From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Wed, 5 Aug 2015 17:04:01 -0300
Subject: Initial import
---
 drivers/net/ethernet/3com/3c509.c | 1454 ++ drivers/net/ethernet/3com/3c515.c | 1580 ++ drivers/net/ethernet/3com/3c574_cs.c | 1167 ++ drivers/net/ethernet/3com/3c589_cs.c | 968 + drivers/net/ethernet/3com/3c59x.c | 3361 ++++ drivers/net/ethernet/3com/Kconfig | 107 + drivers/net/ethernet/3com/Makefile | 10 + drivers/net/ethernet/3com/typhoon.c | 2556 +++ drivers/net/ethernet/3com/typhoon.h | 624 + drivers/net/ethernet/8390/8390.c | 103 + drivers/net/ethernet/8390/8390.h | 225 + drivers/net/ethernet/8390/8390p.c | 105 + drivers/net/ethernet/8390/Kconfig | 206 + drivers/net/ethernet/8390/Makefile | 18 + drivers/net/ethernet/8390/apne.c | 626 + drivers/net/ethernet/8390/ax88796.c | 1015 + drivers/net/ethernet/8390/axnet_cs.c | 1703 ++ drivers/net/ethernet/8390/etherh.c | 874 + drivers/net/ethernet/8390/hydra.c | 278 + drivers/net/ethernet/8390/lib8390.c | 1084 ++ drivers/net/ethernet/8390/mac8390.c | 874 + drivers/net/ethernet/8390/mcf8390.c | 480 + drivers/net/ethernet/8390/ne.c | 1018 ++ drivers/net/ethernet/8390/ne2k-pci.c | 743 + drivers/net/ethernet/8390/pcnet_cs.c | 1699 ++ drivers/net/ethernet/8390/smc-ultra.c | 631 + drivers/net/ethernet/8390/stnic.c | 303 + drivers/net/ethernet/8390/wd.c | 574 + drivers/net/ethernet/8390/zorro8390.c | 458 + drivers/net/ethernet/Kconfig | 178 + drivers/net/ethernet/Makefile | 86 + drivers/net/ethernet/adaptec/Kconfig | 35 + drivers/net/ethernet/adaptec/Makefile | 5 + drivers/net/ethernet/adaptec/starfire.c | 2059 +++ drivers/net/ethernet/adi/Kconfig | 68 + drivers/net/ethernet/adi/Makefile | 5 + drivers/net/ethernet/adi/bfin_mac.c | 1933 ++ drivers/net/ethernet/adi/bfin_mac.h | 112 + drivers/net/ethernet/aeroflex/Kconfig | 11 + drivers/net/ethernet/aeroflex/Makefile | 5 + drivers/net/ethernet/aeroflex/greth.c | 1616 ++ drivers/net/ethernet/aeroflex/greth.h | 142 + drivers/net/ethernet/agere/Kconfig | 31 + drivers/net/ethernet/agere/Makefile | 5 + drivers/net/ethernet/agere/et131x.c | 4123 +++++ drivers/net/ethernet/agere/et131x.h | 1433 ++ drivers/net/ethernet/allwinner/Kconfig | 37 + drivers/net/ethernet/allwinner/Makefile | 5 + drivers/net/ethernet/allwinner/sun4i-emac.c | 972 + drivers/net/ethernet/allwinner/sun4i-emac.h | 108 + drivers/net/ethernet/alteon/Kconfig | 48 + drivers/net/ethernet/alteon/Makefile | 5 + drivers/net/ethernet/alteon/acenic.c | 3189 ++++ drivers/net/ethernet/alteon/acenic.h | 790 + drivers/net/ethernet/altera/Kconfig | 9 + drivers/net/ethernet/altera/Makefile | 8 + drivers/net/ethernet/altera/altera_msgdma.c | 206 + drivers/net/ethernet/altera/altera_msgdma.h | 35 + drivers/net/ethernet/altera/altera_msgdmahw.h | 158 + drivers/net/ethernet/altera/altera_sgdma.c | 540 + drivers/net/ethernet/altera/altera_sgdma.h | 36 + drivers/net/ethernet/altera/altera_sgdmahw.h | 126 + drivers/net/ethernet/altera/altera_tse.h | 537 + drivers/net/ethernet/altera/altera_tse_ethtool.c | 275 + drivers/net/ethernet/altera/altera_tse_main.c | 1634 ++ drivers/net/ethernet/altera/altera_utils.c | 44 + drivers/net/ethernet/altera/altera_utils.h | 27 + drivers/net/ethernet/amd/7990.c | 666 + drivers/net/ethernet/amd/7990.h | 250 + drivers/net/ethernet/amd/Kconfig | 206 + drivers/net/ethernet/amd/Makefile | 20 + drivers/net/ethernet/amd/a2065.c | 783 + drivers/net/ethernet/amd/a2065.h | 173 + drivers/net/ethernet/amd/am79c961a.c | 768 + drivers/net/ethernet/amd/am79c961a.h | 145 + 
drivers/net/ethernet/amd/amd8111e.c | 1971 ++ drivers/net/ethernet/amd/amd8111e.h | 813 + drivers/net/ethernet/amd/ariadne.c | 792 + drivers/net/ethernet/amd/ariadne.h | 415 + drivers/net/ethernet/amd/atarilance.c | 1169 ++ drivers/net/ethernet/amd/au1000_eth.c | 1469 ++ drivers/net/ethernet/amd/au1000_eth.h | 133 + drivers/net/ethernet/amd/declance.c | 1376 ++ drivers/net/ethernet/amd/hplance.c | 232 + drivers/net/ethernet/amd/hplance.h | 26 + drivers/net/ethernet/amd/lance.c | 1312 ++ drivers/net/ethernet/amd/mvme147.c | 200 + drivers/net/ethernet/amd/ni65.c | 1252 ++ drivers/net/ethernet/amd/ni65.h | 121 + drivers/net/ethernet/amd/nmclan_cs.c | 1512 ++ drivers/net/ethernet/amd/pcnet32.c | 2989 +++ drivers/net/ethernet/amd/sun3lance.c | 956 + drivers/net/ethernet/amd/sunlance.c | 1533 ++ drivers/net/ethernet/amd/xgbe/Makefile | 8 + drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1145 ++ drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 269 + drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 373 + drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 636 + drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2943 +++ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2227 +++ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 601 + drivers/net/ethernet/amd/xgbe/xgbe-main.c | 631 + drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 312 + drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 279 + drivers/net/ethernet/amd/xgbe/xgbe.h | 867 + drivers/net/ethernet/apm/Kconfig | 1 + drivers/net/ethernet/apm/Makefile | 5 + drivers/net/ethernet/apm/xgene/Kconfig | 11 + drivers/net/ethernet/apm/xgene/Makefile | 7 + .../net/ethernet/apm/xgene/xgene_enet_ethtool.c | 150 + drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 805 + drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 331 + drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 1203 ++ drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 186 + drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 394 + drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h | 41 + drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 337 + drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 53 + drivers/net/ethernet/apple/Kconfig | 65 + drivers/net/ethernet/apple/Makefile | 7 + drivers/net/ethernet/apple/bmac.c | 1679 ++ drivers/net/ethernet/apple/bmac.h | 164 + drivers/net/ethernet/apple/mace.c | 1028 ++ drivers/net/ethernet/apple/mace.h | 173 + drivers/net/ethernet/apple/macmace.c | 788 + drivers/net/ethernet/arc/Kconfig | 44 + drivers/net/ethernet/arc/Makefile | 8 + drivers/net/ethernet/arc/emac.h | 216 + drivers/net/ethernet/arc/emac_arc.c | 95 + drivers/net/ethernet/arc/emac_main.c | 874 + drivers/net/ethernet/arc/emac_mdio.c | 151 + drivers/net/ethernet/arc/emac_rockchip.c | 229 + drivers/net/ethernet/atheros/Kconfig | 83 + drivers/net/ethernet/atheros/Makefile | 9 + drivers/net/ethernet/atheros/alx/Makefile | 3 + drivers/net/ethernet/atheros/alx/alx.h | 117 + drivers/net/ethernet/atheros/alx/ethtool.c | 313 + drivers/net/ethernet/atheros/alx/hw.c | 1110 ++ drivers/net/ethernet/atheros/alx/hw.h | 581 + drivers/net/ethernet/atheros/alx/main.c | 1559 ++ drivers/net/ethernet/atheros/alx/reg.h | 854 + drivers/net/ethernet/atheros/atl1c/Makefile | 2 + drivers/net/ethernet/atheros/atl1c/atl1c.h | 605 + drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 309 + drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 863 + drivers/net/ethernet/atheros/atl1c/atl1c_hw.h | 1024 ++ drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2803 +++ drivers/net/ethernet/atheros/atl1e/Makefile | 2 + drivers/net/ethernet/atheros/atl1e/atl1e.h | 507 + 
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 392 + drivers/net/ethernet/atheros/atl1e/atl1e_hw.c | 650 + drivers/net/ethernet/atheros/atl1e/atl1e_hw.h | 690 + drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2578 +++ drivers/net/ethernet/atheros/atl1e/atl1e_param.c | 269 + drivers/net/ethernet/atheros/atlx/Makefile | 3 + drivers/net/ethernet/atheros/atlx/atl1.c | 3690 ++++ drivers/net/ethernet/atheros/atlx/atl1.h | 803 + drivers/net/ethernet/atheros/atlx/atl2.c | 3085 ++++ drivers/net/ethernet/atheros/atlx/atl2.h | 524 + drivers/net/ethernet/atheros/atlx/atlx.c | 279 + drivers/net/ethernet/atheros/atlx/atlx.h | 502 + drivers/net/ethernet/broadcom/Kconfig | 164 + drivers/net/ethernet/broadcom/Makefile | 14 + drivers/net/ethernet/broadcom/b44.c | 2615 +++ drivers/net/ethernet/broadcom/b44.h | 409 + drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2919 +++ drivers/net/ethernet/broadcom/bcm63xx_enet.h | 360 + drivers/net/ethernet/broadcom/bcmsysport.c | 2007 ++ drivers/net/ethernet/broadcom/bcmsysport.h | 695 + drivers/net/ethernet/broadcom/bgmac.c | 1726 ++ drivers/net/ethernet/broadcom/bgmac.h | 493 + drivers/net/ethernet/broadcom/bnx2.c | 8805 +++++++++ drivers/net/ethernet/broadcom/bnx2.h | 7465 ++++++++ drivers/net/ethernet/broadcom/bnx2_fw.h | 89 + drivers/net/ethernet/broadcom/bnx2x/Makefile | 8 + drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2580 +++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 5038 +++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 1325 ++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 2552 +++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 205 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 2216 +++ .../net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 3626 ++++ .../net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 396 + .../ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 38 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 5897 ++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 786 + .../net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 842 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 13777 ++++++++++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 544 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 14820 +++++++++++++++ .../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h | 168 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 7669 ++++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 6060 ++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 1496 ++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 3140 ++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 608 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2002 ++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 555 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2168 +++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 462 + drivers/net/ethernet/broadcom/cnic.c | 5759 ++++++ drivers/net/ethernet/broadcom/cnic.h | 427 + drivers/net/ethernet/broadcom/cnic_defs.h | 5462 ++++++ drivers/net/ethernet/broadcom/cnic_if.h | 371 + drivers/net/ethernet/broadcom/genet/Makefile | 2 + drivers/net/ethernet/broadcom/genet/bcmgenet.c | 3365 ++++ drivers/net/ethernet/broadcom/genet/bcmgenet.h | 686 + drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 210 + drivers/net/ethernet/broadcom/genet/bcmmii.c | 592 + drivers/net/ethernet/broadcom/sb1250-mac.c | 2670 +++ drivers/net/ethernet/broadcom/tg3.c | 18271 +++++++++++++++++++ drivers/net/ethernet/broadcom/tg3.h | 3427 ++++ drivers/net/ethernet/brocade/Kconfig | 23 + drivers/net/ethernet/brocade/Makefile | 5 + 
drivers/net/ethernet/brocade/bna/Kconfig | 17 + drivers/net/ethernet/brocade/bna/Makefile | 13 + drivers/net/ethernet/brocade/bna/bfa_cee.c | 288 + drivers/net/ethernet/brocade/bna/bfa_cee.h | 66 + drivers/net/ethernet/brocade/bna/bfa_cs.h | 125 + drivers/net/ethernet/brocade/bna/bfa_defs.h | 296 + drivers/net/ethernet/brocade/bna/bfa_defs_cna.h | 221 + .../net/ethernet/brocade/bna/bfa_defs_mfg_comm.h | 155 + drivers/net/ethernet/brocade/bna/bfa_defs_status.h | 216 + drivers/net/ethernet/brocade/bna/bfa_ioc.c | 3399 ++++ drivers/net/ethernet/brocade/bna/bfa_ioc.h | 369 + drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 945 + drivers/net/ethernet/brocade/bna/bfa_msgq.c | 668 + drivers/net/ethernet/brocade/bna/bfa_msgq.h | 131 + drivers/net/ethernet/brocade/bna/bfi.h | 571 + drivers/net/ethernet/brocade/bna/bfi_cna.h | 164 + drivers/net/ethernet/brocade/bna/bfi_enet.h | 859 + drivers/net/ethernet/brocade/bna/bfi_reg.h | 457 + drivers/net/ethernet/brocade/bna/bna.h | 565 + drivers/net/ethernet/brocade/bna/bna_enet.c | 2159 +++ drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 411 + drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 4018 ++++ drivers/net/ethernet/brocade/bna/bna_types.h | 957 + drivers/net/ethernet/brocade/bna/bnad.c | 3896 ++++ drivers/net/ethernet/brocade/bna/bnad.h | 432 + drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 589 + drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 1140 ++ drivers/net/ethernet/brocade/bna/cna.h | 107 + drivers/net/ethernet/brocade/bna/cna_fwimg.c | 97 + drivers/net/ethernet/cadence/Kconfig | 36 + drivers/net/ethernet/cadence/Makefile | 5 + drivers/net/ethernet/cadence/macb.c | 2935 +++ drivers/net/ethernet/cadence/macb.h | 842 + drivers/net/ethernet/calxeda/Kconfig | 8 + drivers/net/ethernet/calxeda/Makefile | 1 + drivers/net/ethernet/calxeda/xgmac.c | 1949 ++ drivers/net/ethernet/chelsio/Kconfig | 130 + drivers/net/ethernet/chelsio/Makefile | 8 + drivers/net/ethernet/chelsio/cxgb/Makefile | 9 + drivers/net/ethernet/chelsio/cxgb/common.h | 351 + drivers/net/ethernet/chelsio/cxgb/cphy.h | 174 + drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h | 638 + drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 1357 ++ drivers/net/ethernet/chelsio/cxgb/elmer0.h | 157 + drivers/net/ethernet/chelsio/cxgb/espi.c | 372 + drivers/net/ethernet/chelsio/cxgb/espi.h | 67 + drivers/net/ethernet/chelsio/cxgb/fpga_defs.h | 232 + drivers/net/ethernet/chelsio/cxgb/gmac.h | 141 + drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c | 397 + drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h | 127 + drivers/net/ethernet/chelsio/cxgb/mv88x201x.c | 259 + drivers/net/ethernet/chelsio/cxgb/my3126.c | 209 + drivers/net/ethernet/chelsio/cxgb/pm3393.c | 795 + drivers/net/ethernet/chelsio/cxgb/regs.h | 2167 +++ drivers/net/ethernet/chelsio/cxgb/sge.c | 2124 +++ drivers/net/ethernet/chelsio/cxgb/sge.h | 93 + drivers/net/ethernet/chelsio/cxgb/subr.c | 1128 ++ .../net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h | 1642 ++ drivers/net/ethernet/chelsio/cxgb/tp.c | 171 + drivers/net/ethernet/chelsio/cxgb/tp.h | 72 + drivers/net/ethernet/chelsio/cxgb/vsc7326.c | 730 + drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h | 297 + drivers/net/ethernet/chelsio/cxgb3/Makefile | 8 + drivers/net/ethernet/chelsio/cxgb3/adapter.h | 334 + drivers/net/ethernet/chelsio/cxgb3/ael1002.c | 941 + drivers/net/ethernet/chelsio/cxgb3/aq100x.c | 354 + drivers/net/ethernet/chelsio/cxgb3/common.h | 773 + .../net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h | 189 + drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h | 114 + 
drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h | 177 + drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 3431 ++++ drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 1427 ++ drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h | 209 + .../net/ethernet/chelsio/cxgb3/firmware_exports.h | 177 + drivers/net/ethernet/chelsio/cxgb3/l2t.c | 470 + drivers/net/ethernet/chelsio/cxgb3/l2t.h | 149 + drivers/net/ethernet/chelsio/cxgb3/mc5.c | 422 + drivers/net/ethernet/chelsio/cxgb3/regs.h | 2563 +++ drivers/net/ethernet/chelsio/cxgb3/sge.c | 3305 ++++ drivers/net/ethernet/chelsio/cxgb3/sge_defs.h | 255 + drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h | 1495 ++ drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 3777 ++++ drivers/net/ethernet/chelsio/cxgb3/t3cdev.h | 70 + drivers/net/ethernet/chelsio/cxgb3/version.h | 44 + drivers/net/ethernet/chelsio/cxgb3/vsc8211.c | 416 + drivers/net/ethernet/chelsio/cxgb3/xgmac.c | 657 + drivers/net/ethernet/chelsio/cxgb4/Makefile | 10 + drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 318 + drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 43 + drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1313 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 1243 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 162 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2073 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h | 83 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 915 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c | 122 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h | 57 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4826 +++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 312 + drivers/net/ethernet/chelsio/cxgb4/l2t.c | 666 + drivers/net/ethernet/chelsio/cxgb4/l2t.h | 111 + drivers/net/ethernet/chelsio/cxgb4/sge.c | 3078 ++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 5719 ++++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 251 + drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 1097 ++ drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 158 + drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 2717 +++ drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 124 + drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 3177 ++++ drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 48 + drivers/net/ethernet/chelsio/cxgb4vf/Makefile | 7 + drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 552 + .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 3098 ++++ drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2655 +++ drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 326 + drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h | 121 + drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 1602 ++ drivers/net/ethernet/cirrus/Kconfig | 65 + drivers/net/ethernet/cirrus/Makefile | 7 + drivers/net/ethernet/cirrus/cs89x0.c | 1907 ++ drivers/net/ethernet/cirrus/cs89x0.h | 465 + drivers/net/ethernet/cirrus/ep93xx_eth.c | 888 + drivers/net/ethernet/cirrus/mac89x0.c | 633 + drivers/net/ethernet/cisco/Kconfig | 23 + drivers/net/ethernet/cisco/Makefile | 5 + drivers/net/ethernet/cisco/enic/Kconfig | 9 + drivers/net/ethernet/cisco/enic/Makefile | 6 + drivers/net/ethernet/cisco/enic/cq_desc.h | 80 + drivers/net/ethernet/cisco/enic/cq_enet_desc.h | 185 + drivers/net/ethernet/cisco/enic/enic.h | 266 + drivers/net/ethernet/cisco/enic/enic_api.c | 48 + drivers/net/ethernet/cisco/enic/enic_api.h | 30 + drivers/net/ethernet/cisco/enic/enic_clsf.c | 284 + drivers/net/ethernet/cisco/enic/enic_clsf.h | 37 + drivers/net/ethernet/cisco/enic/enic_dev.c | 236 + drivers/net/ethernet/cisco/enic/enic_dev.h | 60 + 
drivers/net/ethernet/cisco/enic/enic_ethtool.c | 507 + drivers/net/ethernet/cisco/enic/enic_main.c | 2724 +++ drivers/net/ethernet/cisco/enic/enic_pp.c | 368 + drivers/net/ethernet/cisco/enic/enic_pp.h | 36 + drivers/net/ethernet/cisco/enic/enic_res.c | 385 + drivers/net/ethernet/cisco/enic/enic_res.h | 153 + drivers/net/ethernet/cisco/enic/rq_enet_desc.h | 60 + drivers/net/ethernet/cisco/enic/vnic_cq.c | 91 + drivers/net/ethernet/cisco/enic/vnic_cq.h | 123 + drivers/net/ethernet/cisco/enic/vnic_dev.c | 1108 ++ drivers/net/ethernet/cisco/enic/vnic_dev.h | 139 + drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 632 + drivers/net/ethernet/cisco/enic/vnic_enet.h | 59 + drivers/net/ethernet/cisco/enic/vnic_intr.c | 68 + drivers/net/ethernet/cisco/enic/vnic_intr.h | 111 + drivers/net/ethernet/cisco/enic/vnic_nic.h | 72 + drivers/net/ethernet/cisco/enic/vnic_resource.h | 76 + drivers/net/ethernet/cisco/enic/vnic_rq.c | 215 + drivers/net/ethernet/cisco/enic/vnic_rq.h | 334 + drivers/net/ethernet/cisco/enic/vnic_rss.h | 45 + drivers/net/ethernet/cisco/enic/vnic_stats.h | 75 + drivers/net/ethernet/cisco/enic/vnic_vic.c | 79 + drivers/net/ethernet/cisco/enic/vnic_vic.h | 83 + drivers/net/ethernet/cisco/enic/vnic_wq.c | 198 + drivers/net/ethernet/cisco/enic/vnic_wq.h | 178 + drivers/net/ethernet/cisco/enic/wq_enet_desc.h | 98 + drivers/net/ethernet/davicom/Kconfig | 23 + drivers/net/ethernet/davicom/Makefile | 5 + drivers/net/ethernet/davicom/dm9000.c | 1804 ++ drivers/net/ethernet/davicom/dm9000.h | 180 + drivers/net/ethernet/dec/Kconfig | 21 + drivers/net/ethernet/dec/Makefile | 5 + drivers/net/ethernet/dec/tulip/21142.c | 257 + drivers/net/ethernet/dec/tulip/Kconfig | 172 + drivers/net/ethernet/dec/tulip/Makefile | 19 + drivers/net/ethernet/dec/tulip/de2104x.c | 2207 +++ drivers/net/ethernet/dec/tulip/de4x5.c | 5575 ++++++ drivers/net/ethernet/dec/tulip/de4x5.h | 1017 ++ drivers/net/ethernet/dec/tulip/dmfe.c | 2273 +++ drivers/net/ethernet/dec/tulip/eeprom.c | 382 + drivers/net/ethernet/dec/tulip/interrupt.c | 814 + drivers/net/ethernet/dec/tulip/media.c | 552 + drivers/net/ethernet/dec/tulip/pnic.c | 170 + drivers/net/ethernet/dec/tulip/pnic2.c | 403 + drivers/net/ethernet/dec/tulip/timer.c | 176 + drivers/net/ethernet/dec/tulip/tulip.h | 570 + drivers/net/ethernet/dec/tulip/tulip_core.c | 2001 ++ drivers/net/ethernet/dec/tulip/uli526x.c | 1845 ++ drivers/net/ethernet/dec/tulip/winbond-840.c | 1665 ++ drivers/net/ethernet/dec/tulip/xircom_cb.c | 1171 ++ drivers/net/ethernet/dlink/Kconfig | 55 + drivers/net/ethernet/dlink/Makefile | 6 + drivers/net/ethernet/dlink/dl2k.c | 1768 ++ drivers/net/ethernet/dlink/dl2k.h | 425 + drivers/net/ethernet/dlink/sundance.c | 2027 ++ drivers/net/ethernet/dnet.c | 945 + drivers/net/ethernet/dnet.h | 225 + drivers/net/ethernet/ec_bhf.c | 625 + drivers/net/ethernet/emulex/Kconfig | 23 + drivers/net/ethernet/emulex/Makefile | 5 + drivers/net/ethernet/emulex/benet/Kconfig | 14 + drivers/net/ethernet/emulex/benet/Makefile | 7 + drivers/net/ethernet/emulex/benet/be.h | 821 + drivers/net/ethernet/emulex/benet/be_cmds.c | 4248 +++++ drivers/net/ethernet/emulex/benet/be_cmds.h | 2369 +++ drivers/net/ethernet/emulex/benet/be_ethtool.c | 1324 ++ drivers/net/ethernet/emulex/benet/be_hw.h | 354 + drivers/net/ethernet/emulex/benet/be_main.c | 5761 ++++++ drivers/net/ethernet/emulex/benet/be_roce.c | 200 + drivers/net/ethernet/emulex/benet/be_roce.h | 79 + drivers/net/ethernet/ethoc.c | 1324 ++ drivers/net/ethernet/faraday/Kconfig | 39 + drivers/net/ethernet/faraday/Makefile | 6 + 
drivers/net/ethernet/faraday/ftgmac100.c | 1345 ++ drivers/net/ethernet/faraday/ftgmac100.h | 246 + drivers/net/ethernet/faraday/ftmac100.c | 1202 ++ drivers/net/ethernet/faraday/ftmac100.h | 180 + drivers/net/ethernet/fealnx.c | 1971 ++ drivers/net/ethernet/freescale/Kconfig | 96 + drivers/net/ethernet/freescale/Makefile | 19 + drivers/net/ethernet/freescale/fec.h | 569 + drivers/net/ethernet/freescale/fec_main.c | 3503 ++++ drivers/net/ethernet/freescale/fec_mpc52xx.c | 1116 ++ drivers/net/ethernet/freescale/fec_mpc52xx.h | 294 + drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 158 + drivers/net/ethernet/freescale/fec_ptp.c | 637 + drivers/net/ethernet/freescale/fs_enet/Kconfig | 34 + drivers/net/ethernet/freescale/fs_enet/Makefile | 14 + drivers/net/ethernet/freescale/fs_enet/fec.h | 44 + .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 1141 ++ drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 250 + drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 614 + drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 533 + drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 516 + .../net/ethernet/freescale/fs_enet/mii-bitbang.c | 233 + drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 234 + drivers/net/ethernet/freescale/fsl_pq_mdio.c | 499 + drivers/net/ethernet/freescale/gianfar.c | 3621 ++++ drivers/net/ethernet/freescale/gianfar.h | 1343 ++ drivers/net/ethernet/freescale/gianfar_ethtool.c | 1911 ++ drivers/net/ethernet/freescale/gianfar_ptp.c | 574 + drivers/net/ethernet/freescale/ucc_geth.c | 3983 ++++ drivers/net/ethernet/freescale/ucc_geth.h | 1238 ++ drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 421 + drivers/net/ethernet/freescale/xgmac_mdio.c | 333 + drivers/net/ethernet/fujitsu/Kconfig | 31 + drivers/net/ethernet/fujitsu/Makefile | 5 + drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 1172 ++ drivers/net/ethernet/hisilicon/Kconfig | 36 + drivers/net/ethernet/hisilicon/Makefile | 6 + drivers/net/ethernet/hisilicon/hip04_eth.c | 975 + drivers/net/ethernet/hisilicon/hip04_mdio.c | 186 + drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 1066 ++ drivers/net/ethernet/hp/Kconfig | 32 + drivers/net/ethernet/hp/Makefile | 5 + drivers/net/ethernet/hp/hp100.c | 3069 ++++ drivers/net/ethernet/hp/hp100.h | 615 + drivers/net/ethernet/i825xx/82596.c | 1548 ++ drivers/net/ethernet/i825xx/Kconfig | 69 + drivers/net/ethernet/i825xx/Makefile | 10 + drivers/net/ethernet/i825xx/ether1.c | 1087 ++ drivers/net/ethernet/i825xx/ether1.h | 280 + drivers/net/ethernet/i825xx/lasi_82596.c | 237 + drivers/net/ethernet/i825xx/lib82596.c | 1409 ++ drivers/net/ethernet/i825xx/sni_82596.c | 184 + drivers/net/ethernet/i825xx/sun3_82586.c | 1188 ++ drivers/net/ethernet/i825xx/sun3_82586.h | 318 + drivers/net/ethernet/ibm/Kconfig | 42 + drivers/net/ethernet/ibm/Makefile | 7 + drivers/net/ethernet/ibm/ehea/Makefile | 6 + drivers/net/ethernet/ibm/ehea/ehea.h | 490 + drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 282 + drivers/net/ethernet/ibm/ehea/ehea_hw.h | 267 + drivers/net/ethernet/ibm/ehea/ehea_main.c | 3620 ++++ drivers/net/ethernet/ibm/ehea/ehea_phyp.c | 626 + drivers/net/ethernet/ibm/ehea/ehea_phyp.h | 447 + drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 1022 ++ drivers/net/ethernet/ibm/ehea/ehea_qmr.h | 404 + drivers/net/ethernet/ibm/emac/Kconfig | 76 + drivers/net/ethernet/ibm/emac/Makefile | 11 + drivers/net/ethernet/ibm/emac/core.c | 3100 ++++ drivers/net/ethernet/ibm/emac/core.h | 467 + drivers/net/ethernet/ibm/emac/debug.c | 270 + drivers/net/ethernet/ibm/emac/debug.h | 83 + drivers/net/ethernet/ibm/emac/emac.h 
| 314 + drivers/net/ethernet/ibm/emac/mal.c | 793 + drivers/net/ethernet/ibm/emac/mal.h | 312 + drivers/net/ethernet/ibm/emac/phy.c | 541 + drivers/net/ethernet/ibm/emac/phy.h | 87 + drivers/net/ethernet/ibm/emac/rgmii.c | 336 + drivers/net/ethernet/ibm/emac/rgmii.h | 82 + drivers/net/ethernet/ibm/emac/tah.c | 180 + drivers/net/ethernet/ibm/emac/tah.h | 95 + drivers/net/ethernet/ibm/emac/zmii.c | 327 + drivers/net/ethernet/ibm/emac/zmii.h | 78 + drivers/net/ethernet/ibm/ibmveth.c | 1663 ++ drivers/net/ethernet/ibm/ibmveth.h | 206 + drivers/net/ethernet/icplus/Kconfig | 13 + drivers/net/ethernet/icplus/Makefile | 5 + drivers/net/ethernet/icplus/ipg.c | 2300 +++ drivers/net/ethernet/icplus/ipg.h | 748 + drivers/net/ethernet/intel/Kconfig | 356 + drivers/net/ethernet/intel/Makefile | 15 + drivers/net/ethernet/intel/e100.c | 3201 ++++ drivers/net/ethernet/intel/e1000/Makefile | 35 + drivers/net/ethernet/intel/e1000/e1000.h | 374 + drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 1911 ++ drivers/net/ethernet/intel/e1000/e1000_hw.c | 5689 ++++++ drivers/net/ethernet/intel/e1000/e1000_hw.h | 3110 ++++ drivers/net/ethernet/intel/e1000/e1000_main.c | 5322 ++++++ drivers/net/ethernet/intel/e1000/e1000_osdep.h | 109 + drivers/net/ethernet/intel/e1000/e1000_param.c | 754 + drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1418 ++ drivers/net/ethernet/intel/e1000e/80003es2lan.h | 88 + drivers/net/ethernet/intel/e1000e/82571.c | 2063 +++ drivers/net/ethernet/intel/e1000e/82571.h | 53 + drivers/net/ethernet/intel/e1000e/Makefile | 37 + drivers/net/ethernet/intel/e1000e/defines.h | 800 + drivers/net/ethernet/intel/e1000e/e1000.h | 595 + drivers/net/ethernet/intel/e1000e/ethtool.c | 2322 +++ drivers/net/ethernet/intel/e1000e/hw.h | 698 + drivers/net/ethernet/intel/e1000e/ich8lan.c | 5802 ++++++ drivers/net/ethernet/intel/e1000e/ich8lan.h | 303 + drivers/net/ethernet/intel/e1000e/mac.c | 1800 ++ drivers/net/ethernet/intel/e1000e/mac.h | 68 + drivers/net/ethernet/intel/e1000e/manage.c | 347 + drivers/net/ethernet/intel/e1000e/manage.h | 65 + drivers/net/ethernet/intel/e1000e/netdev.c | 7314 ++++++++ drivers/net/ethernet/intel/e1000e/nvm.c | 633 + drivers/net/ethernet/intel/e1000e/nvm.h | 40 + drivers/net/ethernet/intel/e1000e/param.c | 533 + drivers/net/ethernet/intel/e1000e/phy.c | 3237 ++++ drivers/net/ethernet/intel/e1000e/phy.h | 236 + drivers/net/ethernet/intel/e1000e/ptp.c | 273 + drivers/net/ethernet/intel/e1000e/regs.h | 250 + drivers/net/ethernet/intel/fm10k/Makefile | 33 + drivers/net/ethernet/intel/fm10k/fm10k.h | 536 + drivers/net/ethernet/intel/fm10k/fm10k_common.c | 533 + drivers/net/ethernet/intel/fm10k/fm10k_common.h | 65 + drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 174 + drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 261 + drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 1108 ++ drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 517 + drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2002 ++ drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 2140 +++ drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 307 + drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 1449 ++ drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2190 +++ drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 1880 ++ drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 135 + drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 461 + drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 863 + drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 186 + drivers/net/ethernet/intel/fm10k/fm10k_type.h | 773 + drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 582 + 
drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 78 + drivers/net/ethernet/intel/i40e/Makefile | 47 + drivers/net/ethernet/intel/i40e/i40e.h | 759 + drivers/net/ethernet/intel/i40e/i40e_adminq.c | 1034 ++ drivers/net/ethernet/intel/i40e/i40e_adminq.h | 157 + drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2337 +++ drivers/net/ethernet/intel/i40e/i40e_alloc.h | 58 + drivers/net/ethernet/intel/i40e/i40e_common.c | 3644 ++++ drivers/net/ethernet/intel/i40e/i40e_dcb.c | 720 + drivers/net/ethernet/intel/i40e/i40e_dcb.h | 112 + drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 321 + drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2258 +++ drivers/net/ethernet/intel/i40e/i40e_diag.c | 154 + drivers/net/ethernet/intel/i40e/i40e_diag.h | 51 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2587 +++ drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 1565 ++ drivers/net/ethernet/intel/i40e/i40e_fcoe.h | 127 + drivers/net/ethernet/intel/i40e/i40e_hmc.c | 360 + drivers/net/ethernet/intel/i40e/i40e_hmc.h | 236 + drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 1143 ++ drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 181 + drivers/net/ethernet/intel/i40e/i40e_main.c | 10368 +++++++++++ drivers/net/ethernet/intel/i40e/i40e_nvm.c | 1006 + drivers/net/ethernet/intel/i40e/i40e_osdep.h | 84 + drivers/net/ethernet/intel/i40e/i40e_prototype.h | 311 + drivers/net/ethernet/intel/i40e/i40e_ptp.c | 728 + drivers/net/ethernet/intel/i40e/i40e_register.h | 3369 ++++ drivers/net/ethernet/intel/i40e/i40e_status.h | 100 + drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2778 +++ drivers/net/ethernet/intel/i40e/i40e_txrx.h | 313 + drivers/net/ethernet/intel/i40e/i40e_type.h | 1421 ++ drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 362 + drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2402 +++ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 130 + drivers/net/ethernet/intel/i40evf/Makefile | 36 + drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 979 + drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 157 + .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2269 +++ drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 58 + drivers/net/ethernet/intel/i40evf/i40e_common.c | 636 + drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 236 + drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 181 + drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 75 + drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 91 + drivers/net/ethernet/intel/i40evf/i40e_register.h | 3369 ++++ drivers/net/ethernet/intel/i40evf/i40e_status.h | 100 + drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2003 ++ drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 301 + drivers/net/ethernet/intel/i40evf/i40e_type.h | 1250 ++ drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 362 + drivers/net/ethernet/intel/i40evf/i40evf.h | 300 + drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 722 + drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2561 +++ .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 759 + drivers/net/ethernet/intel/igb/Makefile | 36 + drivers/net/ethernet/intel/igb/e1000_82575.c | 2884 +++ drivers/net/ethernet/intel/igb/e1000_82575.h | 279 + drivers/net/ethernet/intel/igb/e1000_defines.h | 1016 ++ drivers/net/ethernet/intel/igb/e1000_hw.h | 568 + drivers/net/ethernet/intel/igb/e1000_i210.c | 902 + drivers/net/ethernet/intel/igb/e1000_i210.h | 93 + drivers/net/ethernet/intel/igb/e1000_mac.c | 1606 ++ drivers/net/ethernet/intel/igb/e1000_mac.h | 87 + drivers/net/ethernet/intel/igb/e1000_mbx.c | 443 + drivers/net/ethernet/intel/igb/e1000_mbx.h | 73 + 
drivers/net/ethernet/intel/igb/e1000_nvm.c | 801 + drivers/net/ethernet/intel/igb/e1000_nvm.h | 56 + drivers/net/ethernet/intel/igb/e1000_phy.c | 2511 +++ drivers/net/ethernet/intel/igb/e1000_phy.h | 174 + drivers/net/ethernet/intel/igb/e1000_regs.h | 426 + drivers/net/ethernet/intel/igb/igb.h | 584 + drivers/net/ethernet/intel/igb/igb_ethtool.c | 3064 ++++ drivers/net/ethernet/intel/igb/igb_hwmon.c | 249 + drivers/net/ethernet/intel/igb/igb_main.c | 8140 +++++++++ drivers/net/ethernet/intel/igb/igb_ptp.c | 1182 ++ drivers/net/ethernet/intel/igbvf/Makefile | 38 + drivers/net/ethernet/intel/igbvf/defines.h | 120 + drivers/net/ethernet/intel/igbvf/ethtool.c | 476 + drivers/net/ethernet/intel/igbvf/igbvf.h | 322 + drivers/net/ethernet/intel/igbvf/mbx.c | 349 + drivers/net/ethernet/intel/igbvf/mbx.h | 74 + drivers/net/ethernet/intel/igbvf/netdev.c | 2920 +++ drivers/net/ethernet/intel/igbvf/regs.h | 107 + drivers/net/ethernet/intel/igbvf/vf.c | 399 + drivers/net/ethernet/intel/igbvf/vf.h | 263 + drivers/net/ethernet/intel/ixgb/Makefile | 35 + drivers/net/ethernet/intel/ixgb/ixgb.h | 206 + drivers/net/ethernet/intel/ixgb/ixgb_ee.c | 605 + drivers/net/ethernet/intel/ixgb/ixgb_ee.h | 104 + drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 660 + drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 1263 ++ drivers/net/ethernet/intel/ixgb/ixgb_hw.h | 792 + drivers/net/ethernet/intel/ixgb/ixgb_ids.h | 48 + drivers/net/ethernet/intel/ixgb/ixgb_main.c | 2368 +++ drivers/net/ethernet/intel/ixgb/ixgb_osdep.h | 64 + drivers/net/ethernet/intel/ixgb/ixgb_param.c | 469 + drivers/net/ethernet/intel/ixgbe/Makefile | 44 + drivers/net/ethernet/intel/ixgbe/ixgbe.h | 969 + drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 1237 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 2381 +++ drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 3900 ++++ drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 210 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 401 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 171 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 288 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h | 97 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 363 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 125 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 809 + drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 276 + drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 3165 ++++ drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 1075 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 88 + drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 1221 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9123 +++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 458 + drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 126 + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 2139 +++ drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 168 + drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 997 + drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 1434 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 71 + drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 230 + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 3342 ++++ drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 863 + drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 39 + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 1532 ++ drivers/net/ethernet/intel/ixgbevf/Makefile | 38 + drivers/net/ethernet/intel/ixgbevf/defines.h | 299 + drivers/net/ethernet/intel/ixgbevf/ethtool.c | 888 + drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 504 + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c 
| 4277 +++++ drivers/net/ethernet/intel/ixgbevf/mbx.c | 348 + drivers/net/ethernet/intel/ixgbevf/mbx.h | 128 + drivers/net/ethernet/intel/ixgbevf/regs.h | 84 + drivers/net/ethernet/intel/ixgbevf/vf.c | 752 + drivers/net/ethernet/intel/ixgbevf/vf.h | 215 + drivers/net/ethernet/jme.c | 3372 ++++ drivers/net/ethernet/jme.h | 1278 ++ drivers/net/ethernet/korina.c | 1230 ++ drivers/net/ethernet/lantiq_etop.c | 816 + drivers/net/ethernet/marvell/Kconfig | 143 + drivers/net/ethernet/marvell/Makefile | 11 + drivers/net/ethernet/marvell/mv643xx_eth.c | 3259 ++++ drivers/net/ethernet/marvell/mvmdio.c | 301 + drivers/net/ethernet/marvell/mvneta.c | 3236 ++++ drivers/net/ethernet/marvell/mvpp2.c | 6435 +++++++ drivers/net/ethernet/marvell/pxa168_eth.c | 1630 ++ drivers/net/ethernet/marvell/skge.c | 4226 +++++ drivers/net/ethernet/marvell/skge.h | 2584 +++ drivers/net/ethernet/marvell/sky2.c | 5245 ++++++ drivers/net/ethernet/marvell/sky2.h | 2432 +++ drivers/net/ethernet/mellanox/Kconfig | 24 + drivers/net/ethernet/mellanox/Makefile | 6 + drivers/net/ethernet/mellanox/mlx4/Kconfig | 46 + drivers/net/ethernet/mellanox/mlx4/Makefile | 11 + drivers/net/ethernet/mellanox/mlx4/alloc.c | 835 + drivers/net/ethernet/mellanox/mlx4/catas.c | 327 + drivers/net/ethernet/mellanox/mlx4/cmd.c | 3212 ++++ drivers/net/ethernet/mellanox/mlx4/cq.c | 409 + drivers/net/ethernet/mellanox/mlx4/en_clock.c | 285 + drivers/net/ethernet/mellanox/mlx4/en_cq.c | 232 + drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 486 + drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 2011 ++ drivers/net/ethernet/mellanox/mlx4/en_main.c | 375 + drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 3092 ++++ drivers/net/ethernet/mellanox/mlx4/en_port.c | 352 + drivers/net/ethernet/mellanox/mlx4/en_port.h | 587 + drivers/net/ethernet/mellanox/mlx4/en_resources.c | 119 + drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1307 ++ drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 188 + drivers/net/ethernet/mellanox/mlx4/en_tx.c | 1021 ++ drivers/net/ethernet/mellanox/mlx4/eq.c | 1441 ++ drivers/net/ethernet/mellanox/mlx4/fw.c | 2760 +++ drivers/net/ethernet/mellanox/mlx4/fw.h | 257 + drivers/net/ethernet/mellanox/mlx4/fw_qos.c | 289 + drivers/net/ethernet/mellanox/mlx4/fw_qos.h | 145 + drivers/net/ethernet/mellanox/mlx4/icm.c | 462 + drivers/net/ethernet/mellanox/mlx4/icm.h | 129 + drivers/net/ethernet/mellanox/mlx4/intf.c | 248 + drivers/net/ethernet/mellanox/mlx4/main.c | 3748 ++++ drivers/net/ethernet/mellanox/mlx4/mcg.c | 1634 ++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1437 ++ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 887 + drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 107 + drivers/net/ethernet/mellanox/mlx4/mr.c | 1158 ++ drivers/net/ethernet/mellanox/mlx4/pd.c | 287 + drivers/net/ethernet/mellanox/mlx4/port.c | 1445 ++ drivers/net/ethernet/mellanox/mlx4/profile.c | 273 + drivers/net/ethernet/mellanox/mlx4/qp.c | 909 + drivers/net/ethernet/mellanox/mlx4/reset.c | 184 + .../net/ethernet/mellanox/mlx4/resource_tracker.c | 4957 +++++ drivers/net/ethernet/mellanox/mlx4/sense.c | 143 + drivers/net/ethernet/mellanox/mlx4/srq.c | 313 + drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 8 + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5 + drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 241 + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1587 ++ drivers/net/ethernet/mellanox/mlx5/core/cq.c | 237 + drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 610 + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 539 + 
drivers/net/ethernet/mellanox/mlx5/core/fw.c | 148 + drivers/net/ethernet/mellanox/mlx5/core/health.c | 200 + drivers/net/ethernet/mellanox/mlx5/core/mad.c | 78 + drivers/net/ethernet/mellanox/mlx5/core/main.c | 1070 ++ drivers/net/ethernet/mellanox/mlx5/core/mcg.c | 106 + .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 75 + drivers/net/ethernet/mellanox/mlx5/core/mr.c | 248 + .../net/ethernet/mellanox/mlx5/core/pagealloc.c | 534 + drivers/net/ethernet/mellanox/mlx5/core/pd.c | 101 + drivers/net/ethernet/mellanox/mlx5/core/port.c | 104 + drivers/net/ethernet/mellanox/mlx5/core/qp.c | 443 + drivers/net/ethernet/mellanox/mlx5/core/srq.c | 223 + drivers/net/ethernet/mellanox/mlx5/core/uar.c | 225 + drivers/net/ethernet/micrel/Kconfig | 66 + drivers/net/ethernet/micrel/Makefile | 9 + drivers/net/ethernet/micrel/ks8695net.c | 1630 ++ drivers/net/ethernet/micrel/ks8695net.h | 107 + drivers/net/ethernet/micrel/ks8842.c | 1269 ++ drivers/net/ethernet/micrel/ks8851.c | 1623 ++ drivers/net/ethernet/micrel/ks8851.h | 298 + drivers/net/ethernet/micrel/ks8851_mll.c | 1695 ++ drivers/net/ethernet/micrel/ksz884x.c | 7255 ++++++++ drivers/net/ethernet/microchip/Kconfig | 38 + drivers/net/ethernet/microchip/Makefile | 5 + drivers/net/ethernet/microchip/enc28j60.c | 1663 ++ drivers/net/ethernet/microchip/enc28j60_hw.h | 309 + drivers/net/ethernet/moxa/Kconfig | 30 + drivers/net/ethernet/moxa/Makefile | 5 + drivers/net/ethernet/moxa/moxart_ether.c | 569 + drivers/net/ethernet/moxa/moxart_ether.h | 330 + drivers/net/ethernet/myricom/Kconfig | 46 + drivers/net/ethernet/myricom/Makefile | 5 + drivers/net/ethernet/myricom/myri10ge/Makefile | 5 + drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 4279 +++++ .../net/ethernet/myricom/myri10ge/myri10ge_mcp.h | 435 + .../myricom/myri10ge/myri10ge_mcp_gen_header.h | 60 + drivers/net/ethernet/natsemi/Kconfig | 67 + drivers/net/ethernet/natsemi/Makefile | 9 + drivers/net/ethernet/natsemi/jazzsonic.c | 293 + drivers/net/ethernet/natsemi/macsonic.c | 636 + drivers/net/ethernet/natsemi/natsemi.c | 3374 ++++ drivers/net/ethernet/natsemi/ns83820.c | 2307 +++ drivers/net/ethernet/natsemi/sonic.c | 741 + drivers/net/ethernet/natsemi/sonic.h | 450 + drivers/net/ethernet/natsemi/xtsonic.c | 321 + drivers/net/ethernet/neterion/Kconfig | 55 + drivers/net/ethernet/neterion/Makefile | 6 + drivers/net/ethernet/neterion/s2io-regs.h | 958 + drivers/net/ethernet/neterion/s2io.c | 8661 +++++++++ drivers/net/ethernet/neterion/s2io.h | 1147 ++ drivers/net/ethernet/neterion/vxge/Makefile | 7 + drivers/net/ethernet/neterion/vxge/vxge-config.c | 5114 ++++++ drivers/net/ethernet/neterion/vxge/vxge-config.h | 2111 +++ drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 1151 ++ drivers/net/ethernet/neterion/vxge/vxge-ethtool.h | 48 + drivers/net/ethernet/neterion/vxge/vxge-main.c | 4870 +++++ drivers/net/ethernet/neterion/vxge/vxge-main.h | 520 + drivers/net/ethernet/neterion/vxge/vxge-reg.h | 4636 +++++ drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 2480 +++ drivers/net/ethernet/neterion/vxge/vxge-traffic.h | 2290 +++ drivers/net/ethernet/neterion/vxge/vxge-version.h | 49 + drivers/net/ethernet/netx-eth.c | 503 + drivers/net/ethernet/nuvoton/Kconfig | 30 + drivers/net/ethernet/nuvoton/Makefile | 5 + drivers/net/ethernet/nuvoton/w90p910_ether.c | 1093 ++ drivers/net/ethernet/nvidia/Kconfig | 32 + drivers/net/ethernet/nvidia/Makefile | 5 + drivers/net/ethernet/nvidia/forcedeth.c | 6385 +++++++ drivers/net/ethernet/nxp/Kconfig | 8 + drivers/net/ethernet/nxp/Makefile | 1 + 
drivers/net/ethernet/nxp/lpc_eth.c | 1609 ++ drivers/net/ethernet/octeon/Kconfig | 14 + drivers/net/ethernet/octeon/Makefile | 5 + drivers/net/ethernet/octeon/octeon_mgmt.c | 1597 ++ drivers/net/ethernet/oki-semi/Kconfig | 23 + drivers/net/ethernet/oki-semi/Makefile | 5 + drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 23 + drivers/net/ethernet/oki-semi/pch_gbe/Makefile | 4 + drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 689 + .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 262 + .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h | 35 + .../ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 512 + .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2845 +++ .../net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 521 + .../net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 377 + .../net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 37 + drivers/net/ethernet/packetengines/Kconfig | 47 + drivers/net/ethernet/packetengines/Makefile | 6 + drivers/net/ethernet/packetengines/hamachi.c | 1943 ++ drivers/net/ethernet/packetengines/yellowfin.c | 1425 ++ drivers/net/ethernet/pasemi/Kconfig | 30 + drivers/net/ethernet/pasemi/Makefile | 6 + drivers/net/ethernet/pasemi/pasemi_mac.c | 1902 ++ drivers/net/ethernet/pasemi/pasemi_mac.h | 215 + drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 159 + drivers/net/ethernet/qlogic/Kconfig | 96 + drivers/net/ethernet/qlogic/Makefile | 8 + drivers/net/ethernet/qlogic/netxen/Makefile | 27 + drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 1889 ++ .../net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 941 + .../ethernet/qlogic/netxen/netxen_nic_ethtool.c | 959 + .../net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 1079 ++ drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 2593 +++ drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h | 285 + .../net/ethernet/qlogic/netxen/netxen_nic_init.c | 1933 ++ .../net/ethernet/qlogic/netxen/netxen_nic_main.c | 3526 ++++ drivers/net/ethernet/qlogic/qla3xxx.c | 3949 ++++ drivers/net/ethernet/qlogic/qla3xxx.h | 1189 ++ drivers/net/ethernet/qlogic/qlcnic/Makefile | 15 + drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2404 +++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 4165 +++++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 661 + .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2616 +++ .../net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 284 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 1428 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1147 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 122 + .../net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 1882 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 948 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 1710 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 225 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 1310 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2230 +++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4341 +++++ .../net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 1414 ++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 276 + .../ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2214 +++ .../net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 2047 +++ drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 1438 ++ drivers/net/ethernet/qlogic/qlge/Makefile | 7 + drivers/net/ethernet/qlogic/qlge/qlge.h | 2338 +++ drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2042 +++ drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 735 + drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5016 +++++ drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1284 ++ 
drivers/net/ethernet/qualcomm/Kconfig | 29 + drivers/net/ethernet/qualcomm/Makefile | 6 + drivers/net/ethernet/qualcomm/qca_7k.c | 149 + drivers/net/ethernet/qualcomm/qca_7k.h | 72 + drivers/net/ethernet/qualcomm/qca_debug.c | 311 + drivers/net/ethernet/qualcomm/qca_debug.h | 34 + drivers/net/ethernet/qualcomm/qca_framing.c | 156 + drivers/net/ethernet/qualcomm/qca_framing.h | 134 + drivers/net/ethernet/qualcomm/qca_spi.c | 990 + drivers/net/ethernet/qualcomm/qca_spi.h | 114 + drivers/net/ethernet/rdc/Kconfig | 34 + drivers/net/ethernet/rdc/Makefile | 5 + drivers/net/ethernet/rdc/r6040.c | 1270 ++ drivers/net/ethernet/realtek/8139cp.c | 2120 +++ drivers/net/ethernet/realtek/8139too.c | 2696 +++ drivers/net/ethernet/realtek/Kconfig | 115 + drivers/net/ethernet/realtek/Makefile | 8 + drivers/net/ethernet/realtek/atp.c | 883 + drivers/net/ethernet/realtek/atp.h | 265 + drivers/net/ethernet/realtek/r8169.c | 8355 +++++++++ drivers/net/ethernet/renesas/Kconfig | 17 + drivers/net/ethernet/renesas/Makefile | 5 + drivers/net/ethernet/renesas/sh_eth.c | 3332 ++++ drivers/net/ethernet/renesas/sh_eth.h | 591 + drivers/net/ethernet/rocker/Kconfig | 27 + drivers/net/ethernet/rocker/Makefile | 5 + drivers/net/ethernet/rocker/rocker.c | 5062 +++++ drivers/net/ethernet/rocker/rocker.h | 465 + drivers/net/ethernet/samsung/Kconfig | 32 + drivers/net/ethernet/samsung/Makefile | 5 + drivers/net/ethernet/samsung/sxgbe/Makefile | 4 + drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 537 + drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c | 284 + drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c | 522 + drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h | 296 + drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c | 368 + drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h | 50 + drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 524 + drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2345 +++ drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c | 254 + drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c | 254 + drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h | 104 + .../net/ethernet/samsung/sxgbe/sxgbe_platform.c | 255 + drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 491 + drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c | 91 + drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h | 38 + drivers/net/ethernet/seeq/Kconfig | 35 + drivers/net/ethernet/seeq/Makefile | 6 + drivers/net/ethernet/seeq/ether3.c | 899 + drivers/net/ethernet/seeq/ether3.h | 176 + drivers/net/ethernet/seeq/sgiseeq.c | 837 + drivers/net/ethernet/seeq/sgiseeq.h | 103 + drivers/net/ethernet/sfc/Kconfig | 38 + drivers/net/ethernet/sfc/Makefile | 8 + drivers/net/ethernet/sfc/bitfield.h | 542 + drivers/net/ethernet/sfc/ef10.c | 3713 ++++ drivers/net/ethernet/sfc/ef10_regs.h | 355 + drivers/net/ethernet/sfc/efx.c | 3349 ++++ drivers/net/ethernet/sfc/efx.h | 255 + drivers/net/ethernet/sfc/enum.h | 181 + drivers/net/ethernet/sfc/ethtool.c | 1196 ++ drivers/net/ethernet/sfc/falcon.c | 2892 +++ drivers/net/ethernet/sfc/falcon_boards.c | 764 + drivers/net/ethernet/sfc/farch.c | 2969 +++ drivers/net/ethernet/sfc/farch_regs.h | 2932 +++ drivers/net/ethernet/sfc/filter.h | 272 + drivers/net/ethernet/sfc/io.h | 302 + drivers/net/ethernet/sfc/mcdi.c | 1891 ++ drivers/net/ethernet/sfc/mcdi.h | 361 + drivers/net/ethernet/sfc/mcdi_mon.c | 534 + drivers/net/ethernet/sfc/mcdi_pcol.h | 7907 ++++++++ drivers/net/ethernet/sfc/mcdi_port.c | 1035 ++ drivers/net/ethernet/sfc/mdio_10g.c | 323 + drivers/net/ethernet/sfc/mdio_10g.h | 110 + drivers/net/ethernet/sfc/mtd.c | 133 + 
drivers/net/ethernet/sfc/net_driver.h | 1504 ++ drivers/net/ethernet/sfc/nic.c | 534 + drivers/net/ethernet/sfc/nic.h | 868 + drivers/net/ethernet/sfc/phy.h | 50 + drivers/net/ethernet/sfc/ptp.c | 1939 ++ drivers/net/ethernet/sfc/qt202x_phy.c | 495 + drivers/net/ethernet/sfc/rx.c | 997 + drivers/net/ethernet/sfc/selftest.c | 788 + drivers/net/ethernet/sfc/selftest.h | 55 + drivers/net/ethernet/sfc/siena.c | 1029 ++ drivers/net/ethernet/sfc/siena_sriov.c | 1668 ++ drivers/net/ethernet/sfc/tenxpress.c | 494 + drivers/net/ethernet/sfc/tx.c | 1332 ++ drivers/net/ethernet/sfc/txc43128_phy.c | 560 + drivers/net/ethernet/sfc/vfdi.h | 255 + drivers/net/ethernet/sfc/workarounds.h | 53 + drivers/net/ethernet/sgi/Kconfig | 35 + drivers/net/ethernet/sgi/Makefile | 6 + drivers/net/ethernet/sgi/ioc3-eth.c | 1672 ++ drivers/net/ethernet/sgi/meth.c | 881 + drivers/net/ethernet/sgi/meth.h | 243 + drivers/net/ethernet/silan/Kconfig | 33 + drivers/net/ethernet/silan/Makefile | 5 + drivers/net/ethernet/silan/sc92031.c | 1584 ++ drivers/net/ethernet/sis/Kconfig | 51 + drivers/net/ethernet/sis/Makefile | 6 + drivers/net/ethernet/sis/sis190.c | 1933 ++ drivers/net/ethernet/sis/sis900.c | 2517 +++ drivers/net/ethernet/sis/sis900.h | 329 + drivers/net/ethernet/smsc/Kconfig | 133 + drivers/net/ethernet/smsc/Makefile | 11 + drivers/net/ethernet/smsc/epic100.c | 1596 ++ drivers/net/ethernet/smsc/smc911x.c | 2179 +++ drivers/net/ethernet/smsc/smc911x.h | 923 + drivers/net/ethernet/smsc/smc9194.c | 1559 ++ drivers/net/ethernet/smsc/smc9194.h | 241 + drivers/net/ethernet/smsc/smc91c92_cs.c | 2052 +++ drivers/net/ethernet/smsc/smc91x.c | 2476 +++ drivers/net/ethernet/smsc/smc91x.h | 1079 ++ drivers/net/ethernet/smsc/smsc911x.c | 2681 +++ drivers/net/ethernet/smsc/smsc911x.h | 405 + drivers/net/ethernet/smsc/smsc9420.c | 1751 ++ drivers/net/ethernet/smsc/smsc9420.h | 275 + drivers/net/ethernet/stmicro/Kconfig | 23 + drivers/net/ethernet/stmicro/Makefile | 5 + drivers/net/ethernet/stmicro/stmmac/Kconfig | 40 + drivers/net/ethernet/stmicro/stmmac/Makefile | 12 + drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 166 + drivers/net/ethernet/stmicro/stmmac/common.h | 480 + drivers/net/ethernet/stmicro/stmmac/descs.h | 219 + drivers/net/ethernet/stmicro/stmmac/descs_com.h | 134 + drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 69 + drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 437 + .../net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 265 + drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 366 + drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 142 + drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 126 + drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 318 + .../net/ethernet/stmicro/stmmac/dwmac1000_core.c | 447 + .../net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 221 + .../net/ethernet/stmicro/stmmac/dwmac100_core.c | 198 + drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 146 + drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 116 + drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 267 + drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 402 + drivers/net/ethernet/stmicro/stmmac/mmc.h | 135 + drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 267 + drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 273 + drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 142 + drivers/net/ethernet/stmicro/stmmac/stmmac.h | 144 + .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 778 + .../net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 148 + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3184 ++++ 
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 318 + drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 294 + .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 458 + .../net/ethernet/stmicro/stmmac/stmmac_platform.h | 29 + drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 211 + drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 72 + drivers/net/ethernet/sun/Kconfig | 88 + drivers/net/ethernet/sun/Makefile | 11 + drivers/net/ethernet/sun/cassini.c | 5291 ++++++ drivers/net/ethernet/sun/cassini.h | 2912 +++ drivers/net/ethernet/sun/niu.c | 10220 +++++++++++ drivers/net/ethernet/sun/niu.h | 3306 ++++ drivers/net/ethernet/sun/sunbmac.c | 1282 ++ drivers/net/ethernet/sun/sunbmac.h | 338 + drivers/net/ethernet/sun/sungem.c | 3028 +++ drivers/net/ethernet/sun/sungem.h | 1027 ++ drivers/net/ethernet/sun/sunhme.c | 3391 ++++ drivers/net/ethernet/sun/sunhme.h | 513 + drivers/net/ethernet/sun/sunqe.c | 995 + drivers/net/ethernet/sun/sunqe.h | 350 + drivers/net/ethernet/sun/sunvnet.c | 2060 +++ drivers/net/ethernet/sun/sunvnet.h | 114 + drivers/net/ethernet/tehuti/Kconfig | 27 + drivers/net/ethernet/tehuti/Makefile | 5 + drivers/net/ethernet/tehuti/tehuti.c | 2492 +++ drivers/net/ethernet/tehuti/tehuti.h | 561 + drivers/net/ethernet/ti/Kconfig | 133 + drivers/net/ethernet/ti/Makefile | 21 + drivers/net/ethernet/ti/cpmac.c | 1295 ++ drivers/net/ethernet/ti/cpsw-common.c | 55 + drivers/net/ethernet/ti/cpsw-phy-sel.c | 220 + drivers/net/ethernet/ti/cpsw.c | 2557 +++ drivers/net/ethernet/ti/cpsw.h | 47 + drivers/net/ethernet/ti/cpsw_ale.c | 831 + drivers/net/ethernet/ti/cpsw_ale.h | 114 + drivers/net/ethernet/ti/cpts.c | 408 + drivers/net/ethernet/ti/cpts.h | 145 + drivers/net/ethernet/ti/davinci_cpdma.c | 1038 ++ drivers/net/ethernet/ti/davinci_cpdma.h | 117 + drivers/net/ethernet/ti/davinci_emac.c | 2228 +++ drivers/net/ethernet/ti/davinci_mdio.c | 505 + drivers/net/ethernet/ti/netcp.h | 232 + drivers/net/ethernet/ti/netcp_core.c | 2156 +++ drivers/net/ethernet/ti/netcp_ethss.c | 3094 ++++ drivers/net/ethernet/ti/netcp_sgmii.c | 131 + drivers/net/ethernet/ti/netcp_xgbepcsr.c | 501 + drivers/net/ethernet/ti/tlan.c | 3293 ++++ drivers/net/ethernet/ti/tlan.h | 544 + drivers/net/ethernet/tile/Kconfig | 18 + drivers/net/ethernet/tile/Makefile | 10 + drivers/net/ethernet/tile/tilegx.c | 2281 +++ drivers/net/ethernet/tile/tilepro.c | 2423 +++ drivers/net/ethernet/toshiba/Kconfig | 57 + drivers/net/ethernet/toshiba/Makefile | 10 + drivers/net/ethernet/toshiba/ps3_gelic_net.c | 1894 ++ drivers/net/ethernet/toshiba/ps3_gelic_net.h | 384 + drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 2670 +++ drivers/net/ethernet/toshiba/ps3_gelic_wireless.h | 326 + drivers/net/ethernet/toshiba/spider_net.c | 2572 +++ drivers/net/ethernet/toshiba/spider_net.h | 489 + drivers/net/ethernet/toshiba/spider_net_ethtool.c | 181 + drivers/net/ethernet/toshiba/tc35815.c | 2208 +++ drivers/net/ethernet/tundra/Kconfig | 29 + drivers/net/ethernet/tundra/Makefile | 5 + drivers/net/ethernet/tundra/tsi108_eth.c | 1694 ++ drivers/net/ethernet/tundra/tsi108_eth.h | 354 + drivers/net/ethernet/via/Kconfig | 56 + drivers/net/ethernet/via/Makefile | 6 + drivers/net/ethernet/via/via-rhine.c | 2579 +++ drivers/net/ethernet/via/via-velocity.c | 3765 ++++ drivers/net/ethernet/via/via-velocity.h | 1581 ++ drivers/net/ethernet/wiznet/Kconfig | 74 + drivers/net/ethernet/wiznet/Makefile | 2 + drivers/net/ethernet/wiznet/w5100.c | 797 + drivers/net/ethernet/wiznet/w5300.c | 709 + drivers/net/ethernet/xilinx/Kconfig | 44 + 
drivers/net/ethernet/xilinx/Makefile | 9 + drivers/net/ethernet/xilinx/ll_temac.h | 385 + drivers/net/ethernet/xilinx/ll_temac_main.c | 1182 ++ drivers/net/ethernet/xilinx/ll_temac_mdio.c | 122 + drivers/net/ethernet/xilinx/xilinx_axienet.h | 504 + drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 1654 ++ drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 239 + drivers/net/ethernet/xilinx/xilinx_emaclite.c | 1258 ++ drivers/net/ethernet/xircom/Kconfig | 31 + drivers/net/ethernet/xircom/Makefile | 5 + drivers/net/ethernet/xircom/xirc2ps_cs.c | 1798 ++ drivers/net/ethernet/xscale/Kconfig | 31 + drivers/net/ethernet/xscale/Makefile | 5 + drivers/net/ethernet/xscale/ixp4xx_eth.c | 1537 ++ 1139 files changed, 1128523 insertions(+) create mode 100644 drivers/net/ethernet/3com/3c509.c create mode 100644 drivers/net/ethernet/3com/3c515.c create mode 100644 drivers/net/ethernet/3com/3c574_cs.c create mode 100644 drivers/net/ethernet/3com/3c589_cs.c create mode 100644 drivers/net/ethernet/3com/3c59x.c create mode 100644 drivers/net/ethernet/3com/Kconfig create mode 100644 drivers/net/ethernet/3com/Makefile create mode 100644 drivers/net/ethernet/3com/typhoon.c create mode 100644 drivers/net/ethernet/3com/typhoon.h create mode 100644 drivers/net/ethernet/8390/8390.c create mode 100644 drivers/net/ethernet/8390/8390.h create mode 100644 drivers/net/ethernet/8390/8390p.c create mode 100644 drivers/net/ethernet/8390/Kconfig create mode 100644 drivers/net/ethernet/8390/Makefile create mode 100644 drivers/net/ethernet/8390/apne.c create mode 100644 drivers/net/ethernet/8390/ax88796.c create mode 100644 drivers/net/ethernet/8390/axnet_cs.c create mode 100644 drivers/net/ethernet/8390/etherh.c create mode 100644 drivers/net/ethernet/8390/hydra.c create mode 100644 drivers/net/ethernet/8390/lib8390.c create mode 100644 drivers/net/ethernet/8390/mac8390.c create mode 100644 drivers/net/ethernet/8390/mcf8390.c create mode 100644 drivers/net/ethernet/8390/ne.c create mode 100644 drivers/net/ethernet/8390/ne2k-pci.c create mode 100644 drivers/net/ethernet/8390/pcnet_cs.c create mode 100644 drivers/net/ethernet/8390/smc-ultra.c create mode 100644 drivers/net/ethernet/8390/stnic.c create mode 100644 drivers/net/ethernet/8390/wd.c create mode 100644 drivers/net/ethernet/8390/zorro8390.c create mode 100644 drivers/net/ethernet/Kconfig create mode 100644 drivers/net/ethernet/Makefile create mode 100644 drivers/net/ethernet/adaptec/Kconfig create mode 100644 drivers/net/ethernet/adaptec/Makefile create mode 100644 drivers/net/ethernet/adaptec/starfire.c create mode 100644 drivers/net/ethernet/adi/Kconfig create mode 100644 drivers/net/ethernet/adi/Makefile create mode 100644 drivers/net/ethernet/adi/bfin_mac.c create mode 100644 drivers/net/ethernet/adi/bfin_mac.h create mode 100644 drivers/net/ethernet/aeroflex/Kconfig create mode 100644 drivers/net/ethernet/aeroflex/Makefile create mode 100644 drivers/net/ethernet/aeroflex/greth.c create mode 100644 drivers/net/ethernet/aeroflex/greth.h create mode 100644 drivers/net/ethernet/agere/Kconfig create mode 100644 drivers/net/ethernet/agere/Makefile create mode 100644 drivers/net/ethernet/agere/et131x.c create mode 100644 drivers/net/ethernet/agere/et131x.h create mode 100644 drivers/net/ethernet/allwinner/Kconfig create mode 100644 drivers/net/ethernet/allwinner/Makefile create mode 100644 drivers/net/ethernet/allwinner/sun4i-emac.c create mode 100644 drivers/net/ethernet/allwinner/sun4i-emac.h create mode 100644 drivers/net/ethernet/alteon/Kconfig create mode 100644 
drivers/net/ethernet/alteon/Makefile create mode 100644 drivers/net/ethernet/alteon/acenic.c create mode 100644 drivers/net/ethernet/alteon/acenic.h create mode 100644 drivers/net/ethernet/altera/Kconfig create mode 100644 drivers/net/ethernet/altera/Makefile create mode 100644 drivers/net/ethernet/altera/altera_msgdma.c create mode 100644 drivers/net/ethernet/altera/altera_msgdma.h create mode 100644 drivers/net/ethernet/altera/altera_msgdmahw.h create mode 100644 drivers/net/ethernet/altera/altera_sgdma.c create mode 100644 drivers/net/ethernet/altera/altera_sgdma.h create mode 100644 drivers/net/ethernet/altera/altera_sgdmahw.h create mode 100644 drivers/net/ethernet/altera/altera_tse.h create mode 100644 drivers/net/ethernet/altera/altera_tse_ethtool.c create mode 100644 drivers/net/ethernet/altera/altera_tse_main.c create mode 100644 drivers/net/ethernet/altera/altera_utils.c create mode 100644 drivers/net/ethernet/altera/altera_utils.h create mode 100644 drivers/net/ethernet/amd/7990.c create mode 100644 drivers/net/ethernet/amd/7990.h create mode 100644 drivers/net/ethernet/amd/Kconfig create mode 100644 drivers/net/ethernet/amd/Makefile create mode 100644 drivers/net/ethernet/amd/a2065.c create mode 100644 drivers/net/ethernet/amd/a2065.h create mode 100644 drivers/net/ethernet/amd/am79c961a.c create mode 100644 drivers/net/ethernet/amd/am79c961a.h create mode 100644 drivers/net/ethernet/amd/amd8111e.c create mode 100644 drivers/net/ethernet/amd/amd8111e.h create mode 100644 drivers/net/ethernet/amd/ariadne.c create mode 100644 drivers/net/ethernet/amd/ariadne.h create mode 100644 drivers/net/ethernet/amd/atarilance.c create mode 100644 drivers/net/ethernet/amd/au1000_eth.c create mode 100644 drivers/net/ethernet/amd/au1000_eth.h create mode 100644 drivers/net/ethernet/amd/declance.c create mode 100644 drivers/net/ethernet/amd/hplance.c create mode 100644 drivers/net/ethernet/amd/hplance.h create mode 100644 drivers/net/ethernet/amd/lance.c create mode 100644 drivers/net/ethernet/amd/mvme147.c create mode 100644 drivers/net/ethernet/amd/ni65.c create mode 100644 drivers/net/ethernet/amd/ni65.h create mode 100644 drivers/net/ethernet/amd/nmclan_cs.c create mode 100644 drivers/net/ethernet/amd/pcnet32.c create mode 100644 drivers/net/ethernet/amd/sun3lance.c create mode 100644 drivers/net/ethernet/amd/sunlance.c create mode 100644 drivers/net/ethernet/amd/xgbe/Makefile create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-common.h create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-dcb.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-desc.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-dev.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-drv.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-main.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe.h create mode 100644 drivers/net/ethernet/apm/Kconfig create mode 100644 drivers/net/ethernet/apm/Makefile create mode 100644 drivers/net/ethernet/apm/xgene/Kconfig create mode 100644 drivers/net/ethernet/apm/xgene/Makefile create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h create mode 100644 
drivers/net/ethernet/apm/xgene/xgene_enet_main.c create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_main.h create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h create mode 100644 drivers/net/ethernet/apple/Kconfig create mode 100644 drivers/net/ethernet/apple/Makefile create mode 100644 drivers/net/ethernet/apple/bmac.c create mode 100644 drivers/net/ethernet/apple/bmac.h create mode 100644 drivers/net/ethernet/apple/mace.c create mode 100644 drivers/net/ethernet/apple/mace.h create mode 100644 drivers/net/ethernet/apple/macmace.c create mode 100644 drivers/net/ethernet/arc/Kconfig create mode 100644 drivers/net/ethernet/arc/Makefile create mode 100644 drivers/net/ethernet/arc/emac.h create mode 100644 drivers/net/ethernet/arc/emac_arc.c create mode 100644 drivers/net/ethernet/arc/emac_main.c create mode 100644 drivers/net/ethernet/arc/emac_mdio.c create mode 100644 drivers/net/ethernet/arc/emac_rockchip.c create mode 100644 drivers/net/ethernet/atheros/Kconfig create mode 100644 drivers/net/ethernet/atheros/Makefile create mode 100644 drivers/net/ethernet/atheros/alx/Makefile create mode 100644 drivers/net/ethernet/atheros/alx/alx.h create mode 100644 drivers/net/ethernet/atheros/alx/ethtool.c create mode 100644 drivers/net/ethernet/atheros/alx/hw.c create mode 100644 drivers/net/ethernet/atheros/alx/hw.h create mode 100644 drivers/net/ethernet/atheros/alx/main.c create mode 100644 drivers/net/ethernet/atheros/alx/reg.h create mode 100644 drivers/net/ethernet/atheros/atl1c/Makefile create mode 100644 drivers/net/ethernet/atheros/atl1c/atl1c.h create mode 100644 drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c create mode 100644 drivers/net/ethernet/atheros/atl1c/atl1c_hw.c create mode 100644 drivers/net/ethernet/atheros/atl1c/atl1c_hw.h create mode 100644 drivers/net/ethernet/atheros/atl1c/atl1c_main.c create mode 100644 drivers/net/ethernet/atheros/atl1e/Makefile create mode 100644 drivers/net/ethernet/atheros/atl1e/atl1e.h create mode 100644 drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c create mode 100644 drivers/net/ethernet/atheros/atl1e/atl1e_hw.c create mode 100644 drivers/net/ethernet/atheros/atl1e/atl1e_hw.h create mode 100644 drivers/net/ethernet/atheros/atl1e/atl1e_main.c create mode 100644 drivers/net/ethernet/atheros/atl1e/atl1e_param.c create mode 100644 drivers/net/ethernet/atheros/atlx/Makefile create mode 100644 drivers/net/ethernet/atheros/atlx/atl1.c create mode 100644 drivers/net/ethernet/atheros/atlx/atl1.h create mode 100644 drivers/net/ethernet/atheros/atlx/atl2.c create mode 100644 drivers/net/ethernet/atheros/atlx/atl2.h create mode 100644 drivers/net/ethernet/atheros/atlx/atlx.c create mode 100644 drivers/net/ethernet/atheros/atlx/atlx.h create mode 100644 drivers/net/ethernet/broadcom/Kconfig create mode 100644 drivers/net/ethernet/broadcom/Makefile create mode 100644 drivers/net/ethernet/broadcom/b44.c create mode 100644 drivers/net/ethernet/broadcom/b44.h create mode 100644 drivers/net/ethernet/broadcom/bcm63xx_enet.c create mode 100644 drivers/net/ethernet/broadcom/bcm63xx_enet.h create mode 100644 drivers/net/ethernet/broadcom/bcmsysport.c create mode 100644 drivers/net/ethernet/broadcom/bcmsysport.h create mode 100644 drivers/net/ethernet/broadcom/bgmac.c create mode 100644 drivers/net/ethernet/broadcom/bgmac.h create mode 
100644 drivers/net/ethernet/broadcom/bnx2.c create mode 100644 drivers/net/ethernet/broadcom/bnx2.h create mode 100644 drivers/net/ethernet/broadcom/bnx2_fw.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/Makefile create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h create mode 100644 drivers/net/ethernet/broadcom/cnic.c create mode 100644 drivers/net/ethernet/broadcom/cnic.h create mode 100644 drivers/net/ethernet/broadcom/cnic_defs.h create mode 100644 drivers/net/ethernet/broadcom/cnic_if.h create mode 100644 drivers/net/ethernet/broadcom/genet/Makefile create mode 100644 drivers/net/ethernet/broadcom/genet/bcmgenet.c create mode 100644 drivers/net/ethernet/broadcom/genet/bcmgenet.h create mode 100644 drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c create mode 100644 drivers/net/ethernet/broadcom/genet/bcmmii.c create mode 100644 drivers/net/ethernet/broadcom/sb1250-mac.c create mode 100644 drivers/net/ethernet/broadcom/tg3.c create mode 100644 drivers/net/ethernet/broadcom/tg3.h create mode 100644 drivers/net/ethernet/brocade/Kconfig create mode 100644 drivers/net/ethernet/brocade/Makefile create mode 100644 drivers/net/ethernet/brocade/bna/Kconfig create mode 100644 drivers/net/ethernet/brocade/bna/Makefile create mode 100644 drivers/net/ethernet/brocade/bna/bfa_cee.c create mode 100644 drivers/net/ethernet/brocade/bna/bfa_cee.h create mode 100644 drivers/net/ethernet/brocade/bna/bfa_cs.h create mode 100644 drivers/net/ethernet/brocade/bna/bfa_defs.h create mode 100644 drivers/net/ethernet/brocade/bna/bfa_defs_cna.h create mode 100644 drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h create mode 100644 drivers/net/ethernet/brocade/bna/bfa_defs_status.h create mode 100644 drivers/net/ethernet/brocade/bna/bfa_ioc.c create mode 100644 drivers/net/ethernet/brocade/bna/bfa_ioc.h create mode 100644 drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c create 
mode 100644 drivers/net/ethernet/brocade/bna/bfa_msgq.c create mode 100644 drivers/net/ethernet/brocade/bna/bfa_msgq.h create mode 100644 drivers/net/ethernet/brocade/bna/bfi.h create mode 100644 drivers/net/ethernet/brocade/bna/bfi_cna.h create mode 100644 drivers/net/ethernet/brocade/bna/bfi_enet.h create mode 100644 drivers/net/ethernet/brocade/bna/bfi_reg.h create mode 100644 drivers/net/ethernet/brocade/bna/bna.h create mode 100644 drivers/net/ethernet/brocade/bna/bna_enet.c create mode 100644 drivers/net/ethernet/brocade/bna/bna_hw_defs.h create mode 100644 drivers/net/ethernet/brocade/bna/bna_tx_rx.c create mode 100644 drivers/net/ethernet/brocade/bna/bna_types.h create mode 100644 drivers/net/ethernet/brocade/bna/bnad.c create mode 100644 drivers/net/ethernet/brocade/bna/bnad.h create mode 100644 drivers/net/ethernet/brocade/bna/bnad_debugfs.c create mode 100644 drivers/net/ethernet/brocade/bna/bnad_ethtool.c create mode 100644 drivers/net/ethernet/brocade/bna/cna.h create mode 100644 drivers/net/ethernet/brocade/bna/cna_fwimg.c create mode 100644 drivers/net/ethernet/cadence/Kconfig create mode 100644 drivers/net/ethernet/cadence/Makefile create mode 100644 drivers/net/ethernet/cadence/macb.c create mode 100644 drivers/net/ethernet/cadence/macb.h create mode 100644 drivers/net/ethernet/calxeda/Kconfig create mode 100644 drivers/net/ethernet/calxeda/Makefile create mode 100644 drivers/net/ethernet/calxeda/xgmac.c create mode 100644 drivers/net/ethernet/chelsio/Kconfig create mode 100644 drivers/net/ethernet/chelsio/Makefile create mode 100644 drivers/net/ethernet/chelsio/cxgb/Makefile create mode 100644 drivers/net/ethernet/chelsio/cxgb/common.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/cphy.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/cxgb2.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/elmer0.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/espi.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/espi.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/fpga_defs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/gmac.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/mv88x201x.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/my3126.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/pm3393.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/regs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/sge.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/sge.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/subr.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/tp.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/tp.h create mode 100644 drivers/net/ethernet/chelsio/cxgb/vsc7326.c create mode 100644 drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/Makefile create mode 100644 drivers/net/ethernet/chelsio/cxgb3/adapter.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/ael1002.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/aq100x.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/common.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h create mode 100644 
drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/l2t.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/l2t.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/mc5.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/regs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/sge.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/sge_defs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/t3cdev.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/version.h create mode 100644 drivers/net/ethernet/chelsio/cxgb3/vsc8211.c create mode 100644 drivers/net/ethernet/chelsio/cxgb3/xgmac.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/Makefile create mode 100644 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/l2t.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/l2t.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/sge.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4_hw.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4_regs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4_values.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4vf/Makefile create mode 100644 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4vf/sge.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c create mode 100644 drivers/net/ethernet/cirrus/Kconfig create mode 100644 drivers/net/ethernet/cirrus/Makefile create mode 100644 drivers/net/ethernet/cirrus/cs89x0.c create mode 100644 drivers/net/ethernet/cirrus/cs89x0.h create mode 100644 drivers/net/ethernet/cirrus/ep93xx_eth.c create mode 100644 drivers/net/ethernet/cirrus/mac89x0.c create mode 100644 drivers/net/ethernet/cisco/Kconfig create mode 100644 drivers/net/ethernet/cisco/Makefile 
create mode 100644 drivers/net/ethernet/cisco/enic/Kconfig create mode 100644 drivers/net/ethernet/cisco/enic/Makefile create mode 100644 drivers/net/ethernet/cisco/enic/cq_desc.h create mode 100644 drivers/net/ethernet/cisco/enic/cq_enet_desc.h create mode 100644 drivers/net/ethernet/cisco/enic/enic.h create mode 100644 drivers/net/ethernet/cisco/enic/enic_api.c create mode 100644 drivers/net/ethernet/cisco/enic/enic_api.h create mode 100644 drivers/net/ethernet/cisco/enic/enic_clsf.c create mode 100644 drivers/net/ethernet/cisco/enic/enic_clsf.h create mode 100644 drivers/net/ethernet/cisco/enic/enic_dev.c create mode 100644 drivers/net/ethernet/cisco/enic/enic_dev.h create mode 100644 drivers/net/ethernet/cisco/enic/enic_ethtool.c create mode 100644 drivers/net/ethernet/cisco/enic/enic_main.c create mode 100644 drivers/net/ethernet/cisco/enic/enic_pp.c create mode 100644 drivers/net/ethernet/cisco/enic/enic_pp.h create mode 100644 drivers/net/ethernet/cisco/enic/enic_res.c create mode 100644 drivers/net/ethernet/cisco/enic/enic_res.h create mode 100644 drivers/net/ethernet/cisco/enic/rq_enet_desc.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_cq.c create mode 100644 drivers/net/ethernet/cisco/enic/vnic_cq.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_dev.c create mode 100644 drivers/net/ethernet/cisco/enic/vnic_dev.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_devcmd.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_enet.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_intr.c create mode 100644 drivers/net/ethernet/cisco/enic/vnic_intr.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_nic.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_resource.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_rq.c create mode 100644 drivers/net/ethernet/cisco/enic/vnic_rq.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_rss.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_stats.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_vic.c create mode 100644 drivers/net/ethernet/cisco/enic/vnic_vic.h create mode 100644 drivers/net/ethernet/cisco/enic/vnic_wq.c create mode 100644 drivers/net/ethernet/cisco/enic/vnic_wq.h create mode 100644 drivers/net/ethernet/cisco/enic/wq_enet_desc.h create mode 100644 drivers/net/ethernet/davicom/Kconfig create mode 100644 drivers/net/ethernet/davicom/Makefile create mode 100644 drivers/net/ethernet/davicom/dm9000.c create mode 100644 drivers/net/ethernet/davicom/dm9000.h create mode 100644 drivers/net/ethernet/dec/Kconfig create mode 100644 drivers/net/ethernet/dec/Makefile create mode 100644 drivers/net/ethernet/dec/tulip/21142.c create mode 100644 drivers/net/ethernet/dec/tulip/Kconfig create mode 100644 drivers/net/ethernet/dec/tulip/Makefile create mode 100644 drivers/net/ethernet/dec/tulip/de2104x.c create mode 100644 drivers/net/ethernet/dec/tulip/de4x5.c create mode 100644 drivers/net/ethernet/dec/tulip/de4x5.h create mode 100644 drivers/net/ethernet/dec/tulip/dmfe.c create mode 100644 drivers/net/ethernet/dec/tulip/eeprom.c create mode 100644 drivers/net/ethernet/dec/tulip/interrupt.c create mode 100644 drivers/net/ethernet/dec/tulip/media.c create mode 100644 drivers/net/ethernet/dec/tulip/pnic.c create mode 100644 drivers/net/ethernet/dec/tulip/pnic2.c create mode 100644 drivers/net/ethernet/dec/tulip/timer.c create mode 100644 drivers/net/ethernet/dec/tulip/tulip.h create mode 100644 drivers/net/ethernet/dec/tulip/tulip_core.c create mode 100644 
drivers/net/ethernet/dec/tulip/uli526x.c create mode 100644 drivers/net/ethernet/dec/tulip/winbond-840.c create mode 100644 drivers/net/ethernet/dec/tulip/xircom_cb.c create mode 100644 drivers/net/ethernet/dlink/Kconfig create mode 100644 drivers/net/ethernet/dlink/Makefile create mode 100644 drivers/net/ethernet/dlink/dl2k.c create mode 100644 drivers/net/ethernet/dlink/dl2k.h create mode 100644 drivers/net/ethernet/dlink/sundance.c create mode 100644 drivers/net/ethernet/dnet.c create mode 100644 drivers/net/ethernet/dnet.h create mode 100644 drivers/net/ethernet/ec_bhf.c create mode 100644 drivers/net/ethernet/emulex/Kconfig create mode 100644 drivers/net/ethernet/emulex/Makefile create mode 100644 drivers/net/ethernet/emulex/benet/Kconfig create mode 100644 drivers/net/ethernet/emulex/benet/Makefile create mode 100644 drivers/net/ethernet/emulex/benet/be.h create mode 100644 drivers/net/ethernet/emulex/benet/be_cmds.c create mode 100644 drivers/net/ethernet/emulex/benet/be_cmds.h create mode 100644 drivers/net/ethernet/emulex/benet/be_ethtool.c create mode 100644 drivers/net/ethernet/emulex/benet/be_hw.h create mode 100644 drivers/net/ethernet/emulex/benet/be_main.c create mode 100644 drivers/net/ethernet/emulex/benet/be_roce.c create mode 100644 drivers/net/ethernet/emulex/benet/be_roce.h create mode 100644 drivers/net/ethernet/ethoc.c create mode 100644 drivers/net/ethernet/faraday/Kconfig create mode 100644 drivers/net/ethernet/faraday/Makefile create mode 100644 drivers/net/ethernet/faraday/ftgmac100.c create mode 100644 drivers/net/ethernet/faraday/ftgmac100.h create mode 100644 drivers/net/ethernet/faraday/ftmac100.c create mode 100644 drivers/net/ethernet/faraday/ftmac100.h create mode 100644 drivers/net/ethernet/fealnx.c create mode 100644 drivers/net/ethernet/freescale/Kconfig create mode 100644 drivers/net/ethernet/freescale/Makefile create mode 100644 drivers/net/ethernet/freescale/fec.h create mode 100644 drivers/net/ethernet/freescale/fec_main.c create mode 100644 drivers/net/ethernet/freescale/fec_mpc52xx.c create mode 100644 drivers/net/ethernet/freescale/fec_mpc52xx.h create mode 100644 drivers/net/ethernet/freescale/fec_mpc52xx_phy.c create mode 100644 drivers/net/ethernet/freescale/fec_ptp.c create mode 100644 drivers/net/ethernet/freescale/fs_enet/Kconfig create mode 100644 drivers/net/ethernet/freescale/fs_enet/Makefile create mode 100644 drivers/net/ethernet/freescale/fs_enet/fec.h create mode 100644 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c create mode 100644 drivers/net/ethernet/freescale/fs_enet/fs_enet.h create mode 100644 drivers/net/ethernet/freescale/fs_enet/mac-fcc.c create mode 100644 drivers/net/ethernet/freescale/fs_enet/mac-fec.c create mode 100644 drivers/net/ethernet/freescale/fs_enet/mac-scc.c create mode 100644 drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c create mode 100644 drivers/net/ethernet/freescale/fs_enet/mii-fec.c create mode 100644 drivers/net/ethernet/freescale/fsl_pq_mdio.c create mode 100644 drivers/net/ethernet/freescale/gianfar.c create mode 100644 drivers/net/ethernet/freescale/gianfar.h create mode 100644 drivers/net/ethernet/freescale/gianfar_ethtool.c create mode 100644 drivers/net/ethernet/freescale/gianfar_ptp.c create mode 100644 drivers/net/ethernet/freescale/ucc_geth.c create mode 100644 drivers/net/ethernet/freescale/ucc_geth.h create mode 100644 drivers/net/ethernet/freescale/ucc_geth_ethtool.c create mode 100644 drivers/net/ethernet/freescale/xgmac_mdio.c create mode 100644 
drivers/net/ethernet/fujitsu/Kconfig create mode 100644 drivers/net/ethernet/fujitsu/Makefile create mode 100644 drivers/net/ethernet/fujitsu/fmvj18x_cs.c create mode 100644 drivers/net/ethernet/hisilicon/Kconfig create mode 100644 drivers/net/ethernet/hisilicon/Makefile create mode 100644 drivers/net/ethernet/hisilicon/hip04_eth.c create mode 100644 drivers/net/ethernet/hisilicon/hip04_mdio.c create mode 100644 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c create mode 100644 drivers/net/ethernet/hp/Kconfig create mode 100644 drivers/net/ethernet/hp/Makefile create mode 100644 drivers/net/ethernet/hp/hp100.c create mode 100644 drivers/net/ethernet/hp/hp100.h create mode 100644 drivers/net/ethernet/i825xx/82596.c create mode 100644 drivers/net/ethernet/i825xx/Kconfig create mode 100644 drivers/net/ethernet/i825xx/Makefile create mode 100644 drivers/net/ethernet/i825xx/ether1.c create mode 100644 drivers/net/ethernet/i825xx/ether1.h create mode 100644 drivers/net/ethernet/i825xx/lasi_82596.c create mode 100644 drivers/net/ethernet/i825xx/lib82596.c create mode 100644 drivers/net/ethernet/i825xx/sni_82596.c create mode 100644 drivers/net/ethernet/i825xx/sun3_82586.c create mode 100644 drivers/net/ethernet/i825xx/sun3_82586.h create mode 100644 drivers/net/ethernet/ibm/Kconfig create mode 100644 drivers/net/ethernet/ibm/Makefile create mode 100644 drivers/net/ethernet/ibm/ehea/Makefile create mode 100644 drivers/net/ethernet/ibm/ehea/ehea.h create mode 100644 drivers/net/ethernet/ibm/ehea/ehea_ethtool.c create mode 100644 drivers/net/ethernet/ibm/ehea/ehea_hw.h create mode 100644 drivers/net/ethernet/ibm/ehea/ehea_main.c create mode 100644 drivers/net/ethernet/ibm/ehea/ehea_phyp.c create mode 100644 drivers/net/ethernet/ibm/ehea/ehea_phyp.h create mode 100644 drivers/net/ethernet/ibm/ehea/ehea_qmr.c create mode 100644 drivers/net/ethernet/ibm/ehea/ehea_qmr.h create mode 100644 drivers/net/ethernet/ibm/emac/Kconfig create mode 100644 drivers/net/ethernet/ibm/emac/Makefile create mode 100644 drivers/net/ethernet/ibm/emac/core.c create mode 100644 drivers/net/ethernet/ibm/emac/core.h create mode 100644 drivers/net/ethernet/ibm/emac/debug.c create mode 100644 drivers/net/ethernet/ibm/emac/debug.h create mode 100644 drivers/net/ethernet/ibm/emac/emac.h create mode 100644 drivers/net/ethernet/ibm/emac/mal.c create mode 100644 drivers/net/ethernet/ibm/emac/mal.h create mode 100644 drivers/net/ethernet/ibm/emac/phy.c create mode 100644 drivers/net/ethernet/ibm/emac/phy.h create mode 100644 drivers/net/ethernet/ibm/emac/rgmii.c create mode 100644 drivers/net/ethernet/ibm/emac/rgmii.h create mode 100644 drivers/net/ethernet/ibm/emac/tah.c create mode 100644 drivers/net/ethernet/ibm/emac/tah.h create mode 100644 drivers/net/ethernet/ibm/emac/zmii.c create mode 100644 drivers/net/ethernet/ibm/emac/zmii.h create mode 100644 drivers/net/ethernet/ibm/ibmveth.c create mode 100644 drivers/net/ethernet/ibm/ibmveth.h create mode 100644 drivers/net/ethernet/icplus/Kconfig create mode 100644 drivers/net/ethernet/icplus/Makefile create mode 100644 drivers/net/ethernet/icplus/ipg.c create mode 100644 drivers/net/ethernet/icplus/ipg.h create mode 100644 drivers/net/ethernet/intel/Kconfig create mode 100644 drivers/net/ethernet/intel/Makefile create mode 100644 drivers/net/ethernet/intel/e100.c create mode 100644 drivers/net/ethernet/intel/e1000/Makefile create mode 100644 drivers/net/ethernet/intel/e1000/e1000.h create mode 100644 drivers/net/ethernet/intel/e1000/e1000_ethtool.c create mode 100644 
drivers/net/ethernet/intel/e1000/e1000_hw.c create mode 100644 drivers/net/ethernet/intel/e1000/e1000_hw.h create mode 100644 drivers/net/ethernet/intel/e1000/e1000_main.c create mode 100644 drivers/net/ethernet/intel/e1000/e1000_osdep.h create mode 100644 drivers/net/ethernet/intel/e1000/e1000_param.c create mode 100644 drivers/net/ethernet/intel/e1000e/80003es2lan.c create mode 100644 drivers/net/ethernet/intel/e1000e/80003es2lan.h create mode 100644 drivers/net/ethernet/intel/e1000e/82571.c create mode 100644 drivers/net/ethernet/intel/e1000e/82571.h create mode 100644 drivers/net/ethernet/intel/e1000e/Makefile create mode 100644 drivers/net/ethernet/intel/e1000e/defines.h create mode 100644 drivers/net/ethernet/intel/e1000e/e1000.h create mode 100644 drivers/net/ethernet/intel/e1000e/ethtool.c create mode 100644 drivers/net/ethernet/intel/e1000e/hw.h create mode 100644 drivers/net/ethernet/intel/e1000e/ich8lan.c create mode 100644 drivers/net/ethernet/intel/e1000e/ich8lan.h create mode 100644 drivers/net/ethernet/intel/e1000e/mac.c create mode 100644 drivers/net/ethernet/intel/e1000e/mac.h create mode 100644 drivers/net/ethernet/intel/e1000e/manage.c create mode 100644 drivers/net/ethernet/intel/e1000e/manage.h create mode 100644 drivers/net/ethernet/intel/e1000e/netdev.c create mode 100644 drivers/net/ethernet/intel/e1000e/nvm.c create mode 100644 drivers/net/ethernet/intel/e1000e/nvm.h create mode 100644 drivers/net/ethernet/intel/e1000e/param.c create mode 100644 drivers/net/ethernet/intel/e1000e/phy.c create mode 100644 drivers/net/ethernet/intel/e1000e/phy.h create mode 100644 drivers/net/ethernet/intel/e1000e/ptp.c create mode 100644 drivers/net/ethernet/intel/e1000e/regs.h create mode 100644 drivers/net/ethernet/intel/fm10k/Makefile create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k.h create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_common.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_common.h create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_iov.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_main.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_mbx.h create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_pci.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_pf.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_pf.h create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_ptp.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_tlv.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_tlv.h create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_type.h create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_vf.c create mode 100644 drivers/net/ethernet/intel/fm10k/fm10k_vf.h create mode 100644 drivers/net/ethernet/intel/i40e/Makefile create mode 100644 drivers/net/ethernet/intel/i40e/i40e.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_adminq.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_adminq.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_alloc.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_common.c create mode 100644 
drivers/net/ethernet/intel/i40e/i40e_dcb.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_dcb.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_debugfs.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_diag.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_diag.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_ethtool.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_fcoe.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_fcoe.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_hmc.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_hmc.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_main.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_nvm.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_osdep.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_prototype.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_ptp.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_register.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_status.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_txrx.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_txrx.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_type.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_virtchnl.h create mode 100644 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c create mode 100644 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h create mode 100644 drivers/net/ethernet/intel/i40evf/Makefile create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_adminq.c create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_adminq.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_alloc.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_common.c create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_hmc.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_osdep.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_prototype.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_register.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_status.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_txrx.c create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_txrx.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_type.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40evf.h create mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c create mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_main.c create mode 100644 drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c create mode 100644 drivers/net/ethernet/intel/igb/Makefile create mode 100644 drivers/net/ethernet/intel/igb/e1000_82575.c create mode 100644 drivers/net/ethernet/intel/igb/e1000_82575.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_defines.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_hw.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_i210.c create mode 100644 drivers/net/ethernet/intel/igb/e1000_i210.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_mac.c create mode 100644 
drivers/net/ethernet/intel/igb/e1000_mac.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_mbx.c create mode 100644 drivers/net/ethernet/intel/igb/e1000_mbx.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_nvm.c create mode 100644 drivers/net/ethernet/intel/igb/e1000_nvm.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_phy.c create mode 100644 drivers/net/ethernet/intel/igb/e1000_phy.h create mode 100644 drivers/net/ethernet/intel/igb/e1000_regs.h create mode 100644 drivers/net/ethernet/intel/igb/igb.h create mode 100644 drivers/net/ethernet/intel/igb/igb_ethtool.c create mode 100644 drivers/net/ethernet/intel/igb/igb_hwmon.c create mode 100644 drivers/net/ethernet/intel/igb/igb_main.c create mode 100644 drivers/net/ethernet/intel/igb/igb_ptp.c create mode 100644 drivers/net/ethernet/intel/igbvf/Makefile create mode 100644 drivers/net/ethernet/intel/igbvf/defines.h create mode 100644 drivers/net/ethernet/intel/igbvf/ethtool.c create mode 100644 drivers/net/ethernet/intel/igbvf/igbvf.h create mode 100644 drivers/net/ethernet/intel/igbvf/mbx.c create mode 100644 drivers/net/ethernet/intel/igbvf/mbx.h create mode 100644 drivers/net/ethernet/intel/igbvf/netdev.c create mode 100644 drivers/net/ethernet/intel/igbvf/regs.h create mode 100644 drivers/net/ethernet/intel/igbvf/vf.c create mode 100644 drivers/net/ethernet/intel/igbvf/vf.h create mode 100644 drivers/net/ethernet/intel/ixgb/Makefile create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb.h create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_ee.c create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_ee.h create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_hw.c create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_hw.h create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_ids.h create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_main.c create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_osdep.h create mode 100644 drivers/net/ethernet/intel/ixgb/ixgb_param.c create mode 100644 drivers/net/ethernet/intel/ixgbe/Makefile create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_common.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c create mode 100644 
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c create mode 100644 drivers/net/ethernet/intel/ixgbevf/Makefile create mode 100644 drivers/net/ethernet/intel/ixgbevf/defines.h create mode 100644 drivers/net/ethernet/intel/ixgbevf/ethtool.c create mode 100644 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h create mode 100644 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c create mode 100644 drivers/net/ethernet/intel/ixgbevf/mbx.c create mode 100644 drivers/net/ethernet/intel/ixgbevf/mbx.h create mode 100644 drivers/net/ethernet/intel/ixgbevf/regs.h create mode 100644 drivers/net/ethernet/intel/ixgbevf/vf.c create mode 100644 drivers/net/ethernet/intel/ixgbevf/vf.h create mode 100644 drivers/net/ethernet/jme.c create mode 100644 drivers/net/ethernet/jme.h create mode 100644 drivers/net/ethernet/korina.c create mode 100644 drivers/net/ethernet/lantiq_etop.c create mode 100644 drivers/net/ethernet/marvell/Kconfig create mode 100644 drivers/net/ethernet/marvell/Makefile create mode 100644 drivers/net/ethernet/marvell/mv643xx_eth.c create mode 100644 drivers/net/ethernet/marvell/mvmdio.c create mode 100644 drivers/net/ethernet/marvell/mvneta.c create mode 100644 drivers/net/ethernet/marvell/mvpp2.c create mode 100644 drivers/net/ethernet/marvell/pxa168_eth.c create mode 100644 drivers/net/ethernet/marvell/skge.c create mode 100644 drivers/net/ethernet/marvell/skge.h create mode 100644 drivers/net/ethernet/marvell/sky2.c create mode 100644 drivers/net/ethernet/marvell/sky2.h create mode 100644 drivers/net/ethernet/mellanox/Kconfig create mode 100644 drivers/net/ethernet/mellanox/Makefile create mode 100644 drivers/net/ethernet/mellanox/mlx4/Kconfig create mode 100644 drivers/net/ethernet/mellanox/mlx4/Makefile create mode 100644 drivers/net/ethernet/mellanox/mlx4/alloc.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/catas.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/cmd.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/cq.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_clock.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_cq.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_main.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_netdev.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_port.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_port.h create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_resources.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_rx.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_selftest.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/en_tx.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/eq.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/fw.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/fw.h create mode 100644 drivers/net/ethernet/mellanox/mlx4/fw_qos.c create mode 100644 
drivers/net/ethernet/mellanox/mlx4/fw_qos.h create mode 100644 drivers/net/ethernet/mellanox/mlx4/icm.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/icm.h create mode 100644 drivers/net/ethernet/mellanox/mlx4/intf.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/main.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/mcg.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/mlx4.h create mode 100644 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h create mode 100644 drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h create mode 100644 drivers/net/ethernet/mellanox/mlx4/mr.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/pd.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/port.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/profile.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/qp.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/reset.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/sense.c create mode 100644 drivers/net/ethernet/mellanox/mlx4/srq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/Kconfig create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/Makefile create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/alloc.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/cmd.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/cq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fw.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/health.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mad.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/main.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mcg.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mr.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/pd.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/port.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/qp.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/srq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/uar.c create mode 100644 drivers/net/ethernet/micrel/Kconfig create mode 100644 drivers/net/ethernet/micrel/Makefile create mode 100644 drivers/net/ethernet/micrel/ks8695net.c create mode 100644 drivers/net/ethernet/micrel/ks8695net.h create mode 100644 drivers/net/ethernet/micrel/ks8842.c create mode 100644 drivers/net/ethernet/micrel/ks8851.c create mode 100644 drivers/net/ethernet/micrel/ks8851.h create mode 100644 drivers/net/ethernet/micrel/ks8851_mll.c create mode 100644 drivers/net/ethernet/micrel/ksz884x.c create mode 100644 drivers/net/ethernet/microchip/Kconfig create mode 100644 drivers/net/ethernet/microchip/Makefile create mode 100644 drivers/net/ethernet/microchip/enc28j60.c create mode 100644 drivers/net/ethernet/microchip/enc28j60_hw.h create mode 100644 drivers/net/ethernet/moxa/Kconfig create mode 100644 drivers/net/ethernet/moxa/Makefile create mode 100644 drivers/net/ethernet/moxa/moxart_ether.c create mode 100644 drivers/net/ethernet/moxa/moxart_ether.h create mode 100644 drivers/net/ethernet/myricom/Kconfig create mode 100644 drivers/net/ethernet/myricom/Makefile create mode 100644 
drivers/net/ethernet/myricom/myri10ge/Makefile create mode 100644 drivers/net/ethernet/myricom/myri10ge/myri10ge.c create mode 100644 drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h create mode 100644 drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h create mode 100644 drivers/net/ethernet/natsemi/Kconfig create mode 100644 drivers/net/ethernet/natsemi/Makefile create mode 100644 drivers/net/ethernet/natsemi/jazzsonic.c create mode 100644 drivers/net/ethernet/natsemi/macsonic.c create mode 100644 drivers/net/ethernet/natsemi/natsemi.c create mode 100644 drivers/net/ethernet/natsemi/ns83820.c create mode 100644 drivers/net/ethernet/natsemi/sonic.c create mode 100644 drivers/net/ethernet/natsemi/sonic.h create mode 100644 drivers/net/ethernet/natsemi/xtsonic.c create mode 100644 drivers/net/ethernet/neterion/Kconfig create mode 100644 drivers/net/ethernet/neterion/Makefile create mode 100644 drivers/net/ethernet/neterion/s2io-regs.h create mode 100644 drivers/net/ethernet/neterion/s2io.c create mode 100644 drivers/net/ethernet/neterion/s2io.h create mode 100644 drivers/net/ethernet/neterion/vxge/Makefile create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-config.c create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-config.h create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-ethtool.c create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-ethtool.h create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-main.c create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-main.h create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-reg.h create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-traffic.c create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-traffic.h create mode 100644 drivers/net/ethernet/neterion/vxge/vxge-version.h create mode 100644 drivers/net/ethernet/netx-eth.c create mode 100644 drivers/net/ethernet/nuvoton/Kconfig create mode 100644 drivers/net/ethernet/nuvoton/Makefile create mode 100644 drivers/net/ethernet/nuvoton/w90p910_ether.c create mode 100644 drivers/net/ethernet/nvidia/Kconfig create mode 100644 drivers/net/ethernet/nvidia/Makefile create mode 100644 drivers/net/ethernet/nvidia/forcedeth.c create mode 100644 drivers/net/ethernet/nxp/Kconfig create mode 100644 drivers/net/ethernet/nxp/Makefile create mode 100644 drivers/net/ethernet/nxp/lpc_eth.c create mode 100644 drivers/net/ethernet/octeon/Kconfig create mode 100644 drivers/net/ethernet/octeon/Makefile create mode 100644 drivers/net/ethernet/octeon/octeon_mgmt.c create mode 100644 drivers/net/ethernet/oki-semi/Kconfig create mode 100644 drivers/net/ethernet/oki-semi/Makefile create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/Kconfig create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/Makefile create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c create mode 100644 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h create mode 100644 drivers/net/ethernet/packetengines/Kconfig create mode 100644 drivers/net/ethernet/packetengines/Makefile create mode 100644 drivers/net/ethernet/packetengines/hamachi.c 
create mode 100644 drivers/net/ethernet/packetengines/yellowfin.c create mode 100644 drivers/net/ethernet/pasemi/Kconfig create mode 100644 drivers/net/ethernet/pasemi/Makefile create mode 100644 drivers/net/ethernet/pasemi/pasemi_mac.c create mode 100644 drivers/net/ethernet/pasemi/pasemi_mac.h create mode 100644 drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c create mode 100644 drivers/net/ethernet/qlogic/Kconfig create mode 100644 drivers/net/ethernet/qlogic/Makefile create mode 100644 drivers/net/ethernet/qlogic/netxen/Makefile create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic.h create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c create mode 100644 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c create mode 100644 drivers/net/ethernet/qlogic/qla3xxx.c create mode 100644 drivers/net/ethernet/qlogic/qla3xxx.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/Makefile create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c create mode 100644 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c create mode 100644 drivers/net/ethernet/qlogic/qlge/Makefile create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge.h create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_dbg.c create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_main.c create mode 100644 drivers/net/ethernet/qlogic/qlge/qlge_mpi.c create mode 100644 drivers/net/ethernet/qualcomm/Kconfig create mode 100644 drivers/net/ethernet/qualcomm/Makefile create mode 100644 drivers/net/ethernet/qualcomm/qca_7k.c create mode 100644 drivers/net/ethernet/qualcomm/qca_7k.h create mode 100644 drivers/net/ethernet/qualcomm/qca_debug.c create mode 100644 drivers/net/ethernet/qualcomm/qca_debug.h create mode 100644 drivers/net/ethernet/qualcomm/qca_framing.c create mode 100644 drivers/net/ethernet/qualcomm/qca_framing.h create mode 100644 
drivers/net/ethernet/qualcomm/qca_spi.c create mode 100644 drivers/net/ethernet/qualcomm/qca_spi.h create mode 100644 drivers/net/ethernet/rdc/Kconfig create mode 100644 drivers/net/ethernet/rdc/Makefile create mode 100644 drivers/net/ethernet/rdc/r6040.c create mode 100644 drivers/net/ethernet/realtek/8139cp.c create mode 100644 drivers/net/ethernet/realtek/8139too.c create mode 100644 drivers/net/ethernet/realtek/Kconfig create mode 100644 drivers/net/ethernet/realtek/Makefile create mode 100644 drivers/net/ethernet/realtek/atp.c create mode 100644 drivers/net/ethernet/realtek/atp.h create mode 100644 drivers/net/ethernet/realtek/r8169.c create mode 100644 drivers/net/ethernet/renesas/Kconfig create mode 100644 drivers/net/ethernet/renesas/Makefile create mode 100644 drivers/net/ethernet/renesas/sh_eth.c create mode 100644 drivers/net/ethernet/renesas/sh_eth.h create mode 100644 drivers/net/ethernet/rocker/Kconfig create mode 100644 drivers/net/ethernet/rocker/Makefile create mode 100644 drivers/net/ethernet/rocker/rocker.c create mode 100644 drivers/net/ethernet/rocker/rocker.h create mode 100644 drivers/net/ethernet/samsung/Kconfig create mode 100644 drivers/net/ethernet/samsung/Makefile create mode 100644 drivers/net/ethernet/samsung/sxgbe/Makefile create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c create mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h create mode 100644 drivers/net/ethernet/seeq/Kconfig create mode 100644 drivers/net/ethernet/seeq/Makefile create mode 100644 drivers/net/ethernet/seeq/ether3.c create mode 100644 drivers/net/ethernet/seeq/ether3.h create mode 100644 drivers/net/ethernet/seeq/sgiseeq.c create mode 100644 drivers/net/ethernet/seeq/sgiseeq.h create mode 100644 drivers/net/ethernet/sfc/Kconfig create mode 100644 drivers/net/ethernet/sfc/Makefile create mode 100644 drivers/net/ethernet/sfc/bitfield.h create mode 100644 drivers/net/ethernet/sfc/ef10.c create mode 100644 drivers/net/ethernet/sfc/ef10_regs.h create mode 100644 drivers/net/ethernet/sfc/efx.c create mode 100644 drivers/net/ethernet/sfc/efx.h create mode 100644 drivers/net/ethernet/sfc/enum.h create mode 100644 drivers/net/ethernet/sfc/ethtool.c create mode 100644 drivers/net/ethernet/sfc/falcon.c create mode 100644 drivers/net/ethernet/sfc/falcon_boards.c create mode 100644 drivers/net/ethernet/sfc/farch.c create mode 100644 drivers/net/ethernet/sfc/farch_regs.h create mode 100644 drivers/net/ethernet/sfc/filter.h create mode 100644 drivers/net/ethernet/sfc/io.h create mode 100644 drivers/net/ethernet/sfc/mcdi.c create mode 100644 drivers/net/ethernet/sfc/mcdi.h create mode 100644 
drivers/net/ethernet/sfc/mcdi_mon.c create mode 100644 drivers/net/ethernet/sfc/mcdi_pcol.h create mode 100644 drivers/net/ethernet/sfc/mcdi_port.c create mode 100644 drivers/net/ethernet/sfc/mdio_10g.c create mode 100644 drivers/net/ethernet/sfc/mdio_10g.h create mode 100644 drivers/net/ethernet/sfc/mtd.c create mode 100644 drivers/net/ethernet/sfc/net_driver.h create mode 100644 drivers/net/ethernet/sfc/nic.c create mode 100644 drivers/net/ethernet/sfc/nic.h create mode 100644 drivers/net/ethernet/sfc/phy.h create mode 100644 drivers/net/ethernet/sfc/ptp.c create mode 100644 drivers/net/ethernet/sfc/qt202x_phy.c create mode 100644 drivers/net/ethernet/sfc/rx.c create mode 100644 drivers/net/ethernet/sfc/selftest.c create mode 100644 drivers/net/ethernet/sfc/selftest.h create mode 100644 drivers/net/ethernet/sfc/siena.c create mode 100644 drivers/net/ethernet/sfc/siena_sriov.c create mode 100644 drivers/net/ethernet/sfc/tenxpress.c create mode 100644 drivers/net/ethernet/sfc/tx.c create mode 100644 drivers/net/ethernet/sfc/txc43128_phy.c create mode 100644 drivers/net/ethernet/sfc/vfdi.h create mode 100644 drivers/net/ethernet/sfc/workarounds.h create mode 100644 drivers/net/ethernet/sgi/Kconfig create mode 100644 drivers/net/ethernet/sgi/Makefile create mode 100644 drivers/net/ethernet/sgi/ioc3-eth.c create mode 100644 drivers/net/ethernet/sgi/meth.c create mode 100644 drivers/net/ethernet/sgi/meth.h create mode 100644 drivers/net/ethernet/silan/Kconfig create mode 100644 drivers/net/ethernet/silan/Makefile create mode 100644 drivers/net/ethernet/silan/sc92031.c create mode 100644 drivers/net/ethernet/sis/Kconfig create mode 100644 drivers/net/ethernet/sis/Makefile create mode 100644 drivers/net/ethernet/sis/sis190.c create mode 100644 drivers/net/ethernet/sis/sis900.c create mode 100644 drivers/net/ethernet/sis/sis900.h create mode 100644 drivers/net/ethernet/smsc/Kconfig create mode 100644 drivers/net/ethernet/smsc/Makefile create mode 100644 drivers/net/ethernet/smsc/epic100.c create mode 100644 drivers/net/ethernet/smsc/smc911x.c create mode 100644 drivers/net/ethernet/smsc/smc911x.h create mode 100644 drivers/net/ethernet/smsc/smc9194.c create mode 100644 drivers/net/ethernet/smsc/smc9194.h create mode 100644 drivers/net/ethernet/smsc/smc91c92_cs.c create mode 100644 drivers/net/ethernet/smsc/smc91x.c create mode 100644 drivers/net/ethernet/smsc/smc91x.h create mode 100644 drivers/net/ethernet/smsc/smsc911x.c create mode 100644 drivers/net/ethernet/smsc/smsc911x.h create mode 100644 drivers/net/ethernet/smsc/smsc9420.c create mode 100644 drivers/net/ethernet/smsc/smsc9420.h create mode 100644 drivers/net/ethernet/stmicro/Kconfig create mode 100644 drivers/net/ethernet/stmicro/Makefile create mode 100644 drivers/net/ethernet/stmicro/stmmac/Kconfig create mode 100644 drivers/net/ethernet/stmicro/stmmac/Makefile create mode 100644 drivers/net/ethernet/stmicro/stmmac/chain_mode.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/common.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/descs.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/descs_com.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac100.h create mode 
100644 drivers/net/ethernet/stmicro/stmmac/dwmac1000.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/enh_desc.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/mmc.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/mmc_core.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/norm_desc.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/ring_mode.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c create mode 100644 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h create mode 100644 drivers/net/ethernet/sun/Kconfig create mode 100644 drivers/net/ethernet/sun/Makefile create mode 100644 drivers/net/ethernet/sun/cassini.c create mode 100644 drivers/net/ethernet/sun/cassini.h create mode 100644 drivers/net/ethernet/sun/niu.c create mode 100644 drivers/net/ethernet/sun/niu.h create mode 100644 drivers/net/ethernet/sun/sunbmac.c create mode 100644 drivers/net/ethernet/sun/sunbmac.h create mode 100644 drivers/net/ethernet/sun/sungem.c create mode 100644 drivers/net/ethernet/sun/sungem.h create mode 100644 drivers/net/ethernet/sun/sunhme.c create mode 100644 drivers/net/ethernet/sun/sunhme.h create mode 100644 drivers/net/ethernet/sun/sunqe.c create mode 100644 drivers/net/ethernet/sun/sunqe.h create mode 100644 drivers/net/ethernet/sun/sunvnet.c create mode 100644 drivers/net/ethernet/sun/sunvnet.h create mode 100644 drivers/net/ethernet/tehuti/Kconfig create mode 100644 drivers/net/ethernet/tehuti/Makefile create mode 100644 drivers/net/ethernet/tehuti/tehuti.c create mode 100644 drivers/net/ethernet/tehuti/tehuti.h create mode 100644 drivers/net/ethernet/ti/Kconfig create mode 100644 drivers/net/ethernet/ti/Makefile create mode 100644 drivers/net/ethernet/ti/cpmac.c create mode 100644 drivers/net/ethernet/ti/cpsw-common.c create mode 100644 drivers/net/ethernet/ti/cpsw-phy-sel.c create mode 100644 drivers/net/ethernet/ti/cpsw.c create mode 100644 drivers/net/ethernet/ti/cpsw.h create mode 100644 drivers/net/ethernet/ti/cpsw_ale.c create mode 100644 drivers/net/ethernet/ti/cpsw_ale.h create mode 100644 drivers/net/ethernet/ti/cpts.c create mode 100644 drivers/net/ethernet/ti/cpts.h create mode 100644 drivers/net/ethernet/ti/davinci_cpdma.c create mode 100644 drivers/net/ethernet/ti/davinci_cpdma.h create mode 100644 drivers/net/ethernet/ti/davinci_emac.c create mode 100644 drivers/net/ethernet/ti/davinci_mdio.c create mode 100644 drivers/net/ethernet/ti/netcp.h create mode 100644 drivers/net/ethernet/ti/netcp_core.c create mode 100644 drivers/net/ethernet/ti/netcp_ethss.c create mode 
100644 drivers/net/ethernet/ti/netcp_sgmii.c create mode 100644 drivers/net/ethernet/ti/netcp_xgbepcsr.c create mode 100644 drivers/net/ethernet/ti/tlan.c create mode 100644 drivers/net/ethernet/ti/tlan.h create mode 100644 drivers/net/ethernet/tile/Kconfig create mode 100644 drivers/net/ethernet/tile/Makefile create mode 100644 drivers/net/ethernet/tile/tilegx.c create mode 100644 drivers/net/ethernet/tile/tilepro.c create mode 100644 drivers/net/ethernet/toshiba/Kconfig create mode 100644 drivers/net/ethernet/toshiba/Makefile create mode 100644 drivers/net/ethernet/toshiba/ps3_gelic_net.c create mode 100644 drivers/net/ethernet/toshiba/ps3_gelic_net.h create mode 100644 drivers/net/ethernet/toshiba/ps3_gelic_wireless.c create mode 100644 drivers/net/ethernet/toshiba/ps3_gelic_wireless.h create mode 100644 drivers/net/ethernet/toshiba/spider_net.c create mode 100644 drivers/net/ethernet/toshiba/spider_net.h create mode 100644 drivers/net/ethernet/toshiba/spider_net_ethtool.c create mode 100644 drivers/net/ethernet/toshiba/tc35815.c create mode 100644 drivers/net/ethernet/tundra/Kconfig create mode 100644 drivers/net/ethernet/tundra/Makefile create mode 100644 drivers/net/ethernet/tundra/tsi108_eth.c create mode 100644 drivers/net/ethernet/tundra/tsi108_eth.h create mode 100644 drivers/net/ethernet/via/Kconfig create mode 100644 drivers/net/ethernet/via/Makefile create mode 100644 drivers/net/ethernet/via/via-rhine.c create mode 100644 drivers/net/ethernet/via/via-velocity.c create mode 100644 drivers/net/ethernet/via/via-velocity.h create mode 100644 drivers/net/ethernet/wiznet/Kconfig create mode 100644 drivers/net/ethernet/wiznet/Makefile create mode 100644 drivers/net/ethernet/wiznet/w5100.c create mode 100644 drivers/net/ethernet/wiznet/w5300.c create mode 100644 drivers/net/ethernet/xilinx/Kconfig create mode 100644 drivers/net/ethernet/xilinx/Makefile create mode 100644 drivers/net/ethernet/xilinx/ll_temac.h create mode 100644 drivers/net/ethernet/xilinx/ll_temac_main.c create mode 100644 drivers/net/ethernet/xilinx/ll_temac_mdio.c create mode 100644 drivers/net/ethernet/xilinx/xilinx_axienet.h create mode 100644 drivers/net/ethernet/xilinx/xilinx_axienet_main.c create mode 100644 drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c create mode 100644 drivers/net/ethernet/xilinx/xilinx_emaclite.c create mode 100644 drivers/net/ethernet/xircom/Kconfig create mode 100644 drivers/net/ethernet/xircom/Makefile create mode 100644 drivers/net/ethernet/xircom/xirc2ps_cs.c create mode 100644 drivers/net/ethernet/xscale/Kconfig create mode 100644 drivers/net/ethernet/xscale/Makefile create mode 100644 drivers/net/ethernet/xscale/ixp4xx_eth.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c new file mode 100644 index 000000000..4547a1b8b --- /dev/null +++ b/drivers/net/ethernet/3com/3c509.c @@ -0,0 +1,1454 @@ +/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */ +/* + Written 1993-2000 by Donald Becker. + + Copyright 1994-2000 by Donald Becker. + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + + This driver is for the 3Com EtherLinkIII series. 
+ + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Known limitations: + Because of the way 3c509 ISA detection works it's difficult to predict + a priori which of several ISA-mode cards will be detected first. + + This driver does not use predictive interrupt mode, resulting in higher + packet latency but lower overhead. If interrupts are disabled for an + unusually long time it could also result in missed packets, but in + practice this rarely happens. + + + FIXES: + Alan Cox: Removed the 'Unexpected interrupt' bug. + Michael Meskes: Upgraded to Donald Becker's version 1.07. + Alan Cox: Increased the eeprom delay. Regardless of + what the docs say some people definitely + get problems with lower (but in card spec) + delays + v1.10 4/21/97 Fixed module code so that multiple cards may be detected, + other cleanups. -djb + Andrea Arcangeli: Upgraded to Donald Becker's version 1.12. + Rick Payne: Fixed SMP race condition + v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb + v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb + v1.15 1/31/98 Faster recovery for Tx errors. -djb + v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb + v1.18 12Mar2001 Andrew Morton + - Avoid bogus detect of 3c590's (Andrzej Krzysztofowicz) + - Reviewed against 1.18 from scyld.com + v1.18a 17Nov2001 Jeff Garzik + - ethtool support + v1.18b 1Mar2002 Zwane Mwaikambo + - Power Management support + v1.18c 1Mar2002 David Ruggiero + - Full duplex support + v1.19 16Oct2002 Zwane Mwaikambo + - Additional ethtool features + v1.19a 28Oct2002 Davud Ruggiero + - Increase *read_eeprom udelay to workaround oops with 2 cards. + v1.19b 08Nov2002 Marc Zyngier + - Introduce driver model for EISA cards. + v1.20 04Feb2008 Ondrej Zary + - convert to isa_driver and pnp_driver and some cleanups +*/ + +#define DRV_NAME "3c509" +#define DRV_VERSION "1.20" +#define DRV_RELDATE "04Feb2008" + +/* A few values that may be tweaked. */ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (400*HZ/1000) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for udelay() */ +#include +#include +#include +#include +#include + +#include +#include +#include + +static char version[] = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; + +#ifdef EL3_DEBUG +static int el3_debug = EL3_DEBUG; +#else +static int el3_debug = 2; +#endif + +/* Used to do a global count of all the cards in the system. Must be + * a global variable so that the eisa probe routines can increment + * it */ +static int el3_cards = 0; +#define EL3_MAX_CARDS 8 + +/* To minimize the size of the driver source I only define operating + constants if they are used several times. You'll need the manual + anyway if you want to understand driver details. */ +/* Offsets from base I/O address. */ +#define EL3_DATA 0x00 +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e +#define EEPROM_READ 0x80 + +#define EL3_IO_EXTENT 16 + +#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) + + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. 
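   In other words, every command is a single 16-bit write to EL3_CMD with the
   opcode in bits 15-11 and its argument in bits 10-0. A minimal worked example
   using the opcodes defined just below (ioaddr is assumed to hold the card's
   base I/O address):

       outw(SelectWindow + 4, ioaddr + EL3_CMD);      // 0x0800 + 4: switch to register window 4
       outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); // 0x9000 + 1536: interrupt when 1536 Tx FIFO bytes are free

   This is exactly what the EL3WINDOW() macro above and el3_start_xmit()
   further down expand to.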
*/ +enum c509cmd { + TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, + RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, + TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, + FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, + StatsDisable = 22<<11, StopCoax = 23<<11, PowerUp = 27<<11, + PowerDown = 28<<11, PowerAuto = 29<<11}; + +enum c509status { + IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, }; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; + +/* Register window 1 offsets, the window used in normal operation. */ +#define TX_FIFO 0x00 +#define RX_FIFO 0x00 +#define RX_STATUS 0x08 +#define TX_STATUS 0x0B +#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */ + +#define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */ +#define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */ +#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ +#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */ +#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ +#define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */ +#define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */ + +/* + * Must be a power of two (we use a binary and in the + * circular queue) + */ +#define SKB_QUEUE_SIZE 64 + +enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_EISA }; + +struct el3_private { + spinlock_t lock; + /* skb send-queue */ + int head, size; + struct sk_buff *queue[SKB_QUEUE_SIZE]; + enum el3_cardtype type; +}; +static int id_port; +static int current_tag; +static struct net_device *el3_devs[EL3_MAX_CARDS]; + +/* Parameters that may be passed into the module. */ +static int debug = -1; +static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1}; +/* Maximum events (Rx packets, etc.) to handle at each interrupt. 
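   el3_interrupt() below uses this value as a per-invocation work budget. A
   condensed sketch of that loop (only the Rx branch is shown; dev and ioaddr
   are assumed to be in scope):

       int work = max_interrupt_work, status;

       while ((status = inw(ioaddr + EL3_STATUS)) &
              (IntLatch | RxComplete | StatsFull)) {
               if (status & RxComplete)
                       el3_rx(dev);
               if (--work < 0) {
                       outw(AckIntr | 0xFF, ioaddr + EL3_CMD);  // give up: ack everything and leave
                       break;
               }
               outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
       }

   Raising max_interrupt_work trades longer time in the handler for fewer
   "Infinite loop in interrupt" warnings on busy links.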
*/ +static int max_interrupt_work = 10; +#ifdef CONFIG_PNP +static int nopnp; +#endif + +static int el3_common_init(struct net_device *dev); +static void el3_common_remove(struct net_device *dev); +static ushort id_read_eeprom(int index); +static ushort read_eeprom(int ioaddr, int index); +static int el3_open(struct net_device *dev); +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t el3_interrupt(int irq, void *dev_id); +static void update_stats(struct net_device *dev); +static struct net_device_stats *el3_get_stats(struct net_device *dev); +static int el3_rx(struct net_device *dev); +static int el3_close(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void el3_tx_timeout (struct net_device *dev); +static void el3_down(struct net_device *dev); +static void el3_up(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; +#ifdef CONFIG_PM +static int el3_suspend(struct device *, pm_message_t); +static int el3_resume(struct device *); +#else +#define el3_suspend NULL +#define el3_resume NULL +#endif + + +/* generic device remove for all device types */ +static int el3_device_remove (struct device *device); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void el3_poll_controller(struct net_device *dev); +#endif + +/* Return 0 on success, 1 on error, 2 when found already detected PnP card */ +static int el3_isa_id_sequence(__be16 *phys_addr) +{ + short lrs_state = 0xff; + int i; + + /* ISA boards are detected by sending the ID sequence to the + ID_PORT. We find cards past the first by setting the 'current_tag' + on cards as they are found. Cards with their tag set will not + respond to subsequent ID sequences. */ + + outb(0x00, id_port); + outb(0x00, id_port); + for (i = 0; i < 255; i++) { + outb(lrs_state, id_port); + lrs_state <<= 1; + lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state; + } + /* For the first probe, clear all board's tag registers. */ + if (current_tag == 0) + outb(0xd0, id_port); + else /* Otherwise kill off already-found boards. */ + outb(0xd8, id_port); + if (id_read_eeprom(7) != 0x6d50) + return 1; + /* Read in EEPROM data, which does contention-select. + Only the lowest address board will stay "on-line". + 3Com got the byte order backwards. */ + for (i = 0; i < 3; i++) + phys_addr[i] = htons(id_read_eeprom(i)); +#ifdef CONFIG_PNP + if (!nopnp) { + /* The ISA PnP 3c509 cards respond to the ID sequence too. + This check is needed in order not to register them twice. */ + for (i = 0; i < el3_cards; i++) { + struct el3_private *lp = netdev_priv(el3_devs[i]); + if (lp->type == EL3_PNP && + ether_addr_equal((u8 *)phys_addr, el3_devs[i]->dev_addr)) { + if (el3_debug > 3) + pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n", + phys_addr[0] & 0xff, phys_addr[0] >> 8, + phys_addr[1] & 0xff, phys_addr[1] >> 8, + phys_addr[2] & 0xff, phys_addr[2] >> 8); + /* Set the adaptor tag so that the next card can be found. 
*/ + outb(0xd0 + ++current_tag, id_port); + return 2; + } + } + } +#endif /* CONFIG_PNP */ + return 0; + +} + +static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr, + int irq, int if_port, enum el3_cardtype type) +{ + struct el3_private *lp = netdev_priv(dev); + + memcpy(dev->dev_addr, phys_addr, ETH_ALEN); + dev->base_addr = ioaddr; + dev->irq = irq; + dev->if_port = if_port; + lp->type = type; +} + +static int el3_isa_match(struct device *pdev, unsigned int ndev) +{ + struct net_device *dev; + int ioaddr, isa_irq, if_port, err; + unsigned int iobase; + __be16 phys_addr[3]; + + while ((err = el3_isa_id_sequence(phys_addr)) == 2) + ; /* Skip to next card when PnP card found */ + if (err == 1) + return 0; + + iobase = id_read_eeprom(8); + if_port = iobase >> 14; + ioaddr = 0x200 + ((iobase & 0x1f) << 4); + if (irq[el3_cards] > 1 && irq[el3_cards] < 16) + isa_irq = irq[el3_cards]; + else + isa_irq = id_read_eeprom(9) >> 12; + + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, pdev); + netdev_boot_setup_check(dev); + + if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { + free_netdev(dev); + return 0; + } + + /* Set the adaptor tag so that the next card can be found. */ + outb(0xd0 + ++current_tag, id_port); + + /* Activate the adaptor at the EEPROM location. */ + outb((ioaddr >> 4) | 0xe0, id_port); + + EL3WINDOW(0); + if (inw(ioaddr) != 0x6d50) { + free_netdev(dev); + return 0; + } + + /* Free the interrupt so that some other card can use it. */ + outw(0x0f00, ioaddr + WN0_IRQ); + + el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA); + dev_set_drvdata(pdev, dev); + if (el3_common_init(dev)) { + free_netdev(dev); + return 0; + } + + el3_devs[el3_cards++] = dev; + return 1; +} + +static int el3_isa_remove(struct device *pdev, + unsigned int ndev) +{ + el3_device_remove(pdev); + dev_set_drvdata(pdev, NULL); + return 0; +} + +#ifdef CONFIG_PM +static int el3_isa_suspend(struct device *dev, unsigned int n, + pm_message_t state) +{ + current_tag = 0; + return el3_suspend(dev, state); +} + +static int el3_isa_resume(struct device *dev, unsigned int n) +{ + struct net_device *ndev = dev_get_drvdata(dev); + int ioaddr = ndev->base_addr, err; + __be16 phys_addr[3]; + + while ((err = el3_isa_id_sequence(phys_addr)) == 2) + ; /* Skip to next card when PnP card found */ + if (err == 1) + return 0; + /* Set the adaptor tag so that the next card can be found. */ + outb(0xd0 + ++current_tag, id_port); + /* Enable the card */ + outb((ioaddr >> 4) | 0xe0, id_port); + EL3WINDOW(0); + if (inw(ioaddr) != 0x6d50) + return 1; + /* Free the interrupt so that some other card can use it. 
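   (Here, as in el3_isa_match() above, "freeing" the interrupt just means
   leaving the IRQ-select field of WN0_IRQ cleared; el3_up() programs the real
   line later, via outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ), when the
   interface is opened.)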
*/ + outw(0x0f00, ioaddr + WN0_IRQ); + return el3_resume(dev); +} +#endif + +static struct isa_driver el3_isa_driver = { + .match = el3_isa_match, + .remove = el3_isa_remove, +#ifdef CONFIG_PM + .suspend = el3_isa_suspend, + .resume = el3_isa_resume, +#endif + .driver = { + .name = "3c509" + }, +}; +static int isa_registered; + +#ifdef CONFIG_PNP +static struct pnp_device_id el3_pnp_ids[] = { + { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ + { .id = "TCM5091" }, /* 3Com Etherlink III */ + { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ + { .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */ + { .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */ + { .id = "PNP80f7" }, /* 3Com Etherlink III compatible */ + { .id = "PNP80f8" }, /* 3Com Etherlink III compatible */ + { .id = "" } +}; +MODULE_DEVICE_TABLE(pnp, el3_pnp_ids); + +static int el3_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) +{ + short i; + int ioaddr, irq, if_port; + __be16 phys_addr[3]; + struct net_device *dev = NULL; + int err; + + ioaddr = pnp_port_start(pdev, 0); + if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp")) + return -EBUSY; + irq = pnp_irq(pdev, 0); + EL3WINDOW(0); + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + if_port = read_eeprom(ioaddr, 8) >> 14; + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) { + release_region(ioaddr, EL3_IO_EXTENT); + return -ENOMEM; + } + SET_NETDEV_DEV(dev, &pdev->dev); + netdev_boot_setup_check(dev); + + el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP); + pnp_set_drvdata(pdev, dev); + err = el3_common_init(dev); + + if (err) { + pnp_set_drvdata(pdev, NULL); + free_netdev(dev); + return err; + } + + el3_devs[el3_cards++] = dev; + return 0; +} + +static void el3_pnp_remove(struct pnp_dev *pdev) +{ + el3_common_remove(pnp_get_drvdata(pdev)); + pnp_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM +static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state) +{ + return el3_suspend(&pdev->dev, state); +} + +static int el3_pnp_resume(struct pnp_dev *pdev) +{ + return el3_resume(&pdev->dev); +} +#endif + +static struct pnp_driver el3_pnp_driver = { + .name = "3c509", + .id_table = el3_pnp_ids, + .probe = el3_pnp_probe, + .remove = el3_pnp_remove, +#ifdef CONFIG_PM + .suspend = el3_pnp_suspend, + .resume = el3_pnp_resume, +#endif +}; +static int pnp_registered; +#endif /* CONFIG_PNP */ + +#ifdef CONFIG_EISA +static struct eisa_device_id el3_eisa_ids[] = { + { "TCM5090" }, + { "TCM5091" }, + { "TCM5092" }, + { "TCM5093" }, + { "TCM5094" }, + { "TCM5095" }, + { "TCM5098" }, + { "" } +}; +MODULE_DEVICE_TABLE(eisa, el3_eisa_ids); + +static int el3_eisa_probe (struct device *device); + +static struct eisa_driver el3_eisa_driver = { + .id_table = el3_eisa_ids, + .driver = { + .name = "3c579", + .probe = el3_eisa_probe, + .remove = el3_device_remove, + .suspend = el3_suspend, + .resume = el3_resume, + } +}; +static int eisa_registered; +#endif + +static const struct net_device_ops netdev_ops = { + .ndo_open = el3_open, + .ndo_stop = el3_close, + .ndo_start_xmit = el3_start_xmit, + .ndo_get_stats = el3_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_tx_timeout = el3_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = el3_poll_controller, +#endif +}; + +static int el3_common_init(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + int err; + const 
char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; + + spin_lock_init(&lp->lock); + + if (dev->mem_start & 0x05) { /* xcvr codes 1/3/4/12 */ + dev->if_port = (dev->mem_start & 0x0f); + } else { /* xcvr codes 0/8 */ + /* use eeprom value, but save user's full-duplex selection */ + dev->if_port |= (dev->mem_start & 0x08); + } + + /* The EL3-specific entries in the device structure. */ + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &ethtool_ops; + + err = register_netdev(dev); + if (err) { + pr_err("Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n", + dev->base_addr, dev->irq); + release_region(dev->base_addr, EL3_IO_EXTENT); + return err; + } + + pr_info("%s: 3c5x9 found at %#3.3lx, %s port, address %pM, IRQ %d.\n", + dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)], + dev->dev_addr, dev->irq); + + if (el3_debug > 0) + pr_info("%s", version); + return 0; + +} + +static void el3_common_remove (struct net_device *dev) +{ + unregister_netdev (dev); + release_region(dev->base_addr, EL3_IO_EXTENT); + free_netdev (dev); +} + +#ifdef CONFIG_EISA +static int __init el3_eisa_probe (struct device *device) +{ + short i; + int ioaddr, irq, if_port; + __be16 phys_addr[3]; + struct net_device *dev = NULL; + struct eisa_device *edev; + int err; + + /* Yeepee, The driver framework is calling us ! */ + edev = to_eisa_device (device); + ioaddr = edev->base_addr; + + if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa")) + return -EBUSY; + + /* Change the register set to the configuration window 0. */ + outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD); + + irq = inw(ioaddr + WN0_IRQ) >> 12; + if_port = inw(ioaddr + 6)>>14; + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + + /* Restore the "Product ID" to the EEPROM read register. */ + read_eeprom(ioaddr, 3); + + dev = alloc_etherdev(sizeof (struct el3_private)); + if (dev == NULL) { + release_region(ioaddr, EL3_IO_EXTENT); + return -ENOMEM; + } + + SET_NETDEV_DEV(dev, device); + netdev_boot_setup_check(dev); + + el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); + eisa_set_drvdata (edev, dev); + err = el3_common_init(dev); + + if (err) { + eisa_set_drvdata (edev, NULL); + free_netdev(dev); + return err; + } + + el3_devs[el3_cards++] = dev; + return 0; +} +#endif + +/* This remove works for all device types. + * + * The net dev must be stored in the driver data field */ +static int el3_device_remove(struct device *device) +{ + struct net_device *dev; + + dev = dev_get_drvdata(device); + + el3_common_remove (dev); + return 0; +} + +/* Read a word from the EEPROM using the regular EEPROM access register. + Assume that we are in register window zero. + */ +static ushort read_eeprom(int ioaddr, int index) +{ + outw(EEPROM_READ + index, ioaddr + 10); + /* Pause for at least 162 us. for the read to take place. + Some chips seem to require much longer */ + mdelay(2); + return inw(ioaddr + 12); +} + +/* Read a word from the EEPROM when in the ISA ID probe state. */ +static ushort id_read_eeprom(int index) +{ + int bit, word = 0; + + /* Issue read command, and pause for at least 162 us. for it to complete. + Assume extra-fast 16Mhz bus. */ + outb(EEPROM_READ + index, id_port); + + /* Pause for at least 162 us. for the read to take place. 
*/ + /* Some chips seem to require much longer */ + mdelay(4); + + for (bit = 15; bit >= 0; bit--) + word = (word << 1) + (inb(id_port) & 0x01); + + if (el3_debug > 3) + pr_debug(" 3c509 EEPROM word %d %#4.4x.\n", index, word); + + return word; +} + + +static int +el3_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int i; + + outw(TxReset, ioaddr + EL3_CMD); + outw(RxReset, ioaddr + EL3_CMD); + outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + + i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev); + if (i) + return i; + + EL3WINDOW(0); + if (el3_debug > 3) + pr_debug("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name, + dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS)); + + el3_up(dev); + + if (el3_debug > 3) + pr_debug("%s: Opened 3c509 IRQ %d status %4.4x.\n", + dev->name, dev->irq, inw(ioaddr + EL3_STATUS)); + + return 0; +} + +static void +el3_tx_timeout (struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + /* Transmitter timeout, serious problems. */ + pr_warn("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d\n", + dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), + inw(ioaddr + TX_FREE)); + dev->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + /* Issue TX_RESET and TX_START commands. */ + outw(TxReset, ioaddr + EL3_CMD); + outw(TxEnable, ioaddr + EL3_CMD); + netif_wake_queue(dev); +} + + +static netdev_tx_t +el3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long flags; + + netif_stop_queue (dev); + + dev->stats.tx_bytes += skb->len; + + if (el3_debug > 4) { + pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", + dev->name, skb->len, inw(ioaddr + EL3_STATUS)); + } + /* + * We lock the driver against other processors. Note + * we don't need to lock versus the IRQ as we suspended + * that. This means that we lose the ability to take + * an RX during a TX upload. That sucks a bit with SMP + * on an original 3c509 (2K buffer) + * + * Using disable_irq stops us crapping on other + * time sensitive devices. + */ + + spin_lock_irqsave(&lp->lock, flags); + + /* Put out the doubleword header... */ + outw(skb->len, ioaddr + TX_FIFO); + outw(0x00, ioaddr + TX_FIFO); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + + if (inw(ioaddr + TX_FREE) > 1536) + netif_start_queue(dev); + else + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); + + spin_unlock_irqrestore(&lp->lock, flags); + + dev_consume_skb_any (skb); + + /* Clear the Tx status stack. */ + { + short tx_status; + int i = 4; + + while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); + if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); + outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ + } + } + return NETDEV_TX_OK; +} + +/* The EL3 interrupt handler. 
*/ +static irqreturn_t +el3_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct el3_private *lp; + int ioaddr, status; + int i = max_interrupt_work; + + lp = netdev_priv(dev); + spin_lock(&lp->lock); + + ioaddr = dev->base_addr; + + if (el3_debug > 4) { + status = inw(ioaddr + EL3_STATUS); + pr_debug("%s: interrupt, status %4.4x.\n", dev->name, status); + } + + while ((status = inw(ioaddr + EL3_STATUS)) & + (IntLatch | RxComplete | StatsFull)) { + + if (status & RxComplete) + el3_rx(dev); + + if (status & TxAvailable) { + if (el3_debug > 5) + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue (dev); + } + if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) { + /* Handle all uncommon interrupts. */ + if (status & StatsFull) /* Empty statistics. */ + update_stats(dev); + if (status & RxEarly) { /* Rx early is unused. */ + el3_rx(dev); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & TxComplete) { /* Really Tx error. */ + short tx_status; + int i = 4; + + while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); + if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); + outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ + } + } + if (status & AdapterFailure) { + /* Adapter failure requires Rx reset and reinit. */ + outw(RxReset, ioaddr + EL3_CMD); + /* Set the Rx filter to the current state. */ + outw(SetRxFilter | RxStation | RxBroadcast + | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0) + | (dev->flags & IFF_PROMISC ? RxProm : 0), + ioaddr + EL3_CMD); + outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ + outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); + } + } + + if (--i < 0) { + pr_err("%s: Infinite loop in interrupt, status %4.4x.\n", + dev->name, status); + /* Clear all interrupts. */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */ + } + + if (el3_debug > 4) { + pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, + inw(ioaddr + EL3_STATUS)); + } + spin_unlock(&lp->lock); + return IRQ_HANDLED; +} + + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void el3_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + el3_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static struct net_device_stats * +el3_get_stats(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + /* + * This is fast enough not to bother with disable IRQ + * stuff. + */ + + spin_lock_irqsave(&lp->lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->lock, flags); + return &dev->stats; +} + +/* Update statistics. We change to register window 6, so this should be run + single-threaded if the device is active. This is expected to be a rare + operation, and it's simpler for the rest of the driver to assume that + window 1 is always valid rather than use a special window-state variable. + */ +static void update_stats(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + if (el3_debug > 5) + pr_debug(" Updating the statistics.\n"); + /* Turn off statistics updates while reading. 
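   The window-6 counters are clear-on-read (el3_up() below relies on this when
   it "clears all stats by reading"), which is why even the counters this
   driver does not track - Rx packets, Tx deferrals and the two octet counts -
   are still read and discarded here: emptying them keeps StatsFull from
   re-asserting immediately.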
*/ + outw(StatsDisable, ioaddr + EL3_CMD); + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + /* Rx packets */ inb(ioaddr + 7); + /* Tx deferrals */ inb(ioaddr + 8); + inw(ioaddr + 10); /* Total Rx and Tx octets. */ + inw(ioaddr + 12); + + /* Back to window 1, and turn statistics back on. */ + EL3WINDOW(1); + outw(StatsEnable, ioaddr + EL3_CMD); +} + +static int +el3_rx(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + short rx_status; + + if (el3_debug > 5) + pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); + while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) { + if (rx_status & 0x4000) { /* Error, update stats. */ + short error = rx_status & 0x3800; + + outw(RxDiscard, ioaddr + EL3_CMD); + dev->stats.rx_errors++; + switch (error) { + case 0x0000: dev->stats.rx_over_errors++; break; + case 0x0800: dev->stats.rx_length_errors++; break; + case 0x1000: dev->stats.rx_frame_errors++; break; + case 0x1800: dev->stats.rx_length_errors++; break; + case 0x2000: dev->stats.rx_frame_errors++; break; + case 0x2800: dev->stats.rx_crc_errors++; break; + } + } else { + short pkt_len = rx_status & 0x7ff; + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 5); + if (el3_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte */ + + /* 'skb->data' points to the start of sk_buff data area. */ + insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len), + (pkt_len + 3) >> 2); + + outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ + skb->protocol = eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + continue; + } + outw(RxDiscard, ioaddr + EL3_CMD); + dev->stats.rx_dropped++; + if (el3_debug) + pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", + dev->name, pkt_len); + } + inw(ioaddr + EL3_STATUS); /* Delay. */ + while (inw(ioaddr + EL3_STATUS) & 0x1000) + pr_debug(" Waiting for 3c509 to discard packet, status %x.\n", + inw(ioaddr + EL3_STATUS) ); + } + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. 
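   Only three filter words are ever programmed: RxStation|RxMulticast|
   RxBroadcast|RxProm for IFF_PROMISC, RxStation|RxMulticast|RxBroadcast when
   any multicast address is configured or IFF_ALLMULTI is set, and plain
   RxStation|RxBroadcast otherwise. The EtherLink III has no per-address
   multicast hash, so enabling RxMulticast simply accepts all multicast
   frames.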
+ */ +static void +set_multicast_list(struct net_device *dev) +{ + unsigned long flags; + struct el3_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + int mc_count = netdev_mc_count(dev); + + if (el3_debug > 1) { + static int old; + if (old != mc_count) { + old = mc_count; + pr_debug("%s: Setting Rx mode to %d addresses.\n", + dev->name, mc_count); + } + } + spin_lock_irqsave(&lp->lock, flags); + if (dev->flags&IFF_PROMISC) { + outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, + ioaddr + EL3_CMD); + } + else if (mc_count || (dev->flags&IFF_ALLMULTI)) { + outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD); + } + else + outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static int +el3_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct el3_private *lp = netdev_priv(dev); + + if (el3_debug > 2) + pr_debug("%s: Shutting down ethercard.\n", dev->name); + + el3_down(dev); + + free_irq(dev->irq, dev); + /* Switching back to window 0 disables the IRQ. */ + EL3WINDOW(0); + if (lp->type != EL3_EISA) { + /* But we explicitly zero the IRQ line select anyway. Don't do + * it on EISA cards, it prevents the module from getting an + * IRQ after unload+reload... */ + outw(0x0f00, ioaddr + WN0_IRQ); + } + + return 0; +} + +static int +el3_link_ok(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + u16 tmp; + + EL3WINDOW(4); + tmp = inw(ioaddr + WN4_MEDIA); + EL3WINDOW(1); + return tmp & (1<<11); +} + +static int +el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + int ioaddr = dev->base_addr; + + EL3WINDOW(0); + /* obtain current transceiver via WN4_MEDIA? */ + tmp = inw(ioaddr + WN0_ADDR_CONF); + ecmd->transceiver = XCVR_INTERNAL; + switch (tmp >> 14) { + case 0: + ecmd->port = PORT_TP; + break; + case 1: + ecmd->port = PORT_AUI; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case 3: + ecmd->port = PORT_BNC; + default: + break; + } + + ecmd->duplex = DUPLEX_HALF; + ecmd->supported = 0; + tmp = inw(ioaddr + WN0_CONF_CTRL); + if (tmp & (1<<13)) + ecmd->supported |= SUPPORTED_AUI; + if (tmp & (1<<12)) + ecmd->supported |= SUPPORTED_BNC; + if (tmp & (1<<9)) { + ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full; /* hmm... 
*/ + EL3WINDOW(4); + tmp = inw(ioaddr + WN4_NETDIAG); + if (tmp & FD_ENABLE) + ecmd->duplex = DUPLEX_FULL; + } + + ethtool_cmd_speed_set(ecmd, SPEED_10); + EL3WINDOW(1); + return 0; +} + +static int +el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + int ioaddr = dev->base_addr; + + if (ecmd->speed != SPEED_10) + return -EINVAL; + if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL)) + return -EINVAL; + if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL)) + return -EINVAL; + + /* change XCVR type */ + EL3WINDOW(0); + tmp = inw(ioaddr + WN0_ADDR_CONF); + switch (ecmd->port) { + case PORT_TP: + tmp &= ~(3<<14); + dev->if_port = 0; + break; + case PORT_AUI: + tmp |= (1<<14); + dev->if_port = 1; + break; + case PORT_BNC: + tmp |= (3<<14); + dev->if_port = 3; + break; + default: + return -EINVAL; + } + + outw(tmp, ioaddr + WN0_ADDR_CONF); + if (dev->if_port == 3) { + /* fire up the DC-DC convertor if BNC gets enabled */ + tmp = inw(ioaddr + WN0_ADDR_CONF); + if (tmp & (3 << 14)) { + outw(StartCoax, ioaddr + EL3_CMD); + udelay(800); + } else + return -EIO; + } + + EL3WINDOW(4); + tmp = inw(ioaddr + WN4_NETDIAG); + if (ecmd->duplex == DUPLEX_FULL) + tmp |= FD_ENABLE; + else + tmp &= ~FD_ENABLE; + outw(tmp, ioaddr + WN4_NETDIAG); + EL3WINDOW(1); + + return 0; +} + +static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct el3_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + ret = el3_netdev_get_ecmd(dev, ecmd); + spin_unlock_irq(&lp->lock); + return ret; +} + +static int el3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct el3_private *lp = netdev_priv(dev); + int ret; + + spin_lock_irq(&lp->lock); + ret = el3_netdev_set_ecmd(dev, ecmd); + spin_unlock_irq(&lp->lock); + return ret; +} + +static u32 el3_get_link(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + u32 ret; + + spin_lock_irq(&lp->lock); + ret = el3_link_ok(dev); + spin_unlock_irq(&lp->lock); + return ret; +} + +static u32 el3_get_msglevel(struct net_device *dev) +{ + return el3_debug; +} + +static void el3_set_msglevel(struct net_device *dev, u32 v) +{ + el3_debug = v; +} + +static const struct ethtool_ops ethtool_ops = { + .get_drvinfo = el3_get_drvinfo, + .get_settings = el3_get_settings, + .set_settings = el3_set_settings, + .get_link = el3_get_link, + .get_msglevel = el3_get_msglevel, + .set_msglevel = el3_set_msglevel, +}; + +static void +el3_down(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + netif_stop_queue(dev); + + /* Turn off statistics ASAP. We update lp->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (dev->if_port == 3) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + else if (dev->if_port == 0) { + /* Disable link beat and jabber, if_port may change here next open(). 
*/ + EL3WINDOW(4); + outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA); + } + + outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + + update_stats(dev); +} + +static void +el3_up(struct net_device *dev) +{ + int i, sw_info, net_diag; + int ioaddr = dev->base_addr; + + /* Activating the board required and does no harm otherwise */ + outw(0x0001, ioaddr + 4); + + /* Set the IRQ line. */ + outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ); + + /* Set the station address in window 2 each time opened. */ + EL3WINDOW(2); + + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + i); + + if ((dev->if_port & 0x03) == 3) /* BNC interface */ + /* Start the thinnet transceiver. We should really wait 50ms...*/ + outw(StartCoax, ioaddr + EL3_CMD); + else if ((dev->if_port & 0x03) == 0) { /* 10baseT interface */ + /* Combine secondary sw_info word (the adapter level) and primary + sw_info word (duplex setting plus other useless bits) */ + EL3WINDOW(0); + sw_info = (read_eeprom(ioaddr, 0x14) & 0x400f) | + (read_eeprom(ioaddr, 0x0d) & 0xBff0); + + EL3WINDOW(4); + net_diag = inw(ioaddr + WN4_NETDIAG); + net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */ + pr_info("%s: ", dev->name); + switch (dev->if_port & 0x0c) { + case 12: + /* force full-duplex mode if 3c5x9b */ + if (sw_info & 0x000f) { + pr_cont("Forcing 3c5x9b full-duplex mode"); + break; + } + case 8: + /* set full-duplex mode based on eeprom config setting */ + if ((sw_info & 0x000f) && (sw_info & 0x8000)) { + pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)"); + break; + } + default: + /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */ + pr_cont("Setting 3c5x9/3c5x9B half-duplex mode"); + net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */ + } + + outw(net_diag, ioaddr + WN4_NETDIAG); + pr_cont(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info); + if (el3_debug > 3) + pr_debug("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag); + /* Enable link beat and jabber check. */ + outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA); + } + + /* Switch to the stats window, and clear all stats by reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 9; i++) + inb(ioaddr + i); + inw(ioaddr + 10); + inw(ioaddr + 12); + + /* Switch to register set 1 for normal use. */ + EL3WINDOW(1); + + /* Accept b-case and phys addr only. */ + outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. 
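   The SetStatusEnb | 0xff write above makes every event visible in
   EL3_STATUS; the AckIntr write below clears anything already latched; and
   the final SetIntrEnb mask selects which events (IntLatch, TxAvailable,
   TxComplete, RxComplete, StatsFull) are allowed to assert the interrupt
   line.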
*/ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull, + ioaddr + EL3_CMD); + + netif_start_queue(dev); +} + +/* Power Management support functions */ +#ifdef CONFIG_PM + +static int +el3_suspend(struct device *pdev, pm_message_t state) +{ + unsigned long flags; + struct net_device *dev; + struct el3_private *lp; + int ioaddr; + + dev = dev_get_drvdata(pdev); + lp = netdev_priv(dev); + ioaddr = dev->base_addr; + + spin_lock_irqsave(&lp->lock, flags); + + if (netif_running(dev)) + netif_device_detach(dev); + + el3_down(dev); + outw(PowerDown, ioaddr + EL3_CMD); + + spin_unlock_irqrestore(&lp->lock, flags); + return 0; +} + +static int +el3_resume(struct device *pdev) +{ + unsigned long flags; + struct net_device *dev; + struct el3_private *lp; + int ioaddr; + + dev = dev_get_drvdata(pdev); + lp = netdev_priv(dev); + ioaddr = dev->base_addr; + + spin_lock_irqsave(&lp->lock, flags); + + outw(PowerUp, ioaddr + EL3_CMD); + EL3WINDOW(0); + el3_up(dev); + + if (netif_running(dev)) + netif_device_attach(dev); + + spin_unlock_irqrestore(&lp->lock, flags); + return 0; +} + +#endif /* CONFIG_PM */ + +module_param(debug,int, 0); +module_param_array(irq, int, NULL, 0); +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(debug, "debug level (0-6)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); +MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt"); +#ifdef CONFIG_PNP +module_param(nopnp, int, 0); +MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)"); +#endif /* CONFIG_PNP */ +MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver"); +MODULE_LICENSE("GPL"); + +static int __init el3_init_module(void) +{ + int ret = 0; + + if (debug >= 0) + el3_debug = debug; + +#ifdef CONFIG_PNP + if (!nopnp) { + ret = pnp_register_driver(&el3_pnp_driver); + if (!ret) + pnp_registered = 1; + } +#endif + /* Select an open I/O location at 0x1*0 to do ISA contention select. */ + /* Start with 0x110 to avoid some sound cards.*/ + for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) { + if (!request_region(id_port, 1, "3c509-control")) + continue; + outb(0x00, id_port); + outb(0xff, id_port); + if (inb(id_port) & 0x01) + break; + else + release_region(id_port, 1); + } + if (id_port >= 0x200) { + id_port = 0; + pr_err("No I/O port available for 3c509 activation.\n"); + } else { + ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS); + if (!ret) + isa_registered = 1; + } +#ifdef CONFIG_EISA + ret = eisa_driver_register(&el3_eisa_driver); + if (!ret) + eisa_registered = 1; +#endif + +#ifdef CONFIG_PNP + if (pnp_registered) + ret = 0; +#endif + if (isa_registered) + ret = 0; +#ifdef CONFIG_EISA + if (eisa_registered) + ret = 0; +#endif + return ret; +} + +static void __exit el3_cleanup_module(void) +{ +#ifdef CONFIG_PNP + if (pnp_registered) + pnp_unregister_driver(&el3_pnp_driver); +#endif + if (isa_registered) + isa_unregister_driver(&el3_isa_driver); + if (id_port) + release_region(id_port, 1); +#ifdef CONFIG_EISA + if (eisa_registered) + eisa_driver_unregister(&el3_eisa_driver); +#endif +} + +module_init (el3_init_module); +module_exit (el3_cleanup_module); diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c new file mode 100644 index 000000000..942fb0d5a --- /dev/null +++ b/drivers/net/ethernet/3com/3c515.c @@ -0,0 +1,1580 @@ +/* + Written 1997-1998 by Donald Becker. 
+ + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + 2000/2/2- Added support for kernel-level ISAPnP + by Stephen Frost and Alessandro Zummo + Cleaned up for 2.3.x/softnet by Jeff Garzik and Alan Cox. + + 2001/11/17 - Added ethtool support (jgarzik) + + 2002/10/28 - Locking updates for 2.5 (alan@lxorguk.ukuu.org.uk) + +*/ + +#define DRV_NAME "3c515" +#define DRV_VERSION "0.99t-ac" +#define DRV_RELDATE "28-Oct-2002" + +static char *version = +DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n"; + +#define CORKSCREW 1 + +/* "Knobs" that adjust features and parameters. */ +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1512 effectively disables this feature. */ +static int rx_copybreak = 200; + +/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ +static const int mtu = 1500; + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 20; + +/* Enable the automatic media selection code -- usually set. */ +#define AUTOMEDIA 1 + +/* Allow the use of fragment bus master transfers instead of only + programmed-I/O for Vortex cards. Full-bus-master transfers are always + enabled by default on Boomerang cards. If VORTEX_BUS_MASTER is defined, + the feature may be turned on using 'options'. */ +#define VORTEX_BUS_MASTER + +/* A few values that may be tweaked. */ +/* Keep the ring sizes a power of two for efficiency. */ +#define TX_RING_SIZE 16 +#define RX_RING_SIZE 16 +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define NEW_MULTICAST +#include + +#define MAX_UNITS 8 + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* "Knobs" for adjusting internal parameters. */ +/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */ +#define DRIVER_DEBUG 1 +/* Some values here only for performance evaluation and path-coverage + debugging. */ +static int rx_nocopy, rx_copy, queued_packet; + +/* Number of times to check to see if the Tx FIFO has space, used in some + limited cases. */ +#define WAIT_TX_AVAIL 200 + +/* Operational parameter that usually are not changed. */ +#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */ + +/* The size here is somewhat misleading: the Corkscrew also uses the ISA + aliased registers at +0x400. + */ +#define CORKSCREW_TOTAL_SIZE 0x20 + +#ifdef DRIVER_DEBUG +static int corkscrew_debug = DRIVER_DEBUG; +#else +static int corkscrew_debug = 1; +#endif + +#define CORKSCREW_ID 10 + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL, +3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout, +it's not practical to integrate this driver with the other EtherLink drivers. + +II. Board-specific settings + +The Corkscrew has an EEPROM for configuration, but no special settings are +needed for Linux. + +III. 
Driver operation + +The 3c515 series use an interface that's very similar to the 3c900 "Boomerang" +PCI cards, with the bus master interface extensively modified to work with +the ISA bus. + +The card is capable of full-bus-master transfers with separate +lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, +DEC Tulip and Intel Speedo3. + +This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate +receive buffer. This scheme allocates full-sized skbuffs as receive +buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is +chosen to trade-off the memory wasted by passing the full-sized skbuff to +the queue layer for all frames vs. the copying cost of copying a frame to a +correctly-sized skbuff. + + +IIIC. Synchronization +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the netif +layer. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +IV. Notes + +Thanks to Terry Murphy of 3Com for providing documentation and a development +board. + +The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com +project names. I use these names to eliminate confusion -- 3Com product +numbers and names are very similar and often confused. + +The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes! +This driver only supports ethernet frames because of the recent MTU limit +of 1.5K, but the changes to support 4.5K are minimal. +*/ + +/* Operational definitions. + These are not used by other compilation units and thus are not + exported in a ".h" file. + + First the windows. There are eight register windows, with the command + and status registers available in each. + */ +#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. + Note that 11 parameters bits was fine for ethernet, but the new chips + can handle FDDI length frames (~4500 octets) and now parameters count + 32-bit 'Dwords' rather than octets. */ + +enum corkscrew_cmd { + TotalReset = 0 << 11, SelectWindow = 1 << 11, StartCoax = 2 << 11, + RxDisable = 3 << 11, RxEnable = 4 << 11, RxReset = 5 << 11, + UpStall = 6 << 11, UpUnstall = (6 << 11) + 1, DownStall = (6 << 11) + 2, + DownUnstall = (6 << 11) + 3, RxDiscard = 8 << 11, TxEnable = 9 << 11, + TxDisable = 10 << 11, TxReset = 11 << 11, FakeIntr = 12 << 11, + AckIntr = 13 << 11, SetIntrEnb = 14 << 11, SetStatusEnb = 15 << 11, + SetRxFilter = 16 << 11, SetRxThreshold = 17 << 11, + SetTxThreshold = 18 << 11, SetTxStart = 19 << 11, StartDMAUp = 20 << 11, + StartDMADown = (20 << 11) + 1, StatsEnable = 21 << 11, + StatsDisable = 22 << 11, StopCoax = 23 << 11, +}; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 +}; + +/* Bits in the general status register. */ +enum corkscrew_status { + IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, + DMADone = 1 << 8, DownComplete = 1 << 9, UpComplete = 1 << 10, + DMAInProgress = 1 << 11, /* DMA controller is still busy. */ + CmdInProgress = 1 << 12, /* EL3_CMD is still busy. */ +}; + +/* Register window 1 offsets, the window used in normal operation. 
+ On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */ +enum Window1 { + TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, + RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B, + TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ +}; +enum Window0 { + Wn0IRQ = 0x08, +#if defined(CORKSCREW) + Wn0EepromCmd = 0x200A, /* Corkscrew EEPROM command register. */ + Wn0EepromData = 0x200C, /* Corkscrew EEPROM results register. */ +#else + Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ + Wn0EepromData = 12, /* Window 0: EEPROM results register. */ +#endif +}; +enum Win0_EEPROM_bits { + EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, + EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ + EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ +}; + +/* EEPROM locations. */ +enum eeprom_offset { + PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3, + EtherLink3ID = 7, +}; + +enum Window3 { /* Window 3: MAC/config bits. */ + Wn3_Config = 0, Wn3_MAC_Ctrl = 6, Wn3_Options = 8, +}; +enum wn3_config { + Ram_size = 7, + Ram_width = 8, + Ram_speed = 0x30, + Rom_size = 0xc0, + Ram_split_shift = 16, + Ram_split = 3 << Ram_split_shift, + Xcvr_shift = 20, + Xcvr = 7 << Xcvr_shift, + Autoselect = 0x1000000, +}; + +enum Window4 { + Wn4_NetDiag = 6, Wn4_Media = 10, /* Window 4: Xcvr/media bits. */ +}; +enum Win4_Media_bits { + Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ + Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ + Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ + Media_LnkBeat = 0x0800, +}; +enum Window7 { /* Window 7: Bus Master control. */ + Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12, +}; + +/* Boomerang-style bus master control registers. Note ISA aliases! */ +enum MasterCtrl { + PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408, FragLen = + 0x40c, + TxFreeThreshold = 0x40f, UpPktStatus = 0x410, UpListPtr = 0x418, +}; + +/* The Rx and Tx descriptor lists. + Caution Alpha hackers: these types are 32 bits! Note also the 8 byte + alignment contraint on tx_ring[] and rx_ring[]. */ +struct boom_rx_desc { + u32 next; + s32 status; + u32 addr; + s32 length; +}; + +/* Values for the Rx status entry. */ +enum rx_desc_status { + RxDComplete = 0x00008000, RxDError = 0x4000, + /* See boomerang_rx() for actual error bits */ +}; + +struct boom_tx_desc { + u32 next; + s32 status; + u32 addr; + s32 length; +}; + +struct corkscrew_private { + const char *product_name; + struct list_head list; + struct net_device *our_dev; + /* The Rx and Tx rings are here to keep them quad-word-aligned. */ + struct boom_rx_desc rx_ring[RX_RING_SIZE]; + struct boom_tx_desc tx_ring[TX_RING_SIZE]; + /* The addresses of transmit- and receive-in-place skbuffs. */ + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */ + struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ + struct timer_list timer; /* Media selection timer. */ + int capabilities ; /* Adapter capabilities word. */ + int options; /* User-settable misc. driver options. */ + int last_rx_packets; /* For media autoselection. */ + unsigned int available_media:8, /* From Wn3_Options */ + media_override:3, /* Passed-in media type. */ + default_media:3, /* Read from the EEPROM. 
*/ + full_duplex:1, autoselect:1, bus_master:1, /* Vortex can only do a fragment bus-m. */ + full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */ + tx_full:1; + spinlock_t lock; + struct device *dev; +}; + +/* The action to take with a media selection timer tick. + Note that we deviate from the 3Com order by checking 10base2 before AUI. + */ +enum xcvr_types { + XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, + XCVR_100baseFx, XCVR_MII = 6, XCVR_Default = 8, +}; + +static struct media_table { + char *name; + unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ + mask:8, /* The transceiver-present bit in Wn3_Config. */ + next:8; /* The media type to try next. */ + short wait; /* Time before we check media status. */ +} media_tbl[] = { + { "10baseT", Media_10TP, 0x08, XCVR_10base2, (14 * HZ) / 10 }, + { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1 * HZ) / 10}, + { "undefined", 0, 0x80, XCVR_10baseT, 10000}, + { "10base2", 0, 0x10, XCVR_AUI, (1 * HZ) / 10}, + { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14 * HZ) / 10}, + { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14 * HZ) / 10}, + { "MII", 0, 0x40, XCVR_10baseT, 3 * HZ}, + { "undefined", 0, 0x01, XCVR_10baseT, 10000}, + { "Default", 0, 0xFF, XCVR_10baseT, 10000}, +}; + +#ifdef __ISAPNP__ +static struct isapnp_device_id corkscrew_isapnp_adapters[] = { + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, + ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051), + (long) "3Com Fast EtherLink ISA" }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(isapnp, corkscrew_isapnp_adapters); + +static int nopnp; +#endif /* __ISAPNP__ */ + +static struct net_device *corkscrew_scan(int unit); +static int corkscrew_setup(struct net_device *dev, int ioaddr, + struct pnp_dev *idev, int card_number); +static int corkscrew_open(struct net_device *dev); +static void corkscrew_timer(unsigned long arg); +static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int corkscrew_rx(struct net_device *dev); +static void corkscrew_timeout(struct net_device *dev); +static int boomerang_rx(struct net_device *dev); +static irqreturn_t corkscrew_interrupt(int irq, void *dev_id); +static int corkscrew_close(struct net_device *dev); +static void update_stats(int addr, struct net_device *dev); +static struct net_device_stats *corkscrew_get_stats(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + + +/* + Unfortunately maximizing the shared code between the integrated and + module version of the driver results in a complicated set of initialization + procedures. + init_module() -- modules / tc59x_init() -- built-in + The wrappers for corkscrew_scan() + corkscrew_scan() The common routine that scans for PCI and EISA cards + corkscrew_found_device() Allocate a device structure when we find a card. + Different versions exist for modules and built-in. + corkscrew_probe1() Fill in the device structure -- this is separated + so that the modules code can put it in dev->init. +*/ +/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ +/* Note: this is the only limit on the number of cards supported!! 
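   Each element follows the layout spelled out by the MODULE_PARM_DESC()
   strings below: bits 0-2 select the media type, bit 3 forces full duplex,
   bit 4 enables the VORTEX_BUS_MASTER fragment bus-master mode, and -1 (the
   default) leaves everything at the EEPROM settings.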
*/ +static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1, }; + +#ifdef MODULE +static int debug = -1; + +module_param(debug, int, 0); +module_param_array(options, int, NULL, 0); +module_param(rx_copybreak, int, 0); +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(debug, "3c515 debug level (0-6)"); +MODULE_PARM_DESC(options, "3c515: Bits 0-2: media type, bit 3: full duplex, bit 4: bus mastering"); +MODULE_PARM_DESC(rx_copybreak, "3c515 copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt"); + +/* A list of all installed Vortex devices, for removing the driver module. */ +/* we will need locking (and refcounting) if we ever use it for more */ +static LIST_HEAD(root_corkscrew_dev); + +int init_module(void) +{ + int found = 0; + if (debug >= 0) + corkscrew_debug = debug; + if (corkscrew_debug) + pr_debug("%s", version); + while (corkscrew_scan(-1)) + found++; + return found ? 0 : -ENODEV; +} + +#else +struct net_device *tc515_probe(int unit) +{ + struct net_device *dev = corkscrew_scan(unit); + static int printed; + + if (!dev) + return ERR_PTR(-ENODEV); + + if (corkscrew_debug > 0 && !printed) { + printed = 1; + pr_debug("%s", version); + } + + return dev; +} +#endif /* not MODULE */ + +static int check_device(unsigned ioaddr) +{ + int timer; + + if (!request_region(ioaddr, CORKSCREW_TOTAL_SIZE, "3c515")) + return 0; + /* Check the resource configuration for a matching ioaddr. */ + if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0)) { + release_region(ioaddr, CORKSCREW_TOTAL_SIZE); + return 0; + } + /* Verify by reading the device ID from the EEPROM. */ + outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd); + /* Pause for at least 162 us. for the read to take place. 
*/ + for (timer = 4; timer >= 0; timer--) { + udelay(162); + if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) + break; + } + if (inw(ioaddr + Wn0EepromData) != 0x6d50) { + release_region(ioaddr, CORKSCREW_TOTAL_SIZE); + return 0; + } + return 1; +} + +static void cleanup_card(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + list_del_init(&vp->list); + if (dev->dma) + free_dma(dev->dma); + outw(TotalReset, dev->base_addr + EL3_CMD); + release_region(dev->base_addr, CORKSCREW_TOTAL_SIZE); + if (vp->dev) + pnp_device_detach(to_pnp_dev(vp->dev)); +} + +static struct net_device *corkscrew_scan(int unit) +{ + struct net_device *dev; + static int cards_found = 0; + static int ioaddr; + int err; +#ifdef __ISAPNP__ + short i; + static int pnp_cards; +#endif + + dev = alloc_etherdev(sizeof(struct corkscrew_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + +#ifdef __ISAPNP__ + if(nopnp == 1) + goto no_pnp; + for(i=0; corkscrew_isapnp_adapters[i].vendor != 0; i++) { + struct pnp_dev *idev = NULL; + int irq; + while((idev = pnp_find_dev(NULL, + corkscrew_isapnp_adapters[i].vendor, + corkscrew_isapnp_adapters[i].function, + idev))) { + + if (pnp_device_attach(idev) < 0) + continue; + if (pnp_activate_dev(idev) < 0) { + pr_warn("pnp activate failed (out of resources?)\n"); + pnp_device_detach(idev); + continue; + } + if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) { + pnp_device_detach(idev); + continue; + } + ioaddr = pnp_port_start(idev, 0); + irq = pnp_irq(idev, 0); + if (!check_device(ioaddr)) { + pnp_device_detach(idev); + continue; + } + if(corkscrew_debug) + pr_debug("ISAPNP reports %s at i/o 0x%x, irq %d\n", + (char*) corkscrew_isapnp_adapters[i].driver_data, ioaddr, irq); + pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n", + inl(ioaddr + 0x2002), inw(ioaddr + 0x2000)); + /* irq = inw(ioaddr + 0x2002) & 15; */ /* Use the irq from isapnp */ + SET_NETDEV_DEV(dev, &idev->dev); + pnp_cards++; + err = corkscrew_setup(dev, ioaddr, idev, cards_found++); + if (!err) + return dev; + cleanup_card(dev); + } + } +no_pnp: +#endif /* __ISAPNP__ */ + + /* Check all locations on the ISA bus -- evil! 
*/ + for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20) { + if (!check_device(ioaddr)) + continue; + + pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n", + inl(ioaddr + 0x2002), inw(ioaddr + 0x2000)); + err = corkscrew_setup(dev, ioaddr, NULL, cards_found++); + if (!err) + return dev; + cleanup_card(dev); + } + free_netdev(dev); + return NULL; +} + + +static const struct net_device_ops netdev_ops = { + .ndo_open = corkscrew_open, + .ndo_stop = corkscrew_close, + .ndo_start_xmit = corkscrew_start_xmit, + .ndo_tx_timeout = corkscrew_timeout, + .ndo_get_stats = corkscrew_get_stats, + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + + +static int corkscrew_setup(struct net_device *dev, int ioaddr, + struct pnp_dev *idev, int card_number) +{ + struct corkscrew_private *vp = netdev_priv(dev); + unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + int i; + int irq; + +#ifdef __ISAPNP__ + if (idev) { + irq = pnp_irq(idev, 0); + vp->dev = &idev->dev; + } else { + irq = inw(ioaddr + 0x2002) & 15; + } +#else + irq = inw(ioaddr + 0x2002) & 15; +#endif + + dev->base_addr = ioaddr; + dev->irq = irq; + dev->dma = inw(ioaddr + 0x2000) & 7; + vp->product_name = "3c515"; + vp->options = dev->mem_start; + vp->our_dev = dev; + + if (!vp->options) { + if (card_number >= MAX_UNITS) + vp->options = -1; + else + vp->options = options[card_number]; + } + + if (vp->options >= 0) { + vp->media_override = vp->options & 7; + if (vp->media_override == 2) + vp->media_override = 0; + vp->full_duplex = (vp->options & 8) ? 1 : 0; + vp->bus_master = (vp->options & 16) ? 1 : 0; + } else { + vp->media_override = 7; + vp->full_duplex = 0; + vp->bus_master = 0; + } +#ifdef MODULE + list_add(&vp->list, &root_corkscrew_dev); +#endif + + pr_info("%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr); + + spin_lock_init(&vp->lock); + + /* Read the station address from the EEPROM. */ + EL3WINDOW(0); + for (i = 0; i < 0x18; i++) { + __be16 *phys_addr = (__be16 *) dev->dev_addr; + int timer; + outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd); + /* Pause for at least 162 us. for the read to take place. */ + for (timer = 4; timer >= 0; timer--) { + udelay(162); + if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) + break; + } + eeprom[i] = inw(ioaddr + Wn0EepromData); + checksum ^= eeprom[i]; + if (i < 3) + phys_addr[i] = htons(eeprom[i]); + } + checksum = (checksum ^ (checksum >> 8)) & 0xff; + if (checksum != 0x00) + pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); + pr_cont(" %pM", dev->dev_addr); + if (eeprom[16] == 0x11c7) { /* Corkscrew */ + if (request_dma(dev->dma, "3c515")) { + pr_cont(", DMA %d allocation failed", dev->dma); + dev->dma = 0; + } else + pr_cont(", DMA %d", dev->dma); + } + pr_cont(", IRQ %d\n", dev->irq); + /* Tell them about an invalid IRQ. */ + if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15)) + pr_warn(" *** Warning: this IRQ is unlikely to work! ***\n"); + + { + static const char * const ram_split[] = { + "5:3", "3:1", "1:1", "3:5" + }; + __u32 config; + EL3WINDOW(3); + vp->available_media = inw(ioaddr + Wn3_Options); + config = inl(ioaddr + Wn3_Config); + if (corkscrew_debug > 1) + pr_info(" Internal config register is %4.4x, transceivers %#x.\n", + config, inw(ioaddr + Wn3_Options)); + pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", + 8 << config & Ram_size, + config & Ram_width ? 
"word" : "byte", + ram_split[(config & Ram_split) >> Ram_split_shift], + config & Autoselect ? "autoselect/" : "", + media_tbl[(config & Xcvr) >> Xcvr_shift].name); + vp->default_media = (config & Xcvr) >> Xcvr_shift; + vp->autoselect = config & Autoselect ? 1 : 0; + dev->if_port = vp->default_media; + } + if (vp->media_override != 7) { + pr_info(" Media override to transceiver type %d (%s).\n", + vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } + + vp->capabilities = eeprom[16]; + vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0; + /* Rx is broken at 10mbps, so we always disable it. */ + /* vp->full_bus_master_rx = 0; */ + vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; + + /* The 3c51x-specific entries in the device structure. */ + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = (400 * HZ) / 1000; + dev->ethtool_ops = &netdev_ethtool_ops; + + return register_netdev(dev); +} + + +static int corkscrew_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct corkscrew_private *vp = netdev_priv(dev); + __u32 config; + int i; + + /* Before initializing select the active media port. */ + EL3WINDOW(3); + if (vp->full_duplex) + outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */ + config = inl(ioaddr + Wn3_Config); + + if (vp->media_override != 7) { + if (corkscrew_debug > 1) + pr_info("%s: Media override to transceiver %d (%s).\n", + dev->name, vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } else if (vp->autoselect) { + /* Find first available media type, starting with 100baseTx. */ + dev->if_port = 4; + while (!(vp->available_media & media_tbl[dev->if_port].mask)) + dev->if_port = media_tbl[dev->if_port].next; + + if (corkscrew_debug > 1) + pr_debug("%s: Initial media type %s.\n", + dev->name, media_tbl[dev->if_port].name); + + init_timer(&vp->timer); + vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; + vp->timer.data = (unsigned long) dev; + vp->timer.function = corkscrew_timer; /* timer handler */ + add_timer(&vp->timer); + } else + dev->if_port = vp->default_media; + + config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift); + outl(config, ioaddr + Wn3_Config); + + if (corkscrew_debug > 1) { + pr_debug("%s: corkscrew_open() InternalConfig %8.8x.\n", + dev->name, config); + } + + outw(TxReset, ioaddr + EL3_CMD); + for (i = 20; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + + outw(RxReset, ioaddr + EL3_CMD); + /* Wait a few ticks for the RxReset command to complete. */ + for (i = 20; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + + outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + + /* Use the now-standard shared IRQ implementation. */ + if (vp->capabilities == 0x11c7) { + /* Corkscrew: Cannot share ISA resources. */ + if (dev->irq == 0 || + dev->dma == 0 || + request_irq(dev->irq, corkscrew_interrupt, 0, + vp->product_name, dev)) + return -EAGAIN; + enable_dma(dev->dma); + set_dma_mode(dev->dma, DMA_MODE_CASCADE); + } else if (request_irq(dev->irq, corkscrew_interrupt, IRQF_SHARED, + vp->product_name, dev)) { + return -EAGAIN; + } + + if (corkscrew_debug > 1) { + EL3WINDOW(4); + pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n", + dev->name, dev->irq, inw(ioaddr + Wn4_Media)); + } + + /* Set the station address and mask in window 2 each time opened. 
*/ + EL3WINDOW(2); + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + i); + for (; i < 12; i += 2) + outw(0, ioaddr + i); + + if (dev->if_port == 3) + /* Start the thinnet transceiver. We should really wait 50ms... */ + outw(StartCoax, ioaddr + EL3_CMD); + EL3WINDOW(4); + outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP | Media_SQE)) | + media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); + + /* Switch to the stats window, and clear all stats by reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 10; i++) + inb(ioaddr + i); + inw(ioaddr + 10); + inw(ioaddr + 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + EL3WINDOW(4); + inb(ioaddr + 12); + /* ..and on the Boomerang we enable the extra statistics bits. */ + outw(0x0040, ioaddr + Wn4_NetDiag); + + /* Switch to register set 7 for normal use. */ + EL3WINDOW(7); + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + vp->cur_rx = vp->dirty_rx = 0; + if (corkscrew_debug > 2) + pr_debug("%s: Filling in the Rx ring.\n", dev->name); + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + if (i < (RX_RING_SIZE - 1)) + vp->rx_ring[i].next = + isa_virt_to_bus(&vp->rx_ring[i + 1]); + else + vp->rx_ring[i].next = 0; + vp->rx_ring[i].status = 0; /* Clear complete bit. */ + vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000; + skb = netdev_alloc_skb(dev, PKT_BUF_SZ); + vp->rx_skbuff[i] = skb; + if (skb == NULL) + break; /* Bad news! */ + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); + } + if (i != 0) + vp->rx_ring[i - 1].next = + isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ + outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); + } + if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ + vp->cur_tx = vp->dirty_tx = 0; + outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */ + /* Clear the Tx ring. */ + for (i = 0; i < TX_RING_SIZE; i++) + vp->tx_skbuff[i] = NULL; + outl(0, ioaddr + DownListPtr); + } + /* Set receiver mode: presumably accept b-case and phys addr only. */ + set_rx_mode(dev); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + + netif_start_queue(dev); + + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | AdapterFailure | IntReq | StatsFull | + (vp->full_bus_master_tx ? DownComplete : TxAvailable) | + (vp->full_bus_master_rx ? UpComplete : RxComplete) | + (vp->bus_master ? DMADone : 0), ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. */ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull + | (vp->bus_master ? 
DMADone : 0) | UpComplete | DownComplete, + ioaddr + EL3_CMD); + + return 0; +} + +static void corkscrew_timer(unsigned long data) +{ +#ifdef AUTOMEDIA + struct net_device *dev = (struct net_device *) data; + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long flags; + int ok = 0; + + if (corkscrew_debug > 1) + pr_debug("%s: Media selection timer tick happened, %s.\n", + dev->name, media_tbl[dev->if_port].name); + + spin_lock_irqsave(&vp->lock, flags); + + { + int old_window = inw(ioaddr + EL3_CMD) >> 13; + int media_status; + EL3WINDOW(4); + media_status = inw(ioaddr + Wn4_Media); + switch (dev->if_port) { + case 0: + case 4: + case 5: /* 10baseT, 100baseTX, 100baseFX */ + if (media_status & Media_LnkBeat) { + ok = 1; + if (corkscrew_debug > 1) + pr_debug("%s: Media %s has link beat, %x.\n", + dev->name, + media_tbl[dev->if_port].name, + media_status); + } else if (corkscrew_debug > 1) + pr_debug("%s: Media %s is has no link beat, %x.\n", + dev->name, + media_tbl[dev->if_port].name, + media_status); + + break; + default: /* Other media types handled by Tx timeouts. */ + if (corkscrew_debug > 1) + pr_debug("%s: Media %s is has no indication, %x.\n", + dev->name, + media_tbl[dev->if_port].name, + media_status); + ok = 1; + } + if (!ok) { + __u32 config; + + do { + dev->if_port = + media_tbl[dev->if_port].next; + } + while (!(vp->available_media & media_tbl[dev->if_port].mask)); + + if (dev->if_port == 8) { /* Go back to default. */ + dev->if_port = vp->default_media; + if (corkscrew_debug > 1) + pr_debug("%s: Media selection failing, using default %s port.\n", + dev->name, + media_tbl[dev->if_port].name); + } else { + if (corkscrew_debug > 1) + pr_debug("%s: Media selection failed, now trying %s port.\n", + dev->name, + media_tbl[dev->if_port].name); + vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; + add_timer(&vp->timer); + } + outw((media_status & ~(Media_10TP | Media_SQE)) | + media_tbl[dev->if_port].media_bits, + ioaddr + Wn4_Media); + + EL3WINDOW(3); + config = inl(ioaddr + Wn3_Config); + config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift); + outl(config, ioaddr + Wn3_Config); + + outw(dev->if_port == 3 ? StartCoax : StopCoax, + ioaddr + EL3_CMD); + } + EL3WINDOW(old_window); + } + + spin_unlock_irqrestore(&vp->lock, flags); + if (corkscrew_debug > 1) + pr_debug("%s: Media selection timer finished, %s.\n", + dev->name, media_tbl[dev->if_port].name); + +#endif /* AUTOMEDIA */ +} + +static void corkscrew_timeout(struct net_device *dev) +{ + int i; + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + pr_warn("%s: transmit timed out, tx_status %2.2x status %4.4x\n", + dev->name, inb(ioaddr + TxStatus), + inw(ioaddr + EL3_STATUS)); + /* Slight code bloat to be user friendly. */ + if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) + pr_warn("%s: Transmitter encountered 16 collisions -- network cable problem?\n", + dev->name); +#ifndef final_version + pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n", + vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, + vp->cur_tx); + pr_debug(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr), + &vp->tx_ring[0]); + for (i = 0; i < TX_RING_SIZE; i++) { + pr_debug(" %d: %p length %8.8x status %8.8x\n", i, + &vp->tx_ring[i], + vp->tx_ring[i].length, vp->tx_ring[i].status); + } +#endif + /* Issue TX_RESET and TX_START commands. 
*/ + outw(TxReset, ioaddr + EL3_CMD); + for (i = 20; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + outw(TxEnable, ioaddr + EL3_CMD); + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + netif_wake_queue(dev); +} + +static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + + /* Block a timer-based transmit from overlapping. */ + + netif_stop_queue(dev); + + if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */ + /* Calculate the next Tx descriptor entry. */ + int entry = vp->cur_tx % TX_RING_SIZE; + struct boom_tx_desc *prev_entry; + unsigned long flags; + int i; + + if (vp->tx_full) /* No room to transmit with */ + return NETDEV_TX_BUSY; + if (vp->cur_tx != 0) + prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE]; + else + prev_entry = NULL; + if (corkscrew_debug > 3) + pr_debug("%s: Trying to send a packet, Tx index %d.\n", + dev->name, vp->cur_tx); + /* vp->tx_full = 1; */ + vp->tx_skbuff[entry] = skb; + vp->tx_ring[entry].next = 0; + vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data); + vp->tx_ring[entry].length = skb->len | 0x80000000; + vp->tx_ring[entry].status = skb->len | 0x80000000; + + spin_lock_irqsave(&vp->lock, flags); + outw(DownStall, ioaddr + EL3_CMD); + /* Wait for the stall to complete. */ + for (i = 20; i >= 0; i--) + if ((inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0) + break; + if (prev_entry) + prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]); + if (inl(ioaddr + DownListPtr) == 0) { + outl(isa_virt_to_bus(&vp->tx_ring[entry]), + ioaddr + DownListPtr); + queued_packet++; + } + outw(DownUnstall, ioaddr + EL3_CMD); + spin_unlock_irqrestore(&vp->lock, flags); + + vp->cur_tx++; + if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) + vp->tx_full = 1; + else { /* Clear previous interrupt enable. */ + if (prev_entry) + prev_entry->status &= ~0x80000000; + netif_wake_queue(dev); + } + return NETDEV_TX_OK; + } + /* Put out the doubleword header... */ + outl(skb->len, ioaddr + TX_FIFO); + dev->stats.tx_bytes += skb->len; +#ifdef VORTEX_BUS_MASTER + if (vp->bus_master) { + /* Set the bus-master controller to transfer the packet. */ + outl((int) (skb->data), ioaddr + Wn7_MasterAddr); + outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); + vp->tx_skb = skb; + outw(StartDMADown, ioaddr + EL3_CMD); + /* queue will be woken at the DMADone interrupt. */ + } else { + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + dev_kfree_skb(skb); + if (inw(ioaddr + TxFree) > 1536) { + netif_wake_queue(dev); + } else + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + (1536 >> 2), + ioaddr + EL3_CMD); + } +#else + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + dev_kfree_skb(skb); + if (inw(ioaddr + TxFree) > 1536) { + netif_wake_queue(dev); + } else + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD); +#endif /* bus master */ + + + /* Clear the Tx status stack. */ + { + short tx_status; + int i = 4; + + while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) { + if (tx_status & 0x3C) { /* A Tx-disabling error occurred. 
*/ + if (corkscrew_debug > 2) + pr_debug("%s: Tx error, status %2.2x.\n", + dev->name, tx_status); + if (tx_status & 0x04) + dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) + dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) { + int j; + outw(TxReset, ioaddr + EL3_CMD); + for (j = 20; j >= 0; j--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + } + outw(TxEnable, ioaddr + EL3_CMD); + } + outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ + } + } + return NETDEV_TX_OK; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ + +static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) +{ + /* Use the now-standard shared IRQ implementation. */ + struct net_device *dev = dev_id; + struct corkscrew_private *lp = netdev_priv(dev); + int ioaddr, status; + int latency; + int i = max_interrupt_work; + + ioaddr = dev->base_addr; + latency = inb(ioaddr + Timer); + + spin_lock(&lp->lock); + + status = inw(ioaddr + EL3_STATUS); + + if (corkscrew_debug > 4) + pr_debug("%s: interrupt, status %4.4x, timer %d.\n", + dev->name, status, latency); + if ((status & 0xE000) != 0xE000) { + static int donedidthis; + /* Some interrupt controllers store a bogus interrupt from boot-time. + Ignore a single early interrupt, but don't hang the machine for + other interrupt problems. */ + if (donedidthis++ > 100) { + pr_err("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n", + dev->name, status, netif_running(dev)); + free_irq(dev->irq, dev); + dev->irq = -1; + } + } + + do { + if (corkscrew_debug > 5) + pr_debug("%s: In interrupt loop, status %4.4x.\n", + dev->name, status); + if (status & RxComplete) + corkscrew_rx(dev); + + if (status & TxAvailable) { + if (corkscrew_debug > 5) + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); + } + if (status & DownComplete) { + unsigned int dirty_tx = lp->dirty_tx; + + while (lp->cur_tx - dirty_tx > 0) { + int entry = dirty_tx % TX_RING_SIZE; + if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry])) + break; /* It still hasn't been processed. */ + if (lp->tx_skbuff[entry]) { + dev_kfree_skb_irq(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + } + dirty_tx++; + } + lp->dirty_tx = dirty_tx; + outw(AckIntr | DownComplete, ioaddr + EL3_CMD); + if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) { + lp->tx_full = 0; + netif_wake_queue(dev); + } + } +#ifdef VORTEX_BUS_MASTER + if (status & DMADone) { + outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ + dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */ + netif_wake_queue(dev); + } +#endif + if (status & UpComplete) { + boomerang_rx(dev); + outw(AckIntr | UpComplete, ioaddr + EL3_CMD); + } + if (status & (AdapterFailure | RxEarly | StatsFull)) { + /* Handle all uncommon interrupts at once. */ + if (status & RxEarly) { /* Rx early is unused. */ + corkscrew_rx(dev); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & StatsFull) { /* Empty statistics. */ + static int DoneDidThat; + if (corkscrew_debug > 4) + pr_debug("%s: Updating stats.\n", dev->name); + update_stats(ioaddr, dev); + /* DEBUG HACK: Disable statistics as an interrupt source. */ + /* This occurs when we have the wrong media type! 
*/ + if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) { + int win, reg; + pr_notice("%s: Updating stats failed, disabling stats as an interrupt source.\n", + dev->name); + for (win = 0; win < 8; win++) { + EL3WINDOW(win); + pr_notice("Vortex window %d:", win); + for (reg = 0; reg < 16; reg++) + pr_cont(" %2.2x", inb(ioaddr + reg)); + pr_cont("\n"); + } + EL3WINDOW(7); + outw(SetIntrEnb | TxAvailable | + RxComplete | AdapterFailure | + UpComplete | DownComplete | + TxComplete, ioaddr + EL3_CMD); + DoneDidThat++; + } + } + if (status & AdapterFailure) { + /* Adapter failure requires Rx reset and reinit. */ + outw(RxReset, ioaddr + EL3_CMD); + /* Set the Rx filter to the current state. */ + set_rx_mode(dev); + outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ + outw(AckIntr | AdapterFailure, + ioaddr + EL3_CMD); + } + } + + if (--i < 0) { + pr_err("%s: Too much work in interrupt, status %4.4x. Disabling functions (%4.4x).\n", + dev->name, status, SetStatusEnb | ((~status) & 0x7FE)); + /* Disable all pending interrupts. */ + outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD); + outw(AckIntr | 0x7FF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + + } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); + + spin_unlock(&lp->lock); + + if (corkscrew_debug > 4) + pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); + return IRQ_HANDLED; +} + +static int corkscrew_rx(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int i; + short rx_status; + + if (corkscrew_debug > 5) + pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus)); + while ((rx_status = inw(ioaddr + RxStatus)) > 0) { + if (rx_status & 0x4000) { /* Error, update stats. */ + unsigned char rx_error = inb(ioaddr + RxErrors); + if (corkscrew_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", + rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) + dev->stats.rx_over_errors++; + if (rx_error & 0x02) + dev->stats.rx_length_errors++; + if (rx_error & 0x04) + dev->stats.rx_frame_errors++; + if (rx_error & 0x08) + dev->stats.rx_crc_errors++; + if (rx_error & 0x10) + dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + short pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 5 + 2); + if (corkscrew_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + /* 'skb_put()' points to the start of sk_buff data area. */ + insl(ioaddr + RX_FIFO, + skb_put(skb, pkt_len), + (pkt_len + 3) >> 2); + outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + /* Wait a limited time to go to next packet. */ + for (i = 200; i >= 0; i--) + if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + continue; + } else if (corkscrew_debug) + pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len); + } + outw(RxDiscard, ioaddr + EL3_CMD); + dev->stats.rx_dropped++; + /* Wait a limited time to skip this packet. 
*/ + for (i = 200; i >= 0; i--) + if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + } + return 0; +} + +static int boomerang_rx(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + int entry = vp->cur_rx % RX_RING_SIZE; + int ioaddr = dev->base_addr; + int rx_status; + + if (corkscrew_debug > 5) + pr_debug(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus)); + while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) { + if (rx_status & RxDError) { /* Error, update stats. */ + unsigned char rx_error = rx_status >> 16; + if (corkscrew_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", + rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) + dev->stats.rx_over_errors++; + if (rx_error & 0x02) + dev->stats.rx_length_errors++; + if (rx_error & 0x04) + dev->stats.rx_frame_errors++; + if (rx_error & 0x08) + dev->stats.rx_crc_errors++; + if (rx_error & 0x10) + dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + short pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + + dev->stats.rx_bytes += pkt_len; + if (corkscrew_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + + /* Check if the packet is long enough to just accept without + copying to a properly sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 4)) != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + /* 'skb_put()' points to the start of sk_buff data area. */ + memcpy(skb_put(skb, pkt_len), + isa_bus_to_virt(vp->rx_ring[entry]. + addr), pkt_len); + rx_copy++; + } else { + void *temp; + /* Pass up the skbuff already on the Rx ring. */ + skb = vp->rx_skbuff[entry]; + vp->rx_skbuff[entry] = NULL; + temp = skb_put(skb, pkt_len); + /* Remove this checking code for final release. */ + if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp) + pr_warn("%s: Warning -- the skbuff addresses do not match in boomerang_rx: %p vs. %p / %p\n", + dev->name, + isa_bus_to_virt(vp->rx_ring[entry].addr), + skb->head, temp); + rx_nocopy++; + } + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + entry = (++vp->cur_rx) % RX_RING_SIZE; + } + /* Refill the Rx ring buffers. */ + for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { + struct sk_buff *skb; + entry = vp->dirty_rx % RX_RING_SIZE; + if (vp->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(dev, PKT_BUF_SZ); + if (skb == NULL) + break; /* Bad news! */ + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data); + vp->rx_skbuff[entry] = skb; + } + vp->rx_ring[entry].status = 0; /* Clear complete bit. */ + } + return 0; +} + +static int corkscrew_close(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + int ioaddr = dev->base_addr; + int i; + + netif_stop_queue(dev); + + if (corkscrew_debug > 1) { + pr_debug("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n", + dev->name, inw(ioaddr + EL3_STATUS), + inb(ioaddr + TxStatus)); + pr_debug("%s: corkscrew close stats: rx_nocopy %d rx_copy %d tx_queued %d.\n", + dev->name, rx_nocopy, rx_copy, queued_packet); + } + + del_timer(&vp->timer); + + /* Turn off statistics ASAP. We update lp->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. 
*/ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (dev->if_port == XCVR_10base2) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + + free_irq(dev->irq, dev); + + outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + + update_stats(ioaddr, dev); + if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ + outl(0, ioaddr + UpListPtr); + for (i = 0; i < RX_RING_SIZE; i++) + if (vp->rx_skbuff[i]) { + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } + if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ + outl(0, ioaddr + DownListPtr); + for (i = 0; i < TX_RING_SIZE; i++) + if (vp->tx_skbuff[i]) { + dev_kfree_skb(vp->tx_skbuff[i]); + vp->tx_skbuff[i] = NULL; + } + } + + return 0; +} + +static struct net_device_stats *corkscrew_get_stats(struct net_device *dev) +{ + struct corkscrew_private *vp = netdev_priv(dev); + unsigned long flags; + + if (netif_running(dev)) { + spin_lock_irqsave(&vp->lock, flags); + update_stats(dev->base_addr, dev); + spin_unlock_irqrestore(&vp->lock, flags); + } + return &dev->stats; +} + +/* Update statistics. + Unlike with the EL3 we need not worry about interrupts changing + the window setting from underneath us, but we must still guard + against a race condition with a StatsUpdate interrupt updating the + table. This is done by checking that the ASM (!) code generated uses + atomic updates with '+='. + */ +static void update_stats(int ioaddr, struct net_device *dev) +{ + /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4; + /* Rx packets */ inb(ioaddr + 7); + /* Must read to clear */ + /* Tx deferrals */ inb(ioaddr + 8); + /* Don't bother with register 9, an extension of registers 6&7. + If we do use the 6&7 values the atomic update assumption above + is invalid. */ + inw(ioaddr + 10); /* Total Rx and Tx octets. */ + inw(ioaddr + 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + EL3WINDOW(4); + inb(ioaddr + 12); + + /* We change back to window 7 (not 1) with the Vortex. */ + EL3WINDOW(7); +} + +/* This new version of set_rx_mode() supports v1.4 kernels. + The Vortex chip has no documented multicast filter, so the only + multicast setting is to receive all multicast frames. At least + the chip has a very clean way to set the mode, unlike many others. 
*/ +static void set_rx_mode(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + short new_mode; + + if (dev->flags & IFF_PROMISC) { + if (corkscrew_debug > 3) + pr_debug("%s: Setting promiscuous mode.\n", + dev->name); + new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm; + } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { + new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast; + } else + new_mode = SetRxFilter | RxStation | RxBroadcast; + + outw(new_mode, ioaddr + EL3_CMD); +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx", + dev->base_addr); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return corkscrew_debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 level) +{ + corkscrew_debug = level; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + + +#ifdef MODULE +void cleanup_module(void) +{ + while (!list_empty(&root_corkscrew_dev)) { + struct net_device *dev; + struct corkscrew_private *vp; + + vp = list_entry(root_corkscrew_dev.next, + struct corkscrew_private, list); + dev = vp->our_dev; + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c new file mode 100644 index 000000000..b9948f00c --- /dev/null +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -0,0 +1,1167 @@ +/* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner". + + Written 1993-1998 by + Donald Becker, becker@scyld.com, (driver core) and + David Hinds, dahinds@users.sourceforge.net (from his PC card code). + Locking fixes (C) Copyright 2003 Red Hat Inc + + This software may be used and distributed according to the terms of + the GNU General Public License, incorporated herein by reference. + + This driver derives from Donald Becker's 3c509 core, which has the + following copyright: + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + +*/ + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the 3Com 3c574 PC card Fast Ethernet +Adapter. + +II. Board-specific settings + +None -- PC cards are autoconfigured. + +III. Driver operation + +The 3c574 uses a Boomerang-style interface, without the bus-master capability. +See the Boomerang driver and documentation for most details. + +IV. Notes and chip documentation. + +Two added registers are used to enhance PIO performance, RunnerRdCtrl and +RunnerWrCtrl. These are 11 bit down-counters that are preloaded with the +count of word (16 bits) reads or writes the driver is about to do to the Rx +or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card +translation latency by buffering the I/O operations with an 8 word FIFO. +Note: No other chip accesses are permitted when this buffer is used. + +A second enhancement is that both attribute and common memory space +0x0800-0x0fff can translated to the PIO FIFO. Thus memory operations (faster +with *some* PCcard bridges) may be used instead of I/O operations. +This is enabled by setting the 0x10 bit in the PCMCIA LAN COR. 
+ +Some slow PC card bridges work better if they never see a WAIT signal. +This is configured by setting the 0x20 bit in the PCMCIA LAN COR. +Only do this after testing that it is reliable and improves performance. + +The upper five bits of RunnerRdCtrl are used to window into PCcard +configuration space registers. Window 0 is the regular Boomerang/Odie +register set, 1-5 are various PC card control registers, and 16-31 are +the (reversed!) CIS table. + +A final note: writing the InternalConfig register in window 3 with an +invalid ramWidth is Very Bad. + +V. References + +http://www.scyld.com/expert/NWay.html +http://www.national.com/opf/DP/DP83840A.html + +Thanks to Terry Murphy of 3Com for providing development information for +earlier 3Com products. + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_AUTHOR("David Hinds "); +MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +INT_MODULE_PARM(max_interrupt_work, 32); + +/* Force full duplex modes? */ +INT_MODULE_PARM(full_duplex, 0); + +/* Autodetect link polarity reversal? */ +INT_MODULE_PARM(auto_polarity, 1); + + +/*====================================================================*/ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT ((800*HZ)/1000) + +/* To minimize the size of the driver source and make the driver more + readable not all constants are symbolically defined. + You'll need the manual if you want to understand driver details anyway. */ +/* Offsets from base I/O address. */ +#define EL3_DATA 0x00 +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e + +#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. */ +enum el3_cmds { + TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, + RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, + TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, + FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, + StatsDisable = 22<<11, StopCoax = 23<<11, +}; + +enum elxl_status { + IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 }; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 +}; + +enum Window0 { + Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */ + IntrStatus=0x0E, /* Valid in all windows. */ +}; +/* These assumes the larger EEPROM. */ +enum Win0_EEPROM_cmds { + EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300, + EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ + EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. 
*/ +}; + +/* Register window 1 offsets, the window used in normal operation. + On the "Odie" this window is always mapped at offsets 0x10-0x1f. + Except for TxFree, which is overlapped by RunnerWrCtrl. */ +enum Window1 { + TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, + RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, + TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */ + RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c, +}; + +enum Window3 { /* Window 3: MAC/config bits. */ + Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8, +}; +enum wn3_config { + Ram_size = 7, + Ram_width = 8, + Ram_speed = 0x30, + Rom_size = 0xc0, + Ram_split_shift = 16, + Ram_split = 3 << Ram_split_shift, + Xcvr_shift = 20, + Xcvr = 7 << Xcvr_shift, + Autoselect = 0x1000000, +}; + +enum Window4 { /* Window 4: Xcvr/media bits. */ + Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, +}; + +#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ + +struct el3_private { + struct pcmcia_device *p_dev; + u16 advertising, partner; /* NWay media advertisement */ + unsigned char phys; /* MII device address */ + unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */ + /* for transceiver monitoring */ + struct timer_list media; + unsigned short media_status; + unsigned short fast_poll; + unsigned long last_irq; + spinlock_t window_lock; /* Guards the Window selection */ +}; + +/* Set iff a MII transceiver on any interface requires mdio preamble. + This only set with the original DP83840 on older 3c905 boards, so the extra + code size of a per-interface flag is not worthwhile. */ +static char mii_preamble_required = 0; + +/* Index of functions. */ + +static int tc574_config(struct pcmcia_device *link); +static void tc574_release(struct pcmcia_device *link); + +static void mdio_sync(unsigned int ioaddr, int bits); +static int mdio_read(unsigned int ioaddr, int phy_id, int location); +static void mdio_write(unsigned int ioaddr, int phy_id, int location, + int value); +static unsigned short read_eeprom(unsigned int ioaddr, int index); +static void tc574_wait_for_completion(struct net_device *dev, int cmd); + +static void tc574_reset(struct net_device *dev); +static void media_check(unsigned long arg); +static int el3_open(struct net_device *dev); +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t el3_interrupt(int irq, void *dev_id); +static void update_stats(struct net_device *dev); +static struct net_device_stats *el3_get_stats(struct net_device *dev); +static int el3_rx(struct net_device *dev, int worklimit); +static int el3_close(struct net_device *dev); +static void el3_tx_timeout(struct net_device *dev); +static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void set_rx_mode(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); + +static void tc574_detach(struct pcmcia_device *p_dev); + +/* + tc574_attach() creates an "instance" of the driver, allocating + local data structures for one device. The device is registered + with Card Services. 
+*/ +static const struct net_device_ops el3_netdev_ops = { + .ndo_open = el3_open, + .ndo_stop = el3_close, + .ndo_start_xmit = el3_start_xmit, + .ndo_tx_timeout = el3_tx_timeout, + .ndo_get_stats = el3_get_stats, + .ndo_do_ioctl = el3_ioctl, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int tc574_probe(struct pcmcia_device *link) +{ + struct el3_private *lp; + struct net_device *dev; + + dev_dbg(&link->dev, "3c574_attach()\n"); + + /* Create the PC card device object. */ + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) + return -ENOMEM; + lp = netdev_priv(dev); + link->priv = dev; + lp->p_dev = link; + + spin_lock_init(&lp->window_lock); + link->resource[0]->end = 32; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; + + dev->netdev_ops = &el3_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + return tc574_config(link); +} + +static void tc574_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "3c574_detach()\n"); + + unregister_netdev(dev); + + tc574_release(link); + + free_netdev(dev); +} /* tc574_detach */ + +static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + +static int tc574_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct el3_private *lp = netdev_priv(dev); + int ret, i, j; + unsigned int ioaddr; + __be16 *phys_addr; + char *cardname; + __u32 config; + u8 *buf; + size_t len; + + phys_addr = (__be16 *)dev->dev_addr; + + dev_dbg(&link->dev, "3c574_config()\n"); + + link->io_lines = 16; + + for (i = j = 0; j < 0x400; j += 0x20) { + link->resource[0]->start = j ^ 0x300; + i = pcmcia_request_io(link); + if (i == 0) + break; + } + if (i != 0) + goto failed; + + ret = pcmcia_request_irq(link, el3_interrupt); + if (ret) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + ioaddr = dev->base_addr; + + /* The 3c574 normally uses an EEPROM for configuration info, including + the hardware address. The future products may include a modem chip + and put the address in the CIS. */ + + len = pcmcia_get_tuple(link, 0x88, &buf); + if (buf && len >= 6) { + for (i = 0; i < 3; i++) + phys_addr[i] = htons(le16_to_cpu(buf[i * 2])); + kfree(buf); + } else { + kfree(buf); /* 0 < len < 6 */ + EL3WINDOW(0); + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); + if (phys_addr[0] == htons(0x6060)) { + pr_notice("IO port conflict at 0x%03lx-0x%03lx\n", + dev->base_addr, dev->base_addr+15); + goto failed; + } + } + if (link->prod_id[1]) + cardname = link->prod_id[1]; + else + cardname = "3Com 3c574"; + + { + u_char mcr; + outw(2<<11, ioaddr + RunnerRdCtrl); + mcr = inb(ioaddr + 2); + outw(0<<11, ioaddr + RunnerRdCtrl); + pr_info(" ASIC rev %d,", mcr>>3); + EL3WINDOW(3); + config = inl(ioaddr + Wn3_Config); + lp->default_media = (config & Xcvr) >> Xcvr_shift; + lp->autoselect = config & Autoselect ? 
1 : 0; + } + + init_timer(&lp->media); + + { + int phy; + + /* Roadrunner only: Turn on the MII transceiver */ + outw(0x8040, ioaddr + Wn3_Options); + mdelay(1); + outw(0xc040, ioaddr + Wn3_Options); + tc574_wait_for_completion(dev, TxReset); + tc574_wait_for_completion(dev, RxReset); + mdelay(1); + outw(0x8040, ioaddr + Wn3_Options); + + EL3WINDOW(4); + for (phy = 1; phy <= 32; phy++) { + int mii_status; + mdio_sync(ioaddr, 32); + mii_status = mdio_read(ioaddr, phy & 0x1f, 1); + if (mii_status != 0xffff) { + lp->phys = phy & 0x1f; + dev_dbg(&link->dev, " MII transceiver at " + "index %d, status %x.\n", + phy, mii_status); + if ((mii_status & 0x0040) == 0) + mii_preamble_required = 1; + break; + } + } + if (phy > 32) { + pr_notice(" No MII transceivers found!\n"); + goto failed; + } + i = mdio_read(ioaddr, lp->phys, 16) | 0x40; + mdio_write(ioaddr, lp->phys, 16, i); + lp->advertising = mdio_read(ioaddr, lp->phys, 4); + if (full_duplex) { + /* Only advertise the FD media types. */ + lp->advertising &= ~0x02a0; + mdio_write(ioaddr, lp->phys, 4, lp->advertising); + } + } + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n", + cardname, dev->base_addr, dev->irq, dev->dev_addr); + netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n", + 8 << (config & Ram_size), + ram_split[(config & Ram_split) >> Ram_split_shift], + config & Autoselect ? "autoselect " : ""); + + return 0; + +failed: + tc574_release(link); + return -ENODEV; + +} /* tc574_config */ + +static void tc574_release(struct pcmcia_device *link) +{ + pcmcia_disable_device(link); +} + +static int tc574_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int tc574_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + tc574_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + +static void dump_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + EL3WINDOW(1); + netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n", + inw(ioaddr+EL3_STATUS), + inw(ioaddr+RxStatus), inb(ioaddr+TxStatus), + inw(ioaddr+TxFree)); + EL3WINDOW(4); + netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", + inw(ioaddr+0x04), inw(ioaddr+0x06), + inw(ioaddr+0x08), inw(ioaddr+0x0a)); + EL3WINDOW(1); +} + +/* + Use this for commands that may take time to finish +*/ +static void tc574_wait_for_completion(struct net_device *dev, int cmd) +{ + int i = 1500; + outw(cmd, dev->base_addr + EL3_CMD); + while (--i > 0) + if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; + if (i == 0) + netdev_notice(dev, "command 0x%04x did not complete!\n", cmd); +} + +/* Read a word from the EEPROM using the regular EEPROM access register. + Assume that we are in register window zero. + */ +static unsigned short read_eeprom(unsigned int ioaddr, int index) +{ + int timer; + outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd); + /* Pause for at least 162 usec for the read to take place. */ + for (timer = 1620; timer >= 0; timer--) { + if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0) + break; + } + return inw(ioaddr + Wn0EepromData); +} + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. 
See the MII specifications or DP83840A data sheet
+   for details.
+   The maximum data clock rate is 2.5 MHz.  The timing is easily met by the
+   slow PC card interface. */
+
+#define MDIO_SHIFT_CLK	0x01
+#define MDIO_DIR_WRITE	0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ	0x02
+#define MDIO_ENB_IN	0x00
+
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+static void mdio_sync(unsigned int ioaddr, int bits)
+{
+	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	/* Establish sync by sending at least 32 logic ones. */
+	while (-- bits >= 0) {
+		outw(MDIO_DATA_WRITE1, mdio_addr);
+		outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+	}
+}
+
+static int mdio_read(unsigned int ioaddr, int phy_id, int location)
+{
+	int i;
+	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	unsigned int retval = 0;
+	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the read command bits out. */
+	for (i = 14; i >= 0; i--) {
+		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+	}
+	return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
+{
+	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+	unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+	int i;
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+	}
+	/* Leave the interface idle. */
+	for (i = 1; i >= 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+	}
+}
+
+/* Reset and restore all of the 3c574 registers. */
+static void tc574_reset(struct net_device *dev)
+{
+	struct el3_private *lp = netdev_priv(dev);
+	int i;
+	unsigned int ioaddr = dev->base_addr;
+	unsigned long flags;
+
+	tc574_wait_for_completion(dev, TotalReset|0x10);
+
+	spin_lock_irqsave(&lp->window_lock, flags);
+	/* Clear any transactions in progress. */
+	outw(0, ioaddr + RunnerWrCtrl);
+	outw(0, ioaddr + RunnerRdCtrl);
+
+	/* Set the station address and mask. */
+	EL3WINDOW(2);
+	for (i = 0; i < 6; i++)
+		outb(dev->dev_addr[i], ioaddr + i);
+	for (; i < 12; i+=2)
+		outw(0, ioaddr + i);
+
+	/* Reset config options */
+	EL3WINDOW(3);
+	outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+	outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
+	     ioaddr + Wn3_Config);
+	/* Roadrunner only: Turn on the MII transceiver. */
+	outw(0x8040, ioaddr + Wn3_Options);
+	mdelay(1);
+	outw(0xc040, ioaddr + Wn3_Options);
+	EL3WINDOW(1);
+	spin_unlock_irqrestore(&lp->window_lock, flags);
+
+	tc574_wait_for_completion(dev, TxReset);
+	tc574_wait_for_completion(dev, RxReset);
+	mdelay(1);
+	spin_lock_irqsave(&lp->window_lock, flags);
+	EL3WINDOW(3);
+	outw(0x8040, ioaddr + Wn3_Options);
+
+	/* Switch to the stats window, and clear all stats by reading. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+	EL3WINDOW(6);
+	for (i = 0; i < 10; i++)
+		inb(ioaddr + i);
+	inw(ioaddr + 10);
+	inw(ioaddr + 12);
+	EL3WINDOW(4);
+	inb(ioaddr + 12);
+	inb(ioaddr + 13);
+
+	/* .. enable any extra statistics bits.. */
+	outw(0x0040, ioaddr + Wn4_NetDiag);
+
+	EL3WINDOW(1);
+	spin_unlock_irqrestore(&lp->window_lock, flags);
+
+	/* .. re-sync MII and re-fill what NWay is advertising.
*/ + mdio_sync(ioaddr, 32); + mdio_write(ioaddr, lp->phys, 4, lp->advertising); + if (!auto_polarity) { + /* works for TDK 78Q2120 series MII's */ + i = mdio_read(ioaddr, lp->phys, 16) | 0x20; + mdio_write(ioaddr, lp->phys, 16, i); + } + + spin_lock_irqsave(&lp->window_lock, flags); + /* Switch to register set 1 for normal use, just for TxFree. */ + set_rx_mode(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. */ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull + | AdapterFailure | RxEarly, ioaddr + EL3_CMD); +} + +static int el3_open(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + link->open++; + netif_start_queue(dev); + + tc574_reset(dev); + lp->media.function = media_check; + lp->media.data = (unsigned long) dev; + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); + + dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", + dev->name, inw(dev->base_addr + EL3_STATUS)); + + return 0; +} + +static void el3_tx_timeout(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + netdev_notice(dev, "Transmit timed out!\n"); + dump_status(dev); + dev->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + /* Issue TX_RESET and TX_START commands. */ + tc574_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + netif_wake_queue(dev); +} + +static void pop_tx_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int i; + + /* Clear the Tx status stack. */ + for (i = 32; i > 0; i--) { + u_char tx_status = inb(ioaddr + TxStatus); + if (!(tx_status & 0x84)) + break; + /* reset transmitter on jabber error or underrun */ + if (tx_status & 0x30) + tc574_wait_for_completion(dev, TxReset); + if (tx_status & 0x38) { + pr_debug("%s: transmit error: status 0x%02x\n", + dev->name, tx_status); + outw(TxEnable, ioaddr + EL3_CMD); + dev->stats.tx_aborted_errors++; + } + outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ + } +} + +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + pr_debug("%s: el3_start_xmit(length = %ld) called, " + "status %4.4x.\n", dev->name, (long)skb->len, + inw(ioaddr + EL3_STATUS)); + + spin_lock_irqsave(&lp->window_lock, flags); + + dev->stats.tx_bytes += skb->len; + + /* Put out the doubleword header... */ + outw(skb->len, ioaddr + TX_FIFO); + outw(0, ioaddr + TX_FIFO); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); + + /* TxFree appears only in Window 1, not offset 0x1c. */ + if (inw(ioaddr + TxFree) <= 1536) { + netif_stop_queue(dev); + /* Interrupt us when the FIFO has room for max-sized packet. + The threshold is in units of dwords. 
*/ + outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + } + + pop_tx_status(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/* The EL3 interrupt handler. */ +static irqreturn_t el3_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr; + unsigned status; + int work_budget = max_interrupt_work; + int handled = 0; + + if (!netif_device_present(dev)) + return IRQ_NONE; + ioaddr = dev->base_addr; + + pr_debug("%s: interrupt, status %4.4x.\n", + dev->name, inw(ioaddr + EL3_STATUS)); + + spin_lock(&lp->window_lock); + + while ((status = inw(ioaddr + EL3_STATUS)) & + (IntLatch | RxComplete | RxEarly | StatsFull)) { + if (!netif_device_present(dev) || + ((status & 0xe000) != 0x2000)) { + pr_debug("%s: Interrupt from dead card\n", dev->name); + break; + } + + handled = 1; + + if (status & RxComplete) + work_budget = el3_rx(dev, work_budget); + + if (status & TxAvailable) { + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); + } + + if (status & TxComplete) + pop_tx_status(dev); + + if (status & (AdapterFailure | RxEarly | StatsFull)) { + /* Handle all uncommon interrupts. */ + if (status & StatsFull) + update_stats(dev); + if (status & RxEarly) { + work_budget = el3_rx(dev, work_budget); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & AdapterFailure) { + u16 fifo_diag; + EL3WINDOW(4); + fifo_diag = inw(ioaddr + Wn4_FIFODiag); + EL3WINDOW(1); + netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n", + fifo_diag); + if (fifo_diag & 0x0400) { + /* Tx overrun */ + tc574_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + } + if (fifo_diag & 0x2000) { + /* Rx underrun */ + tc574_wait_for_completion(dev, RxReset); + set_rx_mode(dev); + outw(RxEnable, ioaddr + EL3_CMD); + } + outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); + } + } + + if (--work_budget < 0) { + pr_debug("%s: Too much work in interrupt, " + "status %4.4x.\n", dev->name, status); + /* Clear all interrupts */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + } + + pr_debug("%s: exiting interrupt, status %4.4x.\n", + dev->name, inw(ioaddr + EL3_STATUS)); + + spin_unlock(&lp->window_lock); + return IRQ_RETVAL(handled); +} + +/* + This timer serves two purposes: to check for missed interrupts + (and as a last resort, poll the NIC for events), and to monitor + the MII, reporting changes in cable status. 
+*/ +static void media_check(unsigned long arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned long flags; + unsigned short /* cable, */ media, partner; + + if (!netif_device_present(dev)) + goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + this, we can limp along even if the interrupt is blocked */ + if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) { + if (!lp->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + + local_irq_save(flags); + el3_interrupt(dev->irq, dev); + local_irq_restore(flags); + + lp->fast_poll = HZ; + } + if (lp->fast_poll) { + lp->fast_poll--; + lp->media.expires = jiffies + 2*HZ/100; + add_timer(&lp->media); + return; + } + + spin_lock_irqsave(&lp->window_lock, flags); + EL3WINDOW(4); + media = mdio_read(ioaddr, lp->phys, 1); + partner = mdio_read(ioaddr, lp->phys, 5); + EL3WINDOW(1); + + if (media != lp->media_status) { + if ((media ^ lp->media_status) & 0x0004) + netdev_info(dev, "%s link beat\n", + (lp->media_status & 0x0004) ? "lost" : "found"); + if ((media ^ lp->media_status) & 0x0020) { + lp->partner = 0; + if (lp->media_status & 0x0020) { + netdev_info(dev, "autonegotiation restarted\n"); + } else if (partner) { + partner &= lp->advertising; + lp->partner = partner; + netdev_info(dev, "autonegotiation complete: " + "%dbaseT-%cD selected\n", + (partner & 0x0180) ? 100 : 10, + (partner & 0x0140) ? 'F' : 'H'); + } else { + netdev_info(dev, "link partner did not autonegotiate\n"); + } + + EL3WINDOW(3); + outb((partner & 0x0140 ? 0x20 : 0) | + (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl); + EL3WINDOW(1); + + } + if (media & 0x0010) + netdev_info(dev, "remote fault detected\n"); + if (media & 0x0002) + netdev_info(dev, "jabber detected\n"); + lp->media_status = media; + } + spin_unlock_irqrestore(&lp->window_lock, flags); + +reschedule: + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); +} + +static struct net_device_stats *el3_get_stats(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + + if (netif_device_present(dev)) { + unsigned long flags; + spin_lock_irqsave(&lp->window_lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + } + return &dev->stats; +} + +/* Update statistics. + Surprisingly this need not be run single-threaded, but it effectively is. + The counters clear when read, so the adds must merely be atomic. + */ +static void update_stats(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + u8 rx, tx, up; + + pr_debug("%s: updating the statistics.\n", dev->name); + + if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ + return; + + /* Unlike the 3c509 we need not turn off stats updates while reading. */ + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. 
*/ inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + up = inb(ioaddr + 9); + dev->stats.tx_packets += (up&0x30) << 4; + /* Rx packets */ inb(ioaddr + 7); + /* Tx deferrals */ inb(ioaddr + 8); + rx = inw(ioaddr + 10); + tx = inw(ioaddr + 12); + + EL3WINDOW(4); + /* BadSSD */ inb(ioaddr + 12); + up = inb(ioaddr + 13); + + EL3WINDOW(1); +} + +static int el3_rx(struct net_device *dev, int worklimit) +{ + unsigned int ioaddr = dev->base_addr; + short rx_status; + + pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", + dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); + while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && + worklimit > 0) { + worklimit--; + if (rx_status & 0x4000) { /* Error, update stats. */ + short error = rx_status & 0x3800; + dev->stats.rx_errors++; + switch (error) { + case 0x0000: dev->stats.rx_over_errors++; break; + case 0x0800: dev->stats.rx_length_errors++; break; + case 0x1000: dev->stats.rx_frame_errors++; break; + case 0x1800: dev->stats.rx_length_errors++; break; + case 0x2000: dev->stats.rx_frame_errors++; break; + case 0x2800: dev->stats.rx_crc_errors++; break; + } + } else { + short pkt_len = rx_status & 0x7ff; + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 5); + + pr_debug(" Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); + insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), + ((pkt_len+3)>>2)); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } else { + pr_debug("%s: couldn't allocate a sk_buff of" + " size %d.\n", dev->name, pkt_len); + dev->stats.rx_dropped++; + } + } + tc574_wait_for_completion(dev, RxDiscard); + } + + return worklimit; +} + +/* Provide ioctl() calls to examine the MII xcvr state. */ +static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + struct mii_ioctl_data *data = if_mii(rq); + int phy = lp->phys & 0x1f; + + pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n", + dev->name, rq->ifr_ifrn.ifrn_name, cmd, + data->phy_id, data->reg_num, data->val_in, data->val_out); + + switch(cmd) { + case SIOCGMIIPHY: /* Get the address of the PHY in use. */ + data->phy_id = phy; + case SIOCGMIIREG: /* Read the specified MII register. */ + { + int saved_window; + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + saved_window = inw(ioaddr + EL3_CMD) >> 13; + EL3WINDOW(4); + data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, + data->reg_num & 0x1f); + EL3WINDOW(saved_window); + spin_unlock_irqrestore(&lp->window_lock, flags); + return 0; + } + case SIOCSMIIREG: /* Write the specified MII register */ + { + int saved_window; + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + saved_window = inw(ioaddr + EL3_CMD) >> 13; + EL3WINDOW(4); + mdio_write(ioaddr, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); + EL3WINDOW(saved_window); + spin_unlock_irqrestore(&lp->window_lock, flags); + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +/* The Odie chip has a 64 bin multicast filter, but the bit layout is not + documented. Until it is we revert to receiving all multicast frames when + any multicast reception is desired. 
+ Note: My other drivers emit a log message whenever promiscuous mode is + entered to help detect password sniffers. This is less desirable on + typical PC card machines, so we omit the message. + */ + +static void set_rx_mode(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + if (dev->flags & IFF_PROMISC) + outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, + ioaddr + EL3_CMD); + else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) + outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); + else + outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + set_rx_mode(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); +} + +static int el3_close(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + + if (pcmcia_dev_present(link)) { + unsigned long flags; + + /* Turn off statistics ASAP. We update lp->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + /* Note: Switching to window 0 may disable the IRQ. */ + EL3WINDOW(0); + spin_lock_irqsave(&lp->window_lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); + + /* force interrupts off */ + outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + } + + link->open--; + netif_stop_queue(dev); + del_timer_sync(&lp->media); + + return 0; +} + +static const struct pcmcia_device_id tc574_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, tc574_ids); + +static struct pcmcia_driver tc574_driver = { + .owner = THIS_MODULE, + .name = "3c574_cs", + .probe = tc574_probe, + .remove = tc574_detach, + .id_table = tc574_ids, + .suspend = tc574_suspend, + .resume = tc574_resume, +}; +module_pcmcia_driver(tc574_driver); diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c new file mode 100644 index 000000000..c5a320507 --- /dev/null +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -0,0 +1,968 @@ +/* ====================================================================== + * + * A PCMCIA ethernet driver for the 3com 3c589 card. + * + * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net + * + * 3c589_cs.c 1.162 2001/10/13 00:08:50 + * + * The network driver code is based on Donald Becker's 3c589 code: + * + * Written 1994 by Donald Becker. + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. This software may be used and + * distributed according to the terms of the GNU General Public License, + * incorporated herein by reference. 
* Donald Becker may be reached at becker@scyld.com
+ *
+ * Updated for 2.5.x by Alan Cox
+ *
+ * ======================================================================
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME	"3c589_cs"
+#define DRV_VERSION	"1.162-ac"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+
+
+/* To minimize the size of the driver source I only define operating
+ * constants if they are used several times.  You'll need the manual
+ * if you want to understand driver details.
+ */
+
+/* Offsets from base I/O address. */
+#define EL3_DATA	0x00
+#define EL3_TIMER	0x0a
+#define EL3_CMD		0x0e
+#define EL3_STATUS	0x0e
+
+#define EEPROM_READ	0x0080
+#define EEPROM_BUSY	0x8000
+
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+
+/* The top five bits written to EL3_CMD are a command, the lower
+ * 11 bits are the parameter, if applicable.
+ */
+
+enum c509cmd {
+	TotalReset	= 0<<11,
+	SelectWindow	= 1<<11,
+	StartCoax	= 2<<11,
+	RxDisable	= 3<<11,
+	RxEnable	= 4<<11,
+	RxReset		= 5<<11,
+	RxDiscard	= 8<<11,
+	TxEnable	= 9<<11,
+	TxDisable	= 10<<11,
+	TxReset		= 11<<11,
+	FakeIntr	= 12<<11,
+	AckIntr		= 13<<11,
+	SetIntrEnb	= 14<<11,
+	SetStatusEnb	= 15<<11,
+	SetRxFilter	= 16<<11,
+	SetRxThreshold	= 17<<11,
+	SetTxThreshold	= 18<<11,
+	SetTxStart	= 19<<11,
+	StatsEnable	= 21<<11,
+	StatsDisable	= 22<<11,
+	StopCoax	= 23<<11
+};
+
+enum c509status {
+	IntLatch	= 0x0001,
+	AdapterFailure	= 0x0002,
+	TxComplete	= 0x0004,
+	TxAvailable	= 0x0008,
+	RxComplete	= 0x0010,
+	RxEarly		= 0x0020,
+	IntReq		= 0x0040,
+	StatsFull	= 0x0080,
+	CmdBusy		= 0x1000
+};
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+	RxStation	= 1,
+	RxMulticast	= 2,
+	RxBroadcast	= 4,
+	RxProm		= 8
+};
+
+/* Register window 1 offsets, the window used in normal operation. */
+#define TX_FIFO		0x00
+#define RX_FIFO		0x00
+#define RX_STATUS	0x08
+#define TX_STATUS	0x0B
+#define TX_FREE		0x0C	/* Remaining free bytes in Tx buffer. */
+
+#define WN0_IRQ		0x08	/* Window 0: Set IRQ line in bits 12-15. */
+#define WN4_MEDIA	0x0A	/* Window 4: Various transceiver/media bits. */
+#define MEDIA_TP	0x00C0	/* Enable link beat and jabber for 10baseT. */
+#define MEDIA_LED	0x0001	/* Enable link light on 3C589E cards.
*/ + +/* Time in jiffies before concluding Tx hung */ +#define TX_TIMEOUT ((400*HZ)/1000) + +struct el3_private { + struct pcmcia_device *p_dev; + /* For transceiver monitoring */ + struct timer_list media; + u16 media_status; + u16 fast_poll; + unsigned long last_irq; + spinlock_t lock; +}; + +static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_AUTHOR("David Hinds "); +MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +/* Special hook for setting if_port when module is loaded */ +INT_MODULE_PARM(if_port, 0); + + +/*====================================================================*/ + +static int tc589_config(struct pcmcia_device *link); +static void tc589_release(struct pcmcia_device *link); + +static u16 read_eeprom(unsigned int ioaddr, int index); +static void tc589_reset(struct net_device *dev); +static void media_check(unsigned long arg); +static int el3_config(struct net_device *dev, struct ifmap *map); +static int el3_open(struct net_device *dev); +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t el3_interrupt(int irq, void *dev_id); +static void update_stats(struct net_device *dev); +static struct net_device_stats *el3_get_stats(struct net_device *dev); +static int el3_rx(struct net_device *dev); +static int el3_close(struct net_device *dev); +static void el3_tx_timeout(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + +static void tc589_detach(struct pcmcia_device *p_dev); + +static const struct net_device_ops el3_netdev_ops = { + .ndo_open = el3_open, + .ndo_stop = el3_close, + .ndo_start_xmit = el3_start_xmit, + .ndo_tx_timeout = el3_tx_timeout, + .ndo_set_config = el3_config, + .ndo_get_stats = el3_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int tc589_probe(struct pcmcia_device *link) +{ + struct el3_private *lp; + struct net_device *dev; + + dev_dbg(&link->dev, "3c589_attach()\n"); + + /* Create new ethernet device */ + dev = alloc_etherdev(sizeof(struct el3_private)); + if (!dev) + return -ENOMEM; + lp = netdev_priv(dev); + link->priv = dev; + lp->p_dev = link; + + spin_lock_init(&lp->lock); + link->resource[0]->end = 16; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; + + dev->netdev_ops = &el3_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + dev->ethtool_ops = &netdev_ethtool_ops; + + return tc589_config(link); +} + +static void tc589_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "3c589_detach\n"); + + unregister_netdev(dev); + + tc589_release(link); + + free_netdev(dev); +} /* tc589_detach */ + +static int tc589_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + __be16 *phys_addr; + int ret, i, j, multi = 0, fifo; + unsigned int ioaddr; + static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + u8 *buf; + size_t len; + + dev_dbg(&link->dev, "3c589_config\n"); + + phys_addr = (__be16 *)dev->dev_addr; + /* Is this a 3c562? 
*/ + if (link->manf_id != MANFID_3COM) + dev_info(&link->dev, "hmmm, is this really a 3Com card??\n"); + multi = (link->card_id == PRODID_3COM_3C562); + + link->io_lines = 16; + + /* For the 3c562, the base address must be xx00-xx7f */ + for (i = j = 0; j < 0x400; j += 0x10) { + if (multi && (j & 0x80)) + continue; + link->resource[0]->start = j ^ 0x300; + i = pcmcia_request_io(link); + if (i == 0) + break; + } + if (i != 0) + goto failed; + + ret = pcmcia_request_irq(link, el3_interrupt); + if (ret) + goto failed; + + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + ioaddr = dev->base_addr; + EL3WINDOW(0); + + /* The 3c589 has an extra EEPROM for configuration info, including + * the hardware address. The 3c562 puts the address in the CIS. + */ + len = pcmcia_get_tuple(link, 0x88, &buf); + if (buf && len >= 6) { + for (i = 0; i < 3; i++) + phys_addr[i] = htons(le16_to_cpu(buf[i*2])); + kfree(buf); + } else { + kfree(buf); /* 0 < len < 6 */ + for (i = 0; i < 3; i++) + phys_addr[i] = htons(read_eeprom(ioaddr, i)); + if (phys_addr[0] == htons(0x6060)) { + dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n", + dev->base_addr, dev->base_addr+15); + goto failed; + } + } + + /* The address and resource configuration register aren't loaded from + * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. + */ + + outw(0x3f00, ioaddr + 8); + fifo = inl(ioaddr); + + /* The if_port symbol can be set when the module is loaded */ + if ((if_port >= 0) && (if_port <= 3)) + dev->if_port = if_port; + else + dev_err(&link->dev, "invalid if_port requested\n"); + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + dev_err(&link->dev, "register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n", + (multi ? "562" : "589"), dev->base_addr, dev->irq, + dev->dev_addr); + netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n", + (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], + if_names[dev->if_port]); + return 0; + +failed: + tc589_release(link); + return -ENODEV; +} /* tc589_config */ + +static void tc589_release(struct pcmcia_device *link) +{ + pcmcia_disable_device(link); +} + +static int tc589_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int tc589_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + tc589_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + +/*====================================================================*/ + +/* Use this for commands that may take time to finish */ + +static void tc589_wait_for_completion(struct net_device *dev, int cmd) +{ + int i = 100; + outw(cmd, dev->base_addr + EL3_CMD); + while (--i > 0) + if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) + break; + if (i == 0) + netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); +} + +/* Read a word from the EEPROM using the regular EEPROM access register. + * Assume that we are in register window zero. + */ + +static u16 read_eeprom(unsigned int ioaddr, int index) +{ + int i; + outw(EEPROM_READ + index, ioaddr + 10); + /* Reading the eeprom takes 162 us */ + for (i = 1620; i >= 0; i--) + if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0) + break; + return inw(ioaddr + 12); +} + +/* Set transceiver type, perhaps to something other than what the user + * specified in dev->if_port. 
+ */ + +static void tc589_set_xcvr(struct net_device *dev, int if_port) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + EL3WINDOW(0); + switch (if_port) { + case 0: + case 1: + outw(0, ioaddr + 6); + break; + case 2: + outw(3<<14, ioaddr + 6); + break; + case 3: + outw(1<<14, ioaddr + 6); + break; + } + /* On PCMCIA, this just turns on the LED */ + outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD); + /* 10baseT interface, enable link beat and jabber check. */ + EL3WINDOW(4); + outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA); + EL3WINDOW(1); + if (if_port == 2) + lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000); + else + lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800); +} + +static void dump_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + EL3WINDOW(1); + netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS), + inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE)); + EL3WINDOW(4); + netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n", + inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08), + inw(ioaddr+0x0a)); + EL3WINDOW(1); +} + +/* Reset and restore all of the 3c589 registers. */ +static void tc589_reset(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int i; + + EL3WINDOW(0); + outw(0x0001, ioaddr + 4); /* Activate board. */ + outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ + + /* Set the station address in window 2. */ + EL3WINDOW(2); + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + i); + + tc589_set_xcvr(dev, dev->if_port); + + /* Switch to the stats window, and clear all stats by reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + EL3WINDOW(6); + for (i = 0; i < 9; i++) + inb(ioaddr+i); + inw(ioaddr + 10); + inw(ioaddr + 12); + + /* Switch to register set 1 for normal use. */ + EL3WINDOW(1); + + set_rx_mode(dev); + outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. 
*/ + outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull + | AdapterFailure, ioaddr + EL3_CMD); +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), + "PCMCIA 0x%lx", dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +static int el3_config(struct net_device *dev, struct ifmap *map) +{ + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (map->port <= 3) { + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + tc589_set_xcvr(dev, dev->if_port); + } else { + return -EINVAL; + } + } + return 0; +} + +static int el3_open(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + link->open++; + netif_start_queue(dev); + + tc589_reset(dev); + setup_timer(&lp->media, media_check, (unsigned long)dev); + mod_timer(&lp->media, jiffies + HZ); + + dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", + dev->name, inw(dev->base_addr + EL3_STATUS)); + + return 0; +} + +static void el3_tx_timeout(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + netdev_warn(dev, "Transmit timed out!\n"); + dump_status(dev); + dev->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + /* Issue TX_RESET and TX_START commands. */ + tc589_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + netif_wake_queue(dev); +} + +static void pop_tx_status(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int i; + + /* Clear the Tx status stack. */ + for (i = 32; i > 0; i--) { + u_char tx_status = inb(ioaddr + TX_STATUS); + if (!(tx_status & 0x84)) + break; + /* reset transmitter on jabber error or underrun */ + if (tx_status & 0x30) + tc589_wait_for_completion(dev, TxReset); + if (tx_status & 0x38) { + netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); + outw(TxEnable, ioaddr + EL3_CMD); + dev->stats.tx_aborted_errors++; + } + outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ + } +} + +static netdev_tx_t el3_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct el3_private *priv = netdev_priv(dev); + unsigned long flags; + + netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", + (long)skb->len, inw(ioaddr + EL3_STATUS)); + + spin_lock_irqsave(&priv->lock, flags); + + dev->stats.tx_bytes += skb->len; + + /* Put out the doubleword header... */ + outw(skb->len, ioaddr + TX_FIFO); + outw(0x00, ioaddr + TX_FIFO); + /* ... and the packet rounded to a doubleword. */ + outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + + if (inw(ioaddr + TX_FREE) <= 1536) { + netif_stop_queue(dev); + /* Interrupt us when the FIFO has room for max-sized packet. */ + outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); + } + + pop_tx_status(dev); + spin_unlock_irqrestore(&priv->lock, flags); + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* The EL3 interrupt handler. 
*/ +static irqreturn_t el3_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr; + __u16 status; + int i = 0, handled = 1; + + if (!netif_device_present(dev)) + return IRQ_NONE; + + ioaddr = dev->base_addr; + + netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); + + spin_lock(&lp->lock); + while ((status = inw(ioaddr + EL3_STATUS)) & + (IntLatch | RxComplete | StatsFull)) { + if ((status & 0xe000) != 0x2000) { + netdev_dbg(dev, "interrupt from dead card\n"); + handled = 0; + break; + } + if (status & RxComplete) + el3_rx(dev); + if (status & TxAvailable) { + netdev_dbg(dev, " TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue(dev); + } + if (status & TxComplete) + pop_tx_status(dev); + if (status & (AdapterFailure | RxEarly | StatsFull)) { + /* Handle all uncommon interrupts. */ + if (status & StatsFull) /* Empty statistics. */ + update_stats(dev); + if (status & RxEarly) { + /* Rx early is unused. */ + el3_rx(dev); + outw(AckIntr | RxEarly, ioaddr + EL3_CMD); + } + if (status & AdapterFailure) { + u16 fifo_diag; + EL3WINDOW(4); + fifo_diag = inw(ioaddr + 4); + EL3WINDOW(1); + netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", + fifo_diag); + if (fifo_diag & 0x0400) { + /* Tx overrun */ + tc589_wait_for_completion(dev, TxReset); + outw(TxEnable, ioaddr + EL3_CMD); + } + if (fifo_diag & 0x2000) { + /* Rx underrun */ + tc589_wait_for_completion(dev, RxReset); + set_rx_mode(dev); + outw(RxEnable, ioaddr + EL3_CMD); + } + outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); + } + } + if (++i > 10) { + netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", + status); + /* Clear all interrupts */ + outw(AckIntr | 0xFF, ioaddr + EL3_CMD); + break; + } + /* Acknowledge the IRQ. */ + outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + } + lp->last_irq = jiffies; + spin_unlock(&lp->lock); + netdev_dbg(dev, "exiting interrupt, status %4.4x.\n", + inw(ioaddr + EL3_STATUS)); + return IRQ_RETVAL(handled); +} + +static void media_check(unsigned long arg) +{ + struct net_device *dev = (struct net_device *)(arg); + struct el3_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 media, errs; + unsigned long flags; + + if (!netif_device_present(dev)) + goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + * this, we can limp along even if the interrupt is blocked + */ + if ((inw(ioaddr + EL3_STATUS) & IntLatch) && + (inb(ioaddr + EL3_TIMER) == 0xff)) { + if (!lp->fast_poll) + netdev_warn(dev, "interrupt(s) dropped!\n"); + + local_irq_save(flags); + el3_interrupt(dev->irq, dev); + local_irq_restore(flags); + + lp->fast_poll = HZ; + } + if (lp->fast_poll) { + lp->fast_poll--; + lp->media.expires = jiffies + HZ/100; + add_timer(&lp->media); + return; + } + + /* lp->lock guards the EL3 window. 
Window should always be 1 except + * when the lock is held + */ + + spin_lock_irqsave(&lp->lock, flags); + EL3WINDOW(4); + media = inw(ioaddr+WN4_MEDIA) & 0xc810; + + /* Ignore collisions unless we've had no irq's recently */ + if (time_before(jiffies, lp->last_irq + HZ)) { + media &= ~0x0010; + } else { + /* Try harder to detect carrier errors */ + EL3WINDOW(6); + outw(StatsDisable, ioaddr + EL3_CMD); + errs = inb(ioaddr + 0); + outw(StatsEnable, ioaddr + EL3_CMD); + dev->stats.tx_carrier_errors += errs; + if (errs || (lp->media_status & 0x0010)) + media |= 0x0010; + } + + if (media != lp->media_status) { + if ((media & lp->media_status & 0x8000) && + ((lp->media_status ^ media) & 0x0800)) + netdev_info(dev, "%s link beat\n", + (lp->media_status & 0x0800 ? "lost" : "found")); + else if ((media & lp->media_status & 0x4000) && + ((lp->media_status ^ media) & 0x0010)) + netdev_info(dev, "coax cable %s\n", + (lp->media_status & 0x0010 ? "ok" : "problem")); + if (dev->if_port == 0) { + if (media & 0x8000) { + if (media & 0x0800) + netdev_info(dev, "flipped to 10baseT\n"); + else + tc589_set_xcvr(dev, 2); + } else if (media & 0x4000) { + if (media & 0x0010) + tc589_set_xcvr(dev, 1); + else + netdev_info(dev, "flipped to 10base2\n"); + } + } + lp->media_status = media; + } + + EL3WINDOW(1); + spin_unlock_irqrestore(&lp->lock, flags); + +reschedule: + lp->media.expires = jiffies + HZ; + add_timer(&lp->media); +} + +static struct net_device_stats *el3_get_stats(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + struct pcmcia_device *link = lp->p_dev; + + if (pcmcia_dev_present(link)) { + spin_lock_irqsave(&lp->lock, flags); + update_stats(dev); + spin_unlock_irqrestore(&lp->lock, flags); + } + return &dev->stats; +} + +/* Update statistics. We change to register window 6, so this should be run +* single-threaded if the device is active. This is expected to be a rare +* operation, and it's simpler for the rest of the driver to assume that +* window 1 is always valid rather than use a special window-state variable. +* +* Caller must hold the lock for this +*/ + +static void update_stats(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + netdev_dbg(dev, "updating the statistics.\n"); + /* Turn off statistics updates while reading. */ + outw(StatsDisable, ioaddr + EL3_CMD); + /* Switch to the stats window, and read everything. */ + EL3WINDOW(6); + dev->stats.tx_carrier_errors += inb(ioaddr + 0); + dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); + /* Multiple collisions. */ + inb(ioaddr + 2); + dev->stats.collisions += inb(ioaddr + 3); + dev->stats.tx_window_errors += inb(ioaddr + 4); + dev->stats.rx_fifo_errors += inb(ioaddr + 5); + dev->stats.tx_packets += inb(ioaddr + 6); + /* Rx packets */ + inb(ioaddr + 7); + /* Tx deferrals */ + inb(ioaddr + 8); + /* Rx octets */ + inw(ioaddr + 10); + /* Tx octets */ + inw(ioaddr + 12); + + /* Back to window 1, and turn statistics back on. */ + EL3WINDOW(1); + outw(StatsEnable, ioaddr + EL3_CMD); +} + +static int el3_rx(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int worklimit = 32; + short rx_status; + + netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n", + inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); + while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && + worklimit > 0) { + worklimit--; + if (rx_status & 0x4000) { /* Error, update stats. 
*/ + short error = rx_status & 0x3800; + dev->stats.rx_errors++; + switch (error) { + case 0x0000: + dev->stats.rx_over_errors++; + break; + case 0x0800: + dev->stats.rx_length_errors++; + break; + case 0x1000: + dev->stats.rx_frame_errors++; + break; + case 0x1800: + dev->stats.rx_length_errors++; + break; + case 0x2000: + dev->stats.rx_frame_errors++; + break; + case 0x2800: + dev->stats.rx_crc_errors++; + break; + } + } else { + short pkt_len = rx_status & 0x7ff; + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 5); + + netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); + insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), + (pkt_len+3)>>2); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } else { + netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n", + pkt_len); + dev->stats.rx_dropped++; + } + } + /* Pop the top of the Rx FIFO */ + tc589_wait_for_completion(dev, RxDiscard); + } + if (worklimit == 0) + netdev_warn(dev, "too much work in el3_rx!\n"); + return 0; +} + +static void set_rx_mode(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + u16 opts = SetRxFilter | RxStation | RxBroadcast; + + if (dev->flags & IFF_PROMISC) + opts |= RxMulticast | RxProm; + else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) + opts |= RxMulticast; + outw(opts, ioaddr + EL3_CMD); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct el3_private *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + set_rx_mode(dev); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int el3_close(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + unsigned int ioaddr = dev->base_addr; + + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + + if (pcmcia_dev_present(link)) { + /* Turn off statistics ASAP. We update dev->stats below. */ + outw(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + outw(RxDisable, ioaddr + EL3_CMD); + outw(TxDisable, ioaddr + EL3_CMD); + + if (dev->if_port == 2) + /* Turn off thinnet power. Green! */ + outw(StopCoax, ioaddr + EL3_CMD); + else if (dev->if_port == 1) { + /* Disable link beat and jabber */ + EL3WINDOW(4); + outw(0, ioaddr + WN4_MEDIA); + } + + /* Switching back to window 0 disables the IRQ. */ + EL3WINDOW(0); + /* But we explicitly zero the IRQ line select anyway. 
*/ + outw(0x0f00, ioaddr + WN0_IRQ); + + /* Check if the card still exists */ + if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) + update_stats(dev); + } + + link->open--; + netif_stop_queue(dev); + del_timer_sync(&lp->media); + + return 0; +} + +static const struct pcmcia_device_id tc589_ids[] = { + PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0101, 0x0562), + PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77), + PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589), + PCMCIA_DEVICE_PROD_ID12("Farallon", "ENet", 0x58d93fc4, 0x992c2202), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0035, "cis/3CXEM556.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x003d, "cis/3CXEM556.cis"), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, tc589_ids); + +static struct pcmcia_driver tc589_driver = { + .owner = THIS_MODULE, + .name = "3c589_cs", + .probe = tc589_probe, + .remove = tc589_detach, + .id_table = tc589_ids, + .suspend = tc589_suspend, + .resume = tc589_resume, +}; +module_pcmcia_driver(tc589_driver); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c new file mode 100644 index 000000000..41095ebad --- /dev/null +++ b/drivers/net/ethernet/3com/3c59x.c @@ -0,0 +1,3361 @@ +/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ +/* + Written 1996-1999 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. + Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 + and the EtherLink XL 3c900 and 3c905 cards. + + Problem reports and questions should be directed to + vortex@scyld.com + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + +*/ + +/* + * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation + * as well as other drivers + * + * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k + * due to dead code elimination. There will be some performance benefits from this due to + * elimination of all the tests and reduced cache footprint. + */ + + +#define DRV_NAME "3c59x" + + + +/* A few values that may be tweaked. */ +/* Keep the ring sizes a power of two for efficiency. */ +#define TX_RING_SIZE 16 +#define RX_RING_SIZE 32 +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +/* "Knobs" that adjust features and parameters. */ +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1512 effectively disables this feature. */ +#ifndef __arm__ +static int rx_copybreak = 200; +#else +/* ARM systems perform better by disregarding the bus-master + transfer capability of these cards. -- rmk */ +static int rx_copybreak = 1513; +#endif +/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ +static const int mtu = 1500; +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 32; +/* Tx timeout interval (millisecs) */ +static int watchdog = 5000; + +/* Allow aggregation of Tx interrupts. Saves CPU load at the cost + * of possible Tx stalls if the system is blocking interrupts + * somewhere else. Undefine this to disable. + */ +#define tx_interrupt_mitigation 1 + +/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). 
*/
+#define vortex_debug debug
+#ifdef VORTEX_DEBUG
+static int vortex_debug = VORTEX_DEBUG;
+#else
+static int vortex_debug = 1;
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/eisa.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/gfp.h>
+#include <asm/irq.h>			/* For nr_irqs only. */
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
+   This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+#include <linux/delay.h>
+
+
+static const char version[] =
+	DRV_NAME ": Donald Becker and others.\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
+MODULE_LICENSE("GPL");
+
+
+/* Operational parameters that usually are not changed. */
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+   runtime register window, window 1, is now always mapped in.
+   The Boomerang size is twice as large as the Vortex -- it has additional
+   bus master control registers. */
+#define VORTEX_TOTAL_SIZE 0x20
+#define BOOMERANG_TOTAL_SIZE 0x40
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with the original DP83840 on older 3c905 boards, so the extra
+   code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required;
+
+#define PFX DRV_NAME ": "
+
+
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbps
+versions of the FastEtherLink cards.  The supported product IDs are
+  3c590, 3c592, 3c595, 3c597, 3c900, 3c905
+
+The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+with the kernel source or available from
+    cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board.  The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.
+
+The EEPROM settings for media type and forced-full-duplex are observed.
+The EEPROM media type should be left at the default "autoselect" unless using
+10base2 or AUI connections which cannot be reliably detected.
+
+III. Driver operation
+
+The 3c59x series use an interface that's very similar to the previous 3c5x9
+series.  The primary interface is two programmed-I/O FIFOs, with an
+alternate single-contiguous-region bus-master transfer (see next).
+
+The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
+programmed-I/O interface that has been removed in 'B' and subsequent board
+revisions.
+
+One extension that is advertised in a very large font is that the adapters
+are capable of being bus masters.  On the Vortex chip this capability was
+only for a single contiguous region making it far less useful than the full
+bus master capability.  There is a significant performance impact of taking
+an extra interrupt or polling for the completion of each transfer, as well
+as difficulty sharing the single transfer engine between the transmit and
+receive threads.  Using DMA transfers is a win only with large blocks or
+with the flawed versions of the Intel Orion motherboard PCI controller.
+ +The Boomerang chip's full-bus-master interface is useful, and has the +currently-unused advantages over other similar chips that queued transmit +packets may be reordered and receive buffer groups are associated with a +single frame. + +With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. +Rather than a fixed intermediate receive buffer, this scheme allocates +full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as +the copying breakpoint: it is chosen to trade-off the memory wasted by +passing the full-sized skbuff to the queue layer for all frames vs. the +copying cost of copying a frame to a correctly-sized skbuff. + +IIIC. Synchronization +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +IV. Notes + +Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development +3c590, 3c595, and 3c900 boards. +The name "Vortex" is the internal 3Com project name for the PCI ASIC, and +the EISA version is called "Demon". According to Terry these names come +from rides at the local amusement park. + +The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! +This driver only supports ethernet packets because of the skbuff allocation +limit of 4K. +*/ + +/* This table drives the PCI probe routines. It's mostly boilerplate in all + of the drivers, and will likely be provided by some future kernel. +*/ +enum pci_flags_bit { + PCI_USES_MASTER=4, +}; + +enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, + EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ + HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, + INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, + EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000, + EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, }; + +enum vortex_chips { + CH_3C590 = 0, + CH_3C592, + CH_3C597, + CH_3C595_1, + CH_3C595_2, + + CH_3C595_3, + CH_3C900_1, + CH_3C900_2, + CH_3C900_3, + CH_3C900_4, + + CH_3C900_5, + CH_3C900B_FL, + CH_3C905_1, + CH_3C905_2, + CH_3C905B_TX, + CH_3C905B_1, + + CH_3C905B_2, + CH_3C905B_FX, + CH_3C905C, + CH_3C9202, + CH_3C980, + CH_3C9805, + + CH_3CSOHO100_TX, + CH_3C555, + CH_3C556, + CH_3C556B, + CH_3C575, + + CH_3C575_1, + CH_3CCFE575, + CH_3CCFE575CT, + CH_3CCFE656, + CH_3CCFEM656, + + CH_3CCFEM656_1, + CH_3C450, + CH_3C920, + CH_3C982A, + CH_3C982B, + + CH_905BT4, + CH_920B_EMB_WNM, +}; + + +/* note: this array directly indexed by above enums, and MUST + * be kept in sync with both the enums above, and the PCI device + * table below + */ +static struct vortex_chip_info { + const char *name; + int flags; + int drv_flags; + int io_size; +} vortex_info_tbl[] = { + {"3c590 Vortex 10Mbps", + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c595 Vortex 100baseTx", + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c595 Vortex 100baseT4", + PCI_USES_MASTER, IS_VORTEX, 32, }, + + {"3c595 Vortex 100base-MII", + PCI_USES_MASTER, IS_VORTEX, 32, }, + {"3c900 Boomerang 10baseT", + PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, + {"3c900 Boomerang 10Mbps Combo", + PCI_USES_MASTER, 
IS_BOOMERANG|EEPROM_RESET, 64, }, + {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c900 Cyclone 10Mbps Combo", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + + {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c900B-FL Cyclone 10base-FL", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c905 Boomerang 100baseTx", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, + {"3c905 Boomerang 100baseT4", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, + {"3C905B-TX Fast Etherlink XL PCI", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c905B Cyclone 100baseTx", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + + {"3c905B Cyclone 10/100/BNC", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c905B-FX Cyclone 100baseFx", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, + {"3c905C Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", + PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, + {"3c980 Cyclone", + PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + + {"3c980C Python-T", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3cSOHO100-TX Hurricane", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c555 Laptop Hurricane", + PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, + {"3c556 Laptop Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| + HAS_HWCKSM, 128, }, + {"3c556B Laptop Hurricane", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| + WNO_XCVR_PWR|HAS_HWCKSM, 128, }, + + {"3c575 [Megahertz] 10/100 LAN CardBus", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, + {"3c575 Boomerang CardBus", + PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, + {"3CCFE575BT Cyclone CardBus", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + {"3CCFE575CT Tornado CardBus", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, + {"3CCFE656 Cyclone CardBus", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + + {"3CCFEM656B Cyclone+Winmodem CardBus", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + INVERT_LED_PWR|HAS_HWCKSM, 128, }, + {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| + MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, + {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c920 Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + {"3c982 Hydra Dual Port A", + PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, + + {"3c982 Hydra Dual Port B", + PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, + {"3c905B-T4", + PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, + {"3c920B-EMB-WNM Tornado", + PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, + + {NULL,}, /* NULL terminated list. 
*/ +}; + + +static const struct pci_device_id vortex_pci_tbl[] = { + { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, + { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, + { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, + { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 }, + { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 }, + + { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 }, + { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 }, + { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 }, + { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 }, + { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 }, + + { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 }, + { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, + { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, + { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, + { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX }, + { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, + + { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, + { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, + { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, + { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 }, + { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, + { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, + + { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX }, + { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 }, + { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 }, + { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B }, + { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 }, + + { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 }, + { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 }, + { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT }, + { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 }, + { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 }, + + { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 }, + { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 }, + { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 }, + { 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A }, + { 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B }, + + { 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 }, + { 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM }, + + {0,} /* 0 terminated list. */ +}; +MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); + + +/* Operational definitions. + These are not used by other compilation units and thus are not + exported in a ".h" file. + + First the windows. There are eight register windows, with the command + and status registers available in each. + */ +#define EL3_CMD 0x0e +#define EL3_STATUS 0x0e + +/* The top five bits written to EL3_CMD are a command, the lower + 11 bits are the parameter, if applicable. + Note that 11 parameters bits was fine for ethernet, but the new chip + can handle FDDI length frames (~4500 octets) and now parameters count + 32-bit 'Dwords' rather than octets. 
*/ + +enum vortex_cmd { + TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, + RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, + UpStall = 6<<11, UpUnstall = (6<<11)+1, + DownStall = (6<<11)+2, DownUnstall = (6<<11)+3, + RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, + FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, + SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, + SetTxThreshold = 18<<11, SetTxStart = 19<<11, + StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11, + StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,}; + +/* The SetRxFilter command accepts the following classes: */ +enum RxFilter { + RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; + +/* Bits in the general status register. */ +enum vortex_status { + IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004, + TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, + IntReq = 0x0040, StatsFull = 0x0080, + DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10, + DMAInProgress = 1<<11, /* DMA controller is still busy.*/ + CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/ +}; + +/* Register window 1 offsets, the window used in normal operation. + On the Vortex this window is always mapped at offsets 0x10-0x1f. */ +enum Window1 { + TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, + RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, + TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ +}; +enum Window0 { + Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ + Wn0EepromData = 12, /* Window 0: EEPROM results register. */ + IntrStatus=0x0E, /* Valid in all windows. */ +}; +enum Win0_EEPROM_bits { + EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, + EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ + EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ +}; +/* EEPROM locations. */ +enum eeprom_offset { + PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3, + EtherLink3ID=7, IFXcvrIO=8, IRQLine=9, + NodeAddr01=10, NodeAddr23=11, NodeAddr45=12, + DriverTune=13, Checksum=15}; + +enum Window2 { /* Window 2. */ + Wn2_ResetOptions=12, +}; +enum Window3 { /* Window 3: MAC/config bits. */ + Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8, +}; + +#define BFEXT(value, offset, bitcount) \ + ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1)) + +#define BFINS(lhs, rhs, offset, bitcount) \ + (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \ + (((rhs) & ((1 << (bitcount)) - 1)) << (offset))) + +#define RAM_SIZE(v) BFEXT(v, 0, 3) +#define RAM_WIDTH(v) BFEXT(v, 3, 1) +#define RAM_SPEED(v) BFEXT(v, 4, 2) +#define ROM_SIZE(v) BFEXT(v, 6, 2) +#define RAM_SPLIT(v) BFEXT(v, 16, 2) +#define XCVR(v) BFEXT(v, 20, 4) +#define AUTOSELECT(v) BFEXT(v, 24, 1) + +enum Window4 { /* Window 4: Xcvr/media bits. */ + Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, +}; +enum Win4_Media_bits { + Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ + Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ + Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ + Media_LnkBeat = 0x0800, +}; +enum Window7 { /* Window 7: Bus Master control. */ + Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6, + Wn7_MasterStatus = 12, +}; +/* Boomerang bus master control registers. 
*/ +enum MasterCtrl { + PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c, + TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38, +}; + +/* The Rx and Tx descriptor lists. + Caution Alpha hackers: these types are 32 bits! Note also the 8 byte + alignment contraint on tx_ring[] and rx_ring[]. */ +#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */ +#define DN_COMPLETE 0x00010000 /* This packet has been downloaded */ +struct boom_rx_desc { + __le32 next; /* Last entry points to 0. */ + __le32 status; + __le32 addr; /* Up to 63 addr/len pairs possible. */ + __le32 length; /* Set LAST_FRAG to indicate last pair. */ +}; +/* Values for the Rx status entry. */ +enum rx_desc_status { + RxDComplete=0x00008000, RxDError=0x4000, + /* See boomerang_rx() for actual error bits */ + IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27, + IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31, +}; + +#ifdef MAX_SKB_FRAGS +#define DO_ZEROCOPY 1 +#else +#define DO_ZEROCOPY 0 +#endif + +struct boom_tx_desc { + __le32 next; /* Last entry points to 0. */ + __le32 status; /* bits 0:12 length, others see below. */ +#if DO_ZEROCOPY + struct { + __le32 addr; + __le32 length; + } frag[1+MAX_SKB_FRAGS]; +#else + __le32 addr; + __le32 length; +#endif +}; + +/* Values for the Tx status entry. */ +enum tx_desc_status { + CRCDisable=0x2000, TxDComplete=0x8000, + AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000, + TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */ +}; + +/* Chip features we care about in vp->capabilities, read from the EEPROM. */ +enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; + +struct vortex_extra_stats { + unsigned long tx_deferred; + unsigned long tx_max_collisions; + unsigned long tx_multiple_collisions; + unsigned long tx_single_collisions; + unsigned long rx_bad_ssd; +}; + +struct vortex_private { + /* The Rx and Tx rings should be quad-word-aligned. */ + struct boom_rx_desc* rx_ring; + struct boom_tx_desc* tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + /* The addresses of transmit- and receive-in-place skbuffs. */ + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ + struct vortex_extra_stats xstats; /* NIC-specific extra stats */ + struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ + dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ + + /* PCI configuration space information. */ + struct device *gendev; + void __iomem *ioaddr; /* IO address space */ + void __iomem *cb_fn_base; /* CardBus function status addr space. */ + + /* Some values here only for performance evaluation and path-coverage */ + int rx_nocopy, rx_copy, queued_packet, rx_csumhits; + int card_idx; + + /* The remainder are related to chip state, mostly media selection. */ + struct timer_list timer; /* Media selection timer. */ + struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */ + int options; /* User-settable misc. driver options. */ + unsigned int media_override:4, /* Passed-in media type. */ + default_media:4, /* Read from the EEPROM/Wn3_Config. */ + full_duplex:1, autoselect:1, + bus_master:1, /* Vortex can only do a fragment bus-m. 
*/ + full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */ + flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */ + partner_flow_ctrl:1, /* Partner supports flow control */ + has_nway:1, + enable_wol:1, /* Wake-on-LAN is enabled */ + pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ + open:1, + medialock:1, + large_frames:1, /* accept large frames */ + handling_irq:1; /* private in_irq indicator */ + /* {get|set}_wol operations are already serialized by rtnl. + * no additional locking is required for the enable_wol and acpi_set_WOL() + */ + int drv_flags; + u16 status_enable; + u16 intr_enable; + u16 available_media; /* From Wn3_Options. */ + u16 capabilities, info1, info2; /* Various, from EEPROM. */ + u16 advertising; /* NWay media advertisement */ + unsigned char phys[2]; /* MII device addresses. */ + u16 deferred; /* Resend these interrupts when we + * bale from the ISR */ + u16 io_size; /* Size of PCI region (for release_region) */ + + /* Serialises access to hardware other than MII and variables below. + * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */ + spinlock_t lock; + + spinlock_t mii_lock; /* Serialises access to MII */ + struct mii_if_info mii; /* MII lib hooks/info */ + spinlock_t window_lock; /* Serialises access to windowed regs */ + int window; /* Register window */ +}; + +static void window_set(struct vortex_private *vp, int window) +{ + if (window != vp->window) { + iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD); + vp->window = window; + } +} + +#define DEFINE_WINDOW_IO(size) \ +static u ## size \ +window_read ## size(struct vortex_private *vp, int window, int addr) \ +{ \ + unsigned long flags; \ + u ## size ret; \ + spin_lock_irqsave(&vp->window_lock, flags); \ + window_set(vp, window); \ + ret = ioread ## size(vp->ioaddr + addr); \ + spin_unlock_irqrestore(&vp->window_lock, flags); \ + return ret; \ +} \ +static void \ +window_write ## size(struct vortex_private *vp, u ## size value, \ + int window, int addr) \ +{ \ + unsigned long flags; \ + spin_lock_irqsave(&vp->window_lock, flags); \ + window_set(vp, window); \ + iowrite ## size(value, vp->ioaddr + addr); \ + spin_unlock_irqrestore(&vp->window_lock, flags); \ +} +DEFINE_WINDOW_IO(8) +DEFINE_WINDOW_IO(16) +DEFINE_WINDOW_IO(32) + +#ifdef CONFIG_PCI +#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL) +#else +#define DEVICE_PCI(dev) NULL +#endif + +#define VORTEX_PCI(vp) \ + ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)) + +#ifdef CONFIG_EISA +#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) +#else +#define DEVICE_EISA(dev) NULL +#endif + +#define VORTEX_EISA(vp) \ + ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)) + +/* The action to take with a media selection timer tick. + Note that we deviate from the 3Com order by checking 10base2 before AUI. + */ +enum xcvr_types { + XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, + XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, +}; + +static const struct media_table { + char *name; + unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ + mask:8, /* The transceiver-present bit in Wn3_Config.*/ + next:8; /* The media type to try next. */ + int wait; /* Time before we check media status. 
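+   Expressed in jiffies (hence the HZ-based values below); it is passed to
+   RUN_AT() when the media-selection timer is armed in vortex_up().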
*/ +} media_tbl[] = { + { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10}, + { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10}, + { "undefined", 0, 0x80, XCVR_10baseT, 10000}, + { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10}, + { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10}, + { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10}, + { "MII", 0, 0x41, XCVR_10baseT, 3*HZ }, + { "undefined", 0, 0x01, XCVR_10baseT, 10000}, + { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ}, + { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ }, + { "Default", 0, 0xFF, XCVR_10baseT, 10000}, +}; + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "tx_deferred" }, + { "tx_max_collisions" }, + { "tx_multiple_collisions" }, + { "tx_single_collisions" }, + { "rx_bad_ssd" }, +}; + +/* number of ETHTOOL_GSTATS u64's */ +#define VORTEX_NUM_STATS 5 + +static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, + int chip_idx, int card_idx); +static int vortex_up(struct net_device *dev); +static void vortex_down(struct net_device *dev, int final); +static int vortex_open(struct net_device *dev); +static void mdio_sync(struct vortex_private *vp, int bits); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *vp, int phy_id, int location, int value); +static void vortex_timer(unsigned long arg); +static void rx_oom_timer(unsigned long arg); +static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int vortex_rx(struct net_device *dev); +static int boomerang_rx(struct net_device *dev); +static irqreturn_t vortex_interrupt(int irq, void *dev_id); +static irqreturn_t boomerang_interrupt(int irq, void *dev_id); +static int vortex_close(struct net_device *dev); +static void dump_tx_ring(struct net_device *dev); +static void update_stats(void __iomem *ioaddr, struct net_device *dev); +static struct net_device_stats *vortex_get_stats(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +#ifdef CONFIG_PCI +static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +#endif +static void vortex_tx_timeout(struct net_device *dev); +static void acpi_set_WOL(struct net_device *dev); +static const struct ethtool_ops vortex_ethtool_ops; +static void set_8021q_mode(struct net_device *dev, int enable); + +/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ +/* Option count limit only -- unlimited interfaces are supported. */ +#define MAX_UNITS 8 +static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 }; +static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 }; +static int global_options = -1; +static int global_full_duplex = -1; +static int global_enable_wol = -1; +static int global_use_mmio = -1; + +/* Variables to work-around the Compaq PCI BIOS32 problem. 
*/ +static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; +static struct net_device *compaq_net_device; + +static int vortex_cards_found; + +module_param(debug, int, 0); +module_param(global_options, int, 0); +module_param_array(options, int, NULL, 0); +module_param(global_full_duplex, int, 0); +module_param_array(full_duplex, int, NULL, 0); +module_param_array(hw_checksums, int, NULL, 0); +module_param_array(flow_ctrl, int, NULL, 0); +module_param(global_enable_wol, int, 0); +module_param_array(enable_wol, int, NULL, 0); +module_param(rx_copybreak, int, 0); +module_param(max_interrupt_work, int, 0); +module_param(compaq_ioaddr, int, 0); +module_param(compaq_irq, int, 0); +module_param(compaq_device_id, int, 0); +module_param(watchdog, int, 0); +module_param(global_use_mmio, int, 0); +module_param_array(use_mmio, int, NULL, 0); +MODULE_PARM_DESC(debug, "3c59x debug level (0-6)"); +MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex"); +MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset"); +MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)"); +MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset"); +MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)"); +MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)"); +MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)"); +MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset"); +MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt"); +MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)"); +MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)"); +MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)"); +MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds"); +MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset"); +MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void poll_vortex(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; + local_irq_save(flags); + (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); + local_irq_restore(flags); +} +#endif + +#ifdef CONFIG_PM + +static int vortex_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *ndev = pci_get_drvdata(pdev); + + if (!ndev || !netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + vortex_down(ndev, 1); + + return 0; +} + +static int vortex_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *ndev = pci_get_drvdata(pdev); + int err; + + if (!ndev || !netif_running(ndev)) + return 0; + + err = vortex_up(ndev); + if (err) + return err; + + netif_device_attach(ndev); + + return 0; +} + +static const struct dev_pm_ops vortex_pm_ops = { + .suspend = vortex_suspend, + .resume = vortex_resume, + .freeze = vortex_suspend, + .thaw = vortex_resume, + .poweroff = vortex_suspend, + .restore = vortex_resume, +}; + +#define VORTEX_PM_OPS (&vortex_pm_ops) + +#else /* !CONFIG_PM */ + +#define VORTEX_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +#ifdef CONFIG_EISA +static struct eisa_device_id vortex_eisa_ids[] = { + { "TCM5920", CH_3C592 }, + { "TCM5970", CH_3C597 }, + { "" } +}; +MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids); + +static int __init vortex_eisa_probe(struct device *device) +{ + void __iomem *ioaddr; + struct eisa_device *edev; + + edev = to_eisa_device(device); + + if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) + return -EBUSY; + + ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE); + + if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, + edev->id.driver_data, vortex_cards_found)) { + release_region(edev->base_addr, VORTEX_TOTAL_SIZE); + return -ENODEV; + } + + vortex_cards_found++; + + return 0; +} + +static int vortex_eisa_remove(struct device *device) +{ + struct eisa_device *edev; + struct net_device *dev; + struct vortex_private *vp; + void __iomem *ioaddr; + + edev = to_eisa_device(device); + dev = eisa_get_drvdata(edev); + + if (!dev) { + pr_err("vortex_eisa_remove called for Compaq device!\n"); + BUG(); + } + + vp = netdev_priv(dev); + ioaddr = vp->ioaddr; + + unregister_netdev(dev); + iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); + release_region(edev->base_addr, VORTEX_TOTAL_SIZE); + + free_netdev(dev); + return 0; +} + +static struct eisa_driver vortex_eisa_driver = { + .id_table = vortex_eisa_ids, + .driver = { + .name = "3c59x", + .probe = vortex_eisa_probe, + .remove = vortex_eisa_remove + } +}; + +#endif /* CONFIG_EISA */ + +/* returns count found (>= 0), or negative on error */ +static int __init vortex_eisa_init(void) +{ + int eisa_found = 0; + int orig_cards_found = vortex_cards_found; + +#ifdef CONFIG_EISA + int err; + + err = eisa_driver_register (&vortex_eisa_driver); + if (!err) { + /* + * Because of the way EISA bus is probed, we cannot assume + * any device have been found when we exit from + * eisa_driver_register (the bus root driver may not be + * initialized yet). So we blindly assume something was + * found, and let the sysfs magic happened... + */ + eisa_found = 1; + } +#endif + + /* Special code to work-around the Compaq PCI BIOS32 problem. 
*/ + if (compaq_ioaddr) { + vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE), + compaq_irq, compaq_device_id, vortex_cards_found++); + } + + return vortex_cards_found - orig_cards_found + eisa_found; +} + +/* returns count (>= 0), or negative on error */ +static int vortex_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc, unit, pci_bar; + struct vortex_chip_info *vci; + void __iomem *ioaddr; + + /* wake up and enable device */ + rc = pci_enable_device(pdev); + if (rc < 0) + goto out; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc < 0) + goto out_disable; + + unit = vortex_cards_found; + + if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { + /* Determine the default if the user didn't override us */ + vci = &vortex_info_tbl[ent->driver_data]; + pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0; + } else if (unit < MAX_UNITS && use_mmio[unit] >= 0) + pci_bar = use_mmio[unit] ? 1 : 0; + else + pci_bar = global_use_mmio ? 1 : 0; + + ioaddr = pci_iomap(pdev, pci_bar, 0); + if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ + ioaddr = pci_iomap(pdev, 0, 0); + if (!ioaddr) { + rc = -ENOMEM; + goto out_release; + } + + rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, + ent->driver_data, unit); + if (rc < 0) + goto out_iounmap; + + vortex_cards_found++; + goto out; + +out_iounmap: + pci_iounmap(pdev, ioaddr); +out_release: + pci_release_regions(pdev); +out_disable: + pci_disable_device(pdev); +out: + return rc; +} + +static const struct net_device_ops boomrang_netdev_ops = { + .ndo_open = vortex_open, + .ndo_stop = vortex_close, + .ndo_start_xmit = boomerang_start_xmit, + .ndo_tx_timeout = vortex_tx_timeout, + .ndo_get_stats = vortex_get_stats, +#ifdef CONFIG_PCI + .ndo_do_ioctl = vortex_ioctl, +#endif + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_vortex, +#endif +}; + +static const struct net_device_ops vortex_netdev_ops = { + .ndo_open = vortex_open, + .ndo_stop = vortex_close, + .ndo_start_xmit = vortex_start_xmit, + .ndo_tx_timeout = vortex_tx_timeout, + .ndo_get_stats = vortex_get_stats, +#ifdef CONFIG_PCI + .ndo_do_ioctl = vortex_ioctl, +#endif + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_vortex, +#endif +}; + +/* + * Start up the PCI/EISA device which is described by *gendev. + * Return 0 on success. 
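+ * Returns a negative errno (e.g. -ENOMEM, -EINVAL) on failure.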
+ * + * NOTE: pdev can be NULL, for the case of a Compaq device + */ +static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, + int chip_idx, int card_idx) +{ + struct vortex_private *vp; + int option; + unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ + int i, step; + struct net_device *dev; + static int printed_version; + int retval, print_info; + struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx]; + const char *print_name = "3c59x"; + struct pci_dev *pdev = NULL; + struct eisa_device *edev = NULL; + + if (!printed_version) { + pr_info("%s", version); + printed_version = 1; + } + + if (gendev) { + if ((pdev = DEVICE_PCI(gendev))) { + print_name = pci_name(pdev); + } + + if ((edev = DEVICE_EISA(gendev))) { + print_name = dev_name(&edev->dev); + } + } + + dev = alloc_etherdev(sizeof(*vp)); + retval = -ENOMEM; + if (!dev) + goto out; + + SET_NETDEV_DEV(dev, gendev); + vp = netdev_priv(dev); + + option = global_options; + + /* The lower four bits are the media type. */ + if (dev->mem_start) { + /* + * The 'options' param is passed in as the third arg to the + * LILO 'ether=' argument for non-modular use + */ + option = dev->mem_start; + } + else if (card_idx < MAX_UNITS) { + if (options[card_idx] >= 0) + option = options[card_idx]; + } + + if (option > 0) { + if (option & 0x8000) + vortex_debug = 7; + if (option & 0x4000) + vortex_debug = 2; + if (option & 0x0400) + vp->enable_wol = 1; + } + + print_info = (vortex_debug > 1); + if (print_info) + pr_info("See Documentation/networking/vortex.txt\n"); + + pr_info("%s: 3Com %s %s at %p.\n", + print_name, + pdev ? "PCI" : "EISA", + vci->name, + ioaddr); + + dev->base_addr = (unsigned long)ioaddr; + dev->irq = irq; + dev->mtu = mtu; + vp->ioaddr = ioaddr; + vp->large_frames = mtu > 1500; + vp->drv_flags = vci->drv_flags; + vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0; + vp->io_size = vci->io_size; + vp->card_idx = card_idx; + vp->window = -1; + + /* module list only for Compaq device */ + if (gendev == NULL) { + compaq_net_device = dev; + } + + /* PCI-only startup logic */ + if (pdev) { + /* enable bus-mastering if necessary */ + if (vci->flags & PCI_USES_MASTER) + pci_set_master(pdev); + + if (vci->drv_flags & IS_VORTEX) { + u8 pci_latency; + u8 new_latency = 248; + + /* Check the PCI latency value. On the 3c590 series the latency timer + must be set to the maximum value to avoid data corruption that occurs + when the timer expires during a transfer. This bug exists the Vortex + chip only. */ + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); + if (pci_latency < new_latency) { + pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n", + print_name, pci_latency, new_latency); + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency); + } + } + } + + spin_lock_init(&vp->lock); + spin_lock_init(&vp->mii_lock); + spin_lock_init(&vp->window_lock); + vp->gendev = gendev; + vp->mii.dev = dev; + vp->mii.mdio_read = mdio_read; + vp->mii.mdio_write = mdio_write; + vp->mii.phy_id_mask = 0x1f; + vp->mii.reg_num_mask = 0x1f; + + /* Makes sure rings are at least 16 byte aligned. 
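+   A single coherent DMA allocation holds both rings back to back: rx_ring
+   starts at the base and tx_ring begins RX_RING_SIZE descriptors later, so
+   one pci_free_consistent() of the combined size (see free_ring) releases both.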
*/ + vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + &vp->rx_ring_dma); + retval = -ENOMEM; + if (!vp->rx_ring) + goto free_device; + + vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); + vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; + + /* if we are a PCI driver, we store info in pdev->driver_data + * instead of a module list */ + if (pdev) + pci_set_drvdata(pdev, dev); + if (edev) + eisa_set_drvdata(edev, dev); + + vp->media_override = 7; + if (option >= 0) { + vp->media_override = ((option & 7) == 2) ? 0 : option & 15; + if (vp->media_override != 7) + vp->medialock = 1; + vp->full_duplex = (option & 0x200) ? 1 : 0; + vp->bus_master = (option & 16) ? 1 : 0; + } + + if (global_full_duplex > 0) + vp->full_duplex = 1; + if (global_enable_wol > 0) + vp->enable_wol = 1; + + if (card_idx < MAX_UNITS) { + if (full_duplex[card_idx] > 0) + vp->full_duplex = 1; + if (flow_ctrl[card_idx] > 0) + vp->flow_ctrl = 1; + if (enable_wol[card_idx] > 0) + vp->enable_wol = 1; + } + + vp->mii.force_media = vp->full_duplex; + vp->options = option; + /* Read the station address from the EEPROM. */ + { + int base; + + if (vci->drv_flags & EEPROM_8BIT) + base = 0x230; + else if (vci->drv_flags & EEPROM_OFFSET) + base = EEPROM_Read + 0x30; + else + base = EEPROM_Read; + + for (i = 0; i < 0x40; i++) { + int timer; + window_write16(vp, base + i, 0, Wn0EepromCmd); + /* Pause for at least 162 us. for the read to take place. */ + for (timer = 10; timer >= 0; timer--) { + udelay(162); + if ((window_read16(vp, 0, Wn0EepromCmd) & + 0x8000) == 0) + break; + } + eeprom[i] = window_read16(vp, 0, Wn0EepromData); + } + } + for (i = 0; i < 0x18; i++) + checksum ^= eeprom[i]; + checksum = (checksum ^ (checksum >> 8)) & 0xff; + if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */ + while (i < 0x21) + checksum ^= eeprom[i++]; + checksum = (checksum ^ (checksum >> 8)) & 0xff; + } + if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) + pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); + for (i = 0; i < 3; i++) + ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); + if (print_info) + pr_cont(" %pM", dev->dev_addr); + /* Unfortunately an all zero eeprom passes the checksum and this + gets found in the wild in failure cases. Crypto is hard 8) */ + if (!is_valid_ether_addr(dev->dev_addr)) { + retval = -EINVAL; + pr_err("*** EEPROM MAC address is invalid.\n"); + goto free_ring; /* With every pack */ + } + for (i = 0; i < 6; i++) + window_write8(vp, dev->dev_addr[i], 2, i); + + if (print_info) + pr_cont(", IRQ %d\n", dev->irq); + /* Tell them about an invalid IRQ. */ + if (dev->irq <= 0 || dev->irq >= nr_irqs) + pr_warn(" *** Warning: IRQ %d is unlikely to work! 
***\n", + dev->irq); + + step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; + if (print_info) { + pr_info(" product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n", + eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], + step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); + } + + + if (pdev && vci->drv_flags & HAS_CB_FNS) { + unsigned short n; + + vp->cb_fn_base = pci_iomap(pdev, 2, 0); + if (!vp->cb_fn_base) { + retval = -ENOMEM; + goto free_ring; + } + + if (print_info) { + pr_info("%s: CardBus functions mapped %16.16llx->%p\n", + print_name, + (unsigned long long)pci_resource_start(pdev, 2), + vp->cb_fn_base); + } + + n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; + if (vp->drv_flags & INVERT_LED_PWR) + n |= 0x10; + if (vp->drv_flags & INVERT_MII_PWR) + n |= 0x4000; + window_write16(vp, n, 2, Wn2_ResetOptions); + if (vp->drv_flags & WNO_XCVR_PWR) { + window_write16(vp, 0x0800, 0, 0); + } + } + + /* Extract our information from the EEPROM data. */ + vp->info1 = eeprom[13]; + vp->info2 = eeprom[15]; + vp->capabilities = eeprom[16]; + + if (vp->info1 & 0x8000) { + vp->full_duplex = 1; + if (print_info) + pr_info("Full duplex capable\n"); + } + + { + static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + unsigned int config; + vp->available_media = window_read16(vp, 3, Wn3_Options); + if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ + vp->available_media = 0x40; + config = window_read32(vp, 3, Wn3_Config); + if (print_info) { + pr_debug(" Internal config register is %4.4x, transceivers %#x.\n", + config, window_read16(vp, 3, Wn3_Options)); + pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", + 8 << RAM_SIZE(config), + RAM_WIDTH(config) ? "word" : "byte", + ram_split[RAM_SPLIT(config)], + AUTOSELECT(config) ? "autoselect/" : "", + XCVR(config) > XCVR_ExtMII ? "" : + media_tbl[XCVR(config)].name); + } + vp->default_media = XCVR(config); + if (vp->default_media == XCVR_NWAY) + vp->has_nway = 1; + vp->autoselect = AUTOSELECT(config); + } + + if (vp->media_override != 7) { + pr_info("%s: Media override to transceiver type %d (%s).\n", + print_name, vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } else + dev->if_port = vp->default_media; + + if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || + dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { + int phy, phy_idx = 0; + mii_preamble_required++; + if (vp->drv_flags & EXTRA_PREAMBLE) + mii_preamble_required++; + mdio_sync(vp, 32); + mdio_read(dev, 24, MII_BMSR); + for (phy = 0; phy < 32 && phy_idx < 1; phy++) { + int mii_status, phyx; + + /* + * For the 3c905CX we look at index 24 first, because it bogusly + * reports an external PHY at all indices + */ + if (phy == 0) + phyx = 24; + else if (phy <= 24) + phyx = phy - 1; + else + phyx = phy; + mii_status = mdio_read(dev, phyx, MII_BMSR); + if (mii_status && mii_status != 0xffff) { + vp->phys[phy_idx++] = phyx; + if (print_info) { + pr_info(" MII transceiver found at address %d, status %4x.\n", + phyx, mii_status); + } + if ((mii_status & 0x0040) == 0) + mii_preamble_required++; + } + } + mii_preamble_required--; + if (phy_idx == 0) { + pr_warn(" ***WARNING*** No MII transceivers found!\n"); + vp->phys[0] = 24; + } else { + vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE); + if (vp->full_duplex) { + /* Only advertise the FD media types. 
*/ + vp->advertising &= ~0x02A0; + mdio_write(dev, vp->phys[0], 4, vp->advertising); + } + } + vp->mii.phy_id = vp->phys[0]; + } + + if (vp->capabilities & CapBusMaster) { + vp->full_bus_master_tx = 1; + if (print_info) { + pr_info(" Enabling bus-master transmits and %s receives.\n", + (vp->info2 & 1) ? "early" : "whole-frame" ); + } + vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2; + vp->bus_master = 0; /* AKPM: vortex only */ + } + + /* The 3c59x-specific entries in the device structure. */ + if (vp->full_bus_master_tx) { + dev->netdev_ops = &boomrang_netdev_ops; + /* Actually, it still should work with iommu. */ + if (card_idx < MAX_UNITS && + ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) || + hw_checksums[card_idx] == 1)) { + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + } + } else + dev->netdev_ops = &vortex_netdev_ops; + + if (print_info) { + pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n", + print_name, + (dev->features & NETIF_F_SG) ? "en":"dis", + (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); + } + + dev->ethtool_ops = &vortex_ethtool_ops; + dev->watchdog_timeo = (watchdog * HZ) / 1000; + + if (pdev) { + vp->pm_state_valid = 1; + pci_save_state(pdev); + acpi_set_WOL(dev); + } + retval = register_netdev(dev); + if (retval == 0) + return 0; + +free_ring: + pci_free_consistent(pdev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, + vp->rx_ring_dma); +free_device: + free_netdev(dev); + pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); +out: + return retval; +} + +static void +issue_and_wait(struct net_device *dev, int cmd) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int i; + + iowrite16(cmd, ioaddr + EL3_CMD); + for (i = 0; i < 2000; i++) { + if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) + return; + } + + /* OK, that didn't work. Do it the slow way. One second */ + for (i = 0; i < 100000; i++) { + if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) { + if (vortex_debug > 1) + pr_info("%s: command 0x%04x took %d usecs\n", + dev->name, cmd, i * 10); + return; + } + udelay(10); + } + pr_err("%s: command 0x%04x did not complete! Status=0x%x\n", + dev->name, cmd, ioread16(ioaddr + EL3_STATUS)); +} + +static void +vortex_set_duplex(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + + pr_info("%s: setting %s-duplex.\n", + dev->name, (vp->full_duplex) ? "full" : "half"); + + /* Set the full-duplex bit. */ + window_write16(vp, + ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | + (vp->large_frames ? 0x40 : 0) | + ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 
+ 0x100 : 0), + 3, Wn3_MAC_Ctrl); +} + +static void vortex_check_media(struct net_device *dev, unsigned int init) +{ + struct vortex_private *vp = netdev_priv(dev); + unsigned int ok_to_print = 0; + + if (vortex_debug > 3) + ok_to_print = 1; + + if (mii_check_media(&vp->mii, ok_to_print, init)) { + vp->full_duplex = vp->mii.full_duplex; + vortex_set_duplex(dev); + } else if (init) { + vortex_set_duplex(dev); + } +} + +static int +vortex_up(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + unsigned int config; + int i, mii_reg1, mii_reg5, err = 0; + + if (VORTEX_PCI(vp)) { + pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ + if (vp->pm_state_valid) + pci_restore_state(VORTEX_PCI(vp)); + err = pci_enable_device(VORTEX_PCI(vp)); + if (err) { + pr_warn("%s: Could not enable device\n", dev->name); + goto err_out; + } + } + + /* Before initializing select the active media port. */ + config = window_read32(vp, 3, Wn3_Config); + + if (vp->media_override != 7) { + pr_info("%s: Media override to transceiver %d (%s).\n", + dev->name, vp->media_override, + media_tbl[vp->media_override].name); + dev->if_port = vp->media_override; + } else if (vp->autoselect) { + if (vp->has_nway) { + if (vortex_debug > 1) + pr_info("%s: using NWAY device table, not %d\n", + dev->name, dev->if_port); + dev->if_port = XCVR_NWAY; + } else { + /* Find first available media type, starting with 100baseTx. */ + dev->if_port = XCVR_100baseTx; + while (! (vp->available_media & media_tbl[dev->if_port].mask)) + dev->if_port = media_tbl[dev->if_port].next; + if (vortex_debug > 1) + pr_info("%s: first available media type: %s\n", + dev->name, media_tbl[dev->if_port].name); + } + } else { + dev->if_port = vp->default_media; + if (vortex_debug > 1) + pr_info("%s: using default media %s\n", + dev->name, media_tbl[dev->if_port].name); + } + + init_timer(&vp->timer); + vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); + vp->timer.data = (unsigned long)dev; + vp->timer.function = vortex_timer; /* timer handler */ + add_timer(&vp->timer); + + init_timer(&vp->rx_oom_timer); + vp->rx_oom_timer.data = (unsigned long)dev; + vp->rx_oom_timer.function = rx_oom_timer; + + if (vortex_debug > 1) + pr_debug("%s: Initial media type %s.\n", + dev->name, media_tbl[dev->if_port].name); + + vp->full_duplex = vp->mii.force_media; + config = BFINS(config, dev->if_port, 20, 4); + if (vortex_debug > 6) + pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config); + window_write32(vp, config, 3, Wn3_Config); + + if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { + mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); + mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); + vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); + vp->mii.full_duplex = vp->full_duplex; + + vortex_check_media(dev, 1); + } + else + vortex_set_duplex(dev); + + issue_and_wait(dev, TxReset); + /* + * Don't reset the PHY - that upsets autonegotiation during DHCP operations. + */ + issue_and_wait(dev, RxReset|0x04); + + + iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); + + if (vortex_debug > 1) { + pr_debug("%s: vortex_up() irq %d media status %4.4x.\n", + dev->name, dev->irq, window_read16(vp, 4, Wn4_Media)); + } + + /* Set the station address and mask in window 2 each time opened. 
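+    Window 2 offsets 0-5 take the station address; offsets 6-11 form the
+    address mask, which the second loop below clears to zero.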
*/ + for (i = 0; i < 6; i++) + window_write8(vp, dev->dev_addr[i], 2, i); + for (; i < 12; i+=2) + window_write16(vp, 0, 2, i); + + if (vp->cb_fn_base) { + unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010; + if (vp->drv_flags & INVERT_LED_PWR) + n |= 0x10; + if (vp->drv_flags & INVERT_MII_PWR) + n |= 0x4000; + window_write16(vp, n, 2, Wn2_ResetOptions); + } + + if (dev->if_port == XCVR_10base2) + /* Start the thinnet transceiver. We should really wait 50ms...*/ + iowrite16(StartCoax, ioaddr + EL3_CMD); + if (dev->if_port != XCVR_NWAY) { + window_write16(vp, + (window_read16(vp, 4, Wn4_Media) & + ~(Media_10TP|Media_SQE)) | + media_tbl[dev->if_port].media_bits, + 4, Wn4_Media); + } + + /* Switch to the stats window, and clear all stats by reading. */ + iowrite16(StatsDisable, ioaddr + EL3_CMD); + for (i = 0; i < 10; i++) + window_read8(vp, 6, i); + window_read16(vp, 6, 10); + window_read16(vp, 6, 12); + /* New: On the Vortex we must also clear the BadSSD counter. */ + window_read8(vp, 4, 12); + /* ..and on the Boomerang we enable the extra statistics bits. */ + window_write16(vp, 0x0040, 4, Wn4_NetDiag); + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + vp->cur_rx = vp->dirty_rx = 0; + /* Initialize the RxEarly register as recommended. */ + iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); + iowrite32(0x0020, ioaddr + PktStatus); + iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr); + } + if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ + vp->cur_tx = vp->dirty_tx = 0; + if (vp->drv_flags & IS_BOOMERANG) + iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */ + /* Clear the Rx, Tx rings. */ + for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */ + vp->rx_ring[i].status = 0; + for (i = 0; i < TX_RING_SIZE; i++) + vp->tx_skbuff[i] = NULL; + iowrite32(0, ioaddr + DownListPtr); + } + /* Set receiver mode: presumably accept b-case and phys addr only. */ + set_rx_mode(dev); + /* enable 802.1q tagged frames */ + set_8021q_mode(dev, 1); + iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ + + iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ + iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ + /* Allow status bits to be seen. */ + vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete| + (vp->full_bus_master_tx ? DownComplete : TxAvailable) | + (vp->full_bus_master_rx ? UpComplete : RxComplete) | + (vp->bus_master ? DMADone : 0); + vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | + (vp->full_bus_master_rx ? 0 : RxComplete) | + StatsFull | HostError | TxComplete | IntReq + | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete; + iowrite16(vp->status_enable, ioaddr + EL3_CMD); + /* Ack all pending events, and set active indicator mask. */ + iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, + ioaddr + EL3_CMD); + iowrite16(vp->intr_enable, ioaddr + EL3_CMD); + if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ + iowrite32(0x8000, vp->cb_fn_base + 4); + netif_start_queue (dev); +err_out: + return err; +} + +static int +vortex_open(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + int i; + int retval; + + /* Use the now-standard shared IRQ implementation. */ + if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 
+ boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) { + pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); + goto err; + } + + if (vp->full_bus_master_rx) { /* Boomerang bus master. */ + if (vortex_debug > 2) + pr_debug("%s: Filling in the Rx ring.\n", dev->name); + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); + vp->rx_ring[i].status = 0; /* Clear complete bit. */ + vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); + + skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, + GFP_KERNEL); + vp->rx_skbuff[i] = skb; + if (skb == NULL) + break; /* Bad news! */ + + skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ + vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); + } + if (i != RX_RING_SIZE) { + int j; + pr_emerg("%s: no memory for rx ring\n", dev->name); + for (j = 0; j < i; j++) { + if (vp->rx_skbuff[j]) { + dev_kfree_skb(vp->rx_skbuff[j]); + vp->rx_skbuff[j] = NULL; + } + } + retval = -ENOMEM; + goto err_free_irq; + } + /* Wrap the ring. */ + vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); + } + + retval = vortex_up(dev); + if (!retval) + goto out; + +err_free_irq: + free_irq(dev->irq, dev); +err: + if (vortex_debug > 1) + pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval); +out: + return retval; +} + +static void +vortex_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int next_tick = 60*HZ; + int ok = 0; + int media_status; + + if (vortex_debug > 2) { + pr_debug("%s: Media selection timer tick happened, %s.\n", + dev->name, media_tbl[dev->if_port].name); + pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo); + } + + media_status = window_read16(vp, 4, Wn4_Media); + switch (dev->if_port) { + case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx: + if (media_status & Media_LnkBeat) { + netif_carrier_on(dev); + ok = 1; + if (vortex_debug > 1) + pr_debug("%s: Media %s has link beat, %x.\n", + dev->name, media_tbl[dev->if_port].name, media_status); + } else { + netif_carrier_off(dev); + if (vortex_debug > 1) { + pr_debug("%s: Media %s has no link beat, %x.\n", + dev->name, media_tbl[dev->if_port].name, media_status); + } + } + break; + case XCVR_MII: case XCVR_NWAY: + { + ok = 1; + vortex_check_media(dev, 0); + } + break; + default: /* Other media types handled by Tx timeouts. */ + if (vortex_debug > 1) + pr_debug("%s: Media %s has no indication, %x.\n", + dev->name, media_tbl[dev->if_port].name, media_status); + ok = 1; + } + + if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev)) + next_tick = 5*HZ; + + if (vp->medialock) + goto leave_media_alone; + + if (!ok) { + unsigned int config; + + spin_lock_irq(&vp->lock); + + do { + dev->if_port = media_tbl[dev->if_port].next; + } while ( ! (vp->available_media & media_tbl[dev->if_port].mask)); + if (dev->if_port == XCVR_Default) { /* Go back to default. 
*/ + dev->if_port = vp->default_media; + if (vortex_debug > 1) + pr_debug("%s: Media selection failing, using default %s port.\n", + dev->name, media_tbl[dev->if_port].name); + } else { + if (vortex_debug > 1) + pr_debug("%s: Media selection failed, now trying %s port.\n", + dev->name, media_tbl[dev->if_port].name); + next_tick = media_tbl[dev->if_port].wait; + } + window_write16(vp, + (media_status & ~(Media_10TP|Media_SQE)) | + media_tbl[dev->if_port].media_bits, + 4, Wn4_Media); + + config = window_read32(vp, 3, Wn3_Config); + config = BFINS(config, dev->if_port, 20, 4); + window_write32(vp, config, 3, Wn3_Config); + + iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax, + ioaddr + EL3_CMD); + if (vortex_debug > 1) + pr_debug("wrote 0x%08x to Wn3_Config\n", config); + /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ + + spin_unlock_irq(&vp->lock); + } + +leave_media_alone: + if (vortex_debug > 2) + pr_debug("%s: Media selection timer finished, %s.\n", + dev->name, media_tbl[dev->if_port].name); + + mod_timer(&vp->timer, RUN_AT(next_tick)); + if (vp->deferred) + iowrite16(FakeIntr, ioaddr + EL3_CMD); +} + +static void vortex_tx_timeout(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", + dev->name, ioread8(ioaddr + TxStatus), + ioread16(ioaddr + EL3_STATUS)); + pr_err(" diagnostics: net %04x media %04x dma %08x fifo %04x\n", + window_read16(vp, 4, Wn4_NetDiag), + window_read16(vp, 4, Wn4_Media), + ioread32(ioaddr + PktStatus), + window_read16(vp, 4, Wn4_FIFODiag)); + /* Slight code bloat to be user friendly. */ + if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88) + pr_err("%s: Transmitter encountered 16 collisions --" + " network cable problem?\n", dev->name); + if (ioread16(ioaddr + EL3_STATUS) & IntLatch) { + pr_err("%s: Interrupt posted but not delivered --" + " IRQ blocked by another device?\n", dev->name); + /* Bad idea here.. but we might as well handle a few events. */ + { + /* + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; + local_irq_save(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); + local_irq_restore(flags); + } + } + + if (vortex_debug > 0) + dump_tx_ring(dev); + + issue_and_wait(dev, TxReset); + + dev->stats.tx_errors++; + if (vp->full_bus_master_tx) { + pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name); + if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0) + iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc), + ioaddr + DownListPtr); + if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) + netif_wake_queue (dev); + if (vp->drv_flags & IS_BOOMERANG) + iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); + iowrite16(DownUnstall, ioaddr + EL3_CMD); + } else { + dev->stats.tx_dropped++; + netif_wake_queue(dev); + } + + /* Issue Tx Enable */ + iowrite16(TxEnable, ioaddr + EL3_CMD); + dev->trans_start = jiffies; /* prevent tx timeout */ +} + +/* + * Handle uncommon interrupt sources. This is a separate routine to minimize + * the cache impact. 
+ */ +static void +vortex_error(struct net_device *dev, int status) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int do_tx_reset = 0, reset_mask = 0; + unsigned char tx_status = 0; + + if (vortex_debug > 2) { + pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status); + } + + if (status & TxComplete) { /* Really "TxError" for us. */ + tx_status = ioread8(ioaddr + TxStatus); + /* Presumably a tx-timeout. We must merely re-enable. */ + if (vortex_debug > 2 || + (tx_status != 0x88 && vortex_debug > 0)) { + pr_err("%s: Transmit error, Tx status register %2.2x.\n", + dev->name, tx_status); + if (tx_status == 0x82) { + pr_err("Probably a duplex mismatch. See " + "Documentation/networking/vortex.txt\n"); + } + dump_tx_ring(dev); + } + if (tx_status & 0x14) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x08) vp->xstats.tx_max_collisions++; + iowrite8(0, ioaddr + TxStatus); + if (tx_status & 0x30) { /* txJabber or txUnderrun */ + do_tx_reset = 1; + } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ + do_tx_reset = 1; + reset_mask = 0x0108; /* Reset interface logic, but not download logic */ + } else { /* Merely re-enable the transmitter. */ + iowrite16(TxEnable, ioaddr + EL3_CMD); + } + } + + if (status & RxEarly) /* Rx early is unused. */ + iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); + + if (status & StatsFull) { /* Empty statistics. */ + static int DoneDidThat; + if (vortex_debug > 4) + pr_debug("%s: Updating stats.\n", dev->name); + update_stats(ioaddr, dev); + /* HACK: Disable statistics as an interrupt source. */ + /* This occurs when we have the wrong media type! */ + if (DoneDidThat == 0 && + ioread16(ioaddr + EL3_STATUS) & StatsFull) { + pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n", + dev->name); + iowrite16(SetIntrEnb | + (window_read16(vp, 5, 10) & ~StatsFull), + ioaddr + EL3_CMD); + vp->intr_enable &= ~StatsFull; + DoneDidThat++; + } + } + if (status & IntReq) { /* Restore all interrupt sources. */ + iowrite16(vp->status_enable, ioaddr + EL3_CMD); + iowrite16(vp->intr_enable, ioaddr + EL3_CMD); + } + if (status & HostError) { + u16 fifo_diag; + fifo_diag = window_read16(vp, 4, Wn4_FIFODiag); + pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n", + dev->name, fifo_diag); + /* Adapter failure requires Tx/Rx reset and reinit. */ + if (vp->full_bus_master_tx) { + int bus_status = ioread32(ioaddr + PktStatus); + /* 0x80000000 PCI master abort. */ + /* 0x40000000 PCI target abort. */ + if (vortex_debug) + pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status); + + /* In this case, blow the card away */ + /* Must not enter D3 or we can't legally issue the reset! */ + vortex_down(dev, 0); + issue_and_wait(dev, TotalReset | 0xff); + vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */ + } else if (fifo_diag & 0x0400) + do_tx_reset = 1; + if (fifo_diag & 0x3000) { + /* Reset Rx fifo and upload logic */ + issue_and_wait(dev, RxReset|0x07); + /* Set the Rx filter to the current state. */ + set_rx_mode(dev); + /* enable 802.1q VLAN tagged frames */ + set_8021q_mode(dev, 1); + iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. 
*/ + iowrite16(AckIntr | HostError, ioaddr + EL3_CMD); + } + } + + if (do_tx_reset) { + issue_and_wait(dev, TxReset|reset_mask); + iowrite16(TxEnable, ioaddr + EL3_CMD); + if (!vp->full_bus_master_tx) + netif_wake_queue(dev); + } +} + +static netdev_tx_t +vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + /* Put out the doubleword header... */ + iowrite32(skb->len, ioaddr + TX_FIFO); + if (vp->bus_master) { + /* Set the bus-master controller to transfer the packet. */ + int len = (skb->len + 3) & ~3; + vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, + PCI_DMA_TODEVICE); + spin_lock_irq(&vp->window_lock); + window_set(vp, 7); + iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); + iowrite16(len, ioaddr + Wn7_MasterLen); + spin_unlock_irq(&vp->window_lock); + vp->tx_skb = skb; + skb_tx_timestamp(skb); + iowrite16(StartDMADown, ioaddr + EL3_CMD); + /* netif_wake_queue() will be called at the DMADone interrupt. */ + } else { + /* ... and the packet rounded to a doubleword. */ + skb_tx_timestamp(skb); + iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); + dev_consume_skb_any (skb); + if (ioread16(ioaddr + TxFree) > 1536) { + netif_start_queue (dev); /* AKPM: redundant? */ + } else { + /* Interrupt us when the FIFO has room for max-sized packet. */ + netif_stop_queue(dev); + iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + } + } + + + /* Clear the Tx status stack. */ + { + int tx_status; + int i = 32; + + while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) { + if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */ + if (vortex_debug > 2) + pr_debug("%s: Tx error, status %2.2x.\n", + dev->name, tx_status); + if (tx_status & 0x04) dev->stats.tx_fifo_errors++; + if (tx_status & 0x38) dev->stats.tx_aborted_errors++; + if (tx_status & 0x30) { + issue_and_wait(dev, TxReset); + } + iowrite16(TxEnable, ioaddr + EL3_CMD); + } + iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */ + } + } + return NETDEV_TX_OK; +} + +static netdev_tx_t +boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + /* Calculate the next Tx descriptor entry. */ + int entry = vp->cur_tx % TX_RING_SIZE; + struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; + unsigned long flags; + dma_addr_t dma_addr; + + if (vortex_debug > 6) { + pr_debug("boomerang_start_xmit()\n"); + pr_debug("%s: Trying to send a packet, Tx index %d.\n", + dev->name, vp->cur_tx); + } + + /* + * We can't allow a recursion from our interrupt handler back into the + * tx routine, as they take the same spin lock, and that causes + * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in + * a bit + */ + if (vp->handling_irq) + return NETDEV_TX_BUSY; + + if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { + if (vortex_debug > 0) + pr_warn("%s: BUG! 
Tx Ring full, refusing to send buffer\n", + dev->name); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + vp->tx_skbuff[entry] = skb; + + vp->tx_ring[entry].next = 0; +#if DO_ZEROCOPY + if (skb->ip_summed != CHECKSUM_PARTIAL) + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); + else + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); + + if (!skb_shinfo(skb)->nr_frags) { + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); + vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); + } else { + int i; + + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); + vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, + 0, + frag->size, + DMA_TO_DEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { + for(i = i-1; i >= 0; i--) + dma_unmap_page(&VORTEX_PCI(vp)->dev, + le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), + le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), + DMA_TO_DEVICE); + + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[entry].frag[0].addr), + le32_to_cpu(vp->tx_ring[entry].frag[0].length), + PCI_DMA_TODEVICE); + + goto out_dma_err; + } + + vp->tx_ring[entry].frag[i+1].addr = + cpu_to_le32(dma_addr); + + if (i == skb_shinfo(skb)->nr_frags-1) + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); + else + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)); + } + } +#else + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); + vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); + vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); +#endif + + spin_lock_irqsave(&vp->lock, flags); + /* Wait for the stall to complete. */ + issue_and_wait(dev, DownStall); + prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)); + if (ioread32(ioaddr + DownListPtr) == 0) { + iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); + vp->queued_packet++; + } + + vp->cur_tx++; + if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { + netif_stop_queue (dev); + } else { /* Clear previous interrupt enable. */ +#if defined(tx_interrupt_mitigation) + /* Dubious. If in boomeang_interrupt "faster" cyclone ifdef + * were selected, this would corrupt DN_COMPLETE. No? + */ + prev_entry->status &= cpu_to_le32(~TxIntrUploaded); +#endif + } + skb_tx_timestamp(skb); + iowrite16(DownUnstall, ioaddr + EL3_CMD); + spin_unlock_irqrestore(&vp->lock, flags); +out: + return NETDEV_TX_OK; +out_dma_err: + dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); + goto out; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ + +/* + * This is the ISR for the vortex series chips. 
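+ * These chips use PIO through the Rx/Tx FIFOs; the descriptor-based
+ * boomerang/cyclone paths are handled by boomerang_interrupt() below.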
+ * full_bus_master_tx == 0 && full_bus_master_rx == 0 + */ + +static irqreturn_t +vortex_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr; + int status; + int work_done = max_interrupt_work; + int handled = 0; + + ioaddr = vp->ioaddr; + spin_lock(&vp->lock); + + status = ioread16(ioaddr + EL3_STATUS); + + if (vortex_debug > 6) + pr_debug("vortex_interrupt(). status=0x%4x\n", status); + + if ((status & IntLatch) == 0) + goto handler_exit; /* No interrupt: shared IRQs cause this */ + handled = 1; + + if (status & IntReq) { + status |= vp->deferred; + vp->deferred = 0; + } + + if (status == 0xffff) /* h/w no longer present (hotplug)? */ + goto handler_exit; + + if (vortex_debug > 4) + pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", + dev->name, status, ioread8(ioaddr + Timer)); + + spin_lock(&vp->window_lock); + window_set(vp, 7); + + do { + if (vortex_debug > 5) + pr_debug("%s: In interrupt loop, status %4.4x.\n", + dev->name, status); + if (status & RxComplete) + vortex_rx(dev); + + if (status & TxAvailable) { + if (vortex_debug > 5) + pr_debug(" TX room bit was handled.\n"); + /* There's room in the FIFO for a full-sized packet. */ + iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD); + netif_wake_queue (dev); + } + + if (status & DMADone) { + if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { + iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ + pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ + if (ioread16(ioaddr + TxFree) > 1536) { + /* + * AKPM: FIXME: I don't think we need this. If the queue was stopped due to + * insufficient FIFO room, the TxAvailable test will succeed and call + * netif_wake_queue() + */ + netif_wake_queue(dev); + } else { /* Interrupt when FIFO has room for max-sized packet. */ + iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); + netif_stop_queue(dev); + } + } + } + /* Check for all uncommon interrupts at once. */ + if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { + if (status == 0xffff) + break; + if (status & RxEarly) + vortex_rx(dev); + spin_unlock(&vp->window_lock); + vortex_error(dev, status); + spin_lock(&vp->window_lock); + window_set(vp, 7); + } + + if (--work_done < 0) { + pr_warn("%s: Too much work in interrupt, status %4.4x\n", + dev->name, status); + /* Disable all pending interrupts. */ + do { + vp->deferred |= status; + iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), + ioaddr + EL3_CMD); + iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); + } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); + /* The timer will reenable interrupts. */ + mod_timer(&vp->timer, jiffies + 1*HZ); + break; + } + /* Acknowledge the IRQ. */ + iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); + + spin_unlock(&vp->window_lock); + + if (vortex_debug > 4) + pr_debug("%s: exiting interrupt, status %4.4x.\n", + dev->name, status); +handler_exit: + spin_unlock(&vp->lock); + return IRQ_RETVAL(handled); +} + +/* + * This is the ISR for the boomerang series chips. 
+ * full_bus_master_tx == 1 && full_bus_master_rx == 1 + */ + +static irqreturn_t +boomerang_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr; + int status; + int work_done = max_interrupt_work; + + ioaddr = vp->ioaddr; + + + /* + * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout + * and boomerang_start_xmit + */ + spin_lock(&vp->lock); + vp->handling_irq = 1; + + status = ioread16(ioaddr + EL3_STATUS); + + if (vortex_debug > 6) + pr_debug("boomerang_interrupt. status=0x%4x\n", status); + + if ((status & IntLatch) == 0) + goto handler_exit; /* No interrupt: shared IRQs can cause this */ + + if (status == 0xffff) { /* h/w no longer present (hotplug)? */ + if (vortex_debug > 1) + pr_debug("boomerang_interrupt(1): status = 0xffff\n"); + goto handler_exit; + } + + if (status & IntReq) { + status |= vp->deferred; + vp->deferred = 0; + } + + if (vortex_debug > 4) + pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n", + dev->name, status, ioread8(ioaddr + Timer)); + do { + if (vortex_debug > 5) + pr_debug("%s: In interrupt loop, status %4.4x.\n", + dev->name, status); + if (status & UpComplete) { + iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD); + if (vortex_debug > 5) + pr_debug("boomerang_interrupt->boomerang_rx\n"); + boomerang_rx(dev); + } + + if (status & DownComplete) { + unsigned int dirty_tx = vp->dirty_tx; + + iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD); + while (vp->cur_tx - dirty_tx > 0) { + int entry = dirty_tx % TX_RING_SIZE; +#if 1 /* AKPM: the latter is faster, but cyclone-only */ + if (ioread32(ioaddr + DownListPtr) == + vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)) + break; /* It still hasn't been processed. */ +#else + if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0) + break; /* It still hasn't been processed. */ +#endif + + if (vp->tx_skbuff[entry]) { + struct sk_buff *skb = vp->tx_skbuff[entry]; +#if DO_ZEROCOPY + int i; + for (i=0; i<=skb_shinfo(skb)->nr_frags; i++) + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[entry].frag[i].addr), + le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, + PCI_DMA_TODEVICE); +#else + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); +#endif + dev_kfree_skb_irq(skb); + vp->tx_skbuff[entry] = NULL; + } else { + pr_debug("boomerang_interrupt: no skb!\n"); + } + /* dev->stats.tx_packets++; Counted below. */ + dirty_tx++; + } + vp->dirty_tx = dirty_tx; + if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) { + if (vortex_debug > 6) + pr_debug("boomerang_interrupt: wake queue\n"); + netif_wake_queue (dev); + } + } + + /* Check for all uncommon interrupts at once. */ + if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) + vortex_error(dev, status); + + if (--work_done < 0) { + pr_warn("%s: Too much work in interrupt, status %4.4x\n", + dev->name, status); + /* Disable all pending interrupts. */ + do { + vp->deferred |= status; + iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable), + ioaddr + EL3_CMD); + iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); + } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch); + /* The timer will reenable interrupts. */ + mod_timer(&vp->timer, jiffies + 1*HZ); + break; + } + /* Acknowledge the IRQ. */ + iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); + if (vp->cb_fn_base) /* The PCMCIA people are idiots. 
*/ + iowrite32(0x8000, vp->cb_fn_base + 4); + + } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch); + + if (vortex_debug > 4) + pr_debug("%s: exiting interrupt, status %4.4x.\n", + dev->name, status); +handler_exit: + vp->handling_irq = 0; + spin_unlock(&vp->lock); + return IRQ_HANDLED; +} + +static int vortex_rx(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int i; + short rx_status; + + if (vortex_debug > 5) + pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n", + ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus)); + while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) { + if (rx_status & 0x4000) { /* Error, update stats. */ + unsigned char rx_error = ioread8(ioaddr + RxErrors); + if (vortex_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + int pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 5); + if (vortex_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + if (skb != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + /* 'skb_put()' points to the start of sk_buff data area. */ + if (vp->bus_master && + ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { + dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), + pkt_len, PCI_DMA_FROMDEVICE); + iowrite32(dma, ioaddr + Wn7_MasterAddr); + iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); + iowrite16(StartDMAUp, ioaddr + EL3_CMD); + while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) + ; + pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); + } else { + ioread32_rep(ioaddr + RX_FIFO, + skb_put(skb, pkt_len), + (pkt_len + 3) >> 2); + } + iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + /* Wait a limited time to go to next packet. */ + for (i = 200; i >= 0; i--) + if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) + break; + continue; + } else if (vortex_debug > 0) + pr_notice("%s: No memory to allocate a sk_buff of size %d.\n", + dev->name, pkt_len); + dev->stats.rx_dropped++; + } + issue_and_wait(dev, RxDiscard); + } + + return 0; +} + +static int +boomerang_rx(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + int entry = vp->cur_rx % RX_RING_SIZE; + void __iomem *ioaddr = vp->ioaddr; + int rx_status; + int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; + + if (vortex_debug > 5) + pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); + + while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){ + if (--rx_work_limit < 0) + break; + if (rx_status & RxDError) { /* Error, update stats. 
*/ + unsigned char rx_error = rx_status >> 16; + if (vortex_debug > 2) + pr_debug(" Rx error: status %2.2x.\n", rx_error); + dev->stats.rx_errors++; + if (rx_error & 0x01) dev->stats.rx_over_errors++; + if (rx_error & 0x02) dev->stats.rx_length_errors++; + if (rx_error & 0x04) dev->stats.rx_frame_errors++; + if (rx_error & 0x08) dev->stats.rx_crc_errors++; + if (rx_error & 0x10) dev->stats.rx_length_errors++; + } else { + /* The packet length: up to 4.5K!. */ + int pkt_len = rx_status & 0x1fff; + struct sk_buff *skb; + dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); + + if (vortex_debug > 4) + pr_debug("Receiving packet size %d status %4.4x.\n", + pkt_len, rx_status); + + /* Check if the packet is long enough to just accept without + copying to a properly sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + /* 'skb_put()' points to the start of sk_buff data area. */ + memcpy(skb_put(skb, pkt_len), + vp->rx_skbuff[entry]->data, + pkt_len); + pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + vp->rx_copy++; + } else { + /* Pass up the skbuff already on the Rx ring. */ + skb = vp->rx_skbuff[entry]; + vp->rx_skbuff[entry] = NULL; + skb_put(skb, pkt_len); + pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + vp->rx_nocopy++; + } + skb->protocol = eth_type_trans(skb, dev); + { /* Use hardware checksum info. */ + int csum_bits = rx_status & 0xee000000; + if (csum_bits && + (csum_bits == (IPChksumValid | TCPChksumValid) || + csum_bits == (IPChksumValid | UDPChksumValid))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + vp->rx_csumhits++; + } + } + netif_rx(skb); + dev->stats.rx_packets++; + } + entry = (++vp->cur_rx) % RX_RING_SIZE; + } + /* Refill the Rx ring buffers. */ + for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { + struct sk_buff *skb; + entry = vp->dirty_rx % RX_RING_SIZE; + if (vp->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); + if (skb == NULL) { + static unsigned long last_jif; + if (time_after(jiffies, last_jif + 10 * HZ)) { + pr_warn("%s: memory shortage\n", + dev->name); + last_jif = jiffies; + } + if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) + mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); + break; /* Bad news! */ + } + + vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); + vp->rx_skbuff[entry] = skb; + } + vp->rx_ring[entry].status = 0; /* Clear complete bit. */ + iowrite16(UpUnstall, ioaddr + EL3_CMD); + } + return 0; +} + +/* + * If we've hit a total OOM refilling the Rx ring we poll once a second + * for some memory. Otherwise there is no way to restart the rx process. + */ +static void +rx_oom_timer(unsigned long arg) +{ + struct net_device *dev = (struct net_device *)arg; + struct vortex_private *vp = netdev_priv(dev); + + spin_lock_irq(&vp->lock); + if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ + boomerang_rx(dev); + if (vortex_debug > 1) { + pr_debug("%s: rx_oom_timer %s\n", dev->name, + ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? 
"succeeded" : "retrying"); + } + spin_unlock_irq(&vp->lock); +} + +static void +vortex_down(struct net_device *dev, int final_down) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + netif_stop_queue (dev); + + del_timer_sync(&vp->rx_oom_timer); + del_timer_sync(&vp->timer); + + /* Turn off statistics ASAP. We update dev->stats below. */ + iowrite16(StatsDisable, ioaddr + EL3_CMD); + + /* Disable the receiver and transmitter. */ + iowrite16(RxDisable, ioaddr + EL3_CMD); + iowrite16(TxDisable, ioaddr + EL3_CMD); + + /* Disable receiving 802.1q tagged frames */ + set_8021q_mode(dev, 0); + + if (dev->if_port == XCVR_10base2) + /* Turn off thinnet power. Green! */ + iowrite16(StopCoax, ioaddr + EL3_CMD); + + iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); + + update_stats(ioaddr, dev); + if (vp->full_bus_master_rx) + iowrite32(0, ioaddr + UpListPtr); + if (vp->full_bus_master_tx) + iowrite32(0, ioaddr + DownListPtr); + + if (final_down && VORTEX_PCI(vp)) { + vp->pm_state_valid = 1; + pci_save_state(VORTEX_PCI(vp)); + acpi_set_WOL(dev); + } +} + +static int +vortex_close(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int i; + + if (netif_device_present(dev)) + vortex_down(dev, 1); + + if (vortex_debug > 1) { + pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n", + dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus)); + pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d" + " tx_queued %d Rx pre-checksummed %d.\n", + dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); + } + +#if DO_ZEROCOPY + if (vp->rx_csumhits && + (vp->drv_flags & HAS_HWCKSM) == 0 && + (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) { + pr_warn("%s supports hardware checksums, and we're not using them!\n", + dev->name); + } +#endif + + free_irq(dev->irq, dev); + + if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ + for (i = 0; i < RX_RING_SIZE; i++) + if (vp->rx_skbuff[i]) { + pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } + if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ + for (i = 0; i < TX_RING_SIZE; i++) { + if (vp->tx_skbuff[i]) { + struct sk_buff *skb = vp->tx_skbuff[i]; +#if DO_ZEROCOPY + int k; + + for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[i].frag[k].addr), + le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, + PCI_DMA_TODEVICE); +#else + pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); +#endif + dev_kfree_skb(skb); + vp->tx_skbuff[i] = NULL; + } + } + } + + return 0; +} + +static void +dump_tx_ring(struct net_device *dev) +{ + if (vortex_debug > 0) { + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + + if (vp->full_bus_master_tx) { + int i; + int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */ + + pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n", + vp->full_bus_master_tx, + vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, + vp->cur_tx, vp->cur_tx % TX_RING_SIZE); + pr_err(" Transmit list %8.8x vs. 
%p.\n", + ioread32(ioaddr + DownListPtr), + &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); + issue_and_wait(dev, DownStall); + for (i = 0; i < TX_RING_SIZE; i++) { + unsigned int length; + +#if DO_ZEROCOPY + length = le32_to_cpu(vp->tx_ring[i].frag[0].length); +#else + length = le32_to_cpu(vp->tx_ring[i].length); +#endif + pr_err(" %d: @%p length %8.8x status %8.8x\n", + i, &vp->tx_ring[i], length, + le32_to_cpu(vp->tx_ring[i].status)); + } + if (!stalled) + iowrite16(DownUnstall, ioaddr + EL3_CMD); + } + } +} + +static struct net_device_stats *vortex_get_stats(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + unsigned long flags; + + if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */ + spin_lock_irqsave (&vp->lock, flags); + update_stats(ioaddr, dev); + spin_unlock_irqrestore (&vp->lock, flags); + } + return &dev->stats; +} + +/* Update statistics. + Unlike with the EL3 we need not worry about interrupts changing + the window setting from underneath us, but we must still guard + against a race condition with a StatsUpdate interrupt updating the + table. This is done by checking that the ASM (!) code generated uses + atomic updates with '+='. + */ +static void update_stats(void __iomem *ioaddr, struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + + /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ + /* Switch to the stats window, and read everything. */ + dev->stats.tx_carrier_errors += window_read8(vp, 6, 0); + dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1); + dev->stats.tx_window_errors += window_read8(vp, 6, 4); + dev->stats.rx_fifo_errors += window_read8(vp, 6, 5); + dev->stats.tx_packets += window_read8(vp, 6, 6); + dev->stats.tx_packets += (window_read8(vp, 6, 9) & + 0x30) << 4; + /* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */ + /* Don't bother with register 9, an extension of registers 6&7. + If we do use the 6&7 values the atomic update assumption above + is invalid. 
*/
+	dev->stats.rx_bytes += window_read16(vp, 6, 10);
+	dev->stats.tx_bytes += window_read16(vp, 6, 12);
+	/* Extra stats for get_ethtool_stats() */
+	vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
+	vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
+	vp->xstats.tx_deferred += window_read8(vp, 6, 8);
+	vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
+
+	dev->stats.collisions = vp->xstats.tx_multiple_collisions +
+		vp->xstats.tx_single_collisions +
+		vp->xstats.tx_max_collisions;
+
+	{
+		u8 up = window_read8(vp, 4, 13);
+		dev->stats.rx_bytes += (up & 0x0f) << 16;
+		dev->stats.tx_bytes += (up & 0xf0) << 12;
+	}
+}
+
+static int vortex_nway_reset(struct net_device *dev)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	return mii_nway_restart(&vp->mii);
+}
+
+static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	return mii_ethtool_gset(&vp->mii, cmd);
+}
+
+static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	return mii_ethtool_sset(&vp->mii, cmd);
+}
+
+static u32 vortex_get_msglevel(struct net_device *dev)
+{
+	return vortex_debug;
+}
+
+static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
+{
+	vortex_debug = dbg;
+}
+
+static int vortex_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return VORTEX_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void vortex_get_ethtool_stats(struct net_device *dev,
+	struct ethtool_stats *stats, u64 *data)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	void __iomem *ioaddr = vp->ioaddr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vp->lock, flags);
+	update_stats(ioaddr, dev);
+	spin_unlock_irqrestore(&vp->lock, flags);
+
+	data[0] = vp->xstats.tx_deferred;
+	data[1] = vp->xstats.tx_max_collisions;
+	data[2] = vp->xstats.tx_multiple_collisions;
+	data[3] = vp->xstats.tx_single_collisions;
+	data[4] = vp->xstats.rx_bad_ssd;
+}
+
+
+static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+}
+
+static void vortex_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	if (VORTEX_PCI(vp)) {
+		strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+			sizeof(info->bus_info));
+	} else {
+		if (VORTEX_EISA(vp))
+			strlcpy(info->bus_info, dev_name(vp->gendev),
+				sizeof(info->bus_info));
+		else
+			snprintf(info->bus_info, sizeof(info->bus_info),
+				 "EISA 0x%lx %d", dev->base_addr, dev->irq);
+	}
+}
+
+static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	if (!VORTEX_PCI(vp))
+		return;
+
+	wol->supported = WAKE_MAGIC;
+
+	wol->wolopts = 0;
+	if (vp->enable_wol)
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	if (!VORTEX_PCI(vp))
+		return -EOPNOTSUPP;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		vp->enable_wol = 1;
+	else
+		vp->enable_wol = 0;
+	acpi_set_WOL(dev);
+
+	return 0;
+}
+
+static const struct ethtool_ops vortex_ethtool_ops = {
+	.get_drvinfo		=
vortex_get_drvinfo, + .get_strings = vortex_get_strings, + .get_msglevel = vortex_get_msglevel, + .set_msglevel = vortex_set_msglevel, + .get_ethtool_stats = vortex_get_ethtool_stats, + .get_sset_count = vortex_get_sset_count, + .get_settings = vortex_get_settings, + .set_settings = vortex_set_settings, + .get_link = ethtool_op_get_link, + .nway_reset = vortex_nway_reset, + .get_wol = vortex_get_wol, + .set_wol = vortex_set_wol, + .get_ts_info = ethtool_op_get_ts_info, +}; + +#ifdef CONFIG_PCI +/* + * Must power the device up to do MDIO operations + */ +static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + int err; + struct vortex_private *vp = netdev_priv(dev); + pci_power_t state = 0; + + if(VORTEX_PCI(vp)) + state = VORTEX_PCI(vp)->current_state; + + /* The kernel core really should have pci_get_power_state() */ + + if(state != 0) + pci_set_power_state(VORTEX_PCI(vp), PCI_D0); + err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); + if(state != 0) + pci_set_power_state(VORTEX_PCI(vp), state); + + return err; +} +#endif + + +/* Pre-Cyclone chips have no documented multicast filter, so the only + multicast setting is to receive all multicast frames. At least + the chip has a very clean way to set the mode, unlike many others. */ +static void set_rx_mode(struct net_device *dev) +{ + struct vortex_private *vp = netdev_priv(dev); + void __iomem *ioaddr = vp->ioaddr; + int new_mode; + + if (dev->flags & IFF_PROMISC) { + if (vortex_debug > 3) + pr_notice("%s: Setting promiscuous mode.\n", dev->name); + new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; + } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { + new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; + } else + new_mode = SetRxFilter | RxStation | RxBroadcast; + + iowrite16(new_mode, ioaddr + EL3_CMD); +} + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +/* Setup the card so that it can receive frames with an 802.1q VLAN tag. + Note that this must be done after each RxReset due to some backwards + compatibility logic in the Cyclone and Tornado ASICs */ + +/* The Ethernet Type used for 802.1q tagged frames */ +#define VLAN_ETHER_TYPE 0x8100 + +static void set_8021q_mode(struct net_device *dev, int enable) +{ + struct vortex_private *vp = netdev_priv(dev); + int mac_ctrl; + + if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { + /* cyclone and tornado chipsets can recognize 802.1q + * tagged frames and treat them correctly */ + + int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */ + if (enable) + max_pkt_size += 4; /* 802.1Q VLAN tag */ + + window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize); + + /* set VlanEtherType to let the hardware checksumming + treat tagged frames correctly */ + window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType); + } else { + /* on older cards we have to enable large frames */ + + vp->large_frames = dev->mtu > 1500 || enable; + + mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl); + if (vp->large_frames) + mac_ctrl |= 0x40; + else + mac_ctrl &= ~0x40; + window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl); + } +} +#else + +static void set_8021q_mode(struct net_device *dev, int enable) +{ +} + + +#endif + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. See the MII specifications or DP83840A data sheet + for details. */ + +/* The maximum data clock rate is 2.5 Mhz. 
The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues. */
+static void mdio_delay(struct vortex_private *vp)
+{
+	window_read32(vp, 4, Wn4_PhysicalMgmt);
+}
+
+#define MDIO_SHIFT_CLK	0x01
+#define MDIO_DIR_WRITE	0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ	0x02
+#define MDIO_ENB_IN	0x00
+
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+static void mdio_sync(struct vortex_private *vp, int bits)
+{
+	/* Establish sync by sending at least 32 logic ones. */
+	while (-- bits >= 0) {
+		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	int i;
+	struct vortex_private *vp = netdev_priv(dev);
+	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	unsigned int retval = 0;
+
+	spin_lock_bh(&vp->mii_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(vp, 32);
+
+	/* Shift the read command bits out. */
+	for (i = 14; i >= 0; i--) {
+		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, dataval | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		retval = (retval << 1) |
+			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
+			  MDIO_DATA_READ) ? 1 : 0);
+		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+
+	spin_unlock_bh(&vp->mii_lock);
+
+	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+	int i;
+
+	spin_lock_bh(&vp->mii_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(vp, 32);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, dataval | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+	/* Leave the interface idle. */
+	for (i = 1; i >= 0; i--) {
+		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
+			       4, Wn4_PhysicalMgmt);
+		mdio_delay(vp);
+	}
+
+	spin_unlock_bh(&vp->mii_lock);
+}
+
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+static void acpi_set_WOL(struct net_device *dev)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	void __iomem *ioaddr = vp->ioaddr;
+
+	device_set_wakeup_enable(vp->gendev, vp->enable_wol);
+
+	if (vp->enable_wol) {
+		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+		window_write16(vp, 2, 7, 0x0c);
+		/* The RxFilter must accept the WOL frames. */
+		iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+		iowrite16(RxEnable, ioaddr + EL3_CMD);
+
+		if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
+			pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));
+
+			vp->enable_wol = 0;
+			return;
+		}
+
+		if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+			return;
+
+		/* Change the power state to D3; RxEnable doesn't take effect.
*/ + pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); + } +} + + +static void vortex_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct vortex_private *vp; + + if (!dev) { + pr_err("vortex_remove_one called for Compaq device!\n"); + BUG(); + } + + vp = netdev_priv(dev); + + if (vp->cb_fn_base) + pci_iounmap(pdev, vp->cb_fn_base); + + unregister_netdev(dev); + + pci_set_power_state(pdev, PCI_D0); /* Go active */ + if (vp->pm_state_valid) + pci_restore_state(pdev); + pci_disable_device(pdev); + + /* Should really use issue_and_wait() here */ + iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), + vp->ioaddr + EL3_CMD); + + pci_iounmap(pdev, vp->ioaddr); + + pci_free_consistent(pdev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, + vp->rx_ring_dma); + + pci_release_regions(pdev); + + free_netdev(dev); +} + + +static struct pci_driver vortex_driver = { + .name = "3c59x", + .probe = vortex_init_one, + .remove = vortex_remove_one, + .id_table = vortex_pci_tbl, + .driver.pm = VORTEX_PM_OPS, +}; + + +static int vortex_have_pci; +static int vortex_have_eisa; + + +static int __init vortex_init(void) +{ + int pci_rc, eisa_rc; + + pci_rc = pci_register_driver(&vortex_driver); + eisa_rc = vortex_eisa_init(); + + if (pci_rc == 0) + vortex_have_pci = 1; + if (eisa_rc > 0) + vortex_have_eisa = 1; + + return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; +} + + +static void __exit vortex_eisa_cleanup(void) +{ + void __iomem *ioaddr; + +#ifdef CONFIG_EISA + /* Take care of the EISA devices */ + eisa_driver_unregister(&vortex_eisa_driver); +#endif + + if (compaq_net_device) { + ioaddr = ioport_map(compaq_net_device->base_addr, + VORTEX_TOTAL_SIZE); + + unregister_netdev(compaq_net_device); + iowrite16(TotalReset, ioaddr + EL3_CMD); + release_region(compaq_net_device->base_addr, + VORTEX_TOTAL_SIZE); + + free_netdev(compaq_net_device); + } +} + + +static void __exit vortex_cleanup(void) +{ + if (vortex_have_pci) + pci_unregister_driver(&vortex_driver); + if (vortex_have_eisa) + vortex_eisa_cleanup(); +} + + +module_init(vortex_init); +module_exit(vortex_cleanup); diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig new file mode 100644 index 000000000..afaab4b23 --- /dev/null +++ b/drivers/net/ethernet/3com/Kconfig @@ -0,0 +1,107 @@ +# +# 3Com Ethernet device configuration +# + +config NET_VENDOR_3COM + bool "3Com devices" + default y + depends on ISA || EISA || PCI || PCMCIA + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about 3Com cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_3COM + +config EL3 + tristate "3c509/3c579 \"EtherLink III\" support" + depends on (ISA || EISA) + ---help--- + If you have a network (Ethernet) card belonging to the 3Com + EtherLinkIII series, say Y and read the Ethernet-HOWTO, available + from . + + If your card is not working you may need to use the DOS + setup disk to disable Plug & Play mode, and to select the default + media type. + + To compile this driver as a module, choose M here. The module + will be called 3c509. 
+ +config 3C515 + tristate "3c515 ISA \"Fast EtherLink\"" + depends on ISA && ISA_DMA_API + ---help--- + If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet + network card, say Y and read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called 3c515. + +config PCMCIA_3C574 + tristate "3Com 3c574 PCMCIA support" + depends on PCMCIA + ---help--- + Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA + (PC-card) Fast Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called 3c574_cs. If unsure, say N. + +config PCMCIA_3C589 + tristate "3Com 3c589 PCMCIA support" + depends on PCMCIA + ---help--- + Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA + (PC-card) Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called 3c589_cs. If unsure, say N. + +config VORTEX + tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" + depends on (PCI || EISA) && HAS_IOPORT_MAP + select MII + ---help--- + This option enables driver support for a large number of 10Mbps and + 10/100Mbps EISA, PCI and Cardbus 3Com network cards: + + "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI + "Boomerang" (EtherLink XL 3c900 or 3c905) PCI + "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus + "Tornado" (3c905) PCI + "Hurricane" (3c555/3cSOHO) PCI + + If you have such a card, say Y and read the Ethernet-HOWTO, + available from . More + specific information is in + and in the comments at + the beginning of . + + To compile this support as a module, choose M here. + +config TYPHOON + tristate "3cr990 series \"Typhoon\" support" + depends on PCI + select CRC32 + ---help--- + This option enables driver support for the 3cr990 series of cards: + + 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97, + 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server, + 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR + + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called typhoon. + +endif # NET_VENDOR_3COM diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile new file mode 100644 index 000000000..74046afab --- /dev/null +++ b/drivers/net/ethernet/3com/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the 3Com Ethernet device drivers +# + +obj-$(CONFIG_EL3) += 3c509.o +obj-$(CONFIG_3C515) += 3c515.o +obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o +obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o +obj-$(CONFIG_VORTEX) += 3c59x.o +obj-$(CONFIG_TYPHOON) += typhoon.o diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c new file mode 100644 index 000000000..44d0767a7 --- /dev/null +++ b/drivers/net/ethernet/3com/typhoon.c @@ -0,0 +1,2556 @@ +/* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */ +/* + Written 2002-2004 by David Dillow + Based on code written 1998-2000 by Donald Becker and + Linux 2.2.x driver by David P. McLean . + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. 
This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + This software is available on a public web site. It may enable + cryptographic capabilities of the 3Com hardware, and may be + exported from the United States under License Exception "TSU" + pursuant to 15 C.F.R. Section 740.13(e). + + This work was funded by the National Library of Medicine under + the Department of Energy project number 0274DD06D1 and NLM project + number Y1-LM-2015-01. + + This driver is designed for the 3Com 3CR990 Family of cards with the + 3XP Processor. It has been tested on x86 and sparc64. + + KNOWN ISSUES: + *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware + issue. Hopefully 3Com will fix it. + *) Waiting for a command response takes 8ms due to non-preemptable + polling. Only significant for getting stats and creating + SAs, but an ugly wart never the less. + + TODO: + *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming. + *) Add more support for ethtool (especially for NIC stats) + *) Allow disabling of RX checksum offloading + *) Fix MAC changing to work while the interface is up + (Need to put commands on the TX ring, which changes + the locking) + *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See + http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org +*/ + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + * Setting to > 1518 effectively disables this feature. + */ +static int rx_copybreak = 200; + +/* Should we use MMIO or Port IO? + * 0: Port IO + * 1: MMIO + * 2: Try MMIO, fallback to Port IO + */ +static unsigned int use_mmio = 2; + +/* end user-configurable values */ + +/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). + */ +static const int multicast_filter_limit = 32; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + * The compiler will convert '%'<2^N> into a bit mask. + * Making the Tx ring too large decreases the effectiveness of channel + * bonding and packet priority. + * There are no ill effects from too-large receive rings. + * + * We don't currently use the Hi Tx ring so, don't make it very big. + * + * Beware that if we start using the Hi Tx ring, we will need to change + * typhoon_num_free_tx() and typhoon_tx_complete() to account for that. + */ +#define TXHI_ENTRIES 2 +#define TXLO_ENTRIES 128 +#define RX_ENTRIES 32 +#define COMMAND_ENTRIES 16 +#define RESPONSE_ENTRIES 32 + +#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc)) +#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc)) + +/* The 3XP will preload and remove 64 entries from the free buffer + * list, and we need one entry to keep the ring from wrapping, so + * to keep this a power of two, we use 128 entries. + */ +#define RXFREE_ENTRIES 128 +#define RXENT_ENTRIES (RXFREE_ENTRIES - 1) + +/* Operational parameters that usually are not changed. */ + +/* Time in jiffies before concluding the transmitter is hung. 
*/ +#define TX_TIMEOUT (2*HZ) + +#define PKT_BUF_SZ 1536 +#define FIRMWARE_NAME "/*(DEBLOBBED)*/" + +#define pr_fmt(fmt) KBUILD_MODNAME " " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "typhoon.h" + +MODULE_AUTHOR("David Dillow "); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL"); +/*(DEBLOBBED)*/ +MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); +MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and " + "the buffer given back to the NIC. Default " + "is 200."); +MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. " + "Default is to try MMIO and fallback to PIO."); +module_param(rx_copybreak, int, 0); +module_param(use_mmio, int, 0); + +#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 +#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO +#undef NETIF_F_TSO +#endif + +#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) +#error TX ring too small! +#endif + +struct typhoon_card_info { + const char *name; + const int capabilities; +}; + +#define TYPHOON_CRYPTO_NONE 0x00 +#define TYPHOON_CRYPTO_DES 0x01 +#define TYPHOON_CRYPTO_3DES 0x02 +#define TYPHOON_CRYPTO_VARIABLE 0x04 +#define TYPHOON_FIBER 0x08 +#define TYPHOON_WAKEUP_NEEDS_RESET 0x10 + +enum typhoon_cards { + TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR, + TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR, + TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR, + TYPHOON_FXM, +}; + +/* directly indexed by enum typhoon_cards, above */ +static struct typhoon_card_info typhoon_card_info[] = { + { "3Com Typhoon (3C990-TX)", + TYPHOON_CRYPTO_NONE}, + { "3Com Typhoon (3CR990-TX-95)", + TYPHOON_CRYPTO_DES}, + { "3Com Typhoon (3CR990-TX-97)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES}, + { "3Com Typhoon (3C990SVR)", + TYPHOON_CRYPTO_NONE}, + { "3Com Typhoon (3CR990SVR95)", + TYPHOON_CRYPTO_DES}, + { "3Com Typhoon (3CR990SVR97)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES}, + { "3Com Typhoon2 (3C990B-TX-M)", + TYPHOON_CRYPTO_VARIABLE}, + { "3Com Typhoon2 (3C990BSVR)", + TYPHOON_CRYPTO_VARIABLE}, + { "3Com Typhoon (3CR990-FX-95)", + TYPHOON_CRYPTO_DES | TYPHOON_FIBER}, + { "3Com Typhoon (3CR990-FX-97)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER}, + { "3Com Typhoon (3CR990-FX-95 Server)", + TYPHOON_CRYPTO_DES | TYPHOON_FIBER}, + { "3Com Typhoon (3CR990-FX-97 Server)", + TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER}, + { "3Com Typhoon2 (3C990B-FX-97)", + TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER}, +}; + +/* Notes on the new subsystem numbering scheme: + * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES + * bit 4 indicates if this card has secured firmware (we don't support it) + * bit 8 indicates if this is a (0) copper or (1) fiber card + * bits 12-16 indicate card type: (0) client and (1) server + */ +static const struct pci_device_id typhoon_pci_tbl[] = { + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990, + PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, + PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM }, + { PCI_VENDOR_ID_3COM, 
PCI_DEVICE_ID_3COM_3CR990B, + PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B, + PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX, + PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl); + +/* Define the shared memory area + * Align everything the 3XP will normally be using. + * We'll need to move/align txHi if we start using that ring. + */ +#define __3xp_aligned ____cacheline_aligned +struct typhoon_shared { + struct typhoon_interface iface; + struct typhoon_indexes indexes __3xp_aligned; + struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned; + struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned; + struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned; + struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned; + struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned; + struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned; + u32 zeroWord; + struct tx_desc txHi[TXHI_ENTRIES]; +} __packed; + +struct rxbuff_ent { + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +struct typhoon { + /* Tx cache line section */ + struct transmit_ring txLoRing ____cacheline_aligned; + struct pci_dev * tx_pdev; + void __iomem *tx_ioaddr; + u32 txlo_dma_addr; + + /* Irq/Rx cache line section */ + void __iomem *ioaddr ____cacheline_aligned; + struct typhoon_indexes *indexes; + u8 awaiting_resp; + u8 duplex; + u8 speed; + u8 card_state; + struct basic_ring rxLoRing; + struct pci_dev * pdev; + struct net_device * dev; + struct napi_struct napi; + struct basic_ring rxHiRing; + struct basic_ring rxBuffRing; + struct rxbuff_ent rxbuffers[RXENT_ENTRIES]; + + /* general section */ + spinlock_t command_lock ____cacheline_aligned; + struct basic_ring cmdRing; + struct basic_ring respRing; + struct net_device_stats stats; + struct net_device_stats stats_saved; + struct typhoon_shared * shared; + dma_addr_t shared_dma; + __le16 xcvr_select; + __le16 wol_events; + __le32 offload; + + /* unused stuff (future use) */ + int capabilities; + struct transmit_ring txHiRing; +}; + +enum completion_wait_values { + NoWait = 0, WaitNoSleep, WaitSleep, +}; + +/* These are the values for the typhoon.card_state variable. + * These determine where the statistics will come from in get_stats(). + * The sleep image does not support the statistics we need. + */ +enum state_values { + Sleeping = 0, Running, +}; + +/* PCI writes are not guaranteed to be posted in order, but outstanding writes + * cannot pass a read, so this forces current writes to post. + */ +#define typhoon_post_pci_writes(x) \ + do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0) + +/* We'll wait up to six seconds for a reset, and half a second normally. 
+ */ +#define TYPHOON_UDELAY 50 +#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ) +#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY) +#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY) + +#if defined(NETIF_F_TSO) +#define skb_tso_size(x) (skb_shinfo(x)->gso_size) +#define TSO_NUM_DESCRIPTORS 2 +#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT +#else +#define NETIF_F_TSO 0 +#define skb_tso_size(x) 0 +#define TSO_NUM_DESCRIPTORS 0 +#define TSO_OFFLOAD_ON 0 +#endif + +static inline void +typhoon_inc_index(u32 *index, const int count, const int num_entries) +{ + /* Increment a ring index -- we can use this for all rings execept + * the Rx rings, as they use different size descriptors + * otherwise, everything is the same size as a cmd_desc + */ + *index += count * sizeof(struct cmd_desc); + *index %= num_entries * sizeof(struct cmd_desc); +} + +static inline void +typhoon_inc_cmd_index(u32 *index, const int count) +{ + typhoon_inc_index(index, count, COMMAND_ENTRIES); +} + +static inline void +typhoon_inc_resp_index(u32 *index, const int count) +{ + typhoon_inc_index(index, count, RESPONSE_ENTRIES); +} + +static inline void +typhoon_inc_rxfree_index(u32 *index, const int count) +{ + typhoon_inc_index(index, count, RXFREE_ENTRIES); +} + +static inline void +typhoon_inc_tx_index(u32 *index, const int count) +{ + /* if we start using the Hi Tx ring, this needs updating */ + typhoon_inc_index(index, count, TXLO_ENTRIES); +} + +static inline void +typhoon_inc_rx_index(u32 *index, const int count) +{ + /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */ + *index += count * sizeof(struct rx_desc); + *index %= RX_ENTRIES * sizeof(struct rx_desc); +} + +static int +typhoon_reset(void __iomem *ioaddr, int wait_type) +{ + int i, err = 0; + int timeout; + + if(wait_type == WaitNoSleep) + timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP; + else + timeout = TYPHOON_RESET_TIMEOUT_SLEEP; + + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); + + iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET); + typhoon_post_pci_writes(ioaddr); + udelay(1); + iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET); + + if(wait_type != NoWait) { + for(i = 0; i < timeout; i++) { + if(ioread32(ioaddr + TYPHOON_REG_STATUS) == + TYPHOON_STATUS_WAITING_FOR_HOST) + goto out; + + if(wait_type == WaitSleep) + schedule_timeout_uninterruptible(1); + else + udelay(TYPHOON_UDELAY); + } + + err = -ETIMEDOUT; + } + +out: + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); + + /* The 3XP seems to need a little extra time to complete the load + * of the sleep image before we can reliably boot it. Failure to + * do this occasionally results in a hung adapter after boot in + * typhoon_init_one() while trying to read the MAC address or + * putting the card to sleep. 3Com's driver waits 5ms, but + * that seems to be overkill. However, if we can sleep, we might + * as well give it that much time. Otherwise, we'll give it 500us, + * which should be enough (I've see it work well at 100us, but still + * saw occasional problems.) 
+ */ + if(wait_type == WaitSleep) + msleep(5); + else + udelay(500); + return err; +} + +static int +typhoon_wait_status(void __iomem *ioaddr, u32 wait_value) +{ + int i, err = 0; + + for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { + if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value) + goto out; + udelay(TYPHOON_UDELAY); + } + + err = -ETIMEDOUT; + +out: + return err; +} + +static inline void +typhoon_media_status(struct net_device *dev, struct resp_desc *resp) +{ + if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK) + netif_carrier_off(dev); + else + netif_carrier_on(dev); +} + +static inline void +typhoon_hello(struct typhoon *tp) +{ + struct basic_ring *ring = &tp->cmdRing; + struct cmd_desc *cmd; + + /* We only get a hello request if we've not sent anything to the + * card in a long while. If the lock is held, then we're in the + * process of issuing a command, so we don't need to respond. + */ + if(spin_trylock(&tp->command_lock)) { + cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite); + typhoon_inc_cmd_index(&ring->lastWrite, 1); + + INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP); + wmb(); + iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); + spin_unlock(&tp->command_lock); + } +} + +static int +typhoon_process_response(struct typhoon *tp, int resp_size, + struct resp_desc *resp_save) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct resp_desc *resp; + u8 *base = tp->respRing.ringBase; + int count, len, wrap_len; + u32 cleared; + u32 ready; + + cleared = le32_to_cpu(indexes->respCleared); + ready = le32_to_cpu(indexes->respReady); + while(cleared != ready) { + resp = (struct resp_desc *)(base + cleared); + count = resp->numDesc + 1; + if(resp_save && resp->seqNo) { + if(count > resp_size) { + resp_save->flags = TYPHOON_RESP_ERROR; + goto cleanup; + } + + wrap_len = 0; + len = count * sizeof(*resp); + if(unlikely(cleared + len > RESPONSE_RING_SIZE)) { + wrap_len = cleared + len - RESPONSE_RING_SIZE; + len = RESPONSE_RING_SIZE - cleared; + } + + memcpy(resp_save, resp, len); + if(unlikely(wrap_len)) { + resp_save += len / sizeof(*resp); + memcpy(resp_save, base, wrap_len); + } + + resp_save = NULL; + } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) { + typhoon_media_status(tp->dev, resp); + } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) { + typhoon_hello(tp); + } else { + netdev_err(tp->dev, + "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n", + le16_to_cpu(resp->cmd), + resp->numDesc, resp->flags, + le16_to_cpu(resp->parm1), + le32_to_cpu(resp->parm2), + le32_to_cpu(resp->parm3)); + } + +cleanup: + typhoon_inc_resp_index(&cleared, count); + } + + indexes->respCleared = cpu_to_le32(cleared); + wmb(); + return resp_save == NULL; +} + +static inline int +typhoon_num_free(int lastWrite, int lastRead, int ringSize) +{ + /* this works for all descriptors but rx_desc, as they are a + * different size than the cmd_desc -- everyone else is the same + */ + lastWrite /= sizeof(struct cmd_desc); + lastRead /= sizeof(struct cmd_desc); + return (ringSize + lastRead - lastWrite - 1) % ringSize; +} + +static inline int +typhoon_num_free_cmd(struct typhoon *tp) +{ + int lastWrite = tp->cmdRing.lastWrite; + int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared); + + return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES); +} + +static inline int +typhoon_num_free_resp(struct typhoon *tp) +{ + int respReady = le32_to_cpu(tp->indexes->respReady); + int respCleared = le32_to_cpu(tp->indexes->respCleared); + + return typhoon_num_free(respReady, 
respCleared, RESPONSE_ENTRIES); +} + +static inline int +typhoon_num_free_tx(struct transmit_ring *ring) +{ + /* if we start using the Hi Tx ring, this needs updating */ + return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES); +} + +static int +typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd, + int num_resp, struct resp_desc *resp) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct basic_ring *ring = &tp->cmdRing; + struct resp_desc local_resp; + int i, err = 0; + int got_resp; + int freeCmd, freeResp; + int len, wrap_len; + + spin_lock(&tp->command_lock); + + freeCmd = typhoon_num_free_cmd(tp); + freeResp = typhoon_num_free_resp(tp); + + if(freeCmd < num_cmd || freeResp < num_resp) { + netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n", + freeCmd, num_cmd, freeResp, num_resp); + err = -ENOMEM; + goto out; + } + + if(cmd->flags & TYPHOON_CMD_RESPOND) { + /* If we're expecting a response, but the caller hasn't given + * us a place to put it, we'll provide one. + */ + tp->awaiting_resp = 1; + if(resp == NULL) { + resp = &local_resp; + num_resp = 1; + } + } + + wrap_len = 0; + len = num_cmd * sizeof(*cmd); + if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) { + wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE; + len = COMMAND_RING_SIZE - ring->lastWrite; + } + + memcpy(ring->ringBase + ring->lastWrite, cmd, len); + if(unlikely(wrap_len)) { + struct cmd_desc *wrap_ptr = cmd; + wrap_ptr += len / sizeof(*cmd); + memcpy(ring->ringBase, wrap_ptr, wrap_len); + } + + typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); + + /* "I feel a presence... another warrior is on the mesa." + */ + wmb(); + iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); + typhoon_post_pci_writes(tp->ioaddr); + + if((cmd->flags & TYPHOON_CMD_RESPOND) == 0) + goto out; + + /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to + * preempt or do anything other than take interrupts. So, don't + * wait for a response unless you have to. + * + * I've thought about trying to sleep here, but we're called + * from many contexts that don't allow that. Also, given the way + * 3Com has implemented irq coalescing, we would likely timeout -- + * this has been observed in real life! + * + * The big killer is we have to wait to get stats from the card, + * though we could go to a periodic refresh of those if we don't + * mind them getting somewhat stale. The rest of the waiting + * commands occur during open/close/suspend/resume, so they aren't + * time critical. Creating SAs in the future will also have to + * wait here. + */ + got_resp = 0; + for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) { + if(indexes->respCleared != indexes->respReady) + got_resp = typhoon_process_response(tp, num_resp, + resp); + udelay(TYPHOON_UDELAY); + } + + if(!got_resp) { + err = -ETIMEDOUT; + goto out; + } + + /* Collect the error response even if we don't care about the + * rest of the response + */ + if(resp->flags & TYPHOON_RESP_ERROR) + err = -EIO; + +out: + if(tp->awaiting_resp) { + tp->awaiting_resp = 0; + smp_wmb(); + + /* Ugh. If a response was added to the ring between + * the call to typhoon_process_response() and the clearing + * of tp->awaiting_resp, we could have missed the interrupt + * and it could hang in the ring an indeterminate amount of + * time. So, check for it, and interrupt ourselves if this + * is the case. 
+ */ + if(indexes->respCleared != indexes->respReady) + iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT); + } + + spin_unlock(&tp->command_lock); + return err; +} + +static inline void +typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing, + u32 ring_dma) +{ + struct tcpopt_desc *tcpd; + u32 tcpd_offset = ring_dma; + + tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite); + tcpd_offset += txRing->lastWrite; + tcpd_offset += offsetof(struct tcpopt_desc, bytesTx); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG; + tcpd->numDesc = 1; + tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb)); + tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST; + tcpd->respAddrLo = cpu_to_le32(tcpd_offset); + tcpd->bytesTx = cpu_to_le32(skb->len); + tcpd->status = 0; +} + +static netdev_tx_t +typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + struct transmit_ring *txRing; + struct tx_desc *txd, *first_txd; + dma_addr_t skb_dma; + int numDesc; + + /* we have two rings to choose from, but we only use txLo for now + * If we start using the Hi ring as well, we'll need to update + * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(), + * and TXHI_ENTRIES to match, as well as update the TSO code below + * to get the right DMA address + */ + txRing = &tp->txLoRing; + + /* We need one descriptor for each fragment of the sk_buff, plus the + * one for the ->data area of it. + * + * The docs say a maximum of 16 fragment descriptors per TCP option + * descriptor, then make a new packet descriptor and option descriptor + * for the next 16 fragments. The engineers say just an option + * descriptor is needed. I've tested up to 26 fragments with a single + * packet descriptor/option descriptor combo, so I use that for now. + * + * If problems develop with TSO, check this first. + */ + numDesc = skb_shinfo(skb)->nr_frags + 1; + if (skb_is_gso(skb)) + numDesc++; + + /* When checking for free space in the ring, we need to also + * account for the initial Tx descriptor, and we always must leave + * at least one descriptor unused in the ring so that it doesn't + * wrap and look empty. + * + * The only time we should loop here is when we hit the race + * between marking the queue awake and updating the cleared index. + * Just loop and it will appear. This comes from the acenic driver. 
+ */ + while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2))) + smp_rmb(); + + first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID; + first_txd->numDesc = 0; + first_txd->len = 0; + first_txd->tx_addr = (u64)((unsigned long) skb); + first_txd->processFlags = 0; + + if(skb->ip_summed == CHECKSUM_PARTIAL) { + /* The 3XP will figure out if this is UDP/TCP */ + first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM; + first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM; + first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM; + } + + if (skb_vlan_tag_present(skb)) { + first_txd->processFlags |= + TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY; + first_txd->processFlags |= + cpu_to_le32(htons(skb_vlan_tag_get(skb)) << + TYPHOON_TX_PF_VLAN_TAG_SHIFT); + } + + if (skb_is_gso(skb)) { + first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; + first_txd->numDesc++; + + typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr); + } + + txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + /* No need to worry about padding packet -- the firmware pads + * it with zeros to ETH_ZLEN for us. + */ + if(skb_shinfo(skb)->nr_frags == 0) { + skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; + txd->len = cpu_to_le16(skb->len); + txd->frag.addr = cpu_to_le32(skb_dma); + txd->frag.addrHi = 0; + first_txd->numDesc++; + } else { + int i, len; + + len = skb_headlen(skb); + skb_dma = pci_map_single(tp->tx_pdev, skb->data, len, + PCI_DMA_TODEVICE); + txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; + txd->len = cpu_to_le16(len); + txd->frag.addr = cpu_to_le32(skb_dma); + txd->frag.addrHi = 0; + first_txd->numDesc++; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *frag_addr; + + txd = (struct tx_desc *) (txRing->ringBase + + txRing->lastWrite); + typhoon_inc_tx_index(&txRing->lastWrite, 1); + + len = skb_frag_size(frag); + frag_addr = skb_frag_address(frag); + skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len, + PCI_DMA_TODEVICE); + txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID; + txd->len = cpu_to_le16(len); + txd->frag.addr = cpu_to_le32(skb_dma); + txd->frag.addrHi = 0; + first_txd->numDesc++; + } + } + + /* Kick the 3XP + */ + wmb(); + iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister); + + /* If we don't have room to put the worst case packet on the + * queue, then we must stop the queue. We need 2 extra + * descriptors -- one to prevent ring wrap, and one for the + * Tx header. + */ + numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1; + + if(typhoon_num_free_tx(txRing) < (numDesc + 2)) { + netif_stop_queue(dev); + + /* A Tx complete IRQ could have gotten between, making + * the ring free again. Only need to recheck here, since + * Tx is serialized. 
+ */ + if(typhoon_num_free_tx(txRing) >= (numDesc + 2)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; +} + +static void +typhoon_set_rx_mode(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + struct cmd_desc xp_cmd; + u32 mc_filter[2]; + __le16 filter; + + filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; + if(dev->flags & IFF_PROMISC) { + filter |= TYPHOON_RX_FILTER_PROMISCOUS; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. */ + filter |= TYPHOON_RX_FILTER_ALL_MCAST; + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f; + mc_filter[bit >> 5] |= 1 << (bit & 0x1f); + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, + TYPHOON_CMD_SET_MULTICAST_HASH); + xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET; + xp_cmd.parm2 = cpu_to_le32(mc_filter[0]); + xp_cmd.parm3 = cpu_to_le32(mc_filter[1]); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + filter |= TYPHOON_RX_FILTER_MCAST_HASH; + } + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); + xp_cmd.parm1 = filter; + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); +} + +static int +typhoon_do_get_stats(struct typhoon *tp) +{ + struct net_device_stats *stats = &tp->stats; + struct net_device_stats *saved = &tp->stats_saved; + struct cmd_desc xp_cmd; + struct resp_desc xp_resp[7]; + struct stats_resp *s = (struct stats_resp *) xp_resp; + int err; + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS); + err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp); + if(err < 0) + return err; + + /* 3Com's Linux driver uses txMultipleCollisions as it's + * collisions value, but there is some other collision info as well... + * + * The extra status reported would be a good candidate for + * ethtool_ops->get_{strings,stats}() + */ + stats->tx_packets = le32_to_cpu(s->txPackets) + + saved->tx_packets; + stats->tx_bytes = le64_to_cpu(s->txBytes) + + saved->tx_bytes; + stats->tx_errors = le32_to_cpu(s->txCarrierLost) + + saved->tx_errors; + stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) + + saved->tx_carrier_errors; + stats->collisions = le32_to_cpu(s->txMultipleCollisions) + + saved->collisions; + stats->rx_packets = le32_to_cpu(s->rxPacketsGood) + + saved->rx_packets; + stats->rx_bytes = le64_to_cpu(s->rxBytesGood) + + saved->rx_bytes; + stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) + + saved->rx_fifo_errors; + stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) + + le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) + + saved->rx_errors; + stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) + + saved->rx_crc_errors; + stats->rx_length_errors = le32_to_cpu(s->rxOversized) + + saved->rx_length_errors; + tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ? + SPEED_100 : SPEED_10; + tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + + return 0; +} + +static struct net_device_stats * +typhoon_get_stats(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + struct net_device_stats *stats = &tp->stats; + struct net_device_stats *saved = &tp->stats_saved; + + smp_rmb(); + if(tp->card_state == Sleeping) + return saved; + + if(typhoon_do_get_stats(tp) < 0) { + netdev_err(dev, "error getting stats\n"); + return saved; + } + + return stats; +} + +static void +typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct typhoon *tp = netdev_priv(dev); + struct pci_dev *pci_dev = tp->pdev; + struct cmd_desc xp_cmd; + struct resp_desc xp_resp[3]; + + smp_rmb(); + if(tp->card_state == Sleeping) { + strlcpy(info->fw_version, "Sleep image", + sizeof(info->fw_version)); + } else { + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); + if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + strlcpy(info->fw_version, "Unknown runtime", + sizeof(info->fw_version)); + } else { + u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); + snprintf(info->fw_version, sizeof(info->fw_version), + "%02x.%03x.%03x", sleep_ver >> 24, + (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff); + } + } + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); +} + +static int +typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct typhoon *tp = netdev_priv(dev); + + cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg; + + switch (tp->xcvr_select) { + case TYPHOON_XCVR_10HALF: + cmd->advertising = ADVERTISED_10baseT_Half; + break; + case TYPHOON_XCVR_10FULL: + cmd->advertising = ADVERTISED_10baseT_Full; + break; + case TYPHOON_XCVR_100HALF: + cmd->advertising = ADVERTISED_100baseT_Half; + break; + case TYPHOON_XCVR_100FULL: + cmd->advertising = ADVERTISED_100baseT_Full; + break; + case TYPHOON_XCVR_AUTONEG: + cmd->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg; + break; + } + + if(tp->capabilities & TYPHOON_FIBER) { + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + cmd->port = PORT_FIBRE; + } else { + cmd->supported |= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + cmd->port = PORT_TP; + } + + /* need to get stats to make these link speed/duplex valid */ + typhoon_do_get_stats(tp); + ethtool_cmd_speed_set(cmd, tp->speed); + cmd->duplex = tp->duplex; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG) + cmd->autoneg = AUTONEG_ENABLE; + else + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + return 0; +} + +static int +typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct typhoon *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + struct cmd_desc xp_cmd; + __le16 xcvr; + int err; + + err = -EINVAL; + if (cmd->autoneg == AUTONEG_ENABLE) { + xcvr = TYPHOON_XCVR_AUTONEG; + } else { + if (cmd->duplex == DUPLEX_HALF) { + if (speed == SPEED_10) + xcvr = TYPHOON_XCVR_10HALF; + else if (speed == SPEED_100) + xcvr = TYPHOON_XCVR_100HALF; + else + goto out; + } else if (cmd->duplex == DUPLEX_FULL) { + if (speed == SPEED_10) + xcvr = TYPHOON_XCVR_10FULL; + else if (speed == SPEED_100) + xcvr = TYPHOON_XCVR_100FULL; + else + goto out; + } else + goto 
out; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); + xp_cmd.parm1 = xcvr; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto out; + + tp->xcvr_select = xcvr; + if(cmd->autoneg == AUTONEG_ENABLE) { + tp->speed = 0xff; /* invalid */ + tp->duplex = 0xff; /* invalid */ + } else { + tp->speed = speed; + tp->duplex = cmd->duplex; + } + +out: + return err; +} + +static void +typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct typhoon *tp = netdev_priv(dev); + + wol->supported = WAKE_PHY | WAKE_MAGIC; + wol->wolopts = 0; + if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT) + wol->wolopts |= WAKE_PHY; + if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) + wol->wolopts |= WAKE_MAGIC; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int +typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct typhoon *tp = netdev_priv(dev); + + if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + + tp->wol_events = 0; + if(wol->wolopts & WAKE_PHY) + tp->wol_events |= TYPHOON_WAKE_LINK_EVENT; + if(wol->wolopts & WAKE_MAGIC) + tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT; + + return 0; +} + +static void +typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + ering->rx_max_pending = RXENT_ENTRIES; + ering->tx_max_pending = TXLO_ENTRIES - 1; + + ering->rx_pending = RXENT_ENTRIES; + ering->tx_pending = TXLO_ENTRIES - 1; +} + +static const struct ethtool_ops typhoon_ethtool_ops = { + .get_settings = typhoon_get_settings, + .set_settings = typhoon_set_settings, + .get_drvinfo = typhoon_get_drvinfo, + .get_wol = typhoon_get_wol, + .set_wol = typhoon_set_wol, + .get_link = ethtool_op_get_link, + .get_ringparam = typhoon_get_ringparam, +}; + +static int +typhoon_wait_interrupt(void __iomem *ioaddr) +{ + int i, err = 0; + + for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { + if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) & + TYPHOON_INTR_BOOTCMD) + goto out; + udelay(TYPHOON_UDELAY); + } + + err = -ETIMEDOUT; + +out: + iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); + return err; +} + +#define shared_offset(x) offsetof(struct typhoon_shared, x) + +static void +typhoon_init_interface(struct typhoon *tp) +{ + struct typhoon_interface *iface = &tp->shared->iface; + dma_addr_t shared_dma; + + memset(tp->shared, 0, sizeof(struct typhoon_shared)); + + /* The *Hi members of iface are all init'd to zero by the memset(). 
+ */ + shared_dma = tp->shared_dma + shared_offset(indexes); + iface->ringIndex = cpu_to_le32(shared_dma); + + shared_dma = tp->shared_dma + shared_offset(txLo); + iface->txLoAddr = cpu_to_le32(shared_dma); + iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc)); + + shared_dma = tp->shared_dma + shared_offset(txHi); + iface->txHiAddr = cpu_to_le32(shared_dma); + iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc)); + + shared_dma = tp->shared_dma + shared_offset(rxBuff); + iface->rxBuffAddr = cpu_to_le32(shared_dma); + iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES * + sizeof(struct rx_free)); + + shared_dma = tp->shared_dma + shared_offset(rxLo); + iface->rxLoAddr = cpu_to_le32(shared_dma); + iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); + + shared_dma = tp->shared_dma + shared_offset(rxHi); + iface->rxHiAddr = cpu_to_le32(shared_dma); + iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc)); + + shared_dma = tp->shared_dma + shared_offset(cmd); + iface->cmdAddr = cpu_to_le32(shared_dma); + iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE); + + shared_dma = tp->shared_dma + shared_offset(resp); + iface->respAddr = cpu_to_le32(shared_dma); + iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE); + + shared_dma = tp->shared_dma + shared_offset(zeroWord); + iface->zeroAddr = cpu_to_le32(shared_dma); + + tp->indexes = &tp->shared->indexes; + tp->txLoRing.ringBase = (u8 *) tp->shared->txLo; + tp->txHiRing.ringBase = (u8 *) tp->shared->txHi; + tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo; + tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi; + tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff; + tp->cmdRing.ringBase = (u8 *) tp->shared->cmd; + tp->respRing.ringBase = (u8 *) tp->shared->resp; + + tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY; + tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY; + + tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr); + tp->card_state = Sleeping; + + tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM; + tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON; + tp->offload |= TYPHOON_OFFLOAD_VLAN; + + spin_lock_init(&tp->command_lock); + + /* Force the writes to the shared memory area out before continuing. 
*/ + wmb(); +} + +static void +typhoon_init_rings(struct typhoon *tp) +{ + memset(tp->indexes, 0, sizeof(struct typhoon_indexes)); + + tp->txLoRing.lastWrite = 0; + tp->txHiRing.lastWrite = 0; + tp->rxLoRing.lastWrite = 0; + tp->rxHiRing.lastWrite = 0; + tp->rxBuffRing.lastWrite = 0; + tp->cmdRing.lastWrite = 0; + tp->respRing.lastWrite = 0; + + tp->txLoRing.lastRead = 0; + tp->txHiRing.lastRead = 0; +} + +static const struct firmware *typhoon_fw; + +static int +typhoon_request_firmware(struct typhoon *tp) +{ + const struct typhoon_file_header *fHdr; + const struct typhoon_section_header *sHdr; + const u8 *image_data; + u32 numSections; + u32 section_len; + u32 remaining; + int err; + + if (typhoon_fw) + return 0; + + err = reject_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev); + if (err) { + netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", + FIRMWARE_NAME); + return err; + } + + image_data = typhoon_fw->data; + remaining = typhoon_fw->size; + if (remaining < sizeof(struct typhoon_file_header)) + goto invalid_fw; + + fHdr = (struct typhoon_file_header *) image_data; + if (memcmp(fHdr->tag, "TYPHOON", 8)) + goto invalid_fw; + + numSections = le32_to_cpu(fHdr->numSections); + image_data += sizeof(struct typhoon_file_header); + remaining -= sizeof(struct typhoon_file_header); + + while (numSections--) { + if (remaining < sizeof(struct typhoon_section_header)) + goto invalid_fw; + + sHdr = (struct typhoon_section_header *) image_data; + image_data += sizeof(struct typhoon_section_header); + section_len = le32_to_cpu(sHdr->len); + + if (remaining < section_len) + goto invalid_fw; + + image_data += section_len; + remaining -= section_len; + } + + return 0; + +invalid_fw: + netdev_err(tp->dev, "Invalid firmware image\n"); + release_firmware(typhoon_fw); + typhoon_fw = NULL; + return -EINVAL; +} + +static int +typhoon_download_firmware(struct typhoon *tp) +{ + void __iomem *ioaddr = tp->ioaddr; + struct pci_dev *pdev = tp->pdev; + const struct typhoon_file_header *fHdr; + const struct typhoon_section_header *sHdr; + const u8 *image_data; + void *dpage; + dma_addr_t dpage_dma; + __sum16 csum; + u32 irqEnabled; + u32 irqMasked; + u32 numSections; + u32 section_len; + u32 len; + u32 load_addr; + u32 hmac; + int i; + int err; + + image_data = typhoon_fw->data; + fHdr = (struct typhoon_file_header *) image_data; + + /* Cannot just map the firmware image using pci_map_single() as + * the firmware is vmalloc()'d and may not be physically contiguous, + * so we allocate some consistent memory to copy the sections into. 
+ */ + err = -ENOMEM; + dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma); + if(!dpage) { + netdev_err(tp->dev, "no DMA mem for firmware\n"); + goto err_out; + } + + irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE); + iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD, + ioaddr + TYPHOON_REG_INTR_ENABLE); + irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD, + ioaddr + TYPHOON_REG_INTR_MASK); + + err = -ETIMEDOUT; + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + netdev_err(tp->dev, "card ready timeout\n"); + goto err_out_irq; + } + + numSections = le32_to_cpu(fHdr->numSections); + load_addr = le32_to_cpu(fHdr->startAddr); + + iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); + iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR); + hmac = le32_to_cpu(fHdr->hmacDigest[0]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0); + hmac = le32_to_cpu(fHdr->hmacDigest[1]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1); + hmac = le32_to_cpu(fHdr->hmacDigest[2]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2); + hmac = le32_to_cpu(fHdr->hmacDigest[3]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3); + hmac = le32_to_cpu(fHdr->hmacDigest[4]); + iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND); + + image_data += sizeof(struct typhoon_file_header); + + /* The ioread32() in typhoon_wait_interrupt() will force the + * last write to the command register to post, so + * we don't need a typhoon_post_pci_writes() after it. + */ + for(i = 0; i < numSections; i++) { + sHdr = (struct typhoon_section_header *) image_data; + image_data += sizeof(struct typhoon_section_header); + load_addr = le32_to_cpu(sHdr->startAddr); + section_len = le32_to_cpu(sHdr->len); + + while(section_len) { + len = min_t(u32, section_len, PAGE_SIZE); + + if(typhoon_wait_interrupt(ioaddr) < 0 || + ioread32(ioaddr + TYPHOON_REG_STATUS) != + TYPHOON_STATUS_WAITING_FOR_SEGMENT) { + netdev_err(tp->dev, "segment ready timeout\n"); + goto err_out_irq; + } + + /* Do an pseudo IPv4 checksum on the data -- first + * need to convert each u16 to cpu order before + * summing. Fortunately, due to the properties of + * the checksum, we can do this once, at the end. 
+ */ + csum = csum_fold(csum_partial_copy_nocheck(image_data, + dpage, len, + 0)); + + iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH); + iowrite32(le16_to_cpu((__force __le16)csum), + ioaddr + TYPHOON_REG_BOOT_CHECKSUM); + iowrite32(load_addr, + ioaddr + TYPHOON_REG_BOOT_DEST_ADDR); + iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI); + iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE, + ioaddr + TYPHOON_REG_COMMAND); + + image_data += len; + load_addr += len; + section_len -= len; + } + } + + if(typhoon_wait_interrupt(ioaddr) < 0 || + ioread32(ioaddr + TYPHOON_REG_STATUS) != + TYPHOON_STATUS_WAITING_FOR_SEGMENT) { + netdev_err(tp->dev, "final segment ready timeout\n"); + goto err_out_irq; + } + + iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND); + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { + netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n", + ioread32(ioaddr + TYPHOON_REG_STATUS)); + goto err_out_irq; + } + + err = 0; + +err_out_irq: + iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE); + + pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma); + +err_out: + return err; +} + +static int +typhoon_boot_3XP(struct typhoon *tp, u32 initial_status) +{ + void __iomem *ioaddr = tp->ioaddr; + + if(typhoon_wait_status(ioaddr, initial_status) < 0) { + netdev_err(tp->dev, "boot ready timeout\n"); + goto out_timeout; + } + + iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI); + iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD, + ioaddr + TYPHOON_REG_COMMAND); + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) { + netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n", + ioread32(ioaddr + TYPHOON_REG_STATUS)); + goto out_timeout; + } + + /* Clear the Transmit and Command ready registers + */ + iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY); + iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY); + iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY); + typhoon_post_pci_writes(ioaddr); + iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND); + + return 0; + +out_timeout: + return -ETIMEDOUT; +} + +static u32 +typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, + volatile __le32 * index) +{ + u32 lastRead = txRing->lastRead; + struct tx_desc *tx; + dma_addr_t skb_dma; + int dma_len; + int type; + + while(lastRead != le32_to_cpu(*index)) { + tx = (struct tx_desc *) (txRing->ringBase + lastRead); + type = tx->flags & TYPHOON_TYPE_MASK; + + if(type == TYPHOON_TX_DESC) { + /* This tx_desc describes a packet. + */ + unsigned long ptr = tx->tx_addr; + struct sk_buff *skb = (struct sk_buff *) ptr; + dev_kfree_skb_irq(skb); + } else if(type == TYPHOON_FRAG_DESC) { + /* This tx_desc describes a memory mapping. Free it. + */ + skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr); + dma_len = le16_to_cpu(tx->len); + pci_unmap_single(tp->pdev, skb_dma, dma_len, + PCI_DMA_TODEVICE); + } + + tx->flags = 0; + typhoon_inc_tx_index(&lastRead, 1); + } + + return lastRead; +} + +static void +typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, + volatile __le32 * index) +{ + u32 lastRead; + int numDesc = MAX_SKB_FRAGS + 1; + + /* This will need changing if we start to use the Hi Tx ring. 
*/ + lastRead = typhoon_clean_tx(tp, txRing, index); + if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite, + lastRead, TXLO_ENTRIES) > (numDesc + 2)) + netif_wake_queue(tp->dev); + + txRing->lastRead = lastRead; + smp_wmb(); +} + +static void +typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; + struct basic_ring *ring = &tp->rxBuffRing; + struct rx_free *r; + + if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == + le32_to_cpu(indexes->rxBuffCleared)) { + /* no room in ring, just drop the skb + */ + dev_kfree_skb_any(rxb->skb); + rxb->skb = NULL; + return; + } + + r = (struct rx_free *) (ring->ringBase + ring->lastWrite); + typhoon_inc_rxfree_index(&ring->lastWrite, 1); + r->virtAddr = idx; + r->physAddr = cpu_to_le32(rxb->dma_addr); + + /* Tell the card about it */ + wmb(); + indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); +} + +static int +typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct rxbuff_ent *rxb = &tp->rxbuffers[idx]; + struct basic_ring *ring = &tp->rxBuffRing; + struct rx_free *r; + struct sk_buff *skb; + dma_addr_t dma_addr; + + rxb->skb = NULL; + + if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) == + le32_to_cpu(indexes->rxBuffCleared)) + return -ENOMEM; + + skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ); + if(!skb) + return -ENOMEM; + +#if 0 + /* Please, 3com, fix the firmware to allow DMA to a unaligned + * address! Pretty please? + */ + skb_reserve(skb, 2); +#endif + + dma_addr = pci_map_single(tp->pdev, skb->data, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + + /* Since no card does 64 bit DAC, the high bits will never + * change from zero. 
+ */ + r = (struct rx_free *) (ring->ringBase + ring->lastWrite); + typhoon_inc_rxfree_index(&ring->lastWrite, 1); + r->virtAddr = idx; + r->physAddr = cpu_to_le32(dma_addr); + rxb->skb = skb; + rxb->dma_addr = dma_addr; + + /* Tell the card about it */ + wmb(); + indexes->rxBuffReady = cpu_to_le32(ring->lastWrite); + return 0; +} + +static int +typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready, + volatile __le32 * cleared, int budget) +{ + struct rx_desc *rx; + struct sk_buff *skb, *new_skb; + struct rxbuff_ent *rxb; + dma_addr_t dma_addr; + u32 local_ready; + u32 rxaddr; + int pkt_len; + u32 idx; + __le32 csum_bits; + int received; + + received = 0; + local_ready = le32_to_cpu(*ready); + rxaddr = le32_to_cpu(*cleared); + while(rxaddr != local_ready && budget > 0) { + rx = (struct rx_desc *) (rxRing->ringBase + rxaddr); + idx = rx->addr; + rxb = &tp->rxbuffers[idx]; + skb = rxb->skb; + dma_addr = rxb->dma_addr; + + typhoon_inc_rx_index(&rxaddr, 1); + + if(rx->flags & TYPHOON_RX_ERROR) { + typhoon_recycle_rx_skb(tp, idx); + continue; + } + + pkt_len = le16_to_cpu(rx->frameLen); + + if(pkt_len < rx_copybreak && + (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) { + skb_reserve(new_skb, 2); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, + PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(new_skb, skb->data, pkt_len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, + PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + skb_put(new_skb, pkt_len); + typhoon_recycle_rx_skb(tp, idx); + } else { + new_skb = skb; + skb_put(new_skb, pkt_len); + pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + typhoon_alloc_rx_skb(tp, idx); + } + new_skb->protocol = eth_type_trans(new_skb, tp->dev); + csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD | + TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD); + if(csum_bits == + (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) || + csum_bits == + (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) { + new_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb_checksum_none_assert(new_skb); + + if (rx->rxStatus & TYPHOON_RX_VLAN) + __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q), + ntohl(rx->vlanTag) & 0xffff); + netif_receive_skb(new_skb); + + received++; + budget--; + } + *cleared = cpu_to_le32(rxaddr); + + return received; +} + +static void +typhoon_fill_free_ring(struct typhoon *tp) +{ + u32 i; + + for(i = 0; i < RXENT_ENTRIES; i++) { + struct rxbuff_ent *rxb = &tp->rxbuffers[i]; + if(rxb->skb) + continue; + if(typhoon_alloc_rx_skb(tp, i) < 0) + break; + } +} + +static int +typhoon_poll(struct napi_struct *napi, int budget) +{ + struct typhoon *tp = container_of(napi, struct typhoon, napi); + struct typhoon_indexes *indexes = tp->indexes; + int work_done; + + rmb(); + if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) + typhoon_process_response(tp, 0, NULL); + + if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) + typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); + + work_done = 0; + + if(indexes->rxHiCleared != indexes->rxHiReady) { + work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, + &indexes->rxHiCleared, budget); + } + + if(indexes->rxLoCleared != indexes->rxLoReady) { + work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, + &indexes->rxLoCleared, budget - work_done); + } + + if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { + /* rxBuff ring is empty, try to fill it. 
*/ + typhoon_fill_free_ring(tp); + } + + if (work_done < budget) { + napi_complete(napi); + iowrite32(TYPHOON_INTR_NONE, + tp->ioaddr + TYPHOON_REG_INTR_MASK); + typhoon_post_pci_writes(tp->ioaddr); + } + + return work_done; +} + +static irqreturn_t +typhoon_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct typhoon *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->ioaddr; + u32 intr_status; + + intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + if(!(intr_status & TYPHOON_INTR_HOST_INT)) + return IRQ_NONE; + + iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); + + if (napi_schedule_prep(&tp->napi)) { + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + typhoon_post_pci_writes(ioaddr); + __napi_schedule(&tp->napi); + } else { + netdev_err(dev, "Error, poll already scheduled\n"); + } + return IRQ_HANDLED; +} + +static void +typhoon_free_rx_rings(struct typhoon *tp) +{ + u32 i; + + for(i = 0; i < RXENT_ENTRIES; i++) { + struct rxbuff_ent *rxb = &tp->rxbuffers[i]; + if(rxb->skb) { + pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxb->skb); + rxb->skb = NULL; + } + } +} + +static int +typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) +{ + struct pci_dev *pdev = tp->pdev; + void __iomem *ioaddr = tp->ioaddr; + struct cmd_desc xp_cmd; + int err; + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS); + xp_cmd.parm1 = events; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) { + netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n", + err); + return err; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) { + netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err); + return err; + } + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0) + return -ETIMEDOUT; + + /* Since we cannot monitor the status of the link while sleeping, + * tell the world it went away. + */ + netif_carrier_off(tp->dev); + + pci_enable_wake(tp->pdev, state, 1); + pci_disable_device(pdev); + return pci_set_power_state(pdev, state); +} + +static int +typhoon_wakeup(struct typhoon *tp, int wait_type) +{ + struct pci_dev *pdev = tp->pdev; + void __iomem *ioaddr = tp->ioaddr; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* Post 2.x.x versions of the Sleep Image require a reset before + * we can download the Runtime Image. But let's not make users of + * the old firmware pay for the reset. 
+ */ + iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND); + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 || + (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET)) + return typhoon_reset(ioaddr, wait_type); + + return 0; +} + +static int +typhoon_start_runtime(struct typhoon *tp) +{ + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->ioaddr; + struct cmd_desc xp_cmd; + int err; + + typhoon_init_rings(tp); + typhoon_fill_free_ring(tp); + + err = typhoon_download_firmware(tp); + if(err < 0) { + netdev_err(tp->dev, "cannot load runtime on 3XP\n"); + goto error_out; + } + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) { + netdev_err(tp->dev, "cannot boot 3XP\n"); + err = -EIO; + goto error_out; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE); + xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + /* Disable IRQ coalescing -- we can reenable it when 3Com gives + * us some more information on how to control it. + */ + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL); + xp_cmd.parm1 = 0; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); + xp_cmd.parm1 = tp->xcvr_select; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE); + xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS); + xp_cmd.parm2 = tp->offload; + xp_cmd.parm3 = tp->offload; + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + typhoon_set_rx_mode(dev); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE); + err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + if(err < 0) + goto error_out; + + tp->card_state = Running; + smp_wmb(); + + iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); + iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK); + typhoon_post_pci_writes(ioaddr); + + return 0; + +error_out: + typhoon_reset(ioaddr, WaitNoSleep); + typhoon_free_rx_rings(tp); + typhoon_init_rings(tp); + return err; +} + +static int +typhoon_stop_runtime(struct typhoon *tp, int wait_type) +{ + struct typhoon_indexes *indexes = tp->indexes; + struct transmit_ring *txLo = &tp->txLoRing; + void __iomem *ioaddr = tp->ioaddr; + struct cmd_desc xp_cmd; + int i; + + /* Disable interrupts early, since we can't schedule a poll + * when called with !netif_running(). This will be posted + * when we force the posting of the command. + */ + iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + /* Wait 1/2 sec for any outstanding transmits to occur + * We'll cleanup after the reset if this times out. 
+ */ + for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { + if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite)) + break; + udelay(TYPHOON_UDELAY); + } + + if(i == TYPHOON_WAIT_TIMEOUT) + netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n"); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + /* save the statistics so when we bring the interface up again, + * the values reported to userspace are correct. + */ + tp->card_state = Sleeping; + smp_wmb(); + typhoon_do_get_stats(tp); + memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats)); + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT); + typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); + + if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0) + netdev_err(tp->dev, "timed out waiting for 3XP to halt\n"); + + if(typhoon_reset(ioaddr, wait_type) < 0) { + netdev_err(tp->dev, "unable to reset 3XP\n"); + return -ETIMEDOUT; + } + + /* cleanup any outstanding Tx packets */ + if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) { + indexes->txLoCleared = cpu_to_le32(txLo->lastWrite); + typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared); + } + + return 0; +} + +static void +typhoon_tx_timeout(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + + if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) { + netdev_warn(dev, "could not reset in tx timeout\n"); + goto truly_dead; + } + + /* If we ever start using the Hi ring, it will need cleaning too */ + typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared); + typhoon_free_rx_rings(tp); + + if(typhoon_start_runtime(tp) < 0) { + netdev_err(dev, "could not start runtime in tx timeout\n"); + goto truly_dead; + } + + netif_wake_queue(dev); + return; + +truly_dead: + /* Reset the hardware, and turn off carrier to avoid more timeouts */ + typhoon_reset(tp->ioaddr, NoWait); + netif_carrier_off(dev); +} + +static int +typhoon_open(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + int err; + + err = typhoon_request_firmware(tp); + if (err) + goto out; + + err = typhoon_wakeup(tp, WaitSleep); + if(err < 0) { + netdev_err(dev, "unable to wakeup device\n"); + goto out_sleep; + } + + err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED, + dev->name, dev); + if(err < 0) + goto out_sleep; + + napi_enable(&tp->napi); + + err = typhoon_start_runtime(tp); + if(err < 0) { + napi_disable(&tp->napi); + goto out_irq; + } + + netif_start_queue(dev); + return 0; + +out_irq: + free_irq(dev->irq, dev); + +out_sleep: + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + netdev_err(dev, "unable to reboot into sleep img\n"); + typhoon_reset(tp->ioaddr, NoWait); + goto out; + } + + if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) + netdev_err(dev, "unable to go back to sleep\n"); + +out: + return err; +} + +static int +typhoon_close(struct net_device *dev) +{ + struct typhoon *tp = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&tp->napi); + + if(typhoon_stop_runtime(tp, WaitSleep) < 0) + netdev_err(dev, "unable to stop runtime\n"); + + /* Make sure there is no irq handler running on a different CPU. 
*/ + free_irq(dev->irq, dev); + + typhoon_free_rx_rings(tp); + typhoon_init_rings(tp); + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) + netdev_err(dev, "unable to boot sleep image\n"); + + if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) + netdev_err(dev, "unable to put card to sleep\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int +typhoon_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct typhoon *tp = netdev_priv(dev); + + /* If we're down, resume when we are upped. + */ + if(!netif_running(dev)) + return 0; + + if(typhoon_wakeup(tp, WaitNoSleep) < 0) { + netdev_err(dev, "critical: could not wake up in resume\n"); + goto reset; + } + + if(typhoon_start_runtime(tp) < 0) { + netdev_err(dev, "critical: could not start runtime in resume\n"); + goto reset; + } + + netif_device_attach(dev); + return 0; + +reset: + typhoon_reset(tp->ioaddr, NoWait); + return -EBUSY; +} + +static int +typhoon_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct typhoon *tp = netdev_priv(dev); + struct cmd_desc xp_cmd; + + /* If we're down, we're already suspended. + */ + if(!netif_running(dev)) + return 0; + + /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */ + if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) + netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n"); + + netif_device_detach(dev); + + if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) { + netdev_err(dev, "unable to stop runtime\n"); + goto need_resume; + } + + typhoon_free_rx_rings(tp); + typhoon_init_rings(tp); + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + netdev_err(dev, "unable to boot sleep image\n"); + goto need_resume; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); + if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { + netdev_err(dev, "unable to set mac address in suspend\n"); + goto need_resume; + } + + INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER); + xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; + if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { + netdev_err(dev, "unable to set rx filter in suspend\n"); + goto need_resume; + } + + if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) { + netdev_err(dev, "unable to put card to sleep\n"); + goto need_resume; + } + + return 0; + +need_resume: + typhoon_resume(pdev); + return -EBUSY; +} +#endif + +static int +typhoon_test_mmio(struct pci_dev *pdev) +{ + void __iomem *ioaddr = pci_iomap(pdev, 1, 128); + int mode = 0; + u32 val; + + if(!ioaddr) + goto out; + + if(ioread32(ioaddr + TYPHOON_REG_STATUS) != + TYPHOON_STATUS_WAITING_FOR_HOST) + goto out_unmap; + + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); + + /* Ok, see if we can change our interrupt status register by + * sending ourselves an interrupt. If so, then MMIO works. + * The 50usec delay is arbitrary -- it could probably be smaller. 
+ */ + val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + if((val & TYPHOON_INTR_SELF) == 0) { + iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT); + ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + udelay(50); + val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + if(val & TYPHOON_INTR_SELF) + mode = 1; + } + + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); + iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); + iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); + ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); + +out_unmap: + pci_iounmap(pdev, ioaddr); + +out: + if(!mode) + pr_info("%s: falling back to port IO\n", pci_name(pdev)); + return mode; +} + +static const struct net_device_ops typhoon_netdev_ops = { + .ndo_open = typhoon_open, + .ndo_stop = typhoon_close, + .ndo_start_xmit = typhoon_start_tx, + .ndo_set_rx_mode = typhoon_set_rx_mode, + .ndo_tx_timeout = typhoon_tx_timeout, + .ndo_get_stats = typhoon_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int +typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct typhoon *tp; + int card_id = (int) ent->driver_data; + void __iomem *ioaddr; + void *shared; + dma_addr_t shared_dma; + struct cmd_desc xp_cmd; + struct resp_desc xp_resp[3]; + int err = 0; + const char *err_msg; + + dev = alloc_etherdev(sizeof(*tp)); + if(dev == NULL) { + err_msg = "unable to alloc new net device"; + err = -ENOMEM; + goto error_out; + } + SET_NETDEV_DEV(dev, &pdev->dev); + + err = pci_enable_device(pdev); + if(err < 0) { + err_msg = "unable to enable device"; + goto error_out_dev; + } + + err = pci_set_mwi(pdev); + if(err < 0) { + err_msg = "unable to set MWI"; + goto error_out_disable; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if(err < 0) { + err_msg = "No usable DMA configuration"; + goto error_out_mwi; + } + + /* sanity checks on IO and MMIO BARs + */ + if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { + err_msg = "region #1 not a PCI IO resource, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + if(pci_resource_len(pdev, 0) < 128) { + err_msg = "Invalid PCI IO region size, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + err_msg = "region #1 not a PCI MMIO resource, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + if(pci_resource_len(pdev, 1) < 128) { + err_msg = "Invalid PCI MMIO region size, aborting"; + err = -ENODEV; + goto error_out_mwi; + } + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if(err < 0) { + err_msg = "could not request regions"; + goto error_out_mwi; + } + + /* map our registers + */ + if(use_mmio != 0 && use_mmio != 1) + use_mmio = typhoon_test_mmio(pdev); + + ioaddr = pci_iomap(pdev, use_mmio, 128); + if (!ioaddr) { + err_msg = "cannot remap registers, aborting"; + err = -EIO; + goto error_out_regions; + } + + /* allocate pci dma space for rx and tx descriptor rings + */ + shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared), + &shared_dma); + if(!shared) { + err_msg = "could not allocate DMA memory"; + err = -ENOMEM; + goto error_out_remap; + } + + dev->irq = pdev->irq; + tp = netdev_priv(dev); + tp->shared = shared; + tp->shared_dma = shared_dma; + tp->pdev = pdev; + tp->tx_pdev = pdev; + tp->ioaddr = ioaddr; + tp->tx_ioaddr = ioaddr; + tp->dev = dev; + + /* Init sequence: + * 1) Reset the adapter to clear any bad juju + * 2) Reload the sleep 
image + * 3) Boot the sleep image + * 4) Get the hardware address. + * 5) Put the card to sleep. + */ + if (typhoon_reset(ioaddr, WaitSleep) < 0) { + err_msg = "could not reset 3XP"; + err = -EIO; + goto error_out_dma; + } + + /* Now that we've reset the 3XP and are sure it's not going to + * write all over memory, enable bus mastering, and save our + * state for resuming after a suspend. + */ + pci_set_master(pdev); + pci_save_state(pdev); + + typhoon_init_interface(tp); + typhoon_init_rings(tp); + + if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { + err_msg = "cannot boot 3XP sleep image"; + err = -EIO; + goto error_out_reset; + } + + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); + if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { + err_msg = "cannot read MAC address"; + err = -EIO; + goto error_out_reset; + } + + *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); + *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); + + if(!is_valid_ether_addr(dev->dev_addr)) { + err_msg = "Could not obtain valid ethernet address, aborting"; + goto error_out_reset; + } + + /* Read the Sleep Image version last, so the response is valid + * later when we print out the version reported. + */ + INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); + if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { + err_msg = "Could not get Sleep Image version"; + goto error_out_reset; + } + + tp->capabilities = typhoon_card_info[card_id].capabilities; + tp->xcvr_select = TYPHOON_XCVR_AUTONEG; + + /* Typhoon 1.0 Sleep Images return one response descriptor to the + * READ_VERSIONS command. Those versions are OK after waking up + * from sleep without needing a reset. Typhoon 1.1+ Sleep Images + * seem to need a little extra help to get started. Since we don't + * know how to nudge it along, just kick it. + */ + if(xp_resp[0].numDesc != 0) + tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; + + if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { + err_msg = "cannot put adapter to sleep"; + err = -EIO; + goto error_out_reset; + } + + /* The chip-specific entries in the device structure. */ + dev->netdev_ops = &typhoon_netdev_ops; + netif_napi_add(dev, &tp->napi, typhoon_poll, 16); + dev->watchdog_timeo = TX_TIMEOUT; + + dev->ethtool_ops = &typhoon_ethtool_ops; + + /* We can handle scatter gather, up to 16 entries, and + * we can do IP checksumming (only version 4, doh...) + * + * There's no way to turn off the RX VLAN offloading and stripping + * on the current 3XP firmware -- it does not respect the offload + * settings -- so we only allow the user to toggle the TX processing. + */ + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HW_VLAN_CTAG_TX; + dev->features = dev->hw_features | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; + + if(register_netdev(dev) < 0) { + err_msg = "unable to register netdev"; + goto error_out_reset; + } + + pci_set_drvdata(pdev, dev); + + netdev_info(dev, "%s at %s 0x%llx, %pM\n", + typhoon_card_info[card_id].name, + use_mmio ? "MMIO" : "IO", + (unsigned long long)pci_resource_start(pdev, use_mmio), + dev->dev_addr); + + /* xp_resp still contains the response to the READ_VERSIONS command. + * For debugging, let the user know what version he has. + */ + if(xp_resp[0].numDesc == 0) { + /* This is the Typhoon 1.0 type Sleep Image, last 16 bits + * of version is Month/Day of build. 
+ */ + u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff; + netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n", + monthday >> 8, monthday & 0xff); + } else if(xp_resp[0].numDesc == 2) { + /* This is the Typhoon 1.1+ type Sleep Image + */ + u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); + u8 *ver_string = (u8 *) &xp_resp[1]; + ver_string[25] = 0; + netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n", + sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, + sleep_ver & 0xfff, ver_string); + } else { + netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n", + xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2)); + } + + return 0; + +error_out_reset: + typhoon_reset(ioaddr, NoWait); + +error_out_dma: + pci_free_consistent(pdev, sizeof(struct typhoon_shared), + shared, shared_dma); +error_out_remap: + pci_iounmap(pdev, ioaddr); +error_out_regions: + pci_release_regions(pdev); +error_out_mwi: + pci_clear_mwi(pdev); +error_out_disable: + pci_disable_device(pdev); +error_out_dev: + free_netdev(dev); +error_out: + pr_err("%s: %s\n", pci_name(pdev), err_msg); + return err; +} + +static void +typhoon_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct typhoon *tp = netdev_priv(dev); + + unregister_netdev(dev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + typhoon_reset(tp->ioaddr, NoWait); + pci_iounmap(pdev, tp->ioaddr); + pci_free_consistent(pdev, sizeof(struct typhoon_shared), + tp->shared, tp->shared_dma); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +static struct pci_driver typhoon_driver = { + .name = KBUILD_MODNAME, + .id_table = typhoon_pci_tbl, + .probe = typhoon_init_one, + .remove = typhoon_remove_one, +#ifdef CONFIG_PM + .suspend = typhoon_suspend, + .resume = typhoon_resume, +#endif +}; + +static int __init +typhoon_init(void) +{ + return pci_register_driver(&typhoon_driver); +} + +static void __exit +typhoon_cleanup(void) +{ + release_firmware(typhoon_fw); + pci_unregister_driver(&typhoon_driver); +} + +module_init(typhoon_init); +module_exit(typhoon_cleanup); diff --git a/drivers/net/ethernet/3com/typhoon.h b/drivers/net/ethernet/3com/typhoon.h new file mode 100644 index 000000000..88187fc84 --- /dev/null +++ b/drivers/net/ethernet/3com/typhoon.h @@ -0,0 +1,624 @@ +/* typhoon.h: chip info for the 3Com 3CR990 family of controllers */ +/* + Written 2002-2003 by David Dillow + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + This software is available on a public web site. It may enable + cryptographic capabilities of the 3Com hardware, and may be + exported from the United States under License Exception "TSU" + pursuant to 15 C.F.R. Section 740.13(e). + + This work was funded by the National Library of Medicine under + the Department of Energy project number 0274DD06D1 and NLM project + number Y1-LM-2015-01. +*/ + +/* All Typhoon ring positions are specificed in bytes, and point to the + * first "clean" entry in the ring -- ie the next entry we use for whatever + * purpose. 
+ */ + +/* The Typhoon basic ring + * ringBase: where this ring lives (our virtual address) + * lastWrite: the next entry we'll use + */ +struct basic_ring { + u8 *ringBase; + u32 lastWrite; +}; + +/* The Typoon transmit ring -- same as a basic ring, plus: + * lastRead: where we're at in regard to cleaning up the ring + * writeRegister: register to use for writing (different for Hi & Lo rings) + */ +struct transmit_ring { + u8 *ringBase; + u32 lastWrite; + u32 lastRead; + int writeRegister; +}; + +/* The host<->Typhoon ring index structure + * This indicates the current positions in the rings + * + * All values must be in little endian format for the 3XP + * + * rxHiCleared: entry we've cleared to in the Hi receive ring + * rxLoCleared: entry we've cleared to in the Lo receive ring + * rxBuffReady: next entry we'll put a free buffer in + * respCleared: entry we've cleared to in the response ring + * + * txLoCleared: entry the NIC has cleared to in the Lo transmit ring + * txHiCleared: entry the NIC has cleared to in the Hi transmit ring + * rxLoReady: entry the NIC has filled to in the Lo receive ring + * rxBuffCleared: entry the NIC has cleared in the free buffer ring + * cmdCleared: entry the NIC has cleared in the command ring + * respReady: entry the NIC has filled to in the response ring + * rxHiReady: entry the NIC has filled to in the Hi receive ring + */ +struct typhoon_indexes { + /* The first four are written by the host, and read by the NIC */ + volatile __le32 rxHiCleared; + volatile __le32 rxLoCleared; + volatile __le32 rxBuffReady; + volatile __le32 respCleared; + + /* The remaining are written by the NIC, and read by the host */ + volatile __le32 txLoCleared; + volatile __le32 txHiCleared; + volatile __le32 rxLoReady; + volatile __le32 rxBuffCleared; + volatile __le32 cmdCleared; + volatile __le32 respReady; + volatile __le32 rxHiReady; +} __packed; + +/* The host<->Typhoon interface + * Our means of communicating where things are + * + * All values must be in little endian format for the 3XP + * + * ringIndex: 64 bit bus address of the index structure + * txLoAddr: 64 bit bus address of the Lo transmit ring + * txLoSize: size (in bytes) of the Lo transmit ring + * txHi*: as above for the Hi priority transmit ring + * rxLo*: as above for the Lo priority receive ring + * rxBuff*: as above for the free buffer ring + * cmd*: as above for the command ring + * resp*: as above for the response ring + * zeroAddr: 64 bit bus address of a zero word (for DMA) + * rxHi*: as above for the Hi Priority receive ring + * + * While there is room for 64 bit addresses, current versions of the 3XP + * only do 32 bit addresses, so the *Hi for each of the above will always + * be zero. + */ +struct typhoon_interface { + __le32 ringIndex; + __le32 ringIndexHi; + __le32 txLoAddr; + __le32 txLoAddrHi; + __le32 txLoSize; + __le32 txHiAddr; + __le32 txHiAddrHi; + __le32 txHiSize; + __le32 rxLoAddr; + __le32 rxLoAddrHi; + __le32 rxLoSize; + __le32 rxBuffAddr; + __le32 rxBuffAddrHi; + __le32 rxBuffSize; + __le32 cmdAddr; + __le32 cmdAddrHi; + __le32 cmdSize; + __le32 respAddr; + __le32 respAddrHi; + __le32 respSize; + __le32 zeroAddr; + __le32 zeroAddrHi; + __le32 rxHiAddr; + __le32 rxHiAddrHi; + __le32 rxHiSize; +} __packed; + +/* The Typhoon transmit/fragment descriptor + * + * A packet is described by a packet descriptor, followed by option descriptors, + * if any, then one or more fragment descriptors. 
+ * + * Packet descriptor: + * flags: Descriptor type + * len:i zero, or length of this packet + * addr*: 8 bytes of opaque data to the firmware -- for skb pointer + * processFlags: Determine offload tasks to perform on this packet. + * + * Fragment descriptor: + * flags: Descriptor type + * len:i length of this fragment + * addr: low bytes of DMA address for this part of the packet + * addrHi: hi bytes of DMA address for this part of the packet + * processFlags: must be zero + * + * TYPHOON_DESC_VALID is not mentioned in their docs, but their Linux + * driver uses it. + */ +struct tx_desc { + u8 flags; +#define TYPHOON_TYPE_MASK 0x07 +#define TYPHOON_FRAG_DESC 0x00 +#define TYPHOON_TX_DESC 0x01 +#define TYPHOON_CMD_DESC 0x02 +#define TYPHOON_OPT_DESC 0x03 +#define TYPHOON_RX_DESC 0x04 +#define TYPHOON_RESP_DESC 0x05 +#define TYPHOON_OPT_TYPE_MASK 0xf0 +#define TYPHOON_OPT_IPSEC 0x00 +#define TYPHOON_OPT_TCP_SEG 0x10 +#define TYPHOON_CMD_RESPOND 0x40 +#define TYPHOON_RESP_ERROR 0x40 +#define TYPHOON_RX_ERROR 0x40 +#define TYPHOON_DESC_VALID 0x80 + u8 numDesc; + __le16 len; + union { + struct { + __le32 addr; + __le32 addrHi; + } frag; + u64 tx_addr; /* opaque for hardware, for TX_DESC */ + }; + __le32 processFlags; +#define TYPHOON_TX_PF_NO_CRC cpu_to_le32(0x00000001) +#define TYPHOON_TX_PF_IP_CHKSUM cpu_to_le32(0x00000002) +#define TYPHOON_TX_PF_TCP_CHKSUM cpu_to_le32(0x00000004) +#define TYPHOON_TX_PF_TCP_SEGMENT cpu_to_le32(0x00000008) +#define TYPHOON_TX_PF_INSERT_VLAN cpu_to_le32(0x00000010) +#define TYPHOON_TX_PF_IPSEC cpu_to_le32(0x00000020) +#define TYPHOON_TX_PF_VLAN_PRIORITY cpu_to_le32(0x00000040) +#define TYPHOON_TX_PF_UDP_CHKSUM cpu_to_le32(0x00000080) +#define TYPHOON_TX_PF_PAD_FRAME cpu_to_le32(0x00000100) +#define TYPHOON_TX_PF_RESERVED cpu_to_le32(0x00000e00) +#define TYPHOON_TX_PF_VLAN_MASK cpu_to_le32(0x0ffff000) +#define TYPHOON_TX_PF_INTERNAL cpu_to_le32(0xf0000000) +#define TYPHOON_TX_PF_VLAN_TAG_SHIFT 12 +} __packed; + +/* The TCP Segmentation offload option descriptor + * + * flags: descriptor type + * numDesc: must be 1 + * mss_flags: bits 0-11 (little endian) are MSS, 12 is first TSO descriptor + * 13 is list TSO descriptor, set both if only one TSO + * respAddrLo: low bytes of address of the bytesTx field of this descriptor + * bytesTx: total number of bytes in this TSO request + * status: 0 on completion + */ +struct tcpopt_desc { + u8 flags; + u8 numDesc; + __le16 mss_flags; +#define TYPHOON_TSO_FIRST cpu_to_le16(0x1000) +#define TYPHOON_TSO_LAST cpu_to_le16(0x2000) + __le32 respAddrLo; + __le32 bytesTx; + __le32 status; +} __packed; + +/* The IPSEC Offload descriptor + * + * flags: descriptor type + * numDesc: must be 1 + * ipsecFlags: bit 0: 0 -- generate IV, 1 -- use supplied IV + * sa1, sa2: Security Association IDs for this packet + * reserved: set to 0 + */ +struct ipsec_desc { + u8 flags; + u8 numDesc; + __le16 ipsecFlags; +#define TYPHOON_IPSEC_GEN_IV cpu_to_le16(0x0000) +#define TYPHOON_IPSEC_USE_IV cpu_to_le16(0x0001) + __le32 sa1; + __le32 sa2; + __le32 reserved; +} __packed; + +/* The Typhoon receive descriptor (Updated by NIC) + * + * flags: Descriptor type, error indication + * numDesc: Always zero + * frameLen: the size of the packet received + * addr: low 32 bytes of the virtual addr passed in for this buffer + * addrHi: high 32 bytes of the virtual addr passed in for this buffer + * rxStatus: Error if set in flags, otherwise result of offload processing + * filterResults: results of filtering on packet, not used + * ipsecResults: Results of 
IPSEC processing + * vlanTag: the 801.2q TCI from the packet + */ +struct rx_desc { + u8 flags; + u8 numDesc; + __le16 frameLen; + u32 addr; /* opaque, comes from virtAddr */ + u32 addrHi; /* opaque, comes from virtAddrHi */ + __le32 rxStatus; +#define TYPHOON_RX_ERR_INTERNAL cpu_to_le32(0x00000000) +#define TYPHOON_RX_ERR_FIFO_UNDERRUN cpu_to_le32(0x00000001) +#define TYPHOON_RX_ERR_BAD_SSD cpu_to_le32(0x00000002) +#define TYPHOON_RX_ERR_RUNT cpu_to_le32(0x00000003) +#define TYPHOON_RX_ERR_CRC cpu_to_le32(0x00000004) +#define TYPHOON_RX_ERR_OVERSIZE cpu_to_le32(0x00000005) +#define TYPHOON_RX_ERR_ALIGN cpu_to_le32(0x00000006) +#define TYPHOON_RX_ERR_DRIBBLE cpu_to_le32(0x00000007) +#define TYPHOON_RX_PROTO_MASK cpu_to_le32(0x00000003) +#define TYPHOON_RX_PROTO_UNKNOWN cpu_to_le32(0x00000000) +#define TYPHOON_RX_PROTO_IP cpu_to_le32(0x00000001) +#define TYPHOON_RX_PROTO_IPX cpu_to_le32(0x00000002) +#define TYPHOON_RX_VLAN cpu_to_le32(0x00000004) +#define TYPHOON_RX_IP_FRAG cpu_to_le32(0x00000008) +#define TYPHOON_RX_IPSEC cpu_to_le32(0x00000010) +#define TYPHOON_RX_IP_CHK_FAIL cpu_to_le32(0x00000020) +#define TYPHOON_RX_TCP_CHK_FAIL cpu_to_le32(0x00000040) +#define TYPHOON_RX_UDP_CHK_FAIL cpu_to_le32(0x00000080) +#define TYPHOON_RX_IP_CHK_GOOD cpu_to_le32(0x00000100) +#define TYPHOON_RX_TCP_CHK_GOOD cpu_to_le32(0x00000200) +#define TYPHOON_RX_UDP_CHK_GOOD cpu_to_le32(0x00000400) + __le16 filterResults; +#define TYPHOON_RX_FILTER_MASK cpu_to_le16(0x7fff) +#define TYPHOON_RX_FILTERED cpu_to_le16(0x8000) + __le16 ipsecResults; +#define TYPHOON_RX_OUTER_AH_GOOD cpu_to_le16(0x0001) +#define TYPHOON_RX_OUTER_ESP_GOOD cpu_to_le16(0x0002) +#define TYPHOON_RX_INNER_AH_GOOD cpu_to_le16(0x0004) +#define TYPHOON_RX_INNER_ESP_GOOD cpu_to_le16(0x0008) +#define TYPHOON_RX_OUTER_AH_FAIL cpu_to_le16(0x0010) +#define TYPHOON_RX_OUTER_ESP_FAIL cpu_to_le16(0x0020) +#define TYPHOON_RX_INNER_AH_FAIL cpu_to_le16(0x0040) +#define TYPHOON_RX_INNER_ESP_FAIL cpu_to_le16(0x0080) +#define TYPHOON_RX_UNKNOWN_SA cpu_to_le16(0x0100) +#define TYPHOON_RX_ESP_FORMAT_ERR cpu_to_le16(0x0200) + __be32 vlanTag; +} __packed; + +/* The Typhoon free buffer descriptor, used to give a buffer to the NIC + * + * physAddr: low 32 bits of the bus address of the buffer + * physAddrHi: high 32 bits of the bus address of the buffer, always zero + * virtAddr: low 32 bits of the skb address + * virtAddrHi: high 32 bits of the skb address, always zero + * + * the virt* address is basically two 32 bit cookies, just passed back + * from the NIC + */ +struct rx_free { + __le32 physAddr; + __le32 physAddrHi; + u32 virtAddr; + u32 virtAddrHi; +} __packed; + +/* The Typhoon command descriptor, used for commands and responses + * + * flags: descriptor type + * numDesc: number of descriptors following in this command/response, + * ie, zero for a one descriptor command + * cmd: the command + * seqNo: sequence number (unused) + * parm1: use varies by command + * parm2: use varies by command + * parm3: use varies by command + */ +struct cmd_desc { + u8 flags; + u8 numDesc; + __le16 cmd; +#define TYPHOON_CMD_TX_ENABLE cpu_to_le16(0x0001) +#define TYPHOON_CMD_TX_DISABLE cpu_to_le16(0x0002) +#define TYPHOON_CMD_RX_ENABLE cpu_to_le16(0x0003) +#define TYPHOON_CMD_RX_DISABLE cpu_to_le16(0x0004) +#define TYPHOON_CMD_SET_RX_FILTER cpu_to_le16(0x0005) +#define TYPHOON_CMD_READ_STATS cpu_to_le16(0x0007) +#define TYPHOON_CMD_XCVR_SELECT cpu_to_le16(0x0013) +#define TYPHOON_CMD_SET_MAX_PKT_SIZE cpu_to_le16(0x001a) +#define TYPHOON_CMD_READ_MEDIA_STATUS 
cpu_to_le16(0x001b) +#define TYPHOON_CMD_GOTO_SLEEP cpu_to_le16(0x0023) +#define TYPHOON_CMD_SET_MULTICAST_HASH cpu_to_le16(0x0025) +#define TYPHOON_CMD_SET_MAC_ADDRESS cpu_to_le16(0x0026) +#define TYPHOON_CMD_READ_MAC_ADDRESS cpu_to_le16(0x0027) +#define TYPHOON_CMD_VLAN_TYPE_WRITE cpu_to_le16(0x002b) +#define TYPHOON_CMD_CREATE_SA cpu_to_le16(0x0034) +#define TYPHOON_CMD_DELETE_SA cpu_to_le16(0x0035) +#define TYPHOON_CMD_READ_VERSIONS cpu_to_le16(0x0043) +#define TYPHOON_CMD_IRQ_COALESCE_CTRL cpu_to_le16(0x0045) +#define TYPHOON_CMD_ENABLE_WAKE_EVENTS cpu_to_le16(0x0049) +#define TYPHOON_CMD_SET_OFFLOAD_TASKS cpu_to_le16(0x004f) +#define TYPHOON_CMD_HELLO_RESP cpu_to_le16(0x0057) +#define TYPHOON_CMD_HALT cpu_to_le16(0x005d) +#define TYPHOON_CMD_READ_IPSEC_INFO cpu_to_le16(0x005e) +#define TYPHOON_CMD_GET_IPSEC_ENABLE cpu_to_le16(0x0067) +#define TYPHOON_CMD_GET_CMD_LVL cpu_to_le16(0x0069) + u16 seqNo; + __le16 parm1; + __le32 parm2; + __le32 parm3; +} __packed; + +/* The Typhoon response descriptor, see command descriptor for details + */ +struct resp_desc { + u8 flags; + u8 numDesc; + __le16 cmd; + __le16 seqNo; + __le16 parm1; + __le32 parm2; + __le32 parm3; +} __packed; + +#define INIT_COMMAND_NO_RESPONSE(x, command) \ + do { struct cmd_desc *_ptr = (x); \ + memset(_ptr, 0, sizeof(struct cmd_desc)); \ + _ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \ + _ptr->cmd = command; \ + } while(0) + +/* We set seqNo to 1 if we're expecting a response from this command */ +#define INIT_COMMAND_WITH_RESPONSE(x, command) \ + do { struct cmd_desc *_ptr = (x); \ + memset(_ptr, 0, sizeof(struct cmd_desc)); \ + _ptr->flags = TYPHOON_CMD_RESPOND | TYPHOON_CMD_DESC; \ + _ptr->flags |= TYPHOON_DESC_VALID; \ + _ptr->cmd = command; \ + _ptr->seqNo = 1; \ + } while(0) + +/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1) + */ +#define TYPHOON_RX_FILTER_DIRECTED cpu_to_le16(0x0001) +#define TYPHOON_RX_FILTER_ALL_MCAST cpu_to_le16(0x0002) +#define TYPHOON_RX_FILTER_BROADCAST cpu_to_le16(0x0004) +#define TYPHOON_RX_FILTER_PROMISCOUS cpu_to_le16(0x0008) +#define TYPHOON_RX_FILTER_MCAST_HASH cpu_to_le16(0x0010) + +/* TYPHOON_CMD_READ_STATS response format + */ +struct stats_resp { + u8 flags; + u8 numDesc; + __le16 cmd; + __le16 seqNo; + __le16 unused; + __le32 txPackets; + __le64 txBytes; + __le32 txDeferred; + __le32 txLateCollisions; + __le32 txCollisions; + __le32 txCarrierLost; + __le32 txMultipleCollisions; + __le32 txExcessiveCollisions; + __le32 txFifoUnderruns; + __le32 txMulticastTxOverflows; + __le32 txFiltered; + __le32 rxPacketsGood; + __le64 rxBytesGood; + __le32 rxFifoOverruns; + __le32 BadSSD; + __le32 rxCrcErrors; + __le32 rxOversized; + __le32 rxBroadcast; + __le32 rxMulticast; + __le32 rxOverflow; + __le32 rxFiltered; + __le32 linkStatus; +#define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001) +#define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001) +#define TYPHOON_LINK_BAD cpu_to_le32(0x00000000) +#define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002) +#define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002) +#define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000) +#define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004) +#define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004) +#define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) + __le32 unused2; + __le32 unused3; +} __packed; + +/* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) + */ +#define TYPHOON_XCVR_10HALF cpu_to_le16(0x0000) +#define TYPHOON_XCVR_10FULL cpu_to_le16(0x0001) +#define TYPHOON_XCVR_100HALF cpu_to_le16(0x0002) 
+#define TYPHOON_XCVR_100FULL cpu_to_le16(0x0003) +#define TYPHOON_XCVR_AUTONEG cpu_to_le16(0x0004) + +/* TYPHOON_CMD_READ_MEDIA_STATUS (resp.parm1) + */ +#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE cpu_to_le16(0x0004) +#define TYPHOON_MEDIA_STAT_COLLISION_DETECT cpu_to_le16(0x0010) +#define TYPHOON_MEDIA_STAT_CARRIER_SENSE cpu_to_le16(0x0020) +#define TYPHOON_MEDIA_STAT_POLARITY_REV cpu_to_le16(0x0400) +#define TYPHOON_MEDIA_STAT_NO_LINK cpu_to_le16(0x0800) + +/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1) + */ +#define TYPHOON_MCAST_HASH_DISABLE cpu_to_le16(0x0000) +#define TYPHOON_MCAST_HASH_ENABLE cpu_to_le16(0x0001) +#define TYPHOON_MCAST_HASH_SET cpu_to_le16(0x0002) + +/* TYPHOON_CMD_CREATE_SA descriptor and settings + */ +struct sa_descriptor { + u8 flags; + u8 numDesc; + u16 cmd; + u16 seqNo; + u16 mode; +#define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000) +#define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001) +#define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002) + u8 hashFlags; +#define TYPHOON_SA_HASH_ENABLE 0x01 +#define TYPHOON_SA_HASH_SHA1 0x02 +#define TYPHOON_SA_HASH_MD5 0x04 + u8 direction; +#define TYPHOON_SA_DIR_RX 0x00 +#define TYPHOON_SA_DIR_TX 0x01 + u8 encryptionFlags; +#define TYPHOON_SA_ENCRYPT_ENABLE 0x01 +#define TYPHOON_SA_ENCRYPT_DES 0x02 +#define TYPHOON_SA_ENCRYPT_3DES 0x00 +#define TYPHOON_SA_ENCRYPT_3DES_2KEY 0x00 +#define TYPHOON_SA_ENCRYPT_3DES_3KEY 0x04 +#define TYPHOON_SA_ENCRYPT_CBC 0x08 +#define TYPHOON_SA_ENCRYPT_ECB 0x00 + u8 specifyIndex; +#define TYPHOON_SA_SPECIFY_INDEX 0x01 +#define TYPHOON_SA_GENERATE_INDEX 0x00 + u32 SPI; + u32 destAddr; + u32 destMask; + u8 integKey[20]; + u8 confKey[24]; + u32 index; + u32 unused; + u32 unused2; +} __packed; + +/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 (Tx) & cmd.parm3 (Rx)) + * This is all for IPv4. 
+ */ +#define TYPHOON_OFFLOAD_TCP_CHKSUM cpu_to_le32(0x00000002) +#define TYPHOON_OFFLOAD_UDP_CHKSUM cpu_to_le32(0x00000004) +#define TYPHOON_OFFLOAD_IP_CHKSUM cpu_to_le32(0x00000008) +#define TYPHOON_OFFLOAD_IPSEC cpu_to_le32(0x00000010) +#define TYPHOON_OFFLOAD_BCAST_THROTTLE cpu_to_le32(0x00000020) +#define TYPHOON_OFFLOAD_DHCP_PREVENT cpu_to_le32(0x00000040) +#define TYPHOON_OFFLOAD_VLAN cpu_to_le32(0x00000080) +#define TYPHOON_OFFLOAD_FILTERING cpu_to_le32(0x00000100) +#define TYPHOON_OFFLOAD_TCP_SEGMENT cpu_to_le32(0x00000200) + +/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1) + */ +#define TYPHOON_WAKE_MAGIC_PKT cpu_to_le16(0x01) +#define TYPHOON_WAKE_LINK_EVENT cpu_to_le16(0x02) +#define TYPHOON_WAKE_ICMP_ECHO cpu_to_le16(0x04) +#define TYPHOON_WAKE_ARP cpu_to_le16(0x08) + +/* These are used to load the firmware image on the NIC + */ +struct typhoon_file_header { + u8 tag[8]; + __le32 version; + __le32 numSections; + __le32 startAddr; + __le32 hmacDigest[5]; +} __packed; + +struct typhoon_section_header { + __le32 len; + u16 checksum; + u16 reserved; + __le32 startAddr; +} __packed; + +/* The Typhoon Register offsets + */ +#define TYPHOON_REG_SOFT_RESET 0x00 +#define TYPHOON_REG_INTR_STATUS 0x04 +#define TYPHOON_REG_INTR_ENABLE 0x08 +#define TYPHOON_REG_INTR_MASK 0x0c +#define TYPHOON_REG_SELF_INTERRUPT 0x10 +#define TYPHOON_REG_HOST2ARM7 0x14 +#define TYPHOON_REG_HOST2ARM6 0x18 +#define TYPHOON_REG_HOST2ARM5 0x1c +#define TYPHOON_REG_HOST2ARM4 0x20 +#define TYPHOON_REG_HOST2ARM3 0x24 +#define TYPHOON_REG_HOST2ARM2 0x28 +#define TYPHOON_REG_HOST2ARM1 0x2c +#define TYPHOON_REG_HOST2ARM0 0x30 +#define TYPHOON_REG_ARM2HOST3 0x34 +#define TYPHOON_REG_ARM2HOST2 0x38 +#define TYPHOON_REG_ARM2HOST1 0x3c +#define TYPHOON_REG_ARM2HOST0 0x40 + +#define TYPHOON_REG_BOOT_DATA_LO TYPHOON_REG_HOST2ARM5 +#define TYPHOON_REG_BOOT_DATA_HI TYPHOON_REG_HOST2ARM4 +#define TYPHOON_REG_BOOT_DEST_ADDR TYPHOON_REG_HOST2ARM3 +#define TYPHOON_REG_BOOT_CHECKSUM TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_BOOT_LENGTH TYPHOON_REG_HOST2ARM1 + +#define TYPHOON_REG_DOWNLOAD_BOOT_ADDR TYPHOON_REG_HOST2ARM1 +#define TYPHOON_REG_DOWNLOAD_HMAC_0 TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_DOWNLOAD_HMAC_1 TYPHOON_REG_HOST2ARM3 +#define TYPHOON_REG_DOWNLOAD_HMAC_2 TYPHOON_REG_HOST2ARM4 +#define TYPHOON_REG_DOWNLOAD_HMAC_3 TYPHOON_REG_HOST2ARM5 +#define TYPHOON_REG_DOWNLOAD_HMAC_4 TYPHOON_REG_HOST2ARM6 + +#define TYPHOON_REG_BOOT_RECORD_ADDR_HI TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_BOOT_RECORD_ADDR_LO TYPHOON_REG_HOST2ARM1 + +#define TYPHOON_REG_TX_LO_READY TYPHOON_REG_HOST2ARM3 +#define TYPHOON_REG_CMD_READY TYPHOON_REG_HOST2ARM2 +#define TYPHOON_REG_TX_HI_READY TYPHOON_REG_HOST2ARM1 + +#define TYPHOON_REG_COMMAND TYPHOON_REG_HOST2ARM0 +#define TYPHOON_REG_HEARTBEAT TYPHOON_REG_ARM2HOST3 +#define TYPHOON_REG_STATUS TYPHOON_REG_ARM2HOST0 + +/* 3XP Reset values (TYPHOON_REG_SOFT_RESET) + */ +#define TYPHOON_RESET_ALL 0x7f +#define TYPHOON_RESET_NONE 0x00 + +/* 3XP irq bits (TYPHOON_REG_INTR{STATUS,ENABLE,MASK}) + * + * Some of these came from OpenBSD, as the 3Com docs have it wrong + * (INTR_SELF) or don't list it at all (INTR_*_ABORT) + * + * Enabling irqs on the Heartbeat reg (ArmToHost3) gets you an irq + * about every 8ms, so don't do it. 
+ */ +#define TYPHOON_INTR_HOST_INT 0x00000001 +#define TYPHOON_INTR_ARM2HOST0 0x00000002 +#define TYPHOON_INTR_ARM2HOST1 0x00000004 +#define TYPHOON_INTR_ARM2HOST2 0x00000008 +#define TYPHOON_INTR_ARM2HOST3 0x00000010 +#define TYPHOON_INTR_DMA0 0x00000020 +#define TYPHOON_INTR_DMA1 0x00000040 +#define TYPHOON_INTR_DMA2 0x00000080 +#define TYPHOON_INTR_DMA3 0x00000100 +#define TYPHOON_INTR_MASTER_ABORT 0x00000200 +#define TYPHOON_INTR_TARGET_ABORT 0x00000400 +#define TYPHOON_INTR_SELF 0x00000800 +#define TYPHOON_INTR_RESERVED 0xfffff000 + +#define TYPHOON_INTR_BOOTCMD TYPHOON_INTR_ARM2HOST0 + +#define TYPHOON_INTR_ENABLE_ALL 0xffffffef +#define TYPHOON_INTR_ALL 0xffffffff +#define TYPHOON_INTR_NONE 0x00000000 + +/* The commands for the 3XP chip (TYPHOON_REG_COMMAND) + */ +#define TYPHOON_BOOTCMD_BOOT 0x00 +#define TYPHOON_BOOTCMD_WAKEUP 0xfa +#define TYPHOON_BOOTCMD_DNLD_COMPLETE 0xfb +#define TYPHOON_BOOTCMD_SEG_AVAILABLE 0xfc +#define TYPHOON_BOOTCMD_RUNTIME_IMAGE 0xfd +#define TYPHOON_BOOTCMD_REG_BOOT_RECORD 0xff + +/* 3XP Status values (TYPHOON_REG_STATUS) + */ +#define TYPHOON_STATUS_WAITING_FOR_BOOT 0x07 +#define TYPHOON_STATUS_SECOND_INIT 0x08 +#define TYPHOON_STATUS_RUNNING 0x09 +#define TYPHOON_STATUS_WAITING_FOR_HOST 0x0d +#define TYPHOON_STATUS_WAITING_FOR_SEGMENT 0x10 +#define TYPHOON_STATUS_SLEEPING 0x11 +#define TYPHOON_STATUS_HALTED 0x14 diff --git a/drivers/net/ethernet/8390/8390.c b/drivers/net/ethernet/8390/8390.c new file mode 100644 index 000000000..5db1f55ab --- /dev/null +++ b/drivers/net/ethernet/8390/8390.c @@ -0,0 +1,103 @@ +/* 8390 core for usual drivers */ + +static const char version[] = + "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include "lib8390.c" + +int ei_open(struct net_device *dev) +{ + return __ei_open(dev); +} +EXPORT_SYMBOL(ei_open); + +int ei_close(struct net_device *dev) +{ + return __ei_close(dev); +} +EXPORT_SYMBOL(ei_close); + +netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + return __ei_start_xmit(skb, dev); +} +EXPORT_SYMBOL(ei_start_xmit); + +struct net_device_stats *ei_get_stats(struct net_device *dev) +{ + return __ei_get_stats(dev); +} +EXPORT_SYMBOL(ei_get_stats); + +void ei_set_multicast_list(struct net_device *dev) +{ + __ei_set_multicast_list(dev); +} +EXPORT_SYMBOL(ei_set_multicast_list); + +void ei_tx_timeout(struct net_device *dev) +{ + __ei_tx_timeout(dev); +} +EXPORT_SYMBOL(ei_tx_timeout); + +irqreturn_t ei_interrupt(int irq, void *dev_id) +{ + return __ei_interrupt(irq, dev_id); +} +EXPORT_SYMBOL(ei_interrupt); + +#ifdef CONFIG_NET_POLL_CONTROLLER +void ei_poll(struct net_device *dev) +{ + __ei_poll(dev); +} +EXPORT_SYMBOL(ei_poll); +#endif + +const struct net_device_ops ei_netdev_ops = { + .ndo_open = ei_open, + .ndo_stop = ei_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_rx_mode = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; +EXPORT_SYMBOL(ei_netdev_ops); + +struct net_device *__alloc_ei_netdev(int size) +{ + struct net_device *dev = ____alloc_ei_netdev(size); + if (dev) + dev->netdev_ops = &ei_netdev_ops; + return dev; +} +EXPORT_SYMBOL(__alloc_ei_netdev); + +void NS8390_init(struct net_device *dev, int startp) +{ + __NS8390_init(dev, startp); +} +EXPORT_SYMBOL(NS8390_init); + +#if defined(MODULE) + +static int __init 
ns8390_module_init(void)
+{
+ return 0;
+}
+
+static void __exit ns8390_module_exit(void)
+{
+}
+
+module_init(ns8390_module_init);
+module_exit(ns8390_module_exit);
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
new file mode 100644
index 000000000..3e2f2c2e7
--- /dev/null
+++ b/drivers/net/ethernet/8390/8390.h
@@ -0,0 +1,225 @@
+/* Generic NS8390 register definitions. */
+/* This file is part of Donald Becker's 8390 drivers, and is distributed
+ under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
+ Some of these names and comments originated from the Crynwr
+ packet drivers, which are distributed under the GPL. */
+
+#ifndef _8390_h
+#define _8390_h
+
+#include <linux/if_ether.h>
+#include <linux/ioport.h>
+#include <linux/irqreturn.h>
+#include <linux/skbuff.h>
+
+#define TX_PAGES 12 /* Two Tx slots */
+
+/* The 8390 specific per-packet-header format. */
+struct e8390_pkt_hdr {
+ unsigned char status; /* status */
+ unsigned char next; /* pointer to next packet. */
+ unsigned short count; /* header + packet length in bytes */
+};
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
+#endif
+
+
+/* Without I/O delay - non ISA or later chips */
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
+
+extern const struct net_device_ops ei_netdev_ops;
+
+struct net_device *__alloc_ei_netdev(int size);
+static inline struct net_device *alloc_ei_netdev(void)
+{
+ return __alloc_ei_netdev(0);
+}
+
+/* With I/O delay form */
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
+
+extern const struct net_device_ops eip_netdev_ops;
+
+struct net_device *__alloc_eip_netdev(int size);
+static inline struct net_device *alloc_eip_netdev(void)
+{
+ return __alloc_eip_netdev(0);
+}
+
+/* You have one of these per-board */
+struct ei_device {
+ const char *name;
+ void (*reset_8390)(struct net_device *);
+ void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int);
+ void (*block_output)(struct net_device *, int, const unsigned char *, int);
+ void (*block_input)(struct net_device *, int, struct sk_buff *, int);
+ unsigned long rmem_start;
+ unsigned long rmem_end;
+ void __iomem *mem;
+ unsigned char mcfilter[8];
+ unsigned open:1;
+ unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */
+ unsigned bigendian:1; /* 16-bit big endian mode. Do NOT */
+ /* set this on random 8390 clones! */
+ unsigned txing:1; /* Transmit Active */
+ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */
+ unsigned dmaing:1; /* Remote DMA Active */
+ unsigned char tx_start_page, rx_start_page, stop_page;
+ unsigned char current_page; /* Read pointer in buffer */
+ unsigned char interface_num; /* Net port (AUI, 10bT.) to use.
*/ + unsigned char txqueue; /* Tx Packet buffer queue length. */ + short tx1, tx2; /* Packet lengths for ping-pong tx. */ + short lasttx; /* Alpha version consistency check. */ + unsigned char reg0; /* Register '0' in a WD8013 */ + unsigned char reg5; /* Register '5' in a WD8013 */ + unsigned char saved_irq; /* Original dev->irq value. */ + u32 *reg_offset; /* Register mapping table */ + spinlock_t page_lock; /* Page register locks */ + unsigned long priv; /* Private field to store bus IDs etc. */ + u32 msg_enable; /* debug message level */ +#ifdef AX88796_PLATFORM + unsigned char rxcr_base; /* default value for RXCR */ +#endif +}; + +/* The maximum number of 8390 interrupt service routines called per IRQ. */ +#define MAX_SERVICE 12 + +/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */ +#define TX_TIMEOUT (20*HZ/100) + +#define ei_status (*(struct ei_device *)netdev_priv(dev)) + +/* Some generic ethernet register configurations. */ +#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */ +#define E8390_RX_IRQ_MASK 0x5 + +#ifdef AX88796_PLATFORM +#define E8390_RXCONFIG (ei_status.rxcr_base | 0x04) +#define E8390_RXOFF (ei_status.rxcr_base | 0x20) +#else +#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */ +#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */ +#endif + +#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */ +#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */ + + +/* Register accessed at EN_CMD, the 8390 base addr. */ +#define E8390_STOP 0x01 /* Stop and reset the chip */ +#define E8390_START 0x02 /* Start the chip, clear reset */ +#define E8390_TRANS 0x04 /* Transmit a frame */ +#define E8390_RREAD 0x08 /* Remote read */ +#define E8390_RWRITE 0x10 /* Remote write */ +#define E8390_NODMA 0x20 /* Remote DMA */ +#define E8390_PAGE0 0x00 /* Select page chip registers */ +#define E8390_PAGE1 0x40 /* using the two high-order bits */ +#define E8390_PAGE2 0x80 /* Page 3 is invalid. */ + +/* + * Only generate indirect loads given a machine that needs them. + * - removed AMIGA_PCMCIA from this list, handled as ISA io now + * - the _p for generates no delay by default 8390p.c overrides this. + */ + +#ifndef ei_inb +#define ei_inb(_p) inb(_p) +#define ei_outb(_v,_p) outb(_v,_p) +#define ei_inb_p(_p) inb(_p) +#define ei_outb_p(_v,_p) outb(_v,_p) +#endif + +#ifndef EI_SHIFT +#define EI_SHIFT(x) (x) +#endif + +#define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */ +/* Page 0 register offsets. 
*/ +#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */ +#define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */ +#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */ +#define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */ +#define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */ +#define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */ +#define EN0_TPSR EI_SHIFT(0x04) /* Transmit starting page WR */ +#define EN0_NCR EI_SHIFT(0x05) /* Number of collision reg RD */ +#define EN0_TCNTLO EI_SHIFT(0x05) /* Low byte of tx byte count WR */ +#define EN0_FIFO EI_SHIFT(0x06) /* FIFO RD */ +#define EN0_TCNTHI EI_SHIFT(0x06) /* High byte of tx byte count WR */ +#define EN0_ISR EI_SHIFT(0x07) /* Interrupt status reg RD WR */ +#define EN0_CRDALO EI_SHIFT(0x08) /* low byte of current remote dma address RD */ +#define EN0_RSARLO EI_SHIFT(0x08) /* Remote start address reg 0 */ +#define EN0_CRDAHI EI_SHIFT(0x09) /* high byte, current remote dma address RD */ +#define EN0_RSARHI EI_SHIFT(0x09) /* Remote start address reg 1 */ +#define EN0_RCNTLO EI_SHIFT(0x0a) /* Remote byte count reg WR */ +#define EN0_RCNTHI EI_SHIFT(0x0b) /* Remote byte count reg WR */ +#define EN0_RSR EI_SHIFT(0x0c) /* rx status reg RD */ +#define EN0_RXCR EI_SHIFT(0x0c) /* RX configuration reg WR */ +#define EN0_TXCR EI_SHIFT(0x0d) /* TX configuration reg WR */ +#define EN0_COUNTER0 EI_SHIFT(0x0d) /* Rcv alignment error counter RD */ +#define EN0_DCFG EI_SHIFT(0x0e) /* Data configuration reg WR */ +#define EN0_COUNTER1 EI_SHIFT(0x0e) /* Rcv CRC error counter RD */ +#define EN0_IMR EI_SHIFT(0x0f) /* Interrupt mask reg WR */ +#define EN0_COUNTER2 EI_SHIFT(0x0f) /* Rcv missed frame error counter RD */ + +/* Bits in EN0_ISR - Interrupt status register */ +#define ENISR_RX 0x01 /* Receiver, no error */ +#define ENISR_TX 0x02 /* Transmitter, no error */ +#define ENISR_RX_ERR 0x04 /* Receiver, with error */ +#define ENISR_TX_ERR 0x08 /* Transmitter, with error */ +#define ENISR_OVER 0x10 /* Receiver overwrote the ring */ +#define ENISR_COUNTERS 0x20 /* Counters need emptying */ +#define ENISR_RDC 0x40 /* remote dma complete */ +#define ENISR_RESET 0x80 /* Reset completed */ +#define ENISR_ALL 0x3f /* Interrupts we will enable */ + +/* Bits in EN0_DCFG - Data config register */ +#define ENDCFG_WTS 0x01 /* word transfer mode selection */ +#define ENDCFG_BOS 0x02 /* byte order selection */ + +/* Page 1 register offsets. */ +#define EN1_PHYS EI_SHIFT(0x01) /* This board's physical enet addr RD WR */ +#define EN1_PHYS_SHIFT(i) EI_SHIFT(i+1) /* Get and set mac address */ +#define EN1_CURPAG EI_SHIFT(0x07) /* Current memory page RD WR */ +#define EN1_MULT EI_SHIFT(0x08) /* Multicast filter mask array (8 bytes) RD WR */ +#define EN1_MULT_SHIFT(i) EI_SHIFT(8+i) /* Get and set multicast filter */ + +/* Bits in received packet status byte and EN0_RSR*/ +#define ENRSR_RXOK 0x01 /* Received a good packet */ +#define ENRSR_CRC 0x02 /* CRC error */ +#define ENRSR_FAE 0x04 /* frame alignment error */ +#define ENRSR_FO 0x08 /* FIFO overrun */ +#define ENRSR_MPA 0x10 /* missed pkt */ +#define ENRSR_PHY 0x20 /* physical/multicast address */ +#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */ +#define ENRSR_DEF 0x80 /* deferring */ + +/* Transmitted packet status, EN0_TSR. */ +#define ENTSR_PTX 0x01 /* Packet transmitted without error */ +#define ENTSR_ND 0x02 /* The transmit wasn't deferred. 
*/ +#define ENTSR_COL 0x04 /* The transmit collided at least once. */ +#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */ +#define ENTSR_CRS 0x10 /* The carrier sense was lost. */ +#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */ +#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */ +#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */ + +#endif /* _8390_h */ diff --git a/drivers/net/ethernet/8390/8390p.c b/drivers/net/ethernet/8390/8390p.c new file mode 100644 index 000000000..e8fc2e87e --- /dev/null +++ b/drivers/net/ethernet/8390/8390p.c @@ -0,0 +1,105 @@ +/* 8390 core for ISA devices needing bus delays */ + +static const char version[] = + "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#define ei_inb(_p) inb(_p) +#define ei_outb(_v, _p) outb(_v, _p) +#define ei_inb_p(_p) inb_p(_p) +#define ei_outb_p(_v, _p) outb_p(_v, _p) + +#include "lib8390.c" + +int eip_open(struct net_device *dev) +{ + return __ei_open(dev); +} +EXPORT_SYMBOL(eip_open); + +int eip_close(struct net_device *dev) +{ + return __ei_close(dev); +} +EXPORT_SYMBOL(eip_close); + +netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + return __ei_start_xmit(skb, dev); +} +EXPORT_SYMBOL(eip_start_xmit); + +struct net_device_stats *eip_get_stats(struct net_device *dev) +{ + return __ei_get_stats(dev); +} +EXPORT_SYMBOL(eip_get_stats); + +void eip_set_multicast_list(struct net_device *dev) +{ + __ei_set_multicast_list(dev); +} +EXPORT_SYMBOL(eip_set_multicast_list); + +void eip_tx_timeout(struct net_device *dev) +{ + __ei_tx_timeout(dev); +} +EXPORT_SYMBOL(eip_tx_timeout); + +irqreturn_t eip_interrupt(int irq, void *dev_id) +{ + return __ei_interrupt(irq, dev_id); +} +EXPORT_SYMBOL(eip_interrupt); + +#ifdef CONFIG_NET_POLL_CONTROLLER +void eip_poll(struct net_device *dev) +{ + __ei_poll(dev); +} +EXPORT_SYMBOL(eip_poll); +#endif + +const struct net_device_ops eip_netdev_ops = { + .ndo_open = eip_open, + .ndo_stop = eip_close, + .ndo_start_xmit = eip_start_xmit, + .ndo_tx_timeout = eip_tx_timeout, + .ndo_get_stats = eip_get_stats, + .ndo_set_rx_mode = eip_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = eip_poll, +#endif +}; +EXPORT_SYMBOL(eip_netdev_ops); + +struct net_device *__alloc_eip_netdev(int size) +{ + struct net_device *dev = ____alloc_ei_netdev(size); + if (dev) + dev->netdev_ops = &eip_netdev_ops; + return dev; +} +EXPORT_SYMBOL(__alloc_eip_netdev); + +void NS8390p_init(struct net_device *dev, int startp) +{ + __NS8390_init(dev, startp); +} +EXPORT_SYMBOL(NS8390p_init); + +static int __init NS8390p_init_module(void) +{ + return 0; +} + +static void __exit NS8390p_cleanup_module(void) +{ +} + +module_init(NS8390p_init_module); +module_exit(NS8390p_cleanup_module); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig new file mode 100644 index 000000000..2d89bd00d --- /dev/null +++ b/drivers/net/ethernet/8390/Kconfig @@ -0,0 +1,206 @@ +# +# 8390 device configuration +# + +config NET_VENDOR_8390 + bool "National Semi-conductor 8390 devices" + default y + depends on NET_VENDOR_NATSEMI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Western Digital cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_8390 + +config PCMCIA_AXNET + tristate "Asix AX88190 PCMCIA support" + depends on PCMCIA + ---help--- + Say Y here if you intend to attach an Asix AX88190-based PCMCIA + (PC-card) Fast Ethernet card to your computer. These cards are + nearly NE2000 compatible but need a separate driver due to a few + misfeatures. + + To compile this driver as a module, choose M here: the module will be + called axnet_cs. If unsure, say N. + +config AX88796 + tristate "ASIX AX88796 NE2000 clone support" + depends on (ARM || MIPS || SUPERH) + select CRC32 + select PHYLIB + select MDIO_BITBANG + ---help--- + AX88796 driver, using platform bus to provide + chip detection and resources + +config AX88796_93CX6 + bool "ASIX AX88796 external 93CX6 eeprom support" + depends on AX88796 + select EEPROM_93CX6 + ---help--- + Select this if your platform comes with an external 93CX6 eeprom. + +config HYDRA + tristate "Hydra support" + depends on ZORRO + select CRC32 + ---help--- + If you have a Hydra Ethernet adapter, say Y. Otherwise, say N. + + To compile this driver as a module, choose M here: the module + will be called hydra. + +config ARM_ETHERH + tristate "I-cubed EtherH/ANT EtherM support" + depends on ARM && ARCH_ACORN + select CRC32 + ---help--- + If you have an Acorn system with one of these network cards, you + should say Y to this option if you wish to use it with Linux. + +config MAC8390 + bool "Macintosh NS 8390 based ethernet cards" + depends on MAC + select CRC32 + ---help--- + If you want to include a driver to support Nubus or LC-PDS + Ethernet cards using an NS8390 chipset or its equivalent, say Y + and read the Ethernet-HOWTO, available from + . + +config MCF8390 + tristate "ColdFire NS8390 based Ethernet support" + depends on COLDFIRE + select CRC32 + ---help--- + This driver is for Ethernet devices using an NS8390-compatible + chipset on many common ColdFire CPU based boards. Many of the older + Freescale dev boards use this, and some other common boards like + some SnapGear routers do as well. + + If you have one of these boards and want to use the network interface + on them then choose Y. To compile this driver as a module, choose M + here, the module will be called mcf8390. + +config NE2000 + tristate "NE2000/NE1000 support" + depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX || \ + ATARI_ETHERNEC) + select CRC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . Many Ethernet cards + without a specific driver are compatible with NE2000. + + If you have a PCI NE2000 card however, say N here and Y to "PCI + NE2000 and clone support" below. + + To compile this driver as a module, choose M here. The module + will be called ne. + +config NE2K_PCI + tristate "PCI NE2000 and clones support (see help)" + depends on PCI + select CRC32 + ---help--- + This driver is for NE2000 compatible PCI cards. It will not work + with ISA NE2000 cards (they have their own driver, "NE2000/NE1000 + support" below). If you have a PCI NE2000 network (Ethernet) card, + say Y and read the Ethernet-HOWTO, available from + . 
+ + This driver also works for the following NE2000 clone cards: + RealTek RTL-8029 Winbond 89C940 Compex RL2000 KTI ET32P2 + NetVin NV5000SC Via 86C926 SureCom NE34 Winbond + Holtek HT80232 Holtek HT80229 + + To compile this driver as a module, choose M here. The module + will be called ne2k-pci. + +config APNE + tristate "PCMCIA NE2000 support" + depends on AMIGA_PCMCIA + select CRC32 + ---help--- + If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise, + say N. + + To compile this driver as a module, choose M here: the module + will be called apne. + +config PCMCIA_PCNET + tristate "NE2000 compatible PCMCIA support" + depends on PCMCIA + select CRC32 + ---help--- + Say Y here if you intend to attach an NE2000 compatible PCMCIA + (PC-card) Ethernet or Fast Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called pcnet_cs. If unsure, say N. + +config STNIC + tristate "National DP83902AV support" + depends on SUPERH + select CRC32 + ---help--- + Support for cards based on the National Semiconductor DP83902AV + ST-NIC Serial Network Interface Controller for Twisted Pair. This + is a 10Mbit/sec Ethernet controller. Product overview and specs at + . + + If unsure, say N. + +config ULTRA + tristate "SMC Ultra support" + depends on ISA + select CRC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + Important: There have been many reports that, with some motherboards + mixing an SMC Ultra and an Adaptec AHA154x SCSI card (or compatible, + such as some BusLogic models) causes corruption problems with many + operating systems. The Linux smc-ultra driver has a work-around for + this but keep it in mind if you have such a SCSI card and have + problems. + + To compile this driver as a module, choose M here. The module + will be called smc-ultra. + +config WD80x3 + tristate "WD80*3 support" + depends on ISA + select CRC32 + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called wd. + +config ZORRO8390 + tristate "Zorro NS8390-based Ethernet support" + depends on ZORRO + select CRC32 + ---help--- + This driver is for Zorro Ethernet cards using an NS8390-compatible + chipset, like the Village Tronic Ariadne II and the Individual + Computers X-Surf Ethernet cards. If you have such a card, say Y. + Otherwise, say N. + + To compile this driver as a module, choose M here: the module + will be called zorro8390. + +endif # NET_VENDOR_8390 diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile new file mode 100644 index 000000000..ff3b31894 --- /dev/null +++ b/drivers/net/ethernet/8390/Makefile @@ -0,0 +1,18 @@ +# +# Makefile for the 8390 network device drivers. 
+# + +obj-$(CONFIG_MAC8390) += mac8390.o +obj-$(CONFIG_APNE) += apne.o 8390.o +obj-$(CONFIG_ARM_ETHERH) += etherh.o +obj-$(CONFIG_AX88796) += ax88796.o +obj-$(CONFIG_HYDRA) += hydra.o 8390.o +obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o +obj-$(CONFIG_NE2000) += ne.o 8390p.o +obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o +obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o +obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o +obj-$(CONFIG_STNIC) += stnic.o 8390.o +obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o +obj-$(CONFIG_WD80x3) += wd.o 8390.o +obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c new file mode 100644 index 000000000..c56ac9ebc --- /dev/null +++ b/drivers/net/ethernet/8390/apne.c @@ -0,0 +1,626 @@ +/* + * Amiga Linux/68k 8390 based PCMCIA Ethernet Driver for the Amiga 1200 + * + * (C) Copyright 1997 Alain Malek + * (Alain.Malek@cryogen.com) + * + * ---------------------------------------------------------------------------- + * + * This program is based on + * + * ne.c: A general non-shared-memory NS8390 ethernet driver for linux + * Written 1992-94 by Donald Becker. + * + * 8390.c: A general NS8390 ethernet driver core for linux. + * Written 1992-94 by Donald Becker. + * + * cnetdevice: A Sana-II ethernet driver for AmigaOS + * Written by Bruce Abbott (bhabbott@inhb.co.nz) + * + * ---------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * ---------------------------------------------------------------------------- + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "8390.h" + +/* ---- No user-serviceable parts below ---- */ + +#define DRV_NAME "apne" + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. 
*/ +#define NE_IO_EXTENT 0x20 + +#define NE_EN0_ISR 0x07 +#define NE_EN0_DCFG 0x0e + +#define NE_EN0_RSARLO 0x08 +#define NE_EN0_RSARHI 0x09 +#define NE_EN0_RCNTLO 0x0a +#define NE_EN0_RXCR 0x0c +#define NE_EN0_TXCR 0x0d +#define NE_EN0_RCNTHI 0x0b +#define NE_EN0_IMR 0x0f + +#define NE1SM_START_PG 0x20 /* First page of TX buffer */ +#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + + +struct net_device * __init apne_probe(int unit); +static int apne_probe1(struct net_device *dev, int ioaddr); + +static void apne_reset_8390(struct net_device *dev); +static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void apne_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void apne_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); +static irqreturn_t apne_interrupt(int irq, void *dev_id); + +static int init_pcmcia(void); + +/* IO base address used for nic */ + +#define IOBASE 0x300 + +/* + use MANUAL_CONFIG and MANUAL_OFFSET for enabling IO by hand + you can find the values to use by looking at the cnet.device + config file example (the default values are for the CNET40BC card) +*/ + +/* +#define MANUAL_CONFIG 0x20 +#define MANUAL_OFFSET 0x3f8 + +#define MANUAL_HWADDR0 0x00 +#define MANUAL_HWADDR1 0x12 +#define MANUAL_HWADDR2 0x34 +#define MANUAL_HWADDR3 0x56 +#define MANUAL_HWADDR4 0x78 +#define MANUAL_HWADDR5 0x9a +*/ + +static const char version[] = + "apne.c:v1.1 7/10/98 Alain Malek (Alain.Malek@cryogen.ch)\n"; + +static int apne_owned; /* signal if card already owned */ + +static u32 apne_msg_enable; +module_param_named(msg_enable, apne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); + +struct net_device * __init apne_probe(int unit) +{ + struct net_device *dev; + struct ei_device *ei_local; + +#ifndef MANUAL_CONFIG + char tuple[8]; +#endif + int err; + + if (!MACH_IS_AMIGA) + return ERR_PTR(-ENODEV); + + if (apne_owned) + return ERR_PTR(-ENODEV); + + if ( !(AMIGAHW_PRESENT(PCMCIA)) ) + return ERR_PTR(-ENODEV); + + pr_info("Looking for PCMCIA ethernet card : "); + + /* check if a card is inserted */ + if (!(PCMCIA_INSERTED)) { + pr_cont("NO PCMCIA card inserted\n"); + return ERR_PTR(-ENODEV); + } + + dev = alloc_ei_netdev(); + if (!dev) + return ERR_PTR(-ENOMEM); + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + ei_local = netdev_priv(dev); + ei_local->msg_enable = apne_msg_enable; + + /* disable pcmcia irq for readtuple */ + pcmcia_disable_irq(); + +#ifndef MANUAL_CONFIG + if ((pcmcia_copy_tuple(CISTPL_FUNCID, tuple, 8) < 3) || + (tuple[2] != CISTPL_FUNCID_NETWORK)) { + pr_cont("not an ethernet card\n"); + /* XXX: shouldn't we re-enable irq here? */ + free_netdev(dev); + return ERR_PTR(-ENODEV); + } +#endif + + pr_cont("ethernet PCMCIA card inserted\n"); + + if (!init_pcmcia()) { + /* XXX: shouldn't we re-enable irq here? 
*/ + free_netdev(dev); + return ERR_PTR(-ENODEV); + } + + if (!request_region(IOBASE, 0x20, DRV_NAME)) { + free_netdev(dev); + return ERR_PTR(-EBUSY); + } + + err = apne_probe1(dev, IOBASE); + if (err) { + release_region(IOBASE, 0x20); + free_netdev(dev); + return ERR_PTR(err); + } + err = register_netdev(dev); + if (!err) + return dev; + + pcmcia_disable_irq(); + free_irq(IRQ_AMIGA_PORTS, dev); + pcmcia_reset(); + release_region(IOBASE, 0x20); + free_netdev(dev); + return ERR_PTR(err); +} + +static int __init apne_probe1(struct net_device *dev, int ioaddr) +{ + int i; + unsigned char SA_prom[32]; + int wordlength = 2; + const char *name = NULL; + int start_page, stop_page; +#ifndef MANUAL_HWADDR0 + int neX000, ctron; +#endif + static unsigned version_printed; + + if ((apne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) + netdev_info(dev, version); + + netdev_info(dev, "PCMCIA NE*000 ethercard probe"); + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + { unsigned long reset_start_time = jiffies; + + outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + while ((inb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + pr_cont(" not found (no reset ack).\n"); + return -ENODEV; + } + + outb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ + } + +#ifndef MANUAL_HWADDR0 + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned long value, offset; } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/ + {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_IMR}, /* Mask completion irq. */ + {0xFF, NE_EN0_ISR}, + {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, NE_EN0_RCNTLO}, + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, NE_EN0_RSARHI}, + {E8390_RREAD+E8390_START, NE_CMD}, + }; + for (i = 0; i < ARRAY_SIZE(program_seq); i++) { + outb(program_seq[i].value, ioaddr + program_seq[i].offset); + } + + } + for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) { + SA_prom[i] = inb(ioaddr + NE_DATAPORT); + SA_prom[i+1] = inb(ioaddr + NE_DATAPORT); + if (SA_prom[i] != SA_prom[i+1]) + wordlength = 1; + } + + /* At this point, wordlength *only* tells us if the SA_prom is doubled + up or not because some broken PCI cards don't respect the byte-wide + request in program_seq above, and hence don't have doubled up values. + These broken cards would otherwise be detected as an ne1000. */ + + if (wordlength == 2) + for (i = 0; i < 16; i++) + SA_prom[i] = SA_prom[i+i]; + + if (wordlength == 2) { + /* We must set the 8390 for word mode. */ + outb(0x49, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + } else { + start_page = NE1SM_START_PG; + stop_page = NE1SM_STOP_PG; + } + + neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); + ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); + + /* Set up the rest of the parameters. */ + if (neX000) { + name = (wordlength == 2) ? "NE2000" : "NE1000"; + } else if (ctron) { + name = (wordlength == 2) ? "Ctron-8" : "Ctron-16"; + start_page = 0x01; + stop_page = (wordlength == 2) ? 
0x40 : 0x20; + } else { + pr_cont(" not found.\n"); + return -ENXIO; + + } + +#else + wordlength = 2; + /* We must set the 8390 for word mode. */ + outb(0x49, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + SA_prom[0] = MANUAL_HWADDR0; + SA_prom[1] = MANUAL_HWADDR1; + SA_prom[2] = MANUAL_HWADDR2; + SA_prom[3] = MANUAL_HWADDR3; + SA_prom[4] = MANUAL_HWADDR4; + SA_prom[5] = MANUAL_HWADDR5; + name = "NE2000"; +#endif + + dev->base_addr = ioaddr; + dev->irq = IRQ_AMIGA_PORTS; + dev->netdev_ops = &ei_netdev_ops; + + /* Install the Interrupt handler */ + i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); + if (i) return i; + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = SA_prom[i]; + + pr_cont(" %pM\n", dev->dev_addr); + + netdev_info(dev, "%s found.\n", name); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = (wordlength == 2); + + ei_status.rx_start_page = start_page + TX_PAGES; + + ei_status.reset_8390 = &apne_reset_8390; + ei_status.block_input = &apne_block_input; + ei_status.block_output = &apne_block_output; + ei_status.get_8390_hdr = &apne_get_8390_hdr; + + NS8390_init(dev, 0); + + pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */ + pcmcia_enable_irq(); + + apne_owned = 1; + + return 0; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ +static void +apne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + struct ei_device *ei_local = netdev_priv(dev); + + init_pcmcia(); + + netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies); + + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + netdev_err(dev, "ne_reset_8390() did not complete.\n"); + break; + } + outb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void +apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + + int nic_base = dev->base_addr; + int cnt; + char *ptrc; + short *ptrs; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d][intr:%d].\n", + ei_status.dmaing, ei_status.irqlock, dev->irq); + return; + } + + ei_status.dmaing |= 0x01; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(ENISR_RDC, nic_base + NE_EN0_ISR); + outb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); + outb(0, nic_base + NE_EN0_RCNTHI); + outb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ + outb(ring_page, nic_base + NE_EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.word16) { + ptrs = (short*)hdr; + for(cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++) + *ptrs++ = inw(NE_BASE + NE_DATAPORT); + } else { + ptrc = (char*)hdr; + for(cnt = 0; cnt < sizeof(struct e8390_pkt_hdr); cnt++) + *ptrc++ = inb(NE_BASE + NE_DATAPORT); + } + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. 
*/ + ei_status.dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void +apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int nic_base = dev->base_addr; + char *buf = skb->data; + char *ptrc; + short *ptrs; + int cnt; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d][intr:%d].\n", + ei_status.dmaing, ei_status.irqlock, dev->irq); + return; + } + ei_status.dmaing |= 0x01; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(ENISR_RDC, nic_base + NE_EN0_ISR); + outb(count & 0xff, nic_base + NE_EN0_RCNTLO); + outb(count >> 8, nic_base + NE_EN0_RCNTHI); + outb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); + outb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + ptrs = (short*)buf; + for (cnt = 0; cnt < (count>>1); cnt++) + *ptrs++ = inw(NE_BASE + NE_DATAPORT); + if (count & 0x01) { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); + } + } else { + ptrc = buf; + for (cnt = 0; cnt < count; cnt++) + *ptrc++ = inb(NE_BASE + NE_DATAPORT); + } + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void +apne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + int nic_base = NE_BASE; + unsigned long dma_start; + char *ptrc; + short *ptrs; + int cnt; + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d][intr:%d]\n", + ei_status.dmaing, ei_status.irqlock, dev->irq); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); + + /* Now the normal output. */ + outb(count & 0xff, nic_base + NE_EN0_RCNTLO); + outb(count >> 8, nic_base + NE_EN0_RCNTHI); + outb(0x00, nic_base + NE_EN0_RSARLO); + outb(start_page, nic_base + NE_EN0_RSARHI); + + outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + ptrs = (short*)buf; + for (cnt = 0; cnt < count>>1; cnt++) + outw(*ptrs++, NE_BASE+NE_DATAPORT); + } else { + ptrc = (char*)buf; + for (cnt = 0; cnt < count; cnt++) + outb(*ptrc++, NE_BASE + NE_DATAPORT); + } + + dma_start = jiffies; + + while ((inb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + apne_reset_8390(dev); + NS8390_init(dev,1); + break; + } + + outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. 
*/ + ei_status.dmaing &= ~0x01; +} + +static irqreturn_t apne_interrupt(int irq, void *dev_id) +{ + unsigned char pcmcia_intreq; + + if (!(gayle.inten & GAYLE_IRQ_IRQ)) + return IRQ_NONE; + + pcmcia_intreq = pcmcia_get_intreq(); + + if (!(pcmcia_intreq & GAYLE_IRQ_IRQ)) { + pcmcia_ack_int(pcmcia_intreq); + return IRQ_NONE; + } + if (apne_msg_enable & NETIF_MSG_INTR) + pr_debug("pcmcia intreq = %x\n", pcmcia_intreq); + pcmcia_disable_irq(); /* to get rid of the sti() within ei_interrupt */ + ei_interrupt(irq, dev_id); + pcmcia_ack_int(pcmcia_get_intreq()); + pcmcia_enable_irq(); + return IRQ_HANDLED; +} + +#ifdef MODULE +static struct net_device *apne_dev; + +static int __init apne_module_init(void) +{ + apne_dev = apne_probe(-1); + return PTR_ERR_OR_ZERO(apne_dev); +} + +static void __exit apne_module_exit(void) +{ + unregister_netdev(apne_dev); + + pcmcia_disable_irq(); + + free_irq(IRQ_AMIGA_PORTS, apne_dev); + + pcmcia_reset(); + + release_region(IOBASE, 0x20); + + free_netdev(apne_dev); +} +module_init(apne_module_init); +module_exit(apne_module_exit); +#endif + +static int init_pcmcia(void) +{ + u_char config; +#ifndef MANUAL_CONFIG + u_char tuple[32]; + int offset_len; +#endif + u_long offset; + + pcmcia_reset(); + pcmcia_program_voltage(PCMCIA_0V); + pcmcia_access_speed(PCMCIA_SPEED_250NS); + pcmcia_write_enable(); + +#ifdef MANUAL_CONFIG + config = MANUAL_CONFIG; +#else + /* get and write config byte to enable IO port */ + + if (pcmcia_copy_tuple(CISTPL_CFTABLE_ENTRY, tuple, 32) < 3) + return 0; + + config = tuple[2] & 0x3f; +#endif +#ifdef MANUAL_OFFSET + offset = MANUAL_OFFSET; +#else + if (pcmcia_copy_tuple(CISTPL_CONFIG, tuple, 32) < 6) + return 0; + + offset_len = (tuple[2] & 0x3) + 1; + offset = 0; + while(offset_len--) { + offset = (offset << 8) | tuple[4+offset_len]; + } +#endif + + out_8(GAYLE_ATTRIBUTE+offset, config); + + return 1; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c new file mode 100644 index 000000000..0443654f0 --- /dev/null +++ b/drivers/net/ethernet/8390/ax88796.c @@ -0,0 +1,1015 @@ +/* drivers/net/ethernet/8390/ax88796.c + * + * Copyright 2005,2007 Simtec Electronics + * Ben Dooks + * + * Asix AX88796 10/100 Ethernet controller support + * Based on ne.c, by Donald Becker, et-al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +/* Rename the lib8390.c functions to show that they are in this driver */ +#define __ei_open ax_ei_open +#define __ei_close ax_ei_close +#define __ei_poll ax_ei_poll +#define __ei_start_xmit ax_ei_start_xmit +#define __ei_tx_timeout ax_ei_tx_timeout +#define __ei_get_stats ax_ei_get_stats +#define __ei_set_multicast_list ax_ei_set_multicast_list +#define __ei_interrupt ax_ei_interrupt +#define ____alloc_ei_netdev ax__alloc_ei_netdev +#define __NS8390_init ax_NS8390_init + +/* force unsigned long back to 'void __iomem *' */ +#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) + +#define ei_inb(_a) readb(ax_convert_addr(_a)) +#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) + +#define ei_inb_p(_a) ei_inb(_a) +#define ei_outb_p(_v, _a) ei_outb(_v, _a) + +/* define EI_SHIFT() to take into account our register offsets */ +#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) + +/* Ensure we have our RCR base value */ +#define AX88796_PLATFORM + +static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electronics\n"; + +#include "lib8390.c" + +#define DRV_NAME "ax88796" +#define DRV_VERSION "1.00" + +/* from ne.c */ +#define NE_CMD EI_SHIFT(0x00) +#define NE_RESET EI_SHIFT(0x1f) +#define NE_DATAPORT EI_SHIFT(0x10) + +#define NE1SM_START_PG 0x20 /* First page of TX buffer */ +#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#define AX_GPOC_PPDSET BIT(6) + +static u32 ax_msg_enable; + +/* device private data */ + +struct ax_device { + struct mii_bus *mii_bus; + struct mdiobb_ctrl bb_ctrl; + struct phy_device *phy_dev; + void __iomem *addr_memr; + u8 reg_memr; + int link; + int speed; + int duplex; + + void __iomem *map2; + const struct ax_plat_data *plat; + + unsigned char running; + unsigned char resume_open; + unsigned int irqflags; + + u32 reg_offsets[0x20]; +}; + +static inline struct ax_device *to_ax_dev(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + return (struct ax_device *)(ei_local + 1); +} + +/* + * ax_initial_check + * + * do an initial probe for the card to check whether it exists + * and is functional + */ +static int ax_initial_check(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *ioaddr = ei_local->mem; + int reg0; + int regd; + + reg0 = ei_inb(ioaddr); + if (reg0 == 0xFF) + return -ENODEV; + + ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); + regd = ei_inb(ioaddr + 0x0d); + ei_outb(0xff, ioaddr + 0x0d); + ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD); + ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { + ei_outb(reg0, ioaddr); + ei_outb(regd, ioaddr + 0x0d); /* Restore the old values. */ + return -ENODEV; + } + + return 0; +} + +/* + * Hard reset the card. This used to pause for the same period that a + * 8390 reset command required, but that shouldn't be necessary. 
+ */ +static void ax_reset_8390(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long reset_start_time = jiffies; + void __iomem *addr = (void __iomem *)dev->base_addr; + + netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies); + + ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); + + ei_local->txing = 0; + ei_local->dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { + if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) { + netdev_warn(dev, "%s: did not complete.\n", __func__); + break; + } + } + + ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */ +} + + +static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in %s " + "[DMAstat:%d][irqlock:%d].\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + ei_outb(0, nic_base + EN0_RCNTHI); + ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ + ei_outb(ring_page, nic_base + EN0_RSARHI); + ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_local->word16) + ioread16_rep(nic_base + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr) >> 1); + else + ioread8_rep(nic_base + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)); + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_local->dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + + +/* + * Block input and output, similar to the Crynwr packet driver. If + * you are porting to a new ethercard, look at the packet driver + * source for hints. The NEx000 doesn't share the on-board packet + * memory -- you have to put the packet out through the "remote DMA" + * dataport using ei_outb. + */ +static void ax_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + char *buf = skb->data; + + if (ei_local->dmaing) { + netdev_err(dev, + "DMAing conflict in %s " + "[DMAstat:%d][irqlock:%d].\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + + ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD); + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); + ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); + ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_local->word16) { + ioread16_rep(nic_base + NE_DATAPORT, buf, count >> 1); + if (count & 0x01) + buf[count-1] = ei_inb(nic_base + NE_DATAPORT); + + } else { + ioread8_rep(nic_base + NE_DATAPORT, buf, count); + } + + ei_local->dmaing &= ~1; +} + +static void ax_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + unsigned long dma_start; + + /* + * Round the count up for word writes. Do we need to do this? + * What effect will an odd byte count have on the 8390? I + * should check someday. 
+ */ + if (ei_local->word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in %s." + "[DMAstat:%d][irqlock:%d]\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(0x00, nic_base + EN0_RSARLO); + ei_outb(start_page, nic_base + EN0_RSARHI); + + ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_local->word16) + iowrite16_rep(nic_base + NE_DATAPORT, buf, count >> 1); + else + iowrite8_rep(nic_base + NE_DATAPORT, buf, count); + + dma_start = jiffies; + + while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { + if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + ax_reset_8390(dev); + ax_NS8390_init(dev, 1); + break; + } + } + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_local->dmaing &= ~0x01; +} + +/* definitions for accessing MII/EEPROM interface */ + +#define AX_MEMR EI_SHIFT(0x14) +#define AX_MEMR_MDC BIT(0) +#define AX_MEMR_MDIR BIT(1) +#define AX_MEMR_MDI BIT(2) +#define AX_MEMR_MDO BIT(3) +#define AX_MEMR_EECS BIT(4) +#define AX_MEMR_EEI BIT(5) +#define AX_MEMR_EEO BIT(6) +#define AX_MEMR_EECLK BIT(7) + +static void ax_handle_link_change(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + int status_change = 0; + + if (phy_dev->link && ((ax->speed != phy_dev->speed) || + (ax->duplex != phy_dev->duplex))) { + + ax->speed = phy_dev->speed; + ax->duplex = phy_dev->duplex; + status_change = 1; + } + + if (phy_dev->link != ax->link) { + if (!phy_dev->link) { + ax->speed = 0; + ax->duplex = -1; + } + ax->link = phy_dev->link; + + status_change = 1; + } + + if (status_change) + phy_print_status(phy_dev); +} + +static int ax_mii_probe(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = NULL; + int ret; + + /* find the first phy */ + phy_dev = phy_find_first(ax->mii_bus); + if (!phy_dev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + /* mask with MAC supported features */ + phy_dev->supported &= PHY_BASIC_FEATURES; + phy_dev->advertising = phy_dev->supported; + + ax->phy_dev = phy_dev; + + netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); + + return 0; +} + +static void ax_phy_switch(struct net_device *dev, int on) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + + u8 reg_gpoc = ax->plat->gpoc_val; + + if (!!on) + reg_gpoc &= ~AX_GPOC_PPDSET; + else + reg_gpoc |= AX_GPOC_PPDSET; + + ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); +} + +static int ax_open(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + int ret; + + netdev_dbg(dev, "open\n"); + + ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, + dev->name, dev); + if (ret) + goto failed_request_irq; + + /* turn the phy on (if turned 
off) */ + ax_phy_switch(dev, 1); + + ret = ax_mii_probe(dev); + if (ret) + goto failed_mii_probe; + phy_start(ax->phy_dev); + + ret = ax_ei_open(dev); + if (ret) + goto failed_ax_ei_open; + + ax->running = 1; + + return 0; + + failed_ax_ei_open: + phy_disconnect(ax->phy_dev); + failed_mii_probe: + ax_phy_switch(dev, 0); + free_irq(dev->irq, dev); + failed_request_irq: + return ret; +} + +static int ax_close(struct net_device *dev) +{ + struct ax_device *ax = to_ax_dev(dev); + + netdev_dbg(dev, "close\n"); + + ax->running = 0; + wmb(); + + ax_ei_close(dev); + + /* turn the phy off */ + ax_phy_switch(dev, 0); + phy_disconnect(ax->phy_dev); + + free_irq(dev->irq, dev); + return 0; +} + +static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phy_dev) + return -ENODEV; + + return phy_mii_ioctl(phy_dev, req, cmd); +} + +/* ethtool ops */ + +static void ax_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct platform_device *pdev = to_platform_device(dev->dev.parent); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); +} + +static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + + if (!phy_dev) + return -ENODEV; + + return phy_ethtool_gset(phy_dev, cmd); +} + +static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + + if (!phy_dev) + return -ENODEV; + + return phy_ethtool_sset(phy_dev, cmd); +} + +static u32 ax_get_msglevel(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + return ei_local->msg_enable; +} + +static void ax_set_msglevel(struct net_device *dev, u32 v) +{ + struct ei_device *ei_local = netdev_priv(dev); + + ei_local->msg_enable = v; +} + +static const struct ethtool_ops ax_ethtool_ops = { + .get_drvinfo = ax_get_drvinfo, + .get_settings = ax_get_settings, + .set_settings = ax_set_settings, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, + .get_msglevel = ax_get_msglevel, + .set_msglevel = ax_set_msglevel, +}; + +#ifdef CONFIG_AX88796_93CX6 +static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + eeprom->reg_data_in = reg & AX_MEMR_EEI; + eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */ + eeprom->reg_data_clock = reg & AX_MEMR_EECLK; + eeprom->reg_chip_select = reg & AX_MEMR_EECS; +} + +static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS); + + if (eeprom->reg_data_in) + reg |= AX_MEMR_EEI; + if (eeprom->reg_data_clock) + reg |= AX_MEMR_EECLK; + if (eeprom->reg_chip_select) + reg |= AX_MEMR_EECS; + + ei_outb(reg, ei_local->mem + AX_MEMR); + udelay(10); +} +#endif + +static const struct net_device_ops ax_netdev_ops = { + .ndo_open = ax_open, + .ndo_stop = ax_close, + .ndo_do_ioctl = ax_ioctl, + + .ndo_start_xmit = ax_ei_start_xmit, + .ndo_tx_timeout = ax_ei_tx_timeout, + .ndo_get_stats = ax_ei_get_stats, + .ndo_set_rx_mode = 
ax_ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ax_ei_poll, +#endif +}; + +static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (level) + ax->reg_memr |= AX_MEMR_MDC; + else + ax->reg_memr &= ~AX_MEMR_MDC; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (output) + ax->reg_memr &= ~AX_MEMR_MDIR; + else + ax->reg_memr |= AX_MEMR_MDIR; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (value) + ax->reg_memr |= AX_MEMR_MDO; + else + ax->reg_memr &= ~AX_MEMR_MDO; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + int reg_memr = ei_inb(ax->addr_memr); + + return reg_memr & AX_MEMR_MDI ? 1 : 0; +} + +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = ax_bb_mdc, + .set_mdio_dir = ax_bb_dir, + .set_mdio_data = ax_bb_set_data, + .get_mdio_data = ax_bb_get_data, +}; + +/* setup code */ + +static int ax_mii_init(struct net_device *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + int err, i; + + ax->bb_ctrl.ops = &bb_ops; + ax->addr_memr = ei_local->mem + AX_MEMR; + ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); + if (!ax->mii_bus) { + err = -ENOMEM; + goto out; + } + + ax->mii_bus->name = "ax88796_mii_bus"; + ax->mii_bus->parent = dev->dev.parent; + snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!ax->mii_bus->irq) { + err = -ENOMEM; + goto out_free_mdio_bitbang; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + ax->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(ax->mii_bus); + if (err) + goto out_free_irq; + + return 0; + + out_free_irq: + kfree(ax->mii_bus->irq); + out_free_mdio_bitbang: + free_mdio_bitbang(ax->mii_bus); + out: + return err; +} + +static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) +{ + void __iomem *ioaddr = ei_local->mem; + struct ax_device *ax = to_ax_dev(dev); + + /* Select page 0 */ + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD); + + /* set to byte access */ + ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG); + ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17)); +} + +/* + * ax_init_dev + * + * initialise the specified device, taking care to note the MAC + * address it may already have (if configured), ensure + * the device is ready to be used by lib8390.c and registerd with + * the network layer. 
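The ax_bb_* callbacks above drive the MDIO lines by read-modify-writing single bits of the MEMR register (the AX_MEMR_MDC/MDIR/MDI/MDO defines earlier in this file). A minimal, stand-alone sketch of that pattern, with a plain variable standing in for the real register (fake_memr and the io model here are illustrative, not part of the driver):

/* Hedged sketch: user-space model of the MEMR bit-banging used by the
 * mdiobb callbacks above.  "fake_memr" stands in for the real device
 * register; the bit positions mirror the AX_MEMR_* defines. */
#include <stdio.h>
#include <stdint.h>

#define MEMR_MDC  (1 << 0)   /* clock out to the PHY        */
#define MEMR_MDIR (1 << 1)   /* 1 = tristate MDO (read dir) */
#define MEMR_MDI  (1 << 2)   /* data read back from the PHY */
#define MEMR_MDO  (1 << 3)   /* data driven to the PHY      */

static uint8_t fake_memr;    /* stands in for ei_inb/ei_outb on MEMR */

static void set_mdc(int level)
{
	if (level)
		fake_memr |= MEMR_MDC;
	else
		fake_memr &= ~MEMR_MDC;
	printf("MEMR=0x%02x (MDC=%d)\n", fake_memr, level);
}

static void set_mdio_dir(int output)
{
	/* MDIR selects the input direction, so it is cleared to drive MDO */
	if (output)
		fake_memr &= ~MEMR_MDIR;
	else
		fake_memr |= MEMR_MDIR;
}

static void set_mdio_data(int value)
{
	if (value)
		fake_memr |= MEMR_MDO;
	else
		fake_memr &= ~MEMR_MDO;
}

int main(void)
{
	int i;

	/* clock three arbitrary data bits out, MSB first */
	set_mdio_dir(1);
	for (i = 2; i >= 0; i--) {
		set_mdio_data((0x5 >> i) & 1);
		set_mdc(0);
		set_mdc(1);          /* data is sampled on the rising edge */
	}
	return 0;
}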
+ */ +static int ax_init_dev(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + void __iomem *ioaddr = ei_local->mem; + unsigned int start_page; + unsigned int stop_page; + int ret; + int i; + + ret = ax_initial_check(dev); + if (ret) + goto err_out; + + /* setup goes here */ + + ax_initial_setup(dev, ei_local); + + /* read the mac from the card prom if we need it */ + + if (ax->plat->flags & AXFLG_HAS_EEPROM) { + unsigned char SA_prom[32]; + + for (i = 0; i < sizeof(SA_prom); i += 2) { + SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); + SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); + } + + if (ax->plat->wordlength == 2) + for (i = 0; i < 16; i++) + SA_prom[i] = SA_prom[i+i]; + + memcpy(dev->dev_addr, SA_prom, ETH_ALEN); + } + +#ifdef CONFIG_AX88796_93CX6 + if (ax->plat->flags & AXFLG_HAS_93CX6) { + unsigned char mac_addr[ETH_ALEN]; + struct eeprom_93cx6 eeprom; + + eeprom.data = ei_local; + eeprom.register_read = ax_eeprom_register_read; + eeprom.register_write = ax_eeprom_register_write; + eeprom.width = PCI_EEPROM_WIDTH_93C56; + + eeprom_93cx6_multiread(&eeprom, 0, + (__le16 __force *)mac_addr, + sizeof(mac_addr) >> 1); + + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + } +#endif + if (ax->plat->wordlength == 2) { + /* We must set the 8390 for word mode. */ + ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + } else { + start_page = NE1SM_START_PG; + stop_page = NE1SM_STOP_PG; + } + + /* load the mac-address from the device */ + if (ax->plat->flags & AXFLG_MAC_FROMDEV) { + ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, + ei_local->mem + E8390_CMD); /* 0x61 */ + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = + ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); + } + + if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && + ax->plat->mac_addr) + memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN); + + ax_reset_8390(dev); + + ei_local->name = "AX88796"; + ei_local->tx_start_page = start_page; + ei_local->stop_page = stop_page; + ei_local->word16 = (ax->plat->wordlength == 2); + ei_local->rx_start_page = start_page + TX_PAGES; + +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. */ + ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_local->reset_8390 = &ax_reset_8390; + ei_local->block_input = &ax_block_input; + ei_local->block_output = &ax_block_output; + ei_local->get_8390_hdr = &ax_get_8390_hdr; + ei_local->priv = 0; + ei_local->msg_enable = ax_msg_enable; + + dev->netdev_ops = &ax_netdev_ops; + dev->ethtool_ops = &ax_ethtool_ops; + + ret = ax_mii_init(dev); + if (ret) + goto out_irq; + + ax_NS8390_init(dev, 0); + + ret = register_netdev(dev); + if (ret) + goto out_irq; + + netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", + ei_local->word16 ? 
16 : 8, dev->irq, dev->base_addr, + dev->dev_addr); + + return 0; + + out_irq: + /* cleanup irq */ + free_irq(dev->irq, dev); + err_out: + return ret; +} + +static int ax_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + struct resource *mem; + + unregister_netdev(dev); + free_irq(dev->irq, dev); + + iounmap(ei_local->mem); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); + + if (ax->map2) { + iounmap(ax->map2); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + release_mem_region(mem->start, resource_size(mem)); + } + + free_netdev(dev); + + return 0; +} + +/* + * ax_probe + * + * This is the entry point when the platform device system uses to + * notify us of a new device to attach to. Allocate memory, find the + * resources and information passed, and map the necessary registers. + */ +static int ax_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct ei_device *ei_local; + struct ax_device *ax; + struct resource *irq, *mem, *mem2; + unsigned long mem_size, mem2_size = 0; + int ret = 0; + + dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); + if (dev == NULL) + return -ENOMEM; + + /* ok, let's setup our device */ + SET_NETDEV_DEV(dev, &pdev->dev); + ei_local = netdev_priv(dev); + ax = to_ax_dev(dev); + + ax->plat = dev_get_platdata(&pdev->dev); + platform_set_drvdata(pdev, dev); + + ei_local->rxcr_base = ax->plat->rcr_val; + + /* find the platform resources */ + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) { + dev_err(&pdev->dev, "no IRQ specified\n"); + ret = -ENXIO; + goto exit_mem; + } + + dev->irq = irq->start; + ax->irqflags = irq->flags & IRQF_TRIGGER_MASK; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no MEM specified\n"); + ret = -ENXIO; + goto exit_mem; + } + + mem_size = resource_size(mem); + + /* + * setup the register offsets from either the platform data or + * by using the size of the resource provided + */ + if (ax->plat->reg_offsets) + ei_local->reg_offset = ax->plat->reg_offsets; + else { + ei_local->reg_offset = ax->reg_offsets; + for (ret = 0; ret < 0x18; ret++) + ax->reg_offsets[ret] = (mem_size / 0x18) * ret; + } + + if (!request_mem_region(mem->start, mem_size, pdev->name)) { + dev_err(&pdev->dev, "cannot reserve registers\n"); + ret = -ENXIO; + goto exit_mem; + } + + ei_local->mem = ioremap(mem->start, mem_size); + dev->base_addr = (unsigned long)ei_local->mem; + + if (ei_local->mem == NULL) { + dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem); + + ret = -ENXIO; + goto exit_req; + } + + /* look for reset area */ + mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!mem2) { + if (!ax->plat->reg_offsets) { + for (ret = 0; ret < 0x20; ret++) + ax->reg_offsets[ret] = (mem_size / 0x20) * ret; + } + } else { + mem2_size = resource_size(mem2); + + if (!request_mem_region(mem2->start, mem2_size, pdev->name)) { + dev_err(&pdev->dev, "cannot reserve registers\n"); + ret = -ENXIO; + goto exit_mem1; + } + + ax->map2 = ioremap(mem2->start, mem2_size); + if (!ax->map2) { + dev_err(&pdev->dev, "cannot map reset register\n"); + ret = -ENXIO; + goto exit_mem2; + } + + ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem; + } + + /* got resources, now initialise and register device */ + ret = ax_init_dev(dev); + if (!ret) + return 0; + + if (!ax->map2) + goto exit_mem1; + + 
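On word-wide (16-bit) cards the station-address PROM read in ax_init_dev() above comes back with every byte duplicated, which is why the wordlength == 2 path collapses it with SA_prom[i] = SA_prom[i+i]. A small stand-alone check, using a made-up PROM image, shows the effect:

/* Hedged illustration of the SA_prom collapse in ax_init_dev() above:
 * on 16-bit cards each PROM byte shows up twice, so index 2*i recovers
 * the real MAC bytes.  The PROM contents here are invented. */
#include <stdio.h>

int main(void)
{
	unsigned char sa_prom[32] = {
		0x00, 0x00, 0x11, 0x11, 0x22, 0x22,   /* MAC bytes, doubled */
		0x33, 0x33, 0x44, 0x44, 0x55, 0x55,
	};
	unsigned char mac[6];
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = sa_prom[i + i];      /* same as SA_prom[i+i] */

	printf("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}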
iounmap(ax->map2); + + exit_mem2: + release_mem_region(mem2->start, mem2_size); + + exit_mem1: + iounmap(ei_local->mem); + + exit_req: + release_mem_region(mem->start, mem_size); + + exit_mem: + free_netdev(dev); + + return ret; +} + +/* suspend and resume */ + +#ifdef CONFIG_PM +static int ax_suspend(struct platform_device *dev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(dev); + struct ax_device *ax = to_ax_dev(ndev); + + ax->resume_open = ax->running; + + netif_device_detach(ndev); + ax_close(ndev); + + return 0; +} + +static int ax_resume(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ax_device *ax = to_ax_dev(ndev); + + ax_initial_setup(ndev, netdev_priv(ndev)); + ax_NS8390_init(ndev, ax->resume_open); + netif_device_attach(ndev); + + if (ax->resume_open) + ax_open(ndev); + + return 0; +} + +#else +#define ax_suspend NULL +#define ax_resume NULL +#endif + +static struct platform_driver axdrv = { + .driver = { + .name = "ax88796", + }, + .probe = ax_probe, + .remove = ax_remove, + .suspend = ax_suspend, + .resume = ax_resume, +}; + +module_platform_driver(axdrv); + +MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver"); +MODULE_AUTHOR("Ben Dooks, "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:ax88796"); diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c new file mode 100644 index 000000000..ec6eac1f8 --- /dev/null +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -0,0 +1,1703 @@ +/*====================================================================== + + A PCMCIA ethernet driver for Asix AX88190-based cards + + The Asix AX88190 is a NS8390-derived chipset with a few nasty + idiosyncracies that make it very inconvenient to support with a + standard 8390 driver. This driver is based on pcnet_cs, with the + tweaked 8390 code grafted on the end. Much of what I did was to + clean up and update a similar driver supplied by Asix, which was + adapted by William Lee, william@asix.com.tw. + + Copyright (C) 2001 David A. Hinds -- dahinds@users.sourceforge.net + + axnet_cs.c 1.28 2002/06/29 06:27:37 + + The network driver code is based on Donald Becker's NE2000 code: + + Written 1992,1993 by Donald Becker. + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + Donald Becker may be reached at becker@scyld.com + +======================================================================*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "8390.h" + +#include +#include +#include +#include + +#include +#include +#include + +#define AXNET_CMD 0x00 +#define AXNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define AXNET_RESET 0x1f /* Issue a read to reset, a write to clear. 
*/ +#define AXNET_MII_EEP 0x14 /* Offset of MII access port */ +#define AXNET_TEST 0x15 /* Offset of TEST Register port */ +#define AXNET_GPIO 0x17 /* Offset of General Purpose Register Port */ + +#define AXNET_START_PG 0x40 /* First page of TX buffer */ +#define AXNET_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#define AXNET_RDC_TIMEOUT 0x02 /* Max wait in jiffies for Tx RDC */ + +#define IS_AX88190 0x0001 +#define IS_AX88790 0x0002 + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_AUTHOR("David Hinds "); +MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + + +/*====================================================================*/ + +static int axnet_config(struct pcmcia_device *link); +static void axnet_release(struct pcmcia_device *link); +static int axnet_open(struct net_device *dev); +static int axnet_close(struct net_device *dev); +static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static struct net_device_stats *get_stats(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void axnet_tx_timeout(struct net_device *dev); +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); +static void ei_watchdog(u_long arg); +static void axnet_reset_8390(struct net_device *dev); + +static int mdio_read(unsigned int addr, int phy_id, int loc); +static void mdio_write(unsigned int addr, int phy_id, int loc, int value); + +static void get_8390_hdr(struct net_device *, + struct e8390_pkt_hdr *, int); +static void block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page); + +static void axnet_detach(struct pcmcia_device *p_dev); + +static void AX88190_init(struct net_device *dev, int startp); +static int ax_open(struct net_device *dev); +static int ax_close(struct net_device *dev); +static irqreturn_t ax_interrupt(int irq, void *dev_id); +static u32 axnet_msg_enable; + +/*====================================================================*/ + +struct axnet_dev { + struct pcmcia_device *p_dev; + caddr_t base; + struct timer_list watchdog; + int stale, fast_poll; + u_short link_status; + u_char duplex_flag; + int phy_id; + int flags; + int active_low; +}; + +static inline struct axnet_dev *PRIV(struct net_device *dev) +{ + void *p = (char *)netdev_priv(dev) + sizeof(struct ei_device); + return p; +} + +static const struct net_device_ops axnet_netdev_ops = { + .ndo_open = axnet_open, + .ndo_stop = axnet_close, + .ndo_do_ioctl = axnet_ioctl, + .ndo_start_xmit = axnet_start_xmit, + .ndo_tx_timeout = axnet_tx_timeout, + .ndo_get_stats = get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int axnet_probe(struct pcmcia_device *link) +{ + struct axnet_dev *info; + struct net_device *dev; + struct ei_device *ei_local; + + dev_dbg(&link->dev, "axnet_attach()\n"); + + dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(struct axnet_dev)); + if (!dev) + return -ENOMEM; + + ei_local = netdev_priv(dev); + ei_local->msg_enable = axnet_msg_enable; + spin_lock_init(&ei_local->page_lock); + + info = PRIV(dev); + info->p_dev = link; + link->priv = dev; + link->config_flags |= CONF_ENABLE_IRQ; + + dev->netdev_ops 
= &axnet_netdev_ops; + + dev->watchdog_timeo = TX_TIMEOUT; + + return axnet_config(link); +} /* axnet_attach */ + +static void axnet_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link); + + unregister_netdev(dev); + + axnet_release(link); + + free_netdev(dev); +} /* axnet_detach */ + +/*====================================================================== + + This probes for a card's hardware address by reading the PROM. + +======================================================================*/ + +static int get_prom(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int i, j; + + /* This is based on drivers/net/ethernet/8390/ne.c */ + struct { + u_char value, offset; + } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x01, EN0_DCFG}, /* Set word-wide access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF|0x40, EN0_RXCR}, /* 0x60 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {0x10, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0400. */ + {0x04, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + /* Not much of a test, but the alternatives are messy */ + if (link->config_base != 0x03c0) + return 0; + + axnet_reset_8390(dev); + mdelay(10); + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + for (i = 0; i < 6; i += 2) { + j = inw(ioaddr + AXNET_DATAPORT); + dev->dev_addr[i] = j & 0xff; + dev->dev_addr[i+1] = j >> 8; + } + return 1; +} /* get_prom */ + +static int try_io_port(struct pcmcia_device *link) +{ + int j, ret; + link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + if (link->resource[0]->end == 32) { + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + /* for master/slave multifunction cards */ + if (link->resource[1]->end > 0) + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + } else { + /* This should be two 16-port windows */ + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; + } + if (link->resource[0]->start == 0) { + for (j = 0; j < 0x400; j += 0x20) { + link->resource[0]->start = j ^ 0x300; + link->resource[1]->start = (j ^ 0x300) + 0x10; + link->io_lines = 16; + ret = pcmcia_request_io(link); + if (ret == 0) + return ret; + } + return ret; + } else { + return pcmcia_request_io(link); + } +} + +static int axnet_configcheck(struct pcmcia_device *p_dev, void *priv_data) +{ + if (p_dev->config_index == 0) + return -EINVAL; + + p_dev->config_index = 0x05; + if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) + return -ENODEV; + + return try_io_port(p_dev); +} + +static int axnet_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct axnet_dev *info = PRIV(dev); + int i, j, j2, ret; + + dev_dbg(&link->dev, "axnet_config(0x%p)\n", link); + + /* don't trust the CIS on this; Linksys got it wrong */ + link->config_regs = 0x63; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, axnet_configcheck, NULL); + if (ret != 0) + goto failed; + + if (!link->irq) + goto failed; + + if (resource_size(link->resource[1]) == 8) + link->config_flags |= CONF_ENABLE_SPKR; + + ret = 
pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + if (!get_prom(link)) { + pr_notice("this is not an AX88190 card!\n"); + pr_notice("use pcnet_cs instead.\n"); + goto failed; + } + + ei_status.name = "AX88190"; + ei_status.word16 = 1; + ei_status.tx_start_page = AXNET_START_PG; + ei_status.rx_start_page = AXNET_START_PG + TX_PAGES; + ei_status.stop_page = AXNET_STOP_PG; + ei_status.reset_8390 = axnet_reset_8390; + ei_status.get_8390_hdr = get_8390_hdr; + ei_status.block_input = block_input; + ei_status.block_output = block_output; + + if (inb(dev->base_addr + AXNET_TEST) != 0) + info->flags |= IS_AX88790; + else + info->flags |= IS_AX88190; + + if (info->flags & IS_AX88790) + outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ + + info->active_low = 0; + + for (i = 0; i < 32; i++) { + j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); + j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); + if (j == j2) continue; + if ((j != 0) && (j != 0xffff)) break; + } + + if (i == 32) { + /* Maybe PHY is in power down mode. (PPD_SET = 1) + Bit 2 of CCSR is active low. */ + pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); + for (i = 0; i < 32; i++) { + j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); + j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); + if (j == j2) continue; + if ((j != 0) && (j != 0xffff)) { + info->active_low = 1; + break; + } + } + } + + info->phy_id = (i < 32) ? i : -1; + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n", + ((info->flags & IS_AX88790) ? 7 : 1), + dev->base_addr, dev->irq, dev->dev_addr); + if (info->phy_id != -1) { + netdev_dbg(dev, " MII transceiver at index %d, status %x\n", + info->phy_id, j); + } else { + netdev_notice(dev, " No MII transceivers found!\n"); + } + return 0; + +failed: + axnet_release(link); + return -ENODEV; +} /* axnet_config */ + +static void axnet_release(struct pcmcia_device *link) +{ + pcmcia_disable_device(link); +} + +static int axnet_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int axnet_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct axnet_dev *info = PRIV(dev); + + if (link->open) { + if (info->active_low == 1) + pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); + + axnet_reset_8390(dev); + AX88190_init(dev, 1); + netif_device_attach(dev); + } + + return 0; +} + + +/*====================================================================== + + MII interface support + +======================================================================*/ + +#define MDIO_SHIFT_CLK 0x01 +#define MDIO_DATA_WRITE0 0x00 +#define MDIO_DATA_WRITE1 0x08 +#define MDIO_DATA_READ 0x04 +#define MDIO_MASK 0x0f +#define MDIO_ENB_IN 0x02 + +static void mdio_sync(unsigned int addr) +{ + int bits; + for (bits = 0; bits < 32; bits++) { + outb_p(MDIO_DATA_WRITE1, addr); + outb_p(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr); + } +} + +static int mdio_read(unsigned int addr, int phy_id, int loc) +{ + u_int cmd = (0xf6<<10)|(phy_id<<5)|loc; + int i, retval = 0; + + mdio_sync(addr); + for (i = 14; i >= 0; i--) { + int dat = (cmd&(1< 0; i--) { + outb_p(MDIO_ENB_IN, addr); + retval = (retval << 1) | ((inb_p(addr) & MDIO_DATA_READ) != 0); + outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr); 
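The loop bodies of mdio_read() above appear to have been clipped in transit (the text between "(1<" and the second loop is missing). A reconstruction consistent with the visible command word (0xf6<<10)|(phy_id<<5)|loc and with the usual bit-banged Clause-22 read is sketched below; the I/O accessors are stubbed so it builds and runs stand-alone, whereas the driver uses outb_p()/inb_p() on the MII port:

/* Hedged reconstruction of the bit-banged MII read.  io_out/io_in are
 * stand-ins for the port accessors; the bit values match the MDIO_*
 * defines above. */
#include <stdio.h>

#define MDIO_SHIFT_CLK   0x01
#define MDIO_DATA_WRITE0 0x00
#define MDIO_DATA_WRITE1 0x08
#define MDIO_DATA_READ   0x04
#define MDIO_ENB_IN      0x02

static void io_out(int val, unsigned int addr) { (void)val; (void)addr; }
static int  io_in(unsigned int addr)           { (void)addr; return MDIO_DATA_READ; }

static int mdio_read_sketch(unsigned int addr, int phy_id, int loc)
{
	unsigned int cmd = (0xf6 << 10) | (phy_id << 5) | loc;
	int i, retval = 0;

	/* clock out the start/opcode/address bits, MSB first */
	for (i = 14; i >= 0; i--) {
		int dat = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;

		io_out(dat, addr);
		io_out(dat | MDIO_SHIFT_CLK, addr);
	}
	/* turnaround plus 16 data bits come back from the PHY */
	for (i = 19; i > 0; i--) {
		io_out(MDIO_ENB_IN, addr);
		retval = (retval << 1) | ((io_in(addr) & MDIO_DATA_READ) != 0);
		io_out(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr);
	}
	return (retval >> 1) & 0xffff;
}

int main(void)
{
	printf("reg = 0x%04x\n", mdio_read_sketch(0x300, 0, 1));
	return 0;
}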
+ } + return (retval>>1) & 0xffff; +} + +static void mdio_write(unsigned int addr, int phy_id, int loc, int value) +{ + u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; + int i; + + mdio_sync(addr); + for (i = 31; i >= 0; i--) { + int dat = (cmd&(1<= 0; i--) { + outb_p(MDIO_ENB_IN, addr); + outb_p(MDIO_ENB_IN | MDIO_SHIFT_CLK, addr); + } +} + +/*====================================================================*/ + +static int axnet_open(struct net_device *dev) +{ + int ret; + struct axnet_dev *info = PRIV(dev); + struct pcmcia_device *link = info->p_dev; + unsigned int nic_base = dev->base_addr; + + dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name); + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */ + ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev); + if (ret) + return ret; + + link->open++; + + info->link_status = 0x00; + setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + mod_timer(&info->watchdog, jiffies + HZ); + + return ax_open(dev); +} /* axnet_open */ + +/*====================================================================*/ + +static int axnet_close(struct net_device *dev) +{ + struct axnet_dev *info = PRIV(dev); + struct pcmcia_device *link = info->p_dev; + + dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name); + + ax_close(dev); + free_irq(dev->irq, dev); + + link->open--; + netif_stop_queue(dev); + del_timer_sync(&info->watchdog); + + return 0; +} /* axnet_close */ + +/*====================================================================== + + Hard reset the card. This used to pause for the same period that + a 8390 reset command required, but that shouldn't be necessary. + +======================================================================*/ + +static void axnet_reset_8390(struct net_device *dev) +{ + unsigned int nic_base = dev->base_addr; + int i; + + ei_status.txing = ei_status.dmaing = 0; + + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD); + + outb(inb(nic_base + AXNET_RESET), nic_base + AXNET_RESET); + + for (i = 0; i < 100; i++) { + if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0) + break; + udelay(100); + } + outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. 
*/ + + if (i == 100) + netdev_err(dev, "axnet_reset_8390() did not complete\n"); + +} /* axnet_reset_8390 */ + +/*====================================================================*/ + +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + PRIV(dev)->stale = 0; + return ax_interrupt(irq, dev_id); +} + +static void ei_watchdog(u_long arg) +{ + struct net_device *dev = (struct net_device *)(arg); + struct axnet_dev *info = PRIV(dev); + unsigned int nic_base = dev->base_addr; + unsigned int mii_addr = nic_base + AXNET_MII_EEP; + u_short link; + + if (!netif_device_present(dev)) goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + this, we can limp along even if the interrupt is blocked */ + if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { + if (!info->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + ei_irq_wrapper(dev->irq, dev); + info->fast_poll = HZ; + } + if (info->fast_poll) { + info->fast_poll--; + info->watchdog.expires = jiffies + 1; + add_timer(&info->watchdog); + return; + } + + if (info->phy_id < 0) + goto reschedule; + link = mdio_read(mii_addr, info->phy_id, 1); + if (!link || (link == 0xffff)) { + netdev_info(dev, "MII is missing!\n"); + info->phy_id = -1; + goto reschedule; + } + + link &= 0x0004; + if (link != info->link_status) { + u_short p = mdio_read(mii_addr, info->phy_id, 5); + netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); + if (link) { + info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00; + if (p) + netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n", + (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H'); + else + netdev_info(dev, "link partner did not autonegotiate\n"); + AX88190_init(dev, 1); + } + info->link_status = link; + } + +reschedule: + info->watchdog.expires = jiffies + HZ; + add_timer(&info->watchdog); +} + +/*====================================================================*/ + +static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct axnet_dev *info = PRIV(dev); + struct mii_ioctl_data *data = if_mii(rq); + unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP; + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = info->phy_id; + case SIOCGMIIREG: /* Read MII PHY register. */ + data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); + return 0; + case SIOCSMIIREG: /* Write MII PHY register. 
*/ + mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +/*====================================================================*/ + +static void get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + unsigned int nic_base = dev->base_addr; + + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); + + insw(nic_base + AXNET_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)>>1); + /* Fix for big endian systems */ + hdr->count = le16_to_cpu(hdr->count); + +} + +/*====================================================================*/ + +static void block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned int nic_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int xfer_count = count; + char *buf = skb->data; + + if ((netif_msg_rx_status(ei_local)) && (count != 4)) + netdev_dbg(dev, "[bi=%d]\n", count+4); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); + + insw(nic_base + AXNET_DATAPORT,buf,count>>1); + if (count & 0x01) + buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++; + +} + +/*====================================================================*/ + +static void block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page) +{ + unsigned int nic_base = dev->base_addr; + + pr_debug("%s: [bo=%d]\n", dev->name, count); + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. 
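The rounding questioned in the comment above exists because outsw() moves whole 16-bit words, so an odd byte count is bumped by one and the extra byte merely pads the on-card buffer. A tiny stand-alone check of the arithmetic:

/* Stand-alone check of the odd-count rounding used by block_output(). */
#include <stdio.h>

int main(void)
{
	int lengths[] = { 60, 61, 1513, 1514 };
	int i;

	for (i = 0; i < 4; i++) {
		int count = lengths[i];

		if (count & 0x01)
			count++;
		printf("len %4d -> %4d bytes written, %3d words\n",
		       lengths[i], count, count >> 1);
	}
	return 0;
}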
*/ + if (count & 0x01) + count++; + + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + outb_p(E8390_RWRITE+E8390_START, nic_base + AXNET_CMD); + outsw(nic_base + AXNET_DATAPORT, buf, count>>1); +} + +static const struct pcmcia_device_id axnet_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081), + PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301), + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), + PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), + PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), + PCMCIA_DEVICE_MANF_CARD(0xffff, 0x1090), + PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc), + PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), + PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), + PCMCIA_DEVICE_PROD_ID12("Billionton", "LNA-100B", 0x552ab682, 0xbc3b87e1), + PCMCIA_DEVICE_PROD_ID12("CHEETAH ETHERCARD", "EN2228", 0x00fa7bc8, 0x00e990cc), + PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061), + PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90), + PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), + PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609), + PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058), + PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, axnet_ids); + +static struct pcmcia_driver axnet_cs_driver = { + .owner = THIS_MODULE, + .name = "axnet_cs", + .probe = axnet_probe, + .remove = axnet_detach, + .id_table = axnet_ids, + .suspend = axnet_suspend, + .resume = axnet_resume, +}; +module_pcmcia_driver(axnet_cs_driver); + +/*====================================================================*/ + +/* 8390.c: A general NS8390 ethernet driver core for linux. */ +/* + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + This is the chip-specific code for many 8390-based ethernet adaptors. + This is not a complete driver, it must be combined with board-specific + code such as ne.c, wd.c, 3c503.c, etc. + + Seeing how at least eight drivers use this code, (not counting the + PCMCIA ones either) it is easy to break some card by what seems like + a simple innocent change. Please contact me or Donald if you think + you have found something that needs changing. 
-- PG + + Changelog: + + Paul Gortmaker : remove set_bit lock, other cleanups. + Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to + ei_block_input() for eth_io_copy_and_sum(). + Paul Gortmaker : exchange static int ei_pingpong for a #define, + also add better Tx error handling. + Paul Gortmaker : rewrite Rx overrun handling as per NS specs. + Alexey Kuznetsov : use the 8390's six bit hash multicast filter. + Paul Gortmaker : tweak ANK's above multicast changes a bit. + Paul Gortmaker : update packet statistics for v2.1.x + Alan Cox : support arbitrary stupid port mappings on the + 68K Macintosh. Support >16bit I/O spaces + Paul Gortmaker : add kmod support for auto-loading of the 8390 + module by all drivers that require it. + Alan Cox : Spinlocking work, added 'BUG_83C690' + Paul Gortmaker : Separate out Tx timeout code from Tx path. + + Sources: + The National Semiconductor LAN Databook, and the 3Com 3c503 databook. + + */ + +#include +#include +#include +#include +#include + +#define BUG_83C690 + +/* These are the operational function interfaces to board-specific + routines. + void reset_8390(struct net_device *dev) + Resets the board associated with DEV, including a hardware reset of + the 8390. This is only called when there is a transmit timeout, and + it is always followed by 8390_init(). + void block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) + Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The + "page" value uses the 8390's 256-byte pages. + void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) + Read the 4 byte, page aligned 8390 header. *If* there is a + subsequent read, it will be of the rest of the packet. + void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) + Read COUNT bytes from the packet buffer into the skb data area. Start + reading from RING_OFFSET, the address as the 8390 sees it. This will always + follow the read of the 8390 header. +*/ +#define ei_reset_8390 (ei_local->reset_8390) +#define ei_block_output (ei_local->block_output) +#define ei_block_input (ei_local->block_input) +#define ei_get_8390_hdr (ei_local->get_8390_hdr) + +/* Index to functions. */ +static void ei_tx_intr(struct net_device *dev); +static void ei_tx_err(struct net_device *dev); +static void ei_receive(struct net_device *dev); +static void ei_rx_overrun(struct net_device *dev); + +/* Routines generic to NS8390-based boards. */ +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page); +static void do_set_multicast_list(struct net_device *dev); + +/* + * SMP and the 8390 setup. + * + * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is + * a page register that controls bank and packet buffer access. We guard + * this with ei_local->page_lock. Nobody should assume or set the page other + * than zero when the lock is not held. Lock holders must restore page 0 + * before unlocking. Even pure readers must take the lock to protect in + * page 0. + * + * To make life difficult the chip can also be very slow. We therefore can't + * just use spinlocks. For the longer lockups we disable the irq the device + * sits on and hold the lock. We must hold the lock because there is a dual + * processor case other than interrupts (get stats/set multicast list in + * parallel with each other and transmit). + * + * Note: in theory we can just disable the irq on the card _but_ there is + * a latency on SMP irq delivery. 
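The locking rule spelled out above (every holder of page_lock must leave the chip back on page 0 before unlocking) can be modelled in isolation. A purely illustrative user-space sketch, with a mutex standing in for the spinlock and an integer for the page-select bits:

/* Hedged model of the page-lock discipline described above; build with
 * -pthread.  Nothing here is driver code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static int page_register;               /* 0 or 1, like the 8390 page bits */

static void *touch_page1(void *name)
{
	int i;

	for (i = 0; i < 1000; i++) {
		pthread_mutex_lock(&page_lock);
		page_register = 1;      /* e.g. read counters, load mcast bits */
		page_register = 0;      /* restore page 0 before unlocking     */
		pthread_mutex_unlock(&page_lock);
	}
	printf("%s done, page=%d\n", (char *)name, page_register);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, touch_page1, "stats");
	pthread_create(&b, NULL, touch_page1, "mcast");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}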
So we can easily go "disable irq" "sync irqs" + * enter lock, take the queued irq. So we waddle instead of flying. + * + * Finally by special arrangement for the purpose of being generally + * annoying the transmit function is called bh atomic. That places + * restrictions on the user context callers as disable_irq won't save + * them. + */ + +/** + * ax_open - Open/initialize the board. + * @dev: network device to initialize + * + * This routine goes all-out, setting everything + * up anew at each open, even though many of these registers should only + * need to be set once at boot. + */ +static int ax_open(struct net_device *dev) +{ + unsigned long flags; + struct ei_device *ei_local = netdev_priv(dev); + + /* + * Grab the page lock so we own the register set, then call + * the init function. + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + AX88190_init(dev, 1); + /* Set the flag before we drop the lock, That way the IRQ arrives + after its set and we get no silly warnings */ + netif_start_queue(dev); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + ei_local->irqlock = 0; + return 0; +} + +#define dev_lock(dev) (((struct ei_device *)netdev_priv(dev))->page_lock) + +/** + * ax_close - shut down network device + * @dev: network device to close + * + * Opposite of ax_open(). Only used when "ifconfig down" is done. + */ +static int ax_close(struct net_device *dev) +{ + unsigned long flags; + + /* + * Hold the page lock during close + */ + + spin_lock_irqsave(&dev_lock(dev), flags); + AX88190_init(dev, 0); + spin_unlock_irqrestore(&dev_lock(dev), flags); + netif_stop_queue(dev); + return 0; +} + +/** + * axnet_tx_timeout - handle transmit time out condition + * @dev: network device which has apparently fallen asleep + * + * Called by kernel when device never acknowledges a transmit has + * completed (or failed) - i.e. never posted a Tx related interrupt. + */ + +static void axnet_tx_timeout(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); + unsigned long flags; + + dev->stats.tx_errors++; + + spin_lock_irqsave(&ei_local->page_lock, flags); + txsr = inb(e8390_base+EN0_TSR); + isr = inb(e8390_base+EN0_ISR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", + (txsr & ENTSR_ABT) ? "excess collisions." : + (isr) ? "lost interrupt?" : "cable problem?", + txsr, isr, tickssofar); + + if (!isr && !dev->stats.tx_packets) + { + /* The 8390 probably hasn't gotten on the cable yet. */ + ei_local->interface_num ^= 1; /* Try a different xcvr. */ + } + + /* Ugly but a reset can be slow, yet must be protected */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + + /* Try to restart the card. Perhaps the user has fixed something. */ + ei_reset_8390(dev); + AX88190_init(dev, 1); + + spin_unlock_irqrestore(&ei_local->page_lock, flags); + netif_wake_queue(dev); +} + +/** + * axnet_start_xmit - begin packet transmission + * @skb: packet to be sent + * @dev: network device to which packet is sent + * + * Sends a packet to an 8390 network device. + */ + +static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int length, send_length, output_page; + unsigned long flags; + u8 packet[ETH_ZLEN]; + + netif_stop_queue(dev); + + length = skb->len; + + /* Mask interrupts from the ethercard. 
+ SMP: We have to grab the lock here otherwise the IRQ handler + on another CPU can flip window and race the IRQ mask set. We end + up trashing the mcast filter not disabling irqs if we don't lock */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + outb_p(0x00, e8390_base + EN0_IMR); + + /* + * Slow phase with lock held. + */ + + ei_local->irqlock = 1; + + send_length = max(length, ETH_ZLEN); + + /* + * We have two Tx slots available for use. Find the first free + * slot, and then perform some sanity checks. With two Tx bufs, + * you get very close to transmitting back-to-back packets. With + * only one Tx buf, the transmitter sits idle while you reload the + * card, leaving a substantial gap between each transmitted packet. + */ + + if (ei_local->tx1 == 0) + { + output_page = ei_local->tx_start_page; + ei_local->tx1 = send_length; + if ((netif_msg_tx_queued(ei_local)) && + ei_local->tx2 > 0) + netdev_dbg(dev, + "idle transmitter tx2=%d, lasttx=%d, txing=%d\n", + ei_local->tx2, ei_local->lasttx, + ei_local->txing); + } + else if (ei_local->tx2 == 0) + { + output_page = ei_local->tx_start_page + TX_PAGES/2; + ei_local->tx2 = send_length; + if ((netif_msg_tx_queued(ei_local)) && + ei_local->tx1 > 0) + netdev_dbg(dev, + "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n", + ei_local->tx1, ei_local->lasttx, + ei_local->txing); + } + else + { /* We should never get here. */ + netif_dbg(ei_local, tx_err, dev, + "No Tx buffers free! tx1=%d tx2=%d last=%d\n", + ei_local->tx1, ei_local->tx2, + ei_local->lasttx); + ei_local->irqlock = 0; + netif_stop_queue(dev); + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + dev->stats.tx_errors++; + return NETDEV_TX_BUSY; + } + + /* + * Okay, now upload the packet and trigger a send if the transmitter + * isn't already sending. If it is busy, the interrupt handler will + * trigger the send later, upon receiving a Tx done interrupt. + */ + + if (length == skb->len) + ei_block_output(dev, length, skb->data, output_page); + else { + memset(packet, 0, ETH_ZLEN); + skb_copy_from_linear_data(skb, packet, skb->len); + ei_block_output(dev, length, packet, output_page); + } + + if (! ei_local->txing) + { + ei_local->txing = 1; + NS8390_trigger_send(dev, send_length, output_page); + dev->trans_start = jiffies; + if (output_page == ei_local->tx_start_page) + { + ei_local->tx1 = -1; + ei_local->lasttx = -1; + } + else + { + ei_local->tx2 = -1; + ei_local->lasttx = -2; + } + } + else ei_local->txqueue++; + + if (ei_local->tx1 && ei_local->tx2) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + /* Turn 8390 interrupts back on. */ + ei_local->irqlock = 0; + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + dev_kfree_skb (skb); + dev->stats.tx_bytes += send_length; + + return NETDEV_TX_OK; +} + +/** + * ax_interrupt - handle the interrupts from an 8390 + * @irq: interrupt number + * @dev_id: a pointer to the net_device + * + * Handle the ether interface interrupts. We pull packets from + * the 8390 via the card specific functions and fire them at the networking + * stack. We also handle transmit completions and wake the transmit path if + * necessary. We also update the counters and do other housekeeping as + * needed. 
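The two-slot bookkeeping in axnet_start_xmit() above (tx1/tx2 hold the queued length, 0 means free, -1 means transmitting) decides which on-card page a packet is uploaded to. A stand-alone model of just the slot selection; the TX_PAGES value is an assumption of this sketch, while 0x40 is AXNET_START_PG from earlier in the file:

/* Hedged model of the ping-pong Tx buffer selection. */
#include <stdio.h>

#define TX_PAGES        12      /* assumed here; defined by the 8390 core */
#define TX_START_PAGE   0x40    /* AXNET_START_PG */

static int tx1, tx2;            /* 0 = free, >0 = loaded, -1 = sending */

static int pick_slot(int send_length, int *output_page)
{
	if (tx1 == 0) {
		*output_page = TX_START_PAGE;
		tx1 = send_length;
		return 1;
	} else if (tx2 == 0) {
		*output_page = TX_START_PAGE + TX_PAGES / 2;
		tx2 = send_length;
		return 2;
	}
	return 0;                       /* both busy: caller returns TX_BUSY */
}

int main(void)
{
	int page, i;

	for (i = 0; i < 3; i++) {
		int slot = pick_slot(60 + i, &page);

		if (slot)
			printf("packet %d -> slot %d, page 0x%02x\n",
			       i, slot, page);
		else
			printf("packet %d -> no free Tx buffer\n", i);
	}
	return 0;
}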
+ */ + +static irqreturn_t ax_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + long e8390_base; + int interrupts, nr_serviced = 0, i; + struct ei_device *ei_local; + int handled = 0; + unsigned long flags; + + e8390_base = dev->base_addr; + ei_local = netdev_priv(dev); + + /* + * Protect the irq test too. + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + + if (ei_local->irqlock) { +#if 1 /* This might just be an interrupt for a PCI device sharing this line */ + const char *msg; + /* The "irqlock" check is only for testing. */ + if (ei_local->irqlock) + msg = "Interrupted while interrupts are masked!"; + else + msg = "Reentering the interrupt handler!"; + netdev_info(dev, "%s, isr=%#2x imr=%#2x\n", + msg, + inb_p(e8390_base + EN0_ISR), + inb_p(e8390_base + EN0_IMR)); +#endif + spin_unlock_irqrestore(&ei_local->page_lock, flags); + return IRQ_NONE; + } + + netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n", + inb_p(e8390_base + EN0_ISR)); + + outb_p(0x00, e8390_base + EN0_ISR); + ei_local->irqlock = 1; + + /* !!Assumption!! -- we stay in page 0. Don't break this. */ + while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0 && + ++nr_serviced < MAX_SERVICE) + { + if (!netif_running(dev) || (interrupts == 0xff)) { + netif_warn(ei_local, intr, dev, + "interrupt from stopped card\n"); + outb_p(interrupts, e8390_base + EN0_ISR); + interrupts = 0; + break; + } + handled = 1; + + /* AX88190 bug fix. */ + outb_p(interrupts, e8390_base + EN0_ISR); + for (i = 0; i < 10; i++) { + if (!(inb(e8390_base + EN0_ISR) & interrupts)) + break; + outb_p(0, e8390_base + EN0_ISR); + outb_p(interrupts, e8390_base + EN0_ISR); + } + if (interrupts & ENISR_OVER) + ei_rx_overrun(dev); + else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) + { + /* Got a good (?) packet. */ + ei_receive(dev); + } + /* Push the next to-transmit packet through. */ + if (interrupts & ENISR_TX) + ei_tx_intr(dev); + else if (interrupts & ENISR_TX_ERR) + ei_tx_err(dev); + + if (interrupts & ENISR_COUNTERS) + { + dev->stats.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0); + dev->stats.rx_crc_errors += inb_p(e8390_base + EN0_COUNTER1); + dev->stats.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2); + } + } + + if (interrupts && (netif_msg_intr(ei_local))) + { + handled = 1; + if (nr_serviced >= MAX_SERVICE) + { + /* 0xFF is valid for a card removal */ + if (interrupts != 0xFF) + netdev_warn(dev, + "Too much work at interrupt, status %#2.2x\n", + interrupts); + outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ + } else { + netdev_warn(dev, "unknown interrupt %#2x\n", + interrupts); + outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ + } + } + + /* Turn 8390 interrupts back on. */ + ei_local->irqlock = 0; + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + + spin_unlock_irqrestore(&ei_local->page_lock, flags); + return IRQ_RETVAL(handled); +} + +/** + * ei_tx_err - handle transmitter error + * @dev: network device which threw the exception + * + * A transmitter error has happened. Most likely excess collisions (which + * is a fairly normal condition). If the error is one where the Tx will + * have been aborted, we try and send another one right away, instead of + * letting the failed packet sit and collect dust in the Tx buffer. This + * is a much better solution as it avoids kernel based Tx timeouts, and + * an unnecessary card reset. + * + * Called with lock held. 
+ */ + +static void ei_tx_err(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + unsigned char txsr = inb_p(e8390_base+EN0_TSR); + unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); + +#ifdef VERBOSE_ERROR_DUMP + netdev_dbg(dev, "transmitter error (%#2x):", txsr); + if (txsr & ENTSR_ABT) + pr_cont(" excess-collisions"); + if (txsr & ENTSR_ND) + pr_cont(" non-deferral"); + if (txsr & ENTSR_CRS) + pr_cont(" lost-carrier"); + if (txsr & ENTSR_FU) + pr_cont(" FIFO-underrun"); + if (txsr & ENTSR_CDH) + pr_cont(" lost-heartbeat"); + pr_cont("\n"); +#endif + + if (tx_was_aborted) + ei_tx_intr(dev); + else + { + dev->stats.tx_errors++; + if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++; + if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++; + if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++; + } +} + +/** + * ei_tx_intr - transmit interrupt handler + * @dev: network device for which tx intr is handled + * + * We have finished a transmit: check for errors and then trigger the next + * packet to be sent. Called with lock held. + */ + +static void ei_tx_intr(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int status = inb(e8390_base + EN0_TSR); + + /* + * There are two Tx buffers, see which one finished, and trigger + * the send of another one if it exists. + */ + ei_local->txqueue--; + + if (ei_local->tx1 < 0) + { + if (ei_local->lasttx != 1 && ei_local->lasttx != -1) + netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n", + ei_local->name, ei_local->lasttx, + ei_local->tx1); + ei_local->tx1 = 0; + if (ei_local->tx2 > 0) + { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); + dev->trans_start = jiffies; + ei_local->tx2 = -1, + ei_local->lasttx = 2; + } + else ei_local->lasttx = 20, ei_local->txing = 0; + } + else if (ei_local->tx2 < 0) + { + if (ei_local->lasttx != 2 && ei_local->lasttx != -2) + netdev_err(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n", + ei_local->name, ei_local->lasttx, + ei_local->tx2); + ei_local->tx2 = 0; + if (ei_local->tx1 > 0) + { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); + dev->trans_start = jiffies; + ei_local->tx1 = -1; + ei_local->lasttx = 1; + } + else + ei_local->lasttx = 10, ei_local->txing = 0; + } +// else +// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", +// ei_local->lasttx); + + /* Minimize Tx latency: update the statistics after we restart TXing. */ + if (status & ENTSR_COL) + dev->stats.collisions++; + if (status & ENTSR_PTX) + dev->stats.tx_packets++; + else + { + dev->stats.tx_errors++; + if (status & ENTSR_ABT) + { + dev->stats.tx_aborted_errors++; + dev->stats.collisions += 16; + } + if (status & ENTSR_CRS) + dev->stats.tx_carrier_errors++; + if (status & ENTSR_FU) + dev->stats.tx_fifo_errors++; + if (status & ENTSR_CDH) + dev->stats.tx_heartbeat_errors++; + if (status & ENTSR_OWC) + dev->stats.tx_window_errors++; + } + netif_wake_queue(dev); +} + +/** + * ei_receive - receive some packets + * @dev: network device with which receive will be run + * + * We have a good packet(s), get it/them out of the buffers. + * Called with lock held. 
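The statistics updates in ei_tx_intr() above key off individual TSR bits. A self-contained decode of that accounting; the ENTSR_* values are the conventional 8390 ones and are restated here as assumptions of the sketch:

/* Hedged decode of the transmit status accounting done by ei_tx_intr(). */
#include <stdio.h>

#define ENTSR_PTX 0x01   /* packet transmitted ok   */
#define ENTSR_COL 0x04   /* collided at least once  */
#define ENTSR_ABT 0x08   /* aborted: 16 collisions  */

struct tx_stats { unsigned collisions, tx_packets, tx_errors, tx_aborted; };

static void account(int status, struct tx_stats *s)
{
	if (status & ENTSR_COL)
		s->collisions++;
	if (status & ENTSR_PTX) {
		s->tx_packets++;
	} else {
		s->tx_errors++;
		if (status & ENTSR_ABT) {
			s->tx_aborted++;
			s->collisions += 16;    /* as in ei_tx_intr() above */
		}
	}
}

int main(void)
{
	struct tx_stats s = { 0 };

	account(ENTSR_PTX | ENTSR_COL, &s);    /* sent after one collision */
	account(ENTSR_ABT, &s);                /* gave up after sixteen    */
	printf("packets=%u errors=%u collisions=%u aborted=%u\n",
	       s.tx_packets, s.tx_errors, s.collisions, s.tx_aborted);
	return 0;
}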
+ */ + +static void ei_receive(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned char rxing_page, this_frame, next_frame; + unsigned short current_offset; + int rx_pkt_count = 0; + struct e8390_pkt_hdr rx_frame; + + while (++rx_pkt_count < 10) + { + int pkt_len, pkt_stat; + + /* Get the rx page (incoming packet pointer). */ + rxing_page = inb_p(e8390_base + EN1_CURPAG -1); + + /* Remove one frame from the ring. Boundary is always a page behind. */ + this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1; + if (this_frame >= ei_local->stop_page) + this_frame = ei_local->rx_start_page; + + /* Someday we'll omit the previous, iff we never get this message. + (There is at least one clone claimed to have a problem.) + + Keep quiet if it looks like a card removal. One problem here + is that some clones crash in roughly the same way. + */ + if ((netif_msg_rx_err(ei_local)) && + this_frame != ei_local->current_page && + (this_frame != 0x0 || rxing_page != 0xFF)) + netdev_err(dev, "mismatched read page pointers %2x vs %2x\n", + this_frame, ei_local->current_page); + + if (this_frame == rxing_page) /* Read all the frames? */ + break; /* Done for now */ + + current_offset = this_frame << 8; + ei_get_8390_hdr(dev, &rx_frame, this_frame); + + pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); + pkt_stat = rx_frame.status; + + next_frame = this_frame + 1 + ((pkt_len+4)>>8); + + if (pkt_len < 60 || pkt_len > 1518) + { + netif_err(ei_local, rx_err, dev, + "bogus packet size: %d, status=%#2x nxpg=%#2x\n", + rx_frame.count, rx_frame.status, + rx_frame.next); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + } + else if ((pkt_stat & 0x0F) == ENRSR_RXOK) + { + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) + { + netif_err(ei_local, rx_err, dev, + "Couldn't allocate a sk_buff of size %d\n", + pkt_len); + dev->stats.rx_dropped++; + break; + } + else + { + skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ + skb_put(skb, pkt_len); /* Make room */ + ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + if (pkt_stat & ENRSR_PHY) + dev->stats.multicast++; + } + } + else + { + netif_err(ei_local, rx_err, dev, + "bogus packet: status=%#2x nxpg=%#2x size=%d\n", + rx_frame.status, rx_frame.next, + rx_frame.count); + dev->stats.rx_errors++; + /* NB: The NIC counts CRC, frame and missed errors. */ + if (pkt_stat & ENRSR_FO) + dev->stats.rx_fifo_errors++; + } + next_frame = rx_frame.next; + + /* This _should_ never happen: it's here for avoiding bad clones. */ + if (next_frame >= ei_local->stop_page) { + netdev_info(dev, "next frame inconsistency, %#2x\n", + next_frame); + next_frame = ei_local->rx_start_page; + } + ei_local->current_page = next_frame; + outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); + } +} + +/** + * ei_rx_overrun - handle receiver overrun + * @dev: network device which threw exception + * + * We have a receiver overrun: we have to kick the 8390 to get it started + * again. Problem is that you have to kick it exactly as NS prescribes in + * the updated datasheets, or "the NIC may act in an unpredictable manner." + * This includes causing "the NIC to defer indefinitely when it is stopped + * on a busy network." Ugh. + * Called with lock held. Don't call this with the interrupts off or your + * computer will hate you - it takes 10ms or so. 
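The ring arithmetic in ei_receive() above predicts where the following frame starts: the current page, plus one for the page holding the 4-byte ring header, plus (pkt_len + 4) >> 8 extra 256-byte pages. A quick stand-alone check of that expression:

/* Stand-alone check of the next-frame page arithmetic in ei_receive(). */
#include <stdio.h>

static int expected_next(int this_frame, int pkt_len)
{
	return this_frame + 1 + ((pkt_len + 4) >> 8);
}

int main(void)
{
	printf("  60-byte packet at page 0x50 -> next 0x%02x\n",
	       expected_next(0x50, 60));
	printf(" 300-byte packet at page 0x50 -> next 0x%02x\n",
	       expected_next(0x50, 300));
	printf("1514-byte packet at page 0x50 -> next 0x%02x\n",
	       expected_next(0x50, 1514));
	return 0;
}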
+ */ + +static void ei_rx_overrun(struct net_device *dev) +{ + struct axnet_dev *info = PRIV(dev); + long e8390_base = dev->base_addr; + unsigned char was_txing, must_resend = 0; + struct ei_device *ei_local = netdev_priv(dev); + + /* + * Record whether a Tx was in progress and then issue the + * stop command. + */ + was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n"); + dev->stats.rx_over_errors++; + + /* + * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. + * We wait at least 2ms. + */ + + mdelay(2); + + /* + * Reset RBCR[01] back to zero as per magic incantation. + */ + outb_p(0x00, e8390_base+EN0_RCNTLO); + outb_p(0x00, e8390_base+EN0_RCNTHI); + + /* + * See if any Tx was interrupted or not. According to NS, this + * step is vital, and skipping it will cause no end of havoc. + */ + + if (was_txing) + { + unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); + if (!tx_completed) + must_resend = 1; + } + + /* + * Have to enter loopback mode and then restart the NIC before + * you are allowed to slurp packets up off the ring. + */ + outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); + outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); + + /* + * Clear the Rx ring of all the debris, and ack the interrupt. + */ + ei_receive(dev); + + /* + * Leave loopback mode, and resend any packet that got stopped. + */ + outb_p(E8390_TXCONFIG | info->duplex_flag, e8390_base + EN0_TXCR); + if (must_resend) + outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); +} + +/* + * Collect the stats. This is called unlocked and from several contexts. + */ + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + + /* If the card is stopped, just return the present stats. */ + if (!netif_running(dev)) + return &dev->stats; + + spin_lock_irqsave(&ei_local->page_lock,flags); + /* Read the counter registers, assuming we are in page 0. */ + dev->stats.rx_frame_errors += inb_p(ioaddr + EN0_COUNTER0); + dev->stats.rx_crc_errors += inb_p(ioaddr + EN0_COUNTER1); + dev->stats.rx_missed_errors+= inb_p(ioaddr + EN0_COUNTER2); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + return &dev->stats; +} + +/* + * Form the 64 bit 8390 multicast table from the linked list of addresses + * associated with this dev structure. + */ + +static inline void make_mc_bits(u8 *bits, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); + /* + * The 8390 uses the 6 most significant bits of the + * CRC to index the multicast table. + */ + bits[crc>>29] |= (1<<((crc>>26)&7)); + } +} + +/** + * do_set_multicast_list - set/clear multicast filter + * @dev: net device for which multicast filter is adjusted + * + * Set or clear the multicast filter for this adaptor. + * Must be called with lock held. 
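make_mc_bits() above indexes the 64-bit multicast filter with the six most significant bits of the Ethernet CRC of each address. A self-contained version of that calculation; ether_crc() is re-implemented here with the classic big-endian Ethernet CRC-32 loop so the example runs stand-alone, and the sample address is arbitrary:

/* Hedged, self-contained version of the multicast hashing used above. */
#include <stdio.h>

static unsigned int ether_crc_sketch(int length, const unsigned char *data)
{
	const unsigned int poly = 0x04c11db7;    /* Ethernet polynomial */
	unsigned int crc = 0xffffffff;

	while (--length >= 0) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1) {
			unsigned int carry = (crc >> 31) ^ (octet & 1);

			crc <<= 1;
			if (carry)
				crc ^= poly;
		}
	}
	return crc;
}

int main(void)
{
	unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned char bits[8] = { 0 };
	unsigned int crc = ether_crc_sketch(6, mcast);
	int i;

	/* top 3 bits pick the filter byte, the next 3 the bit within it */
	bits[crc >> 29] |= 1 << ((crc >> 26) & 7);

	printf("crc=0x%08x -> filter byte %u, bit %u\n",
	       crc, crc >> 29, (crc >> 26) & 7);
	for (i = 0; i < 8; i++)
		printf("bits[%d]=0x%02x\n", i, bits[i]);
	return 0;
}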
+ */ + +static void do_set_multicast_list(struct net_device *dev) +{ + long e8390_base = dev->base_addr; + int i; + struct ei_device *ei_local = netdev_priv(dev); + + if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { + memset(ei_local->mcfilter, 0, 8); + if (!netdev_mc_empty(dev)) + make_mc_bits(ei_local->mcfilter, dev); + } else { + /* set to accept-all */ + memset(ei_local->mcfilter, 0xFF, 8); + } + + outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); + for(i = 0; i < 8; i++) + { + outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); + } + outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); + + if(dev->flags&IFF_PROMISC) + outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR); + else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) + outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); + else + outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); + + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); +} + +/* + * Called without lock held. This is invoked from user context and may + * be parallel to just about everything else. Its also fairly quick and + * not called too often. Must protect against both bh and irq users + */ + +static void set_multicast_list(struct net_device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev_lock(dev), flags); + do_set_multicast_list(dev); + spin_unlock_irqrestore(&dev_lock(dev), flags); +} + +/* This page of functions should be 8390 generic */ +/* Follow National Semi's recommendations for initializing the "NIC". */ + +/** + * AX88190_init - initialize 8390 hardware + * @dev: network device to initialize + * @startp: boolean. non-zero value to initiate chip processing + * + * Must be called with lock held. + */ + +static void AX88190_init(struct net_device *dev, int startp) +{ + struct axnet_dev *info = PRIV(dev); + long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int i; + int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48; + + if(sizeof(struct e8390_pkt_hdr)!=4) + panic("8390.c: header struct mispacked\n"); + /* Follow National Semi's recommendations for initing the DP83902. */ + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ + outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ + /* Clear the remote byte count registers. */ + outb_p(0x00, e8390_base + EN0_RCNTLO); + outb_p(0x00, e8390_base + EN0_RCNTHI); + /* Set to monitor and loopback mode -- this is vital!. */ + outb_p(E8390_RXOFF|0x40, e8390_base + EN0_RXCR); /* 0x60 */ + outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ + /* Set the transmit page and receive ring. */ + outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); + ei_local->tx1 = ei_local->tx2 = 0; + outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); + outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ + ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ + outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); + /* Clear the pending interrupts and mask. */ + outb_p(0xFF, e8390_base + EN0_ISR); + outb_p(0x00, e8390_base + EN0_IMR); + + /* Copy the station address into the DS8390 registers. */ + + outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ + for(i = 0; i < 6; i++) + { + outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); + if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i]) + netdev_err(dev, "Hw. 
address read/write mismap %d\n", i); + } + + outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + netif_start_queue(dev); + ei_local->tx1 = ei_local->tx2 = 0; + ei_local->txing = 0; + + if (info->flags & IS_AX88790) /* select Internal PHY */ + outb(0x10, e8390_base + AXNET_GPIO); + + if (startp) + { + outb_p(0xff, e8390_base + EN0_ISR); + outb_p(ENISR_ALL, e8390_base + EN0_IMR); + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); + outb_p(E8390_TXCONFIG | info->duplex_flag, + e8390_base + EN0_TXCR); /* xmit on. */ + /* 3c503 TechMan says rxconfig only after the NIC is started. */ + outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); /* rx on, */ + do_set_multicast_list(dev); /* (re)load the mcast table */ + } +} + +/* Trigger a transmit start, assuming the length is valid. + Always called with the page lock held */ + +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page) +{ + long e8390_base = dev->base_addr; + struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev); + + if (inb_p(e8390_base) & E8390_TRANS) + { + netdev_warn(dev, "trigger_send() called with the transmitter busy\n"); + return; + } + outb_p(length & 0xff, e8390_base + EN0_TCNTLO); + outb_p(length >> 8, e8390_base + EN0_TCNTHI); + outb_p(start_page, e8390_base + EN0_TPSR); + outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); +} diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c new file mode 100644 index 000000000..d686b9cac --- /dev/null +++ b/drivers/net/ethernet/8390/etherh.c @@ -0,0 +1,874 @@ +/* + * linux/drivers/acorn/net/etherh.c + * + * Copyright (C) 2000-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * NS8390 I-cubed EtherH and ANT EtherM specific driver + * Thanks to I-Cubed for information on their cards. + * EtherM conversion (C) 1999 Chris Kemp and Tim Watterton + * EtherM integration (C) 2000 Aleph One Ltd (Tak-Shing Chan) + * EtherM integration re-engineered by Russell King. + * + * Changelog: + * 08-12-1996 RMK 1.00 Created + * RMK 1.03 Added support for EtherLan500 cards + * 23-11-1997 RMK 1.04 Added media autodetection + * 16-04-1998 RMK 1.05 Improved media autodetection + * 10-02-2000 RMK 1.06 Updated for 2.3.43 + * 13-05-2000 RMK 1.07 Updated for 2.3.99-pre8 + * 12-10-1999 CK/TEW EtherM driver first release + * 21-12-2000 TTC EtherH/EtherM integration + * 25-12-2000 RMK 1.08 Clean integration of EtherM into this driver. + * 03-01-2002 RMK 1.09 Always enable IRQs if we're in the nic slot. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) + +#define ei_inb(_p) readb((void __iomem *)_p) +#define ei_outb(_v,_p) writeb(_v,(void __iomem *)_p) +#define ei_inb_p(_p) readb((void __iomem *)_p) +#define ei_outb_p(_v,_p) writeb(_v,(void __iomem *)_p) + +#define DRV_NAME "etherh" +#define DRV_VERSION "1.11" + +static char version[] = + "EtherH/EtherM Driver (c) 2002-2004 Russell King " DRV_VERSION "\n"; + +#include "lib8390.c" + +static u32 etherh_msg_enable; + +struct etherh_priv { + void __iomem *ioc_fast; + void __iomem *memc; + void __iomem *dma_base; + unsigned int id; + void __iomem *ctrl_port; + unsigned char ctrl; + u32 supported; +}; + +struct etherh_data { + unsigned long ns8390_offset; + unsigned long dataport_offset; + unsigned long ctrlport_offset; + int ctrl_ioc; + const char name[16]; + u32 supported; + unsigned char tx_start_page; + unsigned char stop_page; +}; + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("EtherH/EtherM driver"); +MODULE_LICENSE("GPL"); + +#define ETHERH500_DATAPORT 0x800 /* MEMC */ +#define ETHERH500_NS8390 0x000 /* MEMC */ +#define ETHERH500_CTRLPORT 0x800 /* IOC */ + +#define ETHERH600_DATAPORT 0x040 /* MEMC */ +#define ETHERH600_NS8390 0x800 /* MEMC */ +#define ETHERH600_CTRLPORT 0x200 /* MEMC */ + +#define ETHERH_CP_IE 1 +#define ETHERH_CP_IF 2 +#define ETHERH_CP_HEARTBEAT 2 + +#define ETHERH_TX_START_PAGE 1 +#define ETHERH_STOP_PAGE 127 + +/* + * These came from CK/TEW + */ +#define ETHERM_DATAPORT 0x200 /* MEMC */ +#define ETHERM_NS8390 0x800 /* MEMC */ +#define ETHERM_CTRLPORT 0x23c /* MEMC */ + +#define ETHERM_TX_START_PAGE 64 +#define ETHERM_STOP_PAGE 127 + +/* ------------------------------------------------------------------------ */ + +#define etherh_priv(dev) \ + ((struct etherh_priv *)(((char *)netdev_priv(dev)) + sizeof(struct ei_device))) + +static inline void etherh_set_ctrl(struct etherh_priv *eh, unsigned char mask) +{ + unsigned char ctrl = eh->ctrl | mask; + eh->ctrl = ctrl; + writeb(ctrl, eh->ctrl_port); +} + +static inline void etherh_clr_ctrl(struct etherh_priv *eh, unsigned char mask) +{ + unsigned char ctrl = eh->ctrl & ~mask; + eh->ctrl = ctrl; + writeb(ctrl, eh->ctrl_port); +} + +static inline unsigned int etherh_get_stat(struct etherh_priv *eh) +{ + return readb(eh->ctrl_port); +} + + + + +static void etherh_irq_enable(ecard_t *ec, int irqnr) +{ + struct etherh_priv *eh = ec->irq_data; + + etherh_set_ctrl(eh, ETHERH_CP_IE); +} + +static void etherh_irq_disable(ecard_t *ec, int irqnr) +{ + struct etherh_priv *eh = ec->irq_data; + + etherh_clr_ctrl(eh, ETHERH_CP_IE); +} + +static expansioncard_ops_t etherh_ops = { + .irqenable = etherh_irq_enable, + .irqdisable = etherh_irq_disable, +}; + + + + +static void +etherh_setif(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + void __iomem *addr; + + local_irq_save(flags); + + /* set the interface type */ + switch (etherh_priv(dev)->id) { + case PROD_I3_ETHERLAN600: + case PROD_I3_ETHERLAN600A: + addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; + + switch (dev->if_port) { + case IF_PORT_10BASE2: + writeb((readb(addr) & 0xf8) | 1, addr); + break; + case IF_PORT_10BASET: + writeb((readb(addr) & 0xf8), addr); + break; + } + break; + + case PROD_I3_ETHERLAN500: + switch (dev->if_port) { + case IF_PORT_10BASE2: 
+ etherh_clr_ctrl(etherh_priv(dev), ETHERH_CP_IF); + break; + + case IF_PORT_10BASET: + etherh_set_ctrl(etherh_priv(dev), ETHERH_CP_IF); + break; + } + break; + + default: + break; + } + + local_irq_restore(flags); +} + +static int +etherh_getifstat(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *addr; + int stat = 0; + + switch (etherh_priv(dev)->id) { + case PROD_I3_ETHERLAN600: + case PROD_I3_ETHERLAN600A: + addr = (void __iomem *)dev->base_addr + EN0_RCNTHI; + switch (dev->if_port) { + case IF_PORT_10BASE2: + stat = 1; + break; + case IF_PORT_10BASET: + stat = readb(addr) & 4; + break; + } + break; + + case PROD_I3_ETHERLAN500: + switch (dev->if_port) { + case IF_PORT_10BASE2: + stat = 1; + break; + case IF_PORT_10BASET: + stat = etherh_get_stat(etherh_priv(dev)) & ETHERH_CP_HEARTBEAT; + break; + } + break; + + default: + stat = 0; + break; + } + + return stat != 0; +} + +/* + * Configure the interface. Note that we ignore the other + * parts of ifmap, since its mostly meaningless for this driver. + */ +static int etherh_set_config(struct net_device *dev, struct ifmap *map) +{ + switch (map->port) { + case IF_PORT_10BASE2: + case IF_PORT_10BASET: + /* + * If the user explicitly sets the interface + * media type, turn off automedia detection. + */ + dev->flags &= ~IFF_AUTOMEDIA; + dev->if_port = map->port; + break; + + default: + return -EINVAL; + } + + etherh_setif(dev); + + return 0; +} + +/* + * Reset the 8390 (hard reset). Note that we can't actually do this. + */ +static void +etherh_reset(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *addr = (void __iomem *)dev->base_addr; + + writeb(E8390_NODMA+E8390_PAGE0+E8390_STOP, addr); + + /* + * See if we need to change the interface type. + * Note that we use 'interface_num' as a flag + * to indicate that we need to change the media. + */ + if (dev->flags & IFF_AUTOMEDIA && ei_local->interface_num) { + ei_local->interface_num = 0; + + if (dev->if_port == IF_PORT_10BASET) + dev->if_port = IF_PORT_10BASE2; + else + dev->if_port = IF_PORT_10BASET; + + etherh_setif(dev); + } +} + +/* + * Write a block of data out to the 8390 + */ +static void +etherh_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long dma_start; + void __iomem *dma_base, *addr; + + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in etherh_block_input: " + " DMAstat %d irqlock %d\n", + ei_local->dmaing, ei_local->irqlock); + return; + } + + /* + * Make sure we have a round number of bytes if we're in word mode. 
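+ *
+ * (The short dummy remote read programmed ahead of the real remote
+ * write below looks like the usual 8390 remote-write bug workaround
+ * seen in other NE2000-style drivers.)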
+ */ + if (count & 1 && ei_local->word16) + count++; + + ei_local->dmaing = 1; + + addr = (void __iomem *)dev->base_addr; + dma_base = etherh_priv(dev)->dma_base; + + count = (count + 1) & ~1; + writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); + + writeb (0x42, addr + EN0_RCNTLO); + writeb (0x00, addr + EN0_RCNTHI); + writeb (0x42, addr + EN0_RSARLO); + writeb (0x00, addr + EN0_RSARHI); + writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); + + udelay (1); + + writeb (ENISR_RDC, addr + EN0_ISR); + writeb (count, addr + EN0_RCNTLO); + writeb (count >> 8, addr + EN0_RCNTHI); + writeb (0, addr + EN0_RSARLO); + writeb (start_page, addr + EN0_RSARHI); + writeb (E8390_RWRITE | E8390_START, addr + E8390_CMD); + + if (ei_local->word16) + writesw (dma_base, buf, count >> 1); + else + writesb (dma_base, buf, count); + + dma_start = jiffies; + + while ((readb (addr + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + netdev_warn(dev, "timeout waiting for TX RDC\n"); + etherh_reset (dev); + __NS8390_init (dev, 1); + break; + } + + writeb (ENISR_RDC, addr + EN0_ISR); + ei_local->dmaing = 0; +} + +/* + * Read a block of data from the 8390 + */ +static void +etherh_block_input (struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned char *buf; + void __iomem *dma_base, *addr; + + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in etherh_block_input: " + " DMAstat %d irqlock %d\n", + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing = 1; + + addr = (void __iomem *)dev->base_addr; + dma_base = etherh_priv(dev)->dma_base; + + buf = skb->data; + writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); + writeb (count, addr + EN0_RCNTLO); + writeb (count >> 8, addr + EN0_RCNTHI); + writeb (ring_offset, addr + EN0_RSARLO); + writeb (ring_offset >> 8, addr + EN0_RSARHI); + writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); + + if (ei_local->word16) { + readsw (dma_base, buf, count >> 1); + if (count & 1) + buf[count - 1] = readb (dma_base); + } else + readsb (dma_base, buf, count); + + writeb (ENISR_RDC, addr + EN0_ISR); + ei_local->dmaing = 0; +} + +/* + * Read a header from the 8390 + */ +static void +etherh_get_header (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *dma_base, *addr; + + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in etherh_get_header: " + " DMAstat %d irqlock %d\n", + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing = 1; + + addr = (void __iomem *)dev->base_addr; + dma_base = etherh_priv(dev)->dma_base; + + writeb (E8390_NODMA | E8390_PAGE0 | E8390_START, addr + E8390_CMD); + writeb (sizeof (*hdr), addr + EN0_RCNTLO); + writeb (0, addr + EN0_RCNTHI); + writeb (0, addr + EN0_RSARLO); + writeb (ring_page, addr + EN0_RSARHI); + writeb (E8390_RREAD | E8390_START, addr + E8390_CMD); + + if (ei_local->word16) + readsw (dma_base, hdr, sizeof (*hdr) >> 1); + else + readsb (dma_base, hdr, sizeof (*hdr)); + + writeb (ENISR_RDC, addr + EN0_ISR); + ei_local->dmaing = 0; +} + +/* + * Open/initialize the board. This is called (in the current kernel) + * sometime after booting when the 'ifconfig' program is run. 
+ * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. + */ +static int +etherh_open(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev)) + return -EAGAIN; + + /* + * Make sure that we aren't going to change the + * media type on the next reset - we are about to + * do automedia manually now. + */ + ei_local->interface_num = 0; + + /* + * If we are doing automedia detection, do it now. + * This is more reliable than the 8390's detection. + */ + if (dev->flags & IFF_AUTOMEDIA) { + dev->if_port = IF_PORT_10BASET; + etherh_setif(dev); + mdelay(1); + if (!etherh_getifstat(dev)) { + dev->if_port = IF_PORT_10BASE2; + etherh_setif(dev); + } + } else + etherh_setif(dev); + + etherh_reset(dev); + __ei_open(dev); + + return 0; +} + +/* + * The inverse routine to etherh_open(). + */ +static int +etherh_close(struct net_device *dev) +{ + __ei_close (dev); + free_irq (dev->irq, dev); + return 0; +} + +/* + * Initialisation + */ + +static void __init etherh_banner(void) +{ + static int version_printed; + + if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) + pr_info("%s", version); +} + +/* + * Read the ethernet address string from the on board rom. + * This is an ascii string... + */ +static int etherh_addr(char *addr, struct expansion_card *ec) +{ + struct in_chunk_dir cd; + char *s; + + if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { + printk(KERN_ERR "%s: unable to read module description string\n", + dev_name(&ec->dev)); + goto no_addr; + } + + s = strchr(cd.d.string, '('); + if (s) { + int i; + + for (i = 0; i < 6; i++) { + addr[i] = simple_strtoul(s + 1, &s, 0x10); + if (*s != (i == 5? ')' : ':')) + break; + } + + if (i == 6) + return 0; + } + + printk(KERN_ERR "%s: unable to parse MAC address: %s\n", + dev_name(&ec->dev), cd.d.string); + + no_addr: + return -ENODEV; +} + +/* + * Create an ethernet address from the system serial number. + */ +static int __init etherm_addr(char *addr) +{ + unsigned int serial; + + if (system_serial_low == 0 && system_serial_high == 0) + return -ENODEV; + + serial = system_serial_low | system_serial_high; + + addr[0] = 0; + addr[1] = 0; + addr[2] = 0xa4; + addr[3] = 0x10 + (serial >> 24); + addr[4] = serial >> 16; + addr[5] = serial >> 8; + return 0; +} + +static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->supported = etherh_priv(dev)->supported; + ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->duplex = DUPLEX_HALF; + cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; + cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE); + return 0; +} + +static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + switch (cmd->autoneg) { + case AUTONEG_ENABLE: + dev->flags |= IFF_AUTOMEDIA; + break; + + case AUTONEG_DISABLE: + switch (cmd->port) { + case PORT_TP: + dev->if_port = IF_PORT_10BASET; + break; + + case PORT_BNC: + dev->if_port = IF_PORT_10BASE2; + break; + + default: + return -EINVAL; + } + dev->flags &= ~IFF_AUTOMEDIA; + break; + + default: + return -EINVAL; + } + + etherh_setif(dev); + + return 0; +} + +static u32 etherh_get_msglevel(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + return ei_local->msg_enable; +} + +static void etherh_set_msglevel(struct net_device *dev, u32 v) +{ + struct ei_device *ei_local = netdev_priv(dev); + + ei_local->msg_enable = v; +} + +static const struct ethtool_ops etherh_ethtool_ops = { + .get_settings = etherh_get_settings, + .set_settings = etherh_set_settings, + .get_drvinfo = etherh_get_drvinfo, + .get_ts_info = ethtool_op_get_ts_info, + .get_msglevel = etherh_get_msglevel, + .set_msglevel = etherh_set_msglevel, +}; + +static const struct net_device_ops etherh_netdev_ops = { + .ndo_open = etherh_open, + .ndo_stop = etherh_close, + .ndo_set_config = etherh_set_config, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_rx_mode = __ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = __ei_poll, +#endif +}; + +static u32 etherh_regoffsets[16]; +static u32 etherm_regoffsets[16]; + +static int +etherh_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + const struct etherh_data *data = id->data; + struct ei_device *ei_local; + struct net_device *dev; + struct etherh_priv *eh; + int ret; + + etherh_banner(); + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + dev = ____alloc_ei_netdev(sizeof(struct etherh_priv)); + if (!dev) { + ret = -ENOMEM; + goto release; + } + + SET_NETDEV_DEV(dev, &ec->dev); + + dev->netdev_ops = ðerh_netdev_ops; + dev->irq = ec->irq; + dev->ethtool_ops = ðerh_ethtool_ops; + + if (data->supported & SUPPORTED_Autoneg) + dev->flags |= IFF_AUTOMEDIA; + if (data->supported & SUPPORTED_TP) { + dev->flags |= IFF_PORTSEL; + dev->if_port = IF_PORT_10BASET; + } else if (data->supported & SUPPORTED_BNC) { + dev->flags |= IFF_PORTSEL; + dev->if_port = IF_PORT_10BASE2; + } else + dev->if_port = IF_PORT_UNKNOWN; + + eh = etherh_priv(dev); + eh->supported = data->supported; + eh->ctrl = 0; + eh->id = ec->cid.product; + eh->memc = ecardm_iomap(ec, ECARD_RES_MEMC, 0, PAGE_SIZE); + if (!eh->memc) { + ret = -ENOMEM; + goto free; + } + + eh->ctrl_port = eh->memc; + if (data->ctrl_ioc) { + eh->ioc_fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, PAGE_SIZE); + if (!eh->ioc_fast) { + ret = -ENOMEM; + goto free; + } + eh->ctrl_port = eh->ioc_fast; + } + + dev->base_addr = (unsigned long)eh->memc + data->ns8390_offset; + eh->dma_base = eh->memc + data->dataport_offset; + eh->ctrl_port += data->ctrlport_offset; + + /* + * IRQ and control port handling - only for non-NIC slot cards. 
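+ * For the NIC slot (slot 8) no expansion-card IRQ hooks are
+ * registered; the interrupt is simply enabled directly via the
+ * control register.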
+ */ + if (ec->slot_no != 8) { + ecard_setirq(ec, ðerh_ops, eh); + } else { + /* + * If we're in the NIC slot, make sure the IRQ is enabled + */ + etherh_set_ctrl(eh, ETHERH_CP_IE); + } + + ei_local = netdev_priv(dev); + spin_lock_init(&ei_local->page_lock); + + if (ec->cid.product == PROD_ANT_ETHERM) { + etherm_addr(dev->dev_addr); + ei_local->reg_offset = etherm_regoffsets; + } else { + etherh_addr(dev->dev_addr, ec); + ei_local->reg_offset = etherh_regoffsets; + } + + ei_local->name = dev->name; + ei_local->word16 = 1; + ei_local->tx_start_page = data->tx_start_page; + ei_local->rx_start_page = ei_local->tx_start_page + TX_PAGES; + ei_local->stop_page = data->stop_page; + ei_local->reset_8390 = etherh_reset; + ei_local->block_input = etherh_block_input; + ei_local->block_output = etherh_block_output; + ei_local->get_8390_hdr = etherh_get_header; + ei_local->interface_num = 0; + ei_local->msg_enable = etherh_msg_enable; + + etherh_reset(dev); + __NS8390_init(dev, 0); + + ret = register_netdev(dev); + if (ret) + goto free; + + netdev_info(dev, "%s in slot %d, %pM\n", + data->name, ec->slot_no, dev->dev_addr); + + ecard_set_drvdata(ec, dev); + + return 0; + + free: + free_netdev(dev); + release: + ecard_release_resources(ec); + out: + return ret; +} + +static void etherh_remove(struct expansion_card *ec) +{ + struct net_device *dev = ecard_get_drvdata(ec); + + ecard_set_drvdata(ec, NULL); + + unregister_netdev(dev); + + free_netdev(dev); + + ecard_release_resources(ec); +} + +static struct etherh_data etherm_data = { + .ns8390_offset = ETHERM_NS8390, + .dataport_offset = ETHERM_NS8390 + ETHERM_DATAPORT, + .ctrlport_offset = ETHERM_NS8390 + ETHERM_CTRLPORT, + .name = "ANT EtherM", + .supported = SUPPORTED_10baseT_Half, + .tx_start_page = ETHERM_TX_START_PAGE, + .stop_page = ETHERM_STOP_PAGE, +}; + +static struct etherh_data etherlan500_data = { + .ns8390_offset = ETHERH500_NS8390, + .dataport_offset = ETHERH500_NS8390 + ETHERH500_DATAPORT, + .ctrlport_offset = ETHERH500_CTRLPORT, + .ctrl_ioc = 1, + .name = "i3 EtherH 500", + .supported = SUPPORTED_10baseT_Half, + .tx_start_page = ETHERH_TX_START_PAGE, + .stop_page = ETHERH_STOP_PAGE, +}; + +static struct etherh_data etherlan600_data = { + .ns8390_offset = ETHERH600_NS8390, + .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT, + .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT, + .name = "i3 EtherH 600", + .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg, + .tx_start_page = ETHERH_TX_START_PAGE, + .stop_page = ETHERH_STOP_PAGE, +}; + +static struct etherh_data etherlan600a_data = { + .ns8390_offset = ETHERH600_NS8390, + .dataport_offset = ETHERH600_NS8390 + ETHERH600_DATAPORT, + .ctrlport_offset = ETHERH600_NS8390 + ETHERH600_CTRLPORT, + .name = "i3 EtherH 600A", + .supported = SUPPORTED_10baseT_Half | SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_Autoneg, + .tx_start_page = ETHERH_TX_START_PAGE, + .stop_page = ETHERH_STOP_PAGE, +}; + +static const struct ecard_id etherh_ids[] = { + { MANU_ANT, PROD_ANT_ETHERM, ðerm_data }, + { MANU_I3, PROD_I3_ETHERLAN500, ðerlan500_data }, + { MANU_I3, PROD_I3_ETHERLAN600, ðerlan600_data }, + { MANU_I3, PROD_I3_ETHERLAN600A, ðerlan600a_data }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver etherh_driver = { + .probe = etherh_probe, + .remove = etherh_remove, + .id_table = etherh_ids, + .drv = { + .name = DRV_NAME, + }, +}; + +static int __init etherh_init(void) +{ + int i; + + for (i = 0; i < 16; i++) { + etherh_regoffsets[i] = i << 2; + 
etherm_regoffsets[i] = i << 5; + } + + return ecard_register_driver(ðerh_driver); +} + +static void __exit etherh_exit(void) +{ + ecard_remove_driver(ðerh_driver); +} + +module_init(etherh_init); +module_exit(etherh_exit); diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c new file mode 100644 index 000000000..0fe19d609 --- /dev/null +++ b/drivers/net/ethernet/8390/hydra.c @@ -0,0 +1,278 @@ +/* New Hydra driver using generic 8390 core */ +/* Based on old hydra driver by Topi Kanerva (topi@susanna.oulu.fi) */ + +/* This file is subject to the terms and conditions of the GNU General */ +/* Public License. See the file COPYING in the main directory of the */ +/* Linux distribution for more details. */ + +/* Peter De Schrijver (p2@mind.be) */ +/* Oldenburg 2000 */ + +/* The Amiganet is a Zorro-II board made by Hydra Systems. It contains a */ +/* NS8390 NIC (network interface controller) clone, 16 or 64K on-board RAM */ +/* and 10BASE-2 (thin coax) and AUI connectors. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) +#define ei_inb(port) in_8(port) +#define ei_outb(val,port) out_8(port,val) +#define ei_inb_p(port) in_8(port) +#define ei_outb_p(val,port) out_8(port,val) + +static const char version[] = + "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include "lib8390.c" + +#define NE_EN0_DCFG (0x0e*2) + +#define NESM_START_PG 0x0 /* First page of TX buffer */ +#define NESM_STOP_PG 0x40 /* Last page +1 of RX ring */ + +#define HYDRA_NIC_BASE 0xffe1 +#define HYDRA_ADDRPROM 0xffc0 +#define HYDRA_VERSION "v3.0alpha" + +#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8)) + + +static int hydra_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent); +static int hydra_init(struct zorro_dev *z); +static int hydra_open(struct net_device *dev); +static int hydra_close(struct net_device *dev); +static void hydra_reset_8390(struct net_device *dev); +static void hydra_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void hydra_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void hydra_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void hydra_remove_one(struct zorro_dev *z); +static u32 hydra_msg_enable; + +static struct zorro_device_id hydra_zorro_tbl[] = { + { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl); + +static struct zorro_driver hydra_driver = { + .name = "hydra", + .id_table = hydra_zorro_tbl, + .probe = hydra_init_one, + .remove = hydra_remove_one, +}; + +static int hydra_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + int err; + + if (!request_mem_region(z->resource.start, 0x10000, "Hydra")) + return -EBUSY; + if ((err = hydra_init(z))) { + release_mem_region(z->resource.start, 0x10000); + return -EBUSY; + } + return 0; +} + +static const struct net_device_ops hydra_netdev_ops = { + .ndo_open = hydra_open, + .ndo_stop = hydra_close, + + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_rx_mode = __ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + 
.ndo_poll_controller = __ei_poll, +#endif +}; + +static int hydra_init(struct zorro_dev *z) +{ + struct net_device *dev; + unsigned long board = (unsigned long)ZTWO_VADDR(z->resource.start); + unsigned long ioaddr = board+HYDRA_NIC_BASE; + const char name[] = "NE2000"; + int start_page, stop_page; + int j; + int err; + struct ei_device *ei_local; + + static u32 hydra_offsets[16] = { + 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, + }; + + dev = ____alloc_ei_netdev(0); + if (!dev) + return -ENOMEM; + + for (j = 0; j < ETH_ALEN; j++) + dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); + + /* We must set the 8390 for word mode. */ + z_writeb(0x4b, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + ei_local = netdev_priv(dev); + ei_local->msg_enable = hydra_msg_enable; + dev->base_addr = ioaddr; + dev->irq = IRQ_AMIGA_PORTS; + + /* Install the Interrupt handler */ + if (request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, "Hydra Ethernet", + dev)) { + free_netdev(dev); + return -EAGAIN; + } + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = 1; + ei_status.bigendian = 1; + + ei_status.rx_start_page = start_page + TX_PAGES; + + ei_status.reset_8390 = hydra_reset_8390; + ei_status.block_input = hydra_block_input; + ei_status.block_output = hydra_block_output; + ei_status.get_8390_hdr = hydra_get_8390_hdr; + ei_status.reg_offset = hydra_offsets; + + dev->netdev_ops = &hydra_netdev_ops; + __NS8390_init(dev, 0); + + err = register_netdev(dev); + if (err) { + free_irq(IRQ_AMIGA_PORTS, dev); + free_netdev(dev); + return err; + } + + zorro_set_drvdata(z, dev); + + pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n", + dev->name, &z->resource, dev->dev_addr); + + return 0; +} + +static int hydra_open(struct net_device *dev) +{ + __ei_open(dev); + return 0; +} + +static int hydra_close(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n"); + __ei_close(dev); + return 0; +} + +static void hydra_reset_8390(struct net_device *dev) +{ + netdev_info(dev, "Hydra hw reset not there\n"); +} + +static void hydra_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + int nic_base = dev->base_addr; + short *ptrs; + unsigned long hdr_start= (nic_base-HYDRA_NIC_BASE) + + ((ring_page - NESM_START_PG)<<8); + ptrs = (short *)hdr; + + *(ptrs++) = z_readw(hdr_start); + *((short *)hdr) = WORDSWAP(*((short *)hdr)); + hdr_start += 2; + *(ptrs++) = z_readw(hdr_start); + *((short *)hdr+1) = WORDSWAP(*((short *)hdr+1)); +} + +static void hydra_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long nic_base = dev->base_addr; + unsigned long mem_base = nic_base - HYDRA_NIC_BASE; + unsigned long xfer_start = mem_base + ring_offset - (NESM_START_PG<<8); + + if (count&1) + count++; + + if (xfer_start+count > mem_base + (NESM_STOP_PG<<8)) { + int semi_count = (mem_base + (NESM_STOP_PG<<8)) - xfer_start; + + z_memcpy_fromio(skb->data,xfer_start,semi_count); + count -= semi_count; + z_memcpy_fromio(skb->data+semi_count, mem_base, count); + } else + z_memcpy_fromio(skb->data, xfer_start,count); + +} + +static void hydra_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + unsigned long nic_base = dev->base_addr; + unsigned long mem_base = 
nic_base - HYDRA_NIC_BASE; + + if (count&1) + count++; + + z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count); +} + +static void hydra_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + free_irq(IRQ_AMIGA_PORTS, dev); + release_mem_region(ZTWO_PADDR(dev->base_addr)-HYDRA_NIC_BASE, 0x10000); + free_netdev(dev); +} + +static int __init hydra_init_module(void) +{ + return zorro_register_driver(&hydra_driver); +} + +static void __exit hydra_cleanup_module(void) +{ + zorro_unregister_driver(&hydra_driver); +} + +module_init(hydra_init_module); +module_exit(hydra_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c new file mode 100644 index 000000000..b96e8852b --- /dev/null +++ b/drivers/net/ethernet/8390/lib8390.c @@ -0,0 +1,1084 @@ +/* 8390.c: A general NS8390 ethernet driver core for linux. */ +/* + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + This is the chip-specific code for many 8390-based ethernet adaptors. + This is not a complete driver, it must be combined with board-specific + code such as ne.c, wd.c, 3c503.c, etc. + + Seeing how at least eight drivers use this code, (not counting the + PCMCIA ones either) it is easy to break some card by what seems like + a simple innocent change. Please contact me or Donald if you think + you have found something that needs changing. -- PG + + + Changelog: + + Paul Gortmaker : remove set_bit lock, other cleanups. + Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to + ei_block_input() for eth_io_copy_and_sum(). + Paul Gortmaker : exchange static int ei_pingpong for a #define, + also add better Tx error handling. + Paul Gortmaker : rewrite Rx overrun handling as per NS specs. + Alexey Kuznetsov : use the 8390's six bit hash multicast filter. + Paul Gortmaker : tweak ANK's above multicast changes a bit. + Paul Gortmaker : update packet statistics for v2.1.x + Alan Cox : support arbitrary stupid port mappings on the + 68K Macintosh. Support >16bit I/O spaces + Paul Gortmaker : add kmod support for auto-loading of the 8390 + module by all drivers that require it. + Alan Cox : Spinlocking work, added 'BUG_83C690' + Paul Gortmaker : Separate out Tx timeout code from Tx path. + Paul Gortmaker : Remove old unused single Tx buffer code. + Hayato Fujiwara : Add m32r support. + Paul Gortmaker : use skb_padto() instead of stack scratch area + + Sources: + The National Semiconductor LAN Databook, and the 3Com 3c503 databook. + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define NS8390_CORE +#include "8390.h" + +#define BUG_83C690 + +/* These are the operational function interfaces to board-specific + routines. + void reset_8390(struct net_device *dev) + Resets the board associated with DEV, including a hardware reset of + the 8390. This is only called when there is a transmit timeout, and + it is always followed by 8390_init(). 
+ void block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) + Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The + "page" value uses the 8390's 256-byte pages. + void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) + Read the 4 byte, page aligned 8390 header. *If* there is a + subsequent read, it will be of the rest of the packet. + void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) + Read COUNT bytes from the packet buffer into the skb data area. Start + reading from RING_OFFSET, the address as the 8390 sees it. This will always + follow the read of the 8390 header. +*/ +#define ei_reset_8390 (ei_local->reset_8390) +#define ei_block_output (ei_local->block_output) +#define ei_block_input (ei_local->block_input) +#define ei_get_8390_hdr (ei_local->get_8390_hdr) + +/* Index to functions. */ +static void ei_tx_intr(struct net_device *dev); +static void ei_tx_err(struct net_device *dev); +static void ei_receive(struct net_device *dev); +static void ei_rx_overrun(struct net_device *dev); + +/* Routines generic to NS8390-based boards. */ +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page); +static void do_set_multicast_list(struct net_device *dev); +static void __NS8390_init(struct net_device *dev, int startp); + +static unsigned version_printed; +static u32 msg_enable; +module_param(msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); + +/* + * SMP and the 8390 setup. + * + * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is + * a page register that controls bank and packet buffer access. We guard + * this with ei_local->page_lock. Nobody should assume or set the page other + * than zero when the lock is not held. Lock holders must restore page 0 + * before unlocking. Even pure readers must take the lock to protect in + * page 0. + * + * To make life difficult the chip can also be very slow. We therefore can't + * just use spinlocks. For the longer lockups we disable the irq the device + * sits on and hold the lock. We must hold the lock because there is a dual + * processor case other than interrupts (get stats/set multicast list in + * parallel with each other and transmit). + * + * Note: in theory we can just disable the irq on the card _but_ there is + * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" + * enter lock, take the queued irq. So we waddle instead of flying. + * + * Finally by special arrangement for the purpose of being generally + * annoying the transmit function is called bh atomic. That places + * restrictions on the user context callers as disable_irq won't save + * them. + * + * Additional explanation of problems with locking by Alan Cox: + * + * "The author (me) didn't use spin_lock_irqsave because the slowness of the + * card means that approach caused horrible problems like losing serial data + * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA + * chips with FPGA front ends. 
+ * + * Ok the logic behind the 8390 is very simple: + * + * Things to know + * - IRQ delivery is asynchronous to the PCI bus + * - Blocking the local CPU IRQ via spin locks was too slow + * - The chip has register windows needing locking work + * + * So the path was once (I say once as people appear to have changed it + * in the mean time and it now looks rather bogus if the changes to use + * disable_irq_nosync_irqsave are disabling the local IRQ) + * + * + * Take the page lock + * Mask the IRQ on chip + * Disable the IRQ (but not mask locally- someone seems to have + * broken this with the lock validator stuff) + * [This must be _nosync as the page lock may otherwise + * deadlock us] + * Drop the page lock and turn IRQs back on + * + * At this point an existing IRQ may still be running but we can't + * get a new one + * + * Take the lock (so we know the IRQ has terminated) but don't mask + * the IRQs on the processor + * Set irqlock [for debug] + * + * Transmit (slow as ****) + * + * re-enable the IRQ + * + * + * We have to use disable_irq because otherwise you will get delayed + * interrupts on the APIC bus deadlocking the transmit path. + * + * Quite hairy but the chip simply wasn't designed for SMP and you can't + * even ACK an interrupt without risking corrupting other parallel + * activities on the chip." [lkml, 25 Jul 2007] + */ + + + +/** + * ei_open - Open/initialize the board. + * @dev: network device to initialize + * + * This routine goes all-out, setting everything + * up anew at each open, even though many of these registers should only + * need to be set once at boot. + */ +static int __ei_open(struct net_device *dev) +{ + unsigned long flags; + struct ei_device *ei_local = netdev_priv(dev); + + if (dev->watchdog_timeo <= 0) + dev->watchdog_timeo = TX_TIMEOUT; + + /* + * Grab the page lock so we own the register set, then call + * the init function. + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + __NS8390_init(dev, 1); + /* Set the flag before we drop the lock, That way the IRQ arrives + after its set and we get no silly warnings */ + netif_start_queue(dev); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + ei_local->irqlock = 0; + return 0; +} + +/** + * ei_close - shut down network device + * @dev: network device to close + * + * Opposite of ei_open(). Only used when "ifconfig down" is done. + */ +static int __ei_close(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + + /* + * Hold the page lock during close + */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + __NS8390_init(dev, 0); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + netif_stop_queue(dev); + return 0; +} + +/** + * ei_tx_timeout - handle transmit time out condition + * @dev: network device which has apparently fallen asleep + * + * Called by kernel when device never acknowledges a transmit has + * completed (or failed) - i.e. never posted a Tx related interrupt. + */ + +static void __ei_tx_timeout(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); + unsigned long flags; + + dev->stats.tx_errors++; + + spin_lock_irqsave(&ei_local->page_lock, flags); + txsr = ei_inb(e8390_base+EN0_TSR); + isr = ei_inb(e8390_base+EN0_ISR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n", + (txsr & ENTSR_ABT) ? "excess collisions." : + (isr) ? 
"lost interrupt?" : "cable problem?", + txsr, isr, tickssofar); + + if (!isr && !dev->stats.tx_packets) { + /* The 8390 probably hasn't gotten on the cable yet. */ + ei_local->interface_num ^= 1; /* Try a different xcvr. */ + } + + /* Ugly but a reset can be slow, yet must be protected */ + + disable_irq_nosync_lockdep(dev->irq); + spin_lock(&ei_local->page_lock); + + /* Try to restart the card. Perhaps the user has fixed something. */ + ei_reset_8390(dev); + __NS8390_init(dev, 1); + + spin_unlock(&ei_local->page_lock); + enable_irq_lockdep(dev->irq); + netif_wake_queue(dev); +} + +/** + * ei_start_xmit - begin packet transmission + * @skb: packet to be sent + * @dev: network device to which packet is sent + * + * Sends a packet to an 8390 network device. + */ + +static netdev_tx_t __ei_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int send_length = skb->len, output_page; + unsigned long flags; + char buf[ETH_ZLEN]; + char *data = skb->data; + + if (skb->len < ETH_ZLEN) { + memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */ + memcpy(buf, data, skb->len); + send_length = ETH_ZLEN; + data = buf; + } + + /* Mask interrupts from the ethercard. + SMP: We have to grab the lock here otherwise the IRQ handler + on another CPU can flip window and race the IRQ mask set. We end + up trashing the mcast filter not disabling irqs if we don't lock */ + + spin_lock_irqsave(&ei_local->page_lock, flags); + ei_outb_p(0x00, e8390_base + EN0_IMR); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + + /* + * Slow phase with lock held. + */ + + disable_irq_nosync_lockdep_irqsave(dev->irq, &flags); + + spin_lock(&ei_local->page_lock); + + ei_local->irqlock = 1; + + /* + * We have two Tx slots available for use. Find the first free + * slot, and then perform some sanity checks. With two Tx bufs, + * you get very close to transmitting back-to-back packets. With + * only one Tx buf, the transmitter sits idle while you reload the + * card, leaving a substantial gap between each transmitted packet. + */ + + if (ei_local->tx1 == 0) { + output_page = ei_local->tx_start_page; + ei_local->tx1 = send_length; + if ((netif_msg_tx_queued(ei_local)) && + ei_local->tx2 > 0) + netdev_dbg(dev, + "idle transmitter tx2=%d, lasttx=%d, txing=%d\n", + ei_local->tx2, ei_local->lasttx, ei_local->txing); + } else if (ei_local->tx2 == 0) { + output_page = ei_local->tx_start_page + TX_PAGES/2; + ei_local->tx2 = send_length; + if ((netif_msg_tx_queued(ei_local)) && + ei_local->tx1 > 0) + netdev_dbg(dev, + "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n", + ei_local->tx1, ei_local->lasttx, ei_local->txing); + } else { /* We should never get here. */ + netif_dbg(ei_local, tx_err, dev, + "No Tx buffers free! tx1=%d tx2=%d last=%d\n", + ei_local->tx1, ei_local->tx2, ei_local->lasttx); + ei_local->irqlock = 0; + netif_stop_queue(dev); + ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); + spin_unlock(&ei_local->page_lock); + enable_irq_lockdep_irqrestore(dev->irq, &flags); + dev->stats.tx_errors++; + return NETDEV_TX_BUSY; + } + + /* + * Okay, now upload the packet and trigger a send if the transmitter + * isn't already sending. If it is busy, the interrupt handler will + * trigger the send later, upon receiving a Tx done interrupt. 
+ */ + + ei_block_output(dev, send_length, data, output_page); + + if (!ei_local->txing) { + ei_local->txing = 1; + NS8390_trigger_send(dev, send_length, output_page); + if (output_page == ei_local->tx_start_page) { + ei_local->tx1 = -1; + ei_local->lasttx = -1; + } else { + ei_local->tx2 = -1; + ei_local->lasttx = -2; + } + } else + ei_local->txqueue++; + + if (ei_local->tx1 && ei_local->tx2) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + /* Turn 8390 interrupts back on. */ + ei_local->irqlock = 0; + ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); + + spin_unlock(&ei_local->page_lock); + enable_irq_lockdep_irqrestore(dev->irq, &flags); + skb_tx_timestamp(skb); + dev_consume_skb_any(skb); + dev->stats.tx_bytes += send_length; + + return NETDEV_TX_OK; +} + +/** + * ei_interrupt - handle the interrupts from an 8390 + * @irq: interrupt number + * @dev_id: a pointer to the net_device + * + * Handle the ether interface interrupts. We pull packets from + * the 8390 via the card specific functions and fire them at the networking + * stack. We also handle transmit completions and wake the transmit path if + * necessary. We also update the counters and do other housekeeping as + * needed. + */ + +static irqreturn_t __ei_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + unsigned long e8390_base = dev->base_addr; + int interrupts, nr_serviced = 0; + struct ei_device *ei_local = netdev_priv(dev); + + /* + * Protect the irq test too. + */ + + spin_lock(&ei_local->page_lock); + + if (ei_local->irqlock) { + /* + * This might just be an interrupt for a PCI device sharing + * this line + */ + netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n", + ei_inb_p(e8390_base + EN0_ISR), + ei_inb_p(e8390_base + EN0_IMR)); + spin_unlock(&ei_local->page_lock); + return IRQ_NONE; + } + + /* Change to page 0 and read the intr status reg. */ + ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); + netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n", + ei_inb_p(e8390_base + EN0_ISR)); + + /* !!Assumption!! -- we stay in page 0. Don't break this. */ + while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 && + ++nr_serviced < MAX_SERVICE) { + if (!netif_running(dev)) { + netdev_warn(dev, "interrupt from stopped card\n"); + /* rmk - acknowledge the interrupts */ + ei_outb_p(interrupts, e8390_base + EN0_ISR); + interrupts = 0; + break; + } + if (interrupts & ENISR_OVER) + ei_rx_overrun(dev); + else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) { + /* Got a good (?) packet. */ + ei_receive(dev); + } + /* Push the next to-transmit packet through. */ + if (interrupts & ENISR_TX) + ei_tx_intr(dev); + else if (interrupts & ENISR_TX_ERR) + ei_tx_err(dev); + + if (interrupts & ENISR_COUNTERS) { + dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0); + dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); + dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2); + ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ + } + + /* Ignore any RDC interrupts that make it back to here. 
*/ + if (interrupts & ENISR_RDC) + ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR); + + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); + } + + if (interrupts && (netif_msg_intr(ei_local))) { + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); + if (nr_serviced >= MAX_SERVICE) { + /* 0xFF is valid for a card removal */ + if (interrupts != 0xFF) + netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n", + interrupts); + ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ + } else { + netdev_warn(dev, "unknown interrupt %#2x\n", interrupts); + ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ + } + } + spin_unlock(&ei_local->page_lock); + return IRQ_RETVAL(nr_serviced > 0); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void __ei_poll(struct net_device *dev) +{ + disable_irq(dev->irq); + __ei_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/** + * ei_tx_err - handle transmitter error + * @dev: network device which threw the exception + * + * A transmitter error has happened. Most likely excess collisions (which + * is a fairly normal condition). If the error is one where the Tx will + * have been aborted, we try and send another one right away, instead of + * letting the failed packet sit and collect dust in the Tx buffer. This + * is a much better solution as it avoids kernel based Tx timeouts, and + * an unnecessary card reset. + * + * Called with lock held. + */ + +static void ei_tx_err(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + /* ei_local is used on some platforms via the EI_SHIFT macro */ + struct ei_device *ei_local __maybe_unused = netdev_priv(dev); + unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR); + unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); + +#ifdef VERBOSE_ERROR_DUMP + netdev_dbg(dev, "transmitter error (%#2x):", txsr); + if (txsr & ENTSR_ABT) + pr_cont(" excess-collisions "); + if (txsr & ENTSR_ND) + pr_cont(" non-deferral "); + if (txsr & ENTSR_CRS) + pr_cont(" lost-carrier "); + if (txsr & ENTSR_FU) + pr_cont(" FIFO-underrun "); + if (txsr & ENTSR_CDH) + pr_cont(" lost-heartbeat "); + pr_cont("\n"); +#endif + + ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */ + + if (tx_was_aborted) + ei_tx_intr(dev); + else { + dev->stats.tx_errors++; + if (txsr & ENTSR_CRS) + dev->stats.tx_carrier_errors++; + if (txsr & ENTSR_CDH) + dev->stats.tx_heartbeat_errors++; + if (txsr & ENTSR_OWC) + dev->stats.tx_window_errors++; + } +} + +/** + * ei_tx_intr - transmit interrupt handler + * @dev: network device for which tx intr is handled + * + * We have finished a transmit: check for errors and then trigger the next + * packet to be sent. Called with lock held. + */ + +static void ei_tx_intr(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int status = ei_inb(e8390_base + EN0_TSR); + + ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ + + /* + * There are two Tx buffers, see which one finished, and trigger + * the send of another one if it exists. 
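+ *
+ * lasttx encodes which buffer went out most recently: -1/-2 when it
+ * was triggered from the transmit path, 1/2 when it was triggered
+ * from here, and 10/20 once it completed with nothing further queued.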
+ */ + ei_local->txqueue--; + + if (ei_local->tx1 < 0) { + if (ei_local->lasttx != 1 && ei_local->lasttx != -1) + pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n", + ei_local->name, ei_local->lasttx, ei_local->tx1); + ei_local->tx1 = 0; + if (ei_local->tx2 > 0) { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); + dev->trans_start = jiffies; + ei_local->tx2 = -1, + ei_local->lasttx = 2; + } else + ei_local->lasttx = 20, ei_local->txing = 0; + } else if (ei_local->tx2 < 0) { + if (ei_local->lasttx != 2 && ei_local->lasttx != -2) + pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n", + ei_local->name, ei_local->lasttx, ei_local->tx2); + ei_local->tx2 = 0; + if (ei_local->tx1 > 0) { + ei_local->txing = 1; + NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); + dev->trans_start = jiffies; + ei_local->tx1 = -1; + ei_local->lasttx = 1; + } else + ei_local->lasttx = 10, ei_local->txing = 0; + } /* else + netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", + ei_local->lasttx); +*/ + + /* Minimize Tx latency: update the statistics after we restart TXing. */ + if (status & ENTSR_COL) + dev->stats.collisions++; + if (status & ENTSR_PTX) + dev->stats.tx_packets++; + else { + dev->stats.tx_errors++; + if (status & ENTSR_ABT) { + dev->stats.tx_aborted_errors++; + dev->stats.collisions += 16; + } + if (status & ENTSR_CRS) + dev->stats.tx_carrier_errors++; + if (status & ENTSR_FU) + dev->stats.tx_fifo_errors++; + if (status & ENTSR_CDH) + dev->stats.tx_heartbeat_errors++; + if (status & ENTSR_OWC) + dev->stats.tx_window_errors++; + } + netif_wake_queue(dev); +} + +/** + * ei_receive - receive some packets + * @dev: network device with which receive will be run + * + * We have a good packet(s), get it/them out of the buffers. + * Called with lock held. + */ + +static void ei_receive(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned char rxing_page, this_frame, next_frame; + unsigned short current_offset; + int rx_pkt_count = 0; + struct e8390_pkt_hdr rx_frame; + int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page; + + while (++rx_pkt_count < 10) { + int pkt_len, pkt_stat; + + /* Get the rx page (incoming packet pointer). */ + ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD); + rxing_page = ei_inb_p(e8390_base + EN1_CURPAG); + ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); + + /* Remove one frame from the ring. Boundary is always a page behind. */ + this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1; + if (this_frame >= ei_local->stop_page) + this_frame = ei_local->rx_start_page; + + /* Someday we'll omit the previous, iff we never get this message. + (There is at least one clone claimed to have a problem.) + + Keep quiet if it looks like a card removal. One problem here + is that some clones crash in roughly the same way. + */ + if ((netif_msg_rx_status(ei_local)) && + this_frame != ei_local->current_page && + (this_frame != 0x0 || rxing_page != 0xFF)) + netdev_err(dev, + "mismatched read page pointers %2x vs %2x\n", + this_frame, ei_local->current_page); + + if (this_frame == rxing_page) /* Read all the frames? 
*/ + break; /* Done for now */ + + current_offset = this_frame << 8; + ei_get_8390_hdr(dev, &rx_frame, this_frame); + + pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); + pkt_stat = rx_frame.status; + + next_frame = this_frame + 1 + ((pkt_len+4)>>8); + + /* Check for bogosity warned by 3c503 book: the status byte is never + written. This happened a lot during testing! This code should be + cleaned up someday. */ + if (rx_frame.next != next_frame && + rx_frame.next != next_frame + 1 && + rx_frame.next != next_frame - num_rx_pages && + rx_frame.next != next_frame + 1 - num_rx_pages) { + ei_local->current_page = rxing_page; + ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); + dev->stats.rx_errors++; + continue; + } + + if (pkt_len < 60 || pkt_len > 1518) { + netif_dbg(ei_local, rx_status, dev, + "bogus packet size: %d, status=%#2x nxpg=%#2x\n", + rx_frame.count, rx_frame.status, + rx_frame.next); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) { + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { + netif_err(ei_local, rx_err, dev, + "Couldn't allocate a sk_buff of size %d\n", + pkt_len); + dev->stats.rx_dropped++; + break; + } else { + skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */ + skb_put(skb, pkt_len); /* Make room */ + ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); + skb->protocol = eth_type_trans(skb, dev); + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + if (pkt_stat & ENRSR_PHY) + dev->stats.multicast++; + } + } else { + netif_err(ei_local, rx_err, dev, + "bogus packet: status=%#2x nxpg=%#2x size=%d\n", + rx_frame.status, rx_frame.next, + rx_frame.count); + dev->stats.rx_errors++; + /* NB: The NIC counts CRC, frame and missed errors. */ + if (pkt_stat & ENRSR_FO) + dev->stats.rx_fifo_errors++; + } + next_frame = rx_frame.next; + + /* This _should_ never happen: it's here for avoiding bad clones. */ + if (next_frame >= ei_local->stop_page) { + netdev_notice(dev, "next frame inconsistency, %#2x\n", + next_frame); + next_frame = ei_local->rx_start_page; + } + ei_local->current_page = next_frame; + ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); + } + + /* We used to also ack ENISR_OVER here, but that would sometimes mask + a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ + ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); +} + +/** + * ei_rx_overrun - handle receiver overrun + * @dev: network device which threw exception + * + * We have a receiver overrun: we have to kick the 8390 to get it started + * again. Problem is that you have to kick it exactly as NS prescribes in + * the updated datasheets, or "the NIC may act in an unpredictable manner." + * This includes causing "the NIC to defer indefinitely when it is stopped + * on a busy network." Ugh. + * Called with lock held. Don't call this with the interrupts off or your + * computer will hate you - it takes 10ms or so. + */ + +static void ei_rx_overrun(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + unsigned char was_txing, must_resend = 0; + /* ei_local is used on some platforms via the EI_SHIFT macro */ + struct ei_device *ei_local __maybe_unused = netdev_priv(dev); + + /* + * Record whether a Tx was in progress and then issue the + * stop command. 
+ */ + was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS; + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n"); + dev->stats.rx_over_errors++; + + /* + * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. + * Early datasheets said to poll the reset bit, but now they say that + * it "is not a reliable indicator and subsequently should be ignored." + * We wait at least 10ms. + */ + + mdelay(10); + + /* + * Reset RBCR[01] back to zero as per magic incantation. + */ + ei_outb_p(0x00, e8390_base+EN0_RCNTLO); + ei_outb_p(0x00, e8390_base+EN0_RCNTHI); + + /* + * See if any Tx was interrupted or not. According to NS, this + * step is vital, and skipping it will cause no end of havoc. + */ + + if (was_txing) { + unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); + if (!tx_completed) + must_resend = 1; + } + + /* + * Have to enter loopback mode and then restart the NIC before + * you are allowed to slurp packets up off the ring. + */ + ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); + ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); + + /* + * Clear the Rx ring of all the debris, and ack the interrupt. + */ + ei_receive(dev); + ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR); + + /* + * Leave loopback mode, and resend any packet that got stopped. + */ + ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); + if (must_resend) + ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); +} + +/* + * Collect the stats. This is called unlocked and from several contexts. + */ + +static struct net_device_stats *__ei_get_stats(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + unsigned long flags; + + /* If the card is stopped, just return the present stats. */ + if (!netif_running(dev)) + return &dev->stats; + + spin_lock_irqsave(&ei_local->page_lock, flags); + /* Read the counter registers, assuming we are in page 0. */ + dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0); + dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1); + dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2); + spin_unlock_irqrestore(&ei_local->page_lock, flags); + + return &dev->stats; +} + +/* + * Form the 64 bit 8390 multicast table from the linked list of addresses + * associated with this dev structure. + */ + +static inline void make_mc_bits(u8 *bits, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr); + /* + * The 8390 uses the 6 most significant bits of the + * CRC to index the multicast table. + */ + bits[crc>>29] |= (1<<((crc>>26)&7)); + } +} + +/** + * do_set_multicast_list - set/clear multicast filter + * @dev: net device for which multicast filter is adjusted + * + * Set or clear the multicast filter for this adaptor. May be called + * from a BH in 2.1.x. Must be called with lock held. 
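make_mc_bits() above hashes each multicast address with the Ethernet CRC and keeps only the top six bits: three select one of the eight filter bytes, three select the bit within it. The sketch below reproduces that selection in plain user-space C; the bit-serial CRC routine is written to behave like ether_crc() but is an approximation supplied for the example, not code copied from the kernel.

#include <stdio.h>
#include <stdint.h>

/* bit-serial Ethernet CRC-32, intended to match ether_crc() (assumption) */
static uint32_t ether_crc_sketch(int len, const unsigned char *data)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		unsigned char byte = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			uint32_t carry = ((crc >> 31) ^ byte) & 1;
			crc = (crc << 1) ^ (carry ? 0x04c11db7u : 0);
		}
	}
	return crc;
}

static void mc_filter_bit(const unsigned char *addr, unsigned char bits[8])
{
	uint32_t crc = ether_crc_sketch(6, addr);

	/* top 3 bits pick the byte, next 3 bits pick the bit, as in the driver */
	bits[crc >> 29] |= 1 << ((crc >> 26) & 7);
}

int main(void)
{
	static const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned char bits[8] = { 0 };
	int i;

	mc_filter_bit(mcast, bits);
	for (i = 0; i < 8; i++)
		printf("%02x ", bits[i]);
	printf("\n");
	return 0;
}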
+ */ + +static void do_set_multicast_list(struct net_device *dev) +{ + unsigned long e8390_base = dev->base_addr; + int i; + struct ei_device *ei_local = netdev_priv(dev); + + if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { + memset(ei_local->mcfilter, 0, 8); + if (!netdev_mc_empty(dev)) + make_mc_bits(ei_local->mcfilter, dev); + } else + memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */ + + /* + * DP8390 manuals don't specify any magic sequence for altering + * the multicast regs on an already running card. To be safe, we + * ensure multicast mode is off prior to loading up the new hash + * table. If this proves to be not enough, we can always resort + * to stopping the NIC, loading the table and then restarting. + * + * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC + * Elite16) appear to be write-only. The NS 8390 data sheet lists + * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and + * Ultra32 EISA) appears to have this bug fixed. + */ + + if (netif_running(dev)) + ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); + ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); + for (i = 0; i < 8; i++) { + ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); +#ifndef BUG_83C690 + if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i]) + netdev_err(dev, "Multicast filter read/write mismap %d\n", + i); +#endif + } + ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); + + if (dev->flags&IFF_PROMISC) + ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); + else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) + ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); + else + ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); +} + +/* + * Called without lock held. This is invoked from user context and may + * be parallel to just about everything else. Its also fairly quick and + * not called too often. Must protect against both bh and irq users + */ + +static void __ei_set_multicast_list(struct net_device *dev) +{ + unsigned long flags; + struct ei_device *ei_local = netdev_priv(dev); + + spin_lock_irqsave(&ei_local->page_lock, flags); + do_set_multicast_list(dev); + spin_unlock_irqrestore(&ei_local->page_lock, flags); +} + +/** + * ethdev_setup - init rest of 8390 device struct + * @dev: network device structure to init + * + * Initialize the rest of the 8390 device structure. Do NOT __init + * this, as it is used by 8390 based modular drivers too. + */ + +static void ethdev_setup(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) + pr_info("%s", version); + + ether_setup(dev); + + spin_lock_init(&ei_local->page_lock); +} + +/** + * alloc_ei_netdev - alloc_etherdev counterpart for 8390 + * @size: extra bytes to allocate + * + * Allocate 8390-specific net_device. + */ +static struct net_device *____alloc_ei_netdev(int size) +{ + return alloc_netdev(sizeof(struct ei_device) + size, "eth%d", + NET_NAME_UNKNOWN, ethdev_setup); +} + + + + +/* This page of functions should be 8390 generic */ +/* Follow National Semi's recommendations for initializing the "NIC". */ + +/** + * NS8390_init - initialize 8390 hardware + * @dev: network device to initialize + * @startp: boolean. non-zero value to initiate chip processing + * + * Must be called with lock held. 
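The tail of do_set_multicast_list() above boils down to a small mapping from interface flags to an RXCR value: promiscuous mode sets both the accept-multicast and accept-all-physical bits, all-multicast (or a non-empty list) sets only the multicast bit. A compact illustration; the flag constants and the 0x04 base (a typical E8390_RXCONFIG value) are assumptions for the example, not values copied from 8390.h.

#include <stdio.h>

#define FLAG_PROMISC   0x1     /* stands in for IFF_PROMISC  */
#define FLAG_ALLMULTI  0x2     /* stands in for IFF_ALLMULTI */

static unsigned int rxcr_value(unsigned int flags, int have_mc_list,
			       unsigned int rxconfig_base)
{
	if (flags & FLAG_PROMISC)
		return rxconfig_base | 0x18;   /* multicast + promiscuous */
	if ((flags & FLAG_ALLMULTI) || have_mc_list)
		return rxconfig_base | 0x08;   /* multicast only */
	return rxconfig_base;                  /* unicast + broadcast */
}

int main(void)
{
	printf("%#x\n", rxcr_value(FLAG_PROMISC, 0, 0x04));
	printf("%#x\n", rxcr_value(0, 1, 0x04));
	printf("%#x\n", rxcr_value(0, 0, 0x04));
	return 0;
}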
+ */ + +static void __NS8390_init(struct net_device *dev, int startp) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + int i; + int endcfg = ei_local->word16 + ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) + : 0x48; + + if (sizeof(struct e8390_pkt_hdr) != 4) + panic("8390.c: header struct mispacked\n"); + /* Follow National Semi's recommendations for initing the DP83902. */ + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ + ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ + /* Clear the remote byte count registers. */ + ei_outb_p(0x00, e8390_base + EN0_RCNTLO); + ei_outb_p(0x00, e8390_base + EN0_RCNTHI); + /* Set to monitor and loopback mode -- this is vital!. */ + ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */ + ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ + /* Set the transmit page and receive ring. */ + ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); + ei_local->tx1 = ei_local->tx2 = 0; + ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); + ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ + ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ + ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); + /* Clear the pending interrupts and mask. */ + ei_outb_p(0xFF, e8390_base + EN0_ISR); + ei_outb_p(0x00, e8390_base + EN0_IMR); + + /* Copy the station address into the DS8390 registers. */ + + ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ + for (i = 0; i < 6; i++) { + ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); + if ((netif_msg_probe(ei_local)) && + ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i]) + netdev_err(dev, + "Hw. address read/write mismap %d\n", i); + } + + ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); + + ei_local->tx1 = ei_local->tx2 = 0; + ei_local->txing = 0; + + if (startp) { + ei_outb_p(0xff, e8390_base + EN0_ISR); + ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); + ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); + ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */ + /* 3c503 TechMan says rxconfig only after the NIC is started. */ + ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */ + do_set_multicast_list(dev); /* (re)load the mcast table */ + } +} + +/* Trigger a transmit start, assuming the length is valid. 
+ Always called with the page lock held */ + +static void NS8390_trigger_send(struct net_device *dev, unsigned int length, + int start_page) +{ + unsigned long e8390_base = dev->base_addr; + struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev); + + ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); + + if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) { + netdev_warn(dev, "trigger_send() called with the transmitter busy\n"); + return; + } + ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO); + ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI); + ei_outb_p(start_page, e8390_base + EN0_TPSR); + ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); +} diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c new file mode 100644 index 000000000..65cf60f67 --- /dev/null +++ b/drivers/net/ethernet/8390/mac8390.c @@ -0,0 +1,874 @@ +/* mac8390.c: New driver for 8390-based Nubus (or Nubus-alike) + Ethernet cards on Linux */ +/* Based on the former daynaport.c driver, by Alan Cox. Some code + taken from or inspired by skeleton.c by Donald Becker, acenic.c by + Jes Sorensen, and ne2k-pci.c by Donald Becker and Paul Gortmaker. + + This software may be used and distributed according to the terms of + the GNU Public License, incorporated herein by reference. */ + +/* 2000-02-28: support added for Dayna and Kinetics cards by + A.G.deWijn@phys.uu.nl */ +/* 2000-04-04: support added for Dayna2 by bart@etpmod.phys.tue.nl */ +/* 2001-04-18: support for DaynaPort E/LC-M by rayk@knightsmanor.org */ +/* 2001-05-15: support for Cabletron ported from old daynaport driver + * and fixed access to Sonic Sys card which masquerades as a Farallon + * by rayk@knightsmanor.org */ +/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */ +/* 2003-12-26: Make sure Asante cards always work. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static char version[] = + "v0.4 2001-05-15 David Huggins-Daines and others\n"; + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) +#define ei_inb(port) in_8(port) +#define ei_outb(val, port) out_8(port, val) +#define ei_inb_p(port) in_8(port) +#define ei_outb_p(val, port) out_8(port, val) + +#include "lib8390.c" + +#define WD_START_PG 0x00 /* First page of TX buffer */ +#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */ +#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */ +#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG + /* First page of TX buffer */ + +/* + * Unfortunately it seems we have to hardcode these for the moment + * Shouldn't the card know about this? + * Does anyone know where to read it off the card? + * Do we trust the data provided by the card? 
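The EI_SHIFT() macro defined above is the hook that lets one lib8390 core drive cards whose registers are spaced or ordered differently: each board supplies a 16-entry offset table and every register access goes through it. The sketch below shows the lookup with the 4-byte forward and reversed maps that mac8390_initdev() installs later in this file; the base address in main() is only an example.

#include <stdio.h>

static const unsigned int fwrd4[16] = {
	0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
};
static const unsigned int back4[16] = {
	60, 56, 52, 48, 44, 40, 36, 32, 28, 24, 20, 16, 12, 8, 4, 0
};

/* what base + EI_SHIFT(reg) resolves to once a table is installed */
static unsigned long reg_addr(unsigned long base, const unsigned int *map,
			      int reg)
{
	return base + map[reg];
}

int main(void)
{
	/* register 3 on a forward-mapped card vs. a reversed 32-bit card */
	printf("%#lx\n", reg_addr(0x80000, fwrd4, 3));
	printf("%#lx\n", reg_addr(0x80000, back4, 3));
	return 0;
}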
+ */ + +#define DAYNA_8390_BASE 0x80000 +#define DAYNA_8390_MEM 0x00000 + +#define CABLETRON_8390_BASE 0x90000 +#define CABLETRON_8390_MEM 0x00000 + +#define INTERLAN_8390_BASE 0xE0000 +#define INTERLAN_8390_MEM 0xD0000 + +enum mac8390_type { + MAC8390_NONE = -1, + MAC8390_APPLE, + MAC8390_ASANTE, + MAC8390_FARALLON, + MAC8390_CABLETRON, + MAC8390_DAYNA, + MAC8390_INTERLAN, + MAC8390_KINETICS, +}; + +static const char *cardname[] = { + "apple", + "asante", + "farallon", + "cabletron", + "dayna", + "interlan", + "kinetics", +}; + +static const int word16[] = { + 1, /* apple */ + 1, /* asante */ + 1, /* farallon */ + 1, /* cabletron */ + 0, /* dayna */ + 1, /* interlan */ + 0, /* kinetics */ +}; + +/* on which cards do we use NuBus resources? */ +static const int useresources[] = { + 1, /* apple */ + 1, /* asante */ + 1, /* farallon */ + 0, /* cabletron */ + 0, /* dayna */ + 0, /* interlan */ + 0, /* kinetics */ +}; + +enum mac8390_access { + ACCESS_UNKNOWN = 0, + ACCESS_32, + ACCESS_16, +}; + +extern int mac8390_memtest(struct net_device *dev); +static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev, + enum mac8390_type type); + +static int mac8390_open(struct net_device *dev); +static int mac8390_close(struct net_device *dev); +static void mac8390_no_reset(struct net_device *dev); +static void interlan_reset(struct net_device *dev); + +/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/ +static void sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page); + +/* dayna_memcpy to and from card */ +static void dayna_memcpy_fromcard(struct net_device *dev, void *to, + int from, int count); +static void dayna_memcpy_tocard(struct net_device *dev, int to, + const void *from, int count); + +/* Dayna - Dayna/Kinetics use this */ +static void dayna_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void dayna_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void dayna_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); + +#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c)) +#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c)) + +#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) + +/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ +static void slow_sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); +static void slow_sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void slow_sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static void word_memcpy_tocard(unsigned long tp, const void *fp, int count); +static void word_memcpy_fromcard(void *tp, unsigned long fp, int count); +static u32 mac8390_msg_enable; + +static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev) +{ + switch (dev->dr_sw) { + case NUBUS_DRSW_3COM: + switch (dev->dr_hw) { + case NUBUS_DRHW_APPLE_SONIC_NB: + case NUBUS_DRHW_APPLE_SONIC_LC: + case NUBUS_DRHW_SONNET: + return MAC8390_NONE; + default: + return MAC8390_APPLE; + } + break; + + case NUBUS_DRSW_APPLE: + switch (dev->dr_hw) { + case NUBUS_DRHW_ASANTE_LC: + return 
MAC8390_NONE; + case NUBUS_DRHW_CABLETRON: + return MAC8390_CABLETRON; + default: + return MAC8390_APPLE; + } + break; + + case NUBUS_DRSW_ASANTE: + return MAC8390_ASANTE; + break; + + case NUBUS_DRSW_TECHWORKS: + case NUBUS_DRSW_DAYNA2: + case NUBUS_DRSW_DAYNA_LC: + if (dev->dr_hw == NUBUS_DRHW_CABLETRON) + return MAC8390_CABLETRON; + else + return MAC8390_APPLE; + break; + + case NUBUS_DRSW_FARALLON: + return MAC8390_FARALLON; + break; + + case NUBUS_DRSW_KINETICS: + switch (dev->dr_hw) { + case NUBUS_DRHW_INTERLAN: + return MAC8390_INTERLAN; + default: + return MAC8390_KINETICS; + } + break; + + case NUBUS_DRSW_DAYNA: + /* + * These correspond to Dayna Sonic cards + * which use the macsonic driver + */ + if (dev->dr_hw == NUBUS_DRHW_SMC9194 || + dev->dr_hw == NUBUS_DRHW_INTERLAN) + return MAC8390_NONE; + else + return MAC8390_DAYNA; + break; + } + return MAC8390_NONE; +} + +static enum mac8390_access __init mac8390_testio(volatile unsigned long membase) +{ + unsigned long outdata = 0xA5A0B5B0; + unsigned long indata = 0x00000000; + /* Try writing 32 bits */ + memcpy_toio(membase, &outdata, 4); + /* Now compare them */ + if (memcmp_withio(&outdata, membase, 4) == 0) + return ACCESS_32; + /* Write 16 bit output */ + word_memcpy_tocard(membase, &outdata, 4); + /* Now read it back */ + word_memcpy_fromcard(&indata, membase, 4); + if (outdata == indata) + return ACCESS_16; + return ACCESS_UNKNOWN; +} + +static int __init mac8390_memsize(unsigned long membase) +{ + unsigned long flags; + int i, j; + + local_irq_save(flags); + /* Check up to 32K in 4K increments */ + for (i = 0; i < 8; i++) { + volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000)); + + /* Unwriteable - we have a fully decoded card and the + RAM end located */ + if (hwreg_present(m) == 0) + break; + + /* write a distinctive byte */ + *m = 0xA5A0 | i; + /* check that we read back what we wrote */ + if (*m != (0xA5A0 | i)) + break; + + /* check for partial decode and wrap */ + for (j = 0; j < i; j++) { + volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000)); + if (*p != (0xA5A0 | j)) + break; + } + } + local_irq_restore(flags); + /* + * in any case, we stopped once we tried one block too many, + * or once we reached 32K + */ + return i * 0x1000; +} + +static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev, + enum mac8390_type cardtype) +{ + struct nubus_dir dir; + struct nubus_dirent ent; + int offset; + volatile unsigned short *i; + + printk_once(KERN_INFO pr_fmt("%s"), version); + + dev->irq = SLOT2IRQ(ndev->board->slot); + /* This is getting to be a habit */ + dev->base_addr = (ndev->board->slot_addr | + ((ndev->board->slot & 0xf) << 20)); + + /* + * Get some Nubus info - we will trust the card's idea + * of where its memory and registers are. 
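mac8390_memsize() above sizes the card's shared RAM by writing a marker into each 4 KB block and watching for two failure modes: a block that will not hold the marker (end of RAM) and an earlier marker that changes (a partially decoded card wrapping around). The following simulation models that intended logic against an ordinary array; it deliberately simplifies the driver's loop structure and cannot reproduce real bus behaviour.

#include <stdio.h>

#define BLOCKS 8                        /* probe up to 8 x 4 KB = 32 KB */

static unsigned short sim_ram[BLOCKS];
static int real_blocks;                 /* how much RAM the fake card has */

/* simulated 16-bit access at "block * 0x1000": aliases past real_blocks */
static unsigned short *sim_addr(int block)
{
	return &sim_ram[block % real_blocks];
}

static int probe_size_kb(void)
{
	int i, j;

	for (i = 0; i < BLOCKS; i++) {
		*sim_addr(i) = 0xA5A0 | i;
		if (*sim_addr(i) != (0xA5A0 | i))
			break;                    /* not writable: RAM ends here */
		for (j = 0; j < i; j++)
			if (*sim_addr(j) != (0xA5A0 | j))
				goto done;        /* wrapped: only i blocks exist */
	}
done:
	return i * 4;
}

int main(void)
{
	real_blocks = 4;                          /* pretend the card has 16 KB */
	printf("probed %d KB\n", probe_size_kb());
	return 0;
}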
+ */ + + if (nubus_get_func_dir(ndev, &dir) == -1) { + pr_err("%s: Unable to get Nubus functional directory for slot %X!\n", + dev->name, ndev->board->slot); + return false; + } + + /* Get the MAC address */ + if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) { + pr_info("%s: Couldn't get MAC address!\n", dev->name); + return false; + } + + nubus_get_rsrc_mem(dev->dev_addr, &ent, 6); + + if (useresources[cardtype] == 1) { + nubus_rewinddir(&dir); + if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, + &ent) == -1) { + pr_err("%s: Memory offset resource for slot %X not found!\n", + dev->name, ndev->board->slot); + return false; + } + nubus_get_rsrc_mem(&offset, &ent, 4); + dev->mem_start = dev->base_addr + offset; + /* yes, this is how the Apple driver does it */ + dev->base_addr = dev->mem_start + 0x10000; + nubus_rewinddir(&dir); + if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, + &ent) == -1) { + pr_info("%s: Memory length resource for slot %X not found, probing\n", + dev->name, ndev->board->slot); + offset = mac8390_memsize(dev->mem_start); + } else { + nubus_get_rsrc_mem(&offset, &ent, 4); + } + dev->mem_end = dev->mem_start + offset; + } else { + switch (cardtype) { + case MAC8390_KINETICS: + case MAC8390_DAYNA: /* it's the same */ + dev->base_addr = (int)(ndev->board->slot_addr + + DAYNA_8390_BASE); + dev->mem_start = (int)(ndev->board->slot_addr + + DAYNA_8390_MEM); + dev->mem_end = dev->mem_start + + mac8390_memsize(dev->mem_start); + break; + case MAC8390_INTERLAN: + dev->base_addr = (int)(ndev->board->slot_addr + + INTERLAN_8390_BASE); + dev->mem_start = (int)(ndev->board->slot_addr + + INTERLAN_8390_MEM); + dev->mem_end = dev->mem_start + + mac8390_memsize(dev->mem_start); + break; + case MAC8390_CABLETRON: + dev->base_addr = (int)(ndev->board->slot_addr + + CABLETRON_8390_BASE); + dev->mem_start = (int)(ndev->board->slot_addr + + CABLETRON_8390_MEM); + /* The base address is unreadable if 0x00 + * has been written to the command register + * Reset the chip by writing E8390_NODMA + + * E8390_PAGE0 + E8390_STOP just to be + * sure + */ + i = (void *)dev->base_addr; + *i = 0x21; + dev->mem_end = dev->mem_start + + mac8390_memsize(dev->mem_start); + break; + + default: + pr_err("Card type %s is unsupported, sorry\n", + ndev->board->name); + return false; + } + } + + return true; +} + +struct net_device * __init mac8390_probe(int unit) +{ + struct net_device *dev; + struct nubus_dev *ndev = NULL; + int err = -ENODEV; + struct ei_device *ei_local; + + static unsigned int slots; + + enum mac8390_type cardtype; + + /* probably should check for Nubus instead */ + + if (!MACH_IS_MAC) + return ERR_PTR(-ENODEV); + + dev = ____alloc_ei_netdev(0); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) + sprintf(dev->name, "eth%d", unit); + + while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, + ndev))) { + /* Have we seen it already? 
*/ + if (slots & (1 << ndev->board->slot)) + continue; + slots |= 1 << ndev->board->slot; + + cardtype = mac8390_ident(ndev); + if (cardtype == MAC8390_NONE) + continue; + + if (!mac8390_init(dev, ndev, cardtype)) + continue; + + /* Do the nasty 8390 stuff */ + if (!mac8390_initdev(dev, ndev, cardtype)) + break; + } + + if (!ndev) + goto out; + + ei_local = netdev_priv(dev); + ei_local->msg_enable = mac8390_msg_enable; + + err = register_netdev(dev); + if (err) + goto out; + return dev; + +out: + free_netdev(dev); + return ERR_PTR(err); +} + +#ifdef MODULE +MODULE_AUTHOR("David Huggins-Daines and others"); +MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver"); +MODULE_LICENSE("GPL"); + +/* overkill, of course */ +static struct net_device *dev_mac8390[15]; +int init_module(void) +{ + int i; + for (i = 0; i < 15; i++) { + struct net_device *dev = mac8390_probe(-1); + if (IS_ERR(dev)) + break; + dev_mac890[i] = dev; + } + if (!i) { + pr_notice("No useable cards found, driver NOT installed.\n"); + return -ENODEV; + } + return 0; +} + +void cleanup_module(void) +{ + int i; + for (i = 0; i < 15; i++) { + struct net_device *dev = dev_mac890[i]; + if (dev) { + unregister_netdev(dev); + free_netdev(dev); + } + } +} + +#endif /* MODULE */ + +static const struct net_device_ops mac8390_netdev_ops = { + .ndo_open = mac8390_open, + .ndo_stop = mac8390_close, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_rx_mode = __ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = __ei_poll, +#endif +}; + +static int __init mac8390_initdev(struct net_device *dev, + struct nubus_dev *ndev, + enum mac8390_type type) +{ + static u32 fwrd4_offsets[16] = { + 0, 4, 8, 12, + 16, 20, 24, 28, + 32, 36, 40, 44, + 48, 52, 56, 60 + }; + static u32 back4_offsets[16] = { + 60, 56, 52, 48, + 44, 40, 36, 32, + 28, 24, 20, 16, + 12, 8, 4, 0 + }; + static u32 fwrd2_offsets[16] = { + 0, 2, 4, 6, + 8, 10, 12, 14, + 16, 18, 20, 22, + 24, 26, 28, 30 + }; + + int access_bitmode = 0; + + /* Now fill in our stuff */ + dev->netdev_ops = &mac8390_netdev_ops; + + /* GAR, ei_status is actually a macro even though it looks global */ + ei_status.name = cardname[type]; + ei_status.word16 = word16[type]; + + /* Cabletron's TX/RX buffers are backwards */ + if (type == MAC8390_CABLETRON) { + ei_status.tx_start_page = CABLETRON_TX_START_PG; + ei_status.rx_start_page = CABLETRON_RX_START_PG; + ei_status.stop_page = CABLETRON_RX_STOP_PG; + ei_status.rmem_start = dev->mem_start; + ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256; + } else { + ei_status.tx_start_page = WD_START_PG; + ei_status.rx_start_page = WD_START_PG + TX_PAGES; + ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; + ei_status.rmem_start = dev->mem_start + TX_PAGES*256; + ei_status.rmem_end = dev->mem_end; + } + + /* Fill in model-specific information and functions */ + switch (type) { + case MAC8390_FARALLON: + case MAC8390_APPLE: + switch (mac8390_testio(dev->mem_start)) { + case ACCESS_UNKNOWN: + pr_err("Don't know how to access card memory!\n"); + return -ENODEV; + + case ACCESS_16: + /* 16 bit card, register map is reversed */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = slow_sane_block_input; + ei_status.block_output = slow_sane_block_output; + ei_status.get_8390_hdr = slow_sane_get_8390_hdr; + 
ei_status.reg_offset = back4_offsets; + break; + + case ACCESS_32: + /* 32 bit card, register map is reversed */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = sane_block_input; + ei_status.block_output = sane_block_output; + ei_status.get_8390_hdr = sane_get_8390_hdr; + ei_status.reg_offset = back4_offsets; + access_bitmode = 1; + break; + } + break; + + case MAC8390_ASANTE: + /* Some Asante cards pass the 32 bit test + * but overwrite system memory when run at 32 bit. + * so we run them all at 16 bit. + */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = slow_sane_block_input; + ei_status.block_output = slow_sane_block_output; + ei_status.get_8390_hdr = slow_sane_get_8390_hdr; + ei_status.reg_offset = back4_offsets; + break; + + case MAC8390_CABLETRON: + /* 16 bit card, register map is short forward */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = slow_sane_block_input; + ei_status.block_output = slow_sane_block_output; + ei_status.get_8390_hdr = slow_sane_get_8390_hdr; + ei_status.reg_offset = fwrd2_offsets; + break; + + case MAC8390_DAYNA: + case MAC8390_KINETICS: + /* 16 bit memory, register map is forward */ + /* dayna and similar */ + ei_status.reset_8390 = mac8390_no_reset; + ei_status.block_input = dayna_block_input; + ei_status.block_output = dayna_block_output; + ei_status.get_8390_hdr = dayna_get_8390_hdr; + ei_status.reg_offset = fwrd4_offsets; + break; + + case MAC8390_INTERLAN: + /* 16 bit memory, register map is forward */ + ei_status.reset_8390 = interlan_reset; + ei_status.block_input = slow_sane_block_input; + ei_status.block_output = slow_sane_block_output; + ei_status.get_8390_hdr = slow_sane_get_8390_hdr; + ei_status.reg_offset = fwrd4_offsets; + break; + + default: + pr_err("Card type %s is unsupported, sorry\n", + ndev->board->name); + return -ENODEV; + } + + __NS8390_init(dev, 0); + + /* Good, done, now spit out some messages */ + pr_info("%s: %s in slot %X (type %s)\n", + dev->name, ndev->board->name, ndev->board->slot, + cardname[type]); + pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n", + dev->dev_addr, dev->irq, + (unsigned int)(dev->mem_end - dev->mem_start) >> 10, + dev->mem_start, access_bitmode ? 32 : 16); + return 0; +} + +static int mac8390_open(struct net_device *dev) +{ + int err; + + __ei_open(dev); + err = request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev); + if (err) + pr_err("%s: unable to get IRQ %d\n", dev->name, dev->irq); + return err; +} + +static int mac8390_close(struct net_device *dev) +{ + free_irq(dev->irq, dev); + __ei_close(dev); + return 0; +} + +static void mac8390_no_reset(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + ei_status.txing = 0; + netif_info(ei_local, hw, dev, "reset not supported\n"); +} + +static void interlan_reset(struct net_device *dev) +{ + unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq)); + struct ei_device *ei_local = netdev_priv(dev); + + netif_info(ei_local, hw, dev, "Need to reset the NS8390 t=%lu...", + jiffies); + ei_status.txing = 0; + target[0xC0000] = 0; + if (netif_msg_hw(ei_local)) + pr_cont("reset complete\n"); +} + +/* dayna_memcpy_fromio/dayna_memcpy_toio */ +/* directly from daynaport.c by Alan Cox */ +static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, + int count) +{ + volatile unsigned char *ptr; + unsigned char *target = to; + from <<= 1; /* word, skip overhead */ + ptr = (unsigned char *)(dev->mem_start+from); + /* Leading byte? 
*/ + if (from & 2) { + *target++ = ptr[-1]; + ptr += 2; + count--; + } + while (count >= 2) { + *(unsigned short *)target = *(unsigned short volatile *)ptr; + ptr += 4; /* skip cruft */ + target += 2; + count -= 2; + } + /* Trailing byte? */ + if (count) + *target = *ptr; +} + +static void dayna_memcpy_tocard(struct net_device *dev, int to, + const void *from, int count) +{ + volatile unsigned short *ptr; + const unsigned char *src = from; + to <<= 1; /* word, skip overhead */ + ptr = (unsigned short *)(dev->mem_start+to); + /* Leading byte? */ + if (to & 2) { /* avoid a byte write (stomps on other data) */ + ptr[-1] = (ptr[-1]&0xFF00)|*src++; + ptr++; + count--; + } + while (count >= 2) { + *ptr++ = *(unsigned short *)src; /* Copy and */ + ptr++; /* skip cruft */ + src += 2; + count -= 2; + } + /* Trailing byte? */ + if (count) { + /* card doesn't like byte writes */ + *ptr = (*ptr & 0x00FF) | (*src << 8); + } +} + +/* sane block input/output */ +static void sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + unsigned long hdr_start = (ring_page - WD_START_PG)<<8; + memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); + /* Fix endianness */ + hdr->count = swab16(hdr->count); +} + +static void sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long xfer_base = ring_offset - (WD_START_PG<<8); + unsigned long xfer_start = xfer_base + dev->mem_start; + + if (xfer_start + count > ei_status.rmem_end) { + /* We must wrap the input move. */ + int semi_count = ei_status.rmem_end - xfer_start; + memcpy_fromio(skb->data, dev->mem_start + xfer_base, + semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, + count); + } else { + memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); + } +} + +static void sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + long shmem = (start_page - WD_START_PG)<<8; + + memcpy_toio(dev->mem_start + shmem, buf, count); +} + +/* dayna block input/output */ +static void dayna_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + unsigned long hdr_start = (ring_page - WD_START_PG)<<8; + + dayna_memcpy_fromcard(dev, hdr, hdr_start, 4); + /* Fix endianness */ + hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8); +} + +static void dayna_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long xfer_base = ring_offset - (WD_START_PG<<8); + unsigned long xfer_start = xfer_base+dev->mem_start; + + /* Note the offset math is done in card memory space which is word + per long onto our space. */ + + if (xfer_start + count > ei_status.rmem_end) { + /* We must wrap the input move. 
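The dayna_memcpy helpers above deal with card RAM that shows up as one 16-bit data word in every 32-bit longword of host space, which is why card offsets are doubled and two "cruft" bytes are skipped after each word. The model below demonstrates only that address mapping; the real routines additionally avoid byte-wide writes by merging odd leading and trailing bytes into full words, a hardware constraint an ordinary array cannot show.

#include <stdio.h>
#include <stdint.h>

static uint8_t host_window[128];        /* simulated slot address space */

/* card byte b lives at host offset 4*(b/2) + (b&1): word, then 2 cruft bytes */
static size_t host_off(int card_byte)
{
	return 4 * (card_byte / 2) + (card_byte & 1);
}

static void card_write(int to, const void *from, int count)
{
	const uint8_t *src = from;

	while (count-- > 0)
		host_window[host_off(to++)] = *src++;
}

static void card_read(void *to, int from, int count)
{
	uint8_t *dst = to;

	while (count-- > 0)
		*dst++ = host_window[host_off(from++)];
}

int main(void)
{
	char back[16] = { 0 };

	card_write(3, "ring buffer", 11);   /* odd starting offset on purpose */
	card_read(back, 3, 11);
	printf("%s\n", back);               /* prints "ring buffer" */
	return 0;
}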
*/ + int semi_count = ei_status.rmem_end - xfer_start; + dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count); + count -= semi_count; + dayna_memcpy_fromcard(dev, skb->data + semi_count, + ei_status.rmem_start - dev->mem_start, + count); + } else { + dayna_memcpy_fromcard(dev, skb->data, xfer_base, count); + } +} + +static void dayna_block_output(struct net_device *dev, int count, + const unsigned char *buf, + int start_page) +{ + long shmem = (start_page - WD_START_PG)<<8; + + dayna_memcpy_tocard(dev, shmem, buf, count); +} + +/* Cabletron block I/O */ +static void slow_sane_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + unsigned long hdr_start = (ring_page - WD_START_PG)<<8; + word_memcpy_fromcard(hdr, dev->mem_start + hdr_start, 4); + /* Register endianism - fix here rather than 8390.c */ + hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8); +} + +static void slow_sane_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned long xfer_base = ring_offset - (WD_START_PG<<8); + unsigned long xfer_start = xfer_base+dev->mem_start; + + if (xfer_start + count > ei_status.rmem_end) { + /* We must wrap the input move. */ + int semi_count = ei_status.rmem_end - xfer_start; + word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, + semi_count); + count -= semi_count; + word_memcpy_fromcard(skb->data + semi_count, + ei_status.rmem_start, count); + } else { + word_memcpy_fromcard(skb->data, dev->mem_start + xfer_base, + count); + } +} + +static void slow_sane_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page) +{ + long shmem = (start_page - WD_START_PG)<<8; + + word_memcpy_tocard(dev->mem_start + shmem, buf, count); +} + +static void word_memcpy_tocard(unsigned long tp, const void *fp, int count) +{ + volatile unsigned short *to = (void *)tp; + const unsigned short *from = fp; + + count++; + count /= 2; + + while (count--) + *to++ = *from++; +} + +static void word_memcpy_fromcard(void *tp, unsigned long fp, int count) +{ + unsigned short *to = tp; + const volatile unsigned short *from = (const void *)fp; + + count++; + count /= 2; + + while (count--) + *to++ = *from++; +} + + diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c new file mode 100644 index 000000000..e1c055574 --- /dev/null +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -0,0 +1,480 @@ +/* + * Support for ColdFire CPU based boards using a NS8390 Ethernet device. + * + * Derived from the many other 8390 drivers. + * + * (C) Copyright 2012, Greg Ungerer + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. 
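word_memcpy_tocard() and word_memcpy_fromcard() just above round an odd byte count up to a whole number of 16-bit transfers, and slow_sane_get_8390_hdr() byte-swaps the header count because the card and the 68k host disagree on byte order. Both details in miniature, as a standalone example:

#include <stdio.h>
#include <stdint.h>

/* same effect as the driver's count++; count /= 2; sequence */
static int words_for(int byte_count)
{
	return (byte_count + 1) / 2;
}

static uint16_t swap16(uint16_t v)
{
	return (uint16_t)((v & 0xFF) << 8 | v >> 8);
}

int main(void)
{
	printf("%d %d\n", words_for(60), words_for(61));     /* 30 31 */
	printf("%#x\n", (unsigned)swap16(0x2a01));           /* 0x12a */
	return 0;
}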
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char version[] = + "mcf8390.c: (15-06-2012) Greg Ungerer "; + +#define NE_CMD 0x00 +#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset */ +#define NE_RESET 0x1f /* Issue a read to reset ,a write to clear */ +#define NE_EN0_ISR 0x07 +#define NE_EN0_DCFG 0x0e +#define NE_EN0_RSARLO 0x08 +#define NE_EN0_RSARHI 0x09 +#define NE_EN0_RCNTLO 0x0a +#define NE_EN0_RXCR 0x0c +#define NE_EN0_TXCR 0x0d +#define NE_EN0_RCNTHI 0x0b +#define NE_EN0_IMR 0x0f + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ +static u32 mcf8390_msg_enable; + +#ifdef NE2000_ODDOFFSET +/* + * A lot of the ColdFire boards use a separate address region for odd offset + * register addresses. The following functions convert and map as required. + * Note that the data port accesses are treated a little differently, and + * always accessed via the insX/outsX functions. + */ +static inline u32 NE_PTR(u32 addr) +{ + if (addr & 1) + return addr - 1 + NE2000_ODDOFFSET; + return addr; +} + +static inline u32 NE_DATA_PTR(u32 addr) +{ + return addr; +} + +void ei_outb(u32 val, u32 addr) +{ + NE2000_BYTE *rp; + + rp = (NE2000_BYTE *) NE_PTR(addr); + *rp = RSWAP(val); +} + +#define ei_inb ei_inb +u8 ei_inb(u32 addr) +{ + NE2000_BYTE *rp, val; + + rp = (NE2000_BYTE *) NE_PTR(addr); + val = *rp; + return (u8) (RSWAP(val) & 0xff); +} + +void ei_insb(u32 addr, void *vbuf, int len) +{ + NE2000_BYTE *rp, val; + u8 *buf; + + buf = (u8 *) vbuf; + rp = (NE2000_BYTE *) NE_DATA_PTR(addr); + for (; (len > 0); len--) { + val = *rp; + *buf++ = RSWAP(val); + } +} + +void ei_insw(u32 addr, void *vbuf, int len) +{ + volatile u16 *rp; + u16 w, *buf; + + buf = (u16 *) vbuf; + rp = (volatile u16 *) NE_DATA_PTR(addr); + for (; (len > 0); len--) { + w = *rp; + *buf++ = BSWAP(w); + } +} + +void ei_outsb(u32 addr, const void *vbuf, int len) +{ + NE2000_BYTE *rp, val; + u8 *buf; + + buf = (u8 *) vbuf; + rp = (NE2000_BYTE *) NE_DATA_PTR(addr); + for (; (len > 0); len--) { + val = *buf++; + *rp = RSWAP(val); + } +} + +void ei_outsw(u32 addr, const void *vbuf, int len) +{ + volatile u16 *rp; + u16 w, *buf; + + buf = (u16 *) vbuf; + rp = (volatile u16 *) NE_DATA_PTR(addr); + for (; (len > 0); len--) { + w = *buf++; + *rp = BSWAP(w); + } +} + +#else /* !NE2000_ODDOFFSET */ + +#define ei_inb inb +#define ei_outb outb +#define ei_insb insb +#define ei_insw insw +#define ei_outsb outsb +#define ei_outsw outsw + +#endif /* !NE2000_ODDOFFSET */ + +#define ei_inb_p ei_inb +#define ei_outb_p ei_outb + +#include "lib8390.c" + +/* + * Hard reset the card. This used to pause for the same period that a + * 8390 reset command required, but that shouldn't be necessary. + */ +static void mcf8390_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + u32 addr = dev->base_addr; + struct ei_device *ei_local = netdev_priv(dev); + + netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies); + + ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RESET) == 0) { + if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) { + netdev_warn(dev, "%s: did not complete\n", __func__); + break; + } + } + + ei_outb(ENISR_RESET, addr + NE_EN0_ISR); +} + +/* + * This *shouldn't* happen. 
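The NE_PTR() helper above rebases odd register addresses into a second window that some ColdFire boards decode separately, while data-port addresses pass through untouched. A standalone restatement of that translation, with ODD_WINDOW standing in for the board-specific NE2000_ODDOFFSET value:

#include <stdio.h>

#define ODD_WINDOW 0x10000ul            /* illustrative board offset only */

static unsigned long reg_ptr(unsigned long addr)
{
	if (addr & 1)
		return addr - 1 + ODD_WINDOW;   /* odd register: second window */
	return addr;                            /* even register: unchanged   */
}

int main(void)
{
	/* even register (CMD at +0x00) stays put, odd one (+0x0d) is rebased */
	printf("%#lx\n", reg_ptr(0x8000));
	printf("%#lx\n", reg_ptr(0x800d));
	return 0;
}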
+ * If it does, it's the last thing you'll see + */ +static void mcf8390_dmaing_err(const char *func, struct net_device *dev, + struct ei_device *ei_local) +{ + netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", + func, ei_local->dmaing, ei_local->irqlock); +} + +/* + * Grab the 8390 specific header. Similar to the block_input routine, but + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. + */ +static void mcf8390_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + u32 addr = dev->base_addr; + + if (ei_local->dmaing) { + mcf8390_dmaing_err(__func__, dev, ei_local); + return; + } + + ei_local->dmaing |= 0x01; + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD); + ei_outb(ENISR_RDC, addr + NE_EN0_ISR); + ei_outb(sizeof(struct e8390_pkt_hdr), addr + NE_EN0_RCNTLO); + ei_outb(0, addr + NE_EN0_RCNTHI); + ei_outb(0, addr + NE_EN0_RSARLO); /* On page boundary */ + ei_outb(ring_page, addr + NE_EN0_RSARHI); + ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD); + + ei_insw(addr + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1); + + outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */ + ei_local->dmaing &= ~0x01; + + hdr->count = cpu_to_le16(hdr->count); +} + +/* + * Block input and output, similar to the Crynwr packet driver. + * If you are porting to a new ethercard, look at the packet driver source + * for hints. The NEx000 doesn't share the on-board packet memory -- + * you have to put the packet out through the "remote DMA" dataport + * using z_writeb. + */ +static void mcf8390_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); + u32 addr = dev->base_addr; + char *buf = skb->data; + + if (ei_local->dmaing) { + mcf8390_dmaing_err(__func__, dev, ei_local); + return; + } + + ei_local->dmaing |= 0x01; + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD); + ei_outb(ENISR_RDC, addr + NE_EN0_ISR); + ei_outb(count & 0xff, addr + NE_EN0_RCNTLO); + ei_outb(count >> 8, addr + NE_EN0_RCNTHI); + ei_outb(ring_offset & 0xff, addr + NE_EN0_RSARLO); + ei_outb(ring_offset >> 8, addr + NE_EN0_RSARHI); + ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD); + + ei_insw(addr + NE_DATAPORT, buf, count >> 1); + if (count & 1) + buf[count - 1] = ei_inb(addr + NE_DATAPORT); + + ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */ + ei_local->dmaing &= ~0x01; +} + +static void mcf8390_block_output(struct net_device *dev, int count, + const unsigned char *buf, + const int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + u32 addr = dev->base_addr; + unsigned long dma_start; + + /* Make sure we transfer all bytes if 16bit IO writes */ + if (count & 0x1) + count++; + + if (ei_local->dmaing) { + mcf8390_dmaing_err(__func__, dev, ei_local); + return; + } + + ei_local->dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, addr + NE_CMD); + + ei_outb(ENISR_RDC, addr + NE_EN0_ISR); + + /* Now the normal output. 
*/ + ei_outb(count & 0xff, addr + NE_EN0_RCNTLO); + ei_outb(count >> 8, addr + NE_EN0_RCNTHI); + ei_outb(0x00, addr + NE_EN0_RSARLO); + ei_outb(start_page, addr + NE_EN0_RSARHI); + ei_outb(E8390_RWRITE + E8390_START, addr + NE_CMD); + + ei_outsw(addr + NE_DATAPORT, buf, count >> 1); + + dma_start = jiffies; + while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) { + if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC\n"); + mcf8390_reset_8390(dev); + __NS8390_init(dev, 1); + break; + } + } + + ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */ + ei_local->dmaing &= ~0x01; +} + +static const struct net_device_ops mcf8390_netdev_ops = { + .ndo_open = __ei_open, + .ndo_stop = __ei_close, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_rx_mode = __ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = __ei_poll, +#endif +}; + +static int mcf8390_init(struct net_device *dev) +{ + static u32 offsets[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + }; + struct ei_device *ei_local = netdev_priv(dev); + unsigned char SA_prom[32]; + u32 addr = dev->base_addr; + int start_page, stop_page; + int i, ret; + + mcf8390_reset_8390(dev); + + /* + * Read the 16 bytes of station address PROM. + * We must first initialize registers, + * similar to NS8390_init(eifdev, 0). + * We can't reliably read the SAPROM address without this. + * (I learned the hard way!). + */ + { + static const struct { + u32 value; + u32 offset; + } program_seq[] = { + {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD}, + /* Select page 0 */ + {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */ + {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */ + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_IMR}, /* Mask completion irq */ + {0xFF, NE_EN0_ISR}, + {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */ + {32, NE_EN0_RCNTLO}, + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */ + {0x00, NE_EN0_RSARHI}, + {E8390_RREAD + E8390_START, NE_CMD}, + }; + for (i = 0; i < ARRAY_SIZE(program_seq); i++) { + ei_outb(program_seq[i].value, + addr + program_seq[i].offset); + } + } + + for (i = 0; i < 16; i++) { + SA_prom[i] = ei_inb(addr + NE_DATAPORT); + ei_inb(addr + NE_DATAPORT); + } + + /* We must set the 8390 for word mode. 
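The SAPROM read in mcf8390_init() above is driven from a small { value, offset } table rather than a string of writes, the same pattern ne.c uses later in this patch. A stripped-down model of that table-driven initialisation, with the hardware replaced by an array so it can be compiled and run anywhere; the four entries shown are a subset picked for the example.

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static uint8_t regs[0x20];                /* stand-in for the 8390 window */

static void reg_write(uint8_t val, uint32_t off)
{
	regs[off] = val;                  /* real code: ei_outb(val, base + off) */
}

int main(void)
{
	static const struct { uint8_t value; uint8_t offset; } seq[] = {
		{ 0x21, 0x00 },           /* stop, page 0, no DMA */
		{ 0x48, 0x0e },           /* byte-wide access */
		{ 0x00, 0x0a },           /* clear remote byte count, low */
		{ 0x00, 0x0b },           /* clear remote byte count, high */
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(seq); i++)
		reg_write(seq[i].value, seq[i].offset);

	printf("CMD=%#x DCFG=%#x\n", regs[0x00], regs[0x0e]);
	return 0;
}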
*/ + ei_outb(0x49, addr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + /* Install the Interrupt handler */ + ret = request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev); + if (ret) + return ret; + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = SA_prom[i]; + + netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr); + + ei_local->name = "mcf8390"; + ei_local->tx_start_page = start_page; + ei_local->stop_page = stop_page; + ei_local->word16 = 1; + ei_local->rx_start_page = start_page + TX_PAGES; + ei_local->reset_8390 = mcf8390_reset_8390; + ei_local->block_input = mcf8390_block_input; + ei_local->block_output = mcf8390_block_output; + ei_local->get_8390_hdr = mcf8390_get_8390_hdr; + ei_local->reg_offset = offsets; + + dev->netdev_ops = &mcf8390_netdev_ops; + __NS8390_init(dev, 0); + ret = register_netdev(dev); + if (ret) { + free_irq(dev->irq, dev); + return ret; + } + + netdev_info(dev, "addr=0x%08x irq=%d, Ethernet Address %pM\n", + addr, dev->irq, dev->dev_addr); + return 0; +} + +static int mcf8390_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct ei_device *ei_local; + struct resource *mem, *irq; + resource_size_t msize; + int ret; + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (irq == NULL) { + dev_err(&pdev->dev, "no IRQ specified?\n"); + return -ENXIO; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (mem == NULL) { + dev_err(&pdev->dev, "no memory address specified?\n"); + return -ENXIO; + } + msize = resource_size(mem); + if (!request_mem_region(mem->start, msize, pdev->name)) + return -EBUSY; + + dev = ____alloc_ei_netdev(0); + if (dev == NULL) { + release_mem_region(mem->start, msize); + return -ENOMEM; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); + ei_local = netdev_priv(dev); + ei_local->msg_enable = mcf8390_msg_enable; + + dev->irq = irq->start; + dev->base_addr = mem->start; + + ret = mcf8390_init(dev); + if (ret) { + release_mem_region(mem->start, msize); + free_netdev(dev); + return ret; + } + return 0; +} + +static int mcf8390_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct resource *mem; + + unregister_netdev(dev); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (mem) + release_mem_region(mem->start, resource_size(mem)); + free_netdev(dev); + return 0; +} + +static struct platform_driver mcf8390_drv = { + .driver = { + .name = "mcf8390", + }, + .probe = mcf8390_probe, + .remove = mcf8390_remove, +}; + +module_platform_driver(mcf8390_drv); + +MODULE_DESCRIPTION("MCF8390 ColdFire NS8390 driver"); +MODULE_AUTHOR("Greg Ungerer "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mcf8390"); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c new file mode 100644 index 000000000..c063b410a --- /dev/null +++ b/drivers/net/ethernet/8390/ne.c @@ -0,0 +1,1018 @@ +/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */ +/* + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + This driver should work with many programmed-I/O 8390-based ethernet + boards. 
Currently it supports the NE1000, NE2000, many clones, + and some Cabletron products. + + Changelog: + + Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made + sanity checks and bad clone support optional. + Paul Gortmaker : new reset code, reset card after probe at boot. + Paul Gortmaker : multiple card support for module users. + Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c + Paul Gortmaker : Allow users with bad cards to avoid full probe. + Paul Gortmaker : PCI probe changes, more PCI cards supported. + rjohnson@analogic.com : Changed init order so an interrupt will only + occur after memory is allocated for dev->priv. Deallocated memory + last in cleanup_modue() + Richard Guenther : Added support for ISAPnP cards + Paul Gortmaker : Discontinued PCI support - use ne2k-pci.c instead. + Hayato Fujiwara : Add m32r support. + +*/ + +/* Routines for the NatSemi-based designs (NE[12]000). */ + +static const char version1[] = +"ne.c:v1.10 9/23/94 Donald Becker (becker@scyld.com)\n"; +static const char version2[] = +"Last modified Nov 1, 2000 by Paul Gortmaker\n"; + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "8390.h" + +#define DRV_NAME "ne" + +/* Some defines that people can play with if so inclined. */ + +/* Do we support clones that don't adhere to 14,15 of the SAprom ? */ +#define SUPPORT_NE_BAD_CLONES +/* 0xbad = bad sig or no reset ack */ +#define BAD 0xbad + +#define MAX_NE_CARDS 4 /* Max number of NE cards per module */ +static struct platform_device *pdev_ne[MAX_NE_CARDS]; +static int io[MAX_NE_CARDS]; +static int irq[MAX_NE_CARDS]; +static int bad[MAX_NE_CARDS]; +static u32 ne_msg_enable; + +#ifdef MODULE +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(bad, int, NULL, 0); +module_param_named(msg_enable, ne_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +MODULE_PARM_DESC(io, "I/O base address(es),required"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures"); +MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); +MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver"); +MODULE_LICENSE("GPL"); +#endif /* MODULE */ + +/* Do we perform extra sanity checks on stuff ? */ +/* #define NE_SANITY_CHECK */ + +/* Do we implement the read before write bugfix ? */ +/* #define NE_RW_BUGFIX */ + +/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ +/* #define PACKETBUF_MEMSIZE 0x40 */ + +/* This is set up so that no ISA autoprobe takes place. We can't guarantee +that the ne2k probe is the last 8390 based probe to take place (as it +is at boot) and so the probe will get confused by any other 8390 cards. +ISA device autoprobes on a running machine are not recommended anyway. */ +#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R)) +/* Do we need a portlist for the ISA auto-probe ? */ +#define NEEDS_PORTLIST +#endif + +/* A zero-terminated list of I/O addresses to be probed at boot. 
*/ +#ifdef NEEDS_PORTLIST +static unsigned int netcard_portlist[] __initdata = { + 0x300, 0x280, 0x320, 0x340, 0x360, 0x380, 0 +}; +#endif + +static struct isapnp_device_id isapnp_clone_list[] __initdata = { + { ISAPNP_CARD_ID('A','X','E',0x2011), + ISAPNP_VENDOR('A','X','E'), ISAPNP_FUNCTION(0x2011), + (long) "NetGear EA201" }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, + ISAPNP_VENDOR('E','D','I'), ISAPNP_FUNCTION(0x0216), + (long) "NN NE2000" }, + { ISAPNP_ANY_ID, ISAPNP_ANY_ID, + ISAPNP_VENDOR('P','N','P'), ISAPNP_FUNCTION(0x80d6), + (long) "Generic PNP" }, + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(isapnp, isapnp_clone_list); + +#ifdef SUPPORT_NE_BAD_CLONES +/* A list of bad clones that we none-the-less recognize. */ +static struct { const char *name8, *name16; unsigned char SAprefix[4];} +bad_clone_list[] __initdata = { + {"DE100", "DE200", {0x00, 0xDE, 0x01,}}, + {"DE120", "DE220", {0x00, 0x80, 0xc8,}}, + {"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */ + {"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}}, + {"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */ + {"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */ + {"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */ + {"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */ + {"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */ + {"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */ + {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ + {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ + {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ +#ifdef CONFIG_MACH_TX49XX + {"RBHMA4X00-RTL8019", "RBHMA4X00-RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ +#endif + {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ + {NULL,} +}; +#endif + +/* ---- No user-serviceable parts below ---- */ + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */ +#define NE_IO_EXTENT 0x20 + +#define NE1SM_START_PG 0x20 /* First page of TX buffer */ +#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#if defined(CONFIG_PLAT_MAPPI) +# define DCR_VAL 0x4b +#elif defined(CONFIG_PLAT_OAKS32R) || \ + defined(CONFIG_MACH_TX49XX) +# define DCR_VAL 0x48 /* 8-bit mode */ +#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ +# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49) +#else +# define DCR_VAL 0x49 +#endif + +static int ne_probe1(struct net_device *dev, unsigned long ioaddr); +static int ne_probe_isapnp(struct net_device *dev); + +static void ne_reset_8390(struct net_device *dev); +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ne_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ne_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); + + +/* Probe for various non-shared-memory ethercards. + + NEx000-clone boards have a Station Address PROM (SAPROM) in the packet + buffer memory space. 
NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of + the SAPROM, while other supposed NE2000 clones must be detected by their + SA prefix. + + Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + mode results in doubled values, which can be detected and compensated for. + + The probe is also responsible for initializing the card and filling + in the 'dev' and 'ei_status' structures. + + We use the minimum memory size for some ethercard product lines, iff we can't + distinguish models. You can increase the packet buffer size by setting + PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: + E1010 starts at 0x100 and ends at 0x2000. + E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") + E2010 starts at 0x100 and ends at 0x4000. + E2010-x starts at 0x100 and ends at 0xffff. */ + +static int __init do_ne_probe(struct net_device *dev) +{ + unsigned long base_addr = dev->base_addr; +#ifdef NEEDS_PORTLIST + int orig_irq = dev->irq; +#endif + + /* First check any supplied i/o locations. User knows best. */ + if (base_addr > 0x1ff) { /* Check a single specified location. */ + int ret = ne_probe1(dev, base_addr); + if (ret) + netdev_warn(dev, "ne.c: No NE*000 card found at " + "i/o = %#lx\n", base_addr); + return ret; + } + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + /* Then look for any installed ISAPnP clones */ + if (isapnp_present() && (ne_probe_isapnp(dev) == 0)) + return 0; + +#ifdef NEEDS_PORTLIST + /* Last resort. The semi-risky ISA auto-probe. */ + for (base_addr = 0; netcard_portlist[base_addr] != 0; base_addr++) { + int ioaddr = netcard_portlist[base_addr]; + dev->irq = orig_irq; + if (ne_probe1(dev, ioaddr) == 0) + return 0; + } +#endif + + return -ENODEV; +} + +static int __init ne_probe_isapnp(struct net_device *dev) +{ + int i; + + for (i = 0; isapnp_clone_list[i].vendor != 0; i++) { + struct pnp_dev *idev = NULL; + + while ((idev = pnp_find_dev(NULL, + isapnp_clone_list[i].vendor, + isapnp_clone_list[i].function, + idev))) { + /* Avoid already found cards from previous calls */ + if (pnp_device_attach(idev) < 0) + continue; + if (pnp_activate_dev(idev) < 0) { + pnp_device_detach(idev); + continue; + } + /* if no io and irq, search for next */ + if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) { + pnp_device_detach(idev); + continue; + } + /* found it */ + dev->base_addr = pnp_port_start(idev, 0); + dev->irq = pnp_irq(idev, 0); + netdev_info(dev, + "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n", + (char *) isapnp_clone_list[i].driver_data, + dev->base_addr, dev->irq); + if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */ + netdev_err(dev, + "ne.c: Probe of ISAPnP card at %#lx failed.\n", + dev->base_addr); + pnp_device_detach(idev); + return -ENXIO; + } + ei_status.priv = (unsigned long)idev; + break; + } + if (!idev) + continue; + return 0; + } + + return -ENODEV; +} + +static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) +{ + int i; + unsigned char SA_prom[32]; + int wordlength = 2; + const char *name = NULL; + int start_page, stop_page; + int neX000, ctron, copam, bad_card; + int reg0, ret; + static unsigned version_printed; + struct ei_device *ei_local = netdev_priv(dev); + + if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + reg0 = inb_p(ioaddr); + if (reg0 == 0xFF) { + ret = -ENODEV; + goto err_out; + } + + /* Do a preliminary verification that we have a 8390. 
*/ + { + int regd; + outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + regd = inb_p(ioaddr + 0x0d); + outb_p(0xff, ioaddr + 0x0d); + outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (inb_p(ioaddr + EN0_COUNTER0) != 0) { + outb_p(reg0, ioaddr); + outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */ + ret = -ENODEV; + goto err_out; + } + } + + if ((ne_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) + netdev_info(dev, "%s%s", version1, version2); + + netdev_info(dev, "NE*000 ethercard probe at %#3lx:", ioaddr); + + /* A user with a poor card that fails to ack the reset, or that + does not have a valid 0x57,0x57 signature can still use this + without having to recompile. Specifying an i/o address along + with an otherwise unused dev->mem_end value of "0xBAD" will + cause the driver to skip these parts of the probe. */ + + bad_card = ((dev->base_addr != 0) && (dev->mem_end == BAD)); + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + + { + unsigned long reset_start_time = jiffies; + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + if (bad_card) { + pr_cont(" (warning: no reset ack)"); + break; + } else { + pr_cont(" not found (no reset ack).\n"); + ret = -ENODEV; + goto err_out; + } + } + + outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */ + } + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390p_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned char value, offset; } program_seq[] = + { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + } + for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) { + SA_prom[i] = inb(ioaddr + NE_DATAPORT); + SA_prom[i+1] = inb(ioaddr + NE_DATAPORT); + if (SA_prom[i] != SA_prom[i+1]) + wordlength = 1; + } + + if (wordlength == 2) + { + for (i = 0; i < 16; i++) + SA_prom[i] = SA_prom[i+i]; + /* We must set the 8390 for word mode. */ + outb_p(DCR_VAL, ioaddr + EN0_DCFG); + start_page = NESM_START_PG; + + /* + * Realtek RTL8019AS datasheet says that the PSTOP register + * shouldn't exceed 0x60 in 8-bit mode. + * This chip can be identified by reading the signature from + * the remote byte count registers (otherwise write-only)... 
+ */ + if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */ + inb(ioaddr + EN0_RCNTLO) == 0x50 && + inb(ioaddr + EN0_RCNTHI) == 0x70) + stop_page = 0x60; + else + stop_page = NESM_STOP_PG; + } else { + start_page = NE1SM_START_PG; + stop_page = NE1SM_STOP_PG; + } + +#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R) + neX000 = ((SA_prom[14] == 0x57 && SA_prom[15] == 0x57) + || (SA_prom[14] == 0x42 && SA_prom[15] == 0x42)); +#else + neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57); +#endif + ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d); + copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00); + + /* Set up the rest of the parameters. */ + if (neX000 || bad_card || copam) { + name = (wordlength == 2) ? "NE2000" : "NE1000"; + } + else if (ctron) + { + name = (wordlength == 2) ? "Ctron-8" : "Ctron-16"; + start_page = 0x01; + stop_page = (wordlength == 2) ? 0x40 : 0x20; + } + else + { +#ifdef SUPPORT_NE_BAD_CLONES + /* Ack! Well, there might be a *bad* NE*000 clone there. + Check for total bogus addresses. */ + for (i = 0; bad_clone_list[i].name8; i++) + { + if (SA_prom[0] == bad_clone_list[i].SAprefix[0] && + SA_prom[1] == bad_clone_list[i].SAprefix[1] && + SA_prom[2] == bad_clone_list[i].SAprefix[2]) + { + if (wordlength == 2) + { + name = bad_clone_list[i].name16; + } else { + name = bad_clone_list[i].name8; + } + break; + } + } + if (bad_clone_list[i].name8 == NULL) + { + pr_cont(" not found (invalid signature %2.2x %2.2x).\n", + SA_prom[14], SA_prom[15]); + ret = -ENXIO; + goto err_out; + } +#else + pr_cont(" not found.\n"); + ret = -ENXIO; + goto err_out; +#endif + } + + if (dev->irq < 2) + { + unsigned long cookie = probe_irq_on(); + outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */ + outb_p(0x00, ioaddr + EN0_RCNTLO); + outb_p(0x00, ioaddr + EN0_RCNTHI); + outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */ + mdelay(10); /* wait 10ms for interrupt to propagate */ + outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */ + dev->irq = probe_irq_off(cookie); + if (netif_msg_probe(ei_local)) + pr_cont(" autoirq is %d", dev->irq); + } else if (dev->irq == 2) + /* Fixup for users that don't know that IRQ 2 is really IRQ 9, + or don't know which one to set. */ + dev->irq = 9; + + if (! dev->irq) { + pr_cont(" failed to detect IRQ line.\n"); + ret = -EAGAIN; + goto err_out; + } + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. */ + ret = request_irq(dev->irq, eip_interrupt, 0, name, dev); + if (ret) { + pr_cont(" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); + goto err_out; + } + + dev->base_addr = ioaddr; + +#ifdef CONFIG_PLAT_MAPPI + outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, + ioaddr + E8390_CMD); /* 0x61 */ + for (i = 0; i < ETH_ALEN; i++) { + dev->dev_addr[i] = SA_prom[i] + = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); + } +#else + for (i = 0; i < ETH_ALEN; i++) { + dev->dev_addr[i] = SA_prom[i]; + } +#endif + + pr_cont("%pM\n", dev->dev_addr); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + + /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */ + ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01)); + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. 
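+	   Units are 256-byte buffer pages, counted from tx_start_page; for
+	   example, a hypothetical -DPACKETBUF_MEMSIZE=0x40 build would claim
+	   a 16 kB on-board buffer.  The defaults above match what the common
+	   cards actually have, so this is strictly for unusual hardware.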
*/ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne_reset_8390; + ei_status.block_input = &ne_block_input; + ei_status.block_output = &ne_block_output; + ei_status.get_8390_hdr = &ne_get_8390_hdr; + ei_status.priv = 0; + + dev->netdev_ops = &eip_netdev_ops; + NS8390p_init(dev, 0); + + ei_local->msg_enable = ne_msg_enable; + ret = register_netdev(dev); + if (ret) + goto out_irq; + netdev_info(dev, "%s found at %#lx, using IRQ %d.\n", + name, ioaddr, dev->irq); + return 0; + +out_irq: + free_irq(dev->irq, dev); +err_out: + release_region(ioaddr, NE_IO_EXTENT); + return ret; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ + +static void ne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + struct ei_device *ei_local = netdev_priv(dev); + + netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", jiffies); + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2*HZ/100)) { + netdev_err(dev, "ne_reset_8390() did not complete.\n"); + break; + } + outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + int nic_base = dev->base_addr; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + + if (ei_status.dmaing) + { + netdev_err(dev, "DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb_p(0, nic_base + EN0_RCNTHI); + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.word16) + insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); + else + insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ +#ifdef NE_SANITY_CHECK + int xfer_count = count; + struct ei_device *ei_local = netdev_priv(dev); +#endif + int nic_base = dev->base_addr; + char *buf = skb->data; + + /* This *shouldn't* happen. 
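+	   ei_status.dmaing serializes the single remote-DMA channel shared by
+	   the receive, transmit and header paths; if it is already set here,
+	   another transfer is in flight and programming a second one would
+	   corrupt both, so we only report the conflict and return.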
If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + netdev_err(dev, "DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d].\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) + { + insw(NE_BASE + NE_DATAPORT,buf,count>>1); + if (count & 0x01) + { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); +#ifdef NE_SANITY_CHECK + xfer_count++; +#endif + } + } else { + insb(NE_BASE + NE_DATAPORT, buf, count); + } + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. If you see + this message you either 1) have a slightly incompatible clone + or 2) have noise/speed problems with your bus. */ + + if (netif_msg_rx_status(ei_local)) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == low) + break; + } while (--tries > 0); + if (tries <= 0) + netdev_warn(dev, "RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void ne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + int nic_base = NE_BASE; + unsigned long dma_start; +#ifdef NE_SANITY_CHECK + int retries = 0; + struct ei_device *ei_local = netdev_priv(dev); +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + netdev_err(dev, "DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + +#ifdef NE_SANITY_CHECK +retry: +#endif + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. + Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + + outb_p(0x42, nic_base + EN0_RCNTLO); + outb_p(0x00, nic_base + EN0_RCNTHI); + outb_p(0x42, nic_base + EN0_RSARLO); + outb_p(0x00, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); + /* Make certain that the dummy read has occurred. */ + udelay(6); +#endif + + outb_p(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. 
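+	   The remote-DMA write is set up by loading the byte count into
+	   RCNTLO/RCNTHI and the destination into RSARHI (RSARLO stays 0,
+	   since transmit buffers start on a 256-byte page boundary); after
+	   the RWRITE command the data is pushed out through the dataport.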
*/ + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_status.word16) { + outsw(NE_BASE + NE_DATAPORT, buf, count>>1); + } else { + outsb(NE_BASE + NE_DATAPORT, buf, count); + } + + dma_start = jiffies; + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. */ + + if (netif_msg_tx_queued(ei_local)) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + + if (tries <= 0) + { + netdev_warn(dev, "Tx packet transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + ne_reset_8390(dev); + NS8390p_init(dev, 1); + break; + } + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static int __init ne_drv_probe(struct platform_device *pdev) +{ + struct net_device *dev; + int err, this_dev = pdev->id; + struct resource *res; + + dev = alloc_eip_netdev(); + if (!dev) + return -ENOMEM; + + /* ne.c doesn't populate resources in platform_device, but + * rbtx4927_ne_init and rbtx4938_ne_init do register devices + * with resources. + */ + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res) { + dev->base_addr = res->start; + dev->irq = platform_get_irq(pdev, 0); + } else { + if (this_dev < 0 || this_dev >= MAX_NE_CARDS) { + free_netdev(dev); + return -EINVAL; + } + dev->base_addr = io[this_dev]; + dev->irq = irq[this_dev]; + dev->mem_end = bad[this_dev]; + } + SET_NETDEV_DEV(dev, &pdev->dev); + err = do_ne_probe(dev); + if (err) { + free_netdev(dev); + return err; + } + platform_set_drvdata(pdev, dev); + + /* Update with any values found by probing, don't update if + * resources were specified. + */ + if (!res) { + io[this_dev] = dev->base_addr; + irq[this_dev] = dev->irq; + } + return 0; +} + +static int ne_drv_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (dev) { + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + netif_device_detach(dev); + unregister_netdev(dev); + if (idev) + pnp_device_detach(idev); + /* Careful ne_drv_remove can be called twice, once from + * the platform_driver.remove and again when the + * platform_device is being removed. + */ + ei_status.priv = 0; + free_irq(dev->irq, dev); + release_region(dev->base_addr, NE_IO_EXTENT); + free_netdev(dev); + } + return 0; +} + +/* Remove unused devices or all if true. 
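+   A registered platform device whose driver data is still NULL never had a
+   NE*000 found behind it, so it can be dropped quietly; passing a non-zero
+   'all' tears down every device instead (module unload or a failed init).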
*/ +static void ne_loop_rm_unreg(int all) +{ + int this_dev; + struct platform_device *pdev; + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + pdev = pdev_ne[this_dev]; + /* No network device == unused */ + if (pdev && (!platform_get_drvdata(pdev) || all)) { + ne_drv_remove(pdev); + platform_device_unregister(pdev); + pdev_ne[this_dev] = NULL; + } + } +} + +#ifdef CONFIG_PM +static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (netif_running(dev)) { + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + netif_device_detach(dev); + if (idev) + pnp_stop_dev(idev); + } + return 0; +} + +static int ne_drv_resume(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (netif_running(dev)) { + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + if (idev) + pnp_start_dev(idev); + ne_reset_8390(dev); + NS8390p_init(dev, 1); + netif_device_attach(dev); + } + return 0; +} +#else +#define ne_drv_suspend NULL +#define ne_drv_resume NULL +#endif + +static struct platform_driver ne_driver = { + .remove = ne_drv_remove, + .suspend = ne_drv_suspend, + .resume = ne_drv_resume, + .driver = { + .name = DRV_NAME, + }, +}; + +static void __init ne_add_devices(void) +{ + int this_dev; + struct platform_device *pdev; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + if (pdev_ne[this_dev]) + continue; + pdev = platform_device_register_simple( + DRV_NAME, this_dev, NULL, 0); + if (IS_ERR(pdev)) + continue; + pdev_ne[this_dev] = pdev; + } +} + +#ifdef MODULE +int __init init_module(void) +{ + int retval; + ne_add_devices(); + retval = platform_driver_probe(&ne_driver, ne_drv_probe); + if (retval) { + if (io[0] == 0) + pr_notice("ne.c: You must supply \"io=0xNNN\"" + " value(s) for ISA cards.\n"); + ne_loop_rm_unreg(1); + return retval; + } + + /* Unregister unused platform_devices. */ + ne_loop_rm_unreg(0); + return retval; +} +#else /* MODULE */ +static int __init ne_init(void) +{ + int retval = platform_driver_probe(&ne_driver, ne_drv_probe); + + /* Unregister unused platform_devices. */ + ne_loop_rm_unreg(0); + return retval; +} +module_init(ne_init); + +struct net_device * __init ne_probe(int unit) +{ + int this_dev; + struct net_device *dev; + + /* Find an empty slot, that is no net_device and zero io port. 
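+	   The io[], irq[] and bad[] slots are then filled from the kernel
+	   command line via netdev_boot_setup_check() and ne_add_devices()
+	   registers a platform device for the probe to claim.  When built as
+	   a module the same values are passed explicitly instead, e.g.
+	   "modprobe ne io=0x300 irq=9" (an io= value is mandatory for ISA
+	   cards, as init_module() above warns); the exact option string is
+	   only an illustration.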
*/ + this_dev = 0; + while ((pdev_ne[this_dev] && platform_get_drvdata(pdev_ne[this_dev])) || + io[this_dev]) { + if (++this_dev == MAX_NE_CARDS) + return ERR_PTR(-ENOMEM); + } + + /* Get irq, io from kernel command line */ + dev = alloc_eip_netdev(); + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + io[this_dev] = dev->base_addr; + irq[this_dev] = dev->irq; + bad[this_dev] = dev->mem_end; + + free_netdev(dev); + + ne_add_devices(); + + /* return the first device found */ + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + if (pdev_ne[this_dev]) { + dev = platform_get_drvdata(pdev_ne[this_dev]); + if (dev) + return dev; + } + } + + return ERR_PTR(-ENODEV); +} +#endif /* MODULE */ + +static void __exit ne_exit(void) +{ + platform_driver_unregister(&ne_driver); + ne_loop_rm_unreg(1); +} +module_exit(ne_exit); diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c new file mode 100644 index 000000000..57e97910c --- /dev/null +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -0,0 +1,743 @@ +/* ne2k-pci.c: A NE2000 clone on PCI bus driver for Linux. */ +/* + A Linux device driver for PCI NE2000 clones. + + Authors and other copyright holders: + 1992-2000 by Donald Becker, NE2000 core and various modifications. + 1995-1998 by Paul Gortmaker, core modifications and PCI support. + Copyright 1993 assigned to the United States Government as represented + by the Director, National Security Agency. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Issues remaining: + People are making PCI ne2000 clones! Oh the horror, the horror... + Limited full-duplex support. +*/ + +#define DRV_NAME "ne2k-pci" +#define DRV_VERSION "1.03" +#define DRV_RELDATE "9/22/2003" + + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +#define MAX_UNITS 8 /* More are supported, limit only on options */ +/* Used to pass the full-duplex flag, etc. */ +static int full_duplex[MAX_UNITS]; +static int options[MAX_UNITS]; + +/* Force a non std. amount of memory. Units are 256 byte pages. */ +/* #define PACKETBUF_MEMSIZE 0x40 */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "8390.h" + +static u32 ne2k_msg_enable; + +/* These identify the driver base version and may not be removed. */ +static const char version[] = + KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE + " D. Becker/P. 
Gortmaker\n"; + +#if defined(__powerpc__) +#define inl_le(addr) le32_to_cpu(inl(addr)) +#define inw_le(addr) le16_to_cpu(inw(addr)) +#endif + +#define PFX DRV_NAME ": " + +MODULE_AUTHOR("Donald Becker / Paul Gortmaker"); +MODULE_DESCRIPTION("PCI NE2000 clone driver"); +MODULE_LICENSE("GPL"); + +module_param_named(msg_enable, ne2k_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); +MODULE_PARM_DESC(options, "Bit 5: full duplex"); +MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)"); + +/* Some defines that people can play with if so inclined. */ + +/* Use 32 bit data-movement operations instead of 16 bit. */ +#define USE_LONGIO + +/* Do we implement the read before write bugfix ? */ +/* #define NE_RW_BUGFIX */ + +/* Flags. We rename an existing ei_status field to store flags! */ +/* Thus only the low 8 bits are usable for non-init-time flags. */ +#define ne2k_flags reg0 +enum { + ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */ + FORCE_FDX=0x20, /* User override. */ + REALTEK_FDX=0x40, HOLTEK_FDX=0x80, + STOP_PG_0x60=0x100, +}; + +enum ne2k_pci_chipsets { + CH_RealTek_RTL_8029 = 0, + CH_Winbond_89C940, + CH_Compex_RL2000, + CH_KTI_ET32P2, + CH_NetVin_NV5000SC, + CH_Via_86C926, + CH_SureCom_NE34, + CH_Winbond_W89C940F, + CH_Holtek_HT80232, + CH_Holtek_HT80229, + CH_Winbond_89C940_8c4a, +}; + + +static struct { + char *name; + int flags; +} pci_clone_list[] = { + {"RealTek RTL-8029", REALTEK_FDX}, + {"Winbond 89C940", 0}, + {"Compex RL2000", 0}, + {"KTI ET32P2", 0}, + {"NetVin NV5000SC", 0}, + {"Via 86C926", ONLY_16BIT_IO}, + {"SureCom NE34", 0}, + {"Winbond W89C940F", 0}, + {"Holtek HT80232", ONLY_16BIT_IO | HOLTEK_FDX}, + {"Holtek HT80229", ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60 }, + {"Winbond W89C940(misprogrammed)", 0}, + {NULL,} +}; + + +static const struct pci_device_id ne2k_pci_tbl[] = { + { 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 }, + { 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 }, + { 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 }, + { 0x8e2e, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_KTI_ET32P2 }, + { 0x4a14, 0x5000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_NetVin_NV5000SC }, + { 0x1106, 0x0926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Via_86C926 }, + { 0x10bd, 0x0e34, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_SureCom_NE34 }, + { 0x1050, 0x5a5a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_W89C940F }, + { 0x12c3, 0x0058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80232 }, + { 0x12c3, 0x5598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Holtek_HT80229 }, + { 0x8c4a, 0x1980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940_8c4a }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ne2k_pci_tbl); + + +/* ---- No user-serviceable parts below ---- */ + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. 
*/ +#define NE_IO_EXTENT 0x20 + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + + +static int ne2k_pci_open(struct net_device *dev); +static int ne2k_pci_close(struct net_device *dev); + +static void ne2k_pci_reset_8390(struct net_device *dev); +static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ne2k_pci_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ne2k_pci_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); +static const struct ethtool_ops ne2k_pci_ethtool_ops; + + + +/* There is no room in the standard 8390 structure for extra info we need, + so we build a meta/outer-wrapper structure.. */ +struct ne2k_pci_card { + struct net_device *dev; + struct pci_dev *pci_dev; +}; + + + +/* + NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet + buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes + 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be + detected by their SA prefix. + + Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + mode results in doubled values, which can be detected and compensated for. + + The probe is also responsible for initializing the card and filling + in the 'dev' and 'ei_status' structures. +*/ + +static const struct net_device_ops ne2k_netdev_ops = { + .ndo_open = ne2k_pci_open, + .ndo_stop = ne2k_pci_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_rx_mode = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int ne2k_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + int i; + unsigned char SA_prom[32]; + int start_page, stop_page; + int irq, reg0, chip_idx = ent->driver_data; + static unsigned int fnd_cnt; + long ioaddr; + int flags = pci_clone_list[chip_idx].flags; + struct ei_device *ei_local; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + fnd_cnt++; + + i = pci_enable_device (pdev); + if (i) + return i; + + ioaddr = pci_resource_start (pdev, 0); + irq = pdev->irq; + + if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { + dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); + goto err_out; + } + + if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { + dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", + NE_IO_EXTENT, ioaddr); + goto err_out; + } + + reg0 = inb(ioaddr); + if (reg0 == 0xFF) + goto err_out_free_res; + + /* Do a preliminary verification that we have a 8390. */ + { + int regd; + outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + regd = inb(ioaddr + 0x0d); + outb(0xff, ioaddr + 0x0d); + outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (inb(ioaddr + EN0_COUNTER0) != 0) { + outb(reg0, ioaddr); + outb(regd, ioaddr + 0x0d); /* Restore the old values. */ + goto err_out_free_res; + } + } + + /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. 
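+	   alloc_ei_netdev() hands back a net_device whose private area is
+	   the struct ei_device used by the generic 8390 core, so the
+	   netdev_priv() call below refers to that ei_device rather than a
+	   driver-private structure.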
*/ + dev = alloc_ei_netdev(); + if (!dev) { + dev_err(&pdev->dev, "cannot allocate ethernet device\n"); + goto err_out_free_res; + } + dev->netdev_ops = &ne2k_netdev_ops; + ei_local = netdev_priv(dev); + ei_local->msg_enable = ne2k_msg_enable; + + SET_NETDEV_DEV(dev, &pdev->dev); + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + { + unsigned long reset_start_time = jiffies; + + outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + /* This looks like a horrible timing loop, but it should never take + more than a few cycles. + */ + while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0) + /* Limit wait: '2' avoids jiffy roll-over. */ + if (jiffies - reset_start_time > 2) { + dev_err(&pdev->dev, + "Card failure (no reset ack).\n"); + goto err_out_free_netdev; + } + + outb(0xff, ioaddr + EN0_ISR); /* Ack all intr. */ + } + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned char value, offset; } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x49, EN0_DCFG}, /* Set word-wide access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb(program_seq[i].value, ioaddr + program_seq[i].offset); + + } + + /* Note: all PCI cards have at least 16 bit access, so we don't have + to check for 8 bit cards. Most cards permit 32 bit access. */ + if (flags & ONLY_32BIT_IO) { + for (i = 0; i < 4 ; i++) + ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT)); + } else + for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++) + SA_prom[i] = inb(ioaddr + NE_DATAPORT); + + /* We always set the 8390 registers for word mode. */ + outb(0x49, ioaddr + EN0_DCFG); + start_page = NESM_START_PG; + + stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG; + + /* Set up the rest of the parameters. */ + dev->irq = irq; + dev->base_addr = ioaddr; + pci_set_drvdata(pdev, dev); + + ei_status.name = pci_clone_list[chip_idx].name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = 1; + ei_status.ne2k_flags = flags; + if (fnd_cnt < MAX_UNITS) { + if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) + ei_status.ne2k_flags |= FORCE_FDX; + } + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. 
*/ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne2k_pci_reset_8390; + ei_status.block_input = &ne2k_pci_block_input; + ei_status.block_output = &ne2k_pci_block_output; + ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr; + ei_status.priv = (unsigned long) pdev; + + dev->ethtool_ops = &ne2k_pci_ethtool_ops; + NS8390_init(dev, 0); + + memcpy(dev->dev_addr, SA_prom, dev->addr_len); + + i = register_netdev(dev); + if (i) + goto err_out_free_netdev; + + netdev_info(dev, "%s found at %#lx, IRQ %d, %pM.\n", + pci_clone_list[chip_idx].name, ioaddr, dev->irq, + dev->dev_addr); + + return 0; + +err_out_free_netdev: + free_netdev (dev); +err_out_free_res: + release_region (ioaddr, NE_IO_EXTENT); +err_out: + pci_disable_device(pdev); + return -ENODEV; +} + +/* + * Magic incantation sequence for full duplex on the supported cards. + */ +static inline int set_realtek_fdx(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + + outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */ + outb(0xC0, ioaddr + 0x01); /* Enable writes to CONFIG3 */ + outb(0x40, ioaddr + 0x06); /* Enable full duplex */ + outb(0x00, ioaddr + 0x01); /* Disable writes to CONFIG3 */ + outb(E8390_PAGE0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 0 */ + return 0; +} + +static inline int set_holtek_fdx(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + + outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20); + return 0; +} + +static int ne2k_pci_set_fdx(struct net_device *dev) +{ + if (ei_status.ne2k_flags & REALTEK_FDX) + return set_realtek_fdx(dev); + else if (ei_status.ne2k_flags & HOLTEK_FDX) + return set_holtek_fdx(dev); + + return -EOPNOTSUPP; +} + +static int ne2k_pci_open(struct net_device *dev) +{ + int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; + + if (ei_status.ne2k_flags & FORCE_FDX) + ne2k_pci_set_fdx(dev); + + ei_open(dev); + return 0; +} + +static int ne2k_pci_close(struct net_device *dev) +{ + ei_close(dev); + free_irq(dev->irq, dev); + return 0; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ +static void ne2k_pci_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + struct ei_device *ei_local = netdev_priv(dev); + + netif_dbg(ei_local, hw, dev, "resetting the 8390 t=%ld...\n", + jiffies); + + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (jiffies - reset_start_time > 2) { + netdev_err(dev, "ne2k_pci_reset_8390() did not complete.\n"); + break; + } + outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + + long nic_base = dev->base_addr; + + /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in ne2k_pci_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb(0, nic_base + EN0_RCNTHI); + outb(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb(ring_page, nic_base + EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.ne2k_flags & ONLY_16BIT_IO) { + insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); + } else { + *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT)); + le16_to_cpus(&hdr->count); + } + + outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void ne2k_pci_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + long nic_base = dev->base_addr; + char *buf = skb->data; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in ne2k_pci_block_input " + "[DMAstat:%d][irqlock:%d].\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + if (ei_status.ne2k_flags & ONLY_32BIT_IO) + count = (count + 3) & 0xFFFC; + outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + outb(count & 0xff, nic_base + EN0_RCNTLO); + outb(count >> 8, nic_base + EN0_RCNTHI); + outb(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb(ring_offset >> 8, nic_base + EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + if (ei_status.ne2k_flags & ONLY_16BIT_IO) { + insw(NE_BASE + NE_DATAPORT,buf,count>>1); + if (count & 0x01) { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); + } + } else { + insl(NE_BASE + NE_DATAPORT, buf, count>>2); + if (count & 3) { + buf += count & ~3; + if (count & 2) { + __le16 *b = (__le16 *)buf; + + *b++ = cpu_to_le16(inw(NE_BASE + NE_DATAPORT)); + buf = (char *)b; + } + if (count & 1) + *buf = inb(NE_BASE + NE_DATAPORT); + } + } + + outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void ne2k_pci_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + long nic_base = NE_BASE; + unsigned long dma_start; + + /* On little-endian it's always safe to round the count up for + word writes. */ + if (ei_status.ne2k_flags & ONLY_32BIT_IO) + count = (count + 3) & 0xFFFC; + else + if (count & 0x01) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in ne2k_pci_block_output." + "[DMAstat:%d][irqlock:%d]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. 
+ Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + outb(0x42, nic_base + EN0_RCNTLO); + outb(0x00, nic_base + EN0_RCNTHI); + outb(0x42, nic_base + EN0_RSARLO); + outb(0x00, nic_base + EN0_RSARHI); + outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); +#endif + outb(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + outb(count & 0xff, nic_base + EN0_RCNTLO); + outb(count >> 8, nic_base + EN0_RCNTHI); + outb(0x00, nic_base + EN0_RSARLO); + outb(start_page, nic_base + EN0_RSARHI); + outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); + if (ei_status.ne2k_flags & ONLY_16BIT_IO) { + outsw(NE_BASE + NE_DATAPORT, buf, count>>1); + } else { + outsl(NE_BASE + NE_DATAPORT, buf, count>>2); + if (count & 3) { + buf += count & ~3; + if (count & 2) { + __le16 *b = (__le16 *)buf; + + outw(le16_to_cpu(*b++), NE_BASE + NE_DATAPORT); + buf = (char *)b; + } + } + } + + dma_start = jiffies; + + while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) + if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + ne2k_pci_reset_8390(dev); + NS8390_init(dev,1); + break; + } + + outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +static void ne2k_pci_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ei_device *ei = netdev_priv(dev); + struct pci_dev *pci_dev = (struct pci_dev *) ei->priv; + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); +} + +static u32 ne2k_pci_get_msglevel(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + return ei_local->msg_enable; +} + +static void ne2k_pci_set_msglevel(struct net_device *dev, u32 v) +{ + struct ei_device *ei_local = netdev_priv(dev); + + ei_local->msg_enable = v; +} + +static const struct ethtool_ops ne2k_pci_ethtool_ops = { + .get_drvinfo = ne2k_pci_get_drvinfo, + .get_msglevel = ne2k_pci_get_msglevel, + .set_msglevel = ne2k_pci_set_msglevel, +}; + +static void ne2k_pci_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + BUG_ON(!dev); + unregister_netdev(dev); + release_region(dev->base_addr, NE_IO_EXTENT); + free_netdev(dev); + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata (pdev); + + netif_device_detach(dev); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int ne2k_pci_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + int rc; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + NS8390_init(dev, 1); + netif_device_attach(dev); + + return 0; +} + +#endif /* CONFIG_PM */ + + +static struct pci_driver ne2k_driver = { + .name = DRV_NAME, + .probe = ne2k_pci_init_one, + .remove = ne2k_pci_remove_one, + .id_table = ne2k_pci_tbl, +#ifdef CONFIG_PM + .suspend = ne2k_pci_suspend, + .resume = ne2k_pci_resume, +#endif /* CONFIG_PM */ + +}; + + +static int __init ne2k_pci_init(void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + return pci_register_driver(&ne2k_driver); +} + + +static 
void __exit ne2k_pci_cleanup(void) +{ + pci_unregister_driver (&ne2k_driver); +} + +module_init(ne2k_pci_init); +module_exit(ne2k_pci_cleanup); diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c new file mode 100644 index 000000000..2777289a2 --- /dev/null +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -0,0 +1,1699 @@ +/*====================================================================== + + A PCMCIA ethernet driver for NS8390-based cards + + This driver supports the D-Link DE-650 and Linksys EthernetCard + cards, the newer D-Link and Linksys combo cards, Accton EN2212 + cards, the RPTI EP400, and the PreMax PE-200 in non-shared-memory + mode, and the IBM Credit Card Adapter, the NE4100, the Thomas + Conrad ethernet card, and the Kingston KNE-PCM/x in shared-memory + mode. It will also handle the Socket EA card in either mode. + + Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net + + pcnet_cs.c 1.153 2003/11/09 18:53:09 + + The network driver code is based on Donald Becker's NE2000 code: + + Written 1992,1993 by Donald Becker. + Copyright 1993 United States Government as represented by the + Director, National Security Agency. This software may be used and + distributed according to the terms of the GNU General Public License, + incorporated herein by reference. + Donald Becker may be reached at becker@scyld.com + + Based also on Keith Moore's changes to Don Becker's code, for IBM + CCAE support. Drivers merged back together, and shared-memory + Socket EA support added, by Ken Raeburn, September 1995. + +======================================================================*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "8390.h" + +#include +#include +#include +#include + +#include +#include +#include + +#define PCNET_CMD 0x00 +#define PCNET_DATAPORT 0x10 /* NatSemi-defined port window offset. */ +#define PCNET_RESET 0x1f /* Issue a read to reset, a write to clear. */ +#define PCNET_MISC 0x18 /* For IBM CCAE and Socket EA cards */ + +#define PCNET_START_PG 0x40 /* First page of TX buffer */ +#define PCNET_STOP_PG 0x80 /* Last page +1 of RX ring */ + +/* Socket EA cards have a larger packet buffer */ +#define SOCKET_START_PG 0x01 +#define SOCKET_STOP_PG 0xff + +#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */ + +static const char *if_names[] = { "auto", "10baseT", "10base2"}; +static u32 pcnet_msg_enable; + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_AUTHOR("David Hinds "); +MODULE_DESCRIPTION("NE2000 compatible PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +INT_MODULE_PARM(if_port, 1); /* Transceiver type */ +INT_MODULE_PARM(use_big_buf, 1); /* use 64K packet buffer? */ +INT_MODULE_PARM(mem_speed, 0); /* shared mem speed, in ns */ +INT_MODULE_PARM(delay_output, 0); /* pause after xmit? */ +INT_MODULE_PARM(delay_time, 4); /* in usec */ +INT_MODULE_PARM(use_shmem, -1); /* use shared memory? */ +INT_MODULE_PARM(full_duplex, 0); /* full duplex? */ + +/* Ugh! Let the user hardwire the hardware address for queer cards */ +static int hw_addr[6] = { 0, /* ... 
*/ }; +module_param_array(hw_addr, int, NULL, 0); + +/*====================================================================*/ + +static void mii_phy_probe(struct net_device *dev); +static int pcnet_config(struct pcmcia_device *link); +static void pcnet_release(struct pcmcia_device *link); +static int pcnet_open(struct net_device *dev); +static int pcnet_close(struct net_device *dev); +static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); +static void ei_watchdog(u_long arg); +static void pcnet_reset_8390(struct net_device *dev); +static int set_config(struct net_device *dev, struct ifmap *map); +static int setup_shmem_window(struct pcmcia_device *link, int start_pg, + int stop_pg, int cm_offset); +static int setup_dma_config(struct pcmcia_device *link, int start_pg, + int stop_pg); + +static void pcnet_detach(struct pcmcia_device *p_dev); + +/*====================================================================*/ + +struct hw_info { + u_int offset; + u_char a0, a1, a2; + u_int flags; +}; + +#define DELAY_OUTPUT 0x01 +#define HAS_MISC_REG 0x02 +#define USE_BIG_BUF 0x04 +#define HAS_IBM_MISC 0x08 +#define IS_DL10019 0x10 +#define IS_DL10022 0x20 +#define HAS_MII 0x40 +#define USE_SHMEM 0x80 /* autodetected */ + +#define AM79C9XX_HOME_PHY 0x00006B90 /* HomePNA PHY */ +#define AM79C9XX_ETH_PHY 0x00006B70 /* 10baseT PHY */ +#define MII_PHYID_REV_MASK 0xfffffff0 +#define MII_PHYID_REG1 0x02 +#define MII_PHYID_REG2 0x03 + +static struct hw_info hw_info[] = { + { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, + { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 }, + { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 }, + { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94, + DELAY_OUTPUT | HAS_IBM_MISC }, + { /* Danpex EN-6200P2 */ 0x0110, 0x00, 0x40, 0xc7, 0 }, + { /* DataTrek NetCard */ 0x0ff0, 0x00, 0x20, 0xe8, 0 }, + { /* Dayna CommuniCard E */ 0x0110, 0x00, 0x80, 0x19, 0 }, + { /* D-Link DE-650 */ 0x0040, 0x00, 0x80, 0xc8, 0 }, + { /* EP-210 Ethernet */ 0x0110, 0x00, 0x40, 0x33, 0 }, + { /* EP4000 Ethernet */ 0x01c0, 0x00, 0x00, 0xb4, 0 }, + { /* Epson EEN10B */ 0x0ff0, 0x00, 0x00, 0x48, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* ELECOM Laneed LD-CDWA */ 0xb8, 0x08, 0x00, 0x42, 0 }, + { /* Hypertec Ethernet */ 0x01c0, 0x00, 0x40, 0x4c, 0 }, + { /* IBM CCAE */ 0x0ff0, 0x08, 0x00, 0x5a, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM CCAE */ 0x0ff0, 0x00, 0x04, 0xac, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM CCAE */ 0x0ff0, 0x00, 0x06, 0x29, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM FME */ 0x0374, 0x08, 0x00, 0x5a, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* IBM FME */ 0x0374, 0x00, 0x04, 0xac, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Kansai KLA-PCM/T */ 0x0ff0, 0x00, 0x60, 0x87, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0374, 0x08, 0x00, 0x17, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0374, 0x00, 0xc0, 0xa8, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0374, 0x00, 0xa0, 0xb0, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* NSC DP83903 */ 0x0198, 0x00, 0x20, 0xe0, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* I-O DATA PCLA/T */ 0x0ff0, 0x00, 0xa0, 0xb0, 0 }, + { /* Katron PE-520 */ 0x0110, 0x00, 0x40, 0xf6, 0 }, + { /* Kingston KNE-PCM/x */ 0x0ff0, 0x00, 0xc0, 0xf0, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Kingston KNE-PCM/x */ 0x0ff0, 0xe2, 0x0c, 0x0f, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Kingston KNE-PC2 */ 0x0180, 0x00, 0xc0, 0xf0, 0 }, + { /* Maxtech PCN2000 */ 0x5000, 
0x00, 0x00, 0xe8, 0 }, + { /* NDC Instant-Link */ 0x003a, 0x00, 0x80, 0xc6, 0 }, + { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 }, + { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45, + HAS_MISC_REG | HAS_IBM_MISC }, + { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 }, + { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 }, + { /* SCM Ethernet */ 0x0ff0, 0x00, 0x20, 0xcb, 0 }, + { /* Socket EA */ 0x4000, 0x00, 0xc0, 0x1b, + DELAY_OUTPUT | HAS_MISC_REG | USE_BIG_BUF }, + { /* Socket LP-E CF+ */ 0x01c0, 0x00, 0xc0, 0x1b, 0 }, + { /* SuperSocket RE450T */ 0x0110, 0x00, 0xe0, 0x98, 0 }, + { /* Volktek NPL-402CT */ 0x0060, 0x00, 0x40, 0x05, 0 }, + { /* NEC PC-9801N-J12 */ 0x0ff0, 0x00, 0x00, 0x4c, 0 }, + { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 } +}; + +#define NR_INFO ARRAY_SIZE(hw_info) + +static struct hw_info default_info = { 0, 0, 0, 0, 0 }; +static struct hw_info dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; +static struct hw_info dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII }; + +struct pcnet_dev { + struct pcmcia_device *p_dev; + u_int flags; + void __iomem *base; + struct timer_list watchdog; + int stale, fast_poll; + u_char phy_id; + u_char eth_phy, pna_phy; + u_short link_status; + u_long mii_reset; +}; + +static inline struct pcnet_dev *PRIV(struct net_device *dev) +{ + char *p = netdev_priv(dev); + return (struct pcnet_dev *)(p + sizeof(struct ei_device)); +} + +static const struct net_device_ops pcnet_netdev_ops = { + .ndo_open = pcnet_open, + .ndo_stop = pcnet_close, + .ndo_set_config = set_config, + .ndo_start_xmit = ei_start_xmit, + .ndo_get_stats = ei_get_stats, + .ndo_do_ioctl = ei_ioctl, + .ndo_set_rx_mode = ei_set_multicast_list, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int pcnet_probe(struct pcmcia_device *link) +{ + struct pcnet_dev *info; + struct net_device *dev; + + dev_dbg(&link->dev, "pcnet_attach()\n"); + + /* Create new ethernet device */ + dev = __alloc_ei_netdev(sizeof(struct pcnet_dev)); + if (!dev) return -ENOMEM; + info = PRIV(dev); + info->p_dev = link; + link->priv = dev; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + + dev->netdev_ops = &pcnet_netdev_ops; + + return pcnet_config(link); +} /* pcnet_attach */ + +static void pcnet_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "pcnet_detach\n"); + + unregister_netdev(dev); + + pcnet_release(link); + + free_netdev(dev); +} /* pcnet_detach */ + +/*====================================================================== + + This probes for a card's hardware address, for card types that + encode this information in their CIS. 
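+    Each hw_info[] entry gives an offset into attribute memory plus the
+    first three station-address bytes expected there.  get_hwinfo() maps a
+    small window, compares those bytes at every known offset and, on a
+    match, reads the full six-byte address from the even bytes only
+    (attribute memory is defined on even addresses).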
+ +======================================================================*/ + +static struct hw_info *get_hwinfo(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + u_char __iomem *base, *virt; + int i, j; + + /* Allocate a small memory window */ + link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = 0; link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); + if (i != 0) + return NULL; + + virt = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); + for (i = 0; i < NR_INFO; i++) { + pcmcia_map_mem_page(link, link->resource[2], + hw_info[i].offset & ~(resource_size(link->resource[2])-1)); + base = &virt[hw_info[i].offset & (resource_size(link->resource[2])-1)]; + if ((readb(base+0) == hw_info[i].a0) && + (readb(base+2) == hw_info[i].a1) && + (readb(base+4) == hw_info[i].a2)) { + for (j = 0; j < 6; j++) + dev->dev_addr[j] = readb(base + (j<<1)); + break; + } + } + + iounmap(virt); + j = pcmcia_release_window(link, link->resource[2]); + return (i < NR_INFO) ? hw_info+i : NULL; +} /* get_hwinfo */ + +/*====================================================================== + + This probes for a card's hardware address by reading the PROM. + It checks the address against a list of known types, then falls + back to a simple NE2000 clone signature check. + +======================================================================*/ + +static struct hw_info *get_prom(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + u_char prom[32]; + int i, j; + + /* This is lifted straight from drivers/net/ethernet/8390/ne.c */ + struct { + u_char value, offset; + } program_seq[] = { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + pcnet_reset_8390(dev); + mdelay(10); + + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + for (i = 0; i < 32; i++) + prom[i] = inb(ioaddr + PCNET_DATAPORT); + for (i = 0; i < NR_INFO; i++) { + if ((prom[0] == hw_info[i].a0) && + (prom[2] == hw_info[i].a1) && + (prom[4] == hw_info[i].a2)) + break; + } + if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) { + for (j = 0; j < 6; j++) + dev->dev_addr[j] = prom[j<<1]; + return (i < NR_INFO) ? hw_info+i : &default_info; + } + return NULL; +} /* get_prom */ + +/*====================================================================== + + For DL10019 based cards, like the Linksys EtherFast + +======================================================================*/ + +static struct hw_info *get_dl10019(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + int i; + u_char sum; + + for (sum = 0, i = 0x14; i < 0x1c; i++) + sum += inb_p(dev->base_addr + i); + if (sum != 0xff) + return NULL; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i); + i = inb(dev->base_addr + 0x1f); + return ((i == 0x91)||(i == 0x99)) ? 
&dl10022_info : &dl10019_info; +} + +/*====================================================================== + + For Asix AX88190 based cards + +======================================================================*/ + +static struct hw_info *get_ax88190(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int i, j; + + /* Not much of a test, but the alternatives are messy */ + if (link->config_base != 0x03c0) + return NULL; + + outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */ + outb_p(0x00, ioaddr + EN0_RSARLO); /* DMA starting at 0x0400. */ + outb_p(0x04, ioaddr + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, ioaddr + E8390_CMD); + + for (i = 0; i < 6; i += 2) { + j = inw(ioaddr + PCNET_DATAPORT); + dev->dev_addr[i] = j & 0xff; + dev->dev_addr[i+1] = j >> 8; + } + return NULL; +} + +/*====================================================================== + + This should be totally unnecessary... but when we can't figure + out the hardware address any other way, we'll let the user hard + wire it when the module is initialized. + +======================================================================*/ + +static struct hw_info *get_hwired(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + int i; + + for (i = 0; i < 6; i++) + if (hw_addr[i] != 0) break; + if (i == 6) + return NULL; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = hw_addr[i]; + + return &default_info; +} /* get_hwired */ + +static int try_io_port(struct pcmcia_device *link) +{ + int j, ret; + link->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + link->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + if (link->resource[0]->end == 32) { + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + if (link->resource[1]->end > 0) { + /* for master/slave multifunction cards */ + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + } + } else { + /* This should be two 16-port windows */ + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; + } + if (link->resource[0]->start == 0) { + for (j = 0; j < 0x400; j += 0x20) { + link->resource[0]->start = j ^ 0x300; + link->resource[1]->start = (j ^ 0x300) + 0x10; + link->io_lines = 16; + ret = pcmcia_request_io(link); + if (ret == 0) + return ret; + } + return ret; + } else { + return pcmcia_request_io(link); + } +} + +static int pcnet_confcheck(struct pcmcia_device *p_dev, void *priv_data) +{ + int *priv = priv_data; + int try = (*priv & 0x1); + + *priv &= (p_dev->resource[2]->end >= 0x4000) ? 
0x10 : ~0x10; + + if (p_dev->config_index == 0) + return -EINVAL; + + if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) + return -EINVAL; + + if (try) + p_dev->io_lines = 16; + return try_io_port(p_dev); +} + +static struct hw_info *pcnet_try_config(struct pcmcia_device *link, + int *has_shmem, int try) +{ + struct net_device *dev = link->priv; + struct hw_info *local_hw_info; + struct pcnet_dev *info = PRIV(dev); + int priv = try; + int ret; + + ret = pcmcia_loop_config(link, pcnet_confcheck, &priv); + if (ret) { + dev_warn(&link->dev, "no useable port range found\n"); + return NULL; + } + *has_shmem = (priv & 0x10); + + if (!link->irq) + return NULL; + + if (resource_size(link->resource[1]) == 8) + link->config_flags |= CONF_ENABLE_SPKR; + + if ((link->manf_id == MANFID_IBM) && + (link->card_id == PRODID_IBM_HOME_AND_AWAY)) + link->config_index |= 0x10; + + ret = pcmcia_enable_device(link); + if (ret) + return NULL; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + if (info->flags & HAS_MISC_REG) { + if ((if_port == 1) || (if_port == 2)) + dev->if_port = if_port; + else + dev_notice(&link->dev, "invalid if_port requested\n"); + } else + dev->if_port = 0; + + if ((link->config_base == 0x03c0) && + (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { + dev_info(&link->dev, + "this is an AX88190 card - use axnet_cs instead.\n"); + return NULL; + } + + local_hw_info = get_hwinfo(link); + if (!local_hw_info) + local_hw_info = get_prom(link); + if (!local_hw_info) + local_hw_info = get_dl10019(link); + if (!local_hw_info) + local_hw_info = get_ax88190(link); + if (!local_hw_info) + local_hw_info = get_hwired(link); + + return local_hw_info; +} + +static int pcnet_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct pcnet_dev *info = PRIV(dev); + int start_pg, stop_pg, cm_offset; + int has_shmem = 0; + struct hw_info *local_hw_info; + struct ei_device *ei_local; + + dev_dbg(&link->dev, "pcnet_config\n"); + + local_hw_info = pcnet_try_config(link, &has_shmem, 0); + if (!local_hw_info) { + /* check whether forcing io_lines to 16 helps... */ + pcmcia_disable_device(link); + local_hw_info = pcnet_try_config(link, &has_shmem, 1); + if (local_hw_info == NULL) { + dev_notice(&link->dev, "unable to read hardware net" + " address for io base %#3lx\n", dev->base_addr); + goto failed; + } + } + + info->flags = local_hw_info->flags; + /* Check for user overrides */ + info->flags |= (delay_output) ? 
DELAY_OUTPUT : 0; + if ((link->manf_id == MANFID_SOCKET) && + ((link->card_id == PRODID_SOCKET_LPE) || + (link->card_id == PRODID_SOCKET_LPE_CF) || + (link->card_id == PRODID_SOCKET_EIO))) + info->flags &= ~USE_BIG_BUF; + if (!use_big_buf) + info->flags &= ~USE_BIG_BUF; + + if (info->flags & USE_BIG_BUF) { + start_pg = SOCKET_START_PG; + stop_pg = SOCKET_STOP_PG; + cm_offset = 0x10000; + } else { + start_pg = PCNET_START_PG; + stop_pg = PCNET_STOP_PG; + cm_offset = 0; + } + + /* has_shmem is ignored if use_shmem != -1 */ + if ((use_shmem == 0) || (!has_shmem && (use_shmem == -1)) || + (setup_shmem_window(link, start_pg, stop_pg, cm_offset) != 0)) + setup_dma_config(link, start_pg, stop_pg); + + ei_status.name = "NE2000"; + ei_status.word16 = 1; + ei_status.reset_8390 = pcnet_reset_8390; + + if (info->flags & (IS_DL10019|IS_DL10022)) + mii_phy_probe(dev); + + SET_NETDEV_DEV(dev, &link->dev); + ei_local = netdev_priv(dev); + ei_local->msg_enable = pcnet_msg_enable; + + if (register_netdev(dev) != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + if (info->flags & (IS_DL10019|IS_DL10022)) { + u_char id = inb(dev->base_addr + 0x1a); + netdev_info(dev, "NE2000 (DL100%d rev %02x): ", + (info->flags & IS_DL10022) ? 22 : 19, id); + if (info->pna_phy) + pr_cont("PNA, "); + } else { + netdev_info(dev, "NE2000 Compatible: "); + } + pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq); + if (info->flags & USE_SHMEM) + pr_cont(" mem %#5lx,", dev->mem_start); + if (info->flags & HAS_MISC_REG) + pr_cont(" %s xcvr,", if_names[dev->if_port]); + pr_cont(" hw_addr %pM\n", dev->dev_addr); + return 0; + +failed: + pcnet_release(link); + return -ENODEV; +} /* pcnet_config */ + +static void pcnet_release(struct pcmcia_device *link) +{ + struct pcnet_dev *info = PRIV(link->priv); + + dev_dbg(&link->dev, "pcnet_release\n"); + + if (info->flags & USE_SHMEM) + iounmap(info->base); + + pcmcia_disable_device(link); +} + +static int pcnet_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int pcnet_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + pcnet_reset_8390(dev); + NS8390_init(dev, 1); + netif_device_attach(dev); + } + + return 0; +} + + +/*====================================================================== + + MII interface support for DL10019 and DL10022 based cards + + On the DL10019, the MII IO direction bit is 0x10; on the DL10022 + it is 0x20. Setting both bits seems to work on both card types. 
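+    That is why MDIO_DIR_WRITE below is 0x30 (0x10 | 0x20).  For reference,
+    one write bit is clocked out of the DLINK_GPIO register roughly like
+    this (an illustrative sketch of the loops in mdio_read()/mdio_write(),
+    not a separate interface):
+
+	int dat = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+	outb(mask | dat, addr);
+	outb(mask | dat | MDIO_SHIFT_CLK, addr);
+
+    i.e. the data bit is presented while the clock is low and latched on
+    the rising edge of MDIO_SHIFT_CLK.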
+
+======================================================================*/
+
+#define DLINK_GPIO		0x1c
+#define DLINK_DIAG		0x1d
+#define DLINK_EEPROM		0x1e
+
+#define MDIO_SHIFT_CLK		0x80
+#define MDIO_DATA_OUT		0x40
+#define MDIO_DIR_WRITE		0x30
+#define MDIO_DATA_WRITE0	(MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1	(MDIO_DIR_WRITE | MDIO_DATA_OUT)
+#define MDIO_DATA_READ		0x10
+#define MDIO_MASK		0x0f
+
+static void mdio_sync(unsigned int addr)
+{
+	int bits, mask = inb(addr) & MDIO_MASK;
+	for (bits = 0; bits < 32; bits++) {
+		outb(mask | MDIO_DATA_WRITE1, addr);
+		outb(mask | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr);
+	}
+}
+
+static int mdio_read(unsigned int addr, int phy_id, int loc)
+{
+	u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
+	int i, retval = 0, mask = inb(addr) & MDIO_MASK;
+
+	mdio_sync(addr);
+	for (i = 13; i >= 0; i--) {
+		int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outb(mask | dat, addr);
+		outb(mask | dat | MDIO_SHIFT_CLK, addr);
+	}
+	for (i = 19; i > 0; i--) {
+		outb(mask, addr);
+		retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0);
+		outb(mask | MDIO_SHIFT_CLK, addr);
+	}
+	return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
+{
+	u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
+	int i, mask = inb(addr) & MDIO_MASK;
+
+	mdio_sync(addr);
+	for (i = 31; i >= 0; i--) {
+		int dat = (cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outb(mask | dat, addr);
+		outb(mask | dat | MDIO_SHIFT_CLK, addr);
+	}
+	for (i = 1; i >= 0; i--) {
+		outb(mask, addr);
+		outb(mask | MDIO_SHIFT_CLK, addr);
+	}
+}
+
+/*======================================================================
+
+    EEPROM access routines for DL10019 and DL10022 based cards
+
+======================================================================*/
+
+#define EE_EEP		0x40
+#define EE_ASIC		0x10
+#define EE_CS		0x08
+#define EE_CK		0x04
+#define EE_DO		0x02
+#define EE_DI		0x01
+#define EE_ADOT		0x01	/* DataOut for ASIC */
+#define EE_READ_CMD	0x06
+
+#define DL19FDUPLX	0x0400	/* DL10019 Full duplex mode */
+
+static int read_eeprom(unsigned int ioaddr, int location)
+{
+	int i, retval = 0;
+	unsigned int ee_addr = ioaddr + DLINK_EEPROM;
+	int read_cmd = location | (EE_READ_CMD << 8);
+
+	outb(0, ee_addr);
+	outb(EE_EEP|EE_CS, ee_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 10; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+		outb_p(EE_EEP|EE_CS|dataval, ee_addr);
+		outb_p(EE_EEP|EE_CS|dataval|EE_CK, ee_addr);
+	}
+	outb(EE_EEP|EE_CS, ee_addr);
+
+	for (i = 16; i > 0; i--) {
+		outb_p(EE_EEP|EE_CS | EE_CK, ee_addr);
+		retval = (retval << 1) | ((inb(ee_addr) & EE_DI) ? 1 : 0);
+		outb_p(EE_EEP|EE_CS, ee_addr);
+	}
+
+	/* Terminate the EEPROM access. */
+	outb(0, ee_addr);
+	return retval;
+}
+
+/*
+    The internal ASIC registers can be changed by EEPROM READ access
+    with EE_ASIC bit set.
+    In ASIC mode, EE_ADOT is used to output the data to the ASIC.
+*/
+
+static void write_asic(unsigned int ioaddr, int location, short asic_data)
+{
+	int i;
+	unsigned int ee_addr = ioaddr + DLINK_EEPROM;
+	short dataval;
+	int read_cmd = location | (EE_READ_CMD << 8);
+
+	asic_data |= read_eeprom(ioaddr, location);
+
+	outb(0, ee_addr);
+	outb(EE_ASIC|EE_CS|EE_DI, ee_addr);
+
+	read_cmd = read_cmd >> 1;
+
+	/* Shift the read command bits out. */
+	for (i = 9; i >= 0; i--) {
+		dataval = (read_cmd & (1 << i)) ? EE_DO : 0;
+		outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+		outb_p(EE_ASIC|EE_CS|EE_DI|dataval|EE_CK, ee_addr);
+		outb_p(EE_ASIC|EE_CS|EE_DI|dataval, ee_addr);
+	}
+	// sync
+	outb(EE_ASIC|EE_CS, ee_addr);
+	outb(EE_ASIC|EE_CS|EE_CK, ee_addr);
+	outb(EE_ASIC|EE_CS, ee_addr);
+
+	for (i = 15; i >= 0; i--) {
+		dataval = (asic_data & (1 << i)) ?
EE_ADOT : 0; + outb_p(EE_ASIC|EE_CS|dataval, ee_addr); + outb_p(EE_ASIC|EE_CS|dataval|EE_CK, ee_addr); + outb_p(EE_ASIC|EE_CS|dataval, ee_addr); + } + + /* Terminate the ASIC access. */ + outb(EE_ASIC|EE_DI, ee_addr); + outb(EE_ASIC|EE_DI| EE_CK, ee_addr); + outb(EE_ASIC|EE_DI, ee_addr); + + outb(0, ee_addr); +} + +/*====================================================================*/ + +static void set_misc_reg(struct net_device *dev) +{ + unsigned int nic_base = dev->base_addr; + struct pcnet_dev *info = PRIV(dev); + u_char tmp; + + if (info->flags & HAS_MISC_REG) { + tmp = inb_p(nic_base + PCNET_MISC) & ~3; + if (dev->if_port == 2) + tmp |= 1; + if (info->flags & USE_BIG_BUF) + tmp |= 2; + if (info->flags & HAS_IBM_MISC) + tmp |= 8; + outb_p(tmp, nic_base + PCNET_MISC); + } + if (info->flags & IS_DL10022) { + if (info->flags & HAS_MII) { + /* Advertise 100F, 100H, 10F, 10H */ + mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1); + /* Restart MII autonegotiation */ + mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000); + mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200); + info->mii_reset = jiffies; + } else { + outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG); + } + } else if (info->flags & IS_DL10019) { + /* Advertise 100F, 100H, 10F, 10H */ + mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1); + /* Restart MII autonegotiation */ + mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000); + mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200); + } +} + +/*====================================================================*/ + +static void mii_phy_probe(struct net_device *dev) +{ + struct pcnet_dev *info = PRIV(dev); + unsigned int mii_addr = dev->base_addr + DLINK_GPIO; + int i; + u_int tmp, phyid; + + for (i = 31; i >= 0; i--) { + tmp = mdio_read(mii_addr, i, 1); + if ((tmp == 0) || (tmp == 0xffff)) + continue; + tmp = mdio_read(mii_addr, i, MII_PHYID_REG1); + phyid = tmp << 16; + phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2); + phyid &= MII_PHYID_REV_MASK; + netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid); + if (phyid == AM79C9XX_HOME_PHY) { + info->pna_phy = i; + } else if (phyid != AM79C9XX_ETH_PHY) { + info->eth_phy = i; + } + } +} + +static int pcnet_open(struct net_device *dev) +{ + int ret; + struct pcnet_dev *info = PRIV(dev); + struct pcmcia_device *link = info->p_dev; + unsigned int nic_base = dev->base_addr; + + dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name); + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + set_misc_reg(dev); + + outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */ + ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; + + link->open++; + + info->phy_id = info->eth_phy; + info->link_status = 0x00; + setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + mod_timer(&info->watchdog, jiffies + HZ); + + return ei_open(dev); +} /* pcnet_open */ + +/*====================================================================*/ + +static int pcnet_close(struct net_device *dev) +{ + struct pcnet_dev *info = PRIV(dev); + struct pcmcia_device *link = info->p_dev; + + dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name); + + ei_close(dev); + free_irq(dev->irq, dev); + + link->open--; + netif_stop_queue(dev); + del_timer_sync(&info->watchdog); + + return 0; +} /* pcnet_close */ + +/*====================================================================== + + Hard reset the card. 
This used to pause for the same period that + a 8390 reset command required, but that shouldn't be necessary. + +======================================================================*/ + +static void pcnet_reset_8390(struct net_device *dev) +{ + unsigned int nic_base = dev->base_addr; + int i; + + ei_status.txing = ei_status.dmaing = 0; + + outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, nic_base + E8390_CMD); + + outb(inb(nic_base + PCNET_RESET), nic_base + PCNET_RESET); + + for (i = 0; i < 100; i++) { + if ((inb_p(nic_base+EN0_ISR) & ENISR_RESET) != 0) + break; + udelay(100); + } + outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ + + if (i == 100) + netdev_err(dev, "pcnet_reset_8390() did not complete.\n"); + + set_misc_reg(dev); + +} /* pcnet_reset_8390 */ + +/*====================================================================*/ + +static int set_config(struct net_device *dev, struct ifmap *map) +{ + struct pcnet_dev *info = PRIV(dev); + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (!(info->flags & HAS_MISC_REG)) + return -EOPNOTSUPP; + else if ((map->port < 1) || (map->port > 2)) + return -EINVAL; + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + NS8390_init(dev, 1); + } + return 0; +} + +/*====================================================================*/ + +static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct pcnet_dev *info; + irqreturn_t ret = ei_interrupt(irq, dev_id); + + if (ret == IRQ_HANDLED) { + info = PRIV(dev); + info->stale = 0; + } + return ret; +} + +static void ei_watchdog(u_long arg) +{ + struct net_device *dev = (struct net_device *)arg; + struct pcnet_dev *info = PRIV(dev); + unsigned int nic_base = dev->base_addr; + unsigned int mii_addr = nic_base + DLINK_GPIO; + u_short link; + + if (!netif_device_present(dev)) goto reschedule; + + /* Check for pending interrupt with expired latency timer: with + this, we can limp along even if the interrupt is blocked */ + if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { + if (!info->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + ei_irq_wrapper(dev->irq, dev); + info->fast_poll = HZ; + } + if (info->fast_poll) { + info->fast_poll--; + info->watchdog.expires = jiffies + 1; + add_timer(&info->watchdog); + return; + } + + if (!(info->flags & HAS_MII)) + goto reschedule; + + mdio_read(mii_addr, info->phy_id, 1); + link = mdio_read(mii_addr, info->phy_id, 1); + if (!link || (link == 0xffff)) { + if (info->eth_phy) { + info->phy_id = info->eth_phy = 0; + } else { + netdev_info(dev, "MII is missing!\n"); + info->flags &= ~HAS_MII; + } + goto reschedule; + } + + link &= 0x0004; + if (link != info->link_status) { + u_short p = mdio_read(mii_addr, info->phy_id, 5); + netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); + if (link && (info->flags & IS_DL10022)) { + /* Disable collision detection on full duplex links */ + outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG); + } else if (link && (info->flags & IS_DL10019)) { + /* Disable collision detection on full duplex links */ + write_asic(dev->base_addr, 4, (p & 0x140) ? DL19FDUPLX : 0); + } + if (link) { + if (info->phy_id == info->eth_phy) { + if (p) + netdev_info(dev, "autonegotiation complete: " + "%sbaseT-%cD selected\n", + ((p & 0x0180) ? "100" : "10"), + ((p & 0x0140) ? 
'F' : 'H')); + else + netdev_info(dev, "link partner did not autonegotiate\n"); + } + NS8390_init(dev, 1); + } + info->link_status = link; + } + if (info->pna_phy && time_after(jiffies, info->mii_reset + 6*HZ)) { + link = mdio_read(mii_addr, info->eth_phy, 1) & 0x0004; + if (((info->phy_id == info->pna_phy) && link) || + ((info->phy_id != info->pna_phy) && !link)) { + /* isolate this MII and try flipping to the other one */ + mdio_write(mii_addr, info->phy_id, 0, 0x0400); + info->phy_id ^= info->pna_phy ^ info->eth_phy; + netdev_info(dev, "switched to %s transceiver\n", + (info->phy_id == info->eth_phy) ? "ethernet" : "PNA"); + mdio_write(mii_addr, info->phy_id, 0, + (info->phy_id == info->eth_phy) ? 0x1000 : 0); + info->link_status = 0; + info->mii_reset = jiffies; + } + } + +reschedule: + info->watchdog.expires = jiffies + HZ; + add_timer(&info->watchdog); +} + +/*====================================================================*/ + + +static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct pcnet_dev *info = PRIV(dev); + struct mii_ioctl_data *data = if_mii(rq); + unsigned int mii_addr = dev->base_addr + DLINK_GPIO; + + if (!(info->flags & (IS_DL10019|IS_DL10022))) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = info->phy_id; + case SIOCGMIIREG: /* Read MII PHY register. */ + data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); + return 0; + case SIOCSMIIREG: /* Write MII PHY register. */ + mdio_write(mii_addr, data->phy_id, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +/*====================================================================*/ + +static void dma_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + unsigned int nic_base = dev->base_addr; + + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in dma_block_input." + "[DMAstat:%1x][irqlock:%1x]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); + outb_p(0, nic_base + EN0_RCNTHI); + outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD); + + insw(nic_base + PCNET_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)>>1); + /* Fix for big endian systems */ + hdr->count = le16_to_cpu(hdr->count); + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} + +/*====================================================================*/ + +static void dma_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + unsigned int nic_base = dev->base_addr; + int xfer_count = count; + char *buf = skb->data; + struct ei_device *ei_local = netdev_priv(dev); + + if ((netif_msg_rx_status(ei_local)) && (count != 4)) + netdev_dbg(dev, "[bi=%d]\n", count+4); + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in dma_block_input." 
+ "[DMAstat:%1x][irqlock:%1x]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD); + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); + outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD); + + insw(nic_base + PCNET_DATAPORT,buf,count>>1); + if (count & 0x01) + buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++; + + /* This was for the ALPHA version only, but enough people have been + encountering problems that it is still here. */ +#ifdef PCMCIA_DEBUG + /* DMA termination address check... */ + if (netif_msg_rx_status(ei_local)) { + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == (addr & 0xff)) + break; + } while (--tries > 0); + if (tries <= 0) + netdev_notice(dev, "RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; +} /* dma_block_input */ + +/*====================================================================*/ + +static void dma_block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page) +{ + unsigned int nic_base = dev->base_addr; + struct pcnet_dev *info = PRIV(dev); +#ifdef PCMCIA_DEBUG + int retries = 0; + struct ei_device *ei_local = netdev_priv(dev); +#endif + u_long dma_start; + +#ifdef PCMCIA_DEBUG + netif_dbg(ei_local, tx_queued, dev, "[bo=%d]\n", count); +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + if (count & 0x01) + count++; + if (ei_status.dmaing) { + netdev_err(dev, "DMAing conflict in dma_block_output." + "[DMAstat:%1x][irqlock:%1x]\n", + ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base+PCNET_CMD); + +#ifdef PCMCIA_DEBUG + retry: +#endif + + outb_p(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + outb_p(count & 0xff, nic_base + EN0_RCNTLO); + outb_p(count >> 8, nic_base + EN0_RCNTHI); + outb_p(0x00, nic_base + EN0_RSARLO); + outb_p(start_page, nic_base + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, nic_base + PCNET_CMD); + outsw(nic_base + PCNET_DATAPORT, buf, count>>1); + + dma_start = jiffies; + +#ifdef PCMCIA_DEBUG + /* This was for the ALPHA version only, but enough people have been + encountering problems that it is still here. */ + /* DMA termination address check... */ + if (netif_msg_tx_queued(ei_local)) { + int addr, tries = 20; + do { + int high = inb_p(nic_base + EN0_RSARHI); + int low = inb_p(nic_base + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + if (tries <= 0) { + netdev_notice(dev, "Tx packet transfer address mismatch," + "%#4.4x (expected) vs. 
%#4.4x (actual).\n", + (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) { + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + pcnet_reset_8390(dev); + NS8390_init(dev, 1); + break; + } + + outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ + if (info->flags & DELAY_OUTPUT) + udelay((long)delay_time); + ei_status.dmaing &= ~0x01; +} + +/*====================================================================*/ + +static int setup_dma_config(struct pcmcia_device *link, int start_pg, + int stop_pg) +{ + struct net_device *dev = link->priv; + + ei_status.tx_start_page = start_pg; + ei_status.rx_start_page = start_pg + TX_PAGES; + ei_status.stop_page = stop_pg; + + /* set up block i/o functions */ + ei_status.get_8390_hdr = dma_get_8390_hdr; + ei_status.block_input = dma_block_input; + ei_status.block_output = dma_block_output; + + return 0; +} + +/*====================================================================*/ + +static void copyin(void *dest, void __iomem *src, int c) +{ + u_short *d = dest; + u_short __iomem *s = src; + int odd; + + if (c <= 0) + return; + odd = (c & 1); c >>= 1; + + if (c) { + do { *d++ = __raw_readw(s++); } while (--c); + } + /* get last byte by fetching a word and masking */ + if (odd) + *((u_char *)d) = readw(s) & 0xff; +} + +static void copyout(void __iomem *dest, const void *src, int c) +{ + u_short __iomem *d = dest; + const u_short *s = src; + int odd; + + if (c <= 0) + return; + odd = (c & 1); c >>= 1; + + if (c) { + do { __raw_writew(*s++, d++); } while (--c); + } + /* copy last byte doing a read-modify-write */ + if (odd) + writew((readw(d) & 0xff00) | *(u_char *)s, d); +} + +/*====================================================================*/ + +static void shmem_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, + int ring_page) +{ + void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8) + + (ring_page << 8) + - (ei_status.rx_start_page << 8); + + copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr)); + /* Fix for big endian systems */ + hdr->count = le16_to_cpu(hdr->count); +} + +/*====================================================================*/ + +static void shmem_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + void __iomem *base = ei_status.mem; + unsigned long offset = (TX_PAGES<<8) + ring_offset + - (ei_status.rx_start_page << 8); + char *buf = skb->data; + + if (offset + count > ei_status.priv) { + /* We must wrap the input move. 
*/ + int semi_count = ei_status.priv - offset; + copyin(buf, base + offset, semi_count); + buf += semi_count; + offset = TX_PAGES<<8; + count -= semi_count; + } + copyin(buf, base + offset, count); +} + +/*====================================================================*/ + +static void shmem_block_output(struct net_device *dev, int count, + const u_char *buf, const int start_page) +{ + void __iomem *shmem = ei_status.mem + (start_page << 8); + shmem -= ei_status.tx_start_page << 8; + copyout(shmem, buf, count); +} + +/*====================================================================*/ + +static int setup_shmem_window(struct pcmcia_device *link, int start_pg, + int stop_pg, int cm_offset) +{ + struct net_device *dev = link->priv; + struct pcnet_dev *info = PRIV(dev); + int i, window_size, offset, ret; + + window_size = (stop_pg - start_pg) << 8; + if (window_size > 32 * 1024) + window_size = 32 * 1024; + + /* Make sure it's a power of two. */ + window_size = roundup_pow_of_two(window_size); + + /* Allocate a memory window */ + link->resource[3]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; + link->resource[3]->flags |= WIN_USE_WAIT; + link->resource[3]->start = 0; link->resource[3]->end = window_size; + ret = pcmcia_request_window(link, link->resource[3], mem_speed); + if (ret) + goto failed; + + offset = (start_pg << 8) + cm_offset; + offset -= offset % window_size; + ret = pcmcia_map_mem_page(link, link->resource[3], offset); + if (ret) + goto failed; + + /* Try scribbling on the buffer */ + info->base = ioremap(link->resource[3]->start, + resource_size(link->resource[3])); + for (i = 0; i < (TX_PAGES<<8); i += 2) + __raw_writew((i>>1), info->base+offset+i); + udelay(100); + for (i = 0; i < (TX_PAGES<<8); i += 2) + if (__raw_readw(info->base+offset+i) != (i>>1)) break; + pcnet_reset_8390(dev); + if (i != (TX_PAGES<<8)) { + iounmap(info->base); + pcmcia_release_window(link, link->resource[3]); + info->base = NULL; + goto failed; + } + + ei_status.mem = info->base + offset; + ei_status.priv = resource_size(link->resource[3]); + dev->mem_start = (u_long)ei_status.mem; + dev->mem_end = dev->mem_start + resource_size(link->resource[3]); + + ei_status.tx_start_page = start_pg; + ei_status.rx_start_page = start_pg + TX_PAGES; + ei_status.stop_page = start_pg + ( + (resource_size(link->resource[3]) - offset) >> 8); + + /* set up block i/o functions */ + ei_status.get_8390_hdr = shmem_get_8390_hdr; + ei_status.block_input = shmem_block_input; + ei_status.block_output = shmem_block_output; + + info->flags |= USE_SHMEM; + return 0; + +failed: + return 1; +} + +/*====================================================================*/ + +static const struct pcmcia_device_id pcnet_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0057, 0x0021), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0104, 0x000a), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0xea15), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0x3341), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0143, 0xc0ab), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), + 
PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away Credit Card Adapter", 0xb569a6e5, 0x4bdf15c3), + PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "w95 Home and Away Credit Card ", 0xb569a6e5, 0xae911c15), + PCMCIA_MFC_DEVICE_PROD_ID123(0, "APEX DATA", "MULTICARD", "ETHERNET-MODEM", 0x11c2da09, 0x7289dc5d, 0xaad95e1f), + PCMCIA_MFC_DEVICE_PROD_ID2(0, "FAX/Modem/Ethernet Combo Card ", 0x1ed59302), + PCMCIA_DEVICE_MANF_CARD(0x0057, 0x1004), + PCMCIA_DEVICE_MANF_CARD(0x0104, 0x000d), + PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0075), + PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145), + PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230), + PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), + PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), + PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), + PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307), + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), + PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), + PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("CNet ", "CN30BC", "ETHERNET", 0x9fe55d3d, 0x85601198, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("Digital", "Ethernet", "Adapter", 0x9999ab35, 0x00b2e941, 0x4b0d829e), + PCMCIA_DEVICE_PROD_ID123("Edimax Technology Inc.", "PCMCIA", "Ethernet Card", 0x738a0019, 0x281f1c5d, 0x5e9d92c0), + PCMCIA_DEVICE_PROD_ID123("EFA ", "EFA207", "ETHERNET", 0x3d294be4, 0xeb9aab6c, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("I-O DATA", "PCLA", "ETHERNET", 0x1d55d7ec, 0xe4c64d34, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCLATE", "ETHERNET", 0x547e66dc, 0x6b260753, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID123("KingMax Technology Inc.", "EN10-T2", "PCMCIA Ethernet Card", 0x932b7189, 0x699e4436, 0x6f6652e0), + PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2216", 0x281f1c5d, 0xd4cd2f20, 0xb87add82), + PCMCIA_DEVICE_PROD_ID123("PCMCIA", "PCMCIA-ETHERNET-CARD", "UE2620", 0x281f1c5d, 0xd4cd2f20, 0x7d3d83a8), + PCMCIA_DEVICE_PROD_ID1("2412LAN", 0x67f236ab), + PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2212", 0xdfc6b5b2, 0xcb112a11), + PCMCIA_DEVICE_PROD_ID12("ACCTON", "EN2216-PCMCIA-ETHERNET", 0xdfc6b5b2, 0x5542bfff), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA100-PCM-T V2 100/10M LAN PC Card", 0xbb7fbdd7, 0xcd91cc68), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA100-PCM V2", 0x36634a66, 0xc6d05997), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis, K.K.", "CentreCOM LA-PCM_V2", 0xbb7fBdd7, 0x28e299f8), + PCMCIA_DEVICE_PROD_ID12("Allied Telesis K.K.", "LA-PCM V3", 0x36634a66, 0x62241d96), + PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8010", 0x5070a7f9, 0x82f96e96), + PCMCIA_DEVICE_PROD_ID12("AmbiCom", "AMB8610", 0x5070a7f9, 0x86741224), + PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002", 0x93b15570, 0x75ec3efb), + 
PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8002T", 0x93b15570, 0x461c5247), + PCMCIA_DEVICE_PROD_ID12("AmbiCom Inc", "AMB8010", 0x93b15570, 0x82f96e96), + PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet", 0x578ba6e7, 0x0a9888c1), + PCMCIA_DEVICE_PROD_ID12("AnyCom", "ECO Ethernet 10/100", 0x578ba6e7, 0x939fedbd), + PCMCIA_DEVICE_PROD_ID12("AROWANA", "PCMCIA Ethernet LAN Card", 0x313adbc8, 0x08d9f190), + PCMCIA_DEVICE_PROD_ID12("ASANTE", "FriendlyNet PC Card", 0x3a7ade0f, 0x41c64504), + PCMCIA_DEVICE_PROD_ID12("Billionton", "LNT-10TB", 0x552ab682, 0xeeb1ba6a), + PCMCIA_DEVICE_PROD_ID12("CF", "10Base-Ethernet", 0x44ebf863, 0x93ae4d79), + PCMCIA_DEVICE_PROD_ID12("CNet", "CN40BC Ethernet", 0xbc477dde, 0xfba775a7), + PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "BASEline PCMCIA 10 MBit Ethernetadapter", 0xfa2e424d, 0xe9190d8a), + PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9), + PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), + PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), + PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), + PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-TD", 0x5261440f, 0x47d5ca83), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), + PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), + PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88), + PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), + PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), + PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), + PCMCIA_DEVICE_PROD_ID12("DataTrek.", "NetCard ", 0x5cd66d9d, 0x84697ce0), + PCMCIA_DEVICE_PROD_ID12("Dayna Communications, Inc.", "CommuniCard E", 0x0c629325, 0xb4e7dbaf), + PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100", 0x697403d8, 0xe160b995), + PCMCIA_DEVICE_PROD_ID12("Digicom", "Palladio LAN 10/100 Dongless", 0x697403d8, 0xa6d3b233), + PCMCIA_DEVICE_PROD_ID12("DIGITAL", "DEPCM-XX", 0x69616cb3, 0xe600e76e), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-650", 0x1a424a1c, 0xf28c8398), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660", 0x1a424a1c, 0xd9a1d05b), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DE-660+", 0x1a424a1c, 0x50dcd0ec), + PCMCIA_DEVICE_PROD_ID12("D-Link", "DFE-650", 0x1a424a1c, 0x0f0073f9), + PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 PC Card", 0x725b842d, 0xf1efee84), + PCMCIA_DEVICE_PROD_ID12("Dual Speed", "10/100 Port Attached PC Card", 0x725b842d, 0x2db1f8e9), + PCMCIA_DEVICE_PROD_ID12("Dynalink", "L10BC", 0x55632fd5, 0xdc65f2b1), + PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10BC", 0x6a26d1cf, 0xdc65f2b1), + PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L10C", 0x6a26d1cf, 0xc4f84efb), + PCMCIA_DEVICE_PROD_ID12("E-CARD", "E-CARD", 0x6701da11, 0x6701da11), + PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet 10BaseT 
card", 0x53c864c6, 0xedd059f6), + PCMCIA_DEVICE_PROD_ID12("EIGER Labs Inc.", "Ethernet Combo card", 0x53c864c6, 0x929c486c), + PCMCIA_DEVICE_PROD_ID12("Ethernet", "Adapter", 0x00b2e941, 0x4b0d829e), + PCMCIA_DEVICE_PROD_ID12("Ethernet Adapter", "E2000 PCMCIA Ethernet", 0x96767301, 0x71fbbc61), + PCMCIA_DEVICE_PROD_ID12("Ethernet PCMCIA adapter", "EP-210", 0x8dd86181, 0xf2b52517), + PCMCIA_DEVICE_PROD_ID12("Fast Ethernet", "Adapter", 0xb4be14e3, 0x4b0d829e), + PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2000", 0x2a151fac, 0xf00555cb), + PCMCIA_DEVICE_PROD_ID12("Grey Cell", "GCS2220", 0x2a151fac, 0xc1b7e327), + PCMCIA_DEVICE_PROD_ID12("GVC", "NIC-2000p", 0x76e171bd, 0x6eb1c947), + PCMCIA_DEVICE_PROD_ID12("IBM Corp.", "Ethernet", 0xe3736c88, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("IC-CARD", "IC-CARD", 0x60cb09a6, 0x60cb09a6), + PCMCIA_DEVICE_PROD_ID12("IC-CARD+", "IC-CARD+", 0x93693494, 0x93693494), + PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b), + PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0), + PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956), + PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616), + PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64), + PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5), + PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3), + PCMCIA_DEVICE_PROD_ID12("Kingston Technology Corp.", "EtheRx PC Card Ethernet Adapter", 0x313c7be3, 0x0afb54a2), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-10/100CD", 0x1b7827b2, 0xcda71d1c), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDF", 0x1b7827b2, 0xfec71e40), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDL/T", 0x1b7827b2, 0x79fba4f7), + PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDS", 0x1b7827b2, 0x931afaab), + PCMCIA_DEVICE_PROD_ID12("LEMEL", "LM-N89TX PRO", 0xbbefb52f, 0xd2897a97), + PCMCIA_DEVICE_PROD_ID12("Linksys", "Combo PCMCIA EthernetCard (EC2T)", 0x0733cc81, 0x32ee8c78), + PCMCIA_DEVICE_PROD_ID12("LINKSYS", "E-CARD", 0xf7cb0b07, 0x6701da11), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 Integrated PC Card (PCM100)", 0x0733cc81, 0x453c3f9d), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100)", 0x0733cc81, 0x66c5a389), + PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V2)", 0x0733cc81, 0x3a3b28e9), + PCMCIA_DEVICE_PROD_ID12("Linksys", "HomeLink Phoneline + 10/100 Network PC Card (PCM100H1)", 0x733cc81, 0x7a3e5c3a), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c), + PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), + PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), + PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), + PCMCIA_DEVICE_PROD_ID12("Matsushita Electric Industrial Co.,LTD.", "CF-VEL211", 0x44445376, 0x8ded41d4), + PCMCIA_DEVICE_PROD_ID12("MAXTECH", "PCN2000", 0x78d64bc0, 0xca0ca4b8), + PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-T", 0x481e0094, 0xa2eb0cf3), + PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC2-TX", 0x481e0094, 0x41a6916c), + PCMCIA_DEVICE_PROD_ID12("Microcom C.E.", "Travel Card LAN 10/100", 0x4b91cec7, 0xe70220d6), + PCMCIA_DEVICE_PROD_ID12("Microdyne", 
"NE4200", 0x2e6da59b, 0x0478e472), + PCMCIA_DEVICE_PROD_ID12("MIDORI ELEC.", "LT-PCMT", 0x648d55c1, 0xbde526c7), + PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover 4100", 0x36e1191f, 0x60c229b9), + PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8), + PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76), + PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e), + PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f), + PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet", 0x281f1c5d, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET", 0x281f1c5d, 0x3ff7175b), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet 10BaseT Card", 0x281f1c5d, 0x4de2f6c8), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Card", 0x281f1c5d, 0x5e9d92c0), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Ethernet Combo card", 0x281f1c5d, 0x929c486c), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "ETHERNET V1.0", 0x281f1c5d, 0x4d8817c8), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEthernet", 0x281f1c5d, 0xfe871eeb), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast-Ethernet", 0x281f1c5d, 0x45f1f3b4), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FAST ETHERNET CARD", 0x281f1c5d, 0xec5dbca7), + PCMCIA_DEVICE_PROD_ID12("PCMCIA LAN", "Ethernet", 0x7500e246, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "LNT-10TN", 0x281f1c5d, 0xe707f641), + PCMCIA_DEVICE_PROD_ID12("PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "UE2212", 0x281f1c5d, 0xbf17199b), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", " Ethernet NE2000 Compatible", 0x281f1c5d, 0x42d5d7e1), + PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10baseT 3.3V", 0xebf91155, 0x30074c80), + PCMCIA_DEVICE_PROD_ID12("PRETEC", "Ethernet CompactLAN 10BaseT 3.3V", 0xebf91155, 0x7f5a4f50), + PCMCIA_DEVICE_PROD_ID12("Psion Dacom", "Gold Card Ethernet", 0xf5f025c2, 0x3a30e110), + PCMCIA_DEVICE_PROD_ID12("=RELIA==", "Ethernet", 0xcdd0644a, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("RIOS Systems Co.", "PC CARD3 ETHERNET", 0x7dd33481, 0x10b41826), + PCMCIA_DEVICE_PROD_ID12("RP", "1625B Ethernet NE2000 Compatible", 0xe3e66e22, 0xb96150df), + PCMCIA_DEVICE_PROD_ID12("RPTI", "EP400 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4a7e2ae0), + PCMCIA_DEVICE_PROD_ID12("RPTI", "EP401 Ethernet NE2000 Compatible", 0xdc6f88fd, 0x4bcbd7fd), + PCMCIA_DEVICE_PROD_ID12("RPTI LTD.", "EP400", 0xc53ac515, 0x81e39388), + PCMCIA_DEVICE_PROD_ID12("SCM", "Ethernet Combo card", 0xbdc3b102, 0x929c486c), + PCMCIA_DEVICE_PROD_ID12("Seiko Epson Corp.", "Ethernet", 0x09928730, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("SMC", "EZCard-10-PCMCIA", 0xc4f8b18b, 0xfb21d265), + PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision D", 0xc70a4760, 0x2ade483e), + PCMCIA_DEVICE_PROD_ID12("Socket Communications Inc", "Socket EA PCMCIA LAN Adapter Revision E", 0xc70a4760, 0x5dd978a8), + PCMCIA_DEVICE_PROD_ID12("TDK", "LAK-CD031 for PCMCIA", 0x1eae9475, 0x0ed386fa), + PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE450T", 0x466b05f0, 0x8b74bc4f), + PCMCIA_DEVICE_PROD_ID12("Telecom Device K.K.", "SuperSocket RE550T", 0x466b05f0, 0x33c8db2a), + PCMCIA_DEVICE_PROD_ID13("Hypertec", "EP401", 0x8787bec7, 0xf6e4a31e), + PCMCIA_DEVICE_PROD_ID13("KingMax Technology Inc.", "Ethernet Card", 0x932b7189, 
0x5e9d92c0), + PCMCIA_DEVICE_PROD_ID13("LONGSHINE", "EP401", 0xf866b0b0, 0xf6e4a31e), + PCMCIA_DEVICE_PROD_ID13("Xircom", "CFE-10", 0x2e3ee845, 0x22a49f89), + PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360), + PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de), + PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f), + PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a), + PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078), + /* too generic! */ + /* PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100 Ethernet Card", 0x281f1c5d, 0x11b0ffc0), */ + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "EN2218-LAN/MODEM", 0x281f1c5d, 0x570f348e, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "PCMCIA", "UE2218-LAN/MODEM", 0x281f1c5d, 0x6fdcacee, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), + PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "TOSHIBA", "Modem/LAN Card", 0xb4585a1a, 0x53f922f8, "cis/PCMLM28.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), + PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), + PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", + 0xb4be14e3, 0x43ac239b, 0x0877b627), + PCMCIA_DEVICE_NULL +}; +MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); +MODULE_FIRMWARE("cis/PCMLM28.cis"); +MODULE_FIRMWARE("cis/DP83903.cis"); +MODULE_FIRMWARE("cis/LA-PCM.cis"); +MODULE_FIRMWARE("cis/PE520.cis"); +MODULE_FIRMWARE("cis/NE2K.cis"); +MODULE_FIRMWARE("cis/PE-200.cis"); +MODULE_FIRMWARE("cis/tamarack.cis"); + +static struct pcmcia_driver pcnet_driver = { + .name = "pcnet_cs", + .probe = pcnet_probe, + .remove = pcnet_detach, + .owner = THIS_MODULE, + .id_table = pcnet_ids, + .suspend = pcnet_suspend, + .resume = pcnet_resume, +}; +module_pcmcia_driver(pcnet_driver); diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c new file mode 100644 index 000000000..139385dcd --- /dev/null +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -0,0 +1,631 @@ +/* smc-ultra.c: A SMC Ultra ethernet driver for linux. */ +/* + This is a driver for the SMC Ultra and SMC EtherEZ ISA ethercards. + + Written 1993-1998 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. 
+
+    The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+    This driver uses the cards in the 8390-compatible mode.
+    Most of the run-time complexity is handled by the generic code in
+    8390.c.  The code in this file is responsible for
+
+	ultra_probe()		Detecting and initializing the card.
+	ultra_probe1()
+	ultra_probe_isapnp()
+
+	ultra_open()		The card-specific details of starting, stopping
+	ultra_reset_8390()	and resetting the 8390 NIC core.
+	ultra_close()
+
+	ultra_block_input()	Routines for reading and writing blocks of
+	ultra_block_output()	packet buffer memory.
+	ultra_pio_input()
+	ultra_pio_output()
+
+    This driver enables the shared memory only when doing the actual data
+    transfers to avoid a bug in early version of the card that corrupted
+    data transferred by a AHA1542.
+
+    This driver now supports the programmed-I/O (PIO) data transfer mode of
+    the EtherEZ.  It does not use the non-8390-compatible "Altego" mode.
+    That support (if available) is in smc-ez.c.
+
+    Changelog:
+
+    Paul Gortmaker	: multiple card support for module users.
+    Donald Becker	: 4/17/96 PIO support, minor potential problems avoided.
+    Donald Becker	: 6/6/96 correctly set auto-wrap bit.
+    Alexander Sotirov	: 1/20/01 Added support for ISAPnP cards
+
+    Note about the ISA PnP support:
+
+    This driver can not autoprobe for more than one SMC EtherEZ PnP card.
+    You have to configure the second card manually through the /proc/isapnp
+    interface and then load the module with an explicit io=0x___ option.
+*/
+
+static const char version[] =
+	"smc-ultra.c:v2.02 2/3/98 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/isapnp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8390.h"
+
+#define DRV_NAME "smc-ultra"
+
+/* A zero-terminated list of I/O addresses to be probed. */
+static unsigned int ultra_portlist[] __initdata =
+{0x200, 0x220, 0x240, 0x280, 0x300, 0x340, 0x380, 0};
+
+static int ultra_probe1(struct net_device *dev, int ioaddr);
+
+#ifdef __ISAPNP__
+static int ultra_probe_isapnp(struct net_device *dev);
+#endif
+
+static int ultra_open(struct net_device *dev);
+static void ultra_reset_8390(struct net_device *dev);
+static void ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+				int ring_page);
+static void ultra_block_input(struct net_device *dev, int count,
+				struct sk_buff *skb, int ring_offset);
+static void ultra_block_output(struct net_device *dev, int count,
+				const unsigned char *buf, const int start_page);
+static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+				int ring_page);
+static void ultra_pio_input(struct net_device *dev, int count,
+				struct sk_buff *skb, int ring_offset);
+static void ultra_pio_output(struct net_device *dev, int count,
+				const unsigned char *buf, const int start_page);
+static int ultra_close_card(struct net_device *dev);
+
+#ifdef __ISAPNP__
+static struct isapnp_device_id ultra_device_ids[] __initdata = {
+	{	ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416),
+		ISAPNP_VENDOR('S','M','C'), ISAPNP_FUNCTION(0x8416),
+		(long) "SMC EtherEZ (8416)" },
+	{ }	/* terminate list */
+};
+
+MODULE_DEVICE_TABLE(isapnp, ultra_device_ids);
+#endif
+
+static u32 ultra_msg_enable;
+
+#define START_PG		0x00	/* First page of TX buffer */
+
+#define ULTRA_CMDREG	0	/* Offset to ASIC command register. */
+#define ULTRA_RESET	0x80	/* Board reset, in ULTRA_CMDREG.
*/ +#define ULTRA_MEMENB 0x40 /* Enable the shared memory. */ +#define IOPD 0x02 /* I/O Pipe Data (16 bits), PIO operation. */ +#define IOPA 0x07 /* I/O Pipe Address for PIO operation. */ +#define ULTRA_NIC_OFFSET 16 /* NIC register offset from the base_addr. */ +#define ULTRA_IO_EXTENT 32 +#define EN0_ERWCNT 0x08 /* Early receive warning count. */ + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ultra_poll(struct net_device *dev) +{ + disable_irq(dev->irq); + ei_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif +/* Probe for the Ultra. This looks like a 8013 with the station + address PROM at I/O ports +8 to +13, with a checksum + following. +*/ + +static int __init do_ultra_probe(struct net_device *dev) +{ + int i; + int base_addr = dev->base_addr; + int irq = dev->irq; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return ultra_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + +#ifdef __ISAPNP__ + /* Look for any installed ISAPnP cards */ + if (isapnp_present() && (ultra_probe_isapnp(dev) == 0)) + return 0; +#endif + + for (i = 0; ultra_portlist[i]; i++) { + dev->irq = irq; + if (ultra_probe1(dev, ultra_portlist[i]) == 0) + return 0; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init ultra_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_ultra_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops ultra_netdev_ops = { + .ndo_open = ultra_open, + .ndo_stop = ultra_close_card, + + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_rx_mode = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ultra_poll, +#endif +}; + +static int __init ultra_probe1(struct net_device *dev, int ioaddr) +{ + int i, retval; + int checksum = 0; + const char *model_name; + unsigned char eeprom_irq = 0; + static unsigned version_printed; + /* Values from various config regs. */ + unsigned char num_pages, irqreg, addr, piomode; + unsigned char idreg = inb(ioaddr + 7); + unsigned char reg4 = inb(ioaddr + 4) & 0x7f; + struct ei_device *ei_local = netdev_priv(dev); + + if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + /* Check the ID nibble. */ + if ((idreg & 0xF0) != 0x20 /* SMC Ultra */ + && (idreg & 0xF0) != 0x40) { /* SMC EtherEZ */ + retval = -ENODEV; + goto out; + } + + /* Select the station address register set. */ + outb(reg4, ioaddr + 4); + + for (i = 0; i < 8; i++) + checksum += inb(ioaddr + 8 + i); + if ((checksum & 0xff) != 0xFF) { + retval = -ENODEV; + goto out; + } + + if ((ultra_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) + netdev_info(dev, version); + + model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ"; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + netdev_info(dev, "%s at %#3x, %pM", model_name, + ioaddr, dev->dev_addr); + + /* Switch from the station address to the alternate register set and + read the useful registers there. */ + outb(0x80 | reg4, ioaddr + 4); + + /* Enabled FINE16 mode to avoid BIOS ROM width mismatches @ reboot. 
*/ + outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c); + piomode = inb(ioaddr + 0x8); + addr = inb(ioaddr + 0xb); + irqreg = inb(ioaddr + 0xd); + + /* Switch back to the station address register set so that the MS-DOS driver + can find the card after a warm boot. */ + outb(reg4, ioaddr + 4); + + if (dev->irq < 2) { + unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15}; + int irq; + + /* The IRQ bits are split. */ + irq = irqmap[((irqreg & 0x40) >> 4) + ((irqreg & 0x0c) >> 2)]; + + if (irq == 0) { + pr_cont(", failed to detect IRQ line.\n"); + retval = -EAGAIN; + goto out; + } + dev->irq = irq; + eeprom_irq = 1; + } + + /* The 8390 isn't at the base address, so fake the offset */ + dev->base_addr = ioaddr+ULTRA_NIC_OFFSET; + + { + static const int addr_tbl[4] = { + 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000 + }; + static const short num_pages_tbl[4] = { + 0x20, 0x40, 0x80, 0xff + }; + + dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ; + num_pages = num_pages_tbl[(addr >> 4) & 3]; + } + + ei_status.name = model_name; + ei_status.word16 = 1; + ei_status.tx_start_page = START_PG; + ei_status.rx_start_page = START_PG + TX_PAGES; + ei_status.stop_page = num_pages; + + ei_status.mem = ioremap(dev->mem_start, (ei_status.stop_page - START_PG)*256); + if (!ei_status.mem) { + pr_cont(", failed to ioremap.\n"); + retval = -ENOMEM; + goto out; + } + + dev->mem_end = dev->mem_start + (ei_status.stop_page - START_PG)*256; + + if (piomode) { + pr_cont(", %s IRQ %d programmed-I/O mode.\n", + eeprom_irq ? "EEPROM" : "assigned ", dev->irq); + ei_status.block_input = &ultra_pio_input; + ei_status.block_output = &ultra_pio_output; + ei_status.get_8390_hdr = &ultra_pio_get_hdr; + } else { + pr_cont(", %s IRQ %d memory %#lx-%#lx.\n", + eeprom_irq ? "" : "assigned ", dev->irq, dev->mem_start, + dev->mem_end-1); + ei_status.block_input = &ultra_block_input; + ei_status.block_output = &ultra_block_output; + ei_status.get_8390_hdr = &ultra_get_8390_hdr; + } + ei_status.reset_8390 = &ultra_reset_8390; + + dev->netdev_ops = &ultra_netdev_ops; + NS8390_init(dev, 0); + ei_local->msg_enable = ultra_msg_enable; + + retval = register_netdev(dev); + if (retval) + goto out; + return 0; +out: + release_region(ioaddr, ULTRA_IO_EXTENT); + return retval; +} + +#ifdef __ISAPNP__ +static int __init ultra_probe_isapnp(struct net_device *dev) +{ + int i; + + for (i = 0; ultra_device_ids[i].vendor != 0; i++) { + struct pnp_dev *idev = NULL; + + while ((idev = pnp_find_dev(NULL, + ultra_device_ids[i].vendor, + ultra_device_ids[i].function, + idev))) { + /* Avoid already found cards from previous calls */ + if (pnp_device_attach(idev) < 0) + continue; + if (pnp_activate_dev(idev) < 0) { + __again: + pnp_device_detach(idev); + continue; + } + /* if no io and irq, search for next */ + if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) + goto __again; + /* found it */ + dev->base_addr = pnp_port_start(idev, 0); + dev->irq = pnp_irq(idev, 0); + netdev_info(dev, + "smc-ultra.c: ISAPnP reports %s at i/o %#lx, irq %d.\n", + (char *) ultra_device_ids[i].driver_data, + dev->base_addr, dev->irq); + if (ultra_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. 
*/ + netdev_err(dev, + "smc-ultra.c: Probe of ISAPnP card at %#lx failed.\n", + dev->base_addr); + pnp_device_detach(idev); + return -ENXIO; + } + ei_status.priv = (unsigned long)idev; + break; + } + if (!idev) + continue; + return 0; + } + + return -ENODEV; +} +#endif + +static int +ultra_open(struct net_device *dev) +{ + int retval; + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40, + 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, }; + + retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev); + if (retval) + return retval; + + outb(0x00, ioaddr); /* Disable shared memory for safety. */ + outb(0x80, ioaddr + 5); + /* Set the IRQ line. */ + outb(inb(ioaddr + 4) | 0x80, ioaddr + 4); + outb((inb(ioaddr + 13) & ~0x4C) | irq2reg[dev->irq], ioaddr + 13); + outb(inb(ioaddr + 4) & 0x7f, ioaddr + 4); + + if (ei_status.block_input == &ultra_pio_input) { + outb(0x11, ioaddr + 6); /* Enable interrupts and PIO. */ + outb(0x01, ioaddr + 0x19); /* Enable ring read auto-wrap. */ + } else + outb(0x01, ioaddr + 6); /* Enable interrupts and memory. */ + /* Set the early receive warning level in window 0 high enough not + to receive ERW interrupts. */ + outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr); + outb(0xff, dev->base_addr + EN0_ERWCNT); + ei_open(dev); + return 0; +} + +static void +ultra_reset_8390(struct net_device *dev) +{ + int cmd_port = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC base addr */ + struct ei_device *ei_local = netdev_priv(dev); + + outb(ULTRA_RESET, cmd_port); + netif_dbg(ei_local, hw, dev, "resetting Ultra, t=%ld...\n", jiffies); + ei_status.txing = 0; + + outb(0x00, cmd_port); /* Disable shared memory for safety. */ + outb(0x80, cmd_port + 5); + if (ei_status.block_input == &ultra_pio_input) + outb(0x11, cmd_port + 6); /* Enable interrupts and PIO. */ + else + outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */ + + netif_dbg(ei_local, hw, dev, "reset done\n"); +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void +ultra_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + void __iomem *hdr_start = ei_status.mem + ((ring_page - START_PG)<<8); + + outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem on */ +#ifdef __BIG_ENDIAN + /* Officially this is what we are doing, but the readl() is faster */ + /* unfortunately it isn't endian aware of the struct */ + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = le16_to_cpu(hdr->count); +#else + ((unsigned int*)hdr)[0] = readl(hdr_start); +#endif + outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* shmem off */ +} + +/* Block input and output are easy on shared memory ethercards, the only + complication is when the ring buffer wraps. */ + +static void +ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + void __iomem *xfer_start = ei_status.mem + ring_offset - (START_PG<<8); + + /* Enable shared memory. */ + outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); + + if (ring_offset + count > ei_status.stop_page*256) { + /* We must wrap the input move. 
*/ + int semi_count = ei_status.stop_page*256 - ring_offset; + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); + } else { + memcpy_fromio(skb->data, xfer_start, count); + } + + outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ +} + +static void +ultra_block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) +{ + void __iomem *shmem = ei_status.mem + ((start_page - START_PG)<<8); + + /* Enable shared memory. */ + outb(ULTRA_MEMENB, dev->base_addr - ULTRA_NIC_OFFSET); + + memcpy_toio(shmem, buf, count); + + outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ +} + +/* The identical operations for programmed I/O cards. + The PIO model is trivial to use: the 16 bit start address is written + byte-sequentially to IOPA, with no intervening I/O operations, and the + data is read or written to the IOPD data port. + The only potential complication is that the address register is shared + and must be always be rewritten between each read/write direction change. + This is no problem for us, as the 8390 code ensures that we are single + threaded. */ +static void ultra_pio_get_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */ + outb(ring_page, ioaddr + IOPA); + insw(ioaddr + IOPD, hdr, sizeof(struct e8390_pkt_hdr)>>1); +} + +static void ultra_pio_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + char *buf = skb->data; + + /* For now set the address again, although it should already be correct. */ + outb(ring_offset, ioaddr + IOPA); /* Set the address, LSB first. */ + outb(ring_offset >> 8, ioaddr + IOPA); + /* We know skbuffs are padded to at least word alignment. */ + insw(ioaddr + IOPD, buf, (count+1)>>1); +} + +static void ultra_pio_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */ + outb(0x00, ioaddr + IOPA); /* Set the address, LSB first. */ + outb(start_page, ioaddr + IOPA); + /* An extra odd byte is OK here as well. */ + outsw(ioaddr + IOPD, buf, (count+1)>>1); +} + +static int +ultra_close_card(struct net_device *dev) +{ + int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* CMDREG */ + struct ei_device *ei_local = netdev_priv(dev); + + netif_stop_queue(dev); + + netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n"); + + outb(0x00, ioaddr + 6); /* Disable interrupts. */ + free_irq(dev->irq, dev); + + NS8390_init(dev, 0); + + /* We should someday disable shared memory and change to 8-bit mode + "just in case"... 
*/ + + return 0; +} + + +#ifdef MODULE +#define MAX_ULTRA_CARDS 4 /* Max number of Ultra cards per module */ +static struct net_device *dev_ultra[MAX_ULTRA_CARDS]; +static int io[MAX_ULTRA_CARDS]; +static int irq[MAX_ULTRA_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_named(msg_enable, ultra_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); +MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); +MODULE_DESCRIPTION("SMC Ultra/EtherEZ ISA/PnP Ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. */ +int __init +init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + printk(KERN_NOTICE "smc-ultra.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + if (do_ultra_probe(dev) == 0) { + dev_ultra[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "smc-ultra.c: No SMC Ultra card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + /* NB: ultra_close_card() does free_irq */ +#ifdef __ISAPNP__ + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; + if (idev) + pnp_device_detach(idev); +#endif + release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT); + iounmap(ei_status.mem); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_ULTRA_CARDS; this_dev++) { + struct net_device *dev = dev_ultra[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c new file mode 100644 index 000000000..aca957d4e --- /dev/null +++ b/drivers/net/ethernet/8390/stnic.c @@ -0,0 +1,303 @@ +/* stnic.c : A SH7750 specific part of driver for NS DP83902A ST-NIC. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999 kaz Kojima + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#ifdef CONFIG_SH_STANDARD_BIOS +#include +#endif + +#include "8390.h" + +#define DRV_NAME "stnic" + +#define byte unsigned char +#define half unsigned short +#define word unsigned int +#define vbyte volatile unsigned char +#define vhalf volatile unsigned short +#define vword volatile unsigned int + +#define STNIC_RUN 0x01 /* 1 == Run, 0 == reset. */ + +#define START_PG 0 /* First page of TX buffer */ +#define STOP_PG 128 /* Last page +1 of RX ring */ + +/* Alias */ +#define STNIC_CR E8390_CMD +#define PG0_RSAR0 EN0_RSARLO +#define PG0_RSAR1 EN0_RSARHI +#define PG0_RBCR0 EN0_RCNTLO +#define PG0_RBCR1 EN0_RCNTHI + +#define CR_RRD E8390_RREAD +#define CR_RWR E8390_RWRITE +#define CR_PG0 E8390_PAGE0 +#define CR_STA E8390_START +#define CR_RDMA E8390_NODMA + +/* FIXME! YOU MUST SET YOUR OWN ETHER ADDRESS. 
*/ +static byte stnic_eadr[6] = +{0x00, 0xc0, 0x6e, 0x00, 0x00, 0x07}; + +static struct net_device *stnic_dev; + +static void stnic_reset (struct net_device *dev); +static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void stnic_block_input (struct net_device *dev, int count, + struct sk_buff *skb , int ring_offset); +static void stnic_block_output (struct net_device *dev, int count, + const unsigned char *buf, int start_page); + +static void stnic_init (struct net_device *dev); + +static u32 stnic_msg_enable; + +module_param_named(msg_enable, stnic_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); + +/* SH7750 specific read/write io. */ +static inline void +STNIC_DELAY (void) +{ + vword trash; + trash = *(vword *) 0xa0000000; + trash = *(vword *) 0xa0000000; + trash = *(vword *) 0xa0000000; +} + +static inline byte +STNIC_READ (int reg) +{ + byte val; + + val = (*(vhalf *) (PA_83902 + ((reg) << 1)) >> 8) & 0xff; + STNIC_DELAY (); + return val; +} + +static inline void +STNIC_WRITE (int reg, byte val) +{ + *(vhalf *) (PA_83902 + ((reg) << 1)) = ((half) (val) << 8); + STNIC_DELAY (); +} + +static int __init stnic_probe(void) +{ + struct net_device *dev; + int i, err; + struct ei_device *ei_local; + + /* If we are not running on a SolutionEngine, give up now */ + if (! MACH_SE) + return -ENODEV; + + /* New style probing API */ + dev = alloc_ei_netdev(); + if (!dev) + return -ENOMEM; + +#ifdef CONFIG_SH_STANDARD_BIOS + sh_bios_get_node_addr (stnic_eadr); +#endif + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = stnic_eadr[i]; + + /* Set the base address to point to the NIC, not the "real" base! */ + dev->base_addr = 0x1000; + dev->irq = IRQ_STNIC; + dev->netdev_ops = &ei_netdev_ops; + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. 
*/ + err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev); + if (err) { + netdev_emerg(dev, " unable to get IRQ %d.\n", dev->irq); + free_netdev(dev); + return err; + } + + ei_status.name = dev->name; + ei_status.word16 = 1; +#ifdef __LITTLE_ENDIAN__ + ei_status.bigendian = 0; +#else + ei_status.bigendian = 1; +#endif + ei_status.tx_start_page = START_PG; + ei_status.rx_start_page = START_PG + TX_PAGES; + ei_status.stop_page = STOP_PG; + + ei_status.reset_8390 = &stnic_reset; + ei_status.get_8390_hdr = &stnic_get_hdr; + ei_status.block_input = &stnic_block_input; + ei_status.block_output = &stnic_block_output; + + stnic_init (dev); + ei_local = netdev_priv(dev); + ei_local->msg_enable = stnic_msg_enable; + + err = register_netdev(dev); + if (err) { + free_irq(dev->irq, dev); + free_netdev(dev); + return err; + } + stnic_dev = dev; + + netdev_info(dev, "NS ST-NIC 83902A\n"); + + return 0; +} + +static void +stnic_reset (struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + *(vhalf *) PA_83902_RST = 0; + udelay (5); + netif_warn(ei_local, hw, dev, "8390 reset done (%ld).\n", jiffies); + *(vhalf *) PA_83902_RST = ~0; + udelay (5); +} + +static void +stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + + half buf[2]; + + STNIC_WRITE (PG0_RSAR0, 0); + STNIC_WRITE (PG0_RSAR1, ring_page); + STNIC_WRITE (PG0_RBCR0, 4); + STNIC_WRITE (PG0_RBCR1, 0); + STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); + + buf[0] = *(vhalf *) PA_83902_IF; + STNIC_DELAY (); + buf[1] = *(vhalf *) PA_83902_IF; + STNIC_DELAY (); + hdr->next = buf[0] >> 8; + hdr->status = buf[0] & 0xff; +#ifdef __LITTLE_ENDIAN__ + hdr->count = buf[1]; +#else + hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8); +#endif + + netif_dbg(ei_local, probe, dev, "ring %x status %02x next %02x count %04x.\n", + ring_page, hdr->status, hdr->next, hdr->count); + + STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); +} + +/* Block input and output, similar to the Crynwr packet driver. If you are + porting to a new ethercard look at the packet driver source for hints. + The HP LAN doesn't use shared memory -- we put the packet + out through the "remote DMA" dataport. 
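Purely as a reading aid, the sketch below pulls that remote-DMA register sequence out into one place. It is illustrative only (an unused helper built from the STNIC_WRITE()/STNIC_DELAY() accessors and the register aliases defined above), and it rounds the byte count up to a halfword just like the real routines that follow.

static void stnic_remote_read_sketch (u16 addr, half *dst, int len)
{
	if (len & 1)			/* the data port moves halfwords */
		len++;

	STNIC_WRITE (PG0_RSAR0, addr & 0xff);	/* source address, LSB first */
	STNIC_WRITE (PG0_RSAR1, addr >> 8);
	STNIC_WRITE (PG0_RBCR0, len & 0xff);	/* byte count */
	STNIC_WRITE (PG0_RBCR1, len >> 8);
	STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA);	/* start remote read */

	while (len > 0)
	  {
	    *dst++ = *(vhalf *) PA_83902_IF;	/* data appears on the data port */
	    STNIC_DELAY ();
	    len -= sizeof (half);
	  }

	STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA);	/* back to no-DMA */
}

The write path is the mirror image: load RSAR/RBCR, issue CR_RWR | CR_PG0 | CR_STA, and feed halfwords into the same data port, as stnic_block_output() below does.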
*/ + +static void +stnic_block_input (struct net_device *dev, int length, struct sk_buff *skb, + int offset) +{ + char *buf = skb->data; + half val; + + STNIC_WRITE (PG0_RSAR0, offset & 0xff); + STNIC_WRITE (PG0_RSAR1, offset >> 8); + STNIC_WRITE (PG0_RBCR0, length & 0xff); + STNIC_WRITE (PG0_RBCR1, length >> 8); + STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); + + if (length & 1) + length++; + + while (length > 0) + { + val = *(vhalf *) PA_83902_IF; +#ifdef __LITTLE_ENDIAN__ + *buf++ = val & 0xff; + *buf++ = val >> 8; +#else + *buf++ = val >> 8; + *buf++ = val & 0xff; +#endif + STNIC_DELAY (); + length -= sizeof (half); + } + + STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); +} + +static void +stnic_block_output (struct net_device *dev, int length, + const unsigned char *buf, int output_page) +{ + STNIC_WRITE (PG0_RBCR0, 1); /* Write non-zero value */ + STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); + STNIC_DELAY (); + + STNIC_WRITE (PG0_RBCR0, length & 0xff); + STNIC_WRITE (PG0_RBCR1, length >> 8); + STNIC_WRITE (PG0_RSAR0, 0); + STNIC_WRITE (PG0_RSAR1, output_page); + STNIC_WRITE (STNIC_CR, CR_RWR | CR_PG0 | CR_STA); + + if (length & 1) + length++; + + while (length > 0) + { +#ifdef __LITTLE_ENDIAN__ + *(vhalf *) PA_83902_IF = ((half) buf[1] << 8) | buf[0]; +#else + *(vhalf *) PA_83902_IF = ((half) buf[0] << 8) | buf[1]; +#endif + STNIC_DELAY (); + buf += sizeof (half); + length -= sizeof (half); + } + + STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); +} + +/* This function resets the STNIC if something screws up. */ +static void +stnic_init (struct net_device *dev) +{ + stnic_reset (dev); + NS8390_init (dev, 0); +} + +static void __exit stnic_cleanup(void) +{ + unregister_netdev(stnic_dev); + free_irq(stnic_dev->irq, stnic_dev); + free_netdev(stnic_dev); +} + +module_init(stnic_probe); +module_exit(stnic_cleanup); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c new file mode 100644 index 000000000..dd7d816bd --- /dev/null +++ b/drivers/net/ethernet/8390/wd.c @@ -0,0 +1,574 @@ +/* wd.c: A WD80x3 ethernet driver for linux. */ +/* + Written 1993-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + This is a driver for WD8003 and WD8013 "compatible" ethercards. + + Thanks to Russ Nelson (nelson@crnwyr.com) for loaning me a WD8013. + + Changelog: + + Paul Gortmaker : multiple card support for module users, support + for non-standard memory sizes. + + +*/ + +static const char version[] = + "wd.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "8390.h" + +#define DRV_NAME "wd" + +/* A zero-terminated list of I/O addresses to be probed. 
*/ +static unsigned int wd_portlist[] __initdata = +{0x300, 0x280, 0x380, 0x240, 0}; + +static int wd_probe1(struct net_device *dev, int ioaddr); + +static int wd_open(struct net_device *dev); +static void wd_reset_8390(struct net_device *dev); +static void wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void wd_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void wd_block_output(struct net_device *dev, int count, + const unsigned char *buf, int start_page); +static int wd_close(struct net_device *dev); + +static u32 wd_msg_enable; + +#define WD_START_PG 0x00 /* First page of TX buffer */ +#define WD03_STOP_PG 0x20 /* Last page +1 of RX ring */ +#define WD13_STOP_PG 0x40 /* Last page +1 of RX ring */ + +#define WD_CMDREG 0 /* Offset to ASIC command register. */ +#define WD_RESET 0x80 /* Board reset, in WD_CMDREG. */ +#define WD_MEMENB 0x40 /* Enable the shared memory. */ +#define WD_CMDREG5 5 /* Offset to 16-bit-only ASIC register 5. */ +#define ISA16 0x80 /* Enable 16 bit access from the ISA bus. */ +#define NIC16 0x40 /* Enable 16 bit access from the 8390. */ +#define WD_NIC_OFFSET 16 /* Offset to the 8390 from the base_addr. */ +#define WD_IO_EXTENT 32 + + +/* Probe for the WD8003 and WD8013. These cards have the station + address PROM at I/O ports +8 to +13, with a checksum + following. A Soundblaster can have the same checksum as an WDethercard, + so we have an extra exclusionary check for it. + + The wd_probe1() routine initializes the card and fills the + station address field. */ + +static int __init do_wd_probe(struct net_device *dev) +{ + int i; + struct resource *r; + int base_addr = dev->base_addr; + int irq = dev->irq; + int mem_start = dev->mem_start; + int mem_end = dev->mem_end; + + if (base_addr > 0x1ff) { /* Check a user specified location. */ + r = request_region(base_addr, WD_IO_EXTENT, "wd-probe"); + if ( r == NULL) + return -EBUSY; + i = wd_probe1(dev, base_addr); + if (i != 0) + release_region(base_addr, WD_IO_EXTENT); + else + r->name = dev->name; + return i; + } + else if (base_addr != 0) /* Don't probe at all. */ + return -ENXIO; + + for (i = 0; wd_portlist[i]; i++) { + int ioaddr = wd_portlist[i]; + r = request_region(ioaddr, WD_IO_EXTENT, "wd-probe"); + if (r == NULL) + continue; + if (wd_probe1(dev, ioaddr) == 0) { + r->name = dev->name; + return 0; + } + release_region(ioaddr, WD_IO_EXTENT); + dev->irq = irq; + dev->mem_start = mem_start; + dev->mem_end = mem_end; + } + + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init wd_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_wd_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops wd_netdev_ops = { + .ndo_open = wd_open, + .ndo_stop = wd_close, + .ndo_start_xmit = ei_start_xmit, + .ndo_tx_timeout = ei_tx_timeout, + .ndo_get_stats = ei_get_stats, + .ndo_set_rx_mode = ei_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ei_poll, +#endif +}; + +static int __init wd_probe1(struct net_device *dev, int ioaddr) +{ + int i; + int err; + int checksum = 0; + int ancient = 0; /* An old card without config registers. 
*/ + int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */ + const char *model_name; + static unsigned version_printed; + struct ei_device *ei_local = netdev_priv(dev); + + for (i = 0; i < 8; i++) + checksum += inb(ioaddr + 8 + i); + if (inb(ioaddr + 8) == 0xff /* Extra check to avoid soundcard. */ + || inb(ioaddr + 9) == 0xff + || (checksum & 0xff) != 0xFF) + return -ENODEV; + + /* Check for semi-valid mem_start/end values if supplied. */ + if ((dev->mem_start % 0x2000) || (dev->mem_end % 0x2000)) { + netdev_warn(dev, + "wd.c: user supplied mem_start or mem_end not on 8kB boundary - ignored.\n"); + dev->mem_start = 0; + dev->mem_end = 0; + } + + if ((wd_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0)) + netdev_info(dev, version); + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr); + + /* The following PureData probe code was contributed by + Mike Jagdis . Puredata does software + configuration differently from others so we have to check for them. + This detects an 8 bit, 16 bit or dumb (Toshiba, jumpered) card. + */ + if (inb(ioaddr+0) == 'P' && inb(ioaddr+1) == 'D') { + unsigned char reg5 = inb(ioaddr+5); + + switch (inb(ioaddr+2)) { + case 0x03: word16 = 0; model_name = "PDI8023-8"; break; + case 0x05: word16 = 0; model_name = "PDUC8023"; break; + case 0x0a: word16 = 1; model_name = "PDI8023-16"; break; + /* Either 0x01 (dumb) or they've released a new version. */ + default: word16 = 0; model_name = "PDI8023"; break; + } + dev->mem_start = ((reg5 & 0x1c) + 0xc0) << 12; + dev->irq = (reg5 & 0xe0) == 0xe0 ? 10 : (reg5 >> 5) + 1; + } else { /* End of PureData probe */ + /* This method of checking for a 16-bit board is borrowed from the + we.c driver. A simpler method is just to look in ASIC reg. 0x03. + I'm comparing the two method in alpha test to make certain they + return the same result. */ + /* Check for the old 8 bit board - it has register 0/8 aliasing. + Do NOT check i>=6 here -- it hangs the old 8003 boards! */ + for (i = 0; i < 6; i++) + if (inb(ioaddr+i) != inb(ioaddr+8+i)) + break; + if (i >= 6) { + ancient = 1; + model_name = "WD8003-old"; + word16 = 0; + } else { + int tmp = inb(ioaddr+1); /* fiddle with 16bit bit */ + outb( tmp ^ 0x01, ioaddr+1 ); /* attempt to clear 16bit bit */ + if (((inb( ioaddr+1) & 0x01) == 0x01) /* A 16 bit card */ + && (tmp & 0x01) == 0x01 ) { /* In a 16 slot. */ + int asic_reg5 = inb(ioaddr+WD_CMDREG5); + /* Magic to set ASIC to word-wide mode. */ + outb( NIC16 | (asic_reg5&0x1f), ioaddr+WD_CMDREG5); + outb(tmp, ioaddr+1); + model_name = "WD8013"; + word16 = 1; /* We have a 16bit board here! */ + } else { + model_name = "WD8003"; + word16 = 0; + } + outb(tmp, ioaddr+1); /* Restore original reg1 value. */ + } +#ifndef final_version + if ( !ancient && (inb(ioaddr+1) & 0x01) != (word16 & 0x01)) + pr_cont("\nWD80?3: Bus width conflict, %d (probe) != %d (reg report).", + word16 ? 16 : 8, + (inb(ioaddr+1) & 0x01) ? 16 : 8); +#endif + } + +#if defined(WD_SHMEM) && WD_SHMEM > 0x80000 + /* Allow a compile-time override. */ + dev->mem_start = WD_SHMEM; +#else + if (dev->mem_start == 0) { + /* Sanity and old 8003 check */ + int reg0 = inb(ioaddr); + if (reg0 == 0xff || reg0 == 0) { + /* Future plan: this could check a few likely locations first. */ + dev->mem_start = 0xd0000; + pr_cont(" assigning address %#lx", dev->mem_start); + } else { + int high_addr_bits = inb(ioaddr+WD_CMDREG5) & 0x1f; + /* Some boards don't have the register 5 -- it returns 0xff. 
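To make the address decode below concrete, take an illustrative board mapped at 0xd0000: the command register would read something like 0x68 (WD_MEMENB plus low address bits 0x28) and the register-5 address bits would be 0x01, so mem_start = (0x28 << 13) + (0x01 << 19) = 0x50000 + 0x80000 = 0xd0000, the same value used as the fallback default above when the register cannot be read.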
*/ + if (high_addr_bits == 0x1f || word16 == 0) + high_addr_bits = 0x01; + dev->mem_start = ((reg0&0x3f) << 13) + (high_addr_bits << 19); + } + } +#endif + + /* The 8390 isn't at the base address -- the ASIC regs are there! */ + dev->base_addr = ioaddr+WD_NIC_OFFSET; + + if (dev->irq < 2) { + static const int irqmap[] = {9, 3, 5, 7, 10, 11, 15, 4}; + int reg1 = inb(ioaddr+1); + int reg4 = inb(ioaddr+4); + if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */ + short nic_addr = ioaddr+WD_NIC_OFFSET; + unsigned long irq_mask; + + /* We have an old-style ethercard that doesn't report its IRQ + line. Do autoirq to find the IRQ line. Note that this IS NOT + a reliable way to trigger an interrupt. */ + outb_p(E8390_NODMA + E8390_STOP, nic_addr); + outb(0x00, nic_addr+EN0_IMR); /* Disable all intrs. */ + + irq_mask = probe_irq_on(); + outb_p(0xff, nic_addr + EN0_IMR); /* Enable all interrupts. */ + outb_p(0x00, nic_addr + EN0_RCNTLO); + outb_p(0x00, nic_addr + EN0_RCNTHI); + outb(E8390_RREAD+E8390_START, nic_addr); /* Trigger it... */ + mdelay(20); + dev->irq = probe_irq_off(irq_mask); + + outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */ + + if (netif_msg_drv(ei_local)) + pr_cont(" autoirq is %d", dev->irq); + if (dev->irq < 2) + dev->irq = word16 ? 10 : 5; + } else + dev->irq = irqmap[((reg4 >> 5) & 0x03) + (reg1 & 0x04)]; + } else if (dev->irq == 2) /* Fixup bogosity: IRQ2 is really IRQ9 */ + dev->irq = 9; + + /* Snarf the interrupt now. There's no point in waiting since we cannot + share and the board will usually be enabled. */ + i = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); + if (i) { + pr_cont(" unable to get IRQ %d.\n", dev->irq); + return i; + } + + /* OK, were are certain this is going to work. Setup the device. */ + ei_status.name = model_name; + ei_status.word16 = word16; + ei_status.tx_start_page = WD_START_PG; + ei_status.rx_start_page = WD_START_PG + TX_PAGES; + + /* Don't map in the shared memory until the board is actually opened. */ + + /* Some cards (eg WD8003EBT) can be jumpered for more (32k!) memory. */ + if (dev->mem_end != 0) { + ei_status.stop_page = (dev->mem_end - dev->mem_start)/256; + ei_status.priv = dev->mem_end - dev->mem_start; + } else { + ei_status.stop_page = word16 ? WD13_STOP_PG : WD03_STOP_PG; + dev->mem_end = dev->mem_start + (ei_status.stop_page - WD_START_PG)*256; + ei_status.priv = (ei_status.stop_page - WD_START_PG)*256; + } + + ei_status.mem = ioremap(dev->mem_start, ei_status.priv); + if (!ei_status.mem) { + free_irq(dev->irq, dev); + return -ENOMEM; + } + + pr_cont(" %s, IRQ %d, shared memory at %#lx-%#lx.\n", + model_name, dev->irq, dev->mem_start, dev->mem_end-1); + + ei_status.reset_8390 = wd_reset_8390; + ei_status.block_input = wd_block_input; + ei_status.block_output = wd_block_output; + ei_status.get_8390_hdr = wd_get_8390_hdr; + + dev->netdev_ops = &wd_netdev_ops; + NS8390_init(dev, 0); + ei_local->msg_enable = wd_msg_enable; + +#if 1 + /* Enable interrupt generation on softconfig cards -- M.U */ + /* .. but possibly potentially unsafe - Donald */ + if (inb(ioaddr+14) & 0x20) + outb(inb(ioaddr+4)|0x80, ioaddr+4); +#endif + + err = register_netdev(dev); + if (err) { + free_irq(dev->irq, dev); + iounmap(ei_status.mem); + } + return err; +} + +static int +wd_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + + /* Map in the shared memory. Always set register 0 last to remain + compatible with very old boards. 
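Continuing the illustrative 0xd0000 layout from the probe above, on a 16-bit card the two values written here work out to reg0 = ((0xd0000 >> 13) & 0x3f) | WD_MEMENB = 0x28 | 0x40 = 0x68 and reg5 = ((0xd0000 >> 19) & 0x1f) | NIC16 = 0x01 | 0x40 = 0x41; the probe's decode and this encode are exact inverses of each other.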
*/ + ei_status.reg0 = ((dev->mem_start>>13) & 0x3f) | WD_MEMENB; + ei_status.reg5 = ((dev->mem_start>>19) & 0x1f) | NIC16; + + if (ei_status.word16) + outb(ei_status.reg5, ioaddr+WD_CMDREG5); + outb(ei_status.reg0, ioaddr); /* WD_CMDREG */ + + return ei_open(dev); +} + +static void +wd_reset_8390(struct net_device *dev) +{ + int wd_cmd_port = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + struct ei_device *ei_local = netdev_priv(dev); + + outb(WD_RESET, wd_cmd_port); + netif_dbg(ei_local, hw, dev, "resetting the WD80x3 t=%lu...\n", + jiffies); + ei_status.txing = 0; + + /* Set up the ASIC registers, just in case something changed them. */ + outb((((dev->mem_start>>13) & 0x3f)|WD_MEMENB), wd_cmd_port); + if (ei_status.word16) + outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5); + + netif_dbg(ei_local, hw, dev, "reset done\n"); +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void +wd_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + void __iomem *hdr_start = ei_status.mem + ((ring_page - WD_START_PG)<<8); + + /* We'll always get a 4 byte header read followed by a packet read, so + we enable 16 bit mode before the header, and disable after the body. */ + if (ei_status.word16) + outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5); + +#ifdef __BIG_ENDIAN + /* Officially this is what we are doing, but the readl() is faster */ + /* unfortunately it isn't endian aware of the struct */ + memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); + hdr->count = le16_to_cpu(hdr->count); +#else + ((unsigned int*)hdr)[0] = readl(hdr_start); +#endif +} + +/* Block input and output are easy on shared memory ethercards, and trivial + on the Western digital card where there is no choice of how to do it. + The only complications are that the ring buffer wraps, and need to map + switch between 8- and 16-bit modes. */ + +static void +wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + unsigned long offset = ring_offset - (WD_START_PG<<8); + void __iomem *xfer_start = ei_status.mem + offset; + + if (offset + count > ei_status.priv) { + /* We must wrap the input move. */ + int semi_count = ei_status.priv - offset; + memcpy_fromio(skb->data, xfer_start, semi_count); + count -= semi_count; + memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); + } else { + /* Packet is in one chunk -- we can copy + cksum. */ + memcpy_fromio(skb->data, xfer_start, count); + } + + /* Turn off 16 bit access so that reboot works. ISA brain-damage */ + if (ei_status.word16) + outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5); +} + +static void +wd_block_output(struct net_device *dev, int count, const unsigned char *buf, + int start_page) +{ + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + void __iomem *shmem = ei_status.mem + ((start_page - WD_START_PG)<<8); + + + if (ei_status.word16) { + /* Turn on and off 16 bit access so that reboot works. 
*/ + outb(ISA16 | ei_status.reg5, wd_cmdreg+WD_CMDREG5); + memcpy_toio(shmem, buf, count); + outb(ei_status.reg5, wd_cmdreg+WD_CMDREG5); + } else + memcpy_toio(shmem, buf, count); +} + + +static int +wd_close(struct net_device *dev) +{ + int wd_cmdreg = dev->base_addr - WD_NIC_OFFSET; /* WD_CMDREG */ + struct ei_device *ei_local = netdev_priv(dev); + + netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard.\n"); + ei_close(dev); + + /* Change from 16-bit to 8-bit shared memory so reboot works. */ + if (ei_status.word16) + outb(ei_status.reg5, wd_cmdreg + WD_CMDREG5 ); + + /* And disable the shared memory. */ + outb(ei_status.reg0 & ~WD_MEMENB, wd_cmdreg); + + return 0; +} + + +#ifdef MODULE +#define MAX_WD_CARDS 4 /* Max number of wd cards per module */ +static struct net_device *dev_wd[MAX_WD_CARDS]; +static int io[MAX_WD_CARDS]; +static int irq[MAX_WD_CARDS]; +static int mem[MAX_WD_CARDS]; +static int mem_end[MAX_WD_CARDS]; /* for non std. mem size */ + +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(mem, int, NULL, 0); +module_param_array(mem_end, int, NULL, 0); +module_param_named(msg_enable, wd_msg_enable, uint, (S_IRUSR|S_IRGRP|S_IROTH)); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s) (ignored for PureData boards)"); +MODULE_PARM_DESC(mem, "memory base address(es)(ignored for PureData boards)"); +MODULE_PARM_DESC(mem_end, "memory end address(es)"); +MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); +MODULE_DESCRIPTION("ISA Western Digital wd8003/wd8013 ; SMC Elite, Elite16 ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that only a single autoprobe takes place per call. +ISA device autoprobes on a running machine are not recommended. 
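For module users, an illustrative load line matching the parameters declared above (values are examples only, one comma-separated entry per card, up to MAX_WD_CARDS of them) would be: modprobe wd io=0x300,0x280 irq=10,11 mem=0xd0000,0xd4000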
*/ + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) break; /* only autoprobe 1st one */ + printk(KERN_NOTICE "wd.c: Presently autoprobing (not recommended) for a single card.\n"); + } + dev = alloc_ei_netdev(); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->mem_start = mem[this_dev]; + dev->mem_end = mem_end[this_dev]; + if (do_wd_probe(dev) == 0) { + dev_wd[found++] = dev; + continue; + } + free_netdev(dev); + printk(KERN_WARNING "wd.c: No wd80x3 card found (i/o = 0x%x).\n", io[this_dev]); + break; + } + if (found) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT); + iounmap(ei_status.mem); +} + +void __exit +cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_WD_CARDS; this_dev++) { + struct net_device *dev = dev_wd[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c new file mode 100644 index 000000000..8308728fa --- /dev/null +++ b/drivers/net/ethernet/8390/zorro8390.c @@ -0,0 +1,458 @@ +/* + * Amiga Linux/m68k and Linux/PPC Zorro NS8390 Ethernet Driver + * + * (C) Copyright 1998-2000 by some Elitist 680x0 Users(TM) + * + * --------------------------------------------------------------------------- + * + * This program is based on all the other NE2000 drivers for Linux + * + * --------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * --------------------------------------------------------------------------- + * + * The Ariadne II and X-Surf are Zorro-II boards containing Realtek RTL8019AS + * Ethernet Controllers. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define EI_SHIFT(x) (ei_local->reg_offset[x]) +#define ei_inb(port) in_8(port) +#define ei_outb(val, port) out_8(port, val) +#define ei_inb_p(port) in_8(port) +#define ei_outb_p(val, port) out_8(port, val) + +static const char version[] = + "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; + +static u32 zorro8390_msg_enable; + +#include "lib8390.c" + +#define DRV_NAME "zorro8390" + +#define NE_BASE (dev->base_addr) +#define NE_CMD (0x00 * 2) +#define NE_DATAPORT (0x10 * 2) /* NatSemi-defined port window offset */ +#define NE_RESET (0x1f * 2) /* Issue a read to reset, + * a write to clear. 
*/ +#define NE_IO_EXTENT (0x20 * 2) + +#define NE_EN0_ISR (0x07 * 2) +#define NE_EN0_DCFG (0x0e * 2) + +#define NE_EN0_RSARLO (0x08 * 2) +#define NE_EN0_RSARHI (0x09 * 2) +#define NE_EN0_RCNTLO (0x0a * 2) +#define NE_EN0_RXCR (0x0c * 2) +#define NE_EN0_TXCR (0x0d * 2) +#define NE_EN0_RCNTHI (0x0b * 2) +#define NE_EN0_IMR (0x0f * 2) + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +#define WORDSWAP(a) ((((a) >> 8) & 0xff) | ((a) << 8)) + +static struct card_info { + zorro_id id; + const char *name; + unsigned int offset; +} cards[] = { + { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 }, + { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 }, +}; + +/* Hard reset the card. This used to pause for the same period that a + * 8390 reset command required, but that shouldn't be necessary. + */ +static void zorro8390_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + struct ei_device *ei_local = netdev_priv(dev); + + netif_dbg(ei_local, hw, dev, "resetting - t=%ld...\n", jiffies); + + z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) { + netdev_warn(dev, "%s: did not complete\n", __func__); + break; + } + z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + * we don't need to be concerned with ring wrap as the header will be at + * the start of a page, so we optimize accordingly. + */ +static void zorro8390_get_8390_hdr(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page) +{ + int nic_base = dev->base_addr; + int cnt; + short *ptrs; + + /* This *shouldn't* happen. + * If it does, it's the last thing you'll see + */ + if (ei_status.dmaing) { + netdev_warn(dev, + "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); + z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO); + z_writeb(0, nic_base + NE_EN0_RCNTHI); + z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */ + z_writeb(ring_page, nic_base + NE_EN0_RSARHI); + z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + + ptrs = (short *)hdr; + for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr) >> 1; cnt++) + *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ + + hdr->count = WORDSWAP(hdr->count); + + ei_status.dmaing &= ~0x01; +} + +/* Block input and output, similar to the Crynwr packet driver. + * If you are porting to a new ethercard, look at the packet driver source + * for hints. The NEx000 doesn't share the on-board packet memory -- + * you have to put the packet out through the "remote DMA" dataport + * using z_writeb. + */ +static void zorro8390_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + int nic_base = dev->base_addr; + char *buf = skb->data; + short *ptrs; + int cnt; + + /* This *shouldn't* happen. 
+ * If it does, it's the last thing you'll see + */ + if (ei_status.dmaing) { + netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); + z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); + z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); + z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO); + z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI); + z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD); + ptrs = (short *)buf; + for (cnt = 0; cnt < count >> 1; cnt++) + *ptrs++ = z_readw(NE_BASE + NE_DATAPORT); + if (count & 0x01) + buf[count - 1] = z_readb(NE_BASE + NE_DATAPORT); + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ + ei_status.dmaing &= ~0x01; +} + +static void zorro8390_block_output(struct net_device *dev, int count, + const unsigned char *buf, + const int start_page) +{ + int nic_base = NE_BASE; + unsigned long dma_start; + short *ptrs; + int cnt; + + /* Round the count up for word writes. Do we need to do this? + * What effect will an odd byte count have on the 8390? + * I should check someday. + */ + if (count & 0x01) + count++; + + /* This *shouldn't* happen. + * If it does, it's the last thing you'll see + */ + if (ei_status.dmaing) { + netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n", + __func__, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); + + /* Now the normal output. */ + z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO); + z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI); + z_writeb(0x00, nic_base + NE_EN0_RSARLO); + z_writeb(start_page, nic_base + NE_EN0_RSARHI); + + z_writeb(E8390_RWRITE + E8390_START, nic_base + NE_CMD); + ptrs = (short *)buf; + for (cnt = 0; cnt < count >> 1; cnt++) + z_writew(*ptrs++, NE_BASE + NE_DATAPORT); + + dma_start = jiffies; + + while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0) + if (time_after(jiffies, dma_start + 2 * HZ / 100)) { + /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC\n"); + zorro8390_reset_8390(dev); + __NS8390_init(dev, 1); + break; + } + + z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */ + ei_status.dmaing &= ~0x01; +} + +static int zorro8390_open(struct net_device *dev) +{ + __ei_open(dev); + return 0; +} + +static int zorro8390_close(struct net_device *dev) +{ + struct ei_device *ei_local = netdev_priv(dev); + + netif_dbg(ei_local, ifdown, dev, "Shutting down ethercard\n"); + __ei_close(dev); + return 0; +} + +static void zorro8390_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + free_irq(IRQ_AMIGA_PORTS, dev); + release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT * 2); + free_netdev(dev); +} + +static struct zorro_device_id zorro8390_zorro_tbl[] = { + { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, }, + { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl); + +static const struct net_device_ops zorro8390_netdev_ops = { + .ndo_open = zorro8390_open, + .ndo_stop = zorro8390_close, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_rx_mode = __ei_set_multicast_list, 
+ .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = __ei_poll, +#endif +}; + +static int zorro8390_init(struct net_device *dev, unsigned long board, + const char *name, void __iomem *ioaddr) +{ + int i; + int err; + unsigned char SA_prom[32]; + int start_page, stop_page; + struct ei_device *ei_local = netdev_priv(dev); + static u32 zorro8390_offsets[16] = { + 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, + 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, + }; + + /* Reset card. Who knows what dain-bramaged state it was left in. */ + { + unsigned long reset_start_time = jiffies; + + z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET); + + while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0) + if (time_after(jiffies, + reset_start_time + 2 * HZ / 100)) { + netdev_warn(dev, "not found (no reset ack)\n"); + return -ENODEV; + } + + z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */ + } + + /* Read the 16 bytes of station address PROM. + * We must first initialize registers, + * similar to NS8390_init(eifdev, 0). + * We can't reliably read the SAPROM address without this. + * (I learned the hard way!). + */ + { + static const struct { + u32 value; + u32 offset; + } program_seq[] = { + {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD}, + /* Select page 0 */ + {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */ + {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */ + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_IMR}, /* Mask completion irq */ + {0xFF, NE_EN0_ISR}, + {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */ + {32, NE_EN0_RCNTLO}, + {0x00, NE_EN0_RCNTHI}, + {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */ + {0x00, NE_EN0_RSARHI}, + {E8390_RREAD + E8390_START, NE_CMD}, + }; + for (i = 0; i < ARRAY_SIZE(program_seq); i++) + z_writeb(program_seq[i].value, + ioaddr + program_seq[i].offset); + } + for (i = 0; i < 16; i++) { + SA_prom[i] = z_readb(ioaddr + NE_DATAPORT); + (void)z_readb(ioaddr + NE_DATAPORT); + } + + /* We must set the 8390 for word mode. 
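A note on the station-address read above: the chip was put into byte-wide access (DCFG 0x48) for the PROM dump, and on these RTL8019AS boards each PROM byte typically shows up twice in that byte stream, which is why every second z_readb() in the loop is thrown away; an illustrative raw stream of 00 00 c0 c0 12 12 ... therefore yields the address bytes 00 c0 12 .... The write below (DCFG 0x49) switches the part back to 16-bit transfers for normal ring operation.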
*/ + z_writeb(0x49, ioaddr + NE_EN0_DCFG); + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + dev->base_addr = (unsigned long)ioaddr; + dev->irq = IRQ_AMIGA_PORTS; + + /* Install the Interrupt handler */ + i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, + IRQF_SHARED, DRV_NAME, dev); + if (i) + return i; + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = SA_prom[i]; + + pr_debug("Found ethernet address: %pM\n", dev->dev_addr); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = 1; + + ei_status.rx_start_page = start_page + TX_PAGES; + + ei_status.reset_8390 = zorro8390_reset_8390; + ei_status.block_input = zorro8390_block_input; + ei_status.block_output = zorro8390_block_output; + ei_status.get_8390_hdr = zorro8390_get_8390_hdr; + ei_status.reg_offset = zorro8390_offsets; + + dev->netdev_ops = &zorro8390_netdev_ops; + __NS8390_init(dev, 0); + + ei_local->msg_enable = zorro8390_msg_enable; + + err = register_netdev(dev); + if (err) { + free_irq(IRQ_AMIGA_PORTS, dev); + return err; + } + + netdev_info(dev, "%s at 0x%08lx, Ethernet Address %pM\n", + name, board, dev->dev_addr); + + return 0; +} + +static int zorro8390_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + struct net_device *dev; + unsigned long board, ioaddr; + int err, i; + + for (i = ARRAY_SIZE(cards) - 1; i >= 0; i--) + if (z->id == cards[i].id) + break; + if (i < 0) + return -ENODEV; + + board = z->resource.start; + ioaddr = board + cards[i].offset; + dev = ____alloc_ei_netdev(0); + if (!dev) + return -ENOMEM; + if (!request_mem_region(ioaddr, NE_IO_EXTENT * 2, DRV_NAME)) { + free_netdev(dev); + return -EBUSY; + } + err = zorro8390_init(dev, board, cards[i].name, ZTWO_VADDR(ioaddr)); + if (err) { + release_mem_region(ioaddr, NE_IO_EXTENT * 2); + free_netdev(dev); + return err; + } + zorro_set_drvdata(z, dev); + return 0; +} + +static struct zorro_driver zorro8390_driver = { + .name = "zorro8390", + .id_table = zorro8390_zorro_tbl, + .probe = zorro8390_init_one, + .remove = zorro8390_remove_one, +}; + +static int __init zorro8390_init_module(void) +{ + return zorro_register_driver(&zorro8390_driver); +} + +static void __exit zorro8390_cleanup_module(void) +{ + zorro_unregister_driver(&zorro8390_driver); +} + +module_init(zorro8390_init_module); +module_exit(zorro8390_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig new file mode 100644 index 000000000..eadcb0538 --- /dev/null +++ b/drivers/net/ethernet/Kconfig @@ -0,0 +1,178 @@ +# +# Ethernet LAN device configuration +# + +menuconfig ETHERNET + bool "Ethernet driver support" + depends on NET + default y + ---help--- + This section contains all the Ethernet device drivers. 
+ +if ETHERNET + +config MDIO + tristate + +config SUNGEM_PHY + tristate + +source "drivers/net/ethernet/3com/Kconfig" +source "drivers/net/ethernet/adaptec/Kconfig" +source "drivers/net/ethernet/aeroflex/Kconfig" +source "drivers/net/ethernet/agere/Kconfig" +source "drivers/net/ethernet/allwinner/Kconfig" +source "drivers/net/ethernet/alteon/Kconfig" +source "drivers/net/ethernet/altera/Kconfig" +source "drivers/net/ethernet/amd/Kconfig" +source "drivers/net/ethernet/apm/Kconfig" +source "drivers/net/ethernet/apple/Kconfig" +source "drivers/net/ethernet/arc/Kconfig" +source "drivers/net/ethernet/atheros/Kconfig" +source "drivers/net/ethernet/cadence/Kconfig" +source "drivers/net/ethernet/adi/Kconfig" +source "drivers/net/ethernet/broadcom/Kconfig" +source "drivers/net/ethernet/brocade/Kconfig" +source "drivers/net/ethernet/calxeda/Kconfig" +source "drivers/net/ethernet/chelsio/Kconfig" +source "drivers/net/ethernet/cirrus/Kconfig" +source "drivers/net/ethernet/cisco/Kconfig" + +config CX_ECAT + tristate "Beckhoff CX5020 EtherCAT master support" + depends on PCI + depends on X86 || COMPILE_TEST + ---help--- + Driver for EtherCAT master module located on CCAT FPGA + that can be found on Beckhoff CX5020, and possibly other of CX + Beckhoff CX series industrial PCs. + + To compile this driver as a module, choose M here. The module + will be called ec_bhf. + +source "drivers/net/ethernet/davicom/Kconfig" + +config DNET + tristate "Dave ethernet support (DNET)" + depends on HAS_IOMEM + select PHYLIB + ---help--- + The Dave ethernet interface (DNET) is found on Qong Board FPGA. + Say Y to include support for the DNET chip. + + To compile this driver as a module, choose M here: the module + will be called dnet. + +source "drivers/net/ethernet/dec/Kconfig" +source "drivers/net/ethernet/dlink/Kconfig" +source "drivers/net/ethernet/emulex/Kconfig" +source "drivers/net/ethernet/neterion/Kconfig" +source "drivers/net/ethernet/faraday/Kconfig" +source "drivers/net/ethernet/freescale/Kconfig" +source "drivers/net/ethernet/fujitsu/Kconfig" +source "drivers/net/ethernet/hisilicon/Kconfig" +source "drivers/net/ethernet/hp/Kconfig" +source "drivers/net/ethernet/ibm/Kconfig" +source "drivers/net/ethernet/intel/Kconfig" +source "drivers/net/ethernet/i825xx/Kconfig" +source "drivers/net/ethernet/xscale/Kconfig" +source "drivers/net/ethernet/icplus/Kconfig" + +config JME + tristate "JMicron(R) PCI-Express Gigabit Ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + This driver supports the PCI-Express gigabit ethernet adapters + based on JMicron JMC250 chipset. + + To compile this driver as a module, choose M here. The module + will be called jme. + +config KORINA + tristate "Korina (IDT RC32434) Ethernet support" + depends on MIKROTIK_RB532 + ---help--- + If you have a Mikrotik RouterBoard 500 or IDT RC32434 + based system say Y. Otherwise say N. + +config LANTIQ_ETOP + tristate "Lantiq SoC ETOP driver" + depends on SOC_TYPE_XWAY + ---help--- + Support for the MII0 inside the Lantiq SoC + +source "drivers/net/ethernet/marvell/Kconfig" +source "drivers/net/ethernet/mellanox/Kconfig" +source "drivers/net/ethernet/micrel/Kconfig" +source "drivers/net/ethernet/microchip/Kconfig" +source "drivers/net/ethernet/moxa/Kconfig" +source "drivers/net/ethernet/myricom/Kconfig" + +config FEALNX + tristate "Myson MTD-8xx PCI Ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + Say Y here to support the Myson MTD-800 family of PCI-based Ethernet + cards. 
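As a concrete, illustrative example of how these entries are consumed by the build: a kernel .config containing

	CONFIG_ETHERNET=y
	CONFIG_JME=m

builds the JMicron driver above as the module jme.ko, matching its "To compile this driver as a module" help text, while the vendor Makefile that follows this Kconfig maps each symbol onto an object or subdirectory (for instance obj-$(CONFIG_JME) += jme.o).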
+ +source "drivers/net/ethernet/natsemi/Kconfig" +source "drivers/net/ethernet/8390/Kconfig" + +config NET_NETX + tristate "NetX Ethernet support" + select MII + depends on ARCH_NETX + ---help--- + This is support for the Hilscher netX builtin Ethernet ports + + To compile this driver as a module, choose M here. The module + will be called netx-eth. + +source "drivers/net/ethernet/nuvoton/Kconfig" +source "drivers/net/ethernet/nvidia/Kconfig" +source "drivers/net/ethernet/nxp/Kconfig" +source "drivers/net/ethernet/octeon/Kconfig" +source "drivers/net/ethernet/oki-semi/Kconfig" + +config ETHOC + tristate "OpenCores 10/100 Mbps Ethernet MAC support" + depends on HAS_IOMEM && HAS_DMA + select MII + select PHYLIB + select CRC32 + select BITREVERSE + ---help--- + Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. + +source "drivers/net/ethernet/packetengines/Kconfig" +source "drivers/net/ethernet/pasemi/Kconfig" +source "drivers/net/ethernet/qlogic/Kconfig" +source "drivers/net/ethernet/qualcomm/Kconfig" +source "drivers/net/ethernet/realtek/Kconfig" +source "drivers/net/ethernet/renesas/Kconfig" +source "drivers/net/ethernet/rdc/Kconfig" +source "drivers/net/ethernet/rocker/Kconfig" +source "drivers/net/ethernet/samsung/Kconfig" +source "drivers/net/ethernet/seeq/Kconfig" +source "drivers/net/ethernet/silan/Kconfig" +source "drivers/net/ethernet/sis/Kconfig" +source "drivers/net/ethernet/sfc/Kconfig" +source "drivers/net/ethernet/sgi/Kconfig" +source "drivers/net/ethernet/smsc/Kconfig" +source "drivers/net/ethernet/stmicro/Kconfig" +source "drivers/net/ethernet/sun/Kconfig" +source "drivers/net/ethernet/tehuti/Kconfig" +source "drivers/net/ethernet/ti/Kconfig" +source "drivers/net/ethernet/tile/Kconfig" +source "drivers/net/ethernet/toshiba/Kconfig" +source "drivers/net/ethernet/tundra/Kconfig" +source "drivers/net/ethernet/via/Kconfig" +source "drivers/net/ethernet/wiznet/Kconfig" +source "drivers/net/ethernet/xilinx/Kconfig" +source "drivers/net/ethernet/xircom/Kconfig" + +endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile new file mode 100644 index 000000000..1367afcd0 --- /dev/null +++ b/drivers/net/ethernet/Makefile @@ -0,0 +1,86 @@ +# +# Makefile for the Linux network Ethernet device drivers. 
+# + +obj-$(CONFIG_NET_VENDOR_3COM) += 3com/ +obj-$(CONFIG_NET_VENDOR_8390) += 8390/ +obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ +obj-$(CONFIG_GRETH) += aeroflex/ +obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ +obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ +obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ +obj-$(CONFIG_ALTERA_TSE) += altera/ +obj-$(CONFIG_NET_VENDOR_AMD) += amd/ +obj-$(CONFIG_NET_XGENE) += apm/ +obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ +obj-$(CONFIG_NET_VENDOR_ARC) += arc/ +obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ +obj-$(CONFIG_NET_CADENCE) += cadence/ +obj-$(CONFIG_NET_BFIN) += adi/ +obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ +obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ +obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ +obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ +obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ +obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ +obj-$(CONFIG_CX_ECAT) += ec_bhf.o +obj-$(CONFIG_DM9000) += davicom/ +obj-$(CONFIG_DNET) += dnet.o +obj-$(CONFIG_NET_VENDOR_DEC) += dec/ +obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ +obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ +obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/ +obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ +obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ +obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ +obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/ +obj-$(CONFIG_NET_VENDOR_HP) += hp/ +obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ +obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ +obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ +obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ +obj-$(CONFIG_IP1000) += icplus/ +obj-$(CONFIG_JME) += jme.o +obj-$(CONFIG_KORINA) += korina.o +obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o +obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ +obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ +obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ +obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ +obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ +obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ +obj-$(CONFIG_FEALNX) += fealnx.o +obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ +obj-$(CONFIG_NET_NETX) += netx-eth.o +obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/ +obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ +obj-$(CONFIG_LPC_ENET) += nxp/ +obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ +obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ +obj-$(CONFIG_ETHOC) += ethoc.o +obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ +obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ +obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ +obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ +obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ +obj-$(CONFIG_SH_ETH) += renesas/ +obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ +obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ +obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ +obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/ +obj-$(CONFIG_NET_VENDOR_SILAN) += silan/ +obj-$(CONFIG_NET_VENDOR_SIS) += sis/ +obj-$(CONFIG_SFC) += sfc/ +obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ +obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ +obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ +obj-$(CONFIG_NET_VENDOR_SUN) += sun/ +obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ +obj-$(CONFIG_NET_VENDOR_TI) += ti/ +obj-$(CONFIG_TILE_NET) += tile/ +obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ +obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ +obj-$(CONFIG_NET_VENDOR_VIA) += via/ +obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ +obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ +obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ diff --git a/drivers/net/ethernet/adaptec/Kconfig b/drivers/net/ethernet/adaptec/Kconfig new file mode 100644 index 000000000..5c804bbe3 --- /dev/null +++ 
b/drivers/net/ethernet/adaptec/Kconfig @@ -0,0 +1,35 @@ +# +# Adaptec network device configuration +# + +config NET_VENDOR_ADAPTEC + bool "Adaptec devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Adaptec cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ADAPTEC + +config ADAPTEC_STARFIRE + tristate "Adaptec Starfire/DuraLAN support" + depends on PCI + select CRC32 + select MII + ---help--- + Say Y here if you have an Adaptec Starfire (or DuraLAN) PCI network + adapter. The DuraLAN chip is used on the 64 bit PCI boards from + Adaptec e.g. the ANA-6922A. The older 32 bit boards use the tulip + driver. + + To compile this driver as a module, choose M here: the module + will be called starfire. This is recommended. + +endif # NET_VENDOR_ADAPTEC diff --git a/drivers/net/ethernet/adaptec/Makefile b/drivers/net/ethernet/adaptec/Makefile new file mode 100644 index 000000000..6c07b758a --- /dev/null +++ b/drivers/net/ethernet/adaptec/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Adaptec network device drivers. +# + +obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c new file mode 100644 index 000000000..e498eb0b9 --- /dev/null +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -0,0 +1,2059 @@ +/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */ +/* + Written 1998-2000 by Donald Becker. + + Current maintainer is Ion Badulescu . Please + send all bug reports to me, and not to Donald Becker, as this code + has been heavily modified from Donald's original version. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + The information below comes from Donald Becker's original driver: + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support and updates available at + http://www.scyld.com/network/starfire.html + [link no longer provides useful info -jgarzik] + +*/ + +#define DRV_NAME "starfire" +#define DRV_VERSION "2.1" +#define DRV_RELDATE "July 6, 2008" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Processor type for cache alignment. */ +#include +#include + +/* + * The current frame processor firmware fails to checksum a fragment + * of length 1. If and when this is fixed, the #define below can be removed. + */ +#define HAS_BROKEN_FIRMWARE + +/* + * If using the broken firmware, data must be padded to the next 32-bit boundary. + */ +#ifdef HAS_BROKEN_FIRMWARE +#define PADDING_MASK 3 +#endif + +/* + * Define this if using the driver with the zero-copy patch + */ +#define ZEROCOPY + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define VLAN_SUPPORT +#endif + +/* The user-configurable values. 
+ These may be modified when a driver module is loaded.*/ + +/* Used for tuning interrupt latency vs. overhead. */ +static int intr_latency; +static int small_frames; + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ +static int max_interrupt_work = 20; +static int mtu; +/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). + The Starfire has a 512 element hash table based on the Ethernet CRC. */ +static const int multicast_filter_limit = 512; +/* Whether to do TCP/UDP checksums in hardware */ +static int enable_hw_cksum = 1; + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ +/* + * Set the copy breakpoint for the copy-only-tiny-frames scheme. + * Setting to > 1518 effectively disables this feature. + * + * NOTE: + * The ia64 doesn't allow for unaligned loads even of integers being + * misaligned on a 2 byte boundary. Thus always force copying of + * packets as the starfire doesn't allow for misaligned DMAs ;-( + * 23/10/2000 - Jes + * + * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64, + * at least, having unaligned frames leads to a rather serious performance + * penalty. -Ion + */ +#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) +static int rx_copybreak = PKT_BUF_SZ; +#else +static int rx_copybreak /* = 0 */; +#endif + +/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */ +#ifdef __sparc__ +#define DMA_BURST_SIZE 64 +#else +#define DMA_BURST_SIZE 128 +#endif + +/* Operational parameters that are set at compile time. */ + +/* The "native" ring sizes are either 256 or 2048. + However in some modes a descriptor may be marked to wrap the ring earlier. +*/ +#define RX_RING_SIZE 256 +#define TX_RING_SIZE 32 +/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */ +#define DONE_Q_SIZE 1024 +/* All queues must be aligned on a 256-byte boundary */ +#define QUEUE_ALIGN 256 + +#if RX_RING_SIZE > 256 +#define RX_Q_ENTRIES Rx2048QEntries +#else +#define RX_Q_ENTRIES Rx256QEntries +#endif + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2 * HZ) + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +/* 64-bit dma_addr_t */ +#define ADDR_64BITS /* This chip uses 64 bit addresses. */ +#define netdrv_addr_t __le64 +#define cpu_to_dma(x) cpu_to_le64(x) +#define dma_to_cpu(x) le64_to_cpu(x) +#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit +#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit +#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit +#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit +#define RX_DESC_ADDR_SIZE RxDescAddr64bit +#else /* 32-bit dma_addr_t */ +#define netdrv_addr_t __le32 +#define cpu_to_dma(x) cpu_to_le32(x) +#define dma_to_cpu(x) le32_to_cpu(x) +#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit +#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit +#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit +#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit +#define RX_DESC_ADDR_SIZE RxDescAddr32bit +#endif + +#define skb_first_frag_len(skb) skb_headlen(skb) +#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) + +/* Firmware names */ +#define FIRMWARE_RX "/*(DEBLOBBED)*/" +#define FIRMWARE_TX "/*(DEBLOBBED)*/" + +/* These identify the driver base version and may not be removed. 
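As a reading aid for the address-size plumbing just above, the sketch below shows how netdrv_addr_t and the cpu_to_dma()/dma_to_cpu() wrappers are meant to be used so the same descriptor-filling code builds for 32-bit and 64-bit dma_addr_t. The structure and function names here are stand-ins, not the driver's real descriptor layout.

/* Illustrative only: a stand-in descriptor with a device-order address field. */
struct starfire_desc_sketch {
	netdrv_addr_t addr;		/* __le32 or __le64, per the #ifdef above */
};

static inline void fill_desc_addr_sketch(struct starfire_desc_sketch *d,
					 dma_addr_t mapping)
{
	d->addr = cpu_to_dma(mapping);	/* CPU value -> device byte order */
}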
*/ +static const char version[] = +KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker \n" +" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +/*(DEBLOBBED)*/ + +module_param(max_interrupt_work, int, 0); +module_param(mtu, int, 0); +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param(intr_latency, int, 0); +module_param(small_frames, int, 0); +module_param(enable_hw_cksum, int, 0); +MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt"); +MODULE_PARM_DESC(mtu, "MTU (all boards)"); +MODULE_PARM_DESC(debug, "Debug level (0-6)"); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds"); +MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)"); +MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)"); + +/* + Theory of Operation + +I. Board Compatibility + +This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter. + +II. Board-specific settings + +III. Driver operation + +IIIa. Ring buffers + +The Starfire hardware uses multiple fixed-size descriptor queues/rings. The +ring sizes are set fixed by the hardware, but may optionally be wrapped +earlier by the END bit in the descriptor. +This driver uses that hardware queue size for the Rx ring, where a large +number of entries has no ill effect beyond increases the potential backlog. +The Tx ring is wrapped with the END bit, since a large hardware Tx queue +disables the queue layer priority ordering and we have no mechanism to +utilize the hardware two-level priority queue. When modifying the +RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning +levels. + +IIIb/c. Transmit/Receive Structure + +See the Adaptec manual for the many possible structures, and options for +each structure. There are far too many to document all of them here. + +For transmit this driver uses type 0/1 transmit descriptors (depending +on the 32/64 bitness of the architecture), and relies on automatic +minimum-length padding. It does not use the completion queue +consumer index, but instead checks for non-zero status entries. + +For receive this driver uses type 2/3 receive descriptors. The driver +allocates full frame size skbuffs for the Rx ring buffers, so all frames +should fit in a single descriptor. The driver does not use the completion +queue consumer index, but instead checks for non-zero status entries. + +When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff +is allocated and the frame is copied to the new skbuff. When the incoming +frame is larger, the skbuff is passed directly up the protocol stack. +Buffers consumed this way are replaced by newly allocated skbuffs in a later +phase of receive. + +A notable aspect of operation is that unaligned buffers are not permitted by +the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame +isn't longword aligned, which may cause problems on some machine +e.g. Alphas and IA64. For these architectures, the driver is forced to copy +the frame into a new skbuff unconditionally. Copied frames are put into the +skbuff at an offset of "+2", thus 16-byte aligning the IP header. + +IIId. 
Synchronization + +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and interrupt handling software. + +The send packet thread has partial control over the Tx ring and the netif_queue +status. If the number of free Tx slots in the ring falls below a certain number +(currently hardcoded to 4), it signals the upper layer to stop the queue. + +The interrupt handler has exclusive control over the Rx ring and records stats +from the Tx ring. After reaping the stats, it marks the Tx queue entry as +empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the +number of free Tx slow is above the threshold, it signals the upper layer to +restart the queue. + +IV. Notes + +IVb. References + +The Adaptec Starfire manuals, available only from Adaptec. +http://www.scyld.com/expert/100mbps.html +http://www.scyld.com/expert/NWay.html + +IVc. Errata + +- StopOnPerr is broken, don't enable +- Hardware ethernet padding exposes random data, perform software padding + instead (unverified -- works correctly for all the hardware I have) + +*/ + + + +enum chip_capability_flags {CanHaveMII=1, }; + +enum chipset { + CH_6915 = 0, +}; + +static const struct pci_device_id starfire_pci_tbl[] = { + { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); + +/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */ +static const struct chip_info { + const char *name; + int drv_flags; +} netdrv_tbl[] = { + { "Adaptec Starfire 6915", CanHaveMII }, +}; + + +/* Offsets to the device registers. + Unlike software-only systems, device drivers interact with complex hardware. + It's not useful to define symbolic names for every register bit in the + device. The name can only partially document the semantics and make + the driver longer and more difficult to read. + In general, only the important configuration values or bits changed + multiple times should be defined symbolically. +*/ +enum register_offsets { + PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074, + IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088, + MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000, + GPIOCtrl=0x5008C, TxDescCtrl=0x50090, + TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */ + TxRingHiAddr=0x5009C, /* 64 bit address extension. */ + TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4, + TxThreshold=0x500B0, + CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8, + RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0, + CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0, + RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0, + RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4, + TxMode=0x55000, VlanType=0x55064, + PerfFilterTable=0x56000, HashTable=0x56100, + TxGfpMem=0x58000, RxGfpMem=0x5a000, +}; + +/* + * Bits in the interrupt status/mask registers. + * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register + * enables all the interrupt sources that are or'ed into those status bits. 
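+ * Accordingly, the enable mask written in netdev_open() below lists the
+ * individual sources of interest rather than either summary bit.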
+ */ +enum intr_status_bits { + IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000, + IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000, + IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000, + IntrTxComplQLow=0x200000, IntrPCI=0x100000, + IntrDMAErr=0x080000, IntrTxDataLow=0x040000, + IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000, + IntrNormalSummary=0x8000, IntrTxDone=0x4000, + IntrTxDMADone=0x2000, IntrTxEmpty=0x1000, + IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400, + IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100, + IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40, + IntrNoTxCsum=0x20, IntrTxBadID=0x10, + IntrHiPriTxBadID=0x08, IntrRxGfp=0x04, + IntrTxGfp=0x02, IntrPCIPad=0x01, + /* not quite bits */ + IntrRxDone=IntrRxQ2Done | IntrRxQ1Done, + IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low, + IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe, +}; + +/* Bits in the RxFilterMode register. */ +enum rx_mode_bits { + AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01, + AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30, + PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200, + WakeupOnGFP=0x0800, +}; + +/* Bits in the TxMode register */ +enum tx_mode_bits { + MiiSoftReset=0x8000, MIILoopback=0x4000, + TxFlowEnable=0x0800, RxFlowEnable=0x0400, + PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01, +}; + +/* Bits in the TxDescCtrl register. */ +enum tx_ctrl_bits { + TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20, + TxDescSpace128=0x30, TxDescSpace256=0x40, + TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02, + TxDescType3=0x03, TxDescType4=0x04, + TxNoDMACompletion=0x08, + TxDescQAddr64bit=0x80, TxDescQAddr32bit=0, + TxHiPriFIFOThreshShift=24, TxPadLenShift=16, + TxDMABurstSizeShift=8, +}; + +/* Bits in the RxDescQCtrl register. */ +enum rx_ctrl_bits { + RxBufferLenShift=16, RxMinDescrThreshShift=0, + RxPrefetchMode=0x8000, RxVariableQ=0x2000, + Rx2048QEntries=0x4000, Rx256QEntries=0, + RxDescAddr64bit=0x1000, RxDescAddr32bit=0, + RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0, + RxDescSpace4=0x000, RxDescSpace8=0x100, + RxDescSpace16=0x200, RxDescSpace32=0x300, + RxDescSpace64=0x400, RxDescSpace128=0x500, + RxConsumerWrEn=0x80, +}; + +/* Bits in the RxDMACtrl register. 
*/ +enum rx_dmactrl_bits { + RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000, + RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000, + RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000, + RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000, + RxChecksumRejectTCPOnly=0x01000000, + RxCompletionQ2Enable=0x800000, + RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000, + RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000, + RxDMAQ2NonIP=0x400000, + RxUseBackupQueue=0x080000, RxDMACRC=0x040000, + RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8, + RxBurstSizeShift=0, +}; + +/* Bits in the RxCompletionAddr register */ +enum rx_compl_bits { + RxComplQAddr64bit=0x80, RxComplQAddr32bit=0, + RxComplProducerWrEn=0x40, + RxComplType0=0x00, RxComplType1=0x10, + RxComplType2=0x20, RxComplType3=0x30, + RxComplThreshShift=0, +}; + +/* Bits in the TxCompletionAddr register */ +enum tx_compl_bits { + TxComplQAddr64bit=0x80, TxComplQAddr32bit=0, + TxComplProducerWrEn=0x40, + TxComplIntrStatus=0x20, + CommonQueueMode=0x10, + TxComplThreshShift=0, +}; + +/* Bits in the GenCtrl register */ +enum gen_ctrl_bits { + RxEnable=0x05, TxEnable=0x0a, + RxGFPEnable=0x10, TxGFPEnable=0x20, +}; + +/* Bits in the IntrTimerCtrl register */ +enum intr_ctrl_bits { + Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100, + SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600, + IntrLatencyMask=0x1f, +}; + +/* The Rx and Tx buffer descriptors. */ +struct starfire_rx_desc { + netdrv_addr_t rxaddr; +}; +enum rx_desc_bits { + RxDescValid=1, RxDescEndRing=2, +}; + +/* Completion queue entry. */ +struct short_rx_done_desc { + __le32 status; /* Low 16 bits is length. */ +}; +struct basic_rx_done_desc { + __le32 status; /* Low 16 bits is length. */ + __le16 vlanid; + __le16 status2; +}; +struct csum_rx_done_desc { + __le32 status; /* Low 16 bits is length. */ + __le16 csum; /* Partial checksum */ + __le16 status2; +}; +struct full_rx_done_desc { + __le32 status; /* Low 16 bits is length. */ + __le16 status3; + __le16 status2; + __le16 vlanid; + __le16 csum; /* partial checksum */ + __le32 timestamp; +}; +/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */ +#ifdef VLAN_SUPPORT +typedef struct full_rx_done_desc rx_done_desc; +#define RxComplType RxComplType3 +#else /* not VLAN_SUPPORT */ +typedef struct csum_rx_done_desc rx_done_desc; +#define RxComplType RxComplType2 +#endif /* not VLAN_SUPPORT */ + +enum rx_done_bits { + RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000, +}; + +/* Type 1 Tx descriptor. */ +struct starfire_tx_desc_1 { + __le32 status; /* Upper bits are status, lower 16 length. */ + __le32 addr; +}; + +/* Type 2 Tx descriptor. */ +struct starfire_tx_desc_2 { + __le32 status; /* Upper bits are status, lower 16 length. */ + __le32 reserved; + __le64 addr; +}; + +#ifdef ADDR_64BITS +typedef struct starfire_tx_desc_2 starfire_tx_desc; +#define TX_DESC_TYPE TxDescType2 +#else /* not ADDR_64BITS */ +typedef struct starfire_tx_desc_1 starfire_tx_desc; +#define TX_DESC_TYPE TxDescType1 +#endif /* not ADDR_64BITS */ +#define TX_DESC_SPACING TxDescSpaceUnlim + +enum tx_desc_bits { + TxDescID=0xB0000000, + TxCRCEn=0x01000000, TxDescIntr=0x08000000, + TxRingWrap=0x04000000, TxCalTCP=0x02000000, +}; +struct tx_done_desc { + __le32 status; /* timestamp, index. 
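+			   (The low bits hold the completed descriptor's byte
+			   offset in the Tx ring; intr_handler() masks with
+			   0x7fff and divides by the descriptor size to get
+			   the entry index.)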
*/ +#if 0 + __le32 intrstatus; /* interrupt status */ +#endif +}; + +struct rx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; +struct tx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; + unsigned int used_slots; +}; + +#define PHY_CNT 2 +struct netdev_private { + /* Descriptor rings first for alignment. */ + struct starfire_rx_desc *rx_ring; + starfire_tx_desc *tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + /* The addresses of rx/tx-in-place skbuffs. */ + struct rx_ring_info rx_info[RX_RING_SIZE]; + struct tx_ring_info tx_info[TX_RING_SIZE]; + /* Pointers to completion queues (full pages). */ + rx_done_desc *rx_done_q; + dma_addr_t rx_done_q_dma; + unsigned int rx_done; + struct tx_done_desc *tx_done_q; + dma_addr_t tx_done_q_dma; + unsigned int tx_done; + struct napi_struct napi; + struct net_device *dev; + struct pci_dev *pci_dev; +#ifdef VLAN_SUPPORT + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + void *queue_mem; + dma_addr_t queue_mem_dma; + size_t queue_mem_size; + + /* Frequently used values: keep some adjacent for cache effect. */ + spinlock_t lock; + unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ + unsigned int cur_tx, dirty_tx, reap_tx; + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + /* These values keep track of the transceiver/media in use. */ + int speed100; /* Set if speed == 100MBit. */ + u32 tx_mode; + u32 intr_timer_ctrl; + u8 tx_threshold; + /* MII transceiver section. */ + struct mii_if_info mii_if; /* MII lib hooks/info */ + int phy_cnt; /* MII device addresses. */ + unsigned char phys[PHY_CNT]; /* MII device addresses. */ + void __iomem *base; +}; + + +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int location, int value); +static int netdev_open(struct net_device *dev); +static void check_duplex(struct net_device *dev); +static void tx_timeout(struct net_device *dev); +static void init_ring(struct net_device *dev); +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t intr_handler(int irq, void *dev_instance); +static void netdev_error(struct net_device *dev, int intr_status); +static int __netdev_rx(struct net_device *dev, int *quota); +static int netdev_poll(struct napi_struct *napi, int budget); +static void refill_rx_ring(struct net_device *dev); +static void netdev_error(struct net_device *dev, int intr_status); +static void set_rx_mode(struct net_device *dev); +static struct net_device_stats *get_stats(struct net_device *dev); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int netdev_close(struct net_device *dev); +static void netdev_media_change(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; + + +#ifdef VLAN_SUPPORT +static int netdev_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct netdev_private *np = netdev_priv(dev); + + spin_lock(&np->lock); + if (debug > 1) + printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid); + set_bit(vid, np->active_vlans); + set_rx_mode(dev); + spin_unlock(&np->lock); + + return 0; +} + +static int netdev_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct netdev_private *np = netdev_priv(dev); + + spin_lock(&np->lock); + if (debug > 1) + printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid); + clear_bit(vid, np->active_vlans); + set_rx_mode(dev); + spin_unlock(&np->lock); + + return 0; 
+} +#endif /* VLAN_SUPPORT */ + + +static const struct net_device_ops netdev_ops = { + .ndo_open = netdev_open, + .ndo_stop = netdev_close, + .ndo_start_xmit = start_tx, + .ndo_tx_timeout = tx_timeout, + .ndo_get_stats = get_stats, + .ndo_set_rx_mode = set_rx_mode, + .ndo_do_ioctl = netdev_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef VLAN_SUPPORT + .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid, +#endif +}; + +static int starfire_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *d = &pdev->dev; + struct netdev_private *np; + int i, irq, chip_idx = ent->driver_data; + struct net_device *dev; + long ioaddr; + void __iomem *base; + int drv_flags, io_size; + int boguscnt; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + if (pci_enable_device (pdev)) + return -EIO; + + ioaddr = pci_resource_start(pdev, 0); + io_size = pci_resource_len(pdev, 0); + if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) { + dev_err(d, "no PCI MEM resources, aborting\n"); + return -ENODEV; + } + + dev = alloc_etherdev(sizeof(*np)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + + irq = pdev->irq; + + if (pci_request_regions (pdev, DRV_NAME)) { + dev_err(d, "cannot reserve PCI resources, aborting\n"); + goto err_out_free_netdev; + } + + base = ioremap(ioaddr, io_size); + if (!base) { + dev_err(d, "cannot remap %#x @ %#lx, aborting\n", + io_size, ioaddr); + goto err_out_free_res; + } + + pci_set_master(pdev); + + /* enable MWI -- it vastly improves Rx performance on sparc64 */ + pci_try_set_mwi(pdev); + +#ifdef ZEROCOPY + /* Starfire can do TCP/UDP checksumming */ + if (enable_hw_cksum) + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; +#endif /* ZEROCOPY */ + +#ifdef VLAN_SUPPORT + dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; +#endif /* VLAN_RX_KILL_VID */ +#ifdef ADDR_64BITS + dev->features |= NETIF_F_HIGHDMA; +#endif /* ADDR_64BITS */ + + /* Serial EEPROM reads are hidden by the hardware. */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i); + +#if ! defined(final_version) /* Dump the EEPROM contents during development. */ + if (debug > 4) + for (i = 0; i < 0x20; i++) + printk("%2.2x%s", + (unsigned int)readb(base + EEPROMCtrl + i), + i % 16 != 15 ? " " : "\n"); +#endif + + /* Issue soft reset */ + writel(MiiSoftReset, base + TxMode); + udelay(1000); + writel(0, base + TxMode); + + /* Reset the chip to erase previous misconfiguration. 
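+   Writing 1 to PCIDeviceConfig below kicks off the reset; the loop then
+   polls for the self-clearing bit, giving up after 1000 * 10 usec.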
*/ + writel(1, base + PCIDeviceConfig); + boguscnt = 1000; + while (--boguscnt > 0) { + udelay(10); + if ((readl(base + PCIDeviceConfig) & 1) == 0) + break; + } + if (boguscnt == 0) + printk("%s: chipset reset never completed!\n", dev->name); + /* wait a little longer */ + udelay(1000); + + np = netdev_priv(dev); + np->dev = dev; + np->base = base; + spin_lock_init(&np->lock); + pci_set_drvdata(pdev, dev); + + np->pci_dev = pdev; + + np->mii_if.dev = dev; + np->mii_if.mdio_read = mdio_read; + np->mii_if.mdio_write = mdio_write; + np->mii_if.phy_id_mask = 0x1f; + np->mii_if.reg_num_mask = 0x1f; + + drv_flags = netdrv_tbl[chip_idx].drv_flags; + + np->speed100 = 1; + + /* timer resolution is 128 * 0.8us */ + np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) | + Timer10X | EnableIntrMasking; + + if (small_frames > 0) { + np->intr_timer_ctrl |= SmallFrameBypass; + switch (small_frames) { + case 1 ... 64: + np->intr_timer_ctrl |= SmallFrame64; + break; + case 65 ... 128: + np->intr_timer_ctrl |= SmallFrame128; + break; + case 129 ... 256: + np->intr_timer_ctrl |= SmallFrame256; + break; + default: + np->intr_timer_ctrl |= SmallFrame512; + if (small_frames > 512) + printk("Adjusting small_frames down to 512\n"); + break; + } + } + + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = ðtool_ops; + + netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work); + + if (mtu) + dev->mtu = mtu; + + if (register_netdev(dev)) + goto err_out_cleardev; + + printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", + dev->name, netdrv_tbl[chip_idx].name, base, + dev->dev_addr, irq); + + if (drv_flags & CanHaveMII) { + int phy, phy_idx = 0; + int mii_status; + for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) { + mdio_write(dev, phy, MII_BMCR, BMCR_RESET); + mdelay(100); + boguscnt = 1000; + while (--boguscnt > 0) + if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0) + break; + if (boguscnt == 0) { + printk("%s: PHY#%d reset never completed!\n", dev->name, phy); + continue; + } + mii_status = mdio_read(dev, phy, MII_BMSR); + if (mii_status != 0) { + np->phys[phy_idx++] = phy; + np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); + printk(KERN_INFO "%s: MII PHY found at address %d, status " + "%#4.4x advertising %#4.4x.\n", + dev->name, phy, mii_status, np->mii_if.advertising); + /* there can be only one PHY on-board */ + break; + } + } + np->phy_cnt = phy_idx; + if (np->phy_cnt > 0) + np->mii_if.phy_id = np->phys[0]; + else + memset(&np->mii_if, 0, sizeof(np->mii_if)); + } + + printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n", + dev->name, enable_hw_cksum ? "enabled" : "disabled"); + return 0; + +err_out_cleardev: + iounmap(base); +err_out_free_res: + pci_release_regions (pdev); +err_out_free_netdev: + free_netdev(dev); + return -ENODEV; +} + + +/* Read the MII Management Data I/O (MDIO) interfaces. */ +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2); + int result, boguscnt=1000; + /* ??? Should we add a busy-wait here? 
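+	   (The loop below effectively is that busy-wait: it spins until the
+	   top two bits of the value read back as 10 binary -- which the code
+	   treats as a completed, valid read -- or until boguscnt expires.)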
*/ + do { + result = readl(mdio_addr); + } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0); + if (boguscnt == 0) + return 0; + if ((result & 0xffff) == 0xffff) + return 0; + return result & 0xffff; +} + + +static void mdio_write(struct net_device *dev, int phy_id, int location, int value) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2); + writel(value, mdio_addr); + /* The busy-wait will occur before a read. */ +} + + +static int netdev_open(struct net_device *dev) +{ + const struct firmware *fw_rx, *fw_tx; + const __be32 *fw_rx_data, *fw_tx_data; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + const int irq = np->pci_dev->irq; + int i, retval; + size_t tx_size, rx_size; + size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; + + /* Do we ever need to reset the chip??? */ + + retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); + if (retval) + return retval; + + /* Disable the Rx and Tx, and reset the chip. */ + writel(0, ioaddr + GenCtrl); + writel(1, ioaddr + PCIDeviceConfig); + if (debug > 1) + printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", + dev->name, irq); + + /* Allocate the various queues. */ + if (!np->queue_mem) { + tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; + rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; + tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; + rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE; + np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; + np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); + if (np->queue_mem == NULL) { + free_irq(irq, dev); + return -ENOMEM; + } + + np->tx_done_q = np->queue_mem; + np->tx_done_q_dma = np->queue_mem_dma; + np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size; + np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size; + np->tx_ring = (void *) np->rx_done_q + rx_done_q_size; + np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size; + np->rx_ring = (void *) np->tx_ring + tx_ring_size; + np->rx_ring_dma = np->tx_ring_dma + tx_ring_size; + } + + /* Start with no carrier, it gets adjusted later */ + netif_carrier_off(dev); + init_ring(dev); + /* Set the size of the Rx buffers. */ + writel((np->rx_buf_sz << RxBufferLenShift) | + (0 << RxMinDescrThreshShift) | + RxPrefetchMode | RxVariableQ | + RX_Q_ENTRIES | + RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE | + RxDescSpace4, + ioaddr + RxDescQCtrl); + + /* Set up the Rx DMA controller. 
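+   Note the burst size is programmed as DMA_BURST_SIZE / 32, i.e. the field
+   is in 32-byte units: 64-byte bursts on sparc, 128-byte bursts elsewhere.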
*/ + writel(RxChecksumIgnore | + (0 << RxEarlyIntThreshShift) | + (6 << RxHighPrioThreshShift) | + ((DMA_BURST_SIZE / 32) << RxBurstSizeShift), + ioaddr + RxDMACtrl); + + /* Set Tx descriptor */ + writel((2 << TxHiPriFIFOThreshShift) | + (0 << TxPadLenShift) | + ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) | + TX_DESC_Q_ADDR_SIZE | + TX_DESC_SPACING | TX_DESC_TYPE, + ioaddr + TxDescCtrl); + + writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr); + writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr); + writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr); + writel(np->rx_ring_dma, ioaddr + RxDescQAddr); + writel(np->tx_ring_dma, ioaddr + TxRingPtr); + + writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr); + writel(np->rx_done_q_dma | + RxComplType | + (0 << RxComplThreshShift), + ioaddr + RxCompletionAddr); + + if (debug > 1) + printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name); + + /* Fill both the Tx SA register and the Rx perfect filter. */ + for (i = 0; i < 6; i++) + writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i); + /* The first entry is special because it bypasses the VLAN filter. + Don't use it. */ + writew(0, ioaddr + PerfFilterTable); + writew(0, ioaddr + PerfFilterTable + 4); + writew(0, ioaddr + PerfFilterTable + 8); + for (i = 1; i < 16; i++) { + __be16 *eaddrs = (__be16 *)dev->dev_addr; + void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16; + writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4; + writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4; + writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8; + } + + /* Initialize other registers. */ + /* Configure the PCI bus bursts and FIFO thresholds. */ + np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */ + writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode); + udelay(1000); + writel(np->tx_mode, ioaddr + TxMode); + np->tx_threshold = 4; + writel(np->tx_threshold, ioaddr + TxThreshold); + + writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); + + napi_enable(&np->napi); + + netif_start_queue(dev); + + if (debug > 1) + printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name); + set_rx_mode(dev); + + np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE); + check_duplex(dev); + + /* Enable GPIO interrupts on link change */ + writel(0x0f00ff00, ioaddr + GPIOCtrl); + + /* Set the interrupt mask */ + writel(IntrRxDone | IntrRxEmpty | IntrDMAErr | + IntrTxDMADone | IntrStatsMax | IntrLinkChange | + IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID, + ioaddr + IntrEnable); + /* Enable PCI interrupts. 
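+   (a read-modify-write that sets bit 23 of PCIDeviceConfig while leaving
+   the other configuration bits untouched)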
*/ + writel(0x00800000 | readl(ioaddr + PCIDeviceConfig), + ioaddr + PCIDeviceConfig); + +#ifdef VLAN_SUPPORT + /* Set VLAN type to 802.1q */ + writel(ETH_P_8021Q, ioaddr + VlanType); +#endif /* VLAN_SUPPORT */ + + retval = reject_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev); + if (retval) { + printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", + FIRMWARE_RX); + goto out_init; + } + if (fw_rx->size % 4) { + printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", + fw_rx->size, FIRMWARE_RX); + retval = -EINVAL; + goto out_rx; + } + retval = reject_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev); + if (retval) { + printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", + FIRMWARE_TX); + goto out_rx; + } + if (fw_tx->size % 4) { + printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", + fw_tx->size, FIRMWARE_TX); + retval = -EINVAL; + goto out_tx; + } + fw_rx_data = (const __be32 *)&fw_rx->data[0]; + fw_tx_data = (const __be32 *)&fw_tx->data[0]; + rx_size = fw_rx->size / 4; + tx_size = fw_tx->size / 4; + + /* Load Rx/Tx firmware into the frame processors */ + for (i = 0; i < rx_size; i++) + writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4); + for (i = 0; i < tx_size; i++) + writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4); + if (enable_hw_cksum) + /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */ + writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl); + else + /* Enable the Rx and Tx units only. */ + writel(TxEnable|RxEnable, ioaddr + GenCtrl); + + if (debug > 1) + printk(KERN_DEBUG "%s: Done netdev_open().\n", + dev->name); + +out_tx: + release_firmware(fw_tx); +out_rx: + release_firmware(fw_rx); +out_init: + if (retval) + netdev_close(dev); + return retval; +} + + +static void check_duplex(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + u16 reg0; + int silly_count = 1000; + + mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising); + mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET); + udelay(500); + while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET) + /* do nothing */; + if (!silly_count) { + printk("%s: MII reset failed!\n", dev->name); + return; + } + + reg0 = mdio_read(dev, np->phys[0], MII_BMCR); + + if (!np->mii_if.force_media) { + reg0 |= BMCR_ANENABLE | BMCR_ANRESTART; + } else { + reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART); + if (np->speed100) + reg0 |= BMCR_SPEED100; + if (np->mii_if.full_duplex) + reg0 |= BMCR_FULLDPLX; + printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n", + dev->name, + np->speed100 ? "100" : "10", + np->mii_if.full_duplex ? "full" : "half"); + } + mdio_write(dev, np->phys[0], MII_BMCR, reg0); +} + + +static void tx_timeout(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + int old_debug; + + printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, " + "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus)); + + /* Perhaps we should reinitialize the hardware here. */ + + /* + * Stop and restart the interface. + * Cheat and increase the debug level temporarily. + */ + old_debug = debug; + debug = 2; + netdev_close(dev); + netdev_open(dev); + debug = old_debug; + + /* Trigger an immediate transmit demand. */ + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + netif_wake_queue(dev); +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. 
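+   The Rx buffer size is PKT_BUF_SZ for MTUs up to 1500 bytes, otherwise
+   MTU plus 32 bytes of slack; an Rx allocation failure here is tolerated
+   and simply leaves fewer descriptors initially valid.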
*/ +static void init_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + np->cur_rx = np->cur_tx = np->reap_tx = 0; + np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0; + + np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + + /* Fill in the Rx buffers. Handle allocation failure gracefully. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz); + np->rx_info[i].skb = skb; + if (skb == NULL) + break; + np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); + /* Grrr, we cannot offset to correctly align the IP header. */ + np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); + } + writew(i - 1, np->base + RxDescQIdx); + np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + + /* Clear the remainder of the Rx buffer ring. */ + for ( ; i < RX_RING_SIZE; i++) { + np->rx_ring[i].rxaddr = 0; + np->rx_info[i].skb = NULL; + np->rx_info[i].mapping = 0; + } + /* Mark the last entry as wrapping the ring. */ + np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing); + + /* Clear the completion rings. */ + for (i = 0; i < DONE_Q_SIZE; i++) { + np->rx_done_q[i].status = 0; + np->tx_done_q[i].status = 0; + } + + for (i = 0; i < TX_RING_SIZE; i++) + memset(&np->tx_info[i], 0, sizeof(np->tx_info[i])); +} + + +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + unsigned int entry; + u32 status; + int i; + + /* + * be cautious here, wrapping the queue has weird semantics + * and we may not have enough slots even when it seems we do. + */ + if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + +#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK)) + return NETDEV_TX_OK; + } +#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ + + entry = np->cur_tx % TX_RING_SIZE; + for (i = 0; i < skb_num_frags(skb); i++) { + int wrap_ring = 0; + status = TxDescID; + + if (i == 0) { + np->tx_info[entry].skb = skb; + status |= TxCRCEn; + if (entry >= TX_RING_SIZE - skb_num_frags(skb)) { + status |= TxRingWrap; + wrap_ring = 1; + } + if (np->reap_tx) { + status |= TxDescIntr; + np->reap_tx = 0; + } + if (skb->ip_summed == CHECKSUM_PARTIAL) { + status |= TxCalTCP; + dev->stats.tx_compressed++; + } + status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16); + + np->tx_info[entry].mapping = + pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE); + } else { + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1]; + status |= skb_frag_size(this_frag); + np->tx_info[entry].mapping = + pci_map_single(np->pci_dev, + skb_frag_address(this_frag), + skb_frag_size(this_frag), + PCI_DMA_TODEVICE); + } + + np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); + np->tx_ring[entry].status = cpu_to_le32(status); + if (debug > 3) + printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n", + dev->name, np->cur_tx, np->dirty_tx, + entry, status); + if (wrap_ring) { + np->tx_info[entry].used_slots = TX_RING_SIZE - entry; + np->cur_tx += np->tx_info[entry].used_slots; + entry = 0; + } else { + np->tx_info[entry].used_slots = 1; + np->cur_tx += np->tx_info[entry].used_slots; + entry++; + } + /* scavenge the tx descriptors twice per TX_RING_SIZE */ + if 
(np->cur_tx % (TX_RING_SIZE / 2) == 0) + np->reap_tx = 1; + } + + /* Non-x86: explicitly flush descriptor cache lines here. */ + /* Ensure all descriptors are written back before the transmit is + initiated. - Jes */ + wmb(); + + /* Update the producer index. */ + writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx); + + /* 4 is arbitrary, but should be ok */ + if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) + netif_stop_queue(dev); + + return NETDEV_TX_OK; +} + + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t intr_handler(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + int boguscnt = max_interrupt_work; + int consumer; + int tx_status; + int handled = 0; + + do { + u32 intr_status = readl(ioaddr + IntrClear); + + if (debug > 4) + printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n", + dev->name, intr_status); + + if (intr_status == 0 || intr_status == (u32) -1) + break; + + handled = 1; + + if (intr_status & (IntrRxDone | IntrRxEmpty)) { + u32 enable; + + if (likely(napi_schedule_prep(&np->napi))) { + __napi_schedule(&np->napi); + enable = readl(ioaddr + IntrEnable); + enable &= ~(IntrRxDone | IntrRxEmpty); + writel(enable, ioaddr + IntrEnable); + /* flush PCI posting buffers */ + readl(ioaddr + IntrEnable); + } else { + /* Paranoia check */ + enable = readl(ioaddr + IntrEnable); + if (enable & (IntrRxDone | IntrRxEmpty)) { + printk(KERN_INFO + "%s: interrupt while in poll!\n", + dev->name); + enable &= ~(IntrRxDone | IntrRxEmpty); + writel(enable, ioaddr + IntrEnable); + } + } + } + + /* Scavenge the skbuff list based on the Tx-done queue. + There are redundant checks here that may be cleaned up + after the driver has proven to be reliable. */ + consumer = readl(ioaddr + TxConsumerIdx); + if (debug > 3) + printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n", + dev->name, consumer); + + while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) { + if (debug > 3) + printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n", + dev->name, np->dirty_tx, np->tx_done, tx_status); + if ((tx_status & 0xe0000000) == 0xa0000000) { + dev->stats.tx_packets++; + } else if ((tx_status & 0xe0000000) == 0x80000000) { + u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc); + struct sk_buff *skb = np->tx_info[entry].skb; + np->tx_info[entry].skb = NULL; + pci_unmap_single(np->pci_dev, + np->tx_info[entry].mapping, + skb_first_frag_len(skb), + PCI_DMA_TODEVICE); + np->tx_info[entry].mapping = 0; + np->dirty_tx += np->tx_info[entry].used_slots; + entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; + { + int i; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + pci_unmap_single(np->pci_dev, + np->tx_info[entry].mapping, + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + np->dirty_tx++; + entry++; + } + } + + dev_kfree_skb_irq(skb); + } + np->tx_done_q[np->tx_done].status = 0; + np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE; + } + writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2); + + if (netif_queue_stopped(dev) && + (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) { + /* The ring is no longer full, wake the queue. */ + netif_wake_queue(dev); + } + + /* Stats overflow */ + if (intr_status & IntrStatsMax) + get_stats(dev); + + /* Media change interrupt. 
*/ + if (intr_status & IntrLinkChange) + netdev_media_change(dev); + + /* Abnormal error summary/uncommon events handlers. */ + if (intr_status & IntrAbnormalSummary) + netdev_error(dev, intr_status); + + if (--boguscnt < 0) { + if (debug > 1) + printk(KERN_WARNING "%s: Too much work at interrupt, " + "status=%#8.8x.\n", + dev->name, intr_status); + break; + } + } while (1); + + if (debug > 4) + printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n", + dev->name, (int) readl(ioaddr + IntrStatus)); + return IRQ_RETVAL(handled); +} + + +/* + * This routine is logically part of the interrupt/poll handler, but separated + * for clarity and better register allocation. + */ +static int __netdev_rx(struct net_device *dev, int *quota) +{ + struct netdev_private *np = netdev_priv(dev); + u32 desc_status; + int retcode = 0; + + /* If EOP is set on the next entry, it's a new packet. Send it up. */ + while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) { + struct sk_buff *skb; + u16 pkt_len; + int entry; + rx_done_desc *desc = &np->rx_done_q[np->rx_done]; + + if (debug > 4) + printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status); + if (!(desc_status & RxOK)) { + /* There was an error. */ + if (debug > 2) + printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status); + dev->stats.rx_errors++; + if (desc_status & RxFIFOErr) + dev->stats.rx_fifo_errors++; + goto next_rx; + } + + if (*quota <= 0) { /* out of rx quota */ + retcode = 1; + goto out; + } + (*quota)--; + + pkt_len = desc_status; /* Implicitly Truncate */ + entry = (desc_status >> 16) & 0x7ff; + + if (debug > 4) + printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota); + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(np->pci_dev, + np->rx_info[entry].mapping, + pkt_len, PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len); + pci_dma_sync_single_for_device(np->pci_dev, + np->rx_info[entry].mapping, + pkt_len, PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + } else { + pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); + skb = np->rx_info[entry].skb; + skb_put(skb, pkt_len); + np->rx_info[entry].skb = NULL; + np->rx_info[entry].mapping = 0; + } +#ifndef final_version /* Remove after testing. */ + /* You will want this info for the initial debug. */ + if (debug > 5) { + printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n", + skb->data, skb->data + 6, + skb->data[12], skb->data[13]); + } +#endif + + skb->protocol = eth_type_trans(skb, dev); +#ifdef VLAN_SUPPORT + if (debug > 4) + printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2)); +#endif + if (le16_to_cpu(desc->status2) & 0x0100) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + dev->stats.rx_compressed++; + } + /* + * This feature doesn't seem to be working, at least + * with the two firmware versions I have. If the GFP sees + * an IP fragment, it either ignores it completely, or reports + * "bad checksum" on it. + * + * Maybe I missed something -- corrections are welcome. + * Until then, the printk stays. 
:-) -Ion + */ + else if (le16_to_cpu(desc->status2) & 0x0040) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = le16_to_cpu(desc->csum); + printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2)); + } +#ifdef VLAN_SUPPORT + if (le16_to_cpu(desc->status2) & 0x0200) { + u16 vlid = le16_to_cpu(desc->vlanid); + + if (debug > 4) { + printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", + vlid); + } + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlid); + } +#endif /* VLAN_SUPPORT */ + netif_receive_skb(skb); + dev->stats.rx_packets++; + + next_rx: + np->cur_rx++; + desc->status = 0; + np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE; + } + + if (*quota == 0) { /* out of rx quota */ + retcode = 1; + goto out; + } + writew(np->rx_done, np->base + CompletionQConsumerIdx); + + out: + refill_rx_ring(dev); + if (debug > 5) + printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n", + retcode, np->rx_done, desc_status); + return retcode; +} + +static int netdev_poll(struct napi_struct *napi, int budget) +{ + struct netdev_private *np = container_of(napi, struct netdev_private, napi); + struct net_device *dev = np->dev; + u32 intr_status; + void __iomem *ioaddr = np->base; + int quota = budget; + + do { + writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear); + + if (__netdev_rx(dev, "a)) + goto out; + + intr_status = readl(ioaddr + IntrStatus); + } while (intr_status & (IntrRxDone | IntrRxEmpty)); + + napi_complete(napi); + intr_status = readl(ioaddr + IntrEnable); + intr_status |= IntrRxDone | IntrRxEmpty; + writel(intr_status, ioaddr + IntrEnable); + + out: + if (debug > 5) + printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", + budget - quota); + + /* Restart Rx engine if stopped. */ + return budget - quota; +} + +static void refill_rx_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + struct sk_buff *skb; + int entry = -1; + + /* Refill the Rx ring buffers. */ + for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { + entry = np->dirty_rx % RX_RING_SIZE; + if (np->rx_info[entry].skb == NULL) { + skb = netdev_alloc_skb(dev, np->rx_buf_sz); + np->rx_info[entry].skb = skb; + if (skb == NULL) + break; /* Better luck next round. 
*/ + np->rx_info[entry].mapping = + pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); + np->rx_ring[entry].rxaddr = + cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); + } + if (entry == RX_RING_SIZE - 1) + np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing); + } + if (entry >= 0) + writew(entry, np->base + RxDescQIdx); +} + + +static void netdev_media_change(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + u16 reg0, reg1, reg4, reg5; + u32 new_tx_mode; + u32 new_intr_timer_ctrl; + + /* reset status first */ + mdio_read(dev, np->phys[0], MII_BMCR); + mdio_read(dev, np->phys[0], MII_BMSR); + + reg0 = mdio_read(dev, np->phys[0], MII_BMCR); + reg1 = mdio_read(dev, np->phys[0], MII_BMSR); + + if (reg1 & BMSR_LSTATUS) { + /* link is up */ + if (reg0 & BMCR_ANENABLE) { + /* autonegotiation is enabled */ + reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE); + reg5 = mdio_read(dev, np->phys[0], MII_LPA); + if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) { + np->speed100 = 1; + np->mii_if.full_duplex = 1; + } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) { + np->speed100 = 1; + np->mii_if.full_duplex = 0; + } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) { + np->speed100 = 0; + np->mii_if.full_duplex = 1; + } else { + np->speed100 = 0; + np->mii_if.full_duplex = 0; + } + } else { + /* autonegotiation is disabled */ + if (reg0 & BMCR_SPEED100) + np->speed100 = 1; + else + np->speed100 = 0; + if (reg0 & BMCR_FULLDPLX) + np->mii_if.full_duplex = 1; + else + np->mii_if.full_duplex = 0; + } + netif_carrier_on(dev); + printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n", + dev->name, + np->speed100 ? "100" : "10", + np->mii_if.full_duplex ? "full" : "half"); + + new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */ + if (np->mii_if.full_duplex) + new_tx_mode |= FullDuplex; + if (np->tx_mode != new_tx_mode) { + np->tx_mode = new_tx_mode; + writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode); + udelay(1000); + writel(np->tx_mode, ioaddr + TxMode); + } + + new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X; + if (np->speed100) + new_intr_timer_ctrl |= Timer10X; + if (np->intr_timer_ctrl != new_intr_timer_ctrl) { + np->intr_timer_ctrl = new_intr_timer_ctrl; + writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl); + } + } else { + netif_carrier_off(dev); + printk(KERN_DEBUG "%s: Link is down\n", dev->name); + } +} + + +static void netdev_error(struct net_device *dev, int intr_status) +{ + struct netdev_private *np = netdev_priv(dev); + + /* Came close to underrunning the Tx FIFO, increase threshold. */ + if (intr_status & IntrTxDataLow) { + if (np->tx_threshold <= PKT_BUF_SZ / 16) { + writel(++np->tx_threshold, np->base + TxThreshold); + printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n", + dev->name, np->tx_threshold * 16); + } else + printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name); + } + if (intr_status & IntrRxGFPDead) { + dev->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + } + if (intr_status & (IntrNoTxCsum | IntrDMAErr)) { + dev->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + } + if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug) + printk(KERN_ERR "%s: Something Wicked happened! 
%#8.8x.\n", + dev->name, intr_status); +} + + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + + /* This adapter architecture needs no SMP locks. */ + dev->stats.tx_bytes = readl(ioaddr + 0x57010); + dev->stats.rx_bytes = readl(ioaddr + 0x57044); + dev->stats.tx_packets = readl(ioaddr + 0x57000); + dev->stats.tx_aborted_errors = + readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028); + dev->stats.tx_window_errors = readl(ioaddr + 0x57018); + dev->stats.collisions = + readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008); + + /* The chip only need report frame silently dropped. */ + dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus); + writew(0, ioaddr + RxDMAStatus); + dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C); + dev->stats.rx_frame_errors = readl(ioaddr + 0x57040); + dev->stats.rx_length_errors = readl(ioaddr + 0x57058); + dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C); + + return &dev->stats; +} + +#ifdef VLAN_SUPPORT +static u32 set_vlan_mode(struct netdev_private *np) +{ + u32 ret = VlanMode; + u16 vid; + void __iomem *filter_addr = np->base + HashTable + 8; + int vlan_count = 0; + + for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) { + if (vlan_count == 32) + break; + writew(vid, filter_addr); + filter_addr += 16; + vlan_count++; + } + if (vlan_count == 32) { + ret |= PerfectFilterVlan; + while (vlan_count < 32) { + writew(0, filter_addr); + filter_addr += 16; + vlan_count++; + } + } + return ret; +} +#endif /* VLAN_SUPPORT */ + +static void set_rx_mode(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + u32 rx_mode = MinVLANPrio; + struct netdev_hw_addr *ha; + int i; + +#ifdef VLAN_SUPPORT + rx_mode |= set_vlan_mode(np); +#endif /* VLAN_SUPPORT */ + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + rx_mode |= AcceptAll; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. */ + rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter; + } else if (netdev_mc_count(dev) <= 14) { + /* Use the 16 element perfect filter, skip first two entries. */ + void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; + __be16 *eaddrs; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (__be16 *) ha->addr; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; + } + eaddrs = (__be16 *)dev->dev_addr; + i = netdev_mc_count(dev) + 2; + while (i++ < 16) { + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; + } + rx_mode |= AcceptBroadcast|PerfectFilter; + } else { + /* Must use a multicast hash table. */ + void __iomem *filter_addr; + __be16 *eaddrs; + __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The chip uses the upper 9 CRC bits + as index into the hash table */ + int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23; + __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; + + *fptr |= cpu_to_le32(1 << (bit_nr & 31)); + } + /* Clear the perfect filter list, skip first two entries. 
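+	   ("Clearing" actually rewrites each unused slot with the station
+	   address, a harmless duplicate of an entry the chip already matches;
+	   the 32-word multicast hash table is then loaded from mc_filter[].)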
*/ + filter_addr = ioaddr + PerfFilterTable + 2 * 16; + eaddrs = (__be16 *)dev->dev_addr; + for (i = 2; i < 16; i++) { + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; + } + for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++) + writew(mc_filter[i], filter_addr); + rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter; + } + writel(rx_mode, ioaddr + RxFilterMode); +} + +static int check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EINVAL; + return 0; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct netdev_private *np = netdev_priv(dev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + spin_lock_irq(&np->lock); + mii_ethtool_gset(&np->mii_if, ecmd); + spin_unlock_irq(&np->lock); + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + int res; + spin_lock_irq(&np->lock); + res = mii_ethtool_sset(&np->mii_if, ecmd); + spin_unlock_irq(&np->lock); + check_duplex(dev); + return res; +} + +static int nway_reset(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_nway_restart(&np->mii_if); +} + +static u32 get_link(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_link_ok(&np->mii_if); +} + +static u32 get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + debug = val; +} + +static const struct ethtool_ops ethtool_ops = { + .begin = check_if_running, + .get_drvinfo = get_drvinfo, + .get_settings = get_settings, + .set_settings = set_settings, + .nway_reset = nway_reset, + .get_link = get_link, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct netdev_private *np = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(rq); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); + spin_unlock_irq(&np->lock); + + if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0])) + check_duplex(dev); + + return rc; +} + +static int netdev_close(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + int i; + + netif_stop_queue(dev); + + napi_disable(&np->napi); + + if (debug > 1) { + printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", + dev->name, (int) readl(ioaddr + IntrStatus)); + printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", + dev->name, np->cur_tx, np->dirty_tx, + np->cur_rx, np->dirty_rx); + } + + /* Disable interrupts by clearing the interrupt mask. */ + writel(0, ioaddr + IntrEnable); + + /* Stop the chip's Tx and Rx processes. */ + writel(0, ioaddr + GenCtrl); + readl(ioaddr + GenCtrl); + + if (debug > 5) { + printk(KERN_DEBUG" Tx ring at %#llx:\n", + (long long) np->tx_ring_dma); + for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++) + printk(KERN_DEBUG " #%d desc. 
%#8.8x %#llx -> %#8.8x.\n", + i, le32_to_cpu(np->tx_ring[i].status), + (long long) dma_to_cpu(np->tx_ring[i].addr), + le32_to_cpu(np->tx_done_q[i].status)); + printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n", + (long long) np->rx_ring_dma, np->rx_done_q); + if (np->rx_done_q) + for (i = 0; i < 8 /* RX_RING_SIZE */; i++) { + printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n", + i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status)); + } + } + + free_irq(np->pci_dev->irq, dev); + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */ + if (np->rx_info[i].skb != NULL) { + pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(np->rx_info[i].skb); + } + np->rx_info[i].skb = NULL; + np->rx_info[i].mapping = 0; + } + for (i = 0; i < TX_RING_SIZE; i++) { + struct sk_buff *skb = np->tx_info[i].skb; + if (skb == NULL) + continue; + pci_unmap_single(np->pci_dev, + np->tx_info[i].mapping, + skb_first_frag_len(skb), PCI_DMA_TODEVICE); + np->tx_info[i].mapping = 0; + dev_kfree_skb(skb); + np->tx_info[i].skb = NULL; + } + + return 0; +} + +#ifdef CONFIG_PM +static int starfire_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) { + netif_device_detach(dev); + netdev_close(dev); + } + + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev,state)); + + return 0; +} + +static int starfire_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (netif_running(dev)) { + netdev_open(dev); + netif_device_attach(dev); + } + + return 0; +} +#endif /* CONFIG_PM */ + + +static void starfire_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct netdev_private *np = netdev_priv(dev); + + BUG_ON(!dev); + + unregister_netdev(dev); + + if (np->queue_mem) + pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma); + + + /* XXX: add wakeup code -- requires firmware for MagicPacket */ + pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */ + pci_disable_device(pdev); + + iounmap(np->base); + pci_release_regions(pdev); + + free_netdev(dev); /* Will also free np!! 
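+				      (np lives inside the net_device that was
+				      allocated with alloc_etherdev(sizeof(*np))
+				      in starfire_init_one())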
*/ +} + + +static struct pci_driver starfire_driver = { + .name = DRV_NAME, + .probe = starfire_init_one, + .remove = starfire_remove_one, +#ifdef CONFIG_PM + .suspend = starfire_suspend, + .resume = starfire_resume, +#endif /* CONFIG_PM */ + .id_table = starfire_pci_tbl, +}; + + +static int __init starfire_init (void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); + + printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n"); +#endif + + BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t)); + + return pci_register_driver(&starfire_driver); +} + + +static void __exit starfire_cleanup (void) +{ + pci_unregister_driver (&starfire_driver); +} + + +module_init(starfire_init); +module_exit(starfire_cleanup); + + +/* + * Local variables: + * c-basic-offset: 8 + * tab-width: 8 + * End: + */ diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig new file mode 100644 index 000000000..c9cd3592a --- /dev/null +++ b/drivers/net/ethernet/adi/Kconfig @@ -0,0 +1,68 @@ +# +# Blackfin device configuration +# + +config NET_BFIN + bool "Blackfin devices" + depends on BF516 || BF518 || BF526 || BF527 || BF536 || BF537 + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y. + Make sure you know the name of your card. Read the Ethernet-HOWTO, + available from . + + If unsure, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the remaining Blackfin card questions. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_BFIN + +config BFIN_MAC + tristate "Blackfin on-chip MAC support" + depends on (BF516 || BF518 || BF526 || BF527 || BF536 || BF537) + select CRC32 + select MII + select PHYLIB + select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE + ---help--- + This is the driver for Blackfin on-chip mac device. Say Y if you want + it compiled into the kernel. This driver is also available as a + module ( = code which can be inserted in and removed from the running + kernel whenever you want). The module will be called bfin_mac. + +config BFIN_MAC_USE_L1 + bool "Use L1 memory for rx/tx packets" + depends on BFIN_MAC && (BF527 || BF537) + default y + ---help--- + To get maximum network performance, you should use L1 memory as rx/tx + buffers. Say N here if you want to reserve L1 memory for other uses. + +config BFIN_TX_DESC_NUM + int "Number of transmit buffer packets" + depends on BFIN_MAC + range 6 10 if BFIN_MAC_USE_L1 + range 10 100 + default "10" + ---help--- + Set the number of buffer packets used in driver. + +config BFIN_RX_DESC_NUM + int "Number of receive buffer packets" + depends on BFIN_MAC + range 20 64 + default "20" + ---help--- + Set the number of buffer packets used in driver. + +config BFIN_MAC_USE_HWSTAMP + bool "Use IEEE 1588 hwstamp" + depends on BFIN_MAC && BF518 + select PTP_1588_CLOCK + default y + ---help--- + To support the IEEE 1588 Precision Time Protocol (PTP), select y here + +endif # NET_BFIN diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile new file mode 100644 index 000000000..b1fbe195d --- /dev/null +++ b/drivers/net/ethernet/adi/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Blackfin device drivers. 
+# + +obj-$(CONFIG_BFIN_MAC) += bfin_mac.o diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c new file mode 100644 index 000000000..096531a73 --- /dev/null +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -0,0 +1,1933 @@ +/* + * Blackfin On-Chip MAC Driver + * + * Copyright 2004-2010 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ + +#define DRV_VERSION "1.1" +#define DRV_DESC "Blackfin on-chip Ethernet MAC driver" + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "bfin_mac.h" + +MODULE_AUTHOR("Bryan Wu, Luke Yang"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_ALIAS("platform:bfin_mac"); + +#if defined(CONFIG_BFIN_MAC_USE_L1) +# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num) +# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr) +#else +# define bfin_mac_alloc(dma_handle, size, num) \ + dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL) +# define bfin_mac_free(dma_handle, ptr, num) \ + dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle) +#endif + +#define PKT_BUF_SZ 1580 + +#define MAX_TIMEOUT_CNT 500 + +/* pointers to maintain transmit list */ +static struct net_dma_desc_tx *tx_list_head; +static struct net_dma_desc_tx *tx_list_tail; +static struct net_dma_desc_rx *rx_list_head; +static struct net_dma_desc_rx *rx_list_tail; +static struct net_dma_desc_rx *current_rx_ptr; +static struct net_dma_desc_tx *current_tx_ptr; +static struct net_dma_desc_tx *tx_desc; +static struct net_dma_desc_rx *rx_desc; + +static void desc_list_free(void) +{ + struct net_dma_desc_rx *r; + struct net_dma_desc_tx *t; + int i; +#if !defined(CONFIG_BFIN_MAC_USE_L1) + dma_addr_t dma_handle = 0; +#endif + + if (tx_desc) { + t = tx_list_head; + for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { + if (t) { + if (t->skb) { + dev_kfree_skb(t->skb); + t->skb = NULL; + } + t = t->next; + } + } + bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM); + } + + if (rx_desc) { + r = rx_list_head; + for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { + if (r) { + if (r->skb) { + dev_kfree_skb(r->skb); + r->skb = NULL; + } + r = r->next; + } + } + bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM); + } +} + +static int desc_list_init(struct net_device *dev) +{ + int i; + struct sk_buff *new_skb; +#if !defined(CONFIG_BFIN_MAC_USE_L1) + /* + * This dma_handle is useless in Blackfin dma_alloc_coherent(). + * The real dma handler is the return value of dma_alloc_coherent(). 
+ */ + dma_addr_t dma_handle; +#endif + + tx_desc = bfin_mac_alloc(&dma_handle, + sizeof(struct net_dma_desc_tx), + CONFIG_BFIN_TX_DESC_NUM); + if (tx_desc == NULL) + goto init_error; + + rx_desc = bfin_mac_alloc(&dma_handle, + sizeof(struct net_dma_desc_rx), + CONFIG_BFIN_RX_DESC_NUM); + if (rx_desc == NULL) + goto init_error; + + /* init tx_list */ + tx_list_head = tx_list_tail = tx_desc; + + for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { + struct net_dma_desc_tx *t = tx_desc + i; + struct dma_descriptor *a = &(t->desc_a); + struct dma_descriptor *b = &(t->desc_b); + + /* + * disable DMA + * read from memory WNR = 0 + * wordsize is 32 bits + * 6 half words is desc size + * large desc flow + */ + a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + a->start_addr = (unsigned long)t->packet; + a->x_count = 0; + a->next_dma_desc = b; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * disable interrupt + * 6 half words is desc size + * large desc flow + */ + b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + b->start_addr = (unsigned long)(&(t->status)); + b->x_count = 0; + + t->skb = NULL; + tx_list_tail->desc_b.next_dma_desc = a; + tx_list_tail->next = t; + tx_list_tail = t; + } + tx_list_tail->next = tx_list_head; /* tx_list is a circle */ + tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a); + current_tx_ptr = tx_list_head; + + /* init rx_list */ + rx_list_head = rx_list_tail = rx_desc; + + for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { + struct net_dma_desc_rx *r = rx_desc + i; + struct dma_descriptor *a = &(r->desc_a); + struct dma_descriptor *b = &(r->desc_b); + + /* allocate a new skb for next time receive */ + new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); + if (!new_skb) + goto init_error; + + skb_reserve(new_skb, NET_IP_ALIGN); + /* Invidate the data cache of skb->data range when it is write back + * cache. 
It will prevent overwritting the new data from DMA + */ + blackfin_dcache_invalidate_range((unsigned long)new_skb->head, + (unsigned long)new_skb->end); + r->skb = new_skb; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * disable interrupt + * 6 half words is desc size + * large desc flow + */ + a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; + /* since RXDWA is enabled */ + a->start_addr = (unsigned long)new_skb->data - 2; + a->x_count = 0; + a->next_dma_desc = b; + + /* + * enabled DMA + * write to memory WNR = 1 + * wordsize is 32 bits + * enable interrupt + * 6 half words is desc size + * large desc flow + */ + b->config = DMAEN | WNR | WDSIZE_32 | DI_EN | + NDSIZE_6 | DMAFLOW_LARGE; + b->start_addr = (unsigned long)(&(r->status)); + b->x_count = 0; + + rx_list_tail->desc_b.next_dma_desc = a; + rx_list_tail->next = r; + rx_list_tail = r; + } + rx_list_tail->next = rx_list_head; /* rx_list is a circle */ + rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a); + current_rx_ptr = rx_list_head; + + return 0; + +init_error: + desc_list_free(); + pr_err("kmalloc failed\n"); + return -ENOMEM; +} + + +/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ + +/* + * MII operations + */ +/* Wait until the previous MDC/MDIO transaction has completed */ +static int bfin_mdio_poll(void) +{ + int timeout_cnt = MAX_TIMEOUT_CNT; + + /* poll the STABUSY bit */ + while ((bfin_read_EMAC_STAADD()) & STABUSY) { + udelay(1); + if (timeout_cnt-- < 0) { + pr_err("wait MDC/MDIO transaction to complete timeout\n"); + return -ETIMEDOUT; + } + } + + return 0; +} + +/* Read an off-chip register in a PHY through the MDC/MDIO port */ +static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + int ret; + + ret = bfin_mdio_poll(); + if (ret) + return ret; + + /* read mode */ + bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | + SET_REGAD((u16) regnum) | + STABUSY); + + ret = bfin_mdio_poll(); + if (ret) + return ret; + + return (int) bfin_read_EMAC_STADAT(); +} + +/* Write an off-chip register in a PHY through the MDC/MDIO port */ +static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) +{ + int ret; + + ret = bfin_mdio_poll(); + if (ret) + return ret; + + bfin_write_EMAC_STADAT((u32) value); + + /* write mode */ + bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | + SET_REGAD((u16) regnum) | + STAOP | + STABUSY); + + return bfin_mdio_poll(); +} + +static void bfin_mac_adjust_link(struct net_device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + struct phy_device *phydev = lp->phydev; + unsigned long flags; + int new_state = 0; + + spin_lock_irqsave(&lp->lock, flags); + if (phydev->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != lp->old_duplex) { + u32 opmode = bfin_read_EMAC_OPMODE(); + new_state = 1; + + if (phydev->duplex) + opmode |= FDMODE; + else + opmode &= ~(FDMODE); + + bfin_write_EMAC_OPMODE(opmode); + lp->old_duplex = phydev->duplex; + } + + if (phydev->speed != lp->old_speed) { + if (phydev->interface == PHY_INTERFACE_MODE_RMII) { + u32 opmode = bfin_read_EMAC_OPMODE(); + switch (phydev->speed) { + case 10: + opmode |= RMII_10; + break; + case 100: + opmode &= ~RMII_10; + break; + default: + netdev_warn(dev, + "Ack! 
Speed (%d) is not 10/100!\n", + phydev->speed); + break; + } + bfin_write_EMAC_OPMODE(opmode); + } + + new_state = 1; + lp->old_speed = phydev->speed; + } + + if (!lp->old_link) { + new_state = 1; + lp->old_link = 1; + } + } else if (lp->old_link) { + new_state = 1; + lp->old_link = 0; + lp->old_speed = 0; + lp->old_duplex = -1; + } + + if (new_state) { + u32 opmode = bfin_read_EMAC_OPMODE(); + phy_print_status(phydev); + pr_debug("EMAC_OPMODE = 0x%08x\n", opmode); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* MDC = 2.5 MHz */ +#define MDC_CLK 2500000 + +static int mii_probe(struct net_device *dev, int phy_mode) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + struct phy_device *phydev = NULL; + unsigned short sysctl; + int i; + u32 sclk, mdc_div; + + /* Enable PHY output early */ + if (!(bfin_read_VR_CTL() & CLKBUFOE)) + bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE); + + sclk = get_sclk(); + mdc_div = ((sclk / MDC_CLK) / 2) - 1; + + sysctl = bfin_read_EMAC_SYSCTL(); + sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div); + bfin_write_EMAC_SYSCTL(sysctl); + + /* search for connected PHY device */ + for (i = 0; i < PHY_MAX_ADDR; ++i) { + struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i]; + + if (!tmp_phydev) + continue; /* no PHY here... */ + + phydev = tmp_phydev; + break; /* found it */ + } + + /* now we are supposed to have a proper phydev, to attach to... */ + if (!phydev) { + netdev_err(dev, "no phy device found\n"); + return -ENODEV; + } + + if (phy_mode != PHY_INTERFACE_MODE_RMII && + phy_mode != PHY_INTERFACE_MODE_MII) { + netdev_err(dev, "invalid phy interface mode\n"); + return -EINVAL; + } + + phydev = phy_connect(dev, dev_name(&phydev->dev), + &bfin_mac_adjust_link, phy_mode); + + if (IS_ERR(phydev)) { + netdev_err(dev, "could not attach PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_Pause | SUPPORTED_Asym_Pause + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + + lp->old_link = 0; + lp->old_speed = 0; + lp->old_duplex = -1; + lp->phydev = phydev; + + pr_info("attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq, + MDC_CLK, mdc_div, sclk/1000000); + + return 0; +} + +/* + * Ethtool support + */ + +/* + * interrupt routine for magic packet wakeup + */ +static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + +static int +bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + + if (lp->phydev) + return phy_ethtool_gset(lp->phydev, cmd); + + return -EINVAL; +} + +static int +bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (lp->phydev) + return phy_ethtool_sset(lp->phydev, cmd); + + return -EINVAL; +} + +static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info)); +} + +static void 
bfin_mac_ethtool_getwol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + + wolinfo->supported = WAKE_MAGIC; + wolinfo->wolopts = lp->wol; +} + +static int bfin_mac_ethtool_setwol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + int rc; + + if (wolinfo->wolopts & (WAKE_MAGICSECURE | + WAKE_UCAST | + WAKE_MCAST | + WAKE_BCAST | + WAKE_ARP)) + return -EOPNOTSUPP; + + lp->wol = wolinfo->wolopts; + + if (lp->wol && !lp->irq_wake_requested) { + /* register wake irq handler */ + rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt, + 0, "EMAC_WAKE", dev); + if (rc) + return rc; + lp->irq_wake_requested = true; + } + + if (!lp->wol && lp->irq_wake_requested) { + free_irq(IRQ_MAC_WAKEDET, dev); + lp->irq_wake_requested = false; + } + + /* Make sure the PHY driver doesn't suspend */ + device_init_wakeup(&dev->dev, lp->wol); + + return 0; +} + +#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP +static int bfin_mac_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = lp->phc_index; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + return 0; +} +#endif + +static const struct ethtool_ops bfin_mac_ethtool_ops = { + .get_settings = bfin_mac_ethtool_getsettings, + .set_settings = bfin_mac_ethtool_setsettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = bfin_mac_ethtool_getdrvinfo, + .get_wol = bfin_mac_ethtool_getwol, + .set_wol = bfin_mac_ethtool_setwol, +#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP + .get_ts_info = bfin_mac_ethtool_get_ts_info, +#endif +}; + +/**************************************************************************/ +static void setup_system_regs(struct net_device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + int i; + unsigned short sysctl; + + /* + * Odd word alignment for Receive Frame DMA word + * Configure checksum support and rcve frame word alignment + */ + sysctl = bfin_read_EMAC_SYSCTL(); + /* + * check if interrupt is requested for any PHY, + * enable PHY interrupt only if needed + */ + for (i = 0; i < PHY_MAX_ADDR; ++i) + if (lp->mii_bus->irq[i] != PHY_POLL) + break; + if (i < PHY_MAX_ADDR) + sysctl |= PHYIE; + sysctl |= RXDWA; +#if defined(BFIN_MAC_CSUM_OFFLOAD) + sysctl |= RXCKS; +#else + sysctl &= ~RXCKS; +#endif + bfin_write_EMAC_SYSCTL(sysctl); + + bfin_write_EMAC_MMC_CTL(RSTC | CROLL); + + /* Set vlan regs to let 1522 bytes long packets pass through */ + bfin_write_EMAC_VLAN1(lp->vlan1_mask); + bfin_write_EMAC_VLAN2(lp->vlan2_mask); + + /* Initialize the TX DMA channel registers */ + bfin_write_DMA2_X_COUNT(0); + bfin_write_DMA2_X_MODIFY(4); + bfin_write_DMA2_Y_COUNT(0); + bfin_write_DMA2_Y_MODIFY(0); + + /* Initialize the RX DMA channel registers */ + bfin_write_DMA1_X_COUNT(0); + bfin_write_DMA1_X_MODIFY(4); + bfin_write_DMA1_Y_COUNT(0); + bfin_write_DMA1_Y_MODIFY(0); +} + +static void setup_mac_addr(u8 *mac_addr) +{ + u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]); + u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]); + + /* this depends on a little-endian machine */ + bfin_write_EMAC_ADDRLO(addr_low); + 
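	/*
	 * A minimal sketch of the packing performed above, assuming a plain
	 * 6-byte array "mac" (hypothetical, for illustration only): octets
	 * 0-3 form the 32-bit ADDRLO word and octets 4-5 the 16-bit ADDRHI
	 * word, least-significant byte first:
	 *
	 *	u32 lo = mac[0] | mac[1] << 8 | mac[2] << 16 | (u32)mac[3] << 24;
	 *	u16 hi = mac[4] | mac[5] << 8;
	 *
	 * which is the value the le32_to_cpu()/le16_to_cpu() casts above
	 * yield for a little-endian byte layout.
	 */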
bfin_write_EMAC_ADDRHI(addr_hi); +} + +static int bfin_mac_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + if (netif_running(dev)) + return -EBUSY; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + setup_mac_addr(dev->dev_addr); + return 0; +} + +#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP +#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE) + +static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result) +{ + u32 ipn = 1000000000UL / input_clk; + u32 ppn = 1; + unsigned int shift = 0; + + while (ppn <= ipn) { + ppn <<= 1; + shift++; + } + *shift_result = shift; + return 1000000000UL / ppn; +} + +static int bfin_mac_hwtstamp_set(struct net_device *netdev, + struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct bfin_mac_local *lp = netdev_priv(netdev); + u16 ptpctl; + u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + if ((config.tx_type != HWTSTAMP_TX_OFF) && + (config.tx_type != HWTSTAMP_TX_ON)) + return -ERANGE; + + ptpctl = bfin_read_EMAC_PTP_CTL(); + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* + * Dont allow any timestamping + */ + ptpfv3 = 0xFFFFFFFF; + bfin_write_EMAC_PTP_FV3(ptpfv3); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + /* + * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL) + * to enable all the field matches. + */ + ptpctl &= ~0x1F00; + bfin_write_EMAC_PTP_CTL(ptpctl); + /* + * Keep the default values of the EMAC_PTP_FOFF register. + */ + ptpfoff = 0x4A24170C; + bfin_write_EMAC_PTP_FOFF(ptpfoff); + /* + * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2 + * registers. + */ + ptpfv1 = 0x11040800; + bfin_write_EMAC_PTP_FV1(ptpfv1); + ptpfv2 = 0x0140013F; + bfin_write_EMAC_PTP_FV2(ptpfv2); + /* + * The default value (0xFFFC) allows the timestamping of both + * received Sync messages and Delay_Req messages. + */ + ptpfv3 = 0xFFFFFFFC; + bfin_write_EMAC_PTP_FV3(ptpfv3); + + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Clear all five comparison mask bits (bits[12:8]) in the + * EMAC_PTP_CTL register to enable all the field matches. + */ + ptpctl &= ~0x1F00; + bfin_write_EMAC_PTP_CTL(ptpctl); + /* + * Keep the default values of the EMAC_PTP_FOFF register, except set + * the PTPCOF field to 0x2A. + */ + ptpfoff = 0x2A24170C; + bfin_write_EMAC_PTP_FOFF(ptpfoff); + /* + * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2 + * registers. + */ + ptpfv1 = 0x11040800; + bfin_write_EMAC_PTP_FV1(ptpfv1); + ptpfv2 = 0x0140013F; + bfin_write_EMAC_PTP_FV2(ptpfv2); + /* + * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set + * the value to 0xFFF0. + */ + ptpfv3 = 0xFFFFFFF0; + bfin_write_EMAC_PTP_FV3(ptpfv3); + + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + /* + * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the + * EFTM and PTPCM field comparison. 
+ */ + ptpctl &= ~0x1100; + bfin_write_EMAC_PTP_CTL(ptpctl); + /* + * Keep the default values of all the fields of the EMAC_PTP_FOFF + * register, except set the PTPCOF field to 0x0E. + */ + ptpfoff = 0x0E24170C; + bfin_write_EMAC_PTP_FOFF(ptpfoff); + /* + * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which + * corresponds to PTP messages on the MAC layer. + */ + ptpfv1 = 0x110488F7; + bfin_write_EMAC_PTP_FV1(ptpfv1); + ptpfv2 = 0x0140013F; + bfin_write_EMAC_PTP_FV2(ptpfv2); + /* + * To allow the timestamping of Pdelay_Req and Pdelay_Resp + * messages, set the value to 0xFFF0. + */ + ptpfv3 = 0xFFFFFFF0; + bfin_write_EMAC_PTP_FV3(ptpfv3); + + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + default: + return -ERANGE; + } + + if (config.tx_type == HWTSTAMP_TX_OFF && + bfin_mac_hwtstamp_is_none(config.rx_filter)) { + ptpctl &= ~PTP_EN; + bfin_write_EMAC_PTP_CTL(ptpctl); + + SSYNC(); + } else { + ptpctl |= PTP_EN; + bfin_write_EMAC_PTP_CTL(ptpctl); + + /* + * clear any existing timestamp + */ + bfin_read_EMAC_PTP_RXSNAPLO(); + bfin_read_EMAC_PTP_RXSNAPHI(); + + bfin_read_EMAC_PTP_TXSNAPLO(); + bfin_read_EMAC_PTP_TXSNAPHI(); + + SSYNC(); + } + + lp->stamp_cfg = config; + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int bfin_mac_hwtstamp_get(struct net_device *netdev, + struct ifreq *ifr) +{ + struct bfin_mac_local *lp = netdev_priv(netdev); + + return copy_to_user(ifr->ifr_data, &lp->stamp_cfg, + sizeof(lp->stamp_cfg)) ? + -EFAULT : 0; +} + +static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) +{ + struct bfin_mac_local *lp = netdev_priv(netdev); + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + int timeout_cnt = MAX_TIMEOUT_CNT; + + /* When doing time stamping, keep the connection to the socket + * a while longer + */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + /* + * The timestamping is done at the EMAC module's MII/RMII interface + * when the module sees the Start of Frame of an event message packet. This + * interface is the closest possible place to the physical Ethernet transmission + * medium, providing the best timing accuracy. 
+ */ + while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) + udelay(1); + if (timeout_cnt == 0) + netdev_err(netdev, "timestamp the TX packet failed\n"); + else { + struct skb_shared_hwtstamps shhwtstamps; + u64 ns; + u64 regval; + + regval = bfin_read_EMAC_PTP_TXSNAPLO(); + regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32; + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + ns = regval << lp->shift; + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } + } +} + +static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) +{ + struct bfin_mac_local *lp = netdev_priv(netdev); + u32 valid; + u64 regval, ns; + struct skb_shared_hwtstamps *shhwtstamps; + + if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter)) + return; + + valid = bfin_read_EMAC_PTP_ISTAT() & RXEL; + if (!valid) + return; + + shhwtstamps = skb_hwtstamps(skb); + + regval = bfin_read_EMAC_PTP_RXSNAPLO(); + regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32; + ns = regval << lp->shift; + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static void bfin_mac_hwtstamp_init(struct net_device *netdev) +{ + struct bfin_mac_local *lp = netdev_priv(netdev); + u64 addend, ppb; + u32 input_clk, phc_clk; + + /* Initialize hardware timer */ + input_clk = get_sclk(); + phc_clk = bfin_select_phc_clock(input_clk, &lp->shift); + addend = phc_clk * (1ULL << 32); + do_div(addend, input_clk); + bfin_write_EMAC_PTP_ADDEND((u32)addend); + + lp->addend = addend; + ppb = 1000000000ULL * input_clk; + do_div(ppb, phc_clk); + lp->max_ppb = ppb - 1000000000ULL - 1ULL; + + /* Initialize hwstamp config */ + lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; +} + +static u64 bfin_ptp_time_read(struct bfin_mac_local *lp) +{ + u64 ns; + u32 lo, hi; + + lo = bfin_read_EMAC_PTP_TIMELO(); + hi = bfin_read_EMAC_PTP_TIMEHI(); + + ns = ((u64) hi) << 32; + ns |= lo; + ns <<= lp->shift; + + return ns; +} + +static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns) +{ + u32 hi, lo; + + ns >>= lp->shift; + hi = ns >> 32; + lo = ns & 0xffffffff; + + bfin_write_EMAC_PTP_TIMELO(lo); + bfin_write_EMAC_PTP_TIMEHI(hi); +} + +/* PTP Hardware Clock operations */ + +static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 adj; + u32 diff, addend; + int neg_adj = 0; + struct bfin_mac_local *lp = + container_of(ptp, struct bfin_mac_local, caps); + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + addend = lp->addend; + adj = addend; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + + addend = neg_adj ? 
addend - diff : addend + diff; + + bfin_write_EMAC_PTP_ADDEND(addend); + + return 0; +} + +static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + s64 now; + unsigned long flags; + struct bfin_mac_local *lp = + container_of(ptp, struct bfin_mac_local, caps); + + spin_lock_irqsave(&lp->phc_lock, flags); + + now = bfin_ptp_time_read(lp); + now += delta; + bfin_ptp_time_write(lp, now); + + spin_unlock_irqrestore(&lp->phc_lock, flags); + + return 0; +} + +static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct bfin_mac_local *lp = + container_of(ptp, struct bfin_mac_local, caps); + + spin_lock_irqsave(&lp->phc_lock, flags); + + ns = bfin_ptp_time_read(lp); + + spin_unlock_irqrestore(&lp->phc_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int bfin_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct bfin_mac_local *lp = + container_of(ptp, struct bfin_mac_local, caps); + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&lp->phc_lock, flags); + + bfin_ptp_time_write(lp, ns); + + spin_unlock_irqrestore(&lp->phc_lock, flags); + + return 0; +} + +static int bfin_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info bfin_ptp_caps = { + .owner = THIS_MODULE, + .name = "BF518 clock", + .max_adj = 0, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = bfin_ptp_adjfreq, + .adjtime = bfin_ptp_adjtime, + .gettime64 = bfin_ptp_gettime, + .settime64 = bfin_ptp_settime, + .enable = bfin_ptp_enable, +}; + +static int bfin_phc_init(struct net_device *netdev, struct device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(netdev); + + lp->caps = bfin_ptp_caps; + lp->caps.max_adj = lp->max_ppb; + lp->clock = ptp_clock_register(&lp->caps, dev); + if (IS_ERR(lp->clock)) + return PTR_ERR(lp->clock); + + lp->phc_index = ptp_clock_index(lp->clock); + spin_lock_init(&lp->phc_lock); + + return 0; +} + +static void bfin_phc_release(struct bfin_mac_local *lp) +{ + ptp_clock_unregister(lp->clock); +} + +#else +# define bfin_mac_hwtstamp_is_none(cfg) 0 +# define bfin_mac_hwtstamp_init(dev) +# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP) +# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP) +# define bfin_rx_hwtstamp(dev, skb) +# define bfin_tx_hwtstamp(dev, skb) +# define bfin_phc_init(netdev, dev) 0 +# define bfin_phc_release(lp) +#endif + +static inline void _tx_reclaim_skb(void) +{ + do { + tx_list_head->desc_a.config &= ~DMAEN; + tx_list_head->status.status_word = 0; + if (tx_list_head->skb) { + dev_consume_skb_any(tx_list_head->skb); + tx_list_head->skb = NULL; + } + tx_list_head = tx_list_head->next; + + } while (tx_list_head->status.status_word != 0); +} + +static void tx_reclaim_skb(struct bfin_mac_local *lp) +{ + int timeout_cnt = MAX_TIMEOUT_CNT; + + if (tx_list_head->status.status_word != 0) + _tx_reclaim_skb(); + + if (current_tx_ptr->next == tx_list_head) { + while (tx_list_head->status.status_word == 0) { + /* slow down polling to avoid too many queue stop. */ + udelay(10); + /* reclaim skb if DMA is not running. 
*/ + if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) + break; + if (timeout_cnt-- < 0) + break; + } + + if (timeout_cnt >= 0) + _tx_reclaim_skb(); + else + netif_stop_queue(lp->ndev); + } + + if (current_tx_ptr->next != tx_list_head && + netif_queue_stopped(lp->ndev)) + netif_wake_queue(lp->ndev); + + if (tx_list_head != current_tx_ptr) { + /* shorten the timer interval if tx queue is stopped */ + if (netif_queue_stopped(lp->ndev)) + lp->tx_reclaim_timer.expires = + jiffies + (TX_RECLAIM_JIFFIES >> 4); + else + lp->tx_reclaim_timer.expires = + jiffies + TX_RECLAIM_JIFFIES; + + mod_timer(&lp->tx_reclaim_timer, + lp->tx_reclaim_timer.expires); + } + + return; +} + +static void tx_reclaim_skb_timeout(unsigned long lp) +{ + tx_reclaim_skb((struct bfin_mac_local *)lp); +} + +static int bfin_mac_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + u16 *data; + u32 data_align = (unsigned long)(skb->data) & 0x3; + + current_tx_ptr->skb = skb; + + if (data_align == 0x2) { + /* move skb->data to current_tx_ptr payload */ + data = (u16 *)(skb->data) - 1; + *data = (u16)(skb->len); + /* + * When transmitting an Ethernet packet, the PTP_TSYNC module requires + * a DMA_Length_Word field associated with the packet. The lower 12 bits + * of this field are the length of the packet payload in bytes and the higher + * 4 bits are the timestamping enable field. + */ + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + *data |= 0x1000; + + current_tx_ptr->desc_a.start_addr = (u32)data; + /* this is important! */ + blackfin_dcache_flush_range((u32)data, + (u32)((u8 *)data + skb->len + 4)); + } else { + *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len); + /* enable timestamping for the sent packet */ + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + *((u16 *)(current_tx_ptr->packet)) |= 0x1000; + memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data, + skb->len); + current_tx_ptr->desc_a.start_addr = + (u32)current_tx_ptr->packet; + blackfin_dcache_flush_range( + (u32)current_tx_ptr->packet, + (u32)(current_tx_ptr->packet + skb->len + 2)); + } + + /* make sure the internal data buffers in the core are drained + * so that the DMA descriptors are completely written when the + * DMA engine goes to fetch them below + */ + SSYNC(); + + /* always clear status buffer before start tx dma */ + current_tx_ptr->status.status_word = 0; + + /* enable this packet's dma */ + current_tx_ptr->desc_a.config |= DMAEN; + + /* tx dma is running, just return */ + if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN) + goto out; + + /* tx dma is not running */ + bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a)); + /* dma enabled, read from memory, size is 6 */ + bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config); + /* Turn on the EMAC tx */ + bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); + +out: + bfin_tx_hwtstamp(dev, skb); + + current_tx_ptr = current_tx_ptr->next; + dev->stats.tx_packets++; + dev->stats.tx_bytes += (skb->len); + + tx_reclaim_skb(lp); + + return NETDEV_TX_OK; +} + +#define IP_HEADER_OFF 0 +#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \ + RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE) + +static void bfin_mac_rx(struct bfin_mac_local *lp) +{ + struct net_device *dev = lp->ndev; + struct sk_buff *skb, *new_skb; + unsigned short len; +#if defined(BFIN_MAC_CSUM_OFFLOAD) + unsigned int i; + unsigned char fcs[ETH_FCS_LEN + 1]; +#endif + + /* check if frame status word reports an error condition + * we which case we simply 
drop the packet + */ + if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { + netdev_notice(dev, "rx: receive error - packet dropped\n"); + dev->stats.rx_dropped++; + goto out; + } + + /* allocate a new skb for next time receive */ + skb = current_rx_ptr->skb; + + new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); + if (!new_skb) { + dev->stats.rx_dropped++; + goto out; + } + /* reserve 2 bytes for RXDWA padding */ + skb_reserve(new_skb, NET_IP_ALIGN); + /* Invidate the data cache of skb->data range when it is write back + * cache. It will prevent overwritting the new data from DMA + */ + blackfin_dcache_invalidate_range((unsigned long)new_skb->head, + (unsigned long)new_skb->end); + + current_rx_ptr->skb = new_skb; + current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; + + len = (unsigned short)(current_rx_ptr->status.status_word & RX_FRLEN); + /* Deduce Ethernet FCS length from Ethernet payload length */ + len -= ETH_FCS_LEN; + skb_put(skb, len); + + skb->protocol = eth_type_trans(skb, dev); + + bfin_rx_hwtstamp(dev, skb); + +#if defined(BFIN_MAC_CSUM_OFFLOAD) + /* Checksum offloading only works for IPv4 packets with the standard IP header + * length of 20 bytes, because the blackfin MAC checksum calculation is + * based on that assumption. We must NOT use the calculated checksum if our + * IP version or header break that assumption. + */ + if (skb->data[IP_HEADER_OFF] == 0x45) { + skb->csum = current_rx_ptr->status.ip_payload_csum; + /* + * Deduce Ethernet FCS from hardware generated IP payload checksum. + * IP checksum is based on 16-bit one's complement algorithm. + * To deduce a value from checksum is equal to add its inversion. + * If the IP payload len is odd, the inversed FCS should also + * begin from odd address and leave first byte zero. 
+ */ + if (skb->len % 2) { + fcs[0] = 0; + for (i = 0; i < ETH_FCS_LEN; i++) + fcs[i + 1] = ~skb->data[skb->len + i]; + skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum); + } else { + for (i = 0; i < ETH_FCS_LEN; i++) + fcs[i] = ~skb->data[skb->len + i]; + skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum); + } + skb->ip_summed = CHECKSUM_COMPLETE; + } +#endif + + napi_gro_receive(&lp->napi, skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; +out: + current_rx_ptr->status.status_word = 0x00000000; + current_rx_ptr = current_rx_ptr->next; +} + +static int bfin_mac_poll(struct napi_struct *napi, int budget) +{ + int i = 0; + struct bfin_mac_local *lp = container_of(napi, + struct bfin_mac_local, + napi); + + while (current_rx_ptr->status.status_word != 0 && i < budget) { + bfin_mac_rx(lp); + i++; + } + + if (i < budget) { + napi_complete(napi); + if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags)) + enable_irq(IRQ_MAC_RX); + } + + return i; +} + +/* interrupt routine to handle rx and error signal */ +static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id) +{ + struct bfin_mac_local *lp = netdev_priv(dev_id); + u32 status; + + status = bfin_read_DMA1_IRQ_STATUS(); + + bfin_write_DMA1_IRQ_STATUS(status | DMA_DONE | DMA_ERR); + if (status & DMA_DONE) { + disable_irq_nosync(IRQ_MAC_RX); + set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags); + napi_schedule(&lp->napi); + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bfin_mac_poll_controller(struct net_device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + + bfin_mac_interrupt(IRQ_MAC_RX, dev); + tx_reclaim_skb(lp); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static void bfin_mac_disable(void) +{ + unsigned int opmode; + + opmode = bfin_read_EMAC_OPMODE(); + opmode &= (~RE); + opmode &= (~TE); + /* Turn off the EMAC */ + bfin_write_EMAC_OPMODE(opmode); +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static int bfin_mac_enable(struct phy_device *phydev) +{ + int ret; + u32 opmode; + + pr_debug("%s\n", __func__); + + /* Set RX DMA */ + bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); + bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config); + + /* Wait MII done */ + ret = bfin_mdio_poll(); + if (ret) + return ret; + + /* We enable only RX here */ + /* ASTP : Enable Automatic Pad Stripping + PR : Promiscuous Mode for test + PSF : Receive frames with total length less than 64 bytes. + FDMODE : Full Duplex Mode + LB : Internal Loopback for test + RE : Receiver Enable */ + opmode = bfin_read_EMAC_OPMODE(); + if (opmode & FDMODE) + opmode |= PSF; + else + opmode |= DRO | DC | PSF; + opmode |= RE; + + if (phydev->interface == PHY_INTERFACE_MODE_RMII) { + opmode |= RMII; /* For Now only 100MBit are supported */ +#if defined(CONFIG_BF537) || defined(CONFIG_BF536) + if (__SILICON_REVISION__ < 3) { + /* + * This isn't publicly documented (fun times!), but in + * silicon <=0.2, the RX and TX pins are clocked together. + * So in order to recv, we must enable the transmit side + * as well. This will cause a spurious TX interrupt too, + * but we can easily consume that. + */ + opmode |= TE; + } +#endif + } + + /* Turn on the EMAC rx */ + bfin_write_EMAC_OPMODE(opmode); + + return 0; +} + +/* Our watchdog timed out. 
Called by the networking layer */ +static void bfin_mac_timeout(struct net_device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + + pr_debug("%s: %s\n", dev->name, __func__); + + bfin_mac_disable(); + + del_timer(&lp->tx_reclaim_timer); + + /* reset tx queue and free skb */ + while (tx_list_head != current_tx_ptr) { + tx_list_head->desc_a.config &= ~DMAEN; + tx_list_head->status.status_word = 0; + if (tx_list_head->skb) { + dev_kfree_skb(tx_list_head->skb); + tx_list_head->skb = NULL; + } + tx_list_head = tx_list_head->next; + } + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + bfin_mac_enable(lp->phydev); + + /* We can accept TX packets again */ + dev->trans_start = jiffies; /* prevent tx timeout */ +} + +static void bfin_mac_multicast_hash(struct net_device *dev) +{ + u32 emac_hashhi, emac_hashlo; + struct netdev_hw_addr *ha; + u32 crc; + + emac_hashhi = emac_hashlo = 0; + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); + crc >>= 26; + + if (crc & 0x20) + emac_hashhi |= 1 << (crc & 0x1f); + else + emac_hashlo |= 1 << (crc & 0x1f); + } + + bfin_write_EMAC_HASHHI(emac_hashhi); + bfin_write_EMAC_HASHLO(emac_hashlo); +} + +/* + * This routine will, depending on the values passed to it, + * either make it accept multicast packets, go into + * promiscuous mode (for TCPDUMP and cousins) or accept + * a select set of multicast packets + */ +static void bfin_mac_set_multicast_list(struct net_device *dev) +{ + u32 sysctl; + + if (dev->flags & IFF_PROMISC) { + netdev_info(dev, "set promisc mode\n"); + sysctl = bfin_read_EMAC_OPMODE(); + sysctl |= PR; + bfin_write_EMAC_OPMODE(sysctl); + } else if (dev->flags & IFF_ALLMULTI) { + /* accept all multicast */ + sysctl = bfin_read_EMAC_OPMODE(); + sysctl |= PAM; + bfin_write_EMAC_OPMODE(sysctl); + } else if (!netdev_mc_empty(dev)) { + /* set up multicast hash table */ + sysctl = bfin_read_EMAC_OPMODE(); + sysctl |= HM; + bfin_write_EMAC_OPMODE(sysctl); + bfin_mac_multicast_hash(dev); + } else { + /* clear promisc or multicast mode */ + sysctl = bfin_read_EMAC_OPMODE(); + sysctl &= ~(RAF | PAM); + bfin_write_EMAC_OPMODE(sysctl); + } +} + +static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct bfin_mac_local *lp = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + switch (cmd) { + case SIOCSHWTSTAMP: + return bfin_mac_hwtstamp_set(netdev, ifr); + case SIOCGHWTSTAMP: + return bfin_mac_hwtstamp_get(netdev, ifr); + default: + if (lp->phydev) + return phy_mii_ioctl(lp->phydev, ifr, cmd); + else + return -EOPNOTSUPP; + } +} + +/* + * this puts the device in an inactive state + */ +static void bfin_mac_shutdown(struct net_device *dev) +{ + /* Turn off the EMAC */ + bfin_write_EMAC_OPMODE(0x00000000); + /* Turn off the EMAC RX DMA */ + bfin_write_DMA1_CONFIG(0x0000); + bfin_write_DMA2_CONFIG(0x0000); +} + +/* + * Open and Initialize the interface + * + * Set up everything, reset the card, etc.. + */ +static int bfin_mac_open(struct net_device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + int ret; + pr_debug("%s: %s\n", dev->name, __func__); + + /* + * Check that the address is valid. If its not, refuse + * to bring the device up. 
The user must specify an + * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx + */ + if (!is_valid_ether_addr(dev->dev_addr)) { + netdev_warn(dev, "no valid ethernet hw addr\n"); + return -EINVAL; + } + + /* initial rx and tx list */ + ret = desc_list_init(dev); + if (ret) + return ret; + + phy_start(lp->phydev); + setup_system_regs(dev); + setup_mac_addr(dev->dev_addr); + + bfin_mac_disable(); + ret = bfin_mac_enable(lp->phydev); + if (ret) + return ret; + pr_debug("hardware init finished\n"); + + napi_enable(&lp->napi); + netif_start_queue(dev); + netif_carrier_on(dev); + + return 0; +} + +/* + * this makes the board clean up everything that it can + * and not talk to the outside world. Caused by + * an 'ifconfig ethX down' + */ +static int bfin_mac_close(struct net_device *dev) +{ + struct bfin_mac_local *lp = netdev_priv(dev); + pr_debug("%s: %s\n", dev->name, __func__); + + netif_stop_queue(dev); + napi_disable(&lp->napi); + netif_carrier_off(dev); + + phy_stop(lp->phydev); + phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); + + /* clear everything */ + bfin_mac_shutdown(dev); + + /* free the rx/tx buffers */ + desc_list_free(); + + return 0; +} + +static const struct net_device_ops bfin_mac_netdev_ops = { + .ndo_open = bfin_mac_open, + .ndo_stop = bfin_mac_close, + .ndo_start_xmit = bfin_mac_hard_start_xmit, + .ndo_set_mac_address = bfin_mac_set_mac_address, + .ndo_tx_timeout = bfin_mac_timeout, + .ndo_set_rx_mode = bfin_mac_set_multicast_list, + .ndo_do_ioctl = bfin_mac_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bfin_mac_poll_controller, +#endif +}; + +static int bfin_mac_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + struct bfin_mac_local *lp; + struct platform_device *pd; + struct bfin_mii_bus_platform_data *mii_bus_data; + int rc; + + ndev = alloc_etherdev(sizeof(struct bfin_mac_local)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, ndev); + lp = netdev_priv(ndev); + lp->ndev = ndev; + + /* Grab the MAC address in the MAC */ + *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); + *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); + + /* probe mac */ + /*todo: how to proble? which is revision_register */ + bfin_write_EMAC_ADDRLO(0x12345678); + if (bfin_read_EMAC_ADDRLO() != 0x12345678) { + dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n"); + rc = -ENODEV; + goto out_err_probe_mac; + } + + + /* + * Is it valid? (Did bootloader initialize it?) 
+ * Grab the MAC from the board somehow + * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c + */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + if (bfin_get_ether_addr(ndev->dev_addr) || + !is_valid_ether_addr(ndev->dev_addr)) { + /* Still not valid, get a random one */ + netdev_warn(ndev, "Setting Ethernet MAC to a random one\n"); + eth_hw_addr_random(ndev); + } + } + + setup_mac_addr(ndev->dev_addr); + + if (!dev_get_platdata(&pdev->dev)) { + dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n"); + rc = -ENODEV; + goto out_err_probe_mac; + } + pd = dev_get_platdata(&pdev->dev); + lp->mii_bus = platform_get_drvdata(pd); + if (!lp->mii_bus) { + dev_err(&pdev->dev, "Cannot get mii_bus!\n"); + rc = -ENODEV; + goto out_err_probe_mac; + } + lp->mii_bus->priv = ndev; + mii_bus_data = dev_get_platdata(&pd->dev); + + rc = mii_probe(ndev, mii_bus_data->phy_mode); + if (rc) { + dev_err(&pdev->dev, "MII Probe failed!\n"); + goto out_err_mii_probe; + } + + lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; + lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; + + ndev->netdev_ops = &bfin_mac_netdev_ops; + ndev->ethtool_ops = &bfin_mac_ethtool_ops; + + init_timer(&lp->tx_reclaim_timer); + lp->tx_reclaim_timer.data = (unsigned long)lp; + lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout; + + lp->flags = 0; + netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM); + + spin_lock_init(&lp->lock); + + /* now, enable interrupts */ + /* register irq handler */ + rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt, + 0, "EMAC_RX", ndev); + if (rc) { + dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n"); + rc = -EBUSY; + goto out_err_request_irq; + } + + rc = register_netdev(ndev); + if (rc) { + dev_err(&pdev->dev, "Cannot register net device!\n"); + goto out_err_reg_ndev; + } + + bfin_mac_hwtstamp_init(ndev); + rc = bfin_phc_init(ndev, &pdev->dev); + if (rc) { + dev_err(&pdev->dev, "Cannot register PHC device!\n"); + goto out_err_phc; + } + + /* now, print out the card info, in a short format.. 
*/ + netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); + + return 0; + +out_err_phc: +out_err_reg_ndev: + free_irq(IRQ_MAC_RX, ndev); +out_err_request_irq: + netif_napi_del(&lp->napi); +out_err_mii_probe: + mdiobus_unregister(lp->mii_bus); + mdiobus_free(lp->mii_bus); +out_err_probe_mac: + free_netdev(ndev); + + return rc; +} + +static int bfin_mac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct bfin_mac_local *lp = netdev_priv(ndev); + + bfin_phc_release(lp); + + lp->mii_bus->priv = NULL; + + unregister_netdev(ndev); + + netif_napi_del(&lp->napi); + + free_irq(IRQ_MAC_RX, ndev); + + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg) +{ + struct net_device *net_dev = platform_get_drvdata(pdev); + struct bfin_mac_local *lp = netdev_priv(net_dev); + + if (lp->wol) { + bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE); + bfin_write_EMAC_WKUP_CTL(MPKE); + enable_irq_wake(IRQ_MAC_WAKEDET); + } else { + if (netif_running(net_dev)) + bfin_mac_close(net_dev); + } + + return 0; +} + +static int bfin_mac_resume(struct platform_device *pdev) +{ + struct net_device *net_dev = platform_get_drvdata(pdev); + struct bfin_mac_local *lp = netdev_priv(net_dev); + + if (lp->wol) { + bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); + bfin_write_EMAC_WKUP_CTL(0); + disable_irq_wake(IRQ_MAC_WAKEDET); + } else { + if (netif_running(net_dev)) + bfin_mac_open(net_dev); + } + + return 0; +} +#else +#define bfin_mac_suspend NULL +#define bfin_mac_resume NULL +#endif /* CONFIG_PM */ + +static int bfin_mii_bus_probe(struct platform_device *pdev) +{ + struct mii_bus *miibus; + struct bfin_mii_bus_platform_data *mii_bus_pd; + const unsigned short *pin_req; + int rc, i; + + mii_bus_pd = dev_get_platdata(&pdev->dev); + if (!mii_bus_pd) { + dev_err(&pdev->dev, "No peripherals in platform data!\n"); + return -EINVAL; + } + + /* + * We are setting up a network card, + * so set the GPIO pins to Ethernet mode + */ + pin_req = mii_bus_pd->mac_peripherals; + rc = peripheral_request_list(pin_req, KBUILD_MODNAME); + if (rc) { + dev_err(&pdev->dev, "Requesting peripherals failed!\n"); + return rc; + } + + rc = -ENOMEM; + miibus = mdiobus_alloc(); + if (miibus == NULL) + goto out_err_alloc; + miibus->read = bfin_mdiobus_read; + miibus->write = bfin_mdiobus_write; + + miibus->parent = &pdev->dev; + miibus->name = "bfin_mii_bus"; + miibus->phy_mask = mii_bus_pd->phy_mask; + + snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (!miibus->irq) + goto out_err_irq_alloc; + + for (i = rc; i < PHY_MAX_ADDR; ++i) + miibus->irq[i] = PHY_POLL; + + rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR); + if (rc != mii_bus_pd->phydev_number) + dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n", + mii_bus_pd->phydev_number); + for (i = 0; i < rc; ++i) { + unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr; + if (phyaddr < PHY_MAX_ADDR) + miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq; + else + dev_err(&pdev->dev, + "Invalid PHY address %i for phydev %i\n", + phyaddr, i); + } + + rc = mdiobus_register(miibus); + if (rc) { + dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + goto out_err_mdiobus_register; + } + + platform_set_drvdata(pdev, miibus); + return 0; + +out_err_mdiobus_register: + kfree(miibus->irq); +out_err_irq_alloc: + mdiobus_free(miibus); +out_err_alloc: + 
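	/*
	 * The error labels here unwind in the reverse order of acquisition,
	 * so each failure point releases only what was already set up.
	 * A sketch of the idiom with hypothetical resources (not this
	 * driver's names):
	 *
	 *	if (acquire_a() < 0) goto err_a;
	 *	if (acquire_b() < 0) goto err_b;
	 *	if (acquire_c() < 0) goto err_c;
	 *	return 0;
	 * err_c:
	 *	release_b();
	 * err_b:
	 *	release_a();
	 * err_a:
	 *	return ret;
	 */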
peripheral_free_list(pin_req); + + return rc; +} + +static int bfin_mii_bus_remove(struct platform_device *pdev) +{ + struct mii_bus *miibus = platform_get_drvdata(pdev); + struct bfin_mii_bus_platform_data *mii_bus_pd = + dev_get_platdata(&pdev->dev); + + mdiobus_unregister(miibus); + kfree(miibus->irq); + mdiobus_free(miibus); + peripheral_free_list(mii_bus_pd->mac_peripherals); + + return 0; +} + +static struct platform_driver bfin_mii_bus_driver = { + .probe = bfin_mii_bus_probe, + .remove = bfin_mii_bus_remove, + .driver = { + .name = "bfin_mii_bus", + }, +}; + +static struct platform_driver bfin_mac_driver = { + .probe = bfin_mac_probe, + .remove = bfin_mac_remove, + .resume = bfin_mac_resume, + .suspend = bfin_mac_suspend, + .driver = { + .name = KBUILD_MODNAME, + }, +}; + +static int __init bfin_mac_init(void) +{ + int ret; + ret = platform_driver_register(&bfin_mii_bus_driver); + if (!ret) + return platform_driver_register(&bfin_mac_driver); + return -ENODEV; +} + +module_init(bfin_mac_init); + +static void __exit bfin_mac_cleanup(void) +{ + platform_driver_unregister(&bfin_mac_driver); + platform_driver_unregister(&bfin_mii_bus_driver); +} + +module_exit(bfin_mac_cleanup); + diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h new file mode 100644 index 000000000..d1217db70 --- /dev/null +++ b/drivers/net/ethernet/adi/bfin_mac.h @@ -0,0 +1,112 @@ +/* + * Blackfin On-Chip MAC Driver + * + * Copyright 2004-2007 Analog Devices Inc. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ +#ifndef _BFIN_MAC_H_ +#define _BFIN_MAC_H_ + +#include +#include +#include +#include +#include + +/* + * Disable hardware checksum for bug #5600 if writeback cache is + * enabled. Otherwize, corrupted RX packet will be sent up stack + * without error mark. 
+ */ +#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK +#define BFIN_MAC_CSUM_OFFLOAD +#endif + +#define TX_RECLAIM_JIFFIES (HZ / 5) +#define BFIN_MAC_RX_IRQ_DISABLED 1 + +struct dma_descriptor { + struct dma_descriptor *next_dma_desc; + unsigned long start_addr; + unsigned short config; + unsigned short x_count; +}; + +struct status_area_rx { +#if defined(BFIN_MAC_CSUM_OFFLOAD) + unsigned short ip_hdr_csum; /* ip header checksum */ + /* ip payload(udp or tcp or others) checksum */ + unsigned short ip_payload_csum; +#endif + unsigned long status_word; /* the frame status word */ +}; + +struct status_area_tx { + unsigned long status_word; /* the frame status word */ +}; + +/* use two descriptors for a packet */ +struct net_dma_desc_rx { + struct net_dma_desc_rx *next; + struct sk_buff *skb; + struct dma_descriptor desc_a; + struct dma_descriptor desc_b; + struct status_area_rx status; +}; + +/* use two descriptors for a packet */ +struct net_dma_desc_tx { + struct net_dma_desc_tx *next; + struct sk_buff *skb; + struct dma_descriptor desc_a; + struct dma_descriptor desc_b; + unsigned char packet[1560]; + struct status_area_tx status; +}; + +struct bfin_mac_local { + /* + * these are things that the kernel wants me to keep, so users + * can find out semi-useless statistics of how well the card is + * performing + */ + struct net_device_stats stats; + + spinlock_t lock; + + int wol; /* Wake On Lan */ + int irq_wake_requested; + struct timer_list tx_reclaim_timer; + struct net_device *ndev; + struct napi_struct napi; + unsigned long flags; + + /* Data for EMAC_VLAN1 regs */ + u16 vlan1_mask, vlan2_mask; + + /* MII and PHY stuffs */ + int old_link; /* used by bf537_adjust_link */ + int old_speed; + int old_duplex; + + struct phy_device *phydev; + struct mii_bus *mii_bus; + +#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP) + u32 addend; + unsigned int shift; + s32 max_ppb; + struct hwtstamp_config stamp_cfg; + struct ptp_clock_info caps; + struct ptp_clock *clock; + int phc_index; + spinlock_t phc_lock; /* protects time lo/hi registers */ +#endif +}; + +int bfin_get_ether_addr(char *addr); + +#endif diff --git a/drivers/net/ethernet/aeroflex/Kconfig b/drivers/net/ethernet/aeroflex/Kconfig new file mode 100644 index 000000000..4f4a8d78f --- /dev/null +++ b/drivers/net/ethernet/aeroflex/Kconfig @@ -0,0 +1,11 @@ +# +# Aeroflex Gaisler network device configuration +# + +config GRETH + tristate "Aeroflex Gaisler GRETH Ethernet MAC support" + depends on SPARC + select PHYLIB + select CRC32 + ---help--- + Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC. diff --git a/drivers/net/ethernet/aeroflex/Makefile b/drivers/net/ethernet/aeroflex/Makefile new file mode 100644 index 000000000..6e62a6792 --- /dev/null +++ b/drivers/net/ethernet/aeroflex/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Aeroflex Gaisler network device drivers. +# + +obj-$(CONFIG_GRETH) += greth.o diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c new file mode 100644 index 000000000..ae89de7de --- /dev/null +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -0,0 +1,1616 @@ +/* + * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC. + * + * 2005-2010 (c) Aeroflex Gaisler AB + * + * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs + * available in the GRLIB VHDL IP core library. 
+ * + * Full documentation of both cores can be found here: + * http://www.gaisler.com/products/grlib/grip.pdf + * + * The Gigabit version supports scatter/gather DMA, any alignment of + * buffers and checksum offloading. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Contributors: Kristoffer Glembo + * Daniel Hellstrom + * Marko Isomaki + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SPARC +#include +#endif + +#include "greth.h" + +#define GRETH_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */ +module_param(greth_debug, int, 0); +MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value"); + +/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */ +static int macaddr[6]; +module_param_array(macaddr, int, NULL, 0); +MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address"); + +static int greth_edcl = 1; +module_param(greth_edcl, int, 0); +MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used."); + +static int greth_open(struct net_device *dev); +static netdev_tx_t greth_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb, + struct net_device *dev); +static int greth_rx(struct net_device *dev, int limit); +static int greth_rx_gbit(struct net_device *dev, int limit); +static void greth_clean_tx(struct net_device *dev); +static void greth_clean_tx_gbit(struct net_device *dev); +static irqreturn_t greth_interrupt(int irq, void *dev_id); +static int greth_close(struct net_device *dev); +static int greth_set_mac_add(struct net_device *dev, void *p); +static void greth_set_multicast_list(struct net_device *dev); + +#define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a)))) +#define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a))) +#define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v)))) +#define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v)))) + +#define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK) +#define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK) +#define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK) + +static void greth_print_rx_packet(void *addr, int len) +{ + print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1, + addr, len, true); +} + +static void greth_print_tx_packet(struct sk_buff *skb) +{ + int i; + int length; + + if (skb_shinfo(skb)->nr_frags == 0) + length = skb->len; + else + length = skb_headlen(skb); + + print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, length, true); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + + print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, + skb_frag_address(&skb_shinfo(skb)->frags[i]), + skb_shinfo(skb)->frags[i].size, true); + } +} + +static inline void greth_enable_tx(struct greth_private *greth) +{ + wmb(); + GRETH_REGORIN(greth->regs->control, GRETH_TXEN); +} + +static inline void greth_enable_tx_and_irq(struct greth_private *greth) +{ + wmb(); /* BDs must been written to 
memory before enabling TX */ + GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI); +} + +static inline void greth_disable_tx(struct greth_private *greth) +{ + GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN); +} + +static inline void greth_enable_rx(struct greth_private *greth) +{ + wmb(); + GRETH_REGORIN(greth->regs->control, GRETH_RXEN); +} + +static inline void greth_disable_rx(struct greth_private *greth) +{ + GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN); +} + +static inline void greth_enable_irqs(struct greth_private *greth) +{ + GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI); +} + +static inline void greth_disable_irqs(struct greth_private *greth) +{ + GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI)); +} + +static inline void greth_write_bd(u32 *bd, u32 val) +{ + __raw_writel(cpu_to_be32(val), bd); +} + +static inline u32 greth_read_bd(u32 *bd) +{ + return be32_to_cpu(__raw_readl(bd)); +} + +static void greth_clean_rings(struct greth_private *greth) +{ + int i; + struct greth_bd *rx_bdp = greth->rx_bd_base; + struct greth_bd *tx_bdp = greth->tx_bd_base; + + if (greth->gbit_mac) { + + /* Free and unmap RX buffers */ + for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) { + if (greth->rx_skbuff[i] != NULL) { + dev_kfree_skb(greth->rx_skbuff[i]); + dma_unmap_single(greth->dev, + greth_read_bd(&rx_bdp->addr), + MAX_FRAME_SIZE+NET_IP_ALIGN, + DMA_FROM_DEVICE); + } + } + + /* TX buffers */ + while (greth->tx_free < GRETH_TXBD_NUM) { + + struct sk_buff *skb = greth->tx_skbuff[greth->tx_last]; + int nr_frags = skb_shinfo(skb)->nr_frags; + tx_bdp = greth->tx_bd_base + greth->tx_last; + greth->tx_last = NEXT_TX(greth->tx_last); + + dma_unmap_single(greth->dev, + greth_read_bd(&tx_bdp->addr), + skb_headlen(skb), + DMA_TO_DEVICE); + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + tx_bdp = greth->tx_bd_base + greth->tx_last; + + dma_unmap_page(greth->dev, + greth_read_bd(&tx_bdp->addr), + skb_frag_size(frag), + DMA_TO_DEVICE); + + greth->tx_last = NEXT_TX(greth->tx_last); + } + greth->tx_free += nr_frags+1; + dev_kfree_skb(skb); + } + + + } else { /* 10/100 Mbps MAC */ + + for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) { + kfree(greth->rx_bufs[i]); + dma_unmap_single(greth->dev, + greth_read_bd(&rx_bdp->addr), + MAX_FRAME_SIZE, + DMA_FROM_DEVICE); + } + for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) { + kfree(greth->tx_bufs[i]); + dma_unmap_single(greth->dev, + greth_read_bd(&tx_bdp->addr), + MAX_FRAME_SIZE, + DMA_TO_DEVICE); + } + } +} + +static int greth_init_rings(struct greth_private *greth) +{ + struct sk_buff *skb; + struct greth_bd *rx_bd, *tx_bd; + u32 dma_addr; + int i; + + rx_bd = greth->rx_bd_base; + tx_bd = greth->tx_bd_base; + + /* Initialize descriptor rings and buffers */ + if (greth->gbit_mac) { + + for (i = 0; i < GRETH_RXBD_NUM; i++) { + skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN); + if (skb == NULL) { + if (netif_msg_ifup(greth)) + dev_err(greth->dev, "Error allocating DMA ring.\n"); + goto cleanup; + } + skb_reserve(skb, NET_IP_ALIGN); + dma_addr = dma_map_single(greth->dev, + skb->data, + MAX_FRAME_SIZE+NET_IP_ALIGN, + DMA_FROM_DEVICE); + + if (dma_mapping_error(greth->dev, dma_addr)) { + if (netif_msg_ifup(greth)) + dev_err(greth->dev, "Could not create initial DMA mapping\n"); + goto cleanup; + } + greth->rx_skbuff[i] = skb; + greth_write_bd(&rx_bd[i].addr, dma_addr); + greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE); + } + + } else { + + /* 10/100 MAC uses a fixed set 
of buffers and copy to/from SKBs */ + for (i = 0; i < GRETH_RXBD_NUM; i++) { + + greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL); + + if (greth->rx_bufs[i] == NULL) { + if (netif_msg_ifup(greth)) + dev_err(greth->dev, "Error allocating DMA ring.\n"); + goto cleanup; + } + + dma_addr = dma_map_single(greth->dev, + greth->rx_bufs[i], + MAX_FRAME_SIZE, + DMA_FROM_DEVICE); + + if (dma_mapping_error(greth->dev, dma_addr)) { + if (netif_msg_ifup(greth)) + dev_err(greth->dev, "Could not create initial DMA mapping\n"); + goto cleanup; + } + greth_write_bd(&rx_bd[i].addr, dma_addr); + greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE); + } + for (i = 0; i < GRETH_TXBD_NUM; i++) { + + greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL); + + if (greth->tx_bufs[i] == NULL) { + if (netif_msg_ifup(greth)) + dev_err(greth->dev, "Error allocating DMA ring.\n"); + goto cleanup; + } + + dma_addr = dma_map_single(greth->dev, + greth->tx_bufs[i], + MAX_FRAME_SIZE, + DMA_TO_DEVICE); + + if (dma_mapping_error(greth->dev, dma_addr)) { + if (netif_msg_ifup(greth)) + dev_err(greth->dev, "Could not create initial DMA mapping\n"); + goto cleanup; + } + greth_write_bd(&tx_bd[i].addr, dma_addr); + greth_write_bd(&tx_bd[i].stat, 0); + } + } + greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat, + greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR); + + /* Initialize pointers. */ + greth->rx_cur = 0; + greth->tx_next = 0; + greth->tx_last = 0; + greth->tx_free = GRETH_TXBD_NUM; + + /* Initialize descriptor base address */ + GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys); + GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys); + + return 0; + +cleanup: + greth_clean_rings(greth); + return -ENOMEM; +} + +static int greth_open(struct net_device *dev) +{ + struct greth_private *greth = netdev_priv(dev); + int err; + + err = greth_init_rings(greth); + if (err) { + if (netif_msg_ifup(greth)) + dev_err(&dev->dev, "Could not allocate memory for DMA rings\n"); + return err; + } + + err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev); + if (err) { + if (netif_msg_ifup(greth)) + dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq); + greth_clean_rings(greth); + return err; + } + + if (netif_msg_ifup(greth)) + dev_dbg(&dev->dev, " starting queue\n"); + netif_start_queue(dev); + + GRETH_REGSAVE(greth->regs->status, 0xFF); + + napi_enable(&greth->napi); + + greth_enable_irqs(greth); + greth_enable_tx(greth); + greth_enable_rx(greth); + return 0; + +} + +static int greth_close(struct net_device *dev) +{ + struct greth_private *greth = netdev_priv(dev); + + napi_disable(&greth->napi); + + greth_disable_irqs(greth); + greth_disable_tx(greth); + greth_disable_rx(greth); + + netif_stop_queue(dev); + + free_irq(greth->irq, (void *) dev); + + greth_clean_rings(greth); + + return 0; +} + +static netdev_tx_t +greth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct greth_private *greth = netdev_priv(dev); + struct greth_bd *bdp; + int err = NETDEV_TX_OK; + u32 status, dma_addr, ctrl; + unsigned long flags; + + /* Clean TX Ring */ + greth_clean_tx(greth->netdev); + + if (unlikely(greth->tx_free <= 0)) { + spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ + ctrl = GRETH_REGLOAD(greth->regs->control); + /* Enable TX IRQ only if not already in poll() routine */ + if (ctrl & GRETH_RXI) + GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); + netif_stop_queue(dev); + spin_unlock_irqrestore(&greth->devlock, flags); + return 
NETDEV_TX_BUSY; + } + + if (netif_msg_pktdata(greth)) + greth_print_tx_packet(skb); + + + if (unlikely(skb->len > MAX_FRAME_SIZE)) { + dev->stats.tx_errors++; + goto out; + } + + bdp = greth->tx_bd_base + greth->tx_next; + dma_addr = greth_read_bd(&bdp->addr); + + memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len); + + dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE); + + status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN); + greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN; + + /* Wrap around descriptor ring */ + if (greth->tx_next == GRETH_TXBD_NUM_MASK) { + status |= GRETH_BD_WR; + } + + greth->tx_next = NEXT_TX(greth->tx_next); + greth->tx_free--; + + /* Write descriptor control word and enable transmission */ + greth_write_bd(&bdp->stat, status); + spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ + greth_enable_tx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); + +out: + dev_kfree_skb(skb); + return err; +} + +static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next) +{ + if (tx_next < tx_last) + return (tx_last - tx_next) - 1; + else + return GRETH_TXBD_NUM - (tx_next - tx_last) - 1; +} + +static netdev_tx_t +greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) +{ + struct greth_private *greth = netdev_priv(dev); + struct greth_bd *bdp; + u32 status, dma_addr; + int curr_tx, nr_frags, i, err = NETDEV_TX_OK; + unsigned long flags; + u16 tx_last; + + nr_frags = skb_shinfo(skb)->nr_frags; + tx_last = greth->tx_last; + rmb(); /* tx_last is updated by the poll task */ + + if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) { + netif_stop_queue(dev); + err = NETDEV_TX_BUSY; + goto out; + } + + if (netif_msg_pktdata(greth)) + greth_print_tx_packet(skb); + + if (unlikely(skb->len > MAX_FRAME_SIZE)) { + dev->stats.tx_errors++; + goto out; + } + + /* Save skb pointer. 
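+	 * The pointer is kept so greth_clean_tx_gbit() (or greth_clean_rings()
+	 * on close) can unmap and free the skb once the hardware has finished
+	 * with the last fragment.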
*/ + greth->tx_skbuff[greth->tx_next] = skb; + + /* Linear buf */ + if (nr_frags != 0) + status = GRETH_TXBD_MORE; + else + status = GRETH_BD_IE; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + status |= GRETH_TXBD_CSALL; + status |= skb_headlen(skb) & GRETH_BD_LEN; + if (greth->tx_next == GRETH_TXBD_NUM_MASK) + status |= GRETH_BD_WR; + + + bdp = greth->tx_bd_base + greth->tx_next; + greth_write_bd(&bdp->stat, status); + dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(greth->dev, dma_addr))) + goto map_error; + + greth_write_bd(&bdp->addr, dma_addr); + + curr_tx = NEXT_TX(greth->tx_next); + + /* Frags */ + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + greth->tx_skbuff[curr_tx] = NULL; + bdp = greth->tx_bd_base + curr_tx; + + status = GRETH_BD_EN; + if (skb->ip_summed == CHECKSUM_PARTIAL) + status |= GRETH_TXBD_CSALL; + status |= skb_frag_size(frag) & GRETH_BD_LEN; + + /* Wrap around descriptor ring */ + if (curr_tx == GRETH_TXBD_NUM_MASK) + status |= GRETH_BD_WR; + + /* More fragments left */ + if (i < nr_frags - 1) + status |= GRETH_TXBD_MORE; + else + status |= GRETH_BD_IE; /* enable IRQ on last fragment */ + + greth_write_bd(&bdp->stat, status); + + dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(greth->dev, dma_addr))) + goto frag_map_error; + + greth_write_bd(&bdp->addr, dma_addr); + + curr_tx = NEXT_TX(curr_tx); + } + + wmb(); + + /* Enable the descriptor chain by enabling the first descriptor */ + bdp = greth->tx_bd_base + greth->tx_next; + greth_write_bd(&bdp->stat, + greth_read_bd(&bdp->stat) | GRETH_BD_EN); + + spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ + greth->tx_next = curr_tx; + greth_enable_tx_and_irq(greth); + spin_unlock_irqrestore(&greth->devlock, flags); + + return NETDEV_TX_OK; + +frag_map_error: + /* Unmap SKB mappings that succeeded and disable descriptor */ + for (i = 0; greth->tx_next + i != curr_tx; i++) { + bdp = greth->tx_bd_base + greth->tx_next + i; + dma_unmap_single(greth->dev, + greth_read_bd(&bdp->addr), + greth_read_bd(&bdp->stat) & GRETH_BD_LEN, + DMA_TO_DEVICE); + greth_write_bd(&bdp->stat, 0); + } +map_error: + if (net_ratelimit()) + dev_warn(greth->dev, "Could not create TX DMA mapping\n"); + dev_kfree_skb(skb); +out: + return err; +} + +static irqreturn_t greth_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct greth_private *greth; + u32 status, ctrl; + irqreturn_t retval = IRQ_NONE; + + greth = netdev_priv(dev); + + spin_lock(&greth->devlock); + + /* Get the interrupt events that caused us to be here. */ + status = GRETH_REGLOAD(greth->regs->status); + + /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be + * set regardless of whether IRQ is enabled or not. Especially + * important when shared IRQ. 
+ */ + ctrl = GRETH_REGLOAD(greth->regs->control); + + /* Handle rx and tx interrupts through poll */ + if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) || + ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) { + retval = IRQ_HANDLED; + + /* Disable interrupts and schedule poll() */ + greth_disable_irqs(greth); + napi_schedule(&greth->napi); + } + + mmiowb(); + spin_unlock(&greth->devlock); + + return retval; +} + +static void greth_clean_tx(struct net_device *dev) +{ + struct greth_private *greth; + struct greth_bd *bdp; + u32 stat; + + greth = netdev_priv(dev); + + while (1) { + bdp = greth->tx_bd_base + greth->tx_last; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); + mb(); + stat = greth_read_bd(&bdp->stat); + + if (unlikely(stat & GRETH_BD_EN)) + break; + + if (greth->tx_free == GRETH_TXBD_NUM) + break; + + /* Check status for errors */ + if (unlikely(stat & GRETH_TXBD_STATUS)) { + dev->stats.tx_errors++; + if (stat & GRETH_TXBD_ERR_AL) + dev->stats.tx_aborted_errors++; + if (stat & GRETH_TXBD_ERR_UE) + dev->stats.tx_fifo_errors++; + } + dev->stats.tx_packets++; + dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last]; + greth->tx_last = NEXT_TX(greth->tx_last); + greth->tx_free++; + } + + if (greth->tx_free > 0) { + netif_wake_queue(dev); + } +} + +static inline void greth_update_tx_stats(struct net_device *dev, u32 stat) +{ + /* Check status for errors */ + if (unlikely(stat & GRETH_TXBD_STATUS)) { + dev->stats.tx_errors++; + if (stat & GRETH_TXBD_ERR_AL) + dev->stats.tx_aborted_errors++; + if (stat & GRETH_TXBD_ERR_UE) + dev->stats.tx_fifo_errors++; + if (stat & GRETH_TXBD_ERR_LC) + dev->stats.tx_aborted_errors++; + } + dev->stats.tx_packets++; +} + +static void greth_clean_tx_gbit(struct net_device *dev) +{ + struct greth_private *greth; + struct greth_bd *bdp, *bdp_last_frag; + struct sk_buff *skb = NULL; + u32 stat; + int nr_frags, i; + u16 tx_last; + + greth = netdev_priv(dev); + tx_last = greth->tx_last; + + while (tx_last != greth->tx_next) { + + skb = greth->tx_skbuff[tx_last]; + + nr_frags = skb_shinfo(skb)->nr_frags; + + /* We only clean fully completed SKBs */ + bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags); + + GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); + mb(); + stat = greth_read_bd(&bdp_last_frag->stat); + + if (stat & GRETH_BD_EN) + break; + + greth->tx_skbuff[tx_last] = NULL; + + greth_update_tx_stats(dev, stat); + dev->stats.tx_bytes += skb->len; + + bdp = greth->tx_bd_base + tx_last; + + tx_last = NEXT_TX(tx_last); + + dma_unmap_single(greth->dev, + greth_read_bd(&bdp->addr), + skb_headlen(skb), + DMA_TO_DEVICE); + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + bdp = greth->tx_bd_base + tx_last; + + dma_unmap_page(greth->dev, + greth_read_bd(&bdp->addr), + skb_frag_size(frag), + DMA_TO_DEVICE); + + tx_last = NEXT_TX(tx_last); + } + dev_kfree_skb(skb); + } + if (skb) { /* skb is set only if the above while loop was entered */ + wmb(); + greth->tx_last = tx_last; + + if (netif_queue_stopped(dev) && + (greth_num_free_bds(tx_last, greth->tx_next) > + (MAX_SKB_FRAGS+1))) + netif_wake_queue(dev); + } +} + +static int greth_rx(struct net_device *dev, int limit) +{ + struct greth_private *greth; + struct greth_bd *bdp; + struct sk_buff *skb; + int pkt_len; + int bad, count; + u32 status, dma_addr; + unsigned long flags; + + greth = netdev_priv(dev); + + for (count = 0; count < limit; ++count) { + + bdp = greth->rx_bd_base + 
greth->rx_cur; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); + mb(); + status = greth_read_bd(&bdp->stat); + + if (unlikely(status & GRETH_BD_EN)) { + break; + } + + dma_addr = greth_read_bd(&bdp->addr); + bad = 0; + + /* Check status for errors. */ + if (unlikely(status & GRETH_RXBD_STATUS)) { + if (status & GRETH_RXBD_ERR_FT) { + dev->stats.rx_length_errors++; + bad = 1; + } + if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) { + dev->stats.rx_frame_errors++; + bad = 1; + } + if (status & GRETH_RXBD_ERR_CRC) { + dev->stats.rx_crc_errors++; + bad = 1; + } + } + if (unlikely(bad)) { + dev->stats.rx_errors++; + + } else { + + pkt_len = status & GRETH_BD_LEN; + + skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN); + + if (unlikely(skb == NULL)) { + + if (net_ratelimit()) + dev_warn(&dev->dev, "low on memory - " "packet dropped\n"); + + dev->stats.rx_dropped++; + + } else { + skb_reserve(skb, NET_IP_ALIGN); + + dma_sync_single_for_cpu(greth->dev, + dma_addr, + pkt_len, + DMA_FROM_DEVICE); + + if (netif_msg_pktdata(greth)) + greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len); + + memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len); + + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + netif_receive_skb(skb); + } + } + + status = GRETH_BD_EN | GRETH_BD_IE; + if (greth->rx_cur == GRETH_RXBD_NUM_MASK) { + status |= GRETH_BD_WR; + } + + wmb(); + greth_write_bd(&bdp->stat, status); + + dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE); + + spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */ + greth_enable_rx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); + + greth->rx_cur = NEXT_RX(greth->rx_cur); + } + + return count; +} + +static inline int hw_checksummed(u32 status) +{ + + if (status & GRETH_RXBD_IP_FRAG) + return 0; + + if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR) + return 0; + + if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR) + return 0; + + if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR) + return 0; + + return 1; +} + +static int greth_rx_gbit(struct net_device *dev, int limit) +{ + struct greth_private *greth; + struct greth_bd *bdp; + struct sk_buff *skb, *newskb; + int pkt_len; + int bad, count = 0; + u32 status, dma_addr; + unsigned long flags; + + greth = netdev_priv(dev); + + for (count = 0; count < limit; ++count) { + + bdp = greth->rx_bd_base + greth->rx_cur; + skb = greth->rx_skbuff[greth->rx_cur]; + GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX); + mb(); + status = greth_read_bd(&bdp->stat); + bad = 0; + + if (status & GRETH_BD_EN) + break; + + /* Check status for errors. */ + if (unlikely(status & GRETH_RXBD_STATUS)) { + + if (status & GRETH_RXBD_ERR_FT) { + dev->stats.rx_length_errors++; + bad = 1; + } else if (status & + (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) { + dev->stats.rx_frame_errors++; + bad = 1; + } else if (status & GRETH_RXBD_ERR_CRC) { + dev->stats.rx_crc_errors++; + bad = 1; + } + } + + /* Allocate new skb to replace current, not needed if the + * current skb can be reused */ + if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) { + skb_reserve(newskb, NET_IP_ALIGN); + + dma_addr = dma_map_single(greth->dev, + newskb->data, + MAX_FRAME_SIZE + NET_IP_ALIGN, + DMA_FROM_DEVICE); + + if (!dma_mapping_error(greth->dev, dma_addr)) { + /* Process the incoming frame. 
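+				 * The replacement mapping succeeded, so hand
+				 * the filled skb to the stack and install
+				 * newskb in this descriptor slot.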
*/ + pkt_len = status & GRETH_BD_LEN; + + dma_unmap_single(greth->dev, + greth_read_bd(&bdp->addr), + MAX_FRAME_SIZE + NET_IP_ALIGN, + DMA_FROM_DEVICE); + + if (netif_msg_pktdata(greth)) + greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len); + + skb_put(skb, pkt_len); + + if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + netif_receive_skb(skb); + + greth->rx_skbuff[greth->rx_cur] = newskb; + greth_write_bd(&bdp->addr, dma_addr); + } else { + if (net_ratelimit()) + dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n"); + dev_kfree_skb(newskb); + /* reusing current skb, so it is a drop */ + dev->stats.rx_dropped++; + } + } else if (bad) { + /* Bad Frame transfer, the skb is reused */ + dev->stats.rx_dropped++; + } else { + /* Failed Allocating a new skb. This is rather stupid + * but the current "filled" skb is reused, as if + * transfer failure. One could argue that RX descriptor + * table handling should be divided into cleaning and + * filling as the TX part of the driver + */ + if (net_ratelimit()) + dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n"); + /* reusing current skb, so it is a drop */ + dev->stats.rx_dropped++; + } + + status = GRETH_BD_EN | GRETH_BD_IE; + if (greth->rx_cur == GRETH_RXBD_NUM_MASK) { + status |= GRETH_BD_WR; + } + + wmb(); + greth_write_bd(&bdp->stat, status); + spin_lock_irqsave(&greth->devlock, flags); + greth_enable_rx(greth); + spin_unlock_irqrestore(&greth->devlock, flags); + greth->rx_cur = NEXT_RX(greth->rx_cur); + } + + return count; + +} + +static int greth_poll(struct napi_struct *napi, int budget) +{ + struct greth_private *greth; + int work_done = 0; + unsigned long flags; + u32 mask, ctrl; + greth = container_of(napi, struct greth_private, napi); + +restart_txrx_poll: + if (greth->gbit_mac) { + greth_clean_tx_gbit(greth->netdev); + work_done += greth_rx_gbit(greth->netdev, budget - work_done); + } else { + if (netif_queue_stopped(greth->netdev)) + greth_clean_tx(greth->netdev); + work_done += greth_rx(greth->netdev, budget - work_done); + } + + if (work_done < budget) { + + spin_lock_irqsave(&greth->devlock, flags); + + ctrl = GRETH_REGLOAD(greth->regs->control); + if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) || + (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) { + GRETH_REGSAVE(greth->regs->control, + ctrl | GRETH_TXI | GRETH_RXI); + mask = GRETH_INT_RX | GRETH_INT_RE | + GRETH_INT_TX | GRETH_INT_TE; + } else { + GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI); + mask = GRETH_INT_RX | GRETH_INT_RE; + } + + if (GRETH_REGLOAD(greth->regs->status) & mask) { + GRETH_REGSAVE(greth->regs->control, ctrl); + spin_unlock_irqrestore(&greth->devlock, flags); + goto restart_txrx_poll; + } else { + __napi_complete(napi); + spin_unlock_irqrestore(&greth->devlock, flags); + } + } + + return work_done; +} + +static int greth_set_mac_add(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct greth_private *greth; + struct greth_regs *regs; + + greth = netdev_priv(dev); + regs = greth->regs; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]); + GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 
| + dev->dev_addr[4] << 8 | dev->dev_addr[5]); + + return 0; +} + +static u32 greth_hash_get_index(__u8 *addr) +{ + return (ether_crc(6, addr)) & 0x3F; +} + +static void greth_set_hash_filter(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct greth_private *greth = netdev_priv(dev); + struct greth_regs *regs = greth->regs; + u32 mc_filter[2]; + unsigned int bitnr; + + mc_filter[0] = mc_filter[1] = 0; + + netdev_for_each_mc_addr(ha, dev) { + bitnr = greth_hash_get_index(ha->addr); + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); + } + + GRETH_REGSAVE(regs->hash_msb, mc_filter[1]); + GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]); +} + +static void greth_set_multicast_list(struct net_device *dev) +{ + int cfg; + struct greth_private *greth = netdev_priv(dev); + struct greth_regs *regs = greth->regs; + + cfg = GRETH_REGLOAD(regs->control); + if (dev->flags & IFF_PROMISC) + cfg |= GRETH_CTRL_PR; + else + cfg &= ~GRETH_CTRL_PR; + + if (greth->multicast) { + if (dev->flags & IFF_ALLMULTI) { + GRETH_REGSAVE(regs->hash_msb, -1); + GRETH_REGSAVE(regs->hash_lsb, -1); + cfg |= GRETH_CTRL_MCEN; + GRETH_REGSAVE(regs->control, cfg); + return; + } + + if (netdev_mc_empty(dev)) { + cfg &= ~GRETH_CTRL_MCEN; + GRETH_REGSAVE(regs->control, cfg); + return; + } + + /* Setup multicast filter */ + greth_set_hash_filter(dev); + cfg |= GRETH_CTRL_MCEN; + } + GRETH_REGSAVE(regs->control, cfg); +} + +static u32 greth_get_msglevel(struct net_device *dev) +{ + struct greth_private *greth = netdev_priv(dev); + return greth->msg_enable; +} + +static void greth_set_msglevel(struct net_device *dev, u32 value) +{ + struct greth_private *greth = netdev_priv(dev); + greth->msg_enable = value; +} +static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct greth_private *greth = netdev_priv(dev); + struct phy_device *phy = greth->phy; + + if (!phy) + return -ENODEV; + + return phy_ethtool_gset(phy, cmd); +} + +static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct greth_private *greth = netdev_priv(dev); + struct phy_device *phy = greth->phy; + + if (!phy) + return -ENODEV; + + return phy_ethtool_sset(phy, cmd); +} + +static int greth_get_regs_len(struct net_device *dev) +{ + return sizeof(struct greth_regs); +} + +static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct greth_private *greth = netdev_priv(dev); + + strlcpy(info->driver, dev_driver_string(greth->dev), + sizeof(info->driver)); + strlcpy(info->version, "revision: 1.0", sizeof(info->version)); + strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + info->eedump_len = 0; + info->regdump_len = sizeof(struct greth_regs); +} + +static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + int i; + struct greth_private *greth = netdev_priv(dev); + u32 __iomem *greth_regs = (u32 __iomem *) greth->regs; + u32 *buff = p; + + for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++) + buff[i] = greth_read_bd(&greth_regs[i]); +} + +static const struct ethtool_ops greth_ethtool_ops = { + .get_msglevel = greth_get_msglevel, + .set_msglevel = greth_set_msglevel, + .get_settings = greth_get_settings, + .set_settings = greth_set_settings, + .get_drvinfo = greth_get_drvinfo, + .get_regs_len = greth_get_regs_len, + .get_regs = greth_get_regs, + .get_link = ethtool_op_get_link, +}; + +static struct net_device_ops greth_netdev_ops = { + .ndo_open = 
greth_open, + .ndo_stop = greth_close, + .ndo_start_xmit = greth_start_xmit, + .ndo_set_mac_address = greth_set_mac_add, + .ndo_validate_addr = eth_validate_addr, +}; + +static inline int wait_for_mdio(struct greth_private *greth) +{ + unsigned long timeout = jiffies + 4*HZ/100; + while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) { + if (time_after(jiffies, timeout)) + return 0; + } + return 1; +} + +static int greth_mdio_read(struct mii_bus *bus, int phy, int reg) +{ + struct greth_private *greth = bus->priv; + int data; + + if (!wait_for_mdio(greth)) + return -EBUSY; + + GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2); + + if (!wait_for_mdio(greth)) + return -EBUSY; + + if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) { + data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF; + return data; + + } else { + return -1; + } +} + +static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) +{ + struct greth_private *greth = bus->priv; + + if (!wait_for_mdio(greth)) + return -EBUSY; + + GRETH_REGSAVE(greth->regs->mdio, + ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1); + + if (!wait_for_mdio(greth)) + return -EBUSY; + + return 0; +} + +static void greth_link_change(struct net_device *dev) +{ + struct greth_private *greth = netdev_priv(dev); + struct phy_device *phydev = greth->phy; + unsigned long flags; + int status_change = 0; + u32 ctrl; + + spin_lock_irqsave(&greth->devlock, flags); + + if (phydev->link) { + + if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) { + ctrl = GRETH_REGLOAD(greth->regs->control) & + ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB); + + if (phydev->duplex) + ctrl |= GRETH_CTRL_FD; + + if (phydev->speed == SPEED_100) + ctrl |= GRETH_CTRL_SP; + else if (phydev->speed == SPEED_1000) + ctrl |= GRETH_CTRL_GB; + + GRETH_REGSAVE(greth->regs->control, ctrl); + greth->speed = phydev->speed; + greth->duplex = phydev->duplex; + status_change = 1; + } + } + + if (phydev->link != greth->link) { + if (!phydev->link) { + greth->speed = 0; + greth->duplex = -1; + } + greth->link = phydev->link; + + status_change = 1; + } + + spin_unlock_irqrestore(&greth->devlock, flags); + + if (status_change) { + if (phydev->link) + pr_debug("%s: link up (%d/%s)\n", + dev->name, phydev->speed, + DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); + else + pr_debug("%s: link down\n", dev->name); + } +} + +static int greth_mdio_probe(struct net_device *dev) +{ + struct greth_private *greth = netdev_priv(dev); + struct phy_device *phy = NULL; + int ret; + + /* Find the first PHY */ + phy = phy_find_first(greth->mdio); + + if (!phy) { + if (netif_msg_probe(greth)) + dev_err(&dev->dev, "no PHY found\n"); + return -ENXIO; + } + + ret = phy_connect_direct(dev, phy, &greth_link_change, + greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII); + if (ret) { + if (netif_msg_ifup(greth)) + dev_err(&dev->dev, "could not attach to PHY\n"); + return ret; + } + + if (greth->gbit_mac) + phy->supported &= PHY_GBIT_FEATURES; + else + phy->supported &= PHY_BASIC_FEATURES; + + phy->advertising = phy->supported; + + greth->link = 0; + greth->speed = 0; + greth->duplex = -1; + greth->phy = phy; + + return 0; +} + +static inline int phy_aneg_done(struct phy_device *phydev) +{ + int retval; + + retval = phy_read(phydev, MII_BMSR); + + return (retval < 0) ? 
retval : (retval & BMSR_ANEGCOMPLETE); +} + +static int greth_mdio_init(struct greth_private *greth) +{ + int ret, phy; + unsigned long timeout; + + greth->mdio = mdiobus_alloc(); + if (!greth->mdio) { + return -ENOMEM; + } + + greth->mdio->name = "greth-mdio"; + snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq); + greth->mdio->read = greth_mdio_read; + greth->mdio->write = greth_mdio_write; + greth->mdio->priv = greth; + + greth->mdio->irq = greth->mdio_irqs; + + for (phy = 0; phy < PHY_MAX_ADDR; phy++) + greth->mdio->irq[phy] = PHY_POLL; + + ret = mdiobus_register(greth->mdio); + if (ret) { + goto error; + } + + ret = greth_mdio_probe(greth->netdev); + if (ret) { + if (netif_msg_probe(greth)) + dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n"); + goto unreg_mdio; + } + + phy_start(greth->phy); + + /* If Ethernet debug link is used make autoneg happen right away */ + if (greth->edcl && greth_edcl == 1) { + phy_start_aneg(greth->phy); + timeout = jiffies + 6*HZ; + while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) { + } + phy_read_status(greth->phy); + greth_link_change(greth->netdev); + } + + return 0; + +unreg_mdio: + mdiobus_unregister(greth->mdio); +error: + mdiobus_free(greth->mdio); + return ret; +} + +/* Initialize the GRETH MAC */ +static int greth_of_probe(struct platform_device *ofdev) +{ + struct net_device *dev; + struct greth_private *greth; + struct greth_regs *regs; + + int i; + int err; + int tmp; + unsigned long timeout; + + dev = alloc_etherdev(sizeof(struct greth_private)); + + if (dev == NULL) + return -ENOMEM; + + greth = netdev_priv(dev); + greth->netdev = dev; + greth->dev = &ofdev->dev; + + if (greth_debug > 0) + greth->msg_enable = greth_debug; + else + greth->msg_enable = GRETH_DEF_MSG_ENABLE; + + spin_lock_init(&greth->devlock); + + greth->regs = of_ioremap(&ofdev->resource[0], 0, + resource_size(&ofdev->resource[0]), + "grlib-greth regs"); + + if (greth->regs == NULL) { + if (netif_msg_probe(greth)) + dev_err(greth->dev, "ioremap failure.\n"); + err = -EIO; + goto error1; + } + + regs = greth->regs; + greth->irq = ofdev->archdata.irqs[0]; + + dev_set_drvdata(greth->dev, dev); + SET_NETDEV_DEV(dev, greth->dev); + + if (netif_msg_probe(greth)) + dev_dbg(greth->dev, "resetting controller.\n"); + + /* Reset the controller. 
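+	 * The reset bit is cleared by the core once the reset has completed;
+	 * the loop below polls for that with a short timeout.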
*/ + GRETH_REGSAVE(regs->control, GRETH_RESET); + + /* Wait for MAC to reset itself */ + timeout = jiffies + HZ/100; + while (GRETH_REGLOAD(regs->control) & GRETH_RESET) { + if (time_after(jiffies, timeout)) { + err = -EIO; + if (netif_msg_probe(greth)) + dev_err(greth->dev, "timeout when waiting for reset.\n"); + goto error2; + } + } + + /* Get default PHY address */ + greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F; + + /* Check if we have GBIT capable MAC */ + tmp = GRETH_REGLOAD(regs->control); + greth->gbit_mac = (tmp >> 27) & 1; + + /* Check for multicast capability */ + greth->multicast = (tmp >> 25) & 1; + + greth->edcl = (tmp >> 31) & 1; + + /* If we have EDCL we disable the EDCL speed-duplex FSM so + * it doesn't interfere with the software */ + if (greth->edcl != 0) + GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX); + + /* Check if MAC can handle MDIO interrupts */ + greth->mdio_int_en = (tmp >> 26) & 1; + + err = greth_mdio_init(greth); + if (err) { + if (netif_msg_probe(greth)) + dev_err(greth->dev, "failed to register MDIO bus\n"); + goto error2; + } + + /* Allocate TX descriptor ring in coherent memory */ + greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, + &greth->tx_bd_base_phys, + GFP_KERNEL); + if (!greth->tx_bd_base) { + err = -ENOMEM; + goto error3; + } + + /* Allocate RX descriptor ring in coherent memory */ + greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, + &greth->rx_bd_base_phys, + GFP_KERNEL); + if (!greth->rx_bd_base) { + err = -ENOMEM; + goto error4; + } + + /* Get MAC address from: module param, OF property or ID prom */ + for (i = 0; i < 6; i++) { + if (macaddr[i] != 0) + break; + } + if (i == 6) { + const unsigned char *addr; + int len; + addr = of_get_property(ofdev->dev.of_node, "local-mac-address", + &len); + if (addr != NULL && len == 6) { + for (i = 0; i < 6; i++) + macaddr[i] = (unsigned int) addr[i]; + } else { +#ifdef CONFIG_SPARC + for (i = 0; i < 6; i++) + macaddr[i] = (unsigned int) idprom->id_ethaddr[i]; +#endif + } + } + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = macaddr[i]; + + macaddr[5]++; + + if (!is_valid_ether_addr(&dev->dev_addr[0])) { + if (netif_msg_probe(greth)) + dev_err(greth->dev, "no valid ethernet address, aborting.\n"); + err = -EINVAL; + goto error5; + } + + GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]); + GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 | + dev->dev_addr[4] << 8 | dev->dev_addr[5]); + + /* Clear all pending interrupts except PHY irq */ + GRETH_REGSAVE(regs->status, 0xFF); + + if (greth->gbit_mac) { + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM; + dev->features = dev->hw_features | NETIF_F_HIGHDMA; + greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit; + } + + if (greth->multicast) { + greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list; + dev->flags |= IFF_MULTICAST; + } else { + dev->flags &= ~IFF_MULTICAST; + } + + dev->netdev_ops = &greth_netdev_ops; + dev->ethtool_ops = &greth_ethtool_ops; + + err = register_netdev(dev); + if (err) { + if (netif_msg_probe(greth)) + dev_err(greth->dev, "netdevice registration failed.\n"); + goto error5; + } + + /* setup NAPI */ + netif_napi_add(dev, &greth->napi, greth_poll, 64); + + return 0; + +error5: + dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys); +error4: + dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys); +error3: + mdiobus_unregister(greth->mdio); +error2: + 
of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0])); +error1: + free_netdev(dev); + return err; +} + +static int greth_of_remove(struct platform_device *of_dev) +{ + struct net_device *ndev = platform_get_drvdata(of_dev); + struct greth_private *greth = netdev_priv(ndev); + + /* Free descriptor areas */ + dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys); + + dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys); + + if (greth->phy) + phy_stop(greth->phy); + mdiobus_unregister(greth->mdio); + + unregister_netdev(ndev); + free_netdev(ndev); + + of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0])); + + return 0; +} + +static const struct of_device_id greth_of_match[] = { + { + .name = "GAISLER_ETHMAC", + }, + { + .name = "01_01d", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, greth_of_match); + +static struct platform_driver greth_of_driver = { + .driver = { + .name = "grlib-greth", + .of_match_table = greth_of_match, + }, + .probe = greth_of_probe, + .remove = greth_of_remove, +}; + +module_platform_driver(greth_of_driver); + +MODULE_AUTHOR("Aeroflex Gaisler AB."); +MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h new file mode 100644 index 000000000..ae16ac94d --- /dev/null +++ b/drivers/net/ethernet/aeroflex/greth.h @@ -0,0 +1,142 @@ +#ifndef GRETH_H +#define GRETH_H + +#include + +/* Register bits and masks */ +#define GRETH_RESET 0x40 +#define GRETH_MII_BUSY 0x8 +#define GRETH_MII_NVALID 0x10 + +#define GRETH_CTRL_FD 0x10 +#define GRETH_CTRL_PR 0x20 +#define GRETH_CTRL_SP 0x80 +#define GRETH_CTRL_GB 0x100 +#define GRETH_CTRL_PSTATIEN 0x400 +#define GRETH_CTRL_MCEN 0x800 +#define GRETH_CTRL_DISDUPLEX 0x1000 +#define GRETH_STATUS_PHYSTAT 0x100 + +#define GRETH_BD_EN 0x800 +#define GRETH_BD_WR 0x1000 +#define GRETH_BD_IE 0x2000 +#define GRETH_BD_LEN 0x7FF + +#define GRETH_TXEN 0x1 +#define GRETH_INT_TE 0x2 +#define GRETH_INT_TX 0x8 +#define GRETH_TXI 0x4 +#define GRETH_TXBD_STATUS 0x0001C000 +#define GRETH_TXBD_MORE 0x20000 +#define GRETH_TXBD_IPCS 0x40000 +#define GRETH_TXBD_TCPCS 0x80000 +#define GRETH_TXBD_UDPCS 0x100000 +#define GRETH_TXBD_CSALL (GRETH_TXBD_IPCS | GRETH_TXBD_TCPCS | GRETH_TXBD_UDPCS) +#define GRETH_TXBD_ERR_LC 0x10000 +#define GRETH_TXBD_ERR_UE 0x4000 +#define GRETH_TXBD_ERR_AL 0x8000 + +#define GRETH_INT_RE 0x1 +#define GRETH_INT_RX 0x4 +#define GRETH_RXEN 0x2 +#define GRETH_RXI 0x8 +#define GRETH_RXBD_STATUS 0xFFFFC000 +#define GRETH_RXBD_ERR_AE 0x4000 +#define GRETH_RXBD_ERR_FT 0x8000 +#define GRETH_RXBD_ERR_CRC 0x10000 +#define GRETH_RXBD_ERR_OE 0x20000 +#define GRETH_RXBD_ERR_LE 0x40000 +#define GRETH_RXBD_IP 0x80000 +#define GRETH_RXBD_IP_CSERR 0x100000 +#define GRETH_RXBD_UDP 0x200000 +#define GRETH_RXBD_UDP_CSERR 0x400000 +#define GRETH_RXBD_TCP 0x800000 +#define GRETH_RXBD_TCP_CSERR 0x1000000 +#define GRETH_RXBD_IP_FRAG 0x2000000 +#define GRETH_RXBD_MCAST 0x4000000 + +/* Descriptor parameters */ +#define GRETH_TXBD_NUM 128 +#define GRETH_TXBD_NUM_MASK (GRETH_TXBD_NUM-1) +#define GRETH_TX_BUF_SIZE 2048 +#define GRETH_RXBD_NUM 128 +#define GRETH_RXBD_NUM_MASK (GRETH_RXBD_NUM-1) +#define GRETH_RX_BUF_SIZE 2048 + +/* Buffers per page */ +#define GRETH_RX_BUF_PPGAE (PAGE_SIZE/GRETH_RX_BUF_SIZE) +#define GRETH_TX_BUF_PPGAE (PAGE_SIZE/GRETH_TX_BUF_SIZE) + +/* How many pages are needed for buffers */ +#define GRETH_RX_BUF_PAGE_NUM 
(GRETH_RXBD_NUM/GRETH_RX_BUF_PPGAE) +#define GRETH_TX_BUF_PAGE_NUM (GRETH_TXBD_NUM/GRETH_TX_BUF_PPGAE) + +/* Buffer size. + * Gbit MAC uses tagged maximum frame size which is 1518 excluding CRC. + * Set to 1520 to make all buffers word aligned for non-gbit MAC. + */ +#define MAX_FRAME_SIZE 1520 + +/* GRETH APB registers */ +struct greth_regs { + u32 control; + u32 status; + u32 esa_msb; + u32 esa_lsb; + u32 mdio; + u32 tx_desc_p; + u32 rx_desc_p; + u32 edclip; + u32 hash_msb; + u32 hash_lsb; +}; + +/* GRETH buffer descriptor */ +struct greth_bd { + u32 stat; + u32 addr; +}; + +struct greth_private { + struct sk_buff *rx_skbuff[GRETH_RXBD_NUM]; + struct sk_buff *tx_skbuff[GRETH_TXBD_NUM]; + + unsigned char *tx_bufs[GRETH_TXBD_NUM]; + unsigned char *rx_bufs[GRETH_RXBD_NUM]; + u16 tx_bufs_length[GRETH_TXBD_NUM]; + + u16 tx_next; + u16 tx_last; + u16 tx_free; /* only used on 10/100Mbit */ + u16 rx_cur; + + struct greth_regs *regs; /* Address of controller registers. */ + struct greth_bd *rx_bd_base; /* Address of Rx BDs. */ + struct greth_bd *tx_bd_base; /* Address of Tx BDs. */ + dma_addr_t rx_bd_base_phys; + dma_addr_t tx_bd_base_phys; + + int irq; + + struct device *dev; /* Pointer to platform_device->dev */ + struct net_device *netdev; + struct napi_struct napi; + spinlock_t devlock; + + struct phy_device *phy; + struct mii_bus *mdio; + int mdio_irqs[PHY_MAX_ADDR]; + unsigned int link; + unsigned int speed; + unsigned int duplex; + + u32 msg_enable; + + u8 phyaddr; + u8 multicast; + u8 gbit_mac; + u8 mdio_int_en; + u8 edcl; +}; + +#endif diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig new file mode 100644 index 000000000..63e805de6 --- /dev/null +++ b/drivers/net/ethernet/agere/Kconfig @@ -0,0 +1,31 @@ +# +# Agere device configuration +# + +config NET_VENDOR_AGERE + bool "Agere devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Agere devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_AGERE + +config ET131X + tristate "Agere ET-1310 Gigabit Ethernet support" + depends on PCI + select PHYLIB + ---help--- + This driver supports Agere ET-1310 ethernet adapters. + + To compile this driver as a module, choose M here. The module + will be called et131x. + +endif # NET_VENDOR_AGERE diff --git a/drivers/net/ethernet/agere/Makefile b/drivers/net/ethernet/agere/Makefile new file mode 100644 index 000000000..027ff9453 --- /dev/null +++ b/drivers/net/ethernet/agere/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Agere ET-131x ethernet driver +# + +obj-$(CONFIG_ET131X) += et131x.o diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c new file mode 100644 index 000000000..e0f3d197e --- /dev/null +++ b/drivers/net/ethernet/agere/et131x.c @@ -0,0 +1,4123 @@ +/* Agere Systems Inc. + * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. 
+ * http://www.agere.com + * + * Copyright (c) 2011 Mark Einon + * + *------------------------------------------------------------------------------ + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "et131x.h" + +MODULE_AUTHOR("Victor Soriano "); +MODULE_AUTHOR("Mark Einon "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems"); + +/* EEPROM defines */ +#define MAX_NUM_REGISTER_POLLS 1000 +#define MAX_NUM_WRITE_RETRIES 2 + +/* MAC defines */ +#define COUNTER_WRAP_16_BIT 0x10000 +#define COUNTER_WRAP_12_BIT 0x1000 + +/* PCI defines */ +#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ +#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ + +/* ISR defines */ +/* For interrupts, normal running is: + * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, + * watchdog_interrupt & txdma_xfer_done + * + * In both cases, when flow control is enabled for either Tx or bi-direction, + * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the + * buffer rings are running low. 
+ */ +#define INT_MASK_DISABLE 0xffffffff + +/* NOTE: Masking out MAC_STAT Interrupt for now... + * #define INT_MASK_ENABLE 0xfff6bf17 + * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 + */ +#define INT_MASK_ENABLE 0xfffebf17 +#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 + +/* General defines */ +/* Packet and header sizes */ +#define NIC_MIN_PACKET_SIZE 60 + +/* Multicast list size */ +#define NIC_MAX_MCAST_LIST 128 + +/* Supported Filters */ +#define ET131X_PACKET_TYPE_DIRECTED 0x0001 +#define ET131X_PACKET_TYPE_MULTICAST 0x0002 +#define ET131X_PACKET_TYPE_BROADCAST 0x0004 +#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008 +#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010 + +/* Tx Timeout */ +#define ET131X_TX_TIMEOUT (1 * HZ) +#define NIC_SEND_HANG_THRESHOLD 0 + +/* MP_ADAPTER flags */ +#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008 + +/* MP_SHARED flags */ +#define FMP_ADAPTER_LOWER_POWER 0x00200000 + +#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000 +#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000 + +#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000 + +/* Some offsets in PCI config space that are actually used. */ +#define ET1310_PCI_MAC_ADDRESS 0xA4 +#define ET1310_PCI_EEPROM_STATUS 0xB2 +#define ET1310_PCI_ACK_NACK 0xC0 +#define ET1310_PCI_REPLAY 0xC2 +#define ET1310_PCI_L0L1LATENCY 0xCF + +/* PCI Product IDs */ +#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */ +#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */ + +/* Define order of magnitude converter */ +#define NANO_IN_A_MICRO 1000 + +#define PARM_RX_NUM_BUFS_DEF 4 +#define PARM_RX_TIME_INT_DEF 10 +#define PARM_RX_MEM_END_DEF 0x2bc +#define PARM_TX_TIME_INT_DEF 40 +#define PARM_TX_NUM_BUFS_DEF 4 +#define PARM_DMA_CACHE_DEF 0 + +/* RX defines */ +#define FBR_CHUNKS 32 +#define MAX_DESC_PER_RING_RX 1024 + +/* number of RFDs - default and min */ +#define RFD_LOW_WATER_MARK 40 +#define NIC_DEFAULT_NUM_RFD 1024 +#define NUM_FBRS 2 + +#define MAX_PACKETS_HANDLED 256 + +#define ALCATEL_MULTICAST_PKT 0x01000000 +#define ALCATEL_BROADCAST_PKT 0x02000000 + +/* typedefs for Free Buffer Descriptors */ +struct fbr_desc { + u32 addr_lo; + u32 addr_hi; + u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */ +}; + +/* Packet Status Ring Descriptors + * + * Word 0: + * + * top 16 bits are from the Alcatel Status Word as enumerated in + * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2) + * + * 0: hp hash pass + * 1: ipa IP checksum assist + * 2: ipp IP checksum pass + * 3: tcpa TCP checksum assist + * 4: tcpp TCP checksum pass + * 5: wol WOL Event + * 6: rxmac_error RXMAC Error Indicator + * 7: drop Drop packet + * 8: ft Frame Truncated + * 9: jp Jumbo Packet + * 10: vp VLAN Packet + * 11-15: unused + * 16: asw_prev_pkt_dropped e.g. 
IFG too small on previous + * 17: asw_RX_DV_event short receive event detected + * 18: asw_false_carrier_event bad carrier since last good packet + * 19: asw_code_err one or more nibbles signalled as errors + * 20: asw_CRC_err CRC error + * 21: asw_len_chk_err frame length field incorrect + * 22: asw_too_long frame length > 1518 bytes + * 23: asw_OK valid CRC + no code error + * 24: asw_multicast has a multicast address + * 25: asw_broadcast has a broadcast address + * 26: asw_dribble_nibble spurious bits after EOP + * 27: asw_control_frame is a control frame + * 28: asw_pause_frame is a pause frame + * 29: asw_unsupported_op unsupported OP code + * 30: asw_VLAN_tag VLAN tag detected + * 31: asw_long_evt Rx long event + * + * Word 1: + * 0-15: length length in bytes + * 16-25: bi Buffer Index + * 26-27: ri Ring Index + * 28-31: reserved + */ +struct pkt_stat_desc { + u32 word0; + u32 word1; +}; + +/* Typedefs for the RX DMA status word */ + +/* rx status word 0 holds part of the status bits of the Rx DMA engine + * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word + * which contains the Free Buffer ring 0 and 1 available offset. + * + * bit 0-9 FBR1 offset + * bit 10 Wrap flag for FBR1 + * bit 16-25 FBR0 offset + * bit 26 Wrap flag for FBR0 + */ + +/* RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine + * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word + * which contains the Packet Status Ring available offset. + * + * bit 0-15 reserved + * bit 16-27 PSRoffset + * bit 28 PSRwrap + * bit 29-31 unused + */ + +/* struct rx_status_block is a structure representing the status of the Rx + * DMA engine it sits in free memory, and is pointed to by 0x101c / 0x1020 + */ +struct rx_status_block { + u32 word0; + u32 word1; +}; + +/* Structure for look-up table holding free buffer ring pointers, addresses + * and state. 
+ */ +struct fbr_lookup { + void *virt[MAX_DESC_PER_RING_RX]; + u32 bus_high[MAX_DESC_PER_RING_RX]; + u32 bus_low[MAX_DESC_PER_RING_RX]; + void *ring_virtaddr; + dma_addr_t ring_physaddr; + void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; + dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS]; + u32 local_full; + u32 num_entries; + dma_addr_t buffsize; +}; + +/* struct rx_ring is the structure representing the adaptor's local + * reference(s) to the rings + */ +struct rx_ring { + struct fbr_lookup *fbr[NUM_FBRS]; + void *ps_ring_virtaddr; + dma_addr_t ps_ring_physaddr; + u32 local_psr_full; + u32 psr_entries; + + struct rx_status_block *rx_status_block; + dma_addr_t rx_status_bus; + + struct list_head recv_list; + u32 num_ready_recv; + + u32 num_rfd; + + bool unfinished_receives; +}; + +/* TX defines */ +/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310 + * + * 0-15: length of packet + * 16-27: VLAN tag + * 28: VLAN CFI + * 29-31: VLAN priority + * + * word 3 of the control bits in the Tx Descriptor ring for the ET-1310 + * + * 0: last packet in the sequence + * 1: first packet in the sequence + * 2: interrupt the processor when this pkt sent + * 3: Control word - no packet data + * 4: Issue half-duplex backpressure : XON/XOFF + * 5: send pause frame + * 6: Tx frame has error + * 7: append CRC + * 8: MAC override + * 9: pad packet + * 10: Packet is a Huge packet + * 11: append VLAN tag + * 12: IP checksum assist + * 13: TCP checksum assist + * 14: UDP checksum assist + */ +#define TXDESC_FLAG_LASTPKT 0x0001 +#define TXDESC_FLAG_FIRSTPKT 0x0002 +#define TXDESC_FLAG_INTPROC 0x0004 + +/* struct tx_desc represents each descriptor on the ring */ +struct tx_desc { + u32 addr_hi; + u32 addr_lo; + u32 len_vlan; /* control words how to xmit the */ + u32 flags; /* data (detailed above) */ +}; + +/* The status of the Tx DMA engine it sits in free memory, and is pointed to + * by 0x101c / 0x1020. This is a DMA10 type + */ + +/* TCB (Transmit Control Block: Host Side) */ +struct tcb { + struct tcb *next; /* Next entry in ring */ + u32 count; /* Used to spot stuck/lost packets */ + u32 stale; /* Used to spot stuck/lost packets */ + struct sk_buff *skb; /* Network skb we are tied to */ + u32 index; /* Ring indexes */ + u32 index_start; +}; + +/* Structure representing our local reference(s) to the ring */ +struct tx_ring { + /* TCB (Transmit Control Block) memory and lists */ + struct tcb *tcb_ring; + + /* List of TCBs that are ready to be used */ + struct tcb *tcb_qhead; + struct tcb *tcb_qtail; + + /* list of TCBs that are currently being sent. */ + struct tcb *send_head; + struct tcb *send_tail; + int used; + + /* The actual descriptor ring */ + struct tx_desc *tx_desc_ring; + dma_addr_t tx_desc_ring_pa; + + /* send_idx indicates where we last wrote to in the descriptor ring. */ + u32 send_idx; + + /* The location of the write-back status block */ + u32 *tx_status; + dma_addr_t tx_status_pa; + + /* Packets since the last IRQ: used for interrupt coalescing */ + int since_irq; +}; + +/* Do not change these values: if changed, then change also in respective + * TXdma and Rxdma engines + */ +#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */ +#define NUM_TCB 64 + +/* These values are all superseded by registry entries to facilitate tuning. 
+ * Once the desired performance has been achieved, the optimal registry values + * should be re-populated to these #defines: + */ +#define TX_ERROR_PERIOD 1000 + +#define LO_MARK_PERCENT_FOR_PSR 15 +#define LO_MARK_PERCENT_FOR_RX 15 + +/* RFD (Receive Frame Descriptor) */ +struct rfd { + struct list_head list_node; + struct sk_buff *skb; + u32 len; /* total size of receive frame */ + u16 bufferindex; + u8 ringindex; +}; + +/* Flow Control */ +#define FLOW_BOTH 0 +#define FLOW_TXONLY 1 +#define FLOW_RXONLY 2 +#define FLOW_NONE 3 + +/* Struct to define some device statistics */ +struct ce_stats { + u32 multicast_pkts_rcvd; + u32 rcvd_pkts_dropped; + + u32 tx_underflows; + u32 tx_collisions; + u32 tx_excessive_collisions; + u32 tx_first_collisions; + u32 tx_late_collisions; + u32 tx_max_pkt_errs; + u32 tx_deferred; + + u32 rx_overflows; + u32 rx_length_errs; + u32 rx_align_errs; + u32 rx_crc_errs; + u32 rx_code_violations; + u32 rx_other_errs; + + u32 interrupt_status; +}; + +/* The private adapter structure */ +struct et131x_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct mii_bus *mii_bus; + struct phy_device *phydev; + struct napi_struct napi; + + /* Flags that indicate current state of the adapter */ + u32 flags; + + /* local link state, to determine if a state change has occurred */ + int link; + + /* Configuration */ + u8 rom_addr[ETH_ALEN]; + u8 addr[ETH_ALEN]; + bool has_eeprom; + u8 eeprom_data[2]; + + spinlock_t tcb_send_qlock; /* protects the tx_ring send tcb list */ + spinlock_t tcb_ready_qlock; /* protects the tx_ring ready tcb list */ + spinlock_t rcv_lock; /* protects the rx_ring receive list */ + + /* Packet Filter and look ahead size */ + u32 packet_filter; + + /* multicast list */ + u32 multicast_addr_count; + u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN]; + + /* Pointer to the device's PCI register space */ + struct address_map __iomem *regs; + + /* Registry parameters */ + u8 wanted_flow; /* Flow we want for 802.3x flow control */ + u32 registry_jumbo_packet; /* Max supported ethernet packet size */ + + /* Derived from the registry: */ + u8 flow; /* flow control validated by the far-end */ + + /* Minimize init-time */ + struct timer_list error_timer; + + /* variable putting the phy into coma mode when boot up with no cable + * plugged in after 5 seconds + */ + u8 boot_coma; + + /* Tx Memory Variables */ + struct tx_ring tx_ring; + + /* Rx Memory Variables */ + struct rx_ring rx_ring; + + struct ce_stats stats; +}; + +static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) +{ + u32 reg; + int i; + + /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and + * bits 7,1:0 both equal to 1, at least once after reset. + * Subsequent operations need only to check that bits 1:0 are equal + * to 1 prior to starting a single byte read/write + */ + for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { + if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, ®)) + return -EIO; + + /* I2C idle and Phy Queue Avail both true */ + if ((reg & 0x3000) == 0x3000) { + if (status) + *status = reg; + return reg & 0xFF; + } + } + return -ETIMEDOUT; +} + +static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) +{ + struct pci_dev *pdev = adapter->pdev; + int index = 0; + int retries; + int err = 0; + int writeok = 0; + u32 status; + u32 val = 0; + + /* For an EEPROM, an I2C single byte write is defined as a START + * condition followed by the device address, EEPROM address, one byte + * of data and a STOP condition. 
The STOP condition will trigger the
+	 * EEPROM's internally timed write cycle to the nonvolatile memory.
+	 * All inputs are disabled during this write cycle and the EEPROM will
+	 * not respond to any access until the internal write is complete.
+	 */
+	err = eeprom_wait_ready(pdev, NULL);
+	if (err < 0)
+		return err;
+
+	/* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
+	 * and bits 1:0 both =0. Bit 5 should be set according to the
+	 * type of EEPROM being accessed (1=two byte addressing, 0=one
+	 * byte addressing).
+	 */
+	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+				  LBCIF_CONTROL_LBCIF_ENABLE |
+				  LBCIF_CONTROL_I2C_WRITE))
+		return -EIO;
+
+	/* Prepare EEPROM address for Step 3 */
+	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
+		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
+			break;
+		/* Write the data to the LBCIF Data Register (the I2C write
+		 * will begin).
+		 */
+		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
+			break;
+		/* Monitor bit 1:0 of the LBCIF Status Register. When bits
+		 * 1:0 are both equal to 1, the I2C write has completed and the
+		 * internal write cycle of the EEPROM is about to start.
+		 * (bits 1:0 = 01 is a legal state while waiting for both
+		 * to equal 1, but bits 1:0 = 10 is invalid and implies that
+		 * something is broken).
+		 */
+		err = eeprom_wait_ready(pdev, &status);
+		if (err < 0)
+			return 0;
+
+		/* Check bit 3 of the LBCIF Status Register. If equal to 1,
+		 * an error has occurred. Don't break here if we are revision
+		 * 1, this is so we do a blind write for load bug.
+		 */
+		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
+		    adapter->pdev->revision == 0)
+			break;
+
+		/* Check bit 2 of the LBCIF Status Register. If equal to 1 an
+		 * ACK error has occurred on the address phase of the write.
+		 * This could be due to an actual hardware failure or the
+		 * EEPROM may still be in its internal write cycle from a
+		 * previous write. This write operation was ignored and must be
+		 * repeated later.
+		 */
+		if (status & LBCIF_STATUS_ACK_ERROR) {
+			/* This could be due to an actual hardware failure
+			 * or the EEPROM may still be in its internal write
+			 * cycle from a previous write. This write operation
+			 * was ignored and must be repeated later.
+			 */
+			udelay(10);
+			continue;
+		}
+
+		writeok = 1;
+		break;
+	}
+
+	udelay(10);
+
+	while (1) {
+		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
+					  LBCIF_CONTROL_LBCIF_ENABLE))
+			writeok = 0;
+
+		/* Do read until internal ACK_ERROR goes away meaning write
+		 * completed
+		 */
+		do {
+			pci_write_config_dword(pdev,
+					       LBCIF_ADDRESS_REGISTER,
+					       addr);
+			do {
+				pci_read_config_dword(pdev,
+						      LBCIF_DATA_REGISTER,
+						      &val);
+			} while ((val & 0x00010000) == 0);
+		} while (val & 0x00040000);
+
+		if ((val & 0xFF00) != 0xC000 || index == 10000)
+			break;
+		index++;
+	}
+	return writeok ? 0 : -EIO;
+}
+
+static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+	u32 status;
+
+	/* A single byte read is similar to the single byte write, with the
+	 * exception of the data flow:
+	 */
+	err = eeprom_wait_ready(pdev, NULL);
+	if (err < 0)
+		return err;
+	/* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
+	 * and bits 1:0 both =0. Bit 5 should be set according to the type
+	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
+	 * addressing).
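+	 * (Bit 6 is left clear here, unlike in eeprom_write(), so the LBCIF
+	 * performs a read rather than a write.)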
+ */ + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, + LBCIF_CONTROL_LBCIF_ENABLE)) + return -EIO; + /* Write the address to the LBCIF Address Register (I2C read will + * begin). + */ + if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) + return -EIO; + /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read + * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure + * has occurred). + */ + err = eeprom_wait_ready(pdev, &status); + if (err < 0) + return err; + /* Regardless of error status, read data byte from LBCIF Data + * Register. + */ + *pdata = err; + + return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; +} + +static int et131x_init_eeprom(struct et131x_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u8 eestatus; + + pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus); + + /* THIS IS A WORKAROUND: + * I need to call this function twice to get my card in a + * LG M1 Express Dual running. I tried also a msleep before this + * function, because I thought there could be some time conditions + * but it didn't work. Call the whole function twice also work. + */ + if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { + dev_err(&pdev->dev, + "Could not read PCI config space for EEPROM Status\n"); + return -EIO; + } + + /* Determine if the error(s) we care about are present. If they are + * present we need to fail. + */ + if (eestatus & 0x4C) { + int write_failed = 0; + + if (pdev->revision == 0x01) { + int i; + static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; + + /* Re-write the first 4 bytes if we have an eeprom + * present and the revision id is 1, this fixes the + * corruption seen with 1310 B Silicon + */ + for (i = 0; i < 3; i++) + if (eeprom_write(adapter, i, eedata[i]) < 0) + write_failed = 1; + } + if (pdev->revision != 0x01 || write_failed) { + dev_err(&pdev->dev, + "Fatal EEPROM Status Error - 0x%04x\n", + eestatus); + + /* This error could mean that there was an error + * reading the eeprom or that the eeprom doesn't exist. + * We will treat each case the same and not try to + * gather additional information that normally would + * come from the eeprom, like MAC Address + */ + adapter->has_eeprom = 0; + return -EIO; + } + } + adapter->has_eeprom = 1; + + /* Read the EEPROM for information regarding LED behavior. Refer to + * et131x_xcvr_init() for its use. 
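 + * (Offset 0x70 must read back 0xcd for the LED bytes to be treated as
 + * valid; otherwise the optional LED features are cleared just below.)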
+ */
+	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
+	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);
+
+	if (adapter->eeprom_data[0] != 0xcd)
+		/* Disable all optional features */
+		adapter->eeprom_data[1] = 0x00;
+
+	return 0;
+}
+
+static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
+{
+	/* Setup the receive dma configuration register for normal operation */
+	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
+	struct rx_ring *rx_ring = &adapter->rx_ring;
+
+	if (rx_ring->fbr[1]->buffsize == 4096)
+		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
+	else if (rx_ring->fbr[1]->buffsize == 8192)
+		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
+	else if (rx_ring->fbr[1]->buffsize == 16384)
+		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;
+
+	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
+	if (rx_ring->fbr[0]->buffsize == 256)
+		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
+	else if (rx_ring->fbr[0]->buffsize == 512)
+		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
+	else if (rx_ring->fbr[0]->buffsize == 1024)
+		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
+	writel(csr, &adapter->regs->rxdma.csr);
+
+	csr = readl(&adapter->regs->rxdma.csr);
+	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
+		udelay(5);
+		csr = readl(&adapter->regs->rxdma.csr);
+		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
+			dev_err(&adapter->pdev->dev,
+				"RX Dma failed to exit halt state. CSR 0x%08x\n",
+				csr);
+		}
+	}
+}
+
+static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
+{
+	u32 csr;
+	/* Setup the receive dma configuration register */
+	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
+	       &adapter->regs->rxdma.csr);
+	csr = readl(&adapter->regs->rxdma.csr);
+	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
+		udelay(5);
+		csr = readl(&adapter->regs->rxdma.csr);
+		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
+			dev_err(&adapter->pdev->dev,
+				"RX Dma failed to enter halt state. CSR 0x%08x\n",
+				csr);
+	}
+}
+
+static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
+{
+	/* Setup the transmit dma configuration register for normal
+	 * operation
+	 */
+	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
+	       &adapter->regs->txdma.csr);
+}
+
+static inline void add_10bit(u32 *v, int n)
+{
+	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
+}
+
+static inline void add_12bit(u32 *v, int n)
+{
+	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
+}
+
+static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
+{
+	struct mac_regs __iomem *macregs = &adapter->regs->mac;
+	u32 station1;
+	u32 station2;
+	u32 ipg;
+
+	/* First we need to reset everything. Write to MAC configuration
+	 * register 1 to perform reset.
+	 */
+	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
+	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
+	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
+	       &macregs->cfg1);
+
+	/* Next let's configure the MAC Inter-packet gap register */
+	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
+	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
+	writel(ipg, &macregs->ipg);
+
+	/* Next let's configure the MAC Half Duplex register */
+	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
+	writel(0x00A1F037, &macregs->hfdp);
+
+	/* Next let's configure the MAC Interface Control register */
+	writel(0, &macregs->if_ctrl);
+
+	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);
+
+	/* Next let's configure the MAC Station Address register. These
+	 * values are read from the EEPROM during initialization and stored
+	 * in the adapter structure. We write what is stored in the adapter
+	 * structure to the MAC Station Address registers high and low. This
+	 * station address is used for generating and checking pause control
+	 * packets.
+	 */
+	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
+		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
+	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
+		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
+		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
+		   adapter->addr[2];
+	writel(station1, &macregs->station_addr_1);
+	writel(station2, &macregs->station_addr_2);
+
+	/* Max ethernet packet in bytes that will be passed by the mac without
+	 * being truncated. Allow the MAC to pass 4 more than our max packet
+	 * size. This is 4 for the Ethernet CRC.
+	 *
+	 * Packets larger than (registry_jumbo_packet) that do not contain a
+	 * VLAN ID will be dropped by the Rx function.
+	 */
+	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);
+
+	/* clear out MAC config reset */
+	writel(0, &macregs->cfg1);
+}
+
+static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
+{
+	int32_t delay = 0;
+	struct mac_regs __iomem *mac = &adapter->regs->mac;
+	struct phy_device *phydev = adapter->phydev;
+	u32 cfg1;
+	u32 cfg2;
+	u32 ifctrl;
+	u32 ctl;
+
+	ctl = readl(&adapter->regs->txmac.ctl);
+	cfg1 = readl(&mac->cfg1);
+	cfg2 = readl(&mac->cfg2);
+	ifctrl = readl(&mac->if_ctrl);
+
+	/* Set up the if mode bits */
+	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
+	if (phydev->speed == SPEED_1000) {
+		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
+		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
+	} else {
+		cfg2 |= ET_MAC_CFG2_IFMODE_100;
+		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
+	}
+
+	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
+		ET_MAC_CFG1_TX_FLOW;
+
+	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
+	if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
+		cfg1 |= ET_MAC_CFG1_RX_FLOW;
+	writel(cfg1, &mac->cfg1);
+
+	/* Now we need to initialize the MAC Configuration 2 register */
+	/* preamble 7, check length, huge frame off, pad crc, crc enable
+	 * full duplex off
+	 */
+	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
+	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
+	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
+	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
+	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
+	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;
+
+	if (phydev->duplex == DUPLEX_FULL)
+		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;
+
+	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
+	if (phydev->duplex == DUPLEX_HALF)
+		ifctrl |= ET_MAC_IFCTRL_GHDMODE;
+
+	writel(ifctrl, &mac->if_ctrl);
+	writel(cfg2, &mac->cfg2);
+
+	do {
+		udelay(10);
+		delay++;
+		cfg1 = readl(&mac->cfg1);
+	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);
+
+	if (delay == 100) {
+		dev_warn(&adapter->pdev->dev,
+			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
+			 cfg1);
+	}
+
+	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
+	writel(ctl, &adapter->regs->txmac.ctl);
+
+	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
+		et131x_rx_dma_enable(adapter);
+		et131x_tx_dma_enable(adapter);
+	}
+}
+
+static int et1310_in_phy_coma(struct et131x_adapter *adapter)
+{
+	u32 pmcsr = readl(&adapter->regs->global.pm_csr);
+
+	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
+}
+
+static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
+{
+	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
+	u32 hash1 = 0;
+	u32 hash2 = 0;
+	u32 hash3 = 0;
+	u32 hash4 = 0;
+	u32 pm_csr;
+
+	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
+	 * the multi-cast LIST.
If it is NOT specified, (and "ALL" is not + * specified) then we should pass NO multi-cast addresses to the + * driver. + */ + if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { + int i; + + /* Loop through our multicast array and set up the device */ + for (i = 0; i < adapter->multicast_addr_count; i++) { + u32 result; + + result = ether_crc(6, adapter->multicast_list[i]); + + result = (result & 0x3F800000) >> 23; + + if (result < 32) { + hash1 |= (1 << result); + } else if ((31 < result) && (result < 64)) { + result -= 32; + hash2 |= (1 << result); + } else if ((63 < result) && (result < 96)) { + result -= 64; + hash3 |= (1 << result); + } else { + result -= 96; + hash4 |= (1 << result); + } + } + } + + /* Write out the new hash to the device */ + pm_csr = readl(&adapter->regs->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) { + writel(hash1, &rxmac->multi_hash1); + writel(hash2, &rxmac->multi_hash2); + writel(hash3, &rxmac->multi_hash3); + writel(hash4, &rxmac->multi_hash4); + } +} + +static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + u32 uni_pf1; + u32 uni_pf2; + u32 uni_pf3; + u32 pm_csr; + + /* Set up unicast packet filter reg 3 to be the first two octets of + * the MAC address for both address + * + * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the + * MAC address for second address + * + * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the + * MAC address for first address + */ + uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) | + (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) | + (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) | + adapter->addr[1]; + + uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) | + (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) | + (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) | + adapter->addr[5]; + + uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) | + (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) | + (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) | + adapter->addr[5]; + + pm_csr = readl(&adapter->regs->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) { + writel(uni_pf1, &rxmac->uni_pf_addr1); + writel(uni_pf2, &rxmac->uni_pf_addr2); + writel(uni_pf3, &rxmac->uni_pf_addr3); + } +} + +static void et1310_config_rxmac_regs(struct et131x_adapter *adapter) +{ + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; + struct phy_device *phydev = adapter->phydev; + u32 sa_lo; + u32 sa_hi = 0; + u32 pf_ctrl = 0; + u32 __iomem *wolw; + + /* Disable the MAC while it is being configured (also disable WOL) */ + writel(0x8, &rxmac->ctrl); + + /* Initialize WOL to disabled. */ + writel(0, &rxmac->crc0); + writel(0, &rxmac->crc12); + writel(0, &rxmac->crc34); + + /* We need to set the WOL mask0 - mask4 next. We initialize it to + * its default Values of 0x00000000 because there are not WOL masks + * as of this time. 
+ */ + for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++) + writel(0, wolw); + + /* Lets setup the WOL Source Address */ + sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) | + (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) | + (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) | + adapter->addr[5]; + writel(sa_lo, &rxmac->sa_lo); + + sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) | + adapter->addr[1]; + writel(sa_hi, &rxmac->sa_hi); + + /* Disable all Packet Filtering */ + writel(0, &rxmac->pf_ctrl); + + /* Let's initialize the Unicast Packet filtering address */ + if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { + et1310_setup_device_for_unicast(adapter); + pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE; + } else { + writel(0, &rxmac->uni_pf_addr1); + writel(0, &rxmac->uni_pf_addr2); + writel(0, &rxmac->uni_pf_addr3); + } + + /* Let's initialize the Multicast hash */ + if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { + pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE; + et1310_setup_device_for_multicast(adapter); + } + + /* Runt packet filtering. Didn't work in version A silicon. */ + pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT; + pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE; + + if (adapter->registry_jumbo_packet > 8192) + /* In order to transmit jumbo packets greater than 8k, the + * FIFO between RxMAC and RxDMA needs to be reduced in size + * to (16k - Jumbo packet size). In order to implement this, + * we must use "cut through" mode in the RxMAC, which chops + * packets down into segments which are (max_size * 16). In + * this case we selected 256 bytes, since this is the size of + * the PCI-Express TLP's that the 1310 uses. + * + * seg_en on, fc_en off, size 0x10 + */ + writel(0x41, &rxmac->mcif_ctrl_max_seg); + else + writel(0, &rxmac->mcif_ctrl_max_seg); + + writel(0, &rxmac->mcif_water_mark); + writel(0, &rxmac->mif_ctrl); + writel(0, &rxmac->space_avail); + + /* Initialize the the mif_ctrl register + * bit 3: Receive code error. One or more nibbles were signaled as + * errors during the reception of the packet. Clear this + * bit in Gigabit, set it in 100Mbit. This was derived + * experimentally at UNH. + * bit 4: Receive CRC error. The packet's CRC did not match the + * internally generated CRC. + * bit 5: Receive length check error. Indicates that frame length + * field value in the packet does not match the actual data + * byte length and is not a type field. + * bit 16: Receive frame truncated. + * bit 17: Drop packet enable + */ + if (phydev && phydev->speed == SPEED_100) + writel(0x30038, &rxmac->mif_ctrl); + else + writel(0x30030, &rxmac->mif_ctrl); + + /* Finally we initialize RxMac to be enabled & WOL disabled. Packet + * filter is always enabled since it is where the runt packets are + * supposed to be dropped. For version A silicon, runt packet + * dropping doesn't work, so it is disabled in the pf_ctrl register, + * but we still leave the packet filter on. 
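 + * (For that silicon the runt frames are instead dropped and counted in
 + * nic_rx_pkts() further down.)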
+ */ + writel(pf_ctrl, &rxmac->pf_ctrl); + writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl); +} + +static void et1310_config_txmac_regs(struct et131x_adapter *adapter) +{ + struct txmac_regs __iomem *txmac = &adapter->regs->txmac; + + /* We need to update the Control Frame Parameters + * cfpt - control frame pause timer set to 64 (0x40) + * cfep - control frame extended pause timer set to 0x0 + */ + if (adapter->flow == FLOW_NONE) + writel(0, &txmac->cf_param); + else + writel(0x40, &txmac->cf_param); +} + +static void et1310_config_macstat_regs(struct et131x_adapter *adapter) +{ + struct macstat_regs __iomem *macstat = &adapter->regs->macstat; + u32 __iomem *reg; + + /* initialize all the macstat registers to zero on the device */ + for (reg = &macstat->txrx_0_64_byte_frames; + reg <= &macstat->carry_reg2; reg++) + writel(0, reg); + + /* Unmask any counters that we want to track the overflow of. + * Initially this will be all counters. It may become clear later + * that we do not need to track all counters. + */ + writel(0xFFFFBE32, &macstat->carry_reg1_mask); + writel(0xFFFE7E8B, &macstat->carry_reg2_mask); +} + +static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, + u8 reg, u16 *value) +{ + struct mac_regs __iomem *mac = &adapter->regs->mac; + int status = 0; + u32 delay = 0; + u32 mii_addr; + u32 mii_cmd; + u32 mii_indicator; + + /* Save a local copy of the registers we are dealing with so we can + * set them back + */ + mii_addr = readl(&mac->mii_mgmt_addr); + mii_cmd = readl(&mac->mii_mgmt_cmd); + + /* Stop the current operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* Set up the register we need to read from on the correct PHY */ + writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); + + writel(0x1, &mac->mii_mgmt_cmd); + + do { + udelay(50); + delay++; + mii_indicator = readl(&mac->mii_mgmt_indicator); + } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50); + + /* If we hit the max delay, we could not read the register */ + if (delay == 50) { + dev_warn(&adapter->pdev->dev, + "reg 0x%08x could not be read\n", reg); + dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", + mii_indicator); + + status = -EIO; + goto out; + } + + /* If we hit here we were able to read the register and we need to + * return the value to the caller + */ + *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK; + +out: + /* Stop the read operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* set the registers we touched back to the state at which we entered + * this function + */ + writel(mii_addr, &mac->mii_mgmt_addr); + writel(mii_cmd, &mac->mii_mgmt_cmd); + + return status; +} + +static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) +{ + struct phy_device *phydev = adapter->phydev; + + if (!phydev) + return -EIO; + + return et131x_phy_mii_read(adapter, phydev->addr, reg, value); +} + +static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, + u16 value) +{ + struct mac_regs __iomem *mac = &adapter->regs->mac; + int status = 0; + u32 delay = 0; + u32 mii_addr; + u32 mii_cmd; + u32 mii_indicator; + + /* Save a local copy of the registers we are dealing with so we can + * set them back + */ + mii_addr = readl(&mac->mii_mgmt_addr); + mii_cmd = readl(&mac->mii_mgmt_cmd); + + /* Stop the current operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* Set up the register we need to write to on the correct PHY */ + writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr); + + /* Add the value to write to 
the registers to the mac */ + writel(value, &mac->mii_mgmt_ctrl); + + do { + udelay(50); + delay++; + mii_indicator = readl(&mac->mii_mgmt_indicator); + } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100); + + /* If we hit the max delay, we could not write the register */ + if (delay == 100) { + u16 tmp; + + dev_warn(&adapter->pdev->dev, + "reg 0x%08x could not be written", reg); + dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", + mii_indicator); + dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", + readl(&mac->mii_mgmt_cmd)); + + et131x_mii_read(adapter, reg, &tmp); + + status = -EIO; + } + /* Stop the write operation */ + writel(0, &mac->mii_mgmt_cmd); + + /* set the registers we touched back to the state at which we entered + * this function + */ + writel(mii_addr, &mac->mii_mgmt_addr); + writel(mii_cmd, &mac->mii_mgmt_cmd); + + return status; +} + +static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter, + u16 regnum, + u16 bitnum, + u8 *value) +{ + u16 reg; + u16 mask = 1 << bitnum; + + et131x_mii_read(adapter, regnum, ®); + + *value = (reg & mask) >> bitnum; +} + +static void et1310_config_flow_control(struct et131x_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + if (phydev->duplex == DUPLEX_HALF) { + adapter->flow = FLOW_NONE; + } else { + char remote_pause, remote_async_pause; + + et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause); + et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause); + + if (remote_pause && remote_async_pause) { + adapter->flow = adapter->wanted_flow; + } else if (remote_pause && !remote_async_pause) { + if (adapter->wanted_flow == FLOW_BOTH) + adapter->flow = FLOW_BOTH; + else + adapter->flow = FLOW_NONE; + } else if (!remote_pause && !remote_async_pause) { + adapter->flow = FLOW_NONE; + } else { + if (adapter->wanted_flow == FLOW_BOTH) + adapter->flow = FLOW_RXONLY; + else + adapter->flow = FLOW_NONE; + } + } +} + +/* et1310_update_macstat_host_counters - Update local copy of the statistics */ +static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) +{ + struct ce_stats *stats = &adapter->stats; + struct macstat_regs __iomem *macstat = + &adapter->regs->macstat; + + stats->tx_collisions += readl(&macstat->tx_total_collisions); + stats->tx_first_collisions += readl(&macstat->tx_single_collisions); + stats->tx_deferred += readl(&macstat->tx_deferred); + stats->tx_excessive_collisions += + readl(&macstat->tx_multiple_collisions); + stats->tx_late_collisions += readl(&macstat->tx_late_collisions); + stats->tx_underflows += readl(&macstat->tx_undersize_frames); + stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); + + stats->rx_align_errs += readl(&macstat->rx_align_errs); + stats->rx_crc_errs += readl(&macstat->rx_code_errs); + stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); + stats->rx_overflows += readl(&macstat->rx_oversize_packets); + stats->rx_code_violations += readl(&macstat->rx_fcs_errs); + stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); + stats->rx_other_errs += readl(&macstat->rx_fragment_packets); +} + +/* et1310_handle_macstat_interrupt + * + * One of the MACSTAT counters has wrapped. Update the local copy of + * the statistics held in the adapter structure, checking the "wrap" + * bit for each counter. + */ +static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) +{ + u32 carry_reg1; + u32 carry_reg2; + + /* Read the interrupt bits from the register(s). These are Clear On + * Write. 
+ */ + carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); + carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); + + writel(carry_reg1, &adapter->regs->macstat.carry_reg1); + writel(carry_reg2, &adapter->regs->macstat.carry_reg2); + + /* We need to do update the host copy of all the MAC_STAT counters. + * For each counter, check it's overflow bit. If the overflow bit is + * set, then increment the host version of the count by one complete + * revolution of the counter. This routine is called when the counter + * block indicates that one of the counters has wrapped. + */ + if (carry_reg1 & (1 << 14)) + adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 8)) + adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; + if (carry_reg1 & (1 << 7)) + adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 2)) + adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 6)) + adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 3)) + adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; + if (carry_reg1 & (1 << 0)) + adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; + if (carry_reg2 & (1 << 16)) + adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 15)) + adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 6)) + adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 8)) + adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 5)) + adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 4)) + adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; + if (carry_reg2 & (1 << 2)) + adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; +} + +static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) +{ + struct net_device *netdev = bus->priv; + struct et131x_adapter *adapter = netdev_priv(netdev); + u16 value; + int ret; + + ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); + + if (ret < 0) + return ret; + + return value; +} + +static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, + int reg, u16 value) +{ + struct net_device *netdev = bus->priv; + struct et131x_adapter *adapter = netdev_priv(netdev); + + return et131x_mii_write(adapter, phy_addr, reg, value); +} + +/* et1310_phy_power_switch - PHY power control + * @adapter: device to control + * @down: true for off/false for back on + * + * one hundred, ten, one thousand megs + * How would you like to have your LAN accessed + * Can't you see that this code processed + * Phy power, phy power.. + */ +static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) +{ + u16 data; + struct phy_device *phydev = adapter->phydev; + + et131x_mii_read(adapter, MII_BMCR, &data); + data &= ~BMCR_PDOWN; + if (down) + data |= BMCR_PDOWN; + et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); +} + +/* et131x_xcvr_init - Init the phy if we are setting it into force mode */ +static void et131x_xcvr_init(struct et131x_adapter *adapter) +{ + u16 lcr2; + struct phy_device *phydev = adapter->phydev; + + /* Set the LED behavior such that LED 1 indicates speed (off = + * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates + * link and activity (on for link, blink off for activity). + * + * NOTE: Some customizations have been added here for specific + * vendors; The LED behavior is now determined by vendor data in the + * EEPROM. However, the above description is the default. 
+ */
+	if ((adapter->eeprom_data[1] & 0x4) == 0) {
+		et131x_mii_read(adapter, PHY_LED_2, &lcr2);
+
+		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
+		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
+
+		if ((adapter->eeprom_data[1] & 0x8) == 0)
+			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
+		else
+			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
+
+		et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2);
+	}
+}
+
+/* et131x_configure_global_regs - configure JAGCore global regs */
+static void et131x_configure_global_regs(struct et131x_adapter *adapter)
+{
+	struct global_regs __iomem *regs = &adapter->regs->global;
+
+	writel(0, &regs->rxq_start_addr);
+	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
+
+	if (adapter->registry_jumbo_packet < 2048) {
+		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
+		 * block of RAM that the driver can split between Tx
+		 * and Rx as it desires. Our default is to split it
+		 * 50/50:
+		 */
+		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
+		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
+	} else if (adapter->registry_jumbo_packet < 8192) {
+		/* For jumbo packets > 2k but < 8k, split 50-50. */
+		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
+		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
+	} else {
+		/* 9216 is the only packet size greater than 8k that
+		 * is available. The Tx buffer has to be big enough
+		 * for one whole packet on the Tx side. We'll make
+		 * the Tx 9408, and give the rest to Rx
+		 */
+		writel(0x01b3, &regs->rxq_end_addr);
+		writel(0x01b4, &regs->txq_start_addr);
+	}
+
+	/* Initialize the loopback register. Disable all loopbacks. */
+	writel(0, &regs->loopback);
+
+	writel(0, &regs->msi_config);
+
+	/* By default, disable the watchdog timer. It will be enabled when
+	 * a packet is queued.
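+	 * (The Tx and Rx paths re-arm it with
+	 * PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO while work is still
+	 * pending; see nic_send_packet() and et131x_handle_recv_pkts().)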
+ */ + writel(0, ®s->watchdog_timer); +} + +/* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */ +static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) +{ + struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; + struct rx_ring *rx_local = &adapter->rx_ring; + struct fbr_desc *fbr_entry; + u32 entry; + u32 psr_num_des; + unsigned long flags; + u8 id; + + et131x_rx_dma_disable(adapter); + + /* Load the completion writeback physical address */ + writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi); + writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo); + + memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); + + /* Set the address and parameters of the packet status ring */ + writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi); + writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo); + writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des); + writel(0, &rx_dma->psr_full_offset); + + psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK; + writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, + &rx_dma->psr_min_des); + + spin_lock_irqsave(&adapter->rcv_lock, flags); + + /* These local variables track the PSR in the adapter structure */ + rx_local->local_psr_full = 0; + + for (id = 0; id < NUM_FBRS; id++) { + u32 __iomem *num_des; + u32 __iomem *full_offset; + u32 __iomem *min_des; + u32 __iomem *base_hi; + u32 __iomem *base_lo; + struct fbr_lookup *fbr = rx_local->fbr[id]; + + if (id == 0) { + num_des = &rx_dma->fbr0_num_des; + full_offset = &rx_dma->fbr0_full_offset; + min_des = &rx_dma->fbr0_min_des; + base_hi = &rx_dma->fbr0_base_hi; + base_lo = &rx_dma->fbr0_base_lo; + } else { + num_des = &rx_dma->fbr1_num_des; + full_offset = &rx_dma->fbr1_full_offset; + min_des = &rx_dma->fbr1_min_des; + base_hi = &rx_dma->fbr1_base_hi; + base_lo = &rx_dma->fbr1_base_lo; + } + + /* Now's the best time to initialize FBR contents */ + fbr_entry = fbr->ring_virtaddr; + for (entry = 0; entry < fbr->num_entries; entry++) { + fbr_entry->addr_hi = fbr->bus_high[entry]; + fbr_entry->addr_lo = fbr->bus_low[entry]; + fbr_entry->word2 = entry; + fbr_entry++; + } + + /* Set the address and parameters of Free buffer ring 1 and 0 */ + writel(upper_32_bits(fbr->ring_physaddr), base_hi); + writel(lower_32_bits(fbr->ring_physaddr), base_lo); + writel(fbr->num_entries - 1, num_des); + writel(ET_DMA10_WRAP, full_offset); + + /* This variable tracks the free buffer ring 1 full position, + * so it has to match the above. + */ + fbr->local_full = ET_DMA10_WRAP; + writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, + min_des); + } + + /* Program the number of packets we will receive before generating an + * interrupt. + * For version B silicon, this value gets updated once autoneg is + *complete. + */ + writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); + + /* The "time_done" is not working correctly to coalesce interrupts + * after a given time period, but rather is giving us an interrupt + * regardless of whether we have received packets. + * This value gets updated once autoneg is complete. + */ + writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); + + spin_unlock_irqrestore(&adapter->rcv_lock, flags); +} + +/* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. + * + * Configure the transmit engine with the ring buffers we have created + * and prepare it for use. 
+ */ +static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) +{ + struct txdma_regs __iomem *txdma = &adapter->regs->txdma; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Load the hardware with the start of the transmit descriptor ring. */ + writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi); + writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo); + + /* Initialise the transmit DMA engine */ + writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); + + /* Load the completion writeback physical address */ + writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi); + writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo); + + *tx_ring->tx_status = 0; + + writel(0, &txdma->service_request); + tx_ring->send_idx = 0; +} + +/* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */ +static void et131x_adapter_setup(struct et131x_adapter *adapter) +{ + et131x_configure_global_regs(adapter); + et1310_config_mac_regs1(adapter); + + /* Configure the MMC registers */ + /* All we need to do is initialize the Memory Control Register */ + writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); + + et1310_config_rxmac_regs(adapter); + et1310_config_txmac_regs(adapter); + + et131x_config_rx_dma_regs(adapter); + et131x_config_tx_dma_regs(adapter); + + et1310_config_macstat_regs(adapter); + + et1310_phy_power_switch(adapter, 0); + et131x_xcvr_init(adapter); +} + +/* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */ +static void et131x_soft_reset(struct et131x_adapter *adapter) +{ + u32 reg; + + /* Disable MAC Core */ + reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | + ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; + writel(reg, &adapter->regs->mac.cfg1); + + reg = ET_RESET_ALL; + writel(reg, &adapter->regs->global.sw_reset); + + reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | + ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; + writel(reg, &adapter->regs->mac.cfg1); + writel(0, &adapter->regs->mac.cfg1); +} + +static void et131x_enable_interrupts(struct et131x_adapter *adapter) +{ + u32 mask; + + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) + mask = INT_MASK_ENABLE; + else + mask = INT_MASK_ENABLE_NO_FLOW; + + writel(mask, &adapter->regs->global.int_mask); +} + +static void et131x_disable_interrupts(struct et131x_adapter *adapter) +{ + writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); +} + +static void et131x_tx_dma_disable(struct et131x_adapter *adapter) +{ + /* Setup the transmit dma configuration register */ + writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT, + &adapter->regs->txdma.csr); +} + +static void et131x_enable_txrx(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_rx_dma_enable(adapter); + et131x_tx_dma_enable(adapter); + + if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE) + et131x_enable_interrupts(adapter); + + netif_start_queue(netdev); +} + +static void et131x_disable_txrx(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + netif_stop_queue(netdev); + + et131x_rx_dma_disable(adapter); + et131x_tx_dma_disable(adapter); + + et131x_disable_interrupts(adapter); +} + +static void et131x_init_send(struct et131x_adapter *adapter) +{ + int i; + struct tx_ring *tx_ring = &adapter->tx_ring; + struct tcb *tcb = tx_ring->tcb_ring; + + tx_ring->tcb_qhead = tcb; + + memset(tcb, 0, sizeof(struct tcb) * 
NUM_TCB); + + for (i = 0; i < NUM_TCB; i++) { + tcb->next = tcb + 1; + tcb++; + } + + tcb--; + tx_ring->tcb_qtail = tcb; + tcb->next = NULL; + /* Curr send queue should now be empty */ + tx_ring->send_head = NULL; + tx_ring->send_tail = NULL; +} + +/* et1310_enable_phy_coma + * + * driver receive an phy status change interrupt while in D0 and check that + * phy_status is down. + * + * -- gate off JAGCore; + * -- set gigE PHY in Coma mode + * -- wake on phy_interrupt; Perform software reset JAGCore, + * re-initialize jagcore and gigE PHY + */ +static void et1310_enable_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr = readl(&adapter->regs->global.pm_csr); + + /* Stop sending packets. */ + adapter->flags |= FMP_ADAPTER_LOWER_POWER; + + /* Wait for outstanding Receive packets */ + et131x_disable_txrx(adapter->netdev); + + /* Gate off JAGCore 3 clock domains */ + pmcsr &= ~ET_PMCSR_INIT; + writel(pmcsr, &adapter->regs->global.pm_csr); + + /* Program gigE PHY in to Coma mode */ + pmcsr |= ET_PM_PHY_SW_COMA; + writel(pmcsr, &adapter->regs->global.pm_csr); +} + +static void et1310_disable_phy_coma(struct et131x_adapter *adapter) +{ + u32 pmcsr; + + pmcsr = readl(&adapter->regs->global.pm_csr); + + /* Disable phy_sw_coma register and re-enable JAGCore clocks */ + pmcsr |= ET_PMCSR_INIT; + pmcsr &= ~ET_PM_PHY_SW_COMA; + writel(pmcsr, &adapter->regs->global.pm_csr); + + /* Restore the GbE PHY speed and duplex modes; + * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY + */ + + /* Re-initialize the send structures */ + et131x_init_send(adapter); + + /* Bring the device back to the state it was during init prior to + * autonegotiation being complete. This way, when we get the auto-neg + * complete interrupt, we can complete init by calling ConfigMacREGS2. + */ + et131x_soft_reset(adapter); + + et131x_adapter_setup(adapter); + + /* Allow Tx to restart */ + adapter->flags &= ~FMP_ADAPTER_LOWER_POWER; + + et131x_enable_txrx(adapter->netdev); +} + +static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) +{ + u32 tmp_free_buff_ring = *free_buff_ring; + + tmp_free_buff_ring++; + /* This works for all cases where limit < 1024. The 1023 case + * works because 1023++ is 1024 which means the if condition is not + * taken but the carry of the bit into the wrap bit toggles the wrap + * value correctly + */ + if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { + tmp_free_buff_ring &= ~ET_DMA10_MASK; + tmp_free_buff_ring ^= ET_DMA10_WRAP; + } + /* For the 1023 case */ + tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP); + *free_buff_ring = tmp_free_buff_ring; + return tmp_free_buff_ring; +} + +/* et131x_rx_dma_memory_alloc + * + * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, + * and the Packet Status Ring. + */ +static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) +{ + u8 id; + u32 i, j; + u32 bufsize; + u32 psr_size; + u32 fbr_chunksize; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; + + /* Alloc memory for the lookup table */ + rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL); + if (rx_ring->fbr[0] == NULL) + return -ENOMEM; + rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL); + if (rx_ring->fbr[1] == NULL) + return -ENOMEM; + + /* The first thing we will do is configure the sizes of the buffer + * rings. These will change based on jumbo packet support. 
Larger + * jumbo packets increases the size of each entry in FBR0, and the + * number of entries in FBR0, while at the same time decreasing the + * number of entries in FBR1. + * + * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 + * entries are huge in order to accommodate a "jumbo" frame, then it + * will have less entries. Conversely, FBR1 will now be relied upon + * to carry more "normal" frames, thus it's entry size also increases + * and the number of entries goes up too (since it now carries + * "small" + "regular" packets. + * + * In this scheme, we try to maintain 512 entries between the two + * rings. Also, FBR1 remains a constant size - when it's size doubles + * the number of entries halves. FBR0 increases in size, however. + */ + if (adapter->registry_jumbo_packet < 2048) { + rx_ring->fbr[0]->buffsize = 256; + rx_ring->fbr[0]->num_entries = 512; + rx_ring->fbr[1]->buffsize = 2048; + rx_ring->fbr[1]->num_entries = 512; + } else if (adapter->registry_jumbo_packet < 4096) { + rx_ring->fbr[0]->buffsize = 512; + rx_ring->fbr[0]->num_entries = 1024; + rx_ring->fbr[1]->buffsize = 4096; + rx_ring->fbr[1]->num_entries = 512; + } else { + rx_ring->fbr[0]->buffsize = 1024; + rx_ring->fbr[0]->num_entries = 768; + rx_ring->fbr[1]->buffsize = 16384; + rx_ring->fbr[1]->num_entries = 128; + } + + rx_ring->psr_entries = rx_ring->fbr[0]->num_entries + + rx_ring->fbr[1]->num_entries; + + for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; + /* Allocate an area of memory for Free Buffer Ring */ + bufsize = sizeof(struct fbr_desc) * fbr->num_entries; + fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, + bufsize, + &fbr->ring_physaddr, + GFP_KERNEL); + if (!fbr->ring_virtaddr) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Free Buffer Ring %d\n", + id); + return -ENOMEM; + } + } + + for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; + fbr_chunksize = (FBR_CHUNKS * fbr->buffsize); + + for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) { + dma_addr_t fbr_physaddr; + + fbr->mem_virtaddrs[i] = dma_alloc_coherent( + &adapter->pdev->dev, fbr_chunksize, + &fbr->mem_physaddrs[i], + GFP_KERNEL); + + if (!fbr->mem_virtaddrs[i]) { + dev_err(&adapter->pdev->dev, + "Could not alloc memory\n"); + return -ENOMEM; + } + + /* See NOTE in "Save Physical Address" comment above */ + fbr_physaddr = fbr->mem_physaddrs[i]; + + for (j = 0; j < FBR_CHUNKS; j++) { + u32 k = (i * FBR_CHUNKS) + j; + + /* Save the Virtual address of this index for + * quick access later + */ + fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] + + (j * fbr->buffsize); + + /* now store the physical address in the + * descriptor so the device can access it + */ + fbr->bus_high[k] = upper_32_bits(fbr_physaddr); + fbr->bus_low[k] = lower_32_bits(fbr_physaddr); + fbr_physaddr += fbr->buffsize; + } + } + } + + /* Allocate an area of memory for FIFO of Packet Status ring entries */ + psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; + + rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, + psr_size, + &rx_ring->ps_ring_physaddr, + GFP_KERNEL); + + if (!rx_ring->ps_ring_virtaddr) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Packet Status Ring\n"); + return -ENOMEM; + } + + /* Allocate an area of memory for writeback of status information */ + rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct rx_status_block), + &rx_ring->rx_status_bus, + GFP_KERNEL); + if (!rx_ring->rx_status_block) { + dev_err(&adapter->pdev->dev, + 
"Cannot alloc memory for Status Block\n"); + return -ENOMEM; + } + rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; + + /* The RFDs are going to be put on lists later on, so initialize the + * lists now. + */ + INIT_LIST_HEAD(&rx_ring->recv_list); + return 0; +} + +static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) +{ + u8 id; + u32 ii; + u32 bufsize; + u32 psr_size; + struct rfd *rfd; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct fbr_lookup *fbr; + + /* Free RFDs and associated packet descriptors */ + WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); + + while (!list_empty(&rx_ring->recv_list)) { + rfd = list_entry(rx_ring->recv_list.next, + struct rfd, list_node); + + list_del(&rfd->list_node); + rfd->skb = NULL; + kfree(rfd); + } + + /* Free Free Buffer Rings */ + for (id = 0; id < NUM_FBRS; id++) { + fbr = rx_ring->fbr[id]; + + if (!fbr || !fbr->ring_virtaddr) + continue; + + /* First the packet memory */ + for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) { + if (fbr->mem_virtaddrs[ii]) { + bufsize = fbr->buffsize * FBR_CHUNKS; + + dma_free_coherent(&adapter->pdev->dev, + bufsize, + fbr->mem_virtaddrs[ii], + fbr->mem_physaddrs[ii]); + + fbr->mem_virtaddrs[ii] = NULL; + } + } + + bufsize = sizeof(struct fbr_desc) * fbr->num_entries; + + dma_free_coherent(&adapter->pdev->dev, + bufsize, + fbr->ring_virtaddr, + fbr->ring_physaddr); + + fbr->ring_virtaddr = NULL; + } + + /* Free Packet Status Ring */ + if (rx_ring->ps_ring_virtaddr) { + psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; + + dma_free_coherent(&adapter->pdev->dev, psr_size, + rx_ring->ps_ring_virtaddr, + rx_ring->ps_ring_physaddr); + + rx_ring->ps_ring_virtaddr = NULL; + } + + /* Free area of memory for the writeback of status information */ + if (rx_ring->rx_status_block) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct rx_status_block), + rx_ring->rx_status_block, + rx_ring->rx_status_bus); + rx_ring->rx_status_block = NULL; + } + + /* Free the FBR Lookup Table */ + kfree(rx_ring->fbr[0]); + kfree(rx_ring->fbr[1]); + + /* Reset Counters */ + rx_ring->num_ready_recv = 0; +} + +/* et131x_init_recv - Initialize receive data structures */ +static int et131x_init_recv(struct et131x_adapter *adapter) +{ + struct rfd *rfd; + u32 rfdct; + struct rx_ring *rx_ring = &adapter->rx_ring; + + /* Setup each RFD */ + for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { + rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA); + if (!rfd) + return -ENOMEM; + + rfd->skb = NULL; + + /* Add this RFD to the recv_list */ + list_add_tail(&rfd->list_node, &rx_ring->recv_list); + + /* Increment the available RFD's */ + rx_ring->num_ready_recv++; + } + + return 0; +} + +/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */ +static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + /* For version B silicon, we do not use the RxDMA timer for 10 and 100 + * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 
+ */ + if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { + writel(0, &adapter->regs->rxdma.max_pkt_time); + writel(1, &adapter->regs->rxdma.num_pkt_done); + } +} + +/* nic_return_rfd - Recycle a RFD and put it back onto the receive list */ +static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) +{ + struct rx_ring *rx_local = &adapter->rx_ring; + struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; + u16 buff_index = rfd->bufferindex; + u8 ring_index = rfd->ringindex; + unsigned long flags; + struct fbr_lookup *fbr = rx_local->fbr[ring_index]; + + /* We don't use any of the OOB data besides status. Otherwise, we + * need to clean up OOB data + */ + if (buff_index < fbr->num_entries) { + u32 free_buff_ring; + u32 __iomem *offset; + struct fbr_desc *next; + + if (ring_index == 0) + offset = &rx_dma->fbr0_full_offset; + else + offset = &rx_dma->fbr1_full_offset; + + next = (struct fbr_desc *)(fbr->ring_virtaddr) + + INDEX10(fbr->local_full); + + /* Handle the Free Buffer Ring advancement here. Write + * the PA / Buffer Index for the returned buffer into + * the oldest (next to be freed)FBR entry + */ + next->addr_hi = fbr->bus_high[buff_index]; + next->addr_lo = fbr->bus_low[buff_index]; + next->word2 = buff_index; + + free_buff_ring = bump_free_buff_ring(&fbr->local_full, + fbr->num_entries - 1); + writel(free_buff_ring, offset); + } else { + dev_err(&adapter->pdev->dev, + "%s illegal Buffer Index returned\n", __func__); + } + + /* The processing on this RFD is done, so put it back on the tail of + * our list + */ + spin_lock_irqsave(&adapter->rcv_lock, flags); + list_add_tail(&rfd->list_node, &rx_local->recv_list); + rx_local->num_ready_recv++; + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + + WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); +} + +/* nic_rx_pkts - Checks the hardware for available packets + * + * Checks the hardware for available packets, using completion ring + * If packets are available, it gets an RFD from the recv_list, attaches + * the packet to it, puts the RFD in the RecvPendList, and also returns + * the pointer to the RFD. + */ +static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) +{ + struct rx_ring *rx_local = &adapter->rx_ring; + struct rx_status_block *status; + struct pkt_stat_desc *psr; + struct rfd *rfd; + unsigned long flags; + struct list_head *element; + u8 ring_index; + u16 buff_index; + u32 len; + u32 word0; + u32 word1; + struct sk_buff *skb; + struct fbr_lookup *fbr; + + /* RX Status block is written by the DMA engine prior to every + * interrupt. It contains the next to be used entry in the Packet + * Status Ring, and also the two Free Buffer rings. + */ + status = rx_local->rx_status_block; + word1 = status->word1 >> 16; + + /* Check the PSR and wrap bits do not match */ + if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) + return NULL; /* Looks like this ring is not updated yet */ + + /* The packet status ring indicates that data is available. */ + psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) + + (rx_local->local_psr_full & 0xFFF); + + /* Grab any information that is required once the PSR is advanced, + * since we can no longer rely on the memory being accurate + */ + len = psr->word1 & 0xFFFF; + ring_index = (psr->word1 >> 26) & 0x03; + fbr = rx_local->fbr[ring_index]; + buff_index = (psr->word1 >> 16) & 0x3FF; + word0 = psr->word0; + + /* Indicate that we have used this PSR entry. 
*/ + /* FIXME wrap 12 */ + add_12bit(&rx_local->local_psr_full, 1); + if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) { + /* Clear psr full and toggle the wrap bit */ + rx_local->local_psr_full &= ~0xFFF; + rx_local->local_psr_full ^= 0x1000; + } + + writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); + + if (ring_index > 1 || buff_index > fbr->num_entries - 1) { + /* Illegal buffer or ring index cannot be used by S/W*/ + dev_err(&adapter->pdev->dev, + "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", + rx_local->local_psr_full & 0xFFF, len, buff_index); + return NULL; + } + + /* Get and fill the RFD. */ + spin_lock_irqsave(&adapter->rcv_lock, flags); + + element = rx_local->recv_list.next; + rfd = list_entry(element, struct rfd, list_node); + + if (!rfd) { + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + return NULL; + } + + list_del(&rfd->list_node); + rx_local->num_ready_recv--; + + spin_unlock_irqrestore(&adapter->rcv_lock, flags); + + rfd->bufferindex = buff_index; + rfd->ringindex = ring_index; + + /* In V1 silicon, there is a bug which screws up filtering of runt + * packets. Therefore runt packet filtering is disabled in the MAC and + * the packets are dropped here. They are also counted here. + */ + if (len < (NIC_MIN_PACKET_SIZE + 4)) { + adapter->stats.rx_other_errs++; + rfd->len = 0; + goto out; + } + + if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT)) + adapter->stats.multicast_pkts_rcvd++; + + rfd->len = len; + + skb = dev_alloc_skb(rfd->len + 2); + if (!skb) + return NULL; + + adapter->netdev->stats.rx_bytes += rfd->len; + + memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len); + + skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->ip_summed = CHECKSUM_NONE; + netif_receive_skb(skb); + +out: + nic_return_rfd(adapter, rfd); + return rfd; +} + +static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget) +{ + struct rfd *rfd = NULL; + int count = 0; + int limit = budget; + bool done = true; + struct rx_ring *rx_ring = &adapter->rx_ring; + + if (budget > MAX_PACKETS_HANDLED) + limit = MAX_PACKETS_HANDLED; + + /* Process up to available RFD's */ + while (count < limit) { + if (list_empty(&rx_ring->recv_list)) { + WARN_ON(rx_ring->num_ready_recv != 0); + done = false; + break; + } + + rfd = nic_rx_pkts(adapter); + + if (rfd == NULL) + break; + + /* Do not receive any packets until a filter has been set. + * Do not receive any packets until we have link. + * If length is zero, return the RFD in order to advance the + * Free buffer ring. + */ + if (!adapter->packet_filter || + !netif_carrier_ok(adapter->netdev) || + rfd->len == 0) + continue; + + adapter->netdev->stats.rx_packets++; + + if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK) + dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); + + count++; + } + + if (count == limit || !done) { + rx_ring->unfinished_receives = true; + writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, + &adapter->regs->global.watchdog_timer); + } else { + /* Watchdog timer will disable itself if appropriate. */ + rx_ring->unfinished_receives = false; + } + + return count; +} + +/* et131x_tx_dma_memory_alloc + * + * Allocates memory that will be visible both to the device and to the CPU. + * The OS will pass us packets, pointers to which we will insert in the Tx + * Descriptor queue. The device will read this queue to find the packets in + * memory. The device will update the "status" in memory each time it xmits a + * packet. 
+ */ +static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) +{ + int desc_size = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Allocate memory for the TCB's (Transmit Control Block) */ + tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), + GFP_ATOMIC | GFP_DMA); + if (!tx_ring->tcb_ring) + return -ENOMEM; + + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); + tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev, + desc_size, + &tx_ring->tx_desc_ring_pa, + GFP_KERNEL); + if (!tx_ring->tx_desc_ring) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Tx Ring\n"); + return -ENOMEM; + } + + tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, + sizeof(u32), + &tx_ring->tx_status_pa, + GFP_KERNEL); + if (!tx_ring->tx_status_pa) { + dev_err(&adapter->pdev->dev, + "Cannot alloc memory for Tx status block\n"); + return -ENOMEM; + } + return 0; +} + +static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) +{ + int desc_size = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + if (tx_ring->tx_desc_ring) { + /* Free memory relating to Tx rings here */ + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); + dma_free_coherent(&adapter->pdev->dev, + desc_size, + tx_ring->tx_desc_ring, + tx_ring->tx_desc_ring_pa); + tx_ring->tx_desc_ring = NULL; + } + + /* Free memory for the Tx status block */ + if (tx_ring->tx_status) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(u32), + tx_ring->tx_status, + tx_ring->tx_status_pa); + + tx_ring->tx_status = NULL; + } + /* Free the memory for the tcb structures */ + kfree(tx_ring->tcb_ring); +} + +/* nic_send_packet - NIC specific send handler for version B silicon. */ +static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) +{ + u32 i; + struct tx_desc desc[24]; + u32 frag = 0; + u32 thiscopy, remainder; + struct sk_buff *skb = tcb->skb; + u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; + struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; + struct phy_device *phydev = adapter->phydev; + dma_addr_t dma_addr; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Part of the optimizations of this send routine restrict us to + * sending 24 fragments at a pass. In practice we should never see + * more than 5 fragments. + */ + + /* nr_frags should be no more than 18. */ + BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23); + + memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); + + for (i = 0; i < nr_frags; i++) { + /* If there is something in this element, lets get a + * descriptor from the ring and get the necessary data + */ + if (i == 0) { + /* If the fragments are smaller than a standard MTU, + * then map them to a single descriptor in the Tx + * Desc ring. However, if they're larger, as is + * possible with support for jumbo packets, then + * split them each across 2 descriptors. + * + * This will work until we determine why the hardware + * doesn't seem to like large fragments. 
+ */ + if (skb_headlen(skb) <= 1514) { + /* Low 16bits are length, high is vlan and + * unused currently so zero + */ + desc[frag].len_vlan = skb_headlen(skb); + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data, + skb_headlen(skb), + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } else { + desc[frag].len_vlan = skb_headlen(skb) / 2; + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + + desc[frag].len_vlan = skb_headlen(skb) / 2; + dma_addr = dma_map_single(&adapter->pdev->dev, + skb->data + + skb_headlen(skb) / 2, + skb_headlen(skb) / 2, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } + } else { + desc[frag].len_vlan = frags[i - 1].size; + dma_addr = skb_frag_dma_map(&adapter->pdev->dev, + &frags[i - 1], + 0, + frags[i - 1].size, + DMA_TO_DEVICE); + desc[frag].addr_lo = lower_32_bits(dma_addr); + desc[frag].addr_hi = upper_32_bits(dma_addr); + frag++; + } + } + + if (phydev && phydev->speed == SPEED_1000) { + if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) { + /* Last element & Interrupt flag */ + desc[frag - 1].flags = + TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; + tx_ring->since_irq = 0; + } else { /* Last element */ + desc[frag - 1].flags = TXDESC_FLAG_LASTPKT; + } + } else { + desc[frag - 1].flags = + TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; + } + + desc[0].flags |= TXDESC_FLAG_FIRSTPKT; + + tcb->index_start = tx_ring->send_idx; + tcb->stale = 0; + + thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx); + + if (thiscopy >= frag) { + remainder = 0; + thiscopy = frag; + } else { + remainder = frag - thiscopy; + } + + memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx), + desc, + sizeof(struct tx_desc) * thiscopy); + + add_10bit(&tx_ring->send_idx, thiscopy); + + if (INDEX10(tx_ring->send_idx) == 0 || + INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) { + tx_ring->send_idx &= ~ET_DMA10_MASK; + tx_ring->send_idx ^= ET_DMA10_WRAP; + } + + if (remainder) { + memcpy(tx_ring->tx_desc_ring, + desc + thiscopy, + sizeof(struct tx_desc) * remainder); + + add_10bit(&tx_ring->send_idx, remainder); + } + + if (INDEX10(tx_ring->send_idx) == 0) { + if (tx_ring->send_idx) + tcb->index = NUM_DESC_PER_RING_TX - 1; + else + tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); + } else { + tcb->index = tx_ring->send_idx - 1; + } + + spin_lock(&adapter->tcb_send_qlock); + + if (tx_ring->send_tail) + tx_ring->send_tail->next = tcb; + else + tx_ring->send_head = tcb; + + tx_ring->send_tail = tcb; + + WARN_ON(tcb->next != NULL); + + tx_ring->used++; + + spin_unlock(&adapter->tcb_send_qlock); + + /* Write the new write pointer back to the device. */ + writel(tx_ring->send_idx, &adapter->regs->txdma.service_request); + + /* For Gig only, we use Tx Interrupt coalescing. Enable the software + * timer to wake us up if this packet isn't followed by N more. 
+ */ + if (phydev && phydev->speed == SPEED_1000) { + writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, + &adapter->regs->global.watchdog_timer); + } + return 0; +} + +static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) +{ + int status; + struct tcb *tcb; + unsigned long flags; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* All packets must have at least a MAC address and a protocol type */ + if (skb->len < ETH_HLEN) + return -EIO; + + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + tcb = tx_ring->tcb_qhead; + + if (tcb == NULL) { + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + return -ENOMEM; + } + + tx_ring->tcb_qhead = tcb->next; + + if (tx_ring->tcb_qhead == NULL) + tx_ring->tcb_qtail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + + tcb->skb = skb; + tcb->next = NULL; + + status = nic_send_packet(adapter, tcb); + + if (status != 0) { + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; + else + /* Apparently ready Q is empty. */ + tx_ring->tcb_qhead = tcb; + + tx_ring->tcb_qtail = tcb; + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + return status; + } + WARN_ON(tx_ring->used > NUM_TCB); + return 0; +} + +/* free_send_packet - Recycle a struct tcb */ +static inline void free_send_packet(struct et131x_adapter *adapter, + struct tcb *tcb) +{ + unsigned long flags; + struct tx_desc *desc = NULL; + struct net_device_stats *stats = &adapter->netdev->stats; + struct tx_ring *tx_ring = &adapter->tx_ring; + u64 dma_addr; + + if (tcb->skb) { + stats->tx_bytes += tcb->skb->len; + + /* Iterate through the TX descriptors on the ring + * corresponding to this packet and umap the fragments + * they point to + */ + do { + desc = tx_ring->tx_desc_ring + + INDEX10(tcb->index_start); + + dma_addr = desc->addr_lo; + dma_addr |= (u64)desc->addr_hi << 32; + + dma_unmap_single(&adapter->pdev->dev, + dma_addr, + desc->len_vlan, DMA_TO_DEVICE); + + add_10bit(&tcb->index_start, 1); + if (INDEX10(tcb->index_start) >= + NUM_DESC_PER_RING_TX) { + tcb->index_start &= ~ET_DMA10_MASK; + tcb->index_start ^= ET_DMA10_WRAP; + } + } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index)); + + dev_kfree_skb_any(tcb->skb); + } + + memset(tcb, 0, sizeof(struct tcb)); + + /* Add the TCB to the Ready Q */ + spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); + + stats->tx_packets++; + + if (tx_ring->tcb_qtail) + tx_ring->tcb_qtail->next = tcb; + else /* Apparently ready Q is empty. */ + tx_ring->tcb_qhead = tcb; + + tx_ring->tcb_qtail = tcb; + + spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); + WARN_ON(tx_ring->used < 0); +} + +/* et131x_free_busy_send_packets - Free and complete the stopped active sends */ +static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) +{ + struct tcb *tcb; + unsigned long flags; + u32 freed = 0; + struct tx_ring *tx_ring = &adapter->tx_ring; + + /* Any packets being sent? 
Check the first TCB on the send list */ + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + + while (tcb != NULL && freed < NUM_TCB) { + struct tcb *next = tcb->next; + + tx_ring->send_head = next; + + if (next == NULL) + tx_ring->send_tail = NULL; + + tx_ring->used--; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + + freed++; + free_send_packet(adapter, tcb); + + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + } + + WARN_ON(freed == NUM_TCB); + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + + tx_ring->used = 0; +} + +/* et131x_handle_send_pkts + * + * Re-claim the send resources, complete sends and get more to send from + * the send wait queue. + */ +static void et131x_handle_send_pkts(struct et131x_adapter *adapter) +{ + unsigned long flags; + u32 serviced; + struct tcb *tcb; + u32 index; + struct tx_ring *tx_ring = &adapter->tx_ring; + + serviced = readl(&adapter->regs->txdma.new_service_complete); + index = INDEX10(serviced); + + /* Has the ring wrapped? Process any descriptors that do not have + * the same "wrap" indicator as the current completion indicator + */ + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + tcb = tx_ring->send_head; + + while (tcb && + ((serviced ^ tcb->index) & ET_DMA10_WRAP) && + index < INDEX10(tcb->index)) { + tx_ring->used--; + tx_ring->send_head = tcb->next; + if (tcb->next == NULL) + tx_ring->send_tail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + free_send_packet(adapter, tcb); + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + /* Goto the next packet */ + tcb = tx_ring->send_head; + } + while (tcb && + !((serviced ^ tcb->index) & ET_DMA10_WRAP) && + index > (tcb->index & ET_DMA10_MASK)) { + tx_ring->used--; + tx_ring->send_head = tcb->next; + if (tcb->next == NULL) + tx_ring->send_tail = NULL; + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); + free_send_packet(adapter, tcb); + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); + + /* Goto the next packet */ + tcb = tx_ring->send_head; + } + + /* Wake up the queue when we hit a low-water mark */ + if (tx_ring->used <= NUM_TCB / 3) + netif_wake_queue(adapter->netdev); + + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); +} + +static int et131x_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + return phy_ethtool_gset(adapter->phydev, cmd); +} + +static int et131x_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + return phy_ethtool_sset(adapter->phydev, cmd); +} + +static int et131x_get_regs_len(struct net_device *netdev) +{ +#define ET131X_REGS_LEN 256 + return ET131X_REGS_LEN * sizeof(u32); +} + +static void et131x_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *regs_data) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct address_map __iomem *aregs = adapter->regs; + u32 *regs_buff = regs_data; + u32 num = 0; + u16 tmp; + + memset(regs_data, 0, et131x_get_regs_len(netdev)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + /* PHY regs */ + et131x_mii_read(adapter, MII_BMCR, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_BMSR, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_PHYSID1, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_PHYSID2, &tmp); + 
regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_ADVERTISE, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_LPA, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_EXPANSION, &tmp); + regs_buff[num++] = tmp; + /* Autoneg next page transmit reg */ + et131x_mii_read(adapter, 0x07, &tmp); + regs_buff[num++] = tmp; + /* Link partner next page reg */ + et131x_mii_read(adapter, 0x08, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_CTRL1000, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_STAT1000, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, 0x0b, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, 0x0c, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_MMD_CTRL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_MMD_DATA, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, MII_ESTATUS, &tmp); + regs_buff[num++] = tmp; + + et131x_mii_read(adapter, PHY_INDEX_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_DATA_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp); + regs_buff[num++] = tmp; + + et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_CONFIG, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LED_1, &tmp); + regs_buff[num++] = tmp; + et131x_mii_read(adapter, PHY_LED_2, &tmp); + regs_buff[num++] = tmp; + + /* Global regs */ + regs_buff[num++] = readl(&aregs->global.txq_start_addr); + regs_buff[num++] = readl(&aregs->global.txq_end_addr); + regs_buff[num++] = readl(&aregs->global.rxq_start_addr); + regs_buff[num++] = readl(&aregs->global.rxq_end_addr); + regs_buff[num++] = readl(&aregs->global.pm_csr); + regs_buff[num++] = adapter->stats.interrupt_status; + regs_buff[num++] = readl(&aregs->global.int_mask); + regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); + regs_buff[num++] = readl(&aregs->global.int_status_alias); + regs_buff[num++] = readl(&aregs->global.sw_reset); + regs_buff[num++] = readl(&aregs->global.slv_timer); + regs_buff[num++] = readl(&aregs->global.msi_config); + regs_buff[num++] = readl(&aregs->global.loopback); + regs_buff[num++] = readl(&aregs->global.watchdog_timer); + + /* TXDMA regs */ + regs_buff[num++] = readl(&aregs->txdma.csr); + regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); + regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); + regs_buff[num++] = readl(&aregs->txdma.pr_num_des); + regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); + regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); + regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); + regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); + regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); + regs_buff[num++] = readl(&aregs->txdma.service_request); + regs_buff[num++] = readl(&aregs->txdma.service_complete); + regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); + regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); + regs_buff[num++] = 
readl(&aregs->txdma.tx_dma_error); + regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); + regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); + regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); + regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); + regs_buff[num++] = readl(&aregs->txdma.new_service_complete); + regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); + + /* RXDMA regs */ + regs_buff[num++] = readl(&aregs->rxdma.csr); + regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); + regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); + regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); + regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); + regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); + regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); + regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); + regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); + regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); + regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); +} + +static void et131x_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static struct ethtool_ops et131x_ethtool_ops = { + .get_settings = et131x_get_settings, + .set_settings = et131x_set_settings, + .get_drvinfo = et131x_get_drvinfo, + .get_regs_len = et131x_get_regs_len, + .get_regs = et131x_get_regs, + .get_link = ethtool_op_get_link, +}; + +/* et131x_hwaddr_init - set up the MAC Address */ +static void et131x_hwaddr_init(struct et131x_adapter *adapter) +{ + /* If have our default mac from init and no mac address from + * EEPROM then we need to generate the last octet and set it on the + * device + */ + if (is_zero_ether_addr(adapter->rom_addr)) { + /* We need to randomly generate the last octet so we + * decrease our chances of setting the mac address to + * same as another one of our cards in the 
system + */ + get_random_bytes(&adapter->addr[5], 1); + /* We have the default value in the register we are + * working with so we need to copy the current + * address into the permanent address + */ + ether_addr_copy(adapter->rom_addr, adapter->addr); + } else { + /* We do not have an override address, so set the + * current address to the permanent address and add + * it to the device + */ + ether_addr_copy(adapter->addr, adapter->rom_addr); + } +} + +static int et131x_pci_init(struct et131x_adapter *adapter, + struct pci_dev *pdev) +{ + u16 max_payload; + int i, rc; + + rc = et131x_init_eeprom(adapter); + if (rc < 0) + goto out; + + if (!pci_is_pcie(pdev)) { + dev_err(&pdev->dev, "Missing PCIe capabilities\n"); + goto err_out; + } + + /* Program the Ack/Nak latency and replay timers */ + max_payload = pdev->pcie_mpss; + + if (max_payload < 2) { + static const u16 acknak[2] = { 0x76, 0xD0 }; + static const u16 replay[2] = { 0x1E0, 0x2ED }; + + if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, + acknak[max_payload])) { + dev_err(&pdev->dev, + "Could not write PCI config space for ACK/NAK\n"); + goto err_out; + } + if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, + replay[max_payload])) { + dev_err(&pdev->dev, + "Could not write PCI config space for Replay Timer\n"); + goto err_out; + } + } + + /* l0s and l1 latency timers. We are using default values. + * Representing 001 for L0s and 010 for L1 + */ + if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { + dev_err(&pdev->dev, + "Could not write PCI config space for Latency Timers\n"); + goto err_out; + } + + /* Change the max read size to 2k */ + if (pcie_set_readrq(pdev, 2048)) { + dev_err(&pdev->dev, + "Couldn't change PCI config space for Max read size\n"); + goto err_out; + } + + /* Get MAC address from config space if an eeprom exists, otherwise + * the MAC address there will not be valid + */ + if (!adapter->has_eeprom) { + et131x_hwaddr_init(adapter); + return 0; + } + + for (i = 0; i < ETH_ALEN; i++) { + if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, + adapter->rom_addr + i)) { + dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); + goto err_out; + } + } + ether_addr_copy(adapter->addr, adapter->rom_addr); +out: + return rc; +err_out: + rc = -EIO; + goto out; +} + +/* et131x_error_timer_handler + * @data: timer-specific variable; here a pointer to our adapter structure + * + * The routine called when the error timer expires, to track the number of + * recurring errors. + */ +static void et131x_error_timer_handler(unsigned long data) +{ + struct et131x_adapter *adapter = (struct et131x_adapter *)data; + struct phy_device *phydev = adapter->phydev; + + if (et1310_in_phy_coma(adapter)) { + /* Bring the device immediately out of coma, to + * prevent it from sleeping indefinitely, this + * mechanism could be improved! + */ + et1310_disable_phy_coma(adapter); + adapter->boot_coma = 20; + } else { + et1310_update_macstat_host_counters(adapter); + } + + if (!phydev->link && adapter->boot_coma < 11) + adapter->boot_coma++; + + if (adapter->boot_coma == 10) { + if (!phydev->link) { + if (!et1310_in_phy_coma(adapter)) { + /* NOTE - This was originally a 'sync with + * interrupt'. How to do that under Linux? 
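+ * Net effect: once boot_coma has counted roughly ten error-timer
+ * periods without link, the PHY is placed into coma mode.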
+ */ + et131x_enable_interrupts(adapter); + et1310_enable_phy_coma(adapter); + } + } + } + + /* This is a periodic timer, so reschedule */ + mod_timer(&adapter->error_timer, jiffies + + msecs_to_jiffies(TX_ERROR_PERIOD)); +} + +static void et131x_adapter_memory_free(struct et131x_adapter *adapter) +{ + et131x_tx_dma_memory_free(adapter); + et131x_rx_dma_memory_free(adapter); +} + +static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) +{ + int status; + + status = et131x_tx_dma_memory_alloc(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "et131x_tx_dma_memory_alloc FAILED\n"); + et131x_tx_dma_memory_free(adapter); + return status; + } + + status = et131x_rx_dma_memory_alloc(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "et131x_rx_dma_memory_alloc FAILED\n"); + et131x_adapter_memory_free(adapter); + return status; + } + + status = et131x_init_recv(adapter); + if (status) { + dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n"); + et131x_adapter_memory_free(adapter); + } + return status; +} + +static void et131x_adjust_link(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = adapter->phydev; + + if (!phydev) + return; + if (phydev->link == adapter->link) + return; + + /* Check to see if we are in coma mode and if + * so, disable it because we will not be able + * to read PHY values until we are out. + */ + if (et1310_in_phy_coma(adapter)) + et1310_disable_phy_coma(adapter); + + adapter->link = phydev->link; + phy_print_status(phydev); + + if (phydev->link) { + adapter->boot_coma = 20; + if (phydev->speed == SPEED_10) { + u16 register18; + + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, + ®ister18); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, + register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, + register18 | 0x8402); + et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, + register18 | 511); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18); + } + + et1310_config_flow_control(adapter); + + if (phydev->speed == SPEED_1000 && + adapter->registry_jumbo_packet > 2048) { + u16 reg; + + et131x_mii_read(adapter, PHY_CONFIG, ®); + reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; + reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; + et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, + reg); + } + + et131x_set_rx_dma_timer(adapter); + et1310_config_mac_regs2(adapter); + } else { + adapter->boot_coma = 0; + + if (phydev->speed == SPEED_10) { + u16 register18; + + et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, + ®ister18); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, + register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, + PHY_INDEX_REG, register18 | 0x8402); + et131x_mii_write(adapter, phydev->addr, + PHY_DATA_REG, register18 | 511); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18); + } + + et131x_free_busy_send_packets(adapter); + et131x_init_send(adapter); + + /* Bring the device back to the state it was during + * init prior to autonegotiation being complete. This + * way, when we get the auto-neg complete interrupt, + * we can complete init by calling config_mac_regs2. 
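+ * The soft reset and adapter setup below re-run that init path.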
+ */ + et131x_soft_reset(adapter); + + et131x_adapter_setup(adapter); + + et131x_disable_txrx(netdev); + et131x_enable_txrx(netdev); + } +} + +static int et131x_mii_probe(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = NULL; + + phydev = phy_find_first(adapter->mii_bus); + if (!phydev) { + dev_err(&adapter->pdev->dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(netdev, dev_name(&phydev->dev), + &et131x_adjust_link, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + phydev->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII | + SUPPORTED_TP); + + if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) + phydev->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + + phydev->advertising = phydev->supported; + phydev->autoneg = AUTONEG_ENABLE; + adapter->phydev = phydev; + + dev_info(&adapter->pdev->dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + + return 0; +} + +static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, + struct pci_dev *pdev) +{ + static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; + + struct et131x_adapter *adapter; + + adapter = netdev_priv(netdev); + adapter->pdev = pci_dev_get(pdev); + adapter->netdev = netdev; + + spin_lock_init(&adapter->tcb_send_qlock); + spin_lock_init(&adapter->tcb_ready_qlock); + spin_lock_init(&adapter->rcv_lock); + + adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ + + ether_addr_copy(adapter->addr, default_mac); + + return adapter; +} + +static void et131x_pci_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct et131x_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + netif_napi_del(&adapter->napi); + phy_disconnect(adapter->phydev); + mdiobus_unregister(adapter->mii_bus); + kfree(adapter->mii_bus->irq); + mdiobus_free(adapter->mii_bus); + + et131x_adapter_memory_free(adapter); + iounmap(adapter->regs); + pci_dev_put(pdev); + + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static void et131x_up(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_enable_txrx(netdev); + phy_start(adapter->phydev); +} + +static void et131x_down(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + /* Save the timestamp for the TX watchdog, prevent a timeout */ + netdev->trans_start = jiffies; + + phy_stop(adapter->phydev); + et131x_disable_txrx(netdev); +} + +#ifdef CONFIG_PM_SLEEP +static int et131x_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netif_running(netdev)) { + netif_device_detach(netdev); + et131x_down(netdev); + pci_save_state(pdev); + } + + return 0; +} + +static int et131x_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netif_running(netdev)) { + pci_restore_state(pdev); + et131x_up(netdev); + netif_device_attach(netdev); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); + +static irqreturn_t et131x_isr(int irq, void *dev_id) +{ + bool 
handled = true; + bool enable_interrupts = true; + struct net_device *netdev = dev_id; + struct et131x_adapter *adapter = netdev_priv(netdev); + struct address_map __iomem *iomem = adapter->regs; + struct rx_ring *rx_ring = &adapter->rx_ring; + struct tx_ring *tx_ring = &adapter->tx_ring; + u32 status; + + if (!netif_device_present(netdev)) { + handled = false; + enable_interrupts = false; + goto out; + } + + et131x_disable_interrupts(adapter); + + status = readl(&adapter->regs->global.int_status); + + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) + status &= ~INT_MASK_ENABLE; + else + status &= ~INT_MASK_ENABLE_NO_FLOW; + + /* Make sure this is our interrupt */ + if (!status) { + handled = false; + et131x_enable_interrupts(adapter); + goto out; + } + + /* This is our interrupt, so process accordingly */ + if (status & ET_INTR_WATCHDOG) { + struct tcb *tcb = tx_ring->send_head; + + if (tcb) + if (++tcb->stale > 1) + status |= ET_INTR_TXDMA_ISR; + + if (rx_ring->unfinished_receives) + status |= ET_INTR_RXDMA_XFR_DONE; + else if (tcb == NULL) + writel(0, &adapter->regs->global.watchdog_timer); + + status &= ~ET_INTR_WATCHDOG; + } + + if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) { + enable_interrupts = false; + napi_schedule(&adapter->napi); + } + + status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE); + + if (!status) + goto out; + + if (status & ET_INTR_TXDMA_ERR) { + /* Following read also clears the register (COR) */ + u32 txdma_err = readl(&iomem->txdma.tx_dma_error); + + dev_warn(&adapter->pdev->dev, + "TXDMA_ERR interrupt, error = %d\n", + txdma_err); + } + + if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { + /* This indicates the number of unused buffers in RXDMA free + * buffer ring 0 is <= the limit you programmed. Free buffer + * resources need to be returned. Free buffers are consumed as + * packets are passed from the network to the host. The host + * becomes aware of the packets from the contents of the packet + * status ring. This ring is queried when the packet done + * interrupt occurs. Packets are then passed to the OS. When + * the OS is done with the packets the resources can be + * returned to the ET1310 for re-use. This interrupt is one + * method of returning resources. + */ + + /* If the user has flow control on, then we will + * send a pause packet, otherwise just exit + */ + if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) { + u32 pm_csr; + + /* Tell the device to send a pause packet via the back + * pressure register (bp req and bp xon/xoff) + */ + pm_csr = readl(&iomem->global.pm_csr); + if (!et1310_in_phy_coma(adapter)) + writel(3, &iomem->txmac.bp_ctrl); + } + } + + /* Handle Packet Status Ring Low Interrupt */ + if (status & ET_INTR_RXDMA_STAT_LOW) { + /* Same idea as with the two Free Buffer Rings. Packets going + * from the network to the host each consume a free buffer + * resource and a packet status resource. These resources are + * passed to the OS. When the OS is done with the resources, + * they need to be returned to the ET1310. This is one method + * of returning the resources. + */ + } + + if (status & ET_INTR_RXDMA_ERR) { + /* The rxdma_error interrupt is sent when a time-out on a + * request issued by the JAGCore has occurred or a completion is + * returned with an un-successful status. In both cases the + * request is considered complete. The JAGCore will + * automatically re-try the request in question. 
Normally + * information on events like these are sent to the host using + * the "Advanced Error Reporting" capability. This interrupt is + * another way of getting similar information. The only thing + * required is to clear the interrupt by reading the ISR in the + * global resources. The JAGCore will do a re-try on the + * request. Normally you should never see this interrupt. If + * you start to see this interrupt occurring frequently then + * something bad has occurred. A reset might be the thing to do. + */ + /* TRAP();*/ + + dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n", + readl(&iomem->txmac.tx_test)); + } + + /* Handle the Wake on LAN Event */ + if (status & ET_INTR_WOL) { + /* This is a secondary interrupt for wake on LAN. The driver + * should never see this, if it does, something serious is + * wrong. + */ + dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); + } + + if (status & ET_INTR_TXMAC) { + u32 err = readl(&iomem->txmac.err); + + /* When any of the errors occur and TXMAC generates an + * interrupt to report these errors, it usually means that + * TXMAC has detected an error in the data stream retrieved + * from the on-chip Tx Q. All of these errors are catastrophic + * and TXMAC won't be able to recover data when these errors + * occur. In a nutshell, the whole Tx path will have to be reset + * and re-configured afterwards. + */ + dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n", + err); + + /* If we are debugging, we want to see this error, otherwise we + * just want the device to be reset and continue + */ + } + + if (status & ET_INTR_RXMAC) { + /* These interrupts are catastrophic to the device, what we need + * to do is disable the interrupts and set the flag to cause us + * to reset so we can solve this issue. + */ + dev_warn(&adapter->pdev->dev, + "RXMAC interrupt, error 0x%08x. Requesting reset\n", + readl(&iomem->rxmac.err_reg)); + + dev_warn(&adapter->pdev->dev, + "Enable 0x%08x, Diag 0x%08x\n", + readl(&iomem->rxmac.ctrl), + readl(&iomem->rxmac.rxq_diag)); + + /* If we are debugging, we want to see this error, otherwise we + * just want the device to be reset and continue + */ + } + + if (status & ET_INTR_MAC_STAT) { + /* This means at least one of the un-masked counters in the + * MAC_STAT block has rolled over. Use this to maintain the top, + * software managed bits of the counter(s). + */ + et1310_handle_macstat_interrupt(adapter); + } + + if (status & ET_INTR_SLV_TIMEOUT) { + /* This means a timeout has occurred on a read or write request + * to one of the JAGCore registers. The Global Resources block + * has terminated the request and on a read request, returned a + * "fake" value. The most likely reasons are: Bad Address or the + * addressed module is in a power-down state and can't respond. 
+ */ + } + +out: + if (enable_interrupts) + et131x_enable_interrupts(adapter); + + return IRQ_RETVAL(handled); +} + +static int et131x_poll(struct napi_struct *napi, int budget) +{ + struct et131x_adapter *adapter = + container_of(napi, struct et131x_adapter, napi); + int work_done = et131x_handle_recv_pkts(adapter, budget); + + et131x_handle_send_pkts(adapter); + + if (work_done < budget) { + napi_complete(&adapter->napi); + et131x_enable_interrupts(adapter); + } + + return work_done; +} + +/* et131x_stats - Return the current device statistics */ +static struct net_device_stats *et131x_stats(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *stats = &adapter->netdev->stats; + struct ce_stats *devstat = &adapter->stats; + + stats->rx_errors = devstat->rx_length_errs + + devstat->rx_align_errs + + devstat->rx_crc_errs + + devstat->rx_code_violations + + devstat->rx_other_errs; + stats->tx_errors = devstat->tx_max_pkt_errs; + stats->multicast = devstat->multicast_pkts_rcvd; + stats->collisions = devstat->tx_collisions; + + stats->rx_length_errors = devstat->rx_length_errs; + stats->rx_over_errors = devstat->rx_overflows; + stats->rx_crc_errors = devstat->rx_crc_errs; + stats->rx_dropped = devstat->rcvd_pkts_dropped; + + /* NOTE: Not used, can't find analogous statistics */ + /* stats->rx_frame_errors = devstat->; */ + /* stats->rx_fifo_errors = devstat->; */ + /* stats->rx_missed_errors = devstat->; */ + + /* stats->tx_aborted_errors = devstat->; */ + /* stats->tx_carrier_errors = devstat->; */ + /* stats->tx_fifo_errors = devstat->; */ + /* stats->tx_heartbeat_errors = devstat->; */ + /* stats->tx_window_errors = devstat->; */ + return stats; +} + +static int et131x_open(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + unsigned int irq = pdev->irq; + int result; + + /* Start the timer to track NIC errors */ + init_timer(&adapter->error_timer); + adapter->error_timer.expires = jiffies + + msecs_to_jiffies(TX_ERROR_PERIOD); + adapter->error_timer.function = et131x_error_timer_handler; + adapter->error_timer.data = (unsigned long)adapter; + add_timer(&adapter->error_timer); + + result = request_irq(irq, et131x_isr, + IRQF_SHARED, netdev->name, netdev); + if (result) { + dev_err(&pdev->dev, "could not register IRQ %d\n", irq); + return result; + } + + adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE; + + napi_enable(&adapter->napi); + + et131x_up(netdev); + + return result; +} + +static int et131x_close(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + et131x_down(netdev); + napi_disable(&adapter->napi); + + adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE; + free_irq(adapter->pdev->irq, netdev); + + /* Stop the error timer */ + return del_timer_sync(&adapter->error_timer); +} + +static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, + int cmd) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + + if (!adapter->phydev) + return -EINVAL; + + return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); +} + +/* et131x_set_packet_filter - Configures the Rx Packet filtering */ +static int et131x_set_packet_filter(struct et131x_adapter *adapter) +{ + int filter = adapter->packet_filter; + u32 ctrl; + u32 pf_ctrl; + + ctrl = readl(&adapter->regs->rxmac.ctrl); + pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); + + /* Default to disabled packet filtering */ + ctrl |= 0x04; + + /* Set us to be in promiscuous 
mode so we receive everything, this + * is also true when we get a packet filter of 0 + */ + if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) + pf_ctrl &= ~7; /* Clear filter bits */ + else { + /* Set us up with Multicast packet filtering. Three cases are + * possible - (1) we have a multi-cast list, (2) we receive ALL + * multicast entries or (3) we receive none. + */ + if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) + pf_ctrl &= ~2; /* Multicast filter bit */ + else { + et1310_setup_device_for_multicast(adapter); + pf_ctrl |= 2; + ctrl &= ~0x04; + } + + /* Set us up with Unicast packet filtering */ + if (filter & ET131X_PACKET_TYPE_DIRECTED) { + et1310_setup_device_for_unicast(adapter); + pf_ctrl |= 4; + ctrl &= ~0x04; + } + + /* Set us up with Broadcast packet filtering */ + if (filter & ET131X_PACKET_TYPE_BROADCAST) { + pf_ctrl |= 1; /* Broadcast filter bit */ + ctrl &= ~0x04; + } else { + pf_ctrl &= ~1; + } + + /* Setup the receive mac configuration registers - Packet + * Filter control + the enable / disable for packet filter + * in the control reg. + */ + writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); + writel(ctrl, &adapter->regs->rxmac.ctrl); + } + return 0; +} + +static void et131x_multicast(struct net_device *netdev) +{ + struct et131x_adapter *adapter = netdev_priv(netdev); + int packet_filter; + struct netdev_hw_addr *ha; + int i; + + /* Before we modify the platform-independent filter flags, store them + * locally. This allows us to determine if anything's changed and if + * we even need to bother the hardware + */ + packet_filter = adapter->packet_filter; + + /* Clear the 'multicast' flag locally; because we only have a single + * flag to check multicast, and multiple multicast addresses can be + * set, this is the easiest way to determine if more than one + * multicast address is being set. + */ + packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; + + /* Check the net_device flags and set the device independent flags + * accordingly + */ + if (netdev->flags & IFF_PROMISC) + adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; + else + adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)) + adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; + + if (netdev_mc_count(netdev) < 1) { + adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; + adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; + } else { + adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; + } + + /* Set values in the private adapter struct */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == NIC_MAX_MCAST_LIST) + break; + ether_addr_copy(adapter->multicast_list[i++], ha->addr); + } + adapter->multicast_addr_count = i; + + /* Are the new flags different from the previous ones? If not, then no + * action is required + * + * NOTE - This block will always update the multicast_list with the + * hardware, even if the addresses aren't the same. 
+ */
+ if (packet_filter != adapter->packet_filter)
+ et131x_set_packet_filter(adapter);
+}
+
+static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
+
+ /* stop the queue if it's getting full */
+ if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
+ netif_stop_queue(netdev);
+
+ /* Save the timestamp for the TX timeout watchdog */
+ netdev->trans_start = jiffies;
+
+ /* TCB is not available */
+ if (tx_ring->used >= NUM_TCB)
+ goto drop_err;
+
+ if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
+ !netif_carrier_ok(netdev))
+ goto drop_err;
+
+ if (send_packet(skb, adapter))
+ goto drop_err;
+
+ return NETDEV_TX_OK;
+
+drop_err:
+ dev_kfree_skb_any(skb);
+ adapter->netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/* et131x_tx_timeout - Timeout handler
+ *
+ * The handler called when a Tx request times out. The timeout period is
+ * specified by the 'tx_timeo' element in the net_device structure (see
+ * et131x_alloc_device() to see how this value is set).
+ */
+static void et131x_tx_timeout(struct net_device *netdev)
+{
+ struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct tx_ring *tx_ring = &adapter->tx_ring;
+ struct tcb *tcb;
+ unsigned long flags;
+
+ /* If the device is closed, ignore the timeout */
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ return;
+
+ /* Any nonrecoverable hardware error?
+ * Checks adapter->flags for any failure in phy reading
+ */
+ if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
+ return;
+
+ /* Hardware failure? */
+ if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
+ dev_err(&adapter->pdev->dev, "hardware error - reset\n");
+ return;
+ }
+
+ /* Is send stuck? */
+ spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
+ tcb = tx_ring->send_head;
+ spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
+
+ if (tcb) {
+ tcb->count++;
+
+ if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
+ dev_warn(&adapter->pdev->dev,
+ "Send stuck - reset.
tcb->WrIndex %x\n", + tcb->index); + + adapter->netdev->stats.tx_errors++; + + /* perform reset of tx/rx */ + et131x_disable_txrx(netdev); + et131x_enable_txrx(netdev); + } + } +} + +static int et131x_change_mtu(struct net_device *netdev, int new_mtu) +{ + int result = 0; + struct et131x_adapter *adapter = netdev_priv(netdev); + + if (new_mtu < 64 || new_mtu > 9216) + return -EINVAL; + + et131x_disable_txrx(netdev); + + netdev->mtu = new_mtu; + + et131x_adapter_memory_free(adapter); + + /* Set the config parameter for Jumbo Packet support */ + adapter->registry_jumbo_packet = new_mtu + 14; + et131x_soft_reset(adapter); + + result = et131x_adapter_memory_alloc(adapter); + if (result != 0) { + dev_warn(&adapter->pdev->dev, + "Change MTU failed; couldn't re-alloc DMA memory\n"); + return result; + } + + et131x_init_send(adapter); + et131x_hwaddr_init(adapter); + ether_addr_copy(netdev->dev_addr, adapter->addr); + + /* Init the device with the new settings */ + et131x_adapter_setup(adapter); + et131x_enable_txrx(netdev); + + return result; +} + +static const struct net_device_ops et131x_netdev_ops = { + .ndo_open = et131x_open, + .ndo_stop = et131x_close, + .ndo_start_xmit = et131x_tx, + .ndo_set_rx_mode = et131x_multicast, + .ndo_tx_timeout = et131x_tx_timeout, + .ndo_change_mtu = et131x_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_get_stats = et131x_stats, + .ndo_do_ioctl = et131x_ioctl, +}; + +static int et131x_pci_setup(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct et131x_adapter *adapter; + int rc; + int ii; + + rc = pci_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "pci_enable_device() failed\n"); + goto out; + } + + /* Perform some basic PCI checks */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Can't find PCI device's base address\n"); + rc = -ENODEV; + goto err_disable; + } + + rc = pci_request_regions(pdev, DRIVER_NAME); + if (rc < 0) { + dev_err(&pdev->dev, "Can't get PCI resources\n"); + goto err_disable; + } + + pci_set_master(pdev); + + /* Check the DMA addressing support of this device */ + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + dev_err(&pdev->dev, "No usable DMA addressing method\n"); + rc = -EIO; + goto err_release_res; + } + + netdev = alloc_etherdev(sizeof(struct et131x_adapter)); + if (!netdev) { + dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); + rc = -ENOMEM; + goto err_release_res; + } + + netdev->watchdog_timeo = ET131X_TX_TIMEOUT; + netdev->netdev_ops = &et131x_netdev_ops; + + SET_NETDEV_DEV(netdev, &pdev->dev); + netdev->ethtool_ops = &et131x_ethtool_ops; + + adapter = et131x_adapter_init(netdev, pdev); + + rc = et131x_pci_init(adapter, pdev); + if (rc < 0) + goto err_free_dev; + + /* Map the bus-relative registers to system virtual memory */ + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "Cannot map device registers\n"); + rc = -ENOMEM; + goto err_free_dev; + } + + /* If Phy COMA mode was enabled when we went down, disable it here. 
*/ + writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); + + et131x_soft_reset(adapter); + et131x_disable_interrupts(adapter); + + rc = et131x_adapter_memory_alloc(adapter); + if (rc < 0) { + dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); + goto err_iounmap; + } + + et131x_init_send(adapter); + + netif_napi_add(netdev, &adapter->napi, et131x_poll, 64); + + ether_addr_copy(netdev->dev_addr, adapter->addr); + + rc = -ENOMEM; + + adapter->mii_bus = mdiobus_alloc(); + if (!adapter->mii_bus) { + dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); + goto err_mem_free; + } + + adapter->mii_bus->name = "et131x_eth_mii"; + snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", + (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); + adapter->mii_bus->priv = netdev; + adapter->mii_bus->read = et131x_mdio_read; + adapter->mii_bus->write = et131x_mdio_write; + adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), + GFP_KERNEL); + if (!adapter->mii_bus->irq) + goto err_mdio_free; + + for (ii = 0; ii < PHY_MAX_ADDR; ii++) + adapter->mii_bus->irq[ii] = PHY_POLL; + + rc = mdiobus_register(adapter->mii_bus); + if (rc < 0) { + dev_err(&pdev->dev, "failed to register MII bus\n"); + goto err_mdio_free_irq; + } + + rc = et131x_mii_probe(netdev); + if (rc < 0) { + dev_err(&pdev->dev, "failed to probe MII bus\n"); + goto err_mdio_unregister; + } + + et131x_adapter_setup(adapter); + + /* Init variable for counting how long we do not have link status */ + adapter->boot_coma = 0; + et1310_disable_phy_coma(adapter); + + /* We can enable interrupts now + * + * NOTE - Because registration of interrupt handler is done in the + * device's open(), defer enabling device interrupts to that + * point + */ + + rc = register_netdev(netdev); + if (rc < 0) { + dev_err(&pdev->dev, "register_netdev() failed\n"); + goto err_phy_disconnect; + } + + /* Register the net_device struct with the PCI subsystem. Save a copy + * of the PCI config space for this device now that the device has + * been initialized, just in case it needs to be quickly restored. + */ + pci_set_drvdata(pdev, netdev); +out: + return rc; + +err_phy_disconnect: + phy_disconnect(adapter->phydev); +err_mdio_unregister: + mdiobus_unregister(adapter->mii_bus); +err_mdio_free_irq: + kfree(adapter->mii_bus->irq); +err_mdio_free: + mdiobus_free(adapter->mii_bus); +err_mem_free: + et131x_adapter_memory_free(adapter); +err_iounmap: + iounmap(adapter->regs); +err_free_dev: + pci_dev_put(pdev); + free_netdev(netdev); +err_release_res: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); + goto out; +} + +static const struct pci_device_id et131x_pci_table[] = { + { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, + { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, + { 0,} +}; +MODULE_DEVICE_TABLE(pci, et131x_pci_table); + +static struct pci_driver et131x_driver = { + .name = DRIVER_NAME, + .id_table = et131x_pci_table, + .probe = et131x_pci_setup, + .remove = et131x_pci_remove, + .driver.pm = &et131x_pm_ops, +}; + +module_pci_driver(et131x_driver); diff --git a/drivers/net/ethernet/agere/et131x.h b/drivers/net/ethernet/agere/et131x.h new file mode 100644 index 000000000..be9a11c02 --- /dev/null +++ b/drivers/net/ethernet/agere/et131x.h @@ -0,0 +1,1433 @@ +/* Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * http://www.agere.com + * + * SOFTWARE LICENSE + * + * This software is provided subject to the following terms and conditions, + * which you should read carefully before using the software. 
Using this + * software indicates your acceptance of these terms and conditions. If you do + * not agree with these terms and conditions, do not use the software. + * + * Copyright © 2005 Agere Systems Inc. + * All rights reserved. + * + * Redistribution and use in source or binary forms, with or without + * modifications, are permitted provided that the following conditions are met: + * + * . Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following Disclaimer as comments in the code as + * well as in the documentation and/or other materials provided with the + * distribution. + * + * . Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following Disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * . Neither the name of Agere Systems Inc. nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * Disclaimer + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. 
+ * + */ + +#define DRIVER_NAME "et131x" +#define DRIVER_VERSION "v2.0" + +/* EEPROM registers */ + +/* LBCIF Register Groups (addressed via 32-bit offsets) */ +#define LBCIF_DWORD0_GROUP 0xAC +#define LBCIF_DWORD1_GROUP 0xB0 + +/* LBCIF Registers (addressed via 8-bit offsets) */ +#define LBCIF_ADDRESS_REGISTER 0xAC +#define LBCIF_DATA_REGISTER 0xB0 +#define LBCIF_CONTROL_REGISTER 0xB1 +#define LBCIF_STATUS_REGISTER 0xB2 + +/* LBCIF Control Register Bits */ +#define LBCIF_CONTROL_SEQUENTIAL_READ 0x01 +#define LBCIF_CONTROL_PAGE_WRITE 0x02 +#define LBCIF_CONTROL_EEPROM_RELOAD 0x08 +#define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20 +#define LBCIF_CONTROL_I2C_WRITE 0x40 +#define LBCIF_CONTROL_LBCIF_ENABLE 0x80 + +/* LBCIF Status Register Bits */ +#define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01 +#define LBCIF_STATUS_I2C_IDLE 0x02 +#define LBCIF_STATUS_ACK_ERROR 0x04 +#define LBCIF_STATUS_GENERAL_ERROR 0x08 +#define LBCIF_STATUS_CHECKSUM_ERROR 0x40 +#define LBCIF_STATUS_EEPROM_PRESENT 0x80 + +/* START OF GLOBAL REGISTER ADDRESS MAP */ +/* 10bit registers + * + * Tx queue start address reg in global address map at address 0x0000 + * tx queue end address reg in global address map at address 0x0004 + * rx queue start address reg in global address map at address 0x0008 + * rx queue end address reg in global address map at address 0x000C + */ + +/* structure for power management control status reg in global address map + * located at address 0x0010 + * jagcore_rx_rdy bit 9 + * jagcore_tx_rdy bit 8 + * phy_lped_en bit 7 + * phy_sw_coma bit 6 + * rxclk_gate bit 5 + * txclk_gate bit 4 + * sysclk_gate bit 3 + * jagcore_rx_en bit 2 + * jagcore_tx_en bit 1 + * gigephy_en bit 0 + */ +#define ET_PM_PHY_SW_COMA 0x40 +#define ET_PMCSR_INIT 0x38 + +/* Interrupt status reg at address 0x0018 + */ +#define ET_INTR_TXDMA_ISR 0x00000008 +#define ET_INTR_TXDMA_ERR 0x00000010 +#define ET_INTR_RXDMA_XFR_DONE 0x00000020 +#define ET_INTR_RXDMA_FB_R0_LOW 0x00000040 +#define ET_INTR_RXDMA_FB_R1_LOW 0x00000080 +#define ET_INTR_RXDMA_STAT_LOW 0x00000100 +#define ET_INTR_RXDMA_ERR 0x00000200 +#define ET_INTR_WATCHDOG 0x00004000 +#define ET_INTR_WOL 0x00008000 +#define ET_INTR_PHY 0x00010000 +#define ET_INTR_TXMAC 0x00020000 +#define ET_INTR_RXMAC 0x00040000 +#define ET_INTR_MAC_STAT 0x00080000 +#define ET_INTR_SLV_TIMEOUT 0x00100000 + +/* Interrupt mask register at address 0x001C + * Interrupt alias clear mask reg at address 0x0020 + * Interrupt status alias reg at address 0x0024 + * + * Same masks as above + */ + +/* Software reset reg at address 0x0028 + * 0: txdma_sw_reset + * 1: rxdma_sw_reset + * 2: txmac_sw_reset + * 3: rxmac_sw_reset + * 4: mac_sw_reset + * 5: mac_stat_sw_reset + * 6: mmc_sw_reset + *31: selfclr_disable + */ +#define ET_RESET_ALL 0x007F + +/* SLV Timer reg at address 0x002C (low 24 bits) + */ + +/* MSI Configuration reg at address 0x0030 + */ +#define ET_MSI_VECTOR 0x0000001F +#define ET_MSI_TC 0x00070000 + +/* Loopback reg located at address 0x0034 + */ +#define ET_LOOP_MAC 0x00000001 +#define ET_LOOP_DMA 0x00000002 + +/* GLOBAL Module of JAGCore Address Mapping + * Located at address 0x0000 + */ +struct global_regs { /* Location: */ + u32 txq_start_addr; /* 0x0000 */ + u32 txq_end_addr; /* 0x0004 */ + u32 rxq_start_addr; /* 0x0008 */ + u32 rxq_end_addr; /* 0x000C */ + u32 pm_csr; /* 0x0010 */ + u32 unused; /* 0x0014 */ + u32 int_status; /* 0x0018 */ + u32 int_mask; /* 0x001C */ + u32 int_alias_clr_en; /* 0x0020 */ + u32 int_status_alias; /* 0x0024 */ + u32 sw_reset; /* 0x0028 */ + u32 slv_timer; /* 0x002C */ + u32 
msi_config; /* 0x0030 */ + u32 loopback; /* 0x0034 */ + u32 watchdog_timer; /* 0x0038 */ +}; + +/* START OF TXDMA REGISTER ADDRESS MAP */ +/* txdma control status reg at address 0x1000 + */ +#define ET_TXDMA_CSR_HALT 0x00000001 +#define ET_TXDMA_DROP_TLP 0x00000002 +#define ET_TXDMA_CACHE_THRS 0x000000F0 +#define ET_TXDMA_CACHE_SHIFT 4 +#define ET_TXDMA_SNGL_EPKT 0x00000100 +#define ET_TXDMA_CLASS 0x00001E00 + +/* structure for txdma packet ring base address hi reg in txdma address map + * located at address 0x1004 + * Defined earlier (u32) + */ + +/* structure for txdma packet ring base address low reg in txdma address map + * located at address 0x1008 + * Defined earlier (u32) + */ + +/* structure for txdma packet ring number of descriptor reg in txdma address + * map. Located at address 0x100C + * + * 31-10: unused + * 9-0: pr ndes + */ +#define ET_DMA12_MASK 0x0FFF /* 12 bit mask for DMA12W types */ +#define ET_DMA12_WRAP 0x1000 +#define ET_DMA10_MASK 0x03FF /* 10 bit mask for DMA10W types */ +#define ET_DMA10_WRAP 0x0400 +#define ET_DMA4_MASK 0x000F /* 4 bit mask for DMA4W types */ +#define ET_DMA4_WRAP 0x0010 + +#define INDEX12(x) ((x) & ET_DMA12_MASK) +#define INDEX10(x) ((x) & ET_DMA10_MASK) +#define INDEX4(x) ((x) & ET_DMA4_MASK) + +/* 10bit DMA with wrap + * txdma tx queue write address reg in txdma address map at 0x1010 + * txdma tx queue write address external reg in txdma address map at 0x1014 + * txdma tx queue read address reg in txdma address map at 0x1018 + * + * u32 + * txdma status writeback address hi reg in txdma address map at0x101C + * txdma status writeback address lo reg in txdma address map at 0x1020 + * + * 10bit DMA with wrap + * txdma service request reg in txdma address map at 0x1024 + * structure for txdma service complete reg in txdma address map at 0x1028 + * + * 4bit DMA with wrap + * txdma tx descriptor cache read index reg in txdma address map at 0x102C + * txdma tx descriptor cache write index reg in txdma address map at 0x1030 + * + * txdma error reg in txdma address map at address 0x1034 + * 0: PyldResend + * 1: PyldRewind + * 4: DescrResend + * 5: DescrRewind + * 8: WrbkResend + * 9: WrbkRewind + */ + +/* Tx DMA Module of JAGCore Address Mapping + * Located at address 0x1000 + */ +struct txdma_regs { /* Location: */ + u32 csr; /* 0x1000 */ + u32 pr_base_hi; /* 0x1004 */ + u32 pr_base_lo; /* 0x1008 */ + u32 pr_num_des; /* 0x100C */ + u32 txq_wr_addr; /* 0x1010 */ + u32 txq_wr_addr_ext; /* 0x1014 */ + u32 txq_rd_addr; /* 0x1018 */ + u32 dma_wb_base_hi; /* 0x101C */ + u32 dma_wb_base_lo; /* 0x1020 */ + u32 service_request; /* 0x1024 */ + u32 service_complete; /* 0x1028 */ + u32 cache_rd_index; /* 0x102C */ + u32 cache_wr_index; /* 0x1030 */ + u32 tx_dma_error; /* 0x1034 */ + u32 desc_abort_cnt; /* 0x1038 */ + u32 payload_abort_cnt; /* 0x103c */ + u32 writeback_abort_cnt; /* 0x1040 */ + u32 desc_timeout_cnt; /* 0x1044 */ + u32 payload_timeout_cnt; /* 0x1048 */ + u32 writeback_timeout_cnt; /* 0x104c */ + u32 desc_error_cnt; /* 0x1050 */ + u32 payload_error_cnt; /* 0x1054 */ + u32 writeback_error_cnt; /* 0x1058 */ + u32 dropped_tlp_cnt; /* 0x105c */ + u32 new_service_complete; /* 0x1060 */ + u32 ethernet_packet_cnt; /* 0x1064 */ +}; + +/* END OF TXDMA REGISTER ADDRESS MAP */ + +/* START OF RXDMA REGISTER ADDRESS MAP */ +/* structure for control status reg in rxdma address map + * Located at address 0x2000 + * + * CSR + * 0: halt + * 1-3: tc + * 4: fbr_big_endian + * 5: psr_big_endian + * 6: pkt_big_endian + * 7: dma_big_endian + * 8-9: fbr0_size + * 10: 
fbr0_enable + * 11-12: fbr1_size + * 13: fbr1_enable + * 14: unused + * 15: pkt_drop_disable + * 16: pkt_done_flush + * 17: halt_status + * 18-31: unused + */ +#define ET_RXDMA_CSR_HALT 0x0001 +#define ET_RXDMA_CSR_FBR0_SIZE_LO 0x0100 +#define ET_RXDMA_CSR_FBR0_SIZE_HI 0x0200 +#define ET_RXDMA_CSR_FBR0_ENABLE 0x0400 +#define ET_RXDMA_CSR_FBR1_SIZE_LO 0x0800 +#define ET_RXDMA_CSR_FBR1_SIZE_HI 0x1000 +#define ET_RXDMA_CSR_FBR1_ENABLE 0x2000 +#define ET_RXDMA_CSR_HALT_STATUS 0x00020000 + +/* structure for dma writeback lo reg in rxdma address map + * located at address 0x2004 + * Defined earlier (u32) + */ + +/* structure for dma writeback hi reg in rxdma address map + * located at address 0x2008 + * Defined earlier (u32) + */ + +/* structure for number of packets done reg in rxdma address map + * located at address 0x200C + * + * 31-8: unused + * 7-0: num done + */ + +/* structure for max packet time reg in rxdma address map + * located at address 0x2010 + * + * 31-18: unused + * 17-0: time done + */ + +/* structure for rx queue read address reg in rxdma address map + * located at address 0x2014 + * Defined earlier (u32) + */ + +/* structure for rx queue read address external reg in rxdma address map + * located at address 0x2018 + * Defined earlier (u32) + */ + +/* structure for rx queue write address reg in rxdma address map + * located at address 0x201C + * Defined earlier (u32) + */ + +/* structure for packet status ring base address lo reg in rxdma address map + * located at address 0x2020 + * Defined earlier (u32) + */ + +/* structure for packet status ring base address hi reg in rxdma address map + * located at address 0x2024 + * Defined earlier (u32) + */ + +/* structure for packet status ring number of descriptors reg in rxdma address + * map. Located at address 0x2028 + * + * 31-12: unused + * 11-0: psr ndes + */ +#define ET_RXDMA_PSR_NUM_DES_MASK 0xFFF + +/* structure for packet status ring available offset reg in rxdma address map + * located at address 0x202C + * + * 31-13: unused + * 12: psr avail wrap + * 11-0: psr avail + */ + +/* structure for packet status ring full offset reg in rxdma address map + * located at address 0x2030 + * + * 31-13: unused + * 12: psr full wrap + * 11-0: psr full + */ + +/* structure for packet status ring access index reg in rxdma address map + * located at address 0x2034 + * + * 31-5: unused + * 4-0: psr_ai + */ + +/* structure for packet status ring minimum descriptors reg in rxdma address + * map. Located at address 0x2038 + * + * 31-12: unused + * 11-0: psr_min + */ + +/* structure for free buffer ring base lo address reg in rxdma address map + * located at address 0x203C + * Defined earlier (u32) + */ + +/* structure for free buffer ring base hi address reg in rxdma address map + * located at address 0x2040 + * Defined earlier (u32) + */ + +/* structure for free buffer ring number of descriptors reg in rxdma address + * map. 
Located at address 0x2044 + * + * 31-10: unused + * 9-0: fbr ndesc + */ + +/* structure for free buffer ring 0 available offset reg in rxdma address map + * located at address 0x2048 + * Defined earlier (u32) + */ + +/* structure for free buffer ring 0 full offset reg in rxdma address map + * located at address 0x204C + * Defined earlier (u32) + */ + +/* structure for free buffer cache 0 full offset reg in rxdma address map + * located at address 0x2050 + * + * 31-5: unused + * 4-0: fbc rdi + */ + +/* structure for free buffer ring 0 minimum descriptor reg in rxdma address map + * located at address 0x2054 + * + * 31-10: unused + * 9-0: fbr min + */ + +/* structure for free buffer ring 1 base address lo reg in rxdma address map + * located at address 0x2058 - 0x205C + * Defined earlier (RXDMA_FBR_BASE_LO_t and RXDMA_FBR_BASE_HI_t) + */ + +/* structure for free buffer ring 1 number of descriptors reg in rxdma address + * map. Located at address 0x2060 + * Defined earlier (RXDMA_FBR_NUM_DES_t) + */ + +/* structure for free buffer ring 1 available offset reg in rxdma address map + * located at address 0x2064 + * Defined Earlier (RXDMA_FBR_AVAIL_OFFSET_t) + */ + +/* structure for free buffer ring 1 full offset reg in rxdma address map + * located at address 0x2068 + * Defined Earlier (RXDMA_FBR_FULL_OFFSET_t) + */ + +/* structure for free buffer cache 1 read index reg in rxdma address map + * located at address 0x206C + * Defined Earlier (RXDMA_FBC_RD_INDEX_t) + */ + +/* structure for free buffer ring 1 minimum descriptor reg in rxdma address map + * located at address 0x2070 + * Defined Earlier (RXDMA_FBR_MIN_DES_t) + */ + +/* Rx DMA Module of JAGCore Address Mapping + * Located at address 0x2000 + */ +struct rxdma_regs { /* Location: */ + u32 csr; /* 0x2000 */ + u32 dma_wb_base_lo; /* 0x2004 */ + u32 dma_wb_base_hi; /* 0x2008 */ + u32 num_pkt_done; /* 0x200C */ + u32 max_pkt_time; /* 0x2010 */ + u32 rxq_rd_addr; /* 0x2014 */ + u32 rxq_rd_addr_ext; /* 0x2018 */ + u32 rxq_wr_addr; /* 0x201C */ + u32 psr_base_lo; /* 0x2020 */ + u32 psr_base_hi; /* 0x2024 */ + u32 psr_num_des; /* 0x2028 */ + u32 psr_avail_offset; /* 0x202C */ + u32 psr_full_offset; /* 0x2030 */ + u32 psr_access_index; /* 0x2034 */ + u32 psr_min_des; /* 0x2038 */ + u32 fbr0_base_lo; /* 0x203C */ + u32 fbr0_base_hi; /* 0x2040 */ + u32 fbr0_num_des; /* 0x2044 */ + u32 fbr0_avail_offset; /* 0x2048 */ + u32 fbr0_full_offset; /* 0x204C */ + u32 fbr0_rd_index; /* 0x2050 */ + u32 fbr0_min_des; /* 0x2054 */ + u32 fbr1_base_lo; /* 0x2058 */ + u32 fbr1_base_hi; /* 0x205C */ + u32 fbr1_num_des; /* 0x2060 */ + u32 fbr1_avail_offset; /* 0x2064 */ + u32 fbr1_full_offset; /* 0x2068 */ + u32 fbr1_rd_index; /* 0x206C */ + u32 fbr1_min_des; /* 0x2070 */ +}; + +/* END OF RXDMA REGISTER ADDRESS MAP */ + +/* START OF TXMAC REGISTER ADDRESS MAP */ +/* structure for control reg in txmac address map + * located at address 0x3000 + * + * bits + * 31-8: unused + * 7: cklseg_disable + * 6: ckbcnt_disable + * 5: cksegnum + * 4: async_disable + * 3: fc_disable + * 2: mcif_disable + * 1: mif_disable + * 0: txmac_en + */ +#define ET_TX_CTRL_FC_DISABLE 0x0008 +#define ET_TX_CTRL_TXMAC_ENABLE 0x0001 + +/* structure for shadow pointer reg in txmac address map + * located at address 0x3004 + * 31-27: reserved + * 26-16: txq rd ptr + * 15-11: reserved + * 10-0: txq wr ptr + */ + +/* structure for error count reg in txmac address map + * located at address 0x3008 + * + * 31-12: unused + * 11-8: reserved + * 7-4: txq_underrun + * 3-0: fifo_underrun + */ + +/* 
structure for max fill reg in txmac address map + * located at address 0x300C + * 31-12: unused + * 11-0: max fill + */ + +/* structure for cf parameter reg in txmac address map + * located at address 0x3010 + * 31-16: cfep + * 15-0: cfpt + */ + +/* structure for tx test reg in txmac address map + * located at address 0x3014 + * 31-17: unused + * 16: reserved + * 15: txtest_en + * 14-11: unused + * 10-0: txq test pointer + */ + +/* structure for error reg in txmac address map + * located at address 0x3018 + * + * 31-9: unused + * 8: fifo_underrun + * 7-6: unused + * 5: ctrl2_err + * 4: txq_underrun + * 3: bcnt_err + * 2: lseg_err + * 1: segnum_err + * 0: seg0_err + */ + +/* structure for error interrupt reg in txmac address map + * located at address 0x301C + * + * 31-9: unused + * 8: fifo_underrun + * 7-6: unused + * 5: ctrl2_err + * 4: txq_underrun + * 3: bcnt_err + * 2: lseg_err + * 1: segnum_err + * 0: seg0_err + */ + +/* structure for error interrupt reg in txmac address map + * located at address 0x3020 + * + * 31-2: unused + * 1: bp_req + * 0: bp_xonxoff + */ + +/* Tx MAC Module of JAGCore Address Mapping + */ +struct txmac_regs { /* Location: */ + u32 ctl; /* 0x3000 */ + u32 shadow_ptr; /* 0x3004 */ + u32 err_cnt; /* 0x3008 */ + u32 max_fill; /* 0x300C */ + u32 cf_param; /* 0x3010 */ + u32 tx_test; /* 0x3014 */ + u32 err; /* 0x3018 */ + u32 err_int; /* 0x301C */ + u32 bp_ctrl; /* 0x3020 */ +}; + +/* END OF TXMAC REGISTER ADDRESS MAP */ + +/* START OF RXMAC REGISTER ADDRESS MAP */ + +/* structure for rxmac control reg in rxmac address map + * located at address 0x4000 + * + * 31-7: reserved + * 6: rxmac_int_disable + * 5: async_disable + * 4: mif_disable + * 3: wol_disable + * 2: pkt_filter_disable + * 1: mcif_disable + * 0: rxmac_en + */ +#define ET_RX_CTRL_WOL_DISABLE 0x0008 +#define ET_RX_CTRL_RXMAC_ENABLE 0x0001 + +/* structure for Wake On Lan Control and CRC 0 reg in rxmac address map + * located at address 0x4004 + * 31-16: crc + * 15-12: reserved + * 11: ignore_pp + * 10: ignore_mp + * 9: clr_intr + * 8: ignore_link_chg + * 7: ignore_uni + * 6: ignore_multi + * 5: ignore_broad + * 4-0: valid_crc 4-0 + */ + +/* structure for CRC 1 and CRC 2 reg in rxmac address map + * located at address 0x4008 + * + * 31-16: crc2 + * 15-0: crc1 + */ + +/* structure for CRC 3 and CRC 4 reg in rxmac address map + * located at address 0x400C + * + * 31-16: crc4 + * 15-0: crc3 + */ + +/* structure for Wake On Lan Source Address Lo reg in rxmac address map + * located at address 0x4010 + * + * 31-24: sa3 + * 23-16: sa4 + * 15-8: sa5 + * 7-0: sa6 + */ +#define ET_RX_WOL_LO_SA3_SHIFT 24 +#define ET_RX_WOL_LO_SA4_SHIFT 16 +#define ET_RX_WOL_LO_SA5_SHIFT 8 + +/* structure for Wake On Lan Source Address Hi reg in rxmac address map + * located at address 0x4014 + * + * 31-16: reserved + * 15-8: sa1 + * 7-0: sa2 + */ +#define ET_RX_WOL_HI_SA1_SHIFT 8 + +/* structure for Wake On Lan mask reg in rxmac address map + * located at address 0x4018 - 0x4064 + * Defined earlier (u32) + */ + +/* structure for Unicast Packet Filter Address 1 reg in rxmac address map + * located at address 0x4068 + * + * 31-24: addr1_3 + * 23-16: addr1_4 + * 15-8: addr1_5 + * 7-0: addr1_6 + */ +#define ET_RX_UNI_PF_ADDR1_3_SHIFT 24 +#define ET_RX_UNI_PF_ADDR1_4_SHIFT 16 +#define ET_RX_UNI_PF_ADDR1_5_SHIFT 8 + +/* structure for Unicast Packet Filter Address 2 reg in rxmac address map + * located at address 0x406C + * + * 31-24: addr2_3 + * 23-16: addr2_4 + * 15-8: addr2_5 + * 7-0: addr2_6 + */ +#define ET_RX_UNI_PF_ADDR2_3_SHIFT 24 
+#define ET_RX_UNI_PF_ADDR2_4_SHIFT 16 +#define ET_RX_UNI_PF_ADDR2_5_SHIFT 8 + +/* structure for Unicast Packet Filter Address 1 & 2 reg in rxmac address map + * located at address 0x4070 + * + * 31-24: addr2_1 + * 23-16: addr2_2 + * 15-8: addr1_1 + * 7-0: addr1_2 + */ +#define ET_RX_UNI_PF_ADDR2_1_SHIFT 24 +#define ET_RX_UNI_PF_ADDR2_2_SHIFT 16 +#define ET_RX_UNI_PF_ADDR1_1_SHIFT 8 + +/* structure for Multicast Hash reg in rxmac address map + * located at address 0x4074 - 0x4080 + * Defined earlier (u32) + */ + +/* structure for Packet Filter Control reg in rxmac address map + * located at address 0x4084 + * + * 31-23: unused + * 22-16: min_pkt_size + * 15-4: unused + * 3: filter_frag_en + * 2: filter_uni_en + * 1: filter_multi_en + * 0: filter_broad_en + */ +#define ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT 16 +#define ET_RX_PFCTRL_FRAG_FILTER_ENABLE 0x0008 +#define ET_RX_PFCTRL_UNICST_FILTER_ENABLE 0x0004 +#define ET_RX_PFCTRL_MLTCST_FILTER_ENABLE 0x0002 +#define ET_RX_PFCTRL_BRDCST_FILTER_ENABLE 0x0001 + +/* structure for Memory Controller Interface Control Max Segment reg in rxmac + * address map. Located at address 0x4088 + * + * 31-10: reserved + * 9-2: max_size + * 1: fc_en + * 0: seg_en + */ +#define ET_RX_MCIF_CTRL_MAX_SEG_SIZE_SHIFT 2 +#define ET_RX_MCIF_CTRL_MAX_SEG_FC_ENABLE 0x0002 +#define ET_RX_MCIF_CTRL_MAX_SEG_ENABLE 0x0001 + +/* structure for Memory Controller Interface Water Mark reg in rxmac address + * map. Located at address 0x408C + * + * 31-26: unused + * 25-16: mark_hi + * 15-10: unused + * 9-0: mark_lo + */ + +/* structure for Rx Queue Dialog reg in rxmac address map. + * located at address 0x4090 + * + * 31-26: reserved + * 25-16: rd_ptr + * 15-10: reserved + * 9-0: wr_ptr + */ + +/* structure for space available reg in rxmac address map. + * located at address 0x4094 + * + * 31-17: reserved + * 16: space_avail_en + * 15-10: reserved + * 9-0: space_avail + */ + +/* structure for management interface reg in rxmac address map. + * located at address 0x4098 + * + * 31-18: reserved + * 17: drop_pkt_en + * 16-0: drop_pkt_mask + */ + +/* structure for Error reg in rxmac address map. 
+ * located at address 0x409C + * + * 31-4: unused + * 3: mif + * 2: async + * 1: pkt_filter + * 0: mcif + */ + +/* Rx MAC Module of JAGCore Address Mapping + */ +struct rxmac_regs { /* Location: */ + u32 ctrl; /* 0x4000 */ + u32 crc0; /* 0x4004 */ + u32 crc12; /* 0x4008 */ + u32 crc34; /* 0x400C */ + u32 sa_lo; /* 0x4010 */ + u32 sa_hi; /* 0x4014 */ + u32 mask0_word0; /* 0x4018 */ + u32 mask0_word1; /* 0x401C */ + u32 mask0_word2; /* 0x4020 */ + u32 mask0_word3; /* 0x4024 */ + u32 mask1_word0; /* 0x4028 */ + u32 mask1_word1; /* 0x402C */ + u32 mask1_word2; /* 0x4030 */ + u32 mask1_word3; /* 0x4034 */ + u32 mask2_word0; /* 0x4038 */ + u32 mask2_word1; /* 0x403C */ + u32 mask2_word2; /* 0x4040 */ + u32 mask2_word3; /* 0x4044 */ + u32 mask3_word0; /* 0x4048 */ + u32 mask3_word1; /* 0x404C */ + u32 mask3_word2; /* 0x4050 */ + u32 mask3_word3; /* 0x4054 */ + u32 mask4_word0; /* 0x4058 */ + u32 mask4_word1; /* 0x405C */ + u32 mask4_word2; /* 0x4060 */ + u32 mask4_word3; /* 0x4064 */ + u32 uni_pf_addr1; /* 0x4068 */ + u32 uni_pf_addr2; /* 0x406C */ + u32 uni_pf_addr3; /* 0x4070 */ + u32 multi_hash1; /* 0x4074 */ + u32 multi_hash2; /* 0x4078 */ + u32 multi_hash3; /* 0x407C */ + u32 multi_hash4; /* 0x4080 */ + u32 pf_ctrl; /* 0x4084 */ + u32 mcif_ctrl_max_seg; /* 0x4088 */ + u32 mcif_water_mark; /* 0x408C */ + u32 rxq_diag; /* 0x4090 */ + u32 space_avail; /* 0x4094 */ + + u32 mif_ctrl; /* 0x4098 */ + u32 err_reg; /* 0x409C */ +}; + +/* END OF RXMAC REGISTER ADDRESS MAP */ + +/* START OF MAC REGISTER ADDRESS MAP */ +/* structure for configuration #1 reg in mac address map. + * located at address 0x5000 + * + * 31: soft reset + * 30: sim reset + * 29-20: reserved + * 19: reset rx mc + * 18: reset tx mc + * 17: reset rx func + * 16: reset tx fnc + * 15-9: reserved + * 8: loopback + * 7-6: reserved + * 5: rx flow + * 4: tx flow + * 3: syncd rx en + * 2: rx enable + * 1: syncd tx en + * 0: tx enable + */ +#define ET_MAC_CFG1_SOFT_RESET 0x80000000 +#define ET_MAC_CFG1_SIM_RESET 0x40000000 +#define ET_MAC_CFG1_RESET_RXMC 0x00080000 +#define ET_MAC_CFG1_RESET_TXMC 0x00040000 +#define ET_MAC_CFG1_RESET_RXFUNC 0x00020000 +#define ET_MAC_CFG1_RESET_TXFUNC 0x00010000 +#define ET_MAC_CFG1_LOOPBACK 0x00000100 +#define ET_MAC_CFG1_RX_FLOW 0x00000020 +#define ET_MAC_CFG1_TX_FLOW 0x00000010 +#define ET_MAC_CFG1_RX_ENABLE 0x00000004 +#define ET_MAC_CFG1_TX_ENABLE 0x00000001 +#define ET_MAC_CFG1_WAIT 0x0000000A /* RX & TX syncd */ + +/* structure for configuration #2 reg in mac address map. + * located at address 0x5004 + * 31-16: reserved + * 15-12: preamble + * 11-10: reserved + * 9-8: if mode + * 7-6: reserved + * 5: huge frame + * 4: length check + * 3: undefined + * 2: pad crc + * 1: crc enable + * 0: full duplex + */ +#define ET_MAC_CFG2_PREAMBLE_SHIFT 12 +#define ET_MAC_CFG2_IFMODE_MASK 0x0300 +#define ET_MAC_CFG2_IFMODE_1000 0x0200 +#define ET_MAC_CFG2_IFMODE_100 0x0100 +#define ET_MAC_CFG2_IFMODE_HUGE_FRAME 0x0020 +#define ET_MAC_CFG2_IFMODE_LEN_CHECK 0x0010 +#define ET_MAC_CFG2_IFMODE_PAD_CRC 0x0004 +#define ET_MAC_CFG2_IFMODE_CRC_ENABLE 0x0002 +#define ET_MAC_CFG2_IFMODE_FULL_DPLX 0x0001 + +/* structure for Interpacket gap reg in mac address map. + * located at address 0x5008 + * + * 31: reserved + * 30-24: non B2B ipg 1 + * 23: undefined + * 22-16: non B2B ipg 2 + * 15-8: Min ifg enforce + * 7-0: B2B ipg + * + * structure for half duplex reg in mac address map. 
+ * located at address 0x500C + * 31-24: reserved + * 23-20: Alt BEB trunc + * 19: Alt BEB enable + * 18: BP no backoff + * 17: no backoff + * 16: excess defer + * 15-12: re-xmit max + * 11-10: reserved + * 9-0: collision window + */ + +/* structure for Maximum Frame Length reg in mac address map. + * located at address 0x5010: bits 0-15 hold the length. + */ + +/* structure for Reserve 1 reg in mac address map. + * located at address 0x5014 - 0x5018 + * Defined earlier (u32) + */ + +/* structure for Test reg in mac address map. + * located at address 0x501C + * test: bits 0-2, rest unused + */ + +/* structure for MII Management Configuration reg in mac address map. + * located at address 0x5020 + * + * 31: reset MII mgmt + * 30-6: unused + * 5: scan auto increment + * 4: preamble suppress + * 3: undefined + * 2-0: mgmt clock reset + */ +#define ET_MAC_MIIMGMT_CLK_RST 0x0007 + +/* structure for MII Management Command reg in mac address map. + * located at address 0x5024 + * bit 1: scan cycle + * bit 0: read cycle + */ + +/* structure for MII Management Address reg in mac address map. + * located at address 0x5028 + * 31-13: reserved + * 12-8: phy addr + * 7-5: reserved + * 4-0: register + */ +#define ET_MAC_MII_ADDR(phy, reg) ((phy) << 8 | (reg)) + +/* structure for MII Management Control reg in mac address map. + * located at address 0x502C + * 31-16: reserved + * 15-0: phy control + */ + +/* structure for MII Management Status reg in mac address map. + * located at address 0x5030 + * 31-16: reserved + * 15-0: phy control + */ +#define ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK 0xFFFF + +/* structure for MII Management Indicators reg in mac address map. + * located at address 0x5034 + * 31-3: reserved + * 2: not valid + * 1: scanning + * 0: busy + */ +#define ET_MAC_MGMT_BUSY 0x00000001 /* busy */ +#define ET_MAC_MGMT_WAIT 0x00000005 /* busy | not valid */ + +/* structure for Interface Control reg in mac address map. + * located at address 0x5038 + * + * 31: reset if module + * 30-28: reserved + * 27: tbi mode + * 26: ghd mode + * 25: lhd mode + * 24: phy mode + * 23: reset per mii + * 22-17: reserved + * 16: speed + * 15: reset pe100x + * 14-11: reserved + * 10: force quiet + * 9: no cipher + * 8: disable link fail + * 7: reset gpsi + * 6-1: reserved + * 0: enable jabber protection + */ +#define ET_MAC_IFCTRL_GHDMODE (1 << 26) +#define ET_MAC_IFCTRL_PHYMODE (1 << 24) + +/* structure for Interface Status reg in mac address map. + * located at address 0x503C + * + * 31-10: reserved + * 9: excess_defer + * 8: clash + * 7: phy_jabber + * 6: phy_link_ok + * 5: phy_full_duplex + * 4: phy_speed + * 3: pe100x_link_fail + * 2: pe10t_loss_carrier + * 1: pe10t_sqe_error + * 0: pe10t_jabber + */ + +/* structure for Mac Station Address, Part 1 reg in mac address map. + * located at address 0x5040 + * + * 31-24: Octet6 + * 23-16: Octet5 + * 15-8: Octet4 + * 7-0: Octet3 + */ +#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24 +#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16 +#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8 + +/* structure for Mac Station Address, Part 2 reg in mac address map. 
+ * located at address 0x5044 + * + * 31-24: Octet2 + * 23-16: Octet1 + * 15-0: reserved + */ +#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24 +#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16 + +/* MAC Module of JAGCore Address Mapping + */ +struct mac_regs { /* Location: */ + u32 cfg1; /* 0x5000 */ + u32 cfg2; /* 0x5004 */ + u32 ipg; /* 0x5008 */ + u32 hfdp; /* 0x500C */ + u32 max_fm_len; /* 0x5010 */ + u32 rsv1; /* 0x5014 */ + u32 rsv2; /* 0x5018 */ + u32 mac_test; /* 0x501C */ + u32 mii_mgmt_cfg; /* 0x5020 */ + u32 mii_mgmt_cmd; /* 0x5024 */ + u32 mii_mgmt_addr; /* 0x5028 */ + u32 mii_mgmt_ctrl; /* 0x502C */ + u32 mii_mgmt_stat; /* 0x5030 */ + u32 mii_mgmt_indicator; /* 0x5034 */ + u32 if_ctrl; /* 0x5038 */ + u32 if_stat; /* 0x503C */ + u32 station_addr_1; /* 0x5040 */ + u32 station_addr_2; /* 0x5044 */ +}; + +/* END OF MAC REGISTER ADDRESS MAP */ + +/* START OF MAC STAT REGISTER ADDRESS MAP */ +/* structure for Carry Register One and it's Mask Register reg located in mac + * stat address map address 0x6130 and 0x6138. + * + * 31: tr64 + * 30: tr127 + * 29: tr255 + * 28: tr511 + * 27: tr1k + * 26: trmax + * 25: trmgv + * 24-17: unused + * 16: rbyt + * 15: rpkt + * 14: rfcs + * 13: rmca + * 12: rbca + * 11: rxcf + * 10: rxpf + * 9: rxuo + * 8: raln + * 7: rflr + * 6: rcde + * 5: rcse + * 4: rund + * 3: rovr + * 2: rfrg + * 1: rjbr + * 0: rdrp + */ + +/* structure for Carry Register Two Mask Register reg in mac stat address map. + * located at address 0x613C + * + * 31-20: unused + * 19: tjbr + * 18: tfcs + * 17: txcf + * 16: tovr + * 15: tund + * 14: trfg + * 13: tbyt + * 12: tpkt + * 11: tmca + * 10: tbca + * 9: txpf + * 8: tdfr + * 7: tedf + * 6: tscl + * 5: tmcl + * 4: tlcl + * 3: txcl + * 2: tncl + * 1: tpfh + * 0: tdrp + */ + +/* MAC STATS Module of JAGCore Address Mapping + */ +struct macstat_regs { /* Location: */ + u32 pad[32]; /* 0x6000 - 607C */ + + /* counters */ + u32 txrx_0_64_byte_frames; /* 0x6080 */ + u32 txrx_65_127_byte_frames; /* 0x6084 */ + u32 txrx_128_255_byte_frames; /* 0x6088 */ + u32 txrx_256_511_byte_frames; /* 0x608C */ + u32 txrx_512_1023_byte_frames; /* 0x6090 */ + u32 txrx_1024_1518_byte_frames; /* 0x6094 */ + u32 txrx_1519_1522_gvln_frames; /* 0x6098 */ + u32 rx_bytes; /* 0x609C */ + u32 rx_packets; /* 0x60A0 */ + u32 rx_fcs_errs; /* 0x60A4 */ + u32 rx_multicast_packets; /* 0x60A8 */ + u32 rx_broadcast_packets; /* 0x60AC */ + u32 rx_control_frames; /* 0x60B0 */ + u32 rx_pause_frames; /* 0x60B4 */ + u32 rx_unknown_opcodes; /* 0x60B8 */ + u32 rx_align_errs; /* 0x60BC */ + u32 rx_frame_len_errs; /* 0x60C0 */ + u32 rx_code_errs; /* 0x60C4 */ + u32 rx_carrier_sense_errs; /* 0x60C8 */ + u32 rx_undersize_packets; /* 0x60CC */ + u32 rx_oversize_packets; /* 0x60D0 */ + u32 rx_fragment_packets; /* 0x60D4 */ + u32 rx_jabbers; /* 0x60D8 */ + u32 rx_drops; /* 0x60DC */ + u32 tx_bytes; /* 0x60E0 */ + u32 tx_packets; /* 0x60E4 */ + u32 tx_multicast_packets; /* 0x60E8 */ + u32 tx_broadcast_packets; /* 0x60EC */ + u32 tx_pause_frames; /* 0x60F0 */ + u32 tx_deferred; /* 0x60F4 */ + u32 tx_excessive_deferred; /* 0x60F8 */ + u32 tx_single_collisions; /* 0x60FC */ + u32 tx_multiple_collisions; /* 0x6100 */ + u32 tx_late_collisions; /* 0x6104 */ + u32 tx_excessive_collisions; /* 0x6108 */ + u32 tx_total_collisions; /* 0x610C */ + u32 tx_pause_honored_frames; /* 0x6110 */ + u32 tx_drops; /* 0x6114 */ + u32 tx_jabbers; /* 0x6118 */ + u32 tx_fcs_errs; /* 0x611C */ + u32 tx_control_frames; /* 0x6120 */ + u32 tx_oversize_frames; /* 0x6124 */ + u32 tx_undersize_frames; /* 0x6128 */ + u32 
tx_fragments; /* 0x612C */ + u32 carry_reg1; /* 0x6130 */ + u32 carry_reg2; /* 0x6134 */ + u32 carry_reg1_mask; /* 0x6138 */ + u32 carry_reg2_mask; /* 0x613C */ +}; + +/* END OF MAC STAT REGISTER ADDRESS MAP */ + +/* START OF MMC REGISTER ADDRESS MAP */ +/* Main Memory Controller Control reg in mmc address map. + * located at address 0x7000 + */ +#define ET_MMC_ENABLE 1 +#define ET_MMC_ARB_DISABLE 2 +#define ET_MMC_RXMAC_DISABLE 4 +#define ET_MMC_TXMAC_DISABLE 8 +#define ET_MMC_TXDMA_DISABLE 16 +#define ET_MMC_RXDMA_DISABLE 32 +#define ET_MMC_FORCE_CE 64 + +/* Main Memory Controller Host Memory Access Address reg in mmc + * address map. Located at address 0x7004. Top 16 bits hold the address bits + */ +#define ET_SRAM_REQ_ACCESS 1 +#define ET_SRAM_WR_ACCESS 2 +#define ET_SRAM_IS_CTRL 4 + +/* structure for Main Memory Controller Host Memory Access Data reg in mmc + * address map. Located at address 0x7008 - 0x7014 + * Defined earlier (u32) + */ + +/* Memory Control Module of JAGCore Address Mapping + */ +struct mmc_regs { /* Location: */ + u32 mmc_ctrl; /* 0x7000 */ + u32 sram_access; /* 0x7004 */ + u32 sram_word1; /* 0x7008 */ + u32 sram_word2; /* 0x700C */ + u32 sram_word3; /* 0x7010 */ + u32 sram_word4; /* 0x7014 */ +}; + +/* END OF MMC REGISTER ADDRESS MAP */ + +/* JAGCore Address Mapping + */ +struct address_map { + struct global_regs global; + /* unused section of global address map */ + u8 unused_global[4096 - sizeof(struct global_regs)]; + struct txdma_regs txdma; + /* unused section of txdma address map */ + u8 unused_txdma[4096 - sizeof(struct txdma_regs)]; + struct rxdma_regs rxdma; + /* unused section of rxdma address map */ + u8 unused_rxdma[4096 - sizeof(struct rxdma_regs)]; + struct txmac_regs txmac; + /* unused section of txmac address map */ + u8 unused_txmac[4096 - sizeof(struct txmac_regs)]; + struct rxmac_regs rxmac; + /* unused section of rxmac address map */ + u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)]; + struct mac_regs mac; + /* unused section of mac address map */ + u8 unused_mac[4096 - sizeof(struct mac_regs)]; + struct macstat_regs macstat; + /* unused section of mac stat address map */ + u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)]; + struct mmc_regs mmc; + /* unused section of mmc address map */ + u8 unused_mmc[4096 - sizeof(struct mmc_regs)]; + /* unused section of address map */ + u8 unused_[1015808]; + u8 unused_exp_rom[4096]; /* MGS-size TBD */ + u8 unused__[524288]; /* unused section of address map */ +}; + +/* Defines for generic MII registers 0x00 -> 0x0F can be found in + * include/linux/mii.h + */ +/* some defines for modem registers that seem to be 'reserved' */ +#define PHY_INDEX_REG 0x10 +#define PHY_DATA_REG 0x11 +#define PHY_MPHY_CONTROL_REG 0x12 + +/* defines for specified registers */ +#define PHY_LOOPBACK_CONTROL 0x13 /* TRU_VMI_LOOPBACK_CONTROL_1_REG 19 */ + /* TRU_VMI_LOOPBACK_CONTROL_2_REG 20 */ +#define PHY_REGISTER_MGMT_CONTROL 0x15 /* TRU_VMI_MI_SEQ_CONTROL_REG 21 */ +#define PHY_CONFIG 0x16 /* TRU_VMI_CONFIGURATION_REG 22 */ +#define PHY_PHY_CONTROL 0x17 /* TRU_VMI_PHY_CONTROL_REG 23 */ +#define PHY_INTERRUPT_MASK 0x18 /* TRU_VMI_INTERRUPT_MASK_REG 24 */ +#define PHY_INTERRUPT_STATUS 0x19 /* TRU_VMI_INTERRUPT_STATUS_REG 25 */ +#define PHY_PHY_STATUS 0x1A /* TRU_VMI_PHY_STATUS_REG 26 */ +#define PHY_LED_1 0x1B /* TRU_VMI_LED_CONTROL_1_REG 27 */ +#define PHY_LED_2 0x1C /* TRU_VMI_LED_CONTROL_2_REG 28 */ + /* TRU_VMI_LINK_CONTROL_REG 29 */ + /* TRU_VMI_TIMING_CONTROL_REG */ + +/* MI Register 10: Gigabit basic mode status 
reg(Reg 0x0A) */ +#define ET_1000BT_MSTR_SLV 0x4000 + +/* MI Register 16 - 18: Reserved Reg(0x10-0x12) */ + +/* MI Register 19: Loopback Control Reg(0x13) + * 15: mii_en + * 14: pcs_en + * 13: pmd_en + * 12: all_digital_en + * 11: replica_en + * 10: line_driver_en + * 9-0: reserved + */ + +/* MI Register 20: Reserved Reg(0x14) */ + +/* MI Register 21: Management Interface Control Reg(0x15) + * 15-11: reserved + * 10-4: mi_error_count + * 3: reserved + * 2: ignore_10g_fr + * 1: reserved + * 0: preamble_suppress_en + */ + +/* MI Register 22: PHY Configuration Reg(0x16) + * 15: crs_tx_en + * 14: reserved + * 13-12: tx_fifo_depth + * 11-10: speed_downshift + * 9: pbi_detect + * 8: tbi_rate + * 7: alternate_np + * 6: group_mdio_en + * 5: tx_clock_en + * 4: sys_clock_en + * 3: reserved + * 2-0: mac_if_mode + */ +#define ET_PHY_CONFIG_TX_FIFO_DEPTH 0x3000 + +#define ET_PHY_CONFIG_FIFO_DEPTH_8 0x0000 +#define ET_PHY_CONFIG_FIFO_DEPTH_16 0x1000 +#define ET_PHY_CONFIG_FIFO_DEPTH_32 0x2000 +#define ET_PHY_CONFIG_FIFO_DEPTH_64 0x3000 + +/* MI Register 23: PHY CONTROL Reg(0x17) + * 15: reserved + * 14: tdr_en + * 13: reserved + * 12-11: downshift_attempts + * 10-6: reserved + * 5: jabber_10baseT + * 4: sqe_10baseT + * 3: tp_loopback_10baseT + * 2: preamble_gen_en + * 1: reserved + * 0: force_int + */ + +/* MI Register 24: Interrupt Mask Reg(0x18) + * 15-10: reserved + * 9: mdio_sync_lost + * 8: autoneg_status + * 7: hi_bit_err + * 6: np_rx + * 5: err_counter_full + * 4: fifo_over_underflow + * 3: rx_status + * 2: link_status + * 1: automatic_speed + * 0: int_en + */ + +/* MI Register 25: Interrupt Status Reg(0x19) + * 15-10: reserved + * 9: mdio_sync_lost + * 8: autoneg_status + * 7: hi_bit_err + * 6: np_rx + * 5: err_counter_full + * 4: fifo_over_underflow + * 3: rx_status + * 2: link_status + * 1: automatic_speed + * 0: int_en + */ + +/* MI Register 26: PHY Status Reg(0x1A) + * 15: reserved + * 14-13: autoneg_fault + * 12: autoneg_status + * 11: mdi_x_status + * 10: polarity_status + * 9-8: speed_status + * 7: duplex_status + * 6: link_status + * 5: tx_status + * 4: rx_status + * 3: collision_status + * 2: autoneg_en + * 1: pause_en + * 0: asymmetric_dir + */ +#define ET_PHY_AUTONEG_STATUS 0x1000 +#define ET_PHY_POLARITY_STATUS 0x0400 +#define ET_PHY_SPEED_STATUS 0x0300 +#define ET_PHY_DUPLEX_STATUS 0x0080 +#define ET_PHY_LSTATUS 0x0040 +#define ET_PHY_AUTONEG_ENABLE 0x0020 + +/* MI Register 27: LED Control Reg 1(0x1B) + * 15-14: reserved + * 13-12: led_dup_indicate + * 11-10: led_10baseT + * 9-8: led_collision + * 7-4: reserved + * 3-2: pulse_dur + * 1: pulse_stretch1 + * 0: pulse_stretch0 + */ + +/* MI Register 28: LED Control Reg 2(0x1C) + * 15-12: led_link + * 11-8: led_tx_rx + * 7-4: led_100BaseTX + * 3-0: led_1000BaseT + */ +#define ET_LED2_LED_LINK 0xF000 +#define ET_LED2_LED_TXRX 0x0F00 +#define ET_LED2_LED_100TX 0x00F0 +#define ET_LED2_LED_1000T 0x000F + +/* defines for LED control reg 2 values */ +#define LED_VAL_1000BT 0x0 +#define LED_VAL_100BTX 0x1 +#define LED_VAL_10BT 0x2 +#define LED_VAL_1000BT_100BTX 0x3 /* 1000BT on, 100BTX blink */ +#define LED_VAL_LINKON 0x4 +#define LED_VAL_TX 0x5 +#define LED_VAL_RX 0x6 +#define LED_VAL_TXRX 0x7 /* TX or RX */ +#define LED_VAL_DUPLEXFULL 0x8 +#define LED_VAL_COLLISION 0x9 +#define LED_VAL_LINKON_ACTIVE 0xA /* Link on, activity blink */ +#define LED_VAL_LINKON_RECV 0xB /* Link on, receive blink */ +#define LED_VAL_DUPLEXFULL_COLLISION 0xC /* Duplex on, collision blink */ +#define LED_VAL_BLINK 0xD +#define LED_VAL_ON 0xE +#define LED_VAL_OFF 0xF 
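As a reading aid for the LED register fields above, here is a minimal sketch (not part of this patch) of how a value for LED control reg 2 (PHY_LED_2, register 0x1C) could be composed: one LED_VAL_* code per 4-bit field, using the LED_*_SHIFT helpers defined just below. The helper name is the editor's invention.

static u16 et131x_led2_value_sketch(void)
{
	u16 led2 = 0;

	led2 |= LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT; /* link LED: on, blink on activity */
	led2 |= LED_VAL_TXRX << LED_TXRX_SHIFT;          /* activity LED: blink on TX or RX */
	led2 |= LED_VAL_OFF << LED_100TX_SHIFT;          /* 100BaseTX LED: forced off */
	led2 |= LED_VAL_OFF;                             /* 1000BaseT LED (bits 3-0): forced off */

	return led2;
}

Such a value would typically be written to the PHY through the MII management registers (mii_mgmt_*) in the mac_regs map above.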
+ +#define LED_LINK_SHIFT 12 +#define LED_TXRX_SHIFT 8 +#define LED_100TX_SHIFT 4 + +/* MI Register 29 - 31: Reserved Reg(0x1D - 0x1E) */ diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig new file mode 100644 index 000000000..d8d95d4cd --- /dev/null +++ b/drivers/net/ethernet/allwinner/Kconfig @@ -0,0 +1,37 @@ +# +# Allwinner device configuration +# + +config NET_VENDOR_ALLWINNER + bool "Allwinner devices" + default y + + depends on ARCH_SUNXI + ---help--- + If you have a network (Ethernet) card belonging to this + class, say Y and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly + affect the kernel: saying N will just cause the configurator + to skip all the questions about Allwinner cards. If you say Y, + you will be asked for your specific card in the following + questions. + +if NET_VENDOR_ALLWINNER + +config SUN4I_EMAC + tristate "Allwinner A10 EMAC support" + depends on ARCH_SUNXI + depends on OF + select CRC32 + select MII + select PHYLIB + select MDIO_SUN4I + ---help--- + Support for Allwinner A10 EMAC ethernet driver. + + To compile this driver as a module, choose M here. The module + will be called sun4i-emac. + +endif # NET_VENDOR_ALLWINNER diff --git a/drivers/net/ethernet/allwinner/Makefile b/drivers/net/ethernet/allwinner/Makefile new file mode 100644 index 000000000..03129f796 --- /dev/null +++ b/drivers/net/ethernet/allwinner/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Allwinner device drivers. +# + +obj-$(CONFIG_SUN4I_EMAC) += sun4i-emac.o diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c new file mode 100644 index 000000000..bab01c849 --- /dev/null +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -0,0 +1,972 @@ +/* + * Allwinner EMAC Fast Ethernet driver for Linux. + * + * Copyright 2012-2013 Stefan Roese + * Copyright 2013 Maxime Ripard + * + * Based on the Linux driver provided by Allwinner: + * Copyright (C) 1997 Sten Wang + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sun4i-emac.h" + +#define DRV_NAME "sun4i-emac" +#define DRV_VERSION "1.02" + +#define EMAC_MAX_FRAME_LEN 0x0600 + +/* Transmit timeout, default 5 seconds. */ +static int watchdog = 5000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +/* EMAC register address locking. + * + * The EMAC uses an address register to control where data written + * to the data register goes. This means that the address register + * must be preserved over interrupts or similar calls. + * + * During interrupt and other critical calls, a spinlock is used to + * protect the system, but the calls themselves save the address + * in the address register in case they are interrupting another + * access to the device. + * + * For general accesses a lock is provided so that calls which are + * allowed to sleep are serialised so that the address register does + * not need to be saved. This lock also serves to serialise access + * to the EEPROM and PHY access registers which are shared between + * these two devices. 
+ */ + +/* The driver supports the original EMACE, and now the two newer + * devices, EMACA and EMACB. + */ + +struct emac_board_info { + struct clk *clk; + struct device *dev; + struct platform_device *pdev; + spinlock_t lock; + void __iomem *membase; + u32 msg_enable; + struct net_device *ndev; + struct sk_buff *skb_last; + u16 tx_fifo_stat; + + int emacrx_completed_flag; + + struct phy_device *phy_dev; + struct device_node *phy_node; + unsigned int link; + unsigned int speed; + unsigned int duplex; + + phy_interface_t phy_interface; +}; + +static void emac_update_speed(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned int reg_val; + + /* set EMAC SPEED, depend on PHY */ + reg_val = readl(db->membase + EMAC_MAC_SUPP_REG); + reg_val &= ~(0x1 << 8); + if (db->speed == SPEED_100) + reg_val |= 1 << 8; + writel(reg_val, db->membase + EMAC_MAC_SUPP_REG); +} + +static void emac_update_duplex(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned int reg_val; + + /* set duplex depend on phy */ + reg_val = readl(db->membase + EMAC_MAC_CTL1_REG); + reg_val &= ~EMAC_MAC_CTL1_DUPLEX_EN; + if (db->duplex) + reg_val |= EMAC_MAC_CTL1_DUPLEX_EN; + writel(reg_val, db->membase + EMAC_MAC_CTL1_REG); +} + +static void emac_handle_link_change(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + struct phy_device *phydev = db->phy_dev; + unsigned long flags; + int status_change = 0; + + if (phydev->link) { + if (db->speed != phydev->speed) { + spin_lock_irqsave(&db->lock, flags); + db->speed = phydev->speed; + emac_update_speed(dev); + spin_unlock_irqrestore(&db->lock, flags); + status_change = 1; + } + + if (db->duplex != phydev->duplex) { + spin_lock_irqsave(&db->lock, flags); + db->duplex = phydev->duplex; + emac_update_duplex(dev); + spin_unlock_irqrestore(&db->lock, flags); + status_change = 1; + } + } + + if (phydev->link != db->link) { + if (!phydev->link) { + db->speed = 0; + db->duplex = -1; + } + db->link = phydev->link; + + status_change = 1; + } + + if (status_change) + phy_print_status(phydev); +} + +static int emac_mdio_probe(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + + /* to-do: PHY interrupts are currently not supported */ + + /* attach the mac to the phy */ + db->phy_dev = of_phy_connect(db->ndev, db->phy_node, + &emac_handle_link_change, 0, + db->phy_interface); + if (!db->phy_dev) { + netdev_err(db->ndev, "could not find the PHY\n"); + return -ENODEV; + } + + /* mask with MAC supported features */ + db->phy_dev->supported &= PHY_BASIC_FEATURES; + db->phy_dev->advertising = db->phy_dev->supported; + + db->link = 0; + db->speed = 0; + db->duplex = -1; + + return 0; +} + +static void emac_mdio_remove(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + + phy_disconnect(db->phy_dev); + db->phy_dev = NULL; +} + +static void emac_reset(struct emac_board_info *db) +{ + dev_dbg(db->dev, "resetting device\n"); + + /* RESET device */ + writel(0, db->membase + EMAC_CTL_REG); + udelay(200); + writel(EMAC_CTL_RESET, db->membase + EMAC_CTL_REG); + udelay(200); +} + +static void emac_outblk_32bit(void __iomem *reg, void *data, int count) +{ + writesl(reg, data, round_up(count, 4) / 4); +} + +static void emac_inblk_32bit(void __iomem *reg, void *data, int count) +{ + readsl(reg, data, round_up(count, 4) / 4); +} + +static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct 
phy_device *phydev = dm->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +/* ethtool ops */ +static void emac_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(DRV_NAME)); + strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); + strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info)); +} + +static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct emac_board_info *dm = netdev_priv(dev); + struct phy_device *phydev = dm->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_drvinfo = emac_get_drvinfo, + .get_settings = emac_get_settings, + .set_settings = emac_set_settings, + .get_link = ethtool_op_get_link, +}; + +static unsigned int emac_setup(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + unsigned int reg_val; + + /* set up TX */ + reg_val = readl(db->membase + EMAC_TX_MODE_REG); + + writel(reg_val | EMAC_TX_MODE_ABORTED_FRAME_EN, + db->membase + EMAC_TX_MODE_REG); + + /* set MAC */ + /* set MAC CTL0 */ + reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); + writel(reg_val | EMAC_MAC_CTL0_RX_FLOW_CTL_EN | + EMAC_MAC_CTL0_TX_FLOW_CTL_EN, + db->membase + EMAC_MAC_CTL0_REG); + + /* set MAC CTL1 */ + reg_val = readl(db->membase + EMAC_MAC_CTL1_REG); + reg_val |= EMAC_MAC_CTL1_LEN_CHECK_EN; + reg_val |= EMAC_MAC_CTL1_CRC_EN; + reg_val |= EMAC_MAC_CTL1_PAD_EN; + writel(reg_val, db->membase + EMAC_MAC_CTL1_REG); + + /* set up IPGT */ + writel(EMAC_MAC_IPGT_FULL_DUPLEX, db->membase + EMAC_MAC_IPGT_REG); + + /* set up IPGR */ + writel((EMAC_MAC_IPGR_IPG1 << 8) | EMAC_MAC_IPGR_IPG2, + db->membase + EMAC_MAC_IPGR_REG); + + /* set up Collison window */ + writel((EMAC_MAC_CLRT_COLLISION_WINDOW << 8) | EMAC_MAC_CLRT_RM, + db->membase + EMAC_MAC_CLRT_REG); + + /* set up Max Frame Length */ + writel(EMAC_MAX_FRAME_LEN, + db->membase + EMAC_MAC_MAXF_REG); + + return 0; +} + +static void emac_set_rx_mode(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + unsigned int reg_val; + + /* set up RX */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + + if (ndev->flags & IFF_PROMISC) + reg_val |= EMAC_RX_CTL_PASS_ALL_EN; + else + reg_val &= ~EMAC_RX_CTL_PASS_ALL_EN; + + writel(reg_val | EMAC_RX_CTL_PASS_LEN_OOR_EN | + EMAC_RX_CTL_ACCEPT_UNICAST_EN | EMAC_RX_CTL_DA_FILTER_EN | + EMAC_RX_CTL_ACCEPT_MULTICAST_EN | + EMAC_RX_CTL_ACCEPT_BROADCAST_EN, + db->membase + EMAC_RX_CTL_REG); +} + +static unsigned int emac_powerup(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + unsigned int reg_val; + + /* initial EMAC */ + /* flush RX FIFO */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val |= 0x8; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + udelay(1); + + /* initial MAC */ + /* soft reset MAC */ + reg_val = readl(db->membase + EMAC_MAC_CTL0_REG); + reg_val &= ~EMAC_MAC_CTL0_SOFT_RESET; + writel(reg_val, db->membase + EMAC_MAC_CTL0_REG); + + /* set MII clock */ + reg_val = readl(db->membase + EMAC_MAC_MCFG_REG); + reg_val &= (~(0xf << 2)); + reg_val |= (0xD << 2); + 
writel(reg_val, db->membase + EMAC_MAC_MCFG_REG); + + /* clear RX counter */ + writel(0x0, db->membase + EMAC_RX_FBC_REG); + + /* disable all interrupt and clear interrupt status */ + writel(0, db->membase + EMAC_INT_CTL_REG); + reg_val = readl(db->membase + EMAC_INT_STA_REG); + writel(reg_val, db->membase + EMAC_INT_STA_REG); + + udelay(1); + + /* set up EMAC */ + emac_setup(ndev); + + /* set mac_address to chip */ + writel(ndev->dev_addr[0] << 16 | ndev->dev_addr[1] << 8 | ndev-> + dev_addr[2], db->membase + EMAC_MAC_A1_REG); + writel(ndev->dev_addr[3] << 16 | ndev->dev_addr[4] << 8 | ndev-> + dev_addr[5], db->membase + EMAC_MAC_A0_REG); + + mdelay(1); + + return 0; +} + +static int emac_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct emac_board_info *db = netdev_priv(dev); + + if (netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev-> + dev_addr[2], db->membase + EMAC_MAC_A1_REG); + writel(dev->dev_addr[3] << 16 | dev->dev_addr[4] << 8 | dev-> + dev_addr[5], db->membase + EMAC_MAC_A0_REG); + + return 0; +} + +/* Initialize emac board */ +static void emac_init_device(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long flags; + unsigned int reg_val; + + spin_lock_irqsave(&db->lock, flags); + + emac_update_speed(dev); + emac_update_duplex(dev); + + /* enable RX/TX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val | EMAC_CTL_RESET | EMAC_CTL_TX_EN | EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + + /* enable RX/TX0/RX Hlevel interrup */ + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + spin_unlock_irqrestore(&db->lock, flags); +} + +/* Our watchdog timed out. Called by the networking layer */ +static void emac_timeout(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long flags; + + if (netif_msg_timer(db)) + dev_err(db->dev, "tx time out.\n"); + + /* Save previous register address */ + spin_lock_irqsave(&db->lock, flags); + + netif_stop_queue(dev); + emac_reset(db); + emac_init_device(dev); + /* We can accept TX packets again */ + dev->trans_start = jiffies; + netif_wake_queue(dev); + + /* Restore previous register address */ + spin_unlock_irqrestore(&db->lock, flags); +} + +/* Hardware start transmission. + * Send a packet to media from the upper layer. + */ +static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + unsigned long channel; + unsigned long flags; + + channel = db->tx_fifo_stat & 3; + if (channel == 3) + return 1; + + channel = (channel == 1 ? 
1 : 0); + + spin_lock_irqsave(&db->lock, flags); + + writel(channel, db->membase + EMAC_TX_INS_REG); + + emac_outblk_32bit(db->membase + EMAC_TX_IO_DATA_REG, + skb->data, skb->len); + dev->stats.tx_bytes += skb->len; + + db->tx_fifo_stat |= 1 << channel; + /* TX control: First packet immediately send, second packet queue */ + if (channel == 0) { + /* set TX len */ + writel(skb->len, db->membase + EMAC_TX_PL0_REG); + /* start translate from fifo to phy */ + writel(readl(db->membase + EMAC_TX_CTL0_REG) | 1, + db->membase + EMAC_TX_CTL0_REG); + + /* save the time stamp */ + dev->trans_start = jiffies; + } else if (channel == 1) { + /* set TX len */ + writel(skb->len, db->membase + EMAC_TX_PL1_REG); + /* start translate from fifo to phy */ + writel(readl(db->membase + EMAC_TX_CTL1_REG) | 1, + db->membase + EMAC_TX_CTL1_REG); + + /* save the time stamp */ + dev->trans_start = jiffies; + } + + if ((db->tx_fifo_stat & 3) == 3) { + /* Second packet */ + netif_stop_queue(dev); + } + + spin_unlock_irqrestore(&db->lock, flags); + + /* free this SKB */ + dev_consume_skb_any(skb); + + return NETDEV_TX_OK; +} + +/* EMAC interrupt handler + * receive the packet to upper layer, free the transmitted packet + */ +static void emac_tx_done(struct net_device *dev, struct emac_board_info *db, + unsigned int tx_status) +{ + /* One packet sent complete */ + db->tx_fifo_stat &= ~(tx_status & 3); + if (3 == (tx_status & 3)) + dev->stats.tx_packets += 2; + else + dev->stats.tx_packets++; + + if (netif_msg_tx_done(db)) + dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status); + + netif_wake_queue(dev); +} + +/* Received a packet and pass to upper layer + */ +static void emac_rx(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + struct sk_buff *skb; + u8 *rdptr; + bool good_packet; + static int rxlen_last; + unsigned int reg_val; + u32 rxhdr, rxstatus, rxcount, rxlen; + + /* Check packet ready or not */ + while (1) { + /* race warning: the first packet might arrive with + * the interrupts disabled, but the second will fix + * it + */ + rxcount = readl(db->membase + EMAC_RX_FBC_REG); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RXCount: %x\n", rxcount); + + if ((db->skb_last != NULL) && (rxlen_last > 0)) { + dev->stats.rx_bytes += rxlen_last; + + /* Pass to upper layer */ + db->skb_last->protocol = eth_type_trans(db->skb_last, + dev); + netif_rx(db->skb_last); + dev->stats.rx_packets++; + db->skb_last = NULL; + rxlen_last = 0; + + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + reg_val &= ~EMAC_RX_CTL_DMA_EN; + writel(reg_val, db->membase + EMAC_RX_CTL_REG); + } + + if (!rxcount) { + db->emacrx_completed_flag = 1; + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + /* had one stuck? 
*/ + rxcount = readl(db->membase + EMAC_RX_FBC_REG); + if (!rxcount) + return; + } + + reg_val = readl(db->membase + EMAC_RX_IO_DATA_REG); + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "receive header: %x\n", reg_val); + if (reg_val != EMAC_UNDOCUMENTED_MAGIC) { + /* disable RX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val & ~EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + + /* Flush RX FIFO */ + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + writel(reg_val | (1 << 3), + db->membase + EMAC_RX_CTL_REG); + + do { + reg_val = readl(db->membase + EMAC_RX_CTL_REG); + } while (reg_val & (1 << 3)); + + /* enable RX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + writel(reg_val | EMAC_CTL_RX_EN, + db->membase + EMAC_CTL_REG); + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + + db->emacrx_completed_flag = 1; + + return; + } + + /* A packet ready now & Get status/length */ + good_packet = true; + + emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG, + &rxhdr, sizeof(rxhdr)); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "rxhdr: %x\n", *((int *)(&rxhdr))); + + rxlen = EMAC_RX_IO_DATA_LEN(rxhdr); + rxstatus = EMAC_RX_IO_DATA_STATUS(rxhdr); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RX: status %02x, length %04x\n", + rxstatus, rxlen); + + /* Packet Status check */ + if (rxlen < 0x40) { + good_packet = false; + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "RX: Bad Packet (runt)\n"); + } + + if (unlikely(!(rxstatus & EMAC_RX_IO_DATA_STATUS_OK))) { + good_packet = false; + + if (rxstatus & EMAC_RX_IO_DATA_STATUS_CRC_ERR) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "crc error\n"); + dev->stats.rx_crc_errors++; + } + + if (rxstatus & EMAC_RX_IO_DATA_STATUS_LEN_ERR) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "length error\n"); + dev->stats.rx_length_errors++; + } + } + + /* Move data from EMAC */ + if (good_packet) { + skb = netdev_alloc_skb(dev, rxlen + 4); + if (!skb) + continue; + skb_reserve(skb, 2); + rdptr = (u8 *) skb_put(skb, rxlen - 4); + + /* Read received packet from RX SRAM */ + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RxLen %x\n", rxlen); + + emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG, + rdptr, rxlen); + dev->stats.rx_bytes += rxlen; + + /* Pass to upper layer */ + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + } +} + +static irqreturn_t emac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct emac_board_info *db = netdev_priv(dev); + int int_status; + unsigned long flags; + unsigned int reg_val; + + /* A real interrupt coming */ + + /* holders of db->lock must always block IRQs */ + spin_lock_irqsave(&db->lock, flags); + + /* Disable all interrupts */ + writel(0, db->membase + EMAC_INT_CTL_REG); + + /* Got EMAC interrupt status */ + /* Got ISR */ + int_status = readl(db->membase + EMAC_INT_STA_REG); + /* Clear ISR status */ + writel(int_status, db->membase + EMAC_INT_STA_REG); + + if (netif_msg_intr(db)) + dev_dbg(db->dev, "emac interrupt %02x\n", int_status); + + /* Received the coming packet */ + if ((int_status & 0x100) && (db->emacrx_completed_flag == 1)) { + /* carrier lost */ + db->emacrx_completed_flag = 0; + emac_rx(dev); + } + + /* Transmit Interrupt check */ + if (int_status & (0x01 | 0x02)) + emac_tx_done(dev, db, int_status); + + if (int_status & (0x04 | 0x08)) + netdev_info(dev, " ab : %x\n", int_status); + + /* Re-enable interrupt mask */ + if 
(db->emacrx_completed_flag == 1) { + reg_val = readl(db->membase + EMAC_INT_CTL_REG); + reg_val |= (0xf << 0) | (0x01 << 8); + writel(reg_val, db->membase + EMAC_INT_CTL_REG); + } + spin_unlock_irqrestore(&db->lock, flags); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Used by netconsole + */ +static void emac_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + emac_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* Open the interface. + * The interface is opened whenever "ifconfig" actives it. + */ +static int emac_open(struct net_device *dev) +{ + struct emac_board_info *db = netdev_priv(dev); + int ret; + + if (netif_msg_ifup(db)) + dev_dbg(db->dev, "enabling %s\n", dev->name); + + if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev)) + return -EAGAIN; + + /* Initialize EMAC board */ + emac_reset(db); + emac_init_device(dev); + + ret = emac_mdio_probe(dev); + if (ret < 0) { + free_irq(dev->irq, dev); + netdev_err(dev, "cannot probe MDIO bus\n"); + return ret; + } + + phy_start(db->phy_dev); + netif_start_queue(dev); + + return 0; +} + +static void emac_shutdown(struct net_device *dev) +{ + unsigned int reg_val; + struct emac_board_info *db = netdev_priv(dev); + + /* Disable all interrupt */ + writel(0, db->membase + EMAC_INT_CTL_REG); + + /* clear interrupt status */ + reg_val = readl(db->membase + EMAC_INT_STA_REG); + writel(reg_val, db->membase + EMAC_INT_STA_REG); + + /* Disable RX/TX */ + reg_val = readl(db->membase + EMAC_CTL_REG); + reg_val &= ~(EMAC_CTL_TX_EN | EMAC_CTL_RX_EN | EMAC_CTL_RESET); + writel(reg_val, db->membase + EMAC_CTL_REG); +} + +/* Stop the interface. + * The interface is stopped when it is brought. + */ +static int emac_stop(struct net_device *ndev) +{ + struct emac_board_info *db = netdev_priv(ndev); + + if (netif_msg_ifdown(db)) + dev_dbg(db->dev, "shutting down %s\n", ndev->name); + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + phy_stop(db->phy_dev); + + emac_mdio_remove(ndev); + + emac_shutdown(ndev); + + free_irq(ndev->irq, ndev); + + return 0; +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_stop, + .ndo_start_xmit = emac_start_xmit, + .ndo_tx_timeout = emac_timeout, + .ndo_set_rx_mode = emac_set_rx_mode, + .ndo_do_ioctl = emac_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = emac_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = emac_poll_controller, +#endif +}; + +/* Search EMAC board, allocate space and register it + */ +static int emac_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct emac_board_info *db; + struct net_device *ndev; + int ret = 0; + const char *mac_addr; + + ndev = alloc_etherdev(sizeof(struct emac_board_info)); + if (!ndev) { + dev_err(&pdev->dev, "could not allocate device.\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + db = netdev_priv(ndev); + memset(db, 0, sizeof(*db)); + + db->dev = &pdev->dev; + db->ndev = ndev; + db->pdev = pdev; + + spin_lock_init(&db->lock); + + db->membase = of_iomap(np, 0); + if (!db->membase) { + dev_err(&pdev->dev, "failed to remap registers\n"); + ret = -ENOMEM; + goto out; + } + + /* fill in parameters for net-dev structure */ + ndev->base_addr = (unsigned long)db->membase; + ndev->irq = irq_of_parse_and_map(np, 0); + if (ndev->irq == -ENXIO) { + netdev_err(ndev, "No irq resource\n"); + ret = ndev->irq; + goto out; + } 
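A note on the interrupt mask used throughout this driver: the literal value (0xf << 0) | (0x01 << 8) is written to EMAC_INT_CTL_REG in emac_init_device(), emac_rx() and emac_interrupt(). Judging from the checks in emac_interrupt(), bits 0-1 signal TX channel completion, bits 2-3 appear to signal TX aborts, and bit 8 is the RX interrupt. The sketch below is illustrative only and not part of this patch; the macro and function names are the editor's invention.

#define EMAC_INT_TX_MASK	(0xf << 0)	/* TX done and abort bits, per emac_interrupt() */
#define EMAC_INT_RX_MASK	(0x01 << 8)	/* RX interrupt */

static void emac_int_enable(struct emac_board_info *db)
{
	u32 reg_val = readl(db->membase + EMAC_INT_CTL_REG);

	writel(reg_val | EMAC_INT_TX_MASK | EMAC_INT_RX_MASK,
	       db->membase + EMAC_INT_CTL_REG);
}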
+ + db->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(db->clk)) { + ret = PTR_ERR(db->clk); + goto out; + } + + clk_prepare_enable(db->clk); + + db->phy_node = of_parse_phandle(np, "phy", 0); + if (!db->phy_node) { + dev_err(&pdev->dev, "no associated PHY\n"); + ret = -ENODEV; + goto out; + } + + /* Read MAC-address from DT */ + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(&pdev->dev, "using random MAC address %pM\n", + ndev->dev_addr); + } + + db->emacrx_completed_flag = 1; + emac_powerup(ndev); + emac_reset(db); + + ndev->netdev_ops = &emac_netdev_ops; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); + ndev->ethtool_ops = &emac_ethtool_ops; + + platform_set_drvdata(pdev, ndev); + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(ndev); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Registering netdev failed!\n"); + ret = -ENODEV; + goto out; + } + + dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n", + ndev->name, db->membase, ndev->irq, ndev->dev_addr); + + return 0; + +out: + dev_err(db->dev, "not found (%d).\n", ret); + + free_netdev(ndev); + + return ret; +} + +static int emac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + unregister_netdev(ndev); + free_netdev(ndev); + + dev_dbg(&pdev->dev, "released and freed device\n"); + return 0; +} + +static int emac_suspend(struct platform_device *dev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(dev); + + netif_carrier_off(ndev); + netif_device_detach(ndev); + emac_shutdown(ndev); + + return 0; +} + +static int emac_resume(struct platform_device *dev) +{ + struct net_device *ndev = platform_get_drvdata(dev); + struct emac_board_info *db = netdev_priv(ndev); + + emac_reset(db); + emac_init_device(ndev); + netif_device_attach(ndev); + + return 0; +} + +static const struct of_device_id emac_of_match[] = { + {.compatible = "allwinner,sun4i-a10-emac",}, + + /* Deprecated */ + {.compatible = "allwinner,sun4i-emac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, emac_of_match); + +static struct platform_driver emac_driver = { + .driver = { + .name = "sun4i-emac", + .of_match_table = emac_of_match, + }, + .probe = emac_probe, + .remove = emac_remove, + .suspend = emac_suspend, + .resume = emac_resume, +}; + +module_platform_driver(emac_driver); + +MODULE_AUTHOR("Stefan Roese "); +MODULE_AUTHOR("Maxime Ripard "); +MODULE_DESCRIPTION("Allwinner A10 emac network driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h new file mode 100644 index 000000000..38c72d9ec --- /dev/null +++ b/drivers/net/ethernet/allwinner/sun4i-emac.h @@ -0,0 +1,108 @@ +/* + * Allwinner EMAC Fast Ethernet driver for Linux. + * + * Copyright 2012 Stefan Roese + * Copyright 2013 Maxime Ripard + * + * Based on the Linux driver provided by Allwinner: + * Copyright (C) 1997 Sten Wang + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef _SUN4I_EMAC_H_ +#define _SUN4I_EMAC_H_ + +#define EMAC_CTL_REG (0x00) +#define EMAC_CTL_RESET (1 << 0) +#define EMAC_CTL_TX_EN (1 << 1) +#define EMAC_CTL_RX_EN (1 << 2) +#define EMAC_TX_MODE_REG (0x04) +#define EMAC_TX_MODE_ABORTED_FRAME_EN (1 << 0) +#define EMAC_TX_MODE_DMA_EN (1 << 1) +#define EMAC_TX_FLOW_REG (0x08) +#define EMAC_TX_CTL0_REG (0x0c) +#define EMAC_TX_CTL1_REG (0x10) +#define EMAC_TX_INS_REG (0x14) +#define EMAC_TX_PL0_REG (0x18) +#define EMAC_TX_PL1_REG (0x1c) +#define EMAC_TX_STA_REG (0x20) +#define EMAC_TX_IO_DATA_REG (0x24) +#define EMAC_TX_IO_DATA1_REG (0x28) +#define EMAC_TX_TSVL0_REG (0x2c) +#define EMAC_TX_TSVH0_REG (0x30) +#define EMAC_TX_TSVL1_REG (0x34) +#define EMAC_TX_TSVH1_REG (0x38) +#define EMAC_RX_CTL_REG (0x3c) +#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1) +#define EMAC_RX_CTL_DMA_EN (1 << 2) +#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4) +#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5) +#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6) +#define EMAC_RX_CTL_PASS_LEN_ERR_EN (1 << 7) +#define EMAC_RX_CTL_PASS_LEN_OOR_EN (1 << 8) +#define EMAC_RX_CTL_ACCEPT_UNICAST_EN (1 << 16) +#define EMAC_RX_CTL_DA_FILTER_EN (1 << 17) +#define EMAC_RX_CTL_ACCEPT_MULTICAST_EN (1 << 20) +#define EMAC_RX_CTL_HASH_FILTER_EN (1 << 21) +#define EMAC_RX_CTL_ACCEPT_BROADCAST_EN (1 << 22) +#define EMAC_RX_CTL_SA_FILTER_EN (1 << 24) +#define EMAC_RX_CTL_SA_FILTER_INVERT_EN (1 << 25) +#define EMAC_RX_HASH0_REG (0x40) +#define EMAC_RX_HASH1_REG (0x44) +#define EMAC_RX_STA_REG (0x48) +#define EMAC_RX_IO_DATA_REG (0x4c) +#define EMAC_RX_IO_DATA_LEN(x) (x & 0xffff) +#define EMAC_RX_IO_DATA_STATUS(x) ((x >> 16) & 0xffff) +#define EMAC_RX_IO_DATA_STATUS_CRC_ERR (1 << 4) +#define EMAC_RX_IO_DATA_STATUS_LEN_ERR (3 << 5) +#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7) +#define EMAC_RX_FBC_REG (0x50) +#define EMAC_INT_CTL_REG (0x54) +#define EMAC_INT_STA_REG (0x58) +#define EMAC_MAC_CTL0_REG (0x5c) +#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2) +#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3) +#define EMAC_MAC_CTL0_SOFT_RESET (1 << 15) +#define EMAC_MAC_CTL1_REG (0x60) +#define EMAC_MAC_CTL1_DUPLEX_EN (1 << 0) +#define EMAC_MAC_CTL1_LEN_CHECK_EN (1 << 1) +#define EMAC_MAC_CTL1_HUGE_FRAME_EN (1 << 2) +#define EMAC_MAC_CTL1_DELAYED_CRC_EN (1 << 3) +#define EMAC_MAC_CTL1_CRC_EN (1 << 4) +#define EMAC_MAC_CTL1_PAD_EN (1 << 5) +#define EMAC_MAC_CTL1_PAD_CRC_EN (1 << 6) +#define EMAC_MAC_CTL1_AD_SHORT_FRAME_EN (1 << 7) +#define EMAC_MAC_CTL1_BACKOFF_DIS (1 << 12) +#define EMAC_MAC_IPGT_REG (0x64) +#define EMAC_MAC_IPGT_HALF_DUPLEX (0x12) +#define EMAC_MAC_IPGT_FULL_DUPLEX (0x15) +#define EMAC_MAC_IPGR_REG (0x68) +#define EMAC_MAC_IPGR_IPG1 (0x0c) +#define EMAC_MAC_IPGR_IPG2 (0x12) +#define EMAC_MAC_CLRT_REG (0x6c) +#define EMAC_MAC_CLRT_COLLISION_WINDOW (0x37) +#define EMAC_MAC_CLRT_RM (0x0f) +#define EMAC_MAC_MAXF_REG (0x70) +#define EMAC_MAC_SUPP_REG (0x74) +#define EMAC_MAC_TEST_REG (0x78) +#define EMAC_MAC_MCFG_REG (0x7c) +#define EMAC_MAC_A0_REG (0x98) +#define EMAC_MAC_A1_REG (0x9c) +#define EMAC_MAC_A2_REG (0xa0) +#define EMAC_SAFX_L_REG0 (0xa4) +#define EMAC_SAFX_H_REG0 (0xa8) +#define EMAC_SAFX_L_REG1 (0xac) +#define EMAC_SAFX_H_REG1 (0xb0) +#define EMAC_SAFX_L_REG2 (0xb4) +#define EMAC_SAFX_H_REG2 (0xb8) +#define EMAC_SAFX_L_REG3 (0xbc) +#define EMAC_SAFX_H_REG3 (0xc0) + +#define EMAC_PHY_DUPLEX (1 << 8) + +#define EMAC_EEPROM_MAGIC (0x444d394b) +#define EMAC_UNDOCUMENTED_MAGIC (0x0143414d) +#endif /* _SUN4I_EMAC_H_ */ diff --git a/drivers/net/ethernet/alteon/Kconfig 
b/drivers/net/ethernet/alteon/Kconfig new file mode 100644 index 000000000..799a85282 --- /dev/null +++ b/drivers/net/ethernet/alteon/Kconfig @@ -0,0 +1,48 @@ +# +# Alteon network device configuration +# + +config NET_VENDOR_ALTEON + bool "Alteon devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Alteon cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ALTEON + +config ACENIC + tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support" + depends on PCI + ---help--- + Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear + GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet + adapter. The driver allows for using the Jumbo Frame option (9000 + bytes/frame) however it requires that your switches can handle this + as well. To enable Jumbo Frames, add `mtu 9000' to your ifconfig + line. + + To compile this driver as a module, choose M here: the + module will be called acenic. + +config ACENIC_OMIT_TIGON_I + bool "Omit support for old Tigon I based AceNICs" + depends on ACENIC + ---help--- + Say Y here if you only have Tigon II based AceNICs and want to leave + out support for the older Tigon I based cards which are no longer + being sold (ie. the original Alteon AceNIC and 3Com 3C985 (non B + version)). This will reduce the size of the driver object by + app. 100KB. If you are not sure whether your card is a Tigon I or a + Tigon II, say N here. + + The safe and default value for this is N. + +endif # NET_VENDOR_ALTEON diff --git a/drivers/net/ethernet/alteon/Makefile b/drivers/net/ethernet/alteon/Makefile new file mode 100644 index 000000000..a2ca173f2 --- /dev/null +++ b/drivers/net/ethernet/alteon/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Alteon network device drivers. +# + +obj-$(CONFIG_ACENIC) += acenic.o diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c new file mode 100644 index 000000000..66d89c23b --- /dev/null +++ b/drivers/net/ethernet/alteon/acenic.c @@ -0,0 +1,3189 @@ +/* + * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card + * and other Tigon based cards. + * + * Copyright 1998-2002 by Jes Sorensen, . + * + * Thanks to Alteon and 3Com for providing hardware and documentation + * enabling me to write this driver. + * + * A mailing list for discussing the use of this driver has been + * setup, please subscribe to the lists if you have any questions + * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to + * see how to subscribe. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Additional credits: + * Pete Wyckoff : Initial Linux/Alpha and trace + * dump support. The trace dump support has not been + * integrated yet however. + * Troy Benjegerdes: Big Endian (PPC) patches. + * Nate Stahl: Better out of memory handling and stats support. + * Aman Singla: Nasty race between interrupt handler and tx code dealing + * with 'testing the tx_ret_csm and setting tx_full' + * David S. 
Miller : conversion to new PCI dma mapping + * infrastructure and Sparc support + * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the + * driver under Linux/Sparc64 + * Matt Domsch : Detect Alteon 1000baseT cards + * ETHTOOL_GDRVINFO support + * Chip Salzenberg : Fix race condition between tx + * handler and close() cleanup. + * Ken Aaker : Correct check for whether + * memory mapped IO is enabled to + * make the driver work on RS/6000. + * Takayoshi Kouchi : Identifying problem + * where the driver would disable + * bus master mode if it had to disable + * write and invalidate. + * Stephen Hack : Fixed ace_set_mac_addr for little + * endian systems. + * Val Henson : Reset Jumbo skb producer and + * rx producer index when + * flushing the Jumbo ring. + * Hans Grobler : Memory leak fixes in the + * driver init path. + * Grant Grundler : PCI write posting fixes. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef SIOCETHTOOL +#include +#endif + +#include +#include + +#include +#include +#include +#include + + +#define DRV_NAME "acenic" + +#undef INDEX_DEBUG + +#ifdef CONFIG_ACENIC_OMIT_TIGON_I +#define ACE_IS_TIGON_I(ap) 0 +#define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES +#else +#define ACE_IS_TIGON_I(ap) (ap->version == 1) +#define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries +#endif + +#ifndef PCI_VENDOR_ID_ALTEON +#define PCI_VENDOR_ID_ALTEON 0x12ae +#endif +#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE +#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001 +#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002 +#endif +#ifndef PCI_DEVICE_ID_3COM_3C985 +#define PCI_DEVICE_ID_3COM_3C985 0x0001 +#endif +#ifndef PCI_VENDOR_ID_NETGEAR +#define PCI_VENDOR_ID_NETGEAR 0x1385 +#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a +#endif +#ifndef PCI_DEVICE_ID_NETGEAR_GA620T +#define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a +#endif + + +/* + * Farallon used the DEC vendor ID by mistake and they seem not + * to care - stinky! + */ +#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX +#define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a +#endif +#ifndef PCI_DEVICE_ID_FARALLON_PN9100T +#define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa +#endif +#ifndef PCI_VENDOR_ID_SGI +#define PCI_VENDOR_ID_SGI 0x10a9 +#endif +#ifndef PCI_DEVICE_ID_SGI_ACENIC +#define PCI_DEVICE_ID_SGI_ACENIC 0x0009 +#endif + +static const struct pci_device_id acenic_pci_tbl[] = { + { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + { PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + /* + * Farallon used the DEC vendor ID on their cards incorrectly, + * then later Alteon's ID. 
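An aside on the ID table above: each entry matches on vendor/device plus a masked class code, and the 0xffff00 mask keeps the class and subclass (network/Ethernet) while ignoring the programming-interface byte. A minimal stand-alone sketch of that masked comparison (simplified names, not the kernel's matcher; PCI_ANY_ID and subsystem wildcards are ignored here):

    // Simplified stand-in for one table entry; not the kernel's struct pci_device_id.
    struct id_entry {
            unsigned int vendor, device;
            unsigned int class_code, class_mask;   // 24-bit class/subclass/prog-if
    };

    static int id_matches(const struct id_entry *e, unsigned int vendor,
                          unsigned int device, unsigned int class_code)
    {
            if (e->vendor != vendor || e->device != device)
                    return 0;
            // 0xffff00 keeps class+subclass, drops the prog-if byte
            return ((e->class_code ^ class_code) & e->class_mask) == 0;
    }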
+ */ + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + { PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC, + PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, }, + { } +}; +MODULE_DEVICE_TABLE(pci, acenic_pci_tbl); + +#define ace_sync_irq(irq) synchronize_irq(irq) + +#ifndef offset_in_page +#define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK) +#endif + +#define ACE_MAX_MOD_PARMS 8 +#define BOARD_IDX_STATIC 0 +#define BOARD_IDX_OVERFLOW -1 + +#include "acenic.h" + +/* + * These must be defined before the firmware is included. + */ +#define MAX_TEXT_LEN 96*1024 +#define MAX_RODATA_LEN 8*1024 +#define MAX_DATA_LEN 2*1024 + +#ifndef tigon2FwReleaseLocal +#define tigon2FwReleaseLocal 0 +#endif + +/* + * This driver currently supports Tigon I and Tigon II based cards + * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear + * GA620. The driver should also work on the SGI, DEC and Farallon + * versions of the card, however I have not been able to test that + * myself. + * + * This card is really neat, it supports receive hardware checksumming + * and jumbo frames (up to 9000 bytes) and does a lot of work in the + * firmware. Also the programming interface is quite neat, except for + * the parts dealing with the i2c eeprom on the card ;-) + * + * Using jumbo frames: + * + * To enable jumbo frames, simply specify an mtu between 1500 and 9000 + * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time + * by running `ifconfig eth mtu ' with being the Ethernet + * interface number and being the MTU value. + * + * Module parameters: + * + * When compiled as a loadable module, the driver allows for a number + * of module parameters to be specified. The driver supports the + * following module parameters: + * + * trace= - Firmware trace level. This requires special traced + * firmware to replace the firmware supplied with + * the driver - for debugging purposes only. + * + * link= - Link state. Normally you want to use the default link + * parameters set by the driver. This can be used to + * override these in case your switch doesn't negotiate + * the link properly. Valid values are: + * 0x0001 - Force half duplex link. + * 0x0002 - Do not negotiate line speed with the other end. + * 0x0010 - 10Mbit/sec link. + * 0x0020 - 100Mbit/sec link. + * 0x0040 - 1000Mbit/sec link. + * 0x0100 - Do not negotiate flow control. + * 0x0200 - Enable RX flow control Y + * 0x0400 - Enable TX flow control Y (Tigon II NICs only). + * Default value is 0x0270, ie. enable link+flow + * control negotiation. Negotiating the highest + * possible link speed with RX flow control enabled. + * + * When disabling link speed negotiation, only one link + * speed is allowed to be specified! + * + * tx_coal_tick= - number of coalescing clock ticks (us) allowed + * to wait for more packets to arive before + * interrupting the host, from the time the first + * packet arrives. + * + * rx_coal_tick= - number of coalescing clock ticks (us) allowed + * to wait for more packets to arive in the transmit ring, + * before interrupting the host, after transmitting the + * first packet in the ring. + * + * max_tx_desc= - maximum number of transmit descriptors + * (packets) transmitted before interrupting the host. 
+ * + * max_rx_desc= - maximum number of receive descriptors + * (packets) received before interrupting the host. + * + * tx_ratio= - 7 bit value (0 - 63) specifying the split in 64th + * increments of the NIC's on board memory to be used for + * transmit and receive buffers. For the 1MB NIC app. 800KB + * is available, on the 1/2MB NIC app. 300KB is available. + * 68KB will always be available as a minimum for both + * directions. The default value is a 50/50 split. + * dis_pci_mem_inval= - disable PCI memory write and invalidate + * operations, default (1) is to always disable this as + * that is what Alteon does on NT. I have not been able + * to measure any real performance differences with + * this on my systems. Set =0 if you want to + * enable these operations. + * + * If you use more than one NIC, specify the parameters for the + * individual NICs with a comma, ie. trace=0,0x00001fff,0 you want to + * run tracing on NIC #2 but not on NIC #1 and #3. + * + * TODO: + * + * - Proper multicast support. + * - NIC dump support. + * - More tuning parameters. + * + * The mini ring is not used under Linux and I am not sure it makes sense + * to actually use it. + * + * New interrupt handler strategy: + * + * The old interrupt handler worked using the traditional method of + * replacing an skbuff with a new one when a packet arrives. However + * the rx rings do not need to contain a static number of buffer + * descriptors, thus it makes sense to move the memory allocation out + * of the main interrupt handler and do it in a bottom half handler + * and only allocate new buffers when the number of buffers in the + * ring is below a certain threshold. In order to avoid starving the + * NIC under heavy load it is however necessary to force allocation + * when hitting a minimum threshold. The strategy for alloction is as + * follows: + * + * RX_LOW_BUF_THRES - allocate buffers in the bottom half + * RX_PANIC_LOW_THRES - we are very low on buffers, allocate + * the buffers in the interrupt handler + * RX_RING_THRES - maximum number of buffers in the rx ring + * RX_MINI_THRES - maximum number of buffers in the mini ring + * RX_JUMBO_THRES - maximum number of buffers in the jumbo ring + * + * One advantagous side effect of this allocation approach is that the + * entire rx processing can be done without holding any spin lock + * since the rx rings and registers are totally independent of the tx + * ring and its registers. This of course includes the kmalloc's of + * new skb's. Thus start_xmit can run in parallel with rx processing + * and the memory allocation on SMP systems. + * + * Note that running the skb reallocation in a bottom half opens up + * another can of races which needs to be handled properly. In + * particular it can happen that the interrupt handler tries to run + * the reallocation while the bottom half is either running on another + * CPU or was interrupted on the same CPU. To get around this the + * driver uses bitops to prevent the reallocation routines from being + * reentered. + * + * TX handling can also be done without holding any spin lock, wheee + * this is fun! since tx_ret_csm is only written to by the interrupt + * handler. The case to be aware of is when shutting down the device + * and cleaning up where it is necessary to make sure that + * start_xmit() is not running while this is happening. Well DaveM + * informs me that this case is already protected against ... bye bye + * Mr. Spin Lock, it was nice to know you. 
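The "bitops to prevent the reallocation routines from being reentered" point above boils down to a single test-and-set guard per ring. A rough user-space sketch with C11 atomics (the driver itself uses test_and_set_bit()/clear_bit() on the *_refill_busy flags; the names here are illustrative):

    #include <stdatomic.h>

    static atomic_flag refill_busy = ATOMIC_FLAG_INIT;

    // Whoever wins the flag does the refill; everyone else skips it and, in the
    // driver, either defers to the tasklet or simply tries again later.
    static void try_refill(void (*refill)(void))
    {
            if (atomic_flag_test_and_set(&refill_busy))
                    return;                 // refill already in progress elsewhere
            refill();
            atomic_flag_clear(&refill_busy);
    }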
+ * + * TX interrupts are now partly disabled so the NIC will only generate + * TX interrupts for the number of coal ticks, not for the number of + * TX packets in the queue. This should reduce the number of TX only, + * ie. when no RX processing is done, interrupts seen. + */ + +/* + * Threshold values for RX buffer allocation - the low water marks for + * when to start refilling the rings are set to 75% of the ring + * sizes. It seems to make sense to refill the rings entirely from the + * intrrupt handler once it gets below the panic threshold, that way + * we don't risk that the refilling is moved to another CPU when the + * one running the interrupt handler just got the slab code hot in its + * cache. + */ +#define RX_RING_SIZE 72 +#define RX_MINI_SIZE 64 +#define RX_JUMBO_SIZE 48 + +#define RX_PANIC_STD_THRES 16 +#define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2 +#define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4 +#define RX_PANIC_MINI_THRES 12 +#define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2 +#define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4 +#define RX_PANIC_JUMBO_THRES 6 +#define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2 +#define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4 + + +/* + * Size of the mini ring entries, basically these just should be big + * enough to take TCP ACKs + */ +#define ACE_MINI_SIZE 100 + +#define ACE_MINI_BUFSIZE ACE_MINI_SIZE +#define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4) +#define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4) + +/* + * There seems to be a magic difference in the effect between 995 and 996 + * but little difference between 900 and 995 ... no idea why. + * + * There is now a default set of tuning parameters which is set, depending + * on whether or not the user enables Jumbo frames. It's assumed that if + * Jumbo frames are enabled, the user wants optimal tuning for that case. + */ +#define DEF_TX_COAL 400 /* 996 */ +#define DEF_TX_MAX_DESC 60 /* was 40 */ +#define DEF_RX_COAL 120 /* 1000 */ +#define DEF_RX_MAX_DESC 25 +#define DEF_TX_RATIO 21 /* 24 */ + +#define DEF_JUMBO_TX_COAL 20 +#define DEF_JUMBO_TX_MAX_DESC 60 +#define DEF_JUMBO_RX_COAL 30 +#define DEF_JUMBO_RX_MAX_DESC 6 +#define DEF_JUMBO_TX_RATIO 21 + +#if tigon2FwReleaseLocal < 20001118 +/* + * Standard firmware and early modifications duplicate + * IRQ load without this flag (coal timer is never reset). + * Note that with this flag tx_coal should be less than + * time to xmit full tx ring. + * 400usec is not so bad for tx ring size of 128. + */ +#define TX_COAL_INTS_ONLY 1 /* worth it */ +#else +/* + * With modified firmware, this is not necessary, but still useful. 
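A quick sanity check of the "400usec is not so bad for tx ring size of 128" remark above, assuming full-size 1500-byte frames (1538 bytes on the wire including preamble, FCS and inter-frame gap) at 1 Gbit/s; a throwaway calculation, not driver code:

    #include <stdio.h>

    int main(void)
    {
            double wire_bits = 128.0 * 1538 * 8;                  // full ring of max-size frames
            printf("%.0f us to drain\n", wire_bits / 1e9 * 1e6);  // ~1575 us, well above 400 us
            return 0;
    }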
+ */ +#define TX_COAL_INTS_ONLY 1 +#endif + +#define DEF_TRACE 0 +#define DEF_STAT (2 * TICKS_PER_SEC) + + +static int link_state[ACE_MAX_MOD_PARMS]; +static int trace[ACE_MAX_MOD_PARMS]; +static int tx_coal_tick[ACE_MAX_MOD_PARMS]; +static int rx_coal_tick[ACE_MAX_MOD_PARMS]; +static int max_tx_desc[ACE_MAX_MOD_PARMS]; +static int max_rx_desc[ACE_MAX_MOD_PARMS]; +static int tx_ratio[ACE_MAX_MOD_PARMS]; +static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1}; + +MODULE_AUTHOR("Jes Sorensen "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver"); +#ifndef CONFIG_ACENIC_OMIT_TIGON_I +/*(DEBLOBBED)*/ +#endif +/*(DEBLOBBED)*/ + +module_param_array_named(link, link_state, int, NULL, 0); +module_param_array(trace, int, NULL, 0); +module_param_array(tx_coal_tick, int, NULL, 0); +module_param_array(max_tx_desc, int, NULL, 0); +module_param_array(rx_coal_tick, int, NULL, 0); +module_param_array(max_rx_desc, int, NULL, 0); +module_param_array(tx_ratio, int, NULL, 0); +MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state"); +MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level"); +MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives"); +MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait"); +MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives"); +MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait"); +MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)"); + + +static const char version[] = + "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n" + " http://home.cern.ch/~jes/gige/acenic.html\n"; + +static int ace_get_settings(struct net_device *, struct ethtool_cmd *); +static int ace_set_settings(struct net_device *, struct ethtool_cmd *); +static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); + +static const struct ethtool_ops ace_ethtool_ops = { + .get_settings = ace_get_settings, + .set_settings = ace_set_settings, + .get_drvinfo = ace_get_drvinfo, +}; + +static void ace_watchdog(struct net_device *dev); + +static const struct net_device_ops ace_netdev_ops = { + .ndo_open = ace_open, + .ndo_stop = ace_close, + .ndo_tx_timeout = ace_watchdog, + .ndo_get_stats = ace_get_stats, + .ndo_start_xmit = ace_start_xmit, + .ndo_set_rx_mode = ace_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ace_set_mac_addr, + .ndo_change_mtu = ace_change_mtu, +}; + +static int acenic_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct net_device *dev; + struct ace_private *ap; + static int boards_found; + + dev = alloc_etherdev(sizeof(struct ace_private)); + if (dev == NULL) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + + ap = netdev_priv(dev); + ap->pdev = pdev; + ap->name = pci_name(pdev); + + dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + dev->watchdog_timeo = 5*HZ; + + dev->netdev_ops = &ace_netdev_ops; + dev->ethtool_ops = &ace_ethtool_ops; + + /* we only display this string ONCE */ + if (!boards_found) + printk(version); + + if (pci_enable_device(pdev)) + goto fail_free_netdev; + + /* + * Enable master mode before we start playing with the + * pci_command word since pci_set_master() will modify + * it. 
+ */ + pci_set_master(pdev); + + pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command); + + /* OpenFirmware on Mac's does not set this - DOH.. */ + if (!(ap->pci_command & PCI_COMMAND_MEMORY)) { + printk(KERN_INFO "%s: Enabling PCI Memory Mapped " + "access - was not enabled by BIOS/Firmware\n", + ap->name); + ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY; + pci_write_config_word(ap->pdev, PCI_COMMAND, + ap->pci_command); + wmb(); + } + + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency); + if (ap->pci_latency <= 0x40) { + ap->pci_latency = 0x40; + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency); + } + + /* + * Remap the regs into kernel space - this is abuse of + * dev->base_addr since it was means for I/O port + * addresses but who gives a damn. + */ + dev->base_addr = pci_resource_start(pdev, 0); + ap->regs = ioremap(dev->base_addr, 0x4000); + if (!ap->regs) { + printk(KERN_ERR "%s: Unable to map I/O register, " + "AceNIC %i will be disabled.\n", + ap->name, boards_found); + goto fail_free_netdev; + } + + switch(pdev->vendor) { + case PCI_VENDOR_ID_ALTEON: + if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) { + printk(KERN_INFO "%s: Farallon PN9100-T ", + ap->name); + } else { + printk(KERN_INFO "%s: Alteon AceNIC ", + ap->name); + } + break; + case PCI_VENDOR_ID_3COM: + printk(KERN_INFO "%s: 3Com 3C985 ", ap->name); + break; + case PCI_VENDOR_ID_NETGEAR: + printk(KERN_INFO "%s: NetGear GA620 ", ap->name); + break; + case PCI_VENDOR_ID_DEC: + if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) { + printk(KERN_INFO "%s: Farallon PN9000-SX ", + ap->name); + break; + } + case PCI_VENDOR_ID_SGI: + printk(KERN_INFO "%s: SGI AceNIC ", ap->name); + break; + default: + printk(KERN_INFO "%s: Unknown AceNIC ", ap->name); + break; + } + + printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr); + printk("irq %d\n", pdev->irq); + +#ifdef CONFIG_ACENIC_OMIT_TIGON_I + if ((readl(&ap->regs->HostCtrl) >> 28) == 4) { + printk(KERN_ERR "%s: Driver compiled without Tigon I" + " support - NIC disabled\n", dev->name); + goto fail_uninit; + } +#endif + + if (ace_allocate_descriptors(dev)) + goto fail_free_netdev; + +#ifdef MODULE + if (boards_found >= ACE_MAX_MOD_PARMS) + ap->board_idx = BOARD_IDX_OVERFLOW; + else + ap->board_idx = boards_found; +#else + ap->board_idx = BOARD_IDX_STATIC; +#endif + + if (ace_init(dev)) + goto fail_free_netdev; + + if (register_netdev(dev)) { + printk(KERN_ERR "acenic: device registration failed\n"); + goto fail_uninit; + } + ap->name = dev->name; + + if (ap->pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + pci_set_drvdata(pdev, dev); + + boards_found++; + return 0; + + fail_uninit: + ace_init_cleanup(dev); + fail_free_netdev: + free_netdev(dev); + return -ENODEV; +} + +static void acenic_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i; + + unregister_netdev(dev); + + writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); + if (ap->version >= 2) + writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl); + + /* + * This clears any pending interrupts + */ + writel(1, ®s->Mb0Lo); + readl(®s->CpuCtrl); /* flush */ + + /* + * Make sure no other CPUs are processing interrupts + * on the card before the buffers are being released. + * Otherwise one might experience some `interesting' + * effects. + * + * Then release the RX buffers - jumbo buffers were + * already released in ace_close(). 
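The teardown described above only works because the mapping handle was stored next to each skb when the buffer was posted; the three loops that follow all share the same unmap-then-free shape. A generic sketch of that pattern (hypothetical names, plain C, nothing kernel-specific):

    // Bookkeeping kept per ring slot so teardown can undo the DMA mapping
    // before freeing the buffer (mirrors rx_*_skbuff[] plus dma_unmap_addr()).
    struct ring_slot {
            void *buf;                  // stands in for the sk_buff pointer
            unsigned long long dma;     // stands in for the dma_addr_t mapping
    };

    static void drain_ring(struct ring_slot *slot, int entries,
                           void (*unmap)(unsigned long long),
                           void (*free_buf)(void *))
    {
            for (int i = 0; i < entries; i++) {
                    if (!slot[i].buf)
                            continue;
                    unmap(slot[i].dma);      // pci_unmap_page() in the driver
                    free_buf(slot[i].buf);   // dev_kfree_skb() in the driver
                    slot[i].buf = NULL;
            }
    }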
+ */ + ace_sync_irq(dev->irq); + + for (i = 0; i < RX_STD_RING_ENTRIES; i++) { + struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb; + + if (skb) { + struct ring_info *ringp; + dma_addr_t mapping; + + ringp = &ap->skb->rx_std_skbuff[i]; + mapping = dma_unmap_addr(ringp, mapping); + pci_unmap_page(ap->pdev, mapping, + ACE_STD_BUFSIZE, + PCI_DMA_FROMDEVICE); + + ap->rx_std_ring[i].size = 0; + ap->skb->rx_std_skbuff[i].skb = NULL; + dev_kfree_skb(skb); + } + } + + if (ap->version >= 2) { + for (i = 0; i < RX_MINI_RING_ENTRIES; i++) { + struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb; + + if (skb) { + struct ring_info *ringp; + dma_addr_t mapping; + + ringp = &ap->skb->rx_mini_skbuff[i]; + mapping = dma_unmap_addr(ringp,mapping); + pci_unmap_page(ap->pdev, mapping, + ACE_MINI_BUFSIZE, + PCI_DMA_FROMDEVICE); + + ap->rx_mini_ring[i].size = 0; + ap->skb->rx_mini_skbuff[i].skb = NULL; + dev_kfree_skb(skb); + } + } + } + + for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) { + struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb; + if (skb) { + struct ring_info *ringp; + dma_addr_t mapping; + + ringp = &ap->skb->rx_jumbo_skbuff[i]; + mapping = dma_unmap_addr(ringp, mapping); + pci_unmap_page(ap->pdev, mapping, + ACE_JUMBO_BUFSIZE, + PCI_DMA_FROMDEVICE); + + ap->rx_jumbo_ring[i].size = 0; + ap->skb->rx_jumbo_skbuff[i].skb = NULL; + dev_kfree_skb(skb); + } + } + + ace_init_cleanup(dev); + free_netdev(dev); +} + +static struct pci_driver acenic_pci_driver = { + .name = "acenic", + .id_table = acenic_pci_tbl, + .probe = acenic_probe_one, + .remove = acenic_remove_one, +}; + +static void ace_free_descriptors(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + int size; + + if (ap->rx_std_ring != NULL) { + size = (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES + + RX_MINI_RING_ENTRIES + + RX_RETURN_RING_ENTRIES)); + pci_free_consistent(ap->pdev, size, ap->rx_std_ring, + ap->rx_ring_base_dma); + ap->rx_std_ring = NULL; + ap->rx_jumbo_ring = NULL; + ap->rx_mini_ring = NULL; + ap->rx_return_ring = NULL; + } + if (ap->evt_ring != NULL) { + size = (sizeof(struct event) * EVT_RING_ENTRIES); + pci_free_consistent(ap->pdev, size, ap->evt_ring, + ap->evt_ring_dma); + ap->evt_ring = NULL; + } + if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) { + size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); + pci_free_consistent(ap->pdev, size, ap->tx_ring, + ap->tx_ring_dma); + } + ap->tx_ring = NULL; + + if (ap->evt_prd != NULL) { + pci_free_consistent(ap->pdev, sizeof(u32), + (void *)ap->evt_prd, ap->evt_prd_dma); + ap->evt_prd = NULL; + } + if (ap->rx_ret_prd != NULL) { + pci_free_consistent(ap->pdev, sizeof(u32), + (void *)ap->rx_ret_prd, + ap->rx_ret_prd_dma); + ap->rx_ret_prd = NULL; + } + if (ap->tx_csm != NULL) { + pci_free_consistent(ap->pdev, sizeof(u32), + (void *)ap->tx_csm, ap->tx_csm_dma); + ap->tx_csm = NULL; + } +} + + +static int ace_allocate_descriptors(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + int size; + + size = (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES + + RX_MINI_RING_ENTRIES + + RX_RETURN_RING_ENTRIES)); + + ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size, + &ap->rx_ring_base_dma); + if (ap->rx_std_ring == NULL) + goto fail; + + ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES; + ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES; + ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES; + + size = (sizeof(struct event) * EVT_RING_ENTRIES); + + 
ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma); + + if (ap->evt_ring == NULL) + goto fail; + + /* + * Only allocate a host TX ring for the Tigon II, the Tigon I + * has to use PCI registers for this ;-( + */ + if (!ACE_IS_TIGON_I(ap)) { + size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); + + ap->tx_ring = pci_alloc_consistent(ap->pdev, size, + &ap->tx_ring_dma); + + if (ap->tx_ring == NULL) + goto fail; + } + + ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), + &ap->evt_prd_dma); + if (ap->evt_prd == NULL) + goto fail; + + ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), + &ap->rx_ret_prd_dma); + if (ap->rx_ret_prd == NULL) + goto fail; + + ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32), + &ap->tx_csm_dma); + if (ap->tx_csm == NULL) + goto fail; + + return 0; + +fail: + /* Clean up. */ + ace_init_cleanup(dev); + return 1; +} + + +/* + * Generic cleanup handling data allocated during init. Used when the + * module is unloaded or if an error occurs during initialization + */ +static void ace_init_cleanup(struct net_device *dev) +{ + struct ace_private *ap; + + ap = netdev_priv(dev); + + ace_free_descriptors(dev); + + if (ap->info) + pci_free_consistent(ap->pdev, sizeof(struct ace_info), + ap->info, ap->info_dma); + kfree(ap->skb); + kfree(ap->trace_buf); + + if (dev->irq) + free_irq(dev->irq, dev); + + iounmap(ap->regs); +} + + +/* + * Commands are considered to be slow. + */ +static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd) +{ + u32 idx; + + idx = readl(®s->CmdPrd); + + writel(*(u32 *)(cmd), ®s->CmdRng[idx]); + idx = (idx + 1) % CMD_RING_ENTRIES; + + writel(idx, ®s->CmdPrd); +} + + +static int ace_init(struct net_device *dev) +{ + struct ace_private *ap; + struct ace_regs __iomem *regs; + struct ace_info *info = NULL; + struct pci_dev *pdev; + unsigned long myjif; + u64 tmp_ptr; + u32 tig_ver, mac1, mac2, tmp, pci_state; + int board_idx, ecode = 0; + short i; + unsigned char cache_size; + + ap = netdev_priv(dev); + regs = ap->regs; + + board_idx = ap->board_idx; + + /* + * aman@sgi.com - its useful to do a NIC reset here to + * address the `Firmware not running' problem subsequent + * to any crashes involving the NIC + */ + writel(HW_RESET | (HW_RESET << 24), ®s->HostCtrl); + readl(®s->HostCtrl); /* PCI write posting */ + udelay(5); + + /* + * Don't access any other registers before this point! + */ +#ifdef __BIG_ENDIAN + /* + * This will most likely need BYTE_SWAP once we switch + * to using __raw_writel() + */ + writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)), + ®s->HostCtrl); +#else + writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)), + ®s->HostCtrl); +#endif + readl(®s->HostCtrl); /* PCI write posting */ + + /* + * Stop the NIC CPU and clear pending interrupts + */ + writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); + readl(®s->CpuCtrl); /* PCI write posting */ + writel(0, ®s->Mb0Lo); + + tig_ver = readl(®s->HostCtrl) >> 28; + + switch(tig_ver){ +#ifndef CONFIG_ACENIC_OMIT_TIGON_I + case 4: + case 5: + printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ", + tig_ver, ap->firmware_major, ap->firmware_minor, + ap->firmware_fix); + writel(0, ®s->LocalCtrl); + ap->version = 1; + ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES; + break; +#endif + case 6: + printk(KERN_INFO " Tigon II (Rev. 
%i), Firmware: %i.%i.%i, ", + tig_ver, ap->firmware_major, ap->firmware_minor, + ap->firmware_fix); + writel(readl(®s->CpuBCtrl) | CPU_HALT, ®s->CpuBCtrl); + readl(®s->CpuBCtrl); /* PCI write posting */ + /* + * The SRAM bank size does _not_ indicate the amount + * of memory on the card, it controls the _bank_ size! + * Ie. a 1MB AceNIC will have two banks of 512KB. + */ + writel(SRAM_BANK_512K, ®s->LocalCtrl); + writel(SYNC_SRAM_TIMING, ®s->MiscCfg); + ap->version = 2; + ap->tx_ring_entries = MAX_TX_RING_ENTRIES; + break; + default: + printk(KERN_WARNING " Unsupported Tigon version detected " + "(%i)\n", tig_ver); + ecode = -ENODEV; + goto init_error; + } + + /* + * ModeStat _must_ be set after the SRAM settings as this change + * seems to corrupt the ModeStat and possible other registers. + * The SRAM settings survive resets and setting it to the same + * value a second time works as well. This is what caused the + * `Firmware not running' problem on the Tigon II. + */ +#ifdef __BIG_ENDIAN + writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD | + ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, ®s->ModeStat); +#else + writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | + ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, ®s->ModeStat); +#endif + readl(®s->ModeStat); /* PCI write posting */ + + mac1 = 0; + for(i = 0; i < 4; i++) { + int t; + + mac1 = mac1 << 8; + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { + ecode = -EIO; + goto init_error; + } else + mac1 |= (t & 0xff); + } + mac2 = 0; + for(i = 4; i < 8; i++) { + int t; + + mac2 = mac2 << 8; + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { + ecode = -EIO; + goto init_error; + } else + mac2 |= (t & 0xff); + } + + writel(mac1, ®s->MacAddrHi); + writel(mac2, ®s->MacAddrLo); + + dev->dev_addr[0] = (mac1 >> 8) & 0xff; + dev->dev_addr[1] = mac1 & 0xff; + dev->dev_addr[2] = (mac2 >> 24) & 0xff; + dev->dev_addr[3] = (mac2 >> 16) & 0xff; + dev->dev_addr[4] = (mac2 >> 8) & 0xff; + dev->dev_addr[5] = mac2 & 0xff; + + printk("MAC: %pM\n", dev->dev_addr); + + /* + * Looks like this is necessary to deal with on all architectures, + * even this %$#%$# N440BX Intel based thing doesn't get it right. + * Ie. having two NICs in the machine, one will have the cache + * line set at boot time, the other will not. + */ + pdev = ap->pdev; + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size); + cache_size <<= 2; + if (cache_size != SMP_CACHE_BYTES) { + printk(KERN_INFO " PCI cache line size set incorrectly " + "(%i bytes) by BIOS/FW, ", cache_size); + if (cache_size > SMP_CACHE_BYTES) + printk("expecting %i\n", SMP_CACHE_BYTES); + else { + printk("correcting to %i\n", SMP_CACHE_BYTES); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + SMP_CACHE_BYTES >> 2); + } + } + + pci_state = readl(®s->PciState); + printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, " + "latency: %i clks\n", + (pci_state & PCI_32BIT) ? 32 : 64, + (pci_state & PCI_66MHZ) ? 66 : 33, + ap->pci_latency); + + /* + * Set the max DMA transfer size. Seems that for most systems + * the performance is better when no MAX parameter is + * set. However for systems enabling PCI write and invalidate, + * DMA writes must be set to the L1 cache line size to get + * optimal performance. + * + * The default is now to turn the PCI write and invalidate off + * - that is what Alteon does for NT. 
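One detail worth spelling out from the cache-line fixup above: the PCI cache line size register counts 32-bit words, which is why the value read is shifted left by two and SMP_CACHE_BYTES is shifted right by two before being written back. A two-line reminder of the conversion (illustrative only):

    // PCI_CACHE_LINE_SIZE is in units of 32-bit words, e.g. 64 bytes <-> 16.
    static unsigned int  cls_reg_to_bytes(unsigned char reg)  { return (unsigned int)reg << 2; }
    static unsigned char cls_bytes_to_reg(unsigned int bytes) { return (unsigned char)(bytes >> 2); }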
+ */ + tmp = READ_CMD_MEM | WRITE_CMD_MEM; + if (ap->version >= 2) { + tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ)); + /* + * Tuning parameters only supported for 8 cards + */ + if (board_idx == BOARD_IDX_OVERFLOW || + dis_pci_mem_inval[board_idx]) { + if (ap->pci_command & PCI_COMMAND_INVALIDATE) { + ap->pci_command &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(pdev, PCI_COMMAND, + ap->pci_command); + printk(KERN_INFO " Disabling PCI memory " + "write and invalidate\n"); + } + } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) { + printk(KERN_INFO " PCI memory write & invalidate " + "enabled by BIOS, enabling counter measures\n"); + + switch(SMP_CACHE_BYTES) { + case 16: + tmp |= DMA_WRITE_MAX_16; + break; + case 32: + tmp |= DMA_WRITE_MAX_32; + break; + case 64: + tmp |= DMA_WRITE_MAX_64; + break; + case 128: + tmp |= DMA_WRITE_MAX_128; + break; + default: + printk(KERN_INFO " Cache line size %i not " + "supported, PCI write and invalidate " + "disabled\n", SMP_CACHE_BYTES); + ap->pci_command &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(pdev, PCI_COMMAND, + ap->pci_command); + } + } + } + +#ifdef __sparc__ + /* + * On this platform, we know what the best dma settings + * are. We use 64-byte maximum bursts, because if we + * burst larger than the cache line size (or even cross + * a 64byte boundary in a single burst) the UltraSparc + * PCI controller will disconnect at 64-byte multiples. + * + * Read-multiple will be properly enabled above, and when + * set will give the PCI controller proper hints about + * prefetching. + */ + tmp &= ~DMA_READ_WRITE_MASK; + tmp |= DMA_READ_MAX_64; + tmp |= DMA_WRITE_MAX_64; +#endif +#ifdef __alpha__ + tmp &= ~DMA_READ_WRITE_MASK; + tmp |= DMA_READ_MAX_128; + /* + * All the docs say MUST NOT. Well, I did. + * Nothing terrible happens, if we load wrong size. + * Bit w&i still works better! + */ + tmp |= DMA_WRITE_MAX_128; +#endif + writel(tmp, ®s->PciState); + +#if 0 + /* + * The Host PCI bus controller driver has to set FBB. + * If all devices on that PCI bus support FBB, then the controller + * can enable FBB support in the Host PCI Bus controller (or on + * the PCI-PCI bridge if that applies). + * -ggg + */ + /* + * I have received reports from people having problems when this + * bit is enabled. + */ + if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) { + printk(KERN_INFO " Enabling PCI Fast Back to Back\n"); + ap->pci_command |= PCI_COMMAND_FAST_BACK; + pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command); + } +#endif + + /* + * Configure DMA attributes. + */ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + ap->pci_using_dac = 1; + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + ap->pci_using_dac = 0; + } else { + ecode = -ENODEV; + goto init_error; + } + + /* + * Initialize the generic info block and the command+event rings + * and the control blocks for the transmit and receive rings + * as they need to be setup once and for all. + */ + if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info), + &ap->info_dma))) { + ecode = -EAGAIN; + goto init_error; + } + ap->info = info; + + /* + * Get the memory for the skb rings. 
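For reference on the DMA mask fallback below: DMA_BIT_MASK(n) is simply an n-bit all-ones mask, so the probe first asks for 64-bit addressing and, failing that, settles for 32-bit and leaves pci_using_dac clear. A stand-alone equivalent for n up to 64 (my helper name, not a kernel symbol):

    #include <stdint.h>

    static uint64_t n_bit_mask(unsigned int n)
    {
            return (n >= 64) ? ~0ULL : ((1ULL << n) - 1);   // e.g. 32 -> 0xffffffff
    }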
+ */ + if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) { + ecode = -EAGAIN; + goto init_error; + } + + ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED, + DRV_NAME, dev); + if (ecode) { + printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", + DRV_NAME, pdev->irq); + goto init_error; + } else + dev->irq = pdev->irq; + +#ifdef INDEX_DEBUG + spin_lock_init(&ap->debug_lock); + ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1; + ap->last_std_rx = 0; + ap->last_mini_rx = 0; +#endif + + memset(ap->info, 0, sizeof(struct ace_info)); + memset(ap->skb, 0, sizeof(struct ace_skb)); + + ecode = ace_load_firmware(dev); + if (ecode) + goto init_error; + + ap->fw_running = 0; + + tmp_ptr = ap->info_dma; + writel(tmp_ptr >> 32, ®s->InfoPtrHi); + writel(tmp_ptr & 0xffffffff, ®s->InfoPtrLo); + + memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event)); + + set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma); + info->evt_ctrl.flags = 0; + + *(ap->evt_prd) = 0; + wmb(); + set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma); + writel(0, ®s->EvtCsm); + + set_aceaddr(&info->cmd_ctrl.rngptr, 0x100); + info->cmd_ctrl.flags = 0; + info->cmd_ctrl.max_len = 0; + + for (i = 0; i < CMD_RING_ENTRIES; i++) + writel(0, ®s->CmdRng[i]); + + writel(0, ®s->CmdPrd); + writel(0, ®s->CmdCsm); + + tmp_ptr = ap->info_dma; + tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats); + set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr); + + set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma); + info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE; + info->rx_std_ctrl.flags = + RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; + + memset(ap->rx_std_ring, 0, + RX_STD_RING_ENTRIES * sizeof(struct rx_desc)); + + for (i = 0; i < RX_STD_RING_ENTRIES; i++) + ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM; + + ap->rx_std_skbprd = 0; + atomic_set(&ap->cur_rx_bufs, 0); + + set_aceaddr(&info->rx_jumbo_ctrl.rngptr, + (ap->rx_ring_base_dma + + (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES))); + info->rx_jumbo_ctrl.max_len = 0; + info->rx_jumbo_ctrl.flags = + RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; + + memset(ap->rx_jumbo_ring, 0, + RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc)); + + for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) + ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO; + + ap->rx_jumbo_skbprd = 0; + atomic_set(&ap->cur_jumbo_bufs, 0); + + memset(ap->rx_mini_ring, 0, + RX_MINI_RING_ENTRIES * sizeof(struct rx_desc)); + + if (ap->version >= 2) { + set_aceaddr(&info->rx_mini_ctrl.rngptr, + (ap->rx_ring_base_dma + + (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES)))); + info->rx_mini_ctrl.max_len = ACE_MINI_SIZE; + info->rx_mini_ctrl.flags = + RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST; + + for (i = 0; i < RX_MINI_RING_ENTRIES; i++) + ap->rx_mini_ring[i].flags = + BD_FLG_TCP_UDP_SUM | BD_FLG_MINI; + } else { + set_aceaddr(&info->rx_mini_ctrl.rngptr, 0); + info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE; + info->rx_mini_ctrl.max_len = 0; + } + + ap->rx_mini_skbprd = 0; + atomic_set(&ap->cur_mini_bufs, 0); + + set_aceaddr(&info->rx_return_ctrl.rngptr, + (ap->rx_ring_base_dma + + (sizeof(struct rx_desc) * + (RX_STD_RING_ENTRIES + + RX_JUMBO_RING_ENTRIES + + RX_MINI_RING_ENTRIES)))); + info->rx_return_ctrl.flags = 0; + info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES; + + memset(ap->rx_return_ring, 0, + RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc)); + + set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma); 
+ *(ap->rx_ret_prd) = 0; + + writel(TX_RING_BASE, ®s->WinBase); + + if (ACE_IS_TIGON_I(ap)) { + ap->tx_ring = (__force struct tx_desc *) regs->Window; + for (i = 0; i < (TIGON_I_TX_RING_ENTRIES + * sizeof(struct tx_desc)) / sizeof(u32); i++) + writel(0, (__force void __iomem *)ap->tx_ring + i * 4); + + set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE); + } else { + memset(ap->tx_ring, 0, + MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)); + + set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma); + } + + info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap); + tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST; + + /* + * The Tigon I does not like having the TX ring in host memory ;-( + */ + if (!ACE_IS_TIGON_I(ap)) + tmp |= RCB_FLG_TX_HOST_RING; +#if TX_COAL_INTS_ONLY + tmp |= RCB_FLG_COAL_INT_ONLY; +#endif + info->tx_ctrl.flags = tmp; + + set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma); + + /* + * Potential item for tuning parameter + */ +#if 0 /* NO */ + writel(DMA_THRESH_16W, ®s->DmaReadCfg); + writel(DMA_THRESH_16W, ®s->DmaWriteCfg); +#else + writel(DMA_THRESH_8W, ®s->DmaReadCfg); + writel(DMA_THRESH_8W, ®s->DmaWriteCfg); +#endif + + writel(0, ®s->MaskInt); + writel(1, ®s->IfIdx); +#if 0 + /* + * McKinley boxes do not like us fiddling with AssistState + * this early + */ + writel(1, ®s->AssistState); +#endif + + writel(DEF_STAT, ®s->TuneStatTicks); + writel(DEF_TRACE, ®s->TuneTrace); + + ace_set_rxtx_parms(dev, 0); + + if (board_idx == BOARD_IDX_OVERFLOW) { + printk(KERN_WARNING "%s: more than %i NICs detected, " + "ignoring module parameters!\n", + ap->name, ACE_MAX_MOD_PARMS); + } else if (board_idx >= 0) { + if (tx_coal_tick[board_idx]) + writel(tx_coal_tick[board_idx], + ®s->TuneTxCoalTicks); + if (max_tx_desc[board_idx]) + writel(max_tx_desc[board_idx], ®s->TuneMaxTxDesc); + + if (rx_coal_tick[board_idx]) + writel(rx_coal_tick[board_idx], + ®s->TuneRxCoalTicks); + if (max_rx_desc[board_idx]) + writel(max_rx_desc[board_idx], ®s->TuneMaxRxDesc); + + if (trace[board_idx]) + writel(trace[board_idx], ®s->TuneTrace); + + if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64)) + writel(tx_ratio[board_idx], ®s->TxBufRat); + } + + /* + * Default link parameters + */ + tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB | + LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE; + if(ap->version >= 2) + tmp |= LNK_TX_FLOW_CTL_Y; + + /* + * Override link default parameters + */ + if ((board_idx >= 0) && link_state[board_idx]) { + int option = link_state[board_idx]; + + tmp = LNK_ENABLE; + + if (option & 0x01) { + printk(KERN_INFO "%s: Setting half duplex link\n", + ap->name); + tmp &= ~LNK_FULL_DUPLEX; + } + if (option & 0x02) + tmp &= ~LNK_NEGOTIATE; + if (option & 0x10) + tmp |= LNK_10MB; + if (option & 0x20) + tmp |= LNK_100MB; + if (option & 0x40) + tmp |= LNK_1000MB; + if ((option & 0x70) == 0) { + printk(KERN_WARNING "%s: No media speed specified, " + "forcing auto negotiation\n", ap->name); + tmp |= LNK_NEGOTIATE | LNK_1000MB | + LNK_100MB | LNK_10MB; + } + if ((option & 0x100) == 0) + tmp |= LNK_NEG_FCTL; + else + printk(KERN_INFO "%s: Disabling flow control " + "negotiation\n", ap->name); + if (option & 0x200) + tmp |= LNK_RX_FLOW_CTL_Y; + if ((option & 0x400) && (ap->version >= 2)) { + printk(KERN_INFO "%s: Enabling TX flow control\n", + ap->name); + tmp |= LNK_TX_FLOW_CTL_Y; + } + } + + ap->link = tmp; + writel(tmp, ®s->TuneLink); + if (ap->version >= 2) + writel(tmp, ®s->TuneFastLink); + + writel(ap->firmware_start, ®s->Pc); + + writel(0, ®s->Mb0Lo); + 
+ /* + * Set tx_csm before we start receiving interrupts, otherwise + * the interrupt handler might think it is supposed to process + * tx ints before we are up and running, which may cause a null + * pointer access in the int handler. + */ + ap->cur_rx = 0; + ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0; + + wmb(); + ace_set_txprd(regs, ap, 0); + writel(0, ®s->RxRetCsm); + + /* + * Enable DMA engine now. + * If we do this sooner, Mckinley box pukes. + * I assume it's because Tigon II DMA engine wants to check + * *something* even before the CPU is started. + */ + writel(1, ®s->AssistState); /* enable DMA */ + + /* + * Start the NIC CPU + */ + writel(readl(®s->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), ®s->CpuCtrl); + readl(®s->CpuCtrl); + + /* + * Wait for the firmware to spin up - max 3 seconds. + */ + myjif = jiffies + 3 * HZ; + while (time_before(jiffies, myjif) && !ap->fw_running) + cpu_relax(); + + if (!ap->fw_running) { + printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name); + + ace_dump_trace(ap); + writel(readl(®s->CpuCtrl) | CPU_HALT, ®s->CpuCtrl); + readl(®s->CpuCtrl); + + /* aman@sgi.com - account for badly behaving firmware/NIC: + * - have observed that the NIC may continue to generate + * interrupts for some reason; attempt to stop it - halt + * second CPU for Tigon II cards, and also clear Mb0 + * - if we're a module, we'll fail to load if this was + * the only GbE card in the system => if the kernel does + * see an interrupt from the NIC, code to handle it is + * gone and OOps! - so free_irq also + */ + if (ap->version >= 2) + writel(readl(®s->CpuBCtrl) | CPU_HALT, + ®s->CpuBCtrl); + writel(0, ®s->Mb0Lo); + readl(®s->Mb0Lo); + + ecode = -EBUSY; + goto init_error; + } + + /* + * We load the ring here as there seem to be no way to tell the + * firmware to wipe the ring without re-initializing it. 
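The firmware wait above is just a bounded poll: spin on fw_running (set later from the E_FW_RUNNING event) until a 3-second deadline passes. A user-space sketch of the same shape, using wall-clock time instead of jiffies and omitting cpu_relax() (illustrative names):

    #include <stdbool.h>
    #include <time.h>

    static bool wait_for_flag(volatile const int *flag, int timeout_s)
    {
            time_t deadline = time(NULL) + timeout_s;

            while (!*flag && time(NULL) < deadline)
                    ;                        // the driver relaxes the CPU here
            return *flag != 0;
    }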
+ */ + if (!test_and_set_bit(0, &ap->std_refill_busy)) + ace_load_std_rx_ring(dev, RX_RING_SIZE); + else + printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n", + ap->name); + if (ap->version >= 2) { + if (!test_and_set_bit(0, &ap->mini_refill_busy)) + ace_load_mini_rx_ring(dev, RX_MINI_SIZE); + else + printk(KERN_ERR "%s: Someone is busy refilling " + "the RX mini ring\n", ap->name); + } + return 0; + + init_error: + ace_init_cleanup(dev); + return ecode; +} + + +static void ace_set_rxtx_parms(struct net_device *dev, int jumbo) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + int board_idx = ap->board_idx; + + if (board_idx >= 0) { + if (!jumbo) { + if (!tx_coal_tick[board_idx]) + writel(DEF_TX_COAL, ®s->TuneTxCoalTicks); + if (!max_tx_desc[board_idx]) + writel(DEF_TX_MAX_DESC, ®s->TuneMaxTxDesc); + if (!rx_coal_tick[board_idx]) + writel(DEF_RX_COAL, ®s->TuneRxCoalTicks); + if (!max_rx_desc[board_idx]) + writel(DEF_RX_MAX_DESC, ®s->TuneMaxRxDesc); + if (!tx_ratio[board_idx]) + writel(DEF_TX_RATIO, ®s->TxBufRat); + } else { + if (!tx_coal_tick[board_idx]) + writel(DEF_JUMBO_TX_COAL, + ®s->TuneTxCoalTicks); + if (!max_tx_desc[board_idx]) + writel(DEF_JUMBO_TX_MAX_DESC, + ®s->TuneMaxTxDesc); + if (!rx_coal_tick[board_idx]) + writel(DEF_JUMBO_RX_COAL, + ®s->TuneRxCoalTicks); + if (!max_rx_desc[board_idx]) + writel(DEF_JUMBO_RX_MAX_DESC, + ®s->TuneMaxRxDesc); + if (!tx_ratio[board_idx]) + writel(DEF_JUMBO_TX_RATIO, ®s->TxBufRat); + } + } +} + + +static void ace_watchdog(struct net_device *data) +{ + struct net_device *dev = data; + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + + /* + * We haven't received a stats update event for more than 2.5 + * seconds and there is data in the transmit queue, thus we + * assume the card is stuck. + */ + if (*ap->tx_csm != ap->tx_ret_csm) { + printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n", + dev->name, (unsigned int)readl(®s->HostCtrl)); + /* This can happen due to ieee flow control. */ + } else { + printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n", + dev->name); +#if 0 + netif_wake_queue(dev); +#endif + } +} + + +static void ace_tasklet(unsigned long arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct ace_private *ap = netdev_priv(dev); + int cur_size; + + cur_size = atomic_read(&ap->cur_rx_bufs); + if ((cur_size < RX_LOW_STD_THRES) && + !test_and_set_bit(0, &ap->std_refill_busy)) { +#ifdef DEBUG + printk("refilling buffers (current %i)\n", cur_size); +#endif + ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size); + } + + if (ap->version >= 2) { + cur_size = atomic_read(&ap->cur_mini_bufs); + if ((cur_size < RX_LOW_MINI_THRES) && + !test_and_set_bit(0, &ap->mini_refill_busy)) { +#ifdef DEBUG + printk("refilling mini buffers (current %i)\n", + cur_size); +#endif + ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size); + } + } + + cur_size = atomic_read(&ap->cur_jumbo_bufs); + if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) && + !test_and_set_bit(0, &ap->jumbo_refill_busy)) { +#ifdef DEBUG + printk("refilling jumbo buffers (current %i)\n", cur_size); +#endif + ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size); + } + ap->tasklet_pending = 0; +} + + +/* + * Copy the contents of the NIC's trace buffer to kernel memory. + */ +static void ace_dump_trace(struct ace_private *ap) +{ +#if 0 + if (!ap->trace_buf) + if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL))) + return; +#endif +} + + +/* + * Load the standard rx ring. 
+ * + * Loading rings is safe without holding the spin lock since this is + * done only before the device is enabled, thus no interrupts are + * generated and by the interrupt handler/tasklet handler. + */ +static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i, idx; + + + prefetchw(&ap->cur_rx_bufs); + + idx = ap->rx_std_skbprd; + + for (i = 0; i < nr_bufs; i++) { + struct sk_buff *skb; + struct rx_desc *rd; + dma_addr_t mapping; + + skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE); + if (!skb) + break; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + ACE_STD_BUFSIZE, + PCI_DMA_FROMDEVICE); + ap->skb->rx_std_skbuff[idx].skb = skb; + dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], + mapping, mapping); + + rd = &ap->rx_std_ring[idx]; + set_aceaddr(&rd->addr, mapping); + rd->size = ACE_STD_BUFSIZE; + rd->idx = idx; + idx = (idx + 1) % RX_STD_RING_ENTRIES; + } + + if (!i) + goto error_out; + + atomic_add(i, &ap->cur_rx_bufs); + ap->rx_std_skbprd = idx; + + if (ACE_IS_TIGON_I(ap)) { + struct cmd cmd; + cmd.evt = C_SET_RX_PRD_IDX; + cmd.code = 0; + cmd.idx = ap->rx_std_skbprd; + ace_issue_cmd(regs, &cmd); + } else { + writel(idx, ®s->RxStdPrd); + wmb(); + } + + out: + clear_bit(0, &ap->std_refill_busy); + return; + + error_out: + printk(KERN_INFO "Out of memory when allocating " + "standard receive buffers\n"); + goto out; +} + + +static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i, idx; + + prefetchw(&ap->cur_mini_bufs); + + idx = ap->rx_mini_skbprd; + for (i = 0; i < nr_bufs; i++) { + struct sk_buff *skb; + struct rx_desc *rd; + dma_addr_t mapping; + + skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE); + if (!skb) + break; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + ACE_MINI_BUFSIZE, + PCI_DMA_FROMDEVICE); + ap->skb->rx_mini_skbuff[idx].skb = skb; + dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], + mapping, mapping); + + rd = &ap->rx_mini_ring[idx]; + set_aceaddr(&rd->addr, mapping); + rd->size = ACE_MINI_BUFSIZE; + rd->idx = idx; + idx = (idx + 1) % RX_MINI_RING_ENTRIES; + } + + if (!i) + goto error_out; + + atomic_add(i, &ap->cur_mini_bufs); + + ap->rx_mini_skbprd = idx; + + writel(idx, ®s->RxMiniPrd); + wmb(); + + out: + clear_bit(0, &ap->mini_refill_busy); + return; + error_out: + printk(KERN_INFO "Out of memory when allocating " + "mini receive buffers\n"); + goto out; +} + + +/* + * Load the jumbo rx ring, this may happen at any time if the MTU + * is changed to a value > 1500. 
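All three fill routines follow the same pattern: allocate, map, record the mapping, advance a producer index modulo the ring size, and bail out early if allocation fails. A condensed sketch of just that control flow (hypothetical callbacks, no DMA details):

    // Returns the new producer index; the caller then publishes it to the NIC
    // (register write on Tigon II, command on Tigon I) and bumps the buffer count.
    static int fill_ring(void **slots, int entries, int prod, int want,
                         void *(*alloc_buf)(void))
    {
            int added = 0;

            while (added < want) {
                    void *buf = alloc_buf();
                    if (!buf)
                            break;           // partial fill is fine; retry later
                    slots[prod] = buf;
                    prod = (prod + 1) % entries;
                    added++;
            }
            return prod;
    }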
+ */ +static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + short i, idx; + + idx = ap->rx_jumbo_skbprd; + + for (i = 0; i < nr_bufs; i++) { + struct sk_buff *skb; + struct rx_desc *rd; + dma_addr_t mapping; + + skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE); + if (!skb) + break; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + ACE_JUMBO_BUFSIZE, + PCI_DMA_FROMDEVICE); + ap->skb->rx_jumbo_skbuff[idx].skb = skb; + dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], + mapping, mapping); + + rd = &ap->rx_jumbo_ring[idx]; + set_aceaddr(&rd->addr, mapping); + rd->size = ACE_JUMBO_BUFSIZE; + rd->idx = idx; + idx = (idx + 1) % RX_JUMBO_RING_ENTRIES; + } + + if (!i) + goto error_out; + + atomic_add(i, &ap->cur_jumbo_bufs); + ap->rx_jumbo_skbprd = idx; + + if (ACE_IS_TIGON_I(ap)) { + struct cmd cmd; + cmd.evt = C_SET_RX_JUMBO_PRD_IDX; + cmd.code = 0; + cmd.idx = ap->rx_jumbo_skbprd; + ace_issue_cmd(regs, &cmd); + } else { + writel(idx, ®s->RxJumboPrd); + wmb(); + } + + out: + clear_bit(0, &ap->jumbo_refill_busy); + return; + error_out: + if (net_ratelimit()) + printk(KERN_INFO "Out of memory when allocating " + "jumbo receive buffers\n"); + goto out; +} + + +/* + * All events are considered to be slow (RX/TX ints do not generate + * events) and are handled here, outside the main interrupt handler, + * to reduce the size of the handler. + */ +static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd) +{ + struct ace_private *ap; + + ap = netdev_priv(dev); + + while (evtcsm != evtprd) { + switch (ap->evt_ring[evtcsm].evt) { + case E_FW_RUNNING: + printk(KERN_INFO "%s: Firmware up and running\n", + ap->name); + ap->fw_running = 1; + wmb(); + break; + case E_STATS_UPDATED: + break; + case E_LNK_STATE: + { + u16 code = ap->evt_ring[evtcsm].code; + switch (code) { + case E_C_LINK_UP: + { + u32 state = readl(&ap->regs->GigLnkState); + printk(KERN_WARNING "%s: Optical link UP " + "(%s Duplex, Flow Control: %s%s)\n", + ap->name, + state & LNK_FULL_DUPLEX ? "Full":"Half", + state & LNK_TX_FLOW_CTL_Y ? "TX " : "", + state & LNK_RX_FLOW_CTL_Y ? 
"RX" : ""); + break; + } + case E_C_LINK_DOWN: + printk(KERN_WARNING "%s: Optical link DOWN\n", + ap->name); + break; + case E_C_LINK_10_100: + printk(KERN_WARNING "%s: 10/100BaseT link " + "UP\n", ap->name); + break; + default: + printk(KERN_ERR "%s: Unknown optical link " + "state %02x\n", ap->name, code); + } + break; + } + case E_ERROR: + switch(ap->evt_ring[evtcsm].code) { + case E_C_ERR_INVAL_CMD: + printk(KERN_ERR "%s: invalid command error\n", + ap->name); + break; + case E_C_ERR_UNIMP_CMD: + printk(KERN_ERR "%s: unimplemented command " + "error\n", ap->name); + break; + case E_C_ERR_BAD_CFG: + printk(KERN_ERR "%s: bad config error\n", + ap->name); + break; + default: + printk(KERN_ERR "%s: unknown error %02x\n", + ap->name, ap->evt_ring[evtcsm].code); + } + break; + case E_RESET_JUMBO_RNG: + { + int i; + for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) { + if (ap->skb->rx_jumbo_skbuff[i].skb) { + ap->rx_jumbo_ring[i].size = 0; + set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0); + dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb); + ap->skb->rx_jumbo_skbuff[i].skb = NULL; + } + } + + if (ACE_IS_TIGON_I(ap)) { + struct cmd cmd; + cmd.evt = C_SET_RX_JUMBO_PRD_IDX; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(ap->regs, &cmd); + } else { + writel(0, &((ap->regs)->RxJumboPrd)); + wmb(); + } + + ap->jumbo = 0; + ap->rx_jumbo_skbprd = 0; + printk(KERN_INFO "%s: Jumbo ring flushed\n", + ap->name); + clear_bit(0, &ap->jumbo_refill_busy); + break; + } + default: + printk(KERN_ERR "%s: Unhandled event 0x%02x\n", + ap->name, ap->evt_ring[evtcsm].evt); + } + evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES; + } + + return evtcsm; +} + + +static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) +{ + struct ace_private *ap = netdev_priv(dev); + u32 idx; + int mini_count = 0, std_count = 0; + + idx = rxretcsm; + + prefetchw(&ap->cur_rx_bufs); + prefetchw(&ap->cur_mini_bufs); + + while (idx != rxretprd) { + struct ring_info *rip; + struct sk_buff *skb; + struct rx_desc *rxdesc, *retdesc; + u32 skbidx; + int bd_flags, desc_type, mapsize; + u16 csum; + + + /* make sure the rx descriptor isn't read before rxretprd */ + if (idx == rxretcsm) + rmb(); + + retdesc = &ap->rx_return_ring[idx]; + skbidx = retdesc->idx; + bd_flags = retdesc->flags; + desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI); + + switch(desc_type) { + /* + * Normal frames do not have any flags set + * + * Mini and normal frames arrive frequently, + * so use a local counter to avoid doing + * atomic operations for each packet arriving. + */ + case 0: + rip = &ap->skb->rx_std_skbuff[skbidx]; + mapsize = ACE_STD_BUFSIZE; + rxdesc = &ap->rx_std_ring[skbidx]; + std_count++; + break; + case BD_FLG_JUMBO: + rip = &ap->skb->rx_jumbo_skbuff[skbidx]; + mapsize = ACE_JUMBO_BUFSIZE; + rxdesc = &ap->rx_jumbo_ring[skbidx]; + atomic_dec(&ap->cur_jumbo_bufs); + break; + case BD_FLG_MINI: + rip = &ap->skb->rx_mini_skbuff[skbidx]; + mapsize = ACE_MINI_BUFSIZE; + rxdesc = &ap->rx_mini_ring[skbidx]; + mini_count++; + break; + default: + printk(KERN_INFO "%s: unknown frame type (0x%02x) " + "returned by NIC\n", dev->name, + retdesc->flags); + goto error; + } + + skb = rip->skb; + rip->skb = NULL; + pci_unmap_page(ap->pdev, + dma_unmap_addr(rip, mapping), + mapsize, + PCI_DMA_FROMDEVICE); + skb_put(skb, retdesc->size); + + /* + * Fly baby, fly! + */ + csum = retdesc->tcp_udp_csum; + + skb->protocol = eth_type_trans(skb, dev); + + /* + * Instead of forcing the poor tigon mips cpu to calculate + * pseudo hdr checksum, we do this ourselves. 
+ */ + if (bd_flags & BD_FLG_TCP_UDP_SUM) { + skb->csum = htons(csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } else { + skb_checksum_none_assert(skb); + } + + /* send it up */ + if ((bd_flags & BD_FLG_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan); + netif_rx(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += retdesc->size; + + idx = (idx + 1) % RX_RETURN_RING_ENTRIES; + } + + atomic_sub(std_count, &ap->cur_rx_bufs); + if (!ACE_IS_TIGON_I(ap)) + atomic_sub(mini_count, &ap->cur_mini_bufs); + + out: + /* + * According to the documentation RxRetCsm is obsolete with + * the 12.3.x Firmware - my Tigon I NICs seem to disagree! + */ + if (ACE_IS_TIGON_I(ap)) { + writel(idx, &ap->regs->RxRetCsm); + } + ap->cur_rx = idx; + + return; + error: + idx = rxretprd; + goto out; +} + + +static inline void ace_tx_int(struct net_device *dev, + u32 txcsm, u32 idx) +{ + struct ace_private *ap = netdev_priv(dev); + + do { + struct sk_buff *skb; + struct tx_ring_info *info; + + info = ap->skb->tx_skbuff + idx; + skb = info->skb; + + if (dma_unmap_len(info, maplen)) { + pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_len(info, maplen), + PCI_DMA_TODEVICE); + dma_unmap_len_set(info, maplen, 0); + } + + if (skb) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev_kfree_skb_irq(skb); + info->skb = NULL; + } + + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + } while (idx != txcsm); + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + wmb(); + ap->tx_ret_csm = txcsm; + + /* So... tx_ret_csm is advanced _after_ check for device wakeup. + * + * We could try to make it before. In this case we would get + * the following race condition: hard_start_xmit on other cpu + * enters after we advanced tx_ret_csm and fills space, + * which we have just freed, so that we make illegal device wakeup. + * There is no good way to workaround this (at entry + * to ace_start_xmit detects this condition and prevents + * ring corruption, but it is not a good workaround.) + * + * When tx_ret_csm is advanced after, we wake up device _only_ + * if we really have some space in ring (though the core doing + * hard_start_xmit can see full ring for some period and has to + * synchronize.) Superb. + * BUT! We get another subtle race condition. hard_start_xmit + * may think that ring is full between wakeup and advancing + * tx_ret_csm and will stop device instantly! It is not so bad. + * We are guaranteed that there is something in ring, so that + * the next irq will resume transmission. To speedup this we could + * mark descriptor, which closes ring with BD_FLG_COAL_NOW + * (see ace_start_xmit). + * + * Well, this dilemma exists in all lock-free devices. + * We, following scheme used in drivers by Donald Becker, + * select the least dangerous. + * --ANK + */ +} + + +static irqreturn_t ace_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + u32 idx; + u32 txcsm, rxretcsm, rxretprd; + u32 evtcsm, evtprd; + + /* + * In case of PCI shared interrupts or spurious interrupts, + * we want to make sure it is actually our interrupt before + * spending any time in here. + */ + if (!(readl(®s->HostCtrl) & IN_INT)) + return IRQ_NONE; + + /* + * ACK intr now. Otherwise we will lose updates to rx_ret_prd, + * which happened _after_ rxretprd = *ap->rx_ret_prd; but before + * writel(0, ®s->Mb0Lo). 
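The tx_ring_full() check referenced further down compares producer and consumer indices; with a power-of-two ring such a test reduces to masked index arithmetic, roughly as below. The power-of-two size and the one-spare-slot convention are assumptions of this sketch, not a description of the driver's exact helper (which lives in acenic.h):

    // Classic full test for a power-of-two ring that keeps one slot spare:
    // the ring is "full" when advancing the producer would land on the consumer.
    static int ring_full(unsigned int prod, unsigned int cons, unsigned int entries)
    {
            return ((prod + 1) & (entries - 1)) == (cons & (entries - 1));
    }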
+ * + * "IRQ avoidance" recommended in docs applies to IRQs served + * threads and it is wrong even for that case. + */ + writel(0, ®s->Mb0Lo); + readl(®s->Mb0Lo); + + /* + * There is no conflict between transmit handling in + * start_xmit and receive processing, thus there is no reason + * to take a spin lock for RX handling. Wait until we start + * working on the other stuff - hey we don't need a spin lock + * anymore. + */ + rxretprd = *ap->rx_ret_prd; + rxretcsm = ap->cur_rx; + + if (rxretprd != rxretcsm) + ace_rx_int(dev, rxretprd, rxretcsm); + + txcsm = *ap->tx_csm; + idx = ap->tx_ret_csm; + + if (txcsm != idx) { + /* + * If each skb takes only one descriptor this check degenerates + * to identity, because new space has just been opened. + * But if skbs are fragmented we must check that this index + * update releases enough of space, otherwise we just + * wait for device to make more work. + */ + if (!tx_ring_full(ap, txcsm, ap->tx_prd)) + ace_tx_int(dev, txcsm, idx); + } + + evtcsm = readl(®s->EvtCsm); + evtprd = *ap->evt_prd; + + if (evtcsm != evtprd) { + evtcsm = ace_handle_event(dev, evtcsm, evtprd); + writel(evtcsm, ®s->EvtCsm); + } + + /* + * This has to go last in the interrupt handler and run with + * the spin lock released ... what lock? + */ + if (netif_running(dev)) { + int cur_size; + int run_tasklet = 0; + + cur_size = atomic_read(&ap->cur_rx_bufs); + if (cur_size < RX_LOW_STD_THRES) { + if ((cur_size < RX_PANIC_STD_THRES) && + !test_and_set_bit(0, &ap->std_refill_busy)) { +#ifdef DEBUG + printk("low on std buffers %i\n", cur_size); +#endif + ace_load_std_rx_ring(dev, + RX_RING_SIZE - cur_size); + } else + run_tasklet = 1; + } + + if (!ACE_IS_TIGON_I(ap)) { + cur_size = atomic_read(&ap->cur_mini_bufs); + if (cur_size < RX_LOW_MINI_THRES) { + if ((cur_size < RX_PANIC_MINI_THRES) && + !test_and_set_bit(0, + &ap->mini_refill_busy)) { +#ifdef DEBUG + printk("low on mini buffers %i\n", + cur_size); +#endif + ace_load_mini_rx_ring(dev, + RX_MINI_SIZE - cur_size); + } else + run_tasklet = 1; + } + } + + if (ap->jumbo) { + cur_size = atomic_read(&ap->cur_jumbo_bufs); + if (cur_size < RX_LOW_JUMBO_THRES) { + if ((cur_size < RX_PANIC_JUMBO_THRES) && + !test_and_set_bit(0, + &ap->jumbo_refill_busy)){ +#ifdef DEBUG + printk("low on jumbo buffers %i\n", + cur_size); +#endif + ace_load_jumbo_rx_ring(dev, + RX_JUMBO_SIZE - cur_size); + } else + run_tasklet = 1; + } + } + if (run_tasklet && !ap->tasklet_pending) { + ap->tasklet_pending = 1; + tasklet_schedule(&ap->ace_tasklet); + } + } + + return IRQ_HANDLED; +} + +static int ace_open(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct cmd cmd; + + if (!(ap->fw_running)) { + printk(KERN_WARNING "%s: Firmware not running!\n", dev->name); + return -EBUSY; + } + + writel(dev->mtu + ETH_HLEN + 4, ®s->IfMtu); + + cmd.evt = C_CLEAR_STATS; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + cmd.evt = C_HOST_STATE; + cmd.code = C_C_STACK_UP; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + if (ap->jumbo && + !test_and_set_bit(0, &ap->jumbo_refill_busy)) + ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE); + + if (dev->flags & IFF_PROMISC) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_ENABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + ap->promisc = 1; + }else + ap->promisc = 0; + ap->mcast_all = 0; + +#if 0 + cmd.evt = C_LNK_NEGOTIATION; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); +#endif + + netif_start_queue(dev); + + /* + * Setup the 
bottom half rx ring refill handler + */ + tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev); + return 0; +} + + +static int ace_close(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct cmd cmd; + unsigned long flags; + short i; + + /* + * Without (or before) releasing irq and stopping hardware, this + * is an absolute non-sense, by the way. It will be reset instantly + * by the first irq. + */ + netif_stop_queue(dev); + + + if (ap->promisc) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_DISABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->promisc = 0; + } + + cmd.evt = C_HOST_STATE; + cmd.code = C_C_STACK_DOWN; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + tasklet_kill(&ap->ace_tasklet); + + /* + * Make sure one CPU is not processing packets while + * buffers are being released by another. + */ + + local_irq_save(flags); + ace_mask_irq(dev); + + for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { + struct sk_buff *skb; + struct tx_ring_info *info; + + info = ap->skb->tx_skbuff + i; + skb = info->skb; + + if (dma_unmap_len(info, maplen)) { + if (ACE_IS_TIGON_I(ap)) { + /* NB: TIGON_1 is special, tx_ring is in io space */ + struct tx_desc __iomem *tx; + tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; + writel(0, &tx->addr.addrhi); + writel(0, &tx->addr.addrlo); + writel(0, &tx->flagsize); + } else + memset(ap->tx_ring + i, 0, + sizeof(struct tx_desc)); + pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_len(info, maplen), + PCI_DMA_TODEVICE); + dma_unmap_len_set(info, maplen, 0); + } + if (skb) { + dev_kfree_skb(skb); + info->skb = NULL; + } + } + + if (ap->jumbo) { + cmd.evt = C_RESET_JUMBO_RNG; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + } + + ace_unmask_irq(dev); + local_irq_restore(flags); + + return 0; +} + + +static inline dma_addr_t +ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, + struct sk_buff *tail, u32 idx) +{ + dma_addr_t mapping; + struct tx_ring_info *info; + + mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + skb->len, PCI_DMA_TODEVICE); + + info = ap->skb->tx_skbuff + idx; + info->skb = tail; + dma_unmap_addr_set(info, mapping, mapping); + dma_unmap_len_set(info, maplen, skb->len); + return mapping; +} + + +static inline void +ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, + u32 flagsize, u32 vlan_tag) +{ +#if !USE_TX_COAL_NOW + flagsize &= ~BD_FLG_COAL_NOW; +#endif + + if (ACE_IS_TIGON_I(ap)) { + struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc; + writel(addr >> 32, &io->addr.addrhi); + writel(addr & 0xffffffff, &io->addr.addrlo); + writel(flagsize, &io->flagsize); + writel(vlan_tag, &io->vlanres); + } else { + desc->addr.addrhi = addr >> 32; + desc->addr.addrlo = addr; + desc->flagsize = flagsize; + desc->vlanres = vlan_tag; + } +} + + +static netdev_tx_t ace_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct tx_desc *desc; + u32 idx, flagsize; + unsigned long maxjiff = jiffies + 3*HZ; + +restart: + idx = ap->tx_prd; + + if (tx_ring_full(ap, ap->tx_ret_csm, idx)) + goto overflow; + + if (!skb_shinfo(skb)->nr_frags) { + dma_addr_t mapping; + u32 vlan_tag = 0; + + mapping = ace_map_tx_skb(ap, skb, skb, idx); + flagsize = (skb->len << 16) | (BD_FLG_END); + if (skb->ip_summed == CHECKSUM_PARTIAL) + flagsize |= BD_FLG_TCP_UDP_SUM; 
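+		/*
+		 * flagsize packs the buffer length into the upper 16 bits
+		 * and the BD_FLG_* bits into the lower 16, matching the
+		 * size/flags halves of struct tx_desc noted in acenic.h.
+		 */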
+ if (skb_vlan_tag_present(skb)) { + flagsize |= BD_FLG_VLAN_TAG; + vlan_tag = skb_vlan_tag_get(skb); + } + desc = ap->tx_ring + idx; + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + + /* Look at ace_tx_int for explanations. */ + if (tx_ring_full(ap, ap->tx_ret_csm, idx)) + flagsize |= BD_FLG_COAL_NOW; + + ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); + } else { + dma_addr_t mapping; + u32 vlan_tag = 0; + int i, len = 0; + + mapping = ace_map_tx_skb(ap, skb, NULL, idx); + flagsize = (skb_headlen(skb) << 16); + if (skb->ip_summed == CHECKSUM_PARTIAL) + flagsize |= BD_FLG_TCP_UDP_SUM; + if (skb_vlan_tag_present(skb)) { + flagsize |= BD_FLG_VLAN_TAG; + vlan_tag = skb_vlan_tag_get(skb); + } + + ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); + + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + struct tx_ring_info *info; + + len += skb_frag_size(frag); + info = ap->skb->tx_skbuff + idx; + desc = ap->tx_ring + idx; + + mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + + flagsize = skb_frag_size(frag) << 16; + if (skb->ip_summed == CHECKSUM_PARTIAL) + flagsize |= BD_FLG_TCP_UDP_SUM; + idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); + + if (i == skb_shinfo(skb)->nr_frags - 1) { + flagsize |= BD_FLG_END; + if (tx_ring_full(ap, ap->tx_ret_csm, idx)) + flagsize |= BD_FLG_COAL_NOW; + + /* + * Only the last fragment frees + * the skb! + */ + info->skb = skb; + } else { + info->skb = NULL; + } + dma_unmap_addr_set(info, mapping, mapping); + dma_unmap_len_set(info, maplen, skb_frag_size(frag)); + ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); + } + } + + wmb(); + ap->tx_prd = idx; + ace_set_txprd(regs, ap, idx); + + if (flagsize & BD_FLG_COAL_NOW) { + netif_stop_queue(dev); + + /* + * A TX-descriptor producer (an IRQ) might have gotten + * between, making the ring free again. Since xmit is + * serialized, this is the only situation we have to + * re-test. + */ + if (!tx_ring_full(ap, ap->tx_ret_csm, idx)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +overflow: + /* + * This race condition is unavoidable with lock-free drivers. + * We wake up the queue _before_ tx_prd is advanced, so that we can + * enter hard_start_xmit too early, while tx ring still looks closed. + * This happens ~1-4 times per 100000 packets, so that we can allow + * to loop syncing to other CPU. Probably, we need an additional + * wmb() in ace_tx_intr as well. + * + * Note that this race is relieved by reserving one more entry + * in tx ring than it is necessary (see original non-SG driver). + * However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which + * is already overkill. + * + * Alternative is to return with 1 not throttling queue. In this + * case loop becomes longer, no more useful effects. + */ + if (time_before(jiffies, maxjiff)) { + barrier(); + cpu_relax(); + goto restart; + } + + /* The ring is stuck full. 
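+ *
+ * For reference, the space test used on this path is defined in
+ * acenic.h as
+ *
+ *	tx_space(ap, csm, prd)     == (csm - prd - 1) &
+ *				      (ACE_TX_RING_ENTRIES(ap) - 1)
+ *	tx_ring_full(ap, csm, prd) == (tx_space(ap, csm, prd) <=
+ *				       TX_RESERVED)
+ *
+ * with TX_RESERVED = MAX_SKB_FRAGS, so a full fragment list worth of
+ * slots is always kept in reserve before the queue is declared full.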
*/ + printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name); + return NETDEV_TX_BUSY; +} + + +static int ace_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + + if (new_mtu > ACE_JUMBO_MTU) + return -EINVAL; + + writel(new_mtu + ETH_HLEN + 4, ®s->IfMtu); + dev->mtu = new_mtu; + + if (new_mtu > ACE_STD_MTU) { + if (!(ap->jumbo)) { + printk(KERN_INFO "%s: Enabling Jumbo frame " + "support\n", dev->name); + ap->jumbo = 1; + if (!test_and_set_bit(0, &ap->jumbo_refill_busy)) + ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE); + ace_set_rxtx_parms(dev, 1); + } + } else { + while (test_and_set_bit(0, &ap->jumbo_refill_busy)); + ace_sync_irq(dev->irq); + ace_set_rxtx_parms(dev, 0); + if (ap->jumbo) { + struct cmd cmd; + + cmd.evt = C_RESET_JUMBO_RNG; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + } + } + + return 0; +} + +static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + u32 link; + + memset(ecmd, 0, sizeof(struct ethtool_cmd)); + ecmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_FIBRE); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_INTERNAL; + + link = readl(®s->GigLnkState); + if (link & LNK_1000MB) + ethtool_cmd_speed_set(ecmd, SPEED_1000); + else { + link = readl(®s->FastLnkState); + if (link & LNK_100MB) + ethtool_cmd_speed_set(ecmd, SPEED_100); + else if (link & LNK_10MB) + ethtool_cmd_speed_set(ecmd, SPEED_10); + else + ethtool_cmd_speed_set(ecmd, 0); + } + if (link & LNK_FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + + if (link & LNK_NEGOTIATE) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + +#if 0 + /* + * Current struct ethtool_cmd is insufficient + */ + ecmd->trace = readl(®s->TuneTrace); + + ecmd->txcoal = readl(®s->TuneTxCoalTicks); + ecmd->rxcoal = readl(®s->TuneRxCoalTicks); +#endif + ecmd->maxtxpkt = readl(®s->TuneMaxTxDesc); + ecmd->maxrxpkt = readl(®s->TuneMaxRxDesc); + + return 0; +} + +static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + u32 link, speed; + + link = readl(®s->GigLnkState); + if (link & LNK_1000MB) + speed = SPEED_1000; + else { + link = readl(®s->FastLnkState); + if (link & LNK_100MB) + speed = SPEED_100; + else if (link & LNK_10MB) + speed = SPEED_10; + else + speed = SPEED_100; + } + + link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB | + LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL; + if (!ACE_IS_TIGON_I(ap)) + link |= LNK_TX_FLOW_CTL_Y; + if (ecmd->autoneg == AUTONEG_ENABLE) + link |= LNK_NEGOTIATE; + if (ethtool_cmd_speed(ecmd) != speed) { + link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB); + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_1000: + link |= LNK_1000MB; + break; + case SPEED_100: + link |= LNK_100MB; + break; + case SPEED_10: + link |= LNK_10MB; + break; + } + } + + if (ecmd->duplex == DUPLEX_FULL) + link |= LNK_FULL_DUPLEX; + + if (link != ap->link) { + struct cmd cmd; + printk(KERN_INFO "%s: Renegotiating link state\n", + dev->name); + + ap->link = link; + writel(link, ®s->TuneLink); + if (!ACE_IS_TIGON_I(ap)) + writel(link, ®s->TuneFastLink); + wmb(); + + cmd.evt = 
C_LNK_NEGOTIATION; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + } + return 0; +} + +static void ace_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ace_private *ap = netdev_priv(dev); + + strlcpy(info->driver, "acenic", sizeof(info->driver)); + snprintf(info->version, sizeof(info->version), "%i.%i.%i", + ap->firmware_major, ap->firmware_minor, + ap->firmware_fix); + + if (ap->pdev) + strlcpy(info->bus_info, pci_name(ap->pdev), + sizeof(info->bus_info)); + +} + +/* + * Set the hardware MAC address. + */ +static int ace_set_mac_addr(struct net_device *dev, void *p) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct sockaddr *addr=p; + u8 *da; + struct cmd cmd; + + if(netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); + + da = (u8 *)dev->dev_addr; + + writel(da[0] << 8 | da[1], ®s->MacAddrHi); + writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5], + ®s->MacAddrLo); + + cmd.evt = C_SET_MAC_ADDR; + cmd.code = 0; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + + return 0; +} + + +static void ace_set_multicast_list(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + struct cmd cmd; + + if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) { + cmd.evt = C_SET_MULTICAST_MODE; + cmd.code = C_C_MCAST_ENABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->mcast_all = 1; + } else if (ap->mcast_all) { + cmd.evt = C_SET_MULTICAST_MODE; + cmd.code = C_C_MCAST_DISABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->mcast_all = 0; + } + + if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_ENABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->promisc = 1; + }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) { + cmd.evt = C_SET_PROMISC_MODE; + cmd.code = C_C_PROMISC_DISABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + ap->promisc = 0; + } + + /* + * For the time being multicast relies on the upper layers + * filtering it properly. The Firmware does not allow one to + * set the entire multicast list at a time and keeping track of + * it here is going to be messy. 
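+ *
+ * A per-address scheme would have to replay the list one entry at a
+ * time with C_ADD_MULTICAST_ADDR / C_DEL_MULTICAST_ADDR, hypothetically
+ * something like
+ *
+ *	struct netdev_hw_addr *ha;
+ *	netdev_for_each_mc_addr(ha, dev) {
+ *		cmd.evt = C_ADD_MULTICAST_ADDR;
+ *		cmd.code = 0;
+ *		cmd.idx = 0;
+ *		ace_issue_cmd(regs, &cmd);
+ *	}
+ *
+ * but how the address itself is handed to the firmware is not covered
+ * here, so the driver just toggles C_SET_MULTICAST_MODE below instead.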
+ */ + if (!netdev_mc_empty(dev) && !ap->mcast_all) { + cmd.evt = C_SET_MULTICAST_MODE; + cmd.code = C_C_MCAST_ENABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + }else if (!ap->mcast_all) { + cmd.evt = C_SET_MULTICAST_MODE; + cmd.code = C_C_MCAST_DISABLE; + cmd.idx = 0; + ace_issue_cmd(regs, &cmd); + } +} + + +static struct net_device_stats *ace_get_stats(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_mac_stats __iomem *mac_stats = + (struct ace_mac_stats __iomem *)ap->regs->Stats; + + dev->stats.rx_missed_errors = readl(&mac_stats->drop_space); + dev->stats.multicast = readl(&mac_stats->kept_mc); + dev->stats.collisions = readl(&mac_stats->coll); + + return &dev->stats; +} + + +static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src, + u32 dest, int size) +{ + void __iomem *tdest; + short tsize, i; + + if (size <= 0) + return; + + while (size > 0) { + tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1), + min_t(u32, size, ACE_WINDOW_SIZE)); + tdest = (void __iomem *) ®s->Window + + (dest & (ACE_WINDOW_SIZE - 1)); + writel(dest & ~(ACE_WINDOW_SIZE - 1), ®s->WinBase); + for (i = 0; i < (tsize / 4); i++) { + /* Firmware is big-endian */ + writel(be32_to_cpup(src), tdest); + src++; + tdest += 4; + dest += 4; + size -= 4; + } + } +} + + +static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size) +{ + void __iomem *tdest; + short tsize = 0, i; + + if (size <= 0) + return; + + while (size > 0) { + tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1), + min_t(u32, size, ACE_WINDOW_SIZE)); + tdest = (void __iomem *) ®s->Window + + (dest & (ACE_WINDOW_SIZE - 1)); + writel(dest & ~(ACE_WINDOW_SIZE - 1), ®s->WinBase); + + for (i = 0; i < (tsize / 4); i++) { + writel(0, tdest + i*4); + } + + dest += tsize; + size -= tsize; + } +} + + +/* + * Download the firmware into the SRAM on the NIC + * + * This operation requires the NIC to be halted and is performed with + * interrupts disabled and with the spinlock hold. + */ +static int ace_load_firmware(struct net_device *dev) +{ + const struct firmware *fw; + const char *fw_name = "/*(DEBLOBBED)*/"; + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + const __be32 *fw_data; + u32 load_addr; + int ret; + + if (!(readl(®s->CpuCtrl) & CPU_HALTED)) { + printk(KERN_ERR "%s: trying to download firmware while the " + "CPU is running!\n", ap->name); + return -EFAULT; + } + + if (ACE_IS_TIGON_I(ap)) + fw_name = "/*(DEBLOBBED)*/"; + + ret = reject_firmware(&fw, fw_name, &ap->pdev->dev); + if (ret) { + printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", + ap->name, fw_name); + return ret; + } + + fw_data = (void *)fw->data; + + /* Firmware blob starts with version numbers, followed by + load and start address. Remainder is the blob to be loaded + contiguously from load address. We don't bother to represent + the BSS/SBSS sections any more, since we were clearing the + whole thing anyway. 
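+
+	   As consumed below (fw_data is the image viewed as big-endian
+	   32-bit words):
+
+		fw->data[0..2]	major / minor / fix version bytes
+		fw_data[1]	firmware start address (0x4000 - 0x7ffff)
+		fw_data[2]	SRAM load address      (0x4000 - 0x7ffff)
+		fw_data[3..]	image of fw->size - 12 bytes, copied to the
+				load address by ace_copy()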
*/ + ap->firmware_major = fw->data[0]; + ap->firmware_minor = fw->data[1]; + ap->firmware_fix = fw->data[2]; + + ap->firmware_start = be32_to_cpu(fw_data[1]); + if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) { + printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n", + ap->name, ap->firmware_start, fw_name); + ret = -EINVAL; + goto out; + } + + load_addr = be32_to_cpu(fw_data[2]); + if (load_addr < 0x4000 || load_addr >= 0x80000) { + printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n", + ap->name, load_addr, fw_name); + ret = -EINVAL; + goto out; + } + + /* + * Do not try to clear more than 512KiB or we end up seeing + * funny things on NICs with only 512KiB SRAM + */ + ace_clear(regs, 0x2000, 0x80000-0x2000); + ace_copy(regs, &fw_data[3], load_addr, fw->size-12); + out: + release_firmware(fw); + return ret; +} + + +/* + * The eeprom on the AceNIC is an Atmel i2c EEPROM. + * + * Accessing the EEPROM is `interesting' to say the least - don't read + * this code right after dinner. + * + * This is all about black magic and bit-banging the device .... I + * wonder in what hospital they have put the guy who designed the i2c + * specs. + * + * Oh yes, this is only the beginning! + * + * Thanks to Stevarino Webinski for helping tracking down the bugs in the + * code i2c readout code by beta testing all my hacks. + */ +static void eeprom_start(struct ace_regs __iomem *regs) +{ + u32 local; + + readl(®s->LocalCtrl); + udelay(ACE_SHORT_DELAY); + local = readl(®s->LocalCtrl); + local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + local |= EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + local &= ~EEPROM_DATA_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + local &= ~EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); +} + + +static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic) +{ + short i; + u32 local; + + udelay(ACE_SHORT_DELAY); + local = readl(®s->LocalCtrl); + local &= ~EEPROM_DATA_OUT; + local |= EEPROM_WRITE_ENABLE; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + + for (i = 0; i < 8; i++, magic <<= 1) { + udelay(ACE_SHORT_DELAY); + if (magic & 0x80) + local |= EEPROM_DATA_OUT; + else + local &= ~EEPROM_DATA_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + + udelay(ACE_SHORT_DELAY); + local |= EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT); + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + } +} + + +static int eeprom_check_ack(struct ace_regs __iomem *regs) +{ + int state; + u32 local; + + local = readl(®s->LocalCtrl); + local &= ~EEPROM_WRITE_ENABLE; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_LONG_DELAY); + local |= EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + /* sample data in middle of high clk */ + state = (readl(®s->LocalCtrl) & EEPROM_DATA_IN) != 0; + udelay(ACE_SHORT_DELAY); + mb(); + writel(readl(®s->LocalCtrl) & ~EEPROM_CLK_OUT, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + + return state; +} + + +static void eeprom_stop(struct ace_regs __iomem *regs) +{ + u32 local; + + udelay(ACE_SHORT_DELAY); + local = readl(®s->LocalCtrl); + local |= EEPROM_WRITE_ENABLE; + 
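+	/*
+	 * Generate an i2c STOP: with the data line driven low, raise the
+	 * clock and then let the data line go high while the clock is
+	 * still high (the mirror image of eeprom_start() above).
+	 */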
writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + local &= ~EEPROM_DATA_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + local |= EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + local |= EEPROM_DATA_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_LONG_DELAY); + local &= ~EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + mb(); +} + + +/* + * Read a whole byte from the EEPROM. + */ +static int read_eeprom_byte(struct net_device *dev, unsigned long offset) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + unsigned long flags; + u32 local; + int result = 0; + short i; + + /* + * Don't take interrupts on this CPU will bit banging + * the %#%#@$ I2C device + */ + local_irq_save(flags); + + eeprom_start(regs); + + eeprom_prep(regs, EEPROM_WRITE_SELECT); + if (eeprom_check_ack(regs)) { + local_irq_restore(flags); + printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name); + result = -EIO; + goto eeprom_read_error; + } + + eeprom_prep(regs, (offset >> 8) & 0xff); + if (eeprom_check_ack(regs)) { + local_irq_restore(flags); + printk(KERN_ERR "%s: Unable to set address byte 0\n", + ap->name); + result = -EIO; + goto eeprom_read_error; + } + + eeprom_prep(regs, offset & 0xff); + if (eeprom_check_ack(regs)) { + local_irq_restore(flags); + printk(KERN_ERR "%s: Unable to set address byte 1\n", + ap->name); + result = -EIO; + goto eeprom_read_error; + } + + eeprom_start(regs); + eeprom_prep(regs, EEPROM_READ_SELECT); + if (eeprom_check_ack(regs)) { + local_irq_restore(flags); + printk(KERN_ERR "%s: Unable to set READ_SELECT\n", + ap->name); + result = -EIO; + goto eeprom_read_error; + } + + for (i = 0; i < 8; i++) { + local = readl(®s->LocalCtrl); + local &= ~EEPROM_WRITE_ENABLE; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + udelay(ACE_LONG_DELAY); + mb(); + local |= EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + /* sample data mid high clk */ + result = (result << 1) | + ((readl(®s->LocalCtrl) & EEPROM_DATA_IN) != 0); + udelay(ACE_SHORT_DELAY); + mb(); + local = readl(®s->LocalCtrl); + local &= ~EEPROM_CLK_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + udelay(ACE_SHORT_DELAY); + mb(); + if (i == 7) { + local |= EEPROM_WRITE_ENABLE; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + } + } + + local |= EEPROM_DATA_OUT; + writel(local, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + writel(readl(®s->LocalCtrl) | EEPROM_CLK_OUT, ®s->LocalCtrl); + readl(®s->LocalCtrl); + udelay(ACE_LONG_DELAY); + writel(readl(®s->LocalCtrl) & ~EEPROM_CLK_OUT, ®s->LocalCtrl); + readl(®s->LocalCtrl); + mb(); + udelay(ACE_SHORT_DELAY); + eeprom_stop(regs); + + local_irq_restore(flags); + out: + return result; + + eeprom_read_error: + printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n", + ap->name, offset); + goto out; +} + +module_pci_driver(acenic_pci_driver); diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h new file mode 100644 index 000000000..51c486cfb --- /dev/null +++ b/drivers/net/ethernet/alteon/acenic.h @@ -0,0 +1,790 @@ +#ifndef _ACENIC_H_ +#define _ACENIC_H_ +#include + + +/* + * Generate TX index update each time, when TX ring is closed. 
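+ * "Closed" means the descriptor that fills the ring has BD_FLG_COAL_NOW
+ * requested for it in ace_start_xmit():
+ *
+ *	if (tx_ring_full(ap, ap->tx_ret_csm, idx))
+ *		flagsize |= BD_FLG_COAL_NOW;
+ *
+ * with USE_TX_COAL_NOW at 0, ace_load_tx_bd() strips the flag again.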
+ * Normally, this is not useful, because results in more dma (and irqs + * without TX_COAL_INTS_ONLY). + */ +#define USE_TX_COAL_NOW 0 + +/* + * Addressing: + * + * The Tigon uses 64-bit host addresses, regardless of their actual + * length, and it expects a big-endian format. For 32 bit systems the + * upper 32 bits of the address are simply ignored (zero), however for + * little endian 64 bit systems (Alpha) this looks strange with the + * two parts of the address word being swapped. + * + * The addresses are split in two 32 bit words for all architectures + * as some of them are in PCI shared memory and it is necessary to use + * readl/writel to access them. + * + * The addressing code is derived from Pete Wyckoff's work, but + * modified to deal properly with readl/writel usage. + */ + +struct ace_regs { + u32 pad0[16]; /* PCI control registers */ + + u32 HostCtrl; /* 0x40 */ + u32 LocalCtrl; + + u32 pad1[2]; + + u32 MiscCfg; /* 0x50 */ + + u32 pad2[2]; + + u32 PciState; + + u32 pad3[2]; /* 0x60 */ + + u32 WinBase; + u32 WinData; + + u32 pad4[12]; /* 0x70 */ + + u32 DmaWriteState; /* 0xa0 */ + u32 pad5[3]; + u32 DmaReadState; /* 0xb0 */ + + u32 pad6[26]; + + u32 AssistState; + + u32 pad7[8]; /* 0x120 */ + + u32 CpuCtrl; /* 0x140 */ + u32 Pc; + + u32 pad8[3]; + + u32 SramAddr; /* 0x154 */ + u32 SramData; + + u32 pad9[49]; + + u32 MacRxState; /* 0x220 */ + + u32 pad10[7]; + + u32 CpuBCtrl; /* 0x240 */ + u32 PcB; + + u32 pad11[3]; + + u32 SramBAddr; /* 0x254 */ + u32 SramBData; + + u32 pad12[105]; + + u32 pad13[32]; /* 0x400 */ + u32 Stats[32]; + + u32 Mb0Hi; /* 0x500 */ + u32 Mb0Lo; + u32 Mb1Hi; + u32 CmdPrd; + u32 Mb2Hi; + u32 TxPrd; + u32 Mb3Hi; + u32 RxStdPrd; + u32 Mb4Hi; + u32 RxJumboPrd; + u32 Mb5Hi; + u32 RxMiniPrd; + u32 Mb6Hi; + u32 Mb6Lo; + u32 Mb7Hi; + u32 Mb7Lo; + u32 Mb8Hi; + u32 Mb8Lo; + u32 Mb9Hi; + u32 Mb9Lo; + u32 MbAHi; + u32 MbALo; + u32 MbBHi; + u32 MbBLo; + u32 MbCHi; + u32 MbCLo; + u32 MbDHi; + u32 MbDLo; + u32 MbEHi; + u32 MbELo; + u32 MbFHi; + u32 MbFLo; + + u32 pad14[32]; + + u32 MacAddrHi; /* 0x600 */ + u32 MacAddrLo; + u32 InfoPtrHi; + u32 InfoPtrLo; + u32 MultiCastHi; /* 0x610 */ + u32 MultiCastLo; + u32 ModeStat; + u32 DmaReadCfg; + u32 DmaWriteCfg; /* 0x620 */ + u32 TxBufRat; + u32 EvtCsm; + u32 CmdCsm; + u32 TuneRxCoalTicks;/* 0x630 */ + u32 TuneTxCoalTicks; + u32 TuneStatTicks; + u32 TuneMaxTxDesc; + u32 TuneMaxRxDesc; /* 0x640 */ + u32 TuneTrace; + u32 TuneLink; + u32 TuneFastLink; + u32 TracePtr; /* 0x650 */ + u32 TraceStrt; + u32 TraceLen; + u32 IfIdx; + u32 IfMtu; /* 0x660 */ + u32 MaskInt; + u32 GigLnkState; + u32 FastLnkState; + u32 pad16[4]; /* 0x670 */ + u32 RxRetCsm; /* 0x680 */ + + u32 pad17[31]; + + u32 CmdRng[64]; /* 0x700 */ + u32 Window[0x200]; +}; + + +typedef struct { + u32 addrhi; + u32 addrlo; +} aceaddr; + + +#define ACE_WINDOW_SIZE 0x800 + +#define ACE_JUMBO_MTU 9000 +#define ACE_STD_MTU 1500 + +#define ACE_TRACE_SIZE 0x8000 + +/* + * Host control register bits. + */ + +#define IN_INT 0x01 +#define CLR_INT 0x02 +#define HW_RESET 0x08 +#define BYTE_SWAP 0x10 +#define WORD_SWAP 0x20 +#define MASK_INTS 0x40 + +/* + * Local control register bits. 
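+ *
+ * The EEPROM_* bits are bit-banged from acenic.c; one clock pulse on
+ * the serial bus looks roughly like
+ *
+ *	local |= EEPROM_CLK_OUT;
+ *	writel(local, &regs->LocalCtrl);
+ *	udelay(ACE_SHORT_DELAY);
+ *	local &= ~EEPROM_CLK_OUT;
+ *	writel(local, &regs->LocalCtrl);
+ *
+ * with EEPROM_DATA_OUT set up before the rising edge and EEPROM_DATA_IN
+ * sampled while the clock is high.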
+ */ + +#define EEPROM_DATA_IN 0x800000 +#define EEPROM_DATA_OUT 0x400000 +#define EEPROM_WRITE_ENABLE 0x200000 +#define EEPROM_CLK_OUT 0x100000 + +#define EEPROM_BASE 0xa0000000 + +#define EEPROM_WRITE_SELECT 0xa0 +#define EEPROM_READ_SELECT 0xa1 + +#define SRAM_BANK_512K 0x200 + + +/* + * udelay() values for when clocking the eeprom + */ +#define ACE_SHORT_DELAY 2 +#define ACE_LONG_DELAY 4 + + +/* + * Misc Config bits + */ + +#define SYNC_SRAM_TIMING 0x100000 + + +/* + * CPU state bits. + */ + +#define CPU_RESET 0x01 +#define CPU_TRACE 0x02 +#define CPU_PROM_FAILED 0x10 +#define CPU_HALT 0x00010000 +#define CPU_HALTED 0xffff0000 + + +/* + * PCI State bits. + */ + +#define DMA_READ_MAX_4 0x04 +#define DMA_READ_MAX_16 0x08 +#define DMA_READ_MAX_32 0x0c +#define DMA_READ_MAX_64 0x10 +#define DMA_READ_MAX_128 0x14 +#define DMA_READ_MAX_256 0x18 +#define DMA_READ_MAX_1K 0x1c +#define DMA_WRITE_MAX_4 0x20 +#define DMA_WRITE_MAX_16 0x40 +#define DMA_WRITE_MAX_32 0x60 +#define DMA_WRITE_MAX_64 0x80 +#define DMA_WRITE_MAX_128 0xa0 +#define DMA_WRITE_MAX_256 0xc0 +#define DMA_WRITE_MAX_1K 0xe0 +#define DMA_READ_WRITE_MASK 0xfc +#define MEM_READ_MULTIPLE 0x00020000 +#define PCI_66MHZ 0x00080000 +#define PCI_32BIT 0x00100000 +#define DMA_WRITE_ALL_ALIGN 0x00800000 +#define READ_CMD_MEM 0x06000000 +#define WRITE_CMD_MEM 0x70000000 + + +/* + * Mode status + */ + +#define ACE_BYTE_SWAP_BD 0x02 +#define ACE_WORD_SWAP_BD 0x04 /* not actually used */ +#define ACE_WARN 0x08 +#define ACE_BYTE_SWAP_DMA 0x10 +#define ACE_NO_JUMBO_FRAG 0x200 +#define ACE_FATAL 0x40000000 + + +/* + * DMA config + */ + +#define DMA_THRESH_1W 0x10 +#define DMA_THRESH_2W 0x20 +#define DMA_THRESH_4W 0x40 +#define DMA_THRESH_8W 0x80 +#define DMA_THRESH_16W 0x100 +#define DMA_THRESH_32W 0x0 /* not described in doc, but exists. 
*/ + + +/* + * Tuning parameters + */ + +#define TICKS_PER_SEC 1000000 + + +/* + * Link bits + */ + +#define LNK_PREF 0x00008000 +#define LNK_10MB 0x00010000 +#define LNK_100MB 0x00020000 +#define LNK_1000MB 0x00040000 +#define LNK_FULL_DUPLEX 0x00080000 +#define LNK_HALF_DUPLEX 0x00100000 +#define LNK_TX_FLOW_CTL_Y 0x00200000 +#define LNK_NEG_ADVANCED 0x00400000 +#define LNK_RX_FLOW_CTL_Y 0x00800000 +#define LNK_NIC 0x01000000 +#define LNK_JAM 0x02000000 +#define LNK_JUMBO 0x04000000 +#define LNK_ALTEON 0x08000000 +#define LNK_NEG_FCTL 0x10000000 +#define LNK_NEGOTIATE 0x20000000 +#define LNK_ENABLE 0x40000000 +#define LNK_UP 0x80000000 + + +/* + * Event definitions + */ + +#define EVT_RING_ENTRIES 256 +#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event)) + +struct event { +#ifdef __LITTLE_ENDIAN_BITFIELD + u32 idx:12; + u32 code:12; + u32 evt:8; +#else + u32 evt:8; + u32 code:12; + u32 idx:12; +#endif + u32 pad; +}; + + +/* + * Events + */ + +#define E_FW_RUNNING 0x01 +#define E_STATS_UPDATED 0x04 + +#define E_STATS_UPDATE 0x04 + +#define E_LNK_STATE 0x06 +#define E_C_LINK_UP 0x01 +#define E_C_LINK_DOWN 0x02 +#define E_C_LINK_10_100 0x03 + +#define E_ERROR 0x07 +#define E_C_ERR_INVAL_CMD 0x01 +#define E_C_ERR_UNIMP_CMD 0x02 +#define E_C_ERR_BAD_CFG 0x03 + +#define E_MCAST_LIST 0x08 +#define E_C_MCAST_ADDR_ADD 0x01 +#define E_C_MCAST_ADDR_DEL 0x02 + +#define E_RESET_JUMBO_RNG 0x09 + + +/* + * Commands + */ + +#define CMD_RING_ENTRIES 64 + +struct cmd { +#ifdef __LITTLE_ENDIAN_BITFIELD + u32 idx:12; + u32 code:12; + u32 evt:8; +#else + u32 evt:8; + u32 code:12; + u32 idx:12; +#endif +}; + + +#define C_HOST_STATE 0x01 +#define C_C_STACK_UP 0x01 +#define C_C_STACK_DOWN 0x02 + +#define C_FDR_FILTERING 0x02 +#define C_C_FDR_FILT_ENABLE 0x01 +#define C_C_FDR_FILT_DISABLE 0x02 + +#define C_SET_RX_PRD_IDX 0x03 +#define C_UPDATE_STATS 0x04 +#define C_RESET_JUMBO_RNG 0x05 +#define C_ADD_MULTICAST_ADDR 0x08 +#define C_DEL_MULTICAST_ADDR 0x09 + +#define C_SET_PROMISC_MODE 0x0a +#define C_C_PROMISC_ENABLE 0x01 +#define C_C_PROMISC_DISABLE 0x02 + +#define C_LNK_NEGOTIATION 0x0b +#define C_C_NEGOTIATE_BOTH 0x00 +#define C_C_NEGOTIATE_GIG 0x01 +#define C_C_NEGOTIATE_10_100 0x02 + +#define C_SET_MAC_ADDR 0x0c +#define C_CLEAR_PROFILE 0x0d + +#define C_SET_MULTICAST_MODE 0x0e +#define C_C_MCAST_ENABLE 0x01 +#define C_C_MCAST_DISABLE 0x02 + +#define C_CLEAR_STATS 0x0f +#define C_SET_RX_JUMBO_PRD_IDX 0x10 +#define C_REFRESH_STATS 0x11 + + +/* + * Descriptor flags + */ +#define BD_FLG_TCP_UDP_SUM 0x01 +#define BD_FLG_IP_SUM 0x02 +#define BD_FLG_END 0x04 +#define BD_FLG_MORE 0x08 +#define BD_FLG_JUMBO 0x10 +#define BD_FLG_UCAST 0x20 +#define BD_FLG_MCAST 0x40 +#define BD_FLG_BCAST 0x60 +#define BD_FLG_TYP_MASK 0x60 +#define BD_FLG_IP_FRAG 0x80 +#define BD_FLG_IP_FRAG_END 0x100 +#define BD_FLG_VLAN_TAG 0x200 +#define BD_FLG_FRAME_ERROR 0x400 +#define BD_FLG_COAL_NOW 0x800 +#define BD_FLG_MINI 0x1000 + + +/* + * Ring Control block flags + */ +#define RCB_FLG_TCP_UDP_SUM 0x01 +#define RCB_FLG_IP_SUM 0x02 +#define RCB_FLG_NO_PSEUDO_HDR 0x08 +#define RCB_FLG_VLAN_ASSIST 0x10 +#define RCB_FLG_COAL_INT_ONLY 0x20 +#define RCB_FLG_TX_HOST_RING 0x40 +#define RCB_FLG_IEEE_SNAP_SUM 0x80 +#define RCB_FLG_EXT_RX_BD 0x100 +#define RCB_FLG_RNG_DISABLE 0x200 + + +/* + * TX ring - maximum TX ring entries for Tigon I's is 128 + */ +#define MAX_TX_RING_ENTRIES 256 +#define TIGON_I_TX_RING_ENTRIES 128 +#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)) +#define TX_RING_BASE 0x3800 + +struct tx_desc{ + 
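+	/*
+	 * On Tigon I this ring lives in NIC SRAM at TX_RING_BASE and the
+	 * descriptor must be filled with writel() (see the __force
+	 * __iomem cast in ace_load_tx_bd()), while on Tigon II the ring
+	 * is in host memory and is written directly.
+	 */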
aceaddr addr; + u32 flagsize; +#if 0 +/* + * This is in PCI shared mem and must be accessed with readl/writel + * real layout is: + */ +#if __LITTLE_ENDIAN + u16 flags; + u16 size; + u16 vlan; + u16 reserved; +#else + u16 size; + u16 flags; + u16 reserved; + u16 vlan; +#endif +#endif + u32 vlanres; +}; + + +#define RX_STD_RING_ENTRIES 512 +#define RX_STD_RING_SIZE (RX_STD_RING_ENTRIES * sizeof(struct rx_desc)) + +#define RX_JUMBO_RING_ENTRIES 256 +#define RX_JUMBO_RING_SIZE (RX_JUMBO_RING_ENTRIES *sizeof(struct rx_desc)) + +#define RX_MINI_RING_ENTRIES 1024 +#define RX_MINI_RING_SIZE (RX_MINI_RING_ENTRIES *sizeof(struct rx_desc)) + +#define RX_RETURN_RING_ENTRIES 2048 +#define RX_RETURN_RING_SIZE (RX_MAX_RETURN_RING_ENTRIES * \ + sizeof(struct rx_desc)) + +struct rx_desc{ + aceaddr addr; +#ifdef __LITTLE_ENDIAN + u16 size; + u16 idx; +#else + u16 idx; + u16 size; +#endif +#ifdef __LITTLE_ENDIAN + u16 flags; + u16 type; +#else + u16 type; + u16 flags; +#endif +#ifdef __LITTLE_ENDIAN + u16 tcp_udp_csum; + u16 ip_csum; +#else + u16 ip_csum; + u16 tcp_udp_csum; +#endif +#ifdef __LITTLE_ENDIAN + u16 vlan; + u16 err_flags; +#else + u16 err_flags; + u16 vlan; +#endif + u32 reserved; + u32 opague; +}; + + +/* + * This struct is shared with the NIC firmware. + */ +struct ring_ctrl { + aceaddr rngptr; +#ifdef __LITTLE_ENDIAN + u16 flags; + u16 max_len; +#else + u16 max_len; + u16 flags; +#endif + u32 pad; +}; + + +struct ace_mac_stats { + u32 excess_colls; + u32 coll_1; + u32 coll_2; + u32 coll_3; + u32 coll_4; + u32 coll_5; + u32 coll_6; + u32 coll_7; + u32 coll_8; + u32 coll_9; + u32 coll_10; + u32 coll_11; + u32 coll_12; + u32 coll_13; + u32 coll_14; + u32 coll_15; + u32 late_coll; + u32 defers; + u32 crc_err; + u32 underrun; + u32 crs_err; + u32 pad[3]; + u32 drop_ula; + u32 drop_mc; + u32 drop_fc; + u32 drop_space; + u32 coll; + u32 kept_bc; + u32 kept_mc; + u32 kept_uc; +}; + + +struct ace_info { + union { + u32 stats[256]; + } s; + struct ring_ctrl evt_ctrl; + struct ring_ctrl cmd_ctrl; + struct ring_ctrl tx_ctrl; + struct ring_ctrl rx_std_ctrl; + struct ring_ctrl rx_jumbo_ctrl; + struct ring_ctrl rx_mini_ctrl; + struct ring_ctrl rx_return_ctrl; + aceaddr evt_prd_ptr; + aceaddr rx_ret_prd_ptr; + aceaddr tx_csm_ptr; + aceaddr stats2_ptr; +}; + + +struct ring_info { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + + +/* + * Funny... As soon as we add maplen on alpha, it starts to work + * much slower. Hmm... is it because struct does not fit to one cacheline? + * So, split tx_ring_info. + */ +struct tx_ring_info { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + + +/* + * struct ace_skb holding the rings of skb's. This is an awful lot of + * pointers, but I don't see any other smart mode to do this in an + * efficient manner ;-( + */ +struct ace_skb +{ + struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES]; + struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES]; + struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES]; + struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES]; +}; + + +/* + * Struct private for the AceNIC. + * + * Elements are grouped so variables used by the tx handling goes + * together, and will go into the same cache lines etc. in order to + * avoid cache line contention between the rx and tx handling on SMP. + * + * Frequently accessed variables are put at the beginning of the + * struct to help the compiler generate better/shorter code. 
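+ *
+ * For illustration, the RX/refill group below is started on its own
+ * cache line by
+ *
+ *	unsigned long std_refill_busy
+ *		__attribute__ ((aligned (SMP_CACHE_BYTES)));
+ *
+ * so the TX fields above it and the refill state after it do not share
+ * a line between the xmit path and the irq/tasklet path.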
+ */ +struct ace_private +{ + struct ace_info *info; + struct ace_regs __iomem *regs; /* register base */ + struct ace_skb *skb; + dma_addr_t info_dma; /* 32/64 bit */ + + int version, link; + int promisc, mcast_all; + + /* + * TX elements + */ + struct tx_desc *tx_ring; + u32 tx_prd; + volatile u32 tx_ret_csm; + int tx_ring_entries; + + /* + * RX elements + */ + unsigned long std_refill_busy + __attribute__ ((aligned (SMP_CACHE_BYTES))); + unsigned long mini_refill_busy, jumbo_refill_busy; + atomic_t cur_rx_bufs; + atomic_t cur_mini_bufs; + atomic_t cur_jumbo_bufs; + u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd; + u32 cur_rx; + + struct rx_desc *rx_std_ring; + struct rx_desc *rx_jumbo_ring; + struct rx_desc *rx_mini_ring; + struct rx_desc *rx_return_ring; + + int tasklet_pending, jumbo; + struct tasklet_struct ace_tasklet; + + struct event *evt_ring; + + volatile u32 *evt_prd, *rx_ret_prd, *tx_csm; + + dma_addr_t tx_ring_dma; /* 32/64 bit */ + dma_addr_t rx_ring_base_dma; + dma_addr_t evt_ring_dma; + dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma; + + unsigned char *trace_buf; + struct pci_dev *pdev; + struct net_device *next; + volatile int fw_running; + int board_idx; + u16 pci_command; + u8 pci_latency; + const char *name; +#ifdef INDEX_DEBUG + spinlock_t debug_lock + __attribute__ ((aligned (SMP_CACHE_BYTES))); + u32 last_tx, last_std_rx, last_mini_rx; +#endif + int pci_using_dac; + u8 firmware_major; + u8 firmware_minor; + u8 firmware_fix; + u32 firmware_start; +}; + + +#define TX_RESERVED MAX_SKB_FRAGS + +static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd) +{ + return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1); +} + +#define tx_free(ap) tx_space((ap)->tx_ret_csm, (ap)->tx_prd, ap) +#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED) + +static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr) +{ + u64 baddr = (u64) addr; + aa->addrlo = baddr & 0xffffffff; + aa->addrhi = baddr >> 32; + wmb(); +} + + +static inline void ace_set_txprd(struct ace_regs __iomem *regs, + struct ace_private *ap, u32 value) +{ +#ifdef INDEX_DEBUG + unsigned long flags; + spin_lock_irqsave(&ap->debug_lock, flags); + writel(value, ®s->TxPrd); + if (value == ap->last_tx) + printk(KERN_ERR "AceNIC RACE ALERT! 
writing identical value " + "to tx producer (%i)\n", value); + ap->last_tx = value; + spin_unlock_irqrestore(&ap->debug_lock, flags); +#else + writel(value, ®s->TxPrd); +#endif + wmb(); +} + + +static inline void ace_mask_irq(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + + if (ACE_IS_TIGON_I(ap)) + writel(1, ®s->MaskInt); + else + writel(readl(®s->HostCtrl) | MASK_INTS, ®s->HostCtrl); + + ace_sync_irq(dev->irq); +} + + +static inline void ace_unmask_irq(struct net_device *dev) +{ + struct ace_private *ap = netdev_priv(dev); + struct ace_regs __iomem *regs = ap->regs; + + if (ACE_IS_TIGON_I(ap)) + writel(0, ®s->MaskInt); + else + writel(readl(®s->HostCtrl) & ~MASK_INTS, ®s->HostCtrl); +} + + +/* + * Prototypes + */ +static int ace_init(struct net_device *dev); +static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs); +static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs); +static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs); +static irqreturn_t ace_interrupt(int irq, void *dev_id); +static int ace_load_firmware(struct net_device *dev); +static int ace_open(struct net_device *dev); +static netdev_tx_t ace_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int ace_close(struct net_device *dev); +static void ace_tasklet(unsigned long dev); +static void ace_dump_trace(struct ace_private *ap); +static void ace_set_multicast_list(struct net_device *dev); +static int ace_change_mtu(struct net_device *dev, int new_mtu); +static int ace_set_mac_addr(struct net_device *dev, void *p); +static void ace_set_rxtx_parms(struct net_device *dev, int jumbo); +static int ace_allocate_descriptors(struct net_device *dev); +static void ace_free_descriptors(struct net_device *dev); +static void ace_init_cleanup(struct net_device *dev); +static struct net_device_stats *ace_get_stats(struct net_device *dev); +static int read_eeprom_byte(struct net_device *dev, unsigned long offset); + +#endif /* _ACENIC_H_ */ diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig new file mode 100644 index 000000000..fdddba514 --- /dev/null +++ b/drivers/net/ethernet/altera/Kconfig @@ -0,0 +1,9 @@ +config ALTERA_TSE + tristate "Altera Triple-Speed Ethernet MAC support" + depends on HAS_DMA + select PHYLIB + ---help--- + This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. + + To compile this driver as a module, choose M here. The module + will be called alteratse. diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile new file mode 100644 index 000000000..3eff2fd39 --- /dev/null +++ b/drivers/net/ethernet/altera/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Altera device drivers. +# + +obj-$(CONFIG_ALTERA_TSE) += altera_tse.o +altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ +altera_msgdma.o altera_sgdma.o altera_utils.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c new file mode 100644 index 000000000..0fb986ba3 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -0,0 +1,206 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_msgdmahw.h" +#include "altera_msgdma.h" + +/* No initialization work to do for MSGDMA */ +int msgdma_initialize(struct altera_tse_private *priv) +{ + return 0; +} + +void msgdma_uninitialize(struct altera_tse_private *priv) +{ +} + +void msgdma_start_rxdma(struct altera_tse_private *priv) +{ +} + +void msgdma_reset(struct altera_tse_private *priv) +{ + int counter; + + /* Reset Rx mSGDMA */ + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, + msgdma_csroffs(status)); + csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr, + msgdma_csroffs(control)); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status), + MSGDMA_CSR_STAT_RESETTING)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) + netif_warn(priv, drv, priv->dev, + "TSE Rx mSGDMA resetting bit never cleared!\n"); + + /* clear all status bits */ + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status)); + + /* Reset Tx mSGDMA */ + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, + msgdma_csroffs(status)); + + csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr, + msgdma_csroffs(control)); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status), + MSGDMA_CSR_STAT_RESETTING)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) + netif_warn(priv, drv, priv->dev, + "TSE Tx mSGDMA resetting bit never cleared!\n"); + + /* clear all status bits */ + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); +} + +void msgdma_disable_rxirq(struct altera_tse_private *priv) +{ + tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_enable_rxirq(struct altera_tse_private *priv) +{ + tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_disable_txirq(struct altera_tse_private *priv) +{ + tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_enable_txirq(struct altera_tse_private *priv) +{ + tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); +} + +void msgdma_clear_rxirq(struct altera_tse_private *priv) +{ + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status)); +} + +void msgdma_clear_txirq(struct altera_tse_private *priv) +{ + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status)); +} + +/* return 0 to indicate transmit is pending */ +int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ + csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_lo)); + csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_hi)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi)); + csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->tx_dma_desc, 
msgdma_descroffs(burst_seq_num)); + csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc, + msgdma_descroffs(stride)); + csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc, + msgdma_descroffs(control)); + return 0; +} + +u32 msgdma_tx_completions(struct altera_tse_private *priv) +{ + u32 ready = 0; + u32 inuse; + u32 status; + + /* Get number of sent descriptors */ + inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level)) + & 0xffff; + + if (inuse) { /* Tx FIFO is not empty */ + ready = priv->tx_prod - priv->tx_cons - inuse - 1; + } else { + /* Check for buffered last packet */ + status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); + if (status & MSGDMA_CSR_STAT_BUSY) + ready = priv->tx_prod - priv->tx_cons - 1; + else + ready = priv->tx_prod - priv->tx_cons; + } + return ready; +} + +/* Put buffer to the mSGDMA RX FIFO + */ +void msgdma_add_rx_desc(struct altera_tse_private *priv, + struct tse_buffer *rxbuffer) +{ + u32 len = priv->rx_dma_buf_sz; + dma_addr_t dma_addr = rxbuffer->dma_addr; + u32 control = (MSGDMA_DESC_CTL_END_ON_EOP + | MSGDMA_DESC_CTL_END_ON_LEN + | MSGDMA_DESC_CTL_TR_COMP_IRQ + | MSGDMA_DESC_CTL_EARLY_IRQ + | MSGDMA_DESC_CTL_TR_ERR_IRQ + | MSGDMA_DESC_CTL_GO); + + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi)); + csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_lo)); + csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_hi)); + csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num)); + csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride)); + csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control)); +} + +/* status is returned on upper 16 bits, + * length is returned in lower 16 bits + */ +u32 msgdma_rx_status(struct altera_tse_private *priv) +{ + u32 rxstatus = 0; + u32 pktlength; + u32 pktstatus; + + if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level)) + & 0xffff) { + pktlength = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(bytes_transferred)); + pktstatus = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(status)); + rxstatus = pktstatus; + rxstatus = rxstatus << 16; + rxstatus |= (pktlength & 0xffff); + } + return rxstatus; +} diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h new file mode 100644 index 000000000..42cf61c81 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdma.h @@ -0,0 +1,35 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ALTERA_MSGDMA_H__ +#define __ALTERA_MSGDMA_H__ + +void msgdma_reset(struct altera_tse_private *); +void msgdma_enable_txirq(struct altera_tse_private *); +void msgdma_enable_rxirq(struct altera_tse_private *); +void msgdma_disable_rxirq(struct altera_tse_private *); +void msgdma_disable_txirq(struct altera_tse_private *); +void msgdma_clear_rxirq(struct altera_tse_private *); +void msgdma_clear_txirq(struct altera_tse_private *); +u32 msgdma_tx_completions(struct altera_tse_private *); +void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); +int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); +u32 msgdma_rx_status(struct altera_tse_private *); +int msgdma_initialize(struct altera_tse_private *); +void msgdma_uninitialize(struct altera_tse_private *); +void msgdma_start_rxdma(struct altera_tse_private *); + +#endif /* __ALTERA_MSGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h new file mode 100644 index 000000000..89cd11d86 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -0,0 +1,158 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ALTERA_MSGDMAHW_H__ +#define __ALTERA_MSGDMAHW_H__ + +/* mSGDMA extended descriptor format + */ +struct msgdma_extended_desc { + u32 read_addr_lo; /* data buffer source address low bits */ + u32 write_addr_lo; /* data buffer destination address low bits */ + u32 len; /* the number of bytes to transfer + * per descriptor + */ + u32 burst_seq_num; /* bit 31:24 write burst + * bit 23:16 read burst + * bit 15:0 sequence number + */ + u32 stride; /* bit 31:16 write stride + * bit 15:0 read stride + */ + u32 read_addr_hi; /* data buffer source address high bits */ + u32 write_addr_hi; /* data buffer destination address high bits */ + u32 control; /* characteristics of the transfer */ +}; + +/* mSGDMA descriptor control field bit definitions + */ +#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff) +#define MSGDMA_DESC_CTL_GEN_SOP BIT(8) +#define MSGDMA_DESC_CTL_GEN_EOP BIT(9) +#define MSGDMA_DESC_CTL_PARK_READS BIT(10) +#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11) +#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12) +#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13) +#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14) +#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15) +#define MSGDMA_DESC_CTL_TR_ERR_IRQ (0xff << 16) +#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24) +/* Writing ‘1’ to the ‘go’ bit commits the entire descriptor into the + * descriptor FIFO(s) + */ +#define MSGDMA_DESC_CTL_GO BIT(31) + +/* Tx buffer control flags + */ +#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ + MSGDMA_DESC_CTL_GEN_EOP | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_GO) + +#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \ + MSGDMA_DESC_CTL_END_ON_LEN | \ + MSGDMA_DESC_CTL_TR_COMP_IRQ | \ + MSGDMA_DESC_CTL_EARLY_IRQ | \ + MSGDMA_DESC_CTL_TR_ERR_IRQ | \ + MSGDMA_DESC_CTL_GO) + +/* mSGDMA extended descriptor stride definitions + */ +#define MSGDMA_DESC_TX_STRIDE (0x00010001) +#define MSGDMA_DESC_RX_STRIDE (0x00010001) + +/* mSGDMA dispatcher control and status register map + */ +struct msgdma_csr { + u32 status; /* Read/Clear */ + u32 control; /* Read/Write */ + u32 rw_fill_level; /* bit 31:16 - write fill level + * bit 15:0 - read fill level + */ + u32 resp_fill_level; /* bit 15:0 */ + u32 rw_seq_num; /* bit 31:16 - write sequence number + * bit 15:0 - read sequence number + */ + u32 pad[3]; /* reserved */ +}; + +/* mSGDMA CSR status register bit definitions + */ +#define MSGDMA_CSR_STAT_BUSY BIT(0) +#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1) +#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2) +#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3) +#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4) +#define MSGDMA_CSR_STAT_STOPPED BIT(5) +#define MSGDMA_CSR_STAT_RESETTING BIT(6) +#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7) +#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8) +#define MSGDMA_CSR_STAT_IRQ BIT(9) +#define MSGDMA_CSR_STAT_MASK 0x3FF +#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ 0x1FF + +#define MSGDMA_CSR_STAT_BUSY_GET(v) GET_BIT_VALUE(v, 0) +#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 1) +#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v) GET_BIT_VALUE(v, 2) +#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 3) +#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v) GET_BIT_VALUE(v, 4) +#define MSGDMA_CSR_STAT_STOPPED_GET(v) 
GET_BIT_VALUE(v, 5) +#define MSGDMA_CSR_STAT_RESETTING_GET(v) GET_BIT_VALUE(v, 6) +#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v) GET_BIT_VALUE(v, 7) +#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v) GET_BIT_VALUE(v, 8) +#define MSGDMA_CSR_STAT_IRQ_GET(v) GET_BIT_VALUE(v, 9) + +/* mSGDMA CSR control register bit definitions + */ +#define MSGDMA_CSR_CTL_STOP BIT(0) +#define MSGDMA_CSR_CTL_RESET BIT(1) +#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2) +#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3) +#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4) +#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5) + +/* mSGDMA CSR fill level bits + */ +#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16) +#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff) +#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff) + +/* mSGDMA response register map + */ +struct msgdma_response { + u32 bytes_transferred; + u32 status; +}; + +#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a)) +#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a)) +#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a)) + +/* mSGDMA response register bit definitions + */ +#define MSGDMA_RESP_EARLY_TERM BIT(8) +#define MSGDMA_RESP_ERR_MASK 0xFF + +#endif /* __ALTERA_MSGDMA_H__*/ diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c new file mode 100644 index 000000000..580553d42 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -0,0 +1,540 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_sgdmahw.h" +#include "altera_sgdma.h" + +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, + dma_addr_t ndesc_phys, + dma_addr_t raddr, + dma_addr_t waddr, + u16 length, + int generate_eop, + int rfixed, + int wfixed); + +static int sgdma_async_write(struct altera_tse_private *priv, + struct sgdma_descrip __iomem *desc); + +static int sgdma_async_read(struct altera_tse_private *priv); + +static dma_addr_t +sgdma_txphysaddr(struct altera_tse_private *priv, + struct sgdma_descrip __iomem *desc); + +static dma_addr_t +sgdma_rxphysaddr(struct altera_tse_private *priv, + struct sgdma_descrip __iomem *desc); + +static int sgdma_txbusy(struct altera_tse_private *priv); + +static int sgdma_rxbusy(struct altera_tse_private *priv); + +static void +queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer); + +static void +queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer); + +static struct tse_buffer * +dequeue_tx(struct altera_tse_private *priv); + +static struct tse_buffer * +dequeue_rx(struct altera_tse_private *priv); + +static struct tse_buffer * +queue_rx_peekhead(struct altera_tse_private *priv); + +int sgdma_initialize(struct altera_tse_private *priv) +{ + priv->txctrlreg = SGDMA_CTRLREG_ILASTD | + SGDMA_CTRLREG_INTEN; + + priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | + SGDMA_CTRLREG_INTEN | + SGDMA_CTRLREG_ILASTD; + + priv->sgdmadesclen = sizeof(struct sgdma_descrip); + + INIT_LIST_HEAD(&priv->txlisthd); + INIT_LIST_HEAD(&priv->rxlisthd); + + priv->rxdescphys = (dma_addr_t) 0; + priv->txdescphys = (dma_addr_t) 0; + + priv->rxdescphys = dma_map_single(priv->device, + (void __force *)priv->rx_dma_desc, + priv->rxdescmem, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(priv->device, priv->rxdescphys)) { + sgdma_uninitialize(priv); + netdev_err(priv->dev, "error mapping rx descriptor memory\n"); + return -EINVAL; + } + + priv->txdescphys = dma_map_single(priv->device, + (void __force *)priv->tx_dma_desc, + priv->txdescmem, DMA_TO_DEVICE); + + if (dma_mapping_error(priv->device, priv->txdescphys)) { + sgdma_uninitialize(priv); + netdev_err(priv->dev, "error mapping tx descriptor memory\n"); + return -EINVAL; + } + + /* Initialize descriptor memory to all 0's, sync memory to cache */ + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); + + dma_sync_single_for_device(priv->device, priv->txdescphys, + priv->txdescmem, DMA_TO_DEVICE); + + dma_sync_single_for_device(priv->device, priv->rxdescphys, + priv->rxdescmem, DMA_TO_DEVICE); + + return 0; +} + +void sgdma_uninitialize(struct altera_tse_private *priv) +{ + if (priv->rxdescphys) + dma_unmap_single(priv->device, priv->rxdescphys, + priv->rxdescmem, DMA_BIDIRECTIONAL); + + if (priv->txdescphys) + dma_unmap_single(priv->device, priv->txdescphys, + priv->txdescmem, DMA_TO_DEVICE); +} + +/* This function resets the SGDMA controller and clears the + * descriptor memory used for transmits and receives. 
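+ * The reset is performed by writing SGDMA_CTRLREG_RESET to the control
+ * register of each dispatcher and then writing 0 to release it.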
+ */ +void sgdma_reset(struct altera_tse_private *priv) +{ + /* Initialize descriptor memory to 0 */ + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); + + csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); + + csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); +} + +/* For SGDMA, interrupts remain enabled after initially enabling, + * so no need to provide implementations for abstract enable + * and disable + */ + +void sgdma_enable_rxirq(struct altera_tse_private *priv) +{ +} + +void sgdma_enable_txirq(struct altera_tse_private *priv) +{ +} + +void sgdma_disable_rxirq(struct altera_tse_private *priv) +{ +} + +void sgdma_disable_txirq(struct altera_tse_private *priv) +{ +} + +void sgdma_clear_rxirq(struct altera_tse_private *priv) +{ + tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); +} + +void sgdma_clear_txirq(struct altera_tse_private *priv) +{ + tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); +} + +/* transmits buffer through SGDMA. Returns number of buffers + * transmitted, 0 if not possible. + * + * tx_lock is held by the caller + */ +int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->tx_dma_desc; + + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; + + /* wait 'til the tx sgdma is ready for the next transmit request */ + if (sgdma_txbusy(priv)) + return 0; + + sgdma_setup_descrip(cdesc, /* current descriptor */ + ndesc, /* next descriptor */ + sgdma_txphysaddr(priv, ndesc), + buffer->dma_addr, /* address of packet to xmit */ + 0, /* write addr 0 for tx dma */ + buffer->len, /* length of packet */ + SGDMA_CONTROL_EOP, /* Generate EOP */ + 0, /* read fixed */ + SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ + + sgdma_async_write(priv, cdesc); + + /* enqueue the request to the pending transmit queue */ + queue_tx(priv, buffer); + + return 1; +} + + +/* tx_lock held to protect access to queued tx list + */ +u32 sgdma_tx_completions(struct altera_tse_private *priv) +{ + u32 ready = 0; + + if (!sgdma_txbusy(priv) && + ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control)) + & SGDMA_CONTROL_HW_OWNED) == 0) && + (dequeue_tx(priv))) { + ready = 1; + } + + return ready; +} + +void sgdma_start_rxdma(struct altera_tse_private *priv) +{ + sgdma_async_read(priv); +} + +void sgdma_add_rx_desc(struct altera_tse_private *priv, + struct tse_buffer *rxbuffer) +{ + queue_rx(priv, rxbuffer); +} + +/* status is returned on upper 16 bits, + * length is returned in lower 16 bits + */ +u32 sgdma_rx_status(struct altera_tse_private *priv) +{ + struct sgdma_descrip __iomem *base = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; + struct sgdma_descrip __iomem *desc = NULL; + struct tse_buffer *rxbuffer = NULL; + unsigned int rxstatus = 0; + + u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)); + + desc = &base[0]; + if (sts & SGDMA_STSREG_EOP) { + unsigned int pktlength = 0; + unsigned int pktstatus = 0; + dma_sync_single_for_cpu(priv->device, + priv->rxdescphys, + priv->sgdmadesclen, + DMA_FROM_DEVICE); + + pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); + pktstatus = csrrd8(desc, sgdma_descroffs(status)); + rxstatus = pktstatus & ~SGDMA_STATUS_EOP; + 
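+ /* Pack the descriptor status into the upper 16 bits and the byte
+ * count into the lower 16 bits, matching the return-value layout
+ * described in the comment above this function.
+ */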
rxstatus = rxstatus << 16; + rxstatus |= (pktlength & 0xffff); + + if (rxstatus) { + csrwr8(0, desc, sgdma_descroffs(status)); + + rxbuffer = dequeue_rx(priv); + if (rxbuffer == NULL) + netdev_info(priv->dev, + "sgdma rx and rx queue empty!\n"); + + /* Clear control */ + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); + /* clear status */ + csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status)); + + /* kick the rx sgdma after reaping this descriptor */ + sgdma_async_read(priv); + + } else { + /* If the SGDMA indicated an end of packet on recv, + * then it's expected that the rxstatus from the + * descriptor is non-zero - meaning a valid packet + * with a nonzero length, or an error has been + * indicated. if not, then all we can do is signal + * an error and return no packet received. Most likely + * there is a system design error, or an error in the + * underlying kernel (cache or cache management problem) + */ + netdev_err(priv->dev, + "SGDMA RX Error Info: %x, %x, %x\n", + sts, csrrd8(desc, sgdma_descroffs(status)), + rxstatus); + } + } else if (sts == 0) { + sgdma_async_read(priv); + } + + return rxstatus; +} + + +/* Private functions */ +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, + dma_addr_t ndesc_phys, + dma_addr_t raddr, + dma_addr_t waddr, + u16 length, + int generate_eop, + int rfixed, + int wfixed) +{ + /* Clear the next descriptor as not owned by hardware */ + + u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control)); + ctrl &= ~SGDMA_CONTROL_HW_OWNED; + csrwr8(ctrl, ndesc, sgdma_descroffs(control)); + + ctrl = SGDMA_CONTROL_HW_OWNED; + ctrl |= generate_eop; + ctrl |= rfixed; + ctrl |= wfixed; + + /* Channel is implicitly zero, initialized to 0 by default */ + csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr)); + csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr)); + + csrwr32(0, desc, sgdma_descroffs(pad1)); + csrwr32(0, desc, sgdma_descroffs(pad2)); + csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next)); + + csrwr8(ctrl, desc, sgdma_descroffs(control)); + csrwr8(0, desc, sgdma_descroffs(status)); + csrwr8(0, desc, sgdma_descroffs(wburst)); + csrwr8(0, desc, sgdma_descroffs(rburst)); + csrwr16(length, desc, sgdma_descroffs(bytes)); + csrwr16(0, desc, sgdma_descroffs(bytes_xferred)); +} + +/* If hardware is busy, don't restart async read. + * if status register is 0 - meaning initial state, restart async read, + * probably for the first time when populating a receive buffer. + * If read status indicate not busy and a status, restart the async + * DMA read. 
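+ * In short: a new read is only kicked off when the controller is idle
+ * and a receive buffer has been queued; returns 1 if a read was
+ * started, 0 otherwise.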
+ */ +static int sgdma_async_read(struct altera_tse_private *priv) +{ + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; + + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; + struct tse_buffer *rxbuffer = NULL; + + if (!sgdma_rxbusy(priv)) { + rxbuffer = queue_rx_peekhead(priv); + if (rxbuffer == NULL) { + netdev_err(priv->dev, "no rx buffers available\n"); + return 0; + } + + sgdma_setup_descrip(cdesc, /* current descriptor */ + ndesc, /* next descriptor */ + sgdma_rxphysaddr(priv, ndesc), + 0, /* read addr 0 for rx dma */ + rxbuffer->dma_addr, /* write addr for rx dma */ + 0, /* read 'til EOP */ + 0, /* EOP: NA for rx dma */ + 0, /* read fixed: NA for rx dma */ + 0); /* SOP: NA for rx DMA */ + + dma_sync_single_for_device(priv->device, + priv->rxdescphys, + priv->sgdmadesclen, + DMA_TO_DEVICE); + + csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), + priv->rx_dma_csr, + sgdma_csroffs(next_descrip)); + + csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START), + priv->rx_dma_csr, + sgdma_csroffs(control)); + + return 1; + } + + return 0; +} + +static int sgdma_async_write(struct altera_tse_private *priv, + struct sgdma_descrip __iomem *desc) +{ + if (sgdma_txbusy(priv)) + return 0; + + /* clear control and status */ + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); + + dma_sync_single_for_device(priv->device, priv->txdescphys, + priv->sgdmadesclen, DMA_TO_DEVICE); + + csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), + priv->tx_dma_csr, + sgdma_csroffs(next_descrip)); + + csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START), + priv->tx_dma_csr, + sgdma_csroffs(control)); + + return 1; +} + +static dma_addr_t +sgdma_txphysaddr(struct altera_tse_private *priv, + struct sgdma_descrip __iomem *desc) +{ + dma_addr_t paddr = priv->txdescmem_busaddr; + uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; + return (dma_addr_t)((uintptr_t)paddr + offs); +} + +static dma_addr_t +sgdma_rxphysaddr(struct altera_tse_private *priv, + struct sgdma_descrip __iomem *desc) +{ + dma_addr_t paddr = priv->rxdescmem_busaddr; + uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; + return (dma_addr_t)((uintptr_t)paddr + offs); +} + +#define list_remove_head(list, entry, type, member) \ + do { \ + entry = NULL; \ + if (!list_empty(list)) { \ + entry = list_entry((list)->next, type, member); \ + list_del_init(&entry->member); \ + } \ + } while (0) + +#define list_peek_head(list, entry, type, member) \ + do { \ + entry = NULL; \ + if (!list_empty(list)) { \ + entry = list_entry((list)->next, type, member); \ + } \ + } while (0) + +/* adds a tse_buffer to the tail of a tx buffer list. + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. + */ +static void +queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ + list_add_tail(&buffer->lh, &priv->txlisthd); +} + + +/* adds a tse_buffer to the tail of a rx buffer list + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. + */ +static void +queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer) +{ + list_add_tail(&buffer->lh, &priv->rxlisthd); +} + +/* dequeues a tse_buffer from the transmit buffer list, otherwise + * returns NULL if empty. 
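+ * The head of priv->txlisthd is removed with the list_remove_head()
+ * helper defined above.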
+ * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. + */ +static struct tse_buffer * +dequeue_tx(struct altera_tse_private *priv) +{ + struct tse_buffer *buffer = NULL; + list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh); + return buffer; +} + +/* dequeues a tse_buffer from the receive buffer list, otherwise + * returns NULL if empty + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list. + */ +static struct tse_buffer * +dequeue_rx(struct altera_tse_private *priv) +{ + struct tse_buffer *buffer = NULL; + list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); + return buffer; +} + +/* peeks at the head of the receive buffer list without removing it, + * otherwise returns NULL if empty + * assumes the caller is managing and holding a mutual exclusion + * primitive to avoid simultaneous pushes/pops to the list while the + * head is being examined. + */ +static struct tse_buffer * +queue_rx_peekhead(struct altera_tse_private *priv) +{ + struct tse_buffer *buffer = NULL; + list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); + return buffer; +} + +/* check and return rx sgdma status without polling + */ +static int sgdma_rxbusy(struct altera_tse_private *priv) +{ + return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY; +} + +/* waits for the tx sgdma to finish its current operation, returns 0 + * when it transitions to nonbusy, returns 1 if the operation times out + */ +static int sgdma_txbusy(struct altera_tse_private *priv) +{ + int delay = 0; + + /* if DMA is busy, wait for the current transaction to finish */ + while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) && (delay++ < 100)) + udelay(1); + + if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) { + netdev_err(priv->dev, "timeout waiting for tx dma\n"); + return 1; + } + return 0; +} diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h new file mode 100644 index 000000000..584977e29 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_sgdma.h @@ -0,0 +1,36 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ALTERA_SGDMA_H__ +#define __ALTERA_SGDMA_H__ + +void sgdma_reset(struct altera_tse_private *); +void sgdma_enable_txirq(struct altera_tse_private *); +void sgdma_enable_rxirq(struct altera_tse_private *); +void sgdma_disable_rxirq(struct altera_tse_private *); +void sgdma_disable_txirq(struct altera_tse_private *); +void sgdma_clear_rxirq(struct altera_tse_private *); +void sgdma_clear_txirq(struct altera_tse_private *); +int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *); +u32 sgdma_tx_completions(struct altera_tse_private *); +void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); +void sgdma_status(struct altera_tse_private *); +u32 sgdma_rx_status(struct altera_tse_private *); +int sgdma_initialize(struct altera_tse_private *); +void sgdma_uninitialize(struct altera_tse_private *); +void sgdma_start_rxdma(struct altera_tse_private *); + +#endif /* __ALTERA_SGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h new file mode 100644 index 000000000..85bc33b21 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h @@ -0,0 +1,126 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#ifndef __ALTERA_SGDMAHW_H__ +#define __ALTERA_SGDMAHW_H__ + +/* SGDMA descriptor structure */ +struct sgdma_descrip { + u32 raddr; /* address of data to be read */ + u32 pad1; + u32 waddr; + u32 pad2; + u32 next; + u32 pad3; + u16 bytes; + u8 rburst; + u8 wburst; + u16 bytes_xferred; /* 16 bits, bytes xferred */ + + /* bit 0: error + * bit 1: length error + * bit 2: crc error + * bit 3: truncated error + * bit 4: phy error + * bit 5: collision error + * bit 6: reserved + * bit 7: status eop for recv case + */ + u8 status; + + /* bit 0: eop + * bit 1: read_fixed + * bit 2: write fixed + * bits 3,4,5,6: Channel (always 0) + * bit 7: hardware owned + */ + u8 control; +} __packed; + + +#define SGDMA_STATUS_ERR BIT(0) +#define SGDMA_STATUS_LENGTH_ERR BIT(1) +#define SGDMA_STATUS_CRC_ERR BIT(2) +#define SGDMA_STATUS_TRUNC_ERR BIT(3) +#define SGDMA_STATUS_PHY_ERR BIT(4) +#define SGDMA_STATUS_COLL_ERR BIT(5) +#define SGDMA_STATUS_EOP BIT(7) + +#define SGDMA_CONTROL_EOP BIT(0) +#define SGDMA_CONTROL_RD_FIXED BIT(1) +#define SGDMA_CONTROL_WR_FIXED BIT(2) + +/* Channel is always 0, so just zero initialize it */ + +#define SGDMA_CONTROL_HW_OWNED BIT(7) + +/* SGDMA register space */ +struct sgdma_csr { + /* bit 0: error + * bit 1: eop + * bit 2: descriptor completed + * bit 3: chain completed + * bit 4: busy + * remainder reserved + */ + u32 status; + u32 pad1[3]; + + /* bit 0: interrupt on error + * bit 1: interrupt on eop + * bit 2: interrupt after every descriptor + * bit 3: interrupt after last descrip in a chain + * bit 4: global interrupt enable + * bit 5: starts descriptor processing + * bit 6: stop core on dma error + * bit 7: interrupt on max descriptors + * bits 8-15: max descriptors to generate interrupt + * bit 16: Software reset + * bit 17: clears owned by hardware if 0, does not clear otherwise + * bit 18: enables descriptor polling mode + * bit 19-26: clocks before polling again + * bit 27-30: reserved + * bit 31: clear interrupt + */ + u32 control; + u32 pad2[3]; + u32 next_descrip; + u32 pad3[3]; +}; + +#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a)) +#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a)) + +#define SGDMA_STSREG_ERR BIT(0) /* Error */ +#define SGDMA_STSREG_EOP BIT(1) /* EOP */ +#define SGDMA_STSREG_DESCRIP BIT(2) /* Descriptor completed */ +#define SGDMA_STSREG_CHAIN BIT(3) /* Chain completed */ +#define SGDMA_STSREG_BUSY BIT(4) /* Controller busy */ + +#define SGDMA_CTRLREG_IOE BIT(0) /* Interrupt on error */ +#define SGDMA_CTRLREG_IOEOP BIT(1) /* Interrupt on EOP */ +#define SGDMA_CTRLREG_IDESCRIP BIT(2) /* Interrupt after every descriptor */ +#define SGDMA_CTRLREG_ILASTD BIT(3) /* Interrupt after last descriptor */ +#define SGDMA_CTRLREG_INTEN BIT(4) /* Global Interrupt enable */ +#define SGDMA_CTRLREG_START BIT(5) /* starts descriptor processing */ +#define SGDMA_CTRLREG_STOPERR BIT(6) /* stop on dma error */ +#define SGDMA_CTRLREG_INTMAX BIT(7) /* Interrupt on max descriptors */ +#define SGDMA_CTRLREG_RESET BIT(16)/* Software reset */ +#define SGDMA_CTRLREG_COBHW BIT(17)/* Clears owned by hardware */ +#define SGDMA_CTRLREG_POLL BIT(18)/* enables descriptor polling mode */ +#define SGDMA_CTRLREG_CLRINT BIT(31)/* Clears interrupt */ + +#endif /* __ALTERA_SGDMAHW_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h new file mode 100644 index 000000000..2adb24d45 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -0,0 +1,537 @@ +/* Altera Triple-Speed Ethernet MAC driver + * 
Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + * + * Original driver contributed by SLS. + * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#ifndef __ALTERA_TSE_H__ +#define __ALTERA_TSE_H__ + +#define ALTERA_TSE_RESOURCE_NAME "altera_tse" + +#include +#include +#include +#include +#include + +#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000 +#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in + * bytes + */ +/* Rx FIFO default settings */ +#define ALTERA_TSE_RX_SECTION_EMPTY 16 +#define ALTERA_TSE_RX_SECTION_FULL 0 +#define ALTERA_TSE_RX_ALMOST_EMPTY 8 +#define ALTERA_TSE_RX_ALMOST_FULL 8 + +/* Tx FIFO default settings */ +#define ALTERA_TSE_TX_SECTION_EMPTY 16 +#define ALTERA_TSE_TX_SECTION_FULL 0 +#define ALTERA_TSE_TX_ALMOST_EMPTY 8 +#define ALTERA_TSE_TX_ALMOST_FULL 3 + +/* MAC function configuration default settings */ +#define ALTERA_TSE_TX_IPG_LENGTH 12 + +#define ALTERA_TSE_PAUSE_QUANTA 0xffff + +#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1) + +/* MAC Command_Config Register Bit Definitions + */ +#define MAC_CMDCFG_TX_ENA BIT(0) +#define MAC_CMDCFG_RX_ENA BIT(1) +#define MAC_CMDCFG_XON_GEN BIT(2) +#define MAC_CMDCFG_ETH_SPEED BIT(3) +#define MAC_CMDCFG_PROMIS_EN BIT(4) +#define MAC_CMDCFG_PAD_EN BIT(5) +#define MAC_CMDCFG_CRC_FWD BIT(6) +#define MAC_CMDCFG_PAUSE_FWD BIT(7) +#define MAC_CMDCFG_PAUSE_IGNORE BIT(8) +#define MAC_CMDCFG_TX_ADDR_INS BIT(9) +#define MAC_CMDCFG_HD_ENA BIT(10) +#define MAC_CMDCFG_EXCESS_COL BIT(11) +#define MAC_CMDCFG_LATE_COL BIT(12) +#define MAC_CMDCFG_SW_RESET BIT(13) +#define MAC_CMDCFG_MHASH_SEL BIT(14) +#define MAC_CMDCFG_LOOP_ENA BIT(15) +#define MAC_CMDCFG_TX_ADDR_SEL(v) (((v) & 0x7) << 16) +#define MAC_CMDCFG_MAGIC_ENA BIT(19) +#define MAC_CMDCFG_SLEEP BIT(20) +#define MAC_CMDCFG_WAKEUP BIT(21) +#define MAC_CMDCFG_XOFF_GEN BIT(22) +#define MAC_CMDCFG_CNTL_FRM_ENA BIT(23) +#define MAC_CMDCFG_NO_LGTH_CHECK BIT(24) +#define MAC_CMDCFG_ENA_10 BIT(25) +#define MAC_CMDCFG_RX_ERR_DISC BIT(26) +#define MAC_CMDCFG_DISABLE_READ_TIMEOUT BIT(27) +#define MAC_CMDCFG_CNT_RESET BIT(31) + +#define MAC_CMDCFG_TX_ENA_GET(v) GET_BIT_VALUE(v, 0) +#define MAC_CMDCFG_RX_ENA_GET(v) GET_BIT_VALUE(v, 1) +#define MAC_CMDCFG_XON_GEN_GET(v) GET_BIT_VALUE(v, 2) +#define MAC_CMDCFG_ETH_SPEED_GET(v) GET_BIT_VALUE(v, 3) +#define MAC_CMDCFG_PROMIS_EN_GET(v) GET_BIT_VALUE(v, 4) +#define MAC_CMDCFG_PAD_EN_GET(v) GET_BIT_VALUE(v, 5) +#define MAC_CMDCFG_CRC_FWD_GET(v) GET_BIT_VALUE(v, 6) +#define MAC_CMDCFG_PAUSE_FWD_GET(v) GET_BIT_VALUE(v, 7) +#define MAC_CMDCFG_PAUSE_IGNORE_GET(v) GET_BIT_VALUE(v, 8) +#define MAC_CMDCFG_TX_ADDR_INS_GET(v) GET_BIT_VALUE(v, 9) +#define MAC_CMDCFG_HD_ENA_GET(v) GET_BIT_VALUE(v, 10) +#define MAC_CMDCFG_EXCESS_COL_GET(v) GET_BIT_VALUE(v, 11) +#define 
MAC_CMDCFG_LATE_COL_GET(v) GET_BIT_VALUE(v, 12) +#define MAC_CMDCFG_SW_RESET_GET(v) GET_BIT_VALUE(v, 13) +#define MAC_CMDCFG_MHASH_SEL_GET(v) GET_BIT_VALUE(v, 14) +#define MAC_CMDCFG_LOOP_ENA_GET(v) GET_BIT_VALUE(v, 15) +#define MAC_CMDCFG_TX_ADDR_SEL_GET(v) (((v) >> 16) & 0x7) +#define MAC_CMDCFG_MAGIC_ENA_GET(v) GET_BIT_VALUE(v, 19) +#define MAC_CMDCFG_SLEEP_GET(v) GET_BIT_VALUE(v, 20) +#define MAC_CMDCFG_WAKEUP_GET(v) GET_BIT_VALUE(v, 21) +#define MAC_CMDCFG_XOFF_GEN_GET(v) GET_BIT_VALUE(v, 22) +#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v) GET_BIT_VALUE(v, 23) +#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v) GET_BIT_VALUE(v, 24) +#define MAC_CMDCFG_ENA_10_GET(v) GET_BIT_VALUE(v, 25) +#define MAC_CMDCFG_RX_ERR_DISC_GET(v) GET_BIT_VALUE(v, 26) +#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27) +#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31) + +/* MDIO registers within MAC register Space + */ +struct altera_tse_mdio { + u32 control; /* PHY device operation control register */ + u32 status; /* PHY device operation status register */ + u32 phy_id1; /* Bits 31:16 of PHY identifier */ + u32 phy_id2; /* Bits 15:0 of PHY identifier */ + u32 auto_negotiation_advertisement; /* Auto-negotiation + * advertisement + * register + */ + u32 remote_partner_base_page_ability; + + u32 reg6; + u32 reg7; + u32 reg8; + u32 reg9; + u32 rega; + u32 regb; + u32 regc; + u32 regd; + u32 rege; + u32 regf; + u32 reg10; + u32 reg11; + u32 reg12; + u32 reg13; + u32 reg14; + u32 reg15; + u32 reg16; + u32 reg17; + u32 reg18; + u32 reg19; + u32 reg1a; + u32 reg1b; + u32 reg1c; + u32 reg1d; + u32 reg1e; + u32 reg1f; +}; + +/* MAC register Space. Note that some of these registers may or may not be + * present depending upon options chosen by the user when the core was + * configured and built. Please consult the Altera Triple Speed Ethernet User + * Guide for details. + */ +struct altera_tse_mac { + /* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer + * specific revision + */ + u32 megacore_revision; + /* Provides a memory location for user applications to test the device + * memory operation. + */ + u32 scratch_pad; + /* The host processor uses this register to control and configure the + * MAC block + */ + u32 command_config; + /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary + * MAC address + */ + u32 mac_addr_0; + /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary + * MAC address + */ + u32 mac_addr_1; + /* 14-bit maximum frame length. The MAC receive logic */ + u32 frm_length; + /* The pause quanta is used in each pause frame sent to a remote + * Ethernet device, in increments of 512 Ethernet bit times + */ + u32 pause_quanta; + /* 12-bit receive FIFO section-empty threshold */ + u32 rx_section_empty; + /* 12-bit receive FIFO section-full threshold */ + u32 rx_section_full; + /* 12-bit transmit FIFO section-empty threshold */ + u32 tx_section_empty; + /* 12-bit transmit FIFO section-full threshold */ + u32 tx_section_full; + /* 12-bit receive FIFO almost-empty threshold */ + u32 rx_almost_empty; + /* 12-bit receive FIFO almost-full threshold */ + u32 rx_almost_full; + /* 12-bit transmit FIFO almost-empty threshold */ + u32 tx_almost_empty; + /* 12-bit transmit FIFO almost-full threshold */ + u32 tx_almost_full; + /* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */ + u32 mdio_phy0_addr; + /* MDIO address of PHY Device 1. 
Bits 0 to 4 hold a 5-bit PHY address */ + u32 mdio_phy1_addr; + + /* Bit[15:0]—16-bit holdoff quanta */ + u32 holdoff_quant; + + /* only if 100/1000 BaseX PCS, reserved otherwise */ + u32 reserved1[5]; + + /* Minimum IPG between consecutive transmit frame in terms of bytes */ + u32 tx_ipg_length; + + /* IEEE 802.3 oEntity Managed Object Support */ + + /* The MAC addresses */ + u32 mac_id_1; + u32 mac_id_2; + + /* Number of frames transmitted without error including pause frames */ + u32 frames_transmitted_ok; + /* Number of frames received without error including pause frames */ + u32 frames_received_ok; + /* Number of frames received with a CRC error */ + u32 frames_check_sequence_errors; + /* Frame received with an alignment error */ + u32 alignment_errors; + /* Sum of payload and padding octets of frames transmitted without + * error + */ + u32 octets_transmitted_ok; + /* Sum of payload and padding octets of frames received without error */ + u32 octets_received_ok; + + /* IEEE 802.3 oPausedEntity Managed Object Support */ + + /* Number of transmitted pause frames */ + u32 tx_pause_mac_ctrl_frames; + /* Number of Received pause frames */ + u32 rx_pause_mac_ctrl_frames; + + /* IETF MIB (MIB-II) Object Support */ + + /* Number of frames received with error */ + u32 if_in_errors; + /* Number of frames transmitted with error */ + u32 if_out_errors; + /* Number of valid received unicast frames */ + u32 if_in_ucast_pkts; + /* Number of valid received multicasts frames (without pause) */ + u32 if_in_multicast_pkts; + /* Number of valid received broadcast frames */ + u32 if_in_broadcast_pkts; + u32 if_out_discards; + /* The number of valid unicast frames transmitted */ + u32 if_out_ucast_pkts; + /* The number of valid multicast frames transmitted, + * excluding pause frames + */ + u32 if_out_multicast_pkts; + u32 if_out_broadcast_pkts; + + /* IETF RMON MIB Object Support */ + + /* Counts the number of dropped packets due to internal errors + * of the MAC client. + */ + u32 ether_stats_drop_events; + /* Total number of bytes received. Good and bad frames. */ + u32 ether_stats_octets; + /* Total number of packets received. Counts good and bad packets. */ + u32 ether_stats_pkts; + /* Number of packets received with less than 64 bytes. 
*/ + u32 ether_stats_undersize_pkts; + /* The number of frames received that are longer than the + * value configured in the frm_length register + */ + u32 ether_stats_oversize_pkts; + /* Number of received packet with 64 bytes */ + u32 ether_stats_pkts_64_octets; + /* Frames (good and bad) with 65 to 127 bytes */ + u32 ether_stats_pkts_65to127_octets; + /* Frames (good and bad) with 128 to 255 bytes */ + u32 ether_stats_pkts_128to255_octets; + /* Frames (good and bad) with 256 to 511 bytes */ + u32 ether_stats_pkts_256to511_octets; + /* Frames (good and bad) with 512 to 1023 bytes */ + u32 ether_stats_pkts_512to1023_octets; + /* Frames (good and bad) with 1024 to 1518 bytes */ + u32 ether_stats_pkts_1024to1518_octets; + + /* Any frame length from 1519 to the maximum length configured in the + * frm_length register, if it is greater than 1518 + */ + u32 ether_stats_pkts_1519tox_octets; + /* Too long frames with CRC error */ + u32 ether_stats_jabbers; + /* Too short frames with CRC error */ + u32 ether_stats_fragments; + + u32 reserved2; + + /* FIFO control register */ + u32 tx_cmd_stat; + u32 rx_cmd_stat; + + /* Extended Statistics Counters */ + u32 msb_octets_transmitted_ok; + u32 msb_octets_received_ok; + u32 msb_ether_stats_octets; + + u32 reserved3; + + /* Multicast address resolution table, mapped in the controller address + * space + */ + u32 hash_table[64]; + + /* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY + * management interface + */ + struct altera_tse_mdio mdio_phy0; + struct altera_tse_mdio mdio_phy1; + + /* 4 Supplemental MAC Addresses */ + u32 supp_mac_addr_0_0; + u32 supp_mac_addr_0_1; + u32 supp_mac_addr_1_0; + u32 supp_mac_addr_1_1; + u32 supp_mac_addr_2_0; + u32 supp_mac_addr_2_1; + u32 supp_mac_addr_3_0; + u32 supp_mac_addr_3_1; + + u32 reserved4[8]; + + /* IEEE 1588v2 Feature */ + u32 tx_period; + u32 tx_adjust_fns; + u32 tx_adjust_ns; + u32 rx_period; + u32 rx_adjust_fns; + u32 rx_adjust_ns; + + u32 reserved5[42]; +}; + +#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a)) + +/* Transmit and Receive Command Registers Bit Definitions + */ +#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) +#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 BIT(18) +#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16 BIT(25) + +/* Wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct tse_buffer { + struct list_head lh; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 len; + int mapped_as_page; +}; + +struct altera_tse_private; + +#define ALTERA_DTYPE_SGDMA 1 +#define ALTERA_DTYPE_MSGDMA 2 + +/* standard DMA interface for SGDMA and MSGDMA */ +struct altera_dmaops { + int altera_dtype; + int dmamask; + void (*reset_dma)(struct altera_tse_private *); + void (*enable_txirq)(struct altera_tse_private *); + void (*enable_rxirq)(struct altera_tse_private *); + void (*disable_txirq)(struct altera_tse_private *); + void (*disable_rxirq)(struct altera_tse_private *); + void (*clear_txirq)(struct altera_tse_private *); + void (*clear_rxirq)(struct altera_tse_private *); + int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); + u32 (*tx_completions)(struct altera_tse_private *); + void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); + u32 (*get_rx_status)(struct altera_tse_private *); + int (*init_dma)(struct altera_tse_private *); + void (*uninit_dma)(struct altera_tse_private *); + void (*start_rxdma)(struct altera_tse_private *); +}; + +/* This structure is private to each device. 
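+ * It is reached from the net_device via netdev_priv() and groups the
+ * MAC register mapping, DMA descriptor bookkeeping and PHY state.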
+ */ +struct altera_tse_private { + struct net_device *dev; + struct device *device; + struct napi_struct napi; + + /* MAC address space */ + struct altera_tse_mac __iomem *mac_dev; + + /* TSE Revision */ + u32 revision; + + /* mSGDMA Rx Dispatcher address space */ + void __iomem *rx_dma_csr; + void __iomem *rx_dma_desc; + void __iomem *rx_dma_resp; + + /* mSGDMA Tx Dispatcher address space */ + void __iomem *tx_dma_csr; + void __iomem *tx_dma_desc; + + /* Rx buffers queue */ + struct tse_buffer *rx_ring; + u32 rx_cons; + u32 rx_prod; + u32 rx_ring_size; + u32 rx_dma_buf_sz; + + /* Tx ring buffer */ + struct tse_buffer *tx_ring; + u32 tx_prod; + u32 tx_cons; + u32 tx_ring_size; + + /* Interrupts */ + u32 tx_irq; + u32 rx_irq; + + /* RX/TX MAC FIFO configs */ + u32 tx_fifo_depth; + u32 rx_fifo_depth; + u32 max_mtu; + + /* Hash filter settings */ + u32 hash_filter; + u32 added_unicast; + + /* Descriptor memory info for managing SGDMA */ + u32 txdescmem; + u32 rxdescmem; + dma_addr_t rxdescmem_busaddr; + dma_addr_t txdescmem_busaddr; + u32 txctrlreg; + u32 rxctrlreg; + dma_addr_t rxdescphys; + dma_addr_t txdescphys; + size_t sgdmadesclen; + + struct list_head txlisthd; + struct list_head rxlisthd; + + /* MAC command_config register protection */ + spinlock_t mac_cfg_lock; + /* Tx path protection */ + spinlock_t tx_lock; + /* Rx DMA & interrupt control protection */ + spinlock_t rxdma_irq_lock; + + /* PHY */ + int phy_addr; /* PHY's MDIO address, -1 for autodetection */ + phy_interface_t phy_iface; + struct mii_bus *mdio; + struct phy_device *phydev; + int oldspeed; + int oldduplex; + int oldlink; + + /* ethtool msglvl option */ + u32 msg_enable; + + struct altera_dmaops *dmaops; +}; + +/* Function prototypes + */ +void altera_tse_set_ethtool_ops(struct net_device *); + +static inline +u32 csrrd32(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readl(paddr); +} + +static inline +u16 csrrd16(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readw(paddr); +} + +static inline +u8 csrrd8(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readb(paddr); +} + +static inline +void csrwr32(u32 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writel(val, paddr); +} + +static inline +void csrwr16(u16 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writew(val, paddr); +} + +static inline +void csrwr8(u8 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writeb(val, paddr); +} + +#endif /* __ALTERA_TSE_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c new file mode 100644 index 000000000..be72e1e64 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -0,0 +1,275 @@ +/* Ethtool support for Altera Triple-Speed Ethernet MAC driver + * Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * + * Original driver contributed by SLS. 
+ * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include + +#include "altera_tse.h" + +#define TSE_STATS_LEN 31 +#define TSE_NUM_REGS 128 + +static char const stat_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "rx_crc_errors", + "rx_align_errors", + "tx_bytes", + "rx_bytes", + "tx_pause", + "rx_pause", + "rx_errors", + "tx_errors", + "rx_unicast", + "rx_multicast", + "rx_broadcast", + "tx_discards", + "tx_unicast", + "tx_multicast", + "tx_broadcast", + "ether_drops", + "rx_total_bytes", + "rx_total_packets", + "rx_undersize", + "rx_oversize", + "rx_64_bytes", + "rx_65_127_bytes", + "rx_128_255_bytes", + "rx_256_511_bytes", + "rx_512_1023_bytes", + "rx_1024_1518_bytes", + "rx_gte_1519_bytes", + "rx_jabbers", + "rx_runts", +}; + +static void tse_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct altera_tse_private *priv = netdev_priv(dev); + u32 rev = ioread32(&priv->mac_dev->megacore_revision); + + strcpy(info->driver, "altera_tse"); + strcpy(info->version, "v8.0"); + snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", + rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); + sprintf(info->bus_info, "platform"); +} + +/* Fill in a buffer with the strings which correspond to the + * stats + */ +static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf) +{ + memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN); +} + +static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 *buf) +{ + struct altera_tse_private *priv = netdev_priv(dev); + u64 ext; + + buf[0] = csrrd32(priv->mac_dev, + tse_csroffs(frames_transmitted_ok)); + buf[1] = csrrd32(priv->mac_dev, + tse_csroffs(frames_received_ok)); + buf[2] = csrrd32(priv->mac_dev, + tse_csroffs(frames_check_sequence_errors)); + buf[3] = csrrd32(priv->mac_dev, + tse_csroffs(alignment_errors)); + + /* Extended aOctetsTransmittedOK counter */ + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_transmitted_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_transmitted_ok)); + buf[4] = ext; + + /* Extended aOctetsReceivedOK counter */ + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_received_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_received_ok)); + buf[5] = ext; + + buf[6] = csrrd32(priv->mac_dev, + tse_csroffs(tx_pause_mac_ctrl_frames)); + buf[7] = csrrd32(priv->mac_dev, + tse_csroffs(rx_pause_mac_ctrl_frames)); + buf[8] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_errors)); + buf[9] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_errors)); + buf[10] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_ucast_pkts)); + buf[11] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_multicast_pkts)); + buf[12] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_broadcast_pkts)); + buf[13] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_discards)); + buf[14] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_ucast_pkts)); + buf[15] = 
csrrd32(priv->mac_dev, + tse_csroffs(if_out_multicast_pkts)); + buf[16] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_broadcast_pkts)); + buf[17] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_drop_events)); + + /* Extended etherStatsOctets counter */ + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_ether_stats_octets)) << 32; + ext |= csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_octets)); + buf[18] = ext; + + buf[19] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts)); + buf[20] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_undersize_pkts)); + buf[21] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_oversize_pkts)); + buf[22] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_64_octets)); + buf[23] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_65to127_octets)); + buf[24] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_128to255_octets)); + buf[25] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_256to511_octets)); + buf[26] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_512to1023_octets)); + buf[27] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1024to1518_octets)); + buf[28] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1519tox_octets)); + buf[29] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_jabbers)); + buf[30] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_fragments)); +} + +static int tse_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return TSE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static u32 tse_get_msglevel(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void tse_set_msglevel(struct net_device *dev, uint32_t data) +{ + struct altera_tse_private *priv = netdev_priv(dev); + priv->msg_enable = data; +} + +static int tse_reglen(struct net_device *dev) +{ + return TSE_NUM_REGS * sizeof(u32); +} + +static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *regbuf) +{ + int i; + struct altera_tse_private *priv = netdev_priv(dev); + u32 *buf = regbuf; + + /* Set version to a known value, so ethtool knows + * how to do any special formatting of this data. + * This version number will need to change if and + * when this register table is changed. + * + * version[31:0] = 1: Dump the first 128 TSE Registers + * Upper bits are all 0 by default + * + * Upper 16-bits will indicate feature presence for + * Ethtool register decoding in future version. 
+ */ + + regs->version = 1; + + for (i = 0; i < TSE_NUM_REGS; i++) + buf[i] = csrrd32(priv->mac_dev, i * 4); +} + +static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static const struct ethtool_ops tse_ethtool_ops = { + .get_drvinfo = tse_get_drvinfo, + .get_regs_len = tse_reglen, + .get_regs = tse_get_regs, + .get_link = ethtool_op_get_link, + .get_settings = tse_get_settings, + .set_settings = tse_set_settings, + .get_strings = tse_gstrings, + .get_sset_count = tse_sset_count, + .get_ethtool_stats = tse_fill_stats, + .get_msglevel = tse_get_msglevel, + .set_msglevel = tse_set_msglevel, +}; + +void altera_tse_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &tse_ethtool_ops; +} diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c new file mode 100644 index 000000000..da48e6637 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -0,0 +1,1634 @@ +/* Altera Triple-Speed Ethernet MAC driver + * Copyright (C) 2008-2014 Altera Corporation. All rights reserved + * + * Contributors: + * Dalon Westergreen + * Thomas Chou + * Ian Abbott + * Yuriy Kozlov + * Tobias Klauser + * Andriy Smolskyy + * Roman Bulgakov + * Dmytro Mytarchuk + * Matthew Gerlach + * + * Original driver contributed by SLS. + * Major updates contributed by GlobalLogic + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "altera_utils.h" +#include "altera_tse.h" +#include "altera_sgdma.h" +#include "altera_msgdma.h" + +static atomic_t instance_count = ATOMIC_INIT(~0); +/* Module parameters */ +static int debug = -1; +module_param(debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +#define RX_DESCRIPTORS 64 +static int dma_rx_num = RX_DESCRIPTORS; +module_param(dma_rx_num, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list"); + +#define TX_DESCRIPTORS 64 +static int dma_tx_num = TX_DESCRIPTORS; +module_param(dma_tx_num, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list"); + + +#define POLL_PHY (-1) + +/* Make sure DMA buffer size is larger than the max frame size + * plus some alignment offset and a VLAN header. If the max frame size is + * 1518, a VLAN header would be additional 4 bytes and additional + * headroom for alignment is 2 bytes, 2048 is just fine. + */ +#define ALTERA_RXDMABUFFER_SIZE 2048 + +/* Allow network stack to resume queueing packets after we've + * finished transmitting at least 1/4 of the packets in the queue. + */ +#define TSE_TX_THRESH(x) (x->tx_ring_size / 4) + +#define TXQUEUESTOP_THRESHHOLD 2 + +static const struct of_device_id altera_tse_ids[]; + +static inline u32 tse_tx_avail(struct altera_tse_private *priv) +{ + return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1; +} + +/* MDIO specific functions + */ +static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); + + /* set MDIO address */ + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy1_addr)); + + /* get the data */ + return csrrd32(priv->mac_dev, + tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff; +} + +static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); + + /* set MDIO address */ + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy1_addr)); + + /* write the data */ + csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4); + return 0; +} + +static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int ret; + int i; + struct device_node *mdio_node = NULL; + struct mii_bus *mdio = NULL; + struct device_node *child_node = NULL; + + for_each_child_of_node(priv->device->of_node, child_node) { + if (of_device_is_compatible(child_node, "altr,tse-mdio")) { + mdio_node = child_node; + break; + } + } + + if (mdio_node) { + netdev_dbg(dev, "FOUND MDIO subnode\n"); + } else { + netdev_dbg(dev, "NO MDIO subnode\n"); + return 0; + } + + mdio = mdiobus_alloc(); + if (mdio == NULL) { + netdev_err(dev, "Error allocating MDIO bus\n"); + return -ENOMEM; + } + + mdio->name = ALTERA_TSE_RESOURCE_NAME; + mdio->read = &altera_tse_mdio_read; + mdio->write = &altera_tse_mdio_write; + snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id); + + mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if 
(mdio->irq == NULL) { + ret = -ENOMEM; + goto out_free_mdio; + } + for (i = 0; i < PHY_MAX_ADDR; i++) + mdio->irq[i] = PHY_POLL; + + mdio->priv = dev; + mdio->parent = priv->device; + + ret = of_mdiobus_register(mdio, mdio_node); + if (ret != 0) { + netdev_err(dev, "Cannot register MDIO bus %s\n", + mdio->id); + goto out_free_mdio_irq; + } + + if (netif_msg_drv(priv)) + netdev_info(dev, "MDIO bus %s: created\n", mdio->id); + + priv->mdio = mdio; + return 0; +out_free_mdio_irq: + kfree(mdio->irq); +out_free_mdio: + mdiobus_free(mdio); + mdio = NULL; + return ret; +} + +static void altera_tse_mdio_destroy(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + + if (priv->mdio == NULL) + return; + + if (netif_msg_drv(priv)) + netdev_info(dev, "MDIO bus %s: removed\n", + priv->mdio->id); + + mdiobus_unregister(priv->mdio); + kfree(priv->mdio->irq); + mdiobus_free(priv->mdio); + priv->mdio = NULL; +} + +static int tse_init_rx_buffer(struct altera_tse_private *priv, + struct tse_buffer *rxbuffer, int len) +{ + rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len); + if (!rxbuffer->skb) + return -ENOMEM; + + rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data, + len, + DMA_FROM_DEVICE); + + if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(rxbuffer->skb); + return -EINVAL; + } + rxbuffer->dma_addr &= (dma_addr_t)~3; + rxbuffer->len = len; + return 0; +} + +static void tse_free_rx_buffer(struct altera_tse_private *priv, + struct tse_buffer *rxbuffer) +{ + struct sk_buff *skb = rxbuffer->skb; + dma_addr_t dma_addr = rxbuffer->dma_addr; + + if (skb != NULL) { + if (dma_addr) + dma_unmap_single(priv->device, dma_addr, + rxbuffer->len, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + rxbuffer->skb = NULL; + rxbuffer->dma_addr = 0; + } +} + +/* Unmap and free Tx buffer resources + */ +static void tse_free_tx_buffer(struct altera_tse_private *priv, + struct tse_buffer *buffer) +{ + if (buffer->dma_addr) { + if (buffer->mapped_as_page) + dma_unmap_page(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + buffer->dma_addr = 0; + } + if (buffer->skb) { + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } +} + +static int alloc_init_skbufs(struct altera_tse_private *priv) +{ + unsigned int rx_descs = priv->rx_ring_size; + unsigned int tx_descs = priv->tx_ring_size; + int ret = -ENOMEM; + int i; + + /* Create Rx ring buffer */ + priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer), + GFP_KERNEL); + if (!priv->rx_ring) + goto err_rx_ring; + + /* Create Tx ring buffer */ + priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer), + GFP_KERNEL); + if (!priv->tx_ring) + goto err_tx_ring; + + priv->tx_cons = 0; + priv->tx_prod = 0; + + /* Init Rx ring */ + for (i = 0; i < rx_descs; i++) { + ret = tse_init_rx_buffer(priv, &priv->rx_ring[i], + priv->rx_dma_buf_sz); + if (ret) + goto err_init_rx_buffers; + } + + priv->rx_cons = 0; + priv->rx_prod = 0; + + return 0; +err_init_rx_buffers: + while (--i >= 0) + tse_free_rx_buffer(priv, &priv->rx_ring[i]); + kfree(priv->tx_ring); +err_tx_ring: + kfree(priv->rx_ring); +err_rx_ring: + return ret; +} + +static void free_skbufs(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + unsigned int rx_descs = priv->rx_ring_size; + unsigned int tx_descs = priv->tx_ring_size; + int i; + + /* Release 
the DMA TX/RX socket buffers */ + for (i = 0; i < rx_descs; i++) + tse_free_rx_buffer(priv, &priv->rx_ring[i]); + for (i = 0; i < tx_descs; i++) + tse_free_tx_buffer(priv, &priv->tx_ring[i]); + + + kfree(priv->tx_ring); +} + +/* Reallocate the skb for the reception process + */ +static inline void tse_rx_refill(struct altera_tse_private *priv) +{ + unsigned int rxsize = priv->rx_ring_size; + unsigned int entry; + int ret; + + for (; priv->rx_cons - priv->rx_prod > 0; + priv->rx_prod++) { + entry = priv->rx_prod % rxsize; + if (likely(priv->rx_ring[entry].skb == NULL)) { + ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry], + priv->rx_dma_buf_sz); + if (unlikely(ret != 0)) + break; + priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]); + } + } +} + +/* Pull out the VLAN tag and fix up the packet + */ +static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb) +{ + struct ethhdr *eth_hdr; + u16 vid; + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + !__vlan_get_tag(skb, &vid)) { + eth_hdr = (struct ethhdr *)skb->data; + memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } +} + +/* Receive a packet: retrieve and pass over to upper levels + */ +static int tse_rx(struct altera_tse_private *priv, int limit) +{ + unsigned int count = 0; + unsigned int next_entry; + struct sk_buff *skb; + unsigned int entry = priv->rx_cons % priv->rx_ring_size; + u32 rxstatus; + u16 pktlength; + u16 pktstatus; + + /* Check for count < limit first as get_rx_status is changing + * the response-fifo so we must process the next packet + * after calling get_rx_status if a response is pending. + * (reading the last byte of the response pops the value from the fifo.) + */ + while ((count < limit) && + ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) { + pktstatus = rxstatus >> 16; + pktlength = rxstatus & 0xffff; + + if ((pktstatus & 0xFF) || (pktlength == 0)) + netdev_err(priv->dev, + "RCV pktstatus %08X pktlength %08X\n", + pktstatus, pktlength); + + /* DMA trasfer from TSE starts with 2 aditional bytes for + * IP payload alignment. Status returned by get_rx_status() + * contains DMA transfer length. Packet is 2 bytes shorter. 
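+ * The two alignment bytes are therefore subtracted below before the
+ * length is passed to skb_put().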
+ */ + pktlength -= 2; + + count++; + next_entry = (++priv->rx_cons) % priv->rx_ring_size; + + skb = priv->rx_ring[entry].skb; + if (unlikely(!skb)) { + netdev_err(priv->dev, + "%s: Inconsistent Rx descriptor chain\n", + __func__); + priv->dev->stats.rx_dropped++; + break; + } + priv->rx_ring[entry].skb = NULL; + + skb_put(skb, pktlength); + + /* make cache consistent with receive packet buffer */ + dma_sync_single_for_cpu(priv->device, + priv->rx_ring[entry].dma_addr, + priv->rx_ring[entry].len, + DMA_FROM_DEVICE); + + dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, + priv->rx_ring[entry].len, DMA_FROM_DEVICE); + + if (netif_msg_pktdata(priv)) { + netdev_info(priv->dev, "frame received %d bytes\n", + pktlength); + print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET, + 16, 1, skb->data, pktlength, true); + } + + tse_rx_vlan(priv->dev, skb); + + skb->protocol = eth_type_trans(skb, priv->dev); + skb_checksum_none_assert(skb); + + napi_gro_receive(&priv->napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += pktlength; + + entry = next_entry; + + tse_rx_refill(priv); + } + + return count; +} + +/* Reclaim resources after transmission completes + */ +static int tse_tx_complete(struct altera_tse_private *priv) +{ + unsigned int txsize = priv->tx_ring_size; + u32 ready; + unsigned int entry; + struct tse_buffer *tx_buff; + int txcomplete = 0; + + spin_lock(&priv->tx_lock); + + ready = priv->dmaops->tx_completions(priv); + + /* Free sent buffers */ + while (ready && (priv->tx_cons != priv->tx_prod)) { + entry = priv->tx_cons % txsize; + tx_buff = &priv->tx_ring[entry]; + + if (netif_msg_tx_done(priv)) + netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n", + __func__, priv->tx_prod, priv->tx_cons); + + if (likely(tx_buff->skb)) + priv->dev->stats.tx_packets++; + + tse_free_tx_buffer(priv, tx_buff); + priv->tx_cons++; + + txcomplete++; + ready--; + } + + if (unlikely(netif_queue_stopped(priv->dev) && + tse_tx_avail(priv) > TSE_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_queue_stopped(priv->dev) && + tse_tx_avail(priv) > TSE_TX_THRESH(priv)) { + if (netif_msg_tx_done(priv)) + netdev_dbg(priv->dev, "%s: restart transmit\n", + __func__); + netif_wake_queue(priv->dev); + } + netif_tx_unlock(priv->dev); + } + + spin_unlock(&priv->tx_lock); + return txcomplete; +} + +/* NAPI polling function + */ +static int tse_poll(struct napi_struct *napi, int budget) +{ + struct altera_tse_private *priv = + container_of(napi, struct altera_tse_private, napi); + int rxcomplete = 0; + unsigned long int flags; + + tse_tx_complete(priv); + + rxcomplete = tse_rx(priv, budget); + + if (rxcomplete < budget) { + + napi_gro_flush(napi, false); + __napi_complete(napi); + + netdev_dbg(priv->dev, + "NAPI Complete, did %d packets with budget %d\n", + rxcomplete, budget); + + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(priv); + priv->dmaops->enable_txirq(priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + } + return rxcomplete; +} + +/* DMA TX & RX FIFO interrupt routing + */ +static irqreturn_t altera_isr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct altera_tse_private *priv; + + if (unlikely(!dev)) { + pr_err("%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + priv = netdev_priv(dev); + + spin_lock(&priv->rxdma_irq_lock); + /* reset IRQs */ + priv->dmaops->clear_rxirq(priv); + priv->dmaops->clear_txirq(priv); + spin_unlock(&priv->rxdma_irq_lock); + + if (likely(napi_schedule_prep(&priv->napi))) { 
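+ /* Mask the DMA interrupts for the duration of the NAPI poll;
+ * tse_poll() re-enables them once it completes under budget.
+ */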
+ spin_lock(&priv->rxdma_irq_lock); + priv->dmaops->disable_rxirq(priv); + priv->dmaops->disable_txirq(priv); + spin_unlock(&priv->rxdma_irq_lock); + __napi_schedule(&priv->napi); + } + + + return IRQ_HANDLED; +} + +/* Transmit a packet (called by the kernel). Dispatches + * either the SGDMA method for transmitting or the + * MSGDMA method, assumes no scatter/gather support, + * implying an assumption that there's only one + * physically contiguous fragment starting at + * skb->data, for length of skb_headlen(skb). + */ +static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + unsigned int txsize = priv->tx_ring_size; + unsigned int entry; + struct tse_buffer *buffer = NULL; + int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int nopaged_len = skb_headlen(skb); + enum netdev_tx ret = NETDEV_TX_OK; + dma_addr_t dma_addr; + + spin_lock_bh(&priv->tx_lock); + + if (unlikely(tse_tx_avail(priv) < nfrags + 1)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. */ + netdev_err(priv->dev, + "%s: Tx list full when queue awake\n", + __func__); + } + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Map the first skb fragment */ + entry = priv->tx_prod % txsize; + buffer = &priv->tx_ring[entry]; + + dma_addr = dma_map_single(priv->device, skb->data, nopaged_len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) { + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); + ret = NETDEV_TX_OK; + goto out; + } + + buffer->skb = skb; + buffer->dma_addr = dma_addr; + buffer->len = nopaged_len; + + /* Push data out of the cache hierarchy into main memory */ + dma_sync_single_for_device(priv->device, buffer->dma_addr, + buffer->len, DMA_TO_DEVICE); + + priv->dmaops->tx_buffer(priv, buffer); + + skb_tx_timestamp(skb); + + priv->tx_prod++; + dev->stats.tx_bytes += skb->len; + + if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) { + if (netif_msg_hw(priv)) + netdev_dbg(priv->dev, "%s: stop transmitted packets\n", + __func__); + netif_stop_queue(dev); + } + +out: + spin_unlock_bh(&priv->tx_lock); + + return ret; +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the phydev structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. 
+ */ +static void altera_tse_adjust_link(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + int new_state = 0; + + /* only change config if there is a link */ + spin_lock(&priv->mac_cfg_lock); + if (phydev->link) { + /* Read old config */ + u32 cfg_reg = ioread32(&priv->mac_dev->command_config); + + /* Check duplex */ + if (phydev->duplex != priv->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) + cfg_reg |= MAC_CMDCFG_HD_ENA; + else + cfg_reg &= ~MAC_CMDCFG_HD_ENA; + + netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n", + dev->name, phydev->duplex); + + priv->oldduplex = phydev->duplex; + } + + /* Check speed */ + if (phydev->speed != priv->oldspeed) { + new_state = 1; + switch (phydev->speed) { + case 1000: + cfg_reg |= MAC_CMDCFG_ETH_SPEED; + cfg_reg &= ~MAC_CMDCFG_ENA_10; + break; + case 100: + cfg_reg &= ~MAC_CMDCFG_ETH_SPEED; + cfg_reg &= ~MAC_CMDCFG_ENA_10; + break; + case 10: + cfg_reg &= ~MAC_CMDCFG_ETH_SPEED; + cfg_reg |= MAC_CMDCFG_ENA_10; + break; + default: + if (netif_msg_link(priv)) + netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + priv->oldspeed = phydev->speed; + } + iowrite32(cfg_reg, &priv->mac_dev->command_config); + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + + spin_unlock(&priv->mac_cfg_lock); +} +static struct phy_device *connect_local_phy(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; + char phy_id_fmt[MII_BUS_ID_SIZE + 3]; + + if (priv->phy_addr != POLL_PHY) { + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, + priv->mdio->id, priv->phy_addr); + + netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt); + + phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, + priv->phy_iface); + if (IS_ERR(phydev)) + netdev_err(dev, "Could not attach to PHY\n"); + + } else { + int ret; + phydev = phy_find_first(priv->mdio); + if (phydev == NULL) { + netdev_err(dev, "No PHY found\n"); + return phydev; + } + + ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link, + priv->phy_iface); + if (ret != 0) { + netdev_err(dev, "Could not attach to PHY\n"); + phydev = NULL; + } + } + return phydev; +} + +static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + struct device_node *np = priv->device->of_node; + int ret = 0; + + priv->phy_iface = of_get_phy_mode(np); + + /* Avoid get phy addr and create mdio if no phy is present */ + if (!priv->phy_iface) + return 0; + + /* try to get PHY address from device tree, use PHY autodetection if + * no valid address is given + */ + + if (of_property_read_u32(priv->device->of_node, "phy-addr", + &priv->phy_addr)) { + priv->phy_addr = POLL_PHY; + } + + if (!((priv->phy_addr == POLL_PHY) || + ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { + netdev_err(dev, "invalid phy-addr specified %d\n", + priv->phy_addr); + return -ENODEV; + } + + /* Create/attach to MDIO bus */ + ret = altera_tse_mdio_create(dev, + atomic_add_return(1, &instance_count)); + + if (ret) + return -ENODEV; + + return 0; +} + +/* Initialize driver's PHY state, and attach to the PHY + */ +static int init_phy(struct net_device *dev) +{ + struct altera_tse_private *priv = 
netdev_priv(dev); + struct phy_device *phydev; + struct device_node *phynode; + bool fixed_link = false; + int rc = 0; + + /* Avoid init phy in case of no phy present */ + if (!priv->phy_iface) + return 0; + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); + + if (!phynode) { + /* check if a fixed-link is defined in device-tree */ + if (of_phy_is_fixed_link(priv->device->of_node)) { + rc = of_phy_register_fixed_link(priv->device->of_node); + if (rc < 0) { + netdev_err(dev, "cannot register fixed PHY\n"); + return rc; + } + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + phynode = of_node_get(priv->device->of_node); + fixed_link = true; + + netdev_dbg(dev, "fixed-link detected\n"); + phydev = of_phy_connect(dev, phynode, + &altera_tse_adjust_link, + 0, priv->phy_iface); + } else { + netdev_dbg(dev, "no phy-handle found\n"); + if (!priv->mdio) { + netdev_err(dev, "No phy-handle nor local mdio specified\n"); + return -ENODEV; + } + phydev = connect_local_phy(dev); + } + } else { + netdev_dbg(dev, "phy-handle found\n"); + phydev = of_phy_connect(dev, phynode, + &altera_tse_adjust_link, 0, priv->phy_iface); + } + + if (!phydev) { + netdev_err(dev, "Could not find the PHY\n"); + return -ENODEV; + } + + /* Stop Advertising 1000BASE Capability if interface is not GMII + * Note: Checkpatch throws CHECKs for the camel case defines below, + * it's ok to ignore. + */ + if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || + (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) + phydev->advertising &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + /* Broken HW is sometimes missing the pull-up resistor on the + * MDIO line, which results in reads to non-existent devices returning + * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent + * device as well. If a fixed-link is used the phy_id is always 0. + * Note: phydev->phy_id is the result of reading the UID PHY registers. + */ + if ((phydev->phy_id == 0) && !fixed_link) { + netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); + phy_disconnect(phydev); + return -ENODEV; + } + + netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", + phydev->addr, phydev->phy_id, phydev->link); + + priv->phydev = phydev; + return 0; +} + +static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) +{ + u32 msb; + u32 lsb; + + msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + lsb = ((addr[5] << 8) | addr[4]) & 0xffff; + + /* Set primary MAC address */ + csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0)); + csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1)); +} + +/* MAC software reset. + * When reset is triggered, the MAC function completes the current + * transmission or reception, and subsequently disables the transmit and + * receive logic, flushes the receive FIFO buffer, and resets the statistics + * counters. 
+ */ +static int reset_mac(struct altera_tse_private *priv) +{ + int counter; + u32 dat; + + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); + dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); + dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); + + counter = 0; + while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_SW_RESET)) + break; + udelay(1); + } + + if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); + dat &= ~MAC_CMDCFG_SW_RESET; + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); + return -1; + } + return 0; +} + +/* Initialize MAC core registers +*/ +static int init_mac(struct altera_tse_private *priv) +{ + unsigned int cmd = 0; + u32 frm_length; + + /* Setup Rx FIFO */ + csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(rx_section_empty)); + + csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev, + tse_csroffs(rx_section_full)); + + csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev, + tse_csroffs(rx_almost_empty)); + + csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(rx_almost_full)); + + /* Setup Tx FIFO */ + csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(tx_section_empty)); + + csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev, + tse_csroffs(tx_section_full)); + + csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev, + tse_csroffs(tx_almost_empty)); + + csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(tx_almost_full)); + + /* MAC Address Configuration */ + tse_update_mac_addr(priv, priv->dev->dev_addr); + + /* MAC Function Configuration */ + frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; + csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length)); + + csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev, + tse_csroffs(tx_ipg_length)); + + /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit + * start address + */ + tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat), + ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); + + tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat), + ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | + ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); + + /* Set the MAC options */ + cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config)); + cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ + cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ + cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames + * with CRC errors + */ + cmd |= MAC_CMDCFG_CNTL_FRM_ENA; + cmd &= ~MAC_CMDCFG_TX_ENA; + cmd &= ~MAC_CMDCFG_RX_ENA; + + /* Default speed and duplex setting, full/100 */ + cmd &= ~MAC_CMDCFG_HD_ENA; + cmd &= ~MAC_CMDCFG_ETH_SPEED; + cmd &= ~MAC_CMDCFG_ENA_10; + + csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config)); + + csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev, + tse_csroffs(pause_quanta)); + + if (netif_msg_hw(priv)) + dev_dbg(priv->device, + "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd); + + return 0; +} + +/* Start/stop MAC transmission logic + */ +static void tse_set_mac(struct altera_tse_private *priv, bool enable) +{ + u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config)); + + if (enable) + value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; + else + value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); + + csrwr32(value, priv->mac_dev, tse_csroffs(command_config)); +} + +/* Change the MTU + */ +static int 
tse_change_mtu(struct net_device *dev, int new_mtu) +{ + struct altera_tse_private *priv = netdev_priv(dev); + unsigned int max_mtu = priv->max_mtu; + unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN; + + if (netif_running(dev)) { + netdev_err(dev, "must be stopped to change its MTU\n"); + return -EBUSY; + } + + if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) { + netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu); + return -EINVAL; + } + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static void altera_tse_set_mcfilter(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int i; + struct netdev_hw_addr *ha; + + /* clear the hash filter */ + for (i = 0; i < 64; i++) + csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4); + + netdev_for_each_mc_addr(ha, dev) { + unsigned int hash = 0; + int mac_octet; + + for (mac_octet = 5; mac_octet >= 0; mac_octet--) { + unsigned char xor_bit = 0; + unsigned char octet = ha->addr[mac_octet]; + unsigned int bitshift; + + for (bitshift = 0; bitshift < 8; bitshift++) + xor_bit ^= ((octet >> bitshift) & 0x01); + + hash = (hash << 1) | xor_bit; + } + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4); + } +} + + +static void altera_tse_set_mcfilterall(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int i; + + /* set the hash filter */ + for (i = 0; i < 64; i++) + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); +} + +/* Set or clear the multicast filter for this adaptor + */ +static void tse_set_rx_mode_hashfilter(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + + spin_lock(&priv->mac_cfg_lock); + + if (dev->flags & IFF_PROMISC) + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); + + if (dev->flags & IFF_ALLMULTI) + altera_tse_set_mcfilterall(dev); + else + altera_tse_set_mcfilter(dev); + + spin_unlock(&priv->mac_cfg_lock); +} + +/* Set or clear the multicast filter for this adaptor + */ +static void tse_set_rx_mode(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + + spin_lock(&priv->mac_cfg_lock); + + if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || + !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); + else + tse_clear_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); + + spin_unlock(&priv->mac_cfg_lock); +} + +/* Open and initialize the interface + */ +static int tse_open(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int ret = 0; + int i; + unsigned long int flags; + + /* Reset and configure TSE MAC and probe associated PHY */ + ret = priv->dmaops->init_dma(priv); + if (ret != 0) { + netdev_err(dev, "Cannot initialize DMA\n"); + goto phy_error; + } + + if (netif_msg_ifup(priv)) + netdev_warn(dev, "device MAC address %pM\n", + dev->dev_addr); + + if ((priv->revision < 0xd00) || (priv->revision > 0xe00)) + netdev_warn(dev, "TSE revision %x\n", priv->revision); + + spin_lock(&priv->mac_cfg_lock); + ret = reset_mac(priv); + /* Note that reset_mac will fail if the clocks are gated by the PHY + * due to the PHY being put into isolation or power down mode. + * This is not an error if reset fails due to no clock. 
+ */ + if (ret) + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); + + ret = init_mac(priv); + spin_unlock(&priv->mac_cfg_lock); + if (ret) { + netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret); + goto alloc_skbuf_error; + } + + priv->dmaops->reset_dma(priv); + + /* Create and initialize the TX/RX descriptors chains. */ + priv->rx_ring_size = dma_rx_num; + priv->tx_ring_size = dma_tx_num; + ret = alloc_init_skbufs(priv); + if (ret) { + netdev_err(dev, "DMA descriptors initialization failed\n"); + goto alloc_skbuf_error; + } + + + /* Register RX interrupt */ + ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED, + dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register RX interrupt %d\n", + priv->rx_irq); + goto init_error; + } + + /* Register TX interrupt */ + ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED, + dev->name, dev); + if (ret) { + netdev_err(dev, "Unable to register TX interrupt %d\n", + priv->tx_irq); + goto tx_request_irq_error; + } + + /* Enable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(priv); + priv->dmaops->enable_txirq(priv); + + /* Setup RX descriptor chain */ + for (i = 0; i < priv->rx_ring_size; i++) + priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]); + + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + if (priv->phydev) + phy_start(priv->phydev); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + priv->dmaops->start_rxdma(priv); + + /* Start MAC Rx/Tx */ + spin_lock(&priv->mac_cfg_lock); + tse_set_mac(priv, true); + spin_unlock(&priv->mac_cfg_lock); + + return 0; + +tx_request_irq_error: + free_irq(priv->rx_irq, dev); +init_error: + free_skbufs(dev); +alloc_skbuf_error: +phy_error: + return ret; +} + +/* Stop TSE MAC interface and put the device in an inactive state + */ +static int tse_shutdown(struct net_device *dev) +{ + struct altera_tse_private *priv = netdev_priv(dev); + int ret; + unsigned long int flags; + + /* Stop the PHY */ + if (priv->phydev) + phy_stop(priv->phydev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + /* Disable DMA interrupts */ + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->disable_rxirq(priv); + priv->dmaops->disable_txirq(priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + + /* Free the IRQ lines */ + free_irq(priv->rx_irq, dev); + free_irq(priv->tx_irq, dev); + + /* disable and reset the MAC, empties fifo */ + spin_lock(&priv->mac_cfg_lock); + spin_lock(&priv->tx_lock); + + ret = reset_mac(priv); + /* Note that reset_mac will fail if the clocks are gated by the PHY + * due to the PHY being put into isolation or power down mode. + * This is not an error if reset fails due to no clock. 
+ */ + if (ret) + netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret); + priv->dmaops->reset_dma(priv); + free_skbufs(dev); + + spin_unlock(&priv->tx_lock); + spin_unlock(&priv->mac_cfg_lock); + + priv->dmaops->uninit_dma(priv); + + return 0; +} + +static struct net_device_ops altera_tse_netdev_ops = { + .ndo_open = tse_open, + .ndo_stop = tse_shutdown, + .ndo_start_xmit = tse_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = tse_set_rx_mode, + .ndo_change_mtu = tse_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int request_and_map(struct platform_device *pdev, const char *name, + struct resource **res, void __iomem **ptr) +{ + struct resource *region; + struct device *device = &pdev->dev; + + *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (*res == NULL) { + dev_err(device, "resource %s not defined\n", name); + return -ENODEV; + } + + region = devm_request_mem_region(device, (*res)->start, + resource_size(*res), dev_name(device)); + if (region == NULL) { + dev_err(device, "unable to request %s\n", name); + return -EBUSY; + } + + *ptr = devm_ioremap_nocache(device, region->start, + resource_size(region)); + if (*ptr == NULL) { + dev_err(device, "ioremap_nocache of %s failed!", name); + return -ENOMEM; + } + + return 0; +} + +/* Probe Altera TSE MAC device + */ +static int altera_tse_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + int ret = -ENODEV; + struct resource *control_port; + struct resource *dma_res; + struct altera_tse_private *priv; + const unsigned char *macaddr; + void __iomem *descmap; + const struct of_device_id *of_id = NULL; + + ndev = alloc_etherdev(sizeof(struct altera_tse_private)); + if (!ndev) { + dev_err(&pdev->dev, "Could not allocate network device\n"); + return -ENODEV; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + priv = netdev_priv(ndev); + priv->device = &pdev->dev; + priv->dev = ndev; + priv->msg_enable = netif_msg_init(debug, default_msg_level); + + of_id = of_match_device(altera_tse_ids, &pdev->dev); + + if (of_id) + priv->dmaops = (struct altera_dmaops *)of_id->data; + + + if (priv->dmaops && + priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) { + /* Get the mapped address to the SGDMA descriptor memory */ + ret = request_and_map(pdev, "s1", &dma_res, &descmap); + if (ret) + goto err_free_netdev; + + /* Start of that memory is for transmit descriptors */ + priv->tx_dma_desc = descmap; + + /* First half is for tx descriptors, other half for tx */ + priv->txdescmem = resource_size(dma_res)/2; + + priv->txdescmem_busaddr = (dma_addr_t)dma_res->start; + + priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap + + priv->txdescmem)); + priv->rxdescmem = resource_size(dma_res)/2; + priv->rxdescmem_busaddr = dma_res->start; + priv->rxdescmem_busaddr += priv->txdescmem; + + if (upper_32_bits(priv->rxdescmem_busaddr)) { + dev_dbg(priv->device, + "SGDMA bus addresses greater than 32-bits\n"); + goto err_free_netdev; + } + if (upper_32_bits(priv->txdescmem_busaddr)) { + dev_dbg(priv->device, + "SGDMA bus addresses greater than 32-bits\n"); + goto err_free_netdev; + } + } else if (priv->dmaops && + priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { + ret = request_and_map(pdev, "rx_resp", &dma_res, + &priv->rx_dma_resp); + if (ret) + goto err_free_netdev; + + ret = request_and_map(pdev, "tx_desc", &dma_res, + &priv->tx_dma_desc); + if (ret) + goto err_free_netdev; + + priv->txdescmem = resource_size(dma_res); + priv->txdescmem_busaddr = dma_res->start; + + ret = 
request_and_map(pdev, "rx_desc", &dma_res, + &priv->rx_dma_desc); + if (ret) + goto err_free_netdev; + + priv->rxdescmem = resource_size(dma_res); + priv->rxdescmem_busaddr = dma_res->start; + + } else { + goto err_free_netdev; + } + + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) + dma_set_coherent_mask(priv->device, + DMA_BIT_MASK(priv->dmaops->dmamask)); + else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) + dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); + else + goto err_free_netdev; + + /* MAC address space */ + ret = request_and_map(pdev, "control_port", &control_port, + (void __iomem **)&priv->mac_dev); + if (ret) + goto err_free_netdev; + + /* xSGDMA Rx Dispatcher address space */ + ret = request_and_map(pdev, "rx_csr", &dma_res, + &priv->rx_dma_csr); + if (ret) + goto err_free_netdev; + + + /* xSGDMA Tx Dispatcher address space */ + ret = request_and_map(pdev, "tx_csr", &dma_res, + &priv->tx_dma_csr); + if (ret) + goto err_free_netdev; + + + /* Rx IRQ */ + priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq"); + if (priv->rx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + /* Tx IRQ */ + priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq"); + if (priv->tx_irq == -ENXIO) { + dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + /* get FIFO depths from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", + &priv->rx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", + &priv->tx_fifo_depth)) { + dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); + ret = -ENXIO; + goto err_free_netdev; + } + + /* get hash filter settings for this instance */ + priv->hash_filter = + of_property_read_bool(pdev->dev.of_node, + "altr,has-hash-multicast-filter"); + + /* Set hash filter to not set for now until the + * multicast filter receive issue is debugged + */ + priv->hash_filter = 0; + + /* get supplemental address settings for this instance */ + priv->added_unicast = + of_property_read_bool(pdev->dev.of_node, + "altr,has-supplementary-unicast"); + + /* Max MTU is 1500, ETH_DATA_LEN */ + priv->max_mtu = ETH_DATA_LEN; + + /* Get the max mtu from the device tree. Note that the + * "max-frame-size" parameter is actually max mtu. Definition + * in the ePAPR v1.1 spec and usage differ, so go with usage. 
+ */ + of_property_read_u32(pdev->dev.of_node, "max-frame-size", + &priv->max_mtu); + + /* The DMA buffer size already accounts for an alignment bias + * to avoid unaligned access exceptions for the NIOS processor, + */ + priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE; + + /* get default MAC address from device tree */ + macaddr = of_get_mac_address(pdev->dev.of_node); + if (macaddr) + ether_addr_copy(ndev->dev_addr, macaddr); + else + eth_hw_addr_random(ndev); + + /* get phy addr and create mdio */ + ret = altera_tse_phy_get_addr_mdio_create(ndev); + + if (ret) + goto err_free_netdev; + + /* initialize netdev */ + ndev->mem_start = control_port->start; + ndev->mem_end = control_port->end; + ndev->netdev_ops = &altera_tse_netdev_ops; + altera_tse_set_ethtool_ops(ndev); + + altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode; + + if (priv->hash_filter) + altera_tse_netdev_ops.ndo_set_rx_mode = + tse_set_rx_mode_hashfilter; + + /* Scatter/gather IO is not supported, + * so it is turned off + */ + ndev->hw_features &= ~NETIF_F_SG; + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + + /* VLAN offloading of tagging, stripping and filtering is not + * supported by hardware, but driver will accommodate the + * extra 4-byte VLAN tag for processing by upper layers + */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + /* setup NAPI interface */ + netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT); + + spin_lock_init(&priv->mac_cfg_lock); + spin_lock_init(&priv->tx_lock); + spin_lock_init(&priv->rxdma_irq_lock); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register TSE net device\n"); + goto err_register_netdev; + } + + platform_set_drvdata(pdev, ndev); + + priv->revision = ioread32(&priv->mac_dev->megacore_revision); + + if (netif_msg_probe(priv)) + dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n", + (priv->revision >> 8) & 0xff, + priv->revision & 0xff, + (unsigned long) control_port->start, priv->rx_irq, + priv->tx_irq); + + ret = init_phy(ndev); + if (ret != 0) { + netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret); + goto err_init_phy; + } + return 0; + +err_init_phy: + unregister_netdev(ndev); +err_register_netdev: + netif_napi_del(&priv->napi); + altera_tse_mdio_destroy(ndev); +err_free_netdev: + free_netdev(ndev); + return ret; +} + +/* Remove Altera TSE MAC device + */ +static int altera_tse_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct altera_tse_private *priv = netdev_priv(ndev); + + if (priv->phydev) + phy_disconnect(priv->phydev); + + platform_set_drvdata(pdev, NULL); + altera_tse_mdio_destroy(ndev); + unregister_netdev(ndev); + free_netdev(ndev); + + return 0; +} + +static const struct altera_dmaops altera_dtype_sgdma = { + .altera_dtype = ALTERA_DTYPE_SGDMA, + .dmamask = 32, + .reset_dma = sgdma_reset, + .enable_txirq = sgdma_enable_txirq, + .enable_rxirq = sgdma_enable_rxirq, + .disable_txirq = sgdma_disable_txirq, + .disable_rxirq = sgdma_disable_rxirq, + .clear_txirq = sgdma_clear_txirq, + .clear_rxirq = sgdma_clear_rxirq, + .tx_buffer = sgdma_tx_buffer, + .tx_completions = sgdma_tx_completions, + .add_rx_desc = sgdma_add_rx_desc, + .get_rx_status = sgdma_rx_status, + .init_dma = sgdma_initialize, + .uninit_dma = sgdma_uninitialize, + .start_rxdma = sgdma_start_rxdma, +}; + +static const struct altera_dmaops altera_dtype_msgdma = { + .altera_dtype = ALTERA_DTYPE_MSGDMA, + .dmamask = 64, + .reset_dma = msgdma_reset, + .enable_txirq = 
msgdma_enable_txirq, + .enable_rxirq = msgdma_enable_rxirq, + .disable_txirq = msgdma_disable_txirq, + .disable_rxirq = msgdma_disable_rxirq, + .clear_txirq = msgdma_clear_txirq, + .clear_rxirq = msgdma_clear_rxirq, + .tx_buffer = msgdma_tx_buffer, + .tx_completions = msgdma_tx_completions, + .add_rx_desc = msgdma_add_rx_desc, + .get_rx_status = msgdma_rx_status, + .init_dma = msgdma_initialize, + .uninit_dma = msgdma_uninitialize, + .start_rxdma = msgdma_start_rxdma, +}; + +static const struct of_device_id altera_tse_ids[] = { + { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, }, + { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, }, + { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, }, + {}, +}; +MODULE_DEVICE_TABLE(of, altera_tse_ids); + +static struct platform_driver altera_tse_driver = { + .probe = altera_tse_probe, + .remove = altera_tse_remove, + .suspend = NULL, + .resume = NULL, + .driver = { + .name = ALTERA_TSE_RESOURCE_NAME, + .of_match_table = altera_tse_ids, + }, +}; + +module_platform_driver(altera_tse_driver); + +MODULE_AUTHOR("Altera Corporation"); +MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c new file mode 100644 index 000000000..d7eeb1713 --- /dev/null +++ b/drivers/net/ethernet/altera/altera_utils.c @@ -0,0 +1,44 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include "altera_tse.h" +#include "altera_utils.h" + +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ + u32 value = csrrd32(ioaddr, offs); + value |= bit_mask; + csrwr32(value, ioaddr, offs); +} + +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ + u32 value = csrrd32(ioaddr, offs); + value &= ~bit_mask; + csrwr32(value, ioaddr, offs); +} + +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ + u32 value = csrrd32(ioaddr, offs); + return (value & bit_mask) ? 1 : 0; +} + +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask) +{ + u32 value = csrrd32(ioaddr, offs); + return (value & bit_mask) ? 0 : 1; +} diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h new file mode 100644 index 000000000..baf100ccf --- /dev/null +++ b/drivers/net/ethernet/altera/altera_utils.h @@ -0,0 +1,27 @@ +/* Altera TSE SGDMA and MSGDMA Linux driver + * Copyright (C) 2014 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include + +#ifndef __ALTERA_UTILS_H__ +#define __ALTERA_UTILS_H__ + +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask); + +#endif /* __ALTERA_UTILS_H__*/ diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c new file mode 100644 index 000000000..98a10d555 --- /dev/null +++ b/drivers/net/ethernet/amd/7990.c @@ -0,0 +1,666 @@ +/* + * 7990.c -- LANCE ethernet IC generic routines. + * This is an attempt to separate out the bits of various ethernet + * drivers that are common because they all use the AMD 7990 LANCE + * (Local Area Network Controller for Ethernet) chip. + * + * Copyright (C) 05/1998 Peter Maydell + * + * Most of this stuff was obtained by looking at other LANCE drivers, + * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. + * NB: this was made easy by the fact that Jes Sorensen had cleaned up + * most of a2025 and sunlance with the aim of merging them, so the + * common code was pretty obvious. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* Used for the temporal inet entries and routing */ +#include +#include + +#include +#include +#include +#ifdef CONFIG_HP300 +#include +#endif + +#include "7990.h" + +#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x)) +#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x)) +#define READRDP(lp) in_be16(lp->base + LANCE_RDP) + +#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE) +#include "hplance.h" + +#undef WRITERAP +#undef WRITERDP +#undef READRDP + +#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE) + +/* Lossage Factor Nine, Mr Sulu. 
*/ +#define WRITERAP(lp, x) (lp->writerap(lp, x)) +#define WRITERDP(lp, x) (lp->writerdp(lp, x)) +#define READRDP(lp) (lp->readrdp(lp)) + +#else + +/* These inlines can be used if only CONFIG_HPLANCE is defined */ +static inline void WRITERAP(struct lance_private *lp, __u16 value) +{ + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static inline void WRITERDP(struct lance_private *lp, __u16 value) +{ + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static inline __u16 READRDP(struct lance_private *lp) +{ + __u16 value; + do { + value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); + return value; +} + +#endif +#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */ + +/* debugging output macros, various flavours */ +/* #define TEST_HITS */ +#ifdef UNDEF +#define PRINT_RINGS() \ +do { \ + int t; \ + for (t = 0; t < RX_RING_SIZE; t++) { \ + printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \ + t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \ + ib->brx_ring[t].length, \ + ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \ + } \ + for (t = 0; t < TX_RING_SIZE; t++) { \ + printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \ + t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \ + ib->btx_ring[t].length, \ + ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \ + } \ +} while (0) +#else +#define PRINT_RINGS() +#endif + +/* Load the CSR registers. The LANCE has to be STOPped when we do this! */ +static void load_csrs(struct lance_private *lp) +{ + volatile struct lance_init_block *aib = lp->lance_init_block; + int leptr; + + leptr = LANCE_ADDR(aib); + + WRITERAP(lp, LE_CSR1); /* load address of init block */ + WRITERDP(lp, leptr & 0xFFFF); + WRITERAP(lp, LE_CSR2); + WRITERDP(lp, leptr >> 16); + WRITERAP(lp, LE_CSR3); + WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */ + + /* Point back to csr0 */ + WRITERAP(lp, LE_CSR0); +} + +/* #define to 0 or 1 appropriately */ +#define DEBUG_IRING 0 +/* Set up the Lance Rx and Tx rings and the init block */ +static void lance_init_ring(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */ + int leptr; + int i; + + aib = lp->lance_init_block; + + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */ + + /* Copy the ethernet address to the lance init block + * Notice that we do a byteswap if we're big endian. + * [I think this is the right criterion; at least, sunlance, + * a2065 and atarilance do the byteswap and lance.c (PC) doesn't. + * However, the datasheet says that the BSWAP bit doesn't affect + * the init block, so surely it should be low byte first for + * everybody? Um.] + * We could define the ib->physaddr as three 16bit values and + * use (addr[1] << 8) | addr[0] & co, but this is more efficient. 
+ */ +#ifdef __BIG_ENDIAN + ib->phys_addr[0] = dev->dev_addr[1]; + ib->phys_addr[1] = dev->dev_addr[0]; + ib->phys_addr[2] = dev->dev_addr[3]; + ib->phys_addr[3] = dev->dev_addr[2]; + ib->phys_addr[4] = dev->dev_addr[5]; + ib->phys_addr[5] = dev->dev_addr[4]; +#else + for (i = 0; i < 6; i++) + ib->phys_addr[i] = dev->dev_addr[i]; +#endif + + if (DEBUG_IRING) + printk("TX rings:\n"); + + lp->tx_full = 0; + /* Setup the Tx ring entries */ + for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) { + leptr = LANCE_ADDR(&aib->tx_buf[i][0]); + ib->btx_ring[i].tmd0 = leptr; + ib->btx_ring[i].tmd1_hadr = leptr >> 16; + ib->btx_ring[i].tmd1_bits = 0; + ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */ + ib->btx_ring[i].misc = 0; + if (DEBUG_IRING) + printk("%d: 0x%8.8x\n", i, leptr); + } + + /* Setup the Rx ring entries */ + if (DEBUG_IRING) + printk("RX rings:\n"); + for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) { + leptr = LANCE_ADDR(&aib->rx_buf[i][0]); + + ib->brx_ring[i].rmd0 = leptr; + ib->brx_ring[i].rmd1_hadr = leptr >> 16; + ib->brx_ring[i].rmd1_bits = LE_R1_OWN; + /* 0xf000 == bits that must be one (reserved, presumably) */ + ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000; + ib->brx_ring[i].mblength = 0; + if (DEBUG_IRING) + printk("%d: 0x%8.8x\n", i, leptr); + } + + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = LANCE_ADDR(&aib->brx_ring); + ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); + ib->rx_ptr = leptr; + if (DEBUG_IRING) + printk("RX ptr: %8.8x\n", leptr); + + /* Setup tx descriptor pointer */ + leptr = LANCE_ADDR(&aib->btx_ring); + ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); + ib->tx_ptr = leptr; + if (DEBUG_IRING) + printk("TX ptr: %8.8x\n", leptr); + + /* Clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + PRINT_RINGS(); +} + +/* LANCE must be STOPped before we do this, too... */ +static int init_restart_lance(struct lance_private *lp) +{ + int i; + + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_INIT); + + /* Need a hook here for sunlance ledma stuff */ + + /* Wait for the lance to complete initialization */ + for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++) + barrier(); + if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) { + printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp)); + return -1; + } + + /* Clear IDON by writing a "1", enable interrupts and start lance */ + WRITERDP(lp, LE_C0_IDON); + WRITERDP(lp, LE_C0_INEA | LE_C0_STRT); + + return 0; +} + +static int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int status; + + /* Stop the lance */ + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + + load_csrs(lp); + lance_init_ring(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + status = init_restart_lance(lp); +#ifdef DEBUG_DRIVER + printk("Lance restart=%d\n", status); +#endif + return status; +} + +static int lance_rx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_rx_desc *rd; + unsigned char bits; +#ifdef TEST_HITS + int i; +#endif + +#ifdef TEST_HITS + printk("["); + for (i = 0; i < RX_RING_SIZE; i++) { + if (i == lp->rx_new) + printk("%s", + ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X"); + else + printk("%s", + ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." 
: "1"); + } + printk("]"); +#endif +#ifdef CONFIG_HP300 + blinken_leds(0x40, 0); +#endif + WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */ + for (rd = &ib->brx_ring[lp->rx_new]; /* For each Rx ring we own... */ + !((bits = rd->rmd1_bits) & LE_R1_OWN); + rd = &ib->brx_ring[lp->rx_new]) { + + /* We got an incomplete frame? */ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + continue; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) + dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) + dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) + dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) + dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) + dev->stats.rx_errors++; + } else { + int len = (rd->mblength & 0xfff) - 4; + struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); + + if (!skb) { + dev->stats.rx_dropped++; + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + return 0; + } + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)&(ib->rx_buf[lp->rx_new][0]), + len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + } + + /* Return the packet to the pool */ + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + } + return 0; +} + +static int lance_tx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_tx_desc *td; + int i, j; + int status; + +#ifdef CONFIG_HP300 + blinken_leds(0x80, 0); +#endif + /* csr0 is 2f3 */ + WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); + /* csr0 is 73 */ + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + td = &ib->btx_ring[i]; + + /* If we hit a packet not owned by us, stop */ + if (td->tmd1_bits & LE_T1_OWN) + break; + + if (td->tmd1_bits & LE_T1_ERR) { + status = td->misc; + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) + dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) + dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + printk("%s: Carrier Lost, trying %s\n", + dev->name, + lp->tpe ? "TPE" : "AUI"); + /* Stop the lance */ + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + return 0; + } + } + + /* buffer errors and underflows turn off the transmitter */ + /* Restart the adapter */ + if (status & (LE_T3_BUF|LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + /* Stop the lance */ + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + return 0; + } + } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + td->tmd1_bits &= ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (td->tmd1_bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. 
*/ + if (td->tmd1_bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = (j + 1) & lp->tx_ring_mod_mask; + } + lp->tx_old = j; + WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); + return 0; +} + +static irqreturn_t +lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct lance_private *lp = netdev_priv(dev); + int csr0; + + spin_lock(&lp->devlock); + + WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */ + csr0 = READRDP(lp); + + PRINT_RINGS(); + + if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */ + spin_unlock(&lp->devlock); + return IRQ_NONE; /* been generated by the Lance. */ + } + + /* Acknowledge all the interrupt sources ASAP */ + WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT)); + + if ((csr0 & LE_C0_ERR)) { + /* Clear the error condition */ + WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA); + } + + if (csr0 & LE_C0_RINT) + lance_rx(dev); + + if (csr0 & LE_C0_TINT) + lance_tx(dev); + + /* Log misc errors. */ + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & LE_C0_MERR) { + printk("%s: Bus master arbitration failure, status %4.4x.\n", + dev->name, csr0); + /* Restart the chip. */ + WRITERDP(lp, LE_C0_STRT); + } + + if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) { + lp->tx_full = 0; + netif_wake_queue(dev); + } + + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA); + + spin_unlock(&lp->devlock); + return IRQ_HANDLED; +} + +int lance_open(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int res; + + /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */ + if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev)) + return -EAGAIN; + + res = lance_reset(dev); + spin_lock_init(&lp->devlock); + netif_start_queue(dev); + + return res; +} +EXPORT_SYMBOL_GPL(lance_open); + +int lance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + + /* Stop the LANCE */ + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + + free_irq(lp->irq, dev); + + return 0; +} +EXPORT_SYMBOL_GPL(lance_close); + +void lance_tx_timeout(struct net_device *dev) +{ + printk("lance_tx_timeout\n"); + lance_reset(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} +EXPORT_SYMBOL_GPL(lance_tx_timeout); + +int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + int entry, skblen, len; + static int outs; + unsigned long flags; + + if (!TX_BUFFS_AVAIL) + return NETDEV_TX_LOCKED; + + netif_stop_queue(dev); + + skblen = skb->len; + +#ifdef DEBUG_DRIVER + /* dump the packet */ + { + int i; + + for (i = 0; i < 64; i++) { + if ((i % 16) == 0) + printk("\n"); + printk("%2.2x ", skb->data[i]); + } + } +#endif + len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; + entry = lp->tx_new & lp->tx_ring_mod_mask; + ib->btx_ring[entry].length = (-len) | 0xf000; + ib->btx_ring[entry].misc = 0; + + if (skb->len < ETH_ZLEN) + memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); + skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); + + /* Now, give the packet to the lance */ + ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); + lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask; + + outs++; + /* Kick the lance: transmit now */ + WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); + dev_consume_skb_any(skb); + + spin_lock_irqsave(&lp->devlock, flags); + if (TX_BUFFS_AVAIL) + netif_start_queue(dev); + else + lp->tx_full = 1; + spin_unlock_irqrestore(&lp->devlock, flags); + + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(lance_start_xmit); + +/* taken from the depca driver via a2065.c */ +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile u16 *mcast_table = (u16 *)&ib->filter; + struct netdev_hw_addr *ha; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = 0xffffffff; + ib->filter[1] = 0xffffffff; + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + mcast_table[crc >> 4] |= 1 << (crc & 0xf); + } +} + + +void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + int stopped; + + stopped = netif_queue_stopped(dev); + if (!stopped) + netif_stop_queue(dev); + + while (lp->tx_old != lp->tx_new) + schedule(); + + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STOP); + lance_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + ib->mode |= LE_MO_PROM; + } else { + ib->mode &= ~LE_MO_PROM; + lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + + if (!stopped) + netif_start_queue(dev); +} +EXPORT_SYMBOL_GPL(lance_set_multicast); + +#ifdef CONFIG_NET_POLL_CONTROLLER +void lance_poll(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + spin_lock(&lp->devlock); + WRITERAP(lp, LE_CSR0); + WRITERDP(lp, LE_C0_STRT); + spin_unlock(&lp->devlock); + lance_interrupt(dev->irq, dev); +} +#endif + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h new file mode 100644 index 000000000..e9e0be313 --- /dev/null +++ b/drivers/net/ethernet/amd/7990.h @@ -0,0 +1,250 @@ +/* + * 7990.h -- LANCE ethernet IC generic routines. + * This is an attempt to separate out the bits of various ethernet + * drivers that are common because they all use the AMD 7990 LANCE + * (Local Area Network Controller for Ethernet) chip. + * + * Copyright (C) 05/1998 Peter Maydell + * + * Most of this stuff was obtained by looking at other LANCE drivers, + * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. + */ + +#ifndef _7990_H +#define _7990_H + +/* The lance only has two register locations. We communicate mostly via memory. */ +#define LANCE_RDP 0 /* Register Data Port */ +#define LANCE_RAP 2 /* Register Address Port */ + +/* Transmit/receive ring definitions. + * We allow the specific drivers to override these defaults if they want to. 
+ * NB: according to lance.c, increasing the number of buffers is a waste + * of space and reduces the chance that an upper layer will be able to + * reorder queued Tx packets based on priority. [Clearly there is a minimum + * limit too: too small and we drop rx packets and can't tx at full speed.] + * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5. + */ + +/* Blast! This won't work. The problem is that we can't specify a default + * setting because that would cause the lance_init_block struct to be + * too long (and overflow the RAM on shared-memory cards like the HP LANCE. + */ +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 1 +#define LANCE_LOG_RX_BUFFERS 3 +#endif + +#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) +#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) +#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) +#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) +#define PKT_BUFF_SIZE (1544) +#define RX_BUFF_SIZE PKT_BUFF_SIZE +#define TX_BUFF_SIZE PKT_BUFF_SIZE + +/* Each receive buffer is described by a receive message descriptor (RMD) */ +struct lance_rx_desc { + volatile unsigned short rmd0; /* low address of packet */ + volatile unsigned char rmd1_bits; /* descriptor bits */ + volatile unsigned char rmd1_hadr; /* high address of packet */ + volatile short length; /* This length is 2s complement (negative)! + * Buffer length */ + volatile unsigned short mblength; /* Actual number of bytes received */ +}; + +/* Ditto for TMD: */ +struct lance_tx_desc { + volatile unsigned short tmd0; /* low address of packet */ + volatile unsigned char tmd1_bits; /* descriptor bits */ + volatile unsigned char tmd1_hadr; /* high address of packet */ + volatile short length; /* Length is 2s complement (negative)! */ + volatile unsigned short misc; +}; + +/* There are three memory structures accessed by the LANCE: + * the initialization block, the receive and transmit descriptor rings, + * and the data buffers themselves. In fact we might as well put the + * init block,the Tx and Rx rings and the buffers together in memory: + */ +struct lance_init_block { + volatile unsigned short mode; /* Pre-set mode (reg. 15) */ + volatile unsigned char phys_addr[6]; /* Physical ethernet address */ + volatile unsigned filter[2]; /* Multicast filter (64 bits) */ + + /* Receive and transmit ring base, along with extra bits. */ + volatile unsigned short rx_ptr; /* receive descriptor addr */ + volatile unsigned short rx_len; /* receive len and high addr */ + volatile unsigned short tx_ptr; /* transmit descriptor addr */ + volatile unsigned short tx_len; /* transmit len and high addr */ + + /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. + * This will be true if this whole struct is 8-byte aligned. + */ + volatile struct lance_tx_desc btx_ring[TX_RING_SIZE]; + volatile struct lance_rx_desc brx_ring[RX_RING_SIZE]; + + volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE]; + volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE]; + /* we use this just to make the struct big enough that we can move its startaddr + * in order to force alignment to an eight byte boundary. + */ +}; + +/* This is where we keep all the stuff the driver needs to know about. + * I'm definitely unhappy about the mechanism for allowing specific + * drivers to add things... 
+ */ +struct lance_private { + const char *name; + unsigned long base; + volatile struct lance_init_block *init_block; /* CPU address of RAM */ + volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */ + + int rx_new, tx_new; + int rx_old, tx_old; + + int lance_log_rx_bufs, lance_log_tx_bufs; + int rx_ring_mod_mask, tx_ring_mod_mask; + + int tpe; /* TPE is selected */ + int auto_select; /* cable-selection is by carrier */ + unsigned short busmaster_regval; + + unsigned int irq; /* IRQ to register */ + + /* This is because the HP LANCE is disgusting and you have to check + * a DIO-specific register every time you read/write the LANCE regs :-< + * [could we get away with making these some sort of macro?] + */ + void (*writerap)(void *, unsigned short); + void (*writerdp)(void *, unsigned short); + unsigned short (*readrdp)(void *); + spinlock_t devlock; + char tx_full; +}; + +/* + * Am7990 Control and Status Registers + */ +#define LE_CSR0 0x0000 /* LANCE Controller Status */ +#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */ +#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */ +#define LE_CSR3 0x0003 /* Misc */ + +/* + * Bit definitions for CSR0 (LANCE Controller Status) + */ +#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */ +#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */ +#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */ +#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */ +#define LE_C0_MERR 0x0800 /* Memory Error */ +#define LE_C0_RINT 0x0400 /* Receive Interrupt */ +#define LE_C0_TINT 0x0200 /* Transmit Interrupt */ +#define LE_C0_IDON 0x0100 /* Initialization Done */ +#define LE_C0_INTR 0x0080 /* Interrupt Flag + = BABL | MISS | MERR | RINT | TINT | IDON */ +#define LE_C0_INEA 0x0040 /* Interrupt Enable */ +#define LE_C0_RXON 0x0020 /* Receive On */ +#define LE_C0_TXON 0x0010 /* Transmit On */ +#define LE_C0_TDMD 0x0008 /* Transmit Demand */ +#define LE_C0_STOP 0x0004 /* Stop */ +#define LE_C0_STRT 0x0002 /* Start */ +#define LE_C0_INIT 0x0001 /* Initialize */ + + +/* + * Bit definitions for CSR3 + */ +#define LE_C3_BSWP 0x0004 /* Byte Swap (on for big endian byte order) */ +#define LE_C3_ACON 0x0002 /* ALE Control (on for active low ALE) */ +#define LE_C3_BCON 0x0001 /* Byte Control */ + + +/* + * Mode Flags + */ +#define LE_MO_PROM 0x8000 /* Promiscuous Mode */ +/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990, + * but they are in NetBSD's am7990.h, presumably for backwards-compatible chips + */ +#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */ +#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */ +#define LE_MO_DLNKTST 0x1000 /* disable link status */ +#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */ +#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */ +#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */ +#define LE_MO_PSEL1 0x0100 /* port selection bit1 */ +#define LE_MO_PSEL0 0x0080 /* port selection bit0 */ +/* and this one is from the C-LANCE data sheet... 
*/ +#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm + (C-LANCE, not original LANCE) */ +#define LE_MO_INTL 0x0040 /* Internal Loopback */ +#define LE_MO_DRTY 0x0020 /* Disable Retry */ +#define LE_MO_FCOLL 0x0010 /* Force Collision */ +#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */ +#define LE_MO_LOOP 0x0004 /* Loopback Enable */ +#define LE_MO_DTX 0x0002 /* Disable Transmitter */ +#define LE_MO_DRX 0x0001 /* Disable Receiver */ + + +/* + * Receive Flags + */ +#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */ +#define LE_R1_ERR 0x40 /* Error */ +#define LE_R1_FRA 0x20 /* Framing Error */ +#define LE_R1_OFL 0x10 /* Overflow Error */ +#define LE_R1_CRC 0x08 /* CRC Error */ +#define LE_R1_BUF 0x04 /* Buffer Error */ +#define LE_R1_SOP 0x02 /* Start of Packet */ +#define LE_R1_EOP 0x01 /* End of Packet */ +#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ + + +/* + * Transmit Flags + */ +#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */ +#define LE_T1_ERR 0x40 /* Error */ +#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */ +#define LE_T1_EMORE 0x10 /* More than one retry needed */ +#define LE_T1_EONE 0x08 /* One retry needed */ +#define LE_T1_EDEF 0x04 /* Deferred */ +#define LE_T1_SOP 0x02 /* Start of Packet */ +#define LE_T1_EOP 0x01 /* End of Packet */ +#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ + +/* + * Error Flags + */ +#define LE_T3_BUF 0x8000 /* Buffer Error */ +#define LE_T3_UFL 0x4000 /* Underflow Error */ +#define LE_T3_LCOL 0x1000 /* Late Collision */ +#define LE_T3_CLOS 0x0800 /* Loss of Carrier */ +#define LE_T3_RTY 0x0400 /* Retry Error */ +#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */ + +/* Miscellaneous useful macros */ + +#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \ + lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \ + lp->tx_old - lp->tx_new - 1) + +/* The LANCE only uses 24 bit addresses. This does the obvious thing. */ +#define LANCE_ADDR(x) ((int)(x) & ~0xff000000) + +/* Now the prototypes we export */ +int lance_open(struct net_device *dev); +int lance_close(struct net_device *dev); +int lance_start_xmit(struct sk_buff *skb, struct net_device *dev); +void lance_set_multicast(struct net_device *dev); +void lance_tx_timeout(struct net_device *dev); +#ifdef CONFIG_NET_POLL_CONTROLLER +void lance_poll(struct net_device *dev); +#endif + +#endif /* ndef _7990_H */ diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig new file mode 100644 index 000000000..426916036 --- /dev/null +++ b/drivers/net/ethernet/amd/Kconfig @@ -0,0 +1,206 @@ +# +# AMD network device configuration +# + +config NET_VENDOR_AMD + bool "AMD devices" + default y + depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \ + SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \ + (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64 + ---help--- + If you have a network (Ethernet) chipset belonging to this class, + say Y. + + Note that the answer to this question does not directly affect + the kernel: saying N will just case the configurator to skip all + the questions regarding AMD chipsets. If you say Y, you will be asked + for your specific chipset/driver in the following questions. + +if NET_VENDOR_AMD + +config A2065 + tristate "A2065 support" + depends on ZORRO + select CRC32 + ---help--- + If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise, + say N. + + To compile this driver as a module, choose M here: the module + will be called a2065. 
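+
+# Example: selecting this as a module puts CONFIG_A2065=m in the kernel
+# .config, the build produces a2065.ko, and the driver is then loaded with
+# "modprobe a2065".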
+ +config AMD8111_ETH + tristate "AMD 8111 (new PCI LANCE) support" + depends on PCI + select CRC32 + select MII + ---help--- + If you have an AMD 8111-based PCI LANCE ethernet card, + answer Y here and read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called amd8111e. + +config LANCE + tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" + depends on ISA && ISA_DMA_API && !ARM + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . Some LinkSys cards are + of this type. + + To compile this driver as a module, choose M here: the module + will be called lance. This is recommended. + +config PCNET32 + tristate "AMD PCnet32 PCI support" + depends on PCI + select CRC32 + select MII + ---help--- + If you have a PCnet32 or PCnetPCI based network (Ethernet) card, + answer Y here and read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called pcnet32. + +config ARIADNE + tristate "Ariadne support" + depends on ZORRO + ---help--- + If you have a Village Tronic Ariadne Ethernet adapter, say Y. + Otherwise, say N. + + To compile this driver as a module, choose M here: the module + will be called ariadne. + +config ARM_AM79C961A + bool "ARM EBSA110 AM79C961A support" + depends on ARM && ARCH_EBSA110 + select CRC32 + ---help--- + If you wish to compile a kernel for the EBSA-110, then you should + always answer Y to this. + +config ATARILANCE + tristate "Atari LANCE support" + depends on ATARI + ---help--- + Say Y to include support for several Atari Ethernet adapters based + on the AMD LANCE chipset: RieblCard (with or without battery), or + PAMCard VME (also the version by Rhotron, with different addresses). + +config DECLANCE + tristate "DEC LANCE ethernet controller support" + depends on MACH_DECSTATION + select CRC32 + ---help--- + This driver is for the series of Ethernet controllers produced by + DEC (now Compaq) based on the AMD LANCE chipset, including the + DEPCA series. (This chipset is better known via the NE2100 cards.) + +config HPLANCE + bool "HP on-board LANCE support" + depends on DIO + select CRC32 + ---help--- + If you want to use the builtin "LANCE" Ethernet controller on an + HP300 machine, say Y here. + +config MIPS_AU1X00_ENET + tristate "MIPS AU1000 Ethernet support" + depends on MIPS_ALCHEMY + select PHYLIB + select CRC32 + ---help--- + If you have an Alchemy Semi AU1X00 based system + say Y. Otherwise, say N. + +config MVME147_NET + tristate "MVME147 (LANCE) Ethernet support" + depends on MVME147 + select CRC32 + ---help--- + Support for the on-board Ethernet interface on the Motorola MVME147 + single-board computer. Say Y here to include the + driver for this chip in your kernel. + To compile this driver as a module, choose M here. + +config PCMCIA_NMCLAN + tristate "New Media PCMCIA support" + depends on PCMCIA + help + Say Y here if you intend to attach a New Media Ethernet or LiveWire + PCMCIA (PC-card) Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called nmclan_cs. If unsure, say N. + +config NI65 + tristate "NI6510 support" + depends on ISA && ISA_DMA_API && !ARM + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called ni65. 
+ +config SUN3LANCE + tristate "Sun3/Sun3x on-board LANCE support" + depends on (SUN3 || SUN3X) + ---help--- + Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80) + featured an AMD LANCE 10Mbit Ethernet controller on board; say Y + here to compile in the Linux driver for this and enable Ethernet. + General Linux information on the Sun 3 and 3x series (now + discontinued) is at + . + + If you're not building a kernel for a Sun 3, say N. + +config SUNLANCE + tristate "Sun LANCE support" + depends on SBUS + select CRC32 + ---help--- + This driver supports the "le" interface present on all 32-bit Sparc + systems, on some older Ultra systems and as an Sbus option. These + cards are based on the AMD LANCE chipset, which is better known + via the NE2100 cards. + + To compile this driver as a module, choose M here: the module + will be called sunlance. + +config AMD_XGBE + tristate "AMD 10GbE Ethernet driver" + depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA + depends on ARM64 || COMPILE_TEST + select PHYLIB + select AMD_XGBE_PHY + select BITREVERSE + select CRC32 + select PTP_1588_CLOCK + ---help--- + This driver supports the AMD 10GbE Ethernet device found on an + AMD SoC. + + To compile this driver as a module, choose M here: the module + will be called amd-xgbe. + +config AMD_XGBE_DCB + bool "Data Center Bridging (DCB) support" + default n + depends on AMD_XGBE && DCB + ---help--- + Say Y here to enable Data Center Bridging (DCB) support in the + driver. + + If unsure, say N. + +endif # NET_VENDOR_AMD diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile new file mode 100644 index 000000000..a38a2dce3 --- /dev/null +++ b/drivers/net/ethernet/amd/Makefile @@ -0,0 +1,20 @@ +# +# Makefile for the AMD network device drivers. +# + +obj-$(CONFIG_A2065) += a2065.o +obj-$(CONFIG_AMD8111_ETH) += amd8111e.o +obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o +obj-$(CONFIG_ARIADNE) += ariadne.o +obj-$(CONFIG_ATARILANCE) += atarilance.o +obj-$(CONFIG_DECLANCE) += declance.o +obj-$(CONFIG_HPLANCE) += hplance.o 7990.o +obj-$(CONFIG_LANCE) += lance.o +obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o +obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o +obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o +obj-$(CONFIG_NI65) += ni65.o +obj-$(CONFIG_PCNET32) += pcnet32.o +obj-$(CONFIG_SUN3LANCE) += sun3lance.o +obj-$(CONFIG_SUNLANCE) += sunlance.o +obj-$(CONFIG_AMD_XGBE) += xgbe/ diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c new file mode 100644 index 000000000..56139184b --- /dev/null +++ b/drivers/net/ethernet/amd/a2065.c @@ -0,0 +1,783 @@ +/* + * Amiga Linux/68k A2065 Ethernet Driver + * + * (C) Copyright 1995-2003 by Geert Uytterhoeven + * + * Fixes and tips by: + * - Janos Farkas (CHEXUM@sparta.banki.hu) + * - Jes Degn Soerensen (jds@kom.auc.dk) + * - Matt Domsch (Matt_Domsch@dell.com) + * + * ---------------------------------------------------------------------------- + * + * This program is based on + * + * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver + * (C) Copyright 1995 by Geert Uytterhoeven, + * Peter De Schrijver + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. B, Amendment/0, May 1994 + * + * ---------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * ---------------------------------------------------------------------------- + * + * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains: + * + * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with + * both 10BASE-2 (thin coax) and AUI (DB-15) connectors + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/*#define DEBUG*/ +/*#define TEST_HITS*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "a2065.h" + +/* Transmit/Receive Ring Definitions */ + +#define LANCE_LOG_TX_BUFFERS (2) +#define LANCE_LOG_RX_BUFFERS (4) + +#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) +#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS) + +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +#define PKT_BUF_SIZE (1544) +#define RX_BUFF_SIZE PKT_BUF_SIZE +#define TX_BUFF_SIZE PKT_BUF_SIZE + +/* Layout of the Lance's RAM Buffer */ + +struct lance_init_block { + unsigned short mode; /* Pre-set mode (reg. 15) */ + unsigned char phys_addr[6]; /* Physical ethernet address */ + unsigned filter[2]; /* Multicast filter. */ + + /* Receive and transmit ring base, along with extra bits. */ + unsigned short rx_ptr; /* receive descriptor addr */ + unsigned short rx_len; /* receive len and high addr */ + unsigned short tx_ptr; /* transmit descriptor addr */ + unsigned short tx_len; /* transmit len and high addr */ + + /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ + struct lance_rx_desc brx_ring[RX_RING_SIZE]; + struct lance_tx_desc btx_ring[TX_RING_SIZE]; + + char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE]; + char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE]; +}; + +/* Private Device Data */ + +struct lance_private { + char *name; + volatile struct lance_regs *ll; + volatile struct lance_init_block *init_block; /* Hosts view */ + volatile struct lance_init_block *lance_init_block; /* Lance view */ + + int rx_new, tx_new; + int rx_old, tx_old; + + int lance_log_rx_bufs, lance_log_tx_bufs; + int rx_ring_mod_mask, tx_ring_mod_mask; + + int tpe; /* cable-selection is TPE */ + int auto_select; /* cable-selection by carrier */ + unsigned short busmaster_regval; + +#ifdef CONFIG_SUNLANCE + struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */ + int burst_sizes; /* ledma SBus burst sizes */ +#endif + struct timer_list multicast_timer; +}; + +#define LANCE_ADDR(x) ((int)(x) & ~0xff000000) + +/* Load the CSR registers */ +static void load_csrs(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_init_block *aib = lp->lance_init_block; + int leptr = LANCE_ADDR(aib); + + ll->rap = LE_CSR1; + ll->rdp = (leptr & 0xFFFF); + ll->rap = LE_CSR2; + ll->rdp = leptr >> 16; + ll->rap = LE_CSR3; + ll->rdp = lp->busmaster_regval; + + /* Point back to csr0 */ + ll->rap = LE_CSR0; +} + +/* Setup the Lance Rx and Tx rings */ +static void lance_init_ring(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_init_block *aib = lp->lance_init_block; + /* for LANCE_ADDR computations */ + int leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + ib->mode = 0; + + 
/* Copy the ethernet address to the lance init block + * Note that on the sparc you need to swap the ethernet address. + */ + ib->phys_addr[0] = dev->dev_addr[1]; + ib->phys_addr[1] = dev->dev_addr[0]; + ib->phys_addr[2] = dev->dev_addr[3]; + ib->phys_addr[3] = dev->dev_addr[2]; + ib->phys_addr[4] = dev->dev_addr[5]; + ib->phys_addr[5] = dev->dev_addr[4]; + + /* Setup the Tx ring entries */ + netdev_dbg(dev, "TX rings:\n"); + for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) { + leptr = LANCE_ADDR(&aib->tx_buf[i][0]); + ib->btx_ring[i].tmd0 = leptr; + ib->btx_ring[i].tmd1_hadr = leptr >> 16; + ib->btx_ring[i].tmd1_bits = 0; + ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */ + ib->btx_ring[i].misc = 0; + if (i < 3) + netdev_dbg(dev, "%d: 0x%08x\n", i, leptr); + } + + /* Setup the Rx ring entries */ + netdev_dbg(dev, "RX rings:\n"); + for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) { + leptr = LANCE_ADDR(&aib->rx_buf[i][0]); + + ib->brx_ring[i].rmd0 = leptr; + ib->brx_ring[i].rmd1_hadr = leptr >> 16; + ib->brx_ring[i].rmd1_bits = LE_R1_OWN; + ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000; + ib->brx_ring[i].mblength = 0; + if (i < 3) + netdev_dbg(dev, "%d: 0x%08x\n", i, leptr); + } + + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = LANCE_ADDR(&aib->brx_ring); + ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); + ib->rx_ptr = leptr; + netdev_dbg(dev, "RX ptr: %08x\n", leptr); + + /* Setup tx descriptor pointer */ + leptr = LANCE_ADDR(&aib->btx_ring); + ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); + ib->tx_ptr = leptr; + netdev_dbg(dev, "TX ptr: %08x\n", leptr); + + /* Clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; +} + +static int init_restart_lance(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + int i; + + ll->rap = LE_CSR0; + ll->rdp = LE_C0_INIT; + + /* Wait for the lance to complete initialization */ + for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++) + barrier(); + if ((i == 100) || (ll->rdp & LE_C0_ERR)) { + pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp); + return -EIO; + } + + /* Clear IDON by writing a "1", enable interrupts and start lance */ + ll->rdp = LE_C0_IDON; + ll->rdp = LE_C0_INEA | LE_C0_STRT; + + return 0; +} + +static int lance_rx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_rx_desc *rd; + unsigned char bits; + +#ifdef TEST_HITS + int i; + char buf[RX_RING_SIZE + 1]; + + for (i = 0; i < RX_RING_SIZE; i++) { + char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN; + if (i == lp->rx_new) + buf[i] = r1_own ? '_' : 'X'; + else + buf[i] = r1_own ? '.' : '1'; + } + buf[RX_RING_SIZE] = 0; + + pr_debug("RxRing TestHits: [%s]\n", buf); +#endif + + ll->rdp = LE_C0_RINT | LE_C0_INEA; + for (rd = &ib->brx_ring[lp->rx_new]; + !((bits = rd->rmd1_bits) & LE_R1_OWN); + rd = &ib->brx_ring[lp->rx_new]) { + + /* We got an incomplete frame? 
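+ * (LE_R1_POK is SOP | EOP, so this catches frames that did not both
+ * start and end in this single receive buffer)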
*/ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + continue; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) + dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) + dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) + dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) + dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) + dev->stats.rx_errors++; + } else { + int len = (rd->mblength & 0xfff) - 4; + struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); + + if (!skb) { + dev->stats.rx_dropped++; + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + return 0; + } + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)&ib->rx_buf[lp->rx_new][0], + len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + } + + /* Return the packet to the pool */ + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; + } + return 0; +} + +static int lance_tx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_tx_desc *td; + int i, j; + int status; + + /* csr0 is 2f3 */ + ll->rdp = LE_C0_TINT | LE_C0_INEA; + /* csr0 is 73 */ + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + td = &ib->btx_ring[i]; + + /* If we hit a packet not owned by us, stop */ + if (td->tmd1_bits & LE_T1_OWN) + break; + + if (td->tmd1_bits & LE_T1_ERR) { + status = td->misc; + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) + dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) + dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + netdev_err(dev, "Carrier Lost, trying %s\n", + lp->tpe ? "TPE" : "AUI"); + /* Stop the lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + return 0; + } + } + + /* buffer errors and underflows turn off + * the transmitter, so restart the adapter + */ + if (status & (LE_T3_BUF | LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n"); + /* Stop the lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + return 0; + } + } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { + /* So we don't count the packet more than once. */ + td->tmd1_bits &= ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (td->tmd1_bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. 
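+ * (LE_T1_EMORE only reports that more than one retry was needed, not
+ * how many, so two collisions are credited as an estimate)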
*/ + if (td->tmd1_bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = (j + 1) & lp->tx_ring_mod_mask; + } + lp->tx_old = j; + ll->rdp = LE_C0_TINT | LE_C0_INEA; + return 0; +} + +static int lance_tx_buffs_avail(struct lance_private *lp) +{ + if (lp->tx_old <= lp->tx_new) + return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new; + return lp->tx_old - lp->tx_new - 1; +} + +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int csr0; + + ll->rap = LE_CSR0; /* LANCE Controller Status */ + csr0 = ll->rdp; + + if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */ + return IRQ_NONE; /* been generated by the Lance. */ + + /* Acknowledge all the interrupt sources ASAP */ + ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT | + LE_C0_INIT); + + if (csr0 & LE_C0_ERR) { + /* Clear the error condition */ + ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA; + } + + if (csr0 & LE_C0_RINT) + lance_rx(dev); + + if (csr0 & LE_C0_TINT) + lance_tx(dev); + + /* Log misc errors. */ + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & LE_C0_MERR) { + netdev_err(dev, "Bus master arbitration failure, status %04x\n", + csr0); + /* Restart the chip. */ + ll->rdp = LE_C0_STRT; + } + + if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0) + netif_wake_queue(dev); + + ll->rap = LE_CSR0; + ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR | + LE_C0_IDON | LE_C0_INEA); + return IRQ_HANDLED; +} + +static int lance_open(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int ret; + + /* Stop the Lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + + /* Install the Interrupt handler */ + ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) + return ret; + + load_csrs(lp); + lance_init_ring(dev); + + netif_start_queue(dev); + + return init_restart_lance(lp); +} + +static int lance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + /* Stop the card */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + + free_irq(IRQ_AMIGA_PORTS, dev); + return 0; +} + +static inline int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int status; + + /* Stop the lance */ + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + + load_csrs(lp); + + lance_init_ring(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_start_queue(dev); + + status = init_restart_lance(lp); + netdev_dbg(dev, "Lance restart=%d\n", status); + + return status; +} + +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp); + lance_reset(dev); + netif_wake_queue(dev); +} + +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + volatile struct lance_init_block *ib = lp->init_block; + int entry, skblen; + int status = NETDEV_TX_OK; + 
unsigned long flags; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + skblen = max_t(unsigned, skb->len, ETH_ZLEN); + + local_irq_save(flags); + + if (!lance_tx_buffs_avail(lp)) { + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } + +#ifdef DEBUG + /* dump the packet */ + print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE, + 16, 1, skb->data, 64, true); +#endif + entry = lp->tx_new & lp->tx_ring_mod_mask; + ib->btx_ring[entry].length = (-skblen) | 0xf000; + ib->btx_ring[entry].misc = 0; + + skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); + + /* Now, give the packet to the lance */ + ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); + lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; + dev->stats.tx_bytes += skblen; + + if (lance_tx_buffs_avail(lp) <= 0) + netif_stop_queue(dev); + + /* Kick the lance: transmit now */ + ll->rdp = LE_C0_INEA | LE_C0_TDMD; + dev_kfree_skb(skb); + + local_irq_restore(flags); + + return status; +} + +/* taken from the depca driver */ +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile u16 *mcast_table = (u16 *)&ib->filter; + struct netdev_hw_addr *ha; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = 0xffffffff; + ib->filter[1] = 0xffffffff; + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + mcast_table[crc >> 4] |= 1 << (crc & 0xf); + } +} + +static void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_init_block *ib = lp->init_block; + volatile struct lance_regs *ll = lp->ll; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + ll->rap = LE_CSR0; + ll->rdp = LE_C0_STOP; + lance_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + ib->mode |= LE_MO_PROM; + } else { + ib->mode &= ~LE_MO_PROM; + lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); +} + +static int a2065_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent); +static void a2065_remove_one(struct zorro_dev *z); + + +static struct zorro_device_id a2065_zorro_tbl[] = { + { ZORRO_PROD_CBM_A2065_1 }, + { ZORRO_PROD_CBM_A2065_2 }, + { ZORRO_PROD_AMERISTAR_A2065 }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl); + +static struct zorro_driver a2065_driver = { + .name = "a2065", + .id_table = a2065_zorro_tbl, + .probe = a2065_init_one, + .remove = a2065_remove_one, +}; + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_rx_mode = lance_set_multicast, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int a2065_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + struct net_device *dev; + struct lance_private *priv; + unsigned long board = z->resource.start; + unsigned long base_addr = board + A2065_LANCE; + unsigned long mem_start = board + A2065_RAM; + struct resource *r1, *r2; + u32 serial; + int err; + + r1 = 
request_mem_region(base_addr, sizeof(struct lance_regs), + "Am7990"); + if (!r1) + return -EBUSY; + r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM"); + if (!r2) { + release_mem_region(base_addr, sizeof(struct lance_regs)); + return -EBUSY; + } + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (dev == NULL) { + release_mem_region(base_addr, sizeof(struct lance_regs)); + release_mem_region(mem_start, A2065_RAM_SIZE); + return -ENOMEM; + } + + priv = netdev_priv(dev); + + r1->name = dev->name; + r2->name = dev->name; + + serial = be32_to_cpu(z->rom.er_SerialNumber); + dev->dev_addr[0] = 0x00; + if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */ + dev->dev_addr[1] = 0x80; + dev->dev_addr[2] = 0x10; + } else { /* Ameristar */ + dev->dev_addr[1] = 0x00; + dev->dev_addr[2] = 0x9f; + } + dev->dev_addr[3] = (serial >> 16) & 0xff; + dev->dev_addr[4] = (serial >> 8) & 0xff; + dev->dev_addr[5] = serial & 0xff; + dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr); + dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start); + dev->mem_end = dev->mem_start + A2065_RAM_SIZE; + + priv->ll = (volatile struct lance_regs *)dev->base_addr; + priv->init_block = (struct lance_init_block *)dev->mem_start; + priv->lance_init_block = (struct lance_init_block *)A2065_RAM; + priv->auto_select = 0; + priv->busmaster_regval = LE_C3_BSWP; + + priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; + priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; + priv->rx_ring_mod_mask = RX_RING_MOD_MASK; + priv->tx_ring_mod_mask = TX_RING_MOD_MASK; + + dev->netdev_ops = &lance_netdev_ops; + dev->watchdog_timeo = 5*HZ; + dev->dma = 0; + + init_timer(&priv->multicast_timer); + priv->multicast_timer.data = (unsigned long) dev; + priv->multicast_timer.function = + (void (*)(unsigned long))lance_set_multicast; + + err = register_netdev(dev); + if (err) { + release_mem_region(base_addr, sizeof(struct lance_regs)); + release_mem_region(mem_start, A2065_RAM_SIZE); + free_netdev(dev); + return err; + } + zorro_set_drvdata(z, dev); + + netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n", + board, dev->dev_addr); + + return 0; +} + + +static void a2065_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + release_mem_region(ZTWO_PADDR(dev->base_addr), + sizeof(struct lance_regs)); + release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE); + free_netdev(dev); +} + +static int __init a2065_init_module(void) +{ + return zorro_register_driver(&a2065_driver); +} + +static void __exit a2065_cleanup_module(void) +{ + zorro_unregister_driver(&a2065_driver); +} + +module_init(a2065_init_module); +module_exit(a2065_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/a2065.h b/drivers/net/ethernet/amd/a2065.h new file mode 100644 index 000000000..5117759d4 --- /dev/null +++ b/drivers/net/ethernet/amd/a2065.h @@ -0,0 +1,173 @@ +/* + * Amiga Linux/68k A2065 Ethernet Driver + * + * (C) Copyright 1995 by Geert Uytterhoeven + * + * --------------------------------------------------------------------------- + * + * This program is based on + * + * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver + * (C) Copyright 1995 by Geert Uytterhoeven, + * Peter De Schrijver + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. 
B, Amendment/0, May 1994 + * + * --------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * --------------------------------------------------------------------------- + * + * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains: + * + * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with + * both 10BASE-2 (thin coax) and AUI (DB-15) connectors + */ + + +/* + * Am7990 Local Area Network Controller for Ethernet (LANCE) + */ + +struct lance_regs { + unsigned short rdp; /* Register Data Port */ + unsigned short rap; /* Register Address Port */ +}; + + +/* + * Am7990 Control and Status Registers + */ + +#define LE_CSR0 0x0000 /* LANCE Controller Status */ +#define LE_CSR1 0x0001 /* IADR[15:0] */ +#define LE_CSR2 0x0002 /* IADR[23:16] */ +#define LE_CSR3 0x0003 /* Misc */ + + +/* + * Bit definitions for CSR0 (LANCE Controller Status) + */ + +#define LE_C0_ERR 0x8000 /* Error */ +#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */ +#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */ +#define LE_C0_MISS 0x1000 /* Missed Frame */ +#define LE_C0_MERR 0x0800 /* Memory Error */ +#define LE_C0_RINT 0x0400 /* Receive Interrupt */ +#define LE_C0_TINT 0x0200 /* Transmit Interrupt */ +#define LE_C0_IDON 0x0100 /* Initialization Done */ +#define LE_C0_INTR 0x0080 /* Interrupt Flag */ +#define LE_C0_INEA 0x0040 /* Interrupt Enable */ +#define LE_C0_RXON 0x0020 /* Receive On */ +#define LE_C0_TXON 0x0010 /* Transmit On */ +#define LE_C0_TDMD 0x0008 /* Transmit Demand */ +#define LE_C0_STOP 0x0004 /* Stop */ +#define LE_C0_STRT 0x0002 /* Start */ +#define LE_C0_INIT 0x0001 /* Initialize */ + + +/* + * Bit definitions for CSR3 + */ + +#define LE_C3_BSWP 0x0004 /* Byte Swap + (on for big endian byte order) */ +#define LE_C3_ACON 0x0002 /* ALE Control + (on for active low ALE) */ +#define LE_C3_BCON 0x0001 /* Byte Control */ + + +/* + * Mode Flags + */ + +#define LE_MO_PROM 0x8000 /* Promiscuous Mode */ +#define LE_MO_INTL 0x0040 /* Internal Loopback */ +#define LE_MO_DRTY 0x0020 /* Disable Retry */ +#define LE_MO_FCOLL 0x0010 /* Force Collision */ +#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */ +#define LE_MO_LOOP 0x0004 /* Loopback Enable */ +#define LE_MO_DTX 0x0002 /* Disable Transmitter */ +#define LE_MO_DRX 0x0001 /* Disable Receiver */ + + +struct lance_rx_desc { + unsigned short rmd0; /* low address of packet */ + unsigned char rmd1_bits; /* descriptor bits */ + unsigned char rmd1_hadr; /* high address of packet */ + short length; /* This length is 2s complement (negative)! + * Buffer length + */ + unsigned short mblength; /* Aactual number of bytes received */ +}; + +struct lance_tx_desc { + unsigned short tmd0; /* low address of packet */ + unsigned char tmd1_bits; /* descriptor bits */ + unsigned char tmd1_hadr; /* high address of packet */ + short length; /* Length is 2s complement (negative)! 
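+ * The driver writes -len with the top four
+ * "must be one" bits set, e.g.
+ * (-skblen) | 0xf000 in lance_start_xmit().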
*/ + unsigned short misc; +}; + + +/* + * Receive Flags + */ + +#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */ +#define LE_R1_ERR 0x40 /* Error */ +#define LE_R1_FRA 0x20 /* Framing Error */ +#define LE_R1_OFL 0x10 /* Overflow Error */ +#define LE_R1_CRC 0x08 /* CRC Error */ +#define LE_R1_BUF 0x04 /* Buffer Error */ +#define LE_R1_SOP 0x02 /* Start of Packet */ +#define LE_R1_EOP 0x01 /* End of Packet */ +#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ + + +/* + * Transmit Flags + */ + +#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */ +#define LE_T1_ERR 0x40 /* Error */ +#define LE_T1_RES 0x20 /* Reserved, + LANCE writes this with a zero */ +#define LE_T1_EMORE 0x10 /* More than one retry needed */ +#define LE_T1_EONE 0x08 /* One retry needed */ +#define LE_T1_EDEF 0x04 /* Deferred */ +#define LE_T1_SOP 0x02 /* Start of Packet */ +#define LE_T1_EOP 0x01 /* End of Packet */ +#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ + + +/* + * Error Flags + */ + +#define LE_T3_BUF 0x8000 /* Buffer Error */ +#define LE_T3_UFL 0x4000 /* Underflow Error */ +#define LE_T3_LCOL 0x1000 /* Late Collision */ +#define LE_T3_CLOS 0x0800 /* Loss of Carrier */ +#define LE_T3_RTY 0x0400 /* Retry Error */ +#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */ + + +/* + * A2065 Expansion Board Structure + */ + +#define A2065_LANCE 0x4000 + +#define A2065_RAM 0x8000 +#define A2065_RAM_SIZE 0x8000 + diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c new file mode 100644 index 000000000..87e727b92 --- /dev/null +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -0,0 +1,768 @@ +/* + * linux/drivers/net/ethernet/amd/am79c961a.c + * + * by Russell King 1995-2001. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Derived from various things including skeleton.c + * + * This is a special driver for the am79c961A Lance chip used in the + * Intel (formally Digital Equipment Corp) EBSA110 platform. Please + * note that this can not be built as a module (it doesn't make sense). 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define TX_BUFFERS 15 +#define RX_BUFFERS 25 + +#include "am79c961a.h" + +static irqreturn_t +am79c961_interrupt (int irq, void *dev_id); + +static unsigned int net_debug = NET_DEBUG; + +static const char version[] = + "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n"; + +/* --------------------------------------------------------------------------- */ + +#ifdef __arm__ +static void write_rreg(u_long base, u_int reg, u_int val) +{ + asm volatile( + "str%?h %1, [%2] @ NET_RAP\n\t" + "str%?h %0, [%2, #-4] @ NET_RDP" + : + : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); +} + +static inline unsigned short read_rreg(u_long base_addr, u_int reg) +{ + unsigned short v; + asm volatile( + "str%?h %1, [%2] @ NET_RAP\n\t" + "ldr%?h %0, [%2, #-4] @ NET_RDP" + : "=r" (v) + : "r" (reg), "r" (ISAIO_BASE + 0x0464)); + return v; +} + +static inline void write_ireg(u_long base, u_int reg, u_int val) +{ + asm volatile( + "str%?h %1, [%2] @ NET_RAP\n\t" + "str%?h %0, [%2, #8] @ NET_IDP" + : + : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); +} + +static inline unsigned short read_ireg(u_long base_addr, u_int reg) +{ + u_short v; + asm volatile( + "str%?h %1, [%2] @ NAT_RAP\n\t" + "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" + : "=r" (v) + : "r" (reg), "r" (ISAIO_BASE + 0x0464)); + return v; +} + +#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1)) +#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1)) + +static void +am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) +{ + offset = ISAMEM_BASE + (offset << 1); + length = (length + 1) & ~1; + if ((int)buf & 2) { + asm volatile("str%?h %2, [%0], #4" + : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); + buf += 2; + length -= 2; + } + while (length > 8) { + register unsigned int tmp asm("r2"), tmp2 asm("r3"); + asm volatile( + "ldm%?ia %0!, {%1, %2}" + : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); + length -= 8; + asm volatile( + "str%?h %1, [%0], #4\n\t" + "mov%? %1, %1, lsr #16\n\t" + "str%?h %1, [%0], #4\n\t" + "str%?h %2, [%0], #4\n\t" + "mov%? %2, %2, lsr #16\n\t" + "str%?h %2, [%0], #4" + : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); + } + while (length > 0) { + asm volatile("str%?h %2, [%0], #4" + : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); + buf += 2; + length -= 2; + } +} + +static void +am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) +{ + offset = ISAMEM_BASE + (offset << 1); + length = (length + 1) & ~1; + if ((int)buf & 2) { + unsigned int tmp; + asm volatile( + "ldr%?h %2, [%0], #4\n\t" + "str%?b %2, [%1], #1\n\t" + "mov%? %2, %2, lsr #8\n\t" + "str%?b %2, [%1], #1" + : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); + length -= 2; + } + while (length > 8) { + register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; + asm volatile( + "ldr%?h %2, [%0], #4\n\t" + "ldr%?h %4, [%0], #4\n\t" + "ldr%?h %3, [%0], #4\n\t" + "orr%? %2, %2, %4, lsl #16\n\t" + "ldr%?h %4, [%0], #4\n\t" + "orr%? %3, %3, %4, lsl #16\n\t" + "stm%?ia %1!, {%2, %3}" + : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) + : "0" (offset), "1" (buf)); + length -= 8; + } + while (length > 0) { + unsigned int tmp; + asm volatile( + "ldr%?h %2, [%0], #4\n\t" + "str%?b %2, [%1], #1\n\t" + "mov%? 
%2, %2, lsr #8\n\t" + "str%?b %2, [%1], #1" + : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); + length -= 2; + } +} +#else +#error Not compatible +#endif + +static int +am79c961_ramtest(struct net_device *dev, unsigned int val) +{ + unsigned char *buffer = kmalloc (65536, GFP_KERNEL); + int i, error = 0, errorcount = 0; + + if (!buffer) + return 0; + memset (buffer, val, 65536); + am_writebuffer(dev, 0, buffer, 65536); + memset (buffer, val ^ 255, 65536); + am_readbuffer(dev, 0, buffer, 65536); + for (i = 0; i < 65536; i++) { + if (buffer[i] != val && !error) { + printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i); + error = 1; + errorcount ++; + } else if (error && buffer[i] == val) { + printk ("%05X\n", i); + error = 0; + } + } + if (error) + printk ("10000\n"); + kfree (buffer); + return errorcount; +} + +static void am79c961_mc_hash(char *addr, u16 *hash) +{ + int idx, bit; + u32 crc; + + crc = ether_crc_le(ETH_ALEN, addr); + + idx = crc >> 30; + bit = (crc >> 26) & 15; + + hash[idx] |= 1 << bit; +} + +static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash) +{ + unsigned int mode = MODE_PORT_10BT; + + if (dev->flags & IFF_PROMISC) { + mode |= MODE_PROMISC; + memset(hash, 0xff, 4 * sizeof(*hash)); + } else if (dev->flags & IFF_ALLMULTI) { + memset(hash, 0xff, 4 * sizeof(*hash)); + } else { + struct netdev_hw_addr *ha; + + memset(hash, 0, 4 * sizeof(*hash)); + + netdev_for_each_mc_addr(ha, dev) + am79c961_mc_hash(ha->addr, hash); + } + + return mode; +} + +static void +am79c961_init_for_open(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned long flags; + unsigned char *p; + u_int hdr_addr, first_free_addr; + u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); + int i; + + /* + * Stop the chip. 
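+ * Setting CSR0_STOP stops the controller; the interrupt status bits
+ * written along with it are write-one-to-clear, so any conditions still
+ * pending are acknowledged at the same time.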
+ */ + spin_lock_irqsave(&priv->chip_lock, flags); + write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP); + spin_unlock_irqrestore(&priv->chip_lock, flags); + + write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */ + write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */ + write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */ + write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */ + + for (i = LADRL; i <= LADRH; i++) + write_rreg (dev->base_addr, i, multi_hash[i - LADRL]); + + for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2) + write_rreg (dev->base_addr, i, p[0] | (p[1] << 8)); + + write_rreg (dev->base_addr, MODE, mode); + write_rreg (dev->base_addr, POLLINT, 0); + write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS); + write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS); + + first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16; + hdr_addr = 0; + + priv->rxhead = 0; + priv->rxtail = 0; + priv->rxhdr = hdr_addr; + + for (i = 0; i < RX_BUFFERS; i++) { + priv->rxbuffer[i] = first_free_addr; + am_writeword (dev, hdr_addr, first_free_addr); + am_writeword (dev, hdr_addr + 2, RMD_OWN); + am_writeword (dev, hdr_addr + 4, (-1600)); + am_writeword (dev, hdr_addr + 6, 0); + first_free_addr += 1600; + hdr_addr += 8; + } + priv->txhead = 0; + priv->txtail = 0; + priv->txhdr = hdr_addr; + for (i = 0; i < TX_BUFFERS; i++) { + priv->txbuffer[i] = first_free_addr; + am_writeword (dev, hdr_addr, first_free_addr); + am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP); + am_writeword (dev, hdr_addr + 4, 0xf000); + am_writeword (dev, hdr_addr + 6, 0); + first_free_addr += 1600; + hdr_addr += 8; + } + + write_rreg (dev->base_addr, BASERXL, priv->rxhdr); + write_rreg (dev->base_addr, BASERXH, 0); + write_rreg (dev->base_addr, BASETXL, priv->txhdr); + write_rreg (dev->base_addr, BASERXH, 0); + write_rreg (dev->base_addr, CSR0, CSR0_STOP); + write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO); + write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM); + write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT); +} + +static void am79c961_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct dev_priv *priv = netdev_priv(dev); + unsigned int lnkstat, carrier; + unsigned long flags; + + spin_lock_irqsave(&priv->chip_lock, flags); + lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; + spin_unlock_irqrestore(&priv->chip_lock, flags); + carrier = netif_carrier_ok(dev); + + if (lnkstat && !carrier) { + netif_carrier_on(dev); + printk("%s: link up\n", dev->name); + } else if (!lnkstat && carrier) { + netif_carrier_off(dev); + printk("%s: link down\n", dev->name); + } + + mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500)); +} + +/* + * Open/initialize the board. + */ +static int +am79c961_open(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + int ret; + + ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev); + if (ret) + return ret; + + am79c961_init_for_open(dev); + + netif_carrier_off(dev); + + priv->timer.expires = jiffies; + add_timer(&priv->timer); + + netif_start_queue(dev); + + return 0; +} + +/* + * The inverse routine to am79c961_open(). 
+ */ +static int +am79c961_close(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned long flags; + + del_timer_sync(&priv->timer); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + spin_lock_irqsave(&priv->chip_lock, flags); + write_rreg (dev->base_addr, CSR0, CSR0_STOP); + write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); + spin_unlock_irqrestore(&priv->chip_lock, flags); + + free_irq (dev->irq, dev); + + return 0; +} + +/* + * Set or clear promiscuous/multicast mode filter for this adapter. + */ +static void am79c961_setmulticastlist (struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned long flags; + u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash); + int i, stopped; + + spin_lock_irqsave(&priv->chip_lock, flags); + + stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; + + if (!stopped) { + /* + * Put the chip into suspend mode + */ + write_rreg(dev->base_addr, CTRL1, CTRL1_SPND); + + /* + * Spin waiting for chip to report suspend mode + */ + while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) { + spin_unlock_irqrestore(&priv->chip_lock, flags); + nop(); + spin_lock_irqsave(&priv->chip_lock, flags); + } + } + + /* + * Update the multicast hash table + */ + for (i = 0; i < ARRAY_SIZE(multi_hash); i++) + write_rreg(dev->base_addr, i + LADRL, multi_hash[i]); + + /* + * Write the mode register + */ + write_rreg(dev->base_addr, MODE, mode); + + if (!stopped) { + /* + * Put the chip back into running mode + */ + write_rreg(dev->base_addr, CTRL1, 0); + } + + spin_unlock_irqrestore(&priv->chip_lock, flags); +} + +static void am79c961_timeout(struct net_device *dev) +{ + printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n", + dev->name); + + /* + * ought to do some setup of the tx side here + */ + + netif_wake_queue(dev); +} + +/* + * Transmit a packet + */ +static int +am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + unsigned int hdraddr, bufaddr; + unsigned int head; + unsigned long flags; + + head = priv->txhead; + hdraddr = priv->txhdr + (head << 3); + bufaddr = priv->txbuffer[head]; + head += 1; + if (head >= TX_BUFFERS) + head = 0; + + am_writebuffer (dev, bufaddr, skb->data, skb->len); + am_writeword (dev, hdraddr + 4, -skb->len); + am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP); + priv->txhead = head; + + spin_lock_irqsave(&priv->chip_lock, flags); + write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA); + spin_unlock_irqrestore(&priv->chip_lock, flags); + + /* + * If the next packet is owned by the ethernet device, + * then the tx ring is full and we can't add another + * packet. + */ + if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN) + netif_stop_queue(dev); + + dev_consume_skb_any(skb); + + return NETDEV_TX_OK; +} + +/* + * If we have a good packet(s), get it/them out of the buffers. + */ +static void +am79c961_rx(struct net_device *dev, struct dev_priv *priv) +{ + do { + struct sk_buff *skb; + u_int hdraddr; + u_int pktaddr; + u_int status; + int len; + + hdraddr = priv->rxhdr + (priv->rxtail << 3); + pktaddr = priv->rxbuffer[priv->rxtail]; + + status = am_readword (dev, hdraddr + 2); + if (status & RMD_OWN) /* do we own it? 
*/ + break; + + priv->rxtail ++; + if (priv->rxtail >= RX_BUFFERS) + priv->rxtail = 0; + + if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) { + am_writeword (dev, hdraddr + 2, RMD_OWN); + dev->stats.rx_errors++; + if (status & RMD_ERR) { + if (status & RMD_FRAM) + dev->stats.rx_frame_errors++; + if (status & RMD_CRC) + dev->stats.rx_crc_errors++; + } else if (status & RMD_STP) + dev->stats.rx_length_errors++; + continue; + } + + len = am_readword(dev, hdraddr + 6); + skb = netdev_alloc_skb(dev, len + 2); + + if (skb) { + skb_reserve(skb, 2); + + am_readbuffer(dev, pktaddr, skb_put(skb, len), len); + am_writeword(dev, hdraddr + 2, RMD_OWN); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + } else { + am_writeword (dev, hdraddr + 2, RMD_OWN); + dev->stats.rx_dropped++; + break; + } + } while (1); +} + +/* + * Update stats for the transmitted packet + */ +static void +am79c961_tx(struct net_device *dev, struct dev_priv *priv) +{ + do { + short len; + u_int hdraddr; + u_int status; + + hdraddr = priv->txhdr + (priv->txtail << 3); + status = am_readword (dev, hdraddr + 2); + if (status & TMD_OWN) + break; + + priv->txtail ++; + if (priv->txtail >= TX_BUFFERS) + priv->txtail = 0; + + if (status & TMD_ERR) { + u_int status2; + + dev->stats.tx_errors++; + + status2 = am_readword (dev, hdraddr + 6); + + /* + * Clear the error byte + */ + am_writeword (dev, hdraddr + 6, 0); + + if (status2 & TST_RTRY) + dev->stats.collisions += 16; + if (status2 & TST_LCOL) + dev->stats.tx_window_errors++; + if (status2 & TST_LCAR) + dev->stats.tx_carrier_errors++; + if (status2 & TST_UFLO) + dev->stats.tx_fifo_errors++; + continue; + } + dev->stats.tx_packets++; + len = am_readword (dev, hdraddr + 4); + dev->stats.tx_bytes += -len; + } while (priv->txtail != priv->txhead); + + netif_wake_queue(dev); +} + +static irqreturn_t +am79c961_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct dev_priv *priv = netdev_priv(dev); + u_int status, n = 100; + int handled = 0; + + do { + status = read_rreg(dev->base_addr, CSR0); + write_rreg(dev->base_addr, CSR0, status & + (CSR0_IENA|CSR0_TINT|CSR0_RINT| + CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL)); + + if (status & CSR0_RINT) { + handled = 1; + am79c961_rx(dev, priv); + } + if (status & CSR0_TINT) { + handled = 1; + am79c961_tx(dev, priv); + } + if (status & CSR0_MISS) { + handled = 1; + dev->stats.rx_dropped++; + } + if (status & CSR0_CERR) { + handled = 1; + mod_timer(&priv->timer, jiffies); + } + } while (--n && status & (CSR0_RINT | CSR0_TINT)); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void am79c961_poll_controller(struct net_device *dev) +{ + unsigned long flags; + local_irq_save(flags); + am79c961_interrupt(dev->irq, dev); + local_irq_restore(flags); +} +#endif + +/* + * Initialise the chip. Note that we always expect + * to be entered with interrupts enabled. 
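+ * That is why the plain spin_lock_irq()/spin_unlock_irq() pair below is
+ * safe: unlocking unconditionally re-enables interrupts.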
+ */ +static int +am79c961_hw_init(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + + spin_lock_irq(&priv->chip_lock); + write_rreg (dev->base_addr, CSR0, CSR0_STOP); + write_rreg (dev->base_addr, CSR3, CSR3_MASKALL); + spin_unlock_irq(&priv->chip_lock); + + am79c961_ramtest(dev, 0x66); + am79c961_ramtest(dev, 0x99); + + return 0; +} + +static void __init am79c961_banner(void) +{ + static unsigned version_printed; + + if (net_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version); +} +static const struct net_device_ops am79c961_netdev_ops = { + .ndo_open = am79c961_open, + .ndo_stop = am79c961_close, + .ndo_start_xmit = am79c961_sendpacket, + .ndo_set_rx_mode = am79c961_setmulticastlist, + .ndo_tx_timeout = am79c961_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = am79c961_poll_controller, +#endif +}; + +static int am79c961_probe(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *dev; + struct dev_priv *priv; + int i, ret; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) + return -ENODEV; + + dev = alloc_etherdev(sizeof(struct dev_priv)); + ret = -ENOMEM; + if (!dev) + goto out; + + SET_NETDEV_DEV(dev, &pdev->dev); + + priv = netdev_priv(dev); + + /* + * Fixed address and IRQ lines here. + * The PNP initialisation should have been + * done by the ether bootp loader. + */ + dev->base_addr = res->start; + ret = platform_get_irq(pdev, 0); + + if (ret < 0) { + ret = -ENODEV; + goto nodev; + } + dev->irq = ret; + + ret = -ENODEV; + if (!request_region(dev->base_addr, 0x18, dev->name)) + goto nodev; + + /* + * Reset the device. + */ + inb(dev->base_addr + NET_RESET); + udelay(5); + + /* + * Check the manufacturer part of the + * ether address. + */ + if (inb(dev->base_addr) != 0x08 || + inb(dev->base_addr + 2) != 0x00 || + inb(dev->base_addr + 4) != 0x2b) + goto release; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; + + am79c961_banner(); + + spin_lock_init(&priv->chip_lock); + init_timer(&priv->timer); + priv->timer.data = (unsigned long)dev; + priv->timer.function = am79c961_timer; + + if (am79c961_hw_init(dev)) + goto release; + + dev->netdev_ops = &am79c961_netdev_ops; + + ret = register_netdev(dev); + if (ret == 0) { + printk(KERN_INFO "%s: ether address %pM\n", + dev->name, dev->dev_addr); + return 0; + } + +release: + release_region(dev->base_addr, 0x18); +nodev: + free_netdev(dev); +out: + return ret; +} + +static struct platform_driver am79c961_driver = { + .probe = am79c961_probe, + .driver = { + .name = "am79c961", + }, +}; + +static int __init am79c961_init(void) +{ + return platform_driver_register(&am79c961_driver); +} + +__initcall(am79c961_init); diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h new file mode 100644 index 000000000..9f384b795 --- /dev/null +++ b/drivers/net/ethernet/amd/am79c961a.h @@ -0,0 +1,145 @@ +/* + * linux/drivers/net/ethernet/amd/am79c961a.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_am79c961a_H +#define _LINUX_am79c961a_H + +/* use 0 for production, 1 for verification, >2 for debug. 
debug flags: */ +#define DEBUG_TX 2 +#define DEBUG_RX 4 +#define DEBUG_INT 8 +#define DEBUG_IC 16 +#ifndef NET_DEBUG +#define NET_DEBUG 0 +#endif + +#define NET_UID 0 +#define NET_RDP 0x10 +#define NET_RAP 0x12 +#define NET_RESET 0x14 +#define NET_IDP 0x16 + +/* + * RAP registers + */ +#define CSR0 0 +#define CSR0_INIT 0x0001 +#define CSR0_STRT 0x0002 +#define CSR0_STOP 0x0004 +#define CSR0_TDMD 0x0008 +#define CSR0_TXON 0x0010 +#define CSR0_RXON 0x0020 +#define CSR0_IENA 0x0040 +#define CSR0_INTR 0x0080 +#define CSR0_IDON 0x0100 +#define CSR0_TINT 0x0200 +#define CSR0_RINT 0x0400 +#define CSR0_MERR 0x0800 +#define CSR0_MISS 0x1000 +#define CSR0_CERR 0x2000 +#define CSR0_BABL 0x4000 +#define CSR0_ERR 0x8000 + +#define CSR3 3 +#define CSR3_EMBA 0x0008 +#define CSR3_DXMT2PD 0x0010 +#define CSR3_LAPPEN 0x0020 +#define CSR3_DXSUFLO 0x0040 +#define CSR3_IDONM 0x0100 +#define CSR3_TINTM 0x0200 +#define CSR3_RINTM 0x0400 +#define CSR3_MERRM 0x0800 +#define CSR3_MISSM 0x1000 +#define CSR3_BABLM 0x4000 +#define CSR3_MASKALL 0x5F00 + +#define CSR4 4 +#define CSR4_JABM 0x0001 +#define CSR4_JAB 0x0002 +#define CSR4_TXSTRTM 0x0004 +#define CSR4_TXSTRT 0x0008 +#define CSR4_RCVCCOM 0x0010 +#define CSR4_RCVCCO 0x0020 +#define CSR4_MFCOM 0x0100 +#define CSR4_MFCO 0x0200 +#define CSR4_ASTRP_RCV 0x0400 +#define CSR4_APAD_XMIT 0x0800 + +#define CTRL1 5 +#define CTRL1_SPND 0x0001 + +#define LADRL 8 +#define LADRM1 9 +#define LADRM2 10 +#define LADRH 11 +#define PADRL 12 +#define PADRM 13 +#define PADRH 14 + +#define MODE 15 +#define MODE_DISRX 0x0001 +#define MODE_DISTX 0x0002 +#define MODE_LOOP 0x0004 +#define MODE_DTCRC 0x0008 +#define MODE_COLL 0x0010 +#define MODE_DRETRY 0x0020 +#define MODE_INTLOOP 0x0040 +#define MODE_PORT_AUI 0x0000 +#define MODE_PORT_10BT 0x0080 +#define MODE_DRXPA 0x2000 +#define MODE_DRXBA 0x4000 +#define MODE_PROMISC 0x8000 + +#define BASERXL 24 +#define BASERXH 25 +#define BASETXL 30 +#define BASETXH 31 + +#define POLLINT 47 + +#define SIZERXR 76 +#define SIZETXR 78 + +#define CSR_MFC 112 + +#define RMD_ENP 0x0100 +#define RMD_STP 0x0200 +#define RMD_CRC 0x0800 +#define RMD_FRAM 0x2000 +#define RMD_ERR 0x4000 +#define RMD_OWN 0x8000 + +#define TMD_ENP 0x0100 +#define TMD_STP 0x0200 +#define TMD_MORE 0x1000 +#define TMD_ERR 0x4000 +#define TMD_OWN 0x8000 + +#define TST_RTRY 0x0400 +#define TST_LCAR 0x0800 +#define TST_LCOL 0x1000 +#define TST_UFLO 0x4000 +#define TST_BUFF 0x8000 + +#define ISALED0 0x0004 +#define ISALED0_LNKST 0x8000 + +struct dev_priv { + unsigned long rxbuffer[RX_BUFFERS]; + unsigned long txbuffer[TX_BUFFERS]; + unsigned char txhead; + unsigned char txtail; + unsigned char rxhead; + unsigned char rxtail; + unsigned long rxhdr; + unsigned long txhdr; + spinlock_t chip_lock; + struct timer_list timer; +}; + +#endif diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c new file mode 100644 index 000000000..94960055f --- /dev/null +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -0,0 +1,1971 @@ + +/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver + * Copyright (C) 2004 Advanced Micro Devices + * + * + * Copyright 2001,2002 Jeff Garzik [ 8139cp.c,tg3.c ] + * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c] + * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ] + * Derived from the lance driver written 1993,1994,1995 by Donald Becker. 
+ * Copyright 1993 United States Government as represented by the + * Director, National Security Agency.[ pcnet32.c ] + * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ] + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + +Module Name: + + amd8111e.c + +Abstract: + + AMD8111 based 10/100 Ethernet Controller Driver. + +Environment: + + Kernel Mode + +Revision History: + 3.0.0 + Initial Revision. + 3.0.1 + 1. Dynamic interrupt coalescing. + 2. Removed prev_stats. + 3. MII support. + 4. Dynamic IPG support + 3.0.2 05/29/2003 + 1. Bug fix: Fixed failure to send jumbo packets larger than 4k. + 2. Bug fix: Fixed VLAN support failure. + 3. Bug fix: Fixed receive interrupt coalescing bug. + 4. Dynamic IPG support is disabled by default. + 3.0.3 06/05/2003 + 1. Bug fix: Fixed failure to close the interface if SMP is enabled. + 3.0.4 12/09/2003 + 1. Added set_mac_address routine for bonding driver support. + 2. Tested the driver for bonding support + 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth + indicated to the h/w. + 4. Modified amd8111e_rx() routine to receive all the received packets + in the first interrupt. + 5. Bug fix: Corrected rx_errors reported in get_stats() function. + 3.0.5 03/22/2004 + 1. Added NAPI support + +*/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define AMD8111E_VLAN_TAG_USED 1 +#else +#define AMD8111E_VLAN_TAG_USED 0 +#endif + +#include "amd8111e.h" +#define MODULE_NAME "amd8111e" +#define MODULE_VERS "3.0.7" +MODULE_AUTHOR("Advanced Micro Devices, Inc."); +MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS); +MODULE_LICENSE("GPL"); +module_param_array(speed_duplex, int, NULL, 0); +MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex"); +module_param_array(coalesce, bool, NULL, 0); +MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable"); +module_param_array(dynamic_ipg, bool, NULL, 0); +MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable"); + +/* This function will read the PHY registers. 
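+ * (Access sequence, as implemented below: PHY_ACCESS is loaded with + * PHY_RD_CMD plus the PHY and register addresses, then polled, up to + * REPEAT_CNT times with a 30 us delay, until PHY_CMD_ACTIVE clears; the + * low 16 bits hold the value and PHY_RD_ERR marks a failed read.)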
*/ +static int amd8111e_read_phy(struct amd8111e_priv *lp, + int phy_id, int reg, u32 *val) +{ + void __iomem *mmio = lp->mmio; + unsigned int reg_val; + unsigned int repeat= REPEAT_CNT; + + reg_val = readl(mmio + PHY_ACCESS); + while (reg_val & PHY_CMD_ACTIVE) + reg_val = readl( mmio + PHY_ACCESS ); + + writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) | + ((reg & 0x1f) << 16), mmio +PHY_ACCESS); + do{ + reg_val = readl(mmio + PHY_ACCESS); + udelay(30); /* It takes 30 us to read/write data */ + } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); + if(reg_val & PHY_RD_ERR) + goto err_phy_read; + + *val = reg_val & 0xffff; + return 0; +err_phy_read: + *val = 0; + return -EINVAL; + +} + +/* This function will write into PHY registers. */ +static int amd8111e_write_phy(struct amd8111e_priv *lp, + int phy_id, int reg, u32 val) +{ + unsigned int repeat = REPEAT_CNT; + void __iomem *mmio = lp->mmio; + unsigned int reg_val; + + reg_val = readl(mmio + PHY_ACCESS); + while (reg_val & PHY_CMD_ACTIVE) + reg_val = readl( mmio + PHY_ACCESS ); + + writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) | + ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS); + + do{ + reg_val = readl(mmio + PHY_ACCESS); + udelay(30); /* It takes 30 us to read/write the data */ + } while (--repeat && (reg_val & PHY_CMD_ACTIVE)); + + if(reg_val & PHY_RD_ERR) + goto err_phy_write; + + return 0; + +err_phy_write: + return -EINVAL; + +} + +/* This is the mii register read function provided to the mii interface. */ +static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + unsigned int reg_val; + + amd8111e_read_phy(lp,phy_id,reg_num,®_val); + return reg_val; + +} + +/* This is the mii register write function provided to the mii interface. */ +static void amd8111e_mdio_write(struct net_device *dev, + int phy_id, int reg_num, int val) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + + amd8111e_write_phy(lp, phy_id, reg_num, val); +} + +/* This function will set PHY speed. During initialization sets + * the original speed to 100 full + */ +static void amd8111e_set_ext_phy(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + u32 bmcr,advert,tmp; + + /* Determine mii register values to set the speed */ + advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE); + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + switch (lp->ext_phy_option){ + + default: + case SPEED_AUTONEG: /* advertise all values */ + tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL| + ADVERTISE_100HALF|ADVERTISE_100FULL) ; + break; + case SPEED10_HALF: + tmp |= ADVERTISE_10HALF; + break; + case SPEED10_FULL: + tmp |= ADVERTISE_10FULL; + break; + case SPEED100_HALF: + tmp |= ADVERTISE_100HALF; + break; + case SPEED100_FULL: + tmp |= ADVERTISE_100FULL; + break; + } + + if(advert != tmp) + amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp); + /* Restart auto negotiation */ + bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr); + +} + +/* This function will unmap skb->data space and will free + * all transmit and receive skbuffs. 
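+ * Each buffer is pci_unmap_single()'d before it is freed, and the + * corresponding skbuff pointer and DMA address in the driver's arrays + * are cleared.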
+ */ +static int amd8111e_free_skbs(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + struct sk_buff *rx_skbuff; + int i; + + /* Freeing transmit skbs */ + for(i = 0; i < NUM_TX_BUFFERS; i++){ + if(lp->tx_skbuff[i]){ + pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE); + dev_kfree_skb (lp->tx_skbuff[i]); + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; + } + } + /* Freeing previously allocated receive buffers */ + for (i = 0; i < NUM_RX_BUFFERS; i++){ + rx_skbuff = lp->rx_skbuff[i]; + if(rx_skbuff != NULL){ + pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i], + lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_skbuff[i]); + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; + } + } + + return 0; +} + +/* This will set the receive buffer length corresponding + * to the mtu size of networkinterface. + */ +static inline void amd8111e_set_rx_buff_len(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + unsigned int mtu = dev->mtu; + + if (mtu > ETH_DATA_LEN){ + /* MTU + ethernet header + FCS + * + optional VLAN tag + skb reserve space 2 + */ + lp->rx_buff_len = mtu + ETH_HLEN + 10; + lp->options |= OPTION_JUMBO_ENABLE; + } else{ + lp->rx_buff_len = PKT_BUFF_SZ; + lp->options &= ~OPTION_JUMBO_ENABLE; + } +} + +/* This function will free all the previously allocated buffers, + * determine new receive buffer length and will allocate new receive buffers. + * This function also allocates and initializes both the transmitter + * and receive hardware descriptors. + */ +static int amd8111e_init_ring(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int i; + + lp->rx_idx = lp->tx_idx = 0; + lp->tx_complete_idx = 0; + lp->tx_ring_idx = 0; + + + if(lp->opened) + /* Free previously allocated transmit and receive skbs */ + amd8111e_free_skbs(dev); + + else{ + /* allocate the tx and rx descriptors */ + if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, + &lp->tx_ring_dma_addr)) == NULL) + + goto err_no_mem; + + if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, + &lp->rx_ring_dma_addr)) == NULL) + + goto err_free_tx_ring; + + } + /* Set new receive buff size */ + amd8111e_set_rx_buff_len(dev); + + /* Allocating receive skbs */ + for (i = 0; i < NUM_RX_BUFFERS; i++) { + + lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len); + if (!lp->rx_skbuff[i]) { + /* Release previos allocated skbs */ + for(--i; i >= 0 ;i--) + dev_kfree_skb(lp->rx_skbuff[i]); + goto err_free_rx_ring; + } + skb_reserve(lp->rx_skbuff[i],2); + } + /* Initilaizing receive descriptors */ + for (i = 0; i < NUM_RX_BUFFERS; i++) { + lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, + lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + + lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]); + lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2); + wmb(); + lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT); + } + + /* Initializing transmit descriptors */ + for (i = 0; i < NUM_TX_RING_DR; i++) { + lp->tx_ring[i].buff_phy_addr = 0; + lp->tx_ring[i].tx_flags = 0; + lp->tx_ring[i].buff_count = 0; + } + + return 0; + +err_free_rx_ring: + + pci_free_consistent(lp->pci_dev, + sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring, + lp->rx_ring_dma_addr); + +err_free_tx_ring: + + pci_free_consistent(lp->pci_dev, + sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring, + lp->tx_ring_dma_addr); + 
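+ /* Every failure path above ends up here and is reported as -ENOMEM. */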
+err_no_mem: + return -ENOMEM; +} + +/* This function will set the interrupt coalescing according + * to the input arguments + */ +static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod) +{ + unsigned int timeout; + unsigned int event_count; + + struct amd8111e_priv *lp = netdev_priv(dev); + void __iomem *mmio = lp->mmio; + struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf; + + + switch(cmod) + { + case RX_INTR_COAL : + timeout = coal_conf->rx_timeout; + event_count = coal_conf->rx_event_count; + if( timeout > MAX_TIMEOUT || + event_count > MAX_EVENT_COUNT ) + return -EINVAL; + + timeout = timeout * DELAY_TIMER_CONV; + writel(VAL0|STINTEN, mmio+INTEN0); + writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout, + mmio+DLY_INT_A); + break; + + case TX_INTR_COAL : + timeout = coal_conf->tx_timeout; + event_count = coal_conf->tx_event_count; + if( timeout > MAX_TIMEOUT || + event_count > MAX_EVENT_COUNT ) + return -EINVAL; + + + timeout = timeout * DELAY_TIMER_CONV; + writel(VAL0|STINTEN,mmio+INTEN0); + writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout, + mmio+DLY_INT_B); + break; + + case DISABLE_COAL: + writel(0,mmio+STVAL); + writel(STINTEN, mmio+INTEN0); + writel(0, mmio +DLY_INT_B); + writel(0, mmio+DLY_INT_A); + break; + case ENABLE_COAL: + /* Start the timer */ + writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */ + writel(VAL0|STINTEN, mmio+INTEN0); + break; + default: + break; + + } + return 0; + +} + +/* This function initializes the device registers and starts the device. */ +static int amd8111e_restart(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + void __iomem *mmio = lp->mmio; + int i,reg_val; + + /* stop the chip */ + writel(RUN, mmio + CMD0); + + if(amd8111e_init_ring(dev)) + return -ENOMEM; + + /* enable the port manager and set auto negotiation always */ + writel((u32) VAL1|EN_PMGR, mmio + CMD3 ); + writel((u32)XPHYANE|XPHYRST , mmio + CTRL2); + + amd8111e_set_ext_phy(dev); + + /* set control registers */ + reg_val = readl(mmio + CTRL1); + reg_val &= ~XMTSP_MASK; + writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 ); + + /* enable interrupt */ + writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | + APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN | + SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0); + + writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0); + + /* initialize tx and rx ring base addresses */ + writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0); + writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0); + + writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0); + writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0); + + /* set default IPG to 96 */ + writew((u32)DEFAULT_IPG,mmio+IPG); + writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1); + + if(lp->options & OPTION_JUMBO_ENABLE){ + writel((u32)VAL2|JUMBO, mmio + CMD3); + /* Reset REX_UFLO */ + writel( REX_UFLO, mmio + CMD2); + /* Should not set REX_UFLO for jumbo frames */ + writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2); + }else{ + writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2); + writel((u32)JUMBO, mmio + CMD3); + } + +#if AMD8111E_VLAN_TAG_USED + writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3); +#endif + writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); + + /* Setting the MAC address to the device */ + for (i = 0; i < ETH_ALEN; i++) + writeb( dev->dev_addr[i], mmio + PADR + i ); + + /* Enable interrupt coalesce */ + if(lp->options & OPTION_INTR_COAL_ENABLE){ + netdev_info(dev, 
"Interrupt Coalescing Enabled.\n"); + amd8111e_set_coalesce(dev,ENABLE_COAL); + } + + /* set RUN bit to start the chip */ + writel(VAL2 | RDMD0, mmio + CMD0); + writel(VAL0 | INTREN | RUN, mmio + CMD0); + + /* To avoid PCI posting bug */ + readl(mmio+CMD0); + return 0; +} + +/* This function clears necessary the device registers. */ +static void amd8111e_init_hw_default(struct amd8111e_priv *lp) +{ + unsigned int reg_val; + unsigned int logic_filter[2] ={0,}; + void __iomem *mmio = lp->mmio; + + + /* stop the chip */ + writel(RUN, mmio + CMD0); + + /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */ + writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0); + + /* Clear RCV_RING_BASE_ADDR */ + writel(0, mmio + RCV_RING_BASE_ADDR0); + + /* Clear XMT_RING_BASE_ADDR */ + writel(0, mmio + XMT_RING_BASE_ADDR0); + writel(0, mmio + XMT_RING_BASE_ADDR1); + writel(0, mmio + XMT_RING_BASE_ADDR2); + writel(0, mmio + XMT_RING_BASE_ADDR3); + + /* Clear CMD0 */ + writel(CMD0_CLEAR,mmio + CMD0); + + /* Clear CMD2 */ + writel(CMD2_CLEAR, mmio +CMD2); + + /* Clear CMD7 */ + writel(CMD7_CLEAR , mmio + CMD7); + + /* Clear DLY_INT_A and DLY_INT_B */ + writel(0x0, mmio + DLY_INT_A); + writel(0x0, mmio + DLY_INT_B); + + /* Clear FLOW_CONTROL */ + writel(0x0, mmio + FLOW_CONTROL); + + /* Clear INT0 write 1 to clear register */ + reg_val = readl(mmio + INT0); + writel(reg_val, mmio + INT0); + + /* Clear STVAL */ + writel(0x0, mmio + STVAL); + + /* Clear INTEN0 */ + writel( INTEN0_CLEAR, mmio + INTEN0); + + /* Clear LADRF */ + writel(0x0 , mmio + LADRF); + + /* Set SRAM_SIZE & SRAM_BOUNDARY registers */ + writel( 0x80010,mmio + SRAM_SIZE); + + /* Clear RCV_RING0_LEN */ + writel(0x0, mmio + RCV_RING_LEN0); + + /* Clear XMT_RING0/1/2/3_LEN */ + writel(0x0, mmio + XMT_RING_LEN0); + writel(0x0, mmio + XMT_RING_LEN1); + writel(0x0, mmio + XMT_RING_LEN2); + writel(0x0, mmio + XMT_RING_LEN3); + + /* Clear XMT_RING_LIMIT */ + writel(0x0, mmio + XMT_RING_LIMIT); + + /* Clear MIB */ + writew(MIB_CLEAR, mmio + MIB_ADDR); + + /* Clear LARF */ + amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF); + + /* SRAM_SIZE register */ + reg_val = readl(mmio + SRAM_SIZE); + + if(lp->options & OPTION_JUMBO_ENABLE) + writel( VAL2|JUMBO, mmio + CMD3); +#if AMD8111E_VLAN_TAG_USED + writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 ); +#endif + /* Set default value to CTRL1 Register */ + writel(CTRL1_DEFAULT, mmio + CTRL1); + + /* To avoid PCI posting bug */ + readl(mmio + CMD2); + +} + +/* This function disables the interrupt and clears all the pending + * interrupts in INT0 + */ +static void amd8111e_disable_interrupt(struct amd8111e_priv *lp) +{ + u32 intr0; + + /* Disable interrupt */ + writel(INTREN, lp->mmio + CMD0); + + /* Clear INT0 */ + intr0 = readl(lp->mmio + INT0); + writel(intr0, lp->mmio + INT0); + + /* To avoid PCI posting bug */ + readl(lp->mmio + INT0); + +} + +/* This function stops the chip. */ +static void amd8111e_stop_chip(struct amd8111e_priv *lp) +{ + writel(RUN, lp->mmio + CMD0); + + /* To avoid PCI posting bug */ + readl(lp->mmio + CMD0); +} + +/* This function frees the transmiter and receiver descriptor rings. 
*/ +static void amd8111e_free_ring(struct amd8111e_priv *lp) +{ + /* Free transmit and receive descriptor rings */ + if(lp->rx_ring){ + pci_free_consistent(lp->pci_dev, + sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, + lp->rx_ring, lp->rx_ring_dma_addr); + lp->rx_ring = NULL; + } + + if(lp->tx_ring){ + pci_free_consistent(lp->pci_dev, + sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, + lp->tx_ring, lp->tx_ring_dma_addr); + + lp->tx_ring = NULL; + } + +} + +/* This function will free all the transmit skbs that are actually + * transmitted by the device. It will check the ownership of the + * skb before freeing the skb. + */ +static int amd8111e_tx(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; + int status; + /* Complete all the transmit packet */ + while (lp->tx_complete_idx != lp->tx_idx){ + tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; + status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags); + + if(status & OWN_BIT) + break; /* It still hasn't been Txed */ + + lp->tx_ring[tx_index].buff_phy_addr = 0; + + /* We must free the original skb */ + if (lp->tx_skbuff[tx_index]) { + pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], + lp->tx_skbuff[tx_index]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); + lp->tx_skbuff[tx_index] = NULL; + lp->tx_dma_addr[tx_index] = 0; + } + lp->tx_complete_idx++; + /*COAL update tx coalescing parameters */ + lp->coal_conf.tx_packets++; + lp->coal_conf.tx_bytes += + le16_to_cpu(lp->tx_ring[tx_index].buff_count); + + if (netif_queue_stopped(dev) && + lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ + /* The ring is no longer full, clear tbusy. */ + /* lp->tx_full = 0; */ + netif_wake_queue (dev); + } + } + return 0; +} + +/* This function handles the driver receive operation in polling mode */ +static int amd8111e_rx_poll(struct napi_struct *napi, int budget) +{ + struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi); + struct net_device *dev = lp->amd8111e_net_dev; + int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; + void __iomem *mmio = lp->mmio; + struct sk_buff *skb,*new_skb; + int min_pkt_len, status; + unsigned int intr0; + int num_rx_pkt = 0; + short pkt_len; +#if AMD8111E_VLAN_TAG_USED + short vtag; +#endif + int rx_pkt_limit = budget; + unsigned long flags; + + if (rx_pkt_limit <= 0) + goto rx_not_empty; + + do{ + /* process receive packets until we use the quota. + * If we own the next entry, it's a new packet. Send it up. + */ + while(1) { + status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); + if (status & OWN_BIT) + break; + + /* There is a tricky error noted by John Murphy, + * to Russ Nelson: Even with + * full-sized * buffers it's possible for a + * jabber packet to use two buffers, with only + * the last correctly noting the error. 
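+ * Descriptors that fail the ERR_BIT or STP/ENP checks below are simply + * handed back to the hardware with their flags reset.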
+ */ + if(status & ERR_BIT) { + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + /* check for STP and ENP */ + if(!((status & STP_BIT) && (status & ENP_BIT))){ + /* resetting flags */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + goto err_next_pkt; + } + pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; + +#if AMD8111E_VLAN_TAG_USED + vtag = status & TT_MASK; + /*MAC will strip vlan tag*/ + if (vtag != 0) + min_pkt_len =MIN_PKT_LEN - 4; + else +#endif + min_pkt_len =MIN_PKT_LEN; + + if (pkt_len < min_pkt_len) { + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } + if(--rx_pkt_limit < 0) + goto rx_not_empty; + new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); + if (!new_skb) { + /* if allocation fail, + * ignore that pkt and go to next one + */ + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; + lp->drv_rx_errors++; + goto err_next_pkt; + } + + skb_reserve(new_skb, 2); + skb = lp->rx_skbuff[rx_index]; + pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], + lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[rx_index] = new_skb; + lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, + new_skb->data, + lp->rx_buff_len-2, + PCI_DMA_FROMDEVICE); + + skb->protocol = eth_type_trans(skb, dev); + +#if AMD8111E_VLAN_TAG_USED + if (vtag == TT_VLAN_TAGGED){ + u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } +#endif + netif_receive_skb(skb); + /*COAL update rx coalescing parameters*/ + lp->coal_conf.rx_packets++; + lp->coal_conf.rx_bytes += pkt_len; + num_rx_pkt++; + + err_next_pkt: + lp->rx_ring[rx_index].buff_phy_addr + = cpu_to_le32(lp->rx_dma_addr[rx_index]); + lp->rx_ring[rx_index].buff_count = + cpu_to_le16(lp->rx_buff_len-2); + wmb(); + lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); + rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; + } + /* Check the interrupt status register for more packets in the + * mean time. Process them since we have not used up our quota. + */ + intr0 = readl(mmio + INT0); + /*Ack receive packets */ + writel(intr0 & RINT0,mmio + INT0); + + } while(intr0 & RINT0); + + if (rx_pkt_limit > 0) { + /* Receive descriptor is empty now */ + spin_lock_irqsave(&lp->lock, flags); + __napi_complete(napi); + writel(VAL0|RINTEN0, mmio + INTEN0); + writel(VAL2 | RDMD0, mmio + CMD0); + spin_unlock_irqrestore(&lp->lock, flags); + } + +rx_not_empty: + return num_rx_pkt; +} + +/* This function will indicate the link status to the kernel. */ +static int amd8111e_link_change(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int status0,speed; + + /* read the link change */ + status0 = readl(lp->mmio + STAT0); + + if(status0 & LINK_STATS){ + if(status0 & AUTONEG_COMPLETE) + lp->link_config.autoneg = AUTONEG_ENABLE; + else + lp->link_config.autoneg = AUTONEG_DISABLE; + + if(status0 & FULL_DPLX) + lp->link_config.duplex = DUPLEX_FULL; + else + lp->link_config.duplex = DUPLEX_HALF; + speed = (status0 & SPEED_MASK) >> 7; + if(speed == PHY_SPEED_10) + lp->link_config.speed = SPEED_10; + else if(speed == PHY_SPEED_100) + lp->link_config.speed = SPEED_100; + + netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n", + (lp->link_config.speed == SPEED_100) ? + "100" : "10", + (lp->link_config.duplex == DUPLEX_FULL) ? 
+ "Full" : "Half"); + + netif_carrier_on(dev); + } + else{ + lp->link_config.speed = SPEED_INVALID; + lp->link_config.duplex = DUPLEX_INVALID; + lp->link_config.autoneg = AUTONEG_INVALID; + netdev_info(dev, "Link is Down.\n"); + netif_carrier_off(dev); + } + + return 0; +} + +/* This function reads the mib counters. */ +static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) +{ + unsigned int status; + unsigned int data; + unsigned int repeat = REPEAT_CNT; + + writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR); + do { + status = readw(mmio + MIB_ADDR); + udelay(2); /* controller takes MAX 2 us to get mib data */ + } + while (--repeat && (status & MIB_CMD_ACTIVE)); + + data = readl(mmio + MIB_DATA); + return data; +} + +/* This function reads the mib registers and returns the hardware statistics. + * It updates previous internal driver statistics with new values. + */ +static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + void __iomem *mmio = lp->mmio; + unsigned long flags; + struct net_device_stats *new_stats = &dev->stats; + + if (!lp->opened) + return new_stats; + spin_lock_irqsave (&lp->lock, flags); + + /* stats.rx_packets */ + new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+ + amd8111e_read_mib(mmio, rcv_multicast_pkts)+ + amd8111e_read_mib(mmio, rcv_unicast_pkts); + + /* stats.tx_packets */ + new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets); + + /*stats.rx_bytes */ + new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets); + + /* stats.tx_bytes */ + new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets); + + /* stats.rx_errors */ + /* hw errors + errors driver reported */ + new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+ + amd8111e_read_mib(mmio, rcv_fragments)+ + amd8111e_read_mib(mmio, rcv_jabbers)+ + amd8111e_read_mib(mmio, rcv_alignment_errors)+ + amd8111e_read_mib(mmio, rcv_fcs_errors)+ + amd8111e_read_mib(mmio, rcv_miss_pkts)+ + lp->drv_rx_errors; + + /* stats.tx_errors */ + new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); + + /* stats.rx_dropped*/ + new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.tx_dropped*/ + new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts); + + /* stats.multicast*/ + new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts); + + /* stats.collisions*/ + new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions); + + /* stats.rx_length_errors*/ + new_stats->rx_length_errors = + amd8111e_read_mib(mmio, rcv_undersize_pkts)+ + amd8111e_read_mib(mmio, rcv_oversize_pkts); + + /* stats.rx_over_errors*/ + new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.rx_crc_errors*/ + new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors); + + /* stats.rx_frame_errors*/ + new_stats->rx_frame_errors = + amd8111e_read_mib(mmio, rcv_alignment_errors); + + /* stats.rx_fifo_errors */ + new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.rx_missed_errors */ + new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts); + + /* stats.tx_aborted_errors*/ + new_stats->tx_aborted_errors = + amd8111e_read_mib(mmio, xmt_excessive_collision); + + /* stats.tx_carrier_errors*/ + new_stats->tx_carrier_errors = + amd8111e_read_mib(mmio, xmt_loss_carrier); + + /* stats.tx_fifo_errors*/ + new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts); + + /* stats.tx_window_errors*/ + 
new_stats->tx_window_errors = + amd8111e_read_mib(mmio, xmt_late_collision); + + /* Reset the mibs for collecting new statistics */ + /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/ + + spin_unlock_irqrestore (&lp->lock, flags); + + return new_stats; +} + +/* This function recalculate the interrupt coalescing mode on every interrupt + * according to the datarate and the packet rate. + */ +static int amd8111e_calc_coalesce(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf; + int tx_pkt_rate; + int rx_pkt_rate; + int tx_data_rate; + int rx_data_rate; + int rx_pkt_size; + int tx_pkt_size; + + tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets; + coal_conf->tx_prev_packets = coal_conf->tx_packets; + + tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes; + coal_conf->tx_prev_bytes = coal_conf->tx_bytes; + + rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets; + coal_conf->rx_prev_packets = coal_conf->rx_packets; + + rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes; + coal_conf->rx_prev_bytes = coal_conf->rx_bytes; + + if(rx_pkt_rate < 800){ + if(coal_conf->rx_coal_type != NO_COALESCE){ + + coal_conf->rx_timeout = 0x0; + coal_conf->rx_event_count = 0; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = NO_COALESCE; + } + } + else{ + + rx_pkt_size = rx_data_rate/rx_pkt_rate; + if (rx_pkt_size < 128){ + if(coal_conf->rx_coal_type != NO_COALESCE){ + + coal_conf->rx_timeout = 0; + coal_conf->rx_event_count = 0; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = NO_COALESCE; + } + + } + else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){ + + if(coal_conf->rx_coal_type != LOW_COALESCE){ + coal_conf->rx_timeout = 1; + coal_conf->rx_event_count = 4; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = LOW_COALESCE; + } + } + else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){ + + if(coal_conf->rx_coal_type != MEDIUM_COALESCE){ + coal_conf->rx_timeout = 1; + coal_conf->rx_event_count = 4; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = MEDIUM_COALESCE; + } + + } + else if(rx_pkt_size >= 1024){ + if(coal_conf->rx_coal_type != HIGH_COALESCE){ + coal_conf->rx_timeout = 2; + coal_conf->rx_event_count = 3; + amd8111e_set_coalesce(dev,RX_INTR_COAL); + coal_conf->rx_coal_type = HIGH_COALESCE; + } + } + } + /* NOW FOR TX INTR COALESC */ + if(tx_pkt_rate < 800){ + if(coal_conf->tx_coal_type != NO_COALESCE){ + + coal_conf->tx_timeout = 0x0; + coal_conf->tx_event_count = 0; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = NO_COALESCE; + } + } + else{ + + tx_pkt_size = tx_data_rate/tx_pkt_rate; + if (tx_pkt_size < 128){ + + if(coal_conf->tx_coal_type != NO_COALESCE){ + + coal_conf->tx_timeout = 0; + coal_conf->tx_event_count = 0; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = NO_COALESCE; + } + + } + else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){ + + if(coal_conf->tx_coal_type != LOW_COALESCE){ + coal_conf->tx_timeout = 1; + coal_conf->tx_event_count = 2; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = LOW_COALESCE; + + } + } + else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){ + + if(coal_conf->tx_coal_type != MEDIUM_COALESCE){ + coal_conf->tx_timeout = 2; + coal_conf->tx_event_count = 5; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = MEDIUM_COALESCE; + } + + } + else if(tx_pkt_size >= 1024){ + if 
(tx_pkt_size >= 1024){ + if(coal_conf->tx_coal_type != HIGH_COALESCE){ + coal_conf->tx_timeout = 4; + coal_conf->tx_event_count = 8; + amd8111e_set_coalesce(dev,TX_INTR_COAL); + coal_conf->tx_coal_type = HIGH_COALESCE; + } + } + } + } + return 0; + +} + +/* This is device interrupt function. It handles transmit, + * receive,link change and hardware timer interrupts. + */ +static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) +{ + + struct net_device *dev = (struct net_device *)dev_id; + struct amd8111e_priv *lp = netdev_priv(dev); + void __iomem *mmio = lp->mmio; + unsigned int intr0, intren0; + unsigned int handled = 1; + + if(unlikely(dev == NULL)) + return IRQ_NONE; + + spin_lock(&lp->lock); + + /* disabling interrupt */ + writel(INTREN, mmio + CMD0); + + /* Read interrupt status */ + intr0 = readl(mmio + INT0); + intren0 = readl(mmio + INTEN0); + + /* Process all the INT event until INTR bit is clear. */ + + if (!(intr0 & INTR)){ + handled = 0; + goto err_no_interrupt; + } + + /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */ + writel(intr0, mmio + INT0); + + /* Check if Receive Interrupt has occurred. */ + if (intr0 & RINT0) { + if (napi_schedule_prep(&lp->napi)) { + /* Disable receive interupts */ + writel(RINTEN0, mmio + INTEN0); + /* Schedule a polling routine */ + __napi_schedule(&lp->napi); + } else if (intren0 & RINTEN0) { + netdev_dbg(dev, "************Driver bug! interrupt while in poll\n"); + /* Fix by disable receive interrupts */ + writel(RINTEN0, mmio + INTEN0); + } + } + + /* Check if Transmit Interrupt has occurred. */ + if (intr0 & TINT0) + amd8111e_tx(dev); + + /* Check if Link Change Interrupt has occurred. */ + if (intr0 & LCINT) + amd8111e_link_change(dev); + + /* Check if Hardware Timer Interrupt has occurred. */ + if (intr0 & STINT) + amd8111e_calc_coalesce(dev); + +err_no_interrupt: + writel( VAL0 | INTREN,mmio + CMD0); + + spin_unlock(&lp->lock); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void amd8111e_poll(struct net_device *dev) +{ + unsigned long flags; + local_irq_save(flags); + amd8111e_interrupt(0, dev); + local_irq_restore(flags); +} +#endif + + +/* This function closes the network interface and updates + * the statistics so that most recent statistics will be + * available after the interface is down. + */ +static int amd8111e_close(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + netif_stop_queue(dev); + + napi_disable(&lp->napi); + + spin_lock_irq(&lp->lock); + + amd8111e_disable_interrupt(lp); + amd8111e_stop_chip(lp); + + /* Free transmit and receive skbs */ + amd8111e_free_skbs(lp->amd8111e_net_dev); + + netif_carrier_off(lp->amd8111e_net_dev); + + /* Delete ipg timer */ + if(lp->options & OPTION_DYN_IPG_ENABLE) + del_timer_sync(&lp->ipg_data.ipg_timer); + + spin_unlock_irq(&lp->lock); + free_irq(dev->irq, dev); + amd8111e_free_ring(lp); + + /* Update the statistics before closing */ + amd8111e_get_stats(dev); + lp->opened = 0; + return 0; +} + +/* This function opens new interface.It requests irq for the device, + * initializes the device,buffers and descriptors, and starts the device. 
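+ * The interrupt is requested as IRQF_SHARED, NAPI is enabled, and the + * hardware is reinitialised under lp->lock (amd8111e_init_hw_default() + * followed by amd8111e_restart()) before the transmit queue is started.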
+ */ +static int amd8111e_open(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + + if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED, + dev->name, dev)) + return -EAGAIN; + + napi_enable(&lp->napi); + + spin_lock_irq(&lp->lock); + + amd8111e_init_hw_default(lp); + + if(amd8111e_restart(dev)){ + spin_unlock_irq(&lp->lock); + napi_disable(&lp->napi); + if (dev->irq) + free_irq(dev->irq, dev); + return -ENOMEM; + } + /* Start ipg timer */ + if(lp->options & OPTION_DYN_IPG_ENABLE){ + add_timer(&lp->ipg_data.ipg_timer); + netdev_info(dev, "Dynamic IPG Enabled\n"); + } + + lp->opened = 1; + + spin_unlock_irq(&lp->lock); + + netif_start_queue(dev); + + return 0; +} + +/* This function checks if there is any transmit descriptors + * available to queue more packet. + */ +static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp) +{ + int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK; + if (lp->tx_skbuff[tx_index]) + return -1; + else + return 0; + +} + +/* This function will queue the transmit packets to the + * descriptors and will trigger the send operation. It also + * initializes the transmit descriptors with buffer physical address, + * byte count, ownership to hardware etc. + */ +static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int tx_index; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + + tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK; + + lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len); + + lp->tx_skbuff[tx_index] = skb; + lp->tx_ring[tx_index].tx_flags = 0; + +#if AMD8111E_VLAN_TAG_USED + if (skb_vlan_tag_present(skb)) { + lp->tx_ring[tx_index].tag_ctrl_cmd |= + cpu_to_le16(TCC_VLAN_INSERT); + lp->tx_ring[tx_index].tag_ctrl_info = + cpu_to_le16(skb_vlan_tag_get(skb)); + + } +#endif + lp->tx_dma_addr[tx_index] = + pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + lp->tx_ring[tx_index].buff_phy_addr = + cpu_to_le32(lp->tx_dma_addr[tx_index]); + + /* Set FCS and LTINT bits */ + wmb(); + lp->tx_ring[tx_index].tx_flags |= + cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT); + + lp->tx_idx++; + + /* Trigger an immediate send poll. */ + writel( VAL1 | TDMD0, lp->mmio + CMD0); + writel( VAL2 | RDMD0,lp->mmio + CMD0); + + if(amd8111e_tx_queue_avail(lp) < 0){ + netif_stop_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; +} +/* This function returns all the memory mapped registers of the device. */ +static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf) +{ + void __iomem *mmio = lp->mmio; + /* Read only necessary registers */ + buf[0] = readl(mmio + XMT_RING_BASE_ADDR0); + buf[1] = readl(mmio + XMT_RING_LEN0); + buf[2] = readl(mmio + RCV_RING_BASE_ADDR0); + buf[3] = readl(mmio + RCV_RING_LEN0); + buf[4] = readl(mmio + CMD0); + buf[5] = readl(mmio + CMD2); + buf[6] = readl(mmio + CMD3); + buf[7] = readl(mmio + CMD7); + buf[8] = readl(mmio + INT0); + buf[9] = readl(mmio + INTEN0); + buf[10] = readl(mmio + LADRF); + buf[11] = readl(mmio + LADRF+4); + buf[12] = readl(mmio + STAT0); +} + + +/* This function sets promiscuos mode, all-multi mode or the multicast address + * list to the device. 
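+ * Multicast filtering uses the 64-bit logical address filter (LADRF): + * the top six bits of ether_crc_le() of each address select one of the + * 64 filter bits.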
+ */ +static void amd8111e_set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct amd8111e_priv *lp = netdev_priv(dev); + u32 mc_filter[2] ; + int bit_num; + + if(dev->flags & IFF_PROMISC){ + writel( VAL2 | PROM, lp->mmio + CMD2); + return; + } + else + writel( PROM, lp->mmio + CMD2); + if (dev->flags & IFF_ALLMULTI || + netdev_mc_count(dev) > MAX_FILTER_SIZE) { + /* get all multicast packet */ + mc_filter[1] = mc_filter[0] = 0xffffffff; + lp->options |= OPTION_MULTICAST_ENABLE; + amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF); + return; + } + if (netdev_mc_empty(dev)) { + /* get only own packets */ + mc_filter[1] = mc_filter[0] = 0; + lp->options &= ~OPTION_MULTICAST_ENABLE; + amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF); + /* disable promiscuous mode */ + writel(PROM, lp->mmio + CMD2); + return; + } + /* load all the multicast addresses in the logic filter */ + lp->options |= OPTION_MULTICAST_ENABLE; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f; + mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); + } + amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF); + + /* To eliminate PCI posting bug */ + readl(lp->mmio + CMD2); + +} + +static void amd8111e_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + struct pci_dev *pci_dev = lp->pci_dev; + strlcpy(info->driver, MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, MODULE_VERS, sizeof(info->version)); + snprintf(info->fw_version, sizeof(info->fw_version), + "%u", chip_version); + strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info)); +} + +static int amd8111e_get_regs_len(struct net_device *dev) +{ + return AMD8111E_REG_DUMP_LEN; +} + +static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + regs->version = 0; + amd8111e_read_regs(lp, buf); +} + +static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + spin_lock_irq(&lp->lock); + mii_ethtool_gset(&lp->mii_if, ecmd); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int res; + spin_lock_irq(&lp->lock); + res = mii_ethtool_sset(&lp->mii_if, ecmd); + spin_unlock_irq(&lp->lock); + return res; +} + +static int amd8111e_nway_reset(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + return mii_nway_restart(&lp->mii_if); +} + +static u32 amd8111e_get_link(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + return mii_link_ok(&lp->mii_if); +} + +static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + wol_info->supported = WAKE_MAGIC|WAKE_PHY; + if (lp->options & OPTION_WOL_ENABLE) + wol_info->wolopts = WAKE_MAGIC; +} + +static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY)) + return -EINVAL; + spin_lock_irq(&lp->lock); + if (wol_info->wolopts & WAKE_MAGIC) + lp->options |= + (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE); + else if(wol_info->wolopts & WAKE_PHY) + lp->options |= + (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE); + else + 
lp->options &= ~OPTION_WOL_ENABLE; + spin_unlock_irq(&lp->lock); + return 0; +} + +static const struct ethtool_ops ops = { + .get_drvinfo = amd8111e_get_drvinfo, + .get_regs_len = amd8111e_get_regs_len, + .get_regs = amd8111e_get_regs, + .get_settings = amd8111e_get_settings, + .set_settings = amd8111e_set_settings, + .nway_reset = amd8111e_nway_reset, + .get_link = amd8111e_get_link, + .get_wol = amd8111e_get_wol, + .set_wol = amd8111e_set_wol, +}; + +/* This function handles all the ethtool ioctls. It gives driver info, + * gets/sets driver speed, gets memory mapped register values, forces + * auto negotiation, sets/gets WOL options for ethtool application. + */ +static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct amd8111e_priv *lp = netdev_priv(dev); + int err; + u32 mii_regval; + + switch(cmd) { + case SIOCGMIIPHY: + data->phy_id = lp->ext_phy_addr; + + /* fallthru */ + case SIOCGMIIREG: + + spin_lock_irq(&lp->lock); + err = amd8111e_read_phy(lp, data->phy_id, + data->reg_num & PHY_REG_ADDR_MASK, &mii_regval); + spin_unlock_irq(&lp->lock); + + data->val_out = mii_regval; + return err; + + case SIOCSMIIREG: + + spin_lock_irq(&lp->lock); + err = amd8111e_write_phy(lp, data->phy_id, + data->reg_num & PHY_REG_ADDR_MASK, data->val_in); + spin_unlock_irq(&lp->lock); + + return err; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} +static int amd8111e_set_mac_address(struct net_device *dev, void *p) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int i; + struct sockaddr *addr = p; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + spin_lock_irq(&lp->lock); + /* Setting the MAC address to the device */ + for (i = 0; i < ETH_ALEN; i++) + writeb( dev->dev_addr[i], lp->mmio + PADR + i ); + + spin_unlock_irq(&lp->lock); + + return 0; +} + +/* This function changes the mtu of the device. It restarts the device to + * initialize the descriptor with new receive buffers. + */ +static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int err; + + if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU)) + return -EINVAL; + + if (!netif_running(dev)) { + /* new_mtu will be used + * when device starts netxt time + */ + dev->mtu = new_mtu; + return 0; + } + + spin_lock_irq(&lp->lock); + + /* stop the chip */ + writel(RUN, lp->mmio + CMD0); + + dev->mtu = new_mtu; + + err = amd8111e_restart(dev); + spin_unlock_irq(&lp->lock); + if(!err) + netif_start_queue(dev); + return err; +} + +static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp) +{ + writel( VAL1|MPPLBA, lp->mmio + CMD3); + writel( VAL0|MPEN_SW, lp->mmio + CMD7); + + /* To eliminate PCI posting bug */ + readl(lp->mmio + CMD7); + return 0; +} + +static int amd8111e_enable_link_change(struct amd8111e_priv *lp) +{ + + /* Adapter is already stoped/suspended/interrupt-disabled */ + writel(VAL0|LCMODE_SW,lp->mmio + CMD7); + + /* To eliminate PCI posting bug */ + readl(lp->mmio + CMD7); + return 0; +} + +/* This function is called when a packet transmission fails to complete + * within a reasonable period, on the assumption that an interrupt have + * failed or the interface is locked up. This function will reinitialize + * the hardware. 
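+ * Recovery is a full amd8111e_restart() under lp->lock; the transmit + * queue is woken again only if that restart succeeds.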
+ */ +static void amd8111e_tx_timeout(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int err; + + netdev_err(dev, "transmit timed out, resetting\n"); + + spin_lock_irq(&lp->lock); + err = amd8111e_restart(dev); + spin_unlock_irq(&lp->lock); + if(!err) + netif_wake_queue(dev); +} +static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct amd8111e_priv *lp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + /* disable the interrupt */ + spin_lock_irq(&lp->lock); + amd8111e_disable_interrupt(lp); + spin_unlock_irq(&lp->lock); + + netif_device_detach(dev); + + /* stop chip */ + spin_lock_irq(&lp->lock); + if(lp->options & OPTION_DYN_IPG_ENABLE) + del_timer_sync(&lp->ipg_data.ipg_timer); + amd8111e_stop_chip(lp); + spin_unlock_irq(&lp->lock); + + if(lp->options & OPTION_WOL_ENABLE){ + /* enable wol */ + if(lp->options & OPTION_WAKE_MAGIC_ENABLE) + amd8111e_enable_magicpkt(lp); + if(lp->options & OPTION_WAKE_PHY_ENABLE) + amd8111e_enable_link_change(lp); + + pci_enable_wake(pci_dev, PCI_D3hot, 1); + pci_enable_wake(pci_dev, PCI_D3cold, 1); + + } + else{ + pci_enable_wake(pci_dev, PCI_D3hot, 0); + pci_enable_wake(pci_dev, PCI_D3cold, 0); + } + + pci_save_state(pci_dev); + pci_set_power_state(pci_dev, PCI_D3hot); + + return 0; +} +static int amd8111e_resume(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct amd8111e_priv *lp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); + + pci_enable_wake(pci_dev, PCI_D3hot, 0); + pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */ + + netif_device_attach(dev); + + spin_lock_irq(&lp->lock); + amd8111e_restart(dev); + /* Restart ipg timer */ + if(lp->options & OPTION_DYN_IPG_ENABLE) + mod_timer(&lp->ipg_data.ipg_timer, + jiffies + IPG_CONVERGE_JIFFIES); + spin_unlock_irq(&lp->lock); + + return 0; +} + +static void amd8111e_config_ipg(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + struct ipg_info *ipg_data = &lp->ipg_data; + void __iomem *mmio = lp->mmio; + unsigned int prev_col_cnt = ipg_data->col_cnt; + unsigned int total_col_cnt; + unsigned int tmp_ipg; + + if(lp->link_config.duplex == DUPLEX_FULL){ + ipg_data->ipg = DEFAULT_IPG; + return; + } + + if(ipg_data->ipg_state == SSTATE){ + + if(ipg_data->timer_tick == IPG_STABLE_TIME){ + + ipg_data->timer_tick = 0; + ipg_data->ipg = MIN_IPG - IPG_STEP; + ipg_data->current_ipg = MIN_IPG; + ipg_data->diff_col_cnt = 0xFFFFFFFF; + ipg_data->ipg_state = CSTATE; + } + else + ipg_data->timer_tick++; + } + + if(ipg_data->ipg_state == CSTATE){ + + /* Get the current collision count */ + + total_col_cnt = ipg_data->col_cnt = + amd8111e_read_mib(mmio, xmt_collisions); + + if ((total_col_cnt - prev_col_cnt) < + (ipg_data->diff_col_cnt)){ + + ipg_data->diff_col_cnt = + total_col_cnt - prev_col_cnt ; + + ipg_data->ipg = ipg_data->current_ipg; + } + + ipg_data->current_ipg += IPG_STEP; + + if (ipg_data->current_ipg <= MAX_IPG) + tmp_ipg = ipg_data->current_ipg; + else{ + tmp_ipg = ipg_data->ipg; + ipg_data->ipg_state = SSTATE; + } + writew((u32)tmp_ipg, mmio + IPG); + writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1); + } + mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES); + return; + +} + +static void amd8111e_probe_ext_phy(struct net_device *dev) +{ + struct amd8111e_priv *lp = netdev_priv(dev); + int i; + + for (i = 0x1e; i >= 0; i--) { + 
u32 id1, id2; + + if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1)) + continue; + if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2)) + continue; + lp->ext_phy_id = (id1 << 16) | id2; + lp->ext_phy_addr = i; + return; + } + lp->ext_phy_id = 0; + lp->ext_phy_addr = 1; +} + +static const struct net_device_ops amd8111e_netdev_ops = { + .ndo_open = amd8111e_open, + .ndo_stop = amd8111e_close, + .ndo_start_xmit = amd8111e_start_xmit, + .ndo_tx_timeout = amd8111e_tx_timeout, + .ndo_get_stats = amd8111e_get_stats, + .ndo_set_rx_mode = amd8111e_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = amd8111e_set_mac_address, + .ndo_do_ioctl = amd8111e_ioctl, + .ndo_change_mtu = amd8111e_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = amd8111e_poll, +#endif +}; + +static int amd8111e_probe_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err, i; + unsigned long reg_addr,reg_len; + struct amd8111e_priv *lp; + struct net_device *dev; + + err = pci_enable_device(pdev); + if(err){ + dev_err(&pdev->dev, "Cannot enable new PCI device\n"); + return err; + } + + if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){ + dev_err(&pdev->dev, "Cannot find PCI base address\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, MODULE_NAME); + if(err){ + dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + /* Find power-management capability. */ + if (!pdev->pm_cap) { + dev_err(&pdev->dev, "No Power Management capability\n"); + err = -ENODEV; + goto err_free_reg; + } + + /* Initialize DMA */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { + dev_err(&pdev->dev, "DMA not supported\n"); + err = -ENODEV; + goto err_free_reg; + } + + reg_addr = pci_resource_start(pdev, 0); + reg_len = pci_resource_len(pdev, 0); + + dev = alloc_etherdev(sizeof(struct amd8111e_priv)); + if (!dev) { + err = -ENOMEM; + goto err_free_reg; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + +#if AMD8111E_VLAN_TAG_USED + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ; +#endif + + lp = netdev_priv(dev); + lp->pci_dev = pdev; + lp->amd8111e_net_dev = dev; + lp->pm_cap = pdev->pm_cap; + + spin_lock_init(&lp->lock); + + lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len); + if (!lp->mmio) { + dev_err(&pdev->dev, "Cannot map device registers\n"); + err = -ENOMEM; + goto err_free_dev; + } + + /* Initializing MAC address */ + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = readb(lp->mmio + PADR + i); + + /* Setting user defined parametrs */ + lp->ext_phy_option = speed_duplex[card_idx]; + if(coalesce[card_idx]) + lp->options |= OPTION_INTR_COAL_ENABLE; + if(dynamic_ipg[card_idx++]) + lp->options |= OPTION_DYN_IPG_ENABLE; + + + /* Initialize driver entry points */ + dev->netdev_ops = &amd8111e_netdev_ops; + dev->ethtool_ops = &ops; + dev->irq =pdev->irq; + dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; + netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); + +#if AMD8111E_VLAN_TAG_USED + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; +#endif + /* Probe the external PHY */ + amd8111e_probe_ext_phy(dev); + + /* setting mii default values */ + lp->mii_if.dev = dev; + lp->mii_if.mdio_read = amd8111e_mdio_read; + lp->mii_if.mdio_write = amd8111e_mdio_write; + lp->mii_if.phy_id = lp->ext_phy_addr; + + /* Set receive buffer length and set jumbo option*/ + amd8111e_set_rx_buff_len(dev); + + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, 
"Cannot register net device\n"); + goto err_free_dev; + } + + pci_set_drvdata(pdev, dev); + + /* Initialize software ipg timer */ + if(lp->options & OPTION_DYN_IPG_ENABLE){ + init_timer(&lp->ipg_data.ipg_timer); + lp->ipg_data.ipg_timer.data = (unsigned long) dev; + lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg; + lp->ipg_data.ipg_timer.expires = jiffies + + IPG_CONVERGE_JIFFIES; + lp->ipg_data.ipg = DEFAULT_IPG; + lp->ipg_data.ipg_state = CSTATE; + } + + /* display driver and device information */ + chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; + dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS); + dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n", + chip_version, dev->dev_addr); + if (lp->ext_phy_id) + dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n", + lp->ext_phy_id, lp->ext_phy_addr); + else + dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n"); + + return 0; + +err_free_dev: + free_netdev(dev); + +err_free_reg: + pci_release_regions(pdev); + +err_disable_pdev: + pci_disable_device(pdev); + return err; + +} + +static void amd8111e_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + unregister_netdev(dev); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +static const struct pci_device_id amd8111e_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD8111E_7462, + }, + { + .vendor = 0, + } +}; +MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); + +static struct pci_driver amd8111e_driver = { + .name = MODULE_NAME, + .id_table = amd8111e_pci_tbl, + .probe = amd8111e_probe_one, + .remove = amd8111e_remove_one, + .suspend = amd8111e_suspend, + .resume = amd8111e_resume +}; + +module_pci_driver(amd8111e_driver); diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h new file mode 100644 index 000000000..7cdb18512 --- /dev/null +++ b/drivers/net/ethernet/amd/amd8111e.h @@ -0,0 +1,813 @@ +/* + * Advanced Micro Devices Inc. AMD8111E Linux Network Driver + * Copyright (C) 2003 Advanced Micro Devices + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + +Module Name: + + amd8111e.h + +Abstract: + + AMD8111 based 10/100 Ethernet Controller driver definitions. + +Environment: + + Kernel Mode + +Revision History: + 3.0.0 + Initial Revision. + 3.0.1 +*/ + +#ifndef _AMD811E_H +#define _AMD811E_H + +/* Command style register access + +Registers CMD0, CMD2, CMD3,CMD7 and INTEN0 uses a write access technique called command style access. It allows the write to selected bits of this register without altering the bits that are not selected. Command style registers are divided into 4 bytes that can be written independently. Higher order bit of each byte is the value bit that specifies the value that will be written into the selected bits of register. 
+ +eg., if the value 10011010b is written into the least significant byte of a command style register, bits 1,3 and 4 of the register will be set to 1, and the other bits will not be altered. If the value 00011010b is written into the same byte, bits 1,3 and 4 will be cleared to 0 and the other bits will not be altered. + +*/ + +/* Offset for Memory Mapped Registers. */ +/* 32 bit registers */ + +#define ASF_STAT 0x00 /* ASF status register */ +#define CHIPID 0x04 /* Chip ID regsiter */ +#define MIB_DATA 0x10 /* MIB data register */ +#define MIB_ADDR 0x14 /* MIB address register */ +#define STAT0 0x30 /* Status0 register */ +#define INT0 0x38 /* Interrupt0 register */ +#define INTEN0 0x40 /* Interrupt0 enable register*/ +#define CMD0 0x48 /* Command0 register */ +#define CMD2 0x50 /* Command2 register */ +#define CMD3 0x54 /* Command3 resiter */ +#define CMD7 0x64 /* Command7 register */ + +#define CTRL1 0x6C /* Control1 register */ +#define CTRL2 0x70 /* Control2 register */ + +#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */ + +#define AUTOPOLL0 0x88 /* Auto-poll0 register */ +#define AUTOPOLL1 0x8A /* Auto-poll1 register */ +#define AUTOPOLL2 0x8C /* Auto-poll2 register */ +#define AUTOPOLL3 0x8E /* Auto-poll3 register */ +#define AUTOPOLL4 0x90 /* Auto-poll4 register */ +#define AUTOPOLL5 0x92 /* Auto-poll5 register */ + +#define AP_VALUE 0x98 /* Auto-poll value register */ +#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */ +#define DLY_INT_B 0xAC /* Group B delayed interrupt register */ + +#define FLOW_CONTROL 0xC8 /* Flow control register */ +#define PHY_ACCESS 0xD0 /* PHY access register */ + +#define STVAL 0xD8 /* Software timer value register */ + +#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */ +#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */ +#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */ +#define XMT_RING_BASE_ADDR3 0x118 /* Transmit ring2 base addr register */ + +#define RCV_RING_BASE_ADDR0 0x120 /* Transmit ring0 base addr register */ + +#define PMAT0 0x190 /* OnNow pattern register0 */ +#define PMAT1 0x194 /* OnNow pattern register1 */ + +/* 16bit registers */ + +#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */ +#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */ +#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */ +#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */ + +#define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */ + +#define SRAM_SIZE 0x178 /* SRAM size register */ +#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */ + +/* 48bit register */ + +#define PADR 0x160 /* Physical address register */ + +#define IFS1 0x18C /* Inter-frame spacing Part1 register */ +#define IFS 0x18D /* Inter-frame spacing register */ +#define IPG 0x18E /* Inter-frame gap register */ +/* 64bit register */ + +#define LADRF 0x168 /* Logical address filter register */ + + +/* Register Bit Definitions */ +typedef enum { + + ASF_INIT_DONE = (1 << 1), + ASF_INIT_PRESENT = (1 << 0), + +}STAT_ASF_BITS; + +typedef enum { + + MIB_CMD_ACTIVE = (1 << 15 ), + MIB_RD_CMD = (1 << 13 ), + MIB_CLEAR = (1 << 12 ), + MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)| + (1 << 4) | (1 << 5), +}MIB_ADDR_BITS; + + +typedef enum { + + PMAT_DET = (1 << 12), + MP_DET = (1 << 11), + LC_DET = (1 << 10), + SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7), + FULL_DPLX = (1 << 6), + LINK_STATS = (1 << 5), + AUTONEG_COMPLETE = (1 << 4), + MIIPD = (1 << 
3), + RX_SUSPENDED = (1 << 2), + TX_SUSPENDED = (1 << 1), + RUNNING = (1 << 0), + +}STAT0_BITS; + +#define PHY_SPEED_10 0x2 +#define PHY_SPEED_100 0x3 + +/* INT0 0x38, 32bit register */ +typedef enum { + + INTR = (1 << 31), + PCSINT = (1 << 28), + LCINT = (1 << 27), + APINT5 = (1 << 26), + APINT4 = (1 << 25), + APINT3 = (1 << 24), + TINT_SUM = (1 << 23), + APINT2 = (1 << 22), + APINT1 = (1 << 21), + APINT0 = (1 << 20), + MIIPDTINT = (1 << 19), + MCCINT = (1 << 17), + MREINT = (1 << 16), + RINT_SUM = (1 << 15), + SPNDINT = (1 << 14), + MPINT = (1 << 13), + SINT = (1 << 12), + TINT3 = (1 << 11), + TINT2 = (1 << 10), + TINT1 = (1 << 9), + TINT0 = (1 << 8), + UINT = (1 << 7), + STINT = (1 << 4), + RINT0 = (1 << 0), + +}INT0_BITS; + +typedef enum { + + VAL3 = (1 << 31), /* VAL bit for byte 3 */ + VAL2 = (1 << 23), /* VAL bit for byte 2 */ + VAL1 = (1 << 15), /* VAL bit for byte 1 */ + VAL0 = (1 << 7), /* VAL bit for byte 0 */ + +}VAL_BITS; + +typedef enum { + + /* VAL3 */ + LCINTEN = (1 << 27), + APINT5EN = (1 << 26), + APINT4EN = (1 << 25), + APINT3EN = (1 << 24), + /* VAL2 */ + APINT2EN = (1 << 22), + APINT1EN = (1 << 21), + APINT0EN = (1 << 20), + MIIPDTINTEN = (1 << 19), + MCCIINTEN = (1 << 18), + MCCINTEN = (1 << 17), + MREINTEN = (1 << 16), + /* VAL1 */ + SPNDINTEN = (1 << 14), + MPINTEN = (1 << 13), + TINTEN3 = (1 << 11), + SINTEN = (1 << 12), + TINTEN2 = (1 << 10), + TINTEN1 = (1 << 9), + TINTEN0 = (1 << 8), + /* VAL0 */ + STINTEN = (1 << 4), + RINTEN0 = (1 << 0), + + INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */ + +}INTEN0_BITS; + +typedef enum { + /* VAL2 */ + RDMD0 = (1 << 16), + /* VAL1 */ + TDMD3 = (1 << 11), + TDMD2 = (1 << 10), + TDMD1 = (1 << 9), + TDMD0 = (1 << 8), + /* VAL0 */ + UINTCMD = (1 << 6), + RX_FAST_SPND = (1 << 5), + TX_FAST_SPND = (1 << 4), + RX_SPND = (1 << 3), + TX_SPND = (1 << 2), + INTREN = (1 << 1), + RUN = (1 << 0), + + CMD0_CLEAR = 0x000F0F7F, /* Command style register */ + +}CMD0_BITS; + +typedef enum { + + /* VAL3 */ + CONDUIT_MODE = (1 << 29), + /* VAL2 */ + RPA = (1 << 19), + DRCVPA = (1 << 18), + DRCVBC = (1 << 17), + PROM = (1 << 16), + /* VAL1 */ + ASTRP_RCV = (1 << 13), + RCV_DROP0 = (1 << 12), + EMBA = (1 << 11), + DXMT2PD = (1 << 10), + LTINTEN = (1 << 9), + DXMTFCS = (1 << 8), + /* VAL0 */ + APAD_XMT = (1 << 6), + DRTY = (1 << 5), + INLOOP = (1 << 4), + EXLOOP = (1 << 3), + REX_RTRY = (1 << 2), + REX_UFLO = (1 << 1), + REX_LCOL = (1 << 0), + + CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */ + +}CMD2_BITS; + +typedef enum { + + /* VAL3 */ + ASF_INIT_DONE_ALIAS = (1 << 29), + /* VAL2 */ + JUMBO = (1 << 21), + VSIZE = (1 << 20), + VLONLY = (1 << 19), + VL_TAG_DEL = (1 << 18), + /* VAL1 */ + EN_PMGR = (1 << 14), + INTLEVEL = (1 << 13), + FORCE_FULL_DUPLEX = (1 << 12), + FORCE_LINK_STATUS = (1 << 11), + APEP = (1 << 10), + MPPLBA = (1 << 9), + /* VAL0 */ + RESET_PHY_PULSE = (1 << 2), + RESET_PHY = (1 << 1), + PHY_RST_POL = (1 << 0), + +}CMD3_BITS; + + +typedef enum { + + /* VAL0 */ + PMAT_SAVE_MATCH = (1 << 4), + PMAT_MODE = (1 << 3), + MPEN_SW = (1 << 1), + LCMODE_SW = (1 << 0), + + CMD7_CLEAR = 0x0000001B /* Command style register */ + +}CMD7_BITS; + + +typedef enum { + + RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */ + XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */ + XMTSP_128 = (1 << 9), /* 9 */ + XMTSP_64 = (1 << 8), + CACHE_ALIGN = (1 << 4), + BURST_LIMIT_MASK = (0xF << 0 ), + CTRL1_DEFAULT = 0x00010111, + +}CTRL1_BITS; + +typedef enum { + + FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */ + XPHYRST = (1 << 7), + XPHYANE = (1 << 
6), + XPHYFD = (1 << 5), + XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */ + APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */ + +}CTRL2_BITS; + +/* XMT_RING_LIMIT 0x7C, 32bit register */ +typedef enum { + + XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */ + XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */ + XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */ + +}XMT_RING_LIMIT_BITS; + +typedef enum { + + AP_REG0_EN = (1 << 15), + AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ + AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ + +}AUTOPOLL0_BITS; + +/* AUTOPOLL1 0x8A, 16bit register */ +typedef enum { + + AP_REG1_EN = (1 << 15), + AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ + AP_PRE_SUP1 = (1 << 6), + AP_PHY1_DFLT = (1 << 5), + AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ + +}AUTOPOLL1_BITS; + + +typedef enum { + + AP_REG2_EN = (1 << 15), + AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ + AP_PRE_SUP2 = (1 << 6), + AP_PHY2_DFLT = (1 << 5), + AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ + +}AUTOPOLL2_BITS; + +typedef enum { + + AP_REG3_EN = (1 << 15), + AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ + AP_PRE_SUP3 = (1 << 6), + AP_PHY3_DFLT = (1 << 5), + AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ + +}AUTOPOLL3_BITS; + + +typedef enum { + + AP_REG4_EN = (1 << 15), + AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ + AP_PRE_SUP4 = (1 << 6), + AP_PHY4_DFLT = (1 << 5), + AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ + +}AUTOPOLL4_BITS; + + +typedef enum { + + AP_REG5_EN = (1 << 15), + AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */ + AP_PRE_SUP5 = (1 << 6), + AP_PHY5_DFLT = (1 << 5), + AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */ + +}AUTOPOLL5_BITS; + + + + +/* AP_VALUE 0x98, 32bit ragister */ +typedef enum { + + AP_VAL_ACTIVE = (1 << 31), + AP_VAL_RD_CMD = ( 1 << 29), + AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */ + AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) | + (0xF << 12), /* 15:0 */ + +}AP_VALUE_BITS; + +typedef enum { + + DLY_INT_A_R3 = (1 << 31), + DLY_INT_A_R2 = (1 << 30), + DLY_INT_A_R1 = (1 << 29), + DLY_INT_A_R0 = (1 << 28), + DLY_INT_A_T3 = (1 << 27), + DLY_INT_A_T2 = (1 << 26), + DLY_INT_A_T1 = (1 << 25), + DLY_INT_A_T0 = ( 1 << 24), + EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */ + MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)| + (1 << 9) | (1 << 10), /* 10:0 */ + +}DLY_INT_A_BITS; + +typedef enum { + + DLY_INT_B_R3 = (1 << 31), + DLY_INT_B_R2 = (1 << 30), + DLY_INT_B_R1 = (1 << 29), + DLY_INT_B_R0 = (1 << 28), + DLY_INT_B_T3 = (1 << 27), + DLY_INT_B_T2 = (1 << 26), + DLY_INT_B_T1 = (1 << 25), + DLY_INT_B_T0 = ( 1 << 24), + EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */ + MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)| + (1 << 9) | (1 << 10), /* 10:0 */ +}DLY_INT_B_BITS; + + +/* FLOW_CONTROL 0xC8, 32bit register */ +typedef enum { + + PAUSE_LEN_CHG = (1 << 30), + FTPE = (1 << 22), + FRPE = (1 << 21), + NAPA = (1 << 20), + NPA = (1 << 19), + FIXP = ( 1 << 18), + FCCMD = ( 1 << 16), + PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */ + +}FLOW_CONTROL_BITS; + +/* PHY_ ACCESS 0xD0, 32bit register */ +typedef enum { + + PHY_CMD_ACTIVE = (1 << 31), + PHY_WR_CMD = (1 << 30), + PHY_RD_CMD = (1 << 29), + PHY_RD_ERR = (1 << 28), + PHY_PRE_SUP = (1 << 27), + PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)| + (1 << 24) |(1 << 25),/* 25:21 */ + PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */ + PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)| + (0xF << 12),/* 
15:0 */ + +}PHY_ACCESS_BITS; + + +/* PMAT0 0x190, 32bit register */ +typedef enum { + PMR_ACTIVE = (1 << 31), + PMR_WR_CMD = (1 << 30), + PMR_RD_CMD = (1 << 29), + PMR_BANK = (1 <<28), + PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)| + (1 << 22),/* 22:16 */ + PMR_B4 = (0xF << 0) | (0xF << 4),/* 15:0 */ +}PMAT0_BITS; + + +/* PMAT1 0x194, 32bit register */ +typedef enum { + PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */ + PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */ + PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */ + PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */ +}PMAT1_BITS; + +/************************************************************************/ +/* */ +/* MIB counter definitions */ +/* */ +/************************************************************************/ + +#define rcv_miss_pkts 0x00 +#define rcv_octets 0x01 +#define rcv_broadcast_pkts 0x02 +#define rcv_multicast_pkts 0x03 +#define rcv_undersize_pkts 0x04 +#define rcv_oversize_pkts 0x05 +#define rcv_fragments 0x06 +#define rcv_jabbers 0x07 +#define rcv_unicast_pkts 0x08 +#define rcv_alignment_errors 0x09 +#define rcv_fcs_errors 0x0A +#define rcv_good_octets 0x0B +#define rcv_mac_ctrl 0x0C +#define rcv_flow_ctrl 0x0D +#define rcv_pkts_64_octets 0x0E +#define rcv_pkts_65to127_octets 0x0F +#define rcv_pkts_128to255_octets 0x10 +#define rcv_pkts_256to511_octets 0x11 +#define rcv_pkts_512to1023_octets 0x12 +#define rcv_pkts_1024to1518_octets 0x13 +#define rcv_unsupported_opcode 0x14 +#define rcv_symbol_errors 0x15 +#define rcv_drop_pkts_ring1 0x16 +#define rcv_drop_pkts_ring2 0x17 +#define rcv_drop_pkts_ring3 0x18 +#define rcv_drop_pkts_ring4 0x19 +#define rcv_jumbo_pkts 0x1A + +#define xmt_underrun_pkts 0x20 +#define xmt_octets 0x21 +#define xmt_packets 0x22 +#define xmt_broadcast_pkts 0x23 +#define xmt_multicast_pkts 0x24 +#define xmt_collisions 0x25 +#define xmt_unicast_pkts 0x26 +#define xmt_one_collision 0x27 +#define xmt_multiple_collision 0x28 +#define xmt_deferred_transmit 0x29 +#define xmt_late_collision 0x2A +#define xmt_excessive_defer 0x2B +#define xmt_loss_carrier 0x2C +#define xmt_excessive_collision 0x2D +#define xmt_back_pressure 0x2E +#define xmt_flow_ctrl 0x2F +#define xmt_pkts_64_octets 0x30 +#define xmt_pkts_65to127_octets 0x31 +#define xmt_pkts_128to255_octets 0x32 +#define xmt_pkts_256to511_octets 0x33 +#define xmt_pkts_512to1023_octets 0x34 +#define xmt_pkts_1024to1518_octet 0x35 +#define xmt_oversize_pkts 0x36 +#define xmt_jumbo_pkts 0x37 + + +/* Driver definitions */ + +#define PCI_VENDOR_ID_AMD 0x1022 +#define PCI_DEVICE_ID_AMD8111E_7462 0x7462 + +#define MAX_UNITS 8 /* Maximum number of devices possible */ + +#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */ +#define NUM_RX_BUFFERS 32 /* Number of receive buffers */ + +#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */ +#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */ + +#define NUM_TX_RING_DR 32 +#define NUM_RX_RING_DR 32 + +#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */ +#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */ + +#define MAX_FILTER_SIZE 64 /* Maximum multicast address */ +#define AMD8111E_MIN_MTU 60 +#define AMD8111E_MAX_MTU 9000 + +#define PKT_BUFF_SZ 1536 +#define MIN_PKT_LEN 60 + +#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */ +#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */ +#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion. 
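+				(presumably the factor used to turn the rx/tx
+				timeout fields of struct amd8111e_coalesce_conf
+				below into the MAX_DELAY_TIME field of the
+				DLY_INT_A/B registers.)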
+ Only 500 usec resolution */ +#define OPTION_VLAN_ENABLE 0x0001 +#define OPTION_JUMBO_ENABLE 0x0002 +#define OPTION_MULTICAST_ENABLE 0x0004 +#define OPTION_WOL_ENABLE 0x0008 +#define OPTION_WAKE_MAGIC_ENABLE 0x0010 +#define OPTION_WAKE_PHY_ENABLE 0x0020 +#define OPTION_INTR_COAL_ENABLE 0x0040 +#define OPTION_DYN_IPG_ENABLE 0x0080 + +#define PHY_REG_ADDR_MASK 0x1f + +/* ipg parameters */ +#define DEFAULT_IPG 0x60 +#define IFS1_DELTA 36 +#define IPG_CONVERGE_JIFFIES (HZ/2) +#define IPG_STABLE_TIME 5 +#define MIN_IPG 96 +#define MAX_IPG 255 +#define IPG_STEP 16 +#define CSTATE 1 +#define SSTATE 2 + +/* Assume contoller gets data 10 times the maximum processing time */ +#define REPEAT_CNT 10 + +/* amd8111e descriptor flag definitions */ +typedef enum { + + OWN_BIT = (1 << 15), + ADD_FCS_BIT = (1 << 13), + LTINT_BIT = (1 << 12), + STP_BIT = (1 << 9), + ENP_BIT = (1 << 8), + KILL_BIT = (1 << 6), + TCC_VLAN_INSERT = (1 << 1), + TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0), + +}TX_FLAG_BITS; + +typedef enum { + ERR_BIT = (1 << 14), + FRAM_BIT = (1 << 13), + OFLO_BIT = (1 << 12), + CRC_BIT = (1 << 11), + PAM_BIT = (1 << 6), + LAFM_BIT = (1 << 5), + BAM_BIT = (1 << 4), + TT_VLAN_TAGGED = (1 << 3) |(1 << 2),/* 0x000 */ + TT_PRTY_TAGGED = (1 << 3),/* 0x0008 */ + +}RX_FLAG_BITS; + +#define RESET_RX_FLAGS 0x0000 +#define TT_MASK 0x000c +#define TCC_MASK 0x0003 + +/* driver ioctl parameters */ +#define AMD8111E_REG_DUMP_LEN 13*sizeof(u32) + +/* amd8111e desriptor format */ + +struct amd8111e_tx_dr{ + + __le16 buff_count; /* Size of the buffer pointed by this descriptor */ + + __le16 tx_flags; + + __le16 tag_ctrl_info; + + __le16 tag_ctrl_cmd; + + __le32 buff_phy_addr; + + __le32 reserved; +}; + +struct amd8111e_rx_dr{ + + __le32 reserved; + + __le16 msg_count; /* Received message len */ + + __le16 tag_ctrl_info; + + __le16 buff_count; /* Len of the buffer pointed by descriptor. 
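+				Written by the driver (presumably from
+				rx_buff_len in struct amd8111e_priv), whereas
+				msg_count above is filled in by the MAC with
+				the actual received length.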
*/ + + __le16 rx_flags; + + __le32 buff_phy_addr; + +}; +struct amd8111e_link_config{ + +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff +#define AUTONEG_INVALID 0xff + + unsigned long orig_phy_option; + u16 speed; + u8 duplex; + u8 autoneg; + u8 reserved; /* 32bit alignment */ +}; + +enum coal_type{ + + NO_COALESCE, + LOW_COALESCE, + MEDIUM_COALESCE, + HIGH_COALESCE, + +}; + +enum coal_mode{ + RX_INTR_COAL, + TX_INTR_COAL, + DISABLE_COAL, + ENABLE_COAL, + +}; +#define MAX_TIMEOUT 40 +#define MAX_EVENT_COUNT 31 +struct amd8111e_coalesce_conf{ + + unsigned int rx_timeout; + unsigned int rx_event_count; + unsigned long rx_packets; + unsigned long rx_prev_packets; + unsigned long rx_bytes; + unsigned long rx_prev_bytes; + unsigned int rx_coal_type; + + unsigned int tx_timeout; + unsigned int tx_event_count; + unsigned long tx_packets; + unsigned long tx_prev_packets; + unsigned long tx_bytes; + unsigned long tx_prev_bytes; + unsigned int tx_coal_type; + +}; +struct ipg_info{ + + unsigned int ipg_state; + unsigned int ipg; + unsigned int current_ipg; + unsigned int col_cnt; + unsigned int diff_col_cnt; + unsigned int timer_tick; + unsigned int prev_ipg; + struct timer_list ipg_timer; +}; + +struct amd8111e_priv{ + + struct amd8111e_tx_dr* tx_ring; + struct amd8111e_rx_dr* rx_ring; + dma_addr_t tx_ring_dma_addr; /* tx descriptor ring base address */ + dma_addr_t rx_ring_dma_addr; /* rx descriptor ring base address */ + const char *name; + struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */ + struct net_device* amd8111e_net_dev; /* ptr to associated net_device */ + /* Transmit and receive skbs */ + struct sk_buff *tx_skbuff[NUM_TX_BUFFERS]; + struct sk_buff *rx_skbuff[NUM_RX_BUFFERS]; + /* Transmit and receive dma mapped addr */ + dma_addr_t tx_dma_addr[NUM_TX_BUFFERS]; + dma_addr_t rx_dma_addr[NUM_RX_BUFFERS]; + /* Reg memory mapped address */ + void __iomem *mmio; + + struct napi_struct napi; + + spinlock_t lock; /* Guard lock */ + unsigned long rx_idx, tx_idx; /* The next free ring entry */ + unsigned long tx_complete_idx; + unsigned long tx_ring_complete_idx; + unsigned long tx_ring_idx; + unsigned int rx_buff_len; /* Buffer length of rx buffers */ + int options; /* Options enabled/disabled for the device */ + + unsigned long ext_phy_option; + int ext_phy_addr; + u32 ext_phy_id; + + struct amd8111e_link_config link_config; + int pm_cap; + + struct net_device *next; + int mii; + struct mii_if_info mii_if; + char opened; + unsigned int drv_rx_errors; + struct amd8111e_coalesce_conf coal_conf; + + struct ipg_info ipg_data; + +}; + +/* kernel provided writeq does not write 64 bits into the amd8111e device register instead writes only higher 32bits data into lower 32bits of the register. +BUG? */ +#define amd8111e_writeq(_UlData,_memMap) \ + writel(*(u32*)(&_UlData), _memMap); \ + writel(*(u32*)((u8*)(&_UlData)+4), _memMap+4) + +/* maps the external speed options to internal value */ +typedef enum { + SPEED_AUTONEG, + SPEED10_HALF, + SPEED10_FULL, + SPEED100_HALF, + SPEED100_FULL, +}EXT_PHY_OPTION; + +static int card_idx; +static int speed_duplex[MAX_UNITS] = { 0, }; +static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true }; +static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... 
MAX_UNITS-1] = false }; +static unsigned int chip_version; + +#endif /* _AMD8111E_H */ + diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c new file mode 100644 index 000000000..968b7bfac --- /dev/null +++ b/drivers/net/ethernet/amd/ariadne.c @@ -0,0 +1,792 @@ +/* + * Amiga Linux/m68k Ariadne Ethernet Driver + * + * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org) + * Peter De Schrijver (p2@mind.be) + * + * --------------------------------------------------------------------------- + * + * This program is based on + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. B, Amendment/0, May 1994 + * + * MC68230: Parallel Interface/Timer (PI/T) + * Motorola Semiconductors, December, 1983 + * + * --------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * --------------------------------------------------------------------------- + * + * The Ariadne is a Zorro-II board made by Village Tronic. It contains: + * + * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both + * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors + * + * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +/*#define DEBUG*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ariadne.h" + +#ifdef ARIADNE_DEBUG +int ariadne_debug = ARIADNE_DEBUG; +#else +int ariadne_debug = 1; +#endif + +/* Macros to Fix Endianness problems */ + +/* Swap the Bytes in a WORD */ +#define swapw(x) (((x >> 8) & 0x00ff) | ((x << 8) & 0xff00)) +/* Get the Low BYTE in a WORD */ +#define lowb(x) (x & 0xff) +/* Get the Swapped High WORD in a LONG */ +#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff)) +/* Get the Swapped Low WORD in a LONG */ +#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff)) + +/* Transmit/Receive Ring Definitions */ + +#define TX_RING_SIZE 5 +#define RX_RING_SIZE 16 + +#define PKT_BUF_SIZE 1520 + +/* Private Device Data */ + +struct ariadne_private { + volatile struct TDRE *tx_ring[TX_RING_SIZE]; + volatile struct RDRE *rx_ring[RX_RING_SIZE]; + volatile u_short *tx_buff[TX_RING_SIZE]; + volatile u_short *rx_buff[RX_RING_SIZE]; + int cur_tx, cur_rx; /* The next free ring entry */ + int dirty_tx; /* The ring entries to be free()ed */ + char tx_full; +}; + +/* Structure Created in the Ariadne's RAM Buffer */ + +struct lancedata { + struct TDRE tx_ring[TX_RING_SIZE]; + struct RDRE rx_ring[RX_RING_SIZE]; + u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)]; + u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)]; +}; + +static void memcpyw(volatile u_short *dest, u_short *src, int len) +{ + while (len >= 2) { + *(dest++) = *(src++); + len -= 2; + } + if (len == 1) + *dest = (*(u_char *)src) << 8; +} + +static void ariadne_init_ring(struct net_device *dev) +{ + struct ariadne_private *priv = netdev_priv(dev); + volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start; + int i; + + netif_stop_queue(dev); + + priv->tx_full = 0; + priv->cur_rx = 
priv->cur_tx = 0; + priv->dirty_tx = 0; + + /* Set up TX Ring */ + for (i = 0; i < TX_RING_SIZE; i++) { + volatile struct TDRE *t = &lancedata->tx_ring[i]; + t->TMD0 = swloww(ARIADNE_RAM + + offsetof(struct lancedata, tx_buff[i])); + t->TMD1 = swhighw(ARIADNE_RAM + + offsetof(struct lancedata, tx_buff[i])) | + TF_STP | TF_ENP; + t->TMD2 = swapw((u_short)-PKT_BUF_SIZE); + t->TMD3 = 0; + priv->tx_ring[i] = &lancedata->tx_ring[i]; + priv->tx_buff[i] = lancedata->tx_buff[i]; + netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n", + i, &lancedata->tx_ring[i], lancedata->tx_buff[i]); + } + + /* Set up RX Ring */ + for (i = 0; i < RX_RING_SIZE; i++) { + volatile struct RDRE *r = &lancedata->rx_ring[i]; + r->RMD0 = swloww(ARIADNE_RAM + + offsetof(struct lancedata, rx_buff[i])); + r->RMD1 = swhighw(ARIADNE_RAM + + offsetof(struct lancedata, rx_buff[i])) | + RF_OWN; + r->RMD2 = swapw((u_short)-PKT_BUF_SIZE); + r->RMD3 = 0x0000; + priv->rx_ring[i] = &lancedata->rx_ring[i]; + priv->rx_buff[i] = lancedata->rx_buff[i]; + netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n", + i, &lancedata->rx_ring[i], lancedata->rx_buff[i]); + } +} + +static int ariadne_rx(struct net_device *dev) +{ + struct ariadne_private *priv = netdev_priv(dev); + int entry = priv->cur_rx % RX_RING_SIZE; + int i; + + /* If we own the next entry, it's a new packet. Send it up */ + while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) { + int status = lowb(priv->rx_ring[entry]->RMD1); + + if (status != (RF_STP | RF_ENP)) { /* There was an error */ + /* There is a tricky error noted by + * John Murphy to Russ Nelson: + * Even with full-sized buffers it's possible for a + * jabber packet to use two buffers, with only the + * last correctly noting the error + */ + /* Only count a general error at the end of a packet */ + if (status & RF_ENP) + dev->stats.rx_errors++; + if (status & RF_FRAM) + dev->stats.rx_frame_errors++; + if (status & RF_OFLO) + dev->stats.rx_over_errors++; + if (status & RF_CRC) + dev->stats.rx_crc_errors++; + if (status & RF_BUFF) + dev->stats.rx_fifo_errors++; + priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP; + } else { + /* Malloc up new buffer, compatible with net-3 */ + short pkt_len = swapw(priv->rx_ring[entry]->RMD3); + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { + for (i = 0; i < RX_RING_SIZE; i++) + if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN) + break; + + if (i > RX_RING_SIZE - 2) { + dev->stats.rx_dropped++; + priv->rx_ring[entry]->RMD1 |= RF_OWN; + priv->cur_rx++; + } + break; + } + + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, pkt_len); /* Make room */ + skb_copy_to_linear_data(skb, + (const void *)priv->rx_buff[entry], + pkt_len); + skb->protocol = eth_type_trans(skb, dev); + netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data %p len %u\n", + ((u_short *)skb->data)[6], + skb->data + 6, skb->data, + skb->data, skb->len); + + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + + priv->rx_ring[entry]->RMD1 |= RF_OWN; + entry = (++priv->cur_rx) % RX_RING_SIZE; + } + + priv->cur_rx = priv->cur_rx % RX_RING_SIZE; + + /* We should check that at least two ring entries are free. 
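+	 * (the netdev_alloc_skb() failure path above already handles the
+	 * extreme case: when almost no entry is still owned by the
+	 * controller it hands the oldest descriptor back via RF_OWN and
+	 * bumps rx_dropped).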
+ * If not, we should free one and mark stats->rx_dropped++ + */ + + return 0; +} + +static irqreturn_t ariadne_interrupt(int irq, void *data) +{ + struct net_device *dev = (struct net_device *)data; + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + struct ariadne_private *priv; + int csr0, boguscnt; + int handled = 0; + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + + if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ + return IRQ_NONE; /* generated by the board */ + + priv = netdev_priv(dev); + + boguscnt = 10; + while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) { + /* Acknowledge all of the current interrupt sources ASAP */ + lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT); + +#ifdef DEBUG + if (ariadne_debug > 5) { + netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [", + csr0, lance->RDP); + if (csr0 & INTR) + pr_cont(" INTR"); + if (csr0 & INEA) + pr_cont(" INEA"); + if (csr0 & RXON) + pr_cont(" RXON"); + if (csr0 & TXON) + pr_cont(" TXON"); + if (csr0 & TDMD) + pr_cont(" TDMD"); + if (csr0 & STOP) + pr_cont(" STOP"); + if (csr0 & STRT) + pr_cont(" STRT"); + if (csr0 & INIT) + pr_cont(" INIT"); + if (csr0 & ERR) + pr_cont(" ERR"); + if (csr0 & BABL) + pr_cont(" BABL"); + if (csr0 & CERR) + pr_cont(" CERR"); + if (csr0 & MISS) + pr_cont(" MISS"); + if (csr0 & MERR) + pr_cont(" MERR"); + if (csr0 & RINT) + pr_cont(" RINT"); + if (csr0 & TINT) + pr_cont(" TINT"); + if (csr0 & IDON) + pr_cont(" IDON"); + pr_cont(" ]\n"); + } +#endif + + if (csr0 & RINT) { /* Rx interrupt */ + handled = 1; + ariadne_rx(dev); + } + + if (csr0 & TINT) { /* Tx-done interrupt */ + int dirty_tx = priv->dirty_tx; + + handled = 1; + while (dirty_tx < priv->cur_tx) { + int entry = dirty_tx % TX_RING_SIZE; + int status = lowb(priv->tx_ring[entry]->TMD1); + + if (status & TF_OWN) + break; /* It still hasn't been Txed */ + + priv->tx_ring[entry]->TMD1 &= 0xff00; + + if (status & TF_ERR) { + /* There was an major error, log it */ + int err_status = priv->tx_ring[entry]->TMD3; + dev->stats.tx_errors++; + if (err_status & EF_RTRY) + dev->stats.tx_aborted_errors++; + if (err_status & EF_LCAR) + dev->stats.tx_carrier_errors++; + if (err_status & EF_LCOL) + dev->stats.tx_window_errors++; + if (err_status & EF_UFLO) { + /* Ackk! On FIFO errors the Tx unit is turned off! */ + dev->stats.tx_fifo_errors++; + /* Remove this verbosity later! */ + netdev_err(dev, "Tx FIFO error! Status %04x\n", + csr0); + /* Restart the chip */ + lance->RDP = STRT; + } + } else { + if (status & (TF_MORE | TF_ONE)) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + dirty_tx++; + } + +#ifndef final_version + if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { + netdev_err(dev, "out-of-sync dirty pointer, %d vs. 
%d, full=%d\n", + dirty_tx, priv->cur_tx, + priv->tx_full); + dirty_tx += TX_RING_SIZE; + } +#endif + + if (priv->tx_full && netif_queue_stopped(dev) && + dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) { + /* The ring is no longer full */ + priv->tx_full = 0; + netif_wake_queue(dev); + } + + priv->dirty_tx = dirty_tx; + } + + /* Log misc errors */ + if (csr0 & BABL) { + handled = 1; + dev->stats.tx_errors++; /* Tx babble */ + } + if (csr0 & MISS) { + handled = 1; + dev->stats.rx_errors++; /* Missed a Rx frame */ + } + if (csr0 & MERR) { + handled = 1; + netdev_err(dev, "Bus master arbitration failure, status %04x\n", + csr0); + /* Restart the chip */ + lance->RDP = STRT; + } + } + + /* Clear any other interrupt, and set interrupt enable */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON; + + if (ariadne_debug > 4) + netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n", + lance->RAP, lance->RDP); + + return IRQ_RETVAL(handled); +} + +static int ariadne_open(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + u_short in; + u_long version; + int i; + + /* Reset the LANCE */ + in = lance->Reset; + + /* Stop the LANCE */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = STOP; + + /* Check the LANCE version */ + lance->RAP = CSR88; /* Chip ID */ + version = swapw(lance->RDP); + lance->RAP = CSR89; /* Chip ID */ + version |= swapw(lance->RDP) << 16; + if ((version & 0x00000fff) != 0x00000003) { + pr_warn("Couldn't find AMD Ethernet Chip\n"); + return -EAGAIN; + } + if ((version & 0x0ffff000) != 0x00003000) { + pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n", + (version & 0x0ffff000) >> 12); + return -EAGAIN; + } + + netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n", + (version & 0xf0000000) >> 28); + + ariadne_init_ring(dev); + + /* Miscellaneous Stuff */ + lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */ + lance->RDP = 0x0000; + lance->RAP = CSR4; /* Test and Features Control */ + lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM; + + /* Set the Multicast Table */ + lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */ + lance->RDP = 0x0000; + lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */ + lance->RDP = 0x0000; + lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */ + lance->RDP = 0x0000; + lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */ + lance->RDP = 0x0000; + + /* Set the Ethernet Hardware Address */ + lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */ + lance->RDP = ((u_short *)&dev->dev_addr[0])[0]; + lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */ + lance->RDP = ((u_short *)&dev->dev_addr[0])[1]; + lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */ + lance->RDP = ((u_short *)&dev->dev_addr[0])[2]; + + /* Set the Init Block Mode */ + lance->RAP = CSR15; /* Mode Register */ + lance->RDP = 0x0000; + + /* Set the Transmit Descriptor Ring Pointer */ + lance->RAP = CSR30; /* Base Address of Transmit Ring */ + lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring)); + lance->RAP = CSR31; /* Base Address of transmit Ring */ + lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring)); + + /* Set the Receive Descriptor Ring Pointer */ + lance->RAP = CSR24; /* Base Address of Receive Ring */ + lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring)); + lance->RAP = CSR25; /* Base Address of Receive 
Ring */ + lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring)); + + /* Set the Number of RX and TX Ring Entries */ + lance->RAP = CSR76; /* Receive Ring Length */ + lance->RDP = swapw(((u_short)-RX_RING_SIZE)); + lance->RAP = CSR78; /* Transmit Ring Length */ + lance->RDP = swapw(((u_short)-TX_RING_SIZE)); + + /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */ + lance->RAP = ISACSR2; /* Miscellaneous Configuration */ + lance->IDP = ASEL; + + /* LED Control */ + lance->RAP = ISACSR5; /* LED1 Status */ + lance->IDP = PSE|XMTE; + lance->RAP = ISACSR6; /* LED2 Status */ + lance->IDP = PSE|COLE; + lance->RAP = ISACSR7; /* LED3 Status */ + lance->IDP = PSE|RCVE; + + netif_start_queue(dev); + + i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED, + dev->name, dev); + if (i) + return i; + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | STRT; + + return 0; +} + +static int ariadne_close(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + netif_stop_queue(dev); + + lance->RAP = CSR112; /* Missed Frame Count */ + dev->stats.rx_missed_errors = swapw(lance->RDP); + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + + if (ariadne_debug > 1) { + netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", + lance->RDP); + netdev_dbg(dev, "%lu packets missed\n", + dev->stats.rx_missed_errors); + } + + /* We stop the LANCE here -- it occasionally polls memory if we don't */ + lance->RDP = STOP; + + free_irq(IRQ_AMIGA_PORTS, dev); + + return 0; +} + +static inline void ariadne_reset(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = STOP; + ariadne_init_ring(dev); + lance->RDP = INEA | STRT; + netif_start_queue(dev); +} + +static void ariadne_tx_timeout(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + netdev_err(dev, "transmit timed out, status %04x, resetting\n", + lance->RDP); + ariadne_reset(dev); + netif_wake_queue(dev); +} + +static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ariadne_private *priv = netdev_priv(dev); + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + int entry; + unsigned long flags; + int len = skb->len; + +#if 0 + if (ariadne_debug > 3) { + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP); + lance->RDP = 0x0000; + } +#endif + + /* FIXME: is the 79C960 new enough to do its own padding right ? */ + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + len = ETH_ZLEN; + } + + /* Fill in a Tx ring entry */ + + netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data %p len %u\n", + ((u_short *)skb->data)[6], + skb->data + 6, skb->data, + skb->data, skb->len); + + local_irq_save(flags); + + entry = priv->cur_tx % TX_RING_SIZE; + + /* Caution: the write order is important here, set the base address with + the "ownership" bits last */ + + priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len); + priv->tx_ring[entry]->TMD3 = 0x0000; + memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len); + +#ifdef DEBUG + print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1, + (void *)priv->tx_buff[entry], + skb->len > 64 ? 
64 : skb->len, true); +#endif + + priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00) + | TF_OWN | TF_STP | TF_ENP; + + dev_kfree_skb(skb); + + priv->cur_tx++; + if ((priv->cur_tx >= TX_RING_SIZE) && + (priv->dirty_tx >= TX_RING_SIZE)) { + + netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n", + priv->cur_tx, priv->dirty_tx); + + priv->cur_tx -= TX_RING_SIZE; + priv->dirty_tx -= TX_RING_SIZE; + } + dev->stats.tx_bytes += len; + + /* Trigger an immediate send poll */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | TDMD; + + if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) { + netif_stop_queue(dev); + priv->tx_full = 1; + } + local_irq_restore(flags); + + return NETDEV_TX_OK; +} + +static struct net_device_stats *ariadne_get_stats(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + short saved_addr; + unsigned long flags; + + local_irq_save(flags); + saved_addr = lance->RAP; + lance->RAP = CSR112; /* Missed Frame Count */ + dev->stats.rx_missed_errors = swapw(lance->RDP); + lance->RAP = saved_addr; + local_irq_restore(flags); + + return &dev->stats; +} + +/* Set or clear the multicast filter for this adaptor. + * num_addrs == -1 Promiscuous mode, receive all packets + * num_addrs == 0 Normal mode, clear multicast list + * num_addrs > 0 Multicast mode, receive normal and MC packets, + * and do best-effort filtering. + */ +static void set_multicast_list(struct net_device *dev) +{ + volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr; + + if (!netif_running(dev)) + return; + + netif_stop_queue(dev); + + /* We take the simple way out and always enable promiscuous mode */ + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = STOP; /* Temporarily stop the lance */ + ariadne_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + lance->RAP = CSR15; /* Mode Register */ + lance->RDP = PROM; /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int num_addrs = netdev_mc_count(dev); + int i; + /* We don't use the multicast table, + * but rely on upper-layer filtering + */ + memset(multicast_table, (num_addrs == 0) ? 
0 : -1, + sizeof(multicast_table)); + for (i = 0; i < 4; i++) { + lance->RAP = CSR8 + (i << 8); + /* Logical Address Filter */ + lance->RDP = swapw(multicast_table[i]); + } + lance->RAP = CSR15; /* Mode Register */ + lance->RDP = 0x0000; /* Unset promiscuous mode */ + } + + lance->RAP = CSR0; /* PCnet-ISA Controller Status */ + lance->RDP = INEA | STRT | IDON;/* Resume normal operation */ + + netif_wake_queue(dev); +} + + +static void ariadne_remove_one(struct zorro_dev *z) +{ + struct net_device *dev = zorro_get_drvdata(z); + + unregister_netdev(dev); + release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960)); + release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE); + free_netdev(dev); +} + +static struct zorro_device_id ariadne_zorro_tbl[] = { + { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl); + +static const struct net_device_ops ariadne_netdev_ops = { + .ndo_open = ariadne_open, + .ndo_stop = ariadne_close, + .ndo_start_xmit = ariadne_start_xmit, + .ndo_tx_timeout = ariadne_tx_timeout, + .ndo_get_stats = ariadne_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int ariadne_init_one(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + unsigned long board = z->resource.start; + unsigned long base_addr = board + ARIADNE_LANCE; + unsigned long mem_start = board + ARIADNE_RAM; + struct resource *r1, *r2; + struct net_device *dev; + u32 serial; + int err; + + r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); + if (!r1) + return -EBUSY; + r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM"); + if (!r2) { + release_mem_region(base_addr, sizeof(struct Am79C960)); + return -EBUSY; + } + + dev = alloc_etherdev(sizeof(struct ariadne_private)); + if (dev == NULL) { + release_mem_region(base_addr, sizeof(struct Am79C960)); + release_mem_region(mem_start, ARIADNE_RAM_SIZE); + return -ENOMEM; + } + + r1->name = dev->name; + r2->name = dev->name; + + serial = be32_to_cpu(z->rom.er_SerialNumber); + dev->dev_addr[0] = 0x00; + dev->dev_addr[1] = 0x60; + dev->dev_addr[2] = 0x30; + dev->dev_addr[3] = (serial >> 16) & 0xff; + dev->dev_addr[4] = (serial >> 8) & 0xff; + dev->dev_addr[5] = serial & 0xff; + dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr); + dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start); + dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE; + + dev->netdev_ops = &ariadne_netdev_ops; + dev->watchdog_timeo = 5 * HZ; + + err = register_netdev(dev); + if (err) { + release_mem_region(base_addr, sizeof(struct Am79C960)); + release_mem_region(mem_start, ARIADNE_RAM_SIZE); + free_netdev(dev); + return err; + } + zorro_set_drvdata(z, dev); + + netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n", + board, dev->dev_addr); + + return 0; +} + +static struct zorro_driver ariadne_driver = { + .name = "ariadne", + .id_table = ariadne_zorro_tbl, + .probe = ariadne_init_one, + .remove = ariadne_remove_one, +}; + +static int __init ariadne_init_module(void) +{ + return zorro_register_driver(&ariadne_driver); +} + +static void __exit ariadne_cleanup_module(void) +{ + zorro_unregister_driver(&ariadne_driver); +} + +module_init(ariadne_init_module); +module_exit(ariadne_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/ariadne.h b/drivers/net/ethernet/amd/ariadne.h new file mode 100644 index 000000000..727be5cdd --- 
/dev/null +++ b/drivers/net/ethernet/amd/ariadne.h @@ -0,0 +1,415 @@ +/* + * Amiga Linux/m68k Ariadne Ethernet Driver + * + * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org) + * Peter De Schrijver + * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be) + * + * ---------------------------------------------------------------------------------- + * + * This program is based on + * + * lance.c: An AMD LANCE ethernet driver for linux. + * Written 1993-94 by Donald Becker. + * + * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller + * Advanced Micro Devices + * Publication #16907, Rev. B, Amendment/0, May 1994 + * + * MC68230: Parallel Interface/Timer (PI/T) + * Motorola Semiconductors, December, 1983 + * + * ---------------------------------------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + * + * ---------------------------------------------------------------------------------- + * + * The Ariadne is a Zorro-II board made by Village Tronic. It contains: + * + * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both + * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors + * + * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports + */ + + + /* + * Am79C960 PCnet-ISA + */ + +struct Am79C960 { + volatile u_short AddressPROM[8]; + /* IEEE Address PROM (Unused in the Ariadne) */ + volatile u_short RDP; /* Register Data Port */ + volatile u_short RAP; /* Register Address Port */ + volatile u_short Reset; /* Reset Chip on Read Access */ + volatile u_short IDP; /* ISACSR Data Port */ +}; + + + /* + * Am79C960 Control and Status Registers + * + * These values are already swap()ed!! 
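+ *
+ * For example, CSR24 (Base Address of Receive Ring) is register
+ * number 24 = 0x0018 in the Am79C960 databook; the board presents
+ * the chip's 16-bit registers byte-swapped to the m68k (hence the
+ * swapw() macro in ariadne.c), so it is listed below as
+ * swapw(0x0018) == 0x1800. Data read back through RDP has to be
+ * swapped the same way, as in ariadne_get_stats():
+ *
+ *	lance->RAP = CSR112;	(Missed Frame Count)
+ *	dev->stats.rx_missed_errors = swapw(lance->RDP);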
+ * + * Only registers marked with a `-' are intended for network software + * access + */ + +#define CSR0 0x0000 /* - PCnet-ISA Controller Status */ +#define CSR1 0x0100 /* - IADR[15:0] */ +#define CSR2 0x0200 /* - IADR[23:16] */ +#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */ +#define CSR4 0x0400 /* - Test and Features Control */ +#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */ +#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */ +#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */ +#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */ +#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */ +#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */ +#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */ +#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */ +#define CSR15 0x0f00 /* - Mode Register */ +#define CSR16 0x1000 /* Initialization Block Address Lower */ +#define CSR17 0x1100 /* Initialization Block Address Upper */ +#define CSR18 0x1200 /* Current Receive Buffer Address */ +#define CSR19 0x1300 /* Current Receive Buffer Address */ +#define CSR20 0x1400 /* Current Transmit Buffer Address */ +#define CSR21 0x1500 /* Current Transmit Buffer Address */ +#define CSR22 0x1600 /* Next Receive Buffer Address */ +#define CSR23 0x1700 /* Next Receive Buffer Address */ +#define CSR24 0x1800 /* - Base Address of Receive Ring */ +#define CSR25 0x1900 /* - Base Address of Receive Ring */ +#define CSR26 0x1a00 /* Next Receive Descriptor Address */ +#define CSR27 0x1b00 /* Next Receive Descriptor Address */ +#define CSR28 0x1c00 /* Current Receive Descriptor Address */ +#define CSR29 0x1d00 /* Current Receive Descriptor Address */ +#define CSR30 0x1e00 /* - Base Address of Transmit Ring */ +#define CSR31 0x1f00 /* - Base Address of transmit Ring */ +#define CSR32 0x2000 /* Next Transmit Descriptor Address */ +#define CSR33 0x2100 /* Next Transmit Descriptor Address */ +#define CSR34 0x2200 /* Current Transmit Descriptor Address */ +#define CSR35 0x2300 /* Current Transmit Descriptor Address */ +#define CSR36 0x2400 /* Next Next Receive Descriptor Address */ +#define CSR37 0x2500 /* Next Next Receive Descriptor Address */ +#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */ +#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */ +#define CSR40 0x2800 /* Current Receive Status and Byte Count */ +#define CSR41 0x2900 /* Current Receive Status and Byte Count */ +#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */ +#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */ +#define CSR44 0x2c00 /* Next Receive Status and Byte Count */ +#define CSR45 0x2d00 /* Next Receive Status and Byte Count */ +#define CSR46 0x2e00 /* Poll Time Counter */ +#define CSR47 0x2f00 /* Polling Interval */ +#define CSR48 0x3000 /* Temporary Storage */ +#define CSR49 0x3100 /* Temporary Storage */ +#define CSR50 0x3200 /* Temporary Storage */ +#define CSR51 0x3300 /* Temporary Storage */ +#define CSR52 0x3400 /* Temporary Storage */ +#define CSR53 0x3500 /* Temporary Storage */ +#define CSR54 0x3600 /* Temporary Storage */ +#define CSR55 0x3700 /* Temporary Storage */ +#define CSR56 0x3800 /* Temporary Storage */ +#define CSR57 0x3900 /* Temporary Storage */ +#define CSR58 0x3a00 /* Temporary Storage */ +#define CSR59 0x3b00 /* Temporary Storage */ +#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */ +#define CSR61 0x3d00 /* Previous Transmit Descriptor 
Address */ +#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */ +#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */ +#define CSR64 0x4000 /* Next Transmit Buffer Address */ +#define CSR65 0x4100 /* Next Transmit Buffer Address */ +#define CSR66 0x4200 /* Next Transmit Status and Byte Count */ +#define CSR67 0x4300 /* Next Transmit Status and Byte Count */ +#define CSR68 0x4400 /* Transmit Status Temporary Storage */ +#define CSR69 0x4500 /* Transmit Status Temporary Storage */ +#define CSR70 0x4600 /* Temporary Storage */ +#define CSR71 0x4700 /* Temporary Storage */ +#define CSR72 0x4800 /* Receive Ring Counter */ +#define CSR74 0x4a00 /* Transmit Ring Counter */ +#define CSR76 0x4c00 /* - Receive Ring Length */ +#define CSR78 0x4e00 /* - Transmit Ring Length */ +#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */ +#define CSR82 0x5200 /* - Bus Activity Timer */ +#define CSR84 0x5400 /* DMA Address */ +#define CSR85 0x5500 /* DMA Address */ +#define CSR86 0x5600 /* Buffer Byte Counter */ +#define CSR88 0x5800 /* - Chip ID */ +#define CSR89 0x5900 /* - Chip ID */ +#define CSR92 0x5c00 /* Ring Length Conversion */ +#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */ +#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */ +#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */ +#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */ +#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */ +#define CSR104 0x6800 /* SWAP */ +#define CSR105 0x6900 /* SWAP */ +#define CSR108 0x6c00 /* Buffer Management Scratch */ +#define CSR109 0x6d00 /* Buffer Management Scratch */ +#define CSR112 0x7000 /* - Missed Frame Count */ +#define CSR114 0x7200 /* - Receive Collision Count */ +#define CSR124 0x7c00 /* - Buffer Management Unit Test */ + + + /* + * Am79C960 ISA Control and Status Registers + * + * These values are already swap()ed!! + */ + +#define ISACSR0 0x0000 /* Master Mode Read Active */ +#define ISACSR1 0x0100 /* Master Mode Write Active */ +#define ISACSR2 0x0200 /* Miscellaneous Configuration */ +#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */ +#define ISACSR5 0x0500 /* LED1 Status */ +#define ISACSR6 0x0600 /* LED2 Status */ +#define ISACSR7 0x0700 /* LED3 Status */ + + + /* + * Bit definitions for CSR0 (PCnet-ISA Controller Status) + * + * These values are already swap()ed!! + */ + +#define ERR 0x0080 /* Error */ +#define BABL 0x0040 /* Babble: Transmitted too many bits */ +#define CERR 0x0020 /* No Heartbeat (10BASE-T) */ +#define MISS 0x0010 /* Missed Frame */ +#define MERR 0x0008 /* Memory Error */ +#define RINT 0x0004 /* Receive Interrupt */ +#define TINT 0x0002 /* Transmit Interrupt */ +#define IDON 0x0001 /* Initialization Done */ +#define INTR 0x8000 /* Interrupt Flag */ +#define INEA 0x4000 /* Interrupt Enable */ +#define RXON 0x2000 /* Receive On */ +#define TXON 0x1000 /* Transmit On */ +#define TDMD 0x0800 /* Transmit Demand */ +#define STOP 0x0400 /* Stop */ +#define STRT 0x0200 /* Start */ +#define INIT 0x0100 /* Initialize */ + + + /* + * Bit definitions for CSR3 (Interrupt Masks and Deferral Control) + * + * These values are already swap()ed!! 
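+ * (same convention as for the CSR numbers above: e.g. the databook's
+ * BABLM mask in bit 14, 0x4000, appears below as swapw(0x4000) == 0x0040)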
+ */ + +#define BABLM 0x0040 /* Babble Mask */ +#define MISSM 0x0010 /* Missed Frame Mask */ +#define MERRM 0x0008 /* Memory Error Mask */ +#define RINTM 0x0004 /* Receive Interrupt Mask */ +#define TINTM 0x0002 /* Transmit Interrupt Mask */ +#define IDONM 0x0001 /* Initialization Done Mask */ +#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */ +#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */ + + + /* + * Bit definitions for CSR4 (Test and Features Control) + * + * These values are already swap()ed!! + */ + +#define ENTST 0x0080 /* Enable Test Mode */ +#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */ +#define TIMER 0x0020 /* Timer Enable Register */ +#define DPOLL 0x0010 /* Disable Transmit Polling */ +#define APAD_XMT 0x0008 /* Auto Pad Transmit */ +#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */ +#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */ +#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */ +#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */ +#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */ +#define TXSTRT 0x0800 /* Transmit Start Status */ +#define TXSTRTM 0x0400 /* Transmit Start Mask */ +#define JAB 0x0200 /* Jabber Error */ +#define JABM 0x0100 /* Jabber Error Mask */ + + + /* + * Bit definitions for CSR15 (Mode Register) + * + * These values are already swap()ed!! + */ + +#define PROM 0x0080 /* Promiscuous Mode */ +#define DRCVBC 0x0040 /* Disable Receive Broadcast */ +#define DRCVPA 0x0020 /* Disable Receive Physical Address */ +#define DLNKTST 0x0010 /* Disable Link Status */ +#define DAPC 0x0008 /* Disable Automatic Polarity Correction */ +#define MENDECL 0x0004 /* MENDEC Loopback Mode */ +#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */ +#define PORTSEL1 0x0001 /* Port Select Bits */ +#define PORTSEL2 0x8000 /* Port Select Bits */ +#define INTL 0x4000 /* Internal Loopback */ +#define DRTY 0x2000 /* Disable Retry */ +#define FCOLL 0x1000 /* Force Collision */ +#define DXMTFCS 0x0800 /* Disable Transmit CRC */ +#define LOOP 0x0400 /* Loopback Enable */ +#define DTX 0x0200 /* Disable Transmitter */ +#define DRX 0x0100 /* Disable Receiver */ + + + /* + * Bit definitions for ISACSR2 (Miscellaneous Configuration) + * + * These values are already swap()ed!! + */ + +#define ASEL 0x0200 /* Media Interface Port Auto Select */ + + + /* + * Bit definitions for ISACSR5-7 (LED1-3 Status) + * + * These values are already swap()ed!! 
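+ * (ariadne_open() programs LED1 = PSE|XMTE, LED2 = PSE|COLE and
+ * LED3 = PSE|RCVE, i.e. pulse-stretched transmit, collision and
+ * receive activity indicators)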
+ */ + +#define LEDOUT 0x0080 /* Current LED Status */ +#define PSE 0x8000 /* Pulse Stretcher Enable */ +#define XMTE 0x1000 /* Enable Transmit Status Signal */ +#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */ +#define RCVE 0x0400 /* Enable Receive Status Signal */ +#define JABE 0x0200 /* Enable Jabber Signal */ +#define COLE 0x0100 /* Enable Collision Signal */ + + + /* + * Receive Descriptor Ring Entry + */ + +struct RDRE { + volatile u_short RMD0; /* LADR[15:0] */ + volatile u_short RMD1; /* HADR[23:16] | Receive Flags */ + volatile u_short RMD2; /* Buffer Byte Count (two's complement) */ + volatile u_short RMD3; /* Message Byte Count */ +}; + + + /* + * Transmit Descriptor Ring Entry + */ + +struct TDRE { + volatile u_short TMD0; /* LADR[15:0] */ + volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */ + volatile u_short TMD2; /* Buffer Byte Count (two's complement) */ + volatile u_short TMD3; /* Error Flags */ +}; + + + /* + * Receive Flags + */ + +#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */ +#define RF_ERR 0x0040 /* Error */ +#define RF_FRAM 0x0020 /* Framing Error */ +#define RF_OFLO 0x0010 /* Overflow Error */ +#define RF_CRC 0x0008 /* CRC Error */ +#define RF_BUFF 0x0004 /* Buffer Error */ +#define RF_STP 0x0002 /* Start of Packet */ +#define RF_ENP 0x0001 /* End of Packet */ + + + /* + * Transmit Flags + */ + +#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */ +#define TF_ERR 0x0040 /* Error */ +#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */ +#define TF_MORE 0x0010 /* More than one retry needed */ +#define TF_ONE 0x0008 /* One retry needed */ +#define TF_DEF 0x0004 /* Deferred */ +#define TF_STP 0x0002 /* Start of Packet */ +#define TF_ENP 0x0001 /* End of Packet */ + + + /* + * Error Flags + */ + +#define EF_BUFF 0x0080 /* Buffer Error */ +#define EF_UFLO 0x0040 /* Underflow Error */ +#define EF_LCOL 0x0010 /* Late Collision */ +#define EF_LCAR 0x0008 /* Loss of Carrier */ +#define EF_RTRY 0x0004 /* Retry Error */ +#define EF_TDR 0xff03 /* Time Domain Reflectometry */ + + + + /* + * MC68230 Parallel Interface/Timer + */ + +struct MC68230 { + volatile u_char PGCR; /* Port General Control Register */ + u_char Pad1[1]; + volatile u_char PSRR; /* Port Service Request Register */ + u_char Pad2[1]; + volatile u_char PADDR; /* Port A Data Direction Register */ + u_char Pad3[1]; + volatile u_char PBDDR; /* Port B Data Direction Register */ + u_char Pad4[1]; + volatile u_char PCDDR; /* Port C Data Direction Register */ + u_char Pad5[1]; + volatile u_char PIVR; /* Port Interrupt Vector Register */ + u_char Pad6[1]; + volatile u_char PACR; /* Port A Control Register */ + u_char Pad7[1]; + volatile u_char PBCR; /* Port B Control Register */ + u_char Pad8[1]; + volatile u_char PADR; /* Port A Data Register */ + u_char Pad9[1]; + volatile u_char PBDR; /* Port B Data Register */ + u_char Pad10[1]; + volatile u_char PAAR; /* Port A Alternate Register */ + u_char Pad11[1]; + volatile u_char PBAR; /* Port B Alternate Register */ + u_char Pad12[1]; + volatile u_char PCDR; /* Port C Data Register */ + u_char Pad13[1]; + volatile u_char PSR; /* Port Status Register */ + u_char Pad14[5]; + volatile u_char TCR; /* Timer Control Register */ + u_char Pad15[1]; + volatile u_char TIVR; /* Timer Interrupt Vector Register */ + u_char Pad16[3]; + volatile u_char CPRH; /* Counter Preload Register (High) */ + u_char Pad17[1]; + volatile u_char CPRM; /* Counter Preload Register (Mid) */ + u_char Pad18[1]; + volatile u_char CPRL; /* Counter 
Preload Register (Low) */ + u_char Pad19[3]; + volatile u_char CNTRH; /* Count Register (High) */ + u_char Pad20[1]; + volatile u_char CNTRM; /* Count Register (Mid) */ + u_char Pad21[1]; + volatile u_char CNTRL; /* Count Register (Low) */ + u_char Pad22[1]; + volatile u_char TSR; /* Timer Status Register */ + u_char Pad23[11]; +}; + + + /* + * Ariadne Expansion Board Structure + */ + +#define ARIADNE_LANCE 0x360 + +#define ARIADNE_PIT 0x1000 + +#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */ +#define ARIADNE_BOOTPROM_SIZE 0x4000 + +#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */ +#define ARIADNE_RAM_SIZE 0x8000 + diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c new file mode 100644 index 000000000..b10964e8c --- /dev/null +++ b/drivers/net/ethernet/amd/atarilance.c @@ -0,0 +1,1169 @@ +/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */ +/* + Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de) + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + This drivers was written with the following sources of reference: + - The driver for the Riebl Lance card by the TU Vienna. + - The modified TUW driver for PAM's VME cards + - The PC-Linux driver for Lance cards (but this is for bus master + cards, not the shared memory ones) + - The Amiga Ariadne driver + + v1.0: (in 1.2.13pl4/0.9.13) + Initial version + v1.1: (in 1.2.13pl5) + more comments + deleted some debugging stuff + optimized register access (keep AREG pointing to CSR0) + following AMD, CSR0_STRT should be set only after IDON is detected + use memcpy() for data transfers, that also employs long word moves + better probe procedure for 24-bit systems + non-VME-RieblCards need extra delays in memcpy + must also do write test, since 0xfxe00000 may hit ROM + use 8/32 tx/rx buffers, which should give better NFS performance; + this is made possible by shifting the last packet buffer after the + RieblCard reserved area + v1.2: (in 1.2.13pl8) + again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000 + and thus RAM, in case of no Lance found all memory contents have to + be restored! + Now possible to compile as module. + v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3) + Several little 1.3 adaptions + When the lance is stopped it jumps back into little-endian + mode. It is therefore necessary to put it back where it + belongs, in big endian mode, in order to make things work. + This might be the reason why multicast-mode didn't work + before, but I'm not able to test it as I only got an Amiga + (we had similar problems with the A2065 driver). + +*/ + +static char version[] = "atarilance.c: v1.3 04/04/96 " + "Roman.Hodek@informatik.uni-erlangen.de\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* Debug level: + * 0 = silent, print only serious errors + * 1 = normal, print error messages + * 2 = debug, print debug infos + * 3 = debug, print even more debug infos (packet data) + */ + +#define LANCE_DEBUG 1 + +#ifdef LANCE_DEBUG +static int lance_debug = LANCE_DEBUG; +#else +static int lance_debug = 1; +#endif +module_param(lance_debug, int, 0); +MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)"); +MODULE_LICENSE("GPL"); + +/* Print debug messages on probing? 
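+   (off by default: LANCE_DEBUG_PROBE is #undef'ed just below, which
+   turns PROBE_PRINT() into a no-op)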
*/ +#undef LANCE_DEBUG_PROBE + +#define DPRINTK(n,a) \ + do { \ + if (lance_debug >= n) \ + printk a; \ + } while( 0 ) + +#ifdef LANCE_DEBUG_PROBE +# define PROBE_PRINT(a) printk a +#else +# define PROBE_PRINT(a) +#endif + +/* These define the number of Rx and Tx buffers as log2. (Only powers + * of two are valid) + * Much more rx buffers (32) are reserved than tx buffers (8), since receiving + * is more time critical then sending and packets may have to remain in the + * board's memory when main memory is low. + */ + +#define TX_LOG_RING_SIZE 3 +#define RX_LOG_RING_SIZE 5 + +/* These are the derived values */ + +#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) +#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) + +#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) +#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +#define TX_TIMEOUT (HZ/5) + +/* The LANCE Rx and Tx ring descriptors. */ +struct lance_rx_head { + unsigned short base; /* Low word of base addr */ + volatile unsigned char flag; + unsigned char base_hi; /* High word of base addr (unused) */ + short buf_length; /* This length is 2s complement! */ + volatile short msg_length; /* This length is "normal". */ +}; + +struct lance_tx_head { + unsigned short base; /* Low word of base addr */ + volatile unsigned char flag; + unsigned char base_hi; /* High word of base addr (unused) */ + short length; /* Length is 2s complement! */ + volatile short misc; +}; + +struct ringdesc { + unsigned short adr_lo; /* Low 16 bits of address */ + unsigned char len; /* Length bits */ + unsigned char adr_hi; /* High 8 bits of address (unused) */ +}; + +/* The LANCE initialization block, described in databook. */ +struct lance_init_block { + unsigned short mode; /* Pre-set mode */ + unsigned char hwaddr[6]; /* Physical ethernet address */ + unsigned filter[2]; /* Multicast filter (unused). */ + /* Receive and transmit ring base, along with length bits. */ + struct ringdesc rx_ring; + struct ringdesc tx_ring; +}; + +/* The whole layout of the Lance shared memory */ +struct lance_memory { + struct lance_init_block init; + struct lance_tx_head tx_head[TX_RING_SIZE]; + struct lance_rx_head rx_head[RX_RING_SIZE]; + char packet_area[0]; /* packet data follow after the + * init block and the ring + * descriptors and are located + * at runtime */ +}; + +/* RieblCard specifics: + * The original TOS driver for these cards reserves the area from offset + * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the + * Ethernet address there, and the magic for verifying the data's validity. + * The reserved area isn't touch by packet buffers. Furthermore, offset 0xfffe + * is reserved for the interrupt vector number. + */ +#define RIEBL_RSVD_START 0xee70 +#define RIEBL_RSVD_END 0xeec0 +#define RIEBL_MAGIC 0x09051990 +#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a)) +#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e)) +#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe)) + +/* This is a default address for the old RieblCards without a battery + * that have no ethernet address at boot time. 00:00:36:04 is the + * prefix for Riebl cards, the 00:00 at the end is arbitrary. 
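+ * (presumably substituted by the probe code when it identifies an
+ * OLD_RIEBL card -- see enum lance_type below)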
+ */ + +static unsigned char OldRieblDefHwaddr[6] = { + 0x00, 0x00, 0x36, 0x04, 0x00, 0x00 +}; + + +/* I/O registers of the Lance chip */ + +struct lance_ioreg { +/* base+0x0 */ volatile unsigned short data; +/* base+0x2 */ volatile unsigned short addr; + unsigned char _dummy1[3]; +/* base+0x7 */ volatile unsigned char ivec; + unsigned char _dummy2[5]; +/* base+0xd */ volatile unsigned char eeprom; + unsigned char _dummy3; +/* base+0xf */ volatile unsigned char mem; +}; + +/* Types of boards this driver supports */ + +enum lance_type { + OLD_RIEBL, /* old Riebl card without battery */ + NEW_RIEBL, /* new Riebl card with battery */ + PAM_CARD /* PAM card with EEPROM */ +}; + +static char *lance_names[] = { + "Riebl-Card (without battery)", + "Riebl-Card (with battery)", + "PAM intern card" +}; + +/* The driver's private device structure */ + +struct lance_private { + enum lance_type cardtype; + struct lance_ioreg *iobase; + struct lance_memory *mem; + int cur_rx, cur_tx; /* The next free ring entry */ + int dirty_tx; /* Ring entries to be freed. */ + /* copy function */ + void *(*memcpy_f)( void *, const void *, size_t ); +/* This must be long for set_bit() */ + long tx_full; + spinlock_t devlock; +}; + +/* I/O register access macros */ + +#define MEM lp->mem +#define DREG IO->data +#define AREG IO->addr +#define REGA(a) (*( AREG = (a), &DREG )) + +/* Definitions for packet buffer access: */ +#define PKT_BUF_SZ 1544 +/* Get the address of a packet buffer corresponding to a given buffer head */ +#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base) + +/* Possible memory/IO addresses for probing */ + +static struct lance_addr { + unsigned long memaddr; + unsigned long ioaddr; + int slow_flag; +} lance_addr_list[] = { + { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */ + { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE + (highest byte stripped) */ + { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST + (highest byte stripped) */ + { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. 
to + avoid conflict with ROM + (highest byte stripped) */ + { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE + (highest byte stripped) */ + { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE + (highest byte stripped) */ +}; + +#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list) + + +/* Definitions for the Lance */ + +/* tx_head flags */ +#define TMD1_ENP 0x01 /* end of packet */ +#define TMD1_STP 0x02 /* start of packet */ +#define TMD1_DEF 0x04 /* deferred */ +#define TMD1_ONE 0x08 /* one retry needed */ +#define TMD1_MORE 0x10 /* more than one retry needed */ +#define TMD1_ERR 0x40 /* error summary */ +#define TMD1_OWN 0x80 /* ownership (set: chip owns) */ + +#define TMD1_OWN_CHIP TMD1_OWN +#define TMD1_OWN_HOST 0 + +/* tx_head misc field */ +#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */ +#define TMD3_RTRY 0x0400 /* failed after 16 retries */ +#define TMD3_LCAR 0x0800 /* carrier lost */ +#define TMD3_LCOL 0x1000 /* late collision */ +#define TMD3_UFLO 0x4000 /* underflow (late memory) */ +#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */ + +/* rx_head flags */ +#define RMD1_ENP 0x01 /* end of packet */ +#define RMD1_STP 0x02 /* start of packet */ +#define RMD1_BUFF 0x04 /* buffer error */ +#define RMD1_CRC 0x08 /* CRC error */ +#define RMD1_OFLO 0x10 /* overflow */ +#define RMD1_FRAM 0x20 /* framing error */ +#define RMD1_ERR 0x40 /* error summary */ +#define RMD1_OWN 0x80 /* ownership (set: ship owns) */ + +#define RMD1_OWN_CHIP RMD1_OWN +#define RMD1_OWN_HOST 0 + +/* register names */ +#define CSR0 0 /* mode/status */ +#define CSR1 1 /* init block addr (low) */ +#define CSR2 2 /* init block addr (high) */ +#define CSR3 3 /* misc */ +#define CSR8 8 /* address filter */ +#define CSR15 15 /* promiscuous mode */ + +/* CSR0 */ +/* (R=readable, W=writeable, S=set on write, C=clear on write) */ +#define CSR0_INIT 0x0001 /* initialize (RS) */ +#define CSR0_STRT 0x0002 /* start (RS) */ +#define CSR0_STOP 0x0004 /* stop (RS) */ +#define CSR0_TDMD 0x0008 /* transmit demand (RS) */ +#define CSR0_TXON 0x0010 /* transmitter on (R) */ +#define CSR0_RXON 0x0020 /* receiver on (R) */ +#define CSR0_INEA 0x0040 /* interrupt enable (RW) */ +#define CSR0_INTR 0x0080 /* interrupt active (R) */ +#define CSR0_IDON 0x0100 /* initialization done (RC) */ +#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */ +#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */ +#define CSR0_MERR 0x0800 /* memory error (RC) */ +#define CSR0_MISS 0x1000 /* missed frame (RC) */ +#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */ +#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */ +#define CSR0_ERR 0x8000 /* error (RC) */ + +/* CSR3 */ +#define CSR3_BCON 0x0001 /* byte control */ +#define CSR3_ACON 0x0002 /* ALE control */ +#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */ + + + +/***************************** Prototypes *****************************/ + +static unsigned long lance_probe1( struct net_device *dev, struct lance_addr + *init_rec ); +static int lance_open( struct net_device *dev ); +static void lance_init_ring( struct net_device *dev ); +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static irqreturn_t lance_interrupt( int irq, void *dev_id ); +static int lance_rx( struct net_device *dev ); +static int lance_close( struct net_device *dev ); +static void set_multicast_list( struct net_device *dev ); +static int lance_set_mac_address( struct net_device *dev, void *addr ); +static void 
lance_tx_timeout (struct net_device *dev); + +/************************* End of Prototypes **************************/ + + + + + +static void *slow_memcpy( void *dst, const void *src, size_t len ) + +{ char *cto = dst; + const char *cfrom = src; + + while( len-- ) { + *cto++ = *cfrom++; + MFPDELAY(); + } + return dst; +} + + +struct net_device * __init atarilance_probe(int unit) +{ + int i; + static int found; + struct net_device *dev; + int err = -ENODEV; + + if (!MACH_IS_ATARI || found) + /* Assume there's only one board possible... That seems true, since + * the Riebl/PAM board's address cannot be changed. */ + return ERR_PTR(-ENODEV); + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + for( i = 0; i < N_LANCE_ADDR; ++i ) { + if (lance_probe1( dev, &lance_addr_list[i] )) { + found = 1; + err = register_netdev(dev); + if (!err) + return dev; + free_irq(dev->irq, dev); + break; + } + } + free_netdev(dev); + return ERR_PTR(err); +} + + +/* Derived from hwreg_present() in atari/config.c: */ + +static noinline int __init addr_accessible(volatile void *regp, int wordflag, + int writeflag) +{ + int ret; + unsigned long flags; + long *vbr, save_berr; + + local_irq_save(flags); + + __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : ); + save_berr = vbr[2]; + + __asm__ __volatile__ + ( "movel %/sp,%/d1\n\t" + "movel #Lberr,%2@\n\t" + "moveq #0,%0\n\t" + "tstl %3\n\t" + "bne 1f\n\t" + "moveb %1@,%/d0\n\t" + "nop \n\t" + "bra 2f\n" +"1: movew %1@,%/d0\n\t" + "nop \n" +"2: tstl %4\n\t" + "beq 2f\n\t" + "tstl %3\n\t" + "bne 1f\n\t" + "clrb %1@\n\t" + "nop \n\t" + "moveb %/d0,%1@\n\t" + "nop \n\t" + "bra 2f\n" +"1: clrw %1@\n\t" + "nop \n\t" + "movew %/d0,%1@\n\t" + "nop \n" +"2: moveq #1,%0\n" +"Lberr: movel %/d1,%/sp" + : "=&d" (ret) + : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag) + : "d0", "d1", "memory" + ); + + vbr[2] = save_berr; + local_irq_restore(flags); + + return ret; +} + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_rx_mode = set_multicast_list, + .ndo_set_mac_address = lance_set_mac_address, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static unsigned long __init lance_probe1( struct net_device *dev, + struct lance_addr *init_rec ) +{ + volatile unsigned short *memaddr = + (volatile unsigned short *)init_rec->memaddr; + volatile unsigned short *ioaddr = + (volatile unsigned short *)init_rec->ioaddr; + struct lance_private *lp; + struct lance_ioreg *IO; + int i; + static int did_version; + unsigned short save1, save2; + + PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n", + (long)memaddr, (long)ioaddr )); + + /* Test whether memory readable and writable */ + PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" )); + if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail; + + /* Written values should come back... 
*/ + PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" )); + save1 = *memaddr; + *memaddr = 0x0001; + if (*memaddr != 0x0001) goto probe_fail; + PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" )); + *memaddr = 0x0000; + if (*memaddr != 0x0000) goto probe_fail; + *memaddr = save1; + + /* First port should be readable and writable */ + PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" )); + if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail; + + /* and written values should be readable */ + PROBE_PRINT(( "lance_probe1: testing ioport to be writeable\n" )); + save2 = ioaddr[1]; + ioaddr[1] = 0x0001; + if (ioaddr[1] != 0x0001) goto probe_fail; + + /* The CSR0_INIT bit should not be readable */ + PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" )); + save1 = ioaddr[0]; + ioaddr[1] = CSR0; + ioaddr[0] = CSR0_INIT | CSR0_STOP; + if (ioaddr[0] != CSR0_STOP) { + ioaddr[0] = save1; + ioaddr[1] = save2; + goto probe_fail; + } + PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" )); + ioaddr[0] = CSR0_STOP; + if (ioaddr[0] != CSR0_STOP) { + ioaddr[0] = save1; + ioaddr[1] = save2; + goto probe_fail; + } + + /* Now ok... */ + PROBE_PRINT(( "lance_probe1: Lance card detected\n" )); + goto probe_ok; + + probe_fail: + return 0; + + probe_ok: + lp = netdev_priv(dev); + MEM = (struct lance_memory *)memaddr; + IO = lp->iobase = (struct lance_ioreg *)ioaddr; + dev->base_addr = (unsigned long)ioaddr; /* informational only */ + lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy; + + REGA( CSR0 ) = CSR0_STOP; + + /* Now test for type: If the eeprom I/O port is readable, it is a + * PAM card */ + if (addr_accessible( &(IO->eeprom), 0, 0 )) { + /* Switch back to Ram */ + i = IO->mem; + lp->cardtype = PAM_CARD; + } + else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) { + lp->cardtype = NEW_RIEBL; + } + else + lp->cardtype = OLD_RIEBL; + + if (lp->cardtype == PAM_CARD || + memaddr == (unsigned short *)0xffe00000) { + /* PAMs card and Riebl on ST use level 5 autovector */ + if (request_irq(IRQ_AUTO_5, lance_interrupt, 0, + "PAM,Riebl-ST Ethernet", dev)) { + printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); + return 0; + } + dev->irq = IRQ_AUTO_5; + } + else { + /* For VME-RieblCards, request a free VME int */ + unsigned int irq = atari_register_vme_int(); + if (!irq) { + printk( "Lance: request for VME interrupt failed\n" ); + return 0; + } + if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet", + dev)) { + printk( "Lance: request for irq %u failed\n", irq ); + return 0; + } + dev->irq = irq; + } + + printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ", + dev->name, lance_names[lp->cardtype], + (unsigned long)ioaddr, + (unsigned long)memaddr, + dev->irq, + init_rec->slow_flag ? " (slow memcpy)" : "" ); + + /* Get the ethernet address */ + switch( lp->cardtype ) { + case OLD_RIEBL: + /* No ethernet address! 
(Set some default address) */ + memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN); + break; + case NEW_RIEBL: + lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN); + break; + case PAM_CARD: + i = IO->eeprom; + for( i = 0; i < 6; ++i ) + dev->dev_addr[i] = + ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) | + ((((unsigned short *)MEM)[i*2+1] & 0x0f)); + i = IO->mem; + break; + } + printk("%pM\n", dev->dev_addr); + if (lp->cardtype == OLD_RIEBL) { + printk( "%s: Warning: This is a default ethernet address!\n", + dev->name ); + printk( " Use \"ifconfig hw ether ...\" to set the address.\n" ); + } + + spin_lock_init(&lp->devlock); + + MEM->init.mode = 0x0000; /* Disable Rx and Tx. */ + for( i = 0; i < 6; i++ ) + MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ + MEM->init.filter[0] = 0x00000000; + MEM->init.filter[1] = 0x00000000; + MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head ); + MEM->init.rx_ring.adr_hi = 0; + MEM->init.rx_ring.len = RX_RING_LEN_BITS; + MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head ); + MEM->init.tx_ring.adr_hi = 0; + MEM->init.tx_ring.len = TX_RING_LEN_BITS; + + if (lp->cardtype == PAM_CARD) + IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq); + else + *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq); + + if (did_version++ == 0) + DPRINTK( 1, ( version )); + + dev->netdev_ops = &lance_netdev_ops; + + /* XXX MSch */ + dev->watchdog_timeo = TX_TIMEOUT; + + return 1; +} + + +static int lance_open( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + int i; + + DPRINTK( 2, ( "%s: lance_open()\n", dev->name )); + + lance_init_ring(dev); + /* Re-initialize the LANCE, and start it when done. */ + + REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); + REGA( CSR2 ) = 0; + REGA( CSR1 ) = 0; + REGA( CSR0 ) = CSR0_INIT; + /* From now on, AREG is kept to point to CSR0 */ + + i = 1000000; + while (--i > 0) + if (DREG & CSR0_IDON) + break; + if (i <= 0 || (DREG & CSR0_ERR)) { + DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", + dev->name, i, DREG )); + DREG = CSR0_STOP; + return -EIO; + } + DREG = CSR0_IDON; + DREG = CSR0_STRT; + DREG = CSR0_INEA; + + netif_start_queue (dev); + + DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); + + return 0; +} + + +/* Initialize the LANCE Rx and Tx rings. */ + +static void lance_init_ring( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int i; + unsigned offset; + + lp->tx_full = 0; + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_tx = 0; + + offset = offsetof( struct lance_memory, packet_area ); + +/* If the packet buffer at offset 'o' would conflict with the reserved area + * of RieblCards, advance it */ +#define CHECK_OFFSET(o) \ + do { \ + if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \ + if (((o) < RIEBL_RSVD_START) ? 
(o)+PKT_BUF_SZ > RIEBL_RSVD_START \ + : (o) < RIEBL_RSVD_END) \ + (o) = RIEBL_RSVD_END; \ + } \ + } while(0) + + for( i = 0; i < TX_RING_SIZE; i++ ) { + CHECK_OFFSET(offset); + MEM->tx_head[i].base = offset; + MEM->tx_head[i].flag = TMD1_OWN_HOST; + MEM->tx_head[i].base_hi = 0; + MEM->tx_head[i].length = 0; + MEM->tx_head[i].misc = 0; + offset += PKT_BUF_SZ; + } + + for( i = 0; i < RX_RING_SIZE; i++ ) { + CHECK_OFFSET(offset); + MEM->rx_head[i].base = offset; + MEM->rx_head[i].flag = TMD1_OWN_CHIP; + MEM->rx_head[i].base_hi = 0; + MEM->rx_head[i].buf_length = -PKT_BUF_SZ; + MEM->rx_head[i].msg_length = 0; + offset += PKT_BUF_SZ; + } +} + + +/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ + + +static void lance_tx_timeout (struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + + AREG = CSR0; + DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", + dev->name, DREG )); + DREG = CSR0_STOP; + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); + dev->stats.tx_errors++; +#ifndef final_version + { int i; + DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n", + lp->dirty_tx, lp->cur_tx, + lp->tx_full ? " (full)" : "", + lp->cur_rx )); + for( i = 0 ; i < RX_RING_SIZE; i++ ) + DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n", + i, MEM->rx_head[i].base, + -MEM->rx_head[i].buf_length, + MEM->rx_head[i].msg_length )); + for( i = 0 ; i < TX_RING_SIZE; i++ ) + DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n", + i, MEM->tx_head[i].base, + -MEM->tx_head[i].length, + MEM->tx_head[i].misc )); + } +#endif + /* XXX MSch: maybe purge/reinit ring here */ + /* lance_restart, essentially */ + lance_init_ring(dev); + REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ + +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + int entry, len; + struct lance_tx_head *head; + unsigned long flags; + + DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", + dev->name, DREG )); + + + /* The old LANCE chips doesn't automatically pad buffers to min. size. */ + len = skb->len; + if (len < ETH_ZLEN) + len = ETH_ZLEN; + /* PAM-Card has a bug: Can only send packets with even number of bytes! */ + else if (lp->cardtype == PAM_CARD && (len & 1)) + ++len; + + if (len > skb->len) { + if (skb_padto(skb, len)) + return NETDEV_TX_OK; + } + + netif_stop_queue (dev); + + /* Fill in a Tx ring entry */ + if (lance_debug >= 3) { + printk( "%s: TX pkt type 0x%04x from %pM to %pM" + " data at 0x%08x len %d\n", + dev->name, ((u_short *)skb->data)[6], + &skb->data[6], skb->data, + (int)skb->data, (int)skb->len ); + } + + /* We're not prepared for the int until the last flags are set/reset. And + * the int may happen already after setting the OWN_CHIP... */ + spin_lock_irqsave (&lp->devlock, flags); + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & TX_RING_MOD_MASK; + head = &(MEM->tx_head[entry]); + + /* Caution: the write order is important here, set the "ownership" bits + * last. 
+ */ + + + head->length = -len; + head->misc = 0; + lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); + head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; + dev->stats.tx_bytes += skb->len; + dev_kfree_skb( skb ); + lp->cur_tx++; + while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { + lp->cur_tx -= TX_RING_SIZE; + lp->dirty_tx -= TX_RING_SIZE; + } + + /* Trigger an immediate send poll. */ + DREG = CSR0_INEA | CSR0_TDMD; + + if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == + TMD1_OWN_HOST) + netif_start_queue (dev); + else + lp->tx_full = 1; + spin_unlock_irqrestore (&lp->devlock, flags); + + return NETDEV_TX_OK; +} + +/* The LANCE interrupt handler. */ + +static irqreturn_t lance_interrupt( int irq, void *dev_id ) +{ + struct net_device *dev = dev_id; + struct lance_private *lp; + struct lance_ioreg *IO; + int csr0, boguscnt = 10; + int handled = 0; + + if (dev == NULL) { + DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" )); + return IRQ_NONE; + } + + lp = netdev_priv(dev); + IO = lp->iobase; + spin_lock (&lp->devlock); + + AREG = CSR0; + + while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) && + --boguscnt >= 0) { + handled = 1; + /* Acknowledge all of the current interrupt sources ASAP. */ + DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP | + CSR0_TDMD | CSR0_INEA); + + DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n", + dev->name, csr0, DREG )); + + if (csr0 & CSR0_RINT) /* Rx interrupt */ + lance_rx( dev ); + + if (csr0 & CSR0_TINT) { /* Tx-done interrupt */ + int dirty_tx = lp->dirty_tx; + + while( dirty_tx < lp->cur_tx) { + int entry = dirty_tx & TX_RING_MOD_MASK; + int status = MEM->tx_head[entry].flag; + + if (status & TMD1_OWN_CHIP) + break; /* It still hasn't been Txed */ + + MEM->tx_head[entry].flag = 0; + + if (status & TMD1_ERR) { + /* There was an major error, log it. */ + int err_status = MEM->tx_head[entry].misc; + dev->stats.tx_errors++; + if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++; + if (err_status & TMD3_UFLO) { + /* Ackk! On FIFO errors the Tx unit is turned off! */ + dev->stats.tx_fifo_errors++; + /* Remove this verbosity later! */ + DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n", + dev->name, csr0 )); + /* Restart the chip. */ + DREG = CSR0_STRT; + } + } else { + if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF)) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + + /* XXX MSch: free skb?? */ + dirty_tx++; + } + +#ifndef final_version + if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { + DPRINTK( 0, ( "out-of-sync dirty pointer," + " %d vs. %d, full=%ld.\n", + dirty_tx, lp->cur_tx, lp->tx_full )); + dirty_tx += TX_RING_SIZE; + } +#endif + + if (lp->tx_full && (netif_queue_stopped(dev)) && + dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) { + /* The ring is no longer full, clear tbusy. */ + lp->tx_full = 0; + netif_wake_queue (dev); + } + + lp->dirty_tx = dirty_tx; + } + + /* Log misc errors. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_MERR) { + DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " + "status %04x.\n", dev->name, csr0 )); + /* Restart the chip. */ + DREG = CSR0_STRT; + } + } + + /* Clear any other interrupt, and set interrupt enable. 
*/ + DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR | + CSR0_IDON | CSR0_INEA; + + DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n", + dev->name, DREG )); + + spin_unlock (&lp->devlock); + return IRQ_RETVAL(handled); +} + + +static int lance_rx( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int entry = lp->cur_rx & RX_RING_MOD_MASK; + int i; + + DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name, + MEM->rx_head[entry].flag )); + + /* If we own the next entry, it's a new packet. Send it up. */ + while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) { + struct lance_rx_head *head = &(MEM->rx_head[entry]); + int status = head->flag; + + if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */ + /* There is a tricky error noted by John Murphy, + to Russ Nelson: Even with full-sized + buffers it's possible for a jabber packet to use two + buffers, with only the last correctly noting the error. */ + if (status & RMD1_ENP) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; + head->flag &= (RMD1_ENP|RMD1_STP); + } else { + /* Malloc up new buffer, compatible with net-3. */ + short pkt_len = head->msg_length & 0xfff; + struct sk_buff *skb; + + if (pkt_len < 60) { + printk( "%s: Runt packet!\n", dev->name ); + dev->stats.rx_errors++; + } + else { + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { + for( i = 0; i < RX_RING_SIZE; i++ ) + if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag & + RMD1_OWN_CHIP) + break; + + if (i > RX_RING_SIZE - 2) { + dev->stats.rx_dropped++; + head->flag |= RMD1_OWN_CHIP; + lp->cur_rx++; + } + break; + } + + if (lance_debug >= 3) { + u_char *data = PKTBUF_ADDR(head); + + printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM " + "data %02x %02x %02x %02x %02x %02x %02x %02x " + "len %d\n", + dev->name, ((u_short *)data)[6], + &data[6], data, + data[15], data[16], data[17], data[18], + data[19], data[20], data[21], data[22], + pkt_len); + } + + skb_reserve( skb, 2 ); /* 16 byte align */ + skb_put( skb, pkt_len ); /* Make room */ + lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); + skb->protocol = eth_type_trans( skb, dev ); + netif_rx( skb ); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } + + head->flag |= RMD1_OWN_CHIP; + entry = (++lp->cur_rx) & RX_RING_MOD_MASK; + } + lp->cur_rx &= RX_RING_MOD_MASK; + + /* From lance.c (Donald Becker): */ + /* We should check that at least two ring entries are free. If not, + we should free one and mark stats->rx_dropped++. */ + + return 0; +} + + +static int lance_close( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + + netif_stop_queue (dev); + + AREG = CSR0; + + DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, DREG )); + + /* We stop the LANCE here -- it occasionally polls + memory if we don't. */ + DREG = CSR0_STOP; + + return 0; +} + + +/* Set or clear the multicast filter for this adaptor. + num_addrs == -1 Promiscuous mode, receive all packets + num_addrs == 0 Normal mode, clear multicast list + num_addrs > 0 Multicast mode, receive normal and MC packets, and do + best-effort filtering. 
+ */ + +static void set_multicast_list( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_ioreg *IO = lp->iobase; + + if (netif_running(dev)) + /* Only possible if board is already started */ + return; + + /* We take the simple way out and always enable promiscuous mode. */ + DREG = CSR0_STOP; /* Temporarily stop the lance. */ + + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. */ + DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name )); + REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int num_addrs = netdev_mc_count(dev); + int i; + /* We don't use the multicast table, but rely on upper-layer + * filtering. */ + memset( multicast_table, (num_addrs == 0) ? 0 : -1, + sizeof(multicast_table) ); + for( i = 0; i < 4; i++ ) + REGA( CSR8+i ) = multicast_table[i]; + REGA( CSR15 ) = 0; /* Unset promiscuous mode */ + } + + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); + + /* Resume normal operation and reset AREG to CSR0 */ + REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT; +} + + +/* This is needed for old RieblCards and possible for new RieblCards */ + +static int lance_set_mac_address( struct net_device *dev, void *addr ) +{ + struct lance_private *lp = netdev_priv(dev); + struct sockaddr *saddr = addr; + int i; + + if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL) + return -EOPNOTSUPP; + + if (netif_running(dev)) { + /* Only possible while card isn't started */ + DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n", + dev->name )); + return -EIO; + } + + memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len ); + for( i = 0; i < 6; i++ ) + MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */ + lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 ); + /* set also the magic for future sessions */ + *RIEBL_MAGIC_ADDR = RIEBL_MAGIC; + + return 0; +} + + +#ifdef MODULE +static struct net_device *atarilance_dev; + +static int __init atarilance_module_init(void) +{ + atarilance_dev = atarilance_probe(-1); + return PTR_ERR_OR_ZERO(atarilance_dev); +} + +static void __exit atarilance_module_exit(void) +{ + unregister_netdev(atarilance_dev); + free_irq(atarilance_dev->irq, atarilance_dev); + free_netdev(atarilance_dev); +} +module_init(atarilance_module_init); +module_exit(atarilance_module_exit); +#endif /* MODULE */ + + +/* + * Local variables: + * c-indent-level: 4 + * tab-width: 4 + * End: + */ diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c new file mode 100644 index 000000000..cb367cc59 --- /dev/null +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -0,0 +1,1469 @@ +/* + * + * Alchemy Au1x00 ethernet driver + * + * Copyright 2001-2003, 2006 MontaVista Software Inc. + * Copyright 2002 TimeSys Corp. + * Added ethtool/mii-tool support, + * Copyright 2004 Matt Porter + * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de + * or riemer@riemer-nt.de: fixed the link beat detection with + * ioctls (SIOCGMIIPHY) + * Copyright 2006 Herbert Valerio Riedel + * converted to use linux-2.6.x's PHY framework + * + * Author: MontaVista Software, Inc. 
+ * ppopov@mvista.com or source@mvista.com + * + * ######################################################################## + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * ######################################################################## + * + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "au1000_eth.h" + +#ifdef AU1000_ETH_DEBUG +static int au1000_debug = 5; +#else +static int au1000_debug = 3; +#endif + +#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +#define DRV_NAME "au1000_eth" +#define DRV_VERSION "1.7" +#define DRV_AUTHOR "Pete Popov " +#define DRV_DESC "Au1xxx on-chip Ethernet driver" + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* AU1000 MAC registers and bits */ +#define MAC_CONTROL 0x0 +# define MAC_RX_ENABLE (1 << 2) +# define MAC_TX_ENABLE (1 << 3) +# define MAC_DEF_CHECK (1 << 5) +# define MAC_SET_BL(X) (((X) & 0x3) << 6) +# define MAC_AUTO_PAD (1 << 8) +# define MAC_DISABLE_RETRY (1 << 10) +# define MAC_DISABLE_BCAST (1 << 11) +# define MAC_LATE_COL (1 << 12) +# define MAC_HASH_MODE (1 << 13) +# define MAC_HASH_ONLY (1 << 15) +# define MAC_PASS_ALL (1 << 16) +# define MAC_INVERSE_FILTER (1 << 17) +# define MAC_PROMISCUOUS (1 << 18) +# define MAC_PASS_ALL_MULTI (1 << 19) +# define MAC_FULL_DUPLEX (1 << 20) +# define MAC_NORMAL_MODE 0 +# define MAC_INT_LOOPBACK (1 << 21) +# define MAC_EXT_LOOPBACK (1 << 22) +# define MAC_DISABLE_RX_OWN (1 << 23) +# define MAC_BIG_ENDIAN (1 << 30) +# define MAC_RX_ALL (1 << 31) +#define MAC_ADDRESS_HIGH 0x4 +#define MAC_ADDRESS_LOW 0x8 +#define MAC_MCAST_HIGH 0xC +#define MAC_MCAST_LOW 0x10 +#define MAC_MII_CNTRL 0x14 +# define MAC_MII_BUSY (1 << 0) +# define MAC_MII_READ 0 +# define MAC_MII_WRITE (1 << 1) +# define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6) +# define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11) +#define MAC_MII_DATA 0x18 +#define MAC_FLOW_CNTRL 0x1C +# define MAC_FLOW_CNTRL_BUSY (1 << 0) +# define MAC_FLOW_CNTRL_ENABLE (1 << 1) +# define MAC_PASS_CONTROL (1 << 2) +# define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16) +#define MAC_VLAN1_TAG 0x20 +#define MAC_VLAN2_TAG 0x24 + +/* Ethernet Controller Enable */ +# define MAC_EN_CLOCK_ENABLE (1 << 0) +# define MAC_EN_RESET0 (1 << 1) +# define MAC_EN_TOSS (0 << 2) +# define MAC_EN_CACHEABLE (1 << 3) +# define MAC_EN_RESET1 (1 << 4) +# define MAC_EN_RESET2 (1 << 5) +# define MAC_DMA_RESET (1 << 6) + +/* Ethernet Controller DMA Channels */ +/* offsets from MAC_TX_RING_ADDR address */ +#define MAC_TX_BUFF0_STATUS 0x0 +# define TX_FRAME_ABORTED (1 << 0) +# define TX_JAB_TIMEOUT (1 << 1) +# define TX_NO_CARRIER (1 << 2) +# define TX_LOSS_CARRIER (1 
<< 3) +# define TX_EXC_DEF (1 << 4) +# define TX_LATE_COLL_ABORT (1 << 5) +# define TX_EXC_COLL (1 << 6) +# define TX_UNDERRUN (1 << 7) +# define TX_DEFERRED (1 << 8) +# define TX_LATE_COLL (1 << 9) +# define TX_COLL_CNT_MASK (0xF << 10) +# define TX_PKT_RETRY (1 << 31) +#define MAC_TX_BUFF0_ADDR 0x4 +# define TX_DMA_ENABLE (1 << 0) +# define TX_T_DONE (1 << 1) +# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) +#define MAC_TX_BUFF0_LEN 0x8 +#define MAC_TX_BUFF1_STATUS 0x10 +#define MAC_TX_BUFF1_ADDR 0x14 +#define MAC_TX_BUFF1_LEN 0x18 +#define MAC_TX_BUFF2_STATUS 0x20 +#define MAC_TX_BUFF2_ADDR 0x24 +#define MAC_TX_BUFF2_LEN 0x28 +#define MAC_TX_BUFF3_STATUS 0x30 +#define MAC_TX_BUFF3_ADDR 0x34 +#define MAC_TX_BUFF3_LEN 0x38 + +/* offsets from MAC_RX_RING_ADDR */ +#define MAC_RX_BUFF0_STATUS 0x0 +# define RX_FRAME_LEN_MASK 0x3fff +# define RX_WDOG_TIMER (1 << 14) +# define RX_RUNT (1 << 15) +# define RX_OVERLEN (1 << 16) +# define RX_COLL (1 << 17) +# define RX_ETHER (1 << 18) +# define RX_MII_ERROR (1 << 19) +# define RX_DRIBBLING (1 << 20) +# define RX_CRC_ERROR (1 << 21) +# define RX_VLAN1 (1 << 22) +# define RX_VLAN2 (1 << 23) +# define RX_LEN_ERROR (1 << 24) +# define RX_CNTRL_FRAME (1 << 25) +# define RX_U_CNTRL_FRAME (1 << 26) +# define RX_MCAST_FRAME (1 << 27) +# define RX_BCAST_FRAME (1 << 28) +# define RX_FILTER_FAIL (1 << 29) +# define RX_PACKET_FILTER (1 << 30) +# define RX_MISSED_FRAME (1 << 31) + +# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \ + RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \ + RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME) +#define MAC_RX_BUFF0_ADDR 0x4 +# define RX_DMA_ENABLE (1 << 0) +# define RX_T_DONE (1 << 1) +# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3) +# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0) +#define MAC_RX_BUFF1_STATUS 0x10 +#define MAC_RX_BUFF1_ADDR 0x14 +#define MAC_RX_BUFF2_STATUS 0x20 +#define MAC_RX_BUFF2_ADDR 0x24 +#define MAC_RX_BUFF3_STATUS 0x30 +#define MAC_RX_BUFF3_ADDR 0x34 + +/* + * Theory of operation + * + * The Au1000 MACs use a simple rx and tx descriptor ring scheme. + * There are four receive and four transmit descriptors. These + * descriptors are not in memory; rather, they are just a set of + * hardware registers. + * + * Since the Au1000 has a coherent data cache, the receive and + * transmit buffers are allocated from the KSEG0 segment. The + * hardware registers, however, are still mapped at KSEG1 to + * make sure there's no out-of-order writes, and that all writes + * complete immediately. + */ + +/* + * board-specific configurations + * + * PHY detection algorithm + * + * If phy_static_config is undefined, the PHY setup is + * autodetected: + * + * mii_probe() first searches the current MAC's MII bus for a PHY, + * selecting the first (or last, if phy_search_highest_addr is + * defined) PHY address not already claimed by another netdev. + * + * If nothing was found that way when searching for the 2nd ethernet + * controller's PHY and phy1_search_mac0 is defined, then + * the first MII bus is searched as well for an unclaimed PHY; this is + * needed in case of a dual-PHY accessible only through the MAC0's MII + * bus. + * + * Finally, if no PHY is found, then the corresponding ethernet + * controller is not registered to the network subsystem. + */ + +/* autodetection defaults: phy1_search_mac0 */ + +/* static PHY setup + * + * most boards PHY setup should be detectable properly with the + * autodetection algorithm in mii_probe(), but in some cases (e.g. 
if + * you have a switch attached, or want to use the PHY's interrupt + * notification capabilities) you can provide a static PHY + * configuration here + * + * IRQs may only be set, if a PHY address was configured + * If a PHY address is given, also a bus id is required to be set + * + * ps: make sure the used irqs are configured properly in the board + * specific irq-map + */ + +static void au1000_enable_mac(struct net_device *dev, int force_reset) +{ + unsigned long flags; + struct au1000_private *aup = netdev_priv(dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (force_reset || (!aup->mac_enabled)) { + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + wmb(); /* drain writebuffer */ + mdelay(2); + writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 + | MAC_EN_CLOCK_ENABLE), aup->enable); + wmb(); /* drain writebuffer */ + mdelay(2); + + aup->mac_enabled = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); +} + +/* + * MII operations + */ +static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "read_MII busy timeout!!\n"); + return -1; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ; + + writel(mii_control, mii_control_reg); + + timedout = 20; + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_read busy timeout!!\n"); + return -1; + } + } + return readl(mii_data_reg); +} + +static void au1000_mdio_write(struct net_device *dev, int phy_addr, + int reg, u16 value) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 *const mii_control_reg = &aup->mac->mii_control; + u32 *const mii_data_reg = &aup->mac->mii_data; + u32 timedout = 20; + u32 mii_control; + + while (readl(mii_control_reg) & MAC_MII_BUSY) { + mdelay(1); + if (--timedout == 0) { + netdev_err(dev, "mdio_write busy timeout!!\n"); + return; + } + } + + mii_control = MAC_SET_MII_SELECT_REG(reg) | + MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE; + + writel(value, mii_data_reg); + writel(mii_control, mii_control_reg); +} + +static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does + * _NOT_ hold (e.g. 
when PHY is accessed through other MAC's MII bus) + */ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return au1000_mdio_read(dev, phy_addr, regnum); +} + +static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) +{ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + au1000_mdio_write(dev, phy_addr, regnum, value); + return 0; +} + +static int au1000_mdiobus_reset(struct mii_bus *bus) +{ + struct net_device *const dev = bus->priv; + + /* make sure the MAC associated with this + * mii_bus is enabled + */ + au1000_enable_mac(dev, 0); + + return 0; +} + +static void au1000_hard_stop(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "hard stop\n"); + + reg = readl(&aup->mac->control); + reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + wmb(); /* drain writebuffer */ + mdelay(10); +} + +static void au1000_enable_rx_tx(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, hw, dev, "enable_rx_tx\n"); + + reg = readl(&aup->mac->control); + reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE); + writel(reg, &aup->mac->control); + wmb(); /* drain writebuffer */ + mdelay(10); +} + +static void +au1000_adjust_link(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct phy_device *phydev = aup->phy_dev; + unsigned long flags; + u32 reg; + + int status_change = 0; + + BUG_ON(!aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + if (phydev->link && (aup->old_speed != phydev->speed)) { + /* speed changed */ + + switch (phydev->speed) { + case SPEED_10: + case SPEED_100: + break; + default: + netdev_warn(dev, "Speed (%d) is not 10/100 ???\n", + phydev->speed); + break; + } + + aup->old_speed = phydev->speed; + + status_change = 1; + } + + if (phydev->link && (aup->old_duplex != phydev->duplex)) { + /* duplex mode changed */ + + /* switching duplex mode requires to disable rx and tx! */ + au1000_hard_stop(dev); + + reg = readl(&aup->mac->control); + if (DUPLEX_FULL == phydev->duplex) { + reg |= MAC_FULL_DUPLEX; + reg &= ~MAC_DISABLE_RX_OWN; + } else { + reg &= ~MAC_FULL_DUPLEX; + reg |= MAC_DISABLE_RX_OWN; + } + writel(reg, &aup->mac->control); + wmb(); /* drain writebuffer */ + mdelay(1); + + au1000_enable_rx_tx(dev); + aup->old_duplex = phydev->duplex; + + status_change = 1; + } + + if (phydev->link != aup->old_link) { + /* link state changed */ + + if (!phydev->link) { + /* link went down */ + aup->old_speed = 0; + aup->old_duplex = -1; + } + + aup->old_link = phydev->link; + status_change = 1; + } + + spin_unlock_irqrestore(&aup->lock, flags); + + if (status_change) { + if (phydev->link) + netdev_info(dev, "link up (%d/%s)\n", + phydev->speed, + DUPLEX_FULL == phydev->duplex ? 
"Full" : "Half"); + else + netdev_info(dev, "link down\n"); + } +} + +static int au1000_mii_probe(struct net_device *dev) +{ + struct au1000_private *const aup = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + if (aup->phy_static_config) { + BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); + + if (aup->phy_addr) + phydev = aup->mii_bus->phy_map[aup->phy_addr]; + else + netdev_info(dev, "using PHY-less setup\n"); + return 0; + } + + /* find the first (lowest address) PHY + * on the current MAC's MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) + if (aup->mii_bus->phy_map[phy_addr]) { + phydev = aup->mii_bus->phy_map[phy_addr]; + if (!aup->phy_search_highest_addr) + /* break out with first one found */ + break; + } + + if (aup->phy1_search_mac0) { + /* try harder to find a PHY */ + if (!phydev && (aup->mac_id == 1)) { + /* no PHY found, maybe we have a dual PHY? */ + dev_info(&dev->dev, ": no PHY found on MAC1, " + "let's see if it's attached to MAC0...\n"); + + /* find the first (lowest address) non-attached + * PHY on the MAC0 MII bus + */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + struct phy_device *const tmp_phydev = + aup->mii_bus->phy_map[phy_addr]; + + if (aup->mac_id == 1) + break; + + /* no PHY here... */ + if (!tmp_phydev) + continue; + + /* already claimed by MAC0 */ + if (tmp_phydev->attached_dev) + continue; + + phydev = tmp_phydev; + break; /* found it */ + } + } + } + + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -1; + } + + /* now we are supposed to have a proper phydev, to attach to... */ + BUG_ON(phydev->attached_dev); + + phydev = phy_connect(dev, dev_name(&phydev->dev), + &au1000_adjust_link, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + + aup->old_link = 0; + aup->old_speed = 0; + aup->old_duplex = -1; + aup->phy_dev = phydev; + + netdev_info(dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + return 0; +} + + +/* + * Buffer allocation/deallocation routines. The buffer descriptor returned + * has the virtual and dma address of a buffer suitable for + * both, receive and transmit operations. 
+ */ +static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup) +{ + struct db_dest *pDB; + pDB = aup->pDBfree; + + if (pDB) + aup->pDBfree = pDB->pnext; + + return pDB; +} + +void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) +{ + struct db_dest *pDBfree = aup->pDBfree; + if (pDBfree) + pDBfree->pnext = pDB; + aup->pDBfree = pDB; +} + +static void au1000_reset_mac_unlocked(struct net_device *dev) +{ + struct au1000_private *const aup = netdev_priv(dev); + int i; + + au1000_hard_stop(dev); + + writel(MAC_EN_CLOCK_ENABLE, aup->enable); + wmb(); /* drain writebuffer */ + mdelay(2); + writel(0, aup->enable); + wmb(); /* drain writebuffer */ + mdelay(2); + + aup->tx_full = 0; + for (i = 0; i < NUM_RX_DMA; i++) { + /* reset control bits */ + aup->rx_dma_ring[i]->buff_stat &= ~0xf; + } + for (i = 0; i < NUM_TX_DMA; i++) { + /* reset control bits */ + aup->tx_dma_ring[i]->buff_stat &= ~0xf; + } + + aup->mac_enabled = 0; + +} + +static void au1000_reset_mac(struct net_device *dev) +{ + struct au1000_private *const aup = netdev_priv(dev); + unsigned long flags; + + netif_dbg(aup, hw, dev, "reset mac, aup %x\n", + (unsigned)aup); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + spin_unlock_irqrestore(&aup->lock, flags); +} + +/* + * Setup the receive and transmit "rings". These pointers are the addresses + * of the rx and tx MAC DMA registers so they are fixed by the hardware -- + * these are not descriptors sitting in memory. + */ +static void +au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base) +{ + int i; + + for (i = 0; i < NUM_RX_DMA; i++) { + aup->rx_dma_ring[i] = (struct rx_dma *) + (tx_base + 0x100 + sizeof(struct rx_dma) * i); + } + for (i = 0; i < NUM_TX_DMA; i++) { + aup->tx_dma_ring[i] = (struct tx_dma *) + (tx_base + sizeof(struct tx_dma) * i); + } +} + +/* + * ethtool operations + */ + +static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (aup->phy_dev) + return phy_ethtool_gset(aup->phy_dev, cmd); + + return -EINVAL; +} + +static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (aup->phy_dev) + return phy_ethtool_sset(aup->phy_dev, cmd); + + return -EINVAL; +} + +static void +au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct au1000_private *aup = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME, + aup->mac_id); + info->regdump_len = 0; +} + +static void au1000_set_msglevel(struct net_device *dev, u32 value) +{ + struct au1000_private *aup = netdev_priv(dev); + aup->msg_enable = value; +} + +static u32 au1000_get_msglevel(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + return aup->msg_enable; +} + +static const struct ethtool_ops au1000_ethtool_ops = { + .get_settings = au1000_get_settings, + .set_settings = au1000_set_settings, + .get_drvinfo = au1000_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = au1000_get_msglevel, + .set_msglevel = au1000_set_msglevel, +}; + + +/* + * Initialize the interface. + * + * When the device powers up, the clocks are disabled and the + * mac is in reset state. 
When the interface is closed, we + * do the same -- reset the device and disable the clocks to + * conserve power. Thus, whenever au1000_init() is called, + * the device should already be in reset state. + */ +static int au1000_init(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + unsigned long flags; + int i; + u32 control; + + netif_dbg(aup, hw, dev, "au1000_init\n"); + + /* bring the device out of reset */ + au1000_enable_mac(dev, 1); + + spin_lock_irqsave(&aup->lock, flags); + + writel(0, &aup->mac->control); + aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; + aup->tx_tail = aup->tx_head; + aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2; + + writel(dev->dev_addr[5]<<8 | dev->dev_addr[4], + &aup->mac->mac_addr_high); + writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 | + dev->dev_addr[1]<<8 | dev->dev_addr[0], + &aup->mac->mac_addr_low); + + + for (i = 0; i < NUM_RX_DMA; i++) + aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; + + wmb(); /* drain writebuffer */ + + control = MAC_RX_ENABLE | MAC_TX_ENABLE; +#ifndef CONFIG_CPU_LITTLE_ENDIAN + control |= MAC_BIG_ENDIAN; +#endif + if (aup->phy_dev) { + if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex)) + control |= MAC_FULL_DUPLEX; + else + control |= MAC_DISABLE_RX_OWN; + } else { /* PHY-less op, assume full-duplex */ + control |= MAC_FULL_DUPLEX; + } + + writel(control, &aup->mac->control); + writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */ + wmb(); /* drain writebuffer */ + + spin_unlock_irqrestore(&aup->lock, flags); + return 0; +} + +static inline void au1000_update_rx_stats(struct net_device *dev, u32 status) +{ + struct net_device_stats *ps = &dev->stats; + + ps->rx_packets++; + if (status & RX_MCAST_FRAME) + ps->multicast++; + + if (status & RX_ERROR) { + ps->rx_errors++; + if (status & RX_MISSED_FRAME) + ps->rx_missed_errors++; + if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) + ps->rx_length_errors++; + if (status & RX_CRC_ERROR) + ps->rx_crc_errors++; + if (status & RX_COLL) + ps->collisions++; + } else + ps->rx_bytes += status & RX_FRAME_LEN_MASK; + +} + +/* + * Au1000 receive routine. 
+ */ +static int au1000_rx(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct sk_buff *skb; + struct rx_dma *prxd; + u32 buff_stat, status; + struct db_dest *pDB; + u32 frmlen; + + netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); + + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + while (buff_stat & RX_T_DONE) { + status = prxd->status; + pDB = aup->rx_db_inuse[aup->rx_head]; + au1000_update_rx_stats(dev, status); + if (!(status & RX_ERROR)) { + + /* good frame */ + frmlen = (status & RX_FRAME_LEN_MASK); + frmlen -= 4; /* Remove FCS */ + skb = netdev_alloc_skb(dev, frmlen + 2); + if (skb == NULL) { + dev->stats.rx_dropped++; + continue; + } + skb_reserve(skb, 2); /* 16 byte IP header align */ + skb_copy_to_linear_data(skb, + (unsigned char *)pDB->vaddr, frmlen); + skb_put(skb, frmlen); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); /* pass the packet to upper layers */ + } else { + if (au1000_debug > 4) { + pr_err("rx_error(s):"); + if (status & RX_MISSED_FRAME) + pr_cont(" miss"); + if (status & RX_WDOG_TIMER) + pr_cont(" wdog"); + if (status & RX_RUNT) + pr_cont(" runt"); + if (status & RX_OVERLEN) + pr_cont(" overlen"); + if (status & RX_COLL) + pr_cont(" coll"); + if (status & RX_MII_ERROR) + pr_cont(" mii error"); + if (status & RX_CRC_ERROR) + pr_cont(" crc error"); + if (status & RX_LEN_ERROR) + pr_cont(" len error"); + if (status & RX_U_CNTRL_FRAME) + pr_cont(" u control frame"); + pr_cont("\n"); + } + } + prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); + wmb(); /* drain writebuffer */ + + /* next descriptor */ + prxd = aup->rx_dma_ring[aup->rx_head]; + buff_stat = prxd->buff_stat; + } + return 0; +} + +static void au1000_update_tx_stats(struct net_device *dev, u32 status) +{ + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + + if (status & TX_FRAME_ABORTED) { + if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { + if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { + /* any other tx errors are only valid + * in half duplex mode + */ + ps->tx_errors++; + ps->tx_aborted_errors++; + } + } else { + ps->tx_errors++; + ps->tx_aborted_errors++; + if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) + ps->tx_carrier_errors++; + } + } +} + +/* + * Called from the interrupt service routine to acknowledge + * the TX DONE bits. This is a must if the irq is setup as + * edge triggered. + */ +static void au1000_tx_ack(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct tx_dma *ptxd; + + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + while (ptxd->buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->buff_stat &= ~TX_T_DONE; + ptxd->len = 0; + wmb(); /* drain writebuffer */ + + aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1); + ptxd = aup->tx_dma_ring[aup->tx_tail]; + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + } +} + +/* + * Au1000 interrupt service routine. 
+ */ +static irqreturn_t au1000_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + + /* Handle RX interrupts first to minimize chance of overrun */ + + au1000_rx(dev); + au1000_tx_ack(dev); + return IRQ_RETVAL(1); +} + +static int au1000_open(struct net_device *dev) +{ + int retval; + struct au1000_private *aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "open: dev=%p\n", dev); + + retval = request_irq(dev->irq, au1000_interrupt, 0, + dev->name, dev); + if (retval) { + netdev_err(dev, "unable to get IRQ %d\n", dev->irq); + return retval; + } + + retval = au1000_init(dev); + if (retval) { + netdev_err(dev, "error in au1000_init\n"); + free_irq(dev->irq, dev); + return retval; + } + + if (aup->phy_dev) { + /* cause the PHY state machine to schedule a link state check */ + aup->phy_dev->state = PHY_CHANGELINK; + phy_start(aup->phy_dev); + } + + netif_start_queue(dev); + + netif_dbg(aup, drv, dev, "open: Initialization done.\n"); + + return 0; +} + +static int au1000_close(struct net_device *dev) +{ + unsigned long flags; + struct au1000_private *const aup = netdev_priv(dev); + + netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); + + if (aup->phy_dev) + phy_stop(aup->phy_dev); + + spin_lock_irqsave(&aup->lock, flags); + + au1000_reset_mac_unlocked(dev); + + /* stop the device */ + netif_stop_queue(dev); + + /* disable the interrupt */ + free_irq(dev->irq, dev); + spin_unlock_irqrestore(&aup->lock, flags); + + return 0; +} + +/* + * Au1000 transmit routine. + */ +static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + struct net_device_stats *ps = &dev->stats; + struct tx_dma *ptxd; + u32 buff_stat; + struct db_dest *pDB; + int i; + + netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", + (unsigned)aup, skb->len, + skb->data, aup->tx_head); + + ptxd = aup->tx_dma_ring[aup->tx_head]; + buff_stat = ptxd->buff_stat; + if (buff_stat & TX_DMA_ENABLE) { + /* We've wrapped around and the transmitter is still busy */ + netif_stop_queue(dev); + aup->tx_full = 1; + return NETDEV_TX_BUSY; + } else if (buff_stat & TX_T_DONE) { + au1000_update_tx_stats(dev, ptxd->status); + ptxd->len = 0; + } + + if (aup->tx_full) { + aup->tx_full = 0; + netif_wake_queue(dev); + } + + pDB = aup->tx_db_inuse[aup->tx_head]; + skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); + if (skb->len < ETH_ZLEN) { + for (i = skb->len; i < ETH_ZLEN; i++) + ((char *)pDB->vaddr)[i] = 0; + + ptxd->len = ETH_ZLEN; + } else + ptxd->len = skb->len; + + ps->tx_packets++; + ps->tx_bytes += ptxd->len; + + ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + wmb(); /* drain writebuffer */ + dev_kfree_skb(skb); + aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); + return NETDEV_TX_OK; +} + +/* + * The Tx ring has been full longer than the watchdog timeout + * value. The transmitter must be hung? + */ +static void au1000_tx_timeout(struct net_device *dev) +{ + netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); + au1000_reset_mac(dev); + au1000_init(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +static void au1000_multicast_list(struct net_device *dev) +{ + struct au1000_private *aup = netdev_priv(dev); + u32 reg; + + netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags); + reg = readl(&aup->mac->control); + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ + reg |= MAC_PROMISCUOUS; + } else if ((dev->flags & IFF_ALLMULTI) || + netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { + reg |= MAC_PASS_ALL_MULTI; + reg &= ~MAC_PROMISCUOUS; + netdev_info(dev, "Pass all multicast\n"); + } else { + struct netdev_hw_addr *ha; + u32 mc_filter[2]; /* Multicast hash filter */ + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) + set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, + (long *)mc_filter); + writel(mc_filter[1], &aup->mac->multi_hash_high); + writel(mc_filter[0], &aup->mac->multi_hash_low); + reg &= ~MAC_PROMISCUOUS; + reg |= MAC_HASH_MODE; + } + writel(reg, &aup->mac->control); +} + +static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct au1000_private *aup = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!aup->phy_dev) + return -EINVAL; /* PHY not controllable */ + + return phy_mii_ioctl(aup->phy_dev, rq, cmd); +} + +static const struct net_device_ops au1000_netdev_ops = { + .ndo_open = au1000_open, + .ndo_stop = au1000_close, + .ndo_start_xmit = au1000_tx, + .ndo_set_rx_mode = au1000_multicast_list, + .ndo_do_ioctl = au1000_ioctl, + .ndo_tx_timeout = au1000_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int au1000_probe(struct platform_device *pdev) +{ + struct au1000_private *aup = NULL; + struct au1000_eth_platform_data *pd; + struct net_device *dev = NULL; + struct db_dest *pDB, *pDBfree; + int irq, i, err = 0; + struct resource *base, *macen, *macdma; + + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!base) { + dev_err(&pdev->dev, "failed to retrieve base register\n"); + err = -ENODEV; + goto out; + } + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!macen) { + dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n"); + err = -ENODEV; + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to retrieve IRQ\n"); + err = -ENODEV; + goto out; + } + + macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!macdma) { + dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n"); + err = -ENODEV; + goto out; + } + + if (!request_mem_region(base->start, resource_size(base), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for base registers\n"); + err = -ENXIO; + goto out; + } + + if (!request_mem_region(macen->start, resource_size(macen), + pdev->name)) { + dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); + err = -ENXIO; + goto err_request; + } + + if (!request_mem_region(macdma->start, resource_size(macdma), + pdev->name)) { + dev_err(&pdev->dev, "failed to request MACDMA memory region\n"); + err = -ENXIO; + goto err_macdma; + } + + dev = alloc_etherdev(sizeof(struct au1000_private)); + if (!dev) { + err = -ENOMEM; + goto err_alloc; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); + aup = netdev_priv(dev); + + spin_lock_init(&aup->lock); + aup->msg_enable = (au1000_debug < 4 ? 
+ AU1000_DEF_MSG_ENABLE : au1000_debug); + + /* Allocate the data buffers + * Snooping works fine with eth on all au1xxx + */ + aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); + if (!aup->vaddr) { + dev_err(&pdev->dev, "failed to allocate data buffers\n"); + err = -ENOMEM; + goto err_vaddr; + } + + /* aup->mac is the base address of the MAC's registers */ + aup->mac = (struct mac_reg *) + ioremap_nocache(base->start, resource_size(base)); + if (!aup->mac) { + dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); + err = -ENXIO; + goto err_remap1; + } + + /* Setup some variables for quick register address access */ + aup->enable = (u32 *)ioremap_nocache(macen->start, + resource_size(macen)); + if (!aup->enable) { + dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); + err = -ENXIO; + goto err_remap2; + } + aup->mac_id = pdev->id; + + aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); + if (!aup->macdma) { + dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n"); + err = -ENXIO; + goto err_remap3; + } + + au1000_setup_hw_rings(aup, aup->macdma); + + writel(0, aup->enable); + aup->mac_enabled = 0; + + pd = dev_get_platdata(&pdev->dev); + if (!pd) { + dev_info(&pdev->dev, "no platform_data passed," + " PHY search on MAC0\n"); + aup->phy1_search_mac0 = 1; + } else { + if (is_valid_ether_addr(pd->mac)) { + memcpy(dev->dev_addr, pd->mac, ETH_ALEN); + } else { + /* Set a random MAC since no valid provided by platform_data. */ + eth_hw_addr_random(dev); + } + + aup->phy_static_config = pd->phy_static_config; + aup->phy_search_highest_addr = pd->phy_search_highest_addr; + aup->phy1_search_mac0 = pd->phy1_search_mac0; + aup->phy_addr = pd->phy_addr; + aup->phy_busid = pd->phy_busid; + aup->phy_irq = pd->phy_irq; + } + + if (aup->phy_busid && aup->phy_busid > 0) { + dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n"); + err = -ENODEV; + goto err_mdiobus_alloc; + } + + aup->mii_bus = mdiobus_alloc(); + if (aup->mii_bus == NULL) { + dev_err(&pdev->dev, "failed to allocate mdiobus structure\n"); + err = -ENOMEM; + goto err_mdiobus_alloc; + } + + aup->mii_bus->priv = dev; + aup->mii_bus->read = au1000_mdiobus_read; + aup->mii_bus->write = au1000_mdiobus_write; + aup->mii_bus->reset = au1000_mdiobus_reset; + aup->mii_bus->name = "au1000_eth_mii"; + snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, aup->mac_id); + aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (aup->mii_bus->irq == NULL) { + err = -ENOMEM; + goto err_out; + } + + for (i = 0; i < PHY_MAX_ADDR; ++i) + aup->mii_bus->irq[i] = PHY_POLL; + /* if known, set corresponding PHY IRQs */ + if (aup->phy_static_config) + if (aup->phy_irq && aup->phy_busid == aup->mac_id) + aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq; + + err = mdiobus_register(aup->mii_bus); + if (err) { + dev_err(&pdev->dev, "failed to register MDIO bus\n"); + goto err_mdiobus_reg; + } + + err = au1000_mii_probe(dev); + if (err != 0) + goto err_out; + + pDBfree = NULL; + /* setup the data buffer descriptors and attach a buffer to each one */ + pDB = aup->db; + for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { + pDB->pnext = pDBfree; + pDBfree = pDB; + pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); + pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB++; + } + aup->pDBfree = pDBfree; + + err = -ENODEV; + for (i = 0; i < NUM_RX_DMA; i++) { + pDB = au1000_GetFreeDB(aup); + if 
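The probe code above strings every db_dest entry into a LIFO free list through its pnext pointer; au1000_GetFreeDB() and au1000_ReleaseDB() are then simple pop and push operations on that list. A sketch of the pattern, with struct buf_desc standing in for struct db_dest:

struct buf_desc {
	struct buf_desc *pnext;
};

static struct buf_desc *db_pop(struct buf_desc **free_list)
{
	struct buf_desc *db = *free_list;

	if (db)
		*free_list = db->pnext;		/* unlink the head */
	return db;
}

static void db_push(struct buf_desc **free_list, struct buf_desc *db)
{
	db->pnext = *free_list;			/* old head becomes the successor */
	*free_list = db;
}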
(!pDB) + goto err_out; + + aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_db_inuse[i] = pDB; + } + + err = -ENODEV; + for (i = 0; i < NUM_TX_DMA; i++) { + pDB = au1000_GetFreeDB(aup); + if (!pDB) + goto err_out; + + aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->len = 0; + aup->tx_db_inuse[i] = pDB; + } + + dev->base_addr = base->start; + dev->irq = irq; + dev->netdev_ops = &au1000_netdev_ops; + dev->ethtool_ops = &au1000_ethtool_ops; + dev->watchdog_timeo = ETH_TX_TIMEOUT; + + /* + * The boot code uses the ethernet controller, so reset it to start + * fresh. au1000_init() expects that the device is in reset state. + */ + au1000_reset_mac(dev); + + err = register_netdev(dev); + if (err) { + netdev_err(dev, "Cannot register net device, aborting.\n"); + goto err_out; + } + + netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", + (unsigned long)base->start, irq); + + pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); + + return 0; + +err_out: + if (aup->mii_bus != NULL) + mdiobus_unregister(aup->mii_bus); + + /* here we should have a valid dev plus aup-> register addresses + * so we can reset the mac properly. + */ + au1000_reset_mac(dev); + + for (i = 0; i < NUM_RX_DMA; i++) { + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + } + for (i = 0; i < NUM_TX_DMA; i++) { + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + } +err_mdiobus_reg: + mdiobus_free(aup->mii_bus); +err_mdiobus_alloc: + iounmap(aup->macdma); +err_remap3: + iounmap(aup->enable); +err_remap2: + iounmap(aup->mac); +err_remap1: + dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); +err_vaddr: + free_netdev(dev); +err_alloc: + release_mem_region(macdma->start, resource_size(macdma)); +err_macdma: + release_mem_region(macen->start, resource_size(macen)); +err_request: + release_mem_region(base->start, resource_size(base)); +out: + return err; +} + +static int au1000_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct au1000_private *aup = netdev_priv(dev); + int i; + struct resource *base, *macen; + + unregister_netdev(dev); + mdiobus_unregister(aup->mii_bus); + mdiobus_free(aup->mii_bus); + + for (i = 0; i < NUM_RX_DMA; i++) + if (aup->rx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->rx_db_inuse[i]); + + for (i = 0; i < NUM_TX_DMA; i++) + if (aup->tx_db_inuse[i]) + au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); + + dma_free_noncoherent(NULL, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + (void *)aup->vaddr, aup->dma_addr); + + iounmap(aup->macdma); + iounmap(aup->mac); + iounmap(aup->enable); + + base = platform_get_resource(pdev, IORESOURCE_MEM, 2); + release_mem_region(base->start, resource_size(base)); + + base = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(base->start, resource_size(base)); + + macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); + release_mem_region(macen->start, resource_size(macen)); + + free_netdev(dev); + + return 0; +} + +static struct platform_driver au1000_eth_driver = { + .probe = au1000_probe, + .remove = au1000_remove, + .driver = { + .name = "au1000-eth", + }, +}; + +module_platform_driver(au1000_eth_driver); + +MODULE_ALIAS("platform:au1000-eth"); diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h new file mode 100644 index 000000000..ca53024f0 --- /dev/null +++ 
b/drivers/net/ethernet/amd/au1000_eth.h @@ -0,0 +1,133 @@ +/* + * + * Alchemy Au1x00 ethernet driver include file + * + * Author: Pete Popov + * + * Copyright 2001 MontaVista Software Inc. + * + * ######################################################################## + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * ######################################################################## + * + * + */ + + +#define MAC_IOSIZE 0x10000 +#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */ +#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */ + +#define NUM_RX_BUFFS 4 +#define NUM_TX_BUFFS 4 +#define MAX_BUF_SIZE 2048 + +#define ETH_TX_TIMEOUT (HZ/4) +#define MAC_MIN_PKT_SIZE 64 + +#define MULTICAST_FILTER_LIMIT 64 + +/* + * Data Buffer Descriptor. Data buffers must be aligned on 32 byte + * boundary for both, receive and transmit. + */ +struct db_dest { + struct db_dest *pnext; + u32 *vaddr; + dma_addr_t dma_addr; +}; + +/* + * The transmit and receive descriptors are memory + * mapped registers. + */ +struct tx_dma { + u32 status; + u32 buff_stat; + u32 len; + u32 pad; +}; + +struct rx_dma { + u32 status; + u32 buff_stat; + u32 pad[2]; +}; + + +/* + * MAC control registers, memory mapped. + */ +struct mac_reg { + u32 control; + u32 mac_addr_high; + u32 mac_addr_low; + u32 multi_hash_high; + u32 multi_hash_low; + u32 mii_control; + u32 mii_data; + u32 flow_control; + u32 vlan1_tag; + u32 vlan2_tag; +}; + + +struct au1000_private { + struct db_dest *pDBfree; + struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS]; + struct rx_dma *rx_dma_ring[NUM_RX_DMA]; + struct tx_dma *tx_dma_ring[NUM_TX_DMA]; + struct db_dest *rx_db_inuse[NUM_RX_DMA]; + struct db_dest *tx_db_inuse[NUM_TX_DMA]; + u32 rx_head; + u32 tx_head; + u32 tx_tail; + u32 tx_full; + + int mac_id; + + int mac_enabled; /* whether MAC is currently enabled and running + * (req. for mdio) + */ + + int old_link; /* used by au1000_adjust_link */ + int old_speed; + int old_duplex; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + + /* PHY configuration */ + int phy_static_config; + int phy_search_highest_addr; + int phy1_search_mac0; + + int phy_addr; + int phy_busid; + int phy_irq; + + /* These variables are just for quick access + * to certain regs addresses. + */ + struct mac_reg *mac; /* mac registers */ + u32 *enable; /* address of MAC Enable Register */ + void __iomem *macdma; /* base of MAC DMA port */ + u32 vaddr; /* virtual address of rx/tx buffers */ + dma_addr_t dma_addr; /* dma address of rx/tx buffers */ + + spinlock_t lock; /* Serialise access to device */ + + u32 msg_enable; +}; diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c new file mode 100644 index 000000000..b584b7823 --- /dev/null +++ b/drivers/net/ethernet/amd/declance.c @@ -0,0 +1,1376 @@ +/* + * Lance ethernet driver for the MIPS processor based + * DECstation family + * + * + * adopted from sunlance.c by Richard van den Berg + * + * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. 
Rozycki + * + * additional sources: + * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, + * Revision 1.2 + * + * History: + * + * v0.001: The kernel accepts the code and it shows the hardware address. + * + * v0.002: Removed most sparc stuff, left only some module and dma stuff. + * + * v0.003: Enhanced base address calculation from proposals by + * Harald Koerfgen and Thomas Riemer. + * + * v0.004: lance-regs is pointing at the right addresses, added prom + * check. First start of address mapping and DMA. + * + * v0.005: started to play around with LANCE-DMA. This driver will not + * work for non IOASIC lances. HK + * + * v0.006: added pointer arrays to lance_private and setup routine for + * them in dec_lance_init. HK + * + * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to + * access the init block. This looks like one (short) word at a + * time, but the smallest amount the IOASIC can transfer is a + * (long) word. So we have a 2-2 padding here. Changed + * lance_init_block accordingly. The 16-16 padding for the buffers + * seems to be correct. HK + * + * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer + * + * v0.009: Module support fixes, multiple interfaces support, various + * bits. macro + * + * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the + * PMAX requirement to only use halfword accesses to the + * buffer. macro + * + * v0.011: Converted the PMAD to the driver model. macro + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +static char version[] = +"declance.c: v0.011 by Linux MIPS DECstation task force\n"; + +MODULE_AUTHOR("Linux MIPS DECstation task force"); +MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver"); +MODULE_LICENSE("GPL"); + +#define __unused __attribute__ ((unused)) + +/* + * card types + */ +#define ASIC_LANCE 1 +#define PMAD_LANCE 2 +#define PMAX_LANCE 3 + + +#define LE_CSR0 0 +#define LE_CSR1 1 +#define LE_CSR2 2 +#define LE_CSR3 3 + +#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ + +#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ +#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ +#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ +#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ +#define LE_C0_MERR 0x0800 /* ME: Memory error */ +#define LE_C0_RINT 0x0400 /* Received interrupt */ +#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ +#define LE_C0_IDON 0x0100 /* IFIN: Init finished. 
*/ +#define LE_C0_INTR 0x0080 /* Interrupt or error */ +#define LE_C0_INEA 0x0040 /* Interrupt enable */ +#define LE_C0_RXON 0x0020 /* Receiver on */ +#define LE_C0_TXON 0x0010 /* Transmitter on */ +#define LE_C0_TDMD 0x0008 /* Transmitter demand */ +#define LE_C0_STOP 0x0004 /* Stop the card */ +#define LE_C0_STRT 0x0002 /* Start the card */ +#define LE_C0_INIT 0x0001 /* Init the card */ + +#define LE_C3_BSWP 0x4 /* SWAP */ +#define LE_C3_ACON 0x2 /* ALE Control */ +#define LE_C3_BCON 0x1 /* Byte control */ + +/* Receive message descriptor 1 */ +#define LE_R1_OWN 0x8000 /* Who owns the entry */ +#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */ +#define LE_R1_FRA 0x2000 /* FRA: Frame error */ +#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */ +#define LE_R1_CRC 0x0800 /* CRC error */ +#define LE_R1_BUF 0x0400 /* BUF: Buffer error */ +#define LE_R1_SOP 0x0200 /* Start of packet */ +#define LE_R1_EOP 0x0100 /* End of packet */ +#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */ + +/* Transmit message descriptor 1 */ +#define LE_T1_OWN 0x8000 /* Lance owns the packet */ +#define LE_T1_ERR 0x4000 /* Error summary */ +#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */ +#define LE_T1_EONE 0x0800 /* Error: one retry needed */ +#define LE_T1_EDEF 0x0400 /* Error: deferred */ +#define LE_T1_SOP 0x0200 /* Start of packet */ +#define LE_T1_EOP 0x0100 /* End of packet */ +#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */ + +#define LE_T3_BUF 0x8000 /* Buffer error */ +#define LE_T3_UFL 0x4000 /* Error underflow */ +#define LE_T3_LCOL 0x1000 /* Error late collision */ +#define LE_T3_CLOS 0x0800 /* Error carrier loss */ +#define LE_T3_RTY 0x0400 /* Error retry */ +#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ + +/* Define: 2^4 Tx buffers and 2^4 Rx buffers */ + +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 4 +#define LANCE_LOG_RX_BUFFERS 4 +#endif + +#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) + +#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +#define PKT_BUF_SZ 1536 +#define RX_BUFF_SIZE PKT_BUF_SZ +#define TX_BUFF_SIZE PKT_BUF_SZ + +#undef TEST_HITS +#define ZERO 0 + +/* + * The DS2100/3100 have a linear 64 kB buffer which supports halfword + * accesses only. Each halfword of the buffer is word-aligned in the + * CPU address space. + * + * The PMAD-AA has a 128 kB buffer on-board. + * + * The IOASIC LANCE devices use a shared memory region. This region + * as seen from the CPU is (max) 128 kB long and has to be on an 128 kB + * boundary. The LANCE sees this as a 64 kB long continuous memory + * region. + * + * The LANCE's DMA address is used as an index in this buffer and DMA + * takes place in bursts of eight 16-bit words which are packed into + * four 32-bit words by the IOASIC. This leads to a strange padding: + * 16 bytes of valid data followed by a 16 byte gap :-(. + */ + +struct lance_rx_desc { + unsigned short rmd0; /* low address of packet */ + unsigned short rmd1; /* high address of packet + and descriptor bits */ + short length; /* 2s complement (negative!) + of buffer length */ + unsigned short mblength; /* actual number of bytes received */ +}; + +struct lance_tx_desc { + unsigned short tmd0; /* low address of packet */ + unsigned short tmd1; /* high address of packet + and descriptor bits */ + short length; /* 2s complement (negative!) 
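The rx and tx descriptors above carry the buffer length as a negative two's-complement value, and the driver additionally forces the upper four bits of the 16-bit field to ones (the -RX_BUFF_SIZE | 0xf000 writes seen later). A small sketch of that encoding for the 1536-byte buffers used here:

#include <assert.h>
#include <stdint.h>

static uint16_t lance_buf_len(int len)
{
	return (uint16_t)(-len | 0xf000);	/* negative length, top nibble forced to 1s */
}

static void buf_len_demo(void)
{
	assert(lance_buf_len(1536) == 0xfa00);	/* -1536 as a 16-bit value */
}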
+ of buffer length */ + unsigned short misc; +}; + + +/* First part of the LANCE initialization block, described in databook. */ +struct lance_init_block { + unsigned short mode; /* pre-set mode (reg. 15) */ + + unsigned short phys_addr[3]; /* physical ethernet address */ + unsigned short filter[4]; /* multicast filter */ + + /* Receive and transmit ring base, along with extra bits. */ + unsigned short rx_ptr; /* receive descriptor addr */ + unsigned short rx_len; /* receive len and high addr */ + unsigned short tx_ptr; /* transmit descriptor addr */ + unsigned short tx_len; /* transmit len and high addr */ + + short gap[4]; + + /* The buffer descriptors */ + struct lance_rx_desc brx_ring[RX_RING_SIZE]; + struct lance_tx_desc btx_ring[TX_RING_SIZE]; +}; + +#define BUF_OFFSET_CPU sizeof(struct lance_init_block) +#define BUF_OFFSET_LNC sizeof(struct lance_init_block) + +#define shift_off(off, type) \ + (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off) + +#define lib_off(rt, type) \ + shift_off(offsetof(struct lance_init_block, rt), type) + +#define lib_ptr(ib, rt, type) \ + ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type))) + +#define rds_off(rt, type) \ + shift_off(offsetof(struct lance_rx_desc, rt), type) + +#define rds_ptr(rd, rt, type) \ + ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type))) + +#define tds_off(rt, type) \ + shift_off(offsetof(struct lance_tx_desc, rt), type) + +#define tds_ptr(td, rt, type) \ + ((volatile u16 *)((u8 *)(td) + tds_off(rt, type))) + +struct lance_private { + struct net_device *next; + int type; + int dma_irq; + volatile struct lance_regs *ll; + + spinlock_t lock; + + int rx_new, tx_new; + int rx_old, tx_old; + + unsigned short busmaster_regval; + + struct timer_list multicast_timer; + + /* Pointers to the ring buffers as seen from the CPU */ + char *rx_buf_ptr_cpu[RX_RING_SIZE]; + char *tx_buf_ptr_cpu[TX_RING_SIZE]; + + /* Pointers to the ring buffers as seen from the LANCE */ + uint rx_buf_ptr_lnc[RX_RING_SIZE]; + uint tx_buf_ptr_lnc[TX_RING_SIZE]; +}; + +#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ + lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ + lp->tx_old - lp->tx_new-1) + +/* The lance control ports are at an absolute address, machine and tc-slot + * dependent. + * DECstations do only 32-bit access and the LANCE uses 16 bit addresses, + * so we have to give the structure an extra member making rap pointing + * at the right address + */ +struct lance_regs { + volatile unsigned short rdp; /* register data port */ + unsigned short pad; + volatile unsigned short rap; /* register address port */ +}; + +int dec_lance_debug = 2; + +static struct tc_driver dec_lance_tc_driver; +static struct net_device *root_lance_dev; + +static inline void writereg(volatile unsigned short *regptr, short value) +{ + *regptr = value; + iob(); +} + +/* Load the CSR registers */ +static void load_csrs(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + uint leptr; + + /* The address space as seen from the LANCE + * begins at address 0. 
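The shift_off()/lib_ptr() helpers above encode the key layout difference between board types: on ASIC_LANCE and PMAX_LANCE every 16-bit LANCE word occupies a full 32-bit word of CPU address space, so structure offsets must be doubled before being added to the shared-memory base. A sketch of that address translation, with needs_shift standing in for the board-type test:

#include <stdint.h>

static volatile uint16_t *field_ptr(volatile uint8_t *base, unsigned int off,
				    int needs_shift)
{
	if (needs_shift)
		off <<= 1;		/* two CPU bytes for every LANCE byte */
	return (volatile uint16_t *)(base + off);
}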
HK + */ + leptr = 0; + + writereg(&ll->rap, LE_CSR1); + writereg(&ll->rdp, (leptr & 0xFFFF)); + writereg(&ll->rap, LE_CSR2); + writereg(&ll->rdp, leptr >> 16); + writereg(&ll->rap, LE_CSR3); + writereg(&ll->rdp, lp->busmaster_regval); + + /* Point back to csr0 */ + writereg(&ll->rap, LE_CSR0); +} + +/* + * Our specialized copy routines + * + */ +static void cp_to_buf(const int type, void *to, const void *from, int len) +{ + unsigned short *tp; + const unsigned short *fp; + unsigned short clen; + unsigned char *rtp; + const unsigned char *rfp; + + if (type == PMAD_LANCE) { + memcpy(to, from, len); + } else if (type == PMAX_LANCE) { + clen = len >> 1; + tp = to; + fp = from; + + while (clen--) { + *tp++ = *fp++; + tp++; + } + + clen = len & 1; + rtp = (unsigned char *)tp; + rfp = (const unsigned char *)fp; + while (clen--) { + *rtp++ = *rfp++; + } + } else { + /* + * copy 16 Byte chunks + */ + clen = len >> 4; + tp = to; + fp = from; + while (clen--) { + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + tp += 8; + } + + /* + * do the rest, if any. + */ + clen = len & 15; + rtp = (unsigned char *)tp; + rfp = (const unsigned char *)fp; + while (clen--) { + *rtp++ = *rfp++; + } + } + + iob(); +} + +static void cp_from_buf(const int type, void *to, const void *from, int len) +{ + unsigned short *tp; + const unsigned short *fp; + unsigned short clen; + unsigned char *rtp; + const unsigned char *rfp; + + if (type == PMAD_LANCE) { + memcpy(to, from, len); + } else if (type == PMAX_LANCE) { + clen = len >> 1; + tp = to; + fp = from; + while (clen--) { + *tp++ = *fp++; + fp++; + } + + clen = len & 1; + + rtp = (unsigned char *)tp; + rfp = (const unsigned char *)fp; + + while (clen--) { + *rtp++ = *rfp++; + } + } else { + + /* + * copy 16 Byte chunks + */ + clen = len >> 4; + tp = to; + fp = from; + while (clen--) { + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + *tp++ = *fp++; + fp += 8; + } + + /* + * do the rest, if any. + */ + clen = len & 15; + rtp = (unsigned char *)tp; + rfp = (const unsigned char *)fp; + while (clen--) { + *rtp++ = *rfp++; + } + + + } + +} + +/* Setup the Lance Rx and Tx rings */ +static void lance_init_ring(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + uint leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the lance init block. 
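cp_to_buf() and cp_from_buf() above deal with the IOASIC buffer layout described earlier: 16 bytes of valid data followed by a 16-byte hole. A simplified sketch of the same stride, using memcpy() instead of the driver's halfword loops:

#include <string.h>

static void copy_with_gaps(unsigned char *to, const unsigned char *from, int len)
{
	while (len >= 16) {
		memcpy(to, from, 16);
		to += 32;		/* 16 bytes of data plus the 16-byte gap */
		from += 16;
		len -= 16;
	}
	if (len > 0)
		memcpy(to, from, len);	/* trailing partial chunk */
}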
+ * XXX bit 0 of the physical address registers has to be zero + */ + *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) | + dev->dev_addr[0]; + *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) | + dev->dev_addr[2]; + *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) | + dev->dev_addr[4]; + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = offsetof(struct lance_init_block, brx_ring); + *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) | + (leptr >> 16); + *lib_ptr(ib, rx_ptr, lp->type) = leptr; + if (ZERO) + printk("RX ptr: %8.8x(%8.8x)\n", + leptr, (uint)lib_off(brx_ring, lp->type)); + + /* Setup tx descriptor pointer */ + leptr = offsetof(struct lance_init_block, btx_ring); + *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) | + (leptr >> 16); + *lib_ptr(ib, tx_ptr, lp->type) = leptr; + if (ZERO) + printk("TX ptr: %8.8x(%8.8x)\n", + leptr, (uint)lib_off(btx_ring, lp->type)); + + if (ZERO) + printk("TX rings:\n"); + + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = lp->tx_buf_ptr_lnc[i]; + *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr; + *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) & + 0xff; + *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000; + /* The ones required by tmd2 */ + *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0; + if (i < 3 && ZERO) + printk("%d: %8.8x(%p)\n", + i, leptr, lp->tx_buf_ptr_cpu[i]); + } + + /* Setup the Rx ring entries */ + if (ZERO) + printk("RX rings:\n"); + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = lp->rx_buf_ptr_lnc[i]; + *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr; + *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) & + 0xff) | + LE_R1_OWN; + *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE | + 0xf000; + *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0; + if (i < 3 && ZERO) + printk("%d: %8.8x(%p)\n", + i, leptr, lp->rx_buf_ptr_cpu[i]); + } + iob(); +} + +static int init_restart_lance(struct lance_private *lp) +{ + volatile struct lance_regs *ll = lp->ll; + int i; + + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_INIT); + + /* Wait for the lance to complete initialization */ + for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) { + udelay(10); + } + if ((i == 100) || (ll->rdp & LE_C0_ERR)) { + printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", + i, ll->rdp); + return -1; + } + if ((ll->rdp & LE_C0_ERR)) { + printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", + i, ll->rdp); + return -1; + } + writereg(&ll->rdp, LE_C0_IDON); + writereg(&ll->rdp, LE_C0_STRT); + writereg(&ll->rdp, LE_C0_INEA); + + return 0; +} + +static int lance_rx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + volatile u16 *rd; + unsigned short bits; + int entry, len; + struct sk_buff *skb; + +#ifdef TEST_HITS + { + int i; + + printk("["); + for (i = 0; i < RX_RING_SIZE; i++) { + if (i == lp->rx_new) + printk("%s", *lib_ptr(ib, brx_ring[i].rmd1, + lp->type) & + LE_R1_OWN ? "_" : "X"); + else + printk("%s", *lib_ptr(ib, brx_ring[i].rmd1, + lp->type) & + LE_R1_OWN ? "." : "1"); + } + printk("]"); + } +#endif + + for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type); + !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN); + rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) { + entry = lp->rx_new; + + /* We got an incomplete frame? 
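load_csrs() and init_restart_lance() above follow the usual LANCE bring-up order: point CSR1/CSR2 at the initialization block, program the bus mode into CSR3, write INIT to CSR0 and poll for IDON (the driver then acks IDON and sets STRT and INEA). A condensed sketch of that sequence; struct regs and the constants are local stand-ins for lance_regs and the LE_CSR*/LE_C0_* definitions:

#include <stdint.h>

struct regs { volatile uint16_t rdp, pad, rap; };

enum { CSR0, CSR1, CSR2, CSR3 };
enum { C0_INIT = 0x0001, C0_IDON = 0x0100 };

static int lance_bring_up(struct regs *r, uint32_t ib_addr, uint16_t busmode)
{
	int i;

	r->rap = CSR1; r->rdp = ib_addr & 0xffff;	/* init block, low half */
	r->rap = CSR2; r->rdp = ib_addr >> 16;		/* init block, high half */
	r->rap = CSR3; r->rdp = busmode;		/* bus/byte-swap control */
	r->rap = CSR0; r->rdp = C0_INIT;		/* start initialization */

	for (i = 0; i < 100 && !(r->rdp & C0_IDON); i++)
		;					/* bounded busy-wait */
	return (r->rdp & C0_IDON) ? 0 : -1;
}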
*/ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) + dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) + dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) + dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) + dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) + dev->stats.rx_errors++; + } else { + len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4; + skb = netdev_alloc_skb(dev, len + 2); + + if (skb == 0) { + dev->stats.rx_dropped++; + *rds_ptr(rd, mblength, lp->type) = 0; + *rds_ptr(rd, rmd1, lp->type) = + ((lp->rx_buf_ptr_lnc[entry] >> 16) & + 0xff) | LE_R1_OWN; + lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; + return 0; + } + dev->stats.rx_bytes += len; + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + + cp_from_buf(lp->type, skb->data, + lp->rx_buf_ptr_cpu[entry], len); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + + /* Return the packet to the pool */ + *rds_ptr(rd, mblength, lp->type) = 0; + *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000; + *rds_ptr(rd, rmd1, lp->type) = + ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN; + lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; + } + return 0; +} + +static void lance_tx(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + volatile struct lance_regs *ll = lp->ll; + volatile u16 *td; + int i, j; + int status; + + j = lp->tx_old; + + spin_lock(&lp->lock); + + for (i = j; i != lp->tx_new; i = j) { + td = lib_ptr(ib, btx_ring[i], lp->type); + /* If we hit a packet not owned by us, stop */ + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN) + break; + + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) { + status = *tds_ptr(td, misc, lp->type); + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) + dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) + dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + printk("%s: Carrier Lost\n", dev->name); + /* Stop the lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + if (status & (LE_T3_BUF | LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + /* Stop the lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) == + LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. 
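The out: path of lance_tx() just below and lance_start_xmit() both consult TX_BUFFS_AVAIL, which keeps one descriptor permanently unused so that tx_old == tx_new can only mean an empty ring. The same arithmetic written as a function, with RING_MASK standing in for TX_RING_MOD_MASK:

#define RING_MASK 15	/* 16-entry ring, i.e. ring size minus one */

static int tx_buffs_avail(int tx_old, int tx_new)
{
	if (tx_old <= tx_new)
		return tx_old + RING_MASK - tx_new;
	return tx_old - tx_new - 1;
}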
*/ + if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + j = (j + 1) & TX_RING_MOD_MASK; + } + lp->tx_old = j; +out: + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); +} + +static irqreturn_t lance_dma_merr_int(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + + printk(KERN_ERR "%s: DMA error\n", dev->name); + return IRQ_HANDLED; +} + +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int csr0; + + writereg(&ll->rap, LE_CSR0); + csr0 = ll->rdp; + + /* Acknowledge all the interrupt sources ASAP */ + writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT)); + + if ((csr0 & LE_C0_ERR)) { + /* Clear the error condition */ + writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | + LE_C0_CERR | LE_C0_MERR); + } + if (csr0 & LE_C0_RINT) + lance_rx(dev); + + if (csr0 & LE_C0_TINT) + lance_tx(dev); + + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; + + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; + + if (csr0 & LE_C0_MERR) { + printk("%s: Memory error, status %04x\n", dev->name, csr0); + + writereg(&ll->rdp, LE_C0_STOP); + + lance_init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); + } + + writereg(&ll->rdp, LE_C0_INEA); + writereg(&ll->rdp, LE_C0_INEA); + return IRQ_HANDLED; +} + +static int lance_open(struct net_device *dev) +{ + volatile u16 *ib = (volatile u16 *)dev->mem_start; + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int status = 0; + + /* Stop the Lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + /* Set mode and clear multicast filter only at device open, + * so that lance_init_ring() called at any error will not + * forget multicast filters. + * + * BTW it is common bug in all lance drivers! --ANK + */ + *lib_ptr(ib, mode, lp->type) = 0; + *lib_ptr(ib, filter[0], lp->type) = 0; + *lib_ptr(ib, filter[1], lp->type) = 0; + *lib_ptr(ib, filter[2], lp->type) = 0; + *lib_ptr(ib, filter[3], lp->type) = 0; + + lance_init_ring(dev); + load_csrs(lp); + + netif_start_queue(dev); + + /* Associate IRQ with lance_interrupt */ + if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) { + printk("%s: Can't get IRQ %d\n", dev->name, dev->irq); + return -EAGAIN; + } + if (lp->dma_irq >= 0) { + unsigned long flags; + + if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT, + "lance error", dev)) { + free_irq(dev->irq, dev); + printk("%s: Can't get DMA IRQ %d\n", dev->name, + lp->dma_irq); + return -EAGAIN; + } + + spin_lock_irqsave(&ioasic_ssr_lock, flags); + + fast_mb(); + /* Enable I/O ASIC LANCE DMA. */ + ioasic_write(IO_REG_SSR, + ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN); + + fast_mb(); + spin_unlock_irqrestore(&ioasic_ssr_lock, flags); + } + + status = init_restart_lance(lp); + return status; +} + +static int lance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + /* Stop the card */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + if (lp->dma_irq >= 0) { + unsigned long flags; + + spin_lock_irqsave(&ioasic_ssr_lock, flags); + + fast_mb(); + /* Disable I/O ASIC LANCE DMA. 
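lance_interrupt() above reads CSR0 once, acknowledges the sources it latched, and then dispatches on the individual bits. A minimal sketch of that shape; only the control flow is meant to match, and the handlers are placeholders:

#include <stdint.h>

enum { C0_ERR = 0x8000, C0_RINT = 0x0400, C0_TINT = 0x0200 };

static void irq_dispatch(uint16_t csr0,
			 void (*rx)(void), void (*tx)(void), void (*err)(void))
{
	if (csr0 & C0_ERR)	/* babble, missed frame, memory error, ... */
		err();
	if (csr0 & C0_RINT)	/* packets waiting in the receive ring */
		rx();
	if (csr0 & C0_TINT)	/* transmit descriptors completed */
		tx();
}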
*/ + ioasic_write(IO_REG_SSR, + ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN); + + fast_iob(); + spin_unlock_irqrestore(&ioasic_ssr_lock, flags); + + free_irq(lp->dma_irq, dev); + } + free_irq(dev->irq, dev); + return 0; +} + +static inline int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + int status; + + /* Stop the lance */ + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + lance_init_ring(dev); + load_csrs(lp); + dev->trans_start = jiffies; /* prevent tx timeout */ + status = init_restart_lance(lp); + return status; +} + +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + + printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", + dev->name, ll->rdp); + lance_reset(dev); + netif_wake_queue(dev); +} + +static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile struct lance_regs *ll = lp->ll; + volatile u16 *ib = (volatile u16 *)dev->mem_start; + unsigned long flags; + int entry, len; + + len = skb->len; + + if (len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + len = ETH_ZLEN; + } + + dev->stats.tx_bytes += len; + + spin_lock_irqsave(&lp->lock, flags); + + entry = lp->tx_new; + *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); + *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0; + + cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len); + + /* Now, give the packet to the lance */ + *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) = + ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) | + (LE_T1_POK | LE_T1_OWN); + lp->tx_new = (entry + 1) & TX_RING_MOD_MASK; + + if (TX_BUFFS_AVAIL <= 0) + netif_stop_queue(dev); + + /* Kick the lance: transmit now */ + writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD); + + spin_unlock_irqrestore(&lp->lock, flags); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + struct netdev_hw_addr *ha; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + *lib_ptr(ib, filter[0], lp->type) = 0xffff; + *lib_ptr(ib, filter[1], lp->type) = 0xffff; + *lib_ptr(ib, filter[2], lp->type) = 0xffff; + *lib_ptr(ib, filter[3], lp->type) = 0xffff; + return; + } + /* clear the multicast filter */ + *lib_ptr(ib, filter[0], lp->type) = 0; + *lib_ptr(ib, filter[1], lp->type) = 0; + *lib_ptr(ib, filter[2], lp->type) = 0; + *lib_ptr(ib, filter[3], lp->type) = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + crc = crc >> 26; + *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf); + } +} + +static void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + volatile u16 *ib = (volatile u16 *)dev->mem_start; + volatile struct lance_regs *ll = lp->ll; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + writereg(&ll->rap, LE_CSR0); + writereg(&ll->rdp, LE_C0_STOP); + + lance_init_ring(dev); + + if (dev->flags & IFF_PROMISC) { + *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM; + } else { + *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM; + 
lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); +} + +static void lance_set_multicast_retry(unsigned long _opaque) +{ + struct net_device *dev = (struct net_device *) _opaque; + + lance_set_multicast(dev); +} + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_rx_mode = lance_set_multicast, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int dec_lance_probe(struct device *bdev, const int type) +{ + static unsigned version_printed; + static const char fmt[] = "declance%d"; + char name[10]; + struct net_device *dev; + struct lance_private *lp; + volatile struct lance_regs *ll; + resource_size_t start = 0, len = 0; + int i, ret; + unsigned long esar_base; + unsigned char *esar; + + if (dec_lance_debug && version_printed++ == 0) + printk(version); + + if (bdev) + snprintf(name, sizeof(name), "%s", dev_name(bdev)); + else { + i = 0; + dev = root_lance_dev; + while (dev) { + i++; + lp = netdev_priv(dev); + dev = lp->next; + } + snprintf(name, sizeof(name), fmt, i); + } + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (!dev) { + ret = -ENOMEM; + goto err_out; + } + + /* + * alloc_etherdev ensures the data structures used by the LANCE + * are aligned. + */ + lp = netdev_priv(dev); + spin_lock_init(&lp->lock); + + lp->type = type; + switch (type) { + case ASIC_LANCE: + dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE); + + /* buffer space for the on-board LANCE shared memory */ + /* + * FIXME: ugly hack! + */ + dev->mem_start = CKSEG1ADDR(0x00020000); + dev->mem_end = dev->mem_start + 0x00020000; + dev->irq = dec_interrupt[DEC_IRQ_LANCE]; + esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR); + + /* Workaround crash with booting KN04 2.1k from Disk */ + memset((void *)dev->mem_start, 0, + dev->mem_end - dev->mem_start); + + /* + * setup the pointer arrays, this sucks [tm] :-( + */ + for (i = 0; i < RX_RING_SIZE; i++) { + lp->rx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * i * RX_BUFF_SIZE); + lp->rx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); + } + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * RX_RING_SIZE * RX_BUFF_SIZE + + 2 * i * TX_BUFF_SIZE); + lp->tx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + } + + /* Setup I/O ASIC LANCE DMA. 
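The ASIC_LANCE setup above fills two parallel pointer arrays: CPU-visible buffer addresses, which advance at twice the rate because of the halfword padding, and LANCE-visible offsets, which do not. A sketch of the two computations for RX buffer i, with BUF_OFF and BUF_SIZE standing in for BUF_OFFSET_CPU/LNC and RX_BUFF_SIZE:

#define BUF_OFF		128	/* stand-in: offset of the first buffer past the init block */
#define BUF_SIZE	1536

static unsigned long rx_buf_cpu(unsigned long mem_start, int i)
{
	return mem_start + 2 * BUF_OFF + 2 * i * BUF_SIZE;	/* padded CPU space */
}

static unsigned int rx_buf_lnc(int i)
{
	return BUF_OFF + i * BUF_SIZE;				/* LANCE address space */
}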
*/ + lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR]; + ioasic_write(IO_REG_LANCE_DMA_P, + CPHYSADDR(dev->mem_start) << 3); + + break; +#ifdef CONFIG_TC + case PMAD_LANCE: + dev_set_drvdata(bdev, dev); + + start = to_tc_dev(bdev)->resource.start; + len = to_tc_dev(bdev)->resource.end - start + 1; + if (!request_mem_region(start, len, dev_name(bdev))) { + printk(KERN_ERR + "%s: Unable to reserve MMIO resource\n", + dev_name(bdev)); + ret = -EBUSY; + goto err_out_dev; + } + + dev->mem_start = CKSEG1ADDR(start); + dev->mem_end = dev->mem_start + 0x100000; + dev->base_addr = dev->mem_start + 0x100000; + dev->irq = to_tc_dev(bdev)->interrupt; + esar_base = dev->mem_start + 0x1c0002; + lp->dma_irq = -1; + + for (i = 0; i < RX_RING_SIZE; i++) { + lp->rx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + BUF_OFFSET_CPU + + i * RX_BUFF_SIZE); + lp->rx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); + } + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + BUF_OFFSET_CPU + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + lp->tx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + } + + break; +#endif + case PMAX_LANCE: + dev->irq = dec_interrupt[DEC_IRQ_LANCE]; + dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE); + dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM); + dev->mem_end = dev->mem_start + KN01_SLOT_SIZE; + esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1); + lp->dma_irq = -1; + + /* + * setup the pointer arrays, this sucks [tm] :-( + */ + for (i = 0; i < RX_RING_SIZE; i++) { + lp->rx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * i * RX_BUFF_SIZE); + lp->rx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + i * RX_BUFF_SIZE); + } + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_buf_ptr_cpu[i] = + (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU + + 2 * RX_RING_SIZE * RX_BUFF_SIZE + + 2 * i * TX_BUFF_SIZE); + lp->tx_buf_ptr_lnc[i] = + (BUF_OFFSET_LNC + + RX_RING_SIZE * RX_BUFF_SIZE + + i * TX_BUFF_SIZE); + } + + break; + + default: + printk(KERN_ERR "%s: declance_init called with unknown type\n", + name); + ret = -ENODEV; + goto err_out_dev; + } + + ll = (struct lance_regs *) dev->base_addr; + esar = (unsigned char *) esar_base; + + /* prom checks */ + /* First, check for test pattern */ + if (esar[0x60] != 0xff && esar[0x64] != 0x00 && + esar[0x68] != 0x55 && esar[0x6c] != 0xaa) { + printk(KERN_ERR + "%s: Ethernet station address prom not found!\n", + name); + ret = -ENODEV; + goto err_out_resource; + } + /* Check the prom contents */ + for (i = 0; i < 8; i++) { + if (esar[i * 4] != esar[0x3c - i * 4] && + esar[i * 4] != esar[0x40 + i * 4] && + esar[0x3c - i * 4] != esar[0x40 + i * 4]) { + printk(KERN_ERR "%s: Something is wrong with the " + "ethernet station address prom!\n", name); + ret = -ENODEV; + goto err_out_resource; + } + } + + /* Copy the ethernet address to the device structure, later to the + * lance initialization block so the lance gets it every time it's + * (re)initialized. 
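The PROM checks above rely on the DECstation address ROM exposing one meaningful byte per 32-bit word, which is also why the station address is copied from every fourth byte. A sketch of that extraction:

static void esar_copy_addr(const unsigned char *esar, unsigned char mac[6])
{
	int i;

	for (i = 0; i < 6; i++)
		mac[i] = esar[i * 4];	/* one useful byte, three padding bytes */
}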
+ */ + switch (type) { + case ASIC_LANCE: + printk("%s: IOASIC onboard LANCE", name); + break; + case PMAD_LANCE: + printk("%s: PMAD-AA", name); + break; + case PMAX_LANCE: + printk("%s: PMAX onboard LANCE", name); + break; + } + for (i = 0; i < 6; i++) + dev->dev_addr[i] = esar[i * 4]; + + printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); + + dev->netdev_ops = &lance_netdev_ops; + dev->watchdog_timeo = 5*HZ; + + /* lp->ll is the location of the registers for lance card */ + lp->ll = ll; + + /* busmaster_regval (CSR3) should be zero according to the PMAD-AA + * specification. + */ + lp->busmaster_regval = 0; + + dev->dma = 0; + + /* We cannot sleep if the chip is busy during a + * multicast list update event, because such events + * can occur from interrupts (ex. IPv6). So we + * use a timer to try again later when necessary. -DaveM + */ + init_timer(&lp->multicast_timer); + lp->multicast_timer.data = (unsigned long) dev; + lp->multicast_timer.function = lance_set_multicast_retry; + + ret = register_netdev(dev); + if (ret) { + printk(KERN_ERR + "%s: Unable to register netdev, aborting.\n", name); + goto err_out_resource; + } + + if (!bdev) { + lp->next = root_lance_dev; + root_lance_dev = dev; + } + + printk("%s: registered as %s.\n", name, dev->name); + return 0; + +err_out_resource: + if (bdev) + release_mem_region(start, len); + +err_out_dev: + free_netdev(dev); + +err_out: + return ret; +} + +static void __exit dec_lance_remove(struct device *bdev) +{ + struct net_device *dev = dev_get_drvdata(bdev); + resource_size_t start, len; + + unregister_netdev(dev); + start = to_tc_dev(bdev)->resource.start; + len = to_tc_dev(bdev)->resource.end - start + 1; + release_mem_region(start, len); + free_netdev(dev); +} + +/* Find all the lance cards on the system and initialize them */ +static int __init dec_lance_platform_probe(void) +{ + int count = 0; + + if (dec_interrupt[DEC_IRQ_LANCE] >= 0) { + if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) { + if (dec_lance_probe(NULL, ASIC_LANCE) >= 0) + count++; + } else if (!TURBOCHANNEL) { + if (dec_lance_probe(NULL, PMAX_LANCE) >= 0) + count++; + } + } + + return (count > 0) ? 
0 : -ENODEV; +} + +static void __exit dec_lance_platform_remove(void) +{ + while (root_lance_dev) { + struct net_device *dev = root_lance_dev; + struct lance_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + root_lance_dev = lp->next; + free_netdev(dev); + } +} + +#ifdef CONFIG_TC +static int dec_lance_tc_probe(struct device *dev); +static int __exit dec_lance_tc_remove(struct device *dev); + +static const struct tc_device_id dec_lance_tc_table[] = { + { "DEC ", "PMAD-AA " }, + { } +}; +MODULE_DEVICE_TABLE(tc, dec_lance_tc_table); + +static struct tc_driver dec_lance_tc_driver = { + .id_table = dec_lance_tc_table, + .driver = { + .name = "declance", + .bus = &tc_bus_type, + .probe = dec_lance_tc_probe, + .remove = __exit_p(dec_lance_tc_remove), + }, +}; + +static int dec_lance_tc_probe(struct device *dev) +{ + int status = dec_lance_probe(dev, PMAD_LANCE); + if (!status) + get_device(dev); + return status; +} + +static int __exit dec_lance_tc_remove(struct device *dev) +{ + put_device(dev); + dec_lance_remove(dev); + return 0; +} +#endif + +static int __init dec_lance_init(void) +{ + int status; + + status = tc_register_driver(&dec_lance_tc_driver); + if (!status) + dec_lance_platform_probe(); + return status; +} + +static void __exit dec_lance_exit(void) +{ + dec_lance_platform_remove(); + tc_unregister_driver(&dec_lance_tc_driver); +} + + +module_init(dec_lance_init); +module_exit(dec_lance_exit); diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c new file mode 100644 index 000000000..6c9de117f --- /dev/null +++ b/drivers/net/ethernet/amd/hplance.c @@ -0,0 +1,232 @@ +/* hplance.c : the Linux/hp300/lance ethernet driver + * + * Copyright (C) 05/1998 Peter Maydell + * Based on the Sun Lance driver and the NetBSD HP Lance driver + * Uses the generic 7990.c LANCE code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* Used for the temporal inet entries and routing */ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hplance.h" + +/* We have 16392 bytes of RAM for the init block and buffers. This places + * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx + * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes. + */ +#define LANCE_LOG_TX_BUFFERS 1 +#define LANCE_LOG_RX_BUFFERS 3 + +#include "7990.h" /* use generic LANCE code */ + +/* Our private data structure */ +struct hplance_private { + struct lance_private lance; +}; + +/* function prototypes... This is easy because all the grot is in the + * generic LANCE support. All we have to support is probing for boards, + * plus board-specific init, open and close actions. + * Oh, and we need to tell the generic code how to read and write LANCE registers... 
+ */ +static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent); +static void hplance_init(struct net_device *dev, struct dio_dev *d); +static void hplance_remove_one(struct dio_dev *d); +static void hplance_writerap(void *priv, unsigned short value); +static void hplance_writerdp(void *priv, unsigned short value); +static unsigned short hplance_readrdp(void *priv); +static int hplance_open(struct net_device *dev); +static int hplance_close(struct net_device *dev); + +static struct dio_device_id hplance_dio_tbl[] = { + { DIO_ID_LAN }, + { 0 } +}; + +static struct dio_driver hplance_driver = { + .name = "hplance", + .id_table = hplance_dio_tbl, + .probe = hplance_init_one, + .remove = hplance_remove_one, +}; + +static const struct net_device_ops hplance_netdev_ops = { + .ndo_open = hplance_open, + .ndo_stop = hplance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_rx_mode = lance_set_multicast, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = lance_poll, +#endif +}; + +/* Find all the HP Lance boards and initialise them... */ +static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent) +{ + struct net_device *dev; + int err = -ENOMEM; + + dev = alloc_etherdev(sizeof(struct hplance_private)); + if (!dev) + goto out; + + err = -EBUSY; + if (!request_mem_region(dio_resource_start(d), + dio_resource_len(d), d->name)) + goto out_free_netdev; + + hplance_init(dev, d); + err = register_netdev(dev); + if (err) + goto out_release_mem_region; + + dio_set_drvdata(d, dev); + + printk(KERN_INFO "%s: %s; select code %d, addr %pM, irq %d\n", + dev->name, d->name, d->scode, dev->dev_addr, d->ipl); + + return 0; + + out_release_mem_region: + release_mem_region(dio_resource_start(d), dio_resource_len(d)); + out_free_netdev: + free_netdev(dev); + out: + return err; +} + +static void hplance_remove_one(struct dio_dev *d) +{ + struct net_device *dev = dio_get_drvdata(d); + + unregister_netdev(dev); + release_mem_region(dio_resource_start(d), dio_resource_len(d)); + free_netdev(dev); +} + +/* Initialise a single lance board at the given DIO device */ +static void hplance_init(struct net_device *dev, struct dio_dev *d) +{ + unsigned long va = (d->resource.start + DIO_VIRADDRBASE); + struct hplance_private *lp; + int i; + + /* reset the board */ + out_8(va + DIO_IDOFF, 0xff); + udelay(100); /* ariba! ariba! udelay! udelay! */ + + /* Fill the dev fields */ + dev->base_addr = va; + dev->netdev_ops = &hplance_netdev_ops; + dev->dma = 0; + + for (i = 0; i < 6; i++) { + /* The NVRAM holds our ethernet address, one nibble per byte, + * at bytes NVRAMOFF+1,3,5,7,9... 
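As the address loop below shows, the HP NVRAM stores the station address one nibble per byte, so each address byte has to be assembled from two NVRAM locations, high nibble first. The same assembly as a small helper:

static unsigned char nvram_addr_byte(unsigned char hi_loc, unsigned char lo_loc)
{
	return (unsigned char)(((hi_loc & 0x0f) << 4) | (lo_loc & 0x0f));
}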
+ */ + dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4) + | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF); + } + + lp = netdev_priv(dev); + lp->lance.name = d->name; + lp->lance.base = va; + lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */ + lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */ + lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ + lp->lance.irq = d->ipl; + lp->lance.writerap = hplance_writerap; + lp->lance.writerdp = hplance_writerdp; + lp->lance.readrdp = hplance_readrdp; + lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; + lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; + lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK; + lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK; +} + +/* This is disgusting. We have to check the DIO status register for ack every + * time we read or write the LANCE registers. + */ +static void hplance_writerap(void *priv, unsigned short value) +{ + struct lance_private *lp = (struct lance_private *)priv; + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static void hplance_writerdp(void *priv, unsigned short value) +{ + struct lance_private *lp = (struct lance_private *)priv; + do { + out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); +} + +static unsigned short hplance_readrdp(void *priv) +{ + struct lance_private *lp = (struct lance_private *)priv; + __u16 value; + do { + value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); + } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); + return value; +} + +static int hplance_open(struct net_device *dev) +{ + int status; + struct lance_private *lp = netdev_priv(dev); + + status = lance_open(dev); /* call generic lance open code */ + if (status) + return status; + /* enable interrupts at board level. */ + out_8(lp->base + HPLANCE_STATUS, LE_IE); + + return 0; +} + +static int hplance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */ + lance_close(dev); + return 0; +} + +static int __init hplance_init_module(void) +{ + return dio_register_driver(&hplance_driver); +} + +static void __exit hplance_cleanup_module(void) +{ + dio_unregister_driver(&hplance_driver); +} + +module_init(hplance_init_module); +module_exit(hplance_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/hplance.h b/drivers/net/ethernet/amd/hplance.h new file mode 100644 index 000000000..04aee9e03 --- /dev/null +++ b/drivers/net/ethernet/amd/hplance.h @@ -0,0 +1,26 @@ +/* Random defines and structures for the HP Lance driver. + * Copyright (C) 05/1998 Peter Maydell + * Based on the Sun Lance driver and the NetBSD HP Lance driver + */ + +/* Registers */ +#define HPLANCE_ID 0x01 /* DIO register: ID byte */ +#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */ + +/* Control and status bits for the status register */ +#define LE_IE 0x80 /* interrupt enable */ +#define LE_IR 0x40 /* interrupt requested */ +#define LE_LOCK 0x08 /* lock status register */ +#define LE_ACK 0x04 /* ack of lock */ +#define LE_JAB 0x02 /* loss of tx clock (???) 
*/ +/* We can also extract the IPL from the status register with the standard + * DIO_IPL(hplance) macro, or using dio_scodetoipl() + */ + +/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg, + * memory and NVRAM: + */ +#define HPLANCE_IDOFF 0 /* board baseaddr */ +#define HPLANCE_REGOFF 0x4000 /* lance registers */ +#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */ +#define HPLANCE_NVRAMOFF 0xC008 /* etheraddress as one *nibble* per byte */ diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c new file mode 100644 index 000000000..256f590f6 --- /dev/null +++ b/drivers/net/ethernet/amd/lance.c @@ -0,0 +1,1312 @@ +/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */ +/* + Written/copyright 1993-1998 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + This driver is for the Allied Telesis AT1500 and HP J2405A, and should work + with most other LANCE-based bus-master (NE2100/NE2500) ethercards. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Andrey V. Savochkin: + - alignment problem with 1.3.* kernel and some minor changes. + Thomas Bogendoerfer (tsbogend@bigbug.franken.de): + - added support for Linux/Alpha, but removed most of it, because + it worked only for the PCI chip. + - added hook for the 32bit lance driver + - added PCnetPCI II (79C970A) to chip table + Paul Gortmaker (gpg109@rsphy1.anu.edu.au): + - hopefully fix above so Linux/Alpha can use ISA cards too. + 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb + v1.12 10/27/97 Module support -djb + v1.14 2/3/98 Module support modified, made PCI support optional -djb + v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed + before unregister_netdev() which caused NULL pointer + reference later in the chain (in rtnetlink_fill_ifinfo()) + -- Mika Kuoppala + + Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from + the 2.1 version of the old driver - Alan Cox + + Get rid of check_region, check kmalloc return in lance_probe1 + Arnaldo Carvalho de Melo - 11/01/2001 + + Reworked detection, added support for Racal InterLan EtherBlaster cards + Vesselin Kostadinov - 22/4/2004 +*/ + +static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0}; +static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options); +static int __init do_lance_probe(struct net_device *dev); + + +static struct card { + char id_offset14; + char id_offset15; +} cards[] = { + { //"normal" + .id_offset14 = 0x57, + .id_offset15 = 0x57, + }, + { //NI6510EB + .id_offset14 = 0x52, + .id_offset15 = 0x44, + }, + { //Racal InterLan EtherBlaster + .id_offset14 = 0x52, + .id_offset15 = 0x49, + }, +}; +#define NUM_CARDS 3 + +#ifdef LANCE_DEBUG +static int lance_debug = LANCE_DEBUG; +#else +static int lance_debug = 1; +#endif + +/* + Theory of Operation + +I. 
Board Compatibility + +This device driver is designed for the AMD 79C960, the "PCnet-ISA +single-chip ethernet controller for ISA". This chip is used in a wide +variety of boards from vendors such as Allied Telesis, HP, Kingston, +and Boca. This driver is also intended to work with older AMD 7990 +designs, such as the NE1500 and NE2100, and newer 79C961. For convenience, +I use the name LANCE to refer to all of the AMD chips, even though it properly +refers only to the original 7990. + +II. Board-specific settings + +The driver is designed to work the boards that use the faster +bus-master mode, rather than in shared memory mode. (Only older designs +have on-board buffer memory needed to support the slower shared memory mode.) + +Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA +channel. This driver probes the likely base addresses: +{0x300, 0x320, 0x340, 0x360}. +After the board is found it generates a DMA-timeout interrupt and uses +autoIRQ to find the IRQ line. The DMA channel can be set with the low bits +of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is +probed for by enabling each free DMA channel in turn and checking if +initialization succeeds. + +The HP-J2405A board is an exception: with this board it is easy to read the +EEPROM-set values for the base, IRQ, and DMA. (Of course you must already +_know_ the base address -- that field is for writing the EEPROM.) + +III. Driver operation + +IIIa. Ring buffers +The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes +the base and length of the data buffer, along with status bits. The length +of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of +the buffer length (rather than being directly the buffer length) for +implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to +ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries +needlessly uses extra space and reduces the chance that an upper layer will +be able to reorder queued Tx packets based on priority. Decreasing the number +of entries makes it more difficult to achieve back-to-back packet transmission +and increases the chance that Rx ring will overflow. (Consider the worst case +of receiving back-to-back minimum-sized packets.) + +The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver +statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to +avoid the administrative overhead. For the Rx side this avoids dynamically +allocating full-sized buffers "just in case", at the expense of a +memory-to-memory data copy for each packet received. For most systems this +is a good tradeoff: the Rx buffer will always be in low memory, the copy +is inexpensive, and it primes the cache for later packet processing. For Tx +the buffers are only used when needed as low-memory bounce buffers. + +IIIB. 16M memory limitations. +For the ISA bus master mode all structures used directly by the LANCE, +the initialization block, Rx and Tx rings, and data buffers, must be +accessible from the ISA bus, i.e. in the lower 16M of real memory. +This is a problem for current Linux kernels on >16M machines. The network +devices are initialized after memory initialization, and the kernel doles out +memory from the top of memory downward. The current solution is to have a +special network initialization routine that's called before memory +initialization; this will eventually be generalized for all network devices. 
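A short sketch of the reachability test implied by the 16M limitation just described (the constant and helper names here are illustrative only, not the driver's):

#define ISA_DMA_LIMIT 0x1000000UL		// 16 MB, the highest address an ISA bus master can reach

static int isa_reachable(unsigned long bus_addr, unsigned long len)
{
	return bus_addr + len <= ISA_DMA_LIMIT;	// otherwise copy through a low-memory bounce buffer
}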
+As mentioned before, low-memory "bounce-buffers" are used when needed. + +IIIC. Synchronization +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +The send packet thread has partial control over the Tx ring and 'dev->tbusy' +flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next +queue slot is empty, it clears the tbusy flag when finished otherwise it sets +the 'lp->tx_full' flag. + +The interrupt handler has exclusive control over the Rx ring and records stats +from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so +we can't avoid the interrupt overhead by having the Tx routine reap the Tx +stats.) After reaping the stats, it marks the queue entry as empty by setting +the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the +tx_full and tbusy flags. + +*/ + +/* Set the number of Tx and Rx buffers, using Log_2(# buffers). + Reasonable default values are 16 Tx buffers, and 16 Rx buffers. + That translates to 4 and 4 (16 == 2^^4). + This is a compile-time option for efficiency. + */ +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 4 +#define LANCE_LOG_RX_BUFFERS 4 +#endif + +#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) +#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) + +#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) +#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) + +#define PKT_BUF_SZ 1544 + +/* Offsets from base I/O address. */ +#define LANCE_DATA 0x10 +#define LANCE_ADDR 0x12 +#define LANCE_RESET 0x14 +#define LANCE_BUS_IF 0x16 +#define LANCE_TOTAL_SIZE 0x18 + +#define TX_TIMEOUT (HZ/5) + +/* The LANCE Rx and Tx ring descriptors. */ +struct lance_rx_head { + s32 base; + s16 buf_length; /* This length is 2s complement (negative)! */ + s16 msg_length; /* This length is "normal". */ +}; + +struct lance_tx_head { + s32 base; + s16 length; /* Length is 2s complement (negative)! */ + s16 misc; +}; + +/* The LANCE initialization block, described in databook. */ +struct lance_init_block { + u16 mode; /* Pre-set mode (reg. 15) */ + u8 phys_addr[6]; /* Physical ethernet address */ + u32 filter[2]; /* Multicast filter (unused). */ + /* Receive and transmit ring base, along with extra bits. */ + u32 rx_ring; /* Tx and Rx ring base pointers */ + u32 tx_ring; +}; + +struct lance_private { + /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */ + struct lance_rx_head rx_ring[RX_RING_SIZE]; + struct lance_tx_head tx_ring[TX_RING_SIZE]; + struct lance_init_block init_block; + const char *name; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + /* The addresses of receive-in-place skbuffs. */ + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + unsigned long rx_buffs; /* Address of Rx and Tx buffers. */ + /* Tx low-memory "bounce buffer" address. */ + char (*tx_bounce_buffs)[PKT_BUF_SZ]; + int cur_rx, cur_tx; /* The next free ring entry */ + int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ + int dma; + unsigned char chip_version; /* See lance_chip_type. 
*/ + spinlock_t devlock; +}; + +#define LANCE_MUST_PAD 0x00000001 +#define LANCE_ENABLE_AUTOSELECT 0x00000002 +#define LANCE_MUST_REINIT_RING 0x00000004 +#define LANCE_MUST_UNRESET 0x00000008 +#define LANCE_HAS_MISSED_FRAME 0x00000010 + +/* A mapping from the chip ID number to the part number and features. + These are from the datasheets -- in real life the '970 version + reportedly has the same ID as the '965. */ +static struct lance_chip_type { + int id_number; + const char *name; + int flags; +} chip_table[] = { + {0x0000, "LANCE 7990", /* Ancient lance chip. */ + LANCE_MUST_PAD + LANCE_MUST_UNRESET}, + {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call + it the PCnet32. */ + {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */ + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, + {0x0, "PCnet (unknown)", + LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING + + LANCE_HAS_MISSED_FRAME}, +}; + +enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6}; + + +/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers. + Assume yes until we know the memory size. */ +static unsigned char lance_need_isa_bounce_buffers = 1; + +static int lance_open(struct net_device *dev); +static void lance_init_ring(struct net_device *dev, gfp_t mode); +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int lance_rx(struct net_device *dev); +static irqreturn_t lance_interrupt(int irq, void *dev_id); +static int lance_close(struct net_device *dev); +static struct net_device_stats *lance_get_stats(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void lance_tx_timeout (struct net_device *dev); + + + +#ifdef MODULE +#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */ + +static struct net_device *dev_lance[MAX_CARDS]; +static int io[MAX_CARDS]; +static int dma[MAX_CARDS]; +static int irq[MAX_CARDS]; + +module_param_array(io, int, NULL, 0); +module_param_array(dma, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param(lance_debug, int, 0); +MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required"); +MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); +MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); +MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); + +int __init init_module(void) +{ + struct net_device *dev; + int this_dev, found = 0; + + for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) { + if (io[this_dev] == 0) { + if (this_dev != 0) /* only complain once */ + break; + printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. 
Append \"io=0xNNN\" value(s).\n"); + return -EPERM; + } + dev = alloc_etherdev(0); + if (!dev) + break; + dev->irq = irq[this_dev]; + dev->base_addr = io[this_dev]; + dev->dma = dma[this_dev]; + if (do_lance_probe(dev) == 0) { + dev_lance[found++] = dev; + continue; + } + free_netdev(dev); + break; + } + if (found != 0) + return 0; + return -ENXIO; +} + +static void cleanup_card(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + if (dev->dma != 4) + free_dma(dev->dma); + release_region(dev->base_addr, LANCE_TOTAL_SIZE); + kfree(lp->tx_bounce_buffs); + kfree((void*)lp->rx_buffs); + kfree(lp); +} + +void __exit cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) { + struct net_device *dev = dev_lance[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ +MODULE_LICENSE("GPL"); + + +/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other + board probes now that kmalloc() can allocate ISA DMA-able regions. + This also allows the LANCE driver to be used as a module. + */ +static int __init do_lance_probe(struct net_device *dev) +{ + unsigned int *port; + int result; + + if (high_memory <= phys_to_virt(16*1024*1024)) + lance_need_isa_bounce_buffers = 0; + + for (port = lance_portlist; *port; port++) { + int ioaddr = *port; + struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE, + "lance-probe"); + + if (r) { + /* Detect the card with minimal I/O reads */ + char offset14 = inb(ioaddr + 14); + int card; + for (card = 0; card < NUM_CARDS; ++card) + if (cards[card].id_offset14 == offset14) + break; + if (card < NUM_CARDS) {/*yes, the first byte matches*/ + char offset15 = inb(ioaddr + 15); + for (card = 0; card < NUM_CARDS; ++card) + if ((cards[card].id_offset14 == offset14) && + (cards[card].id_offset15 == offset15)) + break; + } + if (card < NUM_CARDS) { /*Signature OK*/ + result = lance_probe1(dev, ioaddr, 0, 0); + if (!result) { + struct lance_private *lp = dev->ml_priv; + int ver = lp->chip_version; + + r->name = chip_table[ver].name; + return 0; + } + } + release_region(ioaddr, LANCE_TOTAL_SIZE); + } + } + return -ENODEV; +} + +#ifndef MODULE +struct net_device * __init lance_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(0); + int err; + + if (!dev) + return ERR_PTR(-ENODEV); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = do_lance_probe(dev); + if (err) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_start_xmit = lance_start_xmit, + .ndo_stop = lance_close, + .ndo_get_stats = lance_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options) +{ + struct lance_private *lp; + unsigned long dma_channels; /* Mark spuriously-busy DMA channels */ + int i, reset_val, lance_version; + const char *chipname; + /* Flags for specific chips or boards. */ + unsigned char hpJ2405A = 0; /* HP ISA adaptor */ + int hp_builtin = 0; /* HP on-board ethernet. */ + static int did_version; /* Already printed version info. */ + unsigned long flags; + int err = -ENOMEM; + void __iomem *bios; + + /* First we look for special cases. 
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS. + There are two HP versions, check the BIOS for the configuration port. + This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com. + */ + bios = ioremap(0xf00f0, 0x14); + if (!bios) + return -ENOMEM; + if (readw(bios + 0x12) == 0x5048) { + static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360}; + int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99; + /* We can have boards other than the built-in! Verify this is on-board. */ + if ((inb(hp_port) & 0xc0) == 0x80 && + ioaddr_table[inb(hp_port) & 3] == ioaddr) + hp_builtin = hp_port; + } + iounmap(bios); + /* We also recognize the HP Vectra on-board here, but check below. */ + hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 && + inb(ioaddr+2) == 0x09); + + /* Reset the LANCE. */ + reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */ + + /* The Un-Reset needed is only needed for the real NE2100, and will + confuse the HP board. */ + if (!hpJ2405A) + outw(reset_val, ioaddr+LANCE_RESET); + + outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */ + if (inw(ioaddr+LANCE_DATA) != 0x0004) + return -ENODEV; + + /* Get the version of the chip. */ + outw(88, ioaddr+LANCE_ADDR); + if (inw(ioaddr+LANCE_ADDR) != 88) { + lance_version = 0; + } else { /* Good, it's a newer chip. */ + int chip_version = inw(ioaddr+LANCE_DATA); + outw(89, ioaddr+LANCE_ADDR); + chip_version |= inw(ioaddr+LANCE_DATA) << 16; + if (lance_debug > 2) + printk(" LANCE chip version is %#x.\n", chip_version); + if ((chip_version & 0xfff) != 0x003) + return -ENODEV; + chip_version = (chip_version >> 12) & 0xffff; + for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) { + if (chip_table[lance_version].id_number == chip_version) + break; + } + } + + /* We can't allocate private data from alloc_etherdev() because it must + a ISA DMA-able region. */ + chipname = chip_table[lance_version].name; + printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr); + + /* There is a 16 byte station address PROM at the base address. + The first six bytes are the station address. */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + i); + printk("%pM", dev->dev_addr); + + dev->base_addr = ioaddr; + /* Make certain the data structures used by the LANCE are aligned and DMAble. */ + + lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); + if(lp==NULL) + return -ENODEV; + if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); + dev->ml_priv = lp; + lp->name = chipname; + lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, + GFP_DMA | GFP_KERNEL); + if (!lp->rx_buffs) + goto out_lp; + if (lance_need_isa_bounce_buffers) { + lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE, + GFP_DMA | GFP_KERNEL); + if (!lp->tx_bounce_buffs) + goto out_rx; + } else + lp->tx_bounce_buffs = NULL; + + lp->chip_version = lance_version; + spin_lock_init(&lp->devlock); + + lp->init_block.mode = 0x0003; /* Disable Rx and Tx. 
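   Bits 0 and 1 of the mode word are DRX and DTX, so 0x0003 keeps the receiver
   and transmitter off while the rings and station address are loaded;
   lance_init_ring() later clears mode back to 0x0000 before the chip is started.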
*/ + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.filter[0] = 0x00000000; + lp->init_block.filter[1] = 0x00000000; + lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; + lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; + + outw(0x0001, ioaddr+LANCE_ADDR); + inw(ioaddr+LANCE_ADDR); + outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); + outw(0x0002, ioaddr+LANCE_ADDR); + inw(ioaddr+LANCE_ADDR); + outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); + outw(0x0000, ioaddr+LANCE_ADDR); + inw(ioaddr+LANCE_ADDR); + + if (irq) { /* Set iff PCI card. */ + dev->dma = 4; /* Native bus-master, no DMA channel needed. */ + dev->irq = irq; + } else if (hp_builtin) { + static const char dma_tbl[4] = {3, 5, 6, 0}; + static const char irq_tbl[4] = {3, 4, 5, 9}; + unsigned char port_val = inb(hp_builtin); + dev->dma = dma_tbl[(port_val >> 4) & 3]; + dev->irq = irq_tbl[(port_val >> 2) & 3]; + printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma); + } else if (hpJ2405A) { + static const char dma_tbl[4] = {3, 5, 6, 7}; + static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15}; + short reset_val = inw(ioaddr+LANCE_RESET); + dev->dma = dma_tbl[(reset_val >> 2) & 3]; + dev->irq = irq_tbl[(reset_val >> 4) & 7]; + printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma); + } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */ + short bus_info; + outw(8, ioaddr+LANCE_ADDR); + bus_info = inw(ioaddr+LANCE_BUS_IF); + dev->dma = bus_info & 0x07; + dev->irq = (bus_info >> 4) & 0x0F; + } else { + /* The DMA channel may be passed in PARAM1. */ + if (dev->mem_start & 0x07) + dev->dma = dev->mem_start & 0x07; + } + + if (dev->dma == 0) { + /* Read the DMA channel status register, so that we can avoid + stuck DMA channels in the DMA detection below. */ + dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) | + (inb(DMA2_STAT_REG) & 0xf0); + } + err = -ENODEV; + if (dev->irq >= 2) + printk(" assigned IRQ %d", dev->irq); + else if (lance_version != 0) { /* 7990 boards need DMA detection first. */ + unsigned long irq_mask; + + /* To auto-IRQ we enable the initialization-done and DMA error + interrupts. For ISA boards we get a DMA error, but VLB and PCI + boards will work. */ + irq_mask = probe_irq_on(); + + /* Trigger an initialization just for the interrupt. */ + outw(0x0041, ioaddr+LANCE_DATA); + + mdelay(20); + dev->irq = probe_irq_off(irq_mask); + if (dev->irq) + printk(", probed IRQ %d", dev->irq); + else { + printk(", failed to detect IRQ line.\n"); + goto out_tx; + } + + /* Check for the initialization done bit, 0x0100, which means + that we don't need a DMA channel. */ + if (inw(ioaddr+LANCE_DATA) & 0x0100) + dev->dma = 4; + } + + if (dev->dma == 4) { + printk(", no DMA needed.\n"); + } else if (dev->dma) { + if (request_dma(dev->dma, chipname)) { + printk("DMA %d allocation failed.\n", dev->dma); + goto out_tx; + } else + printk(", assigned DMA %d.\n", dev->dma); + } else { /* OK, we have to auto-DMA. */ + for (i = 0; i < 4; i++) { + static const char dmas[] = { 5, 6, 7, 3 }; + int dma = dmas[i]; + int boguscnt; + + /* Don't enable a permanently busy DMA channel, or the machine + will hang. */ + if (test_bit(dma, &dma_channels)) + continue; + outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. 
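   Each candidate channel is then put in cascade mode and a LANCE init is
   triggered; only the channel actually wired to the board lets the init
   complete (IDON, 0x0100, appears in CSR0), and that channel is kept.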
*/ + if (request_dma(dma, chipname)) + continue; + + flags=claim_dma_lock(); + set_dma_mode(dma, DMA_MODE_CASCADE); + enable_dma(dma); + release_dma_lock(flags); + + /* Trigger an initialization. */ + outw(0x0001, ioaddr+LANCE_DATA); + for (boguscnt = 100; boguscnt > 0; --boguscnt) + if (inw(ioaddr+LANCE_DATA) & 0x0900) + break; + if (inw(ioaddr+LANCE_DATA) & 0x0100) { + dev->dma = dma; + printk(", DMA %d.\n", dev->dma); + break; + } else { + flags=claim_dma_lock(); + disable_dma(dma); + release_dma_lock(flags); + free_dma(dma); + } + } + if (i == 4) { /* Failure: bail. */ + printk("DMA detection failed.\n"); + goto out_tx; + } + } + + if (lance_version == 0 && dev->irq == 0) { + /* We may auto-IRQ now that we have a DMA channel. */ + /* Trigger an initialization just for the interrupt. */ + unsigned long irq_mask; + + irq_mask = probe_irq_on(); + outw(0x0041, ioaddr+LANCE_DATA); + + mdelay(40); + dev->irq = probe_irq_off(irq_mask); + if (dev->irq == 0) { + printk(" Failed to detect the 7990 IRQ line.\n"); + goto out_dma; + } + printk(" Auto-IRQ detected IRQ%d.\n", dev->irq); + } + + if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { + /* Turn on auto-select of media (10baseT or BNC) so that the user + can watch the LEDs even if the board isn't opened. */ + outw(0x0002, ioaddr+LANCE_ADDR); + /* Don't touch 10base2 power bit. */ + outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF); + } + + if (lance_debug > 0 && did_version++ == 0) + printk(version); + + /* The LANCE-specific entries in the device structure. */ + dev->netdev_ops = &lance_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + err = register_netdev(dev); + if (err) + goto out_dma; + return 0; +out_dma: + if (dev->dma != 4) + free_dma(dev->dma); +out_tx: + kfree(lp->tx_bounce_buffs); +out_rx: + kfree((void*)lp->rx_buffs); +out_lp: + kfree(lp); + return err; +} + + +static int +lance_open(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + int i; + + if (dev->irq == 0 || + request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) { + return -EAGAIN; + } + + /* We used to allocate DMA here, but that was silly. + DMA lines can't be shared! We now permanently allocate them. */ + + /* Reset the LANCE */ + inw(ioaddr+LANCE_RESET); + + /* The DMA controller is used as a no-operation slave, "cascade mode". */ + if (dev->dma != 4) { + unsigned long flags=claim_dma_lock(); + enable_dma(dev->dma); + set_dma_mode(dev->dma, DMA_MODE_CASCADE); + release_dma_lock(flags); + } + + /* Un-Reset the LANCE, needed only for the NE2100. */ + if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET) + outw(0, ioaddr+LANCE_RESET); + + if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) { + /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */ + outw(0x0002, ioaddr+LANCE_ADDR); + /* Only touch autoselect bit. */ + outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF); + } + + if (lance_debug > 1) + printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n", + dev->name, dev->irq, dev->dma, + (u32) isa_virt_to_bus(lp->tx_ring), + (u32) isa_virt_to_bus(lp->rx_ring), + (u32) isa_virt_to_bus(&lp->init_block)); + + lance_init_ring(dev, GFP_KERNEL); + /* Re-initialize the LANCE, and start it when done. 
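   CSR1/CSR2 receive the low/high 16 bits of the init block's bus address,
   CSR4 is set to 0x0915, and writing INIT (0x0001) to CSR0 makes the chip
   fetch the init block; the polling loop below then waits for IDON (0x0100).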
*/ + outw(0x0001, ioaddr+LANCE_ADDR); + outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA); + outw(0x0002, ioaddr+LANCE_ADDR); + outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA); + + outw(0x0004, ioaddr+LANCE_ADDR); + outw(0x0915, ioaddr+LANCE_DATA); + + outw(0x0000, ioaddr+LANCE_ADDR); + outw(0x0001, ioaddr+LANCE_DATA); + + netif_start_queue (dev); + + i = 0; + while (i++ < 100) + if (inw(ioaddr+LANCE_DATA) & 0x0100) + break; + /* + * We used to clear the InitDone bit, 0x0100, here but Mark Stockton + * reports that doing so triggers a bug in the '974. + */ + outw(0x0042, ioaddr+LANCE_DATA); + + if (lance_debug > 2) + printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n", + dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA)); + + return 0; /* Always succeed */ +} + +/* The LANCE has been halted for one reason or another (busmaster memory + arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, + etc.). Modern LANCE variants always reload their ring-buffer + configuration when restarted, so we must reinitialize our ring + context before restarting. As part of this reinitialization, + find all packets still on the Tx ring and pretend that they had been + sent (in effect, drop the packets on the floor) - the higher-level + protocols will time out and retransmit. It'd be better to shuffle + these skbs to a temp list and then actually re-Tx them after + restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com +*/ + +static void +lance_purge_ring(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int i; + + /* Free all the skbuffs in the Rx and Tx queues. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = lp->rx_skbuff[i]; + lp->rx_skbuff[i] = NULL; + lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */ + if (skb) + dev_kfree_skb_any(skb); + } + for (i = 0; i < TX_RING_SIZE; i++) { + if (lp->tx_skbuff[i]) { + dev_kfree_skb_any(lp->tx_skbuff[i]); + lp->tx_skbuff[i] = NULL; + } + } +} + + +/* Initialize the LANCE Rx and Tx rings. */ +static void +lance_init_ring(struct net_device *dev, gfp_t gfp) +{ + struct lance_private *lp = dev->ml_priv; + int i; + + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_rx = lp->dirty_tx = 0; + + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + void *rx_buff; + + skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp); + lp->rx_skbuff[i] = skb; + if (skb) + rx_buff = skb->data; + else + rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp); + if (rx_buff == NULL) + lp->rx_ring[i].base = 0; + else + lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000; + lp->rx_ring[i].buf_length = -PKT_BUF_SZ; + } + /* The Tx buffer address is filled in as needed, but we do need to clear + the upper ownership bit. 
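   Bit 31 of 'base' is the OWN bit: while it is clear the descriptor belongs
   to the host, and the Rx loop above gives buffers to the chip by or-ing in
   0x80000000.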
*/ + for (i = 0; i < TX_RING_SIZE; i++) { + lp->tx_skbuff[i] = NULL; + lp->tx_ring[i].base = 0; + } + + lp->init_block.mode = 0x0000; + for (i = 0; i < 6; i++) + lp->init_block.phys_addr[i] = dev->dev_addr[i]; + lp->init_block.filter[0] = 0x00000000; + lp->init_block.filter[1] = 0x00000000; + lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS; + lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS; +} + +static void +lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit) +{ + struct lance_private *lp = dev->ml_priv; + + if (must_reinit || + (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) { + lance_purge_ring(dev); + lance_init_ring(dev, GFP_ATOMIC); + } + outw(0x0000, dev->base_addr + LANCE_ADDR); + outw(csr0_bits, dev->base_addr + LANCE_DATA); +} + + +static void lance_tx_timeout (struct net_device *dev) +{ + struct lance_private *lp = (struct lance_private *) dev->ml_priv; + int ioaddr = dev->base_addr; + + outw (0, ioaddr + LANCE_ADDR); + printk ("%s: transmit timed out, status %4.4x, resetting.\n", + dev->name, inw (ioaddr + LANCE_DATA)); + outw (0x0004, ioaddr + LANCE_DATA); + dev->stats.tx_errors++; +#ifndef final_version + if (lance_debug > 3) { + int i; + printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", + lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "", + lp->cur_rx); + for (i = 0; i < RX_RING_SIZE; i++) + printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ", + lp->rx_ring[i].base, -lp->rx_ring[i].buf_length, + lp->rx_ring[i].msg_length); + for (i = 0; i < TX_RING_SIZE; i++) + printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ", + lp->tx_ring[i].base, -lp->tx_ring[i].length, + lp->tx_ring[i].misc); + printk ("\n"); + } +#endif + lance_restart (dev, 0x0043, 1); + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + + +static netdev_tx_t lance_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + int entry; + unsigned long flags; + + spin_lock_irqsave(&lp->devlock, flags); + + if (lance_debug > 3) { + outw(0x0000, ioaddr+LANCE_ADDR); + printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, + inw(ioaddr+LANCE_DATA)); + outw(0x0000, ioaddr+LANCE_DATA); + } + + /* Fill in a Tx ring entry */ + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & TX_RING_MOD_MASK; + + /* Caution: the write order is important here, set the base address + with the "ownership" bits last. */ + + /* The old LANCE chips doesn't automatically pad buffers to min. size. */ + if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) { + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + goto out; + lp->tx_ring[entry].length = -ETH_ZLEN; + } + else + lp->tx_ring[entry].length = -skb->len; + } else + lp->tx_ring[entry].length = -skb->len; + + lp->tx_ring[entry].misc = 0x0000; + + dev->stats.tx_bytes += skb->len; + + /* If any part of this buffer is >16M we must copy it to a low-memory + buffer. 
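   Such addresses are beyond the LANCE's 24 address lines, so the data is
   copied into the preallocated bounce buffer for this ring slot; either way
   the descriptor's top byte is armed as 0x83 -- OWN | STP | ENP, i.e. a
   frame contained in a single buffer.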
*/ + if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) { + if (lance_debug > 5) + printk("%s: bouncing a high-memory packet (%#x).\n", + dev->name, (u32)isa_virt_to_bus(skb->data)); + skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); + lp->tx_ring[entry].base = + ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; + dev_kfree_skb(skb); + } else { + lp->tx_skbuff[entry] = skb; + lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000; + } + lp->cur_tx++; + + /* Trigger an immediate send poll. */ + outw(0x0000, ioaddr+LANCE_ADDR); + outw(0x0048, ioaddr+LANCE_DATA); + + if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) + netif_stop_queue(dev); + +out: + spin_unlock_irqrestore(&lp->devlock, flags); + return NETDEV_TX_OK; +} + +/* The LANCE interrupt handler. */ +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp; + int csr0, ioaddr, boguscnt=10; + int must_restart; + + ioaddr = dev->base_addr; + lp = dev->ml_priv; + + spin_lock (&lp->devlock); + + outw(0x00, dev->base_addr + LANCE_ADDR); + while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 && + --boguscnt >= 0) { + /* Acknowledge all of the current interrupt sources ASAP. */ + outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA); + + must_restart = 0; + + if (lance_debug > 5) + printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", + dev->name, csr0, inw(dev->base_addr + LANCE_DATA)); + + if (csr0 & 0x0400) /* Rx interrupt */ + lance_rx(dev); + + if (csr0 & 0x0200) { /* Tx-done interrupt */ + int dirty_tx = lp->dirty_tx; + + while (dirty_tx < lp->cur_tx) { + int entry = dirty_tx & TX_RING_MOD_MASK; + int status = lp->tx_ring[entry].base; + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x40000000) { + /* There was an major error, log it. */ + int err_status = lp->tx_ring[entry].misc; + dev->stats.tx_errors++; + if (err_status & 0x0400) + dev->stats.tx_aborted_errors++; + if (err_status & 0x0800) + dev->stats.tx_carrier_errors++; + if (err_status & 0x1000) + dev->stats.tx_window_errors++; + if (err_status & 0x4000) { + /* Ackk! On FIFO errors the Tx unit is turned off! */ + dev->stats.tx_fifo_errors++; + /* Remove this verbosity later! */ + printk("%s: Tx FIFO error! Status %4.4x.\n", + dev->name, csr0); + /* Restart the chip. */ + must_restart = 1; + } + } else { + if (status & 0x18000000) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + + /* We must free the original skb if it's not a data-only copy + in the bounce buffer. */ + if (lp->tx_skbuff[entry]) { + dev_kfree_skb_irq(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + } + dirty_tx++; + } + +#ifndef final_version + if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { + printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n", + dirty_tx, lp->cur_tx, + netif_queue_stopped(dev) ? "yes" : "no"); + dirty_tx += TX_RING_SIZE; + } +#endif + + /* if the ring is no longer full, accept more packets */ + if (netif_queue_stopped(dev) && + dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) + netif_wake_queue (dev); + + lp->dirty_tx = dirty_tx; + } + + /* Log misc errors. */ + if (csr0 & 0x4000) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & 0x1000) + dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & 0x0800) { + printk("%s: Bus master arbitration failure, status %4.4x.\n", + dev->name, csr0); + /* Restart the chip. 
*/ + must_restart = 1; + } + + if (must_restart) { + /* stop the chip to clear the error condition, then restart */ + outw(0x0000, dev->base_addr + LANCE_ADDR); + outw(0x0004, dev->base_addr + LANCE_DATA); + lance_restart(dev, 0x0002, 0); + } + } + + /* Clear any other interrupt, and set interrupt enable. */ + outw(0x0000, dev->base_addr + LANCE_ADDR); + outw(0x7940, dev->base_addr + LANCE_DATA); + + if (lance_debug > 4) + printk("%s: exiting interrupt, csr%d=%#4.4x.\n", + dev->name, inw(ioaddr + LANCE_ADDR), + inw(dev->base_addr + LANCE_DATA)); + + spin_unlock (&lp->devlock); + return IRQ_HANDLED; +} + +static int +lance_rx(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + int entry = lp->cur_rx & RX_RING_MOD_MASK; + int i; + + /* If we own the next entry, it's a new packet. Send it up. */ + while (lp->rx_ring[entry].base >= 0) { + int status = lp->rx_ring[entry].base >> 24; + + if (status != 0x03) { /* There was an error. */ + /* There is a tricky error noted by John Murphy, + to Russ Nelson: Even with full-sized + buffers it's possible for a jabber packet to use two + buffers, with only the last correctly noting the error. */ + if (status & 0x01) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & 0x20) + dev->stats.rx_frame_errors++; + if (status & 0x10) + dev->stats.rx_over_errors++; + if (status & 0x08) + dev->stats.rx_crc_errors++; + if (status & 0x04) + dev->stats.rx_fifo_errors++; + lp->rx_ring[entry].base &= 0x03ffffff; + } + else + { + /* Malloc up new buffer, compatible with net3. */ + short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4; + struct sk_buff *skb; + + if(pkt_len<60) + { + printk("%s: Runt packet!\n",dev->name); + dev->stats.rx_errors++; + } + else + { + skb = dev_alloc_skb(pkt_len+2); + if (skb == NULL) + { + printk("%s: Memory squeeze, deferring packet.\n", dev->name); + for (i=0; i < RX_RING_SIZE; i++) + if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0) + break; + + if (i > RX_RING_SIZE -2) + { + dev->stats.rx_dropped++; + lp->rx_ring[entry].base |= 0x80000000; + lp->cur_rx++; + } + break; + } + skb_reserve(skb,2); /* 16 byte align */ + skb_put(skb,pkt_len); /* Make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), + pkt_len); + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } + /* The docs say that the buffer length isn't touched, but Andrew Boyd + of QNX reports that some revs of the 79C965 clear it. */ + lp->rx_ring[entry].buf_length = -PKT_BUF_SZ; + lp->rx_ring[entry].base |= 0x80000000; + entry = (++lp->cur_rx) & RX_RING_MOD_MASK; + } + + /* We should check that at least two ring entries are free. If not, + we should free one and mark stats->rx_dropped++. */ + + return 0; +} + +static int +lance_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct lance_private *lp = dev->ml_priv; + + netif_stop_queue (dev); + + if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { + outw(112, ioaddr+LANCE_ADDR); + dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); + } + outw(0, ioaddr+LANCE_ADDR); + + if (lance_debug > 1) + printk("%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, inw(ioaddr+LANCE_DATA)); + + /* We stop the LANCE here -- it occasionally polls + memory if we don't. 
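   The STOP bit is 0x0004 of CSR0, which is still the selected register here.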
*/ + outw(0x0004, ioaddr+LANCE_DATA); + + if (dev->dma != 4) + { + unsigned long flags=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(flags); + } + free_irq(dev->irq, dev); + + lance_purge_ring(dev); + + return 0; +} + +static struct net_device_stats *lance_get_stats(struct net_device *dev) +{ + struct lance_private *lp = dev->ml_priv; + + if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { + short ioaddr = dev->base_addr; + short saved_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->devlock, flags); + saved_addr = inw(ioaddr+LANCE_ADDR); + outw(112, ioaddr+LANCE_ADDR); + dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA); + outw(saved_addr, ioaddr+LANCE_ADDR); + spin_unlock_irqrestore(&lp->devlock, flags); + } + + return &dev->stats; +} + +/* Set or clear the multicast filter for this adaptor. + */ + +static void set_multicast_list(struct net_device *dev) +{ + short ioaddr = dev->base_addr; + + outw(0, ioaddr+LANCE_ADDR); + outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */ + + if (dev->flags&IFF_PROMISC) { + outw(15, ioaddr+LANCE_ADDR); + outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int i; + int num_addrs=netdev_mc_count(dev); + if(dev->flags&IFF_ALLMULTI) + num_addrs=1; + /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */ + memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table)); + for (i = 0; i < 4; i++) { + outw(8 + i, ioaddr+LANCE_ADDR); + outw(multicast_table[i], ioaddr+LANCE_DATA); + } + outw(15, ioaddr+LANCE_ADDR); + outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */ + } + + lance_restart(dev, 0x0142, 0); /* Resume normal operation */ + +} + diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c new file mode 100644 index 000000000..0660ac584 --- /dev/null +++ b/drivers/net/ethernet/amd/mvme147.c @@ -0,0 +1,200 @@ +/* mvme147.c : the Linux/mvme147/lance ethernet driver + * + * Copyright (C) 05/1998 Peter Maydell + * Based on the Sun Lance driver and the NetBSD HP Lance driver + * Uses the generic 7990.c LANCE code. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* Used for the temporal inet entries and routing */ +#include +#include +#include +#include +#include + +#include +#include +#include + +/* We have 32K of RAM for the init block and buffers. This places + * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx + * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes. + */ +#define LANCE_LOG_TX_BUFFERS 1 +#define LANCE_LOG_RX_BUFFERS 3 + +#include "7990.h" /* use generic LANCE code */ + +/* Our private data structure */ +struct m147lance_private { + struct lance_private lance; + unsigned long ram; +}; + +/* function prototypes... This is easy because all the grot is in the + * generic LANCE support. All we have to support is probing for boards, + * plus board-specific init, open and close actions. + * Oh, and we need to tell the generic code how to read and write LANCE registers... 
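+ * (The 7990 exposes just two ports: RAP selects a CSR number and RDP reads or
+ * writes the selected CSR; the writerap/writerdp/readrdp hooks below wrap the
+ * MVME147's big-endian out_be16()/in_be16() accesses to those ports.)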
+ */ +static int m147lance_open(struct net_device *dev); +static int m147lance_close(struct net_device *dev); +static void m147lance_writerap(struct lance_private *lp, unsigned short value); +static void m147lance_writerdp(struct lance_private *lp, unsigned short value); +static unsigned short m147lance_readrdp(struct lance_private *lp); + +typedef void (*writerap_t)(void *, unsigned short); +typedef void (*writerdp_t)(void *, unsigned short); +typedef unsigned short (*readrdp_t)(void *); + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = m147lance_open, + .ndo_stop = m147lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_rx_mode = lance_set_multicast, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +/* Initialise the one and only on-board 7990 */ +struct net_device * __init mvme147lance_probe(int unit) +{ + struct net_device *dev; + static int called; + static const char name[] = "MVME147 LANCE"; + struct m147lance_private *lp; + u_long *addr; + u_long address; + int err; + + if (!MACH_IS_MVME147 || called) + return ERR_PTR(-ENODEV); + called++; + + dev = alloc_etherdev(sizeof(struct m147lance_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) + sprintf(dev->name, "eth%d", unit); + + /* Fill the dev fields */ + dev->base_addr = (unsigned long)MVME147_LANCE_BASE; + dev->netdev_ops = &lance_netdev_ops; + dev->dma = 0; + + addr = (u_long *)ETHERNET_ADDRESS; + address = *addr; + dev->dev_addr[0] = 0x08; + dev->dev_addr[1] = 0x00; + dev->dev_addr[2] = 0x3e; + address = address >> 8; + dev->dev_addr[5] = address&0xff; + address = address >> 8; + dev->dev_addr[4] = address&0xff; + address = address >> 8; + dev->dev_addr[3] = address&0xff; + + printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n", + dev->name, dev->base_addr, MVME147_LANCE_IRQ, + dev->dev_addr); + + lp = netdev_priv(dev); + lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */ + if (!lp->ram) { + printk("%s: No memory for LANCE buffers\n", dev->name); + free_netdev(dev); + return ERR_PTR(-ENOMEM); + } + + lp->lance.name = name; + lp->lance.base = dev->base_addr; + lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */ + lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */ + lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ + lp->lance.irq = MVME147_LANCE_IRQ; + lp->lance.writerap = (writerap_t)m147lance_writerap; + lp->lance.writerdp = (writerdp_t)m147lance_writerdp; + lp->lance.readrdp = (readrdp_t)m147lance_readrdp; + lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS; + lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; + lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK; + lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK; + + err = register_netdev(dev); + if (err) { + free_pages(lp->ram, 3); + free_netdev(dev); + return ERR_PTR(err); + } + + return dev; +} + +static void m147lance_writerap(struct lance_private *lp, unsigned short value) +{ + out_be16(lp->base + LANCE_RAP, value); +} + +static void m147lance_writerdp(struct lance_private *lp, unsigned short value) +{ + out_be16(lp->base + LANCE_RDP, value); +} + +static unsigned short m147lance_readrdp(struct lance_private *lp) +{ + return in_be16(lp->base + LANCE_RDP); +} + +static int m147lance_open(struct net_device *dev) +{ + int status; + + status = lance_open(dev); /* call generic lance open code */ + if (status) + return status; + 
/* enable interrupts at board level. */ + m147_pcc->lan_cntrl = 0; /* clear the interrupts (if any) */ + m147_pcc->lan_cntrl = 0x08 | 0x04; /* Enable irq 4 */ + + return 0; +} + +static int m147lance_close(struct net_device *dev) +{ + /* disable interrupts at boardlevel */ + m147_pcc->lan_cntrl = 0x0; /* disable interrupts */ + lance_close(dev); + return 0; +} + +#ifdef MODULE +MODULE_LICENSE("GPL"); + +static struct net_device *dev_mvme147_lance; +int __init init_module(void) +{ + dev_mvme147_lance = mvme147lance_probe(-1); + return PTR_ERR_OR_ZERO(dev_mvme147_lance); +} + +void __exit cleanup_module(void) +{ + struct m147lance_private *lp = netdev_priv(dev_mvme147_lance); + unregister_netdev(dev_mvme147_lance); + free_pages(lp->ram, 3); + free_netdev(dev_mvme147_lance); +} + +#endif /* MODULE */ diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c new file mode 100644 index 000000000..1cf33addd --- /dev/null +++ b/drivers/net/ethernet/amd/ni65.c @@ -0,0 +1,1252 @@ +/* + * ni6510 (am7990 'lance' chip) driver for Linux-net-3 + * BETAcode v0.71 (96/09/29) for 2.0.0 (or later) + * copyrights (c) 1994,1995,1996 by M.Hipp + * + * This driver can handle the old ni6510 board and the newer ni6510 + * EtherBlaster. (probably it also works with every full NE2100 + * compatible card) + * + * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7 + * + * This is an extension to the Linux operating system, and is covered by the + * same GNU General Public License that covers the Linux-kernel. + * + * comments/bugs/suggestions can be sent to: + * Michael Hipp + * email: hippm@informatik.uni-tuebingen.de + * + * sources: + * some things are from the 'ni6510-packet-driver for dos by Russ Nelson' + * and from the original drivers by D.Becker + * + * known problems: + * - on some PCI boards (including my own) the card/board/ISA-bridge has + * problems with bus master DMA. This results in lotsa overruns. + * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef + * the XMT and RCV_VIA_SKB option .. this reduces driver performance. + * Or just play with your BIOS options to optimize ISA-DMA access. + * Maybe you also wanna play with the LOW_PERFORAMCE and MID_PERFORMANCE + * defines -> please report me your experience then + * - Harald reported for ASUS SP3G mainboards, that you should use + * the 'optimal settings' from the user's manual on page 3-12! + * + * credits: + * thanx to Jason Sullivan for sending me a ni6510 card! + * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig + * + * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB) + * average: FTP -> 8384421 bytes received in 8.5 seconds + * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS) + * peak: FTP -> 8384421 bytes received in 7.5 seconds + * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS) + */ + +/* + * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK) + * 96.Sept.29: virt_to_bus stuff added for new memory modell + * 96.April.29: Added Harald Koenig's Patches (MH) + * 96.April.13: enhanced error handling .. more tests (MH) + * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH) + * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH) + * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH) + * hopefully no more 16MB limit + * + * 95.Nov.18: multicast tweaked (AC). 
+ * + * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH) + * + * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ni65.h" + +/* + * the current setting allows an acceptable performance + * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in + * the header of this file + * 'invert' the defines for max. performance. This may cause DMA problems + * on some boards (e.g on my ASUS SP3G) + */ +#undef XMT_VIA_SKB +#undef RCV_VIA_SKB +#define RCV_PARANOIA_CHECK + +#define MID_PERFORMANCE + +#if defined( LOW_PERFORMANCE ) + static int isa0=7,isa1=7,csr80=0x0c10; +#elif defined( MID_PERFORMANCE ) + static int isa0=5,isa1=5,csr80=0x2810; +#else /* high performance */ + static int isa0=4,isa1=4,csr80=0x0017; +#endif + +/* + * a few card/vendor specific defines + */ +#define NI65_ID0 0x00 +#define NI65_ID1 0x55 +#define NI65_EB_ID0 0x52 +#define NI65_EB_ID1 0x44 +#define NE2100_ID0 0x57 +#define NE2100_ID1 0x57 + +#define PORT p->cmdr_addr + +/* + * buffer configuration + */ +#if 1 +#define RMDNUM 16 +#define RMDNUMMASK 0x80000000 +#else +#define RMDNUM 8 +#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */ +#endif + +#if 0 +#define TMDNUM 1 +#define TMDNUMMASK 0x00000000 +#else +#define TMDNUM 4 +#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */ +#endif + +/* slightly oversized */ +#define R_BUF_SIZE 1544 +#define T_BUF_SIZE 1544 + +/* + * lance register defines + */ +#define L_DATAREG 0x00 +#define L_ADDRREG 0x02 +#define L_RESET 0x04 +#define L_CONFIG 0x05 +#define L_BUSIF 0x06 + +/* + * to access the lance/am7990-regs, you have to write + * reg-number into L_ADDRREG, then you can access it using L_DATAREG + */ +#define CSR0 0x00 +#define CSR1 0x01 +#define CSR2 0x02 +#define CSR3 0x03 + +#define INIT_RING_BEFORE_START 0x1 +#define FULL_RESET_ON_ERROR 0x2 + +#if 0 +#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \ + outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} +#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\ + inw(PORT+L_DATAREG)) +#if 0 +#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} +#else +#define writedatareg(val) { writereg(val,CSR0); } +#endif +#else +#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);} +#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG)) +#define writedatareg(val) { writereg(val,CSR0); } +#endif + +static unsigned char ni_vendor[] = { 0x02,0x07,0x01 }; + +static struct card { + unsigned char id0,id1; + short id_offset; + short total_size; + short cmd_offset; + short addr_offset; + unsigned char *vendor_id; + char *cardname; + unsigned long config; +} cards[] = { + { + .id0 = NI65_ID0, + .id1 = NI65_ID1, + .id_offset = 0x0e, + .total_size = 0x10, + .cmd_offset = 0x0, + .addr_offset = 0x8, + .vendor_id = ni_vendor, + .cardname = "ni6510", + .config = 0x1, + }, + { + .id0 = NI65_EB_ID0, + .id1 = NI65_EB_ID1, + .id_offset = 0x0e, + .total_size = 0x18, + .cmd_offset = 0x10, + .addr_offset = 0x0, + .vendor_id = ni_vendor, + .cardname = "ni6510 EtherBlaster", + .config = 0x2, + }, + { + .id0 = NE2100_ID0, + .id1 = NE2100_ID1, + .id_offset = 0x0e, + .total_size = 0x18, + .cmd_offset = 0x10, + .addr_offset = 0x0, + .vendor_id = NULL, + .cardname = "generic NE2100", + .config = 0x0, + }, +}; +#define NUM_CARDS 3 + +struct priv +{ 
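+	/* The descriptor rings and the init block are embedded here, so the
+	 * whole structure is placed by ni65_alloc_buffer() in a GFP_DMA
+	 * allocation below the 16MB ISA limit, rounded up to 8-byte alignment.
+	 */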
+ struct rmd rmdhead[RMDNUM]; + struct tmd tmdhead[TMDNUM]; + struct init_block ib; + int rmdnum; + int tmdnum,tmdlast; +#ifdef RCV_VIA_SKB + struct sk_buff *recv_skb[RMDNUM]; +#else + void *recvbounce[RMDNUM]; +#endif +#ifdef XMT_VIA_SKB + struct sk_buff *tmd_skb[TMDNUM]; +#endif + void *tmdbounce[TMDNUM]; + int tmdbouncenum; + int lock,xmit_queued; + + void *self; + int cmdr_addr; + int cardno; + int features; + spinlock_t ring_lock; +}; + +static int ni65_probe1(struct net_device *dev,int); +static irqreturn_t ni65_interrupt(int irq, void * dev_id); +static void ni65_recv_intr(struct net_device *dev,int); +static void ni65_xmit_intr(struct net_device *dev,int); +static int ni65_open(struct net_device *dev); +static int ni65_lance_reinit(struct net_device *dev); +static void ni65_init_lance(struct priv *p,unsigned char*,int,int); +static netdev_tx_t ni65_send_packet(struct sk_buff *skb, + struct net_device *dev); +static void ni65_timeout(struct net_device *dev); +static int ni65_close(struct net_device *dev); +static int ni65_alloc_buffer(struct net_device *dev); +static void ni65_free_buffer(struct priv *p); +static void set_multicast_list(struct net_device *dev); + +static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */ +static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */ + +static int debuglevel = 1; + +/* + * set 'performance' registers .. we must STOP lance for that + */ +static void ni65_set_performance(struct priv *p) +{ + writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */ + + if( !(cards[p->cardno].config & 0x02) ) + return; + + outw(80,PORT+L_ADDRREG); + if(inw(PORT+L_ADDRREG) != 80) + return; + + writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */ + outw(0,PORT+L_ADDRREG); + outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */ + outw(1,PORT+L_ADDRREG); + outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */ + + outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */ +} + +/* + * open interface (up) + */ +static int ni65_open(struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + int irqval = request_irq(dev->irq, ni65_interrupt,0, + cards[p->cardno].cardname,dev); + if (irqval) { + printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n", + dev->name,dev->irq, irqval); + return -EAGAIN; + } + + if(ni65_lance_reinit(dev)) + { + netif_start_queue(dev); + return 0; + } + else + { + free_irq(dev->irq,dev); + return -EAGAIN; + } +} + +/* + * close interface (down) + */ +static int ni65_close(struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + + netif_stop_queue(dev); + + outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */ + +#ifdef XMT_VIA_SKB + { + int i; + for(i=0;itmd_skb[i]) { + dev_kfree_skb(p->tmd_skb[i]); + p->tmd_skb[i] = NULL; + } + } + } +#endif + free_irq(dev->irq,dev); + return 0; +} + +static void cleanup_card(struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + disable_dma(dev->dma); + free_dma(dev->dma); + release_region(dev->base_addr, cards[p->cardno].total_size); + ni65_free_buffer(p); +} + +/* set: io,irq,dma or set it when calling insmod */ +static int irq; +static int io; +static int dma; + +/* + * Probe The Card (not the lance-chip) + */ +struct net_device * __init ni65_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(0); + static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 }; + const int *port; + int err = 0; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + 
netdev_boot_setup_check(dev); + irq = dev->irq; + dma = dev->dma; + } else { + dev->base_addr = io; + } + + if (dev->base_addr > 0x1ff) { /* Check a single specified location. */ + err = ni65_probe1(dev, dev->base_addr); + } else if (dev->base_addr > 0) { /* Don't probe at all. */ + err = -ENXIO; + } else { + for (port = ports; *port && ni65_probe1(dev, *port); port++) + ; + if (!*port) + err = -ENODEV; + } + if (err) + goto out; + + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + cleanup_card(dev); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops ni65_netdev_ops = { + .ndo_open = ni65_open, + .ndo_stop = ni65_close, + .ndo_start_xmit = ni65_send_packet, + .ndo_tx_timeout = ni65_timeout, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/* + * this is the real card probe .. + */ +static int __init ni65_probe1(struct net_device *dev,int ioaddr) +{ + int i,j; + struct priv *p; + unsigned long flags; + + dev->irq = irq; + dev->dma = dma; + + for(i=0;i= 0) { + if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 || + inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) { + release_region(ioaddr, cards[i].total_size); + continue; + } + } + if(cards[i].vendor_id) { + for(j=0;j<3;j++) + if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) { + release_region(ioaddr, cards[i].total_size); + continue; + } + } + break; + } + if(i == NUM_CARDS) + return -ENODEV; + + for(j=0;j<6;j++) + dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j); + + if( (j=ni65_alloc_buffer(dev)) < 0) { + release_region(ioaddr, cards[i].total_size); + return j; + } + p = dev->ml_priv; + p->cmdr_addr = ioaddr + cards[i].cmd_offset; + p->cardno = i; + spin_lock_init(&p->ring_lock); + + printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr); + + outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ + if( (j=readreg(CSR0)) != 0x4) { + printk("failed.\n"); + printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j); + ni65_free_buffer(p); + release_region(ioaddr, cards[p->cardno].total_size); + return -EAGAIN; + } + + outw(88,PORT+L_ADDRREG); + if(inw(PORT+L_ADDRREG) == 88) { + unsigned long v; + v = inw(PORT+L_DATAREG); + v <<= 16; + outw(89,PORT+L_ADDRREG); + v |= inw(PORT+L_DATAREG); + printk("Version %#08lx, ",v); + p->features = INIT_RING_BEFORE_START; + } + else { + printk("ancient LANCE, "); + p->features = 0x0; + } + + if(test_bit(0,&cards[i].config)) { + dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3]; + dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3]; + printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma); + } + else { + if(dev->dma == 0) { + /* 'stuck test' from lance.c */ + unsigned long dma_channels = + ((inb(DMA1_STAT_REG) >> 4) & 0x0f) + | (inb(DMA2_STAT_REG) & 0xf0); + for(i=1;i<5;i++) { + int dma = dmatab[i]; + if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510")) + continue; + + flags=claim_dma_lock(); + disable_dma(dma); + set_dma_mode(dma,DMA_MODE_CASCADE); + enable_dma(dma); + release_dma_lock(flags); + + ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */ + + flags=claim_dma_lock(); + disable_dma(dma); + free_dma(dma); + release_dma_lock(flags); + + if(readreg(CSR0) & CSR0_IDON) + break; + } + if(i == 5) { + printk("failed.\n"); + printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name); + ni65_free_buffer(p); + release_region(ioaddr, 
cards[p->cardno].total_size); + return -EAGAIN; + } + dev->dma = dmatab[i]; + printk("DMA %d (autodetected), ",dev->dma); + } + else + printk("DMA %d (assigned), ",dev->dma); + + if(dev->irq < 2) + { + unsigned long irq_mask; + + ni65_init_lance(p,dev->dev_addr,0,0); + irq_mask = probe_irq_on(); + writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */ + msleep(20); + dev->irq = probe_irq_off(irq_mask); + if(!dev->irq) + { + printk("Failed to detect IRQ line!\n"); + ni65_free_buffer(p); + release_region(ioaddr, cards[p->cardno].total_size); + return -EAGAIN; + } + printk("IRQ %d (autodetected).\n",dev->irq); + } + else + printk("IRQ %d (assigned).\n",dev->irq); + } + + if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0) + { + printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma); + ni65_free_buffer(p); + release_region(ioaddr, cards[p->cardno].total_size); + return -EAGAIN; + } + + dev->base_addr = ioaddr; + dev->netdev_ops = &ni65_netdev_ops; + dev->watchdog_timeo = HZ/2; + + return 0; /* everything is OK */ +} + +/* + * set lance register and trigger init + */ +static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode) +{ + int i; + u32 pib; + + writereg(CSR0_CLRALL|CSR0_STOP,CSR0); + + for(i=0;i<6;i++) + p->ib.eaddr[i] = daddr[i]; + + for(i=0;i<8;i++) + p->ib.filter[i] = filter; + p->ib.mode = mode; + + p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK; + p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK; + writereg(0,CSR3); /* busmaster/no word-swap */ + pib = (u32) isa_virt_to_bus(&p->ib); + writereg(pib & 0xffff,CSR1); + writereg(pib >> 16,CSR2); + + writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */ + + for(i=0;i<32;i++) + { + mdelay(4); + if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) ) + break; /* init ok ? */ + } +} + +/* + * allocate memory area and check the 16MB border + */ +static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type) +{ + struct sk_buff *skb=NULL; + unsigned char *ptr; + void *ret; + + if(type) { + ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA); + if(!skb) { + printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what); + return NULL; + } + skb_reserve(skb,2+16); + skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ + ptr = skb->data; + } + else { + ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA); + if(!ret) + return NULL; + } + if( (u32) virt_to_phys(ptr+size) > 0x1000000) { + printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what); + if(type) + kfree_skb(skb); + else + kfree(ptr); + return NULL; + } + return ret; +} + +/* + * allocate all memory structures .. send/recv buffers etc ... + */ +static int ni65_alloc_buffer(struct net_device *dev) +{ + unsigned char *ptr; + struct priv *p; + int i; + + /* + * we need 8-aligned memory .. 
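+ * (ni65_alloc_mem() only returns a plain kmalloc pointer, so 8 spare bytes are
+ * requested and the result is rounded up with ((unsigned long)ptr + 7) & ~0x7;
+ * the original pointer is kept in p->self so it can be kfree()d later)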
+ */ + ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0); + if(!ptr) + return -ENOMEM; + + p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7); + memset((char *)p, 0, sizeof(struct priv)); + p->self = ptr; + + for(i=0;itmd_skb[i] = NULL; +#endif + p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0); + if(!p->tmdbounce[i]) { + ni65_free_buffer(p); + return -ENOMEM; + } + } + + for(i=0;irecv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1); + if(!p->recv_skb[i]) { + ni65_free_buffer(p); + return -ENOMEM; + } +#else + p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0); + if(!p->recvbounce[i]) { + ni65_free_buffer(p); + return -ENOMEM; + } +#endif + } + + return 0; /* everything is OK */ +} + +/* + * free buffers and private struct + */ +static void ni65_free_buffer(struct priv *p) +{ + int i; + + if(!p) + return; + + for(i=0;itmdbounce[i]); +#ifdef XMT_VIA_SKB + if(p->tmd_skb[i]) + dev_kfree_skb(p->tmd_skb[i]); +#endif + } + + for(i=0;irecv_skb[i]) + dev_kfree_skb(p->recv_skb[i]); +#else + kfree(p->recvbounce[i]); +#endif + } + kfree(p->self); +} + + +/* + * stop and (re)start lance .. e.g after an error + */ +static void ni65_stop_start(struct net_device *dev,struct priv *p) +{ + int csr0 = CSR0_INEA; + + writedatareg(CSR0_STOP); + + if(debuglevel > 1) + printk(KERN_DEBUG "ni65_stop_start\n"); + + if(p->features & INIT_RING_BEFORE_START) { + int i; +#ifdef XMT_VIA_SKB + struct sk_buff *skb_save[TMDNUM]; +#endif + unsigned long buffer[TMDNUM]; + short blen[TMDNUM]; + + if(p->xmit_queued) { + while(1) { + if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN)) + break; + p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); + if(p->tmdlast == p->tmdnum) + break; + } + } + + for(i=0;itmdhead + i; +#ifdef XMT_VIA_SKB + skb_save[i] = p->tmd_skb[i]; +#endif + buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); + blen[i] = tmdp->blen; + tmdp->u.s.status = 0x0; + } + + for(i=0;irmdhead + i; + rmdp->u.s.status = RCV_OWN; + } + p->tmdnum = p->xmit_queued = 0; + writedatareg(CSR0_STRT | csr0); + + for(i=0;itmdlast) & (TMDNUM-1); + p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */ + p->tmdhead[i].blen = blen[num]; + if(p->tmdhead[i].u.s.status & XMIT_OWN) { + p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); + p->xmit_queued = 1; + writedatareg(CSR0_TDMD | CSR0_INEA | csr0); + } +#ifdef XMT_VIA_SKB + p->tmd_skb[i] = skb_save[num]; +#endif + } + p->rmdnum = p->tmdlast = 0; + if(!p->lock) + if (p->tmdnum || !p->xmit_queued) + netif_wake_queue(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + } + else + writedatareg(CSR0_STRT | csr0); +} + +/* + * init lance (write init-values .. 
init-buffers) (open-helper) + */ +static int ni65_lance_reinit(struct net_device *dev) +{ + int i; + struct priv *p = dev->ml_priv; + unsigned long flags; + + p->lock = 0; + p->xmit_queued = 0; + + flags=claim_dma_lock(); + disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */ + set_dma_mode(dev->dma,DMA_MODE_CASCADE); + enable_dma(dev->dma); + release_dma_lock(flags); + + outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */ + if( (i=readreg(CSR0) ) != 0x4) + { + printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name, + cards[p->cardno].cardname,(int) i); + flags=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(flags); + return 0; + } + + p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0; + for(i=0;itmdhead + i; +#ifdef XMT_VIA_SKB + if(p->tmd_skb[i]) { + dev_kfree_skb(p->tmd_skb[i]); + p->tmd_skb[i] = NULL; + } +#endif + tmdp->u.buffer = 0x0; + tmdp->u.s.status = XMIT_START | XMIT_END; + tmdp->blen = tmdp->status2 = 0; + } + + for(i=0;irmdhead + i; +#ifdef RCV_VIA_SKB + rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data); +#else + rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]); +#endif + rmdp->blen = -(R_BUF_SIZE-8); + rmdp->mlen = 0; + rmdp->u.s.status = RCV_OWN; + } + + if(dev->flags & IFF_PROMISC) + ni65_init_lance(p,dev->dev_addr,0x00,M_PROM); + else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI) + ni65_init_lance(p,dev->dev_addr,0xff,0x0); + else + ni65_init_lance(p,dev->dev_addr,0x00,0x00); + + /* + * ni65_set_lance_mem() sets L_ADDRREG to CSR0 + * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED + */ + + if(inw(PORT+L_DATAREG) & CSR0_IDON) { + ni65_set_performance(p); + /* init OK: start lance , enable interrupts */ + writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT); + return 1; /* ->OK */ + } + printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG)); + flags=claim_dma_lock(); + disable_dma(dev->dma); + release_dma_lock(flags); + return 0; /* ->Error */ +} + +/* + * interrupt handler + */ +static irqreturn_t ni65_interrupt(int irq, void * dev_id) +{ + int csr0 = 0; + struct net_device *dev = dev_id; + struct priv *p; + int bcnt = 32; + + p = dev->ml_priv; + + spin_lock(&p->ring_lock); + + while(--bcnt) { + csr0 = inw(PORT+L_DATAREG); + +#if 0 + writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */ +#else + writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */ +#endif + + if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT))) + break; + + if(csr0 & CSR0_RINT) /* RECV-int? */ + ni65_recv_intr(dev,csr0); + if(csr0 & CSR0_TINT) /* XMIT-int? */ + ni65_xmit_intr(dev,csr0); + + if(csr0 & CSR0_ERR) + { + if(debuglevel > 1) + printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); + if(csr0 & CSR0_BABL) + dev->stats.tx_errors++; + if(csr0 & CSR0_MISS) { + int i; + for(i=0;irmdhead[i].u.s.status); + printk("\n"); + dev->stats.rx_errors++; + } + if(csr0 & CSR0_MERR) { + if(debuglevel > 1) + printk(KERN_ERR "%s: Ooops .. 
memory error: %04x.\n",dev->name,csr0); + ni65_stop_start(dev,p); + } + } + } + +#ifdef RCV_PARANOIA_CHECK +{ + int j; + for(j=0;j0;i--) { + num2 = (p->rmdnum + i) & (RMDNUM-1); + if(!(p->rmdhead[num2].u.s.status & RCV_OWN)) + break; + } + + if(i) { + int k, num1; + for(k=0;krmdnum + k) & (RMDNUM-1); + if(!(p->rmdhead[num1].u.s.status & RCV_OWN)) + break; + } + if(!k) + break; + + if(debuglevel > 0) + { + char buf[256],*buf1; + buf1 = buf; + for(k=0;krmdhead[k].u.s.status)); /* & RCV_OWN) ); */ + buf1 += 3; + } + *buf1 = 0; + printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf); + } + + p->rmdnum = num1; + ni65_recv_intr(dev,csr0); + if((p->rmdhead[num2].u.s.status & RCV_OWN)) + break; /* ok, we are 'in sync' again */ + } + else + break; + } +} +#endif + + if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) { + printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name); + ni65_stop_start(dev,p); + } + else + writedatareg(CSR0_INEA); + + spin_unlock(&p->ring_lock); + return IRQ_HANDLED; +} + +/* + * We have received an Xmit-Interrupt .. + * send a new packet if necessary + */ +static void ni65_xmit_intr(struct net_device *dev,int csr0) +{ + struct priv *p = dev->ml_priv; + + while(p->xmit_queued) + { + struct tmd *tmdp = p->tmdhead + p->tmdlast; + int tmdstat = tmdp->u.s.status; + + if(tmdstat & XMIT_OWN) + break; + + if(tmdstat & XMIT_ERR) + { +#if 0 + if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3) + printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name); +#endif + /* checking some errors */ + if(tmdp->status2 & XMIT_RTRY) + dev->stats.tx_aborted_errors++; + if(tmdp->status2 & XMIT_LCAR) + dev->stats.tx_carrier_errors++; + if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) { + /* this stops the xmitter */ + dev->stats.tx_fifo_errors++; + if(debuglevel > 0) + printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name); + if(p->features & INIT_RING_BEFORE_START) { + tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */ + ni65_stop_start(dev,p); + break; /* no more Xmit processing .. */ + } + else + ni65_stop_start(dev,p); + } + if(debuglevel > 2) + printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2); + if(!(csr0 & CSR0_BABL)) /* don't count errors twice */ + dev->stats.tx_errors++; + tmdp->status2 = 0; + } + else { + dev->stats.tx_bytes -= (short)(tmdp->blen); + dev->stats.tx_packets++; + } + +#ifdef XMT_VIA_SKB + if(p->tmd_skb[p->tmdlast]) { + dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]); + p->tmd_skb[p->tmdlast] = NULL; + } +#endif + + p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); + if(p->tmdlast == p->tmdnum) + p->xmit_queued = 0; + } + netif_wake_queue(dev); +} + +/* + * We have received a packet + */ +static void ni65_recv_intr(struct net_device *dev,int csr0) +{ + struct rmd *rmdp; + int rmdstat,len; + int cnt=0; + struct priv *p = dev->ml_priv; + + rmdp = p->rmdhead + p->rmdnum; + while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN)) + { + cnt++; + if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? 
*/ + { + if(!(rmdstat & RCV_ERR)) { + if(rmdstat & RCV_START) + { + dev->stats.rx_length_errors++; + printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff); + } + } + else { + if(debuglevel > 2) + printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n", + dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) ); + if(rmdstat & RCV_FRAM) + dev->stats.rx_frame_errors++; + if(rmdstat & RCV_OFLO) + dev->stats.rx_over_errors++; + if(rmdstat & RCV_CRC) + dev->stats.rx_crc_errors++; + if(rmdstat & RCV_BUF_ERR) + dev->stats.rx_fifo_errors++; + } + if(!(csr0 & CSR0_MISS)) /* don't count errors twice */ + dev->stats.rx_errors++; + } + else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) + { +#ifdef RCV_VIA_SKB + struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC); + if (skb) + skb_reserve(skb,16); +#else + struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); +#endif + if(skb) + { + skb_reserve(skb,2); +#ifdef RCV_VIA_SKB + if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { + skb_put(skb,len); + skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len); + } + else { + struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; + skb_put(skb,R_BUF_SIZE); + p->recv_skb[p->rmdnum] = skb; + rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); + skb = skb1; + skb_trim(skb,len); + } +#else + skb_put(skb,len); + skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); +#endif + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + } + else + { + printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name); + dev->stats.rx_dropped++; + } + } + else { + printk(KERN_INFO "%s: received runt packet\n",dev->name); + dev->stats.rx_errors++; + } + rmdp->blen = -(R_BUF_SIZE-8); + rmdp->mlen = 0; + rmdp->u.s.status = RCV_OWN; /* change owner */ + p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1); + rmdp = p->rmdhead + p->rmdnum; + } +} + +/* + * kick xmitter .. + */ + +static void ni65_timeout(struct net_device *dev) +{ + int i; + struct priv *p = dev->ml_priv; + + printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name); + for(i=0;itmdhead[i].u.s.status); + printk("\n"); + ni65_lance_reinit(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * Send a packet + */ + +static netdev_tx_t ni65_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct priv *p = dev->ml_priv; + + netif_stop_queue(dev); + + if (test_and_set_bit(0, (void*)&p->lock)) { + printk(KERN_ERR "%s: Queue was locked.\n", dev->name); + return NETDEV_TX_BUSY; + } + + { + short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + struct tmd *tmdp; + unsigned long flags; + +#ifdef XMT_VIA_SKB + if( (unsigned long) (skb->data + skb->len) > 0x1000000) { +#endif + + skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], + skb->len > T_BUF_SIZE ? 
T_BUF_SIZE : + skb->len); + if (len > skb->len) + memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); + dev_kfree_skb (skb); + + spin_lock_irqsave(&p->ring_lock, flags); + tmdp = p->tmdhead + p->tmdnum; + tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]); + p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1); + +#ifdef XMT_VIA_SKB + } + else { + spin_lock_irqsave(&p->ring_lock, flags); + + tmdp = p->tmdhead + p->tmdnum; + tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data); + p->tmd_skb[p->tmdnum] = skb; + } +#endif + tmdp->blen = -len; + + tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; + writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */ + + p->xmit_queued = 1; + p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); + + if(p->tmdnum != p->tmdlast) + netif_wake_queue(dev); + + p->lock = 0; + + spin_unlock_irqrestore(&p->ring_lock, flags); + } + + return NETDEV_TX_OK; +} + +static void set_multicast_list(struct net_device *dev) +{ + if(!ni65_lance_reinit(dev)) + printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name); + netif_wake_queue(dev); +} + +#ifdef MODULE +static struct net_device *dev_ni65; + +module_param(irq, int, 0); +module_param(io, int, 0); +module_param(dma, int, 0); +MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); +MODULE_PARM_DESC(io, "ni6510 I/O base address"); +MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); + +int __init init_module(void) +{ + dev_ni65 = ni65_probe(-1); + return PTR_ERR_OR_ZERO(dev_ni65); +} + +void __exit cleanup_module(void) +{ + unregister_netdev(dev_ni65); + cleanup_card(dev_ni65); + free_netdev(dev_ni65); +} +#endif /* MODULE */ + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h new file mode 100644 index 000000000..e6217e35e --- /dev/null +++ b/drivers/net/ethernet/amd/ni65.h @@ -0,0 +1,121 @@ +/* am7990 (lance) definitions + * + * This is an extension to the Linux operating system, and is covered by + * same GNU General Public License that covers that work. + * + * Michael Hipp + * email: mhipp@student.uni-tuebingen.de + * + * sources: (mail me or ask archie if you need them) + * crynwr-packet-driver + */ + +/* + * Control and Status Register 0 (CSR0) bit definitions + * (R=Readable) (W=Writeable) (S=Set on write) (C-Clear on write) + * + */ + +#define CSR0_ERR 0x8000 /* Error summary (R) */ +#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */ +#define CSR0_CERR 0x2000 /* Collision Error (RC) */ +#define CSR0_MISS 0x1000 /* Missed packet (RC) */ +#define CSR0_MERR 0x0800 /* Memory Error (RC) */ +#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */ +#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */ +#define CSR0_IDON 0x0100 /* Initialization Done (RC) */ +#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */ +#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */ +#define CSR0_RXON 0x0020 /* Receiver on (R) */ +#define CSR0_TXON 0x0010 /* Transmitter on (R) */ +#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */ +#define CSR0_STOP 0x0004 /* Stop (RS) */ +#define CSR0_STRT 0x0002 /* Start (RS) */ +#define CSR0_INIT 0x0001 /* Initialize (RS) */ + +#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */ +/* + * Initialization Block Mode operation Bit Definitions. 
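Per the legend above, writing a 1 back to any of the clearable (RC) bits acknowledges that condition, which is exactly what the ni65 interrupt handler does. A minimal sketch of that acknowledge idiom, with the resolved CSR0 data-port address passed in explicitly (the driver itself reaches it through its PORT and L_DATAREG macros):

static void lance_ack_csr0(unsigned int dataport)
{
	int csr0 = inw(dataport);	/* CSR0 is the selected register */

	/* write the set clearable bits back to acknowledge them and
	 * keep INEA so the chip can continue to raise interrupts */
	outw((csr0 & CSR0_CLRALL) | CSR0_INEA, dataport);
}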
+ */ + +#define M_PROM 0x8000 /* Promiscuous Mode */ +#define M_INTL 0x0040 /* Internal Loopback */ +#define M_DRTY 0x0020 /* Disable Retry */ +#define M_COLL 0x0010 /* Force Collision */ +#define M_DTCR 0x0008 /* Disable Transmit CRC) */ +#define M_LOOP 0x0004 /* Loopback */ +#define M_DTX 0x0002 /* Disable the Transmitter */ +#define M_DRX 0x0001 /* Disable the Receiver */ + + +/* + * Receive message descriptor bit definitions. + */ + +#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */ +#define RCV_ERR 0x40 /* Error Summary */ +#define RCV_FRAM 0x20 /* Framing Error */ +#define RCV_OFLO 0x10 /* Overflow Error */ +#define RCV_CRC 0x08 /* CRC Error */ +#define RCV_BUF_ERR 0x04 /* Buffer Error */ +#define RCV_START 0x02 /* Start of Packet */ +#define RCV_END 0x01 /* End of Packet */ + + +/* + * Transmit message descriptor bit definitions. + */ + +#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */ +#define XMIT_ERR 0x40 /* Error Summary */ +#define XMIT_RETRY 0x10 /* more the 1 retry needed to Xmit */ +#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */ +#define XMIT_DEF 0x04 /* Deferred */ +#define XMIT_START 0x02 /* Start of Packet */ +#define XMIT_END 0x01 /* End of Packet */ + +/* + * transmit status (2) (valid if XMIT_ERR == 1) + */ + +#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */ +#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */ +#define XMIT_LCAR 0x0800 /* Loss of Carrier */ +#define XMIT_LCOL 0x1000 /* Late collision */ +#define XMIT_RESERV 0x2000 /* Reserved */ +#define XMIT_UFLO 0x4000 /* Underflow (late memory) */ +#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */ + +struct init_block { + unsigned short mode; + unsigned char eaddr[6]; + unsigned char filter[8]; + /* bit 29-31: number of rmd's (power of 2) */ + u32 rrp; /* receive ring pointer (align 8) */ + /* bit 29-31: number of tmd's (power of 2) */ + u32 trp; /* transmit ring pointer (align 8) */ +}; + +struct rmd { /* Receive Message Descriptor */ + union { + volatile u32 buffer; + struct { + volatile unsigned char dummy[3]; + volatile unsigned char status; + } s; + } u; + volatile short blen; + volatile unsigned short mlen; +}; + +struct tmd { + union { + volatile u32 buffer; + struct { + volatile unsigned char dummy[3]; + volatile unsigned char status; + } s; + } u; + volatile unsigned short blen; + volatile unsigned short status2; +}; diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c new file mode 100644 index 000000000..27245efe9 --- /dev/null +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -0,0 +1,1512 @@ +/* ---------------------------------------------------------------------------- +Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN. + nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao + + The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media + Access Controller for Ethernet (MACE). It is essentially the Am2150 + PCMCIA Ethernet card contained in the Am2150 Demo Kit. + +Written by Roger C. Pao + Copyright 1995 Roger C. Pao + Linux 2.5 cleanups Copyright Red Hat 2003 + + This software may be used and distributed according to the terms of + the GNU General Public License. 
+ +Ported to Linux 1.3.* network driver environment by + Matti Aarnio + +References + + Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993 + Am79C940 (MACE) Data Sheet, 1994 + Am79C90 (C-LANCE) Data Sheet, 1994 + Linux PCMCIA Programmer's Guide v1.17 + /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8 + + Eric Mears, New Media Corporation + Tom Pollard, New Media Corporation + Dean Siasoyco, New Media Corporation + Ken Lesniak, Silicon Graphics, Inc. + Donald Becker + David Hinds + + The Linux client driver is based on the 3c589_cs.c client driver by + David Hinds. + + The Linux network driver outline is based on the 3c589_cs.c driver, + the 8390.c driver, and the example skeleton.c kernel code, which are + by Donald Becker. + + The Am2150 network driver hardware interface code is based on the + OS/9000 driver for the New Media Ethernet LAN by Eric Mears. + + Special thanks for testing and help in debugging this driver goes + to Ken Lesniak. + +------------------------------------------------------------------------------- +Driver Notes and Issues +------------------------------------------------------------------------------- + +1. Developed on a Dell 320SLi + PCMCIA Card Services 2.6.2 + Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386 + +2. rc.pcmcia may require loading pcmcia_core with io_speed=300: + 'insmod pcmcia_core.o io_speed=300'. + This will avoid problems with fast systems which causes rx_framecnt + to return random values. + +3. If hot extraction does not work for you, use 'ifconfig eth0 down' + before extraction. + +4. There is a bad slow-down problem in this driver. + +5. Future: Multicast processing. In the meantime, do _not_ compile your + kernel with multicast ip enabled. + +------------------------------------------------------------------------------- +History +------------------------------------------------------------------------------- +Log: nmclan_cs.c,v + * 2.5.75-ac1 2003/07/11 Alan Cox + * Fixed hang on card eject as we probe it + * Cleaned up to use new style locking. + * + * Revision 0.16 1995/07/01 06:42:17 rpao + * Bug fix: nmclan_reset() called CardServices incorrectly. + * + * Revision 0.15 1995/05/24 08:09:47 rpao + * Re-implement MULTI_TX dev->tbusy handling. + * + * Revision 0.14 1995/05/23 03:19:30 rpao + * Added, in nmclan_config(), "tuple.Attributes = 0;". + * Modified MACE ID check to ignore chip revision level. + * Avoid tx_free_frames race condition between _start_xmit and _interrupt. + * + * Revision 0.13 1995/05/18 05:56:34 rpao + * Statistics changes. + * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list. + * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup. + * + * Revision 0.12 1995/05/14 00:12:23 rpao + * Statistics overhaul. + * + +95/05/13 rpao V0.10a + Bug fix: MACE statistics counters used wrong I/O ports. + Bug fix: mace_interrupt() needed to allow statistics to be + processed without RX or TX interrupts pending. +95/05/11 rpao V0.10 + Multiple transmit request processing. + Modified statistics to use MACE counters where possible. +95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO. + *Released +95/05/10 rpao V0.08 + Bug fix: Make all non-exported functions private by using + static keyword. + Bug fix: Test IntrCnt _before_ reading MACE_IR. +95/05/10 rpao V0.07 Statistics. +95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states. 
+ +---------------------------------------------------------------------------- */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "nmclan_cs" +#define DRV_VERSION "0.16" + + +/* ---------------------------------------------------------------------------- +Conditional Compilation Options +---------------------------------------------------------------------------- */ + +#define MULTI_TX 0 +#define RESET_ON_TIMEOUT 1 +#define TX_INTERRUPTABLE 1 +#define RESET_XILINX 0 + +/* ---------------------------------------------------------------------------- +Include Files +---------------------------------------------------------------------------- */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +/* ---------------------------------------------------------------------------- +Defines +---------------------------------------------------------------------------- */ + +#define MACE_LADRF_LEN 8 + /* 8 bytes in Logical Address Filter */ + +/* Loop Control Defines */ +#define MACE_MAX_IR_ITERATIONS 10 +#define MACE_MAX_RX_ITERATIONS 12 + /* + TBD: Dean brought this up, and I assumed the hardware would + handle it: + + If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be + non-zero when the isr exits. We may not get another interrupt + to process the remaining packets for some time. + */ + +/* +The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA) +which manages the interface between the MACE and the PCMCIA bus. It +also includes buffer management for the 32K x 8 SRAM to control up to +four transmit and 12 receive frames at a time. +*/ +#define AM2150_MAX_TX_FRAMES 4 +#define AM2150_MAX_RX_FRAMES 12 + +/* Am2150 Ethernet Card I/O Mapping */ +#define AM2150_RCV 0x00 +#define AM2150_XMT 0x04 +#define AM2150_XMT_SKIP 0x09 +#define AM2150_RCV_NEXT 0x0A +#define AM2150_RCV_FRAME_COUNT 0x0B +#define AM2150_MACE_BANK 0x0C +#define AM2150_MACE_BASE 0x10 + +/* MACE Registers */ +#define MACE_RCVFIFO 0 +#define MACE_XMTFIFO 1 +#define MACE_XMTFC 2 +#define MACE_XMTFS 3 +#define MACE_XMTRC 4 +#define MACE_RCVFC 5 +#define MACE_RCVFS 6 +#define MACE_FIFOFC 7 +#define MACE_IR 8 +#define MACE_IMR 9 +#define MACE_PR 10 +#define MACE_BIUCC 11 +#define MACE_FIFOCC 12 +#define MACE_MACCC 13 +#define MACE_PLSCC 14 +#define MACE_PHYCC 15 +#define MACE_CHIPIDL 16 +#define MACE_CHIPIDH 17 +#define MACE_IAC 18 +/* Reserved */ +#define MACE_LADRF 20 +#define MACE_PADR 21 +/* Reserved */ +/* Reserved */ +#define MACE_MPC 24 +/* Reserved */ +#define MACE_RNTPC 26 +#define MACE_RCVCC 27 +/* Reserved */ +#define MACE_UTR 29 +#define MACE_RTR1 30 +#define MACE_RTR2 31 + +/* MACE Bit Masks */ +#define MACE_XMTRC_EXDEF 0x80 +#define MACE_XMTRC_XMTRC 0x0F + +#define MACE_XMTFS_XMTSV 0x80 +#define MACE_XMTFS_UFLO 0x40 +#define MACE_XMTFS_LCOL 0x20 +#define MACE_XMTFS_MORE 0x10 +#define MACE_XMTFS_ONE 0x08 +#define MACE_XMTFS_DEFER 0x04 +#define MACE_XMTFS_LCAR 0x02 +#define MACE_XMTFS_RTRY 0x01 + +#define MACE_RCVFS_RCVSTS 0xF000 +#define MACE_RCVFS_OFLO 0x8000 +#define MACE_RCVFS_CLSN 0x4000 +#define MACE_RCVFS_FRAM 0x2000 +#define MACE_RCVFS_FCS 0x1000 + +#define MACE_FIFOFC_RCVFC 0xF0 +#define MACE_FIFOFC_XMTFC 0x0F + +#define MACE_IR_JAB 0x80 +#define MACE_IR_BABL 0x40 +#define MACE_IR_CERR 0x20 +#define MACE_IR_RCVCCO 0x10 +#define MACE_IR_RNTPCO 0x08 +#define MACE_IR_MPCO 0x04 +#define MACE_IR_RCVINT 0x02 +#define 
MACE_IR_XMTINT 0x01 + +#define MACE_MACCC_PROM 0x80 +#define MACE_MACCC_DXMT2PD 0x40 +#define MACE_MACCC_EMBA 0x20 +#define MACE_MACCC_RESERVED 0x10 +#define MACE_MACCC_DRCVPA 0x08 +#define MACE_MACCC_DRCVBC 0x04 +#define MACE_MACCC_ENXMT 0x02 +#define MACE_MACCC_ENRCV 0x01 + +#define MACE_PHYCC_LNKFL 0x80 +#define MACE_PHYCC_DLNKTST 0x40 +#define MACE_PHYCC_REVPOL 0x20 +#define MACE_PHYCC_DAPC 0x10 +#define MACE_PHYCC_LRT 0x08 +#define MACE_PHYCC_ASEL 0x04 +#define MACE_PHYCC_RWAKE 0x02 +#define MACE_PHYCC_AWAKE 0x01 + +#define MACE_IAC_ADDRCHG 0x80 +#define MACE_IAC_PHYADDR 0x04 +#define MACE_IAC_LOGADDR 0x02 + +#define MACE_UTR_RTRE 0x80 +#define MACE_UTR_RTRD 0x40 +#define MACE_UTR_RPA 0x20 +#define MACE_UTR_FCOLL 0x10 +#define MACE_UTR_RCVFCSE 0x08 +#define MACE_UTR_LOOP_INCL_MENDEC 0x06 +#define MACE_UTR_LOOP_NO_MENDEC 0x04 +#define MACE_UTR_LOOP_EXTERNAL 0x02 +#define MACE_UTR_LOOP_NONE 0x00 +#define MACE_UTR_RESERVED 0x01 + +/* Switch MACE register bank (only 0 and 1 are valid) */ +#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK) + +#define MACE_IMR_DEFAULT \ + (0xFF - \ + ( \ + MACE_IR_CERR | \ + MACE_IR_RCVCCO | \ + MACE_IR_RNTPCO | \ + MACE_IR_MPCO | \ + MACE_IR_RCVINT | \ + MACE_IR_XMTINT \ + ) \ + ) +#undef MACE_IMR_DEFAULT +#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */ + +#define TX_TIMEOUT ((400*HZ)/1000) + +/* ---------------------------------------------------------------------------- +Type Definitions +---------------------------------------------------------------------------- */ + +typedef struct _mace_statistics { + /* MACE_XMTFS */ + int xmtsv; + int uflo; + int lcol; + int more; + int one; + int defer; + int lcar; + int rtry; + + /* MACE_XMTRC */ + int exdef; + int xmtrc; + + /* RFS1--Receive Status (RCVSTS) */ + int oflo; + int clsn; + int fram; + int fcs; + + /* RFS2--Runt Packet Count (RNTPC) */ + int rfs_rntpc; + + /* RFS3--Receive Collision Count (RCVCC) */ + int rfs_rcvcc; + + /* MACE_IR */ + int jab; + int babl; + int cerr; + int rcvcco; + int rntpco; + int mpco; + + /* MACE_MPC */ + int mpc; + + /* MACE_RNTPC */ + int rntpc; + + /* MACE_RCVCC */ + int rcvcc; +} mace_statistics; + +typedef struct _mace_private { + struct pcmcia_device *p_dev; + struct net_device_stats linux_stats; /* Linux statistics counters */ + mace_statistics mace_stats; /* MACE chip statistics counters */ + + /* restore_multicast_list() state variables */ + int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */ + int multicast_num_addrs; + + char tx_free_frames; /* Number of free transmit frame buffers */ + char tx_irq_disabled; /* MACE TX interrupt disabled */ + + spinlock_t bank_lock; /* Must be held if you step off bank 0 */ +} mace_private; + +/* ---------------------------------------------------------------------------- +Private Global Variables +---------------------------------------------------------------------------- */ + +static const char *if_names[]={ + "Auto", "10baseT", "BNC", +}; + +/* ---------------------------------------------------------------------------- +Parameters + These are the parameters that can be set during loading with + 'insmod'. 
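MACE_IMR_DEFAULT above ends up redefined as 0x00 (no sources masked); individual sources can still be masked on the fly by setting their MACE_IR_* bit in the IMR, which is how the transmit path later shields itself from TX-complete interrupts when TX_INTERRUPTABLE is off. A hedged sketch of that idea, with a hypothetical helper name:

static void mace_mask_tx_irq(unsigned int ioaddr, int mask)
{
	if (mask)	/* mask only the transmit-complete source */
		outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
		     ioaddr + AM2150_MACE_BASE + MACE_IMR);
	else		/* restore the default mask */
		outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
}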
+---------------------------------------------------------------------------- */ + +MODULE_DESCRIPTION("New Media PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ +INT_MODULE_PARM(if_port, 0); + + +/* ---------------------------------------------------------------------------- +Function Prototypes +---------------------------------------------------------------------------- */ + +static int nmclan_config(struct pcmcia_device *link); +static void nmclan_release(struct pcmcia_device *link); + +static void nmclan_reset(struct net_device *dev); +static int mace_config(struct net_device *dev, struct ifmap *map); +static int mace_open(struct net_device *dev); +static int mace_close(struct net_device *dev); +static netdev_tx_t mace_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static void mace_tx_timeout(struct net_device *dev); +static irqreturn_t mace_interrupt(int irq, void *dev_id); +static struct net_device_stats *mace_get_stats(struct net_device *dev); +static int mace_rx(struct net_device *dev, unsigned char RxCnt); +static void restore_multicast_list(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + + +static void nmclan_detach(struct pcmcia_device *p_dev); + +static const struct net_device_ops mace_netdev_ops = { + .ndo_open = mace_open, + .ndo_stop = mace_close, + .ndo_start_xmit = mace_start_xmit, + .ndo_tx_timeout = mace_tx_timeout, + .ndo_set_config = mace_config, + .ndo_get_stats = mace_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int nmclan_probe(struct pcmcia_device *link) +{ + mace_private *lp; + struct net_device *dev; + + dev_dbg(&link->dev, "nmclan_attach()\n"); + + /* Create new ethernet device */ + dev = alloc_etherdev(sizeof(mace_private)); + if (!dev) + return -ENOMEM; + lp = netdev_priv(dev); + lp->p_dev = link; + link->priv = dev; + + spin_lock_init(&lp->bank_lock); + link->resource[0]->end = 32; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; + link->config_regs = PRESENT_OPTION; + + lp->tx_free_frames=AM2150_MAX_TX_FRAMES; + + dev->netdev_ops = &mace_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + return nmclan_config(link); +} /* nmclan_attach */ + +static void nmclan_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "nmclan_detach\n"); + + unregister_netdev(dev); + + nmclan_release(link); + + free_netdev(dev); +} /* nmclan_detach */ + +/* ---------------------------------------------------------------------------- +mace_read + Reads a MACE register. This is bank independent; however, the + caller must ensure that this call is not interruptable. We are + assuming that during normal operation, the MACE is always in + bank 0. 
+---------------------------------------------------------------------------- */ +static int mace_read(mace_private *lp, unsigned int ioaddr, int reg) +{ + int data = 0xFF; + unsigned long flags; + + switch (reg >> 4) { + case 0: /* register 0-15 */ + data = inb(ioaddr + AM2150_MACE_BASE + reg); + break; + case 1: /* register 16-31 */ + spin_lock_irqsave(&lp->bank_lock, flags); + MACEBANK(1); + data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); + MACEBANK(0); + spin_unlock_irqrestore(&lp->bank_lock, flags); + break; + } + return data & 0xFF; +} /* mace_read */ + +/* ---------------------------------------------------------------------------- +mace_write + Writes to a MACE register. This is bank independent; however, + the caller must ensure that this call is not interruptable. We + are assuming that during normal operation, the MACE is always in + bank 0. +---------------------------------------------------------------------------- */ +static void mace_write(mace_private *lp, unsigned int ioaddr, int reg, + int data) +{ + unsigned long flags; + + switch (reg >> 4) { + case 0: /* register 0-15 */ + outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg); + break; + case 1: /* register 16-31 */ + spin_lock_irqsave(&lp->bank_lock, flags); + MACEBANK(1); + outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F)); + MACEBANK(0); + spin_unlock_irqrestore(&lp->bank_lock, flags); + break; + } +} /* mace_write */ + +/* ---------------------------------------------------------------------------- +mace_init + Resets the MACE chip. +---------------------------------------------------------------------------- */ +static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) +{ + int i; + int ct = 0; + + /* MACE Software reset */ + mace_write(lp, ioaddr, MACE_BIUCC, 1); + while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) { + /* Wait for reset bit to be cleared automatically after <= 200ns */; + if(++ct > 500) + { + pr_err("reset failed, card removed?\n"); + return -1; + } + udelay(1); + } + mace_write(lp, ioaddr, MACE_BIUCC, 0); + + /* The Am2150 requires that the MACE FIFOs operate in burst mode. */ + mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F); + + mace_write(lp,ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */ + mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */ + + /* + * Bit 2-1 PORTSEL[1-0] Port Select. + * 00 AUI/10Base-2 + * 01 10Base-T + * 10 DAI Port (reserved in Am2150) + * 11 GPSI + * For this card, only the first two are valid. + * So, PLSCC should be set to + * 0x00 for 10Base-2 + * 0x02 for 10Base-T + * Or just set ASEL in PHYCC below! + */ + switch (if_port) { + case 1: + mace_write(lp, ioaddr, MACE_PLSCC, 0x02); + break; + case 2: + mace_write(lp, ioaddr, MACE_PLSCC, 0x00); + break; + default: + mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4); + /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden, + and the MACE device will automatically select the operating media + interface port. */ + break; + } + + mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR); + /* Poll ADDRCHG bit */ + ct = 0; + while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) + { + if(++ ct > 500) + { + pr_err("ADDRCHG timeout, card removed?\n"); + return -1; + } + } + /* Set PADR register */ + for (i = 0; i < ETH_ALEN; i++) + mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]); + + /* MAC Configuration Control Register should be written last */ + /* Let set_multicast_list set this. 
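mace_init() above uses the same bounded-poll pattern twice (the BIUCC software-reset bit and the IAC ADDRCHG bit): spin on a self-clearing bit but give up after a fixed number of iterations so a removed card cannot hang the kernel. A generic sketch of that pattern, assuming the driver's headers; register and bit arguments are placeholders, not a real driver helper:

static int mace_poll_bit_clear(mace_private *lp, unsigned int ioaddr,
			       int reg, int bit)
{
	int ct = 0;

	while (mace_read(lp, ioaddr, reg) & bit) {
		if (++ct > 500)
			return -1;	/* card removed? */
		udelay(1);
	}
	return 0;
}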
*/ + /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */ + mace_write(lp, ioaddr, MACE_MACCC, 0x00); + return 0; +} /* mace_init */ + +static int nmclan_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + mace_private *lp = netdev_priv(dev); + u8 *buf; + size_t len; + int i, ret; + unsigned int ioaddr; + + dev_dbg(&link->dev, "nmclan_config\n"); + + link->io_lines = 5; + ret = pcmcia_request_io(link); + if (ret) + goto failed; + ret = pcmcia_request_irq(link, mace_interrupt); + if (ret) + goto failed; + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + ioaddr = dev->base_addr; + + /* Read the ethernet address from the CIS. */ + len = pcmcia_get_tuple(link, 0x80, &buf); + if (!buf || len < ETH_ALEN) { + kfree(buf); + goto failed; + } + memcpy(dev->dev_addr, buf, ETH_ALEN); + kfree(buf); + + /* Verify configuration by reading the MACE ID. */ + { + char sig[2]; + + sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); + sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); + if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { + dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n", + sig[0], sig[1]); + } else { + pr_notice("mace id not found: %x %x should be 0x40 0x?9\n", + sig[0], sig[1]); + return -ENODEV; + } + } + + if(mace_init(lp, ioaddr, dev->dev_addr) == -1) + goto failed; + + /* The if_port symbol can be set when the module is loaded */ + if (if_port <= 2) + dev->if_port = if_port; + else + pr_notice("invalid if_port requested\n"); + + SET_NETDEV_DEV(dev, &link->dev); + + i = register_netdev(dev); + if (i != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n", + dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr); + return 0; + +failed: + nmclan_release(link); + return -ENODEV; +} /* nmclan_config */ + +static void nmclan_release(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "nmclan_release\n"); + pcmcia_disable_device(link); +} + +static int nmclan_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int nmclan_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + nmclan_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + + +/* ---------------------------------------------------------------------------- +nmclan_reset + Reset and restore all of the Xilinx and MACE registers. +---------------------------------------------------------------------------- */ +static void nmclan_reset(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + +#if RESET_XILINX + struct pcmcia_device *link = &lp->link; + u8 OrigCorValue; + + /* Save original COR value */ + pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue); + + /* Reset Xilinx */ + dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n", + OrigCorValue); + pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET); + /* Need to wait for 20 ms for PCMCIA to finish reset. */ + + /* Restore original COR configuration index */ + pcmcia_write_config_byte(link, CISREG_COR, + (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK))); + /* Xilinx is now completely reset along with the MACE chip. */ + lp->tx_free_frames=AM2150_MAX_TX_FRAMES; + +#endif /* #if RESET_XILINX */ + + /* Xilinx is now completely reset along with the MACE chip. 
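nmclan_config() above verifies the hardware by reading the two chip-ID registers, which live in bank 1 and therefore go through mace_read(). A compact restatement of that check for illustration only (the helper name is hypothetical):

static int mace_id_matches(mace_private *lp, unsigned int ioaddr)
{
	int idl = mace_read(lp, ioaddr, MACE_CHIPIDL);
	int idh = mace_read(lp, ioaddr, MACE_CHIPIDH);

	/* low byte must be 0x40, low nibble of the high byte 0x9; the
	 * masked-off nibble is what nmclan_config() ignores as the
	 * chip revision ("should be 0x40 0x?9") */
	return idl == 0x40 && (idh & 0x0F) == 0x09;
}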
*/ + lp->tx_free_frames=AM2150_MAX_TX_FRAMES; + + /* Reinitialize the MACE chip for operation. */ + mace_init(lp, dev->base_addr, dev->dev_addr); + mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT); + + /* Restore the multicast list and enable TX and RX. */ + restore_multicast_list(dev); +} /* nmclan_reset */ + +/* ---------------------------------------------------------------------------- +mace_config + [Someone tell me what this is supposed to do? Is if_port a defined + standard? If so, there should be defines to indicate 1=10Base-T, + 2=10Base-2, etc. including limited automatic detection.] +---------------------------------------------------------------------------- */ +static int mace_config(struct net_device *dev, struct ifmap *map) +{ + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (map->port <= 2) { + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + } else + return -EINVAL; + } + return 0; +} /* mace_config */ + +/* ---------------------------------------------------------------------------- +mace_open + Open device driver. +---------------------------------------------------------------------------- */ +static int mace_open(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + mace_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + link->open++; + + MACEBANK(0); + + netif_start_queue(dev); + nmclan_reset(dev); + + return 0; /* Always succeed */ +} /* mace_open */ + +/* ---------------------------------------------------------------------------- +mace_close + Closes device driver. +---------------------------------------------------------------------------- */ +static int mace_close(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + mace_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); + + /* Mask off all interrupts from the MACE chip. */ + outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); + + link->open--; + netif_stop_queue(dev); + + return 0; +} /* mace_close */ + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), + "PCMCIA 0x%lx", dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +/* ---------------------------------------------------------------------------- +mace_start_xmit + This routine begins the packet transmit function. When completed, + it will generate a transmit interrupt. + + According to /usr/src/linux/net/inet/dev.c, if _start_xmit + returns 0, the "packet is now solely the responsibility of the + driver." If _start_xmit returns non-zero, the "transmission + failed, put skb back into a list." 
+---------------------------------------------------------------------------- */ + +static void mace_tx_timeout(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + netdev_notice(dev, "transmit timed out -- "); +#if RESET_ON_TIMEOUT + pr_cont("resetting card\n"); + pcmcia_reset_card(link->socket); +#else /* #if RESET_ON_TIMEOUT */ + pr_cont("NOT resetting card\n"); +#endif /* #if RESET_ON_TIMEOUT */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +static netdev_tx_t mace_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + netif_stop_queue(dev); + + pr_debug("%s: mace_start_xmit(length = %ld) called.\n", + dev->name, (long)skb->len); + +#if (!TX_INTERRUPTABLE) + /* Disable MACE TX interrupts. */ + outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT, + ioaddr + AM2150_MACE_BASE + MACE_IMR); + lp->tx_irq_disabled=1; +#endif /* #if (!TX_INTERRUPTABLE) */ + + { + /* This block must not be interrupted by another transmit request! + mace_tx_timeout will take care of timer-based retransmissions from + the upper layers. The interrupt handler is guaranteed never to + service a transmit interrupt while we are in here. + */ + + lp->linux_stats.tx_bytes += skb->len; + lp->tx_free_frames--; + + /* WARNING: Write the _exact_ number of bytes written in the header! */ + /* Put out the word header [must be an outw()] . . . */ + outw(skb->len, ioaddr + AM2150_XMT); + /* . . . and the packet [may be any combination of outw() and outb()] */ + outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1); + if (skb->len & 1) { + /* Odd byte transfer */ + outb(skb->data[skb->len-1], ioaddr + AM2150_XMT); + } + +#if MULTI_TX + if (lp->tx_free_frames > 0) + netif_start_queue(dev); +#endif /* #if MULTI_TX */ + } + +#if (!TX_INTERRUPTABLE) + /* Re-enable MACE TX interrupts. */ + lp->tx_irq_disabled=0; + outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR); +#endif /* #if (!TX_INTERRUPTABLE) */ + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} /* mace_start_xmit */ + +/* ---------------------------------------------------------------------------- +mace_interrupt + The interrupt handler. +---------------------------------------------------------------------------- */ +static irqreturn_t mace_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + mace_private *lp = netdev_priv(dev); + unsigned int ioaddr; + int status; + int IntrCnt = MACE_MAX_IR_ITERATIONS; + + if (dev == NULL) { + pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n", + irq); + return IRQ_NONE; + } + + ioaddr = dev->base_addr; + + if (lp->tx_irq_disabled) { + const char *msg; + if (lp->tx_irq_disabled) + msg = "Interrupt with tx_irq_disabled"; + else + msg = "Re-entering the interrupt handler"; + netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n", + msg, + inb(ioaddr + AM2150_MACE_BASE + MACE_IR), + inb(ioaddr + AM2150_MACE_BASE + MACE_IMR)); + /* WARNING: MACE_IR has been read! */ + return IRQ_NONE; + } + + if (!netif_device_present(dev)) { + netdev_dbg(dev, "interrupt from dead card\n"); + return IRQ_NONE; + } + + do { + /* WARNING: MACE_IR is a READ/CLEAR port! 
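A condensed sketch of the copy sequence mace_start_xmit() performs above, assuming a valid ioaddr and skb: the Am2150 wants the exact frame length written first as a word, then the payload word-wise, with a trailing odd byte if the length is odd.

static void am2150_push_frame(unsigned int ioaddr, struct sk_buff *skb)
{
	outw(skb->len, ioaddr + AM2150_XMT);		/* exact length header */
	outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
	if (skb->len & 1)				/* odd trailing byte */
		outb(skb->data[skb->len - 1], ioaddr + AM2150_XMT);
}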
*/ + status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); + if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS) + return IRQ_NONE; + + pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); + + if (status & MACE_IR_RCVINT) { + mace_rx(dev, MACE_MAX_RX_ITERATIONS); + } + + if (status & MACE_IR_XMTINT) { + unsigned char fifofc; + unsigned char xmtrc; + unsigned char xmtfs; + + fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC); + if ((fifofc & MACE_FIFOFC_XMTFC)==0) { + lp->linux_stats.tx_errors++; + outb(0xFF, ioaddr + AM2150_XMT_SKIP); + } + + /* Transmit Retry Count (XMTRC, reg 4) */ + xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC); + if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++; + lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC); + + if ( + (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) & + MACE_XMTFS_XMTSV /* Transmit Status Valid */ + ) { + lp->mace_stats.xmtsv++; + + if (xmtfs & ~MACE_XMTFS_XMTSV) { + if (xmtfs & MACE_XMTFS_UFLO) { + /* Underflow. Indicates that the Transmit FIFO emptied before + the end of frame was reached. */ + lp->mace_stats.uflo++; + } + if (xmtfs & MACE_XMTFS_LCOL) { + /* Late Collision */ + lp->mace_stats.lcol++; + } + if (xmtfs & MACE_XMTFS_MORE) { + /* MORE than one retry was needed */ + lp->mace_stats.more++; + } + if (xmtfs & MACE_XMTFS_ONE) { + /* Exactly ONE retry occurred */ + lp->mace_stats.one++; + } + if (xmtfs & MACE_XMTFS_DEFER) { + /* Transmission was defered */ + lp->mace_stats.defer++; + } + if (xmtfs & MACE_XMTFS_LCAR) { + /* Loss of carrier */ + lp->mace_stats.lcar++; + } + if (xmtfs & MACE_XMTFS_RTRY) { + /* Retry error: transmit aborted after 16 attempts */ + lp->mace_stats.rtry++; + } + } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */ + + } /* if (xmtfs & MACE_XMTFS_XMTSV) */ + + lp->linux_stats.tx_packets++; + lp->tx_free_frames++; + netif_wake_queue(dev); + } /* if (status & MACE_IR_XMTINT) */ + + if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) { + if (status & MACE_IR_JAB) { + /* Jabber Error. Excessive transmit duration (20-150ms). */ + lp->mace_stats.jab++; + } + if (status & MACE_IR_BABL) { + /* Babble Error. >1518 bytes transmitted. */ + lp->mace_stats.babl++; + } + if (status & MACE_IR_CERR) { + /* Collision Error. CERR indicates the absence of the + Signal Quality Error Test message after a packet + transmission. */ + lp->mace_stats.cerr++; + } + if (status & MACE_IR_RCVCCO) { + /* Receive Collision Count Overflow; */ + lp->mace_stats.rcvcco++; + } + if (status & MACE_IR_RNTPCO) { + /* Runt Packet Count Overflow */ + lp->mace_stats.rntpco++; + } + if (status & MACE_IR_MPCO) { + /* Missed Packet Count Overflow */ + lp->mace_stats.mpco++; + } + } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */ + + } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt)); + + return IRQ_HANDLED; +} /* mace_interrupt */ + +/* ---------------------------------------------------------------------------- +mace_rx + Receives packets. +---------------------------------------------------------------------------- */ +static int mace_rx(struct net_device *dev, unsigned char RxCnt) +{ + mace_private *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned char rx_framecnt; + unsigned short rx_status; + + while ( + ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) && + (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. 
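The transmit branch above reads the FIFO Frame Count register and tests only its low nibble; per the MACE_FIFOFC_* masks defined earlier, the register packs two counts into one byte. A small illustrative decode:

static void mace_decode_fifofc(unsigned char fifofc, int *rcvfc, int *xmtfc)
{
	*rcvfc = (fifofc & MACE_FIFOFC_RCVFC) >> 4;	/* receive frame count */
	*xmtfc = fifofc & MACE_FIFOFC_XMTFC;		/* transmit frame count */
}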
*/ + (RxCnt--) + ) { + rx_status = inw(ioaddr + AM2150_RCV); + + pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status" + " 0x%X.\n", dev->name, rx_framecnt, rx_status); + + if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */ + lp->linux_stats.rx_errors++; + if (rx_status & MACE_RCVFS_OFLO) { + lp->mace_stats.oflo++; + } + if (rx_status & MACE_RCVFS_CLSN) { + lp->mace_stats.clsn++; + } + if (rx_status & MACE_RCVFS_FRAM) { + lp->mace_stats.fram++; + } + if (rx_status & MACE_RCVFS_FCS) { + lp->mace_stats.fcs++; + } + } else { + short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4; + /* Auto Strip is off, always subtract 4 */ + struct sk_buff *skb; + + lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV); + /* runt packet count */ + lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); + /* rcv collision count */ + + pr_debug(" receiving packet size 0x%X rx_status" + " 0x%X.\n", pkt_len, rx_status); + + skb = netdev_alloc_skb(dev, pkt_len + 2); + + if (skb != NULL) { + skb_reserve(skb, 2); + insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); + if (pkt_len & 1) + *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); + skb->protocol = eth_type_trans(skb, dev); + + netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ + + lp->linux_stats.rx_packets++; + lp->linux_stats.rx_bytes += pkt_len; + outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ + continue; + } else { + pr_debug("%s: couldn't allocate a sk_buff of size" + " %d.\n", dev->name, pkt_len); + lp->linux_stats.rx_dropped++; + } + } + outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ + } /* while */ + + return 0; +} /* mace_rx */ + +/* ---------------------------------------------------------------------------- +pr_linux_stats +---------------------------------------------------------------------------- */ +static void pr_linux_stats(struct net_device_stats *pstats) +{ + pr_debug("pr_linux_stats\n"); + pr_debug(" rx_packets=%-7ld tx_packets=%ld\n", + (long)pstats->rx_packets, (long)pstats->tx_packets); + pr_debug(" rx_errors=%-7ld tx_errors=%ld\n", + (long)pstats->rx_errors, (long)pstats->tx_errors); + pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n", + (long)pstats->rx_dropped, (long)pstats->tx_dropped); + pr_debug(" multicast=%-7ld collisions=%ld\n", + (long)pstats->multicast, (long)pstats->collisions); + + pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n", + (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); + pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n", + (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); + pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", + (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); + + pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", + (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); + pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", + (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); + pr_debug(" tx_window_errors=%ld\n", + (long)pstats->tx_window_errors); +} /* pr_linux_stats */ + +/* ---------------------------------------------------------------------------- +pr_mace_stats +---------------------------------------------------------------------------- */ +static void pr_mace_stats(mace_statistics *pstats) +{ + pr_debug("pr_mace_stats\n"); + + pr_debug(" xmtsv=%-7d uflo=%d\n", + pstats->xmtsv, pstats->uflo); + pr_debug(" lcol=%-7d more=%d\n", + pstats->lcol, pstats->more); + pr_debug(" one=%-7d defer=%d\n", + pstats->one, pstats->defer); 
+ pr_debug(" lcar=%-7d rtry=%d\n", + pstats->lcar, pstats->rtry); + + /* MACE_XMTRC */ + pr_debug(" exdef=%-7d xmtrc=%d\n", + pstats->exdef, pstats->xmtrc); + + /* RFS1--Receive Status (RCVSTS) */ + pr_debug(" oflo=%-7d clsn=%d\n", + pstats->oflo, pstats->clsn); + pr_debug(" fram=%-7d fcs=%d\n", + pstats->fram, pstats->fcs); + + /* RFS2--Runt Packet Count (RNTPC) */ + /* RFS3--Receive Collision Count (RCVCC) */ + pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n", + pstats->rfs_rntpc, pstats->rfs_rcvcc); + + /* MACE_IR */ + pr_debug(" jab=%-7d babl=%d\n", + pstats->jab, pstats->babl); + pr_debug(" cerr=%-7d rcvcco=%d\n", + pstats->cerr, pstats->rcvcco); + pr_debug(" rntpco=%-7d mpco=%d\n", + pstats->rntpco, pstats->mpco); + + /* MACE_MPC */ + pr_debug(" mpc=%d\n", pstats->mpc); + + /* MACE_RNTPC */ + pr_debug(" rntpc=%d\n", pstats->rntpc); + + /* MACE_RCVCC */ + pr_debug(" rcvcc=%d\n", pstats->rcvcc); + +} /* pr_mace_stats */ + +/* ---------------------------------------------------------------------------- +update_stats + Update statistics. We change to register window 1, so this + should be run single-threaded if the device is active. This is + expected to be a rare operation, and it's simpler for the rest + of the driver to assume that window 0 is always valid rather + than use a special window-state variable. + + oflo & uflo should _never_ occur since it would mean the Xilinx + was not able to transfer data between the MACE FIFO and the + card's SRAM fast enough. If this happens, something is + seriously wrong with the hardware. +---------------------------------------------------------------------------- */ +static void update_stats(unsigned int ioaddr, struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + + lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC); + lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC); + lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC); + /* At this point, mace_stats is fully updated for this call. + We may now update the linux_stats. */ + + /* The MACE has no equivalent for linux_stats field which are commented + out. */ + + /* lp->linux_stats.multicast; */ + lp->linux_stats.collisions = + lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc; + /* Collision: The MACE may retry sending a packet 15 times + before giving up. The retry count is in XMTRC. + Does each retry constitute a collision? + If so, why doesn't the RCVCC record these collisions? */ + + /* detailed rx_errors: */ + lp->linux_stats.rx_length_errors = + lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc; + /* lp->linux_stats.rx_over_errors */ + lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs; + lp->linux_stats.rx_frame_errors = lp->mace_stats.fram; + lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo; + lp->linux_stats.rx_missed_errors = + lp->mace_stats.mpco * 256 + lp->mace_stats.mpc; + + /* detailed tx_errors */ + lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry; + lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar; + /* LCAR usually results from bad cabling. */ + lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo; + lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr; + /* lp->linux_stats.tx_window_errors; */ +} /* update_stats */ + +/* ---------------------------------------------------------------------------- +mace_get_stats + Gathers ethernet statistics from the MACE chip. 
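update_stats() above widens the MACE's 8-bit counters by combining them with the corresponding overflow-interrupt counts, for example rcvcco * 256 + rcvcc. As a one-line illustration of that rule (not a driver function):

static unsigned long mace_widen_counter(int overflow_events, int counter)
{
	/* each overflow interrupt accounts for 256 counts */
	return (unsigned long)overflow_events * 256 + counter;
}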
+---------------------------------------------------------------------------- */ +static struct net_device_stats *mace_get_stats(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + + update_stats(dev->base_addr, dev); + + pr_debug("%s: updating the statistics.\n", dev->name); + pr_linux_stats(&lp->linux_stats); + pr_mace_stats(&lp->mace_stats); + + return &lp->linux_stats; +} /* net_device_stats */ + +/* ---------------------------------------------------------------------------- +updateCRC + Modified from Am79C90 data sheet. +---------------------------------------------------------------------------- */ + +#ifdef BROKEN_MULTICAST + +static void updateCRC(int *CRC, int bit) +{ + static const int poly[]={ + 1,1,1,0, 1,1,0,1, + 1,0,1,1, 1,0,0,0, + 1,0,0,0, 0,0,1,1, + 0,0,1,0, 0,0,0,0 + }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the + CRC generator polynomial. */ + + int j; + + /* shift CRC and control bit (CRC[32]) */ + for (j = 32; j > 0; j--) + CRC[j] = CRC[j-1]; + CRC[0] = 0; + + /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */ + if (bit ^ CRC[32]) + for (j = 0; j < 32; j++) + CRC[j] ^= poly[j]; +} /* updateCRC */ + +/* ---------------------------------------------------------------------------- +BuildLAF + Build logical address filter. + Modified from Am79C90 data sheet. + +Input + ladrf: logical address filter (contents initialized to 0) + adr: ethernet address +---------------------------------------------------------------------------- */ +static void BuildLAF(int *ladrf, int *adr) +{ + int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */ + + int i, byte; /* temporary array indices */ + int hashcode; /* the output object */ + + CRC[32]=0; + + for (byte = 0; byte < 6; byte++) + for (i = 0; i < 8; i++) + updateCRC(CRC, (adr[byte] >> i) & 1); + + hashcode = 0; + for (i = 0; i < 6; i++) + hashcode = (hashcode << 1) + CRC[i]; + + byte = hashcode >> 3; + ladrf[byte] |= (1 << (hashcode & 7)); + +#ifdef PCMCIA_DEBUG + if (0) + printk(KERN_DEBUG " adr =%pM\n", adr); + printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); + for (i = 0; i < 8; i++) + pr_cont(" %02X", ladrf[i]); + pr_cont("\n"); +#endif +} /* BuildLAF */ + +/* ---------------------------------------------------------------------------- +restore_multicast_list + Restores the multicast filter for MACE chip to the last + set_multicast_list() call. 
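BuildLAF() above maps a 6-bit hash code onto the 64-bit logical address filter by setting bit (hashcode & 7) of byte (hashcode >> 3). The corresponding membership test, shown purely as an illustration, is the inverse of that mapping:

static int ladrf_test_bit(const int *ladrf, int hashcode)
{
	return (ladrf[hashcode >> 3] >> (hashcode & 7)) & 1;
}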
+ +Input + multicast_num_addrs + multicast_ladrf[] +---------------------------------------------------------------------------- */ +static void restore_multicast_list(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + int num_addrs = lp->multicast_num_addrs; + int *ladrf = lp->multicast_ladrf; + unsigned int ioaddr = dev->base_addr; + int i; + + pr_debug("%s: restoring Rx mode to %d addresses.\n", + dev->name, num_addrs); + + if (num_addrs > 0) { + + pr_debug("Attempt to restore multicast list detected.\n"); + + mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); + /* Poll ADDRCHG bit */ + while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) + ; + /* Set LADRF register */ + for (i = 0; i < MACE_LADRF_LEN; i++) + mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]); + + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); + + } else if (num_addrs < 0) { + + /* Promiscuous mode: receive all packets */ + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, + MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV + ); + + } else { + + /* Normal mode */ + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); + + } +} /* restore_multicast_list */ + +/* ---------------------------------------------------------------------------- +set_multicast_list + Set or clear the multicast filter for this adaptor. + +Input + num_addrs == -1 Promiscuous mode, receive all packets + num_addrs == 0 Normal mode, clear multicast list + num_addrs > 0 Multicast mode, receive normal and MC packets, and do + best-effort filtering. +Output + multicast_num_addrs + multicast_ladrf[] +---------------------------------------------------------------------------- */ + +static void set_multicast_list(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + int adr[ETH_ALEN] = {0}; /* Ethernet address */ + struct netdev_hw_addr *ha; + +#ifdef PCMCIA_DEBUG + { + static int old; + if (netdev_mc_count(dev) != old) { + old = netdev_mc_count(dev); + pr_debug("%s: setting Rx mode to %d addresses.\n", + dev->name, old); + } + } +#endif + + /* Set multicast_num_addrs. */ + lp->multicast_num_addrs = netdev_mc_count(dev); + + /* Set multicast_ladrf. 
*/ + if (num_addrs > 0) { + /* Calculate multicast logical address filter */ + memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy(adr, ha->addr, ETH_ALEN); + BuildLAF(lp->multicast_ladrf, adr); + } + } + + restore_multicast_list(dev); + +} /* set_multicast_list */ + +#endif /* BROKEN_MULTICAST */ + +static void restore_multicast_list(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + mace_private *lp = netdev_priv(dev); + + pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name, + lp->multicast_num_addrs); + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode: receive all packets */ + mace_write(lp,ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, + MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV + ); + } else { + /* Normal mode */ + mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL); + mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); + } +} /* restore_multicast_list */ + +static void set_multicast_list(struct net_device *dev) +{ + mace_private *lp = netdev_priv(dev); + +#ifdef PCMCIA_DEBUG + { + static int old; + if (netdev_mc_count(dev) != old) { + old = netdev_mc_count(dev); + pr_debug("%s: setting Rx mode to %d addresses.\n", + dev->name, old); + } + } +#endif + + lp->multicast_num_addrs = netdev_mc_count(dev); + restore_multicast_list(dev); + +} /* set_multicast_list */ + +static const struct pcmcia_device_id nmclan_ids[] = { + PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941), + PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, nmclan_ids); + +static struct pcmcia_driver nmclan_cs_driver = { + .owner = THIS_MODULE, + .name = "nmclan_cs", + .probe = nmclan_probe, + .remove = nmclan_detach, + .id_table = nmclan_ids, + .suspend = nmclan_suspend, + .resume = nmclan_resume, +}; +module_pcmcia_driver(nmclan_cs_driver); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c new file mode 100644 index 000000000..bc8b04f42 --- /dev/null +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -0,0 +1,2989 @@ +/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */ +/* + * Copyright 1996-1999 Thomas Bogendoerfer + * + * Derived from the lance driver written 1993,1994,1995 by Donald Becker. + * + * Copyright 1993 United States Government as represented by the + * Director, National Security Agency. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * This driver is for PCnet32 and PCnetPCI based ethercards + */ +/************************************************************************** + * 23 Oct, 2000. + * Fixed a few bugs, related to running the controller in 32bit mode. + * + * Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. 
+ * + *************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "pcnet32" +#define DRV_VERSION "1.35" +#define DRV_RELDATE "21.Apr.2008" +#define PFX DRV_NAME ": " + +static const char *const version = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/crc32.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/moduleparam.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/uaccess.h> + +#include <asm/dma.h> +#include <asm/irq.h> + +/* + * PCI device identifiers for "new style" Linux PCI Device Drivers + */ +static const struct pci_device_id pcnet32_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, + + /* + * Adapters that were sold with IBM's RS/6000 or pSeries hardware have + * the incorrect vendor id. + */ + { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE), + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, }, + + { } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); + +static int cards_found; + +/* + * VLB I/O addresses + */ +static unsigned int pcnet32_portlist[] = + { 0x300, 0x320, 0x340, 0x360, 0 }; + +static int pcnet32_debug; +static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ +static int pcnet32vlb; /* check for VLB cards ? */ + +static struct net_device *pcnet32_dev; + +static int max_interrupt_work = 2; +static int rx_copybreak = 200; + +#define PCNET32_PORT_AUI 0x00 +#define PCNET32_PORT_10BT 0x01 +#define PCNET32_PORT_GPSI 0x02 +#define PCNET32_PORT_MII 0x03 + +#define PCNET32_PORT_PORTSEL 0x03 +#define PCNET32_PORT_ASEL 0x04 +#define PCNET32_PORT_100 0x40 +#define PCNET32_PORT_FD 0x80 + +#define PCNET32_DMA_MASK 0xffffffff + +#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ)) +#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4)) + +/* + * table to translate option values from tulip + * to internal options + */ +static const unsigned char options_mapping[] = { + PCNET32_PORT_ASEL, /* 0 Auto-select */ + PCNET32_PORT_AUI, /* 1 BNC/AUI */ + PCNET32_PORT_AUI, /* 2 AUI/BNC */ + PCNET32_PORT_ASEL, /* 3 not supported */ + PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ + PCNET32_PORT_ASEL, /* 5 not supported */ + PCNET32_PORT_ASEL, /* 6 not supported */ + PCNET32_PORT_ASEL, /* 7 not supported */ + PCNET32_PORT_ASEL, /* 8 not supported */ + PCNET32_PORT_MII, /* 9 MII 10baseT */ + PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ + PCNET32_PORT_MII, /* 11 MII (autosel) */ + PCNET32_PORT_10BT, /* 12 10BaseT */ + PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ + /* 14 MII 100BaseTx-FD */ + PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, + PCNET32_PORT_ASEL /* 15 not supported */ +}; + +static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { + "Loopback test (offline)" +}; + +#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test) + +#define PCNET32_NUM_REGS 136 + +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS]; +static int full_duplex[MAX_UNITS]; +static int homepna[MAX_UNITS]; + +/* + * Theory of Operation + * + * This driver uses the same software structure as the normal lance + * driver. So look for a verbose description in lance.c.
The differences + * to the normal lance driver is the use of the 32bit mode of PCnet32 + * and PCnetPCI chips. Because these chips are 32bit chips, there is no + * 16MB limitation and we don't need bounce buffers. + */ + +/* + * Set the number of Tx and Rx buffers, using Log_2(# buffers). + * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. + * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). + */ +#ifndef PCNET32_LOG_TX_BUFFERS +#define PCNET32_LOG_TX_BUFFERS 4 +#define PCNET32_LOG_RX_BUFFERS 5 +#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */ +#define PCNET32_LOG_MAX_RX_BUFFERS 9 +#endif + +#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) +#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS)) + +#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) +#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) + +#define PKT_BUF_SKB 1544 +/* actual buffer length after being aligned */ +#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) +/* chip wants twos complement of the (aligned) buffer length */ +#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) + +/* Offsets from base I/O address. */ +#define PCNET32_WIO_RDP 0x10 +#define PCNET32_WIO_RAP 0x12 +#define PCNET32_WIO_RESET 0x14 +#define PCNET32_WIO_BDP 0x16 + +#define PCNET32_DWIO_RDP 0x10 +#define PCNET32_DWIO_RAP 0x14 +#define PCNET32_DWIO_RESET 0x18 +#define PCNET32_DWIO_BDP 0x1C + +#define PCNET32_TOTAL_SIZE 0x20 + +#define CSR0 0 +#define CSR0_INIT 0x1 +#define CSR0_START 0x2 +#define CSR0_STOP 0x4 +#define CSR0_TXPOLL 0x8 +#define CSR0_INTEN 0x40 +#define CSR0_IDON 0x0100 +#define CSR0_NORMAL (CSR0_START | CSR0_INTEN) +#define PCNET32_INIT_LOW 1 +#define PCNET32_INIT_HIGH 2 +#define CSR3 3 +#define CSR4 4 +#define CSR5 5 +#define CSR5_SUSPEND 0x0001 +#define CSR15 15 +#define PCNET32_MC_FILTER 8 + +#define PCNET32_79C970A 0x2621 + +/* The PCNET32 Rx and Tx ring descriptors. */ +struct pcnet32_rx_head { + __le32 base; + __le16 buf_length; /* two`s complement of length */ + __le16 status; + __le32 msg_length; + __le32 reserved; +}; + +struct pcnet32_tx_head { + __le32 base; + __le16 length; /* two`s complement of length */ + __le16 status; + __le32 misc; + __le32 reserved; +}; + +/* The PCNET32 32-Bit initialization block, described in databook. */ +struct pcnet32_init_block { + __le16 mode; + __le16 tlen_rlen; + u8 phys_addr[6]; + __le16 reserved; + __le32 filter[2]; + /* Receive and transmit ring base, along with extra bits. */ + __le32 rx_ring; + __le32 tx_ring; +}; + +/* PCnet32 access functions */ +struct pcnet32_access { + u16 (*read_csr) (unsigned long, int); + void (*write_csr) (unsigned long, int, u16); + u16 (*read_bcr) (unsigned long, int); + void (*write_bcr) (unsigned long, int, u16); + u16 (*read_rap) (unsigned long); + void (*write_rap) (unsigned long, u16); + void (*reset) (unsigned long); +}; + +/* + * The first field of pcnet32_private is read by the ethernet device + * so the structure should be allocated using pci_alloc_consistent(). + */ +struct pcnet32_private { + struct pcnet32_init_block *init_block; + /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ + struct pcnet32_rx_head *rx_ring; + struct pcnet32_tx_head *tx_ring; + dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, + returned by pci_alloc_consistent */ + struct pci_dev *pci_dev; + const char *name; + /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + dma_addr_t *tx_dma_addr; + dma_addr_t *rx_dma_addr; + const struct pcnet32_access *a; + spinlock_t lock; /* Guard lock */ + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int rx_ring_size; /* current rx ring size */ + unsigned int tx_ring_size; /* current tx ring size */ + unsigned int rx_mod_mask; /* rx ring modular mask */ + unsigned int tx_mod_mask; /* tx ring modular mask */ + unsigned short rx_len_bits; + unsigned short tx_len_bits; + dma_addr_t rx_ring_dma_addr; + dma_addr_t tx_ring_dma_addr; + unsigned int dirty_rx, /* ring entries to be freed. */ + dirty_tx; + + struct net_device *dev; + struct napi_struct napi; + char tx_full; + char phycount; /* number of phys found */ + int options; + unsigned int shared_irq:1, /* shared irq possible */ + dxsuflo:1, /* disable transmit stop on uflo */ + mii:1; /* mii port available */ + struct net_device *next; + struct mii_if_info mii_if; + struct timer_list watchdog_timer; + u32 msg_enable; /* debug message level */ + + /* each bit indicates an available PHY */ + u32 phymask; + unsigned short chip_version; /* which variant this is */ + + /* saved registers during ethtool blink */ + u16 save_regs[4]; +}; + +static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); +static int pcnet32_probe1(unsigned long, int, struct pci_dev *); +static int pcnet32_open(struct net_device *); +static int pcnet32_init_ring(struct net_device *); +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *, + struct net_device *); +static void pcnet32_tx_timeout(struct net_device *dev); +static irqreturn_t pcnet32_interrupt(int, void *); +static int pcnet32_close(struct net_device *); +static struct net_device_stats *pcnet32_get_stats(struct net_device *); +static void pcnet32_load_multicast(struct net_device *dev); +static void pcnet32_set_multicast_list(struct net_device *); +static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); +static void pcnet32_watchdog(struct net_device *); +static int mdio_read(struct net_device *dev, int phy_id, int reg_num); +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, + int val); +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); +static void pcnet32_ethtool_test(struct net_device *dev, + struct ethtool_test *eth_test, u64 * data); +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); +static int pcnet32_get_regs_len(struct net_device *dev); +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *ptr); +static void pcnet32_purge_tx_ring(struct net_device *dev); +static int pcnet32_alloc_ring(struct net_device *dev, const char *name); +static void pcnet32_free_ring(struct net_device *dev); +static void pcnet32_check_media(struct net_device *dev, int verbose); + +static u16 pcnet32_wio_read_csr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RDP); +} + +static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_RDP); +} + +static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) +{ + outw(index, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_BDP); +} + +static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) +{ + outw(index, addr + PCNET32_WIO_RAP); + outw(val, addr + PCNET32_WIO_BDP); +} + +static u16 pcnet32_wio_read_rap(unsigned long 
addr) +{ + return inw(addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_write_rap(unsigned long addr, u16 val) +{ + outw(val, addr + PCNET32_WIO_RAP); +} + +static void pcnet32_wio_reset(unsigned long addr) +{ + inw(addr + PCNET32_WIO_RESET); +} + +static int pcnet32_wio_check(unsigned long addr) +{ + outw(88, addr + PCNET32_WIO_RAP); + return inw(addr + PCNET32_WIO_RAP) == 88; +} + +static const struct pcnet32_access pcnet32_wio = { + .read_csr = pcnet32_wio_read_csr, + .write_csr = pcnet32_wio_write_csr, + .read_bcr = pcnet32_wio_read_bcr, + .write_bcr = pcnet32_wio_write_bcr, + .read_rap = pcnet32_wio_read_rap, + .write_rap = pcnet32_wio_write_rap, + .reset = pcnet32_wio_reset +}; + +static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return inl(addr + PCNET32_DWIO_RDP) & 0xffff; +} + +static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_RDP); +} + +static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) +{ + outl(index, addr + PCNET32_DWIO_RAP); + return inl(addr + PCNET32_DWIO_BDP) & 0xffff; +} + +static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) +{ + outl(index, addr + PCNET32_DWIO_RAP); + outl(val, addr + PCNET32_DWIO_BDP); +} + +static u16 pcnet32_dwio_read_rap(unsigned long addr) +{ + return inl(addr + PCNET32_DWIO_RAP) & 0xffff; +} + +static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) +{ + outl(val, addr + PCNET32_DWIO_RAP); +} + +static void pcnet32_dwio_reset(unsigned long addr) +{ + inl(addr + PCNET32_DWIO_RESET); +} + +static int pcnet32_dwio_check(unsigned long addr) +{ + outl(88, addr + PCNET32_DWIO_RAP); + return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88; +} + +static const struct pcnet32_access pcnet32_dwio = { + .read_csr = pcnet32_dwio_read_csr, + .write_csr = pcnet32_dwio_write_csr, + .read_bcr = pcnet32_dwio_read_bcr, + .write_bcr = pcnet32_dwio_write_bcr, + .read_rap = pcnet32_dwio_read_rap, + .write_rap = pcnet32_dwio_write_rap, + .reset = pcnet32_dwio_reset +}; + +static void pcnet32_netif_stop(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + dev->trans_start = jiffies; /* prevent tx timeout */ + napi_disable(&lp->napi); + netif_tx_disable(dev); +} + +static void pcnet32_netif_start(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + ulong ioaddr = dev->base_addr; + u16 val; + + netif_wake_queue(dev); + val = lp->a->read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a->write_csr(ioaddr, CSR3, val); + napi_enable(&lp->napi); +} + +/* + * Allocate space for the new sized tx ring. + * Free old resources + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. 
+ */ +static void pcnet32_realloc_tx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_tx_head *new_tx_ring; + struct sk_buff **new_skb_list; + unsigned int entries = BIT(size); + + pcnet32_purge_tx_ring(dev); + + new_tx_ring = + pci_zalloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * entries, + &new_ring_dma_addr); + if (new_tx_ring == NULL) + return; + + new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC); + if (!new_dma_addr_list) + goto free_new_tx_ring; + + new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC); + if (!new_skb_list) + goto free_new_lists; + + kfree(lp->tx_skbuff); + kfree(lp->tx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, + lp->tx_ring, lp->tx_ring_dma_addr); + + lp->tx_ring_size = entries; + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->tx_len_bits = (size << 12); + lp->tx_ring = new_tx_ring; + lp->tx_ring_dma_addr = new_ring_dma_addr; + lp->tx_dma_addr = new_dma_addr_list; + lp->tx_skbuff = new_skb_list; + return; + +free_new_lists: + kfree(new_dma_addr_list); +free_new_tx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * entries, + new_tx_ring, + new_ring_dma_addr); +} + +/* + * Allocate space for the new sized rx ring. + * Re-use old receive buffers. + * alloc extra buffers + * free unneeded buffers + * free unneeded buffers + * Save new resources. + * Any failure keeps old resources. + * Must be called with lp->lock held. + */ +static void pcnet32_realloc_rx_ring(struct net_device *dev, + struct pcnet32_private *lp, + unsigned int size) +{ + dma_addr_t new_ring_dma_addr; + dma_addr_t *new_dma_addr_list; + struct pcnet32_rx_head *new_rx_ring; + struct sk_buff **new_skb_list; + int new, overlap; + unsigned int entries = BIT(size); + + new_rx_ring = + pci_zalloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * entries, + &new_ring_dma_addr); + if (new_rx_ring == NULL) + return; + + new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC); + if (!new_dma_addr_list) + goto free_new_rx_ring; + + new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC); + if (!new_skb_list) + goto free_new_lists; + + /* first copy the current receive buffers */ + overlap = min(entries, lp->rx_ring_size); + for (new = 0; new < overlap; new++) { + new_rx_ring[new] = lp->rx_ring[new]; + new_dma_addr_list[new] = lp->rx_dma_addr[new]; + new_skb_list[new] = lp->rx_skbuff[new]; + } + /* now allocate any new buffers needed */ + for (; new < entries; new++) { + struct sk_buff *rx_skbuff; + new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB); + rx_skbuff = new_skb_list[new]; + if (!rx_skbuff) { + /* keep the original lists and buffers */ + netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", + __func__); + goto free_all_new; + } + skb_reserve(rx_skbuff, NET_IP_ALIGN); + + new_dma_addr_list[new] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(lp->pci_dev, + new_dma_addr_list[new])) { + netif_err(lp, drv, dev, "%s dma mapping failed\n", + __func__); + dev_kfree_skb(new_skb_list[new]); + goto free_all_new; + } + new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); + new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); + new_rx_ring[new].status = cpu_to_le16(0x8000); + } + /* and free any unneeded buffers */ + for (; new < lp->rx_ring_size; 
new++) { + if (lp->rx_skbuff[new]) { + if (!pci_dma_mapping_error(lp->pci_dev, + lp->rx_dma_addr[new])) + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[new], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_skbuff[new]); + } + } + + kfree(lp->rx_skbuff); + kfree(lp->rx_dma_addr); + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + + lp->rx_ring_size = entries; + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->rx_len_bits = (size << 4); + lp->rx_ring = new_rx_ring; + lp->rx_ring_dma_addr = new_ring_dma_addr; + lp->rx_dma_addr = new_dma_addr_list; + lp->rx_skbuff = new_skb_list; + return; + +free_all_new: + while (--new >= lp->rx_ring_size) { + if (new_skb_list[new]) { + if (!pci_dma_mapping_error(lp->pci_dev, + new_dma_addr_list[new])) + pci_unmap_single(lp->pci_dev, + new_dma_addr_list[new], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(new_skb_list[new]); + } + } + kfree(new_skb_list); +free_new_lists: + kfree(new_dma_addr_list); +free_new_rx_ring: + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * entries, + new_rx_ring, + new_ring_dma_addr); +} + +static void pcnet32_purge_rx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + /* free all allocated skbuffs */ + for (i = 0; i < lp->rx_ring_size; i++) { + lp->rx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->rx_skbuff[i]) { + if (!pci_dma_mapping_error(lp->pci_dev, + lp->rx_dma_addr[i])) + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[i], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(lp->rx_skbuff[i]); + } + lp->rx_skbuff[i] = NULL; + lp->rx_dma_addr[i] = 0; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void pcnet32_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + pcnet32_interrupt(0, dev); + enable_irq(dev->irq); +} +#endif + +static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + mii_ethtool_gset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + r = 0; + } + return r; +} + +static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_ethtool_sset(&lp->mii_if, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; +} + +static void pcnet32_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + if (lp->pci_dev) + strlcpy(info->bus_info, pci_name(lp->pci_dev), + sizeof(info->bus_info)); + else + snprintf(info->bus_info, sizeof(info->bus_info), + "VLB 0x%lx", dev->base_addr); +} + +static u32 pcnet32_get_link(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r; + + spin_lock_irqsave(&lp->lock, flags); + if (lp->mii) { + r = mii_link_ok(&lp->mii_if); + } else if (lp->chip_version >= PCNET32_79C970A) { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + } else { /* can not detect link on really old chips */ 
+ r = 1; + } + spin_unlock_irqrestore(&lp->lock, flags); + + return r; +} + +static u32 pcnet32_get_msglevel(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void pcnet32_set_msglevel(struct net_device *dev, u32 value) +{ + struct pcnet32_private *lp = netdev_priv(dev); + lp->msg_enable = value; +} + +static int pcnet32_nway_reset(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + int r = -EOPNOTSUPP; + + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + r = mii_nway_restart(&lp->mii_if); + spin_unlock_irqrestore(&lp->lock, flags); + } + return r; +} + +static void pcnet32_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + ering->tx_max_pending = TX_MAX_RING_SIZE; + ering->tx_pending = lp->tx_ring_size; + ering->rx_max_pending = RX_MAX_RING_SIZE; + ering->rx_pending = lp->rx_ring_size; +} + +static int pcnet32_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + unsigned int size; + ulong ioaddr = dev->base_addr; + int i; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(dev)) + pcnet32_netif_stop(dev); + + spin_lock_irqsave(&lp->lock, flags); + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); + + /* set the minimum ring size to 4, to allow the loopback test to work + * unchanged. + */ + for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { + if (size <= (1 << i)) + break; + } + if ((1 << i) != lp->tx_ring_size) + pcnet32_realloc_tx_ring(dev, lp, i); + + size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); + for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { + if (size <= (1 << i)) + break; + } + if ((1 << i) != lp->rx_ring_size) + pcnet32_realloc_rx_ring(dev, lp, i); + + lp->napi.weight = lp->rx_ring_size / 2; + + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); + } + + spin_unlock_irqrestore(&lp->lock, flags); + + netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", + lp->rx_ring_size, lp->tx_ring_size); + + return 0; +} + +static void pcnet32_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); +} + +static int pcnet32_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return PCNET32_TEST_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void pcnet32_ethtool_test(struct net_device *dev, + struct ethtool_test *test, u64 * data) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int rc; + + if (test->flags == ETH_TEST_FL_OFFLINE) { + rc = pcnet32_loopback_test(dev, data); + if (rc) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Loopback test failed\n"); + test->flags |= ETH_TEST_FL_FAILED; + } else + netif_printk(lp, hw, KERN_DEBUG, dev, + "Loopback test passed\n"); + } else + netif_printk(lp, hw, KERN_DEBUG, dev, + "No tests to run (specify 'Offline' on ethtool)\n"); +} /* end pcnet32_ethtool_test */ + +static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) +{ + struct pcnet32_private *lp = netdev_priv(dev); + const struct pcnet32_access *a = lp->a; /* access to registers */ + ulong ioaddr = dev->base_addr; /* card base I/O address */ + struct sk_buff *skb; /* 
sk buff */ + int x, i; /* counters */ + int numbuffs = 4; /* number of TX/RX buffers and descs */ + u16 status = 0x8300; /* TX ring status */ + __le16 teststatus; /* test of ring status */ + int rc; /* return code */ + int size; /* size of packets */ + unsigned char *packet; /* source packet data */ + static const int data_len = 60; /* length of source packets */ + unsigned long flags; + unsigned long ticks; + + rc = 1; /* default to fail */ + + if (netif_running(dev)) + pcnet32_netif_stop(dev); + + spin_lock_irqsave(&lp->lock, flags); + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ + + numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); + + /* Reset the PCNET32 */ + lp->a->reset(ioaddr); + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + + /* switch pcnet32 to 32bit mode */ + lp->a->write_bcr(ioaddr, 20, 2); + + /* purge & init rings but don't actually restart */ + pcnet32_restart(dev, 0x0000); + + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + + /* Initialize Transmit buffers. */ + size = data_len + 15; + for (x = 0; x < numbuffs; x++) { + skb = netdev_alloc_skb(dev, size); + if (!skb) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Cannot allocate skb at line: %d!\n", + __LINE__); + goto clean_up; + } + packet = skb->data; + skb_put(skb, size); /* create space for data */ + lp->tx_skbuff[x] = skb; + lp->tx_ring[x].length = cpu_to_le16(-skb->len); + lp->tx_ring[x].misc = 0; + + /* put DA and SA into the skb */ + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + for (i = 0; i < 6; i++) + *packet++ = dev->dev_addr[i]; + /* type */ + *packet++ = 0x08; + *packet++ = 0x06; + /* packet number */ + *packet++ = x; + /* fill packet with data */ + for (i = 0; i < data_len; i++) + *packet++ = i; + + lp->tx_dma_addr[x] = + pci_map_single(lp->pci_dev, skb->data, skb->len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "DMA mapping error at line: %d!\n", + __LINE__); + goto clean_up; + } + lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[x].status = cpu_to_le16(status); + } + + x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */ + a->write_bcr(ioaddr, 32, x | 0x0002); + + /* set int loopback in CSR15 */ + x = a->read_csr(ioaddr, CSR15) & 0xfffc; + lp->a->write_csr(ioaddr, CSR15, x | 0x0044); + + teststatus = cpu_to_le16(0x8000); + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ + + /* Check status of descriptors */ + for (x = 0; x < numbuffs; x++) { + ticks = 0; + rmb(); + while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { + spin_unlock_irqrestore(&lp->lock, flags); + msleep(1); + spin_lock_irqsave(&lp->lock, flags); + rmb(); + ticks++; + } + if (ticks == 200) { + netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x); + break; + } + } + + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ + wmb(); + if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { + netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n"); + + for (x = 0; x < numbuffs; x++) { + netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x); + skb = lp->rx_skbuff[x]; + for (i = 0; i < size; i++) + pr_cont(" %02x", *(skb->data + i)); + pr_cont("\n"); + } + } + + x = 0; + rc = 0; + while (x < numbuffs && !rc) { + skb = lp->rx_skbuff[x]; + packet = lp->tx_skbuff[x]->data; + for (i = 0; i < size; i++) { + if (*(skb->data + i) != packet[i]) { + netif_printk(lp, hw, 
KERN_DEBUG, dev, + "Error in compare! %2x - %02x %02x\n", + i, *(skb->data + i), packet[i]); + rc = 1; + break; + } + } + x++; + } + +clean_up: + *data1 = rc; + pcnet32_purge_tx_ring(dev); + + x = a->read_csr(ioaddr, CSR15); + a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */ + + x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ + a->write_bcr(ioaddr, 32, (x & ~0x0002)); + + if (netif_running(dev)) { + pcnet32_netif_start(dev); + pcnet32_restart(dev, CSR0_NORMAL); + } else { + pcnet32_purge_rx_ring(dev); + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ + } + spin_unlock_irqrestore(&lp->lock, flags); + + return rc; +} /* end pcnet32_loopback_test */ + +static int pcnet32_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct pcnet32_private *lp = netdev_priv(dev); + const struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + int i; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* Save the current value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + lp->save_regs[i - 4] = a->read_bcr(ioaddr, i); + spin_unlock_irqrestore(&lp->lock, flags); + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + case ETHTOOL_ID_OFF: + /* Blink the led */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); + spin_unlock_irqrestore(&lp->lock, flags); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore the original value of the bcrs */ + spin_lock_irqsave(&lp->lock, flags); + for (i = 4; i < 8; i++) + a->write_bcr(ioaddr, i, lp->save_regs[i - 4]); + spin_unlock_irqrestore(&lp->lock, flags); + } + return 0; +} + +/* + * lp->lock must be held. + */ +static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, + int can_sleep) +{ + int csr5; + struct pcnet32_private *lp = netdev_priv(dev); + const struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + int ticks; + + /* really old chips have to be stopped. */ + if (lp->chip_version < PCNET32_79C970A) + return 0; + + /* set SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); + + /* poll waiting for bit to be set */ + ticks = 0; + while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { + spin_unlock_irqrestore(&lp->lock, *flags); + if (can_sleep) + msleep(1); + else + mdelay(1); + spin_lock_irqsave(&lp->lock, *flags); + ticks++; + if (ticks > 200) { + netif_printk(lp, hw, KERN_DEBUG, dev, + "Error getting into suspend!\n"); + return 0; + } + } + return 1; +} + +/* + * process one receive descriptor entry + */ + +static void pcnet32_rx_entry(struct net_device *dev, + struct pcnet32_private *lp, + struct pcnet32_rx_head *rxp, + int entry) +{ + int status = (short)le16_to_cpu(rxp->status) >> 8; + int rx_in_place = 0; + struct sk_buff *skb; + short pkt_len; + + if (status != 0x03) { /* There was an error. */ + /* + * There is a tricky error noted by John Murphy, + * to Russ Nelson: Even with full-sized + * buffers it's possible for a jabber packet to use two + * buffers, with only the last correctly noting the error. + */ + if (status & 0x01) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet. 
*/ + if (status & 0x20) + dev->stats.rx_frame_errors++; + if (status & 0x10) + dev->stats.rx_over_errors++; + if (status & 0x08) + dev->stats.rx_crc_errors++; + if (status & 0x04) + dev->stats.rx_fifo_errors++; + return; + } + + pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; + + /* Discard oversize frames. */ + if (unlikely(pkt_len > PKT_BUF_SIZE)) { + netif_err(lp, drv, dev, "Impossible packet size %d!\n", + pkt_len); + dev->stats.rx_errors++; + return; + } + if (pkt_len < 60) { + netif_err(lp, rx_err, dev, "Runt packet!\n"); + dev->stats.rx_errors++; + return; + } + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + dma_addr_t new_dma_addr; + + newskb = netdev_alloc_skb(dev, PKT_BUF_SKB); + /* + * map the new buffer, if mapping fails, drop the packet and + * reuse the old buffer + */ + if (newskb) { + skb_reserve(newskb, NET_IP_ALIGN); + new_dma_addr = pci_map_single(lp->pci_dev, + newskb->data, + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) { + netif_err(lp, rx_err, dev, + "DMA mapping error.\n"); + dev_kfree_skb(newskb); + skb = NULL; + } else { + skb = lp->rx_skbuff[entry]; + pci_unmap_single(lp->pci_dev, + lp->rx_dma_addr[entry], + PKT_BUF_SIZE, + PCI_DMA_FROMDEVICE); + skb_put(skb, pkt_len); + lp->rx_skbuff[entry] = newskb; + lp->rx_dma_addr[entry] = new_dma_addr; + rxp->base = cpu_to_le32(new_dma_addr); + rx_in_place = 1; + } + } else + skb = NULL; + } else + skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN); + + if (skb == NULL) { + dev->stats.rx_dropped++; + return; + } + if (!rx_in_place) { + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pkt_len); /* Make room */ + pci_dma_sync_single_for_cpu(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, + (unsigned char *)(lp->rx_skbuff[entry]->data), + pkt_len); + pci_dma_sync_single_for_device(lp->pci_dev, + lp->rx_dma_addr[entry], + pkt_len, + PCI_DMA_FROMDEVICE); + } + dev->stats.rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + dev->stats.rx_packets++; +} + +static int pcnet32_rx(struct net_device *dev, int budget) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int entry = lp->cur_rx & lp->rx_mod_mask; + struct pcnet32_rx_head *rxp = &lp->rx_ring[entry]; + int npackets = 0; + + /* If we own the next entry, it's a new packet. Send it up. */ + while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) { + pcnet32_rx_entry(dev, lp, rxp, entry); + npackets += 1; + /* + * The docs say that the buffer length isn't touched, but Andrew + * Boyd of QNX reports that some revs of the 79C965 clear it. + */ + rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE); + wmb(); /* Make sure owner changes after others are visible */ + rxp->status = cpu_to_le16(0x8000); + entry = (++lp->cur_rx) & lp->rx_mod_mask; + rxp = &lp->rx_ring[entry]; + } + + return npackets; +} + +static int pcnet32_tx(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned int dirty_tx = lp->dirty_tx; + int delta; + int must_restart = 0; + + while (dirty_tx != lp->cur_tx) { + int entry = dirty_tx & lp->tx_mod_mask; + int status = (short)le16_to_cpu(lp->tx_ring[entry].status); + + if (status < 0) + break; /* It still hasn't been Txed */ + + lp->tx_ring[entry].base = 0; + + if (status & 0x4000) { + /* There was a major error, log it. 
*/ + int err_status = le32_to_cpu(lp->tx_ring[entry].misc); + dev->stats.tx_errors++; + netif_err(lp, tx_err, dev, + "Tx error status=%04x err_status=%08x\n", + status, err_status); + if (err_status & 0x04000000) + dev->stats.tx_aborted_errors++; + if (err_status & 0x08000000) + dev->stats.tx_carrier_errors++; + if (err_status & 0x10000000) + dev->stats.tx_window_errors++; +#ifndef DO_DXSUFLO + if (err_status & 0x40000000) { + dev->stats.tx_fifo_errors++; + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); + must_restart = 1; + } +#else + if (err_status & 0x40000000) { + dev->stats.tx_fifo_errors++; + if (!lp->dxsuflo) { /* If controller doesn't recover ... */ + /* Ackk! On FIFO errors the Tx unit is turned off! */ + /* Remove this verbosity later! */ + netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); + must_restart = 1; + } + } +#endif + } else { + if (status & 0x1800) + dev->stats.collisions++; + dev->stats.tx_packets++; + } + + /* We must free the original skb */ + if (lp->tx_skbuff[entry]) { + pci_unmap_single(lp->pci_dev, + lp->tx_dma_addr[entry], + lp->tx_skbuff[entry]-> + len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[entry]); + lp->tx_skbuff[entry] = NULL; + lp->tx_dma_addr[entry] = 0; + } + dirty_tx++; + } + + delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); + if (delta > lp->tx_ring_size) { + netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n", + dirty_tx, lp->cur_tx, lp->tx_full); + dirty_tx += lp->tx_ring_size; + delta -= lp->tx_ring_size; + } + + if (lp->tx_full && + netif_queue_stopped(dev) && + delta < lp->tx_ring_size - 2) { + /* The ring is no longer full, clear tbusy. */ + lp->tx_full = 0; + netif_wake_queue(dev); + } + lp->dirty_tx = dirty_tx; + + return must_restart; +} + +static int pcnet32_poll(struct napi_struct *napi, int budget) +{ + struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); + struct net_device *dev = lp->dev; + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + int work_done; + u16 val; + + work_done = pcnet32_rx(dev, budget); + + spin_lock_irqsave(&lp->lock, flags); + if (pcnet32_tx(dev)) { + /* reset the chip to clear the error condition, then restart */ + lp->a->reset(ioaddr); + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + pcnet32_restart(dev, CSR0_START); + netif_wake_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + + if (work_done < budget) { + spin_lock_irqsave(&lp->lock, flags); + + __napi_complete(napi); + + /* clear interrupt masks */ + val = lp->a->read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a->write_csr(ioaddr, CSR3, val); + + /* Set interrupt enable. 
*/ + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); + + spin_unlock_irqrestore(&lp->lock, flags); + } + return work_done; +} + +#define PCNET32_REGS_PER_PHY 32 +#define PCNET32_MAX_PHYS 32 +static int pcnet32_get_regs_len(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int j = lp->phycount * PCNET32_REGS_PER_PHY; + + return (PCNET32_NUM_REGS + j) * sizeof(u16); +} + +static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *ptr) +{ + int i, csr0; + u16 *buff = ptr; + struct pcnet32_private *lp = netdev_priv(dev); + const struct pcnet32_access *a = lp->a; + ulong ioaddr = dev->base_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + + csr0 = a->read_csr(ioaddr, CSR0); + if (!(csr0 & CSR0_STOP)) /* If not stopped */ + pcnet32_suspend(dev, &flags, 1); + + /* read address PROM */ + for (i = 0; i < 16; i += 2) + *buff++ = inw(ioaddr + i); + + /* read control and status registers */ + for (i = 0; i < 90; i++) + *buff++ = a->read_csr(ioaddr, i); + + *buff++ = a->read_csr(ioaddr, 112); + *buff++ = a->read_csr(ioaddr, 114); + + /* read bus configuration registers */ + for (i = 0; i < 30; i++) + *buff++ = a->read_bcr(ioaddr, i); + + *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ + + for (i = 31; i < 36; i++) + *buff++ = a->read_bcr(ioaddr, i); + + /* read mii phy registers */ + if (lp->mii) { + int j; + for (j = 0; j < PCNET32_MAX_PHYS; j++) { + if (lp->phymask & (1 << j)) { + for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { + lp->a->write_bcr(ioaddr, 33, + (j << 5) | i); + *buff++ = lp->a->read_bcr(ioaddr, 34); + } + } + } + } + + if (!(csr0 & CSR0_STOP)) { /* If not stopped */ + int csr5; + + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = a->read_csr(ioaddr, CSR5); + a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static const struct ethtool_ops pcnet32_ethtool_ops = { + .get_settings = pcnet32_get_settings, + .set_settings = pcnet32_set_settings, + .get_drvinfo = pcnet32_get_drvinfo, + .get_msglevel = pcnet32_get_msglevel, + .set_msglevel = pcnet32_set_msglevel, + .nway_reset = pcnet32_nway_reset, + .get_link = pcnet32_get_link, + .get_ringparam = pcnet32_get_ringparam, + .set_ringparam = pcnet32_set_ringparam, + .get_strings = pcnet32_get_strings, + .self_test = pcnet32_ethtool_test, + .set_phys_id = pcnet32_set_phys_id, + .get_regs_len = pcnet32_get_regs_len, + .get_regs = pcnet32_get_regs, + .get_sset_count = pcnet32_get_sset_count, +}; + +/* only probes for non-PCI devices, the rest are handled by + * pci_register_driver via pcnet32_probe_pci */ + +static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist) +{ + unsigned int *port, ioaddr; + + /* search for PCnet32 VLB cards at known addresses */ + for (port = pcnet32_portlist; (ioaddr = *port); port++) { + if (request_region + (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { + /* check if there is really a pcnet chip on that ioaddr */ + if ((inb(ioaddr + 14) == 0x57) && + (inb(ioaddr + 15) == 0x57)) { + pcnet32_probe1(ioaddr, 0, NULL); + } else { + release_region(ioaddr, PCNET32_TOTAL_SIZE); + } + } + } +} + +static int +pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned long ioaddr; + int err; + + err = pci_enable_device(pdev); + if (err < 0) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("failed to enable device -- err=%d\n", err); + return err; + } + pci_set_master(pdev); + + ioaddr = pci_resource_start(pdev, 0); + if (!ioaddr) { + if (pcnet32_debug 
& NETIF_MSG_PROBE) + pr_err("card has no PCI IO resources, aborting\n"); + return -ENODEV; + } + + if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("architecture does not support 32bit PCI busmaster DMA\n"); + return -ENODEV; + } + if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("io address range already allocated\n"); + return -EBUSY; + } + + err = pcnet32_probe1(ioaddr, 1, pdev); + if (err < 0) + pci_disable_device(pdev); + + return err; +} + +static const struct net_device_ops pcnet32_netdev_ops = { + .ndo_open = pcnet32_open, + .ndo_stop = pcnet32_close, + .ndo_start_xmit = pcnet32_start_xmit, + .ndo_tx_timeout = pcnet32_tx_timeout, + .ndo_get_stats = pcnet32_get_stats, + .ndo_set_rx_mode = pcnet32_set_multicast_list, + .ndo_do_ioctl = pcnet32_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = pcnet32_poll_controller, +#endif +}; + +/* pcnet32_probe1 + * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. + * pdev will be NULL when called from pcnet32_probe_vlbus. + */ +static int +pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) +{ + struct pcnet32_private *lp; + int i, media; + int fdx, mii, fset, dxsuflo, sram; + int chip_version; + char *chipname; + struct net_device *dev; + const struct pcnet32_access *a = NULL; + u8 promaddr[ETH_ALEN]; + int ret = -ENODEV; + + /* reset the chip */ + pcnet32_wio_reset(ioaddr); + + /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ + if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { + a = &pcnet32_wio; + } else { + pcnet32_dwio_reset(ioaddr); + if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && + pcnet32_dwio_check(ioaddr)) { + a = &pcnet32_dwio; + } else { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("No access methods\n"); + goto err_release_region; + } + } + + chip_version = + a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); + if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) + pr_info(" PCnet chip version is %#x\n", chip_version); + if ((chip_version & 0xfff) != 0x003) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("Unsupported chip version\n"); + goto err_release_region; + } + + /* initialize variables */ + fdx = mii = fset = dxsuflo = sram = 0; + chip_version = (chip_version >> 12) & 0xffff; + + switch (chip_version) { + case 0x2420: + chipname = "PCnet/PCI 79C970"; /* PCI */ + break; + case 0x2430: + if (shared) + chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ + else + chipname = "PCnet/32 79C965"; /* 486/VL bus */ + break; + case 0x2621: + chipname = "PCnet/PCI II 79C970A"; /* PCI */ + fdx = 1; + break; + case 0x2623: + chipname = "PCnet/FAST 79C971"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2624: + chipname = "PCnet/FAST+ 79C972"; /* PCI */ + fdx = 1; + mii = 1; + fset = 1; + break; + case 0x2625: + chipname = "PCnet/FAST III 79C973"; /* PCI */ + fdx = 1; + mii = 1; + sram = 1; + break; + case 0x2626: + chipname = "PCnet/Home 79C978"; /* PCI */ + fdx = 1; + /* + * This is based on specs published at www.amd.com. This section + * assumes that a card with a 79C978 wants to go into standard + * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, + * and the module option homepna=1 can select this instead. 
+ */ + media = a->read_bcr(ioaddr, 49); + media &= ~3; /* default to 10Mb ethernet */ + if (cards_found < MAX_UNITS && homepna[cards_found]) + media |= 1; /* switch to home wiring mode */ + if (pcnet32_debug & NETIF_MSG_PROBE) + printk(KERN_DEBUG PFX "media set to %sMbit mode\n", + (media & 1) ? "1" : "10"); + a->write_bcr(ioaddr, 49, media); + break; + case 0x2627: + chipname = "PCnet/FAST III 79C975"; /* PCI */ + fdx = 1; + mii = 1; + sram = 1; + break; + case 0x2628: + chipname = "PCnet/PRO 79C976"; + fdx = 1; + mii = 1; + break; + default: + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("PCnet version %#x, no PCnet32 chip\n", + chip_version); + goto err_release_region; + } + + /* + * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit + * starting until the packet is loaded. Strike one for reliability, lose + * one for latency - although on PCI this isn't a big loss. Older chips + * have FIFO's smaller than a packet, so you can't do this. + * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. + */ + + if (fset) { + a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); + a->write_csr(ioaddr, 80, + (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); + dxsuflo = 1; + } + + /* + * The Am79C973/Am79C975 controllers come with 12K of SRAM + * which we can use for the Tx/Rx buffers but most importantly, + * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid + * Tx fifo underflows. + */ + if (sram) { + /* + * The SRAM is being configured in two steps. First we + * set the SRAM size in the BCR25:SRAM_SIZE bits. According + * to the datasheet, each bit corresponds to a 512-byte + * page so we can have at most 24 pages. The SRAM_SIZE + * holds the value of the upper 8 bits of the 16-bit SRAM size. + * The low 8-bits start at 0x00 and end at 0xff. So the + * address range is from 0x0000 up to 0x17ff. Therefore, + * the SRAM_SIZE is set to 0x17. The next step is to set + * the BCR26:SRAM_BND midway through so the Tx and Rx + * buffers can share the SRAM equally. + */ + a->write_bcr(ioaddr, 25, 0x17); + a->write_bcr(ioaddr, 26, 0xc); + /* And finally enable the NOUFLO bit */ + a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); + } + + dev = alloc_etherdev(sizeof(*lp)); + if (!dev) { + ret = -ENOMEM; + goto err_release_region; + } + + if (pdev) + SET_NETDEV_DEV(dev, &pdev->dev); + + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("%s at %#3lx,", chipname, ioaddr); + + /* In most chips, after a chip reset, the ethernet address is read from the + * station address PROM at the base address and programmed into the + * "Physical Address Registers" CSR12-14. + * As a precautionary measure, we read the PROM values and complain if + * they disagree with the CSRs. If they miscompare, and the PROM addr + * is valid, then the PROM addr is used. + */ + for (i = 0; i < 3; i++) { + unsigned int val; + val = a->read_csr(ioaddr, i + 12) & 0x0ffff; + /* There may be endianness issues here. 
*/ + dev->dev_addr[2 * i] = val & 0x0ff; + dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; + } + + /* read PROM address and compare with CSR address */ + for (i = 0; i < ETH_ALEN; i++) + promaddr[i] = inb(ioaddr + i); + + if (!ether_addr_equal(promaddr, dev->dev_addr) || + !is_valid_ether_addr(dev->dev_addr)) { + if (is_valid_ether_addr(promaddr)) { + if (pcnet32_debug & NETIF_MSG_PROBE) { + pr_cont(" warning: CSR address invalid,\n"); + pr_info(" using instead PROM address of"); + } + memcpy(dev->dev_addr, promaddr, ETH_ALEN); + } + } + + /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ + if (!is_valid_ether_addr(dev->dev_addr)) + eth_zero_addr(dev->dev_addr); + + if (pcnet32_debug & NETIF_MSG_PROBE) { + pr_cont(" %pM", dev->dev_addr); + + /* Version 0x2623 and 0x2624 */ + if (((chip_version + 1) & 0xfffe) == 0x2624) { + i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ + pr_info(" tx_start_pt(0x%04x):", i); + switch (i >> 10) { + case 0: + pr_cont(" 20 bytes,"); + break; + case 1: + pr_cont(" 64 bytes,"); + break; + case 2: + pr_cont(" 128 bytes,"); + break; + case 3: + pr_cont("~220 bytes,"); + break; + } + i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ + pr_cont(" BCR18(%x):", i & 0xffff); + if (i & (1 << 5)) + pr_cont("BurstWrEn "); + if (i & (1 << 6)) + pr_cont("BurstRdEn "); + if (i & (1 << 7)) + pr_cont("DWordIO "); + if (i & (1 << 11)) + pr_cont("NoUFlow "); + i = a->read_bcr(ioaddr, 25); + pr_info(" SRAMSIZE=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 26); + pr_cont(" SRAM_BND=0x%04x,", i << 8); + i = a->read_bcr(ioaddr, 27); + if (i & (1 << 14)) + pr_cont("LowLatRx"); + } + } + + dev->base_addr = ioaddr; + lp = netdev_priv(dev); + /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ + lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block), + &lp->init_dma_addr); + if (!lp->init_block) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_err("Consistent memory allocation failed\n"); + ret = -ENOMEM; + goto err_free_netdev; + } + lp->pci_dev = pdev; + + lp->dev = dev; + + spin_lock_init(&lp->lock); + + lp->name = chipname; + lp->shared_irq = shared; + lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ + lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); + lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); + lp->mii_if.full_duplex = fdx; + lp->mii_if.phy_id_mask = 0x1f; + lp->mii_if.reg_num_mask = 0x1f; + lp->dxsuflo = dxsuflo; + lp->mii = mii; + lp->chip_version = chip_version; + lp->msg_enable = pcnet32_debug; + if ((cards_found >= MAX_UNITS) || + (options[cards_found] >= sizeof(options_mapping))) + lp->options = PCNET32_PORT_ASEL; + else + lp->options = options_mapping[options[cards_found]]; + lp->mii_if.dev = dev; + lp->mii_if.mdio_read = mdio_read; + lp->mii_if.mdio_write = mdio_write; + + /* napi.weight is used in both the napi and non-napi cases */ + lp->napi.weight = lp->rx_ring_size / 2; + + netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); + + if (fdx && !(lp->options & PCNET32_PORT_ASEL) && + ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) + lp->options |= PCNET32_PORT_FD; + + lp->a = a; + + /* prior to register_netdev, dev->name is not yet correct */ + if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { + ret = -ENOMEM; + goto err_free_ring; + } + /* detect special T1/E1 WAN card by checking for 
MAC address */ + if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && + dev->dev_addr[2] == 0x75) + lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; + + lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ + lp->init_block->tlen_rlen = + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->filter[0] = 0x00000000; + lp->init_block->filter[1] = 0x00000000; + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); + + /* switch pcnet32 to 32bit mode */ + a->write_bcr(ioaddr, 20, 2); + + a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); + a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); + + if (pdev) { /* use the IRQ provided by PCI */ + dev->irq = pdev->irq; + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(" assigned IRQ %d\n", dev->irq); + } else { + unsigned long irq_mask = probe_irq_on(); + + /* + * To auto-IRQ we enable the initialization-done and DMA error + * interrupts. For ISA boards we get a DMA error, but VLB and PCI + * boards will work. + */ + /* Trigger an initialization just for the interrupt. */ + a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT); + mdelay(1); + + dev->irq = probe_irq_off(irq_mask); + if (!dev->irq) { + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(", failed to detect IRQ line\n"); + ret = -ENODEV; + goto err_free_ring; + } + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_cont(", probed IRQ %d\n", dev->irq); + } + + /* Set the mii phy_id so that we can query the link state */ + if (lp->mii) { + /* lp->phycount and lp->phymask are set to 0 by memset above */ + + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f; + /* scan for PHYs */ + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + unsigned short id1, id2; + + id1 = mdio_read(dev, i, MII_PHYSID1); + if (id1 == 0xffff) + continue; + id2 = mdio_read(dev, i, MII_PHYSID2); + if (id2 == 0xffff) + continue; + if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) + continue; /* 79C971 & 79C972 have phantom phy at id 31 */ + lp->phycount++; + lp->phymask |= (1 << i); + lp->mii_if.phy_id = i; + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("Found PHY %04x:%04x at address %d\n", + id1, id2, i); + } + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); + if (lp->phycount > 1) + lp->options |= PCNET32_PORT_MII; + } + + init_timer(&lp->watchdog_timer); + lp->watchdog_timer.data = (unsigned long)dev; + lp->watchdog_timer.function = (void *)&pcnet32_watchdog; + + /* The PCNET32-specific entries in the device structure. */ + dev->netdev_ops = &pcnet32_netdev_ops; + dev->ethtool_ops = &pcnet32_ethtool_ops; + dev->watchdog_timeo = (5 * HZ); + + /* Fill in the generic fields of the device structure. 
*/ + if (register_netdev(dev)) + goto err_free_ring; + + if (pdev) { + pci_set_drvdata(pdev, dev); + } else { + lp->next = pcnet32_dev; + pcnet32_dev = dev; + } + + if (pcnet32_debug & NETIF_MSG_PROBE) + pr_info("%s: registered as %s\n", dev->name, lp->name); + cards_found++; + + /* enable LED writes */ + a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); + + return 0; + +err_free_ring: + pcnet32_free_ring(dev); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); +err_free_netdev: + free_netdev(dev); +err_release_region: + release_region(ioaddr, PCNET32_TOTAL_SIZE); + return ret; +} + +/* if any allocation fails, caller must also call pcnet32_free_ring */ +static int pcnet32_alloc_ring(struct net_device *dev, const char *name) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + lp->tx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, + &lp->tx_ring_dma_addr); + if (lp->tx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return -ENOMEM; + } + + lp->rx_ring = pci_alloc_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, + &lp->rx_ring_dma_addr); + if (lp->rx_ring == NULL) { + netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + return -ENOMEM; + } + + lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), + GFP_ATOMIC); + if (!lp->tx_dma_addr) + return -ENOMEM; + + lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), + GFP_ATOMIC); + if (!lp->rx_dma_addr) + return -ENOMEM; + + lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!lp->tx_skbuff) + return -ENOMEM; + + lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), + GFP_ATOMIC); + if (!lp->rx_skbuff) + return -ENOMEM; + + return 0; +} + +static void pcnet32_free_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + + kfree(lp->tx_skbuff); + lp->tx_skbuff = NULL; + + kfree(lp->rx_skbuff); + lp->rx_skbuff = NULL; + + kfree(lp->tx_dma_addr); + lp->tx_dma_addr = NULL; + + kfree(lp->rx_dma_addr); + lp->rx_dma_addr = NULL; + + if (lp->tx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_tx_head) * + lp->tx_ring_size, lp->tx_ring, + lp->tx_ring_dma_addr); + lp->tx_ring = NULL; + } + + if (lp->rx_ring) { + pci_free_consistent(lp->pci_dev, + sizeof(struct pcnet32_rx_head) * + lp->rx_ring_size, lp->rx_ring, + lp->rx_ring_dma_addr); + lp->rx_ring = NULL; + } +} + +static int pcnet32_open(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct pci_dev *pdev = lp->pci_dev; + unsigned long ioaddr = dev->base_addr; + u16 val; + int i; + int rc; + unsigned long flags; + + if (request_irq(dev->irq, pcnet32_interrupt, + lp->shared_irq ? 
IRQF_SHARED : 0, dev->name, + (void *)dev)) { + return -EAGAIN; + } + + spin_lock_irqsave(&lp->lock, flags); + /* Check for a valid station address */ + if (!is_valid_ether_addr(dev->dev_addr)) { + rc = -EINVAL; + goto err_free_irq; + } + + /* Reset the PCNET32 */ + lp->a->reset(ioaddr); + + /* switch pcnet32 to 32bit mode */ + lp->a->write_bcr(ioaddr, 20, 2); + + netif_printk(lp, ifup, KERN_DEBUG, dev, + "%s() irq %d tx/rx rings %#x/%#x init %#x\n", + __func__, dev->irq, (u32) (lp->tx_ring_dma_addr), + (u32) (lp->rx_ring_dma_addr), + (u32) (lp->init_dma_addr)); + + /* set/reset autoselect bit */ + val = lp->a->read_bcr(ioaddr, 2) & ~2; + if (lp->options & PCNET32_PORT_ASEL) + val |= 2; + lp->a->write_bcr(ioaddr, 2, val); + + /* handle full duplex setting */ + if (lp->mii_if.full_duplex) { + val = lp->a->read_bcr(ioaddr, 9) & ~3; + if (lp->options & PCNET32_PORT_FD) { + val |= 1; + if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) + val |= 2; + } else if (lp->options & PCNET32_PORT_ASEL) { + /* workaround of xSeries250, turn on for 79C975 only */ + if (lp->chip_version == 0x2627) + val |= 3; + } + lp->a->write_bcr(ioaddr, 9, val); + } + + /* set/reset GPSI bit in test register */ + val = lp->a->read_csr(ioaddr, 124) & ~0x10; + if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) + val |= 0x10; + lp->a->write_csr(ioaddr, 124, val); + + /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ + if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && + (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || + pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { + if (lp->options & PCNET32_PORT_ASEL) { + lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; + netif_printk(lp, link, KERN_DEBUG, dev, + "Setting 100Mb-Full Duplex\n"); + } + } + if (lp->phycount < 2) { + /* + * 24 Jun 2004 according AMD, in order to change the PHY, + * DANAS (or DISPM for 79C976) must be set; then select the speed, + * duplex, and/or enable auto negotiation, and clear DANAS + */ + if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { + lp->a->write_bcr(ioaddr, 32, + lp->a->read_bcr(ioaddr, 32) | 0x0080); + /* disable Auto Negotiation, set 10Mpbs, HD */ + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8; + if (lp->options & PCNET32_PORT_FD) + val |= 0x10; + if (lp->options & PCNET32_PORT_100) + val |= 0x08; + lp->a->write_bcr(ioaddr, 32, val); + } else { + if (lp->options & PCNET32_PORT_ASEL) { + lp->a->write_bcr(ioaddr, 32, + lp->a->read_bcr(ioaddr, + 32) | 0x0080); + /* enable auto negotiate, setup, disable fd */ + val = lp->a->read_bcr(ioaddr, 32) & ~0x98; + val |= 0x20; + lp->a->write_bcr(ioaddr, 32, val); + } + } + } else { + int first_phy = -1; + u16 bmcr; + u32 bcr9; + struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + + /* + * There is really no good other way to handle multiple PHYs + * other than turning off all automatics + */ + val = lp->a->read_bcr(ioaddr, 2); + lp->a->write_bcr(ioaddr, 2, val & ~2); + val = lp->a->read_bcr(ioaddr, 32); + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ + + if (!(lp->options & PCNET32_PORT_ASEL)) { + /* setup ecmd */ + ecmd.port = PORT_MII; + ecmd.transceiver = XCVR_INTERNAL; + ecmd.autoneg = AUTONEG_DISABLE; + ethtool_cmd_speed_set(&ecmd, + (lp->options & PCNET32_PORT_100) ? 
+ SPEED_100 : SPEED_10); + bcr9 = lp->a->read_bcr(ioaddr, 9); + + if (lp->options & PCNET32_PORT_FD) { + ecmd.duplex = DUPLEX_FULL; + bcr9 |= (1 << 0); + } else { + ecmd.duplex = DUPLEX_HALF; + bcr9 |= ~(1 << 0); + } + lp->a->write_bcr(ioaddr, 9, bcr9); + } + + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + if (lp->phymask & (1 << i)) { + /* isolate all but the first PHY */ + bmcr = mdio_read(dev, i, MII_BMCR); + if (first_phy == -1) { + first_phy = i; + mdio_write(dev, i, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } else { + mdio_write(dev, i, MII_BMCR, + bmcr | BMCR_ISOLATE); + } + /* use mii_ethtool_sset to setup PHY */ + lp->mii_if.phy_id = i; + ecmd.phy_address = i; + if (lp->options & PCNET32_PORT_ASEL) { + mii_ethtool_gset(&lp->mii_if, &ecmd); + ecmd.autoneg = AUTONEG_ENABLE; + } + mii_ethtool_sset(&lp->mii_if, &ecmd); + } + } + lp->mii_if.phy_id = first_phy; + netif_info(lp, link, dev, "Using PHY number %d\n", first_phy); + } + +#ifdef DO_DXSUFLO + if (lp->dxsuflo) { /* Disable transmit stop on underflow */ + val = lp->a->read_csr(ioaddr, CSR3); + val |= 0x40; + lp->a->write_csr(ioaddr, CSR3, val); + } +#endif + + lp->init_block->mode = + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); + pcnet32_load_multicast(dev); + + if (pcnet32_init_ring(dev)) { + rc = -ENOMEM; + goto err_free_ring; + } + + napi_enable(&lp->napi); + + /* Re-initialize the PCNET32, and start it when done. */ + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); + + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); + + netif_start_queue(dev); + + if (lp->chip_version >= PCNET32_79C970A) { + /* Print the link status and start the watchdog */ + pcnet32_check_media(dev, 1); + mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT); + } + + i = 0; + while (i++ < 100) + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) + break; + /* + * We used to clear the InitDone bit, 0x0100, here but Mark Stockton + * reports that doing so triggers a bug in the '974. + */ + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL); + + netif_printk(lp, ifup, KERN_DEBUG, dev, + "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n", + i, + (u32) (lp->init_dma_addr), + lp->a->read_csr(ioaddr, CSR0)); + + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; /* Always succeed */ + +err_free_ring: + /* free any allocated skbuffs */ + pcnet32_purge_rx_ring(dev); + + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ + lp->a->write_bcr(ioaddr, 20, 4); + +err_free_irq: + spin_unlock_irqrestore(&lp->lock, flags); + free_irq(dev->irq, dev); + return rc; +} + +/* + * The LANCE has been halted for one reason or another (busmaster memory + * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, + * etc.). Modern LANCE variants always reload their ring-buffer + * configuration when restarted, so we must reinitialize our ring + * context before restarting. As part of this reinitialization, + * find all packets still on the Tx ring and pretend that they had been + * sent (in effect, drop the packets on the floor) - the higher-level + * protocols will time out and retransmit. It'd be better to shuffle + * these skbs to a temp list and then actually re-Tx them after + * restarting the chip, but I'm too lazy to do so right now. 
dplatt@3do.com + */ + +static void pcnet32_purge_tx_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + if (lp->tx_skbuff[i]) { + if (!pci_dma_mapping_error(lp->pci_dev, + lp->tx_dma_addr[i])) + pci_unmap_single(lp->pci_dev, + lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(lp->tx_skbuff[i]); + } + lp->tx_skbuff[i] = NULL; + lp->tx_dma_addr[i] = 0; + } +} + +/* Initialize the PCNET32 Rx and Tx rings. */ +static int pcnet32_init_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int i; + + lp->tx_full = 0; + lp->cur_rx = lp->cur_tx = 0; + lp->dirty_rx = lp->dirty_tx = 0; + + for (i = 0; i < lp->rx_ring_size; i++) { + struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; + if (rx_skbuff == NULL) { + lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB); + rx_skbuff = lp->rx_skbuff[i]; + if (!rx_skbuff) { + /* there is not much we can do at this point */ + netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", + __func__); + return -1; + } + skb_reserve(rx_skbuff, NET_IP_ALIGN); + } + + rmb(); + if (lp->rx_dma_addr[i] == 0) { + lp->rx_dma_addr[i] = + pci_map_single(lp->pci_dev, rx_skbuff->data, + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(lp->pci_dev, + lp->rx_dma_addr[i])) { + /* there is not much we can do at this point */ + netif_err(lp, drv, dev, + "%s pci dma mapping error\n", + __func__); + return -1; + } + } + lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); + lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); + wmb(); /* Make sure owner changes after all others are visible */ + lp->rx_ring[i].status = cpu_to_le16(0x8000); + } + /* The Tx buffer address is filled in as needed, but we do need to clear + * the upper ownership bit. */ + for (i = 0; i < lp->tx_ring_size; i++) { + lp->tx_ring[i].status = 0; /* CPU owns buffer */ + wmb(); /* Make sure adapter sees owner change */ + lp->tx_ring[i].base = 0; + lp->tx_dma_addr[i] = 0; + } + + lp->init_block->tlen_rlen = + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); + for (i = 0; i < 6; i++) + lp->init_block->phys_addr[i] = dev->dev_addr[i]; + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); + wmb(); /* Make sure all changes are visible */ + return 0; +} + +/* the pcnet32 has been issued a stop or reset. Wait for the stop bit + * then flush the pending transmit operations, re-initialize the ring, + * and tell the chip to initialize. 
+ */ +static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + int i; + + /* wait for stop */ + for (i = 0; i < 100; i++) + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP) + break; + + if (i >= 100) + netif_err(lp, drv, dev, "%s timed out waiting for stop\n", + __func__); + + pcnet32_purge_tx_ring(dev); + if (pcnet32_init_ring(dev)) + return; + + /* ReInit Ring */ + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); + i = 0; + while (i++ < 1000) + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) + break; + + lp->a->write_csr(ioaddr, CSR0, csr0_bits); +} + +static void pcnet32_tx_timeout(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr, flags; + + spin_lock_irqsave(&lp->lock, flags); + /* Transmitter timeout, serious problems. */ + if (pcnet32_debug & NETIF_MSG_DRV) + pr_err("%s: transmit timed out, status %4.4x, resetting\n", + dev->name, lp->a->read_csr(ioaddr, CSR0)); + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + dev->stats.tx_errors++; + if (netif_msg_tx_err(lp)) { + int i; + printk(KERN_DEBUG + " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", + lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", + lp->cur_rx); + for (i = 0; i < lp->rx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", + le32_to_cpu(lp->rx_ring[i].base), + (-le16_to_cpu(lp->rx_ring[i].buf_length)) & + 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), + le16_to_cpu(lp->rx_ring[i].status)); + for (i = 0; i < lp->tx_ring_size; i++) + printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", + le32_to_cpu(lp->tx_ring[i].base), + (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, + le32_to_cpu(lp->tx_ring[i].misc), + le16_to_cpu(lp->tx_ring[i].status)); + printk("\n"); + } + pcnet32_restart(dev, CSR0_NORMAL); + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + u16 status; + int entry; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + + netif_printk(lp, tx_queued, KERN_DEBUG, dev, + "%s() called, csr0 %4.4x\n", + __func__, lp->a->read_csr(ioaddr, CSR0)); + + /* Default status -- will not enable Successful-TxDone + * interrupt when that option is available to us. + */ + status = 0x8300; + + /* Fill in a Tx ring entry */ + + /* Mask to ring buffer boundary. */ + entry = lp->cur_tx & lp->tx_mod_mask; + + /* Caution: the write order is important here, set the status + * with the "ownership" bits last. */ + + lp->tx_ring[entry].length = cpu_to_le16(-skb->len); + + lp->tx_ring[entry].misc = 0x00000000; + + lp->tx_dma_addr[entry] = + pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + goto drop_packet; + } + lp->tx_skbuff[entry] = skb; + lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); + wmb(); /* Make sure owner changes after all others are visible */ + lp->tx_ring[entry].status = cpu_to_le16(status); + + lp->cur_tx++; + dev->stats.tx_bytes += skb->len; + + /* Trigger an immediate send poll. 
*/ + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); + + if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { + lp->tx_full = 1; + netif_stop_queue(dev); + } +drop_packet: + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; +} + +/* The PCNET32 interrupt handler. */ +static irqreturn_t +pcnet32_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct pcnet32_private *lp; + unsigned long ioaddr; + u16 csr0; + int boguscnt = max_interrupt_work; + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + + spin_lock(&lp->lock); + + csr0 = lp->a->read_csr(ioaddr, CSR0); + while ((csr0 & 0x8f00) && --boguscnt >= 0) { + if (csr0 == 0xffff) + break; /* PCMCIA remove happened */ + /* Acknowledge all of the current interrupt sources ASAP. */ + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f); + + netif_printk(lp, intr, KERN_DEBUG, dev, + "interrupt csr0=%#2.2x new csr=%#2.2x\n", + csr0, lp->a->read_csr(ioaddr, CSR0)); + + /* Log misc errors. */ + if (csr0 & 0x4000) + dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & 0x1000) { + /* + * This happens when our receive ring is full. This + * shouldn't be a problem as we will see normal rx + * interrupts for the frames in the receive ring. But + * there are some PCI chipsets (I can reproduce this + * on SP3G with Intel saturn chipset) which have + * sometimes problems and will fill up the receive + * ring with error descriptors. In this situation we + * don't get a rx interrupt, but a missed frame + * interrupt sooner or later. + */ + dev->stats.rx_errors++; /* Missed a Rx frame. */ + } + if (csr0 & 0x0800) { + netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", + csr0); + /* unlike for the lance, there is no restart needed */ + } + if (napi_schedule_prep(&lp->napi)) { + u16 val; + /* set interrupt masks */ + val = lp->a->read_csr(ioaddr, CSR3); + val |= 0x5f00; + lp->a->write_csr(ioaddr, CSR3, val); + + __napi_schedule(&lp->napi); + break; + } + csr0 = lp->a->read_csr(ioaddr, CSR0); + } + + netif_printk(lp, intr, KERN_DEBUG, dev, + "exiting interrupt, csr0=%#4.4x\n", + lp->a->read_csr(ioaddr, CSR0)); + + spin_unlock(&lp->lock); + + return IRQ_HANDLED; +} + +static int pcnet32_close(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + + del_timer_sync(&lp->watchdog_timer); + + netif_stop_queue(dev); + napi_disable(&lp->napi); + + spin_lock_irqsave(&lp->lock, flags); + + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); + + netif_printk(lp, ifdown, KERN_DEBUG, dev, + "Shutting down ethercard, status was %2.2x\n", + lp->a->read_csr(ioaddr, CSR0)); + + /* We stop the PCNET32 here -- it occasionally polls memory if we don't. 
*/ + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + + /* + * Switch back to 16bit mode to avoid problems with dumb + * DOS packet driver after a warm reboot + */ + lp->a->write_bcr(ioaddr, 20, 4); + + spin_unlock_irqrestore(&lp->lock, flags); + + free_irq(dev->irq, dev); + + spin_lock_irqsave(&lp->lock, flags); + + pcnet32_purge_rx_ring(dev); + pcnet32_purge_tx_ring(dev); + + spin_unlock_irqrestore(&lp->lock, flags); + + return 0; +} + +static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); + spin_unlock_irqrestore(&lp->lock, flags); + + return &dev->stats; +} + +/* taken from the sunlance driver, which it took from the depca driver */ +static void pcnet32_load_multicast(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + volatile struct pcnet32_init_block *ib = lp->init_block; + volatile __le16 *mcast_table = (__le16 *)ib->filter; + struct netdev_hw_addr *ha; + unsigned long ioaddr = dev->base_addr; + int i; + u32 crc; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) { + ib->filter[0] = cpu_to_le32(~0U); + ib->filter[1] = cpu_to_le32(~0U); + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); + return; + } + /* clear the multicast filter */ + ib->filter[0] = 0; + ib->filter[1] = 0; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); + } + for (i = 0; i < 4; i++) + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i, + le16_to_cpu(mcast_table[i])); +} + +/* + * Set or clear the multicast filter for this adaptor. + */ +static void pcnet32_set_multicast_list(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr, flags; + struct pcnet32_private *lp = netdev_priv(dev); + int csr15, suspended; + + spin_lock_irqsave(&lp->lock, flags); + suspended = pcnet32_suspend(dev, &flags, 0); + csr15 = lp->a->read_csr(ioaddr, CSR15); + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. 
*/ + netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); + lp->init_block->mode = + cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << + 7); + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000); + } else { + lp->init_block->mode = + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff); + pcnet32_load_multicast(dev); + } + + if (suspended) { + int csr5; + /* clear SUSPEND (SPND) - CSR5 bit 0 */ + csr5 = lp->a->read_csr(ioaddr, CSR5); + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); + } else { + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); + pcnet32_restart(dev, CSR0_NORMAL); + netif_wake_queue(dev); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* This routine assumes that the lp->lock is held */ +static int mdio_read(struct net_device *dev, int phy_id, int reg_num) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + u16 val_out; + + if (!lp->mii) + return 0; + + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + val_out = lp->a->read_bcr(ioaddr, 34); + + return val_out; +} + +/* This routine assumes that the lp->lock is held */ +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + if (!lp->mii) + return; + + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); + lp->a->write_bcr(ioaddr, 34, val); +} + +static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int rc; + unsigned long flags; + + /* SIOC[GS]MIIxxx ioctls */ + if (lp->mii) { + spin_lock_irqsave(&lp->lock, flags); + rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int pcnet32_check_otherphy(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + struct mii_if_info mii = lp->mii_if; + u16 bmcr; + int i; + + for (i = 0; i < PCNET32_MAX_PHYS; i++) { + if (i == lp->mii_if.phy_id) + continue; /* skip active phy */ + if (lp->phymask & (1 << i)) { + mii.phy_id = i; + if (mii_link_ok(&mii)) { + /* found PHY with active link */ + netif_info(lp, link, dev, "Using PHY number %d\n", + i); + + /* isolate inactive phy */ + bmcr = + mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); + mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, + bmcr | BMCR_ISOLATE); + + /* de-isolate new phy */ + bmcr = mdio_read(dev, i, MII_BMCR); + mdio_write(dev, i, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* set new phy address */ + lp->mii_if.phy_id = i; + return 1; + } + } + } + return 0; +} + +/* + * Show the status of the media. Similar to mii_check_media however it + * correctly shows the link speed for all (tested) pcnet32 variants. + * Devices with no mii just report link state without speed. + * + * Caller is assumed to hold and release the lp->lock. + */ + +static void pcnet32_check_media(struct net_device *dev, int verbose) +{ + struct pcnet32_private *lp = netdev_priv(dev); + int curr_link; + int prev_link = netif_carrier_ok(dev) ? 
1 : 0; + u32 bcr9; + + if (lp->mii) { + curr_link = mii_link_ok(&lp->mii_if); + } else { + ulong ioaddr = dev->base_addr; /* card base I/O address */ + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); + } + if (!curr_link) { + if (prev_link || verbose) { + netif_carrier_off(dev); + netif_info(lp, link, dev, "link down\n"); + } + if (lp->phycount > 1) { + curr_link = pcnet32_check_otherphy(dev); + prev_link = 0; + } + } else if (verbose || !prev_link) { + netif_carrier_on(dev); + if (lp->mii) { + if (netif_msg_link(lp)) { + struct ethtool_cmd ecmd = { + .cmd = ETHTOOL_GSET }; + mii_ethtool_gset(&lp->mii_if, &ecmd); + netdev_info(dev, "link up, %uMbps, %s-duplex\n", + ethtool_cmd_speed(&ecmd), + (ecmd.duplex == DUPLEX_FULL) + ? "full" : "half"); + } + bcr9 = lp->a->read_bcr(dev->base_addr, 9); + if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { + if (lp->mii_if.full_duplex) + bcr9 |= (1 << 0); + else + bcr9 &= ~(1 << 0); + lp->a->write_bcr(dev->base_addr, 9, bcr9); + } + } else { + netif_info(lp, link, dev, "link up\n"); + } + } +} + +/* + * Check for loss of link and link establishment. + * Could possibly be changed to use mii_check_media instead. + */ + +static void pcnet32_watchdog(struct net_device *dev) +{ + struct pcnet32_private *lp = netdev_priv(dev); + unsigned long flags; + + /* Print the link status if it has changed */ + spin_lock_irqsave(&lp->lock, flags); + pcnet32_check_media(dev, 0); + spin_unlock_irqrestore(&lp->lock, flags); + + mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); +} + +static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) { + netif_device_detach(dev); + pcnet32_close(dev); + } + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int pcnet32_pm_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (netif_running(dev)) { + pcnet32_open(dev); + netif_device_attach(dev); + } + return 0; +} + +static void pcnet32_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct pcnet32_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + pcnet32_free_ring(dev); + release_region(dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); + free_netdev(dev); + pci_disable_device(pdev); + } +} + +static struct pci_driver pcnet32_driver = { + .name = DRV_NAME, + .probe = pcnet32_probe_pci, + .remove = pcnet32_remove_one, + .id_table = pcnet32_pci_tbl, + .suspend = pcnet32_pm_suspend, + .resume = pcnet32_pm_resume, +}; + +/* An additional parameter that may be passed in... 
*/ +static int debug = -1; +static int tx_start_pt = -1; +static int pcnet32_have_pci; + +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, DRV_NAME " debug level"); +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(max_interrupt_work, + DRV_NAME " maximum events handled per interrupt"); +module_param(rx_copybreak, int, 0); +MODULE_PARM_DESC(rx_copybreak, + DRV_NAME " copy breakpoint for copy-only-tiny-frames"); +module_param(tx_start_pt, int, 0); +MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); +module_param(pcnet32vlb, int, 0); +MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)"); +module_param_array(options, int, NULL, 0); +MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)"); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); +/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ +module_param_array(homepna, int, NULL, 0); +MODULE_PARM_DESC(homepna, + DRV_NAME + " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); + +MODULE_AUTHOR("Thomas Bogendoerfer"); +MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); +MODULE_LICENSE("GPL"); + +#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int __init pcnet32_init_module(void) +{ + pr_info("%s", version); + + pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); + + if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) + tx_start = tx_start_pt; + + /* find the PCI devices */ + if (!pci_register_driver(&pcnet32_driver)) + pcnet32_have_pci = 1; + + /* should we find any remaining VLbus devices ? */ + if (pcnet32vlb) + pcnet32_probe_vlbus(pcnet32_portlist); + + if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) + pr_info("%d cards_found\n", cards_found); + + return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; +} + +static void __exit pcnet32_cleanup_module(void) +{ + struct net_device *next_dev; + + while (pcnet32_dev) { + struct pcnet32_private *lp = netdev_priv(pcnet32_dev); + next_dev = lp->next; + unregister_netdev(pcnet32_dev); + pcnet32_free_ring(pcnet32_dev); + release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); + pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); + free_netdev(pcnet32_dev); + pcnet32_dev = next_dev; + } + + if (pcnet32_have_pci) + pci_unregister_driver(&pcnet32_driver); +} + +module_init(pcnet32_init_module); +module_exit(pcnet32_cleanup_module); + +/* + * Local variables: + * c-indent-level: 4 + * tab-width: 8 + * End: + */ diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c new file mode 100644 index 000000000..3d8c6b2cd --- /dev/null +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -0,0 +1,956 @@ +/* sun3lance.c: Ethernet driver for SUN3 Lance chip */ +/* + + Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net). + This driver is a part of the linux kernel, and is thus distributed + under the GNU General Public License. + + The values used in LANCE_OBIO and LANCE_IRQ seem to be empirically + true for the correct IRQ and address of the lance registers. They + have not been widely tested, however. What we probably need is a + "proper" way to search for a device in the sun3's prom, but, alas, + linux has no such thing. + + This driver is largely based on atarilance.c, by Roman Hodek. 
Other + sources of inspiration were the NetBSD sun3 am7990 driver, and the + linux sparc lance driver (sunlance.c). + + There are more assumptions made throughout this driver, it almost + certainly still needs work, but it does work at least for RARP/BOOTP and + mounting the root NFS filesystem. + +*/ + +static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SUN3 +#include +#else +#include +#endif + +/* sun3/60 addr/irq for the lance chip. If your sun is different, + change this. */ +#define LANCE_OBIO 0x120000 +#define LANCE_IRQ IRQ_AUTO_3 + +/* Debug level: + * 0 = silent, print only serious errors + * 1 = normal, print error messages + * 2 = debug, print debug infos + * 3 = debug, print even more debug infos (packet data) + */ + +#define LANCE_DEBUG 0 + +#ifdef LANCE_DEBUG +static int lance_debug = LANCE_DEBUG; +#else +static int lance_debug = 1; +#endif +module_param(lance_debug, int, 0); +MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)"); +MODULE_LICENSE("GPL"); + +#define DPRINTK(n,a) \ + do { \ + if (lance_debug >= n) \ + printk a; \ + } while( 0 ) + + +/* we're only using 32k of memory, so we use 4 TX + buffers and 16 RX buffers. These values are expressed as log2. */ + +#define TX_LOG_RING_SIZE 3 +#define RX_LOG_RING_SIZE 5 + +/* These are the derived values */ + +#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) +#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) + +#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) +#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) + +/* Definitions for packet buffer access: */ +#define PKT_BUF_SZ 1544 + +/* Get the address of a packet buffer corresponding to a given buffer head */ +#define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base) + + +/* The LANCE Rx and Tx ring descriptors. */ +struct lance_rx_head { + unsigned short base; /* Low word of base addr */ + volatile unsigned char flag; + unsigned char base_hi; /* High word of base addr (unused) */ + short buf_length; /* This length is 2s complement! */ + volatile short msg_length; /* This length is "normal". */ +}; + +struct lance_tx_head { + unsigned short base; /* Low word of base addr */ + volatile unsigned char flag; + unsigned char base_hi; /* High word of base addr (unused) */ + short length; /* Length is 2s complement! */ + volatile short misc; +}; + +/* The LANCE initialization block, described in databook. */ +struct lance_init_block { + unsigned short mode; /* Pre-set mode */ + unsigned char hwaddr[6]; /* Physical ethernet address */ + unsigned int filter[2]; /* Multicast filter (unused). */ + /* Receive and transmit ring base, along with length bits. */ + unsigned short rdra; + unsigned short rlen; + unsigned short tdra; + unsigned short tlen; + unsigned short pad[4]; /* is thie needed? 
*/ +}; + +/* The whole layout of the Lance shared memory */ +struct lance_memory { + struct lance_init_block init; + struct lance_tx_head tx_head[TX_RING_SIZE]; + struct lance_rx_head rx_head[RX_RING_SIZE]; + char rx_data[RX_RING_SIZE][PKT_BUF_SZ]; + char tx_data[TX_RING_SIZE][PKT_BUF_SZ]; +}; + +/* The driver's private device structure */ + +struct lance_private { + volatile unsigned short *iobase; + struct lance_memory *mem; + int new_rx, new_tx; /* The next free ring entry */ + int old_tx, old_rx; /* ring entry to be processed */ +/* These two must be longs for set_bit() */ + long tx_full; + long lock; +}; + +/* I/O register access macros */ + +#define MEM lp->mem +#define DREG lp->iobase[0] +#define AREG lp->iobase[1] +#define REGA(a) (*( AREG = (a), &DREG )) + +/* Definitions for the Lance */ + +/* tx_head flags */ +#define TMD1_ENP 0x01 /* end of packet */ +#define TMD1_STP 0x02 /* start of packet */ +#define TMD1_DEF 0x04 /* deferred */ +#define TMD1_ONE 0x08 /* one retry needed */ +#define TMD1_MORE 0x10 /* more than one retry needed */ +#define TMD1_ERR 0x40 /* error summary */ +#define TMD1_OWN 0x80 /* ownership (set: chip owns) */ + +#define TMD1_OWN_CHIP TMD1_OWN +#define TMD1_OWN_HOST 0 + +/* tx_head misc field */ +#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */ +#define TMD3_RTRY 0x0400 /* failed after 16 retries */ +#define TMD3_LCAR 0x0800 /* carrier lost */ +#define TMD3_LCOL 0x1000 /* late collision */ +#define TMD3_UFLO 0x4000 /* underflow (late memory) */ +#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */ + +/* rx_head flags */ +#define RMD1_ENP 0x01 /* end of packet */ +#define RMD1_STP 0x02 /* start of packet */ +#define RMD1_BUFF 0x04 /* buffer error */ +#define RMD1_CRC 0x08 /* CRC error */ +#define RMD1_OFLO 0x10 /* overflow */ +#define RMD1_FRAM 0x20 /* framing error */ +#define RMD1_ERR 0x40 /* error summary */ +#define RMD1_OWN 0x80 /* ownership (set: ship owns) */ + +#define RMD1_OWN_CHIP RMD1_OWN +#define RMD1_OWN_HOST 0 + +/* register names */ +#define CSR0 0 /* mode/status */ +#define CSR1 1 /* init block addr (low) */ +#define CSR2 2 /* init block addr (high) */ +#define CSR3 3 /* misc */ +#define CSR8 8 /* address filter */ +#define CSR15 15 /* promiscuous mode */ + +/* CSR0 */ +/* (R=readable, W=writeable, S=set on write, C=clear on write) */ +#define CSR0_INIT 0x0001 /* initialize (RS) */ +#define CSR0_STRT 0x0002 /* start (RS) */ +#define CSR0_STOP 0x0004 /* stop (RS) */ +#define CSR0_TDMD 0x0008 /* transmit demand (RS) */ +#define CSR0_TXON 0x0010 /* transmitter on (R) */ +#define CSR0_RXON 0x0020 /* receiver on (R) */ +#define CSR0_INEA 0x0040 /* interrupt enable (RW) */ +#define CSR0_INTR 0x0080 /* interrupt active (R) */ +#define CSR0_IDON 0x0100 /* initialization done (RC) */ +#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */ +#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */ +#define CSR0_MERR 0x0800 /* memory error (RC) */ +#define CSR0_MISS 0x1000 /* missed frame (RC) */ +#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */ +#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */ +#define CSR0_ERR 0x8000 /* error (RC) */ + +/* CSR3 */ +#define CSR3_BCON 0x0001 /* byte control */ +#define CSR3_ACON 0x0002 /* ALE control */ +#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */ + +/***************************** Prototypes *****************************/ + +static int lance_probe( struct net_device *dev); +static int lance_open( struct net_device *dev ); +static void lance_init_ring( struct 
net_device *dev ); +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); +static irqreturn_t lance_interrupt( int irq, void *dev_id); +static int lance_rx( struct net_device *dev ); +static int lance_close( struct net_device *dev ); +static void set_multicast_list( struct net_device *dev ); + +/************************* End of Prototypes **************************/ + +struct net_device * __init sun3lance_probe(int unit) +{ + struct net_device *dev; + static int found; + int err = -ENODEV; + + if (!MACH_IS_SUN3 && !MACH_IS_SUN3X) + return ERR_PTR(-ENODEV); + + /* check that this machine has an onboard lance */ + switch(idprom->id_machtype) { + case SM_SUN3|SM_3_50: + case SM_SUN3|SM_3_60: + case SM_SUN3X|SM_3_80: + /* these machines have lance */ + break; + + default: + return ERR_PTR(-ENODEV); + } + + if (found) + return ERR_PTR(-ENODEV); + + dev = alloc_etherdev(sizeof(struct lance_private)); + if (!dev) + return ERR_PTR(-ENOMEM); + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + if (!lance_probe(dev)) + goto out; + + err = register_netdev(dev); + if (err) + goto out1; + found = 1; + return dev; + +out1: +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)dev->base_addr); +#endif +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_rx_mode = set_multicast_list, + .ndo_set_mac_address = NULL, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init lance_probe( struct net_device *dev) +{ + unsigned long ioaddr; + + struct lance_private *lp; + int i; + static int did_version; + volatile unsigned short *ioaddr_probe; + unsigned short tmp1, tmp2; + +#ifdef CONFIG_SUN3 + ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE); + if (!ioaddr) + return 0; +#else + ioaddr = SUN3X_LANCE; +#endif + + /* test to see if there's really a lance here */ + /* (CSRO_INIT shouldn't be readable) */ + + ioaddr_probe = (volatile unsigned short *)ioaddr; + tmp1 = ioaddr_probe[0]; + tmp2 = ioaddr_probe[1]; + + ioaddr_probe[1] = CSR0; + ioaddr_probe[0] = CSR0_INIT | CSR0_STOP; + + if(ioaddr_probe[0] != CSR0_STOP) { + ioaddr_probe[0] = tmp1; + ioaddr_probe[1] = tmp2; + +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)ioaddr); +#endif + return 0; + } + + lp = netdev_priv(dev); + + /* XXX - leak? 
*/ + MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000); + if (MEM == NULL) { +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)ioaddr); +#endif + printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n"); + return 0; + } + + lp->iobase = (volatile unsigned short *)ioaddr; + dev->base_addr = (unsigned long)ioaddr; /* informational only */ + + REGA(CSR0) = CSR0_STOP; + + if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) { +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)ioaddr); +#endif + dvma_free((void *)MEM); + printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n"); + return 0; + } + dev->irq = (unsigned short)LANCE_IRQ; + + + printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ", + dev->name, + (unsigned long)ioaddr, + (unsigned long)MEM, + dev->irq); + + /* copy in the ethernet address from the prom */ + for(i = 0; i < 6 ; i++) + dev->dev_addr[i] = idprom->id_ethaddr[i]; + + /* tell the card it's ether address, bytes swapped */ + MEM->init.hwaddr[0] = dev->dev_addr[1]; + MEM->init.hwaddr[1] = dev->dev_addr[0]; + MEM->init.hwaddr[2] = dev->dev_addr[3]; + MEM->init.hwaddr[3] = dev->dev_addr[2]; + MEM->init.hwaddr[4] = dev->dev_addr[5]; + MEM->init.hwaddr[5] = dev->dev_addr[4]; + + printk("%pM\n", dev->dev_addr); + + MEM->init.mode = 0x0000; + MEM->init.filter[0] = 0x00000000; + MEM->init.filter[1] = 0x00000000; + MEM->init.rdra = dvma_vtob(MEM->rx_head); + MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->rx_head) >> 16); + MEM->init.tdra = dvma_vtob(MEM->tx_head); + MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->tx_head) >> 16); + + DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n", + dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head), + (dvma_vtob(MEM->tx_head)))); + + if (did_version++ == 0) + printk( version ); + + dev->netdev_ops = &lance_netdev_ops; +// KLUDGE -- REMOVE ME + set_bit(__LINK_STATE_PRESENT, &dev->state); + + + return 1; +} + +static int lance_open( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int i; + + DPRINTK( 2, ( "%s: lance_open()\n", dev->name )); + + REGA(CSR0) = CSR0_STOP; + + lance_init_ring(dev); + + /* From now on, AREG is kept to point to CSR0 */ + REGA(CSR0) = CSR0_INIT; + + i = 1000000; + while (--i > 0) + if (DREG & CSR0_IDON) + break; + if (i <= 0 || (DREG & CSR0_ERR)) { + DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", + dev->name, i, DREG )); + DREG = CSR0_STOP; + return -EIO; + } + + DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA; + + netif_start_queue(dev); + + DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); + + return 0; +} + + +/* Initialize the LANCE Rx and Tx rings. 
*/ + +static void lance_init_ring( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int i; + + lp->lock = 0; + lp->tx_full = 0; + lp->new_rx = lp->new_tx = 0; + lp->old_rx = lp->old_tx = 0; + + for( i = 0; i < TX_RING_SIZE; i++ ) { + MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]); + MEM->tx_head[i].flag = 0; + MEM->tx_head[i].base_hi = + (dvma_vtob(MEM->tx_data[i])) >>16; + MEM->tx_head[i].length = 0; + MEM->tx_head[i].misc = 0; + } + + for( i = 0; i < RX_RING_SIZE; i++ ) { + MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]); + MEM->rx_head[i].flag = RMD1_OWN_CHIP; + MEM->rx_head[i].base_hi = + (dvma_vtob(MEM->rx_data[i])) >> 16; + MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000; + MEM->rx_head[i].msg_length = 0; + } + + /* tell the card it's ether address, bytes swapped */ + MEM->init.hwaddr[0] = dev->dev_addr[1]; + MEM->init.hwaddr[1] = dev->dev_addr[0]; + MEM->init.hwaddr[2] = dev->dev_addr[3]; + MEM->init.hwaddr[3] = dev->dev_addr[2]; + MEM->init.hwaddr[4] = dev->dev_addr[5]; + MEM->init.hwaddr[5] = dev->dev_addr[4]; + + MEM->init.mode = 0x0000; + MEM->init.filter[0] = 0x00000000; + MEM->init.filter[1] = 0x00000000; + MEM->init.rdra = dvma_vtob(MEM->rx_head); + MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->rx_head) >> 16); + MEM->init.tdra = dvma_vtob(MEM->tx_head); + MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | + (dvma_vtob(MEM->tx_head) >> 16); + + + /* tell the lance the address of its init block */ + REGA(CSR1) = dvma_vtob(&(MEM->init)); + REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16; + +#ifdef CONFIG_SUN3X + REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON; +#else + REGA(CSR3) = CSR3_BSWP; +#endif + +} + + +static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int entry, len; + struct lance_tx_head *head; + unsigned long flags; + + DPRINTK( 1, ( "%s: transmit start.\n", + dev->name)); + + /* Transmitter timeout, serious problems. */ + if (netif_queue_stopped(dev)) { + int tickssofar = jiffies - dev_trans_start(dev); + if (tickssofar < HZ/5) + return NETDEV_TX_BUSY; + + DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", + dev->name, DREG )); + DREG = CSR0_STOP; + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA(CSR3) = CSR3_BSWP; + dev->stats.tx_errors++; + + if(lance_debug >= 2) { + int i; + printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n", + lp->old_tx, lp->new_tx, + lp->tx_full ? " (full)" : "", + lp->new_rx ); + for( i = 0 ; i < RX_RING_SIZE; i++ ) + printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n", + i, MEM->rx_head[i].base, + -MEM->rx_head[i].buf_length, + MEM->rx_head[i].msg_length); + for( i = 0 ; i < TX_RING_SIZE; i++ ) + printk("tx #%d: base=%04x len=%04x misc=%04x\n", + i, MEM->tx_head[i].base, + -MEM->tx_head[i].length, + MEM->tx_head[i].misc ); + } + + lance_init_ring(dev); + REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; + + netif_start_queue(dev); + + return NETDEV_TX_OK; + } + + + /* Block a timer-based transmit from overlapping. This could better be + done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */ + + /* Block a timer-based transmit from overlapping with us by + stopping the queue for a bit... */ + + netif_stop_queue(dev); + + if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) { + printk( "%s: tx queue lock!.\n", dev->name); + /* don't clear dev->tbusy flag. 
*/ + return NETDEV_TX_BUSY; + } + + AREG = CSR0; + DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", + dev->name, DREG )); + +#ifdef CONFIG_SUN3X + /* this weirdness doesn't appear on sun3... */ + if(!(DREG & CSR0_INIT)) { + DPRINTK( 1, ("INIT not set, reinitializing...\n")); + REGA( CSR0 ) = CSR0_STOP; + lance_init_ring(dev); + REGA( CSR0 ) = CSR0_INIT | CSR0_STRT; + } +#endif + + /* Fill in a Tx ring entry */ +#if 0 + if (lance_debug >= 2) { + printk( "%s: TX pkt %d type 0x%04x" + " from %s to %s" + " data at 0x%08x len %d\n", + dev->name, lp->new_tx, ((u_short *)skb->data)[6], + DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data), + (int)skb->data, (int)skb->len ); + } +#endif + /* We're not prepared for the int until the last flags are set/reset. + * And the int may happen already after setting the OWN_CHIP... */ + local_irq_save(flags); + + /* Mask to ring buffer boundary. */ + entry = lp->new_tx; + head = &(MEM->tx_head[entry]); + + /* Caution: the write order is important here, set the "ownership" bits + * last. + */ + + /* the sun3's lance needs it's buffer padded to the minimum + size */ + len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; + +// head->length = -len; + head->length = (-len) | 0xf000; + head->misc = 0; + + skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); + if (len != skb->len) + memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); + + head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; + lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; + dev->stats.tx_bytes += skb->len; + + /* Trigger an immediate send poll. */ + REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; + AREG = CSR0; + DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n", + dev->name, DREG )); + dev_kfree_skb(skb); + + lp->lock = 0; + if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == + TMD1_OWN_HOST) + netif_start_queue(dev); + + local_irq_restore(flags); + + return NETDEV_TX_OK; +} + +/* The LANCE interrupt handler. 
*/ + +static irqreturn_t lance_interrupt( int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + int csr0; + static int in_interrupt; + + if (dev == NULL) { + DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" )); + return IRQ_NONE; + } + + if (in_interrupt) + DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name )); + in_interrupt = 1; + + still_more: + flush_cache_all(); + + AREG = CSR0; + csr0 = DREG; + + /* ack interrupts */ + DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON); + + /* clear errors */ + if(csr0 & CSR0_ERR) + DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS; + + + DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n", + dev->name, csr0, DREG )); + + if (csr0 & CSR0_TINT) { /* Tx-done interrupt */ + int old_tx = lp->old_tx; + +// if(lance_debug >= 3) { +// int i; +// +// printk("%s: tx int\n", dev->name); +// +// for(i = 0; i < TX_RING_SIZE; i++) +// printk("ring %d flag=%04x\n", i, +// MEM->tx_head[i].flag); +// } + + while( old_tx != lp->new_tx) { + struct lance_tx_head *head = &(MEM->tx_head[old_tx]); + + DPRINTK(3, ("on tx_ring %d\n", old_tx)); + + if (head->flag & TMD1_OWN_CHIP) + break; /* It still hasn't been Txed */ + + if (head->flag & TMD1_ERR) { + int status = head->misc; + dev->stats.tx_errors++; + if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (status & TMD3_LCOL) dev->stats.tx_window_errors++; + if (status & (TMD3_UFLO | TMD3_BUFF)) { + dev->stats.tx_fifo_errors++; + printk("%s: Tx FIFO error\n", + dev->name); + REGA(CSR0) = CSR0_STOP; + REGA(CSR3) = CSR3_BSWP; + lance_init_ring(dev); + REGA(CSR0) = CSR0_STRT | CSR0_INEA; + return IRQ_HANDLED; + } + } else if(head->flag & (TMD1_ENP | TMD1_STP)) { + + head->flag &= ~(TMD1_ENP | TMD1_STP); + if(head->flag & (TMD1_ONE | TMD1_MORE)) + dev->stats.collisions++; + + dev->stats.tx_packets++; + DPRINTK(3, ("cleared tx ring %d\n", old_tx)); + } + old_tx = (old_tx +1) & TX_RING_MOD_MASK; + } + + lp->old_tx = old_tx; + } + + + if (netif_queue_stopped(dev)) { + /* The ring is no longer full, clear tbusy. */ + netif_start_queue(dev); + netif_wake_queue(dev); + } + + if (csr0 & CSR0_RINT) /* Rx interrupt */ + lance_rx( dev ); + + /* Log misc errors. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_MERR) { + DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " + "status %04x.\n", dev->name, csr0 )); + /* Restart the chip. */ + REGA(CSR0) = CSR0_STOP; + REGA(CSR3) = CSR3_BSWP; + lance_init_ring(dev); + REGA(CSR0) = CSR0_STRT | CSR0_INEA; + } + + + /* Clear any other interrupt, and set interrupt enable. */ +// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR | +// CSR0_IDON | CSR0_INEA; + + REGA(CSR0) = CSR0_INEA; + + if(DREG & (CSR0_RINT | CSR0_TINT)) { + DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG)); + goto still_more; + } + + DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n", + dev->name, DREG )); + in_interrupt = 0; + return IRQ_HANDLED; +} + +/* get packet, toss into skbuff */ +static int lance_rx( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + int entry = lp->new_rx; + + /* If we own the next entry, it's a new packet. Send it up. 
*/ + while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) { + struct lance_rx_head *head = &(MEM->rx_head[entry]); + int status = head->flag; + + if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */ + /* There is a tricky error noted by John Murphy, + to Russ Nelson: Even with + full-sized buffers it's possible for a jabber packet to use two + buffers, with only the last correctly noting the error. */ + if (status & RMD1_ENP) /* Only count a general error at the */ + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; + head->flag &= (RMD1_ENP|RMD1_STP); + } else { + /* Malloc up new buffer, compatible with net-3. */ +// short pkt_len = head->msg_length;// & 0xfff; + short pkt_len = (head->msg_length & 0xfff) - 4; + struct sk_buff *skb; + + if (pkt_len < 60) { + printk( "%s: Runt packet!\n", dev->name ); + dev->stats.rx_errors++; + } + else { + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { + dev->stats.rx_dropped++; + head->msg_length = 0; + head->flag |= RMD1_OWN_CHIP; + lp->new_rx = (lp->new_rx+1) & + RX_RING_MOD_MASK; + } + +#if 0 + if (lance_debug >= 3) { + u_char *data = PKTBUF_ADDR(head); + printk("%s: RX pkt %d type 0x%04x" + " from %pM to %pM", + dev->name, lp->new_tx, ((u_short *)data)[6], + &data[6], data); + + printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " + "len %d at %08x\n", + data[15], data[16], data[17], data[18], + data[19], data[20], data[21], data[22], + pkt_len, data); + } +#endif + if (lance_debug >= 3) { + u_char *data = PKTBUF_ADDR(head); + printk( "%s: RX pkt %d type 0x%04x len %d\n ", dev->name, entry, ((u_short *)data)[6], pkt_len); + } + + + skb_reserve( skb, 2 ); /* 16 byte align */ + skb_put( skb, pkt_len ); /* Make room */ + skb_copy_to_linear_data(skb, + PKTBUF_ADDR(head), + pkt_len); + + skb->protocol = eth_type_trans( skb, dev ); + netif_rx( skb ); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } + +// head->buf_length = -PKT_BUF_SZ | 0xf000; + head->msg_length = 0; + head->flag = RMD1_OWN_CHIP; + + entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK; + } + + /* From lance.c (Donald Becker): */ + /* We should check that at least two ring entries are free. + If not, we should free one and mark stats->rx_dropped++. */ + + return 0; +} + + +static int lance_close( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + + AREG = CSR0; + + DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n", + dev->name, DREG )); + + /* We stop the LANCE here -- it occasionally polls + memory if we don't. */ + DREG = CSR0_STOP; + return 0; +} + + +/* Set or clear the multicast filter for this adaptor. + num_addrs == -1 Promiscuous mode, receive all packets + num_addrs == 0 Normal mode, clear multicast list + num_addrs > 0 Multicast mode, receive normal and MC packets, and do + best-effort filtering. + */ + +/* completely untested on a sun3 */ +static void set_multicast_list( struct net_device *dev ) +{ + struct lance_private *lp = netdev_priv(dev); + + if(netif_queue_stopped(dev)) + /* Only possible if board is already started */ + return; + + /* We take the simple way out and always enable promiscuous mode. */ + DREG = CSR0_STOP; /* Temporarily stop the lance. */ + + if (dev->flags & IFF_PROMISC) { + /* Log any net taps. 
*/ + DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name )); + REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ + } else { + short multicast_table[4]; + int num_addrs = netdev_mc_count(dev); + int i; + /* We don't use the multicast table, but rely on upper-layer + * filtering. */ + memset( multicast_table, (num_addrs == 0) ? 0 : -1, + sizeof(multicast_table) ); + for( i = 0; i < 4; i++ ) + REGA( CSR8+i ) = multicast_table[i]; + REGA( CSR15 ) = 0; /* Unset promiscuous mode */ + } + + /* + * Always set BSWP after a STOP as STOP puts it back into + * little endian mode. + */ + REGA( CSR3 ) = CSR3_BSWP; + + /* Resume normal operation and reset AREG to CSR0 */ + REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT; +} + + +#ifdef MODULE + +static struct net_device *sun3lance_dev; + +int __init init_module(void) +{ + sun3lance_dev = sun3lance_probe(-1); + return PTR_ERR_OR_ZERO(sun3lance_dev); +} + +void __exit cleanup_module(void) +{ + unregister_netdev(sun3lance_dev); +#ifdef CONFIG_SUN3 + iounmap((void __iomem *)sun3lance_dev->base_addr); +#endif + free_netdev(sun3lance_dev); +} + +#endif /* MODULE */ + diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c new file mode 100644 index 000000000..7847638bd --- /dev/null +++ b/drivers/net/ethernet/amd/sunlance.c @@ -0,0 +1,1533 @@ +/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $ + * lance.c: Linux/Sparc/Lance driver + * + * Written 1995, 1996 by Miguel de Icaza + * Sources: + * The Linux depca driver + * The Linux lance driver. + * The Linux skeleton driver. + * The NetBSD Sparc/Lance driver. + * Theo de Raadt (deraadt@openbsd.org) + * NCR92C990 Lan Controller manual + * + * 1.4: + * Added support to run with a ledma on the Sun4m + * + * 1.5: + * Added multiple card detection. + * + * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost + * (ecd@skynet.be) + * + * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost + * (ecd@skynet.be) + * + * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller + * (davem@caip.rutgers.edu) + * + * 5/29/96: override option 'tpe-link-test?', if it is 'false', as + * this disables auto carrier detection on sun4m. Eddie C. Dost + * (ecd@skynet.be) + * + * 1.7: + * 6/26/96: Bug fix for multiple ledmas, miguel. + * + * 1.8: + * Stole multicast code from depca.c, fixed lance_tx. + * + * 1.9: + * 8/21/96: Fixed the multicast code (Pedro Roque) + * + * 8/28/96: Send fake packet in lance_open() if auto_select is true, + * so we can detect the carrier loss condition in time. + * Eddie C. Dost (ecd@skynet.be) + * + * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an + * MNA trap during chksum_partial_copy(). (ecd@skynet.be) + * + * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be) + * + * 12/22/96: Don't loop forever in lance_rx() on incomplete packets. + * This was the sun4c killer. Shit, stupid bug. + * (ecd@skynet.be) + * + * 1.10: + * 1/26/97: Modularize driver. (ecd@skynet.be) + * + * 1.11: + * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz) + * + * 1.12: + * 11/3/99: Fixed SMP race in lance_start_xmit found by davem. + * Anton Blanchard (anton@progsoc.uts.edu.au) + * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces. + * David S. 
Miller (davem@redhat.com) + * 2.01: + * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com) + * + */ + +#undef DEBUG_DRIVER + +static char lancestr[] = "LANCE"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Used for the temporal inet entries and routing */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include /* Used by the checksum routines */ +#include +#include +#include /* For tpe-link-test? setting */ +#include + +#define DRV_NAME "sunlance" +#define DRV_VERSION "2.02" +#define DRV_RELDATE "8/24/03" +#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)" + +static char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; + +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("Sun Lance ethernet driver"); +MODULE_LICENSE("GPL"); + +/* Define: 2^4 Tx buffers and 2^4 Rx buffers */ +#ifndef LANCE_LOG_TX_BUFFERS +#define LANCE_LOG_TX_BUFFERS 4 +#define LANCE_LOG_RX_BUFFERS 4 +#endif + +#define LE_CSR0 0 +#define LE_CSR1 1 +#define LE_CSR2 2 +#define LE_CSR3 3 + +#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */ + +#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */ +#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */ +#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */ +#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */ +#define LE_C0_MERR 0x0800 /* ME: Memory error */ +#define LE_C0_RINT 0x0400 /* Received interrupt */ +#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */ +#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */ +#define LE_C0_INTR 0x0080 /* Interrupt or error */ +#define LE_C0_INEA 0x0040 /* Interrupt enable */ +#define LE_C0_RXON 0x0020 /* Receiver on */ +#define LE_C0_TXON 0x0010 /* Transmitter on */ +#define LE_C0_TDMD 0x0008 /* Transmitter demand */ +#define LE_C0_STOP 0x0004 /* Stop the card */ +#define LE_C0_STRT 0x0002 /* Start the card */ +#define LE_C0_INIT 0x0001 /* Init the card */ + +#define LE_C3_BSWP 0x4 /* SWAP */ +#define LE_C3_ACON 0x2 /* ALE Control */ +#define LE_C3_BCON 0x1 /* Byte control */ + +/* Receive message descriptor 1 */ +#define LE_R1_OWN 0x80 /* Who owns the entry */ +#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */ +#define LE_R1_FRA 0x20 /* FRA: Frame error */ +#define LE_R1_OFL 0x10 /* OFL: Frame overflow */ +#define LE_R1_CRC 0x08 /* CRC error */ +#define LE_R1_BUF 0x04 /* BUF: Buffer error */ +#define LE_R1_SOP 0x02 /* Start of packet */ +#define LE_R1_EOP 0x01 /* End of packet */ +#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */ + +#define LE_T1_OWN 0x80 /* Lance owns the packet */ +#define LE_T1_ERR 0x40 /* Error summary */ +#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */ +#define LE_T1_EONE 0x08 /* Error: one retry needed */ +#define LE_T1_EDEF 0x04 /* Error: deferred */ +#define LE_T1_SOP 0x02 /* Start of packet */ +#define LE_T1_EOP 0x01 /* End of packet */ +#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */ + +#define LE_T3_BUF 0x8000 /* Buffer error */ +#define LE_T3_UFL 0x4000 /* Error underflow */ +#define LE_T3_LCOL 0x1000 /* Error late collision */ +#define LE_T3_CLOS 0x0800 /* Error carrier loss */ +#define LE_T3_RTY 0x0400 /* Error retry */ +#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */ + +#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) +#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) 
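The ring-geometry macros above and the TX_NEXT()/RX_NEXT() helpers defined just below depend on the ring sizes being powers of two, so stepping to the next descriptor is a single AND with the MOD_MASK rather than a modulo. The standalone sketch that follows is illustrative only and not part of the patch; the constant mirrors LANCE_LOG_TX_BUFFERS = 4, and the helper name is invented for the example.

#include <stdio.h>

#define LOG_RING_SIZE	4
#define RING_SIZE	(1 << LOG_RING_SIZE)	/* 16 descriptors */
#define RING_MOD_MASK	(RING_SIZE - 1)		/* 0x0f */

static int ring_next(int idx)
{
	/* same wrap trick as TX_NEXT()/RX_NEXT(): no division needed */
	return (idx + 1) & RING_MOD_MASK;
}

int main(void)
{
	int idx = RING_SIZE - 1;	/* last descriptor in the ring */

	/* prints "after 15 comes 0" */
	printf("after %d comes %d\n", idx, ring_next(idx));
	return 0;
}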
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29) +#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK) + +#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS)) +#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) +#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29) +#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK) + +#define PKT_BUF_SZ 1544 +#define RX_BUFF_SIZE PKT_BUF_SZ +#define TX_BUFF_SIZE PKT_BUF_SZ + +struct lance_rx_desc { + u16 rmd0; /* low address of packet */ + u8 rmd1_bits; /* descriptor bits */ + u8 rmd1_hadr; /* high address of packet */ + s16 length; /* This length is 2s complement (negative)! + * Buffer length + */ + u16 mblength; /* This is the actual number of bytes received */ +}; + +struct lance_tx_desc { + u16 tmd0; /* low address of packet */ + u8 tmd1_bits; /* descriptor bits */ + u8 tmd1_hadr; /* high address of packet */ + s16 length; /* Length is 2s complement (negative)! */ + u16 misc; +}; + +/* The LANCE initialization block, described in databook. */ +/* On the Sparc, this block should be on a DMA region */ +struct lance_init_block { + u16 mode; /* Pre-set mode (reg. 15) */ + u8 phys_addr[6]; /* Physical ethernet address */ + u32 filter[2]; /* Multicast filter. */ + + /* Receive and transmit ring base, along with extra bits. */ + u16 rx_ptr; /* receive descriptor addr */ + u16 rx_len; /* receive len and high addr */ + u16 tx_ptr; /* transmit descriptor addr */ + u16 tx_len; /* transmit len and high addr */ + + /* The Tx and Rx ring entries must aligned on 8-byte boundaries. */ + struct lance_rx_desc brx_ring[RX_RING_SIZE]; + struct lance_tx_desc btx_ring[TX_RING_SIZE]; + + u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE]; + u8 pad[2]; /* align rx_buf for copy_and_sum(). */ + u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE]; +}; + +#define libdesc_offset(rt, elem) \ +((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem]))))) + +#define libbuff_offset(rt, elem) \ +((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0]))))) + +struct lance_private { + void __iomem *lregs; /* Lance RAP/RDP regs. */ + void __iomem *dregs; /* DMA controller regs. */ + struct lance_init_block __iomem *init_block_iomem; + struct lance_init_block *init_block_mem; + + spinlock_t lock; + + int rx_new, tx_new; + int rx_old, tx_old; + + struct platform_device *ledma; /* If set this points to ledma */ + char tpe; /* cable-selection is TPE */ + char auto_select; /* cable-selection by carrier */ + char burst_sizes; /* ledma SBus burst sizes */ + char pio_buffer; /* init block in PIO space? */ + + unsigned short busmaster_regval; + + void (*init_ring)(struct net_device *); + void (*rx)(struct net_device *); + void (*tx)(struct net_device *); + + char *name; + dma_addr_t init_block_dvma; + struct net_device *dev; /* Backpointer */ + struct platform_device *op; + struct platform_device *lebuffer; + struct timer_list multicast_timer; +}; + +#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ + lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\ + lp->tx_old - lp->tx_new-1) + +/* Lance registers. 
*/ +#define RDP 0x00UL /* register data port */ +#define RAP 0x02UL /* register address port */ +#define LANCE_REG_SIZE 0x04UL + +#define STOP_LANCE(__lp) \ +do { void __iomem *__base = (__lp)->lregs; \ + sbus_writew(LE_CSR0, __base + RAP); \ + sbus_writew(LE_C0_STOP, __base + RDP); \ +} while (0) + +int sparc_lance_debug = 2; + +/* The Lance uses 24 bit addresses */ +/* On the Sun4c the DVMA will provide the remaining bytes for us */ +/* On the Sun4m we have to instruct the ledma to provide them */ +/* Even worse, on scsi/ether SBUS cards, the init block and the + * transmit/receive buffers are addresses as offsets from absolute + * zero on the lebuffer PIO area. -DaveM + */ + +#define LANCE_ADDR(x) ((long)(x) & ~0xff000000) + +/* Load the CSR registers */ +static void load_csrs(struct lance_private *lp) +{ + u32 leptr; + + if (lp->pio_buffer) + leptr = 0; + else + leptr = LANCE_ADDR(lp->init_block_dvma); + + sbus_writew(LE_CSR1, lp->lregs + RAP); + sbus_writew(leptr & 0xffff, lp->lregs + RDP); + sbus_writew(LE_CSR2, lp->lregs + RAP); + sbus_writew(leptr >> 16, lp->lregs + RDP); + sbus_writew(LE_CSR3, lp->lregs + RAP); + sbus_writew(lp->busmaster_regval, lp->lregs + RDP); + + /* Point back to csr0 */ + sbus_writew(LE_CSR0, lp->lregs + RAP); +} + +/* Setup the Lance Rx and Tx rings */ +static void lance_init_ring_dvma(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib = lp->init_block_mem; + dma_addr_t aib = lp->init_block_dvma; + __u32 leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the lance init block + * Note that on the sparc you need to swap the ethernet address. 
+ */ + ib->phys_addr [0] = dev->dev_addr [1]; + ib->phys_addr [1] = dev->dev_addr [0]; + ib->phys_addr [2] = dev->dev_addr [3]; + ib->phys_addr [3] = dev->dev_addr [2]; + ib->phys_addr [4] = dev->dev_addr [5]; + ib->phys_addr [5] = dev->dev_addr [4]; + + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i)); + ib->btx_ring [i].tmd0 = leptr; + ib->btx_ring [i].tmd1_hadr = leptr >> 16; + ib->btx_ring [i].tmd1_bits = 0; + ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ + ib->btx_ring [i].misc = 0; + } + + /* Setup the Rx ring entries */ + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i)); + + ib->brx_ring [i].rmd0 = leptr; + ib->brx_ring [i].rmd1_hadr = leptr >> 16; + ib->brx_ring [i].rmd1_bits = LE_R1_OWN; + ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; + ib->brx_ring [i].mblength = 0; + } + + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0)); + ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16); + ib->rx_ptr = leptr; + + /* Setup tx descriptor pointer */ + leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0)); + ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16); + ib->tx_ptr = leptr; +} + +static void lance_init_ring_pio(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block __iomem *ib = lp->init_block_iomem; + u32 leptr; + int i; + + /* Lock out other processes while setting up hardware */ + netif_stop_queue(dev); + lp->rx_new = lp->tx_new = 0; + lp->rx_old = lp->tx_old = 0; + + /* Copy the ethernet address to the lance init block + * Note that on the sparc you need to swap the ethernet address. 
+ */ + sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]); + sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]); + sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]); + sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]); + sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]); + sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]); + + /* Setup the Tx ring entries */ + for (i = 0; i < TX_RING_SIZE; i++) { + leptr = libbuff_offset(tx_buf, i); + sbus_writew(leptr, &ib->btx_ring [i].tmd0); + sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr); + sbus_writeb(0, &ib->btx_ring [i].tmd1_bits); + + /* The ones required by tmd2 */ + sbus_writew(0xf000, &ib->btx_ring [i].length); + sbus_writew(0, &ib->btx_ring [i].misc); + } + + /* Setup the Rx ring entries */ + for (i = 0; i < RX_RING_SIZE; i++) { + leptr = libbuff_offset(rx_buf, i); + + sbus_writew(leptr, &ib->brx_ring [i].rmd0); + sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr); + sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits); + sbus_writew(-RX_BUFF_SIZE|0xf000, + &ib->brx_ring [i].length); + sbus_writew(0, &ib->brx_ring [i].mblength); + } + + /* Setup the initialization block */ + + /* Setup rx descriptor pointer */ + leptr = libdesc_offset(brx_ring, 0); + sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16), + &ib->rx_len); + sbus_writew(leptr, &ib->rx_ptr); + + /* Setup tx descriptor pointer */ + leptr = libdesc_offset(btx_ring, 0); + sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16), + &ib->tx_len); + sbus_writew(leptr, &ib->tx_ptr); +} + +static void init_restart_ledma(struct lance_private *lp) +{ + u32 csr = sbus_readl(lp->dregs + DMA_CSR); + + if (!(csr & DMA_HNDL_ERROR)) { + /* E-Cache draining */ + while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) + barrier(); + } + + csr = sbus_readl(lp->dregs + DMA_CSR); + csr &= ~DMA_E_BURSTS; + if (lp->burst_sizes & DMA_BURST32) + csr |= DMA_E_BURST32; + else + csr |= DMA_E_BURST16; + + csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV); + + if (lp->tpe) + csr |= DMA_EN_ENETAUI; + else + csr &= ~DMA_EN_ENETAUI; + udelay(20); + sbus_writel(csr, lp->dregs + DMA_CSR); + udelay(200); +} + +static int init_restart_lance(struct lance_private *lp) +{ + u16 regval = 0; + int i; + + if (lp->dregs) + init_restart_ledma(lp); + + sbus_writew(LE_CSR0, lp->lregs + RAP); + sbus_writew(LE_C0_INIT, lp->lregs + RDP); + + /* Wait for the lance to complete initialization */ + for (i = 0; i < 100; i++) { + regval = sbus_readw(lp->lregs + RDP); + + if (regval & (LE_C0_ERR | LE_C0_IDON)) + break; + barrier(); + } + if (i == 100 || (regval & LE_C0_ERR)) { + printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n", + i, regval); + if (lp->dregs) + printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR)); + return -1; + } + + /* Clear IDON by writing a "1", enable interrupts and start lance */ + sbus_writew(LE_C0_IDON, lp->lregs + RDP); + sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP); + + if (lp->dregs) { + u32 csr = sbus_readl(lp->dregs + DMA_CSR); + + csr |= DMA_INT_ENAB; + sbus_writel(csr, lp->dregs + DMA_CSR); + } + + return 0; +} + +static void lance_rx_dvma(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib = lp->init_block_mem; + struct lance_rx_desc *rd; + u8 bits; + int len, entry = lp->rx_new; + struct sk_buff *skb; + + for (rd = &ib->brx_ring [entry]; + !((bits = rd->rmd1_bits) & LE_R1_OWN); + rd = &ib->brx_ring [entry]) { + + /* We got an incomplete frame? 
*/ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; + } else { + len = (rd->mblength & 0xfff) - 4; + skb = netdev_alloc_skb(dev, len + 2); + + if (skb == NULL) { + dev->stats.rx_dropped++; + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + lp->rx_new = RX_NEXT(entry); + return; + } + + dev->stats.rx_bytes += len; + + skb_reserve(skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + skb_copy_to_linear_data(skb, + (unsigned char *)&(ib->rx_buf [entry][0]), + len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + + /* Return the packet to the pool */ + rd->mblength = 0; + rd->rmd1_bits = LE_R1_OWN; + entry = RX_NEXT(entry); + } + + lp->rx_new = entry; +} + +static void lance_tx_dvma(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib = lp->init_block_mem; + int i, j; + + spin_lock(&lp->lock); + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + struct lance_tx_desc *td = &ib->btx_ring [i]; + u8 bits = td->tmd1_bits; + + /* If we hit a packet not owned by us, stop */ + if (bits & LE_T1_OWN) + break; + + if (bits & LE_T1_ERR) { + u16 status = td->misc; + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", + dev->name, lp->tpe?"TPE":"AUI"); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } + + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + if (status & (LE_T3_BUF|LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } else if ((bits & LE_T1_POK) == LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + td->tmd1_bits = bits & ~(LE_T1_POK); + + /* One collision before packet was sent. */ + if (bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. */ + if (bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = TX_NEXT(j); + } + lp->tx_old = j; +out: + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); + + spin_unlock(&lp->lock); +} + +static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len) +{ + u16 *p16 = (u16 *) skb->data; + u32 *p32; + u8 *p8; + void __iomem *pbuf = piobuf; + + /* We know here that both src and dest are on a 16bit boundary. 
*/ + *p16++ = sbus_readw(pbuf); + p32 = (u32 *) p16; + pbuf += 2; + len -= 2; + + while (len >= 4) { + *p32++ = sbus_readl(pbuf); + pbuf += 4; + len -= 4; + } + p8 = (u8 *) p32; + if (len >= 2) { + p16 = (u16 *) p32; + *p16++ = sbus_readw(pbuf); + pbuf += 2; + len -= 2; + p8 = (u8 *) p16; + } + if (len >= 1) + *p8 = sbus_readb(pbuf); +} + +static void lance_rx_pio(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block __iomem *ib = lp->init_block_iomem; + struct lance_rx_desc __iomem *rd; + unsigned char bits; + int len, entry; + struct sk_buff *skb; + + entry = lp->rx_new; + for (rd = &ib->brx_ring [entry]; + !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN); + rd = &ib->brx_ring [entry]) { + + /* We got an incomplete frame? */ + if ((bits & LE_R1_POK) != LE_R1_POK) { + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; + } else if (bits & LE_R1_ERR) { + /* Count only the end frame as a rx error, + * not the beginning + */ + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; + } else { + len = (sbus_readw(&rd->mblength) & 0xfff) - 4; + skb = netdev_alloc_skb(dev, len + 2); + + if (skb == NULL) { + dev->stats.rx_dropped++; + sbus_writew(0, &rd->mblength); + sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); + lp->rx_new = RX_NEXT(entry); + return; + } + + dev->stats.rx_bytes += len; + + skb_reserve (skb, 2); /* 16 byte align */ + skb_put(skb, len); /* make room */ + lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + + /* Return the packet to the pool */ + sbus_writew(0, &rd->mblength); + sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); + entry = RX_NEXT(entry); + } + + lp->rx_new = entry; +} + +static void lance_tx_pio(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block __iomem *ib = lp->init_block_iomem; + int i, j; + + spin_lock(&lp->lock); + + j = lp->tx_old; + for (i = j; i != lp->tx_new; i = j) { + struct lance_tx_desc __iomem *td = &ib->btx_ring [i]; + u8 bits = sbus_readb(&td->tmd1_bits); + + /* If we hit a packet not owned by us, stop */ + if (bits & LE_T1_OWN) + break; + + if (bits & LE_T1_ERR) { + u16 status = sbus_readw(&td->misc); + + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; + + if (status & LE_T3_CLOS) { + dev->stats.tx_carrier_errors++; + if (lp->auto_select) { + lp->tpe = 1 - lp->tpe; + printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", + dev->name, lp->tpe?"TPE":"AUI"); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } + + /* Buffer errors and underflows turn off the + * transmitter, restart the adapter. + */ + if (status & (LE_T3_BUF|LE_T3_UFL)) { + dev->stats.tx_fifo_errors++; + + printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", + dev->name); + STOP_LANCE(lp); + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + goto out; + } + } else if ((bits & LE_T1_POK) == LE_T1_POK) { + /* + * So we don't count the packet more than once. + */ + sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits); + + /* One collision before packet was sent. 
*/ + if (bits & LE_T1_EONE) + dev->stats.collisions++; + + /* More than one collision, be optimistic. */ + if (bits & LE_T1_EMORE) + dev->stats.collisions += 2; + + dev->stats.tx_packets++; + } + + j = TX_NEXT(j); + } + lp->tx_old = j; + + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL > 0) + netif_wake_queue(dev); +out: + spin_unlock(&lp->lock); +} + +static irqreturn_t lance_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct lance_private *lp = netdev_priv(dev); + int csr0; + + sbus_writew(LE_CSR0, lp->lregs + RAP); + csr0 = sbus_readw(lp->lregs + RDP); + + /* Acknowledge all the interrupt sources ASAP */ + sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT), + lp->lregs + RDP); + + if ((csr0 & LE_C0_ERR) != 0) { + /* Clear the error condition */ + sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | + LE_C0_CERR | LE_C0_MERR), + lp->lregs + RDP); + } + + if (csr0 & LE_C0_RINT) + lp->rx(dev); + + if (csr0 & LE_C0_TINT) + lp->tx(dev); + + if (csr0 & LE_C0_BABL) + dev->stats.tx_errors++; + + if (csr0 & LE_C0_MISS) + dev->stats.rx_errors++; + + if (csr0 & LE_C0_MERR) { + if (lp->dregs) { + u32 addr = sbus_readl(lp->dregs + DMA_ADDR); + + printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n", + dev->name, csr0, addr & 0xffffff); + } else { + printk(KERN_ERR "%s: Memory error, status %04x\n", + dev->name, csr0); + } + + sbus_writew(LE_C0_STOP, lp->lregs + RDP); + + if (lp->dregs) { + u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR); + + dma_csr |= DMA_FIFO_INV; + sbus_writel(dma_csr, lp->dregs + DMA_CSR); + } + + lp->init_ring(dev); + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); + } + + sbus_writew(LE_C0_INEA, lp->lregs + RDP); + + return IRQ_HANDLED; +} + +/* Build a fake network packet and send it to ourselves. 
*/
+static void build_fake_packet(struct lance_private *lp)
+{
+	struct net_device *dev = lp->dev;
+	int i, entry;
+
+	entry = lp->tx_new & TX_RING_MOD_MASK;
+	if (lp->pio_buffer) {
+		struct lance_init_block __iomem *ib = lp->init_block_iomem;
+		u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]);
+		struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet;
+		for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
+			sbus_writew(0, &packet[i]);
+		for (i = 0; i < 6; i++) {
+			sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
+			sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
+		}
+		sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
+		sbus_writew(0, &ib->btx_ring[entry].misc);
+		sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+	} else {
+		struct lance_init_block *ib = lp->init_block_mem;
+		u16 *packet = (u16 *) &(ib->tx_buf[entry][0]);
+		struct ethhdr *eth = (struct ethhdr *) packet;
+		memset(packet, 0, ETH_ZLEN);
+		for (i = 0; i < 6; i++) {
+			eth->h_dest[i] = dev->dev_addr[i];
+			eth->h_source[i] = dev->dev_addr[i];
+		}
+		ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
+		ib->btx_ring[entry].misc = 0;
+		ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+	}
+	lp->tx_new = TX_NEXT(entry);
+}
+
+static int lance_open(struct net_device *dev)
+{
+	struct lance_private *lp = netdev_priv(dev);
+	int status = 0;
+
+	STOP_LANCE(lp);
+
+	if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED,
+			lancestr, (void *) dev)) {
+		printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
+		return -EAGAIN;
+	}
+
+	/* On the 4m, setup the ledma to provide the upper bits for buffers */
+	if (lp->dregs) {
+		u32 regval = lp->init_block_dvma & 0xff000000;
+
+		sbus_writel(regval, lp->dregs + DMA_TEST);
+	}
+
+	/* Set mode and clear multicast filter only at device open,
+	 * so that lance_init_ring() called at any error will not
+	 * forget multicast filters.
+	 *
+	 * BTW it is common bug in all lance drivers!
--ANK + */ + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + sbus_writew(0, &ib->mode); + sbus_writel(0, &ib->filter[0]); + sbus_writel(0, &ib->filter[1]); + } else { + struct lance_init_block *ib = lp->init_block_mem; + ib->mode = 0; + ib->filter [0] = 0; + ib->filter [1] = 0; + } + + lp->init_ring(dev); + load_csrs(lp); + + netif_start_queue(dev); + + status = init_restart_lance(lp); + if (!status && lp->auto_select) { + build_fake_packet(lp); + sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); + } + + return status; +} + +static int lance_close(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + del_timer_sync(&lp->multicast_timer); + + STOP_LANCE(lp); + + free_irq(dev->irq, (void *) dev); + return 0; +} + +static int lance_reset(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int status; + + STOP_LANCE(lp); + + /* On the 4m, reset the dma too */ + if (lp->dregs) { + u32 csr, addr; + + printk(KERN_ERR "resetting ledma\n"); + csr = sbus_readl(lp->dregs + DMA_CSR); + sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); + udelay(200); + sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); + + addr = lp->init_block_dvma & 0xff000000; + sbus_writel(addr, lp->dregs + DMA_TEST); + } + lp->init_ring(dev); + load_csrs(lp); + dev->trans_start = jiffies; /* prevent tx timeout */ + status = init_restart_lance(lp); + return status; +} + +static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len) +{ + void __iomem *piobuf = dest; + u32 *p32; + u16 *p16; + u8 *p8; + + switch ((unsigned long)src & 0x3) { + case 0: + p32 = (u32 *) src; + while (len >= 4) { + sbus_writel(*p32, piobuf); + p32++; + piobuf += 4; + len -= 4; + } + src = (char *) p32; + break; + case 1: + case 3: + p8 = (u8 *) src; + while (len >= 4) { + u32 val; + + val = p8[0] << 24; + val |= p8[1] << 16; + val |= p8[2] << 8; + val |= p8[3]; + sbus_writel(val, piobuf); + p8 += 4; + piobuf += 4; + len -= 4; + } + src = (char *) p8; + break; + case 2: + p16 = (u16 *) src; + while (len >= 4) { + u32 val = p16[0]<<16 | p16[1]; + sbus_writel(val, piobuf); + p16 += 2; + piobuf += 4; + len -= 4; + } + src = (char *) p16; + break; + } + if (len >= 2) { + u16 val = src[0] << 8 | src[1]; + sbus_writew(val, piobuf); + src += 2; + piobuf += 2; + len -= 2; + } + if (len >= 1) + sbus_writeb(src[0], piobuf); +} + +static void lance_piozero(void __iomem *dest, int len) +{ + void __iomem *piobuf = dest; + + if ((unsigned long)piobuf & 1) { + sbus_writeb(0, piobuf); + piobuf += 1; + len -= 1; + if (len == 0) + return; + } + if (len == 1) { + sbus_writeb(0, piobuf); + return; + } + if ((unsigned long)piobuf & 2) { + sbus_writew(0, piobuf); + piobuf += 2; + len -= 2; + if (len == 0) + return; + } + while (len >= 4) { + sbus_writel(0, piobuf); + piobuf += 4; + len -= 4; + } + if (len >= 2) { + sbus_writew(0, piobuf); + piobuf += 2; + len -= 2; + } + if (len >= 1) + sbus_writeb(0, piobuf); +} + +static void lance_tx_timeout(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + + printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n", + dev->name, sbus_readw(lp->lregs + RDP)); + lance_reset(dev); + netif_wake_queue(dev); +} + +static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + int entry, skblen, len; + + skblen = skb->len; + + len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; + + spin_lock_irq(&lp->lock); + + dev->stats.tx_bytes += len; + + entry = lp->tx_new & TX_RING_MOD_MASK; + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length); + sbus_writew(0, &ib->btx_ring[entry].misc); + lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen); + if (len != skblen) + lance_piozero(&ib->tx_buf[entry][skblen], len - skblen); + sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits); + } else { + struct lance_init_block *ib = lp->init_block_mem; + ib->btx_ring [entry].length = (-len) | 0xf000; + ib->btx_ring [entry].misc = 0; + skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen); + if (len != skblen) + memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen); + ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN); + } + + lp->tx_new = TX_NEXT(entry); + + if (TX_BUFFS_AVAIL <= 0) + netif_stop_queue(dev); + + /* Kick the lance: transmit now */ + sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP); + + /* Read back CSR to invalidate the E-Cache. + * This is needed, because DMA_DSBL_WR_INV is set. + */ + if (lp->dregs) + sbus_readw(lp->lregs + RDP); + + spin_unlock_irq(&lp->lock); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* taken from the depca driver */ +static void lance_load_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct netdev_hw_addr *ha; + u32 crc; + u32 val; + + /* set all multicast bits */ + if (dev->flags & IFF_ALLMULTI) + val = ~0; + else + val = 0; + + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + sbus_writel(val, &ib->filter[0]); + sbus_writel(val, &ib->filter[1]); + } else { + struct lance_init_block *ib = lp->init_block_mem; + ib->filter [0] = val; + ib->filter [1] = val; + } + + if (dev->flags & IFF_ALLMULTI) + return; + + /* Add addresses */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc = crc >> 26; + if (lp->pio_buffer) { + struct lance_init_block __iomem *ib = lp->init_block_iomem; + u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter; + u16 tmp = sbus_readw(&mcast_table[crc>>4]); + tmp |= 1 << (crc & 0xf); + sbus_writew(tmp, &mcast_table[crc>>4]); + } else { + struct lance_init_block *ib = lp->init_block_mem; + u16 *mcast_table = (u16 *) &ib->filter; + mcast_table [crc >> 4] |= 1 << (crc & 0xf); + } + } +} + +static void lance_set_multicast(struct net_device *dev) +{ + struct lance_private *lp = netdev_priv(dev); + struct lance_init_block *ib_mem = lp->init_block_mem; + struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem; + u16 mode; + + if (!netif_running(dev)) + return; + + if (lp->tx_old != lp->tx_new) { + mod_timer(&lp->multicast_timer, jiffies + 4); + netif_wake_queue(dev); + return; + } + + netif_stop_queue(dev); + + STOP_LANCE(lp); + lp->init_ring(dev); + + if (lp->pio_buffer) + mode = sbus_readw(&ib_iomem->mode); + else + mode = ib_mem->mode; + if (dev->flags & IFF_PROMISC) { + mode |= LE_MO_PROM; + if (lp->pio_buffer) + sbus_writew(mode, &ib_iomem->mode); + else + ib_mem->mode = mode; + } else { + mode &= ~LE_MO_PROM; + if (lp->pio_buffer) + sbus_writew(mode, &ib_iomem->mode); + else + ib_mem->mode = mode; + lance_load_multicast(dev); + } + load_csrs(lp); + init_restart_lance(lp); + netif_wake_queue(dev); +} + +static void lance_set_multicast_retry(unsigned long _opaque) +{ + struct net_device *dev = (struct net_device *) _opaque; + + 
lance_set_multicast(dev); +} + +static void lance_free_hwresources(struct lance_private *lp) +{ + if (lp->lregs) + of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE); + if (lp->dregs) { + struct platform_device *ledma = lp->ledma; + + of_iounmap(&ledma->resource[0], lp->dregs, + resource_size(&ledma->resource[0])); + } + if (lp->init_block_iomem) { + of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem, + sizeof(struct lance_init_block)); + } else if (lp->init_block_mem) { + dma_free_coherent(&lp->op->dev, + sizeof(struct lance_init_block), + lp->init_block_mem, + lp->init_block_dvma); + } +} + +/* Ethtool support... */ +static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "sunlance", sizeof(info->driver)); + strlcpy(info->version, "2.02", sizeof(info->version)); +} + +static const struct ethtool_ops sparc_lance_ethtool_ops = { + .get_drvinfo = sparc_lance_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops sparc_lance_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_rx_mode = lance_set_multicast, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int sparc_lance_probe_one(struct platform_device *op, + struct platform_device *ledma, + struct platform_device *lebuffer) +{ + struct device_node *dp = op->dev.of_node; + static unsigned version_printed; + struct lance_private *lp; + struct net_device *dev; + int i; + + dev = alloc_etherdev(sizeof(struct lance_private) + 8); + if (!dev) + return -ENOMEM; + + lp = netdev_priv(dev); + + if (sparc_lance_debug && version_printed++ == 0) + printk (KERN_INFO "%s", version); + + spin_lock_init(&lp->lock); + + /* Copy the IDPROM ethernet address to the device structure, later we + * will copy the address in the device structure to the lance + * initialization block. 
+ */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = idprom->id_ethaddr[i]; + + /* Get the IO region */ + lp->lregs = of_ioremap(&op->resource[0], 0, + LANCE_REG_SIZE, lancestr); + if (!lp->lregs) { + printk(KERN_ERR "SunLance: Cannot map registers.\n"); + goto fail; + } + + lp->ledma = ledma; + if (lp->ledma) { + lp->dregs = of_ioremap(&ledma->resource[0], 0, + resource_size(&ledma->resource[0]), + "ledma"); + if (!lp->dregs) { + printk(KERN_ERR "SunLance: Cannot map " + "ledma registers.\n"); + goto fail; + } + } + + lp->op = op; + lp->lebuffer = lebuffer; + if (lebuffer) { + /* sanity check */ + if (lebuffer->resource[0].start & 7) { + printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n"); + goto fail; + } + lp->init_block_iomem = + of_ioremap(&lebuffer->resource[0], 0, + sizeof(struct lance_init_block), "lebuffer"); + if (!lp->init_block_iomem) { + printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n"); + goto fail; + } + lp->init_block_dvma = 0; + lp->pio_buffer = 1; + lp->init_ring = lance_init_ring_pio; + lp->rx = lance_rx_pio; + lp->tx = lance_tx_pio; + } else { + lp->init_block_mem = + dma_alloc_coherent(&op->dev, + sizeof(struct lance_init_block), + &lp->init_block_dvma, GFP_ATOMIC); + if (!lp->init_block_mem) + goto fail; + + lp->pio_buffer = 0; + lp->init_ring = lance_init_ring_dvma; + lp->rx = lance_rx_dvma; + lp->tx = lance_tx_dvma; + } + lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval", + (LE_C3_BSWP | + LE_C3_ACON | + LE_C3_BCON)); + + lp->name = lancestr; + + lp->burst_sizes = 0; + if (lp->ledma) { + struct device_node *ledma_dp = ledma->dev.of_node; + struct device_node *sbus_dp; + unsigned int sbmask; + const char *prop; + u32 csr; + + /* Find burst-size property for ledma */ + lp->burst_sizes = of_getintprop_default(ledma_dp, + "burst-sizes", 0); + + /* ledma may be capable of fast bursts, but sbus may not. */ + sbus_dp = ledma_dp->parent; + sbmask = of_getintprop_default(sbus_dp, "burst-sizes", + DMA_BURSTBITS); + lp->burst_sizes &= sbmask; + + /* Get the cable-selection property */ + prop = of_get_property(ledma_dp, "cable-selection", NULL); + if (!prop || prop[0] == '\0') { + struct device_node *nd; + + printk(KERN_INFO "SunLance: using " + "auto-carrier-detection.\n"); + + nd = of_find_node_by_path("/options"); + if (!nd) + goto no_link_test; + + prop = of_get_property(nd, "tpe-link-test?", NULL); + if (!prop) + goto no_link_test; + + if (strcmp(prop, "true")) { + printk(KERN_NOTICE "SunLance: warning: overriding option " + "'tpe-link-test?'\n"); + printk(KERN_NOTICE "SunLance: warning: mail any problems " + "to ecd@skynet.be\n"); + auxio_set_lte(AUXIO_LTE_ON); + } +no_link_test: + lp->auto_select = 1; + lp->tpe = 0; + } else if (!strcmp(prop, "aui")) { + lp->auto_select = 0; + lp->tpe = 0; + } else { + lp->auto_select = 0; + lp->tpe = 1; + } + + /* Reset ledma */ + csr = sbus_readl(lp->dregs + DMA_CSR); + sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR); + udelay(200); + sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR); + } else + lp->dregs = NULL; + + lp->dev = dev; + SET_NETDEV_DEV(dev, &op->dev); + dev->watchdog_timeo = 5*HZ; + dev->ethtool_ops = &sparc_lance_ethtool_ops; + dev->netdev_ops = &sparc_lance_ops; + + dev->irq = op->archdata.irqs[0]; + + /* We cannot sleep if the chip is busy during a + * multicast list update event, because such events + * can occur from interrupts (ex. IPv6). So we + * use a timer to try again later when necessary. 
-DaveM + */ + init_timer(&lp->multicast_timer); + lp->multicast_timer.data = (unsigned long) dev; + lp->multicast_timer.function = lance_set_multicast_retry; + + if (register_netdev(dev)) { + printk(KERN_ERR "SunLance: Cannot register device.\n"); + goto fail; + } + + platform_set_drvdata(op, lp); + + printk(KERN_INFO "%s: LANCE %pM\n", + dev->name, dev->dev_addr); + + return 0; + +fail: + lance_free_hwresources(lp); + free_netdev(dev); + return -ENODEV; +} + +static int sunlance_sbus_probe(struct platform_device *op) +{ + struct platform_device *parent = to_platform_device(op->dev.parent); + struct device_node *parent_dp = parent->dev.of_node; + int err; + + if (!strcmp(parent_dp->name, "ledma")) { + err = sparc_lance_probe_one(op, parent, NULL); + } else if (!strcmp(parent_dp->name, "lebuffer")) { + err = sparc_lance_probe_one(op, NULL, parent); + } else + err = sparc_lance_probe_one(op, NULL, NULL); + + return err; +} + +static int sunlance_sbus_remove(struct platform_device *op) +{ + struct lance_private *lp = platform_get_drvdata(op); + struct net_device *net_dev = lp->dev; + + unregister_netdev(net_dev); + + lance_free_hwresources(lp); + + free_netdev(net_dev); + + return 0; +} + +static const struct of_device_id sunlance_sbus_match[] = { + { + .name = "le", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, sunlance_sbus_match); + +static struct platform_driver sunlance_sbus_driver = { + .driver = { + .name = "sunlance", + .of_match_table = sunlance_sbus_match, + }, + .probe = sunlance_sbus_probe, + .remove = sunlance_sbus_remove, +}; + +module_platform_driver(sunlance_sbus_driver); diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile new file mode 100644 index 000000000..171a7e680 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o + +amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \ + xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \ + xgbe-ptp.o + +amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o +amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h new file mode 100644 index 000000000..34c28aac7 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -0,0 +1,1145 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. 
+ * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. 
+ * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __XGBE_COMMON_H__ +#define __XGBE_COMMON_H__ + +/* DMA register offsets */ +#define DMA_MR 0x3000 +#define DMA_SBMR 0x3004 +#define DMA_ISR 0x3008 +#define DMA_AXIARCR 0x3010 +#define DMA_AXIAWCR 0x3018 +#define DMA_DSR0 0x3020 +#define DMA_DSR1 0x3024 + +/* DMA register entry bit positions and sizes */ +#define DMA_AXIARCR_DRC_INDEX 0 +#define DMA_AXIARCR_DRC_WIDTH 4 +#define DMA_AXIARCR_DRD_INDEX 4 +#define DMA_AXIARCR_DRD_WIDTH 2 +#define DMA_AXIARCR_TEC_INDEX 8 +#define DMA_AXIARCR_TEC_WIDTH 4 +#define DMA_AXIARCR_TED_INDEX 12 +#define DMA_AXIARCR_TED_WIDTH 2 +#define DMA_AXIARCR_THC_INDEX 16 +#define DMA_AXIARCR_THC_WIDTH 4 +#define DMA_AXIARCR_THD_INDEX 20 +#define DMA_AXIARCR_THD_WIDTH 2 +#define DMA_AXIAWCR_DWC_INDEX 0 +#define DMA_AXIAWCR_DWC_WIDTH 4 +#define DMA_AXIAWCR_DWD_INDEX 4 +#define DMA_AXIAWCR_DWD_WIDTH 2 +#define DMA_AXIAWCR_RPC_INDEX 8 +#define DMA_AXIAWCR_RPC_WIDTH 4 +#define DMA_AXIAWCR_RPD_INDEX 12 +#define DMA_AXIAWCR_RPD_WIDTH 2 +#define DMA_AXIAWCR_RHC_INDEX 16 +#define DMA_AXIAWCR_RHC_WIDTH 4 +#define DMA_AXIAWCR_RHD_INDEX 20 +#define DMA_AXIAWCR_RHD_WIDTH 2 +#define DMA_AXIAWCR_TDC_INDEX 24 +#define DMA_AXIAWCR_TDC_WIDTH 4 +#define DMA_AXIAWCR_TDD_INDEX 28 +#define DMA_AXIAWCR_TDD_WIDTH 2 +#define DMA_ISR_MACIS_INDEX 17 +#define DMA_ISR_MACIS_WIDTH 1 +#define DMA_ISR_MTLIS_INDEX 16 +#define DMA_ISR_MTLIS_WIDTH 1 +#define DMA_MR_SWR_INDEX 0 +#define DMA_MR_SWR_WIDTH 1 +#define DMA_SBMR_EAME_INDEX 11 +#define DMA_SBMR_EAME_WIDTH 1 +#define DMA_SBMR_BLEN_256_INDEX 7 +#define DMA_SBMR_BLEN_256_WIDTH 1 +#define DMA_SBMR_UNDEF_INDEX 0 +#define DMA_SBMR_UNDEF_WIDTH 1 + +/* DMA register values */ +#define DMA_DSR_RPS_WIDTH 4 +#define DMA_DSR_TPS_WIDTH 4 +#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) +#define DMA_DSR0_RPS_START 8 +#define DMA_DSR0_TPS_START 12 +#define DMA_DSRX_FIRST_QUEUE 3 +#define DMA_DSRX_INC 4 +#define DMA_DSRX_QPR 4 +#define DMA_DSRX_RPS_START 0 +#define 
DMA_DSRX_TPS_START 4 +#define DMA_TPS_STOPPED 0x00 +#define DMA_TPS_SUSPENDED 0x06 + +/* DMA channel register offsets + * Multiple channels can be active. The first channel has registers + * that begin at 0x3100. Each subsequent channel has registers that + * are accessed using an offset of 0x80 from the previous channel. + */ +#define DMA_CH_BASE 0x3100 +#define DMA_CH_INC 0x80 + +#define DMA_CH_CR 0x00 +#define DMA_CH_TCR 0x04 +#define DMA_CH_RCR 0x08 +#define DMA_CH_TDLR_HI 0x10 +#define DMA_CH_TDLR_LO 0x14 +#define DMA_CH_RDLR_HI 0x18 +#define DMA_CH_RDLR_LO 0x1c +#define DMA_CH_TDTR_LO 0x24 +#define DMA_CH_RDTR_LO 0x2c +#define DMA_CH_TDRLR 0x30 +#define DMA_CH_RDRLR 0x34 +#define DMA_CH_IER 0x38 +#define DMA_CH_RIWT 0x3c +#define DMA_CH_CATDR_LO 0x44 +#define DMA_CH_CARDR_LO 0x4c +#define DMA_CH_CATBR_HI 0x50 +#define DMA_CH_CATBR_LO 0x54 +#define DMA_CH_CARBR_HI 0x58 +#define DMA_CH_CARBR_LO 0x5c +#define DMA_CH_SR 0x60 + +/* DMA channel register entry bit positions and sizes */ +#define DMA_CH_CR_PBLX8_INDEX 16 +#define DMA_CH_CR_PBLX8_WIDTH 1 +#define DMA_CH_CR_SPH_INDEX 24 +#define DMA_CH_CR_SPH_WIDTH 1 +#define DMA_CH_IER_AIE_INDEX 15 +#define DMA_CH_IER_AIE_WIDTH 1 +#define DMA_CH_IER_FBEE_INDEX 12 +#define DMA_CH_IER_FBEE_WIDTH 1 +#define DMA_CH_IER_NIE_INDEX 16 +#define DMA_CH_IER_NIE_WIDTH 1 +#define DMA_CH_IER_RBUE_INDEX 7 +#define DMA_CH_IER_RBUE_WIDTH 1 +#define DMA_CH_IER_RIE_INDEX 6 +#define DMA_CH_IER_RIE_WIDTH 1 +#define DMA_CH_IER_RSE_INDEX 8 +#define DMA_CH_IER_RSE_WIDTH 1 +#define DMA_CH_IER_TBUE_INDEX 2 +#define DMA_CH_IER_TBUE_WIDTH 1 +#define DMA_CH_IER_TIE_INDEX 0 +#define DMA_CH_IER_TIE_WIDTH 1 +#define DMA_CH_IER_TXSE_INDEX 1 +#define DMA_CH_IER_TXSE_WIDTH 1 +#define DMA_CH_RCR_PBL_INDEX 16 +#define DMA_CH_RCR_PBL_WIDTH 6 +#define DMA_CH_RCR_RBSZ_INDEX 1 +#define DMA_CH_RCR_RBSZ_WIDTH 14 +#define DMA_CH_RCR_SR_INDEX 0 +#define DMA_CH_RCR_SR_WIDTH 1 +#define DMA_CH_RIWT_RWT_INDEX 0 +#define DMA_CH_RIWT_RWT_WIDTH 8 +#define DMA_CH_SR_FBE_INDEX 12 +#define DMA_CH_SR_FBE_WIDTH 1 +#define DMA_CH_SR_RBU_INDEX 7 +#define DMA_CH_SR_RBU_WIDTH 1 +#define DMA_CH_SR_RI_INDEX 6 +#define DMA_CH_SR_RI_WIDTH 1 +#define DMA_CH_SR_RPS_INDEX 8 +#define DMA_CH_SR_RPS_WIDTH 1 +#define DMA_CH_SR_TBU_INDEX 2 +#define DMA_CH_SR_TBU_WIDTH 1 +#define DMA_CH_SR_TI_INDEX 0 +#define DMA_CH_SR_TI_WIDTH 1 +#define DMA_CH_SR_TPS_INDEX 1 +#define DMA_CH_SR_TPS_WIDTH 1 +#define DMA_CH_TCR_OSP_INDEX 4 +#define DMA_CH_TCR_OSP_WIDTH 1 +#define DMA_CH_TCR_PBL_INDEX 16 +#define DMA_CH_TCR_PBL_WIDTH 6 +#define DMA_CH_TCR_ST_INDEX 0 +#define DMA_CH_TCR_ST_WIDTH 1 +#define DMA_CH_TCR_TSE_INDEX 12 +#define DMA_CH_TCR_TSE_WIDTH 1 + +/* DMA channel register values */ +#define DMA_OSP_DISABLE 0x00 +#define DMA_OSP_ENABLE 0x01 +#define DMA_PBL_1 1 +#define DMA_PBL_2 2 +#define DMA_PBL_4 4 +#define DMA_PBL_8 8 +#define DMA_PBL_16 16 +#define DMA_PBL_32 32 +#define DMA_PBL_64 64 /* 8 x 8 */ +#define DMA_PBL_128 128 /* 8 x 16 */ +#define DMA_PBL_256 256 /* 8 x 32 */ +#define DMA_PBL_X8_DISABLE 0x00 +#define DMA_PBL_X8_ENABLE 0x01 + +/* MAC register offsets */ +#define MAC_TCR 0x0000 +#define MAC_RCR 0x0004 +#define MAC_PFR 0x0008 +#define MAC_WTR 0x000c +#define MAC_HTR0 0x0010 +#define MAC_VLANTR 0x0050 +#define MAC_VLANHTR 0x0058 +#define MAC_VLANIR 0x0060 +#define MAC_IVLANIR 0x0064 +#define MAC_RETMR 0x006c +#define MAC_Q0TFCR 0x0070 +#define MAC_RFCR 0x0090 +#define MAC_RQC0R 0x00a0 +#define MAC_RQC1R 0x00a4 +#define MAC_RQC2R 0x00a8 +#define MAC_RQC3R 0x00ac +#define MAC_ISR 0x00b0 +#define MAC_IER 
0x00b4 +#define MAC_RTSR 0x00b8 +#define MAC_PMTCSR 0x00c0 +#define MAC_RWKPFR 0x00c4 +#define MAC_LPICSR 0x00d0 +#define MAC_LPITCR 0x00d4 +#define MAC_VR 0x0110 +#define MAC_DR 0x0114 +#define MAC_HWF0R 0x011c +#define MAC_HWF1R 0x0120 +#define MAC_HWF2R 0x0124 +#define MAC_GPIOCR 0x0278 +#define MAC_GPIOSR 0x027c +#define MAC_MACA0HR 0x0300 +#define MAC_MACA0LR 0x0304 +#define MAC_MACA1HR 0x0308 +#define MAC_MACA1LR 0x030c +#define MAC_RSSCR 0x0c80 +#define MAC_RSSAR 0x0c88 +#define MAC_RSSDR 0x0c8c +#define MAC_TSCR 0x0d00 +#define MAC_SSIR 0x0d04 +#define MAC_STSR 0x0d08 +#define MAC_STNR 0x0d0c +#define MAC_STSUR 0x0d10 +#define MAC_STNUR 0x0d14 +#define MAC_TSAR 0x0d18 +#define MAC_TSSR 0x0d20 +#define MAC_TXSNR 0x0d30 +#define MAC_TXSSR 0x0d34 + +#define MAC_QTFCR_INC 4 +#define MAC_MACA_INC 4 +#define MAC_HTR_INC 4 + +#define MAC_RQC2_INC 4 +#define MAC_RQC2_Q_PER_REG 4 + +/* MAC register entry bit positions and sizes */ +#define MAC_HWF0R_ADDMACADRSEL_INDEX 18 +#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 +#define MAC_HWF0R_ARPOFFSEL_INDEX 9 +#define MAC_HWF0R_ARPOFFSEL_WIDTH 1 +#define MAC_HWF0R_EEESEL_INDEX 13 +#define MAC_HWF0R_EEESEL_WIDTH 1 +#define MAC_HWF0R_GMIISEL_INDEX 1 +#define MAC_HWF0R_GMIISEL_WIDTH 1 +#define MAC_HWF0R_MGKSEL_INDEX 7 +#define MAC_HWF0R_MGKSEL_WIDTH 1 +#define MAC_HWF0R_MMCSEL_INDEX 8 +#define MAC_HWF0R_MMCSEL_WIDTH 1 +#define MAC_HWF0R_RWKSEL_INDEX 6 +#define MAC_HWF0R_RWKSEL_WIDTH 1 +#define MAC_HWF0R_RXCOESEL_INDEX 16 +#define MAC_HWF0R_RXCOESEL_WIDTH 1 +#define MAC_HWF0R_SAVLANINS_INDEX 27 +#define MAC_HWF0R_SAVLANINS_WIDTH 1 +#define MAC_HWF0R_SMASEL_INDEX 5 +#define MAC_HWF0R_SMASEL_WIDTH 1 +#define MAC_HWF0R_TSSEL_INDEX 12 +#define MAC_HWF0R_TSSEL_WIDTH 1 +#define MAC_HWF0R_TSSTSSEL_INDEX 25 +#define MAC_HWF0R_TSSTSSEL_WIDTH 2 +#define MAC_HWF0R_TXCOESEL_INDEX 14 +#define MAC_HWF0R_TXCOESEL_WIDTH 1 +#define MAC_HWF0R_VLHASH_INDEX 4 +#define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF1R_ADDR64_INDEX 14 +#define MAC_HWF1R_ADDR64_WIDTH 2 +#define MAC_HWF1R_ADVTHWORD_INDEX 13 +#define MAC_HWF1R_ADVTHWORD_WIDTH 1 +#define MAC_HWF1R_DBGMEMA_INDEX 19 +#define MAC_HWF1R_DBGMEMA_WIDTH 1 +#define MAC_HWF1R_DCBEN_INDEX 16 +#define MAC_HWF1R_DCBEN_WIDTH 1 +#define MAC_HWF1R_HASHTBLSZ_INDEX 24 +#define MAC_HWF1R_HASHTBLSZ_WIDTH 3 +#define MAC_HWF1R_L3L4FNUM_INDEX 27 +#define MAC_HWF1R_L3L4FNUM_WIDTH 4 +#define MAC_HWF1R_NUMTC_INDEX 21 +#define MAC_HWF1R_NUMTC_WIDTH 3 +#define MAC_HWF1R_RSSEN_INDEX 20 +#define MAC_HWF1R_RSSEN_WIDTH 1 +#define MAC_HWF1R_RXFIFOSIZE_INDEX 0 +#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5 +#define MAC_HWF1R_SPHEN_INDEX 17 +#define MAC_HWF1R_SPHEN_WIDTH 1 +#define MAC_HWF1R_TSOEN_INDEX 18 +#define MAC_HWF1R_TSOEN_WIDTH 1 +#define MAC_HWF1R_TXFIFOSIZE_INDEX 6 +#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5 +#define MAC_HWF2R_AUXSNAPNUM_INDEX 28 +#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3 +#define MAC_HWF2R_PPSOUTNUM_INDEX 24 +#define MAC_HWF2R_PPSOUTNUM_WIDTH 3 +#define MAC_HWF2R_RXCHCNT_INDEX 12 +#define MAC_HWF2R_RXCHCNT_WIDTH 4 +#define MAC_HWF2R_RXQCNT_INDEX 0 +#define MAC_HWF2R_RXQCNT_WIDTH 4 +#define MAC_HWF2R_TXCHCNT_INDEX 18 +#define MAC_HWF2R_TXCHCNT_WIDTH 4 +#define MAC_HWF2R_TXQCNT_INDEX 6 +#define MAC_HWF2R_TXQCNT_WIDTH 4 +#define MAC_IER_TSIE_INDEX 12 +#define MAC_IER_TSIE_WIDTH 1 +#define MAC_ISR_MMCRXIS_INDEX 9 +#define MAC_ISR_MMCRXIS_WIDTH 1 +#define MAC_ISR_MMCTXIS_INDEX 10 +#define MAC_ISR_MMCTXIS_WIDTH 1 +#define MAC_ISR_PMTIS_INDEX 4 +#define MAC_ISR_PMTIS_WIDTH 1 +#define MAC_ISR_TSIS_INDEX 12 +#define MAC_ISR_TSIS_WIDTH 1 +#define 
MAC_MACA1HR_AE_INDEX 31 +#define MAC_MACA1HR_AE_WIDTH 1 +#define MAC_PFR_HMC_INDEX 2 +#define MAC_PFR_HMC_WIDTH 1 +#define MAC_PFR_HPF_INDEX 10 +#define MAC_PFR_HPF_WIDTH 1 +#define MAC_PFR_HUC_INDEX 1 +#define MAC_PFR_HUC_WIDTH 1 +#define MAC_PFR_PM_INDEX 4 +#define MAC_PFR_PM_WIDTH 1 +#define MAC_PFR_PR_INDEX 0 +#define MAC_PFR_PR_WIDTH 1 +#define MAC_PFR_VTFE_INDEX 16 +#define MAC_PFR_VTFE_WIDTH 1 +#define MAC_PMTCSR_MGKPKTEN_INDEX 1 +#define MAC_PMTCSR_MGKPKTEN_WIDTH 1 +#define MAC_PMTCSR_PWRDWN_INDEX 0 +#define MAC_PMTCSR_PWRDWN_WIDTH 1 +#define MAC_PMTCSR_RWKFILTRST_INDEX 31 +#define MAC_PMTCSR_RWKFILTRST_WIDTH 1 +#define MAC_PMTCSR_RWKPKTEN_INDEX 2 +#define MAC_PMTCSR_RWKPKTEN_WIDTH 1 +#define MAC_Q0TFCR_PT_INDEX 16 +#define MAC_Q0TFCR_PT_WIDTH 16 +#define MAC_Q0TFCR_TFE_INDEX 1 +#define MAC_Q0TFCR_TFE_WIDTH 1 +#define MAC_RCR_ACS_INDEX 1 +#define MAC_RCR_ACS_WIDTH 1 +#define MAC_RCR_CST_INDEX 2 +#define MAC_RCR_CST_WIDTH 1 +#define MAC_RCR_DCRCC_INDEX 3 +#define MAC_RCR_DCRCC_WIDTH 1 +#define MAC_RCR_HDSMS_INDEX 12 +#define MAC_RCR_HDSMS_WIDTH 3 +#define MAC_RCR_IPC_INDEX 9 +#define MAC_RCR_IPC_WIDTH 1 +#define MAC_RCR_JE_INDEX 8 +#define MAC_RCR_JE_WIDTH 1 +#define MAC_RCR_LM_INDEX 10 +#define MAC_RCR_LM_WIDTH 1 +#define MAC_RCR_RE_INDEX 0 +#define MAC_RCR_RE_WIDTH 1 +#define MAC_RFCR_PFCE_INDEX 8 +#define MAC_RFCR_PFCE_WIDTH 1 +#define MAC_RFCR_RFE_INDEX 0 +#define MAC_RFCR_RFE_WIDTH 1 +#define MAC_RFCR_UP_INDEX 1 +#define MAC_RFCR_UP_WIDTH 1 +#define MAC_RQC0R_RXQ0EN_INDEX 0 +#define MAC_RQC0R_RXQ0EN_WIDTH 2 +#define MAC_RSSAR_ADDRT_INDEX 2 +#define MAC_RSSAR_ADDRT_WIDTH 1 +#define MAC_RSSAR_CT_INDEX 1 +#define MAC_RSSAR_CT_WIDTH 1 +#define MAC_RSSAR_OB_INDEX 0 +#define MAC_RSSAR_OB_WIDTH 1 +#define MAC_RSSAR_RSSIA_INDEX 8 +#define MAC_RSSAR_RSSIA_WIDTH 8 +#define MAC_RSSCR_IP2TE_INDEX 1 +#define MAC_RSSCR_IP2TE_WIDTH 1 +#define MAC_RSSCR_RSSE_INDEX 0 +#define MAC_RSSCR_RSSE_WIDTH 1 +#define MAC_RSSCR_TCP4TE_INDEX 2 +#define MAC_RSSCR_TCP4TE_WIDTH 1 +#define MAC_RSSCR_UDP4TE_INDEX 3 +#define MAC_RSSCR_UDP4TE_WIDTH 1 +#define MAC_RSSDR_DMCH_INDEX 0 +#define MAC_RSSDR_DMCH_WIDTH 4 +#define MAC_SSIR_SNSINC_INDEX 8 +#define MAC_SSIR_SNSINC_WIDTH 8 +#define MAC_SSIR_SSINC_INDEX 16 +#define MAC_SSIR_SSINC_WIDTH 8 +#define MAC_TCR_SS_INDEX 29 +#define MAC_TCR_SS_WIDTH 2 +#define MAC_TCR_TE_INDEX 0 +#define MAC_TCR_TE_WIDTH 1 +#define MAC_TSCR_AV8021ASMEN_INDEX 28 +#define MAC_TSCR_AV8021ASMEN_WIDTH 1 +#define MAC_TSCR_SNAPTYPSEL_INDEX 16 +#define MAC_TSCR_SNAPTYPSEL_WIDTH 2 +#define MAC_TSCR_TSADDREG_INDEX 5 +#define MAC_TSCR_TSADDREG_WIDTH 1 +#define MAC_TSCR_TSCFUPDT_INDEX 1 +#define MAC_TSCR_TSCFUPDT_WIDTH 1 +#define MAC_TSCR_TSCTRLSSR_INDEX 9 +#define MAC_TSCR_TSCTRLSSR_WIDTH 1 +#define MAC_TSCR_TSENA_INDEX 0 +#define MAC_TSCR_TSENA_WIDTH 1 +#define MAC_TSCR_TSENALL_INDEX 8 +#define MAC_TSCR_TSENALL_WIDTH 1 +#define MAC_TSCR_TSEVNTENA_INDEX 14 +#define MAC_TSCR_TSEVNTENA_WIDTH 1 +#define MAC_TSCR_TSINIT_INDEX 2 +#define MAC_TSCR_TSINIT_WIDTH 1 +#define MAC_TSCR_TSIPENA_INDEX 11 +#define MAC_TSCR_TSIPENA_WIDTH 1 +#define MAC_TSCR_TSIPV4ENA_INDEX 13 +#define MAC_TSCR_TSIPV4ENA_WIDTH 1 +#define MAC_TSCR_TSIPV6ENA_INDEX 12 +#define MAC_TSCR_TSIPV6ENA_WIDTH 1 +#define MAC_TSCR_TSMSTRENA_INDEX 15 +#define MAC_TSCR_TSMSTRENA_WIDTH 1 +#define MAC_TSCR_TSVER2ENA_INDEX 10 +#define MAC_TSCR_TSVER2ENA_WIDTH 1 +#define MAC_TSCR_TXTSSTSM_INDEX 24 +#define MAC_TSCR_TXTSSTSM_WIDTH 1 +#define MAC_TSSR_TXTSC_INDEX 15 +#define MAC_TSSR_TXTSC_WIDTH 1 +#define MAC_TXSNR_TXTSSTSMIS_INDEX 31 +#define 
MAC_TXSNR_TXTSSTSMIS_WIDTH 1 +#define MAC_VLANHTR_VLHT_INDEX 0 +#define MAC_VLANHTR_VLHT_WIDTH 16 +#define MAC_VLANIR_VLTI_INDEX 20 +#define MAC_VLANIR_VLTI_WIDTH 1 +#define MAC_VLANIR_CSVL_INDEX 19 +#define MAC_VLANIR_CSVL_WIDTH 1 +#define MAC_VLANTR_DOVLTC_INDEX 20 +#define MAC_VLANTR_DOVLTC_WIDTH 1 +#define MAC_VLANTR_ERSVLM_INDEX 19 +#define MAC_VLANTR_ERSVLM_WIDTH 1 +#define MAC_VLANTR_ESVL_INDEX 18 +#define MAC_VLANTR_ESVL_WIDTH 1 +#define MAC_VLANTR_ETV_INDEX 16 +#define MAC_VLANTR_ETV_WIDTH 1 +#define MAC_VLANTR_EVLS_INDEX 21 +#define MAC_VLANTR_EVLS_WIDTH 2 +#define MAC_VLANTR_EVLRXS_INDEX 24 +#define MAC_VLANTR_EVLRXS_WIDTH 1 +#define MAC_VLANTR_VL_INDEX 0 +#define MAC_VLANTR_VL_WIDTH 16 +#define MAC_VLANTR_VTHM_INDEX 25 +#define MAC_VLANTR_VTHM_WIDTH 1 +#define MAC_VLANTR_VTIM_INDEX 17 +#define MAC_VLANTR_VTIM_WIDTH 1 +#define MAC_VR_DEVID_INDEX 8 +#define MAC_VR_DEVID_WIDTH 8 +#define MAC_VR_SNPSVER_INDEX 0 +#define MAC_VR_SNPSVER_WIDTH 8 +#define MAC_VR_USERVER_INDEX 16 +#define MAC_VR_USERVER_WIDTH 8 + +/* MMC register offsets */ +#define MMC_CR 0x0800 +#define MMC_RISR 0x0804 +#define MMC_TISR 0x0808 +#define MMC_RIER 0x080c +#define MMC_TIER 0x0810 +#define MMC_TXOCTETCOUNT_GB_LO 0x0814 +#define MMC_TXOCTETCOUNT_GB_HI 0x0818 +#define MMC_TXFRAMECOUNT_GB_LO 0x081c +#define MMC_TXFRAMECOUNT_GB_HI 0x0820 +#define MMC_TXBROADCASTFRAMES_G_LO 0x0824 +#define MMC_TXBROADCASTFRAMES_G_HI 0x0828 +#define MMC_TXMULTICASTFRAMES_G_LO 0x082c +#define MMC_TXMULTICASTFRAMES_G_HI 0x0830 +#define MMC_TX64OCTETS_GB_LO 0x0834 +#define MMC_TX64OCTETS_GB_HI 0x0838 +#define MMC_TX65TO127OCTETS_GB_LO 0x083c +#define MMC_TX65TO127OCTETS_GB_HI 0x0840 +#define MMC_TX128TO255OCTETS_GB_LO 0x0844 +#define MMC_TX128TO255OCTETS_GB_HI 0x0848 +#define MMC_TX256TO511OCTETS_GB_LO 0x084c +#define MMC_TX256TO511OCTETS_GB_HI 0x0850 +#define MMC_TX512TO1023OCTETS_GB_LO 0x0854 +#define MMC_TX512TO1023OCTETS_GB_HI 0x0858 +#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c +#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860 +#define MMC_TXUNICASTFRAMES_GB_LO 0x0864 +#define MMC_TXUNICASTFRAMES_GB_HI 0x0868 +#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c +#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870 +#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874 +#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878 +#define MMC_TXUNDERFLOWERROR_LO 0x087c +#define MMC_TXUNDERFLOWERROR_HI 0x0880 +#define MMC_TXOCTETCOUNT_G_LO 0x0884 +#define MMC_TXOCTETCOUNT_G_HI 0x0888 +#define MMC_TXFRAMECOUNT_G_LO 0x088c +#define MMC_TXFRAMECOUNT_G_HI 0x0890 +#define MMC_TXPAUSEFRAMES_LO 0x0894 +#define MMC_TXPAUSEFRAMES_HI 0x0898 +#define MMC_TXVLANFRAMES_G_LO 0x089c +#define MMC_TXVLANFRAMES_G_HI 0x08a0 +#define MMC_RXFRAMECOUNT_GB_LO 0x0900 +#define MMC_RXFRAMECOUNT_GB_HI 0x0904 +#define MMC_RXOCTETCOUNT_GB_LO 0x0908 +#define MMC_RXOCTETCOUNT_GB_HI 0x090c +#define MMC_RXOCTETCOUNT_G_LO 0x0910 +#define MMC_RXOCTETCOUNT_G_HI 0x0914 +#define MMC_RXBROADCASTFRAMES_G_LO 0x0918 +#define MMC_RXBROADCASTFRAMES_G_HI 0x091c +#define MMC_RXMULTICASTFRAMES_G_LO 0x0920 +#define MMC_RXMULTICASTFRAMES_G_HI 0x0924 +#define MMC_RXCRCERROR_LO 0x0928 +#define MMC_RXCRCERROR_HI 0x092c +#define MMC_RXRUNTERROR 0x0930 +#define MMC_RXJABBERERROR 0x0934 +#define MMC_RXUNDERSIZE_G 0x0938 +#define MMC_RXOVERSIZE_G 0x093c +#define MMC_RX64OCTETS_GB_LO 0x0940 +#define MMC_RX64OCTETS_GB_HI 0x0944 +#define MMC_RX65TO127OCTETS_GB_LO 0x0948 +#define MMC_RX65TO127OCTETS_GB_HI 0x094c +#define MMC_RX128TO255OCTETS_GB_LO 0x0950 +#define MMC_RX128TO255OCTETS_GB_HI 0x0954 +#define MMC_RX256TO511OCTETS_GB_LO 0x0958 
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c +#define MMC_RX512TO1023OCTETS_GB_LO 0x0960 +#define MMC_RX512TO1023OCTETS_GB_HI 0x0964 +#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968 +#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c +#define MMC_RXUNICASTFRAMES_G_LO 0x0970 +#define MMC_RXUNICASTFRAMES_G_HI 0x0974 +#define MMC_RXLENGTHERROR_LO 0x0978 +#define MMC_RXLENGTHERROR_HI 0x097c +#define MMC_RXOUTOFRANGETYPE_LO 0x0980 +#define MMC_RXOUTOFRANGETYPE_HI 0x0984 +#define MMC_RXPAUSEFRAMES_LO 0x0988 +#define MMC_RXPAUSEFRAMES_HI 0x098c +#define MMC_RXFIFOOVERFLOW_LO 0x0990 +#define MMC_RXFIFOOVERFLOW_HI 0x0994 +#define MMC_RXVLANFRAMES_GB_LO 0x0998 +#define MMC_RXVLANFRAMES_GB_HI 0x099c +#define MMC_RXWATCHDOGERROR 0x09a0 + +/* MMC register entry bit positions and sizes */ +#define MMC_CR_CR_INDEX 0 +#define MMC_CR_CR_WIDTH 1 +#define MMC_CR_CSR_INDEX 1 +#define MMC_CR_CSR_WIDTH 1 +#define MMC_CR_ROR_INDEX 2 +#define MMC_CR_ROR_WIDTH 1 +#define MMC_CR_MCF_INDEX 3 +#define MMC_CR_MCF_WIDTH 1 +#define MMC_CR_MCT_INDEX 4 +#define MMC_CR_MCT_WIDTH 2 +#define MMC_RIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23 +#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0 +#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1 +#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1 +#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2 +#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1 +#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3 +#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4 +#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXCRCERROR_INDEX 5 +#define MMC_RISR_RXCRCERROR_WIDTH 1 +#define MMC_RISR_RXRUNTERROR_INDEX 6 +#define MMC_RISR_RXRUNTERROR_WIDTH 1 +#define MMC_RISR_RXJABBERERROR_INDEX 7 +#define MMC_RISR_RXJABBERERROR_WIDTH 1 +#define MMC_RISR_RXUNDERSIZE_G_INDEX 8 +#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1 +#define MMC_RISR_RXOVERSIZE_G_INDEX 9 +#define MMC_RISR_RXOVERSIZE_G_WIDTH 1 +#define MMC_RISR_RX64OCTETS_GB_INDEX 10 +#define MMC_RISR_RX64OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11 +#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12 +#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13 +#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14 +#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16 +#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1 +#define MMC_RISR_RXLENGTHERROR_INDEX 17 +#define MMC_RISR_RXLENGTHERROR_WIDTH 1 +#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18 +#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1 +#define MMC_RISR_RXPAUSEFRAMES_INDEX 19 +#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1 +#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20 +#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1 +#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21 +#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1 +#define MMC_RISR_RXWATCHDOGERROR_INDEX 22 +#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1 +#define MMC_TIER_ALL_INTERRUPTS_INDEX 0 +#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18 +#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0 +#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1 +#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2 +#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3 +#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1 +#define 
MMC_TISR_TX64OCTETS_GB_INDEX 4 +#define MMC_TISR_TX64OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5 +#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6 +#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7 +#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8 +#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1 +#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10 +#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11 +#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12 +#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1 +#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13 +#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1 +#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14 +#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1 +#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15 +#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1 +#define MMC_TISR_TXPAUSEFRAMES_INDEX 16 +#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1 +#define MMC_TISR_TXVLANFRAMES_G_INDEX 17 +#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1 + +/* MTL register offsets */ +#define MTL_OMR 0x1000 +#define MTL_FDCR 0x1008 +#define MTL_FDSR 0x100c +#define MTL_FDDR 0x1010 +#define MTL_ISR 0x1020 +#define MTL_RQDCM0R 0x1030 +#define MTL_TCPM0R 0x1040 +#define MTL_TCPM1R 0x1044 + +#define MTL_RQDCM_INC 4 +#define MTL_RQDCM_Q_PER_REG 4 +#define MTL_TCPM_INC 4 +#define MTL_TCPM_TC_PER_REG 4 + +/* MTL register entry bit positions and sizes */ +#define MTL_OMR_ETSALG_INDEX 5 +#define MTL_OMR_ETSALG_WIDTH 2 +#define MTL_OMR_RAA_INDEX 2 +#define MTL_OMR_RAA_WIDTH 1 + +/* MTL queue register offsets + * Multiple queues can be active. The first queue has registers + * that begin at 0x1100. Each subsequent queue has registers that + * are accessed using an offset of 0x80 from the previous queue. 
+ */ +#define MTL_Q_BASE 0x1100 +#define MTL_Q_INC 0x80 + +#define MTL_Q_TQOMR 0x00 +#define MTL_Q_TQUR 0x04 +#define MTL_Q_TQDR 0x08 +#define MTL_Q_RQOMR 0x40 +#define MTL_Q_RQMPOCR 0x44 +#define MTL_Q_RQDR 0x4c +#define MTL_Q_RQFCR 0x50 +#define MTL_Q_IER 0x70 +#define MTL_Q_ISR 0x74 + +/* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQFCR_RFA_INDEX 1 +#define MTL_Q_RQFCR_RFA_WIDTH 6 +#define MTL_Q_RQFCR_RFD_INDEX 17 +#define MTL_Q_RQFCR_RFD_WIDTH 6 +#define MTL_Q_RQOMR_EHFC_INDEX 7 +#define MTL_Q_RQOMR_EHFC_WIDTH 1 +#define MTL_Q_RQOMR_RQS_INDEX 16 +#define MTL_Q_RQOMR_RQS_WIDTH 9 +#define MTL_Q_RQOMR_RSF_INDEX 5 +#define MTL_Q_RQOMR_RSF_WIDTH 1 +#define MTL_Q_RQOMR_RTC_INDEX 0 +#define MTL_Q_RQOMR_RTC_WIDTH 2 +#define MTL_Q_TQOMR_FTQ_INDEX 0 +#define MTL_Q_TQOMR_FTQ_WIDTH 1 +#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8 +#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3 +#define MTL_Q_TQOMR_TQS_INDEX 16 +#define MTL_Q_TQOMR_TQS_WIDTH 10 +#define MTL_Q_TQOMR_TSF_INDEX 1 +#define MTL_Q_TQOMR_TSF_WIDTH 1 +#define MTL_Q_TQOMR_TTC_INDEX 4 +#define MTL_Q_TQOMR_TTC_WIDTH 3 +#define MTL_Q_TQOMR_TXQEN_INDEX 2 +#define MTL_Q_TQOMR_TXQEN_WIDTH 2 + +/* MTL queue register value */ +#define MTL_RSF_DISABLE 0x00 +#define MTL_RSF_ENABLE 0x01 +#define MTL_TSF_DISABLE 0x00 +#define MTL_TSF_ENABLE 0x01 + +#define MTL_RX_THRESHOLD_64 0x00 +#define MTL_RX_THRESHOLD_96 0x02 +#define MTL_RX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_32 0x01 +#define MTL_TX_THRESHOLD_64 0x00 +#define MTL_TX_THRESHOLD_96 0x02 +#define MTL_TX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_192 0x04 +#define MTL_TX_THRESHOLD_256 0x05 +#define MTL_TX_THRESHOLD_384 0x06 +#define MTL_TX_THRESHOLD_512 0x07 + +#define MTL_ETSALG_WRR 0x00 +#define MTL_ETSALG_WFQ 0x01 +#define MTL_ETSALG_DWRR 0x02 +#define MTL_RAA_SP 0x00 +#define MTL_RAA_WSP 0x01 + +#define MTL_Q_DISABLED 0x00 +#define MTL_Q_ENABLED 0x02 + +/* MTL traffic class register offsets + * Multiple traffic classes can be active. The first class has registers + * that begin at 0x1100. Each subsequent traffic class has registers that + * are accessed using an offset of 0x80 from the previous traffic class. + */ +#define MTL_TC_BASE MTL_Q_BASE +#define MTL_TC_INC MTL_Q_INC + +#define MTL_TC_ETSCR 0x10 +#define MTL_TC_ETSSR 0x14 +#define MTL_TC_QWR 0x18 + +/* MTL traffic class register entry bit positions and sizes */ +#define MTL_TC_ETSCR_TSA_INDEX 0 +#define MTL_TC_ETSCR_TSA_WIDTH 2 +#define MTL_TC_QWR_QW_INDEX 0 +#define MTL_TC_QWR_QW_WIDTH 21 + +/* MTL traffic class register value */ +#define MTL_TSA_SP 0x00 +#define MTL_TSA_ETS 0x02 + +/* PCS MMD select register offset + * The MMD select register is used for accessing PCS registers + * when the underlying APB3 interface is using indirect addressing. + * Indirect addressing requires accessing registers in two phases, + * an address phase and a data phase. The address phase requires + * writing an address selection value to the MMD select register.
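The MTL queue and traffic class register layout defined above comes down to simple stride arithmetic: a per-queue register lives at MTL_Q_BASE + (queue * MTL_Q_INC) + register offset, and traffic classes reuse the same base and stride (MTL_TC_BASE/MTL_TC_INC alias the queue values). A small stand-alone sketch of that address calculation, using the values copied from the defines above; the helper name mtl_q_reg is illustrative only, not part of the driver:

#include <stdio.h>

#define MTL_Q_BASE	0x1100
#define MTL_Q_INC	0x80
#define MTL_Q_TQOMR	0x00
#define MTL_Q_RQOMR	0x40

/* Address of register 'reg' for MTL queue 'q': base + q * stride + reg */
static unsigned int mtl_q_reg(unsigned int q, unsigned int reg)
{
	return MTL_Q_BASE + q * MTL_Q_INC + reg;
}

int main(void)
{
	/* Queue 0 TQOMR sits at 0x1100, queue 2 RQOMR at 0x1240 */
	printf("Q0 TQOMR @ 0x%04x\n", mtl_q_reg(0, MTL_Q_TQOMR));
	printf("Q2 RQOMR @ 0x%04x\n", mtl_q_reg(2, MTL_Q_RQOMR));
	return 0;
}

The XGMAC_MTL_IOREAD/IOWRITE macros later in this header perform exactly this computation before the MMIO access.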
+ */ +#define PCS_MMD_SELECT 0xff + +/* Descriptor/Packet entry bit positions and sizes */ +#define RX_PACKET_ERRORS_CRC_INDEX 2 +#define RX_PACKET_ERRORS_CRC_WIDTH 1 +#define RX_PACKET_ERRORS_FRAME_INDEX 3 +#define RX_PACKET_ERRORS_FRAME_WIDTH 1 +#define RX_PACKET_ERRORS_LENGTH_INDEX 0 +#define RX_PACKET_ERRORS_LENGTH_WIDTH 1 +#define RX_PACKET_ERRORS_OVERRUN_INDEX 1 +#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1 + +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0 +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 + +#define RX_NORMAL_DESC0_OVT_INDEX 0 +#define RX_NORMAL_DESC0_OVT_WIDTH 16 +#define RX_NORMAL_DESC2_HL_INDEX 0 +#define RX_NORMAL_DESC2_HL_WIDTH 10 +#define RX_NORMAL_DESC3_CDA_INDEX 27 +#define RX_NORMAL_DESC3_CDA_WIDTH 1 +#define RX_NORMAL_DESC3_CTXT_INDEX 30 +#define RX_NORMAL_DESC3_CTXT_WIDTH 1 +#define RX_NORMAL_DESC3_ES_INDEX 15 +#define RX_NORMAL_DESC3_ES_WIDTH 1 +#define RX_NORMAL_DESC3_ETLT_INDEX 16 +#define RX_NORMAL_DESC3_ETLT_WIDTH 4 +#define RX_NORMAL_DESC3_FD_INDEX 29 +#define RX_NORMAL_DESC3_FD_WIDTH 1 +#define RX_NORMAL_DESC3_INTE_INDEX 30 +#define RX_NORMAL_DESC3_INTE_WIDTH 1 +#define RX_NORMAL_DESC3_L34T_INDEX 20 +#define RX_NORMAL_DESC3_L34T_WIDTH 4 +#define RX_NORMAL_DESC3_LD_INDEX 28 +#define RX_NORMAL_DESC3_LD_WIDTH 1 +#define RX_NORMAL_DESC3_OWN_INDEX 31 +#define RX_NORMAL_DESC3_OWN_WIDTH 1 +#define RX_NORMAL_DESC3_PL_INDEX 0 +#define RX_NORMAL_DESC3_PL_WIDTH 14 +#define RX_NORMAL_DESC3_RSV_INDEX 26 +#define RX_NORMAL_DESC3_RSV_WIDTH 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 + +#define RX_CONTEXT_DESC3_TSA_INDEX 4 +#define RX_CONTEXT_DESC3_TSA_WIDTH 1 +#define RX_CONTEXT_DESC3_TSD_INDEX 6 +#define RX_CONTEXT_DESC3_TSD_WIDTH 1 + +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 +#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 + +#define TX_CONTEXT_DESC2_MSS_INDEX 0 +#define TX_CONTEXT_DESC2_MSS_WIDTH 15 +#define TX_CONTEXT_DESC3_CTXT_INDEX 30 +#define TX_CONTEXT_DESC3_CTXT_WIDTH 1 +#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26 +#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1 +#define TX_CONTEXT_DESC3_VLTV_INDEX 16 +#define TX_CONTEXT_DESC3_VLTV_WIDTH 1 +#define TX_CONTEXT_DESC3_VT_INDEX 0 +#define TX_CONTEXT_DESC3_VT_WIDTH 16 + +#define TX_NORMAL_DESC2_HL_B1L_INDEX 0 +#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 +#define TX_NORMAL_DESC2_IC_INDEX 31 +#define TX_NORMAL_DESC2_IC_WIDTH 1 +#define TX_NORMAL_DESC2_TTSE_INDEX 30 +#define TX_NORMAL_DESC2_TTSE_WIDTH 1 +#define TX_NORMAL_DESC2_VTIR_INDEX 14 +#define TX_NORMAL_DESC2_VTIR_WIDTH 2 +#define TX_NORMAL_DESC3_CIC_INDEX 16 
+#define TX_NORMAL_DESC3_CIC_WIDTH 2 +#define TX_NORMAL_DESC3_CPC_INDEX 26 +#define TX_NORMAL_DESC3_CPC_WIDTH 2 +#define TX_NORMAL_DESC3_CTXT_INDEX 30 +#define TX_NORMAL_DESC3_CTXT_WIDTH 1 +#define TX_NORMAL_DESC3_FD_INDEX 29 +#define TX_NORMAL_DESC3_FD_WIDTH 1 +#define TX_NORMAL_DESC3_FL_INDEX 0 +#define TX_NORMAL_DESC3_FL_WIDTH 15 +#define TX_NORMAL_DESC3_LD_INDEX 28 +#define TX_NORMAL_DESC3_LD_WIDTH 1 +#define TX_NORMAL_DESC3_OWN_INDEX 31 +#define TX_NORMAL_DESC3_OWN_WIDTH 1 +#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19 +#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4 +#define TX_NORMAL_DESC3_TCPPL_INDEX 0 +#define TX_NORMAL_DESC3_TCPPL_WIDTH 18 +#define TX_NORMAL_DESC3_TSE_INDEX 18 +#define TX_NORMAL_DESC3_TSE_WIDTH 1 + +#define TX_NORMAL_DESC2_VLAN_INSERT 0x2 + +/* MDIO undefined or vendor specific registers */ +#ifndef MDIO_AN_COMP_STAT +#define MDIO_AN_COMP_STAT 0x0030 +#endif + +/* Bit setting and getting macros + * The get macro will extract the current bit field value from within + * the variable + * + * The set macro will clear the current bit field value within the + * variable and then set the bit field of the variable to the + * specified value + */ +#define GET_BITS(_var, _index, _width) \ + (((_var) >> (_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS(_var, _index, _width, _val) \ +do { \ + (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \ + (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ +} while (0) + +#define GET_BITS_LE(_var, _index, _width) \ + ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1)) + +#define SET_BITS_LE(_var, _index, _width, _val) \ +do { \ + (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \ + (_var) |= cpu_to_le32((((_val) & \ + ((0x1 << (_width)) - 1)) << (_index))); \ +} while (0) + +/* Bit setting and getting macros based on register fields + * The get macro uses the bit field definitions formed using the input + * names to extract the current bit field value from within the + * variable + * + * The set macro uses the bit field definitions formed using the input + * names to set the bit field of the variable to the specified value + */ +#define XGMAC_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \ + GET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \ + SET_BITS_LE((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +/* Macros for reading or writing registers + * The ioread macros will get bit fields or full values using the + * register definitions formed using the input names + * + * The iowrite macros will set bit fields or full values using the + * register definitions formed using the input names + */ +#define XGMAC_IOREAD(_pdata, _reg) \ + ioread32((_pdata)->xgmac_regs + _reg) + +#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XGMAC_IOREAD((_pdata), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XGMAC_IOWRITE(_pdata, _reg, _val) \ + iowrite32((_val), (_pdata)->xgmac_regs + _reg) + +#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ 
+ XGMAC_IOWRITE((_pdata), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing MTL queue or traffic class registers + * Similar to the standard read and write macros except that the + * base register value is calculated by the queue or traffic class number + */ +#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \ + ioread32((_pdata)->xgmac_regs + \ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg) + +#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \ + GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \ + iowrite32((_val), (_pdata)->xgmac_regs + \ + MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg) + +#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \ +do { \ + u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ +} while (0) + +/* Macros for reading or writing DMA channel registers + * Similar to the standard read and write macros except that the + * base register value is obtained from the ring + */ +#define XGMAC_DMA_IOREAD(_channel, _reg) \ + ioread32((_channel)->dma_regs + _reg) + +#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \ + GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \ + iowrite32((_val), (_channel)->dma_regs + _reg) + +#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \ +do { \ + u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of XPCS registers. + */ +#define XPCS_IOWRITE(_pdata, _off, _val) \ + iowrite32(_val, (_pdata)->xpcs_regs + (_off)) + +#define XPCS_IOREAD(_pdata, _off) \ + ioread32((_pdata)->xpcs_regs + (_off)) + +/* Macros for building, reading or writing register values or bits + * using MDIO. Different from above because of the use of standardized + * Linux include values. No shifting is performed with the bit + * operations, everything works on mask values. + */ +#define XMDIO_READ(_pdata, _mmd, _reg) \ + ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff))) + +#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ + (XMDIO_READ((_pdata), _mmd, _reg) & _mask) + +#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ + ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ + MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val))) + +#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ +do { \ + u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \ + mmd_val &= ~_mask; \ + mmd_val |= (_val); \ + XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \ +} while (0) + +#endif diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c new file mode 100644 index 000000000..8a50b01c2 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -0,0 +1,269 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. 
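The bit manipulation scheme that closes this header is worth spelling out: every register field is described only by an _INDEX/_WIDTH pair, GET_BITS/SET_BITS do the mask-and-shift, and the XGMAC_*_BITS and IO wrappers paste the register and field names together to locate those defines before doing a read-modify-write. A stand-alone sketch of the same pattern in user-space C; the FIELD_GET/FIELD_SET names are illustrative, and the MAC_RCR JE field definition is reused from this header purely as an example:

#include <stdint.h>
#include <stdio.h>

/* Same convention as the driver: each field is an INDEX/WIDTH pair */
#define MAC_RCR_JE_INDEX	8
#define MAC_RCR_JE_WIDTH	1

/* Mask-and-shift helpers, mirroring GET_BITS/SET_BITS above */
#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val)				\
do {									\
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
} while (0)

/* Token pasting turns (MAC_RCR, JE) into the INDEX/WIDTH names */
#define FIELD_GET(_var, _prefix, _field)				\
	GET_BITS((_var), _prefix##_##_field##_INDEX,			\
		 _prefix##_##_field##_WIDTH)
#define FIELD_SET(_var, _prefix, _field, _val)				\
	SET_BITS((_var), _prefix##_##_field##_INDEX,			\
		 _prefix##_##_field##_WIDTH, (_val))

int main(void)
{
	uint32_t rcr = 0;

	FIELD_SET(rcr, MAC_RCR, JE, 1);		/* set the jumbo enable bit */
	printf("RCR = 0x%08x, JE = %u\n", (unsigned int)rcr,
	       (unsigned int)FIELD_GET(rcr, MAC_RCR, JE));
	return 0;
}

Describing fields this way keeps the whole register layout in one header and lets a single pair of macros serve every register, which is why the rest of the driver never open-codes shifts or masks.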
+ * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + +static int xgbe_dcb_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + /* Set number of supported traffic classes */ + ets->ets_cap = pdata->hw_feat.tc_cnt; + + if (pdata->ets) { + ets->cbs = pdata->ets->cbs; + memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, pdata->ets->tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, pdata->ets->prio_tc, + sizeof(ets->prio_tc)); + } + + return 0; +} + +static int xgbe_dcb_ieee_setets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int i, tc_ets, tc_ets_weight; + + tc_ets = 0; + tc_ets_weight = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i, + ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]); + DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]); + + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && + (i >= pdata->hw_feat.tc_cnt)) + return -EINVAL; + + if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt) + return -EINVAL; + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + tc_ets = 1; + tc_ets_weight += ets->tc_tx_bw[i]; + break; + + default: + return -EINVAL; + } + } + + /* Weights must add up to 100% */ + if (tc_ets && (tc_ets_weight != 100)) + return -EINVAL; + + if (!pdata->ets) { + pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets), + GFP_KERNEL); + if (!pdata->ets) + return -ENOMEM; + } + + memcpy(pdata->ets, ets, sizeof(*pdata->ets)); + + pdata->hw_if.config_dcb_tc(pdata); + + return 0; +} + +static int xgbe_dcb_ieee_getpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + /* Set number of supported PFC traffic classes */ + pfc->pfc_cap = pdata->hw_feat.tc_cnt; + + if (pdata->pfc) { + pfc->pfc_en = pdata->pfc->pfc_en; + pfc->mbc = pdata->pfc->mbc; + pfc->delay = pdata->pfc->delay; + } + + return 0; +} + +static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n", + pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); + + if (!pdata->pfc) { + pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc), + GFP_KERNEL); + if (!pdata->pfc) + return -ENOMEM; + } + + memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc)); + + pdata->hw_if.config_dcb_pfc(pdata); + + return 0; +} + +static u8 xgbe_dcb_getdcbx(struct net_device *netdev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx) +{ + u8 support = xgbe_dcb_getdcbx(netdev); + + DBGPR(" DCBX=%#hhx\n", dcbx); + + if (dcbx & ~support) + return 1; + + if ((dcbx & support) != support) + return 1; + + return 0; +} + +static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = { + /* IEEE 802.1Qaz std */ + .ieee_getets = xgbe_dcb_ieee_getets, + .ieee_setets = xgbe_dcb_ieee_setets, + .ieee_getpfc = xgbe_dcb_ieee_getpfc, + .ieee_setpfc = xgbe_dcb_ieee_setpfc, + + /* DCBX configuration */ + .getdcbx = xgbe_dcb_getdcbx, + .setdcbx = xgbe_dcb_setdcbx, +}; + +const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void) +{ + return &xgbe_dcbnl_ops; +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c new file mode 100644 index 000000000..2c063b60d --- /dev/null +++ 
b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -0,0 +1,373 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. 
nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + +static ssize_t xgbe_common_read(char __user *buffer, size_t count, + loff_t *ppos, unsigned int value) +{ + char *buf; + ssize_t len; + + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "0x%08x\n", value); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + + return len; +} + +static ssize_t xgbe_common_write(const char __user *buffer, size_t count, + loff_t *ppos, unsigned int *value) +{ + char workarea[32]; + ssize_t len; + int ret; + + if (*ppos != 0) + return 0; + + if (count >= sizeof(workarea)) + return -ENOSPC; + + len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + workarea[len] = '\0'; + ret = kstrtouint(workarea, 16, value); + if (ret) + return -EIO; + + return len; +} + +static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + + return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg); +} + +static ssize_t xgmac_reg_addr_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + + return xgbe_common_write(buffer, count, ppos, + &pdata->debugfs_xgmac_reg); +} + +static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + unsigned int value; + + value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg); + + return xgbe_common_read(buffer, count, ppos, value); +} + +static ssize_t xgmac_reg_value_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + unsigned int value; + ssize_t len; + + len = xgbe_common_write(buffer, count, ppos, &value); + if (len < 0) + return len; + + XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value); + + return len; +} + +static const struct file_operations xgmac_reg_addr_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = xgmac_reg_addr_read, + .write = xgmac_reg_addr_write, +}; + +static const struct file_operations xgmac_reg_value_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = xgmac_reg_value_read, + .write = xgmac_reg_value_write, +}; + +static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + + return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd); +} + +static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + + return xgbe_common_write(buffer, count, ppos, + &pdata->debugfs_xpcs_mmd); +} + +static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + + return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg); +} + +static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + + return xgbe_common_write(buffer, count, ppos, + &pdata->debugfs_xpcs_reg); +} + +static ssize_t xpcs_reg_value_read(struct file *filp, 
char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + unsigned int value; + + value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd, + pdata->debugfs_xpcs_reg); + + return xgbe_common_read(buffer, count, ppos, value); +} + +static ssize_t xpcs_reg_value_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xgbe_prv_data *pdata = filp->private_data; + unsigned int value; + ssize_t len; + + len = xgbe_common_write(buffer, count, ppos, &value); + if (len < 0) + return len; + + XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg, + value); + + return len; +} + +static const struct file_operations xpcs_mmd_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = xpcs_mmd_read, + .write = xpcs_mmd_write, +}; + +static const struct file_operations xpcs_reg_addr_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = xpcs_reg_addr_read, + .write = xpcs_reg_addr_write, +}; + +static const struct file_operations xpcs_reg_value_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = xpcs_reg_value_read, + .write = xpcs_reg_value_write, +}; + +void xgbe_debugfs_init(struct xgbe_prv_data *pdata) +{ + struct dentry *pfile; + char *buf; + + /* Set defaults */ + pdata->debugfs_xgmac_reg = 0; + pdata->debugfs_xpcs_mmd = 1; + pdata->debugfs_xpcs_reg = 0; + + buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); + pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); + if (!pdata->xgbe_debugfs) { + netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); + return; + } + + pfile = debugfs_create_file("xgmac_register", 0600, + pdata->xgbe_debugfs, pdata, + &xgmac_reg_addr_fops); + if (!pfile) + netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + + pfile = debugfs_create_file("xgmac_register_value", 0600, + pdata->xgbe_debugfs, pdata, + &xgmac_reg_value_fops); + if (!pfile) + netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + + pfile = debugfs_create_file("xpcs_mmd", 0600, + pdata->xgbe_debugfs, pdata, + &xpcs_mmd_fops); + if (!pfile) + netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + + pfile = debugfs_create_file("xpcs_register", 0600, + pdata->xgbe_debugfs, pdata, + &xpcs_reg_addr_fops); + if (!pfile) + netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + + pfile = debugfs_create_file("xpcs_register_value", 0600, + pdata->xgbe_debugfs, pdata, + &xpcs_reg_value_fops); + if (!pfile) + netdev_err(pdata->netdev, "debugfs_create_file failed\n"); + + kfree(buf); +} + +void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) +{ + debugfs_remove_recursive(pdata->xgbe_debugfs); + pdata->xgbe_debugfs = NULL; +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c new file mode 100644 index 000000000..5c92fb71b --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -0,0 +1,636 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. 
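The debugfs interface implemented above follows an address-file/value-file pattern: a register offset is written, in hex, to the "xgmac_register" (or "xpcs_register") file, and subsequent reads or writes of the matching "*_value" file access that register. A rough user-space sketch of driving it; the debugfs mount point, the "eth0" interface name, the write_str helper and the 0x0110 offset are all assumptions for illustration, not taken from the driver:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical path: debugfs mount point and "eth0" are assumptions */
#define XGBE_DBG_DIR "/sys/kernel/debug/amd-xgbe-eth0/"

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, s, strlen(s)) < 0) {
		perror(path);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	char buf[32] = { 0 };
	int fd;

	/* Latch a register offset; 0x0110 is just an example value */
	if (write_str(XGBE_DBG_DIR "xgmac_register", "0x0110"))
		return 1;

	/* ...then read the selected register through the value file */
	fd = open(XGBE_DBG_DIR "xgmac_register_value", O_RDONLY);
	if (fd < 0 || read(fd, buf, sizeof(buf) - 1) < 0) {
		perror(XGBE_DBG_DIR "xgmac_register_value");
		return 1;
	}
	printf("register value: %s", buf);
	close(fd);
	return 0;
}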
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "xgbe.h" +#include "xgbe-common.h" + +static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *); + +static void xgbe_free_ring(struct xgbe_prv_data *pdata, + struct xgbe_ring *ring) +{ + struct xgbe_ring_data *rdata; + unsigned int i; + + if (!ring) + return; + + if (ring->rdata) { + for (i = 0; i < ring->rdesc_count; i++) { + rdata = XGBE_GET_DESC_DATA(ring, i); + xgbe_unmap_rdata(pdata, rdata); + } + + kfree(ring->rdata); + ring->rdata = NULL; + } + + if (ring->rx_hdr_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, + ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_hdr_pa.pages); + + ring->rx_hdr_pa.pages = NULL; + ring->rx_hdr_pa.pages_len = 0; + ring->rx_hdr_pa.pages_offset = 0; + ring->rx_hdr_pa.pages_dma = 0; + } + + if (ring->rx_buf_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, + ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_buf_pa.pages); + + ring->rx_buf_pa.pages = NULL; + ring->rx_buf_pa.pages_len = 0; + ring->rx_buf_pa.pages_offset = 0; + ring->rx_buf_pa.pages_dma = 0; + } + + if (ring->rdesc) { + dma_free_coherent(pdata->dev, + (sizeof(struct xgbe_ring_desc) * + ring->rdesc_count), + ring->rdesc, ring->rdesc_dma); + ring->rdesc = NULL; + } +} + +static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + DBGPR("-->xgbe_free_ring_resources\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + xgbe_free_ring(pdata, channel->tx_ring); + xgbe_free_ring(pdata, channel->rx_ring); + } + + DBGPR("<--xgbe_free_ring_resources\n"); +} + +static int xgbe_init_ring(struct xgbe_prv_data *pdata, + struct xgbe_ring *ring, unsigned int rdesc_count) +{ + DBGPR("-->xgbe_init_ring\n"); + + if (!ring) + return 0; + + /* Descriptors */ + ring->rdesc_count = rdesc_count; + ring->rdesc = dma_alloc_coherent(pdata->dev, + (sizeof(struct xgbe_ring_desc) * + rdesc_count), &ring->rdesc_dma, + GFP_KERNEL); + if (!ring->rdesc) + return -ENOMEM; + + /* Descriptor information */ + ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data), + GFP_KERNEL); + if (!ring->rdata) + return -ENOMEM; + + DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n", + ring->rdesc, ring->rdesc_dma, ring->rdata); + + DBGPR("<--xgbe_init_ring\n"); + + return 0; +} + +static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + int ret; + + DBGPR("-->xgbe_alloc_ring_resources\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + DBGPR(" %s - tx_ring:\n", channel->name); + ret = xgbe_init_ring(pdata, channel->tx_ring, + pdata->tx_desc_count); + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Tx ring\n"); + goto err_ring; + } + + DBGPR(" %s - rx_ring:\n", channel->name); + ret = xgbe_init_ring(pdata, channel->rx_ring, + pdata->rx_desc_count); + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Tx ring\n"); + goto err_ring; + } + } + + DBGPR("<--xgbe_alloc_ring_resources\n"); + + return 0; + +err_ring: + xgbe_free_ring_resources(pdata); + + return ret; +} + +static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, + struct xgbe_page_alloc *pa, gfp_t gfp, int order) +{ + struct page *pages = NULL; + dma_addr_t pages_dma; + int ret; + + /* Try to obtain pages, decreasing order if necessary */ + gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN; + while (order >= 0) { + pages = alloc_pages(gfp, 
order); + if (pages) + break; + + order--; + } + if (!pages) + return -ENOMEM; + + /* Map the pages */ + pages_dma = dma_map_page(pdata->dev, pages, 0, + PAGE_SIZE << order, DMA_FROM_DEVICE); + ret = dma_mapping_error(pdata->dev, pages_dma); + if (ret) { + put_page(pages); + return ret; + } + + pa->pages = pages; + pa->pages_len = PAGE_SIZE << order; + pa->pages_offset = 0; + pa->pages_dma = pages_dma; + + return 0; +} + +static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd, + struct xgbe_page_alloc *pa, + unsigned int len) +{ + get_page(pa->pages); + bd->pa = *pa; + + bd->dma = pa->pages_dma + pa->pages_offset; + bd->dma_len = len; + + pa->pages_offset += len; + if ((pa->pages_offset + len) > pa->pages_len) { + /* This data descriptor is responsible for unmapping page(s) */ + bd->pa_unmap = *pa; + + /* Get a new allocation next time */ + pa->pages = NULL; + pa->pages_len = 0; + pa->pages_offset = 0; + pa->pages_dma = 0; + } +} + +static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, + struct xgbe_ring *ring, + struct xgbe_ring_data *rdata) +{ + int order, ret; + + if (!ring->rx_hdr_pa.pages) { + ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); + if (ret) + return ret; + } + + if (!ring->rx_buf_pa.pages) { + order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); + ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, + order); + if (ret) + return ret; + } + + /* Set up the header page info */ + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + XGBE_SKB_ALLOC_SIZE); + + /* Set up the buffer page info */ + xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa, + pdata->rx_buf_size); + + return 0; +} + +static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; + struct xgbe_ring *ring; + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + dma_addr_t rdesc_dma; + unsigned int i, j; + + DBGPR("-->xgbe_wrapper_tx_descriptor_init\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + rdesc = ring->rdesc; + rdesc_dma = ring->rdesc_dma; + + for (j = 0; j < ring->rdesc_count; j++) { + rdata = XGBE_GET_DESC_DATA(ring, j); + + rdata->rdesc = rdesc; + rdata->rdesc_dma = rdesc_dma; + + rdesc++; + rdesc_dma += sizeof(struct xgbe_ring_desc); + } + + ring->cur = 0; + ring->dirty = 0; + memset(&ring->tx, 0, sizeof(ring->tx)); + + hw_if->tx_desc_init(channel); + } + + DBGPR("<--xgbe_wrapper_tx_descriptor_init\n"); +} + +static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; + struct xgbe_ring *ring; + struct xgbe_ring_desc *rdesc; + struct xgbe_ring_data *rdata; + dma_addr_t rdesc_dma; + unsigned int i, j; + + DBGPR("-->xgbe_wrapper_rx_descriptor_init\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + rdesc = ring->rdesc; + rdesc_dma = ring->rdesc_dma; + + for (j = 0; j < ring->rdesc_count; j++) { + rdata = XGBE_GET_DESC_DATA(ring, j); + + rdata->rdesc = rdesc; + rdata->rdesc_dma = rdesc_dma; + + if (xgbe_map_rx_buffer(pdata, ring, rdata)) + break; + + rdesc++; + rdesc_dma += sizeof(struct xgbe_ring_desc); + } + + ring->cur = 0; + ring->dirty = 0; + + hw_if->rx_desc_init(channel); + } + + DBGPR("<--xgbe_wrapper_rx_descriptor_init\n"); +} + +static void xgbe_unmap_rdata(struct xgbe_prv_data 
*pdata, + struct xgbe_ring_data *rdata) +{ + if (rdata->skb_dma) { + if (rdata->mapped_as_page) { + dma_unmap_page(pdata->dev, rdata->skb_dma, + rdata->skb_dma_len, DMA_TO_DEVICE); + } else { + dma_unmap_single(pdata->dev, rdata->skb_dma, + rdata->skb_dma_len, DMA_TO_DEVICE); + } + rdata->skb_dma = 0; + rdata->skb_dma_len = 0; + } + + if (rdata->skb) { + dev_kfree_skb_any(rdata->skb); + rdata->skb = NULL; + } + + if (rdata->rx.hdr.pa.pages) + put_page(rdata->rx.hdr.pa.pages); + + if (rdata->rx.hdr.pa_unmap.pages) { + dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma, + rdata->rx.hdr.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(rdata->rx.hdr.pa_unmap.pages); + } + + if (rdata->rx.buf.pa.pages) + put_page(rdata->rx.buf.pa.pages); + + if (rdata->rx.buf.pa_unmap.pages) { + dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma, + rdata->rx.buf.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(rdata->rx.buf.pa_unmap.pages); + } + + memset(&rdata->tx, 0, sizeof(rdata->tx)); + memset(&rdata->rx, 0, sizeof(rdata->rx)); + + rdata->mapped_as_page = 0; + + if (rdata->state_saved) { + rdata->state_saved = 0; + rdata->state.incomplete = 0; + rdata->state.context_next = 0; + rdata->state.skb = NULL; + rdata->state.len = 0; + rdata->state.error = 0; + } +} + +static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_ring *ring = channel->tx_ring; + struct xgbe_ring_data *rdata; + struct xgbe_packet_data *packet; + struct skb_frag_struct *frag; + dma_addr_t skb_dma; + unsigned int start_index, cur_index; + unsigned int offset, tso, vlan, datalen, len; + unsigned int i; + + DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur); + + offset = 0; + start_index = ring->cur; + cur_index = ring->cur; + + packet = &ring->packet_data; + packet->rdesc_count = 0; + packet->length = 0; + + tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + TSO_ENABLE); + vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + VLAN_CTAG); + + /* Save space for a context descriptor if needed */ + if ((tso && (packet->mss != ring->tx.cur_mss)) || + (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))) + cur_index++; + rdata = XGBE_GET_DESC_DATA(ring, cur_index); + + if (tso) { + DBGPR(" TSO packet\n"); + + /* Map the TSO header */ + skb_dma = dma_map_single(pdata->dev, skb->data, + packet->header_len, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single failed\n"); + goto err_out; + } + rdata->skb_dma = skb_dma; + rdata->skb_dma_len = packet->header_len; + + offset = packet->header_len; + + packet->length += packet->header_len; + + cur_index++; + rdata = XGBE_GET_DESC_DATA(ring, cur_index); + } + + /* Map the (remainder of the) packet */ + for (datalen = skb_headlen(skb) - offset; datalen; ) { + len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE); + + skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, + DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single failed\n"); + goto err_out; + } + rdata->skb_dma = skb_dma; + rdata->skb_dma_len = len; + DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n", + cur_index, skb_dma, len); + + datalen -= len; + offset += len; + + packet->length += len; + + cur_index++; + rdata = XGBE_GET_DESC_DATA(ring, cur_index); + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + DBGPR(" mapping frag %u\n", i); + + frag = &skb_shinfo(skb)->frags[i]; + 
offset = 0; + + for (datalen = skb_frag_size(frag); datalen; ) { + len = min_t(unsigned int, datalen, + XGBE_TX_MAX_BUF_SIZE); + + skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, + len, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, + "skb_frag_dma_map failed\n"); + goto err_out; + } + rdata->skb_dma = skb_dma; + rdata->skb_dma_len = len; + rdata->mapped_as_page = 1; + DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n", + cur_index, skb_dma, len); + + datalen -= len; + offset += len; + + packet->length += len; + + cur_index++; + rdata = XGBE_GET_DESC_DATA(ring, cur_index); + } + } + + /* Save the skb address in the last entry. We always have some data + * that has been mapped so rdata is always advanced past the last + * piece of mapped data - use the entry pointed to by cur_index - 1. + */ + rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1); + rdata->skb = skb; + + /* Save the number of descriptor entries used */ + packet->rdesc_count = cur_index - start_index; + + DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count); + + return packet->rdesc_count; + +err_out: + while (start_index < cur_index) { + rdata = XGBE_GET_DESC_DATA(ring, start_index++); + xgbe_unmap_rdata(pdata, rdata); + } + + DBGPR("<--xgbe_map_tx_skb: count=0\n"); + + return 0; +} + +void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) +{ + DBGPR("-->xgbe_init_function_ptrs_desc\n"); + + desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; + desc_if->free_ring_resources = xgbe_free_ring_resources; + desc_if->map_tx_skb = xgbe_map_tx_skb; + desc_if->map_rx_buffer = xgbe_map_rx_buffer; + desc_if->unmap_rdata = xgbe_unmap_rdata; + desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; + desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; + + DBGPR("<--xgbe_init_function_ptrs_desc\n"); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c new file mode 100644 index 000000000..21d949751 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -0,0 +1,2943 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. 
Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. 
Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+				      unsigned int usec)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	DBGPR("-->xgbe_usec_to_riwt\n");
+
+	rate = pdata->sysclk_rate;
+
+	/*
+	 * Convert the input usec value to the watchdog timer value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
+	 * Calculate the required value as:
+	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
+	 */
+	ret = (usec * (rate / 1000000)) / 256;
+
+	DBGPR("<--xgbe_usec_to_riwt\n");
+
+	return ret;
+}
+
+static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+				      unsigned int riwt)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	DBGPR("-->xgbe_riwt_to_usec\n");
+
+	rate = pdata->sysclk_rate;
+
+	/*
+	 * Convert the input watchdog timer value to the usec value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
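+	 * (Illustrative example, assuming a hypothetical 125 MHz system
+	 * clock: a riwt value of 50 works out to (50 * 256) / 125 = 102
+	 * usec, and converting 102 usec back gives (102 * 125) / 256 = 49
+	 * ticks, so round trips are only exact to the 256-cycle step.)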
+ * Calculate the required value as: + * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) + */ + ret = (riwt * 256) / (rate / 1000000); + + DBGPR("<--xgbe_riwt_to_usec\n"); + + return ret; +} + +static int xgbe_config_pblx8(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8, + pdata->pblx8); + + return 0; +} + +static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata) +{ + return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL); +} + +static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL, + pdata->tx_pbl); + } + + return 0; +} + +static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata) +{ + return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL); +} + +static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL, + pdata->rx_pbl); + } + + return 0; +} + +static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP, + pdata->tx_osp_mode); + } + + return 0; +} + +static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); + + return 0; +} + +static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->tx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); + + return 0; +} + +static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, + unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); + + return 0; +} + +static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, + unsigned int val) +{ + unsigned int i; + + for (i = 0; i < pdata->tx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); + + return 0; +} + +static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT, + pdata->rx_riwt); + } + + return 0; +} + +static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) +{ + return 0; +} + +static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ, + pdata->rx_buf_size); + } +} + +static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for 
(i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1); + } +} + +static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1); + } + + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); +} + +static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + unsigned int wait; + int ret = 0; + + mutex_lock(&pdata->rss_mutex); + + if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { + ret = -EBUSY; + goto unlock; + } + + XGMAC_IOWRITE(pdata, MAC_RSSDR, val); + + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); + + wait = 1000; + while (wait--) { + if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) + goto unlock; + + usleep_range(1000, 1500); + } + + ret = -EBUSY; + +unlock: + mutex_unlock(&pdata->rss_mutex); + + return ret; +} + +static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + unsigned int *key = (unsigned int *)&pdata->rss_key; + int ret; + + while (key_regs--) { + ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, + key_regs, *key++); + if (ret) + return ret; + } + + return 0; +} + +static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + ret = xgbe_write_rss_reg(pdata, + XGBE_RSS_LOOKUP_TABLE_TYPE, i, + pdata->rss_table[i]); + if (ret) + return ret; + } + + return 0; +} + +static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key) +{ + memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + + return xgbe_write_rss_hash_key(pdata); +} + +static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, + const u32 *table) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); + + return xgbe_write_rss_lookup_table(pdata); +} + +static int xgbe_enable_rss(struct xgbe_prv_data *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + /* Program the hash key */ + ret = xgbe_write_rss_hash_key(pdata); + if (ret) + return ret; + + /* Program the lookup table */ + ret = xgbe_write_rss_lookup_table(pdata); + if (ret) + return ret; + + /* Set the RSS options */ + XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); + + /* Enable RSS */ + XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); + + return 0; +} + +static int xgbe_disable_rss(struct xgbe_prv_data *pdata) +{ + if (!pdata->hw_feat.rss) + return -EOPNOTSUPP; + + XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); + + return 0; +} + +static void xgbe_config_rss(struct xgbe_prv_data *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return; + + if (pdata->netdev->features & NETIF_F_RXHASH) + ret = xgbe_enable_rss(pdata); + else + ret = xgbe_disable_rss(pdata); + + if (ret) + netdev_err(pdata->netdev, + "error configuring RSS, RSS disabled\n"); +} + +static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Clear MTL flow 
control */ + for (i = 0; i < pdata->rx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); + + /* Clear MAC flow control */ + max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = XGMAC_IOREAD(pdata, reg); + XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); + XGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, reg_val; + unsigned int i; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); + + /* Set MAC flow control */ + max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = XGMAC_IOREAD(pdata, reg); + + /* Enable transmit flow control */ + XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); + /* Set pause time */ + XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); + + XGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); + + return 0; +} + +static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); + + return 0; +} + +static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) +{ + struct ieee_pfc *pfc = pdata->pfc; + + if (pdata->tx_pause || (pfc && pfc->pfc_en)) + xgbe_enable_tx_flow_control(pdata); + else + xgbe_disable_tx_flow_control(pdata); + + return 0; +} + +static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) +{ + struct ieee_pfc *pfc = pdata->pfc; + + if (pdata->rx_pause || (pfc && pfc->pfc_en)) + xgbe_enable_rx_flow_control(pdata); + else + xgbe_disable_rx_flow_control(pdata); + + return 0; +} + +static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) +{ + struct ieee_pfc *pfc = pdata->pfc; + + xgbe_config_tx_flow_control(pdata); + xgbe_config_rx_flow_control(pdata); + + XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, + (pfc && pfc->pfc_en) ? 
1 : 0); +} + +static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int dma_ch_isr, dma_ch_ier; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + /* Clear all the interrupts which are set */ + dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); + XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + + if (channel->tx_ring) { + /* Enable the following Tx interrupts + * TIE - Transmit Interrupt Enable (unless using + * per channel interrupts) + */ + if (!pdata->per_channel_irq) + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + } + if (channel->rx_ring) { + /* Enable following Rx interrupts + * RBUE - Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts) + */ + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); + if (!pdata->per_channel_irq) + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + } + + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + } +} + +static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) +{ + unsigned int mtl_q_isr; + unsigned int q_count, i; + + q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); + XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); + + /* No MTL interrupts to be enabled */ + XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); + } +} + +static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) +{ + unsigned int mac_ier = 0; + + /* Enable Timestamp interrupt */ + XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); + + XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); + + /* Enable all counter interrupts */ + XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); + XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); +} + +static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) +{ + if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3) + return 0; + + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3); + + return 0; +} + +static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata) +{ + if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2) + return 0; + + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2); + + return 0; +} + +static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata) +{ + if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0) + return 0; + + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0); + + return 0; +} + +static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, + unsigned int enable) +{ + unsigned int val = enable ? 1 : 0; + + if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) + return 0; + + DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving"); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); + + return 0; +} + +static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, + unsigned int enable) +{ + unsigned int val = enable ? 1 : 0; + + if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) + return 0; + + DBGPR(" %s allmulti mode\n", enable ? 
"entering" : "leaving"); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); + + return 0; +} + +static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, + struct netdev_hw_addr *ha, unsigned int *mac_reg) +{ + unsigned int mac_addr_hi, mac_addr_lo; + u8 *mac_addr; + + mac_addr_lo = 0; + mac_addr_hi = 0; + + if (ha) { + mac_addr = (u8 *)&mac_addr_lo; + mac_addr[0] = ha->addr[0]; + mac_addr[1] = ha->addr[1]; + mac_addr[2] = ha->addr[2]; + mac_addr[3] = ha->addr[3]; + mac_addr = (u8 *)&mac_addr_hi; + mac_addr[0] = ha->addr[4]; + mac_addr[1] = ha->addr[5]; + + DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr, + *mac_reg); + + XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); + } + + XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); + *mac_reg += MAC_MACA_INC; + XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); + *mac_reg += MAC_MACA_INC; +} + +static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; + unsigned int mac_reg; + unsigned int addn_macs; + + mac_reg = MAC_MACA1HR; + addn_macs = pdata->hw_feat.addn_mac; + + if (netdev_uc_count(netdev) > addn_macs) { + xgbe_set_promiscuous_mode(pdata, 1); + } else { + netdev_for_each_uc_addr(ha, netdev) { + xgbe_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } + + if (netdev_mc_count(netdev) > addn_macs) { + xgbe_set_all_multicast_mode(pdata, 1); + } else { + netdev_for_each_mc_addr(ha, netdev) { + xgbe_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } + } + } + + /* Clear remaining additional MAC address entries */ + while (addn_macs--) + xgbe_set_mac_reg(pdata, NULL, &mac_reg); +} + +static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; + unsigned int hash_reg; + unsigned int hash_table_shift, hash_table_count; + u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE]; + u32 crc; + unsigned int i; + + hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); + hash_table_count = pdata->hw_feat.hash_table_size / 32; + memset(hash_table, 0, sizeof(hash_table)); + + /* Build the MAC Hash Table register values */ + netdev_for_each_uc_addr(ha, netdev) { + crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); + crc >>= hash_table_shift; + hash_table[crc >> 5] |= (1 << (crc & 0x1f)); + } + + netdev_for_each_mc_addr(ha, netdev) { + crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); + crc >>= hash_table_shift; + hash_table[crc >> 5] |= (1 << (crc & 0x1f)); + } + + /* Set the MAC Hash Table registers */ + hash_reg = MAC_HTR0; + for (i = 0; i < hash_table_count; i++) { + XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]); + hash_reg += MAC_HTR_INC; + } +} + +static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) +{ + if (pdata->hw_feat.hash_table_size) + xgbe_set_mac_hash_table(pdata); + else + xgbe_set_mac_addn_addrs(pdata); + + return 0; +} + +static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr) +{ + unsigned int mac_addr_hi, mac_addr_lo; + + mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); + mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | + (addr[1] << 8) | (addr[0] << 0); + + XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); + XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); + + return 0; +} + +static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + unsigned int pr_mode, am_mode; + + pr_mode = ((netdev->flags & IFF_PROMISC) != 0); + am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); + + xgbe_set_promiscuous_mode(pdata, pr_mode); + 
xgbe_set_all_multicast_mode(pdata, am_mode); + + xgbe_add_mac_addresses(pdata); + + return 0; +} + +static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, + int mmd_reg) +{ + unsigned int mmd_address; + int mmd_data; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying APB3 + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 32-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 2 bits and reading 32 bits of data. + */ + mutex_lock(&pdata->xpcs_mutex); + XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); + mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2); + mutex_unlock(&pdata->xpcs_mutex); + + return mmd_data; +} + +static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, + int mmd_reg, int mmd_data) +{ + unsigned int mmd_address; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* If the PCS is changing modes, match the MAC speed to it */ + if (((mmd_address >> 16) == MDIO_MMD_PCS) && + ((mmd_address & 0xffff) == MDIO_CTRL2)) { + struct phy_device *phydev = pdata->phydev; + + if (mmd_data & MDIO_PCS_CTRL2_TYPE) { + /* KX mode */ + if (phydev->supported & SUPPORTED_1000baseKX_Full) + xgbe_set_gmii_speed(pdata); + else + xgbe_set_gmii_2500_speed(pdata); + } else { + /* KR mode */ + xgbe_set_xgmii_speed(pdata); + } + } + + /* The PCS registers are accessed using mmio. The underlying APB3 + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 32-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 2 bits and reading 32 bits of data. 
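+ *
+ * (Illustrative example with a hypothetical mmd_address of 0x30007:
+ * the address phase writes 0x300 (mmd_address >> 8) to the
+ * PCS_MMD_SELECT window register, and the data phase then accesses
+ * mmio offset (0x07 << 2) = 0x1c.)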
+ */ + mutex_lock(&pdata->xpcs_mutex); + XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); + XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); + mutex_unlock(&pdata->xpcs_mutex); +} + +static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) +{ + return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN); +} + +static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); + + return 0; +} + +static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); + + return 0; +} + +static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + /* Put the VLAN tag in the Rx descriptor */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); + + /* Don't check the VLAN type */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); + + /* Check only C-TAG (0x8100) packets */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); + + /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); + + /* Enable VLAN tag stripping */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); + + return 0; +} + +static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); + + return 0; +} + +static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Enable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); + + /* Enable VLAN Hash Table filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); + + /* Disable VLAN tag inverse matching */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); + + /* Only filter on the lower 12-bits of the VLAN tag */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); + + /* In order for the VLAN Hash Table filtering to be effective, + * the VLAN tag identifier in the VLAN Tag Register must not + * be zero. Set the VLAN tag identifier to "1" to enable the + * VLAN Hash Table filtering. This implies that a VLAN tag of + * 1 will always pass filtering. 
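+ *
+ * (For reference, the 16-bit hash table itself is built in
+ * xgbe_update_vlan_hash_table() below: the upper four bits of the
+ * bit-reversed, inverted CRC of each VID select which of the 16
+ * table bits to set.)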
+ */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); + + return 0; +} + +static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Disable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); + + return 0; +} + +#ifndef CRCPOLY_LE +#define CRCPOLY_LE 0xedb88320 +#endif +static u32 xgbe_vid_crc32_le(__le16 vid_le) +{ + u32 poly = CRCPOLY_LE; + u32 crc = ~0; + u32 temp = 0; + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= poly; + } + + return crc; +} + +static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) +{ + u32 crc; + u16 vid; + __le16 vid_le; + u16 vlan_hash_table = 0; + + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + } + + /* Set the VLAN Hash Table filtering register */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); + + return 0; +} + +static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) +{ + struct xgbe_ring_desc *rdesc = rdata->rdesc; + + /* Reset the Tx descriptor + * Set buffer 1 (lo) address to zero + * Set buffer 1 (hi) address to zero + * Reset all other control bits (IC, TTSE, B2L & B1L) + * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) + */ + rdesc->desc0 = 0; + rdesc->desc1 = 0; + rdesc->desc2 = 0; + rdesc->desc3 = 0; + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void xgbe_tx_desc_init(struct xgbe_channel *channel) +{ + struct xgbe_ring *ring = channel->tx_ring; + struct xgbe_ring_data *rdata; + int i; + int start_index = ring->cur; + + DBGPR("-->tx_desc_init\n"); + + /* Initialze all descriptors */ + for (i = 0; i < ring->rdesc_count; i++) { + rdata = XGBE_GET_DESC_DATA(ring, i); + + /* Initialize Tx descriptor */ + xgbe_tx_desc_reset(rdata); + } + + /* Update the total number of Tx descriptors */ + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); + + /* Update the starting address of descriptor ring */ + rdata = XGBE_GET_DESC_DATA(ring, start_index); + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, + upper_32_bits(rdata->rdesc_dma)); + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, + lower_32_bits(rdata->rdesc_dma)); + + DBGPR("<--tx_desc_init\n"); +} + +static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, + struct xgbe_ring_data *rdata, unsigned int index) +{ + struct xgbe_ring_desc *rdesc = rdata->rdesc; + unsigned int rx_usecs = pdata->rx_usecs; + unsigned int rx_frames = pdata->rx_frames; + unsigned int inte; + + if (!rx_usecs && !rx_frames) { + /* No coalescing, interrupt for every descriptor */ + inte = 1; + } else { + /* Set interrupt based on Rx frame coalescing setting */ + if (rx_frames && !((index + 1) % rx_frames)) + inte = 1; + else + inte = 0; + } + + /* Reset the Rx descriptor + * Set buffer 1 (lo) address to header dma address (lo) + * Set buffer 1 (hi) address to header dma address (hi) + * Set buffer 2 (lo) address to buffer dma address (lo) + * Set buffer 2 (hi) address to buffer dma address (hi) and + * set control bits OWN and INTE + */ + rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); + rdesc->desc1 = 
cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); + rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); + rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); + + XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); + + /* Since the Rx DMA engine is likely running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the descriptor + */ + dma_wmb(); + + XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void xgbe_rx_desc_init(struct xgbe_channel *channel) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + unsigned int start_index = ring->cur; + unsigned int i; + + DBGPR("-->rx_desc_init\n"); + + /* Initialize all descriptors */ + for (i = 0; i < ring->rdesc_count; i++) { + rdata = XGBE_GET_DESC_DATA(ring, i); + + /* Initialize Rx descriptor */ + xgbe_rx_desc_reset(pdata, rdata, i); + } + + /* Update the total number of Rx descriptors */ + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); + + /* Update the starting address of descriptor ring */ + rdata = XGBE_GET_DESC_DATA(ring, start_index); + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, + upper_32_bits(rdata->rdesc_dma)); + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, + lower_32_bits(rdata->rdesc_dma)); + + /* Update the Rx Descriptor Tail Pointer */ + rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, + lower_32_bits(rdata->rdesc_dma)); + + DBGPR("<--rx_desc_init\n"); +} + +static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata, + unsigned int addend) +{ + /* Set the addend register value and tell the device */ + XGMAC_IOWRITE(pdata, MAC_TSAR, addend); + XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1); + + /* Wait for addend update to complete */ + while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) + udelay(5); +} + +static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, + unsigned int nsec) +{ + /* Set the time values and tell the device */ + XGMAC_IOWRITE(pdata, MAC_STSUR, sec); + XGMAC_IOWRITE(pdata, MAC_STNUR, nsec); + XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1); + + /* Wait for time update to complete */ + while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) + udelay(5); +} + +static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata) +{ + u64 nsec; + + nsec = XGMAC_IOREAD(pdata, MAC_STSR); + nsec *= NSEC_PER_SEC; + nsec += XGMAC_IOREAD(pdata, MAC_STNR); + + return nsec; +} + +static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata) +{ + unsigned int tx_snr; + u64 nsec; + + tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR); + if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) + return 0; + + nsec = XGMAC_IOREAD(pdata, MAC_TXSSR); + nsec *= NSEC_PER_SEC; + nsec += tx_snr; + + return nsec; +} + +static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet, + struct xgbe_ring_desc *rdesc) +{ + u64 nsec; + + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) && + !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) { + nsec = le32_to_cpu(rdesc->desc1); + nsec <<= 32; + nsec |= le32_to_cpu(rdesc->desc0); + if (nsec != 0xffffffffffffffffULL) { + packet->rx_tstamp = nsec; + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + RX_TSTAMP, 1); + } + } +} + +static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, + unsigned int mac_tscr) +{ + /* Set one nano-second accuracy */ + 
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1); + + /* Set fine timestamp update */ + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1); + + /* Overwrite earlier timestamps */ + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1); + + XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); + + /* Exit if timestamping is not enabled */ + if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) + return 0; + + /* Initialize time registers */ + XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC); + XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC); + xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend); + xgbe_set_tstamp_time(pdata, 0, 0); + + /* Initialize the timecounter */ + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, + ktime_to_ns(ktime_get_real())); + + return 0; +} + +static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) +{ + struct ieee_ets *ets = pdata->ets; + unsigned int total_weight, min_weight, weight; + unsigned int i; + + if (!ets) + return; + + /* Set Tx to deficit weighted round robin scheduling algorithm (when + * traffic class is using ETS algorithm) + */ + XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR); + + /* Set Traffic Class algorithms */ + total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; + min_weight = total_weight / 100; + if (!min_weight) + min_weight = 1; + + for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + DBGPR(" TC%u using SP\n", i); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, + MTL_TSA_SP); + break; + case IEEE_8021QAZ_TSA_ETS: + weight = total_weight * ets->tc_tx_bw[i] / 100; + weight = clamp(weight, min_weight, total_weight); + + DBGPR(" TC%u using DWRR (weight %u)\n", i, weight); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, + MTL_TSA_ETS); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, + weight); + break; + } + } +} + +static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) +{ + struct ieee_pfc *pfc = pdata->pfc; + struct ieee_ets *ets = pdata->ets; + unsigned int mask, reg, reg_val; + unsigned int tc, prio; + + if (!pfc || !ets) + return; + + for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) { + mask = 0; + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + if ((pfc->pfc_en & (1 << prio)) && + (ets->prio_tc[prio] == tc)) + mask |= (1 << prio); + } + mask &= 0xff; + + DBGPR(" TC%u PFC mask=%#x\n", tc, mask); + reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG)); + reg_val = XGMAC_IOREAD(pdata, reg); + + reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3)); + reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3)); + + XGMAC_IOWRITE(pdata, reg, reg_val); + } + + xgbe_config_flow_control(pdata); +} + +static void xgbe_tx_start_xmit(struct xgbe_channel *channel, + struct xgbe_ring *ring) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_ring_data *rdata; + + /* Make sure everything is written before the register write */ + wmb(); + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor */ + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, + lower_32_bits(rdata->rdesc_dma)); + + /* Start the Tx timer */ + if (pdata->tx_usecs && !channel->tx_timer_active) { + channel->tx_timer_active = 1; + mod_timer(&channel->tx_timer, + jiffies + usecs_to_jiffies(pdata->tx_usecs)); + } + + ring->tx.xmit_more = 0; +} + +static void xgbe_dev_xmit(struct xgbe_channel *channel) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_ring *ring 
= channel->tx_ring; + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + struct xgbe_packet_data *packet = &ring->packet_data; + unsigned int csum, tso, vlan; + unsigned int tso_context, vlan_context; + unsigned int tx_set_ic; + int start_index = ring->cur; + int cur_index = ring->cur; + int i; + + DBGPR("-->xgbe_dev_xmit\n"); + + csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + CSUM_ENABLE); + tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + TSO_ENABLE); + vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + VLAN_CTAG); + + if (tso && (packet->mss != ring->tx.cur_mss)) + tso_context = 1; + else + tso_context = 0; + + if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)) + vlan_context = 1; + else + vlan_context = 0; + + /* Determine if an interrupt should be generated for this Tx: + * Interrupt: + * - Tx frame count exceeds the frame count setting + * - Addition of Tx frame count to the frame count since the + * last interrupt was set exceeds the frame count setting + * No interrupt: + * - No frame count setting specified (ethtool -C ethX tx-frames 0) + * - Addition of Tx frame count to the frame count since the + * last interrupt was set does not exceed the frame count setting + */ + ring->coalesce_count += packet->tx_packets; + if (!pdata->tx_frames) + tx_set_ic = 0; + else if (packet->tx_packets > pdata->tx_frames) + tx_set_ic = 1; + else if ((ring->coalesce_count % pdata->tx_frames) < + packet->tx_packets) + tx_set_ic = 1; + else + tx_set_ic = 0; + + rdata = XGBE_GET_DESC_DATA(ring, cur_index); + rdesc = rdata->rdesc; + + /* Create a context descriptor if this is a TSO packet */ + if (tso_context || vlan_context) { + if (tso_context) { + DBGPR(" TSO context descriptor, mss=%u\n", + packet->mss); + + /* Set the MSS size */ + XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, + MSS, packet->mss); + + /* Mark it as a CONTEXT descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, + CTXT, 1); + + /* Indicate this descriptor contains the MSS */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, + TCMSSV, 1); + + ring->tx.cur_mss = packet->mss; + } + + if (vlan_context) { + DBGPR(" VLAN context descriptor, ctag=%u\n", + packet->vlan_ctag); + + /* Mark it as a CONTEXT descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, + CTXT, 1); + + /* Set the VLAN tag */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, + VT, packet->vlan_ctag); + + /* Indicate this descriptor contains the VLAN tag */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, + VLTV, 1); + + ring->tx.cur_vlan_ctag = packet->vlan_ctag; + } + + cur_index++; + rdata = XGBE_GET_DESC_DATA(ring, cur_index); + rdesc = rdata->rdesc; + } + + /* Update buffer address (for TSO this is the header) */ + rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); + rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); + + /* Update the buffer length */ + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, + rdata->skb_dma_len); + + /* VLAN tag insertion check */ + if (vlan) + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, + TX_NORMAL_DESC2_VLAN_INSERT); + + /* Timestamp enablement check */ + if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); + + /* Mark it as First Descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); + + /* Mark it as a NORMAL descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); + + /* Set OWN bit if not 
the first descriptor */ + if (cur_index != start_index) + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); + + if (tso) { + /* Enable TSO */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, + packet->tcp_payload_len); + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, + packet->tcp_header_len / 4); + } else { + /* Enable CRC and Pad Insertion */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); + + /* Enable HW CSUM */ + if (csum) + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, + CIC, 0x3); + + /* Set the total length to be transmitted */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, + packet->length); + } + + for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { + cur_index++; + rdata = XGBE_GET_DESC_DATA(ring, cur_index); + rdesc = rdata->rdesc; + + /* Update buffer address */ + rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); + rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); + + /* Update the buffer length */ + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, + rdata->skb_dma_len); + + /* Set OWN bit */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); + + /* Mark it as NORMAL descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); + + /* Enable HW CSUM */ + if (csum) + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, + CIC, 0x3); + } + + /* Set LAST bit for the last descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); + + /* Set IC bit based on Tx coalescing settings */ + if (tx_set_ic) + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); + + /* Save the Tx info to report back during cleanup */ + rdata->tx.packets = packet->tx_packets; + rdata->tx.bytes = packet->tx_bytes; + + /* In case the Tx DMA engine is running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the first descriptor + */ + dma_wmb(); + + /* Set OWN bit for the first descriptor */ + rdata = XGBE_GET_DESC_DATA(ring, start_index); + rdesc = rdata->rdesc; + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); + +#ifdef XGMAC_ENABLE_TX_DESC_DUMP + xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1); +#endif + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); + + ring->cur = cur_index + 1; + if (!packet->skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, + channel->queue_index))) + xgbe_tx_start_xmit(channel, ring); + else + ring->tx.xmit_more = 1; + + DBGPR(" %s: descriptors %u to %u written\n", + channel->name, start_index & (ring->rdesc_count - 1), + (ring->cur - 1) & (ring->rdesc_count - 1)); + + DBGPR("<--xgbe_dev_xmit\n"); +} + +static int xgbe_dev_read(struct xgbe_channel *channel) +{ + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + struct xgbe_packet_data *packet = &ring->packet_data; + struct net_device *netdev = channel->pdata->netdev; + unsigned int err, etlt, l34t; + + DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); + + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + rdesc = rdata->rdesc; + + /* Check for data availability */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) + return 1; + + /* Make sure descriptor fields are read after reading the OWN bit */ + dma_rmb(); + +#ifdef XGMAC_ENABLE_RX_DESC_DUMP + xgbe_dump_rx_desc(ring, rdesc, ring->cur); +#endif + + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { + /* 
Timestamp Context Descriptor */ + xgbe_get_rx_tstamp(packet, rdesc); + + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT, 1); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT, 0); + return 0; + } + + /* Normal Descriptor, be sure Context Descriptor bit is off */ + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); + + /* Indicate if a Context Descriptor is next */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT, 1); + + /* Get the header length */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) + rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, + RX_NORMAL_DESC2, HL); + + /* Get the RSS hash */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + RSS_HASH, 1); + + packet->rss_hash = le32_to_cpu(rdesc->desc1); + + l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); + switch (l34t) { + case RX_DESC3_L34T_IPV4_TCP: + case RX_DESC3_L34T_IPV4_UDP: + case RX_DESC3_L34T_IPV6_TCP: + case RX_DESC3_L34T_IPV6_UDP: + packet->rss_hash_type = PKT_HASH_TYPE_L4; + break; + default: + packet->rss_hash_type = PKT_HASH_TYPE_L3; + } + } + + /* Get the packet length */ + rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); + + if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { + /* Not all the data has been transferred for this packet */ + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + INCOMPLETE, 1); + return 0; + } + + /* This is the last of the data for this packet */ + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + INCOMPLETE, 0); + + /* Set checksum done indicator as appropriate */ + if (channel->pdata->netdev->features & NETIF_F_RXCSUM) + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CSUM_DONE, 1); + + /* Check for errors (only valid in last descriptor) */ + err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); + etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); + DBGPR(" err=%u, etlt=%#x\n", err, etlt); + + if (!err || !etlt) { + /* No error if err is 0 or etlt is 0 */ + if ((etlt == 0x09) && + (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + VLAN_CTAG, 1); + packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, + RX_NORMAL_DESC0, + OVT); + DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag); + } + } else { + if ((etlt == 0x05) || (etlt == 0x06)) + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CSUM_DONE, 0); + else + XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, + FRAME, 1); + } + + DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, + ring->cur & (ring->rdesc_count - 1), ring->cur); + + return 0; +} + +static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) +{ + /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ + return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); +} + +static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) +{ + /* Rx and Tx share LD bit, so check TDES3.LD bit */ + return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); +} + +static int xgbe_enable_int(struct xgbe_channel *channel, + enum xgbe_int int_id) +{ + unsigned int dma_ch_ier; + + dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); + + switch (int_id) { + case XGMAC_INT_DMA_CH_SR_TI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + break; + case XGMAC_INT_DMA_CH_SR_TPS: + 
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1); + break; + case XGMAC_INT_DMA_CH_SR_TBU: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1); + break; + case XGMAC_INT_DMA_CH_SR_RI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + break; + case XGMAC_INT_DMA_CH_SR_RBU: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); + break; + case XGMAC_INT_DMA_CH_SR_RPS: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1); + break; + case XGMAC_INT_DMA_CH_SR_TI_RI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + break; + case XGMAC_INT_DMA_CH_SR_FBE: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + break; + case XGMAC_INT_DMA_ALL: + dma_ch_ier |= channel->saved_ier; + break; + default: + return -1; + } + + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + + return 0; +} + +static int xgbe_disable_int(struct xgbe_channel *channel, + enum xgbe_int int_id) +{ + unsigned int dma_ch_ier; + + dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); + + switch (int_id) { + case XGMAC_INT_DMA_CH_SR_TI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); + break; + case XGMAC_INT_DMA_CH_SR_TPS: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0); + break; + case XGMAC_INT_DMA_CH_SR_TBU: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0); + break; + case XGMAC_INT_DMA_CH_SR_RI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + break; + case XGMAC_INT_DMA_CH_SR_RBU: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); + break; + case XGMAC_INT_DMA_CH_SR_RPS: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0); + break; + case XGMAC_INT_DMA_CH_SR_TI_RI: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + break; + case XGMAC_INT_DMA_CH_SR_FBE: + XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0); + break; + case XGMAC_INT_DMA_ALL: + channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK; + dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK; + break; + default: + return -1; + } + + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + + return 0; +} + +static int xgbe_exit(struct xgbe_prv_data *pdata) +{ + unsigned int count = 2000; + + DBGPR("-->xgbe_exit\n"); + + /* Issue a software reset */ + XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); + usleep_range(10, 15); + + /* Poll Until Poll Condition */ + while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) + usleep_range(500, 600); + + if (!count) + return -EBUSY; + + DBGPR("<--xgbe_exit\n"); + + return 0; +} + +static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) +{ + unsigned int i, count; + + if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) + return 0; + + for (i = 0; i < pdata->tx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); + + /* Poll Until Poll Condition */ + for (i = 0; i < pdata->tx_q_count; i++) { + count = 2000; + while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, + MTL_Q_TQOMR, FTQ)) + usleep_range(500, 600); + + if (!count) + return -EBUSY; + } + + return 0; +} + +static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) +{ + /* Set enhanced addressing mode */ + XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1); + + /* Set the System Bus mode */ + XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1); + XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1); +} + +static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) +{ + unsigned int arcache, awcache; + + arcache = 0; + XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain); + XGMAC_SET_BITS(arcache, 
DMA_AXIARCR, TEC, pdata->arcache); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache); + XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain); + XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); + + awcache = 0; + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache); + XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain); + XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); +} + +static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) +{ + unsigned int i; + + /* Set Tx to weighted round robin scheduling algorithm */ + XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); + + /* Set Tx traffic classes to use WRR algorithm with equal weights */ + for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, + MTL_TSA_ETS); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); + } + + /* Set Rx to strict priority algorithm */ + XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); +} + +static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size, + unsigned int queue_count) +{ + unsigned int q_fifo_size = 0; + enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; + + /* Calculate Tx/Rx fifo share per queue */ + switch (fifo_size) { + case 0: + q_fifo_size = XGBE_FIFO_SIZE_B(128); + break; + case 1: + q_fifo_size = XGBE_FIFO_SIZE_B(256); + break; + case 2: + q_fifo_size = XGBE_FIFO_SIZE_B(512); + break; + case 3: + q_fifo_size = XGBE_FIFO_SIZE_KB(1); + break; + case 4: + q_fifo_size = XGBE_FIFO_SIZE_KB(2); + break; + case 5: + q_fifo_size = XGBE_FIFO_SIZE_KB(4); + break; + case 6: + q_fifo_size = XGBE_FIFO_SIZE_KB(8); + break; + case 7: + q_fifo_size = XGBE_FIFO_SIZE_KB(16); + break; + case 8: + q_fifo_size = XGBE_FIFO_SIZE_KB(32); + break; + case 9: + q_fifo_size = XGBE_FIFO_SIZE_KB(64); + break; + case 10: + q_fifo_size = XGBE_FIFO_SIZE_KB(128); + break; + case 11: + q_fifo_size = XGBE_FIFO_SIZE_KB(256); + break; + } + + /* The configured value is not the actual amount of fifo RAM */ + q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size); + + q_fifo_size = q_fifo_size / queue_count; + + /* Set the queue fifo size programmable value */ + if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256)) + p_fifo = XGMAC_MTL_FIFO_SIZE_256K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128)) + p_fifo = XGMAC_MTL_FIFO_SIZE_128K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64)) + p_fifo = XGMAC_MTL_FIFO_SIZE_64K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32)) + p_fifo = XGMAC_MTL_FIFO_SIZE_32K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16)) + p_fifo = XGMAC_MTL_FIFO_SIZE_16K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8)) + p_fifo = XGMAC_MTL_FIFO_SIZE_8K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4)) + p_fifo = XGMAC_MTL_FIFO_SIZE_4K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2)) + p_fifo = XGMAC_MTL_FIFO_SIZE_2K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1)) + p_fifo = XGMAC_MTL_FIFO_SIZE_1K; + else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512)) + p_fifo = XGMAC_MTL_FIFO_SIZE_512; + else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256)) + p_fifo = XGMAC_MTL_FIFO_SIZE_256; + + return p_fifo; +} + +static void 
xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) +{ + enum xgbe_mtl_fifo_size fifo_size; + unsigned int i; + + fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, + pdata->tx_q_count); + + for (i = 0; i < pdata->tx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); + + netdev_notice(pdata->netdev, + "%d Tx hardware queues, %d byte fifo per queue\n", + pdata->tx_q_count, ((fifo_size + 1) * 256)); +} + +static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) +{ + enum xgbe_mtl_fifo_size fifo_size; + unsigned int i; + + fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, + pdata->rx_q_count); + + for (i = 0; i < pdata->rx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); + + netdev_notice(pdata->netdev, + "%d Rx hardware queues, %d byte fifo per queue\n", + pdata->rx_q_count, ((fifo_size + 1) * 256)); +} + +static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) +{ + unsigned int qptc, qptc_extra, queue; + unsigned int prio_queues; + unsigned int ppq, ppq_extra, prio; + unsigned int mask; + unsigned int i, j, reg, reg_val; + + /* Map the MTL Tx Queues to Traffic Classes + * Note: Tx Queues >= Traffic Classes + */ + qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; + qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; + + for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { + for (j = 0; j < qptc; j++) { + DBGPR(" TXq%u mapped to TC%u\n", queue, i); + XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, + Q2TCMAP, i); + pdata->q2tc_map[queue++] = i; + } + + if (i < qptc_extra) { + DBGPR(" TXq%u mapped to TC%u\n", queue, i); + XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, + Q2TCMAP, i); + pdata->q2tc_map[queue++] = i; + } + } + + /* Map the 8 VLAN priority values to available MTL Rx queues */ + prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, + pdata->rx_q_count); + ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; + ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; + + reg = MAC_RQC2R; + reg_val = 0; + for (i = 0, prio = 0; i < prio_queues;) { + mask = 0; + for (j = 0; j < ppq; j++) { + DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + pdata->prio2q_map[prio++] = i; + } + + if (i < ppq_extra) { + DBGPR(" PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + pdata->prio2q_map[prio++] = i; + } + + reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); + + if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) + continue; + + XGMAC_IOWRITE(pdata, reg, reg_val); + reg += MAC_RQC2_INC; + reg_val = 0; + } + + /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ + reg = MTL_RQDCM0R; + reg_val = 0; + for (i = 0; i < pdata->rx_q_count;) { + reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); + + if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) + continue; + + XGMAC_IOWRITE(pdata, reg, reg_val); + + reg += MTL_RQDCM_INC; + reg_val = 0; + } +} + +static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + /* Activate flow control when less than 4k left in fifo */ + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); + + /* De-activate flow control when more than 6k left in fifo */ + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); + } +} + +static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) +{ + xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); + + /* Filtering is done using perfect filtering and hash filtering */ + if 
(pdata->hw_feat.hash_table_size) { + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); + } +} + +static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) +{ + unsigned int val; + + val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0; + + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); +} + +static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) +{ + switch (pdata->phy_speed) { + case SPEED_10000: + xgbe_set_xgmii_speed(pdata); + break; + + case SPEED_2500: + xgbe_set_gmii_2500_speed(pdata); + break; + + case SPEED_1000: + xgbe_set_gmii_speed(pdata); + break; + } +} + +static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) +{ + if (pdata->netdev->features & NETIF_F_RXCSUM) + xgbe_enable_rx_csum(pdata); + else + xgbe_disable_rx_csum(pdata); +} + +static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) +{ + /* Indicate that VLAN Tx CTAGs come from context descriptors */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); + + /* Set the current VLAN Hash Table register value */ + xgbe_update_vlan_hash_table(pdata); + + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + xgbe_enable_rx_vlan_filtering(pdata); + else + xgbe_disable_rx_vlan_filtering(pdata); + + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + xgbe_enable_rx_vlan_stripping(pdata); + else + xgbe_disable_rx_vlan_stripping(pdata); +} + +static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) +{ + bool read_hi; + u64 val; + + switch (reg_lo) { + /* These registers are always 64 bit */ + case MMC_TXOCTETCOUNT_GB_LO: + case MMC_TXOCTETCOUNT_G_LO: + case MMC_RXOCTETCOUNT_GB_LO: + case MMC_RXOCTETCOUNT_G_LO: + read_hi = true; + break; + + default: + read_hi = false; + } + + val = XGMAC_IOREAD(pdata, reg_lo); + + if (read_hi) + val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); + + return val; +} + +static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) +{ + struct xgbe_mmc_stats *stats = &pdata->mmc_stats; + unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) + stats->txoctetcount_gb += + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) + stats->txframecount_gb += + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) + stats->txbroadcastframes_g += + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) + stats->txmulticastframes_g += + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) + stats->tx64octets_gb += + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) + stats->tx65to127octets_gb += + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) + stats->tx128to255octets_gb += + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) + stats->tx256to511octets_gb += + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) + stats->tx512to1023octets_gb += + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) + stats->tx1024tomaxoctets_gb += 
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) + stats->txunicastframes_gb += + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) + stats->txmulticastframes_gb += + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) + stats->txbroadcastframes_g += + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) + stats->txunderflowerror += + xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) + stats->txoctetcount_g += + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) + stats->txframecount_g += + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) + stats->txpauseframes += + xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) + stats->txvlanframes_g += + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); +} + +static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) +{ + struct xgbe_mmc_stats *stats = &pdata->mmc_stats; + unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) + stats->rxframecount_gb += + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) + stats->rxoctetcount_gb += + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) + stats->rxoctetcount_g += + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) + stats->rxbroadcastframes_g += + xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) + stats->rxmulticastframes_g += + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) + stats->rxcrcerror += + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) + stats->rxrunterror += + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) + stats->rxjabbererror += + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) + stats->rxundersize_g += + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) + stats->rxoversize_g += + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) + stats->rx64octets_gb += + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) + stats->rx65to127octets_gb += + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) + stats->rx128to255octets_gb += + xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) + stats->rx256to511octets_gb += + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) + stats->rx512to1023octets_gb += + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) + stats->rx1024tomaxoctets_gb += + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + if 
(XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) + stats->rxunicastframes_g += + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) + stats->rxlengtherror += + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) + stats->rxoutofrangetype += + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) + stats->rxpauseframes += + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) + stats->rxfifooverflow += + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) + stats->rxvlanframes_gb += + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) + stats->rxwatchdogerror += + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); +} + +static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) +{ + struct xgbe_mmc_stats *stats = &pdata->mmc_stats; + + /* Freeze counters */ + XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); + + stats->txoctetcount_gb += + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + stats->txframecount_gb += + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + stats->txbroadcastframes_g += + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + stats->txmulticastframes_g += + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + stats->tx64octets_gb += + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + stats->tx65to127octets_gb += + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + stats->tx128to255octets_gb += + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + stats->tx256to511octets_gb += + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + stats->tx512to1023octets_gb += + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + stats->tx1024tomaxoctets_gb += + xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + stats->txunicastframes_gb += + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + + stats->txmulticastframes_gb += + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + + stats->txbroadcastframes_g += + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + + stats->txunderflowerror += + xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + + stats->txoctetcount_g += + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + + stats->txframecount_g += + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + + stats->txpauseframes += + xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + stats->txvlanframes_g += + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + + stats->rxframecount_gb += + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + stats->rxoctetcount_gb += + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + stats->rxoctetcount_g += + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + stats->rxbroadcastframes_g += + xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + stats->rxmulticastframes_g += + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + + stats->rxcrcerror += + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); + + stats->rxrunterror += + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); + + stats->rxjabbererror += + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); + + stats->rxundersize_g += + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + stats->rxoversize_g += + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); + + stats->rx64octets_gb += + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + stats->rx65to127octets_gb += + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + stats->rx128to255octets_gb += + 
xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + stats->rx256to511octets_gb += + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + stats->rx512to1023octets_gb += + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + stats->rx1024tomaxoctets_gb += + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + stats->rxunicastframes_g += + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + stats->rxlengtherror += + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + + stats->rxoutofrangetype += + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + stats->rxpauseframes += + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + + stats->rxfifooverflow += + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + stats->rxvlanframes_gb += + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + stats->rxwatchdogerror += + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); + + /* Un-freeze counters */ + XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); +} + +static void xgbe_config_mmc(struct xgbe_prv_data *pdata) +{ + /* Set counters to reset on read */ + XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); + + /* Reset the counters */ + XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); +} + +static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, + struct xgbe_channel *channel) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned int tx_status; + unsigned long tx_timeout; + + /* Calculate the status register to read and the position within */ + if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) + + DMA_DSR0_TPS_START; + } else { + tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + + DMA_DSRX_TPS_START; + } + + /* The Tx engine cannot be stopped if it is actively processing + * descriptors. Wait for the Tx engine to enter the stopped or + * suspended state. Don't wait forever though... 
+ */ + tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, tx_timeout)) { + tx_status = XGMAC_IOREAD(pdata, tx_dsr); + tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, tx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Tx DMA channel %u to stop\n", + channel->queue_index); +} + +static void xgbe_enable_tx(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + /* Enable each Tx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); + } + + /* Enable each Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, + MTL_Q_ENABLED); + + /* Enable MAC Tx */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); +} + +static void xgbe_disable_tx(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + xgbe_prepare_tx_stop(pdata, channel); + } + + /* Disable MAC Tx */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); + + /* Disable each Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); + + /* Disable each Tx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); + } +} + +static void xgbe_enable_rx(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int reg_val, i; + + /* Enable each Rx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); + } + + /* Enable each Rx queue */ + reg_val = 0; + for (i = 0; i < pdata->rx_q_count; i++) + reg_val |= (0x02 << (i << 1)); + XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); + + /* Enable MAC Rx */ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); +} + +static void xgbe_disable_rx(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + /* Disable MAC Rx */ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); + + /* Disable each Rx queue */ + XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); + + /* Disable each Rx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); + } +} + +static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + /* Enable each Tx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); + } + + /* Enable MAC Tx */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); +} + +static void xgbe_powerdown_tx(struct 
xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + /* Prepare for Tx DMA channel stop */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + xgbe_prepare_tx_stop(pdata, channel); + } + + /* Disable MAC Tx */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); + + /* Disable each Tx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); + } +} + +static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + /* Enable each Rx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); + } +} + +static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + /* Disable each Rx DMA channel */ + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); + } +} + +static int xgbe_init(struct xgbe_prv_data *pdata) +{ + struct xgbe_desc_if *desc_if = &pdata->desc_if; + int ret; + + DBGPR("-->xgbe_init\n"); + + /* Flush Tx queues */ + ret = xgbe_flush_tx_queues(pdata); + if (ret) + return ret; + + /* + * Initialize DMA related features + */ + xgbe_config_dma_bus(pdata); + xgbe_config_dma_cache(pdata); + xgbe_config_osp_mode(pdata); + xgbe_config_pblx8(pdata); + xgbe_config_tx_pbl_val(pdata); + xgbe_config_rx_pbl_val(pdata); + xgbe_config_rx_coalesce(pdata); + xgbe_config_tx_coalesce(pdata); + xgbe_config_rx_buffer_size(pdata); + xgbe_config_tso_mode(pdata); + xgbe_config_sph_mode(pdata); + xgbe_config_rss(pdata); + desc_if->wrapper_tx_desc_init(pdata); + desc_if->wrapper_rx_desc_init(pdata); + xgbe_enable_dma_interrupts(pdata); + + /* + * Initialize MTL related features + */ + xgbe_config_mtl_mode(pdata); + xgbe_config_queue_mapping(pdata); + xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); + xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); + xgbe_config_tx_threshold(pdata, pdata->tx_threshold); + xgbe_config_rx_threshold(pdata, pdata->rx_threshold); + xgbe_config_tx_fifo_size(pdata); + xgbe_config_rx_fifo_size(pdata); + xgbe_config_flow_control_threshold(pdata); + /*TODO: Error Packet and undersized good Packet forwarding enable + (FEP and FUP) + */ + xgbe_config_dcb_tc(pdata); + xgbe_config_dcb_pfc(pdata); + xgbe_enable_mtl_interrupts(pdata); + + /* + * Initialize MAC related features + */ + xgbe_config_mac_address(pdata); + xgbe_config_rx_mode(pdata); + xgbe_config_jumbo_enable(pdata); + xgbe_config_flow_control(pdata); + xgbe_config_mac_speed(pdata); + xgbe_config_checksum_offload(pdata); + xgbe_config_vlan_support(pdata); + xgbe_config_mmc(pdata); + xgbe_enable_mac_interrupts(pdata); + + DBGPR("<--xgbe_init\n"); + + return 0; +} + +void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) +{ + DBGPR("-->xgbe_init_function_ptrs\n"); + + hw_if->tx_complete = xgbe_tx_complete; + + hw_if->set_mac_address = xgbe_set_mac_address; + hw_if->config_rx_mode = xgbe_config_rx_mode; + + hw_if->enable_rx_csum = xgbe_enable_rx_csum; + hw_if->disable_rx_csum = xgbe_disable_rx_csum; + + hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; + hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; + 
hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; + hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; + hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; + + hw_if->read_mmd_regs = xgbe_read_mmd_regs; + hw_if->write_mmd_regs = xgbe_write_mmd_regs; + + hw_if->set_gmii_speed = xgbe_set_gmii_speed; + hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed; + hw_if->set_xgmii_speed = xgbe_set_xgmii_speed; + + hw_if->enable_tx = xgbe_enable_tx; + hw_if->disable_tx = xgbe_disable_tx; + hw_if->enable_rx = xgbe_enable_rx; + hw_if->disable_rx = xgbe_disable_rx; + + hw_if->powerup_tx = xgbe_powerup_tx; + hw_if->powerdown_tx = xgbe_powerdown_tx; + hw_if->powerup_rx = xgbe_powerup_rx; + hw_if->powerdown_rx = xgbe_powerdown_rx; + + hw_if->dev_xmit = xgbe_dev_xmit; + hw_if->dev_read = xgbe_dev_read; + hw_if->enable_int = xgbe_enable_int; + hw_if->disable_int = xgbe_disable_int; + hw_if->init = xgbe_init; + hw_if->exit = xgbe_exit; + + /* Descriptor related Sequences have to be initialized here */ + hw_if->tx_desc_init = xgbe_tx_desc_init; + hw_if->rx_desc_init = xgbe_rx_desc_init; + hw_if->tx_desc_reset = xgbe_tx_desc_reset; + hw_if->rx_desc_reset = xgbe_rx_desc_reset; + hw_if->is_last_desc = xgbe_is_last_desc; + hw_if->is_context_desc = xgbe_is_context_desc; + hw_if->tx_start_xmit = xgbe_tx_start_xmit; + + /* For FLOW ctrl */ + hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; + hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; + + /* For RX coalescing */ + hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; + hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; + hw_if->usec_to_riwt = xgbe_usec_to_riwt; + hw_if->riwt_to_usec = xgbe_riwt_to_usec; + + /* For RX and TX threshold config */ + hw_if->config_rx_threshold = xgbe_config_rx_threshold; + hw_if->config_tx_threshold = xgbe_config_tx_threshold; + + /* For RX and TX Store and Forward Mode config */ + hw_if->config_rsf_mode = xgbe_config_rsf_mode; + hw_if->config_tsf_mode = xgbe_config_tsf_mode; + + /* For TX DMA Operating on Second Frame config */ + hw_if->config_osp_mode = xgbe_config_osp_mode; + + /* For RX and TX PBL config */ + hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val; + hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val; + hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val; + hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val; + hw_if->config_pblx8 = xgbe_config_pblx8; + + /* For MMC statistics support */ + hw_if->tx_mmc_int = xgbe_tx_mmc_int; + hw_if->rx_mmc_int = xgbe_rx_mmc_int; + hw_if->read_mmc_stats = xgbe_read_mmc_stats; + + /* For PTP config */ + hw_if->config_tstamp = xgbe_config_tstamp; + hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; + hw_if->set_tstamp_time = xgbe_set_tstamp_time; + hw_if->get_tstamp_time = xgbe_get_tstamp_time; + hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; + + /* For Data Center Bridging config */ + hw_if->config_dcb_tc = xgbe_config_dcb_tc; + hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; + + /* For Receive Side Scaling */ + hw_if->enable_rss = xgbe_enable_rss; + hw_if->disable_rss = xgbe_disable_rss; + hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; + hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; + + DBGPR("<--xgbe_init_function_ptrs\n"); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c new file mode 100644 index 000000000..9fd6c69a8 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -0,0 +1,2227 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under 
your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> +#include <net/busy_poll.h> +#include <linux/clk.h> +#include <linux/if_ether.h> +#include <linux/net_tstamp.h> +#include <linux/phy.h> + +#include "xgbe.h" +#include "xgbe-common.h" + +static int xgbe_one_poll(struct napi_struct *, int); +static int xgbe_all_poll(struct napi_struct *, int); + +static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel_mem, *channel; + struct xgbe_ring *tx_ring, *rx_ring; + unsigned int count, i; + int ret = -ENOMEM; + + count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); + + channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL); + if (!channel_mem) + goto err_channel; + + tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring), + GFP_KERNEL); + if (!tx_ring) + goto err_tx_ring; + + rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring), + GFP_KERNEL); + if (!rx_ring) + goto err_rx_ring; + + for (i = 0, channel = channel_mem; i < count; i++, channel++) { + snprintf(channel->name, sizeof(channel->name), "channel-%d", i); + channel->pdata = pdata; + channel->queue_index = i; + channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + + (DMA_CH_INC * i); + + if (pdata->per_channel_irq) { + /* Get the DMA interrupt (offset 1) */ + ret = platform_get_irq(pdata->pdev, i + 1); + if (ret < 0) { + netdev_err(pdata->netdev, + "platform_get_irq %u failed\n", + i + 1); + goto err_irq; + } + + channel->dma_irq = ret; + } + + if (i < pdata->tx_ring_count) { + spin_lock_init(&tx_ring->lock); + channel->tx_ring = tx_ring++; + } + + if (i < pdata->rx_ring_count) { + spin_lock_init(&rx_ring->lock); + channel->rx_ring = rx_ring++; + } + + DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n", + channel->name, channel->queue_index, channel->dma_regs, + channel->dma_irq, channel->tx_ring, channel->rx_ring); + } + + pdata->channel = channel_mem; + pdata->channel_count = count; + + return 0; + +err_irq: + kfree(rx_ring); + +err_rx_ring: + kfree(tx_ring); + +err_tx_ring: + kfree(channel_mem); + +err_channel: + return ret; +} + +static void xgbe_free_channels(struct xgbe_prv_data *pdata) +{ + if (!pdata->channel) + return; + + kfree(pdata->channel->rx_ring); + kfree(pdata->channel->tx_ring); + kfree(pdata->channel); + + pdata->channel = NULL; + pdata->channel_count = 0; +} + +static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring) +{ + return (ring->rdesc_count - (ring->cur - ring->dirty)); +} + +static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring) +{ + return (ring->cur - ring->dirty); +} + +static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, + struct xgbe_ring *ring, unsigned int count) +{ + struct xgbe_prv_data *pdata = channel->pdata; + + if (count > xgbe_tx_avail_desc(ring)) { + DBGPR(" Tx queue stopped, not enough descriptors available\n"); + netif_stop_subqueue(pdata->netdev, channel->queue_index); + ring->tx.queue_stopped = 1; + + /* If we haven't notified the hardware because of xmit_more + * support, tell it now + */ + if (ring->tx.xmit_more) + pdata->hw_if.tx_start_xmit(channel, ring); + + return NETDEV_TX_BUSY; + } + + return 0; +} + +static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) +{ + unsigned int rx_buf_size; + + if (mtu > XGMAC_JUMBO_PACKET_MTU) { + netdev_alert(netdev, "MTU exceeds maximum supported value\n"); + return -EINVAL; + } + + rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE); + + rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & 
~(XGBE_RX_BUF_ALIGN - 1); + + return rx_buf_size; +} + +static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; + enum xgbe_int int_id; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_if->enable_int(channel, int_id); + } +} + +static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; + enum xgbe_int int_id; + unsigned int i; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_if->disable_int(channel, int_id); + } +} + +static irqreturn_t xgbe_isr(int irq, void *data) +{ + struct xgbe_prv_data *pdata = data; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; + unsigned int dma_isr, dma_ch_isr; + unsigned int mac_isr, mac_tssr; + unsigned int i; + + /* The DMA interrupt status register also reports MAC and MTL + * interrupts. So for polling mode, we just need to check for + * this register to be non-zero + */ + dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); + if (!dma_isr) + goto isr_done; + + DBGPR(" DMA_ISR = %08x\n", dma_isr); + + for (i = 0; i < pdata->channel_count; i++) { + if (!(dma_isr & (1 << i))) + continue; + + channel = pdata->channel + i; + + dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); + DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr); + + /* The TI or RI interrupt bits may still be set even if using + * per channel DMA interrupts. Check to be sure those are not + * enabled before using the private data napi structure. 
+ */ + if (!pdata->per_channel_irq && + (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || + XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { + if (napi_schedule_prep(&pdata->napi)) { + /* Disable Tx and Rx interrupts */ + xgbe_disable_rx_tx_ints(pdata); + + /* Turn on polling */ + __napi_schedule(&pdata->napi); + } + } + + /* Restart the device on a Fatal Bus Error */ + if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) + schedule_work(&pdata->restart_work); + + /* Clear all interrupt signals */ + XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); + } + + if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) { + mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); + + if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS)) + hw_if->tx_mmc_int(pdata); + + if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) + hw_if->rx_mmc_int(pdata); + + if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) { + mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR); + + if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) { + /* Read Tx Timestamp to clear interrupt */ + pdata->tx_tstamp = + hw_if->get_tx_tstamp(pdata); + schedule_work(&pdata->tx_tstamp_work); + } + } + } + + DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR)); + +isr_done: + return IRQ_HANDLED; +} + +static irqreturn_t xgbe_dma_isr(int irq, void *data) +{ + struct xgbe_channel *channel = data; + + /* Per channel DMA interrupts are enabled, so we use the per + * channel napi structure and not the private data napi structure + */ + if (napi_schedule_prep(&channel->napi)) { + /* Disable Tx and Rx interrupts */ + disable_irq_nosync(channel->dma_irq); + + /* Turn on polling */ + __napi_schedule(&channel->napi); + } + + return IRQ_HANDLED; +} + +static void xgbe_tx_timer(unsigned long data) +{ + struct xgbe_channel *channel = (struct xgbe_channel *)data; + struct xgbe_prv_data *pdata = channel->pdata; + struct napi_struct *napi; + + DBGPR("-->xgbe_tx_timer\n"); + + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + + if (napi_schedule_prep(napi)) { + /* Disable Tx and Rx interrupts */ + if (pdata->per_channel_irq) + disable_irq_nosync(channel->dma_irq); + else + xgbe_disable_rx_tx_ints(pdata); + + /* Turn on polling */ + __napi_schedule(napi); + } + + channel->tx_timer_active = 0; + + DBGPR("<--xgbe_tx_timer\n"); +} + +static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + DBGPR("-->xgbe_init_tx_timers\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + DBGPR(" %s adding tx timer\n", channel->name); + setup_timer(&channel->tx_timer, xgbe_tx_timer, + (unsigned long)channel); + } + + DBGPR("<--xgbe_init_tx_timers\n"); +} + +static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + DBGPR("-->xgbe_stop_tx_timers\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + DBGPR(" %s deleting tx timer\n", channel->name); + del_timer_sync(&channel->tx_timer); + } + + DBGPR("<--xgbe_stop_tx_timers\n"); +} + +void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) +{ + unsigned int mac_hfr0, mac_hfr1, mac_hfr2; + struct xgbe_hw_features *hw_feat = &pdata->hw_feat; + + DBGPR("-->xgbe_get_all_hw_features\n"); + + mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R); + mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R); + mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R); + + memset(hw_feat, 0, sizeof(*hw_feat)); + + hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); + + /* Hardware feature register 0 */ + hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); + hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); + hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); + hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); + hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); + hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); + hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); + hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); + hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); + hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); + hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); + hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, + ADDMACADRSEL); + hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); + hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); + + /* Hardware feature register 1 */ + hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + RXFIFOSIZE); + hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + TXFIFOSIZE); + hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); + hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); + hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); + hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); + hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); + hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); + hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + HASHTBLSZ); + hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + L3L4FNUM); + + /* Hardware feature register 2 */ + hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); + hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, 
TXQCNT); + hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); + hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); + hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); + hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM); + + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + + /* Translate the address width setting into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + + /* The Queue, Channel and TC counts are zero based so increment them + * to get the actual number + */ + hw_feat->rx_q_cnt++; + hw_feat->tx_q_cnt++; + hw_feat->rx_ch_cnt++; + hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; + + DBGPR("<--xgbe_get_all_hw_features\n"); +} + +static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) +{ + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (add) + netif_napi_add(pdata->netdev, &channel->napi, + xgbe_one_poll, NAPI_POLL_WEIGHT); + + napi_enable(&channel->napi); + } + } else { + if (add) + netif_napi_add(pdata->netdev, &pdata->napi, + xgbe_all_poll, NAPI_POLL_WEIGHT); + + napi_enable(&pdata->napi); + } +} + +static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) +{ + struct xgbe_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + napi_disable(&channel->napi); + + if (del) + netif_napi_del(&channel->napi); + } + } else { + napi_disable(&pdata->napi); + + if (del) + netif_napi_del(&pdata->napi); + } +} + +static int xgbe_request_irqs(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + struct net_device *netdev = pdata->netdev; + unsigned int i; + int ret; + + ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, + netdev->name, pdata); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + pdata->dev_irq); + return ret; + } + + if (!pdata->per_channel_irq) + return 0; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->dma_irq_name, + sizeof(channel->dma_irq_name) - 1, + "%s-TxRx-%u", netdev_name(netdev), + channel->queue_index); + + ret = devm_request_irq(pdata->dev, channel->dma_irq, + xgbe_dma_isr, 0, + channel->dma_irq_name, channel); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } + + return 0; + +err_irq: + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + for (i--, channel--; i < pdata->channel_count; i--, channel--) + devm_free_irq(pdata->dev, channel->dma_irq, channel); + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + return ret; +} + +static void xgbe_free_irqs(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + if (!pdata->per_channel_irq) + return; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + devm_free_irq(pdata->dev, channel->dma_irq, 
channel); +} + +void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + DBGPR("-->xgbe_init_tx_coalesce\n"); + + pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS; + pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES; + + hw_if->config_tx_coalesce(pdata); + + DBGPR("<--xgbe_init_tx_coalesce\n"); +} + +void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + DBGPR("-->xgbe_init_rx_coalesce\n"); + + pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); + pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS; + pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; + + hw_if->config_rx_coalesce(pdata); + + DBGPR("<--xgbe_init_rx_coalesce\n"); +} + +static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) +{ + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel; + struct xgbe_ring *ring; + struct xgbe_ring_data *rdata; + unsigned int i, j; + + DBGPR("-->xgbe_free_tx_data\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + for (j = 0; j < ring->rdesc_count; j++) { + rdata = XGBE_GET_DESC_DATA(ring, j); + desc_if->unmap_rdata(pdata, rdata); + } + } + + DBGPR("<--xgbe_free_tx_data\n"); +} + +static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) +{ + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel; + struct xgbe_ring *ring; + struct xgbe_ring_data *rdata; + unsigned int i, j; + + DBGPR("-->xgbe_free_rx_data\n"); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + for (j = 0; j < ring->rdesc_count; j++) { + rdata = XGBE_GET_DESC_DATA(ring, j); + desc_if->unmap_rdata(pdata, rdata); + } + } + + DBGPR("<--xgbe_free_rx_data\n"); +} + +static void xgbe_adjust_link(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct phy_device *phydev = pdata->phydev; + int new_state = 0; + + if (!phydev) + return; + + if (phydev->link) { + /* Flow control support */ + if (pdata->pause_autoneg) { + if (phydev->pause || phydev->asym_pause) { + pdata->tx_pause = 1; + pdata->rx_pause = 1; + } else { + pdata->tx_pause = 0; + pdata->rx_pause = 0; + } + } + + if (pdata->tx_pause != pdata->phy_tx_pause) { + hw_if->config_tx_flow_control(pdata); + pdata->phy_tx_pause = pdata->tx_pause; + } + + if (pdata->rx_pause != pdata->phy_rx_pause) { + hw_if->config_rx_flow_control(pdata); + pdata->phy_rx_pause = pdata->rx_pause; + } + + /* Speed support */ + if (phydev->speed != pdata->phy_speed) { + new_state = 1; + + switch (phydev->speed) { + case SPEED_10000: + hw_if->set_xgmii_speed(pdata); + break; + + case SPEED_2500: + hw_if->set_gmii_2500_speed(pdata); + break; + + case SPEED_1000: + hw_if->set_gmii_speed(pdata); + break; + } + pdata->phy_speed = phydev->speed; + } + + if (phydev->link != pdata->phy_link) { + new_state = 1; + pdata->phy_link = 1; + } + } else if (pdata->phy_link) { + new_state = 1; + pdata->phy_link = 0; + pdata->phy_speed = SPEED_UNKNOWN; + } + + if (new_state) + phy_print_status(phydev); +} + +static int xgbe_phy_init(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct phy_device *phydev = pdata->phydev; + int ret; + + pdata->phy_link = -1; + pdata->phy_speed = SPEED_UNKNOWN; + pdata->phy_tx_pause = pdata->tx_pause; + pdata->phy_rx_pause = pdata->rx_pause; + + ret = 
phy_connect_direct(netdev, phydev, &xgbe_adjust_link, + pdata->phy_mode); + if (ret) { + netdev_err(netdev, "phy_connect_direct failed\n"); + return ret; + } + + if (!phydev->drv || (phydev->drv->phy_id == 0)) { + netdev_err(netdev, "phy_id not valid\n"); + ret = -ENODEV; + goto err_phy_connect; + } + DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n", + dev_name(&phydev->dev), phydev->link); + + return 0; + +err_phy_connect: + phy_disconnect(phydev); + + return ret; +} + +static void xgbe_phy_exit(struct xgbe_prv_data *pdata) +{ + if (!pdata->phydev) + return; + + phy_disconnect(pdata->phydev); +} + +int xgbe_powerdown(struct net_device *netdev, unsigned int caller) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned long flags; + + DBGPR("-->xgbe_powerdown\n"); + + if (!netif_running(netdev) || + (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) { + netdev_alert(netdev, "Device is already powered down\n"); + DBGPR("<--xgbe_powerdown\n"); + return -EINVAL; + } + + spin_lock_irqsave(&pdata->lock, flags); + + if (caller == XGMAC_DRIVER_CONTEXT) + netif_device_detach(netdev); + + netif_tx_stop_all_queues(netdev); + + hw_if->powerdown_tx(pdata); + hw_if->powerdown_rx(pdata); + + xgbe_napi_disable(pdata, 0); + + phy_stop(pdata->phydev); + + pdata->power_down = 1; + + spin_unlock_irqrestore(&pdata->lock, flags); + + DBGPR("<--xgbe_powerdown\n"); + + return 0; +} + +int xgbe_powerup(struct net_device *netdev, unsigned int caller) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned long flags; + + DBGPR("-->xgbe_powerup\n"); + + if (!netif_running(netdev) || + (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) { + netdev_alert(netdev, "Device is already powered up\n"); + DBGPR("<--xgbe_powerup\n"); + return -EINVAL; + } + + spin_lock_irqsave(&pdata->lock, flags); + + pdata->power_down = 0; + + phy_start(pdata->phydev); + + xgbe_napi_enable(pdata, 0); + + hw_if->powerup_tx(pdata); + hw_if->powerup_rx(pdata); + + if (caller == XGMAC_DRIVER_CONTEXT) + netif_device_attach(netdev); + + netif_tx_start_all_queues(netdev); + + spin_unlock_irqrestore(&pdata->lock, flags); + + DBGPR("<--xgbe_powerup\n"); + + return 0; +} + +static int xgbe_start(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct net_device *netdev = pdata->netdev; + int ret; + + DBGPR("-->xgbe_start\n"); + + hw_if->init(pdata); + + phy_start(pdata->phydev); + + xgbe_napi_enable(pdata, 1); + + ret = xgbe_request_irqs(pdata); + if (ret) + goto err_napi; + + hw_if->enable_tx(pdata); + hw_if->enable_rx(pdata); + + xgbe_init_tx_timers(pdata); + + netif_tx_start_all_queues(netdev); + + DBGPR("<--xgbe_start\n"); + + return 0; + +err_napi: + xgbe_napi_disable(pdata, 1); + + phy_stop(pdata->phydev); + + hw_if->exit(pdata); + + return ret; +} + +static void xgbe_stop(struct xgbe_prv_data *pdata) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel; + struct net_device *netdev = pdata->netdev; + struct netdev_queue *txq; + unsigned int i; + + DBGPR("-->xgbe_stop\n"); + + netif_tx_stop_all_queues(netdev); + + xgbe_stop_tx_timers(pdata); + + hw_if->disable_tx(pdata); + hw_if->disable_rx(pdata); + + xgbe_free_irqs(pdata); + + xgbe_napi_disable(pdata, 1); + + phy_stop(pdata->phydev); + + hw_if->exit(pdata); + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + continue; + + txq = 
netdev_get_tx_queue(netdev, channel->queue_index); + netdev_tx_reset_queue(txq); + } + + DBGPR("<--xgbe_stop\n"); +} + +static void xgbe_restart_dev(struct xgbe_prv_data *pdata) +{ + DBGPR("-->xgbe_restart_dev\n"); + + /* If not running, "restart" will happen on open */ + if (!netif_running(pdata->netdev)) + return; + + xgbe_stop(pdata); + + xgbe_free_tx_data(pdata); + xgbe_free_rx_data(pdata); + + xgbe_start(pdata); + + DBGPR("<--xgbe_restart_dev\n"); +} + +static void xgbe_restart(struct work_struct *work) +{ + struct xgbe_prv_data *pdata = container_of(work, + struct xgbe_prv_data, + restart_work); + + rtnl_lock(); + + xgbe_restart_dev(pdata); + + rtnl_unlock(); +} + +static void xgbe_tx_tstamp(struct work_struct *work) +{ + struct xgbe_prv_data *pdata = container_of(work, + struct xgbe_prv_data, + tx_tstamp_work); + struct skb_shared_hwtstamps hwtstamps; + u64 nsec; + unsigned long flags; + + if (pdata->tx_tstamp) { + nsec = timecounter_cyc2time(&pdata->tstamp_tc, + pdata->tx_tstamp); + + memset(&hwtstamps, 0, sizeof(hwtstamps)); + hwtstamps.hwtstamp = ns_to_ktime(nsec); + skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps); + } + + dev_kfree_skb_any(pdata->tx_tstamp_skb); + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + pdata->tx_tstamp_skb = NULL; + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); +} + +static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, + struct ifreq *ifreq) +{ + if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config, + sizeof(pdata->tstamp_config))) + return -EFAULT; + + return 0; +} + +static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, + struct ifreq *ifreq) +{ + struct hwtstamp_config config; + unsigned int mac_tscr; + + if (copy_from_user(&config, ifreq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) + return -EINVAL; + + mac_tscr = 0; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + + case HWTSTAMP_TX_ON: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + + case HWTSTAMP_FILTER_ALL: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2, UDP, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + /* PTP v1, UDP, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2, UDP, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + /* PTP v1, UDP, Sync packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2, UDP, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + /* PTP v1, UDP, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, 
TSENA, 1); + break; + + /* 802.AS1, Ethernet, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* 802.AS1, Ethernet, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* 802.AS1, Ethernet, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2/802.AS1, any layer, any kind of event packet */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2/802.AS1, any layer, Sync packet */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + /* PTP v2/802.AS1, any layer, Delay_req packet */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1); + XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1); + break; + + default: + return -ERANGE; + } + + pdata->hw_if.config_tstamp(pdata, mac_tscr); + + memcpy(&pdata->tstamp_config, &config, sizeof(config)); + + return 0; +} + +static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata, + struct sk_buff *skb, + struct xgbe_packet_data *packet) +{ + unsigned long flags; + + if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) { + spin_lock_irqsave(&pdata->tstamp_lock, flags); + if (pdata->tx_tstamp_skb) { + /* Another timestamp in progress, ignore this one */ + XGMAC_SET_BITS(packet->attributes, + TX_PACKET_ATTRIBUTES, PTP, 0); + } else { + pdata->tx_tstamp_skb = skb_get(skb); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + } + + if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) + skb_tx_timestamp(skb); +} + +static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet) +{ + if (skb_vlan_tag_present(skb)) + packet->vlan_ctag = skb_vlan_tag_get(skb); +} + +static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) +{ + int ret; + + if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + TSO_ENABLE)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + packet->tcp_header_len = tcp_hdrlen(skb); + packet->tcp_payload_len = skb->len - packet->header_len; + packet->mss = 
skb_shinfo(skb)->gso_size; + DBGPR(" packet->header_len=%u\n", packet->header_len); + DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n", + packet->tcp_header_len, packet->tcp_payload_len); + DBGPR(" packet->mss=%u\n", packet->mss); + + /* Update the number of packets that will ultimately be transmitted + * along with the extra bytes for each extra packet + */ + packet->tx_packets = skb_shinfo(skb)->gso_segs; + packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len; + + return 0; +} + +static int xgbe_is_tso(struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + DBGPR(" TSO packet to be processed\n"); + + return 1; +} + +static void xgbe_packet_info(struct xgbe_prv_data *pdata, + struct xgbe_ring *ring, struct sk_buff *skb, + struct xgbe_packet_data *packet) +{ + struct skb_frag_struct *frag; + unsigned int context_desc; + unsigned int len; + unsigned int i; + + packet->skb = skb; + + context_desc = 0; + packet->rdesc_count = 0; + + packet->tx_packets = 1; + packet->tx_bytes = skb->len; + + if (xgbe_is_tso(skb)) { + /* TSO requires an extra descriptor if mss is different */ + if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { + context_desc = 1; + packet->rdesc_count++; + } + + /* TSO requires an extra descriptor for TSO header */ + packet->rdesc_count++; + + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + TSO_ENABLE, 1); + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + CSUM_ENABLE, 1); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + CSUM_ENABLE, 1); + + if (skb_vlan_tag_present(skb)) { + /* VLAN requires an extra descriptor if tag is different */ + if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) + /* We can share with the TSO context descriptor */ + if (!context_desc) { + context_desc = 1; + packet->rdesc_count++; + } + + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + VLAN_CTAG, 1); + } + + if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON)) + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + PTP, 1); + + for (len = skb_headlen(skb); len;) { + packet->rdesc_count++; + len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + for (len = skb_frag_size(frag); len; ) { + packet->rdesc_count++; + len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE); + } + } +} + +static int xgbe_open(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_desc_if *desc_if = &pdata->desc_if; + int ret; + + DBGPR("-->xgbe_open\n"); + + /* Initialize the phy */ + ret = xgbe_phy_init(pdata); + if (ret) + return ret; + + /* Enable the clocks */ + ret = clk_prepare_enable(pdata->sysclk); + if (ret) { + netdev_alert(netdev, "dma clk_prepare_enable failed\n"); + goto err_phy_init; + } + + ret = clk_prepare_enable(pdata->ptpclk); + if (ret) { + netdev_alert(netdev, "ptp clk_prepare_enable failed\n"); + goto err_sysclk; + } + + /* Calculate the Rx buffer size before allocating rings */ + ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); + if (ret < 0) + goto err_ptpclk; + pdata->rx_buf_size = ret; + + /* Allocate the channel and ring structures */ + ret = xgbe_alloc_channels(pdata); + if (ret) + goto err_ptpclk; + + /* Allocate the ring descriptors and buffers */ + ret = desc_if->alloc_ring_resources(pdata); + if (ret) + goto 
err_channels; + + /* Initialize the device restart and Tx timestamp work struct */ + INIT_WORK(&pdata->restart_work, xgbe_restart); + INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); + + ret = xgbe_start(pdata); + if (ret) + goto err_rings; + + DBGPR("<--xgbe_open\n"); + + return 0; + +err_rings: + desc_if->free_ring_resources(pdata); + +err_channels: + xgbe_free_channels(pdata); + +err_ptpclk: + clk_disable_unprepare(pdata->ptpclk); + +err_sysclk: + clk_disable_unprepare(pdata->sysclk); + +err_phy_init: + xgbe_phy_exit(pdata); + + return ret; +} + +static int xgbe_close(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_desc_if *desc_if = &pdata->desc_if; + + DBGPR("-->xgbe_close\n"); + + /* Stop the device */ + xgbe_stop(pdata); + + /* Free the ring descriptors and buffers */ + desc_if->free_ring_resources(pdata); + + /* Free the channel and ring structures */ + xgbe_free_channels(pdata); + + /* Disable the clocks */ + clk_disable_unprepare(pdata->ptpclk); + clk_disable_unprepare(pdata->sysclk); + + /* Release the phy */ + xgbe_phy_exit(pdata); + + DBGPR("<--xgbe_close\n"); + + return 0; +} + +static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_channel *channel; + struct xgbe_ring *ring; + struct xgbe_packet_data *packet; + struct netdev_queue *txq; + int ret; + + DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len); + + channel = pdata->channel + skb->queue_mapping; + txq = netdev_get_tx_queue(netdev, channel->queue_index); + ring = channel->tx_ring; + packet = &ring->packet_data; + + ret = NETDEV_TX_OK; + + if (skb->len == 0) { + netdev_err(netdev, "empty skb received from stack\n"); + dev_kfree_skb_any(skb); + goto tx_netdev_return; + } + + /* Calculate preliminary packet info */ + memset(packet, 0, sizeof(*packet)); + xgbe_packet_info(pdata, ring, skb, packet); + + /* Check that there are enough descriptors available */ + ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count); + if (ret) + goto tx_netdev_return; + + ret = xgbe_prep_tso(skb, packet); + if (ret) { + netdev_err(netdev, "error processing TSO packet\n"); + dev_kfree_skb_any(skb); + goto tx_netdev_return; + } + xgbe_prep_vlan(skb, packet); + + if (!desc_if->map_tx_skb(channel, skb)) { + dev_kfree_skb_any(skb); + goto tx_netdev_return; + } + + xgbe_prep_tx_tstamp(pdata, skb, packet); + + /* Report on the actual number of bytes (to be) sent */ + netdev_tx_sent_queue(txq, packet->tx_bytes); + + /* Configure required descriptor fields for transmission */ + hw_if->dev_xmit(channel); + +#ifdef XGMAC_ENABLE_TX_PKT_DUMP + xgbe_print_pkt(netdev, skb, true); +#endif + + /* Stop the queue in advance if there may not be enough descriptors */ + xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS); + + ret = NETDEV_TX_OK; + +tx_netdev_return: + return ret; +} + +static void xgbe_set_rx_mode(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + DBGPR("-->xgbe_set_rx_mode\n"); + + hw_if->config_rx_mode(pdata); + + DBGPR("<--xgbe_set_rx_mode\n"); +} + +static int xgbe_set_mac_address(struct net_device *netdev, void *addr) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct sockaddr *saddr = addr; + + DBGPR("-->xgbe_set_mac_address\n"); + + if 
(!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); + + hw_if->set_mac_address(pdata, netdev->dev_addr); + + DBGPR("<--xgbe_set_mac_address\n"); + + return 0; +} + +static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + int ret; + + switch (cmd) { + case SIOCGHWTSTAMP: + ret = xgbe_get_hwtstamp_settings(pdata, ifreq); + break; + + case SIOCSHWTSTAMP: + ret = xgbe_set_hwtstamp_settings(pdata, ifreq); + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int xgbe_change_mtu(struct net_device *netdev, int mtu) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + int ret; + + DBGPR("-->xgbe_change_mtu\n"); + + ret = xgbe_calc_rx_buf_size(netdev, mtu); + if (ret < 0) + return ret; + + pdata->rx_buf_size = ret; + netdev->mtu = mtu; + + xgbe_restart_dev(pdata); + + DBGPR("<--xgbe_change_mtu\n"); + + return 0; +} + +static void xgbe_tx_timeout(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + netdev_warn(netdev, "tx timeout, device restarting\n"); + schedule_work(&pdata->restart_work); +} + +static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *s) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; + + DBGPR("-->%s\n", __func__); + + pdata->hw_if.read_mmc_stats(pdata); + + s->rx_packets = pstats->rxframecount_gb; + s->rx_bytes = pstats->rxoctetcount_gb; + s->rx_errors = pstats->rxframecount_gb - + pstats->rxbroadcastframes_g - + pstats->rxmulticastframes_g - + pstats->rxunicastframes_g; + s->multicast = pstats->rxmulticastframes_g; + s->rx_length_errors = pstats->rxlengtherror; + s->rx_crc_errors = pstats->rxcrcerror; + s->rx_fifo_errors = pstats->rxfifooverflow; + + s->tx_packets = pstats->txframecount_gb; + s->tx_bytes = pstats->txoctetcount_gb; + s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; + s->tx_dropped = netdev->stats.tx_dropped; + + DBGPR("<--%s\n", __func__); + + return s; +} + +static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + DBGPR("-->%s\n", __func__); + + set_bit(vid, pdata->active_vlans); + hw_if->update_vlan_hash_table(pdata); + + DBGPR("<--%s\n", __func__); + + return 0; +} + +static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + DBGPR("-->%s\n", __func__); + + clear_bit(vid, pdata->active_vlans); + hw_if->update_vlan_hash_table(pdata); + + DBGPR("<--%s\n", __func__); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void xgbe_poll_controller(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_channel *channel; + unsigned int i; + + DBGPR("-->xgbe_poll_controller\n"); + + if (pdata->per_channel_irq) { + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + xgbe_dma_isr(channel->dma_irq, channel); + } else { + disable_irq(pdata->dev_irq); + xgbe_isr(pdata->dev_irq, pdata); + enable_irq(pdata->dev_irq); + } + + DBGPR("<--xgbe_poll_controller\n"); +} +#endif /* End CONFIG_NET_POLL_CONTROLLER */ + +static int xgbe_setup_tc(struct net_device *netdev, u8 tc) +{ + struct xgbe_prv_data *pdata 
= netdev_priv(netdev); + unsigned int offset, queue; + u8 i; + + if (tc && (tc != pdata->hw_feat.tc_cnt)) + return -EINVAL; + + if (tc) { + netdev_set_num_tc(netdev, tc); + for (i = 0, queue = 0, offset = 0; i < tc; i++) { + while ((queue < pdata->tx_q_count) && + (pdata->q2tc_map[queue] == i)) + queue++; + + DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1); + netdev_set_tc_queue(netdev, i, queue - offset, offset); + offset = queue; + } + } else { + netdev_reset_tc(netdev); + } + + return 0; +} + +static int xgbe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; + int ret = 0; + + rxhash = pdata->netdev_features & NETIF_F_RXHASH; + rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; + rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; + rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; + + if ((features & NETIF_F_RXHASH) && !rxhash) + ret = hw_if->enable_rss(pdata); + else if (!(features & NETIF_F_RXHASH) && rxhash) + ret = hw_if->disable_rss(pdata); + if (ret) + return ret; + + if ((features & NETIF_F_RXCSUM) && !rxcsum) + hw_if->enable_rx_csum(pdata); + else if (!(features & NETIF_F_RXCSUM) && rxcsum) + hw_if->disable_rx_csum(pdata); + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) + hw_if->enable_rx_vlan_stripping(pdata); + else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan) + hw_if->disable_rx_vlan_stripping(pdata); + + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter) + hw_if->enable_rx_vlan_filtering(pdata); + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) + hw_if->disable_rx_vlan_filtering(pdata); + + pdata->netdev_features = features; + + DBGPR("<--xgbe_set_features\n"); + + return 0; +} + +static const struct net_device_ops xgbe_netdev_ops = { + .ndo_open = xgbe_open, + .ndo_stop = xgbe_close, + .ndo_start_xmit = xgbe_xmit, + .ndo_set_rx_mode = xgbe_set_rx_mode, + .ndo_set_mac_address = xgbe_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = xgbe_ioctl, + .ndo_change_mtu = xgbe_change_mtu, + .ndo_tx_timeout = xgbe_tx_timeout, + .ndo_get_stats64 = xgbe_get_stats64, + .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = xgbe_poll_controller, +#endif + .ndo_setup_tc = xgbe_setup_tc, + .ndo_set_features = xgbe_set_features, +}; + +struct net_device_ops *xgbe_get_netdev_ops(void) +{ + return (struct net_device_ops *)&xgbe_netdev_ops; +} + +static void xgbe_rx_refresh(struct xgbe_channel *channel) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + + while (ring->dirty != ring->cur) { + rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); + + /* Reset rdata values */ + desc_if->unmap_rdata(pdata, rdata); + + if (desc_if->map_rx_buffer(pdata, ring, rdata)) + break; + + hw_if->rx_desc_reset(pdata, rdata, ring->dirty); + + ring->dirty++; + } + + /* Make sure everything is written before the register write */ + wmb(); + + /* Update the Rx Tail Pointer Register with address of + * the last cleaned entry */ + rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1); + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, + lower_32_bits(rdata->rdesc_dma)); +} + 
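A note on the ring indexing used by xgbe_rx_refresh() above (and by the Tx/Rx poll routines further down): "cur" and "dirty" are free-running counters that are only reduced to a ring slot at lookup time, which works because the descriptor counts are powers of two (the probe code rejects anything else). The short standalone sketch below illustrates that scheme only; it is not driver code, the names RING_SIZE, demo_desc and demo_get_desc are made up for illustration, and in the driver the lookup is done by the XGBE_GET_DESC_DATA macro from xgbe.h.

#include <stdio.h>

#define RING_SIZE 512			/* must be a power of two */

struct demo_desc { int in_use; };

static struct demo_desc ring[RING_SIZE];

/* Reduce a free-running index to a ring slot with a mask, so the
 * "cur" and "dirty" counters never need explicit wrap-around handling.
 */
static struct demo_desc *demo_get_desc(unsigned int idx)
{
	return &ring[idx & (RING_SIZE - 1)];
}

int main(void)
{
	unsigned int cur = 700, dirty = 650;	/* both past RING_SIZE */

	/* Descriptors handed to the hardware but not yet reclaimed */
	printf("outstanding descriptors: %u\n", cur - dirty);

	/* The same slot is found whether or not the counter has wrapped */
	printf("slot for cur: %td\n", demo_get_desc(cur) - ring);

	return 0;
}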
+static struct sk_buff *xgbe_create_skb(struct napi_struct *napi, + struct xgbe_ring_data *rdata, + unsigned int *len) +{ + struct sk_buff *skb; + u8 *packet; + unsigned int copy_len; + + skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); + if (!skb) + return NULL; + + packet = page_address(rdata->rx.hdr.pa.pages) + + rdata->rx.hdr.pa.pages_offset; + copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len; + copy_len = min(rdata->rx.hdr.dma_len, copy_len); + skb_copy_to_linear_data(skb, packet, copy_len); + skb_put(skb, copy_len); + + *len -= copy_len; + + return skb; +} + +static int xgbe_tx_poll(struct xgbe_channel *channel) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct xgbe_ring *ring = channel->tx_ring; + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + struct net_device *netdev = pdata->netdev; + struct netdev_queue *txq; + int processed = 0; + unsigned int tx_packets = 0, tx_bytes = 0; + + DBGPR("-->xgbe_tx_poll\n"); + + /* Nothing to do if there isn't a Tx ring for this channel */ + if (!ring) + return 0; + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + + while ((processed < XGBE_TX_DESC_MAX_PROC) && + (ring->dirty != ring->cur)) { + rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); + rdesc = rdata->rdesc; + + if (!hw_if->tx_complete(rdesc)) + break; + + /* Make sure descriptor fields are read after reading the OWN + * bit */ + dma_rmb(); + +#ifdef XGMAC_ENABLE_TX_DESC_DUMP + xgbe_dump_tx_desc(ring, ring->dirty, 1, 0); +#endif + + if (hw_if->is_last_desc(rdesc)) { + tx_packets += rdata->tx.packets; + tx_bytes += rdata->tx.bytes; + } + + /* Free the SKB and reset the descriptor for re-use */ + desc_if->unmap_rdata(pdata, rdata); + hw_if->tx_desc_reset(rdata); + + processed++; + ring->dirty++; + } + + if (!processed) + return 0; + + netdev_tx_completed_queue(txq, tx_packets, tx_bytes); + + if ((ring->tx.queue_stopped == 1) && + (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) { + ring->tx.queue_stopped = 0; + netif_tx_wake_queue(txq); + } + + DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); + + return processed; +} + +static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) +{ + struct xgbe_prv_data *pdata = channel->pdata; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + struct xgbe_packet_data *packet; + struct net_device *netdev = pdata->netdev; + struct napi_struct *napi; + struct sk_buff *skb; + struct skb_shared_hwtstamps *hwtstamps; + unsigned int incomplete, error, context_next, context; + unsigned int len, put_len, max_len; + unsigned int received = 0; + int packet_count = 0; + + DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); + + /* Nothing to do if there isn't a Rx ring for this channel */ + if (!ring) + return 0; + + napi = (pdata->per_channel_irq) ? 
&channel->napi : &pdata->napi; + + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + packet = &ring->packet_data; + while (packet_count < budget) { + DBGPR(" cur = %d\n", ring->cur); + + /* First time in loop see if we need to restore state */ + if (!received && rdata->state_saved) { + incomplete = rdata->state.incomplete; + context_next = rdata->state.context_next; + skb = rdata->state.skb; + error = rdata->state.error; + len = rdata->state.len; + } else { + memset(packet, 0, sizeof(*packet)); + incomplete = 0; + context_next = 0; + skb = NULL; + error = 0; + len = 0; + } + +read_again: + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + + if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3)) + xgbe_rx_refresh(channel); + + if (hw_if->dev_read(channel)) + break; + + received++; + ring->cur++; + + incomplete = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, + INCOMPLETE); + context_next = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT); + context = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, + CONTEXT); + + /* Earlier error, just drain the remaining data */ + if ((incomplete || context_next) && error) + goto read_again; + + if (error || packet->errors) { + if (packet->errors) + DBGPR("Error in received packet\n"); + dev_kfree_skb(skb); + goto next_packet; + } + + if (!context) { + put_len = rdata->rx.len - len; + len += put_len; + + if (!skb) { + dma_sync_single_for_cpu(pdata->dev, + rdata->rx.hdr.dma, + rdata->rx.hdr.dma_len, + DMA_FROM_DEVICE); + + skb = xgbe_create_skb(napi, rdata, &put_len); + if (!skb) { + error = 1; + goto skip_data; + } + } + + if (put_len) { + dma_sync_single_for_cpu(pdata->dev, + rdata->rx.buf.dma, + rdata->rx.buf.dma_len, + DMA_FROM_DEVICE); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rdata->rx.buf.pa.pages, + rdata->rx.buf.pa.pages_offset, + put_len, rdata->rx.buf.dma_len); + rdata->rx.buf.pa.pages = NULL; + } + } + +skip_data: + if (incomplete || context_next) + goto read_again; + + if (!skb) + goto next_packet; + + /* Be sure we don't exceed the configured MTU */ + max_len = netdev->mtu + ETH_HLEN; + if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (skb->protocol == htons(ETH_P_8021Q))) + max_len += VLAN_HLEN; + + if (skb->len > max_len) { + DBGPR("packet length exceeds configured MTU\n"); + dev_kfree_skb(skb); + goto next_packet; + } + +#ifdef XGMAC_ENABLE_RX_PKT_DUMP + xgbe_print_pkt(netdev, skb, false); +#endif + + skb_checksum_none_assert(skb); + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, CSUM_DONE)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, VLAN_CTAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + packet->vlan_ctag); + + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, RX_TSTAMP)) { + u64 nsec; + + nsec = timecounter_cyc2time(&pdata->tstamp_tc, + packet->rx_tstamp); + hwtstamps = skb_hwtstamps(skb); + hwtstamps->hwtstamp = ns_to_ktime(nsec); + } + + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, RSS_HASH)) + skb_set_hash(skb, packet->rss_hash, + packet->rss_hash_type); + + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, channel->queue_index); + skb_mark_napi_id(skb, napi); + + netdev->last_rx = jiffies; + napi_gro_receive(napi, skb); + +next_packet: + packet_count++; + } + + /* Check if we need to save state before leaving */ + if (received && (incomplete || context_next)) { + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + 
rdata->state_saved = 1; + rdata->state.incomplete = incomplete; + rdata->state.context_next = context_next; + rdata->state.skb = skb; + rdata->state.len = len; + rdata->state.error = error; + } + + DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count); + + return packet_count; +} + +static int xgbe_one_poll(struct napi_struct *napi, int budget) +{ + struct xgbe_channel *channel = container_of(napi, struct xgbe_channel, + napi); + int processed = 0; + + DBGPR("-->xgbe_one_poll: budget=%d\n", budget); + + /* Cleanup Tx ring first */ + xgbe_tx_poll(channel); + + /* Process Rx ring next */ + processed = xgbe_rx_poll(channel, budget); + + /* If we processed everything, we are done */ + if (processed < budget) { + /* Turn off polling */ + napi_complete(napi); + + /* Enable Tx and Rx interrupts */ + enable_irq(channel->dma_irq); + } + + DBGPR("<--xgbe_one_poll: received = %d\n", processed); + + return processed; +} + +static int xgbe_all_poll(struct napi_struct *napi, int budget) +{ + struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data, + napi); + struct xgbe_channel *channel; + int ring_budget; + int processed, last_processed; + unsigned int i; + + DBGPR("-->xgbe_all_poll: budget=%d\n", budget); + + processed = 0; + ring_budget = budget / pdata->rx_ring_count; + do { + last_processed = processed; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + /* Cleanup Tx ring first */ + xgbe_tx_poll(channel); + + /* Process Rx ring next */ + if (ring_budget > (budget - processed)) + ring_budget = budget - processed; + processed += xgbe_rx_poll(channel, ring_budget); + } + } while ((processed < budget) && (processed != last_processed)); + + /* If we processed everything, we are done */ + if (processed < budget) { + /* Turn off polling */ + napi_complete(napi); + + /* Enable Tx and Rx interrupts */ + xgbe_enable_rx_tx_ints(pdata); + } + + DBGPR("<--xgbe_all_poll: received = %d\n", processed); + + return processed; +} + +void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx, + unsigned int count, unsigned int flag) +{ + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + + while (count--) { + rdata = XGBE_GET_DESC_DATA(ring, idx); + rdesc = rdata->rdesc; + pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, + (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE", + le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1), + le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3)); + idx++; + } +} + +void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc, + unsigned int idx) +{ + pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx, + le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1), + le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3)); +} + +void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) +{ + struct ethhdr *eth = (struct ethhdr *)skb->data; + unsigned char *buf = skb->data; + unsigned char buffer[128]; + unsigned int i, j; + + netdev_alert(netdev, "\n************** SKB dump ****************\n"); + + netdev_alert(netdev, "%s packet of %d bytes\n", + (tx_rx ? 
"TX" : "RX"), skb->len); + + netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest); + netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source); + netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto)); + + for (i = 0, j = 0; i < skb->len;) { + j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx", + buf[i++]); + + if ((i % 32) == 0) { + netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer); + j = 0; + } else if ((i % 16) == 0) { + buffer[j++] = ' '; + buffer[j++] = ' '; + } else if ((i % 4) == 0) { + buffer[j++] = ' '; + } + } + if (i % 32) + netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer); + + netdev_alert(netdev, "\n************** SKB dump ****************\n"); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c new file mode 100644 index 000000000..5f149e8ee --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -0,0 +1,601 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+struct xgbe_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_size;
+	int stat_offset;
+};
+
+#define XGMAC_MMC_STAT(_string, _var)				\
+	{ _string,						\
+	  FIELD_SIZEOF(struct xgbe_mmc_stats, _var),		\
+	  offsetof(struct xgbe_prv_data, mmc_stats._var),	\
+	}
+
+static const struct xgbe_stats xgbe_gstring_stats[] = {
+	XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
+	XGMAC_MMC_STAT("tx_packets", txframecount_gb),
+	XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
+	XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+	XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+	XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+	XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+	XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+	XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+	XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+	XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+	XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+	XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
+	XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
+
+	XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
+	XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
+	XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
+	XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+	XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+	XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
+	XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+	XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+	XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+	XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+	XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+	XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+	XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
+	XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
+	XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
+	XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
+	XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+	XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
+	XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+	XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
+	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+};
+
+#define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
+
+static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+	int i;
+
+	DBGPR("-->%s\n", __func__);
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < XGBE_STATS_COUNT; i++) {
+			memcpy(data, xgbe_gstring_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+
+	DBGPR("<--%s\n", __func__);
+}
+
+static void xgbe_get_ethtool_stats(struct
net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + u8 *stat; + int i; + + DBGPR("-->%s\n", __func__); + + pdata->hw_if.read_mmc_stats(pdata); + for (i = 0; i < XGBE_STATS_COUNT; i++) { + stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset; + *data++ = *(u64 *)stat; + } + + DBGPR("<--%s\n", __func__); +} + +static int xgbe_get_sset_count(struct net_device *netdev, int stringset) +{ + int ret; + + DBGPR("-->%s\n", __func__); + + switch (stringset) { + case ETH_SS_STATS: + ret = XGBE_STATS_COUNT; + break; + + default: + ret = -EOPNOTSUPP; + } + + DBGPR("<--%s\n", __func__); + + return ret; +} + +static void xgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + DBGPR("-->xgbe_get_pauseparam\n"); + + pause->autoneg = pdata->pause_autoneg; + pause->tx_pause = pdata->tx_pause; + pause->rx_pause = pdata->rx_pause; + + DBGPR("<--xgbe_get_pauseparam\n"); +} + +static int xgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct phy_device *phydev = pdata->phydev; + int ret = 0; + + DBGPR("-->xgbe_set_pauseparam\n"); + + DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n", + pause->autoneg, pause->tx_pause, pause->rx_pause); + + pdata->pause_autoneg = pause->autoneg; + if (pause->autoneg) { + phydev->advertising |= ADVERTISED_Pause; + phydev->advertising |= ADVERTISED_Asym_Pause; + + } else { + phydev->advertising &= ~ADVERTISED_Pause; + phydev->advertising &= ~ADVERTISED_Asym_Pause; + + pdata->tx_pause = pause->tx_pause; + pdata->rx_pause = pause->rx_pause; + } + + if (netif_running(netdev)) + ret = phy_start_aneg(phydev); + + DBGPR("<--xgbe_set_pauseparam\n"); + + return ret; +} + +static int xgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + int ret; + + DBGPR("-->xgbe_get_settings\n"); + + if (!pdata->phydev) + return -ENODEV; + + ret = phy_ethtool_gset(pdata->phydev, cmd); + + DBGPR("<--xgbe_get_settings\n"); + + return ret; +} + +static int xgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct phy_device *phydev = pdata->phydev; + u32 speed; + int ret; + + DBGPR("-->xgbe_set_settings\n"); + + if (!pdata->phydev) + return -ENODEV; + + speed = ethtool_cmd_speed(cmd); + + if (cmd->phy_address != phydev->addr) + return -EINVAL; + + if ((cmd->autoneg != AUTONEG_ENABLE) && + (cmd->autoneg != AUTONEG_DISABLE)) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE) { + switch (speed) { + case SPEED_10000: + case SPEED_2500: + case SPEED_1000: + break; + default: + return -EINVAL; + } + + if (cmd->duplex != DUPLEX_FULL) + return -EINVAL; + } + + cmd->advertising &= phydev->supported; + if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) + return -EINVAL; + + ret = 0; + phydev->autoneg = cmd->autoneg; + phydev->speed = speed; + phydev->duplex = cmd->duplex; + phydev->advertising = cmd->advertising; + + if (cmd->autoneg == AUTONEG_ENABLE) + phydev->advertising |= ADVERTISED_Autoneg; + else + phydev->advertising &= ~ADVERTISED_Autoneg; + + if (netif_running(netdev)) + ret = phy_start_aneg(phydev); + + DBGPR("<--xgbe_set_settings\n"); + + return ret; +} + +static void xgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct xgbe_prv_data *pdata = 
netdev_priv(netdev); + struct xgbe_hw_features *hw_feat = &pdata->hw_feat; + + strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(pdata->dev), + sizeof(drvinfo->bus_info)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", + XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); + drvinfo->n_stats = XGBE_STATS_COUNT; +} + +static int xgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + DBGPR("-->xgbe_get_coalesce\n"); + + memset(ec, 0, sizeof(struct ethtool_coalesce)); + + ec->rx_coalesce_usecs = pdata->rx_usecs; + ec->rx_max_coalesced_frames = pdata->rx_frames; + + ec->tx_max_coalesced_frames = pdata->tx_frames; + + DBGPR("<--xgbe_get_coalesce\n"); + + return 0; +} + +static int xgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int rx_frames, rx_riwt, rx_usecs; + unsigned int tx_frames; + + DBGPR("-->xgbe_set_coalesce\n"); + + /* Check for not supported parameters */ + if ((ec->rx_coalesce_usecs_irq) || + (ec->rx_max_coalesced_frames_irq) || + (ec->tx_coalesce_usecs) || + (ec->tx_coalesce_usecs_irq) || + (ec->tx_max_coalesced_frames_irq) || + (ec->stats_block_coalesce_usecs) || + (ec->use_adaptive_rx_coalesce) || + (ec->use_adaptive_tx_coalesce) || + (ec->pkt_rate_low) || + (ec->rx_coalesce_usecs_low) || + (ec->rx_max_coalesced_frames_low) || + (ec->tx_coalesce_usecs_low) || + (ec->tx_max_coalesced_frames_low) || + (ec->pkt_rate_high) || + (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || + (ec->tx_coalesce_usecs_high) || + (ec->tx_max_coalesced_frames_high) || + (ec->rate_sample_interval)) + return -EOPNOTSUPP; + + rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs); + rx_usecs = ec->rx_coalesce_usecs; + rx_frames = ec->rx_max_coalesced_frames; + + /* Use smallest possible value if conversion resulted in zero */ + if (rx_usecs && !rx_riwt) + rx_riwt = 1; + + /* Check the bounds of values for Rx */ + if (rx_riwt > XGMAC_MAX_DMA_RIWT) { + netdev_alert(netdev, "rx-usec is limited to %d usecs\n", + hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT)); + return -EINVAL; + } + if (rx_frames > pdata->rx_desc_count) { + netdev_alert(netdev, "rx-frames is limited to %d frames\n", + pdata->rx_desc_count); + return -EINVAL; + } + + tx_frames = ec->tx_max_coalesced_frames; + + /* Check the bounds of values for Tx */ + if (tx_frames > pdata->tx_desc_count) { + netdev_alert(netdev, "tx-frames is limited to %d frames\n", + pdata->tx_desc_count); + return -EINVAL; + } + + pdata->rx_riwt = rx_riwt; + pdata->rx_usecs = rx_usecs; + pdata->rx_frames = rx_frames; + hw_if->config_rx_coalesce(pdata); + + pdata->tx_frames = tx_frames; + hw_if->config_tx_coalesce(pdata); + + DBGPR("<--xgbe_set_coalesce\n"); + + return 0; +} + +static int xgbe_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = pdata->rx_ring_count; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 xgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = 
netdev_priv(netdev); + + return sizeof(pdata->rss_key); +} + +static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return ARRAY_SIZE(pdata->rss_table); +} + +static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int i; + + if (indir) { + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) + indir[i] = XGMAC_GET_BITS(pdata->rss_table[i], + MAC_RSSDR, DMCH); + } + + if (key) + memcpy(key, pdata->rss_key, sizeof(pdata->rss_key)); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int ret; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + ret = hw_if->set_rss_lookup_table(pdata, indir); + if (ret) + return ret; + } + + if (key) { + ret = hw_if->set_rss_hash_key(pdata, key); + if (ret) + return ret; + } + + return 0; +} + +static int xgbe_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *ts_info) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (pdata->ptp_clock) + ts_info->phc_index = ptp_clock_index(pdata->ptp_clock); + else + ts_info->phc_index = -1; + + ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops xgbe_ethtool_ops = { + .get_settings = xgbe_get_settings, + .set_settings = xgbe_set_settings, + .get_drvinfo = xgbe_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_coalesce = xgbe_get_coalesce, + .set_coalesce = xgbe_set_coalesce, + .get_pauseparam = xgbe_get_pauseparam, + .set_pauseparam = xgbe_set_pauseparam, + .get_strings = xgbe_get_strings, + .get_ethtool_stats = xgbe_get_ethtool_stats, + .get_sset_count = xgbe_get_sset_count, + .get_rxnfc = xgbe_get_rxnfc, + .get_rxfh_key_size = xgbe_get_rxfh_key_size, + .get_rxfh_indir_size = xgbe_get_rxfh_indir_size, + .get_rxfh = xgbe_get_rxfh, + .set_rxfh = xgbe_set_rxfh, + .get_ts_info = xgbe_get_ts_info, +}; + +struct ethtool_ops *xgbe_get_ethtool_ops(void) +{ + return (struct ethtool_ops *)&xgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c new file mode 100644 index 000000000..714905384 --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -0,0 +1,631 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. 
+ * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
+#include <linux/mdio.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(XGBE_DRV_VERSION);
+MODULE_DESCRIPTION(XGBE_DRV_DESC);
+
+static void xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+	DBGPR("-->xgbe_default_config\n");
+
+	pdata->pblx8 = DMA_PBL_X8_ENABLE;
+	pdata->tx_sf_mode = MTL_TSF_ENABLE;
+	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+	pdata->tx_pbl = DMA_PBL_16;
+	pdata->tx_osp_mode = DMA_OSP_ENABLE;
+	pdata->rx_sf_mode = MTL_RSF_DISABLE;
+	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+	pdata->rx_pbl = DMA_PBL_16;
+	pdata->pause_autoneg = 1;
+	pdata->tx_pause = 1;
+	pdata->rx_pause = 1;
+	pdata->phy_speed = SPEED_UNKNOWN;
+	pdata->power_down = 0;
+	pdata->default_autoneg = AUTONEG_ENABLE;
+	pdata->default_speed = SPEED_10000;
+
+	DBGPR("<--xgbe_default_config\n");
+}
+
+static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+	xgbe_init_function_ptrs_dev(&pdata->hw_if);
+	xgbe_init_function_ptrs_desc(&pdata->desc_if);
+}
+
+#ifdef CONFIG_ACPI
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+	struct acpi_device *adev = pdata->adev;
+	struct device *dev = pdata->dev;
+	u32 property;
+	acpi_handle handle;
+	acpi_status status;
+	unsigned long long data;
+	int cca;
+	int ret;
+
+	/* Obtain the system clock setting */
+	ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+	if (ret) {
+		dev_err(dev, "unable to obtain %s property\n",
+			XGBE_ACPI_DMA_FREQ);
+		return ret;
+	}
+	pdata->sysclk_rate = property;
+
+	/* Obtain the PTP clock setting */
+	ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+	if (ret) {
+		dev_err(dev, "unable to obtain %s property\n",
+			XGBE_ACPI_PTP_FREQ);
+		return ret;
+	}
+	pdata->ptpclk_rate = property;
+
+	/* Retrieve the device cache coherency value */
+	handle = adev->handle;
+	do {
+		status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+		if (!ACPI_FAILURE(status)) {
+			cca = data;
+			break;
+		}
+
+		status = acpi_get_parent(handle, &handle);
+	} while (!ACPI_FAILURE(status));
+
+	if (ACPI_FAILURE(status)) {
+		dev_err(dev, "error obtaining acpi coherency value\n");
+		return -EINVAL;
+	}
+	pdata->coherent = !!cca;
+
+	return 0;
+}
+#else /* CONFIG_ACPI */
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+	struct device *dev = pdata->dev;
+
+	/* Obtain the system clock setting */
+	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+	if (IS_ERR(pdata->sysclk)) {
+		dev_err(dev, "dma devm_clk_get failed\n");
+		return PTR_ERR(pdata->sysclk);
+	}
+	pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+	/* Obtain the PTP clock setting */
+	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+	if (IS_ERR(pdata->ptpclk)) {
+		dev_err(dev, "ptp devm_clk_get failed\n");
+		return PTR_ERR(pdata->ptpclk);
+	}
+	pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+	/* Retrieve the device cache coherency value */
+	pdata->coherent = of_dma_is_coherent(dev->of_node);
+
+	return 0;
+}
+#else /* CONFIG_OF */
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int xgbe_probe(struct platform_device *pdev)
+{
+	struct xgbe_prv_data *pdata;
+	struct xgbe_hw_if *hw_if;
+	struct xgbe_desc_if *desc_if;
+	struct net_device *netdev;
+ struct device *dev = &pdev->dev; + struct resource *res; + const char *phy_mode; + unsigned int i; + int ret; + + DBGPR("--> xgbe_probe\n"); + + netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data), + XGBE_MAX_DMA_CHANNELS); + if (!netdev) { + dev_err(dev, "alloc_etherdev failed\n"); + ret = -ENOMEM; + goto err_alloc; + } + SET_NETDEV_DEV(netdev, dev); + pdata = netdev_priv(netdev); + pdata->netdev = netdev; + pdata->pdev = pdev; + pdata->adev = ACPI_COMPANION(dev); + pdata->dev = dev; + platform_set_drvdata(pdev, netdev); + + spin_lock_init(&pdata->lock); + mutex_init(&pdata->xpcs_mutex); + mutex_init(&pdata->rss_mutex); + spin_lock_init(&pdata->tstamp_lock); + + /* Check if we should use ACPI or DT */ + pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1; + + /* Set and validate the number of descriptors for a ring */ + BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT); + pdata->tx_desc_count = XGBE_TX_DESC_CNT; + if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { + dev_err(dev, "tx descriptor count (%d) is not valid\n", + pdata->tx_desc_count); + ret = -EINVAL; + goto err_io; + } + BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT); + pdata->rx_desc_count = XGBE_RX_DESC_CNT; + if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) { + dev_err(dev, "rx descriptor count (%d) is not valid\n", + pdata->rx_desc_count); + ret = -EINVAL; + goto err_io; + } + + /* Obtain the mmio areas for the device */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdata->xgmac_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->xgmac_regs)) { + dev_err(dev, "xgmac ioremap failed\n"); + ret = PTR_ERR(pdata->xgmac_regs); + goto err_io; + } + DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + pdata->xpcs_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->xpcs_regs)) { + dev_err(dev, "xpcs ioremap failed\n"); + ret = PTR_ERR(pdata->xpcs_regs); + goto err_io; + } + DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs); + + /* Retrieve the MAC address */ + ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY, + pdata->mac_addr, + sizeof(pdata->mac_addr)); + if (ret || !is_valid_ether_addr(pdata->mac_addr)) { + dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY); + if (!ret) + ret = -EINVAL; + goto err_io; + } + + /* Retrieve the PHY mode - it must be "xgmii" */ + ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY, + &phy_mode); + if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) { + dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY); + if (!ret) + ret = -EINVAL; + goto err_io; + } + pdata->phy_mode = PHY_INTERFACE_MODE_XGMII; + + /* Check for per channel interrupt support */ + if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) + pdata->per_channel_irq = 1; + + /* Obtain device settings unique to ACPI/OF */ + if (pdata->use_acpi) + ret = xgbe_acpi_support(pdata); + else + ret = xgbe_of_support(pdata); + if (ret) + goto err_io; + + /* Set the DMA coherency values */ + if (pdata->coherent) { + pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; + pdata->arcache = XGBE_DMA_OS_ARCACHE; + pdata->awcache = XGBE_DMA_OS_AWCACHE; + } else { + pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN; + pdata->arcache = XGBE_DMA_SYS_ARCACHE; + pdata->awcache = XGBE_DMA_SYS_AWCACHE; + } + + /* Get the device interrupt */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "platform_get_irq 0 failed\n"); + goto err_io; + } + pdata->dev_irq = ret; + + netdev->irq = pdata->dev_irq; + 
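/*
 * Illustrative sketch (hedged, not from the driver source): the checks
 * above require XGBE_TX_DESC_CNT and XGBE_RX_DESC_CNT to be powers of
 * two, first at build time (BUILD_BUG_ON_NOT_POWER_OF_2) and again at
 * run time with the "count & (count - 1)" test. A power-of-two ring
 * size lets a free-running descriptor index wrap with a cheap bitwise
 * AND instead of a modulo, which is what the XGBE_GET_DESC_DATA()
 * helper in xgbe.h relies on. The demo_* names below are hypothetical.
 */
#include <stdbool.h>

#define DEMO_RING_SIZE	512			/* must be a power of two */

static inline bool demo_is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));		/* same test as the probe code */
}

static inline unsigned int demo_ring_slot(unsigned int free_running_idx)
{
	/* e.g. index 512 wraps to slot 0, index 513 wraps to slot 1 */
	return free_running_idx & (DEMO_RING_SIZE - 1);
}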
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+	/* Set all the function pointers */
+	xgbe_init_all_fptrs(pdata);
+	hw_if = &pdata->hw_if;
+	desc_if = &pdata->desc_if;
+
+	/* Issue software reset to device */
+	hw_if->exit(pdata);
+
+	/* Populate the hardware features */
+	xgbe_get_all_hw_features(pdata);
+
+	/* Set default configuration data */
+	xgbe_default_config(pdata);
+
+	/* Set the DMA mask */
+	if (!dev->dma_mask)
+		dev->dma_mask = &dev->coherent_dma_mask;
+	ret = dma_set_mask_and_coherent(dev,
+					DMA_BIT_MASK(pdata->hw_feat.dma_width));
+	if (ret) {
+		dev_err(dev, "dma_set_mask_and_coherent failed\n");
+		goto err_io;
+	}
+
+	/* Calculate the number of Tx and Rx rings to be created
+	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+	 *   the number of Tx queues to the number of Tx channels
+	 *   enabled
+	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
+	 *   number of Rx queues
+	 */
+	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+				     pdata->hw_feat.tx_ch_cnt);
+	pdata->tx_q_count = pdata->tx_ring_count;
+	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+	if (ret) {
+		dev_err(dev, "error setting real tx queue count\n");
+		goto err_io;
+	}
+
+	pdata->rx_ring_count = min_t(unsigned int,
+				     netif_get_num_default_rss_queues(),
+				     pdata->hw_feat.rx_ch_cnt);
+	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
+	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+	if (ret) {
+		dev_err(dev, "error setting real rx queue count\n");
+		goto err_io;
+	}
+
+	/* Initialize RSS hash key and lookup table */
+	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+			       i % pdata->rx_ring_count);
+
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+
+	/* Prepare to register with MDIO */
+	pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
+	if (!pdata->mii_bus_id) {
+		dev_err(dev, "failed to allocate mii bus id\n");
+		ret = -ENOMEM;
+		goto err_io;
+	}
+	ret = xgbe_mdio_register(pdata);
+	if (ret)
+		goto err_bus_id;
+
+	/* Set device operations */
+	netdev->netdev_ops = xgbe_get_netdev_ops();
+	netdev->ethtool_ops = xgbe_get_ethtool_ops();
+#ifdef CONFIG_AMD_XGBE_DCB
+	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
+#endif
+
+	/* Set device features */
+	netdev->hw_features = NETIF_F_SG |
+			      NETIF_F_IP_CSUM |
+			      NETIF_F_IPV6_CSUM |
+			      NETIF_F_RXCSUM |
+			      NETIF_F_TSO |
+			      NETIF_F_TSO6 |
+			      NETIF_F_GRO |
+			      NETIF_F_HW_VLAN_CTAG_RX |
+			      NETIF_F_HW_VLAN_CTAG_TX |
+			      NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (pdata->hw_feat.rss)
+		netdev->hw_features |= NETIF_F_RXHASH;
+
+	netdev->vlan_features |= NETIF_F_SG |
+				 NETIF_F_IP_CSUM |
+				 NETIF_F_IPV6_CSUM |
+				 NETIF_F_TSO |
+				 NETIF_F_TSO6;
+
+	netdev->features |= netdev->hw_features;
+	pdata->netdev_features = netdev->features;
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* Use default watchdog timeout */
+	netdev->watchdog_timeo = 0;
+
+	xgbe_init_rx_coalesce(pdata);
+	xgbe_init_tx_coalesce(pdata);
+
+	netif_carrier_off(netdev);
+	ret = register_netdev(netdev);
+	if (ret) {
+		dev_err(dev, "net device registration failed\n");
+		goto err_reg_netdev;
+	}
+
+	xgbe_ptp_register(pdata);
+
+	xgbe_debugfs_init(pdata);
+
+	netdev_notice(netdev, "net device enabled\n");
+
+	DBGPR("<-- xgbe_probe\n");
+
+	return 0;
+
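/*
 * Illustrative sketch (hedged, not from the driver source): the probe
 * code above sizes the Tx rings as min(online CPUs, hardware Tx
 * channels) and then spreads the RSS indirection table across the Rx
 * rings round-robin via "i % rx_ring_count". With, say, 4 Rx rings the
 * 256-entry table reads 0,1,2,3,0,1,2,3,... The demo_* names are
 * hypothetical.
 */
#define DEMO_RSS_TABLE_SIZE	256

static void demo_fill_rss_table(unsigned int *table, unsigned int rx_ring_count)
{
	unsigned int i;

	for (i = 0; i < DEMO_RSS_TABLE_SIZE; i++)
		table[i] = i % rx_ring_count;	/* DMA channel for this hash bucket */
}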
+err_reg_netdev: + xgbe_mdio_unregister(pdata); + +err_bus_id: + kfree(pdata->mii_bus_id); + +err_io: + free_netdev(netdev); + +err_alloc: + dev_notice(dev, "net device not enabled\n"); + + return ret; +} + +static int xgbe_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + DBGPR("-->xgbe_remove\n"); + + xgbe_debugfs_exit(pdata); + + xgbe_ptp_unregister(pdata); + + unregister_netdev(netdev); + + xgbe_mdio_unregister(pdata); + + kfree(pdata->mii_bus_id); + + free_netdev(netdev); + + DBGPR("<--xgbe_remove\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int xgbe_suspend(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + int ret; + + DBGPR("-->xgbe_suspend\n"); + + if (!netif_running(netdev)) { + DBGPR("<--xgbe_dev_suspend\n"); + return -EINVAL; + } + + ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT); + + DBGPR("<--xgbe_suspend\n"); + + return ret; +} + +static int xgbe_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + int ret; + + DBGPR("-->xgbe_resume\n"); + + if (!netif_running(netdev)) { + DBGPR("<--xgbe_dev_resume\n"); + return -EINVAL; + } + + ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT); + + DBGPR("<--xgbe_resume\n"); + + return ret; +} +#endif /* CONFIG_PM */ + +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgbe_acpi_match[] = { + { "AMDI8001", 0 }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id xgbe_of_match[] = { + { .compatible = "amd,xgbe-seattle-v1a", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, xgbe_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume); + +static struct platform_driver xgbe_driver = { + .driver = { + .name = "amd-xgbe", +#ifdef CONFIG_ACPI + .acpi_match_table = xgbe_acpi_match, +#endif +#ifdef CONFIG_OF + .of_match_table = xgbe_of_match, +#endif + .pm = &xgbe_pm_ops, + }, + .probe = xgbe_probe, + .remove = xgbe_remove, +}; + +module_platform_driver(xgbe_driver); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c new file mode 100644 index 000000000..59e267f3f --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -0,0 +1,312 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. 
+ * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. 
+ * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + +static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg) +{ + struct xgbe_prv_data *pdata = mii->priv; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + int mmd_data; + + DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n", + prtad, mmd_reg); + + mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg); + + DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data); + + return mmd_data; +} + +static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg, + u16 mmd_val) +{ + struct xgbe_prv_data *pdata = mii->priv; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + int mmd_data = mmd_val; + + DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n", + prtad, mmd_reg, mmd_data); + + hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data); + + DBGPR_MDIO("<--xgbe_mdio_write\n"); + + return 0; +} + +void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) +{ + struct device *dev = pdata->dev; + struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD]; + int i; + + dev_alert(dev, "\n************* PHY Reg dump **********************\n"); + + dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1)); + dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1)); + dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1)); + dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2)); + dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1)); + dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2)); + + dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1, + XMDIO_READ(pdata, 
MDIO_MMD_AN, MDIO_CTRL1)); + dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1)); + dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n", + MDIO_AN_ADVERTISE, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE)); + dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n", + MDIO_AN_ADVERTISE + 1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1)); + dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n", + MDIO_AN_ADVERTISE + 2, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2)); + dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n", + MDIO_AN_COMP_STAT, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT)); + + dev_alert(dev, "MMD Device Mask = %#x\n", + phydev->c45_ids.devices_in_package); + for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++) + dev_alert(dev, " MMD %d: ID = %#08x\n", i, + phydev->c45_ids.device_ids[i]); + + dev_alert(dev, "\n*************************************************\n"); +} + +int xgbe_mdio_register(struct xgbe_prv_data *pdata) +{ + struct mii_bus *mii; + struct phy_device *phydev; + int ret = 0; + + DBGPR("-->xgbe_mdio_register\n"); + + mii = mdiobus_alloc(); + if (!mii) { + dev_err(pdata->dev, "mdiobus_alloc failed\n"); + return -ENOMEM; + } + + /* Register on the MDIO bus (don't probe any PHYs) */ + mii->name = XGBE_PHY_NAME; + mii->read = xgbe_mdio_read; + mii->write = xgbe_mdio_write; + snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id); + mii->priv = pdata; + mii->phy_mask = ~0; + mii->parent = pdata->dev; + ret = mdiobus_register(mii); + if (ret) { + dev_err(pdata->dev, "mdiobus_register failed\n"); + goto err_mdiobus_alloc; + } + DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id); + + /* Probe the PCS using Clause 45 */ + phydev = get_phy_device(mii, XGBE_PRTAD, true); + if (IS_ERR(phydev) || !phydev || + !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) { + dev_err(pdata->dev, "get_phy_device failed\n"); + ret = phydev ? 
PTR_ERR(phydev) : -ENOLINK; + goto err_mdiobus_register; + } + request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, + MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS])); + + ret = phy_device_register(phydev); + if (ret) { + dev_err(pdata->dev, "phy_device_register failed\n"); + goto err_phy_device; + } + if (!phydev->dev.driver) { + dev_err(pdata->dev, "phy driver probe failed\n"); + ret = -EIO; + goto err_phy_device; + } + + /* Add a reference to the PHY driver so it can't be unloaded */ + pdata->phy_module = phydev->dev.driver->owner; + if (!try_module_get(pdata->phy_module)) { + dev_err(pdata->dev, "try_module_get failed\n"); + ret = -EIO; + goto err_phy_device; + } + + pdata->mii = mii; + pdata->mdio_mmd = MDIO_MMD_PCS; + + phydev->autoneg = pdata->default_autoneg; + if (phydev->autoneg == AUTONEG_DISABLE) { + phydev->speed = pdata->default_speed; + phydev->duplex = DUPLEX_FULL; + + phydev->advertising &= ~ADVERTISED_Autoneg; + } + + pdata->phydev = phydev; + + DBGPHY_REGS(pdata); + + DBGPR("<--xgbe_mdio_register\n"); + + return 0; + +err_phy_device: + phy_device_free(phydev); + +err_mdiobus_register: + mdiobus_unregister(mii); + +err_mdiobus_alloc: + mdiobus_free(mii); + + return ret; +} + +void xgbe_mdio_unregister(struct xgbe_prv_data *pdata) +{ + DBGPR("-->xgbe_mdio_unregister\n"); + + pdata->phydev = NULL; + + module_put(pdata->phy_module); + pdata->phy_module = NULL; + + mdiobus_unregister(pdata->mii); + pdata->mii->priv = NULL; + + mdiobus_free(pdata->mii); + pdata->mii = NULL; + + DBGPR("<--xgbe_mdio_unregister\n"); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c new file mode 100644 index 000000000..b03e4f58d --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c @@ -0,0 +1,279 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. 
Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. 
Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + +static cycle_t xgbe_cc_read(const struct cyclecounter *cc) +{ + struct xgbe_prv_data *pdata = container_of(cc, + struct xgbe_prv_data, + tstamp_cc); + u64 nsec; + + nsec = pdata->hw_if.get_tstamp_time(pdata); + + return nsec; +} + +static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 adjust; + u32 addend, diff; + unsigned int neg_adjust = 0; + + if (delta < 0) { + neg_adjust = 1; + delta = -delta; + } + + adjust = pdata->tstamp_addend; + adjust *= delta; + diff = div_u64(adjust, 1000000000UL); + + addend = (neg_adjust) ? 
pdata->tstamp_addend - diff : + pdata->tstamp_addend + diff; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + pdata->hw_if.update_tstamp_addend(pdata, addend); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return 0; +} + +static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + timecounter_adjtime(&pdata->tstamp_tc, delta); + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return 0; +} + +static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 nsec; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + nsec = timecounter_read(&pdata->tstamp_tc); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + *ts = ns_to_timespec64(nsec); + + return 0; +} + +static int xgbe_settime(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 nsec; + + nsec = timespec64_to_ns(ts); + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return 0; +} + +static int xgbe_enable(struct ptp_clock_info *info, + struct ptp_clock_request *request, int on) +{ + return -EOPNOTSUPP; +} + +void xgbe_ptp_register(struct xgbe_prv_data *pdata) +{ + struct ptp_clock_info *info = &pdata->ptp_clock_info; + struct ptp_clock *clock; + struct cyclecounter *cc = &pdata->tstamp_cc; + u64 dividend; + + snprintf(info->name, sizeof(info->name), "%s", + netdev_name(pdata->netdev)); + info->owner = THIS_MODULE; + info->max_adj = pdata->ptpclk_rate; + info->adjfreq = xgbe_adjfreq; + info->adjtime = xgbe_adjtime; + info->gettime64 = xgbe_gettime; + info->settime64 = xgbe_settime; + info->enable = xgbe_enable; + + clock = ptp_clock_register(info, pdata->dev); + if (IS_ERR(clock)) { + dev_err(pdata->dev, "ptp_clock_register failed\n"); + return; + } + + pdata->ptp_clock = clock; + + /* Calculate the addend: + * addend = 2^32 / (PTP ref clock / 50Mhz) + * = (2^32 * 50Mhz) / PTP ref clock + */ + dividend = 50000000; + dividend <<= 32; + pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate); + + /* Setup the timecounter */ + cc->read = xgbe_cc_read; + cc->mask = CLOCKSOURCE_MASK(64); + cc->mult = 1; + cc->shift = 0; + + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, + ktime_to_ns(ktime_get_real())); + + /* Disable all timestamping to start */ + XGMAC_IOWRITE(pdata, MAC_TCR, 0); + pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +} + +void xgbe_ptp_unregister(struct xgbe_prv_data *pdata) +{ + if (pdata->ptp_clock) + ptp_clock_unregister(pdata->ptp_clock); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h new file mode 100644 index 000000000..e62dfa2de --- /dev/null +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -0,0 +1,867 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. 
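/*
 * Illustrative sketch (hedged, not from the driver source): the
 * arithmetic behind xgbe_ptp_register() and xgbe_adjfreq() above,
 * assuming a hypothetical 250 MHz PTP reference clock.
 *
 *   addend = (2^32 * 50 MHz) / ptpclk_rate
 *          = (4294967296 * 50000000) / 250000000 = 858993459
 *
 * A frequency tweak of delta parts-per-billion then shifts the addend
 * by addend * delta / 10^9; for delta = +100000 (100 ppm) that is
 * 85899, giving a new addend of 859079358. The demo_* names are
 * hypothetical.
 */
#include <stdint.h>

static uint32_t demo_ptp_addend(uint64_t ptpclk_rate_hz)
{
	uint64_t dividend = 50000000ULL << 32;	/* 2^32 * 50 MHz */

	return (uint32_t)(dividend / ptpclk_rate_hz);
}

static uint32_t demo_ptp_adjfreq(uint32_t addend, int32_t delta_ppb)
{
	uint64_t mag = delta_ppb < 0 ? (uint64_t)-(int64_t)delta_ppb : (uint64_t)delta_ppb;
	uint32_t diff = (uint32_t)(((uint64_t)addend * mag) / 1000000000ULL);

	return delta_ppb < 0 ? addend - diff : addend + diff;
}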
+ * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef __XGBE_H__
+#define __XGBE_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define XGBE_DRV_NAME		"amd-xgbe"
+#define XGBE_DRV_VERSION	"1.0.0-a"
+#define XGBE_DRV_DESC		"AMD 10 Gigabit Ethernet Driver"
+
+/* Descriptor related defines */
+#define XGBE_TX_DESC_CNT	512
+#define XGBE_TX_DESC_MIN_FREE	(XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC	(XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT	512
+
+#define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XGBE_TX_MAX_SPLIT	((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB:
+ * - Maximum number of SKB frags
+ * - Maximum descriptors for contiguous TSO/GSO packet
+ * - Possible context descriptor
+ * - Possible TSO header descriptor
+ */
+#define XGBE_TX_MAX_DESCS	(MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
+
+#define XGBE_RX_MIN_BUF_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN	64
+#define XGBE_SKB_ALLOC_SIZE	256
+#define XGBE_SPH_HDSMS_SIZE	2	/* Keep in sync with SKB_ALLOC_SIZE */
+
+#define XGBE_MAX_DMA_CHANNELS	16
+#define XGBE_MAX_QUEUES		16
+#define XGBE_DMA_STOP_TIMEOUT	5
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define XGBE_DMA_OS_AXDOMAIN	0x2
+#define XGBE_DMA_OS_ARCACHE	0xb
+#define XGBE_DMA_OS_AWCACHE	0xf
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_AXDOMAIN	0x3
+#define XGBE_DMA_SYS_ARCACHE	0x0
+#define XGBE_DMA_SYS_AWCACHE	0x0
+
+#define XGBE_DMA_INTERRUPT_MASK	0x31c7
+
+#define XGMAC_MIN_PACKET	60
+#define XGMAC_STD_PACKET_MTU	1500
+#define XGMAC_MAX_STD_PACKET	1518
+#define XGMAC_JUMBO_PACKET_MTU	9000
+#define XGMAC_MAX_JUMBO_PACKET	9018
+
+/* MDIO bus phy name */
+#define XGBE_PHY_NAME		"amd_xgbe_phy"
+#define XGBE_PRTAD		0
+
+/* Common property names */
+#define XGBE_MAC_ADDR_PROPERTY	"mac-address"
+#define XGBE_PHY_MODE_PROPERTY	"phy-mode"
+#define XGBE_DMA_IRQS_PROPERTY	"amd,per-channel-interrupt"
+
+/* Device-tree clock names */
+#define XGBE_DMA_CLOCK		"dma_clk"
+#define XGBE_PTP_CLOCK		"ptp_clk"
+
+/* ACPI property names */
+#define XGBE_ACPI_DMA_FREQ	"amd,dma-freq"
+#define XGBE_ACPI_PTP_FREQ	"amd,ptp-freq"
+
+/* Timestamp support - values based on 50MHz PTP clock
+ *   50MHz => 20 nsec
+ */
+#define XGBE_TSTAMP_SSINC	20
+#define XGBE_TSTAMP_SNSINC	0
+
+/* Driver PMT macros */
+#define XGMAC_DRIVER_CONTEXT	1
+#define XGMAC_IOCTL_CONTEXT	2
+
+#define XGBE_FIFO_MAX		81920
+#define XGBE_FIFO_SIZE_B(x)	(x)
+#define XGBE_FIFO_SIZE_KB(x)	(x * 1024)
+
+#define XGBE_TC_MIN_QUANTUM	10
+
+/* Helper macro for descriptor handling
+ *  Always use XGBE_GET_DESC_DATA to access the descriptor data
+ *  since the index is free-running and needs to be and-ed
+ *  with the descriptor count value of the ring to index to
+ *  the proper descriptor data.
+ */
+#define XGBE_GET_DESC_DATA(_ring, _idx)				\
+	((_ring)->rdata +					\
+	 ((_idx) & ((_ring)->rdesc_count - 1)))
+
+/* Default coalescing parameters */
+#define XGMAC_INIT_DMA_TX_USECS		1000
+#define XGMAC_INIT_DMA_TX_FRAMES	25
+
+#define XGMAC_MAX_DMA_RIWT		0xff
+#define XGMAC_INIT_DMA_RX_USECS		30
+#define XGMAC_INIT_DMA_RX_FRAMES	25
+
+/* Flow control queue count */
+#define XGMAC_MAX_FLOW_CONTROL_QUEUES	8
+
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define XGBE_MAC_HASH_TABLE_SIZE	8
+
+/* Receive Side Scaling */
+#define XGBE_RSS_HASH_KEY_SIZE		40
+#define XGBE_RSS_MAX_TABLE_SIZE		256
+#define XGBE_RSS_LOOKUP_TABLE_TYPE	0
+#define XGBE_RSS_HASH_KEY_TYPE		1
+
+struct xgbe_prv_data;
+
+struct xgbe_packet_data {
+	struct sk_buff *skb;
+
+	unsigned int attributes;
+
+	unsigned int errors;
+
+	unsigned int rdesc_count;
+	unsigned int length;
+
+	unsigned int header_len;
+	unsigned int tcp_header_len;
+	unsigned int tcp_payload_len;
+	unsigned short mss;
+
+	unsigned short vlan_ctag;
+
+	u64 rx_tstamp;
+
+	u32 rss_hash;
+	enum pkt_hash_types rss_hash_type;
+
+	unsigned int tx_packets;
+	unsigned int tx_bytes;
+};
+
+/* Common Rx and Tx descriptor mapping */
+struct xgbe_ring_desc {
+	__le32 desc0;
+	__le32 desc1;
+	__le32 desc2;
+	__le32 desc3;
+};
+
+/* Page allocation related values */
+struct xgbe_page_alloc {
+	struct page *pages;
+	unsigned int pages_len;
+	unsigned int pages_offset;
+
+	dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+	struct xgbe_page_alloc pa;
+	struct xgbe_page_alloc pa_unmap;
+
+	dma_addr_t dma;
+	unsigned int dma_len;
+};
+
+/* Tx-related ring data */
+struct xgbe_tx_ring_data {
+	unsigned int packets;		/* BQL packet count */
+	unsigned int bytes;		/* BQL byte count */
+};
+
+/* Rx-related ring data */
+struct xgbe_rx_ring_data {
+	struct xgbe_buffer_data hdr;	/* Header locations */
+	struct xgbe_buffer_data buf;	/* Payload locations */
+
+	unsigned short hdr_len;		/* Length of received header */
+	unsigned short len;		/* Length of received packet */
+};
+
+/* Structure used to hold information related to the descriptor
+ * and the packet associated with the descriptor (always use
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ */
+struct xgbe_ring_data {
+	struct xgbe_ring_desc *rdesc;	/* Virtual address of descriptor */
+	dma_addr_t rdesc_dma;		/* DMA address of descriptor */
+
+	struct sk_buff *skb;		/* Virtual address of SKB */
+	dma_addr_t skb_dma;		/* DMA address of SKB data */
+	unsigned int skb_dma_len;	/* Length of SKB DMA area */
+
+	struct xgbe_tx_ring_data tx;	/* Tx-related data */
+	struct xgbe_rx_ring_data rx;	/* Rx-related data */
+
+	unsigned int mapped_as_page;
+
+	/* Incomplete receive save location. If the budget is exhausted
+	 * or the last descriptor (last normal descriptor or a following
+	 * context descriptor) has not been DMA'd yet the current state
+	 * of the receive processing needs to be saved.
+ */ + unsigned int state_saved; + struct { + unsigned int incomplete; + unsigned int context_next; + struct sk_buff *skb; + unsigned int len; + unsigned int error; + } state; +}; + +struct xgbe_ring { + /* Ring lock - used just for TX rings at the moment */ + spinlock_t lock; + + /* Per packet related information */ + struct xgbe_packet_data packet_data; + + /* Virtual/DMA addresses and count of allocated descriptor memory */ + struct xgbe_ring_desc *rdesc; + dma_addr_t rdesc_dma; + unsigned int rdesc_count; + + /* Array of descriptor data corresponding the descriptor memory + * (always use the XGBE_GET_DESC_DATA macro to access this data) + */ + struct xgbe_ring_data *rdata; + + /* Page allocation for RX buffers */ + struct xgbe_page_alloc rx_hdr_pa; + struct xgbe_page_alloc rx_buf_pa; + + /* Ring index values + * cur - Tx: index of descriptor to be used for current transfer + * Rx: index of descriptor to check for packet availability + * dirty - Tx: index of descriptor to check for transfer complete + * Rx: index of descriptor to check for buffer reallocation + */ + unsigned int cur; + unsigned int dirty; + + /* Coalesce frame count used for interrupt bit setting */ + unsigned int coalesce_count; + + union { + struct { + unsigned int queue_stopped; + unsigned int xmit_more; + unsigned short cur_mss; + unsigned short cur_vlan_ctag; + } tx; + }; +} ____cacheline_aligned; + +/* Structure used to describe the descriptor rings associated with + * a DMA channel. + */ +struct xgbe_channel { + char name[16]; + + /* Address of private data area for device */ + struct xgbe_prv_data *pdata; + + /* Queue index and base address of queue's DMA registers */ + unsigned int queue_index; + void __iomem *dma_regs; + + /* Per channel interrupt irq number */ + int dma_irq; + char dma_irq_name[IFNAMSIZ + 32]; + + /* Netdev related settings */ + struct napi_struct napi; + + unsigned int saved_ier; + + unsigned int tx_timer_active; + struct timer_list tx_timer; + + struct xgbe_ring *tx_ring; + struct xgbe_ring *rx_ring; +} ____cacheline_aligned; + +enum xgbe_int { + XGMAC_INT_DMA_CH_SR_TI, + XGMAC_INT_DMA_CH_SR_TPS, + XGMAC_INT_DMA_CH_SR_TBU, + XGMAC_INT_DMA_CH_SR_RI, + XGMAC_INT_DMA_CH_SR_RBU, + XGMAC_INT_DMA_CH_SR_RPS, + XGMAC_INT_DMA_CH_SR_TI_RI, + XGMAC_INT_DMA_CH_SR_FBE, + XGMAC_INT_DMA_ALL, +}; + +enum xgbe_int_state { + XGMAC_INT_STATE_SAVE, + XGMAC_INT_STATE_RESTORE, +}; + +enum xgbe_mtl_fifo_size { + XGMAC_MTL_FIFO_SIZE_256 = 0x00, + XGMAC_MTL_FIFO_SIZE_512 = 0x01, + XGMAC_MTL_FIFO_SIZE_1K = 0x03, + XGMAC_MTL_FIFO_SIZE_2K = 0x07, + XGMAC_MTL_FIFO_SIZE_4K = 0x0f, + XGMAC_MTL_FIFO_SIZE_8K = 0x1f, + XGMAC_MTL_FIFO_SIZE_16K = 0x3f, + XGMAC_MTL_FIFO_SIZE_32K = 0x7f, + XGMAC_MTL_FIFO_SIZE_64K = 0xff, + XGMAC_MTL_FIFO_SIZE_128K = 0x1ff, + XGMAC_MTL_FIFO_SIZE_256K = 0x3ff, +}; + +struct xgbe_mmc_stats { + /* Tx Stats */ + u64 txoctetcount_gb; + u64 txframecount_gb; + u64 txbroadcastframes_g; + u64 txmulticastframes_g; + u64 tx64octets_gb; + u64 tx65to127octets_gb; + u64 tx128to255octets_gb; + u64 tx256to511octets_gb; + u64 tx512to1023octets_gb; + u64 tx1024tomaxoctets_gb; + u64 txunicastframes_gb; + u64 txmulticastframes_gb; + u64 txbroadcastframes_gb; + u64 txunderflowerror; + u64 txoctetcount_g; + u64 txframecount_g; + u64 txpauseframes; + u64 txvlanframes_g; + + /* Rx Stats */ + u64 rxframecount_gb; + u64 rxoctetcount_gb; + u64 rxoctetcount_g; + u64 rxbroadcastframes_g; + u64 rxmulticastframes_g; + u64 rxcrcerror; + u64 rxrunterror; + u64 rxjabbererror; + u64 rxundersize_g; + u64 rxoversize_g; + u64 
rx64octets_gb; + u64 rx65to127octets_gb; + u64 rx128to255octets_gb; + u64 rx256to511octets_gb; + u64 rx512to1023octets_gb; + u64 rx1024tomaxoctets_gb; + u64 rxunicastframes_g; + u64 rxlengtherror; + u64 rxoutofrangetype; + u64 rxpauseframes; + u64 rxfifooverflow; + u64 rxvlanframes_gb; + u64 rxwatchdogerror; +}; + +struct xgbe_hw_if { + int (*tx_complete)(struct xgbe_ring_desc *); + + int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr); + int (*config_rx_mode)(struct xgbe_prv_data *); + + int (*enable_rx_csum)(struct xgbe_prv_data *); + int (*disable_rx_csum)(struct xgbe_prv_data *); + + int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *); + int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *); + int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *); + int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *); + int (*update_vlan_hash_table)(struct xgbe_prv_data *); + + int (*read_mmd_regs)(struct xgbe_prv_data *, int, int); + void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int); + int (*set_gmii_speed)(struct xgbe_prv_data *); + int (*set_gmii_2500_speed)(struct xgbe_prv_data *); + int (*set_xgmii_speed)(struct xgbe_prv_data *); + + void (*enable_tx)(struct xgbe_prv_data *); + void (*disable_tx)(struct xgbe_prv_data *); + void (*enable_rx)(struct xgbe_prv_data *); + void (*disable_rx)(struct xgbe_prv_data *); + + void (*powerup_tx)(struct xgbe_prv_data *); + void (*powerdown_tx)(struct xgbe_prv_data *); + void (*powerup_rx)(struct xgbe_prv_data *); + void (*powerdown_rx)(struct xgbe_prv_data *); + + int (*init)(struct xgbe_prv_data *); + int (*exit)(struct xgbe_prv_data *); + + int (*enable_int)(struct xgbe_channel *, enum xgbe_int); + int (*disable_int)(struct xgbe_channel *, enum xgbe_int); + void (*dev_xmit)(struct xgbe_channel *); + int (*dev_read)(struct xgbe_channel *); + void (*tx_desc_init)(struct xgbe_channel *); + void (*rx_desc_init)(struct xgbe_channel *); + void (*tx_desc_reset)(struct xgbe_ring_data *); + void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *, + unsigned int); + int (*is_last_desc)(struct xgbe_ring_desc *); + int (*is_context_desc)(struct xgbe_ring_desc *); + void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *); + + /* For FLOW ctrl */ + int (*config_tx_flow_control)(struct xgbe_prv_data *); + int (*config_rx_flow_control)(struct xgbe_prv_data *); + + /* For RX coalescing */ + int (*config_rx_coalesce)(struct xgbe_prv_data *); + int (*config_tx_coalesce)(struct xgbe_prv_data *); + unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int); + unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int); + + /* For RX and TX threshold config */ + int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int); + int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int); + + /* For RX and TX Store and Forward Mode config */ + int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int); + int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int); + + /* For TX DMA Operate on Second Frame config */ + int (*config_osp_mode)(struct xgbe_prv_data *); + + /* For RX and TX PBL config */ + int (*config_rx_pbl_val)(struct xgbe_prv_data *); + int (*get_rx_pbl_val)(struct xgbe_prv_data *); + int (*config_tx_pbl_val)(struct xgbe_prv_data *); + int (*get_tx_pbl_val)(struct xgbe_prv_data *); + int (*config_pblx8)(struct xgbe_prv_data *); + + /* For MMC statistics */ + void (*rx_mmc_int)(struct xgbe_prv_data *); + void (*tx_mmc_int)(struct xgbe_prv_data *); + void (*read_mmc_stats)(struct 
xgbe_prv_data *); + + /* For Timestamp config */ + int (*config_tstamp)(struct xgbe_prv_data *, unsigned int); + void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int); + void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec, + unsigned int nsec); + u64 (*get_tstamp_time)(struct xgbe_prv_data *); + u64 (*get_tx_tstamp)(struct xgbe_prv_data *); + + /* For Data Center Bridging config */ + void (*config_dcb_tc)(struct xgbe_prv_data *); + void (*config_dcb_pfc)(struct xgbe_prv_data *); + + /* For Receive Side Scaling */ + int (*enable_rss)(struct xgbe_prv_data *); + int (*disable_rss)(struct xgbe_prv_data *); + int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *); + int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *); +}; + +struct xgbe_desc_if { + int (*alloc_ring_resources)(struct xgbe_prv_data *); + void (*free_ring_resources)(struct xgbe_prv_data *); + int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *); + int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *, + struct xgbe_ring_data *); + void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *); + void (*wrapper_tx_desc_init)(struct xgbe_prv_data *); + void (*wrapper_rx_desc_init)(struct xgbe_prv_data *); +}; + +/* This structure contains flags that indicate what hardware features + * or configurations are present in the device. + */ +struct xgbe_hw_features { + /* HW Version */ + unsigned int version; + + /* HW Feature Register0 */ + unsigned int gmii; /* 1000 Mbps support */ + unsigned int vlhash; /* VLAN Hash Filter */ + unsigned int sma; /* SMA(MDIO) Interface */ + unsigned int rwk; /* PMT remote wake-up packet */ + unsigned int mgk; /* PMT magic packet */ + unsigned int mmc; /* RMON module */ + unsigned int aoe; /* ARP Offload */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ + unsigned int eee; /* Energy Efficient Ethernet */ + unsigned int tx_coe; /* Tx Checksum Offload */ + unsigned int rx_coe; /* Rx Checksum Offload */ + unsigned int addn_mac; /* Additional MAC Addresses */ + unsigned int ts_src; /* Timestamp Source */ + unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + + /* HW Feature Register1 */ + unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ + unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ + unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ + unsigned int dcb; /* DCB Feature */ + unsigned int sph; /* Split Header Feature */ + unsigned int tso; /* TCP Segmentation Offload */ + unsigned int dma_debug; /* DMA Debug Registers */ + unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ + unsigned int hash_table_size; /* Hash Table Size */ + unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + + /* HW Feature Register2 */ + unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ + unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ + unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ + unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ + unsigned int pps_out_num; /* Number of PPS outputs */ + unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ +}; + +struct xgbe_prv_data { + struct net_device *netdev; + struct platform_device *pdev; + struct acpi_device *adev; + struct device *dev; + + /* ACPI or DT flag */ + unsigned int use_acpi; + + /* XGMAC/XPCS related mmio registers */ + void __iomem *xgmac_regs; /* XGMAC CSRs */ + void __iomem *xpcs_regs; /* XPCS MMD registers */ 
+ + /* Overall device lock */ + spinlock_t lock; + + /* XPCS indirect addressing mutex */ + struct mutex xpcs_mutex; + + /* RSS addressing mutex */ + struct mutex rss_mutex; + + int dev_irq; + unsigned int per_channel_irq; + + struct xgbe_hw_if hw_if; + struct xgbe_desc_if desc_if; + + /* AXI DMA settings */ + unsigned int coherent; + unsigned int axdomain; + unsigned int arcache; + unsigned int awcache; + + /* Rings for Tx/Rx on a DMA channel */ + struct xgbe_channel *channel; + unsigned int channel_count; + unsigned int tx_ring_count; + unsigned int tx_desc_count; + unsigned int rx_ring_count; + unsigned int rx_desc_count; + + unsigned int tx_q_count; + unsigned int rx_q_count; + + /* Tx/Rx common settings */ + unsigned int pblx8; + + /* Tx settings */ + unsigned int tx_sf_mode; + unsigned int tx_threshold; + unsigned int tx_pbl; + unsigned int tx_osp_mode; + + /* Rx settings */ + unsigned int rx_sf_mode; + unsigned int rx_threshold; + unsigned int rx_pbl; + + /* Tx coalescing settings */ + unsigned int tx_usecs; + unsigned int tx_frames; + + /* Rx coalescing settings */ + unsigned int rx_riwt; + unsigned int rx_usecs; + unsigned int rx_frames; + + /* Current Rx buffer size */ + unsigned int rx_buf_size; + + /* Flow control settings */ + unsigned int pause_autoneg; + unsigned int tx_pause; + unsigned int rx_pause; + + /* Receive Side Scaling settings */ + u8 rss_key[XGBE_RSS_HASH_KEY_SIZE]; + u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE]; + u32 rss_options; + + /* MDIO settings */ + struct module *phy_module; + char *mii_bus_id; + struct mii_bus *mii; + int mdio_mmd; + struct phy_device *phydev; + int default_autoneg; + int default_speed; + + /* Current PHY settings */ + phy_interface_t phy_mode; + int phy_link; + int phy_speed; + unsigned int phy_tx_pause; + unsigned int phy_rx_pause; + + /* Netdev related settings */ + unsigned char mac_addr[ETH_ALEN]; + netdev_features_t netdev_features; + struct napi_struct napi; + struct xgbe_mmc_stats mmc_stats; + + /* Filtering support */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + /* Device clocks */ + struct clk *sysclk; + unsigned long sysclk_rate; + struct clk *ptpclk; + unsigned long ptpclk_rate; + + /* Timestamp support */ + spinlock_t tstamp_lock; + struct ptp_clock_info ptp_clock_info; + struct ptp_clock *ptp_clock; + struct hwtstamp_config tstamp_config; + struct cyclecounter tstamp_cc; + struct timecounter tstamp_tc; + unsigned int tstamp_addend; + struct work_struct tx_tstamp_work; + struct sk_buff *tx_tstamp_skb; + u64 tx_tstamp; + + /* DCB support */ + struct ieee_ets *ets; + struct ieee_pfc *pfc; + unsigned int q2tc_map[XGBE_MAX_QUEUES]; + unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS]; + + /* Hardware features of the device */ + struct xgbe_hw_features hw_feat; + + /* Device restart work structure */ + struct work_struct restart_work; + + /* Keeps track of power mode */ + unsigned int power_down; + +#ifdef CONFIG_DEBUG_FS + struct dentry *xgbe_debugfs; + + unsigned int debugfs_xgmac_reg; + + unsigned int debugfs_xpcs_mmd; + unsigned int debugfs_xpcs_reg; +#endif +}; + +/* Function prototypes*/ + +void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *); +void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *); +struct net_device_ops *xgbe_get_netdev_ops(void); +struct ethtool_ops *xgbe_get_ethtool_ops(void); +#ifdef CONFIG_AMD_XGBE_DCB +const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void); +#endif + +int xgbe_mdio_register(struct xgbe_prv_data *); +void xgbe_mdio_unregister(struct xgbe_prv_data *); +void 
xgbe_dump_phy_registers(struct xgbe_prv_data *); +void xgbe_ptp_register(struct xgbe_prv_data *); +void xgbe_ptp_unregister(struct xgbe_prv_data *); +void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int, + unsigned int); +void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *, + unsigned int); +void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool); +void xgbe_get_all_hw_features(struct xgbe_prv_data *); +int xgbe_powerup(struct net_device *, unsigned int); +int xgbe_powerdown(struct net_device *, unsigned int); +void xgbe_init_rx_coalesce(struct xgbe_prv_data *); +void xgbe_init_tx_coalesce(struct xgbe_prv_data *); + +#ifdef CONFIG_DEBUG_FS +void xgbe_debugfs_init(struct xgbe_prv_data *); +void xgbe_debugfs_exit(struct xgbe_prv_data *); +#else +static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {} +static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {} +#endif /* CONFIG_DEBUG_FS */ + +/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */ +#if 0 +#define XGMAC_ENABLE_TX_DESC_DUMP +#define XGMAC_ENABLE_RX_DESC_DUMP +#endif + +/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */ +#if 0 +#define XGMAC_ENABLE_TX_PKT_DUMP +#define XGMAC_ENABLE_RX_PKT_DUMP +#endif + +/* NOTE: Uncomment for function trace log messages in KERNEL LOG */ +#if 0 +#define YDEBUG +#define YDEBUG_MDIO +#endif + +/* For debug prints */ +#ifdef YDEBUG +#define DBGPR(x...) pr_alert(x) +#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x) +#else +#define DBGPR(x...) do { } while (0) +#define DBGPHY_REGS(x...) do { } while (0) +#endif + +#ifdef YDEBUG_MDIO +#define DBGPR_MDIO(x...) pr_alert(x) +#else +#define DBGPR_MDIO(x...) do { } while (0) +#endif + +#endif diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig new file mode 100644 index 000000000..ec63d706d --- /dev/null +++ b/drivers/net/ethernet/apm/Kconfig @@ -0,0 +1 @@ +source "drivers/net/ethernet/apm/xgene/Kconfig" diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile new file mode 100644 index 000000000..65ce32ad1 --- /dev/null +++ b/drivers/net/ethernet/apm/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for APM X-GENE Ethernet driver. +# + +obj-$(CONFIG_NET_XGENE) += xgene/ diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig new file mode 100644 index 000000000..19e38afbc --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -0,0 +1,11 @@ +config NET_XGENE + tristate "APM X-Gene SoC Ethernet Driver" + depends on HAS_DMA + depends on ARCH_XGENE || COMPILE_TEST + select PHYLIB + help + This is the Ethernet driver for the on-chip ethernet interface on the + APM X-Gene SoC. + + To compile this driver as a module, choose M here. This module will + be called xgene_enet. diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile new file mode 100644 index 000000000..68be56554 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for APM X-Gene Ethernet Driver. 
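# The xgene-enet-objs list below links the hw, sgmac, xgmac, main and
# ethtool objects into one module when CONFIG_NET_XGENE=m; per the Kconfig
# help text it loads as "xgene_enet". From the top of a configured kernel
# tree, "make drivers/net/ethernet/apm/xgene/" builds just this directory.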
+# + +xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \ + xgene_enet_main.o xgene_enet_ethtool.o +obj-$(CONFIG_NET_XGENE) += xgene-enet.o diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c new file mode 100644 index 000000000..416d6ebfc --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -0,0 +1,150 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include "xgene_enet_main.h" + +struct xgene_gstrings_stats { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define XGENE_STAT(m) { #m, offsetof(struct xgene_enet_pdata, stats.m) } + +static const struct xgene_gstrings_stats gstrings_stats[] = { + XGENE_STAT(rx_packets), + XGENE_STAT(tx_packets), + XGENE_STAT(rx_bytes), + XGENE_STAT(tx_bytes), + XGENE_STAT(rx_errors), + XGENE_STAT(tx_errors), + XGENE_STAT(rx_length_errors), + XGENE_STAT(rx_crc_errors), + XGENE_STAT(rx_frame_errors), + XGENE_STAT(rx_fifo_errors) +}; + +#define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats) + +static void xgene_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct platform_device *pdev = pdata->pdev; + + strcpy(info->driver, "xgene_enet"); + strcpy(info->version, XGENE_DRV_VERSION); + snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A"); + sprintf(info->bus_info, "%s", pdev->name); +} + +static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); + } else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { + cmd->supported = SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_MII; + cmd->advertising = cmd->supported; + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_MII; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE; + cmd->advertising = cmd->supported; + ethtool_cmd_speed_set(cmd, SPEED_10000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + } + + return 0; +} + +static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); + } + + return -EINVAL; +} + +static void xgene_get_strings(struct net_device *ndev, u32 
stringset, u8 *data) +{ + int i; + u8 *p = data; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < XGENE_STATS_LEN; i++) { + memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +} + +static int xgene_get_sset_count(struct net_device *ndev, int sset) +{ + if (sset != ETH_SS_STATS) + return -EINVAL; + + return XGENE_STATS_LEN; +} + +static void xgene_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *dummy, + u64 *data) +{ + void *pdata = netdev_priv(ndev); + int i; + + for (i = 0; i < XGENE_STATS_LEN; i++) + *data++ = *(u64 *)(pdata + gstrings_stats[i].offset); +} + +static const struct ethtool_ops xgene_ethtool_ops = { + .get_drvinfo = xgene_get_drvinfo, + .get_settings = xgene_get_settings, + .set_settings = xgene_set_settings, + .get_link = ethtool_op_get_link, + .get_strings = xgene_get_strings, + .get_sset_count = xgene_get_sset_count, + .get_ethtool_stats = xgene_get_ethtool_stats +}; + +void xgene_enet_set_ethtool_ops(struct net_device *ndev) +{ + ndev->ethtool_ops = &xgene_ethtool_ops; +} diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c new file mode 100644 index 000000000..b927021c6 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -0,0 +1,805 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" + +static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + u64 addr = ring->dma; + enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; + + ring_cfg[4] |= (1 << SELTHRSH_POS) & + CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN); + ring_cfg[3] |= ACCEPTLERR; + ring_cfg[2] |= QCOHERENT; + + addr >>= 8; + ring_cfg[2] |= (addr << RINGADDRL_POS) & + CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN); + addr >>= RINGADDRL_LEN; + ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN); + ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) & + CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN); +} + +static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + bool is_bufpool; + u32 val; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + val = (is_bufpool) ? 
RING_BUFPOOL : RING_REGULAR; + ring_cfg[4] |= (val << RINGTYPE_POS) & + CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN); + + if (is_bufpool) { + ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) & + CREATE_MASK(RINGMODE_POS, RINGMODE_LEN); + } +} + +static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + + ring_cfg[3] |= RECOMBBUF; + ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) & + CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN); + ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN); +} + +static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, + u32 offset, u32 data) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + iowrite32(data, pdata->ring_csr_addr + offset); +} + +static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring, + u32 offset, u32 *data) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + *data = ioread32(pdata->ring_csr_addr + offset); +} + +static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) +{ + int i; + + xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num); + for (i = 0; i < NUM_RING_CONFIG; i++) { + xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4), + ring->state[i]); + } +} + +static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) +{ + memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG); + xgene_enet_write_ring_state(ring); +} + +static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring) +{ + xgene_enet_ring_set_type(ring); + + if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0) + xgene_enet_ring_set_recombbuf(ring); + + xgene_enet_ring_init(ring); + xgene_enet_write_ring_state(ring); +} + +static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) +{ + u32 ring_id_val, ring_id_buf; + bool is_bufpool; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + + ring_id_val = ring->id & GENMASK(9, 0); + ring_id_val |= OVERWRITE; + + ring_id_buf = (ring->num << 9) & GENMASK(18, 9); + ring_id_buf |= PREFETCH_BUF_EN; + if (is_bufpool) + ring_id_buf |= IS_BUFFER_POOL; + + xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val); + xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf); +} + +static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) +{ + u32 ring_id; + + ring_id = ring->id | OVERWRITE; + xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id); + xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0); +} + +struct xgene_enet_desc_ring *xgene_enet_setup_ring( + struct xgene_enet_desc_ring *ring) +{ + u32 size = ring->size; + u32 i, data; + bool is_bufpool; + + xgene_enet_clr_ring_state(ring); + xgene_enet_set_ring_state(ring); + xgene_enet_set_ring_id(ring); + + ring->slots = xgene_enet_get_numslots(ring->id, size); + + is_bufpool = xgene_enet_is_bufpool(ring->id); + if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) + return ring; + + for (i = 0; i < ring->slots; i++) + xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]); + + xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); + data |= BIT(31 - xgene_enet_ring_bufnum(ring->id)); + xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); + + return ring; +} + +void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring) +{ + u32 data; + bool is_bufpool; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) + goto out; + + xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); + data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id)); + 
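	/* Write the updated CSR_RING_NE_INT_MODE value back with this ring's
	 * bit cleared: xgene_enet_setup_ring() sets BIT(31 - bufnum) for
	 * CPU-owned rings, so clearing it here disables that ring's
	 * interrupt before its state is cleared below. */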
xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); + +out: + xgene_enet_clr_desc_ring_id(ring); + xgene_enet_clr_ring_state(ring); +} + +void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, + struct xgene_enet_pdata *pdata, + enum xgene_enet_err_code status) +{ + struct rtnl_link_stats64 *stats = &pdata->stats; + + switch (status) { + case INGRESS_CRC: + stats->rx_crc_errors++; + break; + case INGRESS_CHECKSUM: + case INGRESS_CHECKSUM_COMPUTE: + stats->rx_errors++; + break; + case INGRESS_TRUNC_FRAME: + stats->rx_frame_errors++; + break; + case INGRESS_PKT_LEN: + stats->rx_length_errors++; + break; + case INGRESS_PKT_UNDER: + stats->rx_frame_errors++; + break; + case INGRESS_FIFO_OVERRUN: + stats->rx_fifo_errors++; + break; + default: + break; + } +} + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_ring_if_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->mcx_mac_csr_addr + offset; + + iowrite32(val, addr); +} + +static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, + void __iomem *cmd, void __iomem *cmd_done, + u32 wr_addr, u32 wr_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(wr_addr, addr); + iowrite32(wr_data, wr); + iowrite32(XGENE_ENET_WR_CMD, cmd); + + /* wait for write command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata, + u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) + netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n", + wr_addr); +} + +static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->mcx_mac_csr_addr + offset; + + *val = ioread32(addr); +} + +static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, + void __iomem *cmd, void __iomem *cmd_done, + u32 rd_addr, u32 *rd_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(rd_addr, addr); + iowrite32(XGENE_ENET_RD_CMD, cmd); + + /* wait for read command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + *rd_data = ioread32(rd); + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata, + u32 rd_addr, 
u32 *rd_data) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data)) + netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n", + rd_addr); +} + +static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id, + u32 reg, u16 data) +{ + u32 addr = 0, wr_data = 0; + u32 done; + u8 wait = 10; + + PHY_ADDR_SET(&addr, phy_id); + REG_ADDR_SET(&addr, reg); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); + + PHY_CONTROL_SET(&wr_data, data); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data); + do { + usleep_range(5, 10); + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + netdev_err(pdata->ndev, "MII_MGMT write failed\n"); + return -EBUSY; + } + + return 0; +} + +static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata, + u8 phy_id, u32 reg) +{ + u32 addr = 0; + u32 data, done; + u8 wait = 10; + + PHY_ADDR_SET(&addr, phy_id); + REG_ADDR_SET(&addr, reg); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); + do { + usleep_range(5, 10); + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + netdev_err(pdata->ndev, "MII_MGMT read failed\n"); + return -EBUSY; + } + + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0); + + return data; +} + +static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) +{ + u32 addr0, addr1; + u8 *dev_addr = pdata->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); + + xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0); + xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1); +} + +static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + u32 data; + u8 wait = 10; + + xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); + do { + usleep_range(100, 110); + xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); + } while ((data != 0xffffffff) && wait--); + + if (data != 0xffffffff) { + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; + } + + return 0; +} + +static void xgene_gmac_reset(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0); +} + +static void xgene_gmac_init(struct xgene_enet_pdata *pdata) +{ + u32 value, mc2; + u32 intf_ctl, rgmii; + u32 icm0, icm2; + + xgene_gmac_reset(pdata); + + xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0); + xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2); + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2); + xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl); + xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii); + + switch (pdata->phy_speed) { + case SPEED_10: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + CFG_MACMODE_SET(&icm0, 0); + CFG_WAITASYNCRD_SET(&icm2, 500); + rgmii &= ~CFG_SPEED_1250; + break; + case SPEED_100: + 
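		/* 100 Mbps: same interface mode as the 10 Mbps case, plus
		 * ENET_LHD_MODE and a shorter ICM asynchronous-read wait
		 * (80 vs 500); CFG_SPEED_1250 stays cleared. */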
ENET_INTERFACE_MODE2_SET(&mc2, 1); + intf_ctl |= ENET_LHD_MODE; + CFG_MACMODE_SET(&icm0, 1); + CFG_WAITASYNCRD_SET(&icm2, 80); + rgmii &= ~CFG_SPEED_1250; + break; + default: + ENET_INTERFACE_MODE2_SET(&mc2, 2); + intf_ctl |= ENET_GHD_MODE; + CFG_TXCLK_MUXSEL0_SET(&rgmii, 4); + xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value); + value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value); + break; + } + + mc2 |= FULL_DUPLEX2; + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); + xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); + + xgene_gmac_set_mac_addr(pdata); + + /* Adjust MDC clock frequency */ + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value); + MGMT_CLOCK_SEL_SET(&value, 7); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value); + + /* Enable drop if bufpool not available */ + xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value); + value |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value); + + /* Rtype should be copied from FP */ + xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); + xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); + + /* Rx-Tx traffic resume */ + xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); + + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); + + xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value); + value &= ~TX_DV_GATE_EN0; + value &= ~RX_DV_GATE_EN0; + value |= RESUME_RX0; + xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value); + + xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX); +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) +{ + u32 val = 0xffffffff; + + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val); +} + +static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 cb; + u32 fpsel; + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + + xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); + cb |= CFG_CLE_BYPASS_EN0; + CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); + + xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); + CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); + CFG_CLE_FPSEL0_SET(&cb, fpsel); + xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb); +} + +static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN); +} + +static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN); +} + +static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN); +} + +static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); +} + +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) +{ + if (!ioread32(p->ring_csr_addr + 
CLKEN_ADDR)) + return false; + + if (ioread32(p->ring_csr_addr + SRST_ADDR)) + return false; + + return true; +} + +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) +{ + u32 val; + + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + + if (pdata->clk) { + clk_prepare_enable(pdata->clk); + clk_disable_unprepare(pdata->clk); + clk_prepare_enable(pdata->clk); + xgene_enet_ecc_init(pdata); + } + xgene_enet_config_ring_if_assoc(pdata); + + /* Enable auto-incr for scanning */ + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val); + val |= SCAN_AUTO_INCR; + MGMT_CLOCK_SEL_SET(&val, 1); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); + + return 0; +} + +static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) +{ + clk_disable_unprepare(pdata->clk); +} + +static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct xgene_enet_pdata *pdata = bus->priv; + u32 val; + + val = xgene_mii_phy_read(pdata, mii_id, regnum); + netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n", + mii_id, regnum, val); + + return val; +} + +static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 val) +{ + struct xgene_enet_pdata *pdata = bus->priv; + + netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n", + mii_id, regnum, val); + return xgene_mii_phy_write(pdata, mii_id, regnum, val); +} + +static void xgene_enet_adjust_link(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (phydev->link) { + if (pdata->phy_speed != phydev->speed) { + pdata->phy_speed = phydev->speed; + xgene_gmac_init(pdata); + xgene_gmac_rx_enable(pdata); + xgene_gmac_tx_enable(pdata); + phy_print_status(phydev); + } + } else { + xgene_gmac_rx_disable(pdata); + xgene_gmac_tx_disable(pdata); + pdata->phy_speed = SPEED_UNKNOWN; + phy_print_status(phydev); + } +} + +static int xgene_enet_phy_connect(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device_node *phy_np; + struct phy_device *phy_dev; + struct device *dev = &pdata->pdev->dev; + + if (dev->of_node) { + phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); + if (!phy_np) { + netdev_dbg(ndev, "No phy-handle found in DT\n"); + return -ENODEV; + } + pdata->phy_dev = of_phy_find_device(phy_np); + } + + phy_dev = pdata->phy_dev; + + if (!phy_dev || + phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, + pdata->phy_mode)) { + netdev_err(ndev, "Could not connect to PHY\n"); + return -ENODEV; + } + + pdata->phy_speed = SPEED_UNKNOWN; + phy_dev->supported &= ~SUPPORTED_10baseT_Half & + ~SUPPORTED_100baseT_Half & + ~SUPPORTED_1000baseT_Half; + phy_dev->advertising = phy_dev->supported; + + return 0; +} + +static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, + struct mii_bus *mdio) +{ + struct device *dev = &pdata->pdev->dev; + struct net_device *ndev = pdata->ndev; + struct phy_device *phy; + struct device_node *child_np; + struct device_node *mdio_np = NULL; + int ret; + u32 phy_id; + + if (dev->of_node) { + for_each_child_of_node(dev->of_node, child_np) { + if (of_device_is_compatible(child_np, + "apm,xgene-mdio")) { + mdio_np = child_np; + break; + } + } + + if (!mdio_np) { + netdev_dbg(ndev, "No mdio node in the dts\n"); + return -ENXIO; + } + + return of_mdiobus_register(mdio, mdio_np); + } + + /* Mask out all PHYs from auto probing. 
*/ + mdio->phy_mask = ~0; + + /* Register the MDIO bus */ + ret = mdiobus_register(mdio); + if (ret) + return ret; + + ret = device_property_read_u32(dev, "phy-channel", &phy_id); + if (ret) + ret = device_property_read_u32(dev, "phy-addr", &phy_id); + if (ret) + return -EINVAL; + + phy = get_phy_device(mdio, phy_id, true); + if (!phy || IS_ERR(phy)) + return -EIO; + + ret = phy_device_register(phy); + if (ret) + phy_device_free(phy); + else + pdata->phy_dev = phy; + + return ret; +} + +int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct mii_bus *mdio_bus; + int ret; + + mdio_bus = mdiobus_alloc(); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "APM X-Gene MDIO bus"; + mdio_bus->read = xgene_enet_mdio_read; + mdio_bus->write = xgene_enet_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii", + ndev->name); + + mdio_bus->priv = pdata; + mdio_bus->parent = &ndev->dev; + + ret = xgene_mdiobus_register(pdata, mdio_bus); + if (ret) { + netdev_err(ndev, "Failed to register MDIO bus\n"); + mdiobus_free(mdio_bus); + return ret; + } + pdata->mdio_bus = mdio_bus; + + ret = xgene_enet_phy_connect(ndev); + if (ret) + xgene_enet_mdio_remove(pdata); + + return ret; +} + +void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) +{ + mdiobus_unregister(pdata->mdio_bus); + mdiobus_free(pdata->mdio_bus); + pdata->mdio_bus = NULL; +} + +struct xgene_mac_ops xgene_gmac_ops = { + .init = xgene_gmac_init, + .reset = xgene_gmac_reset, + .rx_enable = xgene_gmac_rx_enable, + .tx_enable = xgene_gmac_tx_enable, + .rx_disable = xgene_gmac_rx_disable, + .tx_disable = xgene_gmac_tx_disable, + .set_mac_addr = xgene_gmac_set_mac_addr, +}; + +struct xgene_port_ops xgene_gport_ops = { + .reset = xgene_enet_reset, + .cle_bypass = xgene_enet_cle_bypass, + .shutdown = xgene_gport_shutdown, +}; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h new file mode 100644 index 000000000..d9bc89d69 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -0,0 +1,331 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __XGENE_ENET_HW_H__ +#define __XGENE_ENET_HW_H__ + +#include "xgene_enet_main.h" + +struct xgene_enet_pdata; +struct xgene_enet_stats; + +/* clears and then set bits */ +static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len) +{ + u32 end = start + len - 1; + u32 mask = GENMASK(end, start); + + *dst &= ~mask; + *dst |= (val << start) & mask; +} + +static inline u32 xgene_get_bits(u32 val, u32 start, u32 end) +{ + return (val & GENMASK(end, start)) >> start; +} + +enum xgene_enet_rm { + RM0, + RM1, + RM3 = 3 +}; + +#define CSR_RING_ID 0x0008 +#define OVERWRITE BIT(31) +#define IS_BUFFER_POOL BIT(20) +#define PREFETCH_BUF_EN BIT(21) +#define CSR_RING_ID_BUF 0x000c +#define CSR_RING_NE_INT_MODE 0x017c +#define CSR_RING_CONFIG 0x006c +#define CSR_RING_WR_BASE 0x0070 +#define NUM_RING_CONFIG 5 +#define BUFPOOL_MODE 3 +#define INC_DEC_CMD_ADDR 0x002c +#define UDP_HDR_SIZE 2 +#define BUF_LEN_CODE_2K 0x5000 + +#define CREATE_MASK(pos, len) GENMASK((pos)+(len)-1, (pos)) +#define CREATE_MASK_ULL(pos, len) GENMASK_ULL((pos)+(len)-1, (pos)) + +/* Empty slot soft signature */ +#define EMPTY_SLOT_INDEX 1 +#define EMPTY_SLOT ~0ULL + +#define WORK_DESC_SIZE 32 +#define BUFPOOL_DESC_SIZE 16 + +#define RING_OWNER_MASK GENMASK(9, 6) +#define RING_BUFNUM_MASK GENMASK(5, 0) + +#define SELTHRSH_POS 3 +#define SELTHRSH_LEN 3 +#define RINGADDRL_POS 5 +#define RINGADDRL_LEN 27 +#define RINGADDRH_POS 0 +#define RINGADDRH_LEN 6 +#define RINGSIZE_POS 23 +#define RINGSIZE_LEN 3 +#define RINGTYPE_POS 19 +#define RINGTYPE_LEN 2 +#define RINGMODE_POS 20 +#define RINGMODE_LEN 3 +#define RECOMTIMEOUTL_POS 28 +#define RECOMTIMEOUTL_LEN 3 +#define RECOMTIMEOUTH_POS 0 +#define RECOMTIMEOUTH_LEN 2 +#define NUMMSGSINQ_POS 1 +#define NUMMSGSINQ_LEN 16 +#define ACCEPTLERR BIT(19) +#define QCOHERENT BIT(4) +#define RECOMBBUF BIT(27) + +#define MAC_OFFSET 0x30 + +#define BLOCK_ETH_CSR_OFFSET 0x2000 +#define BLOCK_ETH_RING_IF_OFFSET 0x9000 +#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 + +#define BLOCK_ETH_MAC_OFFSET 0x0000 +#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 + +#define CLKEN_ADDR 0xc208 +#define SRST_ADDR 0xc200 + +#define MAC_ADDR_REG_OFFSET 0x00 +#define MAC_COMMAND_REG_OFFSET 0x04 +#define MAC_WRITE_REG_OFFSET 0x08 +#define MAC_READ_REG_OFFSET 0x0c +#define MAC_COMMAND_DONE_REG_OFFSET 0x10 + +#define MII_MGMT_CONFIG_ADDR 0x20 +#define MII_MGMT_COMMAND_ADDR 0x24 +#define MII_MGMT_ADDRESS_ADDR 0x28 +#define MII_MGMT_CONTROL_ADDR 0x2c +#define MII_MGMT_STATUS_ADDR 0x30 +#define MII_MGMT_INDICATORS_ADDR 0x34 + +#define BUSY_MASK BIT(0) +#define READ_CYCLE_MASK BIT(0) +#define PHY_CONTROL_SET(dst, val) xgene_set_bits(dst, val, 0, 16) + +#define ENET_SPARE_CFG_REG_ADDR 0x0750 +#define RSIF_CONFIG_REG_ADDR 0x0010 +#define RSIF_RAM_DBG_REG0_ADDR 0x0048 +#define RGMII_REG_0_ADDR 0x07e0 +#define CFG_LINK_AGGR_RESUME_0_ADDR 0x07c8 +#define DEBUG_REG_ADDR 0x0700 +#define CFG_BYPASS_ADDR 0x0294 +#define CLE_BYPASS_REG0_0_ADDR 0x0490 +#define CLE_BYPASS_REG1_0_ADDR 0x0494 +#define CFG_RSIF_FPBUFF_TIMEOUT_EN BIT(31) +#define RESUME_TX BIT(0) +#define CFG_SPEED_1250 BIT(24) +#define TX_PORT0 BIT(0) +#define CFG_BYPASS_UNISEC_TX BIT(2) +#define CFG_BYPASS_UNISEC_RX BIT(1) +#define CFG_CLE_BYPASS_EN0 BIT(31) +#define CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3) + +#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) +#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) +#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) +#define 
CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) +#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16) +#define CFG_CLE_DSTQID0(val) (val & GENMASK(11, 0)) +#define CFG_CLE_FPSEL0(val) ((val << 16) & GENMASK(19, 16)) +#define ICM_CONFIG0_REG_0_ADDR 0x0400 +#define ICM_CONFIG2_REG_0_ADDR 0x0410 +#define RX_DV_GATE_REG_0_ADDR 0x05fc +#define TX_DV_GATE_EN0 BIT(2) +#define RX_DV_GATE_EN0 BIT(1) +#define RESUME_RX0 BIT(0) +#define ENET_CFGSSQMIWQASSOC_ADDR 0xe0 +#define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc +#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0 +#define ENET_CFGSSQMIQMLITEWQASSOC_ADDR 0xf4 +#define ENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70 +#define ENET_BLOCK_MEM_RDY_ADDR 0x74 +#define MAC_CONFIG_1_ADDR 0x00 +#define MAC_CONFIG_2_ADDR 0x04 +#define MAX_FRAME_LEN_ADDR 0x10 +#define INTERFACE_CONTROL_ADDR 0x38 +#define STATION_ADDR0_ADDR 0x40 +#define STATION_ADDR1_ADDR 0x44 +#define PHY_ADDR_SET(dst, val) xgene_set_bits(dst, val, 8, 5) +#define REG_ADDR_SET(dst, val) xgene_set_bits(dst, val, 0, 5) +#define ENET_INTERFACE_MODE2_SET(dst, val) xgene_set_bits(dst, val, 8, 2) +#define MGMT_CLOCK_SEL_SET(dst, val) xgene_set_bits(dst, val, 0, 3) +#define SOFT_RESET1 BIT(31) +#define TX_EN BIT(0) +#define RX_EN BIT(2) +#define ENET_LHD_MODE BIT(25) +#define ENET_GHD_MODE BIT(26) +#define FULL_DUPLEX2 BIT(0) +#define SCAN_AUTO_INCR BIT(5) +#define TBYT_ADDR 0x38 +#define TPKT_ADDR 0x39 +#define TDRP_ADDR 0x45 +#define TFCS_ADDR 0x47 +#define TUND_ADDR 0x4a + +#define TSO_IPPROTO_TCP 1 + +#define USERINFO_POS 0 +#define USERINFO_LEN 32 +#define FPQNUM_POS 32 +#define FPQNUM_LEN 12 +#define LERR_POS 60 +#define LERR_LEN 3 +#define STASH_POS 52 +#define STASH_LEN 2 +#define BUFDATALEN_POS 48 +#define BUFDATALEN_LEN 12 +#define DATAADDR_POS 0 +#define DATAADDR_LEN 42 +#define COHERENT_POS 63 +#define HENQNUM_POS 48 +#define HENQNUM_LEN 12 +#define TYPESEL_POS 44 +#define TYPESEL_LEN 4 +#define ETHHDR_POS 12 +#define ETHHDR_LEN 8 +#define IC_POS 35 /* Insert CRC */ +#define TCPHDR_POS 0 +#define TCPHDR_LEN 6 +#define IPHDR_POS 6 +#define IPHDR_LEN 6 +#define EC_POS 22 /* Enable checksum */ +#define EC_LEN 1 +#define IS_POS 24 /* IP protocol select */ +#define IS_LEN 1 +#define TYPE_ETH_WORK_MESSAGE_POS 44 + +struct xgene_enet_raw_desc { + __le64 m0; + __le64 m1; + __le64 m2; + __le64 m3; +}; + +struct xgene_enet_raw_desc16 { + __le64 m0; + __le64 m1; +}; + +static inline void xgene_enet_mark_desc_slot_empty(void *desc_slot_ptr) +{ + __le64 *desc_slot = desc_slot_ptr; + + desc_slot[EMPTY_SLOT_INDEX] = cpu_to_le64(EMPTY_SLOT); +} + +static inline bool xgene_enet_is_desc_slot_empty(void *desc_slot_ptr) +{ + __le64 *desc_slot = desc_slot_ptr; + + return (desc_slot[EMPTY_SLOT_INDEX] == cpu_to_le64(EMPTY_SLOT)); +} + +enum xgene_enet_ring_cfgsize { + RING_CFGSIZE_512B, + RING_CFGSIZE_2KB, + RING_CFGSIZE_16KB, + RING_CFGSIZE_64KB, + RING_CFGSIZE_512KB, + RING_CFGSIZE_INVALID +}; + +enum xgene_enet_ring_type { + RING_DISABLED, + RING_REGULAR, + RING_BUFPOOL +}; + +enum xgene_ring_owner { + RING_OWNER_ETH0, + RING_OWNER_CPU = 15, + RING_OWNER_INVALID +}; + +enum xgene_enet_ring_bufnum { + RING_BUFNUM_REGULAR = 0x0, + RING_BUFNUM_BUFPOOL = 0x20, + RING_BUFNUM_INVALID +}; + +enum xgene_enet_cmd { + XGENE_ENET_WR_CMD = BIT(31), + XGENE_ENET_RD_CMD = BIT(30) +}; + +enum xgene_enet_err_code { + HBF_READ_DATA = 3, + HBF_LL_READ = 4, + BAD_WORK_MSG = 6, + BUFPOOL_TIMEOUT = 15, + INGRESS_CRC = 16, + INGRESS_CHECKSUM = 17, + INGRESS_TRUNC_FRAME = 18, + INGRESS_PKT_LEN = 19, + INGRESS_PKT_UNDER = 20, + 
INGRESS_FIFO_OVERRUN = 21, + INGRESS_CHECKSUM_COMPUTE = 26, + ERR_CODE_INVALID +}; + +static inline enum xgene_ring_owner xgene_enet_ring_owner(u16 id) +{ + return (id & RING_OWNER_MASK) >> 6; +} + +static inline u8 xgene_enet_ring_bufnum(u16 id) +{ + return id & RING_BUFNUM_MASK; +} + +static inline bool xgene_enet_is_bufpool(u16 id) +{ + return ((id & RING_BUFNUM_MASK) >= 0x20) ? true : false; +} + +static inline u16 xgene_enet_get_numslots(u16 id, u32 size) +{ + bool is_bufpool = xgene_enet_is_bufpool(id); + + return (is_bufpool) ? size / BUFPOOL_DESC_SIZE : + size / WORK_DESC_SIZE; +} + +struct xgene_enet_desc_ring *xgene_enet_setup_ring( + struct xgene_enet_desc_ring *ring); +void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring); +void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, + struct xgene_enet_pdata *pdata, + enum xgene_enet_err_code status); + +int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); +void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); + +extern struct xgene_mac_ops xgene_gmac_ops; +extern struct xgene_port_ops xgene_gport_ops; + +#endif /* __XGENE_ENET_HW_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c new file mode 100644 index 000000000..40d3530d7 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -0,0 +1,1203 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" +#include "xgene_enet_sgmac.h" +#include "xgene_enet_xgmac.h" + +#define RES_ENET_CSR 0 +#define RES_RING_CSR 1 +#define RES_RING_CMD 2 + +static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) +{ + struct xgene_enet_raw_desc16 *raw_desc; + int i; + + for (i = 0; i < buf_pool->slots; i++) { + raw_desc = &buf_pool->raw_desc16[i]; + + /* Hardware expects descriptor in little endian format */ + raw_desc->m0 = cpu_to_le64(i | + SET_VAL(FPQNUM, buf_pool->dst_ring_num) | + SET_VAL(STASH, 3)); + } +} + +static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, + u32 nbuf) +{ + struct sk_buff *skb; + struct xgene_enet_raw_desc16 *raw_desc; + struct net_device *ndev; + struct device *dev; + dma_addr_t dma_addr; + u32 tail = buf_pool->tail; + u32 slots = buf_pool->slots - 1; + u16 bufdatalen, len; + int i; + + ndev = buf_pool->ndev; + dev = ndev_to_dev(buf_pool->ndev); + bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0)); + len = XGENE_ENET_MAX_MTU; + + for (i = 0; i < nbuf; i++) { + raw_desc = &buf_pool->raw_desc16[tail]; + + skb = netdev_alloc_skb_ip_align(ndev, len); + if (unlikely(!skb)) + return -ENOMEM; + buf_pool->rx_skb[tail] = skb; + + dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + netdev_err(ndev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | + SET_VAL(BUFDATALEN, bufdatalen) | + SET_BIT(COHERENT)); + tail = (tail + 1) & slots; + } + + iowrite32(nbuf, buf_pool->cmd); + buf_pool->tail = tail; + + return 0; +} + +static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + return ((u16)pdata->rm << 10) | ring->num; +} + +static u8 xgene_enet_hdr_len(const void *data) +{ + const struct ethhdr *eth = data; + + return (eth->h_proto == htons(ETH_P_8021Q)) ? 
VLAN_ETH_HLEN : ETH_HLEN; +} + +static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) +{ + u32 __iomem *cmd_base = ring->cmd_base; + u32 ring_state, num_msgs; + + ring_state = ioread32(&cmd_base[1]); + num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN); + + return num_msgs >> NUMMSGSINQ_POS; +} + +static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) +{ + struct xgene_enet_raw_desc16 *raw_desc; + u32 slots = buf_pool->slots - 1; + u32 tail = buf_pool->tail; + u32 userinfo; + int i, len; + + len = xgene_enet_ring_len(buf_pool); + for (i = 0; i < len; i++) { + tail = (tail - 1) & slots; + raw_desc = &buf_pool->raw_desc16[tail]; + + /* Hardware stores descriptor in little endian format */ + userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + dev_kfree_skb_any(buf_pool->rx_skb[userinfo]); + } + + iowrite32(-len, buf_pool->cmd); + buf_pool->tail = tail; +} + +static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) +{ + struct xgene_enet_desc_ring *rx_ring = data; + + if (napi_schedule_prep(&rx_ring->napi)) { + disable_irq_nosync(irq); + __napi_schedule(&rx_ring->napi); + } + + return IRQ_HANDLED; +} + +static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, + struct xgene_enet_raw_desc *raw_desc) +{ + struct sk_buff *skb; + struct device *dev; + u16 skb_index; + u8 status; + int ret = 0; + + skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + skb = cp_ring->cp_skb[skb_index]; + + dev = ndev_to_dev(cp_ring->ndev); + dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), + GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)), + DMA_TO_DEVICE); + + /* Checking for error */ + status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); + if (unlikely(status > 2)) { + xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev), + status); + ret = -EIO; + } + + if (likely(skb)) { + dev_kfree_skb_any(skb); + } else { + netdev_err(cp_ring->ndev, "completion skb is NULL\n"); + ret = -EIO; + } + + return ret; +} + +static u64 xgene_enet_work_msg(struct sk_buff *skb) +{ + struct iphdr *iph; + u8 l3hlen, l4hlen = 0; + u8 csum_enable = 0; + u8 proto = 0; + u8 ethhdr; + u64 hopinfo; + + if (unlikely(skb->protocol != htons(ETH_P_IP)) && + unlikely(skb->protocol != htons(ETH_P_8021Q))) + goto out; + + if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM))) + goto out; + + iph = ip_hdr(skb); + if (unlikely(ip_is_fragment(iph))) + goto out; + + if (likely(iph->protocol == IPPROTO_TCP)) { + l4hlen = tcp_hdrlen(skb) >> 2; + csum_enable = 1; + proto = TSO_IPPROTO_TCP; + } else if (iph->protocol == IPPROTO_UDP) { + l4hlen = UDP_HDR_SIZE; + csum_enable = 1; + } +out: + l3hlen = ip_hdrlen(skb) >> 2; + ethhdr = xgene_enet_hdr_len(skb->data); + hopinfo = SET_VAL(TCPHDR, l4hlen) | + SET_VAL(IPHDR, l3hlen) | + SET_VAL(ETHHDR, ethhdr) | + SET_VAL(EC, csum_enable) | + SET_VAL(IS, proto) | + SET_BIT(IC) | + SET_BIT(TYPE_ETH_WORK_MESSAGE); + + return hopinfo; +} + +static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, + struct sk_buff *skb) +{ + struct device *dev = ndev_to_dev(tx_ring->ndev); + struct xgene_enet_raw_desc *raw_desc; + dma_addr_t dma_addr; + u16 tail = tx_ring->tail; + u64 hopinfo; + + raw_desc = &tx_ring->raw_desc[tail]; + memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc)); + + dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + netdev_err(tx_ring->ndev, "DMA mapping error\n"); + return -EINVAL; + } + + /* Hardware expects descriptor in 
little endian format */ + raw_desc->m0 = cpu_to_le64(tail); + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | + SET_VAL(BUFDATALEN, skb->len) | + SET_BIT(COHERENT)); + hopinfo = xgene_enet_work_msg(skb); + raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | + hopinfo); + tx_ring->cp_ring->cp_skb[tail] = skb; + + return 0; +} + +static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; + struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; + u32 tx_level, cq_level; + + tx_level = xgene_enet_ring_len(tx_ring); + cq_level = xgene_enet_ring_len(cp_ring); + if (unlikely(tx_level > pdata->tx_qcnt_hi || + cq_level > pdata->cp_qcnt_hi)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + if (xgene_enet_setup_tx_desc(tx_ring, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + iowrite32(1, tx_ring->cmd); + skb_tx_timestamp(skb); + tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1); + + pdata->stats.tx_packets++; + pdata->stats.tx_bytes += skb->len; + + return NETDEV_TX_OK; +} + +static void xgene_enet_skip_csum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + + if (!ip_is_fragment(iph) || + (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, + struct xgene_enet_raw_desc *raw_desc) +{ + struct net_device *ndev; + struct xgene_enet_pdata *pdata; + struct device *dev; + struct xgene_enet_desc_ring *buf_pool; + u32 datalen, skb_index; + struct sk_buff *skb; + u8 status; + int ret = 0; + + ndev = rx_ring->ndev; + pdata = netdev_priv(ndev); + dev = ndev_to_dev(rx_ring->ndev); + buf_pool = rx_ring->buf_pool; + + dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), + XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE); + skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + skb = buf_pool->rx_skb[skb_index]; + + /* checking for error */ + status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); + if (unlikely(status > 2)) { + dev_kfree_skb_any(skb); + xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), + status); + pdata->stats.rx_dropped++; + ret = -EIO; + goto out; + } + + /* strip off CRC as HW isn't doing this */ + datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)); + datalen -= 4; + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, datalen); + + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, ndev); + if (likely((ndev->features & NETIF_F_IP_CSUM) && + skb->protocol == htons(ETH_P_IP))) { + xgene_enet_skip_csum(skb); + } + + pdata->stats.rx_packets++; + pdata->stats.rx_bytes += datalen; + napi_gro_receive(&rx_ring->napi, skb); +out: + if (--rx_ring->nbufpool == 0) { + ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL); + rx_ring->nbufpool = NUM_BUFPOOL; + } + + return ret; +} + +static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) +{ + return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? 
true : false; +} + +static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, + int budget) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + struct xgene_enet_raw_desc *raw_desc; + u16 head = ring->head; + u16 slots = ring->slots - 1; + int ret, count = 0; + + do { + raw_desc = &ring->raw_desc[head]; + if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) + break; + + /* read fpqnum field after dataaddr field */ + dma_rmb(); + if (is_rx_desc(raw_desc)) + ret = xgene_enet_rx_frame(ring, raw_desc); + else + ret = xgene_enet_tx_completion(ring, raw_desc); + xgene_enet_mark_desc_slot_empty(raw_desc); + + head = (head + 1) & slots; + count++; + + if (ret) + break; + } while (--budget); + + if (likely(count)) { + iowrite32(-count, ring->cmd); + ring->head = head; + + if (netif_queue_stopped(ring->ndev)) { + if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low) + netif_wake_queue(ring->ndev); + } + } + + return count; +} + +static int xgene_enet_napi(struct napi_struct *napi, const int budget) +{ + struct xgene_enet_desc_ring *ring; + int processed; + + ring = container_of(napi, struct xgene_enet_desc_ring, napi); + processed = xgene_enet_process_ring(ring, budget); + + if (processed != budget) { + napi_complete(napi); + enable_irq(ring->irq); + } + + return processed; +} + +static void xgene_enet_timeout(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + + pdata->mac_ops->reset(pdata); +} + +static int xgene_enet_register_irq(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + struct xgene_enet_desc_ring *ring; + int ret; + + ring = pdata->rx_ring; + ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, + IRQF_SHARED, ring->irq_name, ring); + if (ret) + netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); + + if (pdata->cq_cnt) { + ring = pdata->tx_ring->cp_ring; + ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, + IRQF_SHARED, ring->irq_name, ring); + if (ret) { + netdev_err(ndev, "Failed to request irq %s\n", + ring->irq_name); + } + } + + return ret; +} + +static void xgene_enet_free_irq(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata; + struct device *dev; + + pdata = netdev_priv(ndev); + dev = ndev_to_dev(ndev); + devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); + + if (pdata->cq_cnt) { + devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, + pdata->tx_ring->cp_ring); + } +} + +static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) +{ + struct napi_struct *napi; + + napi = &pdata->rx_ring->napi; + napi_enable(napi); + + if (pdata->cq_cnt) { + napi = &pdata->tx_ring->cp_ring->napi; + napi_enable(napi); + } +} + +static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) +{ + struct napi_struct *napi; + + napi = &pdata->rx_ring->napi; + napi_disable(napi); + + if (pdata->cq_cnt) { + napi = &pdata->tx_ring->cp_ring->napi; + napi_disable(napi); + } +} + +static int xgene_enet_open(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_mac_ops *mac_ops = pdata->mac_ops; + int ret; + + mac_ops->tx_enable(pdata); + mac_ops->rx_enable(pdata); + + ret = xgene_enet_register_irq(ndev); + if (ret) + return ret; + xgene_enet_napi_enable(pdata); + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + phy_start(pdata->phy_dev); + else + schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF); + + netif_start_queue(ndev); + + return ret; +} + +static int 
xgene_enet_close(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_mac_ops *mac_ops = pdata->mac_ops; + + netif_stop_queue(ndev); + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + phy_stop(pdata->phy_dev); + else + cancel_delayed_work_sync(&pdata->link_work); + + xgene_enet_napi_disable(pdata); + xgene_enet_free_irq(ndev); + xgene_enet_process_ring(pdata->rx_ring, -1); + + mac_ops->tx_disable(pdata); + mac_ops->rx_disable(pdata); + + return 0; +} + +static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata; + struct device *dev; + + pdata = netdev_priv(ring->ndev); + dev = ndev_to_dev(ring->ndev); + + xgene_enet_clear_ring(ring); + dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); +} + +static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_desc_ring *buf_pool; + + if (pdata->tx_ring) { + xgene_enet_delete_ring(pdata->tx_ring); + pdata->tx_ring = NULL; + } + + if (pdata->rx_ring) { + buf_pool = pdata->rx_ring->buf_pool; + xgene_enet_delete_bufpool(buf_pool); + xgene_enet_delete_ring(buf_pool); + xgene_enet_delete_ring(pdata->rx_ring); + pdata->rx_ring = NULL; + } +} + +static int xgene_enet_get_ring_size(struct device *dev, + enum xgene_enet_ring_cfgsize cfgsize) +{ + int size = -EINVAL; + + switch (cfgsize) { + case RING_CFGSIZE_512B: + size = 0x200; + break; + case RING_CFGSIZE_2KB: + size = 0x800; + break; + case RING_CFGSIZE_16KB: + size = 0x4000; + break; + case RING_CFGSIZE_64KB: + size = 0x10000; + break; + case RING_CFGSIZE_512KB: + size = 0x80000; + break; + default: + dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize); + break; + } + + return size; +} + +static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) +{ + struct device *dev; + + if (!ring) + return; + + dev = ndev_to_dev(ring->ndev); + + if (ring->desc_addr) { + xgene_enet_clear_ring(ring); + dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); + } + devm_kfree(dev, ring); +} + +static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) +{ + struct device *dev = &pdata->pdev->dev; + struct xgene_enet_desc_ring *ring; + + ring = pdata->tx_ring; + if (ring) { + if (ring->cp_ring && ring->cp_ring->cp_skb) + devm_kfree(dev, ring->cp_ring->cp_skb); + if (ring->cp_ring && pdata->cq_cnt) + xgene_enet_free_desc_ring(ring->cp_ring); + xgene_enet_free_desc_ring(ring); + } + + ring = pdata->rx_ring; + if (ring) { + if (ring->buf_pool) { + if (ring->buf_pool->rx_skb) + devm_kfree(dev, ring->buf_pool->rx_skb); + xgene_enet_free_desc_ring(ring->buf_pool); + } + xgene_enet_free_desc_ring(ring); + } +} + +static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( + struct net_device *ndev, u32 ring_num, + enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) +{ + struct xgene_enet_desc_ring *ring; + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + int size; + + size = xgene_enet_get_ring_size(dev, cfgsize); + if (size < 0) + return NULL; + + ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring), + GFP_KERNEL); + if (!ring) + return NULL; + + ring->ndev = ndev; + ring->num = ring_num; + ring->cfgsize = cfgsize; + ring->id = ring_id; + + ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma, + GFP_KERNEL); + if (!ring->desc_addr) { + devm_kfree(dev, ring); + return NULL; + } + ring->size = size; + + ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6); + ring->cmd = 
ring->cmd_base + INC_DEC_CMD_ADDR; + ring = xgene_enet_setup_ring(ring); + netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n", + ring->num, ring->size, ring->id, ring->slots); + + return ring; +} + +static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum) +{ + return (owner << 6) | (bufnum & GENMASK(5, 0)); +} + +static int xgene_enet_create_desc_rings(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; + struct xgene_enet_desc_ring *buf_pool = NULL; + u8 cpu_bufnum = pdata->cpu_bufnum; + u8 eth_bufnum = pdata->eth_bufnum; + u8 bp_bufnum = pdata->bp_bufnum; + u16 ring_num = pdata->ring_num; + u16 ring_id; + int ret; + + /* allocate rx descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); + rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, ring_id); + if (!rx_ring) { + ret = -ENOMEM; + goto err; + } + + /* allocate buffer pool for receiving packets */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++); + buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_2KB, ring_id); + if (!buf_pool) { + ret = -ENOMEM; + goto err; + } + + rx_ring->nbufpool = NUM_BUFPOOL; + rx_ring->buf_pool = buf_pool; + rx_ring->irq = pdata->rx_irq; + if (!pdata->cq_cnt) { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", + ndev->name); + } else { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name); + } + buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, + sizeof(struct sk_buff *), GFP_KERNEL); + if (!buf_pool->rx_skb) { + ret = -ENOMEM; + goto err; + } + + buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); + rx_ring->buf_pool = buf_pool; + pdata->rx_ring = rx_ring; + + /* allocate tx descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++); + tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, ring_id); + if (!tx_ring) { + ret = -ENOMEM; + goto err; + } + pdata->tx_ring = tx_ring; + + if (!pdata->cq_cnt) { + cp_ring = pdata->rx_ring; + } else { + /* allocate tx completion descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); + cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, + ring_id); + if (!cp_ring) { + ret = -ENOMEM; + goto err; + } + cp_ring->irq = pdata->txc_irq; + snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name); + } + + cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, + sizeof(struct sk_buff *), GFP_KERNEL); + if (!cp_ring->cp_skb) { + ret = -ENOMEM; + goto err; + } + pdata->tx_ring->cp_ring = cp_ring; + pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + + pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; + pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; + pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; + + return 0; + +err: + xgene_enet_free_desc_rings(pdata); + return ret; +} + +static struct rtnl_link_stats64 *xgene_enet_get_stats64( + struct net_device *ndev, + struct rtnl_link_stats64 *storage) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct rtnl_link_stats64 *stats = &pdata->stats; + + stats->rx_errors += stats->rx_length_errors + + stats->rx_crc_errors + + stats->rx_frame_errors + + stats->rx_fifo_errors; + memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); + + return storage; +} + +static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) +{ 
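	/* .ndo_set_mac_address handler: eth_mac_addr() validates and stores
	 * the new address in ndev->dev_addr, then the per-MAC set_mac_addr
	 * op programs it into the hardware. */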
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev); + int ret; + + ret = eth_mac_addr(ndev, addr); + if (ret) + return ret; + pdata->mac_ops->set_mac_addr(pdata); + + return ret; +} + +static const struct net_device_ops xgene_ndev_ops = { + .ndo_open = xgene_enet_open, + .ndo_stop = xgene_enet_close, + .ndo_start_xmit = xgene_enet_start_xmit, + .ndo_tx_timeout = xgene_enet_timeout, + .ndo_get_stats64 = xgene_enet_get_stats64, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = xgene_enet_set_mac_address, +}; + +static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata) +{ + u32 id = 0; + int ret; + + ret = device_property_read_u32(dev, "port-id", &id); + if (!ret && id > 1) { + dev_err(dev, "Incorrect port-id specified\n"); + return -ENODEV; + } + + pdata->port_id = id; + + return 0; +} + +static int xgene_get_mac_address(struct device *dev, + unsigned char *addr) +{ + int ret; + + ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6); + if (ret) + ret = device_property_read_u8_array(dev, "mac-address", + addr, 6); + if (ret) + return -ENODEV; + + return ETH_ALEN; +} + +static int xgene_get_phy_mode(struct device *dev) +{ + int i, ret; + char *modestr; + + ret = device_property_read_string(dev, "phy-connection-type", + (const char **)&modestr); + if (ret) + ret = device_property_read_string(dev, "phy-mode", + (const char **)&modestr); + if (ret) + return -ENODEV; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { + if (!strcasecmp(modestr, phy_modes(i))) + return i; + } + return -ENODEV; +} + +static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) +{ + struct platform_device *pdev; + struct net_device *ndev; + struct device *dev; + struct resource *res; + void __iomem *base_addr; + int ret; + + pdev = pdata->pdev; + dev = &pdev->dev; + ndev = pdata->ndev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR); + if (!res) { + dev_err(dev, "Resource enet_csr not defined\n"); + return -ENODEV; + } + pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res)); + if (!pdata->base_addr) { + dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); + return -ENOMEM; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR); + if (!res) { + dev_err(dev, "Resource ring_csr not defined\n"); + return -ENODEV; + } + pdata->ring_csr_addr = devm_ioremap(dev, res->start, + resource_size(res)); + if (!pdata->ring_csr_addr) { + dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); + return -ENOMEM; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD); + if (!res) { + dev_err(dev, "Resource ring_cmd not defined\n"); + return -ENODEV; + } + pdata->ring_cmd_addr = devm_ioremap(dev, res->start, + resource_size(res)); + if (!pdata->ring_cmd_addr) { + dev_err(dev, "Unable to retrieve ENET Ring command region\n"); + return -ENOMEM; + } + + ret = xgene_get_port_id(dev, pdata); + if (ret) + return ret; + + if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN) + eth_hw_addr_random(ndev); + + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + + pdata->phy_mode = xgene_get_phy_mode(dev); + if (pdata->phy_mode < 0) { + dev_err(dev, "Unable to get phy-connection-type\n"); + return pdata->phy_mode; + } + if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII && + pdata->phy_mode != PHY_INTERFACE_MODE_SGMII && + pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { + dev_err(dev, "Incorrect phy-connection-type specified\n"); + return -ENODEV; + } + + ret = platform_get_irq(pdev, 0); + if (ret <= 0) 
{ + dev_err(dev, "Unable to get ENET Rx IRQ\n"); + ret = ret ? : -ENXIO; + return ret; + } + pdata->rx_irq = ret; + + if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) { + ret = platform_get_irq(pdev, 1); + if (ret <= 0) { + dev_err(dev, "Unable to get ENET Tx completion IRQ\n"); + ret = ret ? : -ENXIO; + return ret; + } + pdata->txc_irq = ret; + } + + pdata->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pdata->clk)) { + /* Firmware may have set up the clock already. */ + pdata->clk = NULL; + } + + base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET); + pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; + pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; + pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || + pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { + pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET; + pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET; + } else { + pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; + pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; + } + pdata->rx_buff_cnt = NUM_PKT_BUF; + + return 0; +} + +static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct xgene_enet_desc_ring *buf_pool; + u16 dst_ring_num; + int ret; + + ret = pdata->port_ops->reset(pdata); + if (ret) + return ret; + + ret = xgene_enet_create_desc_rings(ndev); + if (ret) { + netdev_err(ndev, "Error in ring configuration\n"); + return ret; + } + + /* setup buffer pool */ + buf_pool = pdata->rx_ring->buf_pool; + xgene_enet_init_bufpool(buf_pool); + ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); + if (ret) { + xgene_enet_delete_desc_rings(pdata); + return ret; + } + + dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); + pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); + pdata->mac_ops->init(pdata); + + return ret; +} + +static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) +{ + switch (pdata->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + pdata->mac_ops = &xgene_gmac_ops; + pdata->port_ops = &xgene_gport_ops; + pdata->rm = RM3; + break; + case PHY_INTERFACE_MODE_SGMII: + pdata->mac_ops = &xgene_sgmac_ops; + pdata->port_ops = &xgene_sgport_ops; + pdata->rm = RM1; + pdata->cq_cnt = XGENE_MAX_TXC_RINGS; + break; + default: + pdata->mac_ops = &xgene_xgmac_ops; + pdata->port_ops = &xgene_xgport_ops; + pdata->rm = RM0; + pdata->cq_cnt = XGENE_MAX_TXC_RINGS; + break; + } + + switch (pdata->port_id) { + case 0: + pdata->cpu_bufnum = START_CPU_BUFNUM_0; + pdata->eth_bufnum = START_ETH_BUFNUM_0; + pdata->bp_bufnum = START_BP_BUFNUM_0; + pdata->ring_num = START_RING_NUM_0; + break; + case 1: + pdata->cpu_bufnum = START_CPU_BUFNUM_1; + pdata->eth_bufnum = START_ETH_BUFNUM_1; + pdata->bp_bufnum = START_BP_BUFNUM_1; + pdata->ring_num = START_RING_NUM_1; + break; + default: + break; + } + +} + +static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) +{ + struct napi_struct *napi; + + napi = &pdata->rx_ring->napi; + netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); + + if (pdata->cq_cnt) { + napi = &pdata->tx_ring->cp_ring->napi; + netif_napi_add(pdata->ndev, napi, xgene_enet_napi, + NAPI_POLL_WEIGHT); + } +} + +static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) +{ + struct napi_struct *napi; + + napi = &pdata->rx_ring->napi; + netif_napi_del(napi); + + if (pdata->cq_cnt) { + napi = &pdata->tx_ring->cp_ring->napi; + 
netif_napi_del(napi); + } +} + +static int xgene_enet_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + struct xgene_enet_pdata *pdata; + struct device *dev = &pdev->dev; + struct xgene_mac_ops *mac_ops; + int ret; + + ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); + if (!ndev) + return -ENOMEM; + + pdata = netdev_priv(ndev); + + pdata->pdev = pdev; + pdata->ndev = ndev; + SET_NETDEV_DEV(ndev, dev); + platform_set_drvdata(pdev, pdata); + ndev->netdev_ops = &xgene_ndev_ops; + xgene_enet_set_ethtool_ops(ndev); + ndev->features |= NETIF_F_IP_CSUM | + NETIF_F_GSO | + NETIF_F_GRO; + + ret = xgene_enet_get_resources(pdata); + if (ret) + goto err; + + xgene_enet_setup_ops(pdata); + + ret = register_netdev(ndev); + if (ret) { + netdev_err(ndev, "Failed to register netdev\n"); + goto err; + } + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) { + netdev_err(ndev, "No usable DMA configuration\n"); + goto err; + } + + ret = xgene_enet_init_hw(pdata); + if (ret) + goto err; + + xgene_enet_napi_add(pdata); + mac_ops = pdata->mac_ops; + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + ret = xgene_enet_mdio_config(pdata); + else + INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); + + return ret; +err: + unregister_netdev(ndev); + free_netdev(ndev); + return ret; +} + +static int xgene_enet_remove(struct platform_device *pdev) +{ + struct xgene_enet_pdata *pdata; + struct xgene_mac_ops *mac_ops; + struct net_device *ndev; + + pdata = platform_get_drvdata(pdev); + mac_ops = pdata->mac_ops; + ndev = pdata->ndev; + + mac_ops->rx_disable(pdata); + mac_ops->tx_disable(pdata); + + xgene_enet_napi_del(pdata); + xgene_enet_mdio_remove(pdata); + xgene_enet_delete_desc_rings(pdata); + unregister_netdev(ndev); + pdata->port_ops->shutdown(pdata); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_enet_acpi_match[] = { + { "APMC0D05", }, + { "APMC0D30", }, + { "APMC0D31", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id xgene_enet_of_match[] = { + {.compatible = "apm,xgene-enet",}, + {.compatible = "apm,xgene1-sgenet",}, + {.compatible = "apm,xgene1-xgenet",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, xgene_enet_of_match); +#endif + +static struct platform_driver xgene_enet_driver = { + .driver = { + .name = "xgene-enet", + .of_match_table = of_match_ptr(xgene_enet_of_match), + .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), + }, + .probe = xgene_enet_probe, + .remove = xgene_enet_remove, +}; + +module_platform_driver(xgene_enet_driver); + +MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver"); +MODULE_VERSION(XGENE_DRV_VERSION); +MODULE_AUTHOR("Iyappan Subramanian "); +MODULE_AUTHOR("Keyur Chudgar "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h new file mode 100644 index 000000000..8f3d232b0 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -0,0 +1,186 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __XGENE_ENET_MAIN_H__ +#define __XGENE_ENET_MAIN_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xgene_enet_hw.h" + +#define XGENE_DRV_VERSION "v1.0" +#define XGENE_ENET_MAX_MTU 1536 +#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) +#define NUM_PKT_BUF 64 +#define NUM_BUFPOOL 32 + +#define START_CPU_BUFNUM_0 0 +#define START_ETH_BUFNUM_0 2 +#define START_BP_BUFNUM_0 0x22 +#define START_RING_NUM_0 8 +#define START_CPU_BUFNUM_1 12 +#define START_ETH_BUFNUM_1 10 +#define START_BP_BUFNUM_1 0x2A +#define START_RING_NUM_1 264 + +#define IRQ_ID_SIZE 16 +#define XGENE_MAX_TXC_RINGS 1 + +#define PHY_POLL_LINK_ON (10 * HZ) +#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) + +/* software context of a descriptor ring */ +struct xgene_enet_desc_ring { + struct net_device *ndev; + u16 id; + u16 num; + u16 head; + u16 tail; + u16 slots; + u16 irq; + char irq_name[IRQ_ID_SIZE]; + u32 size; + u32 state[NUM_RING_CONFIG]; + void __iomem *cmd_base; + void __iomem *cmd; + dma_addr_t dma; + u16 dst_ring_num; + u8 nbufpool; + struct sk_buff *(*rx_skb); + struct sk_buff *(*cp_skb); + enum xgene_enet_ring_cfgsize cfgsize; + struct xgene_enet_desc_ring *cp_ring; + struct xgene_enet_desc_ring *buf_pool; + struct napi_struct napi; + union { + void *desc_addr; + struct xgene_enet_raw_desc *raw_desc; + struct xgene_enet_raw_desc16 *raw_desc16; + }; +}; + +struct xgene_mac_ops { + void (*init)(struct xgene_enet_pdata *pdata); + void (*reset)(struct xgene_enet_pdata *pdata); + void (*tx_enable)(struct xgene_enet_pdata *pdata); + void (*rx_enable)(struct xgene_enet_pdata *pdata); + void (*tx_disable)(struct xgene_enet_pdata *pdata); + void (*rx_disable)(struct xgene_enet_pdata *pdata); + void (*set_mac_addr)(struct xgene_enet_pdata *pdata); + void (*link_state)(struct work_struct *work); +}; + +struct xgene_port_ops { + int (*reset)(struct xgene_enet_pdata *pdata); + void (*cle_bypass)(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id); + void (*shutdown)(struct xgene_enet_pdata *pdata); +}; + +/* ethernet private data */ +struct xgene_enet_pdata { + struct net_device *ndev; + struct mii_bus *mdio_bus; + struct phy_device *phy_dev; + int phy_speed; + struct clk *clk; + struct platform_device *pdev; + struct xgene_enet_desc_ring *tx_ring; + struct xgene_enet_desc_ring *rx_ring; + char *dev_name; + u32 rx_buff_cnt; + u32 tx_qcnt_hi; + u32 cp_qcnt_hi; + u32 cp_qcnt_low; + u32 rx_irq; + u32 txc_irq; + u8 cq_cnt; + void __iomem *eth_csr_addr; + void __iomem *eth_ring_if_addr; + void __iomem *eth_diag_csr_addr; + void __iomem *mcx_mac_addr; + void __iomem *mcx_mac_csr_addr; + void __iomem *base_addr; + void __iomem *ring_csr_addr; + void __iomem *ring_cmd_addr; + int phy_mode; + enum xgene_enet_rm rm; + struct rtnl_link_stats64 stats; + struct xgene_mac_ops *mac_ops; + struct xgene_port_ops *port_ops; + struct delayed_work link_work; + u32 port_id; + u8 cpu_bufnum; + u8 eth_bufnum; + u8 bp_bufnum; + u16 ring_num; +}; + +struct xgene_indirect_ctl { + void __iomem *addr; + void __iomem *ctl; + void __iomem *cmd; + void __iomem 
*cmd_done; +}; + +/* Set the specified value into a bit-field defined by its starting position + * and length within a single u64. + */ +static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val) +{ + return (val & ((1ULL << len) - 1)) << pos; +} + +#define SET_VAL(field, val) \ + xgene_enet_set_field_value(field ## _POS, field ## _LEN, val) + +#define SET_BIT(field) \ + xgene_enet_set_field_value(field ## _POS, 1, 1) + +/* Get the value from a bit-field defined by its starting position + * and length within the specified u64. + */ +static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) +{ + return (src >> pos) & ((1ULL << len) - 1); +} + +#define GET_VAL(field, src) \ + xgene_enet_get_field_value(field ## _POS, field ## _LEN, src) + +static inline struct device *ndev_to_dev(struct net_device *ndev) +{ + return ndev->dev.parent; +} + +void xgene_enet_set_ethtool_ops(struct net_device *netdev); + +#endif /* __XGENE_ENET_MAIN_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c new file mode 100644 index 000000000..f27fb6f2a --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -0,0 +1,394 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" +#include "xgene_enet_sgmac.h" + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) +{ + iowrite32(val, p->eth_csr_addr + offset); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p, + u32 offset, u32 val) +{ + iowrite32(val, p->eth_ring_if_addr + offset); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p, + u32 offset, u32 val) +{ + iowrite32(val, p->eth_diag_csr_addr + offset); +} + +static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl, + u32 wr_addr, u32 wr_data) +{ + int i; + + iowrite32(wr_addr, ctl->addr); + iowrite32(wr_data, ctl->ctl); + iowrite32(XGENE_ENET_WR_CMD, ctl->cmd); + + /* wait for write command to complete */ + for (i = 0; i < 10; i++) { + if (ioread32(ctl->cmd_done)) { + iowrite32(0, ctl->cmd); + return true; + } + udelay(1); + } + + return false; +} + +static void xgene_enet_wr_mac(struct xgene_enet_pdata *p, + u32 wr_addr, u32 wr_data) +{ + struct xgene_indirect_ctl ctl = { + .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET, + .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET, + .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET, + .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET + }; + + if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data)) + netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr); +} + +static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->eth_csr_addr + offset); +} + +static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->eth_diag_csr_addr + offset); +} + +static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr) +{ + u32 rd_data; + int i; + + iowrite32(rd_addr, ctl->addr); + iowrite32(XGENE_ENET_RD_CMD, ctl->cmd); + + /* wait for read command to complete */ + for (i = 0; i < 10; i++) { + if (ioread32(ctl->cmd_done)) { + rd_data = ioread32(ctl->ctl); + iowrite32(0, ctl->cmd); + + return rd_data; + } + udelay(1); + } + + pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr); + + return 0; +} + +static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr) +{ + struct xgene_indirect_ctl ctl = { + .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET, + .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET, + .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET, + .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET + }; + + return xgene_enet_rd_indirect(&ctl, rd_addr); +} + +static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) +{ + struct net_device *ndev = p->ndev; + u32 data; + int i = 0; + + xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); + do { + usleep_range(100, 110); + data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); + if (data == ~0U) + return 0; + } while (++i < 10); + + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) +{ + u32 val = 0xffffffff; + + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val); +} + +static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id, + u32 reg, u16 data) +{ + u32 addr, wr_data, done; + int i; + + addr = PHY_ADDR(phy_id) | REG_ADDR(reg); + xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); + + wr_data = PHY_CONTROL(data); + xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data); + + for (i = 0; i < 10; i++) { + done = xgene_enet_rd_mac(p, 
MII_MGMT_INDICATORS_ADDR); + if (!(done & BUSY_MASK)) + return; + usleep_range(10, 20); + } + + netdev_err(p->ndev, "MII_MGMT write failed\n"); +} + +static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg) +{ + u32 addr, data, done; + int i; + + addr = PHY_ADDR(phy_id) | REG_ADDR(reg); + xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); + xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); + + for (i = 0; i < 10; i++) { + done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR); + if (!(done & BUSY_MASK)) { + data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR); + xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0); + + return data; + } + usleep_range(10, 20); + } + + netdev_err(p->ndev, "MII_MGMT read failed\n"); + + return 0; +} + +static void xgene_sgmac_reset(struct xgene_enet_pdata *p) +{ + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1); + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0); +} + +static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p) +{ + u32 addr0, addr1; + u8 *dev_addr = p->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0); + + addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR); + addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16); + xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1); +} + +static u32 xgene_enet_link_status(struct xgene_enet_pdata *p) +{ + u32 data; + + data = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_BASE_PAGE_ABILITY_ADDR >> 2); + + return data & LINK_UP; +} + +static void xgene_sgmac_init(struct xgene_enet_pdata *p) +{ + u32 data, loop = 10; + u32 offset = p->port_id * 4; + + xgene_sgmac_reset(p); + + /* Enable auto-negotiation */ + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); + + while (loop--) { + data = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_STATUS_ADDR >> 2); + if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS)) + break; + usleep_range(10, 20); + } + if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS)) + netdev_err(p->ndev, "Auto-negotiation failed\n"); + + data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR); + ENET_INTERFACE_MODE2_SET(&data, 2); + xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2); + xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE); + + data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR); + data |= MPA_IDLE_WITH_QMI_EMPTY; + xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data); + + xgene_sgmac_set_mac_addr(p); + + data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR); + data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data); + + /* Adjust MDC clock frequency */ + data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR); + MGMT_CLOCK_SEL_SET(&data, 7); + xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data); + + /* Enable drop if bufpool not available */ + data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR); + data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data); + + /* Rtype should be copied from FP */ + xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0); + + /* Bypass traffic gating */ + xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0); + xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX); + xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0); +} + +static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set) +{ + u32 data; + + data = xgene_enet_rd_mac(p, 
MAC_CONFIG_1_ADDR); + + if (set) + data |= bits; + else + data &= ~bits; + + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data); +} + +static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, RX_EN, true); +} + +static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, TX_EN, true); +} + +static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, RX_EN, false); +} + +static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, TX_EN, false); +} + +static int xgene_enet_reset(struct xgene_enet_pdata *p) +{ + if (!xgene_ring_mgr_init(p)) + return -ENODEV; + + clk_prepare_enable(p->clk); + clk_disable_unprepare(p->clk); + clk_prepare_enable(p->clk); + + xgene_enet_ecc_init(p); + xgene_enet_config_ring_if_assoc(p); + + return 0; +} + +static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 data, fpsel; + u32 offset = p->port_id * MAC_OFFSET; + + data = CFG_CLE_BYPASS_EN0; + xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data); + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel); + xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data); +} + +static void xgene_enet_shutdown(struct xgene_enet_pdata *p) +{ + clk_disable_unprepare(p->clk); +} + +static void xgene_enet_link_state(struct work_struct *work) +{ + struct xgene_enet_pdata *p = container_of(to_delayed_work(work), + struct xgene_enet_pdata, link_work); + struct net_device *ndev = p->ndev; + u32 link, poll_interval; + + link = xgene_enet_link_status(p); + if (link) { + if (!netif_carrier_ok(ndev)) { + netif_carrier_on(ndev); + xgene_sgmac_init(p); + xgene_sgmac_rx_enable(p); + xgene_sgmac_tx_enable(p); + netdev_info(ndev, "Link is Up - 1Gbps\n"); + } + poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(ndev)) { + xgene_sgmac_rx_disable(p); + xgene_sgmac_tx_disable(p); + netif_carrier_off(ndev); + netdev_info(ndev, "Link is Down\n"); + } + poll_interval = PHY_POLL_LINK_OFF; + } + + schedule_delayed_work(&p->link_work, poll_interval); +} + +struct xgene_mac_ops xgene_sgmac_ops = { + .init = xgene_sgmac_init, + .reset = xgene_sgmac_reset, + .rx_enable = xgene_sgmac_rx_enable, + .tx_enable = xgene_sgmac_tx_enable, + .rx_disable = xgene_sgmac_rx_disable, + .tx_disable = xgene_sgmac_tx_disable, + .set_mac_addr = xgene_sgmac_set_mac_addr, + .link_state = xgene_enet_link_state +}; + +struct xgene_port_ops xgene_sgport_ops = { + .reset = xgene_enet_reset, + .cle_bypass = xgene_enet_cle_bypass, + .shutdown = xgene_enet_shutdown +}; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h new file mode 100644 index 000000000..de4324650 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -0,0 +1,41 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __XGENE_ENET_SGMAC_H__ +#define __XGENE_ENET_SGMAC_H__ + +#define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8)) +#define REG_ADDR(src) ((src) & GENMASK(4, 0)) +#define PHY_CONTROL(src) ((src) & GENMASK(15, 0)) +#define INT_PHY_ADDR 0x1e +#define SGMII_TBI_CONTROL_ADDR 0x44 +#define SGMII_CONTROL_ADDR 0x00 +#define SGMII_STATUS_ADDR 0x04 +#define SGMII_BASE_PAGE_ABILITY_ADDR 0x14 +#define AUTO_NEG_COMPLETE BIT(5) +#define LINK_STATUS BIT(2) +#define LINK_UP BIT(15) +#define MPA_IDLE_WITH_QMI_EMPTY BIT(12) +#define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc + +extern struct xgene_mac_ops xgene_sgmac_ops; +extern struct xgene_port_ops xgene_sgport_ops; + +#endif /* __XGENE_ENET_SGMAC_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c new file mode 100644 index 000000000..a18a9d1f1 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -0,0 +1,337 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" +#include "xgene_enet_xgmac.h" + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_ring_if_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + iowrite32(val, addr); +} + +static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, + void __iomem *cmd, void __iomem *cmd_done, + u32 wr_addr, u32 wr_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(wr_addr, addr); + iowrite32(wr_data, wr); + iowrite32(XGENE_ENET_WR_CMD, cmd); + + /* wait for write command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, + u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) + netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n", + wr_addr); +} + +static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + *val = ioread32(addr); +} + +static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, + void __iomem *cmd, void __iomem *cmd_done, + u32 rd_addr, u32 *rd_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(rd_addr, addr); + iowrite32(XGENE_ENET_RD_CMD, cmd); + + /* wait for read command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + *rd_data = ioread32(rd); + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, + u32 rd_addr, u32 *rd_data) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data)) + netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n", + rd_addr); +} + +static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + u32 data; + u8 wait = 10; + + xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); + do { + usleep_range(100, 110); + xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); + } while ((data != 0xffffffff) && wait--); + + if (data != 0xffffffff) { + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; + } + + return 0; +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_ring_if(pdata, 
ENET_CFGSSQMIWQASSOC_ADDR, 0); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0); +} + +static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0); +} + +static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) +{ + u32 addr0, addr1; + u8 *dev_addr = pdata->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); + + xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0); + xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1); +} + +static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data); + + return data; +} + +static void xgene_xgmac_init(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_xgmac_reset(pdata); + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + data |= HSTPPEN; + data &= ~HSTLENCHK; + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data); + + xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600); + xgene_xgmac_set_mac_addr(pdata); + + xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data); + data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data); + + xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX); + xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0); + xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data); + data |= BIT(12); + xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data); + xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82); +} + +static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN); +} + +static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN); +} + +static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN); +} + +static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN); +} + +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) +{ + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + + clk_prepare_enable(pdata->clk); + clk_disable_unprepare(pdata->clk); + clk_prepare_enable(pdata->clk); + + xgene_enet_ecc_init(pdata); + xgene_enet_config_ring_if_assoc(pdata); + + return 0; +} + +static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 cb, fpsel; + + xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb); + cb |= CFG_CLE_BYPASS_EN0; + CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb); + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb); + CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); + CFG_CLE_FPSEL0_SET(&cb, fpsel); + xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb); +} + +static void xgene_enet_shutdown(struct xgene_enet_pdata 
*pdata) +{ + clk_disable_unprepare(pdata->clk); +} + +static void xgene_enet_link_state(struct work_struct *work) +{ + struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), + struct xgene_enet_pdata, link_work); + struct net_device *ndev = pdata->ndev; + u32 link_status, poll_interval; + + link_status = xgene_enet_link_status(pdata); + if (link_status) { + if (!netif_carrier_ok(ndev)) { + netif_carrier_on(ndev); + xgene_xgmac_init(pdata); + xgene_xgmac_rx_enable(pdata); + xgene_xgmac_tx_enable(pdata); + netdev_info(ndev, "Link is Up - 10Gbps\n"); + } + poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(ndev)) { + xgene_xgmac_rx_disable(pdata); + xgene_xgmac_tx_disable(pdata); + netif_carrier_off(ndev); + netdev_info(ndev, "Link is Down\n"); + } + poll_interval = PHY_POLL_LINK_OFF; + } + + schedule_delayed_work(&pdata->link_work, poll_interval); +} + +struct xgene_mac_ops xgene_xgmac_ops = { + .init = xgene_xgmac_init, + .reset = xgene_xgmac_reset, + .rx_enable = xgene_xgmac_rx_enable, + .tx_enable = xgene_xgmac_tx_enable, + .rx_disable = xgene_xgmac_rx_disable, + .tx_disable = xgene_xgmac_tx_disable, + .set_mac_addr = xgene_xgmac_set_mac_addr, + .link_state = xgene_enet_link_state +}; + +struct xgene_port_ops xgene_xgport_ops = { + .reset = xgene_enet_reset, + .cle_bypass = xgene_enet_xgcle_bypass, + .shutdown = xgene_enet_shutdown, +}; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h new file mode 100644 index 000000000..5a5296a6d --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -0,0 +1,53 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __XGENE_ENET_XGMAC_H__ +#define __XGENE_ENET_XGMAC_H__ + +#define BLOCK_AXG_MAC_OFFSET 0x0800 +#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000 + +#define AXGMAC_CONFIG_0 0x0000 +#define AXGMAC_CONFIG_1 0x0004 +#define HSTMACRST BIT(31) +#define HSTTCTLEN BIT(31) +#define HSTTFEN BIT(30) +#define HSTRCTLEN BIT(29) +#define HSTRFEN BIT(28) +#define HSTPPEN BIT(7) +#define HSTDRPLT64 BIT(5) +#define HSTLENCHK BIT(3) +#define HSTMACADR_LSW_ADDR 0x0010 +#define HSTMACADR_MSW_ADDR 0x0014 +#define HSTMAXFRAME_LENGTH_ADDR 0x0020 + +#define XG_RSIF_CONFIG_REG_ADDR 0x00a0 +#define XCLE_BYPASS_REG0_ADDR 0x0160 +#define XCLE_BYPASS_REG1_ADDR 0x0164 +#define XG_CFG_BYPASS_ADDR 0x0204 +#define XG_LINK_STATUS_ADDR 0x0228 +#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c +#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 +#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 + +extern struct xgene_mac_ops xgene_xgmac_ops; +extern struct xgene_port_ops xgene_xgport_ops; + +#endif /* __XGENE_ENET_XGMAC_H__ */ diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig new file mode 100644 index 000000000..1375e2dc9 --- /dev/null +++ b/drivers/net/ethernet/apple/Kconfig @@ -0,0 +1,65 @@ +# +# Apple device configuration +# + +config NET_VENDOR_APPLE + bool "Apple devices" + default y + depends on (PPC_PMAC && PPC32) || MAC + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about IBM devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_APPLE + +config MACE + tristate "MACE (Power Mac ethernet) support" + depends on PPC_PMAC && PPC32 + select CRC32 + ---help--- + Power Macintoshes and clones with Ethernet built-in on the + motherboard will usually use a MACE (Medium Access Control for + Ethernet) interface. Say Y to include support for the MACE chip. + + To compile this driver as a module, choose M here: the module + will be called mace. + +config MACE_AAUI_PORT + bool "Use AAUI port instead of TP by default" + depends on MACE + ---help--- + Some Apple machines (notably the Apple Network Server) which use the + MACE ethernet chip have an Apple AUI port (small 15-pin connector), + instead of an 8-pin RJ45 connector for twisted-pair ethernet. Say + Y here if you have such a machine. If unsure, say N. + The driver will default to AAUI on ANS anyway, and if you use it as + a module, you can provide the port_aaui=0|1 to force the driver. + +config BMAC + tristate "BMAC (G3 ethernet) support" + depends on PPC_PMAC && PPC32 + select CRC32 + ---help--- + Say Y for support of BMAC Ethernet interfaces. These are used on G3 + computers. + + To compile this driver as a module, choose M here: the module + will be called bmac. + +config MACMACE + bool "Macintosh (AV) onboard MACE ethernet" + depends on MAC + select CRC32 + ---help--- + Support for the onboard AMD 79C940 MACE Ethernet controller used in + the 660AV and 840AV Macintosh. If you have one of these Macintoshes + say Y and read the Ethernet-HOWTO, available from + . + +endif # NET_VENDOR_APPLE diff --git a/drivers/net/ethernet/apple/Makefile b/drivers/net/ethernet/apple/Makefile new file mode 100644 index 000000000..86eaa17af --- /dev/null +++ b/drivers/net/ethernet/apple/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Apple network device drivers. 
+# + +obj-$(CONFIG_MACE) += mace.o +obj-$(CONFIG_BMAC) += bmac.o +obj-$(CONFIG_MACMACE) += macmace.o diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c new file mode 100644 index 000000000..a65d7a60f --- /dev/null +++ b/drivers/net/ethernet/apple/bmac.c @@ -0,0 +1,1679 @@ +/* + * Network device driver for the BMAC ethernet controller on + * Apple Powermacs. Assumes it's under a DBDMA controller. + * + * Copyright (C) 1998 Randy Gobbel. + * + * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to + * dynamic procfs inode. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bmac.h" + +#define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1)))) +#define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1))) + +/* + * CRC polynomial - used in working out multicast filter bits. + */ +#define ENET_CRCPOLY 0x04c11db7 + +/* switch to use multicast code lifted from sunhme driver */ +#define SUNHME_MULTICAST + +#define N_RX_RING 64 +#define N_TX_RING 32 +#define MAX_TX_ACTIVE 1 +#define ETHERCRC 4 +#define ETHERMINPACKET 64 +#define ETHERMTU 1500 +#define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2) +#define TX_TIMEOUT HZ /* 1 second */ + +/* Bits in transmit DMA status */ +#define TX_DMA_ERR 0x80 + +#define XXDEBUG(args) + +struct bmac_data { + /* volatile struct bmac *bmac; */ + struct sk_buff_head *queue; + volatile struct dbdma_regs __iomem *tx_dma; + int tx_dma_intr; + volatile struct dbdma_regs __iomem *rx_dma; + int rx_dma_intr; + volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */ + volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */ + struct macio_dev *mdev; + int is_bmac_plus; + struct sk_buff *rx_bufs[N_RX_RING]; + int rx_fill; + int rx_empty; + struct sk_buff *tx_bufs[N_TX_RING]; + int tx_fill; + int tx_empty; + unsigned char tx_fullup; + struct timer_list tx_timeout; + int timeout_active; + int sleeping; + int opened; + unsigned short hash_use_count[64]; + unsigned short hash_table_mask[4]; + spinlock_t lock; +}; + +#if 0 /* Move that to ethtool */ + +typedef struct bmac_reg_entry { + char *name; + unsigned short reg_offset; +} bmac_reg_entry_t; + +#define N_REG_ENTRIES 31 + +static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = { + {"MEMADD", MEMADD}, + {"MEMDATAHI", MEMDATAHI}, + {"MEMDATALO", MEMDATALO}, + {"TXPNTR", TXPNTR}, + {"RXPNTR", RXPNTR}, + {"IPG1", IPG1}, + {"IPG2", IPG2}, + {"ALIMIT", ALIMIT}, + {"SLOT", SLOT}, + {"PALEN", PALEN}, + {"PAPAT", PAPAT}, + {"TXSFD", TXSFD}, + {"JAM", JAM}, + {"TXCFG", TXCFG}, + {"TXMAX", TXMAX}, + {"TXMIN", TXMIN}, + {"PAREG", PAREG}, + {"DCNT", DCNT}, + {"NCCNT", NCCNT}, + {"NTCNT", NTCNT}, + {"EXCNT", EXCNT}, + {"LTCNT", LTCNT}, + {"TXSM", TXSM}, + {"RXCFG", RXCFG}, + {"RXMAX", RXMAX}, + {"RXMIN", RXMIN}, + {"FRCNT", FRCNT}, + {"AECNT", AECNT}, + {"FECNT", FECNT}, + {"RXSM", RXSM}, + {"RXCV", RXCV} +}; + +#endif + +static unsigned char *bmac_emergency_rxbuf; + +/* + * Number of bytes of private data per BMAC: allow enough for + * the rx and tx dma commands plus a branch dma command each, + * and another 16 bytes to allow us to align the dma command + * buffers on a 16 byte boundary. 
+ */ +#define PRIV_BYTES (sizeof(struct bmac_data) \ + + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \ + + sizeof(struct sk_buff_head)) + +static int bmac_open(struct net_device *dev); +static int bmac_close(struct net_device *dev); +static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); +static void bmac_set_multicast(struct net_device *dev); +static void bmac_reset_and_enable(struct net_device *dev); +static void bmac_start_chip(struct net_device *dev); +static void bmac_init_chip(struct net_device *dev); +static void bmac_init_registers(struct net_device *dev); +static void bmac_enable_and_reset_chip(struct net_device *dev); +static int bmac_set_address(struct net_device *dev, void *addr); +static irqreturn_t bmac_misc_intr(int irq, void *dev_id); +static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); +static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); +static void bmac_set_timeout(struct net_device *dev); +static void bmac_tx_timeout(unsigned long data); +static int bmac_output(struct sk_buff *skb, struct net_device *dev); +static void bmac_start(struct net_device *dev); + +#define DBDMA_SET(x) ( ((x) | (x) << 16) ) +#define DBDMA_CLEAR(x) ( (x) << 16) + +static inline void +dbdma_st32(volatile __u32 __iomem *a, unsigned long x) +{ + __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); +} + +static inline unsigned long +dbdma_ld32(volatile __u32 __iomem *a) +{ + __u32 swap; + __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a)); + return swap; +} + +static void +dbdma_continue(volatile struct dbdma_regs __iomem *dmap) +{ + dbdma_st32(&dmap->control, + DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD)); + eieio(); +} + +static void +dbdma_reset(volatile struct dbdma_regs __iomem *dmap) +{ + dbdma_st32(&dmap->control, + DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)); + eieio(); + while (dbdma_ld32(&dmap->status) & RUN) + eieio(); +} + +static void +dbdma_setcmd(volatile struct dbdma_cmd *cp, + unsigned short cmd, unsigned count, unsigned long addr, + unsigned long cmd_dep) +{ + out_le16(&cp->command, cmd); + out_le16(&cp->req_count, count); + out_le32(&cp->phy_addr, addr); + out_le32(&cp->cmd_dep, cmd_dep); + out_le16(&cp->xfer_status, 0); + out_le16(&cp->res_count, 0); +} + +static inline +void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data ) +{ + out_le16((void __iomem *)dev->base_addr + reg_offset, data); +} + + +static inline +unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) +{ + return in_le16((void __iomem *)dev->base_addr + reg_offset); +} + +static void +bmac_enable_and_reset_chip(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *rd = bp->rx_dma; + volatile struct dbdma_regs __iomem *td = bp->tx_dma; + + if (rd) + dbdma_reset(rd); + if (td) + dbdma_reset(td); + + pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); +} + +#define MIFDELAY udelay(10) + +static unsigned int +bmac_mif_readbits(struct net_device *dev, int nb) +{ + unsigned int val = 0; + + while (--nb >= 0) { + bmwrite(dev, MIFCSR, 0); + MIFDELAY; + if (bmread(dev, MIFCSR) & 8) + val |= 1 << nb; + bmwrite(dev, MIFCSR, 1); + MIFDELAY; + } + bmwrite(dev, MIFCSR, 0); + MIFDELAY; + bmwrite(dev, MIFCSR, 1); + MIFDELAY; + return val; +} + +static void +bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb) +{ + int b; + + while (--nb >= 0) { + b = (val & (1 << nb))? 
6: 4; + bmwrite(dev, MIFCSR, b); + MIFDELAY; + bmwrite(dev, MIFCSR, b|1); + MIFDELAY; + } +} + +static unsigned int +bmac_mif_read(struct net_device *dev, unsigned int addr) +{ + unsigned int val; + + bmwrite(dev, MIFCSR, 4); + MIFDELAY; + bmac_mif_writebits(dev, ~0U, 32); + bmac_mif_writebits(dev, 6, 4); + bmac_mif_writebits(dev, addr, 10); + bmwrite(dev, MIFCSR, 2); + MIFDELAY; + bmwrite(dev, MIFCSR, 1); + MIFDELAY; + val = bmac_mif_readbits(dev, 17); + bmwrite(dev, MIFCSR, 4); + MIFDELAY; + return val; +} + +static void +bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val) +{ + bmwrite(dev, MIFCSR, 4); + MIFDELAY; + bmac_mif_writebits(dev, ~0U, 32); + bmac_mif_writebits(dev, 5, 4); + bmac_mif_writebits(dev, addr, 10); + bmac_mif_writebits(dev, 2, 2); + bmac_mif_writebits(dev, val, 16); + bmac_mif_writebits(dev, 3, 2); +} + +static void +bmac_init_registers(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + volatile unsigned short regValue; + unsigned short *pWord16; + int i; + + /* XXDEBUG(("bmac: enter init_registers\n")); */ + + bmwrite(dev, RXRST, RxResetValue); + bmwrite(dev, TXRST, TxResetBit); + + i = 100; + do { + --i; + udelay(10000); + regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */ + } while ((regValue & TxResetBit) && i > 0); + + if (!bp->is_bmac_plus) { + regValue = bmread(dev, XCVRIF); + regValue |= ClkBit | SerialMode | COLActiveLow; + bmwrite(dev, XCVRIF, regValue); + udelay(10000); + } + + bmwrite(dev, RSEED, (unsigned short)0x1968); + + regValue = bmread(dev, XIFC); + regValue |= TxOutputEnable; + bmwrite(dev, XIFC, regValue); + + bmread(dev, PAREG); + + /* set collision counters to 0 */ + bmwrite(dev, NCCNT, 0); + bmwrite(dev, NTCNT, 0); + bmwrite(dev, EXCNT, 0); + bmwrite(dev, LTCNT, 0); + + /* set rx counters to 0 */ + bmwrite(dev, FRCNT, 0); + bmwrite(dev, LECNT, 0); + bmwrite(dev, AECNT, 0); + bmwrite(dev, FECNT, 0); + bmwrite(dev, RXCV, 0); + + /* set tx fifo information */ + bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */ + + bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */ + bmwrite(dev, TXFIFOCSR, TxFIFOEnable ); + + /* set rx fifo information */ + bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ + bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); + + //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */ + bmread(dev, STATUS); /* read it just to clear it */ + + /* zero out the chip Hash Filter registers */ + for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; + bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ + bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ + bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ + bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ + + pWord16 = (unsigned short *)dev->dev_addr; + bmwrite(dev, MADD0, *pWord16++); + bmwrite(dev, MADD1, *pWord16++); + bmwrite(dev, MADD2, *pWord16); + + bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets); + + bmwrite(dev, INTDISABLE, EnableNormal); +} + +#if 0 +static void +bmac_disable_interrupts(struct net_device *dev) +{ + bmwrite(dev, INTDISABLE, DisableAll); +} + +static void +bmac_enable_interrupts(struct net_device *dev) +{ + bmwrite(dev, INTDISABLE, EnableNormal); +} +#endif + + +static void +bmac_start_chip(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *rd = bp->rx_dma; + unsigned short oldConfig; + + /* enable rx dma channel */ + dbdma_continue(rd); + + 
oldConfig = bmread(dev, TXCFG); + bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); + + /* turn on rx plus any other bits already on (promiscuous possibly) */ + oldConfig = bmread(dev, RXCFG); + bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); + udelay(20000); +} + +static void +bmac_init_phy(struct net_device *dev) +{ + unsigned int addr; + struct bmac_data *bp = netdev_priv(dev); + + printk(KERN_DEBUG "phy registers:"); + for (addr = 0; addr < 32; ++addr) { + if ((addr & 7) == 0) + printk(KERN_DEBUG); + printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr)); + } + printk(KERN_CONT "\n"); + + if (bp->is_bmac_plus) { + unsigned int capable, ctrl; + + ctrl = bmac_mif_read(dev, 0); + capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1; + if (bmac_mif_read(dev, 4) != capable || + (ctrl & 0x1000) == 0) { + bmac_mif_write(dev, 4, capable); + bmac_mif_write(dev, 0, 0x1200); + } else + bmac_mif_write(dev, 0, 0x1000); + } +} + +static void bmac_init_chip(struct net_device *dev) +{ + bmac_init_phy(dev); + bmac_init_registers(dev); +} + +#ifdef CONFIG_PM +static int bmac_suspend(struct macio_dev *mdev, pm_message_t state) +{ + struct net_device* dev = macio_get_drvdata(mdev); + struct bmac_data *bp = netdev_priv(dev); + unsigned long flags; + unsigned short config; + int i; + + netif_device_detach(dev); + /* prolly should wait for dma to finish & turn off the chip */ + spin_lock_irqsave(&bp->lock, flags); + if (bp->timeout_active) { + del_timer(&bp->tx_timeout); + bp->timeout_active = 0; + } + disable_irq(dev->irq); + disable_irq(bp->tx_dma_intr); + disable_irq(bp->rx_dma_intr); + bp->sleeping = 1; + spin_unlock_irqrestore(&bp->lock, flags); + if (bp->opened) { + volatile struct dbdma_regs __iomem *rd = bp->rx_dma; + volatile struct dbdma_regs __iomem *td = bp->tx_dma; + + config = bmread(dev, RXCFG); + bmwrite(dev, RXCFG, (config & ~RxMACEnable)); + config = bmread(dev, TXCFG); + bmwrite(dev, TXCFG, (config & ~TxMACEnable)); + bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ + /* disable rx and tx dma */ + rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + /* free some skb's */ + for (i=0; i<N_RX_RING; i++) { + if (bp->rx_bufs[i] != NULL) { + dev_kfree_skb(bp->rx_bufs[i]); + bp->rx_bufs[i] = NULL; + } + } + for (i = 0; i<N_TX_RING; i++) { + if (bp->tx_bufs[i] != NULL) { + dev_kfree_skb(bp->tx_bufs[i]); + bp->tx_bufs[i] = NULL; + } + } + } + pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); + return 0; +} + +static int bmac_resume(struct macio_dev *mdev) +{ + struct net_device* dev = macio_get_drvdata(mdev); + struct bmac_data *bp = netdev_priv(dev); + + /* see if this is enough */ + if (bp->opened) + bmac_reset_and_enable(dev); + + enable_irq(dev->irq); + enable_irq(bp->tx_dma_intr); + enable_irq(bp->rx_dma_intr); + netif_device_attach(dev); + + return 0; +} +#endif /* CONFIG_PM */ + +static int bmac_set_address(struct net_device *dev, void *addr) +{ + struct bmac_data *bp = netdev_priv(dev); + unsigned char *p = addr; + unsigned short *pWord16; + unsigned long flags; + int i; + + XXDEBUG(("bmac: enter set_address\n")); + spin_lock_irqsave(&bp->lock, flags); + + for (i = 0; i < 6; ++i) { + dev->dev_addr[i] = p[i]; + } + /* load up the hardware address */ + pWord16 = (unsigned short *)dev->dev_addr; + bmwrite(dev, MADD0, *pWord16++); + bmwrite(dev, MADD1, *pWord16++); + bmwrite(dev, MADD2, *pWord16); + + spin_unlock_irqrestore(&bp->lock, flags); + XXDEBUG(("bmac: exit set_address\n")); + return 0; +} +
+static inline void bmac_set_timeout(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + if (bp->timeout_active) + del_timer(&bp->tx_timeout); + bp->tx_timeout.expires = jiffies + TX_TIMEOUT; + bp->tx_timeout.function = bmac_tx_timeout; + bp->tx_timeout.data = (unsigned long) dev; + add_timer(&bp->tx_timeout); + bp->timeout_active = 1; + spin_unlock_irqrestore(&bp->lock, flags); +} + +static void +bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp) +{ + void *vaddr; + unsigned long baddr; + unsigned long len; + + len = skb->len; + vaddr = skb->data; + baddr = virt_to_bus(vaddr); + + dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0); +} + +static void +bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp) +{ + unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf; + + dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, + virt_to_bus(addr), 0); +} + +static void +bmac_init_tx_ring(struct bmac_data *bp) +{ + volatile struct dbdma_regs __iomem *td = bp->tx_dma; + + memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); + + bp->tx_empty = 0; + bp->tx_fill = 0; + bp->tx_fullup = 0; + + /* put a branch at the end of the tx command list */ + dbdma_setcmd(&bp->tx_cmds[N_TX_RING], + (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); + + /* reset tx dma */ + dbdma_reset(td); + out_le32(&td->wait_sel, 0x00200020); + out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); +} + +static int +bmac_init_rx_ring(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *rd = bp->rx_dma; + int i; + struct sk_buff *skb; + + /* initialize list of sk_buffs for receiving and set up recv dma */ + memset((char *)bp->rx_cmds, 0, + (N_RX_RING + 1) * sizeof(struct dbdma_cmd)); + for (i = 0; i < N_RX_RING; i++) { + if ((skb = bp->rx_bufs[i]) == NULL) { + bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); + if (skb != NULL) + skb_reserve(skb, 2); + } + bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); + } + + bp->rx_empty = 0; + bp->rx_fill = i; + + /* Put a branch back to the beginning of the receive command list */ + dbdma_setcmd(&bp->rx_cmds[N_RX_RING], + (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); + + /* start rx dma */ + dbdma_reset(rd); + out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); + + return 1; +} + + +static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *td = bp->tx_dma; + int i; + + /* see if there's a free slot in the tx ring */ + /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */ + /* bp->tx_empty, bp->tx_fill)); */ + i = bp->tx_fill + 1; + if (i >= N_TX_RING) + i = 0; + if (i == bp->tx_empty) { + netif_stop_queue(dev); + bp->tx_fullup = 1; + XXDEBUG(("bmac_transmit_packet: tx ring full\n")); + return -1; /* can't take it at the moment */ + } + + dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); + + bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); + + bp->tx_bufs[bp->tx_fill] = skb; + bp->tx_fill = i; + + dev->stats.tx_bytes += skb->len; + + dbdma_continue(td); + + return 0; +} + +static int rxintcount; + +static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *rd = bp->rx_dma; + volatile struct dbdma_cmd *cp; + 
int i, nb, stat; + struct sk_buff *skb; + unsigned int residual; + int last; + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + + if (++rxintcount < 10) { + XXDEBUG(("bmac_rxdma_intr\n")); + } + + last = -1; + i = bp->rx_empty; + + while (1) { + cp = &bp->rx_cmds[i]; + stat = le16_to_cpu(cp->xfer_status); + residual = le16_to_cpu(cp->res_count); + if ((stat & ACTIVE) == 0) + break; + nb = RX_BUFLEN - residual - 2; + if (nb < (ETHERMINPACKET - ETHERCRC)) { + skb = NULL; + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; + } else { + skb = bp->rx_bufs[i]; + bp->rx_bufs[i] = NULL; + } + if (skb != NULL) { + nb -= ETHERCRC; + skb_put(skb, nb); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + ++dev->stats.rx_packets; + dev->stats.rx_bytes += nb; + } else { + ++dev->stats.rx_dropped; + } + if ((skb = bp->rx_bufs[i]) == NULL) { + bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); + if (skb != NULL) + skb_reserve(bp->rx_bufs[i], 2); + } + bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); + cp->res_count = cpu_to_le16(0); + cp->xfer_status = cpu_to_le16(0); + last = i; + if (++i >= N_RX_RING) i = 0; + } + + if (last != -1) { + bp->rx_fill = last; + bp->rx_empty = i; + } + + dbdma_continue(rd); + spin_unlock_irqrestore(&bp->lock, flags); + + if (rxintcount < 10) { + XXDEBUG(("bmac_rxdma_intr done\n")); + } + return IRQ_HANDLED; +} + +static int txintcount; + +static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_cmd *cp; + int stat; + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + + if (txintcount++ < 10) { + XXDEBUG(("bmac_txdma_intr\n")); + } + + /* del_timer(&bp->tx_timeout); */ + /* bp->timeout_active = 0; */ + + while (1) { + cp = &bp->tx_cmds[bp->tx_empty]; + stat = le16_to_cpu(cp->xfer_status); + if (txintcount < 10) { + XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat)); + } + if (!(stat & ACTIVE)) { + /* + * status field might not have been filled by DBDMA + */ + if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) + break; + } + + if (bp->tx_bufs[bp->tx_empty]) { + ++dev->stats.tx_packets; + dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); + } + bp->tx_bufs[bp->tx_empty] = NULL; + bp->tx_fullup = 0; + netif_wake_queue(dev); + if (++bp->tx_empty >= N_TX_RING) + bp->tx_empty = 0; + if (bp->tx_empty == bp->tx_fill) + break; + } + + spin_unlock_irqrestore(&bp->lock, flags); + + if (txintcount < 10) { + XXDEBUG(("bmac_txdma_intr done->bmac_start\n")); + } + + bmac_start(dev); + return IRQ_HANDLED; +} + +#ifndef SUNHME_MULTICAST +/* Real fast bit-reversal algorithm, 6-bit values */ +static int reverse6[64] = { + 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, + 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, + 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, + 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, + 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, + 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, + 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, + 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f +}; + +static unsigned int +crc416(unsigned int curval, unsigned short nxtval) +{ + register unsigned int counter, cur = curval, next = nxtval; + register int high_crc_set, low_data_set; + + /* Swap bytes */ + next = ((next & 0x00FF) << 8) | (next >> 8); + + /* Compute bit-by-bit */ + for (counter = 0; counter < 16; ++counter) { + /* is high CRC bit set? 
*/ + if ((cur & 0x80000000) == 0) high_crc_set = 0; + else high_crc_set = 1; + + cur = cur << 1; + + if ((next & 0x0001) == 0) low_data_set = 0; + else low_data_set = 1; + + next = next >> 1; + + /* do the XOR */ + if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY; + } + return cur; +} + +static unsigned int +bmac_crc(unsigned short *address) +{ + unsigned int newcrc; + + XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2])); + newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ + newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ + newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ + + return(newcrc); +} + +/* + * Add requested mcast addr to BMac's hash table filter. + * + */ + +static void +bmac_addhash(struct bmac_data *bp, unsigned char *addr) +{ + unsigned int crc; + unsigned short mask; + + if (!(*addr)) return; + crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (bp->hash_use_count[crc]++) return; /* This bit is already set */ + mask = crc % 16; + mask = (unsigned char)1 << mask; + bp->hash_use_count[crc/16] |= mask; +} + +static void +bmac_removehash(struct bmac_data *bp, unsigned char *addr) +{ + unsigned int crc; + unsigned char mask; + + /* Now, delete the address from the filter copy, as indicated */ + crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ + if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ + mask = crc % 16; + mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */ + bp->hash_table_mask[crc/16] &= mask; +} + +/* + * Sync the adapter with the software copy of the multicast mask + * (logical address filter). + */ + +static void +bmac_rx_off(struct net_device *dev) +{ + unsigned short rx_cfg; + + rx_cfg = bmread(dev, RXCFG); + rx_cfg &= ~RxMACEnable; + bmwrite(dev, RXCFG, rx_cfg); + do { + rx_cfg = bmread(dev, RXCFG); + } while (rx_cfg & RxMACEnable); +} + +unsigned short +bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable) +{ + unsigned short rx_cfg; + + rx_cfg = bmread(dev, RXCFG); + rx_cfg |= RxMACEnable; + if (hash_enable) rx_cfg |= RxHashFilterEnable; + else rx_cfg &= ~RxHashFilterEnable; + if (promisc_enable) rx_cfg |= RxPromiscEnable; + else rx_cfg &= ~RxPromiscEnable; + bmwrite(dev, RXRST, RxResetValue); + bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ + bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); + bmwrite(dev, RXCFG, rx_cfg ); + return rx_cfg; +} + +static void +bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) +{ + bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ + bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ + bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ + bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ +} + +#if 0 +static void +bmac_add_multi(struct net_device *dev, + struct bmac_data *bp, unsigned char *addr) +{ + /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */ + bmac_addhash(bp, addr); + bmac_rx_off(dev); + bmac_update_hash_table_mask(dev, bp); + bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 
1 : 0); + /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */ +} + +static void +bmac_remove_multi(struct net_device *dev, + struct bmac_data *bp, unsigned char *addr) +{ + bmac_removehash(bp, addr); + bmac_rx_off(dev); + bmac_update_hash_table_mask(dev, bp); + bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); +} +#endif + +/* Set or clear the multicast filter for this adaptor. + num_addrs == -1 Promiscuous mode, receive all packets + num_addrs == 0 Normal mode, clear multicast list + num_addrs > 0 Multicast mode, receive normal and MC packets, and do + best-effort filtering. + */ +static void bmac_set_multicast(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct bmac_data *bp = netdev_priv(dev); + int num_addrs = netdev_mc_count(dev); + unsigned short rx_cfg; + int i; + + if (bp->sleeping) + return; + + XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs)); + + if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { + for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; + bmac_update_hash_table_mask(dev, bp); + rx_cfg = bmac_rx_on(dev, 1, 0); + XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n")); + } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) { + rx_cfg = bmread(dev, RXCFG); + rx_cfg |= RxPromiscEnable; + bmwrite(dev, RXCFG, rx_cfg); + rx_cfg = bmac_rx_on(dev, 0, 1); + XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg)); + } else { + for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; + for (i=0; i<64; i++) bp->hash_use_count[i] = 0; + if (num_addrs == 0) { + rx_cfg = bmac_rx_on(dev, 0, 0); + XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); + } else { + netdev_for_each_mc_addr(ha, dev) + bmac_addhash(bp, ha->addr); + bmac_update_hash_table_mask(dev, bp); + rx_cfg = bmac_rx_on(dev, 1, 0); + XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); + } + } + /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */ +} +#else /* ifdef SUNHME_MULTICAST */ + +/* The version of set_multicast below was lifted from sunhme.c */ + +static void bmac_set_multicast(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + unsigned short rx_cfg; + u32 crc; + + if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { + bmwrite(dev, BHASH0, 0xffff); + bmwrite(dev, BHASH1, 0xffff); + bmwrite(dev, BHASH2, 0xffff); + bmwrite(dev, BHASH3, 0xffff); + } else if(dev->flags & IFF_PROMISC) { + rx_cfg = bmread(dev, RXCFG); + rx_cfg |= RxPromiscEnable; + bmwrite(dev, RXCFG, rx_cfg); + } else { + u16 hash_table[4] = { 0 }; + + rx_cfg = bmread(dev, RXCFG); + rx_cfg &= ~RxPromiscEnable; + bmwrite(dev, RXCFG, rx_cfg); + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc >>= 26; + hash_table[crc >> 4] |= 1 << (crc & 0xf); + } + bmwrite(dev, BHASH0, hash_table[0]); + bmwrite(dev, BHASH1, hash_table[1]); + bmwrite(dev, BHASH2, hash_table[2]); + bmwrite(dev, BHASH3, hash_table[3]); + } +} +#endif /* SUNHME_MULTICAST */ + +static int miscintcount; + +static irqreturn_t bmac_misc_intr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + unsigned int status = bmread(dev, STATUS); + if (miscintcount++ < 10) { + XXDEBUG(("bmac_misc_intr\n")); + } + /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ + /* bmac_txdma_intr_inner(irq, dev_id); */ + /* if (status & FrameReceived) dev->stats.rx_dropped++; */ + if (status & RxErrorMask) dev->stats.rx_errors++; + if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; + if (status & RxLenCntExp) dev->stats.rx_length_errors++; + if (status & RxOverFlow) 
dev->stats.rx_over_errors++; + if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; + + /* if (status & FrameSent) dev->stats.tx_dropped++; */ + if (status & TxErrorMask) dev->stats.tx_errors++; + if (status & TxUnderrun) dev->stats.tx_fifo_errors++; + if (status & TxNormalCollExp) dev->stats.collisions++; + return IRQ_HANDLED; +} + +/* + * Procedure for reading EEPROM + */ +#define SROMAddressLength 5 +#define DataInOn 0x0008 +#define DataInOff 0x0000 +#define Clk 0x0002 +#define ChipSelect 0x0001 +#define SDIShiftCount 3 +#define SD0ShiftCount 2 +#define DelayValue 1000 /* number of microseconds */ +#define SROMStartOffset 10 /* this is in words */ +#define SROMReadCount 3 /* number of words to read from SROM */ +#define SROMAddressBits 6 +#define EnetAddressOffset 20 + +static unsigned char +bmac_clock_out_bit(struct net_device *dev) +{ + unsigned short data; + unsigned short val; + + bmwrite(dev, SROMCSR, ChipSelect | Clk); + udelay(DelayValue); + + data = bmread(dev, SROMCSR); + udelay(DelayValue); + val = (data >> SD0ShiftCount) & 1; + + bmwrite(dev, SROMCSR, ChipSelect); + udelay(DelayValue); + + return val; +} + +static void +bmac_clock_in_bit(struct net_device *dev, unsigned int val) +{ + unsigned short data; + + if (val != 0 && val != 1) return; + + data = (val << SDIShiftCount); + bmwrite(dev, SROMCSR, data | ChipSelect ); + udelay(DelayValue); + + bmwrite(dev, SROMCSR, data | ChipSelect | Clk ); + udelay(DelayValue); + + bmwrite(dev, SROMCSR, data | ChipSelect); + udelay(DelayValue); +} + +static void +reset_and_select_srom(struct net_device *dev) +{ + /* first reset */ + bmwrite(dev, SROMCSR, 0); + udelay(DelayValue); + + /* send it the read command (110) */ + bmac_clock_in_bit(dev, 1); + bmac_clock_in_bit(dev, 1); + bmac_clock_in_bit(dev, 0); +} + +static unsigned short +read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len) +{ + unsigned short data, val; + int i; + + /* send out the address we want to read from */ + for (i = 0; i < addr_len; i++) { + val = addr >> (addr_len-i-1); + bmac_clock_in_bit(dev, val & 1); + } + + /* Now read in the 16-bit data */ + data = 0; + for (i = 0; i < 16; i++) { + val = bmac_clock_out_bit(dev); + data <<= 1; + data |= val; + } + bmwrite(dev, SROMCSR, 0); + + return data; +} + +/* + * It looks like Cogent and SMC use different methods for calculating + * checksums. What a pain.. + */ + +static int +bmac_verify_checksum(struct net_device *dev) +{ + unsigned short data, storedCS; + + reset_and_select_srom(dev); + data = read_srom(dev, 3, SROMAddressBits); + storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00); + + return 0; +} + + +static void +bmac_get_station_address(struct net_device *dev, unsigned char *ea) +{ + int i; + unsigned short data; + + for (i = 0; i < 6; i++) + { + reset_and_select_srom(dev); + data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); + ea[2*i] = bitrev8(data & 0x0ff); + ea[2*i+1] = bitrev8((data >> 8) & 0x0ff); + } +} + +static void bmac_reset_and_enable(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + unsigned long flags; + struct sk_buff *skb; + unsigned char *data; + + spin_lock_irqsave(&bp->lock, flags); + bmac_enable_and_reset_chip(dev); + bmac_init_tx_ring(bp); + bmac_init_rx_ring(dev); + bmac_init_chip(dev); + bmac_start_chip(dev); + bmwrite(dev, INTDISABLE, EnableNormal); + bp->sleeping = 0; + + /* + * It seems that the bmac can't receive until it's transmitted + * a packet. So we give it a dummy packet to transmit. 
+ */ + skb = netdev_alloc_skb(dev, ETHERMINPACKET); + if (skb != NULL) { + data = skb_put(skb, ETHERMINPACKET); + memset(data, 0, ETHERMINPACKET); + memcpy(data, dev->dev_addr, ETH_ALEN); + memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN); + bmac_transmit_packet(skb, dev); + } + spin_unlock_irqrestore(&bp->lock, flags); +} + +static const struct ethtool_ops bmac_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops bmac_netdev_ops = { + .ndo_open = bmac_open, + .ndo_stop = bmac_close, + .ndo_start_xmit = bmac_output, + .ndo_set_rx_mode = bmac_set_multicast, + .ndo_set_mac_address = bmac_set_address, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) +{ + int j, rev, ret; + struct bmac_data *bp; + const unsigned char *prop_addr; + unsigned char addr[6]; + struct net_device *dev; + int is_bmac_plus = ((int)match->data) != 0; + + if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { + printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); + return -ENODEV; + } + prop_addr = of_get_property(macio_get_of_node(mdev), + "mac-address", NULL); + if (prop_addr == NULL) { + prop_addr = of_get_property(macio_get_of_node(mdev), + "local-mac-address", NULL); + if (prop_addr == NULL) { + printk(KERN_ERR "BMAC: Can't get mac-address\n"); + return -ENODEV; + } + } + memcpy(addr, prop_addr, sizeof(addr)); + + dev = alloc_etherdev(PRIV_BYTES); + if (!dev) + return -ENOMEM; + + bp = netdev_priv(dev); + SET_NETDEV_DEV(dev, &mdev->ofdev.dev); + macio_set_drvdata(mdev, dev); + + bp->mdev = mdev; + spin_lock_init(&bp->lock); + + if (macio_request_resources(mdev, "bmac")) { + printk(KERN_ERR "BMAC: can't request IO resource !\n"); + goto out_free; + } + + dev->base_addr = (unsigned long) + ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0)); + if (dev->base_addr == 0) + goto out_release; + + dev->irq = macio_irq(mdev, 0); + + bmac_enable_and_reset_chip(dev); + bmwrite(dev, INTDISABLE, DisableAll); + + rev = addr[0] == 0 && addr[1] == 0xA0; + for (j = 0; j < 6; ++j) + dev->dev_addr[j] = rev ? 
bitrev8(addr[j]): addr[j]; + + /* Enable chip without interrupts for now */ + bmac_enable_and_reset_chip(dev); + bmwrite(dev, INTDISABLE, DisableAll); + + dev->netdev_ops = &bmac_netdev_ops; + dev->ethtool_ops = &bmac_ethtool_ops; + + bmac_get_station_address(dev, addr); + if (bmac_verify_checksum(dev) != 0) + goto err_out_iounmap; + + bp->is_bmac_plus = is_bmac_plus; + bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); + if (!bp->tx_dma) + goto err_out_iounmap; + bp->tx_dma_intr = macio_irq(mdev, 1); + bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); + if (!bp->rx_dma) + goto err_out_iounmap_tx; + bp->rx_dma_intr = macio_irq(mdev, 2); + + bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); + bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; + + bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); + skb_queue_head_init(bp->queue); + + init_timer(&bp->tx_timeout); + + ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev); + if (ret) { + printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq); + goto err_out_iounmap_rx; + } + ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); + if (ret) { + printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); + goto err_out_irq0; + } + ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); + if (ret) { + printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); + goto err_out_irq1; + } + + /* Mask chip interrupts and disable chip, will be + * re-enabled on open() + */ + disable_irq(dev->irq); + pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); + + if (register_netdev(dev) != 0) { + printk(KERN_ERR "BMAC: Ethernet registration failed\n"); + goto err_out_irq2; + } + + printk(KERN_INFO "%s: BMAC%s at %pM", + dev->name, (is_bmac_plus ? 
"+" : ""), dev->dev_addr); + XXDEBUG((", base_addr=%#0lx", dev->base_addr)); + printk("\n"); + + return 0; + +err_out_irq2: + free_irq(bp->rx_dma_intr, dev); +err_out_irq1: + free_irq(bp->tx_dma_intr, dev); +err_out_irq0: + free_irq(dev->irq, dev); +err_out_iounmap_rx: + iounmap(bp->rx_dma); +err_out_iounmap_tx: + iounmap(bp->tx_dma); +err_out_iounmap: + iounmap((void __iomem *)dev->base_addr); +out_release: + macio_release_resources(mdev); +out_free: + pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); + free_netdev(dev); + + return -ENODEV; +} + +static int bmac_open(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + /* XXDEBUG(("bmac: enter open\n")); */ + /* reset the chip */ + bp->opened = 1; + bmac_reset_and_enable(dev); + enable_irq(dev->irq); + return 0; +} + +static int bmac_close(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *rd = bp->rx_dma; + volatile struct dbdma_regs __iomem *td = bp->tx_dma; + unsigned short config; + int i; + + bp->sleeping = 1; + + /* disable rx and tx */ + config = bmread(dev, RXCFG); + bmwrite(dev, RXCFG, (config & ~RxMACEnable)); + + config = bmread(dev, TXCFG); + bmwrite(dev, TXCFG, (config & ~TxMACEnable)); + + bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ + + /* disable rx and tx dma */ + rd->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + td->control = cpu_to_le32(DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ + + /* free some skb's */ + XXDEBUG(("bmac: free rx bufs\n")); + for (i=0; irx_bufs[i] != NULL) { + dev_kfree_skb(bp->rx_bufs[i]); + bp->rx_bufs[i] = NULL; + } + } + XXDEBUG(("bmac: free tx bufs\n")); + for (i = 0; itx_bufs[i] != NULL) { + dev_kfree_skb(bp->tx_bufs[i]); + bp->tx_bufs[i] = NULL; + } + } + XXDEBUG(("bmac: all bufs freed\n")); + + bp->opened = 0; + disable_irq(dev->irq); + pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); + + return 0; +} + +static void +bmac_start(struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + int i; + struct sk_buff *skb; + unsigned long flags; + + if (bp->sleeping) + return; + + spin_lock_irqsave(&bp->lock, flags); + while (1) { + i = bp->tx_fill + 1; + if (i >= N_TX_RING) + i = 0; + if (i == bp->tx_empty) + break; + skb = skb_dequeue(bp->queue); + if (skb == NULL) + break; + bmac_transmit_packet(skb, dev); + } + spin_unlock_irqrestore(&bp->lock, flags); +} + +static int +bmac_output(struct sk_buff *skb, struct net_device *dev) +{ + struct bmac_data *bp = netdev_priv(dev); + skb_queue_tail(bp->queue, skb); + bmac_start(dev); + return NETDEV_TX_OK; +} + +static void bmac_tx_timeout(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct bmac_data *bp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *td = bp->tx_dma; + volatile struct dbdma_regs __iomem *rd = bp->rx_dma; + volatile struct dbdma_cmd *cp; + unsigned long flags; + unsigned short config, oldConfig; + int i; + + XXDEBUG(("bmac: tx_timeout called\n")); + spin_lock_irqsave(&bp->lock, flags); + bp->timeout_active = 0; + + /* update various counters */ +/* bmac_handle_misc_intrs(bp, 0); */ + + cp = &bp->tx_cmds[bp->tx_empty]; +/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */ +/* le32_to_cpu(td->status), le16_to_cpu(cp->xfer_status), bp->tx_bad_runt, */ +/* mb->pr, mb->xmtfs, mb->fifofc)); */ + + /* turn off both tx and rx and reset the chip */ + config = bmread(dev, RXCFG); 
+ bmwrite(dev, RXCFG, (config & ~RxMACEnable)); + config = bmread(dev, TXCFG); + bmwrite(dev, TXCFG, (config & ~TxMACEnable)); + out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); + printk(KERN_ERR "bmac: transmit timeout - resetting\n"); + bmac_enable_and_reset_chip(dev); + + /* restart rx dma */ + cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); + out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); + out_le16(&cp->xfer_status, 0); + out_le32(&rd->cmdptr, virt_to_bus(cp)); + out_le32(&rd->control, DBDMA_SET(RUN|WAKE)); + + /* fix up the transmit side */ + XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", + bp->tx_empty, bp->tx_fill, bp->tx_fullup)); + i = bp->tx_empty; + ++dev->stats.tx_errors; + if (i != bp->tx_fill) { + dev_kfree_skb(bp->tx_bufs[i]); + bp->tx_bufs[i] = NULL; + if (++i >= N_TX_RING) i = 0; + bp->tx_empty = i; + } + bp->tx_fullup = 0; + netif_wake_queue(dev); + if (i != bp->tx_fill) { + cp = &bp->tx_cmds[i]; + out_le16(&cp->xfer_status, 0); + out_le16(&cp->command, OUTPUT_LAST); + out_le32(&td->cmdptr, virt_to_bus(cp)); + out_le32(&td->control, DBDMA_SET(RUN)); + /* bmac_set_timeout(dev); */ + XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i)); + } + + /* turn it back on */ + oldConfig = bmread(dev, RXCFG); + bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); + oldConfig = bmread(dev, TXCFG); + bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); + + spin_unlock_irqrestore(&bp->lock, flags); +} + +#if 0 +static void dump_dbdma(volatile struct dbdma_cmd *cp,int count) +{ + int i,*ip; + + for (i=0;i< count;i++) { + ip = (int*)(cp+i); + + printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n", + le32_to_cpup(ip+0), + le32_to_cpup(ip+1), + le32_to_cpup(ip+2), + le32_to_cpup(ip+3)); + } + +} +#endif + +#if 0 +static int +bmac_proc_info(char *buffer, char **start, off_t offset, int length) +{ + int len = 0; + off_t pos = 0; + off_t begin = 0; + int i; + + if (bmac_devs == NULL) + return -ENOSYS; + + len += sprintf(buffer, "BMAC counters & registers\n"); + + for (i = 0; i offset+length) break; + } + + *start = buffer + (offset - begin); + len -= (offset - begin); + + if (len > length) len = length; + + return len; +} +#endif + +static int bmac_remove(struct macio_dev *mdev) +{ + struct net_device *dev = macio_get_drvdata(mdev); + struct bmac_data *bp = netdev_priv(dev); + + unregister_netdev(dev); + + free_irq(dev->irq, dev); + free_irq(bp->tx_dma_intr, dev); + free_irq(bp->rx_dma_intr, dev); + + iounmap((void __iomem *)dev->base_addr); + iounmap(bp->tx_dma); + iounmap(bp->rx_dma); + + macio_release_resources(mdev); + + free_netdev(dev); + + return 0; +} + +static const struct of_device_id bmac_match[] = +{ + { + .name = "bmac", + .data = (void *)0, + }, + { + .type = "network", + .compatible = "bmac+", + .data = (void *)1, + }, + {}, +}; +MODULE_DEVICE_TABLE (of, bmac_match); + +static struct macio_driver bmac_driver = +{ + .driver = { + .name = "bmac", + .owner = THIS_MODULE, + .of_match_table = bmac_match, + }, + .probe = bmac_probe, + .remove = bmac_remove, +#ifdef CONFIG_PM + .suspend = bmac_suspend, + .resume = bmac_resume, +#endif +}; + + +static int __init bmac_init(void) +{ + if (bmac_emergency_rxbuf == NULL) { + bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL); + if (bmac_emergency_rxbuf == NULL) + return -ENOMEM; + } + + return macio_register_driver(&bmac_driver); +} + +static void __exit bmac_exit(void) +{ + macio_unregister_driver(&bmac_driver); + + kfree(bmac_emergency_rxbuf); + bmac_emergency_rxbuf = NULL; +} + 
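+/*
+ * Illustrative summary of the emergency-buffer fallback used by the
+ * receive path (a note only; the logic itself lives in the functions
+ * above): bmac_init() preallocates bmac_emergency_rxbuf (RX_BUFLEN
+ * bytes) so that bmac_construct_rxbuff() always has a DMA target even
+ * when netdev_alloc_skb() fails:
+ *
+ *	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
+ *
+ * A frame received into the emergency buffer has no skb attached to its
+ * ring slot, so bmac_rxdma_intr() accounts it as rx_dropped rather than
+ * passing it up the stack.
+ */
+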
+MODULE_AUTHOR("Randy Gobbel/Paul Mackerras"); +MODULE_DESCRIPTION("PowerMac BMAC ethernet driver."); +MODULE_LICENSE("GPL"); + +module_init(bmac_init); +module_exit(bmac_exit); diff --git a/drivers/net/ethernet/apple/bmac.h b/drivers/net/ethernet/apple/bmac.h new file mode 100644 index 000000000..a1d19d867 --- /dev/null +++ b/drivers/net/ethernet/apple/bmac.h @@ -0,0 +1,164 @@ +/* + * mace.h - definitions for the registers in the "Big Mac" + * Ethernet controller found in PowerMac G3 models. + * + * Copyright (C) 1998 Randy Gobbel. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* The "Big MAC" appears to have some parts in common with the Sun "Happy Meal" + * (HME) controller. See sunhme.h + */ + + +/* register offsets */ + +/* global status and control */ +#define XIFC 0x000 /* low-level interface control */ +# define TxOutputEnable 0x0001 /* output driver enable */ +# define XIFLoopback 0x0002 /* Loopback-mode XIF enable */ +# define MIILoopback 0x0004 /* Loopback-mode MII enable */ +# define MIILoopbackBits 0x0006 +# define MIIBuffDisable 0x0008 /* MII receive buffer disable */ +# define SQETestEnable 0x0010 /* SQE test enable */ +# define SQETimeWindow 0x03e0 /* SQE time window */ +# define XIFLanceMode 0x0010 /* Lance mode enable */ +# define XIFLanceIPG0 0x03e0 /* Lance mode IPG0 */ +#define TXFIFOCSR 0x100 /* transmit FIFO control */ +# define TxFIFOEnable 0x0001 +#define TXTH 0x110 /* transmit threshold */ +# define TxThreshold 0x0004 +#define RXFIFOCSR 0x120 /* receive FIFO control */ +# define RxFIFOEnable 0x0001 +#define MEMADD 0x130 /* memory address, unknown function */ +#define MEMDATAHI 0x140 /* memory data high, presently unused in driver */ +#define MEMDATALO 0x150 /* memory data low, presently unused in driver */ +#define XCVRIF 0x160 /* transceiver interface control */ +# define COLActiveLow 0x0002 +# define SerialMode 0x0004 +# define ClkBit 0x0008 +# define LinkStatus 0x0100 +#define CHIPID 0x170 /* chip ID */ +#define MIFCSR 0x180 /* ??? 
*/ +#define SROMCSR 0x190 /* SROM control */ +# define ChipSelect 0x0001 +# define Clk 0x0002 +#define TXPNTR 0x1a0 /* transmit pointer */ +#define RXPNTR 0x1b0 /* receive pointer */ +#define STATUS 0x200 /* status--reading this clears it */ +#define INTDISABLE 0x210 /* interrupt enable/disable control */ +/* bits below are the same in both STATUS and INTDISABLE registers */ +# define FrameReceived 0x00000001 /* Received a frame */ +# define RxFrameCntExp 0x00000002 /* Receive frame counter expired */ +# define RxAlignCntExp 0x00000004 /* Align-error counter expired */ +# define RxCRCCntExp 0x00000008 /* CRC-error counter expired */ +# define RxLenCntExp 0x00000010 /* Length-error counter expired */ +# define RxOverFlow 0x00000020 /* Receive FIFO overflow */ +# define RxCodeViolation 0x00000040 /* Code-violation counter expired */ +# define SQETestError 0x00000080 /* Test error in XIF for SQE */ +# define FrameSent 0x00000100 /* Transmitted a frame */ +# define TxUnderrun 0x00000200 /* Transmit FIFO underrun */ +# define TxMaxSizeError 0x00000400 /* Max-packet size error */ +# define TxNormalCollExp 0x00000800 /* Normal-collision counter expired */ +# define TxExcessCollExp 0x00001000 /* Excess-collision counter expired */ +# define TxLateCollExp 0x00002000 /* Late-collision counter expired */ +# define TxNetworkCollExp 0x00004000 /* First-collision counter expired */ +# define TxDeferTimerExp 0x00008000 /* Defer-timer expired */ +# define RxFIFOToHost 0x00010000 /* Data moved from FIFO to host */ +# define RxNoDescriptors 0x00020000 /* No more receive descriptors */ +# define RxDMAError 0x00040000 /* Error during receive DMA */ +# define RxDMALateErr 0x00080000 /* Receive DMA, data late */ +# define RxParityErr 0x00100000 /* Parity error during receive DMA */ +# define RxTagError 0x00200000 /* Tag error during receive DMA */ +# define TxEOPError 0x00400000 /* Tx descriptor did not have EOP set */ +# define MIFIntrEvent 0x00800000 /* MIF is signaling an interrupt */ +# define TxHostToFIFO 0x01000000 /* Data moved from host to FIFO */ +# define TxFIFOAllSent 0x02000000 /* Transmitted all packets in FIFO */ +# define TxDMAError 0x04000000 /* Error during transmit DMA */ +# define TxDMALateError 0x08000000 /* Late error during transmit DMA */ +# define TxParityError 0x10000000 /* Parity error during transmit DMA */ +# define TxTagError 0x20000000 /* Tag error during transmit DMA */ +# define PIOError 0x40000000 /* PIO access got an error */ +# define PIOParityError 0x80000000 /* PIO access got a parity error */ +# define DisableAll 0xffffffff +# define EnableAll 0x00000000 +/* # define NormalIntEvents ~(FrameReceived | FrameSent | TxUnderrun) */ +# define EnableNormal ~(FrameReceived | FrameSent) +# define EnableErrors (FrameReceived | FrameSent) +# define RxErrorMask (RxFrameCntExp | RxAlignCntExp | RxCRCCntExp | \ + RxLenCntExp | RxOverFlow | RxCodeViolation) +# define TxErrorMask (TxUnderrun | TxMaxSizeError | TxExcessCollExp | \ + TxLateCollExp | TxNetworkCollExp | TxDeferTimerExp) + +/* transmit control */ +#define TXRST 0x420 /* transmit reset */ +# define TxResetBit 0x0001 +#define TXCFG 0x430 /* transmit configuration control*/ +# define TxMACEnable 0x0001 /* output driver enable */ +# define TxSlowMode 0x0020 /* enable slow mode */ +# define TxIgnoreColl 0x0040 /* ignore transmit collisions */ +# define TxNoFCS 0x0080 /* do not emit FCS */ +# define TxNoBackoff 0x0100 /* no backoff in case of collisions */ +# define TxFullDuplex 0x0200 /* enable full-duplex */ +# define TxNeverGiveUp 
0x0400 /* don't give up on transmits */ +#define IPG1 0x440 /* Inter-packet gap 1 */ +#define IPG2 0x450 /* Inter-packet gap 2 */ +#define ALIMIT 0x460 /* Transmit attempt limit */ +#define SLOT 0x470 /* Transmit slot time */ +#define PALEN 0x480 /* Size of transmit preamble */ +#define PAPAT 0x490 /* Pattern for transmit preamble */ +#define TXSFD 0x4a0 /* Transmit frame delimiter */ +#define JAM 0x4b0 /* Jam size */ +#define TXMAX 0x4c0 /* Transmit max pkt size */ +#define TXMIN 0x4d0 /* Transmit min pkt size */ +#define PAREG 0x4e0 /* Count of transmit peak attempts */ +#define DCNT 0x4f0 /* Transmit defer timer */ +#define NCCNT 0x500 /* Transmit normal-collision counter */ +#define NTCNT 0x510 /* Transmit first-collision counter */ +#define EXCNT 0x520 /* Transmit excess-collision counter */ +#define LTCNT 0x530 /* Transmit late-collision counter */ +#define RSEED 0x540 /* Transmit random number seed */ +#define TXSM 0x550 /* Transmit state machine */ + +/* receive control */ +#define RXRST 0x620 /* receive reset */ +# define RxResetValue 0x0000 +#define RXCFG 0x630 /* receive configuration control */ +# define RxMACEnable 0x0001 /* receiver overall enable */ +# define RxCFGReserved 0x0004 +# define RxPadStripEnab 0x0020 /* enable pad byte stripping */ +# define RxPromiscEnable 0x0040 /* turn on promiscuous mode */ +# define RxNoErrCheck 0x0080 /* disable receive error checking */ +# define RxCRCNoStrip 0x0100 /* disable auto-CRC-stripping */ +# define RxRejectOwnPackets 0x0200 /* don't receive our own packets */ +# define RxGrpPromisck 0x0400 /* enable group promiscuous mode */ +# define RxHashFilterEnable 0x0800 /* enable hash filter */ +# define RxAddrFilterEnable 0x1000 /* enable address filter */ +#define RXMAX 0x640 /* Max receive packet size */ +#define RXMIN 0x650 /* Min receive packet size */ +#define MADD2 0x660 /* our enet address, high part */ +#define MADD1 0x670 /* our enet address, middle part */ +#define MADD0 0x680 /* our enet address, low part */ +#define FRCNT 0x690 /* receive frame counter */ +#define LECNT 0x6a0 /* Receive excess length error counter */ +#define AECNT 0x6b0 /* Receive misaligned error counter */ +#define FECNT 0x6c0 /* Receive CRC error counter */ +#define RXSM 0x6d0 /* Receive state machine */ +#define RXCV 0x6e0 /* Receive code violation */ + +#define BHASH3 0x700 /* multicast hash register */ +#define BHASH2 0x710 /* multicast hash register */ +#define BHASH1 0x720 /* multicast hash register */ +#define BHASH0 0x730 /* multicast hash register */ + +#define AFR2 0x740 /* address filtering setup? */ +#define AFR1 0x750 /* address filtering setup? */ +#define AFR0 0x760 /* address filtering setup? */ +#define AFCR 0x770 /* address filter compare register? */ +# define EnableAllCompares 0x0fff + +/* bits in XIFC */ diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c new file mode 100644 index 000000000..e58a7c737 --- /dev/null +++ b/drivers/net/ethernet/apple/mace.c @@ -0,0 +1,1028 @@ +/* + * Network device driver for the MACE ethernet controller on + * Apple Powermacs. Assumes it's under a DBDMA controller. + * + * Copyright (C) 1996 Paul Mackerras. 
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/crc32.h>
+#include <linux/spinlock.h>
+#include <linux/bitrev.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <asm/dbdma.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/macio.h>
+
+#include "mace.h"
+
+static int port_aaui = -1;
+
+#define N_RX_RING	8
+#define N_TX_RING	6
+#define MAX_TX_ACTIVE	1
+#define NCMDS_TX	1	/* dma commands per element in tx ring */
+#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
+#define TX_TIMEOUT	HZ	/* 1 second */
+
+/* Chip rev needs workaround on HW & multicast addr change */
+#define BROKEN_ADDRCHG_REV	0x0941
+
+/* Bits in transmit DMA status */
+#define TX_DMA_ERR	0x80
+
+struct mace_data {
+	volatile struct mace __iomem *mace;
+	volatile struct dbdma_regs __iomem *tx_dma;
+	int tx_dma_intr;
+	volatile struct dbdma_regs __iomem *rx_dma;
+	int rx_dma_intr;
+	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
+	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
+	struct sk_buff *rx_bufs[N_RX_RING];
+	int rx_fill;
+	int rx_empty;
+	struct sk_buff *tx_bufs[N_TX_RING];
+	int tx_fill;
+	int tx_empty;
+	unsigned char maccc;
+	unsigned char tx_fullup;
+	unsigned char tx_active;
+	unsigned char tx_bad_runt;
+	struct timer_list tx_timeout;
+	int timeout_active;
+	int port_aaui;
+	int chipid;
+	struct macio_dev *mdev;
+	spinlock_t lock;
+};
+
+/*
+ * Number of bytes of private data per MACE: allow enough for
+ * the rx and tx dma commands plus a branch dma command each,
+ * and another 16 bytes to allow us to align the dma command
+ * buffers on a 16 byte boundary.
+ */
+#define PRIV_BYTES	(sizeof(struct mace_data) \
+	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
+
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static void mace_set_multicast(struct net_device *dev);
+static void mace_reset(struct net_device *dev);
+static int mace_set_address(struct net_device *dev, void *addr);
+static irqreturn_t mace_interrupt(int irq, void *dev_id);
+static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
+static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
+static void mace_set_timeout(struct net_device *dev);
+static void mace_tx_timeout(unsigned long data);
+static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
+static inline void mace_clean_rings(struct mace_data *mp);
+static void __mace_set_address(struct net_device *dev, void *addr);
+
+/*
+ * If we can't get a skbuff when we need it, we use this area for DMA.
+ */ +static unsigned char *dummy_buf; + +static const struct net_device_ops mace_netdev_ops = { + .ndo_open = mace_open, + .ndo_stop = mace_close, + .ndo_start_xmit = mace_xmit_start, + .ndo_set_rx_mode = mace_set_multicast, + .ndo_set_mac_address = mace_set_address, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) +{ + struct device_node *mace = macio_get_of_node(mdev); + struct net_device *dev; + struct mace_data *mp; + const unsigned char *addr; + int j, rev, rc = -EBUSY; + + if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { + printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", + mace->full_name); + return -ENODEV; + } + + addr = of_get_property(mace, "mac-address", NULL); + if (addr == NULL) { + addr = of_get_property(mace, "local-mac-address", NULL); + if (addr == NULL) { + printk(KERN_ERR "Can't get mac-address for MACE %s\n", + mace->full_name); + return -ENODEV; + } + } + + /* + * lazy allocate the driver-wide dummy buffer. (Note that we + * never have more than one MACE in the system anyway) + */ + if (dummy_buf == NULL) { + dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL); + if (dummy_buf == NULL) + return -ENOMEM; + } + + if (macio_request_resources(mdev, "mace")) { + printk(KERN_ERR "MACE: can't request IO resources !\n"); + return -EBUSY; + } + + dev = alloc_etherdev(PRIV_BYTES); + if (!dev) { + rc = -ENOMEM; + goto err_release; + } + SET_NETDEV_DEV(dev, &mdev->ofdev.dev); + + mp = netdev_priv(dev); + mp->mdev = mdev; + macio_set_drvdata(mdev, dev); + + dev->base_addr = macio_resource_start(mdev, 0); + mp->mace = ioremap(dev->base_addr, 0x1000); + if (mp->mace == NULL) { + printk(KERN_ERR "MACE: can't map IO resources !\n"); + rc = -ENOMEM; + goto err_free; + } + dev->irq = macio_irq(mdev, 0); + + rev = addr[0] == 0 && addr[1] == 0xA0; + for (j = 0; j < 6; ++j) { + dev->dev_addr[j] = rev ? 
bitrev8(addr[j]): addr[j]; + } + mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | + in_8(&mp->mace->chipid_lo); + + + mp = netdev_priv(dev); + mp->maccc = ENXMT | ENRCV; + + mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000); + if (mp->tx_dma == NULL) { + printk(KERN_ERR "MACE: can't map TX DMA resources !\n"); + rc = -ENOMEM; + goto err_unmap_io; + } + mp->tx_dma_intr = macio_irq(mdev, 1); + + mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000); + if (mp->rx_dma == NULL) { + printk(KERN_ERR "MACE: can't map RX DMA resources !\n"); + rc = -ENOMEM; + goto err_unmap_tx_dma; + } + mp->rx_dma_intr = macio_irq(mdev, 2); + + mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); + mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; + + memset((char *) mp->tx_cmds, 0, + (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); + init_timer(&mp->tx_timeout); + spin_lock_init(&mp->lock); + mp->timeout_active = 0; + + if (port_aaui >= 0) + mp->port_aaui = port_aaui; + else { + /* Apple Network Server uses the AAUI port */ + if (of_machine_is_compatible("AAPL,ShinerESB")) + mp->port_aaui = 1; + else { +#ifdef CONFIG_MACE_AAUI_PORT + mp->port_aaui = 1; +#else + mp->port_aaui = 0; +#endif + } + } + + dev->netdev_ops = &mace_netdev_ops; + + /* + * Most of what is below could be moved to mace_open() + */ + mace_reset(dev); + + rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev); + if (rc) { + printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq); + goto err_unmap_rx_dma; + } + rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); + if (rc) { + printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr); + goto err_free_irq; + } + rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); + if (rc) { + printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr); + goto err_free_tx_irq; + } + + rc = register_netdev(dev); + if (rc) { + printk(KERN_ERR "MACE: Cannot register net device, aborting.\n"); + goto err_free_rx_irq; + } + + printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n", + dev->name, dev->dev_addr, + mp->chipid >> 8, mp->chipid & 0xff); + + return 0; + + err_free_rx_irq: + free_irq(macio_irq(mdev, 2), dev); + err_free_tx_irq: + free_irq(macio_irq(mdev, 1), dev); + err_free_irq: + free_irq(macio_irq(mdev, 0), dev); + err_unmap_rx_dma: + iounmap(mp->rx_dma); + err_unmap_tx_dma: + iounmap(mp->tx_dma); + err_unmap_io: + iounmap(mp->mace); + err_free: + free_netdev(dev); + err_release: + macio_release_resources(mdev); + + return rc; +} + +static int mace_remove(struct macio_dev *mdev) +{ + struct net_device *dev = macio_get_drvdata(mdev); + struct mace_data *mp; + + BUG_ON(dev == NULL); + + macio_set_drvdata(mdev, NULL); + + mp = netdev_priv(dev); + + unregister_netdev(dev); + + free_irq(dev->irq, dev); + free_irq(mp->tx_dma_intr, dev); + free_irq(mp->rx_dma_intr, dev); + + iounmap(mp->rx_dma); + iounmap(mp->tx_dma); + iounmap(mp->mace); + + free_netdev(dev); + + macio_release_resources(mdev); + + return 0; +} + +static void dbdma_reset(volatile struct dbdma_regs __iomem *dma) +{ + int i; + + out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16); + + /* + * Yes this looks peculiar, but apparently it needs to be this + * way on some machines. 
+ */ + for (i = 200; i > 0; --i) + if (le32_to_cpu(dma->control) & RUN) + udelay(1); +} + +static void mace_reset(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + int i; + + /* soft-reset the chip */ + i = 200; + while (--i) { + out_8(&mb->biucc, SWRST); + if (in_8(&mb->biucc) & SWRST) { + udelay(10); + continue; + } + break; + } + if (!i) { + printk(KERN_ERR "mace: cannot reset chip!\n"); + return; + } + + out_8(&mb->imr, 0xff); /* disable all intrs for now */ + i = in_8(&mb->ir); + out_8(&mb->maccc, 0); /* turn off tx, rx */ + + out_8(&mb->biucc, XMTSP_64); + out_8(&mb->utr, RTRD); + out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST); + out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */ + out_8(&mb->rcvfc, 0); + + /* load up the hardware address */ + __mace_set_address(dev, dev->dev_addr); + + /* clear the multicast filter */ + if (mp->chipid == BROKEN_ADDRCHG_REV) + out_8(&mb->iac, LOGADDR); + else { + out_8(&mb->iac, ADDRCHG | LOGADDR); + while ((in_8(&mb->iac) & ADDRCHG) != 0) + ; + } + for (i = 0; i < 8; ++i) + out_8(&mb->ladrf, 0); + + /* done changing address */ + if (mp->chipid != BROKEN_ADDRCHG_REV) + out_8(&mb->iac, 0); + + if (mp->port_aaui) + out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO); + else + out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO); +} + +static void __mace_set_address(struct net_device *dev, void *addr) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + unsigned char *p = addr; + int i; + + /* load up the hardware address */ + if (mp->chipid == BROKEN_ADDRCHG_REV) + out_8(&mb->iac, PHYADDR); + else { + out_8(&mb->iac, ADDRCHG | PHYADDR); + while ((in_8(&mb->iac) & ADDRCHG) != 0) + ; + } + for (i = 0; i < 6; ++i) + out_8(&mb->padr, dev->dev_addr[i] = p[i]); + if (mp->chipid != BROKEN_ADDRCHG_REV) + out_8(&mb->iac, 0); +} + +static int mace_set_address(struct net_device *dev, void *addr) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + unsigned long flags; + + spin_lock_irqsave(&mp->lock, flags); + + __mace_set_address(dev, addr); + + /* note: setting ADDRCHG clears ENRCV */ + out_8(&mb->maccc, mp->maccc); + + spin_unlock_irqrestore(&mp->lock, flags); + return 0; +} + +static inline void mace_clean_rings(struct mace_data *mp) +{ + int i; + + /* free some skb's */ + for (i = 0; i < N_RX_RING; ++i) { + if (mp->rx_bufs[i] != NULL) { + dev_kfree_skb(mp->rx_bufs[i]); + mp->rx_bufs[i] = NULL; + } + } + for (i = mp->tx_empty; i != mp->tx_fill; ) { + dev_kfree_skb(mp->tx_bufs[i]); + if (++i >= N_TX_RING) + i = 0; + } +} + +static int mace_open(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + volatile struct dbdma_regs __iomem *rd = mp->rx_dma; + volatile struct dbdma_regs __iomem *td = mp->tx_dma; + volatile struct dbdma_cmd *cp; + int i; + struct sk_buff *skb; + unsigned char *data; + + /* reset the chip */ + mace_reset(dev); + + /* initialize list of sk_buffs for receiving and set up recv dma */ + mace_clean_rings(mp); + memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd)); + cp = mp->rx_cmds; + for (i = 0; i < N_RX_RING - 1; ++i) { + skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); + if (!skb) { + data = dummy_buf; + } else { + skb_reserve(skb, 2); /* so IP header lands on 4-byte bdry */ + data = skb->data; + } + mp->rx_bufs[i] = skb; + cp->req_count = cpu_to_le16(RX_BUFLEN); + cp->command = cpu_to_le16(INPUT_LAST + 
INTR_ALWAYS); + cp->phy_addr = cpu_to_le32(virt_to_bus(data)); + cp->xfer_status = 0; + ++cp; + } + mp->rx_bufs[i] = NULL; + cp->command = cpu_to_le16(DBDMA_STOP); + mp->rx_fill = i; + mp->rx_empty = 0; + + /* Put a branch back to the beginning of the receive command list */ + ++cp; + cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); + cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds)); + + /* start rx dma */ + out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ + out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds)); + out_le32(&rd->control, (RUN << 16) | RUN); + + /* put a branch at the end of the tx command list */ + cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; + cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS); + cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds)); + + /* reset tx dma */ + out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); + out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds)); + mp->tx_fill = 0; + mp->tx_empty = 0; + mp->tx_fullup = 0; + mp->tx_active = 0; + mp->tx_bad_runt = 0; + + /* turn it on! */ + out_8(&mb->maccc, mp->maccc); + /* enable all interrupts except receive interrupts */ + out_8(&mb->imr, RCVINT); + + return 0; +} + +static int mace_close(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + volatile struct dbdma_regs __iomem *rd = mp->rx_dma; + volatile struct dbdma_regs __iomem *td = mp->tx_dma; + + /* disable rx and tx */ + out_8(&mb->maccc, 0); + out_8(&mb->imr, 0xff); /* disable all intrs */ + + /* disable rx and tx dma */ + rd->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ + td->control = cpu_to_le32((RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */ + + mace_clean_rings(mp); + + return 0; +} + +static inline void mace_set_timeout(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + + if (mp->timeout_active) + del_timer(&mp->tx_timeout); + mp->tx_timeout.expires = jiffies + TX_TIMEOUT; + mp->tx_timeout.function = mace_tx_timeout; + mp->tx_timeout.data = (unsigned long) dev; + add_timer(&mp->tx_timeout); + mp->timeout_active = 1; +} + +static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *td = mp->tx_dma; + volatile struct dbdma_cmd *cp, *np; + unsigned long flags; + int fill, next, len; + + /* see if there's a free slot in the tx ring */ + spin_lock_irqsave(&mp->lock, flags); + fill = mp->tx_fill; + next = fill + 1; + if (next >= N_TX_RING) + next = 0; + if (next == mp->tx_empty) { + netif_stop_queue(dev); + mp->tx_fullup = 1; + spin_unlock_irqrestore(&mp->lock, flags); + return NETDEV_TX_BUSY; /* can't take it at the moment */ + } + spin_unlock_irqrestore(&mp->lock, flags); + + /* partially fill in the dma command block */ + len = skb->len; + if (len > ETH_FRAME_LEN) { + printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len); + len = ETH_FRAME_LEN; + } + mp->tx_bufs[fill] = skb; + cp = mp->tx_cmds + NCMDS_TX * fill; + cp->req_count = cpu_to_le16(len); + cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data)); + + np = mp->tx_cmds + NCMDS_TX * next; + out_le16(&np->command, DBDMA_STOP); + + /* poke the tx dma channel */ + spin_lock_irqsave(&mp->lock, flags); + mp->tx_fill = next; + if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) { + out_le16(&cp->xfer_status, 0); + out_le16(&cp->command, OUTPUT_LAST); + out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); + ++mp->tx_active; + mace_set_timeout(dev); + } + if (++next 
>= N_TX_RING) + next = 0; + if (next == mp->tx_empty) + netif_stop_queue(dev); + spin_unlock_irqrestore(&mp->lock, flags); + + return NETDEV_TX_OK; +} + +static void mace_set_multicast(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + int i; + u32 crc; + unsigned long flags; + + spin_lock_irqsave(&mp->lock, flags); + mp->maccc &= ~PROM; + if (dev->flags & IFF_PROMISC) { + mp->maccc |= PROM; + } else { + unsigned char multicast_filter[8]; + struct netdev_hw_addr *ha; + + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < 8; i++) + multicast_filter[i] = 0xff; + } else { + for (i = 0; i < 8; i++) + multicast_filter[i] = 0; + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + i = crc >> 26; /* bit number in multicast_filter */ + multicast_filter[i >> 3] |= 1 << (i & 7); + } + } +#if 0 + printk("Multicast filter :"); + for (i = 0; i < 8; i++) + printk("%02x ", multicast_filter[i]); + printk("\n"); +#endif + + if (mp->chipid == BROKEN_ADDRCHG_REV) + out_8(&mb->iac, LOGADDR); + else { + out_8(&mb->iac, ADDRCHG | LOGADDR); + while ((in_8(&mb->iac) & ADDRCHG) != 0) + ; + } + for (i = 0; i < 8; ++i) + out_8(&mb->ladrf, multicast_filter[i]); + if (mp->chipid != BROKEN_ADDRCHG_REV) + out_8(&mb->iac, 0); + } + /* reset maccc */ + out_8(&mb->maccc, mp->maccc); + spin_unlock_irqrestore(&mp->lock, flags); +} + +static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev) +{ + volatile struct mace __iomem *mb = mp->mace; + static int mace_babbles, mace_jabbers; + + if (intr & MPCO) + dev->stats.rx_missed_errors += 256; + dev->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ + if (intr & RNTPCO) + dev->stats.rx_length_errors += 256; + dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ + if (intr & CERR) + ++dev->stats.tx_heartbeat_errors; + if (intr & BABBLE) + if (mace_babbles++ < 4) + printk(KERN_DEBUG "mace: babbling transmitter\n"); + if (intr & JABBER) + if (mace_jabbers++ < 4) + printk(KERN_DEBUG "mace: jabbering transceiver\n"); +} + +static irqreturn_t mace_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + volatile struct dbdma_regs __iomem *td = mp->tx_dma; + volatile struct dbdma_cmd *cp; + int intr, fs, i, stat, x; + int xcount, dstat; + unsigned long flags; + /* static int mace_last_fs, mace_last_xcount; */ + + spin_lock_irqsave(&mp->lock, flags); + intr = in_8(&mb->ir); /* read interrupt register */ + in_8(&mb->xmtrc); /* get retries */ + mace_handle_misc_intrs(mp, intr, dev); + + i = mp->tx_empty; + while (in_8(&mb->pr) & XMTSV) { + del_timer(&mp->tx_timeout); + mp->timeout_active = 0; + /* + * Clear any interrupt indication associated with this status + * word. This appears to unlatch any error indication from + * the DMA controller. + */ + intr = in_8(&mb->ir); + if (intr != 0) + mace_handle_misc_intrs(mp, intr, dev); + if (mp->tx_bad_runt) { + fs = in_8(&mb->xmtfs); + mp->tx_bad_runt = 0; + out_8(&mb->xmtfc, AUTO_PAD_XMIT); + continue; + } + dstat = le32_to_cpu(td->status); + /* stop DMA controller */ + out_le32(&td->control, RUN << 16); + /* + * xcount is the number of complete frames which have been + * written to the fifo but for which status has not been read. 
+ */ + xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK; + if (xcount == 0 || (dstat & DEAD)) { + /* + * If a packet was aborted before the DMA controller has + * finished transferring it, it seems that there are 2 bytes + * which are stuck in some buffer somewhere. These will get + * transmitted as soon as we read the frame status (which + * reenables the transmit data transfer request). Turning + * off the DMA controller and/or resetting the MACE doesn't + * help. So we disable auto-padding and FCS transmission + * so the two bytes will only be a runt packet which should + * be ignored by other stations. + */ + out_8(&mb->xmtfc, DXMTFCS); + } + fs = in_8(&mb->xmtfs); + if ((fs & XMTSV) == 0) { + printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n", + fs, xcount, dstat); + mace_reset(dev); + /* + * XXX mace likes to hang the machine after a xmtfs error. + * This is hard to reproduce, resetting *may* help + */ + } + cp = mp->tx_cmds + NCMDS_TX * i; + stat = le16_to_cpu(cp->xfer_status); + if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) { + /* + * Check whether there were in fact 2 bytes written to + * the transmit FIFO. + */ + udelay(1); + x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK; + if (x != 0) { + /* there were two bytes with an end-of-packet indication */ + mp->tx_bad_runt = 1; + mace_set_timeout(dev); + } else { + /* + * Either there weren't the two bytes buffered up, or they + * didn't have an end-of-packet indication. + * We flush the transmit FIFO just in case (by setting the + * XMTFWU bit with the transmitter disabled). + */ + out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT); + out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU); + udelay(1); + out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT); + out_8(&mb->xmtfc, AUTO_PAD_XMIT); + } + } + /* dma should have finished */ + if (i == mp->tx_fill) { + printk(KERN_DEBUG "mace: tx ring ran out? 
(fs=%x xc=%d ds=%x)\n", + fs, xcount, dstat); + continue; + } + /* Update stats */ + if (fs & (UFLO|LCOL|LCAR|RTRY)) { + ++dev->stats.tx_errors; + if (fs & LCAR) + ++dev->stats.tx_carrier_errors; + if (fs & (UFLO|LCOL|RTRY)) + ++dev->stats.tx_aborted_errors; + } else { + dev->stats.tx_bytes += mp->tx_bufs[i]->len; + ++dev->stats.tx_packets; + } + dev_kfree_skb_irq(mp->tx_bufs[i]); + --mp->tx_active; + if (++i >= N_TX_RING) + i = 0; +#if 0 + mace_last_fs = fs; + mace_last_xcount = xcount; +#endif + } + + if (i != mp->tx_empty) { + mp->tx_fullup = 0; + netif_wake_queue(dev); + } + mp->tx_empty = i; + i += mp->tx_active; + if (i >= N_TX_RING) + i -= N_TX_RING; + if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) { + do { + /* set up the next one */ + cp = mp->tx_cmds + NCMDS_TX * i; + out_le16(&cp->xfer_status, 0); + out_le16(&cp->command, OUTPUT_LAST); + ++mp->tx_active; + if (++i >= N_TX_RING) + i = 0; + } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE); + out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE)); + mace_set_timeout(dev); + } + spin_unlock_irqrestore(&mp->lock, flags); + return IRQ_HANDLED; +} + +static void mace_tx_timeout(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct mace_data *mp = netdev_priv(dev); + volatile struct mace __iomem *mb = mp->mace; + volatile struct dbdma_regs __iomem *td = mp->tx_dma; + volatile struct dbdma_regs __iomem *rd = mp->rx_dma; + volatile struct dbdma_cmd *cp; + unsigned long flags; + int i; + + spin_lock_irqsave(&mp->lock, flags); + mp->timeout_active = 0; + if (mp->tx_active == 0 && !mp->tx_bad_runt) + goto out; + + /* update various counters */ + mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); + + cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; + + /* turn off both tx and rx and reset the chip */ + out_8(&mb->maccc, 0); + printk(KERN_ERR "mace: transmit timeout - resetting\n"); + dbdma_reset(td); + mace_reset(dev); + + /* restart rx dma */ + cp = bus_to_virt(le32_to_cpu(rd->cmdptr)); + dbdma_reset(rd); + out_le16(&cp->xfer_status, 0); + out_le32(&rd->cmdptr, virt_to_bus(cp)); + out_le32(&rd->control, (RUN << 16) | RUN); + + /* fix up the transmit side */ + i = mp->tx_empty; + mp->tx_active = 0; + ++dev->stats.tx_errors; + if (mp->tx_bad_runt) { + mp->tx_bad_runt = 0; + } else if (i != mp->tx_fill) { + dev_kfree_skb(mp->tx_bufs[i]); + if (++i >= N_TX_RING) + i = 0; + mp->tx_empty = i; + } + mp->tx_fullup = 0; + netif_wake_queue(dev); + if (i != mp->tx_fill) { + cp = mp->tx_cmds + NCMDS_TX * i; + out_le16(&cp->xfer_status, 0); + out_le16(&cp->command, OUTPUT_LAST); + out_le32(&td->cmdptr, virt_to_bus(cp)); + out_le32(&td->control, (RUN << 16) | RUN); + ++mp->tx_active; + mace_set_timeout(dev); + } + + /* turn it back on */ + out_8(&mb->imr, RCVINT); + out_8(&mb->maccc, mp->maccc); + +out: + spin_unlock_irqrestore(&mp->lock, flags); +} + +static irqreturn_t mace_txdma_intr(int irq, void *dev_id) +{ + return IRQ_HANDLED; +} + +static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct mace_data *mp = netdev_priv(dev); + volatile struct dbdma_regs __iomem *rd = mp->rx_dma; + volatile struct dbdma_cmd *cp, *np; + int i, nb, stat, next; + struct sk_buff *skb; + unsigned frame_status; + static int mace_lost_status; + unsigned char *data; + unsigned long flags; + + spin_lock_irqsave(&mp->lock, flags); + for (i = mp->rx_empty; i != mp->rx_fill; ) { + cp = mp->rx_cmds + i; + stat = le16_to_cpu(cp->xfer_status); + if 
((stat & ACTIVE) == 0) { + next = i + 1; + if (next >= N_RX_RING) + next = 0; + np = mp->rx_cmds + next; + if (next != mp->rx_fill && + (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) { + printk(KERN_DEBUG "mace: lost a status word\n"); + ++mace_lost_status; + } else + break; + } + nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count); + out_le16(&cp->command, DBDMA_STOP); + /* got a packet, have a look at it */ + skb = mp->rx_bufs[i]; + if (!skb) { + ++dev->stats.rx_dropped; + } else if (nb > 8) { + data = skb->data; + frame_status = (data[nb-3] << 8) + data[nb-4]; + if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) { + ++dev->stats.rx_errors; + if (frame_status & RS_OFLO) + ++dev->stats.rx_over_errors; + if (frame_status & RS_FRAMERR) + ++dev->stats.rx_frame_errors; + if (frame_status & RS_FCSERR) + ++dev->stats.rx_crc_errors; + } else { + /* Mace feature AUTO_STRIP_RCV is on by default, dropping the + * FCS on frames with 802.3 headers. This means that Ethernet + * frames have 8 extra octets at the end, while 802.3 frames + * have only 4. We need to correctly account for this. */ + if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */ + nb -= 4; + else /* Ethernet header; mace includes FCS */ + nb -= 8; + skb_put(skb, nb); + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_bytes += skb->len; + netif_rx(skb); + mp->rx_bufs[i] = NULL; + ++dev->stats.rx_packets; + } + } else { + ++dev->stats.rx_errors; + ++dev->stats.rx_length_errors; + } + + /* advance to next */ + if (++i >= N_RX_RING) + i = 0; + } + mp->rx_empty = i; + + i = mp->rx_fill; + for (;;) { + next = i + 1; + if (next >= N_RX_RING) + next = 0; + if (next == mp->rx_empty) + break; + cp = mp->rx_cmds + i; + skb = mp->rx_bufs[i]; + if (!skb) { + skb = netdev_alloc_skb(dev, RX_BUFLEN + 2); + if (skb) { + skb_reserve(skb, 2); + mp->rx_bufs[i] = skb; + } + } + cp->req_count = cpu_to_le16(RX_BUFLEN); + data = skb? 
skb->data: dummy_buf; + cp->phy_addr = cpu_to_le32(virt_to_bus(data)); + out_le16(&cp->xfer_status, 0); + out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS); +#if 0 + if ((le32_to_cpu(rd->status) & ACTIVE) != 0) { + out_le32(&rd->control, (PAUSE << 16) | PAUSE); + while ((in_le32(&rd->status) & ACTIVE) != 0) + ; + } +#endif + i = next; + } + if (i != mp->rx_fill) { + out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE)); + mp->rx_fill = i; + } + spin_unlock_irqrestore(&mp->lock, flags); + return IRQ_HANDLED; +} + +static const struct of_device_id mace_match[] = +{ + { + .name = "mace", + }, + {}, +}; +MODULE_DEVICE_TABLE (of, mace_match); + +static struct macio_driver mace_driver = +{ + .driver = { + .name = "mace", + .owner = THIS_MODULE, + .of_match_table = mace_match, + }, + .probe = mace_probe, + .remove = mace_remove, +}; + + +static int __init mace_init(void) +{ + return macio_register_driver(&mace_driver); +} + +static void __exit mace_cleanup(void) +{ + macio_unregister_driver(&mace_driver); + + kfree(dummy_buf); + dummy_buf = NULL; +} + +MODULE_AUTHOR("Paul Mackerras"); +MODULE_DESCRIPTION("PowerMac MACE driver."); +module_param(port_aaui, int, 0); +MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)"); +MODULE_LICENSE("GPL"); + +module_init(mace_init); +module_exit(mace_cleanup); diff --git a/drivers/net/ethernet/apple/mace.h b/drivers/net/ethernet/apple/mace.h new file mode 100644 index 000000000..30b7ec0ce --- /dev/null +++ b/drivers/net/ethernet/apple/mace.h @@ -0,0 +1,173 @@ +/* + * mace.h - definitions for the registers in the Am79C940 MACE + * (Medium Access Control for Ethernet) controller. + * + * Copyright (C) 1996 Paul Mackerras. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#define REG(x) volatile unsigned char x; char x ## _pad[15] + +struct mace { + REG(rcvfifo); /* receive FIFO */ + REG(xmtfifo); /* transmit FIFO */ + REG(xmtfc); /* transmit frame control */ + REG(xmtfs); /* transmit frame status */ + REG(xmtrc); /* transmit retry count */ + REG(rcvfc); /* receive frame control */ + REG(rcvfs); /* receive frame status (4 bytes) */ + REG(fifofc); /* FIFO frame count */ + REG(ir); /* interrupt register */ + REG(imr); /* interrupt mask register */ + REG(pr); /* poll register */ + REG(biucc); /* bus interface unit config control */ + REG(fifocc); /* FIFO configuration control */ + REG(maccc); /* medium access control config control */ + REG(plscc); /* phys layer signalling config control */ + REG(phycc); /* physical configuration control */ + REG(chipid_lo); /* chip ID, lsb */ + REG(chipid_hi); /* chip ID, msb */ + REG(iac); /* internal address config */ + REG(reg19); + REG(ladrf); /* logical address filter (8 bytes) */ + REG(padr); /* physical address (6 bytes) */ + REG(reg22); + REG(reg23); + REG(mpc); /* missed packet count (clears when read) */ + REG(reg25); + REG(rntpc); /* runt packet count (clears when read) */ + REG(rcvcc); /* recv collision count (clears when read) */ + REG(reg28); + REG(utr); /* user test reg */ + REG(reg30); + REG(reg31); +}; + +/* Bits in XMTFC */ +#define DRTRY 0x80 /* don't retry transmission after collision */ +#define DXMTFCS 0x08 /* don't append FCS to transmitted frame */ +#define AUTO_PAD_XMIT 0x01 /* auto-pad short packets on transmission */ + +/* Bits in XMTFS: only valid when XMTSV is set in PR and XMTFS */ +#define XMTSV 0x80 /* transmit status (i.e. XMTFS) valid */ +#define UFLO 0x40 /* underflow - xmit fifo ran dry */ +#define LCOL 0x20 /* late collision (transmission aborted) */ +#define MORE 0x10 /* 2 or more retries needed to xmit frame */ +#define ONE 0x08 /* 1 retry needed to xmit frame */ +#define DEFER 0x04 /* MACE had to defer xmission (enet busy) */ +#define LCAR 0x02 /* loss of carrier (transmission aborted) */ +#define RTRY 0x01 /* too many retries (transmission aborted) */ + +/* Bits in XMTRC: only valid when XMTSV is set in PR (and XMTFS) */ +#define EXDEF 0x80 /* had to defer for excessive time */ +#define RETRY_MASK 0x0f /* number of retries (0 - 15) */ + +/* Bits in RCVFC */ +#define LLRCV 0x08 /* low latency receive: early DMA request */ +#define M_RBAR 0x04 /* sets function of EAM/R pin */ +#define AUTO_STRIP_RCV 0x01 /* auto-strip short LLC frames on recv */ + +/* + * Bits in RCVFS. After a frame is received, four bytes of status + * are automatically read from this register and appended to the frame + * data in memory. These are: + * Byte 0 and 1: message byte count and frame status + * Byte 2: runt packet count + * Byte 3: receive collision count + */ +#define RS_OFLO 0x8000 /* receive FIFO overflowed */ +#define RS_CLSN 0x4000 /* received frame suffered (late) collision */ +#define RS_FRAMERR 0x2000 /* framing error flag */ +#define RS_FCSERR 0x1000 /* frame had FCS error */ +#define RS_COUNT 0x0fff /* mask for byte count field */ + +/* Bits (fields) in FIFOFC */ +#define RCVFC_SH 4 /* receive frame count in FIFO */ +#define RCVFC_MASK 0x0f +#define XMTFC_SH 0 /* transmit frame count in FIFO */ +#define XMTFC_MASK 0x0f + +/* + * Bits in IR and IMR. The IR clears itself when read. + * Setting a bit in the IMR will disable the corresponding interrupt. 
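+ *
+ * For example, the drivers write RCVINT here after bringing the chip up,
+ * so that only the receive interrupt is masked (receive traffic is
+ * handled through the DMA interrupt handlers instead), and write 0xFF
+ * to mask every source while the chip is being reconfigured.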
+ */ +#define JABBER 0x80 /* jabber error - 10baseT xmission too long */ +#define BABBLE 0x40 /* babble - xmitter xmitting for too long */ +#define CERR 0x20 /* collision err - no SQE test (heartbeat) */ +#define RCVCCO 0x10 /* RCVCC overflow */ +#define RNTPCO 0x08 /* RNTPC overflow */ +#define MPCO 0x04 /* MPC overflow */ +#define RCVINT 0x02 /* receive interrupt */ +#define XMTINT 0x01 /* transmitter interrupt */ + +/* Bits in PR */ +#define XMTSV 0x80 /* XMTFS valid (same as in XMTFS) */ +#define TDTREQ 0x40 /* set when xmit fifo is requesting data */ +#define RDTREQ 0x20 /* set when recv fifo requests data xfer */ + +/* Bits in BIUCC */ +#define BSWP 0x40 /* byte swap, i.e. big-endian bus */ +#define XMTSP_4 0x00 /* start xmitting when 4 bytes in FIFO */ +#define XMTSP_16 0x10 /* start xmitting when 16 bytes in FIFO */ +#define XMTSP_64 0x20 /* start xmitting when 64 bytes in FIFO */ +#define XMTSP_112 0x30 /* start xmitting when 112 bytes in FIFO */ +#define SWRST 0x01 /* software reset */ + +/* Bits in FIFOCC */ +#define XMTFW_8 0x00 /* xmit fifo watermark = 8 words free */ +#define XMTFW_16 0x40 /* 16 words free */ +#define XMTFW_32 0x80 /* 32 words free */ +#define RCVFW_16 0x00 /* recv fifo watermark = 16 bytes avail */ +#define RCVFW_32 0x10 /* 32 bytes avail */ +#define RCVFW_64 0x20 /* 64 bytes avail */ +#define XMTFWU 0x08 /* xmit fifo watermark update enable */ +#define RCVFWU 0x04 /* recv fifo watermark update enable */ +#define XMTBRST 0x02 /* enable transmit burst mode */ +#define RCVBRST 0x01 /* enable receive burst mode */ + +/* Bits in MACCC */ +#define PROM 0x80 /* promiscuous mode */ +#define DXMT2PD 0x40 /* disable xmit two-part deferral algorithm */ +#define EMBA 0x20 /* enable modified backoff algorithm */ +#define DRCVPA 0x08 /* disable receiving physical address */ +#define DRCVBC 0x04 /* disable receiving broadcasts */ +#define ENXMT 0x02 /* enable transmitter */ +#define ENRCV 0x01 /* enable receiver */ + +/* Bits in PLSCC */ +#define XMTSEL 0x08 /* select DO+/DO- state when idle */ +#define PORTSEL_AUI 0x00 /* select AUI port */ +#define PORTSEL_10T 0x02 /* select 10Base-T port */ +#define PORTSEL_DAI 0x04 /* select DAI port */ +#define PORTSEL_GPSI 0x06 /* select GPSI port */ +#define ENPLSIO 0x01 /* enable optional PLS I/O pins */ + +/* Bits in PHYCC */ +#define LNKFL 0x80 /* reports 10Base-T link failure */ +#define DLNKTST 0x40 /* disable 10Base-T link test */ +#define REVPOL 0x20 /* 10Base-T receiver polarity reversed */ +#define DAPC 0x10 /* disable auto receiver polarity correction */ +#define LRT 0x08 /* low receive threshold for long links */ +#define ASEL 0x04 /* auto-select AUI or 10Base-T port */ +#define RWAKE 0x02 /* remote wake function */ +#define AWAKE 0x01 /* auto wake function */ + +/* Bits in IAC */ +#define ADDRCHG 0x80 /* request address change */ +#define PHYADDR 0x04 /* access physical address */ +#define LOGADDR 0x02 /* access multicast filter */ + +/* Bits in UTR */ +#define RTRE 0x80 /* reserved test register enable. DON'T SET. */ +#define RTRD 0x40 /* reserved test register disable. 
Sticky */ +#define RPAC 0x20 /* accept runt packets */ +#define FCOLL 0x10 /* force collision */ +#define RCVFCSE 0x08 /* receive FCS enable */ +#define LOOP_NONE 0x00 /* no loopback */ +#define LOOP_EXT 0x02 /* external loopback */ +#define LOOP_INT 0x04 /* internal loopback, excludes MENDEC */ +#define LOOP_MENDEC 0x06 /* internal loopback, includes MENDEC */ diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c new file mode 100644 index 000000000..89914ca17 --- /dev/null +++ b/drivers/net/ethernet/apple/macmace.c @@ -0,0 +1,788 @@ +/* + * Driver for the Macintosh 68K onboard MACE controller with PSC + * driven DMA. The MACE driver code is derived from mace.c. The + * Mac68k theory of operation is courtesy of the MacBSD wizards. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Copyright (C) 1996 Paul Mackerras. + * Copyright (C) 1998 Alan Cox + * + * Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver + * + * Copyright (C) 2007 Finn Thain + * + * Converted to DMA API, converted to unified driver model, + * sync'd some routines with mace.c and fixed various bugs. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mace.h" + +static char mac_mace_string[] = "macmace"; + +#define N_TX_BUFF_ORDER 0 +#define N_TX_RING (1 << N_TX_BUFF_ORDER) +#define N_RX_BUFF_ORDER 3 +#define N_RX_RING (1 << N_RX_BUFF_ORDER) + +#define TX_TIMEOUT HZ + +#define MACE_BUFF_SIZE 0x800 + +/* Chip rev needs workaround on HW & multicast addr change */ +#define BROKEN_ADDRCHG_REV 0x0941 + +/* The MACE is simply wired down on a Mac68K box */ + +#define MACE_BASE (void *)(0x50F1C000) +#define MACE_PROM (void *)(0x50F08001) + +struct mace_data { + volatile struct mace *mace; + unsigned char *tx_ring; + dma_addr_t tx_ring_phys; + unsigned char *rx_ring; + dma_addr_t rx_ring_phys; + int dma_intr; + int rx_slot, rx_tail; + int tx_slot, tx_sloti, tx_count; + int chipid; + struct device *device; +}; + +struct mace_frame { + u8 rcvcnt; + u8 pad1; + u8 rcvsts; + u8 pad2; + u8 rntpc; + u8 pad3; + u8 rcvcc; + u8 pad4; + u32 pad5; + u32 pad6; + u8 data[1]; + /* And frame continues.. 
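+ (the 12-bit receive byte count is split between rcvcnt and the low
+ nibble of rcvsts; mace_dma_rx_frame() below recovers it as
+ rcvcnt + ((rcvsts & 0x0f) << 8))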
*/ +}; + +#define PRIV_BYTES sizeof(struct mace_data) + +static int mace_open(struct net_device *dev); +static int mace_close(struct net_device *dev); +static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); +static void mace_set_multicast(struct net_device *dev); +static int mace_set_address(struct net_device *dev, void *addr); +static void mace_reset(struct net_device *dev); +static irqreturn_t mace_interrupt(int irq, void *dev_id); +static irqreturn_t mace_dma_intr(int irq, void *dev_id); +static void mace_tx_timeout(struct net_device *dev); +static void __mace_set_address(struct net_device *dev, void *addr); + +/* + * Load a receive DMA channel with a base address and ring length + */ + +static void mace_load_rxdma_base(struct net_device *dev, int set) +{ + struct mace_data *mp = netdev_priv(dev); + + psc_write_word(PSC_ENETRD_CMD + set, 0x0100); + psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys); + psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING); + psc_write_word(PSC_ENETRD_CMD + set, 0x9800); + mp->rx_tail = 0; +} + +/* + * Reset the receive DMA subsystem + */ + +static void mace_rxdma_reset(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mace = mp->mace; + u8 maccc = mace->maccc; + + mace->maccc = maccc & ~ENRCV; + + psc_write_word(PSC_ENETRD_CTL, 0x8800); + mace_load_rxdma_base(dev, 0x00); + psc_write_word(PSC_ENETRD_CTL, 0x0400); + + psc_write_word(PSC_ENETRD_CTL, 0x8800); + mace_load_rxdma_base(dev, 0x10); + psc_write_word(PSC_ENETRD_CTL, 0x0400); + + mace->maccc = maccc; + mp->rx_slot = 0; + + psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800); + psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800); +} + +/* + * Reset the transmit DMA subsystem + */ + +static void mace_txdma_reset(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mace = mp->mace; + u8 maccc; + + psc_write_word(PSC_ENETWR_CTL, 0x8800); + + maccc = mace->maccc; + mace->maccc = maccc & ~ENXMT; + + mp->tx_slot = mp->tx_sloti = 0; + mp->tx_count = N_TX_RING; + + psc_write_word(PSC_ENETWR_CTL, 0x0400); + mace->maccc = maccc; +} + +/* + * Disable DMA + */ + +static void mace_dma_off(struct net_device *dev) +{ + psc_write_word(PSC_ENETRD_CTL, 0x8800); + psc_write_word(PSC_ENETRD_CTL, 0x1000); + psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100); + psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100); + + psc_write_word(PSC_ENETWR_CTL, 0x8800); + psc_write_word(PSC_ENETWR_CTL, 0x1000); + psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100); + psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100); +} + +static const struct net_device_ops mace_netdev_ops = { + .ndo_open = mace_open, + .ndo_stop = mace_close, + .ndo_start_xmit = mace_xmit_start, + .ndo_tx_timeout = mace_tx_timeout, + .ndo_set_rx_mode = mace_set_multicast, + .ndo_set_mac_address = mace_set_address, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +/* + * Not really much of a probe. 
The hardware table tells us if this + * model of Macintrash has a MACE (AV macintoshes) + */ + +static int mace_probe(struct platform_device *pdev) +{ + int j; + struct mace_data *mp; + unsigned char *addr; + struct net_device *dev; + unsigned char checksum = 0; + int err; + + dev = alloc_etherdev(PRIV_BYTES); + if (!dev) + return -ENOMEM; + + mp = netdev_priv(dev); + + mp->device = &pdev->dev; + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + dev->base_addr = (u32)MACE_BASE; + mp->mace = MACE_BASE; + + dev->irq = IRQ_MAC_MACE; + mp->dma_intr = IRQ_MAC_MACE_DMA; + + mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo; + + /* + * The PROM contains 8 bytes which total 0xFF when XOR'd + * together. Due to the usual peculiar apple brain damage + * the bytes are spaced out in a strange boundary and the + * bits are reversed. + */ + + addr = MACE_PROM; + + for (j = 0; j < 6; ++j) { + u8 v = bitrev8(addr[j<<4]); + checksum ^= v; + dev->dev_addr[j] = v; + } + for (; j < 8; ++j) { + checksum ^= bitrev8(addr[j<<4]); + } + + if (checksum != 0xFF) { + free_netdev(dev); + return -ENODEV; + } + + dev->netdev_ops = &mace_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n", + dev->name, dev->dev_addr); + + err = register_netdev(dev); + if (!err) + return 0; + + free_netdev(dev); + return err; +} + +/* + * Reset the chip. + */ + +static void mace_reset(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + int i; + + /* soft-reset the chip */ + i = 200; + while (--i) { + mb->biucc = SWRST; + if (mb->biucc & SWRST) { + udelay(10); + continue; + } + break; + } + if (!i) { + printk(KERN_ERR "macmace: cannot reset chip!\n"); + return; + } + + mb->maccc = 0; /* turn off tx, rx */ + mb->imr = 0xFF; /* disable all intrs for now */ + i = mb->ir; + + mb->biucc = XMTSP_64; + mb->utr = RTRD; + mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU; + + mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */ + mb->rcvfc = 0; + + /* load up the hardware address */ + __mace_set_address(dev, dev->dev_addr); + + /* clear the multicast filter */ + if (mp->chipid == BROKEN_ADDRCHG_REV) + mb->iac = LOGADDR; + else { + mb->iac = ADDRCHG | LOGADDR; + while ((mb->iac & ADDRCHG) != 0) + ; + } + for (i = 0; i < 8; ++i) + mb->ladrf = 0; + + /* done changing address */ + if (mp->chipid != BROKEN_ADDRCHG_REV) + mb->iac = 0; + + mb->plscc = PORTSEL_AUI; +} + +/* + * Load the address on a mace controller. + */ + +static void __mace_set_address(struct net_device *dev, void *addr) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + unsigned char *p = addr; + int i; + + /* load up the hardware address */ + if (mp->chipid == BROKEN_ADDRCHG_REV) + mb->iac = PHYADDR; + else { + mb->iac = ADDRCHG | PHYADDR; + while ((mb->iac & ADDRCHG) != 0) + ; + } + for (i = 0; i < 6; ++i) + mb->padr = dev->dev_addr[i] = p[i]; + if (mp->chipid != BROKEN_ADDRCHG_REV) + mb->iac = 0; +} + +static int mace_set_address(struct net_device *dev, void *addr) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + unsigned long flags; + u8 maccc; + + local_irq_save(flags); + + maccc = mb->maccc; + + __mace_set_address(dev, addr); + + mb->maccc = maccc; + + local_irq_restore(flags); + + return 0; +} + +/* + * Open the Macintosh MACE. Most of this is playing with the DMA + * engine. The ethernet chip is quite friendly. 
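+ *
+ * Roughly: reset the chip, grab the MACE and PSC DMA interrupts,
+ * allocate the coherent Tx/Rx ring buffers, reset both DMA channels,
+ * then enable the transmitter/receiver and mask only RCVINT in the IMR.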
+ */ + +static int mace_open(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + + /* reset the chip */ + mace_reset(dev); + + if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) { + printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq); + return -EAGAIN; + } + if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) { + printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr); + free_irq(dev->irq, dev); + return -EAGAIN; + } + + /* Allocate the DMA ring buffers */ + + mp->tx_ring = dma_alloc_coherent(mp->device, + N_TX_RING * MACE_BUFF_SIZE, + &mp->tx_ring_phys, GFP_KERNEL); + if (mp->tx_ring == NULL) + goto out1; + + mp->rx_ring = dma_alloc_coherent(mp->device, + N_RX_RING * MACE_BUFF_SIZE, + &mp->rx_ring_phys, GFP_KERNEL); + if (mp->rx_ring == NULL) + goto out2; + + mace_dma_off(dev); + + /* Not sure what these do */ + + psc_write_word(PSC_ENETWR_CTL, 0x9000); + psc_write_word(PSC_ENETRD_CTL, 0x9000); + psc_write_word(PSC_ENETWR_CTL, 0x0400); + psc_write_word(PSC_ENETRD_CTL, 0x0400); + + mace_rxdma_reset(dev); + mace_txdma_reset(dev); + + /* turn it on! */ + mb->maccc = ENXMT | ENRCV; + /* enable all interrupts except receive interrupts */ + mb->imr = RCVINT; + return 0; + +out2: + dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE, + mp->tx_ring, mp->tx_ring_phys); +out1: + free_irq(dev->irq, dev); + free_irq(mp->dma_intr, dev); + return -ENOMEM; +} + +/* + * Shut down the mace and its interrupt channel + */ + +static int mace_close(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + + mb->maccc = 0; /* disable rx and tx */ + mb->imr = 0xFF; /* disable all irqs */ + mace_dma_off(dev); /* disable rx and tx dma */ + + return 0; +} + +/* + * Transmit a frame + */ + +static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + unsigned long flags; + + /* Stop the queue since there's only the one buffer */ + + local_irq_save(flags); + netif_stop_queue(dev); + if (!mp->tx_count) { + printk(KERN_ERR "macmace: tx queue running but no free buffers.\n"); + local_irq_restore(flags); + return NETDEV_TX_BUSY; + } + mp->tx_count--; + local_irq_restore(flags); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* We need to copy into our xmit buffer to take care of alignment and caching issues */ + skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); + + /* load the Tx DMA and fire it off */ + + psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys); + psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len); + psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800); + + mp->tx_slot ^= 0x10; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void mace_set_multicast(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + int i; + u32 crc; + u8 maccc; + unsigned long flags; + + local_irq_save(flags); + maccc = mb->maccc; + mb->maccc &= ~PROM; + + if (dev->flags & IFF_PROMISC) { + mb->maccc |= PROM; + } else { + unsigned char multicast_filter[8]; + struct netdev_hw_addr *ha; + + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < 8; i++) { + multicast_filter[i] = 0xFF; + } + } else { + for (i = 0; i < 8; i++) + multicast_filter[i] = 0; + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + /* bit number in multicast_filter */ + i = crc >> 26; + multicast_filter[i >> 3] |= 1 << 
(i & 7); + } + } + + if (mp->chipid == BROKEN_ADDRCHG_REV) + mb->iac = LOGADDR; + else { + mb->iac = ADDRCHG | LOGADDR; + while ((mb->iac & ADDRCHG) != 0) + ; + } + for (i = 0; i < 8; ++i) + mb->ladrf = multicast_filter[i]; + if (mp->chipid != BROKEN_ADDRCHG_REV) + mb->iac = 0; + } + + mb->maccc = maccc; + local_irq_restore(flags); +} + +static void mace_handle_misc_intrs(struct net_device *dev, int intr) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + static int mace_babbles, mace_jabbers; + + if (intr & MPCO) + dev->stats.rx_missed_errors += 256; + dev->stats.rx_missed_errors += mb->mpc; /* reading clears it */ + if (intr & RNTPCO) + dev->stats.rx_length_errors += 256; + dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */ + if (intr & CERR) + ++dev->stats.tx_heartbeat_errors; + if (intr & BABBLE) + if (mace_babbles++ < 4) + printk(KERN_DEBUG "macmace: babbling transmitter\n"); + if (intr & JABBER) + if (mace_jabbers++ < 4) + printk(KERN_DEBUG "macmace: jabbering transceiver\n"); +} + +static irqreturn_t mace_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + int intr, fs; + unsigned long flags; + + /* don't want the dma interrupt handler to fire */ + local_irq_save(flags); + + intr = mb->ir; /* read interrupt register */ + mace_handle_misc_intrs(dev, intr); + + if (intr & XMTINT) { + fs = mb->xmtfs; + if ((fs & XMTSV) == 0) { + printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs); + mace_reset(dev); + /* + * XXX mace likes to hang the machine after a xmtfs error. + * This is hard to reproduce, resetting *may* help + */ + } + /* dma should have finished */ + if (!mp->tx_count) { + printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs); + } + /* Update stats */ + if (fs & (UFLO|LCOL|LCAR|RTRY)) { + ++dev->stats.tx_errors; + if (fs & LCAR) + ++dev->stats.tx_carrier_errors; + else if (fs & (UFLO|LCOL|RTRY)) { + ++dev->stats.tx_aborted_errors; + if (mb->xmtfs & UFLO) { + printk(KERN_ERR "%s: DMA underrun.\n", dev->name); + dev->stats.tx_fifo_errors++; + mace_txdma_reset(dev); + } + } + } + } + + if (mp->tx_count) + netif_wake_queue(dev); + + local_irq_restore(flags); + + return IRQ_HANDLED; +} + +static void mace_tx_timeout(struct net_device *dev) +{ + struct mace_data *mp = netdev_priv(dev); + volatile struct mace *mb = mp->mace; + unsigned long flags; + + local_irq_save(flags); + + /* turn off both tx and rx and reset the chip */ + mb->maccc = 0; + printk(KERN_ERR "macmace: transmit timeout - resetting\n"); + mace_txdma_reset(dev); + mace_reset(dev); + + /* restart rx dma */ + mace_rxdma_reset(dev); + + mp->tx_count = N_TX_RING; + netif_wake_queue(dev); + + /* turn it on! 
*/ + mb->maccc = ENXMT | ENRCV; + /* enable all interrupts except receive interrupts */ + mb->imr = RCVINT; + + local_irq_restore(flags); +} + +/* + * Handle a newly arrived frame + */ + +static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) +{ + struct sk_buff *skb; + unsigned int frame_status = mf->rcvsts; + + if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) { + dev->stats.rx_errors++; + if (frame_status & RS_OFLO) { + printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name); + dev->stats.rx_fifo_errors++; + } + if (frame_status & RS_CLSN) + dev->stats.collisions++; + if (frame_status & RS_FRAMERR) + dev->stats.rx_frame_errors++; + if (frame_status & RS_FCSERR) + dev->stats.rx_crc_errors++; + } else { + unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 ); + + skb = netdev_alloc_skb(dev, frame_length + 2); + if (!skb) { + dev->stats.rx_dropped++; + return; + } + skb_reserve(skb, 2); + memcpy(skb_put(skb, frame_length), mf->data, frame_length); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += frame_length; + } +} + +/* + * The PSC has passed us a DMA interrupt event. + */ + +static irqreturn_t mace_dma_intr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct mace_data *mp = netdev_priv(dev); + int left, head; + u16 status; + u32 baka; + + /* Not sure what this does */ + + while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY)); + if (!(baka & 0x60000000)) return IRQ_NONE; + + /* + * Process the read queue + */ + + status = psc_read_word(PSC_ENETRD_CTL); + + if (status & 0x2000) { + mace_rxdma_reset(dev); + } else if (status & 0x0100) { + psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100); + + left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot); + head = N_RX_RING - left; + + /* Loop through the ring buffer and process new packages */ + + while (mp->rx_tail < head) { + mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring + + (mp->rx_tail * MACE_BUFF_SIZE))); + mp->rx_tail++; + } + + /* If we're out of buffers in this ring then switch to */ + /* the other set, otherwise just reactivate this one. 
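+ ("switching" here means reloading the ring base via mace_load_rxdma_base()
+ and toggling rx_slot between the 0x00 and 0x10 PSC register sets,
+ as the code below does)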
*/ + + if (!left) { + mace_load_rxdma_base(dev, mp->rx_slot); + mp->rx_slot ^= 0x10; + } else { + psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800); + } + } + + /* + * Process the write queue + */ + + status = psc_read_word(PSC_ENETWR_CTL); + + if (status & 0x2000) { + mace_txdma_reset(dev); + } else if (status & 0x0100) { + psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100); + mp->tx_sloti ^= 0x10; + mp->tx_count++; + } + return IRQ_HANDLED; +} + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Macintosh MACE ethernet driver"); +MODULE_ALIAS("platform:macmace"); + +static int mac_mace_device_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct mace_data *mp = netdev_priv(dev); + + unregister_netdev(dev); + + free_irq(dev->irq, dev); + free_irq(IRQ_MAC_MACE_DMA, dev); + + dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE, + mp->rx_ring, mp->rx_ring_phys); + dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE, + mp->tx_ring, mp->tx_ring_phys); + + free_netdev(dev); + + return 0; +} + +static struct platform_driver mac_mace_driver = { + .probe = mace_probe, + .remove = mac_mace_device_remove, + .driver = { + .name = mac_mace_string, + }, +}; + +static int __init mac_mace_init_module(void) +{ + if (!MACH_IS_MAC) + return -ENODEV; + + return platform_driver_register(&mac_mace_driver); +} + +static void __exit mac_mace_cleanup_module(void) +{ + platform_driver_unregister(&mac_mace_driver); +} + +module_init(mac_mace_init_module); +module_exit(mac_mace_cleanup_module); diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig new file mode 100644 index 000000000..dea29ee24 --- /dev/null +++ b/drivers/net/ethernet/arc/Kconfig @@ -0,0 +1,44 @@ +# +# ARC EMAC network device configuration +# + +config NET_VENDOR_ARC + bool "ARC devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about ARC cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ARC + +config ARC_EMAC_CORE + tristate + select MII + select PHYLIB + +config ARC_EMAC + tristate "ARC EMAC support" + select ARC_EMAC_CORE + depends on OF_IRQ && OF_NET && HAS_DMA + ---help--- + On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x + non-standard on-chip ethernet device ARC EMAC 10/100 is used. + Say Y here if you have such a board. If unsure, say N. + +config EMAC_ROCKCHIP + tristate "Rockchip EMAC support" + select ARC_EMAC_CORE + depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA + ---help--- + Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. + This selects Rockchip SoC glue layer support for the + emac device driver. This driver is used for RK3066/RK3188 + EMAC ethernet controller. + +endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile new file mode 100644 index 000000000..79108af55 --- /dev/null +++ b/drivers/net/ethernet/arc/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the ARC network device drivers. 
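+# arc_emac.o is the shared EMAC core (emac_main.o plus emac_mdio.o);
+# emac_arc.o and emac_rockchip.o are the thin platform glue layers
+# selected by the corresponding Kconfig symbols.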
+# + +arc_emac-objs := emac_main.o emac_mdio.o +obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o +obj-$(CONFIG_ARC_EMAC) += emac_arc.o +obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h new file mode 100644 index 000000000..dae1ac300 --- /dev/null +++ b/drivers/net/ethernet/arc/emac.h @@ -0,0 +1,216 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * Registers and bits definitions of ARC EMAC + */ + +#ifndef ARC_EMAC_H +#define ARC_EMAC_H + +#include +#include +#include +#include +#include + +/* STATUS and ENABLE Register bit masks */ +#define TXINT_MASK (1<<0) /* Transmit interrupt */ +#define RXINT_MASK (1<<1) /* Receive interrupt */ +#define ERR_MASK (1<<2) /* Error interrupt */ +#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */ +#define MSER_MASK (1<<4) /* Missed packet counter error */ +#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */ +#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */ +#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */ +#define MDIO_MASK (1<<12) /* MDIO complete interrupt */ +#define TXPL_MASK (1<<31) /* Force polling of BD by EMAC */ + +/* CONTROL Register bit masks */ +#define EN_MASK (1<<0) /* VMAC enable */ +#define TXRN_MASK (1<<3) /* TX enable */ +#define RXRN_MASK (1<<4) /* RX enable */ +#define DSBC_MASK (1<<8) /* Disable receive broadcast */ +#define ENFL_MASK (1<<10) /* Enable Full-duplex */ +#define PROM_MASK (1<<11) /* Promiscuous mode */ + +/* Buffer descriptor INFO bit masks */ +#define OWN_MASK (1<<31) /* 0-CPU owns buffer, 1-EMAC owns buffer */ +#define FIRST_MASK (1<<16) /* First buffer in chain */ +#define LAST_MASK (1<<17) /* Last buffer in chain */ +#define LEN_MASK 0x000007FF /* last 11 bits */ +#define CRLS (1<<21) +#define DEFR (1<<22) +#define DROP (1<<23) +#define RTRY (1<<24) +#define LTCL (1<<28) +#define UFLO (1<<29) + +#define FOR_EMAC OWN_MASK +#define FOR_CPU 0 + +/* ARC EMAC register set combines entries for MAC and MDIO */ +enum { + R_ID = 0, + R_STATUS, + R_ENABLE, + R_CTRL, + R_POLLRATE, + R_RXERR, + R_MISS, + R_TX_RING, + R_RX_RING, + R_ADDRL, + R_ADDRH, + R_LAFL, + R_LAFH, + R_MDIO, +}; + +#define TX_TIMEOUT (400*HZ/1000) /* Transmission timeout */ + +#define ARC_EMAC_NAPI_WEIGHT 40 /* Workload for NAPI */ + +#define EMAC_BUFFER_SIZE 1536 /* EMAC buffer size */ + +/** + * struct arc_emac_bd - EMAC buffer descriptor (BD). + * + * @info: Contains status information on the buffer itself. + * @data: 32-bit byte addressable pointer to the packet data. + */ +struct arc_emac_bd { + __le32 info; + dma_addr_t data; +}; + +/* Number of Rx/Tx BD's */ +#define RX_BD_NUM 128 +#define TX_BD_NUM 128 + +#define RX_RING_SZ (RX_BD_NUM * sizeof(struct arc_emac_bd)) +#define TX_RING_SZ (TX_BD_NUM * sizeof(struct arc_emac_bd)) + +/** + * struct buffer_state - Stores Rx/Tx buffer state. + * @sk_buff: Pointer to socket buffer. + * @addr: Start address of DMA-mapped memory region. + * @len: Length of DMA-mapped memory region. + */ +struct buffer_state { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(addr); + DEFINE_DMA_UNMAP_LEN(len); +}; + +/** + * struct arc_emac_priv - Storage of EMAC's private information. + * @dev: Pointer to the current device. + * @phy_dev: Pointer to attached PHY device. + * @bus: Pointer to the current MII bus. + * @regs: Base address of EMAC memory-mapped control registers. + * @napi: Structure for NAPI. + * @rxbd: Pointer to Rx BD ring. + * @txbd: Pointer to Tx BD ring. 
+ * @rxbd_dma: DMA handle for Rx BD ring. + * @txbd_dma: DMA handle for Tx BD ring. + * @rx_buff: Storage for Rx buffers states. + * @tx_buff: Storage for Tx buffers states. + * @txbd_curr: Index of Tx BD to use on the next "ndo_start_xmit". + * @txbd_dirty: Index of Tx BD to free on the next Tx interrupt. + * @last_rx_bd: Index of the last Rx BD we've got from EMAC. + * @link: PHY's last seen link state. + * @duplex: PHY's last set duplex mode. + * @speed: PHY's last set speed. + */ +struct arc_emac_priv { + const char *drv_name; + const char *drv_version; + void (*set_mac_speed)(void *priv, unsigned int speed); + + /* Devices */ + struct device *dev; + struct phy_device *phy_dev; + struct mii_bus *bus; + + void __iomem *regs; + struct clk *clk; + + struct napi_struct napi; + + struct arc_emac_bd *rxbd; + struct arc_emac_bd *txbd; + + dma_addr_t rxbd_dma; + dma_addr_t txbd_dma; + + struct buffer_state rx_buff[RX_BD_NUM]; + struct buffer_state tx_buff[TX_BD_NUM]; + unsigned int txbd_curr; + unsigned int txbd_dirty; + + unsigned int last_rx_bd; + + unsigned int link; + unsigned int duplex; + unsigned int speed; +}; + +/** + * arc_reg_set - Sets EMAC register with provided value. + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @value: Value to set in register. + */ +static inline void arc_reg_set(struct arc_emac_priv *priv, int reg, int value) +{ + iowrite32(value, priv->regs + reg * sizeof(int)); +} + +/** + * arc_reg_get - Gets value of specified EMAC register. + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * + * returns: Value of requested register. + */ +static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg) +{ + return ioread32(priv->regs + reg * sizeof(int)); +} + +/** + * arc_reg_or - Applies mask to specified EMAC register - ("reg" | "mask"). + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @mask: Mask to apply to specified register. + * + * This function reads initial register value, then applies provided mask + * to it and then writes register back. + */ +static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask) +{ + unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value | mask); +} + +/** + * arc_reg_clr - Applies mask to specified EMAC register - ("reg" & ~"mask"). + * @priv: Pointer to ARC EMAC private data structure. + * @reg: Register offset from base address. + * @mask: Mask to apply to specified register. + * + * This function reads initial register value, then applies provided mask + * to it and then writes register back. 
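+ *
+ * For example, the EMAC interrupt handler in emac_main.c calls
+ * arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK) to mask Rx/Tx
+ * interrupts before scheduling NAPI, and the poll routine re-enables
+ * them with the matching arc_reg_or() once polling is done.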
+ */ +static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) +{ + unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value & ~mask); +} + +int arc_mdio_probe(struct arc_emac_priv *priv); +int arc_mdio_remove(struct arc_emac_priv *priv); +int arc_emac_probe(struct net_device *ndev, int interface); +int arc_emac_remove(struct net_device *ndev); + +#endif /* ARC_EMAC_H */ diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c new file mode 100644 index 000000000..f9cb99bfb --- /dev/null +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -0,0 +1,95 @@ +/** + * emac_arc.c - ARC EMAC specific glue layer + * + * Copyright (C) 2014 Romain Perier + * + * Romain Perier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include + +#include "emac.h" + +#define DRV_NAME "emac_arc" +#define DRV_VERSION "1.0" + +static int emac_arc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct arc_emac_priv *priv; + int interface, err; + + if (!dev->of_node) + return -ENODEV; + + ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); + if (!ndev) + return -ENOMEM; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + priv = netdev_priv(ndev); + priv->drv_name = DRV_NAME; + priv->drv_version = DRV_VERSION; + + interface = of_get_phy_mode(dev->of_node); + if (interface < 0) + interface = PHY_INTERFACE_MODE_MII; + + priv->clk = devm_clk_get(dev, "hclk"); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to retrieve host clock from device tree\n"); + err = -EINVAL; + goto out_netdev; + } + + err = arc_emac_probe(ndev, interface); +out_netdev: + if (err) + free_netdev(ndev); + return err; +} + +static int emac_arc_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + int err; + + err = arc_emac_remove(ndev); + free_netdev(ndev); + return err; +} + +static const struct of_device_id emac_arc_dt_ids[] = { + { .compatible = "snps,arc-emac" }, + { /* Sentinel */ } +}; + +static struct platform_driver emac_arc_driver = { + .probe = emac_arc_probe, + .remove = emac_arc_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = emac_arc_dt_ids, + }, +}; + +module_platform_driver(emac_arc_driver); + +MODULE_AUTHOR("Romain Perier "); +MODULE_DESCRIPTION("ARC EMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c new file mode 100644 index 000000000..abe1eabc0 --- /dev/null +++ b/drivers/net/ethernet/arc/emac_main.c @@ -0,0 +1,874 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Driver for the ARC EMAC 10100 (hardware revision 5) + * + * Contributors: + * Amit Bhor + * Sameer Dhavale + * Vineet Gupta + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "emac.h" + + +/** + * arc_emac_tx_avail - Return the number of available slots in the tx ring. + * @priv: Pointer to ARC EMAC private data structure. + * + * returns: the number of slots available for transmission in tx the ring. + */ +static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) +{ + return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM; +} + +/** + * arc_emac_adjust_link - Adjust the PHY link duplex. + * @ndev: Pointer to the net_device structure. + * + * This function is called to change the duplex setting after auto negotiation + * is done by the PHY. + */ +static void arc_emac_adjust_link(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct phy_device *phy_dev = priv->phy_dev; + unsigned int reg, state_changed = 0; + + if (priv->link != phy_dev->link) { + priv->link = phy_dev->link; + state_changed = 1; + } + + if (priv->speed != phy_dev->speed) { + priv->speed = phy_dev->speed; + state_changed = 1; + if (priv->set_mac_speed) + priv->set_mac_speed(priv, priv->speed); + } + + if (priv->duplex != phy_dev->duplex) { + reg = arc_reg_get(priv, R_CTRL); + + if (DUPLEX_FULL == phy_dev->duplex) + reg |= ENFL_MASK; + else + reg &= ~ENFL_MASK; + + arc_reg_set(priv, R_CTRL, reg); + priv->duplex = phy_dev->duplex; + state_changed = 1; + } + + if (state_changed) + phy_print_status(phy_dev); +} + +/** + * arc_emac_get_settings - Get PHY settings. + * @ndev: Pointer to net_device structure. + * @cmd: Pointer to ethtool_cmd structure. + * + * This implements ethtool command for getting PHY settings. If PHY could + * not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to get the PHY settings. + * Issue "ethtool ethX" under linux prompt to execute this function. + */ +static int arc_emac_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + return phy_ethtool_gset(priv->phy_dev, cmd); +} + +/** + * arc_emac_set_settings - Set PHY settings as passed in the argument. + * @ndev: Pointer to net_device structure. + * @cmd: Pointer to ethtool_cmd structure. + * + * This implements ethtool command for setting various PHY settings. If PHY + * could not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to set the PHY. + * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this + * function. + */ +static int arc_emac_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + return phy_ethtool_sset(priv->phy_dev, cmd); +} + +/** + * arc_emac_get_drvinfo - Get EMAC driver information. + * @ndev: Pointer to net_device structure. + * @info: Pointer to ethtool_drvinfo structure. + * + * This implements ethtool command for getting the driver information. + * Issue "ethtool -i ethX" under linux prompt to execute this function. 
+ */ +static void arc_emac_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + strlcpy(info->driver, priv->drv_name, sizeof(info->driver)); + strlcpy(info->version, priv->drv_version, sizeof(info->version)); +} + +static const struct ethtool_ops arc_emac_ethtool_ops = { + .get_settings = arc_emac_get_settings, + .set_settings = arc_emac_set_settings, + .get_drvinfo = arc_emac_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK) + +/** + * arc_emac_tx_clean - clears processed by EMAC Tx BDs. + * @ndev: Pointer to the network device. + */ +static void arc_emac_tx_clean(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned int i; + + for (i = 0; i < TX_BD_NUM; i++) { + unsigned int *txbd_dirty = &priv->txbd_dirty; + struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty]; + struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty]; + struct sk_buff *skb = tx_buff->skb; + unsigned int info = le32_to_cpu(txbd->info); + + if ((info & FOR_EMAC) || !txbd->data) + break; + + if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { + stats->tx_errors++; + stats->tx_dropped++; + + if (info & DEFR) + stats->tx_carrier_errors++; + + if (info & LTCL) + stats->collisions++; + + if (info & UFLO) + stats->tx_fifo_errors++; + } else if (likely(info & FIRST_OR_LAST_MASK)) { + stats->tx_packets++; + stats->tx_bytes += skb->len; + } + + dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), + dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); + + /* return the sk_buff to system */ + dev_kfree_skb_irq(skb); + + txbd->data = 0; + txbd->info = 0; + + *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; + } + + /* Ensure that txbd_dirty is visible to tx() before checking + * for queue stopped. + */ + smp_mb(); + + if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv)) + netif_wake_queue(ndev); +} + +/** + * arc_emac_rx - processing of Rx packets. + * @ndev: Pointer to the network device. + * @budget: How many BDs to process on 1 call. + * + * returns: Number of processed BDs + * + * Iterate through Rx BDs and deliver received packages to upper layer. + */ +static int arc_emac_rx(struct net_device *ndev, int budget) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int work_done; + + for (work_done = 0; work_done < budget; work_done++) { + unsigned int *last_rx_bd = &priv->last_rx_bd; + struct net_device_stats *stats = &ndev->stats; + struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; + struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; + unsigned int pktlen, info = le32_to_cpu(rxbd->info); + struct sk_buff *skb; + dma_addr_t addr; + + if (unlikely((info & OWN_MASK) == FOR_EMAC)) + break; + + /* Make a note that we saw a packet at this BD. + * So next time, driver starts from this + 1 + */ + *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; + + if (unlikely((info & FIRST_OR_LAST_MASK) != + FIRST_OR_LAST_MASK)) { + /* We pre-allocate buffers of MTU size so incoming + * packets won't be split/chained. 
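+ * Each Rx BD gets an EMAC_BUFFER_SIZE (1536 byte) skb, so a descriptor
+ * without both FIRST and LAST set is treated as an incomplete packet
+ * and counted as a length error.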
+ */ + if (net_ratelimit()) + netdev_err(ndev, "incomplete packet received\n"); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); + stats->rx_errors++; + stats->rx_length_errors++; + continue; + } + + pktlen = info & LEN_MASK; + stats->rx_packets++; + stats->rx_bytes += pktlen; + skb = rx_buff->skb; + skb_put(skb, pktlen); + skb->dev = ndev; + skb->protocol = eth_type_trans(skb, ndev); + + dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); + + /* Prepare the BD for next cycle */ + rx_buff->skb = netdev_alloc_skb_ip_align(ndev, + EMAC_BUFFER_SIZE); + if (unlikely(!rx_buff->skb)) { + stats->rx_errors++; + /* Because receive_skb is below, increment rx_dropped */ + stats->rx_dropped++; + continue; + } + + /* receive_skb only if new skb was allocated to avoid holes */ + netif_receive_skb(skb); + + addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, addr)) { + if (net_ratelimit()) + netdev_err(ndev, "cannot dma map\n"); + dev_kfree_skb(rx_buff->skb); + stats->rx_errors++; + continue; + } + dma_unmap_addr_set(rx_buff, addr, addr); + dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); + + rxbd->data = cpu_to_le32(addr); + + /* Make sure pointer to data buffer is set */ + wmb(); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); + } + + return work_done; +} + +/** + * arc_emac_poll - NAPI poll handler. + * @napi: Pointer to napi_struct structure. + * @budget: How many BDs to process on 1 call. + * + * returns: Number of processed BDs + */ +static int arc_emac_poll(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int work_done; + + arc_emac_tx_clean(ndev); + + work_done = arc_emac_rx(ndev, budget); + if (work_done < budget) { + napi_complete(napi); + arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); + } + + return work_done; +} + +/** + * arc_emac_intr - Global interrupt handler for EMAC. + * @irq: irq number. + * @dev_instance: device instance. + * + * returns: IRQ_HANDLED for all cases. + * + * ARC EMAC has only 1 interrupt line, and depending on bits raised in + * STATUS register we may tell what is a reason for interrupt to fire. + */ +static irqreturn_t arc_emac_intr(int irq, void *dev_instance) +{ + struct net_device *ndev = dev_instance; + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned int status; + + status = arc_reg_get(priv, R_STATUS); + status &= ~MDIO_MASK; + + /* Reset all flags except "MDIO complete" */ + arc_reg_set(priv, R_STATUS, status); + + if (status & (RXINT_MASK | TXINT_MASK)) { + if (likely(napi_schedule_prep(&priv->napi))) { + arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); + __napi_schedule(&priv->napi); + } + } + + if (status & ERR_MASK) { + /* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding + * 8-bit error counter overrun. 
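+ * Each overrun therefore accounts for 256 events, which is why 0x100
+ * is added to the corresponding statistics counters below.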
+ */ + + if (status & MSER_MASK) { + stats->rx_missed_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXCR_MASK) { + stats->rx_crc_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXFR_MASK) { + stats->rx_frame_errors += 0x100; + stats->rx_errors += 0x100; + } + + if (status & RXFL_MASK) { + stats->rx_over_errors += 0x100; + stats->rx_errors += 0x100; + } + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void arc_emac_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + arc_emac_intr(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/** + * arc_emac_open - Open the network device. + * @ndev: Pointer to the network device. + * + * returns: 0, on success or non-zero error value on failure. + * + * This function sets the MAC address, requests and enables an IRQ + * for the EMAC device and starts the Tx queue. + * It also connects to the phy device. + */ +static int arc_emac_open(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct phy_device *phy_dev = priv->phy_dev; + int i; + + phy_dev->autoneg = AUTONEG_ENABLE; + phy_dev->speed = 0; + phy_dev->duplex = 0; + phy_dev->advertising &= phy_dev->supported; + + priv->last_rx_bd = 0; + + /* Allocate and set buffers for Rx BD's */ + for (i = 0; i < RX_BD_NUM; i++) { + dma_addr_t addr; + unsigned int *last_rx_bd = &priv->last_rx_bd; + struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd]; + struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; + + rx_buff->skb = netdev_alloc_skb_ip_align(ndev, + EMAC_BUFFER_SIZE); + if (unlikely(!rx_buff->skb)) + return -ENOMEM; + + addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data, + EMAC_BUFFER_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, addr)) { + netdev_err(ndev, "cannot dma map\n"); + dev_kfree_skb(rx_buff->skb); + return -ENOMEM; + } + dma_unmap_addr_set(rx_buff, addr, addr); + dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE); + + rxbd->data = cpu_to_le32(addr); + + /* Make sure pointer to data buffer is set */ + wmb(); + + /* Return ownership to EMAC */ + rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE); + + *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; + } + + /* Clean Tx BD's */ + memset(priv->txbd, 0, TX_RING_SZ); + + /* Initialize logical address filter */ + arc_reg_set(priv, R_LAFL, 0); + arc_reg_set(priv, R_LAFH, 0); + + /* Set BD ring pointers for device side */ + arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma); + arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); + + /* Enable interrupts */ + arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); + + /* Set CONTROL */ + arc_reg_set(priv, R_CTRL, + (RX_BD_NUM << 24) | /* RX BD table length */ + (TX_BD_NUM << 16) | /* TX BD table length */ + TXRN_MASK | RXRN_MASK); + + napi_enable(&priv->napi); + + /* Enable EMAC */ + arc_reg_or(priv, R_CTRL, EN_MASK); + + phy_start_aneg(priv->phy_dev); + + netif_start_queue(ndev); + + return 0; +} + +/** + * arc_emac_set_rx_mode - Change the receive filtering mode. + * @ndev: Pointer to the network device. + * + * This function enables/disables promiscuous or all-multicast mode + * and updates the multicast filtering list of the network device. 
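+ *
+ * Multicast filtering uses a 64-bit hash: the top six bits of the
+ * little-endian CRC-32 of each address pick one bit, and the resulting
+ * bitmap is written to the R_LAFL/R_LAFH register pair.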
+ */ +static void arc_emac_set_rx_mode(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + if (ndev->flags & IFF_PROMISC) { + arc_reg_or(priv, R_CTRL, PROM_MASK); + } else { + arc_reg_clr(priv, R_CTRL, PROM_MASK); + + if (ndev->flags & IFF_ALLMULTI) { + arc_reg_set(priv, R_LAFL, ~0); + arc_reg_set(priv, R_LAFH, ~0); + } else { + struct netdev_hw_addr *ha; + unsigned int filter[2] = { 0, 0 }; + int bit; + + netdev_for_each_mc_addr(ha, ndev) { + bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; + filter[bit >> 5] |= 1 << (bit & 31); + } + + arc_reg_set(priv, R_LAFL, filter[0]); + arc_reg_set(priv, R_LAFH, filter[1]); + } + } +} + +/** + * arc_emac_stop - Close the network device. + * @ndev: Pointer to the network device. + * + * This function stops the Tx queue, disables interrupts and frees the IRQ for + * the EMAC device. + * It also disconnects the PHY device associated with the EMAC device. + */ +static int arc_emac_stop(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + napi_disable(&priv->napi); + netif_stop_queue(ndev); + + /* Disable interrupts */ + arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); + + /* Disable EMAC */ + arc_reg_clr(priv, R_CTRL, EN_MASK); + + return 0; +} + +/** + * arc_emac_stats - Get system network statistics. + * @ndev: Pointer to net_device structure. + * + * Returns the address of the device statistics structure. + * Statistics are updated in interrupt handler. + */ +static struct net_device_stats *arc_emac_stats(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned long miss, rxerr; + u8 rxcrc, rxfram, rxoflow; + + rxerr = arc_reg_get(priv, R_RXERR); + miss = arc_reg_get(priv, R_MISS); + + rxcrc = rxerr; + rxfram = rxerr >> 8; + rxoflow = rxerr >> 16; + + stats->rx_errors += miss; + stats->rx_errors += rxcrc + rxfram + rxoflow; + + stats->rx_over_errors += rxoflow; + stats->rx_frame_errors += rxfram; + stats->rx_crc_errors += rxcrc; + stats->rx_missed_errors += miss; + + return stats; +} + +/** + * arc_emac_tx - Starts the data transmission. + * @skb: sk_buff pointer that contains data to be Transmitted. + * @ndev: Pointer to net_device structure. + * + * returns: NETDEV_TX_OK, on success + * NETDEV_TX_BUSY, if any of the descriptors are not free. + * + * This function is invoked from upper layers to initiate transmission. + */ +static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int len, *txbd_curr = &priv->txbd_curr; + struct net_device_stats *stats = &ndev->stats; + __le32 *info = &priv->txbd[*txbd_curr].info; + dma_addr_t addr; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + len = max_t(unsigned int, ETH_ZLEN, skb->len); + + if (unlikely(!arc_emac_tx_avail(priv))) { + netif_stop_queue(ndev); + netdev_err(ndev, "BUG! 
Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + addr = dma_map_single(&ndev->dev, (void *)skb->data, len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&ndev->dev, addr))) { + stats->tx_dropped++; + stats->tx_errors++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); + dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); + + priv->tx_buff[*txbd_curr].skb = skb; + priv->txbd[*txbd_curr].data = cpu_to_le32(addr); + + /* Make sure pointer to data buffer is set */ + wmb(); + + skb_tx_timestamp(skb); + + *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); + + /* Increment index to point to the next BD */ + *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; + + /* Ensure that tx_clean() sees the new txbd_curr before + * checking the queue status. This prevents an unneeded wake + * of the queue in tx_clean(). + */ + smp_mb(); + + if (!arc_emac_tx_avail(priv)) { + netif_stop_queue(ndev); + /* Refresh tx_dirty */ + smp_mb(); + if (arc_emac_tx_avail(priv)) + netif_start_queue(ndev); + } + + arc_reg_set(priv, R_STATUS, TXPL_MASK); + + return NETDEV_TX_OK; +} + +static void arc_emac_set_address_internal(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int addr_low, addr_hi; + + addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); + addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); + + arc_reg_set(priv, R_ADDRL, addr_low); + arc_reg_set(priv, R_ADDRH, addr_hi); +} + +/** + * arc_emac_set_address - Set the MAC address for this device. + * @ndev: Pointer to net_device structure. + * @p: 6 byte Address to be written as MAC address. + * + * This function copies the HW address from the sockaddr structure to the + * net_device structure and updates the address in HW. + * + * returns: -EBUSY if the net device is busy or 0 if the address is set + * successfully. 
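+ *
+ * The hardware update is done by arc_emac_set_address_internal() above,
+ * which packs the first four address bytes into R_ADDRL and the last
+ * two into R_ADDRH.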
+ */ +static int arc_emac_set_address(struct net_device *ndev, void *p) +{ + struct sockaddr *addr = p; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + arc_emac_set_address_internal(ndev); + + return 0; +} + +static const struct net_device_ops arc_emac_netdev_ops = { + .ndo_open = arc_emac_open, + .ndo_stop = arc_emac_stop, + .ndo_start_xmit = arc_emac_tx, + .ndo_set_mac_address = arc_emac_set_address, + .ndo_get_stats = arc_emac_stats, + .ndo_set_rx_mode = arc_emac_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = arc_emac_poll_controller, +#endif +}; + +int arc_emac_probe(struct net_device *ndev, int interface) +{ + struct device *dev = ndev->dev.parent; + struct resource res_regs; + struct device_node *phy_node; + struct arc_emac_priv *priv; + const char *mac_addr; + unsigned int id, clock_frequency, irq; + int err; + + + /* Get PHY from device tree */ + phy_node = of_parse_phandle(dev->of_node, "phy", 0); + if (!phy_node) { + dev_err(dev, "failed to retrieve phy description from device tree\n"); + return -ENODEV; + } + + /* Get EMAC registers base address from device tree */ + err = of_address_to_resource(dev->of_node, 0, &res_regs); + if (err) { + dev_err(dev, "failed to retrieve registers base from device tree\n"); + return -ENODEV; + } + + /* Get IRQ from device tree */ + irq = irq_of_parse_and_map(dev->of_node, 0); + if (!irq) { + dev_err(dev, "failed to retrieve value from device tree\n"); + return -ENODEV; + } + + + ndev->netdev_ops = &arc_emac_netdev_ops; + ndev->ethtool_ops = &arc_emac_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + /* FIXME :: no multicast support yet */ + ndev->flags &= ~IFF_MULTICAST; + + priv = netdev_priv(ndev); + priv->dev = dev; + + priv->regs = devm_ioremap_resource(dev, &res_regs); + if (IS_ERR(priv->regs)) { + return PTR_ERR(priv->regs); + } + dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); + + if (priv->clk) { + err = clk_prepare_enable(priv->clk); + if (err) { + dev_err(dev, "failed to enable clock\n"); + return err; + } + + clock_frequency = clk_get_rate(priv->clk); + } else { + /* Get CPU clock frequency from device tree */ + if (of_property_read_u32(dev->of_node, "clock-frequency", + &clock_frequency)) { + dev_err(dev, "failed to retrieve from device tree\n"); + return -EINVAL; + } + } + + id = arc_reg_get(priv, R_ID); + + /* Check for EMAC revision 5 or 7, magic number */ + if (!(id == 0x0005fd02 || id == 0x0007fd02)) { + dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id); + err = -ENODEV; + goto out_clken; + } + dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id); + + /* Set poll rate so that it polls every 1 ms */ + arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); + + ndev->irq = irq; + dev_info(dev, "IRQ is %d\n", ndev->irq); + + /* Register interrupt handler for device */ + err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0, + ndev->name, ndev); + if (err) { + dev_err(dev, "could not allocate IRQ\n"); + goto out_clken; + } + + /* Get MAC address from device tree */ + mac_addr = of_get_mac_address(dev->of_node); + + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + else + eth_hw_addr_random(ndev); + + arc_emac_set_address_internal(ndev); + dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); + + /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ + priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ, + 
&priv->rxbd_dma, GFP_KERNEL); + + if (!priv->rxbd) { + dev_err(dev, "failed to allocate data buffers\n"); + err = -ENOMEM; + goto out_clken; + } + + priv->txbd = priv->rxbd + RX_BD_NUM; + + priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ; + dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", + (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma); + + err = arc_mdio_probe(priv); + if (err) { + dev_err(dev, "failed to probe MII bus\n"); + goto out_clken; + } + + priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, + interface); + if (!priv->phy_dev) { + dev_err(dev, "of_phy_connect() failed\n"); + err = -ENODEV; + goto out_mdio; + } + + dev_info(dev, "connected to %s phy with id 0x%x\n", + priv->phy_dev->drv->name, priv->phy_dev->phy_id); + + netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); + + err = register_netdev(ndev); + if (err) { + dev_err(dev, "failed to register network device\n"); + goto out_netif_api; + } + + return 0; + +out_netif_api: + netif_napi_del(&priv->napi); + phy_disconnect(priv->phy_dev); + priv->phy_dev = NULL; +out_mdio: + arc_mdio_remove(priv); +out_clken: + if (priv->clk) + clk_disable_unprepare(priv->clk); + return err; +} +EXPORT_SYMBOL_GPL(arc_emac_probe); + +int arc_emac_remove(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + + phy_disconnect(priv->phy_dev); + priv->phy_dev = NULL; + arc_mdio_remove(priv); + unregister_netdev(ndev); + netif_napi_del(&priv->napi); + + if (!IS_ERR(priv->clk)) { + clk_disable_unprepare(priv->clk); + } + + + return 0; +} +EXPORT_SYMBOL_GPL(arc_emac_remove); + +MODULE_AUTHOR("Alexey Brodkin "); +MODULE_DESCRIPTION("ARC EMAC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c new file mode 100644 index 000000000..d5ee98693 --- /dev/null +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com) + * + * MDIO implementation for ARC EMAC + */ + +#include +#include +#include + +#include "emac.h" + +/* Number of seconds we wait for "MDIO complete" flag to appear */ +#define ARC_MDIO_COMPLETE_POLL_COUNT 1 + +/** + * arc_mdio_complete_wait - Waits until MDIO transaction is completed. + * @priv: Pointer to ARC EMAC private data structure. + * + * returns: 0 on success, -ETIMEDOUT on a timeout. + */ +static int arc_mdio_complete_wait(struct arc_emac_priv *priv) +{ + unsigned int i; + + for (i = 0; i < ARC_MDIO_COMPLETE_POLL_COUNT * 40; i++) { + unsigned int status = arc_reg_get(priv, R_STATUS); + + status &= MDIO_MASK; + + if (status) { + /* Reset "MDIO complete" flag */ + arc_reg_set(priv, R_STATUS, status); + return 0; + } + + msleep(25); + } + + return -ETIMEDOUT; +} + +/** + * arc_mdio_read - MDIO interface read function. + * @bus: Pointer to MII bus structure. + * @phy_addr: Address of the PHY device. + * @reg_num: PHY register to read. + * + * returns: The register contents on success, -ETIMEDOUT on a timeout. + * + * Reads the contents of the requested register from the requested PHY + * address. 
+ */
+static int arc_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
+{
+	struct arc_emac_priv *priv = bus->priv;
+	unsigned int value;
+	int error;
+
+	arc_reg_set(priv, R_MDIO,
+		    0x60020000 | (phy_addr << 23) | (reg_num << 18));
+
+	error = arc_mdio_complete_wait(priv);
+	if (error < 0)
+		return error;
+
+	value = arc_reg_get(priv, R_MDIO) & 0xffff;
+
+	dev_dbg(priv->dev, "arc_mdio_read(phy_addr=%i, reg_num=%x) = %x\n",
+		phy_addr, reg_num, value);
+
+	return value;
+}
+
+/**
+ * arc_mdio_write - MDIO interface write function.
+ * @bus: Pointer to MII bus structure.
+ * @phy_addr: Address of the PHY device.
+ * @reg_num: PHY register to write to.
+ * @value: Value to be written into the register.
+ *
+ * returns: 0 on success, -ETIMEDOUT on a timeout.
+ *
+ * Writes the value to the requested register.
+ */
+static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
+			  int reg_num, u16 value)
+{
+	struct arc_emac_priv *priv = bus->priv;
+
+	dev_dbg(priv->dev,
+		"arc_mdio_write(phy_addr=%i, reg_num=%x, value=%x)\n",
+		phy_addr, reg_num, value);
+
+	arc_reg_set(priv, R_MDIO,
+		    0x50020000 | (phy_addr << 23) | (reg_num << 18) | value);
+
+	return arc_mdio_complete_wait(priv);
+}
+
+/**
+ * arc_mdio_probe - MDIO probe function.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: 0 on success, -ENOMEM when mdiobus_alloc
+ * (to allocate memory for MII bus structure) fails.
+ *
+ * Sets up and registers the MDIO interface.
+ */
+int arc_mdio_probe(struct arc_emac_priv *priv)
+{
+	struct mii_bus *bus;
+	int error;
+
+	bus = mdiobus_alloc();
+	if (!bus)
+		return -ENOMEM;
+
+	priv->bus = bus;
+	bus->priv = priv;
+	bus->parent = priv->dev;
+	bus->name = "Synopsys MII Bus";
+	bus->read = &arc_mdio_read;
+	bus->write = &arc_mdio_write;
+
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
+
+	error = of_mdiobus_register(bus, priv->dev->of_node);
+	if (error) {
+		dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
+		mdiobus_free(bus);
+		return error;
+	}
+
+	return 0;
+}
+
+/**
+ * arc_mdio_remove - MDIO remove function.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * Unregisters the MDIO bus and frees any associated memory for the MII bus.
+ */
+int arc_mdio_remove(struct arc_emac_priv *priv)
+{
+	mdiobus_unregister(priv->bus);
+	mdiobus_free(priv->bus);
+	priv->bus = NULL;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
new file mode 100644
index 000000000..c31c7407b
--- /dev/null
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -0,0 +1,229 @@
+/**
+ * emac-rockchip.c - Rockchip EMAC specific glue layer
+ *
+ * Copyright (C) 2014 Romain Perier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "emac.h" + +#define DRV_NAME "rockchip_emac" +#define DRV_VERSION "1.0" + +#define GRF_MODE_MII (1UL << 0) +#define GRF_MODE_RMII (0UL << 0) +#define GRF_SPEED_10M (0UL << 1) +#define GRF_SPEED_100M (1UL << 1) +#define GRF_SPEED_ENABLE_BIT (1UL << 17) +#define GRF_MODE_ENABLE_BIT (1UL << 16) + +struct emac_rockchip_soc_data { + int grf_offset; +}; + +struct rockchip_priv_data { + struct arc_emac_priv emac; + struct regmap *grf; + const struct emac_rockchip_soc_data *soc_data; + struct regulator *regulator; + struct clk *refclk; +}; + +static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) +{ + struct rockchip_priv_data *emac = priv; + u32 data; + int err = 0; + + /* write-enable bits */ + data = GRF_SPEED_ENABLE_BIT; + + switch(speed) { + case 10: + data |= GRF_SPEED_10M; + break; + case 100: + data |= GRF_SPEED_100M; + break; + default: + pr_err("speed %u not supported\n", speed); + return; + } + + err = regmap_write(emac->grf, emac->soc_data->grf_offset, data); + if (err) + pr_err("unable to apply speed %u to grf (%d)\n", speed, err); +} + +static const struct emac_rockchip_soc_data emac_rockchip_dt_data[] = { + { .grf_offset = 0x154 }, /* rk3066 */ + { .grf_offset = 0x0a4 }, /* rk3188 */ +}; + +static const struct of_device_id emac_rockchip_dt_ids[] = { + { .compatible = "rockchip,rk3066-emac", .data = &emac_rockchip_dt_data[0] }, + { .compatible = "rockchip,rk3188-emac", .data = &emac_rockchip_dt_data[1] }, + { /* Sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, emac_rockchip_dt_ids); + +static int emac_rockchip_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct rockchip_priv_data *priv; + const struct of_device_id *match; + u32 data; + int err, interface; + + if (!pdev->dev.of_node) + return -ENODEV; + + ndev = alloc_etherdev(sizeof(struct rockchip_priv_data)); + if (!ndev) + return -ENOMEM; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + priv = netdev_priv(ndev); + priv->emac.drv_name = DRV_NAME; + priv->emac.drv_version = DRV_VERSION; + priv->emac.set_mac_speed = emac_rockchip_set_mac_speed; + + interface = of_get_phy_mode(dev->of_node); + + /* RK3066 and RK3188 SoCs only support RMII */ + if (interface != PHY_INTERFACE_MODE_RMII) { + dev_err(dev, "unsupported phy interface mode %d\n", interface); + err = -ENOTSUPP; + goto out_netdev; + } + + priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); + if (IS_ERR(priv->grf)) { + dev_err(dev, "failed to retrieve global register file (%ld)\n", PTR_ERR(priv->grf)); + err = PTR_ERR(priv->grf); + goto out_netdev; + } + + match = of_match_node(emac_rockchip_dt_ids, dev->of_node); + priv->soc_data = match->data; + + priv->emac.clk = devm_clk_get(dev, "hclk"); + if (IS_ERR(priv->emac.clk)) { + dev_err(dev, "failed to retrieve host clock (%ld)\n", PTR_ERR(priv->emac.clk)); + err = PTR_ERR(priv->emac.clk); + goto out_netdev; + } + + priv->refclk = devm_clk_get(dev, "macref"); + if (IS_ERR(priv->refclk)) { + dev_err(dev, "failed to retrieve reference clock (%ld)\n", PTR_ERR(priv->refclk)); + err = PTR_ERR(priv->refclk); + goto out_netdev; + } + + err = clk_prepare_enable(priv->refclk); + if (err) { + dev_err(dev, "failed to enable reference clock (%d)\n", err); + goto out_netdev; + } + + /* Optional regulator for PHY */ + priv->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(priv->regulator)) { + if (PTR_ERR(priv->regulator) == 
-EPROBE_DEFER) + return -EPROBE_DEFER; + dev_err(dev, "no regulator found\n"); + priv->regulator = NULL; + } + + if (priv->regulator) { + err = regulator_enable(priv->regulator); + if (err) { + dev_err(dev, "failed to enable phy-supply (%d)\n", err); + goto out_clk_disable; + } + } + + err = arc_emac_probe(ndev, interface); + if (err) + goto out_regulator_disable; + + /* write-enable bits */ + data = GRF_MODE_ENABLE_BIT | GRF_SPEED_ENABLE_BIT; + + data |= GRF_SPEED_100M; + data |= GRF_MODE_RMII; + + err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); + if (err) { + dev_err(dev, "unable to apply initial settings to grf (%d)\n", err); + goto out_regulator_disable; + } + + /* RMII interface needs always a rate of 50MHz */ + err = clk_set_rate(priv->refclk, 50000000); + if (err) + dev_err(dev, "failed to change reference clock rate (%d)\n", err); + return 0; + +out_regulator_disable: + if (priv->regulator) + regulator_disable(priv->regulator); +out_clk_disable: + clk_disable_unprepare(priv->refclk); +out_netdev: + free_netdev(ndev); + return err; +} + +static int emac_rockchip_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct rockchip_priv_data *priv = netdev_priv(ndev); + int err; + + err = arc_emac_remove(ndev); + + clk_disable_unprepare(priv->refclk); + + if (priv->regulator) + regulator_disable(priv->regulator); + + free_netdev(ndev); + return err; +} + +static struct platform_driver emac_rockchip_driver = { + .probe = emac_rockchip_probe, + .remove = emac_rockchip_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = emac_rockchip_dt_ids, + }, +}; + +module_platform_driver(emac_rockchip_driver); + +MODULE_AUTHOR("Romain Perier "); +MODULE_DESCRIPTION("Rockchip EMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig new file mode 100644 index 000000000..58ad37c73 --- /dev/null +++ b/drivers/net/ethernet/atheros/Kconfig @@ -0,0 +1,83 @@ +# +# Atheros device configuration +# + +config NET_VENDOR_ATHEROS + bool "Atheros devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Atheros devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_ATHEROS + +config ATL2 + tristate "Atheros L2 Fast Ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + This driver supports the Atheros L2 fast ethernet adapter. + + To compile this driver as a module, choose M here. The module + will be called atl2. + +config ATL1 + tristate "Atheros/Attansic L1 Gigabit Ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + This driver supports the Atheros/Attansic L1 gigabit ethernet + adapter. + + To compile this driver as a module, choose M here. The module + will be called atl1. + +config ATL1E + tristate "Atheros L1E Gigabit Ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + This driver supports the Atheros L1E gigabit ethernet adapter. + + To compile this driver as a module, choose M here. The module + will be called atl1e. 
+ +config ATL1C + tristate "Atheros L1C Gigabit Ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + This driver supports the Atheros L1C gigabit ethernet adapter. + + To compile this driver as a module, choose M here. The module + will be called atl1c. + +config ALX + tristate "Qualcomm Atheros AR816x/AR817x support" + depends on PCI + select CRC32 + select MDIO + help + This driver supports the Qualcomm Atheros L1F ethernet adapter, + i.e. the following chipsets: + + 1969:1091 - AR8161 Gigabit Ethernet + 1969:1090 - AR8162 Fast Ethernet + 1969:10A1 - AR8171 Gigabit Ethernet + 1969:10A0 - AR8172 Fast Ethernet + + To compile this driver as a module, choose M here. The module + will be called alx. + +endif # NET_VENDOR_ATHEROS diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile new file mode 100644 index 000000000..5cf1c65bb --- /dev/null +++ b/drivers/net/ethernet/atheros/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the Atheros network device drivers. +# + +obj-$(CONFIG_ATL1) += atlx/ +obj-$(CONFIG_ATL2) += atlx/ +obj-$(CONFIG_ATL1E) += atl1e/ +obj-$(CONFIG_ATL1C) += atl1c/ +obj-$(CONFIG_ALX) += alx/ diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile new file mode 100644 index 000000000..5901fa407 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ALX) += alx.o +alx-objs := main.o ethtool.o hw.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h new file mode 100644 index 000000000..8fc93c5f6 --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _ALX_H_ +#define _ALX_H_ + +#include +#include +#include +#include +#include "hw.h" + +#define ALX_WATCHDOG_TIME (5 * HZ) + +struct alx_buffer { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(size); +}; + +struct alx_rx_queue { + struct alx_rrd *rrd; + dma_addr_t rrd_dma; + + struct alx_rfd *rfd; + dma_addr_t rfd_dma; + + struct alx_buffer *bufs; + + u16 write_idx, read_idx; + u16 rrd_read_idx; +}; +#define ALX_RX_ALLOC_THRESH 32 + +struct alx_tx_queue { + struct alx_txd *tpd; + dma_addr_t tpd_dma; + struct alx_buffer *bufs; + u16 write_idx, read_idx; +}; + +#define ALX_DEFAULT_TX_WORK 128 + +enum alx_device_quirks { + ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0), +}; + +struct alx_priv { + struct net_device *dev; + + struct alx_hw hw; + + /* all descriptor memory */ + struct { + dma_addr_t dma; + void *virt; + unsigned int size; + } descmem; + + /* protect int_mask updates */ + spinlock_t irq_lock; + u32 int_mask; + + unsigned int tx_ringsz; + unsigned int rx_ringsz; + unsigned int rxbuf_size; + + struct napi_struct napi; + struct alx_tx_queue txq; + struct alx_rx_queue rxq; + + struct work_struct link_check_wk; + struct work_struct reset_wk; + + u16 msg_enable; + + bool msi; + + /* protects hw.stats */ + spinlock_t stats_lock; +}; + +extern const struct ethtool_ops alx_ethtool_ops; +extern const char alx_drv_name[]; + +#endif diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c new file mode 100644 index 000000000..08e22df2a --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/ethtool.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "alx.h" +#include "reg.h" +#include "hw.h" + +/* The order of these strings must match the order of the fields in + * struct alx_hw_stats + * See hw.h + */ +static const char alx_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", + "rx_bcast_packets", + "rx_mcast_packets", + "rx_pause_packets", + "rx_ctrl_packets", + "rx_fcs_errors", + "rx_length_errors", + "rx_bytes", + "rx_runt_packets", + "rx_fragments", + "rx_64B_or_less_packets", + "rx_65B_to_127B_packets", + "rx_128B_to_255B_packets", + "rx_256B_to_511B_packets", + "rx_512B_to_1023B_packets", + "rx_1024B_to_1518B_packets", + "rx_1519B_to_mtu_packets", + "rx_oversize_packets", + "rx_rxf_ov_drop_packets", + "rx_rrd_ov_drop_packets", + "rx_align_errors", + "rx_bcast_bytes", + "rx_mcast_bytes", + "rx_address_errors", + "tx_packets", + "tx_bcast_packets", + "tx_mcast_packets", + "tx_pause_packets", + "tx_exc_defer_packets", + "tx_ctrl_packets", + "tx_defer_packets", + "tx_bytes", + "tx_64B_or_less_packets", + "tx_65B_to_127B_packets", + "tx_128B_to_255B_packets", + "tx_256B_to_511B_packets", + "tx_512B_to_1023B_packets", + "tx_1024B_to_1518B_packets", + "tx_1519B_to_mtu_packets", + "tx_single_collision", + "tx_multiple_collisions", + "tx_late_collision", + "tx_abort_collision", + "tx_underrun", + "tx_trd_eop", + "tx_length_errors", + "tx_trunc_packets", + "tx_bcast_bytes", + "tx_mcast_bytes", + "tx_update", +}; + +#define ALX_NUM_STATS ARRAY_SIZE(alx_gstrings_stats) + + +static u32 alx_get_supported_speeds(struct alx_hw *hw) +{ + u32 supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full; + + if (alx_hw_giga(hw)) + supported |= SUPPORTED_1000baseT_Full; + + BUILD_BUG_ON(SUPPORTED_10baseT_Half != ADVERTISED_10baseT_Half); + BUILD_BUG_ON(SUPPORTED_10baseT_Full != ADVERTISED_10baseT_Full); + BUILD_BUG_ON(SUPPORTED_100baseT_Half != ADVERTISED_100baseT_Half); + BUILD_BUG_ON(SUPPORTED_100baseT_Full != ADVERTISED_100baseT_Full); + BUILD_BUG_ON(SUPPORTED_1000baseT_Full != ADVERTISED_1000baseT_Full); + + return supported; +} + +static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + if (alx_hw_giga(hw)) + ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported |= alx_get_supported_speeds(hw); + + ecmd->advertising = ADVERTISED_TP; + if (hw->adv_cfg & ADVERTISED_Autoneg) + ecmd->advertising |= hw->adv_cfg; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + + if (hw->adv_cfg & ADVERTISED_Autoneg) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_INTERNAL; + + if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) { + if (hw->flowctrl & ALX_FC_RX) { + ecmd->advertising |= ADVERTISED_Pause; + + if (!(hw->flowctrl & ALX_FC_TX)) + ecmd->advertising |= ADVERTISED_Asym_Pause; + } else if (hw->flowctrl & ALX_FC_TX) { + ecmd->advertising |= ADVERTISED_Asym_Pause; + } + } + + ethtool_cmd_speed_set(ecmd, hw->link_speed); + ecmd->duplex = hw->duplex; + + return 0; +} + +static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + u32 adv_cfg; + + ASSERT_RTNL(); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + 
if (ecmd->advertising & ~alx_get_supported_speeds(hw)) + return -EINVAL; + adv_cfg = ecmd->advertising | ADVERTISED_Autoneg; + } else { + adv_cfg = alx_speed_to_ethadv(ethtool_cmd_speed(ecmd), + ecmd->duplex); + + if (!adv_cfg || adv_cfg == ADVERTISED_1000baseT_Full) + return -EINVAL; + } + + hw->adv_cfg = adv_cfg; + return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl); +} + +static void alx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + pause->autoneg = !!(hw->flowctrl & ALX_FC_ANEG && + hw->adv_cfg & ADVERTISED_Autoneg); + pause->tx_pause = !!(hw->flowctrl & ALX_FC_TX); + pause->rx_pause = !!(hw->flowctrl & ALX_FC_RX); +} + + +static int alx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + int err = 0; + bool reconfig_phy = false; + u8 fc = 0; + + if (pause->tx_pause) + fc |= ALX_FC_TX; + if (pause->rx_pause) + fc |= ALX_FC_RX; + if (pause->autoneg) + fc |= ALX_FC_ANEG; + + ASSERT_RTNL(); + + /* restart auto-neg for auto-mode */ + if (hw->adv_cfg & ADVERTISED_Autoneg) { + if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG)) + reconfig_phy = true; + if (fc & hw->flowctrl & ALX_FC_ANEG && + (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + reconfig_phy = true; + } + + if (reconfig_phy) { + err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc); + if (err) + return err; + } + + /* flow control on mac */ + if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX)) + alx_cfg_mac_flowcontrol(hw, fc); + + hw->flowctrl = fc; + + return 0; +} + +static u32 alx_get_msglevel(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + + return alx->msg_enable; +} + +static void alx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct alx_priv *alx = netdev_priv(netdev); + + alx->msg_enable = data; +} + +static void alx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *estats, u64 *data) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + spin_lock(&alx->stats_lock); + + alx_update_hw_stats(hw); + BUILD_BUG_ON(sizeof(hw->stats) - offsetof(struct alx_hw_stats, rx_ok) < + ALX_NUM_STATS * sizeof(u64)); + memcpy(data, &hw->stats.rx_ok, ALX_NUM_STATS * sizeof(u64)); + + spin_unlock(&alx->stats_lock); +} + +static void alx_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &alx_gstrings_stats, sizeof(alx_gstrings_stats)); + break; + default: + WARN_ON(1); + break; + } +} + +static int alx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ALX_NUM_STATS; + default: + return -EINVAL; + } +} + +const struct ethtool_ops alx_ethtool_ops = { + .get_settings = alx_get_settings, + .set_settings = alx_set_settings, + .get_pauseparam = alx_get_pauseparam, + .set_pauseparam = alx_set_pauseparam, + .get_msglevel = alx_get_msglevel, + .set_msglevel = alx_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = alx_get_strings, + .get_sset_count = alx_get_sset_count, + .get_ethtool_stats = alx_get_ethtool_stats, +}; diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c new file mode 100644 index 000000000..7712f068f --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/hw.c @@ -0,0 +1,1110 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you 
may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include +#include "reg.h" +#include "hw.h" + +static inline bool alx_is_rev_a(u8 rev) +{ + return rev == ALX_REV_A0 || rev == ALX_REV_A1; +} + +static int alx_wait_mdio_idle(struct alx_hw *hw) +{ + u32 val; + int i; + + for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) { + val = alx_read_mem32(hw, ALX_MDIO); + if (!(val & ALX_MDIO_BUSY)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data) +{ + u32 val, clk_sel; + int err; + + *phy_data = 0; + + /* use slow clock when it's in hibernation status */ + clk_sel = hw->link_speed != SPEED_UNKNOWN ? + ALX_MDIO_CLK_SEL_25MD4 : + ALX_MDIO_CLK_SEL_25MD128; + + if (ext) { + val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | + reg << ALX_MDIO_EXTN_REG_SHIFT; + alx_write_mem32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START | + ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT; + } else { + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + reg << ALX_MDIO_REG_SHIFT | + ALX_MDIO_START | ALX_MDIO_OP_READ; + } + alx_write_mem32(hw, ALX_MDIO, val); + + err = alx_wait_mdio_idle(hw); + if (err) + return err; + val = alx_read_mem32(hw, ALX_MDIO); + *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA); + return 0; +} + +static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val, clk_sel; + + /* use slow clock when it's in hibernation status */ + clk_sel = hw->link_speed != SPEED_UNKNOWN ? 
+ ALX_MDIO_CLK_SEL_25MD4 : + ALX_MDIO_CLK_SEL_25MD128; + + if (ext) { + val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT | + reg << ALX_MDIO_EXTN_REG_SHIFT; + alx_write_mem32(hw, ALX_MDIO_EXTN, val); + + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + phy_data << ALX_MDIO_DATA_SHIFT | + ALX_MDIO_START | ALX_MDIO_MODE_EXT; + } else { + val = ALX_MDIO_SPRES_PRMBL | + clk_sel << ALX_MDIO_CLK_SEL_SHIFT | + reg << ALX_MDIO_REG_SHIFT | + phy_data << ALX_MDIO_DATA_SHIFT | + ALX_MDIO_START; + } + alx_write_mem32(hw, ALX_MDIO, val); + + return alx_wait_mdio_idle(hw); +} + +static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) +{ + return alx_read_phy_core(hw, false, 0, reg, phy_data); +} + +static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + return alx_write_phy_core(hw, false, 0, reg, phy_data); +} + +static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + return alx_read_phy_core(hw, true, dev, reg, pdata); +} + +static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + return alx_write_phy_core(hw, true, dev, reg, data); +} + +static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (err) + return err; + + return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata); +} + +static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg); + if (err) + return err; + + return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data); +} + +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_reg(hw, reg, phy_data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_ext(hw, dev, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_ext(hw, dev, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_read_phy_dbg(hw, reg, pdata); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data) +{ + int err; + + spin_lock(&hw->mdio_lock); + err = __alx_write_phy_dbg(hw, reg, data); + spin_unlock(&hw->mdio_lock); + + return err; +} + +static u16 alx_get_phy_config(struct alx_hw *hw) +{ + u32 val; + u16 phy_val; + + val = alx_read_mem32(hw, ALX_PHY_CTRL); + /* phy in reset */ + if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0) + return ALX_DRV_PHY_UNKNOWN; + + val = alx_read_mem32(hw, ALX_DRV); + val = ALX_GET_FIELD(val, ALX_DRV_PHY); + if (ALX_DRV_PHY_UNKNOWN == val) + return ALX_DRV_PHY_UNKNOWN; + + alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val); + if (ALX_PHY_INITED == phy_val) + return val; + + return ALX_DRV_PHY_UNKNOWN; +} + +static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val) +{ + u32 read; + int i; + + for (i = 0; i < ALX_SLD_MAX_TO; i++) { + read = alx_read_mem32(hw, reg); + 
if ((read & wait) == 0) { + if (val) + *val = read; + return true; + } + mdelay(1); + } + + return false; +} + +static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 mac0, mac1; + + mac0 = alx_read_mem32(hw, ALX_STAD0); + mac1 = alx_read_mem32(hw, ALX_STAD1); + + /* addr should be big-endian */ + put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2)); + put_unaligned(cpu_to_be16(mac1), (__be16 *)addr); + + return is_valid_ether_addr(addr); +} + +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr) +{ + u32 val; + + /* try to get it from register first */ + if (alx_read_macaddr(hw, addr)) + return 0; + + /* try to load from efuse */ + if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val)) + return -EIO; + alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START); + if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL)) + return -EIO; + if (alx_read_macaddr(hw, addr)) + return 0; + + /* try to load from flash/eeprom (if present) */ + val = alx_read_mem32(hw, ALX_EFLD); + if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) { + if (!alx_wait_reg(hw, ALX_EFLD, + ALX_EFLD_STAT | ALX_EFLD_START, &val)) + return -EIO; + alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START); + if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL)) + return -EIO; + if (alx_read_macaddr(hw, addr)) + return 0; + } + + return -EIO; +} + +void alx_set_macaddr(struct alx_hw *hw, const u8 *addr) +{ + u32 val; + + /* for example: 00-0B-6A-F6-00-DC * STAD0=6AF600DC, STAD1=000B */ + val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2))); + alx_write_mem32(hw, ALX_STAD0, val); + val = be16_to_cpu(get_unaligned((__be16 *)addr)); + alx_write_mem32(hw, ALX_STAD1, val); +} + +static void alx_reset_osc(struct alx_hw *hw, u8 rev) +{ + u32 val, val2; + + /* clear Internal OSC settings, switching OSC by hw itself */ + val = alx_read_mem32(hw, ALX_MISC3); + alx_write_mem32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | + ALX_MISC3_25M_NOTO_INTNL); + + /* 25M clk from chipset may be unstable 1s after de-assert of + * PERST, driver need re-calibrate before enter Sleep for WoL + */ + val = alx_read_mem32(hw, ALX_MISC); + if (rev >= ALX_REV_B0) { + /* restore over current protection def-val, + * this val could be reset by MAC-RST + */ + ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF); + /* a 0->1 change will update the internal val of osc */ + val &= ~ALX_MISC_INTNLOSC_OPEN; + alx_write_mem32(hw, ALX_MISC, val); + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + /* hw will automatically dis OSC after cab. 
*/ + val2 = alx_read_mem32(hw, ALX_MSIC2); + val2 &= ~ALX_MSIC2_CALB_START; + alx_write_mem32(hw, ALX_MSIC2, val2); + alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START); + } else { + val &= ~ALX_MISC_INTNLOSC_OPEN; + /* disable isolate for rev A devices */ + if (alx_is_rev_a(rev)) + val &= ~ALX_MISC_ISO_EN; + + alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN); + alx_write_mem32(hw, ALX_MISC, val); + } + + udelay(20); +} + +static int alx_stop_mac(struct alx_hw *hw) +{ + u32 rxq, txq, val; + u16 i; + + rxq = alx_read_mem32(hw, ALX_RXQ0); + alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN); + txq = alx_read_mem32(hw, ALX_TXQ0); + alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN); + + udelay(40); + + hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN); + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); + + for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_MAC_STS); + if (!(val & ALX_MAC_STS_IDLE)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +int alx_reset_mac(struct alx_hw *hw) +{ + u32 val, pmctrl; + int i, ret; + u8 rev; + bool a_cr; + + pmctrl = 0; + rev = alx_hw_revision(hw); + a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw); + + /* disable all interrupts, RXQ/TXQ */ + alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF); + alx_write_mem32(hw, ALX_IMR, 0); + alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); + + ret = alx_stop_mac(hw); + if (ret) + return ret; + + /* mac reset workaroud */ + alx_write_mem32(hw, ALX_RFD_PIDX, 1); + + /* dis l0s/l1 before mac reset */ + if (a_cr) { + pmctrl = alx_read_mem32(hw, ALX_PMCTRL); + if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) + alx_write_mem32(hw, ALX_PMCTRL, + pmctrl & ~(ALX_PMCTRL_L1_EN | + ALX_PMCTRL_L0S_EN)); + } + + /* reset whole mac safely */ + val = alx_read_mem32(hw, ALX_MASTER); + alx_write_mem32(hw, ALX_MASTER, + val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS); + + /* make sure it's real idle */ + udelay(10); + for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_RFD_PIDX); + if (val == 0) + break; + udelay(10); + } + for (; i < ALX_DMA_MAC_RST_TO; i++) { + val = alx_read_mem32(hw, ALX_MASTER); + if ((val & ALX_MASTER_DMA_MAC_RST) == 0) + break; + udelay(10); + } + if (i == ALX_DMA_MAC_RST_TO) + return -EIO; + udelay(10); + + if (a_cr) { + alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS); + /* restore l0s / l1 */ + if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN)) + alx_write_mem32(hw, ALX_PMCTRL, pmctrl); + } + + alx_reset_osc(hw, rev); + + /* clear Internal OSC settings, switching OSC by hw itself, + * disable isolate for rev A devices + */ + val = alx_read_mem32(hw, ALX_MISC3); + alx_write_mem32(hw, ALX_MISC3, + (val & ~ALX_MISC3_25M_BY_SW) | + ALX_MISC3_25M_NOTO_INTNL); + val = alx_read_mem32(hw, ALX_MISC); + val &= ~ALX_MISC_INTNLOSC_OPEN; + if (alx_is_rev_a(rev)) + val &= ~ALX_MISC_ISO_EN; + alx_write_mem32(hw, ALX_MISC, val); + udelay(20); + + /* driver control speed/duplex, hash-alg */ + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); + + val = alx_read_mem32(hw, ALX_SERDES); + alx_write_mem32(hw, ALX_SERDES, + val | ALX_SERDES_MACCLK_SLWDWN | + ALX_SERDES_PHYCLK_SLWDWN); + + return 0; +} + +void alx_reset_phy(struct alx_hw *hw) +{ + int i; + u32 val; + u16 phy_val; + + /* (DSP)reset PHY core */ + val = alx_read_mem32(hw, ALX_PHY_CTRL); + val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ | + ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN | + ALX_PHY_CTRL_CLS); + val |= ALX_PHY_CTRL_RST_ANALOG; + + val |= (ALX_PHY_CTRL_HIB_PULSE 
| ALX_PHY_CTRL_HIB_EN); + alx_write_mem32(hw, ALX_PHY_CTRL, val); + udelay(10); + alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT); + + for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++) + udelay(10); + + /* phy power saving & hib */ + alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL, + ALX_SYSMODCTRL_IECHOADJ_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS, + ALX_VDRVBIAS_DEF); + + /* EEE advertisement */ + val = alx_read_mem32(hw, ALX_LPI_CTRL); + alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0); + + /* phy power saving */ + alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF); + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN); + /* rtl8139c, 120m issue */ + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78, + ALX_MIIEXT_NLP78_120M_DEF); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10, + ALX_MIIEXT_S3DIG10_DEF); + + if (hw->lnk_patch) { + /* Turn off half amplitude */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3, + phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT); + /* Turn off Green feature */ + alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val); + alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, + phy_val | ALX_GREENCFG2_BP_GREEN); + /* Turn off half Bias */ + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5, + phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS); + } + + /* set phy interrupt mask */ + alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN); +} + +#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO) + +void alx_reset_pcie(struct alx_hw *hw) +{ + u8 rev = alx_hw_revision(hw); + u32 val; + u16 val16; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. 
*/ + pci_read_config_word(hw->pdev, PCI_COMMAND, &val16); + if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) { + val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(hw->pdev, PCI_COMMAND, val16); + } + + /* clear WoL setting/status */ + val = alx_read_mem32(hw, ALX_WOL0); + alx_write_mem32(hw, ALX_WOL0, 0); + + val = alx_read_mem32(hw, ALX_PDLL_TRNS1); + alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN); + + /* mask some pcie error bits */ + val = alx_read_mem32(hw, ALX_UE_SVRT); + val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR); + alx_write_mem32(hw, ALX_UE_SVRT, val); + + /* wol 25M & pclk */ + val = alx_read_mem32(hw, ALX_MASTER); + if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) == 0) + alx_write_mem32(hw, ALX_MASTER, + val | ALX_MASTER_PCLKSEL_SRDS | + ALX_MASTER_WAKEN_25M); + } else { + if ((val & ALX_MASTER_WAKEN_25M) == 0 || + (val & ALX_MASTER_PCLKSEL_SRDS) != 0) + alx_write_mem32(hw, ALX_MASTER, + (val & ~ALX_MASTER_PCLKSEL_SRDS) | + ALX_MASTER_WAKEN_25M); + } + + /* ASPM setting */ + alx_enable_aspm(hw, true, true); + + udelay(10); +} + +void alx_start_mac(struct alx_hw *hw) +{ + u32 mac, txq, rxq; + + rxq = alx_read_mem32(hw, ALX_RXQ0); + alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN); + txq = alx_read_mem32(hw, ALX_TXQ0); + alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN); + + mac = hw->rx_ctrl; + if (hw->duplex == DUPLEX_FULL) + mac |= ALX_MAC_CTRL_FULLD; + else + mac &= ~ALX_MAC_CTRL_FULLD; + ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, + hw->link_speed == SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 : + ALX_MAC_CTRL_SPEED_10_100); + mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN; + hw->rx_ctrl = mac; + alx_write_mem32(hw, ALX_MAC_CTRL, mac); +} + +void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc) +{ + if (fc & ALX_FC_RX) + hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN; + else + hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN; + + if (fc & ALX_FC_TX) + hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN; + else + hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN; + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en) +{ + u32 pmctrl; + u8 rev = alx_hw_revision(hw); + + pmctrl = alx_read_mem32(hw, ALX_PMCTRL); + + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER, + ALX_PMCTRL_LCKDET_TIMER_DEF); + pmctrl |= ALX_PMCTRL_RCVR_WT_1US | + ALX_PMCTRL_L1_CLKSW_EN | + ALX_PMCTRL_L1_SRDSRX_PWD; + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF); + ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US); + pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN | + ALX_PMCTRL_L1_SRDSPLL_EN | + ALX_PMCTRL_L1_BUFSRX_EN | + ALX_PMCTRL_SADLY_EN | + ALX_PMCTRL_HOTRST_WTEN| + ALX_PMCTRL_L0S_EN | + ALX_PMCTRL_L1_EN | + ALX_PMCTRL_ASPM_FCEN | + ALX_PMCTRL_TXL1_AFTER_L0S | + ALX_PMCTRL_RXL1_AFTER_L0S); + if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) + pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN; + + if (l0s_en) + pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN); + if (l1_en) + pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN); + + alx_write_mem32(hw, ALX_PMCTRL, pmctrl); +} + + +static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg) +{ + u32 cfg = 0; + + if (ethadv_cfg & ADVERTISED_Autoneg) { + cfg |= ALX_DRV_PHY_AUTO; + if (ethadv_cfg & ADVERTISED_10baseT_Half) + cfg |= ALX_DRV_PHY_10; + if (ethadv_cfg & ADVERTISED_10baseT_Full) + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & 
ADVERTISED_100baseT_Half) + cfg |= ALX_DRV_PHY_100; + if (ethadv_cfg & ADVERTISED_100baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_1000baseT_Half) + cfg |= ALX_DRV_PHY_1000; + if (ethadv_cfg & ADVERTISED_1000baseT_Full) + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + if (ethadv_cfg & ADVERTISED_Pause) + cfg |= ADVERTISE_PAUSE_CAP; + if (ethadv_cfg & ADVERTISED_Asym_Pause) + cfg |= ADVERTISE_PAUSE_ASYM; + } else { + switch (ethadv_cfg) { + case ADVERTISED_10baseT_Half: + cfg |= ALX_DRV_PHY_10; + break; + case ADVERTISED_100baseT_Half: + cfg |= ALX_DRV_PHY_100; + break; + case ADVERTISED_10baseT_Full: + cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX; + break; + case ADVERTISED_100baseT_Full: + cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX; + break; + } + } + + return cfg; +} + +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl) +{ + u16 adv, giga, cr; + u32 val; + int err = 0; + + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0); + val = alx_read_mem32(hw, ALX_DRV); + ALX_SET_FIELD(val, ALX_DRV_PHY, 0); + + if (ethadv & ADVERTISED_Autoneg) { + adv = ADVERTISE_CSMA; + adv |= ethtool_adv_to_mii_adv_t(ethadv); + + if (flowctrl & ALX_FC_ANEG) { + if (flowctrl & ALX_FC_RX) { + adv |= ADVERTISED_Pause; + if (!(flowctrl & ALX_FC_TX)) + adv |= ADVERTISED_Asym_Pause; + } else if (flowctrl & ALX_FC_TX) { + adv |= ADVERTISED_Asym_Pause; + } + } + giga = 0; + if (alx_hw_giga(hw)) + giga = ethtool_adv_to_mii_ctrl1000_t(ethadv); + + cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + + if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) || + alx_write_phy_reg(hw, MII_CTRL1000, giga) || + alx_write_phy_reg(hw, MII_BMCR, cr)) + err = -EBUSY; + } else { + cr = BMCR_RESET; + if (ethadv == ADVERTISED_100baseT_Half || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_SPEED100; + if (ethadv == ADVERTISED_10baseT_Full || + ethadv == ADVERTISED_100baseT_Full) + cr |= BMCR_FULLDPLX; + + err = alx_write_phy_reg(hw, MII_BMCR, cr); + } + + if (!err) { + alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED); + val |= ethadv_to_hw_cfg(hw, ethadv); + } + + alx_write_mem32(hw, ALX_DRV, val); + + return err; +} + + +void alx_post_phy_link(struct alx_hw *hw) +{ + u16 phy_val, len, agc; + u8 revid = alx_hw_revision(hw); + bool adj_th = revid == ALX_REV_B0; + + if (revid != ALX_REV_B0 && !alx_is_rev_a(revid)) + return; + + /* 1000BT/AZ, wrong cable length */ + if (hw->link_speed != SPEED_UNKNOWN) { + alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6, + &phy_val); + len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN); + alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val); + agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA); + + if ((hw->link_speed == SPEED_1000 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G || + (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) || + (hw->link_speed == SPEED_100 && + (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M || + (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_LONG); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val | ALX_AFE_10BT_100M_TH); + } else { + alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT, + ALX_AZ_ANADECT_DEF); + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, + ALX_MIIEXT_AFE, &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + } + + /* threshold adjust */ + if (adj_th && hw->lnk_patch) { + if (hw->link_speed == SPEED_100) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + 
ALX_MSE16DB_UP); + } else if (hw->link_speed == SPEED_1000) { + /* + * Giga link threshold, raise the tolerance of + * noise 50% + */ + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + &phy_val); + ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, + ALX_MSE20DB_TH_HI); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, + phy_val); + } + } + } else { + alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + &phy_val); + alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE, + phy_val & ~ALX_AFE_10BT_100M_TH); + + if (adj_th && hw->lnk_patch) { + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB, + ALX_MSE16DB_DOWN); + alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val); + ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH, + ALX_MSE20DB_TH_DEF); + alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val); + } + } +} + +bool alx_phy_configured(struct alx_hw *hw) +{ + u32 cfg, hw_cfg; + + cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg); + cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY); + hw_cfg = alx_get_phy_config(hw); + + if (hw_cfg == ALX_DRV_PHY_UNKNOWN) + return false; + + return cfg == hw_cfg; +} + +int alx_read_phy_link(struct alx_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u16 bmsr, giga; + int err; + + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + if (err) + return err; + + err = alx_read_phy_reg(hw, MII_BMSR, &bmsr); + if (err) + return err; + + if (!(bmsr & BMSR_LSTATUS)) { + hw->link_speed = SPEED_UNKNOWN; + hw->duplex = DUPLEX_UNKNOWN; + return 0; + } + + /* speed/duplex result is saved in PHY Specific Status Register */ + err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga); + if (err) + return err; + + if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED)) + goto wrong_speed; + + switch (giga & ALX_GIGA_PSSR_SPEED) { + case ALX_GIGA_PSSR_1000MBS: + hw->link_speed = SPEED_1000; + break; + case ALX_GIGA_PSSR_100MBS: + hw->link_speed = SPEED_100; + break; + case ALX_GIGA_PSSR_10MBS: + hw->link_speed = SPEED_10; + break; + default: + goto wrong_speed; + } + + hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + return 0; + +wrong_speed: + dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga); + return -EINVAL; +} + +int alx_clear_phy_intr(struct alx_hw *hw) +{ + u16 isr; + + /* clear interrupt status by reading it */ + return alx_read_phy_reg(hw, ALX_MII_ISR, &isr); +} + +void alx_disable_rss(struct alx_hw *hw) +{ + u32 ctrl = alx_read_mem32(hw, ALX_RXQ0); + + ctrl &= ~ALX_RXQ0_RSS_HASH_EN; + alx_write_mem32(hw, ALX_RXQ0, ctrl); +} + +void alx_configure_basic(struct alx_hw *hw) +{ + u32 val, raw_mtu, max_payload; + u16 val16; + u8 chip_rev = alx_hw_revision(hw); + + alx_set_macaddr(hw, hw->mac_addr); + + alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL); + + /* idle timeout to switch clk_125M */ + if (chip_rev >= ALX_REV_B0) + alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER, + ALX_IDLE_DECISN_TIMER_DEF); + + alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL); + + val = alx_read_mem32(hw, ALX_MASTER); + val |= ALX_MASTER_IRQMOD2_EN | + ALX_MASTER_IRQMOD1_EN | + ALX_MASTER_SYSALVTIMER_EN; + alx_write_mem32(hw, ALX_MASTER, val); + alx_write_mem32(hw, ALX_IRQ_MODU_TIMER, + (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT); + /* intr re-trig timeout */ + alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO); + /* tpd threshold to trig int */ + alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd); + alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt); + + raw_mtu = hw->mtu + ETH_HLEN; + alx_write_mem32(hw, ALX_MTU, raw_mtu + 8); + if (raw_mtu > ALX_MTU_JUMBO_TH) + hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE; + + if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH) + val = (raw_mtu + 8 + 7) >> 3; + else + val = ALX_TXQ1_JUMBO_TSO_TH >> 3; + alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN); + + max_payload = pcie_get_readrq(hw->pdev) >> 8; + /* + * if BIOS had changed the default dma read max length, + * restore it to default value + */ + if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN) + pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN); + + val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT | + ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN | + ALX_TXQ0_SUPT_IPOPT | + ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT; + alx_write_mem32(hw, ALX_TXQ0, val); + val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT | + ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT | + ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT | + ALX_HQTPD_BURST_EN; + alx_write_mem32(hw, ALX_HQTPD, val); + + /* rxq, flow control */ + val = alx_read_mem32(hw, ALX_SRAM5); + val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3; + if (val > ALX_SRAM_RXF_LEN_8K) { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3; + } else { + val16 = ALX_MTU_STD_ALGN >> 3; + val = (val - ALX_MTU_STD_ALGN) >> 3; + } + alx_write_mem32(hw, ALX_RXQ2, + val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT | + val << ALX_RXQ2_RXF_XON_THRESH_SHIFT); + val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT | + ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT | + ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT | + ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN | + ALX_RXQ0_IPV6_PARSE_EN; + + if (alx_hw_giga(hw)) + ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH, + ALX_RXQ0_ASPM_THRESH_100M); + + alx_write_mem32(hw, ALX_RXQ0, val); + + val = alx_read_mem32(hw, ALX_DMA); + val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT | + ALX_DMA_RREQ_PRI_DATA | + max_payload << ALX_DMA_RREQ_BLEN_SHIFT | + ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT | + 
ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT | + (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT; + alx_write_mem32(hw, ALX_DMA, val); + + /* default multi-tx-q weights */ + val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT | + 4 << ALX_WRR_PRI0_SHIFT | + 4 << ALX_WRR_PRI1_SHIFT | + 4 << ALX_WRR_PRI2_SHIFT | + 4 << ALX_WRR_PRI3_SHIFT; + alx_write_mem32(hw, ALX_WRR, val); +} + +bool alx_get_phy_info(struct alx_hw *hw) +{ + u16 devs1, devs2; + + if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) || + alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1])) + return false; + + /* since we haven't PMA/PMD status2 register, we can't + * use mdio45_probe function for prtad and mmds. + * use fixed MMD3 to get mmds. + */ + if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) || + alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2)) + return false; + hw->mdio.mmds = devs1 | devs2 << 16; + + return true; +} + +void alx_update_hw_stats(struct alx_hw *hw) +{ + /* RX stats */ + hw->stats.rx_ok += alx_read_mem32(hw, ALX_MIB_RX_OK); + hw->stats.rx_bcast += alx_read_mem32(hw, ALX_MIB_RX_BCAST); + hw->stats.rx_mcast += alx_read_mem32(hw, ALX_MIB_RX_MCAST); + hw->stats.rx_pause += alx_read_mem32(hw, ALX_MIB_RX_PAUSE); + hw->stats.rx_ctrl += alx_read_mem32(hw, ALX_MIB_RX_CTRL); + hw->stats.rx_fcs_err += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR); + hw->stats.rx_len_err += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR); + hw->stats.rx_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT); + hw->stats.rx_runt += alx_read_mem32(hw, ALX_MIB_RX_RUNT); + hw->stats.rx_frag += alx_read_mem32(hw, ALX_MIB_RX_FRAG); + hw->stats.rx_sz_64B += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B); + hw->stats.rx_sz_127B += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B); + hw->stats.rx_sz_255B += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B); + hw->stats.rx_sz_511B += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B); + hw->stats.rx_sz_1023B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B); + hw->stats.rx_sz_1518B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B); + hw->stats.rx_sz_max += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX); + hw->stats.rx_ov_sz += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ); + hw->stats.rx_ov_rxf += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF); + hw->stats.rx_ov_rrd += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD); + hw->stats.rx_align_err += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR); + hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT); + hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT); + hw->stats.rx_err_addr += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR); + + /* TX stats */ + hw->stats.tx_ok += alx_read_mem32(hw, ALX_MIB_TX_OK); + hw->stats.tx_bcast += alx_read_mem32(hw, ALX_MIB_TX_BCAST); + hw->stats.tx_mcast += alx_read_mem32(hw, ALX_MIB_TX_MCAST); + hw->stats.tx_pause += alx_read_mem32(hw, ALX_MIB_TX_PAUSE); + hw->stats.tx_exc_defer += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER); + hw->stats.tx_ctrl += alx_read_mem32(hw, ALX_MIB_TX_CTRL); + hw->stats.tx_defer += alx_read_mem32(hw, ALX_MIB_TX_DEFER); + hw->stats.tx_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT); + hw->stats.tx_sz_64B += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B); + hw->stats.tx_sz_127B += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B); + hw->stats.tx_sz_255B += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B); + hw->stats.tx_sz_511B += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B); + hw->stats.tx_sz_1023B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B); + hw->stats.tx_sz_1518B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B); + hw->stats.tx_sz_max += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX); + hw->stats.tx_single_col += alx_read_mem32(hw, 
ALX_MIB_TX_SINGLE_COL); + hw->stats.tx_multi_col += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL); + hw->stats.tx_late_col += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL); + hw->stats.tx_abort_col += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL); + hw->stats.tx_underrun += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN); + hw->stats.tx_trd_eop += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP); + hw->stats.tx_len_err += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR); + hw->stats.tx_trunc += alx_read_mem32(hw, ALX_MIB_TX_TRUNC); + hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT); + hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT); + + hw->stats.update += alx_read_mem32(hw, ALX_MIB_UPDATE); +} diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h new file mode 100644 index 000000000..15548802d --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/hw.h @@ -0,0 +1,581 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ALX_HW_H_ +#define ALX_HW_H_ +#include +#include +#include +#include "reg.h" + +/* Transmit Packet Descriptor, contains 4 32-bit words. + * + * 31 16 0 + * +----------------+----------------+ + * | vlan-tag | buf length | + * +----------------+----------------+ + * | Word 1 | + * +----------------+----------------+ + * | Word 2: buf addr lo | + * +----------------+----------------+ + * | Word 3: buf addr hi | + * +----------------+----------------+ + * + * Word 2 and 3 combine to form a 64-bit buffer address + * + * Word 1 has three forms, depending on the state of bit 8/12/13: + * if bit8 =='1', the definition is just for custom checksum offload. + * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor + * for the skb is special for LSO V2, Word 2 become total skb length , + * Word 3 is meaningless. + * other condition, the definition is for general skb or ip/tcp/udp + * checksum or LSO(TSO) offload. 
+ * + * Here is the depiction: + * + * 0-+ 0-+ + * 1 | 1 | + * 2 | 2 | + * 3 | Payload offset 3 | L4 header offset + * 4 | (7:0) 4 | (7:0) + * 5 | 5 | + * 6 | 6 | + * 7-+ 7-+ + * 8 Custom csum enable = 1 8 Custom csum enable = 0 + * 9 General IPv4 checksum 9 General IPv4 checksum + * 10 General TCP checksum 10 General TCP checksum + * 11 General UDP checksum 11 General UDP checksum + * 12 Large Send Segment enable 12 Large Send Segment enable + * 13 Large Send Segment type 13 Large Send Segment type + * 14 VLAN tagged 14 VLAN tagged + * 15 Insert VLAN tag 15 Insert VLAN tag + * 16 IPv4 packet 16 IPv4 packet + * 17 Ethernet frame type 17 Ethernet frame type + * 18-+ 18-+ + * 19 | 19 | + * 20 | 20 | + * 21 | Custom csum offset 21 | + * 22 | (25:18) 22 | + * 23 | 23 | MSS (30:18) + * 24 | 24 | + * 25-+ 25 | + * 26-+ 26 | + * 27 | 27 | + * 28 | Reserved 28 | + * 29 | 29 | + * 30-+ 30-+ + * 31 End of packet 31 End of packet + */ +struct alx_txd { + __le16 len; + __le16 vlan_tag; + __le32 word1; + union { + __le64 addr; + struct { + __le32 pkt_len; + __le32 resvd; + } l; + } adrl; +} __packed; + +/* tpd word 1 */ +#define TPD_CXSUMSTART_MASK 0x00FF +#define TPD_CXSUMSTART_SHIFT 0 +#define TPD_L4HDROFFSET_MASK 0x00FF +#define TPD_L4HDROFFSET_SHIFT 0 +#define TPD_CXSUM_EN_MASK 0x0001 +#define TPD_CXSUM_EN_SHIFT 8 +#define TPD_IP_XSUM_MASK 0x0001 +#define TPD_IP_XSUM_SHIFT 9 +#define TPD_TCP_XSUM_MASK 0x0001 +#define TPD_TCP_XSUM_SHIFT 10 +#define TPD_UDP_XSUM_MASK 0x0001 +#define TPD_UDP_XSUM_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_V2_MASK 0x0001 +#define TPD_LSO_V2_SHIFT 13 +#define TPD_VLTAGGED_MASK 0x0001 +#define TPD_VLTAGGED_SHIFT 14 +#define TPD_INS_VLTAG_MASK 0x0001 +#define TPD_INS_VLTAG_SHIFT 15 +#define TPD_IPV4_MASK 0x0001 +#define TPD_IPV4_SHIFT 16 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 17 +#define TPD_CXSUMOFFSET_MASK 0x00FF +#define TPD_CXSUMOFFSET_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 18 +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK) + +/* Receive Free Descriptor */ +struct alx_rfd { + __le64 addr; /* data buffer address, length is + * declared in register --- every + * buffer has the same size + */ +} __packed; + +/* Receive Return Descriptor, contains 4 32-bit words. 
+ * + * 31 16 0 + * +----------------+----------------+ + * | Word 0 | + * +----------------+----------------+ + * | Word 1: RSS Hash value | + * +----------------+----------------+ + * | Word 2 | + * +----------------+----------------+ + * | Word 3 | + * +----------------+----------------+ + * + * Word 0 depiction & Word 2 depiction: + * + * 0--+ 0--+ + * 1 | 1 | + * 2 | 2 | + * 3 | 3 | + * 4 | 4 | + * 5 | 5 | + * 6 | 6 | + * 7 | IP payload checksum 7 | VLAN tag + * 8 | (15:0) 8 | (15:0) + * 9 | 9 | + * 10 | 10 | + * 11 | 11 | + * 12 | 12 | + * 13 | 13 | + * 14 | 14 | + * 15-+ 15-+ + * 16-+ 16-+ + * 17 | Number of RFDs 17 | + * 18 | (19:16) 18 | + * 19-+ 19 | Protocol ID + * 20-+ 20 | (23:16) + * 21 | 21 | + * 22 | 22 | + * 23 | 23-+ + * 24 | 24 | Reserved + * 25 | Start index of RFD-ring 25-+ + * 26 | (31:20) 26 | RSS Q-num (27:25) + * 27 | 27-+ + * 28 | 28-+ + * 29 | 29 | RSS Hash algorithm + * 30 | 30 | (31:28) + * 31-+ 31-+ + * + * Word 3 depiction: + * + * 0--+ + * 1 | + * 2 | + * 3 | + * 4 | + * 5 | + * 6 | + * 7 | Packet length (include FCS) + * 8 | (13:0) + * 9 | + * 10 | + * 11 | + * 12 | + * 13-+ + * 14 L4 Header checksum error + * 15 IPv4 checksum error + * 16 VLAN tagged + * 17-+ + * 18 | Protocol ID (19:17) + * 19-+ + * 20 Receive error summary + * 21 FCS(CRC) error + * 22 Frame alignment error + * 23 Truncated packet + * 24 Runt packet + * 25 Incomplete packet due to insufficient rx-desc + * 26 Broadcast packet + * 27 Multicast packet + * 28 Ethernet type (EII or 802.3) + * 29 FIFO overflow + * 30 Length error (for 802.3, length field mismatch with actual len) + * 31 Updated, indicate to driver that this RRD is refreshed. + */ +struct alx_rrd { + __le32 word0; + __le32 rss_hash; + __le32 word2; + __le32 word3; +} __packed; + +/* rrd word 0 */ +#define RRD_XSUM_MASK 0xFFFF +#define RRD_XSUM_SHIFT 0 +#define RRD_NOR_MASK 0x000F +#define RRD_NOR_SHIFT 16 +#define RRD_SI_MASK 0x0FFF +#define RRD_SI_SHIFT 20 + +/* rrd word 2 */ +#define RRD_VLTAG_MASK 0xFFFF +#define RRD_VLTAG_SHIFT 0 +#define RRD_PID_MASK 0x00FF +#define RRD_PID_SHIFT 16 +/* non-ip packet */ +#define RRD_PID_NONIP 0 +/* ipv4(only) */ +#define RRD_PID_IPV4 1 +/* tcp/ipv6 */ +#define RRD_PID_IPV6TCP 2 +/* tcp/ipv4 */ +#define RRD_PID_IPV4TCP 3 +/* udp/ipv6 */ +#define RRD_PID_IPV6UDP 4 +/* udp/ipv4 */ +#define RRD_PID_IPV4UDP 5 +/* ipv6(only) */ +#define RRD_PID_IPV6 6 +/* LLDP packet */ +#define RRD_PID_LLDP 7 +/* 1588 packet */ +#define RRD_PID_1588 8 +#define RRD_RSSQ_MASK 0x0007 +#define RRD_RSSQ_SHIFT 25 +#define RRD_RSSALG_MASK 0x000F +#define RRD_RSSALG_SHIFT 28 +#define RRD_RSSALG_TCPV6 0x1 +#define RRD_RSSALG_IPV6 0x2 +#define RRD_RSSALG_TCPV4 0x4 +#define RRD_RSSALG_IPV4 0x8 + +/* rrd word 3 */ +#define RRD_PKTLEN_MASK 0x3FFF +#define RRD_PKTLEN_SHIFT 0 +#define RRD_ERR_L4_MASK 0x0001 +#define RRD_ERR_L4_SHIFT 14 +#define RRD_ERR_IPV4_MASK 0x0001 +#define RRD_ERR_IPV4_SHIFT 15 +#define RRD_VLTAGGED_MASK 0x0001 +#define RRD_VLTAGGED_SHIFT 16 +#define RRD_OLD_PID_MASK 0x0007 +#define RRD_OLD_PID_SHIFT 17 +#define RRD_ERR_RES_MASK 0x0001 +#define RRD_ERR_RES_SHIFT 20 +#define RRD_ERR_FCS_MASK 0x0001 +#define RRD_ERR_FCS_SHIFT 21 +#define RRD_ERR_FAE_MASK 0x0001 +#define RRD_ERR_FAE_SHIFT 22 +#define RRD_ERR_TRUNC_MASK 0x0001 +#define RRD_ERR_TRUNC_SHIFT 23 +#define RRD_ERR_RUNT_MASK 0x0001 +#define RRD_ERR_RUNT_SHIFT 24 +#define RRD_ERR_ICMP_MASK 0x0001 +#define RRD_ERR_ICMP_SHIFT 25 +#define RRD_BCAST_MASK 0x0001 +#define RRD_BCAST_SHIFT 26 +#define RRD_MCAST_MASK 0x0001 +#define RRD_MCAST_SHIFT 27 
+#define RRD_ETHTYPE_MASK 0x0001 +#define RRD_ETHTYPE_SHIFT 28 +#define RRD_ERR_FIFOV_MASK 0x0001 +#define RRD_ERR_FIFOV_SHIFT 29 +#define RRD_ERR_LEN_MASK 0x0001 +#define RRD_ERR_LEN_SHIFT 30 +#define RRD_UPDATED_MASK 0x0001 +#define RRD_UPDATED_SHIFT 31 + + +#define ALX_MAX_SETUP_LNK_CYCLE 50 + +/* for FlowControl */ +#define ALX_FC_RX 0x01 +#define ALX_FC_TX 0x02 +#define ALX_FC_ANEG 0x04 + +/* for sleep control */ +#define ALX_SLEEP_WOL_PHY 0x00000001 +#define ALX_SLEEP_WOL_MAGIC 0x00000002 +#define ALX_SLEEP_CIFS 0x00000004 +#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \ + ALX_SLEEP_WOL_MAGIC | \ + ALX_SLEEP_CIFS) + +/* for RSS hash type */ +#define ALX_RSS_HASH_TYPE_IPV4 0x1 +#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2 +#define ALX_RSS_HASH_TYPE_IPV6 0x4 +#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8 +#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \ + ALX_RSS_HASH_TYPE_IPV4_TCP | \ + ALX_RSS_HASH_TYPE_IPV6 | \ + ALX_RSS_HASH_TYPE_IPV6_TCP) +#define ALX_DEF_RXBUF_SIZE 1536 +#define ALX_MAX_JUMBO_PKT_SIZE (9*1024) +#define ALX_MAX_TSO_PKT_SIZE (7*1024) +#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE +#define ALX_MIN_FRAME_SIZE 68 +#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +#define ALX_MAX_RX_QUEUES 8 +#define ALX_MAX_TX_QUEUES 4 +#define ALX_MAX_HANDLED_INTRS 5 + +#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | \ + ALX_ISR_DMAR | \ + ALX_ISR_SMB | \ + ALX_ISR_MANU | \ + ALX_ISR_TIMER) + +#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \ + ALX_ISR_DMAW | ALX_ISR_DMAR) + +#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \ + ALX_ISR_TXF_UR | \ + ALX_ISR_RFD_UR) + +#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \ + ALX_ISR_TX_Q1 | \ + ALX_ISR_TX_Q2 | \ + ALX_ISR_TX_Q3 | \ + ALX_ISR_RX_Q0 | \ + ALX_ISR_RX_Q1 | \ + ALX_ISR_RX_Q2 | \ + ALX_ISR_RX_Q3 | \ + ALX_ISR_RX_Q4 | \ + ALX_ISR_RX_Q5 | \ + ALX_ISR_RX_Q6 | \ + ALX_ISR_RX_Q7) + +/* Statistics counters collected by the MAC + * + * The order of the fields must match the strings in alx_gstrings_stats + * All stats fields should be u64 + * See ethtool.c + */ +struct alx_hw_stats { + /* rx */ + u64 rx_ok; /* good RX packets */ + u64 rx_bcast; /* good RX broadcast packets */ + u64 rx_mcast; /* good RX multicast packets */ + u64 rx_pause; /* RX pause frames */ + u64 rx_ctrl; /* RX control packets other than pause frames */ + u64 rx_fcs_err; /* RX packets with bad FCS */ + u64 rx_len_err; /* RX packets with length != actual size */ + u64 rx_byte_cnt; /* good bytes received. 
FCS is NOT included */ + u64 rx_runt; /* RX packets < 64 bytes with good FCS */ + u64 rx_frag; /* RX packets < 64 bytes with bad FCS */ + u64 rx_sz_64B; /* 64 byte RX packets */ + u64 rx_sz_127B; /* 65-127 byte RX packets */ + u64 rx_sz_255B; /* 128-255 byte RX packets */ + u64 rx_sz_511B; /* 256-511 byte RX packets */ + u64 rx_sz_1023B; /* 512-1023 byte RX packets */ + u64 rx_sz_1518B; /* 1024-1518 byte RX packets */ + u64 rx_sz_max; /* 1519 byte to MTU RX packets */ + u64 rx_ov_sz; /* truncated RX packets, size > MTU */ + u64 rx_ov_rxf; /* frames dropped due to RX FIFO overflow */ + u64 rx_ov_rrd; /* frames dropped due to RRD overflow */ + u64 rx_align_err; /* alignment errors */ + u64 rx_bc_byte_cnt; /* RX broadcast bytes, excluding FCS */ + u64 rx_mc_byte_cnt; /* RX multicast bytes, excluding FCS */ + u64 rx_err_addr; /* packets dropped due to address filtering */ + + /* tx */ + u64 tx_ok; /* good TX packets */ + u64 tx_bcast; /* good TX broadcast packets */ + u64 tx_mcast; /* good TX multicast packets */ + u64 tx_pause; /* TX pause frames */ + u64 tx_exc_defer; /* TX packets deferred excessively */ + u64 tx_ctrl; /* TX control frames, excluding pause frames */ + u64 tx_defer; /* TX packets deferred */ + u64 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */ + u64 tx_sz_64B; /* 64 byte TX packets */ + u64 tx_sz_127B; /* 65-127 byte TX packets */ + u64 tx_sz_255B; /* 128-255 byte TX packets */ + u64 tx_sz_511B; /* 256-511 byte TX packets */ + u64 tx_sz_1023B; /* 512-1023 byte TX packets */ + u64 tx_sz_1518B; /* 1024-1518 byte TX packets */ + u64 tx_sz_max; /* 1519 byte to MTU TX packets */ + u64 tx_single_col; /* packets TX after a single collision */ + u64 tx_multi_col; /* packets TX after multiple collisions */ + u64 tx_late_col; /* TX packets with late collisions */ + u64 tx_abort_col; /* TX packets aborted w/excessive collisions */ + u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun + */ + u64 tx_trd_eop; /* reads beyond the EOP into the next frame + * when TRD was not written timely + */ + u64 tx_len_err; /* TX packets where length != actual size */ + u64 tx_trunc; /* TX packets truncated due to size > MTU */ + u64 tx_bc_byte_cnt; /* broadcast bytes transmitted, excluding FCS */ + u64 tx_mc_byte_cnt; /* multicast bytes transmitted, excluding FCS */ + u64 update; +}; + + +/* maximum interrupt vectors for msix */ +#define ALX_MAX_MSIX_INTRS 16 + +#define ALX_GET_FIELD(_data, _field) \ + (((_data) >> _field ## _SHIFT) & _field ## _MASK) + +#define ALX_SET_FIELD(_data, _field, _value) do { \ + (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \ + (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\ + } while (0) + +struct alx_hw { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + + /* current & permanent mac addr */ + u8 mac_addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + u16 mtu; + u16 imt; + u8 dma_chnl; + u8 max_dma_chnl; + /* tpd threshold to trig INT */ + u32 ith_tpd; + u32 rx_ctrl; + u32 mc_hash[2]; + + u32 smb_timer; + /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */ + int link_speed; + u8 duplex; + + /* auto-neg advertisement or force mode config */ + u8 flowctrl; + u32 adv_cfg; + + spinlock_t mdio_lock; + struct mdio_if_info mdio; + u16 phy_id[2]; + + /* PHY link patch flag */ + bool lnk_patch; + + /* cumulated stats from the hardware (registers are cleared on read) */ + struct alx_hw_stats stats; +}; + +static inline int alx_hw_revision(struct alx_hw *hw) +{ + return hw->pdev->revision >> ALX_PCI_REVID_SHIFT; +} + 
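Illustrative sketch only, not part of the upstream patch: the RRD word layouts documented above are normally decoded with ALX_GET_FIELD, just as the driver's RX path does. The example_* helper names below are hypothetical; everything they reference (struct alx_rrd, the RRD_* masks/shifts, ALX_GET_FIELD, le32_to_cpu) is already defined by this header or the kernel headers it includes.

static inline u32 example_rrd_pkt_len(const struct alx_rrd *rrd)
{
	/* word3 bits 13:0 carry the packet length, including the 4-byte FCS */
	return ALX_GET_FIELD(le32_to_cpu(rrd->word3), RRD_PKTLEN);
}

static inline bool example_rrd_is_tcp(const struct alx_rrd *rrd)
{
	/* word2 bits 23:16 carry the protocol ID resolved by the hardware */
	u32 pid = ALX_GET_FIELD(le32_to_cpu(rrd->word2), RRD_PID);

	return pid == RRD_PID_IPV4TCP || pid == RRD_PID_IPV6TCP;
}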
+static inline bool alx_hw_with_cr(struct alx_hw *hw) +{ + return hw->pdev->revision & 1; +} + +static inline bool alx_hw_giga(struct alx_hw *hw) +{ + return hw->pdev->device & 1; +} + +static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val) +{ + writeb(val, hw->hw_addr + reg); +} + +static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val) +{ + writew(val, hw->hw_addr + reg); +} + +static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg) +{ + return readw(hw->hw_addr + reg); +} + +static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void alx_post_write(struct alx_hw *hw) +{ + readl(hw->hw_addr); +} + +int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr); +void alx_reset_phy(struct alx_hw *hw); +void alx_reset_pcie(struct alx_hw *hw); +void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en); +int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl); +void alx_post_phy_link(struct alx_hw *hw); +int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data); +int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data); +int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata); +int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data); +int alx_read_phy_link(struct alx_hw *hw); +int alx_clear_phy_intr(struct alx_hw *hw); +void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc); +void alx_start_mac(struct alx_hw *hw); +int alx_reset_mac(struct alx_hw *hw); +void alx_set_macaddr(struct alx_hw *hw, const u8 *addr); +bool alx_phy_configured(struct alx_hw *hw); +void alx_configure_basic(struct alx_hw *hw); +void alx_disable_rss(struct alx_hw *hw); +bool alx_get_phy_info(struct alx_hw *hw); +void alx_update_hw_stats(struct alx_hw *hw); + +static inline u32 alx_speed_to_ethadv(int speed, u8 duplex) +{ + if (speed == SPEED_1000 && duplex == DUPLEX_FULL) + return ADVERTISED_1000baseT_Full; + if (speed == SPEED_100 && duplex == DUPLEX_FULL) + return ADVERTISED_100baseT_Full; + if (speed == SPEED_100 && duplex== DUPLEX_HALF) + return ADVERTISED_100baseT_Half; + if (speed == SPEED_10 && duplex == DUPLEX_FULL) + return ADVERTISED_10baseT_Full; + if (speed == SPEED_10 && duplex == DUPLEX_HALF) + return ADVERTISED_10baseT_Half; + return 0; +} + +#endif diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c new file mode 100644 index 000000000..c8af3ce3e --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -0,0 +1,1559 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "alx.h" +#include "hw.h" +#include "reg.h" + +const char alx_drv_name[] = "alx"; + + +static void alx_free_txbuf(struct alx_priv *alx, int entry) +{ + struct alx_buffer *txb = &alx->txq.bufs[entry]; + + if (dma_unmap_len(txb, size)) { + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(txb, dma), + dma_unmap_len(txb, size), + DMA_TO_DEVICE); + dma_unmap_len_set(txb, size, 0); + } + + if (txb->skb) { + dev_kfree_skb_any(txb->skb); + txb->skb = NULL; + } +} + +static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct sk_buff *skb; + struct alx_buffer *cur_buf; + dma_addr_t dma; + u16 cur, next, count = 0; + + next = cur = rxq->write_idx; + if (++next == alx->rx_ringsz) + next = 0; + cur_buf = &rxq->bufs[cur]; + + while (!cur_buf->skb && next != rxq->read_idx) { + struct alx_rfd *rfd = &rxq->rfd[cur]; + + skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + if (!skb) + break; + dma = dma_map_single(&alx->hw.pdev->dev, + skb->data, alx->rxbuf_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) { + dev_kfree_skb(skb); + break; + } + + /* Unfortunately, RX descriptor buffers must be 4-byte + * aligned, so we can't use IP alignment. 
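+ * (reserving the usual 2 bytes of headroom to align the IP header would leave the DMA address only 2-byte aligned, violating that requirement)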
+ */ + if (WARN_ON(dma & 3)) { + dev_kfree_skb(skb); + break; + } + + cur_buf->skb = skb; + dma_unmap_len_set(cur_buf, size, alx->rxbuf_size); + dma_unmap_addr_set(cur_buf, dma, dma); + rfd->addr = cpu_to_le64(dma); + + cur = next; + if (++next == alx->rx_ringsz) + next = 0; + cur_buf = &rxq->bufs[cur]; + count++; + } + + if (count) { + /* flush all updates before updating hardware */ + wmb(); + rxq->write_idx = cur; + alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); + } + + return count; +} + +static inline int alx_tpd_avail(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + + if (txq->write_idx >= txq->read_idx) + return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1; + return txq->read_idx - txq->write_idx - 1; +} + +static bool alx_clean_tx_irq(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + u16 hw_read_idx, sw_read_idx; + unsigned int total_bytes = 0, total_packets = 0; + int budget = ALX_DEFAULT_TX_WORK; + + sw_read_idx = txq->read_idx; + hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX); + + if (sw_read_idx != hw_read_idx) { + while (sw_read_idx != hw_read_idx && budget > 0) { + struct sk_buff *skb; + + skb = txq->bufs[sw_read_idx].skb; + if (skb) { + total_bytes += skb->len; + total_packets++; + budget--; + } + + alx_free_txbuf(alx, sw_read_idx); + + if (++sw_read_idx == alx->tx_ringsz) + sw_read_idx = 0; + } + txq->read_idx = sw_read_idx; + + netdev_completed_queue(alx->dev, total_packets, total_bytes); + } + + if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) && + alx_tpd_avail(alx) > alx->tx_ringsz/4) + netif_wake_queue(alx->dev); + + return sw_read_idx == hw_read_idx; +} + +static void alx_schedule_link_check(struct alx_priv *alx) +{ + schedule_work(&alx->link_check_wk); +} + +static void alx_schedule_reset(struct alx_priv *alx) +{ + schedule_work(&alx->reset_wk); +} + +static int alx_clean_rx_irq(struct alx_priv *alx, int budget) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct alx_rrd *rrd; + struct alx_buffer *rxb; + struct sk_buff *skb; + u16 length, rfd_cleaned = 0; + int work = 0; + + while (work < budget) { + rrd = &rxq->rrd[rxq->rrd_read_idx]; + if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) + break; + rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT); + + if (ALX_GET_FIELD(le32_to_cpu(rrd->word0), + RRD_SI) != rxq->read_idx || + ALX_GET_FIELD(le32_to_cpu(rrd->word0), + RRD_NOR) != 1) { + alx_schedule_reset(alx); + return work; + } + + rxb = &rxq->bufs[rxq->read_idx]; + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(rxb, dma), + dma_unmap_len(rxb, size), + DMA_FROM_DEVICE); + dma_unmap_len_set(rxb, size, 0); + skb = rxb->skb; + rxb->skb = NULL; + + if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) || + rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) { + rrd->word3 = 0; + dev_kfree_skb_any(skb); + goto next_pkt; + } + + length = ALX_GET_FIELD(le32_to_cpu(rrd->word3), + RRD_PKTLEN) - ETH_FCS_LEN; + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, alx->dev); + + skb_checksum_none_assert(skb); + if (alx->dev->features & NETIF_F_RXCSUM && + !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) | + cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) { + switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2), + RRD_PID)) { + case RRD_PID_IPV6UDP: + case RRD_PID_IPV4UDP: + case RRD_PID_IPV4TCP: + case RRD_PID_IPV6TCP: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } + } + + napi_gro_receive(&alx->napi, skb); + work++; + +next_pkt: + if (++rxq->read_idx == alx->rx_ringsz) + rxq->read_idx = 0; + if 
(++rxq->rrd_read_idx == alx->rx_ringsz) + rxq->rrd_read_idx = 0; + + if (++rfd_cleaned > ALX_RX_ALLOC_THRESH) + rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC); + } + + if (rfd_cleaned) + alx_refill_rx_ring(alx, GFP_ATOMIC); + + return work; +} + +static int alx_poll(struct napi_struct *napi, int budget) +{ + struct alx_priv *alx = container_of(napi, struct alx_priv, napi); + struct alx_hw *hw = &alx->hw; + unsigned long flags; + bool tx_complete; + int work; + + tx_complete = alx_clean_tx_irq(alx); + work = alx_clean_rx_irq(alx, budget); + + if (!tx_complete || work == budget) + return budget; + + napi_complete(&alx->napi); + + /* enable interrupt */ + spin_lock_irqsave(&alx->irq_lock, flags); + alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + spin_unlock_irqrestore(&alx->irq_lock, flags); + + alx_post_write(hw); + + return work; +} + +static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) +{ + struct alx_hw *hw = &alx->hw; + bool write_int_mask = false; + + spin_lock(&alx->irq_lock); + + /* ACK interrupt */ + alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS); + intr &= alx->int_mask; + + if (intr & ALX_ISR_FATAL) { + netif_warn(alx, hw, alx->dev, + "fatal interrupt 0x%x, resetting\n", intr); + alx_schedule_reset(alx); + goto out; + } + + if (intr & ALX_ISR_ALERT) + netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr); + + if (intr & ALX_ISR_PHY) { + /* suppress PHY interrupt, because the source + * is from PHY internal. only the internal status + * is cleared, the interrupt status could be cleared. + */ + alx->int_mask &= ~ALX_ISR_PHY; + write_int_mask = true; + alx_schedule_link_check(alx); + } + + if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) { + napi_schedule(&alx->napi); + /* mask rx/tx interrupt, enable them when napi complete */ + alx->int_mask &= ~ALX_ISR_ALL_QUEUES; + write_int_mask = true; + } + + if (write_int_mask) + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + + alx_write_mem32(hw, ALX_ISR, 0); + + out: + spin_unlock(&alx->irq_lock); + return IRQ_HANDLED; +} + +static irqreturn_t alx_intr_msi(int irq, void *data) +{ + struct alx_priv *alx = data; + + return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR)); +} + +static irqreturn_t alx_intr_legacy(int irq, void *data) +{ + struct alx_priv *alx = data; + struct alx_hw *hw = &alx->hw; + u32 intr; + + intr = alx_read_mem32(hw, ALX_ISR); + + if (intr & ALX_ISR_DIS || !(intr & alx->int_mask)) + return IRQ_NONE; + + return alx_intr_handle(alx, intr); +} + +static void alx_init_ring_ptrs(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + u32 addr_hi = ((u64)alx->descmem.dma) >> 32; + + alx->rxq.read_idx = 0; + alx->rxq.write_idx = 0; + alx->rxq.rrd_read_idx = 0; + alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi); + alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma); + alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz); + alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma); + alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz); + alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size); + + alx->txq.read_idx = 0; + alx->txq.write_idx = 0; + alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi); + alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma); + alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz); + + /* load these pointers into the chip */ + alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR); +} + +static void alx_free_txring_buf(struct alx_priv *alx) +{ + struct alx_tx_queue *txq = &alx->txq; + int i; + + if (!txq->bufs) + return; + + for 
(i = 0; i < alx->tx_ringsz; i++) + alx_free_txbuf(alx, i); + + memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer)); + memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd)); + txq->write_idx = 0; + txq->read_idx = 0; + + netdev_reset_queue(alx->dev); +} + +static void alx_free_rxring_buf(struct alx_priv *alx) +{ + struct alx_rx_queue *rxq = &alx->rxq; + struct alx_buffer *cur_buf; + u16 i; + + if (rxq == NULL) + return; + + for (i = 0; i < alx->rx_ringsz; i++) { + cur_buf = rxq->bufs + i; + if (cur_buf->skb) { + dma_unmap_single(&alx->hw.pdev->dev, + dma_unmap_addr(cur_buf, dma), + dma_unmap_len(cur_buf, size), + DMA_FROM_DEVICE); + dev_kfree_skb(cur_buf->skb); + cur_buf->skb = NULL; + dma_unmap_len_set(cur_buf, size, 0); + dma_unmap_addr_set(cur_buf, dma, 0); + } + } + + rxq->write_idx = 0; + rxq->read_idx = 0; + rxq->rrd_read_idx = 0; +} + +static void alx_free_buffers(struct alx_priv *alx) +{ + alx_free_txring_buf(alx); + alx_free_rxring_buf(alx); +} + +static int alx_reinit_rings(struct alx_priv *alx) +{ + alx_free_buffers(alx); + + alx_init_ring_ptrs(alx); + + if (!alx_refill_rx_ring(alx, GFP_KERNEL)) + return -ENOMEM; + + return 0; +} + +static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash) +{ + u32 crc32, bit, reg; + + crc32 = ether_crc(ETH_ALEN, addr); + reg = (crc32 >> 31) & 0x1; + bit = (crc32 >> 26) & 0x1F; + + mc_hash[reg] |= BIT(bit); +} + +static void __alx_set_rx_mode(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + struct netdev_hw_addr *ha; + u32 mc_hash[2] = {}; + + if (!(netdev->flags & IFF_ALLMULTI)) { + netdev_for_each_mc_addr(ha, netdev) + alx_add_mc_addr(hw, ha->addr, mc_hash); + + alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]); + alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]); + } + + hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); + if (netdev->flags & IFF_PROMISC) + hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; + if (netdev->flags & IFF_ALLMULTI) + hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +static void alx_set_rx_mode(struct net_device *netdev) +{ + __alx_set_rx_mode(netdev); +} + +static int alx_set_mac_address(struct net_device *netdev, void *data) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + struct sockaddr *addr = data; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netdev->addr_assign_type & NET_ADDR_RANDOM) + netdev->addr_assign_type ^= NET_ADDR_RANDOM; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + alx_set_macaddr(hw, hw->mac_addr); + + return 0; +} + +static int alx_alloc_descriptors(struct alx_priv *alx) +{ + alx->txq.bufs = kcalloc(alx->tx_ringsz, + sizeof(struct alx_buffer), + GFP_KERNEL); + if (!alx->txq.bufs) + return -ENOMEM; + + alx->rxq.bufs = kcalloc(alx->rx_ringsz, + sizeof(struct alx_buffer), + GFP_KERNEL); + if (!alx->rxq.bufs) + goto out_free; + + /* physical tx/rx ring descriptors + * + * Allocate them as a single chunk because they must not cross a + * 4G boundary (hardware has a single register for high 32 bits + * of addresses only) + */ + alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz + + sizeof(struct alx_rfd) * alx->rx_ringsz; + alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, + alx->descmem.size, + &alx->descmem.dma, + GFP_KERNEL); + if 
(!alx->descmem.virt) + goto out_free; + + alx->txq.tpd = alx->descmem.virt; + alx->txq.tpd_dma = alx->descmem.dma; + + /* alignment requirement for next block */ + BUILD_BUG_ON(sizeof(struct alx_txd) % 8); + + alx->rxq.rrd = + (void *)((u8 *)alx->descmem.virt + + sizeof(struct alx_txd) * alx->tx_ringsz); + alx->rxq.rrd_dma = alx->descmem.dma + + sizeof(struct alx_txd) * alx->tx_ringsz; + + /* alignment requirement for next block */ + BUILD_BUG_ON(sizeof(struct alx_rrd) % 8); + + alx->rxq.rfd = + (void *)((u8 *)alx->descmem.virt + + sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz); + alx->rxq.rfd_dma = alx->descmem.dma + + sizeof(struct alx_txd) * alx->tx_ringsz + + sizeof(struct alx_rrd) * alx->rx_ringsz; + + return 0; +out_free: + kfree(alx->txq.bufs); + kfree(alx->rxq.bufs); + return -ENOMEM; +} + +static int alx_alloc_rings(struct alx_priv *alx) +{ + int err; + + err = alx_alloc_descriptors(alx); + if (err) + return err; + + alx->int_mask &= ~ALX_ISR_ALL_QUEUES; + alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; + alx->tx_ringsz = alx->tx_ringsz; + + netif_napi_add(alx->dev, &alx->napi, alx_poll, 64); + + alx_reinit_rings(alx); + return 0; +} + +static void alx_free_rings(struct alx_priv *alx) +{ + netif_napi_del(&alx->napi); + alx_free_buffers(alx); + + kfree(alx->txq.bufs); + kfree(alx->rxq.bufs); + + dma_free_coherent(&alx->hw.pdev->dev, + alx->descmem.size, + alx->descmem.virt, + alx->descmem.dma); +} + +static void alx_config_vector_mapping(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0); + alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0); + alx_write_mem32(hw, ALX_MSI_ID_MAP, 0); +} + +static void alx_irq_enable(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + /* level-1 interrupt switch */ + alx_write_mem32(hw, ALX_ISR, 0); + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + alx_post_write(hw); +} + +static void alx_irq_disable(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); + alx_write_mem32(hw, ALX_IMR, 0); + alx_post_write(hw); + + synchronize_irq(alx->hw.pdev->irq); +} + +static int alx_request_irq(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + struct alx_hw *hw = &alx->hw; + int err; + u32 msi_ctrl; + + msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT; + + if (!pci_enable_msi(alx->hw.pdev)) { + alx->msi = true; + + alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, + msi_ctrl | ALX_MSI_MASK_SEL_LINE); + err = request_irq(pdev->irq, alx_intr_msi, 0, + alx->dev->name, alx); + if (!err) + goto out; + /* fall back to legacy interrupt */ + pci_disable_msi(alx->hw.pdev); + } + + alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0); + err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED, + alx->dev->name, alx); +out: + if (!err) + alx_config_vector_mapping(alx); + return err; +} + +static void alx_free_irq(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + + free_irq(pdev->irq, alx); + + if (alx->msi) { + pci_disable_msi(alx->hw.pdev); + alx->msi = false; + } +} + +static int alx_identify_hw(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + int rev = alx_hw_revision(hw); + + if (rev > ALX_REV_C0) + return -EINVAL; + + hw->max_dma_chnl = rev >= ALX_REV_B0 ? 
4 : 2; + + return 0; +} + +static int alx_init_sw(struct alx_priv *alx) +{ + struct pci_dev *pdev = alx->hw.pdev; + struct alx_hw *hw = &alx->hw; + int err; + + err = alx_identify_hw(alx); + if (err) { + dev_err(&pdev->dev, "unrecognized chip, aborting\n"); + return err; + } + + alx->hw.lnk_patch = + pdev->device == ALX_DEV_ID_AR8161 && + pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC && + pdev->subsystem_device == 0x0091 && + pdev->revision == 0; + + hw->smb_timer = 400; + hw->mtu = alx->dev->mtu; + alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); + alx->tx_ringsz = 256; + alx->rx_ringsz = 512; + hw->imt = 200; + alx->int_mask = ALX_ISR_MISC; + hw->dma_chnl = hw->max_dma_chnl; + hw->ith_tpd = alx->tx_ringsz / 3; + hw->link_speed = SPEED_UNKNOWN; + hw->duplex = DUPLEX_UNKNOWN; + hw->adv_cfg = ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_1000baseT_Full; + hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; + + hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN | + ALX_MAC_CTRL_MHASH_ALG_HI5B | + ALX_MAC_CTRL_BRD_EN | + ALX_MAC_CTRL_PCRCE | + ALX_MAC_CTRL_CRCE | + ALX_MAC_CTRL_RXFC_EN | + ALX_MAC_CTRL_TXFC_EN | + 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT; + + return err; +} + + +static netdev_features_t alx_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return features; +} + +static void alx_netif_stop(struct alx_priv *alx) +{ + alx->dev->trans_start = jiffies; + if (netif_carrier_ok(alx->dev)) { + netif_carrier_off(alx->dev); + netif_tx_disable(alx->dev); + napi_disable(&alx->napi); + } +} + +static void alx_halt(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_netif_stop(alx); + hw->link_speed = SPEED_UNKNOWN; + hw->duplex = DUPLEX_UNKNOWN; + + alx_reset_mac(hw); + + /* disable l0s/l1 */ + alx_enable_aspm(hw, false, false); + alx_irq_disable(alx); + alx_free_buffers(alx); +} + +static void alx_configure(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + + alx_configure_basic(hw); + alx_disable_rss(hw); + __alx_set_rx_mode(alx->dev); + + alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); +} + +static void alx_activate(struct alx_priv *alx) +{ + /* hardware setting lost, restore it */ + alx_reinit_rings(alx); + alx_configure(alx); + + /* clear old interrupts */ + alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); + + alx_irq_enable(alx); + + alx_schedule_link_check(alx); +} + +static void alx_reinit(struct alx_priv *alx) +{ + ASSERT_RTNL(); + + alx_halt(alx); + alx_activate(alx); +} + +static int alx_change_mtu(struct net_device *netdev, int mtu) +{ + struct alx_priv *alx = netdev_priv(netdev); + int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ALX_MIN_FRAME_SIZE) || + (max_frame > ALX_MAX_FRAME_SIZE)) + return -EINVAL; + + if (netdev->mtu == mtu) + return 0; + + netdev->mtu = mtu; + alx->hw.mtu = mtu; + alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? 
+ ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; + netdev_update_features(netdev); + if (netif_running(netdev)) + alx_reinit(alx); + return 0; +} + +static void alx_netif_start(struct alx_priv *alx) +{ + netif_tx_wake_all_queues(alx->dev); + napi_enable(&alx->napi); + netif_carrier_on(alx->dev); +} + +static int __alx_open(struct alx_priv *alx, bool resume) +{ + int err; + + if (!resume) + netif_carrier_off(alx->dev); + + err = alx_alloc_rings(alx); + if (err) + return err; + + alx_configure(alx); + + err = alx_request_irq(alx); + if (err) + goto out_free_rings; + + /* clear old interrupts */ + alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); + + alx_irq_enable(alx); + + if (!resume) + netif_tx_start_all_queues(alx->dev); + + alx_schedule_link_check(alx); + return 0; + +out_free_rings: + alx_free_rings(alx); + return err; +} + +static void __alx_stop(struct alx_priv *alx) +{ + alx_halt(alx); + alx_free_irq(alx); + alx_free_rings(alx); +} + +static const char *alx_speed_desc(struct alx_hw *hw) +{ + switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) { + case ADVERTISED_1000baseT_Full: + return "1 Gbps Full"; + case ADVERTISED_100baseT_Full: + return "100 Mbps Full"; + case ADVERTISED_100baseT_Half: + return "100 Mbps Half"; + case ADVERTISED_10baseT_Full: + return "10 Mbps Full"; + case ADVERTISED_10baseT_Half: + return "10 Mbps Half"; + default: + return "Unknown speed"; + } +} + +static void alx_check_link(struct alx_priv *alx) +{ + struct alx_hw *hw = &alx->hw; + unsigned long flags; + int old_speed; + u8 old_duplex; + int err; + + /* clear PHY internal interrupt status, otherwise the main + * interrupt status will be asserted forever + */ + alx_clear_phy_intr(hw); + + old_speed = hw->link_speed; + old_duplex = hw->duplex; + err = alx_read_phy_link(hw); + if (err < 0) + goto reset; + + spin_lock_irqsave(&alx->irq_lock, flags); + alx->int_mask |= ALX_ISR_PHY; + alx_write_mem32(hw, ALX_IMR, alx->int_mask); + spin_unlock_irqrestore(&alx->irq_lock, flags); + + if (old_speed == hw->link_speed) + return; + + if (hw->link_speed != SPEED_UNKNOWN) { + netif_info(alx, link, alx->dev, + "NIC Up: %s\n", alx_speed_desc(hw)); + alx_post_phy_link(hw); + alx_enable_aspm(hw, true, true); + alx_start_mac(hw); + + if (old_speed == SPEED_UNKNOWN) + alx_netif_start(alx); + } else { + /* link is now down */ + alx_netif_stop(alx); + netif_info(alx, link, alx->dev, "Link Down\n"); + err = alx_reset_mac(hw); + if (err) + goto reset; + alx_irq_disable(alx); + + /* MAC reset causes all HW settings to be lost, restore all */ + err = alx_reinit_rings(alx); + if (err) + goto reset; + alx_configure(alx); + alx_enable_aspm(hw, false, true); + alx_post_phy_link(hw); + alx_irq_enable(alx); + } + + return; + +reset: + alx_schedule_reset(alx); +} + +static int alx_open(struct net_device *netdev) +{ + return __alx_open(netdev_priv(netdev), false); +} + +static int alx_stop(struct net_device *netdev) +{ + __alx_stop(netdev_priv(netdev)); + return 0; +} + +static void alx_link_check(struct work_struct *work) +{ + struct alx_priv *alx; + + alx = container_of(work, struct alx_priv, link_check_wk); + + rtnl_lock(); + alx_check_link(alx); + rtnl_unlock(); +} + +static void alx_reset(struct work_struct *work) +{ + struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk); + + rtnl_lock(); + alx_reinit(alx); + rtnl_unlock(); +} + +static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) +{ + u8 cso, css; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + cso = skb_checksum_start_offset(skb); + if 
(cso & 1) + return -EINVAL; + + css = cso + skb->csum_offset; + first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT); + first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT); + first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT); + + return 0; +} + +static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) +{ + struct alx_tx_queue *txq = &alx->txq; + struct alx_txd *tpd, *first_tpd; + dma_addr_t dma; + int maplen, f, first_idx = txq->write_idx; + + first_tpd = &txq->tpd[txq->write_idx]; + tpd = first_tpd; + + maplen = skb_headlen(skb); + dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen, + DMA_TO_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) + goto err_dma; + + dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); + dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); + + tpd->adrl.addr = cpu_to_le64(dma); + tpd->len = cpu_to_le16(maplen); + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + tpd = &txq->tpd[txq->write_idx]; + + tpd->word1 = first_tpd->word1; + + maplen = skb_frag_size(frag); + dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0, + maplen, DMA_TO_DEVICE); + if (dma_mapping_error(&alx->hw.pdev->dev, dma)) + goto err_dma; + dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); + dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); + + tpd->adrl.addr = cpu_to_le64(dma); + tpd->len = cpu_to_le16(maplen); + } + + /* last TPD, set EOP flag and store skb */ + tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT); + txq->bufs[txq->write_idx].skb = skb; + + if (++txq->write_idx == alx->tx_ringsz) + txq->write_idx = 0; + + return 0; + +err_dma: + f = first_idx; + while (f != txq->write_idx) { + alx_free_txbuf(alx, f); + if (++f == alx->tx_ringsz) + f = 0; + } + return -ENOMEM; +} + +static netdev_tx_t alx_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_tx_queue *txq = &alx->txq; + struct alx_txd *first; + int tpdreq = skb_shinfo(skb)->nr_frags + 1; + + if (alx_tpd_avail(alx) < tpdreq) { + netif_stop_queue(alx->dev); + goto drop; + } + + first = &txq->tpd[txq->write_idx]; + memset(first, 0, sizeof(*first)); + + if (alx_tx_csum(skb, first)) + goto drop; + + if (alx_map_tx_skb(alx, skb) < 0) + goto drop; + + netdev_sent_queue(alx->dev, skb->len); + + /* flush updates before updating hardware */ + wmb(); + alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx); + + if (alx_tpd_avail(alx) < alx->tx_ringsz/8) + netif_stop_queue(alx->dev); + + return NETDEV_TX_OK; + +drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static void alx_tx_timeout(struct net_device *dev) +{ + struct alx_priv *alx = netdev_priv(dev); + + alx_schedule_reset(alx); +} + +static int alx_mdio_read(struct net_device *netdev, + int prtad, int devad, u16 addr) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + u16 val; + int err; + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad == MDIO_DEVAD_NONE) + err = alx_read_phy_reg(hw, addr, &val); + else + err = alx_read_phy_ext(hw, devad, addr, &val); + + if (err) + return err; + return val; +} + +static int alx_mdio_write(struct net_device *netdev, + int prtad, int devad, u16 addr, u16 val) +{ + struct alx_priv *alx = netdev_priv(netdev); + struct alx_hw *hw = &alx->hw; + + if (prtad != hw->mdio.prtad) + return -EINVAL; + + if (devad 
== MDIO_DEVAD_NONE) + return alx_write_phy_reg(hw, addr, val); + + return alx_write_phy_ext(hw, devad, addr, val); +} + +static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct alx_priv *alx = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void alx_poll_controller(struct net_device *netdev) +{ + struct alx_priv *alx = netdev_priv(netdev); + + if (alx->msi) + alx_intr_msi(0, alx); + else + alx_intr_legacy(0, alx); +} +#endif + +static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *net_stats) +{ + struct alx_priv *alx = netdev_priv(dev); + struct alx_hw_stats *hw_stats = &alx->hw.stats; + + spin_lock(&alx->stats_lock); + + alx_update_hw_stats(&alx->hw); + + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_single_col + + hw_stats->tx_multi_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_ov_sz + + hw_stats->rx_ov_rrd + + hw_stats->rx_align_err + + hw_stats->rx_ov_rxf; + + net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_dropped = hw_stats->rx_ov_rrd; + + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; + + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + + spin_unlock(&alx->stats_lock); + + return net_stats; +} + +static const struct net_device_ops alx_netdev_ops = { + .ndo_open = alx_open, + .ndo_stop = alx_stop, + .ndo_start_xmit = alx_start_xmit, + .ndo_get_stats64 = alx_get_stats64, + .ndo_set_rx_mode = alx_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = alx_set_mac_address, + .ndo_change_mtu = alx_change_mtu, + .ndo_do_ioctl = alx_ioctl, + .ndo_tx_timeout = alx_tx_timeout, + .ndo_fix_features = alx_fix_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = alx_poll_controller, +#endif +}; + +static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct alx_priv *alx; + struct alx_hw *hw; + bool phy_configured; + int bars, err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* The alx chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used for descriptors. 
+ */ + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA config, aborting\n"); + goto out_pci_disable; + } + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions(pdev, bars, alx_drv_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed(bars:%d)\n", bars); + goto out_pci_disable; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + if (!pdev->pm_cap) { + dev_err(&pdev->dev, + "Can't find power management capability, aborting\n"); + err = -EIO; + goto out_pci_release; + } + + netdev = alloc_etherdev(sizeof(*alx)); + if (!netdev) { + err = -ENOMEM; + goto out_pci_release; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + alx = netdev_priv(netdev); + spin_lock_init(&alx->hw.mdio_lock); + spin_lock_init(&alx->irq_lock); + spin_lock_init(&alx->stats_lock); + alx->dev = netdev; + alx->hw.pdev = pdev; + alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | + NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL; + hw = &alx->hw; + pci_set_drvdata(pdev, alx); + + hw->hw_addr = pci_ioremap_bar(pdev, 0); + if (!hw->hw_addr) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -EIO; + goto out_free_netdev; + } + + netdev->netdev_ops = &alx_netdev_ops; + netdev->ethtool_ops = &alx_ethtool_ops; + netdev->irq = pdev->irq; + netdev->watchdog_timeo = ALX_WATCHDOG_TIME; + + if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) + pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; + + err = alx_init_sw(alx); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto out_unmap; + } + + alx_reset_pcie(hw); + + phy_configured = alx_phy_configured(hw); + + if (!phy_configured) + alx_reset_phy(hw); + + err = alx_reset_mac(hw); + if (err) { + dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); + goto out_unmap; + } + + /* setup link to put it in a known good starting state */ + if (!phy_configured) { + err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); + if (err) { + dev_err(&pdev->dev, + "failed to configure PHY speed/duplex (err=%d)\n", + err); + goto out_unmap; + } + } + + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + + if (alx_get_perm_macaddr(hw, hw->perm_addr)) { + dev_warn(&pdev->dev, + "Invalid permanent address programmed, using random one\n"); + eth_hw_addr_random(netdev); + memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); + } + + memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); + memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); + memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); + + hw->mdio.prtad = 0; + hw->mdio.mmds = 0; + hw->mdio.dev = netdev; + hw->mdio.mode_support = MDIO_SUPPORTS_C45 | + MDIO_SUPPORTS_C22 | + MDIO_EMULATE_C22; + hw->mdio.mdio_read = alx_mdio_read; + hw->mdio.mdio_write = alx_mdio_write; + + if (!alx_get_phy_info(hw)) { + dev_err(&pdev->dev, "failed to identify PHY\n"); + err = -EIO; + goto out_unmap; + } + + INIT_WORK(&alx->link_check_wk, alx_link_check); + INIT_WORK(&alx->reset_wk, alx_reset); + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto out_unmap; + } + + netdev_info(netdev, + "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", + netdev->dev_addr); + + return 0; + +out_unmap: + iounmap(hw->hw_addr); +out_free_netdev: + 
free_netdev(netdev); +out_pci_release: + pci_release_selected_regions(pdev, bars); +out_pci_disable: + pci_disable_device(pdev); + return err; +} + +static void alx_remove(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + /* restore permanent mac address */ + alx_set_macaddr(hw, hw->perm_addr); + + unregister_netdev(alx->dev); + iounmap(hw->hw_addr); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + + free_netdev(alx->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int alx_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct alx_priv *alx = pci_get_drvdata(pdev); + + if (!netif_running(alx->dev)) + return 0; + netif_device_detach(alx->dev); + __alx_stop(alx); + return 0; +} + +static int alx_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + + alx_reset_phy(hw); + + if (!netif_running(alx->dev)) + return 0; + netif_device_attach(alx->dev); + return __alx_open(alx, true); +} + +static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); +#define ALX_PM_OPS (&alx_pm_ops) +#else +#define ALX_PM_OPS NULL +#endif + + +static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET; + + dev_info(&pdev->dev, "pci error detected\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + netif_device_detach(netdev); + alx_halt(alx); + } + + if (state == pci_channel_io_perm_failure) + rc = PCI_ERS_RESULT_DISCONNECT; + else + pci_disable_device(pdev); + + rtnl_unlock(); + + return rc; +} + +static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct alx_hw *hw = &alx->hw; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + dev_info(&pdev->dev, "pci error slot reset\n"); + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); + goto out; + } + + pci_set_master(pdev); + + alx_reset_pcie(hw); + if (!alx_reset_mac(hw)) + rc = PCI_ERS_RESULT_RECOVERED; +out: + pci_cleanup_aer_uncorrect_error_status(pdev); + + rtnl_unlock(); + + return rc; +} + +static void alx_pci_error_resume(struct pci_dev *pdev) +{ + struct alx_priv *alx = pci_get_drvdata(pdev); + struct net_device *netdev = alx->dev; + + dev_info(&pdev->dev, "pci error resume\n"); + + rtnl_lock(); + + if (netif_running(netdev)) { + alx_activate(alx); + netif_device_attach(netdev); + } + + rtnl_unlock(); +} + +static const struct pci_error_handlers alx_err_handlers = { + .error_detected = alx_pci_error_detected, + .slot_reset = alx_pci_error_slot_reset, + .resume = alx_pci_error_resume, +}; + +static const struct pci_device_id alx_pci_tbl[] = { + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) }, + {} +}; + +static struct pci_driver 
alx_driver = { + .name = alx_drv_name, + .id_table = alx_pci_tbl, + .probe = alx_probe, + .remove = alx_remove, + .err_handler = &alx_err_handlers, + .driver.pm = ALX_PM_OPS, +}; + +module_pci_driver(alx_driver); +MODULE_DEVICE_TABLE(pci, alx_pci_tbl); +MODULE_AUTHOR("Johannes Berg "); +MODULE_AUTHOR("Qualcomm Corporation, "); +MODULE_DESCRIPTION( + "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h new file mode 100644 index 000000000..af006b44b --- /dev/null +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -0,0 +1,854 @@ +/* + * Copyright (c) 2013 Johannes Berg + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Copyright (c) 2012 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef ALX_REG_H +#define ALX_REG_H + +#define ALX_DEV_ID_AR8161 0x1091 +#define ALX_DEV_ID_E2200 0xe091 +#define ALX_DEV_ID_AR8162 0x1090 +#define ALX_DEV_ID_AR8171 0x10A1 +#define ALX_DEV_ID_AR8172 0x10A0 + +/* rev definition, + * bit(0): with xD support + * bit(1): with Card Reader function + * bit(7:2): real revision + */ +#define ALX_PCI_REVID_SHIFT 3 +#define ALX_REV_A0 0 +#define ALX_REV_A1 1 +#define ALX_REV_B0 2 +#define ALX_REV_C0 3 + +#define ALX_DEV_CTRL 0x0060 +#define ALX_DEV_CTRL_MAXRRS_MIN 2 + +#define ALX_MSIX_MASK 0x0090 + +#define ALX_UE_SVRT 0x010C +#define ALX_UE_SVRT_FCPROTERR BIT(13) +#define ALX_UE_SVRT_DLPROTERR BIT(4) + +/* eeprom & flash load register */ +#define ALX_EFLD 0x0204 +#define ALX_EFLD_F_EXIST BIT(10) +#define ALX_EFLD_E_EXIST BIT(9) +#define ALX_EFLD_STAT BIT(5) +#define ALX_EFLD_START BIT(0) + +/* eFuse load register */ +#define ALX_SLD 0x0218 +#define ALX_SLD_STAT BIT(12) +#define ALX_SLD_START BIT(11) +#define ALX_SLD_MAX_TO 100 + +#define ALX_PDLL_TRNS1 0x1104 +#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11) + +#define ALX_PMCTRL 0x12F8 +#define ALX_PMCTRL_HOTRST_WTEN BIT(31) +/* bit30: L0s/L1 controlled by MAC based on throughput(setting in 15A0) */ +#define ALX_PMCTRL_ASPM_FCEN BIT(30) +#define ALX_PMCTRL_SADLY_EN BIT(29) +#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF +#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24 +#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC +/* bit[23:20] if pm_request_l1 time > @, then enter L0s not L1 */ +#define ALX_PMCTRL_L1REQ_TO_MASK 0xF +#define ALX_PMCTRL_L1REQ_TO_SHIFT 20 +#define ALX_PMCTRL_L1REG_TO_DEF 0xF +#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19) +#define ALX_PMCTRL_L1_TIMER_MASK 0x7 +#define ALX_PMCTRL_L1_TIMER_SHIFT 16 +#define ALX_PMCTRL_L1_TIMER_16US 4 +#define ALX_PMCTRL_RCVR_WT_1US BIT(15) +/* bit13: enable pcie clk switch in L1 state */ +#define ALX_PMCTRL_L1_CLKSW_EN BIT(13) +#define ALX_PMCTRL_L0S_EN BIT(12) +#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11) +#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7) +/* bit6: power down serdes RX */ +#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6) +#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5) +#define ALX_PMCTRL_L1_SRDS_EN BIT(4) +#define ALX_PMCTRL_L1_EN BIT(3) + +/*******************************************************/ +/* following registers are mapped only to memory space */ +/*******************************************************/ + +#define ALX_MASTER 0x1400 +/* bit12: 1:alwys select pclk from serdes, not sw to 25M */ +#define ALX_MASTER_PCLKSEL_SRDS BIT(12) +/* bit11: irq moduration for rx */ +#define ALX_MASTER_IRQMOD2_EN BIT(11) +/* bit10: irq moduration for tx/rx */ +#define ALX_MASTER_IRQMOD1_EN BIT(10) +#define ALX_MASTER_SYSALVTIMER_EN BIT(7) +#define ALX_MASTER_OOB_DIS BIT(6) +/* bit5: wakeup without pcie clk */ +#define ALX_MASTER_WAKEN_25M BIT(5) +/* bit0: MAC & DMA reset */ +#define ALX_MASTER_DMA_MAC_RST BIT(0) +#define ALX_DMA_MAC_RST_TO 50 + +#define ALX_IRQ_MODU_TIMER 0x1408 +#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF +#define ALX_IRQ_MODU_TIMER1_SHIFT 0 + +#define ALX_PHY_CTRL 0x140C +#define ALX_PHY_CTRL_100AB_EN BIT(17) +/* bit14: affect MAC & PHY, go to low power sts */ +#define ALX_PHY_CTRL_POWER_DOWN BIT(14) +/* bit13: 1:pll always ON, 0:can switch in lpw */ +#define ALX_PHY_CTRL_PLL_ON BIT(13) +#define ALX_PHY_CTRL_RST_ANALOG BIT(12) +#define ALX_PHY_CTRL_HIB_PULSE BIT(11) +#define ALX_PHY_CTRL_HIB_EN BIT(10) +#define ALX_PHY_CTRL_IDDQ BIT(7) +#define ALX_PHY_CTRL_GATE_25M BIT(5) +#define ALX_PHY_CTRL_LED_MODE BIT(2) +/* bit0: out of dsp RST state */ +#define 
ALX_PHY_CTRL_DSPRST_OUT BIT(0) +#define ALX_PHY_CTRL_DSPRST_TO 80 +#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \ + ALX_PHY_CTRL_100AB_EN | \ + ALX_PHY_CTRL_PLL_ON) + +#define ALX_MAC_STS 0x1410 +#define ALX_MAC_STS_TXQ_BUSY BIT(3) +#define ALX_MAC_STS_RXQ_BUSY BIT(2) +#define ALX_MAC_STS_TXMAC_BUSY BIT(1) +#define ALX_MAC_STS_RXMAC_BUSY BIT(0) +#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \ + ALX_MAC_STS_RXQ_BUSY | \ + ALX_MAC_STS_TXMAC_BUSY | \ + ALX_MAC_STS_RXMAC_BUSY) + +#define ALX_MDIO 0x1414 +#define ALX_MDIO_MODE_EXT BIT(30) +#define ALX_MDIO_BUSY BIT(27) +#define ALX_MDIO_CLK_SEL_MASK 0x7 +#define ALX_MDIO_CLK_SEL_SHIFT 24 +#define ALX_MDIO_CLK_SEL_25MD4 0 +#define ALX_MDIO_CLK_SEL_25MD128 7 +#define ALX_MDIO_START BIT(23) +#define ALX_MDIO_SPRES_PRMBL BIT(22) +/* bit21: 1:read,0:write */ +#define ALX_MDIO_OP_READ BIT(21) +#define ALX_MDIO_REG_MASK 0x1F +#define ALX_MDIO_REG_SHIFT 16 +#define ALX_MDIO_DATA_MASK 0xFFFF +#define ALX_MDIO_DATA_SHIFT 0 +#define ALX_MDIO_MAX_AC_TO 120 + +#define ALX_MDIO_EXTN 0x1448 +#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F +#define ALX_MDIO_EXTN_DEVAD_SHIFT 16 +#define ALX_MDIO_EXTN_REG_MASK 0xFFFF +#define ALX_MDIO_EXTN_REG_SHIFT 0 + +#define ALX_SERDES 0x1424 +#define ALX_SERDES_PHYCLK_SLWDWN BIT(18) +#define ALX_SERDES_MACCLK_SLWDWN BIT(17) + +#define ALX_LPI_CTRL 0x1440 +#define ALX_LPI_CTRL_EN BIT(0) + +/* for B0+, bit[13..] for C0+ */ +#define ALX_HRTBT_EXT_CTRL 0x1AD0 +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F +#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24 +#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18) +#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14) +#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13) +#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12) +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF +#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4 +#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3) +#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2) +#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1) +#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0) + +#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4 +#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478 +#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8 +#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC +#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0 +#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4 + +/* 1B8C ~ 1B94 for C0+ */ +#define ALX_SWOI_ACER_CTRL 0x1B8C +#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20) +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF +#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12 +#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF +#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_2 0x1B90 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12 +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0 + +#define ALX_SWOI_IOAC_CTRL_3 0x1B94 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF +#define 
ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12 +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF +#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0 + +/* for B0 */ +#define ALX_IDLE_DECISN_TIMER 0x1474 +/* 1ms */ +#define ALX_IDLE_DECISN_TIMER_DEF 0x400 + +#define ALX_MAC_CTRL 0x1480 +#define ALX_MAC_CTRL_FAST_PAUSE BIT(31) +#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30) +/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/ +#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29) +#define ALX_MAC_CTRL_BRD_EN BIT(26) +#define ALX_MAC_CTRL_MULTIALL_EN BIT(25) +#define ALX_MAC_CTRL_SPEED_MASK 0x3 +#define ALX_MAC_CTRL_SPEED_SHIFT 20 +#define ALX_MAC_CTRL_SPEED_10_100 1 +#define ALX_MAC_CTRL_SPEED_1000 2 +#define ALX_MAC_CTRL_PROMISC_EN BIT(15) +#define ALX_MAC_CTRL_VLANSTRIP BIT(14) +#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF +#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10 +#define ALX_MAC_CTRL_PCRCE BIT(7) +#define ALX_MAC_CTRL_CRCE BIT(6) +#define ALX_MAC_CTRL_FULLD BIT(5) +#define ALX_MAC_CTRL_RXFC_EN BIT(3) +#define ALX_MAC_CTRL_TXFC_EN BIT(2) +#define ALX_MAC_CTRL_RX_EN BIT(1) +#define ALX_MAC_CTRL_TX_EN BIT(0) + +#define ALX_STAD0 0x1488 +#define ALX_STAD1 0x148C + +#define ALX_HASH_TBL0 0x1490 +#define ALX_HASH_TBL1 0x1494 + +#define ALX_MTU 0x149C +#define ALX_MTU_JUMBO_TH 1514 +#define ALX_MTU_STD_ALGN 1536 + +#define ALX_SRAM5 0x1524 +#define ALX_SRAM_RXF_LEN_MASK 0xFFF +#define ALX_SRAM_RXF_LEN_SHIFT 0 +#define ALX_SRAM_RXF_LEN_8K (8*1024) + +#define ALX_SRAM9 0x1534 +#define ALX_SRAM_LOAD_PTR BIT(0) + +#define ALX_RX_BASE_ADDR_HI 0x1540 + +#define ALX_TX_BASE_ADDR_HI 0x1544 + +#define ALX_RFD_ADDR_LO 0x1550 +#define ALX_RFD_RING_SZ 0x1560 +#define ALX_RFD_BUF_SZ 0x1564 + +#define ALX_RRD_ADDR_LO 0x1568 +#define ALX_RRD_RING_SZ 0x1578 + +/* pri3: highest, pri0: lowest */ +#define ALX_TPD_PRI3_ADDR_LO 0x14E4 +#define ALX_TPD_PRI2_ADDR_LO 0x14E0 +#define ALX_TPD_PRI1_ADDR_LO 0x157C +#define ALX_TPD_PRI0_ADDR_LO 0x1580 + +/* producer index is 16bit */ +#define ALX_TPD_PRI3_PIDX 0x1618 +#define ALX_TPD_PRI2_PIDX 0x161A +#define ALX_TPD_PRI1_PIDX 0x15F0 +#define ALX_TPD_PRI0_PIDX 0x15F2 + +/* consumer index is 16bit */ +#define ALX_TPD_PRI3_CIDX 0x161C +#define ALX_TPD_PRI2_CIDX 0x161E +#define ALX_TPD_PRI1_CIDX 0x15F4 +#define ALX_TPD_PRI0_CIDX 0x15F6 + +#define ALX_TPD_RING_SZ 0x1584 + +#define ALX_TXQ0 0x1590 +#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF +#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16 +#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200 +#define ALX_TXQ0_LSO_8023_EN BIT(7) +#define ALX_TXQ0_MODE_ENHANCE BIT(6) +#define ALX_TXQ0_EN BIT(5) +#define ALX_TXQ0_SUPT_IPOPT BIT(4) +#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF +#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0 +#define ALX_TXQ_TPD_BURSTPREF_DEF 5 + +#define ALX_TXQ1 0x1594 +/* bit11: drop large packet, len > (rfd buf) */ +#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11) +#define ALX_TXQ1_JUMBO_TSO_TH (7*1024) + +#define ALX_RXQ0 0x15A0 +#define ALX_RXQ0_EN BIT(31) +#define ALX_RXQ0_RSS_HASH_EN BIT(29) +#define ALX_RXQ0_RSS_MODE_MASK 0x3 +#define ALX_RXQ0_RSS_MODE_SHIFT 26 +#define ALX_RXQ0_RSS_MODE_DIS 0 +#define ALX_RXQ0_RSS_MODE_MQMI 3 +#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F +#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20 +#define ALX_RXQ0_NUM_RFD_PREF_DEF 8 +#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF +#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8 +#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100 +#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128 +#define ALX_RXQ0_IPV6_PARSE_EN BIT(7) +#define ALX_RXQ0_RSS_HSTYP_MASK 0xF +#define ALX_RXQ0_RSS_HSTYP_SHIFT 2 +#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5) +#define 
ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4) +#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3) +#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2) +#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV6_EN | \ + ALX_RXQ0_RSS_HSTYP_IPV4_EN) +#define ALX_RXQ0_ASPM_THRESH_MASK 0x3 +#define ALX_RXQ0_ASPM_THRESH_SHIFT 0 +#define ALX_RXQ0_ASPM_THRESH_100M 3 + +#define ALX_RXQ2 0x15A8 +#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF +#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16 +#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF +#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0 +/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) + + * rx-packet(1522) + delay-of-link(64) + * = 3212. + */ +#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212 + +#define ALX_DMA 0x15C0 +#define ALX_DMA_RCHNL_SEL_MASK 0x3 +#define ALX_DMA_RCHNL_SEL_SHIFT 26 +#define ALX_DMA_WDLY_CNT_MASK 0xF +#define ALX_DMA_WDLY_CNT_SHIFT 16 +#define ALX_DMA_WDLY_CNT_DEF 4 +#define ALX_DMA_RDLY_CNT_MASK 0x1F +#define ALX_DMA_RDLY_CNT_SHIFT 11 +#define ALX_DMA_RDLY_CNT_DEF 15 +/* bit10: 0:tpd with pri, 1: data */ +#define ALX_DMA_RREQ_PRI_DATA BIT(10) +#define ALX_DMA_RREQ_BLEN_MASK 0x7 +#define ALX_DMA_RREQ_BLEN_SHIFT 4 +#define ALX_DMA_RORDER_MODE_MASK 0x7 +#define ALX_DMA_RORDER_MODE_SHIFT 0 +#define ALX_DMA_RORDER_MODE_OUT 4 + +#define ALX_WOL0 0x14A0 +#define ALX_WOL0_PME_LINK BIT(5) +#define ALX_WOL0_LINK_EN BIT(4) +#define ALX_WOL0_PME_MAGIC_EN BIT(3) +#define ALX_WOL0_MAGIC_EN BIT(2) + +#define ALX_RFD_PIDX 0x15E0 + +#define ALX_RFD_CIDX 0x15F8 + +/* MIB */ +#define ALX_MIB_BASE 0x1700 + +#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0) +#define ALX_MIB_RX_BCAST (ALX_MIB_BASE + 4) +#define ALX_MIB_RX_MCAST (ALX_MIB_BASE + 8) +#define ALX_MIB_RX_PAUSE (ALX_MIB_BASE + 12) +#define ALX_MIB_RX_CTRL (ALX_MIB_BASE + 16) +#define ALX_MIB_RX_FCS_ERR (ALX_MIB_BASE + 20) +#define ALX_MIB_RX_LEN_ERR (ALX_MIB_BASE + 24) +#define ALX_MIB_RX_BYTE_CNT (ALX_MIB_BASE + 28) +#define ALX_MIB_RX_RUNT (ALX_MIB_BASE + 32) +#define ALX_MIB_RX_FRAG (ALX_MIB_BASE + 36) +#define ALX_MIB_RX_SZ_64B (ALX_MIB_BASE + 40) +#define ALX_MIB_RX_SZ_127B (ALX_MIB_BASE + 44) +#define ALX_MIB_RX_SZ_255B (ALX_MIB_BASE + 48) +#define ALX_MIB_RX_SZ_511B (ALX_MIB_BASE + 52) +#define ALX_MIB_RX_SZ_1023B (ALX_MIB_BASE + 56) +#define ALX_MIB_RX_SZ_1518B (ALX_MIB_BASE + 60) +#define ALX_MIB_RX_SZ_MAX (ALX_MIB_BASE + 64) +#define ALX_MIB_RX_OV_SZ (ALX_MIB_BASE + 68) +#define ALX_MIB_RX_OV_RXF (ALX_MIB_BASE + 72) +#define ALX_MIB_RX_OV_RRD (ALX_MIB_BASE + 76) +#define ALX_MIB_RX_ALIGN_ERR (ALX_MIB_BASE + 80) +#define ALX_MIB_RX_BCCNT (ALX_MIB_BASE + 84) +#define ALX_MIB_RX_MCCNT (ALX_MIB_BASE + 88) +#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92) + +#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96) +#define ALX_MIB_TX_BCAST (ALX_MIB_BASE + 100) +#define ALX_MIB_TX_MCAST (ALX_MIB_BASE + 104) +#define ALX_MIB_TX_PAUSE (ALX_MIB_BASE + 108) +#define ALX_MIB_TX_EXC_DEFER (ALX_MIB_BASE + 112) +#define ALX_MIB_TX_CTRL (ALX_MIB_BASE + 116) +#define ALX_MIB_TX_DEFER (ALX_MIB_BASE + 120) +#define ALX_MIB_TX_BYTE_CNT (ALX_MIB_BASE + 124) +#define ALX_MIB_TX_SZ_64B (ALX_MIB_BASE + 128) +#define ALX_MIB_TX_SZ_127B (ALX_MIB_BASE + 132) +#define ALX_MIB_TX_SZ_255B (ALX_MIB_BASE + 136) +#define ALX_MIB_TX_SZ_511B (ALX_MIB_BASE + 140) +#define ALX_MIB_TX_SZ_1023B (ALX_MIB_BASE + 144) +#define ALX_MIB_TX_SZ_1518B (ALX_MIB_BASE + 148) +#define ALX_MIB_TX_SZ_MAX (ALX_MIB_BASE + 152) +#define ALX_MIB_TX_SINGLE_COL (ALX_MIB_BASE + 156) +#define ALX_MIB_TX_MULTI_COL 
(ALX_MIB_BASE + 160) +#define ALX_MIB_TX_LATE_COL (ALX_MIB_BASE + 164) +#define ALX_MIB_TX_ABORT_COL (ALX_MIB_BASE + 168) +#define ALX_MIB_TX_UNDERRUN (ALX_MIB_BASE + 172) +#define ALX_MIB_TX_TRD_EOP (ALX_MIB_BASE + 176) +#define ALX_MIB_TX_LEN_ERR (ALX_MIB_BASE + 180) +#define ALX_MIB_TX_TRUNC (ALX_MIB_BASE + 184) +#define ALX_MIB_TX_BCCNT (ALX_MIB_BASE + 188) +#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192) +#define ALX_MIB_UPDATE (ALX_MIB_BASE + 196) + + +#define ALX_ISR 0x1600 +#define ALX_ISR_DIS BIT(31) +#define ALX_ISR_RX_Q7 BIT(30) +#define ALX_ISR_RX_Q6 BIT(29) +#define ALX_ISR_RX_Q5 BIT(28) +#define ALX_ISR_RX_Q4 BIT(27) +#define ALX_ISR_PCIE_LNKDOWN BIT(26) +#define ALX_ISR_RX_Q3 BIT(19) +#define ALX_ISR_RX_Q2 BIT(18) +#define ALX_ISR_RX_Q1 BIT(17) +#define ALX_ISR_RX_Q0 BIT(16) +#define ALX_ISR_TX_Q0 BIT(15) +#define ALX_ISR_PHY BIT(12) +#define ALX_ISR_DMAW BIT(10) +#define ALX_ISR_DMAR BIT(9) +#define ALX_ISR_TXF_UR BIT(8) +#define ALX_ISR_TX_Q3 BIT(7) +#define ALX_ISR_TX_Q2 BIT(6) +#define ALX_ISR_TX_Q1 BIT(5) +#define ALX_ISR_RFD_UR BIT(4) +#define ALX_ISR_RXF_OV BIT(3) +#define ALX_ISR_MANU BIT(2) +#define ALX_ISR_TIMER BIT(1) +#define ALX_ISR_SMB BIT(0) + +#define ALX_IMR 0x1604 + +/* re-send assert msg if SW no response */ +#define ALX_INT_RETRIG 0x1608 +/* 40ms */ +#define ALX_INT_RETRIG_TO 20000 + +#define ALX_SMB_TIMER 0x15C4 + +#define ALX_TINT_TPD_THRSHLD 0x15C8 + +#define ALX_TINT_TIMER 0x15CC + +#define ALX_CLK_GATE 0x1814 +#define ALX_CLK_GATE_RXMAC BIT(5) +#define ALX_CLK_GATE_TXMAC BIT(4) +#define ALX_CLK_GATE_RXQ BIT(3) +#define ALX_CLK_GATE_TXQ BIT(2) +#define ALX_CLK_GATE_DMAR BIT(1) +#define ALX_CLK_GATE_DMAW BIT(0) +#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \ + ALX_CLK_GATE_TXMAC | \ + ALX_CLK_GATE_RXQ | \ + ALX_CLK_GATE_TXQ | \ + ALX_CLK_GATE_DMAR | \ + ALX_CLK_GATE_DMAW) + +/* interop between drivers */ +#define ALX_DRV 0x1804 +#define ALX_DRV_PHY_AUTO BIT(28) +#define ALX_DRV_PHY_1000 BIT(27) +#define ALX_DRV_PHY_100 BIT(26) +#define ALX_DRV_PHY_10 BIT(25) +#define ALX_DRV_PHY_DUPLEX BIT(24) +/* bit23: adv Pause */ +#define ALX_DRV_PHY_PAUSE BIT(23) +/* bit22: adv Asym Pause */ +#define ALX_DRV_PHY_MASK 0xFF +#define ALX_DRV_PHY_SHIFT 21 +#define ALX_DRV_PHY_UNKNOWN 0 + +/* flag of phy inited */ +#define ALX_PHY_INITED 0x003F + +/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */ +#define ALX_WOL_CTRL2 0x1830 +#define ALX_WOL_CTRL2_DATA_STORE BIT(3) +#define ALX_WOL_CTRL2_PTRN_EVT BIT(2) +#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1) +#define ALX_WOL_CTRL2_PTRN_EN BIT(0) + +#define ALX_WOL_CTRL3 0x1834 +#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF +#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0 + +#define ALX_WOL_CTRL4 0x1838 +#define ALX_WOL_CTRL4_PT15_MATCH BIT(31) +#define ALX_WOL_CTRL4_PT14_MATCH BIT(30) +#define ALX_WOL_CTRL4_PT13_MATCH BIT(29) +#define ALX_WOL_CTRL4_PT12_MATCH BIT(28) +#define ALX_WOL_CTRL4_PT11_MATCH BIT(27) +#define ALX_WOL_CTRL4_PT10_MATCH BIT(26) +#define ALX_WOL_CTRL4_PT9_MATCH BIT(25) +#define ALX_WOL_CTRL4_PT8_MATCH BIT(24) +#define ALX_WOL_CTRL4_PT7_MATCH BIT(23) +#define ALX_WOL_CTRL4_PT6_MATCH BIT(22) +#define ALX_WOL_CTRL4_PT5_MATCH BIT(21) +#define ALX_WOL_CTRL4_PT4_MATCH BIT(20) +#define ALX_WOL_CTRL4_PT3_MATCH BIT(19) +#define ALX_WOL_CTRL4_PT2_MATCH BIT(18) +#define ALX_WOL_CTRL4_PT1_MATCH BIT(17) +#define ALX_WOL_CTRL4_PT0_MATCH BIT(16) +#define ALX_WOL_CTRL4_PT15_EN BIT(15) +#define ALX_WOL_CTRL4_PT14_EN BIT(14) +#define ALX_WOL_CTRL4_PT13_EN BIT(13) +#define ALX_WOL_CTRL4_PT12_EN BIT(12) +#define 
ALX_WOL_CTRL4_PT11_EN BIT(11) +#define ALX_WOL_CTRL4_PT10_EN BIT(10) +#define ALX_WOL_CTRL4_PT9_EN BIT(9) +#define ALX_WOL_CTRL4_PT8_EN BIT(8) +#define ALX_WOL_CTRL4_PT7_EN BIT(7) +#define ALX_WOL_CTRL4_PT6_EN BIT(6) +#define ALX_WOL_CTRL4_PT5_EN BIT(5) +#define ALX_WOL_CTRL4_PT4_EN BIT(4) +#define ALX_WOL_CTRL4_PT3_EN BIT(3) +#define ALX_WOL_CTRL4_PT2_EN BIT(2) +#define ALX_WOL_CTRL4_PT1_EN BIT(1) +#define ALX_WOL_CTRL4_PT0_EN BIT(0) + +#define ALX_WOL_CTRL5 0x183C +#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0 + +#define ALX_WOL_CTRL6 0x1840 +#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0 + +#define ALX_WOL_CTRL7 0x1844 +#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0 + +#define ALX_WOL_CTRL8 0x1848 +#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24 +#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16 +#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8 +#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF +#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0 + +#define ALX_ACER_FIXED_PTN0 0x1850 +#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF +#define ALX_ACER_FIXED_PTN0_SHIFT 0 + +#define ALX_ACER_FIXED_PTN1 0x1854 +#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF +#define ALX_ACER_FIXED_PTN1_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM0 0x1858 +#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM0_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM1 0x185C +#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM1_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM2 0x1860 +#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM2_SHIFT 0 + +#define ALX_ACER_RANDOM_NUM3 0x1864 +#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF +#define ALX_ACER_RANDOM_NUM3_SHIFT 0 + +#define ALX_ACER_MAGIC 0x1868 +#define ALX_ACER_MAGIC_EN BIT(31) +#define ALX_ACER_MAGIC_PME_EN BIT(30) +#define ALX_ACER_MAGIC_MATCH BIT(29) +#define ALX_ACER_MAGIC_FF_CHECK BIT(10) +#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F +#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5 +#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F +#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0 + +#define ALX_ACER_TIMER 0x186C +#define ALX_ACER_TIMER_EN BIT(31) +#define ALX_ACER_TIMER_PME_EN BIT(30) +#define ALX_ACER_TIMER_MATCH BIT(29) +#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF +#define ALX_ACER_TIMER_THRES_SHIFT 0 +#define ALX_ACER_TIMER_THRES_DEF 1 + +/* RSS definitions */ +#define ALX_RSS_KEY0 0x14B0 +#define ALX_RSS_KEY1 0x14B4 +#define ALX_RSS_KEY2 0x14B8 +#define ALX_RSS_KEY3 0x14BC +#define ALX_RSS_KEY4 0x14C0 +#define ALX_RSS_KEY5 0x14C4 +#define ALX_RSS_KEY6 0x14C8 +#define ALX_RSS_KEY7 0x14CC +#define ALX_RSS_KEY8 0x14D0 +#define ALX_RSS_KEY9 0x14D4 + +#define ALX_RSS_IDT_TBL0 
0x1B00 + +#define ALX_MSI_MAP_TBL1 0x15D0 +#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20 +#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16 +#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12 +#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8 +#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4 +#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0 + +#define ALX_MSI_MAP_TBL2 0x15D8 +#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20 +#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16 +#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12 +#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8 +#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4 +#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0 + +#define ALX_MSI_ID_MAP 0x15D4 + +#define ALX_MSI_RETRANS_TIMER 0x1920 +/* bit16: 1:line,0:standard */ +#define ALX_MSI_MASK_SEL_LINE BIT(16) +#define ALX_MSI_RETRANS_TM_MASK 0xFFFF +#define ALX_MSI_RETRANS_TM_SHIFT 0 + +/* CR DMA ctrl */ + +/* TX QoS */ +#define ALX_WRR 0x1938 +#define ALX_WRR_PRI_MASK 0x3 +#define ALX_WRR_PRI_SHIFT 29 +#define ALX_WRR_PRI_RESTRICT_NONE 3 +#define ALX_WRR_PRI3_MASK 0x1F +#define ALX_WRR_PRI3_SHIFT 24 +#define ALX_WRR_PRI2_MASK 0x1F +#define ALX_WRR_PRI2_SHIFT 16 +#define ALX_WRR_PRI1_MASK 0x1F +#define ALX_WRR_PRI1_SHIFT 8 +#define ALX_WRR_PRI0_MASK 0x1F +#define ALX_WRR_PRI0_SHIFT 0 + +#define ALX_HQTPD 0x193C +#define ALX_HQTPD_BURST_EN BIT(31) +#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8 +#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4 +#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF +#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0 + +#define ALX_MISC 0x19C0 +#define ALX_MISC_PSW_OCP_MASK 0x7 +#define ALX_MISC_PSW_OCP_SHIFT 21 +#define ALX_MISC_PSW_OCP_DEF 0x7 +#define ALX_MISC_ISO_EN BIT(12) +#define ALX_MISC_INTNLOSC_OPEN BIT(3) + +#define ALX_MSIC2 0x19C8 +#define ALX_MSIC2_CALB_START BIT(0) + +#define ALX_MISC3 0x19CC +/* bit1: 1:Software control 25M */ +#define ALX_MISC3_25M_BY_SW BIT(1) +/* bit0: 25M switch to intnl OSC */ +#define ALX_MISC3_25M_NOTO_INTNL BIT(0) + +/* MSIX tbl in memory space */ +#define ALX_MSIX_ENTRY_BASE 0x2000 + +/********************* PHY regs definition ***************************/ + +/* PHY Specific Status Register */ +#define ALX_MII_GIGA_PSSR 0x11 +#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 +#define ALX_GIGA_PSSR_DPLX 0x2000 +#define ALX_GIGA_PSSR_SPEED 0xC000 +#define ALX_GIGA_PSSR_10MBS 0x0000 +#define ALX_GIGA_PSSR_100MBS 0x4000 +#define ALX_GIGA_PSSR_1000MBS 0x8000 + +/* PHY Interrupt Enable Register */ +#define ALX_MII_IER 0x12 +#define ALX_IER_LINK_UP 0x0400 +#define ALX_IER_LINK_DOWN 0x0800 + +/* PHY Interrupt Status Register */ +#define ALX_MII_ISR 0x13 + +#define ALX_MII_DBG_ADDR 0x1D +#define ALX_MII_DBG_DATA 0x1E + +/***************************** debug port *************************************/ + +#define ALX_MIIDBG_ANACTRL 0x00 +#define ALX_ANACTRL_DEF 0x02EF + +#define ALX_MIIDBG_SYSMODCTRL 0x04 +/* en half bias */ +#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B + +#define ALX_MIIDBG_SRDSYSMOD 0x05 +#define ALX_SRDSYSMOD_DEEMP_EN 0x0040 +#define ALX_SRDSYSMOD_DEF 0x2C46 + +#define ALX_MIIDBG_HIBNEG 0x0B +#define ALX_HIBNEG_PSHIB_EN 0x8000 +#define ALX_HIBNEG_HIB_PSE 0x1000 +#define ALX_HIBNEG_DEF 0xBC40 +#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \ + ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE)) + +#define ALX_MIIDBG_TST10BTCFG 0x12 +#define ALX_TST10BTCFG_DEF 0x4C04 + +#define ALX_MIIDBG_AZ_ANADECT 0x15 +#define ALX_AZ_ANADECT_DEF 0x3220 +#define ALX_AZ_ANADECT_LONG 0x3210 + +#define ALX_MIIDBG_MSE16DB 0x18 +#define ALX_MSE16DB_UP 0x05EA +#define ALX_MSE16DB_DOWN 0x02EA + +#define ALX_MIIDBG_MSE20DB 0x1C +#define 
ALX_MSE20DB_TH_MASK 0x7F +#define ALX_MSE20DB_TH_SHIFT 2 +#define ALX_MSE20DB_TH_DEF 0x2E +#define ALX_MSE20DB_TH_HI 0x54 + +#define ALX_MIIDBG_AGC 0x23 +#define ALX_AGC_2_VGA_MASK 0x3FU +#define ALX_AGC_2_VGA_SHIFT 8 +#define ALX_AGC_LONG1G_LIMT 40 +#define ALX_AGC_LONG100M_LIMT 44 + +#define ALX_MIIDBG_LEGCYPS 0x29 +#define ALX_LEGCYPS_EN 0x8000 +#define ALX_LEGCYPS_DEF 0x129D + +#define ALX_MIIDBG_TST100BTCFG 0x36 +#define ALX_TST100BTCFG_DEF 0xE12C + +#define ALX_MIIDBG_GREENCFG 0x3B +#define ALX_GREENCFG_DEF 0x7078 + +#define ALX_MIIDBG_GREENCFG2 0x3D +#define ALX_GREENCFG2_BP_GREEN 0x8000 +#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080 + +/******* dev 3 *********/ +#define ALX_MIIEXT_PCS 3 + +#define ALX_MIIEXT_CLDCTRL3 0x8003 +#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 + +#define ALX_MIIEXT_CLDCTRL5 0x8005 +#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000 + +#define ALX_MIIEXT_CLDCTRL6 0x8006 +#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF +#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0 +#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116 +#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152 + +#define ALX_MIIEXT_VDRVBIAS 0x8062 +#define ALX_VDRVBIAS_DEF 0x3 + +/********* dev 7 **********/ +#define ALX_MIIEXT_ANEG 7 + +#define ALX_MIIEXT_LOCAL_EEEADV 0x3C +#define ALX_LOCAL_EEEADV_1000BT 0x0004 +#define ALX_LOCAL_EEEADV_100BT 0x0002 + +#define ALX_MIIEXT_AFE 0x801A +#define ALX_AFE_10BT_100M_TH 0x0040 + +#define ALX_MIIEXT_S3DIG10 0x8023 +/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */ +#define ALX_MIIEXT_S3DIG10_SL 0x0001 +#define ALX_MIIEXT_S3DIG10_DEF 0 + +#define ALX_MIIEXT_NLP78 0x8027 +#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05 + +#endif diff --git a/drivers/net/ethernet/atheros/atl1c/Makefile b/drivers/net/ethernet/atheros/atl1c/Makefile new file mode 100644 index 000000000..c37d96695 --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1c/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_ATL1C) += atl1c.o +atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h new file mode 100644 index 000000000..b9203d928 --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -0,0 +1,605 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL1C_H_ +#define _ATL1C_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl1c_hw.h" + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define AT_VLAN_TO_TAG(_vlan, _tag) \ + _tag = ((((_vlan) >> 8) & 0xFF) |\ + (((_vlan) & 0xFF) << 8)) + +#define AT_TAG_TO_VLAN(_tag, _vlan) \ + _vlan = ((((_tag) >> 8) & 0xFF) |\ + (((_tag) & 0xFF) << 8)) + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) +#define MAX_JUMBO_FRAME_SIZE (6*1024) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_DEF_RECEIVE_QUEUE 1 +#define AT_MAX_TRANSMIT_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 5 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 100 + +#define AT_ASPM_L0S_TIMER 6 +#define AT_ASPM_L1_TIMER 12 +#define AT_LCKDET_TIMER 12 + +#define ATL1C_PCIE_L0S_L1_DISABLE 0x01 +#define ATL1C_PCIE_PHY_RESET 0x02 + +#define ATL1C_ASPM_L0s_ENABLE 0x0001 +#define ATL1C_ASPM_L1_ENABLE 0x0002 + +#define AT_REGS_LEN (74 * sizeof(u32)) +#define AT_EEPROM_LEN 512 + +#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define ATL1C_RFD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_rx_free_desc) +#define ATL1C_TPD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_tpd_desc) +#define ATL1C_RRD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_recv_ret_status) + +/* tpd word 1 bit 0:7 General Checksum task offload */ +#define TPD_L4HDR_OFFSET_MASK 0x00FF +#define TPD_L4HDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Large Send task offload (IPv4/IPV6) */ +#define TPD_TCPHDR_OFFSET_MASK 0x00FF +#define TPD_TCPHDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Custom Checksum task offload */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 0 + +/* tpd word 1 bit 8:17 */ +#define TPD_CCSUM_EN_MASK 0x0001 +#define TPD_CCSUM_EN_SHIFT 8 +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 9 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 10 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 /* TCP Large Send Offload */ +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_VER_MASK 0x0001 +#define TPD_LSO_VER_SHIFT 13 /* 0 : ipv4; 1 : ipv4/ipv6 */ +#define TPD_CON_VTAG_MASK 0x0001 +#define TPD_CON_VTAG_SHIFT 14 +#define TPD_INS_VTAG_MASK 0x0001 +#define TPD_INS_VTAG_SHIFT 15 +#define TPD_IPV4_PACKET_MASK 0x0001 /* valid when LSO VER is 1 */ +#define TPD_IPV4_PACKET_SHIFT 16 +#define TPD_ETH_TYPE_MASK 0x0001 +#define TPD_ETH_TYPE_SHIFT 17 /* 0 : 802.3 frame; 1 : Ethernet */ + +/* tpd word 18:25 Custom Checksum task offload */ +#define TPD_CCSUM_OFFSET_MASK 0x00FF +#define TPD_CCSUM_OFFSET_SHIFT 18 +#define TPD_CCSUM_EPAD_MASK 0x0001 +#define TPD_CCSUM_EPAD_SHIFT 30 + +/* tpd word 18:30 Large Send task offload (IPv4/IPV6) */ +#define TPD_MSS_MASK 0x1FFF 
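All of these descriptor fields follow the same MASK/SHIFT convention: a value is masked to its width, shifted into position, and OR-ed into the 32-bit word. A minimal sketch of composing word1 for custom-checksum offload is shown below; the helper name is hypothetical, the offsets are assumed to already be in the units the hardware expects, and the real driver would additionally convert the result to little-endian before storing it in the descriptor.

/* Illustrative sketch only: build a TPD word1 value for custom-checksum
 * offload using the MASK/SHIFT pairs defined above.
 */
static inline u32 example_tpd_ccsum_word1(u8 payload_offset, u8 csum_offset)
{
	u32 word1 = 0;

	/* where the checksummed payload starts */
	word1 |= ((u32)payload_offset & TPD_PLOADOFFSET_MASK) <<
		 TPD_PLOADOFFSET_SHIFT;
	/* enable custom checksum and tell the NIC where to write it back */
	word1 |= (1u & TPD_CCSUM_EN_MASK) << TPD_CCSUM_EN_SHIFT;
	word1 |= ((u32)csum_offset & TPD_CCSUM_OFFSET_MASK) <<
		 TPD_CCSUM_OFFSET_SHIFT;

	return word1;
}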
+#define TPD_MSS_SHIFT 18 + +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +struct atl1c_tpd_desc { + __le16 buffer_len; /* include 4-byte CRC */ + __le16 vlan_tag; + __le32 word1; + __le64 buffer_addr; +}; + +struct atl1c_tpd_ext_desc { + u32 reservd_0; + __le32 word1; + __le32 pkt_len; + u32 reservd_1; +}; +/* rrs word 0 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_RX_RFD_CNT_MASK 0x000F +#define RRS_RX_RFD_CNT_SHIFT 16 +#define RRS_RX_RFD_INDEX_MASK 0x0FFF +#define RRS_RX_RFD_INDEX_SHIFT 20 + +/* rrs flag bit 0:16 */ +#define RRS_HEAD_LEN_MASK 0x00FF +#define RRS_HEAD_LEN_SHIFT 0 +#define RRS_HDS_TYPE_MASK 0x0003 +#define RRS_HDS_TYPE_SHIFT 8 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 10 +#define RRS_HASH_FLG_MASK 0x000F +#define RRS_HASH_FLG_SHIFT 12 + +#define RRS_HDS_TYPE_HEAD 1 +#define RRS_HDS_TYPE_DATA 2 + +#define RRS_IS_NO_HDS_TYPE(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0) + +#define RRS_IS_HDS_HEAD(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_HEAD) + +#define RRS_IS_HDS_DATA(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_DATA) + +/* rrs word 3 bit 0:31 */ +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 0 +#define RRS_ERR_L4_CSUM_MASK 0x0001 +#define RRS_ERR_L4_CSUM_SHIFT 14 +#define RRS_ERR_IP_CSUM_MASK 0x0001 +#define RRS_ERR_IP_CSUM_SHIFT 15 +#define RRS_VLAN_INS_MASK 0x0001 +#define RRS_VLAN_INS_SHIFT 16 +#define RRS_PROT_ID_MASK 0x0007 +#define RRS_PROT_ID_SHIFT 17 +#define RRS_RX_ERR_SUM_MASK 0x0001 +#define RRS_RX_ERR_SUM_SHIFT 20 +#define RRS_RX_ERR_CRC_MASK 0x0001 +#define RRS_RX_ERR_CRC_SHIFT 21 +#define RRS_RX_ERR_FAE_MASK 0x0001 +#define RRS_RX_ERR_FAE_SHIFT 22 +#define RRS_RX_ERR_TRUNC_MASK 0x0001 +#define RRS_RX_ERR_TRUNC_SHIFT 23 +#define RRS_RX_ERR_RUNC_MASK 0x0001 +#define RRS_RX_ERR_RUNC_SHIFT 24 +#define RRS_RX_ERR_ICMP_MASK 0x0001 +#define RRS_RX_ERR_ICMP_SHIFT 25 +#define RRS_PACKET_BCAST_MASK 0x0001 +#define RRS_PACKET_BCAST_SHIFT 26 +#define RRS_PACKET_MCAST_MASK 0x0001 +#define RRS_PACKET_MCAST_SHIFT 27 +#define RRS_PACKET_TYPE_MASK 0x0001 +#define RRS_PACKET_TYPE_SHIFT 28 +#define RRS_FIFO_FULL_MASK 0x0001 +#define RRS_FIFO_FULL_SHIFT 29 +#define RRS_802_3_LEN_ERR_MASK 0x0001 +#define RRS_802_3_LEN_ERR_SHIFT 30 +#define RRS_RXD_UPDATED_MASK 0x0001 +#define RRS_RXD_UPDATED_SHIFT 31 + +#define RRS_ERR_L4_CSUM 0x00004000 +#define RRS_ERR_IP_CSUM 0x00008000 +#define RRS_VLAN_INS 0x00010000 +#define RRS_RX_ERR_SUM 0x00100000 +#define RRS_RX_ERR_CRC 0x00200000 +#define RRS_802_3_LEN_ERR 0x40000000 +#define RRS_RXD_UPDATED 0x80000000 + +#define RRS_PACKET_TYPE_802_3 1 +#define RRS_PACKET_TYPE_ETH 0 +#define RRS_PACKET_IS_ETH(word) \ + ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \ + RRS_PACKET_TYPE_ETH) +#define RRS_RXD_IS_VALID(word) \ + ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1) + +#define RRS_PACKET_PROT_IS_IPV4_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 1) +#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6) + +struct atl1c_recv_ret_status { + __le32 word0; + __le32 rss_hash; + __le16 vlan_tag; + __le16 flag; + __le32 word3; +}; + +/* RFD descriptor */ +struct atl1c_rx_free_desc { + __le64 buffer_addr; +}; + +/* DMA Order Settings */ +enum atl1c_dma_order { + atl1c_dma_ord_in = 1, + atl1c_dma_ord_enh = 2, + atl1c_dma_ord_out = 4 +}; + +enum 
atl1c_dma_rcb { + atl1c_rcb_64 = 0, + atl1c_rcb_128 = 1 +}; + +enum atl1c_mac_speed { + atl1c_mac_speed_0 = 0, + atl1c_mac_speed_10_100 = 1, + atl1c_mac_speed_1000 = 2 +}; + +enum atl1c_dma_req_block { + atl1c_dma_req_128 = 0, + atl1c_dma_req_256 = 1, + atl1c_dma_req_512 = 2, + atl1c_dma_req_1024 = 3, + atl1c_dma_req_2048 = 4, + atl1c_dma_req_4096 = 5 +}; + + +enum atl1c_nic_type { + athr_l1c = 0, + athr_l2c = 1, + athr_l2c_b, + athr_l2c_b2, + athr_l1d, + athr_l1d_2, +}; + +enum atl1c_trans_queue { + atl1c_trans_normal = 0, + atl1c_trans_high = 1 +}; + +struct atl1c_hw_stats { + /* rx */ + unsigned long rx_ok; /* The number of good packet received. */ + unsigned long rx_bcast; /* The number of good broadcast packet received. */ + unsigned long rx_mcast; /* The number of good multicast packet received. */ + unsigned long rx_pause; /* The number of Pause packet received. */ + unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */ + unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ + unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */ + unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */ + unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */ + unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + unsigned long rx_align_err; /* Alignment Error */ + unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + unsigned long tx_ok; /* The number of good packet transmitted. */ + unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */ + unsigned long tx_mcast; /* The number of good multicast packet transmitted. */ + unsigned long tx_pause; /* The number of Pause packet transmitted. */ + unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. 
*/ + unsigned long tx_defer; /* The number of packets transmitted that is deferred. */ + unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ + unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */ + unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ + unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */ + unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. 
*/ +}; + +struct atl1c_hw { + u8 __iomem *hw_addr; /* inner register address */ + struct atl1c_adapter *adapter; + enum atl1c_nic_type nic_type; + enum atl1c_dma_order dma_order; + enum atl1c_dma_rcb rcb_value; + enum atl1c_dma_req_block dmar_block; + + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 phy_id1; + u16 phy_id2; + + u32 intr_mask; + + u8 preamble_len; + u16 max_frame_size; + u16 min_frame_size; + + enum atl1c_mac_speed mac_speed; + bool mac_duplex; + bool hibernate; + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 tx_imt; /* TX Interrupt Moderator timer ( 2us resolution) */ + u16 rx_imt; /* RX Interrupt Moderator timer ( 2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u16 ctrl_flags; +#define ATL1C_INTR_CLEAR_ON_READ 0x0001 +#define ATL1C_INTR_MODRT_ENABLE 0x0002 +#define ATL1C_CMB_ENABLE 0x0004 +#define ATL1C_SMB_ENABLE 0x0010 +#define ATL1C_TXQ_MODE_ENHANCE 0x0020 +#define ATL1C_RX_IPV6_CHKSUM 0x0040 +#define ATL1C_ASPM_L0S_SUPPORT 0x0080 +#define ATL1C_ASPM_L1_SUPPORT 0x0100 +#define ATL1C_ASPM_CTRL_MON 0x0200 +#define ATL1C_HIB_DISABLE 0x0400 +#define ATL1C_APS_MODE_ENABLE 0x0800 +#define ATL1C_LINK_EXT_SYNC 0x1000 +#define ATL1C_CLK_GATING_EN 0x2000 +#define ATL1C_FPGA_VERSION 0x8000 + u16 link_cap_flags; +#define ATL1C_LINK_CAP_1000M 0x0001 + u32 smb_timer; + + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ + u8 rfd_burst; + u32 base_cpu; + u32 indirect_tab; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; + bool msi_lnkpatch; /* link patch for specific platforms */ +}; + +/* + * atl1c_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) described below + */ +struct atl1c_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; + +/* + * atl1c_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct atl1c_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 flags; /* information of buffer */ +#define ATL1C_BUFFER_FREE 0x0001 +#define ATL1C_BUFFER_BUSY 0x0002 +#define ATL1C_BUFFER_STATE_MASK 0x0003 + +#define ATL1C_PCIMAP_SINGLE 0x0004 +#define ATL1C_PCIMAP_PAGE 0x0008 +#define ATL1C_PCIMAP_TYPE_MASK 0x000C + +#define ATL1C_PCIMAP_TODEVICE 0x0010 +#define ATL1C_PCIMAP_FROMDEVICE 0x0020 +#define ATL1C_PCIMAP_DIRECTION_MASK 0x0030 + dma_addr_t dma; +}; + +#define ATL1C_SET_BUFFER_STATE(buff, state) do { \ + ((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK; \ + ((buff)->flags) |= (state); \ + } while (0) + +#define ATL1C_SET_PCIMAP_TYPE(buff, type, direction) do { \ + ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \ + ((buff)->flags) |= (type); \ + ((buff)->flags) &= ~ATL1C_PCIMAP_DIRECTION_MASK; \ + ((buff)->flags) |= (direction); \ + } while (0) + +/* transimit packet descriptor (tpd) ring */ +struct atl1c_tpd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* 
descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; /* this is protectd by adapter->tx_lock */ + atomic_t next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive free descriptor (rfd) ring */ +struct atl1c_rfd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive return descriptor (rrd) ring */ +struct atl1c_rrd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; +}; + +/* board specific private data structure */ +struct atl1c_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct napi_struct napi; + struct page *rx_page; + unsigned int rx_page_offset; + unsigned int rx_frag_size; + struct atl1c_hw hw; + struct atl1c_hw_stats hw_stats; + struct mii_if_info mii; /* MII interface info */ + u16 rx_buffer_len; + + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + unsigned long work_event; +#define ATL1C_WORK_EVENT_RESET 0 +#define ATL1C_WORK_EVENT_LINK_CHANGE 1 + u32 msg_enable; + + bool have_msi; + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t irq_sem; + + struct work_struct common_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + struct atl1c_ring_header ring_header; + struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; + struct atl1c_rfd_ring rfd_ring; + struct atl1c_rrd_ring rrd_ring; + u32 bd_number; /* board number;*/ +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readl((a)->hw_addr + reg); \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } else { \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } \ + } while (0) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readw((a)->hw_addr + reg); \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } else { \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } \ + } while (0) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern char atl1c_driver_name[]; +extern char atl1c_driver_version[]; + +void atl1c_reinit_locked(struct atl1c_adapter *adapter); +s32 atl1c_reset_hw(struct atl1c_hw *hw); +void atl1c_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1C_H_ */ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c new file mode 100644 index 000000000..48694c239 --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c @@ -0,0 +1,309 @@ 
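The descriptor rings declared in atl1c.h are walked through the ATL1C_GET_DESC family of lookup macros together with the next_to_use/next_to_clean indices. A minimal, purely illustrative sketch of that convention for the transmit ring follows; the helper name is hypothetical and error handling is omitted.

/* Illustrative sketch only: hand out and clear the descriptor at
 * next_to_use, advancing the index with wrap-around at ring->count.
 */
static struct atl1c_tpd_desc *example_next_tpd(struct atl1c_tpd_ring *ring)
{
	struct atl1c_tpd_desc *tpd = ATL1C_TPD_DESC(ring, ring->next_to_use);

	if (++ring->next_to_use == ring->count)
		ring->next_to_use = 0;

	memset(tpd, 0, sizeof(*tpd));
	return tpd;
}

In the same spirit, the matching atl1c_buffer slot in the ring's buffer_info array is where a DMA handle would be remembered so it can be unmapped once the hardware reports completion.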
+/* + * Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include +#include +#include + +#include "atl1c.h" + +static int atl1c_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl1c_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u16 autoneg_advertised; + + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + autoneg_advertised = ADVERTISED_Autoneg; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + if (speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "1000M half is invalid\n"); + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + autoneg_advertised = ADVERTISED_1000baseT_Full; + } else if (speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) + autoneg_advertised = ADVERTISED_100baseT_Full; + else + autoneg_advertised = ADVERTISED_100baseT_Half; + } else { + if (ecmd->duplex == DUPLEX_FULL) + autoneg_advertised = ADVERTISED_10baseT_Full; + else + autoneg_advertised = ADVERTISED_10baseT_Half; + } + } + + if (hw->autoneg_advertised != autoneg_advertised) { + hw->autoneg_advertised = autoneg_advertised; + if (atl1c_restart_autoneg(hw) != 0) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "ethtool speed/duplex setting failed\n"); + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + } + clear_bit(__AT_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl1c_get_msglevel(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void 
atl1c_set_msglevel(struct net_device *netdev, u32 data) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int atl1c_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN; +} + +static void atl1c_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN); + + regs->version = 1; + AT_READ_REG(hw, REG_PM_CTRL, p++); + AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); + AT_READ_REG(hw, REG_TWSI_CTRL, p++); + AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++); + AT_READ_REG(hw, REG_MASTER_CTRL, p++); + AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++); + AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++); + AT_READ_REG(hw, REG_GPHY_CTRL, p++); + AT_READ_REG(hw, REG_LINK_CTRL, p++); + AT_READ_REG(hw, REG_IDLE_STATUS, p++); + AT_READ_REG(hw, REG_MDIO_CTRL, p++); + AT_READ_REG(hw, REG_SERDES, p++); + AT_READ_REG(hw, REG_MAC_CTRL, p++); + AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++); + AT_READ_REG(hw, REG_RXQ_CTRL, p++); + AT_READ_REG(hw, REG_TXQ_CTRL, p++); + AT_READ_REG(hw, REG_MTU, p++); + AT_READ_REG(hw, REG_WOL_CTRL, p++); + + atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data; + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data; +} + +static int atl1c_get_eeprom_len(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (atl1c_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1c_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (!atl1c_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | + (adapter->pdev->device << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; + return 0; +} + +static void atl1c_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1c_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl1c_get_regs_len(netdev); + drvinfo->eedump_len = atl1c_get_eeprom_len(netdev); +} + +static void atl1c_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + 
wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1c_nway_reset(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1c_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1c_ethtool_ops = { + .get_settings = atl1c_get_settings, + .set_settings = atl1c_set_settings, + .get_drvinfo = atl1c_get_drvinfo, + .get_regs_len = atl1c_get_regs_len, + .get_regs = atl1c_get_regs, + .get_wol = atl1c_get_wol, + .set_wol = atl1c_set_wol, + .get_msglevel = atl1c_get_msglevel, + .set_msglevel = atl1c_set_msglevel, + .nway_reset = atl1c_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1c_get_eeprom_len, + .get_eeprom = atl1c_get_eeprom, +}; + +void atl1c_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &atl1c_ethtool_ops; +} diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c new file mode 100644 index 000000000..a8b80c56a --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -0,0 +1,863 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include +#include +#include +#include + +#include "atl1c.h" + +/* + * check_eeprom_exist + * return 1 if eeprom exist + */ +int atl1c_check_eeprom_exist(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_TWSI_DEBUG, &data); + if (data & TWSI_DEBUG_DEV_EXIST) + return 1; + + AT_READ_REG(hw, REG_MASTER_CTRL, &data); + if (data & MASTER_CTRL_OTP_SEL) + return 1; + return 0; +} + +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = mac_addr[2] << 24 | + mac_addr[3] << 16 | + mac_addr[4] << 8 | + mac_addr[5]; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* hight dword */ + value = mac_addr[0] << 8 | + mac_addr[1]; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* read mac address from hardware register */ +static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr) +{ + u32 addr[2]; + + AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]); + AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]); + + *(u32 *) ð_addr[2] = htonl(addr[0]); + *(u16 *) ð_addr[0] = htons((u16)addr[1]); + + return is_valid_ether_addr(eth_addr); +} + +/* + * atl1c_get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1c_get_permanent_address(struct atl1c_hw *hw) +{ + u32 i; + u32 otp_ctrl_data; + u32 twsi_ctrl_data; + u16 phy_data; + bool raise_vol = false; + + /* MAC-address from BIOS is the 1st priority */ + if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) + return 0; + + /* init */ + AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); + if (atl1c_check_eeprom_exist(hw)) { + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { + /* Enable OTP CLK */ + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { + otp_ctrl_data |= OTP_CTRL_CLK_EN; + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + AT_WRITE_FLUSH(hw); + msleep(1); + } + } + /* raise voltage temporally for l2cb */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); + phy_data &= ~ANACTRL_HB_EN; + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data |= VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + udelay(20); + raise_vol = true; + } + + AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); + twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; + AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); + for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { + msleep(10); + AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); + if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) + break; + } + if (i >= AT_TWSI_EEPROM_TIMEOUT) + return -1; + } + /* Disable OTP_CLK */ + if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { + otp_ctrl_data &= ~OTP_CTRL_CLK_EN; + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + msleep(1); + } + if (raise_vol) { + atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); + phy_data |= ANACTRL_HB_EN; + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data &= ~VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + udelay(20); + } + + if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) + return 0; + + return -1; +} + +bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value) +{ + int i; + bool ret = false; + u32 otp_ctrl_data; + u32 control; + u32 data; + + if (offset & 3) + return ret; /* address do not align */ + + AT_READ_REG(hw, REG_OTP_CTRL, 
&otp_ctrl_data); + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) + AT_WRITE_REG(hw, REG_OTP_CTRL, + (otp_ctrl_data | OTP_CTRL_CLK_EN)); + + AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0); + control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT; + AT_WRITE_REG(hw, REG_EEPROM_CTRL, control); + + for (i = 0; i < 10; i++) { + udelay(100); + AT_READ_REG(hw, REG_EEPROM_CTRL, &control); + if (control & EEPROM_CTRL_RW) + break; + } + if (control & EEPROM_CTRL_RW) { + AT_READ_REG(hw, REG_EEPROM_CTRL, &data); + AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value); + data = data & 0xFFFF; + *p_value = swab32((data << 16) | (*p_value >> 16)); + ret = true; + } + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + + return ret; +} +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +int atl1c_read_mac_addr(struct atl1c_hw *hw) +{ + int err = 0; + + err = atl1c_get_permanent_address(hw); + if (err) + eth_random_addr(hw->perm_mac_addr); + + memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); + return err; +} + +/* + * atl1c_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr) +{ + u32 crc32; + u32 value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper bit of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
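+ * (The code below takes the register index from bit 31 and the bit
+ * index from the five bits just under it, bits 30:26.)  For example,
+ * a hash_value of 0xB0000000 has bit 31 set and bits 30:26 equal to
+ * 0xC, so hash_reg = 1 and hash_bit = 12: bit 12 of the second
+ * (upper) hash-table register gets set.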
+ */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} + +/* + * wait mdio module be idle + * return true: idle + * false: still busy + */ +bool atl1c_wait_mdio_idle(struct atl1c_hw *hw) +{ + u32 val; + int i; + + for (i = 0; i < MDIO_MAX_AC_TO; i++) { + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START))) + break; + udelay(10); + } + + return i != MDIO_MAX_AC_TO; +} + +void atl1c_stop_phy_polling(struct atl1c_hw *hw) +{ + if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) + return; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, 0); + atl1c_wait_mdio_idle(hw); +} + +void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel) +{ + u32 val; + + if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) + return; + + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_REG, 1) | + MDIO_CTRL_START | + MDIO_CTRL_OP_READ; + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + atl1c_wait_mdio_idle(hw); + val |= MDIO_CTRL_AP_EN; + val &= ~MDIO_CTRL_START; + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + udelay(30); +} + + +/* + * atl1c_read_phy_core + * core function to read register in PHY via MDIO control regsiter. + * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: reg to read + */ +int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data) +{ + u32 val; + u16 clk_sel = MDIO_CTRL_CLK_25_4; + + atl1c_stop_phy_polling(hw); + + *phy_data = 0; + + /* only l2c_b2 & l1d_2 could use slow clock */ + if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && + hw->hibernate) + clk_sel = MDIO_CTRL_CLK_25_128; + if (ext) { + val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); + AT_WRITE_REG(hw, REG_MDIO_EXTN, val); + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + MDIO_CTRL_START | + MDIO_CTRL_MODE_EXT | + MDIO_CTRL_OP_READ; + } else { + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_REG, reg) | + MDIO_CTRL_START | + MDIO_CTRL_OP_READ; + } + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + if (!atl1c_wait_mdio_idle(hw)) + return -1; + + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + *phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA); + + atl1c_start_phy_polling(hw, clk_sel); + + return 0; +} + +/* + * atl1c_write_phy_core + * core function to write to register in PHY via MDIO control register. 
+ * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: reg to write + */ +int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val; + u16 clk_sel = MDIO_CTRL_CLK_25_4; + + atl1c_stop_phy_polling(hw); + + + /* only l2c_b2 & l1d_2 could use slow clock */ + if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && + hw->hibernate) + clk_sel = MDIO_CTRL_CLK_25_128; + + if (ext) { + val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); + AT_WRITE_REG(hw, REG_MDIO_EXTN, val); + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + MDIO_CTRL_START | + MDIO_CTRL_MODE_EXT; + } else { + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + FIELDX(MDIO_CTRL_REG, reg) | + MDIO_CTRL_START; + } + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + if (!atl1c_wait_mdio_idle(hw)) + return -1; + + atl1c_start_phy_polling(hw, clk_sel); + + return 0; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data); +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) +{ + return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data); +} + +/* read from PHY extension register */ +int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} + +/* write to PHY extension register */ +int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 phy_data) +{ + return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} + +int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + int err; + + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data); + + return err; +} + +int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data) +{ + int err; + + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + + return err; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1c_phy_setup_adv(struct atl1c_hw *hw) +{ + u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL; + u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & + ~GIGA_CR_1000T_SPEED_MASK; + + if (hw->autoneg_advertised & ADVERTISED_10baseT_Half) + mii_adv_data |= ADVERTISE_10HALF; + if (hw->autoneg_advertised & ADVERTISED_10baseT_Full) + mii_adv_data |= ADVERTISE_10FULL; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Half) + mii_adv_data |= ADVERTISE_100HALF; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Full) + mii_adv_data |= ADVERTISE_100FULL; + + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL; + + if 
(hw->link_cap_flags & ATL1C_LINK_CAP_1000M) { + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half) + mii_giga_ctrl_data |= ADVERTISE_1000HALF; + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full) + mii_giga_ctrl_data |= ADVERTISE_1000FULL; + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_giga_ctrl_data |= ADVERTISE_1000HALF | + ADVERTISE_1000FULL; + } + + if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || + atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0) + return -1; + return 0; +} + +void atl1c_phy_disable(struct atl1c_hw *hw) +{ + atl1c_power_saving(hw, 0); +} + + +int atl1c_phy_reset(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u16 phy_data; + u32 phy_ctrl_data, lpi_ctrl; + int err; + + /* reset PHY core */ + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data); + phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ | + GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS); + phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST; + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) + phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); + else + phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); + AT_WRITE_FLUSH(hw); + udelay(10); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET); + AT_WRITE_FLUSH(hw); + udelay(10 * GPHY_CTRL_EXT_RST_TO); /* delay 800us */ + + /* switch clock */ + if (hw->nic_type == athr_l2c_b) { + atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD, + phy_data & ~CFGLPSPD_RSTCNT_CLK125SW); + } + + /* tx-half amplitude issue fix */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data); + phy_data |= CABLE1TH_DET_EN; + atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data); + } + + /* clear bit3 of dbgport 3B to lower voltage */ + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) { + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data &= ~VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + } + /* power saving config */ + phy_data = + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ? 
+ L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data); + /* hib */ + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + } else { + /* disable pws */ + atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, + phy_data & ~LEGCYPS_EN); + /* disable hibernate */ + atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG, + phy_data & HIBNEG_PSHIB_EN); + } + /* disable AZ(EEE) by default */ + if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 || + hw->nic_type == athr_l2c_b2) { + AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl); + AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN); + atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0); + atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3, + L2CB_CLDCTRL3); + } + + /* other debug port to set */ + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF); + /* UNH-IOL test issue, set bit7 */ + atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG, + TST100BTCFG_DEF | TST100BTCFG_LITCH_EN); + + /* set phy interrupt mask */ + phy_data = IER_LINK_UP | IER_LINK_DOWN; + err = atl1c_write_phy_reg(hw, MII_IER, phy_data); + if (err) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Error enable PHY linkChange Interrupt\n"); + return err; + } + return 0; +} + +int atl1c_phy_init(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret_val; + u16 mii_bmcr_data = BMCR_RESET; + + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || + (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { + dev_err(&pdev->dev, "Error get phy ID\n"); + return -1; + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ret_val = atl1c_phy_setup_adv(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, + "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + break; + case MEDIA_TYPE_100M_FULL: + mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX; + break; + case MEDIA_TYPE_100M_HALF: + mii_bmcr_data |= BMCR_SPEED100; + break; + case MEDIA_TYPE_10M_FULL: + mii_bmcr_data |= BMCR_FULLDPLX; + break; + case MEDIA_TYPE_10M_HALF: + break; + default: + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, "Wrong Media type %d\n", + hw->media_type); + return -1; + } + + ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); + if (ret_val) + return ret_val; + hw->phy_configured = true; + + return 0; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
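+ * For example, a MII_GIGA_PSSR value of 0xA800 has the resolved bit
+ * (0x0800) set, speed bits 15:14 = 10b (1000 Mb/s) and the duplex bit
+ * (0x2000) set, so the function reports *speed = SPEED_1000 and
+ * *duplex = FULL_DUPLEX.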
+ * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED)) + return -1; + + switch (phy_data & GIGA_PSSR_SPEED) { + case GIGA_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case GIGA_PSSR_100MBS: + *speed = SPEED_100; + break; + case GIGA_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return -1; + } + + if (phy_data & GIGA_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +/* select one link mode to get lower power consumption */ +int atl1c_phy_to_ps_link(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret = 0; + u16 autoneg_advertised = ADVERTISED_10baseT_Half; + u16 save_autoneg_advertised; + u16 phy_data; + u16 mii_lpa_data; + u16 speed = SPEED_0; + u16 duplex = FULL_DUPLEX; + int i; + + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data); + if (mii_lpa_data & LPA_10FULL) + autoneg_advertised = ADVERTISED_10baseT_Full; + else if (mii_lpa_data & LPA_10HALF) + autoneg_advertised = ADVERTISED_10baseT_Half; + else if (mii_lpa_data & LPA_100HALF) + autoneg_advertised = ADVERTISED_100baseT_Half; + else if (mii_lpa_data & LPA_100FULL) + autoneg_advertised = ADVERTISED_100baseT_Full; + + save_autoneg_advertised = hw->autoneg_advertised; + hw->phy_configured = false; + hw->autoneg_advertised = autoneg_advertised; + if (atl1c_restart_autoneg(hw) != 0) { + dev_dbg(&pdev->dev, "phy autoneg failed\n"); + ret = -1; + } + hw->autoneg_advertised = save_autoneg_advertised; + + if (mii_lpa_data) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + mdelay(100); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + if (atl1c_get_speed_and_duplex(hw, &speed, + &duplex) != 0) + dev_dbg(&pdev->dev, + "get speed and duplex failed\n"); + break; + } + } + } + } else { + speed = SPEED_10; + duplex = HALF_DUPLEX; + } + adapter->link_speed = speed; + adapter->link_duplex = duplex; + + return ret; +} + +int atl1c_restart_autoneg(struct atl1c_hw *hw) +{ + int err = 0; + u16 mii_bmcr_data = BMCR_RESET; + + err = atl1c_phy_setup_adv(hw); + if (err) + return err; + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + + return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); +} + +int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u32 master_ctrl, mac_ctrl, phy_ctrl; + u32 wol_ctrl, speed; + u16 phy_data; + + wol_ctrl = 0; + speed = adapter->link_speed == SPEED_1000 ? 
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100; + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl); + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl); + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl); + + master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS; + mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed); + mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN); + if (adapter->link_duplex == FULL_DUPLEX) + mac_ctrl |= MAC_CTRL_DUPLX; + phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS); + phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE | + GPHY_CTRL_HIB_EN; + if (!wufc) { /* without WoL */ + master_ctrl |= MASTER_CTRL_CLK_SEL_DIS; + phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + hw->phy_configured = false; /* re-init PHY when resume */ + return 0; + } + phy_ctrl |= GPHY_CTRL_EXT_RESET; + if (wufc & AT_WUFC_MAG) { + mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN; + wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11) + wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN; + } + if (wufc & AT_WUFC_LNKC) { + wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { + dev_dbg(&pdev->dev, "%s: write phy MII_IER failed.\n", + atl1c_driver_name); + } + } + /* clear PHY interrupt */ + atl1c_read_phy_reg(hw, MII_ISR, &phy_data); + + dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n", + atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl); + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl); + + return 0; +} + + +/* configure phy after Link change Event */ +void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed) +{ + u16 phy_val; + bool adj_thresh = false; + + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2) + adj_thresh = true; + + if (link_speed != SPEED_0) { /* link up */ + /* az with brcm, half-amp */ + if (hw->nic_type == athr_l1d_2) { + atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6, + &phy_val); + phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN); + phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ? + AZ_ANADECT_LONG : AZ_ANADECT_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val); + } + /* threshold adjust */ + if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP); + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + L1D_SYSMODCTRL_IECHOADJ_DEF); + } + } else { /* link down */ + if (adj_thresh && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, + L1D_MSE16DB_DOWN); + } + } +} diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h new file mode 100644 index 000000000..21d8c4dbd --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h @@ -0,0 +1,1024 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ATL1C_HW_H_ +#define _ATL1C_HW_H_ + +#include +#include + +#define FIELD_GETX(_x, _name) ((_x) >> (_name##_SHIFT) & (_name##_MASK)) +#define FIELD_SETX(_x, _name, _v) \ +(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\ +(((_v) & (_name##_MASK)) << (_name##_SHIFT))) +#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT)) + +struct atl1c_adapter; +struct atl1c_hw; + +/* function prototype */ +void atl1c_phy_disable(struct atl1c_hw *hw); +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr); +int atl1c_phy_reset(struct atl1c_hw *hw); +int atl1c_read_mac_addr(struct atl1c_hw *hw); +int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex); +u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr); +void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value); +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data); +bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value); +int atl1c_phy_init(struct atl1c_hw *hw); +int atl1c_check_eeprom_exist(struct atl1c_hw *hw); +int atl1c_restart_autoneg(struct atl1c_hw *hw); +int atl1c_phy_to_ps_link(struct atl1c_hw *hw); +int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc); +bool atl1c_wait_mdio_idle(struct atl1c_hw *hw); +void atl1c_stop_phy_polling(struct atl1c_hw *hw); +void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel); +int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data); +int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data); +int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 phy_data); +int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data); +void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed); + +/* hw-ids */ +#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 +#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063 +#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ +#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */ +#define L2CB_V10 0xc0 +#define L2CB_V11 0xc1 +#define L2CB_V20 0xc0 +#define L2CB_V21 0xc1 + +/* register definition */ +#define REG_DEVICE_CAP 0x5C +#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 + +#define DEVICE_CTRL_MAXRRS_MIN 2 + +#define REG_LINK_CTRL 0x68 +#define LINK_CTRL_L0S_EN 0x01 +#define LINK_CTRL_L1_EN 0x02 +#define LINK_CTRL_EXT_SYNC 
0x80 + +#define REG_PCIE_IND_ACC_ADDR 0x80 +#define REG_PCIE_IND_ACC_DATA 0x84 + +#define REG_DEV_SERIALNUM_CTRL 0x200 +#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ +#define REG_DEV_MAC_SEL_SHIFT 0 +#define REG_DEV_SERIAL_NUM_EN_MASK 0x1 +#define REG_DEV_SERIAL_NUM_EN_SHIFT 1 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTLR_FREQ_MASK 0x3UL +#define TWSI_CTRL_FREQ_SHIFT 24 +#define TWSI_CTRL_FREQ_100K 0 +#define TWSI_CTRL_FREQ_200K 1 +#define TWSI_CTRL_FREQ_300K 2 +#define TWSI_CTRL_FREQ_400K 3 +#define TWSI_CTRL_LD_EXIST BIT(23) +#define TWSI_CTRL_HW_LDSTAT BIT(12) /* 0:finish,1:in progress */ +#define TWSI_CTRL_SW_LDSTART BIT(11) +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET BIT(2) +#define PCIE_PHYMISC_NFTS_MASK 0xFFUL +#define PCIE_PHYMISC_NFTS_SHIFT 16 + +#define REG_PCIE_PHYMISC2 0x1004 +#define PCIE_PHYMISC2_L0S_TH_MASK 0x3UL +#define PCIE_PHYMISC2_L0S_TH_SHIFT 18 +#define L2CB1_PCIE_PHYMISC2_L0S_TH 3 +#define PCIE_PHYMISC2_CDR_BW_MASK 0x3UL +#define PCIE_PHYMISC2_CDR_BW_SHIFT 16 +#define L2CB1_PCIE_PHYMISC2_CDR_BW 3 + +#define REG_TWSI_DEBUG 0x1108 +#define TWSI_DEBUG_DEV_EXIST BIT(29) + +#define REG_DMA_DBG 0x1114 +#define DMA_DBG_VENDOR_MSG BIT(0) + +#define REG_EEPROM_CTRL 0x12C0 +#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF +#define EEPROM_CTRL_DATA_HI_SHIFT 0 +#define EEPROM_CTRL_ADDR_MASK 0x3FF +#define EEPROM_CTRL_ADDR_SHIFT 16 +#define EEPROM_CTRL_ACK 0x40000000 +#define EEPROM_CTRL_RW 0x80000000 + +#define REG_EEPROM_DATA_LO 0x12C4 + +#define REG_OTP_CTRL 0x12F0 +#define OTP_CTRL_CLK_EN BIT(1) + +#define REG_PM_CTRL 0x12F8 +#define PM_CTRL_HOTRST BIT(31) +#define PM_CTRL_MAC_ASPM_CHK BIT(30) /* L0s/L1 dis by MAC based on + * thrghput(setting in 15A0) */ +#define PM_CTRL_SA_DLY_EN BIT(29) +#define PM_CTRL_L0S_BUFSRX_EN BIT(28) +#define PM_CTRL_LCKDET_TIMER_MASK 0xFUL +#define PM_CTRL_LCKDET_TIMER_SHIFT 24 +#define PM_CTRL_LCKDET_TIMER_DEF 0xC +#define PM_CTRL_PM_REQ_TIMER_MASK 0xFUL +#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 /* pm_request_l1 time > @ + * ->L0s not L1 */ +#define PM_CTRL_PM_REQ_TO_DEF 0xF +#define PMCTRL_TXL1_AFTER_L0S BIT(19) /* l1dv2.0+ */ +#define L1D_PMCTRL_L1_ENTRY_TM_MASK 7UL /* l1dv2.0+, 3bits */ +#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT 16 +#define L1D_PMCTRL_L1_ENTRY_TM_DIS 0 +#define L1D_PMCTRL_L1_ENTRY_TM_2US 1 +#define L1D_PMCTRL_L1_ENTRY_TM_4US 2 +#define L1D_PMCTRL_L1_ENTRY_TM_8US 3 +#define L1D_PMCTRL_L1_ENTRY_TM_16US 4 +#define L1D_PMCTRL_L1_ENTRY_TM_24US 5 +#define L1D_PMCTRL_L1_ENTRY_TM_32US 6 +#define L1D_PMCTRL_L1_ENTRY_TM_63US 7 +#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xFUL /* l1C 4bits */ +#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 +#define L2CB1_PM_CTRL_L1_ENTRY_TM 7 +#define L1C_PM_CTRL_L1_ENTRY_TM 0xF +#define PM_CTRL_RCVR_WT_TIMER BIT(15) /* 1:1us, 0:2ms */ +#define PM_CTRL_CLK_PWM_VER1_1 BIT(14) /* 0:1.0a,1:1.1 */ +#define PM_CTRL_CLK_SWH_L1 BIT(13) /* en pcie clk sw in L1 */ +#define PM_CTRL_ASPM_L0S_EN BIT(12) +#define PM_CTRL_RXL1_AFTER_L0S BIT(11) /* l1dv2.0+ */ +#define L1D_PMCTRL_L0S_TIMER_MASK 7UL /* l1d2.0+, 3bits*/ +#define L1D_PMCTRL_L0S_TIMER_SHIFT 8 +#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xFUL /* l1c, 4bits */ +#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8 +#define PM_CTRL_SERDES_BUFS_RX_L1_EN BIT(7) 
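The register layout in this header relies on the _MASK/_SHIFT naming convention consumed by the FIELD_GETX/FIELD_SETX/FIELDX helpers defined near the top of the file. The following is a minimal, self-contained userspace sketch of how those helpers pack, extract and update a field, using the PM_CTRL lock-detect timer field from the block above; the macros and field values are copied from this header, while main() and the printed values are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define FIELD_GETX(_x, _name)	((_x) >> (_name##_SHIFT) & (_name##_MASK))
#define FIELD_SETX(_x, _name, _v) \
	(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) | \
	 (((_v) & (_name##_MASK)) << (_name##_SHIFT)))
#define FIELDX(_name, _v)	(((_v) & (_name##_MASK)) << (_name##_SHIFT))

/* field layout copied from the REG_PM_CTRL definitions above */
#define PM_CTRL_LCKDET_TIMER_MASK	0xFUL
#define PM_CTRL_LCKDET_TIMER_SHIFT	24
#define PM_CTRL_LCKDET_TIMER_DEF	0xC

int main(void)
{
	uint32_t reg = 0;

	/* pack: place the default value into bits 27:24 */
	reg |= FIELDX(PM_CTRL_LCKDET_TIMER, PM_CTRL_LCKDET_TIMER_DEF);
	printf("packed    0x%08x\n", (unsigned int)reg);	/* 0x0c000000 */

	/* extract: recover the 4-bit field */
	printf("extracted 0x%lx\n",
	       (unsigned long)FIELD_GETX(reg, PM_CTRL_LCKDET_TIMER));	/* 0xc */

	/* read-modify-write: replace the field, leave the other bits alone */
	reg = FIELD_SETX(reg, PM_CTRL_LCKDET_TIMER, 0x3);
	printf("updated   0x%08x\n", (unsigned int)reg);	/* 0x03000000 */
	return 0;
}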
+#define PM_CTRL_SERDES_PD_EX_L1 BIT(6) /* power down serdes rx */ +#define PM_CTRL_SERDES_PLL_L1_EN BIT(5) +#define PM_CTRL_SERDES_L1_EN BIT(4) +#define PM_CTRL_ASPM_L1_EN BIT(3) +#define PM_CTRL_CLK_REQ_EN BIT(2) +#define PM_CTRL_RBER_EN BIT(1) +#define PM_CTRL_SPRSDWER_EN BIT(0) + +#define REG_LTSSM_ID_CTRL 0x12FC +#define LTSSM_ID_EN_WRO 0x1000 + + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_OTP_SEL BIT(31) +#define MASTER_DEV_NUM_MASK 0x7FUL +#define MASTER_DEV_NUM_SHIFT 24 +#define MASTER_REV_NUM_MASK 0xFFUL +#define MASTER_REV_NUM_SHIFT 16 +#define MASTER_CTRL_INT_RDCLR BIT(14) +#define MASTER_CTRL_CLK_SEL_DIS BIT(12) /* 1:alwys sel pclk from + * serdes, not sw to 25M */ +#define MASTER_CTRL_RX_ITIMER_EN BIT(11) /* IRQ MODURATION FOR RX */ +#define MASTER_CTRL_TX_ITIMER_EN BIT(10) /* MODURATION FOR TX/RX */ +#define MASTER_CTRL_MANU_INT BIT(9) /* SOFT MANUAL INT */ +#define MASTER_CTRL_MANUTIMER_EN BIT(8) +#define MASTER_CTRL_SA_TIMER_EN BIT(7) /* SYS ALIVE TIMER EN */ +#define MASTER_CTRL_OOB_DIS BIT(6) /* OUT OF BOX DIS */ +#define MASTER_CTRL_WAKEN_25M BIT(5) /* WAKE WO. PCIE CLK */ +#define MASTER_CTRL_BERT_START BIT(4) +#define MASTER_PCIE_TSTMOD_MASK 3UL +#define MASTER_PCIE_TSTMOD_SHIFT 2 +#define MASTER_PCIE_RST BIT(1) +#define MASTER_CTRL_SOFT_RST BIT(0) /* RST MAC & DMA */ +#define DMA_MAC_RST_TO 50 + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODRT_TIMER_INIT 0x1408 +#define IRQ_MODRT_TIMER_MASK 0xffff +#define IRQ_MODRT_TX_TIMER_SHIFT 0 +#define IRQ_MODRT_RX_TIMER_SHIFT 16 + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_ADDR_MASK 0x1FUL +#define GPHY_CTRL_ADDR_SHIFT 19 +#define GPHY_CTRL_BP_VLTGSW BIT(18) +#define GPHY_CTRL_100AB_EN BIT(17) +#define GPHY_CTRL_10AB_EN BIT(16) +#define GPHY_CTRL_PHY_PLL_BYPASS BIT(15) +#define GPHY_CTRL_PWDOWN_HW BIT(14) /* affect MAC&PHY, to low pw */ +#define GPHY_CTRL_PHY_PLL_ON BIT(13) /* 1:pll always on, 0:can sw */ +#define GPHY_CTRL_SEL_ANA_RST BIT(12) +#define GPHY_CTRL_HIB_PULSE BIT(11) +#define GPHY_CTRL_HIB_EN BIT(10) +#define GPHY_CTRL_GIGA_DIS BIT(9) +#define GPHY_CTRL_PHY_IDDQ_DIS BIT(8) /* pw on RST */ +#define GPHY_CTRL_PHY_IDDQ BIT(7) /* bit8 affect bit7 while rb */ +#define GPHY_CTRL_LPW_EXIT BIT(6) +#define GPHY_CTRL_GATE_25M_EN BIT(5) +#define GPHY_CTRL_REV_ANEG BIT(4) +#define GPHY_CTRL_ANEG_NOW BIT(3) +#define GPHY_CTRL_LED_MODE BIT(2) +#define GPHY_CTRL_RTL_MODE BIT(1) +#define GPHY_CTRL_EXT_RESET BIT(0) /* 1:out of DSP RST status */ +#define GPHY_CTRL_EXT_RST_TO 80 /* 800us atmost */ +#define GPHY_CTRL_CLS (\ + GPHY_CTRL_LED_MODE |\ + GPHY_CTRL_100AB_EN |\ + GPHY_CTRL_PHY_PLL_ON) + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_SFORCE_MASK 0xFUL +#define IDLE_STATUS_SFORCE_SHIFT 14 +#define IDLE_STATUS_CALIB_DONE BIT(13) +#define IDLE_STATUS_CALIB_RES_MASK 0x1FUL +#define IDLE_STATUS_CALIB_RES_SHIFT 8 +#define IDLE_STATUS_CALIBERR_MASK 0xFUL +#define IDLE_STATUS_CALIBERR_SHIFT 4 +#define IDLE_STATUS_TXQ_BUSY BIT(3) +#define IDLE_STATUS_RXQ_BUSY BIT(2) +#define IDLE_STATUS_TXMAC_BUSY BIT(1) +#define IDLE_STATUS_RXMAC_BUSY BIT(0) +#define IDLE_STATUS_MASK (\ + IDLE_STATUS_TXQ_BUSY |\ + IDLE_STATUS_RXQ_BUSY |\ + IDLE_STATUS_TXMAC_BUSY |\ + IDLE_STATUS_RXMAC_BUSY) + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_CTRL_MODE_EXT BIT(30) +#define MDIO_CTRL_POST_READ BIT(29) +#define MDIO_CTRL_AP_EN BIT(28) 
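The MDIO_CTRL bit definitions that begin just above (and continue below) drive atl1c_read_phy_core()/atl1c_write_phy_core() in atl1c_hw.c. The sketch below is a minimal userspace model of the non-extension read path: compose the command word, start the transfer, poll until START/BUSY clear, then take the data from bits 15:0. The reg_read()/reg_write() toy accessors and the canned 0x796d result are stand-ins for the real AT_READ_REG/AT_WRITE_REG MMIO helpers and the PHY; the bit values are copied from this header.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
/* values copied from the MDIO_CTRL definitions in this header */
#define MDIO_CTRL_BUSY		BIT(27)
#define MDIO_CTRL_START		BIT(23)
#define MDIO_CTRL_SPRES_PRMBL	BIT(22)
#define MDIO_CTRL_OP_READ	BIT(21)
#define MDIO_CTRL_REG_SHIFT	16
#define MDIO_CTRL_DATA_MASK	0xFFFFu
#define MDIO_MAX_AC_TO		120

/* toy "device": the transfer completes immediately and returns 0x796d */
static uint32_t mdio_ctrl;
static void reg_write(uint32_t v) { mdio_ctrl = v; }
static uint32_t reg_read(void)
{
	return (mdio_ctrl & ~(MDIO_CTRL_BUSY | MDIO_CTRL_START |
			      MDIO_CTRL_DATA_MASK)) | 0x796d;
}

/* clause-22 style PHY register read, shaped like the non-extension
 * branch of atl1c_read_phy_core(): build the command, poll, extract */
static int mdio_read(uint8_t phy_reg, uint16_t *data)
{
	uint32_t val = MDIO_CTRL_SPRES_PRMBL |		/* suppress preamble */
		       ((uint32_t)(phy_reg & 0x1F) << MDIO_CTRL_REG_SHIFT) |
		       MDIO_CTRL_START |
		       MDIO_CTRL_OP_READ;		/* CLK_SEL field left at 25/4 (0) */
	int i;

	reg_write(val);
	for (i = 0; i < MDIO_MAX_AC_TO; i++) {
		val = reg_read();
		if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START)))
			break;
	}
	if (i == MDIO_MAX_AC_TO)
		return -1;				/* timed out */

	*data = (uint16_t)(val & MDIO_CTRL_DATA_MASK);
	return 0;
}

int main(void)
{
	uint16_t bmsr = 0;

	if (mdio_read(0x01 /* MII_BMSR */, &bmsr) == 0)
		printf("BMSR = 0x%04x\n", bmsr);
	return 0;
}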
+#define MDIO_CTRL_BUSY BIT(27) +#define MDIO_CTRL_CLK_SEL_MASK 0x7UL +#define MDIO_CTRL_CLK_SEL_SHIFT 24 +#define MDIO_CTRL_CLK_25_4 0 /* 25MHz divide 4 */ +#define MDIO_CTRL_CLK_25_6 2 +#define MDIO_CTRL_CLK_25_8 3 +#define MDIO_CTRL_CLK_25_10 4 +#define MDIO_CTRL_CLK_25_32 5 +#define MDIO_CTRL_CLK_25_64 6 +#define MDIO_CTRL_CLK_25_128 7 +#define MDIO_CTRL_START BIT(23) +#define MDIO_CTRL_SPRES_PRMBL BIT(22) +#define MDIO_CTRL_OP_READ BIT(21) /* 1:read, 0:write */ +#define MDIO_CTRL_REG_MASK 0x1FUL +#define MDIO_CTRL_REG_SHIFT 16 +#define MDIO_CTRL_DATA_MASK 0xFFFFUL +#define MDIO_CTRL_DATA_SHIFT 0 +#define MDIO_MAX_AC_TO 120 /* 1.2ms timeout for slow clk */ + +/* for extension reg access */ +#define REG_MDIO_EXTN 0x1448 +#define MDIO_EXTN_PORTAD_MASK 0x1FUL +#define MDIO_EXTN_PORTAD_SHIFT 21 +#define MDIO_EXTN_DEVAD_MASK 0x1FUL +#define MDIO_EXTN_DEVAD_SHIFT 16 +#define MDIO_EXTN_REG_MASK 0xFFFFUL +#define MDIO_EXTN_REG_SHIFT 0 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is + * un-repairable because + * it has address decoder + * failure or more than 1 cell + * stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES 0x1424 +#define SERDES_PHY_CLK_SLOWDOWN BIT(18) +#define SERDES_MAC_CLK_SLOWDOWN BIT(17) +#define SERDES_SELFB_PLL_MASK 0x3UL +#define SERDES_SELFB_PLL_SHIFT 14 +#define SERDES_PHYCLK_SEL_GTX BIT(13) /* 1:gtx_clk, 0:25M */ +#define SERDES_PCIECLK_SEL_SRDS BIT(12) /* 1:serdes,0:25M */ +#define SERDES_BUFS_RX_EN BIT(11) +#define SERDES_PD_RX BIT(10) +#define SERDES_PLL_EN BIT(9) +#define SERDES_EN BIT(8) +#define SERDES_SELFB_PLL_SEL_CSR BIT(6) /* 0:state-machine,1:csr */ +#define SERDES_SELFB_PLL_CSR_MASK 0x3UL +#define SERDES_SELFB_PLL_CSR_SHIFT 4 +#define SERDES_SELFB_PLL_CSR_4 3 /* 4-12% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_0 2 /* 0-4% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_12 1 /* 12-18% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_18 0 /* 18-25% OV-CLK */ +#define SERDES_VCO_SLOW BIT(3) +#define SERDES_VCO_FAST BIT(2) +#define SERDES_LOCK_DETECT_EN BIT(1) +#define SERDES_LOCK_DETECT BIT(0) + +#define REG_LPI_DECISN_TIMER 0x143C +#define L2CB_LPI_DESISN_TIMER 0x7D00 + +#define REG_LPI_CTRL 0x1440 +#define LPI_CTRL_CHK_DA BIT(31) +#define LPI_CTRL_ENH_TO_MASK 0x1FFFUL +#define LPI_CTRL_ENH_TO_SHIFT 12 +#define LPI_CTRL_ENH_TH_MASK 0x1FUL +#define LPI_CTRL_ENH_TH_SHIFT 6 +#define LPI_CTRL_ENH_EN BIT(5) +#define LPI_CTRL_CHK_RX BIT(4) +#define LPI_CTRL_CHK_STATE BIT(3) +#define LPI_CTRL_GMII BIT(2) +#define LPI_CTRL_TO_PHY BIT(1) +#define LPI_CTRL_EN BIT(0) + +#define REG_LPI_WAIT 0x1444 +#define LPI_WAIT_TIMER_MASK 0xFFFFUL +#define LPI_WAIT_TIMER_SHIFT 0 + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_SPEED_MODE_SW BIT(30) /* 0:phy,1:sw */ +#define MAC_CTRL_HASH_ALG_CRC32 BIT(29) /* 1:legacy,0:lw_5b */ +#define MAC_CTRL_SINGLE_PAUSE_EN BIT(28) +#define MAC_CTRL_DBG BIT(27) +#define MAC_CTRL_BC_EN BIT(26) +#define MAC_CTRL_MC_ALL_EN BIT(25) +#define MAC_CTRL_RX_CHKSUM_EN BIT(24) +#define MAC_CTRL_TX_HUGE BIT(23) +#define MAC_CTRL_DBG_TX_BKPRESURE BIT(22) +#define MAC_CTRL_SPEED_MASK 3UL +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_10_100 1 +#define 
MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_TX_SIMURST BIT(19) +#define MAC_CTRL_SCNT BIT(17) +#define MAC_CTRL_TX_PAUSE BIT(16) +#define MAC_CTRL_PROMIS_EN BIT(15) +#define MAC_CTRL_RMV_VLAN BIT(14) +#define MAC_CTRL_PRMLEN_MASK 0xFUL +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_HUGE_EN BIT(9) +#define MAC_CTRL_LENCHK BIT(8) +#define MAC_CTRL_PAD BIT(7) +#define MAC_CTRL_ADD_CRC BIT(6) +#define MAC_CTRL_DUPLX BIT(5) +#define MAC_CTRL_LOOPBACK BIT(4) +#define MAC_CTRL_RX_FLOW BIT(3) +#define MAC_CTRL_TX_FLOW BIT(2) +#define MAC_CTRL_RX_EN BIT(1) +#define MAC_CTRL_TX_EN BIT(0) + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back + * inter-packet gap. The + * default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to + * enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* No back-off on backpressure, + * immediately start the + * transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. 
In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PT7_MATCH BIT(31) +#define WOL_PT6_MATCH BIT(30) +#define WOL_PT5_MATCH BIT(29) +#define WOL_PT4_MATCH BIT(28) +#define WOL_PT3_MATCH BIT(27) +#define WOL_PT2_MATCH BIT(26) +#define WOL_PT1_MATCH BIT(25) +#define WOL_PT0_MATCH BIT(24) +#define WOL_PT7_EN BIT(23) +#define WOL_PT6_EN BIT(22) +#define WOL_PT5_EN BIT(21) +#define WOL_PT4_EN BIT(20) +#define WOL_PT3_EN BIT(19) +#define WOL_PT2_EN BIT(18) +#define WOL_PT1_EN BIT(17) +#define WOL_PT0_EN BIT(16) +#define WOL_LNKCHG_ST BIT(10) +#define WOL_MAGIC_ST BIT(9) +#define WOL_PATTERN_ST BIT(8) +#define WOL_OOB_EN BIT(6) +#define WOL_LINK_CHG_PME_EN BIT(5) +#define WOL_LINK_CHG_EN BIT(4) +#define WOL_MAGIC_PME_EN BIT(3) +#define WOL_MAGIC_EN BIT(2) +#define WOL_PATTERN_PME_EN BIT(1) +#define WOL_PATTERN_EN BIT(0) + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PTLEN1 0x14A4 +#define WOL_PTLEN1_3_MASK 0xFFUL +#define WOL_PTLEN1_3_SHIFT 24 +#define WOL_PTLEN1_2_MASK 0xFFUL +#define WOL_PTLEN1_2_SHIFT 16 +#define WOL_PTLEN1_1_MASK 0xFFUL +#define WOL_PTLEN1_1_SHIFT 8 +#define WOL_PTLEN1_0_MASK 0xFFUL +#define WOL_PTLEN1_0_SHIFT 0 + +#define REG_WOL_PTLEN2 0x14A8 +#define WOL_PTLEN2_7_MASK 0xFFUL +#define WOL_PTLEN2_7_SHIFT 24 +#define WOL_PTLEN2_6_MASK 0xFFUL +#define WOL_PTLEN2_6_SHIFT 16 +#define WOL_PTLEN2_5_MASK 0xFFUL +#define WOL_PTLEN2_5_SHIFT 8 +#define WOL_PTLEN2_4_MASK 0xFFUL +#define WOL_PTLEN2_4_SHIFT 0 + +/* Internal SRAM Partition Register */ +#define RFDX_HEAD_ADDR_MASK 0x03FF +#define RFDX_HARD_ADDR_SHIFT 0 +#define RFDX_TAIL_ADDR_MASK 0x03FF +#define RFDX_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_RFD0_INFO 0x1500 +#define REG_SRAM_RFD1_INFO 0x1504 +#define REG_SRAM_RFD2_INFO 0x1508 +#define REG_SRAM_RFD3_INFO 0x150C + +#define REG_RFD_NIC_LEN 0x1510 /* In 8-bytes */ +#define RFD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_TRD_ADDR 0x1518 +#define TPD_HEAD_ADDR_MASK 0x03FF +#define TPD_HEAD_ADDR_SHIFT 0 +#define TPD_TAIL_ADDR_MASK 0x03FF +#define TPD_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_TRD_LEN 0x151C /* In 8-bytes */ +#define TPD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* + * Load Ptr Register + * Software sets this bit after the initialization of the head and tail */ +#define REG_LOAD_PTR 0x1534 + +/* + * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. 
+ */ +#define REG_RX_BASE_ADDR_HI 0x1540 +#define REG_TX_BASE_ADDR_HI 0x1544 +#define REG_RFD0_HEAD_ADDR_LO 0x1550 +#define REG_RFD_RING_SIZE 0x1560 +#define RFD_RING_SIZE_MASK 0x0FFF +#define REG_RX_BUF_SIZE 0x1564 +#define RX_BUF_SIZE_MASK 0xFFFF +#define REG_RRD0_HEAD_ADDR_LO 0x1568 +#define REG_RRD_RING_SIZE 0x1578 +#define RRD_RING_SIZE_MASK 0x0FFF +#define REG_TPD_PRI1_ADDR_LO 0x157C +#define REG_TPD_PRI0_ADDR_LO 0x1580 +#define REG_TPD_RING_SIZE 0x1584 +#define TPD_RING_SIZE_MASK 0xFFFF + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1590 +#define TXQ_TXF_BURST_NUM_MASK 0xFFFFUL +#define TXQ_TXF_BURST_NUM_SHIFT 16 +#define L1C_TXQ_TXF_BURST_PREF 0x200 +#define L2CB_TXQ_TXF_BURST_PREF 0x40 +#define TXQ_CTRL_PEDING_CLR BIT(8) +#define TXQ_CTRL_LS_8023_EN BIT(7) +#define TXQ_CTRL_ENH_MODE BIT(6) +#define TXQ_CTRL_EN BIT(5) +#define TXQ_CTRL_IP_OPTION_EN BIT(4) +#define TXQ_NUM_TPD_BURST_MASK 0xFUL +#define TXQ_NUM_TPD_BURST_SHIFT 0 +#define TXQ_NUM_TPD_BURST_DEF 5 +#define TXQ_CFGV (\ + FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\ + TXQ_CTRL_ENH_MODE |\ + TXQ_CTRL_LS_8023_EN |\ + TXQ_CTRL_IP_OPTION_EN) +#define L1C_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF)) +#define L2CB_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF)) + + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ +#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF +#define MAX_TSO_FRAME_SIZE (7*1024) + +#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ +#define TXF_WATER_MARK_MASK 0x0FFF +#define TXF_LOW_WATER_MARK_SHIFT 0 +#define TXF_HIGH_WATER_MARK_SHIFT 16 +#define TXQ_CTRL_BURST_MODE_EN 0x80000000 + +#define REG_THRUPUT_MON_CTRL 0x159C +#define THRUPUT_MON_RATE_MASK 0x3 +#define THRUPUT_MON_RATE_SHIFT 0 +#define THRUPUT_MON_EN 0x80 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define ASPM_THRUPUT_LIMIT_MASK 0x3 +#define ASPM_THRUPUT_LIMIT_SHIFT 0 +#define ASPM_THRUPUT_LIMIT_NO 0x00 +#define ASPM_THRUPUT_LIMIT_1M 0x01 +#define ASPM_THRUPUT_LIMIT_10M 0x02 +#define ASPM_THRUPUT_LIMIT_100M 0x03 +#define IPV6_CHKSUM_CTRL_EN BIT(7) +#define RXQ_RFD_BURST_NUM_MASK 0x003F +#define RXQ_RFD_BURST_NUM_SHIFT 20 +#define RXQ_NUM_RFD_PREF_DEF 8 +#define RSS_MODE_MASK 3UL +#define RSS_MODE_SHIFT 26 +#define RSS_MODE_DIS 0 +#define RSS_MODE_SQSI 1 +#define RSS_MODE_MQSI 2 +#define RSS_MODE_MQMI 3 +#define RSS_NIP_QUEUE_SEL BIT(28) /* 0:q0, 1:table */ +#define RRS_HASH_CTRL_EN BIT(29) +#define RX_CUT_THRU_EN BIT(30) +#define RXQ_CTRL_EN BIT(31) + +#define REG_RFD_FREE_THRESH 0x15A4 +#define RFD_FREE_THRESH_MASK 0x003F +#define RFD_FREE_HI_THRESH_SHIFT 0 +#define RFD_FREE_LO_THRESH_SHIFT 6 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0x0FFF +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0x0FFF + +#define REG_RXD_DMA_CTRL 0x15AC +#define RXD_DMA_THRESH_MASK 0x0FFF /* In 8-bytes */ +#define RXD_DMA_THRESH_SHIFT 0 +#define RXD_DMA_DOWN_TIMER_MASK 0xFFFF +#define RXD_DMA_DOWN_TIMER_SHIFT 16 + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_SMB_NOW BIT(31) +#define DMA_CTRL_WPEND_CLR BIT(30) +#define DMA_CTRL_RPEND_CLR BIT(29) +#define DMA_CTRL_WDLY_CNT_MASK 0xFUL +#define DMA_CTRL_WDLY_CNT_SHIFT 16 +#define DMA_CTRL_WDLY_CNT_DEF 4 +#define DMA_CTRL_RDLY_CNT_MASK 0x1FUL +#define DMA_CTRL_RDLY_CNT_SHIFT 11 +#define DMA_CTRL_RDLY_CNT_DEF 15 
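As a worked example of the composite TXQ configuration values defined above: TXQ_CFGV ORs FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) = 0x5 with TXQ_CTRL_IP_OPTION_EN (0x10), TXQ_CTRL_ENH_MODE (0x40) and TXQ_CTRL_LS_8023_EN (0x80), giving 0x000000D5. Adding FIELDX(TXQ_TXF_BURST_NUM, 0x200) = 0x02000000 makes L1C_TXQ_CFGV = 0x020000D5, while the smaller 0x40 burst preference used for the l2cb parts makes L2CB_TXQ_CFGV = 0x004000D5.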
+#define DMA_CTRL_RREQ_PRI_DATA BIT(10) /* 0:tpd, 1:data */ +#define DMA_CTRL_WREQ_BLEN_MASK 7UL +#define DMA_CTRL_WREQ_BLEN_SHIFT 7 +#define DMA_CTRL_RREQ_BLEN_MASK 7UL +#define DMA_CTRL_RREQ_BLEN_SHIFT 4 +#define L1C_CTRL_DMA_RCB_LEN128 BIT(3) /* 0:64bytes,1:128bytes */ +#define DMA_CTRL_RORDER_MODE_MASK 7UL +#define DMA_CTRL_RORDER_MODE_SHIFT 0 +#define DMA_CTRL_RORDER_MODE_OUT 4 +#define DMA_CTRL_RORDER_MODE_ENHANCE 2 +#define DMA_CTRL_RORDER_MODE_IN 1 + +/* INT-triggle/SMB Control Register */ +#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */ +#define SMB_STAT_TIMER_MASK 0xFFFFFF +#define REG_TINT_TPD_THRESH 0x15C8 /* tpd th to trig intrrupt */ + +/* Mail box */ +#define MB_RFDX_PROD_IDX_MASK 0xFFFF +#define REG_MB_RFD0_PROD_IDX 0x15E0 + +#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */ +#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */ +#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */ +#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */ + +#define REG_MB_RFD01_CONS_IDX 0x15F8 +#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF +#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000 + +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 0x00000001 +#define ISR_TIMER 0x00000002 +/* + * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set + * in Table 51 Selene Master Control Register (Offset 0x1400). + */ +#define ISR_MANUAL 0x00000004 +#define ISR_HW_RXF_OV 0x00000008 /* RXF overflow interrupt */ +#define ISR_RFD0_UR 0x00000010 /* RFD0 under run */ +#define ISR_RFD1_UR 0x00000020 +#define ISR_RFD2_UR 0x00000040 +#define ISR_RFD3_UR 0x00000080 +#define ISR_TXF_UR 0x00000100 +#define ISR_DMAR_TO_RST 0x00000200 +#define ISR_DMAW_TO_RST 0x00000400 +#define ISR_TX_CREDIT 0x00000800 +#define ISR_GPHY 0x00001000 +/* GPHY low power state interrupt */ +#define ISR_GPHY_LPW 0x00002000 +#define ISR_TXQ_TO_RST 0x00004000 +#define ISR_TX_PKT 0x00008000 +#define ISR_RX_PKT_0 0x00010000 +#define ISR_RX_PKT_1 0x00020000 +#define ISR_RX_PKT_2 0x00040000 +#define ISR_RX_PKT_3 0x00080000 +#define ISR_MAC_RX 0x00100000 +#define ISR_MAC_TX 0x00200000 +#define ISR_UR_DETECTED 0x00400000 +#define ISR_FERR_DETECTED 0x00800000 +#define ISR_NFERR_DETECTED 0x01000000 +#define ISR_CERR_DETECTED 0x02000000 +#define ISR_PHY_LINKDOWN 0x04000000 +#define ISR_DIS_INT 0x80000000 + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + +#define IMR_NORMAL_MASK (\ + ISR_MANUAL |\ + ISR_HW_RXF_OV |\ + ISR_RFD0_UR |\ + ISR_TXF_UR |\ + ISR_DMAR_TO_RST |\ + ISR_TXQ_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_GPHY |\ + ISR_TX_PKT |\ + ISR_RX_PKT_0 |\ + ISR_GPHY_LPW |\ + ISR_PHY_LINKDOWN) + +#define ISR_RX_PKT (\ + ISR_RX_PKT_0 |\ + ISR_RX_PKT_1 |\ + ISR_RX_PKT_2 |\ + ISR_RX_PKT_3) + +#define ISR_OVER (\ + ISR_RFD0_UR |\ + ISR_RFD1_UR |\ + ISR_RFD2_UR |\ + ISR_RFD3_UR |\ + ISR_HW_RXF_OV |\ + ISR_TXF_UR) + +#define ISR_ERROR (\ + ISR_DMAR_TO_RST |\ + ISR_TXQ_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY_LINKDOWN) + +#define REG_INT_RETRIG_TIMER 0x1608 +#define INT_RETRIG_TIMER_MASK 0xFFFF + +#define REG_MAC_RX_STATUS_BIN 0x1700 +#define REG_MAC_RX_STATUS_END 0x175c +#define REG_MAC_TX_STATUS_BIN 0x1760 +#define REG_MAC_TX_STATUS_END 0x17c0 + +#define REG_CLK_GATING_CTRL 0x1814 +#define CLK_GATING_DMAW_EN 0x0001 +#define CLK_GATING_DMAR_EN 0x0002 +#define CLK_GATING_TXQ_EN 0x0004 +#define CLK_GATING_RXQ_EN 0x0008 +#define CLK_GATING_TXMAC_EN 0x0010 +#define CLK_GATING_RXMAC_EN 0x0020 + +#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\ + CLK_GATING_DMAR_EN |\ + 
CLK_GATING_TXQ_EN |\ + CLK_GATING_RXQ_EN |\ + CLK_GATING_TXMAC_EN|\ + CLK_GATING_RXMAC_EN) + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +#define L1D_MPW_PHYID1 0xD01C /* V7 */ +#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ +#define L1D_MPW_PHYID3 0xD01E /* V8 */ + + +/* Autoneg Advertisement Register */ +#define ADVERTISE_DEFAULT_CAP \ + (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM) + +/* 1000BASE-T Control Register */ +#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */ + +#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define GIGA_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define GIGA_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define GIGA_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define GIGA_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define GIGA_CR_1000T_SPEED_MASK 0x0300 +#define GIGA_CR_1000T_DEFAULT_CAP 0x0300 + +/* PHY Specific Status Register */ +#define MII_GIGA_PSSR 0x11 +#define GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define GIGA_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define GIGA_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define GIGA_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define GIGA_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define GIGA_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +/* PHY Interrupt Enable Register */ +#define MII_IER 0x12 +#define IER_LINK_UP 0x0400 +#define IER_LINK_DOWN 0x0800 + +/* PHY Interrupt Status Register */ +#define MII_ISR 0x13 +#define ISR_LINK_UP 0x0400 +#define ISR_LINK_DOWN 0x0800 + +/* Cable-Detect-Test Control Register */ +#define MII_CDTC 0x16 +#define CDTC_EN_OFF 0 /* sc */ +#define CDTC_EN_BITS 1 +#define CDTC_PAIR_OFF 8 +#define CDTC_PAIR_BIT 2 + +/* Cable-Detect-Test Status Register */ +#define MII_CDTS 0x1C +#define CDTS_STATUS_OFF 8 +#define CDTS_STATUS_BITS 2 +#define CDTS_STATUS_NORMAL 0 +#define CDTS_STATUS_SHORT 1 +#define CDTS_STATUS_OPEN 2 +#define CDTS_STATUS_INVALID 3 + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/***************************** debug port *************************************/ + +#define MIIDBG_ANACTRL 0x00 +#define ANACTRL_CLK125M_DELAY_EN 0x8000 +#define ANACTRL_VCO_FAST 0x4000 +#define ANACTRL_VCO_SLOW 0x2000 +#define ANACTRL_AFE_MODE_EN 0x1000 +#define ANACTRL_LCKDET_PHY 0x800 +#define ANACTRL_LCKDET_EN 0x400 +#define ANACTRL_OEN_125M 0x200 +#define ANACTRL_HBIAS_EN 0x100 +#define ANACTRL_HB_EN 0x80 +#define ANACTRL_SEL_HSP 0x40 +#define ANACTRL_CLASSA_EN 0x20 +#define ANACTRL_MANUSWON_SWR_MASK 3U +#define ANACTRL_MANUSWON_SWR_SHIFT 2 +#define ANACTRL_MANUSWON_SWR_2V 0 +#define ANACTRL_MANUSWON_SWR_1P9V 1 +#define ANACTRL_MANUSWON_SWR_1P8V 2 +#define ANACTRL_MANUSWON_SWR_1P7V 3 +#define ANACTRL_MANUSWON_BW3_4M 0x2 +#define ANACTRL_RESTART_CAL 0x1 +#define ANACTRL_DEF 0x02EF + +#define MIIDBG_SYSMODCTRL 0x04 +#define SYSMODCTRL_IECHOADJ_PFMH_PHY 0x8000 +#define SYSMODCTRL_IECHOADJ_BIASGEN 0x4000 +#define SYSMODCTRL_IECHOADJ_PFML_PHY 0x2000 +#define SYSMODCTRL_IECHOADJ_PS_MASK 3U +#define SYSMODCTRL_IECHOADJ_PS_SHIFT 10 +#define SYSMODCTRL_IECHOADJ_PS_40 3 +#define SYSMODCTRL_IECHOADJ_PS_20 2 +#define SYSMODCTRL_IECHOADJ_PS_0 1 +#define SYSMODCTRL_IECHOADJ_10BT_100MV 0x40 /* 1:100mv, 
0:200mv */ +#define SYSMODCTRL_IECHOADJ_HLFAP_MASK 3U +#define SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_VDFULBW 0x8 +#define SYSMODCTRL_IECHOADJ_VDBIASHLF 0x4 +#define SYSMODCTRL_IECHOADJ_VDAMPHLF 0x2 +#define SYSMODCTRL_IECHOADJ_VDLANSW 0x1 +#define SYSMODCTRL_IECHOADJ_DEF 0x88BB /* ???? */ + +/* for l1d & l2cb */ +#define SYSMODCTRL_IECHOADJ_CUR_ADD 0x8000 +#define SYSMODCTRL_IECHOADJ_CUR_MASK 7U +#define SYSMODCTRL_IECHOADJ_CUR_SHIFT 12 +#define SYSMODCTRL_IECHOADJ_VOL_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_VOL_SHIFT 8 +#define SYSMODCTRL_IECHOADJ_VOL_17ALL 3 +#define SYSMODCTRL_IECHOADJ_VOL_100M15 1 +#define SYSMODCTRL_IECHOADJ_VOL_10M17 0 +#define SYSMODCTRL_IECHOADJ_BIAS1_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_BIAS2_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0 +#define L1D_SYSMODCTRL_IECHOADJ_DEF 0x4FBB + +#define MIIDBG_SRDSYSMOD 0x05 +#define SRDSYSMOD_LCKDET_EN 0x2000 +#define SRDSYSMOD_PLL_EN 0x800 +#define SRDSYSMOD_SEL_HSP 0x400 +#define SRDSYSMOD_HLFTXDR 0x200 +#define SRDSYSMOD_TXCLK_DELAY_EN 0x100 +#define SRDSYSMOD_TXELECIDLE 0x80 +#define SRDSYSMOD_DEEMP_EN 0x40 +#define SRDSYSMOD_MS_PAD 0x4 +#define SRDSYSMOD_CDR_ADC_VLTG 0x2 +#define SRDSYSMOD_CDR_DAC_1MA 0x1 +#define SRDSYSMOD_DEF 0x2C46 + +#define MIIDBG_CFGLPSPD 0x0A +#define CFGLPSPD_RSTCNT_MASK 3U +#define CFGLPSPD_RSTCNT_SHIFT 14 +#define CFGLPSPD_RSTCNT_CLK125SW 0x2000 + +#define MIIDBG_HIBNEG 0x0B +#define HIBNEG_PSHIB_EN 0x8000 +#define HIBNEG_WAKE_BOTH 0x4000 +#define HIBNEG_ONOFF_ANACHG_SUDEN 0x2000 +#define HIBNEG_HIB_PULSE 0x1000 +#define HIBNEG_GATE_25M_EN 0x800 +#define HIBNEG_RST_80U 0x400 +#define HIBNEG_RST_TIMER_MASK 3U +#define HIBNEG_RST_TIMER_SHIFT 8 +#define HIBNEG_GTX_CLK_DELAY_MASK 3U +#define HIBNEG_GTX_CLK_DELAY_SHIFT 5 +#define HIBNEG_BYPSS_BRKTIMER 0x10 +#define HIBNEG_DEF 0xBC40 + +#define MIIDBG_TST10BTCFG 0x12 +#define TST10BTCFG_INTV_TIMER_MASK 3U +#define TST10BTCFG_INTV_TIMER_SHIFT 14 +#define TST10BTCFG_TRIGER_TIMER_MASK 3U +#define TST10BTCFG_TRIGER_TIMER_SHIFT 12 +#define TST10BTCFG_DIV_MAN_MLT3_EN 0x800 +#define TST10BTCFG_OFF_DAC_IDLE 0x400 +#define TST10BTCFG_LPBK_DEEP 0x4 /* 1:deep,0:shallow */ +#define TST10BTCFG_DEF 0x4C04 + +#define MIIDBG_AZ_ANADECT 0x15 +#define AZ_ANADECT_10BTRX_TH 0x8000 +#define AZ_ANADECT_BOTH_01CHNL 0x4000 +#define AZ_ANADECT_INTV_MASK 0x3FU +#define AZ_ANADECT_INTV_SHIFT 8 +#define AZ_ANADECT_THRESH_MASK 0xFU +#define AZ_ANADECT_THRESH_SHIFT 4 +#define AZ_ANADECT_CHNL_MASK 0xFU +#define AZ_ANADECT_CHNL_SHIFT 0 +#define AZ_ANADECT_DEF 0x3220 +#define AZ_ANADECT_LONG 0xb210 + +#define MIIDBG_MSE16DB 0x18 /* l1d */ +#define L1D_MSE16DB_UP 0x05EA +#define L1D_MSE16DB_DOWN 0x02EA + +#define MIIDBG_LEGCYPS 0x29 +#define LEGCYPS_EN 0x8000 +#define LEGCYPS_DAC_AMP1000_MASK 7U +#define LEGCYPS_DAC_AMP1000_SHIFT 12 +#define LEGCYPS_DAC_AMP100_MASK 7U +#define LEGCYPS_DAC_AMP100_SHIFT 9 +#define LEGCYPS_DAC_AMP10_MASK 7U +#define LEGCYPS_DAC_AMP10_SHIFT 6 +#define LEGCYPS_UNPLUG_TIMER_MASK 7U +#define LEGCYPS_UNPLUG_TIMER_SHIFT 3 +#define LEGCYPS_UNPLUG_DECT_EN 0x4 +#define LEGCYPS_ECNC_PS_EN 0x1 +#define L1D_LEGCYPS_DEF 0x129D +#define L1C_LEGCYPS_DEF 0x36DD + +#define MIIDBG_TST100BTCFG 0x36 +#define TST100BTCFG_NORMAL_BW_EN 0x8000 +#define TST100BTCFG_BADLNK_BYPASS 0x4000 +#define TST100BTCFG_SHORTCABL_TH_MASK 0x3FU +#define TST100BTCFG_SHORTCABL_TH_SHIFT 8 +#define TST100BTCFG_LITCH_EN 0x80 +#define TST100BTCFG_VLT_SW 0x40 +#define TST100BTCFG_LONGCABL_TH_MASK 0x3FU +#define 
TST100BTCFG_LONGCABL_TH_SHIFT 0 +#define TST100BTCFG_DEF 0xE12C + +#define MIIDBG_VOLT_CTRL 0x3B /* only for l2cb 1 & 2 */ +#define VOLT_CTRL_CABLE1TH_MASK 0x1FFU +#define VOLT_CTRL_CABLE1TH_SHIFT 7 +#define VOLT_CTRL_AMPCTRL_MASK 3U +#define VOLT_CTRL_AMPCTRL_SHIFT 5 +#define VOLT_CTRL_SW_BYPASS 0x10 +#define VOLT_CTRL_SWLOWEST 0x8 +#define VOLT_CTRL_DACAMP10_MASK 7U +#define VOLT_CTRL_DACAMP10_SHIFT 0 + +#define MIIDBG_CABLE1TH_DET 0x3E +#define CABLE1TH_DET_EN 0x8000 + + +/******* dev 3 *********/ +#define MIIEXT_PCS 3 + +#define MIIEXT_CLDCTRL3 0x8003 +#define CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 +#define CLDCTRL3_AZ_DISAMP 0x1000 +#define L2CB_CLDCTRL3 0x4D19 +#define L1D_CLDCTRL3 0xDD19 + +#define MIIEXT_CLDCTRL6 0x8006 +#define CLDCTRL6_CAB_LEN_MASK 0x1FFU +#define CLDCTRL6_CAB_LEN_SHIFT 0 +#define CLDCTRL6_CAB_LEN_SHORT 0x50 + +/********* dev 7 **********/ +#define MIIEXT_ANEG 7 + +#define MIIEXT_LOCAL_EEEADV 0x3C +#define LOCAL_EEEADV_1000BT 0x4 +#define LOCAL_EEEADV_100BT 0x2 + +#define MIIEXT_REMOTE_EEEADV 0x3D +#define REMOTE_EEEADV_1000BT 0x4 +#define REMOTE_EEEADV_100BT 0x2 + +#define MIIEXT_EEE_ANEG 0x8000 +#define EEE_ANEG_1000M 0x4 +#define EEE_ANEG_100M 0x2 + +#endif /*_ATL1C_HW_H_*/ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c new file mode 100644 index 000000000..932bd1862 --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -0,0 +1,2803 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include "atl1c.h" + +#define ATL1C_DRV_VERSION "1.0.1.1-NAPI" +char atl1c_driver_name[] = "atl1c"; +char atl1c_driver_version[] = ATL1C_DRV_VERSION; + +/* + * atl1c_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id atl1c_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)}, + /* required last entry */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); + +MODULE_AUTHOR("Jie Yang"); +MODULE_AUTHOR("Qualcomm Atheros Inc., "); +MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATL1C_DRV_VERSION); + +static int atl1c_stop_mac(struct atl1c_hw *hw); +static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); +static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed); +static void atl1c_start_mac(struct atl1c_adapter *adapter); +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, + int *work_done, int work_to_do); +static int atl1c_up(struct atl1c_adapter *adapter); +static void atl1c_down(struct atl1c_adapter *adapter); +static int atl1c_reset_mac(struct atl1c_hw *hw); +static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter); +static int atl1c_configure(struct atl1c_adapter *adapter); +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter); + +static const u16 atl1c_pay_load_size[] = { + 128, 256, 512, 1024, 2048, 4096, +}; + + +static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; +static void atl1c_pcie_patch(struct atl1c_hw *hw) +{ + u32 mst_data, data; + + /* pclk sel could switch to 25M */ + AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data); + mst_data &= ~MASTER_CTRL_CLK_SEL_DIS; + AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data); + + /* WoL/PCIE related settings */ + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { + AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); + data |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); + } else { /* new dev set bit5 of MASTER */ + if (!(mst_data & MASTER_CTRL_WAKEN_25M)) + AT_WRITE_REG(hw, REG_MASTER_CTRL, + mst_data | MASTER_CTRL_WAKEN_25M); + } + /* aspm/PCIE setting only for l2cb 1.0 */ + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { + AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); + data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW, + L2CB1_PCIE_PHYMISC2_CDR_BW); + data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH, + L2CB1_PCIE_PHYMISC2_L0S_TH); + AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); + /* extend L1 sync timer */ + AT_READ_REG(hw, REG_LINK_CTRL, &data); + data |= LINK_CTRL_EXT_SYNC; + AT_WRITE_REG(hw, REG_LINK_CTRL, data); + } + /* l2cb 1.x & l1d 1.x */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) { + AT_READ_REG(hw, REG_PM_CTRL, &data); + data |= PM_CTRL_L0S_BUFSRX_EN; + AT_WRITE_REG(hw, REG_PM_CTRL, data); + /* clear vendor msg */ + AT_READ_REG(hw, REG_DMA_DBG, &data); + AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG); + } +} + 
+/* FIXME: no need any more ? */ +/* + * atl1c_init_pcie - init PCIE module + */ +static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) +{ + u32 data; + u32 pci_cmd; + struct pci_dev *pdev = hw->adapter->pdev; + int pos; + + AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_IO); + AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd); + + /* + * Clear any PowerSaveing Settings + */ + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + /* wol sts read-clear */ + AT_READ_REG(hw, REG_WOL_CTRL, &data); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* + * Mask some pcie error bits + */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (pos) { + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); + data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); + } + /* clear error status */ + pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_URD); + + AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); + data &= ~LTSSM_ID_EN_WRO; + AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data); + + atl1c_pcie_patch(hw); + if (flag & ATL1C_PCIE_L0S_L1_DISABLE) + atl1c_disable_l0s_l1(hw); + + msleep(5); +} + +/** + * atl1c_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl1c_irq_enable(struct atl1c_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF); + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + AT_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * atl1c_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl1c_irq_disable(struct atl1c_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); + AT_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * atl1c_irq_reset - reset interrupt confiure on the NIC + * @adapter: board private structure + */ +static inline void atl1c_irq_reset(struct atl1c_adapter *adapter) +{ + atomic_set(&adapter->irq_sem, 1); + atl1c_irq_enable(adapter); +} + +/* + * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads + * of the idle status register until the device is actually idle + */ +static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl) +{ + int timeout; + u32 data; + + for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { + AT_READ_REG(hw, REG_IDLE_STATUS, &data); + if ((data & modu_ctrl) == 0) + return 0; + msleep(1); + } + return data; +} + +/** + * atl1c_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1c_phy_config(unsigned long data) +{ + struct atl1c_adapter *adapter = (struct atl1c_adapter *) data; + struct atl1c_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1c_restart_autoneg(hw); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +void atl1c_reinit_locked(struct atl1c_adapter *adapter) +{ + WARN_ON(in_interrupt()); + atl1c_down(adapter); + atl1c_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); +} + +static void atl1c_check_link_status(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct 
net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + unsigned long flags; + u16 speed, duplex, phy_data; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + /* MII_BMSR must read twise */ + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + + if ((phy_data & BMSR_LSTATUS) == 0) { + /* link down */ + netif_carrier_off(netdev); + hw->hibernate = true; + if (atl1c_reset_mac(hw) != 0) + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, "reset mac failed\n"); + atl1c_set_aspm(hw, SPEED_0); + atl1c_post_phy_linkchg(hw, SPEED_0); + atl1c_reset_dma_ring(adapter); + atl1c_configure(adapter); + } else { + /* Link Up */ + hw->hibernate = false; + spin_lock_irqsave(&adapter->mdio_lock, flags); + err = atl1c_get_speed_and_duplex(hw, &speed, &duplex); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + if (unlikely(err)) + return; + /* link result is our setting */ + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1c_set_aspm(hw, speed); + atl1c_post_phy_linkchg(hw, speed); + atl1c_start_mac(adapter); + if (netif_msg_link(adapter)) + dev_info(&pdev->dev, + "%s: %s NIC Link is Up<%d Mbps %s>\n", + atl1c_driver_name, netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex"); + } + if (!netif_carrier_ok(netdev)) + netif_carrier_on(netdev); + } +} + +static void atl1c_link_chg_event(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + u16 phy_data; + u16 link_up; + + spin_lock(&adapter->mdio_lock); + atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->mdio_lock); + link_up = phy_data & BMSR_LSTATUS; + /* notify upper layer link down ASAP */ + if (!link_up) { + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + netif_carrier_off(netdev); + if (netif_msg_link(adapter)) + dev_info(&pdev->dev, + "%s: %s NIC Link is Down\n", + atl1c_driver_name, netdev->name); + adapter->link_speed = SPEED_0; + } + } + + set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event); + schedule_work(&adapter->common_task); +} + +static void atl1c_common_task(struct work_struct *work) +{ + struct atl1c_adapter *adapter; + struct net_device *netdev; + + adapter = container_of(work, struct atl1c_adapter, common_task); + netdev = adapter->netdev; + + if (test_bit(__AT_DOWN, &adapter->flags)) + return; + + if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { + netif_device_detach(netdev); + atl1c_down(adapter); + atl1c_up(adapter); + netif_device_attach(netdev); + } + + if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, + &adapter->work_event)) { + atl1c_irq_disable(adapter); + atl1c_check_link_status(adapter); + atl1c_irq_enable(adapter); + } +} + + +static void atl1c_del_timer(struct atl1c_adapter *adapter) +{ + del_timer_sync(&adapter->phy_config_timer); +} + + +/** + * atl1c_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1c_tx_timeout(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); + schedule_work(&adapter->common_task); +} + +/** + * atl1c_set_multi - Multicast and 
Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + */ +static void atl1c_set_multi(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 mac_ctrl_data; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data); + + if (netdev->flags & IFF_PROMISC) { + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; + } else { + mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + } + + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1c_hash_mc_addr(hw, ha->addr); + atl1c_hash_set(hw, hash_value); + } +} + +static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; + } +} + +static void atl1c_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + u32 mac_ctrl_data = 0; + + if (netif_msg_pktdata(adapter)) + dev_dbg(&pdev->dev, "atl1c_vlan_mode\n"); + + atl1c_irq_disable(adapter); + AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data); + __atl1c_vlan_mode(features, &mac_ctrl_data); + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1c_irq_enable(adapter); +} + +static void atl1c_restore_vlan(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + if (netif_msg_pktdata(adapter)) + dev_dbg(&pdev->dev, "atl1c_restore_vlan\n"); + atl1c_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +/** + * atl1c_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1c_set_mac_addr(struct net_device *netdev, void *p) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); + + return 0; +} + +static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, + struct net_device *dev) +{ + unsigned int head_size; + int mtu = dev->mtu; + + adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; + + head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + adapter->rx_frag_size = roundup_pow_of_two(head_size); +} + +static netdev_features_t atl1c_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (netdev->mtu > MAX_TSO_FRAME_SIZE) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return features; +} + +static int atl1c_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atl1c_vlan_mode(netdev, features); + + return 0; +} + +/** + * atl1c_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + /* Fast Ethernet controller doesn't support jumbo packet */ + if (((hw->nic_type == athr_l2c || + hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) || + max_frame < ETH_ZLEN + ETH_FCS_LEN || + max_frame > MAX_JUMBO_FRAME_SIZE) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + atl1c_set_rxbufsize(adapter, netdev); + atl1c_down(adapter); + netdev_update_features(netdev); + atl1c_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1c_read_phy_reg(&adapter->hw, reg_num, &result); + return result; +} + +static void atl1c_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + atl1c_write_phy_reg(&adapter->hw, reg_num, val); +} + +static int atl1c_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + dev_dbg(&pdev->dev, " write %x %x", + data->reg_num, data->val_in); + if (atl1c_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + 
goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; +} + +static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1c_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * atl1c_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int atl1c_alloc_queues(struct atl1c_adapter *adapter) +{ + return 0; +} + +static void atl1c_set_mac_type(struct atl1c_hw *hw) +{ + switch (hw->device_id) { + case PCI_DEVICE_ID_ATTANSIC_L2C: + hw->nic_type = athr_l2c; + break; + case PCI_DEVICE_ID_ATTANSIC_L1C: + hw->nic_type = athr_l1c; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B: + hw->nic_type = athr_l2c_b; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B2: + hw->nic_type = athr_l2c_b2; + break; + case PCI_DEVICE_ID_ATHEROS_L1D: + hw->nic_type = athr_l1d; + break; + case PCI_DEVICE_ID_ATHEROS_L1D_2_0: + hw->nic_type = athr_l1d_2; + break; + default: + break; + } +} + +static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) +{ + u32 link_ctrl_data; + + atl1c_set_mac_type(hw); + AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); + + hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | + ATL1C_TXQ_MODE_ENHANCE; + hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT | + ATL1C_ASPM_L1_SUPPORT; + hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; + + if (hw->nic_type == athr_l1c || + hw->nic_type == athr_l1d || + hw->nic_type == athr_l1d_2) + hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; + return 0; +} + +struct atl1c_platform_patch { + u16 pci_did; + u8 pci_revid; + u16 subsystem_vid; + u16 subsystem_did; + u32 patch_flag; +#define ATL1C_LINK_PATCH 0x1 +}; +static const struct atl1c_platform_patch plats[] = { +{0x2060, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2060, 0xC1, 0x1019, 0x2060, 0x1}, +{0x2060, 0xC1, 0x1019, 0xE000, 0x1}, +{0x2062, 0xC0, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC0, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC0, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC1, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC1, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2802, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2801, 0x1}, +{0x1073, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1073, 0xC0, 0x1019, 0x1073, 0x1}, +{0x1073, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1083, 0xC0, 0x1019, 0x1083, 0x1}, +{0x1083, 0xC0, 0x1462, 0x7680, 0x1}, +{0x1083, 0xC0, 0x1565, 0x2803, 0x1}, +{0}, +}; + +static void atl1c_patch_assign(struct atl1c_hw *hw) +{ + struct pci_dev *pdev = hw->adapter->pdev; + u32 misc_ctrl; + int i = 0; + + hw->msi_lnkpatch = false; + + while (plats[i].pci_did != 0) { + if (plats[i].pci_did == hw->device_id && + plats[i].pci_revid == hw->revision_id && + plats[i].subsystem_vid == hw->subsystem_vendor_id && + plats[i].subsystem_did == hw->subsystem_id) { + if (plats[i].patch_flag & ATL1C_LINK_PATCH) + hw->msi_lnkpatch = true; + } + i++; + } + + if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 && + hw->revision_id == L2CB_V21) { + /* config access mode */ + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, + REG_PCIE_DEV_MISC_CTRL); + pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl); + misc_ctrl &= ~0x100; + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, + REG_PCIE_DEV_MISC_CTRL); + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl); + } +} +/** + * atl1c_sw_init - Initialize 
general software structures (struct atl1c_adapter) + * @adapter: board private structure to initialize + * + * atl1c_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int atl1c_sw_init(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 revision; + + + adapter->wol = 0; + device_set_wakeup_enable(&pdev->dev, false); + adapter->link_speed = SPEED_0; + adapter->link_duplex = FULL_DUPLEX; + adapter->tpd_ring[0].count = 1024; + adapter->rfd_ring.count = 512; + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision); + hw->revision_id = revision & 0xFF; + /* before link up, we assume hibernate is true */ + hw->hibernate = true; + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + if (atl1c_setup_mac_funcs(hw) != 0) { + dev_err(&pdev->dev, "set mac function pointers failed\n"); + return -1; + } + atl1c_patch_assign(hw); + + hw->intr_mask = IMR_NORMAL_MASK; + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + hw->autoneg_advertised = ADVERTISED_Autoneg; + hw->indirect_tab = 0xE4E4E4E4; + hw->base_cpu = 0; + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 400ms */ + hw->rx_imt = 200; + hw->tx_imt = 1000; + + hw->tpd_burst = 5; + hw->rfd_burst = 8; + hw->dma_order = atl1c_dma_ord_out; + hw->dmar_block = atl1c_dma_req_1024; + + if (atl1c_alloc_queues(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + /* TODO */ + atl1c_set_rxbufsize(adapter, adapter->netdev); + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +static inline void atl1c_clean_buffer(struct pci_dev *pdev, + struct atl1c_buffer *buffer_info) +{ + u16 pci_driection; + if (buffer_info->flags & ATL1C_BUFFER_FREE) + return; + if (buffer_info->dma) { + if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE) + pci_driection = PCI_DMA_FROMDEVICE; + else + pci_driection = PCI_DMA_TODEVICE; + + if (buffer_info->flags & ATL1C_PCIMAP_SINGLE) + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, pci_driection); + else if (buffer_info->flags & ATL1C_PCIMAP_PAGE) + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, pci_driection); + } + if (buffer_info->skb) + dev_consume_skb_any(buffer_info->skb); + buffer_info->dma = 0; + buffer_info->skb = NULL; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); +} +/** + * atl1c_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + ring_count = tpd_ring->count; + for (index = 0; index < ring_count; index++) { + buffer_info = &tpd_ring->buffer_info[index]; + atl1c_clean_buffer(pdev, buffer_info); + } + + /* Zero out Tx-buffers */ + memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * + ring_count); + atomic_set(&tpd_ring->next_to_clean, 0); + tpd_ring->next_to_use = 0; +} + +/** + * atl1c_clean_rx_ring - Free rx-reservation skbs + * @adapter: 
board private structure + */ +static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) +{ + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + int j; + + for (j = 0; j < rfd_ring->count; j++) { + buffer_info = &rfd_ring->buffer_info[j]; + atl1c_clean_buffer(pdev, buffer_info); + } + /* zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); + rfd_ring->next_to_clean = 0; + rfd_ring->next_to_use = 0; + rrd_ring->next_to_use = 0; + rrd_ring->next_to_clean = 0; +} + +/* + * Read / Write Ptr Initialize: + */ +static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) +{ + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_buffer *buffer_info; + int i, j; + + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].next_to_use = 0; + atomic_set(&tpd_ring[i].next_to_clean, 0); + buffer_info = tpd_ring[i].buffer_info; + for (j = 0; j < tpd_ring->count; j++) + ATL1C_SET_BUFFER_STATE(&buffer_info[i], + ATL1C_BUFFER_FREE); + } + rfd_ring->next_to_use = 0; + rfd_ring->next_to_clean = 0; + rrd_ring->next_to_use = 0; + rrd_ring->next_to_clean = 0; + for (j = 0; j < rfd_ring->count; j++) { + buffer_info = &rfd_ring->buffer_info[j]; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); + } +} + +/** + * atl1c_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + pci_free_consistent(pdev, adapter->ring_header.size, + adapter->ring_header.desc, + adapter->ring_header.dma); + adapter->ring_header.desc = NULL; + + /* Note: just free tdp_ring.buffer_info, + * it contain rfd_ring.buffer_info, do not double free */ + if (adapter->tpd_ring[0].buffer_info) { + kfree(adapter->tpd_ring[0].buffer_info); + adapter->tpd_ring[0].buffer_info = NULL; + } + if (adapter->rx_page) { + put_page(adapter->rx_page); + adapter->rx_page = NULL; + } +} + +/** + * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_ring_header *ring_header = &adapter->ring_header; + int size; + int i; + int count = 0; + int rx_desc_count = 0; + u32 offset = 0; + + rrd_ring->count = rfd_ring->count; + for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++) + tpd_ring[i].count = tpd_ring[0].count; + + /* 2 tpd queue, one high priority queue, + * another normal priority queue */ + size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + + rfd_ring->count); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) + goto err_nomem; + + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].buffer_info = + (tpd_ring->buffer_info + count); + count += tpd_ring[i].count; + } + + rfd_ring->buffer_info = + (tpd_ring->buffer_info + count); + count += rfd_ring->count; + rx_desc_count += rfd_ring->count; + + /* + * real ring DMA buffer 
+ * each ring/block may need up to 8 bytes for alignment, hence the + * additional bytes tacked onto the end. + */ + ring_header->size = size = + sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + + sizeof(struct atl1c_rx_free_desc) * rx_desc_count + + sizeof(struct atl1c_recv_ret_status) * rx_desc_count + + 8 * 4; + + ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, + &ring_header->dma); + if (unlikely(!ring_header->desc)) { + dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); + goto err_nomem; + } + memset(ring_header->desc, 0, ring_header->size); + /* init TPD ring */ + + tpd_ring[0].dma = roundup(ring_header->dma, 8); + offset = tpd_ring[0].dma - ring_header->dma; + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].dma = ring_header->dma + offset; + tpd_ring[i].desc = (u8 *) ring_header->desc + offset; + tpd_ring[i].size = + sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count; + offset += roundup(tpd_ring[i].size, 8); + } + /* init RFD ring */ + rfd_ring->dma = ring_header->dma + offset; + rfd_ring->desc = (u8 *) ring_header->desc + offset; + rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count; + offset += roundup(rfd_ring->size, 8); + + /* init RRD ring */ + rrd_ring->dma = ring_header->dma + offset; + rrd_ring->desc = (u8 *) ring_header->desc + offset; + rrd_ring->size = sizeof(struct atl1c_recv_ret_status) * + rrd_ring->count; + offset += roundup(rrd_ring->size, 8); + + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) + adapter->tpd_ring; + + /* TPD */ + AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, + (u32)((tpd_ring[atl1c_trans_normal].dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + /* just enable normal priority TX queue */ + AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_normal].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_high].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, + (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK)); + + + /* RFD */ + AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, + (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO, + (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK)); + + AT_WRITE_REG(hw, REG_RFD_RING_SIZE, + rfd_ring->count & RFD_RING_SIZE_MASK); + AT_WRITE_REG(hw, REG_RX_BUF_SIZE, + adapter->rx_buffer_len & RX_BUF_SIZE_MASK); + + /* RRD */ + AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO, + (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_RRD_RING_SIZE, + (rrd_ring->count & RRD_RING_SIZE_MASK)); + + if (hw->nic_type == athr_l2c_b) { + AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); + AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L); + AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L); + AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L); + AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ + AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ + } + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); +} + +static void atl1c_configure_tx(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + int max_pay_load; + 
u16 tx_offload_thresh; + u32 txq_ctrl_data; + + tx_offload_thresh = MAX_TSO_FRAME_SIZE; + AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, + (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); + max_pay_load = pcie_get_readrq(adapter->pdev) >> 8; + hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); + /* + * if BIOS had changed the dam-read-max-length to an invalid value, + * restore it to default value + */ + if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) { + pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN); + hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN; + } + txq_ctrl_data = + hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ? + L2CB_TXQ_CFGV : L1C_TXQ_CFGV; + + AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); +} + +static void atl1c_configure_rx(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 rxq_ctrl_data; + + rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) << + RXQ_RFD_BURST_NUM_SHIFT; + + if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) + rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; + + /* aspm for gigabit */ + if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0) + rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT, + ASPM_THRUPUT_LIMIT_100M); + + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); +} + +static void atl1c_configure_dma(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 dma_ctrl_data; + + dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) | + DMA_CTRL_RREQ_PRI_DATA | + FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) | + FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) | + FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF); + + AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); +} + +/* + * Stop the mac, transmit and receive units + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static int atl1c_stop_mac(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_RXQ_CTRL, &data); + data &= ~RXQ_CTRL_EN; + AT_WRITE_REG(hw, REG_RXQ_CTRL, data); + + AT_READ_REG(hw, REG_TXQ_CTRL, &data); + data &= ~TXQ_CTRL_EN; + AT_WRITE_REG(hw, REG_TXQ_CTRL, data); + + atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY); + + AT_READ_REG(hw, REG_MAC_CTRL, &data); + data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); + AT_WRITE_REG(hw, REG_MAC_CTRL, data); + + return (int)atl1c_wait_until_idle(hw, + IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY); +} + +static void atl1c_start_mac(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 mac, txq, rxq; + + hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false; + hw->mac_speed = adapter->link_speed == SPEED_1000 ? + atl1c_mac_speed_1000 : atl1c_mac_speed_10_100; + + AT_READ_REG(hw, REG_TXQ_CTRL, &txq); + AT_READ_REG(hw, REG_RXQ_CTRL, &rxq); + AT_READ_REG(hw, REG_MAC_CTRL, &mac); + + txq |= TXQ_CTRL_EN; + rxq |= RXQ_CTRL_EN; + mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW | + MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW | + MAC_CTRL_ADD_CRC | MAC_CTRL_PAD | + MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN | + MAC_CTRL_HASH_ALG_CRC32; + if (hw->mac_duplex) + mac |= MAC_CTRL_DUPLX; + else + mac &= ~MAC_CTRL_DUPLX; + mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed); + mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len); + + AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac); +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts. 
+ * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static int atl1c_reset_mac(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u32 ctrl_data = 0; + + atl1c_stop_mac(hw); + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data); + ctrl_data |= MASTER_CTRL_OOB_DIS; + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST); + + AT_WRITE_FLUSH(hw); + msleep(10); + /* Wait at least 10ms for All module to be Idle */ + + if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) { + dev_err(&pdev->dev, + "MAC state machine can't be idle since" + " disabled for 10ms second\n"); + return -1; + } + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data); + + /* driver control speed/duplex */ + AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW); + + /* clk switch setting */ + AT_READ_REG(hw, REG_SERDES, &ctrl_data); + switch (hw->nic_type) { + case athr_l2c_b: + ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN | + SERDES_MAC_CLK_SLOWDOWN); + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + case athr_l2c_b2: + case athr_l1d_2: + ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN; + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + default: + break; + } + + return 0; +} + +static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) +{ + u16 ctrl_flags = hw->ctrl_flags; + + hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT); + atl1c_set_aspm(hw, SPEED_0); + hw->ctrl_flags = ctrl_flags; +} + +/* + * Set ASPM state. + * Enable/disable L0s/L1 depend on link state. + */ +static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed) +{ + u32 pm_ctrl_data; + u32 link_l1_timer; + + AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); + pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_MAC_ASPM_CHK); + /* L1 timer */ + if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S; + link_l1_timer = + link_speed == SPEED_1000 || link_speed == SPEED_100 ? + L1D_PMCTRL_L1_ENTRY_TM_16US : 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer); + } else { + link_l1_timer = hw->nic_type == athr_l2c_b ? 
+ L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM; + if (link_speed != SPEED_1000 && link_speed != SPEED_100) + link_l1_timer = 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_L1_ENTRY_TIMER, link_l1_timer); + } + + /* L0S/L1 enable */ + if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0) + pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK; + if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) + pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK; + + /* l2cb & l1d & l2cb2 & l1d2 */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || + hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF); + pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER | + PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_SA_DLY_EN | + PM_CTRL_HOTRST); + /* disable l0s if link down or l2cb */ + if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b) + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; + } else { /* l1c */ + pm_ctrl_data = + FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0); + if (link_speed != SPEED_0) { + pm_ctrl_data |= PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN; + pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1 | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_ASPM_L1_EN); + } else { /* link down */ + pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_ASPM_L0S_EN); + } + } + AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); + + return; +} + +/** + * atl1c_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static int atl1c_configure_mac(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 master_ctrl_data = 0; + u32 intr_modrt_data; + u32 data; + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); + master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN | + MASTER_CTRL_RX_ITIMER_EN | + MASTER_CTRL_INT_RDCLR); + /* clear interrupt status */ + AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); + /* Clear any WOL status */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + /* set Interrupt Clear Timer + * HW will enable self to assert interrupt event to system after + * waiting x-time for software to notify it accept interrupt. 
+ */ + + data = CLK_GATING_EN_ALL; + if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) { + if (hw->nic_type == athr_l2c_b) + data &= ~CLK_GATING_RXMAC_EN; + } else + data = 0; + AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data); + + AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, + hw->ict & INT_RETRIG_TIMER_MASK); + + atl1c_configure_des_ring(adapter); + + if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) { + intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_TX_TIMER_SHIFT; + intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_RX_TIMER_SHIFT; + AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); + master_ctrl_data |= + MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN; + } + + if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) + master_ctrl_data |= MASTER_CTRL_INT_RDCLR; + + master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); + + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, + hw->smb_timer & SMB_STAT_TIMER_MASK); + + /* set MTU */ + AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN); + + atl1c_configure_tx(adapter); + atl1c_configure_rx(adapter); + atl1c_configure_dma(adapter); + + return 0; +} + +static int atl1c_configure(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int num; + + atl1c_init_ring_ptrs(adapter); + atl1c_set_multi(netdev); + atl1c_restore_vlan(adapter); + + num = atl1c_alloc_rx_buffer(adapter); + if (unlikely(num == 0)) + return -ENOMEM; + + if (atl1c_configure_mac(adapter)) + return -EIO; + + return 0; +} + +static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + u32 data; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += data; + stats_item++; + hw_reg_addr += 4; + } +/* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += data; + stats_item++; + hw_reg_addr += 4; + } +} + +/** + * atl1c_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
+ */ +static struct net_device_stats *atl1c_get_stats(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &netdev->stats; + + atl1c_update_hw_stats(adapter); + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + + hw_stats->rx_align_err + + hw_stats->rx_rxf_ov; + + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_dropped = hw_stats->rx_rrd_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; + + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + + return net_stats; +} + +static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 hw_next_to_clean; + u16 reg; + + reg = type == atl1c_trans_high ? 
REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX; + + AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean); + + while (next_to_clean != hw_next_to_clean) { + buffer_info = &tpd_ring->buffer_info[next_to_clean]; + atl1c_clean_buffer(pdev, buffer_info); + if (++next_to_clean == tpd_ring->count) + next_to_clean = 0; + atomic_set(&tpd_ring->next_to_clean, next_to_clean); + } + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) { + netif_wake_queue(adapter->netdev); + } + + return true; +} + +/** + * atl1c_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t atl1c_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct atl1c_hw *hw = &adapter->hw; + int max_ints = AT_MAX_INT_WORK; + int handled = IRQ_NONE; + u32 status; + u32 reg_data; + + do { + AT_READ_REG(hw, REG_ISR, ®_data); + status = reg_data & hw->intr_mask; + + if (status == 0 || (status & ISR_DIS_INT) != 0) { + if (max_ints != AT_MAX_INT_WORK) + handled = IRQ_HANDLED; + break; + } + /* link event */ + if (status & ISR_GPHY) + atl1c_clear_phy_int(adapter); + /* Ack ISR */ + AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + if (status & ISR_RX_PKT) { + if (likely(napi_schedule_prep(&adapter->napi))) { + hw->intr_mask &= ~ISR_RX_PKT; + AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + __napi_schedule(&adapter->napi); + } + } + if (status & ISR_TX_PKT) + atl1c_clean_tx_irq(adapter, atl1c_trans_normal); + + handled = IRQ_HANDLED; + /* check if PCIE PHY Link down */ + if (status & ISR_ERROR) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "atl1c hardware error (status = 0x%x)\n", + status & ISR_ERROR); + /* reset MAC */ + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); + schedule_work(&adapter->common_task); + return IRQ_HANDLED; + } + + if (status & ISR_OVER) + if (netif_msg_intr(adapter)) + dev_warn(&pdev->dev, + "TX/RX overflow (status = 0x%x)\n", + status & ISR_OVER); + + /* link event */ + if (status & (ISR_GPHY | ISR_MANUAL)) { + netdev->stats.tx_carrier_errors++; + atl1c_link_chg_event(adapter); + break; + } + + } while (--max_ints > 0); + /* re-enable Interrupt*/ + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + return handled; +} + +static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, + struct sk_buff *skb, struct atl1c_recv_ret_status *prrs) +{ + /* + * The pid field in RRS in not correct sometimes, so we + * cannot figure out if the packet is fragmented or not, + * so we tell the KERNEL CHECKSUM_NONE + */ + skb_checksum_none_assert(skb); +} + +static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) +{ + struct sk_buff *skb; + struct page *page; + + if (adapter->rx_frag_size > PAGE_SIZE) + return netdev_alloc_skb(adapter->netdev, + adapter->rx_buffer_len); + + page = adapter->rx_page; + if (!page) { + adapter->rx_page = page = alloc_page(GFP_ATOMIC); + if (unlikely(!page)) + return NULL; + adapter->rx_page_offset = 0; + } + + skb = build_skb(page_address(page) + adapter->rx_page_offset, + adapter->rx_frag_size); + if (likely(skb)) { + adapter->rx_page_offset += adapter->rx_frag_size; + if (adapter->rx_page_offset >= PAGE_SIZE) + adapter->rx_page = NULL; + else + get_page(page); + } + return skb; +} + +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) +{ + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct atl1c_buffer 
*buffer_info, *next_info; + struct sk_buff *skb; + void *vir_addr = NULL; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct atl1c_rx_free_desc *rfd_desc; + dma_addr_t mapping; + + next_next = rfd_next_to_use = rfd_ring->next_to_use; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (next_info->flags & ATL1C_BUFFER_FREE) { + rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = atl1c_alloc_skb(adapter); + if (unlikely(!skb)) { + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, "alloc rx buffer failed\n"); + break; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + vir_addr = skb->data; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; + mapping = pci_map_single(pdev, vir_addr, + buffer_info->length, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(pdev, mapping))) { + dev_kfree_skb(skb); + buffer_info->skb = NULL; + buffer_info->length = 0; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); + netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); + break; + } + buffer_info->dma = mapping; + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_next_to_use = next_next; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* TODO: update mailbox here */ + wmb(); + rfd_ring->next_to_use = rfd_next_to_use; + AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX, + rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); + } + + return num_alloc; +} + +static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + /* the relationship between rrd and rfd is one map one */ + for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring, + rrd_ring->next_to_clean)) { + rrs->word3 &= ~RRS_RXD_UPDATED; + if (++rrd_ring->next_to_clean == rrd_ring->count) + rrd_ring->next_to_clean = 0; + } +} + +static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + u16 rfd_index; + struct atl1c_buffer *buffer_info = rfd_ring->buffer_info; + + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + for (i = 0; i < num; i++) { + buffer_info[rfd_index].skb = NULL; + ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index], + ATL1C_BUFFER_FREE); + if (++rfd_index == rfd_ring->count) + rfd_index = 0; + } + rfd_ring->next_to_clean = rfd_index; +} + +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, + int *work_done, int work_to_do) +{ + u16 rfd_num, rfd_index; + u16 count = 0; + u16 length; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct sk_buff *skb; + struct atl1c_recv_ret_status *rrs; + struct atl1c_buffer *buffer_info; + + while (1) { + if (*work_done >= work_to_do) + break; + rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean); + if (likely(RRS_RXD_IS_VALID(rrs->word3))) { + rfd_num = (rrs->word0 >> 
RRS_RX_RFD_CNT_SHIFT) & + RRS_RX_RFD_CNT_MASK; + if (unlikely(rfd_num != 1)) + /* TODO support mul rfd*/ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not support yet!\n"); + goto rrs_checked; + } else { + break; + } +rrs_checked: + atl1c_clean_rrd(rrd_ring, rrs, rfd_num); + if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) { + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "wrong packet! rrs word3 is %x\n", + rrs->word3); + continue; + } + + length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK); + /* Good Receive */ + if (likely(rfd_num == 1)) { + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + buffer_info = &rfd_ring->buffer_info[rfd_index]; + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + } else { + /* TODO */ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not support yet!\n"); + break; + } + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + skb_put(skb, length - ETH_FCS_LEN); + skb->protocol = eth_type_trans(skb, netdev); + atl1c_rx_checksum(adapter, skb, rrs); + if (rrs->word3 & RRS_VLAN_INS) { + u16 vlan; + + AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); + vlan = le16_to_cpu(vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); + } + netif_receive_skb(skb); + + (*work_done)++; + count++; + } + if (count) + atl1c_alloc_rx_buffer(adapter); +} + +/** + * atl1c_clean - NAPI Rx polling callback + */ +static int atl1c_clean(struct napi_struct *napi, int budget) +{ + struct atl1c_adapter *adapter = + container_of(napi, struct atl1c_adapter, napi); + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + /* just enable one RXQ */ + atl1c_clean_rx_irq(adapter, &work_done, budget); + + if (work_done < budget) { +quit_polling: + napi_complete(napi); + adapter->hw.intr_mask |= ISR_RX_PKT; + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void atl1c_netpoll(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1c_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tpd_ring->next_to_clean); + next_to_use = tpd_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ? 
+ (next_to_clean - next_to_use - 1) : + (tpd_ring->count + next_to_clean - next_to_use - 1); +} + +/* + * get next usable tpd + * Note: should call atl1c_tdp_avail to make sure + * there is enough tpd to use + */ +static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_tpd_desc *tpd_desc; + u16 next_to_use = 0; + + next_to_use = tpd_ring->next_to_use; + if (++tpd_ring->next_to_use == tpd_ring->count) + tpd_ring->next_to_use = 0; + tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use); + memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc)); + return tpd_desc; +} + +static struct atl1c_buffer * +atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd) +{ + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + + return &tpd_ring->buffer_info[tpd - + (struct atl1c_tpd_desc *)tpd_ring->desc]; +} + +/* Calculate the transmit packet descript needed*/ +static u16 atl1c_cal_tpd_req(const struct sk_buff *skb) +{ + u16 tpd_req; + u16 proto_hdr_len = 0; + + tpd_req = skb_shinfo(skb)->nr_frags + 1; + + if (skb_is_gso(skb)) { + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) + tpd_req++; + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + tpd_req++; + } + return tpd_req; +} + +static int atl1c_tso_csum(struct atl1c_adapter *adapter, + struct sk_buff *skb, + struct atl1c_tpd_desc **tpd, + enum atl1c_trans_queue type) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned short offload_type; + u8 hdr_len; + u32 real_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + offload_type = skb_shinfo(skb)->gso_type; + + if (offload_type & SKB_GSO_TCPV4) { + real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len)); + + if (real_len < skb->len) + pskb_trim(skb, real_len); + + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV4 tso with zero data??\n"); + goto check_sum; + } else { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic( + ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT; + } + } + + if (offload_type & SKB_GSO_TCPV6) { + struct atl1c_tpd_ext_desc *etpd = + *(struct atl1c_tpd_ext_desc **)(tpd); + + memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc)); + *tpd = atl1c_get_tpd(adapter, type); + ipv6_hdr(skb)->payload_len = 0; + /* check payload == 0 byte ? 
*/ + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV6 tso with zero data??\n"); + goto check_sum; + } else + tcp_hdr(skb)->check = ~csum_ipv6_magic( + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + etpd->word1 |= 1 << TPD_LSO_EN_SHIFT; + etpd->word1 |= 1 << TPD_LSO_VER_SHIFT; + etpd->pkt_len = cpu_to_le32(skb->len); + (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT; + } + + (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT; + (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) << + TPD_TCPHDR_OFFSET_SHIFT; + (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << + TPD_MSS_SHIFT; + return 0; + } + +check_sum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + cso = skb_checksum_start_offset(skb); + + if (unlikely(cso & 0x1)) { + if (netif_msg_tx_err(adapter)) + dev_err(&adapter->pdev->dev, + "payload offset should not an event number\n"); + return -1; + } else { + css = cso + skb->csum_offset; + + (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) << + TPD_CCSUM_OFFSET_SHIFT; + (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT; + } + } + return 0; +} + +static void atl1c_tx_rollback(struct atl1c_adapter *adpt, + struct atl1c_tpd_desc *first_tpd, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct atl1c_tpd_desc *tpd; + u16 first_index, index; + + first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; + index = first_index; + while (index != tpd_ring->next_to_use) { + tpd = ATL1C_TPD_DESC(tpd_ring, index); + buffer_info = &tpd_ring->buffer_info[index]; + atl1c_clean_buffer(adpt->pdev, buffer_info); + memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); + if (++index == tpd_ring->count) + index = 0; + } + tpd_ring->next_to_use = first_index; +} + +static int atl1c_tx_map(struct atl1c_adapter *adapter, + struct sk_buff *skb, struct atl1c_tpd_desc *tpd, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_desc *use_tpd = NULL; + struct atl1c_buffer *buffer_info = NULL; + u16 buf_len = skb_headlen(skb); + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 0; + u16 nr_frags; + u16 f; + int tso; + + nr_frags = skb_shinfo(skb)->nr_frags; + tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK; + if (tso) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = map_len; + buffer_info->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, + buffer_info->dma))) + goto err_dma; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + if (mapped_len < buf_len) { + /* mapped_len == 0, means we should use the first tpd, + which is given by caller */ + if (mapped_len == 0) + use_tpd = tpd; + else { + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + } + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = buf_len - mapped_len; + buffer_info->dma = + 
pci_map_single(adapter->pdev, skb->data + mapped_len, + buffer_info->length, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, + buffer_info->dma))) + goto err_dma; + + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = skb_frag_size(frag); + buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, 0, + buffer_info->length, + DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) + goto err_dma; + + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + /* The last tpd */ + use_tpd->word1 |= 1 << TPD_EOP_SHIFT; + /* The last buffer info contain the skb address, + so it will be free after unmap */ + buffer_info->skb = skb; + + return 0; + +err_dma: + buffer_info->dma = 0; + buffer_info->length = 0; + return -1; +} + +static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, + struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u16 reg; + + reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX; + AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use); +} + +static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u16 tpd_req = 1; + struct atl1c_tpd_desc *tpd; + enum atl1c_trans_queue type = atl1c_trans_normal; + + if (test_bit(__AT_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tpd_req = atl1c_cal_tpd_req(skb); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { + if (netif_msg_pktdata(adapter)) + dev_info(&adapter->pdev->dev, "tx locked\n"); + return NETDEV_TX_LOCKED; + } + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* no enough descriptor, just stop queue */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + tpd = atl1c_get_tpd(adapter, type); + + /* do TSO and check sum */ + if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) { + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb_vlan_tag_present(skb))) { + u16 vlan = skb_vlan_tag_get(skb); + __le16 tag; + + vlan = cpu_to_le16(vlan); + AT_VLAN_TO_TAG(vlan, tag); + tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT; + tpd->vlan_tag = tag; + } + + if (skb_network_offset(skb) != ETH_HLEN) + tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ + + if (atl1c_tx_map(adapter, skb, tpd, type) < 0) { + netif_info(adapter, tx_done, adapter->netdev, + "tx-skb droppted due to dma error\n"); + /* roll back tpd/buffer */ + atl1c_tx_rollback(adapter, tpd, type); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + } else { + atl1c_tx_queue(adapter, skb, tpd, type); + 
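The transmit path above treats the TPD ring as a circular buffer: atl1c_cal_tpd_req() counts how many descriptors an skb needs, atl1c_tpd_avail() reports how many slots are free (one descriptor is deliberately left unused so a full ring can be told apart from an empty one), and atl1c_xmit_frame() stops the queue when the request cannot be met. A minimal standalone sketch of that free-slot arithmetic, with hypothetical names and detached from the driver structures:

#include <stdint.h>

/* Sketch only: the same arithmetic as atl1c_tpd_avail(), one slot kept unused. */
static uint16_t ring_slots_free(uint16_t next_to_use, uint16_t next_to_clean,
                                uint16_t count)
{
        return next_to_clean > next_to_use ?
               next_to_clean - next_to_use - 1 :
               count + next_to_clean - next_to_use - 1;
}

/* Example: count = 1024, next_to_use = 1020, next_to_clean = 4
 * gives 1024 + 4 - 1020 - 1 = 7 free descriptors. */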
spin_unlock_irqrestore(&adapter->tx_lock, flags); + } + + return NETDEV_TX_OK; +} + +static void atl1c_free_irq(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +} + +static int atl1c_request_irq(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int flags = 0; + int err = 0; + + adapter->have_msi = true; + err = pci_enable_msi(adapter->pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + "Unable to allocate MSI interrupt Error: %d\n", + err); + adapter->have_msi = false; + } + + if (!adapter->have_msi) + flags |= IRQF_SHARED; + err = request_irq(adapter->pdev->irq, atl1c_intr, flags, + netdev->name, netdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + "Unable to allocate interrupt Error: %d\n", + err); + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + return err; + } + if (netif_msg_ifup(adapter)) + dev_dbg(&pdev->dev, "atl1c_request_irq OK\n"); + return err; +} + + +static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter) +{ + /* release tx-pending skbs and reset tx/rx ring index */ + atl1c_clean_tx_ring(adapter, atl1c_trans_normal); + atl1c_clean_tx_ring(adapter, atl1c_trans_high); + atl1c_clean_rx_ring(adapter); +} + +static int atl1c_up(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + netif_carrier_off(netdev); + + err = atl1c_configure(adapter); + if (unlikely(err)) + goto err_up; + + err = atl1c_request_irq(adapter); + if (unlikely(err)) + goto err_up; + + atl1c_check_link_status(adapter); + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1c_irq_enable(adapter); + netif_start_queue(netdev); + return err; + +err_up: + atl1c_clean_rx_ring(adapter); + return err; +} + +static void atl1c_down(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + atl1c_del_timer(adapter); + adapter->work_event = 0; /* clear all event */ + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + atl1c_irq_disable(adapter); + atl1c_free_irq(adapter); + /* disable ASPM if device inactive */ + atl1c_disable_l0s_l1(&adapter->hw); + /* reset MAC to disable all RX/TX */ + atl1c_reset_mac(&adapter->hw); + msleep(1); + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1c_reset_dma_ring(adapter); +} + +/** + * atl1c_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
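atl1c_request_irq() above tries MSI first and falls back to a shared legacy interrupt, and atl1c_free_irq() undoes both steps. A compact sketch of that fallback pattern, using the same legacy pci_enable_msi()/request_irq() calls as the driver (error messages omitted, helper name hypothetical):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

static int sketch_request_irq(struct pci_dev *pdev, struct net_device *netdev,
                              irq_handler_t handler, bool *have_msi)
{
        unsigned long flags = 0;
        int err;

        *have_msi = !pci_enable_msi(pdev);      /* fall back when MSI fails */
        if (!*have_msi)
                flags |= IRQF_SHARED;           /* a legacy line may be shared */

        err = request_irq(pdev->irq, handler, flags, netdev->name, netdev);
        if (err && *have_msi)
                pci_disable_msi(pdev);          /* roll back MSI on failure */
        return err;
}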
+ */ +static int atl1c_open(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + err = atl1c_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1c_up(adapter); + if (unlikely(err)) + goto err_up; + + return 0; + +err_up: + atl1c_free_irq(adapter); + atl1c_free_ring_resources(adapter); + atl1c_reset_mac(&adapter->hw); + return err; +} + +/** + * atl1c_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl1c_close(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + set_bit(__AT_DOWN, &adapter->flags); + cancel_work_sync(&adapter->common_task); + atl1c_down(adapter); + atl1c_free_ring_resources(adapter); + return 0; +} + +static int atl1c_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; + + atl1c_disable_l0s_l1(hw); + if (netif_running(netdev)) { + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1c_down(adapter); + } + netif_device_detach(netdev); + + if (wufc) + if (atl1c_phy_to_ps_link(hw) != 0) + dev_dbg(&pdev->dev, "phy power saving failed"); + + atl1c_power_saving(hw, wufc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int atl1c_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); + + atl1c_phy_reset(&adapter->hw); + atl1c_reset_mac(&adapter->hw); + atl1c_phy_init(&adapter->hw); + +#if 0 + AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data); + pm_data &= ~PM_CTRLSTAT_PME_EN; + AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data); +#endif + + netif_device_attach(netdev); + if (netif_running(netdev)) + atl1c_up(adapter); + + return 0; +} +#endif + +static void atl1c_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + atl1c_suspend(&pdev->dev); + pci_wake_from_d3(pdev, adapter->wol); + pci_set_power_state(pdev, PCI_D3hot); +} + +static const struct net_device_ops atl1c_netdev_ops = { + .ndo_open = atl1c_open, + .ndo_stop = atl1c_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = atl1c_xmit_frame, + .ndo_set_mac_address = atl1c_set_mac_addr, + .ndo_set_rx_mode = atl1c_set_multi, + .ndo_change_mtu = atl1c_change_mtu, + .ndo_fix_features = atl1c_fix_features, + .ndo_set_features = atl1c_set_features, + .ndo_do_ioctl = atl1c_ioctl, + .ndo_tx_timeout = atl1c_tx_timeout, + .ndo_get_stats = atl1c_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1c_netpoll, +#endif +}; + +static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ 
+ SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->netdev_ops = &atl1c_netdev_ops; + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1c_set_ethtool_ops(netdev); + + /* TODO: add when ready */ + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_TSO | + NETIF_F_TSO6; + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; + return 0; +} + +/** + * atl1c_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1c_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1c_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1c_adapter *adapter; + static int cards_found; + + int err = 0; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1c chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); + goto err_dma; + } + + err = pci_request_regions(pdev, atl1c_driver_name); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1c_adapter)); + if (netdev == NULL) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + err = atl1c_init_netdev(netdev, pdev); + if (err) { + dev_err(&pdev->dev, "init netdevice failed\n"); + goto err_init_netdev; + } + adapter = netdev_priv(netdev); + adapter->bd_number = cards_found; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.adapter = adapter; + adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg); + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (!adapter->hw.hw_addr) { + err = -EIO; + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_ioremap; + } + + /* init mii data */ + adapter->mii.dev = netdev; + adapter->mii.mdio_read = atl1c_mdio_read; + adapter->mii.mdio_write = atl1c_mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; + netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); + setup_timer(&adapter->phy_config_timer, atl1c_phy_config, + (unsigned long)adapter); + /* setup the private structure */ + err = atl1c_sw_init(adapter); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto err_sw_init; + } + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); + + /* Init GPHY as early as possible due to power saving issue */ + atl1c_phy_reset(&adapter->hw); + + err = atl1c_reset_mac(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + + /* reset the controller to + * put the device in a known good starting state 
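The comment earlier in atl1c_probe() explains why the device is restricted to 32-bit DMA even though the chip can address 64 bits: the high 32 address bits live in a single shared register, so only one aligned 4 GB window is usable at a time. As a side note, the same restriction can also be expressed through the generic DMA API in one call; this is a sketch, not part of this driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: equivalent 32-bit restriction for both streaming and coherent DMA. */
static int limit_to_32bit_dma(struct pci_dev *pdev)
{
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}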
*/ + err = atl1c_phy_init(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + if (atl1c_read_mac_addr(&adapter->hw)) { + /* got a random MAC address, set NET_ADDR_RANDOM to netdev */ + netdev->addr_assign_type = NET_ADDR_RANDOM; + } + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + if (netif_msg_probe(adapter)) + dev_dbg(&pdev->dev, "mac address : %pM\n", + adapter->hw.mac_addr); + + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); + INIT_WORK(&adapter->common_task, atl1c_common_task); + adapter->work_event = 0; + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto err_register; + } + + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION); + cards_found++; + return 0; + +err_reset: +err_register: +err_sw_init: + iounmap(adapter->hw.hw_addr); +err_init_netdev: +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * atl1c_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1c_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void atl1c_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + /* restore permanent address */ + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr); + atl1c_phy_disable(&adapter->hw); + + iounmap(adapter->hw.hw_addr); + + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(netdev); +} + +/** + * atl1c_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + atl1c_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * atl1c_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1c_reset_mac(&adapter->hw); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * atl1c_io_resume - called when traffic can start flowing again. 
+ * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the atl1c_resume routine. + */ +static void atl1c_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1c_up(adapter)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static const struct pci_error_handlers atl1c_err_handler = { + .error_detected = atl1c_io_error_detected, + .slot_reset = atl1c_io_slot_reset, + .resume = atl1c_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume); + +static struct pci_driver atl1c_driver = { + .name = atl1c_driver_name, + .id_table = atl1c_pci_tbl, + .probe = atl1c_probe, + .remove = atl1c_remove, + .shutdown = atl1c_shutdown, + .err_handler = &atl1c_err_handler, + .driver.pm = &atl1c_pm_ops, +}; + +module_pci_driver(atl1c_driver); diff --git a/drivers/net/ethernet/atheros/atl1e/Makefile b/drivers/net/ethernet/atheros/atl1e/Makefile new file mode 100644 index 000000000..bc11be824 --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1e/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_ATL1E) += atl1e.o +atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h new file mode 100644 index 000000000..0212dac7e --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h @@ -0,0 +1,507 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2007 xiong huang + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
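For reference, SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume) above builds a struct dev_pm_ops whose system suspend/resume callbacks are the two named functions, and module_pci_driver(atl1c_driver) hides the usual module init/exit boilerplate. Roughly, that macro expands to something like the following (illustrative approximation only):

#include <linux/module.h>
#include <linux/pci.h>

/* Approximate expansion of module_pci_driver(atl1c_driver). */
static int __init atl1c_driver_init(void)
{
        return pci_register_driver(&atl1c_driver);
}
module_init(atl1c_driver_init);

static void __exit atl1c_driver_exit(void)
{
        pci_unregister_driver(&atl1c_driver);
}
module_exit(atl1c_driver_exit);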
+ */ + +#ifndef _ATL1E_H_ +#define _ATL1E_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl1e_hw.h" + +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Error Codes */ +#define AT_ERR_EEPROM 1 +#define AT_ERR_PHY 2 +#define AT_ERR_CONFIG 3 +#define AT_ERR_PARAM 4 +#define AT_ERR_MAC_TYPE 5 +#define AT_ERR_PHY_TYPE 6 +#define AT_ERR_PHY_SPEED 7 +#define AT_ERR_PHY_RES 8 +#define AT_ERR_TIMEOUT 9 + +#define MAX_JUMBO_FRAME_SIZE 0x2000 + +#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \ + _tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\ + (((_vlan) >> 9) & 8)) + +#define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \ + _vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\ + (((_tdp) & 0x88) << 5)) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_PAGE_NUM_PER_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 10 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 28 + +#define AT_REGS_LEN 75 +#define AT_EEPROM_LEN 512 +#define AT_ADV_MASK (ADVERTISE_10_HALF |\ + ADVERTISE_10_FULL |\ + ADVERTISE_100_HALF |\ + ADVERTISE_100_FULL |\ + ADVERTISE_1000_FULL) + +/* tpd word 2 */ +#define TPD_BUFLEN_MASK 0x3FFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_DMAINT_MASK 0x0001 +#define TPD_DMAINT_SHIFT 14 +#define TPD_PKTNT_MASK 0x0001 +#define TPD_PKTINT_SHIFT 15 +#define TPD_VLANTAG_MASK 0xFFFF +#define TPD_VLAN_SHIFT 16 + +/* tpd word 3 bits 0:4 */ +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 0 +#define TPD_IP_VERSION_MASK 0x0001 +#define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */ +#define TPD_INS_VL_TAG_MASK 0x0001 +#define TPD_INS_VL_TAG_SHIFT 2 +#define TPD_CC_SEGMENT_EN_MASK 0x0001 +#define TPD_CC_SEGMENT_EN_SHIFT 3 +#define TPD_SEGMENT_EN_MASK 0x0001 +#define TPD_SEGMENT_EN_SHIFT 4 + +/* tdp word 3 bits 5:7 if ip version is 0 */ +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 5 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 6 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 7 + +/* tdp word 3 bits 5:7 if ip version is 1 */ +#define TPD_V6_IPHLLO_MASK 0x0007 +#define TPD_V6_IPHLLO_SHIFT 7 + +/* tpd word 3 bits 8:9 bit */ +#define TPD_VL_TAGGED_MASK 0x0001 +#define TPD_VL_TAGGED_SHIFT 8 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 9 + +/* tdp word 3 bits 10:13 if ip version is 0 */ +#define TDP_V4_IPHL_MASK 0x000F +#define TPD_V4_IPHL_SHIFT 10 + +/* tdp word 3 bits 10:13 if ip version is 1 */ +#define TPD_V6_IPHLHI_MASK 0x000F +#define TPD_V6_IPHLHI_SHIFT 10 + +/* tpd word 3 bit 14:31 if segment enabled */ +#define TPD_TCPHDRLEN_MASK 0x000F +#define TPD_TCPHDRLEN_SHIFT 
14 +#define TPD_HDRFLAG_MASK 0x0001 +#define TPD_HDRFLAG_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 19 + +/* tdp word 3 bit 16:31 if custom csum enabled */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 16 +#define TPD_CCSUMOFFSET_MASK 0x00FF +#define TPD_CCSUMOFFSET_SHIFT 24 + +struct atl1e_tpd_desc { + __le64 buffer_addr; + __le32 word2; + __le32 word3; +}; + +/* how about 0x2000 */ +#define MAX_TX_BUF_LEN 0x2000 +#define MAX_TX_BUF_SHIFT 13 +#define MAX_TSO_SEG_SIZE 0x3c00 + +/* rrs word 1 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 16 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 30 + +#define RRS_IS_RSS_IPV4 0x0001 +#define RRS_IS_RSS_IPV4_TCP 0x0002 +#define RRS_IS_RSS_IPV6 0x0004 +#define RRS_IS_RSS_IPV6_TCP 0x0008 +#define RRS_IS_IPV6 0x0010 +#define RRS_IS_IP_FRAG 0x0020 +#define RRS_IS_IP_DF 0x0040 +#define RRS_IS_802_3 0x0080 +#define RRS_IS_VLAN_TAG 0x0100 +#define RRS_IS_ERR_FRAME 0x0200 +#define RRS_IS_IPV4 0x0400 +#define RRS_IS_UDP 0x0800 +#define RRS_IS_TCP 0x1000 +#define RRS_IS_BCAST 0x2000 +#define RRS_IS_MCAST 0x4000 +#define RRS_IS_PAUSE 0x8000 + +#define RRS_ERR_BAD_CRC 0x0001 +#define RRS_ERR_CODE 0x0002 +#define RRS_ERR_DRIBBLE 0x0004 +#define RRS_ERR_RUNT 0x0008 +#define RRS_ERR_RX_OVERFLOW 0x0010 +#define RRS_ERR_TRUNC 0x0020 +#define RRS_ERR_IP_CSUM 0x0040 +#define RRS_ERR_L4_CSUM 0x0080 +#define RRS_ERR_LENGTH 0x0100 +#define RRS_ERR_DES_ADDR 0x0200 + +struct atl1e_recv_ret_status { + u16 seq_num; + u16 hash_lo; + __le32 word1; + u16 pkt_flag; + u16 err_flag; + u16 hash_hi; + u16 vtag; +}; + +enum atl1e_dma_req_block { + atl1e_dma_req_128 = 0, + atl1e_dma_req_256 = 1, + atl1e_dma_req_512 = 2, + atl1e_dma_req_1024 = 3, + atl1e_dma_req_2048 = 4, + atl1e_dma_req_4096 = 5 +}; + +enum atl1e_rrs_type { + atl1e_rrs_disable = 0, + atl1e_rrs_ipv4 = 1, + atl1e_rrs_ipv4_tcp = 2, + atl1e_rrs_ipv6 = 4, + atl1e_rrs_ipv6_tcp = 8 +}; + +enum atl1e_nic_type { + athr_l1e = 0, + athr_l2e_revA = 1, + athr_l2e_revB = 2 +}; + +struct atl1e_hw_stats { + /* rx */ + unsigned long rx_ok; /* The number of good packet received. */ + unsigned long rx_bcast; /* The number of good broadcast packet received. */ + unsigned long rx_mcast; /* The number of good multicast packet received. */ + unsigned long rx_pause; /* The number of Pause packet received. */ + unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */ + unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ + unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */ + unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. 
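The TPD and RRS definitions above describe each descriptor field with a FIELD_MASK/FIELD_SHIFT pair, the same convention the atl1c code earlier in this patch reads with expressions of the form (word >> SHIFT) & MASK. A small illustrative pair of helpers (hypothetical names, not part of the driver) showing how such fields are packed and unpacked:

#include <linux/types.h>

/* Sketch: pack/unpack a descriptor field described by a MASK/SHIFT pair. */
static inline u32 desc_field_set(u32 word, u32 val, u32 mask, int shift)
{
        return (word & ~(mask << shift)) | ((val & mask) << shift);
}

static inline u32 desc_field_get(u32 word, u32 mask, int shift)
{
        return (word >> shift) & mask;
}

/* e.g. setting the MSS field: w = desc_field_set(w, mss, TPD_MSS_MASK, TPD_MSS_SHIFT); */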
*/ + unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */ + unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + unsigned long rx_align_err; /* Alignment Error */ + unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + unsigned long tx_ok; /* The number of good packet transmitted. */ + unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */ + unsigned long tx_mcast; /* The number of good multicast packet transmitted. */ + unsigned long tx_pause; /* The number of Pause packet transmitted. */ + unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ + unsigned long tx_defer; /* The number of packets transmitted that is deferred. */ + unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ + unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */ + unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ + unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. 
*/ + unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */ +}; + +struct atl1e_hw { + u8 __iomem *hw_addr; /* inner register address */ + resource_size_t mem_rang; + struct atl1e_adapter *adapter; + enum atl1e_nic_type nic_type; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 pci_cmd_word; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + u8 preamble_len; + u16 max_frame_size; + u16 rx_jumbo_th; + u16 tx_jumbo_th; + + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 imt; /* Interrupt Moderator timer ( 2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u32 smb_timer; + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u16 rx_count_down; /* 2us resolution */ + u16 tx_count_down; + + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ + enum atl1e_rrs_type rrs_type; + u32 base_cpu; + u32 indirect_tab; + + enum atl1e_dma_req_block dmar_block; + enum atl1e_dma_req_block dmaw_block; + u8 dmaw_dly_cnt; + u8 dmar_dly_cnt; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; +}; + +/* + * wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct atl1e_tx_buffer { + struct sk_buff *skb; + u16 flags; +#define ATL1E_TX_PCIMAP_SINGLE 0x0001 +#define ATL1E_TX_PCIMAP_PAGE 0x0002 +#define ATL1E_TX_PCIMAP_TYPE_MASK 0x0003 + u16 length; + dma_addr_t dma; +}; + +#define ATL1E_SET_PCIMAP_TYPE(tx_buff, type) do { \ + ((tx_buff)->flags) &= ~ATL1E_TX_PCIMAP_TYPE_MASK; \ + ((tx_buff)->flags) |= (type); \ + } while (0) + +struct atl1e_rx_page { + dma_addr_t dma; /* receive rage DMA address */ + u8 *addr; /* receive rage virtual address */ + dma_addr_t write_offset_dma; /* the DMA address which contain the + receive data offset in the page */ + u32 *write_offset_addr; /* the virtaul address which contain + the receive data offset in the page */ + u32 read_offset; /* the offset where we have read */ +}; + +struct atl1e_rx_page_desc { + struct atl1e_rx_page rx_page[AT_PAGE_NUM_PER_QUEUE]; + u8 rx_using; + u16 rx_nxseq; +}; + +/* transmit packet descriptor (tpd) ring */ +struct atl1e_tx_ring { + struct atl1e_tpd_desc *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 count; /* the count of transmit rings */ + rwlock_t tx_lock; + u16 next_to_use; + atomic_t next_to_clean; + struct atl1e_tx_buffer *tx_buffer; + dma_addr_t cmb_dma; + u32 *cmb; +}; + +/* receive packet descriptor ring */ +struct atl1e_rx_ring { + void *desc; + dma_addr_t dma; + int size; + u32 page_size; /* bytes length of rxf page */ + u32 real_page_size; /* real_page_size = page_size + jumbo + aliagn */ + struct atl1e_rx_page_desc rx_page_desc[AT_MAX_RECEIVE_QUEUE]; +}; + +/* board specific private data structure */ +struct atl1e_adapter { + struct net_device *netdev; + struct pci_dev 
*pdev; + struct napi_struct napi; + struct mii_if_info mii; /* MII interface info */ + struct atl1e_hw hw; + struct atl1e_hw_stats hw_stats; + + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t irq_sem; + + struct work_struct reset_task; + struct work_struct link_chg_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + dma_addr_t ring_dma; + void *ring_vir_addr; + u32 ring_size; + + struct atl1e_tx_ring tx_ring; + struct atl1e_rx_ring rx_ring; + int num_rx_queues; + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + + u32 bd_number; /* board number;*/ + u32 pci_state[16]; + u32 *config_space; +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg) ( \ + readl((a)->hw_addr + reg)) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg) (\ + readw((a)->hw_addr + reg)) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern char atl1e_driver_name[]; +extern char atl1e_driver_version[]; + +void atl1e_check_options(struct atl1e_adapter *adapter); +int atl1e_up(struct atl1e_adapter *adapter); +void atl1e_down(struct atl1e_adapter *adapter); +void atl1e_reinit_locked(struct atl1e_adapter *adapter); +s32 atl1e_reset_hw(struct atl1e_hw *hw); +void atl1e_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1_E_H_ */ diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c new file mode 100644 index 000000000..1be072f4a --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c @@ -0,0 +1,392 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ + +#include +#include +#include + +#include "atl1e.h" + +static int atl1e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->nic_type == athr_l1e) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl1e_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u16 adv4, adv9; + + if ((ecmd->advertising&ADVERTISE_1000_FULL)) { + if (hw->nic_type == athr_l1e) { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + } else if (ecmd->advertising&ADVERTISE_1000_HALF) { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } else { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } + ecmd->advertising = hw->autoneg_advertised | + ADVERTISED_TP | ADVERTISED_Autoneg; + + adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL; + adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; + if (hw->autoneg_advertised & ADVERTISE_10_HALF) + adv4 |= ADVERTISE_10HALF; + if (hw->autoneg_advertised & ADVERTISE_10_FULL) + adv4 |= ADVERTISE_10FULL; + if (hw->autoneg_advertised & ADVERTISE_100_HALF) + adv4 |= ADVERTISE_100HALF; + if (hw->autoneg_advertised & ADVERTISE_100_FULL) + adv4 |= ADVERTISE_100FULL; + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) + adv9 |= ADVERTISE_1000FULL; + + if (adv4 != hw->mii_autoneg_adv_reg || + adv9 != hw->mii_1000t_ctrl_reg) { + hw->mii_autoneg_adv_reg = adv4; + hw->mii_1000t_ctrl_reg = adv9; + hw->re_autoneg = true; + } + + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + atl1e_down(adapter); + atl1e_up(adapter); + } else + atl1e_reset_hw(&adapter->hw); + + clear_bit(__AT_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl1e_get_msglevel(struct net_device *netdev) +{ +#ifdef DBG + return 1; +#else + return 0; +#endif +} + +static int atl1e_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN * sizeof(u32); +} + +static void atl1e_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP); + regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + 
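atl1e.h above wraps MMIO access in the AT_READ_REG()/AT_WRITE_REG() family of macros over readl()/writel(), and the ethtool and hw code below uses them for simple read-modify-write sequences (see atl1e_check_eeprom_exist(), for instance). A minimal sketch of that idiom; the helper and its arguments are hypothetical:

#include "atl1e.h"

/* Sketch: read-modify-write through the AT_* accessors. */
static void example_set_bits(struct atl1e_hw *hw, u32 reg, u32 bits)
{
        u32 val = AT_READ_REG(hw, reg);

        AT_WRITE_REG(hw, reg, val | bits);
        AT_WRITE_FLUSH(hw);     /* read back to post the write */
}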
regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG); + regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL); + regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); + regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL); + regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT); + regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); + regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL); + regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER); + regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS); + regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL); + regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK); + regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL); + regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG); + regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4); + regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE); + regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4); + regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); + regs_buff[20] = AT_READ_REG(hw, REG_MTU); + regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL); + regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR); + regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN); + regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR); + regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR); + regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN); + regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR); + regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR); + + atl1e_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[73] = (u32)phy_data; + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[74] = (u32)phy_data; +} + +static int atl1e_get_eeprom_len(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (!atl1e_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1e_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (atl1e_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int atl1e_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + u32 *ptr; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 3) { + /* need read/modify/write of first changed EEPROM 
word */ + /* only the second byte of the word is being modified */ + if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) { + ret_val = -EIO; + goto out; + } + ptr++; + } + if (((eeprom->offset + eeprom->len) & 3)) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + + if (!atl1e_read_eeprom(hw, last_dword * 4, + &(eeprom_buff[last_dword - first_dword]))) { + ret_val = -EIO; + goto out; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_dword - first_dword + 1; i++) { + if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4), + eeprom_buff[i])) { + ret_val = -EIO; + goto out; + } + } +out: + kfree(eeprom_buff); + return ret_val; +} + +static void atl1e_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1e_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl1e_get_regs_len(netdev); + drvinfo->eedump_len = atl1e_get_eeprom_len(netdev); +} + +static void atl1e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1e_nway_reset(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1e_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1e_ethtool_ops = { + .get_settings = atl1e_get_settings, + .set_settings = atl1e_set_settings, + .get_drvinfo = atl1e_get_drvinfo, + .get_regs_len = atl1e_get_regs_len, + .get_regs = atl1e_get_regs, + .get_wol = atl1e_get_wol, + .set_wol = atl1e_set_wol, + .get_msglevel = atl1e_get_msglevel, + .nway_reset = atl1e_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1e_get_eeprom_len, + .get_eeprom = atl1e_get_eeprom, + .set_eeprom = atl1e_set_eeprom, +}; + +void atl1e_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &atl1e_ethtool_ops; +} diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c new file mode 100644 index 000000000..113565da1 --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.c @@ -0,0 
+1,650 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include + +#include "atl1e.h" + +/* + * check_eeprom_exist + * return 0 if eeprom exist + */ +int atl1e_check_eeprom_exist(struct atl1e_hw *hw) +{ + u32 value; + + value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + } + value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 0 : 1; +} + +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* hight dword */ + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* + * atl1e_get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1e_get_permanent_address(struct atl1e_hw *hw) +{ + u32 addr[2]; + u32 i; + u32 twsi_ctrl_data; + u8 eth_addr[ETH_ALEN]; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1e_check_eeprom_exist(hw)) { + /* eeprom exist */ + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; + AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); + for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { + msleep(10); + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) + break; + } + if (i >= AT_TWSI_EEPROM_TIMEOUT) + return AT_ERR_TIMEOUT; + } + + /* maybe MAC-address is from BIOS */ + addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *)&addr[1]); + + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return AT_ERR_EEPROM; +} + +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value) +{ + return true; +} + +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + return false; /* address do not align */ + + AT_WRITE_REG(hw, REG_VPD_DATA, 0); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + AT_WRITE_REG(hw, REG_VPD_CAP, control); + + for (i = 0; i < 10; i++) { + msleep(2); + control = AT_READ_REG(hw, REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & 
VPD_CAP_VPD_FLAG) { + *p_value = AT_READ_REG(hw, REG_VPD_DATA); + return true; + } + return false; /* timeout */ +} + +void atl1e_force_ps(struct atl1e_hw *hw) +{ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, + GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET); +} + +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +int atl1e_read_mac_addr(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_get_permanent_address(hw); + if (err) + return AT_ERR_EEPROM; + memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); + return 0; +} + +/* + * atl1e_hash_mc_addr + * purpose + * set hash value for a multicast address + */ +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) +{ + u32 crc32; + u32 value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. + */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16)val; + return 0; + } + + return AT_ERR_PHY; +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return AT_ERR_PHY; +} + +/* + * atl1e_init_pcie - init PCIE module + */ +static void atl1e_init_pcie(struct atl1e_hw *hw) +{ + u32 value; + /* comment 2lines below to save more 
power when suspended + value = LTSSM_TEST_MODE_DEF; + AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); + */ + + /* pcie flow control mode change */ + value = AT_READ_REG(hw, 0x1008); + value |= 0x8000; + AT_WRITE_REG(hw, 0x1008, value); +} +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + if (0 != hw->mii_autoneg_adv_reg) + return 0; + /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; + + /* + * Need to parse autoneg_advertised and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~ADVERTISE_ALL; + mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; + + /* + * Need to parse MediaType and setup the + * appropriate PHY registers. + */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= ADVERTISE_ALL; + hw->autoneg_advertised = ADVERTISE_ALL; + if (hw->nic_type == athr_l1e) { + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; + hw->autoneg_advertised |= ADVERTISE_1000_FULL; + } + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= ADVERTISE_100FULL; + hw->autoneg_advertised = ADVERTISE_100_FULL; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= ADVERTISE_100_HALF; + hw->autoneg_advertised = ADVERTISE_100_HALF; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= ADVERTISE_10_FULL; + hw->autoneg_advertised = ADVERTISE_10_FULL; + break; + + default: + mii_autoneg_adv_reg |= ADVERTISE_10_HALF; + hw->autoneg_advertised = ADVERTISE_10_HALF; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return 0; +} + + +/* + * Resets the PHY and makes all configuration take effect + * + * hw - Struct containing variables accessed by shared code + * + * Sets bits 15 and 12 of the MII control register (for F001 bug) + */ +int atl1e_phy_commit(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + int ret_val; + u16 phy_data; + + phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + + ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /************************************** + * pcie serdes link may be down ! 
+ **************************************/ + for (i = 0; i < 25; i++) { + msleep(1); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (0 != (val & (MDIO_START | MDIO_BUSY))) { + netdev_err(adapter->netdev, + "pcie linkdown at least for 25ms\n"); + return ret_val; + } + + netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i); + } + return 0; +} + +int atl1e_phy_init(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + s32 ret_val; + u16 phy_val; + + if (hw->phy_configured) { + if (hw->re_autoneg) { + hw->re_autoneg = false; + return atl1e_restart_autoneg(hw); + } + return 0; + } + + /* RESET GPHY Core */ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT); + msleep(2); + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | + GPHY_CTRL_EXT_RESET); + msleep(2); + + /* patches */ + /* p1. enable hibernation mode */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00); + if (ret_val) + return ret_val; + /* p2. set Class A/B for all modes */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0); + if (ret_val) + return ret_val; + phy_val = 0x02ef; + /* remove Class AB */ + /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val); + if (ret_val) + return ret_val; + /* p3. 10B ??? */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04); + if (ret_val) + return ret_val; + /* p4. 1000T power */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB); + if (ret_val) + return ret_val; + + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46); + if (ret_val) + return ret_val; + + msleep(1); + + /* Enable PHY LinkChange Interrupt */ + ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00); + if (ret_val) { + netdev_err(adapter->netdev, + "Error enable PHY linkChange Interrupt\n"); + return ret_val; + } + /* setup AutoNeg parameters */ + ret_val = atl1e_phy_setup_autoneg_adv(hw); + if (ret_val) { + netdev_err(adapter->netdev, + "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/ + netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n"); + ret_val = atl1e_phy_commit(hw); + if (ret_val) { + netdev_err(adapter->netdev, "Error resetting the phy\n"); + return ret_val; + } + + hw->phy_configured = true; + + return 0; +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +int atl1e_reset_hw(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + + u32 idle_status_data = 0; + u16 pci_cfg_cmd_word = 0; + int timeout = 0; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ + pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word); + if ((pci_cfg_cmd_word & (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER)) + != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) { + pci_cfg_cmd_word |= (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER); + pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word); + } + + /* + * Issue Soft Reset to the MAC. 
This will reset the chip's + * transmit, receive, DMA. It will not affect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_WRITE_REG(hw, REG_MASTER_CTRL, + MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST); + wmb(); + msleep(1); + + /* Wait at least 10ms for all modules to be idle */ + for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { + idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS); + if (idle_status_data == 0) + break; + msleep(1); + cpu_relax(); + } + + if (timeout >= AT_HW_MAX_IDLE_DELAY) { + netdev_err(adapter->netdev, + "MAC state machine can't be idle since disabled for 10ms second\n"); + return AT_ERR_TIMEOUT; + } + + return 0; +} + + +/* + * Performs basic configuration of the adapter. + * + * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the multicast table, + * and calls routines to set up the link. + * Leaves the transmit and receive units disabled and uninitialized. + */ +int atl1e_init_hw(struct atl1e_hw *hw) +{ + s32 ret_val = 0; + + atl1e_init_pcie(hw); + + /* Zero out the Multicast HASH table */ + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + ret_val = atl1e_phy_init(hw); + + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. + * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) + return AT_ERR_PHY_RES; + + switch (phy_data & MII_AT001_PSSR_SPEED) { + case MII_AT001_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_AT001_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_AT001_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return AT_ERR_PHY_SPEED; + } + + if (phy_data & MII_AT001_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +int atl1e_restart_autoneg(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + if (err) + return err; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + err = atl1e_write_phy_reg(hw, MII_CTRL1000, + hw->mii_1000t_ctrl_reg); + if (err) + return err; + } + + err = atl1e_write_phy_reg(hw, MII_BMCR, + BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); + return err; +} + diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h new file mode 100644 index 000000000..88a6271de --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h @@ -0,0 +1,690 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ATHL1E_HW_H_ +#define _ATHL1E_HW_H_ + +#include <linux/types.h> +#include <linux/mii.h> + +struct atl1e_adapter; +struct atl1e_hw; + +/* function prototype */ +s32 atl1e_reset_hw(struct atl1e_hw *hw); +s32 atl1e_read_mac_addr(struct atl1e_hw *hw); +s32 atl1e_init_hw(struct atl1e_hw *hw); +s32 atl1e_phy_commit(struct atl1e_hw *hw); +s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex); +u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex); +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr); +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value); +s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data); +s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data); +s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw); +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw); +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value); +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value); +s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_init(struct atl1e_hw *hw); +int atl1e_check_eeprom_exist(struct atl1e_hw *hw); +void atl1e_force_ps(struct atl1e_hw *hw); +s32 atl1e_restart_autoneg(struct atl1e_hw *hw); + +/* register definition */ +#define REG_PM_CTRLSTAT 0x44 + +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_DEVICE_CAP 0x5C +#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 + +#define REG_DEVICE_CTRL 0x60 +#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5 +#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7 +#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xff +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF 
+#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + +#define REG_LTSSM_TEST_MODE 0x12FC +#define LTSSM_TEST_MODE_DEF 0xE000 + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_ITIMER2_EN 0x20 +#define MASTER_CTRL_INT_RDCLR 0x40 +#define MASTER_CTRL_LED_MODE 0x200 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xff +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xff + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */ +#define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */ + + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_EXT_RESET 1 +#define GPHY_CTRL_PIPE_MOD 2 +#define GPHY_CTRL_TEST_MODE_MASK 3 +#define GPHY_CTRL_TEST_MODE_SHIFT 2 +#define GPHY_CTRL_BERT_START 0x10 +#define GPHY_CTRL_GATE_25M_EN 0x20 +#define GPHY_CTRL_LPW_EXIT 0x40 +#define GPHY_CTRL_PHY_IDDQ 0x80 +#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 +#define GPHY_CTRL_PCLK_SEL_DIS 0x200 +#define GPHY_CTRL_HIB_EN 0x400 +#define GPHY_CTRL_HIB_PULSE 0x800 +#define GPHY_CTRL_SEL_ANA_RST 0x1000 +#define GPHY_CTRL_PHY_PLL_ON 0x2000 +#define GPHY_CTRL_PWDOWN_HW 0x4000 +#define GPHY_CTRL_DEFAULT (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN) + +#define GPHY_CTRL_PW_WOL_DIS (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN |\ + GPHY_CTRL_PWDOWN_HW |\ + GPHY_CTRL_PCLK_SEL_DIS |\ + GPHY_CTRL_PHY_IDDQ) + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 0: RXMAC is idling */ +#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 
0: TXMAC is idling */ +#define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */ +#define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */ +#define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */ +#define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state. 0: DMAW is idling */ +#define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */ +#define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */ + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */ +#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/ +#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 /* 1: read, 0: write */ +#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ +#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO master. And this bit is self cleared after one cycle*/ +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 +#define MDIO_AP_EN 0x10000000 +#define MDIO_WAIT_TIMES 10 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 +#define PHY_STATUS_100M 0x20000 +#define PHY_STATUS_EMI_CA 0x40000 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */ + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure.*/ +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */ +#define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */ + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */ +#define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */ +#define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */ +#define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */ +#define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */ +#define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */ +#define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */ +#define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. 
This bit has higher priority over CRC_EN */ +#define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */ +#define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */ +#define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */ +#define MAC_CTRL_PRMLEN_MASK 0xf +#define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */ +#define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */ +#define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */ +#define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */ +#define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */ +#define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */ +#define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */ +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_SPEED_10_100 1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */ +#define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */ +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */ +#define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */ +#define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */ +#define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */ + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */ +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. 
In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PATTERN_EN 0x00000001 +#define WOL_PATTERN_PME_EN 0x00000002 +#define WOL_MAGIC_EN 0x00000004 +#define WOL_MAGIC_PME_EN 0x00000008 +#define WOL_LINK_CHG_EN 0x00000010 +#define WOL_LINK_CHG_PME_EN 0x00000020 +#define WOL_PATTERN_ST 0x00000100 +#define WOL_MAGIC_ST 0x00000200 +#define WOL_LINKCHG_ST 0x00000400 +#define WOL_CLK_SWITCH_EN 0x00008000 +#define WOL_PT0_EN 0x00010000 +#define WOL_PT1_EN 0x00020000 +#define WOL_PT2_EN 0x00040000 +#define WOL_PT3_EN 0x00080000 +#define WOL_PT4_EN 0x00100000 +#define WOL_PT5_EN 0x00200000 +#define WOL_PT6_EN 0x00400000 +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14a4 +#define WOL_PT_LEN_MASK 0x7f +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Register */ +#define REG_SRAM_TRD_ADDR 0x1518 +#define REG_SRAM_TRD_LEN 0x151C +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* Load Ptr Register */ +#define REG_LOAD_PTR 0x1534 /* Software sets this bit after the initialization of the head and tail */ + +/* + * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. + */ + +/* Descriptor Control register */ +#define REG_RXF3_BASE_ADDR_HI 0x153C +#define REG_DESC_BASE_ADDR_HI 0x1540 +#define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */ +#define REG_HOST_RXF0_PAGE0_LO 0x1544 +#define REG_HOST_RXF0_PAGE1_LO 0x1548 +#define REG_TPD_BASE_ADDR_LO 0x154C +#define REG_RXF1_BASE_ADDR_HI 0x1550 +#define REG_RXF2_BASE_ADDR_HI 0x1554 +#define REG_HOST_RXFPAGE_SIZE 0x1558 +#define REG_TPD_RING_SIZE 0x155C +/* RSS about */ +#define REG_RSS_KEY0 0x14B0 +#define REG_RSS_KEY1 0x14B4 +#define REG_RSS_KEY2 0x14B8 +#define REG_RSS_KEY3 0x14BC +#define REG_RSS_KEY4 0x14C0 +#define REG_RSS_KEY5 0x14C4 +#define REG_RSS_KEY6 0x14C8 +#define REG_RSS_KEY7 0x14CC +#define REG_RSS_KEY8 0x14D0 +#define REG_RSS_KEY9 0x14D4 +#define REG_IDT_TABLE4 0x14E0 +#define REG_IDT_TABLE5 0x14E4 +#define REG_IDT_TABLE6 0x14E8 +#define REG_IDT_TABLE7 0x14EC +#define REG_IDT_TABLE0 0x1560 +#define REG_IDT_TABLE1 0x1564 +#define REG_IDT_TABLE2 0x1568 +#define REG_IDT_TABLE3 0x156C +#define REG_IDT_TABLE REG_IDT_TABLE0 +#define REG_RSS_HASH_VALUE 0x1570 +#define REG_RSS_HASH_FLAG 0x1574 +#define REG_BASE_CPU_NUMBER 0x157C + + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF +#define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0 +#define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */ +#define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */ +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */ +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_EARLY_TH 0x1584 /* Jumbo frame threshold in QWORD unit. 
Packet greater than */ +/* JUMBO_TASK_OFFLOAD_THRESHOLD will not be task offloaded. */ +#define TX_TX_EARLY_TH_MASK 0x7ff +#define TX_TX_EARLY_TH_SHIFT 0 + + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */ +#define RXQ_CTRL_PBA_ALIGN_64 1 +#define RXQ_CTRL_PBA_ALIGN_128 2 +#define RXQ_CTRL_PBA_ALIGN_256 3 +#define RXQ_CTRL_Q1_EN 0x10 +#define RXQ_CTRL_Q2_EN 0x20 +#define RXQ_CTRL_Q3_EN 0x40 +#define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80 +#define RXQ_CTRL_HASH_TLEN_SHIFT 8 +#define RXQ_CTRL_HASH_TLEN_MASK 0xFF +#define RXQ_CTRL_HASH_TYPE_IPV4 0x10000 +#define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000 +#define RXQ_CTRL_HASH_TYPE_IPV6 0x40000 +#define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000 +#define RXQ_CTRL_RSS_MODE_DISABLE 0 +#define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000 +#define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000 +#define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000 +#define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000 +#define RXQ_CTRL_HASH_ENABLE 0x20000000 +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 +/* + * Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit. + * When the packet length greater than or equal to this value, RXQ + * shall start cut-through forwarding of the received packet. + */ +#define RXQ_JMBOSZ_TH_MASK 0x7ff +#define RXQ_JMBOSZ_TH_SHIFT 0 /* RRD retirement timer. Decrement by 1 after every 512ns passes*/ +#define RXQ_JMBO_LKAH_MASK 0xf +#define RXQ_JMBO_LKAH_SHIFT 11 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff + + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_REQ_PRI 0x400 +#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F +#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 +#define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF +#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 +#define DMA_CTRL_TXCMB_EN 0x100000 +#define DMA_CTRL_RXCMB_EN 0x200000 + + +/* CMB/SMB Control Register */ +#define REG_SMB_STAT_TIMER 0x15C4 +#define REG_TRIG_RRD_THRESH 0x15CA +#define REG_TRIG_TPD_THRESH 0x15C8 +#define REG_TRIG_TXTIMER 0x15CC +#define REG_TRIG_RXTIMER 0x15CE + +/* HOST RXF Page 1,2,3 address */ +#define REG_HOST_RXF1_PAGE0_LO 0x15D0 +#define REG_HOST_RXF1_PAGE1_LO 0x15D4 +#define REG_HOST_RXF2_PAGE0_LO 0x15D8 +#define REG_HOST_RXF2_PAGE1_LO 0x15DC +#define REG_HOST_RXF3_PAGE0_LO 0x15E0 +#define REG_HOST_RXF3_PAGE1_LO 0x15E4 + +/* Mail box */ +#define REG_MB_RXF1_RADDR 0x15B4 +#define REG_MB_RXF2_RADDR 0x15B8 +#define REG_MB_RXF3_RADDR 0x15BC +#define REG_MB_TPD_PROD_IDX 0x15F0 + +/* RXF-Page 0-3 PageNo & Valid bit */ +#define REG_HOST_RXF0_PAGE0_VLD 0x15F4 +#define HOST_RXF_VALID 1 +#define HOST_RXF_PAGENO_SHIFT 1 +#define HOST_RXF_PAGENO_MASK 0x7F +#define REG_HOST_RXF0_PAGE1_VLD 0x15F5 +#define REG_HOST_RXF1_PAGE0_VLD 0x15F6 +#define REG_HOST_RXF1_PAGE1_VLD 0x15F7 +#define REG_HOST_RXF2_PAGE0_VLD 0x15F8 +#define REG_HOST_RXF2_PAGE1_VLD 0x15F9 +#define REG_HOST_RXF3_PAGE0_VLD 0x15FA +#define REG_HOST_RXF3_PAGE1_VLD 0x15FB 
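The MDIO_* and REG_MDIO_CTRL definitions above are the fields that atl1e_read_phy_reg() in atl1e_hw.c packs into a single 32-bit control word and then polls until MDIO_START and MDIO_BUSY clear. The sketch below is for illustration only, assuming these register definitions are in scope; readl()/writel() on a hypothetical 'mmio' base stand in for the driver's AT_READ_REG/AT_WRITE_REG accessors, and it returns -EIO where the driver itself returns AT_ERR_PHY.

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch of one PHY register read via REG_MDIO_CTRL, mirroring atl1e_read_phy_reg().
 * 'mmio' is a hypothetical ioremap()ed BAR base standing in for hw->hw_addr.
 */
static int sketch_mdio_read(void __iomem *mmio, u16 reg_addr, u16 *phy_data)
{
	u32 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		  MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW |
		  MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	int i;

	writel(val, mmio + REG_MDIO_CTRL);		/* start one MDIO read cycle */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = readl(mmio + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY))) {
			*phy_data = (u16)val;		/* low 16 bits carry the PHY data */
			return 0;
		}
	}
	return -EIO;					/* PHY did not respond in time */
}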
+ +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 1 +#define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */ +/* + * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set + * in Table 51 Selene Master Control Register (Offset 0x1400). + */ +#define ISR_MANUAL 4 +#define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */ +#define ISR_HOST_RXF0_OV 0x10 +#define ISR_HOST_RXF1_OV 0x20 +#define ISR_HOST_RXF2_OV 0x40 +#define ISR_HOST_RXF3_OV 0x80 +#define ISR_TXF_UN 0x100 +#define ISR_RX0_PAGE_FULL 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_TX_CREDIT 0x2000 +#define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */ +#define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */ +#define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */ +#define ISR_TX_DMA 0x40000 +#define ISR_RX_PKT_1 0x80000 +#define ISR_RX_PKT_2 0x100000 +#define ISR_RX_PKT_3 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_INT 0x80000000 + + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + + +#define IMR_NORMAL_MASK (\ + ISR_SMB |\ + ISR_TXF_UN |\ + ISR_HW_RXF_OV |\ + ISR_HOST_RXF0_OV|\ + ISR_MANUAL |\ + ISR_GPHY |\ + ISR_GPHY_LPW |\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY_LINKDOWN|\ + ISR_RX_PKT |\ + ISR_TX_PKT) + +#define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT) +#define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT) + +#define REG_MAC_RX_STATUS_BIN 0x1700 +#define REG_MAC_RX_STATUS_END 0x175c +#define REG_MAC_TX_STATUS_BIN 0x1760 +#define REG_MAC_TX_STATUS_END 0x17c0 + +/* Hardware Offset Register */ +#define REG_HOST_RXF0_PAGEOFF 0x1800 +#define REG_TPD_CONS_IDX 0x1804 +#define REG_HOST_RXF1_PAGEOFF 0x1808 +#define REG_HOST_RXF2_PAGEOFF 0x180C +#define REG_HOST_RXF3_PAGEOFF 0x1810 + +/* RXF-Page 0-3 Offset DMA Address */ +#define REG_HOST_RXF0_MB0_LO 0x1820 +#define REG_HOST_RXF0_MB1_LO 0x1824 +#define REG_HOST_RXF1_MB0_LO 0x1828 +#define REG_HOST_RXF1_MB1_LO 0x182C +#define REG_HOST_RXF2_MB0_LO 0x1830 +#define REG_HOST_RXF2_MB1_LO 0x1834 +#define REG_HOST_RXF3_MB0_LO 0x1838 +#define REG_HOST_RXF3_MB1_LO 0x183C + +/* Tpd CMB DMA Address */ +#define REG_HOST_TX_CMB_LO 0x1840 +#define REG_HOST_SMB_ADDR_LO 0x1844 + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +/***************************** MII definition ***************************************/ +/* PHY Common Register */ +#define MII_AT001_PSCR 0x10 +#define MII_AT001_PSSR 0x11 +#define MII_INT_CTRL 0x12 +#define MII_INT_STATUS 0x13 +#define MII_SMARTSPEED 0x14 +#define MII_LBRERROR 0x18 +#define MII_RESV2 0x1a + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/* Autoneg Advertisement Register */ +#define MII_AR_DEFAULT_CAP_MASK 0 + +/* 1000BASE-T Control Register */ +#define MII_AT001_CR_1000T_SPEED_MASK \ + (ADVERTISE_1000FULL | ADVERTISE_1000HALF) +#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK + +/* AT001 PHY Specific Control Register */ +#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 
+#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ +/* Manual MDI configuration */ +#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. + */ +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 +/* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 +/* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 +/* AT001 PHY Specific Status Register */ +#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#endif /*_ATHL1E_HW_H_*/ diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c new file mode 100644 index 000000000..59a03a193 --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -0,0 +1,2578 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include "atl1e.h" + +#define DRV_VERSION "1.0.0.7-NAPI" + +char atl1e_driver_name[] = "ATL1E"; +char atl1e_driver_version[] = DRV_VERSION; +#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026 +/* + * atl1e_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id atl1e_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, + /* required last entry */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl); + +MODULE_AUTHOR("Atheros Corporation, , Jie Yang "); +MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); + +static const u16 +atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD}, + {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD}, + {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD}, + {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD} +}; + +static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] = +{ + REG_RXF0_BASE_ADDR_HI, + REG_RXF1_BASE_ADDR_HI, + REG_RXF2_BASE_ADDR_HI, + REG_RXF3_BASE_ADDR_HI +}; + +static const u16 +atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO}, + {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO}, + {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO}, + {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO} +}; + +static const u16 +atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO}, + {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO}, + {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO}, + {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO} +}; + +static const u16 atl1e_pay_load_size[] = { + 128, 256, 512, 1024, 2048, 4096, +}; + +/** + * atl1e_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); + AT_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * atl1e_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_disable(struct atl1e_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * atl1e_irq_reset - reset interrupt confiure on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_reset(struct atl1e_adapter *adapter) +{ + atomic_set(&adapter->irq_sem, 0); + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); +} + +/** + * atl1e_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1e_phy_config(unsigned long data) +{ + struct atl1e_adapter *adapter = (struct atl1e_adapter *) data; + struct atl1e_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_restart_autoneg(hw); + 
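+	/* atl1e_restart_autoneg() issues MDIO writes through atl1e_write_phy_reg(), so this timer callback must hold mdio_lock around the call, as it does here. */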
spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +void atl1e_reinit_locked(struct atl1e_adapter *adapter) +{ + + WARN_ON(in_interrupt()); + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); +} + +static void atl1e_reset_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + adapter = container_of(work, struct atl1e_adapter, reset_task); + + atl1e_reinit_locked(adapter); +} + +static int atl1e_check_link(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err = 0; + u16 speed, duplex, phy_data; + + /* MII_BMSR must read twice */ + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + if ((phy_data & BMSR_LSTATUS) == 0) { + /* link down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + u32 value; + /* disable rx */ + value = AT_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + AT_WRITE_REG(hw, REG_MAC_CTRL, value); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } else { + /* Link Up */ + err = atl1e_get_speed_and_duplex(hw, &speed, &duplex); + if (unlikely(err)) + return err; + + /* link result is our setting */ + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1e_setup_mac_ctrl(adapter); + netdev_info(netdev, + "NIC Link is Up <%d Mbps %s Duplex>\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half"); + } + + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } + return 0; +} + +/** + * atl1e_link_chg_task - deal with link change event Out of interrupt context + * @netdev: network interface device structure + */ +static void atl1e_link_chg_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atl1e_adapter, link_chg_task); + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_check_link(adapter); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +static void atl1e_link_chg_event(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + u16 link_up = 0; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->mdio_lock); + link_up = phy_data & BMSR_LSTATUS; + /* notify upper layer link down ASAP */ + if (!link_up) { + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + netdev_info(netdev, "NIC Link is Down\n"); + adapter->link_speed = SPEED_0; + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +static void atl1e_del_timer(struct atl1e_adapter *adapter) +{ + del_timer_sync(&adapter->phy_config_timer); +} + +static void atl1e_cancel_work(struct atl1e_adapter *adapter) +{ + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->link_chg_task); +} + +/** + * atl1e_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1e_tx_timeout(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +/** + * atl1e_set_multi - Multicast and 
Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + */ +static void atl1e_set_multi(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 mac_ctrl_data = 0; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL); + + if (netdev->flags & IFF_PROMISC) { + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; + } else { + mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + } + + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1e_hash_mc_addr(hw, ha->addr); + atl1e_hash_set(hw, hash_value); + } +} + +static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data) +{ + + if (features & NETIF_F_RXALL) { + /* enable RX of ALL frames */ + *mac_ctrl_data |= MAC_CTRL_DBG; + } else { + /* disable RX of ALL frames */ + *mac_ctrl_data &= ~MAC_CTRL_DBG; + } +} + +static void atl1e_rx_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 mac_ctrl_data = 0; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + atl1e_irq_disable(adapter); + mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); + __atl1e_rx_mode(features, &mac_ctrl_data); + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1e_irq_enable(adapter); +} + + +static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; + } +} + +static void atl1e_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 mac_ctrl_data = 0; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + atl1e_irq_disable(adapter); + mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); + __atl1e_vlan_mode(features, &mac_ctrl_data); + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1e_irq_enable(adapter); +} + +static void atl1e_restore_vlan(struct atl1e_adapter *adapter) +{ + netdev_dbg(adapter->netdev, "%s\n", __func__); + atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +/** + * atl1e_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1e_set_mac_addr(struct net_device *netdev, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + 
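+	/* keep the copy in the hw struct in sync; atl1e_hw_set_mac_addr() below programs it into the REG_MAC_STA_ADDR registers */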
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1e_hw_set_mac_addr(&adapter->hw); + + return 0; +} + +static netdev_features_t atl1e_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int atl1e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atl1e_vlan_mode(netdev, features); + + if (changed & NETIF_F_RXALL) + atl1e_rx_mode(netdev, features); + + + return 0; +} + +/** + * atl1e_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + netdev_warn(adapter->netdev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); + return result; +} + +static void atl1e_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); +} + +static int atl1e_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + netdev_dbg(adapter->netdev, " write %x %x\n", + data->reg_num, data->val_in); + if (atl1e_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; + +} + +static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1e_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static void 
atl1e_setup_pcicmd(struct pci_dev *pdev) +{ + u16 cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO); + cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + /* + * some motherboards BIOS(PXE/EFI) driver may set PME + * while they transfer control to OS (Windows/Linux) + * so we should clear this bit before NIC work normally + */ + pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); + msleep(1); +} + +/** + * atl1e_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int atl1e_alloc_queues(struct atl1e_adapter *adapter) +{ + return 0; +} + +/** + * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) + * @adapter: board private structure to initialize + * + * atl1e_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int atl1e_sw_init(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 phy_status_data = 0; + + adapter->wol = 0; + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + adapter->num_rx_queues = 1; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + /* nic type */ + if (hw->revision_id >= 0xF0) { + hw->nic_type = athr_l2e_revB; + } else { + if (phy_status_data & PHY_STATUS_100M) + hw->nic_type = athr_l1e; + else + hw->nic_type = athr_l2e_revA; + } + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + + if (phy_status_data & PHY_STATUS_EMI_CA) + hw->emi_ca = true; + else + hw->emi_ca = false; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN + 7) >> 3; + + hw->rrs_type = atl1e_rrs_disable; + hw->indirect_tab = 0; + hw->base_cpu = 0; + + /* need confirm */ + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 200ms */ + hw->tpd_burst = 5; + hw->rrd_thresh = 1; + hw->tpd_thresh = adapter->tx_ring.count / 2; + hw->rx_count_down = 4; /* 2us resolution */ + hw->tx_count_down = hw->imt * 4 / 3; + hw->dmar_block = atl1e_dma_req_1024; + hw->dmaw_block = atl1e_dma_req_1024; + hw->dmar_dly_cnt = 15; + hw->dmaw_dly_cnt = 4; + + if (atl1e_alloc_queues(adapter)) { + netdev_err(adapter->netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +/** + * atl1e_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL) + return; + + ring_count = tx_ring->count; + /* first unmmap dma */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; 
+ if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + } + /* second free skb */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + } + /* Zero out Tx-buffers */ + memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) * + ring_count); + memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) * + ring_count); +} + +/** + * atl1e_clean_rx_ring - Free rx-reservation skbs + * @adapter: board private structure + */ +static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_rx_ring *rx_ring = + &adapter->rx_ring; + struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; + u16 i, j; + + + if (adapter->ring_vir_addr == NULL) + return; + /* Zero out the descriptor ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + if (rx_page_desc[i].rx_page[j].addr != NULL) { + memset(rx_page_desc[i].rx_page[j].addr, 0, + rx_ring->real_page_size); + } + } + } +} + +static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size) +{ + *ring_size = ((u32)(adapter->tx_ring.count * + sizeof(struct atl1e_tpd_desc) + 7 + /* tx ring, qword align */ + + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE * + adapter->num_rx_queues + 31 + /* rx ring, 32 bytes align */ + + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) * + sizeof(u32) + 3)); + /* tx, rx cmd, dword align */ +} + +static void atl1e_init_ring_resources(struct atl1e_adapter *adapter) +{ + struct atl1e_rx_ring *rx_ring = NULL; + + rx_ring = &adapter->rx_ring; + + rx_ring->real_page_size = adapter->rx_ring.page_size + + adapter->hw.max_frame_size + + ETH_HLEN + VLAN_HLEN + + ETH_FCS_LEN; + rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32); + atl1e_cal_ring_size(adapter, &adapter->ring_size); + + adapter->ring_vir_addr = NULL; + adapter->rx_ring.desc = NULL; + rwlock_init(&adapter->tx_ring.tx_lock); +} + +/* + * Read / Write Ptr Initialize: + */ +static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = NULL; + struct atl1e_rx_ring *rx_ring = NULL; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + rx_page_desc = rx_ring->rx_page_desc; + + tx_ring->next_to_use = 0; + atomic_set(&tx_ring->next_to_clean, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_page_desc[i].rx_using = 0; + rx_page_desc[i].rx_nxseq = 0; + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + *rx_page_desc[i].rx_page[j].write_offset_addr = 0; + rx_page_desc[i].rx_page[j].read_offset = 0; + } + } +} + +/** + * atl1e_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); + + if (adapter->ring_vir_addr) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + + if (adapter->tx_ring.tx_buffer) { + 
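+ /* The tx_buffer array is kzalloc()'ed separately from the coherent ring buffer in atl1e_setup_ring_resources(), so it is released with kfree(). */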
kfree(adapter->tx_ring.tx_buffer); + adapter->tx_ring.tx_buffer = NULL; + } +} + +/** + * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1e_tx_ring *tx_ring; + struct atl1e_rx_ring *rx_ring; + struct atl1e_rx_page_desc *rx_page_desc; + int size, i, j; + u32 offset = 0; + int err = 0; + + if (adapter->ring_vir_addr != NULL) + return 0; /* alloced already */ + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + + /* real ring DMA buffer */ + + size = adapter->ring_size; + adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size, + &adapter->ring_dma); + if (adapter->ring_vir_addr == NULL) { + netdev_err(adapter->netdev, + "pci_alloc_consistent failed, size = D%d\n", size); + return -ENOMEM; + } + + rx_page_desc = rx_ring->rx_page_desc; + + /* Init TPD Ring */ + tx_ring->dma = roundup(adapter->ring_dma, 8); + offset = tx_ring->dma - adapter->ring_dma; + tx_ring->desc = adapter->ring_vir_addr + offset; + size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); + tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); + if (tx_ring->tx_buffer == NULL) { + err = -ENOMEM; + goto failed; + } + + /* Init RXF-Pages */ + offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count); + offset = roundup(offset, 32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].addr = + adapter->ring_vir_addr + offset; + offset += rx_ring->real_page_size; + } + } + + /* Init CMB dma address */ + tx_ring->cmb_dma = adapter->ring_dma + offset; + tx_ring->cmb = adapter->ring_vir_addr + offset; + offset += sizeof(u32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].write_offset_dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].write_offset_addr = + adapter->ring_vir_addr + offset; + offset += sizeof(u32); + } + } + + if (unlikely(offset > adapter->ring_size)) { + netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", + offset, adapter->ring_size); + err = -1; + goto failed; + } + + return 0; +failed: + if (adapter->ring_vir_addr != NULL) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + return err; +} + +static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter) +{ + + struct atl1e_hw *hw = &adapter->hw; + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, + (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, + (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count)); + AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, + (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK)); + + rx_page_desc = rx_ring->rx_page_desc; + /* RXF Page Physical address / Page Length */ + for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) { + AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i], + (u32)((adapter->ring_dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + u32 page_phy_addr; + u32 
offset_phy_addr; + + page_phy_addr = rx_page_desc[i].rx_page[j].dma; + offset_phy_addr = + rx_page_desc[i].rx_page[j].write_offset_dma; + + AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j], + page_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j], + offset_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1); + } + } + /* Page Length */ + AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size); + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); +} + +static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 dev_ctrl_data = 0; + u32 max_pay_load = 0; + u32 jumbo_thresh = 0; + u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ + + /* configure TXQ param */ + if (hw->nic_type != athr_l2e_revB) { + extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + if (hw->max_frame_size <= 1500) { + jumbo_thresh = hw->max_frame_size + extra_size; + } else if (hw->max_frame_size < 6*1024) { + jumbo_thresh = + (hw->max_frame_size + extra_size) * 2 / 3; + } else { + jumbo_thresh = (hw->max_frame_size + extra_size) / 2; + } + AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3); + } + + dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & + DEVICE_CTRL_MAX_PAYLOAD_MASK; + + hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & + DEVICE_CTRL_MAX_RREQ_SZ_MASK; + hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); + + if (hw->nic_type != athr_l2e_revB) + AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, + atl1e_pay_load_size[hw->dmar_block]); + /* enable TXQ */ + AT_WRITE_REGW(hw, REG_TXQ_CTRL, + (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) + << TXQ_CTRL_NUM_TPD_BURST_SHIFT) + | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN); +} + +static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 rxf_len = 0; + u32 rxf_low = 0; + u32 rxf_high = 0; + u32 rxf_thresh_data = 0; + u32 rxq_ctrl_data = 0; + + if (hw->nic_type != athr_l2e_revB) { + AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM, + (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << + RXQ_JMBOSZ_TH_SHIFT | + (1 & RXQ_JMBO_LKAH_MASK) << + RXQ_JMBO_LKAH_SHIFT)); + + rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + rxf_high = rxf_len * 4 / 5; + rxf_low = rxf_len / 5; + rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK) + << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK) + << RXQ_RXF_PAUSE_TH_LO_SHIFT); + + AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data); + } + + /* RRS */ + AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); + AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); + + if (hw->rrs_type & atl1e_rrs_ipv4) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4; + + if (hw->rrs_type & atl1e_rrs_ipv4_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP; + + if (hw->rrs_type & atl1e_rrs_ipv6) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6; + + if (hw->rrs_type & atl1e_rrs_ipv6_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP; + + if (hw->rrs_type != atl1e_rrs_disable) + rxq_ctrl_data |= + (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT); + + rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 | + RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; + + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); +} + +static inline void atl1e_configure_dma(struct atl1e_adapter *adapter) +{ + struct 
atl1e_hw *hw = &adapter->hw; + u32 dma_ctrl_data = 0; + + dma_ctrl_data = DMA_CTRL_RXCMB_EN; + dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT; + dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT; + dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER; + dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) + << DMA_CTRL_DMAR_DLY_CNT_SHIFT; + dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK) + << DMA_CTRL_DMAW_DLY_CNT_SHIFT; + + AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); +} + +static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) +{ + u32 value; + struct atl1e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | + MAC_CTRL_RX_EN ; + + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + + value |= ((u32)((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + value |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + + __atl1e_vlan_mode(netdev->features, &value); + + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + if (netdev->features & NETIF_F_RXALL) + value |= MAC_CTRL_DBG; + AT_WRITE_REG(hw, REG_MAC_CTRL, value); +} + +/** + * atl1e_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static int atl1e_configure(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + + u32 intr_status_data = 0; + + /* clear interrupt status */ + AT_WRITE_REG(hw, REG_ISR, ~0); + + /* 1. set MAC Address */ + atl1e_hw_set_mac_addr(hw); + + /* 2. Init the Multicast HASH table done by set_muti */ + + /* 3. Clear any WOL status */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* 4. Descripter Ring BaseMem/Length/Read ptr/Write ptr + * TPD Ring/SMB/RXF0 Page CMBs, they use the same + * High 32bits memory */ + atl1e_configure_des_ring(adapter); + + /* 5. set Interrupt Moderator Timer */ + AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt); + AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt); + AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE | + MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN); + + /* 6. rx/tx threshold to trig interrupt */ + AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh); + AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh); + AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down); + AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down); + + /* 7. set Interrupt Clear Timer */ + AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict); + + /* 8. set MTU */ + AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN); + + /* 9. config TXQ early tx threshold */ + atl1e_configure_tx(adapter); + + /* 10. config RXQ */ + atl1e_configure_rx(adapter); + + /* 11. config DMA Engine */ + atl1e_configure_dma(adapter); + + /* 12. 
smb timer to trig interrupt */ + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer); + + intr_status_data = AT_READ_REG(hw, REG_ISR); + if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) { + netdev_err(adapter->netdev, + "atl1e_configure failed, PCIE phy link down\n"); + return -1; + } + + AT_WRITE_REG(hw, REG_ISR, 0x7fffffff); + return 0; +} + +/** + * atl1e_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + */ +static struct net_device_stats *atl1e_get_stats(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &netdev->stats; + + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + + hw_stats->rx_align_err + + hw_stats->rx_rxf_ov; + + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_dropped = hw_stats->rx_rrd_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; + + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + + return net_stats; +} + +static void atl1e_update_hw_stats(struct atl1e_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } + /* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } +} + +static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); + u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); + + while (next_to_clean != hw_next_to_clean) { + tx_buffer = &tx_ring->tx_buffer[next_to_clean]; + if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(adapter->pdev, tx_buffer->dma, + tx_buffer->length, 
PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + + if (tx_buffer->skb) { + dev_kfree_skb_irq(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + if (++next_to_clean == tx_ring->count) + next_to_clean = 0; + } + + atomic_set(&tx_ring->next_to_clean, next_to_clean); + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) { + netif_wake_queue(adapter->netdev); + } + + return true; +} + +/** + * atl1e_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t atl1e_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + int max_ints = AT_MAX_INT_WORK; + int handled = IRQ_NONE; + u32 status; + + do { + status = AT_READ_REG(hw, REG_ISR); + if ((status & IMR_NORMAL_MASK) == 0 || + (status & ISR_DIS_INT) != 0) { + if (max_ints != AT_MAX_INT_WORK) + handled = IRQ_HANDLED; + break; + } + /* link event */ + if (status & ISR_GPHY) + atl1e_clear_phy_int(adapter); + /* Ack ISR */ + AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + + handled = IRQ_HANDLED; + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + netdev_err(adapter->netdev, + "pcie phy linkdown %x\n", status); + if (netif_running(adapter->netdev)) { + /* reset MAC */ + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + } + + /* check if DMA read/write error */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + netdev_err(adapter->netdev, + "PCIE DMA RW error (status = 0x%x)\n", + status); + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + + if (status & ISR_SMB) + atl1e_update_hw_stats(adapter); + + /* link event */ + if (status & (ISR_GPHY | ISR_MANUAL)) { + netdev->stats.tx_carrier_errors++; + atl1e_link_chg_event(adapter); + break; + } + + /* transmit event */ + if (status & ISR_TX_EVENT) + atl1e_clean_tx_irq(adapter); + + if (status & ISR_RX_EVENT) { + /* + * disable rx interrupts, without + * the synchronize_irq bit + */ + AT_WRITE_REG(hw, REG_IMR, + IMR_NORMAL_MASK & ~ISR_RX_EVENT); + AT_WRITE_FLUSH(hw); + if (likely(napi_schedule_prep( + &adapter->napi))) + __napi_schedule(&adapter->napi); + } + } while (--max_ints > 0); + /* re-enable Interrupt*/ + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + + return handled; +} + +static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_recv_ret_status *prrs) +{ + u8 *packet = (u8 *)(prrs + 1); + struct iphdr *iph; + u16 head_len = ETH_HLEN; + u16 pkt_flags; + u16 err_flags; + + skb_checksum_none_assert(skb); + pkt_flags = prrs->pkt_flag; + err_flags = prrs->err_flag; + if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) && + ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) { + if (pkt_flags & RRS_IS_IPV4) { + if (pkt_flags & RRS_IS_802_3) + head_len += 8; + iph = (struct iphdr *) (packet + head_len); + if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF)) + goto hw_xsum; + } + if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + } + +hw_xsum : + return; +} + +static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter, + u8 que) +{ + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; + u8 rx_using = rx_page_desc[que].rx_using; + + return &(rx_page_desc[que].rx_page[rx_using]); +} + +static void atl1e_clean_rx_irq(struct 
atl1e_adapter *adapter, u8 que, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; + struct sk_buff *skb = NULL; + struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que); + u32 packet_size, write_offset; + struct atl1e_recv_ret_status *prrs; + + write_offset = *(rx_page->write_offset_addr); + if (likely(rx_page->read_offset < write_offset)) { + do { + if (*work_done >= work_to_do) + break; + (*work_done)++; + /* get new packet's rrs */ + prrs = (struct atl1e_recv_ret_status *) (rx_page->addr + + rx_page->read_offset); + /* check sequence number */ + if (prrs->seq_num != rx_page_desc[que].rx_nxseq) { + netdev_err(netdev, + "rx sequence number error (rx=%d) (expect=%d)\n", + prrs->seq_num, + rx_page_desc[que].rx_nxseq); + rx_page_desc[que].rx_nxseq++; + /* just for debug use */ + AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0, + (((u32)prrs->seq_num) << 16) | + rx_page_desc[que].rx_nxseq); + goto fatal_err; + } + rx_page_desc[que].rx_nxseq++; + + /* error packet */ + if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) && + !(netdev->features & NETIF_F_RXALL)) { + if (prrs->err_flag & (RRS_ERR_BAD_CRC | + RRS_ERR_DRIBBLE | RRS_ERR_CODE | + RRS_ERR_TRUNC)) { + /* hardware error, discard this packet*/ + netdev_err(netdev, + "rx packet desc error %x\n", + *((u32 *)prrs + 1)); + goto skip_pkt; + } + } + + packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK); + if (likely(!(netdev->features & NETIF_F_RXFCS))) + packet_size -= 4; /* CRC */ + + skb = netdev_alloc_skb_ip_align(netdev, packet_size); + if (skb == NULL) + goto skip_pkt; + + memcpy(skb->data, (u8 *)(prrs + 1), packet_size); + skb_put(skb, packet_size); + skb->protocol = eth_type_trans(skb, netdev); + atl1e_rx_checksum(adapter, skb, prrs); + + if (prrs->pkt_flag & RRS_IS_VLAN_TAG) { + u16 vlan_tag = (prrs->vtag >> 4) | + ((prrs->vtag & 7) << 13) | + ((prrs->vtag & 8) << 9); + netdev_dbg(netdev, + "RXD VLAN TAG=0x%04x\n", + prrs->vtag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } + netif_receive_skb(skb); + +skip_pkt: + /* skip current packet whether it's ok or not. 
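+ * The read offset is advanced by the RRS-reported packet length plus the size of the status block, rounded up to a 32-byte boundary.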
*/ + rx_page->read_offset += + (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK) + + sizeof(struct atl1e_recv_ret_status) + 31) & + 0xFFFFFFE0); + + if (rx_page->read_offset >= rx_ring->page_size) { + /* mark this page clean */ + u16 reg_addr; + u8 rx_using; + + rx_page->read_offset = + *(rx_page->write_offset_addr) = 0; + rx_using = rx_page_desc[que].rx_using; + reg_addr = + atl1e_rx_page_vld_regs[que][rx_using]; + AT_WRITE_REGB(&adapter->hw, reg_addr, 1); + rx_page_desc[que].rx_using ^= 1; + rx_page = atl1e_get_rx_page(adapter, que); + } + write_offset = *(rx_page->write_offset_addr); + } while (rx_page->read_offset < write_offset); + } + + return; + +fatal_err: + if (!test_bit(__AT_DOWN, &adapter->flags)) + schedule_work(&adapter->reset_task); +} + +/** + * atl1e_clean - NAPI Rx polling callback + */ +static int atl1e_clean(struct napi_struct *napi, int budget) +{ + struct atl1e_adapter *adapter = + container_of(napi, struct atl1e_adapter, napi); + u32 imr_data; + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + + atl1e_clean_rx_irq(adapter, 0, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if (work_done < budget) { +quit_polling: + napi_complete(napi); + imr_data = AT_READ_REG(&adapter->hw, REG_IMR); + AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); + /* test debug */ + if (test_bit(__AT_DOWN, &adapter->flags)) { + atomic_dec(&adapter->irq_sem); + netdev_err(adapter->netdev, + "atl1e_clean is called when AT_DOWN\n"); + } + /* reenable RX intr */ + /*atl1e_irq_enable(adapter); */ + + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void atl1e_netpoll(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1e_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tx_ring->next_to_clean); + next_to_use = tx_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ? 
+ (next_to_clean - next_to_use - 1) : + (tx_ring->count + next_to_clean - next_to_use - 1); +} + +/* + * get next usable tpd + * Note: should call atl1e_tdp_avail to make sure + * there is enough tpd to use + */ +static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + u16 next_to_use = 0; + + next_to_use = tx_ring->next_to_use; + if (++tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + + memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); + return &tx_ring->desc[next_to_use]; +} + +static struct atl1e_tx_buffer * +atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + + return &tx_ring->tx_buffer[tpd - tx_ring->desc]; +} + +/* Calculate the transmit packet descript needed*/ +static u16 atl1e_cal_tdp_req(const struct sk_buff *skb) +{ + int i = 0; + u16 tpd_req = 1; + u16 fg_size = 0; + u16 proto_hdr_len = 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); + } + + if (skb_is_gso(skb)) { + if (skb->protocol == htons(ETH_P_IP) || + (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) { + proto_hdr_len = skb_transport_offset(skb) + + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) { + tpd_req += ((skb_headlen(skb) - proto_hdr_len + + MAX_TX_BUF_LEN - 1) >> + MAX_TX_BUF_SHIFT); + } + } + + } + return tpd_req; +} + +static int atl1e_tso_csum(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +{ + unsigned short offload_type; + u8 hdr_len; + u32 real_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + offload_type = skb_shinfo(skb)->gso_type; + + if (offload_type & SKB_GSO_TCPV4) { + real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len)); + + if (real_len < skb->len) + pskb_trim(skb, real_len); + + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + netdev_warn(adapter->netdev, + "IPV4 tso with zero data??\n"); + goto check_sum; + } else { + ip_hdr(skb)->check = 0; + ip_hdr(skb)->tot_len = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic( + ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + tpd->word3 |= (ip_hdr(skb)->ihl & + TDP_V4_IPHL_MASK) << + TPD_V4_IPHL_SHIFT; + tpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << + TPD_TCPHDRLEN_SHIFT; + tpd->word3 |= ((skb_shinfo(skb)->gso_size) & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; + } + return 0; + } + } + +check_sum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + + cso = skb_checksum_start_offset(skb); + if (unlikely(cso & 0x1)) { + netdev_err(adapter->netdev, + "payload offset should not ant event number\n"); + return -1; + } else { + css = cso + skb->csum_offset; + tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << + TPD_CCSUMOFFSET_SHIFT; + tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT; + } + } + + return 0; +} + +static int atl1e_tx_map(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tpd_desc *use_tpd = NULL; + struct atl1e_tx_buffer *tx_buffer = NULL; + u16 buf_len = skb_headlen(skb); + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 
0; + u16 nr_frags; + u16 f; + int segment; + int ring_start = adapter->tx_ring.next_to_use; + int ring_end; + + nr_frags = skb_shinfo(skb)->nr_frags; + segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (segment) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->length = map_len; + tx_buffer->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) + return -ENOSPC; + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + while (mapped_len < buf_len) { + /* mapped_len == 0, means we should use the first tpd, + which is given by caller */ + if (mapped_len == 0) { + use_tpd = tpd; + } else { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + } + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->skb = NULL; + + tx_buffer->length = map_len = + ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? + MAX_TX_BUF_LEN : (buf_len - mapped_len); + tx_buffer->dma = + pci_map_single(adapter->pdev, skb->data + mapped_len, + map_len, PCI_DMA_TODEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + } + /* Reset the tx rings next pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + u16 i; + u16 seg_num; + + frag = &skb_shinfo(skb)->frags[f]; + buf_len = skb_frag_size(frag); + + seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + for (i = 0; i < seg_num; i++) { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + BUG_ON(tx_buffer->skb); + + tx_buffer->skb = NULL; + tx_buffer->length = + (buf_len > MAX_TX_BUF_LEN) ? 
+ MAX_TX_BUF_LEN : buf_len; + buf_len -= tx_buffer->length; + + tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, + (i * MAX_TX_BUF_LEN), + tx_buffer->length, + DMA_TO_DEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma, + tx_buffer->length, DMA_TO_DEVICE); + } + + /* Reset the ring next to use pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + } + + if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) + /* note this one is a tcp header */ + tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; + /* The last tpd */ + + use_tpd->word3 |= 1 << TPD_EOP_SHIFT; + /* The last buffer info contain the skb address, + so it will be free after unmap */ + tx_buffer->skb = skb; + return 0; +} + +static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, + struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use); +} + +static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u16 tpd_req = 1; + struct atl1e_tpd_desc *tpd; + + if (test_bit(__AT_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) + return NETDEV_TX_LOCKED; + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* no enough descriptor, just stop queue */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + tpd = atl1e_get_tpd(adapter); + + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag = skb_vlan_tag_get(skb); + u16 atl1e_vlan_tag; + + tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; + AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag); + tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) << + TPD_VLAN_SHIFT; + } + + if (skb->protocol == htons(ETH_P_8021Q)) + tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT; + + if (skb_network_offset(skb) != ETH_HLEN) + tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */ + + /* do TSO and check sum */ + if (atl1e_tso_csum(adapter, skb, tpd) != 0) { + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (atl1e_tx_map(adapter, skb, tpd)) { + dev_kfree_skb_any(skb); + goto out; + } + + atl1e_tx_queue(adapter, tpd_req, tpd); + + netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ +out: + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void atl1e_free_irq(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + 
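+ /* netdev was passed as the dev_id cookie to request_irq() in atl1e_request_irq(), so the same pointer is needed to release the shared IRQ. */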
free_irq(adapter->pdev->irq, netdev); +} + +static int atl1e_request_irq(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int err = 0; + + err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, + netdev); + if (err) { + netdev_dbg(adapter->netdev, + "Unable to allocate interrupt Error: %d\n", err); + return err; + } + netdev_dbg(netdev, "atl1e_request_irq OK\n"); + return err; +} + +int atl1e_up(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + u32 val; + + /* hardware has been reset, we need to reload some things */ + err = atl1e_init_hw(&adapter->hw); + if (err) { + err = -EIO; + return err; + } + atl1e_init_ring_ptrs(adapter); + atl1e_set_multi(netdev); + atl1e_restore_vlan(adapter); + + if (atl1e_configure(adapter)) { + err = -EIO; + goto err_up; + } + + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1e_irq_enable(adapter); + val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL); + AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, + val | MASTER_CTRL_MANUAL_INT); + +err_up: + return err; +} + +void atl1e_down(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + + netif_stop_queue(netdev); + + /* reset MAC to disable all RX/TX */ + atl1e_reset_hw(&adapter->hw); + msleep(1); + + napi_disable(&adapter->napi); + atl1e_del_timer(adapter); + atl1e_irq_disable(adapter); + + netif_carrier_off(netdev); + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); +} + +/** + * atl1e_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1e_open(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + atl1e_init_ring_resources(adapter); + err = atl1e_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1e_request_irq(adapter); + if (unlikely(err)) + goto err_req_irq; + + err = atl1e_up(adapter); + if (unlikely(err)) + goto err_up; + + return 0; + +err_up: + atl1e_free_irq(adapter); +err_req_irq: + atl1e_free_ring_resources(adapter); + atl1e_reset_hw(&adapter->hw); + + return err; +} + +/** + * atl1e_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
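+ * atl1e_down() resets the MAC and disables interrupts, so the hardware is quiesced before the IRQ and the descriptor rings are released.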
+ */ +static int atl1e_close(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1e_down(adapter); + atl1e_free_irq(adapter); + atl1e_free_ring_resources(adapter); + + return 0; +} + +static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 mac_ctrl_data = 0; + u32 wol_ctrl_data = 0; + u16 mii_advertise_data = 0; + u16 mii_bmsr_data = 0; + u16 mii_intr_status_data = 0; + u32 wufc = adapter->wol; + u32 i; +#ifdef CONFIG_PM + int retval = 0; +#endif + + if (netif_running(netdev)) { + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1e_down(adapter); + } + netif_device_detach(netdev); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + if (wufc) { + /* get link status */ + atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); + atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); + + mii_advertise_data = ADVERTISE_10HALF; + + if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) || + (atl1e_write_phy_reg(hw, + MII_ADVERTISE, mii_advertise_data) != 0) || + (atl1e_phy_commit(hw)) != 0) { + netdev_dbg(adapter->netdev, "set phy register failed\n"); + goto wol_dis; + } + + hw->phy_configured = false; /* re-init PHY when resume */ + + /* turn on magic packet wol */ + if (wufc & AT_WUFC_MAG) + wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + + if (wufc & AT_WUFC_LNKC) { + /* if orignal link status is link, just wait for retrive link */ + if (mii_bmsr_data & BMSR_LSTATUS) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + msleep(100); + atl1e_read_phy_reg(hw, MII_BMSR, + &mii_bmsr_data); + if (mii_bmsr_data & BMSR_LSTATUS) + break; + } + + if ((mii_bmsr_data & BMSR_LSTATUS) == 0) + netdev_dbg(adapter->netdev, + "Link may change when suspend\n"); + } + wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + /* only link up can wake up */ + if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) { + netdev_dbg(adapter->netdev, + "read write phy register failed\n"); + goto wol_dis; + } + } + /* clear phy interrupt */ + atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data); + /* Config MAC Ctrl register */ + mac_ctrl_data = MAC_CTRL_RX_EN; + /* set to 10/100M halt duplex */ + mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT; + mac_ctrl_data |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + + __atl1e_vlan_mode(netdev->features, &mac_ctrl_data); + + /* magic packet maybe Broadcast&multicast&Unicast frame */ + if (wufc & AT_WUFC_MAG) + mac_ctrl_data |= MAC_CTRL_BC_EN; + + netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n", + mac_ctrl_data); + + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + /* pcie patch */ + ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto suspend_exit; + } +wol_dis: + + /* WOL disabled */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* pcie patch */ + ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + + atl1e_force_ps(hw); + hw->phy_configured = false; /* re-init PHY when resume */ + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + 
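+ /* Common exit path for the WOL and non-WOL cases: release the IRQ if the interface was running, then disable the device and enter the requested power state. */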
+suspend_exit: + + if (netif_running(netdev)) + atl1e_free_irq(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int atl1e_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + netdev_err(adapter->netdev, + "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + + if (netif_running(netdev)) { + err = atl1e_request_irq(adapter); + if (err) + return err; + } + + atl1e_reset_hw(&adapter->hw); + + if (netif_running(netdev)) + atl1e_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void atl1e_shutdown(struct pci_dev *pdev) +{ + atl1e_suspend(pdev, PMSG_SUSPEND); +} + +static const struct net_device_ops atl1e_netdev_ops = { + .ndo_open = atl1e_open, + .ndo_stop = atl1e_close, + .ndo_start_xmit = atl1e_xmit_frame, + .ndo_get_stats = atl1e_get_stats, + .ndo_set_rx_mode = atl1e_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl1e_set_mac_addr, + .ndo_fix_features = atl1e_fix_features, + .ndo_set_features = atl1e_set_features, + .ndo_change_mtu = atl1e_change_mtu, + .ndo_do_ioctl = atl1e_ioctl, + .ndo_tx_timeout = atl1e_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1e_netpoll, +#endif + +}; + +static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->netdev_ops = &atl1e_netdev_ops; + + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1e_set_ethtool_ops(netdev); + + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = netdev->hw_features | NETIF_F_LLTX | + NETIF_F_HW_VLAN_CTAG_TX; + /* not enabled by default */ + netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS; + return 0; +} + +/** + * atl1e_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1e_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1e_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1e_adapter *adapter = NULL; + static int cards_found; + + int err = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1e chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. 
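+ * Both the streaming and the coherent DMA masks are therefore set to 32 bits below.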
+ */ + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); + goto err_dma; + } + + err = pci_request_regions(pdev, atl1e_driver_name); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1e_adapter)); + if (netdev == NULL) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + err = atl1e_init_netdev(netdev, pdev); + if (err) { + netdev_err(netdev, "init netdevice failed\n"); + goto err_init_netdev; + } + adapter = netdev_priv(netdev); + adapter->bd_number = cards_found; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.adapter = adapter; + adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + netdev_err(netdev, "cannot map device registers\n"); + goto err_ioremap; + } + + /* init mii data */ + adapter->mii.dev = netdev; + adapter->mii.mdio_read = atl1e_mdio_read; + adapter->mii.mdio_write = atl1e_mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; + + netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64); + + setup_timer(&adapter->phy_config_timer, atl1e_phy_config, + (unsigned long)adapter); + + /* get user settings */ + atl1e_check_options(adapter); + /* + * Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1e_driver_name + * Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + atl1e_setup_pcicmd(pdev); + /* setup the private structure */ + err = atl1e_sw_init(adapter); + if (err) { + netdev_err(netdev, "net device private data init failed\n"); + goto err_sw_init; + } + + /* Init GPHY as early as possible due to power saving issue */ + atl1e_phy_init(&adapter->hw); + /* reset the controller to + * put the device in a known good starting state */ + err = atl1e_reset_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + + if (atl1e_read_mac_addr(&adapter->hw) != 0) { + err = -EIO; + netdev_err(netdev, "get mac address failed\n"); + goto err_eeprom; + } + + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); + + INIT_WORK(&adapter->reset_task, atl1e_reset_task); + INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); + err = register_netdev(netdev); + if (err) { + netdev_err(netdev, "register netdevice failed\n"); + goto err_register; + } + + /* assume we have no link for now */ + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + cards_found++; + + return 0; + +err_reset: +err_register: +err_sw_init: +err_eeprom: + pci_iounmap(pdev, adapter->hw.hw_addr); +err_init_netdev: +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * atl1e_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1e_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
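+ * Timers and work items are stopped before the netdev is unregistered; the descriptor rings, register mapping and PCI resources are then released.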
+ */ +static void atl1e_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + /* + * flush_scheduled work may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled + */ + set_bit(__AT_DOWN, &adapter->flags); + + atl1e_del_timer(adapter); + atl1e_cancel_work(adapter); + + unregister_netdev(netdev); + atl1e_free_ring_resources(adapter); + atl1e_force_ps(&adapter->hw); + pci_iounmap(pdev, adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +/** + * atl1e_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t +atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + atl1e_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * atl1e_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + netdev_err(adapter->netdev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1e_reset_hw(&adapter->hw); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * atl1e_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the atl1e_resume routine. 
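+ * By this point atl1e_io_slot_reset() has re-enabled the device and reset the MAC, so only the software state is brought back up here.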
+ */ +static void atl1e_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1e_up(adapter)) { + netdev_err(adapter->netdev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static const struct pci_error_handlers atl1e_err_handler = { + .error_detected = atl1e_io_error_detected, + .slot_reset = atl1e_io_slot_reset, + .resume = atl1e_io_resume, +}; + +static struct pci_driver atl1e_driver = { + .name = atl1e_driver_name, + .id_table = atl1e_pci_tbl, + .probe = atl1e_probe, + .remove = atl1e_remove, + /* Power Management Hooks */ +#ifdef CONFIG_PM + .suspend = atl1e_suspend, + .resume = atl1e_resume, +#endif + .shutdown = atl1e_shutdown, + .err_handler = &atl1e_err_handler +}; + +module_pci_driver(atl1e_driver); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c new file mode 100644 index 000000000..fa314282c --- /dev/null +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c @@ -0,0 +1,269 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include + +#include "atl1e.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define ATL1E_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define ATL1E_PARAM_INIT { [0 ... 
ATL1E_MAX_NIC] = OPTION_UNSET } + +#define ATL1E_PARAM(x, desc) \ + static int x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \ + static unsigned int num_##x; \ + module_param_array_named(x, x, int, &num_##x, 0); \ + MODULE_PARM_DESC(x, desc); + +/* Transmit Memory count + * + * Valid Range: 64-2048 + * + * Default Value: 128 + */ +#define ATL1E_MIN_TX_DESC_CNT 32 +#define ATL1E_MAX_TX_DESC_CNT 1020 +#define ATL1E_DEFAULT_TX_DESC_CNT 128 +ATL1E_PARAM(tx_desc_cnt, "Transmit description count"); + +/* Receive Memory Block Count + * + * Valid Range: 16-512 + * + * Default Value: 128 + */ +#define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */ +#define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */ +#define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 128KB */ +ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)"); + +/* User Specified MediaType Override + * + * Valid Range: 0-5 + * - 0 - auto-negotiate at all supported speeds + * - 1 - only link at 100Mbps Full Duplex + * - 2 - only link at 100Mbps Half Duplex + * - 3 - only link at 10Mbps Full Duplex + * - 4 - only link at 10Mbps Half Duplex + * Default Value: 0 + */ + +ATL1E_PARAM(media_type, "MediaType Select"); + +/* Interrupt Moderate Timer in units of 2 us + * + * Valid Range: 10-65535 + * + * Default Value: 45000(90ms) + */ +#define INT_MOD_DEFAULT_CNT 100 /* 200us */ +#define INT_MOD_MAX_CNT 65000 +#define INT_MOD_MIN_CNT 50 +ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer"); + +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +#define FLASH_VENDOR_DEFAULT 0 +#define FLASH_VENDOR_MIN 0 +#define FLASH_VENDOR_MAX 2 + +struct atl1e_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl1e_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int atl1e_validate_option(int *value, struct atl1e_option *opt, + struct atl1e_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(adapter->netdev, + "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(adapter->netdev, + "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + netdev_info(adapter->netdev, "%s set to %i\n", + opt->name, *value); + return 0; + } + break; + case list_option:{ + int i; + struct atl1e_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(adapter->netdev, + "%s\n", ent->str); + return 0; + } + } + break; + } + default: + BUG(); + } + + netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * atl1e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
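+ * The module parameters are arrays indexed by adapter->bd_number, so each board can be configured independently.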
+ */ +void atl1e_check_options(struct atl1e_adapter *adapter) +{ + int bd = adapter->bd_number; + + if (bd >= ATL1E_MAX_NIC) { + netdev_notice(adapter->netdev, + "no configuration for board #%i\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); + } + + { /* Transmit Ring Size */ + struct atl1e_option opt = { + .type = range_option, + .name = "Transmit Ddescription Count", + .err = "using default of " + __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT), + .def = ATL1E_DEFAULT_TX_DESC_CNT, + .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT, + .max = ATL1E_MAX_TX_DESC_CNT} } + }; + int val; + if (num_tx_desc_cnt > bd) { + val = tx_desc_cnt[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->tx_ring.count = (u16) val & 0xFFFC; + } else + adapter->tx_ring.count = (u16)opt.def; + } + + { /* Receive Memory Block Count */ + struct atl1e_option opt = { + .type = range_option, + .name = "Memory size of rx buffer(KB)", + .err = "using default of " + __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE), + .def = ATL1E_DEFAULT_RX_MEM_SIZE, + .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE, + .max = ATL1E_MAX_RX_MEM_SIZE} } + }; + int val; + if (num_rx_mem_size > bd) { + val = rx_mem_size[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->rx_ring.page_size = (u32)val * 1024; + } else { + adapter->rx_ring.page_size = (u32)opt.def * 1024; + } + } + + { /* Interrupt Moderate Timer */ + struct atl1e_option opt = { + .type = range_option, + .name = "Interrupt Moderate Timer", + .err = "using default of " + __MODULE_STRING(INT_MOD_DEFAULT_CNT), + .def = INT_MOD_DEFAULT_CNT, + .arg = { .r = { .min = INT_MOD_MIN_CNT, + .max = INT_MOD_MAX_CNT} } + } ; + int val; + if (num_int_mod_timer > bd) { + val = int_mod_timer[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->hw.imt = (u16) val; + } else + adapter->hw.imt = (u16)(opt.def); + } + + { /* MediaType */ + struct atl1e_option opt = { + .type = range_option, + .name = "Speed/Duplex Selection", + .err = "using default of " + __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR), + .def = MEDIA_TYPE_AUTO_SENSOR, + .arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR, + .max = MEDIA_TYPE_10M_HALF} } + } ; + int val; + if (num_media_type > bd) { + val = media_type[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->hw.media_type = (u16) val; + } else + adapter->hw.media_type = (u16)(opt.def); + + } +} diff --git a/drivers/net/ethernet/atheros/atlx/Makefile b/drivers/net/ethernet/atheros/atlx/Makefile new file mode 100644 index 000000000..e4f6022ca --- /dev/null +++ b/drivers/net/ethernet/atheros/atlx/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ATL1) += atl1.o +obj-$(CONFIG_ATL2) += atl2.o + diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c new file mode 100644 index 000000000..eca1d113f --- /dev/null +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -0,0 +1,3690 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + * + * Contact Information: + * Xiong Huang + * Jie Yang + * Chris Snook + * Jay Cliburn + * + * This version is adapted from the Attansic reference driver. + * + * TODO: + * Add more ethtool functions. + * Fix abstruse irq enable/disable condition described here: + * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 + * + * NEEDS TESTING: + * VLAN + * multicast + * promiscuous mode + * interrupt coalescing + * SMP torture testing + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "atl1.h" + +#define ATLX_DRIVER_VERSION "2.1.3" +MODULE_AUTHOR("Xiong Huang , " + "Chris Snook , " + "Jay Cliburn "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATLX_DRIVER_VERSION); + +/* Temporary hack for merging atl1 and atl2 */ +#include "atlx.c" + +static const struct ethtool_ops atl1_ethtool_ops; + +/* + * This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define ATL1_MAX_NIC 4 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define ATL1_PARAM_INIT { [0 ... 
ATL1_MAX_NIC] = OPTION_UNSET } + +/* + * Interrupt Moderate Timer in units of 2 us + * + * Valid Range: 10-65535 + * + * Default Value: 100 (200us) + */ +static int int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; +static unsigned int num_int_mod_timer; +module_param_array_named(int_mod_timer, int_mod_timer, int, + &num_int_mod_timer, 0); +MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); + +#define DEFAULT_INT_MOD_CNT 100 /* 200us */ +#define MAX_INT_MOD_CNT 65000 +#define MIN_INT_MOD_CNT 50 + +struct atl1_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl1_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int atl1_validate_option(int *value, struct atl1_option *opt, + struct pci_dev *pdev) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + dev_info(&pdev->dev, "%s enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + dev_info(&pdev->dev, "%s disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + dev_info(&pdev->dev, "%s set to %i\n", opt->name, + *value); + return 0; + } + break; + case list_option:{ + int i; + struct atl1_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + dev_info(&pdev->dev, "%s\n", + ent->str); + return 0; + } + } + } + break; + + default: + break; + } + + dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * atl1_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
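
For illustration only: both drivers program the interrupt moderation timer in 2 us ticks, so the default count of 100 used above corresponds to 200 us between interrupts. A trivial sketch of the unit conversion, assuming nothing beyond the 2 us tick noted in the comments:

/* Sketch: convert between moderation-timer ticks (2 us each) and microseconds. */
#include <stdio.h>

static unsigned int imt_count_to_us(unsigned int count)
{
        return count * 2;               /* each tick is 2 us */
}

static unsigned int us_to_imt_count(unsigned int usecs)
{
        return usecs / 2;               /* round down to a whole tick */
}

int main(void)
{
        printf("count 100 -> %u us\n", imt_count_to_us(100));   /* 200 us */
        printf("count 50  -> %u us\n", imt_count_to_us(50));    /* 100 us */
        printf("500 us    -> count %u\n", us_to_imt_count(500));/* 250 */
        return 0;
}
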
+ */ +static void atl1_check_options(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int bd = adapter->bd_number; + if (bd >= ATL1_MAX_NIC) { + dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); + dev_notice(&pdev->dev, "using defaults for all values\n"); + } + { /* Interrupt Moderate Timer */ + struct atl1_option opt = { + .type = range_option, + .name = "Interrupt Moderator Timer", + .err = "using default of " + __MODULE_STRING(DEFAULT_INT_MOD_CNT), + .def = DEFAULT_INT_MOD_CNT, + .arg = {.r = {.min = MIN_INT_MOD_CNT, + .max = MAX_INT_MOD_CNT} } + }; + int val; + if (num_int_mod_timer > bd) { + val = int_mod_timer[bd]; + atl1_validate_option(&val, &opt, pdev); + adapter->imt = (u16) val; + } else + adapter->imt = (u16) (opt.def); + } +} + +/* + * atl1_pci_tbl - PCI Device ID Table + */ +static const struct pci_device_id atl1_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, + /* required last entry */ + {0,} +}; +MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); + +static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)"); + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static s32 atl1_reset_hw(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + u32 icr; + int i; + + /* + * Clear Interrupt mask to stop board from generating + * interrupts & Clear any pending interrupt events + */ + /* + * atlx_irq_disable(adapter); + * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); + */ + + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); + ioread32(hw->hw_addr + REG_MASTER_CTRL); + + iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); + ioread16(hw->hw_addr + REG_PHY_ENABLE); + + /* delay about 1ms */ + msleep(1); + + /* Wait at least 10ms for All module to be Idle */ + for (i = 0; i < 10; i++) { + icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); + if (!icr) + break; + /* delay 1 ms */ + msleep(1); + /* FIXME: still the right way to do this? */ + cpu_relax(); + } + + if (icr) { + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); + return icr; + } + + return 0; +} + +/* function about EEPROM + * + * check_eeprom_exist + * return 0 if eeprom exist + */ +static int atl1_check_eeprom_exist(struct atl1_hw *hw) +{ + u32 value; + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + } + + value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 
0 : 1; +} + +static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + /* address do not align */ + return false; + + iowrite32(0, hw->hw_addr + REG_VPD_DATA); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + iowrite32(control, hw->hw_addr + REG_VPD_CAP); + ioread32(hw->hw_addr + REG_VPD_CAP); + + for (i = 0; i < 10; i++) { + msleep(2); + control = ioread32(hw->hw_addr + REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); + return true; + } + /* timeout */ + return false; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << + MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16) val; + return 0; + } + return ATLX_ERR_PHY; +} + +#define CUSTOM_SPI_CS_SETUP 2 +#define CUSTOM_SPI_CLK_HI 2 +#define CUSTOM_SPI_CLK_LO 2 +#define CUSTOM_SPI_CS_HOLD 2 +#define CUSTOM_SPI_CS_HI 3 + +static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) +{ + int i; + u32 value; + + iowrite32(0, hw->hw_addr + REG_SPI_DATA); + iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); + + value = SPI_FLASH_CTRL_WAIT_READY | + (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << + SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & + SPI_FLASH_CTRL_CLK_HI_MASK) << + SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & + SPI_FLASH_CTRL_CLK_LO_MASK) << + SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & + SPI_FLASH_CTRL_CS_HOLD_MASK) << + SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & + SPI_FLASH_CTRL_CS_HI_MASK) << + SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << + SPI_FLASH_CTRL_INS_SHIFT; + + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + + value |= SPI_FLASH_CTRL_START; + iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); + ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + + for (i = 0; i < 10; i++) { + msleep(1); + value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); + if (!(value & SPI_FLASH_CTRL_START)) + break; + } + + if (value & SPI_FLASH_CTRL_START) + return false; + + *buf = ioread32(hw->hw_addr + REG_SPI_DATA); + + return true; +} + +/* + * get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1_get_permanent_address(struct atl1_hw *hw) +{ + u32 addr[2]; + u32 i, control; + u16 reg; + u8 eth_addr[ETH_ALEN]; + bool key_valid; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1_check_eeprom_exist(hw)) { + reg = 0; + key_valid = false; + /* Read out all EEPROM content */ + i = 0; + while (1) { + if (atl1_read_eeprom(hw, i + 0x100, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + 
break; + } else + /* read error */ + break; + i += 4; + } + + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + } + + /* see if SPI FLAGS exist ? */ + addr[0] = addr[1] = 0; + reg = 0; + key_valid = false; + i = 0; + while (1) { + if (atl1_spi_read(hw, i + 0x1f000, &control)) { + if (key_valid) { + if (reg == REG_MAC_STA_ADDR) + addr[0] = control; + else if (reg == (REG_MAC_STA_ADDR + 4)) + addr[1] = control; + key_valid = false; + } else if ((control & 0xff) == 0x5A) { + key_valid = true; + reg = (u16) (control >> 16); + } else + /* data end */ + break; + } else + /* read error */ + break; + i += 4; + } + + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + /* + * On some motherboards, the MAC address is written by the + * BIOS directly to the MAC register during POST, and is + * not stored in eeprom. If all else thus far has failed + * to fetch the permanent MAC address, try reading it directly. + */ + addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); + addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *) &addr[1]); + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return 1; +} + +/* + * Reads the adapter's MAC address from the EEPROM + * hw - Struct containing variables accessed by shared code + */ +static s32 atl1_read_mac_addr(struct atl1_hw *hw) +{ + s32 ret = 0; + u16 i; + + if (atl1_get_permanent_address(hw)) { + eth_random_addr(hw->perm_mac_addr); + ret = 1; + } + + for (i = 0; i < ETH_ALEN; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return ret; +} + +/* + * Hashes an address to determine its location in the multicast table + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + * + * atl1_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) +{ + u32 crc32, value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
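
A stand-alone illustration (not driver code): the comment above keeps its e1000 wording, while for this two-register, 64-bit table the code that follows selects the register from bit 31 of the reversed CRC and the bit within it from bits 26-30. The sketch below uses an arbitrary CRC value in place of ether_crc_le() so only the bit-reversal and the register/bit selection are shown:

/* Sketch: turn a multicast CRC into a (register, bit) position in the two
 * 32-bit RX hash-table registers.  The CRC below is an arbitrary example,
 * not a real ether_crc_le() result.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t crc32 = 0x6B27C40Au;   /* example CRC of a multicast address */
        uint32_t value = 0;
        int i;

        /* reverse the CRC, MSB <-> LSB, exactly as atl1_hash_mc_addr() does */
        for (i = 0; i < 32; i++)
                value |= ((crc32 >> i) & 1u) << (31 - i);

        /* as in atl1_hash_set(): register from bit 31, bit from bits 26..30 */
        uint32_t hash_reg = (value >> 31) & 0x1;
        uint32_t hash_bit = (value >> 26) & 0x1F;

        printf("reversed CRC      = 0x%08X\n", (unsigned)value);
        printf("hash register     = %u (of 2)\n", (unsigned)hash_reg);
        printf("bit within reg    = %u (of 32)\n", (unsigned)hash_bit);
        printf("table bit (0..63) = %u\n", (unsigned)(hash_reg * 32 + hash_bit));
        return 0;
}
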
+ */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); + mta |= (1 << hash_bit); + iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); + ioread32(hw->hw_addr + REG_MDIO_CTRL); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return ATLX_ERR_PHY; +} + +/* + * Make L001's PHY out of Power Saving State (bug) + * hw - Struct containing variables accessed by shared code + * when power on, L001's PHY always on Power saving State + * (Gigabit Link forbidden) + */ +static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) +{ + s32 ret; + ret = atl1_write_phy_reg(hw, 29, 0x0029); + if (ret) + return ret; + return atl1_write_phy_reg(hw, 30, 0); +} + +/* + * Resets the PHY and make all config validate + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) + */ +static s32 atl1_phy_reset(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + u16 phy_data; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + + ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /* pcie serdes link may be down! */ + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "pcie phy link down\n"); + + for (i = 0; i < 25; i++) { + msleep(1); + val = ioread32(hw->hw_addr + REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if ((val & (MDIO_START | MDIO_BUSY)) != 0) { + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, + "pcie link down at least 25ms\n"); + return ret_val; + } + } + return 0; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * hw - Struct containing variables accessed by shared code + */ +static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) +{ + s32 ret_val; + s16 mii_autoneg_adv_reg; + s16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + + /* Read the MII 1000Base-T Control Register (Address 9). 
*/ + mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; + + /* + * Need to parse media_type and set up + * the appropriate PHY registers. + */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS | + MII_AR_100TX_FD_CAPS); + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_1000M_FULL: + mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + break; + + default: + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + + return 0; +} + +/* + * Configures link settings. + * hw - Struct containing variables accessed by shared code + * Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. + */ +static s32 atl1_setup_link(struct atl1_hw *hw) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + + /* + * Options: + * PHY will advertise value(s) parsed from + * autoneg_advertised and fc + * no matter what autoneg is , We will not wait link result. + */ + ret_val = atl1_phy_setup_autoneg_adv(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, + "error setting up autonegotiation\n"); + return ret_val; + } + /* SW.Reset , En-Auto-Neg if needed */ + ret_val = atl1_phy_reset(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&pdev->dev, "error resetting phy\n"); + return ret_val; + } + hw->phy_configured = true; + return ret_val; +} + +static void atl1_init_flash_opcode(struct atl1_hw *hw) +{ + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) + /* Atmel */ + hw->flash_vendor = 0; + + /* Init OP table */ + iowrite8(flash_table[hw->flash_vendor].cmd_program, + hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); + iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, + hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, + hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); + iowrite8(flash_table[hw->flash_vendor].cmd_rdid, + hw->hw_addr + REG_SPI_FLASH_OP_RDID); + iowrite8(flash_table[hw->flash_vendor].cmd_wren, + hw->hw_addr + REG_SPI_FLASH_OP_WREN); + iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, + hw->hw_addr + REG_SPI_FLASH_OP_RDSR); + iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, + hw->hw_addr + REG_SPI_FLASH_OP_WRSR); + iowrite8(flash_table[hw->flash_vendor].cmd_read, + hw->hw_addr + REG_SPI_FLASH_OP_READ); +} + +/* + * Performs basic configuration of the adapter. 
+ * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +static s32 atl1_init_hw(struct atl1_hw *hw) +{ + u32 ret_val = 0; + + /* Zero out the Multicast HASH table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + /* clear the old settings from the multicast hash table */ + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + atl1_init_flash_opcode(hw); + + if (!hw->phy_configured) { + /* enable GPHY LinkChange Interrupt */ + ret_val = atl1_write_phy_reg(hw, 18, 0xC00); + if (ret_val) + return ret_val; + /* make PHY out of power-saving state */ + ret_val = atl1_phy_leave_power_saving(hw); + if (ret_val) + return ret_val; + /* Call a subroutine to configure the link */ + ret_val = atl1_setup_link(hw); + } + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) +{ + struct pci_dev *pdev = hw->back->pdev; + struct atl1_adapter *adapter = hw->back; + s32 ret_val; + u16 phy_data; + + /* ; --- Read PHY Specific Status Register (17) */ + ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) + return ATLX_ERR_PHY_RES; + + switch (phy_data & MII_ATLX_PSSR_SPEED) { + case MII_ATLX_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_ATLX_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_ATLX_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + if (netif_msg_hw(adapter)) + dev_dbg(&pdev->dev, "error getting speed\n"); + return ATLX_ERR_PHY_SPEED; + } + if (phy_data & MII_ATLX_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +static void atl1_set_mac_addr(struct atl1_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + /* high dword */ + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); +} + +/** + * atl1_sw_init - Initialize general software structures (struct atl1_adapter) + * @adapter: board private structure to initialize + * + * atl1_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
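
For illustration (not part of the patch): atl1_set_mac_addr() above packs the station address into a low dword built from bytes 2-5 and a high word built from bytes 0-1, as its 00-0B-6A-F6-00-DC example notes. A user-space sketch of the same packing, with the register writes replaced by printf():

/* Sketch: split a MAC address into the low/high station-address register
 * values the way atl1_set_mac_addr() does, using the example address from
 * the driver comment.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };

        /* low dword: bytes 2..5, most significant first */
        uint32_t lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
                      ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
        /* high word: bytes 0..1 */
        uint32_t hi = ((uint32_t)mac[0] << 8) | (uint32_t)mac[1];

        printf("REG_MAC_STA_ADDR     <- 0x%08X\n", (unsigned)lo); /* 0x6AF600DC */
        printf("REG_MAC_STA_ADDR + 4 <- 0x%08X\n", (unsigned)hi); /* 0x0000000B */
        return 0;
}
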
+ */ +static int atl1_sw_init(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->wol = 0; + device_set_wakeup_enable(&adapter->pdev->dev, false); + adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; + adapter->ict = 50000; /* 100ms */ + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->ipgt = 0x60; + hw->min_ifg = 0x50; + hw->ipgr1 = 0x40; + hw->ipgr2 = 0x60; + hw->max_retry = 0xf; + hw->lcol = 0x37; + hw->jam_ipg = 7; + hw->rfd_burst = 8; + hw->rrd_burst = 8; + hw->rfd_fetch_gap = 1; + hw->rx_jumbo_th = adapter->rx_buffer_len / 8; + hw->rx_jumbo_lkah = 1; + hw->rrd_ret_timer = 16; + hw->tpd_burst = 4; + hw->tpd_fetch_th = 16; + hw->txf_burst = 0x100; + hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; + hw->tpd_fetch_gap = 1; + hw->rcb_value = atl1_rcb_64; + hw->dma_ord = atl1_dma_ord_enh; + hw->dmar_block = atl1_dma_req_256; + hw->dmaw_block = atl1_dma_req_256; + hw->cmb_rrd = 4; + hw->cmb_tpd = 4; + hw->cmb_rx_timer = 1; /* about 2us */ + hw->cmb_tx_timer = 1; /* about 2us */ + hw->smb_timer = 100000; /* about 200ms */ + + spin_lock_init(&adapter->lock); + spin_lock_init(&adapter->mb_lock); + + return 0; +} + +static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); + + return result; +} + +static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, + int val) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + atl1_write_phy_reg(&adapter->hw, reg_num, val); +} + +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + int retval; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->lock, flags); + retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); + spin_unlock_irqrestore(&adapter->lock, flags); + + return retval; +} + +/** + * atl1_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + struct pci_dev *pdev = adapter->pdev; + int size; + u8 offset = 0; + + size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) { + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", + size); + goto err_nomem; + } + rfd_ring->buffer_info = + (tpd_ring->buffer_info + tpd_ring->count); + + /* + * real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional 40 bytes tacked onto the end. 
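
Illustrative note, not driver code: the comment above is implemented by the offset arithmetic in atl1_setup_ring_resources() — each of the five blocks carved out of the shared buffer (TPD, RFD, RRD, CMB, SMB) may need padding up to the next 8-byte boundary, and reserving 8 bytes per block is where the extra 40 bytes come from. A sketch of that align-up step on plain integers:

/* Sketch: the align-to-8-bytes step applied to each ring/block start address;
 * addresses are plain integers here instead of DMA handles.
 */
#include <stdio.h>
#include <stdint.h>

/* bytes of padding needed so addr becomes a multiple of 8 (0 if aligned) */
static uint64_t align8_offset(uint64_t addr)
{
        return (addr & 0x7) ? (8 - (addr & 0x7)) : 0;
}

int main(void)
{
        /* example block start addresses */
        uint64_t starts[] = { 0x12340000, 0x12340005, 0x1234000c };
        size_t n = sizeof(starts) / sizeof(starts[0]);
        size_t i;

        for (i = 0; i < n; i++)
                printf("0x%llx -> pad %llu -> 0x%llx\n",
                       (unsigned long long)starts[i],
                       (unsigned long long)align8_offset(starts[i]),
                       (unsigned long long)(starts[i] + align8_offset(starts[i])));
        return 0;
}
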
+ */ + ring_header->size = size = + sizeof(struct tx_packet_desc) * tpd_ring->count + + sizeof(struct rx_free_desc) * rfd_ring->count + + sizeof(struct rx_return_desc) * rrd_ring->count + + sizeof(struct coals_msg_block) + + sizeof(struct stats_msg_block) + + 40; + + ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, + &ring_header->dma); + if (unlikely(!ring_header->desc)) { + if (netif_msg_drv(adapter)) + dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); + goto err_nomem; + } + + memset(ring_header->desc, 0, ring_header->size); + + /* init TPD ring */ + tpd_ring->dma = ring_header->dma; + offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; + tpd_ring->dma += offset; + tpd_ring->desc = (u8 *) ring_header->desc + offset; + tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; + + /* init RFD ring */ + rfd_ring->dma = tpd_ring->dma + tpd_ring->size; + offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; + rfd_ring->dma += offset; + rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); + rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; + + + /* init RRD ring */ + rrd_ring->dma = rfd_ring->dma + rfd_ring->size; + offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; + rrd_ring->dma += offset; + rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); + rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; + + + /* init CMB */ + adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; + offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; + adapter->cmb.dma += offset; + adapter->cmb.cmb = (struct coals_msg_block *) + ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); + + /* init SMB */ + adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); + offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; + adapter->smb.dma += offset; + adapter->smb.smb = (struct stats_msg_block *) + ((u8 *) adapter->cmb.cmb + + (sizeof(struct coals_msg_block) + offset)); + + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/** + * atl1_clean_rx_ring - Free RFD Buffers + * @adapter: board private structure + */ +static void atl1_clean_rx_ring(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rfd_ring->count; i++) { + buffer_info = &rfd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * rfd_ring->count; + memset(rfd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); + + rfd_ring->next_to_clean = 0; + atomic_set(&rfd_ring->next_to_use, 0); + + rrd_ring->next_to_use = 0; + atomic_set(&rrd_ring->next_to_clean, 0); +} + +/** + * atl1_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + */ +static void atl1_clean_tx_ring(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->dma) { + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + } + + for (i = 0; i < tpd_ring->count; i++) { + buffer_info = &tpd_ring->buffer_info[i]; + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct atl1_buffer) * tpd_ring->count; + memset(tpd_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tpd_ring->desc, 0, tpd_ring->size); + + atomic_set(&tpd_ring->next_to_use, 0); + atomic_set(&tpd_ring->next_to_clean, 0); +} + +/** + * atl1_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1_free_ring_resources(struct atl1_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_ring_header *ring_header = &adapter->ring_header; + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); + + kfree(tpd_ring->buffer_info); + pci_free_consistent(pdev, ring_header->size, ring_header->desc, + ring_header->dma); + 
+ tpd_ring->buffer_info = NULL; + tpd_ring->desc = NULL; + tpd_ring->dma = 0; + + rfd_ring->buffer_info = NULL; + rfd_ring->desc = NULL; + rfd_ring->dma = 0; + + rrd_ring->desc = NULL; + rrd_ring->dma = 0; + + adapter->cmb.dma = 0; + adapter->cmb.cmb = NULL; + + adapter->smb.dma = 0; + adapter->smb.smb = NULL; +} + +static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) +{ + u32 value; + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; + /* duplex */ + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + /* speed */ + value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + /* flow control */ + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + /* PAD & CRC */ + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + /* preamble length */ + value |= (((u32) adapter->hw.preamble_len + & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + /* vlan */ + __atlx_vlan_mode(netdev->features, &value); + /* rx checksum + if (adapter->rx_csum) + value |= MAC_CTRL_RX_CHKSUM_EN; + */ + /* filter mode */ + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + /* value |= MAC_CTRL_LOOPBACK; */ + iowrite32(value, hw->hw_addr + REG_MAC_CTRL); +} + +static u32 atl1_check_link(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u16 speed, duplex, phy_data; + int reconfig = 0; + + /* MII_BMSR must read twice */ + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); + if (!(phy_data & BMSR_LSTATUS)) { + /* link down */ + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, "link is down\n"); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + } + return 0; + } + + /* Link Up */ + ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) + return ret_val; + + switch (hw->media_type) { + case MEDIA_TYPE_1000M_FULL: + if (speed != SPEED_1000 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_FULL: + if (speed != SPEED_100 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_HALF: + if (speed != SPEED_100 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_FULL: + if (speed != SPEED_10 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_HALF: + if (speed != SPEED_10 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + } + + /* link result is our setting */ + if (!reconfig) { + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1_setup_mac_ctrl(adapter); + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, + "%s link is up %d Mbps %s\n", + netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "full duplex" : "half duplex"); + } + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + } + return 0; + } + + /* change original link status */ + if (netif_carrier_ok(netdev)) { + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + + if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && + hw->media_type != MEDIA_TYPE_1000M_FULL) { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + return 0; + } + + /* auto-neg, insert timer to re-config phy */ + if (!adapter->phy_timer_pending) { + adapter->phy_timer_pending = true; + mod_timer(&adapter->phy_config_timer, + round_jiffies(jiffies + 3 * HZ)); + } + + return 0; +} + +static void set_flow_ctrl_old(struct atl1_adapter *adapter) +{ + u32 hi, lo, value; + + /* RFD Flow Control */ + value = adapter->rfd_ring.count; + hi = value / 16; + if (hi < 2) + hi = 2; + lo = value * 7 / 8; + + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = adapter->rrd_ring.count; + lo = value / 16; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +static void set_flow_ctrl_new(struct atl1_hw *hw) +{ + u32 hi, lo, value; + + /* RXF Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); + lo = value / 16; + if (lo < 192) + lo = 192; + hi = value * 7 / 8; + if (hi < lo) + hi = lo + 16; + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); + + /* RRD Flow Control */ + value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); + lo = value / 8; + hi = value * 7 / 8; + if (lo < 2) + lo = 2; + if (hi < lo) + hi = lo + 3; + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); +} + +/** + * atl1_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. 
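
A sketch for illustration only: set_flow_ctrl_old() above derives the two RFD pause-threshold fields directly from the ring size — the high field is 1/16 of the ring (at least 2) and the low field is 7/8 of it. The arithmetic for one example ring size, leaving out the register mask/shift packing:

/* Sketch of the RFD pause-threshold arithmetic from set_flow_ctrl_old(). */
#include <stdio.h>

int main(void)
{
        unsigned int rfd_count = 256;           /* example RFD ring size */
        unsigned int hi = rfd_count / 16;       /* "TH_HI" field */
        unsigned int lo;

        if (hi < 2)
                hi = 2;
        lo = rfd_count * 7 / 8;                 /* "TH_LO" field */

        printf("RFD ring of %u descriptors -> TH_HI = %u, TH_LO = %u\n",
               rfd_count, hi, lo);              /* 256 -> 16, 224 */
        return 0;
}
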
+ */ +static u32 atl1_configure(struct atl1_adapter *adapter) +{ + struct atl1_hw *hw = &adapter->hw; + u32 value; + + /* clear interrupt status */ + iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); + + /* set MAC Address */ + value = (((u32) hw->mac_addr[2]) << 24) | + (((u32) hw->mac_addr[3]) << 16) | + (((u32) hw->mac_addr[4]) << 8) | + (((u32) hw->mac_addr[5])); + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); + iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); + + /* tx / rx ring */ + + /* HI base address */ + iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), + hw->hw_addr + REG_DESC_BASE_ADDR_HI); + /* LO base address */ + iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RFD_ADDR_LO); + iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_RRD_ADDR_LO); + iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_TPD_ADDR_LO); + iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_CMB_ADDR_LO); + iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), + hw->hw_addr + REG_DESC_SMB_ADDR_LO); + + /* element count */ + value = adapter->rrd_ring.count; + value <<= 16; + value += adapter->rfd_ring.count; + iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); + iowrite32(adapter->tpd_ring.count, hw->hw_addr + + REG_DESC_TPD_RING_SIZE); + + /* Load Ptr */ + iowrite32(1, hw->hw_addr + REG_LOAD_PTR); + + /* config Mailbox */ + value = ((atomic_read(&adapter->tpd_ring.next_to_use) + & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | + ((atomic_read(&adapter->rrd_ring.next_to_clean) + & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | + ((atomic_read(&adapter->rfd_ring.next_to_use) + & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAILBOX); + + /* config IPG/IFG */ + value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) + << MAC_IPG_IFG_IPGT_SHIFT) | + (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) + << MAC_IPG_IFG_MIFG_SHIFT) | + (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) + << MAC_IPG_IFG_IPGR1_SHIFT) | + (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) + << MAC_IPG_IFG_IPGR2_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); + + /* config Half-Duplex Control */ + value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | + (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) + << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) + << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); + + /* set Interrupt Moderator Timer */ + iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); + iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); + + /* set Interrupt Clear Timer */ + iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); + + /* set max frame size hw will accept */ + iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); + + /* jumbo size & rrd retirement timer */ + value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) + << RXQ_JMBOSZ_TH_SHIFT) | + (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) + << RXQ_JMBO_LKAH_SHIFT) | + (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) + << RXQ_RRD_TIMER_SHIFT); + iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); + + /* Flow Control */ + switch (hw->dev_rev) { + 
case 0x8001: + case 0x9001: + case 0x9002: + case 0x9003: + set_flow_ctrl_old(adapter); + break; + default: + set_flow_ctrl_new(hw); + break; + } + + /* config TXQ */ + value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) + << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | + (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) + << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | + (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) + << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | + TXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); + + /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ + value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) + << TX_JUMBO_TASK_TH_SHIFT) | + (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) + << TX_TPD_MIN_IPG_SHIFT); + iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); + + /* config RXQ */ + value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) + << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | + (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) + << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | + (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) + << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | + RXQ_CTRL_EN; + iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); + + /* config DMA Engine */ + value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | + ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | + DMA_CTRL_DMAW_EN; + value |= (u32) hw->dma_ord; + if (atl1_rcb_128 == hw->rcb_value) + value |= DMA_CTRL_RCB_VALUE; + iowrite32(value, hw->hw_addr + REG_DMA_CTRL); + + /* config CMB / SMB */ + value = (hw->cmb_tpd > adapter->tpd_ring.count) ? + hw->cmb_tpd : adapter->tpd_ring.count; + value <<= 16; + value |= hw->cmb_rrd; + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); + value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); + iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); + + /* --- enable CMB / SMB */ + value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; + iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); + + value = ioread32(adapter->hw.hw_addr + REG_ISR); + if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) + value = 1; /* config failed */ + else + value = 0; + + /* clear all interrupt status */ + iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); + iowrite32(0, adapter->hw.hw_addr + REG_ISR); + return value; +} + +/* + * atl1_pcie_patch - Patch for PCIE module + */ +static void atl1_pcie_patch(struct atl1_adapter *adapter) +{ + u32 value; + + /* much vendor magic here */ + value = 0x6500; + iowrite32(value, adapter->hw.hw_addr + 0x12FC); + /* pcie flow control mode change */ + value = ioread32(adapter->hw.hw_addr + 0x1008); + value |= 0x8000; + iowrite32(value, adapter->hw.hw_addr + 0x1008); +} + +/* + * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 + * on PCI Command register is disable. + * The function enable this bit. 
+ * Brackett, 2006/03/15 + */ +static void atl1_via_workaround(struct atl1_adapter *adapter) +{ + unsigned long value; + + value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); + if (value & PCI_COMMAND_INTX_DISABLE) + value &= ~PCI_COMMAND_INTX_DISABLE; + iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); +} + +static void atl1_inc_smb(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct stats_msg_block *smb = adapter->smb.smb; + + u64 new_rx_errors = smb->rx_frag + + smb->rx_fcs_err + + smb->rx_len_err + + smb->rx_sz_ov + + smb->rx_rxf_ov + + smb->rx_rrd_ov + + smb->rx_align_err; + u64 new_tx_errors = smb->tx_late_col + + smb->tx_abort_col + + smb->tx_underrun + + smb->tx_trunc; + + /* Fill out the OS statistics structure */ + adapter->soft_stats.rx_packets += smb->rx_ok + new_rx_errors; + adapter->soft_stats.tx_packets += smb->tx_ok + new_tx_errors; + adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; + adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; + adapter->soft_stats.multicast += smb->rx_mcast; + adapter->soft_stats.collisions += smb->tx_1_col + + smb->tx_2_col + + smb->tx_late_col + + smb->tx_abort_col; + + /* Rx Errors */ + adapter->soft_stats.rx_errors += new_rx_errors; + adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; + adapter->soft_stats.rx_length_errors += smb->rx_len_err; + adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; + adapter->soft_stats.rx_frame_errors += smb->rx_align_err; + + adapter->soft_stats.rx_pause += smb->rx_pause; + adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; + adapter->soft_stats.rx_trunc += smb->rx_sz_ov; + + /* Tx Errors */ + adapter->soft_stats.tx_errors += new_tx_errors; + adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; + adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; + adapter->soft_stats.tx_window_errors += smb->tx_late_col; + + adapter->soft_stats.excecol += smb->tx_abort_col; + adapter->soft_stats.deffer += smb->tx_defer; + adapter->soft_stats.scc += smb->tx_1_col; + adapter->soft_stats.mcc += smb->tx_2_col; + adapter->soft_stats.latecol += smb->tx_late_col; + adapter->soft_stats.tx_underun += smb->tx_underrun; + adapter->soft_stats.tx_trunc += smb->tx_trunc; + adapter->soft_stats.tx_pause += smb->tx_pause; + + netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes; + netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes; + netdev->stats.multicast = adapter->soft_stats.multicast; + netdev->stats.collisions = adapter->soft_stats.collisions; + netdev->stats.rx_errors = adapter->soft_stats.rx_errors; + netdev->stats.rx_length_errors = + adapter->soft_stats.rx_length_errors; + netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; + netdev->stats.rx_frame_errors = + adapter->soft_stats.rx_frame_errors; + netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; + netdev->stats.rx_dropped = adapter->soft_stats.rx_rrd_ov; + netdev->stats.tx_errors = adapter->soft_stats.tx_errors; + netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; + netdev->stats.tx_aborted_errors = + adapter->soft_stats.tx_aborted_errors; + netdev->stats.tx_window_errors = + adapter->soft_stats.tx_window_errors; + netdev->stats.tx_carrier_errors = + adapter->soft_stats.tx_carrier_errors; + + netdev->stats.rx_packets = adapter->soft_stats.rx_packets; + netdev->stats.tx_packets = adapter->soft_stats.tx_packets; +} + +static void atl1_update_mailbox(struct atl1_adapter *adapter) +{ + unsigned long flags; + u32 tpd_next_to_use; + u32 rfd_next_to_use; + u32 rrd_next_to_clean; + u32 
value; + + spin_lock_irqsave(&adapter->mb_lock, flags); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); + + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + + spin_unlock_irqrestore(&adapter->mb_lock, flags); +} + +static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, u16 offset) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + + while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { + rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; + if (++rfd_ring->next_to_clean == rfd_ring->count) { + rfd_ring->next_to_clean = 0; + } + } +} + +static void atl1_update_rfd_index(struct atl1_adapter *adapter, + struct rx_return_desc *rrd) +{ + u16 num_buf; + + num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / + adapter->rx_buffer_len; + if (rrd->num_buf == num_buf) + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, num_buf); +} + +static void atl1_rx_checksum(struct atl1_adapter *adapter, + struct rx_return_desc *rrd, struct sk_buff *skb) +{ + struct pci_dev *pdev = adapter->pdev; + + /* + * The L1 hardware contains a bug that erroneously sets the + * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a + * fragmented IP packet is received, even though the packet + * is perfectly valid and its checksum is correct. There's + * no way to distinguish between one of these good packets + * and a packet that actually contains a TCP/UDP checksum + * error, so all we can do is allow it to be handed up to + * the higher layers and let it be sorted out there. 
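
For illustration, not driver code: the work-around described above means atl1_rx_checksum() reports CHECKSUM_UNNECESSARY only for clean IPv4 frames and otherwise leaves the checksum unverified, so a possibly mis-flagged fragmented packet can still be passed up to the stack. A compact restatement of that decision with placeholder flag bits — the names mirror the driver, the bit values do not:

/* Sketch of the rx checksum-offload decision; the bit values below are
 * placeholders, not the real RRD flag encodings.
 */
#include <stdio.h>

#define PKT_FLAG_ERR    0x1     /* placeholder: RRD reports some error */
#define PKT_FLAG_IPV4   0x2     /* placeholder: packet is IPv4 */

#define ERR_HARD        0x1     /* placeholder: CRC/truncation/code/overflow */
#define ERR_IP_CSUM     0x2     /* placeholder: IP header checksum error */
#define ERR_L4_CSUM     0x4     /* placeholder: TCP/UDP checksum error */

/* returns 1 if the stack may skip checksum verification, 0 otherwise */
static int rx_csum_unnecessary(unsigned int pkt_flg, unsigned int err_flg)
{
        if ((pkt_flg & PKT_FLAG_ERR) && (err_flg & ERR_HARD))
                return 0;       /* hard receive error: no offload result */
        if (!(pkt_flg & PKT_FLAG_IPV4))
                return 0;       /* non-IPv4: hardware did not check it */
        if (err_flg & (ERR_IP_CSUM | ERR_L4_CSUM))
                return 0;       /* may be the fragmented-IP false positive:
                                 * leave it unverified for the stack */
        return 1;               /* clean IPv4 frame: CHECKSUM_UNNECESSARY */
}

int main(void)
{
        printf("clean IPv4:            %d\n",
               rx_csum_unnecessary(PKT_FLAG_IPV4, 0));
        printf("flagged L4 csum error: %d\n",
               rx_csum_unnecessary(PKT_FLAG_IPV4 | PKT_FLAG_ERR, ERR_L4_CSUM));
        return 0;
}
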
+ */ + + skb_checksum_none_assert(skb); + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | + ERR_FLAG_CODE | ERR_FLAG_OV)) { + adapter->hw_csum_err++; + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "rx checksum error\n"); + return; + } + } + + /* not IPv4 */ + if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) + /* checksum is invalid, but it's not an IPv4 pkt, so ok */ + return; + + /* IPv4 packet */ + if (likely(!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; + return; + } +} + +/** + * atl1_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + */ +static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) +{ + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct page *page; + unsigned long offset; + struct atl1_buffer *buffer_info, *next_info; + struct sk_buff *skb; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct rx_free_desc *rfd_desc; + + next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (!buffer_info->alloced && !next_info->alloced) { + if (buffer_info->skb) { + buffer_info->alloced = 1; + goto next; + } + + rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = netdev_alloc_skb_ip_align(adapter->netdev, + adapter->rx_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->soft_stats.rx_dropped++; + break; + } + + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16) adapter->rx_buffer_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(pdev, page, offset, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); + rfd_desc->coalese = 0; + +next: + rfd_next_to_use = next_next; + if (unlikely(++next_next == rfd_ring->count)) + next_next = 0; + + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
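
As an aside for illustration: the wmb() that follows this comment makes the descriptor writes visible before the producer index that the hardware polls. The sketch below shows the same publish pattern as a portable analogue only — C11 atomics order CPU-visible memory, not device DMA, so this is not a substitute for the kernel barrier:

/* Sketch: fill a descriptor, then publish the new producer index with release
 * semantics so a consumer never observes the index before the descriptor
 * contents.  Stand-in for the driver's descriptor-fill + wmb() + index write.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct rx_desc {
        uint64_t buffer_addr;
        uint16_t buf_len;
};

static struct rx_desc ring[RING_SIZE];
static _Atomic unsigned int producer;   /* index the "hardware" would read */

static void publish_buffer(unsigned int idx, uint64_t addr, uint16_t len)
{
        ring[idx].buffer_addr = addr;   /* plain writes: the descriptor itself */
        ring[idx].buf_len = len;

        /* release store: the writes above are visible before the new index */
        atomic_store_explicit(&producer, (idx + 1) % RING_SIZE,
                              memory_order_release);
}

int main(void)
{
        publish_buffer(0, 0x1000, 1536);
        printf("producer index now %u\n",
               atomic_load_explicit(&producer, memory_order_acquire));
        return 0;
}
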
+ */ + wmb(); + atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); + } + return num_alloc; +} + +static int atl1_intr_rx(struct atl1_adapter *adapter, int budget) +{ + int i, count; + u16 length; + u16 rrd_next_to_clean; + u32 value; + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1_buffer *buffer_info; + struct rx_return_desc *rrd; + struct sk_buff *skb; + + count = 0; + + rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); + + while (count < budget) { + rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); + i = 1; + if (likely(rrd->xsz.valid)) { /* packet valid */ +chk_rrd: + /* check rrd status */ + if (likely(rrd->num_buf == 1)) + goto rrd_ok; + else if (netif_msg_rx_err(adapter)) { + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "unexpected RRD buffer count\n"); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "rx_buf_len = %d\n", + adapter->rx_buffer_len); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD num_buf = %d\n", + rrd->num_buf); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_len = %d\n", + rrd->xsz.xsum_sz.pkt_size); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD pkt_flg = 0x%08X\n", + rrd->pkt_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD err_flg = 0x%08X\n", + rrd->err_flg); + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "RRD vlan_tag = 0x%08X\n", + rrd->vlan_tag); + } + + /* rrd seems to be bad */ + if (unlikely(i-- > 0)) { + /* rrd may not be DMAed completely */ + udelay(1); + goto chk_rrd; + } + /* bad rrd */ + if (netif_msg_rx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "bad RRD\n"); + /* see if update RFD index */ + if (rrd->num_buf > 1) + atl1_update_rfd_index(adapter, rrd); + + /* update rrd */ + rrd->xsz.valid = 0; + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + continue; + } else { /* current rrd still not be updated */ + + break; + } +rrd_ok: + /* clean alloc flag for bad rrd */ + atl1_clean_alloc_flag(adapter, rrd, 0); + + buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; + if (++rfd_ring->next_to_clean == rfd_ring->count) + rfd_ring->next_to_clean = 0; + + /* update rrd next to clean */ + if (++rrd_next_to_clean == rrd_ring->count) + rrd_next_to_clean = 0; + count++; + + if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { + if (!(rrd->err_flg & + (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM + | ERR_FLAG_LEN))) { + /* packet error, don't need upstream */ + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + continue; + } + } + + /* Good Receive */ + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + skb = buffer_info->skb; + length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); + + skb_put(skb, length - ETH_FCS_LEN); + + /* Receive Checksum Offload */ + atl1_rx_checksum(adapter, rrd, skb); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) { + u16 vlan_tag = (rrd->vlan_tag >> 4) | + ((rrd->vlan_tag & 7) << 13) | + ((rrd->vlan_tag & 8) << 9); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } + netif_receive_skb(skb); + + /* let protocol layer free skb */ + buffer_info->skb = NULL; + buffer_info->alloced = 0; + rrd->xsz.valid = 0; + } + + atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); + + atl1_alloc_rx_buffers(adapter); + + /* update mailbox ? 
*/ + if (count) { + u32 tpd_next_to_use; + u32 rfd_next_to_use; + + spin_lock(&adapter->mb_lock); + + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); + rfd_next_to_use = + atomic_read(&adapter->rfd_ring.next_to_use); + rrd_next_to_clean = + atomic_read(&adapter->rrd_ring.next_to_clean); + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << + MB_RFD_PROD_INDX_SHIFT) | + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << + MB_RRD_CONS_INDX_SHIFT) | + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << + MB_TPD_PROD_INDX_SHIFT); + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); + spin_unlock(&adapter->mb_lock); + } + + return count; +} + +static int atl1_intr_tx(struct atl1_adapter *adapter) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 sw_tpd_next_to_clean; + u16 cmb_tpd_next_to_clean; + int count = 0; + + sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); + cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); + + while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { + buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb_irq(buffer_info->skb); + buffer_info->skb = NULL; + } + + if (++sw_tpd_next_to_clean == tpd_ring->count) + sw_tpd_next_to_clean = 0; + + count++; + } + atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); + + return count; +} + +static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) +{ + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 next_to_use = atomic_read(&tpd_ring->next_to_use); + return (next_to_clean > next_to_use) ? 
+ next_to_clean - next_to_use - 1 : + tpd_ring->count + next_to_clean - next_to_use - 1; +} + +static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + u8 hdr_len, ip_off; + u32 real_len; + + if (skb_shinfo(skb)->gso_size) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + real_len = (((unsigned char *)iph - skb->data) + + ntohs(iph->tot_len)); + if (real_len < skb->len) + pskb_trim(skb, real_len); + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (skb->len == hdr_len) { + iph->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(iph->saddr, + iph->daddr, tcp_hdrlen(skb), + IPPROTO_TCP, 0); + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << + TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT; + ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT; + return 1; + } + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, IPPROTO_TCP, 0); + ip_off = (unsigned char *)iph - + (unsigned char *) skb_network_header(skb); + if (ip_off == 8) /* 802.3-SNAP frame */ + ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; + else if (ip_off != 0) + return -2; + + ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) << + TPD_IPHL_SHIFT; + ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT; + ptpd->word3 |= (skb_shinfo(skb)->gso_size & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; + return 3; + } + } + return 0; +} + +static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + u8 css, cso; + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + css = skb_checksum_start_offset(skb); + cso = css + (u8) skb->csum_offset; + if (unlikely(css & 0x1)) { + /* L1 hardware requires an even number here */ + if (netif_msg_tx_err(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "payload offset not an even number\n"); + return -1; + } + ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) << + TPD_CCSUMOFFSET_SHIFT; + ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT; + return true; + } + return 0; +} + +static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + u16 buf_len = skb->len; + struct page *page; + unsigned long offset; + unsigned int nr_frags; + unsigned int f; + int retval; + u16 next_to_use; + u16 data_len; + u8 hdr_len; + + buf_len -= skb->data_len; + nr_frags = skb_shinfo(skb)->nr_frags; + next_to_use = atomic_read(&tpd_ring->next_to_use); + buffer_info = &tpd_ring->buffer_info[next_to_use]; + BUG_ON(buffer_info->skb); + /* put skb in last TPD */ + buffer_info->skb = NULL; + + retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (retval) { + /* TSO */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + buffer_info->length = hdr_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, hdr_len, + PCI_DMA_TODEVICE); + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + + if (buf_len > hdr_len) { + int i, nseg; + + data_len = buf_len - hdr_len; + nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) / + 
ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < nseg; i++) { + buffer_info = + &tpd_ring->buffer_info[next_to_use]; + buffer_info->skb = NULL; + buffer_info->length = + (ATL1_MAX_TX_BUF_LEN >= + data_len) ? ATL1_MAX_TX_BUF_LEN : data_len; + data_len -= buffer_info->length; + page = virt_to_page(skb->data + + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); + offset = (unsigned long)(skb->data + + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) & + ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, + page, offset, buffer_info->length, + PCI_DMA_TODEVICE); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + } + } else { + /* not TSO */ + buffer_info->length = buf_len; + page = virt_to_page(skb->data); + offset = (unsigned long)skb->data & ~PAGE_MASK; + buffer_info->dma = pci_map_page(adapter->pdev, page, + offset, buf_len, PCI_DMA_TODEVICE); + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + u16 i, nseg; + + frag = &skb_shinfo(skb)->frags[f]; + buf_len = skb_frag_size(frag); + + nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + for (i = 0; i < nseg; i++) { + buffer_info = &tpd_ring->buffer_info[next_to_use]; + BUG_ON(buffer_info->skb); + + buffer_info->skb = NULL; + buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? + ATL1_MAX_TX_BUF_LEN : buf_len; + buf_len -= buffer_info->length; + buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, i * ATL1_MAX_TX_BUF_LEN, + buffer_info->length, DMA_TO_DEVICE); + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + } + + /* last tpd's buffer-info */ + buffer_info->skb = skb; +} + +static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, + struct tx_packet_desc *ptpd) +{ + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + struct atl1_buffer *buffer_info; + struct tx_packet_desc *tpd; + u16 j; + u32 val; + u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use); + + for (j = 0; j < count; j++) { + buffer_info = &tpd_ring->buffer_info[next_to_use]; + tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use); + if (tpd != ptpd) + memcpy(tpd, ptpd, sizeof(struct tx_packet_desc)); + tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT); + tpd->word2 |= (cpu_to_le16(buffer_info->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT; + + /* + * if this is the first packet in a TSO chain, set + * TPD_HDRFLAG, otherwise, clear it. + */ + val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & + TPD_SEGMENT_EN_MASK; + if (val) { + if (!j) + tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; + else + tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT); + } + + if (j == (count - 1)) + tpd->word3 |= 1 << TPD_EOP_SHIFT; + + if (++next_to_use == tpd_ring->count) + next_to_use = 0; + } + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
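+ * The new TPD producer index itself reaches the hardware via
+ * atl1_update_mailbox(), which atl1_xmit_frame() calls right after
+ * this function returns.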
+ */ + wmb(); + + atomic_set(&tpd_ring->next_to_use, next_to_use); +} + +static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; + int len; + int tso; + int count = 1; + int ret_val; + struct tx_packet_desc *ptpd; + u16 vlan_tag; + unsigned int nr_frags = 0; + unsigned int mss = 0; + unsigned int f; + unsigned int proto_hdr_len; + + len = skb_headlen(skb); + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) { + unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); + count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + if (skb->protocol == htons(ETH_P_IP)) { + proto_hdr_len = (skb_transport_offset(skb) + + tcp_hdrlen(skb)); + if (unlikely(proto_hdr_len > len)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* need additional TPD ? */ + if (proto_hdr_len != len) + count += (len - proto_hdr_len + + ATL1_MAX_TX_BUF_LEN - 1) / + ATL1_MAX_TX_BUF_LEN; + } + } + + if (atl1_tpd_avail(&adapter->tpd_ring) < count) { + /* not enough descriptors */ + netif_stop_queue(netdev); + if (netif_msg_tx_queued(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "tx busy\n"); + return NETDEV_TX_BUSY; + } + + ptpd = ATL1_TPD_DESC(tpd_ring, + (u16) atomic_read(&tpd_ring->next_to_use)); + memset(ptpd, 0, sizeof(struct tx_packet_desc)); + + if (skb_vlan_tag_present(skb)) { + vlan_tag = skb_vlan_tag_get(skb); + vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | + ((vlan_tag >> 9) & 0x8); + ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; + ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) << + TPD_VLANTAG_SHIFT; + } + + tso = atl1_tso(adapter, skb, ptpd); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (!tso) { + ret_val = atl1_tx_csum(adapter, skb, ptpd); + if (ret_val < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + atl1_tx_map(adapter, skb, ptpd); + atl1_tx_queue(adapter, count, ptpd); + atl1_update_mailbox(adapter); + mmiowb(); + return NETDEV_TX_OK; +} + +static int atl1_rings_clean(struct napi_struct *napi, int budget) +{ + struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi); + int work_done = atl1_intr_rx(adapter, budget); + + if (atl1_intr_tx(adapter)) + work_done = budget; + + /* Let's come again to process some more packets */ + if (work_done >= budget) + return work_done; + + napi_complete(napi); + /* re-enable Interrupt */ + if (likely(adapter->int_enabled)) + atlx_imr_set(adapter, IMR_NORMAL_MASK); + return work_done; +} + +static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) +{ + if (!napi_schedule_prep(&adapter->napi)) + /* It is possible in case even the RX/TX ints are disabled via IMR + * register the ISR bits are set anyway (but do not produce IRQ). + * To handle such situation the napi functions used to check is + * something scheduled or not. + */ + return 0; + + __napi_schedule(&adapter->napi); + + /* + * Disable RX/TX ints via IMR register if it is + * allowed. NAPI handler must reenable them in same + * way. 
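+ * (atl1_rings_clean() restores IMR_NORMAL_MASK once it finishes
+ * within its budget and interrupts are still enabled.)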
+ */ + if (!adapter->int_enabled) + return 1; + + atlx_imr_set(adapter, IMR_NORXTX_MASK); + return 1; +} + +/** + * atl1_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t atl1_intr(int irq, void *data) +{ + struct atl1_adapter *adapter = netdev_priv(data); + u32 status; + + status = adapter->cmb.cmb->int_stats; + if (!status) + return IRQ_NONE; + + /* clear CMB interrupt status at once, + * but leave rx/tx interrupt status in case it should be dropped + * only if rx/tx processing queued. In other case interrupt + * can be lost. + */ + adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX); + + if (status & ISR_GPHY) /* clear phy status */ + atlx_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); + + /* check if SMB intr */ + if (status & ISR_SMB) + atl1_inc_smb(adapter); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie phy link down %x\n", status); + if (netif_running(adapter->netdev)) { /* reset MAC */ + atlx_irq_disable(adapter); + schedule_work(&adapter->reset_dev_task); + return IRQ_HANDLED; + } + } + + /* check if DMA read/write error ? */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, &adapter->pdev->dev, + "pcie DMA r/w error (status = 0x%x)\n", + status); + atlx_irq_disable(adapter); + schedule_work(&adapter->reset_dev_task); + return IRQ_HANDLED; + } + + /* link event */ + if (status & ISR_GPHY) { + adapter->soft_stats.tx_carrier_errors++; + atl1_check_for_link(adapter); + } + + /* transmit or receive event */ + if (status & (ISR_CMB_TX | ISR_CMB_RX) && + atl1_sched_rings_clean(adapter)) + adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats & + ~(ISR_CMB_TX | ISR_CMB_RX); + + /* rx exception */ + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | + ISR_HOST_RRD_OV))) { + if (netif_msg_intr(adapter)) + dev_printk(KERN_DEBUG, + &adapter->pdev->dev, + "rx exception, ISR = 0x%x\n", + status); + atl1_sched_rings_clean(adapter); + } + + /* re-enable Interrupt */ + iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); + return IRQ_HANDLED; +} + + +/** + * atl1_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1_phy_config(unsigned long data) +{ + struct atl1_adapter *adapter = (struct atl1_adapter *)data; + struct atl1_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + adapter->phy_timer_pending = false; + atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg); + atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +/* + * Orphaned vendor comment left intact here: + * + * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT + * will assert. We do soft reset <0x1400=1> according + * with the SPEC. BUT, it seemes that PCIE or DMA + * state-machine will not be reset. DMAR_TO_INT will + * assert again and again. 
+ * + */ + +static int atl1_reset(struct atl1_adapter *adapter) +{ + int ret; + ret = atl1_reset_hw(&adapter->hw); + if (ret) + return ret; + return atl1_init_hw(&adapter->hw); +} + +static s32 atl1_up(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int irq_flags = 0; + + /* hardware has been reset, we need to reload some things */ + atlx_set_multi(netdev); + atl1_init_ring_ptrs(adapter); + atlx_restore_vlan(adapter); + err = atl1_alloc_rx_buffers(adapter); + if (unlikely(!err)) + /* no RX BUFFER allocated */ + return -ENOMEM; + + if (unlikely(atl1_configure(adapter))) { + err = -EIO; + goto err_up; + } + + err = pci_enable_msi(adapter->pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_info(&adapter->pdev->dev, + "Unable to enable MSI: %d\n", err); + irq_flags |= IRQF_SHARED; + } + + err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags, + netdev->name, netdev); + if (unlikely(err)) + goto err_up; + + napi_enable(&adapter->napi); + atlx_irq_enable(adapter); + atl1_check_link(adapter); + netif_start_queue(netdev); + return 0; + +err_up: + pci_disable_msi(adapter->pdev); + /* free rx_buffers */ + atl1_clean_rx_ring(adapter); + return err; +} + +static void atl1_down(struct atl1_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + napi_disable(&adapter->napi); + netif_stop_queue(netdev); + del_timer_sync(&adapter->phy_config_timer); + adapter->phy_timer_pending = false; + + atlx_irq_disable(adapter); + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + atl1_reset_hw(&adapter->hw); + adapter->cmb.cmb->int_stats = 0; + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + netif_carrier_off(netdev); + + atl1_clean_tx_ring(adapter); + atl1_clean_rx_ring(adapter); +} + +static void atl1_reset_dev_task(struct work_struct *work) +{ + struct atl1_adapter *adapter = + container_of(work, struct atl1_adapter, reset_dev_task); + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + atl1_down(adapter); + atl1_up(adapter); + netif_device_attach(netdev); +} + +/** + * atl1_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + + adapter->hw.max_frame_size = max_frame; + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; + adapter->rx_buffer_len = (max_frame + 7) & ~7; + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; + + netdev->mtu = new_mtu; + if ((old_mtu != new_mtu) && netif_running(netdev)) { + atl1_down(adapter); + atl1_up(adapter); + } + + return 0; +} + +/** + * atl1_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1_open(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int err; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = atl1_setup_ring_resources(adapter); + if (err) + return err; + + err = atl1_up(adapter); + if (err) + goto err_up; + + return 0; + +err_up: + atl1_reset(adapter); + return err; +} + +/** + * atl1_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl1_close(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + atl1_down(adapter); + atl1_free_ring_resources(adapter); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int atl1_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 wufc = adapter->wol; + u32 val; + u16 speed; + u16 duplex; + + netif_device_detach(netdev); + if (netif_running(netdev)) + atl1_down(adapter); + + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); + val = ctrl & BMSR_LSTATUS; + if (val) + wufc &= ~ATLX_WUFC_LNKC; + if (!wufc) + goto disable_wol; + + if (val) { + val = atl1_get_speed_and_duplex(hw, &speed, &duplex); + if (val) { + if (netif_msg_ifdown(adapter)) + dev_printk(KERN_DEBUG, &pdev->dev, + "error getting speed/duplex\n"); + goto disable_wol; + } + + ctrl = 0; + + /* enable magic packet WOL */ + if (wufc & ATLX_WUFC_MAG) + ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + + /* configure the mac */ + ctrl = MAC_CTRL_RX_EN; + ctrl |= ((u32)((speed == SPEED_1000) ? 
MAC_CTRL_SPEED_1000 : + MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); + if (duplex == FULL_DUPLEX) + ctrl |= MAC_CTRL_DUPLX; + ctrl |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + __atlx_vlan_mode(netdev->features, &ctrl); + if (wufc & ATLX_WUFC_MAG) + ctrl |= MAC_CTRL_BC_EN; + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); + ioread32(hw->hw_addr + REG_MAC_CTRL); + + /* poke the PHY */ + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + } else { + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + iowrite32(0, hw->hw_addr + REG_MAC_CTRL); + ioread32(hw->hw_addr + REG_MAC_CTRL); + hw->phy_configured = false; + } + + return 0; + + disable_wol: + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); + ioread32(hw->hw_addr + REG_WOL_CTRL); + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); + hw->phy_configured = false; + + return 0; +} + +static int atl1_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); + + atl1_reset_hw(&adapter->hw); + + if (netif_running(netdev)) { + adapter->cmb.cmb->int_stats = 0; + atl1_up(adapter); + } + netif_device_attach(netdev); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume); + +static void atl1_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + +#ifdef CONFIG_PM_SLEEP + atl1_suspend(&pdev->dev); +#endif + pci_wake_from_d3(pdev, adapter->wol); + pci_set_power_state(pdev, PCI_D3hot); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl1_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + atl1_intr(netdev->irq, netdev); + enable_irq(netdev->irq); +} +#endif + +static const struct net_device_ops atl1_netdev_ops = { + .ndo_open = atl1_open, + .ndo_stop = atl1_close, + .ndo_start_xmit = atl1_xmit_frame, + .ndo_set_rx_mode = atlx_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl1_set_mac, + .ndo_change_mtu = atl1_change_mtu, + .ndo_fix_features = atlx_fix_features, + .ndo_set_features = atlx_set_features, + .ndo_do_ioctl = atlx_ioctl, + .ndo_tx_timeout = atlx_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1_poll_controller, +#endif +}; + +/** + * atl1_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
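+ * The net_device is registered only after the MAC address has been
+ * read from the EEPROM (or marked as randomly assigned) and the
+ * hardware has been reset and pre-initialized.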
+ */ +static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1_adapter *adapter; + static int cards_found = 0; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + /* + * The atl1 chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_dma; + } + /* + * Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1_driver_name + */ + err = pci_request_regions(pdev, ATLX_DRIVER_NAME); + if (err) + goto err_request_regions; + + /* + * Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->msg_enable = netif_msg_init(debug, atl1_default_msg); + + adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_pci_iomap; + } + /* get device revision number */ + adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + + (REG_MASTER_CTRL + 2)); + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION); + + /* set default ring resource counts */ + adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; + adapter->tpd_ring.count = ATL1_DEFAULT_TPD; + + adapter->mii.dev = netdev; + adapter->mii.mdio_read = mdio_read; + adapter->mii.mdio_write = mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = 0x1f; + + netdev->netdev_ops = &atl1_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64); + + netdev->ethtool_ops = &atl1_ethtool_ops; + adapter->bd_number = cards_found; + + /* setup the private structure */ + err = atl1_sw_init(adapter); + if (err) + goto err_common; + + netdev->features = NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_SG; + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); + + netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO | + NETIF_F_HW_VLAN_CTAG_RX; + + /* is this valid? 
see atl1_setup_mac_ctrl() */ + netdev->features |= NETIF_F_RXCSUM; + + /* + * patch for some L1 of old version, + * the final version of L1 may not need these + * patches + */ + /* atl1_pcie_patch(adapter); */ + + /* really reset GPHY core */ + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + + /* + * reset the controller to + * put the device in a known good starting state + */ + if (atl1_reset_hw(&adapter->hw)) { + err = -EIO; + goto err_common; + } + + /* copy the MAC address out of the EEPROM */ + if (atl1_read_mac_addr(&adapter->hw)) { + /* mark random mac */ + netdev->addr_assign_type = NET_ADDR_RANDOM; + } + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + err = -EIO; + goto err_common; + } + + atl1_check_options(adapter); + + /* pre-init the MAC, and setup link */ + err = atl1_init_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_common; + } + + atl1_pcie_patch(adapter); + /* assume we have no link for now */ + netif_carrier_off(netdev); + + setup_timer(&adapter->phy_config_timer, atl1_phy_config, + (unsigned long)adapter); + adapter->phy_timer_pending = false; + + INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task); + + INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); + + err = register_netdev(netdev); + if (err) + goto err_common; + + cards_found++; + atl1_via_workaround(adapter); + return 0; + +err_common: + pci_iounmap(pdev, adapter->hw.hw_addr); +err_pci_iomap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_dma: +err_request_regions: + pci_disable_device(pdev); + return err; +} + +/** + * atl1_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void atl1_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter; + /* Device not available. Return. */ + if (!netdev) + return; + + adapter = netdev_priv(netdev); + + /* + * Some atl1 boards lack persistent storage for their MAC, and get it + * from the BIOS during POST. If we've been messing with the MAC + * address, we need to save the permanent one. 
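+ * The permanent address is written back with atl1_set_mac_addr()
+ * below, before the PHY is disabled and the netdev is unregistered.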
+ */ + if (!ether_addr_equal_unaligned(adapter->hw.mac_addr, + adapter->hw.perm_mac_addr)) { + memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, + ETH_ALEN); + atl1_set_mac_addr(&adapter->hw); + } + + iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE); + unregister_netdev(netdev); + pci_iounmap(pdev, adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +static struct pci_driver atl1_driver = { + .name = ATLX_DRIVER_NAME, + .id_table = atl1_pci_tbl, + .probe = atl1_probe, + .remove = atl1_remove, + .shutdown = atl1_shutdown, + .driver.pm = &atl1_pm_ops, +}; + +struct atl1_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define ATL1_STAT(m) \ + sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m) + +static struct atl1_stats atl1_gstrings_stats[] = { + {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, + {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, + {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, + {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, + {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, + {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, + {"multicast", ATL1_STAT(soft_stats.multicast)}, + {"collisions", ATL1_STAT(soft_stats.collisions)}, + {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, + {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, + {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, + {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, + {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, + {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, + {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, + {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, + {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, + {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, + {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, + {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, + {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, + {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, + {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, + {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, + {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, + {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, + {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, + {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} +}; + +static void atl1_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + int i; + char *p; + + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; + data[i] = (atl1_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + +} + +static int atl1_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(atl1_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static int atl1_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + ecmd->advertising |= ADVERTISED_Autoneg; + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= + (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } else + ecmd->advertising |= (ADVERTISED_1000baseT_Full); + } + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + u16 link_speed, link_duplex; + atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); + ethtool_cmd_speed_set(ecmd, link_speed); + if (link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int atl1_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + u16 phy_data; + int ret_val = 0; + u16 old_media_type = hw->media_type; + + if (netif_running(adapter->netdev)) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool shutting down adapter\n"); + atl1_down(adapter); + } + + if (ecmd->autoneg == AUTONEG_ENABLE) + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + else { + u32 speed = ethtool_cmd_speed(ecmd); + if (speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "1000M half is invalid\n"); + ret_val = -EINVAL; + goto exit_sset; + } + hw->media_type = MEDIA_TYPE_1000M_FULL; + } else if (speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_100M_FULL; + else + hw->media_type = MEDIA_TYPE_100M_HALF; + } else { + if (ecmd->duplex == DUPLEX_FULL) + hw->media_type = MEDIA_TYPE_10M_FULL; + else + hw->media_type = MEDIA_TYPE_10M_HALF; + } + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ecmd->advertising = + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + case MEDIA_TYPE_1000M_FULL: + ecmd->advertising = + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | ADVERTISED_TP; + break; + default: + ecmd->advertising = 0; + break; + } + if (atl1_phy_setup_autoneg_adv(hw)) { + ret_val = -EINVAL; + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "invalid ethtool speed/duplex setting\n"); + goto exit_sset; + } + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + 
hw->media_type == MEDIA_TYPE_1000M_FULL) + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | + MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF: */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + break; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); +exit_sset: + if (ret_val) + hw->media_type = old_media_type; + + if (netif_running(adapter->netdev)) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool starting adapter\n"); + atl1_up(adapter); + } else if (!ret_val) { + if (netif_msg_link(adapter)) + dev_dbg(&adapter->pdev->dev, + "ethtool resetting adapter\n"); + atl1_reset(adapter); + } + return ret_val; +} + +static void atl1_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ATLX_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->eedump_len = ATL1_EEDUMP_LEN; +} + +static void atl1_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + if (adapter->wol & ATLX_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int atl1_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + adapter->wol = 0; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= ATLX_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static u32 atl1_get_msglevel(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void atl1_set_msglevel(struct net_device *netdev, u32 value) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = value; +} + +static int atl1_get_regs_len(struct net_device *netdev) +{ + return ATL1_REG_COUNT * sizeof(u32); +} + +static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + unsigned int i; + u32 *regbuf = p; + + for (i = 0; i < ATL1_REG_COUNT; i++) { + /* + * This switch statement avoids reserved regions + * of register space. + */ + switch (i) { + case 6 ... 9: + case 14: + case 29 ... 31: + case 34 ... 63: + case 75 ... 127: + case 136 ... 1023: + case 1027 ... 1087: + case 1091 ... 1151: + case 1194 ... 1195: + case 1200 ... 1201: + case 1206 ... 1213: + case 1216 ... 1279: + case 1290 ... 1311: + case 1323 ... 1343: + case 1358 ... 1359: + case 1368 ... 1375: + case 1378 ... 1383: + case 1388 ... 1391: + case 1393 ... 1395: + case 1402 ... 1403: + case 1410 ... 1471: + case 1522 ... 
1535: + /* reserved region; don't read it */ + regbuf[i] = 0; + break; + default: + /* unreserved region */ + regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32))); + } + } +} + +static void atl1_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *txdr = &adapter->tpd_ring; + struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; + + ring->rx_max_pending = ATL1_MAX_RFD; + ring->tx_max_pending = ATL1_MAX_TPD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int atl1_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; + struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; + struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; + + struct atl1_tpd_ring tpd_old, tpd_new; + struct atl1_rfd_ring rfd_old, rfd_new; + struct atl1_rrd_ring rrd_old, rrd_new; + struct atl1_ring_header rhdr_old, rhdr_new; + struct atl1_smb smb; + struct atl1_cmb cmb; + int err; + + tpd_old = adapter->tpd_ring; + rfd_old = adapter->rfd_ring; + rrd_old = adapter->rrd_ring; + rhdr_old = adapter->ring_header; + + if (netif_running(adapter->netdev)) + atl1_down(adapter); + + rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); + rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : + rfdr->count; + rfdr->count = (rfdr->count + 3) & ~3; + rrdr->count = rfdr->count; + + tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); + tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : + tpdr->count; + tpdr->count = (tpdr->count + 3) & ~3; + + if (netif_running(adapter->netdev)) { + /* try to get new resources before deleting old */ + err = atl1_setup_ring_resources(adapter); + if (err) + goto err_setup_ring; + + /* + * save the new, restore the old in order to free it, + * then restore the new back again + */ + + rfd_new = adapter->rfd_ring; + rrd_new = adapter->rrd_ring; + tpd_new = adapter->tpd_ring; + rhdr_new = adapter->ring_header; + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + /* + * Save SMB and CMB, since atl1_free_ring_resources + * will clear them. 
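+ * The pointers saved here were set up by the
+ * atl1_setup_ring_resources() call above and are put back once the
+ * old rings have been freed.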
+ */ + smb = adapter->smb; + cmb = adapter->cmb; + atl1_free_ring_resources(adapter); + adapter->rfd_ring = rfd_new; + adapter->rrd_ring = rrd_new; + adapter->tpd_ring = tpd_new; + adapter->ring_header = rhdr_new; + adapter->smb = smb; + adapter->cmb = cmb; + + err = atl1_up(adapter); + if (err) + return err; + } + return 0; + +err_setup_ring: + adapter->rfd_ring = rfd_old; + adapter->rrd_ring = rrd_old; + adapter->tpd_ring = tpd_old; + adapter->ring_header = rhdr_old; + atl1_up(adapter); + return err; +} + +static void atl1_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + epause->rx_pause = 1; + epause->tx_pause = 1; +} + +static int atl1_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *epause) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + epause->autoneg = AUTONEG_ENABLE; + } else { + epause->autoneg = AUTONEG_DISABLE; + } + + epause->rx_pause = 1; + epause->tx_pause = 1; + + return 0; +} + +static void atl1_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { + memcpy(p, atl1_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int atl1_nway_reset(struct net_device *netdev) +{ + struct atl1_adapter *adapter = netdev_priv(netdev); + struct atl1_hw *hw = &adapter->hw; + + if (netif_running(netdev)) { + u16 phy_data; + atl1_down(adapter); + + if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || + hw->media_type == MEDIA_TYPE_1000M_FULL) { + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; + } else { + switch (hw->media_type) { + case MEDIA_TYPE_100M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_100M_HALF: + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; + break; + case MEDIA_TYPE_10M_FULL: + phy_data = MII_CR_FULL_DUPLEX | + MII_CR_SPEED_10 | MII_CR_RESET; + break; + default: + /* MEDIA_TYPE_10M_HALF */ + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; + } + } + atl1_write_phy_reg(hw, MII_BMCR, phy_data); + atl1_up(adapter); + } + return 0; +} + +static const struct ethtool_ops atl1_ethtool_ops = { + .get_settings = atl1_get_settings, + .set_settings = atl1_set_settings, + .get_drvinfo = atl1_get_drvinfo, + .get_wol = atl1_get_wol, + .set_wol = atl1_set_wol, + .get_msglevel = atl1_get_msglevel, + .set_msglevel = atl1_set_msglevel, + .get_regs_len = atl1_get_regs_len, + .get_regs = atl1_get_regs, + .get_ringparam = atl1_get_ringparam, + .set_ringparam = atl1_set_ringparam, + .get_pauseparam = atl1_get_pauseparam, + .set_pauseparam = atl1_set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = atl1_get_strings, + .nway_reset = atl1_nway_reset, + .get_ethtool_stats = atl1_get_ethtool_stats, + .get_sset_count = atl1_get_sset_count, +}; + +module_pci_driver(atl1_driver); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h new file mode 100644 index 000000000..34a58cd84 --- /dev/null +++ b/drivers/net/ethernet/atheros/atlx/atl1.h @@ 
-0,0 +1,803 @@ +/* + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef ATL1_H +#define ATL1_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atlx.h" + +#define ATLX_DRIVER_NAME "atl1" + +MODULE_DESCRIPTION("Atheros L1 Gigabit Ethernet Driver"); + +#define atlx_adapter atl1_adapter +#define atlx_check_for_link atl1_check_for_link +#define atlx_check_link atl1_check_link +#define atlx_hash_mc_addr atl1_hash_mc_addr +#define atlx_hash_set atl1_hash_set +#define atlx_hw atl1_hw +#define atlx_mii_ioctl atl1_mii_ioctl +#define atlx_read_phy_reg atl1_read_phy_reg +#define atlx_set_mac atl1_set_mac +#define atlx_set_mac_addr atl1_set_mac_addr + +struct atl1_adapter; +struct atl1_hw; + +/* function prototypes needed by multiple files */ +static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); +static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value); +static void atl1_set_mac_addr(struct atl1_hw *hw); +static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static u32 atl1_check_link(struct atl1_adapter *adapter); + +/* hardware definitions specific to L1 */ + +/* Block IDLE Status Register */ +#define IDLE_STATUS_RXMAC 0x1 +#define IDLE_STATUS_TXMAC 0x2 +#define IDLE_STATUS_RXQ 0x4 +#define IDLE_STATUS_TXQ 0x8 +#define IDLE_STATUS_DMAR 0x10 +#define IDLE_STATUS_DMAW 0x20 +#define IDLE_STATUS_SMB 0x40 +#define IDLE_STATUS_CMB 0x80 + +/* MDIO Control Register */ +#define MDIO_WAIT_TIMES 30 + +/* MAC Control Register */ +#define MAC_CTRL_TX_PAUSE 0x10000 +#define MAC_CTRL_SCNT 0x20000 +#define MAC_CTRL_SRST_TX 0x40000 +#define MAC_CTRL_TX_SIMURST 0x80000 +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 0x2 +#define MAC_CTRL_SPEED_10_100 0x1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 +#define MAC_CTRL_TX_HUGE 0x800000 +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 +#define MAC_CTRL_DBG 0x8000000 + +/* Wake-On-Lan control register */ +#define WOL_CLK_SWITCH_EN 0x8000 +#define WOL_PT5_EN 0x200000 +#define WOL_PT6_EN 0x400000 +#define WOL_PT5_MATCH 0x8000000 +#define WOL_PT6_MATCH 0x10000000 + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14A4 +#define WOL_PT_LEN_MASK 0x7F +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Registers, low 32 bits */ +#define REG_SRAM_RFD_LEN 0x1504 +#define REG_SRAM_RRD_ADDR 0x1508 
+#define REG_SRAM_RRD_LEN 0x150C +#define REG_SRAM_TPD_ADDR 0x1510 +#define REG_SRAM_TPD_LEN 0x1514 +#define REG_SRAM_TRD_ADDR 0x1518 +#define REG_SRAM_TRD_LEN 0x151C +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_PATH_ADDR 0x1530 +#define SRAM_TCPH_ADDR_MASK 0xFFF +#define SRAM_TCPH_ADDR_SHIFT 0 +#define SRAM_PATH_ADDR_MASK 0xFFF +#define SRAM_PATH_ADDR_SHIFT 16 + +/* Load Ptr Register */ +#define REG_LOAD_PTR 0x1534 + +/* Descriptor Control registers, low 32 bits */ +#define REG_DESC_RFD_ADDR_LO 0x1544 +#define REG_DESC_RRD_ADDR_LO 0x1548 +#define REG_DESC_TPD_ADDR_LO 0x154C +#define REG_DESC_CMB_ADDR_LO 0x1550 +#define REG_DESC_SMB_ADDR_LO 0x1554 +#define REG_DESC_RFD_RRD_RING_SIZE 0x1558 +#define DESC_RFD_RING_SIZE_MASK 0x7FF +#define DESC_RFD_RING_SIZE_SHIFT 0 +#define DESC_RRD_RING_SIZE_MASK 0x7FF +#define DESC_RRD_RING_SIZE_SHIFT 16 +#define REG_DESC_TPD_RING_SIZE 0x155C +#define DESC_TPD_RING_SIZE_MASK 0x3FF +#define DESC_TPD_RING_SIZE_SHIFT 0 + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0 +#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1F +#define TXQ_CTRL_EN 0x20 +#define TXQ_CTRL_ENH_MODE 0x40 +#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8 +#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3F +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xFFFF + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584 +#define TX_JUMBO_TASK_TH_MASK 0x7FF +#define TX_JUMBO_TASK_TH_SHIFT 0 +#define TX_TPD_MIN_IPG_MASK 0x1F +#define TX_TPD_MIN_IPG_SHIFT 16 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0 +#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xFF +#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8 +#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xFF +#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16 +#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1F +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 +#define RXQ_JMBOSZ_TH_MASK 0x7FF +#define RXQ_JMBOSZ_TH_SHIFT 0 +#define RXQ_JMBO_LKAH_MASK 0xF +#define RXQ_JMBO_LKAH_SHIFT 11 +#define RXQ_RRD_TIMER_MASK 0xFFFF +#define RXQ_RRD_TIMER_SHIFT 16 + +/* RFD flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xFFF +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xFFF + +/* RRD flow control register */ +#define REG_RXQ_RRD_PAUSE_THRESH 0x15AC +#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RRD_PAUSE_TH_HI_MASK 0xFFF +#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RRD_PAUSE_TH_LO_MASK 0xFFF + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_EN 0x400 +#define DMA_CTRL_DMAW_EN 0x800 + +/* CMB/SMB Control Register */ +#define REG_CSMB_CTRL 0x15D0 +#define CSMB_CTRL_CMB_NOW 1 +#define CSMB_CTRL_SMB_NOW 2 +#define CSMB_CTRL_CMB_EN 4 +#define CSMB_CTRL_SMB_EN 8 + +/* CMB DMA Write Threshold Register */ +#define REG_CMB_WRITE_TH 0x15D4 +#define CMB_RRD_TH_SHIFT 0 +#define 
CMB_RRD_TH_MASK 0x7FF +#define CMB_TPD_TH_SHIFT 16 +#define CMB_TPD_TH_MASK 0x7FF + +/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */ +#define REG_CMB_WRITE_TIMER 0x15D8 +#define CMB_RX_TM_SHIFT 0 +#define CMB_RX_TM_MASK 0xFFFF +#define CMB_TX_TM_SHIFT 16 +#define CMB_TX_TM_MASK 0xFFFF + +/* Number of packet received since last CMB write */ +#define REG_CMB_RX_PKT_CNT 0x15DC + +/* Number of packet transmitted since last CMB write */ +#define REG_CMB_TX_PKT_CNT 0x15E0 + +/* SMB auto DMA timer register */ +#define REG_SMB_TIMER 0x15E4 + +/* Mailbox Register */ +#define REG_MAILBOX 0x15F0 +#define MB_RFD_PROD_INDX_SHIFT 0 +#define MB_RFD_PROD_INDX_MASK 0x7FF +#define MB_RRD_CONS_INDX_SHIFT 11 +#define MB_RRD_CONS_INDX_MASK 0x7FF +#define MB_TPD_PROD_INDX_SHIFT 22 +#define MB_TPD_PROD_INDX_MASK 0x3FF + +/* Interrupt Status Register */ +#define ISR_SMB 0x1 +#define ISR_TIMER 0x2 +#define ISR_MANUAL 0x4 +#define ISR_RXF_OV 0x8 +#define ISR_RFD_UNRUN 0x10 +#define ISR_RRD_OV 0x20 +#define ISR_TXF_UNRUN 0x40 +#define ISR_LINK 0x80 +#define ISR_HOST_RFD_UNRUN 0x100 +#define ISR_HOST_RRD_OV 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_RX_PKT 0x10000 +#define ISR_TX_PKT 0x20000 +#define ISR_TX_DMA 0x40000 +#define ISR_RX_DMA 0x80000 +#define ISR_CMB_RX 0x100000 +#define ISR_CMB_TX 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_DIS_SMB 0x20000000 +#define ISR_DIS_DMA 0x40000000 + +/* Normal Interrupt mask without RX/TX enabled */ +#define IMR_NORXTX_MASK (\ + ISR_SMB |\ + ISR_GPHY |\ + ISR_PHY_LINKDOWN|\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST) + +/* Normal Interrupt mask */ +#define IMR_NORMAL_MASK (\ + IMR_NORXTX_MASK |\ + ISR_CMB_TX |\ + ISR_CMB_RX) + +/* Debug Interrupt Mask (enable all interrupt) */ +#define IMR_DEBUG_MASK (\ + ISR_SMB |\ + ISR_TIMER |\ + ISR_MANUAL |\ + ISR_RXF_OV |\ + ISR_RFD_UNRUN |\ + ISR_RRD_OV |\ + ISR_TXF_UNRUN |\ + ISR_LINK |\ + ISR_CMB_TX |\ + ISR_CMB_RX |\ + ISR_RX_PKT |\ + ISR_TX_PKT |\ + ISR_MAC_RX |\ + ISR_MAC_TX) + +#define MEDIA_TYPE_1000M_FULL 1 +#define MEDIA_TYPE_100M_FULL 2 +#define MEDIA_TYPE_100M_HALF 3 +#define MEDIA_TYPE_10M_FULL 4 +#define MEDIA_TYPE_10M_HALF 5 + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* All but 1000-Half */ + +#define MAX_JUMBO_FRAME_SIZE 10240 + +#define ATL1_EEDUMP_LEN 48 + +/* Statistics counters collected by the MAC */ +struct stats_msg_block { + /* rx */ + u32 rx_ok; /* good RX packets */ + u32 rx_bcast; /* good RX broadcast packets */ + u32 rx_mcast; /* good RX multicast packets */ + u32 rx_pause; /* RX pause frames */ + u32 rx_ctrl; /* RX control packets other than pause frames */ + u32 rx_fcs_err; /* RX packets with bad FCS */ + u32 rx_len_err; /* RX packets with length != actual size */ + u32 rx_byte_cnt; /* good bytes received. 
FCS is NOT included */ + u32 rx_runt; /* RX packets < 64 bytes with good FCS */ + u32 rx_frag; /* RX packets < 64 bytes with bad FCS */ + u32 rx_sz_64; /* 64 byte RX packets */ + u32 rx_sz_65_127; + u32 rx_sz_128_255; + u32 rx_sz_256_511; + u32 rx_sz_512_1023; + u32 rx_sz_1024_1518; + u32 rx_sz_1519_max; /* 1519 byte to MTU RX packets */ + u32 rx_sz_ov; /* truncated RX packets > MTU */ + u32 rx_rxf_ov; /* frames dropped due to RX FIFO overflow */ + u32 rx_rrd_ov; /* frames dropped due to RRD overflow */ + u32 rx_align_err; /* alignment errors */ + u32 rx_bcast_byte_cnt; /* RX broadcast bytes, excluding FCS */ + u32 rx_mcast_byte_cnt; /* RX multicast bytes, excluding FCS */ + u32 rx_err_addr; /* packets dropped due to address filtering */ + + /* tx */ + u32 tx_ok; /* good TX packets */ + u32 tx_bcast; /* good TX broadcast packets */ + u32 tx_mcast; /* good TX multicast packets */ + u32 tx_pause; /* TX pause frames */ + u32 tx_exc_defer; /* TX packets deferred excessively */ + u32 tx_ctrl; /* TX control frames, excluding pause frames */ + u32 tx_defer; /* TX packets deferred */ + u32 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */ + u32 tx_sz_64; /* 64 byte TX packets */ + u32 tx_sz_65_127; + u32 tx_sz_128_255; + u32 tx_sz_256_511; + u32 tx_sz_512_1023; + u32 tx_sz_1024_1518; + u32 tx_sz_1519_max; /* 1519 byte to MTU TX packets */ + u32 tx_1_col; /* packets TX after a single collision */ + u32 tx_2_col; /* packets TX after multiple collisions */ + u32 tx_late_col; /* TX packets with late collisions */ + u32 tx_abort_col; /* TX packets aborted w/excessive collisions */ + u32 tx_underrun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun */ + u32 tx_rd_eop; /* reads beyond the EOP into the next frame + * when TRD was not written timely */ + u32 tx_len_err; /* TX packets where length != actual size */ + u32 tx_trunc; /* TX packets truncated due to size > MTU */ + u32 tx_bcast_byte; /* broadcast bytes transmitted, excluding FCS */ + u32 tx_mcast_byte; /* multicast bytes transmitted, excluding FCS */ + u32 smb_updated; /* 1: SMB Updated. This is used by software to + * indicate the statistics update. Software + * should clear this bit after retrieving the + * statistics information. */ +}; + +/* Coalescing Message Block */ +struct coals_msg_block { + u32 int_stats; /* interrupt status */ + u16 rrd_prod_idx; /* TRD Producer Index. */ + u16 rfd_cons_idx; /* RFD Consumer Index. */ + u16 update; /* Selene sets this bit every time it DMAs the + * CMB to host memory. Software should clear + * this bit when CMB info is processed. */ + u16 tpd_cons_idx; /* TPD Consumer Index. 
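+ * atl1_intr_tx() compares this index with the
+ * driver's next_to_clean to reap completed TPDs.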
*/ +}; + +/* RRD descriptor */ +struct rx_return_desc { + u8 num_buf; /* Number of RFD buffers used by the received packet */ + u8 resved; + u16 buf_indx; /* RFD Index of the first buffer */ + union { + u32 valid; + struct { + u16 rx_chksum; + u16 pkt_size; + } xsum_sz; + } xsz; + + u16 pkt_flg; /* Packet flags */ + u16 err_flg; /* Error flags */ + u16 resved2; + u16 vlan_tag; /* VLAN TAG */ +}; + +#define PACKET_FLAG_ETH_TYPE 0x0080 +#define PACKET_FLAG_VLAN_INS 0x0100 +#define PACKET_FLAG_ERR 0x0200 +#define PACKET_FLAG_IPV4 0x0400 +#define PACKET_FLAG_UDP 0x0800 +#define PACKET_FLAG_TCP 0x1000 +#define PACKET_FLAG_BCAST 0x2000 +#define PACKET_FLAG_MCAST 0x4000 +#define PACKET_FLAG_PAUSE 0x8000 + +#define ERR_FLAG_CRC 0x0001 +#define ERR_FLAG_CODE 0x0002 +#define ERR_FLAG_DRIBBLE 0x0004 +#define ERR_FLAG_RUNT 0x0008 +#define ERR_FLAG_OV 0x0010 +#define ERR_FLAG_TRUNC 0x0020 +#define ERR_FLAG_IP_CHKSUM 0x0040 +#define ERR_FLAG_L4_CHKSUM 0x0080 +#define ERR_FLAG_LEN 0x0100 +#define ERR_FLAG_DES_ADDR 0x0200 + +/* RFD descriptor */ +struct rx_free_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buf_len; /* Size of the receive buffer in host memory */ + u16 coalese; /* Update consumer index to host after the + * reception of this frame */ + /* __packed is required */ +} __packed; + +/* + * The L1 transmit packet descriptor is comprised of four 32-bit words. + * + * 31 0 + * +---------------------------------------+ + * | Word 0: Buffer addr lo | + * +---------------------------------------+ + * | Word 1: Buffer addr hi | + * +---------------------------------------+ + * | Word 2 | + * +---------------------------------------+ + * | Word 3 | + * +---------------------------------------+ + * + * Words 0 and 1 combine to form a 64-bit buffer address. + * + * Word 2 is self explanatory in the #define block below. + * + * Word 3 has two forms, depending upon the state of bits 3 and 4. + * If bits 3 and 4 are both zero, then bits 14:31 are unused by the + * hardware. Otherwise, if either bit 3 or 4 is set, the definition + * of bits 14:31 vary according to the following depiction. 
+ * + * 0 End of packet 0 End of packet + * 1 Coalesce 1 Coalesce + * 2 Insert VLAN tag 2 Insert VLAN tag + * 3 Custom csum enable = 0 3 Custom csum enable = 1 + * 4 Segment enable = 1 4 Segment enable = 0 + * 5 Generate IP checksum 5 Generate IP checksum + * 6 Generate TCP checksum 6 Generate TCP checksum + * 7 Generate UDP checksum 7 Generate UDP checksum + * 8 VLAN tagged 8 VLAN tagged + * 9 Ethernet frame type 9 Ethernet frame type + * 10-+ 10-+ + * 11 | IP hdr length (10:13) 11 | IP hdr length (10:13) + * 12 | (num 32-bit words) 12 | (num 32-bit words) + * 13-+ 13-+ + * 14-+ 14 Unused + * 15 | TCP hdr length (14:17) 15 Unused + * 16 | (num 32-bit words) 16-+ + * 17-+ 17 | + * 18 Header TPD flag 18 | + * 19-+ 19 | Payload offset + * 20 | 20 | (16:23) + * 21 | 21 | + * 22 | 22 | + * 23 | 23-+ + * 24 | 24-+ + * 25 | MSS (19:31) 25 | + * 26 | 26 | + * 27 | 27 | Custom csum offset + * 28 | 28 | (24:31) + * 29 | 29 | + * 30 | 30 | + * 31-+ 31-+ + */ + +/* tpd word 2 */ +#define TPD_BUFLEN_MASK 0x3FFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_DMAINT_MASK 0x0001 +#define TPD_DMAINT_SHIFT 14 +#define TPD_PKTNT_MASK 0x0001 +#define TPD_PKTINT_SHIFT 15 +#define TPD_VLANTAG_MASK 0xFFFF +#define TPD_VLANTAG_SHIFT 16 + +/* tpd word 3 bits 0:13 */ +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 0 +#define TPD_COALESCE_MASK 0x0001 +#define TPD_COALESCE_SHIFT 1 +#define TPD_INS_VL_TAG_MASK 0x0001 +#define TPD_INS_VL_TAG_SHIFT 2 +#define TPD_CUST_CSUM_EN_MASK 0x0001 +#define TPD_CUST_CSUM_EN_SHIFT 3 +#define TPD_SEGMENT_EN_MASK 0x0001 +#define TPD_SEGMENT_EN_SHIFT 4 +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 5 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 6 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 7 +#define TPD_VL_TAGGED_MASK 0x0001 +#define TPD_VL_TAGGED_SHIFT 8 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 9 +#define TPD_IPHL_MASK 0x000F +#define TPD_IPHL_SHIFT 10 + +/* tpd word 3 bits 14:31 if segment enabled */ +#define TPD_TCPHDRLEN_MASK 0x000F +#define TPD_TCPHDRLEN_SHIFT 14 +#define TPD_HDRFLAG_MASK 0x0001 +#define TPD_HDRFLAG_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 19 + +/* tpd word 3 bits 16:31 if custom csum enabled */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 16 +#define TPD_CCSUMOFFSET_MASK 0x00FF +#define TPD_CCSUMOFFSET_SHIFT 24 + +struct tx_packet_desc { + __le64 buffer_addr; + __le32 word2; + __le32 word3; +}; + +/* DMA Order Settings */ +enum atl1_dma_order { + atl1_dma_ord_in = 1, + atl1_dma_ord_enh = 2, + atl1_dma_ord_out = 4 +}; + +enum atl1_dma_rcb { + atl1_rcb_64 = 0, + atl1_rcb_128 = 1 +}; + +enum atl1_dma_req_block { + atl1_dma_req_128 = 0, + atl1_dma_req_256 = 1, + atl1_dma_req_512 = 2, + atl1_dma_req_1024 = 3, + atl1_dma_req_2048 = 4, + atl1_dma_req_4096 = 5 +}; + +#define ATL1_MAX_INTR 3 +#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */ + +#define ATL1_DEFAULT_TPD 256 +#define ATL1_MAX_TPD 1024 +#define ATL1_MIN_TPD 64 +#define ATL1_DEFAULT_RFD 512 +#define ATL1_MIN_RFD 128 +#define ATL1_MAX_RFD 2048 +#define ATL1_REG_COUNT 1538 + +#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc) +#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc) +#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) + +/* + * atl1_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) and the two + * 
message blocks (cmb, smb) described below + */ +struct atl1_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; + +/* + * atl1_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct atl1_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 alloced; /* 1 if skb allocated */ + dma_addr_t dma; +}; + +/* transmit packet descriptor (tpd) ring */ +struct atl1_tpd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 hw_idx; /* hardware index */ + atomic_t next_to_clean; + atomic_t next_to_use; + struct atl1_buffer *buffer_info; +}; + +/* receive free descriptor (rfd) ring */ +struct atl1_rfd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + atomic_t next_to_use; + u16 next_to_clean; + struct atl1_buffer *buffer_info; +}; + +/* receive return descriptor (rrd) ring */ +struct atl1_rrd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + unsigned int size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + atomic_t next_to_clean; +}; + +/* coalescing message block (cmb) */ +struct atl1_cmb { + struct coals_msg_block *cmb; + dma_addr_t dma; +}; + +/* statistics message block (smb) */ +struct atl1_smb { + struct stats_msg_block *smb; + dma_addr_t dma; +}; + +/* Statistics counters */ +struct atl1_sft_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u64 multicast; + u64 collisions; + u64 rx_errors; + u64 rx_length_errors; + u64 rx_crc_errors; + u64 rx_dropped; + u64 rx_frame_errors; + u64 rx_fifo_errors; + u64 rx_missed_errors; + u64 tx_errors; + u64 tx_fifo_errors; + u64 tx_aborted_errors; + u64 tx_window_errors; + u64 tx_carrier_errors; + u64 tx_pause; /* TX pause frames */ + u64 excecol; /* TX packets w/ excessive collisions */ + u64 deffer; /* TX packets deferred */ + u64 scc; /* packets TX after a single collision */ + u64 mcc; /* packets TX after multiple collisions */ + u64 latecol; /* TX packets w/ late collisions */ + u64 tx_underun; /* TX packets aborted due to TX FIFO underrun + * or TRD FIFO underrun */ + u64 tx_trunc; /* TX packets truncated due to size > MTU */ + u64 rx_pause; /* num Pause packets received. */ + u64 rx_rrd_ov; + u64 rx_trunc; +}; + +/* hardware structure */ +struct atl1_hw { + u8 __iomem *hw_addr; + struct atl1_adapter *back; + enum atl1_dma_order dma_ord; + enum atl1_dma_rcb rcb_value; + enum atl1_dma_req_block dmar_block; + enum atl1_dma_req_block dmaw_block; + u8 preamble_len; + u8 max_retry; + u8 jam_ipg; /* IPG to start JAM for collision based flow + * control in half-duplex mode. In units of + * 8-bit time */ + u8 ipgt; /* Desired back to back inter-packet gap. + * The default is 96-bit time */ + u8 min_ifg; /* Minimum number of IFG to enforce in between + * receive frames. Frame gap below such IFP + * is dropped */ + u8 ipgr1; /* 64bit Carrier-Sense window */ + u8 ipgr2; /* 96-bit IPG window */ + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned + * burst. 
Each TPD is 16 bytes long */ + u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned + * burst. Each RFD is 12 bytes long */ + u8 rfd_fetch_gap; + u8 rrd_burst; /* Threshold number of RRDs that can be retired + * in a burst. Each RRD is 16 bytes long */ + u8 tpd_fetch_th; + u8 tpd_fetch_gap; + u16 tx_jumbo_task_th; + u16 txf_burst; /* Number of data bytes to read in a cache- + * aligned burst. Each SRAM entry is 8 bytes */ + u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN + * packets should add 4 bytes */ + u16 rx_jumbo_lkah; + u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after + * every 512ns passes. */ + u16 lcol; /* Collision Window */ + + u16 cmb_tpd; + u16 cmb_rrd; + u16 cmb_rx_timer; + u16 cmb_tx_timer; + u32 smb_timer; + u16 media_type; + u16 autoneg_advertised; + + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u32 max_frame_size; + u32 min_frame_size; + + u16 dev_rev; + + /* spi flash */ + u8 flash_vendor; + + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + bool phy_configured; +}; + +struct atl1_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + + struct atl1_sft_stats soft_stats; + u32 rx_buffer_len; + u32 wol; + u16 link_speed; + u16 link_duplex; + spinlock_t lock; + struct napi_struct napi; + struct work_struct reset_dev_task; + struct work_struct link_chg_task; + + struct timer_list phy_config_timer; + bool phy_timer_pending; + + /* all descriptor rings' memory */ + struct atl1_ring_header ring_header; + + /* TX */ + struct atl1_tpd_ring tpd_ring; + spinlock_t mb_lock; + + /* RX */ + struct atl1_rfd_ring rfd_ring; + struct atl1_rrd_ring rrd_ring; + u64 hw_csum_err; + u64 hw_csum_good; + u32 msg_enable; + u16 imt; /* interrupt moderator timer (2us resolution) */ + u16 ict; /* interrupt clear timer (2us resolution */ + struct mii_if_info mii; /* MII interface info */ + + /* + * Use this value to check is napi handler allowed to + * enable ints or not + */ + bool int_enabled; + + u32 bd_number; /* board number */ + bool pci_using_64; + struct atl1_hw hw; + struct atl1_smb smb; + struct atl1_cmb cmb; +}; + +#endif /* ATL1_H */ diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c new file mode 100644 index 000000000..46a535318 --- /dev/null +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -0,0 +1,3085 @@ +/* + * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2007 - 2008 Chris Snook + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/hardirq.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/irqflags.h>
+#include <linux/irqreturn.h>
+#include <linux/mii.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/pm.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "atl2.h"
+
+#define ATL2_DRV_VERSION "2.2.3"
+
+static const char atl2_driver_name[] = "atl2";
+static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
+static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
+static const char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const struct ethtool_ops atl2_ethtool_ops;
+
+MODULE_AUTHOR("Atheros Corporation , Chris Snook ");
+MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATL2_DRV_VERSION);
+
+/*
+ * atl2_pci_tbl - PCI Device ID Table
+ */
+static const struct pci_device_id atl2_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
+	/* required last entry */
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
+
+static void atl2_check_options(struct atl2_adapter *adapter);
+
+/**
+ * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl2_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int atl2_sw_init(struct atl2_adapter *adapter)
+{
+	struct atl2_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+	hw->revision_id = pdev->revision;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	adapter->wol = 0;
+	adapter->ict = 50000;		/* ~100ms */
+	adapter->link_speed = SPEED_0;	/* hardware init */
+	adapter->link_duplex = FULL_DUPLEX;
+
+	hw->phy_configured = false;
+	hw->preamble_len = 7;
+	hw->ipgt = 0x60;
+	hw->min_ifg = 0x50;
+	hw->ipgr1 = 0x40;
+	hw->ipgr2 = 0x60;
+	hw->retry_buf = 2;
+	hw->max_retry = 0xf;
+	hw->lcol = 0x37;
+	hw->jam_ipg = 7;
+	hw->fc_rxd_hi = 0;
+	hw->fc_rxd_lo = 0;
+	hw->max_frame_size = adapter->netdev->mtu;
+
+	spin_lock_init(&adapter->stats_lock);
+
+	set_bit(__ATL2_DOWN, &adapter->flags);
+
+	return 0;
+}
+
+/**
+ * atl2_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
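+ *
+ * The multicast filter itself is a 64-bit hash table held in the two
+ * 32-bit words at REG_RX_HASH_TABLE: both words are cleared here and
+ * then one bit is set per list entry via atl2_hash_mc_addr() and
+ * atl2_hash_set() (presumably the usual CRC-based atlx hashing; those
+ * helpers are not shown in this hunk).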
+ */ +static void atl2_set_multi(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 rctl; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + rctl = ATL2_READ_REG(hw, REG_MAC_CTRL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + rctl |= MAC_CTRL_MC_ALL_EN; + rctl &= ~MAC_CTRL_PROMIS_EN; + } else + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + + ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl); + + /* clear the old settings from the multicast hash table */ + ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl2_hash_mc_addr(hw, ha->addr); + atl2_hash_set(hw, hash_value); + } +} + +static void init_ring_ptrs(struct atl2_adapter *adapter) +{ + /* Read / Write Ptr Initialize: */ + adapter->txd_write_ptr = 0; + atomic_set(&adapter->txd_read_ptr, 0); + + adapter->rxd_read_ptr = 0; + adapter->rxd_write_ptr = 0; + + atomic_set(&adapter->txs_write_ptr, 0); + adapter->txs_next_clear = 0; +} + +/** + * atl2_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static int atl2_configure(struct atl2_adapter *adapter) +{ + struct atl2_hw *hw = &adapter->hw; + u32 value; + + /* clear interrupt status */ + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff); + + /* set MAC Address */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])); + ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value); + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])); + ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value); + + /* HI base address */ + ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, + (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32)); + + /* LO base address */ + ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO, + (u32)(adapter->txd_dma & 0x00000000ffffffffULL)); + ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO, + (u32)(adapter->txs_dma & 0x00000000ffffffffULL)); + ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO, + (u32)(adapter->rxd_dma & 0x00000000ffffffffULL)); + + /* element count */ + ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4)); + ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size); + ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size); + + /* config Internal SRAM */ +/* + ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end); + ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end); +*/ + + /* config IPG/IFG */ + value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) << + MAC_IPG_IFG_IPGT_SHIFT) | + (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) << + MAC_IPG_IFG_MIFG_SHIFT) | + (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) << + MAC_IPG_IFG_IPGR1_SHIFT)| + (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) << + MAC_IPG_IFG_IPGR2_SHIFT); + ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value); + + /* config Half-Duplex Control */ + value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | + (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) << + MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | + (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) << + MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); + 
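/* the half-duplex parameters gathered above (lcol, retry limit, ABEBT, jam IPG) go out in a single register write */ +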
ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value); + + /* set Interrupt Moderator Timer */ + ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt); + ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN); + + /* set Interrupt Clear Timer */ + ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict); + + /* set MTU */ + ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu + + ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE); + + /* 1590 */ + ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177); + + /* flow control */ + ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi); + ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo); + + /* Init mailbox */ + ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr); + ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr); + + /* enable DMA read/write */ + ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN); + ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN); + + value = ATL2_READ_REG(&adapter->hw, REG_ISR); + if ((value & ISR_PHY_LINKDOWN) != 0) + value = 1; /* config failed */ + else + value = 0; + + /* clear all interrupt status */ + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff); + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0); + return value; +} + +/** + * atl2_setup_ring_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + u8 offset = 0; + + /* real ring DMA buffer */ + adapter->ring_size = size = + adapter->txd_ring_size * 1 + 7 + /* dword align */ + adapter->txs_ring_size * 4 + 7 + /* dword align */ + adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */ + + adapter->ring_vir_addr = pci_alloc_consistent(pdev, size, + &adapter->ring_dma); + if (!adapter->ring_vir_addr) + return -ENOMEM; + memset(adapter->ring_vir_addr, 0, adapter->ring_size); + + /* Init TXD Ring */ + adapter->txd_dma = adapter->ring_dma ; + offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0; + adapter->txd_dma += offset; + adapter->txd_ring = adapter->ring_vir_addr + offset; + + /* Init TXS Ring */ + adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size; + offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0; + adapter->txs_dma += offset; + adapter->txs_ring = (struct tx_pkt_status *) + (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset)); + + /* Init RXD Ring */ + adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4; + offset = (adapter->rxd_dma & 127) ? 
+ (128 - (adapter->rxd_dma & 127)) : 0; + if (offset > 7) + offset -= 8; + else + offset += (128 - 8); + + adapter->rxd_dma += offset; + adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) + + (adapter->txs_ring_size * 4 + offset)); + +/* + * Read / Write Ptr Initialize: + * init_ring_ptrs(adapter); + */ + return 0; +} + +/** + * atl2_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl2_irq_enable(struct atl2_adapter *adapter) +{ + ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); + ATL2_WRITE_FLUSH(&adapter->hw); +} + +/** + * atl2_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl2_irq_disable(struct atl2_adapter *adapter) +{ + ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0); + ATL2_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + *ctrl |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + *ctrl &= ~MAC_CTRL_RMV_VLAN; + } +} + +static void atl2_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + u32 ctrl; + + atl2_irq_disable(adapter); + + ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL); + __atl2_vlan_mode(features, &ctrl); + ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl); + + atl2_irq_enable(adapter); +} + +static void atl2_restore_vlan(struct atl2_adapter *adapter) +{ + atl2_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +static netdev_features_t atl2_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int atl2_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atl2_vlan_mode(netdev, features); + + return 0; +} + +static void atl2_intr_rx(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rx_desc *rxd; + struct sk_buff *skb; + + do { + rxd = adapter->rxd_ring+adapter->rxd_write_ptr; + if (!rxd->status.update) + break; /* end of tx */ + + /* clear this flag at once */ + rxd->status.update = 0; + + if (rxd->status.ok && rxd->status.pkt_size >= 60) { + int rx_size = (int)(rxd->status.pkt_size - 4); + /* alloc new buffer */ + skb = netdev_alloc_skb_ip_align(netdev, rx_size); + if (NULL == skb) { + /* + * Check that some rx space is free. If not, + * free one and mark stats->rx_dropped++. 
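+ * (The RX ring here carries the packet bytes themselves and every
+ * frame is memcpy()'d into a freshly allocated skb, so a failed
+ * allocation costs only the current frame.)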
+ */ + netdev->stats.rx_dropped++; + break; + } + memcpy(skb->data, rxd->packet, rx_size); + skb_put(skb, rx_size); + skb->protocol = eth_type_trans(skb, netdev); + if (rxd->status.vlan) { + u16 vlan_tag = (rxd->status.vtag>>4) | + ((rxd->status.vtag&7) << 13) | + ((rxd->status.vtag&8) << 9); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } + netif_rx(skb); + netdev->stats.rx_bytes += rx_size; + netdev->stats.rx_packets++; + } else { + netdev->stats.rx_errors++; + + if (rxd->status.ok && rxd->status.pkt_size <= 60) + netdev->stats.rx_length_errors++; + if (rxd->status.mcast) + netdev->stats.multicast++; + if (rxd->status.crc) + netdev->stats.rx_crc_errors++; + if (rxd->status.align) + netdev->stats.rx_frame_errors++; + } + + /* advance write ptr */ + if (++adapter->rxd_write_ptr == adapter->rxd_ring_size) + adapter->rxd_write_ptr = 0; + } while (1); + + /* update mailbox? */ + adapter->rxd_read_ptr = adapter->rxd_write_ptr; + ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr); +} + +static void atl2_intr_tx(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 txd_read_ptr; + u32 txs_write_ptr; + struct tx_pkt_status *txs; + struct tx_pkt_header *txph; + int free_hole = 0; + + do { + txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr); + txs = adapter->txs_ring + txs_write_ptr; + if (!txs->update) + break; /* tx stop here */ + + free_hole = 1; + txs->update = 0; + + if (++txs_write_ptr == adapter->txs_ring_size) + txs_write_ptr = 0; + atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr); + + txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr); + txph = (struct tx_pkt_header *) + (((u8 *)adapter->txd_ring) + txd_read_ptr); + + if (txph->pkt_size != txs->pkt_size) { + struct tx_pkt_status *old_txs = txs; + printk(KERN_WARNING + "%s: txs packet size not consistent with txd" + " txd_:0x%08x, txs_:0x%08x!\n", + adapter->netdev->name, + *(u32 *)txph, *(u32 *)txs); + printk(KERN_WARNING + "txd read ptr: 0x%x\n", + txd_read_ptr); + txs = adapter->txs_ring + txs_write_ptr; + printk(KERN_WARNING + "txs-behind:0x%08x\n", + *(u32 *)txs); + if (txs_write_ptr < 2) { + txs = adapter->txs_ring + + (adapter->txs_ring_size + + txs_write_ptr - 2); + } else { + txs = adapter->txs_ring + (txs_write_ptr - 2); + } + printk(KERN_WARNING + "txs-before:0x%08x\n", + *(u32 *)txs); + txs = old_txs; + } + + /* 4for TPH */ + txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3); + if (txd_read_ptr >= adapter->txd_ring_size) + txd_read_ptr -= adapter->txd_ring_size; + + atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr); + + /* tx statistics: */ + if (txs->ok) { + netdev->stats.tx_bytes += txs->pkt_size; + netdev->stats.tx_packets++; + } + else + netdev->stats.tx_errors++; + + if (txs->defer) + netdev->stats.collisions++; + if (txs->abort_col) + netdev->stats.tx_aborted_errors++; + if (txs->late_col) + netdev->stats.tx_window_errors++; + if (txs->underun) + netdev->stats.tx_fifo_errors++; + } while (1); + + if (free_hole) { + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) + netif_wake_queue(adapter->netdev); + } +} + +static void atl2_check_for_link(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + + spin_lock(&adapter->stats_lock); + atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->stats_lock); + + /* notify upper layer link down ASAP */ + if (!(phy_data & 
BMSR_LSTATUS)) { /* Link Down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + printk(KERN_INFO "%s: %s NIC Link is Down\n", + atl2_driver_name, netdev->name); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +static inline void atl2_clear_phy_int(struct atl2_adapter *adapter) +{ + u16 phy_data; + spin_lock(&adapter->stats_lock); + atl2_read_phy_reg(&adapter->hw, 19, &phy_data); + spin_unlock(&adapter->stats_lock); +} + +/** + * atl2_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t atl2_intr(int irq, void *data) +{ + struct atl2_adapter *adapter = netdev_priv(data); + struct atl2_hw *hw = &adapter->hw; + u32 status; + + status = ATL2_READ_REG(hw, REG_ISR); + if (0 == status) + return IRQ_NONE; + + /* link event */ + if (status & ISR_PHY) + atl2_clear_phy_int(adapter); + + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ + ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + if (netif_running(adapter->netdev)) { /* reset MAC */ + ATL2_WRITE_REG(hw, REG_ISR, 0); + ATL2_WRITE_REG(hw, REG_IMR, 0); + ATL2_WRITE_FLUSH(hw); + schedule_work(&adapter->reset_task); + return IRQ_HANDLED; + } + } + + /* check if DMA read/write error? */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + ATL2_WRITE_REG(hw, REG_ISR, 0); + ATL2_WRITE_REG(hw, REG_IMR, 0); + ATL2_WRITE_FLUSH(hw); + schedule_work(&adapter->reset_task); + return IRQ_HANDLED; + } + + /* link event */ + if (status & (ISR_PHY | ISR_MANUAL)) { + adapter->netdev->stats.tx_carrier_errors++; + atl2_check_for_link(adapter); + } + + /* transmit event */ + if (status & ISR_TX_EVENT) + atl2_intr_tx(adapter); + + /* rx exception */ + if (status & ISR_RX_EVENT) + atl2_intr_rx(adapter); + + /* re-enable Interrupt */ + ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0); + return IRQ_HANDLED; +} + +static int atl2_request_irq(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int flags, err = 0; + + flags = IRQF_SHARED; + adapter->have_msi = true; + err = pci_enable_msi(adapter->pdev); + if (err) + adapter->have_msi = false; + + if (adapter->have_msi) + flags &= ~IRQF_SHARED; + + return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name, + netdev); +} + +/** + * atl2_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl2_free_ring_resources(struct atl2_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr, + adapter->ring_dma); +} + +/** + * atl2_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
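+ *
+ * Note that atl2_setup_ring_resources() carves the TXD, TXS and RXD
+ * rings out of a single contiguous DMA allocation, so each error path
+ * below only has that one block to release.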
+ */ +static int atl2_open(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + int err; + u32 val; + + /* disallow open during test */ + if (test_bit(__ATL2_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = atl2_setup_ring_resources(adapter); + if (err) + return err; + + err = atl2_init_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_init_hw; + } + + /* hardware has been reset, we need to reload some things */ + atl2_set_multi(netdev); + init_ring_ptrs(adapter); + + atl2_restore_vlan(adapter); + + if (atl2_configure(adapter)) { + err = -EIO; + goto err_config; + } + + err = atl2_request_irq(adapter); + if (err) + goto err_req_irq; + + clear_bit(__ATL2_DOWN, &adapter->flags); + + mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ)); + + val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); + ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, + val | MASTER_CTRL_MANUAL_INT); + + atl2_irq_enable(adapter); + + return 0; + +err_init_hw: +err_req_irq: +err_config: + atl2_free_ring_resources(adapter); + atl2_reset_hw(&adapter->hw); + + return err; +} + +static void atl2_down(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__ATL2_DOWN, &adapter->flags); + + netif_tx_disable(netdev); + + /* reset MAC to disable all RX/TX */ + atl2_reset_hw(&adapter->hw); + msleep(1); + + atl2_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_config_timer); + clear_bit(0, &adapter->cfg_phy); + + netif_carrier_off(netdev); + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; +} + +static void atl2_free_irq(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + +#ifdef CONFIG_PCI_MSI + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +#endif +} + +/** + * atl2_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl2_close(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags)); + + atl2_down(adapter); + atl2_free_irq(adapter); + atl2_free_ring_resources(adapter); + + return 0; +} + +static inline int TxsFreeUnit(struct atl2_adapter *adapter) +{ + u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr); + + return (adapter->txs_next_clear >= txs_write_ptr) ? + (int) (adapter->txs_ring_size - adapter->txs_next_clear + + txs_write_ptr - 1) : + (int) (txs_write_ptr - adapter->txs_next_clear - 1); +} + +static inline int TxdFreeBytes(struct atl2_adapter *adapter) +{ + u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr); + + return (adapter->txd_write_ptr >= txd_read_ptr) ? 
+ (int) (adapter->txd_ring_size - adapter->txd_write_ptr + + txd_read_ptr - 1) : + (int) (txd_read_ptr - adapter->txd_write_ptr - 1); +} + +static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct tx_pkt_header *txph; + u32 offset, copy_len; + int txs_unused; + int txbuf_unused; + + if (test_bit(__ATL2_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + txs_unused = TxsFreeUnit(adapter); + txbuf_unused = TxdFreeBytes(adapter); + + if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused || + txs_unused < 1) { + /* not enough resources */ + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + offset = adapter->txd_write_ptr; + + txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset); + + *(u32 *)txph = 0; + txph->pkt_size = skb->len; + + offset += 4; + if (offset >= adapter->txd_ring_size) + offset -= adapter->txd_ring_size; + copy_len = adapter->txd_ring_size - offset; + if (copy_len >= skb->len) { + memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len); + offset += ((u32)(skb->len + 3) & ~3); + } else { + memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len); + memcpy((u8 *)adapter->txd_ring, skb->data+copy_len, + skb->len-copy_len); + offset = ((u32)(skb->len-copy_len + 3) & ~3); + } +#ifdef NETIF_F_HW_VLAN_CTAG_TX + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag = skb_vlan_tag_get(skb); + vlan_tag = (vlan_tag << 4) | + (vlan_tag >> 13) | + ((vlan_tag >> 9) & 0x8); + txph->ins_vlan = 1; + txph->vlan = vlan_tag; + } +#endif + if (offset >= adapter->txd_ring_size) + offset -= adapter->txd_ring_size; + adapter->txd_write_ptr = offset; + + /* clear txs before send */ + adapter->txs_ring[adapter->txs_next_clear].update = 0; + if (++adapter->txs_next_clear == adapter->txs_ring_size) + adapter->txs_next_clear = 0; + + ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX, + (adapter->txd_write_ptr >> 2)); + + mmiowb(); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * atl2_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl2_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + + if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE))) + return -EINVAL; + + /* set MTU */ + if (hw->max_frame_size != new_mtu) { + netdev->mtu = new_mtu; + ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE + + VLAN_SIZE + ETHERNET_FCS_SIZE); + } + + return 0; +} + +/** + * atl2_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl2_set_mac(struct net_device *netdev, void *p) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl2_set_mac_addr(&adapter->hw); + + return 0; +} + +static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct 
atl2_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (atl2_read_phy_reg(&adapter->hw, + data->reg_num & 0x1F, &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (atl2_write_phy_reg(&adapter->hw, data->reg_num, + data->val_in)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl2_mii_ioctl(netdev, ifr, cmd); +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + return ethtool_ioctl(ifr); +#endif + default: + return -EOPNOTSUPP; + } +} + +/** + * atl2_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl2_tx_timeout(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +/** + * atl2_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl2_watchdog(unsigned long data) +{ + struct atl2_adapter *adapter = (struct atl2_adapter *) data; + + if (!test_bit(__ATL2_DOWN, &adapter->flags)) { + u32 drop_rxd, drop_rxs; + unsigned long flags; + + spin_lock_irqsave(&adapter->stats_lock, flags); + drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV); + drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV); + spin_unlock_irqrestore(&adapter->stats_lock, flags); + + adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs; + + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 4 * HZ)); + } +} + +/** + * atl2_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl2_phy_config(unsigned long data) +{ + struct atl2_adapter *adapter = (struct atl2_adapter *) data; + struct atl2_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->stats_lock, flags); + atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + spin_unlock_irqrestore(&adapter->stats_lock, flags); + clear_bit(0, &adapter->cfg_phy); +} + +static int atl2_up(struct atl2_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + u32 val; + + /* hardware has been reset, we need to reload some things */ + + err = atl2_init_hw(&adapter->hw); + if (err) { + err = -EIO; + return err; + } + + atl2_set_multi(netdev); + init_ring_ptrs(adapter); + + atl2_restore_vlan(adapter); + + if (atl2_configure(adapter)) { + err = -EIO; + goto err_up; + } + + clear_bit(__ATL2_DOWN, &adapter->flags); + + val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); + ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val | + MASTER_CTRL_MANUAL_INT); + + atl2_irq_enable(adapter); + +err_up: + return err; +} + +static void atl2_reinit_locked(struct atl2_adapter *adapter) +{ + 
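/* serialize concurrent resets on __ATL2_RESETTING, then bounce the interface */ +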
WARN_ON(in_interrupt()); + while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) + msleep(1); + atl2_down(adapter); + atl2_up(adapter); + clear_bit(__ATL2_RESETTING, &adapter->flags); +} + +static void atl2_reset_task(struct work_struct *work) +{ + struct atl2_adapter *adapter; + adapter = container_of(work, struct atl2_adapter, reset_task); + + atl2_reinit_locked(adapter); +} + +static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter) +{ + u32 value; + struct atl2_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY; + + /* duplex */ + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + + /* flow control */ + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + + /* PAD & CRC */ + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + + /* preamble length */ + value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + + /* vlan */ + __atl2_vlan_mode(netdev->features, &value); + + /* filter mode */ + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + + /* half retry buffer */ + value |= (((u32)(adapter->hw.retry_buf & + MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT); + + ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); +} + +static int atl2_check_link(struct atl2_adapter *adapter) +{ + struct atl2_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int ret_val; + u16 speed, duplex, phy_data; + int reconfig = 0; + + /* MII_BMSR must read twise */ + atl2_read_phy_reg(hw, MII_BMSR, &phy_data); + atl2_read_phy_reg(hw, MII_BMSR, &phy_data); + if (!(phy_data&BMSR_LSTATUS)) { /* link down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + u32 value; + /* disable rx */ + value = ATL2_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + return 0; + } + + /* Link Up */ + ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) + return ret_val; + switch (hw->MediaType) { + case MEDIA_TYPE_100M_FULL: + if (speed != SPEED_100 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_100M_HALF: + if (speed != SPEED_100 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_FULL: + if (speed != SPEED_10 || duplex != FULL_DUPLEX) + reconfig = 1; + break; + case MEDIA_TYPE_10M_HALF: + if (speed != SPEED_10 || duplex != HALF_DUPLEX) + reconfig = 1; + break; + } + /* link result is our setting */ + if (reconfig == 0) { + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl2_setup_mac_ctrl(adapter); + printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n", + atl2_driver_name, netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full Duplex" : "Half Duplex"); + } + + if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + return 0; + } + + /* change original link status */ + if (netif_carrier_ok(netdev)) { + u32 value; + /* disable rx */ + value = ATL2_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + ATL2_WRITE_REG(hw, REG_MAC_CTRL, value); + + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + + /* auto-neg, insert timer to re-config phy + * (if interval smaller than 5 seconds, something strange) */ + if (!test_bit(__ATL2_DOWN, &adapter->flags)) { + if (!test_and_set_bit(0, &adapter->cfg_phy)) + mod_timer(&adapter->phy_config_timer, + round_jiffies(jiffies + 5 * HZ)); + } + + return 0; +} + +/** + * atl2_link_chg_task - deal with link change event Out of interrupt context + */ +static void atl2_link_chg_task(struct work_struct *work) +{ + struct atl2_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atl2_adapter, link_chg_task); + + spin_lock_irqsave(&adapter->stats_lock, flags); + atl2_check_link(adapter); + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +static void atl2_setup_pcicmd(struct pci_dev *pdev) +{ + u16 cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + + if (cmd & PCI_COMMAND_INTX_DISABLE) + cmd &= ~PCI_COMMAND_INTX_DISABLE; + if (cmd & PCI_COMMAND_IO) + cmd &= ~PCI_COMMAND_IO; + if (0 == (cmd & PCI_COMMAND_MEMORY)) + cmd |= PCI_COMMAND_MEMORY; + if (0 == (cmd & PCI_COMMAND_MASTER)) + cmd |= PCI_COMMAND_MASTER; + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + /* + * some motherboards BIOS(PXE/EFI) driver may set PME + * while they transfer control to OS (Windows/Linux) + * so we should clear this bit before NIC work normally + */ + pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void atl2_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + atl2_intr(netdev->irq, netdev); + enable_irq(netdev->irq); +} +#endif + + +static const struct net_device_ops atl2_netdev_ops = { + .ndo_open = atl2_open, + .ndo_stop = atl2_close, + .ndo_start_xmit = atl2_xmit_frame, + .ndo_set_rx_mode = atl2_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl2_set_mac, + .ndo_change_mtu = atl2_change_mtu, + .ndo_fix_features = atl2_fix_features, + .ndo_set_features = atl2_set_features, + .ndo_do_ioctl = atl2_ioctl, + .ndo_tx_timeout = atl2_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl2_poll_controller, +#endif +}; + +/** + * atl2_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl2_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl2_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl2_adapter *adapter; + static int cards_found; + unsigned long mmio_start; + int mmio_len; + int err; + + cards_found = 0; + + err = pci_enable_device(pdev); + if (err) + return err; + + /* + * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA + * until the kernel has the proper infrastructure to support 64-bit DMA + * on these devices. 
+ */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n"); + goto err_dma; + } + + /* Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl2_driver_name */ + err = pci_request_regions(pdev, atl2_driver_name); + if (err) + goto err_pci_reg; + + /* Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings */ + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct atl2_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + + mmio_start = pci_resource_start(pdev, 0x0); + mmio_len = pci_resource_len(pdev, 0x0); + + adapter->hw.mem_rang = (u32)mmio_len; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_ioremap; + } + + atl2_setup_pcicmd(pdev); + + netdev->netdev_ops = &atl2_netdev_ops; + netdev->ethtool_ops = &atl2_ethtool_ops; + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + adapter->bd_number = cards_found; + adapter->pci_using_64 = false; + + /* setup the private structure */ + err = atl2_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); + + /* Init PHY as early as possible due to power saving issue */ + atl2_phy_init(&adapter->hw); + + /* reset the controller to + * put the device in a known good starting state */ + + if (atl2_reset_hw(&adapter->hw)) { + err = -EIO; + goto err_reset; + } + + /* copy the MAC address out of the EEPROM */ + atl2_read_mac_addr(&adapter->hw); + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + if (!is_valid_ether_addr(netdev->dev_addr)) { + err = -EIO; + goto err_eeprom; + } + + atl2_check_options(adapter); + + setup_timer(&adapter->watchdog_timer, atl2_watchdog, + (unsigned long)adapter); + + setup_timer(&adapter->phy_config_timer, atl2_phy_config, + (unsigned long)adapter); + + INIT_WORK(&adapter->reset_task, atl2_reset_task); + INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task); + + strcpy(netdev->name, "eth%d"); /* ?? */ + err = register_netdev(netdev); + if (err) + goto err_register; + + /* assume we have no link for now */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + cards_found++; + + return 0; + +err_reset: +err_register: +err_sw_init: +err_eeprom: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * atl2_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl2_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
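+ *
+ * The teardown order below is deliberate: the watchdog and PHY timers
+ * and the reset/link-change work items are stopped before
+ * unregister_netdev() so nothing can re-arm while the device goes away.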
+ */ +/* FIXME: write the original MAC address back in case it was changed from a + * BIOS-set value, as in atl1 -- CHS */ +static void atl2_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl2_adapter *adapter = netdev_priv(netdev); + + /* flush_scheduled work may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled */ + set_bit(__ATL2_DOWN, &adapter->flags); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_config_timer); + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->link_chg_task); + + unregister_netdev(netdev); + + atl2_force_ps(&adapter->hw); + + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static int atl2_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u16 speed, duplex; + u32 ctrl = 0; + u32 wufc = adapter->wol; + +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags)); + atl2_down(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); + atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl); + if (ctrl & BMSR_LSTATUS) + wufc &= ~ATLX_WUFC_LNKC; + + if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) { + u32 ret_val; + /* get current link speed & duplex */ + ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + printk(KERN_DEBUG + "%s: get speed&duplex error while suspend\n", + atl2_driver_name); + goto wol_dis; + } + + ctrl = 0; + + /* turn on magic packet wol */ + if (wufc & ATLX_WUFC_MAG) + ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); + + /* ignore Link Chg event when Link is up */ + ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); + + /* Config MAC CTRL Register */ + ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY; + if (FULL_DUPLEX == adapter->link_duplex) + ctrl |= MAC_CTRL_DUPLX; + ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + ctrl |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + ctrl |= (((u32)(adapter->hw.retry_buf & + MAC_CTRL_HALF_LEFT_BUF_MASK)) << + MAC_CTRL_HALF_LEFT_BUF_SHIFT); + if (wufc & ATLX_WUFC_MAG) { + /* magic packet maybe Broadcast&multicast&Unicast */ + ctrl |= MAC_CTRL_BC_EN; + } + + ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl); + + /* pcie patch */ + ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); + ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; + ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto suspend_exit; + } + + if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) { + /* link is down, so only LINK CHG WOL event enable */ + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); + ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl); + ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0); + + /* pcie patch */ + ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); + ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; + ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); + + hw->phy_configured = 
false; /* re-init PHY when resume */ + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + + goto suspend_exit; + } + +wol_dis: + /* WOL disabled */ + ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* pcie patch */ + ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1); + ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK; + ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl); + + atl2_force_ps(hw); + hw->phy_configured = false; /* re-init PHY when resume */ + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + +suspend_exit: + if (netif_running(netdev)) + atl2_free_irq(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int atl2_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl2_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR + "atl2: Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + + if (netif_running(netdev)) { + err = atl2_request_irq(adapter); + if (err) + return err; + } + + atl2_reset_hw(&adapter->hw); + + if (netif_running(netdev)) + atl2_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void atl2_shutdown(struct pci_dev *pdev) +{ + atl2_suspend(pdev, PMSG_SUSPEND); +} + +static struct pci_driver atl2_driver = { + .name = atl2_driver_name, + .id_table = atl2_pci_tbl, + .probe = atl2_probe, + .remove = atl2_remove, + /* Power Management Hooks */ + .suspend = atl2_suspend, +#ifdef CONFIG_PM + .resume = atl2_resume, +#endif + .shutdown = atl2_shutdown, +}; + +/** + * atl2_init_module - Driver Registration Routine + * + * atl2_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init atl2_init_module(void) +{ + printk(KERN_INFO "%s - version %s\n", atl2_driver_string, + atl2_driver_version); + printk(KERN_INFO "%s\n", atl2_copyright); + return pci_register_driver(&atl2_driver); +} +module_init(atl2_init_module); + +/** + * atl2_exit_module - Driver Exit Cleanup Routine + * + * atl2_exit_module is called just before the driver is removed + * from memory. 
+ */ +static void __exit atl2_exit_module(void) +{ + pci_unregister_driver(&atl2_driver); +} +module_exit(atl2_exit_module); + +static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) +{ + struct atl2_adapter *adapter = hw->back; + pci_read_config_word(adapter->pdev, reg, value); +} + +static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value) +{ + struct atl2_adapter *adapter = hw->back; + pci_write_config_word(adapter->pdev, reg, *value); +} + +static int atl2_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl2_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + + while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { +#define MY_ADV_MASK (ADVERTISE_10_HALF | \ + ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF| \ + ADVERTISE_100_FULL) + + if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) { + hw->MediaType = MEDIA_TYPE_AUTO_SENSOR; + hw->autoneg_advertised = MY_ADV_MASK; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_100_FULL) { + hw->MediaType = MEDIA_TYPE_100M_FULL; + hw->autoneg_advertised = ADVERTISE_100_FULL; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_100_HALF) { + hw->MediaType = MEDIA_TYPE_100M_HALF; + hw->autoneg_advertised = ADVERTISE_100_HALF; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_10_FULL) { + hw->MediaType = MEDIA_TYPE_10M_FULL; + hw->autoneg_advertised = ADVERTISE_10_FULL; + } else if ((ecmd->advertising & MY_ADV_MASK) == + ADVERTISE_10_HALF) { + hw->MediaType = MEDIA_TYPE_10M_HALF; + hw->autoneg_advertised = ADVERTISE_10_HALF; + } else { + clear_bit(__ATL2_RESETTING, &adapter->flags); + return -EINVAL; + } + ecmd->advertising = hw->autoneg_advertised | + ADVERTISED_TP | ADVERTISED_Autoneg; + } else { + clear_bit(__ATL2_RESETTING, &adapter->flags); + return -EINVAL; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + atl2_down(adapter); + atl2_up(adapter); + } else + atl2_reset_hw(&adapter->hw); + + clear_bit(__ATL2_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl2_get_msglevel(struct net_device *netdev) +{ + return 0; +} + +/* + * It's sane for this to be empty, but we might want to take advantage of this. 
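+ * (Editorial note, not part of the original driver: one plausible way to
+ * take advantage of it would be to store the value in a hypothetical
+ * adapter msg_enable field and gate diagnostic printouts with the
+ * standard netif_msg_*() helpers, as many other NIC drivers do.)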
+ */ +static void atl2_set_msglevel(struct net_device *netdev, u32 data) +{ +} + +static int atl2_get_regs_len(struct net_device *netdev) +{ +#define ATL2_REGS_LEN 42 + return sizeof(u32) * ATL2_REGS_LEN; +} + +static void atl2_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, sizeof(u32) * ATL2_REGS_LEN); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP); + regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); + regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG); + regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL); + regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); + regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL); + regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT); + regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); + regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE); + regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER); + regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS); + regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL); + regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK); + regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL); + regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG); + regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); + regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4); + regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE); + regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4); + regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); + regs_buff[20] = ATL2_READ_REG(hw, REG_MTU); + regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL); + regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END); + regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI); + regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO); + regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE); + regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO); + regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE); + regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO); + regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM); + regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR); + regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH); + regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW); + regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH); + regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH); + regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX); + regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX); + regs_buff[38] = ATL2_READ_REG(hw, REG_ISR); + regs_buff[39] = ATL2_READ_REG(hw, REG_IMR); + + atl2_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[40] = (u32)phy_data; + atl2_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[41] = (u32)phy_data; +} + +static int atl2_get_eeprom_len(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + if (!atl2_check_eeprom_exist(&adapter->hw)) + return 512; + else + return 0; +} + +static int atl2_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (atl2_check_eeprom_exist(hw)) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + 
eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) { + ret_val = -EIO; + goto free; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); +free: + kfree(eeprom_buff); + + return ret_val; +} + +static int atl2_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + struct atl2_hw *hw = &adapter->hw; + u32 *eeprom_buff; + u32 *ptr; + int max_len, first_dword, last_dword, ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = 512; + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 3) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) { + ret_val = -EIO; + goto out; + } + ptr++; + } + if (((eeprom->offset + eeprom->len) & 3)) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + if (!atl2_read_eeprom(hw, last_dword * 4, + &(eeprom_buff[last_dword - first_dword]))) { + ret_val = -EIO; + goto out; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_dword - first_dword + 1; i++) { + if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) { + ret_val = -EIO; + goto out; + } + } + out: + kfree(eeprom_buff); + return ret_val; +} + +static void atl2_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl2_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl2_get_regs_len(netdev); + drvinfo->eedump_len = atl2_get_eeprom_len(netdev); +} + +static void atl2_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + if (adapter->wol & ATLX_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & ATLX_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & ATLX_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & ATLX_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & ATLX_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (wol->wolopts & (WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= ATLX_WUFC_MAG; + if 
(wol->wolopts & WAKE_PHY) + adapter->wol |= ATLX_WUFC_LNKC; + + return 0; +} + +static int atl2_nway_reset(struct net_device *netdev) +{ + struct atl2_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl2_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl2_ethtool_ops = { + .get_settings = atl2_get_settings, + .set_settings = atl2_set_settings, + .get_drvinfo = atl2_get_drvinfo, + .get_regs_len = atl2_get_regs_len, + .get_regs = atl2_get_regs, + .get_wol = atl2_get_wol, + .set_wol = atl2_set_wol, + .get_msglevel = atl2_get_msglevel, + .set_msglevel = atl2_set_msglevel, + .nway_reset = atl2_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl2_get_eeprom_len, + .get_eeprom = atl2_get_eeprom, + .set_eeprom = atl2_set_eeprom, +}; + +#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \ + (((a) & 0xff00ff00) >> 8)) +#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16)) +#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8)) + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static s32 atl2_reset_hw(struct atl2_hw *hw) +{ + u32 icr; + u16 pci_cfg_cmd_word; + int i; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ + atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); + if ((pci_cfg_cmd_word & + (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) != + (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) { + pci_cfg_cmd_word |= + (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER); + atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word); + } + + /* Clear Interrupt mask to stop board from generating + * interrupts & Clear any pending interrupt events + */ + /* FIXME */ + /* ATL2_WRITE_REG(hw, REG_IMR, 0); */ + /* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */ + + /* Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. 
+ */ + ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST); + wmb(); + msleep(1); /* delay about 1ms */ + + /* Wait at least 10ms for All module to be Idle */ + for (i = 0; i < 10; i++) { + icr = ATL2_READ_REG(hw, REG_IDLE_STATUS); + if (!icr) + break; + msleep(1); /* delay 1 ms */ + cpu_relax(); + } + + if (icr) + return icr; + + return 0; +} + +#define CUSTOM_SPI_CS_SETUP 2 +#define CUSTOM_SPI_CLK_HI 2 +#define CUSTOM_SPI_CLK_LO 2 +#define CUSTOM_SPI_CS_HOLD 2 +#define CUSTOM_SPI_CS_HI 3 + +static struct atl2_spi_flash_dev flash_table[] = +{ +/* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ +{"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 }, +{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 }, +{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 }, +}; + +static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf) +{ + int i; + u32 value; + + ATL2_WRITE_REG(hw, REG_SPI_DATA, 0); + ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr); + + value = SPI_FLASH_CTRL_WAIT_READY | + (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << + SPI_FLASH_CTRL_CS_SETUP_SHIFT | + (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) << + SPI_FLASH_CTRL_CLK_HI_SHIFT | + (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) << + SPI_FLASH_CTRL_CLK_LO_SHIFT | + (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) << + SPI_FLASH_CTRL_CS_HOLD_SHIFT | + (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << + SPI_FLASH_CTRL_CS_HI_SHIFT | + (0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; + + ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + + value |= SPI_FLASH_CTRL_START; + + ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + + for (i = 0; i < 10; i++) { + msleep(1); + value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (!(value & SPI_FLASH_CTRL_START)) + break; + } + + if (value & SPI_FLASH_CTRL_START) + return false; + + *buf = ATL2_READ_REG(hw, REG_SPI_DATA); + + return true; +} + +/* + * get_permanent_address + * return 0 if get valid mac address, + */ +static int get_permanent_address(struct atl2_hw *hw) +{ + u32 Addr[2]; + u32 i, Control; + u16 Register; + u8 EthAddr[ETH_ALEN]; + bool KeyValid; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + Addr[0] = 0; + Addr[1] = 0; + + if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */ + Register = 0; + KeyValid = false; + + /* Read out all EEPROM content */ + i = 0; + while (1) { + if (atl2_read_eeprom(hw, i + 0x100, &Control)) { + if (KeyValid) { + if (Register == REG_MAC_STA_ADDR) + Addr[0] = Control; + else if (Register == + (REG_MAC_STA_ADDR + 4)) + Addr[1] = Control; + KeyValid = false; + } else if ((Control & 0xff) == 0x5A) { + KeyValid = true; + Register = (u16) (Control >> 16); + } else { + /* assume data end while encount an invalid KEYWORD */ + break; + } + } else { + break; /* read error */ + } + i += 4; + } + + *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); + *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); + + if (is_valid_ether_addr(EthAddr)) { + memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN); + return 0; + } + return 1; + } + + /* see if SPI flash exists? 
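+	 * (Editorial clarification, derived from the code itself: the EEPROM
+	 * scan above and the SPI scan below both walk a table of 32-bit
+	 * words. A word whose low byte is 0x5A is a key whose upper 16 bits
+	 * name a register offset, e.g. REG_MAC_STA_ADDR or
+	 * REG_MAC_STA_ADDR + 4, and the next word in the table supplies that
+	 * register's value. The scan ends when a word that is not a 0x5A key
+	 * shows up outside a key/value pair, or when a read fails.)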
*/ + Addr[0] = 0; + Addr[1] = 0; + Register = 0; + KeyValid = false; + i = 0; + while (1) { + if (atl2_spi_read(hw, i + 0x1f000, &Control)) { + if (KeyValid) { + if (Register == REG_MAC_STA_ADDR) + Addr[0] = Control; + else if (Register == (REG_MAC_STA_ADDR + 4)) + Addr[1] = Control; + KeyValid = false; + } else if ((Control & 0xff) == 0x5A) { + KeyValid = true; + Register = (u16) (Control >> 16); + } else { + break; /* data end */ + } + } else { + break; /* read error */ + } + i += 4; + } + + *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); + *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]); + if (is_valid_ether_addr(EthAddr)) { + memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN); + return 0; + } + /* maybe MAC-address is from BIOS */ + Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR); + Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4); + *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]); + *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]); + + if (is_valid_ether_addr(EthAddr)) { + memcpy(hw->perm_mac_addr, EthAddr, ETH_ALEN); + return 0; + } + + return 1; +} + +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +static s32 atl2_read_mac_addr(struct atl2_hw *hw) +{ + if (get_permanent_address(hw)) { + /* for test */ + /* FIXME: shouldn't we use eth_random_addr() here? */ + hw->perm_mac_addr[0] = 0x00; + hw->perm_mac_addr[1] = 0x13; + hw->perm_mac_addr[2] = 0x74; + hw->perm_mac_addr[3] = 0x00; + hw->perm_mac_addr[4] = 0x5c; + hw->perm_mac_addr[5] = 0x38; + } + + memcpy(hw->mac_addr, hw->perm_mac_addr, ETH_ALEN); + + return 0; +} + +/* + * Hashes an address to determine its location in the multicast table + * + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + * + * atl2_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr) +{ + u32 crc32, value; + int i; + + value = 0; + crc32 = ether_crc_le(6, mc_addr); + + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
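+	 *
+	 * With only two 32-bit registers in this table, the code below uses
+	 * the single most-significant bit of hash_value to pick the register
+	 * and the next five bits (bits 30..26) to pick the bit inside it.
+	 * Worked example (editorial illustration, not from the original
+	 * source):
+	 *
+	 *	hash_value = 0xC0000000
+	 *	hash_reg   = (0xC0000000 >> 31) & 0x1  = 1
+	 *	hash_bit   = (0xC0000000 >> 26) & 0x1F = 0x10
+	 *
+	 * so bit 16 of the second hash register would be set.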
+ */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} + +/* + * atl2_init_pcie - init PCIE module + */ +static void atl2_init_pcie(struct atl2_hw *hw) +{ + u32 value; + value = LTSSM_TEST_MODE_DEF; + ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); + + value = PCIE_DLL_TX_CTRL1_DEF; + ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value); +} + +static void atl2_init_flash_opcode(struct atl2_hw *hw) +{ + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) + hw->flash_vendor = 0; /* ATMEL */ + + /* Init OP table */ + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM, + flash_table[hw->flash_vendor].cmdPROGRAM); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE, + flash_table[hw->flash_vendor].cmdSECTOR_ERASE); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE, + flash_table[hw->flash_vendor].cmdCHIP_ERASE); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID, + flash_table[hw->flash_vendor].cmdRDID); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN, + flash_table[hw->flash_vendor].cmdWREN); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR, + flash_table[hw->flash_vendor].cmdRDSR); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR, + flash_table[hw->flash_vendor].cmdWRSR); + ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ, + flash_table[hw->flash_vendor].cmdREAD); +} + +/******************************************************************** +* Performs basic configuration of the adapter. +* +* hw - Struct containing variables accessed by shared code +* Assumes that the controller has previously been reset and is in a +* post-reset uninitialized state. Initializes multicast table, +* and Calls routines to setup link +* Leaves the transmit and receive units disabled and uninitialized. +********************************************************************/ +static s32 atl2_init_hw(struct atl2_hw *hw) +{ + u32 ret_val = 0; + + atl2_init_pcie(hw); + + /* Zero out the Multicast HASH table */ + /* clear the old settings from the multicast hash table */ + ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + atl2_init_flash_opcode(hw); + + ret_val = atl2_phy_init(hw); + + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
+ * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) + return ATLX_ERR_PHY_RES; + + switch (phy_data & MII_ATLX_PSSR_SPEED) { + case MII_ATLX_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_ATLX_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return ATLX_ERR_PHY_SPEED; + } + + if (phy_data & MII_ATLX_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | + MDIO_SUP_PREAMBLE | + MDIO_RW | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ATL2_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16)val; + return 0; + } + + return ATLX_ERR_PHY; +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = ATL2_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + + wmb(); + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return ATLX_ERR_PHY; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) +{ + s32 ret_val; + s16 mii_autoneg_adv_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + + /* Need to parse autoneg_advertised and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). */ + mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + + /* Need to parse MediaType and setup the + * appropriate PHY registers. 
*/ + switch (hw->MediaType) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= + (MII_AR_10T_HD_CAPS | + MII_AR_10T_FD_CAPS | + MII_AR_100TX_HD_CAPS| + MII_AR_100TX_FD_CAPS); + hw->autoneg_advertised = + ADVERTISE_10_HALF | + ADVERTISE_10_FULL | + ADVERTISE_100_HALF| + ADVERTISE_100_FULL; + break; + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + hw->autoneg_advertised = ADVERTISE_100_FULL; + break; + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + hw->autoneg_advertised = ADVERTISE_100_HALF; + break; + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + hw->autoneg_advertised = ADVERTISE_10_FULL; + break; + default: + mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + hw->autoneg_advertised = ADVERTISE_10_HALF; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + + ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + + if (ret_val) + return ret_val; + + return 0; +} + +/* + * Resets the PHY and make all config validate + * + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) + */ +static s32 atl2_phy_commit(struct atl2_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; + ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /* pcie serdes link may be down ! */ + for (i = 0; i < 25; i++) { + msleep(1); + val = ATL2_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (0 != (val & (MDIO_START | MDIO_BUSY))) { + printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n"); + return ret_val; + } + } + return 0; +} + +static s32 atl2_phy_init(struct atl2_hw *hw) +{ + s32 ret_val; + u16 phy_val; + + if (hw->phy_configured) + return 0; + + /* Enable PHY */ + ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1); + ATL2_WRITE_FLUSH(hw); + msleep(1); + + /* check if the PHY is in powersaving mode */ + atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); + atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); + + /* 024E / 124E 0r 0274 / 1274 ? 
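+	 * (Editorial note: whichever PHY revision is present, the test below
+	 * only looks at bit 0x1000 of PHY debug register 0 -- if it is set,
+	 * the PHY is in power-saving mode, so the bit is cleared to wake the
+	 * PHY before autonegotiation is restarted further down.)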
*/ + if (phy_val & 0x1000) { + phy_val &= ~0x1000; + atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val); + } + + msleep(1); + + /*Enable PHY LinkChange Interrupt */ + ret_val = atl2_write_phy_reg(hw, 18, 0xC00); + if (ret_val) + return ret_val; + + /* setup AutoNeg parameters */ + ret_val = atl2_phy_setup_autoneg_adv(hw); + if (ret_val) + return ret_val; + + /* SW.Reset & En-Auto-Neg to restart Auto-Neg */ + ret_val = atl2_phy_commit(hw); + if (ret_val) + return ret_val; + + hw->phy_configured = true; + + return ret_val; +} + +static void atl2_set_mac_addr(struct atl2_hw *hw) +{ + u32 value; + /* 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])); + ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* hight dword */ + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])); + ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* + * check_eeprom_exist + * return 0 if eeprom exist + */ +static int atl2_check_eeprom_exist(struct atl2_hw *hw) +{ + u32 value; + + value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + } + value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 0 : 1; +} + +/* FIXME: This doesn't look right. -- CHS */ +static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value) +{ + return true; +} + +static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue) +{ + int i; + u32 Control; + + if (Offset & 0x3) + return false; /* address do not align */ + + ATL2_WRITE_REG(hw, REG_VPD_DATA, 0); + Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + ATL2_WRITE_REG(hw, REG_VPD_CAP, Control); + + for (i = 0; i < 10; i++) { + msleep(2); + Control = ATL2_READ_REG(hw, REG_VPD_CAP); + if (Control & VPD_CAP_VPD_FLAG) + break; + } + + if (Control & VPD_CAP_VPD_FLAG) { + *pValue = ATL2_READ_REG(hw, REG_VPD_DATA); + return true; + } + return false; /* timeout */ +} + +static void atl2_force_ps(struct atl2_hw *hw) +{ + u16 phy_val; + + atl2_write_phy_reg(hw, MII_DBG_ADDR, 0); + atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val); + atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000); + + atl2_write_phy_reg(hw, MII_DBG_ADDR, 2); + atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000); + atl2_write_phy_reg(hw, MII_DBG_ADDR, 3); + atl2_write_phy_reg(hw, MII_DBG_DATA, 0); +} + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define ATL2_MAX_NIC 4 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET} +#ifndef module_param_array +/* Module Parameters are always initialized to -1, so that the driver + * can tell the difference between no user specified value or the + * user asking for the default value. + * The true default values are loaded in when atl2_check_options is called. + * + * This is a GCC extension to ANSI C. + * See the item "Labeled Elements in Initializers" in the section + * "Extensions to the C Language Family" of the GCC documentation. 
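+ *
+ * For illustration (an editorial example, not in the original source),
+ * with ATL2_MAX_NIC == 4 the initializer
+ *
+ *	ATL2_PARAM_INIT
+ *
+ * expands to
+ *
+ *	{[0 ... 4] = OPTION_UNSET}
+ *
+ * i.e. all five array slots start as -1 until atl2_check_options() fills
+ * in real defaults for entries the user left unset.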
+ */ + +#define ATL2_PARAM(X, desc) \ + static const int X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ + MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ + MODULE_PARM_DESC(X, desc); +#else +#define ATL2_PARAM(X, desc) \ + static int X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +#endif + +/* + * Transmit Memory Size + * Valid Range: 64-2048 + * Default Value: 128 + */ +#define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */ +#define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */ +#define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */ +ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory"); + +/* + * Receive Memory Block Count + * Valid Range: 16-512 + * Default Value: 128 + */ +#define ATL2_MIN_RXD_COUNT 16 +#define ATL2_MAX_RXD_COUNT 512 +#define ATL2_DEFAULT_RXD_COUNT 64 +ATL2_PARAM(RxMemBlock, "Number of receive memory block"); + +/* + * User Specified MediaType Override + * + * Valid Range: 0-5 + * - 0 - auto-negotiate at all supported speeds + * - 1 - only link at 1000Mbps Full Duplex + * - 2 - only link at 100Mbps Full Duplex + * - 3 - only link at 100Mbps Half Duplex + * - 4 - only link at 10Mbps Full Duplex + * - 5 - only link at 10Mbps Half Duplex + * Default Value: 0 + */ +ATL2_PARAM(MediaType, "MediaType Select"); + +/* + * Interrupt Moderate Timer in units of 2048 ns (~2 us) + * Valid Range: 10-65535 + * Default Value: 45000(90ms) + */ +#define INT_MOD_DEFAULT_CNT 100 /* 200us */ +#define INT_MOD_MAX_CNT 65000 +#define INT_MOD_MIN_CNT 50 +ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer"); + +/* + * FlashVendor + * Valid Range: 0-2 + * 0 - Atmel + * 1 - SST + * 2 - ST + */ +ATL2_PARAM(FlashVendor, "SPI Flash Vendor"); + +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +#define FLASH_VENDOR_DEFAULT 0 +#define FLASH_VENDOR_MIN 0 +#define FLASH_VENDOR_MAX 2 + +struct atl2_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl2_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int atl2_validate_option(int *value, struct atl2_option *opt) +{ + int i; + struct atl2_opt_list *ent; + + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + printk(KERN_INFO "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + printk(KERN_INFO "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + printk(KERN_INFO "%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + printk(KERN_INFO "%s\n", ent->str); + return 0; + } + } + break; + default: + BUG(); + } + + printk(KERN_INFO "Invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * atl2_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
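+ *
+ * Illustrative usage only (an editorial sketch, not from the original
+ * source): loading the module for two boards with 8 KB of transmit
+ * memory each and auto-sensed media could look like
+ *
+ *	modprobe atl2 TxMemSize=8,8 MediaType=0,0
+ *
+ * parameters that are left out simply keep the defaults applied below.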
+ */ +static void atl2_check_options(struct atl2_adapter *adapter) +{ + int val; + struct atl2_option opt; + int bd = adapter->bd_number; + if (bd >= ATL2_MAX_NIC) { + printk(KERN_NOTICE "Warning: no configuration for board #%i\n", + bd); + printk(KERN_NOTICE "Using defaults for all values\n"); +#ifndef module_param_array + bd = ATL2_MAX_NIC; +#endif + } + + /* Bytes of Transmit Memory */ + opt.type = range_option; + opt.name = "Bytes of Transmit Memory"; + opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE); + opt.def = ATL2_DEFAULT_TX_MEMSIZE; + opt.arg.r.min = ATL2_MIN_TX_MEMSIZE; + opt.arg.r.max = ATL2_MAX_TX_MEMSIZE; +#ifdef module_param_array + if (num_TxMemSize > bd) { +#endif + val = TxMemSize[bd]; + atl2_validate_option(&val, &opt); + adapter->txd_ring_size = ((u32) val) * 1024; +#ifdef module_param_array + } else + adapter->txd_ring_size = ((u32)opt.def) * 1024; +#endif + /* txs ring size: */ + adapter->txs_ring_size = adapter->txd_ring_size / 128; + if (adapter->txs_ring_size > 160) + adapter->txs_ring_size = 160; + + /* Receive Memory Block Count */ + opt.type = range_option; + opt.name = "Number of receive memory block"; + opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT); + opt.def = ATL2_DEFAULT_RXD_COUNT; + opt.arg.r.min = ATL2_MIN_RXD_COUNT; + opt.arg.r.max = ATL2_MAX_RXD_COUNT; +#ifdef module_param_array + if (num_RxMemBlock > bd) { +#endif + val = RxMemBlock[bd]; + atl2_validate_option(&val, &opt); + adapter->rxd_ring_size = (u32)val; + /* FIXME */ + /* ((u16)val)&~1; */ /* even number */ +#ifdef module_param_array + } else + adapter->rxd_ring_size = (u32)opt.def; +#endif + /* init RXD Flow control value */ + adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7; + adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) > + (adapter->rxd_ring_size / 12) ? 
(ATL2_MIN_RXD_COUNT / 8) : + (adapter->rxd_ring_size / 12); + + /* Interrupt Moderate Timer */ + opt.type = range_option; + opt.name = "Interrupt Moderate Timer"; + opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT); + opt.def = INT_MOD_DEFAULT_CNT; + opt.arg.r.min = INT_MOD_MIN_CNT; + opt.arg.r.max = INT_MOD_MAX_CNT; +#ifdef module_param_array + if (num_IntModTimer > bd) { +#endif + val = IntModTimer[bd]; + atl2_validate_option(&val, &opt); + adapter->imt = (u16) val; +#ifdef module_param_array + } else + adapter->imt = (u16)(opt.def); +#endif + /* Flash Vendor */ + opt.type = range_option; + opt.name = "SPI Flash Vendor"; + opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT); + opt.def = FLASH_VENDOR_DEFAULT; + opt.arg.r.min = FLASH_VENDOR_MIN; + opt.arg.r.max = FLASH_VENDOR_MAX; +#ifdef module_param_array + if (num_FlashVendor > bd) { +#endif + val = FlashVendor[bd]; + atl2_validate_option(&val, &opt); + adapter->hw.flash_vendor = (u8) val; +#ifdef module_param_array + } else + adapter->hw.flash_vendor = (u8)(opt.def); +#endif + /* MediaType */ + opt.type = range_option; + opt.name = "Speed/Duplex Selection"; + opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR); + opt.def = MEDIA_TYPE_AUTO_SENSOR; + opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR; + opt.arg.r.max = MEDIA_TYPE_10M_HALF; +#ifdef module_param_array + if (num_MediaType > bd) { +#endif + val = MediaType[bd]; + atl2_validate_option(&val, &opt); + adapter->hw.MediaType = (u16) val; +#ifdef module_param_array + } else + adapter->hw.MediaType = (u16)(opt.def); +#endif +} diff --git a/drivers/net/ethernet/atheros/atlx/atl2.h b/drivers/net/ethernet/atheros/atlx/atl2.h new file mode 100644 index 000000000..2f27d4c4c --- /dev/null +++ b/drivers/net/ethernet/atheros/atlx/atl2.h @@ -0,0 +1,524 @@ +/* atl2.h -- atl2 driver definitions + * + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2006 xiong huang + * Copyright(c) 2007 Chris Snook + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL2_H_ +#define _ATL2_H_ + +#include +#include + +#ifndef _ATL2_HW_H_ +#define _ATL2_HW_H_ + +#ifndef _ATL2_OSDEP_H_ +#define _ATL2_OSDEP_H_ + +#include +#include +#include +#include + +#include "atlx.h" + +#ifdef ETHTOOL_OPS_COMPAT +int ethtool_ioctl(struct ifreq *ifr); +#endif + +#define PCI_COMMAND_REGISTER PCI_COMMAND +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE + +#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \ + ((a)->hw_addr + (reg)))) + +#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr)) + +#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg))) + +#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \ + ((a)->hw_addr + (reg)))) + +#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg))) + +#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \ + ((a)->hw_addr + (reg)))) + +#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg))) + +#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \ + (iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2)))) + +#define ATL2_READ_REG_ARRAY(a, reg, offset) \ + (ioread32(((a)->hw_addr + (reg)) + ((offset) << 2))) + +#endif /* _ATL2_OSDEP_H_ */ + +struct atl2_adapter; +struct atl2_hw; + +/* function prototype */ +static s32 atl2_reset_hw(struct atl2_hw *hw); +static s32 atl2_read_mac_addr(struct atl2_hw *hw); +static s32 atl2_init_hw(struct atl2_hw *hw); +static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed, + u16 *duplex); +static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr); +static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value); +static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data); +static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data); +static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value); +static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value); +static void atl2_set_mac_addr(struct atl2_hw *hw); +static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue); +static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value); +static s32 atl2_phy_init(struct atl2_hw *hw); +static int atl2_check_eeprom_exist(struct atl2_hw *hw); +static void atl2_force_ps(struct atl2_hw *hw); + +/* register definition */ + +/* Block IDLE Status Register */ +#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */ +#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */ +#define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */ +#define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */ + +/* MDIO Control Register */ +#define MDIO_WAIT_TIMES 10 + +/* MAC Control Register */ +#define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */ +#define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */ +#define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28 +#define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */ + +/* Internal SRAM Partition Register */ +#define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM + * default: 2byte*1024 */ +#define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM + * default: 2byte*1024 */ + +/* Descriptor Control register */ +#define REG_TXD_BASE_ADDR_LO 0x1544 /* The base address of the Transmit + * Data Mem low 32-bit(dword align) */ +#define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size(by + * double word , max 256KB) */ +#define REG_TXS_BASE_ADDR_LO 0x154C /* The base address of the Transmit + * Status Memory low 32-bit(dword word + * align) */ +#define REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 
4*2047 + * bytes. */ +#define REG_RXD_BASE_ADDR_LO 0x1554 /* The base address of the Transmit + * Status Memory low 32-bit(unit 8 + * bytes) */ +#define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer + * number (unit 1536bytes, max + * 1536*2047) */ + +/* DMAR Control Register */ +#define REG_DMAR 0x1580 +#define DMAR_EN 0x1 /* 1: Enable DMAR */ + +/* TX Cur-Through (early tx threshold) Control Register */ +#define REG_TX_CUT_THRESH 0x1590 /* TxMac begin transmit packet + * threshold(unit word) */ + +/* DMAW Control Register */ +#define REG_DMAW 0x15A0 +#define DMAW_EN 0x1 + +/* Flow control register */ +#define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow + * threshold configuration register */ +#define REG_PAUSE_OFF_TH 0x15AA /* RXD lower watermark of overflow + * threshold configuration register */ + +/* Mailbox Register */ +#define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */ +#define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536byets) */ + +/* Interrupt Status Register */ +#define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */ +#define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set + * when SW_MAN_INT_EN is set in Table 51 + * Selene Master Control Register + * (Offset 0x1400). */ +#define ISR_RXF_OV 4 /* RXF overflow interrupt */ +#define ISR_TXF_UR 8 /* TXF underrun interrupt */ +#define ISR_TXS_OV 0x10 /* Internal transmit status buffer full + * interrupt */ +#define ISR_RXS_OV 0x20 /* Internal receive status buffer full + * interrupt */ +#define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */ +#define ISR_HOST_TXD_UR 0x80 +#define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full , one pulse */ +#define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should + * do Reset */ +#define ISR_DMAW_TO_RST 0x400 +#define ISR_PHY 0x800 /* phy interrupt */ +#define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written + * to host */ +#define ISR_RS_UPDATE 0x20000 /* interrupt ater new rx pkt status written + * to host. 
*/ +#define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one + * packet */ + +#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\ + ISR_TS_UPDATE | ISR_TX_EARLY) +#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\ + ISR_RS_UPDATE) + +#define IMR_NORMAL_MASK (\ + /*ISR_LINK_CHG |*/\ + ISR_MANUAL |\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY |\ + ISR_PHY_LINKDOWN |\ + ISR_TS_UPDATE |\ + ISR_RS_UPDATE) + +/* Receive MAC Statistics Registers */ +#define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */ +#define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX + * FIFO overflow */ +#define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX + * Status Buffer Overflow */ +#define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to + * address filtering */ + +/* MII definitions */ + +/* PHY Common Register */ +#define MII_SMARTSPEED 0x14 +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/* PCI Command Register Bit Definitions */ +#define PCI_REG_COMMAND 0x04 +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */ + +/* The size (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */ +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x2000 +#define VLAN_SIZE 4 + +struct tx_pkt_header { + unsigned pkt_size:11; + unsigned:4; /* reserved */ + unsigned ins_vlan:1; /* txmac should insert vlan */ + unsigned short vlan; /* vlan tag */ +}; +/* FIXME: replace above bitfields with MASK/SHIFT defines below */ +#define TX_PKT_HEADER_SIZE_MASK 0x7FF +#define TX_PKT_HEADER_SIZE_SHIFT 0 +#define TX_PKT_HEADER_INS_VLAN_MASK 0x1 +#define TX_PKT_HEADER_INS_VLAN_SHIFT 15 +#define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF +#define TX_PKT_HEADER_VLAN_TAG_SHIFT 16 + +struct tx_pkt_status { + unsigned pkt_size:11; + unsigned:5; /* reserved */ + unsigned ok:1; /* current packet transmitted without error */ + unsigned bcast:1; /* broadcast packet */ + unsigned mcast:1; /* multicast packet */ + unsigned pause:1; /* transmiited a pause frame */ + unsigned ctrl:1; + unsigned defer:1; /* current packet is xmitted with defer */ + unsigned exc_defer:1; + unsigned single_col:1; + unsigned multi_col:1; + unsigned late_col:1; + unsigned abort_col:1; + unsigned underun:1; /* current packet is aborted + * due to txram underrun */ + unsigned:3; /* reserved */ + unsigned update:1; /* always 1'b1 in tx_status_buf */ +}; +/* FIXME: replace above bitfields with MASK/SHIFT defines below */ +#define TX_PKT_STATUS_SIZE_MASK 0x7FF +#define TX_PKT_STATUS_SIZE_SHIFT 0 +#define TX_PKT_STATUS_OK_MASK 0x1 +#define TX_PKT_STATUS_OK_SHIFT 16 +#define TX_PKT_STATUS_BCAST_MASK 0x1 +#define TX_PKT_STATUS_BCAST_SHIFT 17 +#define TX_PKT_STATUS_MCAST_MASK 0x1 +#define TX_PKT_STATUS_MCAST_SHIFT 18 +#define TX_PKT_STATUS_PAUSE_MASK 0x1 +#define TX_PKT_STATUS_PAUSE_SHIFT 19 +#define TX_PKT_STATUS_CTRL_MASK 0x1 +#define TX_PKT_STATUS_CTRL_SHIFT 20 +#define TX_PKT_STATUS_DEFER_MASK 0x1 +#define TX_PKT_STATUS_DEFER_SHIFT 21 +#define TX_PKT_STATUS_EXC_DEFER_MASK 0x1 +#define TX_PKT_STATUS_EXC_DEFER_SHIFT 22 +#define TX_PKT_STATUS_SINGLE_COL_MASK 0x1 +#define TX_PKT_STATUS_SINGLE_COL_SHIFT 23 +#define TX_PKT_STATUS_MULTI_COL_MASK 0x1 
+#define TX_PKT_STATUS_MULTI_COL_SHIFT 24 +#define TX_PKT_STATUS_LATE_COL_MASK 0x1 +#define TX_PKT_STATUS_LATE_COL_SHIFT 25 +#define TX_PKT_STATUS_ABORT_COL_MASK 0x1 +#define TX_PKT_STATUS_ABORT_COL_SHIFT 26 +#define TX_PKT_STATUS_UNDERRUN_MASK 0x1 +#define TX_PKT_STATUS_UNDERRUN_SHIFT 27 +#define TX_PKT_STATUS_UPDATE_MASK 0x1 +#define TX_PKT_STATUS_UPDATE_SHIFT 31 + +struct rx_pkt_status { + unsigned pkt_size:11; /* packet size, max 2047 bytes */ + unsigned:5; /* reserved */ + unsigned ok:1; /* current packet received ok without error */ + unsigned bcast:1; /* current packet is broadcast */ + unsigned mcast:1; /* current packet is multicast */ + unsigned pause:1; + unsigned ctrl:1; + unsigned crc:1; /* received a packet with crc error */ + unsigned code:1; /* received a packet with code error */ + unsigned runt:1; /* received a packet less than 64 bytes + * with good crc */ + unsigned frag:1; /* received a packet less than 64 bytes + * with bad crc */ + unsigned trunc:1; /* current frame truncated due to rxram full */ + unsigned align:1; /* this packet is alignment error */ + unsigned vlan:1; /* this packet has vlan */ + unsigned:3; /* reserved */ + unsigned update:1; + unsigned short vtag; /* vlan tag */ + unsigned:16; +}; +/* FIXME: replace above bitfields with MASK/SHIFT defines below */ +#define RX_PKT_STATUS_SIZE_MASK 0x7FF +#define RX_PKT_STATUS_SIZE_SHIFT 0 +#define RX_PKT_STATUS_OK_MASK 0x1 +#define RX_PKT_STATUS_OK_SHIFT 16 +#define RX_PKT_STATUS_BCAST_MASK 0x1 +#define RX_PKT_STATUS_BCAST_SHIFT 17 +#define RX_PKT_STATUS_MCAST_MASK 0x1 +#define RX_PKT_STATUS_MCAST_SHIFT 18 +#define RX_PKT_STATUS_PAUSE_MASK 0x1 +#define RX_PKT_STATUS_PAUSE_SHIFT 19 +#define RX_PKT_STATUS_CTRL_MASK 0x1 +#define RX_PKT_STATUS_CTRL_SHIFT 20 +#define RX_PKT_STATUS_CRC_MASK 0x1 +#define RX_PKT_STATUS_CRC_SHIFT 21 +#define RX_PKT_STATUS_CODE_MASK 0x1 +#define RX_PKT_STATUS_CODE_SHIFT 22 +#define RX_PKT_STATUS_RUNT_MASK 0x1 +#define RX_PKT_STATUS_RUNT_SHIFT 23 +#define RX_PKT_STATUS_FRAG_MASK 0x1 +#define RX_PKT_STATUS_FRAG_SHIFT 24 +#define RX_PKT_STATUS_TRUNK_MASK 0x1 +#define RX_PKT_STATUS_TRUNK_SHIFT 25 +#define RX_PKT_STATUS_ALIGN_MASK 0x1 +#define RX_PKT_STATUS_ALIGN_SHIFT 26 +#define RX_PKT_STATUS_VLAN_MASK 0x1 +#define RX_PKT_STATUS_VLAN_SHIFT 27 +#define RX_PKT_STATUS_UPDATE_MASK 0x1 +#define RX_PKT_STATUS_UPDATE_SHIFT 31 +#define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF +#define RX_PKT_STATUS_VLAN_TAG_SHIFT 32 + +struct rx_desc { + struct rx_pkt_status status; + unsigned char packet[1536-sizeof(struct rx_pkt_status)]; +}; + +enum atl2_speed_duplex { + atl2_10_half = 0, + atl2_10_full = 1, + atl2_100_half = 2, + atl2_100_full = 3 +}; + +struct atl2_spi_flash_dev { + const char *manu_name; /* manufacturer id */ + /* op-code */ + u8 cmdWRSR; + u8 cmdREAD; + u8 cmdPROGRAM; + u8 cmdWREN; + u8 cmdWRDI; + u8 cmdRDSR; + u8 cmdRDID; + u8 cmdSECTOR_ERASE; + u8 cmdCHIP_ERASE; +}; + +/* Structure containing variables used by the shared code (atl2_hw.c) */ +struct atl2_hw { + u8 __iomem *hw_addr; + void *back; + + u8 preamble_len; + u8 max_retry; /* Retransmission maximum, afterwards the + * packet will be discarded. */ + u8 jam_ipg; /* IPG to start JAM for collision based flow + * control in half-duplex mode. In unit of + * 8-bit time. */ + u8 ipgt; /* Desired back to back inter-packet gap. The + * default is 96-bit time. */ + u8 min_ifg; /* Minimum number of IFG to enforce in between + * RX frames. Frame gap below such IFP is + * dropped. 
*/ + u8 ipgr1; /* 64bit Carrier-Sense window */ + u8 ipgr2; /* 96-bit IPG window */ + u8 retry_buf; /* When half-duplex mode, should hold some + * bytes for mac retry . (8*4bytes unit) */ + + u16 fc_rxd_hi; + u16 fc_rxd_lo; + u16 lcol; /* Collision Window */ + u16 max_frame_size; + + u16 MediaType; + u16 autoneg_advertised; + u16 pci_cmd_word; + + u16 mii_autoneg_adv_reg; + + u32 mem_rang; + u32 txcw; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u16 phy_spd_default; + + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* spi flash */ + u8 flash_vendor; + + u8 dma_fairness; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + /* FIXME */ + /* bool phy_preamble_sup; */ + bool phy_configured; +}; + +#endif /* _ATL2_HW_H_ */ + +struct atl2_ring_header { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; +}; + +/* board specific private data structure */ +struct atl2_adapter { + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t stats_lock; + + struct work_struct reset_task; + struct work_struct link_chg_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + unsigned long cfg_phy; + bool mac_disabled; + + /* All Descriptor memory */ + dma_addr_t ring_dma; + void *ring_vir_addr; + int ring_size; + + struct tx_pkt_header *txd_ring; + dma_addr_t txd_dma; + + struct tx_pkt_status *txs_ring; + dma_addr_t txs_dma; + + struct rx_desc *rxd_ring; + dma_addr_t rxd_dma; + + u32 txd_ring_size; /* bytes per unit */ + u32 txs_ring_size; /* dwords per unit */ + u32 rxd_ring_size; /* 1536 bytes per unit */ + + /* read /write ptr: */ + /* host */ + u32 txd_write_ptr; + u32 txs_next_clear; + u32 rxd_read_ptr; + + /* nic */ + atomic_t txd_read_ptr; + atomic_t txs_write_ptr; + u32 rxd_write_ptr; + + /* Interrupt Moderator timer ( 2us resolution) */ + u16 imt; + /* Interrupt Clear timer (2us resolution) */ + u16 ict; + + unsigned long flags; + /* structs defined in atl2_hw.h */ + u32 bd_number; /* board number */ + bool pci_using_64; + bool have_msi; + struct atl2_hw hw; + + u32 usr_cmd; + /* FIXME */ + /* u32 regs_buff[ATL2_REGS_LEN]; */ + u32 pci_state[16]; + + u32 *config_space; +}; + +enum atl2_state_t { + __ATL2_TESTING, + __ATL2_RESETTING, + __ATL2_DOWN +}; + +#endif /* _ATL2_H_ */ diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c new file mode 100644 index 000000000..46a622cce --- /dev/null +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -0,0 +1,279 @@ +/* atlx.c -- common functions for Attansic network drivers + * + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* Including this file like a header is a temporary hack, I promise. -- CHS */ +#ifndef ATLX_C +#define ATLX_C + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atlx.h" + +static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); +static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); +static void atlx_set_mac_addr(struct atl1_hw *hw); + +static struct atlx_spi_flash_dev flash_table[] = { +/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */ + {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, + {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, + {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, +}; + +static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atlx_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * atlx_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atlx_set_mac(struct net_device *netdev, void *p) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atlx_set_mac_addr(&adapter->hw); + return 0; +} + +static void atlx_check_for_link(struct atlx_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + + spin_lock(&adapter->lock); + adapter->phy_timer_pending = false; + atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->lock); + + /* notify upper layer link down ASAP */ + if (!(phy_data & BMSR_LSTATUS)) { + /* Link Down */ + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + dev_info(&adapter->pdev->dev, "%s link is down\n", + netdev->name); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +/** + * atlx_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
+ */ +static void atlx_set_multi(struct net_device *netdev) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + struct atlx_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 rctl; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); + if (netdev->flags & IFF_PROMISC) + rctl |= MAC_CTRL_PROMIS_EN; + else if (netdev->flags & IFF_ALLMULTI) { + rctl |= MAC_CTRL_MC_ALL_EN; + rctl &= ~MAC_CTRL_PROMIS_EN; + } else + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + + iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); + + /* clear the old settings from the multicast hash table */ + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); + + /* compute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atlx_hash_mc_addr(hw, ha->addr); + atlx_hash_set(hw, hash_value); + } +} + +static inline void atlx_imr_set(struct atlx_adapter *adapter, + unsigned int imr) +{ + iowrite32(imr, adapter->hw.hw_addr + REG_IMR); + ioread32(adapter->hw.hw_addr + REG_IMR); +} + +/** + * atlx_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static void atlx_irq_enable(struct atlx_adapter *adapter) +{ + atlx_imr_set(adapter, IMR_NORMAL_MASK); + adapter->int_enabled = true; +} + +/** + * atlx_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static void atlx_irq_disable(struct atlx_adapter *adapter) +{ + adapter->int_enabled = false; + atlx_imr_set(adapter, 0); + synchronize_irq(adapter->pdev->irq); +} + +static void atlx_clear_phy_int(struct atlx_adapter *adapter) +{ + u16 phy_data; + unsigned long flags; + + spin_lock_irqsave(&adapter->lock, flags); + atlx_read_phy_reg(&adapter->hw, 19, &phy_data); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +/** + * atlx_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atlx_tx_timeout(struct net_device *netdev) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_dev_task); +} + +/* + * atlx_link_chg_task - deal with link change event Out of interrupt context + */ +static void atlx_link_chg_task(struct work_struct *work) +{ + struct atlx_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atlx_adapter, link_chg_task); + + spin_lock_irqsave(&adapter->lock, flags); + atlx_check_link(adapter); + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + *ctrl |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + *ctrl &= ~MAC_CTRL_RMV_VLAN; + } +} + +static void atlx_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atlx_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u32 ctrl; + + spin_lock_irqsave(&adapter->lock, flags); + /* atlx_irq_disable(adapter); FIXME: confirm/remove */ + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); + __atlx_vlan_mode(features, &ctrl); + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); + /* atlx_irq_enable(adapter); FIXME */ + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static void atlx_restore_vlan(struct atlx_adapter *adapter) +{ + atlx_vlan_mode(adapter->netdev, 
adapter->netdev->features); +} + +static netdev_features_t atlx_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int atlx_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atlx_vlan_mode(netdev, features); + + return 0; +} + +#endif /* ATLX_C */ diff --git a/drivers/net/ethernet/atheros/atlx/atlx.h b/drivers/net/ethernet/atheros/atlx/atlx.h new file mode 100644 index 000000000..448f5dcc0 --- /dev/null +++ b/drivers/net/ethernet/atheros/atlx/atlx.h @@ -0,0 +1,502 @@ +/* atlx_hw.h -- common hardware definitions for Attansic network drivers + * + * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. + * Copyright(c) 2006 - 2007 Chris Snook + * Copyright(c) 2006 - 2008 Jay Cliburn + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef ATLX_H +#define ATLX_H + +#include +#include + +#define ATLX_ERR_PHY 2 +#define ATLX_ERR_PHY_SPEED 7 +#define ATLX_ERR_PHY_RES 8 + +#define SPEED_0 0xffff +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define MEDIA_TYPE_AUTO_SENSOR 0 + +/* register definitions */ +#define REG_PM_CTRLSTAT 0x44 + +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xFF +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF +#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR /* FIXME: define or remove */ +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + +#define REG_PCIE_DLL_TX_CTRL1 0x1104 +#define PCIE_DLL_TX_CTRL1_SEL_NOR_CLK 0x400 +#define PCIE_DLL_TX_CTRL1_DEF 0x568 + +#define REG_LTSSM_TEST_MODE 0x12FC 
+#define LTSSM_TEST_MODE_DEF 0x6500 + +/* Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xFF +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xFF + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ Moderator Timer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 + +#define REG_PHY_ENABLE 0x140C + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xFFFF +#define MDIO_DATA_SHIFT 0 +#define MDIO_REG_ADDR_MASK 0x1F +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 +#define MDIO_SUP_PREAMBLE 0x400000 +#define MDIO_START 0x800000 +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141C +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 +#define BIST0_FUSE_FLAG 0x4 +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 1 +#define SERDES_LOCK_DETECT_EN 2 + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 +#define MAC_CTRL_RX_EN 2 +#define MAC_CTRL_TX_FLOW 4 +#define MAC_CTRL_RX_FLOW 8 +#define MAC_CTRL_LOOPBACK 0x10 +#define MAC_CTRL_DUPLX 0x20 +#define MAC_CTRL_ADD_CRC 0x40 +#define MAC_CTRL_PAD 0x80 +#define MAC_CTRL_LENCHK 0x100 +#define MAC_CTRL_HUGE_EN 0x200 +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_PRMLEN_MASK 0xF +#define MAC_CTRL_RMV_VLAN 0x4000 +#define MAC_CTRL_PROMIS_EN 0x8000 +#define MAC_CTRL_MC_ALL_EN 0x2000000 +#define MAC_CTRL_BC_EN 0x4000000 + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 +#define MAC_IPG_IFG_IPGT_MASK 0x7F +#define MAC_IPG_IFG_MIFG_SHIFT 8 +#define MAC_IPG_IFG_MIFG_MASK 0xFF +#define MAC_IPG_IFG_IPGR1_SHIFT 16 +#define MAC_IPG_IFG_IPGR1_MASK 0x7F +#define MAC_IPG_IFG_IPGR2_SHIFT 24 +#define MAC_IPG_IFG_IPGR2_MASK 0x7F + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3FF +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xF +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xF +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xF + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149C + +/* Wake-On-Lan control register */ 
+#define REG_WOL_CTRL 0x14A0 +#define WOL_PATTERN_EN 0x1 +#define WOL_PATTERN_PME_EN 0x2 +#define WOL_MAGIC_EN 0x4 +#define WOL_MAGIC_PME_EN 0x8 +#define WOL_LINK_CHG_EN 0x10 +#define WOL_LINK_CHG_PME_EN 0x20 +#define WOL_PATTERN_ST 0x100 +#define WOL_MAGIC_ST 0x200 +#define WOL_LINKCHG_ST 0x400 +#define WOL_PT0_EN 0x10000 +#define WOL_PT1_EN 0x20000 +#define WOL_PT2_EN 0x40000 +#define WOL_PT3_EN 0x80000 +#define WOL_PT4_EN 0x100000 +#define WOL_PT0_MATCH 0x1000000 +#define WOL_PT1_MATCH 0x2000000 +#define WOL_PT2_MATCH 0x4000000 +#define WOL_PT3_MATCH 0x8000000 +#define WOL_PT4_MATCH 0x10000000 + +/* Internal SRAM Partition Register, high 32 bits */ +#define REG_SRAM_RFD_ADDR 0x1500 + +/* Descriptor Control register, high 32 bits */ +#define REG_DESC_BASE_ADDR_HI 0x1540 + +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_INT 0x80000000 + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + +#define REG_RFD_RRD_IDX 0x1800 +#define REG_TPD_IDX 0x1804 + +/* MII definitions */ + +/* PHY Common Register */ +#define MII_ATLX_CR 0x09 +#define MII_ATLX_SR 0x0A +#define MII_ATLX_ESR 0x0F +#define MII_ATLX_PSCR 0x10 +#define MII_ATLX_PSSR 0x11 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, + * 00=10 + */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, + * 00=10 + */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_MASK 0x2040 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Ext register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext stat info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Link partner ability register */ +#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ +#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex 
*/ +#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ +#define MII_LPA_PAUSE 0x0400 /* PAUSE */ +#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ +#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ +#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ +#define MII_LPA_NPAGE 0x8000 /* Next page bit */ + +/* Autoneg Advertisement Register */ +#define MII_AR_SELECTOR_FIELD 0x0001 /* IEEE 802.3 CSMA/CD */ +#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ +#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Dir bit */ +#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability support */ +#define MII_AR_SPEED_MASK 0x01E0 +#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 + +/* 1000BASE-T Control Register */ +#define MII_ATLX_CR_1000T_HD_CAPS 0x0100 /* Adv 1000T HD cap */ +#define MII_ATLX_CR_1000T_FD_CAPS 0x0200 /* Adv 1000T FD cap */ +#define MII_ATLX_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device, + * 0=DTE device */ +#define MII_ATLX_CR_1000T_MS_VALUE 0x0800 /* 1=Config PHY as Master, + * 0=Configure PHY as Slave */ +#define MII_ATLX_CR_1000T_MS_ENABLE 0x1000 /* 1=Man Master/Slave config, + * 0=Auto Master/Slave config + */ +#define MII_ATLX_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define MII_ATLX_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define MII_ATLX_CR_1000T_TEST_MODE_2 0x4000 /* Master Xmit Jitter test */ +#define MII_ATLX_CR_1000T_TEST_MODE_3 0x6000 /* Slave Xmit Jitter test */ +#define MII_ATLX_CR_1000T_TEST_MODE_4 0x8000 /* Xmitter Distortion test */ +#define MII_ATLX_CR_1000T_SPEED_MASK 0x0300 +#define MII_ATLX_CR_1000T_DEFAULT_CAP_MASK 0x0300 + +/* 1000BASE-T Status Register */ +#define MII_ATLX_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define MII_ATLX_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define MII_ATLX_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master + * 0=Slave + */ +#define MII_ATLX_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config + * fault */ +#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 + +/* Extended Status Register */ +#define MII_ATLX_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define MII_ATLX_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define MII_ATLX_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define MII_ATLX_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +/* ATLX PHY Specific Control Register */ +#define MII_ATLX_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Func disabled */ +#define MII_ATLX_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enbld */ +#define MII_ATLX_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_ATLX_PSCR_MAC_POWERDOWN 0x0008 +#define MII_ATLX_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low + * 0=CLK125 toggling + */ +#define MII_ATLX_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, + * Manual MDI configuration + */ +#define MII_ATLX_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_ATLX_PSCR_AUTO_X_1000T 0x0040 
/* 1000BASE-T: Auto crossover + * 100BASE-TX/10BASE-T: MDI + * Mode */ +#define MII_ATLX_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. + */ +#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended + * 10BASE-T distance + * (Lower 10BASE-T RX + * Threshold) + * 0=Normal 10BASE-T RX + * Threshold + */ +#define MII_ATLX_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in + * 100BASE-TX + * 0=MII interface in + * 100BASE-TX + */ +#define MII_ATLX_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler dsbl */ +#define MII_ATLX_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_ATLX_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_ATLX_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_ATLX_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* ATLX PHY Specific Status Register */ +#define MII_ATLX_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_ATLX_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_ATLX_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/* PCI Command Register Bit Definitions */ +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +/* Wake Up Filter Control */ +#define ATLX_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define ATLX_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define ATLX_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define ATLX_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define ATLX_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ + +/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */ +#define EEPROM_SUM 0xBABA + +struct atlx_spi_flash_dev { + const char *manu_name; /* manufacturer id */ + /* op-code */ + u8 cmd_wrsr; + u8 cmd_read; + u8 cmd_program; + u8 cmd_wren; + u8 cmd_wrdi; + u8 cmd_rdsr; + u8 cmd_rdid; + u8 cmd_sector_erase; + u8 cmd_chip_erase; +}; + +#endif /* ATLX_H */ diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig new file mode 100644 index 000000000..a6f9142b9 --- /dev/null +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -0,0 +1,164 @@ +# +# Broadcom device configuration +# + +config NET_VENDOR_BROADCOM + bool "Broadcom devices" + default y + depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \ + SIBYTE_SB1xxx_SOC + ---help--- + If you have a network (Ethernet) chipset belonging to this class, + say Y. + + Note that the answer to this question does not directly affect + the kernel: saying N will just case the configurator to skip all + the questions regarding AMD chipsets. If you say Y, you will be asked + for your specific chipset/driver in the following questions. 
+ +if NET_VENDOR_BROADCOM + +config B44 + tristate "Broadcom 440x/47xx ethernet support" + depends on SSB_POSSIBLE && HAS_DMA + select SSB + select MII + select PHYLIB + ---help--- + If you have a network (Ethernet) controller of this type, say Y + or M and read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called b44. + +# Auto-select SSB PCI-HOST support, if possible +config B44_PCI_AUTOSELECT + bool + depends on B44 && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B44_PCICORE_AUTOSELECT + bool + depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B44_PCI + bool + depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT + default y + +config BCM63XX_ENET + tristate "Broadcom 63xx internal mac support" + depends on BCM63XX + select MII + select PHYLIB + help + This driver supports the ethernet MACs in the Broadcom 63xx + MIPS chipset family (BCM63XX). + +config BCMGENET + tristate "Broadcom GENET internal MAC support" + select MII + select PHYLIB + select FIXED_PHY + select BCM7XXX_PHY + help + This driver supports the built-in Ethernet MACs found in the + Broadcom BCM7xxx Set Top Box family chipset. + +config BNX2 + tristate "QLogic bnx2 support" + depends on PCI + select CRC32 + select FW_LOADER + ---help--- + This driver supports QLogic bnx2 gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called bnx2. This is recommended. + +config CNIC + tristate "QLogic CNIC support" + depends on PCI && (IPV6 || IPV6=n) + select BNX2 + select UIO + ---help--- + This driver supports offload features of QLogic bnx2 gigabit + Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called cnic. This is recommended. + +config SB1250_MAC + tristate "SB1250 Gigabit Ethernet support" + depends on SIBYTE_SB1xxx_SOC + select PHYLIB + ---help--- + This driver supports Gigabit Ethernet interfaces based on the + Broadcom SiByte family of System-On-a-Chip parts. They include + the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455 + and BCM1480 chips. + + To compile this driver as a module, choose M here: the module + will be called sb1250-mac. + +config TIGON3 + tristate "Broadcom Tigon3 support" + depends on PCI + select PHYLIB + select HWMON + select PTP_1588_CLOCK + ---help--- + This driver supports Broadcom Tigon3 based gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called tg3. This is recommended. + +config BNX2X + tristate "Broadcom NetXtremeII 10Gb support" + depends on PCI + select PTP_1588_CLOCK + select FW_LOADER + select ZLIB_INFLATE + select LIBCRC32C + select MDIO + ---help--- + This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards. + To compile this driver as a module, choose M here: the module + will be called bnx2x. This is recommended. + +config BNX2X_SRIOV + bool "Broadcom 578xx and 57712 SR-IOV support" + depends on BNX2X && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support in the 578xx and 57712 products. This + allows for virtual function acceleration in virtual environments. 
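# Illustration only -- FOO_NIC below is a hypothetical symbol, not part of
# this Kconfig.  The entries above mix "depends on" (the option stays hidden
# until its prerequisite is already enabled) with "select" (the named symbol
# is force-enabled whenever the option itself is chosen), which is why BNX2X
# depends on PCI but selects helpers such as FW_LOADER.  A minimal sketch of
# that pattern:
#
#	config FOO_NIC
#		tristate "Example NIC driver"
#		depends on PCI		# only offered once PCI support is on
#		select FW_LOADER	# firmware loading pulled in automatically
#		---help---
#		  Illustrative entry only; it does not correspond to real hardware.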
+ +config BGMAC + tristate "BCMA bus GBit core support" + depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) + select PHYLIB + ---help--- + This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. + They can be found on BCM47xx SoCs and provide gigabit ethernet. + In case of using this driver on BCM4706 it's also requires to enable + BCMA_DRIVER_GMAC_CMN to make it work. + +config SYSTEMPORT + tristate "Broadcom SYSTEMPORT internal MAC support" + depends on OF + select MII + select PHYLIB + select FIXED_PHY + help + This driver supports the built-in Ethernet MACs found in the + Broadcom BCM7xxx Set Top Box family chipset using an internal + Ethernet switch. + +endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile new file mode 100644 index 000000000..e2a958a65 --- /dev/null +++ b/drivers/net/ethernet/broadcom/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Broadcom network device drivers. +# + +obj-$(CONFIG_B44) += b44.o +obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o +obj-$(CONFIG_BCMGENET) += genet/ +obj-$(CONFIG_BNX2) += bnx2.o +obj-$(CONFIG_CNIC) += cnic.o +obj-$(CONFIG_BNX2X) += bnx2x/ +obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o +obj-$(CONFIG_TIGON3) += tg3.o +obj-$(CONFIG_BGMAC) += bgmac.o +obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c new file mode 100644 index 000000000..a3b1c07ae --- /dev/null +++ b/drivers/net/ethernet/broadcom/b44.c @@ -0,0 +1,2615 @@ +/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver. + * + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org) + * Copyright (C) 2006 Broadcom Corporation. + * Copyright (C) 2007 Michael Buesch + * Copyright (C) 2013 Hauke Mehrtens + * + * Distribute under GPL. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#include "b44.h" + +#define DRV_MODULE_NAME "b44" +#define DRV_MODULE_VERSION "2.0" +#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver" + +#define B44_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define B44_TX_TIMEOUT (5 * HZ) + +/* hardware minimum and maximum for a single frame's data payload */ +#define B44_MIN_MTU 60 +#define B44_MAX_MTU 1500 + +#define B44_RX_RING_SIZE 512 +#define B44_DEF_RX_RING_PENDING 200 +#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \ + B44_RX_RING_SIZE) +#define B44_TX_RING_SIZE 512 +#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1) +#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \ + B44_TX_RING_SIZE) + +#define TX_RING_GAP(BP) \ + (B44_TX_RING_SIZE - (BP)->tx_pending) +#define TX_BUFFS_AVAIL(BP) \ + (((BP)->tx_cons <= (BP)->tx_prod) ? 
\ + (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \ + (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) +#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) + +#define RX_PKT_OFFSET (RX_HEADER_LEN + 2) +#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET) + +/* minimum number of free TX descriptors required to wake up TX process */ +#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) + +/* b44 internal pattern match filter info */ +#define B44_PATTERN_BASE 0x400 +#define B44_PATTERN_SIZE 0x80 +#define B44_PMASK_BASE 0x600 +#define B44_PMASK_SIZE 0x10 +#define B44_MAX_PATTERNS 16 +#define B44_ETHIPV6UDP_HLEN 62 +#define B44_ETHIPV4UDP_HLEN 42 + +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller"); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ +module_param(b44_debug, int, 0); +MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); + + +#ifdef CONFIG_B44_PCI +static const struct pci_device_id b44_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, + { 0 } /* terminate list with empty entry */ +}; +MODULE_DEVICE_TABLE(pci, b44_pci_tbl); + +static struct pci_driver b44_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_pci_tbl, +}; +#endif /* CONFIG_B44_PCI */ + +static const struct ssb_device_id b44_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), + {}, +}; +MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); + +static void b44_halt(struct b44 *); +static void b44_init_rings(struct b44 *); + +#define B44_FULL_RESET 1 +#define B44_FULL_RESET_SKIP_PHY 2 +#define B44_PARTIAL_RESET 3 +#define B44_CHIP_RESET_FULL 4 +#define B44_CHIP_RESET_PARTIAL 5 + +static void b44_init_hw(struct b44 *, int); + +static int dma_desc_sync_size; +static int instance; + +static const char b44_gstrings[][ETH_GSTRING_LEN] = { +#define _B44(x...) # x, +B44_STAT_REG_DECLARE +#undef _B44 +}; + +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + dma_sync_single_for_device(sdev->dma_dev, dma_base + offset, + dma_desc_sync_size, dir); +} + +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset, + dma_desc_sync_size, dir); +} + +static inline unsigned long br32(const struct b44 *bp, unsigned long reg) +{ + return ssb_read32(bp->sdev, reg); +} + +static inline void bw32(const struct b44 *bp, + unsigned long reg, unsigned long val) +{ + ssb_write32(bp->sdev, reg, val); +} + +static int b44_wait_bit(struct b44 *bp, unsigned long reg, + u32 bit, unsigned long timeout, const int clear) +{ + unsigned long i; + + for (i = 0; i < timeout; i++) { + u32 val = br32(bp, reg); + + if (clear && !(val & bit)) + break; + if (!clear && (val & bit)) + break; + udelay(10); + } + if (i == timeout) { + if (net_ratelimit()) + netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n", + bit, reg, clear ? 
"clear" : "set"); + + return -ENODEV; + } + return 0; +} + +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) +{ + u32 val; + + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | + (index << CAM_CTRL_INDEX_SHIFT))); + + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); + + val = br32(bp, B44_CAM_DATA_LO); + + data[2] = (val >> 24) & 0xFF; + data[3] = (val >> 16) & 0xFF; + data[4] = (val >> 8) & 0xFF; + data[5] = (val >> 0) & 0xFF; + + val = br32(bp, B44_CAM_DATA_HI); + + data[0] = (val >> 8) & 0xFF; + data[1] = (val >> 0) & 0xFF; +} + +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) +{ + u32 val; + + val = ((u32) data[2]) << 24; + val |= ((u32) data[3]) << 16; + val |= ((u32) data[4]) << 8; + val |= ((u32) data[5]) << 0; + bw32(bp, B44_CAM_DATA_LO, val); + val = (CAM_DATA_HI_VALID | + (((u32) data[0]) << 8) | + (((u32) data[1]) << 0)); + bw32(bp, B44_CAM_DATA_HI, val); + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE | + (index << CAM_CTRL_INDEX_SHIFT))); + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); +} + +static inline void __b44_disable_ints(struct b44 *bp) +{ + bw32(bp, B44_IMASK, 0); +} + +static void b44_disable_ints(struct b44 *bp) +{ + __b44_disable_ints(bp); + + /* Flush posted writes. */ + br32(bp, B44_IMASK); +} + +static void b44_enable_ints(struct b44 *bp) +{ + bw32(bp, B44_IMASK, bp->imask); +} + +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) +{ + int err; + + bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); + bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | + (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | + (reg << MDIO_DATA_RA_SHIFT) | + (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); + err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); + *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA; + + return err; +} + +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) +{ + bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); + bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | + (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | + (reg << MDIO_DATA_RA_SHIFT) | + (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | + (val & MDIO_DATA_DATA))); + return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); +} + +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) +{ + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + return 0; + + return __b44_readphy(bp, bp->phy_addr, reg, val); +} + +static inline int b44_writephy(struct b44 *bp, int reg, u32 val) +{ + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + return 0; + + return __b44_writephy(bp, bp->phy_addr, reg, val); +} + +/* miilib interface */ +static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location) +{ + u32 val; + struct b44 *bp = netdev_priv(dev); + int rc = __b44_readphy(bp, phy_id, location, &val); + if (rc) + return 0xffffffff; + return val; +} + +static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location, + int val) +{ + struct b44 *bp = netdev_priv(dev); + __b44_writephy(bp, phy_id, location, val); +} + +static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location) +{ + u32 val; + struct b44 *bp = bus->priv; + int rc = __b44_readphy(bp, phy_id, location, &val); + if (rc) + return 0xffffffff; + return val; +} + +static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location, + u16 val) +{ + struct b44 *bp = bus->priv; + return __b44_writephy(bp, phy_id, location, val); +} + +static int b44_phy_reset(struct b44 *bp) +{ + u32 
val; + int err; + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + return 0; + err = b44_writephy(bp, MII_BMCR, BMCR_RESET); + if (err) + return err; + udelay(100); + err = b44_readphy(bp, MII_BMCR, &val); + if (!err) { + if (val & BMCR_RESET) { + netdev_err(bp->dev, "PHY Reset would not complete\n"); + err = -ENODEV; + } + } + + return err; +} + +static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags) +{ + u32 val; + + bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE); + bp->flags |= pause_flags; + + val = br32(bp, B44_RXCONFIG); + if (pause_flags & B44_FLAG_RX_PAUSE) + val |= RXCONFIG_FLOW; + else + val &= ~RXCONFIG_FLOW; + bw32(bp, B44_RXCONFIG, val); + + val = br32(bp, B44_MAC_FLOW); + if (pause_flags & B44_FLAG_TX_PAUSE) + val |= (MAC_FLOW_PAUSE_ENAB | + (0xc0 & MAC_FLOW_RX_HI_WATER)); + else + val &= ~MAC_FLOW_PAUSE_ENAB; + bw32(bp, B44_MAC_FLOW, val); +} + +static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) +{ + u32 pause_enab = 0; + + /* The driver supports only rx pause by default because + the b44 mac tx pause mechanism generates excessive + pause frames. + Use ethtool to turn on b44 tx pause if necessary. + */ + if ((local & ADVERTISE_PAUSE_CAP) && + (local & ADVERTISE_PAUSE_ASYM)){ + if ((remote & LPA_PAUSE_ASYM) && + !(remote & LPA_PAUSE_CAP)) + pause_enab |= B44_FLAG_RX_PAUSE; + } + + __b44_set_flow_ctrl(bp, pause_enab); +} + +#ifdef CONFIG_BCM47XX +#include +static void b44_wap54g10_workaround(struct b44 *bp) +{ + char buf[20]; + u32 val; + int err; + + /* + * workaround for bad hardware design in Linksys WAP54G v1.0 + * see https://dev.openwrt.org/ticket/146 + * check and reset bit "isolate" + */ + if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0) + return; + if (simple_strtoul(buf, NULL, 0) == 2) { + err = __b44_readphy(bp, 0, MII_BMCR, &val); + if (err) + goto error; + if (!(val & BMCR_ISOLATE)) + return; + val &= ~BMCR_ISOLATE; + err = __b44_writephy(bp, 0, MII_BMCR, val); + if (err) + goto error; + } + return; +error: + pr_warn("PHY: cannot reset MII transceiver isolate bit\n"); +} +#else +static inline void b44_wap54g10_workaround(struct b44 *bp) +{ +} +#endif + +static int b44_setup_phy(struct b44 *bp) +{ + u32 val; + int err; + + b44_wap54g10_workaround(bp); + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + return 0; + if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) + goto out; + if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, + val & MII_ALEDCTRL_ALLMSK)) != 0) + goto out; + if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0) + goto out; + if ((err = b44_writephy(bp, B44_MII_TLEDCTRL, + val | MII_TLEDCTRL_ENABLE)) != 0) + goto out; + + if (!(bp->flags & B44_FLAG_FORCE_LINK)) { + u32 adv = ADVERTISE_CSMA; + + if (bp->flags & B44_FLAG_ADV_10HALF) + adv |= ADVERTISE_10HALF; + if (bp->flags & B44_FLAG_ADV_10FULL) + adv |= ADVERTISE_10FULL; + if (bp->flags & B44_FLAG_ADV_100HALF) + adv |= ADVERTISE_100HALF; + if (bp->flags & B44_FLAG_ADV_100FULL) + adv |= ADVERTISE_100FULL; + + if (bp->flags & B44_FLAG_PAUSE_AUTO) + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0) + goto out; + if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE | + BMCR_ANRESTART))) != 0) + goto out; + } else { + u32 bmcr; + + if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0) + goto out; + bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100); + if (bp->flags & B44_FLAG_100_BASE_T) + bmcr |= BMCR_SPEED100; + if (bp->flags & B44_FLAG_FULL_DUPLEX) + bmcr |= BMCR_FULLDPLX; + if ((err = 
b44_writephy(bp, MII_BMCR, bmcr)) != 0) + goto out; + + /* Since we will not be negotiating there is no safe way + * to determine if the link partner supports flow control + * or not. So just disable it completely in this case. + */ + b44_set_flow_ctrl(bp, 0, 0); + } + +out: + return err; +} + +static void b44_stats_update(struct b44 *bp) +{ + unsigned long reg; + u64 *val; + + val = &bp->hw_stats.tx_good_octets; + u64_stats_update_begin(&bp->hw_stats.syncp); + + for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } + + /* Pad */ + reg += 8*4UL; + + for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { + *val++ += br32(bp, reg); + } + + u64_stats_update_end(&bp->hw_stats.syncp); +} + +static void b44_link_report(struct b44 *bp) +{ + if (!netif_carrier_ok(bp->dev)) { + netdev_info(bp->dev, "Link is down\n"); + } else { + netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n", + (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10, + (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half"); + + netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n", + (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off", + (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off"); + } +} + +static void b44_check_phy(struct b44 *bp) +{ + u32 bmsr, aux; + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) { + bp->flags |= B44_FLAG_100_BASE_T; + if (!netif_carrier_ok(bp->dev)) { + u32 val = br32(bp, B44_TX_CTRL); + if (bp->flags & B44_FLAG_FULL_DUPLEX) + val |= TX_CTRL_DUPLEX; + else + val &= ~TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + netif_carrier_on(bp->dev); + b44_link_report(bp); + } + return; + } + + if (!b44_readphy(bp, MII_BMSR, &bmsr) && + !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && + (bmsr != 0xffff)) { + if (aux & MII_AUXCTRL_SPEED) + bp->flags |= B44_FLAG_100_BASE_T; + else + bp->flags &= ~B44_FLAG_100_BASE_T; + if (aux & MII_AUXCTRL_DUPLEX) + bp->flags |= B44_FLAG_FULL_DUPLEX; + else + bp->flags &= ~B44_FLAG_FULL_DUPLEX; + + if (!netif_carrier_ok(bp->dev) && + (bmsr & BMSR_LSTATUS)) { + u32 val = br32(bp, B44_TX_CTRL); + u32 local_adv, remote_adv; + + if (bp->flags & B44_FLAG_FULL_DUPLEX) + val |= TX_CTRL_DUPLEX; + else + val &= ~TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + + if (!(bp->flags & B44_FLAG_FORCE_LINK) && + !b44_readphy(bp, MII_ADVERTISE, &local_adv) && + !b44_readphy(bp, MII_LPA, &remote_adv)) + b44_set_flow_ctrl(bp, local_adv, remote_adv); + + /* Link now up */ + netif_carrier_on(bp->dev); + b44_link_report(bp); + } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) { + /* Link now down */ + netif_carrier_off(bp->dev); + b44_link_report(bp); + } + + if (bmsr & BMSR_RFAULT) + netdev_warn(bp->dev, "Remote fault detected in PHY\n"); + if (bmsr & BMSR_JCD) + netdev_warn(bp->dev, "Jabber detected in PHY\n"); + } +} + +static void b44_timer(unsigned long __opaque) +{ + struct b44 *bp = (struct b44 *) __opaque; + + spin_lock_irq(&bp->lock); + + b44_check_phy(bp); + + b44_stats_update(bp); + + spin_unlock_irq(&bp->lock); + + mod_timer(&bp->timer, round_jiffies(jiffies + HZ)); +} + +static void b44_tx(struct b44 *bp) +{ + u32 cur, cons; + unsigned bytes_compl = 0, pkts_compl = 0; + + cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK; + cur /= sizeof(struct dma_desc); + + /* XXX needs updating when NETIF_F_SG is supported */ + for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) { + struct ring_info *rp = &bp->tx_buffers[cons]; + struct sk_buff *skb = rp->skb; + + BUG_ON(skb == NULL); + + dma_unmap_single(bp->sdev->dma_dev, + rp->mapping, + 
skb->len, + DMA_TO_DEVICE); + rp->skb = NULL; + + bytes_compl += skb->len; + pkts_compl++; + + dev_kfree_skb_irq(skb); + } + + netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); + bp->tx_cons = cons; + if (netif_queue_stopped(bp->dev) && + TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) + netif_wake_queue(bp->dev); + + bw32(bp, B44_GPTIMER, 0); +} + +/* Works like this. This chip writes a 'struct rx_header" 30 bytes + * before the DMA address you give it. So we allocate 30 more bytes + * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then + * point the chip at 30 bytes past where the rx_header will go. + */ +static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) +{ + struct dma_desc *dp; + struct ring_info *src_map, *map; + struct rx_header *rh; + struct sk_buff *skb; + dma_addr_t mapping; + int dest_idx; + u32 ctrl; + + src_map = NULL; + if (src_idx >= 0) + src_map = &bp->rx_buffers[src_idx]; + dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); + map = &bp->rx_buffers[dest_idx]; + skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ); + if (skb == NULL) + return -ENOMEM; + + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + + /* Hardware bug work-around, the chip is unable to do PCI DMA + to/from anything above 1GB :-( */ + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { + /* Sigh... */ + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, + RX_PKT_BUF_SZ, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA); + if (skb == NULL) + return -ENOMEM; + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || + mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return -ENOMEM; + } + bp->force_copybreak = 1; + } + + rh = (struct rx_header *) skb->data; + + rh->len = 0; + rh->flags = 0; + + map->skb = skb; + map->mapping = mapping; + + if (src_map != NULL) + src_map->skb = NULL; + + ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ); + if (dest_idx == (B44_RX_RING_SIZE - 1)) + ctrl |= DESC_CTRL_EOT; + + dp = &bp->rx_ring[dest_idx]; + dp->ctrl = cpu_to_le32(ctrl); + dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(*dp), + DMA_BIDIRECTIONAL); + + return RX_PKT_BUF_SZ; +} + +static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) +{ + struct dma_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct rx_header *rh; + int dest_idx; + __le32 ctrl; + + dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); + dest_desc = &bp->rx_ring[dest_idx]; + dest_map = &bp->rx_buffers[dest_idx]; + src_desc = &bp->rx_ring[src_idx]; + src_map = &bp->rx_buffers[src_idx]; + + dest_map->skb = src_map->skb; + rh = (struct rx_header *) src_map->skb->data; + rh->len = 0; + rh->flags = 0; + dest_map->mapping = src_map->mapping; + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, + src_idx * sizeof(*src_desc), + DMA_BIDIRECTIONAL); + + ctrl = src_desc->ctrl; + if (dest_idx == (B44_RX_RING_SIZE - 1)) + ctrl |= cpu_to_le32(DESC_CTRL_EOT); + else + ctrl &= 
cpu_to_le32(~DESC_CTRL_EOT); + + dest_desc->ctrl = ctrl; + dest_desc->addr = src_desc->addr; + + src_map->skb = NULL; + + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(*dest_desc), + DMA_BIDIRECTIONAL); + + dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); +} + +static int b44_rx(struct b44 *bp, int budget) +{ + int received; + u32 cons, prod; + + received = 0; + prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK; + prod /= sizeof(struct dma_desc); + cons = bp->rx_cons; + + while (cons != prod && budget > 0) { + struct ring_info *rp = &bp->rx_buffers[cons]; + struct sk_buff *skb = rp->skb; + dma_addr_t map = rp->mapping; + struct rx_header *rh; + u16 len; + + dma_sync_single_for_cpu(bp->sdev->dma_dev, map, + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + rh = (struct rx_header *) skb->data; + len = le16_to_cpu(rh->len); + if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || + (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { + drop_it: + b44_recycle_rx(bp, cons, bp->rx_prod); + drop_it_no_recycle: + bp->dev->stats.rx_dropped++; + goto next_pkt; + } + + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rh->len); + } while (len == 0 && i++ < 5); + if (len == 0) + goto drop_it; + } + + /* Omit CRC. */ + len -= 4; + + if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) { + int skb_size; + skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); + if (skb_size < 0) + goto drop_it; + dma_unmap_single(bp->sdev->dma_dev, map, + skb_size, DMA_FROM_DEVICE); + /* Leave out rx_header */ + skb_put(skb, len + RX_PKT_OFFSET); + skb_pull(skb, RX_PKT_OFFSET); + } else { + struct sk_buff *copy_skb; + + b44_recycle_rx(bp, cons, bp->rx_prod); + copy_skb = napi_alloc_skb(&bp->napi, len); + if (copy_skb == NULL) + goto drop_it_no_recycle; + + skb_put(copy_skb, len); + /* DMA sync done above, copy just the actual packet */ + skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET, + copy_skb->data, len); + skb = copy_skb; + } + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, bp->dev); + netif_receive_skb(skb); + received++; + budget--; + next_pkt: + bp->rx_prod = (bp->rx_prod + 1) & + (B44_RX_RING_SIZE - 1); + cons = (cons + 1) & (B44_RX_RING_SIZE - 1); + } + + bp->rx_cons = cons; + bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc)); + + return received; +} + +static int b44_poll(struct napi_struct *napi, int budget) +{ + struct b44 *bp = container_of(napi, struct b44, napi); + int work_done; + unsigned long flags; + + spin_lock_irqsave(&bp->lock, flags); + + if (bp->istat & (ISTAT_TX | ISTAT_TO)) { + /* spin_lock(&bp->tx_lock); */ + b44_tx(bp); + /* spin_unlock(&bp->tx_lock); */ + } + if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */ + bp->istat &= ~ISTAT_RFO; + b44_disable_ints(bp); + ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */ + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); + netif_wake_queue(bp->dev); + } + + spin_unlock_irqrestore(&bp->lock, flags); + + work_done = 0; + if (bp->istat & ISTAT_RX) + work_done += b44_rx(bp, budget); + + if (bp->istat & ISTAT_ERRORS) { + spin_lock_irqsave(&bp->lock, flags); + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); + netif_wake_queue(bp->dev); + spin_unlock_irqrestore(&bp->lock, flags); + work_done = 0; + } + + if (work_done < budget) { + napi_complete(napi); + b44_enable_ints(bp); + } + + return work_done; +} + +static irqreturn_t 
b44_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct b44 *bp = netdev_priv(dev); + u32 istat, imask; + int handled = 0; + + spin_lock(&bp->lock); + + istat = br32(bp, B44_ISTAT); + imask = br32(bp, B44_IMASK); + + /* The interrupt mask register controls which interrupt bits + * will actually raise an interrupt to the CPU when set by hw/firmware, + * but doesn't mask off the bits. + */ + istat &= imask; + if (istat) { + handled = 1; + + if (unlikely(!netif_running(dev))) { + netdev_info(dev, "late interrupt\n"); + goto irq_ack; + } + + if (napi_schedule_prep(&bp->napi)) { + /* NOTE: These writes are posted by the readback of + * the ISTAT register below. + */ + bp->istat = istat; + __b44_disable_ints(bp); + __napi_schedule(&bp->napi); + } + +irq_ack: + bw32(bp, B44_ISTAT, istat); + br32(bp, B44_ISTAT); + } + spin_unlock(&bp->lock); + return IRQ_RETVAL(handled); +} + +static void b44_tx_timeout(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + netdev_err(dev, "transmit timed out, resetting\n"); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + netif_wake_queue(dev); +} + +static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + int rc = NETDEV_TX_OK; + dma_addr_t mapping; + u32 len, entry, ctrl; + unsigned long flags; + + len = skb->len; + spin_lock_irqsave(&bp->lock, flags); + + /* This is a hard error, log it. */ + if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { + netif_stop_queue(dev); + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_out; + } + + mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + struct sk_buff *bounce_skb; + + /* Chip can't handle DMA to/from >1GB, use bounce buffer */ + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, len, + DMA_TO_DEVICE); + + bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) + goto err_out; + + mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { + if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) + dma_unmap_single(bp->sdev->dma_dev, mapping, + len, DMA_TO_DEVICE); + dev_kfree_skb_any(bounce_skb); + goto err_out; + } + + skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); + dev_kfree_skb_any(skb); + skb = bounce_skb; + } + + entry = bp->tx_prod; + bp->tx_buffers[entry].skb = skb; + bp->tx_buffers[entry].mapping = mapping; + + ctrl = (len & DESC_CTRL_LEN); + ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; + if (entry == (B44_TX_RING_SIZE - 1)) + ctrl |= DESC_CTRL_EOT; + + bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); + bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); + + if (bp->flags & B44_FLAG_TX_RING_HACK) + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, + entry * sizeof(bp->tx_ring[0]), + DMA_TO_DEVICE); + + entry = NEXT_TX(entry); + + bp->tx_prod = entry; + + wmb(); + + bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); + if (bp->flags & B44_FLAG_BUGGY_TXPTR) + bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc)); + if (bp->flags & B44_FLAG_REORDER_BUG) + br32(bp, B44_DMATX_PTR); + + netdev_sent_queue(dev, skb->len); + + if (TX_BUFFS_AVAIL(bp) < 
1) + netif_stop_queue(dev); + +out_unlock: + spin_unlock_irqrestore(&bp->lock, flags); + + return rc; + +err_out: + rc = NETDEV_TX_BUSY; + goto out_unlock; +} + +static int b44_change_mtu(struct net_device *dev, int new_mtu) +{ + struct b44 *bp = netdev_priv(dev); + + if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU) + return -EINVAL; + + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. + */ + dev->mtu = new_mtu; + return 0; + } + + spin_lock_irq(&bp->lock); + b44_halt(bp); + dev->mtu = new_mtu; + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +/* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. bp->lock is not held and we are not + * in an interrupt context and thus may sleep. + */ +static void b44_free_rings(struct b44 *bp) +{ + struct ring_info *rp; + int i; + + for (i = 0; i < B44_RX_RING_SIZE; i++) { + rp = &bp->rx_buffers[i]; + + if (rp->skb == NULL) + continue; + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); + dev_kfree_skb_any(rp->skb); + rp->skb = NULL; + } + + /* XXX needs changes once NETIF_F_SG is set... */ + for (i = 0; i < B44_TX_RING_SIZE; i++) { + rp = &bp->tx_buffers[i]; + + if (rp->skb == NULL) + continue; + dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(rp->skb); + rp->skb = NULL; + } +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. + */ +static void b44_init_rings(struct b44 *bp) +{ + int i; + + b44_free_rings(bp); + + memset(bp->rx_ring, 0, B44_RX_RING_BYTES); + memset(bp->tx_ring, 0, B44_TX_RING_BYTES); + + if (bp->flags & B44_FLAG_RX_RING_HACK) + dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); + + if (bp->flags & B44_FLAG_TX_RING_HACK) + dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, DMA_TO_DEVICE); + + for (i = 0; i < bp->rx_pending; i++) { + if (b44_alloc_rx_skb(bp, -1, i) < 0) + break; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. + */ +static void b44_free_consistent(struct b44 *bp) +{ + kfree(bp->rx_buffers); + bp->rx_buffers = NULL; + kfree(bp->tx_buffers); + bp->tx_buffers = NULL; + if (bp->rx_ring) { + if (bp->flags & B44_FLAG_RX_RING_HACK) { + dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, DMA_BIDIRECTIONAL); + kfree(bp->rx_ring); + } else + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, + bp->rx_ring, bp->rx_ring_dma); + bp->rx_ring = NULL; + bp->flags &= ~B44_FLAG_RX_RING_HACK; + } + if (bp->tx_ring) { + if (bp->flags & B44_FLAG_TX_RING_HACK) { + dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, DMA_TO_DEVICE); + kfree(bp->tx_ring); + } else + dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, + bp->tx_ring, bp->tx_ring_dma); + bp->tx_ring = NULL; + bp->flags &= ~B44_FLAG_TX_RING_HACK; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. Can sleep. 
+ */ +static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) +{ + int size; + + size = B44_RX_RING_SIZE * sizeof(struct ring_info); + bp->rx_buffers = kzalloc(size, gfp); + if (!bp->rx_buffers) + goto out_err; + + size = B44_TX_RING_SIZE * sizeof(struct ring_info); + bp->tx_buffers = kzalloc(size, gfp); + if (!bp->tx_buffers) + goto out_err; + + size = DMA_TABLE_BYTES; + bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, + &bp->rx_ring_dma, gfp); + if (!bp->rx_ring) { + /* Allocation may have failed due to pci_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... */ + struct dma_desc *rx_ring; + dma_addr_t rx_ring_dma; + + rx_ring = kzalloc(size, gfp); + if (!rx_ring) + goto out_err; + + rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) || + rx_ring_dma + size > DMA_BIT_MASK(30)) { + kfree(rx_ring); + goto out_err; + } + + bp->rx_ring = rx_ring; + bp->rx_ring_dma = rx_ring_dma; + bp->flags |= B44_FLAG_RX_RING_HACK; + } + + bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, + &bp->tx_ring_dma, gfp); + if (!bp->tx_ring) { + /* Allocation may have failed due to ssb_dma_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... */ + struct dma_desc *tx_ring; + dma_addr_t tx_ring_dma; + + tx_ring = kzalloc(size, gfp); + if (!tx_ring) + goto out_err; + + tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); + + if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) || + tx_ring_dma + size > DMA_BIT_MASK(30)) { + kfree(tx_ring); + goto out_err; + } + + bp->tx_ring = tx_ring; + bp->tx_ring_dma = tx_ring_dma; + bp->flags |= B44_FLAG_TX_RING_HACK; + } + + return 0; + +out_err: + b44_free_consistent(bp); + return -ENOMEM; +} + +/* bp->lock is held. */ +static void b44_clear_stats(struct b44 *bp) +{ + unsigned long reg; + + bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); + for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) + br32(bp, reg); + for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) + br32(bp, reg); +} + +/* bp->lock is held. */ +static void b44_chip_reset(struct b44 *bp, int reset_kind) +{ + struct ssb_device *sdev = bp->sdev; + bool was_enabled; + + was_enabled = ssb_device_is_enabled(bp->sdev); + + ssb_device_enable(bp->sdev, 0); + ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev); + + if (was_enabled) { + bw32(bp, B44_RCV_LAZY, 0); + bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); + b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); + bw32(bp, B44_DMATX_CTRL, 0); + bp->tx_prod = bp->tx_cons = 0; + if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) { + b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE, + 100, 0); + } + bw32(bp, B44_DMARX_CTRL, 0); + bp->rx_prod = bp->rx_cons = 0; + } + + b44_clear_stats(bp); + + /* + * Don't enable PHY if we are doing a partial reset + * we are probably going to power down + */ + if (reset_kind == B44_CHIP_RESET_PARTIAL) + return; + + switch (sdev->bus->bustype) { + case SSB_BUSTYPE_SSB: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus), + B44_MDC_RATIO) + & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCI: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (0x0d & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SDIO: + WARN_ON(1); /* A device with this bus does not exist. 
*/ + break; + } + + br32(bp, B44_MDIO_CTRL); + + if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { + bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL); + br32(bp, B44_ENET_CTRL); + bp->flags |= B44_FLAG_EXTERNAL_PHY; + } else { + u32 val = br32(bp, B44_DEVCTRL); + + if (val & DEVCTRL_EPR) { + bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR)); + br32(bp, B44_DEVCTRL); + udelay(100); + } + bp->flags &= ~B44_FLAG_EXTERNAL_PHY; + } +} + +/* bp->lock is held. */ +static void b44_halt(struct b44 *bp) +{ + b44_disable_ints(bp); + /* reset PHY */ + b44_phy_reset(bp); + /* power down PHY */ + netdev_info(bp->dev, "powering down PHY\n"); + bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN); + /* now reset the chip, but without enabling the MAC&PHY + * part of it. This has to be done _after_ we shut down the PHY */ + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + b44_chip_reset(bp, B44_CHIP_RESET_FULL); + else + b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); +} + +/* bp->lock is held. */ +static void __b44_set_mac_addr(struct b44 *bp) +{ + bw32(bp, B44_CAM_CTRL, 0); + if (!(bp->dev->flags & IFF_PROMISC)) { + u32 val; + + __b44_cam_write(bp, bp->dev->dev_addr, 0); + val = br32(bp, B44_CAM_CTRL); + bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); + } +} + +static int b44_set_mac_addr(struct net_device *dev, void *p) +{ + struct b44 *bp = netdev_priv(dev); + struct sockaddr *addr = p; + u32 val; + + if (netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + spin_lock_irq(&bp->lock); + + val = br32(bp, B44_RXCONFIG); + if (!(val & RXCONFIG_CAM_ABSENT)) + __b44_set_mac_addr(bp); + + spin_unlock_irq(&bp->lock); + + return 0; +} + +/* Called at device open time to get the chip ready for + * packet processing. Invoked with bp->lock held. + */ +static void __b44_set_rx_mode(struct net_device *); +static void b44_init_hw(struct b44 *bp, int reset_kind) +{ + u32 val; + + b44_chip_reset(bp, B44_CHIP_RESET_FULL); + if (reset_kind == B44_FULL_RESET) { + b44_phy_reset(bp); + b44_setup_phy(bp); + } + + /* Enable CRC32, set proper LED modes and power on PHY */ + bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL); + bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT)); + + /* This sets the MAC address too. 
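+ * (__b44_set_rx_mode() programs the CAM with dev->dev_addr via
+ * __b44_set_mac_addr() unless the interface is promiscuous or the CAM
+ * is absent.)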
*/ + __b44_set_rx_mode(bp->dev); + + /* MTU + eth header + possible VLAN tag + struct rx_header */ + bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); + bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN); + + bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ + if (reset_kind == B44_PARTIAL_RESET) { + bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | + (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); + } else { + bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); + bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); + bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | + (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))); + bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); + + bw32(bp, B44_DMARX_PTR, bp->rx_pending); + bp->rx_prod = bp->rx_pending; + + bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ); + } + + val = br32(bp, B44_ENET_CTRL); + bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); + + netdev_reset_queue(bp->dev); +} + +static int b44_open(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + int err; + + err = b44_alloc_consistent(bp, GFP_KERNEL); + if (err) + goto out; + + napi_enable(&bp->napi); + + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + + b44_check_phy(bp); + + err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); + if (unlikely(err < 0)) { + napi_disable(&bp->napi); + b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); + b44_free_rings(bp); + b44_free_consistent(bp); + goto out; + } + + init_timer(&bp->timer); + bp->timer.expires = jiffies + HZ; + bp->timer.data = (unsigned long) bp; + bp->timer.function = b44_timer; + add_timer(&bp->timer); + + b44_enable_ints(bp); + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + phy_start(bp->phydev); + + netif_start_queue(dev); +out: + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void b44_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + b44_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) +{ + u32 i; + u32 *pattern = (u32 *) pp; + + for (i = 0; i < bytes; i += sizeof(u32)) { + bw32(bp, B44_FILT_ADDR, table_offset + i); + bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]); + } +} + +static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) +{ + int magicsync = 6; + int k, j, len = offset; + int ethaddr_bytes = ETH_ALEN; + + memset(ppattern + offset, 0xff, magicsync); + for (j = 0; j < magicsync; j++) + set_bit(len++, (unsigned long *) pmask); + + for (j = 0; j < B44_MAX_PATTERNS; j++) { + if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) + ethaddr_bytes = ETH_ALEN; + else + ethaddr_bytes = B44_PATTERN_SIZE - len; + if (ethaddr_bytes <=0) + break; + for (k = 0; k< ethaddr_bytes; k++) { + ppattern[offset + magicsync + + (j * ETH_ALEN) + k] = macaddr[k]; + set_bit(len++, (unsigned long *) pmask); + } + } + return len - 1; +} + +/* Setup magic packet patterns in the b44 WOL + * pattern matching filter. 
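+ * A magic packet carries six 0xff sync bytes followed by repeated copies
+ * of the station MAC address; b44_magic_pattern() writes that layout at the
+ * given header offset and sets a mask bit for every byte that must match.
+ * Three patterns are loaded: one each for raw Ethernet II, IPv4/UDP and
+ * IPv6/UDP framing.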
+ */ +static void b44_setup_pseudo_magicp(struct b44 *bp) +{ + + u32 val; + int plen0, plen1, plen2; + u8 *pwol_pattern; + u8 pwol_mask[B44_PMASK_SIZE]; + + pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL); + if (!pwol_pattern) + return; + + /* Ipv4 magic packet pattern - pattern 0.*/ + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + B44_ETHIPV4UDP_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE); + + /* Raw ethernet II magic packet pattern - pattern 1 */ + memset(pwol_pattern, 0, B44_PATTERN_SIZE); + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + ETH_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, + B44_PATTERN_BASE + B44_PATTERN_SIZE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, + B44_PMASK_BASE + B44_PMASK_SIZE); + + /* Ipv6 magic packet pattern - pattern 2 */ + memset(pwol_pattern, 0, B44_PATTERN_SIZE); + memset(pwol_mask, 0, B44_PMASK_SIZE); + plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask, + B44_ETHIPV6UDP_HLEN); + + bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, + B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE); + bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, + B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE); + + kfree(pwol_pattern); + + /* set these pattern's lengths: one less than each real length */ + val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE; + bw32(bp, B44_WKUP_LEN, val); + + /* enable wakeup pattern matching */ + val = br32(bp, B44_DEVCTRL); + bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE); + +} + +#ifdef CONFIG_B44_PCI +static void b44_setup_wol_pci(struct b44 *bp) +{ + u16 val; + + if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { + bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); + pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); + pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); + } +} +#else +static inline void b44_setup_wol_pci(struct b44 *bp) { } +#endif /* CONFIG_B44_PCI */ + +static void b44_setup_wol(struct b44 *bp) +{ + u32 val; + + bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); + + if (bp->flags & B44_FLAG_B0_ANDLATER) { + + bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE); + + val = bp->dev->dev_addr[2] << 24 | + bp->dev->dev_addr[3] << 16 | + bp->dev->dev_addr[4] << 8 | + bp->dev->dev_addr[5]; + bw32(bp, B44_ADDR_LO, val); + + val = bp->dev->dev_addr[0] << 8 | + bp->dev->dev_addr[1]; + bw32(bp, B44_ADDR_HI, val); + + val = br32(bp, B44_DEVCTRL); + bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE); + + } else { + b44_setup_pseudo_magicp(bp); + } + b44_setup_wol_pci(bp); +} + +static int b44_close(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + netif_stop_queue(dev); + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + phy_stop(bp->phydev); + + napi_disable(&bp->napi); + + del_timer_sync(&bp->timer); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + b44_free_rings(bp); + netif_carrier_off(dev); + + spin_unlock_irq(&bp->lock); + + free_irq(dev->irq, dev); + + if (bp->flags & B44_FLAG_WOL_ENABLE) { + b44_init_hw(bp, B44_PARTIAL_RESET); + b44_setup_wol(bp); + } + + b44_free_consistent(bp); + + return 0; +} + +static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *nstat) +{ + struct b44 *bp = netdev_priv(dev); + struct b44_hw_stats *hwstat = &bp->hw_stats; + unsigned int 
start; + + do { + start = u64_stats_fetch_begin_irq(&hwstat->syncp); + + /* Convert HW stats into rtnl_link_stats64 stats. */ + nstat->rx_packets = hwstat->rx_pkts; + nstat->tx_packets = hwstat->tx_pkts; + nstat->rx_bytes = hwstat->rx_octets; + nstat->tx_bytes = hwstat->tx_octets; + nstat->tx_errors = (hwstat->tx_jabber_pkts + + hwstat->tx_oversize_pkts + + hwstat->tx_underruns + + hwstat->tx_excessive_cols + + hwstat->tx_late_cols); + nstat->multicast = hwstat->rx_multicast_pkts; + nstat->collisions = hwstat->tx_total_cols; + + nstat->rx_length_errors = (hwstat->rx_oversize_pkts + + hwstat->rx_undersize); + nstat->rx_over_errors = hwstat->rx_missed_pkts; + nstat->rx_frame_errors = hwstat->rx_align_errs; + nstat->rx_crc_errors = hwstat->rx_crc_errs; + nstat->rx_errors = (hwstat->rx_jabber_pkts + + hwstat->rx_oversize_pkts + + hwstat->rx_missed_pkts + + hwstat->rx_crc_align_errs + + hwstat->rx_undersize + + hwstat->rx_crc_errs + + hwstat->rx_align_errs + + hwstat->rx_symbol_errs); + + nstat->tx_aborted_errors = hwstat->tx_underruns; +#if 0 + /* Carrier lost counter seems to be broken for some devices */ + nstat->tx_carrier_errors = hwstat->tx_carrier_lost; +#endif + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); + + return nstat; +} + +static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + int i, num_ents; + + num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); + i = 0; + netdev_for_each_mc_addr(ha, dev) { + if (i == num_ents) + break; + __b44_cam_write(bp, ha->addr, i++ + 1); + } + return i+1; +} + +static void __b44_set_rx_mode(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + u32 val; + + val = br32(bp, B44_RXCONFIG); + val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) { + val |= RXCONFIG_PROMISC; + bw32(bp, B44_RXCONFIG, val); + } else { + unsigned char zero[6] = {0, 0, 0, 0, 0, 0}; + int i = 1; + + __b44_set_mac_addr(bp); + + if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE)) + val |= RXCONFIG_ALLMULTI; + else + i = __b44_load_mcast(bp, dev); + + for (; i < 64; i++) + __b44_cam_write(bp, zero, i); + + bw32(bp, B44_RXCONFIG, val); + val = br32(bp, B44_CAM_CTRL); + bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE); + } +} + +static void b44_set_rx_mode(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + __b44_set_rx_mode(dev); + spin_unlock_irq(&bp->lock); +} + +static u32 b44_get_msglevel(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + return bp->msg_enable; +} + +static void b44_set_msglevel(struct net_device *dev, u32 value) +{ + struct b44 *bp = netdev_priv(dev); + bp->msg_enable = value; +} + +static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct b44 *bp = netdev_priv(dev); + struct ssb_bus *bus = bp->sdev->bus; + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_SSB: + strlcpy(info->bus_info, "SSB", sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SDIO: + WARN_ON(1); /* A device with this bus does not exist. 
*/ + break; + } +} + +static int b44_nway_reset(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + u32 bmcr; + int r; + + spin_lock_irq(&bp->lock); + b44_readphy(bp, MII_BMCR, &bmcr); + b44_readphy(bp, MII_BMCR, &bmcr); + r = -EINVAL; + if (bmcr & BMCR_ANENABLE) { + b44_writephy(bp, MII_BMCR, + bmcr | BMCR_ANRESTART); + r = 0; + } + spin_unlock_irq(&bp->lock); + + return r; +} + +static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct b44 *bp = netdev_priv(dev); + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) { + BUG_ON(!bp->phydev); + return phy_ethtool_gset(bp->phydev, cmd); + } + + cmd->supported = (SUPPORTED_Autoneg); + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_MII); + + cmd->advertising = 0; + if (bp->flags & B44_FLAG_ADV_10HALF) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (bp->flags & B44_FLAG_ADV_10FULL) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (bp->flags & B44_FLAG_ADV_100HALF) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (bp->flags & B44_FLAG_ADV_100FULL) + cmd->advertising |= ADVERTISED_100baseT_Full; + cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ? + SPEED_100 : SPEED_10)); + cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? + DUPLEX_FULL : DUPLEX_HALF; + cmd->port = 0; + cmd->phy_address = bp->phy_addr; + cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ? + XCVR_EXTERNAL : XCVR_INTERNAL; + cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ? + AUTONEG_DISABLE : AUTONEG_ENABLE; + if (cmd->autoneg == AUTONEG_ENABLE) + cmd->advertising |= ADVERTISED_Autoneg; + if (!netif_running(dev)){ + ethtool_cmd_speed_set(cmd, 0); + cmd->duplex = 0xff; + } + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct b44 *bp = netdev_priv(dev); + u32 speed; + int ret; + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) { + BUG_ON(!bp->phydev); + spin_lock_irq(&bp->lock); + if (netif_running(dev)) + b44_setup_phy(bp); + + ret = phy_ethtool_sset(bp->phydev, cmd); + + spin_unlock_irq(&bp->lock); + + return ret; + } + + speed = ethtool_cmd_speed(cmd); + + /* We do not support gigabit. 
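+ * Reject any request that advertises or forces a 1000 Mb/s mode.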
*/ + if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->advertising & + (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) + return -EINVAL; + } else if ((speed != SPEED_100 && + speed != SPEED_10) || + (cmd->duplex != DUPLEX_HALF && + cmd->duplex != DUPLEX_FULL)) { + return -EINVAL; + } + + spin_lock_irq(&bp->lock); + + if (cmd->autoneg == AUTONEG_ENABLE) { + bp->flags &= ~(B44_FLAG_FORCE_LINK | + B44_FLAG_100_BASE_T | + B44_FLAG_FULL_DUPLEX | + B44_FLAG_ADV_10HALF | + B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | + B44_FLAG_ADV_100FULL); + if (cmd->advertising == 0) { + bp->flags |= (B44_FLAG_ADV_10HALF | + B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | + B44_FLAG_ADV_100FULL); + } else { + if (cmd->advertising & ADVERTISED_10baseT_Half) + bp->flags |= B44_FLAG_ADV_10HALF; + if (cmd->advertising & ADVERTISED_10baseT_Full) + bp->flags |= B44_FLAG_ADV_10FULL; + if (cmd->advertising & ADVERTISED_100baseT_Half) + bp->flags |= B44_FLAG_ADV_100HALF; + if (cmd->advertising & ADVERTISED_100baseT_Full) + bp->flags |= B44_FLAG_ADV_100FULL; + } + } else { + bp->flags |= B44_FLAG_FORCE_LINK; + bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX); + if (speed == SPEED_100) + bp->flags |= B44_FLAG_100_BASE_T; + if (cmd->duplex == DUPLEX_FULL) + bp->flags |= B44_FLAG_FULL_DUPLEX; + } + + if (netif_running(dev)) + b44_setup_phy(bp); + + spin_unlock_irq(&bp->lock); + + return 0; +} + +static void b44_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct b44 *bp = netdev_priv(dev); + + ering->rx_max_pending = B44_RX_RING_SIZE - 1; + ering->rx_pending = bp->rx_pending; + + /* XXX ethtool lacks a tx_max_pending, oops... */ +} + +static int b44_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct b44 *bp = netdev_priv(dev); + + if ((ering->rx_pending > B44_RX_RING_SIZE - 1) || + (ering->rx_mini_pending != 0) || + (ering->rx_jumbo_pending != 0) || + (ering->tx_pending > B44_TX_RING_SIZE - 1)) + return -EINVAL; + + spin_lock_irq(&bp->lock); + + bp->rx_pending = ering->rx_pending; + bp->tx_pending = ering->tx_pending; + + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + netif_wake_queue(bp->dev); + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +static void b44_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct b44 *bp = netdev_priv(dev); + + epause->autoneg = + (bp->flags & B44_FLAG_PAUSE_AUTO) != 0; + epause->rx_pause = + (bp->flags & B44_FLAG_RX_PAUSE) != 0; + epause->tx_pause = + (bp->flags & B44_FLAG_TX_PAUSE) != 0; +} + +static int b44_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + if (epause->autoneg) + bp->flags |= B44_FLAG_PAUSE_AUTO; + else + bp->flags &= ~B44_FLAG_PAUSE_AUTO; + if (epause->rx_pause) + bp->flags |= B44_FLAG_RX_PAUSE; + else + bp->flags &= ~B44_FLAG_RX_PAUSE; + if (epause->tx_pause) + bp->flags |= B44_FLAG_TX_PAUSE; + else + bp->flags &= ~B44_FLAG_TX_PAUSE; + if (bp->flags & B44_FLAG_PAUSE_AUTO) { + b44_halt(bp); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + } else { + __b44_set_flow_ctrl(bp, bp->flags); + } + spin_unlock_irq(&bp->lock); + + b44_enable_ints(bp); + + return 0; +} + +static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *b44_gstrings, sizeof(b44_gstrings)); + break; + } +} + +static int b44_get_sset_count(struct 
net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(b44_gstrings); + default: + return -EOPNOTSUPP; + } +} + +static void b44_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct b44 *bp = netdev_priv(dev); + struct b44_hw_stats *hwstat = &bp->hw_stats; + u64 *data_src, *data_dst; + unsigned int start; + u32 i; + + spin_lock_irq(&bp->lock); + b44_stats_update(bp); + spin_unlock_irq(&bp->lock); + + do { + data_src = &hwstat->tx_good_octets; + data_dst = data; + start = u64_stats_fetch_begin_irq(&hwstat->syncp); + + for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) + *data_dst++ = *data_src++; + + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); +} + +static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct b44 *bp = netdev_priv(dev); + + wol->supported = WAKE_MAGIC; + if (bp->flags & B44_FLAG_WOL_ENABLE) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct b44 *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + if (wol->wolopts & WAKE_MAGIC) + bp->flags |= B44_FLAG_WOL_ENABLE; + else + bp->flags &= ~B44_FLAG_WOL_ENABLE; + spin_unlock_irq(&bp->lock); + + device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC); + return 0; +} + +static const struct ethtool_ops b44_ethtool_ops = { + .get_drvinfo = b44_get_drvinfo, + .get_settings = b44_get_settings, + .set_settings = b44_set_settings, + .nway_reset = b44_nway_reset, + .get_link = ethtool_op_get_link, + .get_wol = b44_get_wol, + .set_wol = b44_set_wol, + .get_ringparam = b44_get_ringparam, + .set_ringparam = b44_set_ringparam, + .get_pauseparam = b44_get_pauseparam, + .set_pauseparam = b44_set_pauseparam, + .get_msglevel = b44_get_msglevel, + .set_msglevel = b44_set_msglevel, + .get_strings = b44_get_strings, + .get_sset_count = b44_get_sset_count, + .get_ethtool_stats = b44_get_ethtool_stats, +}; + +static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct b44 *bp = netdev_priv(dev); + int err = -EINVAL; + + if (!netif_running(dev)) + goto out; + + spin_lock_irq(&bp->lock); + if (bp->flags & B44_FLAG_EXTERNAL_PHY) { + BUG_ON(!bp->phydev); + err = phy_mii_ioctl(bp->phydev, ifr, cmd); + } else { + err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL); + } + spin_unlock_irq(&bp->lock); +out: + return err; +} + +static int b44_get_invariants(struct b44 *bp) +{ + struct ssb_device *sdev = bp->sdev; + int err = 0; + u8 *addr; + + bp->dma_offset = ssb_dma_translation(sdev); + + if (sdev->bus->bustype == SSB_BUSTYPE_SSB && + instance > 1) { + addr = sdev->bus->sprom.et1mac; + bp->phy_addr = sdev->bus->sprom.et1phyaddr; + } else { + addr = sdev->bus->sprom.et0mac; + bp->phy_addr = sdev->bus->sprom.et0phyaddr; + } + /* Some ROMs have buggy PHY addresses with the high + * bits set (sign extension?). Truncate them to a + * valid PHY address. */ + bp->phy_addr &= 0x1F; + + memcpy(bp->dev->dev_addr, addr, ETH_ALEN); + + if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ + pr_err("Invalid MAC address found in EEPROM\n"); + return -EINVAL; + } + + bp->imask = IMASK_DEF; + + /* XXX - really required? 
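+ When set, b44_start_xmit() posts the B44_DMATX_PTR write twice as a workaround.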
+ bp->flags |= B44_FLAG_BUGGY_TXPTR; + */ + + if (bp->sdev->id.revision >= 7) + bp->flags |= B44_FLAG_B0_ANDLATER; + + return err; +} + +static const struct net_device_ops b44_netdev_ops = { + .ndo_open = b44_open, + .ndo_stop = b44_close, + .ndo_start_xmit = b44_start_xmit, + .ndo_get_stats64 = b44_get_stats64, + .ndo_set_rx_mode = b44_set_rx_mode, + .ndo_set_mac_address = b44_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = b44_ioctl, + .ndo_tx_timeout = b44_tx_timeout, + .ndo_change_mtu = b44_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = b44_poll_controller, +#endif +}; + +static void b44_adjust_link(struct net_device *dev) +{ + struct b44 *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phydev; + bool status_changed = 0; + + BUG_ON(!phydev); + + if (bp->old_link != phydev->link) { + status_changed = 1; + bp->old_link = phydev->link; + } + + /* reflect duplex change */ + if (phydev->link) { + if ((phydev->duplex == DUPLEX_HALF) && + (bp->flags & B44_FLAG_FULL_DUPLEX)) { + status_changed = 1; + bp->flags &= ~B44_FLAG_FULL_DUPLEX; + } else if ((phydev->duplex == DUPLEX_FULL) && + !(bp->flags & B44_FLAG_FULL_DUPLEX)) { + status_changed = 1; + bp->flags |= B44_FLAG_FULL_DUPLEX; + } + } + + if (status_changed) { + u32 val = br32(bp, B44_TX_CTRL); + if (bp->flags & B44_FLAG_FULL_DUPLEX) + val |= TX_CTRL_DUPLEX; + else + val &= ~TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + phy_print_status(phydev); + } +} + +static int b44_register_phy_one(struct b44 *bp) +{ + struct mii_bus *mii_bus; + struct ssb_device *sdev = bp->sdev; + struct phy_device *phydev; + char bus_id[MII_BUS_ID_SIZE + 3]; + struct ssb_sprom *sprom = &sdev->bus->sprom; + int err; + + mii_bus = mdiobus_alloc(); + if (!mii_bus) { + dev_err(sdev->dev, "mdiobus_alloc() failed\n"); + err = -ENOMEM; + goto err_out; + } + + mii_bus->priv = bp; + mii_bus->read = b44_mdio_read_phylib; + mii_bus->write = b44_mdio_write_phylib; + mii_bus->name = "b44_eth_mii"; + mii_bus->parent = sdev->dev; + mii_bus->phy_mask = ~(1 << bp->phy_addr); + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance); + mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!mii_bus->irq) { + dev_err(sdev->dev, "mii_bus irq allocation failed\n"); + err = -ENOMEM; + goto err_out_mdiobus; + } + + memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR); + + bp->mii_bus = mii_bus; + + err = mdiobus_register(mii_bus); + if (err) { + dev_err(sdev->dev, "failed to register MII bus\n"); + goto err_out_mdiobus_irq; + } + + if (!bp->mii_bus->phy_map[bp->phy_addr] && + (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) { + + dev_info(sdev->dev, + "could not find PHY at %i, use fixed one\n", + bp->phy_addr); + + bp->phy_addr = 0; + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0", + bp->phy_addr); + } else { + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, + bp->phy_addr); + } + + phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phydev)) { + dev_err(sdev->dev, "could not attach PHY at %i\n", + bp->phy_addr); + err = PTR_ERR(phydev); + goto err_out_mdiobus_unregister; + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII); + phydev->advertising = phydev->supported; + + bp->phydev = phydev; + bp->old_link = 0; + bp->phy_addr = phydev->addr; + + dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + 
phydev->drv->name, dev_name(&phydev->dev)); + + return 0; + +err_out_mdiobus_unregister: + mdiobus_unregister(mii_bus); + +err_out_mdiobus_irq: + kfree(mii_bus->irq); + +err_out_mdiobus: + mdiobus_free(mii_bus); + +err_out: + return err; +} + +static void b44_unregister_phy_one(struct b44 *bp) +{ + struct mii_bus *mii_bus = bp->mii_bus; + + phy_disconnect(bp->phydev); + mdiobus_unregister(mii_bus); + kfree(mii_bus->irq); + mdiobus_free(mii_bus); +} + +static int b44_init_one(struct ssb_device *sdev, + const struct ssb_device_id *ent) +{ + struct net_device *dev; + struct b44 *bp; + int err; + + instance++; + + pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION); + + dev = alloc_etherdev(sizeof(*bp)); + if (!dev) { + err = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, sdev->dev); + + /* No interesting netdevice features in this card... */ + dev->features |= 0; + + bp = netdev_priv(dev); + bp->sdev = sdev; + bp->dev = dev; + bp->force_copybreak = 0; + + bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); + + spin_lock_init(&bp->lock); + + bp->rx_pending = B44_DEF_RX_RING_PENDING; + bp->tx_pending = B44_DEF_TX_RING_PENDING; + + dev->netdev_ops = &b44_netdev_ops; + netif_napi_add(dev, &bp->napi, b44_poll, 64); + dev->watchdog_timeo = B44_TX_TIMEOUT; + dev->irq = sdev->irq; + dev->ethtool_ops = &b44_ethtool_ops; + + err = ssb_bus_powerup(sdev->bus, 0); + if (err) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + goto err_out_free_dev; + } + + if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) { + dev_err(sdev->dev, + "Required 30BIT DMA mask unsupported by the system\n"); + goto err_out_powerdown; + } + + err = b44_get_invariants(bp); + if (err) { + dev_err(sdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_powerdown; + } + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { + dev_err(sdev->dev, "No PHY present on this MAC, aborting\n"); + err = -ENODEV; + goto err_out_powerdown; + } + + bp->mii_if.dev = dev; + bp->mii_if.mdio_read = b44_mdio_read_mii; + bp->mii_if.mdio_write = b44_mdio_write_mii; + bp->mii_if.phy_id = bp->phy_addr; + bp->mii_if.phy_id_mask = 0x1f; + bp->mii_if.reg_num_mask = 0x1f; + + /* By default, advertise all speed/duplex settings. */ + bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | + B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL); + + /* By default, auto-negotiate PAUSE. */ + bp->flags |= B44_FLAG_PAUSE_AUTO; + + err = register_netdev(dev); + if (err) { + dev_err(sdev->dev, "Cannot register net device, aborting\n"); + goto err_out_powerdown; + } + + netif_carrier_off(dev); + + ssb_set_drvdata(sdev, dev); + + /* Chip reset provides power to the b44 MAC & PCI cores, which + * is necessary for MAC register access. 
+ */ + b44_chip_reset(bp, B44_CHIP_RESET_FULL); + + /* do a phy reset to test if there is an active phy */ + err = b44_phy_reset(bp); + if (err < 0) { + dev_err(sdev->dev, "phy reset failed\n"); + goto err_out_unregister_netdev; + } + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) { + err = b44_register_phy_one(bp); + if (err) { + dev_err(sdev->dev, "Cannot register PHY, aborting\n"); + goto err_out_unregister_netdev; + } + } + + device_set_wakeup_capable(sdev->dev, true); + netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr); + + return 0; + +err_out_unregister_netdev: + unregister_netdev(dev); +err_out_powerdown: + ssb_bus_may_powerdown(sdev->bus); + +err_out_free_dev: + netif_napi_del(&bp->napi); + free_netdev(dev); + +out: + return err; +} + +static void b44_remove_one(struct ssb_device *sdev) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + struct b44 *bp = netdev_priv(dev); + + unregister_netdev(dev); + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + b44_unregister_phy_one(bp); + ssb_device_disable(sdev, 0); + ssb_bus_may_powerdown(sdev->bus); + netif_napi_del(&bp->napi); + free_netdev(dev); + ssb_pcihost_set_power_state(sdev, PCI_D3hot); + ssb_set_drvdata(sdev, NULL); +} + +static int b44_suspend(struct ssb_device *sdev, pm_message_t state) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + struct b44 *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + del_timer_sync(&bp->timer); + + spin_lock_irq(&bp->lock); + + b44_halt(bp); + netif_carrier_off(bp->dev); + netif_device_detach(bp->dev); + b44_free_rings(bp); + + spin_unlock_irq(&bp->lock); + + free_irq(dev->irq, dev); + if (bp->flags & B44_FLAG_WOL_ENABLE) { + b44_init_hw(bp, B44_PARTIAL_RESET); + b44_setup_wol(bp); + } + + ssb_pcihost_set_power_state(sdev, PCI_D3hot); + return 0; +} + +static int b44_resume(struct ssb_device *sdev) +{ + struct net_device *dev = ssb_get_drvdata(sdev); + struct b44 *bp = netdev_priv(dev); + int rc = 0; + + rc = ssb_bus_powerup(sdev->bus, 0); + if (rc) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + return rc; + } + + if (!netif_running(dev)) + return 0; + + spin_lock_irq(&bp->lock); + b44_init_rings(bp); + b44_init_hw(bp, B44_FULL_RESET); + spin_unlock_irq(&bp->lock); + + /* + * As a shared interrupt, the handler can be called immediately. To be + * able to check the interrupt status the hardware must already be + * powered back on (b44_init_hw). 
+ */ + rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) { + netdev_err(dev, "request_irq failed\n"); + spin_lock_irq(&bp->lock); + b44_halt(bp); + b44_free_rings(bp); + spin_unlock_irq(&bp->lock); + return rc; + } + + netif_device_attach(bp->dev); + + b44_enable_ints(bp); + netif_wake_queue(dev); + + mod_timer(&bp->timer, jiffies + 1); + + return 0; +} + +static struct ssb_driver b44_ssb_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_ssb_tbl, + .probe = b44_init_one, + .remove = b44_remove_one, + .suspend = b44_suspend, + .resume = b44_resume, +}; + +static inline int __init b44_pci_init(void) +{ + int err = 0; +#ifdef CONFIG_B44_PCI + err = ssb_pcihost_register(&b44_pci_driver); +#endif + return err; +} + +static inline void b44_pci_exit(void) +{ +#ifdef CONFIG_B44_PCI + ssb_pcihost_unregister(&b44_pci_driver); +#endif +} + +static int __init b44_init(void) +{ + unsigned int dma_desc_align_size = dma_get_cache_alignment(); + int err; + + /* Setup paramaters for syncing RX/TX DMA descriptors */ + dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); + + err = b44_pci_init(); + if (err) + return err; + err = ssb_driver_register(&b44_ssb_driver); + if (err) + b44_pci_exit(); + return err; +} + +static void __exit b44_cleanup(void) +{ + ssb_driver_unregister(&b44_ssb_driver); + b44_pci_exit(); +} + +module_init(b44_init); +module_exit(b44_cleanup); + diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h new file mode 100644 index 000000000..3e9c3fc75 --- /dev/null +++ b/drivers/net/ethernet/broadcom/b44.h @@ -0,0 +1,409 @@ +#ifndef _B44_H +#define _B44_H + +/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */ +#define B44_DEVCTRL 0x0000UL /* Device Control */ +#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */ +#define DEVCTRL_PFE 0x00000080 /* Pattern Filtering Enable */ +#define DEVCTRL_IPP 0x00000400 /* Internal EPHY Present */ +#define DEVCTRL_EPR 0x00008000 /* EPHY Reset */ +#define DEVCTRL_PME 0x00001000 /* PHY Mode Enable */ +#define DEVCTRL_PMCE 0x00002000 /* PHY Mode Clocks Enable */ +#define DEVCTRL_PADDR 0x0007c000 /* PHY Address */ +#define DEVCTRL_PADDR_SHIFT 18 +#define B44_BIST_STAT 0x000CUL /* Built-In Self-Test Status */ +#define B44_WKUP_LEN 0x0010UL /* Wakeup Length */ +#define WKUP_LEN_P0_MASK 0x0000007f /* Pattern 0 */ +#define WKUP_LEN_D0 0x00000080 +#define WKUP_LEN_P1_MASK 0x00007f00 /* Pattern 1 */ +#define WKUP_LEN_P1_SHIFT 8 +#define WKUP_LEN_D1 0x00008000 +#define WKUP_LEN_P2_MASK 0x007f0000 /* Pattern 2 */ +#define WKUP_LEN_P2_SHIFT 16 +#define WKUP_LEN_D2 0x00000000 +#define WKUP_LEN_P3_MASK 0x7f000000 /* Pattern 3 */ +#define WKUP_LEN_P3_SHIFT 24 +#define WKUP_LEN_D3 0x80000000 +#define WKUP_LEN_DISABLE 0x80808080 +#define WKUP_LEN_ENABLE_TWO 0x80800000 +#define WKUP_LEN_ENABLE_THREE 0x80000000 +#define B44_ISTAT 0x0020UL /* Interrupt Status */ +#define ISTAT_LS 0x00000020 /* Link Change (B0 only) */ +#define ISTAT_PME 0x00000040 /* Power Management Event */ +#define ISTAT_TO 0x00000080 /* General Purpose Timeout */ +#define ISTAT_DSCE 0x00000400 /* Descriptor Error */ +#define ISTAT_DATAE 0x00000800 /* Data Error */ +#define ISTAT_DPE 0x00001000 /* Descr. Protocol Error */ +#define ISTAT_RDU 0x00002000 /* Receive Descr. 
Underflow */ +#define ISTAT_RFO 0x00004000 /* Receive FIFO Overflow */ +#define ISTAT_TFU 0x00008000 /* Transmit FIFO Underflow */ +#define ISTAT_RX 0x00010000 /* RX Interrupt */ +#define ISTAT_TX 0x01000000 /* TX Interrupt */ +#define ISTAT_EMAC 0x04000000 /* EMAC Interrupt */ +#define ISTAT_MII_WRITE 0x08000000 /* MII Write Interrupt */ +#define ISTAT_MII_READ 0x10000000 /* MII Read Interrupt */ +#define ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU) +#define B44_IMASK 0x0024UL /* Interrupt Mask */ +#define IMASK_DEF (ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX) +#define B44_GPTIMER 0x0028UL /* General Purpose Timer */ +#define B44_ADDR_LO 0x0088UL /* ENET Address Lo (B0 only) */ +#define B44_ADDR_HI 0x008CUL /* ENET Address Hi (B0 only) */ +#define B44_FILT_ADDR 0x0090UL /* ENET Filter Address */ +#define B44_FILT_DATA 0x0094UL /* ENET Filter Data */ +#define B44_TXBURST 0x00A0UL /* TX Max Burst Length */ +#define B44_RXBURST 0x00A4UL /* RX Max Burst Length */ +#define B44_MAC_CTRL 0x00A8UL /* MAC Control */ +#define MAC_CTRL_CRC32_ENAB 0x00000001 /* CRC32 Generation Enable */ +#define MAC_CTRL_PHY_PDOWN 0x00000004 /* Onchip EPHY Powerdown */ +#define MAC_CTRL_PHY_EDET 0x00000008 /* Onchip EPHY Energy Detected */ +#define MAC_CTRL_PHY_LEDCTRL 0x000000e0 /* Onchip EPHY LED Control */ +#define MAC_CTRL_PHY_LEDCTRL_SHIFT 5 +#define B44_MAC_FLOW 0x00ACUL /* MAC Flow Control */ +#define MAC_FLOW_RX_HI_WATER 0x000000ff /* Receive FIFO HI Water Mark */ +#define MAC_FLOW_PAUSE_ENAB 0x00008000 /* Enable Pause Frame Generation */ +#define B44_RCV_LAZY 0x0100UL /* Lazy Interrupt Control */ +#define RCV_LAZY_TO_MASK 0x00ffffff /* Timeout */ +#define RCV_LAZY_FC_MASK 0xff000000 /* Frame Count */ +#define RCV_LAZY_FC_SHIFT 24 +#define B44_DMATX_CTRL 0x0200UL /* DMA TX Control */ +#define DMATX_CTRL_ENABLE 0x00000001 /* Enable */ +#define DMATX_CTRL_SUSPEND 0x00000002 /* Suepend Request */ +#define DMATX_CTRL_LPBACK 0x00000004 /* Loopback Enable */ +#define DMATX_CTRL_FAIRPRIOR 0x00000008 /* Fair Priority */ +#define DMATX_CTRL_FLUSH 0x00000010 /* Flush Request */ +#define B44_DMATX_ADDR 0x0204UL /* DMA TX Descriptor Ring Address */ +#define B44_DMATX_PTR 0x0208UL /* DMA TX Last Posted Descriptor */ +#define B44_DMATX_STAT 0x020CUL /* DMA TX Current Active Desc. + Status */ +#define DMATX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ +#define DMATX_STAT_SMASK 0x0000f000 /* State Mask */ +#define DMATX_STAT_SDISABLED 0x00000000 /* State Disabled */ +#define DMATX_STAT_SACTIVE 0x00001000 /* State Active */ +#define DMATX_STAT_SIDLE 0x00002000 /* State Idle Wait */ +#define DMATX_STAT_SSTOPPED 0x00003000 /* State Stopped */ +#define DMATX_STAT_SSUSP 0x00004000 /* State Suspend Pending */ +#define DMATX_STAT_EMASK 0x000f0000 /* Error Mask */ +#define DMATX_STAT_ENONE 0x00000000 /* Error None */ +#define DMATX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ +#define DMATX_STAT_EDFU 0x00020000 /* Error Data FIFO Underrun */ +#define DMATX_STAT_EBEBR 0x00030000 /* Error Bus Error on Buffer Read */ +#define DMATX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. 
Access */ +#define DMATX_STAT_FLUSHED 0x00100000 /* Flushed */ +#define B44_DMARX_CTRL 0x0210UL /* DMA RX Control */ +#define DMARX_CTRL_ENABLE 0x00000001 /* Enable */ +#define DMARX_CTRL_ROMASK 0x000000fe /* Receive Offset Mask */ +#define DMARX_CTRL_ROSHIFT 1 /* Receive Offset Shift */ +#define B44_DMARX_ADDR 0x0214UL /* DMA RX Descriptor Ring Address */ +#define B44_DMARX_PTR 0x0218UL /* DMA RX Last Posted Descriptor */ +#define B44_DMARX_STAT 0x021CUL /* DMA RX Current Active Desc. + Status */ +#define DMARX_STAT_CDMASK 0x00000fff /* Current Descriptor Mask */ +#define DMARX_STAT_SMASK 0x0000f000 /* State Mask */ +#define DMARX_STAT_SDISABLED 0x00000000 /* State Disabled */ +#define DMARX_STAT_SACTIVE 0x00001000 /* State Active */ +#define DMARX_STAT_SIDLE 0x00002000 /* State Idle Wait */ +#define DMARX_STAT_SSTOPPED 0x00003000 /* State Stopped */ +#define DMARX_STAT_EMASK 0x000f0000 /* Error Mask */ +#define DMARX_STAT_ENONE 0x00000000 /* Error None */ +#define DMARX_STAT_EDPE 0x00010000 /* Error Desc. Protocol Error */ +#define DMARX_STAT_EDFO 0x00020000 /* Error Data FIFO Overflow */ +#define DMARX_STAT_EBEBW 0x00030000 /* Error Bus Error on Buffer Write */ +#define DMARX_STAT_EBEDA 0x00040000 /* Error Bus Error on Desc. Access */ +#define B44_DMAFIFO_AD 0x0220UL /* DMA FIFO Diag Address */ +#define DMAFIFO_AD_OMASK 0x0000ffff /* Offset Mask */ +#define DMAFIFO_AD_SMASK 0x000f0000 /* Select Mask */ +#define DMAFIFO_AD_SXDD 0x00000000 /* Select Transmit DMA Data */ +#define DMAFIFO_AD_SXDP 0x00010000 /* Select Transmit DMA Pointers */ +#define DMAFIFO_AD_SRDD 0x00040000 /* Select Receive DMA Data */ +#define DMAFIFO_AD_SRDP 0x00050000 /* Select Receive DMA Pointers */ +#define DMAFIFO_AD_SXFD 0x00080000 /* Select Transmit FIFO Data */ +#define DMAFIFO_AD_SXFP 0x00090000 /* Select Transmit FIFO Pointers */ +#define DMAFIFO_AD_SRFD 0x000c0000 /* Select Receive FIFO Data */ +#define DMAFIFO_AD_SRFP 0x000c0000 /* Select Receive FIFO Pointers */ +#define B44_DMAFIFO_LO 0x0224UL /* DMA FIFO Diag Low Data */ +#define B44_DMAFIFO_HI 0x0228UL /* DMA FIFO Diag High Data */ +#define B44_RXCONFIG 0x0400UL /* EMAC RX Config */ +#define RXCONFIG_DBCAST 0x00000001 /* Disable Broadcast */ +#define RXCONFIG_ALLMULTI 0x00000002 /* Accept All Multicast */ +#define RXCONFIG_NORX_WHILE_TX 0x00000004 /* Receive Disable While Transmitting */ +#define RXCONFIG_PROMISC 0x00000008 /* Promiscuous Enable */ +#define RXCONFIG_LPBACK 0x00000010 /* Loopback Enable */ +#define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */ +#define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */ +#define RXCONFIG_RFILT 0x00000080 /* Reject Filter */ +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */ +#define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */ +#define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */ +#define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */ +#define MDIO_CTRL_MAXF_MASK 0x0000007f /* MDC Frequency */ +#define MDIO_CTRL_PREAMBLE 0x00000080 /* MII Preamble Enable */ +#define B44_MDIO_DATA 0x0414UL /* EMAC MDIO Data */ +#define MDIO_DATA_DATA 0x0000ffff /* R/W Data */ +#define MDIO_DATA_TA_MASK 0x00030000 /* Turnaround Value */ +#define MDIO_DATA_TA_SHIFT 16 +#define MDIO_TA_VALID 2 +#define MDIO_DATA_RA_MASK 0x007c0000 /* Register Address */ +#define MDIO_DATA_RA_SHIFT 18 +#define MDIO_DATA_PMD_MASK 0x0f800000 /* Physical Media Device */ +#define MDIO_DATA_PMD_SHIFT 23 +#define MDIO_DATA_OP_MASK 0x30000000 /* Opcode */ +#define MDIO_DATA_OP_SHIFT 28 +#define 
MDIO_OP_WRITE 1 +#define MDIO_OP_READ 2 +#define MDIO_DATA_SB_MASK 0xc0000000 /* Start Bits */ +#define MDIO_DATA_SB_SHIFT 30 +#define MDIO_DATA_SB_START 0x40000000 /* Start Of Frame */ +#define B44_EMAC_IMASK 0x0418UL /* EMAC Interrupt Mask */ +#define B44_EMAC_ISTAT 0x041CUL /* EMAC Interrupt Status */ +#define EMAC_INT_MII 0x00000001 /* MII MDIO Interrupt */ +#define EMAC_INT_MIB 0x00000002 /* MIB Interrupt */ +#define EMAC_INT_FLOW 0x00000003 /* Flow Control Interrupt */ +#define B44_CAM_DATA_LO 0x0420UL /* EMAC CAM Data Low */ +#define B44_CAM_DATA_HI 0x0424UL /* EMAC CAM Data High */ +#define CAM_DATA_HI_VALID 0x00010000 /* Valid Bit */ +#define B44_CAM_CTRL 0x0428UL /* EMAC CAM Control */ +#define CAM_CTRL_ENABLE 0x00000001 /* CAM Enable */ +#define CAM_CTRL_MSEL 0x00000002 /* Mask Select */ +#define CAM_CTRL_READ 0x00000004 /* Read */ +#define CAM_CTRL_WRITE 0x00000008 /* Read */ +#define CAM_CTRL_INDEX_MASK 0x003f0000 /* Index Mask */ +#define CAM_CTRL_INDEX_SHIFT 16 +#define CAM_CTRL_BUSY 0x80000000 /* CAM Busy */ +#define B44_ENET_CTRL 0x042CUL /* EMAC ENET Control */ +#define ENET_CTRL_ENABLE 0x00000001 /* EMAC Enable */ +#define ENET_CTRL_DISABLE 0x00000002 /* EMAC Disable */ +#define ENET_CTRL_SRST 0x00000004 /* EMAC Soft Reset */ +#define ENET_CTRL_EPSEL 0x00000008 /* External PHY Select */ +#define B44_TX_CTRL 0x0430UL /* EMAC TX Control */ +#define TX_CTRL_DUPLEX 0x00000001 /* Full Duplex */ +#define TX_CTRL_FMODE 0x00000002 /* Flow Mode */ +#define TX_CTRL_SBENAB 0x00000004 /* Single Backoff Enable */ +#define TX_CTRL_SMALL_SLOT 0x00000008 /* Small Slottime */ +#define B44_TX_WMARK 0x0434UL /* EMAC TX Watermark */ +#define B44_MIB_CTRL 0x0438UL /* EMAC MIB Control */ +#define MIB_CTRL_CLR_ON_READ 0x00000001 /* Autoclear on Read */ +#define B44_TX_GOOD_O 0x0500UL /* MIB TX Good Octets */ +#define B44_TX_GOOD_P 0x0504UL /* MIB TX Good Packets */ +#define B44_TX_O 0x0508UL /* MIB TX Octets */ +#define B44_TX_P 0x050CUL /* MIB TX Packets */ +#define B44_TX_BCAST 0x0510UL /* MIB TX Broadcast Packets */ +#define B44_TX_MCAST 0x0514UL /* MIB TX Multicast Packets */ +#define B44_TX_64 0x0518UL /* MIB TX <= 64 byte Packets */ +#define B44_TX_65_127 0x051CUL /* MIB TX 65 to 127 byte Packets */ +#define B44_TX_128_255 0x0520UL /* MIB TX 128 to 255 byte Packets */ +#define B44_TX_256_511 0x0524UL /* MIB TX 256 to 511 byte Packets */ +#define B44_TX_512_1023 0x0528UL /* MIB TX 512 to 1023 byte Packets */ +#define B44_TX_1024_MAX 0x052CUL /* MIB TX 1024 to max byte Packets */ +#define B44_TX_JABBER 0x0530UL /* MIB TX Jabber Packets */ +#define B44_TX_OSIZE 0x0534UL /* MIB TX Oversize Packets */ +#define B44_TX_FRAG 0x0538UL /* MIB TX Fragment Packets */ +#define B44_TX_URUNS 0x053CUL /* MIB TX Underruns */ +#define B44_TX_TCOLS 0x0540UL /* MIB TX Total Collisions */ +#define B44_TX_SCOLS 0x0544UL /* MIB TX Single Collisions */ +#define B44_TX_MCOLS 0x0548UL /* MIB TX Multiple Collisions */ +#define B44_TX_ECOLS 0x054CUL /* MIB TX Excessive Collisions */ +#define B44_TX_LCOLS 0x0550UL /* MIB TX Late Collisions */ +#define B44_TX_DEFERED 0x0554UL /* MIB TX Defered Packets */ +#define B44_TX_CLOST 0x0558UL /* MIB TX Carrier Lost */ +#define B44_TX_PAUSE 0x055CUL /* MIB TX Pause Packets */ +#define B44_RX_GOOD_O 0x0580UL /* MIB RX Good Octets */ +#define B44_RX_GOOD_P 0x0584UL /* MIB RX Good Packets */ +#define B44_RX_O 0x0588UL /* MIB RX Octets */ +#define B44_RX_P 0x058CUL /* MIB RX Packets */ +#define B44_RX_BCAST 0x0590UL /* MIB RX Broadcast Packets */ +#define B44_RX_MCAST 0x0594UL /* 
MIB RX Multicast Packets */ +#define B44_RX_64 0x0598UL /* MIB RX <= 64 byte Packets */ +#define B44_RX_65_127 0x059CUL /* MIB RX 65 to 127 byte Packets */ +#define B44_RX_128_255 0x05A0UL /* MIB RX 128 to 255 byte Packets */ +#define B44_RX_256_511 0x05A4UL /* MIB RX 256 to 511 byte Packets */ +#define B44_RX_512_1023 0x05A8UL /* MIB RX 512 to 1023 byte Packets */ +#define B44_RX_1024_MAX 0x05ACUL /* MIB RX 1024 to max byte Packets */ +#define B44_RX_JABBER 0x05B0UL /* MIB RX Jabber Packets */ +#define B44_RX_OSIZE 0x05B4UL /* MIB RX Oversize Packets */ +#define B44_RX_FRAG 0x05B8UL /* MIB RX Fragment Packets */ +#define B44_RX_MISS 0x05BCUL /* MIB RX Missed Packets */ +#define B44_RX_CRCA 0x05C0UL /* MIB RX CRC Align Errors */ +#define B44_RX_USIZE 0x05C4UL /* MIB RX Undersize Packets */ +#define B44_RX_CRC 0x05C8UL /* MIB RX CRC Errors */ +#define B44_RX_ALIGN 0x05CCUL /* MIB RX Align Errors */ +#define B44_RX_SYM 0x05D0UL /* MIB RX Symbol Errors */ +#define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */ +#define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */ + +/* 4400 PHY registers */ +#define B44_MII_AUXCTRL 24 /* Auxiliary Control */ +#define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */ +#define MII_AUXCTRL_SPEED 0x0002 /* 1=100Mbps, 0=10Mbps */ +#define MII_AUXCTRL_FORCED 0x0004 /* Forced 10/100 */ +#define B44_MII_ALEDCTRL 26 /* Activity LED */ +#define MII_ALEDCTRL_ALLMSK 0x7fff +#define B44_MII_TLEDCTRL 27 /* Traffic Meter LED */ +#define MII_TLEDCTRL_ENABLE 0x0040 + +struct dma_desc { + __le32 ctrl; + __le32 addr; +}; + +/* There are only 12 bits in the DMA engine for descriptor offsetting + * so the table must be aligned on a boundary of this. + */ +#define DMA_TABLE_BYTES 4096 + +#define DESC_CTRL_LEN 0x00001fff +#define DESC_CTRL_CMASK 0x0ff00000 /* Core specific bits */ +#define DESC_CTRL_EOT 0x10000000 /* End of Table */ +#define DESC_CTRL_IOC 0x20000000 /* Interrupt On Completion */ +#define DESC_CTRL_EOF 0x40000000 /* End of Frame */ +#define DESC_CTRL_SOF 0x80000000 /* Start of Frame */ + +#define RX_COPY_THRESHOLD 256 + +struct rx_header { + __le16 len; + __le16 flags; + __le16 pad[12]; +}; +#define RX_HEADER_LEN 28 + +#define RX_FLAG_OFIFO 0x00000001 /* FIFO Overflow */ +#define RX_FLAG_CRCERR 0x00000002 /* CRC Error */ +#define RX_FLAG_SERR 0x00000004 /* Receive Symbol Error */ +#define RX_FLAG_ODD 0x00000008 /* Frame has odd number of nibbles */ +#define RX_FLAG_LARGE 0x00000010 /* Frame is > RX MAX Length */ +#define RX_FLAG_MCAST 0x00000020 /* Dest is Multicast Address */ +#define RX_FLAG_BCAST 0x00000040 /* Dest is Broadcast Address */ +#define RX_FLAG_MISS 0x00000080 /* Received due to promisc mode */ +#define RX_FLAG_LAST 0x00000800 /* Last buffer in frame */ +#define RX_FLAG_ERRORS (RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO) + +struct ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +#define B44_MCAST_TABLE_SIZE 32 +#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */ +#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */ +#define B44_MDC_RATIO 5000000 + +#define B44_STAT_REG_DECLARE \ + _B44(tx_good_octets) \ + _B44(tx_good_pkts) \ + _B44(tx_octets) \ + _B44(tx_pkts) \ + _B44(tx_broadcast_pkts) \ + _B44(tx_multicast_pkts) \ + _B44(tx_len_64) \ + _B44(tx_len_65_to_127) \ + _B44(tx_len_128_to_255) \ + _B44(tx_len_256_to_511) \ + _B44(tx_len_512_to_1023) \ + _B44(tx_len_1024_to_max) \ + _B44(tx_jabber_pkts) \ + _B44(tx_oversize_pkts) \ + _B44(tx_fragment_pkts) \ + _B44(tx_underruns) \ + _B44(tx_total_cols) \ + 
_B44(tx_single_cols) \ + _B44(tx_multiple_cols) \ + _B44(tx_excessive_cols) \ + _B44(tx_late_cols) \ + _B44(tx_defered) \ + _B44(tx_carrier_lost) \ + _B44(tx_pause_pkts) \ + _B44(rx_good_octets) \ + _B44(rx_good_pkts) \ + _B44(rx_octets) \ + _B44(rx_pkts) \ + _B44(rx_broadcast_pkts) \ + _B44(rx_multicast_pkts) \ + _B44(rx_len_64) \ + _B44(rx_len_65_to_127) \ + _B44(rx_len_128_to_255) \ + _B44(rx_len_256_to_511) \ + _B44(rx_len_512_to_1023) \ + _B44(rx_len_1024_to_max) \ + _B44(rx_jabber_pkts) \ + _B44(rx_oversize_pkts) \ + _B44(rx_fragment_pkts) \ + _B44(rx_missed_pkts) \ + _B44(rx_crc_align_errs) \ + _B44(rx_undersize) \ + _B44(rx_crc_errs) \ + _B44(rx_align_errs) \ + _B44(rx_symbol_errs) \ + _B44(rx_pause_pkts) \ + _B44(rx_nonpause_pkts) + +/* SW copy of device statistics, kept up to date by periodic timer + * which probes HW values. Check b44_stats_update if you mess with + * the layout + */ +struct b44_hw_stats { +#define _B44(x) u64 x; +B44_STAT_REG_DECLARE +#undef _B44 + struct u64_stats_sync syncp; +}; + +#define B44_BOARDFLAG_ROBO 0x0010 /* Board has robo switch */ +#define B44_BOARDFLAG_ADM 0x0080 /* Board has ADMtek switch */ + +struct ssb_device; + +struct b44 { + spinlock_t lock; + + u32 imask, istat; + + struct dma_desc *rx_ring, *tx_ring; + + u32 tx_prod, tx_cons; + u32 rx_prod, rx_cons; + + struct ring_info *rx_buffers; + struct ring_info *tx_buffers; + + struct napi_struct napi; + + u32 dma_offset; + u32 flags; +#define B44_FLAG_B0_ANDLATER 0x00000001 +#define B44_FLAG_BUGGY_TXPTR 0x00000002 +#define B44_FLAG_REORDER_BUG 0x00000004 +#define B44_FLAG_PAUSE_AUTO 0x00008000 +#define B44_FLAG_FULL_DUPLEX 0x00010000 +#define B44_FLAG_100_BASE_T 0x00020000 +#define B44_FLAG_TX_PAUSE 0x00040000 +#define B44_FLAG_RX_PAUSE 0x00080000 +#define B44_FLAG_FORCE_LINK 0x00100000 +#define B44_FLAG_ADV_10HALF 0x01000000 +#define B44_FLAG_ADV_10FULL 0x02000000 +#define B44_FLAG_ADV_100HALF 0x04000000 +#define B44_FLAG_ADV_100FULL 0x08000000 +#define B44_FLAG_EXTERNAL_PHY 0x10000000 +#define B44_FLAG_RX_RING_HACK 0x20000000 +#define B44_FLAG_TX_RING_HACK 0x40000000 +#define B44_FLAG_WOL_ENABLE 0x80000000 + + u32 msg_enable; + + struct timer_list timer; + + struct b44_hw_stats hw_stats; + + struct ssb_device *sdev; + struct net_device *dev; + + dma_addr_t rx_ring_dma, tx_ring_dma; + + u32 rx_pending; + u32 tx_pending; + u8 phy_addr; + u8 force_copybreak; + struct phy_device *phydev; + struct mii_bus *mii_bus; + int old_link; + struct mii_if_info mii_if; +}; + +#endif /* _B44_H */ diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c new file mode 100644 index 000000000..a7f2cc3e4 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -0,0 +1,2919 @@ +/* + * Driver for BCM963xx builtin Ethernet mac + * + * Copyright (C) 2008 Maxime Bizon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "bcm63xx_enet.h" + +static char bcm_enet_driver_name[] = "bcm63xx_enet"; +static char bcm_enet_driver_version[] = "1.0"; + +static int copybreak __read_mostly = 128; +module_param(copybreak, int, 0); +MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +/* io registers memory shared between all devices */ +static void __iomem *bcm_enet_shared_base[3]; + +/* + * io helpers to access mac registers + */ +static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(priv->base + off); +} + +static inline void enet_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, priv->base + off); +} + +/* + * io helpers to access switch registers + */ +static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(priv->base + off); +} + +static inline void enetsw_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, priv->base + off); +} + +static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readw(priv->base + off); +} + +static inline void enetsw_writew(struct bcm_enet_priv *priv, + u16 val, u32 off) +{ + bcm_writew(val, priv->base + off); +} + +static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readb(priv->base + off); +} + +static inline void enetsw_writeb(struct bcm_enet_priv *priv, + u8 val, u32 off) +{ + bcm_writeb(val, priv->base + off); +} + + +/* io helpers to access shared registers */ +static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(bcm_enet_shared_base[0] + off); +} + +static inline void enet_dma_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, bcm_enet_shared_base[0] + off); +} + +static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan) +{ + return bcm_readl(bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); +} + +static inline void enet_dmac_writel(struct bcm_enet_priv *priv, + u32 val, u32 off, int chan) +{ + bcm_writel(val, bcm_enet_shared_base[1] + + bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width); +} + +static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan) +{ + return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); +} + +static inline void enet_dmas_writel(struct bcm_enet_priv *priv, + u32 val, u32 off, int chan) +{ + bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width); +} + +/* + * write given data into mii register and wait for transfer to end + * with timeout (average measured transfer time is 25us) + */ +static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) +{ + int limit; + + /* make sure mii interrupt status is cleared */ + enet_writel(priv, ENET_IR_MII, ENET_IR_REG); + + enet_writel(priv, data, ENET_MIIDATA_REG); + wmb(); + + /* busy wait on mii interrupt bit, with timeout */ + limit = 1000; + do { + if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) + break; + udelay(1); + } while (limit-- > 0); + + return (limit < 0) ? 
1 : 0; +} + +/* + * MII internal read callback + */ +static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, + int regnum) +{ + u32 tmp, val; + + tmp = regnum << ENET_MIIDATA_REG_SHIFT; + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; + tmp |= ENET_MIIDATA_OP_READ_MASK; + + if (do_mdio_op(priv, tmp)) + return -1; + + val = enet_readl(priv, ENET_MIIDATA_REG); + val &= 0xffff; + return val; +} + +/* + * MII internal write callback + */ +static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, + int regnum, u16 value) +{ + u32 tmp; + + tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; + tmp |= regnum << ENET_MIIDATA_REG_SHIFT; + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; + tmp |= ENET_MIIDATA_OP_WRITE_MASK; + + (void)do_mdio_op(priv, tmp); + return 0; +} + +/* + * MII read callback from phylib + */ +static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, + int regnum) +{ + return bcm_enet_mdio_read(bus->priv, mii_id, regnum); +} + +/* + * MII write callback from phylib + */ +static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); +} + +/* + * MII read callback from mii core + */ +static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, + int regnum) +{ + return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); +} + +/* + * MII write callback from mii core + */ +static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, + int regnum, int value) +{ + bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); +} + +/* + * refill rx queue + */ +static int bcm_enet_refill_rx(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + while (priv->rx_desc_count < priv->rx_ring_size) { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + dma_addr_t p; + int desc_idx; + u32 len_stat; + + desc_idx = priv->rx_dirty_desc; + desc = &priv->rx_desc_cpu[desc_idx]; + + if (!priv->rx_skb[desc_idx]) { + skb = netdev_alloc_skb(dev, priv->rx_skb_size); + if (!skb) + break; + priv->rx_skb[desc_idx] = skb; + p = dma_map_single(&priv->pdev->dev, skb->data, + priv->rx_skb_size, + DMA_FROM_DEVICE); + desc->address = p; + } + + len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; + len_stat |= DMADESC_OWNER_MASK; + if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); + priv->rx_dirty_desc = 0; + } else { + priv->rx_dirty_desc++; + } + wmb(); + desc->len_stat = len_stat; + + priv->rx_desc_count++; + + /* tell dma engine we allocated one buffer */ + if (priv->dma_has_sram) + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan); + } + + /* If rx ring is still empty, set a timer to try allocating + * again at a later time. 
*/ + if (priv->rx_desc_count == 0 && netif_running(dev)) { + dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); + priv->rx_timeout.expires = jiffies + HZ; + add_timer(&priv->rx_timeout); + } + + return 0; +} + +/* + * timer callback to defer refill rx queue in case we're OOM + */ +static void bcm_enet_refill_rx_timer(unsigned long data) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + + dev = (struct net_device *)data; + priv = netdev_priv(dev); + + spin_lock(&priv->rx_lock); + bcm_enet_refill_rx((struct net_device *)data); + spin_unlock(&priv->rx_lock); +} + +/* + * extract packet from rx queue + */ +static int bcm_enet_receive_queue(struct net_device *dev, int budget) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int processed; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + processed = 0; + + /* don't scan ring further than number of refilled + * descriptor */ + if (budget > priv->rx_desc_count) + budget = priv->rx_desc_count; + + do { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + int desc_idx; + u32 len_stat; + unsigned int len; + + desc_idx = priv->rx_curr_desc; + desc = &priv->rx_desc_cpu[desc_idx]; + + /* make sure we actually read the descriptor status at + * each loop */ + rmb(); + + len_stat = desc->len_stat; + + /* break if dma ownership belongs to hw */ + if (len_stat & DMADESC_OWNER_MASK) + break; + + processed++; + priv->rx_curr_desc++; + if (priv->rx_curr_desc == priv->rx_ring_size) + priv->rx_curr_desc = 0; + priv->rx_desc_count--; + + /* if the packet does not have start of packet _and_ + * end of packet flag set, then just recycle it */ + if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) != + (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) { + dev->stats.rx_dropped++; + continue; + } + + /* recycle packet if it's marked as bad */ + if (!priv->enet_is_sw && + unlikely(len_stat & DMADESC_ERR_MASK)) { + dev->stats.rx_errors++; + + if (len_stat & DMADESC_OVSIZE_MASK) + dev->stats.rx_length_errors++; + if (len_stat & DMADESC_CRC_MASK) + dev->stats.rx_crc_errors++; + if (len_stat & DMADESC_UNDER_MASK) + dev->stats.rx_frame_errors++; + if (len_stat & DMADESC_OV_MASK) + dev->stats.rx_fifo_errors++; + continue; + } + + /* valid packet */ + skb = priv->rx_skb[desc_idx]; + len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; + /* don't include FCS */ + len -= 4; + + if (len < copybreak) { + struct sk_buff *nskb; + + nskb = napi_alloc_skb(&priv->napi, len); + if (!nskb) { + /* forget packet, just rearm desc */ + dev->stats.rx_dropped++; + continue; + } + + dma_sync_single_for_cpu(kdev, desc->address, + len, DMA_FROM_DEVICE); + memcpy(nskb->data, skb->data, len); + dma_sync_single_for_device(kdev, desc->address, + len, DMA_FROM_DEVICE); + skb = nskb; + } else { + dma_unmap_single(&priv->pdev->dev, desc->address, + priv->rx_skb_size, DMA_FROM_DEVICE); + priv->rx_skb[desc_idx] = NULL; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + netif_receive_skb(skb); + + } while (--budget > 0); + + if (processed || !priv->rx_desc_count) { + bcm_enet_refill_rx(dev); + + /* kick rx dma */ + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); + } + + return processed; +} + + +/* + * try to or force reclaim of transmitted buffers + */ +static int bcm_enet_tx_reclaim(struct net_device *dev, int force) +{ + struct bcm_enet_priv *priv; + int released; + + priv = netdev_priv(dev); + released = 0; + + while (priv->tx_desc_count 
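+
+ /* Aside on the receive path above -- a minimal sketch of the copybreak
+  * trade-off, assuming the default module parameter of 128 bytes:
+  *
+  *	if (len < copybreak)
+  *		small frame: memcpy() into a fresh napi_alloc_skb() buffer,
+  *		the original rx buffer stays mapped and is simply rearmed;
+  *	else
+  *		large frame: unmap and hand the original skb up the stack,
+  *		forcing bcm_enet_refill_rx() to allocate a replacement.
+  */
+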
< priv->tx_ring_size) { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + + /* We run in a bh and fight against start_xmit, which + * is called with bh disabled */ + spin_lock(&priv->tx_lock); + + desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; + + if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { + spin_unlock(&priv->tx_lock); + break; + } + + /* ensure other field of the descriptor were not read + * before we checked ownership */ + rmb(); + + skb = priv->tx_skb[priv->tx_dirty_desc]; + priv->tx_skb[priv->tx_dirty_desc] = NULL; + dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, + DMA_TO_DEVICE); + + priv->tx_dirty_desc++; + if (priv->tx_dirty_desc == priv->tx_ring_size) + priv->tx_dirty_desc = 0; + priv->tx_desc_count++; + + spin_unlock(&priv->tx_lock); + + if (desc->len_stat & DMADESC_UNDER_MASK) + dev->stats.tx_errors++; + + dev_kfree_skb(skb); + released++; + } + + if (netif_queue_stopped(dev) && released) + netif_wake_queue(dev); + + return released; +} + +/* + * poll func, called by network core + */ +static int bcm_enet_poll(struct napi_struct *napi, int budget) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + int rx_work_done; + + priv = container_of(napi, struct bcm_enet_priv, napi); + dev = priv->net_dev; + + /* ack interrupts */ + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); + + /* reclaim sent skb */ + bcm_enet_tx_reclaim(dev, 0); + + spin_lock(&priv->rx_lock); + rx_work_done = bcm_enet_receive_queue(dev, budget); + spin_unlock(&priv->rx_lock); + + if (rx_work_done >= budget) { + /* rx queue is not yet empty/clean */ + return rx_work_done; + } + + /* no more packet in rx/tx queue, remove device from poll + * queue */ + napi_complete(napi); + + /* restore rx/tx interrupt */ + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); + + return rx_work_done; +} + +/* + * mac interrupt handler + */ +static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + u32 stat; + + dev = dev_id; + priv = netdev_priv(dev); + + stat = enet_readl(priv, ENET_IR_REG); + if (!(stat & ENET_IR_MIB)) + return IRQ_NONE; + + /* clear & mask interrupt */ + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); + enet_writel(priv, 0, ENET_IRMASK_REG); + + /* read mib registers in workqueue */ + schedule_work(&priv->mib_update_task); + + return IRQ_HANDLED; +} + +/* + * rx/tx dma interrupt handler + */ +static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + + dev = dev_id; + priv = netdev_priv(dev); + + /* mask rx/tx interrupts */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + napi_schedule(&priv->napi); + + return IRQ_HANDLED; +} + +/* + * tx request callback + */ +static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct bcm_enet_desc *desc; + u32 len_stat; + int ret; + + priv = netdev_priv(dev); + + /* lock against tx reclaim */ + spin_lock(&priv->tx_lock); + + /* make sure the tx hw queue is not full, should not happen + * since we stop queue before it's the case */ + if (unlikely(!priv->tx_desc_count)) { + netif_stop_queue(dev); + dev_err(&priv->pdev->dev, "xmit called with no tx desc " + 
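+
+ /* Note on bcm_enet_poll() above: it follows the usual NAPI contract --
+  * ack the rx/tx interrupts, reclaim transmitted skbs, receive at most
+  * 'budget' frames, and only when less than the full budget was consumed
+  * call napi_complete() and unmask ENETDMAC_IRMASK again; returning the
+  * full budget keeps the device on the poll list with interrupts masked.
+  */
+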
"available?\n"); + ret = NETDEV_TX_BUSY; + goto out_unlock; + } + + /* pad small packets sent on a switch device */ + if (priv->enet_is_sw && skb->len < 64) { + int needed = 64 - skb->len; + char *data; + + if (unlikely(skb_tailroom(skb) < needed)) { + struct sk_buff *nskb; + + nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC); + if (!nskb) { + ret = NETDEV_TX_BUSY; + goto out_unlock; + } + dev_kfree_skb(skb); + skb = nskb; + } + data = skb_put(skb, needed); + memset(data, 0, needed); + } + + /* point to the next available desc */ + desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; + priv->tx_skb[priv->tx_curr_desc] = skb; + + /* fill descriptor */ + desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + + len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; + len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) | + DMADESC_APPEND_CRC | + DMADESC_OWNER_MASK; + + priv->tx_curr_desc++; + if (priv->tx_curr_desc == priv->tx_ring_size) { + priv->tx_curr_desc = 0; + len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift); + } + priv->tx_desc_count--; + + /* dma might be already polling, make sure we update desc + * fields in correct order */ + wmb(); + desc->len_stat = len_stat; + wmb(); + + /* kick tx dma */ + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->tx_chan); + + /* stop queue if no more desc available */ + if (!priv->tx_desc_count) + netif_stop_queue(dev); + + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + ret = NETDEV_TX_OK; + +out_unlock: + spin_unlock(&priv->tx_lock); + return ret; +} + +/* + * Change the interface's mac address. + */ +static int bcm_enet_set_mac_address(struct net_device *dev, void *p) +{ + struct bcm_enet_priv *priv; + struct sockaddr *addr = p; + u32 val; + + priv = netdev_priv(dev); + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + /* use perfect match register 0 to store my mac address */ + val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | + (dev->dev_addr[4] << 8) | dev->dev_addr[5]; + enet_writel(priv, val, ENET_PML_REG(0)); + + val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); + val |= ENET_PMH_DATAVALID_MASK; + enet_writel(priv, val, ENET_PMH_REG(0)); + + return 0; +} + +/* + * Change rx mode (promiscuous/allmulti) and update multicast list + */ +static void bcm_enet_set_multicast_list(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct netdev_hw_addr *ha; + u32 val; + int i; + + priv = netdev_priv(dev); + + val = enet_readl(priv, ENET_RXCFG_REG); + + if (dev->flags & IFF_PROMISC) + val |= ENET_RXCFG_PROMISC_MASK; + else + val &= ~ENET_RXCFG_PROMISC_MASK; + + /* only 3 perfect match registers left, first one is used for + * own mac address */ + if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3) + val |= ENET_RXCFG_ALLMCAST_MASK; + else + val &= ~ENET_RXCFG_ALLMCAST_MASK; + + /* no need to set perfect match registers if we catch all + * multicast */ + if (val & ENET_RXCFG_ALLMCAST_MASK) { + enet_writel(priv, val, ENET_RXCFG_REG); + return; + } + + i = 0; + netdev_for_each_mc_addr(ha, dev) { + u8 *dmi_addr; + u32 tmp; + + if (i == 3) + break; + /* update perfect match registers */ + dmi_addr = ha->addr; + tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | + (dmi_addr[4] << 8) | dmi_addr[5]; + enet_writel(priv, tmp, ENET_PML_REG(i + 1)); + + tmp = (dmi_addr[0] << 8 | dmi_addr[1]); + tmp |= ENET_PMH_DATAVALID_MASK; + enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1)); + } + + for (; i < 3; i++) { + enet_writel(priv, 0, ENET_PML_REG(i + 
1)); + enet_writel(priv, 0, ENET_PMH_REG(i + 1)); + } + + enet_writel(priv, val, ENET_RXCFG_REG); +} + +/* + * set mac duplex parameters + */ +static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) +{ + u32 val; + + val = enet_readl(priv, ENET_TXCTL_REG); + if (fullduplex) + val |= ENET_TXCTL_FD_MASK; + else + val &= ~ENET_TXCTL_FD_MASK; + enet_writel(priv, val, ENET_TXCTL_REG); +} + +/* + * set mac flow control parameters + */ +static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) +{ + u32 val; + + /* rx flow control (pause frame handling) */ + val = enet_readl(priv, ENET_RXCFG_REG); + if (rx_en) + val |= ENET_RXCFG_ENFLOW_MASK; + else + val &= ~ENET_RXCFG_ENFLOW_MASK; + enet_writel(priv, val, ENET_RXCFG_REG); + + if (!priv->dma_has_sram) + return; + + /* tx flow control (pause frame generation) */ + val = enet_dma_readl(priv, ENETDMA_CFG_REG); + if (tx_en) + val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); + else + val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); + enet_dma_writel(priv, val, ENETDMA_CFG_REG); +} + +/* + * link changed callback (from phylib) + */ +static void bcm_enet_adjust_phy_link(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct phy_device *phydev; + int status_changed; + + priv = netdev_priv(dev); + phydev = priv->phydev; + status_changed = 0; + + if (priv->old_link != phydev->link) { + status_changed = 1; + priv->old_link = phydev->link; + } + + /* reflect duplex change in mac configuration */ + if (phydev->link && phydev->duplex != priv->old_duplex) { + bcm_enet_set_duplex(priv, + (phydev->duplex == DUPLEX_FULL) ? 1 : 0); + status_changed = 1; + priv->old_duplex = phydev->duplex; + } + + /* enable flow control if remote advertise it (trust phylib to + * check that duplex is full */ + if (phydev->link && phydev->pause != priv->old_pause) { + int rx_pause_en, tx_pause_en; + + if (phydev->pause) { + /* pause was advertised by lpa and us */ + rx_pause_en = 1; + tx_pause_en = 1; + } else if (!priv->pause_auto) { + /* pause setting overrided by user */ + rx_pause_en = priv->pause_rx; + tx_pause_en = priv->pause_tx; + } else { + rx_pause_en = 0; + tx_pause_en = 0; + } + + bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); + status_changed = 1; + priv->old_pause = phydev->pause; + } + + if (status_changed) { + pr_info("%s: link %s", dev->name, phydev->link ? + "UP" : "DOWN"); + if (phydev->link) + pr_cont(" - %d/%s - flow control %s", phydev->speed, + DUPLEX_FULL == phydev->duplex ? "full" : "half", + phydev->pause == 1 ? "rx&tx" : "off"); + + pr_cont("\n"); + } +} + +/* + * link changed callback (if phylib is not used) + */ +static void bcm_enet_adjust_link(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + bcm_enet_set_duplex(priv, priv->force_duplex_full); + bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); + netif_carrier_on(dev); + + pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", + dev->name, + priv->force_speed_100 ? 100 : 10, + priv->force_duplex_full ? "full" : "half", + priv->pause_rx ? "rx" : "off", + priv->pause_tx ? 
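+
+ /* Note on the pause handling in bcm_enet_adjust_phy_link() above: when
+  * the autoneg result carries the pause bit, rx and tx flow control are
+  * enabled together; when pause autoneg was disabled by the user
+  * (pause_auto == 0) the manual pause_rx/pause_tx settings are applied
+  * instead; otherwise flow control is switched off entirely.
+  */
+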
"tx" : "off"); +} + +/* + * open callback, allocate dma rings & buffers and start rx operation + */ +static int bcm_enet_open(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct sockaddr addr; + struct device *kdev; + struct phy_device *phydev; + int i, ret; + unsigned int size; + char phy_id[MII_BUS_ID_SIZE + 3]; + void *p; + u32 val; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + if (priv->has_phy) { + /* connect to PHY */ + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + priv->mii_bus->id, priv->phy_id); + + phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(kdev, "could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_MII); + phydev->advertising = phydev->supported; + + if (priv->pause_auto && priv->pause_rx && priv->pause_tx) + phydev->advertising |= SUPPORTED_Pause; + else + phydev->advertising &= ~SUPPORTED_Pause; + + dev_info(kdev, "attached PHY at address %d [%s]\n", + phydev->addr, phydev->drv->name); + + priv->old_link = 0; + priv->old_duplex = -1; + priv->old_pause = -1; + priv->phydev = phydev; + } + + /* mask all interrupts and request them */ + enet_writel(priv, 0, ENET_IRMASK_REG); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); + if (ret) + goto out_phy_disconnect; + + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0, + dev->name, dev); + if (ret) + goto out_freeirq; + + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, + 0, dev->name, dev); + if (ret) + goto out_freeirq_rx; + + /* initialize perfect match registers */ + for (i = 0; i < 4; i++) { + enet_writel(priv, 0, ENET_PML_REG(i)); + enet_writel(priv, 0, ENET_PMH_REG(i)); + } + + /* write device mac address */ + memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); + bcm_enet_set_mac_address(dev, &addr); + + /* allocate rx dma ring */ + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + if (!p) { + ret = -ENOMEM; + goto out_freeirq_tx; + } + + priv->rx_desc_alloc_size = size; + priv->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + if (!p) { + ret = -ENOMEM; + goto out_free_rx_ring; + } + + priv->tx_desc_alloc_size = size; + priv->tx_desc_cpu = p; + + priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!priv->tx_skb) { + ret = -ENOMEM; + goto out_free_tx_ring; + } + + priv->tx_desc_count = priv->tx_ring_size; + priv->tx_dirty_desc = 0; + priv->tx_curr_desc = 0; + spin_lock_init(&priv->tx_lock); + + /* init & fill rx ring with skbs */ + priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!priv->rx_skb) { + ret = -ENOMEM; + goto out_free_tx_skb; + } + + priv->rx_desc_count = 0; + priv->rx_dirty_desc = 0; + priv->rx_curr_desc = 0; + + /* initialize flow control buffer allocation */ + if (priv->dma_has_sram) + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + 
ENETDMAC_BUFALLOC, priv->rx_chan); + + if (bcm_enet_refill_rx(dev)) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + if (priv->dma_has_sram) { + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG, priv->rx_chan); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, priv->rx_desc_dma, + ENETDMAC_RSTART, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_desc_dma, + ENETDMAC_RSTART, priv->tx_chan); + } + + /* clear remaining state ram for rx & tx channel */ + if (priv->dma_has_sram) { + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); + } else { + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan); + } + + /* set max rx/tx length */ + enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); + enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); + + /* set dma maximum burst len */ + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->tx_chan); + + /* set correct transmit fifo watermark */ + enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + if (priv->dma_has_sram) { + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + } else { + enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan); + enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan); + enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan); + } + + /* all set, enable mac and interrupts, start dma engine and + * kick rx dma channel */ + wmb(); + val = enet_readl(priv, ENET_CTL_REG); + val |= ENET_CTL_ENABLE_MASK; + enet_writel(priv, val, ENET_CTL_REG); + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dmac_writel(priv, priv->dma_chan_en_mask, + ENETDMAC_CHANCFG, priv->rx_chan); + + /* watch "mib counters about to overflow" interrupt */ + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); + + /* watch "packet transferred" interrupt in rx and tx */ + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IR, priv->tx_chan); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&priv->napi); + + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_chan_int_mask, + ENETDMAC_IRMASK, priv->tx_chan); + + if (priv->has_phy) + phy_start(priv->phydev); + else + bcm_enet_adjust_link(dev); + + netif_start_queue(dev); + return 0; + +out: + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + kfree(priv->rx_skb); + +out_free_tx_skb: + 
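+
+ /* Note: the labels of this unwind path release resources in exactly the
+  * reverse order bcm_enet_open() acquired them -- rx skbs, tx skb array,
+  * tx ring, rx ring, the three irqs, and finally the PHY connection -- so
+  * each 'goto' target only frees what was successfully set up before the
+  * failure.
+  */
+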
kfree(priv->tx_skb); + +out_free_tx_ring: + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + +out_free_rx_ring: + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + +out_freeirq_tx: + free_irq(priv->irq_tx, dev); + +out_freeirq_rx: + free_irq(priv->irq_rx, dev); + +out_freeirq: + free_irq(dev->irq, dev); + +out_phy_disconnect: + phy_disconnect(priv->phydev); + + return ret; +} + +/* + * disable mac + */ +static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) +{ + int limit; + u32 val; + + val = enet_readl(priv, ENET_CTL_REG); + val |= ENET_CTL_DISABLE_MASK; + enet_writel(priv, val, ENET_CTL_REG); + + limit = 1000; + do { + u32 val; + + val = enet_readl(priv, ENET_CTL_REG); + if (!(val & ENET_CTL_DISABLE_MASK)) + break; + udelay(1); + } while (limit--); +} + +/* + * disable dma in given channel + */ +static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) +{ + int limit; + + enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan); + + limit = 1000; + do { + u32 val; + + val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan); + if (!(val & ENETDMAC_CHANCFG_EN_MASK)) + break; + udelay(1); + } while (limit--); +} + +/* + * stop callback + */ +static int bcm_enet_stop(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + netif_stop_queue(dev); + napi_disable(&priv->napi); + if (priv->has_phy) + phy_stop(priv->phydev); + del_timer_sync(&priv->rx_timeout); + + /* mask all interrupts */ + enet_writel(priv, 0, ENET_IRMASK_REG); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + /* make sure no mib update is scheduled */ + cancel_work_sync(&priv->mib_update_task); + + /* disable dma & mac */ + bcm_enet_disable_dma(priv, priv->tx_chan); + bcm_enet_disable_dma(priv, priv->rx_chan); + bcm_enet_disable_mac(priv); + + /* force reclaim of all tx buffers */ + bcm_enet_tx_reclaim(dev, 1); + + /* free the rx skb ring */ + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(priv->rx_skb); + kfree(priv->tx_skb); + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + free_irq(priv->irq_tx, dev); + free_irq(priv->irq_rx, dev); + free_irq(dev->irq, dev); + + /* release phy */ + if (priv->has_phy) { + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + return 0; +} + +/* + * ethtool callbacks + */ +struct bcm_enet_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int mib_reg; +}; + +#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ + offsetof(struct bcm_enet_priv, m) +#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \ + offsetof(struct net_device_stats, m) + +static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { + { "rx_packets", DEV_STAT(rx_packets), -1 }, + { "tx_packets", DEV_STAT(tx_packets), -1 }, + { "rx_bytes", DEV_STAT(rx_bytes), -1 }, + { "tx_bytes", DEV_STAT(tx_bytes), -1 }, + { "rx_errors", DEV_STAT(rx_errors), -1 }, + { "tx_errors", DEV_STAT(tx_errors), -1 }, + { "rx_dropped", 
DEV_STAT(rx_dropped), -1 }, + { "tx_dropped", DEV_STAT(tx_dropped), -1 }, + + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS}, + { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS }, + { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST }, + { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT }, + { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 }, + { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 }, + { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 }, + { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 }, + { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 }, + { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX }, + { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB }, + { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR }, + { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG }, + { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP }, + { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN }, + { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND }, + { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC }, + { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN }, + { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM }, + { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE }, + { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL }, + + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS }, + { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS }, + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST }, + { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT }, + { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 }, + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 }, + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 }, + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 }, + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023}, + { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX }, + { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB }, + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR }, + { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG }, + { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN }, + { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL }, + { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL }, + { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL }, + { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL }, + { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE }, + { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF }, + { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS }, + { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE }, + +}; + +#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats) + +static const u32 unused_mib_regs[] = { + ETH_MIB_TX_ALL_OCTETS, + ETH_MIB_TX_ALL_PKTS, + ETH_MIB_RX_ALL_OCTETS, + ETH_MIB_RX_ALL_PKTS, +}; + + +static void bcm_enet_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, bcm_enet_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); + drvinfo->n_stats = BCM_ENET_STATS_LEN; +} + +static int 
bcm_enet_get_sset_count(struct net_device *netdev, + int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCM_ENET_STATS_LEN; + default: + return -EINVAL; + } +} + +static void bcm_enet_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcm_enet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void update_mib_counters(struct bcm_enet_priv *priv) +{ + int i; + + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + u32 val; + char *p; + + s = &bcm_enet_gstrings_stats[i]; + if (s->mib_reg == -1) + continue; + + val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); + p = (char *)priv + s->stat_offset; + + if (s->sizeof_stat == sizeof(u64)) + *(u64 *)p += val; + else + *(u32 *)p += val; + } + + /* also empty unused mib counters to make sure mib counter + * overflow interrupt is cleared */ + for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) + (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); +} + +static void bcm_enet_update_mib_counters_defer(struct work_struct *t) +{ + struct bcm_enet_priv *priv; + + priv = container_of(t, struct bcm_enet_priv, mib_update_task); + mutex_lock(&priv->mib_update_lock); + update_mib_counters(priv); + mutex_unlock(&priv->mib_update_lock); + + /* reenable mib interrupt */ + if (netif_running(priv->net_dev)) + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); +} + +static void bcm_enet_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcm_enet_priv *priv; + int i; + + priv = netdev_priv(netdev); + + mutex_lock(&priv->mib_update_lock); + update_mib_counters(priv); + + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + char *p; + + s = &bcm_enet_gstrings_stats[i]; + if (s->mib_reg == -1) + p = (char *)&netdev->stats; + else + p = (char *)priv; + p += s->stat_offset; + data[i] = (s->sizeof_stat == sizeof(u64)) ? + *(u64 *)p : *(u32 *)p; + } + mutex_unlock(&priv->mib_update_lock); +} + +static int bcm_enet_nway_reset(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return genphy_restart_aneg(priv->phydev); + } + + return -EOPNOTSUPP; +} + +static int bcm_enet_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + cmd->maxrxpkt = 0; + cmd->maxtxpkt = 0; + + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_gset(priv->phydev, cmd); + } else { + cmd->autoneg = 0; + ethtool_cmd_speed_set(cmd, ((priv->force_speed_100) + ? SPEED_100 : SPEED_10)); + cmd->duplex = (priv->force_duplex_full) ? 
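+
+ /* Aside on the ethtool statistics above: GEN_STAT()/DEV_STAT() record a
+  * (sizeof, offsetof) pair per counter, so update_mib_counters() can
+  * accumulate the 32-bit MIB registers into either u32 or u64 fields of
+  * bcm_enet_priv, and bcm_enet_get_ethtool_stats() can copy from either
+  * the private mib block or net_device->stats simply by checking whether
+  * mib_reg is -1.
+  */
+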
+ DUPLEX_FULL : DUPLEX_HALF; + cmd->supported = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + cmd->advertising = 0; + cmd->port = PORT_MII; + cmd->transceiver = XCVR_EXTERNAL; + } + return 0; +} + +static int bcm_enet_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_sset(priv->phydev, cmd); + } else { + + if (cmd->autoneg || + (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) || + cmd->port != PORT_MII) + return -EINVAL; + + priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0; + priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0; + + if (netif_running(dev)) + bcm_enet_adjust_link(dev); + return 0; + } +} + +static void bcm_enet_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + /* rx/tx ring is actually only limited by memory */ + ering->rx_max_pending = 8192; + ering->tx_max_pending = 8192; + ering->rx_pending = priv->rx_ring_size; + ering->tx_pending = priv->tx_ring_size; +} + +static int bcm_enet_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + int was_running; + + priv = netdev_priv(dev); + + was_running = 0; + if (netif_running(dev)) { + bcm_enet_stop(dev); + was_running = 1; + } + + priv->rx_ring_size = ering->rx_pending; + priv->tx_ring_size = ering->tx_pending; + + if (was_running) { + int err; + + err = bcm_enet_open(dev); + if (err) + dev_close(dev); + else + bcm_enet_set_multicast_list(dev); + } + return 0; +} + +static void bcm_enet_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + ecmd->autoneg = priv->pause_auto; + ecmd->rx_pause = priv->pause_rx; + ecmd->tx_pause = priv->pause_tx; +} + +static int bcm_enet_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + if (priv->has_phy) { + if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { + /* asymetric pause mode not supported, + * actually possible but integrated PHY has RO + * asym_pause bit */ + return -EINVAL; + } + } else { + /* no pause autoneg on direct mii connection */ + if (ecmd->autoneg) + return -EINVAL; + } + + priv->pause_auto = ecmd->autoneg; + priv->pause_rx = ecmd->rx_pause; + priv->pause_tx = ecmd->tx_pause; + + return 0; +} + +static const struct ethtool_ops bcm_enet_ethtool_ops = { + .get_strings = bcm_enet_get_strings, + .get_sset_count = bcm_enet_get_sset_count, + .get_ethtool_stats = bcm_enet_get_ethtool_stats, + .nway_reset = bcm_enet_nway_reset, + .get_settings = bcm_enet_get_settings, + .set_settings = bcm_enet_set_settings, + .get_drvinfo = bcm_enet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = bcm_enet_get_ringparam, + .set_ringparam = bcm_enet_set_ringparam, + .get_pauseparam = bcm_enet_get_pauseparam, + .set_pauseparam = bcm_enet_set_pauseparam, +}; + +static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_mii_ioctl(priv->phydev, rq, cmd); + } else { + struct mii_if_info mii; + + mii.dev = dev; + mii.mdio_read = bcm_enet_mdio_read_mii; + mii.mdio_write = 
bcm_enet_mdio_write_mii; + mii.phy_id = 0; + mii.phy_id_mask = 0x3f; + mii.reg_num_mask = 0x1f; + return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); + } +} + +/* + * calculate actual hardware mtu + */ +static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) +{ + int actual_mtu; + + actual_mtu = mtu; + + /* add ethernet header + vlan tag size */ + actual_mtu += VLAN_ETH_HLEN; + + if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU) + return -EINVAL; + + /* + * setup maximum size before we get overflow mark in + * descriptor, note that this will not prevent reception of + * big frames, they will be split into multiple buffers + * anyway + */ + priv->hw_mtu = actual_mtu; + + /* + * align rx buffer size to dma burst len, account FCS since + * it's appended + */ + priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, + priv->dma_maxburst * 4); + return 0; +} + +/* + * adjust mtu, can't be called while device is running + */ +static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + + if (netif_running(dev)) + return -EBUSY; + + ret = compute_hw_mtu(netdev_priv(dev), new_mtu); + if (ret) + return ret; + dev->mtu = new_mtu; + return 0; +} + +/* + * preinit hardware to allow mii operation while device is down + */ +static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) +{ + u32 val; + int limit; + + /* make sure mac is disabled */ + bcm_enet_disable_mac(priv); + + /* soft reset mac */ + val = ENET_CTL_SRESET_MASK; + enet_writel(priv, val, ENET_CTL_REG); + wmb(); + + limit = 1000; + do { + val = enet_readl(priv, ENET_CTL_REG); + if (!(val & ENET_CTL_SRESET_MASK)) + break; + udelay(1); + } while (limit--); + + /* select correct mii interface */ + val = enet_readl(priv, ENET_CTL_REG); + if (priv->use_external_mii) + val |= ENET_CTL_EPHYSEL_MASK; + else + val &= ~ENET_CTL_EPHYSEL_MASK; + enet_writel(priv, val, ENET_CTL_REG); + + /* turn on mdc clock */ + enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | + ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); + + /* set mib counters to self-clear when read */ + val = enet_readl(priv, ENET_MIBCTL_REG); + val |= ENET_MIBCTL_RDCLEAR_MASK; + enet_writel(priv, val, ENET_MIBCTL_REG); +} + +static const struct net_device_ops bcm_enet_ops = { + .ndo_open = bcm_enet_open, + .ndo_stop = bcm_enet_stop, + .ndo_start_xmit = bcm_enet_start_xmit, + .ndo_set_mac_address = bcm_enet_set_mac_address, + .ndo_set_rx_mode = bcm_enet_set_multicast_list, + .ndo_do_ioctl = bcm_enet_ioctl, + .ndo_change_mtu = bcm_enet_change_mtu, +}; + +/* + * allocate netdevice, request register memory and register device. + */ +static int bcm_enet_probe(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct bcm63xx_enet_platform_data *pd; + struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; + struct mii_bus *bus; + const char *clk_name; + int i, ret; + + /* stop if shared driver failed, assume driver->probe will be + * called in the same order we register devices (correct ?) 
*/ + if (!bcm_enet_shared_base[0]) + return -ENODEV; + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); + if (!res_irq || !res_irq_rx || !res_irq_tx) + return -ENODEV; + + ret = 0; + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) + return -ENOMEM; + priv = netdev_priv(dev); + + priv->enet_is_sw = false; + priv->dma_maxburst = BCMENET_DMA_MAXBURST; + + ret = compute_hw_mtu(priv, dev->mtu); + if (ret) + goto out; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res_mem); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto out; + } + + dev->irq = priv->irq = res_irq->start; + priv->irq_rx = res_irq_rx->start; + priv->irq_tx = res_irq_tx->start; + priv->mac_id = pdev->id; + + /* get rx & tx dma channel id for this mac */ + if (priv->mac_id == 0) { + priv->rx_chan = 0; + priv->tx_chan = 1; + clk_name = "enet0"; + } else { + priv->rx_chan = 2; + priv->tx_chan = 3; + clk_name = "enet1"; + } + + priv->mac_clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(priv->mac_clk)) { + ret = PTR_ERR(priv->mac_clk); + goto out; + } + clk_prepare_enable(priv->mac_clk); + + /* initialize default and fetch platform data */ + priv->rx_ring_size = BCMENET_DEF_RX_DESC; + priv->tx_ring_size = BCMENET_DEF_TX_DESC; + + pd = dev_get_platdata(&pdev->dev); + if (pd) { + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + priv->has_phy = pd->has_phy; + priv->phy_id = pd->phy_id; + priv->has_phy_interrupt = pd->has_phy_interrupt; + priv->phy_interrupt = pd->phy_interrupt; + priv->use_external_mii = !pd->use_internal_phy; + priv->pause_auto = pd->pause_auto; + priv->pause_rx = pd->pause_rx; + priv->pause_tx = pd->pause_tx; + priv->force_duplex_full = pd->force_duplex_full; + priv->force_speed_100 = pd->force_speed_100; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_desc_shift = pd->dma_desc_shift; + } + + if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { + /* using internal PHY, enable clock */ + priv->phy_clk = clk_get(&pdev->dev, "ephy"); + if (IS_ERR(priv->phy_clk)) { + ret = PTR_ERR(priv->phy_clk); + priv->phy_clk = NULL; + goto out_put_clk_mac; + } + clk_prepare_enable(priv->phy_clk); + } + + /* do minimal hardware init to be able to probe mii bus */ + bcm_enet_hw_preinit(priv); + + /* MII bus registration */ + if (priv->has_phy) { + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + ret = -ENOMEM; + goto out_uninit_hw; + } + + bus = priv->mii_bus; + bus->name = "bcm63xx_enet MII bus"; + bus->parent = &pdev->dev; + bus->priv = priv; + bus->read = bcm_enet_mdio_read_phylib; + bus->write = bcm_enet_mdio_write_phylib; + sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id); + + /* only probe bus where we think the PHY is, because + * the mdio read operation return 0 instead of 0xffff + * if a slave is not present on hw */ + bus->phy_mask = ~(1 << priv->phy_id); + + bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR, + GFP_KERNEL); + if (!bus->irq) { + ret = -ENOMEM; + goto out_free_mdio; + } + + if (priv->has_phy_interrupt) + bus->irq[priv->phy_id] = priv->phy_interrupt; + else + bus->irq[priv->phy_id] = PHY_POLL; + + ret = mdiobus_register(bus); + if (ret) { + dev_err(&pdev->dev, "unable to register mdio bus\n"); + goto 
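+
+ /* Note: setting bus->phy_mask = ~(1 << priv->phy_id) above restricts the
+  * phylib scan to the one address we expect; as the comment in the probe
+  * path explains, a read of an absent PHY returns 0 rather than the usual
+  * 0xffff and would otherwise be mistaken for a present device.
+  */
+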
out_free_mdio; + } + } else { + + /* run platform code to initialize PHY device */ + if (pd->mii_config && + pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, + bcm_enet_mdio_write_mii)) { + dev_err(&pdev->dev, "unable to configure mdio bus\n"); + goto out_uninit_hw; + } + } + + spin_lock_init(&priv->rx_lock); + + /* init rx timeout (used for oom) */ + init_timer(&priv->rx_timeout); + priv->rx_timeout.function = bcm_enet_refill_rx_timer; + priv->rx_timeout.data = (unsigned long)dev; + + /* init the mib update lock&work */ + mutex_init(&priv->mib_update_lock); + INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); + + /* zero mib counters */ + for (i = 0; i < ENET_MIB_REG_COUNT; i++) + enet_writel(priv, 0, ENET_MIB_REG(i)); + + /* register netdevice */ + dev->netdev_ops = &bcm_enet_ops; + netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + + dev->ethtool_ops = &bcm_enet_ethtool_ops; + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = register_netdev(dev); + if (ret) + goto out_unregister_mdio; + + netif_carrier_off(dev); + platform_set_drvdata(pdev, dev); + priv->pdev = pdev; + priv->net_dev = dev; + + return 0; + +out_unregister_mdio: + if (priv->mii_bus) + mdiobus_unregister(priv->mii_bus); + +out_free_mdio: + if (priv->mii_bus) + mdiobus_free(priv->mii_bus); + +out_uninit_hw: + /* turn off mdc clock */ + enet_writel(priv, 0, ENET_MIISC_REG); + if (priv->phy_clk) { + clk_disable_unprepare(priv->phy_clk); + clk_put(priv->phy_clk); + } + +out_put_clk_mac: + clk_disable_unprepare(priv->mac_clk); + clk_put(priv->mac_clk); +out: + free_netdev(dev); + return ret; +} + + +/* + * exit func, stops hardware and unregisters netdevice + */ +static int bcm_enet_remove(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + + /* stop netdevice */ + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); + unregister_netdev(dev); + + /* turn off mdc clock */ + enet_writel(priv, 0, ENET_MIISC_REG); + + if (priv->has_phy) { + mdiobus_unregister(priv->mii_bus); + mdiobus_free(priv->mii_bus); + } else { + struct bcm63xx_enet_platform_data *pd; + + pd = dev_get_platdata(&pdev->dev); + if (pd && pd->mii_config) + pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, + bcm_enet_mdio_write_mii); + } + + /* disable hw block clocks */ + if (priv->phy_clk) { + clk_disable_unprepare(priv->phy_clk); + clk_put(priv->phy_clk); + } + clk_disable_unprepare(priv->mac_clk); + clk_put(priv->mac_clk); + + free_netdev(dev); + return 0; +} + +struct platform_driver bcm63xx_enet_driver = { + .probe = bcm_enet_probe, + .remove = bcm_enet_remove, + .driver = { + .name = "bcm63xx_enet", + .owner = THIS_MODULE, + }, +}; + +/* + * switch mii access callbacks + */ +static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv, + int ext, int phy_id, int location) +{ + u32 reg; + int ret; + + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); + + reg = ENETSW_MDIOC_RD_MASK | + (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | + (location << ENETSW_MDIOC_REG_SHIFT); + + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; + + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + ret = enetsw_readw(priv, ENETSW_MDIOD_REG); + spin_unlock_bh(&priv->enetsw_mdio_lock); + return ret; +} + +static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv, + int ext, int phy_id, int location, + uint16_t data) +{ + u32 reg; + + spin_lock_bh(&priv->enetsw_mdio_lock); + enetsw_writel(priv, 0, ENETSW_MDIOC_REG); + + reg = ENETSW_MDIOC_WR_MASK | + (phy_id << ENETSW_MDIOC_PHYID_SHIFT) | + 
(location << ENETSW_MDIOC_REG_SHIFT); + + if (ext) + reg |= ENETSW_MDIOC_EXT_MASK; + + reg |= data; + + enetsw_writel(priv, reg, ENETSW_MDIOC_REG); + udelay(50); + spin_unlock_bh(&priv->enetsw_mdio_lock); +} + +static inline int bcm_enet_port_is_rgmii(int portid) +{ + return portid >= ENETSW_RGMII_PORT0; +} + +/* + * enet sw PHY polling + */ +static void swphy_poll_timer(unsigned long data) +{ + struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data; + unsigned int i; + + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + int val, j, up, advertise, lpa, lpa2, speed, duplex, media; + int external_phy = bcm_enet_port_is_rgmii(i); + u8 override; + + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (port->bypass_link) + continue; + + /* dummy read to clear */ + for (j = 0; j < 2; j++) + val = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_BMSR); + + if (val == 0xffff) + continue; + + up = (val & BMSR_LSTATUS) ? 1 : 0; + if (!(up ^ priv->sw_port_link[i])) + continue; + + priv->sw_port_link[i] = up; + + /* link changed */ + if (!up) { + dev_info(&priv->pdev->dev, "link DOWN on %s\n", + port->name); + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + continue; + } + + advertise = bcmenet_sw_mdio_read(priv, external_phy, + port->phy_id, MII_ADVERTISE); + + lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_LPA); + + lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id, + MII_STAT1000); + + /* figure out media and duplex from advertise and LPA values */ + media = mii_nway_result(lpa & advertise); + duplex = (media & ADVERTISE_FULL) ? 1 : 0; + if (lpa2 & LPA_1000FULL) + duplex = 1; + + if (lpa2 & (LPA_1000FULL | LPA_1000HALF)) + speed = 1000; + else { + if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) + speed = 100; + else + speed = 10; + } + + dev_info(&priv->pdev->dev, + "link UP on %s, %dMbps, %s-duplex\n", + port->name, speed, duplex ? 
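+
+ /* Note on the resolution above: mii_nway_result(lpa & advertise) yields
+  * the best common 10/100 mode from the two advertisement registers,
+  * while MII_STAT1000 is checked separately so that a gigabit link
+  * partner overrides the speed and LPA_1000FULL additionally forces full
+  * duplex.
+  */
+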
"full" : "half"); + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + if (speed == 1000) + override |= ENETSW_IMPOV_1000_MASK; + else if (speed == 100) + override |= ENETSW_IMPOV_100_MASK; + if (duplex) + override |= ENETSW_IMPOV_FDX_MASK; + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + priv->swphy_poll.expires = jiffies + HZ; + add_timer(&priv->swphy_poll); +} + +/* + * open callback, allocate dma rings & buffers and start rx operation + */ +static int bcm_enetsw_open(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i, ret; + unsigned int size; + void *p; + u32 val; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + /* mask all interrupts and request them */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, + 0, dev->name, dev); + if (ret) + goto out_freeirq; + + if (priv->irq_tx != -1) { + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, + 0, dev->name, dev); + if (ret) + goto out_freeirq_rx; + } + + /* allocate rx dma ring */ + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate rx ring %u\n", size); + ret = -ENOMEM; + goto out_freeirq_tx; + } + + memset(p, 0, size); + priv->rx_desc_alloc_size = size; + priv->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate tx ring\n"); + ret = -ENOMEM; + goto out_free_rx_ring; + } + + memset(p, 0, size); + priv->tx_desc_alloc_size = size; + priv->tx_desc_cpu = p; + + priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, + GFP_KERNEL); + if (!priv->tx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_ring; + } + + priv->tx_desc_count = priv->tx_ring_size; + priv->tx_dirty_desc = 0; + priv->tx_curr_desc = 0; + spin_lock_init(&priv->tx_lock); + + /* init & fill rx ring with skbs */ + priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, + GFP_KERNEL); + if (!priv->rx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out_free_tx_skb; + } + + priv->rx_desc_count = 0; + priv->rx_dirty_desc = 0; + priv->rx_curr_desc = 0; + + /* disable all ports */ + for (i = 0; i < priv->num_ports; i++) { + enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, + ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | + ENETSW_PTCTRL_TXDIS_MASK, + ENETSW_PTCTRL_REG(i)); + + priv->sw_port_link[i] = 0; + } + + /* reset mib */ + val = enetsw_readb(priv, ENETSW_GMCR_REG); + val |= ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + val &= ~ENETSW_GMCR_RST_MIB_MASK; + enetsw_writeb(priv, val, ENETSW_GMCR_REG); + mdelay(1); + + /* force CPU port state */ + val = enetsw_readb(priv, ENETSW_IMPOV_REG); + val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; + enetsw_writeb(priv, val, ENETSW_IMPOV_REG); + + /* enable switch forward engine */ + val = enetsw_readb(priv, ENETSW_SWMODE_REG); + val |= ENETSW_SWMODE_FWD_EN_MASK; + enetsw_writeb(priv, val, ENETSW_SWMODE_REG); + + /* enable jumbo on all ports */ + enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG); + enetsw_writew(priv, 9728, 
ENETSW_JMBCTL_MAXSIZE_REG); + + /* initialize flow control buffer allocation */ + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + + if (bcm_enet_refill_rx(dev)) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG, priv->rx_chan); + enet_dmas_writel(priv, priv->tx_desc_dma, + ENETDMAS_RSTART_REG, priv->tx_chan); + + /* clear remaining state ram for rx & tx channel */ + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan); + + /* set dma maximum burst len */ + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->rx_chan); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST, priv->tx_chan); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + + /* all set, enable mac and interrupts, start dma engine and + * kick rx dma channel + */ + wmb(); + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG, priv->rx_chan); + + /* watch "packet transferred" interrupt in rx and tx */ + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR, priv->rx_chan); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR, priv->tx_chan); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&priv->napi); + + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK, priv->tx_chan); + + netif_carrier_on(dev); + netif_start_queue(dev); + + /* apply override config for bypass_link ports here. 
*/ + for (i = 0; i < priv->num_ports; i++) { + struct bcm63xx_enetsw_port *port; + u8 override; + port = &priv->used_ports[i]; + if (!port->used) + continue; + + if (!port->bypass_link) + continue; + + override = ENETSW_PORTOV_ENABLE_MASK | + ENETSW_PORTOV_LINKUP_MASK; + + switch (port->force_speed) { + case 1000: + override |= ENETSW_IMPOV_1000_MASK; + break; + case 100: + override |= ENETSW_IMPOV_100_MASK; + break; + case 10: + break; + default: + pr_warn("invalid forced speed on port %s: assume 10\n", + port->name); + break; + } + + if (port->force_duplex_full) + override |= ENETSW_IMPOV_FDX_MASK; + + + enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); + enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); + } + + /* start phy polling timer */ + init_timer(&priv->swphy_poll); + priv->swphy_poll.function = swphy_poll_timer; + priv->swphy_poll.data = (unsigned long)priv; + priv->swphy_poll.expires = jiffies; + add_timer(&priv->swphy_poll); + return 0; + +out: + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + kfree(priv->rx_skb); + +out_free_tx_skb: + kfree(priv->tx_skb); + +out_free_tx_ring: + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + +out_free_rx_ring: + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + +out_freeirq_tx: + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + +out_freeirq_rx: + free_irq(priv->irq_rx, dev); + +out_freeirq: + return ret; +} + +/* stop callback */ +static int bcm_enetsw_stop(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + del_timer_sync(&priv->swphy_poll); + netif_stop_queue(dev); + napi_disable(&priv->napi); + del_timer_sync(&priv->rx_timeout); + + /* mask all interrupts */ + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); + + /* disable dma & mac */ + bcm_enet_disable_dma(priv, priv->tx_chan); + bcm_enet_disable_dma(priv, priv->rx_chan); + + /* force reclaim of all tx buffers */ + bcm_enet_tx_reclaim(dev, 1); + + /* free the rx skb ring */ + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(priv->rx_skb); + kfree(priv->tx_skb); + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + if (priv->irq_tx != -1) + free_irq(priv->irq_tx, dev); + free_irq(priv->irq_rx, dev); + + return 0; +} + +/* try to sort out phy external status by walking the used_port field + * in the bcm_enet_priv structure. in case the phy address is not + * assigned to any physical port on the switch, assume it is external + * (and yell at the user). 
+ */ +static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id) +{ + int i; + + for (i = 0; i < priv->num_ports; ++i) { + if (!priv->used_ports[i].used) + continue; + if (priv->used_ports[i].phy_id == phy_id) + return bcm_enet_port_is_rgmii(i); + } + + printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n", + phy_id); + return 1; +} + +/* can't use bcmenet_sw_mdio_read directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id, + int location) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + return bcmenet_sw_mdio_read(priv, + bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location); +} + +/* can't use bcmenet_sw_mdio_write directly as we need to sort out + * external/internal status of the given phy_id first. + */ +static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id, + int location, + int val) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id), + phy_id, location, val); +} + +static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct mii_if_info mii; + + mii.dev = dev; + mii.mdio_read = bcm_enetsw_mii_mdio_read; + mii.mdio_write = bcm_enetsw_mii_mdio_write; + mii.phy_id = 0; + mii.phy_id_mask = 0x3f; + mii.reg_num_mask = 0x1f; + return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); + +} + +static const struct net_device_ops bcm_enetsw_ops = { + .ndo_open = bcm_enetsw_open, + .ndo_stop = bcm_enetsw_stop, + .ndo_start_xmit = bcm_enet_start_xmit, + .ndo_change_mtu = bcm_enet_change_mtu, + .ndo_do_ioctl = bcm_enetsw_ioctl, +}; + + +static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = { + { "rx_packets", DEV_STAT(rx_packets), -1 }, + { "tx_packets", DEV_STAT(tx_packets), -1 }, + { "rx_bytes", DEV_STAT(rx_bytes), -1 }, + { "tx_bytes", DEV_STAT(tx_bytes), -1 }, + { "rx_errors", DEV_STAT(rx_errors), -1 }, + { "tx_errors", DEV_STAT(tx_errors), -1 }, + { "rx_dropped", DEV_STAT(rx_dropped), -1 }, + { "tx_dropped", DEV_STAT(tx_dropped), -1 }, + + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT }, + { "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST }, + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST }, + { "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT }, + { "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 }, + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 }, + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 }, + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 }, + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023}, + { "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max), + ETHSW_MIB_RX_1024_1522 }, + { "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047), + ETHSW_MIB_RX_1523_2047 }, + { "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095), + ETHSW_MIB_RX_2048_4095 }, + { "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191), + ETHSW_MIB_RX_4096_8191 }, + { "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728), + ETHSW_MIB_RX_8192_9728 }, + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, + { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, + { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, + { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, + { "tx_pause", GEN_STAT(mib.tx_pause), 
ETHSW_MIB_RX_PAUSE }, + + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, + { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, + { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, + { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, + { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, + { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, + +}; + +#define BCM_ENETSW_STATS_LEN \ + (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) + +static void bcm_enetsw_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcm_enetsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static int bcm_enetsw_get_sset_count(struct net_device *netdev, + int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCM_ENETSW_STATS_LEN; + default: + return -EINVAL; + } +} + +static void bcm_enetsw_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, bcm_enet_driver_name, 32); + strncpy(drvinfo->version, bcm_enet_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "bcm63xx", 32); + drvinfo->n_stats = BCM_ENETSW_STATS_LEN; +} + +static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcm_enet_priv *priv; + int i; + + priv = netdev_priv(netdev); + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + u32 lo, hi; + char *p; + int reg; + + s = &bcm_enetsw_gstrings_stats[i]; + + reg = s->mib_reg; + if (reg == -1) + continue; + + lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); + p = (char *)priv + s->stat_offset; + + if (s->sizeof_stat == sizeof(u64)) { + hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); + *(u64 *)p = ((u64)hi << 32 | lo); + } else { + *(u32 *)p = lo; + } + } + + for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + char *p; + + s = &bcm_enetsw_gstrings_stats[i]; + + if (s->mib_reg == -1) + p = (char *)&netdev->stats + s->stat_offset; + else + p = (char *)priv + s->stat_offset; + + data[i] = (s->sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +static void bcm_enetsw_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + /* rx/tx ring is actually only limited by memory */ + ering->rx_max_pending = 8192; + ering->tx_max_pending = 8192; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + ering->rx_pending = priv->rx_ring_size; + ering->tx_pending = priv->tx_ring_size; +} + +static int bcm_enetsw_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + int was_running; + + priv = netdev_priv(dev); + + was_running = 0; + if (netif_running(dev)) { + bcm_enetsw_stop(dev); + was_running = 1; + } + + priv->rx_ring_size = ering->rx_pending; + priv->tx_ring_size = ering->tx_pending; + + if (was_running) { + int err; + + err = bcm_enetsw_open(dev); + if (err) + dev_close(dev); + } + return 0; +} + +static struct ethtool_ops bcm_enetsw_ethtool_ops = { + .get_strings = bcm_enetsw_get_strings, + .get_sset_count = bcm_enetsw_get_sset_count, + .get_ethtool_stats = bcm_enetsw_get_ethtool_stats, + .get_drvinfo = bcm_enetsw_get_drvinfo, + .get_ringparam = bcm_enetsw_get_ringparam, + .set_ringparam = bcm_enetsw_set_ringparam, +}; + +/* allocate netdevice, request register memory and register device. */ +static int bcm_enetsw_probe(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct bcm63xx_enetsw_platform_data *pd; + struct resource *res_mem; + int ret, irq_rx, irq_tx; + + /* stop if shared driver failed, assume driver->probe will be + * called in the same order we register devices (correct ?) + */ + if (!bcm_enet_shared_base[0]) + return -ENODEV; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq_rx = platform_get_irq(pdev, 0); + irq_tx = platform_get_irq(pdev, 1); + if (!res_mem || irq_rx < 0) + return -ENODEV; + + ret = 0; + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) + return -ENOMEM; + priv = netdev_priv(dev); + memset(priv, 0, sizeof(*priv)); + + /* initialize default and fetch platform data */ + priv->enet_is_sw = true; + priv->irq_rx = irq_rx; + priv->irq_tx = irq_tx; + priv->rx_ring_size = BCMENET_DEF_RX_DESC; + priv->tx_ring_size = BCMENET_DEF_TX_DESC; + priv->dma_maxburst = BCMENETSW_DMA_MAXBURST; + + pd = dev_get_platdata(&pdev->dev); + if (pd) { + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + memcpy(priv->used_ports, pd->used_ports, + sizeof(pd->used_ports)); + priv->num_ports = pd->num_ports; + priv->dma_has_sram = pd->dma_has_sram; + priv->dma_chan_en_mask = pd->dma_chan_en_mask; + priv->dma_chan_int_mask = pd->dma_chan_int_mask; + priv->dma_chan_width = pd->dma_chan_width; + } + + ret = compute_hw_mtu(priv, dev->mtu); + if (ret) + goto out; + + if (!request_mem_region(res_mem->start, resource_size(res_mem), + "bcm63xx_enetsw")) { + ret = -EBUSY; + goto out; + } + + priv->base = ioremap(res_mem->start, resource_size(res_mem)); + if (priv->base == NULL) { + ret = -ENOMEM; + goto out_release_mem; + } + + priv->mac_clk = clk_get(&pdev->dev, "enetsw"); + if (IS_ERR(priv->mac_clk)) { + ret = PTR_ERR(priv->mac_clk); + goto out_unmap; + } + clk_enable(priv->mac_clk); + + priv->rx_chan = 0; + priv->tx_chan = 1; + spin_lock_init(&priv->rx_lock); + + /* init rx timeout (used for oom) */ + init_timer(&priv->rx_timeout); + priv->rx_timeout.function = bcm_enet_refill_rx_timer; + priv->rx_timeout.data = (unsigned long)dev; + + /* register netdevice */ + dev->netdev_ops = &bcm_enetsw_ops; + 
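/*
 * Illustrative sketch, not part of the patch: the ethtool stats path in
 * bcm_enetsw_get_ethtool_stats() above widens 64-bit switch MIB counters
 * by reading two consecutive 32-bit MIB words and combining them as
 * ((u64)hi << 32) | lo.  A standalone userspace model of that combine
 * step, using invented sample register values:
 */
#include <stdint.h>
#include <stdio.h>

/* stand-in for two consecutive ENETSW_MIB_REG() reads (hypothetical values) */
static uint32_t mib_read(int reg)
{
	static const uint32_t regs[2] = { 0x89abcdefu, 0x00000012u }; /* lo, hi */
	return regs[reg];
}

int main(void)
{
	uint32_t lo = mib_read(0);          /* ENETSW_MIB_REG(reg)     */
	uint32_t hi = mib_read(1);          /* ENETSW_MIB_REG(reg + 1) */
	uint64_t counter = ((uint64_t)hi << 32) | lo;

	printf("64-bit MIB counter: 0x%016llx\n",
	       (unsigned long long)counter);
	return 0;
}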
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + dev->ethtool_ops = &bcm_enetsw_ethtool_ops; + SET_NETDEV_DEV(dev, &pdev->dev); + + spin_lock_init(&priv->enetsw_mdio_lock); + + ret = register_netdev(dev); + if (ret) + goto out_put_clk; + + netif_carrier_off(dev); + platform_set_drvdata(pdev, dev); + priv->pdev = pdev; + priv->net_dev = dev; + + return 0; + +out_put_clk: + clk_put(priv->mac_clk); + +out_unmap: + iounmap(priv->base); + +out_release_mem: + release_mem_region(res_mem->start, resource_size(res_mem)); +out: + free_netdev(dev); + return ret; +} + + +/* exit func, stops hardware and unregisters netdevice */ +static int bcm_enetsw_remove(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct resource *res; + + /* stop netdevice */ + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); + unregister_netdev(dev); + + /* release device resources */ + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + free_netdev(dev); + return 0; +} + +struct platform_driver bcm63xx_enetsw_driver = { + .probe = bcm_enetsw_probe, + .remove = bcm_enetsw_remove, + .driver = { + .name = "bcm63xx_enetsw", + .owner = THIS_MODULE, + }, +}; + +/* reserve & remap memory space shared between all macs */ +static int bcm_enet_shared_probe(struct platform_device *pdev) +{ + struct resource *res; + void __iomem *p[3]; + unsigned int i; + + memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base)); + + for (i = 0; i < 3; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + p[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(p[i])) + return PTR_ERR(p[i]); + } + + memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base)); + + return 0; +} + +static int bcm_enet_shared_remove(struct platform_device *pdev) +{ + return 0; +} + +/* this "shared" driver is needed because both macs share a single + * address space + */ +struct platform_driver bcm63xx_enet_shared_driver = { + .probe = bcm_enet_shared_probe, + .remove = bcm_enet_shared_remove, + .driver = { + .name = "bcm63xx_enet_shared", + .owner = THIS_MODULE, + }, +}; + +/* entry point */ +static int __init bcm_enet_init(void) +{ + int ret; + + ret = platform_driver_register(&bcm63xx_enet_shared_driver); + if (ret) + return ret; + + ret = platform_driver_register(&bcm63xx_enet_driver); + if (ret) + platform_driver_unregister(&bcm63xx_enet_shared_driver); + + ret = platform_driver_register(&bcm63xx_enetsw_driver); + if (ret) { + platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enet_shared_driver); + } + + return ret; +} + +static void __exit bcm_enet_exit(void) +{ + platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enetsw_driver); + platform_driver_unregister(&bcm63xx_enet_shared_driver); +} + + +module_init(bcm_enet_init); +module_exit(bcm_enet_exit); + +MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); +MODULE_AUTHOR("Maxime Bizon "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h new file mode 100644 index 000000000..f55af4310 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -0,0 +1,360 @@ +#ifndef BCM63XX_ENET_H_ +#define BCM63XX_ENET_H_ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* default number of descriptor */ +#define BCMENET_DEF_RX_DESC 64 +#define 
BCMENET_DEF_TX_DESC 32 + +/* maximum burst len for dma (4 bytes unit) */ +#define BCMENET_DMA_MAXBURST 16 +#define BCMENETSW_DMA_MAXBURST 8 + +/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value + * must be low enough so that a DMA transfer of above burst length can + * not overflow the fifo */ +#define BCMENET_TX_FIFO_TRESH 32 + +/* + * hardware maximum rx/tx packet size including FCS, max mtu is + * actually 2047, but if we set max rx size register to 2047 we won't + * get overflow information if packet size is 2048 or above + */ +#define BCMENET_MAX_MTU 2046 + +/* + * MIB Counters register definitions +*/ +#define ETH_MIB_TX_GD_OCTETS 0 +#define ETH_MIB_TX_GD_PKTS 1 +#define ETH_MIB_TX_ALL_OCTETS 2 +#define ETH_MIB_TX_ALL_PKTS 3 +#define ETH_MIB_TX_BRDCAST 4 +#define ETH_MIB_TX_MULT 5 +#define ETH_MIB_TX_64 6 +#define ETH_MIB_TX_65_127 7 +#define ETH_MIB_TX_128_255 8 +#define ETH_MIB_TX_256_511 9 +#define ETH_MIB_TX_512_1023 10 +#define ETH_MIB_TX_1024_MAX 11 +#define ETH_MIB_TX_JAB 12 +#define ETH_MIB_TX_OVR 13 +#define ETH_MIB_TX_FRAG 14 +#define ETH_MIB_TX_UNDERRUN 15 +#define ETH_MIB_TX_COL 16 +#define ETH_MIB_TX_1_COL 17 +#define ETH_MIB_TX_M_COL 18 +#define ETH_MIB_TX_EX_COL 19 +#define ETH_MIB_TX_LATE 20 +#define ETH_MIB_TX_DEF 21 +#define ETH_MIB_TX_CRS 22 +#define ETH_MIB_TX_PAUSE 23 + +#define ETH_MIB_RX_GD_OCTETS 32 +#define ETH_MIB_RX_GD_PKTS 33 +#define ETH_MIB_RX_ALL_OCTETS 34 +#define ETH_MIB_RX_ALL_PKTS 35 +#define ETH_MIB_RX_BRDCAST 36 +#define ETH_MIB_RX_MULT 37 +#define ETH_MIB_RX_64 38 +#define ETH_MIB_RX_65_127 39 +#define ETH_MIB_RX_128_255 40 +#define ETH_MIB_RX_256_511 41 +#define ETH_MIB_RX_512_1023 42 +#define ETH_MIB_RX_1024_MAX 43 +#define ETH_MIB_RX_JAB 44 +#define ETH_MIB_RX_OVR 45 +#define ETH_MIB_RX_FRAG 46 +#define ETH_MIB_RX_DROP 47 +#define ETH_MIB_RX_CRC_ALIGN 48 +#define ETH_MIB_RX_UND 49 +#define ETH_MIB_RX_CRC 50 +#define ETH_MIB_RX_ALIGN 51 +#define ETH_MIB_RX_SYM 52 +#define ETH_MIB_RX_PAUSE 53 +#define ETH_MIB_RX_CNTRL 54 + + +/* + * SW MIB Counters register definitions +*/ +#define ETHSW_MIB_TX_ALL_OCT 0 +#define ETHSW_MIB_TX_DROP_PKTS 2 +#define ETHSW_MIB_TX_QOS_PKTS 3 +#define ETHSW_MIB_TX_BRDCAST 4 +#define ETHSW_MIB_TX_MULT 5 +#define ETHSW_MIB_TX_UNI 6 +#define ETHSW_MIB_TX_COL 7 +#define ETHSW_MIB_TX_1_COL 8 +#define ETHSW_MIB_TX_M_COL 9 +#define ETHSW_MIB_TX_DEF 10 +#define ETHSW_MIB_TX_LATE 11 +#define ETHSW_MIB_TX_EX_COL 12 +#define ETHSW_MIB_TX_PAUSE 14 +#define ETHSW_MIB_TX_QOS_OCT 15 + +#define ETHSW_MIB_RX_ALL_OCT 17 +#define ETHSW_MIB_RX_UND 19 +#define ETHSW_MIB_RX_PAUSE 20 +#define ETHSW_MIB_RX_64 21 +#define ETHSW_MIB_RX_65_127 22 +#define ETHSW_MIB_RX_128_255 23 +#define ETHSW_MIB_RX_256_511 24 +#define ETHSW_MIB_RX_512_1023 25 +#define ETHSW_MIB_RX_1024_1522 26 +#define ETHSW_MIB_RX_OVR 27 +#define ETHSW_MIB_RX_JAB 28 +#define ETHSW_MIB_RX_ALIGN 29 +#define ETHSW_MIB_RX_CRC 30 +#define ETHSW_MIB_RX_GD_OCT 31 +#define ETHSW_MIB_RX_DROP 33 +#define ETHSW_MIB_RX_UNI 34 +#define ETHSW_MIB_RX_MULT 35 +#define ETHSW_MIB_RX_BRDCAST 36 +#define ETHSW_MIB_RX_SA_CHANGE 37 +#define ETHSW_MIB_RX_FRAG 38 +#define ETHSW_MIB_RX_OVR_DISC 39 +#define ETHSW_MIB_RX_SYM 40 +#define ETHSW_MIB_RX_QOS_PKTS 41 +#define ETHSW_MIB_RX_QOS_OCT 42 +#define ETHSW_MIB_RX_1523_2047 44 +#define ETHSW_MIB_RX_2048_4095 45 +#define ETHSW_MIB_RX_4096_8191 46 +#define ETHSW_MIB_RX_8192_9728 47 + + +struct bcm_enet_mib_counters { + u64 tx_gd_octets; + u32 tx_gd_pkts; + u32 tx_all_octets; + u32 tx_all_pkts; + u32 tx_unicast; + u32 tx_brdcast; + 
u32 tx_mult; + u32 tx_64; + u32 tx_65_127; + u32 tx_128_255; + u32 tx_256_511; + u32 tx_512_1023; + u32 tx_1024_max; + u32 tx_1523_2047; + u32 tx_2048_4095; + u32 tx_4096_8191; + u32 tx_8192_9728; + u32 tx_jab; + u32 tx_drop; + u32 tx_ovr; + u32 tx_frag; + u32 tx_underrun; + u32 tx_col; + u32 tx_1_col; + u32 tx_m_col; + u32 tx_ex_col; + u32 tx_late; + u32 tx_def; + u32 tx_crs; + u32 tx_pause; + u64 rx_gd_octets; + u32 rx_gd_pkts; + u32 rx_all_octets; + u32 rx_all_pkts; + u32 rx_brdcast; + u32 rx_unicast; + u32 rx_mult; + u32 rx_64; + u32 rx_65_127; + u32 rx_128_255; + u32 rx_256_511; + u32 rx_512_1023; + u32 rx_1024_max; + u32 rx_jab; + u32 rx_ovr; + u32 rx_frag; + u32 rx_drop; + u32 rx_crc_align; + u32 rx_und; + u32 rx_crc; + u32 rx_align; + u32 rx_sym; + u32 rx_pause; + u32 rx_cntrl; +}; + + +struct bcm_enet_priv { + + /* mac id (from platform device id) */ + int mac_id; + + /* base remapped address of device */ + void __iomem *base; + + /* mac irq, rx_dma irq, tx_dma irq */ + int irq; + int irq_rx; + int irq_tx; + + /* hw view of rx & tx dma ring */ + dma_addr_t rx_desc_dma; + dma_addr_t tx_desc_dma; + + /* allocated size (in bytes) for rx & tx dma ring */ + unsigned int rx_desc_alloc_size; + unsigned int tx_desc_alloc_size; + + + struct napi_struct napi; + + /* dma channel id for rx */ + int rx_chan; + + /* number of dma desc in rx ring */ + int rx_ring_size; + + /* cpu view of rx dma ring */ + struct bcm_enet_desc *rx_desc_cpu; + + /* current number of armed descriptor given to hardware for rx */ + int rx_desc_count; + + /* next rx descriptor to fetch from hardware */ + int rx_curr_desc; + + /* next dirty rx descriptor to refill */ + int rx_dirty_desc; + + /* size of allocated rx skbs */ + unsigned int rx_skb_size; + + /* list of skb given to hw for rx */ + struct sk_buff **rx_skb; + + /* used when rx skb allocation failed, so we defer rx queue + * refill */ + struct timer_list rx_timeout; + + /* lock rx_timeout against rx normal operation */ + spinlock_t rx_lock; + + + /* dma channel id for tx */ + int tx_chan; + + /* number of dma desc in tx ring */ + int tx_ring_size; + + /* maximum dma burst size */ + int dma_maxburst; + + /* cpu view of rx dma ring */ + struct bcm_enet_desc *tx_desc_cpu; + + /* number of available descriptor for tx */ + int tx_desc_count; + + /* next tx descriptor avaiable */ + int tx_curr_desc; + + /* next dirty tx descriptor to reclaim */ + int tx_dirty_desc; + + /* list of skb given to hw for tx */ + struct sk_buff **tx_skb; + + /* lock used by tx reclaim and xmit */ + spinlock_t tx_lock; + + + /* set if internal phy is ignored and external mii interface + * is selected */ + int use_external_mii; + + /* set if a phy is connected, phy address must be known, + * probing is not possible */ + int has_phy; + int phy_id; + + /* set if connected phy has an associated irq */ + int has_phy_interrupt; + int phy_interrupt; + + /* used when a phy is connected (phylib used) */ + struct mii_bus *mii_bus; + struct phy_device *phydev; + int old_link; + int old_duplex; + int old_pause; + + /* used when no phy is connected */ + int force_speed_100; + int force_duplex_full; + + /* pause parameters */ + int pause_auto; + int pause_rx; + int pause_tx; + + /* stats */ + struct bcm_enet_mib_counters mib; + + /* after mib interrupt, mib registers update is done in this + * work queue */ + struct work_struct mib_update_task; + + /* lock mib update between userspace request and workqueue */ + struct mutex mib_update_lock; + + /* mac clock */ + struct clk *mac_clk; + + /* phy clock if 
internal phy is used */ + struct clk *phy_clk; + + /* network device reference */ + struct net_device *net_dev; + + /* platform device reference */ + struct platform_device *pdev; + + /* maximum hardware transmit/receive size */ + unsigned int hw_mtu; + + bool enet_is_sw; + + /* port mapping for switch devices */ + int num_ports; + struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; + int sw_port_link[ENETSW_MAX_PORT]; + + /* used to poll switch port state */ + struct timer_list swphy_poll; + spinlock_t enetsw_mdio_lock; + + /* dma channel enable mask */ + u32 dma_chan_en_mask; + + /* dma channel interrupt mask */ + u32 dma_chan_int_mask; + + /* DMA engine has internal SRAM */ + bool dma_has_sram; + + /* dma channel width */ + unsigned int dma_chan_width; + + /* dma descriptor shift value */ + unsigned int dma_desc_shift; +}; + + +#endif /* ! BCM63XX_ENET_H_ */ diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c new file mode 100644 index 000000000..783543ad1 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -0,0 +1,2007 @@ +/* + * Broadcom BCM7xxx System Port Ethernet MAC driver + * + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmsysport.h" + +/* I/O accessors register helpers */ +#define BCM_SYSPORT_IO_MACRO(name, offset) \ +static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \ +{ \ + u32 reg = __raw_readl(priv->base + offset + off); \ + return reg; \ +} \ +static inline void name##_writel(struct bcm_sysport_priv *priv, \ + u32 val, u32 off) \ +{ \ + __raw_writel(val, priv->base + offset + off); \ +} \ + +BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); +BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); +BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); +BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); +BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET); +BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); +BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); +BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); +BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); +BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); + +/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied + * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. + */ +#define BCM_SYSPORT_INTR_L2(which) \ +static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \ + u32 mask) \ +{ \ + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ + priv->irq##which##_mask &= ~(mask); \ +} \ +static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \ + u32 mask) \ +{ \ + intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \ + priv->irq##which##_mask |= (mask); \ +} \ + +BCM_SYSPORT_INTR_L2(0) +BCM_SYSPORT_INTR_L2(1) + +/* Register accesses to GISB/RBUS registers are expensive (few hundred + * nanoseconds), so keep the check for 64-bits explicit here to save + * one register write per-packet on 32-bits platforms. 
+ */ +static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK, + d + DESC_ADDR_HI_STATUS_LEN); +#endif + __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO); +} + +static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, + struct dma_desc *desc, + unsigned int port) +{ + /* Ports are latched, so write upper address first */ + tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); + tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); +} + +/* Ethtool operations */ +static int bcm_sysport_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int bcm_sysport_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int bcm_sysport_set_rx_csum(struct net_device *dev, + netdev_features_t wanted) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + u32 reg; + + priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); + reg = rxchk_readl(priv, RXCHK_CONTROL); + if (priv->rx_chk_en) + reg |= RXCHK_EN; + else + reg &= ~RXCHK_EN; + + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to be set in the per-packet status word + */ + if (priv->rx_chk_en && priv->crc_fwd) + reg |= RXCHK_SKIP_FCS; + else + reg &= ~RXCHK_SKIP_FCS; + + /* If Broadcom tags are enabled (e.g: using a switch), make + * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom + * tag after the Ethernet MAC Source Address. 
+ */ + if (netdev_uses_dsa(dev)) + reg |= RXCHK_BRCM_TAG_EN; + else + reg &= ~RXCHK_BRCM_TAG_EN; + + rxchk_writel(priv, reg, RXCHK_CONTROL); + + return 0; +} + +static int bcm_sysport_set_tx_csum(struct net_device *dev, + netdev_features_t wanted) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + u32 reg; + + /* Hardware transmit checksum requires us to enable the Transmit status + * block prepended to the packet contents + */ + priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + reg = tdma_readl(priv, TDMA_CONTROL); + if (priv->tsb_en) + reg |= TSB_EN; + else + reg &= ~TSB_EN; + tdma_writel(priv, reg, TDMA_CONTROL); + + return 0; +} + +static int bcm_sysport_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ dev->features; + netdev_features_t wanted = dev->wanted_features; + int ret = 0; + + if (changed & NETIF_F_RXCSUM) + ret = bcm_sysport_set_rx_csum(dev, wanted); + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) + ret = bcm_sysport_set_tx_csum(dev, wanted); + + return ret; +} + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_MIB_RX("rx_multicast", mib.rx.mca), + STAT_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_MIB_RX("rx_control", mib.rx.cf), + STAT_MIB_RX("rx_pause", mib.rx.pf), + STAT_MIB_RX("rx_unknown", mib.rx.uo), + STAT_MIB_RX("rx_align", mib.rx.aln), + STAT_MIB_RX("rx_outrange", mib.rx.flr), + STAT_MIB_RX("rx_code", mib.rx.cde), + STAT_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_MIB_RX("rx_unicast", mib.rx.uc), + STAT_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_MIB_TX("tx_4096_9216_oct", 
mib.tx.pkt_cnt.cnt_9216), + STAT_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_MIB_TX("tx_multicast", mib.tx.mca), + STAT_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_MIB_TX("tx_pause", mib.tx.pf), + STAT_MIB_TX("tx_control", mib.tx.cf), + STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_MIB_TX("tx_defer", mib.tx.drf), + STAT_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_MIB_TX("tx_single_col", mib.tx.scl), + STAT_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_MIB_TX("tx_excess_col", mib.tx.ecl), + STAT_MIB_TX("tx_frags", mib.tx.frg), + STAT_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* RXCHK misc statistics */ + STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), + STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, + RXCHK_OTHER_DISC_CNTR), + /* RBUF misc statistics */ + STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), + STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), + STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), + STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), +}; + +#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) + +static void bcm_sysport_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, "0.1", sizeof(info->version)); + strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); + info->n_stats = BCM_SYSPORT_STATS_LEN; +} + +static u32 bcm_sysport_get_msglvl(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + + priv->msg_enable = enable; +} + +static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCM_SYSPORT_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcm_sysport_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcm_sysport_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + default: + break; + } +} + +static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + const struct bcm_sysport_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcm_sysport_gstrings_stats[i]; + switch (s->type) { + case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_SOFT: + continue; + case BCM_SYSPORT_STAT_MIB_RX: + case BCM_SYSPORT_STAT_MIB_TX: + case BCM_SYSPORT_STAT_RUNT: + if (s->type != BCM_SYSPORT_STAT_MIB_RX) + offset = UMAC_MIB_STAT_OFFSET; + val = umac_readl(priv, UMAC_MIB_START + j + offset); + break; + case BCM_SYSPORT_STAT_RXCHK: + val = rxchk_readl(priv, s->reg_offset); + if (val == ~0) + rxchk_writel(priv, 0, 
s->reg_offset); + break; + case BCM_SYSPORT_STAT_RBUF: + val = rbuf_readl(priv, s->reg_offset); + if (val == ~0) + rbuf_writel(priv, 0, s->reg_offset); + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } + + netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); +} + +static void bcm_sysport_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcm_sysport_update_mib_counters(priv); + + for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { + const struct bcm_sysport_stats *s; + char *p; + + s = &bcm_sysport_gstrings_stats[i]; + if (s->type == BCM_SYSPORT_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + data[i] = *(u32 *)p; + } +} + +static void bcm_sysport_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + u32 reg; + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; + wol->wolopts = priv->wolopts; + + if (!(priv->wolopts & WAKE_MAGICSECURE)) + return; + + /* Return the programmed SecureOn password */ + reg = umac_readl(priv, UMAC_PSW_MS); + put_unaligned_be16(reg, &wol->sopass[0]); + reg = umac_readl(priv, UMAC_PSW_LS); + put_unaligned_be32(reg, &wol->sopass[2]); +} + +static int bcm_sysport_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~supported) + return -EINVAL; + + /* Program the SecureOn password */ + if (wol->wolopts & WAKE_MAGICSECURE) { + umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), + UMAC_PSW_MS); + umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), + UMAC_PSW_LS); + } + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = 0; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = 1; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) +{ + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + dma_unmap_addr_set(cb, dma_addr, 0); +} + +static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, + struct bcm_sysport_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct net_device *ndev = priv->netdev; + dma_addr_t mapping; + int ret; + + cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); + if (!cb->skb) { + netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); + return -ENOMEM; + } + + mapping = dma_map_single(kdev, cb->skb->data, + RX_BUF_LENGTH, DMA_FROM_DEVICE); + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.rx_dma_failed++; + bcm_sysport_free_cb(cb); + netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); + return ret; + } + + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); + + priv->rx_bd_assign_index++; + priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); + priv->rx_bd_assign_ptr = priv->rx_bds + + (priv->rx_bd_assign_index * DESC_SIZE); + + netif_dbg(priv, rx_status, ndev, "RX refill\n"); + + return 0; +} + +static int 
bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) +{ + struct bcm_sysport_cb *cb; + int ret = 0; + unsigned int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[priv->rx_bd_assign_index]; + if (cb->skb) + continue; + + ret = bcm_sysport_rx_refill(priv, cb); + if (ret) + break; + } + + return ret; +} + +/* Poll the hardware for up to budget packets to process */ +static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, + unsigned int budget) +{ + struct device *kdev = &priv->pdev->dev; + struct net_device *ndev = priv->netdev; + unsigned int processed = 0, to_process; + struct bcm_sysport_cb *cb; + struct sk_buff *skb; + unsigned int p_index; + u16 len, status; + struct bcm_rsb *rsb; + int ret; + + /* Determine how much we should process since last call */ + p_index = rdma_readl(priv, RDMA_PROD_INDEX); + p_index &= RDMA_PROD_INDEX_MASK; + + if (p_index < priv->rx_c_index) + to_process = (RDMA_CONS_INDEX_MASK + 1) - + priv->rx_c_index + p_index; + else + to_process = p_index - priv->rx_c_index; + + netif_dbg(priv, rx_status, ndev, + "p_index=%d rx_c_index=%d to_process=%d\n", + p_index, priv->rx_c_index, to_process); + + while ((processed < to_process) && (processed < budget)) { + cb = &priv->rx_cbs[priv->rx_read_ptr]; + skb = cb->skb; + + processed++; + priv->rx_read_ptr++; + + if (priv->rx_read_ptr == priv->num_rx_bds) + priv->rx_read_ptr = 0; + + /* We do not have a backing SKB, so we do not a corresponding + * DMA mapping for this incoming packet since + * bcm_sysport_rx_refill always either has both skb and mapping + * or none. + */ + if (unlikely(!skb)) { + netif_err(priv, rx_err, ndev, "out of memory!\n"); + ndev->stats.rx_dropped++; + ndev->stats.rx_errors++; + goto refill; + } + + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), + RX_BUF_LENGTH, DMA_FROM_DEVICE); + + /* Extract the Receive Status Block prepended */ + rsb = (struct bcm_rsb *)skb->data; + len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; + status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & + DESC_STATUS_MASK; + + netif_dbg(priv, rx_status, ndev, + "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", + p_index, priv->rx_c_index, priv->rx_read_ptr, + len, status); + + if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { + netif_err(priv, rx_status, ndev, "fragmented packet!\n"); + ndev->stats.rx_dropped++; + ndev->stats.rx_errors++; + bcm_sysport_free_cb(cb); + goto refill; + } + + if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { + netif_err(priv, rx_err, ndev, "error packet\n"); + if (status & RX_STATUS_OVFLOW) + ndev->stats.rx_over_errors++; + ndev->stats.rx_dropped++; + ndev->stats.rx_errors++; + bcm_sysport_free_cb(cb); + goto refill; + } + + skb_put(skb, len); + + /* Hardware validated our checksum */ + if (likely(status & DESC_L4_CSUM)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Hardware pre-pends packets with 2bytes before Ethernet + * header plus we have the Receive Status Block, strip off all + * of this from the SKB. 
+ */ + skb_pull(skb, sizeof(*rsb) + 2); + len -= (sizeof(*rsb) + 2); + + /* UniMAC may forward CRC */ + if (priv->crc_fwd) { + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + skb->protocol = eth_type_trans(skb, ndev); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + + napi_gro_receive(&priv->napi, skb); +refill: + ret = bcm_sysport_rx_refill(priv, cb); + if (ret) + priv->mib.alloc_rx_buff_failed++; + } + + return processed; +} + +static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, + struct bcm_sysport_cb *cb, + unsigned int *bytes_compl, + unsigned int *pkts_compl) +{ + struct device *kdev = &priv->pdev->dev; + struct net_device *ndev = priv->netdev; + + if (cb->skb) { + ndev->stats.tx_bytes += cb->skb->len; + *bytes_compl += cb->skb->len; + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + ndev->stats.tx_packets++; + (*pkts_compl)++; + bcm_sysport_free_cb(cb); + /* SKB fragment */ + } else if (dma_unmap_addr(cb, dma_addr)) { + ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len); + dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } +} + +/* Reclaim queued SKBs for transmission completion, lockless version */ +static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, + struct bcm_sysport_tx_ring *ring) +{ + struct net_device *ndev = priv->netdev; + unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; + unsigned int pkts_compl = 0, bytes_compl = 0; + struct bcm_sysport_cb *cb; + struct netdev_queue *txq; + u32 hw_ind; + + txq = netdev_get_tx_queue(ndev, ring->index); + + /* Compute how many descriptors have been processed since last call */ + hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); + c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; + ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); + + last_c_index = ring->c_index; + num_tx_cbs = ring->size; + + c_index &= (num_tx_cbs - 1); + + if (c_index >= last_c_index) + last_tx_cn = c_index - last_c_index; + else + last_tx_cn = num_tx_cbs - last_c_index + c_index; + + netif_dbg(priv, tx_done, ndev, + "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", + ring->index, c_index, last_tx_cn, last_c_index); + + while (last_tx_cn-- > 0) { + cb = ring->cbs + last_c_index; + bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl); + + ring->desc_count++; + last_c_index++; + last_c_index &= (num_tx_cbs - 1); + } + + ring->c_index = c_index; + + if (netif_tx_queue_stopped(txq) && pkts_compl) + netif_tx_wake_queue(txq); + + netif_dbg(priv, tx_done, ndev, + "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", + ring->index, ring->c_index, pkts_compl, bytes_compl); + + return pkts_compl; +} + +/* Locked version of the per-ring TX reclaim routine */ +static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, + struct bcm_sysport_tx_ring *ring) +{ + unsigned int released; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + released = __bcm_sysport_tx_reclaim(priv, ring); + spin_unlock_irqrestore(&ring->lock, flags); + + return released; +} + +static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcm_sysport_tx_ring *ring = + container_of(napi, struct bcm_sysport_tx_ring, napi); + unsigned int work_done = 0; + + work_done = bcm_sysport_tx_reclaim(ring->priv, ring); + + if (work_done == 0) { + napi_complete(napi); + /* re-enable TX interrupt */ 
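/*
 * Illustrative sketch, not part of the patch: bcm_sysport_desc_rx() and
 * __bcm_sysport_tx_reclaim() above both compute how many descriptors the
 * hardware completed since the last call from free-running producer and
 * consumer indices that wrap at a power-of-two mask.  A standalone model
 * of that wraparound delta, with invented index values:
 */
#include <stdint.h>
#include <stdio.h>

/* same shape as RDMA_CONS_INDEX_MASK / RING_CONS_INDEX_MASK: 2^n - 1 */
#define INDEX_MASK 0xffffu

/* descriptors completed between consumer index 'c' and producer index 'p' */
static unsigned int ring_delta(unsigned int c, unsigned int p)
{
	if (p >= c)
		return p - c;
	/* producer wrapped past the mask since the last call */
	return (INDEX_MASK + 1) - c + p;
}

int main(void)
{
	/* hypothetical snapshots of the hardware indices */
	printf("no wrap: %u\n", ring_delta(100, 130));          /* 30 */
	printf("wrap:    %u\n", ring_delta(0xfff0u, 0x0010u));  /* 32 */
	return 0;
}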
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index)); + + return 0; + } + + return budget; +} + +static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) +{ + unsigned int q; + + for (q = 0; q < priv->netdev->num_tx_queues; q++) + bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]); +} + +static int bcm_sysport_poll(struct napi_struct *napi, int budget) +{ + struct bcm_sysport_priv *priv = + container_of(napi, struct bcm_sysport_priv, napi); + unsigned int work_done = 0; + + work_done = bcm_sysport_desc_rx(priv, budget); + + priv->rx_c_index += work_done; + priv->rx_c_index &= RDMA_CONS_INDEX_MASK; + rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + + if (work_done < budget) { + napi_complete(napi); + /* re-enable RX interrupts */ + intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); + } + + return work_done; +} + +static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) +{ + u32 reg; + + /* Stop monitoring MPD interrupt */ + intrl2_0_mask_set(priv, INTRL2_0_MPD); + + /* Clear the MagicPacket detection logic */ + reg = umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~MPD_EN; + umac_writel(priv, reg, UMAC_MPD_CTRL); + + netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); +} + +/* RX and misc interrupt routine */ +static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct bcm_sysport_priv *priv = netdev_priv(dev); + + priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & + ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); + + if (unlikely(priv->irq0_stat == 0)) { + netdev_warn(priv->netdev, "spurious RX interrupt\n"); + return IRQ_NONE; + } + + if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { + if (likely(napi_schedule_prep(&priv->napi))) { + /* disable RX interrupts */ + intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); + __napi_schedule(&priv->napi); + } + } + + /* TX ring is full, perform a full reclaim since we do not know + * which one would trigger this interrupt + */ + if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) + bcm_sysport_tx_reclaim_all(priv); + + if (priv->irq0_stat & INTRL2_0_MPD) { + netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n"); + bcm_sysport_resume_from_wol(priv); + } + + return IRQ_HANDLED; +} + +/* TX interrupt service routine */ +static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_tx_ring *txr; + unsigned int ring; + + priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & + ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + + if (unlikely(priv->irq1_stat == 0)) { + netdev_warn(priv->netdev, "spurious TX interrupt\n"); + return IRQ_NONE; + } + + for (ring = 0; ring < dev->num_tx_queues; ring++) { + if (!(priv->irq1_stat & BIT(ring))) + continue; + + txr = &priv->tx_rings[ring]; + + if (likely(napi_schedule_prep(&txr->napi))) { + intrl2_1_mask_set(priv, BIT(ring)); + __napi_schedule(&txr->napi); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id) +{ + struct bcm_sysport_priv *priv = dev_id; + + pm_wakeup_event(&priv->pdev->dev, 0); + + return IRQ_HANDLED; +} + +static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, + struct net_device *dev) +{ + struct sk_buff *nskb; + struct bcm_tsb *tsb; + u32 csum_info; + u8 ip_proto; + u16 csum_start; + u16 ip_ver; + + /* Re-allocate SKB if needed */ + if 
(unlikely(skb_headroom(skb) < sizeof(*tsb))) { + nskb = skb_realloc_headroom(skb, sizeof(*tsb)); + dev_kfree_skb(skb); + if (!nskb) { + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + return NULL; + } + skb = nskb; + } + + tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb)); + /* Zero-out TSB by default */ + memset(tsb, 0, sizeof(*tsb)); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = htons(skb->protocol); + switch (ip_ver) { + case ETH_P_IP: + ip_proto = ip_hdr(skb)->protocol; + break; + case ETH_P_IPV6: + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return skb; + } + + /* Get the checksum offset and the L4 (transport) offset */ + csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb); + csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK; + csum_info |= (csum_start << L4_PTR_SHIFT); + + if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { + csum_info |= L4_LENGTH_VALID; + if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) + csum_info |= L4_UDP; + } else { + csum_info = 0; + } + + tsb->l4_ptr_dest_map = csum_info; + } + + return skb; +} + +static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct bcm_sysport_tx_ring *ring; + struct bcm_sysport_cb *cb; + struct netdev_queue *txq; + struct dma_desc *desc; + unsigned int skb_len; + unsigned long flags; + dma_addr_t mapping; + u32 len_status; + u16 queue; + int ret; + + queue = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, queue); + ring = &priv->tx_rings[queue]; + + /* lock against tx reclaim in BH context and TX ring full interrupt */ + spin_lock_irqsave(&ring->lock, flags); + if (unlikely(ring->desc_count == 0)) { + netif_tx_stop_queue(txq); + netdev_err(dev, "queue %d awake and ring full!\n", queue); + ret = NETDEV_TX_BUSY; + goto out; + } + + /* Insert TSB and checksum infos */ + if (priv->tsb_en) { + skb = bcm_sysport_insert_tsb(skb, dev); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + } + + /* The Ethernet switch we are interfaced with needs packets to be at + * least 64 bytes (including FCS) otherwise they will be discarded when + * they enter the switch port logic. When Broadcom tags are enabled, we + * need to make sure that packets are at least 68 bytes + * (including FCS and tag) because the length verification is done after + * the Broadcom tag is stripped off the ingress packet. + */ + if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { + ret = NETDEV_TX_OK; + goto out; + } + + skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ? 
+ ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len; + + mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", + skb->data, skb_len); + ret = NETDEV_TX_OK; + goto out; + } + + /* Remember the SKB for future freeing */ + cb = &ring->cbs[ring->curr_desc]; + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, skb_len); + + /* Fetch a descriptor entry from our pool */ + desc = ring->desc_cpu; + + desc->addr_lo = lower_32_bits(mapping); + len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; + len_status |= (skb_len << DESC_LEN_SHIFT); + len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << + DESC_STATUS_SHIFT; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); + + ring->curr_desc++; + if (ring->curr_desc == ring->size) + ring->curr_desc = 0; + ring->desc_count--; + + /* Ensure write completion of the descriptor status/length + * in DRAM before the System Port WRITE_PORT register latches + * the value + */ + wmb(); + desc->addr_status_len = len_status; + wmb(); + + /* Write this descriptor address to the RING write port */ + tdma_port_write_desc_addr(priv, desc, ring->index); + + /* Check ring space and update SW control flow */ + if (ring->desc_count == 0) + netif_tx_stop_queue(txq); + + netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", + ring->index, ring->desc_count, ring->curr_desc); + + ret = NETDEV_TX_OK; +out: + spin_unlock_irqrestore(&ring->lock, flags); + return ret; +} + +static void bcm_sysport_tx_timeout(struct net_device *dev) +{ + netdev_warn(dev, "transmit timeout!\n"); + + dev->trans_start = jiffies; + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +/* phylib adjust link callback */ +static void bcm_sysport_adj_link(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + unsigned int changed = 0; + u32 cmd_bits = 0, reg; + + if (priv->old_link != phydev->link) { + changed = 1; + priv->old_link = phydev->link; + } + + if (priv->old_duplex != phydev->duplex) { + changed = 1; + priv->old_duplex = phydev->duplex; + } + + switch (phydev->speed) { + case SPEED_2500: + cmd_bits = CMD_SPEED_2500; + break; + case SPEED_1000: + cmd_bits = CMD_SPEED_1000; + break; + case SPEED_100: + cmd_bits = CMD_SPEED_100; + break; + case SPEED_10: + cmd_bits = CMD_SPEED_10; + break; + default: + break; + } + cmd_bits <<= CMD_SPEED_SHIFT; + + if (phydev->duplex == DUPLEX_HALF) + cmd_bits |= CMD_HD_EN; + + if (priv->old_pause != phydev->pause) { + changed = 1; + priv->old_pause = phydev->pause; + } + + if (!phydev->pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + + if (!changed) + return; + + if (phydev->link) { + reg = umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | CMD_RX_PAUSE_IGNORE | + CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + umac_writel(priv, reg, UMAC_CMD); + } + + phy_print_status(priv->phydev); +} + +static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, + unsigned int index) +{ + struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; + struct device *kdev = &priv->pdev->dev; + size_t size; + void *p; + u32 reg; + + /* Simple descriptors partitioning for now */ + size = 256; + + /* We just need one DMA descriptor which is DMA-able, since writing to + * the port will allocate a new 
descriptor in its internal linked-list + */ + p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, + GFP_KERNEL); + if (!p) { + netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); + return -ENOMEM; + } + + ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); + if (!ring->cbs) { + netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); + return -ENOMEM; + } + + /* Initialize SW view of the ring */ + spin_lock_init(&ring->lock); + ring->priv = priv; + netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); + ring->index = index; + ring->size = size; + ring->alloc_size = ring->size; + ring->desc_cpu = p; + ring->desc_count = ring->size; + ring->curr_desc = 0; + + /* Initialize HW ring */ + tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index)); + tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index)); + tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index)); + tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index)); + tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index)); + tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); + + /* Program the number of descriptors as MAX_THRESHOLD and half of + * its size for the hysteresis trigger + */ + tdma_writel(priv, ring->size | + 1 << RING_HYST_THRESH_SHIFT, + TDMA_DESC_RING_MAX_HYST(index)); + + /* Enable the ring queue in the arbiter */ + reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN); + reg |= (1 << index); + tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN); + + napi_enable(&ring->napi); + + netif_dbg(priv, hw, priv->netdev, + "TDMA cfg, size=%d, desc_cpu=%p\n", + ring->size, ring->desc_cpu); + + return 0; +} + +static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, + unsigned int index) +{ + struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; + struct device *kdev = &priv->pdev->dev; + u32 reg; + + /* Caller should stop the TDMA engine */ + reg = tdma_readl(priv, TDMA_STATUS); + if (!(reg & TDMA_DISABLED)) + netdev_warn(priv->netdev, "TDMA not stopped!\n"); + + /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could + * fail, so by checking this pointer we know whether the TX ring was + * fully initialized or not. 
+ */ + if (!ring->cbs) + return; + + napi_disable(&ring->napi); + netif_napi_del(&ring->napi); + + bcm_sysport_tx_reclaim(priv, ring); + + kfree(ring->cbs); + ring->cbs = NULL; + + if (ring->desc_dma) { + dma_free_coherent(kdev, sizeof(struct dma_desc), + ring->desc_cpu, ring->desc_dma); + ring->desc_dma = 0; + } + ring->size = 0; + ring->alloc_size = 0; + + netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n"); +} + +/* RDMA helper */ +static inline int rdma_enable_set(struct bcm_sysport_priv *priv, + unsigned int enable) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = rdma_readl(priv, RDMA_CONTROL); + if (enable) + reg |= RDMA_EN; + else + reg &= ~RDMA_EN; + rdma_writel(priv, reg, RDMA_CONTROL); + + /* Poll for RMDA disabling completion */ + do { + reg = rdma_readl(priv, RDMA_STATUS); + if (!!(reg & RDMA_DISABLED) == !enable) + return 0; + usleep_range(1000, 2000); + } while (timeout-- > 0); + + netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n"); + + return -ETIMEDOUT; +} + +/* TDMA helper */ +static inline int tdma_enable_set(struct bcm_sysport_priv *priv, + unsigned int enable) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = tdma_readl(priv, TDMA_CONTROL); + if (enable) + reg |= TDMA_EN; + else + reg &= ~TDMA_EN; + tdma_writel(priv, reg, TDMA_CONTROL); + + /* Poll for TMDA disabling completion */ + do { + reg = tdma_readl(priv, TDMA_STATUS); + if (!!(reg & TDMA_DISABLED) == !enable) + return 0; + + usleep_range(1000, 2000); + } while (timeout-- > 0); + + netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n"); + + return -ETIMEDOUT; +} + +static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) +{ + u32 reg; + int ret; + + /* Initialize SW view of the RX ring */ + priv->num_rx_bds = NUM_RX_DESC; + priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; + priv->rx_bd_assign_ptr = priv->rx_bds; + priv->rx_bd_assign_index = 0; + priv->rx_c_index = 0; + priv->rx_read_ptr = 0; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), + GFP_KERNEL); + if (!priv->rx_cbs) { + netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); + return -ENOMEM; + } + + ret = bcm_sysport_alloc_rx_bufs(priv); + if (ret) { + netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); + return ret; + } + + /* Initialize HW, ensure RDMA is disabled */ + reg = rdma_readl(priv, RDMA_STATUS); + if (!(reg & RDMA_DISABLED)) + rdma_enable_set(priv, 0); + + rdma_writel(priv, 0, RDMA_WRITE_PTR_LO); + rdma_writel(priv, 0, RDMA_WRITE_PTR_HI); + rdma_writel(priv, 0, RDMA_PROD_INDEX); + rdma_writel(priv, 0, RDMA_CONS_INDEX); + rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT | + RX_BUF_LENGTH, RDMA_RING_BUF_SIZE); + /* Operate the queue in ring mode */ + rdma_writel(priv, 0, RDMA_START_ADDR_HI); + rdma_writel(priv, 0, RDMA_START_ADDR_LO); + rdma_writel(priv, 0, RDMA_END_ADDR_HI); + rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO); + + rdma_writel(priv, 1, RDMA_MBDONE_INTR); + + netif_dbg(priv, hw, priv->netdev, + "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", + priv->num_rx_bds, priv->rx_bds); + + return 0; +} + +static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv) +{ + struct bcm_sysport_cb *cb; + unsigned int i; + u32 reg; + + /* Caller should ensure RDMA is disabled */ + reg = rdma_readl(priv, RDMA_STATUS); + if (!(reg & RDMA_DISABLED)) + netdev_warn(priv->netdev, "RDMA not stopped!\n"); + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + if (dma_unmap_addr(cb, dma_addr)) + 
dma_unmap_single(&priv->pdev->dev, + dma_unmap_addr(cb, dma_addr), + RX_BUF_LENGTH, DMA_FROM_DEVICE); + bcm_sysport_free_cb(cb); + } + + kfree(priv->rx_cbs); + priv->rx_cbs = NULL; + + netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n"); +} + +static void bcm_sysport_set_rx_mode(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + u32 reg; + + reg = umac_readl(priv, UMAC_CMD); + if (dev->flags & IFF_PROMISC) + reg |= CMD_PROMISC; + else + reg &= ~CMD_PROMISC; + umac_writel(priv, reg, UMAC_CMD); + + /* No support for ALLMULTI */ + if (dev->flags & IFF_ALLMULTI) + return; +} + +static inline void umac_enable_set(struct bcm_sysport_priv *priv, + u32 mask, unsigned int enable) +{ + u32 reg; + + reg = umac_readl(priv, UMAC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-sized packet + * to be processed (1 msec). + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static inline void umac_reset(struct bcm_sysport_priv *priv) +{ + u32 reg; + + reg = umac_readl(priv, UMAC_CMD); + reg |= CMD_SW_RESET; + umac_writel(priv, reg, UMAC_CMD); + udelay(10); + reg = umac_readl(priv, UMAC_CMD); + reg &= ~CMD_SW_RESET; + umac_writel(priv, reg, UMAC_CMD); +} + +static void umac_set_hw_addr(struct bcm_sysport_priv *priv, + unsigned char *addr) +{ + umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | + (addr[2] << 8) | addr[3], UMAC_MAC0); + umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); +} + +static void topctrl_flush(struct bcm_sysport_priv *priv) +{ + topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); + topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); + mdelay(1); + topctrl_writel(priv, 0, RX_FLUSH_CNTL); + topctrl_writel(priv, 0, TX_FLUSH_CNTL); +} + +static int bcm_sysport_change_mac(struct net_device *dev, void *p) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + /* interface is disabled, changes to MAC will be reflected on next + * open call + */ + if (!netif_running(dev)) + return 0; + + umac_set_hw_addr(priv, dev->dev_addr); + + return 0; +} + +static void bcm_sysport_netif_start(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + + /* Enable NAPI */ + napi_enable(&priv->napi); + + /* Enable RX interrupt and TX ring full interrupt */ + intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); + + phy_start(priv->phydev); + + /* Enable TX interrupts for the 32 TXQs */ + intrl2_1_mask_clear(priv, 0xffffffff); + + /* Last call before we start the real business */ + netif_tx_start_all_queues(dev); +} + +static void rbuf_init(struct bcm_sysport_priv *priv) +{ + u32 reg; + + reg = rbuf_readl(priv, RBUF_CONTROL); + reg |= RBUF_4B_ALGN | RBUF_RSB_EN; + rbuf_writel(priv, reg, RBUF_CONTROL); +} + +static int bcm_sysport_open(struct net_device *dev) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + unsigned int i; + int ret; + + /* Reset UniMAC */ + umac_reset(priv); + + /* Flush TX and RX FIFOs at TOPCTRL level */ + topctrl_flush(priv); + + /* Disable the UniMAC RX/TX */ + umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0); + + /* Enable RBUF 2bytes alignment and Receive Status Block */ + rbuf_init(priv); + + /* Set maximum frame length */ + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* Set MAC address */ + umac_set_hw_addr(priv, dev->dev_addr); + + 
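+	/* The maximum frame length programmed a few lines above,
+	 * UMAC_MAX_MTU_SIZE (see bcmsysport.h), expands to
+	 * ETH_DATA_LEN (1500) + ETH_HLEN (14) + VLAN_HLEN (4) +
+	 * ENET_BRCM_TAG_LEN (4) + ETH_FCS_LEN (4) + ENET_PAD (10) = 1536,
+	 * a 256-byte multiple large enough for a VLAN and Broadcom tagged
+	 * frame, as noted next to its definition.
+	 */
+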
/* Read CRC forward */
+	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+				      0, priv->phy_interface);
+	if (!priv->phydev) {
+		netdev_err(dev, "could not attach to PHY\n");
+		return -ENODEV;
+	}
+
+	/* Reset housekeeping link status */
+	priv->old_duplex = -1;
+	priv->old_link = -1;
+	priv->old_pause = -1;
+
+	/* mask all interrupts and request them */
+	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+	if (ret) {
+		netdev_err(dev, "failed to request RX interrupt\n");
+		goto out_phy_disconnect;
+	}
+
+	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+	if (ret) {
+		netdev_err(dev, "failed to request TX interrupt\n");
+		goto out_free_irq0;
+	}
+
+	/* Initialize both hardware and software ring */
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		ret = bcm_sysport_init_tx_ring(priv, i);
+		if (ret) {
+			netdev_err(dev, "failed to initialize TX ring %d\n",
+				   i);
+			goto out_free_tx_ring;
+		}
+	}
+
+	/* Initialize linked-list */
+	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+	/* Initialize RX ring */
+	ret = bcm_sysport_init_rx_ring(priv);
+	if (ret) {
+		netdev_err(dev, "failed to initialize RX ring\n");
+		goto out_free_rx_ring;
+	}
+
+	/* Turn on RDMA */
+	ret = rdma_enable_set(priv, 1);
+	if (ret)
+		goto out_free_rx_ring;
+
+	/* Turn on TDMA */
+	ret = tdma_enable_set(priv, 1);
+	if (ret)
+		goto out_clear_rx_int;
+
+	/* Turn on UniMAC TX/RX */
+	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
+
+	bcm_sysport_netif_start(dev);
+
+	return 0;
+
+out_clear_rx_int:
+	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+	bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+	for (i = 0; i < dev->num_tx_queues; i++)
+		bcm_sysport_fini_tx_ring(priv, i);
+	free_irq(priv->irq1, dev);
+out_free_irq0:
+	free_irq(priv->irq0, dev);
+out_phy_disconnect:
+	phy_disconnect(priv->phydev);
+	return ret;
+}
+
+static void bcm_sysport_netif_stop(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	/* stop all software from updating hardware */
+	netif_tx_stop_all_queues(dev);
+	napi_disable(&priv->napi);
+	phy_stop(priv->phydev);
+
+	/* mask all interrupts */
+	intrl2_0_mask_set(priv, 0xffffffff);
+	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+	intrl2_1_mask_set(priv, 0xffffffff);
+	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	unsigned int i;
+	int ret;
+
+	bcm_sysport_netif_stop(dev);
+
+	/* Disable UniMAC RX */
+	umac_enable_set(priv, CMD_RX_EN, 0);
+
+	ret = tdma_enable_set(priv, 0);
+	if (ret) {
+		netdev_err(dev, "timeout disabling TDMA\n");
+		return ret;
+	}
+
+	/* Wait for a maximum packet size to be drained */
+	usleep_range(2000, 3000);
+
+	ret = rdma_enable_set(priv, 0);
+	if (ret) {
+		netdev_err(dev, "timeout disabling RDMA\n");
+		return ret;
+	}
+
+	/* Disable UniMAC TX */
+	umac_enable_set(priv, CMD_TX_EN, 0);
+
+	/* Free RX/TX rings SW structures */
+	for (i = 0; i < dev->num_tx_queues; i++)
+		bcm_sysport_fini_tx_ring(priv, i);
+
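+	/* bcm_sysport_fini_tx_ring() bails out early when ring->cbs is
+	 * NULL, so rings that never finished initializing are handled
+	 * safely here; the RX teardown below relies on RDMA having been
+	 * stopped above.
+	 */
+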
bcm_sysport_fini_rx_ring(priv); + + free_irq(priv->irq0, dev); + free_irq(priv->irq1, dev); + + /* Disconnect from PHY */ + phy_disconnect(priv->phydev); + + return 0; +} + +static struct ethtool_ops bcm_sysport_ethtool_ops = { + .get_settings = bcm_sysport_get_settings, + .set_settings = bcm_sysport_set_settings, + .get_drvinfo = bcm_sysport_get_drvinfo, + .get_msglevel = bcm_sysport_get_msglvl, + .set_msglevel = bcm_sysport_set_msglvl, + .get_link = ethtool_op_get_link, + .get_strings = bcm_sysport_get_strings, + .get_ethtool_stats = bcm_sysport_get_stats, + .get_sset_count = bcm_sysport_get_sset_count, + .get_wol = bcm_sysport_get_wol, + .set_wol = bcm_sysport_set_wol, +}; + +static const struct net_device_ops bcm_sysport_netdev_ops = { + .ndo_start_xmit = bcm_sysport_xmit, + .ndo_tx_timeout = bcm_sysport_tx_timeout, + .ndo_open = bcm_sysport_open, + .ndo_stop = bcm_sysport_stop, + .ndo_set_features = bcm_sysport_set_features, + .ndo_set_rx_mode = bcm_sysport_set_rx_mode, + .ndo_set_mac_address = bcm_sysport_change_mac, +}; + +#define REV_FMT "v%2x.%02x" + +static int bcm_sysport_probe(struct platform_device *pdev) +{ + struct bcm_sysport_priv *priv; + struct device_node *dn; + struct net_device *dev; + const void *macaddr; + struct resource *r; + u32 txq, rxq; + int ret; + + dn = pdev->dev.of_node; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + /* Read the Transmit/Receive Queue properties */ + if (of_property_read_u32(dn, "systemport,num-txq", &txq)) + txq = TDMA_NUM_RINGS; + if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) + rxq = 1; + + dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); + if (!dev) + return -ENOMEM; + + /* Initialize private members */ + priv = netdev_priv(dev); + + priv->irq0 = platform_get_irq(pdev, 0); + priv->irq1 = platform_get_irq(pdev, 1); + priv->wol_irq = platform_get_irq(pdev, 2); + if (priv->irq0 <= 0 || priv->irq1 <= 0) { + dev_err(&pdev->dev, "invalid interrupts\n"); + ret = -EINVAL; + goto err; + } + + priv->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto err; + } + + priv->netdev = dev; + priv->pdev = pdev; + + priv->phy_interface = of_get_phy_mode(dn); + /* Default to GMII interface mode */ + if (priv->phy_interface < 0) + priv->phy_interface = PHY_INTERFACE_MODE_GMII; + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ + if (of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) { + dev_err(&pdev->dev, "failed to register fixed PHY\n"); + goto err; + } + + priv->phy_dn = dn; + } + + /* Initialize netdevice members */ + macaddr = of_get_mac_address(dn); + if (!macaddr || !is_valid_ether_addr(macaddr)) { + dev_warn(&pdev->dev, "using random Ethernet MAC\n"); + random_ether_addr(dev->dev_addr); + } else { + ether_addr_copy(dev->dev_addr, macaddr); + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + dev->ethtool_ops = &bcm_sysport_ethtool_ops; + dev->netdev_ops = &bcm_sysport_netdev_ops; + netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64); + + /* HW supported features, none enabled by default */ + dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = 1; + ret = devm_request_irq(&pdev->dev, priv->wol_irq, + bcm_sysport_wol_isr, 0, dev->name, priv); + if (!ret) + device_set_wakeup_capable(&pdev->dev, 1); + + /* Set the needed headroom once and for all */ + BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); + dev->needed_headroom += sizeof(struct bcm_tsb); + + /* libphy will adjust the link state accordingly */ + netif_carrier_off(dev); + + ret = register_netdev(dev); + if (ret) { + dev_err(&pdev->dev, "failed to register net_device\n"); + goto err; + } + + priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; + dev_info(&pdev->dev, + "Broadcom SYSTEMPORT" REV_FMT + " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", + (priv->rev >> 8) & 0xff, priv->rev & 0xff, + priv->base, priv->irq0, priv->irq1, txq, rxq); + + return 0; +err: + free_netdev(dev); + return ret; +} + +static int bcm_sysport_remove(struct platform_device *pdev) +{ + struct net_device *dev = dev_get_drvdata(&pdev->dev); + + /* Not much to do, ndo_close has been called + * and we use managed allocations + */ + unregister_netdev(dev); + free_netdev(dev); + dev_set_drvdata(&pdev->dev, NULL); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) +{ + struct net_device *ndev = priv->netdev; + unsigned int timeout = 1000; + u32 reg; + + /* Password has already been programmed */ + reg = umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + reg &= ~PSW_EN; + if (priv->wolopts & WAKE_MAGICSECURE) + reg |= PSW_EN; + umac_writel(priv, reg, UMAC_MPD_CTRL); + + /* Make sure RBUF entered WoL mode as result */ + do { + reg = rbuf_readl(priv, RBUF_STATUS); + if (reg & RBUF_WOL_MODE) + break; + + udelay(10); + } while (timeout-- > 0); + + /* Do not leave the UniMAC RBUF matching only MPD packets */ + if (!timeout) { + reg = umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~MPD_EN; + umac_writel(priv, reg, UMAC_MPD_CTRL); + netif_err(priv, wol, ndev, "failed to enter WOL mode\n"); + return -ETIMEDOUT; + } + + /* UniMAC receive needs to be turned on */ + umac_enable_set(priv, CMD_RX_EN, 1); + + /* Enable the interrupt wake-up source */ + intrl2_0_mask_clear(priv, INTRL2_0_MPD); + + netif_dbg(priv, wol, ndev, "entered WOL mode\n"); + + return 0; +} + +static int bcm_sysport_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcm_sysport_priv *priv = netdev_priv(dev); + unsigned int i; + int ret = 0; + u32 reg; + + if (!netif_running(dev)) + return 0; + + bcm_sysport_netif_stop(dev); + + phy_suspend(priv->phydev); + + netif_device_detach(dev); + + /* Disable UniMAC RX */ + umac_enable_set(priv, 
CMD_RX_EN, 0); + + ret = rdma_enable_set(priv, 0); + if (ret) { + netdev_err(dev, "RDMA timeout!\n"); + return ret; + } + + /* Disable RXCHK if enabled */ + if (priv->rx_chk_en) { + reg = rxchk_readl(priv, RXCHK_CONTROL); + reg &= ~RXCHK_EN; + rxchk_writel(priv, reg, RXCHK_CONTROL); + } + + /* Flush RX pipe */ + if (!priv->wolopts) + topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); + + ret = tdma_enable_set(priv, 0); + if (ret) { + netdev_err(dev, "TDMA timeout!\n"); + return ret; + } + + /* Wait for a packet boundary */ + usleep_range(2000, 3000); + + umac_enable_set(priv, CMD_TX_EN, 0); + + topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); + + /* Free RX/TX rings SW structures */ + for (i = 0; i < dev->num_tx_queues; i++) + bcm_sysport_fini_tx_ring(priv, i); + bcm_sysport_fini_rx_ring(priv); + + /* Get prepared for Wake-on-LAN */ + if (device_may_wakeup(d) && priv->wolopts) + ret = bcm_sysport_suspend_to_wol(priv); + + return ret; +} + +static int bcm_sysport_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcm_sysport_priv *priv = netdev_priv(dev); + unsigned int i; + u32 reg; + int ret; + + if (!netif_running(dev)) + return 0; + + umac_reset(priv); + + /* We may have been suspended and never received a WOL event that + * would turn off MPD detection, take care of that now + */ + bcm_sysport_resume_from_wol(priv); + + /* Initialize both hardware and software ring */ + for (i = 0; i < dev->num_tx_queues; i++) { + ret = bcm_sysport_init_tx_ring(priv, i); + if (ret) { + netdev_err(dev, "failed to initialize TX ring %d\n", + i); + goto out_free_tx_rings; + } + } + + /* Initialize linked-list */ + tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); + + /* Initialize RX ring */ + ret = bcm_sysport_init_rx_ring(priv); + if (ret) { + netdev_err(dev, "failed to initialize RX ring\n"); + goto out_free_rx_ring; + } + + netif_device_attach(dev); + + /* RX pipe enable */ + topctrl_writel(priv, 0, RX_FLUSH_CNTL); + + ret = rdma_enable_set(priv, 1); + if (ret) { + netdev_err(dev, "failed to enable RDMA\n"); + goto out_free_rx_ring; + } + + /* Enable rxhck */ + if (priv->rx_chk_en) { + reg = rxchk_readl(priv, RXCHK_CONTROL); + reg |= RXCHK_EN; + rxchk_writel(priv, reg, RXCHK_CONTROL); + } + + rbuf_init(priv); + + /* Set maximum frame length */ + umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* Set MAC address */ + umac_set_hw_addr(priv, dev->dev_addr); + + umac_enable_set(priv, CMD_RX_EN, 1); + + /* TX pipe enable */ + topctrl_writel(priv, 0, TX_FLUSH_CNTL); + + umac_enable_set(priv, CMD_TX_EN, 1); + + ret = tdma_enable_set(priv, 1); + if (ret) { + netdev_err(dev, "TDMA timeout!\n"); + goto out_free_rx_ring; + } + + phy_resume(priv->phydev); + + bcm_sysport_netif_start(dev); + + return 0; + +out_free_rx_ring: + bcm_sysport_fini_rx_ring(priv); +out_free_tx_rings: + for (i = 0; i < dev->num_tx_queues; i++) + bcm_sysport_fini_tx_ring(priv, i); + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops, + bcm_sysport_suspend, bcm_sysport_resume); + +static const struct of_device_id bcm_sysport_of_match[] = { + { .compatible = "brcm,systemport-v1.00" }, + { .compatible = "brcm,systemport" }, + { /* sentinel */ } +}; + +static struct platform_driver bcm_sysport_driver = { + .probe = bcm_sysport_probe, + .remove = bcm_sysport_remove, + .driver = { + .name = "brcm-systemport", + .of_match_table = bcm_sysport_of_match, + .pm = &bcm_sysport_pm_ops, + }, +}; +module_platform_driver(bcm_sysport_driver); + +MODULE_AUTHOR("Broadcom Corporation"); 
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver"); +MODULE_ALIAS("platform:brcm-systemport"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h new file mode 100644 index 000000000..e2c043eab --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -0,0 +1,695 @@ +/* + * Broadcom BCM7xxx System Port Ethernet MAC driver + * + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __BCM_SYSPORT_H +#define __BCM_SYSPORT_H + +#include + +/* Receive/transmit descriptor format */ +#define DESC_ADDR_HI_STATUS_LEN 0x00 +#define DESC_ADDR_HI_SHIFT 0 +#define DESC_ADDR_HI_MASK 0xff +#define DESC_STATUS_SHIFT 8 +#define DESC_STATUS_MASK 0x3ff +#define DESC_LEN_SHIFT 18 +#define DESC_LEN_MASK 0x7fff +#define DESC_ADDR_LO 0x04 + +/* HW supports 40-bit addressing hence the */ +#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32)) + +/* Default RX buffer allocation size */ +#define RX_BUF_LENGTH 2048 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 4 +#define ENET_PAD 10 +#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) + +/* Transmit status block */ +struct bcm_tsb { + u32 pcp_dei_vid; +#define PCP_DEI_MASK 0xf +#define VID_SHIFT 4 +#define VID_MASK 0xfff + u32 l4_ptr_dest_map; +#define L4_CSUM_PTR_MASK 0x1ff +#define L4_PTR_SHIFT 9 +#define L4_PTR_MASK 0x1ff +#define L4_UDP (1 << 18) +#define L4_LENGTH_VALID (1 << 19) +#define DEST_MAP_SHIFT 20 +#define DEST_MAP_MASK 0x1ff +}; + +/* Receive status block uses the same + * definitions as the DMA descriptor + */ +struct bcm_rsb { + u32 rx_status_len; + u32 brcm_egress_tag; +}; + +/* Common Receive/Transmit status bits */ +#define DESC_L4_CSUM (1 << 7) +#define DESC_SOP (1 << 8) +#define DESC_EOP (1 << 9) + +/* Receive Status bits */ +#define RX_STATUS_UCAST 0 +#define RX_STATUS_BCAST 0x04 +#define RX_STATUS_MCAST 0x08 +#define RX_STATUS_L2_MCAST 0x0c +#define RX_STATUS_ERR (1 << 4) +#define RX_STATUS_OVFLOW (1 << 5) +#define RX_STATUS_PARSE_FAIL (1 << 6) + +/* Transmit Status bits */ +#define TX_STATUS_VLAN_NO_ACT 0x00 +#define TX_STATUS_VLAN_PCP_TSB 0x01 +#define TX_STATUS_VLAN_QUEUE 0x02 +#define TX_STATUS_VLAN_VID_TSB 0x03 +#define TX_STATUS_OWR_CRC (1 << 2) +#define TX_STATUS_APP_CRC (1 << 3) +#define TX_STATUS_BRCM_TAG_NO_ACT 0 +#define TX_STATUS_BRCM_TAG_ZERO 0x10 +#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20 +#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30 +#define TX_STATUS_SKIP_BYTES (1 << 6) + +/* Specific register definitions */ +#define SYS_PORT_TOPCTRL_OFFSET 0 +#define REV_CNTL 0x00 +#define REV_MASK 0xffff + +#define RX_FLUSH_CNTL 0x04 +#define RX_FLUSH (1 << 0) + +#define TX_FLUSH_CNTL 0x08 +#define TX_FLUSH (1 << 0) + +#define MISC_CNTL 0x0c +#define SYS_CLK_SEL (1 << 0) +#define TDMA_EOP_SEL (1 << 1) + +/* Level-2 Interrupt controller offsets and defines */ +#define SYS_PORT_INTRL2_0_OFFSET 0x200 +#define SYS_PORT_INTRL2_1_OFFSET 0x240 +#define INTRL2_CPU_STATUS 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0c +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* Level-2 instance 0 interrupt bits */ +#define INTRL2_0_GISB_ERR (1 << 0) 
+#define INTRL2_0_RBUF_OVFLOW (1 << 1) +#define INTRL2_0_TBUF_UNDFLOW (1 << 2) +#define INTRL2_0_MPD (1 << 3) +#define INTRL2_0_BRCM_MATCH_TAG (1 << 4) +#define INTRL2_0_RDMA_MBDONE (1 << 5) +#define INTRL2_0_OVER_MAX_THRESH (1 << 6) +#define INTRL2_0_BELOW_HYST_THRESH (1 << 7) +#define INTRL2_0_FREE_LIST_EMPTY (1 << 8) +#define INTRL2_0_TX_RING_FULL (1 << 9) +#define INTRL2_0_DESC_ALLOC_ERR (1 << 10) +#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11) + +/* RXCHK offset and defines */ +#define SYS_PORT_RXCHK_OFFSET 0x300 + +#define RXCHK_CONTROL 0x00 +#define RXCHK_EN (1 << 0) +#define RXCHK_SKIP_FCS (1 << 1) +#define RXCHK_BAD_CSUM_DIS (1 << 2) +#define RXCHK_BRCM_TAG_EN (1 << 3) +#define RXCHK_BRCM_TAG_MATCH_SHIFT 4 +#define RXCHK_BRCM_TAG_MATCH_MASK 0xff +#define RXCHK_PARSE_TNL (1 << 12) +#define RXCHK_VIOL_EN (1 << 13) +#define RXCHK_VIOL_DIS (1 << 14) +#define RXCHK_INCOM_PKT (1 << 15) +#define RXCHK_V6_DUPEXT_EN (1 << 16) +#define RXCHK_V6_DUPEXT_DIS (1 << 17) +#define RXCHK_ETHERTYPE_DIS (1 << 18) +#define RXCHK_L2_HDR_DIS (1 << 19) +#define RXCHK_L3_HDR_DIS (1 << 20) +#define RXCHK_MAC_RX_ERR_DIS (1 << 21) +#define RXCHK_PARSE_AUTH (1 << 22) + +#define RXCHK_BRCM_TAG0 0x04 +#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0) +#define RXCHK_BRCM_TAG0_MASK 0x24 +#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK) +#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44 +#define RXCHK_ETHERTYPE 0x48 +#define RXCHK_BAD_CSUM_CNTR 0x4C +#define RXCHK_OTHER_DISC_CNTR 0x50 + +/* TXCHCK offsets and defines */ +#define SYS_PORT_TXCHK_OFFSET 0x380 +#define TXCHK_PKT_RDY_THRESH 0x00 + +/* Receive buffer offset and defines */ +#define SYS_PORT_RBUF_OFFSET 0x400 + +#define RBUF_CONTROL 0x00 +#define RBUF_RSB_EN (1 << 0) +#define RBUF_4B_ALGN (1 << 1) +#define RBUF_BRCM_TAG_STRIP (1 << 2) +#define RBUF_BAD_PKT_DISC (1 << 3) +#define RBUF_RESUME_THRESH_SHIFT 4 +#define RBUF_RESUME_THRESH_MASK 0xff +#define RBUF_OK_TO_SEND_SHIFT 12 +#define RBUF_OK_TO_SEND_MASK 0xff +#define RBUF_CRC_REPLACE (1 << 20) +#define RBUF_OK_TO_SEND_MODE (1 << 21) +#define RBUF_RSB_SWAP (1 << 22) +#define RBUF_ACPI_EN (1 << 23) + +#define RBUF_PKT_RDY_THRESH 0x04 + +#define RBUF_STATUS 0x08 +#define RBUF_WOL_MODE (1 << 0) +#define RBUF_MPD (1 << 1) +#define RBUF_ACPI (1 << 2) + +#define RBUF_OVFL_DISC_CNTR 0x0c +#define RBUF_ERR_PKT_CNTR 0x10 + +/* Transmit buffer offset and defines */ +#define SYS_PORT_TBUF_OFFSET 0x600 + +#define TBUF_CONTROL 0x00 +#define TBUF_BP_EN (1 << 0) +#define TBUF_MAX_PKT_THRESH_SHIFT 1 +#define TBUF_MAX_PKT_THRESH_MASK 0x1f +#define TBUF_FULL_THRESH_SHIFT 8 +#define TBUF_FULL_THRESH_MASK 0x1f + +/* UniMAC offset and defines */ +#define SYS_PORT_UMAC_OFFSET 0x800 + +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_10 0 +#define CMD_SPEED_100 1 +#define CMD_SPEED_1000 2 +#define CMD_SPEED_2500 3 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) + +#define UMAC_MAC0 0x00c +#define UMAC_MAC1 0x010 
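+/* umac_set_hw_addr() splits the station address across these two
+ * registers: bytes 0-3 are packed MSB-first into UMAC_MAC0 and the
+ * remaining two bytes go into the low 16 bits of UMAC_MAC1.
+ */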
+#define UMAC_MAX_FRAME_LEN 0x014 + +#define UMAC_TX_FLUSH 0x334 + +#define UMAC_MIB_START 0x400 + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define UMAC_MIB_STAT_OFFSET 0xc + +#define UMAC_MIB_CTRL 0x580 +#define MIB_RX_CNT_RST (1 << 0) +#define MIB_RUNT_CNT_RST (1 << 1) +#define MIB_TX_CNT_RST (1 << 2) + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MSEQ_LEN_SHIFT 16 +#define MSEQ_LEN_MASK 0xff +#define PSW_EN (1 << 27) + +#define UMAC_PSW_MS 0x624 +#define UMAC_PSW_LS 0x628 +#define UMAC_MDF_CTRL 0x650 +#define UMAC_MDF_ADDR 0x654 + +/* Receive DMA offset and defines */ +#define SYS_PORT_RDMA_OFFSET 0x2000 + +#define RDMA_CONTROL 0x1000 +#define RDMA_EN (1 << 0) +#define RDMA_RING_CFG (1 << 1) +#define RDMA_DISC_EN (1 << 2) +#define RDMA_BUF_DATA_OFFSET_SHIFT 4 +#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff + +#define RDMA_STATUS 0x1004 +#define RDMA_DISABLED (1 << 0) +#define RDMA_DESC_RAM_INIT_BUSY (1 << 1) +#define RDMA_BP_STATUS (1 << 2) + +#define RDMA_SCB_BURST_SIZE 0x1008 + +#define RDMA_RING_BUF_SIZE 0x100c +#define RDMA_RING_SIZE_SHIFT 16 + +#define RDMA_WRITE_PTR_HI 0x1010 +#define RDMA_WRITE_PTR_LO 0x1014 +#define RDMA_PROD_INDEX 0x1018 +#define RDMA_PROD_INDEX_MASK 0xffff + +#define RDMA_CONS_INDEX 0x101c +#define RDMA_CONS_INDEX_MASK 0xffff + +#define RDMA_START_ADDR_HI 0x1020 +#define RDMA_START_ADDR_LO 0x1024 +#define RDMA_END_ADDR_HI 0x1028 +#define RDMA_END_ADDR_LO 0x102c + +#define RDMA_MBDONE_INTR 0x1030 +#define RDMA_INTR_THRESH_MASK 0xff +#define RDMA_TIMEOUT_SHIFT 16 +#define RDMA_TIMEOUT_MASK 0xffff + +#define RDMA_XON_XOFF_THRESH 0x1034 +#define RDMA_XON_XOFF_THRESH_MASK 0xffff +#define RDMA_XOFF_THRESH_SHIFT 16 + +#define RDMA_READ_PTR_HI 0x1038 +#define RDMA_READ_PTR_LO 0x103c + +#define RDMA_OVERRIDE 0x1040 +#define RDMA_LE_MODE (1 << 0) +#define RDMA_REG_MODE (1 << 1) + +#define RDMA_TEST 0x1044 +#define RDMA_TP_OUT_SEL (1 << 0) +#define RDMA_MEM_SEL (1 << 1) + +#define RDMA_DEBUG 0x1048 + +/* Transmit DMA offset and defines */ +#define TDMA_NUM_RINGS 32 /* rings = queues */ +#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bits words */ + +#define SYS_PORT_TDMA_OFFSET 0x4000 +#define TDMA_WRITE_PORT_OFFSET 0x0000 +#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \ + (i) * TDMA_PORT_SIZE) +#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \ + sizeof(u32) + (i) * TDMA_PORT_SIZE) + +#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \ + (TDMA_NUM_RINGS * TDMA_PORT_SIZE)) +#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \ + (i) * TDMA_PORT_SIZE) +#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \ + sizeof(u32) + (i) * TDMA_PORT_SIZE) + +#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \ + (TDMA_NUM_RINGS * TDMA_PORT_SIZE)) +#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \ + (i) * sizeof(u32)) + +#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \ + (TDMA_NUM_RINGS * sizeof(u32))) + +/* Register offsets and defines relatives to a specific ring number */ +#define RING_HEAD_TAIL_PTR 0x00 +#define RING_HEAD_MASK 0x7ff +#define RING_TAIL_SHIFT 11 +#define RING_TAIL_MASK 0x7ff +#define RING_FLUSH (1 << 24) +#define RING_EN (1 << 25) + +#define RING_COUNT 0x04 +#define RING_COUNT_MASK 0x7ff +#define RING_BUFF_DONE_SHIFT 11 +#define RING_BUFF_DONE_MASK 0x7ff + +#define RING_MAX_HYST 0x08 +#define RING_MAX_THRESH_MASK 0x7ff +#define RING_HYST_THRESH_SHIFT 11 +#define RING_HYST_THRESH_MASK 0x7ff + 
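+/* The RING_* offsets above and below are relative to a per-ring base:
+ * each ring's register block is TDMA_DESC_RING_SIZE (28) bytes long, so
+ * for example TDMA_DESC_RING_PROD_CONS_INDEX(2), defined further down,
+ * resolves to TDMA_DESC_RING_00_BASE + 2 * 28 + RING_PROD_CONS_INDEX.
+ */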
+#define RING_INTR_CONTROL 0x0c +#define RING_INTR_THRESH_MASK 0x7ff +#define RING_EMPTY_INTR_EN (1 << 15) +#define RING_TIMEOUT_SHIFT 16 +#define RING_TIMEOUT_MASK 0xffff + +#define RING_PROD_CONS_INDEX 0x10 +#define RING_PROD_INDEX_MASK 0xffff +#define RING_CONS_INDEX_SHIFT 16 +#define RING_CONS_INDEX_MASK 0xffff + +#define RING_MAPPING 0x14 +#define RING_QID_MASK 0x3 +#define RING_PORT_ID_SHIFT 3 +#define RING_PORT_ID_MASK 0x7 +#define RING_IGNORE_STATUS (1 << 6) +#define RING_FAILOVER_EN (1 << 7) +#define RING_CREDIT_SHIFT 8 +#define RING_CREDIT_MASK 0xffff + +#define RING_PCP_DEI_VID 0x18 +#define RING_VID_MASK 0x7ff +#define RING_DEI (1 << 12) +#define RING_PCP_SHIFT 13 +#define RING_PCP_MASK 0x7 +#define RING_PKT_SIZE_ADJ_SHIFT 16 +#define RING_PKT_SIZE_ADJ_MASK 0xf + +#define TDMA_DESC_RING_SIZE 28 + +/* Defininition for a given TX ring base address */ +#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \ + ((i) * TDMA_DESC_RING_SIZE)) + +/* Ring indexed register addreses */ +#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \ + RING_HEAD_TAIL_PTR) +#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \ + RING_COUNT) +#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \ + RING_MAX_HYST) +#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \ + RING_INTR_CONTROL) +#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \ + (TDMA_DESC_RING_BASE(i) + \ + RING_PROD_CONS_INDEX) +#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \ + RING_MAPPING) +#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \ + RING_PCP_DEI_VID) + +#define TDMA_CONTROL 0x600 +#define TDMA_EN (1 << 0) +#define TSB_EN (1 << 1) +#define TSB_SWAP (1 << 2) +#define ACB_ALGO (1 << 3) +#define BUF_DATA_OFFSET_SHIFT 4 +#define BUF_DATA_OFFSET_MASK 0x3ff +#define VLAN_EN (1 << 14) +#define SW_BRCM_TAG (1 << 15) +#define WNC_KPT_SIZE_UPDATE (1 << 16) +#define SYNC_PKT_SIZE (1 << 17) +#define ACH_TXDONE_DELAY_SHIFT 18 +#define ACH_TXDONE_DELAY_MASK 0xff + +#define TDMA_STATUS 0x604 +#define TDMA_DISABLED (1 << 0) +#define TDMA_LL_RAM_INIT_BUSY (1 << 1) + +#define TDMA_SCB_BURST_SIZE 0x608 +#define TDMA_OVER_MAX_THRESH_STATUS 0x60c +#define TDMA_OVER_HYST_THRESH_STATUS 0x610 +#define TDMA_TPID 0x614 + +#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618 +#define TDMA_FREE_HEAD_MASK 0x7ff +#define TDMA_FREE_TAIL_SHIFT 11 +#define TDMA_FREE_TAIL_MASK 0x7ff + +#define TDMA_FREE_LIST_COUNT 0x61c +#define TDMA_FREE_LIST_COUNT_MASK 0x7ff + +#define TDMA_TIER2_ARB_CTRL 0x620 +#define TDMA_ARB_MODE_RR 0 +#define TDMA_ARB_MODE_WEIGHT_RR 0x1 +#define TDMA_ARB_MODE_STRICT 0x2 +#define TDMA_ARB_MODE_DEFICIT_RR 0x3 +#define TDMA_CREDIT_SHIFT 4 +#define TDMA_CREDIT_MASK 0xffff + +#define TDMA_TIER1_ARB_0_CTRL 0x624 +#define TDMA_ARB_EN (1 << 0) + +#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628 +#define TDMA_TIER1_ARB_1_CTRL 0x62c +#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630 +#define TDMA_TIER1_ARB_2_CTRL 0x634 +#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638 +#define TDMA_TIER1_ARB_3_CTRL 0x63c +#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640 + +#define TDMA_SCB_ENDIAN_OVERRIDE 0x644 +#define TDMA_LE_MODE (1 << 0) +#define TDMA_REG_MODE (1 << 1) + +#define TDMA_TEST 0x648 +#define TDMA_TP_OUT_SEL (1 << 0) +#define TDMA_MEM_TM (1 << 1) + +#define TDMA_DEBUG 0x64c + +/* Transmit/Receive descriptor */ +struct dma_desc { + u32 addr_status_len; + u32 addr_lo; +}; + +/* Number of Receive hardware descriptor words */ +#define NUM_HW_RX_DESC_WORDS 1024 +/* Real number of usable descriptors */ +#define NUM_RX_DESC 
(NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC) + +/* Internal linked-list RAM has up to 1536 entries */ +#define NUM_TX_DESC 1536 + +#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32)) + +/* Rx/Tx common counter group.*/ +struct bcm_sysport_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcm_sysport_rx_counters { + struct bcm_sysport_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcm_sysport_tx_counters { + struct bcm_sysport_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x4f0) # of xmited unicast pkt */ +}; + +struct bcm_sysport_mib { + struct bcm_sysport_rx_counters rx; + struct bcm_sysport_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rxchk_bad_csum; + u32 rxchk_other_pkt_disc; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; +}; + +/* HW maintains a large list of counters */ +enum bcm_sysport_stat_type { + BCM_SYSPORT_STAT_NETDEV = -1, + BCM_SYSPORT_STAT_MIB_RX, + BCM_SYSPORT_STAT_MIB_TX, + BCM_SYSPORT_STAT_RUNT, + BCM_SYSPORT_STAT_RXCHK, + BCM_SYSPORT_STAT_RBUF, + BCM_SYSPORT_STAT_SOFT, +}; + +/* Macros to help define ethtool statistics */ +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct 
net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCM_SYSPORT_STAT_NETDEV, \ +} + +#define STAT_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ + .stat_offset = offsetof(struct bcm_sysport_priv, m), \ + .type = _type, \ +} + +#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) +#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) +#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) +#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT) + +#define STAT_RXCHK(str, m, ofs) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ + .stat_offset = offsetof(struct bcm_sysport_priv, m), \ + .type = BCM_SYSPORT_STAT_RXCHK, \ + .reg_offset = ofs, \ +} + +#define STAT_RBUF(str, m, ofs) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ + .stat_offset = offsetof(struct bcm_sysport_priv, m), \ + .type = BCM_SYSPORT_STAT_RBUF, \ + .reg_offset = ofs, \ +} + +struct bcm_sysport_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcm_sysport_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +/* Software house keeping helper structure */ +struct bcm_sysport_cb { + struct sk_buff *skb; /* SKB for RX packets */ + void __iomem *bd_addr; /* Buffer descriptor PHYS addr */ + + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* Software view of the TX ring */ +struct bcm_sysport_tx_ring { + spinlock_t lock; /* Ring lock for tx reclaim/xmit */ + struct napi_struct napi; /* NAPI per tx queue */ + dma_addr_t desc_dma; /* DMA cookie */ + unsigned int index; /* Ring index */ + unsigned int size; /* Ring current size */ + unsigned int alloc_size; /* Ring one-time allocated size */ + unsigned int desc_count; /* Number of descriptors */ + unsigned int curr_desc; /* Current descriptor */ + unsigned int c_index; /* Last consumer index */ + unsigned int p_index; /* Current producer index */ + struct bcm_sysport_cb *cbs; /* Transmit control blocks */ + struct dma_desc *desc_cpu; /* CPU view of the descriptor */ + struct bcm_sysport_priv *priv; /* private context backpointer */ +}; + +/* Driver private structure */ +struct bcm_sysport_priv { + void __iomem *base; + u32 irq0_stat; + u32 irq0_mask; + u32 irq1_stat; + u32 irq1_mask; + struct napi_struct napi ____cacheline_aligned; + struct net_device *netdev; + struct platform_device *pdev; + int irq0; + int irq1; + int wol_irq; + + /* Transmit rings */ + struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; + + /* Receive queue */ + void __iomem *rx_bds; + void __iomem *rx_bd_assign_ptr; + unsigned int rx_bd_assign_index; + struct bcm_sysport_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_read_ptr; + unsigned int rx_c_index; + + /* PHY device */ + struct device_node *phy_dn; + struct phy_device *phydev; + phy_interface_t phy_interface; + int old_pause; + int old_link; + int old_duplex; + + /* Misc fields */ + unsigned int rx_chk_en:1; + unsigned int tsb_en:1; + unsigned int crc_fwd:1; + u16 rev; + u32 wolopts; + unsigned int wol_irq_disabled:1; + + /* MIB related fields */ + struct bcm_sysport_mib mib; + + /* Ethtool */ + u32 msg_enable; +}; +#endif /* __BCM_SYSPORT_H */ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c new file mode 100644 index 000000000..21e3c38c7 --- /dev/null +++ 
b/drivers/net/ethernet/broadcom/bgmac.c @@ -0,0 +1,1726 @@ +/* + * Driver for (BCM4706)? GBit MAC core on BCMA bus. + * + * Copyright (C) 2012 RafaÅ‚ MiÅ‚ecki + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include "bgmac.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct bcma_device_id bgmac_bcma_tbl[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), + {}, +}; +MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); + +static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, + u32 value, int timeout) +{ + u32 val; + int i; + + for (i = 0; i < timeout / 10; i++) { + val = bcma_read32(core, reg); + if ((val & mask) == value) + return true; + udelay(10); + } + pr_err("Timeout waiting for reg 0x%X\n", reg); + return false; +} + +/************************************************** + * DMA + **************************************************/ + +static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) +{ + u32 val; + int i; + + if (!ring->mmio_base) + return; + + /* Suspend DMA TX ring first. + * bgmac_wait_value doesn't support waiting for any of few values, so + * implement whole loop here. + */ + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, + BGMAC_DMA_TX_SUSPEND); + for (i = 0; i < 10000 / 10; i++) { + val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); + val &= BGMAC_DMA_TX_STAT; + if (val == BGMAC_DMA_TX_STAT_DISABLED || + val == BGMAC_DMA_TX_STAT_IDLEWAIT || + val == BGMAC_DMA_TX_STAT_STOPPED) { + i = 0; + break; + } + udelay(10); + } + if (i) + bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", + ring->mmio_base, val); + + /* Remove SUSPEND bit */ + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0); + if (!bgmac_wait_value(bgmac->core, + ring->mmio_base + BGMAC_DMA_TX_STATUS, + BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED, + 10000)) { + bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", + ring->mmio_base); + udelay(300); + val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); + if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED) + bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n", + ring->mmio_base); + } +} + +static void bgmac_dma_tx_enable(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + u32 ctl; + + ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL); + if (bgmac->core->id.rev >= 4) { + ctl &= ~BGMAC_DMA_TX_BL_MASK; + ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT; + + ctl &= ~BGMAC_DMA_TX_MR_MASK; + ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT; + + ctl &= ~BGMAC_DMA_TX_PC_MASK; + ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT; + + ctl &= ~BGMAC_DMA_TX_PT_MASK; + ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT; + } + ctl |= BGMAC_DMA_TX_ENABLE; + ctl |= BGMAC_DMA_TX_PARITY_DISABLE; + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl); +} + +static void +bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, + int i, int len, u32 ctl0) +{ + struct bgmac_slot_info *slot; + struct bgmac_dma_desc *dma_desc; + u32 ctl1; + + if (i == BGMAC_TX_RING_SLOTS - 1) + ctl0 |= BGMAC_DESC_CTL0_EOT; + + ctl1 = len & BGMAC_DESC_CTL1_LEN; + + slot = &ring->slots[i]; + dma_desc = &ring->cpu_base[i]; + dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr)); + 
dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr)); + dma_desc->ctl0 = cpu_to_le32(ctl0); + dma_desc->ctl1 = cpu_to_le32(ctl1); +} + +static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, + struct sk_buff *skb) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct net_device *net_dev = bgmac->net_dev; + int index = ring->end % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[index]; + int nr_frags; + u32 flags; + int i; + + if (skb->len > BGMAC_DESC_CTL1_LEN) { + bgmac_err(bgmac, "Too long skb (%d)\n", skb->len); + goto err_drop; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + skb_checksum_help(skb); + + nr_frags = skb_shinfo(skb)->nr_frags; + + /* ring->end - ring->start will return the number of valid slots, + * even when ring->end overflows + */ + if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) { + bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n"); + netif_stop_queue(net_dev); + return NETDEV_TX_BUSY; + } + + slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) + goto err_dma_head; + + flags = BGMAC_DESC_CTL0_SOF; + if (!nr_frags) + flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC; + + bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags); + flags = 0; + + for (i = 0; i < nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int len = skb_frag_size(frag); + + index = (index + 1) % BGMAC_TX_RING_SLOTS; + slot = &ring->slots[index]; + slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0, + len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr))) + goto err_dma; + + if (i == nr_frags - 1) + flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC; + + bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags); + } + + slot->skb = skb; + ring->end += nr_frags + 1; + netdev_sent_queue(net_dev, skb->len); + + wmb(); + + /* Increase ring->end to point empty slot. We tell hardware the first + * slot it should *not* read. 
+ */ + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX, + ring->index_base + + (ring->end % BGMAC_TX_RING_SLOTS) * + sizeof(struct bgmac_dma_desc)); + + if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8) + netif_stop_queue(net_dev); + + return NETDEV_TX_OK; + +err_dma: + dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb), + DMA_TO_DEVICE); + + while (i > 0) { + int index = (ring->end + i) % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[index]; + u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1); + int len = ctl1 & BGMAC_DESC_CTL1_LEN; + + dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE); + } + +err_dma_head: + bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", + ring->mmio_base); + +err_drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +/* Free transmitted packets */ +static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + int empty_slot; + bool freed = false; + unsigned bytes_compl = 0, pkts_compl = 0; + + /* The last slot that hardware didn't consume yet */ + empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); + empty_slot &= BGMAC_DMA_TX_STATDPTR; + empty_slot -= ring->index_base; + empty_slot &= BGMAC_DMA_TX_STATDPTR; + empty_slot /= sizeof(struct bgmac_dma_desc); + + while (ring->start != ring->end) { + int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; + struct bgmac_slot_info *slot = &ring->slots[slot_idx]; + u32 ctl1; + int len; + + if (slot_idx == empty_slot) + break; + + ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); + len = ctl1 & BGMAC_DESC_CTL1_LEN; + if (ctl1 & BGMAC_DESC_CTL0_SOF) + /* Unmap no longer used buffer */ + dma_unmap_single(dma_dev, slot->dma_addr, len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, slot->dma_addr, len, + DMA_TO_DEVICE); + + if (slot->skb) { + bytes_compl += slot->skb->len; + pkts_compl++; + + /* Free memory! 
:) */ + dev_kfree_skb(slot->skb); + slot->skb = NULL; + } + + slot->dma_addr = 0; + ring->start++; + freed = true; + } + + if (!pkts_compl) + return; + + netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl); + + if (netif_queue_stopped(bgmac->net_dev)) + netif_wake_queue(bgmac->net_dev); +} + +static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) +{ + if (!ring->mmio_base) + return; + + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0); + if (!bgmac_wait_value(bgmac->core, + ring->mmio_base + BGMAC_DMA_RX_STATUS, + BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED, + 10000)) + bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n", + ring->mmio_base); +} + +static void bgmac_dma_rx_enable(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + u32 ctl; + + ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); + if (bgmac->core->id.rev >= 4) { + ctl &= ~BGMAC_DMA_RX_BL_MASK; + ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; + + ctl &= ~BGMAC_DMA_RX_PC_MASK; + ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT; + + ctl &= ~BGMAC_DMA_RX_PT_MASK; + ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; + } + ctl &= BGMAC_DMA_RX_ADDREXT_MASK; + ctl |= BGMAC_DMA_RX_ENABLE; + ctl |= BGMAC_DMA_RX_PARITY_DISABLE; + ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; + ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT; + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl); +} + +static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, + struct bgmac_slot_info *slot) +{ + struct device *dma_dev = bgmac->core->dma_dev; + dma_addr_t dma_addr; + struct bgmac_rx_header *rx; + void *buf; + + /* Alloc skb */ + buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE); + if (!buf) + return -ENOMEM; + + /* Poison - if everything goes fine, hardware will overwrite it */ + rx = buf + BGMAC_RX_BUF_OFFSET; + rx->len = cpu_to_le16(0xdead); + rx->flags = cpu_to_le16(0xbeef); + + /* Map skb for the DMA */ + dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + bgmac_err(bgmac, "DMA mapping error\n"); + put_page(virt_to_head_page(buf)); + return -ENOMEM; + } + + /* Update the slot */ + slot->buf = buf; + slot->dma_addr = dma_addr; + + return 0; +} + +static void bgmac_dma_rx_update_index(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + dma_wmb(); + + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX, + ring->index_base + + ring->end * sizeof(struct bgmac_dma_desc)); +} + +static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, int desc_idx) +{ + struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx; + u32 ctl0 = 0, ctl1 = 0; + + if (desc_idx == BGMAC_RX_RING_SLOTS - 1) + ctl0 |= BGMAC_DESC_CTL0_EOT; + ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; + /* Is there any BGMAC device that requires extension? 
*/ + /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & + * B43_DMA64_DCTL1_ADDREXT_MASK; + */ + + dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr)); + dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr)); + dma_desc->ctl0 = cpu_to_le32(ctl0); + dma_desc->ctl1 = cpu_to_le32(ctl1); + + ring->end = desc_idx; +} + +static void bgmac_dma_rx_poison_buf(struct device *dma_dev, + struct bgmac_slot_info *slot) +{ + struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; + + dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); + rx->len = cpu_to_le16(0xdead); + rx->flags = cpu_to_le16(0xbeef); + dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); +} + +static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, + int weight) +{ + u32 end_slot; + int handled = 0; + + end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS); + end_slot &= BGMAC_DMA_RX_STATDPTR; + end_slot -= ring->index_base; + end_slot &= BGMAC_DMA_RX_STATDPTR; + end_slot /= sizeof(struct bgmac_dma_desc); + + while (ring->start != end_slot) { + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_slot_info *slot = &ring->slots[ring->start]; + struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; + struct sk_buff *skb; + void *buf = slot->buf; + dma_addr_t dma_addr = slot->dma_addr; + u16 len, flags; + + do { + /* Prepare new skb as replacement */ + if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) { + bgmac_dma_rx_poison_buf(dma_dev, slot); + break; + } + + /* Unmap buffer to make it accessible to the CPU */ + dma_unmap_single(dma_dev, dma_addr, + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); + + /* Get info from the header */ + len = le16_to_cpu(rx->len); + flags = le16_to_cpu(rx->flags); + + /* Check for poison and drop or pass the packet */ + if (len == 0xdead && flags == 0xbeef) { + bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", + ring->start); + put_page(virt_to_head_page(buf)); + break; + } + + if (len > BGMAC_RX_ALLOC_SIZE) { + bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", + ring->start); + put_page(virt_to_head_page(buf)); + break; + } + + /* Omit CRC. */ + len -= ETH_FCS_LEN; + + skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); + skb_put(skb, BGMAC_RX_FRAME_OFFSET + + BGMAC_RX_BUF_OFFSET + len); + skb_pull(skb, BGMAC_RX_FRAME_OFFSET + + BGMAC_RX_BUF_OFFSET); + + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, bgmac->net_dev); + napi_gro_receive(&bgmac->napi, skb); + handled++; + } while (0); + + bgmac_dma_rx_setup_desc(bgmac, ring, ring->start); + + if (++ring->start >= BGMAC_RX_RING_SLOTS) + ring->start = 0; + + if (handled >= weight) /* Should never be greater */ + break; + } + + bgmac_dma_rx_update_index(bgmac, ring); + + return handled; +} + +/* Does ring support unaligned addressing? 
*/ +static bool bgmac_dma_unaligned(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, + enum bgmac_dma_ring_type ring_type) +{ + switch (ring_type) { + case BGMAC_DMA_RING_TX: + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, + 0xff0); + if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO)) + return true; + break; + case BGMAC_DMA_RING_RX: + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, + 0xff0); + if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO)) + return true; + break; + } + return false; +} + +static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_dma_desc *dma_desc = ring->cpu_base; + struct bgmac_slot_info *slot; + int i; + + for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { + int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; + + slot = &ring->slots[i]; + dev_kfree_skb(slot->skb); + + if (!slot->dma_addr) + continue; + + if (slot->skb) + dma_unmap_single(dma_dev, slot->dma_addr, + len, DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, slot->dma_addr, + len, DMA_TO_DEVICE); + } +} + +static void bgmac_dma_rx_ring_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_slot_info *slot; + int i; + + for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) { + slot = &ring->slots[i]; + if (!slot->dma_addr) + continue; + + dma_unmap_single(dma_dev, slot->dma_addr, + BGMAC_RX_BUF_SIZE, + DMA_FROM_DEVICE); + put_page(virt_to_head_page(slot->buf)); + slot->dma_addr = 0; + } +} + +static void bgmac_dma_ring_desc_free(struct bgmac *bgmac, + struct bgmac_dma_ring *ring, + int num_slots) +{ + struct device *dma_dev = bgmac->core->dma_dev; + int size; + + if (!ring->cpu_base) + return; + + /* Free ring of descriptors */ + size = num_slots * sizeof(struct bgmac_dma_desc); + dma_free_coherent(dma_dev, size, ring->cpu_base, + ring->dma_base); +} + +static void bgmac_dma_cleanup(struct bgmac *bgmac) +{ + int i; + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) + bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]); + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) + bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]); +} + +static void bgmac_dma_free(struct bgmac *bgmac) +{ + int i; + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) + bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i], + BGMAC_TX_RING_SLOTS); + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) + bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i], + BGMAC_RX_RING_SLOTS); +} + +static int bgmac_dma_alloc(struct bgmac *bgmac) +{ + struct device *dma_dev = bgmac->core->dma_dev; + struct bgmac_dma_ring *ring; + static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, + BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; + int size; /* ring size: different for Tx and Rx */ + int err; + int i; + + BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); + BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base)); + + if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) { + bgmac_err(bgmac, "Core does not report 64-bit DMA\n"); + return -ENOTSUPP; + } + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { + ring = &bgmac->tx_ring[i]; + ring->mmio_base = ring_base[i]; + + /* Alloc ring of descriptors */ + size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); + ring->cpu_base = dma_zalloc_coherent(dma_dev, size, + &ring->dma_base, + GFP_KERNEL); + if (!ring->cpu_base) { + bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n", + ring->mmio_base); + goto err_dma_free; + } + + 
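+		/* bgmac_dma_unaligned() probes the controller by writing
+		 * 0xff0 to the ring-low register and treats a non-zero
+		 * readback as support for unaligned ring addresses; for such
+		 * rings index_base is set to the low 32 bits of the
+		 * descriptor ring's DMA address and added back when the
+		 * TX/RX index registers are programmed (see
+		 * bgmac_dma_tx_add() and bgmac_dma_rx_update_index()).
+		 */
+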
ring->unaligned = bgmac_dma_unaligned(bgmac, ring, + BGMAC_DMA_RING_TX); + if (ring->unaligned) + ring->index_base = lower_32_bits(ring->dma_base); + else + ring->index_base = 0; + + /* No need to alloc TX slots yet */ + } + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + ring = &bgmac->rx_ring[i]; + ring->mmio_base = ring_base[i]; + + /* Alloc ring of descriptors */ + size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); + ring->cpu_base = dma_zalloc_coherent(dma_dev, size, + &ring->dma_base, + GFP_KERNEL); + if (!ring->cpu_base) { + bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n", + ring->mmio_base); + err = -ENOMEM; + goto err_dma_free; + } + + ring->unaligned = bgmac_dma_unaligned(bgmac, ring, + BGMAC_DMA_RING_RX); + if (ring->unaligned) + ring->index_base = lower_32_bits(ring->dma_base); + else + ring->index_base = 0; + } + + return 0; + +err_dma_free: + bgmac_dma_free(bgmac); + return -ENOMEM; +} + +static int bgmac_dma_init(struct bgmac *bgmac) +{ + struct bgmac_dma_ring *ring; + int i, err; + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { + ring = &bgmac->tx_ring[i]; + + if (!ring->unaligned) + bgmac_dma_tx_enable(bgmac, ring); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO, + lower_32_bits(ring->dma_base)); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI, + upper_32_bits(ring->dma_base)); + if (ring->unaligned) + bgmac_dma_tx_enable(bgmac, ring); + + ring->start = 0; + ring->end = 0; /* Points the slot that should *not* be read */ + } + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) { + int j; + + ring = &bgmac->rx_ring[i]; + + if (!ring->unaligned) + bgmac_dma_rx_enable(bgmac, ring); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO, + lower_32_bits(ring->dma_base)); + bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI, + upper_32_bits(ring->dma_base)); + if (ring->unaligned) + bgmac_dma_rx_enable(bgmac, ring); + + ring->start = 0; + ring->end = 0; + for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) { + err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); + if (err) + goto error; + + bgmac_dma_rx_setup_desc(bgmac, ring, j); + } + + bgmac_dma_rx_update_index(bgmac, ring); + } + + return 0; + +error: + bgmac_dma_cleanup(bgmac); + return err; +} + +/************************************************** + * PHY ops + **************************************************/ + +static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT); + BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK); + BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT); + BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE); + BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START); + BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); + BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); + + if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bgmac->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= 
~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + tmp = BGMAC_PA_START; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + bcma_write32(core, phy_access_addr, tmp); + + if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { + bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n", + phyaddr, reg); + return 0xffff; + } + + return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK; +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ +static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bgmac->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bgmac->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= ~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + bgmac_warn(bgmac, "Error setting MDIO int\n"); + + tmp = BGMAC_PA_START; + tmp |= BGMAC_PA_WRITE; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + tmp |= value; + bcma_write32(core, phy_access_addr, tmp); + + if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { + bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n", + phyaddr, reg); + return -ETIMEDOUT; + } + + return 0; +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ +static void bgmac_phy_init(struct bgmac *bgmac) +{ + struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; + u8 i; + + if (ci->id == BCMA_CHIP_ID_BCM5356) { + for (i = 0; i < 5; i++) { + bgmac_phy_write(bgmac, i, 0x1f, 0x008b); + bgmac_phy_write(bgmac, i, 0x15, 0x0100); + bgmac_phy_write(bgmac, i, 0x1f, 0x000f); + bgmac_phy_write(bgmac, i, 0x12, 0x2aaa); + bgmac_phy_write(bgmac, i, 0x1f, 0x000b); + } + } + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { + bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); + bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); + for (i = 0; i < 5; i++) { + bgmac_phy_write(bgmac, i, 0x1f, 0x000f); + bgmac_phy_write(bgmac, i, 0x16, 0x5284); + bgmac_phy_write(bgmac, i, 0x1f, 0x000b); + bgmac_phy_write(bgmac, i, 0x17, 0x0010); + bgmac_phy_write(bgmac, i, 0x1f, 0x000f); + bgmac_phy_write(bgmac, i, 0x16, 0x5296); + bgmac_phy_write(bgmac, i, 0x17, 0x1073); + bgmac_phy_write(bgmac, i, 0x17, 0x9073); + bgmac_phy_write(bgmac, i, 0x16, 0x52b6); + bgmac_phy_write(bgmac, i, 0x17, 0x9273); + bgmac_phy_write(bgmac, i, 0x1f, 0x000b); + } + } +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ +static void bgmac_phy_reset(struct bgmac *bgmac) +{ + if (bgmac->phyaddr == BGMAC_PHY_NOREGS) + return; + + bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET); + udelay(100); + if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET) + bgmac_err(bgmac, "PHY reset failed\n"); + bgmac_phy_init(bgmac); +} + +/************************************************** + * Chip ops + **************************************************/ + +/* TODO: can we just drop @force? 
Can we avoid resetting the MAC at all if there is + * nothing to change? Try it after stabilizing the driver. + */ +static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set, + bool force) +{ + u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); + u32 new_val = (cmdcfg & mask) | set; + + bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + udelay(2); + + if (new_val != cmdcfg || force) + bgmac_write(bgmac, BGMAC_CMDCFG, new_val); + + bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + udelay(2); +} + +static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr) +{ + u32 tmp; + + tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; + bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp); + tmp = (addr[4] << 8) | addr[5]; + bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp); +} + +static void bgmac_set_rx_mode(struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + if (net_dev->flags & IFF_PROMISC) + bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true); + else + bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true); +} + +#if 0 /* We don't use these regs yet */ +static void bgmac_chip_stats_update(struct bgmac *bgmac) +{ + int i; + + if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) { + for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++) + bgmac->mib_tx_regs[i] = + bgmac_read(bgmac, + BGMAC_TX_GOOD_OCTETS + (i * 4)); + for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++) + bgmac->mib_rx_regs[i] = + bgmac_read(bgmac, + BGMAC_RX_GOOD_OCTETS + (i * 4)); + } + + /* TODO: what else? how to handle BCM4706? Specs are needed */ +} +#endif + +static void bgmac_clear_mib(struct bgmac *bgmac) +{ + int i; + + if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) + return; + + bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR); + for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++) + bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4)); + for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++) + bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4)); +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */ +static void bgmac_mac_speed(struct bgmac *bgmac) +{ + u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD); + u32 set = 0; + + switch (bgmac->mac_speed) { + case SPEED_10: + set |= BGMAC_CMDCFG_ES_10; + break; + case SPEED_100: + set |= BGMAC_CMDCFG_ES_100; + break; + case SPEED_1000: + set |= BGMAC_CMDCFG_ES_1000; + break; + case SPEED_2500: + set |= BGMAC_CMDCFG_ES_2500; + break; + default: + bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed); + } + + if (bgmac->mac_duplex == DUPLEX_HALF) + set |= BGMAC_CMDCFG_HD; + + bgmac_cmdcfg_maskset(bgmac, mask, set, true); +} + +static void bgmac_miiconfig(struct bgmac *bgmac) +{ + struct bcma_device *core = bgmac->core; + struct bcma_chipinfo *ci = &core->bus->chipinfo; + u8 imode; + + if (ci->id == BCMA_CHIP_ID_BCM4707 || + ci->id == BCMA_CHIP_ID_BCM53018) { + bcma_awrite32(core, BCMA_IOCTL, + bcma_aread32(core, BCMA_IOCTL) | 0x40 | + BGMAC_BCMA_IOCTL_SW_CLKEN); + bgmac->mac_speed = SPEED_2500; + bgmac->mac_duplex = DUPLEX_FULL; + bgmac_mac_speed(bgmac); + } else { + imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & + BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; + if (imode == 0 || imode == 1) { + bgmac->mac_speed = SPEED_100; + bgmac->mac_duplex = DUPLEX_FULL; + bgmac_mac_speed(bgmac); + } + } +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ +static void bgmac_chip_reset(struct bgmac *bgmac) +{ + struct bcma_device *core = bgmac->core; + struct bcma_bus *bus = core->bus; + struct bcma_chipinfo *ci = 
&bus->chipinfo; + u32 flags; + u32 iost; + int i; + + if (bcma_core_is_enabled(core)) { + if (!bgmac->stats_grabbed) { + /* bgmac_chip_stats_update(bgmac); */ + bgmac->stats_grabbed = true; + } + + for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) + bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]); + + bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false); + udelay(1); + + for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) + bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]); + + /* TODO: Clear software multicast filter list */ + } + + iost = bcma_aread32(core, BCMA_IOST); + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) + iost &= ~BGMAC_BCMA_IOST_ATTACHED; + + /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */ + if (ci->id != BCMA_CHIP_ID_BCM4707) { + flags = 0; + if (iost & BGMAC_BCMA_IOST_ATTACHED) { + flags = BGMAC_BCMA_IOCTL_SW_CLKEN; + if (!bgmac->has_robosw) + flags |= BGMAC_BCMA_IOCTL_SW_RESET; + } + bcma_core_enable(core, flags); + } + + /* Request Misc PLL for corerev > 2 */ + if (core->id.rev > 2 && + ci->id != BCMA_CHIP_ID_BCM4707 && + ci->id != BCMA_CHIP_ID_BCM53018) { + bgmac_set(bgmac, BCMA_CLKCTLST, + BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); + bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, + BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, + BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, + 1000); + } + + if (ci->id == BCMA_CHIP_ID_BCM5357 || + ci->id == BCMA_CHIP_ID_BCM4749 || + ci->id == BCMA_CHIP_ID_BCM53572) { + struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; + u8 et_swtype = 0; + u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | + BGMAC_CHIPCTL_1_IF_TYPE_MII; + char buf[4]; + + if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { + if (kstrtou8(buf, 0, &et_swtype)) + bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", + buf); + et_swtype &= 0x0f; + et_swtype <<= 4; + sw_type = et_swtype; + } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) { + sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; + } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) { + sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | + BGMAC_CHIPCTL_1_SW_TYPE_RGMII; + } + bcma_chipco_chipctl_maskset(cc, 1, + ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | + BGMAC_CHIPCTL_1_SW_TYPE_MASK), + sw_type); + } + + if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) + bcma_awrite32(core, BCMA_IOCTL, + bcma_aread32(core, BCMA_IOCTL) & + ~BGMAC_BCMA_IOCTL_SW_RESET); + + /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset + * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine + * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to + * be keps until taking MAC out of the reset. 
+ */ + bgmac_cmdcfg_maskset(bgmac, + ~(BGMAC_CMDCFG_TE | + BGMAC_CMDCFG_RE | + BGMAC_CMDCFG_RPI | + BGMAC_CMDCFG_TAI | + BGMAC_CMDCFG_HD | + BGMAC_CMDCFG_ML | + BGMAC_CMDCFG_CFE | + BGMAC_CMDCFG_RL | + BGMAC_CMDCFG_RED | + BGMAC_CMDCFG_PE | + BGMAC_CMDCFG_TPI | + BGMAC_CMDCFG_PAD_EN | + BGMAC_CMDCFG_PF), + BGMAC_CMDCFG_PROM | + BGMAC_CMDCFG_NLC | + BGMAC_CMDCFG_CFE | + BGMAC_CMDCFG_SR(core->id.rev), + false); + bgmac->mac_speed = SPEED_UNKNOWN; + bgmac->mac_duplex = DUPLEX_UNKNOWN; + + bgmac_clear_mib(bgmac); + if (core->id.id == BCMA_CORE_4706_MAC_GBIT) + bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0, + BCMA_GMAC_CMN_PC_MTE); + else + bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE); + bgmac_miiconfig(bgmac); + bgmac_phy_init(bgmac); + + netdev_reset_queue(bgmac->net_dev); +} + +static void bgmac_chip_intrs_on(struct bgmac *bgmac) +{ + bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask); +} + +static void bgmac_chip_intrs_off(struct bgmac *bgmac) +{ + bgmac_write(bgmac, BGMAC_INT_MASK, 0); + bgmac_read(bgmac, BGMAC_INT_MASK); +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */ +static void bgmac_enable(struct bgmac *bgmac) +{ + struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + u32 cmdcfg; + u32 mode; + u32 rxq_ctl; + u32 fl_ctl; + u16 bp_clk; + u8 mdp; + + cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); + bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE), + BGMAC_CMDCFG_SR(bgmac->core->id.rev), true); + udelay(2); + cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE; + bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg); + + mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> + BGMAC_DS_MM_SHIFT; + if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0) + bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); + if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2) + bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0, + BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); + + switch (ci->id) { + case BCMA_CHIP_ID_BCM5357: + case BCMA_CHIP_ID_BCM4749: + case BCMA_CHIP_ID_BCM53572: + case BCMA_CHIP_ID_BCM4716: + case BCMA_CHIP_ID_BCM47162: + fl_ctl = 0x03cb04cb; + if (ci->id == BCMA_CHIP_ID_BCM5357 || + ci->id == BCMA_CHIP_ID_BCM4749 || + ci->id == BCMA_CHIP_ID_BCM53572) + fl_ctl = 0x2300e1; + bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl); + bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff); + break; + } + + if (ci->id != BCMA_CHIP_ID_BCM4707 && + ci->id != BCMA_CHIP_ID_BCM53018) { + rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); + rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; + bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / + 1000000; + mdp = (bp_clk * 128 / 1000) - 3; + rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT); + bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl); + } +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */ +static void bgmac_chip_init(struct bgmac *bgmac) +{ + /* 1 interrupt per received frame */ + bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT); + + /* Enable 802.3x tx flow control (honor received PAUSE frames) */ + bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true); + + bgmac_set_rx_mode(bgmac->net_dev); + + bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr); + + if (bgmac->loopback) + bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false); + else + bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false); + + bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN); + + bgmac_chip_intrs_on(bgmac); + + bgmac_enable(bgmac); +} + +static irqreturn_t bgmac_interrupt(int irq, void *dev_id) +{ + struct bgmac *bgmac = 
netdev_priv(dev_id); + + u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS); + int_status &= bgmac->int_mask; + + if (!int_status) + return IRQ_NONE; + + int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX); + if (int_status) + bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status); + + /* Disable new interrupts until handling existing ones */ + bgmac_chip_intrs_off(bgmac); + + napi_schedule(&bgmac->napi); + + return IRQ_HANDLED; +} + +static int bgmac_poll(struct napi_struct *napi, int weight) +{ + struct bgmac *bgmac = container_of(napi, struct bgmac, napi); + int handled = 0; + + /* Ack */ + bgmac_write(bgmac, BGMAC_INT_STATUS, ~0); + + bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]); + handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight); + + /* Poll again if more events arrived in the meantime */ + if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX)) + return weight; + + if (handled < weight) { + napi_complete(napi); + bgmac_chip_intrs_on(bgmac); + } + + return handled; +} + +/************************************************** + * net_device_ops + **************************************************/ + +static int bgmac_open(struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + int err = 0; + + bgmac_chip_reset(bgmac); + + err = bgmac_dma_init(bgmac); + if (err) + return err; + + /* Specs say about reclaiming rings here, but we do that in DMA init */ + bgmac_chip_init(bgmac); + + err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED, + KBUILD_MODNAME, net_dev); + if (err < 0) { + bgmac_err(bgmac, "IRQ request error: %d!\n", err); + bgmac_dma_cleanup(bgmac); + return err; + } + napi_enable(&bgmac->napi); + + phy_start(bgmac->phy_dev); + + netif_carrier_on(net_dev); + return 0; +} + +static int bgmac_stop(struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + netif_carrier_off(net_dev); + + phy_stop(bgmac->phy_dev); + + napi_disable(&bgmac->napi); + bgmac_chip_intrs_off(bgmac); + free_irq(bgmac->core->irq, net_dev); + + bgmac_chip_reset(bgmac); + bgmac_dma_cleanup(bgmac); + + return 0; +} + +static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + struct bgmac_dma_ring *ring; + + /* No QOS support yet */ + ring = &bgmac->tx_ring[0]; + return bgmac_dma_tx_add(bgmac, ring, skb); +} + +static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + int ret; + + ret = eth_prepare_mac_addr_change(net_dev, addr); + if (ret < 0) + return ret; + bgmac_write_mac_address(bgmac, (u8 *)addr); + eth_commit_mac_addr_change(net_dev, addr); + return 0; +} + +static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + if (!netif_running(net_dev)) + return -EINVAL; + + return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd); +} + +static const struct net_device_ops bgmac_netdev_ops = { + .ndo_open = bgmac_open, + .ndo_stop = bgmac_stop, + .ndo_start_xmit = bgmac_start_xmit, + .ndo_set_rx_mode = bgmac_set_rx_mode, + .ndo_set_mac_address = bgmac_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = bgmac_ioctl, +}; + +/************************************************** + * ethtool_ops + **************************************************/ + +static int bgmac_get_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + return 
phy_ethtool_gset(bgmac->phy_dev, cmd); +} + +static int bgmac_set_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + + return phy_ethtool_sset(bgmac->phy_dev, cmd); +} + +static void bgmac_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info)); +} + +static const struct ethtool_ops bgmac_ethtool_ops = { + .get_settings = bgmac_get_settings, + .set_settings = bgmac_set_settings, + .get_drvinfo = bgmac_get_drvinfo, +}; + +/************************************************** + * MII + **************************************************/ + +static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return bgmac_phy_read(bus->priv, mii_id, regnum); +} + +static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + return bgmac_phy_write(bus->priv, mii_id, regnum, value); +} + +static void bgmac_adjust_link(struct net_device *net_dev) +{ + struct bgmac *bgmac = netdev_priv(net_dev); + struct phy_device *phy_dev = bgmac->phy_dev; + bool update = false; + + if (phy_dev->link) { + if (phy_dev->speed != bgmac->mac_speed) { + bgmac->mac_speed = phy_dev->speed; + update = true; + } + + if (phy_dev->duplex != bgmac->mac_duplex) { + bgmac->mac_duplex = phy_dev->duplex; + update = true; + } + } + + if (update) { + bgmac_mac_speed(bgmac); + phy_print_status(phy_dev); + } +} + +static int bgmac_fixed_phy_register(struct bgmac *bgmac) +{ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + }; + struct phy_device *phy_dev; + int err; + + phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phy_dev || IS_ERR(phy_dev)) { + bgmac_err(bgmac, "Failed to register fixed PHY device\n"); + return -ENODEV; + } + + err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, + PHY_INTERFACE_MODE_MII); + if (err) { + bgmac_err(bgmac, "Connecting PHY failed\n"); + return err; + } + + bgmac->phy_dev = phy_dev; + + return err; +} + +static int bgmac_mii_register(struct bgmac *bgmac) +{ + struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + char bus_id[MII_BUS_ID_SIZE + 3]; + int i, err = 0; + + if (ci->id == BCMA_CHIP_ID_BCM4707 || + ci->id == BCMA_CHIP_ID_BCM53018) + return bgmac_fixed_phy_register(bgmac); + + mii_bus = mdiobus_alloc(); + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "bgmac mii bus"; + sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num, + bgmac->core->core_unit); + mii_bus->priv = bgmac; + mii_bus->read = bgmac_mii_read; + mii_bus->write = bgmac_mii_write; + mii_bus->parent = &bgmac->core->dev; + mii_bus->phy_mask = ~(1 << bgmac->phyaddr); + + mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (!mii_bus->irq) { + err = -ENOMEM; + goto err_free_bus; + } + for (i = 0; i < PHY_MAX_ADDR; i++) + mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(mii_bus); + if (err) { + bgmac_err(bgmac, "Registration of mii bus failed\n"); + goto err_free_irq; + } + + bgmac->mii_bus = mii_bus; + + /* Connect to the PHY */ + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, + bgmac->phyaddr); + phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phy_dev)) { + bgmac_err(bgmac, "PHY connecton failed\n"); + err = PTR_ERR(phy_dev); + goto 
err_unregister_bus; + } + bgmac->phy_dev = phy_dev; + + return err; + +err_unregister_bus: + mdiobus_unregister(mii_bus); +err_free_irq: + kfree(mii_bus->irq); +err_free_bus: + mdiobus_free(mii_bus); + return err; +} + +static void bgmac_mii_unregister(struct bgmac *bgmac) +{ + struct mii_bus *mii_bus = bgmac->mii_bus; + + mdiobus_unregister(mii_bus); + kfree(mii_bus->irq); + mdiobus_free(mii_bus); +} + +/************************************************** + * BCMA bus ops + **************************************************/ + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ +static int bgmac_probe(struct bcma_device *core) +{ + struct bcma_chipinfo *ci = &core->bus->chipinfo; + struct net_device *net_dev; + struct bgmac *bgmac; + struct ssb_sprom *sprom = &core->bus->sprom; + u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac; + int err; + + /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */ + if (core->core_unit > 1) { + pr_err("Unsupported core_unit %d\n", core->core_unit); + return -ENOTSUPP; + } + + if (!is_valid_ether_addr(mac)) { + dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac); + eth_random_addr(mac); + dev_warn(&core->dev, "Using random MAC: %pM\n", mac); + } + + /* Allocation and references */ + net_dev = alloc_etherdev(sizeof(*bgmac)); + if (!net_dev) + return -ENOMEM; + net_dev->netdev_ops = &bgmac_netdev_ops; + net_dev->irq = core->irq; + net_dev->ethtool_ops = &bgmac_ethtool_ops; + bgmac = netdev_priv(net_dev); + bgmac->net_dev = net_dev; + bgmac->core = core; + bcma_set_drvdata(core, bgmac); + + /* Defaults */ + memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN); + + /* On BCM4706 we need common core to access PHY */ + if (core->id.id == BCMA_CORE_4706_MAC_GBIT && + !core->bus->drv_gmac_cmn.core) { + bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n"); + err = -ENODEV; + goto err_netdev_free; + } + bgmac->cmn = core->bus->drv_gmac_cmn.core; + + bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr : + sprom->et0phyaddr; + bgmac->phyaddr &= BGMAC_PHY_MASK; + if (bgmac->phyaddr == BGMAC_PHY_MASK) { + bgmac_err(bgmac, "No PHY found\n"); + err = -ENODEV; + goto err_netdev_free; + } + bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr, + bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); + + if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) { + bgmac_err(bgmac, "PCI setup not implemented\n"); + err = -ENOTSUPP; + goto err_netdev_free; + } + + bgmac_chip_reset(bgmac); + + /* For Northstar, we have to take all GMAC core out of reset */ + if (ci->id == BCMA_CHIP_ID_BCM4707 || + ci->id == BCMA_CHIP_ID_BCM53018) { + struct bcma_device *ns_core; + int ns_gmac; + + /* Northstar has 4 GMAC cores */ + for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) { + /* As Northstar requirement, we have to reset all GMACs + * before accessing one. bgmac_chip_reset() call + * bcma_core_enable() for this core. Then the other + * three GMACs didn't reset. We do it here. + */ + ns_core = bcma_find_core_unit(core->bus, + BCMA_CORE_MAC_GBIT, + ns_gmac); + if (ns_core && !bcma_core_is_enabled(ns_core)) + bcma_core_enable(ns_core, 0); + } + } + + err = bgmac_dma_alloc(bgmac); + if (err) { + bgmac_err(bgmac, "Unable to alloc memory for DMA\n"); + goto err_netdev_free; + } + + bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; + if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) + bgmac->int_mask &= ~BGMAC_IS_TX_MASK; + + /* TODO: reset the external phy. 
Specs are needed */ + bgmac_phy_reset(bgmac); + + bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & + BGMAC_BFL_ENETROBO); + if (bgmac->has_robosw) + bgmac_warn(bgmac, "Support for Roboswitch not implemented\n"); + + if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) + bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); + + netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); + + err = bgmac_mii_register(bgmac); + if (err) { + bgmac_err(bgmac, "Cannot register MDIO\n"); + goto err_dma_free; + } + + net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + net_dev->hw_features = net_dev->features; + net_dev->vlan_features = net_dev->features; + + err = register_netdev(bgmac->net_dev); + if (err) { + bgmac_err(bgmac, "Cannot register net device\n"); + goto err_mii_unregister; + } + + netif_carrier_off(net_dev); + + return 0; + +err_mii_unregister: + bgmac_mii_unregister(bgmac); +err_dma_free: + bgmac_dma_free(bgmac); + +err_netdev_free: + bcma_set_drvdata(core, NULL); + free_netdev(net_dev); + + return err; +} + +static void bgmac_remove(struct bcma_device *core) +{ + struct bgmac *bgmac = bcma_get_drvdata(core); + + unregister_netdev(bgmac->net_dev); + bgmac_mii_unregister(bgmac); + netif_napi_del(&bgmac->napi); + bgmac_dma_free(bgmac); + bcma_set_drvdata(core, NULL); + free_netdev(bgmac->net_dev); +} + +static struct bcma_driver bgmac_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = bgmac_bcma_tbl, + .probe = bgmac_probe, + .remove = bgmac_remove, +}; + +static int __init bgmac_init(void) +{ + int err; + + err = bcma_driver_register(&bgmac_bcma_driver); + if (err) + return err; + pr_info("Broadcom 47xx GBit MAC driver loaded\n"); + + return 0; +} + +static void __exit bgmac_exit(void) +{ + bcma_driver_unregister(&bgmac_bcma_driver); +} + +module_init(bgmac_init) +module_exit(bgmac_exit) + +MODULE_AUTHOR("Rafał Miłecki"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h new file mode 100644 index 000000000..db27febbb --- /dev/null +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -0,0 +1,493 @@ +#ifndef _BGMAC_H +#define _BGMAC_H + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define bgmac_err(bgmac, fmt, ...) \ + dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) +#define bgmac_warn(bgmac, fmt, ...) \ + dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) +#define bgmac_info(bgmac, fmt, ...) \ + dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) +#define bgmac_dbg(bgmac, fmt, ...) 
\ + dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) + +#include +#include + +#define BGMAC_DEV_CTL 0x000 +#define BGMAC_DC_TSM 0x00000002 +#define BGMAC_DC_CFCO 0x00000004 +#define BGMAC_DC_RLSS 0x00000008 +#define BGMAC_DC_MROR 0x00000010 +#define BGMAC_DC_FCM_MASK 0x00000060 +#define BGMAC_DC_FCM_SHIFT 5 +#define BGMAC_DC_NAE 0x00000080 +#define BGMAC_DC_TF 0x00000100 +#define BGMAC_DC_RDS_MASK 0x00030000 +#define BGMAC_DC_RDS_SHIFT 16 +#define BGMAC_DC_TDS_MASK 0x000c0000 +#define BGMAC_DC_TDS_SHIFT 18 +#define BGMAC_DEV_STATUS 0x004 /* Configuration of the interface */ +#define BGMAC_DS_RBF 0x00000001 +#define BGMAC_DS_RDF 0x00000002 +#define BGMAC_DS_RIF 0x00000004 +#define BGMAC_DS_TBF 0x00000008 +#define BGMAC_DS_TDF 0x00000010 +#define BGMAC_DS_TIF 0x00000020 +#define BGMAC_DS_PO 0x00000040 +#define BGMAC_DS_MM_MASK 0x00000300 /* Mode of the interface */ +#define BGMAC_DS_MM_SHIFT 8 +#define BGMAC_BIST_STATUS 0x00c +#define BGMAC_INT_STATUS 0x020 /* Interrupt status */ +#define BGMAC_IS_MRO 0x00000001 +#define BGMAC_IS_MTO 0x00000002 +#define BGMAC_IS_TFD 0x00000004 +#define BGMAC_IS_LS 0x00000008 +#define BGMAC_IS_MDIO 0x00000010 +#define BGMAC_IS_MR 0x00000020 +#define BGMAC_IS_MT 0x00000040 +#define BGMAC_IS_TO 0x00000080 +#define BGMAC_IS_DESC_ERR 0x00000400 /* Descriptor error */ +#define BGMAC_IS_DATA_ERR 0x00000800 /* Data error */ +#define BGMAC_IS_DESC_PROT_ERR 0x00001000 /* Descriptor protocol error */ +#define BGMAC_IS_RX_DESC_UNDERF 0x00002000 /* Receive descriptor underflow */ +#define BGMAC_IS_RX_F_OVERF 0x00004000 /* Receive FIFO overflow */ +#define BGMAC_IS_TX_F_UNDERF 0x00008000 /* Transmit FIFO underflow */ +#define BGMAC_IS_RX 0x00010000 /* Interrupt for RX queue 0 */ +#define BGMAC_IS_TX0 0x01000000 /* Interrupt for TX queue 0 */ +#define BGMAC_IS_TX1 0x02000000 /* Interrupt for TX queue 1 */ +#define BGMAC_IS_TX2 0x04000000 /* Interrupt for TX queue 2 */ +#define BGMAC_IS_TX3 0x08000000 /* Interrupt for TX queue 3 */ +#define BGMAC_IS_TX_MASK 0x0f000000 +#define BGMAC_IS_INTMASK 0x0f01fcff +#define BGMAC_IS_ERRMASK 0x0000fc00 +#define BGMAC_INT_MASK 0x024 /* Interrupt mask */ +#define BGMAC_GP_TIMER 0x028 +#define BGMAC_INT_RECV_LAZY 0x100 +#define BGMAC_IRL_TO_MASK 0x00ffffff +#define BGMAC_IRL_FC_MASK 0xff000000 +#define BGMAC_IRL_FC_SHIFT 24 /* Shift the number of interrupts triggered per received frame */ +#define BGMAC_FLOW_CTL_THRESH 0x104 /* Flow control thresholds */ +#define BGMAC_WRRTHRESH 0x108 +#define BGMAC_GMAC_IDLE_CNT_THRESH 0x10c +#define BGMAC_PHY_ACCESS 0x180 /* PHY access address */ +#define BGMAC_PA_DATA_MASK 0x0000ffff +#define BGMAC_PA_ADDR_MASK 0x001f0000 +#define BGMAC_PA_ADDR_SHIFT 16 +#define BGMAC_PA_REG_MASK 0x1f000000 +#define BGMAC_PA_REG_SHIFT 24 +#define BGMAC_PA_WRITE 0x20000000 +#define BGMAC_PA_START 0x40000000 +#define BGMAC_PHY_CNTL 0x188 /* PHY control address */ +#define BGMAC_PC_EPA_MASK 0x0000001f +#define BGMAC_PC_MCT_MASK 0x007f0000 +#define BGMAC_PC_MCT_SHIFT 16 +#define BGMAC_PC_MTE 0x00800000 +#define BGMAC_TXQ_CTL 0x18c +#define BGMAC_TXQ_CTL_DBT_MASK 0x00000fff +#define BGMAC_TXQ_CTL_DBT_SHIFT 0 +#define BGMAC_RXQ_CTL 0x190 +#define BGMAC_RXQ_CTL_DBT_MASK 0x00000fff +#define BGMAC_RXQ_CTL_DBT_SHIFT 0 +#define BGMAC_RXQ_CTL_PTE 0x00001000 +#define BGMAC_RXQ_CTL_MDP_MASK 0x3f000000 +#define BGMAC_RXQ_CTL_MDP_SHIFT 24 +#define BGMAC_GPIO_SELECT 0x194 +#define BGMAC_GPIO_OUTPUT_EN 0x198 + +/* For 0x1e0 see BCMA_CLKCTLST. 
Below are BGMAC specific bits */ +#define BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ 0x00000100 +#define BGMAC_BCMA_CLKCTLST_MISC_PLL_ST 0x01000000 + +#define BGMAC_HW_WAR 0x1e4 +#define BGMAC_PWR_CTL 0x1e8 +#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */ +#define BGMAC_DMA_BASE1 0x240 /* Tx controller only */ +#define BGMAC_DMA_BASE2 0x280 /* Tx controller only */ +#define BGMAC_DMA_BASE3 0x2C0 /* Tx controller only */ +#define BGMAC_TX_GOOD_OCTETS 0x300 +#define BGMAC_TX_GOOD_OCTETS_HIGH 0x304 +#define BGMAC_TX_GOOD_PKTS 0x308 +#define BGMAC_TX_OCTETS 0x30c +#define BGMAC_TX_OCTETS_HIGH 0x310 +#define BGMAC_TX_PKTS 0x314 +#define BGMAC_TX_BROADCAST_PKTS 0x318 +#define BGMAC_TX_MULTICAST_PKTS 0x31c +#define BGMAC_TX_LEN_64 0x320 +#define BGMAC_TX_LEN_65_TO_127 0x324 +#define BGMAC_TX_LEN_128_TO_255 0x328 +#define BGMAC_TX_LEN_256_TO_511 0x32c +#define BGMAC_TX_LEN_512_TO_1023 0x330 +#define BGMAC_TX_LEN_1024_TO_1522 0x334 +#define BGMAC_TX_LEN_1523_TO_2047 0x338 +#define BGMAC_TX_LEN_2048_TO_4095 0x33c +#define BGMAC_TX_LEN_4095_TO_8191 0x340 +#define BGMAC_TX_LEN_8192_TO_MAX 0x344 +#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */ +#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */ +#define BGMAC_TX_FRAGMENT_PKTS 0x350 +#define BGMAC_TX_UNDERRUNS 0x354 /* Error */ +#define BGMAC_TX_TOTAL_COLS 0x358 +#define BGMAC_TX_SINGLE_COLS 0x35c +#define BGMAC_TX_MULTIPLE_COLS 0x360 +#define BGMAC_TX_EXCESSIVE_COLS 0x364 /* Error */ +#define BGMAC_TX_LATE_COLS 0x368 /* Error */ +#define BGMAC_TX_DEFERED 0x36c +#define BGMAC_TX_CARRIER_LOST 0x370 +#define BGMAC_TX_PAUSE_PKTS 0x374 +#define BGMAC_TX_UNI_PKTS 0x378 +#define BGMAC_TX_Q0_PKTS 0x37c +#define BGMAC_TX_Q0_OCTETS 0x380 +#define BGMAC_TX_Q0_OCTETS_HIGH 0x384 +#define BGMAC_TX_Q1_PKTS 0x388 +#define BGMAC_TX_Q1_OCTETS 0x38c +#define BGMAC_TX_Q1_OCTETS_HIGH 0x390 +#define BGMAC_TX_Q2_PKTS 0x394 +#define BGMAC_TX_Q2_OCTETS 0x398 +#define BGMAC_TX_Q2_OCTETS_HIGH 0x39c +#define BGMAC_TX_Q3_PKTS 0x3a0 +#define BGMAC_TX_Q3_OCTETS 0x3a4 +#define BGMAC_TX_Q3_OCTETS_HIGH 0x3a8 +#define BGMAC_RX_GOOD_OCTETS 0x3b0 +#define BGMAC_RX_GOOD_OCTETS_HIGH 0x3b4 +#define BGMAC_RX_GOOD_PKTS 0x3b8 +#define BGMAC_RX_OCTETS 0x3bc +#define BGMAC_RX_OCTETS_HIGH 0x3c0 +#define BGMAC_RX_PKTS 0x3c4 +#define BGMAC_RX_BROADCAST_PKTS 0x3c8 +#define BGMAC_RX_MULTICAST_PKTS 0x3cc +#define BGMAC_RX_LEN_64 0x3d0 +#define BGMAC_RX_LEN_65_TO_127 0x3d4 +#define BGMAC_RX_LEN_128_TO_255 0x3d8 +#define BGMAC_RX_LEN_256_TO_511 0x3dc +#define BGMAC_RX_LEN_512_TO_1023 0x3e0 +#define BGMAC_RX_LEN_1024_TO_1522 0x3e4 +#define BGMAC_RX_LEN_1523_TO_2047 0x3e8 +#define BGMAC_RX_LEN_2048_TO_4095 0x3ec +#define BGMAC_RX_LEN_4095_TO_8191 0x3f0 +#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4 +#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */ +#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */ +#define BGMAC_RX_FRAGMENT_PKTS 0x400 +#define BGMAC_RX_MISSED_PKTS 0x404 /* Error */ +#define BGMAC_RX_CRC_ALIGN_ERRS 0x408 /* Error */ +#define BGMAC_RX_UNDERSIZE 0x40c /* Error */ +#define BGMAC_RX_CRC_ERRS 0x410 /* Error */ +#define BGMAC_RX_ALIGN_ERRS 0x414 /* Error */ +#define BGMAC_RX_SYMBOL_ERRS 0x418 /* Error */ +#define BGMAC_RX_PAUSE_PKTS 0x41c +#define BGMAC_RX_NONPAUSE_PKTS 0x420 +#define BGMAC_RX_SACHANGES 0x424 +#define BGMAC_RX_UNI_PKTS 0x428 +#define BGMAC_UNIMAC_VERSION 0x800 +#define BGMAC_HDBKP_CTL 0x804 +#define BGMAC_CMDCFG 0x808 /* Configuration */ +#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */ +#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */ +#define BGMAC_CMDCFG_ES_MASK 
0x0000000c /* Ethernet speed see gmac_speed */ +#define BGMAC_CMDCFG_ES_10 0x00000000 +#define BGMAC_CMDCFG_ES_100 0x00000004 +#define BGMAC_CMDCFG_ES_1000 0x00000008 +#define BGMAC_CMDCFG_ES_2500 0x0000000C +#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */ +#define BGMAC_CMDCFG_PAD_EN 0x00000020 +#define BGMAC_CMDCFG_CF 0x00000040 +#define BGMAC_CMDCFG_PF 0x00000080 +#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */ +#define BGMAC_CMDCFG_TAI 0x00000200 +#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ +#define BGMAC_CMDCFG_HD_SHIFT 10 +#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */ +#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */ +#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) +#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ +#define BGMAC_CMDCFG_AE 0x00400000 +#define BGMAC_CMDCFG_CFE 0x00800000 +#define BGMAC_CMDCFG_NLC 0x01000000 +#define BGMAC_CMDCFG_RL 0x02000000 +#define BGMAC_CMDCFG_RED 0x04000000 +#define BGMAC_CMDCFG_PE 0x08000000 +#define BGMAC_CMDCFG_TPI 0x10000000 +#define BGMAC_CMDCFG_AT 0x20000000 +#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */ +#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */ +#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */ +#define BGMAC_PAUSEQUANTA 0x818 +#define BGMAC_MAC_MODE 0x844 +#define BGMAC_OUTERTAG 0x848 +#define BGMAC_INNERTAG 0x84c +#define BGMAC_TXIPG 0x85c +#define BGMAC_PAUSE_CTL 0xb30 +#define BGMAC_TX_FLUSH 0xb34 +#define BGMAC_RX_STATUS 0xb38 +#define BGMAC_TX_STATUS 0xb3c + +/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */ +#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */ +#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */ + +/* BCMA GMAC core specific IO status (BCMA_IOST) flags */ +#define BGMAC_BCMA_IOST_ATTACHED 0x00000800 + +#define BGMAC_NUM_MIB_TX_REGS \ + (((BGMAC_TX_Q3_OCTETS_HIGH - BGMAC_TX_GOOD_OCTETS) / 4) + 1) +#define BGMAC_NUM_MIB_RX_REGS \ + (((BGMAC_RX_UNI_PKTS - BGMAC_RX_GOOD_OCTETS) / 4) + 1) + +#define BGMAC_DMA_TX_CTL 0x00 +#define BGMAC_DMA_TX_ENABLE 0x00000001 +#define BGMAC_DMA_TX_SUSPEND 0x00000002 +#define BGMAC_DMA_TX_LOOPBACK 0x00000004 +#define BGMAC_DMA_TX_FLUSH 0x00000010 +#define BGMAC_DMA_TX_MR_MASK 0x000000C0 /* Multiple outstanding reads */ +#define BGMAC_DMA_TX_MR_SHIFT 6 +#define BGMAC_DMA_TX_MR_1 0 +#define BGMAC_DMA_TX_MR_2 1 +#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800 +#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000 +#define BGMAC_DMA_TX_ADDREXT_SHIFT 16 +#define BGMAC_DMA_TX_BL_MASK 0x001C0000 /* BurstLen bits */ +#define BGMAC_DMA_TX_BL_SHIFT 18 +#define BGMAC_DMA_TX_BL_16 0 +#define BGMAC_DMA_TX_BL_32 1 +#define BGMAC_DMA_TX_BL_64 2 +#define BGMAC_DMA_TX_BL_128 3 +#define BGMAC_DMA_TX_BL_256 4 +#define BGMAC_DMA_TX_BL_512 5 +#define BGMAC_DMA_TX_BL_1024 6 +#define BGMAC_DMA_TX_PC_MASK 0x00E00000 /* Prefetch control */ +#define BGMAC_DMA_TX_PC_SHIFT 21 +#define BGMAC_DMA_TX_PC_0 0 +#define BGMAC_DMA_TX_PC_4 1 +#define BGMAC_DMA_TX_PC_8 2 +#define BGMAC_DMA_TX_PC_16 3 +#define BGMAC_DMA_TX_PT_MASK 0x03000000 /* Prefetch threshold */ +#define BGMAC_DMA_TX_PT_SHIFT 24 +#define BGMAC_DMA_TX_PT_1 0 +#define BGMAC_DMA_TX_PT_2 1 +#define BGMAC_DMA_TX_PT_4 2 +#define BGMAC_DMA_TX_PT_8 3 +#define BGMAC_DMA_TX_INDEX 0x04 +#define BGMAC_DMA_TX_RINGLO 0x08 +#define 
BGMAC_DMA_TX_RINGHI 0x0C +#define BGMAC_DMA_TX_STATUS 0x10 +#define BGMAC_DMA_TX_STATDPTR 0x00001FFF +#define BGMAC_DMA_TX_STAT 0xF0000000 +#define BGMAC_DMA_TX_STAT_DISABLED 0x00000000 +#define BGMAC_DMA_TX_STAT_ACTIVE 0x10000000 +#define BGMAC_DMA_TX_STAT_IDLEWAIT 0x20000000 +#define BGMAC_DMA_TX_STAT_STOPPED 0x30000000 +#define BGMAC_DMA_TX_STAT_SUSP 0x40000000 +#define BGMAC_DMA_TX_ERROR 0x14 +#define BGMAC_DMA_TX_ERRDPTR 0x0001FFFF +#define BGMAC_DMA_TX_ERR 0xF0000000 +#define BGMAC_DMA_TX_ERR_NOERR 0x00000000 +#define BGMAC_DMA_TX_ERR_PROT 0x10000000 +#define BGMAC_DMA_TX_ERR_UNDERRUN 0x20000000 +#define BGMAC_DMA_TX_ERR_TRANSFER 0x30000000 +#define BGMAC_DMA_TX_ERR_DESCREAD 0x40000000 +#define BGMAC_DMA_TX_ERR_CORE 0x50000000 +#define BGMAC_DMA_RX_CTL 0x20 +#define BGMAC_DMA_RX_ENABLE 0x00000001 +#define BGMAC_DMA_RX_FRAME_OFFSET_MASK 0x000000FE +#define BGMAC_DMA_RX_FRAME_OFFSET_SHIFT 1 +#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100 +#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400 +#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800 +#define BGMAC_DMA_RX_MR_MASK 0x000000C0 /* Multiple outstanding reads */ +#define BGMAC_DMA_RX_MR_SHIFT 6 +#define BGMAC_DMA_TX_MR_1 0 +#define BGMAC_DMA_TX_MR_2 1 +#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000 +#define BGMAC_DMA_RX_ADDREXT_SHIFT 16 +#define BGMAC_DMA_RX_BL_MASK 0x001C0000 /* BurstLen bits */ +#define BGMAC_DMA_RX_BL_SHIFT 18 +#define BGMAC_DMA_RX_BL_16 0 +#define BGMAC_DMA_RX_BL_32 1 +#define BGMAC_DMA_RX_BL_64 2 +#define BGMAC_DMA_RX_BL_128 3 +#define BGMAC_DMA_RX_BL_256 4 +#define BGMAC_DMA_RX_BL_512 5 +#define BGMAC_DMA_RX_BL_1024 6 +#define BGMAC_DMA_RX_PC_MASK 0x00E00000 /* Prefetch control */ +#define BGMAC_DMA_RX_PC_SHIFT 21 +#define BGMAC_DMA_RX_PC_0 0 +#define BGMAC_DMA_RX_PC_4 1 +#define BGMAC_DMA_RX_PC_8 2 +#define BGMAC_DMA_RX_PC_16 3 +#define BGMAC_DMA_RX_PT_MASK 0x03000000 /* Prefetch threshold */ +#define BGMAC_DMA_RX_PT_SHIFT 24 +#define BGMAC_DMA_RX_PT_1 0 +#define BGMAC_DMA_RX_PT_2 1 +#define BGMAC_DMA_RX_PT_4 2 +#define BGMAC_DMA_RX_PT_8 3 +#define BGMAC_DMA_RX_INDEX 0x24 +#define BGMAC_DMA_RX_RINGLO 0x28 +#define BGMAC_DMA_RX_RINGHI 0x2C +#define BGMAC_DMA_RX_STATUS 0x30 +#define BGMAC_DMA_RX_STATDPTR 0x00001FFF +#define BGMAC_DMA_RX_STAT 0xF0000000 +#define BGMAC_DMA_RX_STAT_DISABLED 0x00000000 +#define BGMAC_DMA_RX_STAT_ACTIVE 0x10000000 +#define BGMAC_DMA_RX_STAT_IDLEWAIT 0x20000000 +#define BGMAC_DMA_RX_STAT_STOPPED 0x30000000 +#define BGMAC_DMA_RX_STAT_SUSP 0x40000000 +#define BGMAC_DMA_RX_ERROR 0x34 +#define BGMAC_DMA_RX_ERRDPTR 0x0001FFFF +#define BGMAC_DMA_RX_ERR 0xF0000000 +#define BGMAC_DMA_RX_ERR_NOERR 0x00000000 +#define BGMAC_DMA_RX_ERR_PROT 0x10000000 +#define BGMAC_DMA_RX_ERR_UNDERRUN 0x20000000 +#define BGMAC_DMA_RX_ERR_TRANSFER 0x30000000 +#define BGMAC_DMA_RX_ERR_DESCREAD 0x40000000 +#define BGMAC_DMA_RX_ERR_CORE 0x50000000 + +#define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */ +#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */ +#define BGMAC_DESC_CTL0_EOF 0x40000000 /* End of frame */ +#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */ +#define BGMAC_DESC_CTL1_LEN 0x00001FFF + +#define BGMAC_PHY_NOREGS 0x1E +#define BGMAC_PHY_MASK 0x1F + +#define BGMAC_MAX_TX_RINGS 4 +#define BGMAC_MAX_RX_RINGS 1 + +#define BGMAC_TX_RING_SLOTS 128 +#define BGMAC_RX_RING_SLOTS 512 + +#define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... 
*/ +#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */ +#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \ + BGMAC_RX_FRAME_OFFSET) +#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */ +#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE) +#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +#define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */ +#define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define BGMAC_BFL_ENETVLAN 0x0100 /* can do vlan */ + +#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030 +#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010 +#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020 +#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0 +#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000 +#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII 0x00000040 +#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII 0x00000080 +#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0 +#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000 + +#define BGMAC_WEIGHT 64 + +#define ETHER_MAX_LEN 1518 + +struct bgmac_slot_info { + union { + struct sk_buff *skb; + void *buf; + }; + dma_addr_t dma_addr; +}; + +struct bgmac_dma_desc { + __le32 ctl0; + __le32 ctl1; + __le32 addr_low; + __le32 addr_high; +} __packed; + +enum bgmac_dma_ring_type { + BGMAC_DMA_RING_TX, + BGMAC_DMA_RING_RX, +}; + +/** + * bgmac_dma_ring - contains info about DMA ring (either TX or RX one) + * @start: index of the first slot containing data + * @end: index of a slot that can *not* be read (yet) + * + * Be really aware of the specific @end meaning. It's an index of a slot *after* + * the one containing data that can be read. If @start equals @end the ring is + * empty. 
+ */ +struct bgmac_dma_ring { + u32 start; + u32 end; + + struct bgmac_dma_desc *cpu_base; + dma_addr_t dma_base; + u32 index_base; /* Used for unaligned rings only, otherwise 0 */ + u16 mmio_base; + bool unaligned; + + struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS]; +}; + +struct bgmac_rx_header { + __le16 len; + __le16 flags; + __le16 pad[12]; +}; + +struct bgmac { + struct bcma_device *core; + struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ + struct net_device *net_dev; + struct napi_struct napi; + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + + /* DMA */ + struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; + struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS]; + + /* Stats */ + bool stats_grabbed; + u32 mib_tx_regs[BGMAC_NUM_MIB_TX_REGS]; + u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS]; + + /* Int */ + u32 int_mask; + + /* Current MAC state */ + int mac_speed; + int mac_duplex; + + u8 phyaddr; + bool has_robosw; + + bool loopback; +}; + +static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) +{ + return bcma_read32(bgmac->core, offset); +} + +static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + bcma_write32(bgmac->core, offset, value); +} + +static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, + u32 set) +{ + bgmac_write(bgmac, offset, (bgmac_read(bgmac, offset) & mask) | set); +} + +static inline void bgmac_mask(struct bgmac *bgmac, u16 offset, u32 mask) +{ + bgmac_maskset(bgmac, offset, mask, 0); +} + +static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set) +{ + bgmac_maskset(bgmac, offset, ~0, set); +} + +#endif /* _BGMAC_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c new file mode 100644 index 000000000..241922c9d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -0,0 +1,8805 @@ +/* bnx2.c: QLogic bnx2 network driver. + * + * Copyright (c) 2004-2014 Broadcom Corporation + * Copyright (c) 2014-2015 QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Michael Chan (mchan@broadcom.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) +#define BCM_CNIC 1 +#include "cnic_if.h" +#endif +#include "bnx2.h" +#include "bnx2_fw.h" + +#define DRV_MODULE_NAME "bnx2" +#define DRV_MODULE_VERSION "2.2.6" +#define DRV_MODULE_RELDATE "January 29, 2014" +#define FW_MIPS_FILE_06 "/*(DEBLOBBED)*/" +#define FW_RV2P_FILE_06 "/*(DEBLOBBED)*/" +#define FW_MIPS_FILE_09 "/*(DEBLOBBED)*/" +#define FW_RV2P_FILE_09_Ax "/*(DEBLOBBED)*/" +#define FW_RV2P_FILE_09 "/*(DEBLOBBED)*/" + +#define RUN_AT(x) (jiffies + (x)) + +/* Time in jiffies before concluding the transmitter is hung. 
*/ +#define TX_TIMEOUT (5*HZ) + +static char version[] = + "QLogic " DRV_MODULE_NAME " Gigabit Ethernet Driver v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Michael Chan "); +MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +/*(DEBLOBBED)*/ + +static int disable_msi = 0; + +module_param(disable_msi, int, S_IRUGO); +MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + +typedef enum { + BCM5706 = 0, + NC370T, + NC370I, + BCM5706S, + NC370F, + BCM5708, + BCM5708S, + BCM5709, + BCM5709S, + BCM5716, + BCM5716S, +} board_t; + +/* indexed by board_t, above */ +static struct { + char *name; +} board_info[] = { + { "Broadcom NetXtreme II BCM5706 1000Base-T" }, + { "HP NC370T Multifunction Gigabit Server Adapter" }, + { "HP NC370i Multifunction Gigabit Server Adapter" }, + { "Broadcom NetXtreme II BCM5706 1000Base-SX" }, + { "HP NC370F Multifunction Gigabit Server Adapter" }, + { "Broadcom NetXtreme II BCM5708 1000Base-T" }, + { "Broadcom NetXtreme II BCM5708 1000Base-SX" }, + { "Broadcom NetXtreme II BCM5709 1000Base-T" }, + { "Broadcom NetXtreme II BCM5709 1000Base-SX" }, + { "Broadcom NetXtreme II BCM5716 1000Base-T" }, + { "Broadcom NetXtreme II BCM5716 1000Base-SX" }, + }; + +static const struct pci_device_id bnx2_pci_tbl[] = { + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, + PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, + PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, + PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 }, + { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S }, + { PCI_VENDOR_ID_BROADCOM, 0x163b, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 }, + { PCI_VENDOR_ID_BROADCOM, 0x163c, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S }, + { 0, } +}; + +static const struct flash_spec flash_table[] = +{ +#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE) +#define NONBUFFERED_FLAGS (BNX2_NV_WREN) + /* Slow EEPROM */ + {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, + "EEPROM - slow"}, + /* Expansion entry 0001 */ + {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 0001"}, + /* Saifun SA25F010 (non-buffered flash) */ + /* strap, cfg1, & write1 need updates */ + {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2, + "Non-buffered flash (128kB)"}, + /* Saifun SA25F020 (non-buffered flash) */ + /* strap, cfg1, & write1 need updates */ + {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 
SAIFUN_FLASH_BASE_TOTAL_SIZE*4, + "Non-buffered flash (256kB)"}, + /* Expansion entry 0100 */ + {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 0100"}, + /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */ + {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, + "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, + /* Entry 0110: ST M45PE20 (non-buffered flash)*/ + {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, + ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, + "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, + /* Saifun SA25F005 (non-buffered flash) */ + /* strap, cfg1, & write1 need updates */ + {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE, + "Non-buffered flash (64kB)"}, + /* Fast EEPROM */ + {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400, + BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE, + SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE, + "EEPROM - fast"}, + /* Expansion entry 1001 */ + {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1001"}, + /* Expansion entry 1010 */ + {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1010"}, + /* ATMEL AT45DB011B (buffered flash) */ + {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE, + "Buffered flash (128kB)"}, + /* Expansion entry 1100 */ + {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1100"}, + /* Expansion entry 1101 */ + {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406, + NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE, + SAIFUN_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1101"}, + /* Ateml Expansion entry 1110 */ + {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLASH_BYTE_ADDR_MASK, 0, + "Entry 1110 (Atmel)"}, + /* ATMEL AT45DB021B (buffered flash) */ + {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400, + BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE, + BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2, + "Buffered flash (256kB)"}, +}; + +static const struct flash_spec flash_5709 = { + .flags = BNX2_NV_BUFFERED, + .page_bits = BCM5709_FLASH_PAGE_BITS, + .page_size = BCM5709_FLASH_PAGE_SIZE, + .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK, + .total_size = BUFFERED_FLASH_TOTAL_SIZE*2, + .name = "5709 Buffered flash (256kB)", +}; + +MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); + +static void bnx2_init_napi(struct bnx2 *bp); +static void bnx2_del_napi(struct bnx2 *bp); + +static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) +{ + u32 
diff; + + /* Tell compiler to fetch tx_prod and tx_cons from memory. */ + barrier(); + + /* The ring uses 256 indices for 255 entries, one of them + * needs to be skipped. + */ + diff = txr->tx_prod - txr->tx_cons; + if (unlikely(diff >= BNX2_TX_DESC_CNT)) { + diff &= 0xffff; + if (diff == BNX2_TX_DESC_CNT) + diff = BNX2_MAX_TX_DESC_CNT; + } + return bp->tx_ring_size - diff; +} + +static u32 +bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) +{ + u32 val; + + spin_lock_bh(&bp->indirect_lock); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); + val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); + spin_unlock_bh(&bp->indirect_lock); + return val; +} + +static void +bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) +{ + spin_lock_bh(&bp->indirect_lock); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); + spin_unlock_bh(&bp->indirect_lock); +} + +static void +bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val) +{ + bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val); +} + +static u32 +bnx2_shmem_rd(struct bnx2 *bp, u32 offset) +{ + return bnx2_reg_rd_ind(bp, bp->shmem_base + offset); +} + +static void +bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) +{ + offset += cid_addr; + spin_lock_bh(&bp->indirect_lock); + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + int i; + + BNX2_WR(bp, BNX2_CTX_CTX_DATA, val); + BNX2_WR(bp, BNX2_CTX_CTX_CTRL, + offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); + for (i = 0; i < 5; i++) { + val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL); + if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0) + break; + udelay(5); + } + } else { + BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); + BNX2_WR(bp, BNX2_CTX_DATA, val); + } + spin_unlock_bh(&bp->indirect_lock); +} + +#ifdef BCM_CNIC +static int +bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info) +{ + struct bnx2 *bp = netdev_priv(dev); + struct drv_ctl_io *io = &info->data.io; + + switch (info->cmd) { + case DRV_CTL_IO_WR_CMD: + bnx2_reg_wr_ind(bp, io->offset, io->data); + break; + case DRV_CTL_IO_RD_CMD: + io->data = bnx2_reg_rd_ind(bp, io->offset); + break; + case DRV_CTL_CTX_WR_CMD: + bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data); + break; + default: + return -EINVAL; + } + return 0; +} + +static void bnx2_setup_cnic_irq_info(struct bnx2 *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + int sb_id; + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_present = 0; + sb_id = bp->irq_nvecs; + cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; + } else { + cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; + bnapi->cnic_tag = bnapi->last_status_idx; + bnapi->cnic_present = 1; + sb_id = 0; + cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; + } + + cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector; + cp->irq_arr[0].status_blk = (void *) + ((unsigned long) bnapi->status_blk.msi + + (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id)); + cp->irq_arr[0].status_blk_num = sb_id; + cp->num_irq = 1; +} + +static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, + void *data) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (ops == NULL) + return -EINVAL; + + if (cp->drv_state & CNIC_DRV_STATE_REGD) + return -EBUSY; + + if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN)) + return -ENODEV; + + bp->cnic_data = data; + rcu_assign_pointer(bp->cnic_ops, ops); + + cp->num_irq = 0; + cp->drv_state = CNIC_DRV_STATE_REGD; + + bnx2_setup_cnic_irq_info(bp); + + return 0; +} + 
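+/* The unregister path below appears to mirror this registration: it clears
+ * cnic_ops under cnic_lock and then calls synchronize_rcu(), so any reader
+ * that obtained the ops pointer via RCU has finished before teardown
+ * continues.
+ */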
+static int bnx2_unregister_cnic(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + mutex_lock(&bp->cnic_lock); + cp->drv_state = 0; + bnapi->cnic_present = 0; + RCU_INIT_POINTER(bp->cnic_ops, NULL); + mutex_unlock(&bp->cnic_lock); + synchronize_rcu(); + return 0; +} + +static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (!cp->max_iscsi_conn) + return NULL; + + cp->drv_owner = THIS_MODULE; + cp->chip_id = bp->chip_id; + cp->pdev = bp->pdev; + cp->io_base = bp->regview; + cp->drv_ctl = bnx2_drv_ctl; + cp->drv_register_cnic = bnx2_register_cnic; + cp->drv_unregister_cnic = bnx2_unregister_cnic; + + return cp; +} + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ + struct cnic_ops *c_ops; + struct cnic_ctl_info info; + + mutex_lock(&bp->cnic_lock); + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_lock)); + if (c_ops) { + info.cmd = CNIC_CTL_STOP_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + mutex_unlock(&bp->cnic_lock); +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ + struct cnic_ops *c_ops; + struct cnic_ctl_info info; + + mutex_lock(&bp->cnic_lock); + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_lock)); + if (c_ops) { + if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + + bnapi->cnic_tag = bnapi->last_status_idx; + } + info.cmd = CNIC_CTL_START_CMD; + c_ops->cnic_ctl(bp->cnic_data, &info); + } + mutex_unlock(&bp->cnic_lock); +} + +#else + +static void +bnx2_cnic_stop(struct bnx2 *bp) +{ +} + +static void +bnx2_cnic_start(struct bnx2 *bp) +{ +} + +#endif + +static int +bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val) +{ + u32 val1; + int i, ret; + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + val1 = (bp->phy_addr << 21) | (reg << 16) | + BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT | + BNX2_EMAC_MDIO_COMM_START_BUSY; + BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1); + + for (i = 0; i < 50; i++) { + udelay(10); + + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); + if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); + val1 &= BNX2_EMAC_MDIO_COMM_DATA; + + break; + } + } + + if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) { + *val = 0x0; + ret = -EBUSY; + } + else { + *val = val1; + ret = 0; + } + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + return ret; +} + +static int +bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) +{ + u32 val1; + int i, ret; + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + val1 = (bp->phy_addr << 21) | (reg << 16) | val | + BNX2_EMAC_MDIO_COMM_COMMAND_WRITE | + BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT; + BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1); + + for (i = 0; i < 50; 
i++) { + udelay(10); + + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM); + if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + + if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) + ret = -EBUSY; + else + ret = 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { + val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; + + BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1); + BNX2_RD(bp, BNX2_EMAC_MDIO_MODE); + + udelay(40); + } + + return ret; +} + +static void +bnx2_disable_int(struct bnx2 *bp) +{ + int i; + struct bnx2_napi *bnapi; + + for (i = 0; i < bp->irq_nvecs; i++) { + bnapi = &bp->bnx2_napi[i]; + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + } + BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD); +} + +static void +bnx2_enable_int(struct bnx2 *bp) +{ + int i; + struct bnx2_napi *bnapi; + + for (i = 0; i < bp->irq_nvecs; i++) { + bnapi = &bp->bnx2_napi[i]; + + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | + bnapi->last_status_idx); + + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + } + BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); +} + +static void +bnx2_disable_int_sync(struct bnx2 *bp) +{ + int i; + + atomic_inc(&bp->intr_sem); + if (!netif_running(bp->dev)) + return; + + bnx2_disable_int(bp); + for (i = 0; i < bp->irq_nvecs; i++) + synchronize_irq(bp->irq_tbl[i].vector); +} + +static void +bnx2_napi_disable(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) + napi_disable(&bp->bnx2_napi[i].napi); +} + +static void +bnx2_napi_enable(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) + napi_enable(&bp->bnx2_napi[i].napi); +} + +static void +bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic) +{ + if (stop_cnic) + bnx2_cnic_stop(bp); + if (netif_running(bp->dev)) { + bnx2_napi_disable(bp); + netif_tx_disable(bp->dev); + } + bnx2_disable_int_sync(bp); + netif_carrier_off(bp->dev); /* prevent tx timeout */ +} + +static void +bnx2_netif_start(struct bnx2 *bp, bool start_cnic) +{ + if (atomic_dec_and_test(&bp->intr_sem)) { + if (netif_running(bp->dev)) { + netif_tx_wake_all_queues(bp->dev); + spin_lock_bh(&bp->phy_lock); + if (bp->link_up) + netif_carrier_on(bp->dev); + spin_unlock_bh(&bp->phy_lock); + bnx2_napi_enable(bp); + bnx2_enable_int(bp); + if (start_cnic) + bnx2_cnic_start(bp); + } + } +} + +static void +bnx2_free_tx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_tx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + + if (txr->tx_desc_ring) { + dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE, + txr->tx_desc_ring, + txr->tx_desc_mapping); + txr->tx_desc_ring = NULL; + } + kfree(txr->tx_buf_ring); + txr->tx_buf_ring = NULL; + } +} + +static void +bnx2_free_rx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_rx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + int j; + + for (j = 0; j < bp->rx_max_ring; j++) { + if (rxr->rx_desc_ring[j]) + dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE, + rxr->rx_desc_ring[j], + rxr->rx_desc_mapping[j]); + rxr->rx_desc_ring[j] = NULL; + } + vfree(rxr->rx_buf_ring); + rxr->rx_buf_ring = NULL; + + for (j = 0; j < bp->rx_max_pg_ring; j++) { + if (rxr->rx_pg_desc_ring[j]) + dma_free_coherent(&bp->pdev->dev, 
RXBD_RING_SIZE, + rxr->rx_pg_desc_ring[j], + rxr->rx_pg_desc_mapping[j]); + rxr->rx_pg_desc_ring[j] = NULL; + } + vfree(rxr->rx_pg_ring); + rxr->rx_pg_ring = NULL; + } +} + +static int +bnx2_alloc_tx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_tx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + + txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL); + if (txr->tx_buf_ring == NULL) + return -ENOMEM; + + txr->tx_desc_ring = + dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, + &txr->tx_desc_mapping, GFP_KERNEL); + if (txr->tx_desc_ring == NULL) + return -ENOMEM; + } + return 0; +} + +static int +bnx2_alloc_rx_mem(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_rx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + int j; + + rxr->rx_buf_ring = + vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); + if (rxr->rx_buf_ring == NULL) + return -ENOMEM; + + for (j = 0; j < bp->rx_max_ring; j++) { + rxr->rx_desc_ring[j] = + dma_alloc_coherent(&bp->pdev->dev, + RXBD_RING_SIZE, + &rxr->rx_desc_mapping[j], + GFP_KERNEL); + if (rxr->rx_desc_ring[j] == NULL) + return -ENOMEM; + + } + + if (bp->rx_pg_ring_size) { + rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * + bp->rx_max_pg_ring); + if (rxr->rx_pg_ring == NULL) + return -ENOMEM; + + } + + for (j = 0; j < bp->rx_max_pg_ring; j++) { + rxr->rx_pg_desc_ring[j] = + dma_alloc_coherent(&bp->pdev->dev, + RXBD_RING_SIZE, + &rxr->rx_pg_desc_mapping[j], + GFP_KERNEL); + if (rxr->rx_pg_desc_ring[j] == NULL) + return -ENOMEM; + + } + } + return 0; +} + +static void +bnx2_free_mem(struct bnx2 *bp) +{ + int i; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + + bnx2_free_tx_mem(bp); + bnx2_free_rx_mem(bp); + + for (i = 0; i < bp->ctx_pages; i++) { + if (bp->ctx_blk[i]) { + dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE, + bp->ctx_blk[i], + bp->ctx_blk_mapping[i]); + bp->ctx_blk[i] = NULL; + } + } + if (bnapi->status_blk.msi) { + dma_free_coherent(&bp->pdev->dev, bp->status_stats_size, + bnapi->status_blk.msi, + bp->status_blk_mapping); + bnapi->status_blk.msi = NULL; + bp->stats_blk = NULL; + } +} + +static int +bnx2_alloc_mem(struct bnx2 *bp) +{ + int i, status_blk_size, err; + struct bnx2_napi *bnapi; + void *status_blk; + + /* Combine status and statistics blocks into one allocation. 
*/ + status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); + if (bp->flags & BNX2_FLAG_MSIX_CAP) + status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC * + BNX2_SBLK_MSIX_ALIGN_SIZE); + bp->status_stats_size = status_blk_size + + sizeof(struct statistics_block); + + status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, + &bp->status_blk_mapping, GFP_KERNEL); + if (status_blk == NULL) + goto alloc_mem_err; + + bnapi = &bp->bnx2_napi[0]; + bnapi->status_blk.msi = status_blk; + bnapi->hw_tx_cons_ptr = + &bnapi->status_blk.msi->status_tx_quick_consumer_index0; + bnapi->hw_rx_cons_ptr = + &bnapi->status_blk.msi->status_rx_quick_consumer_index0; + if (bp->flags & BNX2_FLAG_MSIX_CAP) { + for (i = 1; i < bp->irq_nvecs; i++) { + struct status_block_msix *sblk; + + bnapi = &bp->bnx2_napi[i]; + + sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i); + bnapi->status_blk.msix = sblk; + bnapi->hw_tx_cons_ptr = + &sblk->status_tx_quick_consumer_index; + bnapi->hw_rx_cons_ptr = + &sblk->status_rx_quick_consumer_index; + bnapi->int_num = i << 24; + } + } + + bp->stats_blk = status_blk + status_blk_size; + + bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE; + if (bp->ctx_pages == 0) + bp->ctx_pages = 1; + for (i = 0; i < bp->ctx_pages; i++) { + bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev, + BNX2_PAGE_SIZE, + &bp->ctx_blk_mapping[i], + GFP_KERNEL); + if (bp->ctx_blk[i] == NULL) + goto alloc_mem_err; + } + } + + err = bnx2_alloc_rx_mem(bp); + if (err) + goto alloc_mem_err; + + err = bnx2_alloc_tx_mem(bp); + if (err) + goto alloc_mem_err; + + return 0; + +alloc_mem_err: + bnx2_free_mem(bp); + return -ENOMEM; +} + +static void +bnx2_report_fw_link(struct bnx2 *bp) +{ + u32 fw_link_status = 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return; + + if (bp->link_up) { + u32 bmsr; + + switch (bp->line_speed) { + case SPEED_10: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_10HALF; + else + fw_link_status = BNX2_LINK_STATUS_10FULL; + break; + case SPEED_100: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_100HALF; + else + fw_link_status = BNX2_LINK_STATUS_100FULL; + break; + case SPEED_1000: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_1000HALF; + else + fw_link_status = BNX2_LINK_STATUS_1000FULL; + break; + case SPEED_2500: + if (bp->duplex == DUPLEX_HALF) + fw_link_status = BNX2_LINK_STATUS_2500HALF; + else + fw_link_status = BNX2_LINK_STATUS_2500FULL; + break; + } + + fw_link_status |= BNX2_LINK_STATUS_LINK_UP; + + if (bp->autoneg) { + fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED; + + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + + if (!(bmsr & BMSR_ANEGCOMPLETE) || + bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) + fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET; + else + fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE; + } + } + else + fw_link_status = BNX2_LINK_STATUS_LINK_DOWN; + + bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status); +} + +static char * +bnx2_xceiver_str(struct bnx2 *bp) +{ + return (bp->phy_port == PORT_FIBRE) ? "SerDes" : + ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" : + "Copper"); +} + +static void +bnx2_report_link(struct bnx2 *bp) +{ + if (bp->link_up) { + netif_carrier_on(bp->dev); + netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex", + bnx2_xceiver_str(bp), + bp->line_speed, + bp->duplex == DUPLEX_FULL ? 
"full" : "half"); + + if (bp->flow_ctrl) { + if (bp->flow_ctrl & FLOW_CTRL_RX) { + pr_cont(", receive "); + if (bp->flow_ctrl & FLOW_CTRL_TX) + pr_cont("& transmit "); + } + else { + pr_cont(", transmit "); + } + pr_cont("flow control ON"); + } + pr_cont("\n"); + } else { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC %s Link is Down\n", + bnx2_xceiver_str(bp)); + } + + bnx2_report_fw_link(bp); +} + +static void +bnx2_resolve_flow_ctrl(struct bnx2 *bp) +{ + u32 local_adv, remote_adv; + + bp->flow_ctrl = 0; + if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != + (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { + + if (bp->duplex == DUPLEX_FULL) { + bp->flow_ctrl = bp->req_flow_ctrl; + } + return; + } + + if (bp->duplex != DUPLEX_FULL) { + return; + } + + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { + u32 val; + + bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); + if (val & BCM5708S_1000X_STAT1_TX_PAUSE) + bp->flow_ctrl |= FLOW_CTRL_TX; + if (val & BCM5708S_1000X_STAT1_RX_PAUSE) + bp->flow_ctrl |= FLOW_CTRL_RX; + return; + } + + bnx2_read_phy(bp, bp->mii_adv, &local_adv); + bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + u32 new_local_adv = 0; + u32 new_remote_adv = 0; + + if (local_adv & ADVERTISE_1000XPAUSE) + new_local_adv |= ADVERTISE_PAUSE_CAP; + if (local_adv & ADVERTISE_1000XPSE_ASYM) + new_local_adv |= ADVERTISE_PAUSE_ASYM; + if (remote_adv & ADVERTISE_1000XPAUSE) + new_remote_adv |= ADVERTISE_PAUSE_CAP; + if (remote_adv & ADVERTISE_1000XPSE_ASYM) + new_remote_adv |= ADVERTISE_PAUSE_ASYM; + + local_adv = new_local_adv; + remote_adv = new_remote_adv; + } + + /* See Table 28B-3 of 802.3ab-1999 spec. */ + if (local_adv & ADVERTISE_PAUSE_CAP) { + if(local_adv & ADVERTISE_PAUSE_ASYM) { + if (remote_adv & ADVERTISE_PAUSE_CAP) { + bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + else if (remote_adv & ADVERTISE_PAUSE_ASYM) { + bp->flow_ctrl = FLOW_CTRL_RX; + } + } + else { + if (remote_adv & ADVERTISE_PAUSE_CAP) { + bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } + } + else if (local_adv & ADVERTISE_PAUSE_ASYM) { + if ((remote_adv & ADVERTISE_PAUSE_CAP) && + (remote_adv & ADVERTISE_PAUSE_ASYM)) { + + bp->flow_ctrl = FLOW_CTRL_TX; + } + } +} + +static int +bnx2_5709s_linkup(struct bnx2 *bp) +{ + u32 val, speed; + + bp->link_up = 1; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS); + bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val); + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + if ((bp->autoneg & AUTONEG_SPEED) == 0) { + bp->line_speed = bp->req_line_speed; + bp->duplex = bp->req_duplex; + return 0; + } + speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK; + switch (speed) { + case MII_BNX2_GP_TOP_AN_SPEED_10: + bp->line_speed = SPEED_10; + break; + case MII_BNX2_GP_TOP_AN_SPEED_100: + bp->line_speed = SPEED_100; + break; + case MII_BNX2_GP_TOP_AN_SPEED_1G: + case MII_BNX2_GP_TOP_AN_SPEED_1GKV: + bp->line_speed = SPEED_1000; + break; + case MII_BNX2_GP_TOP_AN_SPEED_2_5G: + bp->line_speed = SPEED_2500; + break; + } + if (val & MII_BNX2_GP_TOP_AN_FD) + bp->duplex = DUPLEX_FULL; + else + bp->duplex = DUPLEX_HALF; + return 0; +} + +static int +bnx2_5708s_linkup(struct bnx2 *bp) +{ + u32 val; + + bp->link_up = 1; + bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); + switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) { + case BCM5708S_1000X_STAT1_SPEED_10: + bp->line_speed = SPEED_10; + break; + case BCM5708S_1000X_STAT1_SPEED_100: + bp->line_speed = SPEED_100; 
+ break; + case BCM5708S_1000X_STAT1_SPEED_1G: + bp->line_speed = SPEED_1000; + break; + case BCM5708S_1000X_STAT1_SPEED_2G5: + bp->line_speed = SPEED_2500; + break; + } + if (val & BCM5708S_1000X_STAT1_FD) + bp->duplex = DUPLEX_FULL; + else + bp->duplex = DUPLEX_HALF; + + return 0; +} + +static int +bnx2_5706s_linkup(struct bnx2 *bp) +{ + u32 bmcr, local_adv, remote_adv, common; + + bp->link_up = 1; + bp->line_speed = SPEED_1000; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (bmcr & BMCR_FULLDPLX) { + bp->duplex = DUPLEX_FULL; + } + else { + bp->duplex = DUPLEX_HALF; + } + + if (!(bmcr & BMCR_ANENABLE)) { + return 0; + } + + bnx2_read_phy(bp, bp->mii_adv, &local_adv); + bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); + + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) { + + if (common & ADVERTISE_1000XFULL) { + bp->duplex = DUPLEX_FULL; + } + else { + bp->duplex = DUPLEX_HALF; + } + } + + return 0; +} + +static int +bnx2_copper_linkup(struct bnx2 *bp) +{ + u32 bmcr; + + bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 local_adv, remote_adv, common; + + bnx2_read_phy(bp, MII_CTRL1000, &local_adv); + bnx2_read_phy(bp, MII_STAT1000, &remote_adv); + + common = local_adv & (remote_adv >> 2); + if (common & ADVERTISE_1000FULL) { + bp->line_speed = SPEED_1000; + bp->duplex = DUPLEX_FULL; + } + else if (common & ADVERTISE_1000HALF) { + bp->line_speed = SPEED_1000; + bp->duplex = DUPLEX_HALF; + } + else { + bnx2_read_phy(bp, bp->mii_adv, &local_adv); + bnx2_read_phy(bp, bp->mii_lpa, &remote_adv); + + common = local_adv & remote_adv; + if (common & ADVERTISE_100FULL) { + bp->line_speed = SPEED_100; + bp->duplex = DUPLEX_FULL; + } + else if (common & ADVERTISE_100HALF) { + bp->line_speed = SPEED_100; + bp->duplex = DUPLEX_HALF; + } + else if (common & ADVERTISE_10FULL) { + bp->line_speed = SPEED_10; + bp->duplex = DUPLEX_FULL; + } + else if (common & ADVERTISE_10HALF) { + bp->line_speed = SPEED_10; + bp->duplex = DUPLEX_HALF; + } + else { + bp->line_speed = 0; + bp->link_up = 0; + } + } + } + else { + if (bmcr & BMCR_SPEED100) { + bp->line_speed = SPEED_100; + } + else { + bp->line_speed = SPEED_10; + } + if (bmcr & BMCR_FULLDPLX) { + bp->duplex = DUPLEX_FULL; + } + else { + bp->duplex = DUPLEX_HALF; + } + } + + if (bp->link_up) { + u32 ext_status; + + bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status); + if (ext_status & EXT_STATUS_MDIX) + bp->phy_flags |= BNX2_PHY_FLAG_MDIX; + } + + return 0; +} + +static void +bnx2_init_rx_context(struct bnx2 *bp, u32 cid) +{ + u32 val, rx_cid_addr = GET_CID_ADDR(cid); + + val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; + val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; + val |= 0x02 << 8; + + if (bp->flow_ctrl & FLOW_CTRL_TX) + val |= BNX2_L2CTX_FLOW_CTRL_ENABLE; + + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val); +} + +static void +bnx2_init_all_rx_contexts(struct bnx2 *bp) +{ + int i; + u32 cid; + + for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) { + if (i == 1) + cid = RX_RSS_CID; + bnx2_init_rx_context(bp, cid); + } +} + +static void +bnx2_set_mac_link(struct bnx2 *bp) +{ + u32 val; + + BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); + if (bp->link_up && (bp->line_speed == SPEED_1000) && + (bp->duplex == DUPLEX_HALF)) { + BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); + } + + /* Configure the EMAC mode register. 
*/ + val = BNX2_RD(bp, BNX2_EMAC_MODE); + + val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | + BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | + BNX2_EMAC_MODE_25G_MODE); + + if (bp->link_up) { + switch (bp->line_speed) { + case SPEED_10: + if (BNX2_CHIP(bp) != BNX2_CHIP_5706) { + val |= BNX2_EMAC_MODE_PORT_MII_10M; + break; + } + /* fall through */ + case SPEED_100: + val |= BNX2_EMAC_MODE_PORT_MII; + break; + case SPEED_2500: + val |= BNX2_EMAC_MODE_25G_MODE; + /* fall through */ + case SPEED_1000: + val |= BNX2_EMAC_MODE_PORT_GMII; + break; + } + } + else { + val |= BNX2_EMAC_MODE_PORT_GMII; + } + + /* Set the MAC to operate in the appropriate duplex mode. */ + if (bp->duplex == DUPLEX_HALF) + val |= BNX2_EMAC_MODE_HALF_DUPLEX; + BNX2_WR(bp, BNX2_EMAC_MODE, val); + + /* Enable/disable rx PAUSE. */ + bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; + + if (bp->flow_ctrl & FLOW_CTRL_RX) + bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; + BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); + + /* Enable/disable tx PAUSE. */ + val = BNX2_RD(bp, BNX2_EMAC_TX_MODE); + val &= ~BNX2_EMAC_TX_MODE_FLOW_EN; + + if (bp->flow_ctrl & FLOW_CTRL_TX) + val |= BNX2_EMAC_TX_MODE_FLOW_EN; + BNX2_WR(bp, BNX2_EMAC_TX_MODE, val); + + /* Acknowledge the interrupt. */ + BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); + + bnx2_init_all_rx_contexts(bp); +} + +static void +bnx2_enable_bmsr1(struct bnx2 *bp) +{ + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (BNX2_CHIP(bp) == BNX2_CHIP_5709)) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_GP_STATUS); +} + +static void +bnx2_disable_bmsr1(struct bnx2 *bp) +{ + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (BNX2_CHIP(bp) == BNX2_CHIP_5709)) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); +} + +static int +bnx2_test_and_enable_2g5(struct bnx2 *bp) +{ + u32 up1; + int ret = 1; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return 0; + + if (bp->autoneg & AUTONEG_SPEED) + bp->advertising |= ADVERTISED_2500baseX_Full; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + + bnx2_read_phy(bp, bp->mii_up1, &up1); + if (!(up1 & BCM5708S_UP1_2G5)) { + up1 |= BCM5708S_UP1_2G5; + bnx2_write_phy(bp, bp->mii_up1, up1); + ret = 0; + } + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + return ret; +} + +static int +bnx2_test_and_disable_2g5(struct bnx2 *bp) +{ + u32 up1; + int ret = 0; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return 0; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + + bnx2_read_phy(bp, bp->mii_up1, &up1); + if (up1 & BCM5708S_UP1_2G5) { + up1 &= ~BCM5708S_UP1_2G5; + bnx2_write_phy(bp, bp->mii_up1, up1); + ret = 1; + } + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + return ret; +} + +static void +bnx2_enable_forced_2g5(struct bnx2 *bp) +{ + u32 uninitialized_var(bmcr); + int err; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_SERDES_DIG); + if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { + val &= ~MII_BNX2_SD_MISC1_FORCE_MSK; + val |= MII_BNX2_SD_MISC1_FORCE | + MII_BNX2_SD_MISC1_FORCE_2_5G; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + } + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 
+ MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (!err) + bmcr |= BCM5708S_BMCR_FORCE_2500; + } else { + return; + } + + if (err) + return; + + if (bp->autoneg & AUTONEG_SPEED) { + bmcr &= ~BMCR_ANENABLE; + if (bp->req_duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); +} + +static void +bnx2_disable_forced_2g5(struct bnx2 *bp) +{ + u32 uninitialized_var(bmcr); + int err; + + if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + return; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_SERDES_DIG); + if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) { + val &= ~MII_BNX2_SD_MISC1_FORCE; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val); + } + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, + MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { + err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (!err) + bmcr &= ~BCM5708S_BMCR_FORCE_2500; + } else { + return; + } + + if (err) + return; + + if (bp->autoneg & AUTONEG_SPEED) + bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); +} + +static void +bnx2_5706s_force_link_dn(struct bnx2 *bp, int start) +{ + u32 val; + + bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); + if (start) + bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f); + else + bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0); +} + +static int +bnx2_set_link(struct bnx2 *bp) +{ + u32 bmsr; + u8 link_up; + + if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) { + bp->link_up = 1; + return 0; + } + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return 0; + + link_up = bp->link_up; + + bnx2_enable_bmsr1(bp); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_disable_bmsr1(bp); + + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (BNX2_CHIP(bp) == BNX2_CHIP_5706)) { + u32 val, an_dbg; + + if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) { + bnx2_5706s_force_link_dn(bp, 0); + bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN; + } + val = BNX2_RD(bp, BNX2_EMAC_STATUS); + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + + if ((val & BNX2_EMAC_STATUS_LINK) && + !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC)) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + + if (bmsr & BMSR_LSTATUS) { + bp->link_up = 1; + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) + bnx2_5706s_linkup(bp); + else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) + bnx2_5708s_linkup(bp); + else if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + bnx2_5709s_linkup(bp); + } + else { + bnx2_copper_linkup(bp); + } + bnx2_resolve_flow_ctrl(bp); + } + else { + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (bp->autoneg & AUTONEG_SPEED)) + bnx2_disable_forced_2g5(bp); + + if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) { + u32 bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + bmcr |= BMCR_ANENABLE; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); + + bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; + } + bp->link_up = 0; + } + + if (bp->link_up != link_up) { + bnx2_report_link(bp); + } + + 
bnx2_set_mac_link(bp); + + return 0; +} + +static int +bnx2_reset_phy(struct bnx2 *bp) +{ + int i; + u32 reg; + + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET); + +#define PHY_RESET_MAX_WAIT 100 + for (i = 0; i < PHY_RESET_MAX_WAIT; i++) { + udelay(10); + + bnx2_read_phy(bp, bp->mii_bmcr, &reg); + if (!(reg & BMCR_RESET)) { + udelay(20); + break; + } + } + if (i == PHY_RESET_MAX_WAIT) { + return -EBUSY; + } + return 0; +} + +static u32 +bnx2_phy_get_pause_adv(struct bnx2 *bp) +{ + u32 adv = 0; + + if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) == + (FLOW_CTRL_RX | FLOW_CTRL_TX)) { + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + adv = ADVERTISE_1000XPAUSE; + } + else { + adv = ADVERTISE_PAUSE_CAP; + } + } + else if (bp->req_flow_ctrl & FLOW_CTRL_TX) { + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + adv = ADVERTISE_1000XPSE_ASYM; + } + else { + adv = ADVERTISE_PAUSE_ASYM; + } + } + else if (bp->req_flow_ctrl & FLOW_CTRL_RX) { + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + } + else { + adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + } + } + return adv; +} + +static int bnx2_fw_sync(struct bnx2 *, u32, int, int); + +static int +bnx2_setup_remote_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + u32 speed_arg = 0, pause_adv; + + pause_adv = bnx2_phy_get_pause_adv(bp); + + if (bp->autoneg & AUTONEG_SPEED) { + speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG; + if (bp->advertising & ADVERTISED_10baseT_Half) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF; + if (bp->advertising & ADVERTISED_10baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL; + if (bp->advertising & ADVERTISED_100baseT_Half) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF; + if (bp->advertising & ADVERTISED_100baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL; + if (bp->advertising & ADVERTISED_1000baseT_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL; + if (bp->advertising & ADVERTISED_2500baseX_Full) + speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; + } else { + if (bp->req_line_speed == SPEED_2500) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL; + else if (bp->req_line_speed == SPEED_1000) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL; + else if (bp->req_line_speed == SPEED_100) { + if (bp->req_duplex == DUPLEX_FULL) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL; + else + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF; + } else if (bp->req_line_speed == SPEED_10) { + if (bp->req_duplex == DUPLEX_FULL) + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL; + else + speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF; + } + } + + if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP)) + speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE; + if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM)) + speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE; + + if (port == PORT_TP) + speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE | + BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED; + + bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg); + + spin_unlock_bh(&bp->phy_lock); + bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0); + spin_lock_bh(&bp->phy_lock); + + return 0; +} + +static int +bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + u32 adv, bmcr; + u32 new_adv = 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return bnx2_setup_remote_phy(bp, port); + + if (!(bp->autoneg & AUTONEG_SPEED)) { + u32 new_bmcr; + int 
force_link_down = 0; + + if (bp->req_line_speed == SPEED_2500) { + if (!bnx2_test_and_enable_2g5(bp)) + force_link_down = 1; + } else if (bp->req_line_speed == SPEED_1000) { + if (bnx2_test_and_disable_2g5(bp)) + force_link_down = 1; + } + bnx2_read_phy(bp, bp->mii_adv, &adv); + adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF); + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + new_bmcr = bmcr & ~BMCR_ANENABLE; + new_bmcr |= BMCR_SPEED1000; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + if (bp->req_line_speed == SPEED_2500) + bnx2_enable_forced_2g5(bp); + else if (bp->req_line_speed == SPEED_1000) { + bnx2_disable_forced_2g5(bp); + new_bmcr &= ~0x2000; + } + + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) { + if (bp->req_line_speed == SPEED_2500) + new_bmcr |= BCM5708S_BMCR_FORCE_2500; + else + new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500; + } + + if (bp->req_duplex == DUPLEX_FULL) { + adv |= ADVERTISE_1000XFULL; + new_bmcr |= BMCR_FULLDPLX; + } + else { + adv |= ADVERTISE_1000XHALF; + new_bmcr &= ~BMCR_FULLDPLX; + } + if ((new_bmcr != bmcr) || (force_link_down)) { + /* Force a link down visible on the other side */ + if (bp->link_up) { + bnx2_write_phy(bp, bp->mii_adv, adv & + ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF)); + bnx2_write_phy(bp, bp->mii_bmcr, bmcr | + BMCR_ANRESTART | BMCR_ANENABLE); + + bp->link_up = 0; + netif_carrier_off(bp->dev); + bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); + bnx2_report_link(bp); + } + bnx2_write_phy(bp, bp->mii_adv, adv); + bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); + } else { + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + return 0; + } + + bnx2_test_and_enable_2g5(bp); + + if (bp->advertising & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000XFULL; + + new_adv |= bnx2_phy_get_pause_adv(bp); + + bnx2_read_phy(bp, bp->mii_adv, &adv); + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + bp->serdes_an_pending = 0; + if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) { + /* Force a link down visible on the other side */ + if (bp->link_up) { + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); + spin_unlock_bh(&bp->phy_lock); + msleep(20); + spin_lock_bh(&bp->phy_lock); + } + + bnx2_write_phy(bp, bp->mii_adv, new_adv); + bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + /* Speed up link-up time when the link partner + * does not autonegotiate, which is very common + * in blade servers. Some blade servers use + * IPMI for keyboard input and it's important + * to minimize link disruptions. Autoneg. involves + * exchanging base pages plus 3 next pages and + * normally completes in about 120 msec. + */ + bp->current_interval = BNX2_SERDES_AN_TIMEOUT; + bp->serdes_an_pending = 1; + mod_timer(&bp->timer, jiffies + bp->current_interval); + } else { + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + + return 0; +} + +#define ETHTOOL_ALL_FIBRE_SPEED \ + (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? 
\ + (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\ + (ADVERTISED_1000baseT_Full) + +#define ETHTOOL_ALL_COPPER_SPEED \ + (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ + ADVERTISED_1000baseT_Full) + +#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA) + +#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL) + +static void +bnx2_set_default_remote_link(struct bnx2 *bp) +{ + u32 link; + + if (bp->phy_port == PORT_TP) + link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK); + else + link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK); + + if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) { + bp->req_line_speed = 0; + bp->autoneg |= AUTONEG_SPEED; + bp->advertising = ADVERTISED_Autoneg; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) + bp->advertising |= ADVERTISED_10baseT_Half; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL) + bp->advertising |= ADVERTISED_10baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) + bp->advertising |= ADVERTISED_100baseT_Half; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL) + bp->advertising |= ADVERTISED_100baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) + bp->advertising |= ADVERTISED_1000baseT_Full; + if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) + bp->advertising |= ADVERTISED_2500baseX_Full; + } else { + bp->autoneg = 0; + bp->advertising = 0; + bp->req_duplex = DUPLEX_FULL; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10) { + bp->req_line_speed = SPEED_10; + if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF) + bp->req_duplex = DUPLEX_HALF; + } + if (link & BNX2_NETLINK_SET_LINK_SPEED_100) { + bp->req_line_speed = SPEED_100; + if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF) + bp->req_duplex = DUPLEX_HALF; + } + if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL) + bp->req_line_speed = SPEED_1000; + if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL) + bp->req_line_speed = SPEED_2500; + } +} + +static void +bnx2_set_default_link(struct bnx2 *bp) +{ + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + bnx2_set_default_remote_link(bp); + return; + } + + bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL; + bp->req_line_speed = 0; + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + u32 reg; + + bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg; + + reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG); + reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK; + if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) { + bp->autoneg = 0; + bp->req_line_speed = bp->line_speed = SPEED_1000; + bp->req_duplex = DUPLEX_FULL; + } + } else + bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg; +} + +static void +bnx2_send_heart_beat(struct bnx2 *bp) +{ + u32 msg; + u32 addr; + + spin_lock(&bp->indirect_lock); + msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); + addr = bp->shmem_base + BNX2_DRV_PULSE_MB; + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); + BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); + spin_unlock(&bp->indirect_lock); +} + +static void +bnx2_remote_phy_event(struct bnx2 *bp) +{ + u32 msg; + u8 link_up = bp->link_up; + u8 old_port; + + msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); + + if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED) + bnx2_send_heart_beat(bp); + + msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED; + + if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN) + bp->link_up = 0; + else { + u32 speed; + + bp->link_up = 1; + speed = msg & BNX2_LINK_STATUS_SPEED_MASK; 
+ bp->duplex = DUPLEX_FULL; + switch (speed) { + case BNX2_LINK_STATUS_10HALF: + bp->duplex = DUPLEX_HALF; + /* fall through */ + case BNX2_LINK_STATUS_10FULL: + bp->line_speed = SPEED_10; + break; + case BNX2_LINK_STATUS_100HALF: + bp->duplex = DUPLEX_HALF; + /* fall through */ + case BNX2_LINK_STATUS_100BASE_T4: + case BNX2_LINK_STATUS_100FULL: + bp->line_speed = SPEED_100; + break; + case BNX2_LINK_STATUS_1000HALF: + bp->duplex = DUPLEX_HALF; + /* fall through */ + case BNX2_LINK_STATUS_1000FULL: + bp->line_speed = SPEED_1000; + break; + case BNX2_LINK_STATUS_2500HALF: + bp->duplex = DUPLEX_HALF; + /* fall through */ + case BNX2_LINK_STATUS_2500FULL: + bp->line_speed = SPEED_2500; + break; + default: + bp->line_speed = 0; + break; + } + + bp->flow_ctrl = 0; + if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != + (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) { + if (bp->duplex == DUPLEX_FULL) + bp->flow_ctrl = bp->req_flow_ctrl; + } else { + if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED) + bp->flow_ctrl |= FLOW_CTRL_TX; + if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED) + bp->flow_ctrl |= FLOW_CTRL_RX; + } + + old_port = bp->phy_port; + if (msg & BNX2_LINK_STATUS_SERDES_LINK) + bp->phy_port = PORT_FIBRE; + else + bp->phy_port = PORT_TP; + + if (old_port != bp->phy_port) + bnx2_set_default_link(bp); + + } + if (bp->link_up != link_up) + bnx2_report_link(bp); + + bnx2_set_mac_link(bp); +} + +static int +bnx2_set_remote_link(struct bnx2 *bp) +{ + u32 evt_code; + + evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB); + switch (evt_code) { + case BNX2_FW_EVT_CODE_LINK_EVENT: + bnx2_remote_phy_event(bp); + break; + case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT: + default: + bnx2_send_heart_beat(bp); + break; + } + return 0; +} + +static int +bnx2_setup_copper_phy(struct bnx2 *bp) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + u32 bmcr, adv_reg, new_adv = 0; + u32 new_bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + bnx2_read_phy(bp, bp->mii_adv, &adv_reg); + adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + + new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising); + + if (bp->autoneg & AUTONEG_SPEED) { + u32 adv1000_reg; + u32 new_adv1000 = 0; + + new_adv |= bnx2_phy_get_pause_adv(bp); + + bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); + adv1000_reg &= PHY_ALL_1000_SPEED; + + new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising); + if ((adv1000_reg != new_adv1000) || + (adv_reg != new_adv) || + ((bmcr & BMCR_ANENABLE) == 0)) { + + bnx2_write_phy(bp, bp->mii_adv, new_adv); + bnx2_write_phy(bp, MII_CTRL1000, new_adv1000); + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART | + BMCR_ANENABLE); + } + else if (bp->link_up) { + /* Flow ctrl may have changed from auto to forced */ + /* or vice-versa. 
*/ + + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + return 0; + } + + /* advertise nothing when forcing speed */ + if (adv_reg != new_adv) + bnx2_write_phy(bp, bp->mii_adv, new_adv); + + new_bmcr = 0; + if (bp->req_line_speed == SPEED_100) { + new_bmcr |= BMCR_SPEED100; + } + if (bp->req_duplex == DUPLEX_FULL) { + new_bmcr |= BMCR_FULLDPLX; + } + if (new_bmcr != bmcr) { + u32 bmsr; + + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + + if (bmsr & BMSR_LSTATUS) { + /* Force link down */ + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); + spin_unlock_bh(&bp->phy_lock); + msleep(50); + spin_lock_bh(&bp->phy_lock); + + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr, &bmsr); + } + + bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr); + + /* Normally, the new speed is setup after the link has + * gone down and up again. In some cases, link will not go + * down so we need to set up the new speed here. + */ + if (bmsr & BMSR_LSTATUS) { + bp->line_speed = bp->req_line_speed; + bp->duplex = bp->req_duplex; + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + } else { + bnx2_resolve_flow_ctrl(bp); + bnx2_set_mac_link(bp); + } + return 0; +} + +static int +bnx2_setup_phy(struct bnx2 *bp, u8 port) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + if (bp->loopback == MAC_LOOPBACK) + return 0; + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + return bnx2_setup_serdes_phy(bp, port); + } + else { + return bnx2_setup_copper_phy(bp); + } +} + +static int +bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy) +{ + u32 val; + + bp->mii_bmcr = MII_BMCR + 0x10; + bp->mii_bmsr = MII_BMSR + 0x10; + bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1; + bp->mii_adv = MII_ADVERTISE + 0x10; + bp->mii_lpa = MII_LPA + 0x10; + bp->mii_up1 = MII_BNX2_OVER1G_UP1; + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER); + bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + if (reset_phy) + bnx2_reset_phy(bp); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG); + + bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val); + val &= ~MII_BNX2_SD_1000XCTL1_AUTODET; + val |= MII_BNX2_SD_1000XCTL1_FIBER; + bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); + bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val); + if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) + val |= BCM5708S_UP1_2G5; + else + val &= ~BCM5708S_UP1_2G5; + bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG); + bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val); + val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM; + bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0); + + val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN | + MII_BNX2_CL73_BAM_NP_AFT_BP_EN; + bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val); + + bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0); + + return 0; +} + +static int +bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy) +{ + u32 val; + + if (reset_phy) + bnx2_reset_phy(bp); + + bp->mii_up1 = BCM5708S_UP1; + + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3); + bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE); + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); + + bnx2_read_phy(bp, 
BCM5708S_1000X_CTL1, &val); + val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN; + bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val); + + bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val); + val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN; + bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val); + + if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) { + bnx2_read_phy(bp, BCM5708S_UP1, &val); + val |= BCM5708S_UP1_2G5; + bnx2_write_phy(bp, BCM5708S_UP1, val); + } + + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) { + /* increase tx signal amplitude */ + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, + BCM5708S_BLK_ADDR_TX_MISC); + bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val); + val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM; + bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val); + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG); + } + + val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) & + BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK; + + if (val) { + u32 is_backplane; + + is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); + if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) { + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, + BCM5708S_BLK_ADDR_TX_MISC); + bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val); + bnx2_write_phy(bp, BCM5708S_BLK_ADDR, + BCM5708S_BLK_ADDR_DIG); + } + } + return 0; +} + +static int +bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy) +{ + if (reset_phy) + bnx2_reset_phy(bp); + + bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) + BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); + + if (bp->dev->mtu > 1500) { + u32 val; + + /* Set extended packet length bit */ + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000); + + bnx2_write_phy(bp, 0x1c, 0x6c00); + bnx2_read_phy(bp, 0x1c, &val); + bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02); + } + else { + u32 val; + + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, val & ~0x4007); + + bnx2_write_phy(bp, 0x1c, 0x6c00); + bnx2_read_phy(bp, 0x1c, &val); + bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00); + } + + return 0; +} + +static int +bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) +{ + u32 val; + + if (reset_phy) + bnx2_reset_phy(bp); + + if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) { + bnx2_write_phy(bp, 0x18, 0x0c00); + bnx2_write_phy(bp, 0x17, 0x000a); + bnx2_write_phy(bp, 0x15, 0x310b); + bnx2_write_phy(bp, 0x17, 0x201f); + bnx2_write_phy(bp, 0x15, 0x9506); + bnx2_write_phy(bp, 0x17, 0x401f); + bnx2_write_phy(bp, 0x15, 0x14e2); + bnx2_write_phy(bp, 0x18, 0x0400); + } + + if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) { + bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, + MII_BNX2_DSP_EXPAND_REG | 0x8); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val); + val &= ~(1 << 8); + bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); + } + + if (bp->dev->mtu > 1500) { + /* Set extended packet length bit */ + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, val | 0x4000); + + bnx2_read_phy(bp, 0x10, &val); + bnx2_write_phy(bp, 0x10, val | 0x1); + } + else { + bnx2_write_phy(bp, 0x18, 0x7); + bnx2_read_phy(bp, 0x18, &val); + bnx2_write_phy(bp, 0x18, val & ~0x4007); + + bnx2_read_phy(bp, 0x10, &val); + bnx2_write_phy(bp, 0x10, val & ~0x1); + } + + /* ethernet@wirespeed */ + bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL); + bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val); + val |= AUX_CTL_MISC_CTL_WR | 
AUX_CTL_MISC_CTL_WIRESPEED; + + /* auto-mdix */ + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + val |= AUX_CTL_MISC_CTL_AUTOMDIX; + + bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val); + return 0; +} + + +static int +bnx2_init_phy(struct bnx2 *bp, int reset_phy) +__releases(&bp->phy_lock) +__acquires(&bp->phy_lock) +{ + u32 val; + int rc = 0; + + bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK; + bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY; + + bp->mii_bmcr = MII_BMCR; + bp->mii_bmsr = MII_BMSR; + bp->mii_bmsr1 = MII_BMSR; + bp->mii_adv = MII_ADVERTISE; + bp->mii_lpa = MII_LPA; + + BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + goto setup_phy; + + bnx2_read_phy(bp, MII_PHYSID1, &val); + bp->phy_id = val << 16; + bnx2_read_phy(bp, MII_PHYSID2, &val); + bp->phy_id |= val & 0xffff; + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) + rc = bnx2_init_5706s_phy(bp, reset_phy); + else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) + rc = bnx2_init_5708s_phy(bp, reset_phy); + else if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + rc = bnx2_init_5709s_phy(bp, reset_phy); + } + else { + rc = bnx2_init_copper_phy(bp, reset_phy); + } + +setup_phy: + if (!rc) + rc = bnx2_setup_phy(bp, bp->phy_port); + + return rc; +} + +static int +bnx2_set_mac_loopback(struct bnx2 *bp) +{ + u32 mac_mode; + + mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE); + mac_mode &= ~BNX2_EMAC_MODE_PORT; + mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK; + BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode); + bp->link_up = 1; + return 0; +} + +static int bnx2_test_link(struct bnx2 *); + +static int +bnx2_set_phy_loopback(struct bnx2 *bp) +{ + u32 mac_mode; + int rc, i; + + spin_lock_bh(&bp->phy_lock); + rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX | + BMCR_SPEED1000); + spin_unlock_bh(&bp->phy_lock); + if (rc) + return rc; + + for (i = 0; i < 10; i++) { + if (bnx2_test_link(bp) == 0) + break; + msleep(100); + } + + mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE); + mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | + BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | + BNX2_EMAC_MODE_25G_MODE); + + mac_mode |= BNX2_EMAC_MODE_PORT_GMII; + BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode); + bp->link_up = 1; + return 0; +} + +static void +bnx2_dump_mcp_state(struct bnx2 *bp) +{ + struct net_device *dev = bp->dev; + u32 mcp_p0, mcp_p1; + + netdev_err(dev, "<--- start MCP states dump --->\n"); + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + mcp_p0 = BNX2_MCP_STATE_P0; + mcp_p1 = BNX2_MCP_STATE_P1; + } else { + mcp_p0 = BNX2_MCP_STATE_P0_5708; + mcp_p1 = BNX2_MCP_STATE_P1_5708; + } + netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n", + bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1)); + netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n", + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK)); + netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n", + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER), + bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION)); + netdev_err(dev, "DEBUG: shmem states:\n"); + netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]", + bnx2_shmem_rd(bp, BNX2_DRV_MB), + bnx2_shmem_rd(bp, BNX2_FW_MB), + bnx2_shmem_rd(bp, BNX2_LINK_STATUS)); + pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB)); + netdev_err(dev, "DEBUG: 
dev_info_signature[%08x] reset_type[%08x]", + bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE), + bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE)); + pr_cont(" condition[%08x]\n", + bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION)); + DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE); + DP_SHMEM_LINE(bp, 0x3cc); + DP_SHMEM_LINE(bp, 0x3dc); + DP_SHMEM_LINE(bp, 0x3ec); + netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc)); + netdev_err(dev, "<--- end MCP states dump --->\n"); +} + +static int +bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) +{ + int i; + u32 val; + + bp->fw_wr_seq++; + msg_data |= bp->fw_wr_seq; + bp->fw_last_msg = msg_data; + + bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); + + if (!ack) + return 0; + + /* wait for an acknowledgement. */ + for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) { + msleep(10); + + val = bnx2_shmem_rd(bp, BNX2_FW_MB); + + if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ)) + break; + } + if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0) + return 0; + + /* If we timed out, inform the firmware that this is the case. */ + if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) { + msg_data &= ~BNX2_DRV_MSG_CODE; + msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT; + + bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); + if (!silent) { + pr_err("fw sync timeout, reset code = %x\n", msg_data); + bnx2_dump_mcp_state(bp); + } + + return -EBUSY; + } + + if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK) + return -EIO; + + return 0; +} + +static int +bnx2_init_5709_context(struct bnx2 *bp) +{ + int i, ret = 0; + u32 val; + + val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12); + val |= (BNX2_PAGE_BITS - 8) << 16; + BNX2_WR(bp, BNX2_CTX_COMMAND, val); + for (i = 0; i < 10; i++) { + val = BNX2_RD(bp, BNX2_CTX_COMMAND); + if (!(val & BNX2_CTX_COMMAND_MEM_INIT)) + break; + udelay(2); + } + if (val & BNX2_CTX_COMMAND_MEM_INIT) + return -EBUSY; + + for (i = 0; i < bp->ctx_pages; i++) { + int j; + + if (bp->ctx_blk[i]) + memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE); + else + return -ENOMEM; + + BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, + (bp->ctx_blk_mapping[i] & 0xffffffff) | + BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); + BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, + (u64) bp->ctx_blk_mapping[i] >> 32); + BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | + BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); + for (j = 0; j < 10; j++) { + + val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); + if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) + break; + udelay(5); + } + if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { + ret = -EBUSY; + break; + } + } + return ret; +} + +static void +bnx2_init_context(struct bnx2 *bp) +{ + u32 vcid; + + vcid = 96; + while (vcid) { + u32 vcid_addr, pcid_addr, offset; + int i; + + vcid--; + + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { + u32 new_vcid; + + vcid_addr = GET_PCID_ADDR(vcid); + if (vcid & 0x8) { + new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7); + } + else { + new_vcid = vcid; + } + pcid_addr = GET_PCID_ADDR(new_vcid); + } + else { + vcid_addr = GET_CID_ADDR(vcid); + pcid_addr = vcid_addr; + } + + for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) { + vcid_addr += (i << PHY_CTX_SHIFT); + pcid_addr += (i << PHY_CTX_SHIFT); + + BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); + BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); + + /* Zero out the context. 
*/ + for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) + bnx2_ctx_wr(bp, vcid_addr, offset, 0); + } + } +} + +static int +bnx2_alloc_bad_rbuf(struct bnx2 *bp) +{ + u16 *good_mbuf; + u32 good_mbuf_cnt; + u32 val; + + good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); + if (good_mbuf == NULL) + return -ENOMEM; + + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); + + good_mbuf_cnt = 0; + + /* Allocate a bunch of mbufs and save the good ones in an array. */ + val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); + while (val & BNX2_RBUF_STATUS1_FREE_COUNT) { + bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND, + BNX2_RBUF_COMMAND_ALLOC_REQ); + + val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC); + + val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE; + + /* The addresses with Bit 9 set are bad memory blocks. */ + if (!(val & (1 << 9))) { + good_mbuf[good_mbuf_cnt] = (u16) val; + good_mbuf_cnt++; + } + + val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1); + } + + /* Free the good ones back to the mbuf pool thus discarding + * all the bad ones. */ + while (good_mbuf_cnt) { + good_mbuf_cnt--; + + val = good_mbuf[good_mbuf_cnt]; + val = (val << 9) | val | 1; + + bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val); + } + kfree(good_mbuf); + return 0; +} + +static void +bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) +{ + u32 val; + + val = (mac_addr[0] << 8) | mac_addr[1]; + + BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val); + + val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + + BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val); +} + +static inline int +bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) +{ + dma_addr_t mapping; + struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index]; + struct bnx2_rx_bd *rxbd = + &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)]; + struct page *page = alloc_page(gfp); + + if (!page) + return -ENOMEM; + mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + __free_page(page); + return -EIO; + } + + rx_pg->page = page; + dma_unmap_addr_set(rx_pg, mapping, mapping); + rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; + rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; + return 0; +} + +static void +bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) +{ + struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index]; + struct page *page = rx_pg->page; + + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), + PAGE_SIZE, PCI_DMA_FROMDEVICE); + + __free_page(page); + rx_pg->page = NULL; +} + +static inline int +bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) +{ + u8 *data; + struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index]; + dma_addr_t mapping; + struct bnx2_rx_bd *rxbd = + &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)]; + + data = kmalloc(bp->rx_buf_size, gfp); + if (!data) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, + get_l2_fhdr(data), + bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + kfree(data); + return -EIO; + } + + rx_buf->data = data; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; + rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; + + rxr->rx_prod_bseq += bp->rx_buf_use_size; + + return 0; +} + +static int +bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, 
u32 event) +{ + struct status_block *sblk = bnapi->status_blk.msi; + u32 new_link_state, old_link_state; + int is_set = 1; + + new_link_state = sblk->status_attn_bits & event; + old_link_state = sblk->status_attn_bits_ack & event; + if (new_link_state != old_link_state) { + if (new_link_state) + BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); + else + BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); + } else + is_set = 0; + + return is_set; +} + +static void +bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + spin_lock(&bp->phy_lock); + + if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) + bnx2_set_link(bp); + if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT)) + bnx2_set_remote_link(bp); + + spin_unlock(&bp->phy_lock); + +} + +static inline u16 +bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) +{ + u16 cons; + + /* Tell compiler that status block fields can change. */ + barrier(); + cons = *bnapi->hw_tx_cons_ptr; + barrier(); + if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT)) + cons++; + return cons; +} + +static int +bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) +{ + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + u16 hw_cons, sw_cons, sw_ring_cons; + int tx_pkt = 0, index; + unsigned int tx_bytes = 0; + struct netdev_queue *txq; + + index = (bnapi - bp->bnx2_napi); + txq = netdev_get_tx_queue(bp->dev, index); + + hw_cons = bnx2_get_hw_tx_cons(bnapi); + sw_cons = txr->tx_cons; + + while (sw_cons != hw_cons) { + struct bnx2_sw_tx_bd *tx_buf; + struct sk_buff *skb; + int i, last; + + sw_ring_cons = BNX2_TX_RING_IDX(sw_cons); + + tx_buf = &txr->tx_buf_ring[sw_ring_cons]; + skb = tx_buf->skb; + + /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */ + prefetch(&skb->end); + + /* partial BD completions possible with TSO packets */ + if (tx_buf->is_gso) { + u16 last_idx, last_ring_idx; + + last_idx = sw_cons + tx_buf->nr_frags + 1; + last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1; + if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) { + last_idx++; + } + if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) { + break; + } + } + + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + + tx_buf->skb = NULL; + last = tx_buf->nr_frags; + + for (i = 0; i < last; i++) { + struct bnx2_sw_tx_bd *tx_buf; + + sw_cons = BNX2_NEXT_TX_BD(sw_cons); + + tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)]; + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + } + + sw_cons = BNX2_NEXT_TX_BD(sw_cons); + + tx_bytes += skb->len; + dev_kfree_skb_any(skb); + tx_pkt++; + if (tx_pkt == budget) + break; + + if (hw_cons == sw_cons) + hw_cons = bnx2_get_hw_tx_cons(bnapi); + } + + netdev_tx_completed_queue(txq, tx_pkt, tx_bytes); + txr->hw_tx_cons = hw_cons; + txr->tx_cons = sw_cons; + + /* Need to make the tx_cons update visible to bnx2_start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that bnx2_start_xmit() + * will miss it and cause the queue to be stopped forever. 
+ */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq)) && + (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) { + __netif_tx_lock(txq, smp_processor_id()); + if ((netif_tx_queue_stopped(txq)) && + (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } + + return tx_pkt; +} + +static void +bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, + struct sk_buff *skb, int count) +{ + struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg; + struct bnx2_rx_bd *cons_bd, *prod_bd; + int i; + u16 hw_prod, prod; + u16 cons = rxr->rx_pg_cons; + + cons_rx_pg = &rxr->rx_pg_ring[cons]; + + /* The caller was unable to allocate a new page to replace the + * last one in the frags array, so we need to recycle that page + * and then free the skb. + */ + if (skb) { + struct page *page; + struct skb_shared_info *shinfo; + + shinfo = skb_shinfo(skb); + shinfo->nr_frags--; + page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]); + __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL); + + cons_rx_pg->page = page; + dev_kfree_skb(skb); + } + + hw_prod = rxr->rx_pg_prod; + + for (i = 0; i < count; i++) { + prod = BNX2_RX_PG_RING_IDX(hw_prod); + + prod_rx_pg = &rxr->rx_pg_ring[prod]; + cons_rx_pg = &rxr->rx_pg_ring[cons]; + cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)] + [BNX2_RX_IDX(cons)]; + prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)] + [BNX2_RX_IDX(prod)]; + + if (prod != cons) { + prod_rx_pg->page = cons_rx_pg->page; + cons_rx_pg->page = NULL; + dma_unmap_addr_set(prod_rx_pg, mapping, + dma_unmap_addr(cons_rx_pg, mapping)); + + prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; + prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; + + } + cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons)); + hw_prod = BNX2_NEXT_RX_BD(hw_prod); + } + rxr->rx_pg_prod = hw_prod; + rxr->rx_pg_cons = cons; +} + +static inline void +bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, + u8 *data, u16 cons, u16 prod) +{ + struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf; + struct bnx2_rx_bd *cons_bd, *prod_bd; + + cons_rx_buf = &rxr->rx_buf_ring[cons]; + prod_rx_buf = &rxr->rx_buf_ring[prod]; + + dma_sync_single_for_device(&bp->pdev->dev, + dma_unmap_addr(cons_rx_buf, mapping), + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); + + rxr->rx_prod_bseq += bp->rx_buf_use_size; + + prod_rx_buf->data = data; + + if (cons == prod) + return; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + + cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)]; + prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)]; + prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; + prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; +} + +static struct sk_buff * +bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data, + unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr, + u32 ring_idx) +{ + int err; + u16 prod = ring_idx & 0xffff; + struct sk_buff *skb; + + err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod); +error: + if (hdr_len) { + unsigned int raw_len = len + 4; + int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT; + + bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); + } + return NULL; + } + + dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + skb = build_skb(data, 0); + if (!skb) { + kfree(data); + goto error; + } + skb_reserve(skb, ((u8 
*)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET); + if (hdr_len == 0) { + skb_put(skb, len); + return skb; + } else { + unsigned int i, frag_len, frag_size, pages; + struct bnx2_sw_pg *rx_pg; + u16 pg_cons = rxr->rx_pg_cons; + u16 pg_prod = rxr->rx_pg_prod; + + frag_size = len + 4 - hdr_len; + pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT; + skb_put(skb, hdr_len); + + for (i = 0; i < pages; i++) { + dma_addr_t mapping_old; + + frag_len = min(frag_size, (unsigned int) PAGE_SIZE); + if (unlikely(frag_len <= 4)) { + unsigned int tail = 4 - frag_len; + + rxr->rx_pg_cons = pg_cons; + rxr->rx_pg_prod = pg_prod; + bnx2_reuse_rx_skb_pages(bp, rxr, NULL, + pages - i); + skb->len -= tail; + if (i == 0) { + skb->tail -= tail; + } else { + skb_frag_t *frag = + &skb_shinfo(skb)->frags[i - 1]; + skb_frag_size_sub(frag, tail); + skb->data_len -= tail; + } + return skb; + } + rx_pg = &rxr->rx_pg_ring[pg_cons]; + + /* Don't unmap yet. If we're unable to allocate a new + * page, we need to recycle the page and the DMA addr. + */ + mapping_old = dma_unmap_addr(rx_pg, mapping); + if (i == pages - 1) + frag_len -= 4; + + skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len); + rx_pg->page = NULL; + + err = bnx2_alloc_rx_page(bp, rxr, + BNX2_RX_PG_RING_IDX(pg_prod), + GFP_ATOMIC); + if (unlikely(err)) { + rxr->rx_pg_cons = pg_cons; + rxr->rx_pg_prod = pg_prod; + bnx2_reuse_rx_skb_pages(bp, rxr, skb, + pages - i); + return NULL; + } + + dma_unmap_page(&bp->pdev->dev, mapping_old, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + + frag_size -= frag_len; + skb->data_len += frag_len; + skb->truesize += PAGE_SIZE; + skb->len += frag_len; + + pg_prod = BNX2_NEXT_RX_BD(pg_prod); + pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons)); + } + rxr->rx_pg_prod = pg_prod; + rxr->rx_pg_cons = pg_cons; + } + return skb; +} + +static inline u16 +bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) +{ + u16 cons; + + /* Tell compiler that status block fields can change. 
*/ + barrier(); + cons = *bnapi->hw_rx_cons_ptr; + barrier(); + if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT)) + cons++; + return cons; +} + +static int +bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) +{ + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; + struct l2_fhdr *rx_hdr; + int rx_pkt = 0, pg_ring_used = 0; + + if (budget <= 0) + return rx_pkt; + + hw_cons = bnx2_get_hw_rx_cons(bnapi); + sw_cons = rxr->rx_cons; + sw_prod = rxr->rx_prod; + + /* Memory barrier necessary as speculative reads of the rx + * buffer can be ahead of the index in the status block + */ + rmb(); + while (sw_cons != hw_cons) { + unsigned int len, hdr_len; + u32 status; + struct bnx2_sw_bd *rx_buf, *next_rx_buf; + struct sk_buff *skb; + dma_addr_t dma_addr; + u8 *data; + u16 next_ring_idx; + + sw_ring_cons = BNX2_RX_RING_IDX(sw_cons); + sw_ring_prod = BNX2_RX_RING_IDX(sw_prod); + + rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; + data = rx_buf->data; + rx_buf->data = NULL; + + rx_hdr = get_l2_fhdr(data); + prefetch(rx_hdr); + + dma_addr = dma_unmap_addr(rx_buf, mapping); + + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, + PCI_DMA_FROMDEVICE); + + next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons)); + next_rx_buf = &rxr->rx_buf_ring[next_ring_idx]; + prefetch(get_l2_fhdr(next_rx_buf->data)); + + len = rx_hdr->l2_fhdr_pkt_len; + status = rx_hdr->l2_fhdr_status; + + hdr_len = 0; + if (status & L2_FHDR_STATUS_SPLIT) { + hdr_len = rx_hdr->l2_fhdr_ip_xsum; + pg_ring_used = 1; + } else if (len > bp->rx_jumbo_thresh) { + hdr_len = bp->rx_jumbo_thresh; + pg_ring_used = 1; + } + + if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC | + L2_FHDR_ERRORS_PHY_DECODE | + L2_FHDR_ERRORS_ALIGNMENT | + L2_FHDR_ERRORS_TOO_SHORT | + L2_FHDR_ERRORS_GIANT_FRAME))) { + + bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, + sw_ring_prod); + if (pg_ring_used) { + int pages; + + pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT; + + bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages); + } + goto next_rx; + } + + len -= 4; + + if (len <= bp->rx_copy_thresh) { + skb = netdev_alloc_skb(bp->dev, len + 6); + if (skb == NULL) { + bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, + sw_ring_prod); + goto next_rx; + } + + /* aligned copy */ + memcpy(skb->data, + (u8 *)rx_hdr + BNX2_RX_OFFSET - 6, + len + 6); + skb_reserve(skb, 6); + skb_put(skb, len); + + bnx2_reuse_rx_data(bp, rxr, data, + sw_ring_cons, sw_ring_prod); + + } else { + skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr, + (sw_ring_cons << 16) | sw_ring_prod); + if (!skb) + goto next_rx; + } + if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && + !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag); + + skb->protocol = eth_type_trans(skb, bp->dev); + + if (len > (bp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(0x8100) && + skb->protocol != htons(ETH_P_8021AD)) { + + dev_kfree_skb(skb); + goto next_rx; + + } + + skb_checksum_none_assert(skb); + if ((bp->dev->features & NETIF_F_RXCSUM) && + (status & (L2_FHDR_STATUS_TCP_SEGMENT | + L2_FHDR_STATUS_UDP_DATAGRAM))) { + + if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM | + L2_FHDR_ERRORS_UDP_XSUM)) == 0)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + if ((bp->dev->features & NETIF_F_RXHASH) && + ((status & L2_FHDR_STATUS_USE_RXHASH) == + L2_FHDR_STATUS_USE_RXHASH)) + skb_set_hash(skb, rx_hdr->l2_fhdr_hash, + PKT_HASH_TYPE_L3); + + 
skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); + napi_gro_receive(&bnapi->napi, skb); + rx_pkt++; + +next_rx: + sw_cons = BNX2_NEXT_RX_BD(sw_cons); + sw_prod = BNX2_NEXT_RX_BD(sw_prod); + + if ((rx_pkt == budget)) + break; + + /* Refresh hw_cons to see if there is new work */ + if (sw_cons == hw_cons) { + hw_cons = bnx2_get_hw_rx_cons(bnapi); + rmb(); + } + } + rxr->rx_cons = sw_cons; + rxr->rx_prod = sw_prod; + + if (pg_ring_used) + BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); + + BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod); + + BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); + + mmiowb(); + + return rx_pkt; + +} + +/* MSI ISR - The only difference between this and the INTx ISR + * is that the MSI interrupt is always serviced. + */ +static irqreturn_t +bnx2_msi(int irq, void *dev_instance) +{ + struct bnx2_napi *bnapi = dev_instance; + struct bnx2 *bp = bnapi->bp; + + prefetch(bnapi->status_blk.msi); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + + /* Return here if interrupt is disabled. */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + napi_schedule(&bnapi->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t +bnx2_msi_1shot(int irq, void *dev_instance) +{ + struct bnx2_napi *bnapi = dev_instance; + struct bnx2 *bp = bnapi->bp; + + prefetch(bnapi->status_blk.msi); + + /* Return here if interrupt is disabled. */ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + napi_schedule(&bnapi->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t +bnx2_interrupt(int irq, void *dev_instance) +{ + struct bnx2_napi *bnapi = dev_instance; + struct bnx2 *bp = bnapi->bp; + struct status_block *sblk = bnapi->status_blk.msi; + + /* When using INTx, it is possible for the interrupt to arrive + * at the CPU before the status block posted prior to the + * interrupt. Reading a register will flush the status block. + * When using MSI, the MSI message will always complete after + * the status block write. + */ + if ((sblk->status_idx == bnapi->last_status_idx) && + (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) & + BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) + return IRQ_NONE; + + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + + /* Read back to deassert IRQ immediately to avoid too many + * spurious interrupts. + */ + BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD); + + /* Return here if interrupt is shared and is disabled. 
*/ + if (unlikely(atomic_read(&bp->intr_sem) != 0)) + return IRQ_HANDLED; + + if (napi_schedule_prep(&bnapi->napi)) { + bnapi->last_status_idx = sblk->status_idx; + __napi_schedule(&bnapi->napi); + } + + return IRQ_HANDLED; +} + +static inline int +bnx2_has_fast_work(struct bnx2_napi *bnapi) +{ + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) || + (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)) + return 1; + return 0; +} + +#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \ + STATUS_ATTN_BITS_TIMER_ABORT) + +static inline int +bnx2_has_work(struct bnx2_napi *bnapi) +{ + struct status_block *sblk = bnapi->status_blk.msi; + + if (bnx2_has_fast_work(bnapi)) + return 1; + +#ifdef BCM_CNIC + if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx)) + return 1; +#endif + + if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) != + (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS)) + return 1; + + return 0; +} + +static void +bnx2_chk_missed_msi(struct bnx2 *bp) +{ + struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; + u32 msi_ctrl; + + if (bnx2_has_work(bnapi)) { + msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL); + if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE)) + return; + + if (bnapi->last_status_idx == bp->idle_chk_status_idx) { + BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & + ~BNX2_PCICFG_MSI_CONTROL_ENABLE); + BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); + bnx2_msi(bp->irq_tbl[0].vector, bnapi); + } + } + + bp->idle_chk_status_idx = bnapi->last_status_idx; +} + +#ifdef BCM_CNIC +static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + struct cnic_ops *c_ops; + + if (!bnapi->cnic_present) + return; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data, + bnapi->status_blk.msi); + rcu_read_unlock(); +} +#endif + +static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi) +{ + struct status_block *sblk = bnapi->status_blk.msi; + u32 status_attn_bits = sblk->status_attn_bits; + u32 status_attn_bits_ack = sblk->status_attn_bits_ack; + + if ((status_attn_bits & STATUS_ATTN_EVENTS) != + (status_attn_bits_ack & STATUS_ATTN_EVENTS)) { + + bnx2_phy_int(bp, bnapi); + + /* This is needed to take care of transient status + * during link changes. + */ + BNX2_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + BNX2_RD(bp, BNX2_HC_COMMAND); + } +} + +static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi, + int work_done, int budget) +{ + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons) + bnx2_tx_int(bp, bnapi, 0); + + if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) + work_done += bnx2_rx_int(bp, bnapi, budget - work_done); + + return work_done; +} + +static int bnx2_poll_msix(struct napi_struct *napi, int budget) +{ + struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi); + struct bnx2 *bp = bnapi->bp; + int work_done = 0; + struct status_block_msix *sblk = bnapi->status_blk.msix; + + while (1) { + work_done = bnx2_poll_work(bp, bnapi, work_done, budget); + if (unlikely(work_done >= budget)) + break; + + bnapi->last_status_idx = sblk->status_idx; + /* status idx must be read before checking for more work. 
*/ + rmb(); + if (likely(!bnx2_has_fast_work(bnapi))) { + + napi_complete(napi); + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + break; + } + } + return work_done; +} + +static int bnx2_poll(struct napi_struct *napi, int budget) +{ + struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi); + struct bnx2 *bp = bnapi->bp; + int work_done = 0; + struct status_block *sblk = bnapi->status_blk.msi; + + while (1) { + bnx2_poll_link(bp, bnapi); + + work_done = bnx2_poll_work(bp, bnapi, work_done, budget); + +#ifdef BCM_CNIC + bnx2_poll_cnic(bp, bnapi); +#endif + + /* bnapi->last_status_idx is used below to tell the hw how + * much work has been processed, so we must read it before + * checking for more work. + */ + bnapi->last_status_idx = sblk->status_idx; + + if (unlikely(work_done >= budget)) + break; + + rmb(); + if (likely(!bnx2_has_work(bnapi))) { + napi_complete(napi); + if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + break; + } + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | + bnapi->last_status_idx); + + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bnapi->last_status_idx); + break; + } + } + + return work_done; +} + +/* Called with rtnl_lock from vlan functions and also netif_tx_lock + * from set_multicast. + */ +static void +bnx2_set_rx_mode(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + u32 rx_mode, sort_mode; + struct netdev_hw_addr *ha; + int i; + + if (!netif_running(dev)) + return; + + spin_lock_bh(&bp->phy_lock); + + rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | + BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); + sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; + if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; + sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | + BNX2_RPM_SORT_USER0_PROM_VLAN; + } + else if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); + } + sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; + } + else { + /* Accept one or more multicast(s). 
*/ + u32 mc_filter[NUM_MC_HASH_REGISTERS]; + u32 regidx; + u32 bit; + u32 crc; + + memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + bit = crc & 0xff; + regidx = (bit & 0xe0) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + mc_filter[i]); + } + + sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; + } + + if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) { + rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; + sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN | + BNX2_RPM_SORT_USER0_PROM_VLAN; + } else if (!(dev->flags & IFF_PROMISC)) { + /* Add all entries into to the match filter list */ + i = 0; + netdev_for_each_uc_addr(ha, dev) { + bnx2_set_mac_addr(bp, ha->addr, + i + BNX2_START_UNICAST_ADDRESS_INDEX); + sort_mode |= (1 << + (i + BNX2_START_UNICAST_ADDRESS_INDEX)); + i++; + } + + } + + if (rx_mode != bp->rx_mode) { + bp->rx_mode = rx_mode; + BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); + } + + BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); + + spin_unlock_bh(&bp->phy_lock); +} + +static int +check_fw_section(const struct firmware *fw, + const struct bnx2_fw_file_section *section, + u32 alignment, bool non_empty) +{ + u32 offset = be32_to_cpu(section->offset); + u32 len = be32_to_cpu(section->len); + + if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3) + return -EINVAL; + if ((non_empty && len == 0) || len > fw->size - offset || + len & (alignment - 1)) + return -EINVAL; + return 0; +} + +static int +check_mips_fw_entry(const struct firmware *fw, + const struct bnx2_mips_fw_file_entry *entry) +{ + if (check_fw_section(fw, &entry->text, 4, true) || + check_fw_section(fw, &entry->data, 4, false) || + check_fw_section(fw, &entry->rodata, 4, false)) + return -EINVAL; + return 0; +} + +static void bnx2_release_firmware(struct bnx2 *bp) +{ + if (bp->rv2p_firmware) { + release_firmware(bp->mips_firmware); + release_firmware(bp->rv2p_firmware); + bp->rv2p_firmware = NULL; + } +} + +static int bnx2_request_uncached_firmware(struct bnx2 *bp) +{ + const char *mips_fw_file, *rv2p_fw_file; + const struct bnx2_mips_fw_file *mips_fw; + const struct bnx2_rv2p_fw_file *rv2p_fw; + int rc; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + mips_fw_file = FW_MIPS_FILE_09; + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1)) + rv2p_fw_file = FW_RV2P_FILE_09_Ax; + else + rv2p_fw_file = FW_RV2P_FILE_09; + } else { + mips_fw_file = FW_MIPS_FILE_06; + rv2p_fw_file = FW_RV2P_FILE_06; + } + + rc = reject_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev); + if (rc) { + pr_err("Can't load firmware file \"%s\"\n", mips_fw_file); + goto out; + } + + rc = reject_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev); + if (rc) { + pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file); + goto err_release_mips_firmware; + } + mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; + rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; + if (bp->mips_firmware->size < sizeof(*mips_fw) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || + 
check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { + pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file); + rc = -EINVAL; + goto err_release_firmware; + } + if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || + check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) || + check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { + pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file); + rc = -EINVAL; + goto err_release_firmware; + } +out: + return rc; + +err_release_firmware: + release_firmware(bp->rv2p_firmware); + bp->rv2p_firmware = NULL; +err_release_mips_firmware: + release_firmware(bp->mips_firmware); + goto out; +} + +static int bnx2_request_firmware(struct bnx2 *bp) +{ + return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp); +} + +static u32 +rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code) +{ + switch (idx) { + case RV2P_P1_FIXUP_PAGE_SIZE_IDX: + rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK; + rv2p_code |= RV2P_BD_PAGE_SIZE; + break; + } + return rv2p_code; +} + +static int +load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, + const struct bnx2_rv2p_fw_file_entry *fw_entry) +{ + u32 rv2p_code_len, file_offset; + __be32 *rv2p_code; + int i; + u32 val, cmd, addr; + + rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len); + file_offset = be32_to_cpu(fw_entry->rv2p.offset); + + rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); + + if (rv2p_proc == RV2P_PROC1) { + cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR; + addr = BNX2_RV2P_PROC1_ADDR_CMD; + } else { + cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR; + addr = BNX2_RV2P_PROC2_ADDR_CMD; + } + + for (i = 0; i < rv2p_code_len; i += 8) { + BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); + rv2p_code++; + BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); + rv2p_code++; + + val = (i / 8) | cmd; + BNX2_WR(bp, addr, val); + } + + rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); + for (i = 0; i < 8; i++) { + u32 loc, code; + + loc = be32_to_cpu(fw_entry->fixup[i]); + if (loc && ((loc * 4) < rv2p_code_len)) { + code = be32_to_cpu(*(rv2p_code + loc - 1)); + BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code); + code = be32_to_cpu(*(rv2p_code + loc)); + code = rv2p_fw_fixup(rv2p_proc, i, loc, code); + BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code); + + val = (loc / 2) | cmd; + BNX2_WR(bp, addr, val); + } + } + + /* Reset the processor, un-stall is done later. */ + if (rv2p_proc == RV2P_PROC1) { + BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); + } + else { + BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); + } + + return 0; +} + +static int +load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, + const struct bnx2_mips_fw_file_entry *fw_entry) +{ + u32 addr, len, file_offset; + __be32 *data; + u32 offset; + u32 val; + + /* Halt the CPU. */ + val = bnx2_reg_rd_ind(bp, cpu_reg->mode); + val |= cpu_reg->mode_value_halt; + bnx2_reg_wr_ind(bp, cpu_reg->mode, val); + bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); + + /* Load the Text area. */ + addr = be32_to_cpu(fw_entry->text.addr); + len = be32_to_cpu(fw_entry->text.len); + file_offset = be32_to_cpu(fw_entry->text.offset); + data = (__be32 *)(bp->mips_firmware->data + file_offset); + + offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); + if (len) { + int j; + + for (j = 0; j < (len / 4); j++, offset += 4) + bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); + } + + /* Load the Data area. 
*/ + addr = be32_to_cpu(fw_entry->data.addr); + len = be32_to_cpu(fw_entry->data.len); + file_offset = be32_to_cpu(fw_entry->data.offset); + data = (__be32 *)(bp->mips_firmware->data + file_offset); + + offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); + if (len) { + int j; + + for (j = 0; j < (len / 4); j++, offset += 4) + bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); + } + + /* Load the Read-Only area. */ + addr = be32_to_cpu(fw_entry->rodata.addr); + len = be32_to_cpu(fw_entry->rodata.len); + file_offset = be32_to_cpu(fw_entry->rodata.offset); + data = (__be32 *)(bp->mips_firmware->data + file_offset); + + offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base); + if (len) { + int j; + + for (j = 0; j < (len / 4); j++, offset += 4) + bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j])); + } + + /* Clear the pre-fetch instruction. */ + bnx2_reg_wr_ind(bp, cpu_reg->inst, 0); + + val = be32_to_cpu(fw_entry->start_addr); + bnx2_reg_wr_ind(bp, cpu_reg->pc, val); + + /* Start the CPU. */ + val = bnx2_reg_rd_ind(bp, cpu_reg->mode); + val &= ~cpu_reg->mode_value_halt; + bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); + bnx2_reg_wr_ind(bp, cpu_reg->mode, val); + + return 0; +} + +static int +bnx2_init_cpus(struct bnx2 *bp) +{ + const struct bnx2_mips_fw_file *mips_fw = + (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; + const struct bnx2_rv2p_fw_file *rv2p_fw = + (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; + int rc; + + /* Initialize the RV2P processor. */ + load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1); + load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2); + + /* Initialize the RX Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp); + if (rc) + goto init_cpu_err; + + /* Initialize the TX Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp); + if (rc) + goto init_cpu_err; + + /* Initialize the TX Patch-up Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat); + if (rc) + goto init_cpu_err; + + /* Initialize the Completion Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com); + if (rc) + goto init_cpu_err; + + /* Initialize the Command Processor. */ + rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp); + +init_cpu_err: + return rc; +} + +static void +bnx2_setup_wol(struct bnx2 *bp) +{ + int i; + u32 val, wol_msg; + + if (bp->wol) { + u32 advertising; + u8 autoneg; + + autoneg = bp->autoneg; + advertising = bp->advertising; + + if (bp->phy_port == PORT_TP) { + bp->autoneg = AUTONEG_SPEED; + bp->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg; + } + + spin_lock_bh(&bp->phy_lock); + bnx2_setup_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + + bp->autoneg = autoneg; + bp->advertising = advertising; + + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + + val = BNX2_RD(bp, BNX2_EMAC_MODE); + + /* Enable port mode. 
*/ + val &= ~BNX2_EMAC_MODE_PORT; + val |= BNX2_EMAC_MODE_MPKT_RCVD | + BNX2_EMAC_MODE_ACPI_RCVD | + BNX2_EMAC_MODE_MPKT; + if (bp->phy_port == PORT_TP) { + val |= BNX2_EMAC_MODE_PORT_MII; + } else { + val |= BNX2_EMAC_MODE_PORT_GMII; + if (bp->line_speed == SPEED_2500) + val |= BNX2_EMAC_MODE_25G_MODE; + } + + BNX2_WR(bp, BNX2_EMAC_MODE, val); + + /* receive all multicast */ + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); + } + BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE); + + val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN; + BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA); + + /* Need to enable EMAC and RPM for WOL. */ + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); + + val = BNX2_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + BNX2_WR(bp, BNX2_RPM_CONFIG, val); + + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; + } else { + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; + } + + if (!(bp->flags & BNX2_FLAG_NO_WOL)) { + u32 val; + + wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; + if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { + bnx2_fw_sync(bp, wol_msg, 1, 0); + return; + } + /* Tell firmware not to power down the PHY yet, otherwise + * the chip will take a long time to respond to MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, + val | BNX2_PORT_FEATURE_ASF_ENABLED); + bnx2_fw_sync(bp, wol_msg, 1, 0); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); + } + +} + +static int +bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) +{ + switch (state) { + case PCI_D0: { + u32 val; + + pci_enable_wake(bp->pdev, PCI_D0, false); + pci_set_power_state(bp->pdev, PCI_D0); + + val = BNX2_RD(bp, BNX2_EMAC_MODE); + val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; + val &= ~BNX2_EMAC_MODE_MPKT; + BNX2_WR(bp, BNX2_EMAC_MODE, val); + + val = BNX2_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + BNX2_WR(bp, BNX2_RPM_CONFIG, val); + break; + } + case PCI_D3hot: { + bnx2_setup_wol(bp); + pci_wake_from_d3(bp->pdev, bp->wol); + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { + + if (bp->wol) + pci_set_power_state(bp->pdev, PCI_D3hot); + break; + + } + if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { + u32 val; + + /* Tell firmware not to power down the PHY yet, + * otherwise the other port may not respond to + * MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + val &= ~BNX2_CONDITION_PM_STATE_MASK; + val |= BNX2_CONDITION_PM_STATE_UNPREP; + bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); + } + pci_set_power_state(bp->pdev, PCI_D3hot); + + /* No more memory access after this point until + * device is brought back to D0. + */ + break; + } + default: + return -EINVAL; + } + return 0; +} + +static int +bnx2_acquire_nvram_lock(struct bnx2 *bp) +{ + u32 val; + int j; + + /* Request access to the flash interface. 
*/ + BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + val = BNX2_RD(bp, BNX2_NVM_SW_ARB); + if (val & BNX2_NVM_SW_ARB_ARB_ARB2) + break; + + udelay(5); + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + +static int +bnx2_release_nvram_lock(struct bnx2 *bp) +{ + int j; + u32 val; + + /* Relinquish nvram interface. */ + BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); + + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + val = BNX2_RD(bp, BNX2_NVM_SW_ARB); + if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2)) + break; + + udelay(5); + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + + +static int +bnx2_enable_nvram_write(struct bnx2 *bp) +{ + u32 val; + + val = BNX2_RD(bp, BNX2_MISC_CFG); + BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); + + if (bp->flash_info->flags & BNX2_NV_WREN) { + int j; + + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + BNX2_WR(bp, BNX2_NVM_COMMAND, + BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); + + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + udelay(5); + + val = BNX2_RD(bp, BNX2_NVM_COMMAND); + if (val & BNX2_NVM_COMMAND_DONE) + break; + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + } + return 0; +} + +static void +bnx2_disable_nvram_write(struct bnx2 *bp) +{ + u32 val; + + val = BNX2_RD(bp, BNX2_MISC_CFG); + BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); +} + + +static void +bnx2_enable_nvram_access(struct bnx2 *bp) +{ + u32 val; + + val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); + /* Enable both bits, even on read. */ + BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, + val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); +} + +static void +bnx2_disable_nvram_access(struct bnx2 *bp) +{ + u32 val; + + val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE); + /* Disable both bits, even after read. */ + BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE, + val & ~(BNX2_NVM_ACCESS_ENABLE_EN | + BNX2_NVM_ACCESS_ENABLE_WR_EN)); +} + +static int +bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset) +{ + u32 cmd; + int j; + + if (bp->flash_info->flags & BNX2_NV_BUFFERED) + /* Buffered flash, no erase needed */ + return 0; + + /* Build an erase command */ + cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR | + BNX2_NVM_COMMAND_DOIT; + + /* Need to clear DONE bit separately. */ + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + + /* Address of the NVRAM to read from. */ + BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + + /* Issue an erase command. */ + BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); + + /* Wait for completion. */ + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + u32 val; + + udelay(5); + + val = BNX2_RD(bp, BNX2_NVM_COMMAND); + if (val & BNX2_NVM_COMMAND_DONE) + break; + } + + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + +static int +bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags) +{ + u32 cmd; + int j; + + /* Build the command word. */ + cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags; + + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { + offset = ((offset / bp->flash_info->page_size) << + bp->flash_info->page_bits) + + (offset % bp->flash_info->page_size); + } + + /* Need to clear DONE bit separately. */ + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + + /* Address of the NVRAM to read from. */ + BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + + /* Issue a read command. 
*/ + BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); + + /* Wait for completion. */ + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + u32 val; + + udelay(5); + + val = BNX2_RD(bp, BNX2_NVM_COMMAND); + if (val & BNX2_NVM_COMMAND_DONE) { + __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ)); + memcpy(ret_val, &v, 4); + break; + } + } + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + + +static int +bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags) +{ + u32 cmd; + __be32 val32; + int j; + + /* Build the command word. */ + cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags; + + /* Calculate an offset of a buffered flash, not needed for 5709. */ + if (bp->flash_info->flags & BNX2_NV_TRANSLATE) { + offset = ((offset / bp->flash_info->page_size) << + bp->flash_info->page_bits) + + (offset % bp->flash_info->page_size); + } + + /* Need to clear DONE bit separately. */ + BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); + + memcpy(&val32, val, 4); + + /* Write the data. */ + BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); + + /* Address of the NVRAM to write to. */ + BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); + + /* Issue the write command. */ + BNX2_WR(bp, BNX2_NVM_COMMAND, cmd); + + /* Wait for completion. */ + for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { + udelay(5); + + if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) + break; + } + if (j >= NVRAM_TIMEOUT_COUNT) + return -EBUSY; + + return 0; +} + +static int +bnx2_init_nvram(struct bnx2 *bp) +{ + u32 val; + int j, entry_count, rc = 0; + const struct flash_spec *flash; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + bp->flash_info = &flash_5709; + goto get_flash_size; + } + + /* Determine the selected interface. */ + val = BNX2_RD(bp, BNX2_NVM_CFG1); + + entry_count = ARRAY_SIZE(flash_table); + + if (val & 0x40000000) { + + /* Flash interface has been reconfigured */ + for (j = 0, flash = &flash_table[0]; j < entry_count; + j++, flash++) { + if ((val & FLASH_BACKUP_STRAP_MASK) == + (flash->config1 & FLASH_BACKUP_STRAP_MASK)) { + bp->flash_info = flash; + break; + } + } + } + else { + u32 mask; + /* Not yet been reconfigured */ + + if (val & (1 << 23)) + mask = FLASH_BACKUP_STRAP_MASK; + else + mask = FLASH_STRAP_MASK; + + for (j = 0, flash = &flash_table[0]; j < entry_count; + j++, flash++) { + + if ((val & mask) == (flash->strapping & mask)) { + bp->flash_info = flash; + + /* Request access to the flash interface. 
*/ + if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) + return rc; + + /* Enable access to flash interface */ + bnx2_enable_nvram_access(bp); + + /* Reconfigure the flash interface */ + BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1); + BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2); + BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3); + BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1); + + /* Disable access to flash interface */ + bnx2_disable_nvram_access(bp); + bnx2_release_nvram_lock(bp); + + break; + } + } + } /* if (val & 0x40000000) */ + + if (j == entry_count) { + bp->flash_info = NULL; + pr_alert("Unknown flash/EEPROM type\n"); + return -ENODEV; + } + +get_flash_size: + val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2); + val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK; + if (val) + bp->flash_size = val; + else + bp->flash_size = bp->flash_info->total_size; + + return rc; +} + +static int +bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf, + int buf_size) +{ + int rc = 0; + u32 cmd_flags, offset32, len32, extra; + + if (buf_size == 0) + return 0; + + /* Request access to the flash interface. */ + if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) + return rc; + + /* Enable access to flash interface */ + bnx2_enable_nvram_access(bp); + + len32 = buf_size; + offset32 = offset; + extra = 0; + + cmd_flags = 0; + + if (offset32 & 3) { + u8 buf[4]; + u32 pre_len; + + offset32 &= ~3; + pre_len = 4 - (offset & 3); + + if (pre_len >= len32) { + pre_len = len32; + cmd_flags = BNX2_NVM_COMMAND_FIRST | + BNX2_NVM_COMMAND_LAST; + } + else { + cmd_flags = BNX2_NVM_COMMAND_FIRST; + } + + rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); + + if (rc) + return rc; + + memcpy(ret_buf, buf + (offset & 3), pre_len); + + offset32 += 4; + ret_buf += pre_len; + len32 -= pre_len; + } + if (len32 & 3) { + extra = 4 - (len32 & 3); + len32 = (len32 + 4) & ~3; + } + + if (len32 == 4) { + u8 buf[4]; + + if (cmd_flags) + cmd_flags = BNX2_NVM_COMMAND_LAST; + else + cmd_flags = BNX2_NVM_COMMAND_FIRST | + BNX2_NVM_COMMAND_LAST; + + rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); + + memcpy(ret_buf, buf, 4 - extra); + } + else if (len32 > 0) { + u8 buf[4]; + + /* Read the first word. */ + if (cmd_flags) + cmd_flags = 0; + else + cmd_flags = BNX2_NVM_COMMAND_FIRST; + + rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags); + + /* Advance to the next dword. */ + offset32 += 4; + ret_buf += 4; + len32 -= 4; + + while (len32 > 4 && rc == 0) { + rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0); + + /* Advance to the next dword. 
*/ + offset32 += 4; + ret_buf += 4; + len32 -= 4; + } + + if (rc) + return rc; + + cmd_flags = BNX2_NVM_COMMAND_LAST; + rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags); + + memcpy(ret_buf, buf, 4 - extra); + } + + /* Disable access to flash interface */ + bnx2_disable_nvram_access(bp); + + bnx2_release_nvram_lock(bp); + + return rc; +} + +static int +bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + u32 written, offset32, len32; + u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL; + int rc = 0; + int align_start, align_end; + + buf = data_buf; + offset32 = offset; + len32 = buf_size; + align_start = align_end = 0; + + if ((align_start = (offset32 & 3))) { + offset32 &= ~3; + len32 += align_start; + if (len32 < 4) + len32 = 4; + if ((rc = bnx2_nvram_read(bp, offset32, start, 4))) + return rc; + } + + if (len32 & 3) { + align_end = 4 - (len32 & 3); + len32 += align_end; + if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4))) + return rc; + } + + if (align_start || align_end) { + align_buf = kmalloc(len32, GFP_KERNEL); + if (align_buf == NULL) + return -ENOMEM; + if (align_start) { + memcpy(align_buf, start, 4); + } + if (align_end) { + memcpy(align_buf + len32 - 4, end, 4); + } + memcpy(align_buf + align_start, data_buf, buf_size); + buf = align_buf; + } + + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + flash_buffer = kmalloc(264, GFP_KERNEL); + if (flash_buffer == NULL) { + rc = -ENOMEM; + goto nvram_write_end; + } + } + + written = 0; + while ((written < len32) && (rc == 0)) { + u32 page_start, page_end, data_start, data_end; + u32 addr, cmd_flags; + int i; + + /* Find the page_start addr */ + page_start = offset32 + written; + page_start -= (page_start % bp->flash_info->page_size); + /* Find the page_end addr */ + page_end = page_start + bp->flash_info->page_size; + /* Find the data_start addr */ + data_start = (written == 0) ? offset32 : page_start; + /* Find the data_end addr */ + data_end = (page_end > offset32 + len32) ? + (offset32 + len32) : page_end; + + /* Request access to the flash interface. 
*/ + if ((rc = bnx2_acquire_nvram_lock(bp)) != 0) + goto nvram_write_end; + + /* Enable access to flash interface */ + bnx2_enable_nvram_access(bp); + + cmd_flags = BNX2_NVM_COMMAND_FIRST; + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + int j; + + /* Read the whole page into the buffer + * (non-buffer flash only) */ + for (j = 0; j < bp->flash_info->page_size; j += 4) { + if (j == (bp->flash_info->page_size - 4)) { + cmd_flags |= BNX2_NVM_COMMAND_LAST; + } + rc = bnx2_nvram_read_dword(bp, + page_start + j, + &flash_buffer[j], + cmd_flags); + + if (rc) + goto nvram_write_end; + + cmd_flags = 0; + } + } + + /* Enable writes to flash interface (unlock write-protect) */ + if ((rc = bnx2_enable_nvram_write(bp)) != 0) + goto nvram_write_end; + + /* Loop to write back the buffer data from page_start to + * data_start */ + i = 0; + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + /* Erase the page */ + if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0) + goto nvram_write_end; + + /* Re-enable the write again for the actual write */ + bnx2_enable_nvram_write(bp); + + for (addr = page_start; addr < data_start; + addr += 4, i += 4) { + + rc = bnx2_nvram_write_dword(bp, addr, + &flash_buffer[i], cmd_flags); + + if (rc != 0) + goto nvram_write_end; + + cmd_flags = 0; + } + } + + /* Loop to write the new data from data_start to data_end */ + for (addr = data_start; addr < data_end; addr += 4, i += 4) { + if ((addr == page_end - 4) || + ((bp->flash_info->flags & BNX2_NV_BUFFERED) && + (addr == data_end - 4))) { + + cmd_flags |= BNX2_NVM_COMMAND_LAST; + } + rc = bnx2_nvram_write_dword(bp, addr, buf, + cmd_flags); + + if (rc != 0) + goto nvram_write_end; + + cmd_flags = 0; + buf += 4; + } + + /* Loop to write back the buffer data from data_end + * to page_end */ + if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { + for (addr = data_end; addr < page_end; + addr += 4, i += 4) { + + if (addr == page_end-4) { + cmd_flags = BNX2_NVM_COMMAND_LAST; + } + rc = bnx2_nvram_write_dword(bp, addr, + &flash_buffer[i], cmd_flags); + + if (rc != 0) + goto nvram_write_end; + + cmd_flags = 0; + } + } + + /* Disable writes to flash interface (lock write-protect) */ + bnx2_disable_nvram_write(bp); + + /* Disable access to flash interface */ + bnx2_disable_nvram_access(bp); + bnx2_release_nvram_lock(bp); + + /* Increment written */ + written += data_end - data_start; + } + +nvram_write_end: + kfree(flash_buffer); + kfree(align_buf); + return rc; +} + +static void +bnx2_init_fw_cap(struct bnx2 *bp) +{ + u32 val, sig = 0; + + bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP; + bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN; + + if (!(bp->flags & BNX2_FLAG_ASF_ENABLE)) + bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; + + val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB); + if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE) + return; + + if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) { + bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN; + sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN; + } + + if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && + (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) { + u32 link; + + bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP; + + link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS); + if (link & BNX2_LINK_STATUS_SERDES_LINK) + bp->phy_port = PORT_FIBRE; + else + bp->phy_port = PORT_TP; + + sig |= BNX2_DRV_ACK_CAP_SIGNATURE | + BNX2_FW_CAP_REMOTE_PHY_CAPABLE; + } + + if (netif_running(bp->dev) && sig) + bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig); +} + +static void +bnx2_setup_msix_tbl(struct bnx2 
*bp) +{ + BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); + + BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); + BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); +} + +static int +bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) +{ + u32 val; + int i, rc = 0; + u8 old_port; + + /* Wait for the current PCI transaction to complete before + * issuing a reset. */ + if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || + (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { + BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, + BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | + BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); + val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); + udelay(5); + } else { /* 5709 */ + val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); + val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; + BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); + val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); + + for (i = 0; i < 100; i++) { + msleep(1); + val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); + if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND)) + break; + } + } + + /* Wait for the firmware to tell us it is ok to issue a reset. */ + bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); + + /* Deposit a driver reset signature so the firmware knows that + * this is a soft reset. */ + bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE, + BNX2_DRV_RESET_SIGNATURE_MAGIC); + + /* Do a dummy read to force the chip to complete all current transaction + * before we issue a reset. */ + val = BNX2_RD(bp, BNX2_MISC_ID); + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); + BNX2_RD(bp, BNX2_MISC_COMMAND); + udelay(5); + + val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | + BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; + + BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); + + } else { + val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | + BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | + BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; + + /* Chip reset. */ + BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); + + /* Reading back any register after chip reset will hang the + * bus on 5706 A0 and A1. The msleep below provides plenty + * of margin for write posting. + */ + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) + msleep(20); + + /* Reset takes approximate 30 usec */ + for (i = 0; i < 10; i++) { + val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG); + if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | + BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) + break; + udelay(10); + } + + if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | + BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) { + pr_err("Chip reset did not complete\n"); + return -EBUSY; + } + } + + /* Make sure byte swapping is properly configured. */ + val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0); + if (val != 0x01020304) { + pr_err("Chip not in correct endian mode\n"); + return -ENODEV; + } + + /* Wait for the firmware to finish its initialization. */ + rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0); + if (rc) + return rc; + + spin_lock_bh(&bp->phy_lock); + old_port = bp->phy_port; + bnx2_init_fw_cap(bp); + if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) && + old_port != bp->phy_port) + bnx2_set_default_remote_link(bp); + spin_unlock_bh(&bp->phy_lock); + + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { + /* Adjust the voltage regular to two steps lower. The default + * of this register is 0x0000000e. 
*/ + BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); + + /* Remove bad rbuf memory from the free pool. */ + rc = bnx2_alloc_bad_rbuf(bp); + } + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + bnx2_setup_msix_tbl(bp); + /* Prevent MSIX table reads and write from timing out */ + BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL, + BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); + } + + return rc; +} + +static int +bnx2_init_chip(struct bnx2 *bp) +{ + u32 val, mtu; + int rc, i; + + /* Make sure the interrupt is not active. */ + BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + + val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP | + BNX2_DMA_CONFIG_DATA_WORD_SWAP | +#ifdef __BIG_ENDIAN + BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | +#endif + BNX2_DMA_CONFIG_CNTL_WORD_SWAP | + DMA_READ_CHANS << 12 | + DMA_WRITE_CHANS << 16; + + val |= (0x2 << 20) | (1 << 11); + + if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133)) + val |= (1 << 23); + + if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) && + (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) && + !(bp->flags & BNX2_FLAG_PCIX)) + val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA; + + BNX2_WR(bp, BNX2_DMA_CONFIG, val); + + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { + val = BNX2_RD(bp, BNX2_TDMA_CONFIG); + val |= BNX2_TDMA_CONFIG_ONE_DMA; + BNX2_WR(bp, BNX2_TDMA_CONFIG, val); + } + + if (bp->flags & BNX2_FLAG_PCIX) { + u16 val16; + + pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, + &val16); + pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD, + val16 & ~PCI_X_CMD_ERO); + } + + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | + BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | + BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); + + /* Initialize context mapping and zero out the quick contexts. The + * context block must have already been enabled. */ + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + rc = bnx2_init_5709_context(bp); + if (rc) + return rc; + } else + bnx2_init_context(bp); + + if ((rc = bnx2_init_cpus(bp)) != 0) + return rc; + + bnx2_init_nvram(bp); + + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + + val = BNX2_RD(bp, BNX2_MQ_CONFIG); + val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; + val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + val |= BNX2_MQ_CONFIG_BIN_MQ_MODE; + if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) + val |= BNX2_MQ_CONFIG_HALT_DIS; + } + + BNX2_WR(bp, BNX2_MQ_CONFIG, val); + + val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); + BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); + BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val); + + val = (BNX2_PAGE_BITS - 8) << 24; + BNX2_WR(bp, BNX2_RV2P_CONFIG, val); + + /* Configure page size. */ + val = BNX2_RD(bp, BNX2_TBDR_CONFIG); + val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE; + val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40; + BNX2_WR(bp, BNX2_TBDR_CONFIG, val); + + val = bp->mac_addr[0] + + (bp->mac_addr[1] << 8) + + (bp->mac_addr[2] << 16) + + bp->mac_addr[3] + + (bp->mac_addr[4] << 8) + + (bp->mac_addr[5] << 16); + BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); + + /* Program the MTU. Also include 4 bytes for CRC32. 
*/ + mtu = bp->dev->mtu; + val = mtu + ETH_HLEN + ETH_FCS_LEN; + if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) + val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; + BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); + + if (mtu < 1500) + mtu = 1500; + + bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu)); + bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu)); + bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu)); + + memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size); + for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) + bp->bnx2_napi[i].last_status_idx = 0; + + bp->idle_chk_status_idx = 0xffff; + + /* Set up how to generate a link change interrupt. */ + BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); + + BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L, + (u64) bp->status_blk_mapping & 0xffffffff); + BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); + + BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L, + (u64) bp->stats_blk_mapping & 0xffffffff); + BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H, + (u64) bp->stats_blk_mapping >> 32); + + BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, + (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); + + BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, + (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); + + BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP, + (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); + + BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); + + BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); + + BNX2_WR(bp, BNX2_HC_COM_TICKS, + (bp->com_ticks_int << 16) | bp->com_ticks); + + BNX2_WR(bp, BNX2_HC_CMD_TICKS, + (bp->cmd_ticks_int << 16) | bp->cmd_ticks); + + if (bp->flags & BNX2_FLAG_BROKEN_STATS) + BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0); + else + BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); + BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ + + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) + val = BNX2_HC_CONFIG_COLLECT_STATS; + else { + val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE | + BNX2_HC_CONFIG_COLLECT_STATS; + } + + if (bp->flags & BNX2_FLAG_USING_MSIX) { + BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, + BNX2_HC_MSIX_BIT_VECTOR_VAL); + + val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B; + } + + if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) + val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM; + + BNX2_WR(bp, BNX2_HC_CONFIG, val); + + if (bp->rx_ticks < 25) + bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1); + else + bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0); + + for (i = 1; i < bp->irq_nvecs; i++) { + u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) + + BNX2_HC_SB_CONFIG_1; + + BNX2_WR(bp, base, + BNX2_HC_SB_CONFIG_1_TX_TMR_MODE | + BNX2_HC_SB_CONFIG_1_RX_TMR_MODE | + BNX2_HC_SB_CONFIG_1_ONE_SHOT); + + BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, + (bp->tx_quick_cons_trip_int << 16) | + bp->tx_quick_cons_trip); + + BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF, + (bp->tx_ticks_int << 16) | bp->tx_ticks); + + BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, + (bp->rx_quick_cons_trip_int << 16) | + bp->rx_quick_cons_trip); + + BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF, + (bp->rx_ticks_int << 16) | bp->rx_ticks); + } + + /* Clear internal stats counters. */ + BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); + + BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); + + /* Initialize the receive filter. 
*/ + bnx2_set_rx_mode(bp->dev); + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL); + val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; + BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); + } + rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, + 1, 0); + + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); + BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS); + + udelay(20); + + bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND); + + return rc; +} + +static void +bnx2_clear_ring_states(struct bnx2 *bp) +{ + struct bnx2_napi *bnapi; + struct bnx2_tx_ring_info *txr; + struct bnx2_rx_ring_info *rxr; + int i; + + for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { + bnapi = &bp->bnx2_napi[i]; + txr = &bnapi->tx_ring; + rxr = &bnapi->rx_ring; + + txr->tx_cons = 0; + txr->hw_tx_cons = 0; + rxr->rx_prod_bseq = 0; + rxr->rx_prod = 0; + rxr->rx_cons = 0; + rxr->rx_pg_prod = 0; + rxr->rx_pg_cons = 0; + } +} + +static void +bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr) +{ + u32 val, offset0, offset1, offset2, offset3; + u32 cid_addr = GET_CID_ADDR(cid); + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + offset0 = BNX2_L2CTX_TYPE_XI; + offset1 = BNX2_L2CTX_CMD_TYPE_XI; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; + } else { + offset0 = BNX2_L2CTX_TYPE; + offset1 = BNX2_L2CTX_CMD_TYPE; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; + } + val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; + bnx2_ctx_wr(bp, cid_addr, offset0, val); + + val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); + bnx2_ctx_wr(bp, cid_addr, offset1, val); + + val = (u64) txr->tx_desc_mapping >> 32; + bnx2_ctx_wr(bp, cid_addr, offset2, val); + + val = (u64) txr->tx_desc_mapping & 0xffffffff; + bnx2_ctx_wr(bp, cid_addr, offset3, val); +} + +static void +bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) +{ + struct bnx2_tx_bd *txbd; + u32 cid = TX_CID; + struct bnx2_napi *bnapi; + struct bnx2_tx_ring_info *txr; + + bnapi = &bp->bnx2_napi[ring_num]; + txr = &bnapi->tx_ring; + + if (ring_num == 0) + cid = TX_CID; + else + cid = TX_TSS_CID + ring_num - 1; + + bp->tx_wake_thresh = bp->tx_ring_size / 2; + + txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT]; + + txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32; + txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff; + + txr->tx_prod = 0; + txr->tx_prod_bseq = 0; + + txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX; + txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ; + + bnx2_init_tx_context(bp, cid, txr); +} + +static void +bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[], + u32 buf_size, int num_rings) +{ + int i; + struct bnx2_rx_bd *rxbd; + + for (i = 0; i < num_rings; i++) { + int j; + + rxbd = &rx_ring[i][0]; + for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) { + rxbd->rx_bd_len = buf_size; + rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; + } + if (i == (num_rings - 1)) + j = 0; + else + j = i + 1; + rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32; + rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff; + } +} + +static void +bnx2_init_rx_ring(struct bnx2 *bp, int ring_num) +{ + int i; + u16 prod, ring_prod; + u32 cid, rx_cid_addr, val; + struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + if (ring_num == 0) + cid = RX_CID; + else + cid = RX_RSS_CID + ring_num - 1; + + rx_cid_addr = GET_CID_ADDR(cid); + + 
bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping, + bp->rx_buf_use_size, bp->rx_max_ring); + + bnx2_init_rx_context(bp, cid); + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5); + BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); + } + + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); + if (bp->rx_pg_ring_size) { + bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring, + rxr->rx_pg_desc_mapping, + PAGE_SIZE, bp->rx_max_pg_ring); + val = (bp->rx_buf_use_size << 16) | PAGE_SIZE; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val); + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY, + BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num); + + val = (u64) rxr->rx_pg_desc_mapping[0] >> 32; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val); + + val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); + } + + val = (u64) rxr->rx_desc_mapping[0] >> 32; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); + + val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff; + bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); + + ring_prod = prod = rxr->rx_pg_prod; + for (i = 0; i < bp->rx_pg_ring_size; i++) { + if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) { + netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n", + ring_num, i, bp->rx_pg_ring_size); + break; + } + prod = BNX2_NEXT_RX_BD(prod); + ring_prod = BNX2_RX_PG_RING_IDX(prod); + } + rxr->rx_pg_prod = prod; + + ring_prod = prod = rxr->rx_prod; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) { + netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", + ring_num, i, bp->rx_ring_size); + break; + } + prod = BNX2_NEXT_RX_BD(prod); + ring_prod = BNX2_RX_RING_IDX(prod); + } + rxr->rx_prod = prod; + + rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX; + rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ; + rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX; + + BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); + BNX2_WR16(bp, rxr->rx_bidx_addr, prod); + + BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); +} + +static void +bnx2_init_all_rings(struct bnx2 *bp) +{ + int i; + u32 val; + + bnx2_clear_ring_states(bp); + + BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0); + for (i = 0; i < bp->num_tx_rings; i++) + bnx2_init_tx_ring(bp, i); + + if (bp->num_tx_rings > 1) + BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | + (TX_TSS_CID << 7)); + + BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); + bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); + + for (i = 0; i < bp->num_rx_rings; i++) + bnx2_init_rx_ring(bp, i); + + if (bp->num_rx_rings > 1) { + u32 tbl_32 = 0; + + for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) { + int shift = (i % 8) << 2; + + tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; + if ((i % 8) == 7) { + BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); + BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | + BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK | + BNX2_RLUP_RSS_COMMAND_WRITE | + BNX2_RLUP_RSS_COMMAND_HASH_MASK); + tbl_32 = 0; + } + } + + val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI | + BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI; + + BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val); + + } +} + +static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size) +{ + u32 max, num_rings = 1; + + while 
(ring_size > BNX2_MAX_RX_DESC_CNT) { + ring_size -= BNX2_MAX_RX_DESC_CNT; + num_rings++; + } + /* round to next power of 2 */ + max = max_size; + while ((max & num_rings) == 0) + max >>= 1; + + if (num_rings != max) + max <<= 1; + + return max; +} + +static void +bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) +{ + u32 rx_size, rx_space, jumbo_size; + + /* 8 for CRC and VLAN */ + rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8; + + rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + bp->rx_copy_thresh = BNX2_RX_COPY_THRESH; + bp->rx_pg_ring_size = 0; + bp->rx_max_pg_ring = 0; + bp->rx_max_pg_ring_idx = 0; + if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) { + int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; + + jumbo_size = size * pages; + if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT) + jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT; + + bp->rx_pg_ring_size = jumbo_size; + bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, + BNX2_MAX_RX_PG_RINGS); + bp->rx_max_pg_ring_idx = + (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1; + rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET; + bp->rx_copy_thresh = 0; + } + + bp->rx_buf_use_size = rx_size; + /* hw alignment + build_skb() overhead*/ + bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + + NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; + bp->rx_ring_size = size; + bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS); + bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1; +} + +static void +bnx2_free_tx_skbs(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_tx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + int j; + + if (txr->tx_buf_ring == NULL) + continue; + + for (j = 0; j < BNX2_TX_DESC_CNT; ) { + struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; + struct sk_buff *skb = tx_buf->skb; + int k, last; + + if (skb == NULL) { + j = BNX2_NEXT_TX_BD(j); + continue; + } + + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + tx_buf->skb = NULL; + + last = tx_buf->nr_frags; + j = BNX2_NEXT_TX_BD(j); + for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) { + tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)]; + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[k]), + PCI_DMA_TODEVICE); + } + dev_kfree_skb(skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); + } +} + +static void +bnx2_free_rx_skbs(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->num_rx_rings; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + int j; + + if (rxr->rx_buf_ring == NULL) + return; + + for (j = 0; j < bp->rx_max_ring_idx; j++) { + struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j]; + u8 *data = rx_buf->data; + + if (data == NULL) + continue; + + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_use_size, + PCI_DMA_FROMDEVICE); + + rx_buf->data = NULL; + + kfree(data); + } + for (j = 0; j < bp->rx_max_pg_ring_idx; j++) + bnx2_free_rx_page(bp, rxr, j); + } +} + +static void +bnx2_free_skbs(struct bnx2 *bp) +{ + bnx2_free_tx_skbs(bp); + bnx2_free_rx_skbs(bp); +} + +static int +bnx2_reset_nic(struct bnx2 *bp, u32 reset_code) +{ + int rc; + + rc = bnx2_reset_chip(bp, reset_code); + 
bnx2_free_skbs(bp); + if (rc) + return rc; + + if ((rc = bnx2_init_chip(bp)) != 0) + return rc; + + bnx2_init_all_rings(bp); + return 0; +} + +static int +bnx2_init_nic(struct bnx2 *bp, int reset_phy) +{ + int rc; + + if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0) + return rc; + + spin_lock_bh(&bp->phy_lock); + bnx2_init_phy(bp, reset_phy); + bnx2_set_link(bp); + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + bnx2_remote_phy_event(bp); + spin_unlock_bh(&bp->phy_lock); + return 0; +} + +static int +bnx2_shutdown_chip(struct bnx2 *bp) +{ + u32 reset_code; + + if (bp->flags & BNX2_FLAG_NO_WOL) + reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; + else if (bp->wol) + reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; + else + reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; + + return bnx2_reset_chip(bp, reset_code); +} + +static int +bnx2_test_registers(struct bnx2 *bp) +{ + int ret; + int i, is_5709; + static const struct { + u16 offset; + u16 flags; +#define BNX2_FL_NOT_5709 1 + u32 rw_mask; + u32 ro_mask; + } reg_tbl[] = { + { 0x006c, 0, 0x00000000, 0x0000003f }, + { 0x0090, 0, 0xffffffff, 0x00000000 }, + { 0x0094, 0, 0x00000000, 0x00000000 }, + + { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 }, + { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff }, + { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 }, + { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, + { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff }, + { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + + { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff }, + { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 }, + + { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 }, + { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 }, + { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 }, + + { 0x1000, 0, 0x00000000, 0x00000001 }, + { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 }, + + { 0x1408, 0, 0x01c00800, 0x00000000 }, + { 0x149c, 0, 0x8000ffff, 0x00000000 }, + { 0x14a8, 0, 0x00000000, 0x000001ff }, + { 0x14ac, 0, 0x0fffffff, 0x10000000 }, + { 0x14b0, 0, 0x00000002, 0x00000001 }, + { 0x14b8, 0, 0x00000000, 0x00000000 }, + { 0x14c0, 0, 0x00000000, 0x00000009 }, + { 0x14c4, 0, 0x00003fff, 0x00000000 }, + { 0x14cc, 0, 0x00000000, 0x00000001 }, + { 0x14d0, 0, 0xffffffff, 0x00000000 }, + + { 0x1800, 0, 0x00000000, 0x00000001 }, + { 0x1804, 0, 0x00000000, 0x00000003 }, + + { 0x2800, 0, 0x00000000, 0x00000001 }, + { 0x2804, 0, 0x00000000, 0x00003f01 }, + { 0x2808, 0, 0x0f3f3f03, 0x00000000 }, + { 0x2810, 0, 0xffff0000, 0x00000000 }, + { 0x2814, 0, 0xffff0000, 0x00000000 }, + { 0x2818, 0, 0xffff0000, 0x00000000 }, + { 0x281c, 0, 0xffff0000, 0x00000000 }, + { 0x2834, 0, 0xffffffff, 0x00000000 }, + { 0x2840, 0, 0x00000000, 0xffffffff }, + { 0x2844, 0, 0x00000000, 0xffffffff }, + { 0x2848, 0, 0xffffffff, 0x00000000 }, + { 0x284c, 0, 0xf800f800, 0x07ff07ff }, + + { 0x2c00, 0, 0x00000000, 0x00000011 }, + { 0x2c04, 0, 0x00000000, 0x00030007 }, + + { 0x3c00, 0, 0x00000000, 0x00000001 }, + { 0x3c04, 0, 0x00000000, 0x00070000 }, + { 0x3c08, 0, 0x00007f71, 0x07f00000 }, + { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 }, + { 0x3c10, 0, 0xffffffff, 
0x00000000 }, + { 0x3c14, 0, 0x00000000, 0xffffffff }, + { 0x3c18, 0, 0x00000000, 0xffffffff }, + { 0x3c1c, 0, 0xfffff000, 0x00000000 }, + { 0x3c20, 0, 0xffffff00, 0x00000000 }, + + { 0x5004, 0, 0x00000000, 0x0000007f }, + { 0x5008, 0, 0x0f0007ff, 0x00000000 }, + + { 0x5c00, 0, 0x00000000, 0x00000001 }, + { 0x5c04, 0, 0x00000000, 0x0003000f }, + { 0x5c08, 0, 0x00000003, 0x00000000 }, + { 0x5c0c, 0, 0x0000fff8, 0x00000000 }, + { 0x5c10, 0, 0x00000000, 0xffffffff }, + { 0x5c80, 0, 0x00000000, 0x0f7113f1 }, + { 0x5c84, 0, 0x00000000, 0x0000f333 }, + { 0x5c88, 0, 0x00000000, 0x00077373 }, + { 0x5c8c, 0, 0x00000000, 0x0007f737 }, + + { 0x6808, 0, 0x0000ff7f, 0x00000000 }, + { 0x680c, 0, 0xffffffff, 0x00000000 }, + { 0x6810, 0, 0xffffffff, 0x00000000 }, + { 0x6814, 0, 0xffffffff, 0x00000000 }, + { 0x6818, 0, 0xffffffff, 0x00000000 }, + { 0x681c, 0, 0xffffffff, 0x00000000 }, + { 0x6820, 0, 0x00ff00ff, 0x00000000 }, + { 0x6824, 0, 0x00ff00ff, 0x00000000 }, + { 0x6828, 0, 0x00ff00ff, 0x00000000 }, + { 0x682c, 0, 0x03ff03ff, 0x00000000 }, + { 0x6830, 0, 0x03ff03ff, 0x00000000 }, + { 0x6834, 0, 0x03ff03ff, 0x00000000 }, + { 0x6838, 0, 0x03ff03ff, 0x00000000 }, + { 0x683c, 0, 0x0000ffff, 0x00000000 }, + { 0x6840, 0, 0x00000ff0, 0x00000000 }, + { 0x6844, 0, 0x00ffff00, 0x00000000 }, + { 0x684c, 0, 0xffffffff, 0x00000000 }, + { 0x6850, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x6854, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x6858, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x685c, 0, 0x7f7f7f7f, 0x00000000 }, + { 0x6908, 0, 0x00000000, 0x0001ff0f }, + { 0x690c, 0, 0x00000000, 0x0ffe00f0 }, + + { 0xffff, 0, 0x00000000, 0x00000000 }, + }; + + ret = 0; + is_5709 = 0; + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + is_5709 = 1; + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + u32 offset, rw_mask, ro_mask, save_val, val; + u16 flags = reg_tbl[i].flags; + + if (is_5709 && (flags & BNX2_FL_NOT_5709)) + continue; + + offset = (u32) reg_tbl[i].offset; + rw_mask = reg_tbl[i].rw_mask; + ro_mask = reg_tbl[i].ro_mask; + + save_val = readl(bp->regview + offset); + + writel(0, bp->regview + offset); + + val = readl(bp->regview + offset); + if ((val & rw_mask) != 0) { + goto reg_test_err; + } + + if ((val & ro_mask) != (save_val & ro_mask)) { + goto reg_test_err; + } + + writel(0xffffffff, bp->regview + offset); + + val = readl(bp->regview + offset); + if ((val & rw_mask) != rw_mask) { + goto reg_test_err; + } + + if ((val & ro_mask) != (save_val & ro_mask)) { + goto reg_test_err; + } + + writel(save_val, bp->regview + offset); + continue; + +reg_test_err: + writel(save_val, bp->regview + offset); + ret = -ENODEV; + break; + } + return ret; +} + +static int +bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size) +{ + static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555, + 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa }; + int i; + + for (i = 0; i < sizeof(test_pattern) / 4; i++) { + u32 offset; + + for (offset = 0; offset < size; offset += 4) { + + bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]); + + if (bnx2_reg_rd_ind(bp, start + offset) != + test_pattern[i]) { + return -ENODEV; + } + } + } + return 0; +} + +static int +bnx2_test_memory(struct bnx2 *bp) +{ + int ret = 0; + int i; + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_5706[] = { + { 0x60000, 0x4000 }, + { 0xa0000, 0x3000 }, + { 0xe0000, 0x4000 }, + { 0x120000, 0x4000 }, + { 0x1a0000, 0x4000 }, + { 0x160000, 0x4000 }, + { 0xffffffff, 0 }, + }, + mem_tbl_5709[] = { + { 0x60000, 0x4000 }, + { 0xa0000, 0x3000 }, + { 0xe0000, 0x4000 }, + { 0x120000, 0x4000 }, 
+ { 0x1a0000, 0x4000 }, + { 0xffffffff, 0 }, + }; + struct mem_entry *mem_tbl; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + mem_tbl = mem_tbl_5709; + else + mem_tbl = mem_tbl_5706; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset, + mem_tbl[i].len)) != 0) { + return ret; + } + } + + return ret; +} + +#define BNX2_MAC_LOOPBACK 0 +#define BNX2_PHY_LOOPBACK 1 + +static int +bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) +{ + unsigned int pkt_size, num_pkts, i; + struct sk_buff *skb; + u8 *data; + unsigned char *packet; + u16 rx_start_idx, rx_idx; + dma_addr_t map; + struct bnx2_tx_bd *txbd; + struct bnx2_sw_bd *rx_buf; + struct l2_fhdr *rx_hdr; + int ret = -ENODEV; + struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; + struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; + struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + + tx_napi = bnapi; + + txr = &tx_napi->tx_ring; + rxr = &bnapi->rx_ring; + if (loopback_mode == BNX2_MAC_LOOPBACK) { + bp->loopback = MAC_LOOPBACK; + bnx2_set_mac_loopback(bp); + } + else if (loopback_mode == BNX2_PHY_LOOPBACK) { + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return 0; + + bp->loopback = PHY_LOOPBACK; + bnx2_set_phy_loopback(bp); + } + else + return -EINVAL; + + pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4); + skb = netdev_alloc_skb(bp->dev, pkt_size); + if (!skb) + return -ENOMEM; + packet = skb_put(skb, pkt_size); + memcpy(packet, bp->dev->dev_addr, ETH_ALEN); + memset(packet + ETH_ALEN, 0x0, 8); + for (i = 14; i < pkt_size; i++) + packet[i] = (unsigned char) (i & 0xff); + + map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + + BNX2_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + + BNX2_RD(bp, BNX2_HC_COMMAND); + + udelay(5); + rx_start_idx = bnx2_get_hw_rx_cons(bnapi); + + num_pkts = 0; + + txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)]; + + txbd->tx_bd_haddr_hi = (u64) map >> 32; + txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; + txbd->tx_bd_mss_nbytes = pkt_size; + txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; + + num_pkts++; + txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod); + txr->tx_prod_bseq += pkt_size; + + BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); + BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); + + udelay(100); + + BNX2_WR(bp, BNX2_HC_COMMAND, + bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + + BNX2_RD(bp, BNX2_HC_COMMAND); + + udelay(5); + + dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + + if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) + goto loopback_test_done; + + rx_idx = bnx2_get_hw_rx_cons(bnapi); + if (rx_idx != rx_start_idx + num_pkts) { + goto loopback_test_done; + } + + rx_buf = &rxr->rx_buf_ring[rx_start_idx]; + data = rx_buf->data; + + rx_hdr = get_l2_fhdr(data); + data = (u8 *)rx_hdr + BNX2_RX_OFFSET; + + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + + if (rx_hdr->l2_fhdr_status & + (L2_FHDR_ERRORS_BAD_CRC | + L2_FHDR_ERRORS_PHY_DECODE | + L2_FHDR_ERRORS_ALIGNMENT | + L2_FHDR_ERRORS_TOO_SHORT | + L2_FHDR_ERRORS_GIANT_FRAME)) { + + goto loopback_test_done; + } + + if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) { + goto loopback_test_done; + } + + for (i = 14; i < pkt_size; i++) { + if (*(data + i) != (unsigned char) (i & 0xff)) { + goto 
loopback_test_done; + } + } + + ret = 0; + +loopback_test_done: + bp->loopback = 0; + return ret; +} + +#define BNX2_MAC_LOOPBACK_FAILED 1 +#define BNX2_PHY_LOOPBACK_FAILED 2 +#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \ + BNX2_PHY_LOOPBACK_FAILED) + +static int +bnx2_test_loopback(struct bnx2 *bp) +{ + int rc = 0; + + if (!netif_running(bp->dev)) + return BNX2_LOOPBACK_FAILED; + + bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); + spin_lock_bh(&bp->phy_lock); + bnx2_init_phy(bp, 1); + spin_unlock_bh(&bp->phy_lock); + if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK)) + rc |= BNX2_MAC_LOOPBACK_FAILED; + if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK)) + rc |= BNX2_PHY_LOOPBACK_FAILED; + return rc; +} + +#define NVRAM_SIZE 0x200 +#define CRC32_RESIDUAL 0xdebb20e3 + +static int +bnx2_test_nvram(struct bnx2 *bp) +{ + __be32 buf[NVRAM_SIZE / 4]; + u8 *data = (u8 *) buf; + int rc = 0; + u32 magic, csum; + + if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0) + goto test_nvram_done; + + magic = be32_to_cpu(buf[0]); + if (magic != 0x669955aa) { + rc = -ENODEV; + goto test_nvram_done; + } + + if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0) + goto test_nvram_done; + + csum = ether_crc_le(0x100, data); + if (csum != CRC32_RESIDUAL) { + rc = -ENODEV; + goto test_nvram_done; + } + + csum = ether_crc_le(0x100, data + 0x100); + if (csum != CRC32_RESIDUAL) { + rc = -ENODEV; + } + +test_nvram_done: + return rc; +} + +static int +bnx2_test_link(struct bnx2 *bp) +{ + u32 bmsr; + + if (!netif_running(bp->dev)) + return -ENODEV; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + if (bp->link_up) + return 0; + return -ENODEV; + } + spin_lock_bh(&bp->phy_lock); + bnx2_enable_bmsr1(bp); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); + bnx2_disable_bmsr1(bp); + spin_unlock_bh(&bp->phy_lock); + + if (bmsr & BMSR_LSTATUS) { + return 0; + } + return -ENODEV; +} + +static int +bnx2_test_intr(struct bnx2 *bp) +{ + int i; + u16 status_idx; + + if (!netif_running(bp->dev)) + return -ENODEV; + + status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; + + /* This register is not touched during run-time. */ + BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); + BNX2_RD(bp, BNX2_HC_COMMAND); + + for (i = 0; i < 10; i++) { + if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != + status_idx) { + + break; + } + + msleep_interruptible(10); + } + if (i < 10) + return 0; + + return -ENODEV; +} + +/* Determining link for parallel detection. 
*/ +static int +bnx2_5706_serdes_has_link(struct bnx2 *bp) +{ + u32 mode_ctl, an_dbg, exp; + + if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL) + return 0; + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl); + + if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET)) + return 0; + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); + + if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID)) + return 0; + + bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); + bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp); + + if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */ + return 0; + + return 1; +} + +static void +bnx2_5706_serdes_timer(struct bnx2 *bp) +{ + int check_link = 1; + + spin_lock(&bp->phy_lock); + if (bp->serdes_an_pending) { + bp->serdes_an_pending--; + check_link = 0; + } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { + u32 bmcr; + + bp->current_interval = BNX2_TIMER_INTERVAL; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + + if (bmcr & BMCR_ANENABLE) { + if (bnx2_5706_serdes_has_link(bp)) { + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); + bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT; + } + } + } + else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) && + (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) { + u32 phy2; + + bnx2_write_phy(bp, 0x17, 0x0f01); + bnx2_read_phy(bp, 0x15, &phy2); + if (phy2 & 0x20) { + u32 bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + bmcr |= BMCR_ANENABLE; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr); + + bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; + } + } else + bp->current_interval = BNX2_TIMER_INTERVAL; + + if (check_link) { + u32 val; + + bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); + bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); + + if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) { + if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) { + bnx2_5706s_force_link_dn(bp, 1); + bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN; + } else + bnx2_set_link(bp); + } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC)) + bnx2_set_link(bp); + } + spin_unlock(&bp->phy_lock); +} + +static void +bnx2_5708_serdes_timer(struct bnx2 *bp) +{ + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return; + + if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) { + bp->serdes_an_pending = 0; + return; + } + + spin_lock(&bp->phy_lock); + if (bp->serdes_an_pending) + bp->serdes_an_pending--; + else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { + u32 bmcr; + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + if (bmcr & BMCR_ANENABLE) { + bnx2_enable_forced_2g5(bp); + bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT; + } else { + bnx2_disable_forced_2g5(bp); + bp->serdes_an_pending = 2; + bp->current_interval = BNX2_TIMER_INTERVAL; + } + + } else + bp->current_interval = BNX2_TIMER_INTERVAL; + + spin_unlock(&bp->phy_lock); +} + +static void +bnx2_timer(unsigned long data) +{ + struct bnx2 *bp = (struct bnx2 *) data; + + if (!netif_running(bp->dev)) + return; + + if (atomic_read(&bp->intr_sem) != 0) + goto bnx2_restart_timer; + + if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) == + BNX2_FLAG_USING_MSI) + bnx2_chk_missed_msi(bp); + + bnx2_send_heart_beat(bp); + 
+ bp->stats_blk->stat_FwRxDrop = + bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT); + + /* workaround occasional corrupted counters */ + if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) + BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | + BNX2_HC_COMMAND_STATS_NOW); + + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) + bnx2_5706_serdes_timer(bp); + else + bnx2_5708_serdes_timer(bp); + } + +bnx2_restart_timer: + mod_timer(&bp->timer, jiffies + bp->current_interval); +} + +static int +bnx2_request_irq(struct bnx2 *bp) +{ + unsigned long flags; + struct bnx2_irq *irq; + int rc = 0, i; + + if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX) + flags = 0; + else + flags = IRQF_SHARED; + + for (i = 0; i < bp->irq_nvecs; i++) { + irq = &bp->irq_tbl[i]; + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + &bp->bnx2_napi[i]); + if (rc) + break; + irq->requested = 1; + } + return rc; +} + +static void +__bnx2_free_irq(struct bnx2 *bp) +{ + struct bnx2_irq *irq; + int i; + + for (i = 0; i < bp->irq_nvecs; i++) { + irq = &bp->irq_tbl[i]; + if (irq->requested) + free_irq(irq->vector, &bp->bnx2_napi[i]); + irq->requested = 0; + } +} + +static void +bnx2_free_irq(struct bnx2 *bp) +{ + + __bnx2_free_irq(bp); + if (bp->flags & BNX2_FLAG_USING_MSI) + pci_disable_msi(bp->pdev); + else if (bp->flags & BNX2_FLAG_USING_MSIX) + pci_disable_msix(bp->pdev); + + bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI); +} + +static void +bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) +{ + int i, total_vecs; + struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; + struct net_device *dev = bp->dev; + const int len = sizeof(bp->irq_tbl[0].name); + + bnx2_setup_msix_tbl(bp); + BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); + BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); + BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); + + /* Need to flush the previous three writes to ensure MSI-X + * is setup properly */ + BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL); + + for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + total_vecs = msix_vecs; +#ifdef BCM_CNIC + total_vecs++; +#endif + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, + BNX2_MIN_MSIX_VEC, total_vecs); + if (total_vecs < 0) + return; + + msix_vecs = total_vecs; +#ifdef BCM_CNIC + msix_vecs--; +#endif + bp->irq_nvecs = msix_vecs; + bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI; + for (i = 0; i < total_vecs; i++) { + bp->irq_tbl[i].vector = msix_ent[i].vector; + snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i); + bp->irq_tbl[i].handler = bnx2_msi_1shot; + } +} + +static int +bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) +{ + int cpus = netif_get_num_default_rss_queues(); + int msix_vecs; + + if (!bp->num_req_rx_rings) + msix_vecs = max(cpus + 1, bp->num_req_tx_rings); + else if (!bp->num_req_tx_rings) + msix_vecs = max(cpus, bp->num_req_rx_rings); + else + msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings); + + msix_vecs = min(msix_vecs, RX_MAX_RINGS); + + bp->irq_tbl[0].handler = bnx2_interrupt; + strcpy(bp->irq_tbl[0].name, bp->dev->name); + bp->irq_nvecs = 1; + bp->irq_tbl[0].vector = bp->pdev->irq; + + if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi) + bnx2_enable_msix(bp, msix_vecs); + + if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi && + !(bp->flags & BNX2_FLAG_USING_MSIX)) { + if (pci_enable_msi(bp->pdev) == 0) { + bp->flags |= BNX2_FLAG_USING_MSI; + if (BNX2_CHIP(bp) == 
BNX2_CHIP_5709) { + bp->flags |= BNX2_FLAG_ONE_SHOT_MSI; + bp->irq_tbl[0].handler = bnx2_msi_1shot; + } else + bp->irq_tbl[0].handler = bnx2_msi; + + bp->irq_tbl[0].vector = bp->pdev->irq; + } + } + + if (!bp->num_req_tx_rings) + bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs); + else + bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings); + + if (!bp->num_req_rx_rings) + bp->num_rx_rings = bp->irq_nvecs; + else + bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings); + + netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings); + + return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings); +} + +/* Called with rtnl_lock */ +static int +bnx2_open(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + rc = bnx2_request_firmware(bp); + if (rc < 0) + goto out; + + netif_carrier_off(dev); + + bnx2_disable_int(bp); + + rc = bnx2_setup_int_mode(bp, disable_msi); + if (rc) + goto open_err; + bnx2_init_napi(bp); + bnx2_napi_enable(bp); + rc = bnx2_alloc_mem(bp); + if (rc) + goto open_err; + + rc = bnx2_request_irq(bp); + if (rc) + goto open_err; + + rc = bnx2_init_nic(bp, 1); + if (rc) + goto open_err; + + mod_timer(&bp->timer, jiffies + bp->current_interval); + + atomic_set(&bp->intr_sem, 0); + + memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block)); + + bnx2_enable_int(bp); + + if (bp->flags & BNX2_FLAG_USING_MSI) { + /* Test MSI to make sure it is working + * If MSI test fails, go back to INTx mode + */ + if (bnx2_test_intr(bp) != 0) { + netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n"); + + bnx2_disable_int(bp); + bnx2_free_irq(bp); + + bnx2_setup_int_mode(bp, 1); + + rc = bnx2_init_nic(bp, 0); + + if (!rc) + rc = bnx2_request_irq(bp); + + if (rc) { + del_timer_sync(&bp->timer); + goto open_err; + } + bnx2_enable_int(bp); + } + } + if (bp->flags & BNX2_FLAG_USING_MSI) + netdev_info(dev, "using MSI\n"); + else if (bp->flags & BNX2_FLAG_USING_MSIX) + netdev_info(dev, "using MSIX\n"); + + netif_tx_start_all_queues(dev); +out: + return rc; + +open_err: + bnx2_napi_disable(bp); + bnx2_free_skbs(bp); + bnx2_free_irq(bp); + bnx2_free_mem(bp); + bnx2_del_napi(bp); + bnx2_release_firmware(bp); + goto out; +} + +static void +bnx2_reset_task(struct work_struct *work) +{ + struct bnx2 *bp = container_of(work, struct bnx2, reset_task); + int rc; + u16 pcicmd; + + rtnl_lock(); + if (!netif_running(bp->dev)) { + rtnl_unlock(); + return; + } + + bnx2_netif_stop(bp, true); + + pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd); + if (!(pcicmd & PCI_COMMAND_MEMORY)) { + /* in case PCI block has reset */ + pci_restore_state(bp->pdev); + pci_save_state(bp->pdev); + } + rc = bnx2_init_nic(bp, 1); + if (rc) { + netdev_err(bp->dev, "failed to reset NIC, closing\n"); + bnx2_napi_enable(bp); + dev_close(bp->dev); + rtnl_unlock(); + return; + } + + atomic_set(&bp->intr_sem, 1); + bnx2_netif_start(bp, true); + rtnl_unlock(); +} + +#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL } + +static void +bnx2_dump_ftq(struct bnx2 *bp) +{ + int i; + u32 reg, bdidx, cid, valid; + struct net_device *dev = bp->dev; + static const struct ftq_reg { + char *name; + u32 off; + } ftq_arr[] = { + BNX2_FTQ_ENTRY(RV2P_P), + BNX2_FTQ_ENTRY(RV2P_T), + BNX2_FTQ_ENTRY(RV2P_M), + BNX2_FTQ_ENTRY(TBDR_), + BNX2_FTQ_ENTRY(TDMA_), + BNX2_FTQ_ENTRY(TXP_), + BNX2_FTQ_ENTRY(TXP_), + BNX2_FTQ_ENTRY(TPAT_), + BNX2_FTQ_ENTRY(RXP_C), + 
BNX2_FTQ_ENTRY(RXP_), + BNX2_FTQ_ENTRY(COM_COMXQ_), + BNX2_FTQ_ENTRY(COM_COMTQ_), + BNX2_FTQ_ENTRY(COM_COMQ_), + BNX2_FTQ_ENTRY(CP_CPQ_), + }; + + netdev_err(dev, "<--- start FTQ dump --->\n"); + for (i = 0; i < ARRAY_SIZE(ftq_arr); i++) + netdev_err(dev, "%s %08x\n", ftq_arr[i].name, + bnx2_reg_rd_ind(bp, ftq_arr[i].off)); + + netdev_err(dev, "CPU states:\n"); + for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000) + netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n", + reg, bnx2_reg_rd_ind(bp, reg), + bnx2_reg_rd_ind(bp, reg + 4), + bnx2_reg_rd_ind(bp, reg + 8), + bnx2_reg_rd_ind(bp, reg + 0x1c), + bnx2_reg_rd_ind(bp, reg + 0x1c), + bnx2_reg_rd_ind(bp, reg + 0x20)); + + netdev_err(dev, "<--- end FTQ dump --->\n"); + netdev_err(dev, "<--- start TBDC dump --->\n"); + netdev_err(dev, "TBDC free cnt: %ld\n", + BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT); + netdev_err(dev, "LINE CID BIDX CMD VALIDS\n"); + for (i = 0; i < 0x20; i++) { + int j = 0; + + BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i); + BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE, + BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ); + BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB); + while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) & + BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100) + j++; + + cid = BNX2_RD(bp, BNX2_TBDC_CID); + bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX); + valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE); + netdev_err(dev, "%02x %06x %04lx %02x [%x]\n", + i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX, + bdidx >> 24, (valid >> 8) & 0x0ff); + } + netdev_err(dev, "<--- end TBDC dump --->\n"); +} + +static void +bnx2_dump_state(struct bnx2 *bp) +{ + struct net_device *dev = bp->dev; + u32 val1, val2; + + pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1); + netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n", + atomic_read(&bp->intr_sem), val1); + pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1); + pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); + netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2); + netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", + BNX2_RD(bp, BNX2_EMAC_TX_STATUS), + BNX2_RD(bp, BNX2_EMAC_RX_STATUS)); + netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n", + BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); + netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", + BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); + if (bp->flags & BNX2_FLAG_USING_MSIX) + netdev_err(dev, "DEBUG: PBA[%08x]\n", + BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); +} + +static void +bnx2_tx_timeout(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + bnx2_dump_ftq(bp); + bnx2_dump_state(bp); + bnx2_dump_mcp_state(bp); + + /* This allows the netif to be shutdown gracefully before resetting */ + schedule_work(&bp->reset_task); +} + +/* Called with netif_tx_lock. + * bnx2_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue(). 
+ */ +static netdev_tx_t +bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + dma_addr_t mapping; + struct bnx2_tx_bd *txbd; + struct bnx2_sw_tx_bd *tx_buf; + u32 len, vlan_tag_flags, last_frag, mss; + u16 prod, ring_prod; + int i; + struct bnx2_napi *bnapi; + struct bnx2_tx_ring_info *txr; + struct netdev_queue *txq; + + /* Determine which tx ring we will be placed on */ + i = skb_get_queue_mapping(skb); + bnapi = &bp->bnx2_napi[i]; + txr = &bnapi->tx_ring; + txq = netdev_get_tx_queue(dev, i); + + if (unlikely(bnx2_tx_avail(bp, txr) < + (skb_shinfo(skb)->nr_frags + 1))) { + netif_tx_stop_queue(txq); + netdev_err(dev, "BUG! Tx ring full when queue awake!\n"); + + return NETDEV_TX_BUSY; + } + len = skb_headlen(skb); + prod = txr->tx_prod; + ring_prod = BNX2_TX_RING_IDX(prod); + + vlan_tag_flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; + } + + if (skb_vlan_tag_present(skb)) { + vlan_tag_flags |= + (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16)); + } + + if ((mss = skb_shinfo(skb)->gso_size)) { + u32 tcp_opt_len; + struct iphdr *iph; + + vlan_tag_flags |= TX_BD_FLAGS_SW_LSO; + + tcp_opt_len = tcp_optlen(skb); + + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + u32 tcp_off = skb_transport_offset(skb) - + sizeof(struct ipv6hdr) - ETH_HLEN; + + vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) | + TX_BD_FLAGS_SW_FLAGS; + if (likely(tcp_off == 0)) + vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK; + else { + tcp_off >>= 3; + vlan_tag_flags |= ((tcp_off & 0x3) << + TX_BD_FLAGS_TCP6_OFF0_SHL) | + ((tcp_off & 0x10) << + TX_BD_FLAGS_TCP6_OFF4_SHL); + mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL; + } + } else { + iph = ip_hdr(skb); + if (tcp_opt_len || (iph->ihl > 5)) { + vlan_tag_flags |= ((iph->ihl - 5) + + (tcp_opt_len >> 2)) << 8; + } + } + } else + mss = 0; + + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tx_buf = &txr->tx_buf_ring[ring_prod]; + tx_buf->skb = skb; + dma_unmap_addr_set(tx_buf, mapping, mapping); + + txbd = &txr->tx_desc_ring[ring_prod]; + + txbd->tx_bd_haddr_hi = (u64) mapping >> 32; + txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; + txbd->tx_bd_mss_nbytes = len | (mss << 16); + txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START; + + last_frag = skb_shinfo(skb)->nr_frags; + tx_buf->nr_frags = last_frag; + tx_buf->is_gso = skb_is_gso(skb); + + for (i = 0; i < last_frag; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + prod = BNX2_NEXT_TX_BD(prod); + ring_prod = BNX2_TX_RING_IDX(prod); + txbd = &txr->tx_desc_ring[ring_prod]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) + goto dma_error; + dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, + mapping); + + txbd->tx_bd_haddr_hi = (u64) mapping >> 32; + txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; + txbd->tx_bd_mss_nbytes = len | (mss << 16); + txbd->tx_bd_vlan_tag_flags = vlan_tag_flags; + + } + txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END; + + /* Sync BD data before updating TX mailbox */ + wmb(); + + netdev_tx_sent_queue(txq, skb->len); + + prod = BNX2_NEXT_TX_BD(prod); + txr->tx_prod_bseq += skb->len; + + BNX2_WR16(bp, txr->tx_bidx_addr, prod); + BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); + + mmiowb(); + + txr->tx_prod = 
prod; + + if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * tx index in bnx2_tx_avail() below, because in + * bnx2_tx_int(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh) + netif_tx_wake_queue(txq); + } + + return NETDEV_TX_OK; +dma_error: + /* save value of frag that failed */ + last_frag = i; + + /* start back at beginning and unmap skb */ + prod = txr->tx_prod; + ring_prod = BNX2_TX_RING_IDX(prod); + tx_buf = &txr->tx_buf_ring[ring_prod]; + tx_buf->skb = NULL; + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), PCI_DMA_TODEVICE); + + /* unmap remaining mapped pages */ + for (i = 0; i < last_frag; i++) { + prod = BNX2_NEXT_TX_BD(prod); + ring_prod = BNX2_TX_RING_IDX(prod); + tx_buf = &txr->tx_buf_ring[ring_prod]; + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + } + + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/* Called with rtnl_lock */ +static int +bnx2_close(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + bnx2_disable_int_sync(bp); + bnx2_napi_disable(bp); + netif_tx_disable(dev); + del_timer_sync(&bp->timer); + bnx2_shutdown_chip(bp); + bnx2_free_irq(bp); + bnx2_free_skbs(bp); + bnx2_free_mem(bp); + bnx2_del_napi(bp); + bp->link_up = 0; + netif_carrier_off(bp->dev); + return 0; +} + +static void +bnx2_save_stats(struct bnx2 *bp) +{ + u32 *hw_stats = (u32 *) bp->stats_blk; + u32 *temp_stats = (u32 *) bp->temp_stats_blk; + int i; + + /* The 1st 10 counters are 64-bit counters */ + for (i = 0; i < 20; i += 2) { + u32 hi; + u64 lo; + + hi = temp_stats[i] + hw_stats[i]; + lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1]; + if (lo > 0xffffffff) + hi++; + temp_stats[i] = hi; + temp_stats[i + 1] = lo & 0xffffffff; + } + + for ( ; i < sizeof(struct statistics_block) / 4; i++) + temp_stats[i] += hw_stats[i]; +} + +#define GET_64BIT_NET_STATS64(ctr) \ + (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo)) + +#define GET_64BIT_NET_STATS(ctr) \ + GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \ + GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr) + +#define GET_32BIT_NET_STATS(ctr) \ + (unsigned long) (bp->stats_blk->ctr + \ + bp->temp_stats_blk->ctr) + +static struct rtnl_link_stats64 * +bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (bp->stats_blk == NULL) + return net_stats; + + net_stats->rx_packets = + GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + + GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) + + GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts); + + net_stats->tx_packets = + GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) + + GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) + + GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts); + + net_stats->rx_bytes = + GET_64BIT_NET_STATS(stat_IfHCInOctets); + + net_stats->tx_bytes = + GET_64BIT_NET_STATS(stat_IfHCOutOctets); + + net_stats->multicast = + GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts); + + net_stats->collisions = + GET_32BIT_NET_STATS(stat_EtherStatsCollisions); + + net_stats->rx_length_errors = + GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) + + GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts); + + net_stats->rx_over_errors = + GET_32BIT_NET_STATS(stat_IfInFTQDiscards) + + GET_32BIT_NET_STATS(stat_IfInMBUFDiscards); + + net_stats->rx_frame_errors 
= + GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors); + + net_stats->rx_crc_errors = + GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors); + + net_stats->rx_errors = net_stats->rx_length_errors + + net_stats->rx_over_errors + net_stats->rx_frame_errors + + net_stats->rx_crc_errors; + + net_stats->tx_aborted_errors = + GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) + + GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions); + + if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) + net_stats->tx_carrier_errors = 0; + else { + net_stats->tx_carrier_errors = + GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors); + } + + net_stats->tx_errors = + GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) + + net_stats->tx_aborted_errors + + net_stats->tx_carrier_errors; + + net_stats->rx_missed_errors = + GET_32BIT_NET_STATS(stat_IfInFTQDiscards) + + GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + + GET_32BIT_NET_STATS(stat_FwRxDrop); + + return net_stats; +} + +/* All ethtool functions called with rtnl_lock */ + +static int +bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2 *bp = netdev_priv(dev); + int support_serdes = 0, support_copper = 0; + + cmd->supported = SUPPORTED_Autoneg; + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + support_serdes = 1; + support_copper = 1; + } else if (bp->phy_port == PORT_FIBRE) + support_serdes = 1; + else + support_copper = 1; + + if (support_serdes) { + cmd->supported |= SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE; + if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) + cmd->supported |= SUPPORTED_2500baseX_Full; + + } + if (support_copper) { + cmd->supported |= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_TP; + + } + + spin_lock_bh(&bp->phy_lock); + cmd->port = bp->phy_port; + cmd->advertising = bp->advertising; + + if (bp->autoneg & AUTONEG_SPEED) { + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->autoneg = AUTONEG_DISABLE; + } + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, bp->line_speed); + cmd->duplex = bp->duplex; + if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) { + if (bp->phy_flags & BNX2_PHY_FLAG_MDIX) + cmd->eth_tp_mdix = ETH_TP_MDI_X; + else + cmd->eth_tp_mdix = ETH_TP_MDI; + } + } + else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } + spin_unlock_bh(&bp->phy_lock); + + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = bp->phy_addr; + + return 0; +} + +static int +bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2 *bp = netdev_priv(dev); + u8 autoneg = bp->autoneg; + u8 req_duplex = bp->req_duplex; + u16 req_line_speed = bp->req_line_speed; + u32 advertising = bp->advertising; + int err = -EINVAL; + + spin_lock_bh(&bp->phy_lock); + + if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE) + goto err_out_unlock; + + if (cmd->port != bp->phy_port && + !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)) + goto err_out_unlock; + + /* If device is down, we can store the settings only if the user + * is setting the currently active port. 
+ */ + if (!netif_running(dev) && cmd->port != bp->phy_port) + goto err_out_unlock; + + if (cmd->autoneg == AUTONEG_ENABLE) { + autoneg |= AUTONEG_SPEED; + + advertising = cmd->advertising; + if (cmd->port == PORT_TP) { + advertising &= ETHTOOL_ALL_COPPER_SPEED; + if (!advertising) + advertising = ETHTOOL_ALL_COPPER_SPEED; + } else { + advertising &= ETHTOOL_ALL_FIBRE_SPEED; + if (!advertising) + advertising = ETHTOOL_ALL_FIBRE_SPEED; + } + advertising |= ADVERTISED_Autoneg; + } + else { + u32 speed = ethtool_cmd_speed(cmd); + if (cmd->port == PORT_FIBRE) { + if ((speed != SPEED_1000 && + speed != SPEED_2500) || + (cmd->duplex != DUPLEX_FULL)) + goto err_out_unlock; + + if (speed == SPEED_2500 && + !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) + goto err_out_unlock; + } else if (speed == SPEED_1000 || speed == SPEED_2500) + goto err_out_unlock; + + autoneg &= ~AUTONEG_SPEED; + req_line_speed = speed; + req_duplex = cmd->duplex; + advertising = 0; + } + + bp->autoneg = autoneg; + bp->advertising = advertising; + bp->req_line_speed = req_line_speed; + bp->req_duplex = req_duplex; + + err = 0; + /* If device is down, the new settings will be picked up when it is + * brought up. + */ + if (netif_running(dev)) + err = bnx2_setup_phy(bp, cmd->port); + +err_out_unlock: + spin_unlock_bh(&bp->phy_lock); + + return err; +} + +static void +bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct bnx2 *bp = netdev_priv(dev); + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version)); +} + +#define BNX2_REGDUMP_LEN (32 * 1024) + +static int +bnx2_get_regs_len(struct net_device *dev) +{ + return BNX2_REGDUMP_LEN; +} + +static void +bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) +{ + u32 *p = _p, i, offset; + u8 *orig_p = _p; + struct bnx2 *bp = netdev_priv(dev); + static const u32 reg_boundaries[] = { + 0x0000, 0x0098, 0x0400, 0x045c, + 0x0800, 0x0880, 0x0c00, 0x0c10, + 0x0c30, 0x0d08, 0x1000, 0x101c, + 0x1040, 0x1048, 0x1080, 0x10a4, + 0x1400, 0x1490, 0x1498, 0x14f0, + 0x1500, 0x155c, 0x1580, 0x15dc, + 0x1600, 0x1658, 0x1680, 0x16d8, + 0x1800, 0x1820, 0x1840, 0x1854, + 0x1880, 0x1894, 0x1900, 0x1984, + 0x1c00, 0x1c0c, 0x1c40, 0x1c54, + 0x1c80, 0x1c94, 0x1d00, 0x1d84, + 0x2000, 0x2030, 0x23c0, 0x2400, + 0x2800, 0x2820, 0x2830, 0x2850, + 0x2b40, 0x2c10, 0x2fc0, 0x3058, + 0x3c00, 0x3c94, 0x4000, 0x4010, + 0x4080, 0x4090, 0x43c0, 0x4458, + 0x4c00, 0x4c18, 0x4c40, 0x4c54, + 0x4fc0, 0x5010, 0x53c0, 0x5444, + 0x5c00, 0x5c18, 0x5c80, 0x5c90, + 0x5fc0, 0x6000, 0x6400, 0x6428, + 0x6800, 0x6848, 0x684c, 0x6860, + 0x6888, 0x6910, 0x8000 + }; + + regs->version = 0; + + memset(p, 0, BNX2_REGDUMP_LEN); + + if (!netif_running(bp->dev)) + return; + + i = 0; + offset = reg_boundaries[0]; + p += offset; + while (offset < BNX2_REGDUMP_LEN) { + *p++ = BNX2_RD(bp, offset); + offset += 4; + if (offset == reg_boundaries[i + 1]) { + offset = reg_boundaries[i + 2]; + p = (u32 *) (orig_p + offset); + i += 2; + } + } +} + +static void +bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (bp->flags & BNX2_FLAG_NO_WOL) { + wol->supported = 0; + wol->wolopts = 0; + } + else { + wol->supported = WAKE_MAGIC; + if (bp->wol) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + } + memset(&wol->sopass, 0, 
sizeof(wol->sopass)); +} + +static int +bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGIC) { + if (bp->flags & BNX2_FLAG_NO_WOL) + return -EINVAL; + + bp->wol = 1; + } + else { + bp->wol = 0; + } + + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); + + return 0; +} + +static int +bnx2_nway_reset(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + u32 bmcr; + + if (!netif_running(dev)) + return -EAGAIN; + + if (!(bp->autoneg & AUTONEG_SPEED)) { + return -EINVAL; + } + + spin_lock_bh(&bp->phy_lock); + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { + int rc; + + rc = bnx2_setup_remote_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + return rc; + } + + /* Force a link down visible on the other side */ + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK); + spin_unlock_bh(&bp->phy_lock); + + msleep(20); + + spin_lock_bh(&bp->phy_lock); + + bp->current_interval = BNX2_SERDES_AN_TIMEOUT; + bp->serdes_an_pending = 1; + mod_timer(&bp->timer, jiffies + bp->current_interval); + } + + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + bmcr &= ~BMCR_LOOPBACK; + bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE); + + spin_unlock_bh(&bp->phy_lock); + + return 0; +} + +static u32 +bnx2_get_link(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + return bp->link_up; +} + +static int +bnx2_get_eeprom_len(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (bp->flash_info == NULL) + return 0; + + return (int) bp->flash_size; +} + +static int +bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *eebuf) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + /* parameters already validated in ethtool_get_eeprom */ + + rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int +bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *eebuf) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + /* parameters already validated in ethtool_set_eeprom */ + + rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int +bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +{ + struct bnx2 *bp = netdev_priv(dev); + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + + coal->rx_coalesce_usecs = bp->rx_ticks; + coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip; + coal->rx_coalesce_usecs_irq = bp->rx_ticks_int; + coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int; + + coal->tx_coalesce_usecs = bp->tx_ticks; + coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip; + coal->tx_coalesce_usecs_irq = bp->tx_ticks_int; + coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int; + + coal->stats_block_coalesce_usecs = bp->stats_ticks; + + return 0; +} + +static int +bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +{ + struct bnx2 *bp = netdev_priv(dev); + + bp->rx_ticks = (u16) coal->rx_coalesce_usecs; + if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff; + + bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; + if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff; + + bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq; + if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff; + + bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq; + if 
(bp->rx_quick_cons_trip_int > 0xff) + bp->rx_quick_cons_trip_int = 0xff; + + bp->tx_ticks = (u16) coal->tx_coalesce_usecs; + if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff; + + bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames; + if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff; + + bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq; + if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff; + + bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq; + if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = + 0xff; + + bp->stats_ticks = coal->stats_block_coalesce_usecs; + if (bp->flags & BNX2_FLAG_BROKEN_STATS) { + if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC) + bp->stats_ticks = USEC_PER_SEC; + } + if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS) + bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + + if (netif_running(bp->dev)) { + bnx2_netif_stop(bp, true); + bnx2_init_nic(bp, 0); + bnx2_netif_start(bp, true); + } + + return 0; +} + +static void +bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct bnx2 *bp = netdev_priv(dev); + + ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT; + ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT; + + ering->rx_pending = bp->rx_ring_size; + ering->rx_jumbo_pending = bp->rx_pg_ring_size; + + ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT; + ering->tx_pending = bp->tx_ring_size; +} + +static int +bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq) +{ + if (netif_running(bp->dev)) { + /* Reset will erase chipset stats; save them */ + bnx2_save_stats(bp); + + bnx2_netif_stop(bp, true); + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); + if (reset_irq) { + bnx2_free_irq(bp); + bnx2_del_napi(bp); + } else { + __bnx2_free_irq(bp); + } + bnx2_free_skbs(bp); + bnx2_free_mem(bp); + } + + bnx2_set_rx_ring_size(bp, rx); + bp->tx_ring_size = tx; + + if (netif_running(bp->dev)) { + int rc = 0; + + if (reset_irq) { + rc = bnx2_setup_int_mode(bp, disable_msi); + bnx2_init_napi(bp); + } + + if (!rc) + rc = bnx2_alloc_mem(bp); + + if (!rc) + rc = bnx2_request_irq(bp); + + if (!rc) + rc = bnx2_init_nic(bp, 0); + + if (rc) { + bnx2_napi_enable(bp); + dev_close(bp->dev); + return rc; + } +#ifdef BCM_CNIC + mutex_lock(&bp->cnic_lock); + /* Let cnic know about the new status block. 
*/ + if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) + bnx2_setup_cnic_irq_info(bp); + mutex_unlock(&bp->cnic_lock); +#endif + bnx2_netif_start(bp, true); + } + return 0; +} + +static int +bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct bnx2 *bp = netdev_priv(dev); + int rc; + + if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) || + (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) || + (ering->tx_pending <= MAX_SKB_FRAGS)) { + + return -EINVAL; + } + rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending, + false); + return rc; +} + +static void +bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct bnx2 *bp = netdev_priv(dev); + + epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0); + epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0); + epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0); +} + +static int +bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct bnx2 *bp = netdev_priv(dev); + + bp->req_flow_ctrl = 0; + if (epause->rx_pause) + bp->req_flow_ctrl |= FLOW_CTRL_RX; + if (epause->tx_pause) + bp->req_flow_ctrl |= FLOW_CTRL_TX; + + if (epause->autoneg) { + bp->autoneg |= AUTONEG_FLOW_CTRL; + } + else { + bp->autoneg &= ~AUTONEG_FLOW_CTRL; + } + + if (netif_running(dev)) { + spin_lock_bh(&bp->phy_lock); + bnx2_setup_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + } + + return 0; +} + +static struct { + char string[ETH_GSTRING_LEN]; +} bnx2_stats_str_arr[] = { + { "rx_bytes" }, + { "rx_error_bytes" }, + { "tx_bytes" }, + { "tx_error_bytes" }, + { "rx_ucast_packets" }, + { "rx_mcast_packets" }, + { "rx_bcast_packets" }, + { "tx_ucast_packets" }, + { "tx_mcast_packets" }, + { "tx_bcast_packets" }, + { "tx_mac_errors" }, + { "tx_carrier_errors" }, + { "rx_crc_errors" }, + { "rx_align_errors" }, + { "tx_single_collisions" }, + { "tx_multi_collisions" }, + { "tx_deferred" }, + { "tx_excess_collisions" }, + { "tx_late_collisions" }, + { "tx_total_collisions" }, + { "rx_fragments" }, + { "rx_jabbers" }, + { "rx_undersize_packets" }, + { "rx_oversize_packets" }, + { "rx_64_byte_packets" }, + { "rx_65_to_127_byte_packets" }, + { "rx_128_to_255_byte_packets" }, + { "rx_256_to_511_byte_packets" }, + { "rx_512_to_1023_byte_packets" }, + { "rx_1024_to_1522_byte_packets" }, + { "rx_1523_to_9022_byte_packets" }, + { "tx_64_byte_packets" }, + { "tx_65_to_127_byte_packets" }, + { "tx_128_to_255_byte_packets" }, + { "tx_256_to_511_byte_packets" }, + { "tx_512_to_1023_byte_packets" }, + { "tx_1024_to_1522_byte_packets" }, + { "tx_1523_to_9022_byte_packets" }, + { "rx_xon_frames" }, + { "rx_xoff_frames" }, + { "tx_xon_frames" }, + { "tx_xoff_frames" }, + { "rx_mac_ctrl_frames" }, + { "rx_filtered_packets" }, + { "rx_ftq_discards" }, + { "rx_discards" }, + { "rx_fw_discards" }, +}; + +#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr) + +#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) + +static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = { + STATS_OFFSET32(stat_IfHCInOctets_hi), + STATS_OFFSET32(stat_IfHCInBadOctets_hi), + STATS_OFFSET32(stat_IfHCOutOctets_hi), + STATS_OFFSET32(stat_IfHCOutBadOctets_hi), + STATS_OFFSET32(stat_IfHCInUcastPkts_hi), + STATS_OFFSET32(stat_IfHCInMulticastPkts_hi), + STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi), + STATS_OFFSET32(stat_IfHCOutUcastPkts_hi), + STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi), + STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi), + 
STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors), + STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), + STATS_OFFSET32(stat_Dot3StatsFCSErrors), + STATS_OFFSET32(stat_Dot3StatsAlignmentErrors), + STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames), + STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames), + STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), + STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions), + STATS_OFFSET32(stat_Dot3StatsLateCollisions), + STATS_OFFSET32(stat_EtherStatsCollisions), + STATS_OFFSET32(stat_EtherStatsFragments), + STATS_OFFSET32(stat_EtherStatsJabbers), + STATS_OFFSET32(stat_EtherStatsUndersizePkts), + STATS_OFFSET32(stat_EtherStatsOverrsizePkts), + STATS_OFFSET32(stat_EtherStatsPktsRx64Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets), + STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx64Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets), + STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets), + STATS_OFFSET32(stat_XonPauseFramesReceived), + STATS_OFFSET32(stat_XoffPauseFramesReceived), + STATS_OFFSET32(stat_OutXonSent), + STATS_OFFSET32(stat_OutXoffSent), + STATS_OFFSET32(stat_MacControlFramesReceived), + STATS_OFFSET32(stat_IfInFramesL2FilterDiscards), + STATS_OFFSET32(stat_IfInFTQDiscards), + STATS_OFFSET32(stat_IfInMBUFDiscards), + STATS_OFFSET32(stat_FwRxDrop), +}; + +/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are + * skipped because of errata. 
+ */ +static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = { + 8,0,8,8,8,8,8,8,8,8, + 4,0,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4, +}; + +static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = { + 8,0,8,8,8,8,8,8,8,8, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4,4,4,4, + 4,4,4,4,4,4,4, +}; + +#define BNX2_NUM_TESTS 6 + +static struct { + char string[ETH_GSTRING_LEN]; +} bnx2_tests_str_arr[BNX2_NUM_TESTS] = { + { "register_test (offline)" }, + { "memory_test (offline)" }, + { "loopback_test (offline)" }, + { "nvram_test (online)" }, + { "interrupt_test (online)" }, + { "link_test (online)" }, +}; + +static int +bnx2_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return BNX2_NUM_TESTS; + case ETH_SS_STATS: + return BNX2_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void +bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) +{ + struct bnx2 *bp = netdev_priv(dev); + + memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS); + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int i; + + bnx2_netif_stop(bp, true); + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); + bnx2_free_skbs(bp); + + if (bnx2_test_registers(bp) != 0) { + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2_test_memory(bp) != 0) { + buf[1] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if ((buf[2] = bnx2_test_loopback(bp)) != 0) + etest->flags |= ETH_TEST_FL_FAILED; + + if (!netif_running(bp->dev)) + bnx2_shutdown_chip(bp); + else { + bnx2_init_nic(bp, 1); + bnx2_netif_start(bp, true); + } + + /* wait for link up */ + for (i = 0; i < 7; i++) { + if (bp->link_up) + break; + msleep_interruptible(1000); + } + } + + if (bnx2_test_nvram(bp) != 0) { + buf[3] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2_test_intr(bp) != 0) { + buf[4] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (bnx2_test_link(bp) != 0) { + buf[5] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + + } +} + +static void +bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, bnx2_stats_str_arr, + sizeof(bnx2_stats_str_arr)); + break; + case ETH_SS_TEST: + memcpy(buf, bnx2_tests_str_arr, + sizeof(bnx2_tests_str_arr)); + break; + } +} + +static void +bnx2_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + struct bnx2 *bp = netdev_priv(dev); + int i; + u32 *hw_stats = (u32 *) bp->stats_blk; + u32 *temp_stats = (u32 *) bp->temp_stats_blk; + u8 *stats_len_arr = NULL; + + if (hw_stats == NULL) { + memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); + return; + } + + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0)) + stats_len_arr = bnx2_5706_stats_len_arr; + else + stats_len_arr = bnx2_5708_stats_len_arr; + + for (i = 0; i < BNX2_NUM_STATS; i++) { + unsigned long offset; + + if (stats_len_arr[i] == 0) { + /* skip this counter */ + buf[i] = 0; + continue; + } + + offset = bnx2_stats_offset_arr[i]; + if (stats_len_arr[i] == 4) { + /* 4-byte counter */ + buf[i] = (u64) *(hw_stats + offset) + + *(temp_stats + offset); + continue; + } + /* 8-byte counter */ + buf[i] = (((u64) *(hw_stats + offset)) << 32) + + *(hw_stats + offset + 1) + + (((u64) *(temp_stats + offset)) << 32) + + *(temp_stats + offset + 1); + } +} + +static int +bnx2_set_phys_id(struct net_device *dev, enum 
ethtool_phys_id_state state) +{ + struct bnx2 *bp = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); + BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | + BNX2_EMAC_LED_1000MB_OVERRIDE | + BNX2_EMAC_LED_100MB_OVERRIDE | + BNX2_EMAC_LED_10MB_OVERRIDE | + BNX2_EMAC_LED_TRAFFIC_OVERRIDE | + BNX2_EMAC_LED_TRAFFIC); + break; + + case ETHTOOL_ID_OFF: + BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); + break; + + case ETHTOOL_ID_INACTIVE: + BNX2_WR(bp, BNX2_EMAC_LED, 0); + BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); + break; + } + + return 0; +} + +static int +bnx2_set_features(struct net_device *dev, netdev_features_t features) +{ + struct bnx2 *bp = netdev_priv(dev); + + /* TSO with VLAN tag won't work with current firmware */ + if (features & NETIF_F_HW_VLAN_CTAG_TX) + dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO); + else + dev->vlan_features &= ~NETIF_F_ALL_TSO; + + if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) != + !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) && + netif_running(dev)) { + bnx2_netif_stop(bp, false); + dev->features = features; + bnx2_set_rx_mode(dev); + bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); + bnx2_netif_start(bp, false); + return 1; + } + + return 0; +} + +static void bnx2_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct bnx2 *bp = netdev_priv(dev); + u32 max_rx_rings = 1; + u32 max_tx_rings = 1; + + if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) { + max_rx_rings = RX_MAX_RINGS; + max_tx_rings = TX_MAX_RINGS; + } + + channels->max_rx = max_rx_rings; + channels->max_tx = max_tx_rings; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = bp->num_rx_rings; + channels->tx_count = bp->num_tx_rings; + channels->other_count = 0; + channels->combined_count = 0; +} + +static int bnx2_set_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct bnx2 *bp = netdev_priv(dev); + u32 max_rx_rings = 1; + u32 max_tx_rings = 1; + int rc = 0; + + if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) { + max_rx_rings = RX_MAX_RINGS; + max_tx_rings = TX_MAX_RINGS; + } + if (channels->rx_count > max_rx_rings || + channels->tx_count > max_tx_rings) + return -EINVAL; + + bp->num_req_rx_rings = channels->rx_count; + bp->num_req_tx_rings = channels->tx_count; + + if (netif_running(dev)) + rc = bnx2_change_ring_size(bp, bp->rx_ring_size, + bp->tx_ring_size, true); + + return rc; +} + +static const struct ethtool_ops bnx2_ethtool_ops = { + .get_settings = bnx2_get_settings, + .set_settings = bnx2_set_settings, + .get_drvinfo = bnx2_get_drvinfo, + .get_regs_len = bnx2_get_regs_len, + .get_regs = bnx2_get_regs, + .get_wol = bnx2_get_wol, + .set_wol = bnx2_set_wol, + .nway_reset = bnx2_nway_reset, + .get_link = bnx2_get_link, + .get_eeprom_len = bnx2_get_eeprom_len, + .get_eeprom = bnx2_get_eeprom, + .set_eeprom = bnx2_set_eeprom, + .get_coalesce = bnx2_get_coalesce, + .set_coalesce = bnx2_set_coalesce, + .get_ringparam = bnx2_get_ringparam, + .set_ringparam = bnx2_set_ringparam, + .get_pauseparam = bnx2_get_pauseparam, + .set_pauseparam = bnx2_set_pauseparam, + .self_test = bnx2_self_test, + .get_strings = bnx2_get_strings, + .set_phys_id = bnx2_set_phys_id, + .get_ethtool_stats = bnx2_get_ethtool_stats, + .get_sset_count = bnx2_get_sset_count, + .get_channels = bnx2_get_channels, 
+ .set_channels = bnx2_set_channels, +}; + +/* Called with rtnl_lock */ +static int +bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct bnx2 *bp = netdev_priv(dev); + int err; + + switch(cmd) { + case SIOCGMIIPHY: + data->phy_id = bp->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u32 mii_regval; + + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&bp->phy_lock); + err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval); + spin_unlock_bh(&bp->phy_lock); + + data->val_out = mii_regval; + + return err; + } + + case SIOCSMIIREG: + if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) + return -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&bp->phy_lock); + err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in); + spin_unlock_bh(&bp->phy_lock); + + return err; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +/* Called with rtnl_lock */ +static int +bnx2_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct bnx2 *bp = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (netif_running(dev)) + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + + return 0; +} + +/* Called with rtnl_lock */ +static int +bnx2_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || + ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)) + return -EINVAL; + + dev->mtu = new_mtu; + return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size, + false); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +poll_bnx2(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + int i; + + for (i = 0; i < bp->irq_nvecs; i++) { + struct bnx2_irq *irq = &bp->irq_tbl[i]; + + disable_irq(irq->vector); + irq->handler(irq->vector, &bp->bnx2_napi[i]); + enable_irq(irq->vector); + } +} +#endif + +static void +bnx2_get_5709_media(struct bnx2 *bp) +{ + u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); + u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; + u32 strap; + + if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) + return; + else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) { + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + return; + } + + if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) + strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21; + else + strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8; + + if (bp->func == 0) { + switch (strap) { + case 0x4: + case 0x5: + case 0x6: + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + return; + } + } else { + switch (strap) { + case 0x1: + case 0x2: + case 0x4: + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + return; + } + } +} + +static void +bnx2_get_pci_speed(struct bnx2 *bp) +{ + u32 reg; + + reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS); + if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { + u32 clkreg; + + bp->flags |= BNX2_FLAG_PCIX; + + clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); + + clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; + switch (clkreg) { + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ: + bp->bus_speed_mhz = 133; + break; + + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ: + bp->bus_speed_mhz = 100; + break; + + case 
BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ: + bp->bus_speed_mhz = 66; + break; + + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ: + bp->bus_speed_mhz = 50; + break; + + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ: + case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ: + bp->bus_speed_mhz = 33; + break; + } + } + else { + if (reg & BNX2_PCICFG_MISC_STATUS_M66EN) + bp->bus_speed_mhz = 66; + else + bp->bus_speed_mhz = 33; + } + + if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET) + bp->flags |= BNX2_FLAG_PCI_32BIT; + +} + +static void +bnx2_read_vpd_fw_ver(struct bnx2 *bp) +{ + int rc, i, j; + u8 *data; + unsigned int block_end, rosize, len; + +#define BNX2_VPD_NVRAM_OFFSET 0x300 +#define BNX2_VPD_LEN 128 +#define BNX2_MAX_VER_SLEN 30 + + data = kmalloc(256, GFP_KERNEL); + if (!data) + return; + + rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN, + BNX2_VPD_LEN); + if (rc) + goto vpd_done; + + for (i = 0; i < BNX2_VPD_LEN; i += 4) { + data[i] = data[i + BNX2_VPD_LEN + 3]; + data[i + 1] = data[i + BNX2_VPD_LEN + 2]; + data[i + 2] = data[i + BNX2_VPD_LEN + 1]; + data[i + 3] = data[i + BNX2_VPD_LEN]; + } + + i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto vpd_done; + + rosize = pci_vpd_lrdt_size(&data[i]); + i += PCI_VPD_LRDT_TAG_SIZE; + block_end = i + rosize; + + if (block_end > BNX2_VPD_LEN) + goto vpd_done; + + j = pci_vpd_find_info_keyword(data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j < 0) + goto vpd_done; + + len = pci_vpd_info_field_size(&data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&data[j], "1028", 4)) + goto vpd_done; + + j = pci_vpd_find_info_keyword(data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto vpd_done; + + len = pci_vpd_info_field_size(&data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len > BNX2_MAX_VER_SLEN) + goto vpd_done; + + memcpy(bp->fw_version, &data[j], len); + bp->fw_version[len] = ' '; + +vpd_done: + kfree(data); +} + +static int +bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) +{ + struct bnx2 *bp; + int rc, i, j; + u32 reg; + u64 dma_mask, persist_dma_mask; + int err; + + SET_NETDEV_DEV(dev, &pdev->dev); + bp = netdev_priv(dev); + + bp->flags = 0; + bp->phy_flags = 0; + + bp->temp_stats_blk = + kzalloc(sizeof(struct statistics_block), GFP_KERNEL); + + if (bp->temp_stats_blk == NULL) { + rc = -ENOMEM; + goto err_out; + } + + /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + goto err_out; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + rc = pci_request_regions(pdev, DRV_MODULE_NAME); + if (rc) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable; + } + + pci_set_master(pdev); + + bp->pm_cap = pdev->pm_cap; + if (bp->pm_cap == 0) { + dev_err(&pdev->dev, + "Cannot find power management capability, aborting\n"); + rc = -EIO; + goto err_out_release; + } + + bp->dev = dev; + bp->pdev = pdev; + + spin_lock_init(&bp->phy_lock); + spin_lock_init(&bp->indirect_lock); +#ifdef BCM_CNIC + mutex_init(&bp->cnic_lock); +#endif + INIT_WORK(&bp->reset_task, bnx2_reset_task); + + bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID + + TX_MAX_TSS_RINGS + 1)); + if (!bp->regview) { + dev_err(&pdev->dev, "Cannot map register space, aborting\n"); + rc = -ENOMEM; + goto err_out_release; + } + + /* Configure byte swap and enable write to the reg_window registers. + * Rely on CPU to do target byte swapping on big endian systems + * The chip's target access swapping will not swap all accesses + */ + BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, + BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | + BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); + + bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID); + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { + if (!pci_is_pcie(pdev)) { + dev_err(&pdev->dev, "Not PCIE, aborting\n"); + rc = -EIO; + goto err_out_unmap; + } + bp->flags |= BNX2_FLAG_PCIE; + if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax) + bp->flags |= BNX2_FLAG_JUMBO_BROKEN; + + /* AER (Advanced Error Reporting) hooks */ + err = pci_enable_pcie_error_reporting(pdev); + if (!err) + bp->flags |= BNX2_FLAG_AER_ENABLED; + + } else { + bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); + if (bp->pcix_cap == 0) { + dev_err(&pdev->dev, + "Cannot find PCIX capability, aborting\n"); + rc = -EIO; + goto err_out_unmap; + } + bp->flags |= BNX2_FLAG_BROKEN_STATS; + } + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && + BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) { + if (pdev->msix_cap) + bp->flags |= BNX2_FLAG_MSIX_CAP; + } + + if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 && + BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) { + if (pdev->msi_cap) + bp->flags |= BNX2_FLAG_MSI_CAP; + } + + /* 5708 cannot support DMA addresses > 40-bit. */ + if (BNX2_CHIP(bp) == BNX2_CHIP_5708) + persist_dma_mask = dma_mask = DMA_BIT_MASK(40); + else + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + + /* Configure DMA attributes. */ + if (pci_set_dma_mask(pdev, dma_mask) == 0) { + dev->features |= NETIF_F_HIGHDMA; + rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); + if (rc) { + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed, aborting\n"); + goto err_out_unmap; + } + } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + dev_err(&pdev->dev, "System does not support DMA, aborting\n"); + goto err_out_unmap; + } + + if (!(bp->flags & BNX2_FLAG_PCIE)) + bnx2_get_pci_speed(bp); + + /* 5706A0 may falsely detect SERR and PERR. 
*/ + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { + reg = BNX2_RD(bp, PCI_COMMAND); + reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + BNX2_WR(bp, PCI_COMMAND, reg); + } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) && + !(bp->flags & BNX2_FLAG_PCIX)) { + + dev_err(&pdev->dev, + "5706 A1 can only be used in a PCIX bus, aborting\n"); + goto err_out_unmap; + } + + bnx2_init_nvram(bp); + + reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE); + + if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID) + bp->func = 1; + + if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) == + BNX2_SHM_HDR_SIGNATURE_SIG) { + u32 off = bp->func << 2; + + bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off); + } else + bp->shmem_base = HOST_VIEW_SHMEM_BASE; + + /* Get the permanent MAC address. First we need to make sure the + * firmware is actually running. + */ + reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE); + + if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) != + BNX2_DEV_INFO_SIGNATURE_MAGIC) { + dev_err(&pdev->dev, "Firmware not running, aborting\n"); + rc = -ENODEV; + goto err_out_unmap; + } + + bnx2_read_vpd_fw_ver(bp); + + j = strlen(bp->fw_version); + reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV); + for (i = 0; i < 3 && j < 24; i++) { + u8 num, k, skip0; + + if (i == 0) { + bp->fw_version[j++] = 'b'; + bp->fw_version[j++] = 'c'; + bp->fw_version[j++] = ' '; + } + num = (u8) (reg >> (24 - (i * 8))); + for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) { + if (num >= k || !skip0 || k == 1) { + bp->fw_version[j++] = (num / k) + '0'; + skip0 = 0; + } + } + if (i != 2) + bp->fw_version[j++] = '.'; + } + reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); + if (reg & BNX2_PORT_FEATURE_WOL_ENABLED) + bp->wol = 1; + + if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) { + bp->flags |= BNX2_FLAG_ASF_ENABLE; + + for (i = 0; i < 30; i++) { + reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + if (reg & BNX2_CONDITION_MFW_RUN_MASK) + break; + msleep(10); + } + } + reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + reg &= BNX2_CONDITION_MFW_RUN_MASK; + if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN && + reg != BNX2_CONDITION_MFW_RUN_NONE) { + u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR); + + if (j < 32) + bp->fw_version[j++] = ' '; + for (i = 0; i < 3 && j < 28; i++) { + reg = bnx2_reg_rd_ind(bp, addr + i * 4); + reg = be32_to_cpu(reg); + memcpy(&bp->fw_version[j], &reg, 4); + j += 4; + } + } + + reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER); + bp->mac_addr[0] = (u8) (reg >> 8); + bp->mac_addr[1] = (u8) reg; + + reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER); + bp->mac_addr[2] = (u8) (reg >> 24); + bp->mac_addr[3] = (u8) (reg >> 16); + bp->mac_addr[4] = (u8) (reg >> 8); + bp->mac_addr[5] = (u8) reg; + + bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT; + bnx2_set_rx_ring_size(bp, 255); + + bp->tx_quick_cons_trip_int = 2; + bp->tx_quick_cons_trip = 20; + bp->tx_ticks_int = 18; + bp->tx_ticks = 80; + + bp->rx_quick_cons_trip_int = 2; + bp->rx_quick_cons_trip = 12; + bp->rx_ticks_int = 18; + bp->rx_ticks = 18; + + bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; + + bp->current_interval = BNX2_TIMER_INTERVAL; + + bp->phy_addr = 1; + + /* Disable WOL support if we are running on a SERDES chip. 
*/ + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + bnx2_get_5709_media(bp); + else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT) + bp->phy_flags |= BNX2_PHY_FLAG_SERDES; + + bp->phy_port = PORT_TP; + if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { + bp->phy_port = PORT_FIBRE; + reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG); + if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) { + bp->flags |= BNX2_FLAG_NO_WOL; + bp->wol = 0; + } + if (BNX2_CHIP(bp) == BNX2_CHIP_5706) { + /* Don't do parallel detect on this board because of + * some board problems. The link will not go down + * if we do parallel detect. + */ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP && + pdev->subsystem_device == 0x310c) + bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL; + } else { + bp->phy_addr = 2; + if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) + bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; + } + } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 || + BNX2_CHIP(bp) == BNX2_CHIP_5708) + bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; + else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 && + (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax || + BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx)) + bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; + + bnx2_init_fw_cap(bp); + + if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) || + (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) || + !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { + bp->flags |= BNX2_FLAG_NO_WOL; + bp->wol = 0; + } + + if (bp->flags & BNX2_FLAG_NO_WOL) + device_set_wakeup_capable(&bp->pdev->dev, false); + else + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); + + if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) { + bp->tx_quick_cons_trip_int = + bp->tx_quick_cons_trip; + bp->tx_ticks_int = bp->tx_ticks; + bp->rx_quick_cons_trip_int = + bp->rx_quick_cons_trip; + bp->rx_ticks_int = bp->rx_ticks; + bp->comp_prod_trip_int = bp->comp_prod_trip; + bp->com_ticks_int = bp->com_ticks; + bp->cmd_ticks_int = bp->cmd_ticks; + } + + /* Disable MSI on 5706 if AMD 8132 bridge is found. + * + * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes + * with byte enables disabled on the unused 32-bit word. This is legal + * but causes problems on the AMD 8132 which will eventually stop + * responding after a while. + * + * AMD believes this incompatibility is unique to the 5706, and + * prefers to locally disable MSI rather than globally disabling it. 
+ */ + if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) { + struct pci_dev *amd_8132 = NULL; + + while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_8132_BRIDGE, + amd_8132))) { + + if (amd_8132->revision >= 0x10 && + amd_8132->revision <= 0x13) { + disable_msi = 1; + pci_dev_put(amd_8132); + break; + } + } + } + + bnx2_set_default_link(bp); + bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; + + init_timer(&bp->timer); + bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL); + bp->timer.data = (unsigned long) bp; + bp->timer.function = bnx2_timer; + +#ifdef BCM_CNIC + if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN) + bp->cnic_eth_dev.max_iscsi_conn = + (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) & + BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT; + bp->cnic_probe = bnx2_cnic_probe; +#endif + pci_save_state(pdev); + + return 0; + +err_out_unmap: + if (bp->flags & BNX2_FLAG_AER_ENABLED) { + pci_disable_pcie_error_reporting(pdev); + bp->flags &= ~BNX2_FLAG_AER_ENABLED; + } + + pci_iounmap(pdev, bp->regview); + bp->regview = NULL; + +err_out_release: + pci_release_regions(pdev); + +err_out_disable: + pci_disable_device(pdev); + +err_out: + return rc; +} + +static char * +bnx2_bus_string(struct bnx2 *bp, char *str) +{ + char *s = str; + + if (bp->flags & BNX2_FLAG_PCIE) { + s += sprintf(s, "PCI Express"); + } else { + s += sprintf(s, "PCI"); + if (bp->flags & BNX2_FLAG_PCIX) + s += sprintf(s, "-X"); + if (bp->flags & BNX2_FLAG_PCI_32BIT) + s += sprintf(s, " 32-bit"); + else + s += sprintf(s, " 64-bit"); + s += sprintf(s, " %dMHz", bp->bus_speed_mhz); + } + return str; +} + +static void +bnx2_del_napi(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) + netif_napi_del(&bp->bnx2_napi[i].napi); +} + +static void +bnx2_init_napi(struct bnx2 *bp) +{ + int i; + + for (i = 0; i < bp->irq_nvecs; i++) { + struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; + int (*poll)(struct napi_struct *, int); + + if (i == 0) + poll = bnx2_poll; + else + poll = bnx2_poll_msix; + + netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64); + bnapi->bp = bp; + } +} + +static const struct net_device_ops bnx2_netdev_ops = { + .ndo_open = bnx2_open, + .ndo_start_xmit = bnx2_start_xmit, + .ndo_stop = bnx2_close, + .ndo_get_stats64 = bnx2_get_stats64, + .ndo_set_rx_mode = bnx2_set_rx_mode, + .ndo_do_ioctl = bnx2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnx2_change_mac_addr, + .ndo_change_mtu = bnx2_change_mtu, + .ndo_set_features = bnx2_set_features, + .ndo_tx_timeout = bnx2_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_bnx2, +#endif +}; + +static int +bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int version_printed = 0; + struct net_device *dev; + struct bnx2 *bp; + int rc; + char str[40]; + + if (version_printed++ == 0) + pr_info("%s", version); + + /* dev zeroed in init_etherdev */ + dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); + if (!dev) + return -ENOMEM; + + rc = bnx2_init_board(pdev, dev); + if (rc < 0) + goto err_free; + + dev->netdev_ops = &bnx2_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &bnx2_ethtool_ops; + + bp = netdev_priv(dev); + + pci_set_drvdata(pdev, dev); + + memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO_ECN | + NETIF_F_RXHASH | NETIF_F_RXCSUM; + + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + dev->hw_features |= NETIF_F_IPV6_CSUM | 
NETIF_F_TSO6; + + dev->vlan_features = dev->hw_features; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= dev->hw_features; + dev->priv_flags |= IFF_UNICAST_FLT; + + if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if ((rc = register_netdev(dev))) { + dev_err(&pdev->dev, "Cannot register net device\n"); + goto error; + } + + netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, " + "node addr %pM\n", board_info[ent->driver_data].name, + ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A', + ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4), + bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0), + pdev->irq, dev->dev_addr); + + return 0; + +error: + pci_iounmap(pdev, bp->regview); + pci_release_regions(pdev); + pci_disable_device(pdev); +err_free: + free_netdev(dev); + return rc; +} + +static void +bnx2_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + unregister_netdev(dev); + + del_timer_sync(&bp->timer); + cancel_work_sync(&bp->reset_task); + + pci_iounmap(bp->pdev, bp->regview); + + kfree(bp->temp_stats_blk); + + if (bp->flags & BNX2_FLAG_AER_ENABLED) { + pci_disable_pcie_error_reporting(pdev); + bp->flags &= ~BNX2_FLAG_AER_ENABLED; + } + + bnx2_release_firmware(bp); + + free_netdev(dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM_SLEEP +static int +bnx2_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + if (netif_running(dev)) { + cancel_work_sync(&bp->reset_task); + bnx2_netif_stop(bp, true); + netif_device_detach(dev); + del_timer_sync(&bp->timer); + bnx2_shutdown_chip(bp); + __bnx2_free_irq(bp); + bnx2_free_skbs(bp); + } + bnx2_setup_wol(bp); + return 0; +} + +static int +bnx2_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + bnx2_set_power_state(bp, PCI_D0); + netif_device_attach(dev); + bnx2_request_irq(bp); + bnx2_init_nic(bp, 1); + bnx2_netif_start(bp, true); + return 0; +} + +static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume); +#define BNX2_PM_OPS (&bnx2_pm_ops) + +#else + +#define BNX2_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ +/** + * bnx2_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + rtnl_lock(); + netif_device_detach(dev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(dev)) { + bnx2_netif_stop(bp, true); + del_timer_sync(&bp->timer); + bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); + } + + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnx2_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. 
+ */ +static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; + int err = 0; + + rtnl_lock(); + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (netif_running(dev)) + err = bnx2_init_nic(bp, 1); + + if (!err) + result = PCI_ERS_RESULT_RECOVERED; + } + + if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) { + bnx2_napi_enable(bp); + dev_close(dev); + } + rtnl_unlock(); + + if (!(bp->flags & BNX2_FLAG_AER_ENABLED)) + return result; + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); /* non-fatal, continue */ + } + + return result; +} + +/** + * bnx2_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void bnx2_io_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp = netdev_priv(dev); + + rtnl_lock(); + if (netif_running(dev)) + bnx2_netif_start(bp, true); + + netif_device_attach(dev); + rtnl_unlock(); +} + +static void bnx2_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2 *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + if (netif_running(dev)) + dev_close(bp->dev); + + if (system_state == SYSTEM_POWER_OFF) + bnx2_set_power_state(bp, PCI_D3hot); + + rtnl_unlock(); +} + +static const struct pci_error_handlers bnx2_err_handler = { + .error_detected = bnx2_io_error_detected, + .slot_reset = bnx2_io_slot_reset, + .resume = bnx2_io_resume, +}; + +static struct pci_driver bnx2_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnx2_pci_tbl, + .probe = bnx2_init_one, + .remove = bnx2_remove_one, + .driver.pm = BNX2_PM_OPS, + .err_handler = &bnx2_err_handler, + .shutdown = bnx2_shutdown, +}; + +module_pci_driver(bnx2_pci_driver); diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h new file mode 100644 index 000000000..f92f76c44 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -0,0 +1,7465 @@ +/* bnx2.h: QLogic bnx2 network driver. + * + * Copyright (c) 2004-2014 Broadcom Corporation + * Copyright (c) 2014-2015 QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Michael Chan (mchan@broadcom.com) + */ + + +#ifndef BNX2_H +#define BNX2_H + +/* Hardware data structures and register definitions automatically + * generated from RTL code. Do not modify. 
+ */ + +/* + * tx_bd definition + */ +struct bnx2_tx_bd { + u32 tx_bd_haddr_hi; + u32 tx_bd_haddr_lo; + u32 tx_bd_mss_nbytes; + #define TX_BD_TCP6_OFF2_SHL (14) + u32 tx_bd_vlan_tag_flags; + #define TX_BD_FLAGS_CONN_FAULT (1<<0) + #define TX_BD_FLAGS_TCP6_OFF0_MSK (3<<1) + #define TX_BD_FLAGS_TCP6_OFF0_SHL (1) + #define TX_BD_FLAGS_TCP_UDP_CKSUM (1<<1) + #define TX_BD_FLAGS_IP_CKSUM (1<<2) + #define TX_BD_FLAGS_VLAN_TAG (1<<3) + #define TX_BD_FLAGS_COAL_NOW (1<<4) + #define TX_BD_FLAGS_DONT_GEN_CRC (1<<5) + #define TX_BD_FLAGS_END (1<<6) + #define TX_BD_FLAGS_START (1<<7) + #define TX_BD_FLAGS_SW_OPTION_WORD (0x1f<<8) + #define TX_BD_FLAGS_TCP6_OFF4_SHL (12) + #define TX_BD_FLAGS_SW_FLAGS (1<<13) + #define TX_BD_FLAGS_SW_SNAP (1<<14) + #define TX_BD_FLAGS_SW_LSO (1<<15) + +}; + + +/* + * rx_bd definition + */ +struct bnx2_rx_bd { + u32 rx_bd_haddr_hi; + u32 rx_bd_haddr_lo; + u32 rx_bd_len; + u32 rx_bd_flags; + #define RX_BD_FLAGS_NOPUSH (1<<0) + #define RX_BD_FLAGS_DUMMY (1<<1) + #define RX_BD_FLAGS_END (1<<2) + #define RX_BD_FLAGS_START (1<<3) + +}; + +#define BNX2_RX_ALIGN 16 + +/* + * status_block definition + */ +struct status_block { + u32 status_attn_bits; + #define STATUS_ATTN_BITS_LINK_STATE (1L<<0) + #define STATUS_ATTN_BITS_TX_SCHEDULER_ABORT (1L<<1) + #define STATUS_ATTN_BITS_TX_BD_READ_ABORT (1L<<2) + #define STATUS_ATTN_BITS_TX_BD_CACHE_ABORT (1L<<3) + #define STATUS_ATTN_BITS_TX_PROCESSOR_ABORT (1L<<4) + #define STATUS_ATTN_BITS_TX_DMA_ABORT (1L<<5) + #define STATUS_ATTN_BITS_TX_PATCHUP_ABORT (1L<<6) + #define STATUS_ATTN_BITS_TX_ASSEMBLER_ABORT (1L<<7) + #define STATUS_ATTN_BITS_RX_PARSER_MAC_ABORT (1L<<8) + #define STATUS_ATTN_BITS_RX_PARSER_CATCHUP_ABORT (1L<<9) + #define STATUS_ATTN_BITS_RX_MBUF_ABORT (1L<<10) + #define STATUS_ATTN_BITS_RX_LOOKUP_ABORT (1L<<11) + #define STATUS_ATTN_BITS_RX_PROCESSOR_ABORT (1L<<12) + #define STATUS_ATTN_BITS_RX_V2P_ABORT (1L<<13) + #define STATUS_ATTN_BITS_RX_BD_CACHE_ABORT (1L<<14) + #define STATUS_ATTN_BITS_RX_DMA_ABORT (1L<<15) + #define STATUS_ATTN_BITS_COMPLETION_ABORT (1L<<16) + #define STATUS_ATTN_BITS_HOST_COALESCE_ABORT (1L<<17) + #define STATUS_ATTN_BITS_MAILBOX_QUEUE_ABORT (1L<<18) + #define STATUS_ATTN_BITS_CONTEXT_ABORT (1L<<19) + #define STATUS_ATTN_BITS_CMD_SCHEDULER_ABORT (1L<<20) + #define STATUS_ATTN_BITS_CMD_PROCESSOR_ABORT (1L<<21) + #define STATUS_ATTN_BITS_MGMT_PROCESSOR_ABORT (1L<<22) + #define STATUS_ATTN_BITS_MAC_ABORT (1L<<23) + #define STATUS_ATTN_BITS_TIMER_ABORT (1L<<24) + #define STATUS_ATTN_BITS_DMAE_ABORT (1L<<25) + #define STATUS_ATTN_BITS_FLSH_ABORT (1L<<26) + #define STATUS_ATTN_BITS_GRC_ABORT (1L<<27) + #define STATUS_ATTN_BITS_EPB_ERROR (1L<<30) + #define STATUS_ATTN_BITS_PARITY_ERROR (1L<<31) + + u32 status_attn_bits_ack; +#if defined(__BIG_ENDIAN) + u16 status_tx_quick_consumer_index0; + u16 status_tx_quick_consumer_index1; + u16 status_tx_quick_consumer_index2; + u16 status_tx_quick_consumer_index3; + u16 status_rx_quick_consumer_index0; + u16 status_rx_quick_consumer_index1; + u16 status_rx_quick_consumer_index2; + u16 status_rx_quick_consumer_index3; + u16 status_rx_quick_consumer_index4; + u16 status_rx_quick_consumer_index5; + u16 status_rx_quick_consumer_index6; + u16 status_rx_quick_consumer_index7; + u16 status_rx_quick_consumer_index8; + u16 status_rx_quick_consumer_index9; + u16 status_rx_quick_consumer_index10; + u16 status_rx_quick_consumer_index11; + u16 status_rx_quick_consumer_index12; + u16 status_rx_quick_consumer_index13; + u16 status_rx_quick_consumer_index14; + u16 
status_rx_quick_consumer_index15; + u16 status_completion_producer_index; + u16 status_cmd_consumer_index; + u16 status_idx; + u8 status_unused; + u8 status_blk_num; +#elif defined(__LITTLE_ENDIAN) + u16 status_tx_quick_consumer_index1; + u16 status_tx_quick_consumer_index0; + u16 status_tx_quick_consumer_index3; + u16 status_tx_quick_consumer_index2; + u16 status_rx_quick_consumer_index1; + u16 status_rx_quick_consumer_index0; + u16 status_rx_quick_consumer_index3; + u16 status_rx_quick_consumer_index2; + u16 status_rx_quick_consumer_index5; + u16 status_rx_quick_consumer_index4; + u16 status_rx_quick_consumer_index7; + u16 status_rx_quick_consumer_index6; + u16 status_rx_quick_consumer_index9; + u16 status_rx_quick_consumer_index8; + u16 status_rx_quick_consumer_index11; + u16 status_rx_quick_consumer_index10; + u16 status_rx_quick_consumer_index13; + u16 status_rx_quick_consumer_index12; + u16 status_rx_quick_consumer_index15; + u16 status_rx_quick_consumer_index14; + u16 status_cmd_consumer_index; + u16 status_completion_producer_index; + u8 status_blk_num; + u8 status_unused; + u16 status_idx; +#endif +}; + +/* + * status_block definition + */ +struct status_block_msix { +#if defined(__BIG_ENDIAN) + u16 status_tx_quick_consumer_index; + u16 status_rx_quick_consumer_index; + u16 status_completion_producer_index; + u16 status_cmd_consumer_index; + u32 status_unused; + u16 status_idx; + u8 status_unused2; + u8 status_blk_num; +#elif defined(__LITTLE_ENDIAN) + u16 status_rx_quick_consumer_index; + u16 status_tx_quick_consumer_index; + u16 status_cmd_consumer_index; + u16 status_completion_producer_index; + u32 status_unused; + u8 status_blk_num; + u8 status_unused2; + u16 status_idx; +#endif +}; + +#define BNX2_SBLK_MSIX_ALIGN_SIZE 128 + + +/* + * statistics_block definition + */ +struct statistics_block { + u32 stat_IfHCInOctets_hi; + u32 stat_IfHCInOctets_lo; + u32 stat_IfHCInBadOctets_hi; + u32 stat_IfHCInBadOctets_lo; + u32 stat_IfHCOutOctets_hi; + u32 stat_IfHCOutOctets_lo; + u32 stat_IfHCOutBadOctets_hi; + u32 stat_IfHCOutBadOctets_lo; + u32 stat_IfHCInUcastPkts_hi; + u32 stat_IfHCInUcastPkts_lo; + u32 stat_IfHCInMulticastPkts_hi; + u32 stat_IfHCInMulticastPkts_lo; + u32 stat_IfHCInBroadcastPkts_hi; + u32 stat_IfHCInBroadcastPkts_lo; + u32 stat_IfHCOutUcastPkts_hi; + u32 stat_IfHCOutUcastPkts_lo; + u32 stat_IfHCOutMulticastPkts_hi; + u32 stat_IfHCOutMulticastPkts_lo; + u32 stat_IfHCOutBroadcastPkts_hi; + u32 stat_IfHCOutBroadcastPkts_lo; + u32 stat_emac_tx_stat_dot3statsinternalmactransmiterrors; + u32 stat_Dot3StatsCarrierSenseErrors; + u32 stat_Dot3StatsFCSErrors; + u32 stat_Dot3StatsAlignmentErrors; + u32 stat_Dot3StatsSingleCollisionFrames; + u32 stat_Dot3StatsMultipleCollisionFrames; + u32 stat_Dot3StatsDeferredTransmissions; + u32 stat_Dot3StatsExcessiveCollisions; + u32 stat_Dot3StatsLateCollisions; + u32 stat_EtherStatsCollisions; + u32 stat_EtherStatsFragments; + u32 stat_EtherStatsJabbers; + u32 stat_EtherStatsUndersizePkts; + u32 stat_EtherStatsOverrsizePkts; + u32 stat_EtherStatsPktsRx64Octets; + u32 stat_EtherStatsPktsRx65Octetsto127Octets; + u32 stat_EtherStatsPktsRx128Octetsto255Octets; + u32 stat_EtherStatsPktsRx256Octetsto511Octets; + u32 stat_EtherStatsPktsRx512Octetsto1023Octets; + u32 stat_EtherStatsPktsRx1024Octetsto1522Octets; + u32 stat_EtherStatsPktsRx1523Octetsto9022Octets; + u32 stat_EtherStatsPktsTx64Octets; + u32 stat_EtherStatsPktsTx65Octetsto127Octets; + u32 stat_EtherStatsPktsTx128Octetsto255Octets; + u32 stat_EtherStatsPktsTx256Octetsto511Octets; + 
u32 stat_EtherStatsPktsTx512Octetsto1023Octets; + u32 stat_EtherStatsPktsTx1024Octetsto1522Octets; + u32 stat_EtherStatsPktsTx1523Octetsto9022Octets; + u32 stat_XonPauseFramesReceived; + u32 stat_XoffPauseFramesReceived; + u32 stat_OutXonSent; + u32 stat_OutXoffSent; + u32 stat_FlowControlDone; + u32 stat_MacControlFramesReceived; + u32 stat_XoffStateEntered; + u32 stat_IfInFramesL2FilterDiscards; + u32 stat_IfInRuleCheckerDiscards; + u32 stat_IfInFTQDiscards; + u32 stat_IfInMBUFDiscards; + u32 stat_IfInRuleCheckerP4Hit; + u32 stat_CatchupInRuleCheckerDiscards; + u32 stat_CatchupInFTQDiscards; + u32 stat_CatchupInMBUFDiscards; + u32 stat_CatchupInRuleCheckerP4Hit; + u32 stat_GenStat00; + u32 stat_GenStat01; + u32 stat_GenStat02; + u32 stat_GenStat03; + u32 stat_GenStat04; + u32 stat_GenStat05; + u32 stat_GenStat06; + u32 stat_GenStat07; + u32 stat_GenStat08; + u32 stat_GenStat09; + u32 stat_GenStat10; + u32 stat_GenStat11; + u32 stat_GenStat12; + u32 stat_GenStat13; + u32 stat_GenStat14; + u32 stat_GenStat15; + u32 stat_FwRxDrop; +}; + + +/* + * l2_fhdr definition + */ +struct l2_fhdr { + u32 l2_fhdr_status; + #define L2_FHDR_STATUS_RULE_CLASS (0x7<<0) + #define L2_FHDR_STATUS_RULE_P2 (1<<3) + #define L2_FHDR_STATUS_RULE_P3 (1<<4) + #define L2_FHDR_STATUS_RULE_P4 (1<<5) + #define L2_FHDR_STATUS_L2_VLAN_TAG (1<<6) + #define L2_FHDR_STATUS_L2_LLC_SNAP (1<<7) + #define L2_FHDR_STATUS_RSS_HASH (1<<8) + #define L2_FHDR_STATUS_IP_DATAGRAM (1<<13) + #define L2_FHDR_STATUS_TCP_SEGMENT (1<<14) + #define L2_FHDR_STATUS_UDP_DATAGRAM (1<<15) + + #define L2_FHDR_STATUS_SPLIT (1<<16) + #define L2_FHDR_ERRORS_BAD_CRC (1<<17) + #define L2_FHDR_ERRORS_PHY_DECODE (1<<18) + #define L2_FHDR_ERRORS_ALIGNMENT (1<<19) + #define L2_FHDR_ERRORS_TOO_SHORT (1<<20) + #define L2_FHDR_ERRORS_GIANT_FRAME (1<<21) + #define L2_FHDR_ERRORS_TCP_XSUM (1<<28) + #define L2_FHDR_ERRORS_UDP_XSUM (1<<31) + + #define L2_FHDR_STATUS_USE_RXHASH \ + (L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH) + + u32 l2_fhdr_hash; +#if defined(__BIG_ENDIAN) + u16 l2_fhdr_pkt_len; + u16 l2_fhdr_vlan_tag; + u16 l2_fhdr_ip_xsum; + u16 l2_fhdr_tcp_udp_xsum; +#elif defined(__LITTLE_ENDIAN) + u16 l2_fhdr_vlan_tag; + u16 l2_fhdr_pkt_len; + u16 l2_fhdr_tcp_udp_xsum; + u16 l2_fhdr_ip_xsum; +#endif +}; + +#define BNX2_RX_OFFSET (sizeof(struct l2_fhdr) + 2) + +/* + * l2_context definition + */ +#define BNX2_L2CTX_TYPE 0x00000000 +#define BNX2_L2CTX_TYPE_SIZE_L2 ((0xc0/0x20)<<16) +#define BNX2_L2CTX_TYPE_TYPE (0xf<<28) +#define BNX2_L2CTX_TYPE_TYPE_EMPTY (0<<28) +#define BNX2_L2CTX_TYPE_TYPE_L2 (1<<28) + +#define BNX2_L2CTX_TX_HOST_BIDX 0x00000088 +#define BNX2_L2CTX_EST_NBD 0x00000088 +#define BNX2_L2CTX_CMD_TYPE 0x00000088 +#define BNX2_L2CTX_CMD_TYPE_TYPE (0xf<<24) +#define BNX2_L2CTX_CMD_TYPE_TYPE_L2 (0<<24) +#define BNX2_L2CTX_CMD_TYPE_TYPE_TCP (1<<24) + +#define BNX2_L2CTX_TX_HOST_BSEQ 0x00000090 +#define BNX2_L2CTX_TSCH_BSEQ 0x00000094 +#define BNX2_L2CTX_TBDR_BSEQ 0x00000098 +#define BNX2_L2CTX_TBDR_BOFF 0x0000009c +#define BNX2_L2CTX_TBDR_BIDX 0x0000009c +#define BNX2_L2CTX_TBDR_BHADDR_HI 0x000000a0 +#define BNX2_L2CTX_TBDR_BHADDR_LO 0x000000a4 +#define BNX2_L2CTX_TXP_BOFF 0x000000a8 +#define BNX2_L2CTX_TXP_BIDX 0x000000a8 +#define BNX2_L2CTX_TXP_BSEQ 0x000000ac + +#define BNX2_L2CTX_TYPE_XI 0x00000080 +#define BNX2_L2CTX_CMD_TYPE_XI 0x00000240 +#define BNX2_L2CTX_TBDR_BHADDR_HI_XI 0x00000258 +#define BNX2_L2CTX_TBDR_BHADDR_LO_XI 0x0000025c + +/* + * l2_bd_chain_context definition + */ +#define BNX2_L2CTX_BD_PRE_READ 0x00000000 +#define 
BNX2_L2CTX_CTX_SIZE 0x00000000 +#define BNX2_L2CTX_CTX_TYPE 0x00000000 +#define BNX2_L2CTX_FLOW_CTRL_ENABLE 0x000000ff +#define BNX2_L2CTX_CTX_TYPE_SIZE_L2 ((0x20/20)<<16) +#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE (0xf<<28) +#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED (0<<28) +#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) + +#define BNX2_L2CTX_HOST_BDIDX 0x00000004 +#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16 +#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24 +#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0) +#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0) +#define BNX2_L2CTX_HOST_BSEQ 0x00000008 +#define BNX2_L2CTX_NX_BSEQ 0x0000000c +#define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 +#define BNX2_L2CTX_NX_BDHADDR_LO 0x00000014 +#define BNX2_L2CTX_NX_BDIDX 0x00000018 + +#define BNX2_L2CTX_HOST_PG_BDIDX 0x00000044 +#define BNX2_L2CTX_PG_BUF_SIZE 0x00000048 +#define BNX2_L2CTX_RBDC_KEY 0x0000004c +#define BNX2_L2CTX_RBDC_JUMBO_KEY 0x3ffe +#define BNX2_L2CTX_NX_PG_BDHADDR_HI 0x00000050 +#define BNX2_L2CTX_NX_PG_BDHADDR_LO 0x00000054 + +/* + * pci_config_l definition + * offset: 0000 + */ +#define BNX2_PCICFG_MSI_CONTROL 0x00000058 +#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16) + +#define BNX2_PCICFG_MISC_CONFIG 0x00000068 +#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2) +#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3) +#define BNX2_PCICFG_MISC_CONFIG_RESERVED1 (1L<<4) +#define BNX2_PCICFG_MISC_CONFIG_CLOCK_CTL_ENA (1L<<5) +#define BNX2_PCICFG_MISC_CONFIG_TARGET_GRC_WORD_SWAP (1L<<6) +#define BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA (1L<<7) +#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ (1L<<8) +#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY (1L<<9) +#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN1_SWAP_EN (1L<<10) +#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN2_SWAP_EN (1L<<11) +#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN3_SWAP_EN (1L<<12) +#define BNX2_PCICFG_MISC_CONFIG_ASIC_METAL_REV (0xffL<<16) +#define BNX2_PCICFG_MISC_CONFIG_ASIC_BASE_REV (0xfL<<24) +#define BNX2_PCICFG_MISC_CONFIG_ASIC_ID (0xfL<<28) + +#define BNX2_PCICFG_MISC_STATUS 0x0000006c +#define BNX2_PCICFG_MISC_STATUS_INTA_VALUE (1L<<0) +#define BNX2_PCICFG_MISC_STATUS_32BIT_DET (1L<<1) +#define BNX2_PCICFG_MISC_STATUS_M66EN (1L<<2) +#define BNX2_PCICFG_MISC_STATUS_PCIX_DET (1L<<3) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED (0x3L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_66 (0L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_100 (1L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_133 (2L<<4) +#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_PCI_MODE (3L<<4) +#define BNX2_PCICFG_MISC_STATUS_BAD_MEM_WRITE_BE (1L<<8) + +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS 0x00000070 +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0) +#define 
BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_17 (1L<<17) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_18 (1L<<18) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_19 (1L<<19) +#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED (0xfffL<<20) + +#define BNX2_PCICFG_REG_WINDOW_ADDRESS 0x00000078 +#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL (0xfffffL<<2) + +#define BNX2_PCICFG_REG_WINDOW 0x00000080 +#define BNX2_PCICFG_INT_ACK_CMD 0x00000084 +#define BNX2_PCICFG_INT_ACK_CMD_INDEX (0xffffL<<0) +#define BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID (1L<<16) +#define BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM (1L<<17) +#define BNX2_PCICFG_INT_ACK_CMD_MASK_INT (1L<<18) +#define BNX2_PCICFG_INT_ACK_CMD_INTERRUPT_NUM (0xfL<<24) +#define BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT 24 + +#define BNX2_PCICFG_STATUS_BIT_SET_CMD 0x00000088 +#define BNX2_PCICFG_STATUS_BIT_CLEAR_CMD 0x0000008c +#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090 +#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094 + +#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4 +#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16) + +/* + * pci_reg definition + * offset: 0x400 + */ +#define BNX2_PCI_GRC_WINDOW_ADDR 0x00000400 +#define BNX2_PCI_GRC_WINDOW_ADDR_VALUE (0x1ffL<<13) +#define BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN (1L<<31) + +#define BNX2_PCI_GRC_WINDOW2_BASE 0xc000 +#define BNX2_PCI_GRC_WINDOW3_BASE 0xe000 + +#define BNX2_PCI_CONFIG_1 0x00000404 +#define BNX2_PCI_CONFIG_1_RESERVED0 (0xffL<<0) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY (0x7L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_OFF (0L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_16 (1L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_32 (2L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_64 (3L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_128 (4L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_256 (5L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_512 (6L<<8) +#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_1024 (7L<<8) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY (0x7L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_OFF (0L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_16 (1L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_32 (2L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_64 (3L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_128 (4L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_256 (5L<<11) +#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_512 (6L<<11) +#define 
BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_1024 (7L<<11) +#define BNX2_PCI_CONFIG_1_RESERVED1 (0x3ffffL<<14) + +#define BNX2_PCI_CONFIG_2 0x00000408 +#define BNX2_PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) +#define BNX2_PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define BNX2_PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define BNX2_PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define BNX2_PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1K (1L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2K (2L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4K (3L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8K (4L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16K (5L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_32K (6L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_64K (7L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_128K (8L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_256K (9L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_512K (10L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1M (11L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2M (12L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4M (13L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8M (14L<<8) +#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16M (15L<<8) +#define BNX2_PCI_CONFIG_2_MAX_SPLIT_LIMIT (0x1fL<<16) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT (0x3L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_512 (0L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_1K (1L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_2K (2L<<21) +#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_4K (3L<<21) +#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_MSTR (1L<<23) +#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_TGT (1L<<24) +#define BNX2_PCI_CONFIG_2_KEEP_REQ_ASSERT (1L<<25) +#define BNX2_PCI_CONFIG_2_RESERVED0 (0x3fL<<26) +#define BNX2_PCI_CONFIG_2_BAR_PREFETCH_XI (1L<<16) +#define BNX2_PCI_CONFIG_2_RESERVED0_XI (0x7fffL<<17) + +#define BNX2_PCI_CONFIG_3 0x0000040c +#define BNX2_PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define BNX2_PCI_CONFIG_3_REG_STICKY_BYTE (0xffL<<8) +#define BNX2_PCI_CONFIG_3_FORCE_PME (1L<<24) +#define BNX2_PCI_CONFIG_3_PME_STATUS (1L<<25) +#define BNX2_PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define BNX2_PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define BNX2_PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define BNX2_PCI_CONFIG_3_PCI_POWER (1L<<31) + +#define BNX2_PCI_PM_DATA_A 0x00000410 +#define BNX2_PCI_PM_DATA_A_PM_DATA_0_PRG (0xffL<<0) +#define BNX2_PCI_PM_DATA_A_PM_DATA_1_PRG (0xffL<<8) +#define BNX2_PCI_PM_DATA_A_PM_DATA_2_PRG (0xffL<<16) +#define BNX2_PCI_PM_DATA_A_PM_DATA_3_PRG (0xffL<<24) + +#define BNX2_PCI_PM_DATA_B 0x00000414 +#define BNX2_PCI_PM_DATA_B_PM_DATA_4_PRG (0xffL<<0) +#define BNX2_PCI_PM_DATA_B_PM_DATA_5_PRG (0xffL<<8) +#define 
BNX2_PCI_PM_DATA_B_PM_DATA_6_PRG (0xffL<<16) +#define BNX2_PCI_PM_DATA_B_PM_DATA_7_PRG (0xffL<<24) + +#define BNX2_PCI_SWAP_DIAG0 0x00000418 +#define BNX2_PCI_SWAP_DIAG1 0x0000041c +#define BNX2_PCI_EXP_ROM_ADDR 0x00000420 +#define BNX2_PCI_EXP_ROM_ADDR_ADDRESS (0x3fffffL<<2) +#define BNX2_PCI_EXP_ROM_ADDR_REQ (1L<<31) + +#define BNX2_PCI_EXP_ROM_DATA 0x00000424 +#define BNX2_PCI_VPD_INTF 0x00000428 +#define BNX2_PCI_VPD_INTF_INTF_REQ (1L<<0) + +#define BNX2_PCI_VPD_ADDR_FLAG 0x0000042c +#define BNX2_PCI_VPD_ADDR_FLAG_MSK 0x0000ffff +#define BNX2_PCI_VPD_ADDR_FLAG_SL 0L +#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS (0x1fffL<<2) +#define BNX2_PCI_VPD_ADDR_FLAG_WR (1L<<15) + +#define BNX2_PCI_VPD_DATA 0x00000430 +#define BNX2_PCI_ID_VAL1 0x00000434 +#define BNX2_PCI_ID_VAL1_DEVICE_ID (0xffffL<<0) +#define BNX2_PCI_ID_VAL1_VENDOR_ID (0xffffL<<16) + +#define BNX2_PCI_ID_VAL2 0x00000438 +#define BNX2_PCI_ID_VAL2_SUBSYSTEM_VENDOR_ID (0xffffL<<0) +#define BNX2_PCI_ID_VAL2_SUBSYSTEM_ID (0xffffL<<16) + +#define BNX2_PCI_ID_VAL3 0x0000043c +#define BNX2_PCI_ID_VAL3_CLASS_CODE (0xffffffL<<0) +#define BNX2_PCI_ID_VAL3_REVISION_ID (0xffL<<24) + +#define BNX2_PCI_ID_VAL4 0x00000440 +#define BNX2_PCI_ID_VAL4_CAP_ENA (0xfL<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_0 (0L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_1 (1L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_2 (2L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_3 (3L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_4 (4L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_5 (5L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_6 (6L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_7 (7L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_8 (8L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_9 (9L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_10 (10L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_11 (11L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_12 (12L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_13 (13L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_14 (14L<<0) +#define BNX2_PCI_ID_VAL4_CAP_ENA_15 (15L<<0) +#define BNX2_PCI_ID_VAL4_RESERVED0 (0x3L<<4) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG (0x3L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_0 (0L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_1 (1L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_2 (2L<<6) +#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_3 (3L<<6) +#define BNX2_PCI_ID_VAL4_MSI_PV_MASK_CAP (1L<<8) +#define BNX2_PCI_ID_VAL4_MSI_LIMIT (0x7L<<9) +#define BNX2_PCI_ID_VAL4_MULTI_MSG_CAP (0x7L<<12) +#define BNX2_PCI_ID_VAL4_MSI_ENABLE (1L<<15) +#define BNX2_PCI_ID_VAL4_MAX_64_ADVERTIZE (1L<<16) +#define BNX2_PCI_ID_VAL4_MAX_133_ADVERTIZE (1L<<17) +#define BNX2_PCI_ID_VAL4_RESERVED2 (0x7L<<18) +#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B21 (0x3L<<21) +#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B21 (0x3L<<23) +#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B0 (1L<<25) +#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE_B10 (0x3L<<26) +#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B0 (1L<<28) +#define BNX2_PCI_ID_VAL4_RESERVED3 (0x7L<<29) +#define BNX2_PCI_ID_VAL4_RESERVED3_XI (0xffffL<<16) + +#define BNX2_PCI_ID_VAL5 0x00000444 +#define BNX2_PCI_ID_VAL5_D1_SUPPORT (1L<<0) +#define BNX2_PCI_ID_VAL5_D2_SUPPORT (1L<<1) +#define BNX2_PCI_ID_VAL5_PME_IN_D0 (1L<<2) +#define BNX2_PCI_ID_VAL5_PME_IN_D1 (1L<<3) +#define BNX2_PCI_ID_VAL5_PME_IN_D2 (1L<<4) +#define BNX2_PCI_ID_VAL5_PME_IN_D3_HOT (1L<<5) +#define BNX2_PCI_ID_VAL5_RESERVED0_TE (0x3ffffffL<<6) +#define BNX2_PCI_ID_VAL5_PM_VERSION_XI (0x7L<<6) +#define BNX2_PCI_ID_VAL5_NO_SOFT_RESET_XI (1L<<9) +#define BNX2_PCI_ID_VAL5_RESERVED0_XI (0x3fffffL<<10) + +#define BNX2_PCI_PCIX_EXTENDED_STATUS 0x00000448 +#define 
BNX2_PCI_PCIX_EXTENDED_STATUS_NO_SNOOP (1L<<8) +#define BNX2_PCI_PCIX_EXTENDED_STATUS_LONG_BURST (1L<<9) +#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_CLASS (0xfL<<16) +#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_IDX (0xffL<<24) + +#define BNX2_PCI_ID_VAL6 0x0000044c +#define BNX2_PCI_ID_VAL6_MAX_LAT (0xffL<<0) +#define BNX2_PCI_ID_VAL6_MIN_GNT (0xffL<<8) +#define BNX2_PCI_ID_VAL6_BIST (0xffL<<16) +#define BNX2_PCI_ID_VAL6_RESERVED0 (0xffL<<24) + +#define BNX2_PCI_MSI_DATA 0x00000450 +#define BNX2_PCI_MSI_DATA_MSI_DATA (0xffffL<<0) + +#define BNX2_PCI_MSI_ADDR_H 0x00000454 +#define BNX2_PCI_MSI_ADDR_L 0x00000458 +#define BNX2_PCI_MSI_ADDR_L_VAL (0x3fffffffL<<2) + +#define BNX2_PCI_CFG_ACCESS_CMD 0x0000045c +#define BNX2_PCI_CFG_ACCESS_CMD_ADR (0x3fL<<2) +#define BNX2_PCI_CFG_ACCESS_CMD_RD_REQ (1L<<27) +#define BNX2_PCI_CFG_ACCESS_CMD_WR_REQ (0xfL<<28) + +#define BNX2_PCI_CFG_ACCESS_DATA 0x00000460 +#define BNX2_PCI_MSI_MASK 0x00000464 +#define BNX2_PCI_MSI_MASK_MSI_MASK (0xffffffffL<<0) + +#define BNX2_PCI_MSI_PEND 0x00000468 +#define BNX2_PCI_MSI_PEND_MSI_PEND (0xffffffffL<<0) + +#define BNX2_PCI_PM_DATA_C 0x0000046c +#define BNX2_PCI_PM_DATA_C_PM_DATA_8_PRG (0xffL<<0) +#define BNX2_PCI_PM_DATA_C_RESERVED0 (0xffffffL<<8) + +#define BNX2_PCI_MSIX_CONTROL 0x000004c0 +#define BNX2_PCI_MSIX_CONTROL_MSIX_TBL_SIZ (0x7ffL<<0) +#define BNX2_PCI_MSIX_CONTROL_RESERVED0 (0x1fffffL<<11) + +#define BNX2_PCI_MSIX_TBL_OFF_BIR 0x000004c4 +#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_BIR (0x7L<<0) +#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_OFF (0x1fffffffL<<3) + +#define BNX2_PCI_MSIX_PBA_OFF_BIT 0x000004c8 +#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_BIR (0x7L<<0) +#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_OFF (0x1fffffffL<<3) + +#define BNX2_PCI_PCIE_CAPABILITY 0x000004d0 +#define BNX2_PCI_PCIE_CAPABILITY_INTERRUPT_MSG_NUM (0x1fL<<0) +#define BNX2_PCI_PCIE_CAPABILITY_COMPLY_PCIE_1_1 (1L<<5) + +#define BNX2_PCI_DEVICE_CAPABILITY 0x000004d4 +#define BNX2_PCI_DEVICE_CAPABILITY_MAX_PL_SIZ_SUPPORTED (0x7L<<0) +#define BNX2_PCI_DEVICE_CAPABILITY_EXTENDED_TAG_SUPPORT (1L<<5) +#define BNX2_PCI_DEVICE_CAPABILITY_L0S_ACCEPTABLE_LATENCY (0x7L<<6) +#define BNX2_PCI_DEVICE_CAPABILITY_L1_ACCEPTABLE_LATENCY (0x7L<<9) +#define BNX2_PCI_DEVICE_CAPABILITY_ROLE_BASED_ERR_RPT (1L<<15) + +#define BNX2_PCI_LINK_CAPABILITY 0x000004dc +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED (0xfL<<0) +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0001 (1L<<0) +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0010 (1L<<0) +#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_WIDTH (0x1fL<<4) +#define BNX2_PCI_LINK_CAPABILITY_CLK_POWER_MGMT (1L<<9) +#define BNX2_PCI_LINK_CAPABILITY_ASPM_SUPPORT (0x3L<<10) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT (0x7L<<12) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_101 (5L<<12) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_110 (6L<<12) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT (0x7L<<15) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_001 (1L<<15) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_010 (2L<<15) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT (0x7L<<18) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_101 (5L<<18) +#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_110 (6L<<18) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT (0x7L<<21) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_001 (1L<<21) +#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_010 (2L<<21) +#define BNX2_PCI_LINK_CAPABILITY_PORT_NUM (0xffL<<24) + +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2 
0x000004e4 +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_RANGE_SUPP (0xfL<<0) +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_DISABL_SUPP (1L<<4) +#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_RESERVED (0x7ffffffL<<5) + +#define BNX2_PCI_PCIE_LINK_CAPABILITY_2 0x000004e8 +#define BNX2_PCI_PCIE_LINK_CAPABILITY_2_RESERVED (0xffffffffL<<0) + +#define BNX2_PCI_GRC_WINDOW1_ADDR 0x00000610 +#define BNX2_PCI_GRC_WINDOW1_ADDR_VALUE (0x1ffL<<13) + +#define BNX2_PCI_GRC_WINDOW2_ADDR 0x00000614 +#define BNX2_PCI_GRC_WINDOW2_ADDR_VALUE (0x1ffL<<13) + +#define BNX2_PCI_GRC_WINDOW3_ADDR 0x00000618 +#define BNX2_PCI_GRC_WINDOW3_ADDR_VALUE (0x1ffL<<13) + +#define BNX2_MSIX_TABLE_ADDR 0x318000 +#define BNX2_MSIX_PBA_ADDR 0x31c000 + +/* + * misc_reg definition + * offset: 0x800 + */ +#define BNX2_MISC_COMMAND 0x00000800 +#define BNX2_MISC_COMMAND_ENABLE_ALL (1L<<0) +#define BNX2_MISC_COMMAND_DISABLE_ALL (1L<<1) +#define BNX2_MISC_COMMAND_SW_RESET (1L<<4) +#define BNX2_MISC_COMMAND_POR_RESET (1L<<5) +#define BNX2_MISC_COMMAND_HD_RESET (1L<<6) +#define BNX2_MISC_COMMAND_CMN_SW_RESET (1L<<7) +#define BNX2_MISC_COMMAND_PAR_ERROR (1L<<8) +#define BNX2_MISC_COMMAND_CS16_ERR (1L<<9) +#define BNX2_MISC_COMMAND_CS16_ERR_LOC (0xfL<<12) +#define BNX2_MISC_COMMAND_PAR_ERR_RAM (0x7fL<<16) +#define BNX2_MISC_COMMAND_POWERDOWN_EVENT (1L<<23) +#define BNX2_MISC_COMMAND_SW_SHUTDOWN (1L<<24) +#define BNX2_MISC_COMMAND_SHUTDOWN_EN (1L<<25) +#define BNX2_MISC_COMMAND_DINTEG_ATTN_EN (1L<<26) +#define BNX2_MISC_COMMAND_PCIE_LINK_IN_L23 (1L<<27) +#define BNX2_MISC_COMMAND_PCIE_DIS (1L<<28) + +#define BNX2_MISC_CFG 0x00000804 +#define BNX2_MISC_CFG_GRC_TMOUT (1L<<0) +#define BNX2_MISC_CFG_NVM_WR_EN (0x3L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_PROTECT (0L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_PCI (1L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW (2L<<1) +#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW2 (3L<<1) +#define BNX2_MISC_CFG_BIST_EN (1L<<3) +#define BNX2_MISC_CFG_CK25_OUT_ALT_SRC (1L<<4) +#define BNX2_MISC_CFG_RESERVED5_TE (1L<<5) +#define BNX2_MISC_CFG_RESERVED6_TE (1L<<6) +#define BNX2_MISC_CFG_CLK_CTL_OVERRIDE (1L<<7) +#define BNX2_MISC_CFG_LEDMODE (0x7L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC (0L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY1_TE (1L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY2_TE (2L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY3_TE (3L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY4_TE (4L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY5_TE (5L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY6_TE (6L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY7_TE (7L<<8) +#define BNX2_MISC_CFG_MCP_GRC_TMOUT_TE (1L<<11) +#define BNX2_MISC_CFG_DBU_GRC_TMOUT_TE (1L<<12) +#define BNX2_MISC_CFG_LEDMODE_XI (0xfL<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC_XI (0L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY1_XI (1L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY2_XI (2L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY3_XI (3L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC2_XI (4L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY4_XI (5L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY5_XI (6L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY6_XI (7L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC3_XI (8L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY7_XI (9L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY8_XI (10L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY9_XI (11L<<8) +#define BNX2_MISC_CFG_LEDMODE_MAC4_XI (12L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY10_XI (13L<<8) +#define BNX2_MISC_CFG_LEDMODE_PHY11_XI (14L<<8) +#define BNX2_MISC_CFG_LEDMODE_UNUSED_XI (15L<<8) +#define BNX2_MISC_CFG_PORT_SELECT_XI (1L<<13) +#define BNX2_MISC_CFG_PARITY_MODE_XI (1L<<14) + +#define BNX2_MISC_ID 
0x00000808 +#define BNX2_MISC_ID_BOND_ID (0xfL<<0) +#define BNX2_MISC_ID_BOND_ID_X (0L<<0) +#define BNX2_MISC_ID_BOND_ID_C (3L<<0) +#define BNX2_MISC_ID_BOND_ID_S (12L<<0) +#define BNX2_MISC_ID_CHIP_METAL (0xffL<<4) +#define BNX2_MISC_ID_CHIP_REV (0xfL<<12) +#define BNX2_MISC_ID_CHIP_NUM (0xffffL<<16) + +#define BNX2_MISC_ENABLE_STATUS_BITS 0x0000080c +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_SCHEDULER_ENABLE (1L<<0) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_READ_ENABLE (1L<<1) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_CACHE_ENABLE (1L<<2) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PROCESSOR_ENABLE (1L<<3) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_DMA_ENABLE (1L<<4) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PATCHUP_ENABLE (1L<<5) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_HEADER_Q_ENABLE (1L<<7) +#define BNX2_MISC_ENABLE_STATUS_BITS_TX_ASSEMBLER_ENABLE (1L<<8) +#define BNX2_MISC_ENABLE_STATUS_BITS_EMAC_ENABLE (1L<<9) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_MAC_ENABLE (1L<<10) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_MBUF_ENABLE (1L<<12) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_LOOKUP_ENABLE (1L<<13) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PROCESSOR_ENABLE (1L<<14) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE (1L<<15) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_BD_CACHE_ENABLE (1L<<16) +#define BNX2_MISC_ENABLE_STATUS_BITS_RX_DMA_ENABLE (1L<<17) +#define BNX2_MISC_ENABLE_STATUS_BITS_COMPLETION_ENABLE (1L<<18) +#define BNX2_MISC_ENABLE_STATUS_BITS_HOST_COALESCE_ENABLE (1L<<19) +#define BNX2_MISC_ENABLE_STATUS_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) +#define BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE (1L<<21) +#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_SCHEDULER_ENABLE (1L<<22) +#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_PROCESSOR_ENABLE (1L<<23) +#define BNX2_MISC_ENABLE_STATUS_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) +#define BNX2_MISC_ENABLE_STATUS_BITS_TIMER_ENABLE (1L<<25) +#define BNX2_MISC_ENABLE_STATUS_BITS_DMA_ENGINE_ENABLE (1L<<26) +#define BNX2_MISC_ENABLE_STATUS_BITS_UMP_ENABLE (1L<<27) +#define BNX2_MISC_ENABLE_STATUS_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) +#define BNX2_MISC_ENABLE_STATUS_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) + +#define BNX2_MISC_ENABLE_SET_BITS 0x00000810 +#define BNX2_MISC_ENABLE_SET_BITS_TX_SCHEDULER_ENABLE (1L<<0) +#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_READ_ENABLE (1L<<1) +#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_CACHE_ENABLE (1L<<2) +#define BNX2_MISC_ENABLE_SET_BITS_TX_PROCESSOR_ENABLE (1L<<3) +#define BNX2_MISC_ENABLE_SET_BITS_TX_DMA_ENABLE (1L<<4) +#define BNX2_MISC_ENABLE_SET_BITS_TX_PATCHUP_ENABLE (1L<<5) +#define BNX2_MISC_ENABLE_SET_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) +#define BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE (1L<<7) +#define BNX2_MISC_ENABLE_SET_BITS_TX_ASSEMBLER_ENABLE (1L<<8) +#define BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE (1L<<9) +#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE (1L<<10) +#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) +#define BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE (1L<<12) +#define BNX2_MISC_ENABLE_SET_BITS_RX_LOOKUP_ENABLE (1L<<13) +#define BNX2_MISC_ENABLE_SET_BITS_RX_PROCESSOR_ENABLE (1L<<14) +#define BNX2_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE (1L<<15) +#define BNX2_MISC_ENABLE_SET_BITS_RX_BD_CACHE_ENABLE (1L<<16) +#define BNX2_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE (1L<<17) +#define BNX2_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE (1L<<18) +#define 
BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE (1L<<19) +#define BNX2_MISC_ENABLE_SET_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) +#define BNX2_MISC_ENABLE_SET_BITS_CONTEXT_ENABLE (1L<<21) +#define BNX2_MISC_ENABLE_SET_BITS_CMD_SCHEDULER_ENABLE (1L<<22) +#define BNX2_MISC_ENABLE_SET_BITS_CMD_PROCESSOR_ENABLE (1L<<23) +#define BNX2_MISC_ENABLE_SET_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) +#define BNX2_MISC_ENABLE_SET_BITS_TIMER_ENABLE (1L<<25) +#define BNX2_MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE (1L<<26) +#define BNX2_MISC_ENABLE_SET_BITS_UMP_ENABLE (1L<<27) +#define BNX2_MISC_ENABLE_SET_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) +#define BNX2_MISC_ENABLE_SET_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) + +#define BNX2_MISC_ENABLE_CLR_BITS 0x00000814 +#define BNX2_MISC_ENABLE_CLR_BITS_TX_SCHEDULER_ENABLE (1L<<0) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_READ_ENABLE (1L<<1) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_CACHE_ENABLE (1L<<2) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_PROCESSOR_ENABLE (1L<<3) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE (1L<<4) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_PATCHUP_ENABLE (1L<<5) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_HEADER_Q_ENABLE (1L<<7) +#define BNX2_MISC_ENABLE_CLR_BITS_TX_ASSEMBLER_ENABLE (1L<<8) +#define BNX2_MISC_ENABLE_CLR_BITS_EMAC_ENABLE (1L<<9) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_MAC_ENABLE (1L<<10) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_MBUF_ENABLE (1L<<12) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_LOOKUP_ENABLE (1L<<13) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_PROCESSOR_ENABLE (1L<<14) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_V2P_ENABLE (1L<<15) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_BD_CACHE_ENABLE (1L<<16) +#define BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE (1L<<17) +#define BNX2_MISC_ENABLE_CLR_BITS_COMPLETION_ENABLE (1L<<18) +#define BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE (1L<<19) +#define BNX2_MISC_ENABLE_CLR_BITS_MAILBOX_QUEUE_ENABLE (1L<<20) +#define BNX2_MISC_ENABLE_CLR_BITS_CONTEXT_ENABLE (1L<<21) +#define BNX2_MISC_ENABLE_CLR_BITS_CMD_SCHEDULER_ENABLE (1L<<22) +#define BNX2_MISC_ENABLE_CLR_BITS_CMD_PROCESSOR_ENABLE (1L<<23) +#define BNX2_MISC_ENABLE_CLR_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) +#define BNX2_MISC_ENABLE_CLR_BITS_TIMER_ENABLE (1L<<25) +#define BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE (1L<<26) +#define BNX2_MISC_ENABLE_CLR_BITS_UMP_ENABLE (1L<<27) +#define BNX2_MISC_ENABLE_CLR_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) +#define BNX2_MISC_ENABLE_CLR_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) + +#define BNX2_MISC_CLOCK_CONTROL_BITS 0x00000818 +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET (0xfL<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ (0L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ (1L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ (2L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ (3L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ (4L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ (5L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ (6L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ (7L<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW (0xfL<<0) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE (1L<<6) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT (1L<<7) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC (0x7L<<8) +#define 
BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF (0L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12 (1L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6 (2L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62 (4L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED0_XI (0x7L<<8) +#define BNX2_MISC_CLOCK_CONTROL_BITS_MIN_POWER (1L<<11) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED (0xfL<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100 (0L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80 (1L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50 (2L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40 (4L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25 (8L<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED1_XI (0xfL<<12) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP (1L<<16) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_17_TE (1L<<17) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18_TE (1L<<18) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_19_TE (1L<<19) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_TE (0xfffL<<20) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_MGMT_XI (1L<<17) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED2_XI (0x3fL<<18) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_VCO_XI (0x7L<<24) +#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED3_XI (1L<<27) +#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_XI (0xfL<<28) + +#define BNX2_MISC_SPIO 0x0000081c +#define BNX2_MISC_SPIO_VALUE (0xffL<<0) +#define BNX2_MISC_SPIO_SET (0xffL<<8) +#define BNX2_MISC_SPIO_CLR (0xffL<<16) +#define BNX2_MISC_SPIO_FLOAT (0xffL<<24) + +#define BNX2_MISC_SPIO_INT 0x00000820 +#define BNX2_MISC_SPIO_INT_INT_STATE_TE (0xfL<<0) +#define BNX2_MISC_SPIO_INT_OLD_VALUE_TE (0xfL<<8) +#define BNX2_MISC_SPIO_INT_OLD_SET_TE (0xfL<<16) +#define BNX2_MISC_SPIO_INT_OLD_CLR_TE (0xfL<<24) +#define BNX2_MISC_SPIO_INT_INT_STATE_XI (0xffL<<0) +#define BNX2_MISC_SPIO_INT_OLD_VALUE_XI (0xffL<<8) +#define BNX2_MISC_SPIO_INT_OLD_SET_XI (0xffL<<16) +#define BNX2_MISC_SPIO_INT_OLD_CLR_XI (0xffL<<24) + +#define BNX2_MISC_CONFIG_LFSR 0x00000824 +#define BNX2_MISC_CONFIG_LFSR_DIV (0xffffL<<0) + +#define BNX2_MISC_LFSR_MASK_BITS 0x00000828 +#define BNX2_MISC_LFSR_MASK_BITS_TX_SCHEDULER_ENABLE (1L<<0) +#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_READ_ENABLE (1L<<1) +#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_CACHE_ENABLE (1L<<2) +#define BNX2_MISC_LFSR_MASK_BITS_TX_PROCESSOR_ENABLE (1L<<3) +#define BNX2_MISC_LFSR_MASK_BITS_TX_DMA_ENABLE (1L<<4) +#define BNX2_MISC_LFSR_MASK_BITS_TX_PATCHUP_ENABLE (1L<<5) +#define BNX2_MISC_LFSR_MASK_BITS_TX_PAYLOAD_Q_ENABLE (1L<<6) +#define BNX2_MISC_LFSR_MASK_BITS_TX_HEADER_Q_ENABLE (1L<<7) +#define BNX2_MISC_LFSR_MASK_BITS_TX_ASSEMBLER_ENABLE (1L<<8) +#define BNX2_MISC_LFSR_MASK_BITS_EMAC_ENABLE (1L<<9) +#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_MAC_ENABLE (1L<<10) +#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_CATCHUP_ENABLE (1L<<11) +#define BNX2_MISC_LFSR_MASK_BITS_RX_MBUF_ENABLE (1L<<12) +#define BNX2_MISC_LFSR_MASK_BITS_RX_LOOKUP_ENABLE (1L<<13) +#define BNX2_MISC_LFSR_MASK_BITS_RX_PROCESSOR_ENABLE (1L<<14) +#define BNX2_MISC_LFSR_MASK_BITS_RX_V2P_ENABLE (1L<<15) +#define BNX2_MISC_LFSR_MASK_BITS_RX_BD_CACHE_ENABLE (1L<<16) +#define BNX2_MISC_LFSR_MASK_BITS_RX_DMA_ENABLE (1L<<17) +#define BNX2_MISC_LFSR_MASK_BITS_COMPLETION_ENABLE (1L<<18) +#define BNX2_MISC_LFSR_MASK_BITS_HOST_COALESCE_ENABLE (1L<<19) +#define BNX2_MISC_LFSR_MASK_BITS_MAILBOX_QUEUE_ENABLE 
(1L<<20) +#define BNX2_MISC_LFSR_MASK_BITS_CONTEXT_ENABLE (1L<<21) +#define BNX2_MISC_LFSR_MASK_BITS_CMD_SCHEDULER_ENABLE (1L<<22) +#define BNX2_MISC_LFSR_MASK_BITS_CMD_PROCESSOR_ENABLE (1L<<23) +#define BNX2_MISC_LFSR_MASK_BITS_MGMT_PROCESSOR_ENABLE (1L<<24) +#define BNX2_MISC_LFSR_MASK_BITS_TIMER_ENABLE (1L<<25) +#define BNX2_MISC_LFSR_MASK_BITS_DMA_ENGINE_ENABLE (1L<<26) +#define BNX2_MISC_LFSR_MASK_BITS_UMP_ENABLE (1L<<27) +#define BNX2_MISC_LFSR_MASK_BITS_RV2P_CMD_SCHEDULER_ENABLE (1L<<28) +#define BNX2_MISC_LFSR_MASK_BITS_RSVD_FUTURE_ENABLE (0x7L<<29) + +#define BNX2_MISC_ARB_REQ0 0x0000082c +#define BNX2_MISC_ARB_REQ1 0x00000830 +#define BNX2_MISC_ARB_REQ2 0x00000834 +#define BNX2_MISC_ARB_REQ3 0x00000838 +#define BNX2_MISC_ARB_REQ4 0x0000083c +#define BNX2_MISC_ARB_FREE0 0x00000840 +#define BNX2_MISC_ARB_FREE1 0x00000844 +#define BNX2_MISC_ARB_FREE2 0x00000848 +#define BNX2_MISC_ARB_FREE3 0x0000084c +#define BNX2_MISC_ARB_FREE4 0x00000850 +#define BNX2_MISC_ARB_REQ_STATUS0 0x00000854 +#define BNX2_MISC_ARB_REQ_STATUS1 0x00000858 +#define BNX2_MISC_ARB_REQ_STATUS2 0x0000085c +#define BNX2_MISC_ARB_REQ_STATUS3 0x00000860 +#define BNX2_MISC_ARB_REQ_STATUS4 0x00000864 +#define BNX2_MISC_ARB_GNT0 0x00000868 +#define BNX2_MISC_ARB_GNT0_0 (0x7L<<0) +#define BNX2_MISC_ARB_GNT0_1 (0x7L<<4) +#define BNX2_MISC_ARB_GNT0_2 (0x7L<<8) +#define BNX2_MISC_ARB_GNT0_3 (0x7L<<12) +#define BNX2_MISC_ARB_GNT0_4 (0x7L<<16) +#define BNX2_MISC_ARB_GNT0_5 (0x7L<<20) +#define BNX2_MISC_ARB_GNT0_6 (0x7L<<24) +#define BNX2_MISC_ARB_GNT0_7 (0x7L<<28) + +#define BNX2_MISC_ARB_GNT1 0x0000086c +#define BNX2_MISC_ARB_GNT1_8 (0x7L<<0) +#define BNX2_MISC_ARB_GNT1_9 (0x7L<<4) +#define BNX2_MISC_ARB_GNT1_10 (0x7L<<8) +#define BNX2_MISC_ARB_GNT1_11 (0x7L<<12) +#define BNX2_MISC_ARB_GNT1_12 (0x7L<<16) +#define BNX2_MISC_ARB_GNT1_13 (0x7L<<20) +#define BNX2_MISC_ARB_GNT1_14 (0x7L<<24) +#define BNX2_MISC_ARB_GNT1_15 (0x7L<<28) + +#define BNX2_MISC_ARB_GNT2 0x00000870 +#define BNX2_MISC_ARB_GNT2_16 (0x7L<<0) +#define BNX2_MISC_ARB_GNT2_17 (0x7L<<4) +#define BNX2_MISC_ARB_GNT2_18 (0x7L<<8) +#define BNX2_MISC_ARB_GNT2_19 (0x7L<<12) +#define BNX2_MISC_ARB_GNT2_20 (0x7L<<16) +#define BNX2_MISC_ARB_GNT2_21 (0x7L<<20) +#define BNX2_MISC_ARB_GNT2_22 (0x7L<<24) +#define BNX2_MISC_ARB_GNT2_23 (0x7L<<28) + +#define BNX2_MISC_ARB_GNT3 0x00000874 +#define BNX2_MISC_ARB_GNT3_24 (0x7L<<0) +#define BNX2_MISC_ARB_GNT3_25 (0x7L<<4) +#define BNX2_MISC_ARB_GNT3_26 (0x7L<<8) +#define BNX2_MISC_ARB_GNT3_27 (0x7L<<12) +#define BNX2_MISC_ARB_GNT3_28 (0x7L<<16) +#define BNX2_MISC_ARB_GNT3_29 (0x7L<<20) +#define BNX2_MISC_ARB_GNT3_30 (0x7L<<24) +#define BNX2_MISC_ARB_GNT3_31 (0x7L<<28) + +#define BNX2_MISC_RESERVED1 0x00000878 +#define BNX2_MISC_RESERVED1_MISC_RESERVED1_VALUE (0x3fL<<0) + +#define BNX2_MISC_RESERVED2 0x0000087c +#define BNX2_MISC_RESERVED2_PCIE_DIS (1L<<0) +#define BNX2_MISC_RESERVED2_LINK_IN_L23 (1L<<1) + +#define BNX2_MISC_SM_ASF_CONTROL 0x00000880 +#define BNX2_MISC_SM_ASF_CONTROL_ASF_RST (1L<<0) +#define BNX2_MISC_SM_ASF_CONTROL_TSC_EN (1L<<1) +#define BNX2_MISC_SM_ASF_CONTROL_WG_TO (1L<<2) +#define BNX2_MISC_SM_ASF_CONTROL_HB_TO (1L<<3) +#define BNX2_MISC_SM_ASF_CONTROL_PA_TO (1L<<4) +#define BNX2_MISC_SM_ASF_CONTROL_PL_TO (1L<<5) +#define BNX2_MISC_SM_ASF_CONTROL_RT_TO (1L<<6) +#define BNX2_MISC_SM_ASF_CONTROL_SMB_EVENT (1L<<7) +#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_EN (1L<<8) +#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_PULSE (1L<<9) +#define BNX2_MISC_SM_ASF_CONTROL_RES (0x3L<<10) +#define BNX2_MISC_SM_ASF_CONTROL_SMB_EN 
(1L<<12) +#define BNX2_MISC_SM_ASF_CONTROL_SMB_BB_EN (1L<<13) +#define BNX2_MISC_SM_ASF_CONTROL_SMB_NO_ADDR_FILT (1L<<14) +#define BNX2_MISC_SM_ASF_CONTROL_SMB_AUTOREAD (1L<<15) +#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1 (0x7fL<<16) +#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2 (0x7fL<<23) +#define BNX2_MISC_SM_ASF_CONTROL_EN_NIC_SMB_ADDR_0 (1L<<30) +#define BNX2_MISC_SM_ASF_CONTROL_SMB_EARLY_ATTN (1L<<31) + +#define BNX2_MISC_SMB_IN 0x00000884 +#define BNX2_MISC_SMB_IN_DAT_IN (0xffL<<0) +#define BNX2_MISC_SMB_IN_RDY (1L<<8) +#define BNX2_MISC_SMB_IN_DONE (1L<<9) +#define BNX2_MISC_SMB_IN_FIRSTBYTE (1L<<10) +#define BNX2_MISC_SMB_IN_STATUS (0x7L<<11) +#define BNX2_MISC_SMB_IN_STATUS_OK (0x0L<<11) +#define BNX2_MISC_SMB_IN_STATUS_PEC (0x1L<<11) +#define BNX2_MISC_SMB_IN_STATUS_OFLOW (0x2L<<11) +#define BNX2_MISC_SMB_IN_STATUS_STOP (0x3L<<11) +#define BNX2_MISC_SMB_IN_STATUS_TIMEOUT (0x4L<<11) + +#define BNX2_MISC_SMB_OUT 0x00000888 +#define BNX2_MISC_SMB_OUT_DAT_OUT (0xffL<<0) +#define BNX2_MISC_SMB_OUT_RDY (1L<<8) +#define BNX2_MISC_SMB_OUT_START (1L<<9) +#define BNX2_MISC_SMB_OUT_LAST (1L<<10) +#define BNX2_MISC_SMB_OUT_ACC_TYPE (1L<<11) +#define BNX2_MISC_SMB_OUT_ENB_PEC (1L<<12) +#define BNX2_MISC_SMB_OUT_GET_RX_LEN (1L<<13) +#define BNX2_MISC_SMB_OUT_SMB_READ_LEN (0x3fL<<14) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS (0xfL<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_OK (0L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_NACK (1L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_UFLOW (2L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_STOP (3L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_TIMEOUT (4L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_LOST (5L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK (6L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK (9L<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_LOST (0xdL<<20) +#define BNX2_MISC_SMB_OUT_SMB_OUT_SLAVEMODE (1L<<24) +#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_EN (1L<<25) +#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_IN (1L<<26) +#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_EN (1L<<27) +#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_IN (1L<<28) + +#define BNX2_MISC_SMB_WATCHDOG 0x0000088c +#define BNX2_MISC_SMB_WATCHDOG_WATCHDOG (0xffffL<<0) + +#define BNX2_MISC_SMB_HEARTBEAT 0x00000890 +#define BNX2_MISC_SMB_HEARTBEAT_HEARTBEAT (0xffffL<<0) + +#define BNX2_MISC_SMB_POLL_ASF 0x00000894 +#define BNX2_MISC_SMB_POLL_ASF_POLL_ASF (0xffffL<<0) + +#define BNX2_MISC_SMB_POLL_LEGACY 0x00000898 +#define BNX2_MISC_SMB_POLL_LEGACY_POLL_LEGACY (0xffffL<<0) + +#define BNX2_MISC_SMB_RETRAN 0x0000089c +#define BNX2_MISC_SMB_RETRAN_RETRAN (0xffL<<0) + +#define BNX2_MISC_SMB_TIMESTAMP 0x000008a0 +#define BNX2_MISC_SMB_TIMESTAMP_TIMESTAMP (0xffffffffL<<0) + +#define BNX2_MISC_PERR_ENA0 0x000008a4 +#define BNX2_MISC_PERR_ENA0_COM_MISC_CTXC (1L<<0) +#define BNX2_MISC_PERR_ENA0_COM_MISC_REGF (1L<<1) +#define BNX2_MISC_PERR_ENA0_COM_MISC_SCPAD (1L<<2) +#define BNX2_MISC_PERR_ENA0_CP_MISC_CTXC (1L<<3) +#define BNX2_MISC_PERR_ENA0_CP_MISC_REGF (1L<<4) +#define BNX2_MISC_PERR_ENA0_CP_MISC_SCPAD (1L<<5) +#define BNX2_MISC_PERR_ENA0_CS_MISC_TMEM (1L<<6) +#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM0 (1L<<7) +#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM1 (1L<<8) +#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM2 (1L<<9) +#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM3 (1L<<10) +#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM4 (1L<<11) +#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM5 (1L<<12) +#define BNX2_MISC_PERR_ENA0_CTX_MISC_PGTBL (1L<<13) +#define 
BNX2_MISC_PERR_ENA0_DMAE_MISC_DR0 (1L<<14) +#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR1 (1L<<15) +#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR2 (1L<<16) +#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR3 (1L<<17) +#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR4 (1L<<18) +#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW0 (1L<<19) +#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW1 (1L<<20) +#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW2 (1L<<21) +#define BNX2_MISC_PERR_ENA0_HC_MISC_DMA (1L<<22) +#define BNX2_MISC_PERR_ENA0_MCP_MISC_REGF (1L<<23) +#define BNX2_MISC_PERR_ENA0_MCP_MISC_SCPAD (1L<<24) +#define BNX2_MISC_PERR_ENA0_MQ_MISC_CTX (1L<<25) +#define BNX2_MISC_PERR_ENA0_RBDC_MISC (1L<<26) +#define BNX2_MISC_PERR_ENA0_RBUF_MISC_MB (1L<<27) +#define BNX2_MISC_PERR_ENA0_RBUF_MISC_PTR (1L<<28) +#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPC (1L<<29) +#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPM (1L<<30) +#define BNX2_MISC_PERR_ENA0_RV2P_MISC_CB0REGS (1L<<31) +#define BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI (1L<<0) +#define BNX2_MISC_PERR_ENA0_CP_DMAE_PERR_EN_XI (1L<<1) +#define BNX2_MISC_PERR_ENA0_RPM_ACPIBEMEM_PERR_EN_XI (1L<<2) +#define BNX2_MISC_PERR_ENA0_CTX_USAGE_CNT_PERR_EN_XI (1L<<3) +#define BNX2_MISC_PERR_ENA0_CTX_PGTBL_PERR_EN_XI (1L<<4) +#define BNX2_MISC_PERR_ENA0_CTX_CACHE_PERR_EN_XI (1L<<5) +#define BNX2_MISC_PERR_ENA0_CTX_MIRROR_PERR_EN_XI (1L<<6) +#define BNX2_MISC_PERR_ENA0_COM_CTXC_PERR_EN_XI (1L<<7) +#define BNX2_MISC_PERR_ENA0_COM_SCPAD_PERR_EN_XI (1L<<8) +#define BNX2_MISC_PERR_ENA0_CP_CTXC_PERR_EN_XI (1L<<9) +#define BNX2_MISC_PERR_ENA0_CP_SCPAD_PERR_EN_XI (1L<<10) +#define BNX2_MISC_PERR_ENA0_RXP_RBUFC_PERR_EN_XI (1L<<11) +#define BNX2_MISC_PERR_ENA0_RXP_CTXC_PERR_EN_XI (1L<<12) +#define BNX2_MISC_PERR_ENA0_RXP_SCPAD_PERR_EN_XI (1L<<13) +#define BNX2_MISC_PERR_ENA0_TPAT_SCPAD_PERR_EN_XI (1L<<14) +#define BNX2_MISC_PERR_ENA0_TXP_CTXC_PERR_EN_XI (1L<<15) +#define BNX2_MISC_PERR_ENA0_TXP_SCPAD_PERR_EN_XI (1L<<16) +#define BNX2_MISC_PERR_ENA0_CS_TMEM_PERR_EN_XI (1L<<17) +#define BNX2_MISC_PERR_ENA0_MQ_CTX_PERR_EN_XI (1L<<18) +#define BNX2_MISC_PERR_ENA0_RPM_DFIFOMEM_PERR_EN_XI (1L<<19) +#define BNX2_MISC_PERR_ENA0_RPC_DFIFOMEM_PERR_EN_XI (1L<<20) +#define BNX2_MISC_PERR_ENA0_RBUF_PTRMEM_PERR_EN_XI (1L<<21) +#define BNX2_MISC_PERR_ENA0_RBUF_DATAMEM_PERR_EN_XI (1L<<22) +#define BNX2_MISC_PERR_ENA0_RV2P_P2IRAM_PERR_EN_XI (1L<<23) +#define BNX2_MISC_PERR_ENA0_RV2P_P1IRAM_PERR_EN_XI (1L<<24) +#define BNX2_MISC_PERR_ENA0_RV2P_CB1REGS_PERR_EN_XI (1L<<25) +#define BNX2_MISC_PERR_ENA0_RV2P_CB0REGS_PERR_EN_XI (1L<<26) +#define BNX2_MISC_PERR_ENA0_TPBUF_PERR_EN_XI (1L<<27) +#define BNX2_MISC_PERR_ENA0_THBUF_PERR_EN_XI (1L<<28) +#define BNX2_MISC_PERR_ENA0_TDMA_PERR_EN_XI (1L<<29) +#define BNX2_MISC_PERR_ENA0_TBDC_PERR_EN_XI (1L<<30) +#define BNX2_MISC_PERR_ENA0_TSCH_LR_PERR_EN_XI (1L<<31) + +#define BNX2_MISC_PERR_ENA1 0x000008a8 +#define BNX2_MISC_PERR_ENA1_RV2P_MISC_CB1REGS (1L<<0) +#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P1IRAM (1L<<1) +#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P2IRAM (1L<<2) +#define BNX2_MISC_PERR_ENA1_RXP_MISC_CTXC (1L<<3) +#define BNX2_MISC_PERR_ENA1_RXP_MISC_REGF (1L<<4) +#define BNX2_MISC_PERR_ENA1_RXP_MISC_SCPAD (1L<<5) +#define BNX2_MISC_PERR_ENA1_RXP_MISC_RBUFC (1L<<6) +#define BNX2_MISC_PERR_ENA1_TBDC_MISC (1L<<7) +#define BNX2_MISC_PERR_ENA1_TDMA_MISC (1L<<8) +#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB0 (1L<<9) +#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB1 (1L<<10) +#define BNX2_MISC_PERR_ENA1_TPAT_MISC_REGF (1L<<11) +#define BNX2_MISC_PERR_ENA1_TPAT_MISC_SCPAD (1L<<12) +#define 
BNX2_MISC_PERR_ENA1_TPBUF_MISC_MB (1L<<13) +#define BNX2_MISC_PERR_ENA1_TSCH_MISC_LR (1L<<14) +#define BNX2_MISC_PERR_ENA1_TXP_MISC_CTXC (1L<<15) +#define BNX2_MISC_PERR_ENA1_TXP_MISC_REGF (1L<<16) +#define BNX2_MISC_PERR_ENA1_TXP_MISC_SCPAD (1L<<17) +#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIORX (1L<<18) +#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIOTX (1L<<19) +#define BNX2_MISC_PERR_ENA1_UMP_MISC_RX (1L<<20) +#define BNX2_MISC_PERR_ENA1_UMP_MISC_TX (1L<<21) +#define BNX2_MISC_PERR_ENA1_RDMAQ_MISC (1L<<22) +#define BNX2_MISC_PERR_ENA1_CSQ_MISC (1L<<23) +#define BNX2_MISC_PERR_ENA1_CPQ_MISC (1L<<24) +#define BNX2_MISC_PERR_ENA1_MCPQ_MISC (1L<<25) +#define BNX2_MISC_PERR_ENA1_RV2PMQ_MISC (1L<<26) +#define BNX2_MISC_PERR_ENA1_RV2PPQ_MISC (1L<<27) +#define BNX2_MISC_PERR_ENA1_RV2PTQ_MISC (1L<<28) +#define BNX2_MISC_PERR_ENA1_RXPQ_MISC (1L<<29) +#define BNX2_MISC_PERR_ENA1_RXPCQ_MISC (1L<<30) +#define BNX2_MISC_PERR_ENA1_RLUPQ_MISC (1L<<31) +#define BNX2_MISC_PERR_ENA1_RBDC_PERR_EN_XI (1L<<0) +#define BNX2_MISC_PERR_ENA1_RDMA_DFIFO_PERR_EN_XI (1L<<2) +#define BNX2_MISC_PERR_ENA1_HC_STATS_PERR_EN_XI (1L<<3) +#define BNX2_MISC_PERR_ENA1_HC_MSIX_PERR_EN_XI (1L<<4) +#define BNX2_MISC_PERR_ENA1_HC_PRODUCSTB_PERR_EN_XI (1L<<5) +#define BNX2_MISC_PERR_ENA1_HC_CONSUMSTB_PERR_EN_XI (1L<<6) +#define BNX2_MISC_PERR_ENA1_TPATQ_PERR_EN_XI (1L<<7) +#define BNX2_MISC_PERR_ENA1_MCPQ_PERR_EN_XI (1L<<8) +#define BNX2_MISC_PERR_ENA1_TDMAQ_PERR_EN_XI (1L<<9) +#define BNX2_MISC_PERR_ENA1_TXPQ_PERR_EN_XI (1L<<10) +#define BNX2_MISC_PERR_ENA1_COMTQ_PERR_EN_XI (1L<<11) +#define BNX2_MISC_PERR_ENA1_COMQ_PERR_EN_XI (1L<<12) +#define BNX2_MISC_PERR_ENA1_RLUPQ_PERR_EN_XI (1L<<13) +#define BNX2_MISC_PERR_ENA1_RXPQ_PERR_EN_XI (1L<<14) +#define BNX2_MISC_PERR_ENA1_RV2PPQ_PERR_EN_XI (1L<<15) +#define BNX2_MISC_PERR_ENA1_RDMAQ_PERR_EN_XI (1L<<16) +#define BNX2_MISC_PERR_ENA1_TASQ_PERR_EN_XI (1L<<17) +#define BNX2_MISC_PERR_ENA1_TBDRQ_PERR_EN_XI (1L<<18) +#define BNX2_MISC_PERR_ENA1_TSCHQ_PERR_EN_XI (1L<<19) +#define BNX2_MISC_PERR_ENA1_COMXQ_PERR_EN_XI (1L<<20) +#define BNX2_MISC_PERR_ENA1_RXPCQ_PERR_EN_XI (1L<<21) +#define BNX2_MISC_PERR_ENA1_RV2PTQ_PERR_EN_XI (1L<<22) +#define BNX2_MISC_PERR_ENA1_RV2PMQ_PERR_EN_XI (1L<<23) +#define BNX2_MISC_PERR_ENA1_CPQ_PERR_EN_XI (1L<<24) +#define BNX2_MISC_PERR_ENA1_CSQ_PERR_EN_XI (1L<<25) +#define BNX2_MISC_PERR_ENA1_RLUP_CID_PERR_EN_XI (1L<<26) +#define BNX2_MISC_PERR_ENA1_RV2PCS_TMEM_PERR_EN_XI (1L<<27) +#define BNX2_MISC_PERR_ENA1_RV2PCSQ_PERR_EN_XI (1L<<28) +#define BNX2_MISC_PERR_ENA1_MQ_IDX_PERR_EN_XI (1L<<29) + +#define BNX2_MISC_PERR_ENA2 0x000008ac +#define BNX2_MISC_PERR_ENA2_COMQ_MISC (1L<<0) +#define BNX2_MISC_PERR_ENA2_COMXQ_MISC (1L<<1) +#define BNX2_MISC_PERR_ENA2_COMTQ_MISC (1L<<2) +#define BNX2_MISC_PERR_ENA2_TSCHQ_MISC (1L<<3) +#define BNX2_MISC_PERR_ENA2_TBDRQ_MISC (1L<<4) +#define BNX2_MISC_PERR_ENA2_TXPQ_MISC (1L<<5) +#define BNX2_MISC_PERR_ENA2_TDMAQ_MISC (1L<<6) +#define BNX2_MISC_PERR_ENA2_TPATQ_MISC (1L<<7) +#define BNX2_MISC_PERR_ENA2_TASQ_MISC (1L<<8) +#define BNX2_MISC_PERR_ENA2_TGT_FIFO_PERR_EN_XI (1L<<0) +#define BNX2_MISC_PERR_ENA2_UMP_TX_PERR_EN_XI (1L<<1) +#define BNX2_MISC_PERR_ENA2_UMP_RX_PERR_EN_XI (1L<<2) +#define BNX2_MISC_PERR_ENA2_MCP_ROM_PERR_EN_XI (1L<<3) +#define BNX2_MISC_PERR_ENA2_MCP_SCPAD_PERR_EN_XI (1L<<4) +#define BNX2_MISC_PERR_ENA2_HB_MEM_PERR_EN_XI (1L<<5) +#define BNX2_MISC_PERR_ENA2_PCIE_REPLAY_PERR_EN_XI (1L<<6) + +#define BNX2_MISC_DEBUG_VECTOR_SEL 0x000008b0 +#define BNX2_MISC_DEBUG_VECTOR_SEL_0 (0xfffL<<0) +#define 
BNX2_MISC_DEBUG_VECTOR_SEL_1 (0xfffL<<12) +#define BNX2_MISC_DEBUG_VECTOR_SEL_1_XI (0xfffL<<15) + +#define BNX2_MISC_VREG_CONTROL 0x000008b4 +#define BNX2_MISC_VREG_CONTROL_1_2 (0xfL<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_XI (0xfL<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS14_XI (0L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS12_XI (1L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS10_XI (2L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS8_XI (3L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS6_XI (4L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS4_XI (5L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS2_XI (6L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_NOM_XI (7L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS2_XI (8L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS4_XI (9L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS6_XI (10L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS8_XI (11L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS10_XI (12L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS12_XI (13L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS14_XI (14L<<0) +#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS16_XI (15L<<0) +#define BNX2_MISC_VREG_CONTROL_2_5 (0xfL<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_PLUS14 (0L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_PLUS12 (1L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_PLUS10 (2L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_PLUS8 (3L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_PLUS6 (4L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_PLUS4 (5L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_PLUS2 (6L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_NOM (7L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS2 (8L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS4 (9L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS6 (10L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS8 (11L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS10 (12L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS12 (13L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS14 (14L<<4) +#define BNX2_MISC_VREG_CONTROL_2_5_MINUS16 (15L<<4) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT (0xfL<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS14 (0L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS12 (1L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS10 (2L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS8 (3L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS6 (4L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS4 (5L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS2 (6L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_NOM (7L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS2 (8L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS4 (9L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS6 (10L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS8 (11L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS10 (12L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS12 (13L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS14 (14L<<8) +#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS16 (15L<<8) + +#define BNX2_MISC_FINAL_CLK_CTL_VAL 0x000008b8 +#define BNX2_MISC_FINAL_CLK_CTL_VAL_MISC_FINAL_CLK_CTL_VAL (0x3ffffffL<<6) + +#define BNX2_MISC_GP_HW_CTL0 0x000008bc +#define BNX2_MISC_GP_HW_CTL0_TX_DRIVE (1L<<0) +#define BNX2_MISC_GP_HW_CTL0_RMII_MODE (1L<<1) +#define BNX2_MISC_GP_HW_CTL0_RMII_CRSDV_SEL (1L<<2) +#define BNX2_MISC_GP_HW_CTL0_RVMII_MODE (1L<<3) +#define BNX2_MISC_GP_HW_CTL0_FLASH_SAMP_SCLK_NEGEDGE_TE (1L<<4) +#define BNX2_MISC_GP_HW_CTL0_HIDDEN_REVISION_ID_TE (1L<<5) +#define BNX2_MISC_GP_HW_CTL0_HC_CNTL_TMOUT_CTR_RST_TE 
(1L<<6) +#define BNX2_MISC_GP_HW_CTL0_RESERVED1_XI (0x7L<<4) +#define BNX2_MISC_GP_HW_CTL0_ENA_CORE_RST_ON_MAIN_PWR_GOING_AWAY (1L<<7) +#define BNX2_MISC_GP_HW_CTL0_ENA_SEL_VAUX_B_IN_L2_TE (1L<<8) +#define BNX2_MISC_GP_HW_CTL0_GRC_BNK_FREE_FIX_TE (1L<<9) +#define BNX2_MISC_GP_HW_CTL0_LED_ACT_SEL_TE (1L<<10) +#define BNX2_MISC_GP_HW_CTL0_RESERVED2_XI (0x7L<<8) +#define BNX2_MISC_GP_HW_CTL0_UP1_DEF0 (1L<<11) +#define BNX2_MISC_GP_HW_CTL0_FIBER_MODE_DIS_DEF (1L<<12) +#define BNX2_MISC_GP_HW_CTL0_FORCE2500_DEF (1L<<13) +#define BNX2_MISC_GP_HW_CTL0_AUTODETECT_DIS_DEF (1L<<14) +#define BNX2_MISC_GP_HW_CTL0_PARALLEL_DETECT_DEF (1L<<15) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI (0xfL<<16) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_3MA (0L<<16) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P5MA (1L<<16) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P0MA (3L<<16) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P5MA (5L<<16) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P0MA (7L<<16) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_PWRDN (15L<<16) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE2DIS (1L<<20) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE1DIS (1L<<21) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT (0x3L<<22) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M6P (0L<<22) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M0P (1L<<22) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P0P (2L<<22) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P6P (3L<<22) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT (0x3L<<24) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M6P (0L<<24) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M0P (1L<<24) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P0P (2L<<24) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P6P (3L<<24) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ (0x3L<<26) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_240UA (0L<<26) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_160UA (1L<<26) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_400UA (2L<<26) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_320UA (3L<<26) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ (0x3L<<28) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_240UA (0L<<28) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_160UA (1L<<28) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_400UA (2L<<28) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_320UA (3L<<28) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ (0x3L<<30) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P57 (0L<<30) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P45 (1L<<30) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P62 (2L<<30) +#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P66 (3L<<30) + +#define BNX2_MISC_GP_HW_CTL1 0x000008c0 +#define BNX2_MISC_GP_HW_CTL1_1_ATTN_BTN_PRSNT_TE (1L<<0) +#define BNX2_MISC_GP_HW_CTL1_1_ATTN_IND_PRSNT_TE (1L<<1) +#define BNX2_MISC_GP_HW_CTL1_1_PWR_IND_PRSNT_TE (1L<<2) +#define BNX2_MISC_GP_HW_CTL1_0_PCIE_LOOPBACK_TE (1L<<3) +#define BNX2_MISC_GP_HW_CTL1_RESERVED_SOFT_XI (0xffffL<<0) +#define BNX2_MISC_GP_HW_CTL1_RESERVED_HARD_XI (0xffffL<<16) + +#define BNX2_MISC_NEW_HW_CTL 0x000008c4 +#define BNX2_MISC_NEW_HW_CTL_MAIN_POR_BYPASS (1L<<0) +#define BNX2_MISC_NEW_HW_CTL_RINGOSC_ENABLE (1L<<1) +#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL0 (1L<<2) +#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL1 (1L<<3) +#define BNX2_MISC_NEW_HW_CTL_RESERVED_SHARED (0xfffL<<4) +#define BNX2_MISC_NEW_HW_CTL_RESERVED_SPLIT (0xffffL<<16) + +#define BNX2_MISC_NEW_CORE_CTL 0x000008c8 +#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_SUCCESS (1L<<0) +#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_REQ (1L<<1) +#define 
BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE (1L<<16) +#define BNX2_MISC_NEW_CORE_CTL_RESERVED_CMN (0x3fffL<<2) +#define BNX2_MISC_NEW_CORE_CTL_RESERVED_TC (0xffffL<<16) + +#define BNX2_MISC_ECO_HW_CTL 0x000008cc +#define BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN (1L<<0) +#define BNX2_MISC_ECO_HW_CTL_RESERVED_SOFT (0x7fffL<<1) +#define BNX2_MISC_ECO_HW_CTL_RESERVED_HARD (0xffffL<<16) + +#define BNX2_MISC_ECO_CORE_CTL 0x000008d0 +#define BNX2_MISC_ECO_CORE_CTL_RESERVED_SOFT (0xffffL<<0) +#define BNX2_MISC_ECO_CORE_CTL_RESERVED_HARD (0xffffL<<16) + +#define BNX2_MISC_PPIO 0x000008d4 +#define BNX2_MISC_PPIO_VALUE (0xfL<<0) +#define BNX2_MISC_PPIO_SET (0xfL<<8) +#define BNX2_MISC_PPIO_CLR (0xfL<<16) +#define BNX2_MISC_PPIO_FLOAT (0xfL<<24) + +#define BNX2_MISC_PPIO_INT 0x000008d8 +#define BNX2_MISC_PPIO_INT_INT_STATE (0xfL<<0) +#define BNX2_MISC_PPIO_INT_OLD_VALUE (0xfL<<8) +#define BNX2_MISC_PPIO_INT_OLD_SET (0xfL<<16) +#define BNX2_MISC_PPIO_INT_OLD_CLR (0xfL<<24) + +#define BNX2_MISC_RESET_NUMS 0x000008dc +#define BNX2_MISC_RESET_NUMS_NUM_HARD_RESETS (0x7L<<0) +#define BNX2_MISC_RESET_NUMS_NUM_PCIE_RESETS (0x7L<<4) +#define BNX2_MISC_RESET_NUMS_NUM_PERSTB_RESETS (0x7L<<8) +#define BNX2_MISC_RESET_NUMS_NUM_CMN_RESETS (0x7L<<12) +#define BNX2_MISC_RESET_NUMS_NUM_PORT_RESETS (0x7L<<16) + +#define BNX2_MISC_CS16_ERR 0x000008e0 +#define BNX2_MISC_CS16_ERR_ENA_PCI (1L<<0) +#define BNX2_MISC_CS16_ERR_ENA_RDMA (1L<<1) +#define BNX2_MISC_CS16_ERR_ENA_TDMA (1L<<2) +#define BNX2_MISC_CS16_ERR_ENA_EMAC (1L<<3) +#define BNX2_MISC_CS16_ERR_ENA_CTX (1L<<4) +#define BNX2_MISC_CS16_ERR_ENA_TBDR (1L<<5) +#define BNX2_MISC_CS16_ERR_ENA_RBDC (1L<<6) +#define BNX2_MISC_CS16_ERR_ENA_COM (1L<<7) +#define BNX2_MISC_CS16_ERR_ENA_CP (1L<<8) +#define BNX2_MISC_CS16_ERR_STA_PCI (1L<<16) +#define BNX2_MISC_CS16_ERR_STA_RDMA (1L<<17) +#define BNX2_MISC_CS16_ERR_STA_TDMA (1L<<18) +#define BNX2_MISC_CS16_ERR_STA_EMAC (1L<<19) +#define BNX2_MISC_CS16_ERR_STA_CTX (1L<<20) +#define BNX2_MISC_CS16_ERR_STA_TBDR (1L<<21) +#define BNX2_MISC_CS16_ERR_STA_RBDC (1L<<22) +#define BNX2_MISC_CS16_ERR_STA_COM (1L<<23) +#define BNX2_MISC_CS16_ERR_STA_CP (1L<<24) + +#define BNX2_MISC_SPIO_EVENT 0x000008e4 +#define BNX2_MISC_SPIO_EVENT_ENABLE (0xffL<<0) + +#define BNX2_MISC_PPIO_EVENT 0x000008e8 +#define BNX2_MISC_PPIO_EVENT_ENABLE (0xfL<<0) + +#define BNX2_MISC_DUAL_MEDIA_CTRL 0x000008ec +#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID (0xffL<<0) +#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_X (0L<<0) +#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C (3L<<0) +#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S (12L<<0) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP (0x7L<<8) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP_PIN (1L<<11) +#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_SIGDET (1L<<12) +#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_SIGDET (1L<<13) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_SIGDET (1L<<14) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_SIGDET (1L<<15) +#define BNX2_MISC_DUAL_MEDIA_CTRL_LCPLL_RST (1L<<16) +#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_RST (1L<<17) +#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_RST (1L<<18) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_RST (1L<<19) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_RST (1L<<20) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL (0x7L<<21) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP (1L<<24) +#define BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE (1L<<25) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ (0xfL<<26) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER1_IDDQ (1L<<26) +#define 
BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER0_IDDQ (2L<<26) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY1_IDDQ (4L<<26) +#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY0_IDDQ (8L<<26) + +#define BNX2_MISC_OTP_CMD1 0x000008f0 +#define BNX2_MISC_OTP_CMD1_FMODE (0x7L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_IDLE (0L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_WRITE (1L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_INIT (2L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_SET (3L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_RST (4L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_VERIFY (5L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED0 (6L<<0) +#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED1 (7L<<0) +#define BNX2_MISC_OTP_CMD1_USEPINS (1L<<8) +#define BNX2_MISC_OTP_CMD1_PROGSEL (1L<<9) +#define BNX2_MISC_OTP_CMD1_PROGSTART (1L<<10) +#define BNX2_MISC_OTP_CMD1_PCOUNT (0x7L<<16) +#define BNX2_MISC_OTP_CMD1_PBYP (1L<<19) +#define BNX2_MISC_OTP_CMD1_VSEL (0xfL<<20) +#define BNX2_MISC_OTP_CMD1_TM (0x7L<<27) +#define BNX2_MISC_OTP_CMD1_SADBYP (1L<<30) +#define BNX2_MISC_OTP_CMD1_DEBUG (1L<<31) + +#define BNX2_MISC_OTP_CMD2 0x000008f4 +#define BNX2_MISC_OTP_CMD2_OTP_ROM_ADDR (0x3ffL<<0) +#define BNX2_MISC_OTP_CMD2_DOSEL (0x7fL<<16) +#define BNX2_MISC_OTP_CMD2_DOSEL_0 (0L<<16) +#define BNX2_MISC_OTP_CMD2_DOSEL_1 (1L<<16) +#define BNX2_MISC_OTP_CMD2_DOSEL_127 (127L<<16) + +#define BNX2_MISC_OTP_STATUS 0x000008f8 +#define BNX2_MISC_OTP_STATUS_DATA (0xffL<<0) +#define BNX2_MISC_OTP_STATUS_VALID (1L<<8) +#define BNX2_MISC_OTP_STATUS_BUSY (1L<<9) +#define BNX2_MISC_OTP_STATUS_BUSYSM (1L<<10) +#define BNX2_MISC_OTP_STATUS_DONE (1L<<11) + +#define BNX2_MISC_OTP_SHIFT1_CMD 0x000008fc +#define BNX2_MISC_OTP_SHIFT1_CMD_RESET_MODE_N (1L<<0) +#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_DONE (1L<<1) +#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_START (1L<<2) +#define BNX2_MISC_OTP_SHIFT1_CMD_LOAD_DATA (1L<<3) +#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_SELECT (0x1fL<<8) + +#define BNX2_MISC_OTP_SHIFT1_DATA 0x00000900 +#define BNX2_MISC_OTP_SHIFT2_CMD 0x00000904 +#define BNX2_MISC_OTP_SHIFT2_CMD_RESET_MODE_N (1L<<0) +#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_DONE (1L<<1) +#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_START (1L<<2) +#define BNX2_MISC_OTP_SHIFT2_CMD_LOAD_DATA (1L<<3) +#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_SELECT (0x1fL<<8) + +#define BNX2_MISC_OTP_SHIFT2_DATA 0x00000908 +#define BNX2_MISC_BIST_CS0 0x0000090c +#define BNX2_MISC_BIST_CS0_MBIST_EN (1L<<0) +#define BNX2_MISC_BIST_CS0_BIST_SETUP (0x3L<<1) +#define BNX2_MISC_BIST_CS0_MBIST_ASYNC_RESET (1L<<3) +#define BNX2_MISC_BIST_CS0_MBIST_DONE (1L<<8) +#define BNX2_MISC_BIST_CS0_MBIST_GO (1L<<9) +#define BNX2_MISC_BIST_CS0_BIST_OVERRIDE (1L<<31) + +#define BNX2_MISC_BIST_MEMSTATUS0 0x00000910 +#define BNX2_MISC_BIST_CS1 0x00000914 +#define BNX2_MISC_BIST_CS1_MBIST_EN (1L<<0) +#define BNX2_MISC_BIST_CS1_BIST_SETUP (0x3L<<1) +#define BNX2_MISC_BIST_CS1_MBIST_ASYNC_RESET (1L<<3) +#define BNX2_MISC_BIST_CS1_MBIST_DONE (1L<<8) +#define BNX2_MISC_BIST_CS1_MBIST_GO (1L<<9) + +#define BNX2_MISC_BIST_MEMSTATUS1 0x00000918 +#define BNX2_MISC_BIST_CS2 0x0000091c +#define BNX2_MISC_BIST_CS2_MBIST_EN (1L<<0) +#define BNX2_MISC_BIST_CS2_BIST_SETUP (0x3L<<1) +#define BNX2_MISC_BIST_CS2_MBIST_ASYNC_RESET (1L<<3) +#define BNX2_MISC_BIST_CS2_MBIST_DONE (1L<<8) +#define BNX2_MISC_BIST_CS2_MBIST_GO (1L<<9) + +#define BNX2_MISC_BIST_MEMSTATUS2 0x00000920 +#define BNX2_MISC_BIST_CS3 0x00000924 +#define BNX2_MISC_BIST_CS3_MBIST_EN (1L<<0) +#define BNX2_MISC_BIST_CS3_BIST_SETUP (0x3L<<1) +#define 
BNX2_MISC_BIST_CS3_MBIST_ASYNC_RESET (1L<<3) +#define BNX2_MISC_BIST_CS3_MBIST_DONE (1L<<8) +#define BNX2_MISC_BIST_CS3_MBIST_GO (1L<<9) + +#define BNX2_MISC_BIST_MEMSTATUS3 0x00000928 +#define BNX2_MISC_BIST_CS4 0x0000092c +#define BNX2_MISC_BIST_CS4_MBIST_EN (1L<<0) +#define BNX2_MISC_BIST_CS4_BIST_SETUP (0x3L<<1) +#define BNX2_MISC_BIST_CS4_MBIST_ASYNC_RESET (1L<<3) +#define BNX2_MISC_BIST_CS4_MBIST_DONE (1L<<8) +#define BNX2_MISC_BIST_CS4_MBIST_GO (1L<<9) + +#define BNX2_MISC_BIST_MEMSTATUS4 0x00000930 +#define BNX2_MISC_BIST_CS5 0x00000934 +#define BNX2_MISC_BIST_CS5_MBIST_EN (1L<<0) +#define BNX2_MISC_BIST_CS5_BIST_SETUP (0x3L<<1) +#define BNX2_MISC_BIST_CS5_MBIST_ASYNC_RESET (1L<<3) +#define BNX2_MISC_BIST_CS5_MBIST_DONE (1L<<8) +#define BNX2_MISC_BIST_CS5_MBIST_GO (1L<<9) + +#define BNX2_MISC_BIST_MEMSTATUS5 0x00000938 +#define BNX2_MISC_MEM_TM0 0x0000093c +#define BNX2_MISC_MEM_TM0_PCIE_REPLAY_TM (0xfL<<0) +#define BNX2_MISC_MEM_TM0_MCP_SCPAD (0xfL<<8) +#define BNX2_MISC_MEM_TM0_UMP_TM (0xffL<<16) +#define BNX2_MISC_MEM_TM0_HB_MEM_TM (0xfL<<24) + +#define BNX2_MISC_USPLL_CTRL 0x00000940 +#define BNX2_MISC_USPLL_CTRL_PH_DET_DIS (1L<<0) +#define BNX2_MISC_USPLL_CTRL_FREQ_DET_DIS (1L<<1) +#define BNX2_MISC_USPLL_CTRL_LCPX (0x3fL<<2) +#define BNX2_MISC_USPLL_CTRL_RX (0x3L<<8) +#define BNX2_MISC_USPLL_CTRL_VC_EN (1L<<10) +#define BNX2_MISC_USPLL_CTRL_VCO_MG (0x3L<<11) +#define BNX2_MISC_USPLL_CTRL_KVCO_XF (0x7L<<13) +#define BNX2_MISC_USPLL_CTRL_KVCO_XS (0x7L<<16) +#define BNX2_MISC_USPLL_CTRL_TESTD_EN (1L<<19) +#define BNX2_MISC_USPLL_CTRL_TESTD_SEL (0x7L<<20) +#define BNX2_MISC_USPLL_CTRL_TESTA_EN (1L<<23) +#define BNX2_MISC_USPLL_CTRL_TESTA_SEL (0x3L<<24) +#define BNX2_MISC_USPLL_CTRL_ATTEN_FREF (1L<<26) +#define BNX2_MISC_USPLL_CTRL_DIGITAL_RST (1L<<27) +#define BNX2_MISC_USPLL_CTRL_ANALOG_RST (1L<<28) +#define BNX2_MISC_USPLL_CTRL_LOCK (1L<<29) + +#define BNX2_MISC_PERR_STATUS0 0x00000944 +#define BNX2_MISC_PERR_STATUS0_COM_DMAE_PERR (1L<<0) +#define BNX2_MISC_PERR_STATUS0_CP_DMAE_PERR (1L<<1) +#define BNX2_MISC_PERR_STATUS0_RPM_ACPIBEMEM_PERR (1L<<2) +#define BNX2_MISC_PERR_STATUS0_CTX_USAGE_CNT_PERR (1L<<3) +#define BNX2_MISC_PERR_STATUS0_CTX_PGTBL_PERR (1L<<4) +#define BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR (1L<<5) +#define BNX2_MISC_PERR_STATUS0_CTX_MIRROR_PERR (1L<<6) +#define BNX2_MISC_PERR_STATUS0_COM_CTXC_PERR (1L<<7) +#define BNX2_MISC_PERR_STATUS0_COM_SCPAD_PERR (1L<<8) +#define BNX2_MISC_PERR_STATUS0_CP_CTXC_PERR (1L<<9) +#define BNX2_MISC_PERR_STATUS0_CP_SCPAD_PERR (1L<<10) +#define BNX2_MISC_PERR_STATUS0_RXP_RBUFC_PERR (1L<<11) +#define BNX2_MISC_PERR_STATUS0_RXP_CTXC_PERR (1L<<12) +#define BNX2_MISC_PERR_STATUS0_RXP_SCPAD_PERR (1L<<13) +#define BNX2_MISC_PERR_STATUS0_TPAT_SCPAD_PERR (1L<<14) +#define BNX2_MISC_PERR_STATUS0_TXP_CTXC_PERR (1L<<15) +#define BNX2_MISC_PERR_STATUS0_TXP_SCPAD_PERR (1L<<16) +#define BNX2_MISC_PERR_STATUS0_CS_TMEM_PERR (1L<<17) +#define BNX2_MISC_PERR_STATUS0_MQ_CTX_PERR (1L<<18) +#define BNX2_MISC_PERR_STATUS0_RPM_DFIFOMEM_PERR (1L<<19) +#define BNX2_MISC_PERR_STATUS0_RPC_DFIFOMEM_PERR (1L<<20) +#define BNX2_MISC_PERR_STATUS0_RBUF_PTRMEM_PERR (1L<<21) +#define BNX2_MISC_PERR_STATUS0_RBUF_DATAMEM_PERR (1L<<22) +#define BNX2_MISC_PERR_STATUS0_RV2P_P2IRAM_PERR (1L<<23) +#define BNX2_MISC_PERR_STATUS0_RV2P_P1IRAM_PERR (1L<<24) +#define BNX2_MISC_PERR_STATUS0_RV2P_CB1REGS_PERR (1L<<25) +#define BNX2_MISC_PERR_STATUS0_RV2P_CB0REGS_PERR (1L<<26) +#define BNX2_MISC_PERR_STATUS0_TPBUF_PERR (1L<<27) +#define BNX2_MISC_PERR_STATUS0_THBUF_PERR (1L<<28) 
+#define BNX2_MISC_PERR_STATUS0_TDMA_PERR (1L<<29) +#define BNX2_MISC_PERR_STATUS0_TBDC_PERR (1L<<30) +#define BNX2_MISC_PERR_STATUS0_TSCH_LR_PERR (1L<<31) + +#define BNX2_MISC_PERR_STATUS1 0x00000948 +#define BNX2_MISC_PERR_STATUS1_RBDC_PERR (1L<<0) +#define BNX2_MISC_PERR_STATUS1_RDMA_DFIFO_PERR (1L<<2) +#define BNX2_MISC_PERR_STATUS1_HC_STATS_PERR (1L<<3) +#define BNX2_MISC_PERR_STATUS1_HC_MSIX_PERR (1L<<4) +#define BNX2_MISC_PERR_STATUS1_HC_PRODUCSTB_PERR (1L<<5) +#define BNX2_MISC_PERR_STATUS1_HC_CONSUMSTB_PERR (1L<<6) +#define BNX2_MISC_PERR_STATUS1_TPATQ_PERR (1L<<7) +#define BNX2_MISC_PERR_STATUS1_MCPQ_PERR (1L<<8) +#define BNX2_MISC_PERR_STATUS1_TDMAQ_PERR (1L<<9) +#define BNX2_MISC_PERR_STATUS1_TXPQ_PERR (1L<<10) +#define BNX2_MISC_PERR_STATUS1_COMTQ_PERR (1L<<11) +#define BNX2_MISC_PERR_STATUS1_COMQ_PERR (1L<<12) +#define BNX2_MISC_PERR_STATUS1_RLUPQ_PERR (1L<<13) +#define BNX2_MISC_PERR_STATUS1_RXPQ_PERR (1L<<14) +#define BNX2_MISC_PERR_STATUS1_RV2PPQ_PERR (1L<<15) +#define BNX2_MISC_PERR_STATUS1_RDMAQ_PERR (1L<<16) +#define BNX2_MISC_PERR_STATUS1_TASQ_PERR (1L<<17) +#define BNX2_MISC_PERR_STATUS1_TBDRQ_PERR (1L<<18) +#define BNX2_MISC_PERR_STATUS1_TSCHQ_PERR (1L<<19) +#define BNX2_MISC_PERR_STATUS1_COMXQ_PERR (1L<<20) +#define BNX2_MISC_PERR_STATUS1_RXPCQ_PERR (1L<<21) +#define BNX2_MISC_PERR_STATUS1_RV2PTQ_PERR (1L<<22) +#define BNX2_MISC_PERR_STATUS1_RV2PMQ_PERR (1L<<23) +#define BNX2_MISC_PERR_STATUS1_CPQ_PERR (1L<<24) +#define BNX2_MISC_PERR_STATUS1_CSQ_PERR (1L<<25) +#define BNX2_MISC_PERR_STATUS1_RLUP_CID_PERR (1L<<26) +#define BNX2_MISC_PERR_STATUS1_RV2PCS_TMEM_PERR (1L<<27) +#define BNX2_MISC_PERR_STATUS1_RV2PCSQ_PERR (1L<<28) +#define BNX2_MISC_PERR_STATUS1_MQ_IDX_PERR (1L<<29) + +#define BNX2_MISC_PERR_STATUS2 0x0000094c +#define BNX2_MISC_PERR_STATUS2_TGT_FIFO_PERR (1L<<0) +#define BNX2_MISC_PERR_STATUS2_UMP_TX_PERR (1L<<1) +#define BNX2_MISC_PERR_STATUS2_UMP_RX_PERR (1L<<2) +#define BNX2_MISC_PERR_STATUS2_MCP_ROM_PERR (1L<<3) +#define BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR (1L<<4) +#define BNX2_MISC_PERR_STATUS2_HB_MEM_PERR (1L<<5) +#define BNX2_MISC_PERR_STATUS2_PCIE_REPLAY_PERR (1L<<6) + +#define BNX2_MISC_LCPLL_CTRL0 0x00000950 +#define BNX2_MISC_LCPLL_CTRL0_OAC (0x7L<<0) +#define BNX2_MISC_LCPLL_CTRL0_OAC_NEGTWENTY (0L<<0) +#define BNX2_MISC_LCPLL_CTRL0_OAC_ZERO (1L<<0) +#define BNX2_MISC_LCPLL_CTRL0_OAC_TWENTY (3L<<0) +#define BNX2_MISC_LCPLL_CTRL0_OAC_FORTY (7L<<0) +#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL (0x7L<<3) +#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_360 (0L<<3) +#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_480 (1L<<3) +#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_600 (3L<<3) +#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_720 (7L<<3) +#define BNX2_MISC_LCPLL_CTRL0_BIAS_CTRL (0x3L<<6) +#define BNX2_MISC_LCPLL_CTRL0_PLL_OBSERVE (0x7L<<8) +#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL (0x3L<<11) +#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_0 (0L<<11) +#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_1 (1L<<11) +#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_2 (2L<<11) +#define BNX2_MISC_LCPLL_CTRL0_PLLSEQSTART (1L<<13) +#define BNX2_MISC_LCPLL_CTRL0_RESERVED (1L<<14) +#define BNX2_MISC_LCPLL_CTRL0_CAPRETRY_EN (1L<<15) +#define BNX2_MISC_LCPLL_CTRL0_FREQMONITOR_EN (1L<<16) +#define BNX2_MISC_LCPLL_CTRL0_FREQDETRESTART_EN (1L<<17) +#define BNX2_MISC_LCPLL_CTRL0_FREQDETRETRY_EN (1L<<18) +#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE_EN (1L<<19) +#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE (1L<<20) +#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFPASS (1L<<21) +#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE_EN (1L<<22) 
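
The three BNX2_MISC_PERR_STATUS words above are plain 32-bit flag registers: each latched parity-error source occupies a single bit, selected by the masks defined in this hunk. The following is an illustrative sketch only, not code from this patch or from the in-tree driver: the helper name bnx2_dump_parity_status and the ioremap()ed "regs" pointer are assumptions, and only readl() plus the masks defined above are used.

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper: report a few latched parity-error sources. */
static void bnx2_dump_parity_status(void __iomem *regs)
{
	u32 sts0 = readl(regs + BNX2_MISC_PERR_STATUS0);
	u32 sts1 = readl(regs + BNX2_MISC_PERR_STATUS1);
	u32 sts2 = readl(regs + BNX2_MISC_PERR_STATUS2);

	if (sts0 & BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR)
		pr_err("bnx2: context cache parity error\n");
	if (sts1 & BNX2_MISC_PERR_STATUS1_HC_STATS_PERR)
		pr_err("bnx2: host coalescing statistics parity error\n");
	if (sts2 & BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR)
		pr_err("bnx2: MCP scratchpad parity error\n");
}
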
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE (1L<<23) +#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS_EN (1L<<24) +#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS (1L<<25) +#define BNX2_MISC_LCPLL_CTRL0_CAPRESTART (1L<<26) +#define BNX2_MISC_LCPLL_CTRL0_CAPSELECTM_EN (1L<<27) + +#define BNX2_MISC_LCPLL_CTRL1 0x00000954 +#define BNX2_MISC_LCPLL_CTRL1_CAPSELECTM (0x1fL<<0) +#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN_EN (1L<<5) +#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN (1L<<6) +#define BNX2_MISC_LCPLL_CTRL1_SLOWDN_XOR (1L<<7) + +#define BNX2_MISC_LCPLL_STATUS 0x00000958 +#define BNX2_MISC_LCPLL_STATUS_FREQDONE_SM (1L<<0) +#define BNX2_MISC_LCPLL_STATUS_FREQPASS_SM (1L<<1) +#define BNX2_MISC_LCPLL_STATUS_PLLSEQDONE (1L<<2) +#define BNX2_MISC_LCPLL_STATUS_PLLSEQPASS (1L<<3) +#define BNX2_MISC_LCPLL_STATUS_PLLSTATE (0x7L<<4) +#define BNX2_MISC_LCPLL_STATUS_CAPSTATE (0x7L<<7) +#define BNX2_MISC_LCPLL_STATUS_CAPSELECT (0x1fL<<10) +#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR (1L<<15) +#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_0 (0L<<15) +#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_1 (1L<<15) + +#define BNX2_MISC_OSCFUNDS_CTRL 0x0000095c +#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON (1L<<5) +#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_OFF (0L<<5) +#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_ON (1L<<5) +#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM (0x3L<<6) +#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_0 (0L<<6) +#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_1 (1L<<6) +#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_2 (2L<<6) +#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_3 (3L<<6) +#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ (0x3L<<8) +#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_0 (0L<<8) +#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_1 (1L<<8) +#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_2 (2L<<8) +#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_3 (3L<<8) +#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ (0x3L<<10) +#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_0 (0L<<10) +#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_1 (1L<<10) +#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_2 (2L<<10) +#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_3 (3L<<10) + + +/* + * nvm_reg definition + * offset: 0x6400 + */ +#define BNX2_NVM_COMMAND 0x00006400 +#define BNX2_NVM_COMMAND_RST (1L<<0) +#define BNX2_NVM_COMMAND_DONE (1L<<3) +#define BNX2_NVM_COMMAND_DOIT (1L<<4) +#define BNX2_NVM_COMMAND_WR (1L<<5) +#define BNX2_NVM_COMMAND_ERASE (1L<<6) +#define BNX2_NVM_COMMAND_FIRST (1L<<7) +#define BNX2_NVM_COMMAND_LAST (1L<<8) +#define BNX2_NVM_COMMAND_WREN (1L<<16) +#define BNX2_NVM_COMMAND_WRDI (1L<<17) +#define BNX2_NVM_COMMAND_EWSR (1L<<18) +#define BNX2_NVM_COMMAND_WRSR (1L<<19) +#define BNX2_NVM_COMMAND_RD_ID (1L<<20) +#define BNX2_NVM_COMMAND_RD_STATUS (1L<<21) +#define BNX2_NVM_COMMAND_MODE_256 (1L<<22) + +#define BNX2_NVM_STATUS 0x00006404 +#define BNX2_NVM_STATUS_PI_FSM_STATE (0xfL<<0) +#define BNX2_NVM_STATUS_EE_FSM_STATE (0xfL<<4) +#define BNX2_NVM_STATUS_EQ_FSM_STATE (0xfL<<8) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_XI (0x1fL<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_IDLE_XI (0L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD0_XI (1L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD1_XI (2L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH0_XI (3L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH1_XI (4L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ADDR0_XI (5L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA0_XI (6L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA1_XI (7L<<0) +#define 
BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA2_XI (8L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA0_XI (9L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA1_XI (10L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA2_XI (11L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID0_XI (12L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID1_XI (13L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID2_XI (14L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID3_XI (15L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID4_XI (16L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CHECK_BUSY0_XI (17L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ST_WREN_XI (18L<<0) +#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WAIT_XI (19L<<0) + +#define BNX2_NVM_WRITE 0x00006408 +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE (0xffffffffL<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_BIT_BANG (0L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EECLK (1L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EEDATA (2L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK (4L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B (8L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO (16L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI (32L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI_XI (1L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO_XI (2L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B_XI (4L<<0) +#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK_XI (8L<<0) + +#define BNX2_NVM_ADDR 0x0000640c +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_BIT_BANG (0L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EECLK (1L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EEDATA (2L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK (4L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B (8L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO (16L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI (32L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI_XI (1L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO_XI (2L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B_XI (4L<<0) +#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK_XI (8L<<0) + +#define BNX2_NVM_READ 0x00006410 +#define BNX2_NVM_READ_NVM_READ_VALUE (0xffffffffL<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_BIT_BANG (0L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_EECLK (1L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_EEDATA (2L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK (4L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B (8L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_SO (16L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_SI (32L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_SI_XI (1L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_SO_XI (2L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B_XI (4L<<0) +#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK_XI (8L<<0) + +#define BNX2_NVM_CFG1 0x00006414 +#define BNX2_NVM_CFG1_FLASH_MODE (1L<<0) +#define BNX2_NVM_CFG1_BUFFER_MODE (1L<<1) +#define BNX2_NVM_CFG1_PASS_MODE (1L<<2) +#define BNX2_NVM_CFG1_BITBANG_MODE (1L<<3) +#define BNX2_NVM_CFG1_STATUS_BIT (0x7L<<4) +#define BNX2_NVM_CFG1_STATUS_BIT_FLASH_RDY (0L<<4) +#define BNX2_NVM_CFG1_STATUS_BIT_BUFFER_RDY (7L<<4) +#define BNX2_NVM_CFG1_SPI_CLK_DIV (0xfL<<7) +#define BNX2_NVM_CFG1_SEE_CLK_DIV (0x7ffL<<11) +#define BNX2_NVM_CFG1_STRAP_CONTROL_0 (1L<<23) +#define BNX2_NVM_CFG1_PROTECT_MODE (1L<<24) +#define BNX2_NVM_CFG1_FLASH_SIZE (1L<<25) +#define BNX2_NVM_CFG1_FW_USTRAP_1 (1L<<26) +#define BNX2_NVM_CFG1_FW_USTRAP_0 (1L<<27) +#define BNX2_NVM_CFG1_FW_USTRAP_2 (1L<<28) +#define 
BNX2_NVM_CFG1_FW_USTRAP_3 (1L<<29) +#define BNX2_NVM_CFG1_FW_FLASH_TYPE_EN (1L<<30) +#define BNX2_NVM_CFG1_COMPAT_BYPASSS (1L<<31) + +#define BNX2_NVM_CFG2 0x00006418 +#define BNX2_NVM_CFG2_ERASE_CMD (0xffL<<0) +#define BNX2_NVM_CFG2_DUMMY (0xffL<<8) +#define BNX2_NVM_CFG2_STATUS_CMD (0xffL<<16) +#define BNX2_NVM_CFG2_READ_ID (0xffL<<24) + +#define BNX2_NVM_CFG3 0x0000641c +#define BNX2_NVM_CFG3_BUFFER_RD_CMD (0xffL<<0) +#define BNX2_NVM_CFG3_WRITE_CMD (0xffL<<8) +#define BNX2_NVM_CFG3_BUFFER_WRITE_CMD (0xffL<<16) +#define BNX2_NVM_CFG3_READ_CMD (0xffL<<24) + +#define BNX2_NVM_SW_ARB 0x00006420 +#define BNX2_NVM_SW_ARB_ARB_REQ_SET0 (1L<<0) +#define BNX2_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1) +#define BNX2_NVM_SW_ARB_ARB_REQ_SET2 (1L<<2) +#define BNX2_NVM_SW_ARB_ARB_REQ_SET3 (1L<<3) +#define BNX2_NVM_SW_ARB_ARB_REQ_CLR0 (1L<<4) +#define BNX2_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5) +#define BNX2_NVM_SW_ARB_ARB_REQ_CLR2 (1L<<6) +#define BNX2_NVM_SW_ARB_ARB_REQ_CLR3 (1L<<7) +#define BNX2_NVM_SW_ARB_ARB_ARB0 (1L<<8) +#define BNX2_NVM_SW_ARB_ARB_ARB1 (1L<<9) +#define BNX2_NVM_SW_ARB_ARB_ARB2 (1L<<10) +#define BNX2_NVM_SW_ARB_ARB_ARB3 (1L<<11) +#define BNX2_NVM_SW_ARB_REQ0 (1L<<12) +#define BNX2_NVM_SW_ARB_REQ1 (1L<<13) +#define BNX2_NVM_SW_ARB_REQ2 (1L<<14) +#define BNX2_NVM_SW_ARB_REQ3 (1L<<15) + +#define BNX2_NVM_ACCESS_ENABLE 0x00006424 +#define BNX2_NVM_ACCESS_ENABLE_EN (1L<<0) +#define BNX2_NVM_ACCESS_ENABLE_WR_EN (1L<<1) + +#define BNX2_NVM_WRITE1 0x00006428 +#define BNX2_NVM_WRITE1_WREN_CMD (0xffL<<0) +#define BNX2_NVM_WRITE1_WRDI_CMD (0xffL<<8) +#define BNX2_NVM_WRITE1_SR_DATA (0xffL<<16) + +#define BNX2_NVM_CFG4 0x0000642c +#define BNX2_NVM_CFG4_FLASH_SIZE (0x7L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_1MBIT (0L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_2MBIT (1L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_4MBIT (2L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_8MBIT (3L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_16MBIT (4L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_32MBIT (5L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_64MBIT (6L<<0) +#define BNX2_NVM_CFG4_FLASH_SIZE_128MBIT (7L<<0) +#define BNX2_NVM_CFG4_FLASH_VENDOR (1L<<3) +#define BNX2_NVM_CFG4_FLASH_VENDOR_ST (0L<<3) +#define BNX2_NVM_CFG4_FLASH_VENDOR_ATMEL (1L<<3) +#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC (0x3L<<4) +#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT8 (0L<<4) +#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT9 (1L<<4) +#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT10 (2L<<4) +#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT11 (3L<<4) +#define BNX2_NVM_CFG4_STATUS_BIT_POLARITY (1L<<6) +#define BNX2_NVM_CFG4_RESERVED (0x1ffffffL<<7) + +#define BNX2_NVM_RECONFIG 0x00006430 +#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE (0xfL<<0) +#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ST (0L<<0) +#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ATMEL (1L<<0) +#define BNX2_NVM_RECONFIG_RECONFIG_STRAP_VALUE (0xfL<<4) +#define BNX2_NVM_RECONFIG_RESERVED (0x7fffffL<<8) +#define BNX2_NVM_RECONFIG_RECONFIG_DONE (1L<<31) + + + +/* + * dma_reg definition + * offset: 0xc00 + */ +#define BNX2_DMA_COMMAND 0x00000c00 +#define BNX2_DMA_COMMAND_ENABLE (1L<<0) + +#define BNX2_DMA_STATUS 0x00000c04 +#define BNX2_DMA_STATUS_PAR_ERROR_STATE (1L<<0) +#define BNX2_DMA_STATUS_READ_TRANSFERS_STAT (1L<<16) +#define BNX2_DMA_STATUS_READ_DELAY_PCI_CLKS_STAT (1L<<17) +#define BNX2_DMA_STATUS_BIG_READ_TRANSFERS_STAT (1L<<18) +#define BNX2_DMA_STATUS_BIG_READ_DELAY_PCI_CLKS_STAT (1L<<19) +#define BNX2_DMA_STATUS_BIG_READ_RETRY_AFTER_DATA_STAT (1L<<20) +#define BNX2_DMA_STATUS_WRITE_TRANSFERS_STAT (1L<<21) 
+#define BNX2_DMA_STATUS_WRITE_DELAY_PCI_CLKS_STAT (1L<<22) +#define BNX2_DMA_STATUS_BIG_WRITE_TRANSFERS_STAT (1L<<23) +#define BNX2_DMA_STATUS_BIG_WRITE_DELAY_PCI_CLKS_STAT (1L<<24) +#define BNX2_DMA_STATUS_BIG_WRITE_RETRY_AFTER_DATA_STAT (1L<<25) +#define BNX2_DMA_STATUS_GLOBAL_ERR_XI (1L<<0) +#define BNX2_DMA_STATUS_BME_XI (1L<<4) + +#define BNX2_DMA_CONFIG 0x00000c08 +#define BNX2_DMA_CONFIG_DATA_BYTE_SWAP (1L<<0) +#define BNX2_DMA_CONFIG_DATA_WORD_SWAP (1L<<1) +#define BNX2_DMA_CONFIG_CNTL_BYTE_SWAP (1L<<4) +#define BNX2_DMA_CONFIG_CNTL_WORD_SWAP (1L<<5) +#define BNX2_DMA_CONFIG_ONE_DMA (1L<<6) +#define BNX2_DMA_CONFIG_CNTL_TWO_DMA (1L<<7) +#define BNX2_DMA_CONFIG_CNTL_FPGA_MODE (1L<<8) +#define BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA (1L<<10) +#define BNX2_DMA_CONFIG_CNTL_PCI_COMP_DLY (1L<<11) +#define BNX2_DMA_CONFIG_NO_RCHANS_IN_USE (0xfL<<12) +#define BNX2_DMA_CONFIG_NO_WCHANS_IN_USE (0xfL<<16) +#define BNX2_DMA_CONFIG_PCI_CLK_CMP_BITS (0x7L<<20) +#define BNX2_DMA_CONFIG_PCI_FAST_CLK_CMP (1L<<23) +#define BNX2_DMA_CONFIG_BIG_SIZE (0xfL<<24) +#define BNX2_DMA_CONFIG_BIG_SIZE_NONE (0x0L<<24) +#define BNX2_DMA_CONFIG_BIG_SIZE_64 (0x1L<<24) +#define BNX2_DMA_CONFIG_BIG_SIZE_128 (0x2L<<24) +#define BNX2_DMA_CONFIG_BIG_SIZE_256 (0x4L<<24) +#define BNX2_DMA_CONFIG_BIG_SIZE_512 (0x8L<<24) +#define BNX2_DMA_CONFIG_DAT_WBSWAP_MODE_XI (0x3L<<0) +#define BNX2_DMA_CONFIG_CTL_WBSWAP_MODE_XI (0x3L<<4) +#define BNX2_DMA_CONFIG_MAX_PL_XI (0x7L<<12) +#define BNX2_DMA_CONFIG_MAX_PL_128B_XI (0L<<12) +#define BNX2_DMA_CONFIG_MAX_PL_256B_XI (1L<<12) +#define BNX2_DMA_CONFIG_MAX_PL_512B_XI (2L<<12) +#define BNX2_DMA_CONFIG_MAX_PL_EN_XI (1L<<15) +#define BNX2_DMA_CONFIG_MAX_RRS_XI (0x7L<<16) +#define BNX2_DMA_CONFIG_MAX_RRS_128B_XI (0L<<16) +#define BNX2_DMA_CONFIG_MAX_RRS_256B_XI (1L<<16) +#define BNX2_DMA_CONFIG_MAX_RRS_512B_XI (2L<<16) +#define BNX2_DMA_CONFIG_MAX_RRS_1024B_XI (3L<<16) +#define BNX2_DMA_CONFIG_MAX_RRS_2048B_XI (4L<<16) +#define BNX2_DMA_CONFIG_MAX_RRS_4096B_XI (5L<<16) +#define BNX2_DMA_CONFIG_MAX_RRS_EN_XI (1L<<19) +#define BNX2_DMA_CONFIG_NO_64SWAP_EN_XI (1L<<31) + +#define BNX2_DMA_BLACKOUT 0x00000c0c +#define BNX2_DMA_BLACKOUT_RD_RETRY_BLACKOUT (0xffL<<0) +#define BNX2_DMA_BLACKOUT_2ND_RD_RETRY_BLACKOUT (0xffL<<8) +#define BNX2_DMA_BLACKOUT_WR_RETRY_BLACKOUT (0xffL<<16) + +#define BNX2_DMA_READ_MASTER_SETTING_0 0x00000c10 +#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_NO_SNOOP (1L<<0) +#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_RELAX_ORDER (1L<<1) +#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PRIORITY (1L<<2) +#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_TRAFFIC_CLASS (0x7L<<4) +#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PARAM_EN (1L<<7) +#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_NO_SNOOP (1L<<8) +#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_RELAX_ORDER (1L<<9) +#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PRIORITY (1L<<10) +#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_TRAFFIC_CLASS (0x7L<<12) +#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PARAM_EN (1L<<15) +#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_NO_SNOOP (1L<<16) +#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_RELAX_ORDER (1L<<17) +#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PRIORITY (1L<<18) +#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_TRAFFIC_CLASS (0x7L<<20) +#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PARAM_EN (1L<<23) +#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24) +#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25) +#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PRIORITY (1L<<26) +#define 
BNX2_DMA_READ_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28) +#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31) + +#define BNX2_DMA_READ_MASTER_SETTING_1 0x00000c14 +#define BNX2_DMA_READ_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0) +#define BNX2_DMA_READ_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1) +#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PRIORITY (1L<<2) +#define BNX2_DMA_READ_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4) +#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PARAM_EN (1L<<7) +#define BNX2_DMA_READ_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8) +#define BNX2_DMA_READ_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9) +#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PRIORITY (1L<<10) +#define BNX2_DMA_READ_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12) +#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PARAM_EN (1L<<15) + +#define BNX2_DMA_WRITE_MASTER_SETTING_0 0x00000c18 +#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_NO_SNOOP (1L<<0) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_RELAX_ORDER (1L<<1) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PRIORITY (1L<<2) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_CS_VLD (1L<<3) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_TRAFFIC_CLASS (0x7L<<4) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PARAM_EN (1L<<7) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_NO_SNOOP (1L<<8) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_RELAX_ORDER (1L<<9) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PRIORITY (1L<<10) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_CS_VLD (1L<<11) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_TRAFFIC_CLASS (0x7L<<12) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PARAM_EN (1L<<15) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_NO_SNOOP (1L<<24) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_RELAX_ORDER (1L<<25) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PRIORITY (1L<<26) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_CS_VLD (1L<<27) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_TRAFFIC_CLASS (0x7L<<28) +#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PARAM_EN (1L<<31) + +#define BNX2_DMA_WRITE_MASTER_SETTING_1 0x00000c1c +#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_NO_SNOOP (1L<<0) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_RELAX_ORDER (1L<<1) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PRIORITY (1L<<2) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_CS_VLD (1L<<3) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_TRAFFIC_CLASS (0x7L<<4) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PARAM_EN (1L<<7) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_NO_SNOOP (1L<<8) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_RELAX_ORDER (1L<<9) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PRIORITY (1L<<10) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_CS_VLD (1L<<11) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_TRAFFIC_CLASS (0x7L<<12) +#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PARAM_EN (1L<<15) + +#define BNX2_DMA_ARBITER 0x00000c20 +#define BNX2_DMA_ARBITER_NUM_READS (0x7L<<0) +#define BNX2_DMA_ARBITER_WR_ARB_MODE (1L<<4) +#define BNX2_DMA_ARBITER_WR_ARB_MODE_STRICT (0L<<4) +#define BNX2_DMA_ARBITER_WR_ARB_MODE_RND_RBN (1L<<4) +#define BNX2_DMA_ARBITER_RD_ARB_MODE (0x3L<<5) +#define BNX2_DMA_ARBITER_RD_ARB_MODE_STRICT (0L<<5) +#define BNX2_DMA_ARBITER_RD_ARB_MODE_RND_RBN (1L<<5) +#define BNX2_DMA_ARBITER_RD_ARB_MODE_WGT_RND_RBN (2L<<5) +#define BNX2_DMA_ARBITER_ALT_MODE_EN (1L<<8) +#define BNX2_DMA_ARBITER_RR_MODE (1L<<9) +#define BNX2_DMA_ARBITER_TIMER_MODE (1L<<10) +#define BNX2_DMA_ARBITER_OUSTD_READ_REQ (0xfL<<12) + +#define BNX2_DMA_ARB_TIMERS 0x00000c24 +#define BNX2_DMA_ARB_TIMERS_RD_DRR_WAIT_TIME (0xffL<<0) 
+#define BNX2_DMA_ARB_TIMERS_TM_MIN_TIMEOUT (0xffL<<12) +#define BNX2_DMA_ARB_TIMERS_TM_MAX_TIMEOUT (0xfffL<<20) + +#define BNX2_DMA_DEBUG_VECT_PEEK 0x00000c2c +#define BNX2_DMA_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_DMA_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_DMA_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_DMA_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_DMA_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_DMA_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_DMA_TAG_RAM_00 0x00000c30 +#define BNX2_DMA_TAG_RAM_00_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_00_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_00_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_00_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_00_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_00_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_00_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_00_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_00_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_00_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_00_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_00_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_00_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_00_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_01 0x00000c34 +#define BNX2_DMA_TAG_RAM_01_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_01_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_01_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_01_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_01_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_01_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_01_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_01_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_01_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_01_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_01_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_01_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_01_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_01_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_02 0x00000c38 +#define BNX2_DMA_TAG_RAM_02_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_02_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_02_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_02_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_02_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_02_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_02_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_02_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_02_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_02_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_02_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_02_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_02_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_02_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_03 0x00000c3c +#define BNX2_DMA_TAG_RAM_03_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_03_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_03_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_03_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_03_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_03_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_03_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_03_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_03_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_03_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_03_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_03_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_03_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_03_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_04 0x00000c40 +#define BNX2_DMA_TAG_RAM_04_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_04_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_04_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_04_MASTER_RBDC (1L<<4) +#define 
BNX2_DMA_TAG_RAM_04_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_04_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_04_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_04_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_04_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_04_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_04_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_04_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_04_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_04_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_05 0x00000c44 +#define BNX2_DMA_TAG_RAM_05_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_05_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_05_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_05_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_05_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_05_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_05_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_05_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_05_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_05_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_05_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_05_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_05_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_05_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_06 0x00000c48 +#define BNX2_DMA_TAG_RAM_06_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_06_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_06_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_06_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_06_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_06_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_06_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_06_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_06_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_06_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_06_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_06_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_06_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_06_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_07 0x00000c4c +#define BNX2_DMA_TAG_RAM_07_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_07_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_07_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_07_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_07_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_07_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_07_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_07_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_07_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_07_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_07_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_07_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_07_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_07_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_08 0x00000c50 +#define BNX2_DMA_TAG_RAM_08_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_08_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_08_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_08_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_08_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_08_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_08_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_08_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_08_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_08_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_08_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_08_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_08_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_08_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_09 0x00000c54 +#define BNX2_DMA_TAG_RAM_09_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_09_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_09_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_09_MASTER_RBDC (1L<<4) +#define 
BNX2_DMA_TAG_RAM_09_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_09_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_09_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_09_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_09_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_09_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_09_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_09_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_09_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_09_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_10 0x00000c58 +#define BNX2_DMA_TAG_RAM_10_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_10_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_10_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_10_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_10_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_10_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_10_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_10_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_10_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_10_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_10_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_10_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_10_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_10_VALID (1L<<10) + +#define BNX2_DMA_TAG_RAM_11 0x00000c5c +#define BNX2_DMA_TAG_RAM_11_CHANNEL (0xfL<<0) +#define BNX2_DMA_TAG_RAM_11_MASTER (0x7L<<4) +#define BNX2_DMA_TAG_RAM_11_MASTER_CTX (0L<<4) +#define BNX2_DMA_TAG_RAM_11_MASTER_RBDC (1L<<4) +#define BNX2_DMA_TAG_RAM_11_MASTER_TBDC (2L<<4) +#define BNX2_DMA_TAG_RAM_11_MASTER_COM (3L<<4) +#define BNX2_DMA_TAG_RAM_11_MASTER_CP (4L<<4) +#define BNX2_DMA_TAG_RAM_11_MASTER_TDMA (5L<<4) +#define BNX2_DMA_TAG_RAM_11_SWAP (0x3L<<7) +#define BNX2_DMA_TAG_RAM_11_SWAP_CONFIG (0L<<7) +#define BNX2_DMA_TAG_RAM_11_SWAP_DATA (1L<<7) +#define BNX2_DMA_TAG_RAM_11_SWAP_CONTROL (2L<<7) +#define BNX2_DMA_TAG_RAM_11_FUNCTION (1L<<9) +#define BNX2_DMA_TAG_RAM_11_VALID (1L<<10) + +#define BNX2_DMA_RCHAN_STAT_22 0x00000c60 +#define BNX2_DMA_RCHAN_STAT_30 0x00000c64 +#define BNX2_DMA_RCHAN_STAT_31 0x00000c68 +#define BNX2_DMA_RCHAN_STAT_32 0x00000c6c +#define BNX2_DMA_RCHAN_STAT_40 0x00000c70 +#define BNX2_DMA_RCHAN_STAT_41 0x00000c74 +#define BNX2_DMA_RCHAN_STAT_42 0x00000c78 +#define BNX2_DMA_RCHAN_STAT_50 0x00000c7c +#define BNX2_DMA_RCHAN_STAT_51 0x00000c80 +#define BNX2_DMA_RCHAN_STAT_52 0x00000c84 +#define BNX2_DMA_RCHAN_STAT_60 0x00000c88 +#define BNX2_DMA_RCHAN_STAT_61 0x00000c8c +#define BNX2_DMA_RCHAN_STAT_62 0x00000c90 +#define BNX2_DMA_RCHAN_STAT_70 0x00000c94 +#define BNX2_DMA_RCHAN_STAT_71 0x00000c98 +#define BNX2_DMA_RCHAN_STAT_72 0x00000c9c +#define BNX2_DMA_WCHAN_STAT_00 0x00000ca0 +#define BNX2_DMA_WCHAN_STAT_00_WCHAN_STA_HOST_ADDR_LOW (0xffffffffL<<0) + +#define BNX2_DMA_WCHAN_STAT_01 0x00000ca4 +#define BNX2_DMA_WCHAN_STAT_01_WCHAN_STA_HOST_ADDR_HIGH (0xffffffffL<<0) + +#define BNX2_DMA_WCHAN_STAT_02 0x00000ca8 +#define BNX2_DMA_WCHAN_STAT_02_LENGTH (0xffffL<<0) +#define BNX2_DMA_WCHAN_STAT_02_WORD_SWAP (1L<<16) +#define BNX2_DMA_WCHAN_STAT_02_BYTE_SWAP (1L<<17) +#define BNX2_DMA_WCHAN_STAT_02_PRIORITY_LVL (1L<<18) + +#define BNX2_DMA_WCHAN_STAT_10 0x00000cac +#define BNX2_DMA_WCHAN_STAT_11 0x00000cb0 +#define BNX2_DMA_WCHAN_STAT_12 0x00000cb4 +#define BNX2_DMA_WCHAN_STAT_20 0x00000cb8 +#define BNX2_DMA_WCHAN_STAT_21 0x00000cbc +#define BNX2_DMA_WCHAN_STAT_22 0x00000cc0 +#define BNX2_DMA_WCHAN_STAT_30 0x00000cc4 +#define BNX2_DMA_WCHAN_STAT_31 0x00000cc8 +#define BNX2_DMA_WCHAN_STAT_32 0x00000ccc +#define BNX2_DMA_WCHAN_STAT_40 0x00000cd0 +#define BNX2_DMA_WCHAN_STAT_41 0x00000cd4 +#define 
BNX2_DMA_WCHAN_STAT_42 0x00000cd8 +#define BNX2_DMA_WCHAN_STAT_50 0x00000cdc +#define BNX2_DMA_WCHAN_STAT_51 0x00000ce0 +#define BNX2_DMA_WCHAN_STAT_52 0x00000ce4 +#define BNX2_DMA_WCHAN_STAT_60 0x00000ce8 +#define BNX2_DMA_WCHAN_STAT_61 0x00000cec +#define BNX2_DMA_WCHAN_STAT_62 0x00000cf0 +#define BNX2_DMA_WCHAN_STAT_70 0x00000cf4 +#define BNX2_DMA_WCHAN_STAT_71 0x00000cf8 +#define BNX2_DMA_WCHAN_STAT_72 0x00000cfc +#define BNX2_DMA_ARB_STAT_00 0x00000d00 +#define BNX2_DMA_ARB_STAT_00_MASTER (0xffffL<<0) +#define BNX2_DMA_ARB_STAT_00_MASTER_ENC (0xffL<<16) +#define BNX2_DMA_ARB_STAT_00_CUR_BINMSTR (0xffL<<24) + +#define BNX2_DMA_ARB_STAT_01 0x00000d04 +#define BNX2_DMA_ARB_STAT_01_LPR_RPTR (0xfL<<0) +#define BNX2_DMA_ARB_STAT_01_LPR_WPTR (0xfL<<4) +#define BNX2_DMA_ARB_STAT_01_LPB_RPTR (0xfL<<8) +#define BNX2_DMA_ARB_STAT_01_LPB_WPTR (0xfL<<12) +#define BNX2_DMA_ARB_STAT_01_HPR_RPTR (0xfL<<16) +#define BNX2_DMA_ARB_STAT_01_HPR_WPTR (0xfL<<20) +#define BNX2_DMA_ARB_STAT_01_HPB_RPTR (0xfL<<24) +#define BNX2_DMA_ARB_STAT_01_HPB_WPTR (0xfL<<28) + +#define BNX2_DMA_FUSE_CTRL0_CMD 0x00000f00 +#define BNX2_DMA_FUSE_CTRL0_CMD_PWRUP_DONE (1L<<0) +#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT_DONE (1L<<1) +#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT (1L<<2) +#define BNX2_DMA_FUSE_CTRL0_CMD_LOAD (1L<<3) +#define BNX2_DMA_FUSE_CTRL0_CMD_SEL (0xfL<<8) + +#define BNX2_DMA_FUSE_CTRL0_DATA 0x00000f04 +#define BNX2_DMA_FUSE_CTRL1_CMD 0x00000f08 +#define BNX2_DMA_FUSE_CTRL1_CMD_PWRUP_DONE (1L<<0) +#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT_DONE (1L<<1) +#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT (1L<<2) +#define BNX2_DMA_FUSE_CTRL1_CMD_LOAD (1L<<3) +#define BNX2_DMA_FUSE_CTRL1_CMD_SEL (0xfL<<8) + +#define BNX2_DMA_FUSE_CTRL1_DATA 0x00000f0c +#define BNX2_DMA_FUSE_CTRL2_CMD 0x00000f10 +#define BNX2_DMA_FUSE_CTRL2_CMD_PWRUP_DONE (1L<<0) +#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT_DONE (1L<<1) +#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT (1L<<2) +#define BNX2_DMA_FUSE_CTRL2_CMD_LOAD (1L<<3) +#define BNX2_DMA_FUSE_CTRL2_CMD_SEL (0xfL<<8) + +#define BNX2_DMA_FUSE_CTRL2_DATA 0x00000f14 + + +/* + * context_reg definition + * offset: 0x1000 + */ +#define BNX2_CTX_COMMAND 0x00001000 +#define BNX2_CTX_COMMAND_ENABLED (1L<<0) +#define BNX2_CTX_COMMAND_DISABLE_USAGE_CNT (1L<<1) +#define BNX2_CTX_COMMAND_DISABLE_PLRU (1L<<2) +#define BNX2_CTX_COMMAND_DISABLE_COMBINE_READ (1L<<3) +#define BNX2_CTX_COMMAND_FLUSH_AHEAD (0x1fL<<8) +#define BNX2_CTX_COMMAND_MEM_INIT (1L<<13) +#define BNX2_CTX_COMMAND_PAGE_SIZE (0xfL<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_256 (0L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_512 (1L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_1K (2L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_2K (3L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_4K (4L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_8K (5L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_16K (6L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_32K (7L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_64K (8L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_128K (9L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_256K (10L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_512K (11L<<16) +#define BNX2_CTX_COMMAND_PAGE_SIZE_1M (12L<<16) + +#define BNX2_CTX_STATUS 0x00001004 +#define BNX2_CTX_STATUS_LOCK_WAIT (1L<<0) +#define BNX2_CTX_STATUS_READ_STAT (1L<<16) +#define BNX2_CTX_STATUS_WRITE_STAT (1L<<17) +#define BNX2_CTX_STATUS_ACC_STALL_STAT (1L<<18) +#define BNX2_CTX_STATUS_LOCK_STALL_STAT (1L<<19) +#define BNX2_CTX_STATUS_EXT_READ_STAT (1L<<20) +#define BNX2_CTX_STATUS_EXT_WRITE_STAT (1L<<21) +#define BNX2_CTX_STATUS_MISS_STAT (1L<<22) +#define 
BNX2_CTX_STATUS_HIT_STAT (1L<<23) +#define BNX2_CTX_STATUS_DEAD_LOCK (1L<<24) +#define BNX2_CTX_STATUS_USAGE_CNT_ERR (1L<<25) +#define BNX2_CTX_STATUS_INVALID_PAGE (1L<<26) + +#define BNX2_CTX_VIRT_ADDR 0x00001008 +#define BNX2_CTX_VIRT_ADDR_VIRT_ADDR (0x7fffL<<6) + +#define BNX2_CTX_PAGE_TBL 0x0000100c +#define BNX2_CTX_PAGE_TBL_PAGE_TBL (0x3fffL<<6) + +#define BNX2_CTX_DATA_ADR 0x00001010 +#define BNX2_CTX_DATA_ADR_DATA_ADR (0x7ffffL<<2) + +#define BNX2_CTX_DATA 0x00001014 +#define BNX2_CTX_LOCK 0x00001018 +#define BNX2_CTX_LOCK_TYPE (0x7L<<0) +#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_VOID (0x0L<<0) +#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_PROTOCOL (0x1L<<0) +#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TX (0x2L<<0) +#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TIMER (0x4L<<0) +#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE (0x7L<<0) +#define BNX2_CTX_LOCK_TYPE_VOID_XI (0L<<0) +#define BNX2_CTX_LOCK_TYPE_PROTOCOL_XI (1L<<0) +#define BNX2_CTX_LOCK_TYPE_TX_XI (2L<<0) +#define BNX2_CTX_LOCK_TYPE_TIMER_XI (4L<<0) +#define BNX2_CTX_LOCK_TYPE_COMPLETE_XI (7L<<0) +#define BNX2_CTX_LOCK_CID_VALUE (0x3fffL<<7) +#define BNX2_CTX_LOCK_GRANTED (1L<<26) +#define BNX2_CTX_LOCK_MODE (0x7L<<27) +#define BNX2_CTX_LOCK_MODE_UNLOCK (0x0L<<27) +#define BNX2_CTX_LOCK_MODE_IMMEDIATE (0x1L<<27) +#define BNX2_CTX_LOCK_MODE_SURE (0x2L<<27) +#define BNX2_CTX_LOCK_STATUS (1L<<30) +#define BNX2_CTX_LOCK_REQ (1L<<31) + +#define BNX2_CTX_CTX_CTRL 0x0000101c +#define BNX2_CTX_CTX_CTRL_CTX_ADDR (0x7ffffL<<2) +#define BNX2_CTX_CTX_CTRL_MOD_USAGE_CNT (0x3L<<21) +#define BNX2_CTX_CTX_CTRL_NO_RAM_ACC (1L<<23) +#define BNX2_CTX_CTX_CTRL_PREFETCH_SIZE (0x3L<<24) +#define BNX2_CTX_CTX_CTRL_ATTR (1L<<26) +#define BNX2_CTX_CTX_CTRL_WRITE_REQ (1L<<30) +#define BNX2_CTX_CTX_CTRL_READ_REQ (1L<<31) + +#define BNX2_CTX_CTX_DATA 0x00001020 +#define BNX2_CTX_ACCESS_STATUS 0x00001040 +#define BNX2_CTX_ACCESS_STATUS_MASTERENCODED (0xfL<<0) +#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYSM (0x3L<<10) +#define BNX2_CTX_ACCESS_STATUS_PAGETABLEINITSM (0x3L<<12) +#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYINITSM (0x3L<<14) +#define BNX2_CTX_ACCESS_STATUS_QUALIFIED_REQUEST (0x7ffL<<17) +#define BNX2_CTX_ACCESS_STATUS_CAMMASTERENCODED_XI (0x1fL<<0) +#define BNX2_CTX_ACCESS_STATUS_CACHEMASTERENCODED_XI (0x1fL<<5) +#define BNX2_CTX_ACCESS_STATUS_REQUEST_XI (0x3fffffL<<10) + +#define BNX2_CTX_DBG_LOCK_STATUS 0x00001044 +#define BNX2_CTX_DBG_LOCK_STATUS_SM (0x3ffL<<0) +#define BNX2_CTX_DBG_LOCK_STATUS_MATCH (0x3ffL<<22) + +#define BNX2_CTX_CACHE_CTRL_STATUS 0x00001048 +#define BNX2_CTX_CACHE_CTRL_STATUS_RFIFO_OVERFLOW (1L<<0) +#define BNX2_CTX_CACHE_CTRL_STATUS_INVALID_READ_COMP (1L<<1) +#define BNX2_CTX_CACHE_CTRL_STATUS_FLUSH_START (1L<<6) +#define BNX2_CTX_CACHE_CTRL_STATUS_FREE_ENTRY_CNT (0x3fL<<7) +#define BNX2_CTX_CACHE_CTRL_STATUS_CACHE_ENTRY_NEEDED (0x3fL<<13) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN0_ACTIVE (1L<<19) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN1_ACTIVE (1L<<20) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN2_ACTIVE (1L<<21) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN3_ACTIVE (1L<<22) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN4_ACTIVE (1L<<23) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN5_ACTIVE (1L<<24) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN6_ACTIVE (1L<<25) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN7_ACTIVE (1L<<26) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN8_ACTIVE (1L<<27) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN9_ACTIVE (1L<<28) +#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN10_ACTIVE (1L<<29) + +#define BNX2_CTX_CACHE_CTRL_SM_STATUS 
0x0000104c +#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_DWC (0x7L<<0) +#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_WFIFOC (0x7L<<3) +#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RTAGC (0x7L<<6) +#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RFIFOC (0x7L<<9) +#define BNX2_CTX_CACHE_CTRL_SM_STATUS_INVALID_BLK_ADDR (0x7fffL<<16) + +#define BNX2_CTX_CACHE_STATUS 0x00001050 +#define BNX2_CTX_CACHE_STATUS_HELD_ENTRIES (0x3ffL<<0) +#define BNX2_CTX_CACHE_STATUS_MAX_HELD_ENTRIES (0x3ffL<<16) + +#define BNX2_CTX_DMA_STATUS 0x00001054 +#define BNX2_CTX_DMA_STATUS_RD_CHAN0_STATUS (0x3L<<0) +#define BNX2_CTX_DMA_STATUS_RD_CHAN1_STATUS (0x3L<<2) +#define BNX2_CTX_DMA_STATUS_RD_CHAN2_STATUS (0x3L<<4) +#define BNX2_CTX_DMA_STATUS_RD_CHAN3_STATUS (0x3L<<6) +#define BNX2_CTX_DMA_STATUS_RD_CHAN4_STATUS (0x3L<<8) +#define BNX2_CTX_DMA_STATUS_RD_CHAN5_STATUS (0x3L<<10) +#define BNX2_CTX_DMA_STATUS_RD_CHAN6_STATUS (0x3L<<12) +#define BNX2_CTX_DMA_STATUS_RD_CHAN7_STATUS (0x3L<<14) +#define BNX2_CTX_DMA_STATUS_RD_CHAN8_STATUS (0x3L<<16) +#define BNX2_CTX_DMA_STATUS_RD_CHAN9_STATUS (0x3L<<18) +#define BNX2_CTX_DMA_STATUS_RD_CHAN10_STATUS (0x3L<<20) + +#define BNX2_CTX_REP_STATUS 0x00001058 +#define BNX2_CTX_REP_STATUS_ERROR_ENTRY (0x3ffL<<0) +#define BNX2_CTX_REP_STATUS_ERROR_CLIENT_ID (0x1fL<<10) +#define BNX2_CTX_REP_STATUS_USAGE_CNT_MAX_ERR (1L<<16) +#define BNX2_CTX_REP_STATUS_USAGE_CNT_MIN_ERR (1L<<17) +#define BNX2_CTX_REP_STATUS_USAGE_CNT_MISS_ERR (1L<<18) + +#define BNX2_CTX_CKSUM_ERROR_STATUS 0x0000105c +#define BNX2_CTX_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) +#define BNX2_CTX_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) + +#define BNX2_CTX_CHNL_LOCK_STATUS_0 0x00001080 +#define BNX2_CTX_CHNL_LOCK_STATUS_0_CID (0x3fffL<<0) +#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE (0x3L<<14) +#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE (1L<<16) +#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE_XI (1L<<14) +#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE_XI (0x7L<<15) + +#define BNX2_CTX_CHNL_LOCK_STATUS_1 0x00001084 +#define BNX2_CTX_CHNL_LOCK_STATUS_2 0x00001088 +#define BNX2_CTX_CHNL_LOCK_STATUS_3 0x0000108c +#define BNX2_CTX_CHNL_LOCK_STATUS_4 0x00001090 +#define BNX2_CTX_CHNL_LOCK_STATUS_5 0x00001094 +#define BNX2_CTX_CHNL_LOCK_STATUS_6 0x00001098 +#define BNX2_CTX_CHNL_LOCK_STATUS_7 0x0000109c +#define BNX2_CTX_CHNL_LOCK_STATUS_8 0x000010a0 +#define BNX2_CTX_CHNL_LOCK_STATUS_9 0x000010a4 + +#define BNX2_CTX_CACHE_DATA 0x000010c4 +#define BNX2_CTX_HOST_PAGE_TBL_CTRL 0x000010c8 +#define BNX2_CTX_HOST_PAGE_TBL_CTRL_PAGE_TBL_ADDR (0x1ffL<<0) +#define BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ (1L<<30) +#define BNX2_CTX_HOST_PAGE_TBL_CTRL_READ_REQ (1L<<31) + +#define BNX2_CTX_HOST_PAGE_TBL_DATA0 0x000010cc +#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID (1L<<0) +#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALUE (0xffffffL<<8) + +#define BNX2_CTX_HOST_PAGE_TBL_DATA1 0x000010d0 +#define BNX2_CTX_CAM_CTRL 0x000010d4 +#define BNX2_CTX_CAM_CTRL_CAM_ADDR (0x3ffL<<0) +#define BNX2_CTX_CAM_CTRL_RESET (1L<<27) +#define BNX2_CTX_CAM_CTRL_INVALIDATE (1L<<28) +#define BNX2_CTX_CAM_CTRL_SEARCH (1L<<29) +#define BNX2_CTX_CAM_CTRL_WRITE_REQ (1L<<30) +#define BNX2_CTX_CAM_CTRL_READ_REQ (1L<<31) + + +/* + * emac_reg definition + * offset: 0x1400 + */ +#define BNX2_EMAC_MODE 0x00001400 +#define BNX2_EMAC_MODE_RESET (1L<<0) +#define BNX2_EMAC_MODE_HALF_DUPLEX (1L<<1) +#define BNX2_EMAC_MODE_PORT (0x3L<<2) +#define BNX2_EMAC_MODE_PORT_NONE (0L<<2) +#define BNX2_EMAC_MODE_PORT_MII (1L<<2) +#define BNX2_EMAC_MODE_PORT_GMII (2L<<2) +#define BNX2_EMAC_MODE_PORT_MII_10M (3L<<2) +#define 
BNX2_EMAC_MODE_MAC_LOOP (1L<<4) +#define BNX2_EMAC_MODE_25G_MODE (1L<<5) +#define BNX2_EMAC_MODE_TAGGED_MAC_CTL (1L<<7) +#define BNX2_EMAC_MODE_TX_BURST (1L<<8) +#define BNX2_EMAC_MODE_MAX_DEFER_DROP_ENA (1L<<9) +#define BNX2_EMAC_MODE_EXT_LINK_POL (1L<<10) +#define BNX2_EMAC_MODE_FORCE_LINK (1L<<11) +#define BNX2_EMAC_MODE_SERDES_MODE (1L<<12) +#define BNX2_EMAC_MODE_BOND_OVRD (1L<<13) +#define BNX2_EMAC_MODE_MPKT (1L<<18) +#define BNX2_EMAC_MODE_MPKT_RCVD (1L<<19) +#define BNX2_EMAC_MODE_ACPI_RCVD (1L<<20) + +#define BNX2_EMAC_STATUS 0x00001404 +#define BNX2_EMAC_STATUS_LINK (1L<<11) +#define BNX2_EMAC_STATUS_LINK_CHANGE (1L<<12) +#define BNX2_EMAC_STATUS_SERDES_AUTONEG_COMPLETE (1L<<13) +#define BNX2_EMAC_STATUS_SERDES_AUTONEG_CHANGE (1L<<14) +#define BNX2_EMAC_STATUS_SERDES_NXT_PG_CHANGE (1L<<16) +#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0 (1L<<17) +#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18) +#define BNX2_EMAC_STATUS_MI_COMPLETE (1L<<22) +#define BNX2_EMAC_STATUS_MI_INT (1L<<23) +#define BNX2_EMAC_STATUS_AP_ERROR (1L<<24) +#define BNX2_EMAC_STATUS_PARITY_ERROR_STATE (1L<<31) + +#define BNX2_EMAC_ATTENTION_ENA 0x00001408 +#define BNX2_EMAC_ATTENTION_ENA_LINK (1L<<11) +#define BNX2_EMAC_ATTENTION_ENA_AUTONEG_CHANGE (1L<<14) +#define BNX2_EMAC_ATTENTION_ENA_NXT_PG_CHANGE (1L<<16) +#define BNX2_EMAC_ATTENTION_ENA_SERDES_RX_CONFIG_IS_0_CHANGE (1L<<18) +#define BNX2_EMAC_ATTENTION_ENA_MI_COMPLETE (1L<<22) +#define BNX2_EMAC_ATTENTION_ENA_MI_INT (1L<<23) +#define BNX2_EMAC_ATTENTION_ENA_AP_ERROR (1L<<24) + +#define BNX2_EMAC_LED 0x0000140c +#define BNX2_EMAC_LED_OVERRIDE (1L<<0) +#define BNX2_EMAC_LED_1000MB_OVERRIDE (1L<<1) +#define BNX2_EMAC_LED_100MB_OVERRIDE (1L<<2) +#define BNX2_EMAC_LED_10MB_OVERRIDE (1L<<3) +#define BNX2_EMAC_LED_TRAFFIC_OVERRIDE (1L<<4) +#define BNX2_EMAC_LED_BLNK_TRAFFIC (1L<<5) +#define BNX2_EMAC_LED_TRAFFIC (1L<<6) +#define BNX2_EMAC_LED_1000MB (1L<<7) +#define BNX2_EMAC_LED_100MB (1L<<8) +#define BNX2_EMAC_LED_10MB (1L<<9) +#define BNX2_EMAC_LED_TRAFFIC_STAT (1L<<10) +#define BNX2_EMAC_LED_2500MB (1L<<11) +#define BNX2_EMAC_LED_2500MB_OVERRIDE (1L<<12) +#define BNX2_EMAC_LED_ACTIVITY_SEL (0x3L<<17) +#define BNX2_EMAC_LED_ACTIVITY_SEL_0 (0L<<17) +#define BNX2_EMAC_LED_ACTIVITY_SEL_1 (1L<<17) +#define BNX2_EMAC_LED_ACTIVITY_SEL_2 (2L<<17) +#define BNX2_EMAC_LED_ACTIVITY_SEL_3 (3L<<17) +#define BNX2_EMAC_LED_BLNK_RATE (0xfffL<<19) +#define BNX2_EMAC_LED_BLNK_RATE_ENA (1L<<31) + +#define BNX2_EMAC_MAC_MATCH0 0x00001410 +#define BNX2_EMAC_MAC_MATCH1 0x00001414 +#define BNX2_EMAC_MAC_MATCH2 0x00001418 +#define BNX2_EMAC_MAC_MATCH3 0x0000141c +#define BNX2_EMAC_MAC_MATCH4 0x00001420 +#define BNX2_EMAC_MAC_MATCH5 0x00001424 +#define BNX2_EMAC_MAC_MATCH6 0x00001428 +#define BNX2_EMAC_MAC_MATCH7 0x0000142c +#define BNX2_EMAC_MAC_MATCH8 0x00001430 +#define BNX2_EMAC_MAC_MATCH9 0x00001434 +#define BNX2_EMAC_MAC_MATCH10 0x00001438 +#define BNX2_EMAC_MAC_MATCH11 0x0000143c +#define BNX2_EMAC_MAC_MATCH12 0x00001440 +#define BNX2_EMAC_MAC_MATCH13 0x00001444 +#define BNX2_EMAC_MAC_MATCH14 0x00001448 +#define BNX2_EMAC_MAC_MATCH15 0x0000144c +#define BNX2_EMAC_MAC_MATCH16 0x00001450 +#define BNX2_EMAC_MAC_MATCH17 0x00001454 +#define BNX2_EMAC_MAC_MATCH18 0x00001458 +#define BNX2_EMAC_MAC_MATCH19 0x0000145c +#define BNX2_EMAC_MAC_MATCH20 0x00001460 +#define BNX2_EMAC_MAC_MATCH21 0x00001464 +#define BNX2_EMAC_MAC_MATCH22 0x00001468 +#define BNX2_EMAC_MAC_MATCH23 0x0000146c +#define BNX2_EMAC_MAC_MATCH24 0x00001470 +#define BNX2_EMAC_MAC_MATCH25 0x00001474 
+#define BNX2_EMAC_MAC_MATCH26 0x00001478 +#define BNX2_EMAC_MAC_MATCH27 0x0000147c +#define BNX2_EMAC_MAC_MATCH28 0x00001480 +#define BNX2_EMAC_MAC_MATCH29 0x00001484 +#define BNX2_EMAC_MAC_MATCH30 0x00001488 +#define BNX2_EMAC_MAC_MATCH31 0x0000148c +#define BNX2_EMAC_BACKOFF_SEED 0x00001498 +#define BNX2_EMAC_BACKOFF_SEED_EMAC_BACKOFF_SEED (0x3ffL<<0) + +#define BNX2_EMAC_RX_MTU_SIZE 0x0000149c +#define BNX2_EMAC_RX_MTU_SIZE_MTU_SIZE (0xffffL<<0) +#define BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) + +#define BNX2_EMAC_SERDES_CNTL 0x000014a4 +#define BNX2_EMAC_SERDES_CNTL_RXR (0x7L<<0) +#define BNX2_EMAC_SERDES_CNTL_RXG (0x3L<<3) +#define BNX2_EMAC_SERDES_CNTL_RXCKSEL (1L<<6) +#define BNX2_EMAC_SERDES_CNTL_TXBIAS (0x7L<<7) +#define BNX2_EMAC_SERDES_CNTL_BGMAX (1L<<10) +#define BNX2_EMAC_SERDES_CNTL_BGMIN (1L<<11) +#define BNX2_EMAC_SERDES_CNTL_TXMODE (1L<<12) +#define BNX2_EMAC_SERDES_CNTL_TXEDGE (1L<<13) +#define BNX2_EMAC_SERDES_CNTL_SERDES_MODE (1L<<14) +#define BNX2_EMAC_SERDES_CNTL_PLLTEST (1L<<15) +#define BNX2_EMAC_SERDES_CNTL_CDET_EN (1L<<16) +#define BNX2_EMAC_SERDES_CNTL_TBI_LBK (1L<<17) +#define BNX2_EMAC_SERDES_CNTL_REMOTE_LBK (1L<<18) +#define BNX2_EMAC_SERDES_CNTL_REV_PHASE (1L<<19) +#define BNX2_EMAC_SERDES_CNTL_REGCTL12 (0x3L<<20) +#define BNX2_EMAC_SERDES_CNTL_REGCTL25 (0x3L<<22) + +#define BNX2_EMAC_SERDES_STATUS 0x000014a8 +#define BNX2_EMAC_SERDES_STATUS_RX_STAT (0xffL<<0) +#define BNX2_EMAC_SERDES_STATUS_COMMA_DET (1L<<8) + +#define BNX2_EMAC_MDIO_COMM 0x000014ac +#define BNX2_EMAC_MDIO_COMM_DATA (0xffffL<<0) +#define BNX2_EMAC_MDIO_COMM_REG_ADDR (0x1fL<<16) +#define BNX2_EMAC_MDIO_COMM_PHY_ADDR (0x1fL<<21) +#define BNX2_EMAC_MDIO_COMM_COMMAND (0x3L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_0 (0L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE (1L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_READ (2L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_22_XI (1L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_45_XI (1L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_22_XI (2L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_INC_45_XI (2L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_3 (3L<<26) +#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) +#define BNX2_EMAC_MDIO_COMM_FAIL (1L<<28) +#define BNX2_EMAC_MDIO_COMM_START_BUSY (1L<<29) +#define BNX2_EMAC_MDIO_COMM_DISEXT (1L<<30) + +#define BNX2_EMAC_MDIO_STATUS 0x000014b0 +#define BNX2_EMAC_MDIO_STATUS_LINK (1L<<0) +#define BNX2_EMAC_MDIO_STATUS_10MB (1L<<1) + +#define BNX2_EMAC_MDIO_MODE 0x000014b4 +#define BNX2_EMAC_MDIO_MODE_SHORT_PREAMBLE (1L<<1) +#define BNX2_EMAC_MDIO_MODE_AUTO_POLL (1L<<4) +#define BNX2_EMAC_MDIO_MODE_BIT_BANG (1L<<8) +#define BNX2_EMAC_MDIO_MODE_MDIO (1L<<9) +#define BNX2_EMAC_MDIO_MODE_MDIO_OE (1L<<10) +#define BNX2_EMAC_MDIO_MODE_MDC (1L<<11) +#define BNX2_EMAC_MDIO_MODE_MDINT (1L<<12) +#define BNX2_EMAC_MDIO_MODE_EXT_MDINT (1L<<13) +#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT (0x1fL<<16) +#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT_XI (0x3fL<<16) +#define BNX2_EMAC_MDIO_MODE_CLAUSE_45_XI (1L<<31) + +#define BNX2_EMAC_MDIO_AUTO_STATUS 0x000014b8 +#define BNX2_EMAC_MDIO_AUTO_STATUS_AUTO_ERR (1L<<0) + +#define BNX2_EMAC_TX_MODE 0x000014bc +#define BNX2_EMAC_TX_MODE_RESET (1L<<0) +#define BNX2_EMAC_TX_MODE_CS16_TEST (1L<<2) +#define BNX2_EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) +#define BNX2_EMAC_TX_MODE_FLOW_EN (1L<<4) +#define BNX2_EMAC_TX_MODE_BIG_BACKOFF (1L<<5) +#define BNX2_EMAC_TX_MODE_LONG_PAUSE (1L<<6) +#define BNX2_EMAC_TX_MODE_LINK_AWARE 
(1L<<7) + +#define BNX2_EMAC_TX_STATUS 0x000014c0 +#define BNX2_EMAC_TX_STATUS_XOFFED (1L<<0) +#define BNX2_EMAC_TX_STATUS_XOFF_SENT (1L<<1) +#define BNX2_EMAC_TX_STATUS_XON_SENT (1L<<2) +#define BNX2_EMAC_TX_STATUS_LINK_UP (1L<<3) +#define BNX2_EMAC_TX_STATUS_UNDERRUN (1L<<4) +#define BNX2_EMAC_TX_STATUS_CS16_ERROR (1L<<5) + +#define BNX2_EMAC_TX_LENGTHS 0x000014c4 +#define BNX2_EMAC_TX_LENGTHS_SLOT (0xffL<<0) +#define BNX2_EMAC_TX_LENGTHS_IPG (0xfL<<8) +#define BNX2_EMAC_TX_LENGTHS_IPG_CRS (0x3L<<12) + +#define BNX2_EMAC_RX_MODE 0x000014c8 +#define BNX2_EMAC_RX_MODE_RESET (1L<<0) +#define BNX2_EMAC_RX_MODE_FLOW_EN (1L<<2) +#define BNX2_EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3) +#define BNX2_EMAC_RX_MODE_KEEP_PAUSE (1L<<4) +#define BNX2_EMAC_RX_MODE_ACCEPT_OVERSIZE (1L<<5) +#define BNX2_EMAC_RX_MODE_ACCEPT_RUNTS (1L<<6) +#define BNX2_EMAC_RX_MODE_LLC_CHK (1L<<7) +#define BNX2_EMAC_RX_MODE_PROMISCUOUS (1L<<8) +#define BNX2_EMAC_RX_MODE_NO_CRC_CHK (1L<<9) +#define BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) +#define BNX2_EMAC_RX_MODE_FILT_BROADCAST (1L<<11) +#define BNX2_EMAC_RX_MODE_SORT_MODE (1L<<12) + +#define BNX2_EMAC_RX_STATUS 0x000014cc +#define BNX2_EMAC_RX_STATUS_FFED (1L<<0) +#define BNX2_EMAC_RX_STATUS_FF_RECEIVED (1L<<1) +#define BNX2_EMAC_RX_STATUS_N_RECEIVED (1L<<2) + +#define BNX2_EMAC_MULTICAST_HASH0 0x000014d0 +#define BNX2_EMAC_MULTICAST_HASH1 0x000014d4 +#define BNX2_EMAC_MULTICAST_HASH2 0x000014d8 +#define BNX2_EMAC_MULTICAST_HASH3 0x000014dc +#define BNX2_EMAC_MULTICAST_HASH4 0x000014e0 +#define BNX2_EMAC_MULTICAST_HASH5 0x000014e4 +#define BNX2_EMAC_MULTICAST_HASH6 0x000014e8 +#define BNX2_EMAC_MULTICAST_HASH7 0x000014ec +#define BNX2_EMAC_CKSUM_ERROR_STATUS 0x000014f0 +#define BNX2_EMAC_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) +#define BNX2_EMAC_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) + +#define BNX2_EMAC_RX_STAT_IFHCINOCTETS 0x00001500 +#define BNX2_EMAC_RX_STAT_IFHCINBADOCTETS 0x00001504 +#define BNX2_EMAC_RX_STAT_ETHERSTATSFRAGMENTS 0x00001508 +#define BNX2_EMAC_RX_STAT_IFHCINUCASTPKTS 0x0000150c +#define BNX2_EMAC_RX_STAT_IFHCINMULTICASTPKTS 0x00001510 +#define BNX2_EMAC_RX_STAT_IFHCINBROADCASTPKTS 0x00001514 +#define BNX2_EMAC_RX_STAT_DOT3STATSFCSERRORS 0x00001518 +#define BNX2_EMAC_RX_STAT_DOT3STATSALIGNMENTERRORS 0x0000151c +#define BNX2_EMAC_RX_STAT_DOT3STATSCARRIERSENSEERRORS 0x00001520 +#define BNX2_EMAC_RX_STAT_XONPAUSEFRAMESRECEIVED 0x00001524 +#define BNX2_EMAC_RX_STAT_XOFFPAUSEFRAMESRECEIVED 0x00001528 +#define BNX2_EMAC_RX_STAT_MACCONTROLFRAMESRECEIVED 0x0000152c +#define BNX2_EMAC_RX_STAT_XOFFSTATEENTERED 0x00001530 +#define BNX2_EMAC_RX_STAT_DOT3STATSFRAMESTOOLONG 0x00001534 +#define BNX2_EMAC_RX_STAT_ETHERSTATSJABBERS 0x00001538 +#define BNX2_EMAC_RX_STAT_ETHERSTATSUNDERSIZEPKTS 0x0000153c +#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS64OCTETS 0x00001540 +#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x00001544 +#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001548 +#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x0000154c +#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001550 +#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x00001554 +#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001558 +#define BNX2_EMAC_RXMAC_DEBUG0 0x0000155c +#define BNX2_EMAC_RXMAC_DEBUG1 0x00001560 +#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_NE_BYTE_COUNT (1L<<0) +#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_OUT_RANGE (1L<<1) +#define BNX2_EMAC_RXMAC_DEBUG1_BAD_CRC (1L<<2) +#define 
BNX2_EMAC_RXMAC_DEBUG1_RX_ERROR (1L<<3) +#define BNX2_EMAC_RXMAC_DEBUG1_ALIGN_ERROR (1L<<4) +#define BNX2_EMAC_RXMAC_DEBUG1_LAST_DATA (1L<<5) +#define BNX2_EMAC_RXMAC_DEBUG1_ODD_BYTE_START (1L<<6) +#define BNX2_EMAC_RXMAC_DEBUG1_BYTE_COUNT (0xffffL<<7) +#define BNX2_EMAC_RXMAC_DEBUG1_SLOT_TIME (0xffL<<23) + +#define BNX2_EMAC_RXMAC_DEBUG2 0x00001564 +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE (0x7L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_IDLE (0x0L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SFD (0x1L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DATA (0x2L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SKEEP (0x3L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_EXT (0x4L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DROP (0x5L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SDROP (0x6L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_FC (0x7L<<0) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE (0xfL<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_IDLE (0x0L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA0 (0x1L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA1 (0x2L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA2 (0x3L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA3 (0x4L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_ABORT (0x5L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_WAIT (0x6L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_STATUS (0x7L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_LAST (0x8L<<3) +#define BNX2_EMAC_RXMAC_DEBUG2_BYTE_IN (0xffL<<7) +#define BNX2_EMAC_RXMAC_DEBUG2_FALSEC (1L<<15) +#define BNX2_EMAC_RXMAC_DEBUG2_TAGGED (1L<<16) +#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE (1L<<18) +#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_IDLE (0L<<18) +#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_PAUSED (1L<<18) +#define BNX2_EMAC_RXMAC_DEBUG2_SE_COUNTER (0xfL<<19) +#define BNX2_EMAC_RXMAC_DEBUG2_QUANTA (0x1fL<<23) + +#define BNX2_EMAC_RXMAC_DEBUG3 0x00001568 +#define BNX2_EMAC_RXMAC_DEBUG3_PAUSE_CTR (0xffffL<<0) +#define BNX2_EMAC_RXMAC_DEBUG3_TMP_PAUSE_CTR (0xffffL<<16) + +#define BNX2_EMAC_RXMAC_DEBUG4 0x0000156c +#define BNX2_EMAC_RXMAC_DEBUG4_TYPE_FIELD (0xffffL<<0) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE (0x3fL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_IDLE (0x0L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC2 (0x1L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC3 (0x2L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UNI (0x3L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC3 (0x5L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA1 (0x6L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2 (0x7L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA2 (0x7L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA3 (0x8L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC2 (0x9L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC3 (0xaL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT1 (0xeL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT2 (0xfL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MCHECK (0x10L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC (0x11L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC2 (0x12L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC3 (0x13L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA1 (0x14L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA2 (0x15L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA3 (0x16L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BTYPE (0x17L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC (0x18L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PTYPE (0x19L<<16) +#define 
BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_CMD (0x1aL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MAC (0x1bL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_LATCH (0x1cL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XOFF (0x1dL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XON (0x1eL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PAUSED (0x1fL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_NPAUSED (0x20L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TTYPE (0x21L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TVAL (0x22L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA1 (0x23L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA2 (0x24L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA3 (0x25L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTYPE (0x26L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTTYPE (0x27L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTVAL (0x28L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MTYPE (0x29L<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_DROP (0x2aL<<16) +#define BNX2_EMAC_RXMAC_DEBUG4_DROP_PKT (1L<<22) +#define BNX2_EMAC_RXMAC_DEBUG4_SLOT_FILLED (1L<<23) +#define BNX2_EMAC_RXMAC_DEBUG4_FALSE_CARRIER (1L<<24) +#define BNX2_EMAC_RXMAC_DEBUG4_LAST_DATA (1L<<25) +#define BNX2_EMAC_RXMAC_DEBUG4_SFD_FOUND (1L<<26) +#define BNX2_EMAC_RXMAC_DEBUG4_ADVANCE (1L<<27) +#define BNX2_EMAC_RXMAC_DEBUG4_START (1L<<28) + +#define BNX2_EMAC_RXMAC_DEBUG5 0x00001570 +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM (0x7L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_IDLE (0L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_EOF (1L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_STAT (2L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4FCRC (3L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4RDE (4L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4ALL (5L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_1WD_WAIT_STAT (6L<<0) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1 (0x7L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_VDW (0x0L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_STAT (0x1L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_AEOF (0x2L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_NEOF (0x3L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SOF (0x4L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SAEOF (0x6L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SNEOF (0x7L<<4) +#define BNX2_EMAC_RXMAC_DEBUG5_EOF_DETECTED (1L<<7) +#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF0 (0x7L<<8) +#define BNX2_EMAC_RXMAC_DEBUG5_RPM_IDI_FIFO_FULL (1L<<11) +#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_CCODE (1L<<12) +#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_DATA (1L<<13) +#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_STAT (1L<<14) +#define BNX2_EMAC_RXMAC_DEBUG5_CLR_STAT (1L<<15) +#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_CCODE (0x3L<<16) +#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_ACCEPT (1L<<19) +#define BNX2_EMAC_RXMAC_DEBUG5_FMLEN (0xfffL<<20) + +#define BNX2_EMAC_RX_STAT_FALSECARRIERERRORS 0x00001574 +#define BNX2_EMAC_RX_STAT_AC0 0x00001580 +#define BNX2_EMAC_RX_STAT_AC1 0x00001584 +#define BNX2_EMAC_RX_STAT_AC2 0x00001588 +#define BNX2_EMAC_RX_STAT_AC3 0x0000158c +#define BNX2_EMAC_RX_STAT_AC4 0x00001590 +#define BNX2_EMAC_RX_STAT_AC5 0x00001594 +#define BNX2_EMAC_RX_STAT_AC6 0x00001598 +#define BNX2_EMAC_RX_STAT_AC7 0x0000159c +#define BNX2_EMAC_RX_STAT_AC8 0x000015a0 +#define BNX2_EMAC_RX_STAT_AC9 0x000015a4 +#define BNX2_EMAC_RX_STAT_AC10 0x000015a8 +#define BNX2_EMAC_RX_STAT_AC11 0x000015ac +#define BNX2_EMAC_RX_STAT_AC12 0x000015b0 +#define BNX2_EMAC_RX_STAT_AC13 0x000015b4 +#define 
BNX2_EMAC_RX_STAT_AC14 0x000015b8 +#define BNX2_EMAC_RX_STAT_AC15 0x000015bc +#define BNX2_EMAC_RX_STAT_AC16 0x000015c0 +#define BNX2_EMAC_RX_STAT_AC17 0x000015c4 +#define BNX2_EMAC_RX_STAT_AC18 0x000015c8 +#define BNX2_EMAC_RX_STAT_AC19 0x000015cc +#define BNX2_EMAC_RX_STAT_AC20 0x000015d0 +#define BNX2_EMAC_RX_STAT_AC21 0x000015d4 +#define BNX2_EMAC_RX_STAT_AC22 0x000015d8 +#define BNX2_EMAC_RXMAC_SUC_DBG_OVERRUNVEC 0x000015dc +#define BNX2_EMAC_RX_STAT_AC_28 0x000015f4 +#define BNX2_EMAC_TX_STAT_IFHCOUTOCTETS 0x00001600 +#define BNX2_EMAC_TX_STAT_IFHCOUTBADOCTETS 0x00001604 +#define BNX2_EMAC_TX_STAT_ETHERSTATSCOLLISIONS 0x00001608 +#define BNX2_EMAC_TX_STAT_OUTXONSENT 0x0000160c +#define BNX2_EMAC_TX_STAT_OUTXOFFSENT 0x00001610 +#define BNX2_EMAC_TX_STAT_FLOWCONTROLDONE 0x00001614 +#define BNX2_EMAC_TX_STAT_DOT3STATSSINGLECOLLISIONFRAMES 0x00001618 +#define BNX2_EMAC_TX_STAT_DOT3STATSMULTIPLECOLLISIONFRAMES 0x0000161c +#define BNX2_EMAC_TX_STAT_DOT3STATSDEFERREDTRANSMISSIONS 0x00001620 +#define BNX2_EMAC_TX_STAT_DOT3STATSEXCESSIVECOLLISIONS 0x00001624 +#define BNX2_EMAC_TX_STAT_DOT3STATSLATECOLLISIONS 0x00001628 +#define BNX2_EMAC_TX_STAT_IFHCOUTUCASTPKTS 0x0000162c +#define BNX2_EMAC_TX_STAT_IFHCOUTMULTICASTPKTS 0x00001630 +#define BNX2_EMAC_TX_STAT_IFHCOUTBROADCASTPKTS 0x00001634 +#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS64OCTETS 0x00001638 +#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS 0x0000163c +#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS 0x00001640 +#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS 0x00001644 +#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS 0x00001648 +#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS 0x0000164c +#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTSOVER1522OCTETS 0x00001650 +#define BNX2_EMAC_TX_STAT_DOT3STATSINTERNALMACTRANSMITERRORS 0x00001654 +#define BNX2_EMAC_TXMAC_DEBUG0 0x00001658 +#define BNX2_EMAC_TXMAC_DEBUG1 0x0000165c +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE (0xfL<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_IDLE (0x0L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_START0 (0x1L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA0 (0x4L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA1 (0x5L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA2 (0x6L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA3 (0x7L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT0 (0x8L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT1 (0x9L<<0) +#define BNX2_EMAC_TXMAC_DEBUG1_CRS_ENABLE (1L<<4) +#define BNX2_EMAC_TXMAC_DEBUG1_BAD_CRC (1L<<5) +#define BNX2_EMAC_TXMAC_DEBUG1_SE_COUNTER (0xfL<<6) +#define BNX2_EMAC_TXMAC_DEBUG1_SEND_PAUSE (1L<<10) +#define BNX2_EMAC_TXMAC_DEBUG1_LATE_COLLISION (1L<<11) +#define BNX2_EMAC_TXMAC_DEBUG1_MAX_DEFER (1L<<12) +#define BNX2_EMAC_TXMAC_DEBUG1_DEFERRED (1L<<13) +#define BNX2_EMAC_TXMAC_DEBUG1_ONE_BYTE (1L<<14) +#define BNX2_EMAC_TXMAC_DEBUG1_IPG_TIME (0xfL<<15) +#define BNX2_EMAC_TXMAC_DEBUG1_SLOT_TIME (0xffL<<19) + +#define BNX2_EMAC_TXMAC_DEBUG2 0x00001660 +#define BNX2_EMAC_TXMAC_DEBUG2_BACK_OFF (0x3ffL<<0) +#define BNX2_EMAC_TXMAC_DEBUG2_BYTE_COUNT (0xffffL<<10) +#define BNX2_EMAC_TXMAC_DEBUG2_COL_COUNT (0x1fL<<26) +#define BNX2_EMAC_TXMAC_DEBUG2_COL_BIT (1L<<31) + +#define BNX2_EMAC_TXMAC_DEBUG3 0x00001664 +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE (0xfL<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_IDLE (0x0L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE1 (0x1L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE2 (0x2L<<0) +#define 
BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SFD (0x3L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_DATA (0x4L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC1 (0x5L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC2 (0x6L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EXT (0x7L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATB (0x8L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATG (0x9L<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_JAM (0xaL<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EJAM (0xbL<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BJAM (0xcL<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SWAIT (0xdL<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BACKOFF (0xeL<<0) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE (0x7L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_IDLE (0x0L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_WAIT (0x1L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_UNI (0x2L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_MC (0x3L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC2 (0x4L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC3 (0x5L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC (0x6L<<4) +#define BNX2_EMAC_TXMAC_DEBUG3_CRS_DONE (1L<<7) +#define BNX2_EMAC_TXMAC_DEBUG3_XOFF (1L<<8) +#define BNX2_EMAC_TXMAC_DEBUG3_SE_COUNTER (0xfL<<9) +#define BNX2_EMAC_TXMAC_DEBUG3_QUANTA_COUNTER (0x1fL<<13) + +#define BNX2_EMAC_TXMAC_DEBUG4 0x00001668 +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_COUNTER (0xffffL<<0) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE (0xfL<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_IDLE (0x0L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA1 (0x2L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA2 (0x3L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3 (0x4L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2 (0x5L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA3 (0x6L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC1 (0x7L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC1 (0x8L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC2 (0x9L<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME (0xaL<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE (0xcL<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_WAIT (0xdL<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD (0xeL<<16) +#define BNX2_EMAC_TXMAC_DEBUG4_STATS0_VALID (1L<<20) +#define BNX2_EMAC_TXMAC_DEBUG4_APPEND_CRC (1L<<21) +#define BNX2_EMAC_TXMAC_DEBUG4_SLOT_FILLED (1L<<22) +#define BNX2_EMAC_TXMAC_DEBUG4_MAX_DEFER (1L<<23) +#define BNX2_EMAC_TXMAC_DEBUG4_SEND_EXTEND (1L<<24) +#define BNX2_EMAC_TXMAC_DEBUG4_SEND_PADDING (1L<<25) +#define BNX2_EMAC_TXMAC_DEBUG4_EOF_LOC (1L<<26) +#define BNX2_EMAC_TXMAC_DEBUG4_COLLIDING (1L<<27) +#define BNX2_EMAC_TXMAC_DEBUG4_COL_IN (1L<<28) +#define BNX2_EMAC_TXMAC_DEBUG4_BURSTING (1L<<29) +#define BNX2_EMAC_TXMAC_DEBUG4_ADVANCE (1L<<30) +#define BNX2_EMAC_TXMAC_DEBUG4_GO (1L<<31) + +#define BNX2_EMAC_TX_STAT_AC0 0x00001680 +#define BNX2_EMAC_TX_STAT_AC1 0x00001684 +#define BNX2_EMAC_TX_STAT_AC2 0x00001688 +#define BNX2_EMAC_TX_STAT_AC3 0x0000168c +#define BNX2_EMAC_TX_STAT_AC4 0x00001690 +#define BNX2_EMAC_TX_STAT_AC5 0x00001694 +#define BNX2_EMAC_TX_STAT_AC6 0x00001698 +#define BNX2_EMAC_TX_STAT_AC7 0x0000169c +#define BNX2_EMAC_TX_STAT_AC8 0x000016a0 +#define BNX2_EMAC_TX_STAT_AC9 0x000016a4 +#define BNX2_EMAC_TX_STAT_AC10 0x000016a8 +#define BNX2_EMAC_TX_STAT_AC11 0x000016ac +#define BNX2_EMAC_TX_STAT_AC12 0x000016b0 +#define BNX2_EMAC_TX_STAT_AC13 0x000016b4 +#define BNX2_EMAC_TX_STAT_AC14 0x000016b8 +#define BNX2_EMAC_TX_STAT_AC15 0x000016bc 
+#define BNX2_EMAC_TX_STAT_AC16 0x000016c0 +#define BNX2_EMAC_TX_STAT_AC17 0x000016c4 +#define BNX2_EMAC_TX_STAT_AC18 0x000016c8 +#define BNX2_EMAC_TX_STAT_AC19 0x000016cc +#define BNX2_EMAC_TX_STAT_AC20 0x000016d0 +#define BNX2_EMAC_TXMAC_SUC_DBG_OVERRUNVEC 0x000016d8 +#define BNX2_EMAC_TX_RATE_LIMIT_CTRL 0x000016fc +#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_INC (0x7fL<<0) +#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_NUM (0x7fL<<16) +#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_RATE_LIMITER_EN (1L<<31) + + +/* + * rpm_reg definition + * offset: 0x1800 + */ +#define BNX2_RPM_COMMAND 0x00001800 +#define BNX2_RPM_COMMAND_ENABLED (1L<<0) +#define BNX2_RPM_COMMAND_OVERRUN_ABORT (1L<<4) + +#define BNX2_RPM_STATUS 0x00001804 +#define BNX2_RPM_STATUS_MBUF_WAIT (1L<<0) +#define BNX2_RPM_STATUS_FREE_WAIT (1L<<1) + +#define BNX2_RPM_CONFIG 0x00001808 +#define BNX2_RPM_CONFIG_NO_PSD_HDR_CKSUM (1L<<0) +#define BNX2_RPM_CONFIG_ACPI_ENA (1L<<1) +#define BNX2_RPM_CONFIG_ACPI_KEEP (1L<<2) +#define BNX2_RPM_CONFIG_MP_KEEP (1L<<3) +#define BNX2_RPM_CONFIG_SORT_VECT_VAL (0xfL<<4) +#define BNX2_RPM_CONFIG_DISABLE_WOL_ASSERT (1L<<30) +#define BNX2_RPM_CONFIG_IGNORE_VLAN (1L<<31) + +#define BNX2_RPM_MGMT_PKT_CTRL 0x0000180c +#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_SORT (0xfL<<0) +#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_RULE (0xfL<<4) +#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_DISCARD_EN (1L<<30) +#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_EN (1L<<31) + +#define BNX2_RPM_VLAN_MATCH0 0x00001810 +#define BNX2_RPM_VLAN_MATCH0_RPM_VLAN_MTCH0_VALUE (0xfffL<<0) + +#define BNX2_RPM_VLAN_MATCH1 0x00001814 +#define BNX2_RPM_VLAN_MATCH1_RPM_VLAN_MTCH1_VALUE (0xfffL<<0) + +#define BNX2_RPM_VLAN_MATCH2 0x00001818 +#define BNX2_RPM_VLAN_MATCH2_RPM_VLAN_MTCH2_VALUE (0xfffL<<0) + +#define BNX2_RPM_VLAN_MATCH3 0x0000181c +#define BNX2_RPM_VLAN_MATCH3_RPM_VLAN_MTCH3_VALUE (0xfffL<<0) + +#define BNX2_RPM_SORT_USER0 0x00001820 +#define BNX2_RPM_SORT_USER0_PM_EN (0xffffL<<0) +#define BNX2_RPM_SORT_USER0_BC_EN (1L<<16) +#define BNX2_RPM_SORT_USER0_MC_EN (1L<<17) +#define BNX2_RPM_SORT_USER0_MC_HSH_EN (1L<<18) +#define BNX2_RPM_SORT_USER0_PROM_EN (1L<<19) +#define BNX2_RPM_SORT_USER0_VLAN_EN (0xfL<<20) +#define BNX2_RPM_SORT_USER0_PROM_VLAN (1L<<24) +#define BNX2_RPM_SORT_USER0_VLAN_NOTMATCH (1L<<25) +#define BNX2_RPM_SORT_USER0_ENA (1L<<31) + +#define BNX2_RPM_SORT_USER1 0x00001824 +#define BNX2_RPM_SORT_USER1_PM_EN (0xffffL<<0) +#define BNX2_RPM_SORT_USER1_BC_EN (1L<<16) +#define BNX2_RPM_SORT_USER1_MC_EN (1L<<17) +#define BNX2_RPM_SORT_USER1_MC_HSH_EN (1L<<18) +#define BNX2_RPM_SORT_USER1_PROM_EN (1L<<19) +#define BNX2_RPM_SORT_USER1_VLAN_EN (0xfL<<20) +#define BNX2_RPM_SORT_USER1_PROM_VLAN (1L<<24) +#define BNX2_RPM_SORT_USER1_ENA (1L<<31) + +#define BNX2_RPM_SORT_USER2 0x00001828 +#define BNX2_RPM_SORT_USER2_PM_EN (0xffffL<<0) +#define BNX2_RPM_SORT_USER2_BC_EN (1L<<16) +#define BNX2_RPM_SORT_USER2_MC_EN (1L<<17) +#define BNX2_RPM_SORT_USER2_MC_HSH_EN (1L<<18) +#define BNX2_RPM_SORT_USER2_PROM_EN (1L<<19) +#define BNX2_RPM_SORT_USER2_VLAN_EN (0xfL<<20) +#define BNX2_RPM_SORT_USER2_PROM_VLAN (1L<<24) +#define BNX2_RPM_SORT_USER2_ENA (1L<<31) + +#define BNX2_RPM_SORT_USER3 0x0000182c +#define BNX2_RPM_SORT_USER3_PM_EN (0xffffL<<0) +#define BNX2_RPM_SORT_USER3_BC_EN (1L<<16) +#define BNX2_RPM_SORT_USER3_MC_EN (1L<<17) +#define BNX2_RPM_SORT_USER3_MC_HSH_EN (1L<<18) +#define BNX2_RPM_SORT_USER3_PROM_EN (1L<<19) +#define BNX2_RPM_SORT_USER3_VLAN_EN (0xfL<<20) +#define BNX2_RPM_SORT_USER3_PROM_VLAN (1L<<24) +#define BNX2_RPM_SORT_USER3_ENA (1L<<31) + 
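/*
 * Illustrative sketch (editor's addition, not part of the original bnx2.h
 * register map): the RPM_SORT_USER* registers defined above are composed of
 * independent enable fields, so a receive-sort configuration is simply an OR
 * of the field macros.  This helper only builds the 32-bit value; the name
 * bnx2_example_sort_user0 and its parameters are hypothetical, and writing
 * the result to the chip would go through whatever register accessor the
 * driver uses, which is not shown here.
 */
static inline u32 bnx2_example_sort_user0(u32 perfect_match_mask, bool promisc)
{
	/* Accept broadcast and hashed multicast frames plus the requested
	 * perfect-match entries (PM_EN is a 16-bit field at bits 15:0).
	 */
	u32 val = (perfect_match_mask & BNX2_RPM_SORT_USER0_PM_EN) |
		  BNX2_RPM_SORT_USER0_BC_EN |
		  BNX2_RPM_SORT_USER0_MC_HSH_EN;

	/* Promiscuous reception is a separate enable bit in the same rule. */
	if (promisc)
		val |= BNX2_RPM_SORT_USER0_PROM_EN;

	/* The sort rule only takes effect once its enable bit is set. */
	return val | BNX2_RPM_SORT_USER0_ENA;
}
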
+#define BNX2_RPM_STAT_L2_FILTER_DISCARDS 0x00001840 +#define BNX2_RPM_STAT_RULE_CHECKER_DISCARDS 0x00001844 +#define BNX2_RPM_STAT_IFINFTQDISCARDS 0x00001848 +#define BNX2_RPM_STAT_IFINMBUFDISCARD 0x0000184c +#define BNX2_RPM_STAT_RULE_CHECKER_P4_HIT 0x00001850 +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0 0x00001854 +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1 0x00001858 +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2 0x0000185c +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3 0x00001860 +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4 0x00001864 +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5 0x00001868 +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6 0x0000186c +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7 0x00001870 +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN (0xffL<<0) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER (0xffL<<16) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN_TYPE (1L<<30) +#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_EN (1L<<31) + +#define BNX2_RPM_STAT_AC0 0x00001880 +#define BNX2_RPM_STAT_AC1 0x00001884 +#define BNX2_RPM_STAT_AC2 0x00001888 +#define BNX2_RPM_STAT_AC3 0x0000188c +#define BNX2_RPM_STAT_AC4 0x00001890 +#define BNX2_RPM_RC_CNTL_16 0x000018e0 +#define BNX2_RPM_RC_CNTL_16_OFFSET (0xffL<<0) +#define BNX2_RPM_RC_CNTL_16_CLASS (0x7L<<8) +#define BNX2_RPM_RC_CNTL_16_PRIORITY (1L<<11) +#define BNX2_RPM_RC_CNTL_16_P4 (1L<<12) +#define BNX2_RPM_RC_CNTL_16_HDR_TYPE (0x7L<<13) +#define 
BNX2_RPM_RC_CNTL_16_HDR_TYPE_START (0L<<13) +#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_IP (1L<<13) +#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP (2L<<13) +#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_UDP (3L<<13) +#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_DATA (4L<<13) +#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP_UDP (5L<<13) +#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_ICMPV6 (6L<<13) +#define BNX2_RPM_RC_CNTL_16_COMP (0x3L<<16) +#define BNX2_RPM_RC_CNTL_16_COMP_EQUAL (0L<<16) +#define BNX2_RPM_RC_CNTL_16_COMP_NEQUAL (1L<<16) +#define BNX2_RPM_RC_CNTL_16_COMP_GREATER (2L<<16) +#define BNX2_RPM_RC_CNTL_16_COMP_LESS (3L<<16) +#define BNX2_RPM_RC_CNTL_16_MAP (1L<<18) +#define BNX2_RPM_RC_CNTL_16_SBIT (1L<<19) +#define BNX2_RPM_RC_CNTL_16_CMDSEL (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_16_DISCARD (1L<<25) +#define BNX2_RPM_RC_CNTL_16_MASK (1L<<26) +#define BNX2_RPM_RC_CNTL_16_P1 (1L<<27) +#define BNX2_RPM_RC_CNTL_16_P2 (1L<<28) +#define BNX2_RPM_RC_CNTL_16_P3 (1L<<29) +#define BNX2_RPM_RC_CNTL_16_NBIT (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_16 0x000018e4 +#define BNX2_RPM_RC_VALUE_MASK_16_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_16_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_17 0x000018e8 +#define BNX2_RPM_RC_CNTL_17_OFFSET (0xffL<<0) +#define BNX2_RPM_RC_CNTL_17_CLASS (0x7L<<8) +#define BNX2_RPM_RC_CNTL_17_PRIORITY (1L<<11) +#define BNX2_RPM_RC_CNTL_17_P4 (1L<<12) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE (0x7L<<13) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_START (0L<<13) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_IP (1L<<13) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP (2L<<13) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_UDP (3L<<13) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_DATA (4L<<13) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP_UDP (5L<<13) +#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_ICMPV6 (6L<<13) +#define BNX2_RPM_RC_CNTL_17_COMP (0x3L<<16) +#define BNX2_RPM_RC_CNTL_17_COMP_EQUAL (0L<<16) +#define BNX2_RPM_RC_CNTL_17_COMP_NEQUAL (1L<<16) +#define BNX2_RPM_RC_CNTL_17_COMP_GREATER (2L<<16) +#define BNX2_RPM_RC_CNTL_17_COMP_LESS (3L<<16) +#define BNX2_RPM_RC_CNTL_17_MAP (1L<<18) +#define BNX2_RPM_RC_CNTL_17_SBIT (1L<<19) +#define BNX2_RPM_RC_CNTL_17_CMDSEL (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_17_DISCARD (1L<<25) +#define BNX2_RPM_RC_CNTL_17_MASK (1L<<26) +#define BNX2_RPM_RC_CNTL_17_P1 (1L<<27) +#define BNX2_RPM_RC_CNTL_17_P2 (1L<<28) +#define BNX2_RPM_RC_CNTL_17_P3 (1L<<29) +#define BNX2_RPM_RC_CNTL_17_NBIT (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_17 0x000018ec +#define BNX2_RPM_RC_VALUE_MASK_17_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_17_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_18 0x000018f0 +#define BNX2_RPM_RC_CNTL_18_OFFSET (0xffL<<0) +#define BNX2_RPM_RC_CNTL_18_CLASS (0x7L<<8) +#define BNX2_RPM_RC_CNTL_18_PRIORITY (1L<<11) +#define BNX2_RPM_RC_CNTL_18_P4 (1L<<12) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE (0x7L<<13) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_START (0L<<13) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_IP (1L<<13) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP (2L<<13) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_UDP (3L<<13) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_DATA (4L<<13) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP_UDP (5L<<13) +#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_ICMPV6 (6L<<13) +#define BNX2_RPM_RC_CNTL_18_COMP (0x3L<<16) +#define BNX2_RPM_RC_CNTL_18_COMP_EQUAL (0L<<16) +#define BNX2_RPM_RC_CNTL_18_COMP_NEQUAL (1L<<16) +#define BNX2_RPM_RC_CNTL_18_COMP_GREATER (2L<<16) +#define BNX2_RPM_RC_CNTL_18_COMP_LESS (3L<<16) +#define BNX2_RPM_RC_CNTL_18_MAP (1L<<18) +#define BNX2_RPM_RC_CNTL_18_SBIT (1L<<19) +#define 
BNX2_RPM_RC_CNTL_18_CMDSEL (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_18_DISCARD (1L<<25) +#define BNX2_RPM_RC_CNTL_18_MASK (1L<<26) +#define BNX2_RPM_RC_CNTL_18_P1 (1L<<27) +#define BNX2_RPM_RC_CNTL_18_P2 (1L<<28) +#define BNX2_RPM_RC_CNTL_18_P3 (1L<<29) +#define BNX2_RPM_RC_CNTL_18_NBIT (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_18 0x000018f4 +#define BNX2_RPM_RC_VALUE_MASK_18_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_18_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_19 0x000018f8 +#define BNX2_RPM_RC_CNTL_19_OFFSET (0xffL<<0) +#define BNX2_RPM_RC_CNTL_19_CLASS (0x7L<<8) +#define BNX2_RPM_RC_CNTL_19_PRIORITY (1L<<11) +#define BNX2_RPM_RC_CNTL_19_P4 (1L<<12) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE (0x7L<<13) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_START (0L<<13) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_IP (1L<<13) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP (2L<<13) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_UDP (3L<<13) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_DATA (4L<<13) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP_UDP (5L<<13) +#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_ICMPV6 (6L<<13) +#define BNX2_RPM_RC_CNTL_19_COMP (0x3L<<16) +#define BNX2_RPM_RC_CNTL_19_COMP_EQUAL (0L<<16) +#define BNX2_RPM_RC_CNTL_19_COMP_NEQUAL (1L<<16) +#define BNX2_RPM_RC_CNTL_19_COMP_GREATER (2L<<16) +#define BNX2_RPM_RC_CNTL_19_COMP_LESS (3L<<16) +#define BNX2_RPM_RC_CNTL_19_MAP (1L<<18) +#define BNX2_RPM_RC_CNTL_19_SBIT (1L<<19) +#define BNX2_RPM_RC_CNTL_19_CMDSEL (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_19_DISCARD (1L<<25) +#define BNX2_RPM_RC_CNTL_19_MASK (1L<<26) +#define BNX2_RPM_RC_CNTL_19_P1 (1L<<27) +#define BNX2_RPM_RC_CNTL_19_P2 (1L<<28) +#define BNX2_RPM_RC_CNTL_19_P3 (1L<<29) +#define BNX2_RPM_RC_CNTL_19_NBIT (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_19 0x000018fc +#define BNX2_RPM_RC_VALUE_MASK_19_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_19_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_0 0x00001900 +#define BNX2_RPM_RC_CNTL_0_OFFSET (0xffL<<0) +#define BNX2_RPM_RC_CNTL_0_CLASS (0x7L<<8) +#define BNX2_RPM_RC_CNTL_0_PRIORITY (1L<<11) +#define BNX2_RPM_RC_CNTL_0_P4 (1L<<12) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE (0x7L<<13) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_START (0L<<13) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_IP (1L<<13) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP (2L<<13) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_UDP (3L<<13) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_DATA (4L<<13) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP_UDP (5L<<13) +#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_ICMPV6 (6L<<13) +#define BNX2_RPM_RC_CNTL_0_COMP (0x3L<<16) +#define BNX2_RPM_RC_CNTL_0_COMP_EQUAL (0L<<16) +#define BNX2_RPM_RC_CNTL_0_COMP_NEQUAL (1L<<16) +#define BNX2_RPM_RC_CNTL_0_COMP_GREATER (2L<<16) +#define BNX2_RPM_RC_CNTL_0_COMP_LESS (3L<<16) +#define BNX2_RPM_RC_CNTL_0_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_0_SBIT (1L<<19) +#define BNX2_RPM_RC_CNTL_0_CMDSEL (0xfL<<20) +#define BNX2_RPM_RC_CNTL_0_MAP (1L<<24) +#define BNX2_RPM_RC_CNTL_0_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_0_DISCARD (1L<<25) +#define BNX2_RPM_RC_CNTL_0_MASK (1L<<26) +#define BNX2_RPM_RC_CNTL_0_P1 (1L<<27) +#define BNX2_RPM_RC_CNTL_0_P2 (1L<<28) +#define BNX2_RPM_RC_CNTL_0_P3 (1L<<29) +#define BNX2_RPM_RC_CNTL_0_NBIT (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_0 0x00001904 +#define BNX2_RPM_RC_VALUE_MASK_0_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_0_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_1 0x00001908 +#define BNX2_RPM_RC_CNTL_1_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_1_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_1_OFFSET_XI (0xffL<<0) +#define 
BNX2_RPM_RC_CNTL_1_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_1_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_1_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_1_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_1_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_1_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_1_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_1_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_1_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_1_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_1_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_1_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_1_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_1_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_1_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_1_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_1_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_1 0x0000190c +#define BNX2_RPM_RC_VALUE_MASK_1_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_1_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_2 0x00001910 +#define BNX2_RPM_RC_CNTL_2_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_2_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_2_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_2_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_2_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_2_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_2_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_2_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_2_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_2_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_2_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_2_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_2_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_2_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_2_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_2_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_2_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_2_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_2_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_2_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_2 0x00001914 +#define BNX2_RPM_RC_VALUE_MASK_2_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_2_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_3 0x00001918 +#define BNX2_RPM_RC_CNTL_3_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_3_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_3_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_3_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_3_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_3_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_UDP_XI (3L<<13) +#define 
BNX2_RPM_RC_CNTL_3_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_3_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_3_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_3_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_3_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_3_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_3_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_3_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_3_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_3_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_3_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_3_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_3_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_3_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_3_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_3 0x0000191c +#define BNX2_RPM_RC_VALUE_MASK_3_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_3_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_4 0x00001920 +#define BNX2_RPM_RC_CNTL_4_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_4_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_4_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_4_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_4_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_4_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_4_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_4_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_4_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_4_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_4_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_4_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_4_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_4_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_4_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_4_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_4_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_4 0x00001924 +#define BNX2_RPM_RC_VALUE_MASK_4_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_4_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_5 0x00001928 +#define BNX2_RPM_RC_CNTL_5_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_5_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_5_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_5_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_5_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_5_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_5_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_5_COMP_LESS_XI (3L<<16) +#define 
BNX2_RPM_RC_CNTL_5_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_5_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_5_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_5_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_5_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_5_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_5_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_5_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_5_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_5 0x0000192c +#define BNX2_RPM_RC_VALUE_MASK_5_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_5_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_6 0x00001930 +#define BNX2_RPM_RC_CNTL_6_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_6_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_6_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_6_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_6_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_6_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_6_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_6_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_6_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_6_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_6_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_6_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_6_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_6_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_6_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_6_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_6_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_6 0x00001934 +#define BNX2_RPM_RC_VALUE_MASK_6_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_6_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_7 0x00001938 +#define BNX2_RPM_RC_CNTL_7_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_7_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_7_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_7_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_7_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_7_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_7_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_7_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_7_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_7_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_7_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_7_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_7_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_7_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_7_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_7_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_7_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_7 
0x0000193c +#define BNX2_RPM_RC_VALUE_MASK_7_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_7_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_8 0x00001940 +#define BNX2_RPM_RC_CNTL_8_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_8_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_8_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_8_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_8_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_8_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_8_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_8_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_8_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_8_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_8_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_8_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_8_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_8_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_8_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_8_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_8_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_8 0x00001944 +#define BNX2_RPM_RC_VALUE_MASK_8_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_8_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_9 0x00001948 +#define BNX2_RPM_RC_CNTL_9_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_9_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_9_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_9_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_9_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_9_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_9_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_9_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_9_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_9_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_9_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_9_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_9_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_9_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_9_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_9_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_9_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_9 0x0000194c +#define BNX2_RPM_RC_VALUE_MASK_9_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_9_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_10 0x00001950 +#define BNX2_RPM_RC_CNTL_10_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_10_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_10_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_10_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_10_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_10_P4_XI 
(1L<<12) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_10_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_10_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_10_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_10_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_10_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_10_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_10_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_10_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_10_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_10_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_10_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_10 0x00001954 +#define BNX2_RPM_RC_VALUE_MASK_10_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_10_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_11 0x00001958 +#define BNX2_RPM_RC_CNTL_11_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_11_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_11_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_11_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_11_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_11_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_11_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_11_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_11_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_11_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_11_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_11_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_11_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_11_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_11_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_11_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_11_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_11 0x0000195c +#define BNX2_RPM_RC_VALUE_MASK_11_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_11_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_12 0x00001960 +#define BNX2_RPM_RC_CNTL_12_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_12_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_12_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_12_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_12_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_12_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_DATA_XI (4L<<13) +#define 
BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_12_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_12_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_12_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_12_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_12_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_12_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_12_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_12_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_12_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_12_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_12_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_12 0x00001964 +#define BNX2_RPM_RC_VALUE_MASK_12_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_12_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_13 0x00001968 +#define BNX2_RPM_RC_CNTL_13_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_13_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_13_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_13_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_13_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_13_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_13_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_13_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_13_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_13_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_13_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_13_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_13_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_13_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_13_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_13_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_13_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_13 0x0000196c +#define BNX2_RPM_RC_VALUE_MASK_13_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_13_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_14 0x00001970 +#define BNX2_RPM_RC_CNTL_14_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_14_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_14_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_14_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_14_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_14_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_14_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_14_COMP_LESS_XI (3L<<16) +#define 
BNX2_RPM_RC_CNTL_14_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_14_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_14_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_14_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_14_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_14_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_14_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_14_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_14_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_14 0x00001974 +#define BNX2_RPM_RC_VALUE_MASK_14_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_14_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CNTL_15 0x00001978 +#define BNX2_RPM_RC_CNTL_15_A (0x3ffffL<<0) +#define BNX2_RPM_RC_CNTL_15_B (0xfffL<<19) +#define BNX2_RPM_RC_CNTL_15_OFFSET_XI (0xffL<<0) +#define BNX2_RPM_RC_CNTL_15_CLASS_XI (0x7L<<8) +#define BNX2_RPM_RC_CNTL_15_PRIORITY_XI (1L<<11) +#define BNX2_RPM_RC_CNTL_15_P4_XI (1L<<12) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_XI (0x7L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_START_XI (0L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_IP_XI (1L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_XI (2L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_UDP_XI (3L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_DATA_XI (4L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_UDP_XI (5L<<13) +#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_ICMPV6_XI (6L<<13) +#define BNX2_RPM_RC_CNTL_15_COMP_XI (0x3L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_EQUAL_XI (0L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_NEQUAL_XI (1L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_GREATER_XI (2L<<16) +#define BNX2_RPM_RC_CNTL_15_COMP_LESS_XI (3L<<16) +#define BNX2_RPM_RC_CNTL_15_MAP_XI (1L<<18) +#define BNX2_RPM_RC_CNTL_15_SBIT_XI (1L<<19) +#define BNX2_RPM_RC_CNTL_15_CMDSEL_XI (0x1fL<<20) +#define BNX2_RPM_RC_CNTL_15_DISCARD_XI (1L<<25) +#define BNX2_RPM_RC_CNTL_15_MASK_XI (1L<<26) +#define BNX2_RPM_RC_CNTL_15_P1_XI (1L<<27) +#define BNX2_RPM_RC_CNTL_15_P2_XI (1L<<28) +#define BNX2_RPM_RC_CNTL_15_P3_XI (1L<<29) +#define BNX2_RPM_RC_CNTL_15_NBIT_XI (1L<<30) + +#define BNX2_RPM_RC_VALUE_MASK_15 0x0000197c +#define BNX2_RPM_RC_VALUE_MASK_15_VALUE (0xffffL<<0) +#define BNX2_RPM_RC_VALUE_MASK_15_MASK (0xffffL<<16) + +#define BNX2_RPM_RC_CONFIG 0x00001980 +#define BNX2_RPM_RC_CONFIG_RULE_ENABLE (0xffffL<<0) +#define BNX2_RPM_RC_CONFIG_RULE_ENABLE_XI (0xfffffL<<0) +#define BNX2_RPM_RC_CONFIG_DEF_CLASS (0x7L<<24) +#define BNX2_RPM_RC_CONFIG_KNUM_OVERWRITE (1L<<31) + +#define BNX2_RPM_DEBUG0 0x00001984 +#define BNX2_RPM_DEBUG0_FM_BCNT (0xffffL<<0) +#define BNX2_RPM_DEBUG0_T_DATA_OFST_VLD (1L<<16) +#define BNX2_RPM_DEBUG0_T_UDP_OFST_VLD (1L<<17) +#define BNX2_RPM_DEBUG0_T_TCP_OFST_VLD (1L<<18) +#define BNX2_RPM_DEBUG0_T_IP_OFST_VLD (1L<<19) +#define BNX2_RPM_DEBUG0_IP_MORE_FRGMT (1L<<20) +#define BNX2_RPM_DEBUG0_T_IP_NO_TCP_UDP_HDR (1L<<21) +#define BNX2_RPM_DEBUG0_LLC_SNAP (1L<<22) +#define BNX2_RPM_DEBUG0_FM_STARTED (1L<<23) +#define BNX2_RPM_DEBUG0_DONE (1L<<24) +#define BNX2_RPM_DEBUG0_WAIT_4_DONE (1L<<25) +#define BNX2_RPM_DEBUG0_USE_TPBUF_CKSUM (1L<<26) +#define BNX2_RPM_DEBUG0_RX_NO_PSD_HDR_CKSUM (1L<<27) +#define BNX2_RPM_DEBUG0_IGNORE_VLAN (1L<<28) +#define BNX2_RPM_DEBUG0_RP_ENA_ACTIVE (1L<<31) + +#define BNX2_RPM_DEBUG1 0x00001988 +#define BNX2_RPM_DEBUG1_FSM_CUR_ST (0xffffL<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IDLE (0L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_ALL (1L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IPLLC (2L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_IP (4L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IP (8L<<0) +#define 
BNX2_RPM_DEBUG1_FSM_CUR_ST_IP_START (16L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP (32L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_TCP (64L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_UDP (128L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_AH (256L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP (512L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP_PAYLOAD (1024L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_DATA (2048L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRY (0x2000L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRYOUT (0x4000L<<0) +#define BNX2_RPM_DEBUG1_FSM_CUR_ST_LATCH_RESULT (0x8000L<<0) +#define BNX2_RPM_DEBUG1_HDR_BCNT (0x7ffL<<16) +#define BNX2_RPM_DEBUG1_UNKNOWN_ETYPE_D (1L<<28) +#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D2 (1L<<29) +#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D1 (1L<<30) +#define BNX2_RPM_DEBUG1_EOF_0XTRA_WD (1L<<31) + +#define BNX2_RPM_DEBUG2 0x0000198c +#define BNX2_RPM_DEBUG2_CMD_HIT_VEC (0xffffL<<0) +#define BNX2_RPM_DEBUG2_IP_BCNT (0xffL<<16) +#define BNX2_RPM_DEBUG2_THIS_CMD_M4 (1L<<24) +#define BNX2_RPM_DEBUG2_THIS_CMD_M3 (1L<<25) +#define BNX2_RPM_DEBUG2_THIS_CMD_M2 (1L<<26) +#define BNX2_RPM_DEBUG2_THIS_CMD_M1 (1L<<27) +#define BNX2_RPM_DEBUG2_IPIPE_EMPTY (1L<<28) +#define BNX2_RPM_DEBUG2_FM_DISCARD (1L<<29) +#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D2 (1L<<30) +#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D1 (1L<<31) + +#define BNX2_RPM_DEBUG3 0x00001990 +#define BNX2_RPM_DEBUG3_AVAIL_MBUF_PTR (0x1ffL<<0) +#define BNX2_RPM_DEBUG3_RDE_RLUPQ_WR_REQ_INT (1L<<9) +#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_LAST_INT (1L<<10) +#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_REQ_INT (1L<<11) +#define BNX2_RPM_DEBUG3_RDE_RBUF_FREE_REQ (1L<<12) +#define BNX2_RPM_DEBUG3_RDE_RBUF_ALLOC_REQ (1L<<13) +#define BNX2_RPM_DEBUG3_DFSM_MBUF_NOTAVAIL (1L<<14) +#define BNX2_RPM_DEBUG3_RBUF_RDE_SOF_DROP (1L<<15) +#define BNX2_RPM_DEBUG3_DFIFO_VLD_ENTRY_CT (0xfL<<16) +#define BNX2_RPM_DEBUG3_RDE_SRC_FIFO_ALMFULL (1L<<21) +#define BNX2_RPM_DEBUG3_DROP_NXT_VLD (1L<<22) +#define BNX2_RPM_DEBUG3_DROP_NXT (1L<<23) +#define BNX2_RPM_DEBUG3_FTQ_FSM (0x3L<<24) +#define BNX2_RPM_DEBUG3_FTQ_FSM_IDLE (0x0L<<24) +#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_ACK (0x1L<<24) +#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_FREE (0x2L<<24) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM (0x3L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_SOF (0x0L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_GET_MBUF (0x1L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DMA_DATA (0x2L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DATA (0x3L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_EOF (0x4L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_MF_ACK (0x5L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DROP_NXT_VLD (0x6L<<26) +#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DONE (0x7L<<26) +#define BNX2_RPM_DEBUG3_MBFREE_FSM (1L<<29) +#define BNX2_RPM_DEBUG3_MBFREE_FSM_IDLE (0L<<29) +#define BNX2_RPM_DEBUG3_MBFREE_FSM_WAIT_ACK (1L<<29) +#define BNX2_RPM_DEBUG3_MBALLOC_FSM (1L<<30) +#define BNX2_RPM_DEBUG3_MBALLOC_FSM_ET_MBUF (0x0L<<30) +#define BNX2_RPM_DEBUG3_MBALLOC_FSM_IVE_MBUF (0x1L<<30) +#define BNX2_RPM_DEBUG3_CCODE_EOF_ERROR (1L<<31) + +#define BNX2_RPM_DEBUG4 0x00001994 +#define BNX2_RPM_DEBUG4_DFSM_MBUF_CLUSTER (0x1ffffffL<<0) +#define BNX2_RPM_DEBUG4_DFIFO_CUR_CCODE (0x7L<<25) +#define BNX2_RPM_DEBUG4_MBWRITE_FSM (0x7L<<28) +#define BNX2_RPM_DEBUG4_DFIFO_EMPTY (1L<<31) + +#define BNX2_RPM_DEBUG5 0x00001998 +#define BNX2_RPM_DEBUG5_RDROP_WPTR (0x1fL<<0) +#define BNX2_RPM_DEBUG5_RDROP_ACPI_RPTR (0x1fL<<5) +#define BNX2_RPM_DEBUG5_RDROP_MC_RPTR (0x1fL<<10) +#define BNX2_RPM_DEBUG5_RDROP_RC_RPTR 
(0x1fL<<15) +#define BNX2_RPM_DEBUG5_RDROP_ACPI_EMPTY (1L<<20) +#define BNX2_RPM_DEBUG5_RDROP_MC_EMPTY (1L<<21) +#define BNX2_RPM_DEBUG5_RDROP_AEOF_VEC_AT_RDROP_MC_RPTR (1L<<22) +#define BNX2_RPM_DEBUG5_HOLDREG_WOL_DROP_INT (1L<<23) +#define BNX2_RPM_DEBUG5_HOLDREG_DISCARD (1L<<24) +#define BNX2_RPM_DEBUG5_HOLDREG_MBUF_NOTAVAIL (1L<<25) +#define BNX2_RPM_DEBUG5_HOLDREG_MC_EMPTY (1L<<26) +#define BNX2_RPM_DEBUG5_HOLDREG_RC_EMPTY (1L<<27) +#define BNX2_RPM_DEBUG5_HOLDREG_FC_EMPTY (1L<<28) +#define BNX2_RPM_DEBUG5_HOLDREG_ACPI_EMPTY (1L<<29) +#define BNX2_RPM_DEBUG5_HOLDREG_FULL_T (1L<<30) +#define BNX2_RPM_DEBUG5_HOLDREG_RD (1L<<31) + +#define BNX2_RPM_DEBUG6 0x0000199c +#define BNX2_RPM_DEBUG6_ACPI_VEC (0xffffL<<0) +#define BNX2_RPM_DEBUG6_VEC (0xffffL<<16) + +#define BNX2_RPM_DEBUG7 0x000019a0 +#define BNX2_RPM_DEBUG7_RPM_DBG7_LAST_CRC (0xffffffffL<<0) + +#define BNX2_RPM_DEBUG8 0x000019a4 +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM (0xfL<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_IDLE (0L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W1_ADDR (1L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W2_ADDR (2L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W3_ADDR (3L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_WAIT_THBUF (4L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_DATA (5L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W0_ADDR (6L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W1_ADDR (7L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W2_ADDR (8L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_ADDR (9L<<0) +#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_WAIT_THBUF (10L<<0) +#define BNX2_RPM_DEBUG8_COMPARE_AT_W0 (1L<<4) +#define BNX2_RPM_DEBUG8_COMPARE_AT_W3_DATA (1L<<5) +#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_WAIT (1L<<6) +#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W3 (1L<<7) +#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W2 (1L<<8) +#define BNX2_RPM_DEBUG8_EOF_W_LTEQ6_VLDBYTES (1L<<9) +#define BNX2_RPM_DEBUG8_EOF_W_LTEQ4_VLDBYTES (1L<<10) +#define BNX2_RPM_DEBUG8_NXT_EOF_W_12_VLDBYTES (1L<<11) +#define BNX2_RPM_DEBUG8_EOF_DET (1L<<12) +#define BNX2_RPM_DEBUG8_SOF_DET (1L<<13) +#define BNX2_RPM_DEBUG8_WAIT_4_SOF (1L<<14) +#define BNX2_RPM_DEBUG8_ALL_DONE (1L<<15) +#define BNX2_RPM_DEBUG8_THBUF_ADDR (0x7fL<<16) +#define BNX2_RPM_DEBUG8_BYTE_CTR (0xffL<<24) + +#define BNX2_RPM_DEBUG9 0x000019a8 +#define BNX2_RPM_DEBUG9_OUTFIFO_COUNT (0x7L<<0) +#define BNX2_RPM_DEBUG9_RDE_ACPI_RDY (1L<<3) +#define BNX2_RPM_DEBUG9_VLD_RD_ENTRY_CT (0x7L<<4) +#define BNX2_RPM_DEBUG9_OUTFIFO_OVERRUN_OCCURRED (1L<<28) +#define BNX2_RPM_DEBUG9_INFIFO_OVERRUN_OCCURRED (1L<<29) +#define BNX2_RPM_DEBUG9_ACPI_MATCH_INT (1L<<30) +#define BNX2_RPM_DEBUG9_ACPI_ENABLE_SYN (1L<<31) +#define BNX2_RPM_DEBUG9_BEMEM_R_XI (0x1fL<<0) +#define BNX2_RPM_DEBUG9_EO_XI (1L<<5) +#define BNX2_RPM_DEBUG9_AEOF_DE_XI (1L<<6) +#define BNX2_RPM_DEBUG9_SO_XI (1L<<7) +#define BNX2_RPM_DEBUG9_WD64_CT_XI (0x1fL<<8) +#define BNX2_RPM_DEBUG9_EOF_VLDBYTE_XI (0x7L<<13) +#define BNX2_RPM_DEBUG9_ACPI_RDE_PAT_ID_XI (0xfL<<16) +#define BNX2_RPM_DEBUG9_CALCRC_RESULT_XI (0x3ffL<<20) +#define BNX2_RPM_DEBUG9_DATA_IN_VL_XI (1L<<30) +#define BNX2_RPM_DEBUG9_CALCRC_BUFFER_VLD_XI (1L<<31) + +#define BNX2_RPM_ACPI_DBG_BUF_W00 0x000019c0 +#define BNX2_RPM_ACPI_DBG_BUF_W01 0x000019c4 +#define BNX2_RPM_ACPI_DBG_BUF_W02 0x000019c8 +#define BNX2_RPM_ACPI_DBG_BUF_W03 0x000019cc +#define BNX2_RPM_ACPI_DBG_BUF_W10 0x000019d0 +#define BNX2_RPM_ACPI_DBG_BUF_W11 0x000019d4 +#define BNX2_RPM_ACPI_DBG_BUF_W12 0x000019d8 +#define BNX2_RPM_ACPI_DBG_BUF_W13 0x000019dc +#define BNX2_RPM_ACPI_DBG_BUF_W20 0x000019e0 +#define 
BNX2_RPM_ACPI_DBG_BUF_W21 0x000019e4 +#define BNX2_RPM_ACPI_DBG_BUF_W22 0x000019e8 +#define BNX2_RPM_ACPI_DBG_BUF_W23 0x000019ec +#define BNX2_RPM_ACPI_DBG_BUF_W30 0x000019f0 +#define BNX2_RPM_ACPI_DBG_BUF_W31 0x000019f4 +#define BNX2_RPM_ACPI_DBG_BUF_W32 0x000019f8 +#define BNX2_RPM_ACPI_DBG_BUF_W33 0x000019fc +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL 0x00001a00 +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_BYTE_ADDRESS (0xffffL<<0) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_DEBUGRD (1L<<28) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_MODE (1L<<29) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_INIT (1L<<30) +#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_WR (1L<<31) + +#define BNX2_RPM_ACPI_PATTERN_CTRL 0x00001a04 +#define BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID (0xfL<<0) +#define BNX2_RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR (1L<<30) +#define BNX2_RPM_ACPI_PATTERN_CTRL_WR (1L<<31) + +#define BNX2_RPM_ACPI_DATA 0x00001a08 +#define BNX2_RPM_ACPI_DATA_PATTERN_BE (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_LEN0 0x00001a0c +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN3 (0xffL<<0) +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN2 (0xffL<<8) +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN1 (0xffL<<16) +#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN0 (0xffL<<24) + +#define BNX2_RPM_ACPI_PATTERN_LEN1 0x00001a10 +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN7 (0xffL<<0) +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN6 (0xffL<<8) +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN5 (0xffL<<16) +#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN4 (0xffL<<24) + +#define BNX2_RPM_ACPI_PATTERN_CRC0 0x00001a18 +#define BNX2_RPM_ACPI_PATTERN_CRC0_PATTERN_CRC0 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC1 0x00001a1c +#define BNX2_RPM_ACPI_PATTERN_CRC1_PATTERN_CRC1 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC2 0x00001a20 +#define BNX2_RPM_ACPI_PATTERN_CRC2_PATTERN_CRC2 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC3 0x00001a24 +#define BNX2_RPM_ACPI_PATTERN_CRC3_PATTERN_CRC3 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC4 0x00001a28 +#define BNX2_RPM_ACPI_PATTERN_CRC4_PATTERN_CRC4 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC5 0x00001a2c +#define BNX2_RPM_ACPI_PATTERN_CRC5_PATTERN_CRC5 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC6 0x00001a30 +#define BNX2_RPM_ACPI_PATTERN_CRC6_PATTERN_CRC6 (0xffffffffL<<0) + +#define BNX2_RPM_ACPI_PATTERN_CRC7 0x00001a34 +#define BNX2_RPM_ACPI_PATTERN_CRC7_PATTERN_CRC7 (0xffffffffL<<0) + + +/* + * rlup_reg definition + * offset: 0x2000 + */ +#define BNX2_RLUP_RSS_CONFIG 0x0000201c +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_XI (0x3L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI (0L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI (1L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_IP_ONLY_XI (2L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_RES_XI (3L<<0) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_XI (0x3L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI (0L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI (1L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI (2L<<2) +#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI (3L<<2) + +#define BNX2_RLUP_RSS_COMMAND 0x00002048 +#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR (0xfUL<<0) +#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK (0xffUL<<4) +#define BNX2_RLUP_RSS_COMMAND_WRITE (1UL<<12) +#define BNX2_RLUP_RSS_COMMAND_READ (1UL<<13) +#define BNX2_RLUP_RSS_COMMAND_HASH_MASK (0x7UL<<14) + +#define BNX2_RLUP_RSS_DATA 0x0000204c + + +/* + * rbuf_reg definition + * offset: 0x200000 + */ 
+#define BNX2_RBUF_COMMAND 0x00200000 +#define BNX2_RBUF_COMMAND_ENABLED (1L<<0) +#define BNX2_RBUF_COMMAND_FREE_INIT (1L<<1) +#define BNX2_RBUF_COMMAND_RAM_INIT (1L<<2) +#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL (1L<<3) +#define BNX2_RBUF_COMMAND_OVER_FREE (1L<<4) +#define BNX2_RBUF_COMMAND_ALLOC_REQ (1L<<5) +#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE (1L<<6) +#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI (1L<<5) +#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI (1L<<6) +#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI (1L<<7) + +#define BNX2_RBUF_STATUS1 0x00200004 +#define BNX2_RBUF_STATUS1_FREE_COUNT (0x3ffL<<0) + +#define BNX2_RBUF_STATUS2 0x00200008 +#define BNX2_RBUF_STATUS2_FREE_TAIL (0x1ffL<<0) +#define BNX2_RBUF_STATUS2_FREE_HEAD (0x1ffL<<16) + +#define BNX2_RBUF_CONFIG 0x0020000c +#define BNX2_RBUF_CONFIG_XOFF_TRIP (0x3ffL<<0) +#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) \ + ((((mtu) - 1500) * 31 / 1000) + 54) +#define BNX2_RBUF_CONFIG_XON_TRIP (0x3ffL<<16) +#define BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) \ + ((((mtu) - 1500) * 39 / 1000) + 66) +#define BNX2_RBUF_CONFIG_VAL(mtu) \ + (BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) | \ + (BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) << 16)) + +#define BNX2_RBUF_FW_BUF_ALLOC 0x00200010 +#define BNX2_RBUF_FW_BUF_ALLOC_VALUE (0x1ffL<<7) +#define BNX2_RBUF_FW_BUF_ALLOC_TYPE (1L<<16) +#define BNX2_RBUF_FW_BUF_ALLOC_ALLOC_REQ (1L<<31) + +#define BNX2_RBUF_FW_BUF_FREE 0x00200014 +#define BNX2_RBUF_FW_BUF_FREE_COUNT (0x7fL<<0) +#define BNX2_RBUF_FW_BUF_FREE_TAIL (0x1ffL<<7) +#define BNX2_RBUF_FW_BUF_FREE_HEAD (0x1ffL<<16) +#define BNX2_RBUF_FW_BUF_FREE_TYPE (1L<<25) +#define BNX2_RBUF_FW_BUF_FREE_FREE_REQ (1L<<31) + +#define BNX2_RBUF_FW_BUF_SEL 0x00200018 +#define BNX2_RBUF_FW_BUF_SEL_COUNT (0x7fL<<0) +#define BNX2_RBUF_FW_BUF_SEL_TAIL (0x1ffL<<7) +#define BNX2_RBUF_FW_BUF_SEL_HEAD (0x1ffL<<16) +#define BNX2_RBUF_FW_BUF_SEL_SEL_REQ (1L<<31) + +#define BNX2_RBUF_CONFIG2 0x0020001c +#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP (0x3ffL<<0) +#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) \ + ((((mtu) - 1500) * 4 / 1000) + 5) +#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP (0x3ffL<<16) +#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) \ + ((((mtu) - 1500) * 2 / 100) + 30) +#define BNX2_RBUF_CONFIG2_VAL(mtu) \ + (BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) | \ + (BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) << 16)) + +#define BNX2_RBUF_CONFIG3 0x00200020 +#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP (0x3ffL<<0) +#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) \ + ((((mtu) - 1500) * 12 / 1000) + 18) +#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP (0x3ffL<<16) +#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) \ + ((((mtu) - 1500) * 2 / 100) + 30) +#define BNX2_RBUF_CONFIG3_VAL(mtu) \ + (BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) | \ + (BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) << 16)) + +#define BNX2_RBUF_PKT_DATA 0x00208000 +#define BNX2_RBUF_CLIST_DATA 0x00210000 +#define BNX2_RBUF_BUF_DATA 0x00220000 + + +/* + * rv2p_reg definition + * offset: 0x2800 + */ +#define BNX2_RV2P_COMMAND 0x00002800 +#define BNX2_RV2P_COMMAND_ENABLED (1L<<0) +#define BNX2_RV2P_COMMAND_PROC1_INTRPT (1L<<1) +#define BNX2_RV2P_COMMAND_PROC2_INTRPT (1L<<2) +#define BNX2_RV2P_COMMAND_ABORT0 (1L<<4) +#define BNX2_RV2P_COMMAND_ABORT1 (1L<<5) +#define BNX2_RV2P_COMMAND_ABORT2 (1L<<6) +#define BNX2_RV2P_COMMAND_ABORT3 (1L<<7) +#define BNX2_RV2P_COMMAND_ABORT4 (1L<<8) +#define BNX2_RV2P_COMMAND_ABORT5 (1L<<9) +#define BNX2_RV2P_COMMAND_PROC1_RESET (1L<<16) +#define BNX2_RV2P_COMMAND_PROC2_RESET (1L<<17) +#define BNX2_RV2P_COMMAND_CTXIF_RESET (1L<<18) 
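The BNX2_RBUF_CONFIG*_VAL(mtu) macros defined above derive the XON/XOFF (and drop/keep) trip points from the MTU by scaling linearly with the frame bytes beyond 1500, then packing the two trip points into one 32-bit register word. A small worked example follows; it is a userspace harness for illustration only, it assumes just the macros above are in scope, and the constants in the comments are computed by hand from those formulas.

/* Illustrative sketch only -- not part of the original header. */
#include <stdio.h>

int main(void)
{
	unsigned int std_mtu = 1500, jumbo_mtu = 9000;

	/* Standard MTU: XOFF trip 54, XON trip 66 -> 0x00420036 */
	printf("config(%u) = 0x%08x\n", std_mtu, BNX2_RBUF_CONFIG_VAL(std_mtu));

	/* Jumbo MTU 9000: XOFF trip ((7500 * 31) / 1000) + 54 = 286,
	 *                 XON trip  ((7500 * 39) / 1000) + 66 = 358 -> 0x0166011e
	 */
	printf("config(%u) = 0x%08x\n", jumbo_mtu, BNX2_RBUF_CONFIG_VAL(jumbo_mtu));

	return 0;
}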
+ +#define BNX2_RV2P_STATUS 0x00002804 +#define BNX2_RV2P_STATUS_ALWAYS_0 (1L<<0) +#define BNX2_RV2P_STATUS_RV2P_GEN_STAT0_CNT (1L<<8) +#define BNX2_RV2P_STATUS_RV2P_GEN_STAT1_CNT (1L<<9) +#define BNX2_RV2P_STATUS_RV2P_GEN_STAT2_CNT (1L<<10) +#define BNX2_RV2P_STATUS_RV2P_GEN_STAT3_CNT (1L<<11) +#define BNX2_RV2P_STATUS_RV2P_GEN_STAT4_CNT (1L<<12) +#define BNX2_RV2P_STATUS_RV2P_GEN_STAT5_CNT (1L<<13) + +#define BNX2_RV2P_CONFIG 0x00002808 +#define BNX2_RV2P_CONFIG_STALL_PROC1 (1L<<0) +#define BNX2_RV2P_CONFIG_STALL_PROC2 (1L<<1) +#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT0 (1L<<8) +#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT1 (1L<<9) +#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT2 (1L<<10) +#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT3 (1L<<11) +#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT4 (1L<<12) +#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT5 (1L<<13) +#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT0 (1L<<16) +#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT1 (1L<<17) +#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT2 (1L<<18) +#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT3 (1L<<19) +#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT4 (1L<<20) +#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT5 (1L<<21) +#define BNX2_RV2P_CONFIG_PAGE_SIZE (0xfL<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_256 (0L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_512 (1L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_1K (2L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_2K (3L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_4K (4L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_8K (5L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_16K (6L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_32K (7L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_64K (8L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_128K (9L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_256K (10L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_512K (11L<<24) +#define BNX2_RV2P_CONFIG_PAGE_SIZE_1M (12L<<24) + +#define BNX2_RV2P_GEN_BFR_ADDR_0 0x00002810 +#define BNX2_RV2P_GEN_BFR_ADDR_0_VALUE (0xffffL<<16) + +#define BNX2_RV2P_GEN_BFR_ADDR_1 0x00002814 +#define BNX2_RV2P_GEN_BFR_ADDR_1_VALUE (0xffffL<<16) + +#define BNX2_RV2P_GEN_BFR_ADDR_2 0x00002818 +#define BNX2_RV2P_GEN_BFR_ADDR_2_VALUE (0xffffL<<16) + +#define BNX2_RV2P_GEN_BFR_ADDR_3 0x0000281c +#define BNX2_RV2P_GEN_BFR_ADDR_3_VALUE (0xffffL<<16) + +#define BNX2_RV2P_INSTR_HIGH 0x00002830 +#define BNX2_RV2P_INSTR_HIGH_HIGH (0x1fL<<0) + +#define BNX2_RV2P_INSTR_LOW 0x00002834 +#define BNX2_RV2P_INSTR_LOW_LOW (0xffffffffL<<0) + +#define BNX2_RV2P_PROC1_ADDR_CMD 0x00002838 +#define BNX2_RV2P_PROC1_ADDR_CMD_ADD (0x3ffL<<0) +#define BNX2_RV2P_PROC1_ADDR_CMD_RDWR (1L<<31) + +#define BNX2_RV2P_PROC2_ADDR_CMD 0x0000283c +#define BNX2_RV2P_PROC2_ADDR_CMD_ADD (0x3ffL<<0) +#define BNX2_RV2P_PROC2_ADDR_CMD_RDWR (1L<<31) + +#define BNX2_RV2P_PROC1_GRC_DEBUG 0x00002840 +#define BNX2_RV2P_PROC2_GRC_DEBUG 0x00002844 +#define BNX2_RV2P_GRC_PROC_DEBUG 0x00002848 +#define BNX2_RV2P_DEBUG_VECT_PEEK 0x0000284c +#define BNX2_RV2P_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_RV2P_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_RV2P_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_RV2P_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_RV2P_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_RV2P_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_RV2P_MPFE_PFE_CTL 0x00002afc +#define BNX2_RV2P_MPFE_PFE_CTL_INC_USAGE_CNT (1L<<0) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE (0xfL<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_0 (0L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_1 (1L<<4) +#define 
BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_2 (2L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_3 (3L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_4 (4L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_5 (5L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_6 (6L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_7 (7L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_8 (8L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_9 (9L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_10 (10L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_11 (11L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_12 (12L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_13 (13L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_14 (14L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_15 (15L<<4) +#define BNX2_RV2P_MPFE_PFE_CTL_PFE_COUNT (0xfL<<12) +#define BNX2_RV2P_MPFE_PFE_CTL_OFFSET (0x1ffL<<16) + +#define BNX2_RV2P_RV2PPQ 0x00002b40 +#define BNX2_RV2P_PFTQ_CMD 0x00002b78 +#define BNX2_RV2P_PFTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_RV2P_PFTQ_CMD_WR_TOP (1L<<10) +#define BNX2_RV2P_PFTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_RV2P_PFTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_RV2P_PFTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_RV2P_PFTQ_CMD_RD_DATA (1L<<26) +#define BNX2_RV2P_PFTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_RV2P_PFTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_RV2P_PFTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_RV2P_PFTQ_CMD_POP (1L<<30) +#define BNX2_RV2P_PFTQ_CMD_BUSY (1L<<31) + +#define BNX2_RV2P_PFTQ_CTL 0x00002b7c +#define BNX2_RV2P_PFTQ_CTL_INTERVENE (1L<<0) +#define BNX2_RV2P_PFTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_RV2P_PFTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_RV2P_PFTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_RV2P_PFTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_RV2P_RV2PTQ 0x00002b80 +#define BNX2_RV2P_TFTQ_CMD 0x00002bb8 +#define BNX2_RV2P_TFTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_RV2P_TFTQ_CMD_WR_TOP (1L<<10) +#define BNX2_RV2P_TFTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_RV2P_TFTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_RV2P_TFTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_RV2P_TFTQ_CMD_RD_DATA (1L<<26) +#define BNX2_RV2P_TFTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_RV2P_TFTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_RV2P_TFTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_RV2P_TFTQ_CMD_POP (1L<<30) +#define BNX2_RV2P_TFTQ_CMD_BUSY (1L<<31) + +#define BNX2_RV2P_TFTQ_CTL 0x00002bbc +#define BNX2_RV2P_TFTQ_CTL_INTERVENE (1L<<0) +#define BNX2_RV2P_TFTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_RV2P_TFTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_RV2P_TFTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_RV2P_TFTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_RV2P_RV2PMQ 0x00002bc0 +#define BNX2_RV2P_MFTQ_CMD 0x00002bf8 +#define BNX2_RV2P_MFTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_RV2P_MFTQ_CMD_WR_TOP (1L<<10) +#define BNX2_RV2P_MFTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_RV2P_MFTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_RV2P_MFTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_RV2P_MFTQ_CMD_RD_DATA (1L<<26) +#define BNX2_RV2P_MFTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_RV2P_MFTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_RV2P_MFTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_RV2P_MFTQ_CMD_POP (1L<<30) +#define BNX2_RV2P_MFTQ_CMD_BUSY (1L<<31) + +#define BNX2_RV2P_MFTQ_CTL 0x00002bfc +#define BNX2_RV2P_MFTQ_CTL_INTERVENE (1L<<0) +#define BNX2_RV2P_MFTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_RV2P_MFTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_RV2P_MFTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_RV2P_MFTQ_CTL_CUR_DEPTH (0x3ffL<<22) + + + +/* + * mq_reg definition + * offset: 0x3c00 + */ +#define BNX2_MQ_COMMAND 0x00003c00 +#define BNX2_MQ_COMMAND_ENABLED 
(1L<<0) +#define BNX2_MQ_COMMAND_INIT (1L<<1) +#define BNX2_MQ_COMMAND_OVERFLOW (1L<<4) +#define BNX2_MQ_COMMAND_WR_ERROR (1L<<5) +#define BNX2_MQ_COMMAND_RD_ERROR (1L<<6) +#define BNX2_MQ_COMMAND_IDB_CFG_ERROR (1L<<7) +#define BNX2_MQ_COMMAND_IDB_OVERFLOW (1L<<10) +#define BNX2_MQ_COMMAND_NO_BIN_ERROR (1L<<11) +#define BNX2_MQ_COMMAND_NO_MAP_ERROR (1L<<12) + +#define BNX2_MQ_STATUS 0x00003c04 +#define BNX2_MQ_STATUS_CTX_ACCESS_STAT (1L<<16) +#define BNX2_MQ_STATUS_CTX_ACCESS64_STAT (1L<<17) +#define BNX2_MQ_STATUS_PCI_STALL_STAT (1L<<18) +#define BNX2_MQ_STATUS_IDB_OFLOW_STAT (1L<<19) + +#define BNX2_MQ_CONFIG 0x00003c08 +#define BNX2_MQ_CONFIG_TX_HIGH_PRI (1L<<0) +#define BNX2_MQ_CONFIG_HALT_DIS (1L<<1) +#define BNX2_MQ_CONFIG_BIN_MQ_MODE (1L<<2) +#define BNX2_MQ_CONFIG_DIS_IDB_DROP (1L<<3) +#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE (0x7L<<4) +#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256 (0L<<4) +#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_512 (1L<<4) +#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_1K (2L<<4) +#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_2K (3L<<4) +#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_4K (4L<<4) +#define BNX2_MQ_CONFIG_MAX_DEPTH (0x7fL<<8) +#define BNX2_MQ_CONFIG_CUR_DEPTH (0x7fL<<20) + +#define BNX2_MQ_ENQUEUE1 0x00003c0c +#define BNX2_MQ_ENQUEUE1_OFFSET (0x3fL<<2) +#define BNX2_MQ_ENQUEUE1_CID (0x3fffL<<8) +#define BNX2_MQ_ENQUEUE1_BYTE_MASK (0xfL<<24) +#define BNX2_MQ_ENQUEUE1_KNL_MODE (1L<<28) + +#define BNX2_MQ_ENQUEUE2 0x00003c10 +#define BNX2_MQ_BAD_WR_ADDR 0x00003c14 +#define BNX2_MQ_BAD_RD_ADDR 0x00003c18 +#define BNX2_MQ_KNL_BYP_WIND_START 0x00003c1c +#define BNX2_MQ_KNL_BYP_WIND_START_VALUE (0xfffffL<<12) + +#define BNX2_MQ_KNL_WIND_END 0x00003c20 +#define BNX2_MQ_KNL_WIND_END_VALUE (0xffffffL<<8) + +#define BNX2_MQ_KNL_WRITE_MASK1 0x00003c24 +#define BNX2_MQ_KNL_TX_MASK1 0x00003c28 +#define BNX2_MQ_KNL_CMD_MASK1 0x00003c2c +#define BNX2_MQ_KNL_COND_ENQUEUE_MASK1 0x00003c30 +#define BNX2_MQ_KNL_RX_V2P_MASK1 0x00003c34 +#define BNX2_MQ_KNL_WRITE_MASK2 0x00003c38 +#define BNX2_MQ_KNL_TX_MASK2 0x00003c3c +#define BNX2_MQ_KNL_CMD_MASK2 0x00003c40 +#define BNX2_MQ_KNL_COND_ENQUEUE_MASK2 0x00003c44 +#define BNX2_MQ_KNL_RX_V2P_MASK2 0x00003c48 +#define BNX2_MQ_KNL_BYP_WRITE_MASK1 0x00003c4c +#define BNX2_MQ_KNL_BYP_TX_MASK1 0x00003c50 +#define BNX2_MQ_KNL_BYP_CMD_MASK1 0x00003c54 +#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK1 0x00003c58 +#define BNX2_MQ_KNL_BYP_RX_V2P_MASK1 0x00003c5c +#define BNX2_MQ_KNL_BYP_WRITE_MASK2 0x00003c60 +#define BNX2_MQ_KNL_BYP_TX_MASK2 0x00003c64 +#define BNX2_MQ_KNL_BYP_CMD_MASK2 0x00003c68 +#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK2 0x00003c6c +#define BNX2_MQ_KNL_BYP_RX_V2P_MASK2 0x00003c70 +#define BNX2_MQ_MEM_WR_ADDR 0x00003c74 +#define BNX2_MQ_MEM_WR_ADDR_VALUE (0x3fL<<0) + +#define BNX2_MQ_MEM_WR_DATA0 0x00003c78 +#define BNX2_MQ_MEM_WR_DATA0_VALUE (0xffffffffL<<0) + +#define BNX2_MQ_MEM_WR_DATA1 0x00003c7c +#define BNX2_MQ_MEM_WR_DATA1_VALUE (0xffffffffL<<0) + +#define BNX2_MQ_MEM_WR_DATA2 0x00003c80 +#define BNX2_MQ_MEM_WR_DATA2_VALUE (0x3fffffffL<<0) +#define BNX2_MQ_MEM_WR_DATA2_VALUE_XI (0x7fffffffL<<0) + +#define BNX2_MQ_MEM_RD_ADDR 0x00003c84 +#define BNX2_MQ_MEM_RD_ADDR_VALUE (0x3fL<<0) + +#define BNX2_MQ_MEM_RD_DATA0 0x00003c88 +#define BNX2_MQ_MEM_RD_DATA0_VALUE (0xffffffffL<<0) + +#define BNX2_MQ_MEM_RD_DATA1 0x00003c8c +#define BNX2_MQ_MEM_RD_DATA1_VALUE (0xffffffffL<<0) + +#define BNX2_MQ_MEM_RD_DATA2 0x00003c90 +#define BNX2_MQ_MEM_RD_DATA2_VALUE (0x3fffffffL<<0) +#define BNX2_MQ_MEM_RD_DATA2_VALUE_XI (0x7fffffffL<<0) + +#define 
BNX2_MQ_MAP_L2_3 0x00003d2c +#define BNX2_MQ_MAP_L2_3_MQ_OFFSET (0xffL<<0) +#define BNX2_MQ_MAP_L2_3_SZ (0x3L<<8) +#define BNX2_MQ_MAP_L2_3_CTX_OFFSET (0x2ffL<<10) +#define BNX2_MQ_MAP_L2_3_BIN_OFFSET (0x7L<<23) +#define BNX2_MQ_MAP_L2_3_ARM (0x3L<<26) +#define BNX2_MQ_MAP_L2_3_ENA (0x1L<<31) +#define BNX2_MQ_MAP_L2_3_DEFAULT 0x82004646 + +#define BNX2_MQ_MAP_L2_5 0x00003d34 +#define BNX2_MQ_MAP_L2_5_ARM (0x3L<<26) + +/* + * tsch_reg definition + * offset: 0x4c00 + */ +#define BNX2_TSCH_TSS_CFG 0x00004c1c +#define BNX2_TSCH_TSS_CFG_TSS_START_CID (0x7ffL<<8) +#define BNX2_TSCH_TSS_CFG_NUM_OF_TSS_CON (0xfL<<24) + + + +/* + * tbdr_reg definition + * offset: 0x5000 + */ +#define BNX2_TBDR_COMMAND 0x00005000 +#define BNX2_TBDR_COMMAND_ENABLE (1L<<0) +#define BNX2_TBDR_COMMAND_SOFT_RST (1L<<1) +#define BNX2_TBDR_COMMAND_MSTR_ABORT (1L<<4) + +#define BNX2_TBDR_STATUS 0x00005004 +#define BNX2_TBDR_STATUS_DMA_WAIT (1L<<0) +#define BNX2_TBDR_STATUS_FTQ_WAIT (1L<<1) +#define BNX2_TBDR_STATUS_FIFO_OVERFLOW (1L<<2) +#define BNX2_TBDR_STATUS_FIFO_UNDERFLOW (1L<<3) +#define BNX2_TBDR_STATUS_SEARCHMISS_ERROR (1L<<4) +#define BNX2_TBDR_STATUS_FTQ_ENTRY_CNT (1L<<5) +#define BNX2_TBDR_STATUS_BURST_CNT (1L<<6) + +#define BNX2_TBDR_CONFIG 0x00005008 +#define BNX2_TBDR_CONFIG_MAX_BDS (0xffL<<0) +#define BNX2_TBDR_CONFIG_SWAP_MODE (1L<<8) +#define BNX2_TBDR_CONFIG_PRIORITY (1L<<9) +#define BNX2_TBDR_CONFIG_CACHE_NEXT_PAGE_PTRS (1L<<10) +#define BNX2_TBDR_CONFIG_PAGE_SIZE (0xfL<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_256 (0L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_512 (1L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_1K (2L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_2K (3L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_4K (4L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_8K (5L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_16K (6L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_32K (7L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_64K (8L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_128K (9L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_256K (10L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_512K (11L<<24) +#define BNX2_TBDR_CONFIG_PAGE_SIZE_1M (12L<<24) + +#define BNX2_TBDR_DEBUG_VECT_PEEK 0x0000500c +#define BNX2_TBDR_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_TBDR_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_TBDR_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_TBDR_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_TBDR_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_TBDR_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_TBDR_CKSUM_ERROR_STATUS 0x00005010 +#define BNX2_TBDR_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) +#define BNX2_TBDR_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) + +#define BNX2_TBDR_TBDRQ 0x000053c0 +#define BNX2_TBDR_FTQ_CMD 0x000053f8 +#define BNX2_TBDR_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_TBDR_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_TBDR_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_TBDR_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_TBDR_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_TBDR_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_TBDR_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_TBDR_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_TBDR_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_TBDR_FTQ_CMD_POP (1L<<30) +#define BNX2_TBDR_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_TBDR_FTQ_CTL 0x000053fc +#define BNX2_TBDR_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_TBDR_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_TBDR_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_TBDR_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + + +/* + * tbdc definition + * 
offset: 0x5400 + */ +#define BNX2_TBDC_COMMAND 0x5400 +#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0) +#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1) +#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2) +#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3) +#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4) +#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5) +#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6) +#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7) +#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8) + +#define BNX2_TBDC_STATUS 0x5404 +#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0) + +#define BNX2_TBDC_BD_ADDR 0x5424 + +#define BNX2_TBDC_BIDX 0x542c +#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0) +#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24) + +#define BNX2_TBDC_CID 0x5430 + +#define BNX2_TBDC_CAM_OPCODE 0x5434 +#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0) +#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0) +#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0) +#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0) +#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0) +#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0) +#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0) +#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0) +#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4) +#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5) +#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6) +#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7) +#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8) + + +/* + * tdma_reg definition + * offset: 0x5c00 + */ +#define BNX2_TDMA_COMMAND 0x00005c00 +#define BNX2_TDMA_COMMAND_ENABLED (1L<<0) +#define BNX2_TDMA_COMMAND_MASTER_ABORT (1L<<4) +#define BNX2_TDMA_COMMAND_CS16_ERR (1L<<5) +#define BNX2_TDMA_COMMAND_BAD_L2_LENGTH_ABORT (1L<<7) +#define BNX2_TDMA_COMMAND_MASK_CS1 (1L<<20) +#define BNX2_TDMA_COMMAND_MASK_CS2 (1L<<21) +#define BNX2_TDMA_COMMAND_MASK_CS3 (1L<<22) +#define BNX2_TDMA_COMMAND_MASK_CS4 (1L<<23) +#define BNX2_TDMA_COMMAND_FORCE_ILOCK_CKERR (1L<<24) +#define BNX2_TDMA_COMMAND_OFIFO_CLR (1L<<30) +#define BNX2_TDMA_COMMAND_IFIFO_CLR (1L<<31) + +#define BNX2_TDMA_STATUS 0x00005c04 +#define BNX2_TDMA_STATUS_DMA_WAIT (1L<<0) +#define BNX2_TDMA_STATUS_PAYLOAD_WAIT (1L<<1) +#define BNX2_TDMA_STATUS_PATCH_FTQ_WAIT (1L<<2) +#define BNX2_TDMA_STATUS_LOCK_WAIT (1L<<3) +#define BNX2_TDMA_STATUS_FTQ_ENTRY_CNT (1L<<16) +#define BNX2_TDMA_STATUS_BURST_CNT (1L<<17) +#define BNX2_TDMA_STATUS_MAX_IFIFO_DEPTH (0x3fL<<20) +#define BNX2_TDMA_STATUS_OFIFO_OVERFLOW (1L<<30) +#define BNX2_TDMA_STATUS_IFIFO_OVERFLOW (1L<<31) + +#define BNX2_TDMA_CONFIG 0x00005c08 +#define BNX2_TDMA_CONFIG_ONE_DMA (1L<<0) +#define BNX2_TDMA_CONFIG_ONE_RECORD (1L<<1) +#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN (0x3L<<2) +#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_0 (0L<<2) +#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_1 (1L<<2) +#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_2 (2L<<2) +#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_3 (3L<<2) +#define BNX2_TDMA_CONFIG_LIMIT_SZ (0xfL<<4) +#define BNX2_TDMA_CONFIG_LIMIT_SZ_64 (0L<<4) +#define BNX2_TDMA_CONFIG_LIMIT_SZ_128 (0x4L<<4) +#define BNX2_TDMA_CONFIG_LIMIT_SZ_256 (0x6L<<4) +#define BNX2_TDMA_CONFIG_LIMIT_SZ_512 (0x8L<<4) +#define BNX2_TDMA_CONFIG_LINE_SZ (0xfL<<8) +#define BNX2_TDMA_CONFIG_LINE_SZ_64 (0L<<8) +#define BNX2_TDMA_CONFIG_LINE_SZ_128 (4L<<8) +#define BNX2_TDMA_CONFIG_LINE_SZ_256 (6L<<8) +#define BNX2_TDMA_CONFIG_LINE_SZ_512 (8L<<8) +#define BNX2_TDMA_CONFIG_ALIGN_ENA (1L<<15) +#define BNX2_TDMA_CONFIG_CHK_L2_BD (1L<<16) +#define 
BNX2_TDMA_CONFIG_CMPL_ENTRY (1L<<17) +#define BNX2_TDMA_CONFIG_OFIFO_CMP (1L<<19) +#define BNX2_TDMA_CONFIG_OFIFO_CMP_3 (0L<<19) +#define BNX2_TDMA_CONFIG_OFIFO_CMP_2 (1L<<19) +#define BNX2_TDMA_CONFIG_FIFO_CMP (0xfL<<20) +#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_XI (0x7L<<20) +#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_0_XI (0L<<20) +#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_4_XI (1L<<20) +#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_8_XI (2L<<20) +#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_16_XI (3L<<20) +#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_32_XI (4L<<20) +#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_64_XI (5L<<20) +#define BNX2_TDMA_CONFIG_FIFO_CMP_EN_XI (1L<<23) +#define BNX2_TDMA_CONFIG_BYTES_OST_XI (0x7L<<24) +#define BNX2_TDMA_CONFIG_BYTES_OST_512_XI (0L<<24) +#define BNX2_TDMA_CONFIG_BYTES_OST_1024_XI (1L<<24) +#define BNX2_TDMA_CONFIG_BYTES_OST_2048_XI (2L<<24) +#define BNX2_TDMA_CONFIG_BYTES_OST_4096_XI (3L<<24) +#define BNX2_TDMA_CONFIG_BYTES_OST_8192_XI (4L<<24) +#define BNX2_TDMA_CONFIG_BYTES_OST_16384_XI (5L<<24) +#define BNX2_TDMA_CONFIG_HC_BYPASS_XI (1L<<27) +#define BNX2_TDMA_CONFIG_LCL_MRRS_XI (0x7L<<28) +#define BNX2_TDMA_CONFIG_LCL_MRRS_128_XI (0L<<28) +#define BNX2_TDMA_CONFIG_LCL_MRRS_256_XI (1L<<28) +#define BNX2_TDMA_CONFIG_LCL_MRRS_512_XI (2L<<28) +#define BNX2_TDMA_CONFIG_LCL_MRRS_1024_XI (3L<<28) +#define BNX2_TDMA_CONFIG_LCL_MRRS_2048_XI (4L<<28) +#define BNX2_TDMA_CONFIG_LCL_MRRS_4096_XI (5L<<28) +#define BNX2_TDMA_CONFIG_LCL_MRRS_EN_XI (1L<<31) + +#define BNX2_TDMA_PAYLOAD_PROD 0x00005c0c +#define BNX2_TDMA_PAYLOAD_PROD_VALUE (0x1fffL<<3) + +#define BNX2_TDMA_DBG_WATCHDOG 0x00005c10 +#define BNX2_TDMA_DBG_TRIGGER 0x00005c14 +#define BNX2_TDMA_DMAD_FSM 0x00005c80 +#define BNX2_TDMA_DMAD_FSM_BD_INVLD (1L<<0) +#define BNX2_TDMA_DMAD_FSM_PUSH (0xfL<<4) +#define BNX2_TDMA_DMAD_FSM_ARB_TBDC (0x3L<<8) +#define BNX2_TDMA_DMAD_FSM_ARB_CTX (1L<<12) +#define BNX2_TDMA_DMAD_FSM_DR_INTF (1L<<16) +#define BNX2_TDMA_DMAD_FSM_DMAD (0x7L<<20) +#define BNX2_TDMA_DMAD_FSM_BD (0xfL<<24) + +#define BNX2_TDMA_DMAD_STATUS 0x00005c84 +#define BNX2_TDMA_DMAD_STATUS_RHOLD_PUSH_ENTRY (0x3L<<0) +#define BNX2_TDMA_DMAD_STATUS_RHOLD_DMAD_ENTRY (0x3L<<4) +#define BNX2_TDMA_DMAD_STATUS_RHOLD_BD_ENTRY (0x3L<<8) +#define BNX2_TDMA_DMAD_STATUS_IFTQ_ENUM (0xfL<<12) + +#define BNX2_TDMA_DR_INTF_FSM 0x00005c88 +#define BNX2_TDMA_DR_INTF_FSM_L2_COMP (0x3L<<0) +#define BNX2_TDMA_DR_INTF_FSM_TPATQ (0x7L<<4) +#define BNX2_TDMA_DR_INTF_FSM_TPBUF (0x3L<<8) +#define BNX2_TDMA_DR_INTF_FSM_DR_BUF (0x7L<<12) +#define BNX2_TDMA_DR_INTF_FSM_DMAD (0x7L<<16) + +#define BNX2_TDMA_DR_INTF_STATUS 0x00005c8c +#define BNX2_TDMA_DR_INTF_STATUS_HOLE_PHASE (0x7L<<0) +#define BNX2_TDMA_DR_INTF_STATUS_DATA_AVAIL (0x3L<<4) +#define BNX2_TDMA_DR_INTF_STATUS_SHIFT_ADDR (0x7L<<8) +#define BNX2_TDMA_DR_INTF_STATUS_NXT_PNTR (0xfL<<12) +#define BNX2_TDMA_DR_INTF_STATUS_BYTE_COUNT (0x7L<<16) + +#define BNX2_TDMA_PUSH_FSM 0x00005c90 +#define BNX2_TDMA_BD_IF_DEBUG 0x00005c94 +#define BNX2_TDMA_DMAD_IF_DEBUG 0x00005c98 +#define BNX2_TDMA_CTX_IF_DEBUG 0x00005c9c +#define BNX2_TDMA_TPBUF_IF_DEBUG 0x00005ca0 +#define BNX2_TDMA_DR_IF_DEBUG 0x00005ca4 +#define BNX2_TDMA_TPATQ_IF_DEBUG 0x00005ca8 +#define BNX2_TDMA_TDMA_ILOCK_CKSUM 0x00005cac +#define BNX2_TDMA_TDMA_ILOCK_CKSUM_CALCULATED (0xffffL<<0) +#define BNX2_TDMA_TDMA_ILOCK_CKSUM_EXPECTED (0xffffL<<16) + +#define BNX2_TDMA_TDMA_PCIE_CKSUM 0x00005cb0 +#define BNX2_TDMA_TDMA_PCIE_CKSUM_CALCULATED (0xffffL<<0) +#define BNX2_TDMA_TDMA_PCIE_CKSUM_EXPECTED (0xffffL<<16) + +#define BNX2_TDMA_TDMAQ 0x00005fc0 +#define 
BNX2_TDMA_FTQ_CMD 0x00005ff8 +#define BNX2_TDMA_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_TDMA_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_TDMA_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_TDMA_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_TDMA_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_TDMA_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_TDMA_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_TDMA_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_TDMA_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_TDMA_FTQ_CMD_POP (1L<<30) +#define BNX2_TDMA_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_TDMA_FTQ_CTL 0x00005ffc +#define BNX2_TDMA_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_TDMA_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_TDMA_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_TDMA_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_TDMA_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + + + +/* + * hc_reg definition + * offset: 0x6800 + */ +#define BNX2_HC_COMMAND 0x00006800 +#define BNX2_HC_COMMAND_ENABLE (1L<<0) +#define BNX2_HC_COMMAND_SKIP_ABORT (1L<<4) +#define BNX2_HC_COMMAND_COAL_NOW (1L<<16) +#define BNX2_HC_COMMAND_COAL_NOW_WO_INT (1L<<17) +#define BNX2_HC_COMMAND_STATS_NOW (1L<<18) +#define BNX2_HC_COMMAND_FORCE_INT (0x3L<<19) +#define BNX2_HC_COMMAND_FORCE_INT_NULL (0L<<19) +#define BNX2_HC_COMMAND_FORCE_INT_HIGH (1L<<19) +#define BNX2_HC_COMMAND_FORCE_INT_LOW (2L<<19) +#define BNX2_HC_COMMAND_FORCE_INT_FREE (3L<<19) +#define BNX2_HC_COMMAND_CLR_STAT_NOW (1L<<21) +#define BNX2_HC_COMMAND_MAIN_PWR_INT (1L<<22) +#define BNX2_HC_COMMAND_COAL_ON_NEXT_EVENT (1L<<27) + +#define BNX2_HC_STATUS 0x00006804 +#define BNX2_HC_STATUS_MASTER_ABORT (1L<<0) +#define BNX2_HC_STATUS_PARITY_ERROR_STATE (1L<<1) +#define BNX2_HC_STATUS_PCI_CLK_CNT_STAT (1L<<16) +#define BNX2_HC_STATUS_CORE_CLK_CNT_STAT (1L<<17) +#define BNX2_HC_STATUS_NUM_STATUS_BLOCKS_STAT (1L<<18) +#define BNX2_HC_STATUS_NUM_INT_GEN_STAT (1L<<19) +#define BNX2_HC_STATUS_NUM_INT_MBOX_WR_STAT (1L<<20) +#define BNX2_HC_STATUS_CORE_CLKS_TO_HW_INTACK_STAT (1L<<23) +#define BNX2_HC_STATUS_CORE_CLKS_TO_SW_INTACK_STAT (1L<<24) +#define BNX2_HC_STATUS_CORE_CLKS_DURING_SW_INTACK_STAT (1L<<25) + +#define BNX2_HC_CONFIG 0x00006808 +#define BNX2_HC_CONFIG_COLLECT_STATS (1L<<0) +#define BNX2_HC_CONFIG_RX_TMR_MODE (1L<<1) +#define BNX2_HC_CONFIG_TX_TMR_MODE (1L<<2) +#define BNX2_HC_CONFIG_COM_TMR_MODE (1L<<3) +#define BNX2_HC_CONFIG_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_CONFIG_STATISTIC_PRIORITY (1L<<5) +#define BNX2_HC_CONFIG_STATUS_PRIORITY (1L<<6) +#define BNX2_HC_CONFIG_STAT_MEM_ADDR (0xffL<<8) +#define BNX2_HC_CONFIG_PER_MODE (1L<<16) +#define BNX2_HC_CONFIG_ONE_SHOT (1L<<17) +#define BNX2_HC_CONFIG_USE_INT_PARAM (1L<<18) +#define BNX2_HC_CONFIG_SET_MASK_AT_RD (1L<<19) +#define BNX2_HC_CONFIG_PER_COLLECT_LIMIT (0xfL<<20) +#define BNX2_HC_CONFIG_SB_ADDR_INC (0x7L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_64B (0L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_128B (1L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_256B (2L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_512B (3L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_1024B (4L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_2048B (5L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_4096B (6L<<24) +#define BNX2_HC_CONFIG_SB_ADDR_INC_8192B (7L<<24) +#define BNX2_HC_CONFIG_GEN_STAT_AVG_INTR (1L<<29) +#define BNX2_HC_CONFIG_UNMASK_ALL (1L<<30) +#define BNX2_HC_CONFIG_TX_SEL (1L<<31) + +#define BNX2_HC_ATTN_BITS_ENABLE 0x0000680c +#define BNX2_HC_STATUS_ADDR_L 0x00006810 +#define BNX2_HC_STATUS_ADDR_H 0x00006814 +#define BNX2_HC_STATISTICS_ADDR_L 0x00006818 +#define BNX2_HC_STATISTICS_ADDR_H 0x0000681c +#define 
BNX2_HC_TX_QUICK_CONS_TRIP 0x00006820 +#define BNX2_HC_TX_QUICK_CONS_TRIP_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP 0x00006824 +#define BNX2_HC_COMP_PROD_TRIP_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP 0x00006828 +#define BNX2_HC_RX_QUICK_CONS_TRIP_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS 0x0000682c +#define BNX2_HC_RX_TICKS_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS 0x00006830 +#define BNX2_HC_TX_TICKS_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS 0x00006834 +#define BNX2_HC_COM_TICKS_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS 0x00006838 +#define BNX2_HC_CMD_TICKS_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS 0x0000683c +#define BNX2_HC_PERIODIC_TICKS_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_STAT_COLLECT_TICKS 0x00006840 +#define BNX2_HC_STAT_COLLECT_TICKS_HC_STAT_COLL_TICKS (0xffL<<4) + +#define BNX2_HC_STATS_TICKS 0x00006844 +#define BNX2_HC_STATS_TICKS_HC_STAT_TICKS (0xffffL<<8) + +#define BNX2_HC_STATS_INTERRUPT_STATUS 0x00006848 +#define BNX2_HC_STATS_INTERRUPT_STATUS_SB_STATUS (0x1ffL<<0) +#define BNX2_HC_STATS_INTERRUPT_STATUS_INT_STATUS (0x1ffL<<16) + +#define BNX2_HC_STAT_MEM_DATA 0x0000684c +#define BNX2_HC_STAT_GEN_SEL_0 0x00006850 +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0 (0x7fL<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT0 (0L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT1 (1L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT2 (2L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT3 (3L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT4 (4L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT5 (5L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT6 (6L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT7 (7L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT8 (8L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT9 (9L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT10 (10L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT11 (11L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT0 (12L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT1 (13L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT2 (14L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT3 (15L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT4 (16L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT5 (17L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT6 (18L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT7 (19L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT0 (20L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT1 (21L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT2 (22L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT3 (23L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT4 (24L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT5 (25L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT6 (26L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT7 (27L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT8 (28L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT9 (29L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT10 (30L<<0) +#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT11 (31L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT0 (32L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT1 (33L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT2 (34L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT3 (35L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT0 (36L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT1 (37L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT2 (38L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT3 (39L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT4 (40L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT5 (41L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT6 (42L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT7 (43L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT0 (44L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT1 (45L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT2 (46L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT3 (47L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT4 (48L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT5 (49L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT6 (50L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT7 (51L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_PCI_CLK_CNT (52L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CORE_CLK_CNT (53L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS (54L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN (55L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR (56L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK (59L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK (60L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK (61L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_CMD_CNT (62L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_SLOT_CNT (63L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_CMD_CNT (64L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_SLOT_CNT (65L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT (66L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT (67L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT (68L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT (69L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT (70L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT (71L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT (72L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT (73L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT (74L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT (75L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT (76L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT (77L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT (78L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT (79L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT (80L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT (81L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT (82L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT (83L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT (84L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_TRANSFERS_CNT (85L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_DELAY_PCI_CLKS_CNT (86L<<0) +#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_TRANSFERS_CNT (87L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_DELAY_PCI_CLKS_CNT (88L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_RETRY_AFTER_DATA_CNT (89L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_TRANSFERS_CNT (90L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_DELAY_PCI_CLKS_CNT (91L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_TRANSFERS_CNT (92L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_DELAY_PCI_CLKS_CNT (93L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_RETRY_AFTER_DATA_CNT (94L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_WR_CNT64 (95L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_RD_CNT64 (96L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_ACC_STALL_CLKS (97L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_LOCK_STALL_CLKS (98L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS_STAT (99L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS64_STAT (100L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_PCI_STALL_STAT (101L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_FTQ_ENTRY_CNT (102L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_BURST_CNT (103L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_FTQ_ENTRY_CNT (104L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_BURST_CNT (105L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_FTQ_ENTRY_CNT (106L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_BURST_CNT (107L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUP_MATCH_CNT (108L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_POLL_PASS_CNT (109L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR1_CNT (110L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR2_CNT (111L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR3_CNT (112L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR4_CNT (113L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR5_CNT (114L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT0 (115L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT1 (116L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT2 (117L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT3 (118L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT4 (119L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT5 (120L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC1_MISS (121L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC2_MISS (122L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_BURST_CNT (127L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2 (0x7fL<<16) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UMP_RX_FRAME_DROP_XI (52L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S0_XI (57L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S1_XI (58L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S2_XI (85L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S3_XI (86L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S4_XI (87L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S5_XI (88L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S6_XI (89L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S7_XI (90L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S8_XI (91L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S9_XI (92L<<0) +#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S10_XI (93L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MQ_IDB_OFLOW_XI (94L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_RD_CNT_XI (123L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_WR_CNT_XI (124L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_HITS_XI (125L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_MISSES_XI (126L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC1_XI (128L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC1_XI (129L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC1_XI (130L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC1_XI (131L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC1_XI (132L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC1_XI (133L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC2_XI (134L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC2_XI (135L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC2_XI (136L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC2_XI (137L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC2_XI (138L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC2_XI (139L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC3_XI (140L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC3_XI (141L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC3_XI (142L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC3_XI (143L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC3_XI (144L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC3_XI (145L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC4_XI (146L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC4_XI (147L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC4_XI (148L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC4_XI (149L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC4_XI (150L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC4_XI (151L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC5_XI (152L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC5_XI (153L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC5_XI (154L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC5_XI (155L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC5_XI (156L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC5_XI (157L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC6_XI (158L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC6_XI (159L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC6_XI (160L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC6_XI (161L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC6_XI (162L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC6_XI (163L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC7_XI (164L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC7_XI (165L<<0) +#define 
BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC7_XI (166L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC7_XI (167L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC7_XI (168L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC7_XI (169L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC8_XI (170L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC8_XI (171L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC8_XI (172L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC8_XI (173L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC8_XI (174L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC8_XI (175L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_CMD_CNT_XI (176L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_SLOT_CNT_XI (177L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI (178L<<0) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_SEL_1 0x00006854 +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4 (0x7fL<<0) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6 (0x7fL<<16) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_SEL_2 0x00006858 +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8 (0x7fL<<0) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10 (0x7fL<<16) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_SEL_3 0x0000685c +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12 (0x7fL<<0) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13 (0x7fL<<8) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14 (0x7fL<<16) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15 (0x7fL<<24) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12_XI (0xffL<<0) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13_XI (0xffL<<8) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14_XI (0xffL<<16) +#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15_XI (0xffL<<24) + +#define BNX2_HC_STAT_GEN_STAT0 0x00006888 +#define BNX2_HC_STAT_GEN_STAT1 0x0000688c +#define BNX2_HC_STAT_GEN_STAT2 0x00006890 +#define BNX2_HC_STAT_GEN_STAT3 0x00006894 +#define BNX2_HC_STAT_GEN_STAT4 0x00006898 +#define BNX2_HC_STAT_GEN_STAT5 0x0000689c +#define BNX2_HC_STAT_GEN_STAT6 0x000068a0 +#define BNX2_HC_STAT_GEN_STAT7 0x000068a4 +#define BNX2_HC_STAT_GEN_STAT8 0x000068a8 +#define BNX2_HC_STAT_GEN_STAT9 0x000068ac +#define BNX2_HC_STAT_GEN_STAT10 0x000068b0 +#define BNX2_HC_STAT_GEN_STAT11 0x000068b4 +#define BNX2_HC_STAT_GEN_STAT12 0x000068b8 +#define BNX2_HC_STAT_GEN_STAT13 0x000068bc +#define BNX2_HC_STAT_GEN_STAT14 0x000068c0 +#define BNX2_HC_STAT_GEN_STAT15 0x000068c4 +#define BNX2_HC_STAT_GEN_STAT_AC0 0x000068c8 +#define BNX2_HC_STAT_GEN_STAT_AC1 0x000068cc +#define BNX2_HC_STAT_GEN_STAT_AC2 0x000068d0 +#define BNX2_HC_STAT_GEN_STAT_AC3 
0x000068d4 +#define BNX2_HC_STAT_GEN_STAT_AC4 0x000068d8 +#define BNX2_HC_STAT_GEN_STAT_AC5 0x000068dc +#define BNX2_HC_STAT_GEN_STAT_AC6 0x000068e0 +#define BNX2_HC_STAT_GEN_STAT_AC7 0x000068e4 +#define BNX2_HC_STAT_GEN_STAT_AC8 0x000068e8 +#define BNX2_HC_STAT_GEN_STAT_AC9 0x000068ec +#define BNX2_HC_STAT_GEN_STAT_AC10 0x000068f0 +#define BNX2_HC_STAT_GEN_STAT_AC11 0x000068f4 +#define BNX2_HC_STAT_GEN_STAT_AC12 0x000068f8 +#define BNX2_HC_STAT_GEN_STAT_AC13 0x000068fc +#define BNX2_HC_STAT_GEN_STAT_AC14 0x00006900 +#define BNX2_HC_STAT_GEN_STAT_AC15 0x00006904 +#define BNX2_HC_STAT_GEN_STAT_AC 0x000068c8 +#define BNX2_HC_VIS 0x00006908 +#define BNX2_HC_VIS_STAT_BUILD_STATE (0xfL<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_IDLE (0L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_START (1L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_REQUEST (2L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE64 (3L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE32 (4L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE_DONE (5L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_DMA (6L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_CONTROL (7L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_LOW (8L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_HIGH (9L<<0) +#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_DATA (10L<<0) +#define BNX2_HC_VIS_DMA_STAT_STATE (0xfL<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_IDLE (0L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_PARAM (1L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_DMA (2L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP (3L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_COMP (4L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_PARAM (5L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_DMA (6L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_1 (7L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_2 (8L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_WAIT (9L<<8) +#define BNX2_HC_VIS_DMA_STAT_STATE_ABORT (15L<<8) +#define BNX2_HC_VIS_DMA_MSI_STATE (0x7L<<12) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE (0x3L<<15) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_IDLE (0L<<15) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_COUNT (1L<<15) +#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_START (2L<<15) + +#define BNX2_HC_VIS_1 0x0000690c +#define BNX2_HC_VIS_1_HW_INTACK_STATE (1L<<4) +#define BNX2_HC_VIS_1_HW_INTACK_STATE_IDLE (0L<<4) +#define BNX2_HC_VIS_1_HW_INTACK_STATE_COUNT (1L<<4) +#define BNX2_HC_VIS_1_SW_INTACK_STATE (1L<<5) +#define BNX2_HC_VIS_1_SW_INTACK_STATE_IDLE (0L<<5) +#define BNX2_HC_VIS_1_SW_INTACK_STATE_COUNT (1L<<5) +#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE (1L<<6) +#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_IDLE (0L<<6) +#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_COUNT (1L<<6) +#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE (1L<<7) +#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_IDLE (0L<<7) +#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_COUNT (1L<<7) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE (0xfL<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_IDLE (0L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_DMA (1L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_UPDATE (2L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_ASSIGN (3L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_WAIT (4L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_UPDATE (5L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_ASSIGN (6L<<17) +#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_WAIT (7L<<17) +#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE (0x3L<<21) +#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_NORMAL (0L<<21) +#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_CLEAR (1L<<21) 
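/*
 * Illustrative sketch of how the paired *_VALUE / *_INT masks in the
 * host-coalescing block above are typically consumed: for example
 * BNX2_HC_RX_TICKS_VALUE is (0x3ffL<<0) and BNX2_HC_RX_TICKS_INT is
 * (0x3ffL<<16), i.e. two 10-bit fields packed into one 32-bit word.
 * The helper name bnx2_pack_ticks() and the locally copied mask values
 * are assumptions for illustration only, not code taken from this driver.
 */
#include <stdint.h>

#define HC_TICKS_VALUE_MASK 0x3ffu   /* mirrors BNX2_HC_RX_TICKS_VALUE  */
#define HC_TICKS_INT_MASK   0x3ffu   /* mirrors BNX2_HC_RX_TICKS_INT>>16 */

/* Pack the normal and during-interrupt tick counts into one register word. */
static inline uint32_t bnx2_pack_ticks(uint32_t ticks, uint32_t ticks_int)
{
	return (ticks & HC_TICKS_VALUE_MASK) |
	       ((ticks_int & HC_TICKS_INT_MASK) << 16);
}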
+#define BNX2_HC_VIS_1_INT_GEN_STATE (1L<<23) +#define BNX2_HC_VIS_1_INT_GEN_STATE_DLE (0L<<23) +#define BNX2_HC_VIS_1_INT_GEN_STATE_NTERRUPT (1L<<23) +#define BNX2_HC_VIS_1_STAT_CHAN_ID (0x7L<<24) +#define BNX2_HC_VIS_1_INT_B (1L<<27) + +#define BNX2_HC_DEBUG_VECT_PEEK 0x00006910 +#define BNX2_HC_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_HC_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_HC_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_HC_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_HC_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_HC_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_HC_COALESCE_NOW 0x00006914 +#define BNX2_HC_COALESCE_NOW_COAL_NOW (0x1ffL<<1) +#define BNX2_HC_COALESCE_NOW_COAL_NOW_WO_INT (0x1ffL<<11) +#define BNX2_HC_COALESCE_NOW_COAL_ON_NXT_EVENT (0x1ffL<<21) + +#define BNX2_HC_MSIX_BIT_VECTOR 0x00006918 +#define BNX2_HC_MSIX_BIT_VECTOR_VAL (0x1ffL<<0) + +#define BNX2_HC_SB_CONFIG_1 0x00006a00 +#define BNX2_HC_SB_CONFIG_1_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_1_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_1_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_1_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_1_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_1_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_1_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_1_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_1 0x00006a04 +#define BNX2_HC_TX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_1_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_1 0x00006a08 +#define BNX2_HC_COMP_PROD_TRIP_1_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_1_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_1 0x00006a0c +#define BNX2_HC_RX_QUICK_CONS_TRIP_1_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_1_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_1 0x00006a10 +#define BNX2_HC_RX_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_1 0x00006a14 +#define BNX2_HC_TX_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_1 0x00006a18 +#define BNX2_HC_COM_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_1 0x00006a1c +#define BNX2_HC_CMD_TICKS_1_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_1_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_1 0x00006a20 +#define BNX2_HC_PERIODIC_TICKS_1_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_1_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_2 0x00006a24 +#define BNX2_HC_SB_CONFIG_2_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_2_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_2_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_2_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_2_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_2_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_2_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_2_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_2 0x00006a28 +#define BNX2_HC_TX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_2_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_2 0x00006a2c +#define BNX2_HC_COMP_PROD_TRIP_2_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_2_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_2 0x00006a30 +#define BNX2_HC_RX_QUICK_CONS_TRIP_2_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_2_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_2 0x00006a34 +#define BNX2_HC_RX_TICKS_2_VALUE (0x3ffL<<0) +#define 
BNX2_HC_RX_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_2 0x00006a38 +#define BNX2_HC_TX_TICKS_2_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_2 0x00006a3c +#define BNX2_HC_COM_TICKS_2_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_2 0x00006a40 +#define BNX2_HC_CMD_TICKS_2_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_2_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_2 0x00006a44 +#define BNX2_HC_PERIODIC_TICKS_2_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_2_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_3 0x00006a48 +#define BNX2_HC_SB_CONFIG_3_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_3_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_3_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_3_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_3_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_3_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_3_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_3_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_3 0x00006a4c +#define BNX2_HC_TX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_3_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_3 0x00006a50 +#define BNX2_HC_COMP_PROD_TRIP_3_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_3_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_3 0x00006a54 +#define BNX2_HC_RX_QUICK_CONS_TRIP_3_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_3_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_3 0x00006a58 +#define BNX2_HC_RX_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_3 0x00006a5c +#define BNX2_HC_TX_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_3 0x00006a60 +#define BNX2_HC_COM_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_3 0x00006a64 +#define BNX2_HC_CMD_TICKS_3_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_3_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_3 0x00006a68 +#define BNX2_HC_PERIODIC_TICKS_3_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_3_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_4 0x00006a6c +#define BNX2_HC_SB_CONFIG_4_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_4_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_4_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_4_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_4_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_4_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_4_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_4_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_4 0x00006a70 +#define BNX2_HC_TX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_4_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_4 0x00006a74 +#define BNX2_HC_COMP_PROD_TRIP_4_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_4_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_4 0x00006a78 +#define BNX2_HC_RX_QUICK_CONS_TRIP_4_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_4_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_4 0x00006a7c +#define BNX2_HC_RX_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_4_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_4 0x00006a80 +#define BNX2_HC_TX_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_4_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_4 0x00006a84 +#define BNX2_HC_COM_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_4_INT 
(0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_4 0x00006a88 +#define BNX2_HC_CMD_TICKS_4_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_4_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_4 0x00006a8c +#define BNX2_HC_PERIODIC_TICKS_4_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_4_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_5 0x00006a90 +#define BNX2_HC_SB_CONFIG_5_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_5_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_5_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_5_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_5_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_5_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_5_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_5_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_5 0x00006a94 +#define BNX2_HC_TX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_5_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_5 0x00006a98 +#define BNX2_HC_COMP_PROD_TRIP_5_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_5_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_5 0x00006a9c +#define BNX2_HC_RX_QUICK_CONS_TRIP_5_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_5_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_5 0x00006aa0 +#define BNX2_HC_RX_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_5 0x00006aa4 +#define BNX2_HC_TX_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_5 0x00006aa8 +#define BNX2_HC_COM_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_5 0x00006aac +#define BNX2_HC_CMD_TICKS_5_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_5_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_5 0x00006ab0 +#define BNX2_HC_PERIODIC_TICKS_5_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_5_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_6 0x00006ab4 +#define BNX2_HC_SB_CONFIG_6_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_6_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_6_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_6_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_6_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_6_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_6_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_6_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_6 0x00006ab8 +#define BNX2_HC_TX_QUICK_CONS_TRIP_6_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_6_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_6 0x00006abc +#define BNX2_HC_COMP_PROD_TRIP_6_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_6_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_6 0x00006ac0 +#define BNX2_HC_RX_QUICK_CONS_TRIP_6_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_6_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_6 0x00006ac4 +#define BNX2_HC_RX_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_6 0x00006ac8 +#define BNX2_HC_TX_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_6 0x00006acc +#define BNX2_HC_COM_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_6 0x00006ad0 +#define BNX2_HC_CMD_TICKS_6_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_6_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_6 0x00006ad4 +#define BNX2_HC_PERIODIC_TICKS_6_HC_PERIODIC_TICKS (0xffffL<<0) +#define 
BNX2_HC_PERIODIC_TICKS_6_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_7 0x00006ad8 +#define BNX2_HC_SB_CONFIG_7_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_7_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_7_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_7_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_7_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_7_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_7_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_7_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_7 0x00006adc +#define BNX2_HC_TX_QUICK_CONS_TRIP_7_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_7_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_7 0x00006ae0 +#define BNX2_HC_COMP_PROD_TRIP_7_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_7_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_7 0x00006ae4 +#define BNX2_HC_RX_QUICK_CONS_TRIP_7_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_7_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_7 0x00006ae8 +#define BNX2_HC_RX_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_7 0x00006aec +#define BNX2_HC_TX_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_7 0x00006af0 +#define BNX2_HC_COM_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_7 0x00006af4 +#define BNX2_HC_CMD_TICKS_7_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_7_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_7 0x00006af8 +#define BNX2_HC_PERIODIC_TICKS_7_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_7_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_8 0x00006afc +#define BNX2_HC_SB_CONFIG_8_RX_TMR_MODE (1L<<1) +#define BNX2_HC_SB_CONFIG_8_TX_TMR_MODE (1L<<2) +#define BNX2_HC_SB_CONFIG_8_COM_TMR_MODE (1L<<3) +#define BNX2_HC_SB_CONFIG_8_CMD_TMR_MODE (1L<<4) +#define BNX2_HC_SB_CONFIG_8_PER_MODE (1L<<16) +#define BNX2_HC_SB_CONFIG_8_ONE_SHOT (1L<<17) +#define BNX2_HC_SB_CONFIG_8_USE_INT_PARAM (1L<<18) +#define BNX2_HC_SB_CONFIG_8_PER_COLLECT_LIMIT (0xfL<<20) + +#define BNX2_HC_TX_QUICK_CONS_TRIP_8 0x00006b00 +#define BNX2_HC_TX_QUICK_CONS_TRIP_8_VALUE (0xffL<<0) +#define BNX2_HC_TX_QUICK_CONS_TRIP_8_INT (0xffL<<16) + +#define BNX2_HC_COMP_PROD_TRIP_8 0x00006b04 +#define BNX2_HC_COMP_PROD_TRIP_8_VALUE (0xffL<<0) +#define BNX2_HC_COMP_PROD_TRIP_8_INT (0xffL<<16) + +#define BNX2_HC_RX_QUICK_CONS_TRIP_8 0x00006b08 +#define BNX2_HC_RX_QUICK_CONS_TRIP_8_VALUE (0xffL<<0) +#define BNX2_HC_RX_QUICK_CONS_TRIP_8_INT (0xffL<<16) + +#define BNX2_HC_RX_TICKS_8 0x00006b0c +#define BNX2_HC_RX_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_RX_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_TX_TICKS_8 0x00006b10 +#define BNX2_HC_TX_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_TX_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_COM_TICKS_8 0x00006b14 +#define BNX2_HC_COM_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_COM_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_CMD_TICKS_8 0x00006b18 +#define BNX2_HC_CMD_TICKS_8_VALUE (0x3ffL<<0) +#define BNX2_HC_CMD_TICKS_8_INT (0x3ffL<<16) + +#define BNX2_HC_PERIODIC_TICKS_8 0x00006b1c +#define BNX2_HC_PERIODIC_TICKS_8_HC_PERIODIC_TICKS (0xffffL<<0) +#define BNX2_HC_PERIODIC_TICKS_8_HC_INT_PERIODIC_TICKS (0xffffL<<16) + +#define BNX2_HC_SB_CONFIG_SIZE (BNX2_HC_SB_CONFIG_2 - BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_COMP_PROD_TRIP_OFF (BNX2_HC_COMP_PROD_TRIP_1 - \ + BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_COM_TICKS_OFF (BNX2_HC_COM_TICKS_1 - 
BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_CMD_TICKS_OFF (BNX2_HC_CMD_TICKS_1 - BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_TX_QUICK_CONS_TRIP_OFF (BNX2_HC_TX_QUICK_CONS_TRIP_1 - \ + BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_TX_TICKS_OFF (BNX2_HC_TX_TICKS_1 - BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_RX_QUICK_CONS_TRIP_OFF (BNX2_HC_RX_QUICK_CONS_TRIP_1 - \ + BNX2_HC_SB_CONFIG_1) +#define BNX2_HC_RX_TICKS_OFF (BNX2_HC_RX_TICKS_1 - BNX2_HC_SB_CONFIG_1) + + +/* + * txp_reg definition + * offset: 0x40000 + */ +#define BNX2_TXP_CPU_MODE 0x00045000 +#define BNX2_TXP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_TXP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_TXP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_TXP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_TXP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_TXP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_TXP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_TXP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_TXP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_TXP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_TXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_TXP_CPU_STATE 0x00045004 +#define BNX2_TXP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_TXP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_TXP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_TXP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_TXP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_TXP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_TXP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_TXP_CPU_STATE_INTERRUPT (1L<<12) +#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_TXP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_TXP_CPU_EVENT_MASK 0x00045008 +#define BNX2_TXP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_TXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_TXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_TXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_TXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_TXP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_TXP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_TXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_TXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_TXP_CPU_PROGRAM_COUNTER 0x0004501c +#define BNX2_TXP_CPU_INSTRUCTION 0x00045020 +#define BNX2_TXP_CPU_DATA_ACCESS 0x00045024 +#define BNX2_TXP_CPU_INTERRUPT_ENABLE 0x00045028 +#define BNX2_TXP_CPU_INTERRUPT_VECTOR 0x0004502c +#define BNX2_TXP_CPU_INTERRUPT_SAVED_PC 0x00045030 +#define BNX2_TXP_CPU_HW_BREAKPOINT 0x00045034 +#define BNX2_TXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_TXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK 0x00045038 +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR 0x00045048 +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define 
BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_TXP_CPU_REG_FILE 0x00045200 +#define BNX2_TXP_TXPQ 0x000453c0 +#define BNX2_TXP_FTQ_CMD 0x000453f8 +#define BNX2_TXP_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_TXP_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_TXP_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_TXP_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_TXP_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_TXP_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_TXP_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_TXP_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_TXP_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_TXP_FTQ_CMD_POP (1L<<30) +#define BNX2_TXP_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_TXP_FTQ_CTL 0x000453fc +#define BNX2_TXP_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_TXP_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_TXP_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_TXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_TXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_TXP_SCRATCH 0x00060000 + + +/* + * tpat_reg definition + * offset: 0x80000 + */ +#define BNX2_TPAT_CPU_MODE 0x00085000 +#define BNX2_TPAT_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_TPAT_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_TPAT_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_TPAT_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_TPAT_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_TPAT_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_TPAT_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_TPAT_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_TPAT_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_TPAT_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_TPAT_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_TPAT_CPU_STATE 0x00085004 +#define BNX2_TPAT_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_TPAT_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_TPAT_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_TPAT_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_TPAT_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_TPAT_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_TPAT_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_TPAT_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_TPAT_CPU_STATE_INTERRUPT (1L<<12) +#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_TPAT_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_TPAT_CPU_EVENT_MASK 0x00085008 +#define BNX2_TPAT_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_TPAT_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_TPAT_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_TPAT_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_TPAT_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_TPAT_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_TPAT_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_TPAT_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_TPAT_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_TPAT_CPU_PROGRAM_COUNTER 0x0008501c +#define BNX2_TPAT_CPU_INSTRUCTION 0x00085020 +#define BNX2_TPAT_CPU_DATA_ACCESS 0x00085024 +#define BNX2_TPAT_CPU_INTERRUPT_ENABLE 0x00085028 +#define BNX2_TPAT_CPU_INTERRUPT_VECTOR 0x0008502c +#define BNX2_TPAT_CPU_INTERRUPT_SAVED_PC 0x00085030 +#define 
BNX2_TPAT_CPU_HW_BREAKPOINT 0x00085034 +#define BNX2_TPAT_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_TPAT_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK 0x00085038 +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR 0x00085048 +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_TPAT_CPU_REG_FILE 0x00085200 +#define BNX2_TPAT_TPATQ 0x000853c0 +#define BNX2_TPAT_FTQ_CMD 0x000853f8 +#define BNX2_TPAT_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_TPAT_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_TPAT_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_TPAT_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_TPAT_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_TPAT_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_TPAT_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_TPAT_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_TPAT_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_TPAT_FTQ_CMD_POP (1L<<30) +#define BNX2_TPAT_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_TPAT_FTQ_CTL 0x000853fc +#define BNX2_TPAT_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_TPAT_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_TPAT_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_TPAT_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_TPAT_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_TPAT_SCRATCH 0x000a0000 + + +/* + * rxp_reg definition + * offset: 0xc0000 + */ +#define BNX2_RXP_CPU_MODE 0x000c5000 +#define BNX2_RXP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_RXP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_RXP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_RXP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_RXP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_RXP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_RXP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_RXP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_RXP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_RXP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_RXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_RXP_CPU_STATE 0x000c5004 +#define BNX2_RXP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_RXP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_RXP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_RXP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_RXP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_RXP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_RXP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_RXP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_RXP_CPU_STATE_INTERRUPT (1L<<12) +#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_RXP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_RXP_CPU_EVENT_MASK 0x000c5008 +#define BNX2_RXP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_RXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_RXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define 
BNX2_RXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_RXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_RXP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_RXP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_RXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_RXP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_RXP_CPU_PROGRAM_COUNTER 0x000c501c +#define BNX2_RXP_CPU_INSTRUCTION 0x000c5020 +#define BNX2_RXP_CPU_DATA_ACCESS 0x000c5024 +#define BNX2_RXP_CPU_INTERRUPT_ENABLE 0x000c5028 +#define BNX2_RXP_CPU_INTERRUPT_VECTOR 0x000c502c +#define BNX2_RXP_CPU_INTERRUPT_SAVED_PC 0x000c5030 +#define BNX2_RXP_CPU_HW_BREAKPOINT 0x000c5034 +#define BNX2_RXP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_RXP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK 0x000c5038 +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR 0x000c5048 +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_RXP_CPU_REG_FILE 0x000c5200 +#define BNX2_RXP_PFE_PFE_CTL 0x000c537c +#define BNX2_RXP_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) +#define BNX2_RXP_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) +#define BNX2_RXP_PFE_PFE_CTL_OFFSET (0x1ffL<<16) + +#define BNX2_RXP_RXPCQ 0x000c5380 +#define BNX2_RXP_CFTQ_CMD 0x000c53b8 +#define BNX2_RXP_CFTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_RXP_CFTQ_CMD_WR_TOP (1L<<10) +#define BNX2_RXP_CFTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_RXP_CFTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_RXP_CFTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_RXP_CFTQ_CMD_RD_DATA (1L<<26) +#define BNX2_RXP_CFTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_RXP_CFTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_RXP_CFTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_RXP_CFTQ_CMD_POP (1L<<30) +#define BNX2_RXP_CFTQ_CMD_BUSY (1L<<31) + +#define BNX2_RXP_CFTQ_CTL 0x000c53bc +#define BNX2_RXP_CFTQ_CTL_INTERVENE (1L<<0) +#define BNX2_RXP_CFTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_RXP_CFTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_RXP_CFTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_RXP_CFTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_RXP_RXPQ 0x000c53c0 +#define BNX2_RXP_FTQ_CMD 0x000c53f8 +#define BNX2_RXP_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_RXP_FTQ_CMD_WR_TOP 
(1L<<10) +#define BNX2_RXP_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_RXP_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_RXP_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_RXP_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_RXP_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_RXP_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_RXP_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_RXP_FTQ_CMD_POP (1L<<30) +#define BNX2_RXP_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_RXP_FTQ_CTL 0x000c53fc +#define BNX2_RXP_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_RXP_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_RXP_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_RXP_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_RXP_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_RXP_SCRATCH 0x000e0000 +#define BNX2_RXP_SCRATCH_RXP_FLOOD 0x000e0024 +#define BNX2_RXP_SCRATCH_RSS_TBL_SZ 0x000e0038 +#define BNX2_RXP_SCRATCH_RSS_TBL 0x000e003c +#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES 128 + + +/* + * com_reg definition + * offset: 0x100000 + */ +#define BNX2_COM_CKSUM_ERROR_STATUS 0x00100000 +#define BNX2_COM_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) +#define BNX2_COM_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) + +#define BNX2_COM_CPU_MODE 0x00105000 +#define BNX2_COM_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_COM_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_COM_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_COM_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_COM_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_COM_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_COM_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_COM_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_COM_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_COM_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_COM_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_COM_CPU_STATE 0x00105004 +#define BNX2_COM_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_COM_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_COM_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_COM_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_COM_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_COM_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_COM_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_COM_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_COM_CPU_STATE_INTERRUPT (1L<<12) +#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_COM_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_COM_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_COM_CPU_EVENT_MASK 0x00105008 +#define BNX2_COM_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_COM_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_COM_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_COM_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_COM_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_COM_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_COM_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_COM_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_COM_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_COM_CPU_PROGRAM_COUNTER 0x0010501c +#define BNX2_COM_CPU_INSTRUCTION 0x00105020 +#define BNX2_COM_CPU_DATA_ACCESS 0x00105024 +#define BNX2_COM_CPU_INTERRUPT_ENABLE 0x00105028 +#define BNX2_COM_CPU_INTERRUPT_VECTOR 0x0010502c +#define BNX2_COM_CPU_INTERRUPT_SAVED_PC 0x00105030 +#define BNX2_COM_CPU_HW_BREAKPOINT 0x00105034 
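/*
 * A minimal illustrative sketch (not part of the original header): the
 * multi-bit register fields above are defined in the "(mask << shift)"
 * style, so a field is normally read back by AND-ing the register value
 * with the define and shifting right by the same amount.  The helper name
 * below is hypothetical, and u32 is assumed to come from <linux/types.h>.
 */
static inline u32 bnx2_example_rxp_ftq_cur_depth(u32 ftq_ctl)
{
	/* BNX2_RXP_FTQ_CTL_CUR_DEPTH is (0x3ffL<<22): mask, then shift by 22. */
	return (ftq_ctl & BNX2_RXP_FTQ_CTL_CUR_DEPTH) >> 22;
}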
+#define BNX2_COM_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_COM_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_COM_CPU_DEBUG_VECT_PEEK 0x00105038 +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_COM_CPU_LAST_BRANCH_ADDR 0x00105048 +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_COM_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_COM_CPU_REG_FILE 0x00105200 +#define BNX2_COM_COMTQ_PFE_PFE_CTL 0x001052bc +#define BNX2_COM_COMTQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) +#define BNX2_COM_COMTQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16) + +#define BNX2_COM_COMXQ 0x00105340 +#define BNX2_COM_COMXQ_FTQ_CMD 0x00105378 +#define BNX2_COM_COMXQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_COM_COMXQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_COM_COMXQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_COM_COMXQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_COM_COMXQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_COM_COMXQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_COM_COMXQ_FTQ_CMD_POP (1L<<30) +#define BNX2_COM_COMXQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_COM_COMXQ_FTQ_CTL 0x0010537c +#define BNX2_COM_COMXQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_COM_COMXQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_COM_COMXQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_COM_COMXQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_COM_COMXQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_COM_COMTQ 0x00105380 +#define BNX2_COM_COMTQ_FTQ_CMD 0x001053b8 +#define BNX2_COM_COMTQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_COM_COMTQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_COM_COMTQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_COM_COMTQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_COM_COMTQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_COM_COMTQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_COM_COMTQ_FTQ_CMD_POP (1L<<30) +#define BNX2_COM_COMTQ_FTQ_CMD_BUSY 
(1L<<31) + +#define BNX2_COM_COMTQ_FTQ_CTL 0x001053bc +#define BNX2_COM_COMTQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_COM_COMTQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_COM_COMTQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_COM_COMTQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_COM_COMTQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_COM_COMQ 0x001053c0 +#define BNX2_COM_COMQ_FTQ_CMD 0x001053f8 +#define BNX2_COM_COMQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_COM_COMQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_COM_COMQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_COM_COMQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_COM_COMQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_COM_COMQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_COM_COMQ_FTQ_CMD_POP (1L<<30) +#define BNX2_COM_COMQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_COM_COMQ_FTQ_CTL 0x001053fc +#define BNX2_COM_COMQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_COM_COMQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_COM_COMQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_COM_COMQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_COM_COMQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_COM_SCRATCH 0x00120000 + +#define BNX2_FW_RX_LOW_LATENCY 0x00120058 +#define BNX2_FW_RX_DROP_COUNT 0x00120084 + + +/* + * cp_reg definition + * offset: 0x180000 + */ +#define BNX2_CP_CKSUM_ERROR_STATUS 0x00180000 +#define BNX2_CP_CKSUM_ERROR_STATUS_CALCULATED (0xffffL<<0) +#define BNX2_CP_CKSUM_ERROR_STATUS_EXPECTED (0xffffL<<16) + +#define BNX2_CP_CPU_MODE 0x00185000 +#define BNX2_CP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_CP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_CP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_CP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_CP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_CP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_CP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_CP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_CP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_CP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_CP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_CP_CPU_STATE 0x00185004 +#define BNX2_CP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_CP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_CP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_CP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_CP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_CP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_CP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_CP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_CP_CPU_STATE_INTERRUPT (1L<<12) +#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_CP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_CP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_CP_CPU_EVENT_MASK 0x00185008 +#define BNX2_CP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_CP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_CP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_CP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_CP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_CP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_CP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define 
BNX2_CP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_CP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_CP_CPU_PROGRAM_COUNTER 0x0018501c +#define BNX2_CP_CPU_INSTRUCTION 0x00185020 +#define BNX2_CP_CPU_DATA_ACCESS 0x00185024 +#define BNX2_CP_CPU_INTERRUPT_ENABLE 0x00185028 +#define BNX2_CP_CPU_INTERRUPT_VECTOR 0x0018502c +#define BNX2_CP_CPU_INTERRUPT_SAVED_PC 0x00185030 +#define BNX2_CP_CPU_HW_BREAKPOINT 0x00185034 +#define BNX2_CP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_CP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_CP_CPU_DEBUG_VECT_PEEK 0x00185038 +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_CP_CPU_LAST_BRANCH_ADDR 0x00185048 +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_CP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_CP_CPU_REG_FILE 0x00185200 +#define BNX2_CP_CPQ_PFE_PFE_CTL 0x001853bc +#define BNX2_CP_CPQ_PFE_PFE_CTL_INC_USAGE_CNT (1L<<0) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE (0xfL<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_0 (0L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_1 (1L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_2 (2L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_3 (3L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_4 (4L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_5 (5L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_6 (6L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_7 (7L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_8 (8L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_9 (9L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_10 (10L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_11 (11L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_12 (12L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_13 (13L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_14 (14L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_15 (15L<<4) +#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_COUNT (0xfL<<12) +#define BNX2_CP_CPQ_PFE_PFE_CTL_OFFSET (0x1ffL<<16) + +#define BNX2_CP_CPQ 0x001853c0 +#define BNX2_CP_CPQ_FTQ_CMD 0x001853f8 +#define BNX2_CP_CPQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_CP_CPQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_CP_CPQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_CP_CPQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_CP_CPQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_CP_CPQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_CP_CPQ_FTQ_CMD_POP (1L<<30) +#define BNX2_CP_CPQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_CP_CPQ_FTQ_CTL 0x001853fc +#define BNX2_CP_CPQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_CP_CPQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_CP_CPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_CP_CPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_CP_CPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_CP_SCRATCH 0x001a0000 + +#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080 + + +/* + * mcp_reg definition + * offset: 0x140000 + */ +#define BNX2_MCP_MCP_CONTROL 0x00140080 +#define BNX2_MCP_MCP_CONTROL_SMBUS_SEL (1L<<30) +#define BNX2_MCP_MCP_CONTROL_MCP_ISOLATE (1L<<31) + +#define 
BNX2_MCP_MCP_ATTENTION_STATUS 0x00140084 +#define BNX2_MCP_MCP_ATTENTION_STATUS_DRV_DOORBELL (1L<<29) +#define BNX2_MCP_MCP_ATTENTION_STATUS_WATCHDOG_TIMEOUT (1L<<30) +#define BNX2_MCP_MCP_ATTENTION_STATUS_CPU_EVENT (1L<<31) + +#define BNX2_MCP_MCP_HEARTBEAT_CONTROL 0x00140088 +#define BNX2_MCP_MCP_HEARTBEAT_CONTROL_MCP_HEARTBEAT_ENABLE (1L<<31) + +#define BNX2_MCP_MCP_HEARTBEAT_STATUS 0x0014008c +#define BNX2_MCP_MCP_HEARTBEAT_STATUS_MCP_HEARTBEAT_PERIOD (0x7ffL<<0) +#define BNX2_MCP_MCP_HEARTBEAT_STATUS_VALID (1L<<31) + +#define BNX2_MCP_MCP_HEARTBEAT 0x00140090 +#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_COUNT (0x3fffffffL<<0) +#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_INC (1L<<30) +#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_RESET (1L<<31) + +#define BNX2_MCP_WATCHDOG_RESET 0x00140094 +#define BNX2_MCP_WATCHDOG_RESET_WATCHDOG_RESET (1L<<31) + +#define BNX2_MCP_WATCHDOG_CONTROL 0x00140098 +#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_TIMEOUT (0xfffffffL<<0) +#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ATTN (1L<<29) +#define BNX2_MCP_WATCHDOG_CONTROL_MCP_RST_ENABLE (1L<<30) +#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ENABLE (1L<<31) + +#define BNX2_MCP_ACCESS_LOCK 0x0014009c +#define BNX2_MCP_ACCESS_LOCK_LOCK (1L<<31) + +#define BNX2_MCP_TOE_ID 0x001400a0 +#define BNX2_MCP_TOE_ID_FUNCTION_ID (1L<<31) + +#define BNX2_MCP_MAILBOX_CFG 0x001400a4 +#define BNX2_MCP_MAILBOX_CFG_MAILBOX_OFFSET (0x3fffL<<0) +#define BNX2_MCP_MAILBOX_CFG_MAILBOX_SIZE (0xfffL<<20) + +#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC 0x001400a8 +#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_OFFSET (0x3fffL<<0) +#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_SIZE (0xfffL<<20) + +#define BNX2_MCP_MCP_DOORBELL 0x001400ac +#define BNX2_MCP_MCP_DOORBELL_MCP_DOORBELL (1L<<31) + +#define BNX2_MCP_DRIVER_DOORBELL 0x001400b0 +#define BNX2_MCP_DRIVER_DOORBELL_DRIVER_DOORBELL (1L<<31) + +#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC 0x001400b4 +#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC_DRIVER_DOORBELL (1L<<31) + +#define BNX2_MCP_CPU_MODE 0x00145000 +#define BNX2_MCP_CPU_MODE_LOCAL_RST (1L<<0) +#define BNX2_MCP_CPU_MODE_STEP_ENA (1L<<1) +#define BNX2_MCP_CPU_MODE_PAGE_0_DATA_ENA (1L<<2) +#define BNX2_MCP_CPU_MODE_PAGE_0_INST_ENA (1L<<3) +#define BNX2_MCP_CPU_MODE_MSG_BIT1 (1L<<6) +#define BNX2_MCP_CPU_MODE_INTERRUPT_ENA (1L<<7) +#define BNX2_MCP_CPU_MODE_SOFT_HALT (1L<<10) +#define BNX2_MCP_CPU_MODE_BAD_DATA_HALT_ENA (1L<<11) +#define BNX2_MCP_CPU_MODE_BAD_INST_HALT_ENA (1L<<12) +#define BNX2_MCP_CPU_MODE_FIO_ABORT_HALT_ENA (1L<<13) +#define BNX2_MCP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA (1L<<15) + +#define BNX2_MCP_CPU_STATE 0x00145004 +#define BNX2_MCP_CPU_STATE_BREAKPOINT (1L<<0) +#define BNX2_MCP_CPU_STATE_BAD_INST_HALTED (1L<<2) +#define BNX2_MCP_CPU_STATE_PAGE_0_DATA_HALTED (1L<<3) +#define BNX2_MCP_CPU_STATE_PAGE_0_INST_HALTED (1L<<4) +#define BNX2_MCP_CPU_STATE_BAD_DATA_ADDR_HALTED (1L<<5) +#define BNX2_MCP_CPU_STATE_BAD_PC_HALTED (1L<<6) +#define BNX2_MCP_CPU_STATE_ALIGN_HALTED (1L<<7) +#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED (1L<<8) +#define BNX2_MCP_CPU_STATE_SOFT_HALTED (1L<<10) +#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW (1L<<11) +#define BNX2_MCP_CPU_STATE_INTERRUPT (1L<<12) +#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL (1L<<14) +#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL (1L<<15) +#define BNX2_MCP_CPU_STATE_BLOCKED_READ (1L<<31) + +#define BNX2_MCP_CPU_EVENT_MASK 0x00145008 +#define BNX2_MCP_CPU_EVENT_MASK_BREAKPOINT_MASK (1L<<0) +#define BNX2_MCP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK (1L<<2) +#define 
BNX2_MCP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK (1L<<3) +#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK (1L<<4) +#define BNX2_MCP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK (1L<<5) +#define BNX2_MCP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK (1L<<6) +#define BNX2_MCP_CPU_EVENT_MASK_ALIGN_HALTED_MASK (1L<<7) +#define BNX2_MCP_CPU_EVENT_MASK_FIO_ABORT_MASK (1L<<8) +#define BNX2_MCP_CPU_EVENT_MASK_SOFT_HALTED_MASK (1L<<10) +#define BNX2_MCP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK (1L<<11) +#define BNX2_MCP_CPU_EVENT_MASK_INTERRUPT_MASK (1L<<12) + +#define BNX2_MCP_CPU_PROGRAM_COUNTER 0x0014501c +#define BNX2_MCP_CPU_INSTRUCTION 0x00145020 +#define BNX2_MCP_CPU_DATA_ACCESS 0x00145024 +#define BNX2_MCP_CPU_INTERRUPT_ENABLE 0x00145028 +#define BNX2_MCP_CPU_INTERRUPT_VECTOR 0x0014502c +#define BNX2_MCP_CPU_INTERRUPT_SAVED_PC 0x00145030 +#define BNX2_MCP_CPU_HW_BREAKPOINT 0x00145034 +#define BNX2_MCP_CPU_HW_BREAKPOINT_DISABLE (1L<<0) +#define BNX2_MCP_CPU_HW_BREAKPOINT_ADDRESS (0x3fffffffL<<2) + +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK 0x00145038 +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_VALUE (0x7ffL<<0) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN (1L<<11) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_SEL (0xfL<<12) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_VALUE (0x7ffL<<16) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN (1L<<27) +#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_SEL (0xfL<<28) + +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR 0x00145048 +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE (1L<<1) +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP (0L<<1) +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH (1L<<1) +#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_LBA (0x3fffffffL<<2) + +#define BNX2_MCP_CPU_REG_FILE 0x00145200 +#define BNX2_MCP_MCPQ 0x001453c0 +#define BNX2_MCP_MCPQ_FTQ_CMD 0x001453f8 +#define BNX2_MCP_MCPQ_FTQ_CMD_OFFSET (0x3ffL<<0) +#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP (1L<<10) +#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_0 (0L<<10) +#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_1 (1L<<10) +#define BNX2_MCP_MCPQ_FTQ_CMD_SFT_RESET (1L<<25) +#define BNX2_MCP_MCPQ_FTQ_CMD_RD_DATA (1L<<26) +#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_INTERVEN (1L<<27) +#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_DATA (1L<<28) +#define BNX2_MCP_MCPQ_FTQ_CMD_INTERVENE_CLR (1L<<29) +#define BNX2_MCP_MCPQ_FTQ_CMD_POP (1L<<30) +#define BNX2_MCP_MCPQ_FTQ_CMD_BUSY (1L<<31) + +#define BNX2_MCP_MCPQ_FTQ_CTL 0x001453fc +#define BNX2_MCP_MCPQ_FTQ_CTL_INTERVENE (1L<<0) +#define BNX2_MCP_MCPQ_FTQ_CTL_OVERFLOW (1L<<1) +#define BNX2_MCP_MCPQ_FTQ_CTL_FORCE_INTERVENE (1L<<2) +#define BNX2_MCP_MCPQ_FTQ_CTL_MAX_DEPTH (0x3ffL<<12) +#define BNX2_MCP_MCPQ_FTQ_CTL_CUR_DEPTH (0x3ffL<<22) + +#define BNX2_MCP_ROM 0x00150000 +#define BNX2_MCP_SCRATCH 0x00160000 +#define BNX2_MCP_STATE_P1 0x0016f9c8 +#define BNX2_MCP_STATE_P0 0x0016fdc8 +#define BNX2_MCP_STATE_P1_5708 0x001699c8 +#define BNX2_MCP_STATE_P0_5708 0x00169dc8 + +#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH +#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000 +#define BNX2_SHM_HDR_SIGNATURE_SIG 0x53530000 +#define BNX2_SHM_HDR_SIGNATURE_VER_MASK 0x000000ff +#define BNX2_SHM_HDR_SIGNATURE_VER_ONE 0x00000001 + +#define BNX2_SHM_HDR_ADDR_0 BNX2_MCP_SCRATCH + 4 +#define BNX2_SHM_HDR_ADDR_1 BNX2_MCP_SCRATCH + 8 + + +#define NUM_MC_HASH_REGISTERS 8 + + +/* PHY_ID1: bits 31-16; PHY_ID2: bits 15-0. 
*/ +#define PHY_BCM5706_PHY_ID 0x00206160 + +#define PHY_ID(id) ((id) & 0xfffffff0) +#define PHY_REV_ID(id) ((id) & 0xf) + +/* 5708 Serdes PHY registers */ + +#define BCM5708S_BMCR_FORCE_2500 0x20 + +#define BCM5708S_UP1 0xb + +#define BCM5708S_UP1_2G5 0x1 + +#define BCM5708S_BLK_ADDR 0x1f + +#define BCM5708S_BLK_ADDR_DIG 0x0000 +#define BCM5708S_BLK_ADDR_DIG3 0x0002 +#define BCM5708S_BLK_ADDR_TX_MISC 0x0005 + +/* Digital Block */ +#define BCM5708S_1000X_CTL1 0x10 + +#define BCM5708S_1000X_CTL1_FIBER_MODE 0x0001 +#define BCM5708S_1000X_CTL1_AUTODET_EN 0x0010 + +#define BCM5708S_1000X_CTL2 0x11 + +#define BCM5708S_1000X_CTL2_PLLEL_DET_EN 0x0001 + +#define BCM5708S_1000X_STAT1 0x14 + +#define BCM5708S_1000X_STAT1_SGMII 0x0001 +#define BCM5708S_1000X_STAT1_LINK 0x0002 +#define BCM5708S_1000X_STAT1_FD 0x0004 +#define BCM5708S_1000X_STAT1_SPEED_MASK 0x0018 +#define BCM5708S_1000X_STAT1_SPEED_10 0x0000 +#define BCM5708S_1000X_STAT1_SPEED_100 0x0008 +#define BCM5708S_1000X_STAT1_SPEED_1G 0x0010 +#define BCM5708S_1000X_STAT1_SPEED_2G5 0x0018 +#define BCM5708S_1000X_STAT1_TX_PAUSE 0x0020 +#define BCM5708S_1000X_STAT1_RX_PAUSE 0x0040 + +/* Digital3 Block */ +#define BCM5708S_DIG_3_0 0x10 + +#define BCM5708S_DIG_3_0_USE_IEEE 0x0001 + +/* Tx/Misc Block */ +#define BCM5708S_TX_ACTL1 0x15 + +#define BCM5708S_TX_ACTL1_DRIVER_VCM 0x30 + +#define BCM5708S_TX_ACTL3 0x17 + +#define MII_BNX2_EXT_STATUS 0x11 +#define EXT_STATUS_MDIX (1 << 13) + +#define MII_BNX2_AUX_CTL 0x18 +#define AUX_CTL_MISC_CTL 0x7007 +#define AUX_CTL_MISC_CTL_WIRESPEED (1 << 4) +#define AUX_CTL_MISC_CTL_AUTOMDIX (1 << 9) +#define AUX_CTL_MISC_CTL_WR (1 << 15) + +#define MII_BNX2_DSP_RW_PORT 0x15 +#define MII_BNX2_DSP_ADDRESS 0x17 +#define MII_BNX2_DSP_EXPAND_REG 0x0f00 +#define MII_EXPAND_REG1 (MII_BNX2_DSP_EXPAND_REG | 1) +#define MII_EXPAND_REG1_RUDI_C 0x20 +#define MII_EXPAND_SERDES_CTL (MII_BNX2_DSP_EXPAND_REG | 3) + +#define MII_BNX2_MISC_SHADOW 0x1c +#define MISC_SHDW_AN_DBG 0x6800 +#define MISC_SHDW_AN_DBG_NOSYNC 0x0002 +#define MISC_SHDW_AN_DBG_RUDI_INVALID 0x0100 +#define MISC_SHDW_MODE_CTL 0x7c00 +#define MISC_SHDW_MODE_CTL_SIG_DET 0x0010 + +#define MII_BNX2_BLK_ADDR 0x1f +#define MII_BNX2_BLK_ADDR_IEEE0 0x0000 +#define MII_BNX2_BLK_ADDR_GP_STATUS 0x8120 +#define MII_BNX2_GP_TOP_AN_STATUS1 0x1b +#define MII_BNX2_GP_TOP_AN_SPEED_MSK 0x3f00 +#define MII_BNX2_GP_TOP_AN_SPEED_10 0x0000 +#define MII_BNX2_GP_TOP_AN_SPEED_100 0x0100 +#define MII_BNX2_GP_TOP_AN_SPEED_1G 0x0200 +#define MII_BNX2_GP_TOP_AN_SPEED_2_5G 0x0300 +#define MII_BNX2_GP_TOP_AN_SPEED_1GKV 0x0d00 +#define MII_BNX2_GP_TOP_AN_FD 0x8 +#define MII_BNX2_BLK_ADDR_SERDES_DIG 0x8300 +#define MII_BNX2_SERDES_DIG_1000XCTL1 0x10 +#define MII_BNX2_SD_1000XCTL1_FIBER 0x01 +#define MII_BNX2_SD_1000XCTL1_AUTODET 0x10 +#define MII_BNX2_SERDES_DIG_MISC1 0x18 +#define MII_BNX2_SD_MISC1_FORCE_MSK 0xf +#define MII_BNX2_SD_MISC1_FORCE_2_5G 0x0 +#define MII_BNX2_SD_MISC1_FORCE 0x10 +#define MII_BNX2_BLK_ADDR_OVER1G 0x8320 +#define MII_BNX2_OVER1G_UP1 0x19 +#define MII_BNX2_BLK_ADDR_BAM_NXTPG 0x8350 +#define MII_BNX2_BAM_NXTPG_CTL 0x10 +#define MII_BNX2_NXTPG_CTL_BAM 0x1 +#define MII_BNX2_NXTPG_CTL_T2 0x2 +#define MII_BNX2_BLK_ADDR_CL73_USERB0 0x8370 +#define MII_BNX2_CL73_BAM_CTL1 0x12 +#define MII_BNX2_CL73_BAM_EN 0x8000 +#define MII_BNX2_CL73_BAM_STA_MGR_EN 0x4000 +#define MII_BNX2_CL73_BAM_NP_AFT_BP_EN 0x2000 +#define MII_BNX2_BLK_ADDR_AER 0xffd0 +#define MII_BNX2_AER_AER 0x1e +#define MII_BNX2_AER_AER_AN_MMD 0x3800 +#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0 0xffe0 + +#define 
MIN_ETHERNET_PACKET_SIZE 60 +#define MAX_ETHERNET_PACKET_SIZE 1514 +#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014 + +#define BNX2_RX_COPY_THRESH 128 + +#define BNX2_MISC_ENABLE_DEFAULT 0x17ffffff + +#define BNX2_START_UNICAST_ADDRESS_INDEX 4 +#define BNX2_END_UNICAST_ADDRESS_INDEX 7 +#define BNX2_MAX_UNICAST_ADDRESSES (BNX2_END_UNICAST_ADDRESS_INDEX - \ + BNX2_START_UNICAST_ADDRESS_INDEX + 1) + +#define DMA_READ_CHANS 5 +#define DMA_WRITE_CHANS 3 + +/* Use CPU native page size up to 16K for the ring sizes. */ +#if (PAGE_SHIFT > 14) +#define BNX2_PAGE_BITS 14 +#else +#define BNX2_PAGE_BITS PAGE_SHIFT +#endif +#define BNX2_PAGE_SIZE (1 << BNX2_PAGE_BITS) + +#define BNX2_TX_DESC_CNT (BNX2_PAGE_SIZE / sizeof(struct bnx2_tx_bd)) +#define BNX2_MAX_TX_DESC_CNT (BNX2_TX_DESC_CNT - 1) + +#define BNX2_MAX_RX_RINGS 8 +#define BNX2_MAX_RX_PG_RINGS 32 +#define BNX2_RX_DESC_CNT (BNX2_PAGE_SIZE / sizeof(struct bnx2_rx_bd)) +#define BNX2_MAX_RX_DESC_CNT (BNX2_RX_DESC_CNT - 1) +#define BNX2_MAX_TOTAL_RX_DESC_CNT (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_RINGS) +#define BNX2_MAX_TOTAL_RX_PG_DESC_CNT \ + (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_PG_RINGS) + +#define BNX2_NEXT_TX_BD(x) (((x) & (BNX2_MAX_TX_DESC_CNT - 1)) == \ + (BNX2_MAX_TX_DESC_CNT - 1)) ? \ + (x) + 2 : (x) + 1 + +#define BNX2_TX_RING_IDX(x) ((x) & BNX2_MAX_TX_DESC_CNT) + +#define BNX2_NEXT_RX_BD(x) (((x) & (BNX2_MAX_RX_DESC_CNT - 1)) == \ + (BNX2_MAX_RX_DESC_CNT - 1)) ? \ + (x) + 2 : (x) + 1 + +#define BNX2_RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx) +#define BNX2_RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx) + +#define BNX2_RX_RING(x) (((x) & ~BNX2_MAX_RX_DESC_CNT) >> (BNX2_PAGE_BITS - 4)) +#define BNX2_RX_IDX(x) ((x) & BNX2_MAX_RX_DESC_CNT) + +/* Context size. */ +#define CTX_SHIFT 7 +#define CTX_SIZE (1 << CTX_SHIFT) +#define CTX_MASK (CTX_SIZE - 1) +#define GET_CID_ADDR(_cid) ((_cid) << CTX_SHIFT) +#define GET_CID(_cid_addr) ((_cid_addr) >> CTX_SHIFT) + +#define PHY_CTX_SHIFT 6 +#define PHY_CTX_SIZE (1 << PHY_CTX_SHIFT) +#define PHY_CTX_MASK (PHY_CTX_SIZE - 1) +#define GET_PCID_ADDR(_pcid) ((_pcid) << PHY_CTX_SHIFT) +#define GET_PCID(_pcid_addr) ((_pcid_addr) >> PHY_CTX_SHIFT) + +#define MB_KERNEL_CTX_SHIFT 8 +#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT) +#define MB_KERNEL_CTX_MASK (MB_KERNEL_CTX_SIZE - 1) +#define MB_GET_CID_ADDR(_cid) (0x10000 + ((_cid) << MB_KERNEL_CTX_SHIFT)) + +#define MAX_CID_CNT 0x4000 +#define MAX_CID_ADDR (GET_CID_ADDR(MAX_CID_CNT)) +#define INVALID_CID_ADDR 0xffffffff + +#define TX_CID 16 +#define TX_TSS_CID 32 +#define RX_CID 0 +#define RX_RSS_CID 4 +#define RX_MAX_RSS_RINGS 7 +#define RX_MAX_RINGS (RX_MAX_RSS_RINGS + 1) +#define TX_MAX_TSS_RINGS 7 +#define TX_MAX_RINGS (TX_MAX_TSS_RINGS + 1) + +#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID) +#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID) + +/* + * This driver uses new build_skb() API : + * RX ring buffer contains pointer to kmalloc() data only, + * skb are built only after Hardware filled the frame. 
+ */ +struct bnx2_sw_bd { + u8 *data; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +/* Its faster to compute this from data than storing it in sw_bd + * (less cache misses) + */ +static inline struct l2_fhdr *get_l2_fhdr(u8 *data) +{ + return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD); +} + + +struct bnx2_sw_pg { + struct page *page; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct bnx2_sw_tx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + unsigned short is_gso; + unsigned short nr_frags; +}; + +#define SW_RXBD_RING_SIZE (sizeof(struct bnx2_sw_bd) * BNX2_RX_DESC_CNT) +#define SW_RXPG_RING_SIZE (sizeof(struct bnx2_sw_pg) * BNX2_RX_DESC_CNT) +#define RXBD_RING_SIZE (sizeof(struct bnx2_rx_bd) * BNX2_RX_DESC_CNT) +#define SW_TXBD_RING_SIZE (sizeof(struct bnx2_sw_tx_bd) * BNX2_TX_DESC_CNT) +#define TXBD_RING_SIZE (sizeof(struct bnx2_tx_bd) * BNX2_TX_DESC_CNT) + +/* Buffered flash (Atmel: AT45DB011B) specific information */ +#define SEEPROM_PAGE_BITS 2 +#define SEEPROM_PHY_PAGE_SIZE (1 << SEEPROM_PAGE_BITS) +#define SEEPROM_BYTE_ADDR_MASK (SEEPROM_PHY_PAGE_SIZE-1) +#define SEEPROM_PAGE_SIZE 4 +#define SEEPROM_TOTAL_SIZE 65536 + +#define BUFFERED_FLASH_PAGE_BITS 9 +#define BUFFERED_FLASH_PHY_PAGE_SIZE (1 << BUFFERED_FLASH_PAGE_BITS) +#define BUFFERED_FLASH_BYTE_ADDR_MASK (BUFFERED_FLASH_PHY_PAGE_SIZE-1) +#define BUFFERED_FLASH_PAGE_SIZE 264 +#define BUFFERED_FLASH_TOTAL_SIZE 0x21000 + +#define SAIFUN_FLASH_PAGE_BITS 8 +#define SAIFUN_FLASH_PHY_PAGE_SIZE (1 << SAIFUN_FLASH_PAGE_BITS) +#define SAIFUN_FLASH_BYTE_ADDR_MASK (SAIFUN_FLASH_PHY_PAGE_SIZE-1) +#define SAIFUN_FLASH_PAGE_SIZE 256 +#define SAIFUN_FLASH_BASE_TOTAL_SIZE 65536 + +#define ST_MICRO_FLASH_PAGE_BITS 8 +#define ST_MICRO_FLASH_PHY_PAGE_SIZE (1 << ST_MICRO_FLASH_PAGE_BITS) +#define ST_MICRO_FLASH_BYTE_ADDR_MASK (ST_MICRO_FLASH_PHY_PAGE_SIZE-1) +#define ST_MICRO_FLASH_PAGE_SIZE 256 +#define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536 + +#define BCM5709_FLASH_PAGE_BITS 8 +#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS) +#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1) +#define BCM5709_FLASH_PAGE_SIZE 256 + +#define NVRAM_TIMEOUT_COUNT 30000 + + +#define FLASH_STRAP_MASK (BNX2_NVM_CFG1_FLASH_MODE | \ + BNX2_NVM_CFG1_BUFFER_MODE | \ + BNX2_NVM_CFG1_PROTECT_MODE | \ + BNX2_NVM_CFG1_FLASH_SIZE) + +#define FLASH_BACKUP_STRAP_MASK (0xf << 26) + +struct flash_spec { + u32 strapping; + u32 config1; + u32 config2; + u32 config3; + u32 write1; + u32 flags; +#define BNX2_NV_BUFFERED 0x00000001 +#define BNX2_NV_TRANSLATE 0x00000002 +#define BNX2_NV_WREN 0x00000004 + u32 page_bits; + u32 page_size; + u32 addr_mask; + u32 total_size; + u8 *name; +}; + +#define BNX2_MAX_MSIX_HW_VEC 9 +#define BNX2_MAX_MSIX_VEC 9 +#ifdef BCM_CNIC +#define BNX2_MIN_MSIX_VEC 2 +#else +#define BNX2_MIN_MSIX_VEC 1 +#endif + + +struct bnx2_irq { + irq_handler_t handler; + unsigned int vector; + u8 requested; + char name[IFNAMSIZ + 2]; +}; + +struct bnx2_tx_ring_info { + u32 tx_prod_bseq; + u16 tx_prod; + u32 tx_bidx_addr; + u32 tx_bseq_addr; + + struct bnx2_tx_bd *tx_desc_ring; + struct bnx2_sw_tx_bd *tx_buf_ring; + + u16 tx_cons; + u16 hw_tx_cons; + + dma_addr_t tx_desc_mapping; +}; + +struct bnx2_rx_ring_info { + u32 rx_prod_bseq; + u16 rx_prod; + u16 rx_cons; + + u32 rx_bidx_addr; + u32 rx_bseq_addr; + u32 rx_pg_bidx_addr; + + u16 rx_pg_prod; + u16 rx_pg_cons; + + struct bnx2_sw_bd *rx_buf_ring; + struct bnx2_rx_bd *rx_desc_ring[BNX2_MAX_RX_RINGS]; + struct bnx2_sw_pg *rx_pg_ring; + struct bnx2_rx_bd 
*rx_pg_desc_ring[BNX2_MAX_RX_PG_RINGS]; + + dma_addr_t rx_desc_mapping[BNX2_MAX_RX_RINGS]; + dma_addr_t rx_pg_desc_mapping[BNX2_MAX_RX_PG_RINGS]; +}; + +struct bnx2_napi { + struct napi_struct napi ____cacheline_aligned; + struct bnx2 *bp; + union { + struct status_block *msi; + struct status_block_msix *msix; + } status_blk; + u16 *hw_tx_cons_ptr; + u16 *hw_rx_cons_ptr; + u32 last_status_idx; + u32 int_num; + +#ifdef BCM_CNIC + u32 cnic_tag; + int cnic_present; +#endif + + struct bnx2_rx_ring_info rx_ring; + struct bnx2_tx_ring_info tx_ring; +}; + +struct bnx2 { + /* Fields used in the tx and intr/napi performance paths are grouped */ + /* together in the beginning of the structure. */ + void __iomem *regview; + + struct net_device *dev; + struct pci_dev *pdev; + + atomic_t intr_sem; + + u32 flags; +#define BNX2_FLAG_PCIX 0x00000001 +#define BNX2_FLAG_PCI_32BIT 0x00000002 +#define BNX2_FLAG_MSIX_CAP 0x00000004 +#define BNX2_FLAG_NO_WOL 0x00000008 +#define BNX2_FLAG_USING_MSI 0x00000020 +#define BNX2_FLAG_ASF_ENABLE 0x00000040 +#define BNX2_FLAG_MSI_CAP 0x00000080 +#define BNX2_FLAG_ONE_SHOT_MSI 0x00000100 +#define BNX2_FLAG_PCIE 0x00000200 +#define BNX2_FLAG_USING_MSIX 0x00000400 +#define BNX2_FLAG_USING_MSI_OR_MSIX (BNX2_FLAG_USING_MSI | \ + BNX2_FLAG_USING_MSIX) +#define BNX2_FLAG_JUMBO_BROKEN 0x00000800 +#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000 +#define BNX2_FLAG_BROKEN_STATS 0x00002000 +#define BNX2_FLAG_AER_ENABLED 0x00004000 + + struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; + + u32 rx_buf_use_size; /* useable size */ + u32 rx_buf_size; /* with alignment */ + u32 rx_copy_thresh; + u32 rx_jumbo_thresh; + u32 rx_max_ring_idx; + u32 rx_max_pg_ring_idx; + + /* TX constants */ + int tx_ring_size; + u32 tx_wake_thresh; + +#ifdef BCM_CNIC + struct cnic_ops __rcu *cnic_ops; + void *cnic_data; +#endif + + /* End of fields used in the performance code paths. */ + + unsigned int current_interval; +#define BNX2_TIMER_INTERVAL HZ +#define BNX2_SERDES_AN_TIMEOUT (HZ / 3) +#define BNX2_SERDES_FORCED_TIMEOUT (HZ / 10) + + struct timer_list timer; + struct work_struct reset_task; + + /* Used to synchronize phy accesses. 
*/ + spinlock_t phy_lock; + spinlock_t indirect_lock; + + u32 phy_flags; +#define BNX2_PHY_FLAG_SERDES 0x00000001 +#define BNX2_PHY_FLAG_CRC_FIX 0x00000002 +#define BNX2_PHY_FLAG_PARALLEL_DETECT 0x00000004 +#define BNX2_PHY_FLAG_2_5G_CAPABLE 0x00000008 +#define BNX2_PHY_FLAG_INT_MODE_MASK 0x00000300 +#define BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING 0x00000100 +#define BNX2_PHY_FLAG_INT_MODE_LINK_READY 0x00000200 +#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400 +#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800 +#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000 +#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000 +#define BNX2_PHY_FLAG_MDIX 0x00004000 + + u32 mii_bmcr; + u32 mii_bmsr; + u32 mii_bmsr1; + u32 mii_adv; + u32 mii_lpa; + u32 mii_up1; + + u32 chip_id; + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ +#define BNX2_CHIP(bp) (((bp)->chip_id) & 0xffff0000) +#define BNX2_CHIP_5706 0x57060000 +#define BNX2_CHIP_5708 0x57080000 +#define BNX2_CHIP_5709 0x57090000 + +#define BNX2_CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000) +#define BNX2_CHIP_REV_Ax 0x00000000 +#define BNX2_CHIP_REV_Bx 0x00001000 +#define BNX2_CHIP_REV_Cx 0x00002000 + +#define BNX2_CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0) +#define BNX2_CHIP_BOND(bp) (((bp)->chip_id) & 0x0000000f) + +#define BNX2_CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0) +#define BNX2_CHIP_ID_5706_A0 0x57060000 +#define BNX2_CHIP_ID_5706_A1 0x57060010 +#define BNX2_CHIP_ID_5706_A2 0x57060020 +#define BNX2_CHIP_ID_5708_A0 0x57080000 +#define BNX2_CHIP_ID_5708_B0 0x57081000 +#define BNX2_CHIP_ID_5708_B1 0x57081010 +#define BNX2_CHIP_ID_5709_A0 0x57090000 +#define BNX2_CHIP_ID_5709_A1 0x57090010 + +/* A serdes chip will have the first bit of the bond id set. */ +#define BNX2_CHIP_BOND_SERDES_BIT 0x01 + + u32 phy_addr; + u32 phy_id; + + u16 bus_speed_mhz; + u8 wol; + + u8 pad; + + u16 fw_wr_seq; + u16 fw_drv_pulse_wr_seq; + u32 fw_last_msg; + + int rx_max_ring; + int rx_ring_size; + + int rx_max_pg_ring; + int rx_pg_ring_size; + + u16 tx_quick_cons_trip; + u16 tx_quick_cons_trip_int; + u16 rx_quick_cons_trip; + u16 rx_quick_cons_trip_int; + u16 comp_prod_trip; + u16 comp_prod_trip_int; + u16 tx_ticks; + u16 tx_ticks_int; + u16 com_ticks; + u16 com_ticks_int; + u16 cmd_ticks; + u16 cmd_ticks_int; + u16 rx_ticks; + u16 rx_ticks_int; + + u32 stats_ticks; + + dma_addr_t status_blk_mapping; + + struct statistics_block *stats_blk; + struct statistics_block *temp_stats_blk; + dma_addr_t stats_blk_mapping; + + int ctx_pages; + void *ctx_blk[4]; + dma_addr_t ctx_blk_mapping[4]; + + u32 hc_cmd; + u32 rx_mode; + + u16 req_line_speed; + u8 req_duplex; + + u8 phy_port; + u8 link_up; + + u16 line_speed; + u8 duplex; + u8 flow_ctrl; /* actual flow ctrl settings */ + /* may be different from */ + /* req_flow_ctrl if autoneg */ + u32 advertising; + + u8 req_flow_ctrl; /* flow ctrl advertisement */ + /* settings or forced */ + /* settings */ + u8 autoneg; +#define AUTONEG_SPEED 1 +#define AUTONEG_FLOW_CTRL 2 + + u8 loopback; +#define MAC_LOOPBACK 1 +#define PHY_LOOPBACK 2 + + u8 serdes_an_pending; + + u8 mac_addr[8]; + + u32 shmem_base; + + char fw_version[32]; + + int pm_cap; + int pcix_cap; + + const struct flash_spec *flash_info; + u32 flash_size; + + int status_stats_size; + + struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC]; + int irq_nvecs; + + u8 func; + + u8 num_tx_rings; + u8 num_rx_rings; + + int num_req_tx_rings; + int num_req_rx_rings; + + u32 leds_save; + u32 idle_chk_status_idx; + +#ifdef BCM_CNIC + struct mutex cnic_lock; + struct cnic_eth_dev cnic_eth_dev; + struct 
cnic_eth_dev *(*cnic_probe)(struct net_device *); +#endif + + const struct firmware *mips_firmware; + const struct firmware *rv2p_firmware; +}; + +#define BNX2_RD(bp, offset) \ + readl(bp->regview + offset) + +#define BNX2_WR(bp, offset, val) \ + writel(val, bp->regview + offset) + +#define BNX2_WR16(bp, offset, val) \ + writew(val, bp->regview + offset) + +struct cpu_reg { + u32 mode; + u32 mode_value_halt; + u32 mode_value_sstep; + + u32 state; + u32 state_value_clear; + + u32 gpr0; + u32 evmask; + u32 pc; + u32 inst; + u32 bp; + + u32 spad_base; + + u32 mips_view_base; +}; + +struct bnx2_fw_file_section { + __be32 addr; + __be32 len; + __be32 offset; +}; + +struct bnx2_mips_fw_file_entry { + __be32 start_addr; + struct bnx2_fw_file_section text; + struct bnx2_fw_file_section data; + struct bnx2_fw_file_section rodata; +}; + +struct bnx2_rv2p_fw_file_entry { + struct bnx2_fw_file_section rv2p; + __be32 fixup[8]; +}; + +struct bnx2_mips_fw_file { + struct bnx2_mips_fw_file_entry com; + struct bnx2_mips_fw_file_entry cp; + struct bnx2_mips_fw_file_entry rxp; + struct bnx2_mips_fw_file_entry tpat; + struct bnx2_mips_fw_file_entry txp; +}; + +struct bnx2_rv2p_fw_file { + struct bnx2_rv2p_fw_file_entry proc1; + struct bnx2_rv2p_fw_file_entry proc2; +}; + +#define RV2P_P1_FIXUP_PAGE_SIZE_IDX 0 +#define RV2P_BD_PAGE_SIZE_MSK 0xffff +#define RV2P_BD_PAGE_SIZE ((BNX2_PAGE_SIZE / 16) - 1) + +#define RV2P_PROC1 0 +#define RV2P_PROC2 1 + + +/* This value (in milliseconds) determines the frequency of the driver + * issuing the PULSE message code. The firmware monitors this periodic + * pulse to determine when to switch to an OS-absent mode. */ +#define BNX2_DRV_PULSE_PERIOD_MS 250 + +/* This value (in milliseconds) determines how long the driver should + * wait for an acknowledgement from the firmware before timing out. Once + * the firmware has timed out, the driver will assume there is no firmware + * running and there won't be any firmware-driver synchronization during a + * driver reset. 
*/ +#define BNX2_FW_ACK_TIME_OUT_MS 1000 + + +#define BNX2_DRV_RESET_SIGNATURE 0x00000000 +#define BNX2_DRV_RESET_SIGNATURE_MAGIC 0x4841564b /* HAVK */ +//#define DRV_RESET_SIGNATURE_MAGIC 0x47495352 /* RSIG */ + +#define BNX2_DRV_MB 0x00000004 +#define BNX2_DRV_MSG_CODE 0xff000000 +#define BNX2_DRV_MSG_CODE_RESET 0x01000000 +#define BNX2_DRV_MSG_CODE_UNLOAD 0x02000000 +#define BNX2_DRV_MSG_CODE_SHUTDOWN 0x03000000 +#define BNX2_DRV_MSG_CODE_SUSPEND_WOL 0x04000000 +#define BNX2_DRV_MSG_CODE_FW_TIMEOUT 0x05000000 +#define BNX2_DRV_MSG_CODE_PULSE 0x06000000 +#define BNX2_DRV_MSG_CODE_DIAG 0x07000000 +#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 +#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000 +#define BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE 0x0d000000 +#define BNX2_DRV_MSG_CODE_CMD_SET_LINK 0x10000000 + +#define BNX2_DRV_MSG_DATA 0x00ff0000 +#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000 +#define BNX2_DRV_MSG_DATA_WAIT1 0x00020000 +#define BNX2_DRV_MSG_DATA_WAIT2 0x00030000 +#define BNX2_DRV_MSG_DATA_WAIT3 0x00040000 + +#define BNX2_DRV_MSG_SEQ 0x0000ffff + +#define BNX2_FW_MB 0x00000008 +#define BNX2_FW_MSG_ACK 0x0000ffff +#define BNX2_FW_MSG_STATUS_MASK 0x00ff0000 +#define BNX2_FW_MSG_STATUS_OK 0x00000000 +#define BNX2_FW_MSG_STATUS_FAILURE 0x00ff0000 + +#define BNX2_LINK_STATUS 0x0000000c +#define BNX2_LINK_STATUS_INIT_VALUE 0xffffffff +#define BNX2_LINK_STATUS_LINK_UP 0x1 +#define BNX2_LINK_STATUS_LINK_DOWN 0x0 +#define BNX2_LINK_STATUS_SPEED_MASK 0x1e +#define BNX2_LINK_STATUS_AN_INCOMPLETE (0<<1) +#define BNX2_LINK_STATUS_10HALF (1<<1) +#define BNX2_LINK_STATUS_10FULL (2<<1) +#define BNX2_LINK_STATUS_100HALF (3<<1) +#define BNX2_LINK_STATUS_100BASE_T4 (4<<1) +#define BNX2_LINK_STATUS_100FULL (5<<1) +#define BNX2_LINK_STATUS_1000HALF (6<<1) +#define BNX2_LINK_STATUS_1000FULL (7<<1) +#define BNX2_LINK_STATUS_2500HALF (8<<1) +#define BNX2_LINK_STATUS_2500FULL (9<<1) +#define BNX2_LINK_STATUS_AN_ENABLED (1<<5) +#define BNX2_LINK_STATUS_AN_COMPLETE (1<<6) +#define BNX2_LINK_STATUS_PARALLEL_DET (1<<7) +#define BNX2_LINK_STATUS_RESERVED (1<<8) +#define BNX2_LINK_STATUS_PARTNER_AD_1000FULL (1<<9) +#define BNX2_LINK_STATUS_PARTNER_AD_1000HALF (1<<10) +#define BNX2_LINK_STATUS_PARTNER_AD_100BT4 (1<<11) +#define BNX2_LINK_STATUS_PARTNER_AD_100FULL (1<<12) +#define BNX2_LINK_STATUS_PARTNER_AD_100HALF (1<<13) +#define BNX2_LINK_STATUS_PARTNER_AD_10FULL (1<<14) +#define BNX2_LINK_STATUS_PARTNER_AD_10HALF (1<<15) +#define BNX2_LINK_STATUS_TX_FC_ENABLED (1<<16) +#define BNX2_LINK_STATUS_RX_FC_ENABLED (1<<17) +#define BNX2_LINK_STATUS_PARTNER_SYM_PAUSE_CAP (1<<18) +#define BNX2_LINK_STATUS_PARTNER_ASYM_PAUSE_CAP (1<<19) +#define BNX2_LINK_STATUS_SERDES_LINK (1<<20) +#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL (1<<21) +#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF (1<<22) +#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED (1<<31) + +#define BNX2_DRV_PULSE_MB 0x00000010 +#define BNX2_DRV_PULSE_SEQ_MASK 0x00007fff + +/* Indicate to the firmware not to go into the + * OS absent when it is not getting driver pulse. + * This is used for debugging. 
*/ +#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE 0x00080000 + +#define BNX2_DRV_MB_ARG0 0x00000014 +#define BNX2_NETLINK_SET_LINK_SPEED_10HALF (1<<0) +#define BNX2_NETLINK_SET_LINK_SPEED_10FULL (1<<1) +#define BNX2_NETLINK_SET_LINK_SPEED_10 \ + (BNX2_NETLINK_SET_LINK_SPEED_10HALF | \ + BNX2_NETLINK_SET_LINK_SPEED_10FULL) +#define BNX2_NETLINK_SET_LINK_SPEED_100HALF (1<<2) +#define BNX2_NETLINK_SET_LINK_SPEED_100FULL (1<<3) +#define BNX2_NETLINK_SET_LINK_SPEED_100 \ + (BNX2_NETLINK_SET_LINK_SPEED_100HALF | \ + BNX2_NETLINK_SET_LINK_SPEED_100FULL) +#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF (1<<4) +#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL (1<<5) +#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF (1<<6) +#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL (1<<7) +#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF (1<<8) +#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL (1<<9) +#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG (1<<10) +#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE (1<<11) +#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE (1<<12) +#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE (1<<13) +#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED (1<<14) +#define BNX2_NETLINK_SET_LINK_PHY_RESET (1<<15) + +#define BNX2_DEV_INFO_SIGNATURE 0x00000020 +#define BNX2_DEV_INFO_SIGNATURE_MAGIC 0x44564900 +#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK 0xffffff00 +#define BNX2_DEV_INFO_FEATURE_CFG_VALID 0x01 +#define BNX2_DEV_INFO_SECONDARY_PORT 0x80 +#define BNX2_DEV_INFO_DRV_ALWAYS_ALIVE 0x40 + +#define BNX2_SHARED_HW_CFG_PART_NUM 0x00000024 + +#define BNX2_SHARED_HW_CFG_POWER_DISSIPATED 0x00000034 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D3_MASK 0xff000000 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D2_MASK 0xff0000 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D1_MASK 0xff00 +#define BNX2_SHARED_HW_CFG_POWER_STATE_D0_MASK 0xff + +#define BNX2_SHARED_HW_CFG POWER_CONSUMED 0x00000038 +#define BNX2_SHARED_HW_CFG_CONFIG 0x0000003c +#define BNX2_SHARED_HW_CFG_DESIGN_NIC 0 +#define BNX2_SHARED_HW_CFG_DESIGN_LOM 0x1 +#define BNX2_SHARED_HW_CFG_PHY_COPPER 0 +#define BNX2_SHARED_HW_CFG_PHY_FIBER 0x2 +#define BNX2_SHARED_HW_CFG_PHY_2_5G 0x20 +#define BNX2_SHARED_HW_CFG_PHY_BACKPLANE 0x40 +#define BNX2_SHARED_HW_CFG_LED_MODE_SHIFT_BITS 8 +#define BNX2_SHARED_HW_CFG_LED_MODE_MASK 0x300 +#define BNX2_SHARED_HW_CFG_LED_MODE_MAC 0 +#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1 0x100 +#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2 0x200 +#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX 0x8000 + +#define BNX2_SHARED_HW_CFG_CONFIG2 0x00000040 +#define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK 0x00fff000 + +#define BNX2_DEV_INFO_BC_REV 0x0000004c + +#define BNX2_PORT_HW_CFG_MAC_UPPER 0x00000050 +#define BNX2_PORT_HW_CFG_UPPERMAC_MASK 0xffff + +#define BNX2_PORT_HW_CFG_MAC_LOWER 0x00000054 +#define BNX2_PORT_HW_CFG_CONFIG 0x00000058 +#define BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK 0x0000ffff +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK 0x001f0000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN 0x00000000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G 0x00030000 +#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_2_5G 0x00040000 + +#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER 0x00000068 +#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER 0x0000006c +#define BNX2_PORT_HW_CFG_IMD_MAC_B_UPPER 0x00000070 +#define BNX2_PORT_HW_CFG_IMD_MAC_B_LOWER 0x00000074 +#define BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER 0x00000078 +#define BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER 0x0000007c + +#define BNX2_DEV_INFO_PER_PORT_HW_CONFIG2 0x000000b4 + +#define BNX2_DEV_INFO_FORMAT_REV 0x000000c4 +#define BNX2_DEV_INFO_FORMAT_REV_MASK 0xff000000 +#define 
BNX2_DEV_INFO_FORMAT_REV_ID ('A' << 24) + +#define BNX2_SHARED_FEATURE 0x000000c8 +#define BNX2_SHARED_FEATURE_MASK 0xffffffff + +#define BNX2_PORT_FEATURE 0x000000d8 +#define BNX2_PORT2_FEATURE 0x00000014c +#define BNX2_PORT_FEATURE_WOL_ENABLED 0x01000000 +#define BNX2_PORT_FEATURE_MBA_ENABLED 0x02000000 +#define BNX2_PORT_FEATURE_ASF_ENABLED 0x04000000 +#define BNX2_PORT_FEATURE_IMD_ENABLED 0x08000000 +#define BNX2_PORT_FEATURE_BAR1_SIZE_MASK 0xf +#define BNX2_PORT_FEATURE_BAR1_SIZE_DISABLED 0x0 +#define BNX2_PORT_FEATURE_BAR1_SIZE_64K 0x1 +#define BNX2_PORT_FEATURE_BAR1_SIZE_128K 0x2 +#define BNX2_PORT_FEATURE_BAR1_SIZE_256K 0x3 +#define BNX2_PORT_FEATURE_BAR1_SIZE_512K 0x4 +#define BNX2_PORT_FEATURE_BAR1_SIZE_1M 0x5 +#define BNX2_PORT_FEATURE_BAR1_SIZE_2M 0x6 +#define BNX2_PORT_FEATURE_BAR1_SIZE_4M 0x7 +#define BNX2_PORT_FEATURE_BAR1_SIZE_8M 0x8 +#define BNX2_PORT_FEATURE_BAR1_SIZE_16M 0x9 +#define BNX2_PORT_FEATURE_BAR1_SIZE_32M 0xa +#define BNX2_PORT_FEATURE_BAR1_SIZE_64M 0xb +#define BNX2_PORT_FEATURE_BAR1_SIZE_128M 0xc +#define BNX2_PORT_FEATURE_BAR1_SIZE_256M 0xd +#define BNX2_PORT_FEATURE_BAR1_SIZE_512M 0xe +#define BNX2_PORT_FEATURE_BAR1_SIZE_1G 0xf + +#define BNX2_PORT_FEATURE_WOL 0xdc +#define BNX2_PORT2_FEATURE_WOL 0x150 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_SHIFT_BITS 4 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_MASK 0x30 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_DISABLE 0 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC 0x10 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_ACPI 0x20 +#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x30 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_MASK 0xf +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_AUTONEG 0 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10HALF 1 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10FULL 2 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100HALF 3 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100FULL 4 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000HALF 5 +#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000FULL 6 +#define BNX2_PORT_FEATURE_WOL_AUTONEG_ADVERTISE_1000 0x40 +#define BNX2_PORT_FEATURE_WOL_RESERVED_PAUSE_CAP 0x400 +#define BNX2_PORT_FEATURE_WOL_RESERVED_ASYM_PAUSE_CAP 0x800 + +#define BNX2_PORT_FEATURE_MBA 0xe0 +#define BNX2_PORT2_FEATURE_MBA 0x154 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT_BITS 0 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x3 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 1 +#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 2 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_SHIFT_BITS 2 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_AUTONEG 0 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10HALF 0x4 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10FULL 0x8 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100HALF 0xc +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100FULL 0x10 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000HALF 0x14 +#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000FULL 0x18 +#define BNX2_PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x40 +#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_S 0 +#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x80 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT_BITS 8 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0xff00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1K 0x100 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x200 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x300 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x400 +#define 
BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x500 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x600 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x700 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x800 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x900 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0xa00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0xb00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0xc00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0xd00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0xe00 +#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0xf00 +#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT_BITS 16 +#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0xf0000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT_BITS 20 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x300000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x100000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x200000 +#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x300000 + +#define BNX2_PORT_FEATURE_IMD 0xe4 +#define BNX2_PORT2_FEATURE_IMD 0x158 +#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_DEFAULT 0 +#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_ENABLE 1 + +#define BNX2_PORT_FEATURE_VLAN 0xe8 +#define BNX2_PORT2_FEATURE_VLAN 0x15c +#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK 0xffff +#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE 0x10000 + +#define BNX2_MFW_VER_PTR 0x00000014c + +#define BNX2_BC_STATE_RESET_TYPE 0x000001c0 +#define BNX2_BC_STATE_RESET_TYPE_SIG 0x00005254 +#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK 0x0000ffff +#define BNX2_BC_STATE_RESET_TYPE_NONE (BNX2_BC_STATE_RESET_TYPE_SIG | \ + 0x00010000) +#define BNX2_BC_STATE_RESET_TYPE_PCI (BNX2_BC_STATE_RESET_TYPE_SIG | \ + 0x00020000) +#define BNX2_BC_STATE_RESET_TYPE_VAUX (BNX2_BC_STATE_RESET_TYPE_SIG | \ + 0x00030000) +#define BNX2_BC_STATE_RESET_TYPE_DRV_MASK DRV_MSG_CODE +#define BNX2_BC_STATE_RESET_TYPE_DRV_RESET (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_RESET) +#define BNX2_BC_STATE_RESET_TYPE_DRV_UNLOAD (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_UNLOAD) +#define BNX2_BC_STATE_RESET_TYPE_DRV_SHUTDOWN (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_SHUTDOWN) +#define BNX2_BC_STATE_RESET_TYPE_DRV_WOL (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_WOL) +#define BNX2_BC_STATE_RESET_TYPE_DRV_DIAG (BNX2_BC_STATE_RESET_TYPE_SIG | \ + DRV_MSG_CODE_DIAG) +#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \ + (msg)) + +#define BNX2_BC_RESET_TYPE 0x000001c0 + +#define BNX2_BC_STATE 0x000001c4 +#define BNX2_BC_STATE_ERR_MASK 0x0000ff00 +#define BNX2_BC_STATE_SIGN 0x42530000 +#define BNX2_BC_STATE_SIGN_MASK 0xffff0000 +#define BNX2_BC_STATE_BC1_START (BNX2_BC_STATE_SIGN | 0x1) +#define BNX2_BC_STATE_GET_NVM_CFG1 (BNX2_BC_STATE_SIGN | 0x2) +#define BNX2_BC_STATE_PROG_BAR (BNX2_BC_STATE_SIGN | 0x3) +#define BNX2_BC_STATE_INIT_VID (BNX2_BC_STATE_SIGN | 0x4) +#define BNX2_BC_STATE_GET_NVM_CFG2 (BNX2_BC_STATE_SIGN | 0x5) +#define BNX2_BC_STATE_APPLY_WKARND (BNX2_BC_STATE_SIGN | 0x6) +#define BNX2_BC_STATE_LOAD_BC2 (BNX2_BC_STATE_SIGN | 0x7) +#define BNX2_BC_STATE_GOING_BC2 (BNX2_BC_STATE_SIGN | 0x8) +#define BNX2_BC_STATE_GOING_DIAG (BNX2_BC_STATE_SIGN | 0x9) +#define BNX2_BC_STATE_RT_FINAL_INIT (BNX2_BC_STATE_SIGN | 0x81) +#define BNX2_BC_STATE_RT_WKARND (BNX2_BC_STATE_SIGN | 0x82) +#define BNX2_BC_STATE_RT_DRV_PULSE (BNX2_BC_STATE_SIGN | 0x83) +#define BNX2_BC_STATE_RT_FIOEVTS (BNX2_BC_STATE_SIGN | 0x84) +#define BNX2_BC_STATE_RT_DRV_CMD 
(BNX2_BC_STATE_SIGN | 0x85) +#define BNX2_BC_STATE_RT_LOW_POWER (BNX2_BC_STATE_SIGN | 0x86) +#define BNX2_BC_STATE_RT_SET_WOL (BNX2_BC_STATE_SIGN | 0x87) +#define BNX2_BC_STATE_RT_OTHER_FW (BNX2_BC_STATE_SIGN | 0x88) +#define BNX2_BC_STATE_RT_GOING_D3 (BNX2_BC_STATE_SIGN | 0x89) +#define BNX2_BC_STATE_ERR_BAD_VERSION (BNX2_BC_STATE_SIGN | 0x0100) +#define BNX2_BC_STATE_ERR_BAD_BC2_CRC (BNX2_BC_STATE_SIGN | 0x0200) +#define BNX2_BC_STATE_ERR_BC1_LOOP (BNX2_BC_STATE_SIGN | 0x0300) +#define BNX2_BC_STATE_ERR_UNKNOWN_CMD (BNX2_BC_STATE_SIGN | 0x0400) +#define BNX2_BC_STATE_ERR_DRV_DEAD (BNX2_BC_STATE_SIGN | 0x0500) +#define BNX2_BC_STATE_ERR_NO_RXP (BNX2_BC_STATE_SIGN | 0x0600) +#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF (BNX2_BC_STATE_SIGN | 0x0700) + +#define BNX2_BC_STATE_CONDITION 0x000001c8 +#define BNX2_CONDITION_MFW_RUN_UNKNOWN 0x00000000 +#define BNX2_CONDITION_MFW_RUN_IPMI 0x00002000 +#define BNX2_CONDITION_MFW_RUN_UMP 0x00004000 +#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 +#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 +#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 +#define BNX2_CONDITION_PM_STATE_MASK 0x00030000 +#define BNX2_CONDITION_PM_STATE_FULL 0x00030000 +#define BNX2_CONDITION_PM_STATE_PREP 0x00020000 +#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000 + +#define BNX2_BC_STATE_DEBUG_CMD 0x1dc +#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 +#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK 0xffff0000 +#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK 0xffff +#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE 0xffff + +#define BNX2_FW_EVT_CODE_MB 0x354 +#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000 +#define BNX2_FW_EVT_CODE_LINK_EVENT 0x00000001 + +#define BNX2_DRV_ACK_CAP_MB 0x364 +#define BNX2_DRV_ACK_CAP_SIGNATURE 0x35450000 +#define BNX2_CAPABILITY_SIGNATURE_MASK 0xFFFF0000 + +#define BNX2_FW_CAP_MB 0x368 +#define BNX2_FW_CAP_SIGNATURE 0xaa550000 +#define BNX2_FW_ACK_DRV_SIGNATURE 0x52500000 +#define BNX2_FW_CAP_SIGNATURE_MASK 0xffff0000 +#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE 0x00000001 +#define BNX2_FW_CAP_REMOTE_PHY_PRESENT 0x00000002 +#define BNX2_FW_CAP_MFW_CAN_KEEP_VLAN 0x00000008 +#define BNX2_FW_CAP_BC_CAN_KEEP_VLAN 0x00000010 +#define BNX2_FW_CAP_CAN_KEEP_VLAN (BNX2_FW_CAP_BC_CAN_KEEP_VLAN | \ + BNX2_FW_CAP_MFW_CAN_KEEP_VLAN) + +#define BNX2_RPHY_SIGNATURE 0x36c +#define BNX2_RPHY_LOAD_SIGNATURE 0x5a5a5a5a + +#define BNX2_RPHY_FLAGS 0x370 +#define BNX2_RPHY_SERDES_LINK 0x374 +#define BNX2_RPHY_COPPER_LINK 0x378 + +#define BNX2_ISCSI_INITIATOR 0x3dc +#define BNX2_ISCSI_INITIATOR_EN 0x00080000 + +#define BNX2_ISCSI_MAX_CONN 0x3e4 +#define BNX2_ISCSI_MAX_CONN_MASK 0xffff0000 +#define BNX2_ISCSI_MAX_CONN_SHIFT 16 + +#define HOST_VIEW_SHMEM_BASE 0x167c00 + +#define DP_SHMEM_LINE(bp, offset) \ + netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n", \ + offset, \ + bnx2_shmem_rd(bp, offset), \ + bnx2_shmem_rd(bp, offset + 4), \ + bnx2_shmem_rd(bp, offset + 8), \ + bnx2_shmem_rd(bp, offset + 12)) + +#endif diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h new file mode 100644 index 000000000..b0f2ccada --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2_fw.h @@ -0,0 +1,89 @@ +/* bnx2_fw.h: QLogic bnx2 network driver. 
+ * + * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation + * Copyright (c) 2014-2015 QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +/* Initialized Values for the Completion Processor. */ +static const struct cpu_reg cpu_reg_com = { + .mode = BNX2_COM_CPU_MODE, + .mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA, + .state = BNX2_COM_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_COM_CPU_REG_FILE, + .evmask = BNX2_COM_CPU_EVENT_MASK, + .pc = BNX2_COM_CPU_PROGRAM_COUNTER, + .inst = BNX2_COM_CPU_INSTRUCTION, + .bp = BNX2_COM_CPU_HW_BREAKPOINT, + .spad_base = BNX2_COM_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values the Command Processor. */ +static const struct cpu_reg cpu_reg_cp = { + .mode = BNX2_CP_CPU_MODE, + .mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA, + .state = BNX2_CP_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_CP_CPU_REG_FILE, + .evmask = BNX2_CP_CPU_EVENT_MASK, + .pc = BNX2_CP_CPU_PROGRAM_COUNTER, + .inst = BNX2_CP_CPU_INSTRUCTION, + .bp = BNX2_CP_CPU_HW_BREAKPOINT, + .spad_base = BNX2_CP_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values for the RX Processor. */ +static const struct cpu_reg cpu_reg_rxp = { + .mode = BNX2_RXP_CPU_MODE, + .mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA, + .state = BNX2_RXP_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_RXP_CPU_REG_FILE, + .evmask = BNX2_RXP_CPU_EVENT_MASK, + .pc = BNX2_RXP_CPU_PROGRAM_COUNTER, + .inst = BNX2_RXP_CPU_INSTRUCTION, + .bp = BNX2_RXP_CPU_HW_BREAKPOINT, + .spad_base = BNX2_RXP_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values for the TX Patch-up Processor. */ +static const struct cpu_reg cpu_reg_tpat = { + .mode = BNX2_TPAT_CPU_MODE, + .mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA, + .state = BNX2_TPAT_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_TPAT_CPU_REG_FILE, + .evmask = BNX2_TPAT_CPU_EVENT_MASK, + .pc = BNX2_TPAT_CPU_PROGRAM_COUNTER, + .inst = BNX2_TPAT_CPU_INSTRUCTION, + .bp = BNX2_TPAT_CPU_HW_BREAKPOINT, + .spad_base = BNX2_TPAT_SCRATCH, + .mips_view_base = 0x8000000, +}; + +/* Initialized Values for the TX Processor. 
*/ +static const struct cpu_reg cpu_reg_txp = { + .mode = BNX2_TXP_CPU_MODE, + .mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT, + .mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA, + .state = BNX2_TXP_CPU_STATE, + .state_value_clear = 0xffffff, + .gpr0 = BNX2_TXP_CPU_REG_FILE, + .evmask = BNX2_TXP_CPU_EVENT_MASK, + .pc = BNX2_TXP_CPU_PROGRAM_COUNTER, + .inst = BNX2_TXP_CPU_INSTRUCTION, + .bp = BNX2_TXP_CPU_HW_BREAKPOINT, + .spad_base = BNX2_TXP_SCRATCH, + .mips_view_base = 0x8000000, +}; diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile new file mode 100644 index 000000000..116762daa --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for Broadcom 10-Gigabit ethernet driver +# + +obj-$(CONFIG_BNX2X) += bnx2x.o + +bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o +bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h new file mode 100644 index 000000000..1f82a04ce --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -0,0 +1,2580 @@ +/* bnx2x.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + */ + +#ifndef BNX2X_H +#define BNX2X_H + +#include +#include +#include +#include +#include + +#include +#include +#include + +/* compilation time flags */ + +/* define this to make the driver freeze on error to allow getting debug info + * (you will need to reboot afterwards) */ +/* #define BNX2X_STOP_ON_ERROR */ + +#define DRV_MODULE_VERSION "1.710.51-0" +#define DRV_MODULE_RELDATE "2014/02/10" +#define BNX2X_BC_VER 0x040200 + +#if defined(CONFIG_DCB) +#define BCM_DCBNL +#endif + +#include "bnx2x_hsi.h" + +#include "../cnic_if.h" + +#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt) + +#include + +#include "bnx2x_reg.h" +#include "bnx2x_fw_defs.h" +#include "bnx2x_mfw_req.h" +#include "bnx2x_link.h" +#include "bnx2x_sp.h" +#include "bnx2x_dcb.h" +#include "bnx2x_stats.h" +#include "bnx2x_vfpf.h" + +enum bnx2x_int_mode { + BNX2X_INT_MODE_MSIX, + BNX2X_INT_MODE_INTX, + BNX2X_INT_MODE_MSI +}; + +/* error/debug prints */ + +#define DRV_MODULE_NAME "bnx2x" + +/* for messages that are currently off */ +#define BNX2X_MSG_OFF 0x0 +#define BNX2X_MSG_MCP 0x0010000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_STATS 0x0020000 /* was: NETIF_MSG_TIMER */ +#define BNX2X_MSG_NVM 0x0040000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_DMAE 0x0080000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_IOV 0x0800000 +#define BNX2X_MSG_PTP 0x1000000 +#define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/ +#define BNX2X_MSG_ETHTOOL 0x4000000 +#define BNX2X_MSG_DCB 0x8000000 + +/* regular debug print */ +#define DP_INNER(fmt, ...) \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); + +#define DP(__mask, fmt, ...) \ +do { \ + if (unlikely(bp->msg_enable & (__mask))) \ + DP_INNER(fmt, ##__VA_ARGS__); \ +} while (0) + +#define DP_AND(__mask, fmt, ...) 
\ +do { \ + if (unlikely((bp->msg_enable & (__mask)) == __mask)) \ + DP_INNER(fmt, ##__VA_ARGS__); \ +} while (0) + +#define DP_CONT(__mask, fmt, ...) \ +do { \ + if (unlikely(bp->msg_enable & (__mask))) \ + pr_cont(fmt, ##__VA_ARGS__); \ +} while (0) + +/* errors debug print */ +#define BNX2X_DBG_ERR(fmt, ...) \ +do { \ + if (unlikely(netif_msg_probe(bp))) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ +} while (0) + +/* for errors (never masked) */ +#define BNX2X_ERR(fmt, ...) \ +do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ +} while (0) + +#define BNX2X_ERROR(fmt, ...) \ + pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) + +/* before we have a dev->name use dev_info() */ +#define BNX2X_DEV_INFO(fmt, ...) \ +do { \ + if (unlikely(netif_msg_probe(bp))) \ + dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ +} while (0) + +/* Error handling */ +void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int); +#ifdef BNX2X_STOP_ON_ERROR +#define bnx2x_panic() \ +do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_panic_dump(bp, true); \ +} while (0) +#else +#define bnx2x_panic() \ +do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_panic_dump(bp, false); \ +} while (0) +#endif + +#define bnx2x_mc_addr(ha) ((ha)->addr) +#define bnx2x_uc_addr(ha) ((ha)->addr) + +#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff)) +#define U64_HI(x) ((u32)(((u64)(x)) >> 32)) +#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + +#define REG_ADDR(bp, offset) ((bp->regview) + (offset)) + +#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) +#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) +#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) + +#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) +#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) +#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) + +#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) +#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) + +#define REG_RD_DMAE(bp, offset, valp, len32) \ + do { \ + bnx2x_read_dmae(bp, offset, len32);\ + memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \ + } while (0) + +#define REG_WR_DMAE(bp, offset, valp, len32) \ + do { \ + memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \ + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ + offset, len32); \ + } while (0) + +#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \ + REG_WR_DMAE(bp, offset, valp, len32) + +#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ + do { \ + memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ + bnx2x_write_big_buf_wb(bp, addr, len32); \ + } while (0) + +#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ + offsetof(struct shmem_region, field)) +#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) +#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) + +#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \ + offsetof(struct shmem2_region, field)) +#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) +#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) +#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ + offsetof(struct mf_cfg, field)) +#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \ + offsetof(struct mf2_cfg, field)) + +#define MF_CFG_RD(bp, field) REG_RD(bp, 
MF_CFG_ADDR(bp, field)) +#define MF_CFG_WR(bp, field, val) REG_WR(bp,\ + MF_CFG_ADDR(bp, field), (val)) +#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field)) + +#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \ + (SHMEM2_RD((bp), size) > \ + offsetof(struct shmem2_region, field))) + +#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) +#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) + +/* SP SB indices */ + +/* General SP events - stats query, cfc delete, etc */ +#define HC_SP_INDEX_ETH_DEF_CONS 3 + +/* EQ completions */ +#define HC_SP_INDEX_EQ_CONS 7 + +/* FCoE L2 connection completions */ +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 +/* iSCSI L2 */ +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + +/* Special clients parameters */ + +/* SB indices */ +/* FCoE L2 */ +#define BNX2X_FCOE_L2_RX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]) + +#define BNX2X_FCOE_L2_TX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS]) + +/** + * CIDs and CLIDs: + * The CLIDs below are the CLIDs for func 0; the CLID for the other + * functions will be calculated by the formula: + * + * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X + * + */ +enum { + BNX2X_ISCSI_ETH_CL_ID_IDX, + BNX2X_FCOE_ETH_CL_ID_IDX, + BNX2X_MAX_CNIC_ETH_CL_ID_IDX, +}; + +/* use a value high enough to be above all the PFs, which has least significant + * nibble as 8, so when cnic needs to come up with a CID for UIO to use to + * calculate the doorbell address according to the old doorbell configuration + * scheme (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a + * valid number. We must avoid coming up with cid 8 for iscsi since according + * to this method the designated UIO cid will come out 0, and that case has a + * special handling which doesn't suit us. Therefore we will ceiling to the + * closest cid which has least significant nibble 8, and if it is 8 we will + * move forward to 0x18. + */
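The UIO_* macros right below implement the rounding just described. As a quick sanity check, here is a minimal standalone C sketch (illustrative helper names and a plain reimplementation of the kernel's roundup(); it mirrors, not reuses, the driver's macros) verifying that the chosen CID always ends in nibble 8 and that a result of 8 is pushed forward to 0x18:

#include <assert.h>
#include <stdio.h>

#define DPM 8	/* cids traversed by UIO's DPM addition to the doorbell */

/* plain reimplementation of the kernel's roundup() for this sketch */
static unsigned int round_up_to(unsigned int x, unsigned int step)
{
	return ((x + step - 1) / step) * step;
}

/* mirrors the arithmetic of the UIO_* macros: ceiling to the closest cid
 * whose least significant nibble is 8, then skip the UIO special case of 8
 */
static unsigned int cnic_start_cid(unsigned int first_non_l2_cid)
{
	unsigned int r = round_up_to(first_non_l2_cid, DPM);
	unsigned int off = (r + DPM) % (DPM * 2);
	unsigned int aligned = r + off;

	return aligned + (DPM * 2) * (aligned == DPM);
}

int main(void)
{
	unsigned int first;

	for (first = 1; first <= 64; first++) {
		unsigned int cid = cnic_start_cid(first);

		assert((cid & 0xf) == 8);	/* always ends in nibble 8 */
		assert(cid != 8);		/* never the special cid 8 */
		assert(cid >= first);		/* never below the L2 cids */
	}
	printf("first=5 -> 0x%x, first=20 -> 0x%x\n",
	       cnic_start_cid(5), cnic_start_cid(20));
	return 0;
}

Both sample inputs land on 0x18, which is exactly the "move forward to 0x18" case the comment above calls out.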
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \ + (bp)->max_cos) +/* amount of cids traversed by UIO's DPM addition to doorbell */ +#define UIO_DPM 8 +/* roundup to DPM offset */ +#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \ + UIO_DPM)) +/* offset to nearest value which has lsb nibble matching DPM */ +#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \ + (UIO_DPM * 2)) +/* add offset to rounded-up cid to get a value which could be used with UIO */ +#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp)) +/* but wait - avoid UIO special case for cid 0 */ +#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \ + (UIO_DPM_ALIGN(bp) == UIO_DPM)) +/* Properly DPM-aligned CID, adjusted for the cid 0 special case */ +#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \ + (UIO_DPM_CID0_OFFSET(bp))) +/* how many cids were wasted - need this value for cid allocation */ +#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \ + BNX2X_1st_NON_L2_ETH_CID(bp)) + /* iSCSI L2 */ +#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) + /* FCoE L2 */ +#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1) + +#define CNIC_SUPPORT(bp) ((bp)->cnic_support) +#define CNIC_ENABLED(bp) ((bp)->cnic_enabled) +#define CNIC_LOADED(bp) ((bp)->cnic_loaded) +#define FCOE_INIT(bp) ((bp)->fcoe_init) + +#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR + +#define SM_RX_ID 0 +#define SM_TX_ID 1 + +/* defines for multiple tx priority indices */ +#define FIRST_TX_ONLY_COS_INDEX 1 +#define FIRST_TX_COS_INDEX 0 + +/* rules for calculating the cids of tx-only connections */ +#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp)) +#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \ + (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) + +/* fp index inside class of service range */ +#define FP_COS_TO_TXQ(fp, cos, bp) \ + ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) + +/* Indexes for transmission queues array: + * txdata for RSS i CoS j is at location i + (j * num of RSS) + * txdata for FCoE (if it exists) is at location max cos * num of RSS + * txdata for FWD (if it exists) is one location after FCoE + * txdata for OOO (if it exists) is one location after FWD + */ +enum { + FCOE_TXQ_IDX_OFFSET, + FWD_TXQ_IDX_OFFSET, + OOO_TXQ_IDX_OFFSET, +}; +#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos) +#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET) + +/* fast path */ +/* + * This driver uses the new build_skb() API: + * the RX ring buffer contains a pointer to kmalloc() data only, + * and skbs are built only after the hardware has filled the frame. + */ +struct sw_rx_bd { + u8 *data; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct sw_tx_bd { + struct sk_buff *skb; + u16 first_bd; + u8 flags; +/* Set on the first BD descriptor when there is a split BD */ +#define BNX2X_TSO_SPLIT_BD (1<<0) +#define BNX2X_HAS_SECOND_PBD (1<<1) +}; + +struct sw_rx_page { + struct page *page; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +union db_prod { + struct doorbell_set_prod data; + u32 raw; +}; + +/* dropless fc FW/HW related params */ +#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) +#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ?
\ + ETH_MAX_AGGREGATION_QUEUES_E1 :\ + ETH_MAX_AGGREGATION_QUEUES_E1H_E2) +#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) +#define FW_PREFETCH_CNT 16 +#define DROPLESS_FC_HEADROOM 100 + +/* MC hsi */ +#define BCM_PAGE_SHIFT 12 +#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) +#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) +#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) + +#define PAGES_PER_SGE_SHIFT 0 +#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) +#define SGE_PAGE_SIZE PAGE_SIZE +#define SGE_PAGE_SHIFT PAGE_SHIFT +#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) +#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE) +#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \ + SGE_PAGES), 0xffff) + +/* SGE ring related macros */ +#define NUM_RX_SGE_PAGES 2 +#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) +#define NEXT_PAGE_SGE_DESC_CNT 2 +#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) +/* RX_SGE_CNT is promised to be a power of 2 */ +#define RX_SGE_MASK (RX_SGE_CNT - 1) +#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) +#define MAX_RX_SGE (NUM_RX_SGE - 1) +#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ + (MAX_RX_SGE_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ + (x) + 1) +#define RX_SGE(x) ((x) & MAX_RX_SGE) + +/* + * Number of required SGEs is the sum of two: + * 1. Number of possible opened aggregations (next packet for + * these aggregations will probably consume SGE immediately) + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only + * after placement on BD for new TPA aggregation) + * + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page + */ +#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ + (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) +#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ + MAX_RX_SGE_CNT) +#define SGE_TH_LO(bp) (NUM_SGE_REQ + \ + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) +#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +/* Manipulate a bit vector defined as an array of u64 */ + +/* Number of bits in one sge_mask array element */ +#define BIT_VEC64_ELEM_SZ 64 +#define BIT_VEC64_ELEM_SHIFT 6 +#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) + +#define __BIT_VEC64_SET_BIT(el, bit) \ + do { \ + el = ((el) | ((u64)0x1 << (bit))); \ + } while (0) + +#define __BIT_VEC64_CLEAR_BIT(el, bit) \ + do { \ + el = ((el) & (~((u64)0x1 << (bit)))); \ + } while (0) + +#define BIT_VEC64_SET_BIT(vec64, idx) \ + __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ + __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_TEST_BIT(vec64, idx) \ + (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ + ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) + +/* Creates a bitmask of all ones in less significant bits. 
+ idx - index of the most significant bit in the created mask */ +#define BIT_VEC64_ONES_MASK(idx) \ + (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) +#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) + +/*******************************************************/ + +/* Number of u64 elements in SGE mask array */ +#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ) +#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) +#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) + +union host_hc_status_block { + /* pointer to fp status block e1x */ + struct host_hc_status_block_e1x *e1x_sb; + /* pointer to fp status block e2 */ + struct host_hc_status_block_e2 *e2_sb; +}; + +struct bnx2x_agg_info { + /* + * First aggregation buffer is a data buffer, the following - are pages. + * We will preallocate the data buffer for each aggregation when + * we open the interface and will replace the BD at the consumer + * with this one when we receive the TPA_START CQE in order to + * keep the Rx BD ring consistent. + */ + struct sw_rx_bd first_buf; + u8 tpa_state; +#define BNX2X_TPA_START 1 +#define BNX2X_TPA_STOP 2 +#define BNX2X_TPA_ERROR 3 + u8 placement_offset; + u16 parsing_flags; + u16 vlan_tag; + u16 len_on_bd; + u32 rxhash; + enum pkt_hash_types rxhash_type; + u16 gro_size; + u16 full_page; +}; + +#define Q_STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) + +struct bnx2x_fp_txdata { + + struct sw_tx_bd *tx_buf_ring; + + union eth_tx_bd_types *tx_desc_ring; + dma_addr_t tx_desc_mapping; + + u32 cid; + + union db_prod tx_db; + + u16 tx_pkt_prod; + u16 tx_pkt_cons; + u16 tx_bd_prod; + u16 tx_bd_cons; + + unsigned long tx_pkt; + + __le16 *tx_cons_sb; + + int txq_index; + struct bnx2x_fastpath *parent_fp; + int tx_ring_size; +}; + +enum bnx2x_tpa_mode_t { + TPA_MODE_DISABLED, + TPA_MODE_LRO, + TPA_MODE_GRO +}; + +struct bnx2x_fastpath { + struct bnx2x *bp; /* parent */ + + struct napi_struct napi; + +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned long busy_poll_state; +#endif + + union host_hc_status_block status_blk; + /* chip independent shortcuts into sb structure */ + __le16 *sb_index_values; + __le16 *sb_running_index; + /* chip independent shortcut into rx_prods_offset memory */ + u32 ustorm_rx_prods_offset; + + u32 rx_buf_size; + u32 rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */ + dma_addr_t status_blk_mapping; + + enum bnx2x_tpa_mode_t mode; + + u8 max_cos; /* actual number of active tx coses */ + struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS]; + + struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ + struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ + + struct eth_rx_bd *rx_desc_ring; + dma_addr_t rx_desc_mapping; + + union eth_rx_cqe *rx_comp_ring; + dma_addr_t rx_comp_mapping; + + /* SGE ring */ + struct eth_rx_sge *rx_sge_ring; + dma_addr_t rx_sge_mapping; + + u64 sge_mask[RX_SGE_MASK_LEN]; + + u32 cid; + + __le16 fp_hc_idx; + + u8 index; /* number in fp array */ + u8 rx_queue; /* index for skb_record */ + u8 cl_id; /* eth client id */ + u8 cl_qzone_id; + u8 fw_sb_id; /* status block number in FW */ + u8 igu_sb_id; /* status block number in HW */ + + u16 rx_bd_prod; + u16 rx_bd_cons; + u16 rx_comp_prod; + u16 rx_comp_cons; + u16 rx_sge_prod; + /* The last maximal completed SGE */ + u16 last_max_sge; + __le16 *rx_cons_sb; + unsigned long rx_pkt, + rx_calls; + + /* TPA related */ + struct bnx2x_agg_info *tpa_info; +#ifdef BNX2X_STOP_ON_ERROR + u64 tpa_queue_used; +#endif + /* The size is calculated using the 
following: + sizeof name field from netdev structure + + 4 ('-Xx-' string) + + 4 (for the digits and to make it DWORD aligned) */ +#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) + char name[FP_NAME_SIZE]; +}; + +#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var) +#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index]) +#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) +#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) + +#ifdef CONFIG_NET_RX_BUSY_POLL + +enum bnx2x_fp_state { + BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */ + + BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */ + BNX2X_STATE_FP_NAPI_REQ = BIT(1), + + BNX2X_STATE_FP_POLL_BIT = 2, + BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */ + + BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */ +}; + +static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) +{ + WRITE_ONCE(fp->busy_poll_state, 0); +} + +/* called from the device poll routine to get ownership of a FP */ +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + unsigned long prev, old = READ_ONCE(fp->busy_poll_state); + + while (1) { + switch (old) { + case BNX2X_STATE_FP_POLL: + /* make sure bnx2x_fp_lock_poll() wont starve us */ + set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT, + &fp->busy_poll_state); + /* fallthrough */ + case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ: + return false; + default: + break; + } + prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI); + if (unlikely(prev != old)) { + old = prev; + continue; + } + return true; + } +} + +static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ + smp_wmb(); + fp->busy_poll_state = 0; +} + +/* called from bnx2x_low_latency_poll() */ +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0; +} + +static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ + smp_mb__before_atomic(); + clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state); +} + +/* true if a socket is polling */ +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; +} + +/* false if fp is currently owned */ +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state); + return !bnx2x_fp_ll_polling(fp); + +} +#else +static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) +{ +} + +static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) +{ + return true; +} + +static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) +{ +} + +static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) +{ + return false; +} + +static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) +{ +} + +static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) +{ + return false; +} +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + return true; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/* Use 2500 as a mini-jumbo MTU for FCoE */ +#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + +#define FCOE_IDX_OFFSET 0 + +#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \ + FCOE_IDX_OFFSET) +#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)]) +#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) +#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)]) +#define bnx2x_fcoe_sp_obj(bp, var) 
(bnx2x_fcoe_inner_sp_obj(bp)->var) +#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata_ptr[FIRST_TX_COS_INDEX] \ + ->var) + +#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp)) +#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp)) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) + +/* MC hsi */ +#define MAX_FETCH_BD 13 /* HW max BDs per packet */ +#define RX_COPY_THRESH 92 + +#define NUM_TX_RINGS 16 +#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) +#define NEXT_PAGE_TX_DESC_CNT 1 +#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) +#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) +#define MAX_TX_BD (NUM_TX_BD - 1) +#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) +#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ + (MAX_TX_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ + (x) + 1) +#define TX_BD(x) ((x) & MAX_TX_BD) +#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) + +/* number of NEXT_PAGE descriptors that may be required during placement */ +#define NEXT_CNT_PER_TX_PKT(bds) \ + (((bds) + MAX_TX_DESC_CNT - 1) / \ + MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT) +/* max BDs per tx packet w/o next_pages: + * START_BD - describes the packet + * START_BD (split) - includes the unpaged data segment for GSO + * PARSING_BD - for TSO and CSUM data + * PARSING_BD2 - for encapsulation data + * Frag BDs - describe the pages for frags + */ +#define BDS_PER_TX_PKT 4 +#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT) +/* max BDs per tx packet including next pages */ +#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \ + NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT)) + +/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ +#define NUM_RX_RINGS 8 +#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) +#define NEXT_PAGE_RX_DESC_CNT 2 +#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) +#define RX_DESC_MASK (RX_DESC_CNT - 1) +#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) +#define MAX_RX_BD (NUM_RX_BD - 1) +#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) + +/* dropless fc calculations for BDs + * + * The number of BDs should be the same as the number of buffers in the BRB: + * The low threshold takes into account the NEXT_PAGE_RX_DESC_CNT + * "next" elements on each page + */ +#define NUM_BD_REQ BRB_SIZE(bp) +#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ + MAX_RX_DESC_CNT) +#define BD_TH_LO(bp) (NUM_BD_REQ + \ + NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ + FW_DROP_LEVEL(bp)) +#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) + +#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ + ETH_MIN_RX_CQES_WITH_TPA_E1 : \ + ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) +#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA +#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) +#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ + MIN_RX_AVAIL)) + +#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ + (MAX_RX_DESC_CNT - 1)) ?
\ + (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ + (x) + 1) +#define RX_BD(x) ((x) & MAX_RX_BD) + +/* + * As long as CQE is X times bigger than BD entry we have to allocate X times + * more pages for CQ ring in order to keep it balanced with BD ring + */ +#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) +#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) +#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) +#define NEXT_PAGE_RCQ_DESC_CNT 1 +#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) +#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) +#define MAX_RCQ_BD (NUM_RCQ_BD - 1) +#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) +#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ + (MAX_RCQ_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ + (x) + 1) +#define RCQ_BD(x) ((x) & MAX_RCQ_BD) + +/* dropless fc calculations for RCQs + * + * Number of RCQs should be as number of buffers in BRB: + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT + * "next" elements on each page + */ +#define NUM_RCQ_REQ BRB_SIZE(bp) +#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ + MAX_RCQ_DESC_CNT) +#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ + NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ + FW_DROP_LEVEL(bp)) +#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +/* This is needed for determining of last_max */ +#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) +#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) + +#define BNX2X_SWCID_SHIFT 17 +#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) + +/* used on a CID received from the HW */ +#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) +#define CQE_CMD(x) (le32_to_cpu(x) >> \ + COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) + +#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \ + le32_to_cpu((bd)->addr_lo)) +#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) + +#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ +#define BNX2X_DB_SHIFT 3 /* 8 bytes*/ +#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) +#error "Min DB doorbell stride is 8" +#endif +#define DOORBELL(bp, cid, val) \ + do { \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ + } while (0) + +/* TX CSUM helpers */ +#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ + skb->csum_offset) +#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ + skb->csum_offset)) + +#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff) + +#define XMIT_PLAIN 0 +#define XMIT_CSUM_V4 (1 << 0) +#define XMIT_CSUM_V6 (1 << 1) +#define XMIT_CSUM_TCP (1 << 2) +#define XMIT_GSO_V4 (1 << 3) +#define XMIT_GSO_V6 (1 << 4) +#define XMIT_CSUM_ENC_V4 (1 << 5) +#define XMIT_CSUM_ENC_V6 (1 << 6) +#define XMIT_GSO_ENC_V4 (1 << 7) +#define XMIT_GSO_ENC_V6 (1 << 8) + +#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6) +#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6) + +#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC) +#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC) + +/* stuff added to make the code fit 80Col */ +#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) +#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) +#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) +#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) +#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) + +#define ETH_RX_ERROR_FALGS 
ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG + +#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ + (((le16_to_cpu(flags) & \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \ + == PRS_FLAG_OVERETH_IPV4) +#define BNX2X_RX_SUM_FIX(cqe) \ + BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) + +#define FP_USB_FUNC_OFF \ + offsetof(struct cstorm_status_block_u, func) +#define FP_CSB_FUNC_OFF \ + offsetof(struct cstorm_status_block_c, func) + +#define HC_INDEX_ETH_RX_CQ_CONS 1 + +#define HC_INDEX_OOO_TX_CQ_CONS 4 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 + +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 + +#define BNX2X_RX_SB_INDEX \ + (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) + +#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0 + +#define BNX2X_TX_SB_INDEX_COS0 \ + (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]) + +/* end of fast path */ + +/* common */ + +struct bnx2x_common { + + u32 chip_id; +/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ +#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) + +#define CHIP_NUM(bp) (bp->common.chip_id >> 16) +#define CHIP_NUM_57710 0x164e +#define CHIP_NUM_57711 0x164f +#define CHIP_NUM_57711E 0x1650 +#define CHIP_NUM_57712 0x1662 +#define CHIP_NUM_57712_MF 0x1663 +#define CHIP_NUM_57712_VF 0x166f +#define CHIP_NUM_57713 0x1651 +#define CHIP_NUM_57713E 0x1652 +#define CHIP_NUM_57800 0x168a +#define CHIP_NUM_57800_MF 0x16a5 +#define CHIP_NUM_57800_VF 0x16a9 +#define CHIP_NUM_57810 0x168e +#define CHIP_NUM_57810_MF 0x16ae +#define CHIP_NUM_57810_VF 0x16af +#define CHIP_NUM_57811 0x163d +#define CHIP_NUM_57811_MF 0x163e +#define CHIP_NUM_57811_VF 0x163f +#define CHIP_NUM_57840_OBSOLETE 0x168d +#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab +#define CHIP_NUM_57840_4_10 0x16a1 +#define CHIP_NUM_57840_2_20 0x16a2 +#define CHIP_NUM_57840_MF 0x16a4 +#define CHIP_NUM_57840_VF 0x16ad +#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) +#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) +#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) +#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) +#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF) +#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) +#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) +#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) +#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF) +#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) +#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) +#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF) +#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811) +#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF) +#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF) +#define CHIP_IS_57840(bp) \ + ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \ + (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \ + (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) +#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \ + (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE)) +#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF) +#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ + CHIP_IS_57711E(bp)) +#define CHIP_IS_57811xx(bp) (CHIP_IS_57811(bp) || \ + CHIP_IS_57811_MF(bp) || \ + CHIP_IS_57811_VF(bp)) +#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ + CHIP_IS_57712_MF(bp) 
|| \ + CHIP_IS_57712_VF(bp)) +#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ + CHIP_IS_57800_MF(bp) || \ + CHIP_IS_57800_VF(bp) || \ + CHIP_IS_57810(bp) || \ + CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57810_VF(bp) || \ + CHIP_IS_57811xx(bp) || \ + CHIP_IS_57840(bp) || \ + CHIP_IS_57840_MF(bp) || \ + CHIP_IS_57840_VF(bp)) +#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) +#define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) +#define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) + +#define CHIP_REV_SHIFT 12 +#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) +#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) +#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) +#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) +/* assume maximum 5 revisions */ +#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) +/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ +#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + !(CHIP_REV_VAL(bp) & 0x00001000)) +/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ +#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + (CHIP_REV_VAL(bp) & 0x00001000)) + +#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ + ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) + +#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) +#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) +#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ + (CHIP_REV_SHIFT + 1)) \ + << CHIP_REV_SHIFT) +#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \ + CHIP_REV_SIM(bp) :\ + CHIP_REV_VAL(bp)) +#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Bx)) +#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Ax)) +/* This define is used in two main places: + * 1. In the early stages of nic_load, to know if to configure Parser / Searcher + * to nic-only mode or to offload mode. Offload mode is configured if either the + * chip is E1x (where MIC_MODE register is not applicable), or if cnic already + * registered for this port (which means that the user wants storage services). + * 2. During cnic-related load, to know if offload mode is already configured in + * the HW or needs to be configured. + * Since the transition from nic-mode to offload-mode in HW causes traffic + * corruption, nic-mode is configured only in ports on which storage services + * where never requested. 
+ */ +#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp)) + + int flash_size; +#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ +#define BNX2X_NVRAM_TIMEOUT_COUNT 30000 +#define BNX2X_NVRAM_PAGE_SIZE 256 + + u32 shmem_base; + u32 shmem2_base; + u32 mf_cfg_base; + u32 mf2_cfg_base; + + u32 hw_config; + + u32 bc_ver; + + u8 int_block; +#define INT_BLOCK_HC 0 +#define INT_BLOCK_IGU 1 +#define INT_BLOCK_MODE_NORMAL 0 +#define INT_BLOCK_MODE_BW_COMP 2 +#define CHIP_INT_MODE_IS_NBC(bp) \ + (!CHIP_IS_E1x(bp) && \ + !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) +#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) + + u8 chip_port_mode; +#define CHIP_4_PORT_MODE 0x0 +#define CHIP_2_PORT_MODE 0x1 +#define CHIP_PORT_MODE_NONE 0x2 +#define CHIP_MODE(bp) (bp->common.chip_port_mode) +#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) + + u32 boot_mode; +}; + +/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ +#define BNX2X_IGU_STAS_MSG_VF_CNT 64 +#define BNX2X_IGU_STAS_MSG_PF_CNT 4 + +#define MAX_IGU_ATTN_ACK_TO 100 +/* end of common */ + +/* port */ + +struct bnx2x_port { + u32 pmf; + + u32 link_config[LINK_CONFIG_SIZE]; + + u32 supported[LINK_CONFIG_SIZE]; + + u32 advertising[LINK_CONFIG_SIZE]; + + u32 phy_addr; + + /* used to synchronize phy accesses */ + struct mutex phy_mutex; + + u32 port_stx; + + struct nig_stats old_nig_stats; +}; + +/* end of port */ + +#define STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_stats, stat_name) / 4) + +/* slow path */ +#define BNX2X_MAX_NUM_OF_VFS 64 +#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ +#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) + +/* We need to reserve doorbell addresses for all VF and queue combinations */ +#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) + +/* The doorbell is configured to have the same number of CIDs for PFs and for + * VFs. For this reason the PF CID zone is as large as the VF zone. + */ +#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS +#define BNX2X_MAX_NUM_VF_QUEUES 64 +#define BNX2X_VF_ID_INVALID 0xFF + +/* the number of VF CIDS multiplied by the amount of bytes reserved for each + * cid must not exceed the size of the VF doorbell + */ +#define BNX2X_VF_BAR_SIZE 512 +#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT)) +#error "VF doorbell bar size is 512" +#endif + +/* + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is + * control by the number of fast-path status blocks supported by the + * device (HW/FW). Each fast-path status block (FP-SB) aka non-default + * status block represents an independent interrupts context that can + * serve a regular L2 networking queue. However special L2 queues such + * as the FCoE queue do not require a FP-SB and other components like + * the CNIC may consume FP-SB reducing the number of possible L2 queues + * + * If the maximum number of FP-SB available is X then: + * a. If CNIC is supported it consumes 1 FP-SB thus the max number of + * regular L2 queues is Y=X-1 + * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor) + * c. If the FCoE L2 queue is supported the actual number of L2 queues + * is Y+1 + * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for + * slow-path interrupts) or Y+2 if CNIC is supported (one additional + * FP interrupt context for the CNIC). + * e. The number of HW context (CID count) is always X or X+1 if FCoE + * L2 queue is supported. 
The cid for the FCoE L2 queue is always X. + */ + +/* fast-path interrupt contexts E1x */ +#define FP_SB_MAX_E1x 16 +/* fast-path interrupt contexts E2 */ +#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 + +union cdu_context { + struct eth_context eth; + char pad[1024]; +}; + +/* CDU host DB constants */ +#define CDU_ILT_PAGE_SZ_HW 2 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ +#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) + +#define CNIC_ISCSI_CID_MAX 256 +#define CNIC_FCOE_CID_MAX 2048 +#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) +#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) + +#define QM_ILT_PAGE_SZ_HW 0 +#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ +#define QM_CID_ROUND 1024 + +/* TM (timers) host DB constants */ +#define TM_ILT_PAGE_SZ_HW 0 +#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ +#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \ + BNX2X_VF_CIDS + \ + CNIC_ISCSI_CID_MAX) +#define TM_ILT_SZ (8 * TM_CONN_NUM) +#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) + +/* SRC (Searcher) host DB constants */ +#define SRC_ILT_PAGE_SZ_HW 0 +#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ +#define SRC_HASH_BITS 10 +#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ +#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) +#define SRC_T2_SZ SRC_ILT_SZ +#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) + +#define MAX_DMAE_C 8 + +/* DMA memory not used in fastpath */ +struct bnx2x_slowpath { + union { + struct mac_configuration_cmd e1x; + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + union { + struct tstorm_eth_mac_filter_config e1x; + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct mac_configuration_cmd e1; + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + struct eth_rss_update_ramrod_data rss_rdata; + + /* Queue State related ramrods are always sent under rtnl_lock */ + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + struct tpa_update_ramrod_data tpa_data; + } q_rdata; + + union { + struct function_start_data func_start; + /* pfc configuration for DCBX ramrod */ + struct flow_control_configuration pfc_config; + } func_rdata; + + /* afex ramrod can not be a part of func_rdata union because these + * events might arrive in parallel to other events from func_rdata. + * Therefore, if they would have been defined in the same union, + * data can get corrupted. 
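To make the hazard described above concrete, here is a toy standalone C example (hypothetical message structs, not the driver's ramrod types) showing how two concurrently outstanding payloads would clobber each other if they shared one union, which is exactly what keeping func_afex_rdata in its own storage avoids:

#include <stdio.h>
#include <string.h>

/* hypothetical payloads, not the driver's ramrod structures */
struct func_update_msg { unsigned int vlan_tag; };
struct viflist_msg { unsigned int vif_list_index; };

/* the layout the driver deliberately avoids for events that may run in parallel */
union shared_rdata {
	struct func_update_msg func_update;
	struct viflist_msg viflist;
};

int main(void)
{
	union shared_rdata u;

	memset(&u, 0, sizeof(u));
	u.func_update.vlan_tag = 100;	/* first request still in flight */
	u.viflist.vif_list_index = 7;	/* second request reuses the storage */

	/* prints 7, not 100 - the first payload was silently corrupted */
	printf("vlan_tag now reads back as %u\n", u.func_update.vlan_tag);
	return 0;
}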
+ */ + union { + struct afex_vif_list_ramrod_data viflist_data; + struct function_update_data func_update; + } func_afex_rdata; + + /* used by dmae command executer */ + struct dmae_command dmae[MAX_DMAE_C]; + + u32 stats_comp; + union mac_stats mac_stats; + struct nig_stats nig_stats; + struct host_port_stats port_stats; + struct host_func_stats func_stats; + + u32 wb_comp; + u32 wb_data[4]; + + union drv_info_to_mcp drv_info_to_mcp; +}; + +#define bnx2x_sp(bp, var) (&bp->slowpath->var) +#define bnx2x_sp_mapping(bp, var) \ + (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) + +/* attn group wiring */ +#define MAX_DYNAMIC_ATTN_GRPS 8 + +struct attn_route { + u32 sig[5]; +}; + +struct iro { + u32 base; + u16 m1; + u16 m2; + u16 m3; + u16 size; +}; + +struct hw_context { + union cdu_context *vcxt; + dma_addr_t cxt_mapping; + size_t size; +}; + +/* forward */ +struct bnx2x_ilt; + +struct bnx2x_vfdb; + +enum bnx2x_recovery_state { + BNX2X_RECOVERY_DONE, + BNX2X_RECOVERY_INIT, + BNX2X_RECOVERY_WAIT, + BNX2X_RECOVERY_FAILED, + BNX2X_RECOVERY_NIC_LOADING +}; + +/* + * Event queue (EQ or event ring) MC hsi + * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 + */ +#define NUM_EQ_PAGES 1 +#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem)) +#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) +#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) +#define EQ_DESC_MASK (NUM_EQ_DESC - 1) +#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) + +/* depends on EQ_DESC_CNT_PAGE being a power of 2 */ +#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \ + (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1) + +/* depends on the above and on NUM_EQ_PAGES being a power of 2 */ +#define EQ_DESC(x) ((x) & EQ_DESC_MASK) + +#define BNX2X_EQ_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_EQ_CONS]) + +/* This is a data that will be used to create a link report message. + * We will keep the data used for the last link report in order + * to prevent reporting the same link parameters twice. 
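A minimal sketch of that "remember the last report, skip duplicates" idea, with fields that loosely mirror the structure defined next and a printf standing in for the real reporting path (this is illustrative only, not the driver's bnx2x_link_report() code):

#include <stdio.h>

/* loosely mirrors struct bnx2x_link_report_data below; illustrative only */
struct link_report {
	unsigned int line_speed;	/* effective line speed, Mbps */
	unsigned long flags;		/* link down / RX FC / TX FC bits */
};

static struct link_report last_reported;

static void maybe_report_link(const struct link_report *cur)
{
	/* nothing changed since the last report: stay silent */
	if (cur->line_speed == last_reported.line_speed &&
	    cur->flags == last_reported.flags)
		return;

	last_reported = *cur;
	printf("link report: %u Mbps, flags 0x%lx\n",
	       cur->line_speed, cur->flags);
}

int main(void)
{
	struct link_report r = { .line_speed = 10000, .flags = 0x1 };

	maybe_report_link(&r);	/* reported */
	maybe_report_link(&r);	/* suppressed - same parameters twice */
	r.line_speed = 1000;
	maybe_report_link(&r);	/* reported again */
	return 0;
}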
+ */ +struct bnx2x_link_report_data { + u16 line_speed; /* Effective line speed */ + unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */ +}; + +enum { + BNX2X_LINK_REPORT_FD, /* Full DUPLEX */ + BNX2X_LINK_REPORT_LINK_DOWN, + BNX2X_LINK_REPORT_RX_FC_ON, + BNX2X_LINK_REPORT_TX_FC_ON, +}; + +enum { + BNX2X_PORT_QUERY_IDX, + BNX2X_PF_QUERY_IDX, + BNX2X_FCOE_QUERY_IDX, + BNX2X_FIRST_QUEUE_QUERY_IDX, +}; + +struct bnx2x_fw_stats_req { + struct stats_query_header hdr; + struct stats_query_entry query[FP_SB_MAX_E1x+ + BNX2X_FIRST_QUEUE_QUERY_IDX]; +}; + +struct bnx2x_fw_stats_data { + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; + struct fcoe_statistics_params fcoe; + struct per_queue_stats queue_stats[1]; +}; + +/* Public slow path states */ +enum sp_rtnl_flag { + BNX2X_SP_RTNL_SETUP_TC, + BNX2X_SP_RTNL_TX_TIMEOUT, + BNX2X_SP_RTNL_FAN_FAILURE, + BNX2X_SP_RTNL_AFEX_F_UPDATE, + BNX2X_SP_RTNL_ENABLE_SRIOV, + BNX2X_SP_RTNL_VFPF_MCAST, + BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + BNX2X_SP_RTNL_RX_MODE, + BNX2X_SP_RTNL_HYPERVISOR_VLAN, + BNX2X_SP_RTNL_TX_STOP, + BNX2X_SP_RTNL_GET_DRV_VERSION, +}; + +enum bnx2x_iov_flag { + BNX2X_IOV_HANDLE_VF_MSG, + BNX2X_IOV_HANDLE_FLR, +}; + +struct bnx2x_prev_path_list { + struct list_head list; + u8 bus; + u8 slot; + u8 path; + u8 aer; + u8 undi; +}; + +struct bnx2x_sp_objs { + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; +}; + +struct bnx2x_fp_stats { + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + struct bnx2x_eth_q_stats_old eth_q_stats_old; +}; + +enum { + SUB_MF_MODE_UNKNOWN = 0, + SUB_MF_MODE_UFP, + SUB_MF_MODE_NPAR1_DOT_5, +}; + +struct bnx2x { + /* Fields used in the tx and intr/napi performance paths + * are grouped together in the beginning of the structure + */ + struct bnx2x_fastpath *fp; + struct bnx2x_sp_objs *sp_objs; + struct bnx2x_fp_stats *fp_stats; + struct bnx2x_fp_txdata *bnx2x_txq; + void __iomem *regview; + void __iomem *doorbells; + u16 db_size; + + u8 pf_num; /* absolute PF number */ + u8 pfid; /* per-path PF number */ + int base_fw_ndsb; /**/ +#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) +#define BP_PORT(bp) (bp->pfid & 1) +#define BP_FUNC(bp) (bp->pfid) +#define BP_ABS_FUNC(bp) (bp->pf_num) +#define BP_VN(bp) ((bp)->pfid >> 1) +#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) +#define BP_L_ID(bp) (BP_VN(bp) << 2) +#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ + (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 
2 : 1)) +#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) + +#ifdef CONFIG_BNX2X_SRIOV + /* protects vf2pf mailbox from simultaneous access */ + struct mutex vf2pf_mutex; + /* vf pf channel mailbox contains request and response buffers */ + struct bnx2x_vf_mbx_msg *vf2pf_mbox; + dma_addr_t vf2pf_mbox_mapping; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; + + /* bulletin board for messages from pf to vf */ + union pf_vf_bulletin *pf2vf_bulletin; + dma_addr_t pf2vf_bulletin_mapping; + + union pf_vf_bulletin shadow_bulletin; + struct pf_vf_bulletin_content old_bulletin; + + u16 requested_nr_virtfn; +#endif /* CONFIG_BNX2X_SRIOV */ + + struct net_device *dev; + struct pci_dev *pdev; + + const struct iro *iro_arr; +#define IRO (bp->iro_arr) + + enum bnx2x_recovery_state recovery_state; + int is_leader; + struct msix_entry *msix_table; + + int tx_ring_size; + +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_MAX_PACKET_SIZE 1500 +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 +/* TCP with Timestamp Option (32) + IPv6 (40) */ +#define ETH_MAX_TPA_HEADER_SIZE 72 + + /* Max supported alignment is 256 (8 shift) + * minimal alignment shift 6 is optimal for 57xxx HW performance + */ +#define BNX2X_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT)) + + /* FW uses 2 Cache lines Alignment for start packet and size + * + * We assume skb_build() uses sizeof(struct skb_shared_info) bytes + * at the end of skb->data, to avoid wasting a full cache line. + * This reduces memory use (skb->truesize). + */ +#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) + +#define BNX2X_FW_RX_ALIGN_END \ + max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) + + struct host_sp_status_block *def_status_blk; +#define DEF_SB_IGU_ID 16 +#define DEF_SB_ID HC_SP_SB_ID + __le16 def_idx; + __le16 def_att_idx; + u32 attn_state; + struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + + /* slow path ring */ + struct eth_spe *spq; + dma_addr_t spq_mapping; + u16 spq_prod_idx; + struct eth_spe *spq_prod_bd; + struct eth_spe *spq_last_bd; + __le16 *dsb_sp_prod; + atomic_t cq_spq_left; /* ETH_XXX ramrods credit */ + /* used to synchronize spq accesses */ + spinlock_t spq_lock; + + /* event queue */ + union event_ring_elem *eq_ring; + dma_addr_t eq_mapping; + u16 eq_prod; + u16 eq_cons; + __le16 *eq_cons_sb; + atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ + + /* Counter for marking that there is a STAT_QUERY ramrod pending */ + u16 stats_pending; + /* Counter for completed statistics ramrods */ + u16 stats_comp; + + /* End of fields used in the performance code paths */ + + int panic; + int msg_enable; + + u32 flags; +#define PCIX_FLAG (1 << 0) +#define PCI_32BIT_FLAG (1 << 1) +#define ONE_PORT_FLAG (1 << 2) +#define NO_WOL_FLAG (1 << 3) +#define USING_MSIX_FLAG (1 << 5) +#define USING_MSI_FLAG (1 << 6) +#define DISABLE_MSI_FLAG (1 << 7) +#define NO_MCP_FLAG (1 << 9) +#define MF_FUNC_DIS (1 << 11) +#define OWN_CNIC_IRQ (1 << 12) +#define NO_ISCSI_OOO_FLAG (1 << 13) +#define NO_ISCSI_FLAG (1 << 14) +#define NO_FCOE_FLAG (1 << 15) +#define BC_SUPPORTS_PFC_STATS (1 << 17) +#define TX_SWITCHING (1 << 18) +#define BC_SUPPORTS_FCOE_FEATURES (1 << 19) +#define USING_SINGLE_MSIX_FLAG (1 << 20) +#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) +#define IS_VF_FLAG (1 << 22) +#define 
BC_SUPPORTS_RMMOD_CMD (1 << 23) +#define HAS_PHYS_PORT_ID (1 << 24) +#define AER_ENABLED (1 << 25) +#define PTP_SUPPORTED (1 << 26) +#define TX_TIMESTAMPING_EN (1 << 27) + +#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) + +#ifdef CONFIG_BNX2X_SRIOV +#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG) +#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG)) +#else +#define IS_VF(bp) false +#define IS_PF(bp) true +#endif + +#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) +#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) +#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) + + u8 cnic_support; + bool cnic_enabled; + bool cnic_loaded; + struct cnic_eth_dev *(*cnic_probe)(struct net_device *); + + /* Flag that indicates that we can start looking for FCoE L2 queue + * completions in the default status block. + */ + bool fcoe_init; + + int mrrs; + + struct delayed_work sp_task; + struct delayed_work iov_task; + + atomic_t interrupt_occurred; + struct delayed_work sp_rtnl_task; + + struct delayed_work period_task; + struct timer_list timer; + int current_interval; + + u16 fw_seq; + u16 fw_drv_pulse_wr_seq; + u32 func_stx; + + struct link_params link_params; + struct link_vars link_vars; + u32 link_cnt; + struct bnx2x_link_report_data last_reported_link; + + struct mdio_if_info mdio; + + struct bnx2x_common common; + struct bnx2x_port port; + + struct cmng_init cmng; + + u32 mf_config[E1HVN_MAX]; + u32 mf_ext_config; + u32 path_has_ovlan; /* E3 */ + u16 mf_ov; + u8 mf_mode; +#define IS_MF(bp) (bp->mf_mode != 0) +#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) +#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) +#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX) + u8 mf_sub_mode; +#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \ + bp->mf_sub_mode == SUB_MF_MODE_UFP) + + u8 wol; + + int rx_ring_size; + + u16 tx_quick_cons_trip_int; + u16 tx_quick_cons_trip; + u16 tx_ticks_int; + u16 tx_ticks; + + u16 rx_quick_cons_trip_int; + u16 rx_quick_cons_trip; + u16 rx_ticks_int; + u16 rx_ticks; +/* Maximal coalescing timeout in us */ +#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR) + + u32 lin_cnt; + + u16 state; +#define BNX2X_STATE_CLOSED 0 +#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 +#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 +#define BNX2X_STATE_OPEN 0x3000 +#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 +#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 + +#define BNX2X_STATE_DIAG 0xe000 +#define BNX2X_STATE_ERROR 0xf000 + +#define BNX2X_MAX_PRIORITY 8 + int num_queues; + uint num_ethernet_queues; + uint num_cnic_queues; + int disable_tpa; + + u32 rx_mode; +#define BNX2X_RX_MODE_NONE 0 +#define BNX2X_RX_MODE_NORMAL 1 +#define BNX2X_RX_MODE_ALLMULTI 2 +#define BNX2X_RX_MODE_PROMISC 3 +#define BNX2X_MAX_MULTICAST 64 + + u8 igu_dsb_id; + u8 igu_base_sb; + u8 igu_sb_cnt; + u8 min_msix_vec_cnt; + + u32 igu_base_addr; + dma_addr_t def_status_blk_mapping; + + struct bnx2x_slowpath *slowpath; + dma_addr_t slowpath_mapping; + + /* Mechanism protecting the drv_info_to_mcp */ + struct mutex drv_info_mutex; + bool drv_info_mng_owner; + + /* Total number of FW statistics requests */ + u8 fw_stats_num; + + /* + * This is a memory buffer that will contain both statistics + * ramrod request and data. + */ + void *fw_stats; + dma_addr_t fw_stats_mapping; + + /* + * FW statistics request shortcut (points at the + * beginning of fw_stats buffer). 
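The fw_stats "shortcut" pointers described here and just below simply carve one allocation into a request part followed by a data part at offset fw_stats_req_sz. A standalone sketch of that layout, with plain malloc() standing in for the DMA-coherent allocation the driver actually uses and placeholder struct contents:

#include <stdio.h>
#include <stdlib.h>

/* placeholder layouts; the real request/data structures are defined in this header */
struct stats_req { unsigned int hdr; unsigned int query[7]; };
struct stats_data { unsigned long long counters[16]; };

int main(void)
{
	size_t req_sz = sizeof(struct stats_req);
	void *fw_stats = malloc(req_sz + sizeof(struct stats_data));
	struct stats_req *req;
	struct stats_data *data;

	if (!fw_stats)
		return 1;

	/* request shortcut: beginning of the buffer */
	req = fw_stats;
	/* data shortcut: beginning of the buffer + request size */
	data = (struct stats_data *)((char *)fw_stats + req_sz);

	printf("buffer at %p, request at %p, data at %p (+%zu bytes)\n",
	       fw_stats, (void *)req, (void *)data, req_sz);

	free(fw_stats);
	return 0;
}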
+ */ + struct bnx2x_fw_stats_req *fw_stats_req; + dma_addr_t fw_stats_req_mapping; + int fw_stats_req_sz; + + /* + * FW statistics data shortcut (points at the beginning of + * fw_stats buffer + fw_stats_req_sz). + */ + struct bnx2x_fw_stats_data *fw_stats_data; + dma_addr_t fw_stats_data_mapping; + int fw_stats_data_sz; + + /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB + * context size we need 8 ILT entries. + */ +#define ILT_MAX_L2_LINES 32 + struct hw_context context[ILT_MAX_L2_LINES]; + + struct bnx2x_ilt *ilt; +#define BP_ILT(bp) ((bp)->ilt) +#define ILT_MAX_LINES 256 +/* + * Maximum supported number of RSS queues: number of IGU SBs minus one that goes + * to CNIC. + */ +#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp)) + +/* + * Maximum CID count that might be required by the bnx2x: + * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI + */ + +#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ + + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) +#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ + + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp))) +#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ + ILT_PAGE_CIDS)) + + int qm_cid_count; + + bool dropless_fc; + + void *t2; + dma_addr_t t2_mapping; + struct cnic_ops __rcu *cnic_ops; + void *cnic_data; + u32 cnic_tag; + struct cnic_eth_dev cnic_eth_dev; + union host_hc_status_block cnic_sb; + dma_addr_t cnic_sb_mapping; + struct eth_spe *cnic_kwq; + struct eth_spe *cnic_kwq_prod; + struct eth_spe *cnic_kwq_cons; + struct eth_spe *cnic_kwq_last; + u16 cnic_kwq_pending; + u16 cnic_spq_pending; + u8 fip_mac[ETH_ALEN]; + struct mutex cnic_mutex; + struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; + + /* Start index of the "special" (CNIC related) L2 clients */ + u8 cnic_base_cl_id; + + int dmae_ready; + /* used to synchronize dmae accesses */ + spinlock_t dmae_lock; + + /* used to protect the FW mail box */ + struct mutex fw_mb_mutex; + + /* used to synchronize stats collecting */ + int stats_state; + + /* used for synchronization of concurrent threads statistics handling */ + struct semaphore stats_lock; + + /* used by dmae command loader */ + struct dmae_command stats_dmae; + int executer_idx; + + u16 stats_counter; + struct bnx2x_eth_stats eth_stats; + struct host_func_stats func_stats; + struct bnx2x_eth_stats_old eth_stats_old; + struct bnx2x_net_stats_old net_stats_old; + struct bnx2x_fw_port_stats_old fw_stats_old; + bool stats_init; + + struct z_stream_s *strm; + void *gunzip_buf; + dma_addr_t gunzip_mapping; + int gunzip_outlen; +#define FW_BUF_SIZE 0x8000 +#define GUNZIP_BUF(bp) (bp->gunzip_buf) +#define GUNZIP_PHYS(bp) (bp->gunzip_mapping) +#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen) + + struct raw_op *init_ops; + /* Init blocks offsets inside init_ops */ + u16 *init_ops_offsets; + /* Data blob - has 32 bit granularity */ + u32 *init_data; + u32 init_mode_flags; +#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) + /* Zipped PRAM blobs - raw data */ + const u8 *tsem_int_table_data; + const u8 *tsem_pram_data; + const u8 *usem_int_table_data; + const u8 *usem_pram_data; + const u8 *xsem_int_table_data; + const u8 *xsem_pram_data; + const u8 *csem_int_table_data; + const u8 *csem_pram_data; +#define INIT_OPS(bp) (bp->init_ops) +#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets) +#define INIT_DATA(bp) (bp->init_data) +#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data) +#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data) +#define 
INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data) +#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data) +#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data) +#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data) +#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) +#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) + +#define PHY_FW_VER_LEN 20 + char fw_ver[32]; + const struct firmware *firmware; + + struct bnx2x_vfdb *vfdb; +#define IS_SRIOV(bp) ((bp)->vfdb) + + /* DCB support on/off */ + u16 dcb_state; +#define BNX2X_DCB_STATE_OFF 0 +#define BNX2X_DCB_STATE_ON 1 + + /* DCBX engine mode */ + int dcbx_enabled; +#define BNX2X_DCBX_ENABLED_OFF 0 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 +#define BNX2X_DCBX_ENABLED_INVALID (-1) + + bool dcbx_mode_uset; + + struct bnx2x_config_dcbx_params dcbx_config_params; + struct bnx2x_dcbx_port_params dcbx_port_params; + int dcb_version; + + /* CAM credit pools */ + + /* used only in sriov */ + struct bnx2x_credit_pool_obj vlans_pool; + + struct bnx2x_credit_pool_obj macs_pool; + + /* RX_MODE object */ + struct bnx2x_rx_mode_obj rx_mode_obj; + + /* MCAST object */ + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* Function State controlling object */ + struct bnx2x_func_sp_obj func_obj; + + unsigned long sp_state; + + /* operation indication for the sp_rtnl task */ + unsigned long sp_rtnl_state; + + /* Indication of the IOV tasks */ + unsigned long iov_task_state; + + /* DCBX Negotiation results */ + struct dcbx_features dcbx_local_feat; + u32 dcbx_error; + +#ifdef BCM_DCBNL + struct dcbx_features dcbx_remote_feat; + u32 dcbx_remote_flags; +#endif + /* AFEX: store default vlan used */ + int afex_def_vlan_tag; + enum mf_cfg_afex_vlan_mode afex_vlan_mode; + u32 pending_max; + + /* multiple tx classes of service */ + u8 max_cos; + + /* priority to cos mapping */ + u8 prio_to_cos[8]; + + int fp_array_size; + u32 dump_preset_idx; + + u8 phys_port_id[ETH_ALEN]; + + /* PTP related context */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct work_struct ptp_task; + struct cyclecounter cyclecounter; + struct timecounter timecounter; + bool timecounter_init_done; + struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; + bool hwtstamp_ioctl_called; + u16 tx_type; + u16 rx_filter; + + struct bnx2x_link_report_data vf_link_vars; +}; + +/* Tx queues may be less or equal to Rx queues */ +extern int num_queues; +#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) +#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues) +#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \ + (bp)->num_cnic_queues) +#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) + +#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) + +#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp) +/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */ + +#define RSS_IPV4_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY + +#define RSS_IPV4_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY + +#define RSS_IPV6_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY + +#define RSS_IPV6_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY + +/* func init flags */ +#define FUNC_FLG_RSS 0x0001 +#define FUNC_FLG_STATS 0x0002 +/* removed FUNC_FLG_UNMATCHED 0x0004 */ +#define FUNC_FLG_TPA 0x0008 +#define FUNC_FLG_SPQ 0x0010 +#define FUNC_FLG_LEADING 0x0020 /* PF 
only */ +#define FUNC_FLG_LEADING_STATS 0x0040 +struct bnx2x_func_init_params { + /* dma */ + dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ + dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ + + u16 func_flgs; + u16 func_id; /* abs fid */ + u16 pf_id; + u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ +}; + +#define for_each_cnic_queue(bp, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ + (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + +#define for_each_eth_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + +#define for_each_nondefault_eth_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + +#define for_each_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + +/* Skip forwarding FP */ +#define for_each_valid_rx_queue(bp, var) \ + for ((var) = 0; \ + (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \ + BNX2X_NUM_ETH_QUEUES(bp)); \ + (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + +#define for_each_rx_queue_cnic(bp, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ + (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + +#define for_each_rx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + +/* Skip OOO FP */ +#define for_each_valid_tx_queue(bp, var) \ + for ((var) = 0; \ + (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \ + BNX2X_NUM_ETH_QUEUES(bp)); \ + (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + +#define for_each_tx_queue_cnic(bp, var) \ + for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \ + (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + +#define for_each_tx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + +#define for_each_nondefault_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + +#define for_each_cos_in_tx_queue(fp, var) \ + for ((var) = 0; (var) < (fp)->max_cos; (var)++) + +/* skip rx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ +#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + +/* skip tx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ +#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + +#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + +/** + * bnx2x_set_mac_one - configure a single MAC address + * + * @bp: driver handle + * @mac: MAC to configure + * @obj: MAC object handle + * @set: if 'true' add a new MAC, otherwise - delete + * @mac_type: the type of the MAC to configure (e.g. ETH, UC list) + * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT) + * + * Configures one MAC according to provided parameters or continues the + * execution of previously scheduled commands if RAMROD_CONT is set in + * ramrod_flags. + * + * Returns zero if operation has successfully completed, a positive value if the + * operation has been successfully scheduled and a negative - if a requested + * operations has failed. 
+ */ +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags); +/** + * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object + * + * @bp: driver handle + * @mac_obj: MAC object handle + * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC) + * @wait_for_comp: if 'true' block until completion + * + * Deletes all MACs of the specific type (e.g. ETH, UC list). + * + * Returns zero if operation has successfully completed, a positive value if the + * operation has been successfully scheduled and a negative - if a requested + * operations has failed. + */ +int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp); + +/* Init Function API */ +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); +void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, + u8 vf_valid, int fw_sb_id, int igu_sb_id); +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); +void bnx2x_read_mf_cfg(struct bnx2x *bp); + +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); + +/* dmae */ +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32); +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type); +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type); + +void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u8 src_type, u8 dst_type); +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u32 *comp); + +/* FLR related routines */ +u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); +void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count); +int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt); +u8 bnx2x_is_pcie_pending(struct pci_dev *dev); +int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt); + +void bnx2x_calc_fc_adv(struct bnx2x *bp); +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int cmd_type); +void bnx2x_update_coalesce(struct bnx2x *bp); +int bnx2x_get_cur_phy_idx(struct bnx2x *bp); + +bool bnx2x_port_after_undi(struct bnx2x *bp); + +static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, + int wait) +{ + u32 val; + + do { + val = REG_RD(bp, reg); + if (val == expected) + break; + ms -= wait; + msleep(wait); + + } while (ms > 0); + + return val; +} + +void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, + bool is_pf); + +#define BNX2X_ILT_ZALLOC(x, y, size) \ + x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) + +#define BNX2X_ILT_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define ILOG2(x) (ilog2((x))) + +#define ILT_NUM_PAGE_ENTRIES (3072) +/* In 57710/11 we use whole table since we have 8 func + * In 57712 we have only 4 func, but use same size per func, then only half of + * the table in use + */ +#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8) + 
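With ILT_NUM_PAGE_ENTRIES = 3072 shared by up to 8 functions, ILT_PER_FUNC works out to 384 ILT page entries per function; on 4-function parts only half of the table is actually used, as the comment above notes. As a usage sketch for the reg_poll() helper defined above (the register offset and expected value below are hypothetical, chosen only for illustration): the helper re-reads the register every 'wait' milliseconds until it reads 'expected' or the 'ms' budget is exhausted, and returns the last value read, so the caller still compares the result itself.

	u32 val;

	/* hypothetical GRC offset and "done" value; poll up to 200 ms in 10 ms steps */
	val = reg_poll(bp, 0x2000, 0x1, 200, 10);
	if (val != 0x1)
		BNX2X_ERR("register poll timed out, last value 0x%x\n", val);
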
+#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) +/* + * the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ +#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) +#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) + +/* load/unload mode */ +#define LOAD_NORMAL 0 +#define LOAD_OPEN 1 +#define LOAD_DIAG 2 +#define LOAD_LOOPBACK_EXT 3 +#define UNLOAD_NORMAL 0 +#define UNLOAD_CLOSE 1 +#define UNLOAD_RECOVERY 2 + +/* DMAE command defines */ +#define DMAE_TIMEOUT -1 +#define DMAE_PCI_ERROR -2 /* E2 and onward */ +#define DMAE_NOT_RDY -3 +#define DMAE_PCI_ERR_FLAG 0x80000000 + +#define DMAE_SRC_PCI 0 +#define DMAE_SRC_GRC 1 + +#define DMAE_DST_NONE 0 +#define DMAE_DST_PCI 1 +#define DMAE_DST_GRC 2 + +#define DMAE_COMP_PCI 0 +#define DMAE_COMP_GRC 1 + +/* E2 and onward - PCI error handling in the completion */ + +#define DMAE_COMP_REGULAR 0 +#define DMAE_COM_SET_ERR 1 + +#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \ + DMAE_COMMAND_SRC_SHIFT) +#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \ + DMAE_COMMAND_SRC_SHIFT) + +#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \ + DMAE_COMMAND_DST_SHIFT) +#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \ + DMAE_COMMAND_DST_SHIFT) + +#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \ + DMAE_COMMAND_C_DST_SHIFT) +#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \ + DMAE_COMMAND_C_DST_SHIFT) + +#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE + +#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) + +#define DMAE_CMD_PORT_0 0 +#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT + +#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET +#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET +#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT + +#define DMAE_SRC_PF 0 +#define DMAE_SRC_VF 1 + +#define DMAE_DST_PF 0 +#define DMAE_DST_VF 1 + +#define DMAE_C_SRC 0 +#define DMAE_C_DST 1 + +#define DMAE_LEN32_RD_MAX 0x80 +#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) + +#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit + * indicates error + */ + +#define MAX_DMAE_C_PER_PORT 8 +#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + BP_VN(bp)) +#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + E1HVN_MAX) + +/* PCIE link and speed */ +#define PCICFG_LINK_WIDTH 0x1f00000 +#define PCICFG_LINK_WIDTH_SHIFT 20 +#define PCICFG_LINK_SPEED 0xf0000 +#define PCICFG_LINK_SPEED_SHIFT 16 + +#define BNX2X_NUM_TESTS_SF 7 +#define BNX2X_NUM_TESTS_MF 3 +#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \ + IS_VF(bp) ? 
0 : BNX2X_NUM_TESTS_SF) + +#define BNX2X_PHY_LOOPBACK 0 +#define BNX2X_MAC_LOOPBACK 1 +#define BNX2X_EXT_LOOPBACK 2 +#define BNX2X_PHY_LOOPBACK_FAILED 1 +#define BNX2X_MAC_LOOPBACK_FAILED 2 +#define BNX2X_EXT_LOOPBACK_FAILED 3 +#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ + BNX2X_PHY_LOOPBACK_FAILED) + +#define STROM_ASSERT_ARRAY_SIZE 50 + +/* must be used on a CID before placing it on a HW ring */ +#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ + (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ + (x)) + +#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) +#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) + +#define BNX2X_BTR 4 +#define MAX_SPQ_PENDING 8 + +/* CMNG constants, as derived from system spec calculations */ +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ +#define DEF_MIN_RATE 100 +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer */ +#define QM_ARB_BYTES 160000 +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 +/* how many bytes above threshold for the minimal credit of Min algorithm*/ +#define MIN_ABOVE_THRESH 32768 +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) +/* Memory of fairness algorithm . 2 cycles */ +#define FAIR_MEM 2 + +#define ATTN_NIG_FOR_FUNC (1L << 8) +#define ATTN_SW_TIMER_4_FUNC (1L << 9) +#define GPIO_2_FUNC (1L << 10) +#define GPIO_3_FUNC (1L << 11) +#define GPIO_4_FUNC (1L << 12) +#define ATTN_GENERAL_ATTN_1 (1L << 13) +#define ATTN_GENERAL_ATTN_2 (1L << 14) +#define ATTN_GENERAL_ATTN_3 (1L << 15) +#define ATTN_GENERAL_ATTN_4 (1L << 13) +#define ATTN_GENERAL_ATTN_5 (1L << 14) +#define ATTN_GENERAL_ATTN_6 (1L << 15) + +#define ATTN_HARD_WIRED_MASK 0xff00 +#define ATTENTION_ID 4 + +#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \ + IS_MF_FCOE_AFEX(bp)) + +/* stuff added to make the code fit 80Col */ + +#define BNX2X_PMF_LINK_ASSERT \ + GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) + +#define BNX2X_MC_ASSERT_BITS \ + (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) + +#define BNX2X_MCP_ASSERT \ + GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) + +#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) +#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) + +#define HW_INTERRUT_ASSERT_SET_0 \ + (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) 
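For a concrete sense of the CMNG fairness constants defined a few lines above, the integration-time coefficient is plain arithmetic over the other constants:

	T_FAIR_COEF = (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES
	            = (32768 + 160000) * 8 * 100
	            = 154214400

i.e. the byte quantities are converted to bits and scaled by the 1:100 resolution of the Min algorithm; this is the coefficient the driver later uses when calculating the actual Tfair value.
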
+#define HW_INTERRUT_ASSERT_SET_1 \ + (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_2 \ + (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_2 (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) + +#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) + +#define MULTI_MASK 0x7f + +#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) +#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) +#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) +#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) + +#define DEF_USB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_u, igu_index) +#define DEF_CSB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_c, igu_index) +#define DEF_XSB_IGU_INDEX_OFF \ + offsetof(struct xstorm_def_status_block, igu_index) +#define DEF_TSB_IGU_INDEX_OFF \ + offsetof(struct tstorm_def_status_block, igu_index) + +#define DEF_USB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_u, segment) +#define DEF_CSB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_c, segment) +#define DEF_XSB_SEGMENT_OFF \ + offsetof(struct xstorm_def_status_block, segment) +#define DEF_TSB_SEGMENT_OFF \ + offsetof(struct tstorm_def_status_block, segment) + +#define BNX2X_SP_DSB_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_DEF_CONS]) + +#define CAM_IS_INVALID(x) \ + (GET_FLAG(x.flags, \ + MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ + (T_ETH_MAC_COMMAND_INVALIDATE)) + 
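The DEF_*_FUNC_OFF, DEF_*_IGU_INDEX_OFF and DEF_*_SEGMENT_OFF macros above are plain offsetof() byte offsets into the per-storm default status block structures, which the driver can add to a block's base address to reach a single field. A minimal self-contained sketch of the pattern follows; the struct layout and names are hypothetical, not the real firmware layout:

	#include <stddef.h>	/* offsetof */

	struct example_def_status_block {	/* hypothetical layout */
		unsigned short index_values[8];
		unsigned char func;
		unsigned char segment;
		unsigned short igu_index;
	};

	#define EX_FUNC_OFF	offsetof(struct example_def_status_block, func)
	#define EX_SEGMENT_OFF	offsetof(struct example_def_status_block, segment)

	/* EX_FUNC_OFF is 16 with this layout; adding it to the block's base
	 * address selects just that one byte, which is how DEF_USB_FUNC_OFF
	 * and friends are meant to be used against the status blocks. */
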
+/* Number of u32 elements in MC hash array */ +#define MC_HASH_SIZE 8 +#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ + TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) + +#ifndef PXP2_REG_PXP2_INT_STS +#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 +#endif + +#ifndef ETH_MAX_RX_CLIENTS_E2 +#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H +#endif + +#define BNX2X_VPD_LEN 128 +#define VENDOR_ID_LEN 4 + +#define VF_ACQUIRE_THRESH 3 +#define VF_ACQUIRE_MAC_FILTERS 1 +#define VF_ACQUIRE_MC_FILTERS 10 + +#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \ + (!((me_reg) & ME_REG_VF_ERR))) +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err); + +/* Congestion management fairness mode */ +#define CMNG_FNS_NONE 0 +#define CMNG_FNS_MINMAX 1 + +#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ +#define HC_SEG_ACCESS_ATTN 4 +#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ + +static const u32 dmae_reg_go_c[] = { + DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 +}; + +void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev); +void bnx2x_notify_link_changed(struct bnx2x *bp); + +#define BNX2X_MF_SD_PROTOCOL(bp) \ + ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) + +#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \ + (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) + +#define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE) + +#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) +#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) +#define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp)) + +#define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp)) + +#define BNX2X_MF_EXT_PROTOCOL_MASK \ + (MACP_FUNC_CFG_FLAGS_ETHERNET | \ + MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD | \ + MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \ + BNX2X_MF_EXT_PROTOCOL_MASK) + +#define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \ + (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) + +#define IS_MF_FCOE_AFEX(bp) \ + (IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)) + +#define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SD(bp) && \ + (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ + BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) + +#define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SI(bp) && \ + (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \ + BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))) + +#define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \ + IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp)) + + +#define SET_FLAG(value, mask, flag) \ + do {\ + (value) &= ~(mask);\ + (value) |= ((flag) << (mask##_SHIFT));\ + } while (0) + +#define GET_FLAG(value, mask) \ + (((value) & (mask)) >> (mask##_SHIFT)) + +#define GET_FIELD(value, fname) \ + (((value) & (fname##_MASK)) >> (fname##_SHIFT)) + +enum { + SWITCH_UPDATE, + AFEX_UPDATE, +}; + +#define NUM_MACS 8 + +void bnx2x_set_local_cmng(struct bnx2x *bp); + +void 
bnx2x_update_mng_version(struct bnx2x *bp); + +#define MCPR_SCRATCH_BASE(bp) \ + (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + +#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) + +void bnx2x_init_ptp(struct bnx2x *bp); +int bnx2x_configure_ptp_filters(struct bnx2x *bp); +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); + +#define BNX2X_MAX_PHC_DRIFT 31000000 +#define BNX2X_PTP_TX_TIMEOUT + +#endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c new file mode 100644 index 000000000..c31f25cde --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -0,0 +1,5038 @@ +/* bnx2x_cmn.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnx2x_cmn.h" +#include "bnx2x_init.h" +#include "bnx2x_sp.h" + +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem(struct bnx2x *bp); +static int bnx2x_poll(struct napi_struct *napi, int budget); + +static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) +{ + int i; + + /* Add NAPI objects */ + for_each_rx_queue_cnic(bp, i) { + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } +} + +static void bnx2x_add_all_napi(struct bnx2x *bp) +{ + int i; + + /* Add NAPI objects */ + for_each_eth_queue(bp, i) { + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, NAPI_POLL_WEIGHT); + napi_hash_add(&bnx2x_fp(bp, i, napi)); + } +} + +static int bnx2x_calc_num_queues(struct bnx2x *bp) +{ + int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues(); + + /* Reduce memory usage in kdump environment by using only one queue */ + if (is_kdump_kernel()) + nq = 1; + + nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); + return nq; +} + +/** + * bnx2x_move_fp - move content of the fastpath structure. + * + * @bp: driver handle + * @from: source FP index + * @to: destination FP index + * + * Makes sure the contents of the bp->fp[to].napi is kept + * intact. This is done by first copying the napi struct from + * the target to the source, and then mem copying the entire + * source onto the target. Update txdata pointers and related + * content. 
+ */ +static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) +{ + struct bnx2x_fastpath *from_fp = &bp->fp[from]; + struct bnx2x_fastpath *to_fp = &bp->fp[to]; + struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; + struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; + struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; + struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; + int old_max_eth_txqs, new_max_eth_txqs; + int old_txdata_index = 0, new_txdata_index = 0; + struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; + + /* Copy the NAPI object as it has been already initialized */ + from_fp->napi = to_fp->napi; + + /* Move bnx2x_fastpath contents */ + memcpy(to_fp, from_fp, sizeof(*to_fp)); + to_fp->index = to; + + /* Retain the tpa_info of the original `to' version as we don't want + * 2 FPs to contain the same tpa_info pointer. + */ + to_fp->tpa_info = old_tpa_info; + + /* move sp_objs contents as well, as their indices match fp ones */ + memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); + + /* move fp_stats contents as well, as their indices match fp ones */ + memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats)); + + /* Update txdata pointers in fp and move txdata content accordingly: + * Each fp consumes 'max_cos' txdata structures, so the index should be + * decremented by max_cos x delta. + */ + + old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; + new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * + (bp)->max_cos; + if (from == FCOE_IDX(bp)) { + old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; + new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; + } + + memcpy(&bp->bnx2x_txq[new_txdata_index], + &bp->bnx2x_txq[old_txdata_index], + sizeof(struct bnx2x_fp_txdata)); + to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; +} + +/** + * bnx2x_fill_fw_str - Fill buffer with FW version string. + * + * @bp: driver handle + * @buf: character buffer to fill with the fw name + * @buf_len: length of the above buffer + * + */ +void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len) +{ + if (IS_PF(bp)) { + u8 phy_fw_ver[PHY_FW_VER_LEN]; + + phy_fw_ver[0] = '\0'; + bnx2x_get_ext_phy_fw_version(&bp->link_params, + phy_fw_ver, PHY_FW_VER_LEN); + strlcpy(buf, bp->fw_ver, buf_len); + snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver), + "bc %d.%d.%d%s%s", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff), + ((phy_fw_ver[0] != '\0') ? 
" phy " : ""), phy_fw_ver); + } else { + bnx2x_vf_fill_fw_str(bp, buf, buf_len); + } +} + +/** + * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact + * + * @bp: driver handle + * @delta: number of eth queues which were not allocated + */ +static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta) +{ + int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp); + + /* Queue pointer cannot be re-set on an fp-basis, as moving pointer + * backward along the array could cause memory to be overridden + */ + for (cos = 1; cos < bp->max_cos; cos++) { + for (i = 0; i < old_eth_num - delta; i++) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + int new_idx = cos * (old_eth_num - delta) + i; + + memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos], + sizeof(struct bnx2x_fp_txdata)); + fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx]; + } + } +} + +int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ + +/* free skb in the packet ring at pos idx + * return idx of last bd freed + */ +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, + u16 idx, unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_bd *tx_data_bd; + struct sk_buff *skb = tx_buf->skb; + u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; + int nbd; + u16 split_bd_len = 0; + + /* prefetch skb end pointer to speedup dev_kfree_skb() */ + prefetch(&skb->end); + + DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", + txdata->txq_index, idx, tx_buf, skb); + + tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; + + nbd = le16_to_cpu(tx_start_bd->nbd) - 1; +#ifdef BNX2X_STOP_ON_ERROR + if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { + BNX2X_ERR("BAD nbd!\n"); + bnx2x_panic(); + } +#endif + new_cons = nbd + tx_buf->first_bd; + + /* Get the next bd */ + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + /* Skip a parse bd... */ + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + + if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { + /* Skip second parse bd... */ + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* TSO headers+data bds share a common mapping. 
See bnx2x_tx_split() */ + if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + split_bd_len = BD_UNMAP_LEN(tx_data_bd); + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* unmap first bd */ + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd) + split_bd_len, + DMA_TO_DEVICE); + + /* now free frags */ + while (nbd > 0) { + + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), + BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); + if (--nbd) + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + + /* release skb */ + WARN_ON(!skb); + if (likely(skb)) { + (*pkts_compl)++; + (*bytes_compl) += skb->len; + } + + dev_kfree_skb_any(skb); + tx_buf->first_bd = 0; + tx_buf->skb = NULL; + + return new_cons; +} + +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) +{ + struct netdev_queue *txq; + u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; + unsigned int pkts_compl = 0, bytes_compl = 0; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return -1; +#endif + + txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + sw_cons = txdata->tx_pkt_cons; + + while (sw_cons != hw_cons) { + u16 pkt_cons; + + pkt_cons = TX_BD(sw_cons); + + DP(NETIF_MSG_TX_DONE, + "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n", + txdata->txq_index, hw_cons, sw_cons, pkt_cons); + + bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons, + &pkts_compl, &bytes_compl); + + sw_cons++; + } + + netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); + + txdata->tx_pkt_cons = sw_cons; + txdata->tx_bd_cons = bd_cons; + + /* Need to make the tx_bd_cons update visible to start_xmit() + * before checking for netif_tx_queue_stopped(). Without the + * memory barrier, there is a small possibility that + * start_xmit() will miss it and cause the queue to be stopped + * forever. + * On the other hand we need an rmb() here to ensure the proper + * ordering of bit testing in the following + * netif_tx_queue_stopped(txq) call. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq))) { + /* Taking tx_lock() is needed to prevent re-enabling the queue + * while it's empty. 
This could have happen if rx_action() gets + * suspended in bnx2x_tx_int() after the condition before + * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()): + * + * stops the queue->sees fresh tx_bd_cons->releases the queue-> + * sends some packets consuming the whole queue again-> + * stops the queue + */ + + __netif_tx_lock(txq, smp_processor_id()); + + if ((netif_tx_queue_stopped(txq)) && + (bp->state == BNX2X_STATE_OPEN) && + (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)) + netif_tx_wake_queue(txq); + + __netif_tx_unlock(txq); + } + return 0; +} + +static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp, + u16 idx) +{ + u16 last_max = fp->last_max_sge; + + if (SUB_S16(idx, last_max) > 0) + fp->last_max_sge = idx; +} + +static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, + u16 sge_len, + struct eth_end_agg_rx_cqe *cqe) +{ + struct bnx2x *bp = fp->bp; + u16 last_max, last_elem, first_elem; + u16 delta = 0; + u16 i; + + if (!sge_len) + return; + + /* First mark all used pages */ + for (i = 0; i < sge_len; i++) + BIT_VEC64_CLEAR_BIT(fp->sge_mask, + RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i]))); + + DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", + sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); + + /* Here we assume that the last SGE index is the biggest */ + prefetch((void *)(fp->sge_mask)); + bnx2x_update_last_max_sge(fp, + le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); + + last_max = RX_SGE(fp->last_max_sge); + last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; + first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; + + /* If ring is not full */ + if (last_elem + 1 != first_elem) + last_elem++; + + /* Now update the prod */ + for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) { + if (likely(fp->sge_mask[i])) + break; + + fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; + delta += BIT_VEC64_ELEM_SZ; + } + + if (delta > 0) { + fp->rx_sge_prod += delta; + /* clear page-end entries */ + bnx2x_clear_sge_mask_next_elems(fp); + } + + DP(NETIF_MSG_RX_STATUS, + "fp->last_max_sge = %d fp->rx_sge_prod = %d\n", + fp->last_max_sge, fp->rx_sge_prod); +} + +/* Get Toeplitz hash value in the skb using the value from the + * CQE (calculated by HW). + */ +static u32 bnx2x_get_rxhash(const struct bnx2x *bp, + const struct eth_fast_path_rx_cqe *cqe, + enum pkt_hash_types *rxhash_type) +{ + /* Get Toeplitz hash from CQE */ + if ((bp->dev->features & NETIF_F_RXHASH) && + (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { + enum eth_rss_hash_type htype; + + htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; + *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) || + (htype == TCP_IPV6_HASH_TYPE)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; + + return le32_to_cpu(cqe->rss_hash_result); + } + *rxhash_type = PKT_HASH_TYPE_NONE; + return 0; +} + +static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, + u16 cons, u16 prod, + struct eth_fast_path_rx_cqe *cqe) +{ + struct bnx2x *bp = fp->bp; + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + dma_addr_t mapping; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + + /* print error if current state != stop */ + if (tpa_info->tpa_state != BNX2X_TPA_STOP) + BNX2X_ERR("start of bin not in stop [%d]\n", queue); + + /* Try to map an empty data buffer from the aggregation info */ + mapping = dma_map_single(&bp->pdev->dev, + first_buf->data + NET_SKB_PAD, + fp->rx_buf_size, DMA_FROM_DEVICE); + /* + * ...if it fails - move the skb from the consumer to the producer + * and set the current aggregation state as ERROR to drop it + * when TPA_STOP arrives. + */ + + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + /* Move the BD from the consumer to the producer */ + bnx2x_reuse_rx_data(fp, cons, prod); + tpa_info->tpa_state = BNX2X_TPA_ERROR; + return; + } + + /* move empty data from pool to prod */ + prod_rx_buf->data = first_buf->data; + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); + /* point prod_bd to new data */ + prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + /* move partial skb from cons to pool (don't unmap yet) */ + *first_buf = *cons_rx_buf; + + /* mark bin state as START */ + tpa_info->parsing_flags = + le16_to_cpu(cqe->pars_flags.flags); + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + tpa_info->tpa_state = BNX2X_TPA_START; + tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); + tpa_info->placement_offset = cqe->placement_offset; + tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type); + if (fp->mode == TPA_MODE_GRO) { + u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); + tpa_info->full_page = SGE_PAGES / gro_size * gro_size; + tpa_info->gro_size = gro_size; + } + +#ifdef BNX2X_STOP_ON_ERROR + fp->tpa_queue_used |= (1 << queue); + DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", + fp->tpa_queue_used); +#endif +} + +/* Timestamp option length allowed for TPA aggregation: + * + * nop nop kind length echo val + */ +#define TPA_TSTAMP_OPT_LEN 12 +/** + * bnx2x_set_gro_params - compute GRO values + * + * @skb: packet skb + * @parsing_flags: parsing flags from the START CQE + * @len_on_bd: total length of the first packet for the + * aggregation. + * @pkt_len: length of all segments + * + * Approximate value of the MSS for this aggregation calculated using + * the first packet of it. + * Compute number of aggregated segments, and gso_type. + */ +static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, + u16 len_on_bd, unsigned int pkt_len, + u16 num_of_coalesced_segs) +{ + /* TPA aggregation won't have either IP options or TCP options + * other than timestamp or IPv6 extension headers. 
+ */ + u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); + + if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == + PRS_FLAG_OVERETH_IPV6) { + hdrs_len += sizeof(struct ipv6hdr); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + } else { + hdrs_len += sizeof(struct iphdr); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + } + + /* Check if there was a TCP timestamp, if there is it's will + * always be 12 bytes length: nop nop kind length echo val. + * + * Otherwise FW would close the aggregation. + */ + if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) + hdrs_len += TPA_TSTAMP_OPT_LEN; + + skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = num_of_coalesced_segs; +} + +static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 index, gfp_t gfp_mask) +{ + struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) { + BNX2X_ERR("Can't alloc sge\n"); + return -ENOMEM; + } + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGES, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + BNX2X_ERR("Can't map sge\n"); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + +static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct bnx2x_agg_info *tpa_info, + u16 pages, + struct sk_buff *skb, + struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) +{ + struct sw_rx_page *rx_pg, old_rx_pg; + u32 i, frag_len, frag_size; + int err, j, frag_id = 0; + u16 len_on_bd = tpa_info->len_on_bd; + u16 full_page = 0, gro_size = 0; + + frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; + + if (fp->mode == TPA_MODE_GRO) { + gro_size = tpa_info->gro_size; + full_page = tpa_info->full_page; + } + + /* This is needed in order to enable forwarding support */ + if (frag_size) + bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd, + le16_to_cpu(cqe->pkt_len), + le16_to_cpu(cqe->num_of_coalesced_segs)); + +#ifdef BNX2X_STOP_ON_ERROR + if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) { + BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", + pages, cqe_idx); + BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); + bnx2x_panic(); + return -EINVAL; + } +#endif + + /* Run through the SGL and compose the fragmented skb */ + for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { + u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); + + /* FW gives the indices of the SGE as if the ring is an array + (meaning that "next" element will consume 2 indices) */ + if (fp->mode == TPA_MODE_GRO) + frag_len = min_t(u32, frag_size, (u32)full_page); + else /* LRO */ + frag_len = min_t(u32, frag_size, (u32)SGE_PAGES); + + rx_pg = &fp->rx_page_ring[sge_idx]; + old_rx_pg = *rx_pg; + + /* If we fail to allocate a substitute page, we simply stop + where we are and drop the whole packet */ + err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC); + if (unlikely(err)) { + bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + return err; + } + + /* Unmap the page as we're going to pass it to the stack */ + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGES, DMA_FROM_DEVICE); + /* Add one frag and update the appropriate fields in the skb */ + if (fp->mode == TPA_MODE_LRO) + skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + else { /* GRO */ + int rem; + int offset = 0; + for (rem = frag_len; rem > 0; rem -= gro_size) { + int len = rem > gro_size ? gro_size : rem; + skb_fill_page_desc(skb, frag_id++, + old_rx_pg.page, offset, len); + if (offset) + get_page(old_rx_pg.page); + offset += len; + } + } + + skb->data_len += frag_len; + skb->truesize += SGE_PAGES; + skb->len += frag_len; + + frag_size -= frag_len; + } + + return 0; +} + +static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data) +{ + if (fp->rx_frag_size) + put_page(virt_to_head_page(data)); + else + kfree(data); +} + +static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) +{ + if (fp->rx_frag_size) { + /* GFP_KERNEL allocations are used only during initialization */ + if (unlikely(gfp_mask & __GFP_WAIT)) + return (void *)__get_free_page(gfp_mask); + + return netdev_alloc_frag(fp->rx_frag_size); + } + + return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); +} + +#ifdef CONFIG_INET +static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); +} + +static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); +} + +static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb, + void (*gro_func)(struct bnx2x*, struct sk_buff*)) +{ + skb_set_network_header(skb, 0); + gro_func(bp, skb); + tcp_gro_complete(skb); +} +#endif + +static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + if (skb_shinfo(skb)->gso_size) { + switch (be16_to_cpu(skb->protocol)) { + case ETH_P_IP: + bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum); + break; + case ETH_P_IPV6: + bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); + break; + default: + BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + be16_to_cpu(skb->protocol)); + } + } +#endif + 
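+	/* When CONFIG_INET is not set, or no gso_size was recorded for this
+	 * aggregation, the skb falls straight through to napi_gro_receive()
+	 * below without the pseudo-header checksum completion performed by
+	 * bnx2x_gro_csum().
+	 */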
skb_record_rx_queue(skb, fp->rx_queue); + napi_gro_receive(&fp->napi, skb); +} + +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct bnx2x_agg_info *tpa_info, + u16 pages, + struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) +{ + struct sw_rx_bd *rx_buf = &tpa_info->first_buf; + u8 pad = tpa_info->placement_offset; + u16 len = tpa_info->len_on_bd; + struct sk_buff *skb = NULL; + u8 *new_data, *data = rx_buf->data; + u8 old_tpa_state = tpa_info->tpa_state; + + tpa_info->tpa_state = BNX2X_TPA_STOP; + + /* If we there was an error during the handling of the TPA_START - + * drop this aggregation. + */ + if (old_tpa_state == BNX2X_TPA_ERROR) + goto drop; + + /* Try to allocate the new data */ + new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC); + /* Unmap skb in the pool anyway, as we are going to change + pool entry status to BNX2X_TPA_STOP even if new skb allocation + fails. */ + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + if (likely(new_data)) + skb = build_skb(data, fp->rx_frag_size); + + if (likely(skb)) { +#ifdef BNX2X_STOP_ON_ERROR + if (pad + len > fp->rx_buf_size) { + BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n", + pad, len, fp->rx_buf_size); + bnx2x_panic(); + return; + } +#endif + + skb_reserve(skb, pad + NET_SKB_PAD); + skb_put(skb, len); + skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type); + + skb->protocol = eth_type_trans(skb, bp->dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages, + skb, cqe, cqe_idx)) { + if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag); + bnx2x_gro_receive(bp, fp, skb); + } else { + DP(NETIF_MSG_RX_STATUS, + "Failed to allocate new pages - dropping packet!\n"); + dev_kfree_skb_any(skb); + } + + /* put new data in bin */ + rx_buf->data = new_data; + + return; + } + if (new_data) + bnx2x_frag_free(fp, new_data); +drop: + /* drop the packet and keep the buffer in the bin */ + DP(NETIF_MSG_RX_STATUS, + "Failed to allocate or map a new skb - dropping packet!\n"); + bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; +} + +static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 index, gfp_t gfp_mask) +{ + u8 *data; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + data = bnx2x_frag_alloc(fp, gfp_mask); + if (unlikely(data == NULL)) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, + fp->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + bnx2x_frag_free(fp, data); + BNX2X_ERR("Can't map rx data\n"); + return -ENOMEM; + } + + rx_buf->data = data; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + +static +void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, + struct bnx2x_fastpath *fp, + struct bnx2x_eth_q_stats *qstats) +{ + /* Do nothing if no L4 csum validation was done. + * We do not check whether IP csum was validated. For IPv4 we assume + * that if the card got as far as validating the L4 csum, it also + * validated the IP csum. IPv6 has no IP csum. 
+ */ + if (cqe->fast_path_cqe.status_flags & + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) + return; + + /* If L4 validation was done, check if an error was found. */ + + if (cqe->fast_path_cqe.type_error_flags & + (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) + qstats->hw_csum_err++; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) +{ + struct bnx2x *bp = fp->bp; + u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; + u16 sw_comp_cons, sw_comp_prod; + int rx_pkt = 0; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + if (budget <= 0) + return rx_pkt; + + bd_cons = fp->rx_bd_cons; + bd_prod = fp->rx_bd_prod; + bd_prod_fw = bd_prod; + sw_comp_cons = fp->rx_comp_cons; + sw_comp_prod = fp->rx_comp_prod; + + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons); + + while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) { + struct sw_rx_bd *rx_buf = NULL; + struct sk_buff *skb; + u8 cqe_fp_flags; + enum eth_rx_cqe_type cqe_fp_type; + u16 len, pad, queue; + u8 *data; + u32 rxhash; + enum pkt_hash_types rxhash_type; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + + bd_prod = RX_BD(bd_prod); + bd_cons = RX_BD(bd_cons); + + /* A rmb() is required to ensure that the CQE is not read + * before it is written by the adapter DMA. PCI ordering + * rules will make sure the other fields are written before + * the marker at the end of struct eth_fast_path_rx_cqe + * but without rmb() a weakly ordered processor can process + * stale data. Without the barrier TPA state-machine might + * enter inconsistent state and kernel stack might be + * provided with incorrect packet description - these lead + * to various kernel crashed. + */ + rmb(); + + cqe_fp_flags = cqe_fp->type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + + DP(NETIF_MSG_RX_STATUS, + "CQE type %x err %x status %x queue %x vlan %x len %u\n", + CQE_TYPE(cqe_fp_flags), + cqe_fp_flags, cqe_fp->status_flags, + le32_to_cpu(cqe_fp->rss_hash_result), + le16_to_cpu(cqe_fp->vlan_tag), + le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len)); + + /* is this a slowpath msg? 
*/ + if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { + bnx2x_sp_event(fp, cqe); + goto next_cqe; + } + + rx_buf = &fp->rx_buf_ring[bd_cons]; + data = rx_buf->data; + + if (!CQE_TYPE_FAST(cqe_fp_type)) { + struct bnx2x_agg_info *tpa_info; + u16 frag_size, pages; +#ifdef BNX2X_STOP_ON_ERROR + /* sanity check */ + if (fp->mode == TPA_MODE_DISABLED && + (CQE_TYPE_START(cqe_fp_type) || + CQE_TYPE_STOP(cqe_fp_type))) + BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n", + CQE_TYPE(cqe_fp_type)); +#endif + + if (CQE_TYPE_START(cqe_fp_type)) { + u16 queue = cqe_fp->queue_index; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_start on queue %d\n", + queue); + + bnx2x_tpa_start(fp, queue, + bd_cons, bd_prod, + cqe_fp); + + goto next_rx; + } + queue = cqe->end_agg_cqe.queue_index; + tpa_info = &fp->tpa_info[queue]; + DP(NETIF_MSG_RX_STATUS, + "calling tpa_stop on queue %d\n", + queue); + + frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) - + tpa_info->len_on_bd; + + if (fp->mode == TPA_MODE_GRO) + pages = (frag_size + tpa_info->full_page - 1) / + tpa_info->full_page; + else + pages = SGE_PAGE_ALIGN(frag_size) >> + SGE_PAGE_SHIFT; + + bnx2x_tpa_stop(bp, fp, tpa_info, pages, + &cqe->end_agg_cqe, comp_ring_cons); +#ifdef BNX2X_STOP_ON_ERROR + if (bp->panic) + return 0; +#endif + + bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe); + goto next_cqe; + } + /* non TPA */ + len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len); + pad = cqe_fp->placement_offset; + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); + pad += NET_SKB_PAD; + prefetch(data + pad); /* speedup eth_type_trans() */ + /* is this an error packet? */ + if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { + DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, + "ERROR flags %x rx packet %u\n", + cqe_fp_flags, sw_comp_cons); + bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; + goto reuse_rx; + } + + /* Since we don't have a jumbo ring + * copy small packets if mtu > 1500 + */ + if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && + (len <= RX_COPY_THRESH)) { + skb = napi_alloc_skb(&fp->napi, len); + if (skb == NULL) { + DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, + "ERROR packet dropped because of alloc failure\n"); + bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + goto reuse_rx; + } + memcpy(skb->data, data + pad, len); + bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); + } else { + if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod, + GFP_ATOMIC) == 0)) { + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, + DMA_FROM_DEVICE); + skb = build_skb(data, fp->rx_frag_size); + if (unlikely(!skb)) { + bnx2x_frag_free(fp, data); + bnx2x_fp_qstats(bp, fp)-> + rx_skb_alloc_failed++; + goto next_rx; + } + skb_reserve(skb, pad); + } else { + DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, + "ERROR packet dropped because of alloc failure\n"); + bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; +reuse_rx: + bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); + goto next_rx; + } + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, bp->dev); + + /* Set Toeplitz hash for a none-LRO skb */ + rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); + skb_set_hash(skb, rxhash, rxhash_type); + + skb_checksum_none_assert(skb); + + if (bp->dev->features & NETIF_F_RXCSUM) + bnx2x_csum_validate(skb, cqe, fp, + bnx2x_fp_qstats(bp, fp)); + + skb_record_rx_queue(skb, fp->rx_queue); + + /* Check if this packet was timestamped */ + if (unlikely(cqe->fast_path_cqe.type_error_flags & + (1 << 
ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT))) + bnx2x_set_rx_ts(bp, skb); + + if (le16_to_cpu(cqe_fp->pars_flags.flags) & + PARSING_FLAGS_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(cqe_fp->vlan_tag)); + + skb_mark_napi_id(skb, &fp->napi); + + if (bnx2x_fp_ll_polling(fp)) + netif_receive_skb(skb); + else + napi_gro_receive(&fp->napi, skb); +next_rx: + rx_buf->data = NULL; + + bd_cons = NEXT_RX_IDX(bd_cons); + bd_prod = NEXT_RX_IDX(bd_prod); + bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); + rx_pkt++; +next_cqe: + sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod); + sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); + + /* mark CQE as free */ + BNX2X_SEED_CQE(cqe_fp); + + if (rx_pkt == budget) + break; + + comp_ring_cons = RCQ_BD(sw_comp_cons); + cqe = &fp->rx_comp_ring[comp_ring_cons]; + cqe_fp = &cqe->fast_path_cqe; + } /* while */ + + fp->rx_bd_cons = bd_cons; + fp->rx_bd_prod = bd_prod_fw; + fp->rx_comp_cons = sw_comp_cons; + fp->rx_comp_prod = sw_comp_prod; + + /* Update producers */ + bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, + fp->rx_sge_prod); + + fp->rx_pkt += rx_pkt; + fp->rx_calls++; + + return rx_pkt; +} + +static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) +{ + struct bnx2x_fastpath *fp = fp_cookie; + struct bnx2x *bp = fp->bp; + u8 cos; + + DP(NETIF_MSG_INTR, + "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n", + fp->index, fp->fw_sb_id, fp->igu_sb_id); + + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + + /* Handle Rx and Tx according to MSI-X vector */ + for_each_cos_in_tx_queue(fp, cos) + prefetch(fp->txdata_ptr[cos]->tx_cons_sb); + + prefetch(&fp->sb_running_index[SM_RX_ID]); + napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); + + return IRQ_HANDLED; +} + +/* HW Lock for shared dual port PHYs */ +void bnx2x_acquire_phy_lock(struct bnx2x *bp) +{ + mutex_lock(&bp->port.phy_mutex); + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); +} + +void bnx2x_release_phy_lock(struct bnx2x *bp) +{ + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); + + mutex_unlock(&bp->port.phy_mutex); +} + +/* calculates MF speed according to current linespeed and MF configuration */ +u16 bnx2x_get_mf_speed(struct bnx2x *bp) +{ + u16 line_speed = bp->link_vars.line_speed; + if (IS_MF(bp)) { + u16 maxCfg = bnx2x_extract_max_cfg(bp, + bp->mf_config[BP_VN(bp)]); + + /* Calculate the current MAX line speed limit for the MF + * devices + */ + if (IS_MF_SI(bp)) + line_speed = (line_speed * maxCfg) / 100; + else { /* SD mode */ + u16 vn_max_rate = maxCfg * 100; + + if (vn_max_rate < line_speed) + line_speed = vn_max_rate; + } + } + + return line_speed; +} + +/** + * bnx2x_fill_report_data - fill link report data to report + * + * @bp: driver handle + * @data: link state to update + * + * It uses a none-atomic bit operations because is called under the mutex. 
+ */ +static void bnx2x_fill_report_data(struct bnx2x *bp, + struct bnx2x_link_report_data *data) +{ + memset(data, 0, sizeof(*data)); + + if (IS_PF(bp)) { + /* Fill the report data: effective line speed */ + data->line_speed = bnx2x_get_mf_speed(bp); + + /* Link is down */ + if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) + __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &data->link_report_flags); + + if (!BNX2X_NUM_ETH_QUEUES(bp)) + __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &data->link_report_flags); + + /* Full DUPLEX */ + if (bp->link_vars.duplex == DUPLEX_FULL) + __set_bit(BNX2X_LINK_REPORT_FD, + &data->link_report_flags); + + /* Rx Flow Control is ON */ + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) + __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &data->link_report_flags); + + /* Tx Flow Control is ON */ + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) + __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &data->link_report_flags); + } else { /* VF */ + *data = bp->vf_link_vars; + } +} + +/** + * bnx2x_link_report - report link status to OS. + * + * @bp: driver handle + * + * Calls the __bnx2x_link_report() under the same locking scheme + * as a link/PHY state managing code to ensure a consistent link + * reporting. + */ + +void bnx2x_link_report(struct bnx2x *bp) +{ + bnx2x_acquire_phy_lock(bp); + __bnx2x_link_report(bp); + bnx2x_release_phy_lock(bp); +} + +/** + * __bnx2x_link_report - report link status to OS. + * + * @bp: driver handle + * + * None atomic implementation. + * Should be called under the phy_lock. + */ +void __bnx2x_link_report(struct bnx2x *bp) +{ + struct bnx2x_link_report_data cur_data; + + /* reread mf_cfg */ + if (IS_PF(bp) && !CHIP_IS_E1(bp)) + bnx2x_read_mf_cfg(bp); + + /* Read the current link report info */ + bnx2x_fill_report_data(bp, &cur_data); + + /* Don't report link down or exactly the same link status twice */ + if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) || + (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &bp->last_reported_link.link_report_flags) && + test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags))) + return; + + bp->link_cnt++; + + /* We are going to report a new link parameters now - + * remember the current data for the next time. + */ + memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data)); + + /* propagate status to VFs */ + if (IS_PF(bp)) + bnx2x_iov_link_update(bp); + + if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &cur_data.link_report_flags)) { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC Link is Down\n"); + return; + } else { + const char *duplex; + const char *flow; + + netif_carrier_on(bp->dev); + + if (test_and_clear_bit(BNX2X_LINK_REPORT_FD, + &cur_data.link_report_flags)) + duplex = "full"; + else + duplex = "half"; + + /* Handle the FC at the end so that only these flags would be + * possibly set. This way we may easily check if there is no FC + * enabled. 
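+ * At this point LINK_DOWN cannot be set (handled above) and FD has
+ * just been cleared, so the flow string is derived from whichever of
+ * RX_FC_ON/TX_FC_ON remain: both, receive only, transmit only or none.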
+ */ + if (cur_data.link_report_flags) { + if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &cur_data.link_report_flags)) { + if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &cur_data.link_report_flags)) + flow = "ON - receive & transmit"; + else + flow = "ON - receive"; + } else { + flow = "ON - transmit"; + } + } else { + flow = "none"; + } + netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", + cur_data.line_speed, duplex, flow); + } +} + +static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; + sge->addr_hi = + cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } +} + +static void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + for (i = 0; i < last; i++) { + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + u8 *data = first_buf->data; + + if (data == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + if (tpa_info->tpa_state == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(first_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + bnx2x_frag_free(fp, data); + first_buf->data = NULL; + } +} + +void bnx2x_init_rx_rings_cnic(struct bnx2x *bp) +{ + int j; + + for_each_rx_queue_cnic(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + fp->rx_bd_cons = 0; + + /* Activate BD ring */ + /* Warning! + * this will generate an interrupt (to the TSTORM) + * must only be done after chip is initialized + */ + bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, + fp->rx_sge_prod); + } +} + +void bnx2x_init_rx_rings(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + u16 ring_prod; + int i, j; + + /* Allocate TPA resources */ + for_each_eth_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + DP(NETIF_MSG_IFUP, + "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); + + if (fp->mode != TPA_MODE_DISABLED) { + /* Fill the per-aggregation pool */ + for (i = 0; i < MAX_AGG_QS(bp); i++) { + struct bnx2x_agg_info *tpa_info = + &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = + &tpa_info->first_buf; + + first_buf->data = + bnx2x_frag_alloc(fp, GFP_KERNEL); + if (!first_buf->data) { + BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", + j); + bnx2x_free_tpa_pool(bp, fp, i); + fp->mode = TPA_MODE_DISABLED; + break; + } + dma_unmap_addr_set(first_buf, mapping, 0); + tpa_info->tpa_state = BNX2X_TPA_STOP; + } + + /* "next page" elements initialization */ + bnx2x_set_next_page_sgl(fp); + + /* set SGEs bit mask */ + bnx2x_init_sge_ring_bit_mask(fp); + + /* Allocate SGEs and initialize the ring elements */ + for (i = 0, ring_prod = 0; + i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { + + if (bnx2x_alloc_rx_sge(bp, fp, ring_prod, + GFP_KERNEL) < 0) { + BNX2X_ERR("was only able to allocate %d rx sges\n", + i); + BNX2X_ERR("disabling TPA for queue[%d]\n", + j); + /* Cleanup already allocated elements */ + bnx2x_free_rx_sge_range(bp, fp, + ring_prod); + bnx2x_free_tpa_pool(bp, fp, + MAX_AGG_QS(bp)); + fp->mode = TPA_MODE_DISABLED; + ring_prod = 0; + break; + } + ring_prod = NEXT_SGE_IDX(ring_prod); + } + + fp->rx_sge_prod = ring_prod; + } + } + + for_each_eth_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + 
fp->rx_bd_cons = 0; + + /* Activate BD ring */ + /* Warning! + * this will generate an interrupt (to the TSTORM) + * must only be done after chip is initialized + */ + bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, + fp->rx_sge_prod); + + if (j != 0) + continue; + + if (CHIP_IS_E1(bp)) { + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), + U64_LO(fp->rx_comp_mapping)); + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, + U64_HI(fp->rx_comp_mapping)); + } + } +} + +static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp) +{ + u8 cos; + struct bnx2x *bp = fp->bp; + + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + unsigned pkts_compl = 0, bytes_compl = 0; + + u16 sw_prod = txdata->tx_pkt_prod; + u16 sw_cons = txdata->tx_pkt_cons; + + while (sw_cons != sw_prod) { + bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), + &pkts_compl, &bytes_compl); + sw_cons++; + } + + netdev_tx_reset_queue( + netdev_get_tx_queue(bp->dev, + txdata->txq_index)); + } +} + +static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp) +{ + int i; + + for_each_tx_queue_cnic(bp, i) { + bnx2x_free_tx_skbs_queue(&bp->fp[i]); + } +} + +static void bnx2x_free_tx_skbs(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) { + bnx2x_free_tx_skbs_queue(&bp->fp[i]); + } +} + +static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + int i; + + /* ring wasn't allocated */ + if (fp->rx_buf_ring == NULL) + return; + + for (i = 0; i < NUM_RX_BD; i++) { + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; + u8 *data = rx_buf->data; + + if (data == NULL) + continue; + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + + rx_buf->data = NULL; + bnx2x_frag_free(fp, data); + } +} + +static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp) +{ + int j; + + for_each_rx_queue_cnic(bp, j) { + bnx2x_free_rx_bds(&bp->fp[j]); + } +} + +static void bnx2x_free_rx_skbs(struct bnx2x *bp) +{ + int j; + + for_each_eth_queue(bp, j) { + struct bnx2x_fastpath *fp = &bp->fp[j]; + + bnx2x_free_rx_bds(fp); + + if (fp->mode != TPA_MODE_DISABLED) + bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); + } +} + +static void bnx2x_free_skbs_cnic(struct bnx2x *bp) +{ + bnx2x_free_tx_skbs_cnic(bp); + bnx2x_free_rx_skbs_cnic(bp); +} + +void bnx2x_free_skbs(struct bnx2x *bp) +{ + bnx2x_free_tx_skbs(bp); + bnx2x_free_rx_skbs(bp); +} + +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) +{ + /* load old values */ + u32 mf_cfg = bp->mf_config[BP_VN(bp)]; + + if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { + /* leave all but MAX value */ + mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; + + /* set new MAX value */ + mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) + & FUNC_MF_CFG_MAX_BW_MASK; + + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); + } +} + +/** + * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors + * + * @bp: driver handle + * @nvecs: number of vectors to be released + */ +static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) +{ + int i, offset = 0; + + if (nvecs == offset) + return; + + /* VFs don't have a default SB */ + if (IS_PF(bp)) { + free_irq(bp->msix_table[offset].vector, bp->dev); + DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", + bp->msix_table[offset].vector); + offset++; + } + + if (CNIC_SUPPORT(bp)) { + if (nvecs == offset) + return; + offset++; + } + + for_each_eth_queue(bp, i) { + if (nvecs == offset) + return; + 
DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n", + i, bp->msix_table[offset].vector); + + free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); + } +} + +void bnx2x_free_irq(struct bnx2x *bp) +{ + if (bp->flags & USING_MSIX_FLAG && + !(bp->flags & USING_SINGLE_MSIX_FLAG)) { + int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp); + + /* vfs don't have a default status block */ + if (IS_PF(bp)) + nvecs++; + + bnx2x_free_msix_irqs(bp, nvecs); + } else { + free_irq(bp->dev->irq, bp->dev); + } +} + +int bnx2x_enable_msix(struct bnx2x *bp) +{ + int msix_vec = 0, i, rc; + + /* VFs don't have a default status block */ + if (IS_PF(bp)) { + bp->msix_table[msix_vec].entry = msix_vec; + BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n", + bp->msix_table[0].entry); + msix_vec++; + } + + /* Cnic requires an msix vector for itself */ + if (CNIC_SUPPORT(bp)) { + bp->msix_table[msix_vec].entry = msix_vec; + BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n", + msix_vec, bp->msix_table[msix_vec].entry); + msix_vec++; + } + + /* We need separate vectors for ETH queues only (not FCoE) */ + for_each_eth_queue(bp, i) { + bp->msix_table[msix_vec].entry = msix_vec; + BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n", + msix_vec, msix_vec, i); + msix_vec++; + } + + DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n", + msix_vec); + + rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], + BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec); + /* + * reconfigure number of tx/rx queues according to available + * MSI-X vectors + */ + if (rc == -ENOSPC) { + /* Get by with single vector */ + rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); + if (rc < 0) { + BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n", + rc); + goto no_msix; + } + + BNX2X_DEV_INFO("Using single MSI-X vector\n"); + bp->flags |= USING_SINGLE_MSIX_FLAG; + + BNX2X_DEV_INFO("set number of queues to 1\n"); + bp->num_ethernet_queues = 1; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + } else if (rc < 0) { + BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); + goto no_msix; + } else if (rc < msix_vec) { + /* how less vectors we will have? 
*/ + int diff = msix_vec - rc; + + BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); + + /* + * decrease number of queues by number of unallocated entries + */ + bp->num_ethernet_queues -= diff; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + + BNX2X_DEV_INFO("New queue configuration set: %d\n", + bp->num_queues); + } + + bp->flags |= USING_MSIX_FLAG; + + return 0; + +no_msix: + /* fall to INTx if not enough memory */ + if (rc == -ENOMEM) + bp->flags |= DISABLE_MSI_FLAG; + + return rc; +} + +static int bnx2x_req_msix_irqs(struct bnx2x *bp) +{ + int i, rc, offset = 0; + + /* no default status block for vf */ + if (IS_PF(bp)) { + rc = request_irq(bp->msix_table[offset++].vector, + bnx2x_msix_sp_int, 0, + bp->dev->name, bp->dev); + if (rc) { + BNX2X_ERR("request sp irq failed\n"); + return -EBUSY; + } + } + + if (CNIC_SUPPORT(bp)) + offset++; + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + bp->dev->name, i); + + rc = request_irq(bp->msix_table[offset].vector, + bnx2x_msix_fp_int, 0, fp->name, fp); + if (rc) { + BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, + bp->msix_table[offset].vector, rc); + bnx2x_free_msix_irqs(bp, offset); + return -EBUSY; + } + + offset++; + } + + i = BNX2X_NUM_ETH_QUEUES(bp); + if (IS_PF(bp)) { + offset = 1 + CNIC_SUPPORT(bp); + netdev_info(bp->dev, + "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", + bp->msix_table[0].vector, + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + } else { + offset = CNIC_SUPPORT(bp); + netdev_info(bp->dev, + "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n", + 0, bp->msix_table[offset].vector, + i - 1, bp->msix_table[offset + i - 1].vector); + } + return 0; +} + +int bnx2x_enable_msi(struct bnx2x *bp) +{ + int rc; + + rc = pci_enable_msi(bp->pdev); + if (rc) { + BNX2X_DEV_INFO("MSI is not attainable\n"); + return -1; + } + bp->flags |= USING_MSI_FLAG; + + return 0; +} + +static int bnx2x_req_irq(struct bnx2x *bp) +{ + unsigned long flags; + unsigned int irq; + + if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG)) + flags = 0; + else + flags = IRQF_SHARED; + + if (bp->flags & USING_MSIX_FLAG) + irq = bp->msix_table[0].vector; + else + irq = bp->pdev->irq; + + return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); +} + +static int bnx2x_setup_irqs(struct bnx2x *bp) +{ + int rc = 0; + if (bp->flags & USING_MSIX_FLAG && + !(bp->flags & USING_SINGLE_MSIX_FLAG)) { + rc = bnx2x_req_msix_irqs(bp); + if (rc) + return rc; + } else { + rc = bnx2x_req_irq(bp); + if (rc) { + BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); + return rc; + } + if (bp->flags & USING_MSI_FLAG) { + bp->dev->irq = bp->pdev->irq; + netdev_info(bp->dev, "using MSI IRQ %d\n", + bp->dev->irq); + } + if (bp->flags & USING_MSIX_FLAG) { + bp->dev->irq = bp->msix_table[0].vector; + netdev_info(bp->dev, "using MSIX IRQ %d\n", + bp->dev->irq); + } + } + + return 0; +} + +static void bnx2x_napi_enable_cnic(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue_cnic(bp, i) { + bnx2x_fp_busy_poll_init(&bp->fp[i]); + napi_enable(&bnx2x_fp(bp, i, napi)); + } +} + +static void bnx2x_napi_enable(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) { + bnx2x_fp_busy_poll_init(&bp->fp[i]); + napi_enable(&bnx2x_fp(bp, i, napi)); + } +} + +static void bnx2x_napi_disable_cnic(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue_cnic(bp, i) { + napi_disable(&bnx2x_fp(bp, i, napi)); + while 
(!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); + } +} + +static void bnx2x_napi_disable(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) { + napi_disable(&bnx2x_fp(bp, i, napi)); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); + } +} + +void bnx2x_netif_start(struct bnx2x *bp) +{ + if (netif_running(bp->dev)) { + bnx2x_napi_enable(bp); + if (CNIC_LOADED(bp)) + bnx2x_napi_enable_cnic(bp); + bnx2x_int_enable(bp); + if (bp->state == BNX2X_STATE_OPEN) + netif_tx_wake_all_queues(bp->dev); + } +} + +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) +{ + bnx2x_int_disable_sync(bp, disable_hw); + bnx2x_napi_disable(bp); + if (CNIC_LOADED(bp)) + bnx2x_napi_disable_cnic(bp); +} + +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (CNIC_LOADED(bp) && !NO_FCOE(bp)) { + struct ethhdr *hdr = (struct ethhdr *)skb->data; + u16 ether_type = ntohs(hdr->h_proto); + + /* Skip VLAN tag if present */ + if (ether_type == ETH_P_8021Q) { + struct vlan_ethhdr *vhdr = + (struct vlan_ethhdr *)skb->data; + + ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); + } + + /* If ethertype is FCoE or FIP - use FCoE ring */ + if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) + return bnx2x_fcoe_tx(bp, txq_index); + } + + /* select a non-FCoE queue */ + return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); +} + +void bnx2x_set_num_queues(struct bnx2x *bp) +{ + /* RSS queues */ + bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); + + /* override in STORAGE SD modes */ + if (IS_MF_STORAGE_ONLY(bp)) + bp->num_ethernet_queues = 1; + + /* Add special queues */ + bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */ + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + + BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); +} + +/** + * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues + * + * @bp: Driver handle + * + * We currently support for at most 16 Tx queues for each CoS thus we will + * allocate a multiple of 16 for ETH L2 rings according to the value of the + * bp->max_cos. + * + * If there is an FCoE L2 queue the appropriate Tx queue will have the next + * index after all ETH L2 indices. + * + * If the actual number of Tx queues (for each CoS) is less than 16 then there + * will be the holes at the end of each group of 16 ETh L2 indices (0..15, + * 16..31,...) with indices that are not coupled with any real Tx queue. + * + * The proper configuration of skb->queue_mapping is handled by + * bnx2x_select_queue() and __skb_tx_hash(). + * + * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() + * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 
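+ *
+ * As an illustration: with 8 ETH L2 queues and bp->max_cos == 3 this
+ * yields 24 real Tx queues and 8 real Rx queues; when the CNIC/FCoE
+ * queue is included, both counts grow by one.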
+ */ +static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic) +{ + int rc, tx, rx; + + tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; + rx = BNX2X_NUM_ETH_QUEUES(bp); + +/* account for fcoe queue */ + if (include_cnic && !NO_FCOE(bp)) { + rx++; + tx++; + } + + rc = netif_set_real_num_tx_queues(bp->dev, tx); + if (rc) { + BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc); + return rc; + } + rc = netif_set_real_num_rx_queues(bp->dev, rx); + if (rc) { + BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc); + return rc; + } + + DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n", + tx, rx); + + return rc; +} + +static void bnx2x_set_rx_buf_size(struct bnx2x *bp) +{ + int i; + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + u32 mtu; + + /* Always use a mini-jumbo MTU for the FCoE L2 ring */ + if (IS_FCOE_IDX(i)) + /* + * Although there are no IP frames expected to arrive to + * this ring we still want to add an + * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer + * overrun attack. + */ + mtu = BNX2X_FCOE_MINI_JUMBO_MTU; + else + mtu = bp->dev->mtu; + fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + + IP_HEADER_ALIGNMENT_PADDING + + ETH_OVREHEAD + + mtu + + BNX2X_FW_RX_ALIGN_END; + /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ + if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) + fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; + else + fp->rx_frag_size = 0; + } +} + +static int bnx2x_init_rss(struct bnx2x *bp) +{ + int i; + u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); + + /* Prepare the initial contents for the indirection table if RSS is + * enabled + */ + for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) + bp->rss_conf_obj.ind_table[i] = + bp->fp->cl_id + + ethtool_rxfh_indir_default(i, num_eth_queues); + + /* + * For 57710 and 57711 SEARCHER configuration (rss_keys) is + * per-port, so if explicit configuration is needed , do it only + * for a PMF. + * + * For 57712 and newer on the other hand it's a per-function + * configuration. + */ + return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); +} + +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable) +{ + struct bnx2x_config_rss_params params = {NULL}; + + /* Although RSS is meaningless when there is a single HW queue we + * still need it enabled in order to have HW Rx hash generated. 
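+ * When enable is true the ramrod is built for regular RSS mode with
+ * IPv4/IPv6 TCP hashing (plus UDP hashing when udp_rss is set, and
+ * inner GRE headers on non-E1x chips); otherwise the RSS mode is set
+ * to disabled.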
+ * + * if (!is_eth_multi(bp)) + * bp->multi_mode = ETH_RSS_MODE_DISABLED; + */ + + params.rss_obj = rss_obj; + + __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); + + if (enable) { + __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); + + /* RSS configuration */ + __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v4) + __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v6) + __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + + if (!CHIP_IS_E1x(bp)) + /* valid only for TUNN_MODE_GRE tunnel mode */ + __set_bit(BNX2X_RSS_GRE_INNER_HDRS, ¶ms.rss_flags); + } else { + __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); + } + + /* Hash bits */ + params.rss_result_mask = MULTI_MASK; + + memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); + + if (config_hash) { + /* RSS keys */ + netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4); + __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); + } + + if (IS_PF(bp)) + return bnx2x_config_rss(bp, ¶ms); + else + return bnx2x_vfpf_config_rss(bp, ¶ms); +} + +static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) +{ + struct bnx2x_func_state_params func_params = {NULL}; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_HW_INIT; + + func_params.params.hw_init.load_phase = load_code; + + return bnx2x_func_state_change(bp, &func_params); +} + +/* + * Cleans the object that have internal lists without sending + * ramrods. Should be run when interrupts are disabled. + */ +void bnx2x_squeeze_objects(struct bnx2x *bp) +{ + int rc; + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; + + /***************** Cleanup MACs' object first *************************/ + + /* Wait for completion of requested */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Perform a dry cleanup */ + __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); + + /* Clean ETH primary MAC */ + __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) + BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); + + /* Cleanup UC list */ + vlan_mac_flags = 0; + __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) + BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); + + /***************** Now clean mcast object *****************************/ + rparam.mcast_obj = &bp->mcast_obj; + __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); + + /* Add a DEL command... - Since we're doing a driver cleanup only, + * we take a lock surrounding both the initial send and the CONTs, + * as we don't want a true completion to disrupt us in the middle. 
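+ * The DEL is therefore issued and then CONT commands are sent in a
+ * loop until no pending work is reported, all under
+ * netif_addr_lock_bh().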
+ */ + netif_addr_lock_bh(bp->dev); + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) + BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n", + rc); + + /* ...and wait until all pending commands are cleared */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + while (rc != 0) { + if (rc < 0) { + BNX2X_ERR("Failed to clean multi-cast object: %d\n", + rc); + netif_addr_unlock_bh(bp->dev); + return; + } + + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + } + netif_addr_unlock_bh(bp->dev); +} + +#ifndef BNX2X_STOP_ON_ERROR +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + goto label; \ + } while (0) + +#define LOAD_ERROR_EXIT_CNIC(bp, label) \ + do { \ + bp->cnic_loaded = false; \ + goto label; \ + } while (0) +#else /*BNX2X_STOP_ON_ERROR*/ +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + (bp)->panic = 1; \ + return -EBUSY; \ + } while (0) +#define LOAD_ERROR_EXIT_CNIC(bp, label) \ + do { \ + bp->cnic_loaded = false; \ + (bp)->panic = 1; \ + return -EBUSY; \ + } while (0) +#endif /*BNX2X_STOP_ON_ERROR*/ + +static void bnx2x_free_fw_stats_mem(struct bnx2x *bp) +{ + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + return; +} + +static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) +{ + int num_groups, vf_headroom = 0; + int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; + + /* number of queues for statistics is number of eth queues + FCoE */ + u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats; + + /* Total number of FW statistics requests = + * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper + * and fcoe l2 queue) stats + num of queues (which includes another 1 + * for fcoe l2 queue if applicable) + */ + bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats; + + /* vf stats appear in the request list, but their data is allocated by + * the VFs themselves. We don't include them in the bp->fw_stats_num as + * it is used to determine where to place the vf stats queries in the + * request struct + */ + if (IS_SRIOV(bp)) + vf_headroom = bnx2x_vf_headroom(bp); + + /* Request is built from stats_query_header and an array of + * stats_query_cmd_group each of which contains + * STATS_QUERY_CMD_COUNT rules. The real number or requests is + * configured in the stats_query_header. + */ + num_groups = + (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) + + (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ? + 1 : 0)); + + DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n", + bp->fw_stats_num, vf_headroom, num_groups); + bp->fw_stats_req_sz = sizeof(struct stats_query_header) + + num_groups * sizeof(struct stats_query_cmd_group); + + /* Data for statistics requests + stats_counter + * stats_counter holds per-STORM counters that are incremented + * when STORM has finished with the current request. + * memory for FCoE offloaded statistics are counted anyway, + * even if they will not be sent. + * VF stats are not accounted for here as the data of VF stats is stored + * in memory allocated by the VF, not here. 
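+ *
+ * The buffer sized below holds: per-port stats, per-PF stats, FCoE
+ * stats, one per-queue stats block per counted queue and a trailing
+ * stats_counter block.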
+ */ + bp->fw_stats_data_sz = sizeof(struct per_port_stats) + + sizeof(struct per_pf_stats) + + sizeof(struct fcoe_statistics_params) + + sizeof(struct per_queue_stats) * num_queue_stats + + sizeof(struct stats_counter); + + bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (!bp->fw_stats) + goto alloc_mem_err; + + /* Set shortcuts */ + bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; + bp->fw_stats_req_mapping = bp->fw_stats_mapping; + bp->fw_stats_data = (struct bnx2x_fw_stats_data *) + ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); + bp->fw_stats_data_mapping = bp->fw_stats_mapping + + bp->fw_stats_req_sz; + + DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n", + U64_HI(bp->fw_stats_req_mapping), + U64_LO(bp->fw_stats_req_mapping)); + DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n", + U64_HI(bp->fw_stats_data_mapping), + U64_LO(bp->fw_stats_data_mapping)); + return 0; + +alloc_mem_err: + bnx2x_free_fw_stats_mem(bp); + BNX2X_ERR("Can't allocate FW stats memory\n"); + return -ENOMEM; +} + +/* send load request to mcp and analyze response */ +static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code) +{ + u32 param; + + /* init fw_seq */ + bp->fw_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK); + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + + /* Get current FW pulse sequence */ + bp->fw_drv_pulse_wr_seq = + (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) & + DRV_PULSE_SEQ_MASK); + BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); + + param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA; + + if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp)) + param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA; + + /* load request */ + (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param); + + /* if mcp fails to respond we must abort */ + if (!(*load_code)) { + BNX2X_ERR("MCP response failure, aborting\n"); + return -EBUSY; + } + + /* If mcp refused (e.g. other port is in diagnostic mode) we + * must abort + */ + if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { + BNX2X_ERR("MCP refused load request, aborting\n"); + return -EBUSY; + } + return 0; +} + +/* check whether another PF has already loaded FW to chip. In + * virtualized environments a pf from another VM may have already + * initialized the device including loading FW + */ +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) +{ + /* is another pf loaded on this engine? */ + if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && + load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { + /* build my FW version dword */ + /*(DEBLOBBED)*/ + + /* read loaded FW from chip */ + u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + + u32 my_fw = ~loaded_fw; + + DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", + loaded_fw, my_fw); + + /* abort nic load if version mismatch */ + if (my_fw != loaded_fw) { + if (print_err) + BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. 
Aborting\n", + loaded_fw, my_fw); + else + BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", + loaded_fw, my_fw); + return -EBUSY; + } + } + return 0; +} + +/* returns the "mcp load_code" according to global load_count array */ +static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port) +{ + int path = BP_PATH(bp); + + DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + bnx2x_load_count[path][0]++; + bnx2x_load_count[path][1 + port]++; + DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + if (bnx2x_load_count[path][0] == 1) + return FW_MSG_CODE_DRV_LOAD_COMMON; + else if (bnx2x_load_count[path][1 + port] == 1) + return FW_MSG_CODE_DRV_LOAD_PORT; + else + return FW_MSG_CODE_DRV_LOAD_FUNCTION; +} + +/* mark PMF if applicable */ +static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code) +{ + if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { + bp->port.pmf = 1; + /* We need the barrier to ensure the ordering between the + * writing to bp->port.pmf here and reading it from the + * bnx2x_periodic_task(). + */ + smp_mb(); + } else { + bp->port.pmf = 0; + } + + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); +} + +static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code) +{ + if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || + (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && + (bp->common.shmem2_base)) { + if (SHMEM2_HAS(bp, dcc_support)) + SHMEM2_WR(bp, dcc_support, + (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | + SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + if (SHMEM2_HAS(bp, afex_driver_support)) + SHMEM2_WR(bp, afex_driver_support, + SHMEM_AFEX_SUPPORTED_VERSION_ONE); + } + + /* Set AFEX default VLAN tag to an invalid value */ + bp->afex_def_vlan_tag = -1; +} + +/** + * bnx2x_bz_fp - zero content of the fastpath structure. + * + * @bp: driver handle + * @index: fastpath index to be zeroed + * + * Makes sure the contents of the bp->fp[index].napi is kept + * intact. + */ +static void bnx2x_bz_fp(struct bnx2x *bp, int index) +{ + struct bnx2x_fastpath *fp = &bp->fp[index]; + int cos; + struct napi_struct orig_napi = fp->napi; + struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; + + /* bzero bnx2x_fastpath contents */ + if (fp->tpa_info) + memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 * + sizeof(struct bnx2x_agg_info)); + memset(fp, 0, sizeof(*fp)); + + /* Restore the NAPI object as it has been already initialized */ + fp->napi = orig_napi; + fp->tpa_info = orig_tpa_info; + fp->bp = bp; + fp->index = index; + if (IS_ETH_FP(fp)) + fp->max_cos = bp->max_cos; + else + /* Special queues support only one CoS */ + fp->max_cos = 1; + + /* Init txdata pointers */ + if (IS_FCOE_FP(fp)) + fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; + if (IS_ETH_FP(fp)) + for_each_cos_in_tx_queue(fp, cos) + fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * + BNX2X_NUM_ETH_QUEUES(bp) + index]; + + /* set the tpa flag for each queue. 
The tpa flag determines the queue + * minimal size so it must be set prior to queue memory allocation + */ + if (bp->dev->features & NETIF_F_LRO) + fp->mode = TPA_MODE_LRO; + else if (bp->dev->features & NETIF_F_GRO && + bnx2x_mtu_allows_gro(bp->dev->mtu)) + fp->mode = TPA_MODE_GRO; + else + fp->mode = TPA_MODE_DISABLED; + + /* We don't want TPA if it's disabled in bp + * or if this is an FCoE L2 ring. + */ + if (bp->disable_tpa || IS_FCOE_FP(fp)) + fp->mode = TPA_MODE_DISABLED; +} + +int bnx2x_load_cnic(struct bnx2x *bp) +{ + int i, rc, port = BP_PORT(bp); + + DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n"); + + mutex_init(&bp->cnic_mutex); + + if (IS_PF(bp)) { + rc = bnx2x_alloc_mem_cnic(bp); + if (rc) { + BNX2X_ERR("Unable to allocate bp memory for cnic\n"); + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + } + } + + rc = bnx2x_alloc_fp_mem_cnic(bp); + if (rc) { + BNX2X_ERR("Unable to allocate memory for cnic fps\n"); + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + } + + /* Update the number of queues with the cnic queues */ + rc = bnx2x_set_real_num_queues(bp, 1); + if (rc) { + BNX2X_ERR("Unable to set real_num_queues including cnic\n"); + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0); + } + + /* Add all CNIC NAPI objects */ + bnx2x_add_all_napi_cnic(bp); + DP(NETIF_MSG_IFUP, "cnic napi added\n"); + bnx2x_napi_enable_cnic(bp); + + rc = bnx2x_init_hw_func_cnic(bp); + if (rc) + LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1); + + bnx2x_nic_init_cnic(bp); + + if (IS_PF(bp)) { + /* Enable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); + + /* setup cnic queues */ + for_each_cnic_queue(bp, i) { + rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); + if (rc) { + BNX2X_ERR("Queue setup failed\n"); + LOAD_ERROR_EXIT(bp, load_error_cnic2); + } + } + } + + /* Initialize Rx filter. */ + bnx2x_set_rx_mode_inner(bp); + + /* re-read iscsi info */ + bnx2x_get_iscsi_info(bp); + bnx2x_setup_cnic_irq_info(bp); + bnx2x_setup_cnic_info(bp); + bp->cnic_loaded = true; + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); + + DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n"); + + return 0; + +#ifndef BNX2X_STOP_ON_ERROR +load_error_cnic2: + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); + +load_error_cnic1: + bnx2x_napi_disable_cnic(bp); + /* Update the number of queues without the cnic queues */ + if (bnx2x_set_real_num_queues(bp, 0)) + BNX2X_ERR("Unable to set real_num_queues not including cnic\n"); +load_error_cnic0: + BNX2X_ERR("CNIC-related load failed\n"); + bnx2x_free_fp_mem_cnic(bp); + bnx2x_free_mem_cnic(bp); + return rc; +#endif /* ! BNX2X_STOP_ON_ERROR */ +} + +/* must be called with rtnl_lock */ +int bnx2x_nic_load(struct bnx2x *bp, int load_mode) +{ + int port = BP_PORT(bp); + int i, rc = 0, load_code = 0; + + DP(NETIF_MSG_IFUP, "Starting NIC load\n"); + DP(NETIF_MSG_IFUP, + "CNIC is %s\n", CNIC_ENABLED(bp) ? 
"enabled" : "disabled"); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) { + BNX2X_ERR("Can't load NIC when there is panic\n"); + return -EPERM; + } +#endif + + bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; + + /* zero the structure w/o any lock, before SP handler is initialized */ + memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); + __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &bp->last_reported_link.link_report_flags); + + if (IS_PF(bp)) + /* must be called before memory allocation and HW init */ + bnx2x_ilt_set_info(bp); + + /* + * Zero fastpath structures preserving invariants like napi, which are + * allocated only once, fp index, max_cos, bp pointer. + * Also set fp->mode and txdata_ptr. + */ + DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); + for_each_queue(bp, i) + bnx2x_bz_fp(bp, i); + memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + + bp->num_cnic_queues) * + sizeof(struct bnx2x_fp_txdata)); + + bp->fcoe_init = false; + + /* Set the receive queues buffer size */ + bnx2x_set_rx_buf_size(bp); + + if (IS_PF(bp)) { + rc = bnx2x_alloc_mem(bp); + if (rc) { + BNX2X_ERR("Unable to allocate bp memory\n"); + return rc; + } + } + + /* need to be done after alloc mem, since it's self adjusting to amount + * of memory available for RSS queues + */ + rc = bnx2x_alloc_fp_mem(bp); + if (rc) { + BNX2X_ERR("Unable to allocate memory for fps\n"); + LOAD_ERROR_EXIT(bp, load_error0); + } + + /* Allocated memory for FW statistics */ + if (bnx2x_alloc_fw_stats_mem(bp)) + LOAD_ERROR_EXIT(bp, load_error0); + + /* request pf to initialize status blocks */ + if (IS_VF(bp)) { + rc = bnx2x_vfpf_init(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error0); + } + + /* As long as bnx2x_alloc_mem() may possibly update + * bp->num_queues, bnx2x_set_real_num_queues() should always + * come after it. At this stage cnic queues are not counted. + */ + rc = bnx2x_set_real_num_queues(bp, 0); + if (rc) { + BNX2X_ERR("Unable to set real_num_queues\n"); + LOAD_ERROR_EXIT(bp, load_error0); + } + + /* configure multi cos mappings in kernel. + * this configuration may be overridden by a multi class queue + * discipline or by a dcbx negotiation result. + */ + bnx2x_setup_tc(bp->dev, bp->max_cos); + + /* Add all NAPI objects */ + bnx2x_add_all_napi(bp); + DP(NETIF_MSG_IFUP, "napi added\n"); + bnx2x_napi_enable(bp); + + if (IS_PF(bp)) { + /* set pf load just before approaching the MCP */ + bnx2x_set_pf_load(bp); + + /* if mcp exists send load request and analyze response */ + if (!BP_NOMCP(bp)) { + /* attempt to load pf */ + rc = bnx2x_nic_load_request(bp, &load_code); + if (rc) + LOAD_ERROR_EXIT(bp, load_error1); + + /* what did mcp say? 
*/ + rc = bnx2x_compare_fw_ver(bp, load_code, true); + if (rc) { + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + } else { + load_code = bnx2x_nic_load_no_mcp(bp, port); + } + + /* mark pmf if applicable */ + bnx2x_nic_load_pmf(bp, load_code); + + /* Init Function state controlling object */ + bnx2x__init_func_obj(bp); + + /* Initialize HW */ + rc = bnx2x_init_hw(bp, load_code); + if (rc) { + BNX2X_ERR("HW init failed, aborting\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + } + + bnx2x_pre_irq_nic_init(bp); + + /* Connect to IRQs */ + rc = bnx2x_setup_irqs(bp); + if (rc) { + BNX2X_ERR("setup irqs failed\n"); + if (IS_PF(bp)) + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error2); + } + + /* Init per-function objects */ + if (IS_PF(bp)) { + /* Setup NIC internals and enable interrupts */ + bnx2x_post_irq_nic_init(bp, load_code); + + bnx2x_init_bp_objs(bp); + bnx2x_iov_nic_init(bp); + + /* Set AFEX default VLAN tag to an invalid value */ + bp->afex_def_vlan_tag = -1; + bnx2x_nic_load_afex_dcc(bp, load_code); + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; + rc = bnx2x_func_start(bp); + if (rc) { + BNX2X_ERR("Function start failed!\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + + LOAD_ERROR_EXIT(bp, load_error3); + } + + /* Send LOAD_DONE command to MCP */ + if (!BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, + DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + LOAD_ERROR_EXIT(bp, load_error3); + } + } + + /* initialize FW coalescing state machines in RAM */ + bnx2x_update_coalesce(bp); + } + + /* setup the leading queue */ + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } + + /* set up the rest of the queues */ + for_each_nondefault_eth_queue(bp, i) { + if (IS_PF(bp)) + rc = bnx2x_setup_queue(bp, &bp->fp[i], false); + else /* VF */ + rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); + if (rc) { + BNX2X_ERR("Queue %d setup failed\n", i); + LOAD_ERROR_EXIT(bp, load_error3); + } + } + + /* setup rss */ + rc = bnx2x_init_rss(bp); + if (rc) { + BNX2X_ERR("PF RSS init failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } + + /* Now when Clients are configured we are ready to work */ + bp->state = BNX2X_STATE_OPEN; + + /* Configure a ucast MAC */ + if (IS_PF(bp)) + rc = bnx2x_set_eth_mac(bp, true); + else /* vf */ + rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, + true); + if (rc) { + BNX2X_ERR("Setting Ethernet MAC failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } + + if (IS_PF(bp) && bp->pending_max) { + bnx2x_update_max_mf_config(bp, bp->pending_max); + bp->pending_max = 0; + } + + if (bp->port.pmf) { + rc = bnx2x_initial_phy_init(bp, load_mode); + if (rc) + LOAD_ERROR_EXIT(bp, load_error3); + } + bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN; + + /* Start fast path */ + + /* Initialize Rx filter. 
*/ + bnx2x_set_rx_mode_inner(bp); + + if (bp->flags & PTP_SUPPORTED) { + bnx2x_init_ptp(bp); + bnx2x_configure_ptp_filters(bp); + } + /* Start Tx */ + switch (load_mode) { + case LOAD_NORMAL: + /* Tx queue should be only re-enabled */ + netif_tx_wake_all_queues(bp->dev); + break; + + case LOAD_OPEN: + netif_tx_start_all_queues(bp->dev); + smp_mb__after_atomic(); + break; + + case LOAD_DIAG: + case LOAD_LOOPBACK_EXT: + bp->state = BNX2X_STATE_DIAG; + break; + + default: + break; + } + + if (bp->port.pmf) + bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0); + else + bnx2x__link_status_update(bp); + + /* start the timer */ + mod_timer(&bp->timer, jiffies + bp->current_interval); + + if (CNIC_ENABLED(bp)) + bnx2x_load_cnic(bp); + + if (IS_PF(bp)) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + /* mark driver is loaded in shmem2 */ + u32 val; + val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | + DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + + /* Wait for all pending SP commands to complete */ + if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) { + BNX2X_ERR("Timeout waiting for SP elements to complete\n"); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); + return -EBUSY; + } + + /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ + if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) + bnx2x_dcbx_init(bp, false); + + DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n"); + + return 0; + +#ifndef BNX2X_STOP_ON_ERROR +load_error3: + if (IS_PF(bp)) { + bnx2x_int_disable_sync(bp, 1); + + /* Clean queueable objects */ + bnx2x_squeeze_objects(bp); + } + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + + /* Release IRQs */ + bnx2x_free_irq(bp); +load_error2: + if (IS_PF(bp) && !BP_NOMCP(bp)) { + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + } + + bp->port.pmf = 0; +load_error1: + bnx2x_napi_disable(bp); + bnx2x_del_all_napi(bp); + + /* clear pf_load status, as it was already set */ + if (IS_PF(bp)) + bnx2x_clear_pf_load(bp); +load_error0: + bnx2x_free_fw_stats_mem(bp); + bnx2x_free_fp_mem(bp); + bnx2x_free_mem(bp); + + return rc; +#endif /* ! 
BNX2X_STOP_ON_ERROR */ +} + +int bnx2x_drain_tx_queues(struct bnx2x *bp) +{ + u8 rc = 0, cos, i; + + /* Wait until tx fastpath tasks complete */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + for_each_cos_in_tx_queue(fp, cos) + rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); + if (rc) + return rc; + } + return 0; +} + +/* must be called with rtnl_lock */ +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) +{ + int i; + bool global = false; + + DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); + + /* mark driver is unloaded in shmem2 */ + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + u32 val; + val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + + if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE && + (bp->state == BNX2X_STATE_CLOSED || + bp->state == BNX2X_STATE_ERROR)) { + /* We can get here if the driver has been unloaded + * during parity error recovery and is either waiting for a + * leader to complete or for other functions to unload and + * then ifdown has been issued. In this case we want to + * unload and let other functions to complete a recovery + * process. + */ + bp->recovery_state = BNX2X_RECOVERY_DONE; + bp->is_leader = 0; + bnx2x_release_leader_lock(bp); + smp_mb(); + + DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n"); + BNX2X_ERR("Can't unload in closed or error state\n"); + return -EINVAL; + } + + /* Nothing to do during unload if previous bnx2x_nic_load() + * have not completed successfully - all resources are released. + * + * we can get here only after unsuccessful ndo_* callback, during which + * dev->IFF_UP flag is still on. + */ + if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR) + return 0; + + /* It's important to set the bp->state to the value different from + * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() + * may restart the Tx from the NAPI context (see bnx2x_tx_int()). + */ + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; + smp_mb(); + + /* indicate to VFs that the PF is going down */ + bnx2x_iov_channel_down(bp); + + if (CNIC_LOADED(bp)) + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); + + /* Stop Tx */ + bnx2x_tx_disable(bp); + netdev_reset_tc(bp->dev); + + bp->rx_mode = BNX2X_RX_MODE_NONE; + + del_timer_sync(&bp->timer); + + if (IS_PF(bp)) { + /* Set ALWAYS_ALIVE bit in shmem */ + bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + bnx2x_drv_pulse(bp); + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_save_statistics(bp); + } + + /* wait till consumers catch up with producers in all queues */ + bnx2x_drain_tx_queues(bp); + + /* if VF indicate to PF this function is going down (PF will delete sp + * elements and clear initializations + */ + if (IS_VF(bp)) + bnx2x_vfpf_close_vf(bp); + else if (unload_mode != UNLOAD_RECOVERY) + /* if this is a normal/close unload need to clean up chip*/ + bnx2x_chip_cleanup(bp, unload_mode, keep_link); + else { + /* Send the UNLOAD_REQUEST to the MCP */ + bnx2x_send_unload_req(bp, unload_mode); + + /* Prevent transactions to host from the functions on the + * engine that doesn't reset global blocks in case of global + * attention once global blocks are reset and gates are opened + * (the engine which leader will perform the recovery + * last). 
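+ * For non-E1x chips bnx2x_pf_disable() below blocks such transactions;
+ * in this recovery path only UNLOAD_REQ/UNLOAD_DONE are exchanged with
+ * the MCP and the regular chip cleanup is skipped.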
+ */ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_disable(bp); + + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + /* Release IRQs */ + bnx2x_free_irq(bp); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp, false); + } + + /* + * At this stage no more interrupts will arrive so we may safely clean + * the queueable objects here in case they failed to get cleaned so far. + */ + if (IS_PF(bp)) + bnx2x_squeeze_objects(bp); + + /* There should be no more pending SP commands at this stage */ + bp->sp_state = 0; + + bp->port.pmf = 0; + + /* clear pending work in rtnl task */ + bp->sp_rtnl_state = 0; + smp_mb(); + + /* Free SKBs, SGEs, TPA pool and driver internals */ + bnx2x_free_skbs(bp); + if (CNIC_LOADED(bp)) + bnx2x_free_skbs_cnic(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + + bnx2x_free_fp_mem(bp); + if (CNIC_LOADED(bp)) + bnx2x_free_fp_mem_cnic(bp); + + if (IS_PF(bp)) { + if (CNIC_LOADED(bp)) + bnx2x_free_mem_cnic(bp); + } + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + bp->cnic_loaded = false; + + /* Clear driver version indication in shmem */ + if (IS_PF(bp)) + bnx2x_update_mng_version(bp); + + /* Check if there are pending parity attentions. If there are - set + * RECOVERY_IN_PROGRESS. + */ + if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) { + bnx2x_set_reset_in_progress(bp); + + /* Set RESET_IS_GLOBAL if needed */ + if (global) + bnx2x_set_reset_global(bp); + } + + /* The last driver must disable a "close the gate" if there is no + * parity attention or "process kill" pending. + */ + if (IS_PF(bp) && + !bnx2x_clear_pf_load(bp) && + bnx2x_reset_is_done(bp, BP_PATH(bp))) + bnx2x_disable_close_the_gate(bp); + + DP(NETIF_MSG_IFUP, "Ending NIC unload\n"); + + return 0; +} + +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) +{ + u16 pmcsr; + + /* If there is no power capability, silently succeed */ + if (!bp->pdev->pm_cap) { + BNX2X_DEV_INFO("No power capability. Breaking.\n"); + return 0; + } + + pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr); + + switch (state) { + case PCI_D0: + pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, + ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | + PCI_PM_CTRL_PME_STATUS)); + + if (pmcsr & PCI_PM_CTRL_STATE_MASK) + /* delay required during transition out of D3hot */ + msleep(20); + break; + + case PCI_D3hot: + /* If there are other clients above don't + shut down the power */ + if (atomic_read(&bp->pdev->enable_cnt) != 1) + return 0; + /* Don't shut down the power for emulation and FPGA */ + if (CHIP_REV_IS_SLOW(bp)) + return 0; + + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pmcsr |= 3; + + if (bp->wol) + pmcsr |= PCI_PM_CTRL_PME_ENABLE; + + pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, + pmcsr); + + /* No more memory access after this point until + * device is brought back to D0. 
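+ * (pmcsr was just programmed to the D3hot state, with PME enabled
+ * when Wake-on-LAN is requested.)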
+ */ + break; + + default: + dev_err(&bp->pdev->dev, "Can't support state = %d\n", state); + return -EINVAL; + } + return 0; +} + +/* + * net_device service functions + */ +static int bnx2x_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0; + u8 cos; + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + + while (1) { +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) { + napi_complete(napi); + return 0; + } +#endif + if (!bnx2x_fp_lock_napi(fp)) + return budget; + + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) + bnx2x_tx_int(bp, fp->txdata_ptr[cos]); + + if (bnx2x_has_rx_work(fp)) { + work_done += bnx2x_rx_int(fp, budget - work_done); + + /* must not complete if we consumed full budget */ + if (work_done >= budget) { + bnx2x_fp_unlock_napi(fp); + break; + } + } + + bnx2x_fp_unlock_napi(fp); + + /* Fall out from the NAPI loop if needed */ + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + + /* No need to update SB for FCoE L2 ring as long as + * it's connected to the default SB and the SB + * has been updated when NAPI was scheduled. + */ + if (IS_FCOE_FP(fp)) { + napi_complete(napi); + break; + } + bnx2x_update_fpsb_idx(fp); + /* bnx2x_has_rx_work() reads the status block, + * thus we need to ensure that status block indices + * have been actually read (bnx2x_update_fpsb_idx) + * prior to this check (bnx2x_has_rx_work) so that + * we won't write the "newer" value of the status block + * to IGU (if there was a DMA right after + * bnx2x_has_rx_work and if there is no rmb, the memory + * reading (bnx2x_update_fpsb_idx) may be postponed + * to right before bnx2x_ack_sb). In this case there + * will never be another interrupt until there is + * another update of the status block, while there + * is still unhandled work. 
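+ * In short: the rmb() orders the status block read done in
+ * bnx2x_update_fpsb_idx() before the has_rx_work()/has_tx_work()
+ * re-check below.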
+ */ + rmb(); + + if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + napi_complete(napi); + /* Re-enable interrupts */ + DP(NETIF_MSG_RX_STATUS, + "Update index to %d\n", fp->fp_hc_idx); + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, + le16_to_cpu(fp->fp_hc_idx), + IGU_INT_ENABLE, 1); + break; + } + } + } + + return work_done; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +int bnx2x_low_latency_recv(struct napi_struct *napi) +{ + struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, + napi); + struct bnx2x *bp = fp->bp; + int found = 0; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR) || + (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) + return LL_FLUSH_FAILED; + + if (!bnx2x_fp_lock_poll(fp)) + return LL_FLUSH_BUSY; + + if (bnx2x_has_rx_work(fp)) + found = bnx2x_rx_int(fp, 4); + + bnx2x_fp_unlock_poll(fp); + + return found; +} +#endif + +/* we split the first BD into headers and data BDs + * to ease the pain of our fellow microcode engineers + * we use one mapping for both BDs + */ +static u16 bnx2x_tx_split(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, + struct sw_tx_bd *tx_buf, + struct eth_tx_start_bd **tx_bd, u16 hlen, + u16 bd_prod) +{ + struct eth_tx_start_bd *h_tx_bd = *tx_bd; + struct eth_tx_bd *d_tx_bd; + dma_addr_t mapping; + int old_len = le16_to_cpu(h_tx_bd->nbytes); + + /* first fix first BD */ + h_tx_bd->nbytes = cpu_to_le16(hlen); + + DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n", + h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo); + + /* now get a new data BD + * (after the pbd) and fill it */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + + mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), + le32_to_cpu(h_tx_bd->addr_lo)) + hlen; + + d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); + + /* this marks the BD as one that has no individual mapping */ + tx_buf->flags |= BNX2X_TSO_SPLIT_BD; + + DP(NETIF_MSG_TX_QUEUED, + "TSO split data size is %d (%x:%x)\n", + d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); + + /* update tx_bd */ + *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; + + return bd_prod; +} + +#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32))) +#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16))) +static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) +{ + __sum16 tsum = (__force __sum16) csum; + + if (fix > 0) + tsum = ~csum_fold(csum_sub((__force __wsum) csum, + csum_partial(t_header - fix, fix, 0))); + + else if (fix < 0) + tsum = ~csum_fold(csum_add((__force __wsum) csum, + csum_partial(t_header, -fix, 0))); + + return bswab16(tsum); +} + +static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) +{ + u32 rc; + __u8 prot = 0; + __be16 protocol; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return XMIT_PLAIN; + + protocol = vlan_get_protocol(skb); + if (protocol == htons(ETH_P_IPV6)) { + rc = XMIT_CSUM_V6; + prot = ipv6_hdr(skb)->nexthdr; + } else { + rc = XMIT_CSUM_V4; + prot = ip_hdr(skb)->protocol; + } + + if (!CHIP_IS_E1x(bp) && skb->encapsulation) { + if (inner_ip_hdr(skb)->version == 6) { + rc |= XMIT_CSUM_ENC_V6; + if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + } else { + rc |= XMIT_CSUM_ENC_V4; + if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP) + rc |= XMIT_CSUM_TCP; + } + } + if (prot == IPPROTO_TCP) + 
rc |= XMIT_CSUM_TCP; + + if (skb_is_gso(skb)) { + if (skb_is_gso_v6(skb)) { + rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V6; + } else { + rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); + if (rc & XMIT_CSUM_ENC) + rc |= XMIT_GSO_ENC_V4; + } + } + + return rc; +} + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) +/* check if packet requires linearization (packet is too fragmented) + no need to check fragmentation if page size > 8K (there will be no + violation to FW restrictions) */ +static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, + u32 xmit_type) +{ + int to_copy = 0; + int hlen = 0; + int first_bd_sz = 0; + + /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */ + if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { + + if (xmit_type & XMIT_GSO) { + unsigned short lso_mss = skb_shinfo(skb)->gso_size; + /* Check if LSO packet needs to be copied: + 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ + int wnd_size = MAX_FETCH_BD - 3; + /* Number of windows to check */ + int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; + int wnd_idx = 0; + int frag_idx = 0; + u32 wnd_sum = 0; + + /* Headers length */ + hlen = (int)(skb_transport_header(skb) - skb->data) + + tcp_hdrlen(skb); + + /* Amount of data (w/o headers) on linear part of SKB*/ + first_bd_sz = skb_headlen(skb) - hlen; + + wnd_sum = first_bd_sz; + + /* Calculate the first sum - it's special */ + for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) + wnd_sum += + skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]); + + /* If there was data on linear skb data - check it */ + if (first_bd_sz > 0) { + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + goto exit_lbl; + } + + wnd_sum -= first_bd_sz; + } + + /* Others are easier: run through the frag list and + check all windows */ + for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { + wnd_sum += + skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); + + if (unlikely(wnd_sum < lso_mss)) { + to_copy = 1; + break; + } + wnd_sum -= + skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]); + } + } else { + /* in non-LSO too fragmented packet should always + be linearized */ + to_copy = 1; + } + } + +exit_lbl: + if (unlikely(to_copy)) + DP(NETIF_MSG_TX_QUEUED, + "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n", + (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", + skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); + + return to_copy; +} +#endif + +/** + * bnx2x_set_pbd_gso - update PBD in GSO case. 
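+ * Fills lso_mss, tcp_send_seq, tcp_flags, the TCP pseudo checksum and,
+ * for IPv4, the IP ID.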
+ * + * @skb: packet skb + * @pbd: parse BD + * @xmit_type: xmit flags + */ +static void bnx2x_set_pbd_gso(struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) +{ + pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq); + pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb)); + + if (xmit_type & XMIT_GSO_V4) { + pbd->ip_id = bswab16(ip_hdr(skb)->id); + pbd->tcp_pseudo_csum = + bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } else { + pbd->tcp_pseudo_csum = + bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } + + pbd->global_data |= + cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); +} + +/** + * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @parsing_data: data to be updated + * @xmit_type: xmit flags + * + * 57712/578xx related, when skb has encapsulation + */ +static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) +{ + *parsing_data |= + ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; + + if (xmit_type & XMIT_CSUM_TCP) { + *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + + return skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data; + } + + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. + */ + return skb_inner_transport_header(skb) + + sizeof(struct udphdr) - skb->data; +} + +/** + * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @parsing_data: data to be updated + * @xmit_type: xmit flags + * + * 57712/578xx related + */ +static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, + u32 *parsing_data, u32 xmit_type) +{ + *parsing_data |= + ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W; + + if (xmit_type & XMIT_CSUM_TCP) { + *parsing_data |= ((tcp_hdrlen(skb) / 4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; + + return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; + } + /* We support checksum offload for TCP and UDP only. + * No need to pass the UDP header length - it's a constant. 
+ */ + return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data; +} + +/* set FW indication according to inner or outer protocols if tunneled */ +static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_start_bd *tx_start_bd, + u32 xmit_type) +{ + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; + + if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6; + + if (!(xmit_type & XMIT_CSUM_TCP)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; +} + +/** + * bnx2x_set_pbd_csum - update PBD with checksum and return header length + * + * @bp: driver handle + * @skb: packet skb + * @pbd: parse BD to be updated + * @xmit_type: xmit flags + */ +static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_parse_bd_e1x *pbd, + u32 xmit_type) +{ + u8 hlen = (skb_network_header(skb) - skb->data) >> 1; + + /* for now NS flag is not used in Linux */ + pbd->global_data = + cpu_to_le16(hlen | + ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); + + pbd->ip_hlen_w = (skb_transport_header(skb) - + skb_network_header(skb)) >> 1; + + hlen += pbd->ip_hlen_w; + + /* We support checksum offload for TCP and UDP only */ + if (xmit_type & XMIT_CSUM_TCP) + hlen += tcp_hdrlen(skb) / 2; + else + hlen += sizeof(struct udphdr) / 2; + + pbd->total_hlen_w = cpu_to_le16(hlen); + hlen = hlen*2; + + if (xmit_type & XMIT_CSUM_TCP) { + pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check); + + } else { + s8 fix = SKB_CS_OFF(skb); /* signed! */ + + DP(NETIF_MSG_TX_QUEUED, + "hlen %d fix %d csum before fix %x\n", + le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); + + /* HW bug: fixup the CSUM */ + pbd->tcp_pseudo_csum = + bnx2x_csum_fix(skb_transport_header(skb), + SKB_CS(skb), fix); + + DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", + pbd->tcp_pseudo_csum); + } + + return hlen; +} + +static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, + struct eth_tx_parse_bd_e2 *pbd_e2, + struct eth_tx_parse_2nd_bd *pbd2, + u16 *global_data, + u32 xmit_type) +{ + u16 hlen_w = 0; + u8 outerip_off, outerip_len = 0; + + /* from outer IP to transport */ + hlen_w = (skb_inner_transport_header(skb) - + skb_network_header(skb)) >> 1; + + /* transport len */ + hlen_w += inner_tcp_hdrlen(skb) >> 1; + + pbd2->fw_ip_hdr_to_payload_w = hlen_w; + + /* outer IP header info */ + if (xmit_type & XMIT_CSUM_V4) { + struct iphdr *iph = ip_hdr(skb); + u32 csum = (__force u32)(~iph->check) - + (__force u32)iph->tot_len - + (__force u32)iph->frag_off; + + outerip_len = iph->ihl << 1; + + pbd2->fw_ip_csum_wo_len_flags_frag = + bswab16(csum_fold((__force __wsum)csum)); + } else { + pbd2->fw_ip_hdr_to_payload_w = + hlen_w - ((sizeof(struct ipv6hdr)) >> 1); + pbd_e2->data.tunnel_data.flags |= + ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER; + } + + pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); + + pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); + + /* inner IP header info */ + if (xmit_type & XMIT_CSUM_ENC_V4) { + pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); + + pbd_e2->data.tunnel_data.pseudo_csum = + bswab16(~csum_tcpudp_magic( + inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } else { + pbd_e2->data.tunnel_data.pseudo_csum = + bswab16(~csum_ipv6_magic( + &inner_ipv6_hdr(skb)->saddr, + &inner_ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0)); + } + + outerip_off = (skb_network_header(skb) - skb->data) >> 1; + + *global_data |= + outerip_off | + 
(outerip_len << + ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | + ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << + ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT); + + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { + SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1); + pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1; + } +} + +static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) +{ + struct ipv6hdr *ipv6; + + if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) + return; + + if (xmit_type & XMIT_GSO_ENC_V6) + ipv6 = inner_ipv6_hdr(skb); + else /* XMIT_GSO_V6 */ + ipv6 = ipv6_hdr(skb); + + if (ipv6->nexthdr == NEXTHDR_IPV6) + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; +} + +/* called with netif_tx_lock + * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call + * netif_wake_queue() + */ +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + struct netdev_queue *txq; + struct bnx2x_fp_txdata *txdata; + struct sw_tx_bd *tx_buf; + struct eth_tx_start_bd *tx_start_bd, *first_bd; + struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; + struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; + struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + struct eth_tx_parse_2nd_bd *pbd2 = NULL; + u32 pbd_e2_parsing_data = 0; + u16 pkt_prod, bd_prod; + int nbd, txq_index; + dma_addr_t mapping; + u32 xmit_type = bnx2x_xmit_type(bp, skb); + int i; + u8 hlen = 0; + __le16 pkt_size = 0; + struct ethhdr *eth; + u8 mac_type = UNICAST_ADDRESS; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return NETDEV_TX_BUSY; +#endif + + txq_index = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, txq_index); + + BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0)); + + txdata = &bp->bnx2x_txq[txq_index]; + + /* enable this debug print to view the transmission queue being used + DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", + txq_index, fp_index, txdata_index); */ + + /* enable this debug print to view the transmission details + DP(NETIF_MSG_TX_QUEUED, + "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", + txdata->cid, fp_index, txdata_index, txdata, fp); */ + + if (unlikely(bnx2x_tx_avail(bp, txdata) < + skb_shinfo(skb)->nr_frags + + BDS_PER_TX_PKT + + NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { + /* Handle special storage cases separately */ + if (txdata->tx_ring_size == 0) { + struct bnx2x_eth_q_stats *q_stats = + bnx2x_fp_qstats(bp, txdata->parent_fp); + q_stats->driver_filtered_tx_pkt++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + netif_tx_stop_queue(txq); + BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); + + return NETDEV_TX_BUSY; + } + + DP(NETIF_MSG_TX_QUEUED, + "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n", + txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, + ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type, + skb->len); + + eth = (struct ethhdr *)skb->data; + + /* set flag according to packet type (UNICAST_ADDRESS is default)*/ + if (unlikely(is_multicast_ether_addr(eth->h_dest))) { + if (is_broadcast_ether_addr(eth->h_dest)) + mac_type = BROADCAST_ADDRESS; + else + mac_type = MULTICAST_ADDRESS; + } + +#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT) + /* First, check if we need to linearize the skb (due to FW + restrictions). 
No need to check fragmentation if page size > 8K + (there will be no violation to FW restrictions) */ + if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { + /* Statistics of linearization */ + bp->lin_cnt++; + if (skb_linearize(skb) != 0) { + DP(NETIF_MSG_TX_QUEUED, + "SKB linearization failed - silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + /* Map skb linear data for DMA */ + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + DP(NETIF_MSG_TX_QUEUED, + "SKB mapping failed - silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* + Please read carefully. First we use one BD which we mark as start, + then we have a parsing info BD (used for TSO or xsum), + and only then we have the rest of the TSO BDs. + (don't forget to mark the last one as last, + and to unmap only AFTER you write to the BD ...) + And above all, all pdb sizes are in words - NOT DWORDS! + */ + + /* get current pkt produced now - advance it just before sending packet + * since mapping of pages may fail and cause packet to be dropped + */ + pkt_prod = txdata->tx_pkt_prod; + bd_prod = TX_BD(txdata->tx_bd_prod); + + /* get a tx_buf and first BD + * tx_start_bd may be changed during SPLIT, + * but first_bd will always stay first + */ + tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; + tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; + first_bd = tx_start_bd; + + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + if (!(bp->flags & TX_TIMESTAMPING_EN)) { + BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); + } else if (bp->ptp_tx_skb) { + BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + } else { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* schedule check for Tx timestamp */ + bp->ptp_tx_skb = skb_get(skb); + bp->ptp_tx_start = jiffies; + schedule_work(&bp->ptp_task); + } + } + + /* header nbd: indirectly zero other flags! 
*/ + tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; + + /* remember the first BD of the packet */ + tx_buf->first_bd = txdata->tx_bd_prod; + tx_buf->skb = skb; + tx_buf->flags = 0; + + DP(NETIF_MSG_TX_QUEUED, + "sending pkt %u @%p next_idx %u bd %u @%p\n", + pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); + + if (skb_vlan_tag_present(skb)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(skb_vlan_tag_get(skb)); + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + } else { + /* when transmitting in a vf, start bd must hold the ethertype + * for fw to enforce it + */ +#ifndef BNX2X_STOP_ON_ERROR + if (IS_VF(bp)) +#endif + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); +#ifndef BNX2X_STOP_ON_ERROR + else + /* used by FW for packet accounting */ + tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); +#endif + } + + nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ + + /* turn on parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + if (xmit_type & XMIT_CSUM) + bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); + + if (!CHIP_IS_E1x(bp)) { + pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; + memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); + + if (xmit_type & XMIT_CSUM_ENC) { + u16 global_data = 0; + + /* Set PBD in enc checksum offload case */ + hlen = bnx2x_set_pbd_csum_enc(bp, skb, + &pbd_e2_parsing_data, + xmit_type); + + /* turn on 2nd parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd; + + memset(pbd2, 0, sizeof(*pbd2)); + + pbd_e2->data.tunnel_data.ip_hdr_start_inner_w = + (skb_inner_network_header(skb) - + skb->data) >> 1; + + if (xmit_type & XMIT_GSO_ENC) + bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2, + &global_data, + xmit_type); + + pbd2->global_data = cpu_to_le16(global_data); + + /* add addition parse BD indication to start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_PARSE_NBDS, 1); + /* set encapsulation flag in start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_TUNNEL_EXIST, 1); + + tx_buf->flags |= BNX2X_HAS_SECOND_PBD; + + nbd++; + } else if (xmit_type & XMIT_CSUM) { + /* Set PBD in checksum offload case w/o encapsulation */ + hlen = bnx2x_set_pbd_csum_e2(bp, skb, + &pbd_e2_parsing_data, + xmit_type); + } + + bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type); + /* Add the macs to the parsing BD if this is a vf or if + * Tx Switching is enabled. 
+ */ + if (IS_VF(bp)) { + /* override GRE parameters in BD */ + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, + &pbd_e2->data.mac_addr.src_mid, + &pbd_e2->data.mac_addr.src_lo, + eth->h_source); + + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, + eth->h_dest); + } else { + if (bp->flags & TX_SWITCHING) + bnx2x_set_fw_mac_addr( + &pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, + eth->h_dest); +#ifdef BNX2X_STOP_ON_ERROR + /* Enforce security is always set in Stop on Error - + * source mac should be present in the parsing BD + */ + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, + &pbd_e2->data.mac_addr.src_mid, + &pbd_e2->data.mac_addr.src_lo, + eth->h_source); +#endif + } + + SET_FLAG(pbd_e2_parsing_data, + ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); + } else { + u16 global_data = 0; + pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; + memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); + /* Set PBD in checksum offload case */ + if (xmit_type & XMIT_CSUM) + hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); + + SET_FLAG(global_data, + ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); + pbd_e1x->global_data |= cpu_to_le16(global_data); + } + + /* Setup the data pointer of the first BD of the packet */ + tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); + pkt_size = tx_start_bd->nbytes; + + DP(NETIF_MSG_TX_QUEUED, + "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n", + tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, + le16_to_cpu(tx_start_bd->nbytes), + tx_start_bd->bd_flags.as_bitfield, + le16_to_cpu(tx_start_bd->vlan_or_ethertype)); + + if (xmit_type & XMIT_GSO) { + + DP(NETIF_MSG_TX_QUEUED, + "TSO packet len %d hlen %d total len %d tso size %d\n", + skb->len, hlen, skb_headlen(skb), + skb_shinfo(skb)->gso_size); + + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; + + if (unlikely(skb_headlen(skb) > hlen)) { + nbd++; + bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, + &tx_start_bd, hlen, + bd_prod); + } + if (!CHIP_IS_E1x(bp)) + pbd_e2_parsing_data |= + (skb_shinfo(skb)->gso_size << + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & + ETH_TX_PARSE_BD_E2_LSO_MSS; + else + bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); + } + + /* Set the PBD's parsing_data field if not zero + * (for the chips newer than 57711). 
+ */ + if (pbd_e2_parsing_data) + pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); + + tx_data_bd = (struct eth_tx_bd *)tx_start_bd; + + /* Handle fragmented skb */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + unsigned int pkts_compl = 0, bytes_compl = 0; + + DP(NETIF_MSG_TX_QUEUED, + "Unable to map page - dropping packet...\n"); + + /* we need unmap all buffers already mapped + * for this SKB; + * first_bd->nbd need to be properly updated + * before call to bnx2x_free_tx_pkt + */ + first_bd->nbd = cpu_to_le16(nbd); + bnx2x_free_tx_pkt(bp, txdata, + TX_BD(txdata->tx_pkt_prod), + &pkts_compl, &bytes_compl); + return NETDEV_TX_OK; + } + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + if (total_pkt_bd == NULL) + total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; + + tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); + le16_add_cpu(&pkt_size, skb_frag_size(frag)); + nbd++; + + DP(NETIF_MSG_TX_QUEUED, + "frag %d bd @%p addr (%x:%x) nbytes %d\n", + i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, + le16_to_cpu(tx_data_bd->nbytes)); + } + + DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); + + /* update with actual num BDs */ + first_bd->nbd = cpu_to_le16(nbd); + + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + /* now send a tx doorbell, counting the next BD + * if the packet contains or ends with it + */ + if (TX_BD_POFF(bd_prod) < nbd) + nbd++; + + /* total_pkt_bytes should be set on the first data BD if + * it's not an LSO packet and there is more than one + * data BD. In this case pkt_size is limited by an MTU value. + * However we prefer to set it for an LSO packet (while we don't + * have to) in order to save some CPU cycles in a none-LSO + * case, when we much more care about them. + */ + if (total_pkt_bd != NULL) + total_pkt_bd->total_pkt_bytes = pkt_size; + + if (pbd_e1x) + DP(NETIF_MSG_TX_QUEUED, + "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n", + pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, + pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, + pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, + le16_to_cpu(pbd_e1x->total_hlen_w)); + if (pbd_e2) + DP(NETIF_MSG_TX_QUEUED, + "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", + pbd_e2, + pbd_e2->data.mac_addr.dst_hi, + pbd_e2->data.mac_addr.dst_mid, + pbd_e2->data.mac_addr.dst_lo, + pbd_e2->data.mac_addr.src_hi, + pbd_e2->data.mac_addr.src_mid, + pbd_e2->data.mac_addr.src_lo, + pbd_e2->parsing_data); + DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + + netdev_tx_sent_queue(txq, skb->len); + + skb_tx_timestamp(skb); + + txdata->tx_pkt_prod++; + /* + * Make sure that the BD data is updated before updating the producer + * since FW might read the BD right after the producer is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes packets must have BDs. 
+ */ + wmb(); + + txdata->tx_db.data.prod += nbd; + barrier(); + + DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + + mmiowb(); + + txdata->tx_bd_prod += nbd; + + if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) { + netif_tx_stop_queue(txq); + + /* paired memory barrier is in bnx2x_tx_int(), we have to keep + * ordering of set_bit() in netif_tx_stop_queue() and read of + * fp->bd_tx_cons */ + smp_mb(); + + bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT) + netif_tx_wake_queue(txq); + } + txdata->tx_pkt++; + + return NETDEV_TX_OK; +} + +/** + * bnx2x_setup_tc - routine to configure net_device for multi tc + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + * + * callback connected to the ndo_setup_tc function pointer + */ +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) +{ + int cos, prio, count, offset; + struct bnx2x *bp = netdev_priv(dev); + + /* setup tc must be called under rtnl lock */ + ASSERT_RTNL(); + + /* no traffic classes requested. Aborting */ + if (!num_tc) { + netdev_reset_tc(dev); + return 0; + } + + /* requested to support too many traffic classes */ + if (num_tc > bp->max_cos) { + BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n", + num_tc, bp->max_cos); + return -EINVAL; + } + + /* declare amount of supported traffic classes */ + if (netdev_set_num_tc(dev, num_tc)) { + BNX2X_ERR("failed to declare %d traffic classes\n", num_tc); + return -EINVAL; + } + + /* configure priority to traffic class mapping */ + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "mapping priority %d to tc %d\n", + prio, bp->prio_to_cos[prio]); + } + + /* Use this configuration to differentiate tc0 from other COSes + This can be used for ets or pfc, and save the effort of setting + up a multio class queue disc or negotiating DCBX with a switch + netdev_set_prio_tc_map(dev, 0, 0); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0); + for (prio = 1; prio < 16; prio++) { + netdev_set_prio_tc_map(dev, prio, 1); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1); + } */ + + /* configure traffic class to transmission queue mapping */ + for (cos = 0; cos < bp->max_cos; cos++) { + count = BNX2X_NUM_ETH_QUEUES(bp); + offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); + netdev_set_tc_queue(dev, cos, count, offset); + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "mapping tc %d to offset %d count %d\n", + cos, offset, count); + } + + return 0; +} + +/* called with rtnl_lock */ +int bnx2x_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + if (!is_valid_ether_addr(addr->sa_data)) { + BNX2X_ERR("Requested MAC address is not valid\n"); + return -EINVAL; + } + + if (IS_MF_STORAGE_ONLY(bp)) { + BNX2X_ERR("Can't change address on STORAGE ONLY function\n"); + return -EINVAL; + } + + if (netif_running(dev)) { + rc = bnx2x_set_eth_mac(bp, false); + if (rc) + return rc; + } + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (netif_running(dev)) + rc = bnx2x_set_eth_mac(bp, true); + + return rc; +} + +static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) +{ + union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); + struct bnx2x_fastpath *fp = &bp->fp[fp_index]; + u8 cos; + + /* Common */ + + if (IS_FCOE_IDX(fp_index)) { + memset(sb, 0, 
sizeof(union host_hc_status_block)); + fp->status_blk_mapping = 0; + } else { + /* status blocks */ + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_FREE(sb->e2_sb, + bnx2x_fp(bp, fp_index, + status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_FREE(sb->e1x_sb, + bnx2x_fp(bp, fp_index, + status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); + } + + /* Rx */ + if (!skip_rx_queue(bp, fp_index)) { + bnx2x_free_rx_bds(fp); + + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), + bnx2x_fp(bp, fp_index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), + bnx2x_fp(bp, fp_index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * + NUM_RCQ_BD); + + /* SGE ring */ + BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); + BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), + bnx2x_fp(bp, fp_index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + } + + /* Tx */ + if (!skip_tx_queue(bp, fp_index)) { + /* fastpath tx rings: tx_buf tx_desc */ + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + + DP(NETIF_MSG_IFDOWN, + "freeing tx memory of fp %d cos %d cid %d\n", + fp_index, cos, txdata->cid); + + BNX2X_FREE(txdata->tx_buf_ring); + BNX2X_PCI_FREE(txdata->tx_desc_ring, + txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + } + } + /* end of fastpath */ +} + +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) +{ + int i; + for_each_cnic_queue(bp, i) + bnx2x_free_fp_mem_at(bp, i); +} + +void bnx2x_free_fp_mem(struct bnx2x *bp) +{ + int i; + for_each_eth_queue(bp, i) + bnx2x_free_fp_mem_at(bp, i); +} + +static void set_sb_shortcuts(struct bnx2x *bp, int index) +{ + union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); + if (!CHIP_IS_E1x(bp)) { + bnx2x_fp(bp, index, sb_index_values) = + (__le16 *)status_blk.e2_sb->sb.index_values; + bnx2x_fp(bp, index, sb_running_index) = + (__le16 *)status_blk.e2_sb->sb.running_index; + } else { + bnx2x_fp(bp, index, sb_index_values) = + (__le16 *)status_blk.e1x_sb->sb.index_values; + bnx2x_fp(bp, index, sb_running_index) = + (__le16 *)status_blk.e1x_sb->sb.running_index; + } +} + +/* Returns the number of actually allocated BDs */ +static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, + int rx_ring_size) +{ + struct bnx2x *bp = fp->bp; + u16 ring_prod, cqe_ring_prod; + int i, failure_cnt = 0; + + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + + /* This routine is called only during fo init so + * fp->eth_q_stats.rx_skb_alloc_failed = 0 + */ + for (i = 0; i < rx_ring_size; i++) { + if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) { + failure_cnt++; + continue; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= (i - failure_cnt)); + } + + if (failure_cnt) + BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n", + i - failure_cnt, fp->index); + + fp->rx_bd_prod = ring_prod; + /* Limit the CQE producer by the CQE ring size */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; + + return i - failure_cnt; +} + +static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; 
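+ /* the last CQE of each RCQ page is reserved as a next-page pointer; the (i % NUM_RCQ_RINGS) modulo below wraps the final page back to the first, closing the completion ring */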
+ + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } +} + +static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) +{ + union host_hc_status_block *sb; + struct bnx2x_fastpath *fp = &bp->fp[index]; + int ring_size = 0; + u8 cos; + int rx_ring_size = 0; + + if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { + rx_ring_size = MIN_RX_SIZE_NONTPA; + bp->rx_ring_size = rx_ring_size; + } else if (!bp->rx_ring_size) { + rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); + + if (CHIP_IS_E3(bp)) { + u32 cfg = SHMEM_RD(bp, + dev_info.port_hw_config[BP_PORT(bp)]. + default_cfg); + + /* Decrease ring size for 1G functions */ + if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) == + PORT_HW_CFG_NET_SERDES_IF_SGMII) + rx_ring_size /= 10; + } + + /* allocate at least number of buffers required by FW */ + rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : + MIN_RX_SIZE_TPA, rx_ring_size); + + bp->rx_ring_size = rx_ring_size; + } else /* if rx_ring_size specified - use it */ + rx_ring_size = bp->rx_ring_size; + + DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size); + + /* Common */ + sb = &bnx2x_fp(bp, index, status_blk); + + if (!IS_FCOE_IDX(index)) { + /* status blocks */ + if (!CHIP_IS_E1x(bp)) { + sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e2)); + if (!sb->e2_sb) + goto alloc_mem_err; + } else { + sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), + sizeof(struct host_hc_status_block_e1x)); + if (!sb->e1x_sb) + goto alloc_mem_err; + } + } + + /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to + * set shortcuts for it. 
+ */ + if (!IS_FCOE_IDX(index)) + set_sb_shortcuts(bp, index); + + /* Tx */ + if (!skip_tx_queue(bp, index)) { + /* fastpath tx rings: tx_buf tx_desc */ + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + + DP(NETIF_MSG_IFUP, + "allocating tx memory of fp %d cos %d\n", + index, cos); + + txdata->tx_buf_ring = kcalloc(NUM_TX_BD, + sizeof(struct sw_tx_bd), + GFP_KERNEL); + if (!txdata->tx_buf_ring) + goto alloc_mem_err; + txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, + sizeof(union eth_tx_bd_types) * NUM_TX_BD); + if (!txdata->tx_desc_ring) + goto alloc_mem_err; + } + } + + /* Rx */ + if (!skip_rx_queue(bp, index)) { + /* fastpath rx rings: rx_buf rx_desc rx_comp */ + bnx2x_fp(bp, index, rx_buf_ring) = + kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_buf_ring)) + goto alloc_mem_err; + bnx2x_fp(bp, index, rx_desc_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), + sizeof(struct eth_rx_bd) * NUM_RX_BD); + if (!bnx2x_fp(bp, index, rx_desc_ring)) + goto alloc_mem_err; + + /* Seed all CQEs by 1s */ + bnx2x_fp(bp, index, rx_comp_ring) = + BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), + sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); + if (!bnx2x_fp(bp, index, rx_comp_ring)) + goto alloc_mem_err; + + /* SGE ring */ + bnx2x_fp(bp, index, rx_page_ring) = + kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page), + GFP_KERNEL); + if (!bnx2x_fp(bp, index, rx_page_ring)) + goto alloc_mem_err; + bnx2x_fp(bp, index, rx_sge_ring) = + BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), + BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); + if (!bnx2x_fp(bp, index, rx_sge_ring)) + goto alloc_mem_err; + /* RX BD ring */ + bnx2x_set_next_page_rx_bd(fp); + + /* CQ ring */ + bnx2x_set_next_page_rx_cq(fp); + + /* BDs */ + ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); + if (ring_size < rx_ring_size) + goto alloc_mem_err; + } + + return 0; + +/* handles low memory cases */ +alloc_mem_err: + BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n", + index, ring_size); + /* FW will drop all packets if queue is not big enough, + * In these cases we disable the queue + * Min size is different for OOO, TPA and non-TPA queues + */ + if (ring_size < (fp->mode == TPA_MODE_DISABLED ? + MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { + /* release memory allocated for this queue */ + bnx2x_free_fp_mem_at(bp, index); + return -ENOMEM; + } + return 0; +} + +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) +{ + if (!NO_FCOE(bp)) + /* FCoE */ + if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) + /* we will fail load process instead of mark + * NO_FCOE_FLAG + */ + return -ENOMEM; + + return 0; +} + +static int bnx2x_alloc_fp_mem(struct bnx2x *bp) +{ + int i; + + /* 1. Allocate FP for leading - fatal if error + * 2. 
Allocate RSS - fix number of queues if error + */ + + /* leading */ + if (bnx2x_alloc_fp_mem_at(bp, 0)) + return -ENOMEM; + + /* RSS */ + for_each_nondefault_eth_queue(bp, i) + if (bnx2x_alloc_fp_mem_at(bp, i)) + break; + + /* handle memory failures */ + if (i != BNX2X_NUM_ETH_QUEUES(bp)) { + int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; + + WARN_ON(delta < 0); + bnx2x_shrink_eth_fp(bp, delta); + if (CNIC_SUPPORT(bp)) + /* move non eth FPs next to last eth FP + * must be done in that order + * FCOE_IDX < FWD_IDX < OOO_IDX + */ + + /* move FCoE fp even NO_FCOE_FLAG is on */ + bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); + bp->num_ethernet_queues -= delta; + bp->num_queues = bp->num_ethernet_queues + + bp->num_cnic_queues; + BNX2X_ERR("Adjusted num of queues from %d to %d\n", + bp->num_queues + delta, bp->num_queues); + } + + return 0; +} + +void bnx2x_free_mem_bp(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < bp->fp_array_size; i++) + kfree(bp->fp[i].tpa_info); + kfree(bp->fp); + kfree(bp->sp_objs); + kfree(bp->fp_stats); + kfree(bp->bnx2x_txq); + kfree(bp->msix_table); + kfree(bp->ilt); +} + +int bnx2x_alloc_mem_bp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp; + struct msix_entry *tbl; + struct bnx2x_ilt *ilt; + int msix_table_size = 0; + int fp_array_size, txq_array_size; + int i; + + /* + * The biggest MSI-X table we might need is as a maximum number of fast + * path IGU SBs plus default SB (for PF only). + */ + msix_table_size = bp->igu_sb_cnt; + if (IS_PF(bp)) + msix_table_size++; + BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size); + + /* fp array: RSS plus CNIC related L2 queues */ + fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp); + bp->fp_array_size = fp_array_size; + BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size); + + fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL); + if (!fp) + goto alloc_err; + for (i = 0; i < bp->fp_array_size; i++) { + fp[i].tpa_info = + kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2, + sizeof(struct bnx2x_agg_info), GFP_KERNEL); + if (!(fp[i].tpa_info)) + goto alloc_err; + } + + bp->fp = fp; + + /* allocate sp objs */ + bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs), + GFP_KERNEL); + if (!bp->sp_objs) + goto alloc_err; + + /* allocate fp_stats */ + bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats), + GFP_KERNEL); + if (!bp->fp_stats) + goto alloc_err; + + /* Allocate memory for the transmission queues array */ + txq_array_size = + BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp); + BNX2X_DEV_INFO("txq_array_size %d", txq_array_size); + + bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata), + GFP_KERNEL); + if (!bp->bnx2x_txq) + goto alloc_err; + + /* msix table */ + tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); + if (!tbl) + goto alloc_err; + bp->msix_table = tbl; + + /* ilt */ + ilt = kzalloc(sizeof(*ilt), GFP_KERNEL); + if (!ilt) + goto alloc_err; + bp->ilt = ilt; + + return 0; +alloc_err: + bnx2x_free_mem_bp(bp); + return -ENOMEM; +} + +int bnx2x_reload_if_running(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (unlikely(!netif_running(dev))) + return 0; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); + return bnx2x_nic_load(bp, LOAD_NORMAL); +} + +int bnx2x_get_cur_phy_idx(struct bnx2x *bp) +{ + u32 sel_phy_idx = 0; + if (bp->link_params.num_phys <= 1) + return INT_PHY; + + if (bp->link_vars.link_up) { + sel_phy_idx = EXT_PHY1; + /* In case link is SERDES, check if the EXT_PHY2 is the one */ + if 
((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && + (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) + sel_phy_idx = EXT_PHY2; + } else { + + switch (bnx2x_phy_selection(&bp->link_params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + sel_phy_idx = EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + sel_phy_idx = EXT_PHY2; + break; + } + } + + return sel_phy_idx; +} +int bnx2x_get_link_cfg_idx(struct bnx2x *bp) +{ + u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); + /* + * The selected activated PHY is always after swapping (in case PHY + * swapping is enabled). So when swapping is enabled, we need to reverse + * the configuration + */ + + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + if (sel_phy_idx == EXT_PHY1) + sel_phy_idx = EXT_PHY2; + else if (sel_phy_idx == EXT_PHY2) + sel_phy_idx = EXT_PHY1; + } + return LINK_CONFIG_IDX(sel_phy_idx); +} + +#ifdef NETDEV_FCOE_WWNN +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + switch (type) { + case NETDEV_FCOE_WWNN: + *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, + cp->fcoe_wwn_node_name_lo); + break; + case NETDEV_FCOE_WWPN: + *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, + cp->fcoe_wwn_port_name_lo); + break; + default: + BNX2X_ERR("Wrong WWN type requested - %d\n", type); + return -EINVAL; + } + + return 0; +} +#endif + +/* called with rtnl_lock */ +int bnx2x_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n"); + return -EPERM; + } + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + BNX2X_ERR("Can't perform change MTU during parity recovery\n"); + return -EAGAIN; + } + + if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || + ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) { + BNX2X_ERR("Can't support requested MTU size\n"); + return -EINVAL; + } + + /* This does not race with packet allocation + * because the actual alloc size is + * only updated as part of load + */ + dev->mtu = new_mtu; + + return bnx2x_reload_if_running(dev); +} + +netdev_features_t bnx2x_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (pci_num_vf(bp->pdev)) { + netdev_features_t changed = dev->features ^ features; + + /* Revert the requested changes in features if they + * would require internal reload of PF in bnx2x_set_features(). 
+ */ + if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { + features &= ~NETIF_F_RXCSUM; + features |= dev->features & NETIF_F_RXCSUM; + } + + if (changed & NETIF_F_LOOPBACK) { + features &= ~NETIF_F_LOOPBACK; + features |= dev->features & NETIF_F_LOOPBACK; + } + } + + /* TPA requires Rx CSUM offloading */ + if (!(features & NETIF_F_RXCSUM)) { + features &= ~NETIF_F_LRO; + features &= ~NETIF_F_GRO; + } + + return features; +} + +int bnx2x_set_features(struct net_device *dev, netdev_features_t features) +{ + struct bnx2x *bp = netdev_priv(dev); + netdev_features_t changes = features ^ dev->features; + bool bnx2x_reload = false; + int rc; + + /* VFs or non SRIOV PFs should be able to change loopback feature */ + if (!pci_num_vf(bp->pdev)) { + if (features & NETIF_F_LOOPBACK) { + if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { + bp->link_params.loopback_mode = LOOPBACK_BMAC; + bnx2x_reload = true; + } + } else { + if (bp->link_params.loopback_mode != LOOPBACK_NONE) { + bp->link_params.loopback_mode = LOOPBACK_NONE; + bnx2x_reload = true; + } + } + } + + /* if GRO is changed while LRO is enabled, don't force a reload */ + if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO)) + changes &= ~NETIF_F_GRO; + + /* if GRO is changed while HW TPA is off, don't force a reload */ + if ((changes & NETIF_F_GRO) && bp->disable_tpa) + changes &= ~NETIF_F_GRO; + + if (changes) + bnx2x_reload = true; + + if (bnx2x_reload) { + if (bp->recovery_state == BNX2X_RECOVERY_DONE) { + dev->features = features; + rc = bnx2x_reload_if_running(dev); + return rc ? rc : 1; + } + /* else: bnx2x_nic_load() will be called at end of recovery */ + } + + return 0; +} + +void bnx2x_tx_timeout(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + +#ifdef BNX2X_STOP_ON_ERROR + if (!bp->panic) + bnx2x_panic(); +#endif + + /* This allows the netif to be shutdown gracefully before resetting */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); +} + +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + rtnl_lock(); + + pci_save_state(pdev); + + if (!netif_running(dev)) { + rtnl_unlock(); + return 0; + } + + netif_device_detach(dev); + + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); + + bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); + + rtnl_unlock(); + + return 0; +} + +int bnx2x_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + int rc; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return -ENODEV; + } + bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + BNX2X_ERR("Handling parity error recovery. 
Try again later\n"); + return -EAGAIN; + } + + rtnl_lock(); + + pci_restore_state(pdev); + + if (!netif_running(dev)) { + rtnl_unlock(); + return 0; + } + + bnx2x_set_power_state(bp, PCI_D0); + netif_device_attach(dev); + + rc = bnx2x_nic_load(bp, LOAD_OPEN); + + rtnl_unlock(); + + return rc; +} + +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid) +{ + if (!cxt) { + BNX2X_ERR("bad context pointer %p\n", cxt); + return; + } + + /* ustorm cxt validation */ + cxt->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), + CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); + /* xcontext validation */ + cxt->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), + CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); +} + +static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, + u8 fw_sb_id, u8 sb_index, + u8 ticks) +{ + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); + REG_WR8(bp, addr, ticks); + DP(NETIF_MSG_IFUP, + "port %x fw_sb_id %d sb_index %d ticks %d\n", + port, fw_sb_id, sb_index, ticks); +} + +static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, + u16 fw_sb_id, u8 sb_index, + u8 disable) +{ + u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); + u8 flags = REG_RD8(bp, addr); + /* clear and set */ + flags &= ~HC_INDEX_DATA_HC_ENABLED; + flags |= enable_flag; + REG_WR8(bp, addr, flags); + DP(NETIF_MSG_IFUP, + "port %x fw_sb_id %d sb_index %d disable %d\n", + port, fw_sb_id, sb_index, disable); +} + +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec) +{ + int port = BP_PORT(bp); + u8 ticks = usec / BNX2X_BTR; + + storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); + + disable = disable ? 1 : (usec ? 0 : 1); + storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); +} + +void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, + u32 verbose) +{ + smp_mb__before_atomic(); + set_bit(flag, &bp->sp_rtnl_state); + smp_mb__after_atomic(); + DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", + flag); + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} +EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h new file mode 100644 index 000000000..d7a71758e --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -0,0 +1,1325 @@ +/* bnx2x_cmn.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ +#ifndef BNX2X_CMN_H +#define BNX2X_CMN_H + +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_sriov.h" + +/* This is used as a replacement for an MCP if it's not present */ +extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ +extern int bnx2x_num_queues; + +/************************ Macros ********************************/ +#define BNX2X_PCI_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define BNX2X_FREE(x) \ + do { \ + if (x) { \ + kfree((void *)x); \ + x = NULL; \ + } \ + } while (0) + +#define BNX2X_PCI_ALLOC(y, size) \ +({ \ + void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + x; \ +}) +#define BNX2X_PCI_FALLOC(y, size) \ +({ \ + void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) { \ + memset(x, 0xff, size); \ + DP(NETIF_MSG_HW, \ + "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \ + (unsigned long long)(*y), x); \ + } \ + x; \ +}) + +/*********************** Interfaces **************************** + * Functions that need to be implemented by each driver version + */ +/* Init */ + +/** + * bnx2x_send_unload_req - request unload mode from the MCP. + * + * @bp: driver handle + * @unload_mode: requested function's unload mode + * + * Return unload mode returned by the MCP: COMMON, PORT or FUNC. + */ +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); + +/** + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. + * + * @bp: driver handle + * @keep_link: true iff link should be kept up + */ +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); + +/** + * bnx2x_config_rss_pf - configure RSS parameters in a PF. + * + * @bp: driver handle + * @rss_obj: RSS object to use + * @ind_table: indirection table to configure + * @config_hash: re-configure RSS hash keys configuration + * @enable: enabled or disabled configuration + */ +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable); + +/** + * bnx2x__init_func_obj - init function object + * + * @bp: driver handle + * + * Initializes the Function Object with the appropriate + * parameters which include a function slow path driver + * interface. + */ +void bnx2x__init_func_obj(struct bnx2x *bp); + +/** + * bnx2x_setup_queue - setup eth queue. + * + * @bp: driver handle + * @fp: pointer to the fastpath structure + * @leading: boolean + * + */ +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading); + +/** + * bnx2x_setup_leading - bring up a leading eth queue. + * + * @bp: driver handle + */ +int bnx2x_setup_leading(struct bnx2x *bp); + +/** + * bnx2x_fw_command - send the MCP a request + * + * @bp: driver handle + * @command: request + * @param: request's parameter + * + * block until there is a reply + */ +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); + +/** + * bnx2x_initial_phy_init - initialize link parameters structure variables. 
+ * + * @bp: driver handle + * @load_mode: current mode + */ +int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); + +/** + * bnx2x_link_set - configure hw according to link parameters structure. + * + * @bp: driver handle + */ +void bnx2x_link_set(struct bnx2x *bp); + +/** + * bnx2x_force_link_reset - Forces link reset, and put the PHY + * in reset as well. + * + * @bp: driver handle + */ +void bnx2x_force_link_reset(struct bnx2x *bp); + +/** + * bnx2x_link_test - query link status. + * + * @bp: driver handle + * @is_serdes: bool + * + * Returns 0 if link is UP. + */ +u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); + +/** + * bnx2x_drv_pulse - write driver pulse to shmem + * + * @bp: driver handle + * + * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox + * in the shmem. + */ +void bnx2x_drv_pulse(struct bnx2x *bp); + +/** + * bnx2x_igu_ack_sb - update IGU with current SB value + * + * @bp: driver handle + * @igu_sb_id: SB id + * @segment: SB segment + * @index: SB index + * @op: SB operation + * @update: is HW update required + */ +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update); + +/* Disable transactions from chip to host */ +void bnx2x_pf_disable(struct bnx2x *bp); +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val); + +/** + * bnx2x__link_status_update - handles link status change. + * + * @bp: driver handle + */ +void bnx2x__link_status_update(struct bnx2x *bp); + +/** + * bnx2x_link_report - report link status to upper layer. + * + * @bp: driver handle + */ +void bnx2x_link_report(struct bnx2x *bp); + +/* None-atomic version of bnx2x_link_report() */ +void __bnx2x_link_report(struct bnx2x *bp); + +/** + * bnx2x_get_mf_speed - calculate MF speed. + * + * @bp: driver handle + * + * Takes into account current linespeed and MF configuration. + */ +u16 bnx2x_get_mf_speed(struct bnx2x *bp); + +/** + * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); + +/** + * bnx2x_interrupt - non MSI-X interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); + +/** + * bnx2x_cnic_notify - send command to cnic driver + * + * @bp: driver handle + * @cmd: command + */ +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); + +/** + * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information + * + * @bp: driver handle + */ +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); + +/** + * bnx2x_setup_cnic_info - provides cnic with updated info + * + * @bp: driver handle + */ +void bnx2x_setup_cnic_info(struct bnx2x *bp); + +/** + * bnx2x_int_enable - enable HW interrupts. + * + * @bp: driver handle + */ +void bnx2x_int_enable(struct bnx2x *bp); + +/** + * bnx2x_int_disable_sync - disable interrupts. + * + * @bp: driver handle + * @disable_hw: true, disable HW interrupts. + * + * This function ensures that there are no + * ISRs or SP DPCs (sp_task) are running after it returns. + */ +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); + +/** + * bnx2x_nic_init_cnic - init driver internals for cnic. + * + * @bp: driver handle + * @load_code: COMMON, PORT or FUNCTION + * + * Initializes: + * - rings + * - status blocks + * - etc. + */ +void bnx2x_nic_init_cnic(struct bnx2x *bp); + +/** + * bnx2x_preirq_nic_init - init driver internals. 
+ * + * @bp: driver handle + * + * Initializes: + * - fastpath object + * - fastpath rings + * etc. + */ +void bnx2x_pre_irq_nic_init(struct bnx2x *bp); + +/** + * bnx2x_postirq_nic_init - init driver internals. + * + * @bp: driver handle + * @load_code: COMMON, PORT or FUNCTION + * + * Initializes: + * - status blocks + * - slowpath rings + * - etc. + */ +void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code); +/** + * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic. + * + * @bp: driver handle + */ +int bnx2x_alloc_mem_cnic(struct bnx2x *bp); +/** + * bnx2x_alloc_mem - allocate driver's memory. + * + * @bp: driver handle + */ +int bnx2x_alloc_mem(struct bnx2x *bp); + +/** + * bnx2x_free_mem_cnic - release driver's memory for cnic. + * + * @bp: driver handle + */ +void bnx2x_free_mem_cnic(struct bnx2x *bp); +/** + * bnx2x_free_mem - release driver's memory. + * + * @bp: driver handle + */ +void bnx2x_free_mem(struct bnx2x *bp); + +/** + * bnx2x_set_num_queues - set number of queues according to mode. + * + * @bp: driver handle + */ +void bnx2x_set_num_queues(struct bnx2x *bp); + +/** + * bnx2x_chip_cleanup - cleanup chip internals. + * + * @bp: driver handle + * @unload_mode: COMMON, PORT, FUNCTION + * @keep_link: true iff link should be kept up. + * + * - Cleanup MAC configuration. + * - Closes clients. + * - etc. + */ +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link); + +/** + * bnx2x_acquire_hw_lock - acquire HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); + +/** + * bnx2x_release_hw_lock - release HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); + +/** + * bnx2x_release_leader_lock - release recovery leader lock + * + * @bp: driver handle + */ +int bnx2x_release_leader_lock(struct bnx2x *bp); + +/** + * bnx2x_set_eth_mac - configure eth MAC address in the HW + * + * @bp: driver handle + * @set: set or clear + * + * Configures according to the value in netdev->dev_addr. + */ +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); + +/** + * bnx2x_set_rx_mode - set MAC filtering configurations. + * + * @dev: netdevice + * + * called with netif_tx_lock from dev_mcast.c + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh() + */ +void bnx2x_set_rx_mode_inner(struct bnx2x *bp); + +/* Parity errors related */ +void bnx2x_set_pf_load(struct bnx2x *bp); +bool bnx2x_clear_pf_load(struct bnx2x *bp); +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); +void bnx2x_set_reset_in_progress(struct bnx2x *bp); +void bnx2x_set_reset_global(struct bnx2x *bp); +void bnx2x_disable_close_the_gate(struct bnx2x *bp); +int bnx2x_init_hw_func_cnic(struct bnx2x *bp); + +/** + * bnx2x_sp_event - handle ramrods completion. + * + * @fp: fastpath handle for the event + * @rr_cqe: eth_rx_cqe + */ +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); + +/** + * bnx2x_ilt_set_info - prepare ILT configurations. + * + * @bp: driver handle + */ +void bnx2x_ilt_set_info(struct bnx2x *bp); + +/** + * bnx2x_ilt_set_cnic_info - prepare ILT configurations for SRC + * and TM. + * + * @bp: driver handle + */ +void bnx2x_ilt_set_info_cnic(struct bnx2x *bp); + +/** + * bnx2x_dcbx_init - initialize dcbx protocol. 
+ * + * @bp: driver handle + */ +void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem); + +/** + * bnx2x_set_power_state - set power state to the requested value. + * + * @bp: driver handle + * @state: required state D0 or D3hot + * + * Currently only D0 and D3hot are supported. + */ +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); + +/** + * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW. + * + * @bp: driver handle + * @value: new value + */ +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); +/* Error handling */ +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); + +/* dev_close main block */ +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); + +/* dev_open main block */ +int bnx2x_nic_load(struct bnx2x *bp, int load_mode); + +/* hard_xmit callback */ +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); + +/* setup_tc callback */ +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); + +int bnx2x_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivi); +int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); +int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); + +/* select_queue callback */ +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); + +static inline void bnx2x_update_rx_prod(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, + u16 rx_sge_prod) +{ + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since FW will + * assumes BDs must have buffers. + */ + wmb(); + + for (i = 0; i < sizeof(rx_prods)/4; i++) + REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, + ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); +} + +/* reload helper */ +int bnx2x_reload_if_running(struct net_device *dev); + +int bnx2x_change_mac_addr(struct net_device *dev, void *p); + +/* NAPI poll Tx part */ +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); + +/* suspend/resume callbacks */ +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); +int bnx2x_resume(struct pci_dev *pdev); + +/* Release IRQ vectors */ +void bnx2x_free_irq(struct bnx2x *bp); + +void bnx2x_free_fp_mem(struct bnx2x *bp); +void bnx2x_init_rx_rings(struct bnx2x *bp); +void bnx2x_init_rx_rings_cnic(struct bnx2x *bp); +void bnx2x_free_skbs(struct bnx2x *bp); +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); +void bnx2x_netif_start(struct bnx2x *bp); +int bnx2x_load_cnic(struct bnx2x *bp); + +/** + * bnx2x_enable_msix - set msix configuration. + * + * @bp: driver handle + * + * fills msix_table, requests vectors, updates num_queues + * according to number of available vectors. 
+ */ +int bnx2x_enable_msix(struct bnx2x *bp); + +/** + * bnx2x_enable_msi - request MSI mode from OS, updates internals accordingly + * + * @bp: driver handle + */ +int bnx2x_enable_msi(struct bnx2x *bp); + +/** + * bnx2x_low_latency_recv - low latency (busy poll) receive callback + * + * @napi: napi structure + */ +int bnx2x_low_latency_recv(struct napi_struct *napi); + +/** + * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure + * + * @bp: driver handle + */ +int bnx2x_alloc_mem_bp(struct bnx2x *bp); + +/** + * bnx2x_free_mem_bp - release memories outside the main driver structure + * + * @bp: driver handle + */ +void bnx2x_free_mem_bp(struct bnx2x *bp); + +/** + * bnx2x_change_mtu - change mtu netdev callback + * + * @dev: net device + * @new_mtu: requested mtu + * + */ +int bnx2x_change_mtu(struct net_device *dev, int new_mtu); + +#ifdef NETDEV_FCOE_WWNN +/** + * bnx2x_fcoe_get_wwn - return the requested WWN value for this port + * + * @dev: net_device + * @wwn: output buffer + * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port) + * + */ +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); +#endif + +netdev_features_t bnx2x_fix_features(struct net_device *dev, + netdev_features_t features); +int bnx2x_set_features(struct net_device *dev, netdev_features_t features); + +/** + * bnx2x_tx_timeout - tx timeout netdev callback + * + * @dev: net device + */ +void bnx2x_tx_timeout(struct net_device *dev); + +/*********************** Inlines **********************************/ +/*********************** Fast path ********************************/ +static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) +{ + barrier(); /* status block is written to by the chip */ + fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; +} + +static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, + u8 segment, u16 index, u8 op, + u8 update, u32 igu_addr) +{ + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr); + REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); +} + +static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, + u8 storm, u16 index, u8 op, u8 update) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); +} + +static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, + u16 index, u8 op, u8 update) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); + else { + u8 segment; + + if (CHIP_INT_MODE_IS_BC(bp)) + segment = storm; + else if (igu_sb_id != bp->igu_dsb_id) + segment = IGU_SEG_ACCESS_DEF; + else if (storm == ATTENTION_ID) + segment = IGU_SEG_ACCESS_ATTN; + else + segment = IGU_SEG_ACCESS_DEF; + bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update); + } +} + +static inline u16
bnx2x_hc_ack_int(struct bnx2x *bp) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_SIMD_MASK); + u32 result = REG_RD(bp, hc_addr); + + barrier(); + return result; +} + +static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) +{ + u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); + u32 result = REG_RD(bp, igu_addr); + + DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n", + result, igu_addr); + + barrier(); + return result; +} + +static inline u16 bnx2x_ack_int(struct bnx2x *bp) +{ + barrier(); + if (bp->common.int_block == INT_BLOCK_HC) + return bnx2x_hc_ack_int(bp); + else + return bnx2x_igu_ack_int(bp); +} + +static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata) +{ + /* Tell compiler that consumer and producer can change */ + barrier(); + return txdata->tx_pkt_prod != txdata->tx_pkt_cons; +} + +static inline u16 bnx2x_tx_avail(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) +{ + s16 used; + u16 prod; + u16 cons; + + prod = txdata->tx_bd_prod; + cons = txdata->tx_bd_cons; + + used = SUB_S16(prod, cons); + +#ifdef BNX2X_STOP_ON_ERROR + WARN_ON(used < 0); + WARN_ON(used > txdata->tx_ring_size); + WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL); +#endif + + return (s16)(txdata->tx_ring_size) - used; +} + +static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) +{ + u16 hw_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + return hw_cons != txdata->tx_pkt_cons; +} + +static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) +{ + u8 cos; + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) + return true; + return false; +} + +#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0) +#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF) +static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) +{ + u16 cons; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; + + cons = RCQ_BD(fp->rx_comp_cons); + cqe = &fp->rx_comp_ring[cons]; + cqe_fp = &cqe->fast_path_cqe; + return BNX2X_IS_CQE_COMPLETED(cqe_fp); +} + +/** + * bnx2x_tx_disable - disables tx from stack point of view + * + * @bp: driver handle + */ +static inline void bnx2x_tx_disable(struct bnx2x *bp) +{ + netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); +} + +static inline void bnx2x_free_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct page *page = sw_buf->page; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + + /* Skip "next page" elements */ + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGES, DMA_FROM_DEVICE); + __free_pages(page, PAGES_PER_SGE_SHIFT); + + sw_buf->page = NULL; + sge->addr_hi = 0; + sge->addr_lo = 0; +} + +static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue_cnic(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); + netif_napi_del(&bnx2x_fp(bp, i, napi)); + } +} + +static inline void bnx2x_del_all_napi(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) { + napi_hash_del(&bnx2x_fp(bp, i, napi)); + netif_napi_del(&bnx2x_fp(bp, i, napi)); + } +} + +int bnx2x_set_int_mode(struct bnx2x *bp); + +static inline void bnx2x_disable_msi(struct bnx2x *bp) +{ + if (bp->flags & USING_MSIX_FLAG) { + pci_disable_msix(bp->pdev); + bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG); + } else if 
(bp->flags & USING_MSI_FLAG) { + pci_disable_msi(bp->pdev); + bp->flags &= ~USING_MSI_FLAG; + } +} + +static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) +{ + int i, j; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + int idx = RX_SGE_CNT * i - 1; + + for (j = 0; j < 2; j++) { + BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); + idx--; + } + } +} + +static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) +{ + /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ + memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); + + /* Clear the two last indices in the page to 1: + these are the indices that correspond to the "next" element, + hence will never be indicated and should be removed from + the calculations. */ + bnx2x_clear_sge_mask_next_elems(fp); +} + +/* note that we are not allocating a new buffer, + * we are just moving one from cons to prod + * we are not creating a new mapping, + * so there is no need to check for dma_mapping_error(). + */ +static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp, + u16 cons, u16 prod) +{ + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + prod_rx_buf->data = cons_rx_buf->data; + *prod_bd = *cons_bd; +} + +/************************* Init ******************************************/ + +/* returns func by VN for current port */ +static inline int func_by_vn(struct bnx2x *bp, int vn) +{ + return 2 * vn + BP_PORT(bp); +} + +static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) +{ + return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true); +} + +/** + * bnx2x_func_start - init function + * + * @bp: driver handle + * + * Must be called before sending CLIENT_SETUP for the first client. 
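+ * + * Sends the function START command (BNX2X_F_CMD_START) carrying the MF mode, + * outer VLAN tag, COS scheduling mode and tunnelling parameters, and waits + * for its completion (RAMROD_COMP_WAIT).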
+ */ +static inline int bnx2x_func_start(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_start_params *start_params = + &func_params.params.start; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_START; + + /* Function parameters */ + start_params->mf_mode = bp->mf_mode; + start_params->sd_vlan_tag = bp->mf_ov; + + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) + start_params->network_cos_mode = STATIC_COS; + else /* CHIP_IS_E1X */ + start_params->network_cos_mode = FW_WRR; + + start_params->tunnel_mode = TUNN_MODE_GRE; + start_params->gre_tunnel_type = IPGRE_TUNNEL; + start_params->inner_gre_rss_en = 1; + + if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { + start_params->class_fail_ethtype = ETH_P_FIP; + start_params->class_fail = 1; + start_params->no_added_tags = 1; + } + + return bnx2x_func_state_change(bp, &func_params); +} + +/** + * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format + * + * @fw_hi: pointer to upper part + * @fw_mid: pointer to middle part + * @fw_lo: pointer to lower part + * @mac: pointer to MAC address + */ +static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, + __le16 *fw_lo, u8 *mac) +{ + ((u8 *)fw_hi)[0] = mac[1]; + ((u8 *)fw_hi)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lo)[0] = mac[5]; + ((u8 *)fw_lo)[1] = mac[4]; +} + +static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + if (fp->mode == TPA_MODE_DISABLED) + return; + + for (i = 0; i < last; i++) + bnx2x_free_rx_sge(bp, fp, i); +} + +static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_RINGS; i++) { + struct eth_rx_bd *rx_bd; + + rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; + rx_bd->addr_hi = + cpu_to_le32(U64_HI(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + rx_bd->addr_lo = + cpu_to_le32(U64_LO(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + } +} + +/* Statistics ID are global per chip/path, while Client IDs for E1x are per + * port. + */ +static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + if (!CHIP_IS_E1x(bp)) { + /* there are special statistics counters for FCoE 136..140 */ + if (IS_FCOE_FP(fp)) + return bp->cnic_base_cl_id + (bp->pf_num >> 1); + return fp->cl_id; + } + return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; +} + +static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, + bnx2x_obj_type obj_type) +{ + struct bnx2x *bp = fp->bp; + + /* Configure classification DBs */ + bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id, + fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, obj_type, + &bp->macs_pool); +} + +/** + * bnx2x_get_path_func_num - get number of active functions + * + * @bp: driver handle + * + * Calculates the number of active (not hidden) functions on the + * current path. + */ +static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) +{ + u8 func_num = 0, i; + + /* 57710 has only one function per-port */ + if (CHIP_IS_E1(bp)) + return 1; + + /* Calculate a number of functions enabled on the current + * PATH/PORT. 
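+ * On slow (emulation/FPGA) chip revisions a fixed count is assumed; + * otherwise each function's FUNC_MF_CFG_FUNC_HIDE bit is consulted.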
+ */ + if (CHIP_REV_IS_SLOW(bp)) { + if (IS_MF(bp)) + func_num = 4; + else + func_num = 2; + } else { + for (i = 0; i < E1H_FUNC_MAX / 2; i++) { + u32 func_config = + MF_CFG_RD(bp, + func_mf_config[BP_PORT(bp) + 2 * i]. + config); + func_num += + ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1); + } + } + + WARN_ON(!func_num); + + return func_num; +} + +static inline void bnx2x_init_bp_objs(struct bnx2x *bp) +{ + /* RX_MODE controlling object */ + bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); + + /* multicast configuration controlling object */ + bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, + BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, mcast_rdata), + bnx2x_sp_mapping(bp, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); + + /* Setup CAM credit pools */ + bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), + bnx2x_get_path_func_num(bp)); + + bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1, + bnx2x_get_path_func_num(bp)); + + /* RSS configuration object */ + bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, + bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, rss_rdata), + bnx2x_sp_mapping(bp, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); +} + +static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->bp)) + return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; + else + return fp->cl_id; +} + +static inline void bnx2x_init_txdata(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, u32 cid, + int txq_index, __le16 *tx_cons_sb, + struct bnx2x_fastpath *fp) +{ + txdata->cid = cid; + txdata->txq_index = txq_index; + txdata->tx_cons_sb = tx_cons_sb; + txdata->parent_fp = fp; + txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size; + + DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", + txdata->cid, txdata->txq_index); +} + +static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) +{ + return bp->cnic_base_cl_id + cl_idx + + (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; +} + +static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) +{ + /* the 'first' id is allocated for the cnic */ + return bp->base_fw_ndsb; +} + +static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) +{ + return bp->igu_base_sb; +} + +static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) +{ + int cnt = 1000; + + while (bnx2x_has_tx_work_unload(txdata)) { + if (!cnt) { + BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", + txdata->txq_index, txdata->tx_pkt_prod, + txdata->tx_pkt_cons); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); + return -EBUSY; +#else + break; +#endif + } + cnt--; + usleep_range(1000, 2000); + } + + return 0; +} + +int bnx2x_get_link_cfg_idx(struct bnx2x *bp); + +static inline void __storm_memset_struct(struct bnx2x *bp, + u32 addr, size_t size, u32 *data) +{ + int i; + for (i = 0; i < size/4; i++) + REG_WR(bp, addr + (i * 4), data[i]); +} + +/** + * bnx2x_wait_sp_comp - wait for the outstanding SP commands. 
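+ * + * Polls bp->sp_state under netif_addr_lock_bh() for up to 5 seconds; returns + * true once all bits in @mask are cleared, false on timeout.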
+ * + * @bp: driver handle + * @mask: bits that need to be cleared + */ +static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) +{ + int tout = 5000; /* Wait for 5 secs tops */ + + while (tout--) { + smp_mb(); + netif_addr_lock_bh(bp->dev); + if (!(bp->sp_state & mask)) { + netif_addr_unlock_bh(bp->dev); + return true; + } + netif_addr_unlock_bh(bp->dev); + + usleep_range(1000, 2000); + } + + smp_mb(); + + netif_addr_lock_bh(bp->dev); + if (bp->sp_state & mask) { + BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n", + bp->sp_state, mask); + netif_addr_unlock_bh(bp->dev); + return false; + } + netif_addr_unlock_bh(bp->dev); + + return true; +} + +/** + * bnx2x_set_ctx_validation - set CDU context validation values + * + * @bp: driver handle + * @cxt: context of the connection on the host memory + * @cid: SW CID of the connection to be configured + */ +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid); + +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec); +void bnx2x_acquire_phy_lock(struct bnx2x *bp); +void bnx2x_release_phy_lock(struct bnx2x *bp); + +/** + * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. + * + * @bp: driver handle + * @mf_cfg: MF configuration + * + */ +static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) +{ + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (!max_cfg) { + DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL, + "Max BW configured to 0 - using 100 instead\n"); + max_cfg = 100; + } + return max_cfg; +} + +/* checks if HW supports GRO for given MTU */ +static inline bool bnx2x_mtu_allows_gro(int mtu) +{ + /* gro frags per page */ + int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); + + /* + * 1. Number of frags should not grow above MAX_SKB_FRAGS + * 2. Frag must fit the page + */ + return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; +} + +/** + * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. + * + * @bp: driver handle + * + */ +void bnx2x_get_iscsi_info(struct bnx2x *bp); + +/** + * bnx2x_link_sync_notify - send notification to other functions. 
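+ * + * Raises the per-function link-sync general attention towards every other + * VN on the same port so that their drivers re-evaluate the link state.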
+ * + * @bp: driver handle + * + */ +static inline void bnx2x_link_sync_notify(struct bnx2x *bp) +{ + int func; + int vn; + + /* Set the attention towards other drivers on the same port */ + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + if (vn == BP_VN(bp)) + continue; + + func = func_by_vn(bp, vn); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 + + (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1); + } +} + +/** + * bnx2x_update_drv_flags - update flags in shmem + * + * @bp: driver handle + * @flags: flags to update + * @set: set or clear + * + */ +static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) +{ + if (SHMEM2_HAS(bp, drv_flags)) { + u32 drv_flags; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); + drv_flags = SHMEM2_RD(bp, drv_flags); + + if (set) + SET_FLAGS(drv_flags, flags); + else + RESET_FLAGS(drv_flags, flags); + + SHMEM2_WR(bp, drv_flags, drv_flags); + DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS); + } +} + + + +/** + * bnx2x_fill_fw_str - Fill buffer with FW version string + * + * @bp: driver handle + * @buf: character buffer to fill with the fw name + * @buf_len: length of the above buffer + * + */ +void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len); + +int bnx2x_drain_tx_queues(struct bnx2x *bp); +void bnx2x_squeeze_objects(struct bnx2x *bp); + +void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, + u32 verbose); + +#endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c new file mode 100644 index 000000000..6e4294ed1 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -0,0 +1,2552 @@ +/* bnx2x_dcb.c: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Ariel Elior + * Written by: Dmitry Kravkov + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/rtnetlink.h> +#include <net/dcbnl.h> + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dcb.h" + +/* forward declarations of dcbx related functions */ +static void bnx2x_pfc_set_pfc(struct bnx2x *bp); +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, + u32 *set_configuration_ets_pg, + u32 *pri_pg_tbl); +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, + u32 *pg_pri_orginal_spread, + struct pg_help_data *help_data); +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + u32 *pg_pri_orginal_spread); +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + struct dcbx_ets_feature *ets); +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, + struct bnx2x_func_tx_start_params*); + +/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */ +static void bnx2x_read_data(struct bnx2x *bp, u32 *buff, + u32 addr, u32 len) +{ + int i; + for (i = 0; i < len; i += 4, buff++) + *buff = REG_RD(bp, addr + i); +} + +static void bnx2x_write_data(struct bnx2x *bp, u32 *buff, + u32 addr, u32 len) +{ + int i; + for (i = 0; i < len; i += 4, buff++) + REG_WR(bp, addr + i, *buff); +} + +static void bnx2x_pfc_set(struct bnx2x *bp) +{ + struct bnx2x_nig_brb_pfc_port_params pfc_params = {0}; + u32 pri_bit, val = 0; + int i; + + pfc_params.num_of_rx_cos_priority_mask = + bp->dcbx_port_params.ets.num_of_cos; + + /* Tx COS configuration */ + for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++) + /* + * We configure only the pauseable bits (non-pauseable aren't + * configured at all); this is done to avoid false pauses from + * the network + */ + pfc_params.rx_cos_priority_mask[i] = + bp->dcbx_port_params.ets.cos_params[i].pri_bitmask + & DCBX_PFC_PRI_PAUSE_MASK(bp); + + /* + * Rx COS configuration + * Changing PFC RX configuration.
+ * In RX COS0 will always be configured to lossless and COS1 to lossy + */ + for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) { + pri_bit = 1 << i; + + if (!(pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))) + val |= 1 << (i * 4); + } + + pfc_params.pkt_priority_to_cos = val; + + /* RX COS0 */ + pfc_params.llfc_low_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp); + /* RX COS1 */ + pfc_params.llfc_high_priority_classes = 0; + + bnx2x_acquire_phy_lock(bp); + bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED; + bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params); + bnx2x_release_phy_lock(bp); +} + +static void bnx2x_pfc_clear(struct bnx2x *bp) +{ + struct bnx2x_nig_brb_pfc_port_params nig_params = {0}; + nig_params.pause_enable = 1; + bnx2x_acquire_phy_lock(bp); + bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED; + bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params); + bnx2x_release_phy_lock(bp); +} + +static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp, + struct dcbx_features *features, + u32 error) +{ + u8 i = 0; + DP(NETIF_MSG_LINK, "local_mib.error %x\n", error); + + /* PG */ + DP(NETIF_MSG_LINK, + "local_mib.features.ets.enabled %x\n", features->ets.enabled); + for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) + DP(NETIF_MSG_LINK, + "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i, + DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i)); + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) + DP(NETIF_MSG_LINK, + "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i, + DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i)); + + /* pfc */ + DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n", + features->pfc.pri_en_bitmap); + DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n", + features->pfc.pfc_caps); + DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n", + features->pfc.enabled); + + DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n", + features->app.default_pri); + DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n", + features->app.tc_supported); + DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n", + features->app.enabled); + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + DP(BNX2X_MSG_DCB, + "dcbx_features.app.app_pri_tbl[%x].app_id %x\n", + i, features->app.app_pri_tbl[i].app_id); + DP(BNX2X_MSG_DCB, + "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n", + i, features->app.app_pri_tbl[i].pri_bitmap); + DP(BNX2X_MSG_DCB, + "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n", + i, features->app.app_pri_tbl[i].appBitfield); + } +} + +static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp, + u8 pri_bitmap, + u8 llfc_traf_type) +{ + u32 pri = MAX_PFC_PRIORITIES; + u32 index = MAX_PFC_PRIORITIES - 1; + u32 pri_mask; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + /* Choose the highest priority */ + while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) { + pri_mask = 1 << index; + if (GET_FLAGS(pri_bitmap, pri_mask)) + pri = index ; + index--; + } + + if (pri < MAX_PFC_PRIORITIES) + ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri); +} + +static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, + struct dcbx_app_priority_feature *app, + u32 error) { + u8 index; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + + if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n"); + + if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH)) + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n"); + + if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND)) + DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n"); + if 
(app->enabled && + !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH | + DCBX_REMOTE_APP_TLV_NOT_FOUND)) { + + bp->dcbx_port_params.app.enabled = true; + + for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) + ttp[index] = 0; + + if (app->default_pri < MAX_PFC_PRIORITIES) + ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri; + + for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) { + struct dcbx_app_priority_entry *entry = + app->app_pri_tbl; + + if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE) && + ETH_TYPE_FCOE == entry[index].app_id) + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + LLFC_TRAFFIC_TYPE_FCOE); + + if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_PORT) && + TCP_PORT_ISCSI == entry[index].app_id) + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + LLFC_TRAFFIC_TYPE_ISCSI); + } + } else { + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n"); + bp->dcbx_port_params.app.enabled = false; + for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) + ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY; + } +} + +static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp, + struct dcbx_ets_feature *ets, + u32 error) { + int i = 0; + u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0}; + struct pg_help_data pg_help_data; + struct bnx2x_dcbx_cos_params *cos_params = + bp->dcbx_port_params.ets.cos_params; + + memset(&pg_help_data, 0, sizeof(struct pg_help_data)); + + if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR)) + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n"); + + if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND)) + DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n"); + + /* Clean up old settings of ets on COS */ + for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) { + cos_params[i].pauseable = false; + cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID; + cos_params[i].bw_tbl = DCBX_INVALID_COS_BW; + cos_params[i].pri_bitmask = 0; + } + + if (bp->dcbx_port_params.app.enabled && ets->enabled && + !GET_FLAGS(error, + DCBX_LOCAL_ETS_ERROR | DCBX_REMOTE_ETS_TLV_NOT_FOUND)) { + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n"); + bp->dcbx_port_params.ets.enabled = true; + + bnx2x_dcbx_get_ets_pri_pg_tbl(bp, + pg_pri_orginal_spread, + ets->pri_pg_tbl); + + bnx2x_dcbx_get_num_pg_traf_type(bp, + pg_pri_orginal_spread, + &pg_help_data); + + bnx2x_dcbx_fill_cos_params(bp, &pg_help_data, + ets, pg_pri_orginal_spread); + + } else { + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n"); + bp->dcbx_port_params.ets.enabled = false; + ets->pri_pg_tbl[0] = 0; + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++) + DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1); + } +} + +static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp, + struct dcbx_pfc_feature *pfc, u32 error) +{ + if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR)) + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n"); + + if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND)) + DP(BNX2X_MSG_DCB, "DCBX_REMOTE_PFC_TLV_NOT_FOUND\n"); + if (bp->dcbx_port_params.app.enabled && pfc->enabled && + !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH | + DCBX_REMOTE_PFC_TLV_NOT_FOUND)) { + bp->dcbx_port_params.pfc.enabled = true; + bp->dcbx_port_params.pfc.priority_non_pauseable_mask = + ~(pfc->pri_en_bitmap); + } else { + DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n"); + bp->dcbx_port_params.pfc.enabled = false; + bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0; + } +} + +/* maps unmapped priorities to to the same COS as L2 */ +static void bnx2x_dcbx_map_nw(struct bnx2x *bp) +{ + 
int i; + u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */ + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW]; + struct bnx2x_dcbx_cos_params *cos_params = + bp->dcbx_port_params.ets.cos_params; + + /* get unmapped priorities by clearing mapped bits */ + for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) + unmapped &= ~(1 << ttp[i]); + + /* find cos for nw prio and extend it with unmapped */ + for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) { + if (cos_params[i].pri_bitmask & nw_prio) { + /* extend the bitmask with unmapped */ + DP(BNX2X_MSG_DCB, + "cos %d extended with 0x%08x\n", i, unmapped); + cos_params[i].pri_bitmask |= unmapped; + break; + } + } +} + +static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp, + struct dcbx_features *features, + u32 error) +{ + bnx2x_dcbx_get_ap_feature(bp, &features->app, error); + + bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error); + + bnx2x_dcbx_get_ets_feature(bp, &features->ets, error); + + bnx2x_dcbx_map_nw(bp); +} + +#define DCBX_LOCAL_MIB_MAX_TRY_READ (100) +static int bnx2x_dcbx_read_mib(struct bnx2x *bp, + u32 *base_mib_addr, + u32 offset, + int read_mib_type) +{ + int max_try_read = 0; + u32 mib_size, prefix_seq_num, suffix_seq_num; + struct lldp_remote_mib *remote_mib ; + struct lldp_local_mib *local_mib; + + switch (read_mib_type) { + case DCBX_READ_LOCAL_MIB: + mib_size = sizeof(struct lldp_local_mib); + break; + case DCBX_READ_REMOTE_MIB: + mib_size = sizeof(struct lldp_remote_mib); + break; + default: + return 1; /*error*/ + } + + offset += BP_PORT(bp) * mib_size; + + do { + bnx2x_read_data(bp, base_mib_addr, offset, mib_size); + + max_try_read++; + + switch (read_mib_type) { + case DCBX_READ_LOCAL_MIB: + local_mib = (struct lldp_local_mib *) base_mib_addr; + prefix_seq_num = local_mib->prefix_seq_num; + suffix_seq_num = local_mib->suffix_seq_num; + break; + case DCBX_READ_REMOTE_MIB: + remote_mib = (struct lldp_remote_mib *) base_mib_addr; + prefix_seq_num = remote_mib->prefix_seq_num; + suffix_seq_num = remote_mib->suffix_seq_num; + break; + default: + return 1; /*error*/ + } + } while ((prefix_seq_num != suffix_seq_num) && + (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ)); + + if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) { + BNX2X_ERR("MIB could not be read\n"); + return 1; + } + + return 0; +} + +static void bnx2x_pfc_set_pfc(struct bnx2x *bp) +{ + int mfw_configured = SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); + + if (bp->dcbx_port_params.pfc.enabled && + (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured)) + /* + * 1. Fills up common PFC structures if required + * 2. 
Configure NIG, MAC and BRB via the elink + */ + bnx2x_pfc_set(bp); + else + bnx2x_pfc_clear(bp); +} + +int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + int rc; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_TX_STOP; + + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); + + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) { + BNX2X_ERR("Unable to hold traffic for HW configuration\n"); + bnx2x_panic(); + } + + return rc; +} + +int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_tx_start_params *tx_params = + &func_params.params.tx_start; + int rc; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_TX_START; + + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + bnx2x_dcbx_fw_struct(bp, tx_params); + + DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); + + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) { + BNX2X_ERR("Unable to resume traffic after HW configuration\n"); + bnx2x_panic(); + } + + return rc; +} + +static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) +{ + struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); + int rc = 0; + + if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) { + BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos); + return; + } + + /* valid COS entries */ + if (ets->num_of_cos == 1) /* no ETS */ + return; + + /* sanity */ + if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) && + (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) || + ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) && + (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) { + BNX2X_ERR("all COS should have at least bw_limit or strict" + "ets->cos_params[0].strict= %x" + "ets->cos_params[0].bw_tbl= %x" + "ets->cos_params[1].strict= %x" + "ets->cos_params[1].bw_tbl= %x", + ets->cos_params[0].strict, + ets->cos_params[0].bw_tbl, + ets->cos_params[1].strict, + ets->cos_params[1].bw_tbl); + return; + } + /* If we join a group and there is bw_tbl and strict then bw rules */ + if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) && + (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) { + u32 bw_tbl_0 = ets->cos_params[0].bw_tbl; + u32 bw_tbl_1 = ets->cos_params[1].bw_tbl; + /* Do not allow 0-100 configuration + * since PBF does not support it + * force 1-99 instead + */ + if (bw_tbl_0 == 0) { + bw_tbl_0 = 1; + bw_tbl_1 = 99; + } else if (bw_tbl_1 == 0) { + bw_tbl_1 = 1; + bw_tbl_0 = 99; + } + + bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1); + } else { + if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST) + rc = bnx2x_ets_strict(&bp->link_params, 0); + else if (ets->cos_params[1].strict + == BNX2X_DCBX_STRICT_COS_HIGHEST) + rc = bnx2x_ets_strict(&bp->link_params, 1); + if (rc) + BNX2X_ERR("update_ets_params failed\n"); + } +} + +/* + * In E3B0 the configuration may have more than 2 COS. 
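+ * Each COS is programmed either as strict priority or with a BW share, + * according to the negotiated ETS parameters; entries that are neither + * (or both) are rejected.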
+ */ +static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp) +{ + struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets); + struct bnx2x_ets_params ets_params = { 0 }; + u8 i; + + ets_params.num_of_cos = ets->num_of_cos; + + for (i = 0; i < ets->num_of_cos; i++) { + /* COS is SP */ + if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) { + if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) { + BNX2X_ERR("COS can't be not BW and not SP\n"); + return; + } + + ets_params.cos[i].state = bnx2x_cos_state_strict; + ets_params.cos[i].params.sp_params.pri = + ets->cos_params[i].strict; + } else { /* COS is BW */ + if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) { + BNX2X_ERR("COS can't be not BW and not SP\n"); + return; + } + ets_params.cos[i].state = bnx2x_cos_state_bw; + ets_params.cos[i].params.bw_params.bw = + (u8)ets->cos_params[i].bw_tbl; + } + } + + /* Configure the ETS in HW */ + if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars, + &ets_params)) { + BNX2X_ERR("bnx2x_ets_e3b0_config failed\n"); + bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); + } +} + +static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) +{ + int mfw_configured = SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); + + bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); + + if (!bp->dcbx_port_params.ets.enabled || + ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured)) + return; + + if (CHIP_IS_E3B0(bp)) + bnx2x_dcbx_update_ets_config(bp); + else + bnx2x_dcbx_2cos_limit_update_ets_config(bp); +} + +#ifdef BCM_DCBNL +static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp) +{ + struct lldp_remote_mib remote_mib = {0}; + u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset); + int rc; + + DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n", + dcbx_remote_mib_offset); + + if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) { + BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n"); + return -EINVAL; + } + + rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset, + DCBX_READ_REMOTE_MIB); + + if (rc) { + BNX2X_ERR("Failed to read remote mib from FW\n"); + return rc; + } + + /* save features and flags */ + bp->dcbx_remote_feat = remote_mib.features; + bp->dcbx_remote_flags = remote_mib.flags; + return 0; +} +#endif + +static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) +{ + struct lldp_local_mib local_mib = {0}; + u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset); + int rc; + + DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset); + + if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) { + BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n"); + return -EINVAL; + } + + rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset, + DCBX_READ_LOCAL_MIB); + + if (rc) { + BNX2X_ERR("Failed to read local mib from FW\n"); + return rc; + } + + /* save features and error */ + bp->dcbx_local_feat = local_mib.features; + bp->dcbx_error = local_mib.error; + return 0; +} + +#ifdef BCM_DCBNL +static inline +u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) +{ + u8 pri; + + /* Choose the highest priority */ + for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--) + if (ent->pri_bitmap & (1 << pri)) + break; + return pri; +} + +static inline +u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent) +{ + return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) == + DCBX_APP_SF_PORT) ? 
DCB_APP_IDTYPE_PORTNUM : + DCB_APP_IDTYPE_ETHTYPE; +} + +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) +{ + int i, err = 0; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) { + struct dcbx_app_priority_entry *ent = + &bp->dcbx_local_feat.app.app_pri_tbl[i]; + + if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { + u8 up = bnx2x_dcbx_dcbnl_app_up(ent); + + /* avoid invalid user-priority */ + if (up) { + struct dcb_app app; + app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent); + app.protocol = ent->app_id; + app.priority = delall ? 0 : up; + err = dcb_setapp(bp->dev, &app); + } + } + } + return err; +} +#endif + +static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) +{ + u8 prio, cos; + for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) { + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask + & (1 << prio)) { + bp->prio_to_cos[prio] = cos; + DP(BNX2X_MSG_DCB, + "tx_mapping %d --> %d\n", prio, cos); + } + } + } + + /* setup tc must be called under rtnl lock, but we can't take it here + * as we are handling an attention on a work queue which must be + * flushed at some rtnl-locked contexts (e.g. if down) + */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0); +} + +void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) +{ + switch (state) { + case BNX2X_DCBX_STATE_NEG_RECEIVED: + { + DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); +#ifdef BCM_DCBNL + /** + * Delete app tlvs from dcbnl before reading new + * negotiation results + */ + bnx2x_dcbnl_update_applist(bp, true); + + /* Read remote mib if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_remote_mib(bp)) + return; +#endif + /* Read neg results if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_neg_results(bp)) + return; + + bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + + bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + + /* mark DCBX result for PMF migration */ + bnx2x_update_drv_flags(bp, + 1 << DRV_FLAGS_DCB_CONFIGURED, + 1); +#ifdef BCM_DCBNL + /* + * Add new app tlvs to dcbnl + */ + bnx2x_dcbnl_update_applist(bp, false); +#endif + /* + * reconfigure the netdevice with the results of the new + * dcbx negotiation. 
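+ * (the actual setup_tc call is deferred to an rtnl-locked context via + * BNX2X_SP_RTNL_SETUP_TC)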
+ */ + bnx2x_dcbx_update_tc_mapping(bp); + + /* + * allow other functions to update their netdevices + * accordingly + */ + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0); + return; + } + case BNX2X_DCBX_STATE_TX_PAUSED: + DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n"); + bnx2x_pfc_set_pfc(bp); + + bnx2x_dcbx_update_ets_params(bp); + + /* ets may affect cmng configuration: reinit it in hw */ + bnx2x_set_local_cmng(bp); + return; + case BNX2X_DCBX_STATE_TX_RELEASED: + DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); +#ifdef BCM_DCBNL + /* + * Send a notification for the new negotiated parameters + */ + dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +#endif + return; + default: + BNX2X_ERR("Unknown DCBX_STATE\n"); + } +} + +#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \ + BP_PORT(bp)*sizeof(struct lldp_admin_mib)) + +static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, + u32 dcbx_lldp_params_offset) +{ + struct lldp_admin_mib admin_mib; + u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0; + u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp); + + /*shortcuts*/ + struct dcbx_features *af = &admin_mib.features; + struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params; + + memset(&admin_mib, 0, sizeof(struct lldp_admin_mib)); + + /* Read the data first */ + bnx2x_read_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); + + if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); + + if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) { + + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK); + admin_mib.ver_cfg_flags |= + (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) & + DCBX_CEE_VERSION_MASK; + + af->ets.enabled = (u8)dp->admin_ets_enable; + + af->pfc.enabled = (u8)dp->admin_pfc_enable; + + /* FOR IEEE dp->admin_tc_supported_tx_enable */ + if (dp->admin_ets_configuration_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_ETS_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_ETS_CONFIG_TX_ENABLED); + /* For IEEE admin_ets_recommendation_tx_enable */ + if (dp->admin_pfc_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_PFC_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_PFC_CONFIG_TX_ENABLED); + + if (dp->admin_application_priority_tx_enable) + SET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_APP_CONFIG_TX_ENABLED); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, + DCBX_APP_CONFIG_TX_ENABLED); + + if (dp->admin_ets_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING); + /* For IEEE admin_ets_reco_valid */ + if (dp->admin_pfc_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING); + + if (dp->admin_app_priority_willing) + SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); + else + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING); + + for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) { + DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i, + (u8)dp->admin_configuration_bw_precentage[i]); + + DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n", + i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i)); + } + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { + 
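+ /* mirror the admin priority-to-PG assignment into the admin MIB */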
DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i, + (u8)dp->admin_configuration_ets_pg[i]); + + DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n", + i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i)); + } + + /*For IEEE admin_recommendation_bw_percentage + *For IEEE admin_recommendation_ets_pg */ + af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap; + for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { + if (dp->admin_priority_app_table[i].valid) { + struct bnx2x_admin_priority_app_table *table = + dp->admin_priority_app_table; + if ((ETH_TYPE_FCOE == table[i].app_id) && + (TRAFFIC_TYPE_ETH == table[i].traffic_type)) + traf_type = FCOE_APP_IDX; + else if ((TCP_PORT_ISCSI == table[i].app_id) && + (TRAFFIC_TYPE_PORT == table[i].traffic_type)) + traf_type = ISCSI_APP_IDX; + else + traf_type = other_traf_type++; + + af->app.app_pri_tbl[traf_type].app_id = + table[i].app_id; + + af->app.app_pri_tbl[traf_type].pri_bitmap = + (u8)(1 << table[i].priority); + + af->app.app_pri_tbl[traf_type].appBitfield = + (DCBX_APP_ENTRY_VALID); + + af->app.app_pri_tbl[traf_type].appBitfield |= + (TRAFFIC_TYPE_ETH == table[i].traffic_type) ? + DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT; + } + } + + af->app.default_pri = (u8)dp->admin_default_priority; + } + + /* Write the data. */ + bnx2x_write_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); +} + +void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) +{ + if (!CHIP_IS_E1x(bp)) { + bp->dcb_state = dcb_on; + bp->dcbx_enabled = dcbx_enabled; + } else { + bp->dcb_state = false; + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID; + } + DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n", + dcb_on ? "ON" : "OFF", + dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" : + dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" : + dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ? 
+ "on-chip with negotiation" : "invalid"); +} + +void bnx2x_dcbx_init_params(struct bnx2x *bp) +{ + bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */ + bp->dcbx_config_params.admin_ets_willing = 1; + bp->dcbx_config_params.admin_pfc_willing = 1; + bp->dcbx_config_params.overwrite_settings = 1; + bp->dcbx_config_params.admin_ets_enable = 1; + bp->dcbx_config_params.admin_pfc_enable = 1; + bp->dcbx_config_params.admin_tc_supported_tx_enable = 1; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; + bp->dcbx_config_params.admin_pfc_tx_enable = 1; + bp->dcbx_config_params.admin_application_priority_tx_enable = 1; + bp->dcbx_config_params.admin_ets_reco_valid = 1; + bp->dcbx_config_params.admin_app_priority_willing = 1; + bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100; + bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0; + bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0; + bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100; + bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0; + bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0; + bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0; + bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1; + bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2; + bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3; + bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4; + bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5; + bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6; + bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7; + bp->dcbx_config_params.admin_pfc_bitmap = 0x0; + bp->dcbx_config_params.admin_priority_app_table[0].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[1].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[2].valid = 0; + bp->dcbx_config_params.admin_priority_app_table[3].valid = 0; + bp->dcbx_config_params.admin_default_priority = 0; +} + +void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) +{ + u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; + + /* only PMF can send ADMIN msg to MFW in old MFW versions */ + if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF))) + return; + + if (bp->dcbx_enabled <= 0) + return; + + /* validate: + * chip of good for dcbx version, + * dcb is 
wanted + * shmem2 contains DCBX support fields + */ + DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", + bp->dcb_state, bp->port.pmf); + + if (bp->dcb_state == BNX2X_DCB_STATE_ON && + SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { + dcbx_lldp_params_offset = + SHMEM2_RD(bp, dcbx_lldp_params_offset); + + DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n", + dcbx_lldp_params_offset); + + bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); + + if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { + /* need HW lock to avoid scenario of two drivers + * writing in parallel to shmem + */ + bnx2x_acquire_hw_lock(bp, + HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); + if (update_shmem) + bnx2x_dcbx_admin_mib_updated_params(bp, + dcbx_lldp_params_offset); + + /* Let HW start negotiation */ + bnx2x_fw_command(bp, + DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); + /* release HW lock only after MFW acks that it finished + * reading values from shmem + */ + bnx2x_release_hw_lock(bp, + HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); + } + } +} +static void +bnx2x_dcbx_print_cos_params(struct bnx2x *bp, + struct bnx2x_func_tx_start_params *pfc_fw_cfg) +{ + u8 pri = 0; + u8 cos = 0; + + DP(BNX2X_MSG_DCB, + "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version); + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n", + bp->dcbx_port_params.pfc.priority_non_pauseable_mask); + + for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) { + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask); + + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl); + + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].strict); + + DP(BNX2X_MSG_DCB, + "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n", + cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable); + } + + for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { + DP(BNX2X_MSG_DCB, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n", + pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority); + + DP(BNX2X_MSG_DCB, + "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n", + pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos); + } +} + +/* fills help_data according to pg_info */ +static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp, + u32 *pg_pri_orginal_spread, + struct pg_help_data *help_data) +{ + bool pg_found = false; + u32 i, traf_type, add_traf_type, add_pg; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + struct pg_entry_help_data *data = help_data->data; /*shortcut*/ + + /* Set to invalid */ + for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) + data[i].pg = DCBX_ILLEGAL_PG; + + for (add_traf_type = 0; + add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) { + pg_found = false; + if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) { + add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]]; + for (traf_type = 0; + traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; + traf_type++) { + if (data[traf_type].pg == add_pg) { + if (!(data[traf_type].pg_priority & + (1 << ttp[add_traf_type]))) + data[traf_type]. 
+ num_of_dif_pri++; + data[traf_type].pg_priority |= + (1 << ttp[add_traf_type]); + pg_found = true; + break; + } + } + if (false == pg_found) { + data[help_data->num_of_pg].pg = add_pg; + data[help_data->num_of_pg].pg_priority = + (1 << ttp[add_traf_type]); + data[help_data->num_of_pg].num_of_dif_pri = 1; + help_data->num_of_pg++; + } + } + DP(BNX2X_MSG_DCB, + "add_traf_type %d pg_found %s num_of_pg %d\n", + add_traf_type, (false == pg_found) ? "NO" : "YES", + help_data->num_of_pg); + } +} + +static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 pri_join_mask) +{ + /* Only one priority than only one COS */ + cos_data->data[0].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + cos_data->data[0].pri_join_mask = pri_join_mask; + cos_data->data[0].cos_bw = 100; + cos_data->num_of_cos = 1; +} + +static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp, + struct cos_entry_help_data *data, + u8 pg_bw) +{ + if (data->cos_bw == DCBX_INVALID_COS_BW) + data->cos_bw = pg_bw; + else + data->cos_bw += pg_bw; +} + +static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + struct dcbx_ets_feature *ets) +{ + u32 pri_tested = 0; + u8 i = 0; + u8 entry = 0; + u8 pg_entry = 0; + u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; + + cos_data->data[0].pausable = true; + cos_data->data[1].pausable = false; + cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; + + for (i = 0 ; i < num_of_pri ; i++) { + pri_tested = 1 << bp->dcbx_port_params. + app.traffic_type_priority[i]; + + if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) { + cos_data->data[1].pri_join_mask |= pri_tested; + entry = 1; + } else { + cos_data->data[0].pri_join_mask |= pri_tested; + entry = 0; + } + pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params. + app.traffic_type_priority[i]]; + /* There can be only one strict pg */ + if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) + bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry], + DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry)); + else + /* If we join a group and one is strict + * than the bw rules + */ + cos_data->data[entry].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + if ((0 == cos_data->data[0].pri_join_mask) && + (0 == cos_data->data[1].pri_join_mask)) + BNX2X_ERR("dcbx error: Both groups must have priorities\n"); +} + +#ifndef POWER_OF_2 +#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) +#endif + +static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct cos_help_data *cos_data, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u32 pri_tested = 0; + u32 pri_mask_without_pri = 0; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + /*debug*/ + if (num_of_dif_pri == 1) { + bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask); + return; + } + /* single priority group */ + if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and + * the non-pauseable priorities go to the second queue. 
+ */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pauseable.*/ + cos_data->data[1].pausable = false; + + if (2 == num_of_dif_pri) { + cos_data->data[0].cos_bw = 50; + cos_data->data[1].cos_bw = 50; + } + + if (3 == num_of_dif_pri) { + if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp, + pri_join_mask))) { + cos_data->data[0].cos_bw = 33; + cos_data->data[1].cos_bw = 67; + } else { + cos_data->data[0].cos_bw = 67; + cos_data->data[1].cos_bw = 33; + } + } + + } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) { + /* If there are only pauseable priorities, + * then one/two priorities go to the first queue + * and one priority goes to the second queue. + */ + if (2 == num_of_dif_pri) { + cos_data->data[0].cos_bw = 50; + cos_data->data[1].cos_bw = 50; + } else { + cos_data->data[0].cos_bw = 67; + cos_data->data[1].cos_bw = 33; + } + cos_data->data[1].pausable = true; + cos_data->data[0].pausable = true; + /* All priorities except FCOE */ + cos_data->data[0].pri_join_mask = (pri_join_mask & + ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]))); + /* Only FCOE priority.*/ + cos_data->data[1].pri_join_mask = + (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]); + } else + /* If there are only non-pauseable priorities, + * they will all go to the same queue. + */ + bnx2x_dcbx_ets_disabled_entry_data(bp, + cos_data, pri_join_mask); + } else { + /* priority group which is not BW limited (PG#15):*/ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + /* If there are both pauseable and non-pauseable + * priorities, the pauseable priorities go to the first + * queue and the non-pauseable priorities + * go to the second queue. + */ + if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) > + DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) { + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + } else { + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pause-able.*/ + cos_data->data[1].pausable = false; + } else { + /* If there are only pauseable priorities or + * only non-pauseable,* the lower priorities go + * to the first queue and the higher priorities go + * to the second queue. + */ + cos_data->data[0].pausable = + cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + + for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) { + pri_tested = 1 << bp->dcbx_port_params. + app.traffic_type_priority[i]; + /* Remove priority tested */ + pri_mask_without_pri = + (pri_join_mask & ((u8)(~pri_tested))); + if (pri_mask_without_pri < pri_tested) + break; + } + + if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX) + BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n"); + + cos_data->data[0].pri_join_mask = pri_mask_without_pri; + cos_data->data[1].pri_join_mask = pri_tested; + /* Both queues are strict priority, + * and that with the highest priority + * gets the highest strict priority in the arbiter. 
+ */ + cos_data->data[0].strict = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI( + BNX2X_DCBX_STRICT_COS_HIGHEST); + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + } +} + +static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( + struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 }; + + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and + * the non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) { + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, + pg_help_data->data[0].pg_priority) || + IS_DCBX_PFC_PRI_MIX_PAUSE(bp, + pg_help_data->data[1].pg_priority)) { + /* If one PG contains both pauseable and + * non-pauseable priorities then ETS is disabled. + */ + bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data, + pg_pri_orginal_spread, ets); + bp->dcbx_port_params.ets.enabled = false; + return; + } + + /* Pauseable */ + cos_data->data[0].pausable = true; + /* Non pauseable. */ + cos_data->data[1].pausable = false; + if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, + pg_help_data->data[0].pg_priority)) { + /* 0 is pauseable */ + cos_data->data[0].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[0] = pg_help_data->data[0].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[1] = pg_help_data->data[1].pg; + } else {/* 1 is pauseable */ + cos_data->data[0].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[0] = pg_help_data->data[1].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[1] = pg_help_data->data[0].pg; + } + } else { + /* If there are only pauseable priorities or + * only non-pauseable, each PG goes to a queue. 
+ */ + cos_data->data[0].pausable = cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + cos_data->data[0].pri_join_mask = + pg_help_data->data[0].pg_priority; + pg[0] = pg_help_data->data[0].pg; + cos_data->data[1].pri_join_mask = + pg_help_data->data[1].pg_priority; + pg[1] = pg_help_data->data[1].pg; + } + + /* There can be only one strict pg */ + for (i = 0 ; i < ARRAY_SIZE(pg); i++) { + if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES) + cos_data->data[i].cos_bw = + DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]); + else + cos_data->data[i].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } +} + +static int bnx2x_dcbx_join_pgs( + struct bnx2x *bp, + struct dcbx_ets_feature *ets, + struct pg_help_data *pg_help_data, + u8 required_num_of_pg) +{ + u8 entry_joined = pg_help_data->num_of_pg - 1; + u8 entry_removed = entry_joined + 1; + u8 pg_joined = 0; + + if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data) + <= pg_help_data->num_of_pg) { + + BNX2X_ERR("required_num_of_pg can't be zero\n"); + return -EINVAL; + } + + while (required_num_of_pg < pg_help_data->num_of_pg) { + entry_joined = pg_help_data->num_of_pg - 2; + entry_removed = entry_joined + 1; + /* protect index */ + entry_removed %= ARRAY_SIZE(pg_help_data->data); + + pg_help_data->data[entry_joined].pg_priority |= + pg_help_data->data[entry_removed].pg_priority; + + pg_help_data->data[entry_joined].num_of_dif_pri += + pg_help_data->data[entry_removed].num_of_dif_pri; + + if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG || + pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG) + /* Entries joined strict priority rules */ + pg_help_data->data[entry_joined].pg = + DCBX_STRICT_PRI_PG; + else { + /* Entries can be joined join BW */ + pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_help_data->data[entry_joined].pg) + + DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_help_data->data[entry_removed].pg); + + DCBX_PG_BW_SET(ets->pg_bw_tbl, + pg_help_data->data[entry_joined].pg, pg_joined); + } + /* Joined the entries */ + pg_help_data->num_of_pg--; + } + + return 0; +} + +static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( + struct bnx2x *bp, + struct pg_help_data *pg_help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + u8 i = 0; + u32 pri_tested = 0; + u8 entry = 0; + u8 pg_entry = 0; + bool b_found_strict = false; + u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX; + + cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0; + /* If there are both pauseable and non-pauseable priorities, + * the pauseable priorities go to the first queue and the + * non-pauseable priorities go to the second queue. + */ + if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) + bnx2x_dcbx_separate_pauseable_from_non(bp, + cos_data, pg_pri_orginal_spread, ets); + else { + /* If two BW-limited PG-s were combined to one queue, + * the BW is their sum. + * + * If there are only pauseable priorities or only non-pauseable, + * and there are both BW-limited and non-BW-limited PG-s, + * the BW-limited PG/s go to one queue and the non-BW-limited + * PG/s go to the second queue. + * + * If there are only pauseable priorities or only non-pauseable + * and all are BW limited, then two priorities go to the first + * queue and one priority goes to the second queue. 
+ * + * We will join this two cases: + * if one is BW limited it will go to the second queue + * otherwise the last priority will get it + */ + + cos_data->data[0].pausable = cos_data->data[1].pausable = + IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask); + + for (i = 0 ; i < num_of_pri; i++) { + pri_tested = 1 << bp->dcbx_port_params. + app.traffic_type_priority[i]; + pg_entry = (u8)pg_pri_orginal_spread[bp-> + dcbx_port_params.app.traffic_type_priority[i]]; + + if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) { + entry = 0; + + if (i == (num_of_pri-1) && + false == b_found_strict) + /* last entry will be handled separately + * If no priority is strict than last + * entry goes to last queue. + */ + entry = 1; + cos_data->data[entry].pri_join_mask |= + pri_tested; + bnx2x_dcbx_add_to_cos_bw(bp, + &cos_data->data[entry], + DCBX_PG_BW_GET(ets->pg_bw_tbl, + pg_entry)); + } else { + b_found_strict = true; + cos_data->data[1].pri_join_mask |= pri_tested; + /* If we join a group and one is strict + * than the bw rules + */ + cos_data->data[1].strict = + BNX2X_DCBX_STRICT_COS_HIGHEST; + } + } + } +} + +static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 *pg_pri_orginal_spread, + u32 pri_join_mask, + u8 num_of_dif_pri) +{ + /* default E2 settings */ + cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2; + + switch (help_data->num_of_pg) { + case 1: + bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params( + bp, + help_data, + cos_data, + pri_join_mask, + num_of_dif_pri); + break; + case 2: + bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params( + bp, + help_data, + ets, + cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + break; + + case 3: + bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params( + bp, + help_data, + ets, + cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + break; + default: + BNX2X_ERR("Wrong pg_help_data.num_of_pg\n"); + bnx2x_dcbx_ets_disabled_entry_data(bp, + cos_data, pri_join_mask); + } +} + +static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp, + struct cos_help_data *cos_data, + u8 entry, + u8 num_spread_of_entries, + u8 strict_app_pris) +{ + u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST; + u8 num_of_app_pri = MAX_PFC_PRIORITIES; + u8 app_pri_bit = 0; + + while (num_spread_of_entries && num_of_app_pri > 0) { + app_pri_bit = 1 << (num_of_app_pri - 1); + if (app_pri_bit & strict_app_pris) { + struct cos_entry_help_data *data = &cos_data-> + data[entry]; + num_spread_of_entries--; + if (num_spread_of_entries == 0) { + /* last entry needed put all the entries left */ + data->cos_bw = DCBX_INVALID_COS_BW; + data->strict = strict_pri; + data->pri_join_mask = strict_app_pris; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + } else { + strict_app_pris &= ~app_pri_bit; + + data->cos_bw = DCBX_INVALID_COS_BW; + data->strict = strict_pri; + data->pri_join_mask = app_pri_bit; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + } + + strict_pri = + BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri); + entry++; + } + + num_of_app_pri--; + } + + if (num_spread_of_entries) { + BNX2X_ERR("Didn't succeed to spread strict priorities\n"); + return -EINVAL; + } + + return 0; +} + +static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp, + struct cos_help_data *cos_data, + u8 entry, + u8 num_spread_of_entries, + u8 strict_app_pris) +{ + if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry, + 
num_spread_of_entries, + strict_app_pris)) { + struct cos_entry_help_data *data = &cos_data-> + data[entry]; + /* Fill BW entry */ + data->cos_bw = DCBX_INVALID_COS_BW; + data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST; + data->pri_join_mask = strict_app_pris; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + return 1; + } + + return num_spread_of_entries; +} + +static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + struct cos_help_data *cos_data, + u32 pri_join_mask) + +{ + u8 need_num_of_entries = 0; + u8 i = 0; + u8 entry = 0; + + /* + * if the number of requested PG-s in CEE is greater than 3 + * then the results are not determined since this is a violation + * of the standard. + */ + if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) { + if (bnx2x_dcbx_join_pgs(bp, ets, help_data, + DCBX_COS_MAX_NUM_E3B0)) { + BNX2X_ERR("Unable to reduce the number of PGs - we will disables ETS\n"); + bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, + pri_join_mask); + return; + } + } + + for (i = 0 ; i < help_data->num_of_pg; i++) { + struct pg_entry_help_data *pg = &help_data->data[i]; + if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) { + struct cos_entry_help_data *data = &cos_data-> + data[entry]; + /* Fill BW entry */ + data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg); + data->strict = BNX2X_DCBX_STRICT_INVALID; + data->pri_join_mask = pg->pg_priority; + data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp, + data->pri_join_mask); + + entry++; + } else { + need_num_of_entries = min_t(u8, + (u8)pg->num_of_dif_pri, + (u8)DCBX_COS_MAX_NUM_E3B0 - + help_data->num_of_pg + 1); + /* + * If there are still VOQ-s which have no associated PG, + * then associate these VOQ-s to PG15. These PG-s will + * be used for SP between priorities on PG15. 
+ */ + entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, + entry, need_num_of_entries, pg->pg_priority); + } + } + + /* the entry will represent the number of COSes used */ + cos_data->num_of_cos = entry; +} +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + u32 *pg_pri_orginal_spread) +{ + struct cos_help_data cos_data; + u8 i = 0; + u32 pri_join_mask = 0; + u8 num_of_dif_pri = 0; + + memset(&cos_data, 0, sizeof(cos_data)); + + /* Validate the pg value */ + for (i = 0; i < help_data->num_of_pg ; i++) { + if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && + DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) + BNX2X_ERR("Invalid pg[%d] data %x\n", i, + help_data->data[i].pg); + pri_join_mask |= help_data->data[i].pg_priority; + num_of_dif_pri += help_data->data[i].num_of_dif_pri; + } + + /* defaults */ + cos_data.num_of_cos = 1; + for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) { + cos_data.data[i].pri_join_mask = 0; + cos_data.data[i].pausable = false; + cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID; + cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; + } + + if (CHIP_IS_E3B0(bp)) + bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, + &cos_data, pri_join_mask); + else /* E2 + E3A0 */ + bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, + help_data, ets, + &cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + + for (i = 0; i < cos_data.num_of_cos ; i++) { + struct bnx2x_dcbx_cos_params *p = + &bp->dcbx_port_params.ets.cos_params[i]; + + p->strict = cos_data.data[i].strict; + p->bw_tbl = cos_data.data[i].cos_bw; + p->pri_bitmask = cos_data.data[i].pri_join_mask; + p->pauseable = cos_data.data[i].pausable; + + /* sanity */ + if (p->bw_tbl != DCBX_INVALID_COS_BW || + p->strict != BNX2X_DCBX_STRICT_INVALID) { + if (p->pri_bitmask == 0) + BNX2X_ERR("Invalid pri_bitmask for %d\n", i); + + if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { + + if (p->pauseable && + DCBX_PFC_PRI_GET_NON_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for pausable COS %d\n", + i); + + if (!p->pauseable && + DCBX_PFC_PRI_GET_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for nonpausable COS %d\n", + i); + } + } + + if (p->pauseable) + DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n", + i, cos_data.data[i].pri_join_mask); + else + DP(BNX2X_MSG_DCB, + "COS %d NONPAUSABLE prijoinmask 0x%x\n", + i, cos_data.data[i].pri_join_mask); + } + + bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; +} + +static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, + u32 *set_configuration_ets_pg, + u32 *pri_pg_tbl) +{ + int i; + + for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) { + set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i); + + DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n", + i, set_configuration_ets_pg[i]); + } +} + +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, + struct bnx2x_func_tx_start_params *pfc_fw_cfg) +{ + u16 pri_bit = 0; + u8 cos = 0, pri = 0; + struct priority_cos *tt2cos; + u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + int mfw_configured = SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED); + + memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg)); + + /* to disable DCB - the structure must be zeroed */ + if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured) + return; + + /*shortcut*/ + tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos; + + /* Fw version should be 
incremented each update */ + pfc_fw_cfg->dcb_version = ++bp->dcb_version; + pfc_fw_cfg->dcb_enabled = 1; + + /* Fill priority parameters */ + for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) { + tt2cos[pri].priority = ttp[pri]; + pri_bit = 1 << tt2cos[pri].priority; + + /* Fill COS parameters based on COS calculated to + * make it more general for future use */ + for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) + if (bp->dcbx_port_params.ets.cos_params[cos]. + pri_bitmask & pri_bit) + tt2cos[pri].cos = cos; + } + + /* we never want the FW to add a 0 vlan tag */ + pfc_fw_cfg->dont_add_pri_0_en = 1; + + bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); +} + +void bnx2x_dcbx_pmf_update(struct bnx2x *bp) +{ + /* if we need to synchronize DCBX result from prev PMF + * read it from shmem and update bp and netdev accordingly + */ + if (SHMEM2_HAS(bp, drv_flags) && + GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) { + /* Read neg results if dcbx is in the FW */ + if (bnx2x_dcbx_read_shmem_neg_results(bp)) + return; + + bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); + bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, + bp->dcbx_error); +#ifdef BCM_DCBNL + /* + * Add new app tlvs to dcbnl + */ + bnx2x_dcbnl_update_applist(bp, false); + /* + * Send a notification for the new negotiated parameters + */ + dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +#endif + /* + * reconfigure the netdevice with the results of the new + * dcbx negotiation. + */ + bnx2x_dcbx_update_tc_mapping(bp); + } +} + +/* DCB netlink */ +#ifdef BCM_DCBNL + +#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ + DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) + +static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp) +{ + /* validate dcbnl call that may change HW state: + * DCB is on and DCBX mode was SUCCESSFULLY set by the user. + */ + return bp->dcb_state && bp->dcbx_mode_uset; +} + +static u8 bnx2x_dcbnl_get_state(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state); + return bp->dcb_state; +} + +static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); + + /* Fail to set state to "enabled" if dcbx is disabled in nvram */ + if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) || + (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) { + DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n"); + return 1; + } + + bnx2x_dcbx_set_state(bp, (state ? 
true : false), bp->dcbx_enabled); + return 0; +} + +static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n"); + + /* first the HW mac address */ + memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); + + if (CNIC_LOADED(bp)) + /* second SAN address */ + memcpy(perm_addr+netdev->addr_len, bp->fip_mac, + netdev->addr_len); +} + +static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, + u8 prio_type, u8 pgid, u8 bw_pct, + u8 up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + + DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid); + if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) + return; + + /** + * bw_pct ignored - band-width percentage devision between user + * priorities within the same group is not + * standard and hence not supported + * + * prio_type ignored - priority levels within the same group are not + * standard and hence are not supported. According + * to the standard pgid 15 is dedicated to strict + * priority traffic (on the port level). + * + * up_map ignored + */ + + bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; +} + +static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct); + + if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) + return; + + bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct; + bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1; +} + +static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio, + u8 prio_type, u8 pgid, u8 bw_pct, + u8 up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); +} + +static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n"); +} + +static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "prio = %d\n", prio); + + /** + * bw_pct ignored - band-width percentage devision between user + * priorities within the same group is not + * standard and hence not supported + * + * prio_type ignored - priority levels within the same group are not + * standard and hence are not supported. According + * to the standard pgid 15 is dedicated to strict + * priority traffic (on the port level). 
+ * + * up_map ignored + */ + *up_map = *bw_pct = *prio_type = *pgid = 0; + + if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES) + return; + + *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio); +} + +static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid); + + *bw_pct = 0; + + if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES) + return; + + *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid); +} + +static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); + + *prio_type = *pgid = *bw_pct = *up_map = 0; +} + +static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n"); + + *bw_pct = 0; +} + +static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, + u8 setting) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting); + + if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) + return; + + if (setting) { + bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio); + bp->dcbx_config_params.admin_pfc_tx_enable = 1; + } else { + bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio); + } +} + +static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "prio = %d\n", prio); + + *setting = 0; + + if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES) + return; + + *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1; +} + +static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + + DP(BNX2X_MSG_DCB, "SET-ALL\n"); + + if (!bnx2x_dcbnl_set_valid(bp)) + return 1; + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + netdev_err(bp->dev, + "Handling parity error recovery. 
Try again later\n"); + return 1; + } + if (netif_running(bp->dev)) { + bnx2x_update_drv_flags(bp, + 1 << DRV_FLAGS_DCB_MFW_CONFIGURED, + 1); + bnx2x_dcbx_init(bp, true); + } + DP(BNX2X_MSG_DCB, "set_dcbx_params done\n"); + + return 0; +} + +static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + if (bp->dcb_state) { + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; /* 8 priorities for PGs */ + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; /* 8 priorities for PFC */ + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = BNX2X_DCBX_CAPS; + break; + default: + BNX2X_ERR("Non valid capability ID\n"); + rval = 1; + break; + } + } else { + DP(BNX2X_MSG_DCB, "DCB disabled\n"); + rval = 1; + } + + DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); + return rval; +} + +static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(BNX2X_MSG_DCB, "tcid %d\n", tcid); + + if (bp->dcb_state) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : + DCBX_COS_MAX_NUM_E2; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 : + DCBX_COS_MAX_NUM_E2; + break; + default: + BNX2X_ERR("Non valid TC-ID\n"); + rval = 1; + break; + } + } else { + DP(BNX2X_MSG_DCB, "DCB disabled\n"); + rval = 1; + } + + return rval; +} + +static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num); + return -EINVAL; +} + +static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); + + if (!bp->dcb_state) + return 0; + + return bp->dcbx_local_feat.pfc.enabled; +} + +static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); + + if (!bnx2x_dcbnl_set_valid(bp)) + return; + + bp->dcbx_config_params.admin_pfc_tx_enable = + bp->dcbx_config_params.admin_pfc_enable = (state ? 
1 : 0); +} + +static void bnx2x_admin_app_set_ent( + struct bnx2x_admin_priority_app_table *app_ent, + u8 idtype, u16 idval, u8 up) +{ + app_ent->valid = 1; + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + app_ent->traffic_type = TRAFFIC_TYPE_ETH; + break; + case DCB_APP_IDTYPE_PORTNUM: + app_ent->traffic_type = TRAFFIC_TYPE_PORT; + break; + default: + break; /* never gets here */ + } + app_ent->app_id = idval; + app_ent->priority = up; +} + +static bool bnx2x_admin_app_is_equal( + struct bnx2x_admin_priority_app_table *app_ent, + u8 idtype, u16 idval) +{ + if (!app_ent->valid) + return false; + + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + if (app_ent->traffic_type != TRAFFIC_TYPE_ETH) + return false; + break; + case DCB_APP_IDTYPE_PORTNUM: + if (app_ent->traffic_type != TRAFFIC_TYPE_PORT) + return false; + break; + default: + return false; + } + if (app_ent->app_id != idval) + return false; + + return true; +} + +static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up) +{ + int i, ff; + + /* iterate over the app entries looking for idtype and idval */ + for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) { + struct bnx2x_admin_priority_app_table *app_ent = + &bp->dcbx_config_params.admin_priority_app_table[i]; + if (bnx2x_admin_app_is_equal(app_ent, idtype, idval)) + break; + + if (ff < 0 && !app_ent->valid) + ff = i; + } + if (i < DCBX_CONFIG_MAX_APP_PROTOCOL) + /* if found overwrite up */ + bp->dcbx_config_params. + admin_priority_app_table[i].priority = up; + else if (ff >= 0) + /* not found use first-free */ + bnx2x_admin_app_set_ent( + &bp->dcbx_config_params.admin_priority_app_table[ff], + idtype, idval, up); + else { + /* app table is full */ + BNX2X_ERR("Application table is too large\n"); + return -EBUSY; + } + + /* up configured, if not 0 make sure feature is enabled */ + if (up) + bp->dcbx_config_params.admin_application_priority_tx_enable = 1; + + return 0; +} + +static int bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, + u16 idval, u8 up) +{ + struct bnx2x *bp = netdev_priv(netdev); + + DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n", + idtype, idval, up); + + if (!bnx2x_dcbnl_set_valid(bp)) { + DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); + return -EINVAL; + } + + /* verify idtype */ + switch (idtype) { + case DCB_APP_IDTYPE_ETHTYPE: + case DCB_APP_IDTYPE_PORTNUM: + break; + default: + DP(BNX2X_MSG_DCB, "Wrong ID type\n"); + return -EINVAL; + } + return bnx2x_set_admin_app_up(bp, idtype, idval, up); +} + +static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 state; + + state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE; + + if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF) + state |= DCB_CAP_DCBX_STATIC; + + return state; +} + +static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state) +{ + struct bnx2x *bp = netdev_priv(netdev); + DP(BNX2X_MSG_DCB, "state = %02x\n", state); + + /* set dcbx mode */ + + if ((state & BNX2X_DCBX_CAPS) != state) { + BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n", + state); + return 1; + } + + if (bp->dcb_state != BNX2X_DCB_STATE_ON) { + BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n"); + return 1; + } + + if (state & DCB_CAP_DCBX_STATIC) + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF; + else + bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON; + + bp->dcbx_mode_uset = true; + return 0; +} + +static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, + 
u8 *flags) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(BNX2X_MSG_DCB, "featid %d\n", featid); + + if (bp->dcb_state) { + *flags = 0; + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + if (bp->dcbx_local_feat.ets.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR | + DCBX_REMOTE_MIB_ERROR)) + *flags |= DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (bp->dcbx_local_feat.pfc.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | + DCBX_LOCAL_PFC_MISMATCH | + DCBX_REMOTE_MIB_ERROR)) + *flags |= DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_APP: + if (bp->dcbx_local_feat.app.enabled) + *flags |= DCB_FEATCFG_ENABLE; + if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | + DCBX_LOCAL_APP_MISMATCH | + DCBX_REMOTE_MIB_ERROR)) + *flags |= DCB_FEATCFG_ERROR; + break; + default: + BNX2X_ERR("Non valid feature-ID\n"); + rval = 1; + break; + } + } else { + DP(BNX2X_MSG_DCB, "DCB disabled\n"); + rval = 1; + } + + return rval; +} + +static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, + u8 flags) +{ + struct bnx2x *bp = netdev_priv(netdev); + u8 rval = 0; + + DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags); + + /* ignore the 'advertise' flag */ + if (bnx2x_dcbnl_set_valid(bp)) { + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + bp->dcbx_config_params.admin_ets_enable = + flags & DCB_FEATCFG_ENABLE ? 1 : 0; + bp->dcbx_config_params.admin_ets_willing = + flags & DCB_FEATCFG_WILLING ? 1 : 0; + break; + case DCB_FEATCFG_ATTR_PFC: + bp->dcbx_config_params.admin_pfc_enable = + flags & DCB_FEATCFG_ENABLE ? 1 : 0; + bp->dcbx_config_params.admin_pfc_willing = + flags & DCB_FEATCFG_WILLING ? 1 : 0; + break; + case DCB_FEATCFG_ATTR_APP: + /* ignore enable, always enabled */ + bp->dcbx_config_params.admin_app_priority_willing = + flags & DCB_FEATCFG_WILLING ? 
1 : 0; + break; + default: + BNX2X_ERR("Non valid feature-ID\n"); + rval = 1; + break; + } + } else { + DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); + rval = 1; + } + + return rval; +} + +static int bnx2x_peer_appinfo(struct net_device *netdev, + struct dcb_peer_app_info *info, u16* app_count) +{ + int i; + struct bnx2x *bp = netdev_priv(netdev); + + DP(BNX2X_MSG_DCB, "APP-INFO\n"); + + info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0; + info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0; + *app_count = 0; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) + if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield & + DCBX_APP_ENTRY_VALID) + (*app_count)++; + return 0; +} + +static int bnx2x_peer_apptable(struct net_device *netdev, + struct dcb_app *table) +{ + int i, j; + struct bnx2x *bp = netdev_priv(netdev); + + DP(BNX2X_MSG_DCB, "APP-TABLE\n"); + + for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + struct dcbx_app_priority_entry *ent = + &bp->dcbx_remote_feat.app.app_pri_tbl[i]; + + if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { + table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent); + table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent); + table[j++].protocol = ent->app_id; + } + } + return 0; +} + +static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg) +{ + int i; + struct bnx2x *bp = netdev_priv(netdev); + + pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0; + + for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { + pg->pg_bw[i] = + DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i); + pg->prio_pg[i] = + DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i); + } + return 0; +} + +static int bnx2x_cee_peer_getpfc(struct net_device *netdev, + struct cee_pfc *pfc) +{ + struct bnx2x *bp = netdev_priv(netdev); + pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps; + pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap; + return 0; +} + +const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = { + .getstate = bnx2x_dcbnl_get_state, + .setstate = bnx2x_dcbnl_set_state, + .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx, + .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx, + .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx, + .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx, + .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx, + .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx, + .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx, + .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx, + .setpfccfg = bnx2x_dcbnl_set_pfc_cfg, + .getpfccfg = bnx2x_dcbnl_get_pfc_cfg, + .setall = bnx2x_dcbnl_set_all, + .getcap = bnx2x_dcbnl_get_cap, + .getnumtcs = bnx2x_dcbnl_get_numtcs, + .setnumtcs = bnx2x_dcbnl_set_numtcs, + .getpfcstate = bnx2x_dcbnl_get_pfc_state, + .setpfcstate = bnx2x_dcbnl_set_pfc_state, + .setapp = bnx2x_dcbnl_set_app_up, + .getdcbx = bnx2x_dcbnl_get_dcbx, + .setdcbx = bnx2x_dcbnl_set_dcbx, + .getfeatcfg = bnx2x_dcbnl_get_featcfg, + .setfeatcfg = bnx2x_dcbnl_set_featcfg, + .peer_getappinfo = bnx2x_peer_appinfo, + .peer_getapptable = bnx2x_peer_apptable, + .cee_peer_getpg = bnx2x_cee_peer_getpg, + .cee_peer_getpfc = bnx2x_cee_peer_getpfc, +}; + +#endif /* BCM_DCBNL */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h new file mode 100644 index 000000000..c6939ecb0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -0,0 +1,205 @@ +/* bnx2x_dcb.h: Broadcom Everest network driver. 
+ * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Ariel Elior + * Written by: Dmitry Kravkov + * + */ +#ifndef BNX2X_DCB_H +#define BNX2X_DCB_H + +#include "bnx2x_hsi.h" + +#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */ +struct bnx2x_dcbx_app_params { + u32 enabled; + u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX]; +}; + +#define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS +/* bnx2x currently limits numbers of supported COSes to 3 to be extended to 6 */ +#define BNX2X_MAX_COS_SUPPORT 3 +#define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT +#define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT + +struct bnx2x_dcbx_cos_params { + u32 bw_tbl; + u32 pri_bitmask; + /* + * strict priority: valid values are 0..5; 0 is highest priority. + * There can't be two COSes with the same priority. + */ + u8 strict; +#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM +#define BNX2X_DCBX_STRICT_COS_HIGHEST 0 +#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1) + u8 pauseable; +}; + +struct bnx2x_dcbx_pg_params { + u32 enabled; + u8 num_of_cos; /* valid COS entries */ + struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM]; +}; + +struct bnx2x_dcbx_pfc_params { + u32 enabled; + u32 priority_non_pauseable_mask; +}; + +struct bnx2x_dcbx_port_params { + struct bnx2x_dcbx_pfc_params pfc; + struct bnx2x_dcbx_pg_params ets; + struct bnx2x_dcbx_app_params app; +}; + +#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF) +#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0 +#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1 +#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE) +#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\ + (bp)->dcbx_port_params.ets.enabled) + +struct bnx2x_config_lldp_params { + u32 overwrite_settings; + u32 msg_tx_hold; + u32 msg_fast_tx; + u32 tx_credit_max; + u32 msg_tx_interval; + u32 tx_fast; +}; + +struct bnx2x_admin_priority_app_table { + u32 valid; + u32 priority; +#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF) + u32 traffic_type; +#define TRAFFIC_TYPE_ETH 0 +#define TRAFFIC_TYPE_PORT 1 + u32 app_id; +}; + +#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 +struct bnx2x_config_dcbx_params { + u32 overwrite_settings; + u32 admin_dcbx_version; + u32 admin_ets_enable; + u32 admin_pfc_enable; + u32 admin_tc_supported_tx_enable; + u32 admin_ets_configuration_tx_enable; + u32 admin_ets_recommendation_tx_enable; + u32 admin_pfc_tx_enable; + u32 admin_application_priority_tx_enable; + u32 admin_ets_willing; + u32 admin_ets_reco_valid; + u32 admin_pfc_willing; + u32 admin_app_priority_willing; + u32 admin_configuration_bw_precentage[8]; + u32 admin_configuration_ets_pg[8]; + u32 admin_recommendation_bw_precentage[8]; + u32 admin_recommendation_ets_pg[8]; + u32 admin_pfc_bitmap; + struct bnx2x_admin_priority_app_table + admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL]; + u32 admin_default_priority; +}; + +#define GET_FLAGS(flags, bits) ((flags) & (bits)) +#define SET_FLAGS(flags, 
bits) ((flags) |= (bits)) +#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits)) + +enum { + DCBX_READ_LOCAL_MIB, + DCBX_READ_REMOTE_MIB +}; + +#define ETH_TYPE_FCOE (0x8906) +#define TCP_PORT_ISCSI (0xCBC) + +#define PFC_VALUE_FRAME_SIZE (512) +#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \ + ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed)) + +#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130 +#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170 + +struct cos_entry_help_data { + u32 pri_join_mask; + u32 cos_bw; + u8 strict; + bool pausable; +}; + +struct cos_help_data { + struct cos_entry_help_data data[DCBX_COS_MAX_NUM]; + u8 num_of_cos; +}; + +#define DCBX_ILLEGAL_PG (0xFF) +#define DCBX_PFC_PRI_MASK (0xFF) +#define DCBX_STRICT_PRIORITY (15) +#define DCBX_INVALID_COS_BW (0xFFFFFFFF) +#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \ + ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask) +#define DCBX_PFC_PRI_PAUSE_MASK(bp) \ + ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) +#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \ + ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp))) +#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \ + (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri)) +#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \ + (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)) +#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \ + (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri))) +#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\ + ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri))) +#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \ + (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \ + IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri)))) + +struct pg_entry_help_data { + u8 num_of_dif_pri; + u8 pg; + u32 pg_priority; +}; + +struct pg_help_data { + struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX]; + u8 num_of_pg; +}; + +/* forward DCB/PFC related declarations */ +struct bnx2x; +void bnx2x_dcbx_update(struct work_struct *work); +void bnx2x_dcbx_init_params(struct bnx2x *bp); +void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled); + +enum { + BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, + BNX2X_DCBX_STATE_TX_PAUSED, + BNX2X_DCBX_STATE_TX_RELEASED +}; + +void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); +void bnx2x_dcbx_pmf_update(struct bnx2x *bp); +/* DCB netlink */ +#ifdef BCM_DCBNL +extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); +#endif /* BCM_DCBNL */ + +int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp); +int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp); + +#endif /* BNX2X_DCB_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h new file mode 100644 index 000000000..741aa130c --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -0,0 +1,2216 @@ +/* bnx2x_dump.h: Broadcom Everest network driver. + * + * Copyright (c) 2012-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ */ + +#ifndef BNX2X_DUMP_H +#define BNX2X_DUMP_H + +/* WaitP Definitions */ +#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80 +#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80 +#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80 +#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80 + +/* Possible Chips */ +#define DUMP_CHIP_E1 1 +#define DUMP_CHIP_E1H 2 +#define DUMP_CHIP_E2 4 +#define DUMP_CHIP_E3A0 8 +#define DUMP_CHIP_E3B0 16 +#define DUMP_PATH_0 512 +#define DUMP_PATH_1 1024 +#define NUM_PRESETS 13 +#define NUM_CHIPS 5 + +struct dump_header { + u32 header_size; /* Size in DWORDs excluding this field */ + u32 version; + u32 preset; + u32 dump_meta_data; /* OR of CHIP and PATH. */ +}; + +#define BNX2X_DUMP_VERSION 0x61111111 +struct reg_addr { + u32 addr; + u32 size; + u32 chips; + u32 presets; +}; + +struct wreg_addr { + u32 addr; + u32 size; + u32 read_regs_count; + const u32 *read_regs; + u32 chips; + u32 presets; +}; + +#define PAGE_MODE_VALUES_E2 2 +#define PAGE_READ_REGS_E2 1 +#define PAGE_WRITE_REGS_E2 1 +static const u32 page_vals_e2[] = {0, 128}; +static const u32 page_write_regs_e2[] = {328476}; +static const struct reg_addr page_read_regs_e2[] = { + {0x58000, 4608, DUMP_CHIP_E2, 0x30} +}; + +#define PAGE_MODE_VALUES_E3 2 +#define PAGE_READ_REGS_E3 1 +#define PAGE_WRITE_REGS_E3 1 +static const u32 page_vals_e3[] = {0, 128}; +static const u32 page_write_regs_e3[] = {328476}; +static const struct reg_addr page_read_regs_e3[] = { + {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30} +}; + +static const struct reg_addr reg_addrs[] = { + { 0x2000, 1, 0x1f, 0xfff}, + { 0x2004, 1, 0x1f, 0x1fff}, + { 0x2008, 25, 0x1f, 0xfff}, + { 0x206c, 1, 0x1f, 0x1fff}, + { 0x2070, 313, 0x1f, 0xfff}, + { 0x2800, 103, 0x1f, 0xfff}, + { 0x3000, 287, 0x1f, 0xfff}, + { 0x3800, 331, 0x1f, 0xfff}, + { 0x8800, 6, 0x1f, 0x924}, + { 0x8818, 1, 0x1e, 0x924}, + { 0x9000, 4, 0x1c, 0x924}, + { 0x9010, 7, 0x1c, 0xfff}, + { 0x902c, 1, 0x1c, 0x924}, + { 0x9030, 1, 0x1c, 0xfff}, + { 0x9034, 13, 0x1c, 0x924}, + { 0x9068, 16, 0x1c, 0xfff}, + { 0x90a8, 98, 0x1c, 0x924}, + { 0x9230, 2, 0x1c, 0xfff}, + { 0x9238, 3, 0x1c, 0x924}, + { 0x9244, 1, 0x1c, 0xfff}, + { 0x9248, 1, 0x1c, 0x924}, + { 0x924c, 1, 0x4, 0x924}, + { 0x9250, 16, 0x1c, 0x924}, + { 0x92a8, 2, 0x1c, 0x1fff}, + { 0x92b4, 1, 0x1c, 0x1fff}, + { 0x9400, 33, 0x1c, 0x924}, + { 0x9484, 5, 0x18, 0x924}, + { 0xa000, 27, 0x1f, 0x924}, + { 0xa06c, 1, 0x3, 0x924}, + { 0xa070, 2, 0x1f, 0x924}, + { 0xa078, 1, 0x1f, 0x1fff}, + { 0xa07c, 31, 0x1f, 0x924}, + { 0xa0f8, 1, 0x1f, 0x1fff}, + { 0xa0fc, 3, 0x1f, 0x924}, + { 0xa108, 1, 0x1f, 0x1fff}, + { 0xa10c, 3, 0x1f, 0x924}, + { 0xa118, 1, 0x1f, 0x1fff}, + { 0xa11c, 28, 0x1f, 0x924}, + { 0xa18c, 4, 0x3, 0x924}, + { 0xa19c, 3, 0x1f, 0x924}, + { 0xa1a8, 1, 0x1f, 0x1fff}, + { 0xa1ac, 3, 0x1f, 0x924}, + { 0xa1b8, 1, 0x1f, 0x1fff}, + { 0xa1bc, 54, 0x1f, 0x924}, + { 0xa294, 2, 0x3, 0x924}, + { 0xa29c, 2, 0x1f, 0x924}, + { 0xa2a4, 2, 0x7, 0x924}, + { 0xa2ac, 2, 0x1f, 0x924}, + { 0xa2b4, 1, 0x1f, 0x1fff}, + { 0xa2b8, 49, 0x1f, 0x924}, + { 0xa38c, 2, 0x1f, 0x1fff}, + { 0xa398, 1, 0x1f, 0x1fff}, + { 0xa39c, 7, 0x1e, 0x924}, + { 0xa3b8, 2, 0x18, 0x924}, + { 0xa3c0, 1, 0x1e, 0x924}, + { 0xa3c4, 1, 0x1e, 0xfff}, + { 0xa3c8, 1, 0x1e, 0x924}, + { 0xa3d0, 1, 0x1e, 0x924}, + { 0xa3d8, 1, 0x1e, 0x924}, + { 0xa3e0, 1, 0x1e, 0x924}, + { 0xa3e8, 1, 0x1e, 0x924}, + { 0xa3f0, 1, 0x1e, 0x924}, + { 0xa3f8, 1, 0x1e, 0x924}, + { 0xa400, 1, 0x1f, 0x924}, + { 0xa404, 1, 0x1f, 0xfff}, + { 0xa408, 2, 0x1f, 0x1fff}, + { 0xa410, 7, 0x1f, 0x924}, + { 0xa42c, 12, 0x1f, 
0xfff}, + { 0xa45c, 1, 0x1f, 0x924}, + { 0xa460, 1, 0x1f, 0x1924}, + { 0xa464, 15, 0x1f, 0x924}, + { 0xa4a0, 1, 0x7, 0x924}, + { 0xa4a4, 2, 0x1f, 0x924}, + { 0xa4ac, 2, 0x3, 0x924}, + { 0xa4b4, 1, 0x7, 0x924}, + { 0xa4b8, 2, 0x3, 0x924}, + { 0xa4c0, 3, 0x1f, 0x924}, + { 0xa4cc, 5, 0x3, 0x924}, + { 0xa4e0, 3, 0x1f, 0x924}, + { 0xa4fc, 2, 0x1f, 0x924}, + { 0xa504, 1, 0x3, 0x924}, + { 0xa508, 3, 0x1f, 0x924}, + { 0xa518, 1, 0x1f, 0x924}, + { 0xa520, 1, 0x1f, 0x924}, + { 0xa528, 1, 0x1f, 0x924}, + { 0xa530, 1, 0x1f, 0x924}, + { 0xa538, 1, 0x1f, 0x924}, + { 0xa540, 1, 0x1f, 0x924}, + { 0xa548, 1, 0x3, 0x924}, + { 0xa550, 1, 0x3, 0x924}, + { 0xa558, 1, 0x3, 0x924}, + { 0xa560, 1, 0x3, 0x924}, + { 0xa568, 1, 0x3, 0x924}, + { 0xa570, 1, 0x1f, 0x924}, + { 0xa580, 1, 0x1f, 0x1fff}, + { 0xa590, 1, 0x1f, 0x1fff}, + { 0xa5a0, 1, 0x7, 0x924}, + { 0xa5c0, 1, 0x1f, 0x924}, + { 0xa5e0, 1, 0x1e, 0x924}, + { 0xa5e8, 1, 0x1e, 0x924}, + { 0xa5f0, 1, 0x1e, 0x924}, + { 0xa5f8, 1, 0x6, 0x924}, + { 0xa5fc, 1, 0x1e, 0x924}, + { 0xa600, 5, 0x1e, 0xfff}, + { 0xa614, 1, 0x1e, 0x924}, + { 0xa618, 1, 0x1e, 0xfff}, + { 0xa61c, 1, 0x1e, 0x924}, + { 0xa620, 6, 0x1c, 0x924}, + { 0xa638, 20, 0x4, 0x924}, + { 0xa688, 35, 0x1c, 0x924}, + { 0xa714, 1, 0x1c, 0xfff}, + { 0xa718, 2, 0x1c, 0x924}, + { 0xa720, 1, 0x1c, 0xfff}, + { 0xa724, 3, 0x1c, 0x924}, + { 0xa730, 1, 0x4, 0x924}, + { 0xa734, 2, 0x1c, 0x924}, + { 0xa73c, 4, 0x4, 0x924}, + { 0xa74c, 1, 0x1c, 0x924}, + { 0xa750, 1, 0x1c, 0xfff}, + { 0xa754, 3, 0x1c, 0x924}, + { 0xa760, 5, 0x4, 0x924}, + { 0xa774, 7, 0x1c, 0x924}, + { 0xa790, 15, 0x4, 0x924}, + { 0xa7cc, 4, 0x1c, 0x924}, + { 0xa7e0, 6, 0x18, 0x924}, + { 0xa800, 18, 0x4, 0x924}, + { 0xa848, 33, 0x1c, 0x924}, + { 0xa8cc, 2, 0x18, 0x924}, + { 0xa8d4, 4, 0x1c, 0x924}, + { 0xa8e4, 1, 0x18, 0x924}, + { 0xa8e8, 1, 0x1c, 0x924}, + { 0xa8f0, 1, 0x1c, 0x924}, + { 0xa8f8, 30, 0x18, 0x924}, + { 0xa974, 73, 0x18, 0x924}, + { 0xac30, 1, 0x18, 0x924}, + { 0xac40, 1, 0x18, 0x924}, + { 0xac50, 1, 0x18, 0x924}, + { 0xac60, 1, 0x10, 0x924}, + { 0x10000, 9, 0x1f, 0x924}, + { 0x10024, 1, 0x7, 0x924}, + { 0x10028, 5, 0x1f, 0x924}, + { 0x1003c, 6, 0x7, 0x924}, + { 0x10054, 20, 0x1f, 0x924}, + { 0x100a4, 4, 0x7, 0x924}, + { 0x100b4, 11, 0x1f, 0x924}, + { 0x100e0, 4, 0x7, 0x924}, + { 0x100f0, 8, 0x1f, 0x924}, + { 0x10110, 6, 0x7, 0x924}, + { 0x10128, 110, 0x1f, 0x924}, + { 0x102e0, 4, 0x7, 0x924}, + { 0x102f0, 18, 0x1f, 0x924}, + { 0x10338, 20, 0x7, 0x924}, + { 0x10388, 10, 0x1f, 0x924}, + { 0x103d0, 2, 0x3, 0x1fff}, + { 0x103dc, 1, 0x3, 0x1fff}, + { 0x10400, 6, 0x7, 0x924}, + { 0x10418, 1, 0x1f, 0xfff}, + { 0x1041c, 1, 0x1f, 0x924}, + { 0x10420, 1, 0x1f, 0xfff}, + { 0x10424, 1, 0x1f, 0x924}, + { 0x10428, 1, 0x1f, 0xfff}, + { 0x1042c, 1, 0x1f, 0x924}, + { 0x10430, 10, 0x7, 0x924}, + { 0x10458, 2, 0x1f, 0x924}, + { 0x10460, 1, 0x1f, 0xfff}, + { 0x10464, 4, 0x1f, 0x924}, + { 0x10474, 1, 0x1f, 0xfff}, + { 0x10478, 14, 0x1f, 0x924}, + { 0x104b0, 12, 0x7, 0x924}, + { 0x104e0, 1, 0x1f, 0xfff}, + { 0x104e8, 1, 0x1f, 0x924}, + { 0x104ec, 1, 0x1f, 0xfff}, + { 0x104f4, 1, 0x1f, 0x924}, + { 0x104f8, 1, 0x1f, 0xfff}, + { 0x10500, 2, 0x1f, 0x924}, + { 0x10508, 1, 0x1f, 0xfff}, + { 0x1050c, 9, 0x1f, 0x924}, + { 0x10530, 1, 0x1f, 0xfff}, + { 0x10534, 1, 0x1f, 0x924}, + { 0x10538, 1, 0x1f, 0xfff}, + { 0x1053c, 3, 0x1f, 0x924}, + { 0x10548, 1, 0x1f, 0xfff}, + { 0x1054c, 3, 0x1f, 0x924}, + { 0x10558, 1, 0x1f, 0xfff}, + { 0x1055c, 123, 0x1f, 0x924}, + { 0x10750, 2, 0x7, 0x924}, + { 0x10760, 2, 0x7, 0x924}, + { 0x10770, 2, 0x7, 0x924}, + { 0x10780, 2, 0x7, 
0x924}, + { 0x10790, 2, 0x1f, 0x924}, + { 0x107a0, 2, 0x7, 0x924}, + { 0x107b0, 2, 0x7, 0x924}, + { 0x107c0, 2, 0x7, 0x924}, + { 0x107d0, 2, 0x7, 0x924}, + { 0x107e0, 2, 0x1f, 0x924}, + { 0x10880, 2, 0x1f, 0x924}, + { 0x10900, 2, 0x1f, 0x924}, + { 0x16000, 1, 0x6, 0x924}, + { 0x16004, 25, 0x1e, 0x924}, + { 0x16070, 8, 0x1e, 0x924}, + { 0x16090, 4, 0xe, 0x924}, + { 0x160a0, 6, 0x1e, 0x924}, + { 0x160c0, 7, 0x1e, 0x924}, + { 0x160dc, 2, 0x6, 0x924}, + { 0x160e4, 6, 0x1e, 0x924}, + { 0x160fc, 4, 0x1e, 0x1fff}, + { 0x1610c, 2, 0x6, 0x924}, + { 0x16114, 6, 0x1e, 0x924}, + { 0x16140, 48, 0x1e, 0x1fff}, + { 0x16204, 5, 0x1e, 0x924}, + { 0x18000, 1, 0x1e, 0x924}, + { 0x18008, 1, 0x1e, 0x924}, + { 0x18010, 35, 0x1c, 0x924}, + { 0x180a4, 2, 0x1c, 0x924}, + { 0x180c0, 9, 0x1c, 0x924}, + { 0x180e4, 1, 0xc, 0x924}, + { 0x180e8, 2, 0x1c, 0x924}, + { 0x180f0, 1, 0xc, 0x924}, + { 0x180f4, 79, 0x1c, 0x924}, + { 0x18230, 1, 0xc, 0x924}, + { 0x18234, 2, 0x1c, 0x924}, + { 0x1823c, 1, 0xc, 0x924}, + { 0x18240, 13, 0x1c, 0x924}, + { 0x18274, 1, 0x4, 0x924}, + { 0x18278, 12, 0x1c, 0x924}, + { 0x182a8, 1, 0x1c, 0xfff}, + { 0x182ac, 3, 0x1c, 0x924}, + { 0x182b8, 1, 0x1c, 0xfff}, + { 0x182bc, 19, 0x1c, 0x924}, + { 0x18308, 1, 0x1c, 0xfff}, + { 0x1830c, 3, 0x1c, 0x924}, + { 0x18318, 1, 0x1c, 0xfff}, + { 0x1831c, 7, 0x1c, 0x924}, + { 0x18338, 1, 0x1c, 0xfff}, + { 0x1833c, 3, 0x1c, 0x924}, + { 0x18348, 1, 0x1c, 0xfff}, + { 0x1834c, 28, 0x1c, 0x924}, + { 0x183bc, 2, 0x1c, 0x1fff}, + { 0x183c8, 3, 0x1c, 0x1fff}, + { 0x183d8, 1, 0x1c, 0x1fff}, + { 0x18440, 48, 0x1c, 0x1fff}, + { 0x18500, 15, 0x1c, 0x924}, + { 0x18570, 1, 0x18, 0xfff}, + { 0x18574, 1, 0x18, 0x924}, + { 0x18578, 1, 0x18, 0xfff}, + { 0x1857c, 4, 0x18, 0x924}, + { 0x1858c, 1, 0x18, 0xfff}, + { 0x18590, 1, 0x18, 0x924}, + { 0x18594, 1, 0x18, 0xfff}, + { 0x18598, 32, 0x18, 0x924}, + { 0x18618, 5, 0x10, 0x924}, + { 0x1862c, 4, 0x10, 0xfff}, + { 0x1863c, 16, 0x10, 0x924}, + { 0x18680, 44, 0x10, 0x924}, + { 0x18748, 12, 0x10, 0x924}, + { 0x18788, 1, 0x10, 0x924}, + { 0x1879c, 6, 0x10, 0x924}, + { 0x187c4, 51, 0x10, 0x924}, + { 0x18a00, 48, 0x10, 0x924}, + { 0x20000, 24, 0x1f, 0x924}, + { 0x20060, 8, 0x1f, 0x9e4}, + { 0x20080, 94, 0x1f, 0x924}, + { 0x201f8, 1, 0x3, 0x924}, + { 0x201fc, 1, 0x1f, 0x924}, + { 0x20200, 1, 0x3, 0x924}, + { 0x20204, 1, 0x1f, 0x924}, + { 0x20208, 1, 0x3, 0x924}, + { 0x2020c, 4, 0x1f, 0x924}, + { 0x2021c, 11, 0x1f, 0xfff}, + { 0x20248, 24, 0x1f, 0x924}, + { 0x202b8, 2, 0x1f, 0x1fff}, + { 0x202c4, 1, 0x1f, 0x1fff}, + { 0x202c8, 1, 0x1c, 0x924}, + { 0x202d8, 4, 0x1c, 0x924}, + { 0x202f0, 1, 0x10, 0x924}, + { 0x20400, 1, 0x1f, 0x924}, + { 0x20404, 1, 0x1f, 0xfff}, + { 0x2040c, 2, 0x1f, 0xfff}, + { 0x20414, 2, 0x1f, 0x924}, + { 0x2041c, 2, 0x1f, 0xfff}, + { 0x20424, 2, 0x1f, 0x924}, + { 0x2042c, 18, 0x1e, 0x924}, + { 0x20480, 1, 0x1f, 0x924}, + { 0x20500, 1, 0x1f, 0x924}, + { 0x20600, 1, 0x1f, 0x924}, + { 0x28000, 1, 0x1f, 0x9e4}, + { 0x28004, 255, 0x1f, 0x180}, + { 0x28400, 1, 0x1f, 0x1c0}, + { 0x28404, 255, 0x1f, 0x180}, + { 0x28800, 1, 0x1f, 0x1c0}, + { 0x28804, 255, 0x1f, 0x180}, + { 0x28c00, 1, 0x1f, 0x1c0}, + { 0x28c04, 255, 0x1f, 0x180}, + { 0x29000, 1, 0x1f, 0x1c0}, + { 0x29004, 255, 0x1f, 0x180}, + { 0x29400, 1, 0x1f, 0x1c0}, + { 0x29404, 255, 0x1f, 0x180}, + { 0x29800, 1, 0x1f, 0x1c0}, + { 0x29804, 255, 0x1f, 0x180}, + { 0x29c00, 1, 0x1f, 0x1c0}, + { 0x29c04, 255, 0x1f, 0x180}, + { 0x2a000, 1, 0x1f, 0x1c0}, + { 0x2a004, 255, 0x1f, 0x180}, + { 0x2a400, 1, 0x1f, 0x1c0}, + { 0x2a404, 255, 0x1f, 0x180}, + { 0x2a800, 1, 0x1f, 0x1c0}, + { 
0x2a804, 255, 0x1f, 0x180}, + { 0x2ac00, 1, 0x1f, 0x1c0}, + { 0x2ac04, 255, 0x1f, 0x180}, + { 0x2b000, 1, 0x1f, 0x1c0}, + { 0x2b004, 255, 0x1f, 0x180}, + { 0x2b400, 1, 0x1f, 0x1c0}, + { 0x2b404, 255, 0x1f, 0x180}, + { 0x2b800, 1, 0x1f, 0x1c0}, + { 0x2b804, 255, 0x1f, 0x180}, + { 0x2bc00, 1, 0x1f, 0x1c0}, + { 0x2bc04, 255, 0x1f, 0x180}, + { 0x2c000, 1, 0x1f, 0x1c0}, + { 0x2c004, 255, 0x1f, 0x180}, + { 0x2c400, 1, 0x1f, 0x1c0}, + { 0x2c404, 255, 0x1f, 0x180}, + { 0x2c800, 1, 0x1f, 0x1c0}, + { 0x2c804, 255, 0x1f, 0x180}, + { 0x2cc00, 1, 0x1f, 0x1c0}, + { 0x2cc04, 255, 0x1f, 0x180}, + { 0x2d000, 1, 0x1f, 0x1c0}, + { 0x2d004, 255, 0x1f, 0x180}, + { 0x2d400, 1, 0x1f, 0x1c0}, + { 0x2d404, 255, 0x1f, 0x180}, + { 0x2d800, 1, 0x1f, 0x1c0}, + { 0x2d804, 255, 0x1f, 0x180}, + { 0x2dc00, 1, 0x1f, 0x1c0}, + { 0x2dc04, 255, 0x1f, 0x180}, + { 0x2e000, 1, 0x1f, 0x1c0}, + { 0x2e004, 255, 0x1f, 0x180}, + { 0x2e400, 1, 0x1f, 0x1c0}, + { 0x2e404, 255, 0x1f, 0x180}, + { 0x2e800, 1, 0x1f, 0x1c0}, + { 0x2e804, 255, 0x1f, 0x180}, + { 0x2ec00, 1, 0x1f, 0x1c0}, + { 0x2ec04, 255, 0x1f, 0x180}, + { 0x2f000, 1, 0x1f, 0x1c0}, + { 0x2f004, 255, 0x1f, 0x180}, + { 0x2f400, 1, 0x1f, 0x1c0}, + { 0x2f404, 255, 0x1f, 0x180}, + { 0x2f800, 1, 0x1f, 0x1c0}, + { 0x2f804, 255, 0x1f, 0x180}, + { 0x2fc00, 1, 0x1f, 0x1c0}, + { 0x2fc04, 255, 0x1f, 0x180}, + { 0x30000, 1, 0x1f, 0x9e4}, + { 0x30004, 255, 0x1f, 0x180}, + { 0x30400, 1, 0x1f, 0x1c0}, + { 0x30404, 255, 0x1f, 0x180}, + { 0x30800, 1, 0x1f, 0x1c0}, + { 0x30804, 255, 0x1f, 0x180}, + { 0x30c00, 1, 0x1f, 0x1c0}, + { 0x30c04, 255, 0x1f, 0x180}, + { 0x31000, 1, 0x1f, 0x1c0}, + { 0x31004, 255, 0x1f, 0x180}, + { 0x31400, 1, 0x1f, 0x1c0}, + { 0x31404, 255, 0x1f, 0x180}, + { 0x31800, 1, 0x1f, 0x1c0}, + { 0x31804, 255, 0x1f, 0x180}, + { 0x31c00, 1, 0x1f, 0x1c0}, + { 0x31c04, 255, 0x1f, 0x180}, + { 0x32000, 1, 0x1f, 0x1c0}, + { 0x32004, 255, 0x1f, 0x180}, + { 0x32400, 1, 0x1f, 0x1c0}, + { 0x32404, 255, 0x1f, 0x180}, + { 0x32800, 1, 0x1f, 0x1c0}, + { 0x32804, 255, 0x1f, 0x180}, + { 0x32c00, 1, 0x1f, 0x1c0}, + { 0x32c04, 255, 0x1f, 0x180}, + { 0x33000, 1, 0x1f, 0x1c0}, + { 0x33004, 255, 0x1f, 0x180}, + { 0x33400, 1, 0x1f, 0x1c0}, + { 0x33404, 255, 0x1f, 0x180}, + { 0x33800, 1, 0x1f, 0x1c0}, + { 0x33804, 255, 0x1f, 0x180}, + { 0x33c00, 1, 0x1f, 0x1c0}, + { 0x33c04, 255, 0x1f, 0x180}, + { 0x34000, 1, 0x1f, 0x1c0}, + { 0x34004, 255, 0x1f, 0x180}, + { 0x34400, 1, 0x1f, 0x1c0}, + { 0x34404, 255, 0x1f, 0x180}, + { 0x34800, 1, 0x1f, 0x1c0}, + { 0x34804, 255, 0x1f, 0x180}, + { 0x34c00, 1, 0x1f, 0x1c0}, + { 0x34c04, 255, 0x1f, 0x180}, + { 0x35000, 1, 0x1f, 0x1c0}, + { 0x35004, 255, 0x1f, 0x180}, + { 0x35400, 1, 0x1f, 0x1c0}, + { 0x35404, 255, 0x1f, 0x180}, + { 0x35800, 1, 0x1f, 0x1c0}, + { 0x35804, 255, 0x1f, 0x180}, + { 0x35c00, 1, 0x1f, 0x1c0}, + { 0x35c04, 255, 0x1f, 0x180}, + { 0x36000, 1, 0x1f, 0x1c0}, + { 0x36004, 255, 0x1f, 0x180}, + { 0x36400, 1, 0x1f, 0x1c0}, + { 0x36404, 255, 0x1f, 0x180}, + { 0x36800, 1, 0x1f, 0x1c0}, + { 0x36804, 255, 0x1f, 0x180}, + { 0x36c00, 1, 0x1f, 0x1c0}, + { 0x36c04, 255, 0x1f, 0x180}, + { 0x37000, 1, 0x1f, 0x1c0}, + { 0x37004, 255, 0x1f, 0x180}, + { 0x37400, 1, 0x1f, 0x1c0}, + { 0x37404, 255, 0x1f, 0x180}, + { 0x37800, 1, 0x1f, 0x1c0}, + { 0x37804, 255, 0x1f, 0x180}, + { 0x37c00, 1, 0x1f, 0x1c0}, + { 0x37c04, 255, 0x1f, 0x180}, + { 0x38000, 1, 0x1f, 0x1c0}, + { 0x38004, 255, 0x1f, 0x180}, + { 0x38400, 1, 0x1f, 0x1c0}, + { 0x38404, 255, 0x1f, 0x180}, + { 0x38800, 1, 0x1f, 0x1c0}, + { 0x38804, 255, 0x1f, 0x180}, + { 0x38c00, 1, 0x1f, 0x1c0}, + { 0x38c04, 255, 0x1f, 
0x180}, + { 0x39000, 1, 0x1f, 0x1c0}, + { 0x39004, 255, 0x1f, 0x180}, + { 0x39400, 1, 0x1f, 0x1c0}, + { 0x39404, 255, 0x1f, 0x180}, + { 0x39800, 1, 0x1f, 0x1c0}, + { 0x39804, 255, 0x1f, 0x180}, + { 0x39c00, 1, 0x1f, 0x1c0}, + { 0x39c04, 255, 0x1f, 0x180}, + { 0x3a000, 1, 0x1f, 0x1c0}, + { 0x3a004, 255, 0x1f, 0x180}, + { 0x3a400, 1, 0x1f, 0x1c0}, + { 0x3a404, 255, 0x1f, 0x180}, + { 0x3a800, 1, 0x1f, 0x1c0}, + { 0x3a804, 255, 0x1f, 0x180}, + { 0x3ac00, 1, 0x1f, 0x1c0}, + { 0x3ac04, 255, 0x1f, 0x180}, + { 0x3b000, 1, 0x1f, 0x1c0}, + { 0x3b004, 255, 0x1f, 0x180}, + { 0x3b400, 1, 0x1f, 0x1c0}, + { 0x3b404, 255, 0x1f, 0x180}, + { 0x3b800, 1, 0x1f, 0x1c0}, + { 0x3b804, 255, 0x1f, 0x180}, + { 0x3bc00, 1, 0x1f, 0x1c0}, + { 0x3bc04, 255, 0x1f, 0x180}, + { 0x3c000, 1, 0x1f, 0x1c0}, + { 0x3c004, 255, 0x1f, 0x180}, + { 0x3c400, 1, 0x1f, 0x1c0}, + { 0x3c404, 255, 0x1f, 0x180}, + { 0x3c800, 1, 0x1f, 0x1c0}, + { 0x3c804, 255, 0x1f, 0x180}, + { 0x3cc00, 1, 0x1f, 0x1c0}, + { 0x3cc04, 255, 0x1f, 0x180}, + { 0x3d000, 1, 0x1f, 0x1c0}, + { 0x3d004, 255, 0x1f, 0x180}, + { 0x3d400, 1, 0x1f, 0x1c0}, + { 0x3d404, 255, 0x1f, 0x180}, + { 0x3d800, 1, 0x1f, 0x1c0}, + { 0x3d804, 255, 0x1f, 0x180}, + { 0x3dc00, 1, 0x1f, 0x1c0}, + { 0x3dc04, 255, 0x1f, 0x180}, + { 0x3e000, 1, 0x1f, 0x1c0}, + { 0x3e004, 255, 0x1f, 0x180}, + { 0x3e400, 1, 0x1f, 0x1c0}, + { 0x3e404, 255, 0x1f, 0x180}, + { 0x3e800, 1, 0x1f, 0x1c0}, + { 0x3e804, 255, 0x1f, 0x180}, + { 0x3ec00, 1, 0x1f, 0x1c0}, + { 0x3ec04, 255, 0x1f, 0x180}, + { 0x3f000, 1, 0x1f, 0x1c0}, + { 0x3f004, 255, 0x1f, 0x180}, + { 0x3f400, 1, 0x1f, 0x1c0}, + { 0x3f404, 255, 0x1f, 0x180}, + { 0x3f800, 1, 0x1f, 0x1c0}, + { 0x3f804, 255, 0x1f, 0x180}, + { 0x3fc00, 1, 0x1f, 0x1c0}, + { 0x3fc04, 255, 0x1f, 0x180}, + { 0x40000, 85, 0x1f, 0x924}, + { 0x40154, 13, 0x1f, 0xfff}, + { 0x40198, 2, 0x1f, 0x1fff}, + { 0x401a4, 1, 0x1f, 0x1fff}, + { 0x401a8, 8, 0x1e, 0x924}, + { 0x401c8, 1, 0x2, 0x924}, + { 0x401cc, 2, 0x1e, 0x924}, + { 0x401d4, 2, 0x1c, 0x924}, + { 0x40200, 4, 0x1f, 0x924}, + { 0x40220, 6, 0x1c, 0x924}, + { 0x40238, 8, 0xc, 0x924}, + { 0x40258, 4, 0x1c, 0x924}, + { 0x40268, 2, 0x18, 0x924}, + { 0x40270, 17, 0x10, 0x924}, + { 0x40400, 43, 0x1f, 0x924}, + { 0x404bc, 2, 0x1f, 0x1fff}, + { 0x404c8, 1, 0x1f, 0x1fff}, + { 0x404cc, 3, 0x1e, 0x924}, + { 0x404e0, 1, 0x1c, 0x924}, + { 0x40500, 2, 0x1f, 0x924}, + { 0x40510, 2, 0x1f, 0x924}, + { 0x40520, 2, 0x1f, 0x924}, + { 0x40530, 2, 0x1f, 0x924}, + { 0x40540, 2, 0x1f, 0x924}, + { 0x40550, 10, 0x1c, 0x924}, + { 0x40610, 2, 0x1c, 0x924}, + { 0x42000, 164, 0x1f, 0x924}, + { 0x422b0, 2, 0x1f, 0x1fff}, + { 0x422bc, 1, 0x1f, 0x1fff}, + { 0x422c0, 4, 0x1c, 0x924}, + { 0x422d4, 5, 0x1e, 0x924}, + { 0x422e8, 1, 0x1c, 0x924}, + { 0x42400, 49, 0x1f, 0x924}, + { 0x424c8, 32, 0x1f, 0x924}, + { 0x42548, 1, 0x1f, 0xfff}, + { 0x4254c, 1, 0x1f, 0x924}, + { 0x42550, 1, 0x1f, 0xfff}, + { 0x42554, 1, 0x1f, 0x924}, + { 0x42558, 1, 0x1f, 0xfff}, + { 0x4255c, 1, 0x1f, 0x924}, + { 0x42568, 2, 0x1f, 0x924}, + { 0x42640, 5, 0x1c, 0x924}, + { 0x42800, 1, 0x1f, 0x924}, + { 0x50000, 1, 0x1f, 0x1fff}, + { 0x50004, 19, 0x1f, 0x924}, + { 0x50050, 8, 0x1f, 0x93c}, + { 0x50070, 60, 0x1f, 0x924}, + { 0x50160, 8, 0x1f, 0xfff}, + { 0x50180, 20, 0x1f, 0x924}, + { 0x501e0, 2, 0x1f, 0x1fff}, + { 0x501ec, 1, 0x1f, 0x1fff}, + { 0x501f0, 4, 0x1e, 0x924}, + { 0x50200, 1, 0x1f, 0x924}, + { 0x50204, 1, 0x1f, 0xfff}, + { 0x5020c, 2, 0x1f, 0xfff}, + { 0x50214, 2, 0x1f, 0x924}, + { 0x5021c, 1, 0x1f, 0xfff}, + { 0x50220, 2, 0x1f, 0x924}, + { 0x50228, 6, 0x1e, 0x924}, + { 0x50240, 1, 0x1f, 
0x924}, + { 0x50280, 1, 0x1f, 0x924}, + { 0x50300, 1, 0x1c, 0x924}, + { 0x5030c, 1, 0x1c, 0x924}, + { 0x50318, 1, 0x1c, 0x934}, + { 0x5031c, 1, 0x1c, 0x924}, + { 0x50320, 2, 0x1c, 0x934}, + { 0x50330, 1, 0x10, 0x924}, + { 0x52000, 1, 0x1f, 0x924}, + { 0x54000, 1, 0x1f, 0x93c}, + { 0x54004, 255, 0x1f, 0x30}, + { 0x54400, 1, 0x1f, 0x38}, + { 0x54404, 255, 0x1f, 0x30}, + { 0x54800, 1, 0x1f, 0x38}, + { 0x54804, 255, 0x1f, 0x30}, + { 0x54c00, 1, 0x1f, 0x38}, + { 0x54c04, 255, 0x1f, 0x30}, + { 0x55000, 1, 0x1f, 0x38}, + { 0x55004, 255, 0x1f, 0x30}, + { 0x55400, 1, 0x1f, 0x38}, + { 0x55404, 255, 0x1f, 0x30}, + { 0x55800, 1, 0x1f, 0x38}, + { 0x55804, 255, 0x1f, 0x30}, + { 0x55c00, 1, 0x1f, 0x38}, + { 0x55c04, 255, 0x1f, 0x30}, + { 0x56000, 1, 0x1f, 0x38}, + { 0x56004, 255, 0x1f, 0x30}, + { 0x56400, 1, 0x1f, 0x38}, + { 0x56404, 255, 0x1f, 0x30}, + { 0x56800, 1, 0x1f, 0x38}, + { 0x56804, 255, 0x1f, 0x30}, + { 0x56c00, 1, 0x1f, 0x38}, + { 0x56c04, 255, 0x1f, 0x30}, + { 0x57000, 1, 0x1f, 0x38}, + { 0x57004, 255, 0x1f, 0x30}, + { 0x58000, 1, 0x1f, 0x934}, + { 0x58004, 8191, 0x3, 0x30}, + { 0x60000, 26, 0x1f, 0x924}, + { 0x60068, 8, 0x3, 0x924}, + { 0x60088, 2, 0x1f, 0x924}, + { 0x60090, 1, 0x1f, 0xfff}, + { 0x60094, 9, 0x1f, 0x924}, + { 0x600b8, 9, 0x3, 0x924}, + { 0x600dc, 1, 0x1f, 0x924}, + { 0x600e0, 5, 0x3, 0x924}, + { 0x600f4, 1, 0x7, 0x924}, + { 0x600f8, 1, 0x3, 0x924}, + { 0x600fc, 8, 0x1f, 0x924}, + { 0x6012c, 2, 0x1f, 0x1fff}, + { 0x60138, 1, 0x1f, 0x1fff}, + { 0x6013c, 24, 0x2, 0x924}, + { 0x6019c, 2, 0x1c, 0x924}, + { 0x601ac, 18, 0x1c, 0x924}, + { 0x60200, 1, 0x1f, 0xb6d}, + { 0x60204, 2, 0x1f, 0x249}, + { 0x60210, 13, 0x1c, 0x924}, + { 0x60244, 16, 0x10, 0x924}, + { 0x61000, 1, 0x1f, 0xb6d}, + { 0x61004, 511, 0x1f, 0x249}, + { 0x61800, 512, 0x18, 0x249}, + { 0x70000, 8, 0x1f, 0xb6d}, + { 0x70020, 8184, 0x1f, 0x249}, + { 0x78000, 8192, 0x18, 0x249}, + { 0x85000, 3, 0x1f, 0x1000}, + { 0x8501c, 7, 0x1f, 0x1000}, + { 0x85048, 1, 0x1f, 0x1000}, + { 0x85200, 32, 0x1f, 0x1000}, + { 0xa0000, 16384, 0x3, 0x1000}, + { 0xb0000, 16384, 0x2, 0x1000}, + { 0xc1000, 7, 0x1f, 0x924}, + { 0xc102c, 2, 0x1f, 0x1fff}, + { 0xc1038, 1, 0x1f, 0x1fff}, + { 0xc103c, 2, 0x1c, 0x924}, + { 0xc1800, 2, 0x1f, 0x924}, + { 0xc2000, 164, 0x1f, 0x924}, + { 0xc22b0, 2, 0x1f, 0x1fff}, + { 0xc22bc, 1, 0x1f, 0x1fff}, + { 0xc22c0, 5, 0x1c, 0x924}, + { 0xc22d8, 4, 0x1c, 0x924}, + { 0xc2400, 49, 0x1f, 0x924}, + { 0xc24c8, 32, 0x1f, 0x924}, + { 0xc2548, 1, 0x1f, 0xfff}, + { 0xc254c, 1, 0x1f, 0x924}, + { 0xc2550, 1, 0x1f, 0xfff}, + { 0xc2554, 1, 0x1f, 0x924}, + { 0xc2558, 1, 0x1f, 0xfff}, + { 0xc255c, 1, 0x1f, 0x924}, + { 0xc2568, 2, 0x1f, 0x924}, + { 0xc2600, 1, 0x1f, 0x924}, + { 0xc4000, 165, 0x1f, 0x924}, + { 0xc42b4, 2, 0x1f, 0x1fff}, + { 0xc42c0, 1, 0x1f, 0x1fff}, + { 0xc42d8, 2, 0x1c, 0x924}, + { 0xc42e0, 7, 0x1e, 0x924}, + { 0xc42fc, 1, 0x1c, 0x924}, + { 0xc4400, 51, 0x1f, 0x924}, + { 0xc44d0, 32, 0x1f, 0x924}, + { 0xc4550, 1, 0x1f, 0xfff}, + { 0xc4554, 1, 0x1f, 0x924}, + { 0xc4558, 1, 0x1f, 0xfff}, + { 0xc455c, 1, 0x1f, 0x924}, + { 0xc4560, 1, 0x1f, 0xfff}, + { 0xc4564, 1, 0x1f, 0x924}, + { 0xc4570, 2, 0x1f, 0x924}, + { 0xc4578, 5, 0x1c, 0x924}, + { 0xc4600, 1, 0x1f, 0x924}, + { 0xd0000, 19, 0x1f, 0x924}, + { 0xd004c, 8, 0x1f, 0x1927}, + { 0xd006c, 64, 0x1f, 0x924}, + { 0xd016c, 8, 0x1f, 0xfff}, + { 0xd018c, 19, 0x1f, 0x924}, + { 0xd01e8, 2, 0x1f, 0x1fff}, + { 0xd01f4, 1, 0x1f, 0x1fff}, + { 0xd01fc, 1, 0x1c, 0x924}, + { 0xd0200, 1, 0x1f, 0x924}, + { 0xd0204, 1, 0x1f, 0xfff}, + { 0xd020c, 3, 0x1f, 0xfff}, + { 0xd0218, 4, 
0x1f, 0x924}, + { 0xd0228, 18, 0x1e, 0x924}, + { 0xd0280, 1, 0x1f, 0x924}, + { 0xd0300, 1, 0x1f, 0x924}, + { 0xd0400, 1, 0x1f, 0x924}, + { 0xd0818, 1, 0x10, 0x924}, + { 0xd4000, 1, 0x1f, 0x1927}, + { 0xd4004, 255, 0x1f, 0x6}, + { 0xd4400, 1, 0x1f, 0x1007}, + { 0xd4404, 255, 0x1f, 0x6}, + { 0xd4800, 1, 0x1f, 0x1007}, + { 0xd4804, 255, 0x1f, 0x6}, + { 0xd4c00, 1, 0x1f, 0x1007}, + { 0xd4c04, 255, 0x1f, 0x6}, + { 0xd5000, 1, 0x1f, 0x1007}, + { 0xd5004, 255, 0x1f, 0x6}, + { 0xd5400, 1, 0x1f, 0x1007}, + { 0xd5404, 255, 0x1f, 0x6}, + { 0xd5800, 1, 0x1f, 0x1007}, + { 0xd5804, 255, 0x1f, 0x6}, + { 0xd5c00, 1, 0x1f, 0x1007}, + { 0xd5c04, 255, 0x1f, 0x6}, + { 0xd6000, 1, 0x1f, 0x1007}, + { 0xd6004, 255, 0x1f, 0x6}, + { 0xd6400, 1, 0x1f, 0x1007}, + { 0xd6404, 255, 0x1f, 0x6}, + { 0xd8000, 1, 0x1f, 0x1927}, + { 0xd8004, 255, 0x1f, 0x6}, + { 0xd8400, 1, 0x1f, 0x1007}, + { 0xd8404, 255, 0x1f, 0x6}, + { 0xd8800, 1, 0x1f, 0x1007}, + { 0xd8804, 255, 0x1f, 0x6}, + { 0xd8c00, 1, 0x1f, 0x1007}, + { 0xd8c04, 255, 0x1f, 0x6}, + { 0xd9000, 1, 0x1f, 0x1007}, + { 0xd9004, 255, 0x1f, 0x6}, + { 0xd9400, 1, 0x1f, 0x1007}, + { 0xd9404, 255, 0x1f, 0x6}, + { 0xd9800, 1, 0x1f, 0x1007}, + { 0xd9804, 255, 0x1f, 0x6}, + { 0xd9c00, 1, 0x1f, 0x1007}, + { 0xd9c04, 255, 0x1f, 0x6}, + { 0xda000, 1, 0x1f, 0x1007}, + { 0xda004, 255, 0x1f, 0x6}, + { 0xda400, 1, 0x1f, 0x1007}, + { 0xda404, 255, 0x1f, 0x6}, + { 0xda800, 1, 0x1f, 0x1007}, + { 0xda804, 255, 0x1f, 0x6}, + { 0xdac00, 1, 0x1f, 0x1007}, + { 0xdac04, 255, 0x1f, 0x6}, + { 0xdb000, 1, 0x1f, 0x1007}, + { 0xdb004, 255, 0x1f, 0x6}, + { 0xdb400, 1, 0x1f, 0x1007}, + { 0xdb404, 255, 0x1f, 0x6}, + { 0xdb800, 1, 0x1f, 0x1007}, + { 0xdb804, 255, 0x1f, 0x6}, + { 0xdbc00, 1, 0x1f, 0x1007}, + { 0xdbc04, 255, 0x1f, 0x6}, + { 0xdc000, 1, 0x1f, 0x1007}, + { 0xdc004, 255, 0x1f, 0x6}, + { 0xdc400, 1, 0x1f, 0x1007}, + { 0xdc404, 255, 0x1f, 0x6}, + { 0xdc800, 1, 0x1f, 0x1007}, + { 0xdc804, 255, 0x1f, 0x6}, + { 0xdcc00, 1, 0x1f, 0x1007}, + { 0xdcc04, 255, 0x1f, 0x6}, + { 0xdd000, 1, 0x1f, 0x1007}, + { 0xdd004, 255, 0x1f, 0x6}, + { 0xdd400, 1, 0x1f, 0x1007}, + { 0xdd404, 255, 0x1f, 0x6}, + { 0xdd800, 1, 0x1f, 0x1007}, + { 0xdd804, 255, 0x1f, 0x6}, + { 0xddc00, 1, 0x1f, 0x1007}, + { 0xddc04, 255, 0x1f, 0x6}, + { 0xde000, 1, 0x1f, 0x1007}, + { 0xde004, 255, 0x1f, 0x6}, + { 0xde400, 1, 0x1f, 0x1007}, + { 0xde404, 255, 0x1f, 0x6}, + { 0xde800, 1, 0x1f, 0x1007}, + { 0xde804, 255, 0x1f, 0x6}, + { 0xdec00, 1, 0x1f, 0x1007}, + { 0xdec04, 255, 0x1f, 0x6}, + { 0xdf000, 1, 0x1f, 0x1007}, + { 0xdf004, 255, 0x1f, 0x6}, + { 0xdf400, 1, 0x1f, 0x1007}, + { 0xdf404, 255, 0x1f, 0x6}, + { 0xdf800, 1, 0x1f, 0x1007}, + { 0xdf804, 255, 0x1f, 0x6}, + { 0xdfc00, 1, 0x1f, 0x1007}, + { 0xdfc04, 255, 0x1f, 0x6}, + { 0xe0000, 21, 0x1f, 0x924}, + { 0xe0054, 8, 0x1f, 0xf24}, + { 0xe0074, 49, 0x1f, 0x924}, + { 0xe0138, 1, 0x3, 0x924}, + { 0xe013c, 6, 0x1f, 0x924}, + { 0xe0154, 8, 0x1f, 0xfff}, + { 0xe0174, 21, 0x1f, 0x924}, + { 0xe01d8, 2, 0x1f, 0x1fff}, + { 0xe01e4, 1, 0x1f, 0x1fff}, + { 0xe01f4, 1, 0x4, 0x924}, + { 0xe01f8, 1, 0x1c, 0x924}, + { 0xe0200, 1, 0x1f, 0x924}, + { 0xe0204, 1, 0x1f, 0xfff}, + { 0xe020c, 2, 0x1f, 0xfff}, + { 0xe0214, 2, 0x1f, 0x924}, + { 0xe021c, 2, 0x1f, 0xfff}, + { 0xe0224, 2, 0x1f, 0x924}, + { 0xe022c, 18, 0x1e, 0x924}, + { 0xe0280, 1, 0x1f, 0x924}, + { 0xe0300, 1, 0x1f, 0x924}, + { 0xe0400, 1, 0x10, 0x924}, + { 0xe1000, 1, 0x1f, 0x924}, + { 0xe2000, 1, 0x1f, 0xf24}, + { 0xe2004, 255, 0x1f, 0xc00}, + { 0xe2400, 1, 0x1f, 0xe00}, + { 0xe2404, 255, 0x1f, 0xc00}, + { 0xe2800, 1, 0x1f, 0xe00}, + { 
0xe2804, 255, 0x1f, 0xc00}, + { 0xe2c00, 1, 0x1f, 0xe00}, + { 0xe2c04, 255, 0x1f, 0xc00}, + { 0xe3000, 1, 0x1f, 0xe00}, + { 0xe3004, 255, 0x1f, 0xc00}, + { 0xe3400, 1, 0x1f, 0xe00}, + { 0xe3404, 255, 0x1f, 0xc00}, + { 0xe3800, 1, 0x1f, 0xe00}, + { 0xe3804, 255, 0x1f, 0xc00}, + { 0xe3c00, 1, 0x1f, 0xe00}, + { 0xe3c04, 255, 0x1f, 0xc00}, + { 0xf0000, 1, 0x1f, 0xf24}, + { 0xf0004, 255, 0x1f, 0xc00}, + { 0xf0400, 1, 0x1f, 0xe00}, + { 0xf0404, 255, 0x1f, 0xc00}, + { 0xf0800, 1, 0x1f, 0xe00}, + { 0xf0804, 255, 0x1f, 0xc00}, + { 0xf0c00, 1, 0x1f, 0xe00}, + { 0xf0c04, 255, 0x1f, 0xc00}, + { 0xf1000, 1, 0x1f, 0xe00}, + { 0xf1004, 255, 0x1f, 0xc00}, + { 0xf1400, 1, 0x1f, 0xe00}, + { 0xf1404, 255, 0x1f, 0xc00}, + { 0xf1800, 1, 0x1f, 0xe00}, + { 0xf1804, 255, 0x1f, 0xc00}, + { 0xf1c00, 1, 0x1f, 0xe00}, + { 0xf1c04, 255, 0x1f, 0xc00}, + { 0xf2000, 1, 0x1f, 0xe00}, + { 0xf2004, 255, 0x1f, 0xc00}, + { 0xf2400, 1, 0x1f, 0xe00}, + { 0xf2404, 255, 0x1f, 0xc00}, + { 0xf2800, 1, 0x1f, 0xe00}, + { 0xf2804, 255, 0x1f, 0xc00}, + { 0xf2c00, 1, 0x1f, 0xe00}, + { 0xf2c04, 255, 0x1f, 0xc00}, + { 0xf3000, 1, 0x1f, 0xe00}, + { 0xf3004, 255, 0x1f, 0xc00}, + { 0xf3400, 1, 0x1f, 0xe00}, + { 0xf3404, 255, 0x1f, 0xc00}, + { 0xf3800, 1, 0x1f, 0xe00}, + { 0xf3804, 255, 0x1f, 0xc00}, + { 0xf3c00, 1, 0x1f, 0xe00}, + { 0xf3c04, 255, 0x1f, 0xc00}, + { 0xf4000, 1, 0x1f, 0xe00}, + { 0xf4004, 255, 0x1f, 0xc00}, + { 0xf4400, 1, 0x1f, 0xe00}, + { 0xf4404, 255, 0x1f, 0xc00}, + { 0xf4800, 1, 0x1f, 0xe00}, + { 0xf4804, 255, 0x1f, 0xc00}, + { 0xf4c00, 1, 0x1f, 0xe00}, + { 0xf4c04, 255, 0x1f, 0xc00}, + { 0xf5000, 1, 0x1f, 0xe00}, + { 0xf5004, 255, 0x1f, 0xc00}, + { 0xf5400, 1, 0x1f, 0xe00}, + { 0xf5404, 255, 0x1f, 0xc00}, + { 0xf5800, 1, 0x1f, 0xe00}, + { 0xf5804, 255, 0x1f, 0xc00}, + { 0xf5c00, 1, 0x1f, 0xe00}, + { 0xf5c04, 255, 0x1f, 0xc00}, + { 0xf6000, 1, 0x1f, 0xe00}, + { 0xf6004, 255, 0x1f, 0xc00}, + { 0xf6400, 1, 0x1f, 0xe00}, + { 0xf6404, 255, 0x1f, 0xc00}, + { 0xf6800, 1, 0x1f, 0xe00}, + { 0xf6804, 255, 0x1f, 0xc00}, + { 0xf6c00, 1, 0x1f, 0xe00}, + { 0xf6c04, 255, 0x1f, 0xc00}, + { 0xf7000, 1, 0x1f, 0xe00}, + { 0xf7004, 255, 0x1f, 0xc00}, + { 0xf7400, 1, 0x1f, 0xe00}, + { 0xf7404, 255, 0x1f, 0xc00}, + { 0xf7800, 1, 0x1f, 0xe00}, + { 0xf7804, 255, 0x1f, 0xc00}, + { 0xf7c00, 1, 0x1f, 0xe00}, + { 0xf7c04, 255, 0x1f, 0xc00}, + { 0xf8000, 1, 0x1f, 0xe00}, + { 0xf8004, 255, 0x1f, 0xc00}, + { 0xf8400, 1, 0x1f, 0xe00}, + { 0xf8404, 255, 0x1f, 0xc00}, + { 0xf8800, 1, 0x1f, 0xe00}, + { 0xf8804, 255, 0x1f, 0xc00}, + { 0xf8c00, 1, 0x1f, 0xe00}, + { 0xf8c04, 255, 0x1f, 0xc00}, + { 0xf9000, 1, 0x1f, 0xe00}, + { 0xf9004, 255, 0x1f, 0xc00}, + { 0xf9400, 1, 0x1f, 0xe00}, + { 0xf9404, 255, 0x1f, 0xc00}, + { 0xf9800, 1, 0x1f, 0xe00}, + { 0xf9804, 255, 0x1f, 0xc00}, + { 0xf9c00, 1, 0x1f, 0xe00}, + { 0xf9c04, 255, 0x1f, 0xc00}, + { 0xfa000, 1, 0x1f, 0xe00}, + { 0xfa004, 255, 0x1f, 0xc00}, + { 0xfa400, 1, 0x1f, 0xe00}, + { 0xfa404, 255, 0x1f, 0xc00}, + { 0xfa800, 1, 0x1f, 0xe00}, + { 0xfa804, 255, 0x1f, 0xc00}, + { 0xfac00, 1, 0x1f, 0xe00}, + { 0xfac04, 255, 0x1f, 0xc00}, + { 0xfb000, 1, 0x1f, 0xe00}, + { 0xfb004, 255, 0x1f, 0xc00}, + { 0xfb400, 1, 0x1f, 0xe00}, + { 0xfb404, 255, 0x1f, 0xc00}, + { 0xfb800, 1, 0x1f, 0xe00}, + { 0xfb804, 255, 0x1f, 0xc00}, + { 0xfbc00, 1, 0x1f, 0xe00}, + { 0xfbc04, 255, 0x1f, 0xc00}, + { 0xfc000, 1, 0x1f, 0xe00}, + { 0xfc004, 255, 0x1f, 0xc00}, + { 0xfc400, 1, 0x1f, 0xe00}, + { 0xfc404, 255, 0x1f, 0xc00}, + { 0xfc800, 1, 0x1f, 0xe00}, + { 0xfc804, 255, 0x1f, 0xc00}, + { 0xfcc00, 1, 0x1f, 0xe00}, + { 0xfcc04, 255, 0x1f, 
0xc00}, + { 0xfd000, 1, 0x1f, 0xe00}, + { 0xfd004, 255, 0x1f, 0xc00}, + { 0xfd400, 1, 0x1f, 0xe00}, + { 0xfd404, 255, 0x1f, 0xc00}, + { 0xfd800, 1, 0x1f, 0xe00}, + { 0xfd804, 255, 0x1f, 0xc00}, + { 0xfdc00, 1, 0x1f, 0xe00}, + { 0xfdc04, 255, 0x1f, 0xc00}, + { 0xfe000, 1, 0x1f, 0xe00}, + { 0xfe004, 255, 0x1f, 0xc00}, + { 0xfe400, 1, 0x1f, 0xe00}, + { 0xfe404, 255, 0x1f, 0xc00}, + { 0xfe800, 1, 0x1f, 0xe00}, + { 0xfe804, 255, 0x1f, 0xc00}, + { 0xfec00, 1, 0x1f, 0xe00}, + { 0xfec04, 255, 0x1f, 0xc00}, + { 0xff000, 1, 0x1f, 0xe00}, + { 0xff004, 255, 0x1f, 0xc00}, + { 0xff400, 1, 0x1f, 0xe00}, + { 0xff404, 255, 0x1f, 0xc00}, + { 0xff800, 1, 0x1f, 0xe00}, + { 0xff804, 255, 0x1f, 0xc00}, + { 0xffc00, 1, 0x1f, 0xe00}, + { 0xffc04, 255, 0x1f, 0xc00}, + { 0x101000, 5, 0x1f, 0x924}, + { 0x101014, 1, 0x1f, 0xfff}, + { 0x101018, 6, 0x1f, 0x924}, + { 0x101040, 2, 0x1f, 0x1fff}, + { 0x10104c, 1, 0x1f, 0x1fff}, + { 0x101050, 1, 0x1e, 0x924}, + { 0x101054, 3, 0x1c, 0x924}, + { 0x101100, 1, 0x1f, 0x924}, + { 0x101800, 8, 0x1f, 0x924}, + { 0x102000, 18, 0x1f, 0x924}, + { 0x102058, 2, 0x1f, 0x1fff}, + { 0x102064, 1, 0x1f, 0x1fff}, + { 0x102068, 6, 0x1c, 0x924}, + { 0x102080, 16, 0x1f, 0xfff}, + { 0x1020c0, 1, 0x1f, 0x924}, + { 0x1020c8, 8, 0x2, 0x924}, + { 0x1020e8, 9, 0x1c, 0x924}, + { 0x102400, 1, 0x1f, 0x924}, + { 0x103000, 1, 0x1f, 0x924}, + { 0x103004, 2, 0x1f, 0xfff}, + { 0x10300c, 23, 0x1f, 0x924}, + { 0x103088, 2, 0x1f, 0x1fff}, + { 0x103094, 1, 0x1f, 0x1fff}, + { 0x103098, 1, 0x1e, 0x924}, + { 0x10309c, 2, 0x1e, 0xfff}, + { 0x1030a4, 2, 0x1e, 0x924}, + { 0x1030ac, 2, 0x1c, 0x924}, + { 0x1030b4, 1, 0x4, 0x924}, + { 0x1030b8, 2, 0x1c, 0xfff}, + { 0x1030c0, 3, 0x1c, 0x924}, + { 0x1030cc, 1, 0x1c, 0xfff}, + { 0x1030d0, 1, 0x1c, 0x924}, + { 0x1030d8, 2, 0x1c, 0x924}, + { 0x1030e0, 1, 0x1c, 0xfff}, + { 0x1030e4, 5, 0x1c, 0x924}, + { 0x103400, 136, 0x1c, 0x1fff}, + { 0x103800, 8, 0x1f, 0x924}, + { 0x104000, 1, 0x1f, 0x924}, + { 0x104004, 1, 0x1f, 0xfff}, + { 0x104008, 4, 0x1f, 0x924}, + { 0x104018, 1, 0x1f, 0xfff}, + { 0x10401c, 1, 0x1f, 0x924}, + { 0x104020, 1, 0x1f, 0xfff}, + { 0x104024, 6, 0x1f, 0x924}, + { 0x10403c, 1, 0x1f, 0xfff}, + { 0x104040, 47, 0x1f, 0x924}, + { 0x10410c, 2, 0x1f, 0x1fff}, + { 0x104118, 1, 0x1f, 0x1fff}, + { 0x10411c, 16, 0x1c, 0x924}, + { 0x104200, 17, 0x1f, 0x924}, + { 0x104400, 1, 0x1f, 0x1fff}, + { 0x104404, 63, 0x1f, 0xfff}, + { 0x104500, 192, 0x1f, 0xdb6}, + { 0x104800, 1, 0x1f, 0x1fff}, + { 0x104804, 63, 0x1f, 0xfff}, + { 0x104900, 192, 0x1f, 0xdb6}, + { 0x105000, 4, 0x1f, 0x1fff}, + { 0x105010, 252, 0x1f, 0xfff}, + { 0x105400, 768, 0x1f, 0xdb6}, + { 0x107000, 7, 0x1c, 0x924}, + { 0x10701c, 1, 0x18, 0x924}, + { 0x108000, 33, 0x3, 0x924}, + { 0x1080ac, 5, 0x2, 0x924}, + { 0x108100, 5, 0x3, 0x924}, + { 0x108120, 5, 0x3, 0x924}, + { 0x108200, 74, 0x3, 0x924}, + { 0x108400, 74, 0x3, 0x924}, + { 0x108800, 152, 0x3, 0x924}, + { 0x110000, 111, 0x1c, 0x924}, + { 0x1101cc, 2, 0x1c, 0x1fff}, + { 0x1101d8, 1, 0x1c, 0x1fff}, + { 0x1101dc, 1, 0x18, 0x924}, + { 0x110200, 4, 0x1c, 0x924}, + { 0x120000, 92, 0x1f, 0x924}, + { 0x120170, 2, 0x3, 0x924}, + { 0x120178, 14, 0x1f, 0x924}, + { 0x1201b0, 2, 0x1f, 0xfff}, + { 0x1201b8, 93, 0x1f, 0x924}, + { 0x12032c, 1, 0x1f, 0xfff}, + { 0x120330, 15, 0x1f, 0x924}, + { 0x12036c, 3, 0x1f, 0xfff}, + { 0x120378, 36, 0x1f, 0x924}, + { 0x120408, 2, 0x1f, 0xfff}, + { 0x120410, 1, 0x1f, 0x924}, + { 0x120414, 15, 0x1f, 0xfff}, + { 0x120450, 10, 0x1f, 0x924}, + { 0x120478, 2, 0x1f, 0xfff}, + { 0x120480, 43, 0x1f, 0x924}, + { 0x12052c, 1, 0x1f, 0xfff}, + { 
0x120530, 5, 0x1f, 0x924}, + { 0x120544, 4, 0x3, 0x924}, + { 0x120554, 4, 0x1f, 0x924}, + { 0x120564, 2, 0x1f, 0xfff}, + { 0x12057c, 2, 0x1f, 0x1fff}, + { 0x120588, 3, 0x1f, 0x1fff}, + { 0x120598, 1, 0x1f, 0x1fff}, + { 0x12059c, 22, 0x1e, 0x924}, + { 0x1205f4, 1, 0x6, 0x924}, + { 0x1205f8, 4, 0x1c, 0x924}, + { 0x120618, 1, 0x1c, 0x924}, + { 0x12061c, 31, 0x1e, 0x924}, + { 0x120698, 3, 0x1c, 0x924}, + { 0x1206a4, 1, 0x4, 0x924}, + { 0x1206a8, 1, 0x1c, 0x924}, + { 0x1206b0, 38, 0x1c, 0x924}, + { 0x120748, 1, 0x1c, 0xfff}, + { 0x12074c, 11, 0x1c, 0x924}, + { 0x120778, 2, 0x1c, 0xfff}, + { 0x120780, 23, 0x1c, 0x924}, + { 0x1207dc, 1, 0x4, 0x924}, + { 0x1207fc, 1, 0x1c, 0x924}, + { 0x12080c, 2, 0x1f, 0xfff}, + { 0x120814, 1, 0x1f, 0x924}, + { 0x120818, 1, 0x1f, 0xfff}, + { 0x12081c, 1, 0x1f, 0x924}, + { 0x120820, 1, 0x1f, 0xfff}, + { 0x120824, 1, 0x1f, 0x924}, + { 0x120828, 1, 0x1f, 0xfff}, + { 0x12082c, 1, 0x1f, 0x924}, + { 0x120830, 1, 0x1f, 0xfff}, + { 0x120834, 1, 0x1f, 0x924}, + { 0x120838, 1, 0x1f, 0xfff}, + { 0x12083c, 1, 0x1f, 0x924}, + { 0x120840, 1, 0x1f, 0xfff}, + { 0x120844, 1, 0x1f, 0x924}, + { 0x120848, 1, 0x1f, 0xfff}, + { 0x12084c, 1, 0x1f, 0x924}, + { 0x120850, 1, 0x1f, 0xfff}, + { 0x120854, 1, 0x1f, 0x924}, + { 0x120858, 1, 0x1f, 0xfff}, + { 0x12085c, 1, 0x1f, 0x924}, + { 0x120860, 1, 0x1f, 0xfff}, + { 0x120864, 1, 0x1f, 0x924}, + { 0x120868, 1, 0x1f, 0xfff}, + { 0x12086c, 1, 0x1f, 0x924}, + { 0x120870, 1, 0x1f, 0xfff}, + { 0x120874, 1, 0x1f, 0x924}, + { 0x120878, 1, 0x1f, 0xfff}, + { 0x12087c, 1, 0x1f, 0x924}, + { 0x120880, 1, 0x1f, 0xfff}, + { 0x120884, 1, 0x1f, 0x924}, + { 0x120888, 1, 0x1f, 0xfff}, + { 0x12088c, 1, 0x1f, 0x924}, + { 0x120890, 1, 0x1f, 0xfff}, + { 0x120894, 1, 0x1f, 0x924}, + { 0x120898, 1, 0x1f, 0xfff}, + { 0x12089c, 1, 0x1f, 0x924}, + { 0x1208a0, 1, 0x1f, 0xfff}, + { 0x1208a4, 1, 0x1f, 0x924}, + { 0x1208a8, 1, 0x1f, 0xfff}, + { 0x1208ac, 1, 0x1f, 0x924}, + { 0x1208b0, 1, 0x1f, 0xfff}, + { 0x1208b4, 1, 0x1f, 0x924}, + { 0x1208b8, 1, 0x1f, 0xfff}, + { 0x1208bc, 1, 0x1f, 0x924}, + { 0x1208c0, 1, 0x1f, 0xfff}, + { 0x1208c4, 1, 0x1f, 0x924}, + { 0x1208c8, 1, 0x1f, 0xfff}, + { 0x1208cc, 1, 0x1f, 0x924}, + { 0x1208d0, 1, 0x1f, 0xfff}, + { 0x1208d4, 1, 0x1f, 0x924}, + { 0x1208d8, 1, 0x1f, 0xfff}, + { 0x1208dc, 1, 0x1f, 0x924}, + { 0x1208e0, 1, 0x1f, 0xfff}, + { 0x1208e4, 1, 0x1f, 0x924}, + { 0x1208e8, 1, 0x1f, 0xfff}, + { 0x1208ec, 1, 0x1f, 0x924}, + { 0x1208f0, 1, 0x1f, 0xfff}, + { 0x1208f4, 1, 0x1f, 0x924}, + { 0x1208f8, 1, 0x1f, 0xfff}, + { 0x1208fc, 1, 0x1f, 0x924}, + { 0x120900, 1, 0x1f, 0xfff}, + { 0x120904, 1, 0x1f, 0x924}, + { 0x120908, 1, 0x1f, 0xfff}, + { 0x12090c, 1, 0x1f, 0x924}, + { 0x120910, 7, 0x1c, 0x924}, + { 0x120930, 9, 0x1c, 0x924}, + { 0x12095c, 37, 0x18, 0x924}, + { 0x120a00, 2, 0x7, 0x924}, + { 0x120b00, 1, 0x18, 0x924}, + { 0x122000, 2, 0x1f, 0x924}, + { 0x122008, 2046, 0x1, 0x924}, + { 0x128000, 6144, 0x1e, 0x924}, + { 0x130000, 1, 0x1c, 0x1fff}, + { 0x130004, 11, 0x1c, 0x924}, + { 0x130030, 1, 0x1c, 0xfff}, + { 0x130034, 6, 0x1c, 0x924}, + { 0x13004c, 3, 0x1c, 0xfff}, + { 0x130058, 3, 0x1c, 0x924}, + { 0x130064, 2, 0x1c, 0xfff}, + { 0x13006c, 8, 0x1c, 0x924}, + { 0x13009c, 2, 0x1c, 0x1fff}, + { 0x1300a8, 1, 0x1c, 0x1fff}, + { 0x130100, 12, 0x1c, 0x924}, + { 0x130130, 1, 0x1c, 0xfff}, + { 0x130134, 14, 0x1c, 0x924}, + { 0x13016c, 1, 0x1c, 0xfff}, + { 0x130170, 1, 0x1c, 0x924}, + { 0x130180, 1, 0x1c, 0x924}, + { 0x130200, 1, 0x1c, 0x924}, + { 0x130280, 1, 0x1c, 0x924}, + { 0x130300, 1, 0x1c, 0xfff}, + { 0x130304, 4, 0x1c, 0x924}, + { 
0x130380, 1, 0x1c, 0x924}, + { 0x130400, 1, 0x1c, 0x924}, + { 0x130480, 1, 0x1c, 0xfff}, + { 0x130484, 4, 0x1c, 0x924}, + { 0x130800, 72, 0x1c, 0x924}, + { 0x131000, 136, 0x1c, 0x924}, + { 0x132000, 148, 0x1c, 0x924}, + { 0x134000, 544, 0x1c, 0x924}, + { 0x140000, 1, 0x1f, 0x924}, + { 0x140004, 9, 0xf, 0x924}, + { 0x140028, 8, 0x1f, 0x924}, + { 0x140048, 5, 0xf, 0x924}, + { 0x14005c, 2, 0xf, 0xfff}, + { 0x140064, 3, 0xf, 0x924}, + { 0x140070, 1, 0x1f, 0x924}, + { 0x140074, 10, 0xf, 0x924}, + { 0x14009c, 1, 0x1f, 0x924}, + { 0x1400a0, 5, 0xf, 0x924}, + { 0x1400b4, 7, 0x1f, 0x924}, + { 0x1400d0, 2, 0xf, 0xfff}, + { 0x1400d8, 2, 0xf, 0x924}, + { 0x1400e0, 1, 0xf, 0xfff}, + { 0x1400e4, 5, 0xf, 0x924}, + { 0x1400f8, 2, 0x1f, 0x924}, + { 0x140100, 5, 0x3, 0x924}, + { 0x140114, 5, 0xf, 0x924}, + { 0x140128, 7, 0x1f, 0x924}, + { 0x140144, 9, 0xf, 0x924}, + { 0x140168, 8, 0x1f, 0x924}, + { 0x140188, 3, 0xf, 0x924}, + { 0x140194, 13, 0x1f, 0x924}, + { 0x1401d8, 2, 0x1f, 0x1fff}, + { 0x1401e4, 1, 0x1f, 0x1fff}, + { 0x140200, 6, 0xf, 0xfff}, + { 0x1402e0, 2, 0xc, 0x924}, + { 0x1402e8, 2, 0x1c, 0x924}, + { 0x1402f0, 9, 0xc, 0x924}, + { 0x140314, 9, 0x10, 0x924}, + { 0x140338, 7, 0x10, 0xfff}, + { 0x140354, 7, 0x10, 0x924}, + { 0x140370, 7, 0x10, 0xfff}, + { 0x14038c, 14, 0x10, 0x924}, + { 0x1404b0, 14, 0x10, 0x924}, + { 0x15c000, 2, 0x1e, 0x924}, + { 0x15c008, 5, 0x2, 0x924}, + { 0x15c020, 8, 0x1c, 0x924}, + { 0x15c040, 1, 0xc, 0x924}, + { 0x15c044, 2, 0x1c, 0x924}, + { 0x15c04c, 8, 0xc, 0x924}, + { 0x15c06c, 8, 0x1c, 0x924}, + { 0x15c090, 13, 0x1c, 0x924}, + { 0x15c0c8, 24, 0x1c, 0x924}, + { 0x15c128, 2, 0xc, 0x924}, + { 0x15c130, 1, 0x1c, 0x924}, + { 0x15c138, 6, 0x1c, 0x924}, + { 0x15c150, 2, 0x18, 0x924}, + { 0x15c158, 2, 0x8, 0x924}, + { 0x15c160, 23, 0x10, 0x924}, + { 0x15c1bc, 6, 0x10, 0xfff}, + { 0x15c1d4, 23, 0x10, 0x924}, + { 0x15c230, 7, 0x10, 0xfff}, + { 0x15c24c, 90, 0x10, 0x924}, + { 0x160004, 6, 0x18, 0x924}, + { 0x16003c, 1, 0x10, 0x924}, + { 0x160040, 6, 0x18, 0x924}, + { 0x16005c, 6, 0x18, 0x924}, + { 0x160074, 1, 0x10, 0x924}, + { 0x160078, 2, 0x18, 0x924}, + { 0x160300, 8, 0x18, 0x924}, + { 0x160330, 6, 0x18, 0x924}, + { 0x160404, 6, 0x18, 0x924}, + { 0x16043c, 1, 0x10, 0x924}, + { 0x160440, 6, 0x18, 0x924}, + { 0x16045c, 6, 0x18, 0x924}, + { 0x160474, 1, 0x10, 0x924}, + { 0x160478, 2, 0x18, 0x924}, + { 0x160700, 8, 0x18, 0x924}, + { 0x160730, 6, 0x18, 0x924}, + { 0x161000, 7, 0x1f, 0x924}, + { 0x16102c, 2, 0x1f, 0x1fff}, + { 0x161038, 1, 0x1f, 0x1fff}, + { 0x16103c, 2, 0x1c, 0x924}, + { 0x161800, 2, 0x1f, 0x924}, + { 0x162000, 54, 0x18, 0x924}, + { 0x162200, 60, 0x18, 0x924}, + { 0x162400, 54, 0x18, 0x924}, + { 0x162600, 60, 0x18, 0x924}, + { 0x162800, 54, 0x18, 0x924}, + { 0x162a00, 60, 0x18, 0x924}, + { 0x162c00, 54, 0x18, 0x924}, + { 0x162e00, 60, 0x18, 0x924}, + { 0x163000, 1, 0x18, 0x924}, + { 0x163008, 1, 0x18, 0x924}, + { 0x163010, 1, 0x18, 0x924}, + { 0x163018, 1, 0x18, 0x924}, + { 0x163020, 5, 0x18, 0x924}, + { 0x163038, 3, 0x18, 0x924}, + { 0x163048, 3, 0x18, 0x924}, + { 0x163058, 1, 0x18, 0x924}, + { 0x163060, 1, 0x18, 0x924}, + { 0x163068, 1, 0x18, 0x924}, + { 0x163070, 3, 0x18, 0x924}, + { 0x163080, 1, 0x18, 0x924}, + { 0x163088, 3, 0x18, 0x924}, + { 0x163098, 1, 0x18, 0x924}, + { 0x1630a0, 1, 0x18, 0x924}, + { 0x1630a8, 1, 0x18, 0x924}, + { 0x1630b0, 2, 0x10, 0x924}, + { 0x1630c0, 1, 0x18, 0x924}, + { 0x1630c8, 1, 0x18, 0x924}, + { 0x1630d0, 1, 0x18, 0x924}, + { 0x1630d8, 1, 0x18, 0x924}, + { 0x1630e0, 2, 0x18, 0x924}, + { 0x163110, 1, 0x18, 0x924}, + { 0x163120, 2, 
0x18, 0x924}, + { 0x163420, 4, 0x18, 0x924}, + { 0x163438, 2, 0x18, 0x924}, + { 0x163488, 2, 0x18, 0x924}, + { 0x163520, 2, 0x18, 0x924}, + { 0x163800, 1, 0x18, 0x924}, + { 0x163808, 1, 0x18, 0x924}, + { 0x163810, 1, 0x18, 0x924}, + { 0x163818, 1, 0x18, 0x924}, + { 0x163820, 5, 0x18, 0x924}, + { 0x163838, 3, 0x18, 0x924}, + { 0x163848, 3, 0x18, 0x924}, + { 0x163858, 1, 0x18, 0x924}, + { 0x163860, 1, 0x18, 0x924}, + { 0x163868, 1, 0x18, 0x924}, + { 0x163870, 3, 0x18, 0x924}, + { 0x163880, 1, 0x18, 0x924}, + { 0x163888, 3, 0x18, 0x924}, + { 0x163898, 1, 0x18, 0x924}, + { 0x1638a0, 1, 0x18, 0x924}, + { 0x1638a8, 1, 0x18, 0x924}, + { 0x1638b0, 2, 0x10, 0x924}, + { 0x1638c0, 1, 0x18, 0x924}, + { 0x1638c8, 1, 0x18, 0x924}, + { 0x1638d0, 1, 0x18, 0x924}, + { 0x1638d8, 1, 0x18, 0x924}, + { 0x1638e0, 2, 0x18, 0x924}, + { 0x163910, 1, 0x18, 0x924}, + { 0x163920, 2, 0x18, 0x924}, + { 0x163c20, 4, 0x18, 0x924}, + { 0x163c38, 2, 0x18, 0x924}, + { 0x163c88, 2, 0x18, 0x924}, + { 0x163d20, 2, 0x18, 0x924}, + { 0x164000, 5, 0x1f, 0x924}, + { 0x164014, 2, 0x1f, 0xfff}, + { 0x16401c, 53, 0x1f, 0x924}, + { 0x164100, 2, 0x1f, 0x1fff}, + { 0x16410c, 1, 0x1f, 0x1fff}, + { 0x164110, 2, 0x1e, 0x924}, + { 0x164118, 15, 0x1c, 0x924}, + { 0x164200, 1, 0x1f, 0x924}, + { 0x164208, 1, 0x1f, 0x924}, + { 0x164210, 1, 0x1f, 0x924}, + { 0x164218, 1, 0x1f, 0x924}, + { 0x164220, 1, 0x1f, 0x924}, + { 0x164228, 1, 0x1f, 0x924}, + { 0x164230, 1, 0x1f, 0x924}, + { 0x164238, 1, 0x1f, 0x924}, + { 0x164240, 1, 0x1f, 0x924}, + { 0x164248, 1, 0x1f, 0x924}, + { 0x164250, 1, 0x1f, 0x924}, + { 0x164258, 1, 0x1f, 0x924}, + { 0x164260, 1, 0x1f, 0x924}, + { 0x164270, 2, 0x1f, 0x924}, + { 0x164280, 2, 0x1f, 0x924}, + { 0x164800, 2, 0x1f, 0x924}, + { 0x165000, 2, 0x1f, 0x924}, + { 0x166000, 164, 0x1f, 0x924}, + { 0x1662b0, 2, 0x1f, 0x1fff}, + { 0x1662bc, 1, 0x1f, 0x1fff}, + { 0x1662cc, 7, 0x1c, 0x924}, + { 0x166400, 49, 0x1f, 0x924}, + { 0x1664c8, 32, 0x1f, 0x924}, + { 0x166548, 1, 0x1f, 0xfff}, + { 0x16654c, 1, 0x1f, 0x924}, + { 0x166550, 1, 0x1f, 0xfff}, + { 0x166554, 1, 0x1f, 0x924}, + { 0x166558, 1, 0x1f, 0xfff}, + { 0x16655c, 1, 0x1f, 0x924}, + { 0x166568, 2, 0x1f, 0x924}, + { 0x166570, 5, 0x1c, 0x924}, + { 0x166800, 1, 0x1f, 0x924}, + { 0x168000, 1, 0x1f, 0xfff}, + { 0x168004, 1, 0x1f, 0x924}, + { 0x168008, 1, 0x1f, 0xfff}, + { 0x16800c, 1, 0x1f, 0x924}, + { 0x168010, 1, 0x1f, 0xfff}, + { 0x168014, 1, 0x1f, 0x924}, + { 0x168018, 1, 0x1f, 0xfff}, + { 0x16801c, 3, 0x1f, 0x924}, + { 0x168028, 2, 0x1f, 0xfff}, + { 0x168030, 10, 0x1f, 0x924}, + { 0x168058, 9, 0x1f, 0xfff}, + { 0x16807c, 106, 0x1f, 0x924}, + { 0x168224, 2, 0x3, 0x924}, + { 0x16822c, 3, 0x1f, 0x924}, + { 0x168238, 1, 0x1f, 0xfff}, + { 0x16823c, 25, 0x1f, 0x924}, + { 0x1682a0, 12, 0x3, 0x924}, + { 0x1682d0, 7, 0x1f, 0xfff}, + { 0x1682ec, 5, 0x1f, 0x924}, + { 0x168300, 2, 0x3, 0xfff}, + { 0x168308, 65, 0x1f, 0xfff}, + { 0x16840c, 1, 0x1f, 0x924}, + { 0x168410, 2, 0x1f, 0xfff}, + { 0x168418, 2, 0x3, 0x924}, + { 0x168420, 6, 0x1f, 0x924}, + { 0x168448, 2, 0x1f, 0x1fff}, + { 0x168454, 1, 0x1f, 0x1fff}, + { 0x168800, 19, 0x1f, 0x924}, + { 0x168900, 1, 0x1f, 0x924}, + { 0x168a00, 128, 0x1f, 0xfff}, + { 0x16a000, 1536, 0x1f, 0x924}, + { 0x16c000, 1536, 0x1f, 0x924}, + { 0x16e000, 16, 0x2, 0x924}, + { 0x16e040, 8, 0x1c, 0x924}, + { 0x16e100, 1, 0x2, 0x924}, + { 0x16e200, 2, 0x2, 0xfff}, + { 0x16e400, 1, 0x2, 0x924}, + { 0x16e404, 2, 0x2, 0xfff}, + { 0x16e40c, 94, 0x2, 0x924}, + { 0x16e584, 64, 0x2, 0xfff}, + { 0x16e684, 2, 0x1e, 0xfff}, + { 0x16e68c, 4, 0x2, 0xfff}, + { 0x16e69c, 8, 
0x2, 0x924}, + { 0x16e6bc, 4, 0x1e, 0x924}, + { 0x16e6cc, 4, 0x2, 0x924}, + { 0x16e6e0, 2, 0x1c, 0x924}, + { 0x16e6e8, 5, 0xc, 0x924}, + { 0x16e6fc, 4, 0x1c, 0xfff}, + { 0x16e70c, 1, 0x1c, 0x924}, + { 0x16e768, 17, 0x1c, 0x924}, + { 0x16e7ac, 12, 0x10, 0xfff}, + { 0x170000, 24, 0x1f, 0x924}, + { 0x170060, 4, 0x3, 0x924}, + { 0x170070, 13, 0x1f, 0x924}, + { 0x1700a4, 1, 0x1f, 0xfff}, + { 0x1700a8, 1, 0x1f, 0x924}, + { 0x1700ac, 2, 0x1f, 0xfff}, + { 0x1700b4, 3, 0x1f, 0x924}, + { 0x1700c0, 1, 0x1f, 0xfff}, + { 0x1700c4, 44, 0x1f, 0x924}, + { 0x170184, 2, 0x1f, 0x1fff}, + { 0x170190, 1, 0x1f, 0x1fff}, + { 0x170194, 11, 0x1c, 0x924}, + { 0x1701c4, 1, 0x1c, 0x924}, + { 0x1701cc, 7, 0x1c, 0x924}, + { 0x1701e8, 1, 0x18, 0x924}, + { 0x1701ec, 1, 0x1c, 0x924}, + { 0x1701f4, 1, 0x1c, 0x924}, + { 0x170200, 4, 0x1f, 0x924}, + { 0x170214, 1, 0x1f, 0x924}, + { 0x170218, 77, 0x1c, 0x924}, + { 0x170400, 64, 0x1c, 0x924}, + { 0x178000, 1, 0x1f, 0x924}, + { 0x180000, 61, 0x1f, 0x924}, + { 0x180114, 2, 0x1f, 0x1fff}, + { 0x180120, 3, 0x1f, 0x1fff}, + { 0x180130, 1, 0x1f, 0x1fff}, + { 0x18013c, 2, 0x1e, 0x924}, + { 0x180200, 27, 0x1f, 0x924}, + { 0x18026c, 1, 0x1f, 0xfff}, + { 0x180270, 12, 0x1f, 0x924}, + { 0x1802a0, 1, 0x1f, 0xfff}, + { 0x1802a4, 17, 0x1f, 0x924}, + { 0x180340, 4, 0x1f, 0x924}, + { 0x180380, 1, 0x1c, 0x924}, + { 0x180388, 1, 0x1c, 0x924}, + { 0x180390, 1, 0x1c, 0x924}, + { 0x180398, 1, 0x1c, 0x924}, + { 0x1803a0, 5, 0x1c, 0x924}, + { 0x1803b4, 2, 0x18, 0x924}, + { 0x181000, 4, 0x1f, 0x93c}, + { 0x181010, 1020, 0x1f, 0x38}, + { 0x182000, 4, 0x18, 0x924}, + { 0x1a0000, 1, 0x1f, 0x92c}, + { 0x1a0004, 5631, 0x1f, 0x8}, + { 0x1a5800, 2560, 0x1e, 0x8}, + { 0x1a8000, 1, 0x1f, 0x92c}, + { 0x1a8004, 8191, 0x1e, 0x8}, + { 0x1b0000, 1, 0x1f, 0x92c}, + { 0x1b0004, 15, 0x2, 0x8}, + { 0x1b0040, 1, 0x1e, 0x92c}, + { 0x1b0044, 239, 0x2, 0x8}, + { 0x1b0400, 1, 0x1f, 0x92c}, + { 0x1b0404, 255, 0x2, 0x8}, + { 0x1b0800, 1, 0x1f, 0x924}, + { 0x1b0840, 1, 0x1e, 0x924}, + { 0x1b0c00, 1, 0x1f, 0x1fff}, + { 0x1b1000, 1, 0x1f, 0x1fff}, + { 0x1b1040, 1, 0x1e, 0x1fff}, + { 0x1b1400, 1, 0x1f, 0x924}, + { 0x1b1440, 1, 0x1e, 0x924}, + { 0x1b1480, 1, 0x1e, 0x924}, + { 0x1b14c0, 1, 0x1e, 0x924}, + { 0x1b1800, 128, 0x1f, 0x10}, + { 0x1b1c00, 128, 0x1f, 0x10}, + { 0x1b2000, 1, 0x1f, 0xdb6}, + { 0x1b2400, 1, 0x1e, 0x92c}, + { 0x1b2404, 5631, 0x1c, 0x8}, + { 0x1b8000, 1, 0x1f, 0xfff}, + { 0x1b8040, 1, 0x1f, 0xfff}, + { 0x1b8080, 1, 0x1f, 0xfff}, + { 0x1b80c0, 1, 0x1f, 0xfff}, + { 0x1b8100, 1, 0x1f, 0x924}, + { 0x1b8140, 1, 0x1f, 0x924}, + { 0x1b8180, 1, 0x1f, 0x924}, + { 0x1b81c0, 1, 0x1f, 0x924}, + { 0x1b8200, 1, 0x1f, 0x924}, + { 0x1b8240, 1, 0x1f, 0x924}, + { 0x1b8280, 1, 0x1f, 0x924}, + { 0x1b82c0, 1, 0x1f, 0x924}, + { 0x1b8300, 1, 0x1f, 0x924}, + { 0x1b8340, 1, 0x1f, 0x924}, + { 0x1b8380, 1, 0x1f, 0x924}, + { 0x1b83c0, 1, 0x1f, 0x924}, + { 0x1b8400, 1, 0x1f, 0x924}, + { 0x1b8440, 1, 0x1f, 0x924}, + { 0x1b8480, 1, 0x1f, 0x924}, + { 0x1b84c0, 1, 0x1f, 0x924}, + { 0x1b8500, 1, 0x1f, 0x924}, + { 0x1b8540, 1, 0x1f, 0x924}, + { 0x1b8580, 1, 0x1f, 0x924}, + { 0x1b85c0, 19, 0x1c, 0x924}, + { 0x1b8800, 1, 0x1f, 0x924}, + { 0x1b8840, 1, 0x1f, 0x924}, + { 0x1b8880, 1, 0x1f, 0x924}, + { 0x1b88c0, 1, 0x1f, 0x924}, + { 0x1b8900, 1, 0x1f, 0x924}, + { 0x1b8940, 1, 0x1f, 0x924}, + { 0x1b8980, 1, 0x1f, 0x924}, + { 0x1b89c0, 1, 0x1f, 0x924}, + { 0x1b8a00, 1, 0x1f, 0x934}, + { 0x1b8a40, 1, 0x1f, 0x924}, + { 0x1b8a80, 1, 0x1f, 0x492}, + { 0x1b8ac0, 1, 0x1f, 0x924}, + { 0x1b8b00, 1, 0x1f, 0x924}, + { 0x1b8b40, 1, 0x1f, 0x924}, + { 0x1b8b80, 1, 
0x1f, 0x924}, + { 0x1b8bc0, 1, 0x1f, 0x924}, + { 0x1b8c00, 1, 0x1f, 0x924}, + { 0x1b8c40, 1, 0x1f, 0x924}, + { 0x1b8c80, 1, 0x1f, 0x924}, + { 0x1b8cc0, 1, 0x1f, 0x924}, + { 0x1b8cc4, 1, 0x1c, 0x924}, + { 0x1b8d00, 1, 0x1f, 0x924}, + { 0x1b8d40, 1, 0x1f, 0x924}, + { 0x1b8d80, 1, 0x1f, 0x924}, + { 0x1b8dc0, 1, 0x1f, 0x924}, + { 0x1b8e00, 1, 0x1f, 0x924}, + { 0x1b8e40, 1, 0x1f, 0x924}, + { 0x1b8e80, 1, 0x1f, 0x924}, + { 0x1b8e84, 1, 0x1c, 0x924}, + { 0x1b8ec0, 1, 0x1e, 0x924}, + { 0x1b8f00, 1, 0x1e, 0x924}, + { 0x1b8f40, 1, 0x1e, 0x924}, + { 0x1b8f80, 1, 0x1e, 0x924}, + { 0x1b8fc0, 1, 0x1e, 0x924}, + { 0x1b8fd4, 5, 0x1c, 0x924}, + { 0x1b8fe8, 2, 0x18, 0x924}, + { 0x1b9000, 1, 0x1c, 0x924}, + { 0x1b9040, 3, 0x1c, 0x924}, + { 0x1b905c, 1, 0x18, 0x924}, + { 0x1b9064, 1, 0x10, 0x924}, + { 0x1b9080, 10, 0x10, 0x924}, + { 0x1c0000, 2, 0x1f, 0x924}, + { 0x200000, 65, 0x1f, 0x924}, + { 0x200124, 2, 0x1f, 0x1fff}, + { 0x200130, 3, 0x1f, 0x1fff}, + { 0x200140, 1, 0x1f, 0x1fff}, + { 0x20014c, 2, 0x1e, 0x924}, + { 0x200200, 27, 0x1f, 0x924}, + { 0x20026c, 1, 0x1f, 0xfff}, + { 0x200270, 12, 0x1f, 0x924}, + { 0x2002a0, 1, 0x1f, 0xfff}, + { 0x2002a4, 17, 0x1f, 0x924}, + { 0x200340, 4, 0x1f, 0x924}, + { 0x200380, 1, 0x1c, 0x924}, + { 0x200388, 1, 0x1c, 0x924}, + { 0x200390, 1, 0x1c, 0x924}, + { 0x200398, 1, 0x1c, 0x924}, + { 0x2003a0, 1, 0x1c, 0x924}, + { 0x2003a8, 2, 0x1c, 0x924}, + { 0x202000, 4, 0x1f, 0x1927}, + { 0x202010, 2044, 0x1f, 0x1007}, + { 0x204000, 4, 0x18, 0x924}, + { 0x220000, 1, 0x1f, 0x925}, + { 0x220004, 5631, 0x1f, 0x1}, + { 0x225800, 2560, 0x1e, 0x1}, + { 0x228000, 1, 0x1f, 0x925}, + { 0x228004, 8191, 0x1e, 0x1}, + { 0x230000, 1, 0x1f, 0x925}, + { 0x230004, 15, 0x2, 0x1}, + { 0x230040, 1, 0x1e, 0x925}, + { 0x230044, 239, 0x2, 0x1}, + { 0x230400, 1, 0x1f, 0x925}, + { 0x230404, 255, 0x2, 0x1}, + { 0x230800, 1, 0x1f, 0x924}, + { 0x230840, 1, 0x1e, 0x924}, + { 0x230c00, 1, 0x1f, 0x924}, + { 0x231000, 1, 0x1f, 0x924}, + { 0x231040, 1, 0x1e, 0x924}, + { 0x231400, 1, 0x1f, 0x924}, + { 0x231440, 1, 0x1e, 0x924}, + { 0x231480, 1, 0x1e, 0x924}, + { 0x2314c0, 1, 0x1e, 0x924}, + { 0x231800, 128, 0x1f, 0x2}, + { 0x231c00, 128, 0x1f, 0x2}, + { 0x232000, 1, 0x1f, 0xdb6}, + { 0x232400, 1, 0x1e, 0x925}, + { 0x232404, 5631, 0x1c, 0x1}, + { 0x238000, 1, 0x1f, 0xfff}, + { 0x238040, 1, 0x1f, 0xfff}, + { 0x238080, 1, 0x1f, 0xfff}, + { 0x2380c0, 1, 0x1f, 0xfff}, + { 0x238100, 1, 0x1f, 0x924}, + { 0x238140, 1, 0x1f, 0x924}, + { 0x238180, 1, 0x1f, 0x924}, + { 0x2381c0, 1, 0x1f, 0x924}, + { 0x238200, 1, 0x1f, 0x924}, + { 0x238240, 1, 0x1f, 0x924}, + { 0x238280, 1, 0x1f, 0x924}, + { 0x2382c0, 1, 0x1f, 0x924}, + { 0x238300, 1, 0x1f, 0x924}, + { 0x238340, 1, 0x1f, 0x924}, + { 0x238380, 1, 0x1f, 0x924}, + { 0x2383c0, 1, 0x1f, 0x924}, + { 0x238400, 1, 0x1f, 0x924}, + { 0x238440, 1, 0x1f, 0x924}, + { 0x238480, 1, 0x1f, 0x924}, + { 0x2384c0, 1, 0x1f, 0x924}, + { 0x238500, 1, 0x1f, 0x924}, + { 0x238540, 1, 0x1f, 0x924}, + { 0x238580, 1, 0x1f, 0x924}, + { 0x2385c0, 19, 0x1c, 0x924}, + { 0x238800, 1, 0x1f, 0x924}, + { 0x238840, 1, 0x1f, 0x924}, + { 0x238880, 1, 0x1f, 0x924}, + { 0x2388c0, 1, 0x1f, 0x924}, + { 0x238900, 1, 0x1f, 0x924}, + { 0x238940, 1, 0x1f, 0x924}, + { 0x238980, 1, 0x1f, 0x924}, + { 0x2389c0, 1, 0x1f, 0x924}, + { 0x238a00, 1, 0x1f, 0x926}, + { 0x238a40, 1, 0x1f, 0x924}, + { 0x238a80, 1, 0x1f, 0x492}, + { 0x238ac0, 1, 0x1f, 0x924}, + { 0x238b00, 1, 0x1f, 0x924}, + { 0x238b40, 1, 0x1f, 0x924}, + { 0x238b80, 1, 0x1f, 0x924}, + { 0x238bc0, 1, 0x1f, 0x924}, + { 0x238c00, 1, 0x1f, 0x924}, + { 0x238c40, 1, 0x1f, 
0x924}, + { 0x238c80, 1, 0x1f, 0x924}, + { 0x238cc0, 1, 0x1f, 0x924}, + { 0x238cc4, 1, 0x1c, 0x924}, + { 0x238d00, 1, 0x1f, 0x924}, + { 0x238d40, 1, 0x1f, 0x924}, + { 0x238d80, 1, 0x1f, 0x924}, + { 0x238dc0, 1, 0x1f, 0x924}, + { 0x238e00, 1, 0x1f, 0x924}, + { 0x238e40, 1, 0x1f, 0x924}, + { 0x238e80, 1, 0x1f, 0x924}, + { 0x238e84, 1, 0x1c, 0x924}, + { 0x238ec0, 1, 0x1e, 0x924}, + { 0x238f00, 1, 0x1e, 0x924}, + { 0x238f40, 1, 0x1e, 0x924}, + { 0x238f80, 1, 0x1e, 0x924}, + { 0x238fc0, 1, 0x1e, 0x924}, + { 0x238fd4, 5, 0x1c, 0x924}, + { 0x238fe8, 2, 0x18, 0x924}, + { 0x239000, 1, 0x1c, 0x924}, + { 0x239040, 3, 0x1c, 0x924}, + { 0x23905c, 1, 0x18, 0x924}, + { 0x239064, 1, 0x10, 0x924}, + { 0x239080, 10, 0x10, 0x924}, + { 0x240000, 2, 0x1f, 0x924}, + { 0x280000, 65, 0x1f, 0x924}, + { 0x280124, 2, 0x1f, 0x1fff}, + { 0x280130, 3, 0x1f, 0x1fff}, + { 0x280140, 1, 0x1f, 0x1fff}, + { 0x28014c, 2, 0x1e, 0x924}, + { 0x280200, 27, 0x1f, 0x924}, + { 0x28026c, 1, 0x1f, 0xfff}, + { 0x280270, 12, 0x1f, 0x924}, + { 0x2802a0, 1, 0x1f, 0xfff}, + { 0x2802a4, 17, 0x1f, 0x924}, + { 0x280340, 4, 0x1f, 0x924}, + { 0x280380, 1, 0x1c, 0x924}, + { 0x280388, 1, 0x1c, 0x924}, + { 0x280390, 1, 0x1c, 0x924}, + { 0x280398, 1, 0x1c, 0x924}, + { 0x2803a0, 1, 0x1c, 0x924}, + { 0x2803a8, 2, 0x1c, 0x924}, + { 0x282000, 4, 0x1f, 0x9e4}, + { 0x282010, 2044, 0x1f, 0x1c0}, + { 0x284000, 4, 0x18, 0x924}, + { 0x2a0000, 1, 0x1f, 0x964}, + { 0x2a0004, 5631, 0x1f, 0x40}, + { 0x2a5800, 2560, 0x1e, 0x40}, + { 0x2a8000, 1, 0x1f, 0x964}, + { 0x2a8004, 8191, 0x1e, 0x40}, + { 0x2b0000, 1, 0x1f, 0x964}, + { 0x2b0004, 15, 0x2, 0x40}, + { 0x2b0040, 1, 0x1e, 0x964}, + { 0x2b0044, 239, 0x2, 0x40}, + { 0x2b0400, 1, 0x1f, 0x964}, + { 0x2b0404, 255, 0x2, 0x40}, + { 0x2b0800, 1, 0x1f, 0x924}, + { 0x2b0840, 1, 0x1e, 0x924}, + { 0x2b0c00, 1, 0x1f, 0x924}, + { 0x2b1000, 1, 0x1f, 0x924}, + { 0x2b1040, 1, 0x1e, 0x924}, + { 0x2b1400, 1, 0x1f, 0x924}, + { 0x2b1440, 1, 0x1e, 0x924}, + { 0x2b1480, 1, 0x1e, 0x924}, + { 0x2b14c0, 1, 0x1e, 0x924}, + { 0x2b1800, 128, 0x1f, 0x80}, + { 0x2b1c00, 128, 0x1f, 0x80}, + { 0x2b2000, 1, 0x1f, 0xdb6}, + { 0x2b2400, 1, 0x1e, 0x964}, + { 0x2b2404, 5631, 0x1c, 0x40}, + { 0x2b8000, 1, 0x1f, 0xfff}, + { 0x2b8040, 1, 0x1f, 0xfff}, + { 0x2b8080, 1, 0x1f, 0xfff}, + { 0x2b80c0, 1, 0x1f, 0x924}, + { 0x2b8100, 1, 0x1f, 0x924}, + { 0x2b8140, 1, 0x1f, 0x924}, + { 0x2b8180, 1, 0x1f, 0x924}, + { 0x2b81c0, 1, 0x1f, 0x924}, + { 0x2b8200, 1, 0x1f, 0x924}, + { 0x2b8240, 1, 0x1f, 0x924}, + { 0x2b8280, 1, 0x1f, 0x924}, + { 0x2b82c0, 1, 0x1f, 0x924}, + { 0x2b8300, 1, 0x1f, 0x924}, + { 0x2b8340, 1, 0x1f, 0x924}, + { 0x2b8380, 1, 0x1f, 0x924}, + { 0x2b83c0, 1, 0x1f, 0x924}, + { 0x2b8400, 1, 0x1f, 0x924}, + { 0x2b8440, 1, 0x1f, 0x924}, + { 0x2b8480, 1, 0x1f, 0x924}, + { 0x2b84c0, 1, 0x1f, 0x924}, + { 0x2b8500, 1, 0x1f, 0x924}, + { 0x2b8540, 1, 0x1f, 0x924}, + { 0x2b8580, 1, 0x1f, 0x924}, + { 0x2b85c0, 19, 0x1c, 0x924}, + { 0x2b8800, 1, 0x1f, 0x924}, + { 0x2b8840, 1, 0x1f, 0x924}, + { 0x2b8880, 1, 0x1f, 0x924}, + { 0x2b88c0, 1, 0x1f, 0x924}, + { 0x2b8900, 1, 0x1f, 0x924}, + { 0x2b8940, 1, 0x1f, 0x924}, + { 0x2b8980, 1, 0x1f, 0x924}, + { 0x2b89c0, 1, 0x1f, 0x924}, + { 0x2b8a00, 1, 0x1f, 0x9a4}, + { 0x2b8a40, 1, 0x1f, 0x924}, + { 0x2b8a80, 1, 0x1f, 0x492}, + { 0x2b8ac0, 1, 0x1f, 0x924}, + { 0x2b8b00, 1, 0x1f, 0x924}, + { 0x2b8b40, 1, 0x1f, 0x924}, + { 0x2b8b80, 1, 0x1f, 0x924}, + { 0x2b8bc0, 1, 0x1f, 0x924}, + { 0x2b8c00, 1, 0x1f, 0x924}, + { 0x2b8c40, 1, 0x1f, 0x924}, + { 0x2b8c80, 1, 0x1f, 0x924}, + { 0x2b8cc0, 1, 0x1f, 0x924}, + { 0x2b8cc4, 1, 0x1c, 
0x924}, + { 0x2b8d00, 1, 0x1f, 0x924}, + { 0x2b8d40, 1, 0x1f, 0x924}, + { 0x2b8d80, 1, 0x1f, 0x924}, + { 0x2b8dc0, 1, 0x1f, 0x924}, + { 0x2b8e00, 1, 0x1f, 0x924}, + { 0x2b8e40, 1, 0x1f, 0x924}, + { 0x2b8e80, 1, 0x1f, 0x924}, + { 0x2b8e84, 1, 0x1c, 0x924}, + { 0x2b8ec0, 1, 0x1e, 0x924}, + { 0x2b8f00, 1, 0x1e, 0x924}, + { 0x2b8f40, 1, 0x1e, 0x924}, + { 0x2b8f80, 1, 0x1e, 0x924}, + { 0x2b8fc0, 1, 0x1e, 0x924}, + { 0x2b8fd4, 5, 0x1c, 0x924}, + { 0x2b8fe8, 2, 0x18, 0x924}, + { 0x2b9000, 1, 0x1c, 0x924}, + { 0x2b9040, 3, 0x1c, 0x924}, + { 0x2b905c, 1, 0x18, 0x924}, + { 0x2b9064, 1, 0x10, 0x924}, + { 0x2b9080, 10, 0x10, 0x924}, + { 0x2c0000, 2, 0x1f, 0x1fff}, + { 0x300000, 65, 0x1f, 0x924}, + { 0x300124, 2, 0x1f, 0x1fff}, + { 0x300130, 3, 0x1f, 0x1fff}, + { 0x300140, 1, 0x1f, 0x1fff}, + { 0x30014c, 2, 0x1e, 0x924}, + { 0x300200, 27, 0x1f, 0x924}, + { 0x30026c, 1, 0x1f, 0xfff}, + { 0x300270, 12, 0x1f, 0x924}, + { 0x3002a0, 1, 0x1f, 0xfff}, + { 0x3002a4, 17, 0x1f, 0x924}, + { 0x300340, 4, 0x1f, 0x924}, + { 0x300380, 1, 0x1c, 0x924}, + { 0x300388, 1, 0x1c, 0x924}, + { 0x300390, 1, 0x1c, 0x924}, + { 0x300398, 1, 0x1c, 0x924}, + { 0x3003a0, 1, 0x1c, 0x924}, + { 0x3003a8, 2, 0x1c, 0x924}, + { 0x302000, 4, 0x1f, 0xf24}, + { 0x302010, 2044, 0x1f, 0xe00}, + { 0x304000, 4, 0x18, 0x924}, + { 0x320000, 1, 0x1f, 0xb24}, + { 0x320004, 5631, 0x1f, 0x200}, + { 0x325800, 2560, 0x1e, 0x200}, + { 0x328000, 1, 0x1f, 0xb24}, + { 0x328004, 8191, 0x1e, 0x200}, + { 0x330000, 1, 0x1f, 0xb24}, + { 0x330004, 15, 0x2, 0x200}, + { 0x330040, 1, 0x1e, 0xb24}, + { 0x330044, 239, 0x2, 0x200}, + { 0x330400, 1, 0x1f, 0xb24}, + { 0x330404, 255, 0x2, 0x200}, + { 0x330800, 1, 0x1f, 0x924}, + { 0x330840, 1, 0x1e, 0x924}, + { 0x330c00, 1, 0x1f, 0x924}, + { 0x331000, 1, 0x1f, 0x924}, + { 0x331040, 1, 0x1e, 0x924}, + { 0x331400, 1, 0x1f, 0x924}, + { 0x331440, 1, 0x1e, 0x924}, + { 0x331480, 1, 0x1e, 0x924}, + { 0x3314c0, 1, 0x1e, 0x924}, + { 0x331800, 128, 0x1f, 0x400}, + { 0x331c00, 128, 0x1f, 0x400}, + { 0x332000, 1, 0x1f, 0xdb6}, + { 0x332400, 1, 0x1e, 0xb24}, + { 0x332404, 5631, 0x1c, 0x200}, + { 0x338000, 1, 0x1f, 0xfff}, + { 0x338040, 1, 0x1f, 0xfff}, + { 0x338080, 1, 0x1f, 0xfff}, + { 0x3380c0, 1, 0x1f, 0xfff}, + { 0x338100, 1, 0x1f, 0x924}, + { 0x338140, 1, 0x1f, 0x924}, + { 0x338180, 1, 0x1f, 0x924}, + { 0x3381c0, 1, 0x1f, 0x924}, + { 0x338200, 1, 0x1f, 0x924}, + { 0x338240, 1, 0x1f, 0x924}, + { 0x338280, 1, 0x1f, 0x924}, + { 0x3382c0, 1, 0x1f, 0x924}, + { 0x338300, 1, 0x1f, 0x924}, + { 0x338340, 1, 0x1f, 0x924}, + { 0x338380, 1, 0x1f, 0x924}, + { 0x3383c0, 1, 0x1f, 0x924}, + { 0x338400, 1, 0x1f, 0x924}, + { 0x338440, 1, 0x1f, 0x924}, + { 0x338480, 1, 0x1f, 0x924}, + { 0x3384c0, 1, 0x1f, 0x924}, + { 0x338500, 1, 0x1f, 0x924}, + { 0x338540, 1, 0x1f, 0x924}, + { 0x338580, 1, 0x1f, 0x924}, + { 0x3385c0, 19, 0x1c, 0x924}, + { 0x338800, 1, 0x1f, 0x924}, + { 0x338840, 1, 0x1f, 0x924}, + { 0x338880, 1, 0x1f, 0x924}, + { 0x3388c0, 1, 0x1f, 0x924}, + { 0x338900, 1, 0x1f, 0x924}, + { 0x338940, 1, 0x1f, 0x924}, + { 0x338980, 1, 0x1f, 0x924}, + { 0x3389c0, 1, 0x1f, 0x924}, + { 0x338a00, 1, 0x1f, 0xd24}, + { 0x338a40, 1, 0x1f, 0x924}, + { 0x338a80, 1, 0x1f, 0x492}, + { 0x338ac0, 1, 0x1f, 0x924}, + { 0x338b00, 1, 0x1f, 0x924}, + { 0x338b40, 1, 0x1f, 0x924}, + { 0x338b80, 1, 0x1f, 0x924}, + { 0x338bc0, 1, 0x1f, 0x924}, + { 0x338c00, 1, 0x1f, 0x924}, + { 0x338c40, 1, 0x1f, 0x924}, + { 0x338c80, 1, 0x1f, 0x924}, + { 0x338cc0, 1, 0x1f, 0x924}, + { 0x338cc4, 1, 0x1c, 0x924}, + { 0x338d00, 1, 0x1f, 0x924}, + { 0x338d40, 1, 0x1f, 0x924}, + { 
0x338d80, 1, 0x1f, 0x924}, + { 0x338dc0, 1, 0x1f, 0x924}, + { 0x338e00, 1, 0x1f, 0x924}, + { 0x338e40, 1, 0x1f, 0x924}, + { 0x338e80, 1, 0x1f, 0x924}, + { 0x338e84, 1, 0x1c, 0x924}, + { 0x338ec0, 1, 0x1e, 0x924}, + { 0x338f00, 1, 0x1e, 0x924}, + { 0x338f40, 1, 0x1e, 0x924}, + { 0x338f80, 1, 0x1e, 0x924}, + { 0x338fc0, 1, 0x1e, 0x924}, + { 0x338fd4, 5, 0x1c, 0x924}, + { 0x338fe8, 2, 0x18, 0x924}, + { 0x339000, 1, 0x1c, 0x924}, + { 0x339040, 3, 0x1c, 0x924}, + { 0x33905c, 1, 0x18, 0x924}, + { 0x339064, 1, 0x10, 0x924}, + { 0x339080, 10, 0x10, 0x924}, + { 0x340000, 2, 0x1f, 0x924}, + { 0x3a0000, 40960, 0x1c, 0x1000} +}; + +#define REGS_COUNT ARRAY_SIZE(reg_addrs) + +static const struct reg_addr idle_reg_addrs[] = { + { 0x2104, 1, 0x1f, 0xfff}, + { 0x2110, 2, 0x1f, 0xfff}, + { 0x211c, 8, 0x1f, 0xfff}, + { 0x2814, 1, 0x1f, 0xfff}, + { 0x281c, 2, 0x1f, 0xfff}, + { 0x2854, 1, 0x1f, 0xfff}, + { 0x285c, 1, 0x1f, 0xfff}, + { 0x3040, 1, 0x1f, 0xfff}, + { 0x9010, 7, 0x1c, 0xfff}, + { 0x9030, 1, 0x1c, 0xfff}, + { 0x9068, 16, 0x1c, 0xfff}, + { 0x9230, 2, 0x1c, 0xfff}, + { 0x9244, 1, 0x1c, 0xfff}, + { 0x9298, 1, 0x1c, 0xfff}, + { 0x92a8, 1, 0x1c, 0x1fff}, + { 0xa38c, 1, 0x1f, 0x1fff}, + { 0xa3c4, 1, 0x1e, 0xfff}, + { 0xa404, 1, 0x1f, 0xfff}, + { 0xa408, 2, 0x1f, 0x1fff}, + { 0xa42c, 12, 0x1f, 0xfff}, + { 0xa580, 1, 0x1f, 0x1fff}, + { 0xa590, 1, 0x1f, 0x1fff}, + { 0xa600, 5, 0x1e, 0xfff}, + { 0xa618, 1, 0x1e, 0xfff}, + { 0xa714, 1, 0x1c, 0xfff}, + { 0xa720, 1, 0x1c, 0xfff}, + { 0xa750, 1, 0x1c, 0xfff}, + { 0xc09c, 1, 0x3, 0xfff}, + { 0x103b0, 1, 0x1f, 0xfff}, + { 0x103c0, 1, 0x1f, 0xfff}, + { 0x103d0, 1, 0x3, 0x1fff}, + { 0x10418, 1, 0x1f, 0xfff}, + { 0x10420, 1, 0x1f, 0xfff}, + { 0x10428, 1, 0x1f, 0xfff}, + { 0x10460, 1, 0x1f, 0xfff}, + { 0x10474, 1, 0x1f, 0xfff}, + { 0x104e0, 1, 0x1f, 0xfff}, + { 0x104ec, 1, 0x1f, 0xfff}, + { 0x104f8, 1, 0x1f, 0xfff}, + { 0x10508, 1, 0x1f, 0xfff}, + { 0x10530, 1, 0x1f, 0xfff}, + { 0x10538, 1, 0x1f, 0xfff}, + { 0x10548, 1, 0x1f, 0xfff}, + { 0x10558, 1, 0x1f, 0xfff}, + { 0x182a8, 1, 0x1c, 0xfff}, + { 0x182b8, 1, 0x1c, 0xfff}, + { 0x18308, 1, 0x1c, 0xfff}, + { 0x18318, 1, 0x1c, 0xfff}, + { 0x18338, 1, 0x1c, 0xfff}, + { 0x18348, 1, 0x1c, 0xfff}, + { 0x183bc, 1, 0x1c, 0x1fff}, + { 0x183cc, 1, 0x1c, 0x1fff}, + { 0x18570, 1, 0x18, 0xfff}, + { 0x18578, 1, 0x18, 0xfff}, + { 0x1858c, 1, 0x18, 0xfff}, + { 0x18594, 1, 0x18, 0xfff}, + { 0x1862c, 4, 0x10, 0xfff}, + { 0x2021c, 11, 0x1f, 0xfff}, + { 0x202a8, 1, 0x1f, 0xfff}, + { 0x202b8, 1, 0x1f, 0x1fff}, + { 0x20404, 1, 0x1f, 0xfff}, + { 0x2040c, 2, 0x1f, 0xfff}, + { 0x2041c, 2, 0x1f, 0xfff}, + { 0x40154, 14, 0x1f, 0xfff}, + { 0x40198, 1, 0x1f, 0x1fff}, + { 0x404ac, 1, 0x1f, 0xfff}, + { 0x404bc, 1, 0x1f, 0x1fff}, + { 0x42290, 1, 0x1f, 0xfff}, + { 0x422a0, 1, 0x1f, 0xfff}, + { 0x422b0, 1, 0x1f, 0x1fff}, + { 0x42548, 1, 0x1f, 0xfff}, + { 0x42550, 1, 0x1f, 0xfff}, + { 0x42558, 1, 0x1f, 0xfff}, + { 0x50160, 8, 0x1f, 0xfff}, + { 0x501d0, 1, 0x1f, 0xfff}, + { 0x501e0, 1, 0x1f, 0x1fff}, + { 0x50204, 1, 0x1f, 0xfff}, + { 0x5020c, 2, 0x1f, 0xfff}, + { 0x5021c, 1, 0x1f, 0xfff}, + { 0x60090, 1, 0x1f, 0xfff}, + { 0x6011c, 1, 0x1f, 0xfff}, + { 0x6012c, 1, 0x1f, 0x1fff}, + { 0xc101c, 1, 0x1f, 0xfff}, + { 0xc102c, 1, 0x1f, 0x1fff}, + { 0xc2290, 1, 0x1f, 0xfff}, + { 0xc22a0, 1, 0x1f, 0xfff}, + { 0xc22b0, 1, 0x1f, 0x1fff}, + { 0xc2548, 1, 0x1f, 0xfff}, + { 0xc2550, 1, 0x1f, 0xfff}, + { 0xc2558, 1, 0x1f, 0xfff}, + { 0xc4294, 1, 0x1f, 0xfff}, + { 0xc42a4, 1, 0x1f, 0xfff}, + { 0xc42b4, 1, 0x1f, 0x1fff}, + { 0xc4550, 1, 0x1f, 0xfff}, + { 0xc4558, 1, 0x1f, 
0xfff}, + { 0xc4560, 1, 0x1f, 0xfff}, + { 0xd016c, 8, 0x1f, 0xfff}, + { 0xd01d8, 1, 0x1f, 0xfff}, + { 0xd01e8, 1, 0x1f, 0x1fff}, + { 0xd0204, 1, 0x1f, 0xfff}, + { 0xd020c, 3, 0x1f, 0xfff}, + { 0xe0154, 8, 0x1f, 0xfff}, + { 0xe01c8, 1, 0x1f, 0xfff}, + { 0xe01d8, 1, 0x1f, 0x1fff}, + { 0xe0204, 1, 0x1f, 0xfff}, + { 0xe020c, 2, 0x1f, 0xfff}, + { 0xe021c, 2, 0x1f, 0xfff}, + { 0x101014, 1, 0x1f, 0xfff}, + { 0x101030, 1, 0x1f, 0xfff}, + { 0x101040, 1, 0x1f, 0x1fff}, + { 0x102058, 1, 0x1f, 0x1fff}, + { 0x102080, 16, 0x1f, 0xfff}, + { 0x103004, 2, 0x1f, 0xfff}, + { 0x103068, 1, 0x1f, 0xfff}, + { 0x103078, 1, 0x1f, 0xfff}, + { 0x103088, 1, 0x1f, 0x1fff}, + { 0x10309c, 2, 0x1e, 0xfff}, + { 0x1030b8, 2, 0x1c, 0xfff}, + { 0x1030cc, 1, 0x1c, 0xfff}, + { 0x1030e0, 1, 0x1c, 0xfff}, + { 0x104004, 1, 0x1f, 0xfff}, + { 0x104018, 1, 0x1f, 0xfff}, + { 0x104020, 1, 0x1f, 0xfff}, + { 0x10403c, 1, 0x1f, 0xfff}, + { 0x1040fc, 1, 0x1f, 0xfff}, + { 0x10410c, 1, 0x1f, 0x1fff}, + { 0x104400, 1, 0x1f, 0x1fff}, + { 0x104404, 63, 0x1f, 0xfff}, + { 0x104800, 1, 0x1f, 0x1fff}, + { 0x104804, 63, 0x1f, 0xfff}, + { 0x105000, 4, 0x1f, 0x1fff}, + { 0x105010, 252, 0x1f, 0xfff}, + { 0x108094, 1, 0x3, 0xfff}, + { 0x1201b0, 2, 0x1f, 0xfff}, + { 0x12032c, 1, 0x1f, 0xfff}, + { 0x12036c, 3, 0x1f, 0xfff}, + { 0x120408, 2, 0x1f, 0xfff}, + { 0x120414, 15, 0x1f, 0xfff}, + { 0x120478, 2, 0x1f, 0xfff}, + { 0x12052c, 1, 0x1f, 0xfff}, + { 0x120564, 3, 0x1f, 0xfff}, + { 0x12057c, 1, 0x1f, 0x1fff}, + { 0x12058c, 1, 0x1f, 0x1fff}, + { 0x120608, 1, 0x1e, 0xfff}, + { 0x120748, 1, 0x1c, 0xfff}, + { 0x120778, 2, 0x1c, 0xfff}, + { 0x120808, 3, 0x1f, 0xfff}, + { 0x120818, 1, 0x1f, 0xfff}, + { 0x120820, 1, 0x1f, 0xfff}, + { 0x120828, 1, 0x1f, 0xfff}, + { 0x120830, 1, 0x1f, 0xfff}, + { 0x120838, 1, 0x1f, 0xfff}, + { 0x120840, 1, 0x1f, 0xfff}, + { 0x120848, 1, 0x1f, 0xfff}, + { 0x120850, 1, 0x1f, 0xfff}, + { 0x120858, 1, 0x1f, 0xfff}, + { 0x120860, 1, 0x1f, 0xfff}, + { 0x120868, 1, 0x1f, 0xfff}, + { 0x120870, 1, 0x1f, 0xfff}, + { 0x120878, 1, 0x1f, 0xfff}, + { 0x120880, 1, 0x1f, 0xfff}, + { 0x120888, 1, 0x1f, 0xfff}, + { 0x120890, 1, 0x1f, 0xfff}, + { 0x120898, 1, 0x1f, 0xfff}, + { 0x1208a0, 1, 0x1f, 0xfff}, + { 0x1208a8, 1, 0x1f, 0xfff}, + { 0x1208b0, 1, 0x1f, 0xfff}, + { 0x1208b8, 1, 0x1f, 0xfff}, + { 0x1208c0, 1, 0x1f, 0xfff}, + { 0x1208c8, 1, 0x1f, 0xfff}, + { 0x1208d0, 1, 0x1f, 0xfff}, + { 0x1208d8, 1, 0x1f, 0xfff}, + { 0x1208e0, 1, 0x1f, 0xfff}, + { 0x1208e8, 1, 0x1f, 0xfff}, + { 0x1208f0, 1, 0x1f, 0xfff}, + { 0x1208f8, 1, 0x1f, 0xfff}, + { 0x120900, 1, 0x1f, 0xfff}, + { 0x120908, 1, 0x1f, 0xfff}, + { 0x130030, 1, 0x1c, 0xfff}, + { 0x13004c, 3, 0x1c, 0xfff}, + { 0x130064, 2, 0x1c, 0xfff}, + { 0x13009c, 1, 0x1c, 0x1fff}, + { 0x130130, 1, 0x1c, 0xfff}, + { 0x13016c, 1, 0x1c, 0xfff}, + { 0x130300, 1, 0x1c, 0xfff}, + { 0x130480, 1, 0x1c, 0xfff}, + { 0x14005c, 2, 0xf, 0xfff}, + { 0x1400d0, 2, 0xf, 0xfff}, + { 0x1400e0, 1, 0xf, 0xfff}, + { 0x1401c8, 1, 0xf, 0xfff}, + { 0x140200, 6, 0xf, 0xfff}, + { 0x140338, 7, 0x10, 0xfff}, + { 0x140370, 7, 0x10, 0xfff}, + { 0x15c1bc, 6, 0x10, 0xfff}, + { 0x15c230, 7, 0x10, 0xfff}, + { 0x16101c, 1, 0x1f, 0xfff}, + { 0x16102c, 1, 0x1f, 0x1fff}, + { 0x164014, 2, 0x1f, 0xfff}, + { 0x1640f0, 1, 0x1f, 0xfff}, + { 0x166290, 1, 0x1f, 0xfff}, + { 0x1662a0, 1, 0x1f, 0xfff}, + { 0x1662b0, 1, 0x1f, 0x1fff}, + { 0x166548, 1, 0x1f, 0xfff}, + { 0x166550, 1, 0x1f, 0xfff}, + { 0x166558, 1, 0x1f, 0xfff}, + { 0x168000, 1, 0x1f, 0xfff}, + { 0x168008, 1, 0x1f, 0xfff}, + { 0x168010, 1, 0x1f, 0xfff}, + { 0x168018, 1, 0x1f, 0xfff}, + { 
0x168028, 2, 0x1f, 0xfff}, + { 0x168058, 9, 0x1f, 0xfff}, + { 0x168238, 1, 0x1f, 0xfff}, + { 0x1682d0, 7, 0x1f, 0xfff}, + { 0x168300, 2, 0x3, 0xfff}, + { 0x168308, 65, 0x1f, 0xfff}, + { 0x168410, 2, 0x1f, 0xfff}, + { 0x168438, 1, 0x1f, 0xfff}, + { 0x168448, 1, 0x1f, 0x1fff}, + { 0x168a00, 128, 0x1f, 0xfff}, + { 0x16e200, 128, 0x2, 0xfff}, + { 0x16e404, 2, 0x2, 0xfff}, + { 0x16e584, 64, 0x2, 0xfff}, + { 0x16e684, 2, 0x1e, 0xfff}, + { 0x16e68c, 4, 0x2, 0xfff}, + { 0x16e6fc, 4, 0x1c, 0xfff}, + { 0x16e7ac, 12, 0x10, 0xfff}, + { 0x1700a4, 1, 0x1f, 0xfff}, + { 0x1700ac, 2, 0x1f, 0xfff}, + { 0x1700c0, 1, 0x1f, 0xfff}, + { 0x170174, 1, 0x1f, 0xfff}, + { 0x170184, 1, 0x1f, 0x1fff}, + { 0x1800f4, 1, 0x1f, 0xfff}, + { 0x180104, 1, 0x1f, 0xfff}, + { 0x180114, 1, 0x1f, 0x1fff}, + { 0x180124, 1, 0x1f, 0x1fff}, + { 0x18026c, 1, 0x1f, 0xfff}, + { 0x1802a0, 1, 0x1f, 0xfff}, + { 0x1b8000, 1, 0x1f, 0xfff}, + { 0x1b8040, 1, 0x1f, 0xfff}, + { 0x1b8080, 1, 0x1f, 0xfff}, + { 0x1b80c0, 1, 0x1f, 0xfff}, + { 0x200104, 1, 0x1f, 0xfff}, + { 0x200114, 1, 0x1f, 0xfff}, + { 0x200124, 1, 0x1f, 0x1fff}, + { 0x200134, 1, 0x1f, 0x1fff}, + { 0x20026c, 1, 0x1f, 0xfff}, + { 0x2002a0, 1, 0x1f, 0xfff}, + { 0x238000, 1, 0x1f, 0xfff}, + { 0x238040, 1, 0x1f, 0xfff}, + { 0x238080, 1, 0x1f, 0xfff}, + { 0x2380c0, 1, 0x1f, 0xfff}, + { 0x280104, 1, 0x1f, 0xfff}, + { 0x280114, 1, 0x1f, 0xfff}, + { 0x280124, 1, 0x1f, 0x1fff}, + { 0x280134, 1, 0x1f, 0x1fff}, + { 0x28026c, 1, 0x1f, 0xfff}, + { 0x2802a0, 1, 0x1f, 0xfff}, + { 0x2b8000, 1, 0x1f, 0xfff}, + { 0x2b8040, 1, 0x1f, 0xfff}, + { 0x2b8080, 1, 0x1f, 0xfff}, + { 0x300104, 1, 0x1f, 0xfff}, + { 0x300114, 1, 0x1f, 0xfff}, + { 0x300124, 1, 0x1f, 0x1fff}, + { 0x300134, 1, 0x1f, 0x1fff}, + { 0x30026c, 1, 0x1f, 0xfff}, + { 0x3002a0, 1, 0x1f, 0xfff}, + { 0x338000, 1, 0x1f, 0xfff}, + { 0x338040, 1, 0x1f, 0xfff}, + { 0x338080, 1, 0x1f, 0xfff}, + { 0x3380c0, 1, 0x1f, 0xfff} +}; + +#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs) + +static const u32 read_reg_e1[] = { + 0x1b1000}; + +static const struct wreg_addr wreg_addr_e1 = { + 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff}; + +static const u32 read_reg_e1h[] = { + 0x1b1040, 0x1b1000}; + +static const struct wreg_addr wreg_addr_e1h = { + 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff}; + +static const u32 read_reg_e2[] = { + 0x1b1040, 0x1b1000}; + +static const struct wreg_addr wreg_addr_e2 = { + 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff}; + +static const u32 read_reg_e3[] = { + 0x1b1040, 0x1b1000}; + +static const struct wreg_addr wreg_addr_e3 = { + 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff}; + +static const u32 read_reg_e3b0[] = { + 0x1b1040, 0x1b1000}; + +static const struct wreg_addr wreg_addr_e3b0 = { + 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff}; + +static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = { + {19758, 17543, 26951, 18705, 17287, 26695, 19812, 31367, 40775, 19788, + 25223, 34631, 19074}, + {31750, 18273, 32253, 30697, 18017, 31997, 31804, 32097, 46077, 31780, + 25953, 39933, 35895}, + {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557, + 25608, 41377, 43903}, + {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269, + 25616, 42067, 43903}, + {45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332, + 25679, 42482, 43903} +}; +#endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c new file mode 100644 index 000000000..48ed005ba --- /dev/null +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -0,0 +1,3626 @@ +/* bnx2x_ethtool.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/crc32.h> +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_dump.h" +#include "bnx2x_init.h" + +/* Note: in the format strings below %s is replaced by the queue-name which is + * either its index or 'fcoe' for the fcoe queue. Make sure the format string + * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2 + */ +#define MAX_QUEUE_NAME_LEN 4 +static const struct { + long offset; + int size; + char string[ETH_GSTRING_LEN]; +} bnx2x_q_stats_arr[] = { +/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, + { Q_STATS_OFFSET32(total_unicast_packets_received_hi), + 8, "[%s]: rx_ucast_packets" }, + { Q_STATS_OFFSET32(total_multicast_packets_received_hi), + 8, "[%s]: rx_mcast_packets" }, + { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), + 8, "[%s]: rx_bcast_packets" }, + { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" }, + { Q_STATS_OFFSET32(rx_err_discard_pkt), + 4, "[%s]: rx_phy_ip_err_discards"}, + { Q_STATS_OFFSET32(rx_skb_alloc_failed), + 4, "[%s]: rx_skb_alloc_discard" }, + { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, + + { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, +/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), + 8, "[%s]: tx_ucast_packets" }, + { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), + 8, "[%s]: tx_mcast_packets" }, + { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), + 8, "[%s]: tx_bcast_packets" }, + { Q_STATS_OFFSET32(total_tpa_aggregations_hi), + 8, "[%s]: tpa_aggregations" }, + { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), + 8, "[%s]: tpa_aggregated_frames"}, + { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}, + { Q_STATS_OFFSET32(driver_filtered_tx_pkt), + 4, "[%s]: driver_filtered_tx_pkt" } +}; + +#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) + +static const struct { + long offset; + int size; + u32 flags; +#define STATS_FLAGS_PORT 1 +#define STATS_FLAGS_FUNC 2 +#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) + char string[ETH_GSTRING_LEN]; +} bnx2x_stats_arr[] = { +/* 1 */ { STATS_OFFSET32(total_bytes_received_hi), + 8, STATS_FLAGS_BOTH, "rx_bytes" }, + { STATS_OFFSET32(error_bytes_received_hi), + 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, + { STATS_OFFSET32(total_unicast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, + { STATS_OFFSET32(total_multicast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, + { STATS_OFFSET32(total_broadcast_packets_received_hi), + 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, + { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), + 8, STATS_FLAGS_PORT, "rx_crc_errors" }, + { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), + 8, STATS_FLAGS_PORT, "rx_align_errors" }, + {
STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), + 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, + { STATS_OFFSET32(etherstatsoverrsizepkts_hi), + 8, STATS_FLAGS_PORT, "rx_oversize_packets" }, +/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi), + 8, STATS_FLAGS_PORT, "rx_fragments" }, + { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), + 8, STATS_FLAGS_PORT, "rx_jabbers" }, + { STATS_OFFSET32(no_buff_discard_hi), + 8, STATS_FLAGS_BOTH, "rx_discards" }, + { STATS_OFFSET32(mac_filter_discard), + 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, + { STATS_OFFSET32(mf_tag_discard), + 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, + { STATS_OFFSET32(pfc_frames_received_hi), + 8, STATS_FLAGS_PORT, "pfc_frames_received" }, + { STATS_OFFSET32(pfc_frames_sent_hi), + 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, + { STATS_OFFSET32(brb_drop_hi), + 8, STATS_FLAGS_PORT, "rx_brb_discard" }, + { STATS_OFFSET32(brb_truncate_hi), + 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, + { STATS_OFFSET32(pause_frames_received_hi), + 8, STATS_FLAGS_PORT, "rx_pause_frames" }, + { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), + 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, + { STATS_OFFSET32(nig_timer_max), + 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, +/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt), + 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"}, + { STATS_OFFSET32(rx_skb_alloc_failed), + 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" }, + { STATS_OFFSET32(hw_csum_err), + 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" }, + + { STATS_OFFSET32(total_bytes_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_bytes" }, + { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), + 8, STATS_FLAGS_PORT, "tx_error_bytes" }, + { STATS_OFFSET32(total_unicast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, + { STATS_OFFSET32(total_multicast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, + { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), + 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, + { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), + 8, STATS_FLAGS_PORT, "tx_mac_errors" }, + { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), + 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, +/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), + 8, STATS_FLAGS_PORT, "tx_single_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), + 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), + 8, STATS_FLAGS_PORT, "tx_deferred" }, + { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), + 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, + { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), + 8, STATS_FLAGS_PORT, "tx_late_collisions" }, + { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), + 8, STATS_FLAGS_PORT, "tx_total_collisions" }, + { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), + 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), + 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), + 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, + { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), + 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, +/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), + 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, + { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), + 8, 
STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, + { STATS_OFFSET32(etherstatspktsover1522octets_hi), + 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, + { STATS_OFFSET32(pause_frames_sent_hi), + 8, STATS_FLAGS_PORT, "tx_pause_frames" }, + { STATS_OFFSET32(total_tpa_aggregations_hi), + 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, + { STATS_OFFSET32(total_tpa_aggregated_frames_hi), + 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, + { STATS_OFFSET32(total_tpa_bytes_hi), + 8, STATS_FLAGS_FUNC, "tpa_bytes"}, + { STATS_OFFSET32(recoverable_error), + 4, STATS_FLAGS_FUNC, "recoverable_errors" }, + { STATS_OFFSET32(unrecoverable_error), + 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, + { STATS_OFFSET32(driver_filtered_tx_pkt), + 4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" }, + { STATS_OFFSET32(eee_tx_lpi), + 4, STATS_FLAGS_PORT, "Tx LPI entry count"} +}; + +#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) + +static int bnx2x_get_port_type(struct bnx2x *bp) +{ + int port_type; + u32 phy_idx = bnx2x_get_cur_phy_idx(bp); + switch (bp->link_params.phy[phy_idx].media_type) { + case ETH_PHY_SFPP_10G_FIBER: + case ETH_PHY_SFP_1G_FIBER: + case ETH_PHY_XFP_FIBER: + case ETH_PHY_KR: + case ETH_PHY_CX4: + port_type = PORT_FIBRE; + break; + case ETH_PHY_DA_TWINAX: + port_type = PORT_DA; + break; + case ETH_PHY_BASE_T: + port_type = PORT_TP; + break; + case ETH_PHY_NOT_PRESENT: + port_type = PORT_NONE; + break; + case ETH_PHY_UNSPECIFIED: + default: + port_type = PORT_OTHER; + break; + } + return port_type; +} + +static int bnx2x_get_vf_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->state == BNX2X_STATE_OPEN) { + if (test_bit(BNX2X_LINK_REPORT_FD, + &bp->vf_link_vars.link_report_flags)) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + + ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed); + } else { + cmd->duplex = DUPLEX_UNKNOWN; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + } + + cmd->port = PORT_OTHER; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + + DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" + " supported 0x%x advertising 0x%x speed %u\n" + " duplex %d port %d phy_address %d transceiver %d\n" + " autoneg %d maxtxpkt %d maxrxpkt %d\n", + cmd->cmd, cmd->supported, cmd->advertising, + ethtool_cmd_speed(cmd), + cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, + cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + + return 0; +} + +static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + int cfg_idx = bnx2x_get_link_cfg_idx(bp); + + /* Dual Media boards present all available port types */ + cmd->supported = bp->port.supported[cfg_idx] | + (bp->port.supported[cfg_idx ^ 1] & + (SUPPORTED_TP | SUPPORTED_FIBRE)); + cmd->advertising = bp->port.advertising[cfg_idx]; + if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type == + ETH_PHY_SFP_1G_FIBER) { + cmd->supported &= ~(SUPPORTED_10000baseT_Full); + cmd->advertising &= ~(ADVERTISED_10000baseT_Full); + } + + if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up && + !(bp->flags & MF_FUNC_DIS)) { + cmd->duplex = bp->link_vars.duplex; + + if (IS_MF(bp) && !BP_NOMCP(bp)) + ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); + else + ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed); + } else { + cmd->duplex = DUPLEX_UNKNOWN; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + } + + cmd->port = 
bnx2x_get_port_type(bp); + + cmd->phy_address = bp->mdio.prtad; + cmd->transceiver = XCVR_INTERNAL; + + if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) + cmd->autoneg = AUTONEG_ENABLE; + else + cmd->autoneg = AUTONEG_DISABLE; + + /* Publish LP advertised speeds and FC */ + if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + u32 status = bp->link_vars.link_status; + + cmd->lp_advertising |= ADVERTISED_Autoneg; + if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE) + cmd->lp_advertising |= ADVERTISED_Pause; + if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) + cmd->lp_advertising |= ADVERTISED_Asym_Pause; + + if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_10baseT_Half; + if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_10baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_100baseT_Half; + if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_100baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_1000baseT_Half; + if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_1000baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_2500baseX_Full; + if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_10000baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) + cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; + } + + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + + DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" + " supported 0x%x advertising 0x%x speed %u\n" + " duplex %d port %d phy_address %d transceiver %d\n" + " autoneg %d maxtxpkt %d maxrxpkt %d\n", + cmd->cmd, cmd->supported, cmd->advertising, + ethtool_cmd_speed(cmd), + cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, + cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + + return 0; +} + +static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; + u32 speed, phy_idx; + + if (IS_MF_SD(bp)) + return 0; + + DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n" + " supported 0x%x advertising 0x%x speed %u\n" + " duplex %d port %d phy_address %d transceiver %d\n" + " autoneg %d maxtxpkt %d maxrxpkt %d\n", + cmd->cmd, cmd->supported, cmd->advertising, + ethtool_cmd_speed(cmd), + cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, + cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); + + speed = ethtool_cmd_speed(cmd); + + /* If received a request for an unknown duplex, assume full*/ + if (cmd->duplex == DUPLEX_UNKNOWN) + cmd->duplex = DUPLEX_FULL; + + if (IS_MF_SI(bp)) { + u32 part; + u32 line_speed = bp->link_vars.line_speed; + + /* use 10G if no link detected */ + if (!line_speed) + line_speed = 10000; + + if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) { + DP(BNX2X_MSG_ETHTOOL, + "To set speed BC %X or higher is required, please upgrade BC\n", + REQ_BC_VER_4_SET_MF_BW); + return -EINVAL; + } + + part = (speed * 100) / line_speed; + + if (line_speed < speed || !part) { + DP(BNX2X_MSG_ETHTOOL, + "Speed setting should be in a range from 1%% to 100%% of actual line speed\n"); + return -EINVAL; + } + + if (bp->state != BNX2X_STATE_OPEN) + /* store value for following "load" */ + bp->pending_max = part; + else + 
bnx2x_update_max_mf_config(bp, part); + + return 0; + } + + cfg_idx = bnx2x_get_link_cfg_idx(bp); + old_multi_phy_config = bp->link_params.multi_phy_config; + if (cmd->port != bnx2x_get_port_type(bp)) { + switch (cmd->port) { + case PORT_TP: + if (!(bp->port.supported[0] & SUPPORTED_TP || + bp->port.supported[1] & SUPPORTED_TP)) { + DP(BNX2X_MSG_ETHTOOL, + "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + break; + case PORT_FIBRE: + case PORT_DA: + case PORT_NONE: + if (!(bp->port.supported[0] & SUPPORTED_FIBRE || + bp->port.supported[1] & SUPPORTED_FIBRE)) { + DP(BNX2X_MSG_ETHTOOL, + "Unsupported port type\n"); + return -EINVAL; + } + bp->link_params.multi_phy_config &= + ~PORT_HW_CFG_PHY_SELECTION_MASK; + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + else + bp->link_params.multi_phy_config |= + PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + break; + default: + DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); + return -EINVAL; + } + } + /* Save new config in case command complete successfully */ + new_multi_phy_config = bp->link_params.multi_phy_config; + /* Get the new cfg_idx */ + cfg_idx = bnx2x_get_link_cfg_idx(bp); + /* Restore old config in case command failed */ + bp->link_params.multi_phy_config = old_multi_phy_config; + DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx); + + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 an_supported_speed = bp->port.supported[cfg_idx]; + if (bp->link_params.phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + an_supported_speed |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { + DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n"); + return -EINVAL; + } + + /* advertise the requested speed and duplex if supported */ + if (cmd->advertising & ~an_supported_speed) { + DP(BNX2X_MSG_ETHTOOL, + "Advertisement parameters are not supported\n"); + return -EINVAL; + } + + bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG; + bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg | + cmd->advertising); + if (cmd->advertising) { + + bp->link_params.speed_cap_mask[cfg_idx] = 0; + if (cmd->advertising & ADVERTISED_10baseT_Half) { + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF; + } + if (cmd->advertising & ADVERTISED_10baseT_Full) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL; + + if (cmd->advertising & ADVERTISED_100baseT_Full) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL; + + if (cmd->advertising & ADVERTISED_100baseT_Half) { + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF; + } + if (cmd->advertising & ADVERTISED_1000baseT_Half) { + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; + } + if (cmd->advertising & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseKX_Full)) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G; + + if (cmd->advertising & (ADVERTISED_10000baseT_Full | + 
ADVERTISED_10000baseKX4_Full | + ADVERTISED_10000baseKR_Full)) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G; + + if (cmd->advertising & ADVERTISED_20000baseKR2_Full) + bp->link_params.speed_cap_mask[cfg_idx] |= + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G; + } + } else { /* forced speed */ + /* advertise the requested speed and duplex if supported */ + switch (speed) { + case SPEED_10: + if (cmd->duplex == DUPLEX_FULL) { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_10baseT_Full)) { + DP(BNX2X_MSG_ETHTOOL, + "10M full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10baseT_Full | + ADVERTISED_TP); + } else { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_10baseT_Half)) { + DP(BNX2X_MSG_ETHTOOL, + "10M half not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_TP); + } + break; + + case SPEED_100: + if (cmd->duplex == DUPLEX_FULL) { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_100baseT_Full)) { + DP(BNX2X_MSG_ETHTOOL, + "100M full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_100baseT_Full | + ADVERTISED_TP); + } else { + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_100baseT_Half)) { + DP(BNX2X_MSG_ETHTOOL, + "100M half not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_TP); + } + break; + + case SPEED_1000: + if (cmd->duplex != DUPLEX_FULL) { + DP(BNX2X_MSG_ETHTOOL, + "1G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported[cfg_idx] & + SUPPORTED_1000baseT_Full)) { + DP(BNX2X_MSG_ETHTOOL, + "1G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + break; + + case SPEED_2500: + if (cmd->duplex != DUPLEX_FULL) { + DP(BNX2X_MSG_ETHTOOL, + "2.5G half not supported\n"); + return -EINVAL; + } + + if (!(bp->port.supported[cfg_idx] + & SUPPORTED_2500baseX_Full)) { + DP(BNX2X_MSG_ETHTOOL, + "2.5G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_2500baseX_Full | + ADVERTISED_TP); + break; + + case SPEED_10000: + if (cmd->duplex != DUPLEX_FULL) { + DP(BNX2X_MSG_ETHTOOL, + "10G half not supported\n"); + return -EINVAL; + } + phy_idx = bnx2x_get_cur_phy_idx(bp); + if (!(bp->port.supported[cfg_idx] + & SUPPORTED_10000baseT_Full) || + (bp->link_params.phy[phy_idx].media_type == + ETH_PHY_SFP_1G_FIBER)) { + DP(BNX2X_MSG_ETHTOOL, + "10G full not supported\n"); + return -EINVAL; + } + + advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + break; + + default: + DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed); + return -EINVAL; + } + + bp->link_params.req_line_speed[cfg_idx] = speed; + bp->link_params.req_duplex[cfg_idx] = cmd->duplex; + bp->port.advertising[cfg_idx] = advertising; + } + + DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n" + " req_duplex %d advertising 0x%x\n", + bp->link_params.req_line_speed[cfg_idx], + bp->link_params.req_duplex[cfg_idx], + bp->port.advertising[cfg_idx]); + + /* Set new config */ + bp->link_params.multi_phy_config = new_multi_phy_config; + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + +#define DUMP_ALL_PRESETS 0x1FFF +#define DUMP_MAX_PRESETS 13 + +static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset) +{ + if (CHIP_IS_E1(bp)) + return dump_num_registers[0][preset-1]; + else if (CHIP_IS_E1H(bp)) + return dump_num_registers[1][preset-1]; + else if (CHIP_IS_E2(bp)) + return 
dump_num_registers[2][preset-1]; + else if (CHIP_IS_E3A0(bp)) + return dump_num_registers[3][preset-1]; + else if (CHIP_IS_E3B0(bp)) + return dump_num_registers[4][preset-1]; + else + return 0; +} + +static int __bnx2x_get_regs_len(struct bnx2x *bp) +{ + u32 preset_idx; + int regdump_len = 0; + + /* Calculate the total preset regs length */ + for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) + regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx); + + return regdump_len; +} + +static int bnx2x_get_regs_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + + if (IS_VF(bp)) + return 0; + + regdump_len = __bnx2x_get_regs_len(bp); + regdump_len *= 4; + regdump_len += sizeof(struct dump_header); + + return regdump_len; +} + +#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) +#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) +#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) +#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) +#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) + +#define IS_REG_IN_PRESET(presets, idx) \ + ((presets & (1 << (idx-1))) == (1 << (idx-1))) + +/******* Paged registers info selectors ********/ +static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return page_vals_e2; + else if (CHIP_IS_E3(bp)) + return page_vals_e3; + else + return NULL; +} + +static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return PAGE_MODE_VALUES_E2; + else if (CHIP_IS_E3(bp)) + return PAGE_MODE_VALUES_E3; + else + return 0; +} + +static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return page_write_regs_e2; + else if (CHIP_IS_E3(bp)) + return page_write_regs_e3; + else + return NULL; +} + +static u32 __bnx2x_get_page_write_num(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return PAGE_WRITE_REGS_E2; + else if (CHIP_IS_E3(bp)) + return PAGE_WRITE_REGS_E3; + else + return 0; +} + +static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return page_read_regs_e2; + else if (CHIP_IS_E3(bp)) + return page_read_regs_e3; + else + return NULL; +} + +static u32 __bnx2x_get_page_read_num(struct bnx2x *bp) +{ + if (CHIP_IS_E2(bp)) + return PAGE_READ_REGS_E2; + else if (CHIP_IS_E3(bp)) + return PAGE_READ_REGS_E3; + else + return 0; +} + +static bool bnx2x_is_reg_in_chip(struct bnx2x *bp, + const struct reg_addr *reg_info) +{ + if (CHIP_IS_E1(bp)) + return IS_E1_REG(reg_info->chips); + else if (CHIP_IS_E1H(bp)) + return IS_E1H_REG(reg_info->chips); + else if (CHIP_IS_E2(bp)) + return IS_E2_REG(reg_info->chips); + else if (CHIP_IS_E3A0(bp)) + return IS_E3A0_REG(reg_info->chips); + else if (CHIP_IS_E3B0(bp)) + return IS_E3B0_REG(reg_info->chips); + else + return false; +} + +static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp, + const struct wreg_addr *wreg_info) +{ + if (CHIP_IS_E1(bp)) + return IS_E1_REG(wreg_info->chips); + else if (CHIP_IS_E1H(bp)) + return IS_E1H_REG(wreg_info->chips); + else if (CHIP_IS_E2(bp)) + return IS_E2_REG(wreg_info->chips); + else if (CHIP_IS_E3A0(bp)) + return IS_E3A0_REG(wreg_info->chips); + else if (CHIP_IS_E3B0(bp)) + return IS_E3B0_REG(wreg_info->chips); + else + return false; +} + +/** + * bnx2x_read_pages_regs - read "paged" registers + * + * @bp device handle + * @p output buffer + * + * Reads "paged" memories: memories that may only be read by first writing to a + * specific 
address ("write address") and then reading from a specific address + * ("read address"). There may be more than one write address per "page" and + * more than one read address per write address. + */ +static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset) +{ + u32 i, j, k, n; + + /* addresses of the paged registers */ + const u32 *page_addr = __bnx2x_get_page_addr_ar(bp); + /* number of paged registers */ + int num_pages = __bnx2x_get_page_reg_num(bp); + /* write addresses */ + const u32 *write_addr = __bnx2x_get_page_write_ar(bp); + /* number of write addresses */ + int write_num = __bnx2x_get_page_write_num(bp); + /* read addresses info */ + const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp); + /* number of read addresses */ + int read_num = __bnx2x_get_page_read_num(bp); + u32 addr, size; + + for (i = 0; i < num_pages; i++) { + for (j = 0; j < write_num; j++) { + REG_WR(bp, write_addr[j], page_addr[i]); + + for (k = 0; k < read_num; k++) { + if (IS_REG_IN_PRESET(read_addr[k].presets, + preset)) { + size = read_addr[k].size; + for (n = 0; n < size; n++) { + addr = read_addr[k].addr + n*4; + *p++ = REG_RD(bp, addr); + } + } + } + } + } +} + +static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset) +{ + u32 i, j, addr; + const struct wreg_addr *wreg_addr_p = NULL; + + if (CHIP_IS_E1(bp)) + wreg_addr_p = &wreg_addr_e1; + else if (CHIP_IS_E1H(bp)) + wreg_addr_p = &wreg_addr_e1h; + else if (CHIP_IS_E2(bp)) + wreg_addr_p = &wreg_addr_e2; + else if (CHIP_IS_E3A0(bp)) + wreg_addr_p = &wreg_addr_e3; + else if (CHIP_IS_E3B0(bp)) + wreg_addr_p = &wreg_addr_e3b0; + + /* Read the idle_chk registers */ + for (i = 0; i < IDLE_REGS_COUNT; i++) { + if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) && + IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { + for (j = 0; j < idle_reg_addrs[i].size; j++) + *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4); + } + } + + /* Read the regular registers */ + for (i = 0; i < REGS_COUNT; i++) { + if (bnx2x_is_reg_in_chip(bp, ®_addrs[i]) && + IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { + for (j = 0; j < reg_addrs[i].size; j++) + *p++ = REG_RD(bp, reg_addrs[i].addr + j*4); + } + } + + /* Read the CAM registers */ + if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) && + IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { + for (i = 0; i < wreg_addr_p->size; i++) { + *p++ = REG_RD(bp, wreg_addr_p->addr + i*4); + + /* In case of wreg_addr register, read additional + registers from read_regs array + */ + for (j = 0; j < wreg_addr_p->read_regs_count; j++) { + addr = *(wreg_addr_p->read_regs); + *p++ = REG_RD(bp, addr + j*4); + } + } + } + + /* Paged registers are supported in E2 & E3 only */ + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) { + /* Read "paged" registers */ + bnx2x_read_pages_regs(bp, p, preset); + } + + return 0; +} + +static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) +{ + u32 preset_idx; + + /* Read all registers, by reading all preset registers */ + for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { + /* Skip presets with IOR */ + if ((preset_idx == 2) || + (preset_idx == 5) || + (preset_idx == 8) || + (preset_idx == 11)) + continue; + __bnx2x_get_preset_regs(bp, p, preset_idx); + p += __bnx2x_get_preset_regs_len(bp, preset_idx); + } +} + +static void bnx2x_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) +{ + u32 *p = _p; + struct bnx2x *bp = netdev_priv(dev); + struct dump_header dump_hdr = {0}; + + regs->version = 2; + memset(p, 0, regs->len); + + if 
(!netif_running(bp->dev)) + return; + + /* Disable parity attentions as long as following dump may + * cause false alarms by reading never written registers. We + * will re-enable parity attentions right after the dump. + */ + + bnx2x_disable_blocks_parity(bp); + + dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; + dump_hdr.preset = DUMP_ALL_PRESETS; + dump_hdr.version = BNX2X_DUMP_VERSION; + + /* dump_meta_data presents OR of CHIP and PATH. */ + if (CHIP_IS_E1(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1; + } else if (CHIP_IS_E1H(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1H; + } else if (CHIP_IS_E2(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E2 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3A0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3B0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } + + memcpy(p, &dump_hdr, sizeof(struct dump_header)); + p += dump_hdr.header_size + 1; + + /* Actually read the registers */ + __bnx2x_get_regs(bp, p); + + /* Re-enable parity attentions */ + bnx2x_clear_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); +} + +static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) +{ + struct bnx2x *bp = netdev_priv(dev); + int regdump_len = 0; + + regdump_len = __bnx2x_get_preset_regs_len(bp, preset); + regdump_len *= 4; + regdump_len += sizeof(struct dump_header); + + return regdump_len; +} + +static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* Use the ethtool_dump "flag" field as the dump preset index */ + if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS) + return -EINVAL; + + bp->dump_preset_idx = val->flag; + return 0; +} + +static int bnx2x_get_dump_flag(struct net_device *dev, + struct ethtool_dump *dump) +{ + struct bnx2x *bp = netdev_priv(dev); + + dump->version = BNX2X_DUMP_VERSION; + dump->flag = bp->dump_preset_idx; + /* Calculate the requested preset idx length */ + dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx); + DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n", + bp->dump_preset_idx, dump->len); + return 0; +} + +static int bnx2x_get_dump_data(struct net_device *dev, + struct ethtool_dump *dump, + void *buffer) +{ + u32 *p = buffer; + struct bnx2x *bp = netdev_priv(dev); + struct dump_header dump_hdr = {0}; + + /* Disable parity attentions as long as following dump may + * cause false alarms by reading never written registers. We + * will re-enable parity attentions right after the dump. + */ + + bnx2x_disable_blocks_parity(bp); + + dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1; + dump_hdr.preset = bp->dump_preset_idx; + dump_hdr.version = BNX2X_DUMP_VERSION; + + DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset); + + /* dump_meta_data presents OR of CHIP and PATH. */ + if (CHIP_IS_E1(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1; + } else if (CHIP_IS_E1H(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E1H; + } else if (CHIP_IS_E2(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E2 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3A0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | + (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0); + } else if (CHIP_IS_E3B0(bp)) { + dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | + (BP_PATH(bp) ? 
DUMP_PATH_1 : DUMP_PATH_0); + } + + memcpy(p, &dump_hdr, sizeof(struct dump_header)); + p += dump_hdr.header_size + 1; + + /* Actually read the registers */ + __bnx2x_get_preset_regs(bp, p, dump_hdr.preset); + + /* Re-enable parity attentions */ + bnx2x_clear_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); + + return 0; +} + +static void bnx2x_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct bnx2x *bp = netdev_priv(dev); + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + + bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version)); + + strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + info->n_stats = BNX2X_NUM_STATS; + info->testinfo_len = BNX2X_NUM_TESTS(bp); + info->eedump_len = bp->common.flash_size; + info->regdump_len = bnx2x_get_regs_len(dev); +} + +static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & NO_WOL_FLAG) { + wol->supported = 0; + wol->wolopts = 0; + } else { + wol->supported = WAKE_MAGIC; + if (bp->wol) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + } + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_MAGIC) { + DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n"); + return -EINVAL; + } + + if (wol->wolopts & WAKE_MAGIC) { + if (bp->flags & NO_WOL_FLAG) { + DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n"); + return -EINVAL; + } + bp->wol = 1; + } else + bp->wol = 0; + + return 0; +} + +static u32 bnx2x_get_msglevel(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->msg_enable; +} + +static void bnx2x_set_msglevel(struct net_device *dev, u32 level) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (capable(CAP_NET_ADMIN)) { + /* dump MCP trace */ + if (IS_PF(bp) && (level & BNX2X_MSG_MCP)) + bnx2x_fw_dump_lvl(bp, KERN_INFO); + bp->msg_enable = level; + } +} + +static int bnx2x_nway_reset(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (!bp->port.pmf) + return 0; + + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); + bnx2x_link_set(bp); + } + + return 0; +} + +static u32 bnx2x_get_link(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN)) + return 0; + + if (IS_VF(bp)) + return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &bp->vf_link_vars.link_report_flags); + + return bp->link_vars.link_up; +} + +static int bnx2x_get_eeprom_len(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + return bp->common.flash_size; +} + +/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, + * had we done things the other way around, if two pfs from the same port would + * attempt to access nvram at the same time, we could run into a scenario such + * as: + * pf A takes the port lock. + * pf B succeeds in taking the same lock since they are from the same port. + * pf A takes the per pf misc lock. Performs eeprom access. + * pf A finishes. Unlocks the per pf misc lock. + * Pf B takes the lock and proceeds to perform it's own access. + * pf A unlocks the per port lock, while pf B is still working (!). 
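For readers unfamiliar with the arbitration scheme, bnx2x_acquire_nvram_lock() below uses a common request-and-poll idiom: set the per-port request bit in the NVM software-arbitration register, then poll for the matching grant bit a bounded number of times (with a short delay between reads) before giving up. The following is a minimal standalone sketch of that idiom only; the one-register model, bit layout, and helper names are invented for illustration and are not the real MCP interface.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake register standing in for the NVM software-arbitration block:
 * request bits in the low byte, grant bits in the next byte (invented
 * layout, purely for illustration). */
static uint32_t fake_nvm_sw_arb;

static void reg_write(uint32_t val)
{
	fake_nvm_sw_arb = val;
	/* Pretend the arbiter grants every request immediately. */
	fake_nvm_sw_arb |= val << 8;
}

static uint32_t reg_read(void)
{
	return fake_nvm_sw_arb;
}

/* Request access for @port, then poll up to @tries times for the grant. */
static bool acquire_nvm_arb(int port, int tries)
{
	reg_write(1u << port);
	while (tries--) {
		if (reg_read() & (1u << (8 + port)))
			return true;
		/* the driver udelay()s 5 us between polls; omitted here */
	}
	return false;	/* the driver maps this case to -EBUSY */
}

int main(void)
{
	printf("port 0 arbitration %s\n",
	       acquire_nvm_arb(0, 10) ? "granted" : "timed out");
	return 0;
}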
+ * mcp takes the per port lock and corrupts pf B's access (and/or has it's own + * access corrupted by pf B) + */ +static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int count, i; + u32 val; + + /* acquire HW lock: protect against other PFs in PF Direct Assignment */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* request access to nvram interface */ + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); + + for (i = 0; i < count*10; i++) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); + if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) + break; + + udelay(5); + } + + if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot get access to nvram interface\n"); + return -EBUSY; + } + + return 0; +} + +static int bnx2x_release_nvram_lock(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int count, i; + u32 val; + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* relinquish nvram interface */ + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); + + for (i = 0; i < count*10; i++) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB); + if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) + break; + + udelay(5); + } + + if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot free access to nvram interface\n"); + return -EBUSY; + } + + /* release HW lock: protect against other PFs in PF Direct Assignment */ + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); + return 0; +} + +static void bnx2x_enable_nvram_access(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); + + /* enable both bits, even on read */ + REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, + (val | MCPR_NVM_ACCESS_ENABLE_EN | + MCPR_NVM_ACCESS_ENABLE_WR_EN)); +} + +static void bnx2x_disable_nvram_access(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE); + + /* disable both bits, even after read */ + REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE, + (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | + MCPR_NVM_ACCESS_ENABLE_WR_EN))); +} + +static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, + u32 cmd_flags) +{ + int count, i, rc; + u32 val; + + /* build the command word */ + cmd_flags |= MCPR_NVM_COMMAND_DOIT; + + /* need to clear DONE bit separately */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + + /* address of the NVRAM to read from */ + REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, + (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); + + /* issue a read command */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* wait for completion */ + *ret_val = 0; + rc = -EBUSY; + for (i = 0; i < count; i++) { + udelay(5); + val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); + + if (val & MCPR_NVM_COMMAND_DONE) { + val = REG_RD(bp, MCP_REG_MCPR_NVM_READ); + /* we read nvram data in cpu order + * but ethtool sees it as an array of bytes + * converting to big-endian will do the work + */ + *ret_val = cpu_to_be32(val); + rc = 0; + break; + } + } + if (rc == -EBUSY) + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "nvram read timeout expired\n"); + return rc; +} + +static int 
bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + __be32 val; + + if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Invalid parameter: offset 0x%x buf_size 0x%x\n", + offset, buf_size); + return -EINVAL; + } + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + /* read the first word(s) */ + cmd_flags = MCPR_NVM_COMMAND_FIRST; + while ((buf_size > sizeof(u32)) && (rc == 0)) { + rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); + memcpy(ret_buf, &val, 4); + + /* advance to the next dword */ + offset += sizeof(u32); + ret_buf += sizeof(u32); + buf_size -= sizeof(u32); + cmd_flags = 0; + } + + if (rc == 0) { + cmd_flags |= MCPR_NVM_COMMAND_LAST; + rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags); + memcpy(ret_buf, &val, 4); + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf, + int buf_size) +{ + int rc; + + rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size); + + if (!rc) { + __be32 *be = (__be32 *)buf; + + while ((buf_size -= 4) >= 0) + *buf++ = be32_to_cpu(*be++); + } + + return rc; +} + +static bool bnx2x_is_nvm_accessible(struct bnx2x *bp) +{ + int rc = 1; + u16 pm = 0; + struct net_device *dev = pci_get_drvdata(bp->pdev); + + if (bp->pdev->pm_cap) + rc = pci_read_config_word(bp->pdev, + bp->pdev->pm_cap + PCI_PM_CTRL, &pm); + + if ((rc && !netif_running(dev)) || + (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0))) + return false; + + return true; +} + +static int bnx2x_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *eebuf) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -EAGAIN; + } + + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", + eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, + eeprom->len, eeprom->len); + + /* parameters already validated in ethtool_get_eeprom */ + + return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); +} + +static int bnx2x_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = -EINVAL, phy_idx; + u8 *user_data = data; + unsigned int start_addr = ee->offset, xfer_size = 0; + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -EAGAIN; + } + + phy_idx = bnx2x_get_cur_phy_idx(bp); + + /* Read A0 section */ + if (start_addr < ETH_MODULE_SFF_8079_LEN) { + /* Limit transfer size to the A0 section boundary */ + if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN) + xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr; + else + xfer_size = ee->len; + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + start_addr, + 
xfer_size, + user_data); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n"); + + return -EINVAL; + } + user_data += xfer_size; + start_addr += xfer_size; + } + + /* Read A2 section */ + if ((start_addr >= ETH_MODULE_SFF_8079_LEN) && + (start_addr < ETH_MODULE_SFF_8472_LEN)) { + xfer_size = ee->len - xfer_size; + /* Limit transfer size to the A2 section boundary */ + if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN) + xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr; + start_addr -= ETH_MODULE_SFF_8079_LEN; + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A2, + start_addr, + xfer_size, + user_data); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n"); + return -EINVAL; + } + } + return rc; +} + +static int bnx2x_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct bnx2x *bp = netdev_priv(dev); + int phy_idx, rc; + u8 sff8472_comp, diag_type; + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -EAGAIN; + } + phy_idx = bnx2x_get_cur_phy_idx(bp); + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_SFF_8472_COMP_ADDR, + SFP_EEPROM_SFF_8472_COMP_SIZE, + &sff8472_comp); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n"); + return -EINVAL; + } + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], + &bp->link_params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_DIAG_TYPE_ADDR, + SFP_EEPROM_DIAG_TYPE_SIZE, + &diag_type); + bnx2x_release_phy_lock(bp); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n"); + return -EINVAL; + } + + if (!sff8472_comp || + (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + return 0; +} + +static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, + u32 cmd_flags) +{ + int count, i, rc; + + /* build the command word */ + cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; + + /* need to clear DONE bit separately */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); + + /* write the data */ + REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val); + + /* address of the NVRAM to write to */ + REG_WR(bp, MCP_REG_MCPR_NVM_ADDR, + (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); + + /* issue the write command */ + REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); + + /* adjust timeout for emulation/FPGA */ + count = BNX2X_NVRAM_TIMEOUT_COUNT; + if (CHIP_REV_IS_SLOW(bp)) + count *= 100; + + /* wait for completion */ + rc = -EBUSY; + for (i = 0; i < count; i++) { + udelay(5); + val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND); + if (val & MCPR_NVM_COMMAND_DONE) { + rc = 0; + break; + } + } + + if (rc == -EBUSY) + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "nvram write timeout expired\n"); + return rc; +} + +#define BYTE_OFFSET(offset) (8 * (offset & 0x03)) + +static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + int rc; + u32 cmd_flags, align_offset, val; + __be32 val_be; + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_ETHTOOL | 
BNX2X_MSG_NVM, + "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); + align_offset = (offset & ~0x03); + rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags); + + if (rc == 0) { + /* nvram data is returned as an array of bytes + * convert it back to cpu order + */ + val = be32_to_cpu(val_be); + + val &= ~le32_to_cpu((__force __le32) + (0xff << BYTE_OFFSET(offset))); + val |= le32_to_cpu((__force __le32) + (*data_buf << BYTE_OFFSET(offset))); + + rc = bnx2x_nvram_write_dword(bp, align_offset, val, + cmd_flags); + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, + int buf_size) +{ + int rc; + u32 cmd_flags; + u32 val; + u32 written_so_far; + + if (buf_size == 1) /* ethtool */ + return bnx2x_nvram_write1(bp, offset, data_buf, buf_size); + + if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Invalid parameter: offset 0x%x buf_size 0x%x\n", + offset, buf_size); + return -EINVAL; + } + + if (offset + buf_size > bp->common.flash_size) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n", + offset, buf_size, bp->common.flash_size); + return -EINVAL; + } + + /* request access to nvram interface */ + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + + /* enable access to nvram interface */ + bnx2x_enable_nvram_access(bp); + + written_so_far = 0; + cmd_flags = MCPR_NVM_COMMAND_FIRST; + while ((written_so_far < buf_size) && (rc == 0)) { + if (written_so_far == (buf_size - sizeof(u32))) + cmd_flags |= MCPR_NVM_COMMAND_LAST; + else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0) + cmd_flags |= MCPR_NVM_COMMAND_LAST; + else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0) + cmd_flags |= MCPR_NVM_COMMAND_FIRST; + + memcpy(&val, data_buf, 4); + + /* Notice unlike bnx2x_nvram_read_dword() this will not + * change val using be32_to_cpu(), which causes data to flip + * if the eeprom is read and then written back. This is due + * to tools utilizing this functionality that would break + * if this would be resolved. 
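The single-byte path above (bnx2x_nvram_write1()) works by reading the whole 32-bit word that contains the target offset, replacing only the byte selected by BYTE_OFFSET(), and writing the word back. A minimal standalone sketch of that read-modify-write step, ignoring the driver's endianness conversions:

#include <stdint.h>
#include <stdio.h>

#define BYTE_OFFSET(offset)	(8 * ((offset) & 0x03))

/* Replace the byte at (offset & 3) inside the dword read from the aligned
 * address, leaving the other three bytes untouched. */
static uint32_t patch_byte(uint32_t word, uint32_t offset, uint8_t data)
{
	word &= ~(0xffu << BYTE_OFFSET(offset));
	word |= (uint32_t)data << BYTE_OFFSET(offset);
	return word;
}

int main(void)
{
	/* Patching byte 2 of 0x11223344 with 0xab yields 0x11ab3344. */
	printf("0x%08x\n", patch_byte(0x11223344u, 2, 0xab));
	return 0;
}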
+ */ + rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags); + + /* advance to the next dword */ + offset += sizeof(u32); + data_buf += sizeof(u32); + written_so_far += sizeof(u32); + cmd_flags = 0; + } + + /* disable access to nvram interface */ + bnx2x_disable_nvram_access(bp); + bnx2x_release_nvram_lock(bp); + + return rc; +} + +static int bnx2x_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *eebuf) +{ + struct bnx2x *bp = netdev_priv(dev); + int port = BP_PORT(bp); + int rc = 0; + u32 ext_phy_config; + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -EAGAIN; + } + + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" + " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", + eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, + eeprom->len, eeprom->len); + + /* parameters already validated in ethtool_set_eeprom */ + + /* PHY eeprom can be accessed only by the PMF */ + if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) && + !bp->port.pmf) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "wrong magic or interface is not pmf\n"); + return -EINVAL; + } + + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + + if (eeprom->magic == 0x50485950) { + /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + bnx2x_acquire_phy_lock(bp); + rc |= bnx2x_link_reset(&bp->link_params, + &bp->link_vars, 0); + if (XGXS_EXT_PHY_TYPE(ext_phy_config) == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_HIGH, port); + bnx2x_release_phy_lock(bp); + bnx2x_link_report(bp); + + } else if (eeprom->magic == 0x50485952) { + /* 'PHYR' (0x50485952): re-init link after FW upgrade */ + if (bp->state == BNX2X_STATE_OPEN) { + bnx2x_acquire_phy_lock(bp); + rc |= bnx2x_link_reset(&bp->link_params, + &bp->link_vars, 1); + + rc |= bnx2x_phy_init(&bp->link_params, + &bp->link_vars); + bnx2x_release_phy_lock(bp); + bnx2x_calc_fc_adv(bp); + } + } else if (eeprom->magic == 0x53985943) { + /* 'PHYC' (0x53985943): PHY FW upgrade completed */ + if (XGXS_EXT_PHY_TYPE(ext_phy_config) == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { + + /* DSP Remove Download Mode */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, + MISC_REGISTERS_GPIO_LOW, port); + + bnx2x_acquire_phy_lock(bp); + + bnx2x_sfx7101_sp_sw_reset(bp, + &bp->link_params.phy[EXT_PHY1]); + + /* wait 0.5 sec to allow it to run */ + msleep(500); + bnx2x_ext_phy_hw_reset(bp, port); + msleep(500); + bnx2x_release_phy_lock(bp); + } + } else + rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); + + return rc; +} + +static int bnx2x_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnx2x *bp = netdev_priv(dev); + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + + coal->rx_coalesce_usecs = bp->rx_ticks; + coal->tx_coalesce_usecs = bp->tx_ticks; + + return 0; +} + +static int bnx2x_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct bnx2x *bp = netdev_priv(dev); + + bp->rx_ticks = (u16)coal->rx_coalesce_usecs; + if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT; + + bp->tx_ticks = (u16)coal->tx_coalesce_usecs; + if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT) + bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT; + + if (netif_running(dev)) + bnx2x_update_coalesce(bp); + + return 0; +} + +static void 
bnx2x_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnx2x *bp = netdev_priv(dev); + + ering->rx_max_pending = MAX_RX_AVAIL; + + if (bp->rx_ring_size) + ering->rx_pending = bp->rx_ring_size; + else + ering->rx_pending = MAX_RX_AVAIL; + + ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; + ering->tx_pending = bp->tx_ring_size; +} + +static int bnx2x_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bnx2x *bp = netdev_priv(dev); + + DP(BNX2X_MSG_ETHTOOL, + "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", + ering->rx_pending, ering->tx_pending); + + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "VFs are enabled, can not change ring parameters\n"); + return -EPERM; + } + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + DP(BNX2X_MSG_ETHTOOL, + "Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + if ((ering->rx_pending > MAX_RX_AVAIL) || + (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : + MIN_RX_SIZE_TPA)) || + (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) || + (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); + return -EINVAL; + } + + bp->rx_ring_size = ering->rx_pending; + bp->tx_ring_size = ering->tx_pending; + + return bnx2x_reload_if_running(dev); +} + +static void bnx2x_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnx2x *bp = netdev_priv(dev); + int cfg_idx = bnx2x_get_link_cfg_idx(bp); + int cfg_reg; + + epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] == + BNX2X_FLOW_CTRL_AUTO); + + if (!epause->autoneg) + cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx]; + else + cfg_reg = bp->link_params.req_fc_auto_adv; + + epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) == + BNX2X_FLOW_CTRL_RX); + epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) == + BNX2X_FLOW_CTRL_TX); + + DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n" + " autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); +} + +static int bnx2x_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 cfg_idx = bnx2x_get_link_cfg_idx(bp); + if (IS_MF(bp)) + return 0; + + DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n" + " autoneg %d rx_pause %d tx_pause %d\n", + epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); + + bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; + + if (epause->rx_pause) + bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX; + + if (epause->tx_pause) + bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX; + + if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO) + bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE; + + if (epause->autoneg) { + if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) { + DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n"); + return -EINVAL; + } + + if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) { + bp->link_params.req_flow_ctrl[cfg_idx] = + BNX2X_FLOW_CTRL_AUTO; + } + bp->link_params.req_fc_auto_adv = 0; + if (epause->rx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX; + + if (epause->tx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX; + + if (!bp->link_params.req_fc_auto_adv) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE; + } + + DP(BNX2X_MSG_ETHTOOL, 
+ "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]); + + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_link_set(bp); + } + + return 0; +} + +static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = { + "register_test (offline) ", + "memory_test (offline) ", + "int_loopback_test (offline)", + "ext_loopback_test (offline)", + "nvram_test (online) ", + "interrupt_test (online) ", + "link_test (online) " +}; + +enum { + BNX2X_PRI_FLAG_ISCSI, + BNX2X_PRI_FLAG_FCOE, + BNX2X_PRI_FLAG_STORAGE, + BNX2X_PRI_FLAG_LEN, +}; + +static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { + "iSCSI offload support", + "FCoE offload support", + "Storage only interface" +}; + +static u32 bnx2x_eee_to_adv(u32 eee_adv) +{ + u32 modes = 0; + + if (eee_adv & SHMEM_EEE_100M_ADV) + modes |= ADVERTISED_100baseT_Full; + if (eee_adv & SHMEM_EEE_1G_ADV) + modes |= ADVERTISED_1000baseT_Full; + if (eee_adv & SHMEM_EEE_10G_ADV) + modes |= ADVERTISED_10000baseT_Full; + + return modes; +} + +static u32 bnx2x_adv_to_eee(u32 modes, u32 shift) +{ + u32 eee_adv = 0; + if (modes & ADVERTISED_100baseT_Full) + eee_adv |= SHMEM_EEE_100M_ADV; + if (modes & ADVERTISED_1000baseT_Full) + eee_adv |= SHMEM_EEE_1G_ADV; + if (modes & ADVERTISED_10000baseT_Full) + eee_adv |= SHMEM_EEE_10G_ADV; + + return eee_adv << shift; +} + +static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 eee_cfg; + + if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { + DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n"); + return -EOPNOTSUPP; + } + + eee_cfg = bp->link_vars.eee_status; + + edata->supported = + bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> + SHMEM_EEE_SUPPORTED_SHIFT); + + edata->advertised = + bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >> + SHMEM_EEE_ADV_STATUS_SHIFT); + edata->lp_advertised = + bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >> + SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + /* SHMEM value is in 16u units --> Convert to 1u units. */ + edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4; + + edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0; + edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0; + edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 
1 : 0; + + return 0; +} + +static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 eee_cfg; + u32 advertised; + + if (IS_MF(bp)) + return 0; + + if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) { + DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n"); + return -EOPNOTSUPP; + } + + eee_cfg = bp->link_vars.eee_status; + + if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) { + DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + advertised = bnx2x_adv_to_eee(edata->advertised, + SHMEM_EEE_ADV_STATUS_SHIFT); + if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) { + DP(BNX2X_MSG_ETHTOOL, + "Direct manipulation of EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) { + DP(BNX2X_MSG_ETHTOOL, + "Maximal Tx Lpi timer supported is %x(u)\n", + EEE_MODE_TIMER_MASK); + return -EINVAL; + } + if (edata->tx_lpi_enabled && + (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) { + DP(BNX2X_MSG_ETHTOOL, + "Minimal Tx Lpi timer supported is %d(u)\n", + EEE_MODE_NVRAM_AGGRESSIVE_TIME); + return -EINVAL; + } + + /* All is well; Apply changes*/ + if (edata->eee_enabled) + bp->link_params.eee_mode |= EEE_MODE_ADV_LPI; + else + bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI; + + if (edata->tx_lpi_enabled) + bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI; + else + bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI; + + bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK; + bp->link_params.eee_mode |= (edata->tx_lpi_timer & + EEE_MODE_TIMER_MASK) | + EEE_MODE_OVERRIDE_NVRAM | + EEE_MODE_OUTPUT_TIME; + + /* Restart link to propagate changes */ + if (netif_running(dev)) { + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); + bnx2x_link_set(bp); + } + + return 0; +} + +enum { + BNX2X_CHIP_E1_OFST = 0, + BNX2X_CHIP_E1H_OFST, + BNX2X_CHIP_E2_OFST, + BNX2X_CHIP_E3_OFST, + BNX2X_CHIP_E3B0_OFST, + BNX2X_CHIP_MAX_OFST +}; + +#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST) +#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST) +#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST) +#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST) +#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST) + +#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1) +#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H) + +static int bnx2x_test_registers(struct bnx2x *bp) +{ + int idx, i, rc = -ENODEV; + u32 wr_val = 0, hw; + int port = BP_PORT(bp); + static const struct { + u32 hw; + u32 offset0; + u32 offset1; + u32 mask; + } reg_tbl[] = { +/* 0 */ { BNX2X_CHIP_MASK_ALL, + BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, + { BNX2X_CHIP_MASK_ALL, + DORQ_REG_DB_ADDR0, 4, 0xffffffff }, + { BNX2X_CHIP_MASK_E1X, + HC_REG_AGG_INT_0, 4, 0x000003ff }, + { BNX2X_CHIP_MASK_ALL, + PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3, + PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, + { BNX2X_CHIP_MASK_E3B0, + PBF_REG_INIT_CRD_Q0, 4, 0x000007ff }, + { BNX2X_CHIP_MASK_ALL, + PRS_REG_CID_PORT_0, 4, 0x00ffffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, +/* 10 */ { BNX2X_CHIP_MASK_ALL, + PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + 
QM_REG_CONNNUM_0, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + SRC_REG_KEYRSS0_0, 40, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + SRC_REG_KEYRSS0_7, 40, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, +/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, +/* 30 */ { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001}, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, + + { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } + }; + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return rc; + } + + if (CHIP_IS_E1(bp)) + hw = BNX2X_CHIP_MASK_E1; + else if (CHIP_IS_E1H(bp)) + hw = BNX2X_CHIP_MASK_E1H; + else if (CHIP_IS_E2(bp)) + hw = BNX2X_CHIP_MASK_E2; + else if (CHIP_IS_E3B0(bp)) + hw = BNX2X_CHIP_MASK_E3B0; + else /* e3 A0 */ + hw = BNX2X_CHIP_MASK_E3; + + /* Repeat the test twice: + * First by writing 0x00000000, second by writing 0xffffffff + */ + for (idx = 0; idx < 2; idx++) { + + switch (idx) { + case 0: + wr_val = 0; + break; + case 1: + wr_val = 0xffffffff; + break; + } + + for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { + u32 offset, mask, save_val, val; + if (!(hw & reg_tbl[i].hw)) + continue; + + offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; + mask = reg_tbl[i].mask; + + save_val = REG_RD(bp, offset); + + REG_WR(bp, offset, wr_val & mask); + + val = REG_RD(bp, offset); + + /* Restore the original register's value */ + REG_WR(bp, offset, save_val); + + /* verify value is as expected */ + if ((val & mask) != (wr_val & mask)) { + DP(BNX2X_MSG_ETHTOOL, + "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", + offset, val, wr_val, mask); + goto test_reg_exit; + } + } + } + + rc = 0; + +test_reg_exit: + return rc; +} + +static int bnx2x_test_memory(struct bnx2x *bp) +{ + int i, j, rc = -ENODEV; + u32 val, index; + static const struct { + u32 offset; + int size; + } mem_tbl[] = { + { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE }, + { 
CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE }, + { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE }, + { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE }, + { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE }, + { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE }, + { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE }, + + { 0xffffffff, 0 } + }; + + static const struct { + char *name; + u32 offset; + u32 hw_mask[BNX2X_CHIP_MAX_OFST]; + } prty_tbl[] = { + { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, + {0x2, 0x2, 0, 0} }, + { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, + {0, 0, 0, 0} }, + { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, + {0x3ffc1, 0, 0, 0} }, + + { NULL, 0xffffffff, {0, 0, 0, 0} } + }; + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return rc; + } + + if (CHIP_IS_E1(bp)) + index = BNX2X_CHIP_E1_OFST; + else if (CHIP_IS_E1H(bp)) + index = BNX2X_CHIP_E1H_OFST; + else if (CHIP_IS_E2(bp)) + index = BNX2X_CHIP_E2_OFST; + else /* e3 */ + index = BNX2X_CHIP_E3_OFST; + + /* pre-Check the parity status */ + for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { + val = REG_RD(bp, prty_tbl[i].offset); + if (val & ~(prty_tbl[i].hw_mask[index])) { + DP(BNX2X_MSG_ETHTOOL, + "%s is 0x%x\n", prty_tbl[i].name, val); + goto test_mem_exit; + } + } + + /* Go through all the memories */ + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) + for (j = 0; j < mem_tbl[i].size; j++) + REG_RD(bp, mem_tbl[i].offset + j*4); + + /* Check the parity status */ + for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { + val = REG_RD(bp, prty_tbl[i].offset); + if (val & ~(prty_tbl[i].hw_mask[index])) { + DP(BNX2X_MSG_ETHTOOL, + "%s is 0x%x\n", prty_tbl[i].name, val); + goto test_mem_exit; + } + } + + rc = 0; + +test_mem_exit: + return rc; +} + +static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) +{ + int cnt = 1400; + + if (link_up) { + while (bnx2x_link_test(bp, is_serdes) && cnt--) + msleep(20); + + if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) + DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n"); + + cnt = 1400; + while (!bp->link_vars.link_up && cnt--) + msleep(20); + + if (cnt <= 0 && !bp->link_vars.link_up) + DP(BNX2X_MSG_ETHTOOL, + "Timeout waiting for link init\n"); + } +} + +static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) +{ + unsigned int pkt_size, num_pkts, i; + struct sk_buff *skb; + unsigned char *packet; + struct bnx2x_fastpath *fp_rx = &bp->fp[0]; + struct bnx2x_fastpath *fp_tx = &bp->fp[0]; + struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0]; + u16 tx_start_idx, tx_idx; + u16 rx_start_idx, rx_idx; + u16 pkt_prod, bd_prod; + struct sw_tx_bd *tx_buf; + struct eth_tx_start_bd *tx_start_bd; + dma_addr_t mapping; + union eth_rx_cqe *cqe; + u8 cqe_fp_flags, cqe_fp_type; + struct sw_rx_bd *rx_buf; + u16 len; + int rc = -ENODEV; + u8 *data; + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, + txdata->txq_index); + + /* check the loopback mode */ + switch (loopback_mode) { + case BNX2X_PHY_LOOPBACK: + if (bp->link_params.loopback_mode != LOOPBACK_XGXS) { + DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n"); + return -EINVAL; + } + break; + case BNX2X_MAC_LOOPBACK: + if (CHIP_IS_E3(bp)) { + int cfg_idx = bnx2x_get_link_cfg_idx(bp); + if (bp->port.supported[cfg_idx] & 
+ (SUPPORTED_10000baseT_Full | + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_20000baseKR2_Full)) + bp->link_params.loopback_mode = LOOPBACK_XMAC; + else + bp->link_params.loopback_mode = LOOPBACK_UMAC; + } else + bp->link_params.loopback_mode = LOOPBACK_BMAC; + + bnx2x_phy_init(&bp->link_params, &bp->link_vars); + break; + case BNX2X_EXT_LOOPBACK: + if (bp->link_params.loopback_mode != LOOPBACK_EXT) { + DP(BNX2X_MSG_ETHTOOL, + "Can't configure external loopback\n"); + return -EINVAL; + } + break; + default: + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); + return -EINVAL; + } + + /* prepare the loopback packet */ + pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? + bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); + skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); + if (!skb) { + DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n"); + rc = -ENOMEM; + goto test_loopback_exit; + } + packet = skb_put(skb, pkt_size); + memcpy(packet, bp->dev->dev_addr, ETH_ALEN); + eth_zero_addr(packet + ETH_ALEN); + memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); + for (i = ETH_HLEN; i < pkt_size; i++) + packet[i] = (unsigned char) (i & 0xff); + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + rc = -ENOMEM; + dev_kfree_skb(skb); + DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n"); + goto test_loopback_exit; + } + + /* send the loopback packet */ + num_pkts = 0; + tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb); + rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); + + netdev_tx_sent_queue(txq, skb->len); + + pkt_prod = txdata->tx_pkt_prod++; + tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; + tx_buf->first_bd = txdata->tx_bd_prod; + tx_buf->skb = skb; + tx_buf->flags = 0; + + bd_prod = TX_BD(txdata->tx_bd_prod); + tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; + tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ + tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); + tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_HDR_NBDS, + 1); + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_PARSE_NBDS, + 0); + + /* turn on parsing and get a BD */ + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); + + if (CHIP_IS_E1x(bp)) { + u16 global_data = 0; + struct eth_tx_parse_bd_e1x *pbd_e1x = + &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; + memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); + SET_FLAG(global_data, + ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS); + pbd_e1x->global_data = cpu_to_le16(global_data); + } else { + u32 parsing_data = 0; + struct eth_tx_parse_bd_e2 *pbd_e2 = + &txdata->tx_desc_ring[bd_prod].parse_bd_e2; + memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); + SET_FLAG(parsing_data, + ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS); + pbd_e2->parsing_data = cpu_to_le32(parsing_data); + } + wmb(); + + txdata->tx_db.data.prod += 2; + barrier(); + DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + + mmiowb(); + barrier(); + + num_pkts++; + txdata->tx_bd_prod += 2; /* start + pbd */ + + udelay(100); + + tx_idx = le16_to_cpu(*txdata->tx_cons_sb); + if (tx_idx != tx_start_idx + num_pkts) + goto test_loopback_exit; + + /* Unlike HC IGU won't generate an interrupt for status block + * updates that have been performed while interrupts were + * 
disabled. + */ + if (bp->common.int_block == INT_BLOCK_IGU) { + /* Disable local BHes to prevent a dead-lock situation between + * sch_direct_xmit() and bnx2x_run_loopback() (calling + * bnx2x_tx_int()), as both are taking netif_tx_lock(). + */ + local_bh_disable(); + bnx2x_tx_int(bp, txdata); + local_bh_enable(); + } + + rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); + if (rx_idx != rx_start_idx + num_pkts) + goto test_loopback_exit; + + cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; + cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) + goto test_loopback_rx_exit; + + len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len); + if (len != pkt_size) + goto test_loopback_rx_exit; + + rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp_rx->rx_buf_size, DMA_FROM_DEVICE); + data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset; + for (i = ETH_HLEN; i < pkt_size; i++) + if (*(data + i) != (unsigned char) (i & 0xff)) + goto test_loopback_rx_exit; + + rc = 0; + +test_loopback_rx_exit: + + fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons); + fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod); + fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons); + fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod); + + /* Update producers */ + bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod, + fp_rx->rx_sge_prod); + +test_loopback_exit: + bp->link_params.loopback_mode = LOOPBACK_NONE; + + return rc; +} + +static int bnx2x_test_loopback(struct bnx2x *bp) +{ + int rc = 0, res; + + if (BP_NOMCP(bp)) + return rc; + + if (!netif_running(bp->dev)) + return BNX2X_LOOPBACK_FAILED; + + bnx2x_netif_stop(bp, 1); + bnx2x_acquire_phy_lock(bp); + + res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); + if (res) { + DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res); + rc |= BNX2X_PHY_LOOPBACK_FAILED; + } + + res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); + if (res) { + DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res); + rc |= BNX2X_MAC_LOOPBACK_FAILED; + } + + bnx2x_release_phy_lock(bp); + bnx2x_netif_start(bp); + + return rc; +} + +static int bnx2x_test_ext_loopback(struct bnx2x *bp) +{ + int rc; + u8 is_serdes = + (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; + + if (BP_NOMCP(bp)) + return -ENODEV; + + if (!netif_running(bp->dev)) + return BNX2X_EXT_LOOPBACK_FAILED; + + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); + rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); + if (rc) { + DP(BNX2X_MSG_ETHTOOL, + "Can't perform self-test, nic_load (for external lb) failed\n"); + return -ENODEV; + } + bnx2x_wait_for_link(bp, 1, is_serdes); + + bnx2x_netif_stop(bp, 1); + + rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK); + if (rc) + DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc); + + bnx2x_netif_start(bp); + + return rc; +} + +struct code_entry { + u32 sram_start_addr; + u32 code_attribute; +#define CODE_IMAGE_TYPE_MASK 0xf0800003 +#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003 +#define CODE_IMAGE_LENGTH_MASK 0x007ffffc +#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000 + u32 nvm_start_addr; +}; + +#define CODE_ENTRY_MAX 16 +#define CODE_ENTRY_EXTENDED_DIR_IDX 15 +#define MAX_IMAGES_IN_EXTENDED_DIR 64 +#define NVRAM_DIR_OFFSET 0x14 + +#define EXTENDED_DIR_EXISTS(code) \ + ((code & CODE_IMAGE_TYPE_MASK) == 
CODE_IMAGE_TYPE_EXTENDED_DIR && \ + (code & CODE_IMAGE_LENGTH_MASK) != 0) + +#define CRC32_RESIDUAL 0xdebb20e3 +#define CRC_BUFF_SIZE 256 + +static int bnx2x_nvram_crc(struct bnx2x *bp, + int offset, + int size, + u8 *buff) +{ + u32 crc = ~0; + int rc = 0, done = 0; + + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size); + + while (done < size) { + int count = min_t(int, size - done, CRC_BUFF_SIZE); + + rc = bnx2x_nvram_read(bp, offset + done, buff, count); + + if (rc) + return rc; + + crc = crc32_le(crc, buff, count); + done += count; + } + + if (crc != CRC32_RESIDUAL) + rc = -EINVAL; + + return rc; +} + +static int bnx2x_test_nvram_dir(struct bnx2x *bp, + struct code_entry *entry, + u8 *buff) +{ + size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK; + u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK; + int rc; + + /* Zero-length images and AFEX profiles do not have CRC */ + if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA) + return 0; + + rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff); + if (rc) + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "image %x has failed crc test (rc %d)\n", type, rc); + + return rc; +} + +static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff) +{ + int rc; + struct code_entry entry; + + rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry)); + if (rc) + return rc; + + return bnx2x_test_nvram_dir(bp, &entry, buff); +} + +static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff) +{ + u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET; + struct code_entry entry; + int i; + + rc = bnx2x_nvram_read32(bp, + dir_offset + + sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX, + (u32 *)&entry, sizeof(entry)); + if (rc) + return rc; + + if (!EXTENDED_DIR_EXISTS(entry.code_attribute)) + return 0; + + rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr, + &cnt, sizeof(u32)); + if (rc) + return rc; + + dir_offset = entry.nvm_start_addr + 8; + + for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) { + rc = bnx2x_test_dir_entry(bp, dir_offset + + sizeof(struct code_entry) * i, + buff); + if (rc) + return rc; + } + + return 0; +} + +static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff) +{ + u32 rc, dir_offset = NVRAM_DIR_OFFSET; + int i; + + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n"); + + for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) { + rc = bnx2x_test_dir_entry(bp, dir_offset + + sizeof(struct code_entry) * i, + buff); + if (rc) + return rc; + } + + return bnx2x_test_nvram_ext_dirs(bp, buff); +} + +struct crc_pair { + int offset; + int size; +}; + +static int bnx2x_test_nvram_tbl(struct bnx2x *bp, + const struct crc_pair *nvram_tbl, u8 *buf) +{ + int i; + + for (i = 0; nvram_tbl[i].size; i++) { + int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset, + nvram_tbl[i].size, buf); + if (rc) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "nvram_tbl[%d] has failed crc test (rc %d)\n", + i, rc); + return rc; + } + } + + return 0; +} + +static int bnx2x_test_nvram(struct bnx2x *bp) +{ + const struct crc_pair nvram_tbl[] = { + { 0, 0x14 }, /* bootstrap */ + { 0x14, 0xec }, /* dir */ + { 0x100, 0x350 }, /* manuf_info */ + { 0x450, 0xf0 }, /* feature_info */ + { 0x640, 0x64 }, /* upgrade_key_info */ + { 0x708, 0x70 }, /* manuf_key_info */ + { 0, 0 } + }; + const struct crc_pair nvram_tbl2[] = { + { 0x7e8, 0x350 }, /* manuf_info2 */ + { 0xb38, 0xf0 }, /* feature_info */ + { 0, 0 } + }; + + u8 *buf; + int rc; + u32 magic; + + if (BP_NOMCP(bp)) + return 0; + + buf = 
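	/* An explanatory aside (not from the original driver) on the CRC
	 * checks driving this test: each NVRAM region is stored together
	 * with its own CRC-32, so bnx2x_nvram_crc() simply runs crc32_le()
	 * seeded with ~0 over the region *including* that trailing CRC.
	 * For intact data the CRC register is then left at the well-known
	 * residual 0xdebb20e3 (CRC32_RESIDUAL above); with the conventional
	 * final XOR of ~0 the same check would compare against 0x2144df1c.
	 * Any corruption changes the residual and the helper returns
	 * -EINVAL.
	 */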
kmalloc(CRC_BUFF_SIZE, GFP_KERNEL); + if (!buf) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n"); + rc = -ENOMEM; + goto test_nvram_exit; + } + + rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic)); + if (rc) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "magic value read (rc %d)\n", rc); + goto test_nvram_exit; + } + + if (magic != 0x669955aa) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "wrong magic value (0x%08x)\n", magic); + rc = -ENODEV; + goto test_nvram_exit; + } + + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n"); + rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf); + if (rc) + goto test_nvram_exit; + + if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) { + u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_HIDE_PORT1; + + if (!hide) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Port 1 CRC test-set\n"); + rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf); + if (rc) + goto test_nvram_exit; + } + } + + rc = bnx2x_test_nvram_dirs(bp, buf); + +test_nvram_exit: + kfree(buf); + return rc; +} + +/* Send an EMPTY ramrod on the first queue */ +static int bnx2x_test_intr(struct bnx2x *bp) +{ + struct bnx2x_queue_state_params params = {NULL}; + + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -ENODEV; + } + + params.q_obj = &bp->sp_objs->q_obj; + params.cmd = BNX2X_Q_CMD_EMPTY; + + __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); + + return bnx2x_queue_state_change(bp, ¶ms); +} + +static void bnx2x_self_test(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + u8 is_serdes, link_up; + int rc, cnt = 0; + + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "VFs are enabled, can not perform self test\n"); + return; + } + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + netdev_err(bp->dev, + "Handling parity error recovery. 
Try again later\n"); + etest->flags |= ETH_TEST_FL_FAILED; + return; + } + + DP(BNX2X_MSG_ETHTOOL, + "Self-test command parameters: offline = %d, external_lb = %d\n", + (etest->flags & ETH_TEST_FL_OFFLINE), + (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2); + + memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); + + if (bnx2x_test_nvram(bp) != 0) { + if (!IS_MF(bp)) + buf[4] = 1; + else + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (!netif_running(dev)) { + DP(BNX2X_MSG_ETHTOOL, "Interface is down\n"); + return; + } + + is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; + link_up = bp->link_vars.link_up; + /* offline tests are not supported in MF mode */ + if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) { + int port = BP_PORT(bp); + u32 val; + + /* save current value of input enable for TX port IF */ + val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4); + /* disable input for TX port IF */ + REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); + + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); + rc = bnx2x_nic_load(bp, LOAD_DIAG); + if (rc) { + etest->flags |= ETH_TEST_FL_FAILED; + DP(BNX2X_MSG_ETHTOOL, + "Can't perform self-test, nic_load (for offline) failed\n"); + return; + } + + /* wait until link state is restored */ + bnx2x_wait_for_link(bp, 1, is_serdes); + + if (bnx2x_test_registers(bp) != 0) { + buf[0] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + if (bnx2x_test_memory(bp) != 0) { + buf[1] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + buf[2] = bnx2x_test_loopback(bp); /* internal LB */ + if (buf[2] != 0) + etest->flags |= ETH_TEST_FL_FAILED; + + if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) { + buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */ + if (buf[3] != 0) + etest->flags |= ETH_TEST_FL_FAILED; + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); + + /* restore input for TX port IF */ + REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + if (rc) { + etest->flags |= ETH_TEST_FL_FAILED; + DP(BNX2X_MSG_ETHTOOL, + "Can't perform self-test, nic_load (for online) failed\n"); + return; + } + /* wait until link state is restored */ + bnx2x_wait_for_link(bp, link_up, is_serdes); + } + + if (bnx2x_test_intr(bp) != 0) { + if (!IS_MF(bp)) + buf[5] = 1; + else + buf[1] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (link_up) { + cnt = 100; + while (bnx2x_link_test(bp, is_serdes) && --cnt) + msleep(20); + } + + if (!cnt) { + if (!IS_MF(bp)) + buf[6] = 1; + else + buf[2] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } +} + +#define IS_PORT_STAT(i) \ + ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) +#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) +#define HIDE_PORT_STAT(bp) \ + ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ + IS_VF(bp)) + +/* ethtool statistics are displayed for all regular ethernet queues and the + * fcoe L2 queue if not disabled + */ +static int bnx2x_num_stat_queues(struct bnx2x *bp) +{ + return BNX2X_NUM_ETH_QUEUES(bp); +} + +static int bnx2x_get_sset_count(struct net_device *dev, int stringset) +{ + struct bnx2x *bp = netdev_priv(dev); + int i, num_strings = 0; + + switch (stringset) { + case ETH_SS_STATS: + if (is_multi(bp)) { + num_strings = bnx2x_num_stat_queues(bp) * + BNX2X_NUM_Q_STATS; + } else + num_strings = 0; + if (HIDE_PORT_STAT(bp)) { + for (i = 0; i < BNX2X_NUM_STATS; i++) + if (IS_FUNC_STAT(i)) + num_strings++; + } else + num_strings += BNX2X_NUM_STATS; + + 
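		/* Counting note: at this point num_strings covers
		 * BNX2X_NUM_Q_STATS entries per ethtool-visible RSS queue
		 * (in the multi-queue case) plus the global statistics:
		 * either all BNX2X_NUM_STATS of them, or only the
		 * per-function subset when HIDE_PORT_STAT() filters out the
		 * port-wide counters (MF mode without BNX2X_MSG_STATS, or a
		 * VF).  bnx2x_get_strings() and bnx2x_get_ethtool_stats()
		 * below apply the same filter so the three callbacks stay
		 * consistent.
		 */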
return num_strings; + + case ETH_SS_TEST: + return BNX2X_NUM_TESTS(bp); + + case ETH_SS_PRIV_FLAGS: + return BNX2X_PRI_FLAG_LEN; + + default: + return -EINVAL; + } +} + +static u32 bnx2x_get_private_flags(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 flags = 0; + + flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI; + flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE; + flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE; + + return flags; +} + +static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + int i, j, k, start; + char queue_name[MAX_QUEUE_NAME_LEN+1]; + + switch (stringset) { + case ETH_SS_STATS: + k = 0; + if (is_multi(bp)) { + for_each_eth_queue(bp, i) { + memset(queue_name, 0, sizeof(queue_name)); + sprintf(queue_name, "%d", i); + for (j = 0; j < BNX2X_NUM_Q_STATS; j++) + snprintf(buf + (k + j)*ETH_GSTRING_LEN, + ETH_GSTRING_LEN, + bnx2x_q_stats_arr[j].string, + queue_name); + k += BNX2X_NUM_Q_STATS; + } + } + + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) + continue; + strcpy(buf + (k + j)*ETH_GSTRING_LEN, + bnx2x_stats_arr[i].string); + j++; + } + + break; + + case ETH_SS_TEST: + /* First 4 tests cannot be done in MF mode */ + if (!IS_MF(bp)) + start = 0; + else + start = 4; + memcpy(buf, bnx2x_tests_str_arr + start, + ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); + break; + + case ETH_SS_PRIV_FLAGS: + memcpy(buf, bnx2x_private_arr, + ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN); + break; + } +} + +static void bnx2x_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + struct bnx2x *bp = netdev_priv(dev); + u32 *hw_stats, *offset; + int i, j, k = 0; + + if (is_multi(bp)) { + for_each_eth_queue(bp, i) { + hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats; + for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { + if (bnx2x_q_stats_arr[j].size == 0) { + /* skip this counter */ + buf[k + j] = 0; + continue; + } + offset = (hw_stats + + bnx2x_q_stats_arr[j].offset); + if (bnx2x_q_stats_arr[j].size == 4) { + /* 4-byte counter */ + buf[k + j] = (u64) *offset; + continue; + } + /* 8-byte counter */ + buf[k + j] = HILO_U64(*offset, *(offset + 1)); + } + k += BNX2X_NUM_Q_STATS; + } + } + + hw_stats = (u32 *)&bp->eth_stats; + for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { + if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i)) + continue; + if (bnx2x_stats_arr[i].size == 0) { + /* skip this counter */ + buf[k + j] = 0; + j++; + continue; + } + offset = (hw_stats + bnx2x_stats_arr[i].offset); + if (bnx2x_stats_arr[i].size == 4) { + /* 4-byte counter */ + buf[k + j] = (u64) *offset; + j++; + continue; + } + /* 8-byte counter */ + buf[k + j] = HILO_U64(*offset, *(offset + 1)); + j++; + } +} + +static int bnx2x_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (!bnx2x_is_nvm_accessible(bp)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "cannot access eeprom when the interface is down\n"); + return -EAGAIN; + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + bnx2x_acquire_phy_lock(bp); + bnx2x_set_led(&bp->link_params, &bp->link_vars, + LED_MODE_ON, SPEED_1000); + bnx2x_release_phy_lock(bp); + break; + + case ETHTOOL_ID_OFF: + bnx2x_acquire_phy_lock(bp); + bnx2x_set_led(&bp->link_params, &bp->link_vars, + LED_MODE_FRONT_PANEL_OFF, 0); + bnx2x_release_phy_lock(bp); + 
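		/* LED-blink flow: the 1 returned for ETHTOOL_ID_ACTIVE above
		 * asks the ethtool core to drive the blinking itself, so this
		 * function is called back with ETHTOOL_ID_ON / ETHTOOL_ID_OFF
		 * roughly once per second until ETHTOOL_ID_INACTIVE restores
		 * the normal operational LED state (LED_MODE_OPER at the
		 * current line speed).  Each step takes the PHY lock since
		 * bnx2x_set_led() works on shared link state.
		 */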
break; + + case ETHTOOL_ID_INACTIVE: + bnx2x_acquire_phy_lock(bp); + bnx2x_set_led(&bp->link_params, &bp->link_vars, + LED_MODE_OPER, + bp->link_vars.line_speed); + bnx2x_release_phy_lock(bp); + } + + return 0; +} + +static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) +{ + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (bp->rss_conf_obj.udp_rss_v4) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if (bp->rss_conf_obj.udp_rss_v6) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case IPV4_FLOW: + case IPV6_FLOW: + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + default: + info->data = 0; + break; + } + + return 0; +} + +static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct bnx2x *bp = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = BNX2X_NUM_ETH_QUEUES(bp); + return 0; + case ETHTOOL_GRXFH: + return bnx2x_get_rss_flags(bp, info); + default: + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) +{ + int udp_rss_requested; + + DP(BNX2X_MSG_ETHTOOL, + "Set rss flags command parameters: flow type = %d, data = %llu\n", + info->flow_type, info->data); + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* For TCP only 4-tupple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + DP(BNX2X_MSG_ETHTOOL, + "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + + case UDP_V4_FLOW: + case UDP_V6_FLOW: + /* For UDP either 2-tupple hash or 4-tupple hash is supported */ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + udp_rss_requested = 1; + else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) + udp_rss_requested = 0; + else + return -EINVAL; + if ((info->flow_type == UDP_V4_FLOW) && + (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { + bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? "enabled" : "disabled"); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + } else if ((info->flow_type == UDP_V6_FLOW) && + (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { + bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; + DP(BNX2X_MSG_ETHTOOL, + "rss re-configured, UDP 4-tupple %s\n", + udp_rss_requested ? 
"enabled" : "disabled"); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); + } + return 0; + + case IPV4_FLOW: + case IPV6_FLOW: + /* For IP only 2-tupple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) { + DP(BNX2X_MSG_ETHTOOL, + "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IP_USER_FLOW: + case ETHER_FLOW: + /* RSS is not supported for these protocols */ + if (info->data) { + DP(BNX2X_MSG_ETHTOOL, + "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + + default: + return -EINVAL; + } +} + +static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) +{ + struct bnx2x *bp = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_SRXFH: + return bnx2x_set_rss_flags(bp, info); + default: + DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) +{ + return T_ETH_INDIRECTION_TABLE_SIZE; +} + +static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct bnx2x *bp = netdev_priv(dev); + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + size_t i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + + /* Get the current configuration of the RSS indirection table */ + bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); + + /* + * We can't use a memcpy() as an internal storage of an + * indirection table is a u8 array while indir->ring_index + * points to an array of u32. + * + * Indirection table contains the FW Client IDs, so we need to + * align the returned table to the Client ID of the leading RSS + * queue. + */ + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) + indir[i] = ind_table[i] - bp->fp->cl_id; + + return 0; +} + +static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct bnx2x *bp = netdev_priv(dev); + size_t i; + + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { + /* + * The same as in bnx2x_get_rxfh: we can't use a memcpy() + * as an internal storage of an indirection table is a u8 array + * while indir->ring_index points to an array of u32. + * + * Indirection table contains the FW Client IDs, so we need to + * align the received table to the Client ID of the leading RSS + * queue + */ + bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; + } + + return bnx2x_config_rss_eth(bp, false); +} + +/** + * bnx2x_get_channels - gets the number of RSS queues. + * + * @dev: net device + * @channels: returns the number of max / current queues + */ +static void bnx2x_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct bnx2x *bp = netdev_priv(dev); + + channels->max_combined = BNX2X_MAX_RSS_COUNT(bp); + channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp); +} + +/** + * bnx2x_change_num_queues - change the number of RSS queues. + * + * @bp: bnx2x private structure + * + * Re-configure interrupt mode to get the new number of MSI-X + * vectors and re-add NAPI objects. 
+ */ +static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) +{ + bnx2x_disable_msi(bp); + bp->num_ethernet_queues = num_rss; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); + bnx2x_set_int_mode(bp); +} + +/** + * bnx2x_set_channels - sets the number of RSS queues. + * + * @dev: net device + * @channels: includes the number of queues requested + */ +static int bnx2x_set_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct bnx2x *bp = netdev_priv(dev); + + DP(BNX2X_MSG_ETHTOOL, + "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", + channels->rx_count, channels->tx_count, channels->other_count, + channels->combined_count); + + if (pci_num_vf(bp->pdev)) { + DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n"); + return -EPERM; + } + + /* We don't support separate rx / tx channels. + * We don't allow setting 'other' channels. + */ + if (channels->rx_count || channels->tx_count || channels->other_count + || (channels->combined_count == 0) || + (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) { + DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n"); + return -EINVAL; + } + + /* Check if there was a change in the active parameters */ + if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) { + DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n"); + return 0; + } + + /* Set the requested number of queues in bp context. + * Note that the actual number of queues created during load may be + * less than requested if memory is low. + */ + if (unlikely(!netif_running(dev))) { + bnx2x_change_num_queues(bp, channels->combined_count); + return 0; + } + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); + bnx2x_change_num_queues(bp, channels->combined_count); + return bnx2x_nic_load(bp, LOAD_NORMAL); +} + +static int bnx2x_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & PTP_SUPPORTED) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (bp->ptp_clock) + info->phc_index = ptp_clock_index(bp->ptp_clock); + else + info->phc_index = -1; + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON); + + return 0; + } + + return ethtool_op_get_ts_info(dev, info); +} + +static const struct ethtool_ops bnx2x_ethtool_ops = { + .get_settings = bnx2x_get_settings, + .set_settings = bnx2x_set_settings, + .get_drvinfo = bnx2x_get_drvinfo, + .get_regs_len = bnx2x_get_regs_len, + .get_regs = bnx2x_get_regs, + .get_dump_flag = bnx2x_get_dump_flag, + .get_dump_data = bnx2x_get_dump_data, + .set_dump = bnx2x_set_dump, + .get_wol = bnx2x_get_wol, + .set_wol = bnx2x_set_wol, + .get_msglevel = bnx2x_get_msglevel, + .set_msglevel = 
bnx2x_set_msglevel, + .nway_reset = bnx2x_nway_reset, + .get_link = bnx2x_get_link, + .get_eeprom_len = bnx2x_get_eeprom_len, + .get_eeprom = bnx2x_get_eeprom, + .set_eeprom = bnx2x_set_eeprom, + .get_coalesce = bnx2x_get_coalesce, + .set_coalesce = bnx2x_set_coalesce, + .get_ringparam = bnx2x_get_ringparam, + .set_ringparam = bnx2x_set_ringparam, + .get_pauseparam = bnx2x_get_pauseparam, + .set_pauseparam = bnx2x_set_pauseparam, + .self_test = bnx2x_self_test, + .get_sset_count = bnx2x_get_sset_count, + .get_priv_flags = bnx2x_get_private_flags, + .get_strings = bnx2x_get_strings, + .set_phys_id = bnx2x_set_phys_id, + .get_ethtool_stats = bnx2x_get_ethtool_stats, + .get_rxnfc = bnx2x_get_rxnfc, + .set_rxnfc = bnx2x_set_rxnfc, + .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, + .get_rxfh = bnx2x_get_rxfh, + .set_rxfh = bnx2x_set_rxfh, + .get_channels = bnx2x_get_channels, + .set_channels = bnx2x_set_channels, + .get_module_info = bnx2x_get_module_info, + .get_module_eeprom = bnx2x_get_module_eeprom, + .get_eee = bnx2x_get_eee, + .set_eee = bnx2x_set_eee, + .get_ts_info = bnx2x_get_ts_info, +}; + +static const struct ethtool_ops bnx2x_vf_ethtool_ops = { + .get_settings = bnx2x_get_vf_settings, + .get_drvinfo = bnx2x_get_drvinfo, + .get_msglevel = bnx2x_get_msglevel, + .set_msglevel = bnx2x_set_msglevel, + .get_link = bnx2x_get_link, + .get_coalesce = bnx2x_get_coalesce, + .get_ringparam = bnx2x_get_ringparam, + .set_ringparam = bnx2x_set_ringparam, + .get_sset_count = bnx2x_get_sset_count, + .get_strings = bnx2x_get_strings, + .get_ethtool_stats = bnx2x_get_ethtool_stats, + .get_rxnfc = bnx2x_get_rxnfc, + .set_rxnfc = bnx2x_set_rxnfc, + .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, + .get_rxfh = bnx2x_get_rxfh, + .set_rxfh = bnx2x_set_rxfh, + .get_channels = bnx2x_get_channels, + .set_channels = bnx2x_set_channels, +}; + +void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev) +{ + netdev->ethtool_ops = (IS_PF(bp)) ? + &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h new file mode 100644 index 000000000..7636e3c18 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -0,0 +1,396 @@ +/* bnx2x_fw_defs.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNX2X_FW_DEFS_H +#define BNX2X_FW_DEFS_H + +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base) +#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[151].base + ((assertListEntry) * IRO[151].m1)) +#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ + (IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \ + IRO[157].m2)) +#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ + (IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \ + IRO[158].m2)) +#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ + (IRO[163].base + ((funcId) * IRO[163].m1)) +#define CSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[153].base + ((funcId) * IRO[153].m1)) +#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \ + (IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2)) +#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \ + (IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \ + * IRO[142].m2) + ((sbId) * IRO[142].m3)) +#define CSTORM_IGU_MODE_OFFSET (IRO[161].base) +#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[323].base + ((pfId) * IRO[323].m1)) +#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[324].base + ((pfId) * IRO[324].m1)) +#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ + (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2)) +#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ + (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2)) +#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ + (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2)) +#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ + (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2)) +#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[322].base + ((pfId) * IRO[322].m1)) +#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[314].base + ((pfId) * IRO[314].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[313].base + ((pfId) * IRO[313].m1)) +#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[312].base + ((pfId) * IRO[312].m1)) +#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[155].base + ((funcId) * IRO[155].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ + (IRO[146].base + ((pfId) * IRO[146].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ + (IRO[147].base + ((pfId) * IRO[147].m1)) +#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ + (IRO[145].base + ((pfId) * IRO[145].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size) +#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ + (IRO[148].base + ((pfId) * IRO[148].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size) +#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ + (IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2)) +#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ + (IRO[137].base + ((sbId) * IRO[137].m1)) +#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ + (IRO[138].base + ((sbId) * IRO[138].m1)) +#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ + (IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2)) 
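/* A note on the pattern shared by all of these macros (an illustration,
 * not part of the original header): the firmware image carries an array
 * of internal RAM offsets (the IRO[] table, taken from the iro_arr
 * section of the firmware file), and every accessor here computes
 *
 *	IRO[n].base + index0 * IRO[n].m1 [+ index1 * IRO[n].m2 ...]
 *
 * i.e. a base address plus one stride per index.  For example, given the
 * definition above, CSTORM_FUNC_EN_OFFSET(2) is just
 * IRO[153].base + 2 * IRO[153].m1, while the ((pfId)>>1) / ((pfId)&1)
 * variants split a PF id into two indices with separate strides.  The
 * concrete base/m1/m2/size values can therefore change from one firmware
 * release to the next without touching this header.
 */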
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ + (IRO[136].base + ((sbId) * IRO[136].m1)) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size) +#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ + (IRO[141].base + ((sbId) * IRO[141].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size) +#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ + (IRO[159].base + ((vfId) * IRO[159].m1)) +#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ + (IRO[160].base + ((vfId) * IRO[160].m1)) +#define CSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[154].base + ((funcId) * IRO[154].m1)) +#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ + (IRO[207].base + ((pfId) * IRO[207].m1)) +#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) +#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[101].base + ((assertListEntry) * IRO[101].m1)) +#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ + (IRO[205].base + ((pfId) * IRO[205].m1)) +#define TSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[107].base + ((funcId) * IRO[107].m1)) +#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[278].base + ((pfId) * IRO[278].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ + (IRO[279].base + ((pfId) * IRO[279].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ + (IRO[280].base + ((pfId) * IRO[280].m1)) +#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ + (IRO[281].base + ((pfId) * IRO[281].m1)) +#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[277].base + ((pfId) * IRO[277].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[276].base + ((pfId) * IRO[276].m1)) +#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[275].base + ((pfId) * IRO[275].m1)) +#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[274].base + ((pfId) * IRO[274].m1)) +#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ + (IRO[284].base + ((pfId) * IRO[284].m1)) +#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[270].base + ((pfId) * IRO[270].m1)) +#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[271].base + ((pfId) * IRO[271].m1)) +#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[272].base + ((pfId) * IRO[272].m1)) +#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[273].base + ((pfId) * IRO[273].m1)) +#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ + (IRO[206].base + ((pfId) * IRO[206].m1)) +#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[109].base + ((funcId) * IRO[109].m1)) +#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ + (IRO[223].base + ((pfId) * IRO[223].m1)) +#define TSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[108].base + ((funcId) * IRO[108].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[212].base) +#define USTORM_AGG_DATA_SIZE (IRO[212].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base) +#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[180].base + ((assertListEntry) * IRO[180].m1)) +#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ + (IRO[187].base + ((portId) * IRO[187].m1)) +#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ + (IRO[325].base + ((pfId) * IRO[325].m1)) +#define USTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[182].base + ((funcId) * IRO[182].m1)) +#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ + (IRO[289].base + ((pfId) * IRO[289].m1)) +#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ + (IRO[290].base + ((pfId) * IRO[290].m1)) +#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[294].base + ((pfId) * IRO[294].m1)) +#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ + (IRO[291].base + ((pfId) * IRO[291].m1)) 
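/* Usage sketch (assumed call pattern, not code from this header): these
 * per-STORM offsets are added to the matching internal-memory window of
 * the device BAR, e.g. the BAR_USTRORM_INTMEM / BAR_CSTRORM_INTMEM bases
 * declared in bnx2x.h, and then accessed through the usual REG_RD()/
 * REG_WR() helpers, roughly:
 *
 *	REG_WR(bp, BAR_USTRORM_INTMEM +
 *		   USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_FUNC(bp)), val);
 *
 * The real call sites live in bnx2x_main.c and friends rather than here.
 */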
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[287].base + ((pfId) * IRO[287].m1)) +#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[286].base + ((pfId) * IRO[286].m1)) +#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[285].base + ((pfId) * IRO[285].m1)) +#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[288].base + ((pfId) * IRO[288].m1)) +#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ + (IRO[292].base + ((pfId) * IRO[292].m1)) +#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ + (IRO[293].base + ((pfId) * IRO[293].m1)) +#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ + (IRO[186].base + ((pfId) * IRO[186].m1)) +#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[184].base + ((funcId) * IRO[184].m1)) +#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ + (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \ + IRO[215].m2)) +#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ + (IRO[216].base + ((qzoneId) * IRO[216].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[213].base) +#define USTORM_TPA_BTR_SIZE (IRO[213].size) +#define USTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[183].base + ((funcId) * IRO[183].m1)) +#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) +#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) +#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) +#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ + (IRO[50].base + ((assertListEntry) * IRO[50].m1)) +#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ + (IRO[43].base + ((portId) * IRO[43].m1)) +#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ + (IRO[45].base + ((pfId) * IRO[45].m1)) +#define XSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[47].base + ((funcId) * IRO[47].m1)) +#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ + (IRO[302].base + ((pfId) * IRO[302].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ + (IRO[305].base + ((pfId) * IRO[305].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ + (IRO[306].base + ((pfId) * IRO[306].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ + (IRO[307].base + ((pfId) * IRO[307].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ + (IRO[308].base + ((pfId) * IRO[308].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ + (IRO[309].base + ((pfId) * IRO[309].m1)) +#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ + (IRO[310].base + ((pfId) * IRO[310].m1)) +#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ + (IRO[311].base + ((pfId) * IRO[311].m1)) +#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ + (IRO[301].base + ((pfId) * IRO[301].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ + (IRO[300].base + ((pfId) * IRO[300].m1)) +#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ + (IRO[299].base + ((pfId) * IRO[299].m1)) +#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ + (IRO[304].base + ((pfId) * IRO[304].m1)) +#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ + (IRO[303].base + ((pfId) * IRO[303].m1)) +#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ + (IRO[298].base + ((pfId) * IRO[298].m1)) +#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ + (IRO[297].base + ((pfId) * IRO[297].m1)) +#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ + (IRO[296].base + ((pfId) * IRO[296].m1)) +#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ + (IRO[295].base + ((pfId) * IRO[295].m1)) +#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ + (IRO[44].base + ((pfId) * IRO[44].m1)) +#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[49].base + ((funcId) * IRO[49].m1)) +#define XSTORM_SPQ_DATA_OFFSET(funcId) \ + (IRO[32].base + ((funcId) 
* IRO[32].m1)) +#define XSTORM_SPQ_DATA_SIZE (IRO[32].size) +#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \ + (IRO[30].base + ((funcId) * IRO[30].m1)) +#define XSTORM_SPQ_PROD_OFFSET(funcId) \ + (IRO[31].base + ((funcId) * IRO[31].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ + (IRO[217].base + ((portId) * IRO[217].m1)) +#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ + (IRO[218].base + ((portId) * IRO[218].m1)) +#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ + (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \ + IRO[220].m2)) +#define XSTORM_VF_TO_PF_OFFSET(funcId) \ + (IRO[48].base + ((funcId) * IRO[48].m1)) +#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 + +/* eth hsi version */ +#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2) + +/* Ethernet Ring parameters */ +#define X_ETH_LOCAL_RING_SIZE 13 +#define FIRST_BD_IN_PKT 0 +#define PARSE_BD_INDEX 1 +#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) +#define U_ETH_NUM_OF_SGES_TO_FETCH 8 +#define U_ETH_MAX_SGES_FOR_PACKET 3 + +/* Rx ring params */ +#define U_ETH_LOCAL_BD_RING_SIZE 8 +#define U_ETH_LOCAL_SGE_RING_SIZE 10 +#define U_ETH_SGL_SIZE 8 + /* The fw will padd the buffer with this value, so the IP header \ + will be align to 4 Byte */ +#define IP_HEADER_ALIGNMENT_PADDING 2 + +#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ + (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) + +#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8)) +#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) +#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) + +#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) +#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) +#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) + +#define U_ETH_UNDEFINED_Q 0xFF + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY 10 +#define ETH_NUM_OF_RSS_ENGINES_E2 72 + +#define FILTER_RULES_COUNT 16 +#define MULTICAST_RULES_COUNT 16 +#define CLASSIFY_RULES_COUNT 16 + +/*The CRC32 seed, that is used for the hash(reduction) multicast address */ +#define ETH_CRC32_HASH_SEED 0x00000000 + +#define ETH_CRC32_HASH_BIT_SIZE (8) +#define ETH_CRC32_HASH_MASK EVAL((1< + * Written by: Vladislav Zolotarov + * Based on the original idea of John Wright . + */ + +#ifndef BNX2X_INIT_FILE_HDR_H +#define BNX2X_INIT_FILE_HDR_H + +struct bnx2x_fw_file_section { + __be32 len; + __be32 offset; +}; + +struct bnx2x_fw_file_hdr { + struct bnx2x_fw_file_section init_ops; + struct bnx2x_fw_file_section init_ops_offsets; + struct bnx2x_fw_file_section init_data; + struct bnx2x_fw_file_section tsem_int_table_data; + struct bnx2x_fw_file_section tsem_pram_data; + struct bnx2x_fw_file_section usem_int_table_data; + struct bnx2x_fw_file_section usem_pram_data; + struct bnx2x_fw_file_section csem_int_table_data; + struct bnx2x_fw_file_section csem_pram_data; + struct bnx2x_fw_file_section xsem_int_table_data; + struct bnx2x_fw_file_section xsem_pram_data; + struct bnx2x_fw_file_section iro_arr; + struct bnx2x_fw_file_section fw_version; +}; + +#endif /* BNX2X_INIT_FILE_HDR_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h new file mode 100644 index 000000000..42b4cb34a --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -0,0 +1,5897 @@ +/* bnx2x_hsi.h: Broadcom Everest network driver. 
+ * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#ifndef BNX2X_HSI_H +#define BNX2X_HSI_H + +#include "bnx2x_fw_defs.h" +#include "bnx2x_mfw_req.h" + +#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e + +struct license_key { + u32 reserved[6]; + + u32 max_iscsi_conn; +#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF +#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0 +#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000 +#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16 + + u32 reserved_a; + + u32 max_fcoe_conn; +#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF +#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0 +#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000 +#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16 + + u32 reserved_b[4]; +}; + +/**************************************************************************** + * Shared HW configuration * + ****************************************************************************/ +#define PIN_CFG_NA 0x00000000 +#define PIN_CFG_GPIO0_P0 0x00000001 +#define PIN_CFG_GPIO1_P0 0x00000002 +#define PIN_CFG_GPIO2_P0 0x00000003 +#define PIN_CFG_GPIO3_P0 0x00000004 +#define PIN_CFG_GPIO0_P1 0x00000005 +#define PIN_CFG_GPIO1_P1 0x00000006 +#define PIN_CFG_GPIO2_P1 0x00000007 +#define PIN_CFG_GPIO3_P1 0x00000008 +#define PIN_CFG_EPIO0 0x00000009 +#define PIN_CFG_EPIO1 0x0000000a +#define PIN_CFG_EPIO2 0x0000000b +#define PIN_CFG_EPIO3 0x0000000c +#define PIN_CFG_EPIO4 0x0000000d +#define PIN_CFG_EPIO5 0x0000000e +#define PIN_CFG_EPIO6 0x0000000f +#define PIN_CFG_EPIO7 0x00000010 +#define PIN_CFG_EPIO8 0x00000011 +#define PIN_CFG_EPIO9 0x00000012 +#define PIN_CFG_EPIO10 0x00000013 +#define PIN_CFG_EPIO11 0x00000014 +#define PIN_CFG_EPIO12 0x00000015 +#define PIN_CFG_EPIO13 0x00000016 +#define PIN_CFG_EPIO14 0x00000017 +#define PIN_CFG_EPIO15 0x00000018 +#define PIN_CFG_EPIO16 0x00000019 +#define PIN_CFG_EPIO17 0x0000001a +#define PIN_CFG_EPIO18 0x0000001b +#define PIN_CFG_EPIO19 0x0000001c +#define PIN_CFG_EPIO20 0x0000001d +#define PIN_CFG_EPIO21 0x0000001e +#define PIN_CFG_EPIO22 0x0000001f +#define PIN_CFG_EPIO23 0x00000020 +#define PIN_CFG_EPIO24 0x00000021 +#define PIN_CFG_EPIO25 0x00000022 +#define PIN_CFG_EPIO26 0x00000023 +#define PIN_CFG_EPIO27 0x00000024 +#define PIN_CFG_EPIO28 0x00000025 +#define PIN_CFG_EPIO29 0x00000026 +#define PIN_CFG_EPIO30 0x00000027 +#define PIN_CFG_EPIO31 0x00000028 + +/* EPIO definition */ +#define EPIO_CFG_NA 0x00000000 +#define EPIO_CFG_EPIO0 0x00000001 +#define EPIO_CFG_EPIO1 0x00000002 +#define EPIO_CFG_EPIO2 0x00000003 +#define EPIO_CFG_EPIO3 0x00000004 +#define EPIO_CFG_EPIO4 0x00000005 +#define EPIO_CFG_EPIO5 0x00000006 +#define EPIO_CFG_EPIO6 0x00000007 +#define EPIO_CFG_EPIO7 0x00000008 +#define EPIO_CFG_EPIO8 0x00000009 +#define EPIO_CFG_EPIO9 0x0000000a +#define EPIO_CFG_EPIO10 0x0000000b +#define EPIO_CFG_EPIO11 0x0000000c +#define EPIO_CFG_EPIO12 0x0000000d +#define EPIO_CFG_EPIO13 0x0000000e +#define EPIO_CFG_EPIO14 0x0000000f +#define EPIO_CFG_EPIO15 0x00000010 +#define EPIO_CFG_EPIO16 0x00000011 +#define EPIO_CFG_EPIO17 0x00000012 +#define EPIO_CFG_EPIO18 0x00000013 +#define EPIO_CFG_EPIO19 0x00000014 +#define EPIO_CFG_EPIO20 0x00000015 +#define EPIO_CFG_EPIO21 0x00000016 +#define EPIO_CFG_EPIO22 0x00000017 +#define EPIO_CFG_EPIO23 0x00000018 +#define EPIO_CFG_EPIO24 0x00000019 +#define EPIO_CFG_EPIO25 0x0000001a +#define EPIO_CFG_EPIO26 0x0000001b +#define 
EPIO_CFG_EPIO27 0x0000001c +#define EPIO_CFG_EPIO28 0x0000001d +#define EPIO_CFG_EPIO29 0x0000001e +#define EPIO_CFG_EPIO30 0x0000001f +#define EPIO_CFG_EPIO31 0x00000020 + +struct mac_addr { + u32 upper; + u32 lower; +}; + +struct shared_hw_cfg { /* NVRAM Offset */ + /* Up to 16 bytes of NULL-terminated string */ + u8 part_num[16]; /* 0x104 */ + + u32 config; /* 0x114 */ + #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 + #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 + #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 + #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 + #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 + + #define SHARED_HW_CFG_PORT_SWAP 0x00000004 + + #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 + + #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 + + #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 + #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 + /* Whatever MFW found in NVM + (if multiple found, priority order is: NC-SI, UMP, IPMI) */ + #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 + #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 + #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 + #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 + /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 + /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 + /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP + (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 + + #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 + #define SHARED_HW_CFG_LED_MODE_SHIFT 16 + #define SHARED_HW_CFG_LED_MAC1 0x00000000 + #define SHARED_HW_CFG_LED_PHY1 0x00010000 + #define SHARED_HW_CFG_LED_PHY2 0x00020000 + #define SHARED_HW_CFG_LED_PHY3 0x00030000 + #define SHARED_HW_CFG_LED_MAC2 0x00040000 + #define SHARED_HW_CFG_LED_PHY4 0x00050000 + #define SHARED_HW_CFG_LED_PHY5 0x00060000 + #define SHARED_HW_CFG_LED_PHY6 0x00070000 + #define SHARED_HW_CFG_LED_MAC3 0x00080000 + #define SHARED_HW_CFG_LED_PHY7 0x00090000 + #define SHARED_HW_CFG_LED_PHY9 0x000a0000 + #define SHARED_HW_CFG_LED_PHY11 0x000b0000 + #define SHARED_HW_CFG_LED_MAC4 0x000c0000 + #define SHARED_HW_CFG_LED_PHY8 0x000d0000 + #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + #define SHARED_HW_CFG_LED_EXTPHY2 0x000f0000 + + + #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 + #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 + #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 + #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 + #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 + #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 + #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 + #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 + + #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 + #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 + #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 + + #define SHARED_HW_CFG_ATC_MASK 0x80000000 + #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 + #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 + + u32 config2; /* 0x118 */ + /* one time auto detect grace period (in sec) */ + #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff + #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 + + #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 
0x00000100 + #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 + + /* The default value for the core clock is 250MHz and it is + achieved by setting the clock change to 4 */ + #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 + #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 + + #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 + #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 + #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 + + #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 + + #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000 + #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000 + #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000 + + /* Output low when PERST is asserted */ + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 + + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 + + /* The fan failure mechanism is usually related to the PHY type + since the power consumption of the board is determined by the PHY. + Currently, fan is required for most designs with SFX7101, BCM8727 + and BCM8481. If a fan is not required for a board which uses one + of those PHYs, this field should be set to "Disabled". If a fan is + required for a different PHY type, this option should be set to + "Enabled". The fan failure indication is expected on SPIO5 */ + #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 + #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 + #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 + #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 + + /* ASPM Power Management support */ + #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 + #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 + + /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register + tl_control_0 (register 0x2800) */ + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 + + #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000 + #define SHARED_HW_CFG_PORT_MODE_2 0x00000000 + #define SHARED_HW_CFG_PORT_MODE_4 0x01000000 + + #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000 + #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000 + #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000 + + /* Set the MDC/MDIO access for the first external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 + + /* Set the MDC/MDIO access for the second external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 + #define 
SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 + + u32 config_3; /* 0x11C */ + #define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK 0x00000F00 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT 8 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5 0x00000000 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0 0x00000100 + + u32 ump_nc_si_config; /* 0x120 */ + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 + + #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 + #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 + + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 + + u32 board; /* 0x124 */ + #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F + #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0 + #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 + #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 + /* Use the PIN_CFG_XXX defines on top */ + #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000 + #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 + + #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000 + #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 + + #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000 + #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 + + u32 wc_lane_config; /* 0x128 */ + #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF + #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b + #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b + #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 + #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 + #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 + #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 + #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 + #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 + #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 + + /* Selects the port layout of the board */ + #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 +}; + + 
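/* Reader's note (a sketch with made-up helper names, fenced off with
 * "#if 0" so it is clearly not part of the driver): throughout this file
 * every multi-bit field is described by a _MASK/_SHIFT pair plus named
 * values that are already shifted into position, which leads to the
 * access patterns below.
 */
#if 0	/* illustrative only; never compiled */
static inline u32 shared_cfg_fan_failure_mode(u32 config2)
{
	/* numeric/enumerated fields: mask, then shift down */
	return (config2 & SHARED_HW_CFG_FAN_FAILURE_MASK) >>
	       SHARED_HW_CFG_FAN_FAILURE_SHIFT;
}

static inline bool shared_cfg_fan_failure_enabled(u32 config2)
{
	/* the named values are pre-shifted, so they compare directly
	 * against the masked word
	 */
	return (config2 & SHARED_HW_CFG_FAN_FAILURE_MASK) ==
	       SHARED_HW_CFG_FAN_FAILURE_ENABLED;
}

static inline bool shared_cfg_port1_hidden(u32 config2)
{
	/* single-bit flags need no shift at all */
	return (config2 & SHARED_HW_CFG_HIDE_PORT1) != 0;
}
#endif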
+/**************************************************************************** + * Port HW configuration * + ****************************************************************************/ +struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ + + u32 pci_id; + #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 + #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff + + u32 pci_sub_id; + #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 + #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff + + u32 power_dissipated; + #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff + #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 + #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 + #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 + #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 + + u32 power_consumed; + #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff + #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 + #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 + #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 + #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 + + u32 mac_upper; + #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_UPPERMAC_SHIFT 0 + u32 mac_lower; + + u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */ + u32 iscsi_mac_lower; + + u32 rdma_mac_upper; /* Upper 16 bits are always zeroes */ + u32 rdma_mac_lower; + + u32 serdes_config; + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 + + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000 + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 + + + /* Default values: 2P-64, 4P-32 */ + u32 pf_config; /* 0x158 */ + #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F + #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0 + + /* Default values: 17 */ + #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00 + #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8 + + #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000 + #define PORT_HW_CFG_FLR_ENABLED 0x00010000 + + u32 vf_config; /* 0x15C */ + #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F + #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0 + + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 + + u32 mf_pci_id; /* 0x160 */ + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 + + /* Controls the TX laser of the SFP+ module */ + u32 sfp_ctrl; /* 0x164 */ + #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_TX_LASER_SHIFT 0 + #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 + #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 + #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 + #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 + #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 + + /* Controls the fault module LED of the SFP+ */ + #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 + #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 + + /* The output pin TX_DIS that controls the TX laser of the SFP+ + 
module. Use the PIN_CFG_XXX defines on top */ + u32 e3_sfp_ctrl; /* 0x168 */ + #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 + + /* The output pin for SFPP_TYPE which turns on the Fault module LED */ + #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 + + /* The input pin MOD_ABS that indicates whether SFP+ module is + present or not. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 + + /* The output pin PWRDIS_SFP_X which disable the power of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 + #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 + + /* + * The input pin which signals module transmit fault. Use the + * PIN_CFG_XXX defines on top + */ + u32 e3_cmn_pin_cfg; /* 0x16C */ + #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 + + /* The output pin which reset the PHY. Use the PIN_CFG_XXX defines on + top */ + #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 + + /* + * The output pin which powers down the PHY. Use the PIN_CFG_XXX + * defines on top + */ + #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 + + /* The output pin values BSC_SEL which selects the I2C for this port + in the I2C Mux */ + #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 + #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 + + + /* + * The input pin I_FAULT which indicate over-current has occurred. + * Use the PIN_CFG_XXX defines on top + */ + u32 e3_cmn_pin_cfg1; /* 0x170 */ + #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF + #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 + + /* pause on host ring */ + u32 generic_features; /* 0x174 */ + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000 + #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001 + + /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2 + * LOM recommended and tested value is 0xBEB2. Using a different + * value means using a value not tested by BRCM + */ + u32 sfi_tap_values; /* 0x178 */ + #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF + #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0 + + /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested + * value is 0x2. LOM recommended and tested value is 0x2. Using a + * different value means using a value not tested by BRCM + */ + #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000 + #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16 + /* Set non-default values for TXFIR in SFP mode. */ + #define PORT_HW_CFG_TX_DRV_IFIR_MASK 0x00F00000 + #define PORT_HW_CFG_TX_DRV_IFIR_SHIFT 20 + + /* Set non-default values for IPREDRIVER in SFP mode. */ + #define PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK 0x0F000000 + #define PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT 24 + + /* Set non-default values for POST2 in SFP mode. 
*/ + #define PORT_HW_CFG_TX_DRV_POST2_MASK 0xF0000000 + #define PORT_HW_CFG_TX_DRV_POST2_SHIFT 28 + + u32 reserved0[5]; /* 0x17c */ + + u32 aeu_int_mask; /* 0x190 */ + + u32 media_type; /* 0x194 */ + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 + + /* 4 times 16 bits for all 4 lanes. In case external PHY is present + (not direct mode), those values will not take effect on the 4 XGXS + lanes. For some external PHYs (such as 8706 and 8726) the values + will be used to configure the external PHY in those cases, not + all 4 values are needed. */ + u16 xgxs_config_rx[4]; /* 0x198 */ + u16 xgxs_config_tx[4]; /* 0x1A0 */ + + /* For storing FCOE mac on shared memory */ + u32 fcoe_fip_mac_upper; + #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 + u32 fcoe_fip_mac_lower; + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 Reserved1[49]; /* 0x1C0 */ + + /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default), + 84833 only */ + u32 xgbt_phy_cfg; /* 0x284 */ + #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF + #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0 + + u32 default_cfg; /* 0x288 */ + #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 + #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 + #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 + #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 + #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 + + #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C + #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 + #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 + #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 + #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c + + #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 + #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 + #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 + #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 + #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 + + #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 + #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 + #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 + #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 + #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 + + /* When KR link is required to be set to force which is not + KR-compliant, this parameter determine what is the trigger for it. + When GPIO is selected, low input will force the speed. Currently + default speed is 1G. In the future, it may be widen to select the + forced speed in with another parameter. Note when force-1G is + enabled, it override option 56: Link Speed option. 
*/ + #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 + #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 + #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 + #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 + /* Enable to determine with which GPIO to reset the external phy */ + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 + + /* Enable BAM on KR */ + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + + /* Enable Common Mode Sense */ + #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 + #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 + #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 + + /* Determine the Serdes electrical interface */ + #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 + #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 + #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 + #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 + #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 + #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 + + + u32 speed_capability_mask2; /* 0x28C */ + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 + + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 
0x00800000 + + + /* In the case where two media types (e.g. copper and fiber) are + present and electrically active at the same time, PHY Selection + will determine which of the two PHYs will be designated as the + Active PHY and used for a connection to the network. */ + u32 multi_phy_config; /* 0x290 */ + #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 + #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 + #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 + + /* When enabled, all second phy nvram parameters will be swapped + with the first phy parameters */ + #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 + #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 + #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 + #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 + + + /* Address of the second external phy */ + u32 external_phy_config2; /* 0x294 */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 + + /* The second XGXS external PHY type */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 + + + /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as + 8706, 8726 and 8727) not all 4 values are needed. 
*/ + u16 xgxs_config2_rx[4]; /* 0x296 */ + u16 xgxs_config2_tx[4]; /* 0x2A0 */ + + u32 lane_config; + #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff + #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + /* AN and forced */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 + + /* Indicate whether to swap the external phy polarity */ + #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 + + + u32 external_phy_config; + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 + + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 + + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 + + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 + + u32 speed_capability_mask; + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 + #define 
PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 + + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 + + /* A place to hold the original MAC address as a backup */ + u32 backup_mac_upper; /* 0x2B4 */ + u32 backup_mac_lower; /* 0x2B8 */ + +}; + + +/**************************************************************************** + * Shared Feature configuration * + ****************************************************************************/ +struct shared_feat_cfg { /* NVRAM Offset */ + + u32 config; /* 0x450 */ + #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 + + /* Use NVRAM values instead of HW default values */ + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ + 0x00000002 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ + 0x00000000 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ + 0x00000002 + + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 + + #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 + #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 + + /* Override the OTP back to single function mode. When using GPIO, + high means only SF, 0 is according to CLP configuration */ + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700 + + /* The interval in seconds between sending LLDP packets. 
Set to zero + to disable the feature */ + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000 + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 + + /* The assigned device type ID for LLDP usage */ + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000 + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 + +}; + + +/**************************************************************************** + * Port Feature configuration * + ****************************************************************************/ +struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ + + u32 config; + #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f + #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 + #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 + #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 + #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 + #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 + #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 + #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 + #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 + #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 + #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 + #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a + #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b + #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c + #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d + #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e + #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f + #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 + #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 + #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 + #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 + #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 + #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 + #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 + #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 + #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 + #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 + #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 + #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 + #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 + #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 + #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 + #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 + #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 + + #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 + #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 + #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 + + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400 + #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800 + + #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 + #define PORT_FEATURE_EN_SIZE_SHIFT 24 + #define PORT_FEATURE_WOL_ENABLED 0x01000000 + #define PORT_FEATURE_MBA_ENABLED 0x02000000 + #define PORT_FEATURE_MFW_ENABLED 0x04000000 + + /* Advertise expansion ROM even if MBA is disabled */ + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 + + /* Check the optic vendor via i2c against a list of approved modules + in a separate nvram image */ + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ + 0x00000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ + 0x20000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 + #define 
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 + + u32 wol_config; + /* Default is used when driver sets to "auto" mode */ + #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 + #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 + #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 + #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 + #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 + #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 + #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 + #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 + #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 + + u32 mba_config; + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 + + #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 + #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 + + #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 + #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 + #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 + #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 + #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 + #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 + #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 + #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000 + 
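The mba_config encodings above are compact: per the defines, the expansion-ROM size field holds a 4-bit code n where 0 means disabled and n >= 1 selects (1 << n) KB (1 -> 2K ... 15 -> 32M), while the boot-agent link speed is an enumeration in bits 29:26. A small decoding sketch, assuming kernel types and the PORT_FEATURE_MBA_* defines above (helper names are illustrative):

#include <linux/types.h>

/* Illustrative decode of two mba_config fields (not driver code). */
static inline u32 mba_exp_rom_size_kb(u32 mba_config)
{
	u32 code = (mba_config & PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK) >>
		   PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT;

	/* 0 = disabled; otherwise code n selects (1 << n) KB,
	 * e.g. 1 -> 2K, 5 -> 32K, 15 -> 32M, per the defines above. */
	return code ? (1U << code) : 0;
}

static inline bool mba_link_speed_is_auto(u32 mba_config)
{
	return (mba_config & PORT_FEATURE_MBA_LINK_SPEED_MASK) ==
	       PORT_FEATURE_MBA_LINK_SPEED_AUTO;
}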
u32 bmc_config; + #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001 + #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 + #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 + + u32 mba_vlan_cfg; + #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff + #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 + #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 + + u32 resource_cfg; + #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 + #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 + #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 + #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 + #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 + + u32 smbus_config; + #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe + #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 + + u32 vf_config; + #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f + #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f + + u32 link_config; /* Used as HW defaults for the driver */ + #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 + #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 + /* (forced) low speed switch (< 10G) */ + #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 + /* (forced) high speed switch (>= 10G) */ + #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 + #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 + #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 + + #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 + #define PORT_FEATURE_LINK_SPEED_SHIFT 16 + #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 + #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 + #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 + #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 + #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 + #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 + #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 + #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 + + #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 + #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 + #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 + #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 + #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 + #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 + #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 + + /* The default for MCP link configuration, + uses the same defines as link_config */ + u32 mfw_wol_link_cfg; + + /* The default for the driver of the second external phy, + uses the same defines as link_config */ + u32 link_config2; /* 0x47C */ + + /* The default for MCP of the second external phy, + uses the same defines as link_config */ + u32 mfw_wol_link_cfg2; /* 0x480 */ + + + /* EEE power saving mode */ + u32 eee_power_mode; /* 0x484 */ + 
#define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF + #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0 + #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001 + #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002 + #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003 + + + u32 Reserved2[16]; /* 0x488 */ +}; + + +/**************************************************************************** + * Device Information * + ****************************************************************************/ +struct shm_dev_info { /* size */ + + u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ + + struct shared_hw_cfg shared_hw_config; /* 40 */ + + struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ + + struct shared_feat_cfg shared_feature_config; /* 4 */ + + struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ + +}; + + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) + #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." +#endif + +#define FUNC_0 0 +#define FUNC_1 1 +#define FUNC_2 2 +#define FUNC_3 3 +#define FUNC_4 4 +#define FUNC_5 5 +#define FUNC_6 6 +#define FUNC_7 7 +#define E1_FUNC_MAX 2 +#define E1H_FUNC_MAX 8 +#define E2_FUNC_MAX 4 /* per path */ + +#define VN_0 0 +#define VN_1 1 +#define VN_2 2 +#define VN_3 3 +#define E1VN_MAX 1 +#define E1HVN_MAX 4 + +#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ +/* This value (in milliseconds) determines the frequency of the driver + * issuing the PULSE message code. The firmware monitors this periodic + * pulse to determine when to switch to an OS-absent mode. */ +#define DRV_PULSE_PERIOD_MS 250 + +/* This value (in milliseconds) determines how long the driver should + * wait for an acknowledgement from the firmware before timing out. Once + * the firmware has timed out, the driver will assume there is no firmware + * running and there won't be any firmware-driver synchronization during a + * driver reset. 
*/ +#define FW_ACK_TIME_OUT_MS 5000 + +#define FW_ACK_POLL_TIME_MS 1 + +#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) + +#define MFW_TRACE_SIGNATURE 0x54524342 + +/**************************************************************************** + * Driver <-> FW Mailbox * + ****************************************************************************/ +struct drv_port_mb { + + u32 link_status; + /* Driver should update this field on any link change event */ + + #define LINK_STATUS_NONE (0<<0) + #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 + #define LINK_STATUS_LINK_UP 0x00000001 + #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E + #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) + + #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 + #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + + #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 + #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 + #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + + #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 + #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 + #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 + #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 + #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 + #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 + #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 + + #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 + #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 + + #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 + #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 + + #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 + #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) + #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) + #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) + #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) + + #define LINK_STATUS_SERDES_LINK 0x00100000 + + #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 + #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 + #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 + #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000 + + #define LINK_STATUS_PFC_ENABLED 0x20000000 + + #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 + #define LINK_STATUS_SFP_TX_FAULT 0x80000000 + + u32 port_stx; + + u32 stat_nig_timer; + + /* MCP firmware does not use this field */ + u32 ext_phy_fw_version; + +}; + + +struct drv_func_mb { + + u32 drv_mb_header; + #define DRV_MSG_CODE_MASK 0xffff0000 + #define DRV_MSG_CODE_LOAD_REQ 0x10000000 + #define DRV_MSG_CODE_LOAD_DONE 0x11000000 + #define 
DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 + #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 + #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 + #define DRV_MSG_CODE_DCC_OK 0x30000000 + #define DRV_MSG_CODE_DCC_FAILURE 0x31000000 + #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 + #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 + #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 + #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 + #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 + #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 + #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 + #define DRV_MSG_CODE_OEM_OK 0x00010000 + #define DRV_MSG_CODE_OEM_FAILURE 0x00020000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_OK 0x00030000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE 0x00040000 + /* + * The optic module verification command requires bootcode + * v5.0.6 or later, te specific optic module verification command + * requires bootcode v5.2.12 or later + */ + #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000 + #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 + #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 + #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 + #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000 + #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 + #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 + #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201 + #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 + #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209 + + #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 + #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 + #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401 + + #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 + + #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000 + #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000 + #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000 + #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000 + #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000 + + #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 + #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 + + #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 + + #define DRV_MSG_CODE_RMMOD 0xdb000000 + #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f + + #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 + #define REQ_BC_VER_4_SET_MF_BW 0x00060202 + #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 + + #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 + + #define DRV_MSG_CODE_INITIATE_FLR 0x02000000 + #define REQ_BC_VER_4_INITIATE_FLR 0x00070213 + + #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 + #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 drv_mb_param; + #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 + #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 + + #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 + + #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a + #define DRV_MSG_CODE_LOAD_REQ_FORCE_LFA 0x00002000 + + u32 fw_mb_header; + #define FW_MSG_CODE_MASK 0xffff0000 + #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 + #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 + #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 + /* Load common chip is supported from bc 6.0.0 */ + #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 + #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 + + #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 + #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 + 
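The drv_mb_header/fw_mb_header pair forms a simple request/response mailbox: the upper 16 bits carry the message code, and the low 16 bits carry a sequence number (per DRV_MSG_SEQ_NUMBER_MASK and FW_MSG_SEQ_NUMBER_MASK). A rough sketch of composing a request and checking a reply, assuming the defines above (names are illustrative; the real driver wraps this exchange in polling, time-outs and locking):

#include <linux/types.h>

/* Illustrative only: build a driver mailbox word and test a firmware reply. */
static inline u32 drv_mb_compose(u32 msg_code, u16 seq)
{
	/* Command in the upper 16 bits, sequence number in the lower 16. */
	return (msg_code & DRV_MSG_CODE_MASK) |
	       (seq & DRV_MSG_SEQ_NUMBER_MASK);
}

static inline bool fw_reply_is_load_done(u32 fw_mb_header)
{
	/* The response code also lives in the upper 16 bits. */
	return (fw_mb_header & FW_MSG_CODE_MASK) == FW_MSG_CODE_DRV_LOAD_DONE;
}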
#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 + #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 + #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 + #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 + #define FW_MSG_CODE_DCC_DONE 0x30100000 + #define FW_MSG_CODE_LLDP_DONE 0x40100000 + #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 + #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 + #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 + #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 + #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 + #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 + #define FW_MSG_CODE_NO_KEY 0x80f00000 + #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 + #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 + #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 + #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 + #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 + #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 + #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 + #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 + #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 + #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 + #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000 + + #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000 + #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000 + #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000 + #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000 + #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000 + + #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 + #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 + + #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 + + #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 + + #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 + #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 + + #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000 + + #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define FW_MSG_CODE_LIC_RESPONSE 0xff020000 + #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 fw_mb_param; + + u32 drv_pulse_mb; + #define DRV_PULSE_SEQ_MASK 0x00007fff + #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 + /* + * The system time is in the format of + * (year-2001)*12*32 + month*32 + day. + */ + #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + /* + * Indicate to the firmware not to go into the + * OS-absent when it is not getting driver pulse. + * This is used for debugging as well for PXE(MBA). 
+ */ + + u32 mcp_pulse_mb; + #define MCP_PULSE_SEQ_MASK 0x00007fff + #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 + /* Indicates to the driver not to assert due to lack + * of MCP response */ + #define MCP_EVENT_MASK 0xffff0000 + #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + u32 iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + u32 drv_status; + #define DRV_STATUS_PMF 0x00000001 + #define DRV_STATUS_VF_DISABLED 0x00000002 + #define DRV_STATUS_SET_MF_BW 0x00000004 + #define DRV_STATUS_LINK_EVENT 0x00000008 + + #define DRV_STATUS_OEM_EVENT_MASK 0x00000070 + #define DRV_STATUS_OEM_DISABLE_ENABLE_PF 0x00000010 + #define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION 0x00000020 + + #define DRV_STATUS_OEM_UPDATE_SVID 0x00000080 + + #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 + #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 + #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 + #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400 + #define DRV_STATUS_DCC_RESERVED1 0x00000800 + #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 + #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 + + #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 + #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 + #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000 + #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000 + #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000 + #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000 + #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000 + + #define DRV_STATUS_DRV_INFO_REQ 0x04000000 + + #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000 + + u32 virt_mac_upper; + #define VIRT_MAC_SIGN_MASK 0xffff0000 + #define VIRT_MAC_SIGNATURE 0x564d0000 + u32 virt_mac_lower; + +}; + + +/**************************************************************************** + * Management firmware state * + ****************************************************************************/ +/* Allocate 440 bytes for management firmware */ +#define MGMTFW_STATE_WORD_SIZE 110 + +struct mgmtfw_state { + u32 opaque[MGMTFW_STATE_WORD_SIZE]; +}; + + +/**************************************************************************** + * Multi-Function configuration * + ****************************************************************************/ +struct shared_mf_cfg { + + u32 clp_mb; + #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 + /* set by CLP */ + #define SHARED_MF_CLP_EXIT 0x00000001 + /* set by MCP */ + #define SHARED_MF_CLP_EXIT_DONE 0x00010000 + +}; + +struct port_mf_cfg { + + u32 dynamic_cfg; /* device control channel */ + #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 + #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK + + u32 reserved[1]; + +}; + +struct func_mf_cfg { + + u32 config; + /* E/R/I/D */ + /* function 0 of each port cannot be hidden */ + #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 + + #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 + #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ + FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA + + #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 + #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 + + /* PRI */ + /* 0 - low priority, 3 - high priority */ + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 + + /* MINBW, MAXBW */ + /* value range - 0..100, 
increments in 100Mbps */ + #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 + #define FUNC_MF_CFG_MIN_BW_SHIFT 16 + #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 + #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 + #define FUNC_MF_CFG_MAX_BW_SHIFT 24 + #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 + + u32 mac_upper; /* MAC */ + #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff + #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 + #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; + #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 e1hov_tag; /* VNI */ + #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 + #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK + + /* afex default VLAN ID - 12 bits */ + #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000 + #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16 + + u32 afex_config; + #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff + #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8 + #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000 + #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16 + + u32 reserved; +}; + +enum mf_cfg_afex_vlan_mode { + FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0, + FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE, + FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE +}; + +/* This structure is not applicable and should not be accessed on 57711 */ +struct func_ext_cfg { + u32 func_cfg; + #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F + #define MACP_FUNC_CFG_FLAGS_SHIFT 0 + #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 + #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 + #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 + #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 + #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080 + + u32 iscsi_mac_addr_upper; + u32 iscsi_mac_addr_lower; + + u32 fcoe_mac_addr_upper; + u32 fcoe_mac_addr_lower; + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 preserve_data; + #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) + #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) + #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) + #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) +}; + +struct mf_cfg { + + struct shared_mf_cfg shared_mf_config; /* 0x4 */ + /* 0x8*2*2=0x20 */ + struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX]; + /* for all chips, there are 8 mf functions */ + struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ + /* + * Extended configuration per function - this array does not exist and + * should not be accessed on 57711 + */ + struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ +}; /* 0x224 */ + +/**************************************************************************** + * Shared Memory Region * + ****************************************************************************/ +struct shmem_region { /* SharedMem Offset (size) */ + + u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ + #define SHR_MEM_FORMAT_REV_MASK 0xff000000 + #define SHR_MEM_FORMAT_REV_ID ('A'<<24) + /* validity bits */ + #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 + #define SHR_MEM_VALIDITY_MB 0x00200000 + #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 + #define SHR_MEM_VALIDITY_RESERVED 0x00000007 + /* One licensing bit should be set */ + #define 
SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 + #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 + #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 + #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 + /* Active MFW */ + #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 + #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + + struct shm_dev_info dev_info; /* 0x8 (0x438) */ + + struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */ + + /* FW information (for internal FW use) */ + u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ + struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ + + struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ + +#ifdef BMAPI + /* This is a variable length array */ + /* the number of function depends on the chip type */ + struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ +#else + /* the number of function depends on the chip type */ + struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */ +#endif /* BMAPI */ + +}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */ + +/**************************************************************************** + * Shared Memory 2 Region * + ****************************************************************************/ +/* The fw_flr_ack is actually built in the following way: */ +/* 8 bit: PF ack */ +/* 64 bit: VF ack */ +/* 8 bit: ios_dis_ack */ +/* In order to maintain endianity in the mailbox hsi, we want to keep using */ +/* u32. The fw must have the VF right after the PF since this is how it */ +/* access arrays(it expects always the VF to reside after the PF, and that */ +/* makes the calculation much easier for it. ) */ +/* In order to answer both limitations, and keep the struct small, the code */ +/* will abuse the structure defined here to achieve the actual partition */ +/* above */ +/****************************************************************************/ +struct fw_flr_ack { + u32 pf_ack; + u32 vf_ack[1]; + u32 iov_dis_ack; +}; + +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + struct fw_flr_ack ack; +}; + +struct eee_remote_vals { + u32 tx_tw; + u32 rx_tw; +}; + +/**** SUPPORT FOR SHMEM ARRRAYS *** + * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to + * define arrays with storage types smaller then unsigned dwords. + * The macros below add generic support for SHMEM arrays with numeric elements + * that can span 2,4,8 or 16 bits. The array underlying type is a 32 bit dword + * array with individual bit-filed elements accessed using shifts and masks. + * + */ + +/* eb is the bitwidth of a single element */ +#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1) +#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb))) + +/* the bit-position macro allows the used to flip the order of the arrays + * elements on a per byte or word boundary. + * + * example: an array with 8 entries each 4 bit wide. This array will fit into + * a single dword. The diagrmas below show the array order of the nibbles. 
+ * + * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering: + * + * | | | | + * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte: + * + * | | | | + * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 | + * | | | | + * + * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word: + * + * | | | | + * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 | + * | | | | + */ +#define SHMEM_ARRAY_BITPOS(i, eb, fb) \ + ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ + (((i)%((fb)/(eb))) * (eb))) + +#define SHMEM_ARRAY_GET(a, i, eb, fb) \ + ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ + SHMEM_ARRAY_MASK(eb)) + +#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ +do { \ + a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ + a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ + SHMEM_ARRAY_BITPOS(i, eb, fb)); \ +} while (0) + + +/****START OF DCBX STRUCTURES DECLARATIONS****/ +#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8 +#define DCBX_PRI_PG_BITWIDTH 4 +#define DCBX_PRI_PG_FBITS 8 +#define DCBX_PRI_PG_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS) +#define DCBX_PRI_PG_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val) +#define DCBX_MAX_NUM_PG_BW_ENTRIES 8 +#define DCBX_BW_PG_BITWIDTH 8 +#define DCBX_PG_BW_GET(a, i) \ + SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH) +#define DCBX_PG_BW_SET(a, i, val) \ + SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val) +#define DCBX_STRICT_PRI_PG 15 +#define DCBX_MAX_APP_PROTOCOL 16 +#define FCOE_APP_IDX 0 +#define ISCSI_APP_IDX 1 +#define PREDEFINED_APP_IDX_MAX 2 + + +/* Big/Little endian have the same representation. */ +struct dcbx_ets_feature { + /* + * For Admin MIB - is this feature supported by the + * driver | For Local MIB - should this feature be enabled. 
+ */ + u32 enabled; + u32 pg_bw_tbl[2]; + u32 pri_pg_tbl[1]; +}; + +/* Driver structure in LE */ +struct dcbx_pfc_feature { +#ifdef __BIG_ENDIAN + u8 pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 + u8 pfc_caps; + u8 reserved; + u8 enabled; +#elif defined(__LITTLE_ENDIAN) + u8 enabled; + u8 reserved; + u8 pfc_caps; + u8 pri_en_bitmap; + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 +#endif +}; + +struct dcbx_app_priority_entry { +#ifdef __BIG_ENDIAN + u16 app_id; + u8 pri_bitmap; + u8 appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 +#elif defined(__LITTLE_ENDIAN) + u8 appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 + u8 pri_bitmap; + u16 app_id; +#endif +}; + + +/* FW structure in BE */ +struct dcbx_app_priority_feature { +#ifdef __BIG_ENDIAN + u8 reserved; + u8 default_pri; + u8 tc_supported; + u8 enabled; +#elif defined(__LITTLE_ENDIAN) + u8 enabled; + u8 tc_supported; + u8 default_pri; + u8 reserved; +#endif + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +/* FW structure in BE */ +struct dcbx_features { + /* PG feature */ + struct dcbx_ets_feature ets; + /* PFC feature */ + struct dcbx_pfc_feature pfc; + /* APP feature */ + struct dcbx_app_priority_feature app; +}; + +/* LLDP protocol parameters */ +/* FW structure in BE */ +struct lldp_params { +#ifdef __BIG_ENDIAN + u8 msg_fast_tx_interval; + u8 msg_tx_hold; + u8 msg_tx_interval; + u8 admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + u8 reserved1; + u8 tx_fast; + u8 tx_crd_max; + u8 tx_crd; +#elif defined(__LITTLE_ENDIAN) + u8 admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + u8 msg_tx_interval; + u8 msg_tx_hold; + u8 msg_fast_tx_interval; + u8 tx_crd; + u8 tx_crd_max; + u8 tx_fast; + u8 reserved1; +#endif + #define REM_CHASSIS_ID_STAT_LEN 4 + #define REM_PORT_ID_STAT_LEN 4 + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ + u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; + /* Holds remote Port ID TLV header, subtype and 9B of payload. */ + u32 peer_port_id[REM_PORT_ID_STAT_LEN]; +}; + +struct lldp_dcbx_stat { + #define LOCAL_CHASSIS_ID_STAT_LEN 2 + #define LOCAL_PORT_ID_STAT_LEN 2 + /* Holds local Chassis ID 8B payload of constant subtype 4. */ + u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; + /* Holds local Port ID 8B payload of constant subtype 3. */ + u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; + /* Number of DCBX frames transmitted. */ + u32 num_tx_dcbx_pkts; + /* Number of DCBX frames received. */ + u32 num_rx_dcbx_pkts; +}; + +/* ADMIN MIB - DCBX local machine default configuration. 
*/ +struct lldp_admin_mib { + u32 ver_cfg_flags; + #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 + #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 + #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 + #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 + #define DCBX_ETS_RECO_VALID 0x00000010 + #define DCBX_ETS_WILLING 0x00000020 + #define DCBX_PFC_WILLING 0x00000040 + #define DCBX_APP_WILLING 0x00000080 + #define DCBX_VERSION_CEE 0x00000100 + #define DCBX_VERSION_IEEE 0x00000200 + #define DCBX_DCBX_ENABLED 0x00000400 + #define DCBX_CEE_VERSION_MASK 0x0000f000 + #define DCBX_CEE_VERSION_SHIFT 12 + #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 + #define DCBX_CEE_MAX_VERSION_SHIFT 16 + struct dcbx_features features; +}; + +/* REMOTE MIB - remote machine DCBX configuration. */ +struct lldp_remote_mib { + u32 prefix_seq_num; + u32 flags; + #define DCBX_ETS_TLV_RX 0x00000001 + #define DCBX_PFC_TLV_RX 0x00000002 + #define DCBX_APP_TLV_RX 0x00000004 + #define DCBX_ETS_RX_ERROR 0x00000010 + #define DCBX_PFC_RX_ERROR 0x00000020 + #define DCBX_APP_RX_ERROR 0x00000040 + #define DCBX_ETS_REM_WILLING 0x00000100 + #define DCBX_PFC_REM_WILLING 0x00000200 + #define DCBX_APP_REM_WILLING 0x00000400 + #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 + #define DCBX_REMOTE_MIB_VALID 0x00002000 + struct dcbx_features features; + u32 suffix_seq_num; +}; + +/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */ +struct lldp_local_mib { + u32 prefix_seq_num; + /* Indicates if there is mismatch with negotiation results. */ + u32 error; + #define DCBX_LOCAL_ETS_ERROR 0x00000001 + #define DCBX_LOCAL_PFC_ERROR 0x00000002 + #define DCBX_LOCAL_APP_ERROR 0x00000004 + #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 + #define DCBX_LOCAL_APP_MISMATCH 0x00000020 + #define DCBX_REMOTE_MIB_ERROR 0x00000040 + #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080 + #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100 + #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200 + struct dcbx_features features; + u32 suffix_seq_num; +}; +/***END OF DCBX STRUCTURES DECLARATIONS***/ + +/***********************************************************/ +/* Elink section */ +/***********************************************************/ +#define SHMEM_LINK_CONFIG_SIZE 2 +struct shmem_lfa { + u32 req_duplex; + #define REQ_DUPLEX_PHY0_MASK 0x0000ffff + #define REQ_DUPLEX_PHY0_SHIFT 0 + #define REQ_DUPLEX_PHY1_MASK 0xffff0000 + #define REQ_DUPLEX_PHY1_SHIFT 16 + u32 req_flow_ctrl; + #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff + #define REQ_FLOW_CTRL_PHY0_SHIFT 0 + #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000 + #define REQ_FLOW_CTRL_PHY1_SHIFT 16 + u32 req_line_speed; /* Also determine AutoNeg */ + #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff + #define REQ_LINE_SPD_PHY0_SHIFT 0 + #define REQ_LINE_SPD_PHY1_MASK 0xffff0000 + #define REQ_LINE_SPD_PHY1_SHIFT 16 + u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE]; + u32 additional_config; + #define REQ_FC_AUTO_ADV_MASK 0x0000ffff + #define REQ_FC_AUTO_ADV0_SHIFT 0 + #define NO_LFA_DUE_TO_DCC_MASK 0x00010000 + u32 lfa_sts; + #define LFA_LINK_FLAP_REASON_OFFSET 0 + #define LFA_LINK_FLAP_REASON_MASK 0x000000ff + #define LFA_LINK_DOWN 0x1 + #define LFA_LOOPBACK_ENABLED 0x2 + #define LFA_DUPLEX_MISMATCH 0x3 + #define LFA_MFW_IS_TOO_OLD 0x4 + #define LFA_LINK_SPEED_MISMATCH 0x5 + #define LFA_FLOW_CTRL_MISMATCH 0x6 + #define LFA_SPEED_CAP_MISMATCH 0x7 + #define LFA_DCC_LFA_DISABLED 0x8 + #define LFA_EEE_MISMATCH 0x9 + + #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 + #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 + + #define 
LINK_FLAP_COUNT_OFFSET 16 + #define LINK_FLAP_COUNT_MASK 0x00ff0000 + + #define LFA_FLAGS_MASK 0xff000000 + #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) +}; + +/* Used to support NSCI get OS driver version + * on driver load the version value will be set + * on driver unload driver value of 0x0 will be set. + */ +struct os_drv_ver { +#define DRV_VER_NOT_LOADED 0 + + /* personalties order is important */ +#define DRV_PERS_ETHERNET 0 +#define DRV_PERS_ISCSI 1 +#define DRV_PERS_FCOE 2 + + /* shmem2 struct is constant can't add more personalties here */ +#define MAX_DRV_PERS 3 + u32 versions[MAX_DRV_PERS]; +}; + +struct ncsi_oem_fcoe_features { + u32 fcoe_features1; + #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF + #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0 + + #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000 + #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16 + + u32 fcoe_features2; + #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF + #define FCOE_FEATURES2_EXCHANGES_OFFSET 0 + + #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000 + #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16 + + u32 fcoe_features3; + #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF + #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0 + + #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000 + #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16 + + u32 fcoe_features4; + #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F + #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 +}; + +struct ncsi_oem_data { + u32 driver_version[4]; + struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; +}; + +struct shmem2_region { + + u32 size; /* 0x0000 */ + + u32 dcc_support; /* 0x0004 */ + #define SHMEM_DCC_SUPPORT_NONE 0x00000000 + #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 + #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 + #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 + #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 + #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 + + u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */ + /* + * For backwards compatibility, if the mf_cfg_addr does not exist + * (the size filed is smaller than 0xc) the mf_cfg resides at the + * end of struct shmem_region + */ + u32 mf_cfg_addr; /* 0x0010 */ + #define SHMEM_MF_CFG_ADDR_NONE 0x00000000 + + struct fw_flr_mb flr_mb; /* 0x0014 */ + u32 dcbx_lldp_params_offset; /* 0x0028 */ + #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000 + u32 dcbx_neg_res_offset; /* 0x002c */ + #define SHMEM_DCBX_NEG_RES_NONE 0x00000000 + u32 dcbx_remote_mib_offset; /* 0x0030 */ + #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000 + /* + * The other shmemX_base_addr holds the other path's shmem address + * required for example in case of common phy init, or for path1 to know + * the address of mcp debug trace which is located in offset from shmem + * of path0 + */ + u32 other_shmem_base_addr; /* 0x0034 */ + u32 other_shmem2_base_addr; /* 0x0038 */ + /* + * mcp_vf_disabled is set by the MCP to indicate the driver about VFs + * which were disabled/flred + */ + u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */ + + /* + * drv_ack_vf_disabled is set by the PF driver to ack handled disabled + * VFs + */ + u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */ + + u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */ + #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000 + + /* + * edebug_driver_if field is used to transfer messages between edebug + * app to the driver through shmem2. 
+ * + * message format: + * bits 0-2 - function number / instance of driver to perform request + * bits 3-5 - op code / is_ack? + * bits 6-63 - data + */ + u32 edebug_driver_if[2]; /* 0x0068 */ + #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1 + #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2 + #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3 + + u32 nvm_retain_bitmap_addr; /* 0x0070 */ + + /* afex support of that driver */ + u32 afex_driver_support; /* 0x0074 */ + #define SHMEM_AFEX_VERSION_MASK 0x100f + #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001 + #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000 + + /* driver receives addr in scratchpad to which it should respond */ + u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX]; + + /* generic params from MCP to driver (value depends on the msg sent + * to driver + */ + u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */ + u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */ + + u32 swim_base_addr; /* 0x0108 */ + u32 swim_funcs; + u32 swim_main_cb; + + /* bitmap notifying which VIF profiles stored in nvram are enabled by + * switch + */ + u32 afex_profiles_enabled[2]; + + /* generic flags controlled by the driver */ + u32 drv_flags; + #define DRV_FLAGS_DCB_CONFIGURED 0x0 + #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1 + #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2 + + #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \ + (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \ + (1 << DRV_FLAGS_DCB_MFW_CONFIGURED)) + /* pointer to extended dev_info shared data copied from nvm image */ + u32 extended_dev_info_shared_addr; + u32 ncsi_oem_data_addr; + + u32 ocsd_host_addr; /* initialized by option ROM */ + u32 ocbb_host_addr; /* initialized by option ROM */ + u32 ocsd_req_update_interval; /* initialized by option ROM */ + u32 temperature_in_half_celsius; + u32 glob_struct_in_host; + + u32 dcbx_neg_res_ext_offset; +#define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000 + + u32 drv_capabilities_flag[E2_FUNC_MAX]; +#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001 +#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 +#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 +#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 + + u32 extended_dev_info_shared_cfg_size; + + u32 dcbx_en[PORT_MAX]; + + /* The offset points to the multi threaded meta structure */ + u32 multi_thread_data_offset; + + /* address of DMAable host address holding values from the drivers */ + u32 drv_info_host_addr_lo; + u32 drv_info_host_addr_hi; + + /* general values written by the MFW (such as current version) */ + u32 drv_info_control; +#define DRV_INFO_CONTROL_VER_MASK 0x000000ff +#define DRV_INFO_CONTROL_VER_SHIFT 0 +#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 +#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 + u32 ibft_host_addr; /* initialized by option ROM */ + struct eee_remote_vals eee_remote_vals[PORT_MAX]; + u32 reserved[E2_FUNC_MAX]; + + + /* the status of EEE auto-negotiation + * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31. + * bits 19:16 the supported modes for EEE. + * bits 23:20 the speeds advertised for EEE. + * bits 27:24 the speeds the Link partner advertised for EEE. + * The supported/adv. modes in bits 27:19 originate from the + * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed). + * bit 28 when 1'b1 EEE was requested. + * bit 29 when 1'b1 tx lpi was requested. + * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff + * 30:29 are 2'b11. 
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as + * value. When 1'b1 those bits contains a value times 16 microseconds. + */ + u32 eee_status[PORT_MAX]; + #define SHMEM_EEE_TIMER_MASK 0x0000ffff + #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000 + #define SHMEM_EEE_SUPPORTED_SHIFT 16 + #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000 + #define SHMEM_EEE_100M_ADV (1<<0) + #define SHMEM_EEE_1G_ADV (1<<1) + #define SHMEM_EEE_10G_ADV (1<<2) + #define SHMEM_EEE_ADV_STATUS_SHIFT 20 + #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000 + #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24 + #define SHMEM_EEE_REQUESTED_BIT 0x10000000 + #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000 + #define SHMEM_EEE_ACTIVE_BIT 0x40000000 + #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000 + + u32 sizeof_port_stats; + + /* Link Flap Avoidance */ + u32 lfa_host_addr[PORT_MAX]; + u32 reserved1; + + u32 reserved2; /* Offset 0x148 */ + u32 reserved3; /* Offset 0x14C */ + u32 reserved4; /* Offset 0x150 */ + u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ + #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 + #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 + #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 + #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 + #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000 + #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000 + + u32 reserved5[2]; + u32 link_change_count[PORT_MAX]; /* Offset 0x160-0x164 */ + #define LINK_CHANGE_COUNT_MASK 0xff /* Offset 0x168 */ + /* driver version for each personality */ + struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ + + /* Flag to the driver that PF's drv_info_host_addr buffer was read */ + u32 mfw_drv_indication; + + /* We use indication for each PF (0..3) */ +#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) +}; + + +struct emac_stats { + u32 rx_stat_ifhcinoctets; + u32 rx_stat_ifhcinbadoctets; + u32 rx_stat_etherstatsfragments; + u32 rx_stat_ifhcinucastpkts; + u32 rx_stat_ifhcinmulticastpkts; + u32 rx_stat_ifhcinbroadcastpkts; + u32 rx_stat_dot3statsfcserrors; + u32 rx_stat_dot3statsalignmenterrors; + u32 rx_stat_dot3statscarriersenseerrors; + u32 rx_stat_xonpauseframesreceived; + u32 rx_stat_xoffpauseframesreceived; + u32 rx_stat_maccontrolframesreceived; + u32 rx_stat_xoffstateentered; + u32 rx_stat_dot3statsframestoolong; + u32 rx_stat_etherstatsjabbers; + u32 rx_stat_etherstatsundersizepkts; + u32 rx_stat_etherstatspkts64octets; + u32 rx_stat_etherstatspkts65octetsto127octets; + u32 rx_stat_etherstatspkts128octetsto255octets; + u32 rx_stat_etherstatspkts256octetsto511octets; + u32 rx_stat_etherstatspkts512octetsto1023octets; + u32 rx_stat_etherstatspkts1024octetsto1522octets; + u32 rx_stat_etherstatspktsover1522octets; + + u32 rx_stat_falsecarriererrors; + + u32 tx_stat_ifhcoutoctets; + u32 tx_stat_ifhcoutbadoctets; + u32 tx_stat_etherstatscollisions; + u32 tx_stat_outxonsent; + u32 tx_stat_outxoffsent; + u32 tx_stat_flowcontroldone; + u32 tx_stat_dot3statssinglecollisionframes; + u32 tx_stat_dot3statsmultiplecollisionframes; + u32 tx_stat_dot3statsdeferredtransmissions; + u32 tx_stat_dot3statsexcessivecollisions; + u32 tx_stat_dot3statslatecollisions; + u32 tx_stat_ifhcoutucastpkts; + u32 tx_stat_ifhcoutmulticastpkts; + u32 tx_stat_ifhcoutbroadcastpkts; + u32 tx_stat_etherstatspkts64octets; + u32 tx_stat_etherstatspkts65octetsto127octets; + u32 tx_stat_etherstatspkts128octetsto255octets; + u32 tx_stat_etherstatspkts256octetsto511octets; + u32 tx_stat_etherstatspkts512octetsto1023octets; + u32 
tx_stat_etherstatspkts1024octetsto1522octets; + u32 tx_stat_etherstatspktsover1522octets; + u32 tx_stat_dot3statsinternalmactransmiterrors; +}; + + +struct bmac1_stats { + u32 tx_stat_gtpkt_lo; + u32 tx_stat_gtpkt_hi; + u32 tx_stat_gtxpf_lo; + u32 tx_stat_gtxpf_hi; + u32 tx_stat_gtfcs_lo; + u32 tx_stat_gtfcs_hi; + u32 tx_stat_gtmca_lo; + u32 tx_stat_gtmca_hi; + u32 tx_stat_gtbca_lo; + u32 tx_stat_gtbca_hi; + u32 tx_stat_gtfrg_lo; + u32 tx_stat_gtfrg_hi; + u32 tx_stat_gtovr_lo; + u32 tx_stat_gtovr_hi; + u32 tx_stat_gt64_lo; + u32 tx_stat_gt64_hi; + u32 tx_stat_gt127_lo; + u32 tx_stat_gt127_hi; + u32 tx_stat_gt255_lo; + u32 tx_stat_gt255_hi; + u32 tx_stat_gt511_lo; + u32 tx_stat_gt511_hi; + u32 tx_stat_gt1023_lo; + u32 tx_stat_gt1023_hi; + u32 tx_stat_gt1518_lo; + u32 tx_stat_gt1518_hi; + u32 tx_stat_gt2047_lo; + u32 tx_stat_gt2047_hi; + u32 tx_stat_gt4095_lo; + u32 tx_stat_gt4095_hi; + u32 tx_stat_gt9216_lo; + u32 tx_stat_gt9216_hi; + u32 tx_stat_gt16383_lo; + u32 tx_stat_gt16383_hi; + u32 tx_stat_gtmax_lo; + u32 tx_stat_gtmax_hi; + u32 tx_stat_gtufl_lo; + u32 tx_stat_gtufl_hi; + u32 tx_stat_gterr_lo; + u32 tx_stat_gterr_hi; + u32 tx_stat_gtbyt_lo; + u32 tx_stat_gtbyt_hi; + + u32 rx_stat_gr64_lo; + u32 rx_stat_gr64_hi; + u32 rx_stat_gr127_lo; + u32 rx_stat_gr127_hi; + u32 rx_stat_gr255_lo; + u32 rx_stat_gr255_hi; + u32 rx_stat_gr511_lo; + u32 rx_stat_gr511_hi; + u32 rx_stat_gr1023_lo; + u32 rx_stat_gr1023_hi; + u32 rx_stat_gr1518_lo; + u32 rx_stat_gr1518_hi; + u32 rx_stat_gr2047_lo; + u32 rx_stat_gr2047_hi; + u32 rx_stat_gr4095_lo; + u32 rx_stat_gr4095_hi; + u32 rx_stat_gr9216_lo; + u32 rx_stat_gr9216_hi; + u32 rx_stat_gr16383_lo; + u32 rx_stat_gr16383_hi; + u32 rx_stat_grmax_lo; + u32 rx_stat_grmax_hi; + u32 rx_stat_grpkt_lo; + u32 rx_stat_grpkt_hi; + u32 rx_stat_grfcs_lo; + u32 rx_stat_grfcs_hi; + u32 rx_stat_grmca_lo; + u32 rx_stat_grmca_hi; + u32 rx_stat_grbca_lo; + u32 rx_stat_grbca_hi; + u32 rx_stat_grxcf_lo; + u32 rx_stat_grxcf_hi; + u32 rx_stat_grxpf_lo; + u32 rx_stat_grxpf_hi; + u32 rx_stat_grxuo_lo; + u32 rx_stat_grxuo_hi; + u32 rx_stat_grjbr_lo; + u32 rx_stat_grjbr_hi; + u32 rx_stat_grovr_lo; + u32 rx_stat_grovr_hi; + u32 rx_stat_grflr_lo; + u32 rx_stat_grflr_hi; + u32 rx_stat_grmeg_lo; + u32 rx_stat_grmeg_hi; + u32 rx_stat_grmeb_lo; + u32 rx_stat_grmeb_hi; + u32 rx_stat_grbyt_lo; + u32 rx_stat_grbyt_hi; + u32 rx_stat_grund_lo; + u32 rx_stat_grund_hi; + u32 rx_stat_grfrg_lo; + u32 rx_stat_grfrg_hi; + u32 rx_stat_grerb_lo; + u32 rx_stat_grerb_hi; + u32 rx_stat_grfre_lo; + u32 rx_stat_grfre_hi; + u32 rx_stat_gripj_lo; + u32 rx_stat_gripj_hi; +}; + +struct bmac2_stats { + u32 tx_stat_gtpk_lo; /* gtpok */ + u32 tx_stat_gtpk_hi; /* gtpok */ + u32 tx_stat_gtxpf_lo; /* gtpf */ + u32 tx_stat_gtxpf_hi; /* gtpf */ + u32 tx_stat_gtpp_lo; /* NEW BMAC2 */ + u32 tx_stat_gtpp_hi; /* NEW BMAC2 */ + u32 tx_stat_gtfcs_lo; + u32 tx_stat_gtfcs_hi; + u32 tx_stat_gtuca_lo; /* NEW BMAC2 */ + u32 tx_stat_gtuca_hi; /* NEW BMAC2 */ + u32 tx_stat_gtmca_lo; + u32 tx_stat_gtmca_hi; + u32 tx_stat_gtbca_lo; + u32 tx_stat_gtbca_hi; + u32 tx_stat_gtovr_lo; + u32 tx_stat_gtovr_hi; + u32 tx_stat_gtfrg_lo; + u32 tx_stat_gtfrg_hi; + u32 tx_stat_gtpkt1_lo; /* gtpkt */ + u32 tx_stat_gtpkt1_hi; /* gtpkt */ + u32 tx_stat_gt64_lo; + u32 tx_stat_gt64_hi; + u32 tx_stat_gt127_lo; + u32 tx_stat_gt127_hi; + u32 tx_stat_gt255_lo; + u32 tx_stat_gt255_hi; + u32 tx_stat_gt511_lo; + u32 tx_stat_gt511_hi; + u32 tx_stat_gt1023_lo; + u32 tx_stat_gt1023_hi; + u32 tx_stat_gt1518_lo; + u32 tx_stat_gt1518_hi; + u32 tx_stat_gt2047_lo; + 
u32 tx_stat_gt2047_hi; + u32 tx_stat_gt4095_lo; + u32 tx_stat_gt4095_hi; + u32 tx_stat_gt9216_lo; + u32 tx_stat_gt9216_hi; + u32 tx_stat_gt16383_lo; + u32 tx_stat_gt16383_hi; + u32 tx_stat_gtmax_lo; + u32 tx_stat_gtmax_hi; + u32 tx_stat_gtufl_lo; + u32 tx_stat_gtufl_hi; + u32 tx_stat_gterr_lo; + u32 tx_stat_gterr_hi; + u32 tx_stat_gtbyt_lo; + u32 tx_stat_gtbyt_hi; + + u32 rx_stat_gr64_lo; + u32 rx_stat_gr64_hi; + u32 rx_stat_gr127_lo; + u32 rx_stat_gr127_hi; + u32 rx_stat_gr255_lo; + u32 rx_stat_gr255_hi; + u32 rx_stat_gr511_lo; + u32 rx_stat_gr511_hi; + u32 rx_stat_gr1023_lo; + u32 rx_stat_gr1023_hi; + u32 rx_stat_gr1518_lo; + u32 rx_stat_gr1518_hi; + u32 rx_stat_gr2047_lo; + u32 rx_stat_gr2047_hi; + u32 rx_stat_gr4095_lo; + u32 rx_stat_gr4095_hi; + u32 rx_stat_gr9216_lo; + u32 rx_stat_gr9216_hi; + u32 rx_stat_gr16383_lo; + u32 rx_stat_gr16383_hi; + u32 rx_stat_grmax_lo; + u32 rx_stat_grmax_hi; + u32 rx_stat_grpkt_lo; + u32 rx_stat_grpkt_hi; + u32 rx_stat_grfcs_lo; + u32 rx_stat_grfcs_hi; + u32 rx_stat_gruca_lo; + u32 rx_stat_gruca_hi; + u32 rx_stat_grmca_lo; + u32 rx_stat_grmca_hi; + u32 rx_stat_grbca_lo; + u32 rx_stat_grbca_hi; + u32 rx_stat_grxpf_lo; /* grpf */ + u32 rx_stat_grxpf_hi; /* grpf */ + u32 rx_stat_grpp_lo; + u32 rx_stat_grpp_hi; + u32 rx_stat_grxuo_lo; /* gruo */ + u32 rx_stat_grxuo_hi; /* gruo */ + u32 rx_stat_grjbr_lo; + u32 rx_stat_grjbr_hi; + u32 rx_stat_grovr_lo; + u32 rx_stat_grovr_hi; + u32 rx_stat_grxcf_lo; /* grcf */ + u32 rx_stat_grxcf_hi; /* grcf */ + u32 rx_stat_grflr_lo; + u32 rx_stat_grflr_hi; + u32 rx_stat_grpok_lo; + u32 rx_stat_grpok_hi; + u32 rx_stat_grmeg_lo; + u32 rx_stat_grmeg_hi; + u32 rx_stat_grmeb_lo; + u32 rx_stat_grmeb_hi; + u32 rx_stat_grbyt_lo; + u32 rx_stat_grbyt_hi; + u32 rx_stat_grund_lo; + u32 rx_stat_grund_hi; + u32 rx_stat_grfrg_lo; + u32 rx_stat_grfrg_hi; + u32 rx_stat_grerb_lo; /* grerrbyt */ + u32 rx_stat_grerb_hi; /* grerrbyt */ + u32 rx_stat_grfre_lo; /* grfrerr */ + u32 rx_stat_grfre_hi; /* grfrerr */ + u32 rx_stat_gripj_lo; + u32 rx_stat_gripj_hi; +}; + +struct mstat_stats { + struct { + /* OTE MSTAT on E3 has a bug where this register's contents are + * actually tx_gtxpok + tx_gtxpf + (possibly)tx_gtxpp + */ + u32 tx_gtxpok_lo; + u32 tx_gtxpok_hi; + u32 tx_gtxpf_lo; + u32 tx_gtxpf_hi; + u32 tx_gtxpp_lo; + u32 tx_gtxpp_hi; + u32 tx_gtfcs_lo; + u32 tx_gtfcs_hi; + u32 tx_gtuca_lo; + u32 tx_gtuca_hi; + u32 tx_gtmca_lo; + u32 tx_gtmca_hi; + u32 tx_gtgca_lo; + u32 tx_gtgca_hi; + u32 tx_gtpkt_lo; + u32 tx_gtpkt_hi; + u32 tx_gt64_lo; + u32 tx_gt64_hi; + u32 tx_gt127_lo; + u32 tx_gt127_hi; + u32 tx_gt255_lo; + u32 tx_gt255_hi; + u32 tx_gt511_lo; + u32 tx_gt511_hi; + u32 tx_gt1023_lo; + u32 tx_gt1023_hi; + u32 tx_gt1518_lo; + u32 tx_gt1518_hi; + u32 tx_gt2047_lo; + u32 tx_gt2047_hi; + u32 tx_gt4095_lo; + u32 tx_gt4095_hi; + u32 tx_gt9216_lo; + u32 tx_gt9216_hi; + u32 tx_gt16383_lo; + u32 tx_gt16383_hi; + u32 tx_gtufl_lo; + u32 tx_gtufl_hi; + u32 tx_gterr_lo; + u32 tx_gterr_hi; + u32 tx_gtbyt_lo; + u32 tx_gtbyt_hi; + u32 tx_collisions_lo; + u32 tx_collisions_hi; + u32 tx_singlecollision_lo; + u32 tx_singlecollision_hi; + u32 tx_multiplecollisions_lo; + u32 tx_multiplecollisions_hi; + u32 tx_deferred_lo; + u32 tx_deferred_hi; + u32 tx_excessivecollisions_lo; + u32 tx_excessivecollisions_hi; + u32 tx_latecollisions_lo; + u32 tx_latecollisions_hi; + } stats_tx; + + struct { + u32 rx_gr64_lo; + u32 rx_gr64_hi; + u32 rx_gr127_lo; + u32 rx_gr127_hi; + u32 rx_gr255_lo; + u32 rx_gr255_hi; + u32 rx_gr511_lo; + u32 rx_gr511_hi; + u32 rx_gr1023_lo; + u32 
rx_gr1023_hi; + u32 rx_gr1518_lo; + u32 rx_gr1518_hi; + u32 rx_gr2047_lo; + u32 rx_gr2047_hi; + u32 rx_gr4095_lo; + u32 rx_gr4095_hi; + u32 rx_gr9216_lo; + u32 rx_gr9216_hi; + u32 rx_gr16383_lo; + u32 rx_gr16383_hi; + u32 rx_grpkt_lo; + u32 rx_grpkt_hi; + u32 rx_grfcs_lo; + u32 rx_grfcs_hi; + u32 rx_gruca_lo; + u32 rx_gruca_hi; + u32 rx_grmca_lo; + u32 rx_grmca_hi; + u32 rx_grbca_lo; + u32 rx_grbca_hi; + u32 rx_grxpf_lo; + u32 rx_grxpf_hi; + u32 rx_grxpp_lo; + u32 rx_grxpp_hi; + u32 rx_grxuo_lo; + u32 rx_grxuo_hi; + u32 rx_grovr_lo; + u32 rx_grovr_hi; + u32 rx_grxcf_lo; + u32 rx_grxcf_hi; + u32 rx_grflr_lo; + u32 rx_grflr_hi; + u32 rx_grpok_lo; + u32 rx_grpok_hi; + u32 rx_grbyt_lo; + u32 rx_grbyt_hi; + u32 rx_grund_lo; + u32 rx_grund_hi; + u32 rx_grfrg_lo; + u32 rx_grfrg_hi; + u32 rx_grerb_lo; + u32 rx_grerb_hi; + u32 rx_grfre_lo; + u32 rx_grfre_hi; + + u32 rx_alignmenterrors_lo; + u32 rx_alignmenterrors_hi; + u32 rx_falsecarrier_lo; + u32 rx_falsecarrier_hi; + u32 rx_llfcmsgcnt_lo; + u32 rx_llfcmsgcnt_hi; + } stats_rx; +}; + +union mac_stats { + struct emac_stats emac_stats; + struct bmac1_stats bmac1_stats; + struct bmac2_stats bmac2_stats; + struct mstat_stats mstat_stats; +}; + + +struct mac_stx { + /* in_bad_octets */ + u32 rx_stat_ifhcinbadoctets_hi; + u32 rx_stat_ifhcinbadoctets_lo; + + /* out_bad_octets */ + u32 tx_stat_ifhcoutbadoctets_hi; + u32 tx_stat_ifhcoutbadoctets_lo; + + /* crc_receive_errors */ + u32 rx_stat_dot3statsfcserrors_hi; + u32 rx_stat_dot3statsfcserrors_lo; + /* alignment_errors */ + u32 rx_stat_dot3statsalignmenterrors_hi; + u32 rx_stat_dot3statsalignmenterrors_lo; + /* carrier_sense_errors */ + u32 rx_stat_dot3statscarriersenseerrors_hi; + u32 rx_stat_dot3statscarriersenseerrors_lo; + /* false_carrier_detections */ + u32 rx_stat_falsecarriererrors_hi; + u32 rx_stat_falsecarriererrors_lo; + + /* runt_packets_received */ + u32 rx_stat_etherstatsundersizepkts_hi; + u32 rx_stat_etherstatsundersizepkts_lo; + /* jabber_packets_received */ + u32 rx_stat_dot3statsframestoolong_hi; + u32 rx_stat_dot3statsframestoolong_lo; + + /* error_runt_packets_received */ + u32 rx_stat_etherstatsfragments_hi; + u32 rx_stat_etherstatsfragments_lo; + /* error_jabber_packets_received */ + u32 rx_stat_etherstatsjabbers_hi; + u32 rx_stat_etherstatsjabbers_lo; + + /* control_frames_received */ + u32 rx_stat_maccontrolframesreceived_hi; + u32 rx_stat_maccontrolframesreceived_lo; + u32 rx_stat_mac_xpf_hi; + u32 rx_stat_mac_xpf_lo; + u32 rx_stat_mac_xcf_hi; + u32 rx_stat_mac_xcf_lo; + + /* xoff_state_entered */ + u32 rx_stat_xoffstateentered_hi; + u32 rx_stat_xoffstateentered_lo; + /* pause_xon_frames_received */ + u32 rx_stat_xonpauseframesreceived_hi; + u32 rx_stat_xonpauseframesreceived_lo; + /* pause_xoff_frames_received */ + u32 rx_stat_xoffpauseframesreceived_hi; + u32 rx_stat_xoffpauseframesreceived_lo; + /* pause_xon_frames_transmitted */ + u32 tx_stat_outxonsent_hi; + u32 tx_stat_outxonsent_lo; + /* pause_xoff_frames_transmitted */ + u32 tx_stat_outxoffsent_hi; + u32 tx_stat_outxoffsent_lo; + /* flow_control_done */ + u32 tx_stat_flowcontroldone_hi; + u32 tx_stat_flowcontroldone_lo; + + /* ether_stats_collisions */ + u32 tx_stat_etherstatscollisions_hi; + u32 tx_stat_etherstatscollisions_lo; + /* single_collision_transmit_frames */ + u32 tx_stat_dot3statssinglecollisionframes_hi; + u32 tx_stat_dot3statssinglecollisionframes_lo; + /* multiple_collision_transmit_frames */ + u32 tx_stat_dot3statsmultiplecollisionframes_hi; + u32 tx_stat_dot3statsmultiplecollisionframes_lo; + /* 
deferred_transmissions */ + u32 tx_stat_dot3statsdeferredtransmissions_hi; + u32 tx_stat_dot3statsdeferredtransmissions_lo; + /* excessive_collision_frames */ + u32 tx_stat_dot3statsexcessivecollisions_hi; + u32 tx_stat_dot3statsexcessivecollisions_lo; + /* late_collision_frames */ + u32 tx_stat_dot3statslatecollisions_hi; + u32 tx_stat_dot3statslatecollisions_lo; + + /* frames_transmitted_64_bytes */ + u32 tx_stat_etherstatspkts64octets_hi; + u32 tx_stat_etherstatspkts64octets_lo; + /* frames_transmitted_65_127_bytes */ + u32 tx_stat_etherstatspkts65octetsto127octets_hi; + u32 tx_stat_etherstatspkts65octetsto127octets_lo; + /* frames_transmitted_128_255_bytes */ + u32 tx_stat_etherstatspkts128octetsto255octets_hi; + u32 tx_stat_etherstatspkts128octetsto255octets_lo; + /* frames_transmitted_256_511_bytes */ + u32 tx_stat_etherstatspkts256octetsto511octets_hi; + u32 tx_stat_etherstatspkts256octetsto511octets_lo; + /* frames_transmitted_512_1023_bytes */ + u32 tx_stat_etherstatspkts512octetsto1023octets_hi; + u32 tx_stat_etherstatspkts512octetsto1023octets_lo; + /* frames_transmitted_1024_1522_bytes */ + u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; + u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; + /* frames_transmitted_1523_9022_bytes */ + u32 tx_stat_etherstatspktsover1522octets_hi; + u32 tx_stat_etherstatspktsover1522octets_lo; + u32 tx_stat_mac_2047_hi; + u32 tx_stat_mac_2047_lo; + u32 tx_stat_mac_4095_hi; + u32 tx_stat_mac_4095_lo; + u32 tx_stat_mac_9216_hi; + u32 tx_stat_mac_9216_lo; + u32 tx_stat_mac_16383_hi; + u32 tx_stat_mac_16383_lo; + + /* internal_mac_transmit_errors */ + u32 tx_stat_dot3statsinternalmactransmiterrors_hi; + u32 tx_stat_dot3statsinternalmactransmiterrors_lo; + + /* if_out_discards */ + u32 tx_stat_mac_ufl_hi; + u32 tx_stat_mac_ufl_lo; +}; + + +#define MAC_STX_IDX_MAX 2 + +struct host_port_stats { + u32 host_port_stats_counter; + + struct mac_stx mac_stx[MAC_STX_IDX_MAX]; + + u32 brb_drop_hi; + u32 brb_drop_lo; + + u32 not_used; /* obsolete */ + u32 pfc_frames_tx_hi; + u32 pfc_frames_tx_lo; + u32 pfc_frames_rx_hi; + u32 pfc_frames_rx_lo; + + u32 eee_lpi_count_hi; + u32 eee_lpi_count_lo; +}; + + +struct host_func_stats { + u32 host_func_stats_start; + + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 host_func_stats_end; +}; + +/* VIC definitions */ +#define VICSTATST_UIF_INDEX 2 + + +/* stats collected for afex. + * NOTE: structure is exactly as expected to be received by the switch. + * order must remain exactly as is unless protocol changes ! 
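+ *
+ * Editorial note (illustrative only): as in the other statistics
+ * structures above, every counter is split into _hi/_lo 32-bit halves;
+ * a hypothetical reader recombines them as
+ *
+ *	u64 tx_ucast = ((u64)stats->tx_unicast_frames_hi << 32) |
+ *		       stats->tx_unicast_frames_lo;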
+ */ +struct afex_stats { + u32 tx_unicast_frames_hi; + u32 tx_unicast_frames_lo; + u32 tx_unicast_bytes_hi; + u32 tx_unicast_bytes_lo; + u32 tx_multicast_frames_hi; + u32 tx_multicast_frames_lo; + u32 tx_multicast_bytes_hi; + u32 tx_multicast_bytes_lo; + u32 tx_broadcast_frames_hi; + u32 tx_broadcast_frames_lo; + u32 tx_broadcast_bytes_hi; + u32 tx_broadcast_bytes_lo; + u32 tx_frames_discarded_hi; + u32 tx_frames_discarded_lo; + u32 tx_frames_dropped_hi; + u32 tx_frames_dropped_lo; + + u32 rx_unicast_frames_hi; + u32 rx_unicast_frames_lo; + u32 rx_unicast_bytes_hi; + u32 rx_unicast_bytes_lo; + u32 rx_multicast_frames_hi; + u32 rx_multicast_frames_lo; + u32 rx_multicast_bytes_hi; + u32 rx_multicast_bytes_lo; + u32 rx_broadcast_frames_hi; + u32 rx_broadcast_frames_lo; + u32 rx_broadcast_bytes_hi; + u32 rx_broadcast_bytes_lo; + u32 rx_frames_discarded_hi; + u32 rx_frames_discarded_lo; + u32 rx_frames_dropped_hi; + u32 rx_frames_dropped_lo; +}; + +/*(DEBLOBBED)*/ + + +/* + * attention bits + */ +struct atten_sp_status_block { + __le32 attn_bits; + __le32 attn_bits_ack; + u8 status_block_id; + u8 reserved0; + __le16 attn_bits_index; + __le32 reserved1; +}; + + +/* + * The eth aggregative context of Cstorm + */ +struct cstorm_eth_ag_context { + u32 __reserved0[10]; +}; + + +/* + * dmae command structure + */ +struct dmae_command { + u32 opcode; +#define DMAE_COMMAND_SRC (0x1<<0) +#define DMAE_COMMAND_SRC_SHIFT 0 +#define DMAE_COMMAND_DST (0x3<<1) +#define DMAE_COMMAND_DST_SHIFT 1 +#define DMAE_COMMAND_C_DST (0x1<<3) +#define DMAE_COMMAND_C_DST_SHIFT 3 +#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) +#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 +#define DMAE_COMMAND_ENDIANITY (0x3<<9) +#define DMAE_COMMAND_ENDIANITY_SHIFT 9 +#define DMAE_COMMAND_PORT (0x1<<11) +#define DMAE_COMMAND_PORT_SHIFT 11 +#define DMAE_COMMAND_CRC_RESET (0x1<<12) +#define DMAE_COMMAND_CRC_RESET_SHIFT 12 +#define DMAE_COMMAND_SRC_RESET (0x1<<13) +#define DMAE_COMMAND_SRC_RESET_SHIFT 13 +#define DMAE_COMMAND_DST_RESET (0x1<<14) +#define DMAE_COMMAND_DST_RESET_SHIFT 14 +#define DMAE_COMMAND_E1HVN (0x3<<15) +#define DMAE_COMMAND_E1HVN_SHIFT 15 +#define DMAE_COMMAND_DST_VN (0x3<<17) +#define DMAE_COMMAND_DST_VN_SHIFT 17 +#define DMAE_COMMAND_C_FUNC (0x1<<19) +#define DMAE_COMMAND_C_FUNC_SHIFT 19 +#define DMAE_COMMAND_ERR_POLICY (0x3<<20) +#define DMAE_COMMAND_ERR_POLICY_SHIFT 20 +#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) +#define DMAE_COMMAND_RESERVED0_SHIFT 22 + u32 src_addr_lo; + u32 src_addr_hi; + u32 dst_addr_lo; + u32 dst_addr_hi; +#if defined(__BIG_ENDIAN) + u16 opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 + u16 len; +#elif defined(__LITTLE_ENDIAN) + u16 len; + u16 opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) +#define 
DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 +#endif + u32 comp_addr_lo; + u32 comp_addr_hi; + u32 comp_val; + u32 crc32; + u32 crc32_c; +#if defined(__BIG_ENDIAN) + u16 crc16_c; + u16 crc16; +#elif defined(__LITTLE_ENDIAN) + u16 crc16; + u16 crc16_c; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 crc_t10; +#elif defined(__LITTLE_ENDIAN) + u16 crc_t10; + u16 reserved3; +#endif +#if defined(__BIG_ENDIAN) + u16 xsum8; + u16 xsum16; +#elif defined(__LITTLE_ENDIAN) + u16 xsum16; + u16 xsum8; +#endif +}; + + +/* + * common data for all protocols + */ +struct doorbell_hdr { + u8 header; +#define DOORBELL_HDR_RX (0x1<<0) +#define DOORBELL_HDR_RX_SHIFT 0 +#define DOORBELL_HDR_DB_TYPE (0x1<<1) +#define DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * Ethernet doorbell + */ +struct eth_tx_doorbell { +#if defined(__BIG_ENDIAN) + u16 npackets; + u8 params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + struct doorbell_hdr hdr; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr hdr; + u8 params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + u16 npackets; +#endif +}; + + +/* + * 3 lines. status block + */ +struct hc_status_block_e1x { + __le16 index_values[HC_SB_MAX_INDICES_E1X]; + __le16 running_index[HC_SB_MAX_SM]; + __le32 rsrv[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e1x { + struct hc_status_block_e1x sb; +}; + + +/* + * 3 lines. status block + */ +struct hc_status_block_e2 { + __le16 index_values[HC_SB_MAX_INDICES_E2]; + __le16 running_index[HC_SB_MAX_SM]; + __le32 reserved[11]; +}; + +/* + * host status block + */ +struct host_hc_status_block_e2 { + struct hc_status_block_e2 sb; +}; + + +/* + * 5 lines. 
slow-path status block + */ +struct hc_sp_status_block { + __le16 index_values[HC_SP_SB_MAX_INDICES]; + __le16 running_index; + __le16 rsrv; + u32 rsrv1; +}; + +/* + * host status block + */ +struct host_sp_status_block { + struct atten_sp_status_block atten_status_block; + struct hc_sp_status_block sp_sb; +}; + + +/* + * IGU driver acknowledgment register + */ +struct igu_ack_register { +#if defined(__BIG_ENDIAN) + u16 sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 + u16 status_block_index; +#elif defined(__LITTLE_ENDIAN) + u16 status_block_index; + u16 sb_id_and_flags; +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0) +#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0 +#define IGU_ACK_REGISTER_STORM_ID (0x7<<5) +#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5 +#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8) +#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8 +#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9) +#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9 +#define IGU_ACK_REGISTER_RESERVED (0x1F<<11) +#define IGU_ACK_REGISTER_RESERVED_SHIFT 11 +#endif +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_backward_compatible { + u32 sb_id_and_flags; +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0) +#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0 +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16) +#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16 +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21) +#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21 +#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24) +#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24 +#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25) +#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25 +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27) +#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27 + u32 reserved_2; +}; + + +/* + * IGU driver acknowledgement register + */ +struct igu_regular { + u32 sb_id_and_flags; +#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0) +#define IGU_REGULAR_SB_INDEX_SHIFT 0 +#define IGU_REGULAR_RESERVED0 (0x1<<20) +#define IGU_REGULAR_RESERVED0_SHIFT 20 +#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21) +#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21 +#define IGU_REGULAR_BUPDATE (0x1<<24) +#define IGU_REGULAR_BUPDATE_SHIFT 24 +#define IGU_REGULAR_ENABLE_INT (0x3<<25) +#define IGU_REGULAR_ENABLE_INT_SHIFT 25 +#define IGU_REGULAR_RESERVED_1 (0x1<<27) +#define IGU_REGULAR_RESERVED_1_SHIFT 27 +#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28) +#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28 +#define IGU_REGULAR_CLEANUP_SET (0x1<<30) +#define IGU_REGULAR_CLEANUP_SET_SHIFT 30 +#define IGU_REGULAR_BCLEANUP (0x1<<31) +#define IGU_REGULAR_BCLEANUP_SHIFT 31 + u32 reserved_2; +}; + +/* + * IGU driver acknowledgement register + */ +union igu_consprod_reg { + struct igu_regular regular; + struct igu_backward_compatible backward_compatible; +}; + + +/* + * Igu control commands + */ +enum igu_ctrl_cmd { + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD +}; + + +/* + * Control register for the IGU command register + */ +struct igu_ctrl_reg { + u32 ctrl_data; +#define 
IGU_CTRL_REG_ADDRESS (0xFFF<<0) +#define IGU_CTRL_REG_ADDRESS_SHIFT 0 +#define IGU_CTRL_REG_FID (0x7F<<12) +#define IGU_CTRL_REG_FID_SHIFT 12 +#define IGU_CTRL_REG_RESERVED (0x1<<19) +#define IGU_CTRL_REG_RESERVED_SHIFT 19 +#define IGU_CTRL_REG_TYPE (0x1<<20) +#define IGU_CTRL_REG_TYPE_SHIFT 20 +#define IGU_CTRL_REG_UNUSED (0x7FF<<21) +#define IGU_CTRL_REG_UNUSED_SHIFT 21 +}; + + +/* + * Igu interrupt command + */ +enum igu_int_cmd { + IGU_INT_ENABLE, + IGU_INT_DISABLE, + IGU_INT_NOP, + IGU_INT_NOP2, + MAX_IGU_INT_CMD +}; + + +/* + * Igu segments + */ +enum igu_seg_access { + IGU_SEG_ACCESS_NORM, + IGU_SEG_ACCESS_DEF, + IGU_SEG_ACCESS_ATTN, + MAX_IGU_SEG_ACCESS +}; + + +/* + * Parser parsing flags field + */ +struct parsing_flags { + __le16 flags; +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0) +#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0 +#define PARSING_FLAGS_VLAN (0x1<<1) +#define PARSING_FLAGS_VLAN_SHIFT 1 +#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2) +#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2 +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3) +#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3 +#define PARSING_FLAGS_IP_OPTIONS (0x1<<5) +#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5 +#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6) +#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6 +#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7) +#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7 +#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9) +#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9 +#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10) +#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10 +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11) +#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11 +#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12) +#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12 +#define PARSING_FLAGS_LLC_SNAP (0x1<<13) +#define PARSING_FLAGS_LLC_SNAP_SHIFT 13 +#define PARSING_FLAGS_RESERVED0 (0x3<<14) +#define PARSING_FLAGS_RESERVED0_SHIFT 14 +}; + + +/* + * Parsing flags for TCP ACK type + */ +enum prs_flags_ack_type { + PRS_FLAG_PUREACK_PIGGY, + PRS_FLAG_PUREACK_PURE, + MAX_PRS_FLAGS_ACK_TYPE +}; + + +/* + * Parsing flags for Ethernet address type + */ +enum prs_flags_eth_addr_type { + PRS_FLAG_ETHTYPE_NON_UNICAST, + PRS_FLAG_ETHTYPE_UNICAST, + MAX_PRS_FLAGS_ETH_ADDR_TYPE +}; + + +/* + * Parsing flags for over-ethernet protocol + */ +enum prs_flags_over_eth { + PRS_FLAG_OVERETH_UNKNOWN, + PRS_FLAG_OVERETH_IPV4, + PRS_FLAG_OVERETH_IPV6, + PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, + MAX_PRS_FLAGS_OVER_ETH +}; + + +/* + * Parsing flags for over-IP protocol + */ +enum prs_flags_over_ip { + PRS_FLAG_OVERIP_UNKNOWN, + PRS_FLAG_OVERIP_TCP, + PRS_FLAG_OVERIP_UDP, + MAX_PRS_FLAGS_OVER_IP +}; + + +/* + * SDM operation gen command (generate aggregative interrupt) + */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM (0x1F<<0) +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE (0x7<<5) +#define SDM_OP_GEN_COMP_TYPE_SHIFT 5 +#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8) +#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8 +#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16) +#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16 +#define SDM_OP_GEN_RESERVED (0x7FFF<<17) +#define SDM_OP_GEN_RESERVED_SHIFT 17 +}; + + +/* + * Timers connection context + */ +struct timers_block_context { + u32 __reserved_0; + u32 __reserved_1; + u32 __reserved_2; + u32 flags; +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 +#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) +#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 +}; + + +/* + * The eth aggregative context of Tstorm + */ +struct tstorm_eth_ag_context { + u32 __reserved0[14]; +}; + + +/* + * The eth aggregative context of Ustorm + */ +struct ustorm_eth_ag_context { + u32 __reserved0; +#if defined(__BIG_ENDIAN) + u8 cdu_usage; + u8 __reserved2; + u16 __reserved1; +#elif defined(__LITTLE_ENDIAN) + u16 __reserved1; + u8 __reserved2; + u8 cdu_usage; +#endif + u32 __reserved3[6]; +}; + + +/* + * The eth aggregative context of Xstorm + */ +struct xstorm_eth_ag_context { + u32 reserved0; +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 reserved2; + u16 reserved1; +#elif defined(__LITTLE_ENDIAN) + u16 reserved1; + u8 reserved2; + u8 cdu_reserved; +#endif + u32 reserved3[30]; +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell { +#if defined(__BIG_ENDIAN) + u16 zero_fill2; + u8 zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + u8 zero_fill1; + u16 zero_fill2; +#endif +}; + + +/* + * doorbell message sent to the chip + */ +struct doorbell_set_prod { +#if defined(__BIG_ENDIAN) + u16 prod; + u8 zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + u8 zero_fill1; + u16 prod; +#endif +}; + + +struct regpair { + __le32 lo; + __le32 hi; +}; + +struct regpair_native { + u32 lo; + u32 hi; +}; + +/* + * Classify rule opcodes in E2/E3 + */ +enum classify_rule { + CLASSIFY_RULE_OPCODE_MAC, + CLASSIFY_RULE_OPCODE_VLAN, + CLASSIFY_RULE_OPCODE_PAIR, + CLASSIFY_RULE_OPCODE_VXLAN, + MAX_CLASSIFY_RULE +}; + + +/* + * Classify rule types in E2/E3 + */ +enum classify_rule_action_type { + CLASSIFY_RULE_REMOVE, + CLASSIFY_RULE_ADD, + MAX_CLASSIFY_RULE_ACTION_TYPE +}; + + +/* + * client init ramrod data + */ +struct client_init_general_data { + u8 client_id; + u8 statistics_counter_id; + u8 statistics_en_flg; + u8 is_fcoe_flg; + u8 activate_flg; + u8 sp_client_id; + __le16 mtu; + u8 statistics_zero_flg; + u8 func_id; + u8 cos; + u8 traffic_type; + u8 fp_hsi_ver; + u8 reserved0[3]; +}; + + +/* + * client init rx data + */ +struct client_init_rx_data { + u8 tpa_en; +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 +#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2) +#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2 +#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3) +#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3 + u8 vmqueue_mode_en_flg; + u8 extra_data_over_sgl_en_flg; + u8 cache_line_alignment_log_size; + u8 enable_dynamic_hc; + u8 max_sges_for_packet; + u8 client_qzone_id; + u8 drop_ip_cs_err_flg; + u8 drop_tcp_cs_err_flg; + u8 drop_ttl0_flg; + u8 drop_udp_cs_err_flg; + u8 inner_vlan_removal_enable_flg; + u8 outer_vlan_removal_enable_flg; + u8 status_block_id; + u8 rx_sb_index_number; + u8 dont_verify_rings_pause_thr_flg; + u8 max_tpa_queues; + u8 silent_vlan_removal_flg; + __le16 max_bytes_on_bd; + __le16 sge_buff_size; + u8 approx_mcast_engine_id; + u8 rss_engine_id; + struct regpair bd_page_base; + struct regpair sge_page_base; + struct regpair cqe_page_base; + u8 is_leading_rss; + u8 is_approx_mcast; + __le16 max_agg_size; + __le16 state; +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) +#define 
CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 +#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) +#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 + __le16 cqe_pause_thr_low; + __le16 cqe_pause_thr_high; + __le16 bd_pause_thr_low; + __le16 bd_pause_thr_high; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; + __le16 rx_cos_mask; + __le16 silent_vlan_value; + __le16 silent_vlan_mask; + u8 handle_ptp_pkts_flg; + u8 reserved6[3]; + __le32 reserved7; +}; + +/* + * client init tx data + */ +struct client_init_tx_data { + u8 enforce_security_flg; + u8 tx_status_block_id; + u8 tx_sb_index_number; + u8 tss_leading_client_id; + u8 tx_switching_flg; + u8 anti_spoofing_flg; + __le16 default_vlan; + struct regpair tx_bd_page_base; + __le16 state; +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 +#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4) +#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4 + u8 default_vlan_flg; + u8 force_default_pri_flg; + u8 tunnel_lso_inc_ip_id; + u8 refuse_outband_vlan_flg; + u8 tunnel_non_lso_pcsum_location; + u8 tunnel_non_lso_outer_ip_csum_location; +}; + +/* + * client init ramrod data + */ +struct client_init_ramrod_data { + struct client_init_general_data general; + struct client_init_rx_data rx; + struct client_init_tx_data tx; +}; + + +/* + * client update ramrod data + */ +struct client_update_ramrod_data { + u8 client_id; + u8 func_id; + u8 inner_vlan_removal_enable_flg; + u8 inner_vlan_removal_change_flg; + u8 outer_vlan_removal_enable_flg; + u8 outer_vlan_removal_change_flg; + u8 anti_spoofing_enable_flg; + u8 anti_spoofing_change_flg; + u8 activate_flg; + u8 activate_change_flg; + __le16 default_vlan; + u8 default_vlan_enable_flg; + u8 default_vlan_change_flg; + __le16 silent_vlan_value; + __le16 silent_vlan_mask; + u8 silent_vlan_removal_flg; + u8 silent_vlan_change_flg; + u8 refuse_outband_vlan_flg; + u8 refuse_outband_vlan_change_flg; + u8 tx_switching_flg; + u8 tx_switching_change_flg; + u8 handle_ptp_pkts_flg; + u8 handle_ptp_pkts_change_flg; + __le16 reserved1; + __le32 echo; +}; + + +/* + * The eth storm context of Cstorm + */ +struct cstorm_eth_st_context { + u32 __reserved0[4]; +}; + + +struct double_regpair { + u32 regpair0_lo; + u32 regpair0_hi; + u32 regpair1_lo; + u32 regpair1_hi; +}; + +/* 2nd parse bd type used in ethernet tx BDs */ +enum eth_2nd_parse_bd_type { + ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL, + MAX_ETH_2ND_PARSE_BD_TYPE +}; + +/* + * Ethernet address typesm used in ethernet tx BDs + */ +enum eth_addr_type { + 
UNKNOWN_ADDRESS, + UNICAST_ADDRESS, + MULTICAST_ADDRESS, + BROADCAST_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + + +/* + * + */ +struct eth_classify_cmd_header { + u8 cmd_general_data; +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0 +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1 +#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) +#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2 +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4 +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5 + u8 func_id; + u8 client_id; + u8 reserved1; +}; + + +/* + * header for eth classification config ramrod + */ +struct eth_classify_header { + u8 rule_cnt; + u8 reserved0; + __le16 reserved1; + __le32 echo; +}; + + +/* + * Command for adding/removing a MAC classification rule + */ +struct eth_classify_mac_cmd { + struct eth_classify_cmd_header header; + __le16 reserved0; + __le16 inner_mac; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 reserved1; +}; + + +/* + * Command for adding/removing a MAC-VLAN pair classification rule + */ +struct eth_classify_pair_cmd { + struct eth_classify_cmd_header header; + __le16 reserved0; + __le16 inner_mac; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan; +}; + + +/* + * Command for adding/removing a VLAN classification rule + */ +struct eth_classify_vlan_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le32 reserved1; + __le16 reserved2; + __le16 vlan; +}; + +/* + * Command for adding/removing a VXLAN classification rule + */ +struct eth_classify_vxlan_cmd { + struct eth_classify_cmd_header header; + __le32 vni; + __le16 inner_mac_lsb; + __le16 inner_mac_mid; + __le16 inner_mac_msb; + __le16 reserved1; +}; + +/* + * union for eth classification rule + */ +union eth_classify_rule_cmd { + struct eth_classify_mac_cmd mac; + struct eth_classify_vlan_cmd vlan; + struct eth_classify_pair_cmd pair; + struct eth_classify_vxlan_cmd vxlan; +}; + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_classify_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data contain client ID need to the ramrod + */ +struct eth_common_ramrod_data { + __le32 client_id; + __le32 reserved1; +}; + + +/* + * The eth storm context of Ustorm + */ +struct ustorm_eth_st_context { + u32 reserved0[52]; +}; + +/* + * The eth storm context of Tstorm + */ +struct tstorm_eth_st_context { + u32 __reserved0[28]; +}; + +/* + * The eth storm context of Xstorm + */ +struct xstorm_eth_st_context { + u32 reserved0[60]; +}; + +/* + * Ethernet connection context + */ +struct eth_context { + struct ustorm_eth_st_context ustorm_st_context; + struct tstorm_eth_st_context tstorm_st_context; + struct xstorm_eth_ag_context xstorm_ag_context; + struct tstorm_eth_ag_context tstorm_ag_context; + struct cstorm_eth_ag_context cstorm_ag_context; + struct ustorm_eth_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct xstorm_eth_st_context xstorm_st_context; + struct cstorm_eth_st_context cstorm_st_context; +}; + + +/* + * union for sgl and raw data. 
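+ * Editorial note (illustrative only): the SGL_RAW_SEL bit of the CQE
+ * that embeds this union selects which member is meaningful, e.g. for
+ * a fast path CQE:
+ *
+ *	if (cqe->type_error_flags & ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL)
+ *		... use sgl_or_raw_data.raw_data ...
+ *	else
+ *		... use sgl_or_raw_data.sgl ...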
+ */ +union eth_sgl_or_raw_data { + __le16 sgl[8]; + u32 raw_data[4]; +}; + +/* + * eth FP end aggregation CQE parameters struct + */ +struct eth_end_agg_rx_cqe { + u8 type_error_flags; +#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) +#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) +#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 + u8 reserved1; + u8 queue_index; + u8 reserved2; + __le32 timestamp_delta; + __le16 num_of_coalesced_segs; + __le16 pkt_len; + u8 pure_ack_count; + u8 reserved3; + __le16 reserved4; + union eth_sgl_or_raw_data sgl_or_raw_data; + __le32 reserved5[8]; +}; + + +/* + * regular eth FP CQE parameters struct + */ +struct eth_fast_path_rx_cqe { + u8 type_error_flags; +#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) +#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6) +#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7) +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7 + u8 status_flags; +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3) +#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4) +#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5) +#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6) +#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) +#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 + u8 queue_index; + u8 placement_offset; + __le32 rss_hash_result; + __le16 vlan_tag; + __le16 pkt_len_or_gro_seg_len; + __le16 len_on_bd; + struct parsing_flags pars_flags; + union eth_sgl_or_raw_data sgl_or_raw_data; + __le32 reserved1[7]; + u32 marker; +}; + + +/* + * Command for setting classification flags for a client + */ +struct eth_filter_rules_cmd { + u8 cmd_general_data; +#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) +#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) +#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) +#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 + u8 func_id; + u8 client_id; + u8 reserved1; + __le16 state; +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 +#define 
ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 +#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) +#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 + __le16 reserved3; + struct regpair reserved4; +}; + + +/* + * parameters for eth classification filters ramrod + */ +struct eth_filter_rules_ramrod_data { + struct eth_classify_header header; + struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; +}; + +/* Hsi version */ +enum eth_fp_hsi_ver { + ETH_FP_HSI_VER_0, + ETH_FP_HSI_VER_1, + ETH_FP_HSI_VER_2, + MAX_ETH_FP_HSI_VER +}; + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_general_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data for Halt ramrod + */ +struct eth_halt_ramrod_data { + __le32 client_id; + __le32 reserved0; +}; + + +/* + * destination and source mac address. + */ +struct eth_mac_addresses { +#if defined(__BIG_ENDIAN) + __le16 dst_mid; + __le16 dst_lo; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_lo; + __le16 dst_mid; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_lo; + __le16 dst_hi; +#elif defined(__LITTLE_ENDIAN) + __le16 dst_hi; + __le16 src_lo; +#endif +#if defined(__BIG_ENDIAN) + __le16 src_hi; + __le16 src_mid; +#elif defined(__LITTLE_ENDIAN) + __le16 src_mid; + __le16 src_hi; +#endif +}; + +/* tunneling related data */ +struct eth_tunnel_data { + __le16 dst_lo; + __le16 dst_mid; + __le16 dst_hi; + __le16 fw_ip_hdr_csum; + __le16 pseudo_csum; + u8 ip_hdr_start_inner_w; + u8 flags; +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 +}; + +/* union for mac addresses and for tunneling data. + * considered as tunneling data only if (tunnel_exist == 1). 
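+ * Editorial note (illustrative only): the tunnel_exist flag referenced
+ * above is carried elsewhere in the tx BD chain, not inside this union;
+ * a hypothetical sender selects the member accordingly, e.g.
+ *
+ *	if (tunnel_exist)
+ *		fill data.tunnel_data;
+ *	else
+ *		fill data.mac_addr;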
+ */ +union eth_mac_addr_or_tunnel_data { + struct eth_mac_addresses mac_addr; + struct eth_tunnel_data tunnel_data; +}; + +/*Command for setting multicast classification for a client */ +struct eth_multicast_rules_cmd { + u8 cmd_general_data; +#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) +#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) +#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) +#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 +#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) +#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 + u8 func_id; + u8 bin_id; + u8 engine_id; + __le32 reserved2; + struct regpair reserved3; +}; + +/* + * parameters for multicast classification ramrod + */ +struct eth_multicast_rules_ramrod_data { + struct eth_classify_header header; + struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; +}; + +/* + * Place holder for ramrods protocol specific data + */ +struct ramrod_data { + __le32 data_lo; + __le32 data_hi; +}; + +/* + * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits) + */ +union eth_ramrod_data { + struct ramrod_data general; +}; + + +/* + * RSS toeplitz hash type, as reported in CQE + */ +enum eth_rss_hash_type { + DEFAULT_HASH_TYPE, + IPV4_HASH_TYPE, + TCP_IPV4_HASH_TYPE, + IPV6_HASH_TYPE, + TCP_IPV6_HASH_TYPE, + VLAN_PRI_HASH_TYPE, + E1HOV_PRI_HASH_TYPE, + DSCP_HASH_TYPE, + MAX_ETH_RSS_HASH_TYPE +}; + + +/* + * Ethernet RSS mode + */ +enum eth_rss_mode { + ETH_RSS_MODE_DISABLED, + ETH_RSS_MODE_REGULAR, + ETH_RSS_MODE_VLAN_PRI, + ETH_RSS_MODE_E1HOV_PRI, + ETH_RSS_MODE_IP_DSCP, + MAX_ETH_RSS_MODE +}; + + +/* + * parameters for RSS update ramrod (E2) + */ +struct eth_rss_update_ramrod_data { + u8 rss_engine_id; + u8 rss_mode; + __le16 capabilities; +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8) +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9) +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9 +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10) +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12 + u8 rss_result_mask; + u8 
reserved3; + __le16 reserved4; + u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; + __le32 rss_key[T_ETH_RSS_KEY]; + __le32 echo; + __le32 reserved5; +}; + + +/* + * The eth Rx Buffer Descriptor + */ +struct eth_rx_bd { + __le32 addr_lo; + __le32 addr_hi; +}; + + +/* + * Eth Rx Cqe structure- general structure for ramrods + */ +struct common_ramrod_eth_rx_cqe { + u8 ramrod_type; +#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) +#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) +#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 + u8 conn_type; + __le16 reserved1; + __le32 conn_and_cmd_data; +#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0) +#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0 +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) +#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 + struct ramrod_data protocol_data; + __le32 echo; + __le32 reserved2[11]; +}; + +/* + * Rx Last CQE in page (in ETH) + */ +struct eth_rx_cqe_next_page { + __le32 addr_lo; + __le32 addr_hi; + __le32 reserved[14]; +}; + +/* + * union for all eth rx cqe types (fix their sizes) + */ +union eth_rx_cqe { + struct eth_fast_path_rx_cqe fast_path_cqe; + struct common_ramrod_eth_rx_cqe ramrod_cqe; + struct eth_rx_cqe_next_page next_page_cqe; + struct eth_end_agg_rx_cqe end_agg_cqe; +}; + + +/* + * Values for RX ETH CQE type field + */ +enum eth_rx_cqe_type { + RX_ETH_CQE_TYPE_ETH_FASTPATH, + RX_ETH_CQE_TYPE_ETH_RAMROD, + RX_ETH_CQE_TYPE_ETH_START_AGG, + RX_ETH_CQE_TYPE_ETH_STOP_AGG, + MAX_ETH_RX_CQE_TYPE +}; + + +/* + * Type of SGL/Raw field in ETH RX fast path CQE + */ +enum eth_rx_fp_sel { + ETH_FP_CQE_REGULAR, + ETH_FP_CQE_RAW, + MAX_ETH_RX_FP_SEL +}; + + +/* + * The eth Rx SGE Descriptor + */ +struct eth_rx_sge { + __le32 addr_lo; + __le32 addr_hi; +}; + + +/* + * common data for all protocols + */ +struct spe_hdr { + __le32 conn_and_cmd_data; +#define SPE_HDR_CID (0xFFFFFF<<0) +#define SPE_HDR_CID_SHIFT 0 +#define SPE_HDR_CMD_ID (0xFF<<24) +#define SPE_HDR_CMD_ID_SHIFT 24 + __le16 type; +#define SPE_HDR_CONN_TYPE (0xFF<<0) +#define SPE_HDR_CONN_TYPE_SHIFT 0 +#define SPE_HDR_FUNCTION_ID (0xFF<<8) +#define SPE_HDR_FUNCTION_ID_SHIFT 8 + __le16 reserved1; +}; + +/* + * specific data for ethernet slow path element + */ +union eth_specific_data { + u8 protocol_data[8]; + struct regpair client_update_ramrod_data; + struct regpair client_init_ramrod_init_data; + struct eth_halt_ramrod_data halt_ramrod_data; + struct regpair update_data_addr; + struct eth_common_ramrod_data common_ramrod_data; + struct regpair classify_cfg_addr; + struct regpair filter_cfg_addr; + struct regpair mcast_cfg_addr; +}; + +/* + * Ethernet slow path element + */ +struct eth_spe { + struct spe_hdr hdr; + union eth_specific_data data; +}; + + +/* + * Ethernet command ID for slow path elements + */ +enum eth_spqe_cmd_id { + RAMROD_CMD_ID_ETH_UNUSED, + RAMROD_CMD_ID_ETH_CLIENT_SETUP, + RAMROD_CMD_ID_ETH_HALT, + RAMROD_CMD_ID_ETH_FORWARD_SETUP, + RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP, + RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + RAMROD_CMD_ID_ETH_EMPTY, + RAMROD_CMD_ID_ETH_TERMINATE, + RAMROD_CMD_ID_ETH_TPA_UPDATE, + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, + RAMROD_CMD_ID_ETH_FILTER_RULES, + RAMROD_CMD_ID_ETH_MULTICAST_RULES, + RAMROD_CMD_ID_ETH_RSS_UPDATE, + RAMROD_CMD_ID_ETH_SET_MAC, + MAX_ETH_SPQE_CMD_ID +}; + + +/* + * eth tpa update command + */ +enum eth_tpa_update_command { + TPA_UPDATE_NONE_COMMAND, + 
TPA_UPDATE_ENABLE_COMMAND, + TPA_UPDATE_DISABLE_COMMAND, + MAX_ETH_TPA_UPDATE_COMMAND +}; + +/* In case of LSO over IPv4 tunnel, whether to increment + * IP ID on external IP header or internal IP header + */ +enum eth_tunnel_lso_inc_ip_id { + EXT_HEADER, + INT_HEADER, + MAX_ETH_TUNNEL_LSO_INC_IP_ID +}; + +/* In case tunnel exist and L4 checksum offload, + * the pseudo checksum location, on packet or on BD. + */ +enum eth_tunnel_non_lso_csum_location { + CSUM_ON_PKT, + CSUM_ON_BD, + MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION +}; + +/* + * Tx regular BD structure + */ +struct eth_tx_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 total_pkt_bytes; + __le16 nbytes; + u8 reserved[4]; +}; + + +/* + * structure for easy accessibility to assembler + */ +struct eth_tx_bd_flags { + u8 as_bitfield; +#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) +#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 +#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) +#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 +#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) +#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 +#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) +#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 +#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) +#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 +#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) +#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 +#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) +#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 +}; + +/* + * The eth Tx Buffer Descriptor + */ +struct eth_tx_start_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 nbd; + __le16 nbytes; + __le16 vlan_or_ethertype; + struct eth_tx_bd_flags bd_flags; + u8 general_data; +#define ETH_TX_START_BD_HDR_NBDS (0x7<<0) +#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3) +#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3 +#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) +#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 +#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) +#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5 +#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7) +#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7 +}; + +/* + * Tx parsing BD structure for ETH E1/E1h + */ +struct eth_tx_parse_bd_e1x { + __le16 global_data; +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE (0x3<<4) +#define ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<6) +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<7) +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 7 +#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<8) +#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 8 +#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x7F<<9) +#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 9 + u8 tcp_flags; +#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) +#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) +#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) +#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) +#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) +#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) +#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) +#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) +#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 + u8 ip_hlen_w; + 
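	/* (Editorial note, not part of the imported source: the _w / _dw
	 *  suffixes used by the parsing BDs denote lengths in 16-bit words and
	 *  32-bit dwords respectively; for LSO the driver is expected to fill
	 *  lso_mss with the TCP MSS and tcp_pseudo_csum with the TCP
	 *  pseudo-header checksum.) */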
__le16 total_hlen_w; + __le16 tcp_pseudo_csum; + __le16 lso_mss; + __le16 ip_id; + __le32 tcp_send_seq; +}; + +/* + * Tx parsing BD structure for ETH E2 + */ +struct eth_tx_parse_bd_e2 { + union eth_mac_addr_or_tunnel_data data; + __le32 parsing_data; +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0) +#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11) +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11 +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15) +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 15 +#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<16) +#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 16 +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE (0x3<<30) +#define ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT 30 +}; + +/* + * Tx 2nd parsing BD structure for ETH packet + */ +struct eth_tx_parse_2nd_bd { + __le16 global_data; +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) +#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) +#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7) +#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) +#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 +#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13) +#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13 + u8 bd_type; +#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0) +#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4 + u8 reserved3; + u8 tcp_flags; +#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) +#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1) +#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2) +#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3) +#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5) +#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6) +#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) +#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 + u8 reserved4; + u8 tunnel_udp_hdr_start_w; + u8 fw_ip_hdr_to_payload_w; + __le16 fw_ip_csum_wo_len_flags_frag; + __le16 hw_ip_id; + __le32 tcp_send_seq; +}; + +/* The last BD in the BD memory will hold a pointer to the next BD memory */ +struct eth_tx_next_bd { + __le32 addr_lo; + __le32 addr_hi; + u8 reserved[8]; +}; + +/* + * union for 4 Bd types + */ +union eth_tx_bd_types { + struct eth_tx_start_bd start_bd; + struct eth_tx_bd reg_bd; + struct eth_tx_parse_bd_e1x parse_bd_e1x; + struct eth_tx_parse_bd_e2 parse_bd_e2; + struct eth_tx_parse_2nd_bd parse_2nd_bd; + struct eth_tx_next_bd next_bd; +}; + +/* + * array of 13 bds as appears in the eth xstorm context + */ +struct eth_tx_bds_array { + union eth_tx_bd_types bds[13]; +}; + + +/* + * VLAN mode on TX BDs + */ +enum eth_tx_vlan_type { + X_ETH_NO_VLAN, + X_ETH_OUTBAND_VLAN, + X_ETH_INBAND_VLAN, + X_ETH_FW_ADDED_VLAN, + MAX_ETH_TX_VLAN_TYPE +}; + + +/* + * Ethernet VLAN filtering mode in E1x + */ +enum 
eth_vlan_filter_mode { + ETH_VLAN_FILTER_ANY_VLAN, + ETH_VLAN_FILTER_SPECIFIC_VLAN, + ETH_VLAN_FILTER_CLASSIFY, + MAX_ETH_VLAN_FILTER_MODE +}; + + +/* + * MAC filtering configuration command header + */ +struct mac_configuration_hdr { + u8 length; + u8 offset; + __le16 client_id; + __le32 echo; +}; + +/* + * MAC address in list for ramrod + */ +struct mac_configuration_entry { + __le16 lsb_mac_addr; + __le16 middle_mac_addr; + __le16 msb_mac_addr; + __le16 vlan_id; + u8 pf_id; + u8 flags; +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0) +#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0 +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1) +#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1 +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2) +#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2 +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4) +#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4 +#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5) +#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 +#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) +#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 + __le16 reserved0; + __le32 clients_bit_vector; +}; + +/* + * MAC filtering configuration command + */ +struct mac_configuration_cmd { + struct mac_configuration_hdr hdr; + struct mac_configuration_entry config_table[64]; +}; + + +/* + * Set-MAC command type (in E1x) + */ +enum set_mac_action_type { + T_ETH_MAC_COMMAND_INVALIDATE, + T_ETH_MAC_COMMAND_SET, + MAX_SET_MAC_ACTION_TYPE +}; + + +/* + * Ethernet TPA Modes + */ +enum tpa_mode { + TPA_LRO, + TPA_GRO, + MAX_TPA_MODE}; + + +/* + * tpa update ramrod data + */ +struct tpa_update_ramrod_data { + u8 update_ipv4; + u8 update_ipv6; + u8 client_id; + u8 max_tpa_queues; + u8 max_sges_for_packet; + u8 complete_on_both_clients; + u8 dont_verify_rings_pause_thr_flg; + u8 tpa_mode; + __le16 sge_buff_size; + __le16 max_agg_size; + __le32 sge_page_base_lo; + __le32 sge_page_base_hi; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; +}; + + +/* + * approximate-match multicast filtering for E1H per function in Tstorm + */ +struct tstorm_eth_approximate_match_multicast_filtering { + u32 mcast_add_hash_bit_array[8]; +}; + + +/* + * Common configuration parameters per function in Tstorm + */ +struct tstorm_eth_function_common_config { + __le16 config_flags; +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 + u8 rss_result_mask; + u8 reserved1; + __le16 vlan_id[2]; +}; + + +/* + * MAC filtering configuration parameters per port in Tstorm + */ +struct tstorm_eth_mac_filter_config { + u32 
ucast_drop_all; + u32 ucast_accept_all; + u32 mcast_drop_all; + u32 mcast_accept_all; + u32 bcast_accept_all; + u32 vlan_filter[2]; + u32 unmatched_unicast; +}; + + +/* + * tx only queue init ramrod data + */ +struct tx_queue_init_ramrod_data { + struct client_init_general_data general; + struct client_init_tx_data tx; +}; + + +/* + * Three RX producers for ETH + */ +struct ustorm_eth_rx_producers { +#if defined(__BIG_ENDIAN) + u16 bd_prod; + u16 cqe_prod; +#elif defined(__LITTLE_ENDIAN) + u16 cqe_prod; + u16 bd_prod; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved; + u16 sge_prod; +#elif defined(__LITTLE_ENDIAN) + u16 sge_prod; + u16 reserved; +#endif +}; + + +/* + * FCoE RX statistics parameters section#0 + */ +struct fcoe_rx_stat_params_section0 { + __le32 fcoe_rx_pkt_cnt; + __le32 fcoe_rx_byte_cnt; +}; + + +/* + * FCoE RX statistics parameters section#1 + */ +struct fcoe_rx_stat_params_section1 { + __le32 fcoe_ver_cnt; + __le32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#2 + */ +struct fcoe_rx_stat_params_section2 { + __le32 fc_crc_cnt; + __le32 eofa_del_cnt; + __le32 miss_frame_cnt; + __le32 seq_timeout_cnt; + __le32 drop_seq_cnt; + __le32 fcoe_rx_drop_pkt_cnt; + __le32 fcp_rx_pkt_cnt; + __le32 reserved0; +}; + + +/* + * FCoE TX statistics parameters + */ +struct fcoe_tx_stat_params { + __le32 fcoe_tx_pkt_cnt; + __le32 fcoe_tx_byte_cnt; + __le32 fcp_tx_pkt_cnt; + __le32 reserved0; +}; + +/* + * FCoE statistics parameters + */ +struct fcoe_statistics_params { + struct fcoe_tx_stat_params tx_stat; + struct fcoe_rx_stat_params_section0 rx_stat0; + struct fcoe_rx_stat_params_section1 rx_stat1; + struct fcoe_rx_stat_params_section2 rx_stat2; +}; + + +/* + * The data afex vif list ramrod need + */ +struct afex_vif_list_ramrod_data { + u8 afex_vif_list_command; + u8 func_bit_map; + __le16 vif_list_index; + u8 func_to_clear; + u8 echo; + __le16 reserved1; +}; + + +/* + * cfc delete event data + */ +struct cfc_del_event_data { + u32 cid; + u32 reserved0; + u32 reserved1; +}; + + +/* + * per-port SAFC demo variables + */ +struct cmng_flags_per_port { + u32 cmng_enables; +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) +#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 +#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) +#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 + u32 __reserved1; +}; + + +/* + * per-port rate shaping variables + */ +struct rate_shaping_vars_per_port { + u32 rs_periodic_timeout; + u32 rs_threshold; +}; + +/* + * per-port fairness variables + */ +struct fairness_vars_per_port { + u32 upper_bound; + u32 fair_threshold; + u32 fairness_timeout; + u32 reserved0; +}; + +/* + * per-port SAFC variables + */ +struct safc_struct_per_port { +#if defined(__BIG_ENDIAN) + u16 __reserved1; + u8 __reserved0; + u8 safc_timeout_usec; +#elif defined(__LITTLE_ENDIAN) + u8 safc_timeout_usec; + u8 __reserved0; + u16 __reserved1; +#endif + u8 cos_to_traffic_types[MAX_COS_NUMBER]; + u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; +}; + +/* + * Per-port congestion management variables + */ +struct cmng_struct_per_port { + struct rate_shaping_vars_per_port rs_vars; + struct fairness_vars_per_port fair_vars; + struct safc_struct_per_port safc_vars; + struct 
cmng_flags_per_port flags; +}; + +/* + * a single rate shaping counter. can be used as protocol or vnic counter + */ +struct rate_shaping_counter { + u32 quota; +#if defined(__BIG_ENDIAN) + u16 __reserved0; + u16 rate; +#elif defined(__LITTLE_ENDIAN) + u16 rate; + u16 __reserved0; +#endif +}; + +/* + * per-vnic rate shaping variables + */ +struct rate_shaping_vars_per_vn { + struct rate_shaping_counter vn_counter; +}; + +/* + * per-vnic fairness variables + */ +struct fairness_vars_per_vn { + u32 cos_credit_delta[MAX_COS_NUMBER]; + u32 vn_credit_delta; + u32 __reserved0; +}; + +/* + * cmng port init state + */ +struct cmng_vnic { + struct rate_shaping_vars_per_vn vnic_max_rate[4]; + struct fairness_vars_per_vn vnic_min_rate[4]; +}; + +/* + * cmng port init state + */ +struct cmng_init { + struct cmng_struct_per_port port; + struct cmng_vnic vnic; +}; + + +/* + * driver parameters for congestion management init, all rates are in Mbps + */ +struct cmng_init_input { + u32 port_rate; + u16 vnic_min_rate[4]; + u16 vnic_max_rate[4]; + u16 cos_min_rate[MAX_COS_NUMBER]; + u16 cos_to_pause_mask[MAX_COS_NUMBER]; + struct cmng_flags_per_port flags; +}; + + +/* + * Protocol-common command ID for slow path elements + */ +enum common_spqe_cmd_id { + RAMROD_CMD_ID_COMMON_UNUSED, + RAMROD_CMD_ID_COMMON_FUNCTION_START, + RAMROD_CMD_ID_COMMON_FUNCTION_STOP, + RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, + RAMROD_CMD_ID_COMMON_CFC_DEL, + RAMROD_CMD_ID_COMMON_CFC_DEL_WB, + RAMROD_CMD_ID_COMMON_STAT_QUERY, + RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, + RAMROD_CMD_ID_COMMON_START_TRAFFIC, + RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, + RAMROD_CMD_ID_COMMON_SET_TIMESYNC, + MAX_COMMON_SPQE_CMD_ID +}; + +/* + * Per-protocol connection types + */ +enum connection_type { + ETH_CONNECTION_TYPE, + TOE_CONNECTION_TYPE, + RDMA_CONNECTION_TYPE, + ISCSI_CONNECTION_TYPE, + FCOE_CONNECTION_TYPE, + RESERVED_CONNECTION_TYPE_0, + RESERVED_CONNECTION_TYPE_1, + RESERVED_CONNECTION_TYPE_2, + NONE_CONNECTION_TYPE, + MAX_CONNECTION_TYPE +}; + + +/* + * Cos modes + */ +enum cos_mode { + OVERRIDE_COS, + STATIC_COS, + FW_WRR, + MAX_COS_MODE +}; + + +/* + * Dynamic HC counters set by the driver + */ +struct hc_dynamic_drv_counter { + u32 val[HC_SB_MAX_DYNAMIC_INDICES]; +}; + +/* + * zone A per-queue data + */ +struct cstorm_queue_zone_data { + struct hc_dynamic_drv_counter hc_dyn_drv_cnt; + struct regpair reserved[2]; +}; + + +/* + * Vf-PF channel data in cstorm ram (non-triggered zone) + */ +struct vf_pf_channel_zone_data { + u32 msg_addr_lo; + u32 msg_addr_hi; +}; + +/* + * zone for VF non-triggered data + */ +struct non_trigger_vf_zone { + struct vf_pf_channel_zone_data vf_pf_channel; +}; + +/* + * Vf-PF channel trigger zone in cstorm ram + */ +struct vf_pf_channel_zone_trigger { + u8 addr_valid; +}; + +/* + * zone that triggers the in-bound interrupt + */ +struct trigger_vf_zone { +#if defined(__BIG_ENDIAN) + u16 reserved1; + u8 reserved0; + struct vf_pf_channel_zone_trigger vf_pf_channel; +#elif defined(__LITTLE_ENDIAN) + struct vf_pf_channel_zone_trigger vf_pf_channel; + u8 reserved0; + u16 reserved1; +#endif + u32 reserved2; +}; + +/* + * zone B per-VF data + */ +struct cstorm_vf_zone_data { + struct non_trigger_vf_zone non_trigger; + struct trigger_vf_zone trigger; +}; + + +/* + * Dynamic host coalescing init parameters, per state machine + */ +struct dynamic_hc_sm_config { + u32 threshold[3]; + u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; + u8 
hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; +}; + +/* + * Dynamic host coalescing init parameters + */ +struct dynamic_hc_config { + struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM]; +}; + + +struct e2_integ_data { +#if defined(__BIG_ENDIAN) + u8 flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7<<5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 + u8 cos; + u8 voq; + u8 pbf_queue; +#elif defined(__LITTLE_ENDIAN) + u8 pbf_queue; + u8 voq; + u8 cos; + u8 flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7<<5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 reserved2; + u8 ramEn; +#elif defined(__LITTLE_ENDIAN) + u8 ramEn; + u8 reserved2; + u16 reserved3; +#endif +}; + + +/* + * set mac event data + */ +struct eth_event_data { + u32 echo; + u32 reserved0; + u32 reserved1; +}; + + +/* + * pf-vf event data + */ +struct vf_pf_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; + u32 msg_addr_lo; + u32 msg_addr_hi; +}; + +/* + * VF FLR event data + */ +struct vf_flr_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; + u32 reserved2; + u32 reserved3; +}; + +/* + * malicious VF event data + */ +struct malicious_vf_event_data { + u8 vf_id; + u8 err_id; + u16 reserved1; + u32 reserved2; + u32 reserved3; +}; + +/* + * vif list event data + */ +struct vif_list_event_data { + u8 func_bit_map; + u8 echo; + __le16 reserved0; + __le32 reserved1; + __le32 reserved2; +}; + +/* function update event data */ +struct function_update_event_data { + u8 echo; + u8 reserved; + __le16 reserved0; + __le32 reserved1; + __le32 reserved2; +}; + + +/* union for all event ring message types */ +union event_data { + struct vf_pf_event_data vf_pf_event; + struct eth_event_data eth_event; + struct cfc_del_event_data cfc_del_event; + struct vf_flr_event_data vf_flr_event; + struct malicious_vf_event_data malicious_vf_event; + struct vif_list_event_data vif_list_event; + struct function_update_event_data function_update_event; +}; + + +/* + * per PF event ring data + */ +struct event_ring_data { + struct regpair_native base_addr; +#if defined(__BIG_ENDIAN) + u8 index_id; + u8 sb_id; + u16 producer; +#elif defined(__LITTLE_ENDIAN) + u16 producer; + u8 sb_id; + u8 index_id; +#endif + u32 reserved0; +}; + + +/* + * event ring message element (each element is 128 bits) + */ +struct event_ring_msg { + u8 opcode; + u8 error; + u16 reserved1; + union event_data data; +}; + +/* + * event ring next page element (128 bits) + */ +struct event_ring_next { + struct regpair addr; + u32 reserved[2]; +}; + +/* + * union for event ring element types (each element is 128 bits) + */ +union event_ring_elem { + struct 
event_ring_msg message; + struct event_ring_next next_page; +}; + + +/* + * Common event ring opcodes + */ +enum event_ring_opcode { + EVENT_RING_OPCODE_VF_PF_CHANNEL, + EVENT_RING_OPCODE_FUNCTION_START, + EVENT_RING_OPCODE_FUNCTION_STOP, + EVENT_RING_OPCODE_CFC_DEL, + EVENT_RING_OPCODE_CFC_DEL_WB, + EVENT_RING_OPCODE_STAT_QUERY, + EVENT_RING_OPCODE_STOP_TRAFFIC, + EVENT_RING_OPCODE_START_TRAFFIC, + EVENT_RING_OPCODE_VF_FLR, + EVENT_RING_OPCODE_MALICIOUS_VF, + EVENT_RING_OPCODE_FORWARD_SETUP, + EVENT_RING_OPCODE_RSS_UPDATE_RULES, + EVENT_RING_OPCODE_FUNCTION_UPDATE, + EVENT_RING_OPCODE_AFEX_VIF_LISTS, + EVENT_RING_OPCODE_SET_MAC, + EVENT_RING_OPCODE_CLASSIFICATION_RULES, + EVENT_RING_OPCODE_FILTERS_RULES, + EVENT_RING_OPCODE_MULTICAST_RULES, + EVENT_RING_OPCODE_SET_TIMESYNC, + MAX_EVENT_RING_OPCODE +}; + +/* + * Modes for fairness algorithm + */ +enum fairness_mode { + FAIRNESS_COS_WRR_MODE, + FAIRNESS_COS_ETS_MODE, + MAX_FAIRNESS_MODE +}; + + +/* + * Priority and cos + */ +struct priority_cos { + u8 priority; + u8 cos; + __le16 reserved1; +}; + +/* + * The data for flow control configuration + */ +struct flow_control_configuration { + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; + u8 dcb_enabled; + u8 dcb_version; + u8 dont_add_pri_0_en; + u8 reserved1; + __le32 reserved2; +}; + + +/* + * + */ +struct function_start_data { + u8 function_mode; + u8 allow_npar_tx_switching; + __le16 sd_vlan_tag; + __le16 vif_id; + u8 path_id; + u8 network_cos_mode; + u8 dmae_cmd_id; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + u8 sd_accept_mf_clss_fail; + __le16 vxlan_dst_port; + __le16 sd_accept_mf_clss_fail_ethtype; + __le16 sd_vlan_eth_type; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + u8 sd_accept_mf_clss_fail_match_ethtype; + u8 no_added_tags; +}; + +struct function_update_data { + u8 vif_id_change_flg; + u8 afex_default_vlan_change_flg; + u8 allowed_priorities_change_flg; + u8 network_cos_mode_change_flg; + __le16 vif_id; + __le16 afex_default_vlan; + u8 allowed_priorities; + u8 network_cos_mode; + u8 lb_mode_en_change_flg; + u8 lb_mode_en; + u8 tx_switch_suspend_change_flg; + u8 tx_switch_suspend; + u8 echo; + u8 update_tunn_cfg_flg; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + __le16 vxlan_dst_port; + u8 sd_vlan_force_pri_change_flg; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + u8 sd_vlan_tag_change_flg; + u8 sd_vlan_eth_type_change_flg; + u8 reserved1; + __le16 sd_vlan_tag; + __le16 sd_vlan_eth_type; +}; + +/* + * FW version stored in the Xstorm RAM + */ +struct fw_version { +#if defined(__BIG_ENDIAN) + u8 engineering; + u8 revision; + u8 minor; + u8 major; +#elif defined(__LITTLE_ENDIAN) + u8 major; + u8 minor; + u8 revision; + u8 engineering; +#endif + u32 flags; +#define FW_VERSION_OPTIMIZED (0x1<<0) +#define FW_VERSION_OPTIMIZED_SHIFT 0 +#define FW_VERSION_BIG_ENDIEN (0x1<<1) +#define FW_VERSION_BIG_ENDIEN_SHIFT 1 +#define FW_VERSION_CHIP_VERSION (0x3<<2) +#define FW_VERSION_CHIP_VERSION_SHIFT 2 +#define __FW_VERSION_RESERVED (0xFFFFFFF<<4) +#define __FW_VERSION_RESERVED_SHIFT 4 +}; + + +/* GRE Tunnel Mode */ +enum gre_tunnel_type { + NVGRE_TUNNEL, + L2GRE_TUNNEL, + IPGRE_TUNNEL, + MAX_GRE_TUNNEL_TYPE +}; + +/* + * Dynamic Host-Coalescing - Driver(host) counters + */ +struct hc_dynamic_sb_drv_counters { + u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES]; +}; + + +/* + * 2 bytes. 
configuration/state parameters for a single protocol index + */ +struct hc_index_data { +#if defined(__BIG_ENDIAN) + u8 flags; +#define HC_INDEX_DATA_SM_ID (0x1<<0) +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F<<3) +#define HC_INDEX_DATA_RESERVE_SHIFT 3 + u8 timeout; +#elif defined(__LITTLE_ENDIAN) + u8 timeout; + u8 flags; +#define HC_INDEX_DATA_SM_ID (0x1<<0) +#define HC_INDEX_DATA_SM_ID_SHIFT 0 +#define HC_INDEX_DATA_HC_ENABLED (0x1<<1) +#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1 +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2) +#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2 +#define HC_INDEX_DATA_RESERVE (0x1F<<3) +#define HC_INDEX_DATA_RESERVE_SHIFT 3 +#endif +}; + + +/* + * HC state-machine + */ +struct hc_status_block_sm { +#if defined(__BIG_ENDIAN) + u8 igu_seg_id; + u8 igu_sb_id; + u8 timer_value; + u8 __flags; +#elif defined(__LITTLE_ENDIAN) + u8 __flags; + u8 timer_value; + u8 igu_sb_id; + u8 igu_seg_id; +#endif + u32 time_to_expire; +}; + +/* + * hold PCI identification variables- used in various places in firmware + */ +struct pci_entity { +#if defined(__BIG_ENDIAN) + u8 vf_valid; + u8 vf_id; + u8 vnic_id; + u8 pf_id; +#elif defined(__LITTLE_ENDIAN) + u8 pf_id; + u8 vnic_id; + u8 vf_id; + u8 vf_valid; +#endif +}; + +/* + * The fast-path status block meta-data, common to all chips + */ +struct hc_sb_data { + struct regpair_native host_sb_addr; + struct hc_status_block_sm state_machine[HC_SB_MAX_SM]; + struct pci_entity p_func; +#if defined(__BIG_ENDIAN) + u8 rsrv0; + u8 state; + u8 dhc_qzone_id; + u8 same_igu_sb_1b; +#elif defined(__LITTLE_ENDIAN) + u8 same_igu_sb_1b; + u8 dhc_qzone_id; + u8 state; + u8 rsrv0; +#endif + struct regpair_native rsrv1[2]; +}; + + +/* + * Segment types for host coaslescing + */ +enum hc_segment { + HC_REGULAR_SEGMENT, + HC_DEFAULT_SEGMENT, + MAX_HC_SEGMENT +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_sp_status_block_data { + struct regpair_native host_sb_addr; +#if defined(__BIG_ENDIAN) + u8 rsrv1; + u8 state; + u8 igu_seg_id; + u8 igu_sb_id; +#elif defined(__LITTLE_ENDIAN) + u8 igu_sb_id; + u8 igu_seg_id; + u8 state; + u8 rsrv1; +#endif + struct pci_entity p_func; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e1x { + struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X]; + struct hc_sb_data common; +}; + + +/* + * The fast-path status block meta-data + */ +struct hc_status_block_data_e2 { + struct hc_index_data index_data[HC_SB_MAX_INDICES_E2]; + struct hc_sb_data common; +}; + + +/* + * IGU block operartion modes (in Everest2) + */ +enum igu_mode { + HC_IGU_BC_MODE, + HC_IGU_NBC_MODE, + MAX_IGU_MODE +}; + + +/* + * IP versions + */ +enum ip_ver { + IP_V4, + IP_V6, + MAX_IP_VER +}; + +/* + * Malicious VF error ID + */ +enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR, + VF_PF_CHANNEL_NOT_READY, + ETH_ILLEGAL_BD_LENGTHS, + ETH_PACKET_TOO_SHORT, + ETH_PAYLOAD_TOO_BIG, + ETH_ILLEGAL_ETH_TYPE, + ETH_ILLEGAL_LSO_HDR_LEN, + ETH_TOO_MANY_BDS, + ETH_ZERO_HDR_NBDS, + ETH_START_BD_NOT_SET, + ETH_ILLEGAL_PARSE_NBDS, + ETH_IPV6_AND_CHECKSUM, + ETH_VLAN_FLG_INCORRECT, + ETH_ILLEGAL_LSO_MSS, + ETH_TUNNEL_NOT_SUPPORTED, + MAX_MALICIOUS_VF_ERROR_ID +}; + +/* + * Multi-function modes + */ +enum mf_mode { + SINGLE_FUNCTION, + MULTI_FUNCTION_SD, + MULTI_FUNCTION_SI, + 
MULTI_FUNCTION_AFEX, + MAX_MF_MODE +}; + +/* + * Protocol-common statistics collected by the Tstorm (per pf) + */ +struct tstorm_per_pf_stats { + struct regpair rcv_error_bytes; +}; + +/* + * + */ +struct per_pf_stats { + struct tstorm_per_pf_stats tstorm_pf_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per port) + */ +struct tstorm_per_port_stats { + __le32 mac_discard; + __le32 mac_filter_discard; + __le32 brb_truncate_discard; + __le32 mf_tag_discard; + __le32 packet_drop; + __le32 reserved; +}; + +/* + * + */ +struct per_port_stats { + struct tstorm_per_port_stats tstorm_port_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per client) + */ +struct tstorm_per_queue_stats { + struct regpair rcv_ucast_bytes; + __le32 rcv_ucast_pkts; + __le32 checksum_discard; + struct regpair rcv_bcast_bytes; + __le32 rcv_bcast_pkts; + __le32 pkts_too_big_discard; + struct regpair rcv_mcast_bytes; + __le32 rcv_mcast_pkts; + __le32 ttl0_discard; + __le16 no_buff_discard; + __le16 reserved0; + __le32 reserved1; +}; + +/* + * Protocol-common statistics collected by the Ustorm (per client) + */ +struct ustorm_per_queue_stats { + struct regpair ucast_no_buff_bytes; + struct regpair mcast_no_buff_bytes; + struct regpair bcast_no_buff_bytes; + __le32 ucast_no_buff_pkts; + __le32 mcast_no_buff_pkts; + __le32 bcast_no_buff_pkts; + __le32 coalesced_pkts; + struct regpair coalesced_bytes; + __le32 coalesced_events; + __le32 coalesced_aborts; +}; + +/* + * Protocol-common statistics collected by the Xstorm (per client) + */ +struct xstorm_per_queue_stats { + struct regpair ucast_bytes_sent; + struct regpair mcast_bytes_sent; + struct regpair bcast_bytes_sent; + __le32 ucast_pkts_sent; + __le32 mcast_pkts_sent; + __le32 bcast_pkts_sent; + __le32 error_drop_pkts; +}; + +/* + * + */ +struct per_queue_stats { + struct tstorm_per_queue_stats tstorm_queue_statistics; + struct ustorm_per_queue_stats ustorm_queue_statistics; + struct xstorm_per_queue_stats xstorm_queue_statistics; +}; + + +/* + * FW version stored in first line of pram + */ +struct pram_fw_version { + u8 major; + u8 minor; + u8 revision; + u8 engineering; + u8 flags; +#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0) +#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0 +#define PRAM_FW_VERSION_STORM_ID (0x3<<1) +#define PRAM_FW_VERSION_STORM_ID_SHIFT 1 +#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3) +#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3 +#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4) +#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4 +#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6) +#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6 +}; + + +/* + * Ethernet slow path element + */ +union protocol_common_specific_data { + u8 protocol_data[8]; + struct regpair phy_address; + struct regpair mac_config_addr; + struct afex_vif_list_ramrod_data afex_vif_list_data; +}; + +/* + * The send queue element + */ +struct protocol_common_spe { + struct spe_hdr hdr; + union protocol_common_specific_data data; +}; + +/* The data for the Set Timesync Ramrod */ +struct set_timesync_ramrod_data { + u8 drift_adjust_cmd; + u8 offset_cmd; + u8 add_sub_drift_adjust_value; + u8 drift_adjust_value; + u32 drift_adjust_period; + struct regpair offset_delta; +}; + +/* + * The send queue element + */ +struct slow_path_element { + struct spe_hdr hdr; + struct regpair protocol_data; +}; + + +/* + * Protocol-common statistics counter + */ +struct stats_counter { + __le16 xstats_counter; + __le16 reserved0; + __le32 reserved1; + __le16 tstats_counter; + 
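	/* (Editorial note, not part of the imported source: the per-storm
	 *  counters in this structure let the driver check, against the
	 *  drv_stats_counter it supplied in stats_query_header, whether each
	 *  storm has finished posting the requested statistics.) */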
__le16 reserved2; + __le32 reserved3; + __le16 ustats_counter; + __le16 reserved4; + __le32 reserved5; + __le16 cstats_counter; + __le16 reserved6; + __le32 reserved7; +}; + + +/* + * + */ +struct stats_query_entry { + u8 kind; + u8 index; + __le16 funcID; + __le32 reserved; + struct regpair address; +}; + +/* + * statistic command + */ +struct stats_query_cmd_group { + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; +}; + + +/* + * statistic command header + */ +struct stats_query_header { + u8 cmd_num; + u8 reserved0; + __le16 drv_stats_counter; + __le32 reserved1; + struct regpair stats_counters_addrs; +}; + + +/* + * Types of statistcis query entry + */ +enum stats_query_type { + STATS_TYPE_QUEUE, + STATS_TYPE_PORT, + STATS_TYPE_PF, + STATS_TYPE_TOE, + STATS_TYPE_FCOE, + MAX_STATS_QUERY_TYPE +}; + + +/* + * Indicate of the function status block state + */ +enum status_block_state { + SB_DISABLED, + SB_ENABLED, + SB_CLEANED, + MAX_STATUS_BLOCK_STATE +}; + + +/* + * Storm IDs (including attentions for IGU related enums) + */ +enum storm_id { + USTORM_ID, + CSTORM_ID, + XSTORM_ID, + TSTORM_ID, + ATTENTION_ID, + MAX_STORM_ID +}; + + +/* + * Taffic types used in ETS and flow control algorithms + */ +enum traffic_type { + LLFC_TRAFFIC_TYPE_NW, + LLFC_TRAFFIC_TYPE_FCOE, + LLFC_TRAFFIC_TYPE_ISCSI, + MAX_TRAFFIC_TYPE +}; + + +/* + * zone A per-queue data + */ +struct tstorm_queue_zone_data { + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct tstorm_vf_zone_data { + struct regpair reserved; +}; + +/* Add or Subtract Value for Set Timesync Ramrod */ +enum ts_add_sub_value { + TS_SUB_VALUE, + TS_ADD_VALUE, + MAX_TS_ADD_SUB_VALUE +}; + +/* Drift-Adjust Commands for Set Timesync Ramrod */ +enum ts_drift_adjust_cmd { + TS_DRIFT_ADJUST_KEEP, + TS_DRIFT_ADJUST_SET, + TS_DRIFT_ADJUST_RESET, + MAX_TS_DRIFT_ADJUST_CMD +}; + +/* Offset Commands for Set Timesync Ramrod */ +enum ts_offset_cmd { + TS_OFFSET_KEEP, + TS_OFFSET_INC, + TS_OFFSET_DEC, + MAX_TS_OFFSET_CMD +}; + +/* Tunnel Mode */ +enum tunnel_mode { + TUNN_MODE_NONE, + TUNN_MODE_VXLAN, + TUNN_MODE_GRE, + MAX_TUNNEL_MODE +}; + + /* zone A per-queue data */ +struct ustorm_queue_zone_data { + struct ustorm_eth_rx_producers eth_rx_producers; + struct regpair reserved[3]; +}; + + +/* + * zone B per-VF data + */ +struct ustorm_vf_zone_data { + struct regpair reserved; +}; + + +/* + * data per VF-PF channel + */ +struct vf_pf_channel_data { +#if defined(__BIG_ENDIAN) + u16 reserved0; + u8 valid; + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 valid; + u16 reserved0; +#endif + u32 reserved1; +}; + + +/* + * State of VF-PF channel + */ +enum vf_pf_channel_state { + VF_PF_CHANNEL_STATE_READY, + VF_PF_CHANNEL_STATE_WAITING_FOR_ACK, + MAX_VF_PF_CHANNEL_STATE +}; + + +/* + * vif_list_rule_kind + */ +enum vif_list_rule_kind { + VIF_LIST_RULE_SET, + VIF_LIST_RULE_GET, + VIF_LIST_RULE_CLEAR_ALL, + VIF_LIST_RULE_CLEAR_FUNC, + MAX_VIF_LIST_RULE_KIND +}; + + +/* + * zone A per-queue data + */ +struct xstorm_queue_zone_data { + struct regpair reserved[4]; +}; + + +/* + * zone B per-VF data + */ +struct xstorm_vf_zone_data { + struct regpair reserved; +}; + +#endif /* BNX2X_HSI_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h new file mode 100644 index 000000000..d6e1975b7 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -0,0 +1,786 @@ +/* bnx2x_init.h: Broadcom Everest network driver. 
+ * Structures and macroes needed during the initialization. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Modified by: Vladislav Zolotarov + */ + +#ifndef BNX2X_INIT_H +#define BNX2X_INIT_H + +/* Init operation types and structures */ +enum { + OP_RD = 0x1, /* read a single register */ + OP_WR, /* write a single register */ + OP_SW, /* copy a string to the device */ + OP_ZR, /* clear memory */ + OP_ZP, /* unzip then copy with DMAE */ + OP_WR_64, /* write 64 bit pattern */ + OP_WB, /* copy a string using DMAE */ + OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ + /* Skip the following ops if all of the init modes don't match */ + OP_IF_MODE_OR, + /* Skip the following ops if any of the init modes don't match */ + OP_IF_MODE_AND, + OP_MAX +}; + +enum { + STAGE_START, + STAGE_END, +}; + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +/* structs for the various opcodes */ +struct raw_op { + u32 op:8; + u32 offset:24; + u32 raw_data; +}; + +struct op_read { + u32 op:8; + u32 offset:24; + u32 val; +}; + +struct op_write { + u32 op:8; + u32 offset:24; + u32 val; +}; + +struct op_arr_write { + u32 op:8; + u32 offset:24; +#ifdef __BIG_ENDIAN + u16 data_len; + u16 data_off; +#else /* __LITTLE_ENDIAN */ + u16 data_off; + u16 data_len; +#endif +}; + +struct op_zero { + u32 op:8; + u32 offset:24; + u32 len; +}; + +struct op_if_mode { + u32 op:8; + u32 cmd_offset:24; + u32 mode_bit_map; +}; + + +union init_op { + struct op_read read; + struct op_write write; + struct op_arr_write arr_wr; + struct op_zero zero; + struct raw_op raw; + struct op_if_mode if_mode; +}; + + +/* Init Phases */ +enum { + PHASE_COMMON, + PHASE_PORT0, + PHASE_PORT1, + PHASE_PF0, + PHASE_PF1, + PHASE_PF2, + PHASE_PF3, + PHASE_PF4, + PHASE_PF5, + PHASE_PF6, + PHASE_PF7, + NUM_OF_INIT_PHASES +}; + +/* Init Modes */ +enum { + MODE_ASIC = 0x00000001, + MODE_FPGA = 0x00000002, + MODE_EMUL = 0x00000004, + MODE_E2 = 0x00000008, + MODE_E3 = 0x00000010, + MODE_PORT2 = 0x00000020, + MODE_PORT4 = 0x00000040, + MODE_SF = 0x00000080, + MODE_MF = 0x00000100, + MODE_MF_SD = 0x00000200, + MODE_MF_SI = 0x00000400, + MODE_MF_AFEX = 0x00000800, + MODE_E3_A0 = 0x00001000, + MODE_E3_B0 = 0x00002000, + MODE_COS3 = 0x00004000, + MODE_COS6 = 0x00008000, + MODE_LITTLE_ENDIAN = 0x00010000, + MODE_BIG_ENDIAN = 0x00020000, +}; + +/* Init Blocks */ +enum { + BLOCK_ATC, + BLOCK_BRB1, + BLOCK_CCM, + BLOCK_CDU, + BLOCK_CFC, + BLOCK_CSDM, + BLOCK_CSEM, + BLOCK_DBG, + BLOCK_DMAE, + BLOCK_DORQ, + BLOCK_HC, + BLOCK_IGU, + BLOCK_MISC, + BLOCK_NIG, + BLOCK_PBF, + BLOCK_PGLUE_B, + BLOCK_PRS, + BLOCK_PXP2, + BLOCK_PXP, + BLOCK_QM, + BLOCK_SRC, + BLOCK_TCM, + BLOCK_TM, + BLOCK_TSDM, + BLOCK_TSEM, + BLOCK_UCM, + BLOCK_UPB, + BLOCK_USDM, + BLOCK_USEM, + BLOCK_XCM, + BLOCK_XPB, + BLOCK_XSDM, + BLOCK_XSEM, + BLOCK_MISC_AEU, + NUM_OF_INIT_BLOCKS +}; + +/* QM queue numbers */ +#define BNX2X_ETH_Q 0 +#define BNX2X_TOE_Q 3 +#define BNX2X_TOE_ACK_Q 6 +#define BNX2X_ISCSI_Q 9 +#define BNX2X_ISCSI_ACK_Q 11 +#define BNX2X_FCOE_Q 10 + +/* Vnics per mode */ +#define BNX2X_PORT2_MODE_NUM_VNICS 4 +#define BNX2X_PORT4_MODE_NUM_VNICS 2 + +/* COS offset for port1 in E3 B0 4port mode */ 
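
/* Editorial sketch only, not part of the imported source: the init-ops
 * machinery above is driven by generated firmware arrays elsewhere in the
 * driver.  The helpers below merely illustrate how the OP_IF_MODE_* opcodes
 * and BLOCK_OPS_IDX() are meant to be interpreted; the names
 * example_init_modes_match(), example_block_bounds() and "ops_offsets[]" are
 * invented for this illustration.
 */
static bool example_init_modes_match(u32 init_mode_flags, u8 op,
				     u32 mode_bit_map)
{
	if (op == OP_IF_MODE_AND)
		/* execute the following ops only if every requested
		 * mode bit is present in the device's MODE_* bitmap
		 */
		return (init_mode_flags & mode_bit_map) == mode_bit_map;
	/* OP_IF_MODE_OR: execute if at least one requested mode is present */
	return (init_mode_flags & mode_bit_map) != 0;
}

/* Editorial sketch only: bounds of one block/stage inside the generated ops
 * array, e.g. the DMAE block in the common phase; "ops_offsets[]" is an
 * assumed name for the generated offsets table.
 */
static void example_block_bounds(const u16 *ops_offsets, u16 *start, u16 *end)
{
	*start = ops_offsets[BLOCK_OPS_IDX(BLOCK_DMAE, PHASE_COMMON,
					   STAGE_START)];
	*end   = ops_offsets[BLOCK_OPS_IDX(BLOCK_DMAE, PHASE_COMMON,
					   STAGE_END)];
}
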
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3 + +/* QM Register addresses */ +#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\ + (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) +#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\ + (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5))) +#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\ + (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) + +/* extracts the QM queue number for the specified port and vnic */ +#define BNX2X_PF_Q_NUM(q_num, port, vnic)\ + ((((port) << 1) | (vnic)) * 16 + (q_num)) + + +/* Maps the specified queue to the specified COS */ +static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) +{ + /* find current COS mapping */ + u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); + + /* check if queue->COS mapping has changed */ + if (curr_cos != new_cos) { + u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS; + u32 reg_addr, reg_bit_map, vnic; + + /* update parameters for 4port mode */ + if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { + num_vnics = BNX2X_PORT4_MODE_NUM_VNICS; + if (BP_PORT(bp)) { + curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET; + new_cos += BNX2X_E3B0_PORT1_COS_OFFSET; + } + } + + /* change queue mapping for each VNIC */ + for (vnic = 0; vnic < num_vnics; vnic++) { + u32 pf_q_num = + BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); + u32 q_bit_map = 1 << (pf_q_num & 0x1f); + + /* overwrite queue->VOQ mapping */ + REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); + + /* clear queue bit from current COS bit map */ + reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); + + /* set queue bit in new COS bit map */ + reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); + + /* set/clear queue bit in command-queue bit map + * (E2/E3A0 only, valid COS values are 0/1) + */ + if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { + reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + q_bit_map = 1 << (2 * (pf_q_num & 0xf)); + reg_bit_map = new_cos ? + (reg_bit_map | q_bit_map) : + (reg_bit_map & (~q_bit_map)); + REG_WR(bp, reg_addr, reg_bit_map); + } + } + } +} + +/* Configures the QM according to the specified per-traffic-type COSes */ +static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, + struct priority_cos *traffic_cos) +{ + bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); + bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + if (mode != STATIC_COS) { + /* required only in backward compatible COS mode */ + bnx2x_map_q_cos(bp, BNX2X_ETH_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + bnx2x_map_q_cos(bp, BNX2X_TOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + } +} + + +/* congestion management port init api description + * the api works as follows: + * the driver should pass the cmng_init_input struct, the port_init function + * will prepare the required internal ram structure which will be passed back + * to the driver (cmng_init) that will write it into the internal ram. + * + * IMPORTANT REMARKS: + * 1. the cmng_init struct does not represent the contiguous internal ram + * structure. 
the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET + * offset in order to write the port sub struct and the + * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other + * words - don't use memcpy!). + * 2. although the cmng_init struct is filled for the maximal vnic number + * possible, the driver should only write the valid vnics into the internal + * ram according to the appropriate port mode. + */ +#define BITS_TO_BYTES(x) ((x)/8) + +/* CMNG constants, as derived from system spec calculations */ + +/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */ +#define DEF_MIN_RATE 100 + +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 + +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer + */ +#define QM_ARB_BYTES 160000 + +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 + +/* how many bytes above threshold for + * the minimal credit of Min algorithm + */ +#define MIN_ABOVE_THRESH 32768 + +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair + */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) + +/* Memory of fairness algorithm - 2 cycles */ +#define FAIR_MEM 2 +#define SAFC_TIMEOUT_USEC 52 + +#define SDM_TICKS 4 + + +static inline void bnx2x_init_max(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + /* rate shaping per-port variables + * 100 micro seconds in SDM ticks = 25 + * since each tick is 4 microSeconds + */ + + pdata->rs_vars.rs_periodic_timeout = + RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS; + + /* this is the threshold below which no timer arming will occur. + * 1.25 coefficient is for the threshold to be a little bigger + * then the real time to compensate for timer in-accuracy + */ + pdata->rs_vars.rs_threshold = + (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4; + + /* rate shaping per-vnic variables */ + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* global vnic counter */ + vdata->vnic_max_rate[vnic].vn_counter.rate = + input_data->vnic_max_rate[vnic]; + /* maximal Mbps for this vnic + * the quota in each timer period - number of bytes + * transmitted in this period + */ + vdata->vnic_max_rate[vnic].vn_counter.quota = + RS_PERIODIC_TIMEOUT_USEC * + (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8; + } + +} + +static inline void bnx2x_init_min(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + /* this is the resolution of the fairness timer */ + fair_periodic_timeout_usec = QM_ARB_BYTES / r_param; + + /* fairness per-port variables + * for 10G it is 1000usec. for 1G it is 10000usec. + */ + tFair = T_FAIR_COEF / input_data->port_rate; + + /* this is the threshold below which we won't arm the timer anymore */ + pdata->fair_vars.fair_threshold = QM_ARB_BYTES; + + /* we multiply by 1e3/8 to get bytes/msec. 
We don't want the credits + * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution) + */ + pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM; + + /* since each tick is 4 microSeconds */ + pdata->fair_vars.fairness_timeout = + fair_periodic_timeout_usec / SDM_TICKS; + + /* calculate sum of weights */ + vnicWeightSum = 0; + + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) + vnicWeightSum += input_data->vnic_min_rate[vnic]; + + /* global vnic counter */ + if (vnicWeightSum > 0) { + /* fairness per-vnic variables */ + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* this is the credit for each period of the fairness + * algorithm - number of bytes in T_FAIR (this vnic + * share of the port rate) + */ + vdata->vnic_min_rate[vnic].vn_credit_delta = + (u32)input_data->vnic_min_rate[vnic] * 100 * + (T_FAIR_COEF / (8 * 100 * vnicWeightSum)); + if (vdata->vnic_min_rate[vnic].vn_credit_delta < + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + vdata->vnic_min_rate[vnic].vn_credit_delta = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } +} + +static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data, + u32 r_param, struct cmng_init *ram_data) +{ + u32 vnic, cos; + u32 cosWeightSum = 0; + struct cmng_vnic *vdata = &ram_data->vnic; + struct cmng_struct_per_port *pdata = &ram_data->port; + + for (cos = 0; cos < MAX_COS_NUMBER; cos++) + cosWeightSum += input_data->cos_min_rate[cos]; + + if (cosWeightSum > 0) { + + for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) { + /* Since cos and vnic shouldn't work together the rate + * to divide between the coses is the port rate. + */ + u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta; + for (cos = 0; cos < MAX_COS_NUMBER; cos++) { + /* this is the credit for each period of + * the fairness algorithm - number of bytes + * in T_FAIR (this cos share of the vnic rate) + */ + ccd[cos] = + (u32)input_data->cos_min_rate[cos] * 100 * + (T_FAIR_COEF / (8 * 100 * cosWeightSum)); + if (ccd[cos] < pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH) { + ccd[cos] = + pdata->fair_vars.fair_threshold + + MIN_ABOVE_THRESH; + } + } + } + } +} + +static inline void bnx2x_init_safc(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + /* in microSeconds */ + ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC; +} + +/* Congestion management port init */ +static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data, + struct cmng_init *ram_data) +{ + u32 r_param; + memset(ram_data, 0, sizeof(struct cmng_init)); + + ram_data->port.flags = input_data->flags; + + /* number of bytes transmitted in a rate of 10Gbps + * in one usec = 1.25KB. 
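 * (Editorial worked example, not part of the imported source: with
 * port_rate = 10000 Mbps, BITS_TO_BYTES(10000) = 1250 bytes/usec; in
 * bnx2x_init_max() a vnic_max_rate of 2500 Mbps then gets a per-period
 * quota of RS_PERIODIC_TIMEOUT_USEC * 2500 / 8 = 125000 bytes, and the
 * arming threshold is 5 * 400 * 1250 / 4 = 625000 bytes.)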
+ */ + r_param = BITS_TO_BYTES(input_data->port_rate); + bnx2x_init_max(input_data, r_param, ram_data); + bnx2x_init_min(input_data, r_param, ram_data); + bnx2x_init_fw_wrr(input_data, r_param, ram_data); + bnx2x_init_safc(input_data, ram_data); +} + + + +/* Returns the index of start or end of a specific block stage in ops array */ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + +#define INITOP_SET 0 /* set the HW directly */ +#define INITOP_CLEAR 1 /* clear the HW directly */ +#define INITOP_INIT 2 /* set the init-value array */ + +/**************************************************************************** +* ILT management +****************************************************************************/ +struct ilt_line { + dma_addr_t page_mapping; + void *page; + u32 size; +}; + +struct ilt_client_info { + u32 page_size; + u16 start; + u16 end; + u16 client_num; + u16 flags; +#define ILT_CLIENT_SKIP_INIT 0x1 +#define ILT_CLIENT_SKIP_MEM 0x2 +}; + +struct bnx2x_ilt { + u32 start_line; + struct ilt_line *lines; + struct ilt_client_info clients[4]; +#define ILT_CLIENT_CDU 0 +#define ILT_CLIENT_QM 1 +#define ILT_CLIENT_SRC 2 +#define ILT_CLIENT_TM 3 +}; + +/**************************************************************************** +* SRC configuration +****************************************************************************/ +struct src_ent { + u8 opaque[56]; + u64 next; +}; + +/**************************************************************************** +* Parity configuration +****************************************************************************/ +#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK, \ + block##_REG_##block##_PRTY_STS_CLR, \ + en_mask, {m1, m1h, m2, m3}, #block \ +} + +#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_0, \ + block##_REG_##block##_PRTY_STS_CLR_0, \ + en_mask, {m1, m1h, m2, m3}, #block"_0" \ +} + +#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \ +{ \ + block##_REG_##block##_PRTY_MASK_1, \ + block##_REG_##block##_PRTY_STS_CLR_1, \ + en_mask, {m1, m1h, m2, m3}, #block"_1" \ +} + +static const struct { + u32 mask_addr; + u32 sts_clr_addr; + u32 en_mask; /* Mask to enable parity attentions */ + struct { + u32 e1; /* 57710 */ + u32 e1h; /* 57711 */ + u32 e2; /* 57712 */ + u32 e3; /* 578xx */ + } reg_mask; /* Register mask (all valid bits) */ + char name[8]; /* Block's longest name is 7 characters long + * (name + suffix) + */ +} bnx2x_blocks_parity_data[] = { + /* bit 19 masked */ + /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ + /* bit 5,18,20-31 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ + /* bit 5 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ + /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ + /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ + + /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't + * want to handle "system kill" flow at the moment. 
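 * (Editorial note, not part of the imported source: each
 * BLOCK_PRTY_INFO(X, en_mask, e1, e1h, e2, e3) entry below expands, via
 * token pasting, to { X_REG_X_PRTY_MASK, X_REG_X_PRTY_STS_CLR, en_mask,
 * {e1, e1h, e2, e3}, "X" }, i.e. the parity mask / status-clear register
 * pair for block X plus the per-chip valid-bit masks.)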
+ */ + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff, + 0x7ffffff), + BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff), + BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0), + BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0), + BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff), + BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff), + BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f), + BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3), + BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3), + {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, + GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf, + {0xf, 0xf, 0xf, 0xf}, "UPB"}, + {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, + GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, + {0xf, 0xf, 0xf, 0xf}, "XPB"}, + BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7), + BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f), + BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff), + BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff), + BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f), + BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff), + BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), + BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), +}; + + +/* [28] MCP Latched rom_parity + * [29] MCP Latched ump_rx_parity + * [30] MCP Latched ump_tx_parity + * [31] MCP Latched scpad_parity + */ +#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS \ + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +/* Below registers control the MCP parity attention output. When + * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are + * enabled, when cleared - disabled. 
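+ * The latched parity indications themselves are cleared separately, via
+ * MISC_REG_AEU_CLR_LATCH_SIGNAL in bnx2x_clear_blocks_parity() below.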
+ */ +static const struct { + u32 addr; + u32 bits; +} mcp_attn_ctl_regs[] = { + { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_0, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_AEU_ENABLE_MCP_PRTY_BITS }, + { MISC_REG_AEU_ENABLE4_NIG_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, + { MISC_REG_AEU_ENABLE4_PXP_1, + MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS } +}; + +static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) +{ + int i; + u32 reg_val; + + for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr); + + if (enable) + reg_val |= mcp_attn_ctl_regs[i].bits; + else + reg_val &= ~mcp_attn_ctl_regs[i].bits; + + REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val); + } +} + +static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) +{ + if (CHIP_IS_E1(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e1; + else if (CHIP_IS_E1H(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e1h; + else if (CHIP_IS_E2(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e2; + else /* CHIP_IS_E3 */ + return bnx2x_blocks_parity_data[idx].reg_mask.e3; +} + +static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 dis_mask = bnx2x_parity_reg_mask(bp, i); + + if (dis_mask) { + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, + dis_mask); + DP(NETIF_MSG_HW, "Setting parity mask " + "for %s to\t\t0x%x\n", + bnx2x_blocks_parity_data[i].name, dis_mask); + } + } + + /* Disable MCP parity attentions */ + bnx2x_set_mcp_parity(bp, false); +} + +/* Clear the parity error status registers. */ +static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) +{ + int i; + u32 reg_val, mcp_aeu_bits = + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; + + /* Clear SEM_FAST parities */ + REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); + + if (reg_mask) { + reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. 
+ sts_clr_addr); + if (reg_val & reg_mask) + DP(NETIF_MSG_HW, + "Parity errors in %s: 0x%x\n", + bnx2x_blocks_parity_data[i].name, + reg_val & reg_mask); + } + } + + /* Check if there were parity attentions in MCP */ + reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); + if (reg_val & mcp_aeu_bits) + DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n", + reg_val & mcp_aeu_bits); + + /* Clear parity attentions in MCP: + * [7] clears Latched rom_parity + * [8] clears Latched ump_rx_parity + * [9] clears Latched ump_tx_parity + * [10] clears Latched scpad_parity (both ports) + */ + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); +} + +static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); + + if (reg_mask) + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, + bnx2x_blocks_parity_data[i].en_mask & reg_mask); + } + + /* Enable MCP parity attentions */ + bnx2x_set_mcp_parity(bp, true); +} + + +#endif /* BNX2X_INIT_H */ + diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h new file mode 100644 index 000000000..7919cc644 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -0,0 +1,842 @@ +/* bnx2x_init_ops.h: Broadcom Everest network driver. + * Static functions needed during the initialization. + * This file is "included" in bnx2x_main.c. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Ariel Elior + * Written by: Vladislav Zolotarov + */ + +#ifndef BNX2X_INIT_OPS_H +#define BNX2X_INIT_OPS_H + + +#ifndef BP_ILT +#define BP_ILT(bp) NULL +#endif + +#ifndef BP_FUNC +#define BP_FUNC(bp) 0 +#endif + +#ifndef BP_PORT +#define BP_PORT(bp) 0 +#endif + +#ifndef BNX2X_ILT_FREE +#define BNX2X_ILT_FREE(x, y, sz) +#endif + +#ifndef BNX2X_ILT_ZALLOC +#define BNX2X_ILT_ZALLOC(x, y, sz) +#endif + +#ifndef ILOG2 +#define ILOG2(x) x +#endif + +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); +static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, + dma_addr_t phys_addr, u32 addr, + u32 len); + +static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + REG_WR(bp, addr + i*4, data[i]); +} + +static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); +} + +static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, + u8 wb) +{ + if (bp->dmae_ready) + bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); + + /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */ + else if (wb && CHIP_IS_E1(bp)) + bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ + else + bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); +} + +static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, + u32 len, u8 wb) +{ + u32 buf_len = (((len*4) > FW_BUF_SIZE) ? 
FW_BUF_SIZE : (len*4)); + u32 buf_len32 = buf_len/4; + u32 i; + + memset(GUNZIP_BUF(bp), (u8)fill, buf_len); + + for (i = 0; i < len; i += buf_len32) { + u32 cur_len = min(buf_len32, len - i); + + bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); + } +} + +static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) +{ + if (bp->dmae_ready) + bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); + + /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */ + else if (CHIP_IS_E1(bp)) + bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); + + /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */ + else + bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); +} + +static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len64) +{ + u32 buf_len32 = FW_BUF_SIZE/4; + u32 len = len64*2; + u64 data64 = 0; + u32 i; + + /* 64 bit value is in a blob: first low DWORD, then high DWORD */ + data64 = HILO_U64((*(data + 1)), (*data)); + + len64 = min((u32)(FW_BUF_SIZE/8), len64); + for (i = 0; i < len64; i++) { + u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i; + + *pdata = data64; + } + + for (i = 0; i < len; i += buf_len32) { + u32 cur_len = min(buf_len32, len - i); + + bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); + } +} + +/********************************************************* + There are different blobs for each PRAM section. + In addition, each blob write operation is divided into a few operations + in order to decrease the amount of phys. contiguous buffer needed. + Thus, when we select a blob the address may be with some offset + from the beginning of PRAM section. + The same holds for the INT_TABLE sections. +**********************************************************/ +#define IF_IS_INT_TABLE_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x400 >= (addr))) + +#define IF_IS_PRAM_ADDR(base, addr) \ + if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) + +static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, + const u8 *data) +{ + IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) + data = INIT_TSEM_INT_TABLE_DATA(bp); + else + IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr) + data = INIT_CSEM_INT_TABLE_DATA(bp); + else + IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr) + data = INIT_USEM_INT_TABLE_DATA(bp); + else + IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr) + data = INIT_XSEM_INT_TABLE_DATA(bp); + else + IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr) + data = INIT_TSEM_PRAM_DATA(bp); + else + IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr) + data = INIT_CSEM_PRAM_DATA(bp); + else + IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr) + data = INIT_USEM_PRAM_DATA(bp); + else + IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr) + data = INIT_XSEM_PRAM_DATA(bp); + + return data; +} + +extern void bnx2x_init_wr_wb(struct bnx2x *, u32, const u32 *, u32); +/*(DEBLOBBED)*/ + +static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, + u32 val_hi) +{ + u32 wb_write[2]; + + wb_write[0] = val_lo; + wb_write[1] = val_hi; + REG_WR_DMAE_LEN(bp, reg, wb_write, 2); +} +static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, + u32 blob_off) +{ + const u8 *data = NULL; + int rc; + u32 i; + + data = bnx2x_sel_blob(bp, addr, data) + blob_off*4; + + rc = bnx2x_gunzip(bp, data, len); + if (rc) + return; + + /* gunzip_outlen is in dwords */ + len = GUNZIP_OUTLEN(bp); + for (i = 0; i < len; i++) + ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32) + cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]); + + bnx2x_write_big_buf_wb(bp, addr, len); +} + +/*(DEBLOBBED)*/ + + 
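The two helpers above never push more than one gunzip buffer's worth of data at a time: bnx2x_init_fill() and bnx2x_init_wr_64() stage a repeating pattern in the buffer and flush it to the target range chunk by chunk, with the low dword at the lower address as described in the blob-layout comment. The following is a minimal host-side sketch of that pattern only, not part of the patch; FAKE_BUF_DWORDS and fake_reg_write() are hypothetical stand-ins for FW_BUF_SIZE/4 and REG_WR().

#include <stdint.h>
#include <stdio.h>

#define FAKE_BUF_DWORDS 8	/* hypothetical stand-in for FW_BUF_SIZE/4 */

/* hypothetical stand-in for REG_WR(); just logs the write */
static void fake_reg_write(uint32_t addr, uint32_t val)
{
	printf("WR 0x%08x <- 0x%08x\n", (unsigned)addr, (unsigned)val);
}

/* Replicate a 64-bit pattern (low dword first) across a bounded staging
 * buffer, then flush 'len' dwords to 'addr' one buffer-sized chunk at a time.
 */
static void fill_wr_64(uint32_t addr, uint32_t lo, uint32_t hi, uint32_t len)
{
	uint32_t buf[FAKE_BUF_DWORDS];
	uint32_t i, j;

	for (i = 0; i < FAKE_BUF_DWORDS; i += 2) {
		buf[i] = lo;		/* low dword at the lower address */
		buf[i + 1] = hi;
	}

	for (i = 0; i < len; i += FAKE_BUF_DWORDS) {
		uint32_t cur = (len - i < FAKE_BUF_DWORDS) ?
			       (len - i) : FAKE_BUF_DWORDS;

		for (j = 0; j < cur; j++)
			fake_reg_write(addr + (i + j) * 4, buf[j]);
	}
}

int main(void)
{
	/* 10 dwords: one full 8-dword chunk plus a 2-dword tail */
	fill_wr_64(0x1000, 0xdeadbeef, 0x00000001, 10);
	return 0;
}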
+/**************************************************************************** +* PXP Arbiter +****************************************************************************/ +/* + * This code configures the PCI read/write arbiter + * which implements a weighted round robin + * between the virtual queues in the chip. + * + * The values were derived for each PCI max payload and max request size. + * since max payload and max request size are only known at run time, + * this is done as a separate init stage. + */ + +#define NUM_WR_Q 13 +#define NUM_RD_Q 29 +#define MAX_RD_ORD 3 +#define MAX_WR_ORD 2 + +/* configuration for one arbiter queue */ +struct arb_line { + int l; + int add; + int ubound; +}; + +/* derived configuration for each read queue for each max request size */ +static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = { +/* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} }, + { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} }, + { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} }, +/* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, +/* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} } +}; + +/* derived configuration for each write queue for each max request size */ +static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = { +/* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} }, + { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} }, + { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, + { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} }, +/* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} }, + { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} }, + { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} }, + { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} } +}; + +/* register addresses for read queues */ +static const struct arb_line read_arb_addr[NUM_RD_Q-1] = { +/* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0, + PXP2_REG_RQ_BW_RD_UBOUND0}, + {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, 
PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4, + PXP2_REG_RQ_BW_RD_UBOUND4}, + {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5, + PXP2_REG_RQ_BW_RD_UBOUND5}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, +/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, + {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12, + PXP2_REG_RQ_BW_RD_UBOUND12}, + {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13, + PXP2_REG_RQ_BW_RD_UBOUND13}, + {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14, + PXP2_REG_RQ_BW_RD_UBOUND14}, + {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15, + PXP2_REG_RQ_BW_RD_UBOUND15}, + {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16, + PXP2_REG_RQ_BW_RD_UBOUND16}, + {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17, + PXP2_REG_RQ_BW_RD_UBOUND17}, + {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18, + PXP2_REG_RQ_BW_RD_UBOUND18}, +/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19, + PXP2_REG_RQ_BW_RD_UBOUND19}, + {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20, + PXP2_REG_RQ_BW_RD_UBOUND20}, + {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22, + PXP2_REG_RQ_BW_RD_UBOUND22}, + {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23, + PXP2_REG_RQ_BW_RD_UBOUND23}, + {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24, + PXP2_REG_RQ_BW_RD_UBOUND24}, + {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25, + PXP2_REG_RQ_BW_RD_UBOUND25}, + {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26, + PXP2_REG_RQ_BW_RD_UBOUND26}, + {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27, + PXP2_REG_RQ_BW_RD_UBOUND27}, + {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28} +}; + +/* register addresses for write queues */ +static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { +/* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1, + PXP2_REG_PSWRQ_BW_UB1}, + {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2, + PXP2_REG_PSWRQ_BW_UB2}, + {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3, + PXP2_REG_PSWRQ_BW_UB3}, + {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6, + PXP2_REG_PSWRQ_BW_UB6}, + {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7, + PXP2_REG_PSWRQ_BW_UB7}, + {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8, + PXP2_REG_PSWRQ_BW_UB8}, + {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9, + PXP2_REG_PSWRQ_BW_UB9}, + {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10, + PXP2_REG_PSWRQ_BW_UB10}, + {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11, + PXP2_REG_PSWRQ_BW_UB11}, +/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28, + PXP2_REG_PSWRQ_BW_UB28}, + {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29, + PXP2_REG_RQ_BW_WR_UBOUND29}, + {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30, + PXP2_REG_RQ_BW_WR_UBOUND30} +}; + +static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, + int w_order) +{ + u32 val, i; + + if (r_order > MAX_RD_ORD) { + DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n", + r_order, MAX_RD_ORD); + r_order = MAX_RD_ORD; + } + if (w_order > MAX_WR_ORD) { + DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n", + w_order, MAX_WR_ORD); + w_order = MAX_WR_ORD; + } + if (CHIP_REV_IS_FPGA(bp)) { + DP(NETIF_MSG_HW, "write 
order adjusted to 1 for FPGA\n"); + w_order = 0; + } + DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order); + + for (i = 0; i < NUM_RD_Q-1; i++) { + REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l); + REG_WR(bp, read_arb_addr[i].add, + read_arb_data[i][r_order].add); + REG_WR(bp, read_arb_addr[i].ubound, + read_arb_data[i][r_order].ubound); + } + + for (i = 0; i < NUM_WR_Q-1; i++) { + if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) || + (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) { + + REG_WR(bp, write_arb_addr[i].l, + write_arb_data[i][w_order].l); + + REG_WR(bp, write_arb_addr[i].add, + write_arb_data[i][w_order].add); + + REG_WR(bp, write_arb_addr[i].ubound, + write_arb_data[i][w_order].ubound); + } else { + + val = REG_RD(bp, write_arb_addr[i].l); + REG_WR(bp, write_arb_addr[i].l, + val | (write_arb_data[i][w_order].l << 10)); + + val = REG_RD(bp, write_arb_addr[i].add); + REG_WR(bp, write_arb_addr[i].add, + val | (write_arb_data[i][w_order].add << 10)); + + val = REG_RD(bp, write_arb_addr[i].ubound); + REG_WR(bp, write_arb_addr[i].ubound, + val | (write_arb_data[i][w_order].ubound << 7)); + } + } + + val = write_arb_data[NUM_WR_Q-1][w_order].add; + val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10; + val += write_arb_data[NUM_WR_Q-1][w_order].l << 17; + REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val); + + val = read_arb_data[NUM_RD_Q-1][r_order].add; + val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10; + val += read_arb_data[NUM_RD_Q-1][r_order].l << 17; + REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val); + + REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order); + REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order); + REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); + REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); + + if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) + REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); + + if (CHIP_IS_E3(bp)) + REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); + else if (CHIP_IS_E2(bp)) + REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); + else + REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); + + if (!CHIP_IS_E1(bp)) { + /* MPS w_order optimal TH presently TH + * 128 0 0 2 + * 256 1 1 3 + * >=512 2 2 3 + */ + /* DMAE is special */ + if (!CHIP_IS_E1H(bp)) { + /* E2 can use optimal TH */ + val = w_order; + REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); + } else { + val = ((w_order == 0) ? 2 : 3); + REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); + } + + REG_WR(bp, PXP2_REG_WR_HC_MPS, val); + REG_WR(bp, PXP2_REG_WR_USDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val); + REG_WR(bp, PXP2_REG_WR_QM_MPS, val); + REG_WR(bp, PXP2_REG_WR_TM_MPS, val); + REG_WR(bp, PXP2_REG_WR_SRC_MPS, val); + REG_WR(bp, PXP2_REG_WR_DBG_MPS, val); + REG_WR(bp, PXP2_REG_WR_CDU_MPS, val); + } + + /* Validate number of tags suppoted by device */ +#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980 + val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST); + val &= 0xFF; + if (val <= 0x20) + REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20); +} + +/**************************************************************************** +* ILT management +****************************************************************************/ +/* + * This codes hides the low level HW interaction for ILT management and + * configuration. The API consists of a shadow ILT table which is set by the + * driver and a set of routines to use it to configure the HW. 
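+ * For example, a 4KB-aligned page mapping of 0x1234567000 is written to
+ * the HW as ILT_ADDR1() = 0x01234567 (bits 43:12 of the address) and
+ * ILT_ADDR2() = 0x00100000 (the valid bit at position 20, upper address
+ * bits zero here); see the macros defined below.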
+ * + */ + +/* ILT HW init operations */ + +/* ILT memory management operations */ +#define ILT_MEMOP_ALLOC 0 +#define ILT_MEMOP_FREE 1 + +/* the phys address is shifted right 12 bits and has an added + * 1=valid bit added to the 53rd bit + * then since this is a wide register(TM) + * we split it into two 32 bit writes + */ +#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) +#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) +#define ILT_RANGE(f, l) (((l) << 10) | f) + +static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, + struct ilt_line *line, u32 size, u8 memop) +{ + if (memop == ILT_MEMOP_FREE) { + BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); + return 0; + } + BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size); + if (!line->page) + return -1; + line->size = size; + return 0; +} + + +static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, + u8 memop) +{ + int i, rc; + struct bnx2x_ilt *ilt = BP_ILT(bp); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (!ilt || !ilt->lines) + return -1; + + if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) + return 0; + + for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) { + rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i], + ilt_cli->page_size, memop); + } + return rc; +} + +static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop) +{ + int rc = 0; + + if (CONFIGURE_NIC_MODE(bp)) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); + if (!rc) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop); + + return rc; +} + +static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) +{ + int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); + if (!rc) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); + if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) + rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); + + return rc; +} + +static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, + dma_addr_t page_mapping) +{ + u32 reg; + + if (CHIP_IS_E1(bp)) + reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8; + else + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8; + + bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); +} + +static void bnx2x_ilt_line_init_op(struct bnx2x *bp, + struct bnx2x_ilt *ilt, int idx, u8 initop) +{ + dma_addr_t null_mapping; + int abs_idx = ilt->start_line + idx; + + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping); + break; + case INITOP_CLEAR: + null_mapping = 0; + bnx2x_ilt_line_wr(bp, abs_idx, null_mapping); + break; + } +} + +static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp, + struct ilt_client_info *ilt_cli, + u32 ilt_start, u8 initop) +{ + u32 start_reg = 0; + u32 end_reg = 0; + + /* The boundary is either SET or INIT, + CLEAR => SET and for now SET ~~ INIT */ + + /* find the appropriate regs */ + if (CHIP_IS_E1(bp)) { + switch (ilt_cli->client_num) { + case ILT_CLIENT_CDU: + start_reg = PXP2_REG_PSWRQ_CDU0_L2P; + break; + case ILT_CLIENT_QM: + start_reg = PXP2_REG_PSWRQ_QM0_L2P; + break; + case ILT_CLIENT_SRC: + start_reg = PXP2_REG_PSWRQ_SRC0_L2P; + break; + case ILT_CLIENT_TM: + start_reg = PXP2_REG_PSWRQ_TM0_L2P; + break; + } + REG_WR(bp, start_reg + BP_FUNC(bp)*4, + ILT_RANGE((ilt_start + ilt_cli->start), + (ilt_start + ilt_cli->end))); + } else { + switch (ilt_cli->client_num) { + case ILT_CLIENT_CDU: + start_reg = PXP2_REG_RQ_CDU_FIRST_ILT; + end_reg = PXP2_REG_RQ_CDU_LAST_ILT; + break; + 
case ILT_CLIENT_QM: + start_reg = PXP2_REG_RQ_QM_FIRST_ILT; + end_reg = PXP2_REG_RQ_QM_LAST_ILT; + break; + case ILT_CLIENT_SRC: + start_reg = PXP2_REG_RQ_SRC_FIRST_ILT; + end_reg = PXP2_REG_RQ_SRC_LAST_ILT; + break; + case ILT_CLIENT_TM: + start_reg = PXP2_REG_RQ_TM_FIRST_ILT; + end_reg = PXP2_REG_RQ_TM_LAST_ILT; + break; + } + REG_WR(bp, start_reg, (ilt_start + ilt_cli->start)); + REG_WR(bp, end_reg, (ilt_start + ilt_cli->end)); + } +} + +static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, + struct bnx2x_ilt *ilt, + struct ilt_client_info *ilt_cli, + u8 initop) +{ + int i; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + for (i = ilt_cli->start; i <= ilt_cli->end; i++) + bnx2x_ilt_line_init_op(bp, ilt, i, initop); + + /* init/clear the ILT boundries */ + bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop); +} + +static void bnx2x_ilt_client_init_op(struct bnx2x *bp, + struct ilt_client_info *ilt_cli, u8 initop) +{ + struct bnx2x_ilt *ilt = BP_ILT(bp); + + bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop); +} + +static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp, + int cli_num, u8 initop) +{ + struct bnx2x_ilt *ilt = BP_ILT(bp); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + bnx2x_ilt_client_init_op(bp, ilt_cli, initop); +} + +static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop) +{ + if (CONFIGURE_NIC_MODE(bp)) + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); +} + +static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) +{ + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); + if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp)) + bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); +} + +static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, + u32 psz_reg, u8 initop) +{ + struct bnx2x_ilt *ilt = BP_ILT(bp); + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; + + if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12)); + break; + case INITOP_CLEAR: + break; + } +} + +/* + * called during init common stage, ilt clients should be initialized + * prioir to calling this function + */ +static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop) +{ + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU, + PXP2_REG_RQ_CDU_P_SIZE, initop); + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM, + PXP2_REG_RQ_QM_P_SIZE, initop); + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC, + PXP2_REG_RQ_SRC_P_SIZE, initop); + bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM, + PXP2_REG_RQ_TM_P_SIZE, initop); +} + +/**************************************************************************** +* QM initializations +****************************************************************************/ +#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */ +#define QM_INIT_MIN_CID_COUNT 31 +#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT) + +/* called during init port stage */ +static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count, + u8 initop) +{ + int port = BP_PORT(bp); + + if (QM_INIT(qm_cid_count)) { + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + REG_WR(bp, QM_REG_CONNNUM_0 + port*4, + qm_cid_count/16 - 1); + break; + case INITOP_CLEAR: + break; + } + } +} + +static void 
bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count, + u32 base_reg, u32 reg) +{ + int i; + u32 wb_data[2] = {0, 0}; + for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) { + REG_WR(bp, base_reg + i*4, + qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC)); + bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2); + } +} + +/* called during init common stage */ +static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, + u8 initop) +{ + if (!QM_INIT(qm_cid_count)) + return; + + switch (initop) { + case INITOP_INIT: + /* set in the init-value array */ + case INITOP_SET: + bnx2x_qm_set_ptr_table(bp, qm_cid_count, + QM_REG_BASEADDR, QM_REG_PTRTBL); + if (CHIP_IS_E1H(bp)) + bnx2x_qm_set_ptr_table(bp, qm_cid_count, + QM_REG_BASEADDR_EXT_A, + QM_REG_PTRTBL_EXT_A); + break; + case INITOP_CLEAR: + break; + } +} + +/**************************************************************************** +* SRC initializations +****************************************************************************/ +/* called during init func stage */ +static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, + dma_addr_t t2_mapping, int src_cid_count) +{ + int i; + int port = BP_PORT(bp); + + /* Initialize T2 */ + for (i = 0; i < src_cid_count-1; i++) + t2[i].next = (u64)(t2_mapping + + (i+1)*sizeof(struct src_ent)); + + /* tell the searcher where the T2 table is */ + REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); + + bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16, + U64_LO(t2_mapping), U64_HI(t2_mapping)); + + bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16, + U64_LO((u64)t2_mapping + + (src_cid_count-1) * sizeof(struct src_ent)), + U64_HI((u64)t2_mapping + + (src_cid_count-1) * sizeof(struct src_ent))); +} +#endif /* BNX2X_INIT_OPS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c new file mode 100644 index 000000000..21a0d6afc --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -0,0 +1,13777 @@ +/* Copyright 2008-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Written by Yaniv Rosner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_cmn.h" + +typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, + struct link_params *params, + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8); +/********************************************************/ +#define ETH_HLEN 14 +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_MAX_PACKET_SIZE 1500 +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 +#define MDIO_ACCESS_TIMEOUT 1000 +#define WC_LANE_MAX 4 +#define I2C_SWITCH_WIDTH 2 +#define I2C_BSC0 0 +#define I2C_BSC1 1 +#define I2C_WA_RETRY_CNT 3 +#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1) +#define MCPR_IMC_COMMAND_READ_OP 1 +#define MCPR_IMC_COMMAND_WRITE_OP 2 + +/* LED Blink rate that will achieve ~15.9Hz */ +#define LED_BLINK_RATE_VAL_E3 354 +#define LED_BLINK_RATE_VAL_E1X_E2 480 +/***********************************************************/ +/* Shortcut definitions */ +/***********************************************************/ + +#define NIG_LATCH_BC_ENABLE_MI_INT 0 + +#define NIG_STATUS_EMAC0_MI_INT \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT +#define NIG_STATUS_XGXS0_LINK10G \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G +#define NIG_STATUS_XGXS0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS +#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE +#define NIG_STATUS_SERDES0_LINK_STATUS \ + NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS +#define NIG_MASK_MI_INT \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT +#define NIG_MASK_XGXS0_LINK10G \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G +#define NIG_MASK_XGXS0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS +#define NIG_MASK_SERDES0_LINK_STATUS \ + NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS + +#define MDIO_AN_CL73_OR_37_COMPLETE \ + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \ + MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE) + +#define XGXS_RESET_BITS \ + (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB) + +#define SERDES_RESET_BITS \ + (MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN | \ + MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD) + +#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 +#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 +#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM +#define AUTONEG_PARALLEL \ + SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION +#define AUTONEG_SGMII_FIBER_AUTODET \ + SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT +#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY + +#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE +#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \ + MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE +#define GP_STATUS_SPEED_MASK \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK +#define GP_STATUS_10M 
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M +#define GP_STATUS_100M MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M +#define GP_STATUS_1G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G +#define GP_STATUS_2_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G +#define GP_STATUS_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G +#define GP_STATUS_6G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G +#define GP_STATUS_10G_HIG \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG +#define GP_STATUS_10G_CX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 +#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX +#define GP_STATUS_10G_KX4 \ + MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 +#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR +#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI +#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS +#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI +#define GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 +#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD +#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD +#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD +#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 +#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD +#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD +#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD +#define LINK_1000XFD LINK_STATUS_SPEED_AND_DUPLEX_1000XFD +#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD +#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD +#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD +#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD +#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD +#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD +#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD + +#define LINK_UPDATE_MASK \ + (LINK_STATUS_SPEED_AND_DUPLEX_MASK | \ + LINK_STATUS_LINK_UP | \ + LINK_STATUS_PHYSICAL_LINK_FLAG | \ + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \ + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \ + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \ + LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \ + LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) + +#define SFP_EEPROM_CON_TYPE_ADDR 0x2 + #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0 + #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 + #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 + #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 + + +#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3 + #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4) + #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5) + #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6) + +#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6 + #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0) + #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1) + #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2) + #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3) + +#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 + #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 + #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 + +#define SFP_EEPROM_OPTIONS_ADDR 0x40 + #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 +#define SFP_EEPROM_OPTIONS_SIZE 2 + +#define EDC_MODE_LINEAR 0x0022 +#define EDC_MODE_LIMITING 0x0044 +#define EDC_MODE_PASSIVE_DAC 0x0055 +#define EDC_MODE_ACTIVE_DAC 0x0066 + +/* ETS defines*/ +#define DCBX_INVALID_COS (0xFF) + +#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) +#define 
ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) +#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) +#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720) +#define ETS_E3B0_PBF_MIN_W_VAL (10000) + +#define MAX_PACKET_SIZE (9700) +#define MAX_KR_LINK_RETRY 4 +#define DEFAULT_TX_DRV_BRDCT 2 +#define DEFAULT_TX_DRV_IFIR 0 +#define DEFAULT_TX_DRV_POST2 3 +#define DEFAULT_TX_DRV_IPRE_DRIVER 6 + +/**********************************************************/ +/* INTERFACE */ +/**********************************************************/ + +#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ + bnx2x_cl45_write(_bp, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ + bnx2x_cl45_read(_bp, _phy, \ + (_phy)->def_md_devad, \ + (_bank + (_addr & 0xf)), \ + _val) + +static int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, u8 notify); +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params); + +static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) +{ + u32 val = REG_RD(bp, reg); + + val |= bits; + REG_WR(bp, reg, val); + return val; +} + +static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) +{ + u32 val = REG_RD(bp, reg); + + val &= ~bits; + REG_WR(bp, reg, val); + return val; +} + +/* + * bnx2x_check_lfa - This function checks if link reinitialization is required, + * or link flap can be avoided. + * + * @params: link parameters + * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed + * condition code. + */ +static int bnx2x_check_lfa(struct link_params *params) +{ + u32 link_status, cfg_idx, lfa_mask, cfg_size; + u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config; + u32 saved_val, req_val, eee_status; + struct bnx2x *bp = params->bp; + + additional_config = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + + /* NOTE: must be first condition checked - + * to verify DCC bit is cleared in any case! + */ + if (additional_config & NO_LFA_DUE_TO_DCC_MASK) { + DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n"); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), + additional_config & ~NO_LFA_DUE_TO_DCC_MASK); + return LFA_DCC_LFA_DISABLED; + } + + /* Verify that link is up */ + link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status)); + if (!(link_status & LINK_STATUS_LINK_UP)) + return LFA_LINK_DOWN; + + /* if loaded after BOOT from SAN, don't flap the link in any case and + * rely on link set by preboot driver + */ + if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN) + return 0; + + /* Verify that loopback mode is not set */ + if (params->loopback_mode) + return LFA_LOOPBACK_ENABLED; + + /* Verify that MFW supports LFA */ + if (!params->lfa_base) + return LFA_MFW_IS_TOO_OLD; + + if (params->num_phys == 3) { + cfg_size = 2; + lfa_mask = 0xffffffff; + } else { + cfg_size = 1; + lfa_mask = 0xffff; + } + + /* Compare Duplex */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex)); + req_val = params->req_duplex[0] | (params->req_duplex[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. 
%x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_DUPLEX_MISMATCH; + } + /* Compare Flow Control */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl)); + req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_FLOW_CTRL_MISMATCH; + } + /* Compare Link Speed */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed)); + req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_LINK_SPEED_MISMATCH; + } + + for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) { + cur_speed_cap_mask = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx])); + + if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) { + DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n", + cur_speed_cap_mask, + params->speed_cap_mask[cfg_idx]); + return LFA_SPEED_CAP_MISMATCH; + } + } + + cur_req_fc_auto_adv = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)) & + REQ_FC_AUTO_ADV_MASK; + + if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) { + DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. %x\n", + cur_req_fc_auto_adv, params->req_fc_auto_adv); + return LFA_FLOW_CTRL_MISMATCH; + } + + eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ENABLE_LPI)) || + ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ADV_LPI))) { + DP(NETIF_MSG_LINK, "EEE mismatch %x vs. 
%x\n", params->eee_mode, + eee_status); + return LFA_EEE_MISMATCH; + } + + /* LFA conditions are met */ + return 0; +} +/******************************************************************/ +/* EPIO/GPIO section */ +/******************************************************************/ +static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en) +{ + u32 epio_mask, gp_oenable; + *en = 0; + /* Sanity check */ + if (epio_pin > 31) { + DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin); + return; + } + + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); + REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); + + *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; +} +static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en) +{ + u32 epio_mask, gp_output, gp_oenable; + + /* Sanity check */ + if (epio_pin > 31) { + DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin); + return; + } + DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en); + epio_mask = 1 << epio_pin; + /* Set this EPIO to output */ + gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS); + if (en) + gp_output |= epio_mask; + else + gp_output &= ~epio_mask; + + REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output); + + /* Set the value for this EPIO */ + gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); + REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); +} + +static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val) +{ + if (pin_cfg == PIN_CFG_NA) + return; + if (pin_cfg >= PIN_CFG_EPIO0) { + bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); + } else { + u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port); + } +} + +static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val) +{ + if (pin_cfg == PIN_CFG_NA) + return -EINVAL; + if (pin_cfg >= PIN_CFG_EPIO0) { + bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); + } else { + u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + *val = bnx2x_get_gpio(bp, gpio_num, gpio_port); + } + return 0; + +} +/******************************************************************/ +/* ETS section */ +/******************************************************************/ +static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) +{ + /* ETS disabled configuration*/ + struct bnx2x *bp = params->bp; + + DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); + + /* mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000 + */ + + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - + * COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); + /* defines which entries (clients) are subjected to WFQ arbitration */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); + /* For strict priority entries defines the number of consecutive + * slots for the highest priority. 
+ */ + REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /* mapping between the CREDIT_WEIGHT registers and actual client + * numbers + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0); + + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0); + REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); + /* ETS mode disable */ + REG_WR(bp, PBF_REG_ETS_ENABLED, 0); + /* If ETS mode is enabled (there is no strict priority) defines a WFQ + * weight for COS0/COS1. + */ + REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); + REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710); + /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */ + REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680); + REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680); + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); +} +/****************************************************************************** +* Description: +* Getting min_w_val will be set according to line speed . +*. +******************************************************************************/ +static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars) +{ + u32 min_w_val = 0; + /* Calculate min_w_val.*/ + if (vars->link_up) { + if (vars->line_speed == SPEED_20000) + min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; + else + min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; + } else + min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; + /* If the link isn't up (static configuration for example ) The + * link will be according to 20GBPS. + */ + return min_w_val; +} +/****************************************************************************** +* Description: +* Getting credit upper bound form min_w_val. +*. +******************************************************************************/ +static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val) +{ + const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val), + MAX_PACKET_SIZE); + return credit_upper_bound; +} +/****************************************************************************** +* Description: +* Set credit upper bound for NIG. +*. +******************************************************************************/ +static void bnx2x_ets_e3b0_set_credit_upper_bound_nig( + const struct link_params *params, + const u32 min_w_val) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u32 credit_upper_bound = + bnx2x_ets_get_credit_upper_bound(min_w_val); + + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound); + REG_WR(bp, (port) ? 
NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound); + + if (!port) { + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6, + credit_upper_bound); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7, + credit_upper_bound); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8, + credit_upper_bound); + } +} +/****************************************************************************** +* Description: +* Will return the NIG ETS registers to init values.Except +* credit_upper_bound. +* That isn't used in this configuration (No WFQ is enabled) and will be +* configured according to spec +*. +******************************************************************************/ +static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, + const struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); + /* Mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS1, ... 8 - + * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by + * reset value or init tool + */ + if (port) { + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210); + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0); + } else { + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); + } + /* For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : + NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /* Mapping between the CREDIT_WEIGHT registers and actual client + * numbers + */ + if (port) { + /*Port 1 has 6 COS*/ + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0); + } else { + /*Port 0 has 9 COS*/ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, + 0x43210876); + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); + } + + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - + * COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + if (port) + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f); + else + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff); + /* defines which entries (clients) are subjected to WFQ arbitration */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : + NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); + + /* Please notice the register address are note continuous and a + * for here is note appropriate.In 2 port mode port0 only COS0-5 + * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 + * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT + * are never used for WFQ + */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0); + REG_WR(bp, (port) ? 
NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0); + if (!port) { + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0); + } + + bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val); +} +/****************************************************************************** +* Description: +* Set credit upper bound for PBF. +*. +******************************************************************************/ +static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf( + const struct link_params *params, + const u32 min_w_val) +{ + struct bnx2x *bp = params->bp; + const u32 credit_upper_bound = + bnx2x_ets_get_credit_upper_bound(min_w_val); + const u8 port = params->port; + u32 base_upper_bound = 0; + u8 max_cos = 0; + u8 i = 0; + /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 + * port mode port1 has COS0-2 that can be used for WFQ. + */ + if (!port) { + base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; + } else { + base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1; + } + + for (i = 0; i < max_cos; i++) + REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound); +} + +/****************************************************************************** +* Description: +* Will return the PBF ETS registers to init values.Except +* credit_upper_bound. +* That isn't used in this configuration (No WFQ is enabled) and will be +* configured according to spec +*. +******************************************************************************/ +static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL; + u8 i = 0; + u32 base_weight = 0; + u8 max_cos = 0; + + /* Mapping between entry priority to client number 0 - COS0 + * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. + * TODO_ETS - Should be done by reset value or init tool + */ + if (port) + /* 0x688 (|011|0 10|00 1|000) */ + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688); + else + /* (10 1|100 |011|0 10|00 1|000) */ + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688); + + /* TODO_ETS - Should be done by reset value or init tool */ + if (port) + /* 0x688 (|011|0 10|00 1|000)*/ + REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688); + else + /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */ + REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688); + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 : + PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100); + + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0); + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); + /* In 2 port mode port0 has COS0-5 that can be used for WFQ. + * In 4 port mode port1 has COS0-2 that can be used for WFQ. 
+ */ + if (!port) { + base_weight = PBF_REG_COS0_WEIGHT_P0; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; + } else { + base_weight = PBF_REG_COS0_WEIGHT_P1; + max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1; + } + + for (i = 0; i < max_cos; i++) + REG_WR(bp, base_weight + (0x4 * i), 0); + + bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); +} +/****************************************************************************** +* Description: +* E3B0 disable will return basically the values to init values. +*. +******************************************************************************/ +static int bnx2x_ets_e3b0_disabled(const struct link_params *params, + const struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + + if (!CHIP_IS_E3B0(bp)) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n"); + return -EINVAL; + } + + bnx2x_ets_e3b0_nig_disabled(params, vars); + + bnx2x_ets_e3b0_pbf_disabled(params); + + return 0; +} + +/****************************************************************************** +* Description: +* Disable will return basically the values to init values. +* +******************************************************************************/ +int bnx2x_ets_disabled(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + int bnx2x_status = 0; + + if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp))) + bnx2x_ets_e2e3a0_disabled(params); + else if (CHIP_IS_E3B0(bp)) + bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars); + else { + DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n"); + return -EINVAL; + } + + return bnx2x_status; +} + +/****************************************************************************** +* Description +* Set the COS mappimg to SP and BW until this point all the COS are not +* set as SP or BW. +******************************************************************************/ +static int bnx2x_ets_e3b0_cli_map(const struct link_params *params, + const struct bnx2x_ets_params *ets_params, + const u8 cos_sp_bitmap, + const u8 cos_bw_bitmap) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3); + const u8 pbf_cli_sp_bitmap = cos_sp_bitmap; + const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3; + const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap; + + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT : + NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap); + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap); + + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : + NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, + nig_cli_subject2wfq_bitmap); + + REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : + PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, + pbf_cli_subject2wfq_bitmap); + + return 0; +} + +/****************************************************************************** +* Description: +* This function is needed because NIG ARB_CREDIT_WEIGHT_X are +* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. 
+******************************************************************************/ +static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, + const u8 cos_entry, + const u32 min_w_val_nig, + const u32 min_w_val_pbf, + const u16 total_bw, + const u8 bw, + const u8 port) +{ + u32 nig_reg_adress_crd_weight = 0; + u32 pbf_reg_adress_crd_weight = 0; + /* Calculate and set BW for this COS - use 1 instead of 0 for BW */ + const u32 cos_bw_nig = ((bw ? bw : 1) * min_w_val_nig) / total_bw; + const u32 cos_bw_pbf = ((bw ? bw : 1) * min_w_val_pbf) / total_bw; + + switch (cos_entry) { + case 0: + nig_reg_adress_crd_weight = + (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0; + pbf_reg_adress_crd_weight = (port) ? + PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0; + break; + case 1: + nig_reg_adress_crd_weight = (port) ? + NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1; + pbf_reg_adress_crd_weight = (port) ? + PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0; + break; + case 2: + nig_reg_adress_crd_weight = (port) ? + NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2; + + pbf_reg_adress_crd_weight = (port) ? + PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0; + break; + case 3: + if (port) + return -EINVAL; + nig_reg_adress_crd_weight = + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3; + pbf_reg_adress_crd_weight = + PBF_REG_COS3_WEIGHT_P0; + break; + case 4: + if (port) + return -EINVAL; + nig_reg_adress_crd_weight = + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4; + pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0; + break; + case 5: + if (port) + return -EINVAL; + nig_reg_adress_crd_weight = + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5; + pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0; + break; + } + + REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig); + + REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf); + + return 0; +} +/****************************************************************************** +* Description: +* Calculate the total BW.A value of 0 isn't legal. +* +******************************************************************************/ +static int bnx2x_ets_e3b0_get_total_bw( + const struct link_params *params, + struct bnx2x_ets_params *ets_params, + u16 *total_bw) +{ + struct bnx2x *bp = params->bp; + u8 cos_idx = 0; + u8 is_bw_cos_exist = 0; + + *total_bw = 0 ; + /* Calculate total BW requested */ + for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { + if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { + is_bw_cos_exist = 1; + if (!ets_params->cos[cos_idx].params.bw_params.bw) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" + "was set to 0\n"); + /* This is to prevent a state when ramrods + * can't be sent + */ + ets_params->cos[cos_idx].params.bw_params.bw + = 1; + } + *total_bw += + ets_params->cos[cos_idx].params.bw_params.bw; + } + } + + /* Check total BW is valid */ + if ((is_bw_cos_exist == 1) && (*total_bw != 100)) { + if (*total_bw == 0) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_E3B0_config total BW shouldn't be 0\n"); + return -EINVAL; + } + DP(NETIF_MSG_LINK, + "bnx2x_ets_E3B0_config total BW should be 100\n"); + /* We can handle a case whre the BW isn't 100 this can happen + * if the TC are joined. + */ + } + return 0; +} + +/****************************************************************************** +* Description: +* Invalidate all the sp_pri_to_cos. 
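+*	(i.e. mark every entry as DCBX_INVALID_COS so that the strict-priority
+*	slots can be rebuilt by bnx2x_ets_e3b0_sp_pri_to_cos_set() below)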
+* +******************************************************************************/ +static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) +{ + u8 pri = 0; + for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++) + sp_pri_to_cos[pri] = DCBX_INVALID_COS; +} +/****************************************************************************** +* Description: +* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers +* according to sp_pri_to_cos. +* +******************************************************************************/ +static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, + u8 *sp_pri_to_cos, const u8 pri, + const u8 cos_entry) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : + DCBX_E3B0_MAX_NUM_COS_PORT0; + + if (pri >= max_num_of_cos) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " + "parameter Illegal strict priority\n"); + return -EINVAL; + } + + if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " + "parameter There can't be two COS's with " + "the same strict pri\n"); + return -EINVAL; + } + + sp_pri_to_cos[pri] = cos_entry; + return 0; + +} + +/****************************************************************************** +* Description: +* Returns the correct value according to COS and priority in +* the sp_pri_cli register. +* +******************************************************************************/ +static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, + const u8 pri_set, + const u8 pri_offset, + const u8 entry_size) +{ + u64 pri_cli_nig = 0; + pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size * + (pri_set + pri_offset)); + + return pri_cli_nig; +} +/****************************************************************************** +* Description: +* Returns the correct value according to COS and priority in the +* sp_pri_cli register for NIG. +* +******************************************************************************/ +static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) +{ + /* MCP Dbg0 and dbg1 are always with higher strict pri*/ + const u8 nig_cos_offset = 3; + const u8 nig_pri_offset = 3; + + return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set, + nig_pri_offset, 4); + +} +/****************************************************************************** +* Description: +* Returns the correct value according to COS and priority in the +* sp_pri_cli register for PBF. 
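+*	(unlike the NIG variant above, the PBF value packs plain 3-bit entries;
+*	pbf_cos_offset and pbf_pri_offset are both 0, apparently because the
+*	PBF arbiter has no MCP/debug clients ahead of the COS clients)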
+* +******************************************************************************/ +static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) +{ + const u8 pbf_cos_offset = 0; + const u8 pbf_pri_offset = 0; + + return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set, + pbf_pri_offset, 3); + +} + +/****************************************************************************** +* Description: +* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers +* according to sp_pri_to_cos.(which COS has higher priority) +* +******************************************************************************/ +static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, + u8 *sp_pri_to_cos) +{ + struct bnx2x *bp = params->bp; + u8 i = 0; + const u8 port = params->port; + /* MCP Dbg0 and dbg1 are always with higher strict pri*/ + u64 pri_cli_nig = 0x210; + u32 pri_cli_pbf = 0x0; + u8 pri_set = 0; + u8 pri_bitmask = 0; + const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : + DCBX_E3B0_MAX_NUM_COS_PORT0; + + u8 cos_bit_to_set = (1 << max_num_of_cos) - 1; + + /* Set all the strict priority first */ + for (i = 0; i < max_num_of_cos; i++) { + if (sp_pri_to_cos[i] != DCBX_INVALID_COS) { + if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_sp_set_pri_cli_reg " + "invalid cos entry\n"); + return -EINVAL; + } + + pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig( + sp_pri_to_cos[i], pri_set); + + pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf( + sp_pri_to_cos[i], pri_set); + pri_bitmask = 1 << sp_pri_to_cos[i]; + /* COS is used remove it from bitmap.*/ + if (!(pri_bitmask & cos_bit_to_set)) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_sp_set_pri_cli_reg " + "invalid There can't be two COS's with" + " the same strict pri\n"); + return -EINVAL; + } + cos_bit_to_set &= ~pri_bitmask; + pri_set++; + } + } + + /* Set all the Non strict priority i= COS*/ + for (i = 0; i < max_num_of_cos; i++) { + pri_bitmask = 1 << i; + /* Check if COS was already used for SP */ + if (pri_bitmask & cos_bit_to_set) { + /* COS wasn't used for SP */ + pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig( + i, pri_set); + + pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf( + i, pri_set); + /* COS is used remove it from bitmap.*/ + cos_bit_to_set &= ~pri_bitmask; + pri_set++; + } + } + + if (pri_set != max_num_of_cos) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all " + "entries were set\n"); + return -EINVAL; + } + + if (port) { + /* Only 6 usable clients*/ + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, + (u32)pri_cli_nig); + + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf); + } else { + /* Only 9 usable clients*/ + const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig); + const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF); + + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, + pri_cli_nig_lsb); + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, + pri_cli_nig_msb); + + REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf); + } + return 0; +} + +/****************************************************************************** +* Description: +* Configure the COS to ETS according to BW and SP settings. 
+******************************************************************************/ +int bnx2x_ets_e3b0_config(const struct link_params *params, + const struct link_vars *vars, + struct bnx2x_ets_params *ets_params) +{ + struct bnx2x *bp = params->bp; + int bnx2x_status = 0; + const u8 port = params->port; + u16 total_bw = 0; + const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars); + const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL; + u8 cos_bw_bitmap = 0; + u8 cos_sp_bitmap = 0; + u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0}; + const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : + DCBX_E3B0_MAX_NUM_COS_PORT0; + u8 cos_entry = 0; + + if (!CHIP_IS_E3B0(bp)) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n"); + return -EINVAL; + } + + if ((ets_params->num_of_cos > max_num_of_cos)) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS " + "isn't supported\n"); + return -EINVAL; + } + + /* Prepare sp strict priority parameters*/ + bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos); + + /* Prepare BW parameters*/ + bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params, + &total_bw); + if (bnx2x_status) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_E3B0_config get_total_bw failed\n"); + return -EINVAL; + } + + /* Upper bound is set according to current link speed (min_w_val + * should be the same for upper bound and COS credit val). + */ + bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); + bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf); + + + for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { + if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { + cos_bw_bitmap |= (1 << cos_entry); + /* The function also sets the BW in HW(not the mappin + * yet) + */ + bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( + bp, cos_entry, min_w_val_nig, min_w_val_pbf, + total_bw, + ets_params->cos[cos_entry].params.bw_params.bw, + port); + } else if (bnx2x_cos_state_strict == + ets_params->cos[cos_entry].state){ + cos_sp_bitmap |= (1 << cos_entry); + + bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set( + params, + sp_pri_to_cos, + ets_params->cos[cos_entry].params.sp_params.pri, + cos_entry); + + } else { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_config cos state not valid\n"); + return -EINVAL; + } + if (bnx2x_status) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_e3b0_config set cos bw failed\n"); + return bnx2x_status; + } + } + + /* Set SP register (which COS has higher priority) */ + bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params, + sp_pri_to_cos); + + if (bnx2x_status) { + DP(NETIF_MSG_LINK, + "bnx2x_ets_E3B0_config set_pri_cli_reg failed\n"); + return bnx2x_status; + } + + /* Set client mapping of BW and strict */ + bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params, + cos_sp_bitmap, + cos_bw_bitmap); + + if (bnx2x_status) { + DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n"); + return bnx2x_status; + } + return 0; +} +static void bnx2x_ets_bw_limit_common(const struct link_params *params) +{ + /* ETS disabled configuration */ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); + /* Defines which entries (clients) are subjected to WFQ arbitration + * COS0 0x8 + * COS1 0x10 + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); + /* Mapping between the ARB_CREDIT_WEIGHT registers and actual + * client numbers (WEIGHT_0 does not actually have to represent + * client 0) + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 + */ 
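+	/* Illustrative decoding of the 0x111A written below, assuming the
+	 * 3-bit PRI4..PRI0 fields described in the comment above:
+	 *   (cos1=001 << 12) | (cos0=000 << 9) | (dbg1=100 << 6) |
+	 *   (dbg0=011 << 3) | (MCP=010) = 0x1000 | 0x100 | 0x18 | 0x2 = 0x111A
+	 */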
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); + + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + + /* ETS mode enabled*/ + REG_WR(bp, PBF_REG_ETS_ENABLED, 1); + + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 + * entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); + + /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ + REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); + REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, + ETS_BW_LIMIT_CREDIT_UPPER_BOUND); +} + +void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, + const u32 cos1_bw) +{ + /* ETS disabled configuration*/ + struct bnx2x *bp = params->bp; + const u32 total_bw = cos0_bw + cos1_bw; + u32 cos0_credit_weight = 0; + u32 cos1_credit_weight = 0; + + DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); + + if ((!total_bw) || + (!cos0_bw) || + (!cos1_bw)) { + DP(NETIF_MSG_LINK, "Total BW can't be zero\n"); + return; + } + + cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/ + total_bw; + cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/ + total_bw; + + bnx2x_ets_bw_limit_common(params); + + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight); + + REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight); + REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight); +} + +int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) +{ + /* ETS disabled configuration*/ + struct bnx2x *bp = params->bp; + u32 val = 0; + + DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); + /* Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, + * 3 - COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); + /* For strict priority entries defines the number of consecutive slots + * for the highest priority. + */ + REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /* ETS mode disable */ + REG_WR(bp, PBF_REG_ETS_ENABLED, 0); + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100); + + /* Defines the number of consecutive slots for the strict priority */ + REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); + + /* Mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 + * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 + */ + val = (!strict_cos) ? 
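+	      /* Assuming the same 3-bit PRI4..PRI0 packing as above, the two
+	       * candidate values decode as:
+	       *   0x2318 = 010 001 100 011 000 -> dbg0, dbg1, cos1, cos0, MCP
+	       *   0x22E0 = 010 001 011 100 000 -> dbg0, dbg1, cos0, cos1, MCP
+	       */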
0x2318 : 0x22E0; + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); + + return 0; +} + +/******************************************************************/ +/* PFC section */ +/******************************************************************/ +static void bnx2x_update_pfc_xmac(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + struct bnx2x *bp = params->bp; + u32 xmac_base; + u32 pause_val, pfc0_val, pfc1_val; + + /* XMAC base adrr */ + xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Initialize pause and pfc registers */ + pause_val = 0x18000; + pfc0_val = 0xFFFF8000; + pfc1_val = 0x2; + + /* No PFC support */ + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) { + + /* RX flow control - Process pause frame in receive direction + */ + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) + pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; + + /* TX flow control - Send pause packet when buffer is full */ + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) + pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; + } else {/* PFC support */ + pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN | + XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN | + XMAC_PFC_CTRL_HI_REG_RX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_TX_PFC_EN | + XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + /* Write pause and PFC registers */ + REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + pfc1_val &= ~XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON; + + } + + /* Write pause and PFC registers */ + REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val); + + + /* Set MAC address for source TX Pause/PFC frames */ + REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + (params->mac_addr[5]))); + REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI, + ((params->mac_addr[0] << 8) | + (params->mac_addr[1]))); + + udelay(30); +} + +/******************************************************************/ +/* MAC/PBF section */ +/******************************************************************/ +static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, + u32 emac_base) +{ + u32 new_mode, cur_mode; + u32 clc_cnt; + /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz + * (a value of 49==0x31) and make sure that the AUTO poll is off + */ + cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); + + if (USES_WARPCORE(bp)) + clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; + else + clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT; + + if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) && + (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45))) + return; + + new_mode = cur_mode & + ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT); + new_mode |= clc_cnt; + new_mode |= (EMAC_MDIO_MODE_CLAUSE_45); + + DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n", + cur_mode, new_mode); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode); + udelay(40); +} + +static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp, + struct link_params *params) +{ + u8 phy_index; + /* Set mdio clock per phy */ + for (phy_index = INT_PHY; phy_index < params->num_phys; + phy_index++) + bnx2x_set_mdio_clk(bp, params->chip_id, + params->phy[phy_index].mdio_ctrl); +} + +static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) +{ + u32 port4mode_ovwr_val; + /* Check 
4-port override enabled */ + port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); + if (port4mode_ovwr_val & (1<<0)) { + /* Return 4-port mode override value */ + return ((port4mode_ovwr_val & (1<<1)) == (1<<1)); + } + /* Return 4-port mode from input pin */ + return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN); +} + +static void bnx2x_emac_init(struct link_params *params, + struct link_vars *vars) +{ + /* reset and unreset the emac core */ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + u32 val; + u16 timeout; + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + udelay(5); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + + /* init emac - use read-modify-write */ + /* self clear reset */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); + + timeout = 200; + do { + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); + if (!timeout) { + DP(NETIF_MSG_LINK, "EMAC timeout!\n"); + return; + } + timeout--; + } while (val & EMAC_MODE_RESET); + + bnx2x_set_mdio_emac_per_phy(bp, params); + /* Set mac address */ + val = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val); + + val = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); +} + +static void bnx2x_set_xumac_nig(struct link_params *params, + u16 tx_pause_en, + u8 enable) +{ + struct bnx2x *bp = params->bp; + + REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, + enable); + REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, + enable); + REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : + NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); +} + +static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en) +{ + u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + u32 val; + struct bnx2x *bp = params->bp; + if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) + return; + val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG); + if (en) + val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + else + val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + /* Disable RX and TX */ + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); +} + +static void bnx2x_umac_enable(struct link_params *params, + struct link_vars *vars, u8 lb) +{ + u32 val; + u32 umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + struct bnx2x *bp = params->bp; + /* Reset UMAC */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + usleep_range(1000, 2000); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + + DP(NETIF_MSG_LINK, "enabling UMAC\n"); + + /* This register opens the gate for the UMAC despite its name */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); + + val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN | + UMAC_COMMAND_CONFIG_REG_PAD_EN | + UMAC_COMMAND_CONFIG_REG_SW_RESET | + UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK; + switch (vars->line_speed) { + case SPEED_10: + val |= (0<<2); + break; + case SPEED_100: + val |= (1<<2); + break; + case SPEED_1000: + val |= (2<<2); + break; + case SPEED_2500: + val |= (3<<2); + break; + default: + DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n", + vars->line_speed); + break; + } + if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE; + + if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) + val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; + + if (vars->duplex == DUPLEX_HALF) + val |= UMAC_COMMAND_CONFIG_REG_HD_ENA; + + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + udelay(50); + + /* Configure UMAC for EEE */ + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + DP(NETIF_MSG_LINK, "configured UMAC for EEE\n"); + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, + UMAC_UMAC_EEE_CTRL_REG_EEE_EN); + REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); + } else { + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); + } + + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ + REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + (params->mac_addr[5]))); + REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, + ((params->mac_addr[0] << 8) | + (params->mac_addr[1]))); + + /* Enable RX and TX */ + val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN; + val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + udelay(50); + + /* Remove SW Reset */ + val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET; + + /* Check loopback mode */ + if (lb) + val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame + * length used by the MAC receive logic to check frames. 
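+	 * (0x2710 below is 10000 decimal, i.e. a 10000-byte limit that covers
+	 * jumbo frames and fits within the 14-bit field.)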
+ */ + REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); + bnx2x_set_xumac_nig(params, + ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); + vars->mac_type = MAC_TYPE_UMAC; + +} + +/* Define the XMAC mode */ +static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) +{ + struct bnx2x *bp = params->bp; + u32 is_port4mode = bnx2x_is_4_port_mode(bp); + + /* In 4-port mode, need to set the mode only once, so if XMAC is + * already out of reset, it means the mode has already been set, + * and it must not* reset the XMAC again, since it controls both + * ports of the path + */ + + if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || + (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || + (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) && + is_port4mode && + (REG_RD(bp, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC)) { + DP(NETIF_MSG_LINK, + "XMAC already out of reset in 4-port mode\n"); + return; + } + + /* Hard reset */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC); + usleep_range(1000, 2000); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC); + if (is_port4mode) { + DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); + + /* Set the number of ports on the system side to up to 2 */ + REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); + + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + /* Set the number of ports on the system side to 1 */ + REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); + if (max_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, + "Init XMAC to 10G x 1 port per path\n"); + /* Set the number of ports on the Warp Core to 10G */ + REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); + } else { + DP(NETIF_MSG_LINK, + "Init XMAC to 20G x 2 ports per path\n"); + /* Set the number of ports on the Warp Core to 20G */ + REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1); + } + } + /* Soft reset */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + usleep_range(1000, 2000); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); + +} + +static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en) +{ + u8 port = params->port; + struct bnx2x *bp = params->bp; + u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + u32 val; + + if (REG_RD(bp, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC) { + /* Send an indication to change the state in the NIG back to XON + * Clearing this bit enables the next set of this bit to get + * rising edge + */ + pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl & ~(1<<1))); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl | (1<<1))); + DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); + val = REG_RD(bp, xmac_base + XMAC_REG_CTRL); + if (en) + val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + else + val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); + } +} + +static int bnx2x_xmac_enable(struct link_params *params, + struct link_vars *vars, u8 lb) +{ + u32 val, xmac_base; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "enabling XMAC\n"); + + xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + bnx2x_xmac_init(params, vars->line_speed); + + /* This register determines on which events the MAC will assert + * error on the i/f to the NIG along w/ EOP. 
+ */ + + /* This register tells the NIG whether to send traffic to UMAC + * or XMAC + */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); + + /* When XMAC is in XLGMII mode, disable sending idles for fault + * detection. + */ + if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) { + REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL, + (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE | + XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE)); + REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + } + /* Set Max packet size */ + REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); + + /* CRC append for Tx packets */ + REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); + + /* update PFC */ + bnx2x_update_pfc_xmac(params, vars, 0); + + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n"); + REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008); + REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1); + } else { + REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0); + } + + /* Enable TX and RX */ + val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; + + /* Set MAC in XLGMII mode for dual-mode */ + if ((vars->line_speed == SPEED_20000) && + (params->phy[INT_PHY].supported & + SUPPORTED_20000baseKR2_Full)) + val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB; + + /* Check loopback mode */ + if (lb) + val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; + REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); + bnx2x_set_xumac_nig(params, + ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); + + vars->mac_type = MAC_TYPE_XMAC; + + return 0; +} + +static int bnx2x_emac_enable(struct link_params *params, + struct link_vars *vars, u8 lb) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + u32 val; + + DP(NETIF_MSG_LINK, "enabling EMAC\n"); + + /* Disable BMAC */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* enable emac and not bmac */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); + + /* ASIC */ + if (vars->phy_flags & PHY_XGXS_FLAG) { + u32 ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + DP(NETIF_MSG_LINK, "XGXS\n"); + /* select the master lanes (out of 0-3) */ + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane); + /* select XGXS */ + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); + + } else { /* SerDes */ + DP(NETIF_MSG_LINK, "SerDes\n"); + /* select SerDes */ + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0); + } + + bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_RESET); + bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_RESET); + + /* pause enable/disable */ + bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) { + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) + bnx2x_bits_en(bp, emac_base + + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) + bnx2x_bits_en(bp, emac_base + + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + } else + bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_FLOW_EN); + + /* KEEP_VLAN_TAG, promiscuous */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); + val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; + + /* Setting this bit causes MAC control frames (except for pause + * frames) to be passed on for processing. This setting has no + * affect on the operation of the pause frames. This bit effects + * all packets regardless of RX Parser packet sorting logic. + * Turn the PFC off to make sure we are in Xon state before + * enabling it. 
+ */ + EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { + DP(NETIF_MSG_LINK, "PFC is enabled\n"); + /* Enable PFC again */ + EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, + EMAC_REG_RX_PFC_MODE_RX_EN | + EMAC_REG_RX_PFC_MODE_TX_EN | + EMAC_REG_RX_PFC_MODE_PRIORITIES); + + EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM, + ((0x0101 << + EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) | + (0x00ff << + EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT))); + val |= EMAC_RX_MODE_KEEP_MAC_CONTROL; + } + EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); + + /* Set Loopback */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + if (lb) + val |= 0x810; + else + val &= ~0x810; + EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); + + /* Enable emac */ + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); + + /* Enable emac for jumbo packets */ + EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, + (EMAC_RX_MTU_SIZE_JUMBO_ENA | + (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); + + /* Strip CRC */ + REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); + + /* Disable the NIG in/out to the bmac */ + REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); + + /* Enable the NIG in/out to the emac */ + REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); + val = 0; + if ((params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val = 1; + + REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); + + REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); + + vars->mac_type = MAC_TYPE_EMAC; + return 0; +} + +static void bnx2x_update_pfc_bmac1(struct link_params *params, + struct link_vars *vars) +{ + u32 wb_data[2]; + struct bnx2x *bp = params->bp; + u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + + u32 val = 0x14; + if ((!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1<<5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); + + /* TX control */ + val = 0xc0; + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2); +} + +static void bnx2x_update_pfc_bmac2(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + /* Set rx control: Strip CRC and enable BigMAC to relay + * control packets to the system as well + */ + u32 wb_data[2]; + struct bnx2x *bp = params->bp; + u32 bmac_addr = params->port ? 
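+		/* The BigMAC register block is reached through the NIG ingress
+		 * BMAC memory window; its registers are written as two 32-bit
+		 * words (wb_data[2]) via REG_WR_DMAE below.
+		 */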
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + u32 val = 0x14; + + if ((!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) + /* Enable BigMAC to react on received Pause packets */ + val |= (1<<5); + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); + udelay(30); + + /* Tx control */ + val = 0xc0; + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) && + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val |= 0x800000; + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2); + + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { + DP(NETIF_MSG_LINK, "PFC is enabled\n"); + /* Enable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x0; + wb_data[0] |= (1<<0); /* RX */ + wb_data[0] |= (1<<1); /* TX */ + wb_data[0] |= (1<<2); /* Force initial Xon */ + wb_data[0] |= (1<<3); /* 8 cos */ + wb_data[0] |= (1<<5); /* STATS */ + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, + wb_data, 2); + /* Clear the force Xon */ + wb_data[0] &= ~(1<<2); + } else { + DP(NETIF_MSG_LINK, "PFC is disabled\n"); + /* Disable PFC RX & TX & STATS and set 8 COS */ + wb_data[0] = 0x8; + wb_data[1] = 0; + } + + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); + + /* Set Time (based unit is 512 bit time) between automatic + * re-sending of PP packets amd enable automatic re-send of + * Per-Priroity Packet as long as pp_gen is asserted and + * pp_disable is low. + */ + val = 0x8000; + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + val |= (1<<16); /* enable automatic re-send */ + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, + wb_data, 2); + + /* mac control */ + val = 0x3; /* Enable RX and TX */ + if (is_lb) { + val |= 0x4; /* Local loopback */ + DP(NETIF_MSG_LINK, "enable bmac loopback\n"); + } + /* When PFC enabled, Pass pause frames towards the NIG. */ + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + val |= ((1<<6)|(1<<5)); + + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); +} + +/****************************************************************************** +* Description: +* This function is needed because NIG ARB_CREDIT_WEIGHT_X are +* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. +******************************************************************************/ +static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, + u8 cos_entry, + u32 priority_mask, u8 port) +{ + u32 nig_reg_rx_priority_mask_add = 0; + + switch (cos_entry) { + case 0: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS0_PRIORITY_MASK : + NIG_REG_P0_RX_COS0_PRIORITY_MASK; + break; + case 1: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS1_PRIORITY_MASK : + NIG_REG_P0_RX_COS1_PRIORITY_MASK; + break; + case 2: + nig_reg_rx_priority_mask_add = (port) ? 
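+		/* COS0-2 priority masks exist for both ports; the COS3-5 masks
+		 * below are port 0 only, hence the -EINVAL returned for port 1
+		 * in those cases.
+		 */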
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK : + NIG_REG_P0_RX_COS2_PRIORITY_MASK; + break; + case 3: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; + break; + case 4: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; + break; + case 5: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; + break; + } + + REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); + + return 0; +} +static void bnx2x_update_mng(struct link_params *params, u32 link_status) +{ + struct bnx2x *bp = params->bp; + + REG_WR(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status), link_status); +} + +static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr) +{ + struct bnx2x *bp = params->bp; + + if (SHMEM2_HAS(bp, link_attr_sync)) + REG_WR(bp, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port]), link_attr); +} + +static void bnx2x_update_pfc_nig(struct link_params *params, + struct link_vars *vars, + struct bnx2x_nig_brb_pfc_port_params *nig_params) +{ + u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; + u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0; + u32 pkt_priority_to_cos = 0; + struct bnx2x *bp = params->bp; + u8 port = params->port; + + int set_pfc = params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED; + DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); + + /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set + * MAC control frames (that are not pause packets) + * will be forwarded to the XCM. + */ + xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK); + /* NIG params will override non PFC params, since it's possible to + * do transition from PFC to SAFC + */ + if (set_pfc) { + pause_enable = 0; + llfc_out_en = 0; + llfc_enable = 0; + if (CHIP_IS_E3(bp)) + ppp_enable = 0; + else + ppp_enable = 1; + xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : + NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); + xcm_out_en = 0; + hwpfc_enable = 1; + } else { + if (nig_params) { + llfc_out_en = nig_params->llfc_out_en; + llfc_enable = nig_params->llfc_enable; + pause_enable = nig_params->pause_enable; + } else /* Default non PFC mode - PAUSE */ + pause_enable = 1; + + xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : + NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); + xcm_out_en = 1; + } + + if (CHIP_IS_E3(bp)) + REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN : + NIG_REG_BRB0_PAUSE_IN_EN, pause_enable); + REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 : + NIG_REG_LLFC_OUT_EN_0, llfc_out_en); + REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 : + NIG_REG_LLFC_ENABLE_0, llfc_enable); + REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 : + NIG_REG_PAUSE_ENABLE_0, pause_enable); + + REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 : + NIG_REG_PPP_ENABLE_0, ppp_enable); + + REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK : + NIG_REG_LLH0_XCM_MASK, xcm_mask); + + REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : + NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); + + /* Output enable for RX_XCM # IF */ + REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN : + NIG_REG_XCM0_OUT_EN, xcm_out_en); + + /* HW PFC TX enable */ + REG_WR(bp, port ? 
NIG_REG_P1_HWPFC_ENABLE : + NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable); + + if (nig_params) { + u8 i = 0; + pkt_priority_to_cos = nig_params->pkt_priority_to_cos; + + for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++) + bnx2x_pfc_nig_rx_priority_mask(bp, i, + nig_params->rx_cos_priority_mask[i], port); + + REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : + NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, + nig_params->llfc_high_priority_classes); + + REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 : + NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0, + nig_params->llfc_low_priority_classes); + } + REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS : + NIG_REG_P0_PKT_PRIORITY_TO_COS, + pkt_priority_to_cos); +} + +int bnx2x_update_pfc(struct link_params *params, + struct link_vars *vars, + struct bnx2x_nig_brb_pfc_port_params *pfc_params) +{ + /* The PFC and pause are orthogonal to one another, meaning when + * PFC is enabled, the pause are disabled, and when PFC is + * disabled, pause are set according to the pause result. + */ + u32 val; + struct bnx2x *bp = params->bp; + u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); + + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + bnx2x_update_mng(params, vars->link_status); + + /* Update NIG params */ + bnx2x_update_pfc_nig(params, vars, pfc_params); + + if (!vars->link_up) + return 0; + + DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); + + if (CHIP_IS_E3(bp)) { + if (vars->mac_type == MAC_TYPE_XMAC) + bnx2x_update_pfc_xmac(params, vars, 0); + } else { + val = REG_RD(bp, MISC_REG_RESET_REG_2); + if ((val & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) + == 0) { + DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); + bnx2x_emac_enable(params, vars, 0); + return 0; + } + if (CHIP_IS_E2(bp)) + bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); + else + bnx2x_update_pfc_bmac1(params, vars); + + val = 0; + if ((params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val = 1; + REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); + } + return 0; +} + +static int bnx2x_bmac1_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 bmac_addr = port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + u32 wb_data[2]; + u32 val; + + DP(NETIF_MSG_LINK, "Enabling BigMAC1\n"); + + /* XGXS control */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + /* TX MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); + + /* MAC control */ + val = 0x3; + if (is_lb) { + val |= 0x4; + DP(NETIF_MSG_LINK, "enable bmac loopback\n"); + } + wb_data[0] = val; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); + + /* Set rx mtu */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); + + bnx2x_update_pfc_bmac1(params, vars); + + /* Set tx mtu */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); + + /* Set cnt max size */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); + + /* Configure SAFC */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); + + return 0; +} + +static int bnx2x_bmac2_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + u32 wb_data[2]; + + DP(NETIF_MSG_LINK, "Enabling BigMAC2\n"); + + wb_data[0] = 0; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); + udelay(30); + + /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ + wb_data[0] = 0x3c; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); + + udelay(30); + + /* TX MAC SA */ + wb_data[0] = ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + params->mac_addr[5]); + wb_data[1] = ((params->mac_addr[0] << 8) | + params->mac_addr[1]); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, + wb_data, 2); + + udelay(30); + + /* Configure SAFC */ + wb_data[0] = 0x1000200; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, + wb_data, 2); + udelay(30); + + /* Set RX MTU */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); + udelay(30); + + /* Set TX MTU */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); + udelay(30); + /* Set cnt max size */ + wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; + wb_data[1] = 0; + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); + udelay(30); + bnx2x_update_pfc_bmac2(params, vars, is_lb); + + return 0; +} + +static int bnx2x_bmac_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb, u8 reset_bmac) +{ + int rc = 0; + u8 port = params->port; + struct bnx2x *bp = params->bp; + u32 val; + /* Reset and unreset the BigMac */ + if (reset_bmac) { + REG_WR(bp, GRCBASE_MISC + 
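+		/* Writing the _CLEAR reset register appears to hold the BigMAC
+		 * in reset; the matching _SET write below releases it, the same
+		 * pattern used in bnx2x_emac_init() above.
+		 */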
MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + usleep_range(1000, 2000); + } + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + + /* Enable access for bmac registers */ + REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); + + /* Enable BMAC according to BMAC type*/ + if (CHIP_IS_E2(bp)) + rc = bnx2x_bmac2_enable(params, vars, is_lb); + else + rc = bnx2x_bmac1_enable(params, vars, is_lb); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); + val = 0; + if ((params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val = 1; + REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1); + REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); + + vars->mac_type = MAC_TYPE_BMAC; + return rc; +} + +static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en) +{ + u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + u32 wb_data[2]; + u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); + + if (CHIP_IS_E2(bp)) + bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL; + else + bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL; + /* Only if the bmac is out of reset */ + if (REG_RD(bp, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && + nig_bmac_enable) { + /* Clear Rx Enable bit in BMAC_CONTROL register */ + REG_RD_DMAE(bp, bmac_addr, wb_data, 2); + if (en) + wb_data[0] |= BMAC_CONTROL_RX_ENABLE; + else + wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; + REG_WR_DMAE(bp, bmac_addr, wb_data, 2); + usleep_range(1000, 2000); + } +} + +static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, + u32 line_speed) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 init_crd, crd; + u32 count = 1000; + + /* Disable port */ + REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); + + /* Wait for init credit */ + init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); + crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); + DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd); + + while ((init_crd != crd) && count) { + usleep_range(5000, 10000); + crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); + count--; + } + crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); + if (init_crd != crd) { + DP(NETIF_MSG_LINK, "BUG! 
init_crd 0x%x != crd 0x%x\n", + init_crd, crd); + return -EINVAL; + } + + if (flow_ctrl & BNX2X_FLOW_CTRL_RX || + line_speed == SPEED_10 || + line_speed == SPEED_100 || + line_speed == SPEED_1000 || + line_speed == SPEED_2500) { + REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); + /* Update threshold */ + REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); + /* Update init credit */ + init_crd = 778; /* (800-18-4) */ + + } else { + u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + + ETH_OVREHEAD)/16; + REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); + /* Update threshold */ + REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); + /* Update init credit */ + switch (line_speed) { + case SPEED_10000: + init_crd = thresh + 553 - 22; + break; + default: + DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", + line_speed); + return -EINVAL; + } + } + REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd); + DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", + line_speed, init_crd); + + /* Probe the credit changes */ + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); + usleep_range(5000, 10000); + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); + + /* Enable port */ + REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); + return 0; +} + +/** + * bnx2x_get_emac_base - retrive emac base address + * + * @bp: driver handle + * @mdc_mdio_access: access type + * @port: port id + * + * This function selects the MDC/MDIO access (through emac0 or + * emac1) depend on the mdc_mdio_access, port, port swapped. Each + * phy has a default access mode, which could also be overridden + * by nvram configuration. This parameter, whether this is the + * default phy configuration, or the nvram overrun + * configuration, is passed here as mdc_mdio_access and selects + * the emac_base for the CL45 read/writes operations + */ +static u32 bnx2x_get_emac_base(struct bnx2x *bp, + u32 mdc_mdio_access, u8 port) +{ + u32 emac_base = 0; + switch (mdc_mdio_access) { + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE: + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0: + if (REG_RD(bp, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC1; + else + emac_base = GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1: + if (REG_RD(bp, NIG_REG_PORT_SWAP)) + emac_base = GRCBASE_EMAC0; + else + emac_base = GRCBASE_EMAC1; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH: + emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; + break; + case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED: + emac_base = (port) ? 
GRCBASE_EMAC0 : GRCBASE_EMAC1; + break; + default: + break; + } + return emac_base; + +} + +/******************************************************************/ +/* CL22 access functions */ +/******************************************************************/ +static int bnx2x_cl22_write(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 reg, u16 val) +{ + u32 tmp, mode; + u8 i; + int rc = 0; + /* Switch to CL22 */ + mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* Address */ + tmp = ((phy->addr << 21) | (reg << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_22 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "write phy register failed\n"); + rc = -EFAULT; + } + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +static int bnx2x_cl22_read(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 reg, u16 *ret_val) +{ + u32 val, mode; + u16 i; + int rc = 0; + + /* Switch to CL22 */ + mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* Address */ + val = ((phy->addr << 21) | (reg << 16) | + EMAC_MDIO_COMM_COMMAND_READ_22 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + udelay(10); + + val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); + udelay(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "read phy register failed\n"); + + *ret_val = 0; + rc = -EFAULT; + } + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); + return rc; +} + +/******************************************************************/ +/* CL45 access functions */ +/******************************************************************/ +static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 *ret_val) +{ + u32 val; + u16 i; + int rc = 0; + u32 chip_id; + if (phy->flags & FLAGS_MDC_MDIO_WA_G) { + chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); + bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); + } + + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + /* Address */ + val = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + udelay(10); + + val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "read phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + *ret_val = 0; + rc = -EFAULT; + } else { + /* Data */ + val = ((phy->addr << 21) | (devad << 16) | + EMAC_MDIO_COMM_COMMAND_READ_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + udelay(10); + + val = 
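+			/* Busy-wait (up to 50 x 10us) for the clause-45 data
+			 * phase issued above to complete, mirroring the poll
+			 * used for the address phase earlier in this function.
+			 */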
REG_RD(bp, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); + break; + } + } + if (val & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "read phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + *ret_val = 0; + rc = -EFAULT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & FLAGS_MDC_MDIO_WA) { + phy->flags ^= FLAGS_DUMMY_READ; + if (phy->flags & FLAGS_DUMMY_READ) { + u16 temp_val; + bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); + } + } + + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + +static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 val) +{ + u32 tmp; + u8 i; + int rc = 0; + u32 chip_id; + if (phy->flags & FLAGS_MDC_MDIO_WA_G) { + chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); + bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl); + } + + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + + /* Address */ + tmp = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + rc = -EFAULT; + } else { + /* Data */ + tmp = ((phy->addr << 21) | (devad << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + rc = -EFAULT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & FLAGS_MDC_MDIO_WA) { + phy->flags ^= FLAGS_DUMMY_READ; + if (phy->flags & FLAGS_DUMMY_READ) { + u16 temp_val; + bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); + } + } + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); + return rc; +} + +/******************************************************************/ +/* EEE section */ +/******************************************************************/ +static u8 bnx2x_eee_has_cap(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + if (REG_RD(bp, params->shmem2_base) <= + offsetof(struct shmem2_region, eee_status[params->port])) + return 0; + + return 1; +} + +static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) +{ + switch (nvram_mode) { + case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: + *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: + *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: + *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; + break; + default: + *idle_timer = 0; + break; + } + + return 0; +} + +static int 
bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) +{ + switch (idle_timer) { + case EEE_MODE_NVRAM_BALANCED_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; + break; + case EEE_MODE_NVRAM_AGGRESSIVE_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; + break; + case EEE_MODE_NVRAM_LATENCY_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; + break; + default: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; + break; + } + + return 0; +} + +static u32 bnx2x_eee_calc_timer(struct link_params *params) +{ + u32 eee_mode, eee_idle; + struct bnx2x *bp = params->bp; + + if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* time value in eee_mode --> used directly*/ + eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; + } else { + /* hsi value in eee_mode --> time */ + if (bnx2x_eee_nvram_to_time(params->eee_mode & + EEE_MODE_NVRAM_MASK, + &eee_idle)) + return 0; + } + } else { + /* hsi values in nvram --> time*/ + eee_mode = ((REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + + if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) + return 0; + } + + return eee_idle; +} + +static int bnx2x_eee_set_timers(struct link_params *params, + struct link_vars *vars) +{ + u32 eee_idle = 0, eee_mode; + struct bnx2x *bp = params->bp; + + eee_idle = bnx2x_eee_calc_timer(params); + + if (eee_idle) { + REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), + eee_idle); + } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && + (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && + (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { + DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); + return -EINVAL; + } + + vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* eee_idle in 1u --> eee_status in 16u */ + eee_idle >>= 4; + vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | + SHMEM_EEE_TIME_OUTPUT_BIT; + } else { + if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) + return -EINVAL; + vars->eee_status |= eee_mode; + } + + return 0; +} + +static int bnx2x_eee_initial_config(struct link_params *params, + struct link_vars *vars, u8 mode) +{ + vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT; + + /* Propagate params' bits --> vars (for migration exposure) */ + if (params->eee_mode & EEE_MODE_ENABLE_LPI) + vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; + + if (params->eee_mode & EEE_MODE_ADV_LPI) + vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; + + return bnx2x_eee_set_timers(params, vars); +} + +static int bnx2x_eee_disable(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + + /* Make Certain LPI is disabled */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + + return 0; +} + +static int bnx2x_eee_advertise(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, u8 modes) +{ + struct bnx2x *bp = params->bp; + u16 val = 0; + + /* Mask events preventing LPI generation */ + REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port 
<< 2), 0xfc20); + + if (modes & SHMEM_EEE_10G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); + val |= 0x8; + } + if (modes & SHMEM_EEE_1G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n"); + val |= 0x4; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT); + + return 0; +} + +static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) +{ + struct bnx2x *bp = params->bp; + + if (bnx2x_eee_has_cap(params)) + REG_WR(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port]), eee_status); +} + +static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 adv = 0, lp = 0; + u32 lp_adv = 0; + u8 neg = 0; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); + + if (lp & 0x2) { + lp_adv |= SHMEM_EEE_100M_ADV; + if (adv & 0x2) { + if (vars->line_speed == SPEED_100) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n"); + } + } + if (lp & 0x14) { + lp_adv |= SHMEM_EEE_1G_ADV; + if (adv & 0x14) { + if (vars->line_speed == SPEED_1000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n"); + } + } + if (lp & 0x68) { + lp_adv |= SHMEM_EEE_10G_ADV; + if (adv & 0x68) { + if (vars->line_speed == SPEED_10000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n"); + } + } + + vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; + vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + if (neg) { + DP(NETIF_MSG_LINK, "EEE is active\n"); + vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; + } + +} + +/******************************************************************/ +/* BSC access functions from E3 */ +/******************************************************************/ +static void bnx2x_bsc_module_sel(struct link_params *params) +{ + int idx; + u32 board_cfg, sfp_ctrl; + u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; + struct bnx2x *bp = params->bp; + u8 port = params->port; + /* Read I2C output PINs */ + board_cfg = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.shared_hw_config.board)); + i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; + i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> + SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; + + /* Read I2C output value */ + sfp_ctrl = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)); + i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; + i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; + DP(NETIF_MSG_LINK, "Setting BSC switch\n"); + for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) + bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); +} + +static int bnx2x_bsc_read(struct link_params *params, + struct bnx2x *bp, + u8 sl_devid, + u16 sl_addr, + u8 lc_addr, + u8 xfer_cnt, + u32 *data_array) +{ + u32 val, i; + int rc = 0; + + if (xfer_cnt > 16) { + DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. 
Max is 16 bytes\n", + xfer_cnt); + return -EINVAL; + } + bnx2x_bsc_module_sel(params); + + xfer_cnt = 16 - lc_addr; + + /* Enable the engine */ + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + val |= MCPR_IMC_COMMAND_ENABLE; + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Program slave device ID */ + val = (sl_devid << 16) | sl_addr; + REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); + + /* Start xfer with 0 byte to update the address pointer ???*/ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_WRITE_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Poll for completion */ + i = 0; + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + udelay(10); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n", + i); + rc = -EFAULT; + break; + } + } + if (rc == -EFAULT) + return rc; + + /* Start xfer with read op */ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_READ_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | + (xfer_cnt); + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* Poll for completion */ + i = 0; + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + udelay(10); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i); + rc = -EFAULT; + break; + } + } + if (rc == -EFAULT) + return rc; + + for (i = (lc_addr >> 2); i < 4; i++) { + data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); +#ifdef __BIG_ENDIAN + data_array[i] = ((data_array[i] & 0x000000ff) << 24) | + ((data_array[i] & 0x0000ff00) << 8) | + ((data_array[i] & 0x00ff0000) >> 8) | + ((data_array[i] & 0xff000000) >> 24); +#endif + } + return rc; +} + +static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 or_val) +{ + u16 val; + bnx2x_cl45_read(bp, phy, devad, reg, &val); + bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); +} + +static void bnx2x_cl45_read_and_write(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 and_val) +{ + u16 val; + bnx2x_cl45_read(bp, phy, devad, reg, &val); + bnx2x_cl45_write(bp, phy, devad, reg, val & and_val); +} + +int bnx2x_phy_read(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 *ret_val) +{ + u8 phy_index; + /* Probe for the phy according to the given phy_addr, and execute + * the read request on it + */ + for (phy_index = 0; phy_index < params->num_phys; phy_index++) { + if (params->phy[phy_index].addr == phy_addr) { + return bnx2x_cl45_read(params->bp, + ¶ms->phy[phy_index], devad, + reg, ret_val); + } + } + return -EINVAL; +} + +int bnx2x_phy_write(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 val) +{ + u8 phy_index; + /* Probe for the phy according to the given phy_addr, and execute + * the write request on it + */ + for (phy_index = 0; phy_index < params->num_phys; phy_index++) { + if (params->phy[phy_index].addr == phy_addr) { + return bnx2x_cl45_write(params->bp, + ¶ms->phy[phy_index], devad, + reg, val); + } + } + return -EINVAL; +} +static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, + struct link_params *params) +{ + u8 lane = 0; + struct bnx2x *bp = params->bp; + u32 path_swap, path_swap_ovr; + u8 path, port; + + path = 
BP_PATH(bp); + port = params->port; + + if (bnx2x_is_4_port_mode(bp)) { + u32 port_swap, port_swap_ovr; + + /* Figure out path swap value */ + path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) + path_swap = (path_swap_ovr & 0x2); + else + path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP); + + if (path_swap) + path = path ^ 1; + + /* Figure out port swap value */ + port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); + if (port_swap_ovr & 0x1) + port_swap = (port_swap_ovr & 0x2); + else + port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP); + + if (port_swap) + port = port ^ 1; + + lane = (port<<1) + path; + } else { /* Two port mode - no port swap */ + + /* Figure out path swap value */ + path_swap_ovr = + REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) { + path_swap = (path_swap_ovr & 0x2); + } else { + path_swap = + REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP); + } + if (path_swap) + path = path ^ 1; + + lane = path << 1 ; + } + return lane; +} + +static void bnx2x_set_aer_mmd(struct link_params *params, + struct bnx2x_phy *phy) +{ + u32 ser_lane; + u16 offset, aer_val; + struct bnx2x *bp = params->bp; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? + (phy->addr + ser_lane) : 0; + + if (USES_WARPCORE(bp)) { + aer_val = bnx2x_get_warpcore_lane(phy, params); + /* In Dual-lane mode, two lanes are joined together, + * so in order to configure them, the AER broadcast method is + * used here. + * 0x200 is the broadcast address for lanes 0,1 + * 0x201 is the broadcast address for lanes 2,3 + */ + if (phy->flags & FLAGS_WC_DUAL_MODE) + aer_val = (aer_val >> 1) | 0x200; + } else if (CHIP_IS_E2(bp)) + aer_val = 0x3800 + offset - 1; + else + aer_val = 0x3800 + offset; + + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, aer_val); + +} + +/******************************************************************/ +/* Internal phy section */ +/******************************************************************/ + +static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port) +{ + u32 emac_base = (port) ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + + /* Set Clause 22 */ + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000); + udelay(500); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f); + udelay(500); + /* Set Clause 45 */ + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0); +} + +static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) +{ + u32 val; + + DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n"); + + val = SERDES_RESET_BITS << (port*16); + + /* Reset and unreset the SerDes/XGXS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + udelay(500); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + + bnx2x_set_serdes_access(bp, port); + + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, + DEFAULT_PHY_DEV_ADDR); +} + +static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Set correct devad */ + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18, + phy->def_md_devad); + break; + } +} + +static void bnx2x_xgxs_deassert(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 port; + u32 val; + DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n"); + port = params->port; + + val = XGXS_RESET_BITS << (port*16); + + /* Reset and unreset the SerDes/XGXS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); + udelay(500); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); + bnx2x_xgxs_specific_func(¶ms->phy[INT_PHY], params, + PHY_INIT); +} + +static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, + struct link_params *params, u16 *ieee_fc) +{ + struct bnx2x *bp = params->bp; + *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; + /* Resolve pause mode and advertisement Please refer to Table + * 28B-3 of the 802.3ab-1999 spec + */ + + switch (phy->req_flow_ctrl) { + case BNX2X_FLOW_CTRL_AUTO: + switch (params->req_fc_auto_adv) { + case BNX2X_FLOW_CTRL_BOTH: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + case BNX2X_FLOW_CTRL_RX: + case BNX2X_FLOW_CTRL_TX: + *ieee_fc |= + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + default: + break; + } + break; + case BNX2X_FLOW_CTRL_TX: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + + case BNX2X_FLOW_CTRL_RX: + case BNX2X_FLOW_CTRL_BOTH: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + + case BNX2X_FLOW_CTRL_NONE: + default: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; + break; + } + DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); +} + +static void set_phy_vars(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 actual_phy_idx, phy_index, link_cfg_idx; + u8 phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + for (phy_index = INT_PHY; phy_index < params->num_phys; + phy_index++) { + link_cfg_idx = LINK_CONFIG_IDX(phy_index); + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == EXT_PHY1) + actual_phy_idx = EXT_PHY2; + else if (phy_index == EXT_PHY2) + actual_phy_idx = EXT_PHY1; + } + params->phy[actual_phy_idx].req_flow_ctrl = + params->req_flow_ctrl[link_cfg_idx]; + + params->phy[actual_phy_idx].req_line_speed = + params->req_line_speed[link_cfg_idx]; + + params->phy[actual_phy_idx].speed_cap_mask = + 
params->speed_cap_mask[link_cfg_idx]; + + params->phy[actual_phy_idx].req_duplex = + params->req_duplex[link_cfg_idx]; + + if (params->req_line_speed[link_cfg_idx] == + SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + + DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," + " speed_cap_mask %x\n", + params->phy[actual_phy_idx].req_flow_ctrl, + params->phy[actual_phy_idx].req_line_speed, + params->phy[actual_phy_idx].speed_cap_mask); + } +} + +static void bnx2x_ext_phy_set_pause(struct link_params *params, + struct bnx2x_phy *phy, + struct link_vars *vars) +{ + u16 val; + struct bnx2x *bp = params->bp; + /* Read modify write pause advertizing */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); + + val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; + + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + } + DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); +} + +static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) +{ /* LD LP */ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; + break; + + case 0xe: /* 1 1 1 0 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; + break; + + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; + break; + + default: + break; + } + if (pause_result & (1<<0)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; + if (pause_result & (1<<1)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; + +} + +static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u16 ld_pause; /* local */ + u16 lp_pause; /* link partner */ + u16 pause_result; + struct bnx2x *bp = params->bp; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { + bnx2x_cl22_read(bp, phy, 0x4, &ld_pause); + bnx2x_cl22_read(bp, phy, 0x5, &lp_pause); + } else if (CHIP_IS_E3(bp) && + SINGLE_MEDIA_DIRECT(params)) { + u8 lane = bnx2x_get_warpcore_lane(phy, params); + u16 gp_status, gp_mask; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status); + gp_mask = (MDIO_WC_REG_GP2_STATUS_GP_2_4_CL73_AN_CMPL | + MDIO_WC_REG_GP2_STATUS_GP_2_4_CL37_LP_AN_CAP) << + lane; + if ((gp_status & gp_mask) == gp_mask) { + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } else { + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + ld_pause = ((ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + lp_pause = ((lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + << 3); + } + } else { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } + pause_result = (ld_pause & + 
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; + DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result); + bnx2x_pause_resolve(vars, pause_result); + +} + +static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u8 ret = 0; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { + /* Update the advertised flow-controled of LD/LP in AN */ + if (phy->req_line_speed == SPEED_AUTO_NEG) + bnx2x_ext_phy_update_adv_fc(phy, params, vars); + /* But set the flow-control result as the requested one */ + vars->flow_ctrl = phy->req_flow_ctrl; + } else if (phy->req_line_speed != SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + ret = 1; + bnx2x_ext_phy_update_adv_fc(phy, params, vars); + } + return ret; +} +/******************************************************************/ +/* Warpcore section */ +/******************************************************************/ +/* The init_internal_warpcore should mirror the xgxs, + * i.e. reset the lane (if needed), set aer for the + * init configuration, and set/clear SGMII flag. Internal + * phy init is done purely in phy_init stage. + */ +#define WC_TX_DRIVER(post2, idriver, ipre, ifir) \ + ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \ + (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \ + (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET) | \ + (ifir << MDIO_WC_REG_TX0_TX_DRIVER_IFIR_OFFSET)) + +#define WC_TX_FIR(post, main, pre) \ + ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \ + (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \ + (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)) + +static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 i; + static struct bnx2x_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537}, + /* Step 2 - Configure the NP registers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620} + }; + DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n"); + + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6)); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + /* Start KR2 work-around timer which handles BCM8073 link-parner */ + params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, params->link_attr_sync); +} + +static void bnx2x_disable_kr2(struct link_params *params, + struct link_vars *vars, + 
struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + int i; + static struct bnx2x_reg_set reg_set[] = { + /* Step 1 - Program the TX/RX alignment markers */ + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000} + }; + DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n"); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; + bnx2x_update_link_attr(params, params->link_attr_sync); + + vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; +} + +static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); +} + +static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy, + struct link_params *params) +{ + /* Restart autoneg on the leading lane only */ + struct bnx2x *bp = params->bp; + u16 lane = bnx2x_get_warpcore_lane(phy, params); + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); + + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); +} + +static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { + u16 lane, i, cl72_ctrl, an_adv = 0, val; + u32 wc_lane_config; + struct bnx2x *bp = params->bp; + static struct bnx2x_reg_set reg_set[] = { + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, + /* Disable Autoneg: re-enable it after adv is done. 
*/ + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}, + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0}, + }; + DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); + /* Set to default registers that may be overriden by 10G force */ + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); + cl72_ctrl &= 0x08ff; + cl72_ctrl |= 0x3800; + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); + + /* Check adding advertisement for 1G KX */ + if (((vars->line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (vars->line_speed == SPEED_1000)) { + u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; + an_adv |= (1<<5); + + /* Enable CL37 1G Parallel Detect */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); + DP(NETIF_MSG_LINK, "Advertize 1G\n"); + } + if (((vars->line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (vars->line_speed == SPEED_10000)) { + /* Check adding advertisement for 10G KR */ + an_adv |= (1<<7); + /* Enable 10G Parallel Detect */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + bnx2x_set_aer_mmd(params, phy); + DP(NETIF_MSG_LINK, "Advertize 10G\n"); + } + + /* Set Transmit PMD settings */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + WC_TX_DRIVER(0x02, 0x06, 0x09, 0)); + /* Configure the next lane if dual mode */ + if (phy->flags & FLAGS_WC_DUAL_MODE) + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1), + WC_TX_DRIVER(0x02, 0x06, 0x09, 0)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, + 0x03f0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, + 0x03f0); + + /* Advertised speeds */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv); + + /* Advertised and set FEC (Forward Error Correction) */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2, + (MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY | + MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ)); + + /* Enable CL37 BAM */ + if (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, + 1); + DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); + } + + /* Advertise pause */ + bnx2x_ext_phy_set_pause(params, phy, vars); + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0x100); + + /* Over 1G - AN local device user page 1 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1f); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) || + (phy->req_line_speed == SPEED_20000)) { + + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane), + (1<<11)); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7); + bnx2x_set_aer_mmd(params, phy); + + bnx2x_warpcore_enable_AN_KR2(phy, params, vars); + } else { + /* Enable Auto-Detect to support 1G over CL37 as well */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); + wc_lane_config = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + shared_hw_config.wc_lane_config)); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val); + /* Force cl48 sync_status LOW to avoid getting stuck in CL73 + * parallel-detect loop when CL73 and CL37 are enabled. + */ + val |= 1 << 11; + + /* Restore Polarity settings in case it was run over by + * previous link owner + */ + if (wc_lane_config & + (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane)) + val |= 3 << 2; + else + val &= ~(3 << 2); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), + val); + + bnx2x_disable_kr2(params, vars, phy); + } + + /* Enable Autoneg: only on the main lane */ + bnx2x_warpcore_restart_AN_KR(phy, params); +} + +static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val16, i, lane; + static struct bnx2x_reg_set reg_set[] = { + /* Disable Autoneg */ + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, + {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x3f00}, + {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, + {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, + /* Leave cl72 training enable, needed for KR */ + {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2} + }; + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + lane = bnx2x_get_warpcore_lane(phy, params); + /* Global registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Disable CL36 PCS Tx */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 &= ~(0x0011 << lane); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 |= (0x0303 << (lane << 1)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); 
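+ /* Note: the 0x2040 written to the PMA/PMD control register below appears to follow the Clause-45 speed-selection encoding (bits 13 and 6 set, speed field 0), i.e. a forced 10 Gb/s selection. */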
+ /* Set speed via PMA/PMD register */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); + + /* Enable encoded forced speed */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); + + /* Turn TX scramble payload only the 64/66 scrambler */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX66_CONTROL, 0x9); + + /* Turn RX scramble payload only the 64/66 scrambler */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, 0xF9); + + /* Set and clear loopback to cause a reset to 64/66 decoder */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + +} + +static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, + struct link_params *params, + u8 is_xfi) +{ + struct bnx2x *bp = params->bp; + u16 misc1_val, tap_val, tx_driver_val, lane, val; + u32 cfg_tap_val, tx_drv_brdct, tx_equal; + u32 ifir_val, ipost2_val, ipre_driver_val; + + /* Hold rxSeqStart */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); + + /* Hold tx_fifo_reset */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1); + + /* Disable CL73 AN */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + + /* Disable 100FX Enable and Auto-Detect */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, 0xFFFA); + + /* Disable 100FX Idle detect */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0080); + + /* Set Block address to Remote PHY & Clear forced_speed[5] */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F); + + /* Turn off auto-detect & fiber mode */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0xFFEE); + + /* Set filter_force_link, disable_false_link and parallel_detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + ((val | 0x0006) & 0xFFFE)); + + /* Set XFI / SFI */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); + + misc1_val &= ~(0x1f); + + if (is_xfi) { + misc1_val |= 0x5; + tap_val = WC_TX_FIR(0x08, 0x37, 0x00); + tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03, 0); + } else { + cfg_tap_val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port]. + sfi_tap_values)); + + tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK; + + misc1_val |= 0x9; + + /* TAP values are controlled by nvram, if value there isn't 0 */ + if (tx_equal) + tap_val = (u16)tx_equal; + else + tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02); + + ifir_val = DEFAULT_TX_DRV_IFIR; + ipost2_val = DEFAULT_TX_DRV_POST2; + ipre_driver_val = DEFAULT_TX_DRV_IPRE_DRIVER; + tx_drv_brdct = DEFAULT_TX_DRV_BRDCT; + + /* If any of the IFIR/IPRE_DRIVER/POST@ is set, apply all + * configuration. 
+ */ + if (cfg_tap_val & (PORT_HW_CFG_TX_DRV_IFIR_MASK | + PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK | + PORT_HW_CFG_TX_DRV_POST2_MASK)) { + ifir_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_IFIR_MASK) >> + PORT_HW_CFG_TX_DRV_IFIR_SHIFT; + ipre_driver_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_IPREDRIVER_MASK) + >> PORT_HW_CFG_TX_DRV_IPREDRIVER_SHIFT; + ipost2_val = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_POST2_MASK) >> + PORT_HW_CFG_TX_DRV_POST2_SHIFT; + } + + if (cfg_tap_val & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) { + tx_drv_brdct = (cfg_tap_val & + PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >> + PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT; + } + + tx_driver_val = WC_TX_DRIVER(ipost2_val, tx_drv_brdct, + ipre_driver_val, ifir_val); + } + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); + + /* Set Transmit PMD settings */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + tx_driver_val); + + /* Enable fiber mode, enable and invert sig_det */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd); + + /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); + + bnx2x_warpcore_set_lpi_passthrough(phy, params); + + /* 10G XFI Full Duplex */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); + + /* Release tx_fifo_reset */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0xFFFE); + /* Release rxSeqStart */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF); +} + +static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 val; + struct bnx2x *bp = params->bp; + /* Set global registers, so set AER lane to 0 */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + /* Disable sequencer */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13)); + + bnx2x_set_aer_mmd(params, phy); + + bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1)); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CTRL, 0); + /* Turn off CL73 */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL73_USERB0_CTRL, &val); + val &= ~(1<<5); + val |= (1<<6); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL73_USERB0_CTRL, val); + + /* Set 20G KR2 force speed */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f); + + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, (1<<7)); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val); + val &= ~(3<<14); + val |= (1<<15); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A); + + /* Enable sequencer (over lane 0) */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13)); + + bnx2x_set_aer_mmd(params, phy); +} + +static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, + 
struct bnx2x_phy *phy, + u16 lane) +{ + /* Rx0 anaRxControl1G */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); + + /* Rx2 anaRxControl1G */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0, 0xE070); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1, 0xC0D0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2, 0xA0B0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3, 0x8090); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); + + /* Serdes Digital Misc1 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); + + /* Serdes Digital4 Misc3 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); + + /* Set Transmit PMD settings */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + (WC_TX_FIR(0x12, 0x2d, 0x00) | + MDIO_WC_REG_TX_FIR_TAP_ENABLE)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + WC_TX_DRIVER(0x02, 0x02, 0x02, 0)); +} + +static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, + struct link_params *params, + u8 fiber_mode, + u8 always_autoneg) +{ + struct bnx2x *bp = params->bp; + u16 val16, digctrl_kx1, digctrl_kx2; + + /* Clear XFI clock comp in non-10G single lane mode. */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, ~(3<<13)); + + bnx2x_warpcore_set_lpi_passthrough(phy, params); + + if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { + /* SGMII Autoneg */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + 0x1000); + DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + val16 &= 0xcebf; + switch (phy->req_line_speed) { + case SPEED_10: + break; + case SPEED_100: + val16 |= 0x2000; + break; + case SPEED_1000: + val16 |= 0x0040; + break; + default: + DP(NETIF_MSG_LINK, + "Speed not supported: 0x%x\n", phy->req_line_speed); + return; + } + + if (phy->req_duplex == DUPLEX_FULL) + val16 |= 0x0100; + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); + + DP(NETIF_MSG_LINK, "set SGMII force speed %d\n", + phy->req_line_speed); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + DP(NETIF_MSG_LINK, " (readback) %x\n", val16); + } + + /* SGMII Slave mode and disable signal detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); + if (fiber_mode) + digctrl_kx1 = 1; + else + digctrl_kx1 &= 0xff4a; + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + digctrl_kx1); + + /* Turn off parallel detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 & ~(1<<2))); + + /* Re-enable parallel detect */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 | (1<<2))); + + /* 
Enable autodet */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + (digctrl_kx1 | 0x10)); +} + +static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 reset) +{ + u16 val; + /* Take lane out of reset after configuration is finished */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); + if (reset) + val |= 0xC000; + else + val &= 0x3FFF; + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, val); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); +} +/* Clear SFI/XFI link settings registers */ +static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, + struct link_params *params, + u16 lane) +{ + struct bnx2x *bp = params->bp; + u16 i; + static struct bnx2x_reg_set wc_regs[] = { + {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a}, + {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800}, + {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + 0x0195}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + 0x0007}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, + 0x0002}, + {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000}, + {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040}, + {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140} + }; + /* Set XFI clock comp as default. */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, (3<<13)); + + for (i = 0; i < ARRAY_SIZE(wc_regs); i++) + bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, + wc_regs[i].val); + + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); + +} + +static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, + u32 chip_id, + u32 shmem_base, u8 port, + u8 *gpio_num, u8 *gpio_port) +{ + u32 cfg_pin; + *gpio_num = 0; + *gpio_port = 0; + if (CHIP_IS_E3(bp)) { + cfg_pin = (REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_MOD_ABS_MASK) >> + PORT_HW_CFG_E3_MOD_ABS_SHIFT; + + /* Should not happen. This function called upon interrupt + * triggered by GPIO ( since EPIO can only generate interrupts + * to MCP). + * So if this function was called and none of the GPIOs was set, + * it means the shit hit the fan. 
+ */ + if ((cfg_pin < PIN_CFG_GPIO0_P0) || + (cfg_pin > PIN_CFG_GPIO3_P1)) { + DP(NETIF_MSG_LINK, + "No cfg pin %x for module detect indication\n", + cfg_pin); + return -EINVAL; + } + + *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; + *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; + } else { + *gpio_num = MISC_REGISTERS_GPIO_3; + *gpio_port = port; + } + + return 0; +} + +static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 gpio_num, gpio_port; + u32 gpio_val; + if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, + params->shmem_base, params->port, + &gpio_num, &gpio_port) != 0) + return 0; + gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); + + /* Call the handling function in case module is detected */ + if (gpio_val == 0) + return 1; + else + return 0; +} +static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 gp2_status_reg0, lane; + struct bnx2x *bp = params->bp; + + lane = bnx2x_get_warpcore_lane(phy, params); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0, + &gp2_status_reg0); + + return (gp2_status_reg0 >> (8+lane)) & 0x1; +} + +static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 serdes_net_if; + u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0; + + vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1; + + if (!vars->turn_to_run_wc_rt) + return; + + if (vars->rx_tx_asic_rst) { + u16 lane = bnx2x_get_warpcore_lane(phy, params); + serdes_net_if = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Do we get link yet? 
*/ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1, + &gp_status1); + lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */ + /*10G KR*/ + lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; + + if (lnkup_kr || lnkup) { + vars->rx_tx_asic_rst = 0; + } else { + /* Reset the lane to see if link comes up.*/ + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_warpcore_reset_lane(bp, phy, 0); + + /* Restart Autoneg */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); + + vars->rx_tx_asic_rst--; + DP(NETIF_MSG_LINK, "0x%x retry left\n", + vars->rx_tx_asic_rst); + } + break; + + default: + break; + } + + } /*params->rx_tx_asic_rst*/ + +} +static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 lane = bnx2x_get_warpcore_lane(phy, params); + struct bnx2x *bp = params->bp; + bnx2x_warpcore_clear_regs(phy, params, lane); + if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] == + SPEED_10000) && + (phy->media_type != ETH_PHY_SFP_1G_FIBER)) { + DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 0); + } else { + DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); + bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0); + } +} + +static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port = params->port; + + cfg_pin = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_TX_LASER_MASK; + /* Set the !tx_en since this pin is DISABLE_TX_LASER */ + DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); + + /* For 20G, the expected pin to be used is 3 pins after the current */ + bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) + bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); +} + +static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 serdes_net_if; + u8 fiber_mode; + u16 lane = bnx2x_get_warpcore_lane(phy, params); + serdes_net_if = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, " + "serdes_net_if = 0x%x\n", + vars->line_speed, serdes_net_if); + bnx2x_set_aer_mmd(params, phy); + bnx2x_warpcore_reset_lane(bp, phy, 1); + vars->phy_flags |= PHY_XGXS_FLAG; + if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || + (phy->req_line_speed && + ((phy->req_line_speed == SPEED_100) || + (phy->req_line_speed == SPEED_10)))) { + vars->phy_flags |= PHY_SGMII_FLAG; + DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); + bnx2x_warpcore_clear_regs(phy, params, lane); + bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1); + } else { + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Enable KR Auto Neg */ + if (params->loopback_mode != LOOPBACK_EXT) + bnx2x_warpcore_enable_AN_KR(phy, params, vars); + else { + DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); + bnx2x_warpcore_set_10G_KR(phy, params, vars); + } + break; + + case PORT_HW_CFG_NET_SERDES_IF_XFI: + bnx2x_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "Setting 10G XFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 1); + } else { + if (SINGLE_MEDIA_DIRECT(params)) { + DP(NETIF_MSG_LINK, "1G Fiber\n"); + fiber_mode = 1; + } else { + DP(NETIF_MSG_LINK, "10/100/1G SGMII\n"); + fiber_mode = 0; + } + bnx2x_warpcore_set_sgmii_speed(phy, + params, + fiber_mode, + 0); + } + + break; + + case PORT_HW_CFG_NET_SERDES_IF_SFI: + /* Issue Module detection if module is plugged, or + * enabled transmitter to avoid current leakage in case + * no module is connected + */ + if ((params->loopback_mode == LOOPBACK_NONE) || + (params->loopback_mode == LOOPBACK_EXT)) { + if (bnx2x_is_sfp_module_plugged(phy, params)) + bnx2x_sfp_module_detection(phy, params); + else + bnx2x_sfp_e3_set_transmitter(params, + phy, 1); + } + + bnx2x_warpcore_config_sfi(phy, params); + break; + + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + if (vars->line_speed != SPEED_20000) { + DP(NETIF_MSG_LINK, "Speed not supported yet\n"); + return; + } + DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n"); + bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); + /* Issue Module detection */ + + bnx2x_sfp_module_detection(phy, params); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + if (!params->loopback_mode) { + bnx2x_warpcore_enable_AN_KR(phy, params, vars); + } else { + DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n"); + bnx2x_warpcore_set_20G_force_KR2(phy, params); + } + break; + default: + DP(NETIF_MSG_LINK, + "Unsupported Serdes Net Interface 0x%x\n", + serdes_net_if); + return; + } + } + + /* Take lane out of reset after configuration is finished */ + bnx2x_warpcore_reset_lane(bp, phy, 0); + DP(NETIF_MSG_LINK, "Exit config init\n"); +} + +static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val16, lane; + bnx2x_sfp_e3_set_transmitter(params, phy, 0); + bnx2x_set_mdio_emac_per_phy(bp, params); + bnx2x_set_aer_mmd(params, phy); + /* Global register */ + bnx2x_warpcore_reset_lane(bp, phy, 1); + + /* Clear loopback settings (if any) */ + /* 10G & 20G */ + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF); + + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe); + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + 
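+ /* Note: the read-and-write below ANDs XGXSBLK0_XGXSCONTROL with ~0x10, clearing the same 0x10 bit that bnx2x_set_warpcore_loopback sets (read_or_write, 0x10) under its "Enable 1G MDIO (1-copy)" comment. */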
bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + ~0x10); + + bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00); + lane = bnx2x_get_warpcore_lane(phy, params); + /* Disable CL36 PCS Tx */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16); + val16 |= (0x11 << lane); + if (phy->flags & FLAGS_WC_DUAL_MODE) + val16 |= (0x22 << lane); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16); + val16 &= ~(0x0303 << (lane << 1)); + val16 |= (0x0101 << (lane << 1)); + if (phy->flags & FLAGS_WC_DUAL_MODE) { + val16 &= ~(0x0c0c << (lane << 1)); + val16 |= (0x0404 << (lane << 1)); + } + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16); + /* Restore AER */ + bnx2x_set_aer_mmd(params, phy); + +} + +static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val16; + u32 lane; + DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", + params->loopback_mode, phy->req_line_speed); + + if (phy->req_line_speed < SPEED_10000 || + phy->supported & SUPPORTED_20000baseKR2_Full) { + /* 10/100/1000/20G-KR2 */ + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + 0x10); + /* Set 1G loopback based on lane (1-copy) */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + val16 |= (1<flags & FLAGS_WC_DUAL_MODE) + val16 |= (2<bp; + u8 link_10g_plus; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); + if (vars->link_up) { + DP(NETIF_MSG_LINK, "phy link up\n"); + + vars->phy_link_up = 1; + vars->duplex = DUPLEX_FULL; + switch (vars->link_status & + LINK_STATUS_SPEED_AND_DUPLEX_MASK) { + case LINK_10THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_10TFD: + vars->line_speed = SPEED_10; + break; + + case LINK_100TXHD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_100T4: + case LINK_100TXFD: + vars->line_speed = SPEED_100; + break; + + case LINK_1000THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_1000TFD: + vars->line_speed = SPEED_1000; + break; + + case LINK_2500THD: + vars->duplex = DUPLEX_HALF; + /* Fall thru */ + case LINK_2500TFD: + vars->line_speed = SPEED_2500; + break; + + case LINK_10GTFD: + vars->line_speed = SPEED_10000; + break; + case LINK_20GTFD: + vars->line_speed = SPEED_20000; + break; + default: + break; + } + vars->flow_ctrl = 0; + if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX; + + if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) + vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX; + + if (!vars->flow_ctrl) + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + if (vars->line_speed && + ((vars->line_speed == SPEED_10) || + (vars->line_speed == SPEED_100))) { + vars->phy_flags |= PHY_SGMII_FLAG; + } else { + vars->phy_flags &= ~PHY_SGMII_FLAG; + } + if (vars->line_speed && + USES_WARPCORE(bp) && + (vars->line_speed == SPEED_1000)) + vars->phy_flags |= PHY_SGMII_FLAG; + /* Anything 10 and over 
uses the bmac */ + link_10g_plus = (vars->line_speed >= SPEED_10000); + + if (link_10g_plus) { + if (USES_WARPCORE(bp)) + vars->mac_type = MAC_TYPE_XMAC; + else + vars->mac_type = MAC_TYPE_BMAC; + } else { + if (USES_WARPCORE(bp)) + vars->mac_type = MAC_TYPE_UMAC; + else + vars->mac_type = MAC_TYPE_EMAC; + } + } else { /* Link down */ + DP(NETIF_MSG_LINK, "phy link down\n"); + + vars->phy_link_up = 0; + + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + /* Indicate no mac active */ + vars->mac_type = MAC_TYPE_NONE; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) + vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG; + } +} + +void bnx2x_link_status_update(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 sync_offset, media_types; + /* Update PHY configuration */ + set_phy_vars(params, vars); + + vars->link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[port].link_status)); + + /* Force link UP in non LOOPBACK_EXT loopback mode(s) */ + if (params->loopback_mode != LOOPBACK_NONE && + params->loopback_mode != LOOPBACK_EXT) + vars->link_status |= LINK_STATUS_LINK_UP; + + if (bnx2x_eee_has_cap(params)) + vars->eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + vars->phy_flags = PHY_XGXS_FLAG; + bnx2x_sync_link(params, vars); + /* Sync media type */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].media_type); + media_types = REG_RD(bp, sync_offset); + + params->phy[INT_PHY].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; + params->phy[EXT_PHY1].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; + params->phy[EXT_PHY2].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; + DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types); + + /* Sync AEU offset */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + + vars->aeu_int_mask = REG_RD(bp, sync_offset); + + /* Sync PFC status */ + if (vars->link_status & LINK_STATUS_PFC_ENABLED) + params->feature_config_flags |= + FEATURE_CONFIG_PFC_ENABLED; + else + params->feature_config_flags &= + ~FEATURE_CONFIG_PFC_ENABLED; + + if (SHMEM2_HAS(bp, link_attr_sync)) + params->link_attr_sync = SHMEM2_RD(bp, + link_attr_sync[params->port]); + + DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", + vars->link_status, vars->phy_link_up, vars->aeu_int_mask); + DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", + vars->line_speed, vars->duplex, vars->flow_ctrl); +} + +static void bnx2x_set_master_ln(struct link_params *params, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + u16 new_master_ln, ser_lane; + ser_lane = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + + /* Set the master_ln for AN */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + &new_master_ln); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2 , + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + (new_master_ln | ser_lane)); +} + +static int bnx2x_reset_unicore(struct 
link_params *params, + struct bnx2x_phy *phy, + u8 set_serdes) +{ + struct bnx2x *bp = params->bp; + u16 mii_control; + u16 i; + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + + /* Reset the unicore */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_RESET)); + if (set_serdes) + bnx2x_set_serdes_access(bp, params->port); + + /* Wait for the reset to self clear */ + for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { + udelay(5); + + /* The reset erased the previous bank value */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + + if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { + udelay(5); + return 0; + } + } + + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + params->port); + DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); + return -EINVAL; + +} + +static void bnx2x_set_swap_lanes(struct link_params *params, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + /* Each two bits represents a lane number: + * No swap is 0123 => 0x1b no need to enable the swap + */ + u16 rx_lane_swap, tx_lane_swap; + + rx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); + tx_lane_swap = ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); + + if (rx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, + (rx_lane_swap | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); + } else { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); + } + + if (tx_lane_swap != 0x1b) { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, + (tx_lane_swap | + MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); + } else { + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); + } +} + +static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 control2; + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + &control2); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + else + control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; + DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", + phy->speed_cap_mask, control2); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + control2); + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + DP(NETIF_MSG_LINK, "XGXS\n"); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + &control2); + + + control2 |= + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + control2); + + /* Disable parallel detection of HiG */ + CL22_WR_OVER_CL45(bp, phy, + 
MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); + } +} + +static void bnx2x_set_autoneg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u8 enable_cl73) +{ + struct bnx2x *bp = params->bp; + u16 reg_val; + + /* CL37 Autoneg */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + + /* CL37 Autoneg Enabled */ + if (vars->line_speed == SPEED_AUTO_NEG) + reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; + else /* CL37 Autoneg Disabled */ + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* Enable/Disable Autodetection */ + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); + reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; + if (vars->line_speed == SPEED_AUTO_NEG) + reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + else + reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); + + /* Enable TetonII and BAM autoneg */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + ®_val); + if (vars->line_speed == SPEED_AUTO_NEG) { + /* Enable BAM aneg Mode and TetonII aneg Mode */ + reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } else { + /* TetonII and BAM Autoneg Disabled */ + reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); + } + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + reg_val); + + if (enable_cl73) { + /* Enable Cl73 FSM status bits */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_UCTRL, + 0xe); + + /* Enable BAM Station Manager*/ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_BAM_CTRL1, + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN | + MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); + + /* Advertise CL73 link speeds */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + ®_val); + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) + reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + reg_val); + + /* CL73 Autoneg Enabled */ + reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; + + } else /* CL73 Autoneg Disabled */ + reg_val = 0; + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); +} + +/* Program SerDes, forced speed */ +static void bnx2x_program_serdes(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 reg_val; + + /* Program duplex, disable autoneg and sgmii*/ + CL22_RD_OVER_CL45(bp, phy, + 
MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); + if (phy->req_duplex == DUPLEX_FULL) + reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* Program speed + * - needed only if the speed is greater than 1G (2.5G or 10G) + */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, ®_val); + /* Clearing the speed value before setting the right speed */ + DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); + + reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + + if (!((vars->line_speed == SPEED_1000) || + (vars->line_speed == SPEED_100) || + (vars->line_speed == SPEED_10))) { + + reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); + if (vars->line_speed == SPEED_10000) + reg_val |= + MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; + } + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, reg_val); + +} + +static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val = 0; + + /* Set extended capabilities */ + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) + val |= MDIO_OVER_1G_UP1_2_5G; + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= MDIO_OVER_1G_UP1_10G; + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP1, val); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP3, 0x400); +} + +static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy, + struct link_params *params, + u16 ieee_fc) +{ + struct bnx2x *bp = params->bp; + u16 val; + /* For AN, we are always publishing full duplex */ + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, &val); + val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; + val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, val); +} + +static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, + struct link_params *params, + u8 enable_cl73) +{ + struct bnx2x *bp = params->bp; + u16 mii_control; + + DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n"); + /* Enable and restart BAM/CL37 aneg */ + + if (enable_cl73) { + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + &mii_control); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + (mii_control | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); + } else { + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + DP(NETIF_MSG_LINK, + "bnx2x_restart_autoneg mii_control before = 0x%x\n", + mii_control); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); + } +} + +static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, + struct link_params *params, + struct 
link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 control1; + + /* In SGMII mode, the unicore is always slave */ + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + &control1); + control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; + /* Set sgmii mode (and not fiber) */ + control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + control1); + + /* If forced speed */ + if (!(vars->line_speed == SPEED_AUTO_NEG)) { + /* Set speed, disable autoneg */ + u16 mii_control; + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); + mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| + MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); + + switch (vars->line_speed) { + case SPEED_100: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; + break; + case SPEED_1000: + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; + break; + case SPEED_10: + /* There is nothing to set for 10M */ + break; + default: + /* Invalid speed for SGMII */ + DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", + vars->line_speed); + break; + } + + /* Setting the full duplex */ + if (phy->req_duplex == DUPLEX_FULL) + mii_control |= + MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + mii_control); + + } else { /* AN mode */ + /* Enable and restart AN */ + bnx2x_restart_autoneg(phy, params, 0); + } +} + +/* Link management + */ +static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 pd_10g, status2_1000x; + if (phy->req_line_speed != SPEED_AUTO_NEG) + return 0; + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { + DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", + params->port); + return 1; + } + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, + &pd_10g); + + if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { + DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", + params->port); + return 1; + } + return 0; +} + +static void bnx2x_update_adv_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u32 gp_status) +{ + u16 ld_pause; /* local driver */ + u16 lp_pause; /* link partner */ + u16 pause_result; + struct bnx2x *bp = params->bp; + if ((gp_status & + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) == + (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | + MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { + + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV1, + &lp_pause); + pause_result = (ld_pause & + MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + 
MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10; + DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result); + } else { + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, + &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; + DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result); + } + bnx2x_pause_resolve(vars, pause_result); + +} + +static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u32 gp_status) +{ + struct bnx2x *bp = params->bp; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + /* Resolve from gp_status in case of AN complete and not sgmii */ + if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { + /* Update the advertised flow-controled of LD/LP in AN */ + if (phy->req_line_speed == SPEED_AUTO_NEG) + bnx2x_update_adv_fc(phy, params, vars, gp_status); + /* But set the flow-control result as the requested one */ + vars->flow_ctrl = phy->req_flow_ctrl; + } else if (phy->req_line_speed != SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && + (!(vars->phy_flags & PHY_SGMII_FLAG))) { + if (bnx2x_direct_parallel_detect_used(phy, params)) { + vars->flow_ctrl = params->req_fc_auto_adv; + return; + } + bnx2x_update_adv_fc(phy, params, vars, gp_status); + } + DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); +} + +static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 rx_status, ustat_val, cl37_fsm_received; + DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); + /* Step 1: Make sure signal is detected */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_RX0, + MDIO_RX0_RX_STATUS, + &rx_status); + if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != + (MDIO_RX0_RX_STATUS_SIGDET)) { + DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." + "rx_status(0x80b0) = 0x%x\n", rx_status); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); + return; + } + /* Step 2: Check CL73 state machine */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_USTAT1, + &ustat_val); + if ((ustat_val & + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != + (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | + MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) { + DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. " + "ustat_val(0x8371) = 0x%x\n", ustat_val); + return; + } + /* Step 3: Check CL37 Message Pages received to indicate LP + * supports only CL37 + */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_REMOTE_PHY, + MDIO_REMOTE_PHY_MISC_RX_STATUS, + &cl37_fsm_received); + if ((cl37_fsm_received & + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != + (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | + MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { + DP(NETIF_MSG_LINK, "No CL37 FSM were received. 
" + "misc_rx_status(0x8330) = 0x%x\n", + cl37_fsm_received); + return; + } + /* The combined cl37/cl73 fsm state information indicating that + * we are connected to a device which does not support cl73, but + * does support cl37 BAM. In this case we disable cl73 and + * restart cl37 auto-neg + */ + + /* Disable CL73 */ + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + 0); + /* Restart CL37 autoneg */ + bnx2x_restart_autoneg(phy, params, 0); + DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); +} + +static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u32 gp_status) +{ + if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + if (bnx2x_direct_parallel_detect_used(phy, params)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; +} +static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u16 is_link_up, + u16 speed_mask, + u16 is_duplex) +{ + struct bnx2x *bp = params->bp; + if (phy->req_line_speed == SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + if (is_link_up) { + DP(NETIF_MSG_LINK, "phy link up\n"); + + vars->phy_link_up = 1; + vars->link_status |= LINK_STATUS_LINK_UP; + + switch (speed_mask) { + case GP_STATUS_10M: + vars->line_speed = SPEED_10; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= LINK_10TFD; + else + vars->link_status |= LINK_10THD; + break; + + case GP_STATUS_100M: + vars->line_speed = SPEED_100; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= LINK_100TXFD; + else + vars->link_status |= LINK_100TXHD; + break; + + case GP_STATUS_1G: + case GP_STATUS_1G_KX: + vars->line_speed = SPEED_1000; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= LINK_1000TFD; + else + vars->link_status |= LINK_1000THD; + break; + + case GP_STATUS_2_5G: + vars->line_speed = SPEED_2500; + if (is_duplex == DUPLEX_FULL) + vars->link_status |= LINK_2500TFD; + else + vars->link_status |= LINK_2500THD; + break; + + case GP_STATUS_5G: + case GP_STATUS_6G: + DP(NETIF_MSG_LINK, + "link speed unsupported gp_status 0x%x\n", + speed_mask); + return -EINVAL; + + case GP_STATUS_10G_KX4: + case GP_STATUS_10G_HIG: + case GP_STATUS_10G_CX4: + case GP_STATUS_10G_KR: + case GP_STATUS_10G_SFI: + case GP_STATUS_10G_XFI: + vars->line_speed = SPEED_10000; + vars->link_status |= LINK_10GTFD; + break; + case GP_STATUS_20G_DXGXS: + case GP_STATUS_20G_KR2: + vars->line_speed = SPEED_20000; + vars->link_status |= LINK_20GTFD; + break; + default: + DP(NETIF_MSG_LINK, + "link speed unsupported gp_status 0x%x\n", + speed_mask); + return -EINVAL; + } + } else { /* link_down */ + DP(NETIF_MSG_LINK, "phy link down\n"); + + vars->phy_link_up = 0; + + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_NONE; + } + DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n", + vars->phy_link_up, vars->line_speed); + return 0; +} + +static int bnx2x_link_settings_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + + u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; + int rc = 0; + + /* Read gp_status */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) + duplex = DUPLEX_FULL; + if (gp_status & 
MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) + link_up = 1; + speed_mask = gp_status & GP_STATUS_SPEED_MASK; + DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n", + gp_status, link_up, speed_mask); + rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask, + duplex); + if (rc == -EINVAL) + return rc; + + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { + if (SINGLE_MEDIA_DIRECT(params)) { + vars->duplex = duplex; + bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); + if (phy->req_line_speed == SPEED_AUTO_NEG) + bnx2x_xgxs_an_resolve(phy, params, vars, + gp_status); + } + } else { /* Link_down */ + if ((phy->req_line_speed == SPEED_AUTO_NEG) && + SINGLE_MEDIA_DIRECT(params)) { + /* Check signal is detected */ + bnx2x_check_fallback_to_cl37(phy, params); + } + } + + /* Read LP advertised speeds*/ + if (SINGLE_MEDIA_DIRECT(params) && + (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) { + u16 val; + + CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} + +static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 lane; + u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; + int rc = 0; + lane = bnx2x_get_warpcore_lane(phy, params); + /* Read gp_status */ + if ((params->loopback_mode) && + (phy->flags & FLAGS_WC_DUAL_MODE)) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up); + link_up &= 0x1; + } else if ((phy->req_line_speed > SPEED_10000) && + (phy->supported & SUPPORTED_20000baseMLD2_Full)) { + u16 temp_link_up; + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + 1, &temp_link_up); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + 1, &link_up); + DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n", + temp_link_up, link_up); + link_up &= (1<<2); + if (link_up) + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, + &gp_status1); + DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); + /* Check for either KR, 1G, or AN up. 
*/ + link_up = ((gp_status1 >> 8) | + (gp_status1 >> 12) | + (gp_status1)) & + (1 << lane); + if (phy->supported & SUPPORTED_20000baseKR2_Full) { + u16 an_link; + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &an_link); + link_up |= (an_link & (1<<2)); + } + if (link_up && SINGLE_MEDIA_DIRECT(params)) { + u16 pd, gp_status4; + if (phy->req_line_speed == SPEED_AUTO_NEG) { + /* Check Autoneg complete */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status4); + if (gp_status4 & ((1<<12)<<lane)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + /* Check parallel detect used */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_PAR_DET_10G_STATUS, + &pd); + if (pd & (1<<15)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + bnx2x_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = duplex; + } + } + + if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) && + SINGLE_MEDIA_DIRECT(params)) { + u16 val; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val); + + if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 | + MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_LP_UP1, &val); + + if (val & MDIO_OVER_1G_UP1_2_5G) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE; + if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + } + + + if (lane < 2) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); + } + DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed); + + if ((lane & 1) == 0) + gp_speed <<= 8; + gp_speed &= 0x3f00; + link_up = !!link_up; + + rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, + duplex); + + /* In case of KR link down, start the recovery procedure */ + if ((!link_up) && (phy->media_type == ETH_PHY_KR) && + (!(phy->flags & FLAGS_WC_DUAL_MODE))) + vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; + + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} +static void bnx2x_set_gmii_tx_driver(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + struct bnx2x_phy *phy = &params->phy[INT_PHY]; + u16 lp_up2; + u16 tx_driver; + u16 bank; + + /* Read precomp */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP2, &lp_up2); + + /* Bits [10:7] at lp_up2, positioned at [15:12] */ + lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> + MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << + MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); + + if (lp_up2 == 0) + return; + + for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { + CL22_RD_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, &tx_driver); + + /* Replace tx_driver bits [15:12] */ + if (lp_up2 != + (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { + tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; + tx_driver |= lp_up2; + CL22_WR_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, tx_driver); + } + } +} + +static int 
bnx2x_emac_program(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u16 mode = 0; + + DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); + bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + + EMAC_REG_EMAC_MODE, + (EMAC_MODE_25G_MODE | + EMAC_MODE_PORT_MII_10M | + EMAC_MODE_HALF_DUPLEX)); + switch (vars->line_speed) { + case SPEED_10: + mode |= EMAC_MODE_PORT_MII_10M; + break; + + case SPEED_100: + mode |= EMAC_MODE_PORT_MII; + break; + + case SPEED_1000: + mode |= EMAC_MODE_PORT_GMII; + break; + + case SPEED_2500: + mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII); + break; + + default: + /* 10G not valid for EMAC */ + DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", + vars->line_speed); + return -EINVAL; + } + + if (vars->duplex == DUPLEX_HALF) + mode |= EMAC_MODE_HALF_DUPLEX; + bnx2x_bits_en(bp, + GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, + mode); + + bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); + return 0; +} + +static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, + struct link_params *params) +{ + + u16 bank, i = 0; + struct bnx2x *bp = params->bp; + + for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; + bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { + CL22_WR_OVER_CL45(bp, phy, + bank, + MDIO_RX0_RX_EQ_BOOST, + phy->rx_preemphasis[i]); + } + + for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; + bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { + CL22_WR_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, + phy->tx_preemphasis[i]); + } +} + +static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == LOOPBACK_XGXS)); + if (!(vars->phy_flags & PHY_SGMII_FLAG)) { + if (SINGLE_MEDIA_DIRECT(params) && + (params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) + bnx2x_set_preemphasis(phy, params); + + /* Forced speed requested? 
*/ + if (vars->line_speed != SPEED_AUTO_NEG || + (SINGLE_MEDIA_DIRECT(params) && + params->loopback_mode == LOOPBACK_EXT)) { + DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); + + /* Disable autoneg */ + bnx2x_set_autoneg(phy, params, vars, 0); + + /* Program speed and duplex */ + bnx2x_program_serdes(phy, params, vars); + + } else { /* AN_mode */ + DP(NETIF_MSG_LINK, "not SGMII, AN\n"); + + /* AN enabled */ + bnx2x_set_brcm_cl37_advertisement(phy, params); + + /* Program duplex & pause advertisement (for aneg) */ + bnx2x_set_ieee_aneg_advertisement(phy, params, + vars->ieee_fc); + + /* Enable autoneg */ + bnx2x_set_autoneg(phy, params, vars, enable_cl73); + + /* Enable and restart AN */ + bnx2x_restart_autoneg(phy, params, enable_cl73); + } + + } else { /* SGMII mode */ + DP(NETIF_MSG_LINK, "SGMII\n"); + + bnx2x_initialize_sgmii_process(phy, params, vars); + } +} + +static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + vars->phy_flags |= PHY_XGXS_FLAG; + if ((phy->req_line_speed && + ((phy->req_line_speed == SPEED_100) || + (phy->req_line_speed == SPEED_10))) || + (!phy->req_line_speed && + (phy->speed_cap_mask >= + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->speed_cap_mask < + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + bnx2x_set_aer_mmd(params, phy); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + bnx2x_set_master_ln(params, phy); + + rc = bnx2x_reset_unicore(params, phy, 0); + /* Reset the SerDes and wait for reset bit return low */ + if (rc) + return rc; + + bnx2x_set_aer_mmd(params, phy); + /* Setting the masterLn_def again after the reset */ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { + bnx2x_set_master_ln(params, phy); + bnx2x_set_swap_lanes(params, phy); + } + + return rc; +} + +static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, + struct bnx2x_phy *phy, + struct link_params *params) +{ + u16 cnt, ctrl; + /* Wait for soft reset to get cleared up to 1 sec */ + for (cnt = 0; cnt < 1000; cnt++) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + bnx2x_cl22_read(bp, phy, + MDIO_PMA_REG_CTRL, &ctrl); + else + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, &ctrl); + if (!(ctrl & (1<<15))) + break; + usleep_range(1000, 2000); + } + + if (cnt == 1000) + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + params->port); + DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); + return cnt; +} + +static void bnx2x_link_int_enable(struct link_params *params) +{ + u8 port = params->port; + u32 mask; + struct bnx2x *bp = params->bp; + + /* Setting the status to report on link up for either XGXS or SerDes */ + if (CHIP_IS_E3(bp)) { + mask = NIG_MASK_XGXS0_LINK_STATUS; + if (!(SINGLE_MEDIA_DIRECT(params))) + mask |= NIG_MASK_MI_INT; + } else if (params->switch_cfg == SWITCH_CFG_10G) { + mask = (NIG_MASK_XGXS0_LINK10G | + NIG_MASK_XGXS0_LINK_STATUS); + DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); + if (!(SINGLE_MEDIA_DIRECT(params)) && + params->phy[INT_PHY].type != + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) { + mask |= NIG_MASK_MI_INT; + DP(NETIF_MSG_LINK, "enabled external phy int\n"); + } + + } else { /* SerDes */ + mask = NIG_MASK_SERDES0_LINK_STATUS; + DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n"); + if 
(!(SINGLE_MEDIA_DIRECT(params)) && + params->phy[INT_PHY].type != + PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) { + mask |= NIG_MASK_MI_INT; + DP(NETIF_MSG_LINK, "enabled external phy int\n"); + } + } + bnx2x_bits_en(bp, + NIG_REG_MASK_INTERRUPT_PORT0 + port*4, + mask); + + DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port, + (params->switch_cfg == SWITCH_CFG_10G), + REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); + DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n", + REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), + REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), + REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c)); + DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); +} + +static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, + u8 exp_mi_int) +{ + u32 latch_status = 0; + + /* Disable the MI INT ( external phy int ) by writing 1 to the + * status register. Link down indication is high-active-signal, + * so in this case we need to write the status to clear the XOR + */ + /* Read Latched signals */ + latch_status = REG_RD(bp, + NIG_REG_LATCH_STATUS_0 + port*8); + DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status); + /* Handle only those with latched-signal=up.*/ + if (exp_mi_int) + bnx2x_bits_en(bp, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port*4, + NIG_STATUS_EMAC0_MI_INT); + else + bnx2x_bits_dis(bp, + NIG_REG_STATUS_INTERRUPT_PORT0 + + port*4, + NIG_STATUS_EMAC0_MI_INT); + + if (latch_status & 1) { + + /* For all latched-signal=up : Re-Arm Latch signals */ + REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, + (latch_status & 0xfffe) | (latch_status & 1)); + } + /* For all latched-signal=up,Write original_signal to status */ +} + +static void bnx2x_link_int_ack(struct link_params *params, + struct link_vars *vars, u8 is_10g_plus) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + u32 mask; + /* First reset all status we assume only one line will be + * change at a time + */ + bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, + (NIG_STATUS_XGXS0_LINK10G | + NIG_STATUS_XGXS0_LINK_STATUS | + NIG_STATUS_SERDES0_LINK_STATUS)); + if (vars->phy_link_up) { + if (USES_WARPCORE(bp)) + mask = NIG_STATUS_XGXS0_LINK_STATUS; + else { + if (is_10g_plus) + mask = NIG_STATUS_XGXS0_LINK10G; + else if (params->switch_cfg == SWITCH_CFG_10G) { + /* Disable the link interrupt by writing 1 to + * the relevant lane in the status register + */ + u32 ser_lane = + ((params->lane_config & + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + mask = ((1 << ser_lane) << + NIG_STATUS_XGXS0_LINK_STATUS_SIZE); + } else + mask = NIG_STATUS_SERDES0_LINK_STATUS; + } + DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n", + mask); + bnx2x_bits_en(bp, + NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, + mask); + } +} + +static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) +{ + u8 *str_ptr = str; + u32 mask = 0xf0000000; + u8 shift = 8*4; + u8 digit; + u8 remove_leading_zeros = 1; + if (*len < 10) { + /* Need more than 10chars for this format */ + *str_ptr = '\0'; + (*len)--; + return -EINVAL; + } + while (shift > 0) { + + shift -= 4; + digit = ((num & mask) >> shift); + if (digit == 0 && remove_leading_zeros) { + mask = mask >> 4; + continue; + } else if (digit < 0xa) + *str_ptr = digit + '0'; + else + *str_ptr = digit - 0xa + 'a'; + remove_leading_zeros = 0; + str_ptr++; + (*len)--; + mask = 
mask >> 4; + if (shift == 4*4) { + *str_ptr = '.'; + str_ptr++; + (*len)--; + remove_leading_zeros = 1; + } + } + return 0; +} + + +static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) +{ + str[0] = '\0'; + (*len)--; + return 0; +} + +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version, + u16 len) +{ + struct bnx2x *bp; + u32 spirom_ver = 0; + int status = 0; + u8 *ver_p = version; + u16 remain_len = len; + if (version == NULL || params == NULL) + return -EINVAL; + bp = params->bp; + + /* Extract first external phy*/ + version[0] = '\0'; + spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr); + + if (params->phy[EXT_PHY1].format_fw_ver) { + status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver, + ver_p, + &remain_len); + ver_p += (len - remain_len); + } + if ((params->num_phys == MAX_PHYS) && + (params->phy[EXT_PHY2].ver_addr != 0)) { + spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); + if (params->phy[EXT_PHY2].format_fw_ver) { + *ver_p = '/'; + ver_p++; + remain_len--; + status |= params->phy[EXT_PHY2].format_fw_ver( + spirom_ver, + ver_p, + &remain_len); + ver_p = version + (len - remain_len); + } + } + *ver_p = '\0'; + return status; +} + +static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + u8 port = params->port; + struct bnx2x *bp = params->bp; + + if (phy->req_line_speed != SPEED_1000) { + u32 md_devad = 0; + + DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); + + if (!CHIP_IS_E3(bp)) { + /* Change the uni_phy_addr in the nig */ + md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + + port*0x18)); + + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + 0x5); + } + + bnx2x_cl45_write(bp, phy, + 5, + (MDIO_REG_BANK_AER_BLOCK + + (MDIO_AER_BLOCK_AER_REG & 0xf)), + 0x2800); + + bnx2x_cl45_write(bp, phy, + 5, + (MDIO_REG_BANK_CL73_IEEEB0 + + (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), + 0x6041); + msleep(200); + /* Set aer mmd back */ + bnx2x_set_aer_mmd(params, phy); + + if (!CHIP_IS_E3(bp)) { + /* And md_devad */ + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + md_devad); + } + } else { + u16 mii_ctrl; + DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); + bnx2x_cl45_read(bp, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + &mii_ctrl); + bnx2x_cl45_write(bp, phy, 5, + (MDIO_REG_BANK_COMBO_IEEE0 + + (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)), + mii_ctrl | + MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK); + } +} + +int bnx2x_set_led(struct link_params *params, + struct link_vars *vars, u8 mode, u32 speed) +{ + u8 port = params->port; + u16 hw_led_mode = params->hw_led_mode; + int rc = 0; + u8 phy_idx; + u32 tmp; + u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); + DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", + speed, hw_led_mode); + /* In case */ + for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].set_link_led) { + params->phy[phy_idx].set_link_led( + ¶ms->phy[phy_idx], params, mode); + } + } + + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, + SHARED_HW_CFG_LED_MAC1); + + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + if (params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + tmp &= ~(EMAC_LED_1000MB_OVERRIDE | + EMAC_LED_100MB_OVERRIDE | + EMAC_LED_10MB_OVERRIDE); + else + tmp |= EMAC_LED_OVERRIDE; + + EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp); + break; + + case LED_MODE_OPER: + /* For all other phys, OPER mode is same as ON, so in case + * link is down, do nothing + */ + if (!vars->link_up) + break; + case LED_MODE_ON: + if (((params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || + (params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && + CHIP_IS_E2(bp) && params->num_phys == 2) { + /* This is a work-around for E2+8727 Configurations */ + if (mode == LED_MODE_ON || + speed == SPEED_10000){ + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); + + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + EMAC_WR(bp, EMAC_REG_EMAC_LED, + (tmp | EMAC_LED_OVERRIDE)); + /* Return here without enabling traffic + * LED blink and setting rate in ON mode. + * In oper mode, enabling LED blink + * and setting rate is needed. + */ + if (mode == LED_MODE_ON) + return rc; + } + } else if (SINGLE_MEDIA_DIRECT(params)) { + /* This is a work-around for HW issue found when link + * is up in CL73 + */ + if ((!CHIP_IS_E3(bp)) || + (CHIP_IS_E3(bp) && + mode == LED_MODE_ON)) + REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); + + if (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp) || + (mode == LED_MODE_ON)) + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); + else + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, + hw_led_mode); + } else if ((params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) && + (mode == LED_MODE_ON)) { + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | + EMAC_LED_OVERRIDE | EMAC_LED_1000MB_OVERRIDE); + /* Break here; otherwise, it'll disable the + * intended override. + */ + break; + } else { + u32 nig_led_mode = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 
+ (SHARED_HW_CFG_LED_PHY1 >> + SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode; + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, + nig_led_mode); + } + + REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); + /* Set blinking rate to ~15.9Hz */ + if (CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, + LED_BLINK_RATE_VAL_E3); + else + REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, + LED_BLINK_RATE_VAL_E1X_E2); + REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + + port*4, 1); + tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); + EMAC_WR(bp, EMAC_REG_EMAC_LED, + (tmp & (~EMAC_LED_OVERRIDE))); + + if (CHIP_IS_E1(bp) && + ((speed == SPEED_2500) || + (speed == SPEED_1000) || + (speed == SPEED_100) || + (speed == SPEED_10))) { + /* For speeds less than 10G LED scheme is different */ + REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + + port*4, 1); + REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + + port*4, 0); + REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + + port*4, 1); + } + break; + + default: + rc = -EINVAL; + DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n", + mode); + break; + } + return rc; + +} + +/* This function comes to reflect the actual link state read DIRECTLY from the + * HW + */ +int bnx2x_test_link(struct link_params *params, struct link_vars *vars, + u8 is_serdes) +{ + struct bnx2x *bp = params->bp; + u16 gp_status = 0, phy_index = 0; + u8 ext_phy_link_up = 0, serdes_phy_type; + struct link_vars temp_vars; + struct bnx2x_phy *int_phy = ¶ms->phy[INT_PHY]; + + if (CHIP_IS_E3(bp)) { + u16 link_up; + if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] + > SPEED_10000) { + /* Check 20G link */ + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + link_up &= (1<<2); + } else { + /* Check 10G link and below*/ + u8 lane = bnx2x_get_warpcore_lane(int_phy, params); + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, + &gp_status); + gp_status = ((gp_status >> 8) & 0xf) | + ((gp_status >> 12) & 0xf); + link_up = gp_status & (1 << lane); + } + if (!link_up) + return -ESRCH; + } else { + CL22_RD_OVER_CL45(bp, int_phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + /* Link is up only if both local phy and external phy are up */ + if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) + return -ESRCH; + } + /* In XGXS loopback mode, do not check external PHY */ + if (params->loopback_mode == LOOPBACK_XGXS) + return 0; + + switch (params->num_phys) { + case 1: + /* No external PHY */ + return 0; + case 2: + ext_phy_link_up = params->phy[EXT_PHY1].read_status( + ¶ms->phy[EXT_PHY1], + params, &temp_vars); + break; + case 3: /* Dual Media */ + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + serdes_phy_type = ((params->phy[phy_index].media_type == + ETH_PHY_SFPP_10G_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_SFP_1G_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_XFP_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_DA_TWINAX)); + + if (is_serdes != serdes_phy_type) + continue; + if (params->phy[phy_index].read_status) { + ext_phy_link_up |= + params->phy[phy_index].read_status( + ¶ms->phy[phy_index], + params, &temp_vars); + } + } + break; + } + if (ext_phy_link_up) + return 0; + return -ESRCH; +} + +static int bnx2x_link_initialize(struct link_params *params, + struct link_vars *vars) +{ + u8 phy_index, non_ext_phy; + struct bnx2x *bp = params->bp; + /* In case of 
external phy existence, the line speed would be the + * line speed linked up by the external phy. In case it is direct + * only, then the line_speed during initialization will be + * equal to the req_line_speed + */ + vars->line_speed = params->phy[INT_PHY].req_line_speed; + + /* Initialize the internal phy in case this is a direct board + * (no external phys), or this board has external phy which requires + * to first. + */ + if (!USES_WARPCORE(bp)) + bnx2x_prepare_xgxs(¶ms->phy[INT_PHY], params, vars); + /* init ext phy and enable link state int */ + non_ext_phy = (SINGLE_MEDIA_DIRECT(params) || + (params->loopback_mode == LOOPBACK_XGXS)); + + if (non_ext_phy || + (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) || + (params->loopback_mode == LOOPBACK_EXT_PHY)) { + struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; + if (vars->line_speed == SPEED_AUTO_NEG && + (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp))) + bnx2x_set_parallel_detection(phy, params); + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init(phy, params, vars); + } + + /* Re-read this value in case it was changed inside config_init due to + * limitations of optic module + */ + vars->line_speed = params->phy[INT_PHY].req_line_speed; + + /* Init external phy*/ + if (non_ext_phy) { + if (params->phy[INT_PHY].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + } else { + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + /* No need to initialize second phy in case of first + * phy only selection. In case of second phy, we do + * need to initialize the first phy, since they are + * connected. + */ + if (params->phy[phy_index].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + + if (phy_index == EXT_PHY2 && + (bnx2x_phy_selection(params) == + PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { + DP(NETIF_MSG_LINK, + "Not initializing second phy\n"); + continue; + } + params->phy[phy_index].config_init( + ¶ms->phy[phy_index], + params, vars); + } + } + /* Reset the interrupt indication after phy was initialized */ + bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + + params->port*4, + (NIG_STATUS_XGXS0_LINK10G | + NIG_STATUS_XGXS0_LINK_STATUS | + NIG_STATUS_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); + return 0; +} + +static void bnx2x_int_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + /* Reset the SerDes/XGXS */ + REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, + (0x1ff << (params->port*16))); +} + +static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 gpio_port; + /* HW reset */ + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); + DP(NETIF_MSG_LINK, "reset external PHY\n"); +} + +static int bnx2x_update_link_down(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port = params->port; + + DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); + bnx2x_set_led(params, vars, LED_MODE_OFF, 0); + vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; + /* Indicate no mac active */ + vars->mac_type = MAC_TYPE_NONE; + + /* Update shared memory */ + vars->link_status &= ~LINK_UPDATE_MASK; + vars->line_speed = 0; + bnx2x_update_mng(params, vars->link_status); + + /* Activate nig drain */ + 
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); + + /* Disable emac */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + + usleep_range(10000, 20000); + /* Reset BigMac/Xmac */ + if (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); + + if (CHIP_IS_E3(bp)) { + /* Prevent LPI Generation by chip */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), + 0); + REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), + 0); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + + bnx2x_update_mng_eee(params, vars->eee_status); + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } + + return 0; +} + +static int bnx2x_update_link_up(struct link_params *params, + struct link_vars *vars, + u8 link_10g) +{ + struct bnx2x *bp = params->bp; + u8 phy_idx, port = params->port; + int rc = 0; + + vars->link_status |= (LINK_STATUS_LINK_UP | + LINK_STATUS_PHYSICAL_LINK_FLAG); + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) + vars->link_status |= + LINK_STATUS_TX_FLOW_CONTROL_ENABLED; + + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) + vars->link_status |= + LINK_STATUS_RX_FLOW_CONTROL_ENABLED; + if (USES_WARPCORE(bp)) { + if (link_10g) { + if (bnx2x_xmac_enable(params, vars, 0) == + -ESRCH) { + DP(NETIF_MSG_LINK, "Found errors on XMAC\n"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + } else + bnx2x_umac_enable(params, vars, 0); + bnx2x_set_led(params, vars, + LED_MODE_OPER, vars->line_speed); + + if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) && + (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) { + DP(NETIF_MSG_LINK, "Enabling LPI assertion\n"); + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + + (params->port << 2), 1); + REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1); + REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + + (params->port << 2), 0xfc20); + } + } + if ((CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp))) { + if (link_10g) { + if (bnx2x_bmac_enable(params, vars, 0, 1) == + -ESRCH) { + DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + + bnx2x_set_led(params, vars, + LED_MODE_OPER, SPEED_10000); + } else { + rc = bnx2x_emac_program(params, vars); + bnx2x_emac_enable(params, vars, 0); + + /* AN complete? 
*/ + if ((vars->link_status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) + && (!(vars->phy_flags & PHY_SGMII_FLAG)) && + SINGLE_MEDIA_DIRECT(params)) + bnx2x_set_gmii_tx_driver(params); + } + } + + /* PBF - link up */ + if (CHIP_IS_E1x(bp)) + rc |= bnx2x_pbf_update(params, vars->flow_ctrl, + vars->line_speed); + + /* Disable drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); + + /* Update shared memory */ + bnx2x_update_mng(params, vars->link_status); + bnx2x_update_mng_eee(params, vars->eee_status); + /* Check remote fault */ + for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { + bnx2x_check_half_open_conn(params, vars, 0); + break; + } + } + msleep(20); + return rc; +} + +static void bnx2x_chng_link_count(struct link_params *params, bool clear) +{ + struct bnx2x *bp = params->bp; + u32 addr, val; + + /* Verify the link_change_count is supported by the MFW */ + if (!(SHMEM2_HAS(bp, link_change_count))) + return; + + addr = params->shmem2_base + + offsetof(struct shmem2_region, link_change_count[params->port]); + if (clear) + val = 0; + else + val = REG_RD(bp, addr) + 1; + REG_WR(bp, addr, val); +} + +/* The bnx2x_link_update function should be called upon link + * interrupt. + * Link is considered up as follows: + * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs + * to be up + * - SINGLE_MEDIA - The link between the 577xx and the external + * phy (XGXS) need to up as well as the external link of the + * phy (PHY_EXT1) + * - DUAL_MEDIA - The link between the 577xx and the first + * external phy needs to be up, and at least one of the 2 + * external phy link must be up. + */ +int bnx2x_link_update(struct link_params *params, struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + struct link_vars phy_vars[MAX_PHYS]; + u8 port = params->port; + u8 link_10g_plus, phy_index; + u32 prev_link_status = vars->link_status; + u8 ext_phy_link_up = 0, cur_link_up; + int rc = 0; + u8 is_mi_int = 0; + u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; + u8 active_external_phy = INT_PHY; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_UPDATE_MASK; + for (phy_index = INT_PHY; phy_index < params->num_phys; + phy_index++) { + phy_vars[phy_index].flow_ctrl = 0; + phy_vars[phy_index].link_status = 0; + phy_vars[phy_index].line_speed = 0; + phy_vars[phy_index].duplex = DUPLEX_FULL; + phy_vars[phy_index].phy_link_up = 0; + phy_vars[phy_index].link_up = 0; + phy_vars[phy_index].fault_detected = 0; + /* different consideration, since vars holds inner state */ + phy_vars[phy_index].eee_status = vars->eee_status; + } + + if (USES_WARPCORE(bp)) + bnx2x_set_aer_mmd(params, ¶ms->phy[INT_PHY]); + + DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", + port, (vars->phy_flags & PHY_XGXS_FLAG), + REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); + + is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + + port*0x18) > 0); + DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", + REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), + is_mi_int, + REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); + + DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), + REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); + + /* Disable emac */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + + /* Step 1: + * Check external link change only for external phys, and apply + * priority selection 
between them in case the link on both phys + * is up. Note that instead of the common vars, a temporary + * vars argument is used since each phy may have different link/ + * speed/duplex result + */ + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + struct bnx2x_phy *phy = &params->phy[phy_index]; + if (!phy->read_status) + continue; + /* Read link status and params of this ext phy */ + cur_link_up = phy->read_status(phy, params, + &phy_vars[phy_index]); + if (cur_link_up) { + DP(NETIF_MSG_LINK, "phy in index %d link is up\n", + phy_index); + } else { + DP(NETIF_MSG_LINK, "phy in index %d link is down\n", + phy_index); + continue; + } + + if (!ext_phy_link_up) { + ext_phy_link_up = 1; + active_external_phy = phy_index; + } else { + switch (bnx2x_phy_selection(params)) { + case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + /* In this option, the first PHY makes sure to pass the + * traffic through itself only. + * It's not clear how to reset the link on the second phy + */ + active_external_phy = EXT_PHY1; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + /* In this option, the first PHY makes sure to pass the + * traffic through the second PHY. + */ + active_external_phy = EXT_PHY2; + break; + default: + /* Link indication on both PHYs with the following cases + * is invalid: + * - FIRST_PHY means that second phy wasn't initialized, + * hence its link is expected to be down + * - SECOND_PHY means that first phy should not be able + * to link up by itself (using configuration) + * - DEFAULT should be overridden during initialization + */ + DP(NETIF_MSG_LINK, "Invalid link indication " + "mpc=0x%x. DISABLING LINK !!!\n", + params->multi_phy_config); + ext_phy_link_up = 0; + break; + } + } + } + prev_line_speed = vars->line_speed; + /* Step 2: + * Read the status of the internal phy. In case of + * DIRECT_SINGLE_MEDIA board, this link is the external link, + * otherwise this is the link between the 577xx and the first + * external phy + */ + if (params->phy[INT_PHY].read_status) + params->phy[INT_PHY].read_status( + &params->phy[INT_PHY], + params, vars); + /* The INT_PHY flow control resides in the vars. This includes the + * case where the speed or flow control are not set to AUTO. + * Otherwise, the active external phy flow control result is set + * to the vars. The ext_phy_line_speed is needed to check if the + * speed is different between the internal phy and external phy. + * This case may be a result of an intermediate link speed change. + */ + if (active_external_phy > INT_PHY) { + vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; + /* Link speed is taken from the XGXS. AN and FC result from + * the external phy. 
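+ * Below, the active external PHY's link_status bits, duplex, EEE + * status and SERDES indication are folded into the shared vars; the + * line speed itself is only recorded in ext_phy_line_speed.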
+ */ + vars->link_status |= phy_vars[active_external_phy].link_status; + + /* If active_external_phy is the first PHY and link is up - + * disable TX on the second external PHY + */ + if (active_external_phy == EXT_PHY1) { + if (params->phy[EXT_PHY2].phy_specific_func) { + DP(NETIF_MSG_LINK, + "Disabling TX on EXT_PHY2\n"); + params->phy[EXT_PHY2].phy_specific_func( + &params->phy[EXT_PHY2], + params, DISABLE_TX); + } + } + + ext_phy_line_speed = phy_vars[active_external_phy].line_speed; + vars->duplex = phy_vars[active_external_phy].duplex; + if (params->phy[active_external_phy].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + else + vars->link_status &= ~LINK_STATUS_SERDES_LINK; + + vars->eee_status = phy_vars[active_external_phy].eee_status; + + DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", + active_external_phy); + } + + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + if (params->phy[phy_index].flags & + FLAGS_REARM_LATCH_SIGNAL) { + bnx2x_rearm_latch_signal(bp, port, + phy_index == + active_external_phy); + break; + } + } + DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," + " ext_phy_line_speed = %d\n", vars->flow_ctrl, + vars->link_status, ext_phy_line_speed); + /* Upon link speed change set the NIG into drain mode. Comes to + * deal with a possible FIFO glitch due to clk change when speed + * is decreased without a link down indicator + */ + + if (vars->phy_link_up) { + if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up && + (ext_phy_line_speed != vars->line_speed)) { + DP(NETIF_MSG_LINK, "Internal link speed %d is" + " different than the external" + " link speed %d\n", vars->line_speed, + ext_phy_line_speed); + vars->phy_link_up = 0; + } else if (prev_line_speed != vars->line_speed) { + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, + 0); + usleep_range(1000, 2000); + } + } + + /* Anything 10G and over uses the bmac */ + link_10g_plus = (vars->line_speed >= SPEED_10000); + + bnx2x_link_int_ack(params, vars, link_10g_plus); + + /* In case the external phy link is up and the internal link is down + * (not initialized yet, probably after link initialization), it + * needs to be initialized. + * Note that after a link down-up as a result of cable plug, the xgxs + * link would probably become up again without the need to + * initialize it + */ + if (!(SINGLE_MEDIA_DIRECT(params))) { + DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d," + " init_preceding = %d\n", ext_phy_link_up, + vars->phy_link_up, + params->phy[EXT_PHY1].flags & + FLAGS_INIT_XGXS_FIRST); + if (!(params->phy[EXT_PHY1].flags & + FLAGS_INIT_XGXS_FIRST) + && ext_phy_link_up && !vars->phy_link_up) { + vars->line_speed = ext_phy_line_speed; + if (vars->line_speed < SPEED_1000) + vars->phy_flags |= PHY_SGMII_FLAG; + else + vars->phy_flags &= ~PHY_SGMII_FLAG; + + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init( + &params->phy[INT_PHY], params, + vars); + } + } + /* Link is up only if both local phy and external phy (in case of + * non-direct board) are up and no fault detected on active PHY. 
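+ * In other words, link_up below requires phy_link_up, an external phy + * link (or a direct single-media setup) and a clear fault_detected + * indication on the active external PHY.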
+ */ + vars->link_up = (vars->phy_link_up && + (ext_phy_link_up || + SINGLE_MEDIA_DIRECT(params)) && + (phy_vars[active_external_phy].fault_detected == 0)); + + /* Update the PFC configuration in case it was changed */ + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + if (vars->link_up) + rc = bnx2x_update_link_up(params, vars, link_10g_plus); + else + rc = bnx2x_update_link_down(params, vars); + + if ((prev_link_status ^ vars->link_status) & LINK_STATUS_LINK_UP) + bnx2x_chng_link_count(params, false); + + /* Update MCP link status was changed */ + if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX) + bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0); + + return rc; +} + +/*****************************************************************************/ +/* External Phy section */ +/*****************************************************************************/ +void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) +{ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + usleep_range(1000, 2000); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); +} + +static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, + u32 spirom_ver, u32 ver_addr) +{ + DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n", + (u16)(spirom_ver>>16), (u16)spirom_ver, port); + + if (ver_addr) + REG_WR(bp, ver_addr, spirom_ver); +} + +static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 port) +{ + u16 fw_ver1, fw_ver2; + + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &fw_ver2); + bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), + phy->ver_addr); +} + +static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, + struct bnx2x_phy *phy, + struct link_vars *vars) +{ + u16 val; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &val); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_STATUS, &val); + if (val & (1<<5)) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + if ((val & (1<<0)) == 0) + vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; +} + +/******************************************************************/ +/* common BCM8073/BCM8727 PHY SECTION */ +/******************************************************************/ +static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + if (phy->req_line_speed == SPEED_10 || + phy->req_line_speed == SPEED_100) { + vars->flow_ctrl = phy->req_flow_ctrl; + return; + } + + if (bnx2x_ext_phy_resolve_fc(phy, params, vars) && + (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) { + u16 pause_result; + u16 ld_pause; /* local */ + u16 lp_pause; /* link partner */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LD, &ld_pause); + + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &lp_pause); + pause_result = (ld_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5; + pause_result |= (lp_pause & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; + + bnx2x_pause_resolve(vars, pause_result); + DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n", + pause_result); + } +} +static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, + struct bnx2x_phy *phy, + 
u8 port) +{ + u32 count = 0; + u16 fw_ver1, fw_msgout; + int rc = 0; + + /* Boot port from external ROM */ + /* EDC grst */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x0001); + + /* Ucode reboot and rst */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x008c); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + /* Reset internal microprocessor */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + /* Release srst bit */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* Delay 100ms per the PHY specifications */ + msleep(100); + + /* 8073 sometimes taking longer to download */ + do { + count++; + if (count > 300) { + DP(NETIF_MSG_LINK, + "bnx2x_8073_8727_external_rom_boot port %x:" + "Download failed. fw version = 0x%x\n", + port, fw_ver1); + rc = -EINVAL; + break; + } + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER1, &fw_ver1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); + + usleep_range(1000, 2000); + } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || + ((fw_msgout & 0xff) != 0x03 && (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); + + /* Clear ser_boot_ctl bit */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); + bnx2x_save_bcm_spirom_ver(bp, phy, port); + + DP(NETIF_MSG_LINK, + "bnx2x_8073_8727_external_rom_boot port %x:" + "Download complete. fw version = 0x%x\n", + port, fw_ver1); + + return rc; +} + +/******************************************************************/ +/* BCM8073 PHY SECTION */ +/******************************************************************/ +static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + /* This is only required for 8073A1, version 102 only */ + u16 val; + + /* Read 8073 HW revision*/ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val != 1) { + /* No need to workaround in 8073 A1 */ + return 0; + } + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &val); + + /* SNR should be applied only for version 0x102 */ + if (val != 0x102) + return 0; + + return 1; +} + +static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + u16 val, cnt, cnt1 ; + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); + + if (val > 0) { + /* No need to workaround in 8073 A1 */ + return 0; + } + /* XAUI workaround in 8073 A0: */ + + /* After loading the boot ROM and restarting Autoneg, poll + * Dev1, Reg $C820: + */ + + for (cnt = 0; cnt < 1000; cnt++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &val); + /* If bit [14] = 0 or bit [13] = 0, continue on with + * system initialization (XAUI work-around not required, as + * these bits indicate 2.5G or 1G link up). + */ + if (!(val & (1<<14)) || !(val & (1<<13))) { + DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); + return 0; + } else if (!(val & (1<<15))) { + DP(NETIF_MSG_LINK, "bit 15 went off\n"); + /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's + * MSB (bit15) goes to 1 (indicating that the XAUI + * workaround has completed), then continue on with + * system initialization. 
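+ * The loop below polls up to 1000 times, sleeping 3-6ms between + * reads, before declaring a work-around timeout.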
+ */ + for (cnt1 = 0; cnt1 < 1000; cnt1++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_XAUI_WA, &val); + if (val & (1<<15)) { + DP(NETIF_MSG_LINK, + "XAUI workaround has completed\n"); + return 0; + } + usleep_range(3000, 6000); + } + break; + } + usleep_range(3000, 6000); + } + DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); + return -EINVAL; +} + +static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + /* Force KR or KX */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); +} + +static void bnx2x_8073_set_pause_cl37(struct link_params *params, + struct bnx2x_phy *phy, + struct link_vars *vars) +{ + u16 cl37_val; + struct bnx2x *bp = params->bp; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val); + + cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + } + DP(NETIF_MSG_LINK, + "Ext phy AN advertize cl37 0x%x\n", cl37_val); + + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val); + msleep(500); +} + +static void bnx2x_8073_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Enable LASI */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); + break; + } +} + +static int bnx2x_8073_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val = 0, tmp1; + u8 gpio_port; + DP(NETIF_MSG_LINK, "Init 8073\n"); + + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + + bnx2x_8073_specific_func(phy, params, PHY_INIT); + bnx2x_8073_set_pause_cl37(params, phy, vars); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); + + /* Swap polarity if required - Must be done only in non-1G mode */ + if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap _P and _N of the KR lines */ + DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n"); + /* 10G Rx/Tx and 1G Tx signal polarity swap */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + 
MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, + (val | (3<<9))); + } + + + /* Enable CL37 BAM */ + if (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, &val); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, val | 1); + DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); + } + if (params->loopback_mode == LOOPBACK_EXT) { + bnx2x_807x_force_10G(bp, phy); + DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); + return 0; + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002); + } + if (phy->req_line_speed != SPEED_AUTO_NEG) { + if (phy->req_line_speed == SPEED_10000) { + val = (1<<7); + } else if (phy->req_line_speed == SPEED_2500) { + val = (1<<5); + /* Note that 2.5G works only when used with 1G + * advertisement + */ + } else + val = (1<<5); + } else { + val = 0; + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + val |= (1<<7); + + /* Note that 2.5G works only when used with 1G advertisement */ + if (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + val |= (1<<5); + DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1); + + if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) && + (phy->req_line_speed == SPEED_AUTO_NEG)) || + (phy->req_line_speed == SPEED_2500)) { + u16 phy_ver; + /* Allow 2.5G for A1 and above */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, + &phy_ver); + DP(NETIF_MSG_LINK, "Add 2.5G\n"); + if (phy_ver > 0) + tmp1 |= 1; + else + tmp1 &= 0xfffe; + } else { + DP(NETIF_MSG_LINK, "Disable 2.5G\n"); + tmp1 &= 0xfffe; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1); + /* Add support for CL37 (passive mode) II */ + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, + (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ? + 0x20 : 0x40))); + + /* Add support for CL37 (passive mode) III */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + + /* The SNR will improve about 2db by changing BW and FEE main + * tap. 
The rest of the commands are executed after link is up + * Change FFE main cursor to 5 in EDC register + */ + if (bnx2x_8073_is_snr_needed(bp, phy)) + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, + 0xFB0C); + + /* Enable FEC (Forward Error Correction) Request in the AN */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1); + tmp1 |= (1<<15); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1); + + bnx2x_ext_phy_set_pause(params, phy, vars); + + /* Restart autoneg */ + msleep(500); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n", + ((val & (1<<5)) > 0), ((val & (1<<7)) > 0)); + return 0; +} + +static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 link_up = 0; + u16 val1, val2; + u16 link_status = 0; + u16 an1000_status = 0; + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + + DP(NETIF_MSG_LINK, "8073 LASI status 0x%x\n", val1); + + /* Clear the interrupt LASI status register */ + bnx2x_cl45_read(bp, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1); + DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1); + /* Clear MSG-OUT */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); + + /* Check the LASI */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); + + /* Check the link status */ + bnx2x_cl45_read(bp, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); + DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + link_up = ((val1 & 4) == 4); + DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); + + if (link_up && + ((phy->req_line_speed != SPEED_10000))) { + if (bnx2x_8073_xaui_wa(bp, phy) != 0) + return 0; + } + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status); + + /* Check the link status on 1.1.2 */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x," + "an_link_status=0x%x\n", val2, val1, an1000_status); + + link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); + if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { + /* The SNR will improve about 2dB by changing the BW and FFE main + * tap. The 1st write to change FFE main tap is set before + * restart AN. 
Change PLL Bandwidth in EDC register + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, + 0x26BC); + + /* Change CDR Bandwidth in EDC register */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH, + 0x0333); + } + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &link_status); + + /* Bits 0..2 --> speed detected, bits 13..15--> link is down */ + if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { + link_up = 1; + vars->line_speed = SPEED_10000; + DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", + params->port); + } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) { + link_up = 1; + vars->line_speed = SPEED_2500; + DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n", + params->port); + } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { + link_up = 1; + vars->line_speed = SPEED_1000; + DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", + params->port); + } else { + link_up = 0; + DP(NETIF_MSG_LINK, "port %x: External link is down\n", + params->port); + } + + if (link_up) { + /* Swap polarity if required */ + if (params->lane_config & + PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { + /* Configure the 8073 to swap P and N of the KR lines */ + bnx2x_cl45_read(bp, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); + /* Set bit 3 to invert Rx in 1G mode and clear this bit + * when it`s in 10G mode. + */ + if (vars->line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Swapping 1G polarity for" + "the 8073\n"); + val1 |= (1<<3); + } else + val1 &= ~(1<<3); + + bnx2x_cl45_write(bp, phy, + MDIO_XS_DEVAD, + MDIO_XS_REG_8073_RX_CTRL_PCIE, + val1); + } + bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); + bnx2x_8073_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &val1); + + if (val1 & (1<<5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + if (val1 & (1<<7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + + return link_up; +} + +static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 gpio_port; + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", + gpio_port); + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); +} + +/******************************************************************/ +/* BCM8705 PHY SECTION */ +/******************************************************************/ +static int bnx2x_8705_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "init 8705\n"); + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100); + bnx2x_cl45_write(bp, phy, + MDIO_WIS_DEVAD, 
MDIO_WIS_REG_LASI_CNTL, 0x1); + /* BCM8705 doesn't have microcode, hence the 0 */ + bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0); + return 0; +} + +static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u8 link_up = 0; + u16 val1, rx_sd; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "read status 8705\n"); + bnx2x_cl45_read(bp, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); + + bnx2x_cl45_read(bp, phy, + MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1); + DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, 0xc809, &val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, 0xc809, &val1); + + DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1); + link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0)); + if (link_up) { + vars->line_speed = SPEED_10000; + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + return link_up; +} + +/******************************************************************/ +/* SFP+ module Section */ +/******************************************************************/ +static void bnx2x_set_disable_pmd_transmit(struct link_params *params, + struct bnx2x_phy *phy, + u8 pmd_dis) +{ + struct bnx2x *bp = params->bp; + /* Disable transmitter only for bootcodes which can enable it afterwards + * (for D3 link) + */ + if (pmd_dis) { + if (params->feature_config_flags & + FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) + DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n"); + else { + DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n"); + return; + } + } else + DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_DISABLE, pmd_dis); +} + +static u8 bnx2x_get_gpio_port(struct link_params *params) +{ + u8 gpio_port; + u32 swap_val, swap_override; + struct bnx2x *bp = params->bp; + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + return gpio_port ^ (swap_val && swap_override); +} + +static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + u16 val; + u8 port = params->port; + struct bnx2x *bp = params->bp; + u32 tx_en_mode; + + /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].sfp_ctrl)) & + PORT_HW_CFG_TX_LASER_MASK; + DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x " + "mode = %x\n", tx_en, port, tx_en_mode); + switch (tx_en_mode) { + case PORT_HW_CFG_TX_LASER_MDIO: + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &val); + + if (tx_en) + val &= ~(1<<15); + else + val |= (1<<15); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + val); + break; + case PORT_HW_CFG_TX_LASER_GPIO0: + case PORT_HW_CFG_TX_LASER_GPIO1: + case PORT_HW_CFG_TX_LASER_GPIO2: + case PORT_HW_CFG_TX_LASER_GPIO3: + { + u16 gpio_pin; + u8 gpio_port, gpio_mode; + if (tx_en) + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH; + else + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; + + gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; + gpio_port = bnx2x_get_gpio_port(params); + bnx2x_set_gpio(bp, 
gpio_pin, gpio_mode, gpio_port); + break; + } + default: + DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode); + break; + } +} + +static void bnx2x_sfp_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en); + if (CHIP_IS_E3(bp)) + bnx2x_sfp_e3_set_transmitter(params, phy, tx_en); + else + bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en); +} + +static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8 is_init) +{ + struct bnx2x *bp = params->bp; + u16 val = 0; + u16 i; + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { + DP(NETIF_MSG_LINK, + "Reading from eeprom is limited to 0xf\n"); + return -EINVAL; + } + /* Set the read command byte count */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + (byte_cnt | (dev_addr << 8))); + + /* Set the read command address */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); + + /* Activate read command */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x2c0f); + + /* Wait up to 500us for command complete status */ + for (i = 0; i < 100; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) + break; + udelay(5); + } + + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { + DP(NETIF_MSG_LINK, + "Got bad status 0x%x when reading from SFP+ EEPROM\n", + (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); + return -EINVAL; + } + + /* Read the buffer */ + for (i = 0; i < byte_cnt; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); + o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); + } + + for (i = 0; i < 100; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) + return 0; + usleep_range(1000, 2000); + } + return -EINVAL; +} + +static void bnx2x_warpcore_power_module(struct link_params *params, + u8 power) +{ + u32 pin_cfg; + struct bnx2x *bp = params->bp; + + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_PWR_DIS_MASK) >> + PORT_HW_CFG_E3_PWR_DIS_SHIFT; + + if (pin_cfg == PIN_CFG_NA) + return; + DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", + power, pin_cfg); + /* Low ==> corresponding SFP+ module is powered + * high ==> the SFP+ module is powered down + */ + bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); +} +static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, + u8 dev_addr, + u16 addr, u8 byte_cnt, + u8 *o_buf, u8 is_init) +{ + int rc = 0; + u8 i, j = 0, cnt = 0; + u32 data_array[4]; + u16 addr32; + struct bnx2x *bp = params->bp; + + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { + DP(NETIF_MSG_LINK, + "Reading from eeprom is limited to 16 bytes\n"); + return -EINVAL; + } + + /* 4 byte aligned address */ + addr32 = addr & (~0x3); + do { + if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { + bnx2x_warpcore_power_module(params, 0); + /* Note that 100us are not enough here */ + 
usleep_range(1000, 2000); + bnx2x_warpcore_power_module(params, 1); + } + rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt, + data_array); + } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); + + if (rc == 0) { + for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) { + o_buf[j] = *((u8 *)data_array + i); + j++; + } + } + + return rc; +} + +static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, + u8 dev_addr, u16 addr, u8 byte_cnt, + u8 *o_buf, u8 is_init) +{ + struct bnx2x *bp = params->bp; + u16 val, i; + + if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { + DP(NETIF_MSG_LINK, + "Reading from eeprom is limited to 0xf\n"); + return -EINVAL; + } + + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100Khz since some DACs(direct attached cables) do + * not work at 400Khz. + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + ((dev_addr << 8) | 1)); + + /* Need to read from 1.8000 to clear it */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + &val); + + /* Set the read command byte count */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + ((byte_cnt < 2) ? 2 : byte_cnt)); + + /* Set the read command address */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); + /* Set the destination address */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + 0x8004, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); + + /* Activate read command */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x8002); + /* Wait appropriate time for two-wire command to finish before + * polling the status register + */ + usleep_range(1000, 2000); + + /* Wait up to 500us for command complete status */ + for (i = 0; i < 100; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) + break; + udelay(5); + } + + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) != + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) { + DP(NETIF_MSG_LINK, + "Got bad status 0x%x when reading from SFP+ EEPROM\n", + (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); + return -EFAULT; + } + + /* Read the buffer */ + for (i = 0; i < byte_cnt; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); + o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); + } + + for (i = 0; i < 100; i++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == + MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) + return 0; + usleep_range(1000, 2000); + } + + return -EINVAL; +} +int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u8 dev_addr, + u16 addr, u16 byte_cnt, u8 *o_buf) +{ + int rc = 0; + struct bnx2x *bp = params->bp; + u8 xfer_size; + u8 *user_data = o_buf; + read_sfp_module_eeprom_func_p read_func; + + if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) { + DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr); + return -EINVAL; + } + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + read_func = bnx2x_8726_read_sfp_module_eeprom; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + read_func = bnx2x_8727_read_sfp_module_eeprom; + break; + case 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + read_func = bnx2x_warpcore_read_sfp_module_eeprom; + break; + default: + return -EOPNOTSUPP; + } + + while (!rc && (byte_cnt > 0)) { + xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ? + SFP_EEPROM_PAGE_SIZE : byte_cnt; + rc = read_func(phy, params, dev_addr, addr, xfer_size, + user_data, 0); + byte_cnt -= xfer_size; + user_data += xfer_size; + addr += xfer_size; + } + return rc; +} + +static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, + struct link_params *params, + u16 *edc_mode) +{ + struct bnx2x *bp = params->bp; + u32 sync_offset = 0, phy_idx, media_types; + u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0; + *edc_mode = EDC_MODE_LIMITING; + phy->media_type = ETH_PHY_UNSPECIFIED; + /* First check for copper cable */ + if (bnx2x_read_sfp_module_eeprom(phy, + params, + I2C_DEV_ADDR_A0, + 0, + SFP_EEPROM_FC_TX_TECH_ADDR + 1, + (u8 *)val) != 0) { + DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); + return -EINVAL; + } + params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK; + params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] << + LINK_SFP_EEPROM_COMP_CODE_SHIFT; + bnx2x_update_link_attr(params, params->link_attr_sync); + switch (val[SFP_EEPROM_CON_TYPE_ADDR]) { + case SFP_EEPROM_CON_TYPE_VAL_COPPER: + { + u8 copper_module_type; + phy->media_type = ETH_PHY_DA_TWINAX; + /* Check if its active cable (includes SFP+ module) + * of passive cable + */ + copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR]; + + if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { + DP(NETIF_MSG_LINK, "Active Copper cable detected\n"); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + *edc_mode = EDC_MODE_ACTIVE_DAC; + else + check_limiting_mode = 1; + } else { + *edc_mode = EDC_MODE_PASSIVE_DAC; + /* Even in case PASSIVE_DAC indication is not set, + * treat it as a passive DAC cable, since some cables + * don't have this indication. + */ + if (copper_module_type & + SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { + DP(NETIF_MSG_LINK, + "Passive Copper cable detected\n"); + } else { + DP(NETIF_MSG_LINK, + "Unknown copper-cable-type\n"); + } + } + break; + } + case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN: + case SFP_EEPROM_CON_TYPE_VAL_LC: + case SFP_EEPROM_CON_TYPE_VAL_RJ45: + check_limiting_mode = 1; + if (((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & + (SFP_EEPROM_10G_COMP_CODE_SR_MASK | + SFP_EEPROM_10G_COMP_CODE_LR_MASK | + SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) && + (val[SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) { + DP(NETIF_MSG_LINK, "1G SFP module detected\n"); + phy->media_type = ETH_PHY_SFP_1G_FIBER; + if (phy->req_line_speed != SPEED_1000) { + u8 gport = params->port; + phy->req_line_speed = SPEED_1000; + if (!CHIP_IS_E1x(bp)) { + gport = BP_PATH(bp) + + (params->port << 1); + } + netdev_err(bp->dev, + "Warning: Link speed was forced to 1000Mbps. 
Current SFP module in port %d is not compliant with 10G Ethernet\n", + gport); + } + if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] & + SFP_EEPROM_1G_COMP_CODE_BASE_T) { + bnx2x_sfp_set_transmitter(params, phy, 0); + msleep(40); + bnx2x_sfp_set_transmitter(params, phy, 1); + } + } else { + int idx, cfg_idx = 0; + DP(NETIF_MSG_LINK, "10G Optic module detected\n"); + for (idx = INT_PHY; idx < MAX_PHYS; idx++) { + if (params->phy[idx].type == phy->type) { + cfg_idx = LINK_CONFIG_IDX(idx); + break; + } + } + phy->media_type = ETH_PHY_SFPP_10G_FIBER; + phy->req_line_speed = params->req_line_speed[cfg_idx]; + } + break; + default: + DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", + val[SFP_EEPROM_CON_TYPE_ADDR]); + return -EINVAL; + } + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(bp, sync_offset); + /* Update media type for non-PMF sync */ + for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { + if (&(params->phy[phy_idx]) == phy) { + media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + break; + } + } + REG_WR(bp, sync_offset, media_types); + if (check_limiting_mode) { + u8 options[SFP_EEPROM_OPTIONS_SIZE]; + if (bnx2x_read_sfp_module_eeprom(phy, + params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_OPTIONS_ADDR, + SFP_EEPROM_OPTIONS_SIZE, + options) != 0) { + DP(NETIF_MSG_LINK, + "Failed to read Option field from module EEPROM\n"); + return -EINVAL; + } + if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK)) + *edc_mode = EDC_MODE_LINEAR; + else + *edc_mode = EDC_MODE_LIMITING; + } + DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); + return 0; +} +/* This function read the relevant field from the module (SFP+), and verify it + * is compliant with this board + */ +static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u32 val, cmd; + u32 fw_resp, fw_cmd_param; + char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1]; + char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1]; + phy->flags &= ~FLAGS_SFP_NOT_APPROVED; + val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_feature_config[params->port].config)); + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) { + DP(NETIF_MSG_LINK, "NOT enforcing module verification\n"); + return 0; + } + + if (params->feature_config_flags & + FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) { + /* Use specific phy request */ + cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL; + } else if (params->feature_config_flags & + FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) { + /* Use first phy request only in case of non-dual media*/ + if (DUAL_MEDIA(params)) { + DP(NETIF_MSG_LINK, + "FW does not support OPT MDL verification\n"); + return -EINVAL; + } + cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL; + } else { + /* No support in OPT MDL detection */ + DP(NETIF_MSG_LINK, + "FW does not support OPT MDL verification\n"); + return -EINVAL; + } + + fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); + fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); + if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { + DP(NETIF_MSG_LINK, "Approved module\n"); + return 0; + } + + /* Format the warning message */ + if (bnx2x_read_sfp_module_eeprom(phy, + params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_VENDOR_NAME_ADDR, + SFP_EEPROM_VENDOR_NAME_SIZE, + (u8 *)vendor_name)) + vendor_name[0] = '\0'; + else + vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; + if (bnx2x_read_sfp_module_eeprom(phy, + params, + I2C_DEV_ADDR_A0, + SFP_EEPROM_PART_NO_ADDR, + SFP_EEPROM_PART_NO_SIZE, + (u8 *)vendor_pn)) + vendor_pn[0] = '\0'; + else + vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; + + netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," + " Port %d from %s part number %s\n", + params->port, vendor_name, vendor_pn); + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG) + phy->flags |= FLAGS_SFP_NOT_APPROVED; + return -EINVAL; +} + +static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, + struct link_params *params) + +{ + u8 val; + int rc; + struct bnx2x *bp = params->bp; + u16 timeout; + /* Initialization time after hot-plug may take up to 300ms for + * some phys type ( e.g. JDSU ) + */ + + for (timeout = 0; timeout < 60; timeout++) { + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + rc = bnx2x_warpcore_read_sfp_module_eeprom( + phy, params, I2C_DEV_ADDR_A0, 1, 1, &val, + 1); + else + rc = bnx2x_read_sfp_module_eeprom(phy, params, + I2C_DEV_ADDR_A0, + 1, 1, &val); + if (rc == 0) { + DP(NETIF_MSG_LINK, + "SFP+ module initialization took %d ms\n", + timeout * 5); + return 0; + } + usleep_range(5000, 10000); + } + rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0, + 1, 1, &val); + return rc; +} + +static void bnx2x_8727_power_module(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 is_power_up) { + /* Make sure GPIOs are not using for LED mode */ + u16 val; + /* In the GPIO register, bit 4 is use to determine if the GPIOs are + * operating as INPUT or as OUTPUT. 
Bit 1 is for input, and 0 for + * output + * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 + * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1 + * where the 1st bit is the over-current(only input), and 2nd bit is + * for power( only output ) + * + * In case of NOC feature is disabled and power is up, set GPIO control + * as input to enable listening of over-current indication + */ + if (phy->flags & FLAGS_NOC) + return; + if (is_power_up) + val = (1<<4); + else + /* Set GPIO control to OUTPUT, and set the power bit + * to according to the is_power_up + */ + val = (1<<1); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + val); +} + +static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + u16 cur_limiting_mode; + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &cur_limiting_mode); + DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", + cur_limiting_mode); + + if (edc_mode == EDC_MODE_LIMITING) { + DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + EDC_MODE_LIMITING); + } else { /* LRM mode ( default )*/ + + DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); + + /* Changing to LRM mode takes quite few seconds. So do it only + * if current mode is limiting (default is LRM) + */ + if (cur_limiting_mode != EDC_MODE_LIMITING) + return 0; + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + 0x128); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL0, + 0x4008); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0xaaaa); + } + return 0; +} + +static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + u16 phy_identifier; + u16 rom_ver2_val; + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &phy_identifier); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier & ~(1<<9))); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &rom_ver2_val); + /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier | (1<<9))); + + return 0; +} + +static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + u16 val; + switch (action) { + case DISABLE_TX: + bnx2x_sfp_set_transmitter(params, phy, 0); + break; + case ENABLE_TX: + if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) + bnx2x_sfp_set_transmitter(params, phy, 1); + break; + case PHY_INIT: + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1<<2) | (1<<5)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006); + /* Make MOD_ABS give interrupt on change */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val |= (1<<12); + if (phy->flags & FLAGS_NOC) + val |= (3<<5); + /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 + * status which reflect SFP+ module over-current + */ + if (!(phy->flags & FLAGS_NOC)) + val &= 0xff8f; /* 
Reset bits 4-6 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + break; + default: + DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", + action); + return; + } +} + +static void bnx2x_set_e1e2_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + struct bnx2x *bp = params->bp; + + u32 fault_led_gpio = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) & + PORT_HW_CFG_FAULT_MODULE_LED_MASK; + switch (fault_led_gpio) { + case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED: + return; + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: + { + u8 gpio_port = bnx2x_get_gpio_port(params); + u16 gpio_pin = fault_led_gpio - + PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; + DP(NETIF_MSG_LINK, "Set fault module-detected led " + "pin %x port %x mode %x\n", + gpio_pin, gpio_port, gpio_mode); + bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); + } + break; + default: + DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n", + fault_led_gpio); + } +} + +static void bnx2x_set_e3_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + u32 pin_cfg; + u8 port = params->port; + struct bnx2x *bp = params->bp; + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> + PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; + DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n", + gpio_mode, pin_cfg); + bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); +} + +static void bnx2x_set_sfp_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); + if (CHIP_IS_E3(bp)) { + /* Low ==> if SFP+ module is supported otherwise + * High ==> if SFP+ module is not on the approved vendor list + */ + bnx2x_set_e3_module_fault_led(params, gpio_mode); + } else + bnx2x_set_e1e2_module_fault_led(params, gpio_mode); +} + +static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + bnx2x_warpcore_power_module(params, 0); + /* Put Warpcore in low power mode */ + REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e); + + /* Put LCPLL in low power mode */ + REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1); + REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0); + REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0); +} + +static void bnx2x_power_sfp_module(struct link_params *params, + struct bnx2x_phy *phy, + u8 power) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power); + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + bnx2x_8727_power_module(params->bp, phy, power); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + bnx2x_warpcore_power_module(params, power); + break; + default: + break; + } +} +static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + u16 val = 0; + u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + struct bnx2x *bp = params->bp; + + u8 lane = bnx2x_get_warpcore_lane(phy, params); + /* This is a global register which controls all lanes */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + val &= ~(0xf << (lane << 2)); + + switch 
(edc_mode) { + case EDC_MODE_LINEAR: + case EDC_MODE_LIMITING: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + break; + case EDC_MODE_PASSIVE_DAC: + case EDC_MODE_ACTIVE_DAC: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; + break; + default: + break; + } + + val |= (mode << (lane << 2)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); + /* A must read */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + + /* Restart microcode to re-read the new mode */ + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_warpcore_reset_lane(bp, phy, 0); + +} + +static void bnx2x_set_limiting_mode(struct link_params *params, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode); + break; + } +} + +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 edc_mode; + int rc = 0; + + u32 val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port].config)); + /* Enabled transmitter by default */ + bnx2x_sfp_set_transmitter(params, phy, 1); + DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", + params->port); + /* Power up module */ + bnx2x_power_sfp_module(params, phy, 1); + if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { + DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); + return -EINVAL; + } else if (bnx2x_verify_sfp_module(phy, params) != 0) { + /* Check SFP+ module compatibility */ + DP(NETIF_MSG_LINK, "Module verification failed!!\n"); + rc = -EINVAL; + /* Turn on fault module-detected led */ + bnx2x_set_sfp_module_fault_led(params, + MISC_REGISTERS_GPIO_HIGH); + + /* Check if need to power down the SFP+ module */ + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { + DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); + bnx2x_power_sfp_module(params, phy, 0); + return rc; + } + } else { + /* Turn off fault module-detected led */ + bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); + } + + /* Check and set limiting mode / LRM mode on 8726. On 8727 it + * is done automatically + */ + bnx2x_set_limiting_mode(params, phy, edc_mode); + + /* Disable transmit for this module if the module is not approved, and + * laser needs to be disabled. 
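+ * That is, verification failed (rc != 0) and the enforcement policy + * read from shmem is DISABLE_TX_LASER.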
+ */ + if ((rc) && + ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)) + bnx2x_sfp_set_transmitter(params, phy, 0); + + return rc; +} + +void bnx2x_handle_module_detect_int(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + struct bnx2x_phy *phy; + u32 gpio_val; + u8 gpio_num, gpio_port; + if (CHIP_IS_E3(bp)) { + phy = &params->phy[INT_PHY]; + /* Always enable TX laser; it will be disabled in case of fault */ + bnx2x_sfp_set_transmitter(params, phy, 1); + } else { + phy = &params->phy[EXT_PHY1]; + } + if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, + params->port, &gpio_num, &gpio_port) == + -EINVAL) { + DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n"); + return; + } + + /* Set valid module led off */ + bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); + + /* Get current gpio val reflecting module plugged in / out */ + gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); + + /* Call the handling function in case module is detected */ + if (gpio_val == 0) { + bnx2x_set_mdio_emac_per_phy(bp, params); + bnx2x_set_aer_mmd(params, phy); + + bnx2x_power_sfp_module(params, phy, 1); + bnx2x_set_gpio_int(bp, gpio_num, + MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, + gpio_port); + if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) { + bnx2x_sfp_module_detection(phy, params); + if (CHIP_IS_E3(bp)) { + u16 rx_tx_in_reset; + /* In case WC is out of reset, reconfigure the + * link speed while taking into account 1G + * module limitation. + */ + bnx2x_cl45_read(bp, phy, + MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, + &rx_tx_in_reset); + if ((!rx_tx_in_reset) && + (params->link_flags & + PHY_INITIALIZED)) { + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_warpcore_config_sfi(phy, params); + bnx2x_warpcore_reset_lane(bp, phy, 0); + } + } + } else { + DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); + } + } else { + bnx2x_set_gpio_int(bp, gpio_num, + MISC_REGISTERS_GPIO_INT_OUTPUT_SET, + gpio_port); + /* Module was plugged out. + * Disable transmit for this module + */ + phy->media_type = ETH_PHY_NOT_PRESENT; + } +} + +/******************************************************************/ +/* Used by 8706 and 8727 */ +/******************************************************************/ +static void bnx2x_sfp_mask_fault(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 alarm_status_offset, + u16 alarm_ctrl_offset) +{ + u16 alarm_status, val; + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, alarm_status_offset, + &alarm_status); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, alarm_status_offset, + &alarm_status); + /* Mask or enable the fault event. 
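+ * If the alarm is currently asserted (bit 0 of the status register), + * bit 0 of the control register is cleared to mask further events; + * otherwise it is set again to re-enable them.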
*/ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); + if (alarm_status & (1<<0)) + val &= ~(1<<0); + else + val |= (1<<0); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); +} +/******************************************************************/ +/* common BCM8706/BCM8726 PHY SECTION */ +/******************************************************************/ +static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u8 link_up = 0; + u16 val1, val2, rx_sd, pcs_status; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "XGXS 8706/8726\n"); + /* Clear RX Alarm*/ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + /* Clear LASI indication*/ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd); + bnx2x_cl45_read(bp, phy, + MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2); + + DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" + " link_status 0x%x\n", rx_sd, pcs_status, val2); + /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status + * are set, or if the autoneg bit 1 is set + */ + link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); + if (link_up) { + if (val2 & (1<<1)) + vars->line_speed = SPEED_1000; + else + vars->line_speed = SPEED_10000; + bnx2x_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + } + + /* Capture 10G link fault. Read twice to clear stale value. 
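+ * Bit 0 of the LASI TX status register reports the fault and is + * latched into vars->fault_detected below.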
*/ + if (vars->line_speed == SPEED_10000) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + if (val1 & (1<<0)) + vars->fault_detected = 1; + } + + return link_up; +} + +/******************************************************************/ +/* BCM8706 PHY SECTION */ +/******************************************************************/ +static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 tx_en_mode; + u16 cnt, val, tmp1; + struct bnx2x *bp = params->bp; + + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); + bnx2x_wait_reset_complete(bp, phy, params); + + /* Wait until fw is loaded */ + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); + if (val) + break; + usleep_range(10000, 20000); + } + DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); + if ((params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + u8 i; + u16 reg; + for (i = 0; i < 4; i++) { + reg = MDIO_XS_8706_REG_BANK_RX0 + + i*(MDIO_XS_8706_REG_BANK_RX1 - + MDIO_XS_8706_REG_BANK_RX0); + bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val); + /* Clear first 3 bits of the control */ + val &= ~0x7; + /* Set control bits according to configuration */ + val |= (phy->rx_preemphasis[i] & 0x7); + DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706" + " reg 0x%x <-- val 0x%x\n", reg, val); + bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val); + } + } + /* Force speed */ + if (phy->req_line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n"); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_DIGITAL_CTRL, 0x400); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + /* Arm LASI for link and Tx fault. 
*/ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); + } else { + /* Force 1Gbps using autoneg with 1G advertisement */ + + /* Allow CL37 through CL73 */ + DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n"); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + + /* Enable Full-Duplex advertisement on CL37 */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020); + /* Enable CL37 AN */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + /* 1G support */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5)); + + /* Enable clause 73 AN */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x0400); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, + 0x0004); + } + bnx2x_save_bcm_spirom_ver(bp, phy, params->port); + + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1); + tmp1 |= 0x1; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); + } + + return 0; +} + +static int bnx2x_8706_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + return bnx2x_8706_8726_read_status(phy, params, vars); +} + +/******************************************************************/ +/* BCM8726 PHY SECTION */ +/******************************************************************/ +static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n"); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001); +} + +static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + /* Need to wait 100ms after reset */ + msleep(100); + + /* Micro controller re-boot */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B); + + /* Set soft reset */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + + /* Wait for 150ms for microcode load */ + msleep(150); + + /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); + + msleep(200); + bnx2x_save_bcm_spirom_ver(bp, phy, params->port); +} + +static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val1; + u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars); + if (link_up) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &val1); + if (val1 & (1<<15)) { + DP(NETIF_MSG_LINK, "Tx is disabled\n"); + link_up = 0; + vars->line_speed = 0; + } + } + return link_up; +} + + +static int 
bnx2x_8726_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); + + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_8726_external_rom_boot(phy, params); + + /* Need to call module detected on initialization since the module + * detection triggered by actual module insertion might occur before + * driver is loaded, and when driver is loaded, it reset all + * registers, including the transmitter + */ + bnx2x_sfp_module_detection(phy, params); + + if (phy->req_line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Setting 1G force\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x400); + } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); + /* Set Flow control */ + bnx2x_ext_phy_set_pause(params, phy, vars); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); + /* Enable RX-ALARM control to receive interrupt for 1G speed + * change + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + 0x400); + + } else { /* Default 10G. 
Set only LASI control */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); + } + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + DP(NETIF_MSG_LINK, + "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n", + phy->tx_preemphasis[0], + phy->tx_preemphasis[1]); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL1, + phy->tx_preemphasis[0]); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + return 0; + +} + +static void bnx2x_8726_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port); + /* Set serial boot control for external load */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, 0x0001); +} + +/******************************************************************/ +/* BCM8727 PHY SECTION */ +/******************************************************************/ + +static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + u16 led_mode_bitmask = 0; + u16 gpio_pins_bitmask = 0; + u16 val; + /* Only NOC flavor requires to set the LED specifically */ + if (!(phy->flags & FLAGS_NOC)) + return; + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x03; + break; + case LED_MODE_ON: + led_mode_bitmask = 0; + gpio_pins_bitmask = 0x02; + break; + case LED_MODE_OPER: + led_mode_bitmask = 0x60; + gpio_pins_bitmask = 0x11; + break; + } + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val &= 0xff8f; + val |= led_mode_bitmask; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + &val); + val &= 0xffe0; + val |= gpio_pins_bitmask; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_GPIO_CTRL, + val); +} +static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) { + u32 swap_val, swap_override; + u8 port; + /* The PHY reset is controlled by GPIO 1. 
Fake the port number + * to cancel the swap done in set_gpio() + */ + struct bnx2x *bp = params->bp; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + port = (swap_val && swap_override) ^ 1; + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); +} + +static void bnx2x_8727_config_speed(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 tmp1, val; + /* Set option 1G speed */ + if ((phy->req_line_speed == SPEED_1000) || + (phy->media_type == ETH_PHY_SFP_1G_FIBER)) { + DP(NETIF_MSG_LINK, "Setting 1G force\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); + DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); + /* Power down the XAUI until link is up in case of dual-media + * and 1G + */ + if (DUAL_MEDIA(params)) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val); + val |= (3<<10); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val); + } + } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); + } else { + /* Since the 8727 has only single reset pin, need to set the 10G + * registers although it is default + */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, + 0x0020); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, + 0x0008); + } +} + +static int bnx2x_8727_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 tx_en_mode; + u16 tmp1, mod_abs, tmp2; + struct bnx2x *bp = params->bp; + /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ + + bnx2x_wait_reset_complete(bp, phy, params); + + DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); + + bnx2x_8727_specific_func(phy, params, PHY_INIT); + /* Initially configure MOD_ABS to interrupt when module is + * presence( bit 8) + */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + /* Set EDC off by setting OPTXLOS signal input to low (bit 9). 
+ * When the EDC is off it locks onto a reference clock and avoids + * becoming 'lost' + */ + mod_abs &= ~(1<<8); + if (!(phy->flags & FLAGS_NOC)) + mod_abs &= ~(1<<9); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Enable/Disable PHY transmitter output */ + bnx2x_set_disable_pmd_transmit(params, phy, 0); + + bnx2x_8727_power_module(bp, phy, 1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); + + bnx2x_8727_config_speed(phy, params); + + + /* Set TX PreEmphasis if needed */ + if ((params->feature_config_flags & + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) { + DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n", + phy->tx_preemphasis[0], + phy->tx_preemphasis[1]); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1, + phy->tx_preemphasis[0]); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2, + phy->tx_preemphasis[1]); + } + + /* If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + + DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2); + tmp2 |= 0x1000; + tmp2 &= 0xFFEF; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + &tmp2); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, + (tmp2 & 0x7fff)); + } + + return 0; +} + +static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 mod_abs, rx_alarm_status; + u32 val = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + config)); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + if (mod_abs & (1<<8)) { + + /* Module is absent */ + DP(NETIF_MSG_LINK, + "MOD_ABS indication show module is absent\n"); + phy->media_type = ETH_PHY_NOT_PRESENT; + /* 1. Set mod_abs to detect next module + * presence event + * 2. Set EDC off by setting OPTXLOS signal input to low + * (bit 9). + * When the EDC is off it locks onto a reference clock and + * avoids becoming 'lost'. + */ + mod_abs &= ~(1<<8); + if (!(phy->flags & FLAGS_NOC)) + mod_abs &= ~(1<<9); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Clear RX alarm since it stays up as long as + * the mod_abs wasn't changed + */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + } else { + /* Module is present */ + DP(NETIF_MSG_LINK, + "MOD_ABS indication show module is present\n"); + /* First disable transmitter, and if the module is ok, the + * module_detection will enable it + * 1. Set mod_abs to detect next module absent event ( bit 8) + * 2. Restore the default polarity of the OPRXLOS signal and + * this signal will then correctly indicate the presence or + * absence of the Rx signal. 
(bit 9) + */ + mod_abs |= (1<<8); + if (!(phy->flags & FLAGS_NOC)) + mod_abs |= (1<<9); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + + /* Clear RX alarm since it stays up as long as the mod_abs + * wasn't changed. This is need to be done before calling the + * module detection, otherwise it will clear* the link update + * alarm + */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + + + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) + bnx2x_sfp_set_transmitter(params, phy, 0); + + if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) + bnx2x_sfp_module_detection(phy, params); + else + DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); + + /* Reconfigure link speed based on module type limitations */ + bnx2x_8727_config_speed(phy, params); + } + + DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", + rx_alarm_status); + /* No need to check link status in case of module plugged in/out */ +} + +static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) + +{ + struct bnx2x *bp = params->bp; + u8 link_up = 0, oc_port = params->port; + u16 link_status = 0; + u16 rx_alarm_status, lasi_ctrl, val1; + + /* If PHY is not initialized, do not check link status */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, + &lasi_ctrl); + if (!lasi_ctrl) + return 0; + + /* Check the LASI on Rx */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, + &rx_alarm_status); + vars->line_speed = 0; + DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status); + + bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + + DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1); + + /* Clear MSG-OUT */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); + + /* If a module is present and there is need to check + * for over current + */ + if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { + /* Check over-current using 8727 GPIO0 input*/ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL, + &val1); + + if ((val1 & (1<<8)) == 0) { + if (!CHIP_IS_E1x(bp)) + oc_port = BP_PATH(bp) + (params->port << 1); + DP(NETIF_MSG_LINK, + "8727 Power fault has been detected on port %d\n", + oc_port); + netdev_err(bp->dev, "Error: Power fault on Port %d has " + "been detected and the power to " + "that SFP+ module has been removed " + "to prevent failure of the card. 
" + "Please remove the SFP+ module and " + "restart the system to clear this " + "error.\n", + oc_port); + /* Disable all RX_ALARMs except for mod_abs */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXCTRL, (1<<5)); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &val1); + /* Wait for module_absent_event */ + val1 |= (1<<8); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, val1); + /* Clear RX alarm */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); + bnx2x_8727_power_module(params->bp, phy, 0); + return 0; + } + } /* Over current check */ + + /* When module absent bit is set, check module */ + if (rx_alarm_status & (1<<5)) { + bnx2x_8727_handle_mod_abs(phy, params); + /* Enable all mod_abs and link detection bits */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + ((1<<5) | (1<<2))); + } + + if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) { + DP(NETIF_MSG_LINK, "Enabling 8727 TX laser\n"); + bnx2x_sfp_set_transmitter(params, phy, 1); + } else { + DP(NETIF_MSG_LINK, "Tx is disabled\n"); + return 0; + } + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); + + /* Bits 0..2 --> speed detected, + * Bits 13..15--> link is down + */ + if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { + link_up = 1; + vars->line_speed = SPEED_10000; + DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", + params->port); + } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { + link_up = 1; + vars->line_speed = SPEED_1000; + DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n", + params->port); + } else { + link_up = 0; + DP(NETIF_MSG_LINK, "port %x: External link is down\n", + params->port); + } + + /* Capture 10G link fault. */ + if (vars->line_speed == SPEED_10000) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + if (val1 & (1<<0)) { + vars->fault_detected = 1; + } + } + + if (link_up) { + bnx2x_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = DUPLEX_FULL; + DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex); + } + + if ((DUAL_MEDIA(params)) && + (phy->req_line_speed == SPEED_1000)) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val1); + /* In case of dual-media board and 1G, power up the XAUI side, + * otherwise power it down. 
For 10G it is done automatically + */ + if (link_up) + val1 &= ~(3<<10); + else + val1 |= (3<<10); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val1); + } + return link_up; +} + +static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + /* Enable/Disable PHY transmitter output */ + bnx2x_set_disable_pmd_transmit(params, phy, 1); + + /* Disable Transmitter */ + bnx2x_sfp_set_transmitter(params, phy, 0); + /* Clear LASI */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); + +} + +/******************************************************************/ +/* BCM8481/BCM84823/BCM84833 PHY SECTION */ +/******************************************************************/ +static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, + struct bnx2x *bp, + u8 port) +{ + u16 val, fw_ver2, cnt, i; + static struct bnx2x_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, 0xA819, 0x0014}, + {MDIO_PMA_DEVAD, 0xA81A, 0xc200}, + {MDIO_PMA_DEVAD, 0xA81B, 0x0000}, + {MDIO_PMA_DEVAD, 0xA81C, 0x0300}, + {MDIO_PMA_DEVAD, 0xA817, 0x0009} + }; + u16 fw_ver1; + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); + bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, + phy->ver_addr); + } else { + /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ + /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, + reg_set[i].reg, reg_set[i].val); + + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + udelay(5); + } + if (cnt == 100) { + DP(NETIF_MSG_LINK, "Unable to read 848xx " + "phy fw version(1)\n"); + bnx2x_save_spirom_version(bp, port, 0, + phy->ver_addr); + return; + } + + + /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); + for (cnt = 0; cnt < 100; cnt++) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + if (val & 1) + break; + udelay(5); + } + if (cnt == 100) { + DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw " + "version(2)\n"); + bnx2x_save_spirom_version(bp, port, 0, + phy->ver_addr); + return; + } + + /* lower 16 bits of the register SPI_FW_STATUS */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); + /* upper 16 bits of register SPI_FW_STATUS */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); + + bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, + phy->ver_addr); + } + +} +static void bnx2x_848xx_set_led(struct bnx2x *bp, + struct bnx2x_phy *phy) +{ + u16 val, offset, i; + static struct bnx2x_reg_set reg_set[] = { + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000}, + {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, + MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, + {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} + }; + /* PHYC_CTL_LED_CTL */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + val &= 0xFE00; + val |= 0x0092; + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + 
MDIO_PMA_REG_8481_LINK_SIGNAL, val); + + for (i = 0; i < ARRAY_SIZE(reg_set); i++) + bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, + reg_set[i].val); + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; + else + offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; + + /* stretch_en for LED3*/ + bnx2x_cl45_read_or_write(bp, phy, + MDIO_PMA_DEVAD, offset, + MDIO_PMA_REG_84823_LED3_STRETCH_EN); +} + +static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + /* Save spirom version */ + bnx2x_save_848xx_spirom_version(phy, bp, params->port); + } + /* This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + + bnx2x_848xx_set_led(bp, phy); + break; + } +} + +static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 autoneg_val, an_1000_val, an_10_100_val; + + bnx2x_848xx_specific_func(phy, params, PHY_INIT); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); + + /* set 1000 speed advertisement */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + &an_1000_val); + + bnx2x_ext_phy_set_pause(params, phy, vars); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_ADV, + &an_10_100_val); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, + &autoneg_val); + /* Disable forced speed */ + autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); + an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8)); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { + an_1000_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1<<9); + DP(NETIF_MSG_LINK, "Advertising 1G\n"); + } else + an_1000_val &= ~((1<<8) | (1<<9)); + + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, + an_1000_val); + + /* Set 10/100 speed advertisement */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1<<9 | 1<<12); + an_10_100_val |= (1<<8); + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } + + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + /* Enable autoneg and restart autoneg for legacy speeds + */ + autoneg_val |= (1<<9 | 1<<12); + an_10_100_val |= (1<<7); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && + (phy->supported & SUPPORTED_10baseT_Full)) { + an_10_100_val |= (1<<6); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + + if ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) && + (phy->supported & SUPPORTED_10baseT_Half)) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); 
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if ((phy->req_line_speed == SPEED_100) && + (phy->supported & + (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full))) { + autoneg_val |= (1<<13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1<<15 | 1<<9 | 7<<0)); + /* The PHY needs this set even for forced link. */ + an_10_100_val |= (1<<8) | (1<<7); + DP(NETIF_MSG_LINK, "Setting 100M force\n"); + } + if ((phy->req_line_speed == SPEED_10) && + (phy->supported & + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full))) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 10M force\n"); + } + + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV, + an_10_100_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1<<8); + + /* Always write this if this is not 84833/4. + * For 84833/4, write it only when it's a forced speed. + */ + if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) || + ((autoneg_val & (1<<12)) == 0)) + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (phy->req_line_speed == SPEED_10000)) { + DP(NETIF_MSG_LINK, "Advertising 10G\n"); + /* Restart autoneg for 10G*/ + + bnx2x_cl45_read_or_write( + bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + 0x1000); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, + 0x3200); + } else + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, + 1); + + return 0; +} + +static int bnx2x_8481_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); + return bnx2x_848xx_cmn_config_init(phy, params, vars); +} + +#define PHY84833_CMDHDLR_WAIT 300 +#define PHY84833_CMDHDLR_MAX_ARGS 5 +static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, + struct link_params *params, u16 fw_cmd, + u16 cmd_args[], int argc) +{ + int idx; + u16 val; + struct bnx2x *bp = params->bp; + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_OPEN_OVERRIDE); + for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, &val); + if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) + break; + usleep_range(1000, 2000); + } + if (idx >= PHY84833_CMDHDLR_WAIT) { + DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); + return -EINVAL; + } + + /* Prepare argument(s) and issue command */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); + for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, 
MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, &val); + if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) + break; + usleep_range(1000, 2000); + } + if ((idx >= PHY84833_CMDHDLR_WAIT) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "FW cmd failed.\n"); + return -EINVAL; + } + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_CMD_HDLR_STATUS, + PHY84833_STATUS_CMD_CLEAR_COMPLETE); + return 0; +} + +static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 pair_swap; + u16 data[PHY84833_CMDHDLR_MAX_ARGS]; + int status; + struct bnx2x *bp = params->bp; + + /* Check for configuration. */ + pair_swap = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & + PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; + + if (pair_swap == 0) + return 0; + + /* Only the second argument is used for this command */ + data[1] = (u16)pair_swap; + + status = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS); + if (status == 0) + DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); + + return status; +} + +static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, + u32 shmem_base_path[], + u32 chip_id) +{ + u32 reset_pin[2]; + u32 idx; + u8 reset_gpios; + if (CHIP_IS_E3(bp)) { + /* Assume that these will be GPIOs, not EPIOs. */ + for (idx = 0; idx < 2; idx++) { + /* Map config param to register bit. */ + reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].e3_cmn_pin_cfg)); + reset_pin[idx] = (reset_pin[idx] & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + reset_pin[idx] -= PIN_CFG_GPIO0_P0; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); + } else { + /* E2, look from diff place of shmem. 
*/ + for (idx = 0; idx < 2; idx++) { + reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].default_cfg)); + reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; + reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; + reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); + } + + return reset_gpios; +} + +static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 reset_gpios; + u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + other_shmem_base_addr)); + + u32 shmem_base_path[2]; + + /* Work around for 84833 LED failure inside RESET status */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_MII_CTRL, + MDIO_AN_REG_8481_MII_CTRL_FORCE_1G); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_8481_1G_100T_EXT_CTRL, + MIDO_AN_REG_8481_EXT_CTRL_FORCE_LEDS_OFF); + + shmem_base_path[0] = params->shmem_base; + shmem_base_path[1] = other_shmem_base_addr; + + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, + params->chip_id); + + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", + reset_gpios); + + return 0; +} + +static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + struct bnx2x *bp = params->bp; + u16 cmd_args = 0; + + DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); + + /* Prevent Phy from working in EEE and advertising it */ + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc) { + DP(NETIF_MSG_LINK, "EEE disable failed.\n"); + return rc; + } + + return bnx2x_eee_disable(phy, params, vars); +} + +static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + int rc; + struct bnx2x *bp = params->bp; + u16 cmd_args = 1; + + rc = bnx2x_84833_cmd_hdlr(phy, params, + PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + if (rc) { + DP(NETIF_MSG_LINK, "EEE enable failed.\n"); + return rc; + } + + return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV); +} + +#define PHY84833_CONSTANT_LATENCY 1193 +static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port, initialize = 1; + u16 val; + u32 actual_phy_selection; + u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; + int rc = 0; + + usleep_range(1000, 2000); + + if (!(CHIP_IS_E1x(bp))) + port = BP_PATH(bp); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + } else { + /* MDIO reset */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, 0x8000); + } + + bnx2x_wait_reset_complete(bp, phy, params); + + /* Wait for GPHY to come out of reset */ + msleep(50); + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && + (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + /* BCM84823 requires that XGXS links up first @ 10G for normal + * behavior. 
+ */
+ u16 temp;
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+ }
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, &val);
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+ MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+ MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+ MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+
+ if (CHIP_IS_E3(bp)) {
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+ } else {
+ val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+ }
+
+ actual_phy_selection = bnx2x_phy_selection(params);
+
+ switch (actual_phy_selection) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ /* Do nothing. Essentially this is like the priority copper */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ /* Do nothing here. The first PHY won't be initialized at all */
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+ initialize = 0;
+ break;
+ }
+ if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+ val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_MEDIA, val);
+ DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+ params->multi_phy_config, val);
+
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+ bnx2x_84833_pair_swap_cfg(phy, params, vars);
+
+ /* Keep AutogrEEEn disabled. */
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, cmd_args,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (rc)
+ DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+ }
+ if (initialize)
+ rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+ else
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ /* 84833 PHY has a better feature and doesn't need to support this.
*/ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + u32 cms_enable = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_CMS_MASK; + + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, &val); + if (cms_enable) + val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; + else + val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, val); + } + + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_FW_REV, &val); + + /* Configure EEE support */ + if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && + (val != MDIO_84833_TOP_CFG_FW_NO_EEE) && + bnx2x_eee_has_cap(params)) { + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); + bnx2x_8483x_disable_eee(phy, params, vars); + return rc; + } + + if ((phy->req_duplex == DUPLEX_FULL) && + (params->eee_mode & EEE_MODE_ADV_LPI) && + (bnx2x_eee_calc_timer(params) || + !(params->eee_mode & EEE_MODE_ENABLE_LPI))) + rc = bnx2x_8483x_enable_eee(phy, params, vars); + else + rc = bnx2x_8483x_disable_eee(phy, params, vars); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n"); + return rc; + } + } else { + vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; + } + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + /* Bring PHY out of super isolate mode as the final step. */ + bnx2x_cl45_read_and_write(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, + (u16)~MDIO_84833_SUPER_ISOLATE); + } + return rc; +} + +static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val, val1, val2; + u8 link_up = 0; + + + /* Check 10G-BaseT link status */ + /* Check PMD signal ok */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, 0xFFFA, &val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, + &val2); + DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); + + /* Check link 10G */ + if (val2 & (1<<11)) { + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + link_up = 1; + bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); + } else { /* Check Legacy speed link */ + u16 legacy_status, legacy_speed; + + /* Enable expansion register 0x42 (Operation mode status) */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42); + + /* Get legacy speed operation status */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, + &legacy_status); + + DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n", + legacy_status); + link_up = ((legacy_status & (1<<11)) == (1<<11)); + legacy_speed = (legacy_status & (3<<9)); + if (legacy_speed == (0<<9)) + vars->line_speed = SPEED_10; + else if (legacy_speed == (1<<9)) + vars->line_speed = SPEED_100; + else if (legacy_speed == (2<<9)) + vars->line_speed = SPEED_1000; + else { /* Should not happen: Treat as link down */ + vars->line_speed = 0; + link_up = 0; + } + + if (link_up) { + if (legacy_status & (1<<8)) + vars->duplex = DUPLEX_FULL; + else + vars->duplex = DUPLEX_HALF; + + DP(NETIF_MSG_LINK, + "Link is up in %dMbps, is_duplex_full= %d\n", + vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + /* Check legacy speed AN resolution */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + 
MDIO_AN_REG_8481_LEGACY_MII_STATUS, + &val); + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8481_LEGACY_AN_EXPANSION, + &val); + if ((val & (1<<0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + } + if (link_up) { + DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n", + vars->line_speed); + bnx2x_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_CL37_FC_LP, &val); + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1<<6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1<<7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1<<8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1<<9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_1000T_STATUS, &val); + + if (val & (1<<10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_MASTER_STATUS, &val); + + if (val & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + + /* Determine if EEE was negotiated */ + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + bnx2x_eee_an_resolve(phy, params, vars); + } + + return link_up; +} + +static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) +{ + int status = 0; + u32 spirom_ver; + spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); + status = bnx2x_format_ver(spirom_ver, str, len); + return status; +} + +static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); +} + +static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + bnx2x_cl45_write(params->bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000); + bnx2x_cl45_write(params->bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1); +} + +static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 port; + u16 val16; + + if (!(CHIP_IS_E1x(bp))) + port = BP_PATH(bp); + else + port = params->port; + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); + } else { + bnx2x_cl45_read(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16); + val16 |= MDIO_84833_SUPER_ISOLATE; + bnx2x_cl45_write(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16); + } +} + +static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + u16 val; + u8 port; + + if (!(CHIP_IS_E1x(bp))) + port = BP_PATH(bp); + else + port = params->port; + + switch (mode) { + case LED_MODE_OFF: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + 
MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); + + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + } + break; + case LED_MODE_FRONT_PANEL_OFF: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n", + port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x20); + + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant off. + */ + if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4) & + NIG_MASK_MI_INT) { + params->link_flags |= + LINK_FLAGS_INT_DISABLED; + + bnx2x_bits_dis( + bp, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4, + NIG_MASK_MI_INT); + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x0); + } + } + break; + case LED_MODE_ON: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + /* Set control reg */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= 0x8000; + val |= 0x2492; + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x20); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x20); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); + } else { + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x20); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Disable MI_INT interrupt before setting LED4 + * source to constant on. 
+ */ + if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4) & + NIG_MASK_MI_INT) { + params->link_flags |= + LINK_FLAGS_INT_DISABLED; + + bnx2x_bits_dis( + bp, + NIG_REG_MASK_INTERRUPT_PORT0 + + params->port*4, + NIG_MASK_MI_INT); + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x20); + } + } + break; + + case LED_MODE_OPER: + + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port); + + if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY1) { + + /* Set control reg */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + + if (!((val & + MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) + >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) { + DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + 0xa492); + } + + /* Set LED masks */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x10); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x80); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x98); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x40); + + } else { + /* EXTPHY2 LED mode indicate that the 100M/1G/10G LED + * sources are all wired through LED1, rather than only + * 10G in other modes. + */ + val = ((params->hw_led_mode << + SHARED_HW_CFG_LED_MODE_SHIFT) == + SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80; + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + val); + + /* Tell LED3 to blink on source */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + &val); + val &= ~(7<<6); + val |= (1<<6); /* A83B[8:6]= 1 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); + if (phy->type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { + /* Restore LED4 source to external link, + * and re-enable interrupts. + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_SIGNAL_MASK, + 0x40); + if (params->link_flags & + LINK_FLAGS_INT_DISABLED) { + bnx2x_link_int_enable(params); + params->link_flags &= + ~LINK_FLAGS_INT_DISABLED; + } + } + } + break; + } + + /* This is a workaround for E3+84833 until autoneg + * restart is fixed in f/w + */ + if (CHIP_IS_E3(bp)) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); + } +} + +/******************************************************************/ +/* 54618SE PHY SECTION */ +/******************************************************************/ +static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + u16 temp; + switch (action) { + case PHY_INIT: + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. 
*/ + bnx2x_cl22_write(bp, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + break; + } +} + +static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port; + u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; + u32 cfg_pin; + + DP(NETIF_MSG_LINK, "54618SE cfg init\n"); + usleep_range(1000, 2000); + + /* This works with E3 only, no need to check the chip + * before determining the port. + */ + port = params->port; + + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin high to bring the GPHY out of reset. */ + bnx2x_set_cfg_pin(bp, cfg_pin, 1); + + /* wait for GPHY to reset */ + msleep(50); + + /* reset phy */ + bnx2x_cl22_write(bp, phy, + MDIO_PMA_REG_CTRL, 0x8000); + bnx2x_wait_reset_complete(bp, phy, params); + + /* Wait for GPHY to reset */ + msleep(50); + + + bnx2x_54618se_specific_func(phy, params, PHY_INIT); + /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + + /* Set up fc */ + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + fc_val = 0; + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) + fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + + /* Read all advertisement */ + bnx2x_cl22_read(bp, phy, + 0x09, + &an_1000_val); + + bnx2x_cl22_read(bp, phy, + 0x04, + &an_10_100_val); + + bnx2x_cl22_read(bp, phy, + MDIO_PMA_REG_CTRL, + &autoneg_val); + + /* Disable forced speed */ + autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); + an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) | + (1<<11)); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { + an_1000_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1<<9); + DP(NETIF_MSG_LINK, "Advertising 1G\n"); + } else + an_1000_val &= ~((1<<8) | (1<<9)); + + bnx2x_cl22_write(bp, phy, + 0x09, + an_1000_val); + bnx2x_cl22_read(bp, phy, + 0x09, + &an_1000_val); + + /* Advertise 10/100 link speed */ + if (phy->req_line_speed == SPEED_AUTO_NEG) { + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { + an_10_100_val |= (1<<6); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { + an_10_100_val |= (1<<7); + autoneg_val |= (1<<9 | 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); + } + if (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { + an_10_100_val |= (1<<8); + autoneg_val |= (1<<9 
| 1<<12); + DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); + } + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if (phy->req_line_speed == SPEED_100) { + autoneg_val |= (1<<13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl22_write(bp, phy, + 0x18, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 100M force\n"); + } + if (phy->req_line_speed == SPEED_10) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl22_write(bp, phy, + 0x18, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 10M force\n"); + } + + if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) { + int rc; + + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS, + MDIO_REG_GPHY_EXP_ACCESS_TOP | + MDIO_REG_GPHY_EXP_TOP_2K_BUF); + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); + temp &= 0xfffe; + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); + + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); + bnx2x_eee_disable(phy, params, vars); + } else if ((params->eee_mode & EEE_MODE_ADV_LPI) && + (phy->req_duplex == DUPLEX_FULL) && + (bnx2x_eee_calc_timer(params) || + !(params->eee_mode & EEE_MODE_ENABLE_LPI))) { + /* Need to advertise EEE only when requested, + * and either no LPI assertion was requested, + * or it was requested and a valid timer was set. + * Also notice full duplex is required for EEE. + */ + bnx2x_eee_advertise(phy, params, vars, + SHMEM_EEE_1G_ADV); + } else { + DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n"); + bnx2x_eee_disable(phy, params, vars); + } + } else { + vars->eee_status &= ~SHMEM_EEE_1G_ADV << + SHMEM_EEE_SUPPORTED_SHIFT; + + if (phy->flags & FLAGS_EEE) { + /* Handle legacy auto-grEEEn */ + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + } else { + temp = 0; + DP(NETIF_MSG_LINK, "Don't Adv. EEE\n"); + } + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_EEE_ADV, temp); + } + } + + bnx2x_cl22_write(bp, phy, + 0x04, + an_10_100_val | fc_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1<<8); + + bnx2x_cl22_write(bp, phy, + MDIO_PMA_REG_CTRL, autoneg_val); + + return 0; +} + + +static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + u16 temp; + + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL1); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= 0xff00; + + DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode); + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + temp |= 0x00ee; + break; + case LED_MODE_OPER: + temp |= 0x0001; + break; + case LED_MODE_ON: + temp |= 0x00ff; + break; + default: + break; + } + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + return; +} + + +static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port; + + /* In case of no EPIO routed to reset the GPHY, put it + * in low power mode. + */ + bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); + /* This works with E3 only, no need to check the chip + * before determining the port. 
+ */ + port = params->port; + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin low to put GPHY in reset. */ + bnx2x_set_cfg_pin(bp, cfg_pin, 0); +} + +static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val; + u8 link_up = 0; + u16 legacy_status, legacy_speed; + + /* Get speed operation status */ + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_AUX_STATUS, + &legacy_status); + DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); + + /* Read status to clear the PHY interrupt. */ + bnx2x_cl22_read(bp, phy, + MDIO_REG_INTR_STATUS, + &val); + + link_up = ((legacy_status & (1<<2)) == (1<<2)); + + if (link_up) { + legacy_speed = (legacy_status & (7<<8)); + if (legacy_speed == (7<<8)) { + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (6<<8)) { + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (5<<8)) { + vars->line_speed = SPEED_100; + vars->duplex = DUPLEX_FULL; + } + /* Omitting 100Base-T4 for now */ + else if (legacy_speed == (3<<8)) { + vars->line_speed = SPEED_100; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (2<<8)) { + vars->line_speed = SPEED_10; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (1<<8)) { + vars->line_speed = SPEED_10; + vars->duplex = DUPLEX_HALF; + } else /* Should not happen */ + vars->line_speed = 0; + + DP(NETIF_MSG_LINK, + "Link is up in %dMbps, is_duplex_full= %d\n", + vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + + /* Check legacy speed AN resolution */ + bnx2x_cl22_read(bp, phy, + 0x01, + &val); + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + bnx2x_cl22_read(bp, phy, + 0x06, + &val); + if ((val & (1<<0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + + DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", + vars->line_speed); + + bnx2x_ext_phy_resolve_fc(phy, params, vars); + + if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + /* Report LP advertised speeds */ + bnx2x_cl22_read(bp, phy, 0x5, &val); + + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10THD_CAPABLE; + if (val & (1<<6)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE; + if (val & (1<<7)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE; + if (val & (1<<8)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE; + if (val & (1<<9)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_100T4_CAPABLE; + + bnx2x_cl22_read(bp, phy, 0xa, &val); + if (val & (1<<10)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE; + if (val & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + if ((phy->flags & FLAGS_EEE) && + bnx2x_eee_has_cap(params)) + bnx2x_eee_an_resolve(phy, params, vars); + } + } + return link_up; +} + +static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val; + u32 umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + + DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n"); + + /* Enable master/slave manual mmode and set to master */ + /* mii write 9 [bits set 11 12] */ + bnx2x_cl22_write(bp, phy, 0x09, 3<<11); + + /* forced 1G and disable autoneg */ + /* set val [mii read 0] */ + /* set val [expr $val & [bits clear 6 12 13]] */ + /* set val [expr $val | [bits set 6 8]] */ + /* mii write 0 $val */ + bnx2x_cl22_read(bp, phy, 0x00, &val); + val &= ~((1<<6) | (1<<12) | (1<<13)); + val |= (1<<6) | (1<<8); + bnx2x_cl22_write(bp, phy, 0x00, val); + + /* Set external loopback and Tx using 6dB coding */ + /* mii write 0x18 7 */ + /* set val [mii read 0x18] */ + /* mii write 0x18 [expr $val | [bits set 10 15]] */ + bnx2x_cl22_write(bp, phy, 0x18, 7); + bnx2x_cl22_read(bp, phy, 0x18, &val); + bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15)); + + /* This register opens the gate for the UMAC despite its name */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); + + /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame + * length used by the MAC receive logic to check frames. + */ + REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); +} + +/******************************************************************/ +/* SFX7101 PHY SECTION */ +/******************************************************************/ +static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + /* SFX7101_XGXS_TEST1 */ + bnx2x_cl45_write(bp, phy, + MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); +} + +static int bnx2x_7101_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u16 fw_ver1, fw_ver2, val; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n"); + + /* Restore normal power mode*/ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + /* HW reset */ + bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_wait_reset_complete(bp, phy, params); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1); + DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3)); + + bnx2x_ext_phy_set_pause(params, phy, vars); + /* Restart autoneg */ + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val); + val |= 0x200; + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val); + + /* Save spirom version */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1); + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2); + bnx2x_save_spirom_version(bp, params->port, + (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); + return 0; +} + +static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 link_up; + u16 val1, val2; + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); + DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n", + val2, val1); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1); + DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", + val2, val1); + link_up = ((val1 & 4) == 4); + /* If link is up print the AN outcome of the SFX7101 PHY */ + if 
(link_up) { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, + &val2); + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n", + val2, (val2 & (1<<14))); + bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); + bnx2x_ext_phy_resolve_fc(phy, params, vars); + + /* Read LP advertised speeds */ + if (val2 & (1<<11)) + vars->link_status |= + LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; + } + return link_up; +} + +static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len) +{ + if (*len < 5) + return -EINVAL; + str[0] = (spirom_ver & 0xFF); + str[1] = (spirom_ver & 0xFF00) >> 8; + str[2] = (spirom_ver & 0xFF0000) >> 16; + str[3] = (spirom_ver & 0xFF000000) >> 24; + str[4] = '\0'; + *len -= 5; + return 0; +} + +void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) +{ + u16 val, cnt; + + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); + + for (cnt = 0; cnt < 10; cnt++) { + msleep(50); + /* Writes a self-clearing reset */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, + (val | (1<<15))); + /* Wait for clear */ + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); + + if ((val & (1<<15)) == 0) + break; + } +} + +static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) { + /* Low power mode is controlled by GPIO 2 */ + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + /* The PHY reset is controlled by GPIO 1 */ + bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); +} + +static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + u16 val = 0; + struct bnx2x *bp = params->bp; + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + val = 2; + break; + case LED_MODE_ON: + val = 1; + break; + case LED_MODE_OPER: + val = 0; + break; + } + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7107_LINK_LED_CNTL, + val); +} + +/******************************************************************/ +/* STATIC PHY DECLARATION */ +/******************************************************************/ + +static const struct bnx2x_phy phy_null = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, + .addr = 0, + .def_md_devad = 0, + .flags = FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = 0, + .media_type = ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)NULL, + .read_status = (read_status_t)NULL, + .link_reset = (link_reset_t)NULL, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct bnx2x_phy phy_serdes = { + .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_TP | + 
SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_xgxs_config_init, + .read_status = (read_status_t)bnx2x_link_settings_status, + .link_reset = (link_reset_t)bnx2x_int_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct bnx2x_phy phy_xgxs = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_CX4, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_xgxs_config_init, + .read_status = (read_status_t)bnx2x_link_settings_status, + .link_reset = (link_reset_t)bnx2x_int_link_reset, + .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func +}; +static const struct bnx2x_phy phy_warpcore = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_TX_ERROR_CHECK, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_UNSPECIFIED, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */0, + /* rsrv = */0, + .config_init = (config_init_t)bnx2x_warpcore_config_init, + .read_status = (read_status_t)bnx2x_warpcore_read_status, + .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, + .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + + +static const struct bnx2x_phy phy_7101 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = 
(config_init_t)bnx2x_7101_config_init, + .read_status = (read_status_t)bnx2x_7101_read_status, + .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_loopback = (config_loopback_t)bnx2x_7101_config_loopback, + .format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver, + .hw_reset = (hw_reset_t)bnx2x_7101_hw_reset, + .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static const struct bnx2x_phy phy_8073 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, + .addr = 0xff, + .def_md_devad = 0, + .flags = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_KR, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8073_config_init, + .read_status = (read_status_t)bnx2x_8073_read_status, + .link_reset = (link_reset_t)bnx2x_8073_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func +}; +static const struct bnx2x_phy phy_8705 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_XFP_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8705_config_init, + .read_status = (read_status_t)bnx2x_8705_read_status, + .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; +static const struct bnx2x_phy phy_8706 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_SFPP_10G_FIBER, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8706_config_init, + .read_status = (read_status_t)bnx2x_8706_read_status, + .link_reset = (link_reset_t)bnx2x_common_ext_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct bnx2x_phy phy_8726 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_INIT_XGXS_FIRST | 
+ FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8726_config_init, + .read_status = (read_status_t)bnx2x_8726_read_status, + .link_reset = (link_reset_t)bnx2x_8726_link_reset, + .config_loopback = (config_loopback_t)bnx2x_8726_config_loopback, + .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct bnx2x_phy phy_8727 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_NOT_PRESENT, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8727_config_init, + .read_status = (read_status_t)bnx2x_8727_read_status, + .link_reset = (link_reset_t)bnx2x_8727_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, + .hw_reset = (hw_reset_t)bnx2x_8727_hw_reset, + .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func +}; +static const struct bnx2x_phy phy_8481 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_8481_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_8481_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_8481_hw_reset, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; + +static const struct bnx2x_phy phy_84823 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half 
| + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func +}; + +static const struct bnx2x_phy phy_84833 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, + .addr = 0xff, + .def_md_devad = 0, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL | + FLAGS_TX_ERROR_CHECK), + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func +}; + +static const struct bnx2x_phy phy_84834 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func +}; + +static const struct bnx2x_phy phy_54618se = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_TP | 
+ SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */0, + /* rsrv = */0, + .config_init = (config_init_t)bnx2x_54618se_config_init, + .read_status = (read_status_t)bnx2x_54618se_read_status, + .link_reset = (link_reset_t)bnx2x_54618se_link_reset, + .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func +}; +/*****************************************************************/ +/* */ +/* Populate the phy accordingly. Main function: bnx2x_populate_phy */ +/* */ +/*****************************************************************/ + +static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, + struct bnx2x_phy *phy, u8 port, + u8 phy_index) +{ + /* Get the 4 lanes xgxs config rx and tx */ + u32 rx = 0, tx = 0, i; + for (i = 0; i < 2; i++) { + /* INT_PHY and EXT_PHY1 share the same value location in + * the shmem. When num_phys is greater than 1, this value + * applies only to EXT_PHY1 + */ + if (phy_index == INT_PHY || phy_index == EXT_PHY1) { + rx = REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); + + tx = REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); + } else { + rx = REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); + + tx = REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); + } + + phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff); + phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff); + + phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff); + phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff); + } +} + +static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base, + u8 phy_index, u8 port) +{ + u32 ext_phy_config = 0; + switch (phy_index) { + case EXT_PHY1: + ext_phy_config = REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].external_phy_config)); + break; + case EXT_PHY2: + ext_phy_config = REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].external_phy_config2)); + break; + default: + DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index); + return -EINVAL; + } + + return ext_phy_config; +} +static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, + struct bnx2x_phy *phy) +{ + u32 phy_addr; + u32 chip_id; + u32 switch_cfg = (REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_feature_config[port].link_config)) & + PORT_FEATURE_CONNECTED_SWITCH_MASK); + chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) | + ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12); + + DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id); + if (USES_WARPCORE(bp)) { + u32 serdes_net_if; + phy_addr = REG_RD(bp, + MISC_REG_WC0_CTRL_PHY_ADDR); + *phy = phy_warpcore; + if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) + phy->flags |= FLAGS_4_PORT_MODE; + else + phy->flags &= ~FLAGS_4_PORT_MODE; + /* Check Dual mode */ + serdes_net_if = (REG_RD(bp, shmem_base + + offsetof(struct shmem_region, dev_info.
+ port_hw_config[port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + /* Set the appropriate supported and flags indications per + * interface type of the chip + */ + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_SGMII: + phy->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->media_type = ETH_PHY_BASE_T; + break; + case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->media_type = ETH_PHY_XFP_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_SFI: + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->media_type = ETH_PHY_SFPP_10G_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_KR: + phy->media_type = ETH_PHY_KR; + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + phy->media_type = ETH_PHY_KR; + phy->flags |= FLAGS_WC_DUAL_MODE; + phy->supported &= (SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + phy->media_type = ETH_PHY_KR; + phy->flags |= FLAGS_WC_DUAL_MODE; + phy->supported &= (SUPPORTED_20000baseKR2_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->flags &= ~FLAGS_TX_ERROR_CHECK; + break; + default: + DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", + serdes_net_if); + break; + } + + /* Enable MDC/MDIO work-around for E3 A0 since free running MDC + * was not set as expected. 
For B0, ECO will be enabled so there + * won't be an issue there + */ + if (CHIP_REV(bp) == CHIP_REV_Ax) + phy->flags |= FLAGS_MDC_MDIO_WA; + else + phy->flags |= FLAGS_MDC_MDIO_WA_B0; + } else { + switch (switch_cfg) { + case SWITCH_CFG_1G: + phy_addr = REG_RD(bp, + NIG_REG_SERDES0_CTRL_PHY_ADDR + + port * 0x10); + *phy = phy_serdes; + break; + case SWITCH_CFG_10G: + phy_addr = REG_RD(bp, + NIG_REG_XGXS0_CTRL_PHY_ADDR + + port * 0x18); + *phy = phy_xgxs; + break; + default: + DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); + return -EINVAL; + } + } + phy->addr = (u8)phy_addr; + phy->mdio_ctrl = bnx2x_get_emac_base(bp, + SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, + port); + if (CHIP_IS_E2(bp)) + phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR; + else + phy->def_md_devad = DEFAULT_PHY_DEV_ADDR; + + DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n", + port, phy->addr, phy->mdio_ctrl); + + bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY); + return 0; +} + +static int bnx2x_populate_ext_phy(struct bnx2x *bp, + u8 phy_index, + u32 shmem_base, + u32 shmem2_base, + u8 port, + struct bnx2x_phy *phy) +{ + u32 ext_phy_config, phy_type, config2; + u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; + ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base, + phy_index, port); + phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); + /* Select the phy type */ + switch (phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED; + *phy = phy_8073; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: + *phy = phy_8705; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: + *phy = phy_8706; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8726; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: + /* BCM8727_NOC => BCM8727 no over current */ + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8727; + phy->flags |= FLAGS_NOC; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; + *phy = phy_8727; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: + *phy = phy_8481; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: + *phy = phy_84823; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: + *phy = phy_84833; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: + *phy = phy_84834; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: + *phy = phy_54618se; + if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + phy->flags |= FLAGS_EEE; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: + *phy = phy_7101; + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + *phy = phy_null; + return -EINVAL; + default: + *phy = phy_null; + /* In case external PHY wasn't found */ + if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && + (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + return -EINVAL; + return 0; + } + + phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); + bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); + + /* The shmem address of the phy version is located on different + * structures. 
In case this structure is too old, do not set + * the address + */ + config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, + dev_info.shared_hw_config.config2)); + if (phy_index == EXT_PHY1) { + phy->ver_addr = shmem_base + offsetof(struct shmem_region, + port_mb[port].ext_phy_fw_version); + + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) + mdc_mdio_access = config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; + } else { + u32 size = REG_RD(bp, shmem2_base); + + if (size > + offsetof(struct shmem2_region, ext_phy_fw_version2)) { + phy->ver_addr = shmem2_base + + offsetof(struct shmem2_region, + ext_phy_fw_version2[port]); + } + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) + mdc_mdio_access = (config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >> + (SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT - + SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT); + } + phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); + + if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) && + (phy->ver_addr)) { + /* Remove 100Mb link supported for BCM84833/4 when phy fw + * version lower than or equal to 1.39 + */ + u32 raw_ver = REG_RD(bp, phy->ver_addr); + if (((raw_ver & 0x7F) <= 39) && + (((raw_ver & 0xF80) >> 7) <= 1)) + phy->supported &= ~(SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + } + + DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n", + phy_type, port, phy_index); + DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n", + phy->addr, phy->mdio_ctrl); + return 0; +} + +static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, + u32 shmem2_base, u8 port, struct bnx2x_phy *phy) +{ + int status = 0; + phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; + if (phy_index == INT_PHY) + return bnx2x_populate_int_phy(bp, shmem_base, port, phy); + status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base, + port, phy); + return status; +} + +static void bnx2x_phy_def_cfg(struct link_params *params, + struct bnx2x_phy *phy, + u8 phy_index) +{ + struct bnx2x *bp = params->bp; + u32 link_config; + /* Populate the default phy configuration for MF mode */ + if (phy_index == EXT_PHY2) { + link_config = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port].link_config2)); + phy->speed_cap_mask = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info. + port_hw_config[params->port].speed_capability_mask2)); + } else { + link_config = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port].link_config)); + phy->speed_cap_mask = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info. 
+ port_hw_config[params->port].speed_capability_mask)); + } + DP(NETIF_MSG_LINK, + "Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x\n", + phy_index, link_config, phy->speed_cap_mask); + + phy->req_duplex = DUPLEX_FULL; + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_10M_HALF: + phy->req_duplex = DUPLEX_HALF; + case PORT_FEATURE_LINK_SPEED_10M_FULL: + phy->req_line_speed = SPEED_10; + break; + case PORT_FEATURE_LINK_SPEED_100M_HALF: + phy->req_duplex = DUPLEX_HALF; + case PORT_FEATURE_LINK_SPEED_100M_FULL: + phy->req_line_speed = SPEED_100; + break; + case PORT_FEATURE_LINK_SPEED_1G: + phy->req_line_speed = SPEED_1000; + break; + case PORT_FEATURE_LINK_SPEED_2_5G: + phy->req_line_speed = SPEED_2500; + break; + case PORT_FEATURE_LINK_SPEED_10G_CX4: + phy->req_line_speed = SPEED_10000; + break; + default: + phy->req_line_speed = SPEED_AUTO_NEG; + break; + } + + switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) { + case PORT_FEATURE_FLOW_CONTROL_AUTO: + phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; + break; + case PORT_FEATURE_FLOW_CONTROL_TX: + phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX; + break; + case PORT_FEATURE_FLOW_CONTROL_RX: + phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX; + break; + case PORT_FEATURE_FLOW_CONTROL_BOTH: + phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH; + break; + default: + phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; + break; + } +} + +u32 bnx2x_phy_selection(struct link_params *params) +{ + u32 phy_config_swapped, prio_cfg; + u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT; + + phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + + prio_cfg = params->multi_phy_config & + PORT_HW_CFG_PHY_SELECTION_MASK; + + if (phy_config_swapped) { + switch (prio_cfg) { + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY; + break; + case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; + break; + case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: + return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; + break; + } + } else + return_cfg = prio_cfg; + + return return_cfg; +} + +int bnx2x_phy_probe(struct link_params *params) +{ + u8 phy_index, actual_phy_idx; + u32 phy_config_swapped, sync_offset, media_types; + struct bnx2x *bp = params->bp; + struct bnx2x_phy *phy; + params->num_phys = 0; + DP(NETIF_MSG_LINK, "Begin phy probe\n"); + phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + + for (phy_index = INT_PHY; phy_index < MAX_PHYS; + phy_index++) { + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == EXT_PHY1) + actual_phy_idx = EXT_PHY2; + else if (phy_index == EXT_PHY2) + actual_phy_idx = EXT_PHY1; + } + DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x," + " actual_phy_idx %x\n", phy_config_swapped, + phy_index, actual_phy_idx); + phy = ¶ms->phy[actual_phy_idx]; + if (bnx2x_populate_phy(bp, phy_index, params->shmem_base, + params->shmem2_base, params->port, + phy) != 0) { + params->num_phys = 0; + DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n", + phy_index); + for (phy_index = INT_PHY; + phy_index < MAX_PHYS; + phy_index++) + *phy = phy_null; + return -EINVAL; + } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) + break; + + if (params->feature_config_flags & + FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET) + phy->flags 
&= ~FLAGS_TX_ERROR_CHECK; + + if (!(params->feature_config_flags & + FEATURE_CONFIG_MT_SUPPORT)) + phy->flags |= FLAGS_MDC_MDIO_WA_G; + + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(bp, sync_offset); + + /* Update media type for non-PMF sync only for the first time + * In case the media type changes afterwards, it will be updated + * using the update_status function + */ + if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx))) == 0) { + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx)); + } + REG_WR(bp, sync_offset, media_types); + + bnx2x_phy_def_cfg(params, phy, phy_index); + params->num_phys++; + } + + DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys); + return 0; +} + +static void bnx2x_init_bmac_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 1; + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_BMAC; + + vars->phy_flags = PHY_XGXS_FLAG; + + bnx2x_xgxs_deassert(params); + + /* Set bmac loopback */ + bnx2x_bmac_enable(params, vars, 1, 1); + + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} + +static void bnx2x_init_emac_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 1; + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_EMAC; + + vars->phy_flags = PHY_XGXS_FLAG; + + bnx2x_xgxs_deassert(params); + /* Set bmac loopback */ + bnx2x_emac_enable(params, vars, 1); + bnx2x_emac_program(params, vars); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} + +static void bnx2x_init_xmac_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 1; + if (!params->req_line_speed[0]) + vars->line_speed = SPEED_10000; + else + vars->line_speed = params->req_line_speed[0]; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_XMAC; + vars->phy_flags = PHY_XGXS_FLAG; + /* Set WC to loopback mode since link is required to provide clock + * to the XMAC in 20G mode + */ + bnx2x_set_aer_mmd(params, ¶ms->phy[0]); + bnx2x_warpcore_reset_lane(bp, ¶ms->phy[0], 0); + params->phy[INT_PHY].config_loopback( + ¶ms->phy[INT_PHY], + params); + + bnx2x_xmac_enable(params, vars, 1); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} + +static void bnx2x_init_umac_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 1; + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_UMAC; + vars->phy_flags = PHY_XGXS_FLAG; + bnx2x_umac_enable(params, vars, 1); + + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} + +static void bnx2x_init_xgxs_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + struct bnx2x_phy *int_phy = ¶ms->phy[INT_PHY]; + vars->link_up = 1; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->duplex = DUPLEX_FULL; + if (params->req_line_speed[0] == SPEED_1000) + vars->line_speed = SPEED_1000; + else if 
((params->req_line_speed[0] == SPEED_20000) || + (int_phy->flags & FLAGS_WC_DUAL_MODE)) + vars->line_speed = SPEED_20000; + else + vars->line_speed = SPEED_10000; + + if (!USES_WARPCORE(bp)) + bnx2x_xgxs_deassert(params); + bnx2x_link_initialize(params, vars); + + if (params->req_line_speed[0] == SPEED_1000) { + if (USES_WARPCORE(bp)) + bnx2x_umac_enable(params, vars, 0); + else { + bnx2x_emac_program(params, vars); + bnx2x_emac_enable(params, vars, 0); + } + } else { + if (USES_WARPCORE(bp)) + bnx2x_xmac_enable(params, vars, 0); + else + bnx2x_bmac_enable(params, vars, 0, 1); + } + + if (params->loopback_mode == LOOPBACK_XGXS) { + /* Set 10G XGXS loopback */ + int_phy->config_loopback(int_phy, params); + } else { + /* Set external phy loopback */ + u8 phy_index; + for (phy_index = EXT_PHY1; + phy_index < params->num_phys; phy_index++) + if (params->phy[phy_index].config_loopback) + params->phy[phy_index].config_loopback( + ¶ms->phy[phy_index], + params); + } + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + + bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); +} + +void bnx2x_set_rx_filter(struct link_params *params, u8 en) +{ + struct bnx2x *bp = params->bp; + u8 val = en * 0x1F; + + /* Open / close the gate between the NIG and the BRB */ + if (!CHIP_IS_E1x(bp)) + val |= en * 0x20; + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val); + + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4, + en*0x3); + } + + REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), en); +} +static int bnx2x_avoid_link_flap(struct link_params *params, + struct link_vars *vars) +{ + u32 phy_idx; + u32 dont_clear_stat, lfa_sts; + struct bnx2x *bp = params->bp; + + bnx2x_set_mdio_emac_per_phy(bp, params); + /* Sync the link parameters */ + bnx2x_link_status_update(params, vars); + + /* + * The module verification was already done by previous link owner, + * so this call is meant only to get warning message + */ + + for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) { + struct bnx2x_phy *phy = ¶ms->phy[phy_idx]; + if (phy->phy_specific_func) { + DP(NETIF_MSG_LINK, "Calling PHY specific func\n"); + phy->phy_specific_func(phy, params, PHY_INIT); + } + if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) || + (phy->media_type == ETH_PHY_SFP_1G_FIBER) || + (phy->media_type == ETH_PHY_DA_TWINAX)) + bnx2x_verify_sfp_module(phy, params); + } + lfa_sts = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + lfa_sts)); + + dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT; + + /* Re-enable the NIG/MAC */ + if (CHIP_IS_E3(bp)) { + if (!dont_clear_stat) { + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + } + if (vars->line_speed < SPEED_10000) + bnx2x_umac_enable(params, vars, 0); + else + bnx2x_xmac_enable(params, vars, 0); + } else { + if (vars->line_speed < SPEED_10000) + bnx2x_emac_enable(params, vars, 0); + else + bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat); + } + + /* Increment LFA count */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >> + LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_AVOIDANCE_COUNT_OFFSET)); + /* Clear link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + + REG_WR(bp, params->lfa_base + + 
offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + + /* Disable NIG DRAIN */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + + /* Enable interrupts */ + bnx2x_link_int_enable(params); + return 0; +} + +static void bnx2x_cannot_avoid_link_flap(struct link_params *params, + struct link_vars *vars, + int lfa_status) +{ + u32 lfa_sts, cfg_idx, tmp_val; + struct bnx2x *bp = params->bp; + + bnx2x_link_reset(params, vars, 1); + + if (!params->lfa_base) + return; + /* Store the new link parameters */ + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex), + params->req_duplex[0] | (params->req_duplex[1] << 16)); + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl), + params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16)); + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed), + params->req_line_speed[0] | (params->req_line_speed[1] << 16)); + + for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) { + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx]), + params->speed_cap_mask[cfg_idx]); + } + + tmp_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + tmp_val &= ~REQ_FC_AUTO_ADV_MASK; + tmp_val |= params->req_fc_auto_adv; + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), tmp_val); + + lfa_sts = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts)); + + /* Clear the "Don't Clear Statistics" bit, and set reason */ + lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT; + + /* Set link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) << + LFA_LINK_FLAP_REASON_OFFSET); + + /* Increment link flap counter */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_COUNT_MASK) >> + LINK_FLAP_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_COUNT_OFFSET)); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + /* Proceed with regular link initialization */ +} + +int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) +{ + int lfa_status; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Phy Initialization started\n"); + DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", + params->req_line_speed[0], params->req_flow_ctrl[0]); + DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", + params->req_line_speed[1], params->req_flow_ctrl[1]); + DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv); + vars->link_status = 0; + vars->phy_link_up = 0; + vars->link_up = 0; + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_NONE; + vars->phy_flags = 0; + vars->check_kr2_recovery_cnt = 0; + params->link_flags = PHY_INITIALIZED; + /* Driver opens NIG-BRB filters */ + bnx2x_set_rx_filter(params, 1); + bnx2x_chng_link_count(params, true); + /* Check if link flap can be avoided */ + lfa_status = bnx2x_check_lfa(params); + + if (lfa_status == 0) { + DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n"); + return bnx2x_avoid_link_flap(params, vars); + } + + DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_sta=0x%x\n", + lfa_status); + bnx2x_cannot_avoid_link_flap(params, vars, lfa_status); + + /* Disable attentions */ + bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); + + 
bnx2x_emac_init(params, vars); + + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + + if (params->num_phys == 0) { + DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); + return -EINVAL; + } + set_phy_vars(params, vars); + + DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); + switch (params->loopback_mode) { + case LOOPBACK_BMAC: + bnx2x_init_bmac_loopback(params, vars); + break; + case LOOPBACK_EMAC: + bnx2x_init_emac_loopback(params, vars); + break; + case LOOPBACK_XMAC: + bnx2x_init_xmac_loopback(params, vars); + break; + case LOOPBACK_UMAC: + bnx2x_init_umac_loopback(params, vars); + break; + case LOOPBACK_XGXS: + case LOOPBACK_EXT_PHY: + bnx2x_init_xgxs_loopback(params, vars); + break; + default: + if (!CHIP_IS_E3(bp)) { + if (params->switch_cfg == SWITCH_CFG_10G) + bnx2x_xgxs_deassert(params); + else + bnx2x_serdes_deassert(bp, params->port); + } + bnx2x_link_initialize(params, vars); + msleep(30); + bnx2x_link_int_enable(params); + break; + } + bnx2x_update_mng(params, vars->link_status); + + bnx2x_update_mng_eee(params, vars->eee_status); + return 0; +} + +int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, + u8 reset_ext_phy) +{ + struct bnx2x *bp = params->bp; + u8 phy_index, port = params->port, clear_latch_ind = 0; + DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); + /* Disable attentions */ + vars->link_status = 0; + bnx2x_chng_link_count(params, true); + bnx2x_update_mng(params, vars->link_status); + vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | + SHMEM_EEE_ACTIVE_BIT); + bnx2x_update_mng_eee(params, vars->eee_status); + bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); + + /* Activate nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); + + /* Disable nig egress interface */ + if (!CHIP_IS_E3(bp)) { + REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); + } + + if (!CHIP_IS_E3(bp)) { + bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); + } else { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } + /* Disable emac */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + + usleep_range(10000, 20000); + /* The PHY reset is controlled by GPIO 1 + * Hold it as vars low + */ + /* Clear link led */ + bnx2x_set_mdio_emac_per_phy(bp, params); + bnx2x_set_led(params, vars, LED_MODE_OFF, 0); + + if (reset_ext_phy) { + for (phy_index = EXT_PHY1; phy_index < params->num_phys; + phy_index++) { + if (params->phy[phy_index].link_reset) { + bnx2x_set_aer_mmd(params, + ¶ms->phy[phy_index]); + params->phy[phy_index].link_reset( + ¶ms->phy[phy_index], + params); + } + if (params->phy[phy_index].flags & + FLAGS_REARM_LATCH_SIGNAL) + clear_latch_ind = 1; + } + } + + if (clear_latch_ind) { + /* Clear latching indication */ + bnx2x_rearm_latch_signal(bp, port, 0); + bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + } + if (params->phy[INT_PHY].link_reset) + params->phy[INT_PHY].link_reset( + ¶ms->phy[INT_PHY], params); + + /* Disable nig ingress interface */ + if (!CHIP_IS_E3(bp)) { + /* Reset BigMac */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); + REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); + } else { 
+ u32 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + bnx2x_set_xumac_nig(params, 0, 0); + if (REG_RD(bp, MISC_REG_RESET_REG_2) & + MISC_REGISTERS_RESET_REG_2_XMAC) + REG_WR(bp, xmac_base + XMAC_REG_CTRL, + XMAC_CTRL_REG_SOFT_RESET); + } + vars->link_up = 0; + vars->phy_flags = 0; + return 0; +} +int bnx2x_lfa_reset(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 0; + vars->phy_flags = 0; + params->link_flags &= ~PHY_INITIALIZED; + if (!params->lfa_base) + return bnx2x_link_reset(params, vars, 1); + /* + * Activate NIG drain so that during this time the device won't send + * anything while it is unable to respond. + */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); + + /* + * Close gracefully the gate from BMAC to NIG such that no half packets + * are passed. + */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } + /* Wait 10ms for the pipe to clean up */ + usleep_range(10000, 20000); + + /* Clean the NIG-BRB using the network filters in a way that will + * not cut a packet in the middle. + */ + bnx2x_set_rx_filter(params, 0); + + /* + * Re-open the gate between the BMAC and the NIG, after verifying the + * gate to the BRB is closed, otherwise packets may arrive at the + * firmware before the driver has initialized it. The target is to achieve + * minimum management protocol down time. + */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 1); + bnx2x_set_umac_rxtx(params, 1); + } + /* Disable NIG drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + return 0; +} + +/****************************************************************************/ +/* Common function */ +/****************************************************************************/ +static int bnx2x_8073_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 shmem2_base_path[], u8 phy_index, + u32 chip_id) +{ + struct bnx2x_phy phy[PORT_MAX]; + struct bnx2x_phy *phy_blk[PORT_MAX]; + u16 val; + s8 port = 0; + s8 port_of_path = 0; + u32 swap_val, swap_override; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + port ^= (swap_val && swap_override); + bnx2x_ext_phy_hw_reset(bp, port); + /* PART1 - Reset both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + u32 shmem_base, shmem2_base; + /* In E2, the same phy is used for port0 of the two paths */ + if (CHIP_IS_E1x(bp)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; + } + + /* Extract the ext phy address for the port */ + if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, + port_of_path, &phy[port]) != + 0) { + DP(NETIF_MSG_LINK, "populate_phy failed\n"); + return -EINVAL; + } + /* Disable attentions */ + bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path*4, + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); + + /* Need to take the phy out of low power mode in order + * to write to its registers + */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + + /* Reset the phy */ + bnx2x_cl45_write(bp, &phy[port], +
MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1<<15); + } + + /* Add delay of 150ms after reset */ + msleep(150); + + if (phy[PORT_0].addr & 0x1) { + phy_blk[PORT_0] = &(phy[PORT_1]); + phy_blk[PORT_1] = &(phy[PORT_0]); + } else { + phy_blk[PORT_0] = &(phy[PORT_0]); + phy_blk[PORT_1] = &(phy[PORT_1]); + } + + /* PART2 - Download firmware to both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + if (CHIP_IS_E1x(bp)) + port_of_path = port; + else + port_of_path = 0; + + DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", + phy_blk[port]->addr); + if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], + port_of_path)) + return -EINVAL; + + /* Only set bit 10 = 1 (Tx power down) */ + bnx2x_cl45_read(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); + + /* Phase1 of TX_POWER_DOWN reset */ + bnx2x_cl45_write(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, + (val | 1<<10)); + } + + /* Toggle Transmitter: Power down and then up with 600ms delay + * between + */ + msleep(600); + + /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + /* Phase2 of POWER_DOWN_RESET */ + /* Release bit 10 (Release Tx power down) */ + bnx2x_cl45_read(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); + + bnx2x_cl45_write(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); + usleep_range(15000, 30000); + + /* Read modify write the SPI-ROM version select register */ + bnx2x_cl45_read(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, &val); + bnx2x_cl45_write(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); + + /* set GPIO2 back to LOW */ + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + } + return 0; +} +static int bnx2x_8726_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 shmem2_base_path[], u8 phy_index, + u32 chip_id) +{ + u32 val; + s8 port; + struct bnx2x_phy phy; + /* Use port1 because of the static port-swap */ + /* Enable the module detection interrupt */ + val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); + val |= ((1<= PORT_0; port--) { + u32 shmem_base, shmem2_base; + + /* In E2, same phy is using for port0 of the two paths */ + if (CHIP_IS_E1x(bp)) { + shmem_base = shmem_base_path[0]; + shmem2_base = shmem2_base_path[0]; + port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; + } + + /* Extract the ext phy address for the port */ + if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, + port_of_path, &phy[port]) != + 0) { + DP(NETIF_MSG_LINK, "populate phy failed\n"); + return -EINVAL; + } + /* disable attentions */ + bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path*4, + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); + + + /* Reset the phy */ + bnx2x_cl45_write(bp, &phy[port], + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); + } + + /* Add delay of 150ms after reset */ + msleep(150); + if (phy[PORT_0].addr & 0x1) { + phy_blk[PORT_0] = &(phy[PORT_1]); + phy_blk[PORT_1] = &(phy[PORT_0]); + } else { + phy_blk[PORT_0] = &(phy[PORT_0]); + phy_blk[PORT_1] = &(phy[PORT_1]); + } + /* PART2 - Download firmware to both phys */ + for (port = PORT_MAX - 1; port >= PORT_0; port--) { + if (CHIP_IS_E1x(bp)) + port_of_path = port; + else + port_of_path = 0; + DP(NETIF_MSG_LINK, 
"Loading spirom for phy address 0x%x\n", + phy_blk[port]->addr); + if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], + port_of_path)) + return -EINVAL; + /* Disable PHY transmitter output */ + bnx2x_cl45_write(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_DISABLE, 1); + + } + return 0; +} + +static int bnx2x_84833_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 shmem2_base_path[], + u8 phy_index, + u32 chip_id) +{ + u8 reset_gpios; + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); + DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", + reset_gpios); + return 0; +} + +static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], + u32 shmem2_base_path[], u8 phy_index, + u32 ext_phy_type, u32 chip_id) +{ + int rc = 0; + + switch (ext_phy_type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: + rc = bnx2x_8073_common_init_phy(bp, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: + rc = bnx2x_8727_common_init_phy(bp, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + /* GPIO1 affects both ports, so there's need to pull + * it for single port alone + */ + rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: + /* GPIO3's are linked, and so both need to be toggled + * to obtain required 2us pulse. 
+ */ + rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, + shmem2_base_path, + phy_index, chip_id); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: + rc = -EINVAL; + break; + default: + DP(NETIF_MSG_LINK, + "ext_phy 0x%x common init not required\n", + ext_phy_type); + break; + } + + if (rc) + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + 0); + return rc; +} + +int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], + u32 shmem2_base_path[], u32 chip_id) +{ + int rc = 0; + u32 phy_ver, val; + u8 phy_index = 0; + u32 ext_phy_type, ext_phy_config; + + bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0); + bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1); + DP(NETIF_MSG_LINK, "Begin common phy init\n"); + if (CHIP_IS_E3(bp)) { + /* Enable EPIO */ + val = REG_RD(bp, MISC_REG_GEN_PURP_HWG); + REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1); + } + /* Check if common init was already done */ + phy_ver = REG_RD(bp, shmem_base_path[0] + + offsetof(struct shmem_region, + port_mb[PORT_0].ext_phy_fw_version)); + if (phy_ver) { + DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n", + phy_ver); + return 0; + } + + /* Read the ext_phy_type for arbitrary port(0) */ + for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; + phy_index++) { + ext_phy_config = bnx2x_get_ext_phy_config(bp, + shmem_base_path[0], + phy_index, 0); + ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); + rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path, + shmem2_base_path, + phy_index, ext_phy_type, + chip_id); + } + return rc; +} + +static void bnx2x_check_over_curr(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port = params->port; + u32 pin_val; + + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) & + PORT_HW_CFG_E3_OVER_CURRENT_MASK) >> + PORT_HW_CFG_E3_OVER_CURRENT_SHIFT; + + /* Ignore check if no external input PIN available */ + if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0) + return; + + if (!pin_val) { + if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) { + netdev_err(bp->dev, "Error: Power fault on Port %d has" + " been detected and the power to " + "that SFP+ module has been removed" + " to prevent failure of the card." + " Please remove the SFP+ module and" + " restart the system to clear this" + " error.\n", + params->port); + vars->phy_flags |= PHY_OVER_CURRENT_FLAG; + bnx2x_warpcore_power_module(params, 0); + } + } else + vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; +} + +/* Returns 0 if no change occurred since last check; 1 otherwise. */ +static u8 bnx2x_analyze_link_error(struct link_params *params, + struct link_vars *vars, u32 status, + u32 phy_flag, u32 link_flag, u8 notify) +{ + struct bnx2x *bp = params->bp; + /* Compare new value with previous value */ + u8 led_mode; + u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0; + + if ((status ^ old_status) == 0) + return 0; + + /* If values differ */ + switch (phy_flag) { + case PHY_HALF_OPEN_CONN_FLAG: + DP(NETIF_MSG_LINK, "Analyze Remote Fault\n"); + break; + case PHY_SFP_TX_FAULT_FLAG: + DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); + break; + default: + DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n"); + } + DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, + old_status, status); + + /* Do not touch the link in case physical link down */ + if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) + return 1; + + /* a. Update shmem->link_status accordingly + * b. 
Update link_vars->link_up + */ + if (status) { + vars->link_status &= ~LINK_STATUS_LINK_UP; + vars->link_status |= link_flag; + vars->link_up = 0; + vars->phy_flags |= phy_flag; + + /* activate nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); + /* Set LED mode to off since the PHY doesn't know about these + * errors + */ + led_mode = LED_MODE_OFF; + } else { + vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_status &= ~link_flag; + vars->link_up = 1; + vars->phy_flags &= ~phy_flag; + led_mode = LED_MODE_OPER; + + /* Clear nig drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + } + bnx2x_sync_link(params, vars); + /* Update the LED according to the link state */ + bnx2x_set_led(params, vars, led_mode, SPEED_10000); + + /* Update link status in the shared memory */ + bnx2x_update_mng(params, vars->link_status); + + /* C. Trigger General Attention */ + vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; + if (notify) + bnx2x_notify_link_changed(bp); + + return 1; +} + +/****************************************************************************** +* Description: +* This function checks for half opened connection change indication. +* When such change occurs, it calls the bnx2x_analyze_link_error +* to check if Remote Fault is set or cleared. Reception of remote fault +* status message in the MAC indicates that the peer's MAC has detected +* a fault, for example, due to break in the TX side of fiber. +* +******************************************************************************/ +static int bnx2x_check_half_open_conn(struct link_params *params, + struct link_vars *vars, + u8 notify) +{ + struct bnx2x *bp = params->bp; + u32 lss_status = 0; + u32 mac_base; + /* In case link status is physically up @ 10G do */ + if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) || + (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4))) + return 0; + + if (CHIP_IS_E3(bp) && + (REG_RD(bp, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_XMAC))) { + /* Check E3 XMAC */ + /* Note that link speed cannot be queried here, since it may be + * zero while link is down. In case UMAC is active, LSS will + * simply not be set + */ + mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Clear stick bits (Requires rising edge) */ + REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) + lss_status = 1; + + bnx2x_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); + } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { + /* Check E1X / E2 BMAC */ + u32 lss_status_reg; + u32 wb_data[2]; + mac_base = params->port ? 
NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM; + /* Read BIGMAC_REGISTER_RX_LSS_STATUS */ + if (CHIP_IS_E2(bp)) + lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT; + else + lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS; + + REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); + lss_status = (wb_data[0] > 0); + + bnx2x_analyze_link_error(params, vars, lss_status, + PHY_HALF_OPEN_CONN_FLAG, + LINK_STATUS_NONE, notify); + } + return 0; +} +static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin, value = 0; + u8 led_change, port = params->port; + + /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */ + cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_TX_FAULT_MASK) >> + PORT_HW_CFG_E3_TX_FAULT_SHIFT; + + if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) { + DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin); + return; + } + + led_change = bnx2x_analyze_link_error(params, vars, value, + PHY_SFP_TX_FAULT_FLAG, + LINK_STATUS_SFP_TX_FAULT, 1); + + if (led_change) { + /* Change TX_Fault led, set link status for further syncs */ + u8 led_mode; + + if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) { + led_mode = MISC_REGISTERS_GPIO_HIGH; + vars->link_status |= LINK_STATUS_SFP_TX_FAULT; + } else { + led_mode = MISC_REGISTERS_GPIO_LOW; + vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; + } + + /* If module is unapproved, led should be on regardless */ + if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) { + DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n", + led_mode); + bnx2x_set_e3_module_fault_led(params, led_mode); + } + } +} +static void bnx2x_kr2_recovery(struct link_params *params, + struct link_vars *vars, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "KR2 recovery\n"); + bnx2x_warpcore_enable_AN_KR2(phy, params, vars); + bnx2x_warpcore_restart_AN_KR(phy, params); +} + +static void bnx2x_check_kr2_wa(struct link_params *params, + struct link_vars *vars, + struct bnx2x_phy *phy) +{ + struct bnx2x *bp = params->bp; + u16 base_page, next_page, not_kr2_device, lane; + int sigdet; + + /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery + * Since some switches tend to reinit the AN process and clear the + * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled + * and recovered many times + */ + if (vars->check_kr2_recovery_cnt > 0) { + vars->check_kr2_recovery_cnt--; + return; + } + + sigdet = bnx2x_warpcore_get_sigdet(phy, params); + if (!sigdet) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + bnx2x_kr2_recovery(params, vars, phy); + DP(NETIF_MSG_LINK, "No sigdet\n"); + } + return; + } + + lane = bnx2x_get_warpcore_lane(phy, params); + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, lane); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &base_page); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG2, &next_page); + bnx2x_set_aer_mmd(params, phy); + + /* CL73 has not begun yet */ + if (base_page == 0) { + if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { + bnx2x_kr2_recovery(params, vars, phy); + DP(NETIF_MSG_LINK, "No BP\n"); + } + return; + } + + /* In case NP bit is not set in the BasePage, or it is set, + * but only KX is advertised, declare this link partner as non-KR2 + * device. 
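+	 * (Here 0x8000 is the Next Page (NP) bit of the link partner's CL73
+	 * base page; the next_page test below presumably matches a message
+	 * page that only advertises KX, i.e. no KR2 ability.)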
+	 */
+	not_kr2_device = (((base_page & 0x8000) == 0) ||
+			  (((base_page & 0x8000) &&
+			    ((next_page & 0xe0) == 0x20))));
+
+	/* In case KR2 is already disabled, check if we need to re-enable it */
+	if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+		if (!not_kr2_device) {
+			DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
+			   next_page);
+			bnx2x_kr2_recovery(params, vars, phy);
+		}
+		return;
+	}
+	/* KR2 is enabled, but not KR2 device */
+	if (not_kr2_device) {
+		/* Disable KR2 on both lanes */
+		DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
+		bnx2x_disable_kr2(params, vars, phy);
+		/* Restart AN on leading lane */
+		bnx2x_warpcore_restart_AN_KR(phy, params);
+		return;
+	}
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	u16 phy_idx;
+	struct bnx2x *bp = params->bp;
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+			if (bnx2x_check_half_open_conn(params, vars, 1) !=
+			    0)
+				DP(NETIF_MSG_LINK, "Fault detection failed\n");
+			break;
+		}
+	}
+
+	if (CHIP_IS_E3(bp)) {
+		struct bnx2x_phy *phy = &params->phy[INT_PHY];
+		bnx2x_set_aer_mmd(params, phy);
+		if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
+		    (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+			bnx2x_check_kr2_wa(params, vars, phy);
+		bnx2x_check_over_curr(params, vars);
+		if (vars->rx_tx_asic_rst)
+			bnx2x_warpcore_config_runtime(phy, params, vars);
+
+		if ((REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region, dev_info.
+				     port_hw_config[params->port].default_cfg))
+		     & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+		    PORT_HW_CFG_NET_SERDES_IF_SFI) {
+			if (bnx2x_is_sfp_module_plugged(phy, params)) {
+				bnx2x_sfp_tx_fault_detection(phy, params, vars);
+			} else if (vars->link_status &
+				   LINK_STATUS_SFP_TX_FAULT) {
+				/* Clean trail, interrupt corrects the leds */
+				vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+				vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+				/* Update link status in the shared memory */
+				bnx2x_update_mng(params, vars->link_status);
+			}
+		}
+	}
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+			     u32 shmem_base,
+			     u32 shmem2_base,
+			     u8 port)
+{
+	u8 phy_index, fan_failure_det_req = 0;
+	struct bnx2x_phy phy;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	     phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy)
+		    != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+		fan_failure_det_req |= (phy.flags &
+					FLAGS_FAN_FAILURE_DET_REQ);
+	}
+	return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+	u8 phy_index;
+	struct bnx2x *bp = params->bp;
+	bnx2x_update_mng(params, 0);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	     phy_index++) {
+		if (params->phy[phy_index].hw_reset) {
+			params->phy[phy_index].hw_reset(
+				&params->phy[phy_index],
+				params);
+			params->phy[phy_index] = phy_null;
+		}
+	}
+}
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port)
+{
+	u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+	u32 val;
+	u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+	if (CHIP_IS_E3(bp)) {
+		if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+					      shmem_base,
+					      port,
+					      &gpio_num,
+					      &gpio_port) !=
0) + return; + } else { + struct bnx2x_phy phy; + for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; + phy_index++) { + if (bnx2x_populate_phy(bp, phy_index, shmem_base, + shmem2_base, port, &phy) + != 0) { + DP(NETIF_MSG_LINK, "populate phy failed\n"); + return; + } + if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) { + gpio_num = MISC_REGISTERS_GPIO_3; + gpio_port = port; + break; + } + } + } + + if (gpio_num == 0xff) + return; + + /* Set GPIO3 to trigger SFP+ module insertion/removal */ + bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port); + + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + gpio_port ^= (swap_val && swap_override); + + vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 << + (gpio_num + (gpio_port << 2)); + + sync_offset = shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + REG_WR(bp, sync_offset, vars->aeu_int_mask); + + DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n", + gpio_num, gpio_port, vars->aeu_int_mask); + + if (port == 0) + offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + else + offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; + + /* Open appropriate AEU for interrupts */ + aeu_mask = REG_RD(bp, offset); + aeu_mask |= vars->aeu_int_mask; + REG_WR(bp, offset, aeu_mask); + + /* Enable the GPIO to trigger interrupt */ + val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); + val |= 1 << (gpio_num + (gpio_port << 2)); + REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h new file mode 100644 index 000000000..d9cce4c38 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -0,0 +1,544 @@ +/* Copyright 2008-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Written by Yaniv Rosner + * + */ + +#ifndef BNX2X_LINK_H +#define BNX2X_LINK_H + + + +/***********************************************************/ +/* Defines */ +/***********************************************************/ +#define DEFAULT_PHY_DEV_ADDR 3 +#define E2_DEFAULT_PHY_DEV_ADDR 5 + + + +#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO +#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX +#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX +#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH +#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE + +#define NET_SERDES_IF_XFI 1 +#define NET_SERDES_IF_SFI 2 +#define NET_SERDES_IF_KR 3 +#define NET_SERDES_IF_DXGXS 4 + +#define SPEED_AUTO_NEG 0 +#define SPEED_20000 20000 + +#define I2C_DEV_ADDR_A0 0xa0 +#define I2C_DEV_ADDR_A2 0xa2 + +#define SFP_EEPROM_PAGE_SIZE 16 +#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 +#define SFP_EEPROM_VENDOR_NAME_SIZE 16 +#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 +#define SFP_EEPROM_VENDOR_OUI_SIZE 3 +#define SFP_EEPROM_PART_NO_ADDR 0x28 +#define SFP_EEPROM_PART_NO_SIZE 16 +#define SFP_EEPROM_REVISION_ADDR 0x38 +#define SFP_EEPROM_REVISION_SIZE 4 +#define SFP_EEPROM_SERIAL_ADDR 0x44 +#define SFP_EEPROM_SERIAL_SIZE 16 +#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ +#define SFP_EEPROM_DATE_SIZE 6 +#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c +#define SFP_EEPROM_DIAG_TYPE_SIZE 1 +#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2) +#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e +#define SFP_EEPROM_SFF_8472_COMP_SIZE 1 + +#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e +#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f + +#define PWR_FLT_ERR_MSG_LEN 250 + +#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) +#define XGXS_EXT_PHY_ADDR(ext_phy_config) \ + (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \ + PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT) +#define SERDES_EXT_PHY_TYPE(ext_phy_config) \ + ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) + +/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */ +#define SINGLE_MEDIA_DIRECT(params) (params->num_phys == 1) +/* Single Media board contains single external phy */ +#define SINGLE_MEDIA(params) (params->num_phys == 2) +/* Dual Media board contains two external phy with different media */ +#define DUAL_MEDIA(params) (params->num_phys == 3) + +#define FW_PARAM_PHY_ADDR_MASK 0x000000FF +#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00 +#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000 +#define FW_PARAM_MDIO_CTRL_OFFSET 16 +#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \ + FW_PARAM_PHY_ADDR_MASK) +#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \ + FW_PARAM_PHY_TYPE_MASK) +#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \ + FW_PARAM_MDIO_CTRL_MASK) >> \ + FW_PARAM_MDIO_CTRL_OFFSET) +#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ + (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) + + +#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 +#define PFC_BRB_FULL_LB_XON_THRESHOLD 250 + +#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b)) + +#define BMAC_CONTROL_RX_ENABLE 2 +/***********************************************************/ +/* Structs */ +/***********************************************************/ +#define INT_PHY 0 +#define EXT_PHY1 1 +#define EXT_PHY2 2 +#define MAX_PHYS 3 + +/* Same configuration is shared between the XGXS and the first external phy */ +#define LINK_CONFIG_SIZE (MAX_PHYS - 1) +#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? 
\ + 0 : (_phy_idx - 1)) +/***********************************************************/ +/* bnx2x_phy struct */ +/* Defines the required arguments and function per phy */ +/***********************************************************/ +struct link_vars; +struct link_params; +struct bnx2x_phy; + +typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params, + struct link_vars *vars); +typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params, + struct link_vars *vars); +typedef void (*link_reset_t)(struct bnx2x_phy *phy, + struct link_params *params); +typedef void (*config_loopback_t)(struct bnx2x_phy *phy, + struct link_params *params); +typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len); +typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params); +typedef void (*set_link_led_t)(struct bnx2x_phy *phy, + struct link_params *params, u8 mode); +typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy, + struct link_params *params, u32 action); +struct bnx2x_reg_set { + u8 devad; + u16 reg; + u16 val; +}; + +struct bnx2x_phy { + u32 type; + + /* Loaded during init */ + u8 addr; + u8 def_md_devad; + u16 flags; + /* No Over-Current detection */ +#define FLAGS_NOC (1<<1) + /* Fan failure detection required */ +#define FLAGS_FAN_FAILURE_DET_REQ (1<<2) + /* Initialize first the XGXS and only then the phy itself */ +#define FLAGS_INIT_XGXS_FIRST (1<<3) +#define FLAGS_WC_DUAL_MODE (1<<4) +#define FLAGS_4_PORT_MODE (1<<5) +#define FLAGS_REARM_LATCH_SIGNAL (1<<6) +#define FLAGS_SFP_NOT_APPROVED (1<<7) +#define FLAGS_MDC_MDIO_WA (1<<8) +#define FLAGS_DUMMY_READ (1<<9) +#define FLAGS_MDC_MDIO_WA_B0 (1<<10) +#define FLAGS_TX_ERROR_CHECK (1<<12) +#define FLAGS_EEE (1<<13) +#define FLAGS_MDC_MDIO_WA_G (1<<15) + + /* preemphasis values for the rx side */ + u16 rx_preemphasis[4]; + + /* preemphasis values for the tx side */ + u16 tx_preemphasis[4]; + + /* EMAC address for access MDIO */ + u32 mdio_ctrl; + + u32 supported; + + u32 media_type; +#define ETH_PHY_UNSPECIFIED 0x0 +#define ETH_PHY_SFPP_10G_FIBER 0x1 +#define ETH_PHY_XFP_FIBER 0x2 +#define ETH_PHY_DA_TWINAX 0x3 +#define ETH_PHY_BASE_T 0x4 +#define ETH_PHY_SFP_1G_FIBER 0x5 +#define ETH_PHY_KR 0xf0 +#define ETH_PHY_CX4 0xf1 +#define ETH_PHY_NOT_PRESENT 0xff + + /* The address in which version is located*/ + u32 ver_addr; + + u16 req_flow_ctrl; + + u16 req_line_speed; + + u32 speed_cap_mask; + + u16 req_duplex; + u16 rsrv; + /* Called per phy/port init, and it configures LASI, speed, autoneg, + duplex, flow control negotiation, etc. */ + config_init_t config_init; + + /* Called due to interrupt. It determines the link, speed */ + read_status_t read_status; + + /* Called when driver is unloading. 
Should reset the phy */ + link_reset_t link_reset; + + /* Set the loopback configuration for the phy */ + config_loopback_t config_loopback; + + /* Format the given raw number into str up to len */ + format_fw_ver_t format_fw_ver; + + /* Reset the phy (both ports) */ + hw_reset_t hw_reset; + + /* Set link led mode (on/off/oper)*/ + set_link_led_t set_link_led; + + /* PHY Specific tasks */ + phy_specific_func_t phy_specific_func; +#define DISABLE_TX 1 +#define ENABLE_TX 2 +#define PHY_INIT 3 +}; + +/* Inputs parameters to the CLC */ +struct link_params { + + u8 port; + + /* Default / User Configuration */ + u8 loopback_mode; +#define LOOPBACK_NONE 0 +#define LOOPBACK_EMAC 1 +#define LOOPBACK_BMAC 2 +#define LOOPBACK_XGXS 3 +#define LOOPBACK_EXT_PHY 4 +#define LOOPBACK_EXT 5 +#define LOOPBACK_UMAC 6 +#define LOOPBACK_XMAC 7 + + /* Device parameters */ + u8 mac_addr[6]; + + u16 req_duplex[LINK_CONFIG_SIZE]; + u16 req_flow_ctrl[LINK_CONFIG_SIZE]; + + u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */ + + /* shmem parameters */ + u32 shmem_base; + u32 shmem2_base; + u32 speed_cap_mask[LINK_CONFIG_SIZE]; + u32 switch_cfg; +#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH +#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH +#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT + + u32 lane_config; + + /* Phy register parameter */ + u32 chip_id; + + /* features */ + u32 feature_config_flags; +#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) +#define FEATURE_CONFIG_PFC_ENABLED (1<<1) +#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) +#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) +#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8) +#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) +#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) +#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11) +#define FEATURE_CONFIG_MT_SUPPORT (1<<13) +#define FEATURE_CONFIG_BOOT_FROM_SAN (1<<14) + + /* Will be populated during common init */ + struct bnx2x_phy phy[MAX_PHYS]; + + /* Will be populated during common init */ + u8 num_phys; + + u8 rsrv; + + /* Used to configure the EEE Tx LPI timer, has several modes of + * operation, according to bits 29:28 - + * 2'b00: Timer will be configured by nvram, output will be the value + * from nvram. + * 2'b01: Timer will be configured by nvram, output will be in + * microseconds. + * 2'b10: bits 1:0 contain an nvram value which will be used instead + * of the one located in the nvram. Output will be that value. + * 2'b11: bits 19:0 contain the idle timer in microseconds; output + * will be in microseconds. + * Bits 31:30 should be 2'b11 in order for EEE to be enabled. 
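+	 * As an illustrative combination (derived from the encoding above,
+	 * not a value taken from the driver): enabling EEE with a
+	 * driver-supplied idle timer of 16 usec would correspond to
+	 * EEE_MODE_ADV_LPI | EEE_MODE_ENABLE_LPI | EEE_MODE_OVERRIDE_NVRAM |
+	 * EEE_MODE_OUTPUT_TIME | (16 & EEE_MODE_TIMER_MASK).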
+ */ + u32 eee_mode; +#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00) +#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100) +#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000) +#define EEE_MODE_NVRAM_MASK (0x3) +#define EEE_MODE_TIMER_MASK (0xfffff) +#define EEE_MODE_OUTPUT_TIME (1<<28) +#define EEE_MODE_OVERRIDE_NVRAM (1<<29) +#define EEE_MODE_ENABLE_LPI (1<<30) +#define EEE_MODE_ADV_LPI (1<<31) + + u16 hw_led_mode; /* part of the hw_config read from the shmem */ + u32 multi_phy_config; + + /* Device pointer passed to all callback functions */ + struct bnx2x *bp; + u16 req_fc_auto_adv; /* Should be set to TX / BOTH when + req_flow_ctrl is set to AUTO */ + u16 link_flags; +#define LINK_FLAGS_INT_DISABLED (1<<0) +#define PHY_INITIALIZED (1<<1) + u32 lfa_base; + + /* The same definitions as the shmem2 parameter */ + u32 link_attr_sync; +}; + +/* Output parameters */ +struct link_vars { + u8 phy_flags; +#define PHY_XGXS_FLAG (1<<0) +#define PHY_SGMII_FLAG (1<<1) +#define PHY_PHYSICAL_LINK_FLAG (1<<2) +#define PHY_HALF_OPEN_CONN_FLAG (1<<3) +#define PHY_OVER_CURRENT_FLAG (1<<4) +#define PHY_SFP_TX_FAULT_FLAG (1<<5) + + u8 mac_type; +#define MAC_TYPE_NONE 0 +#define MAC_TYPE_EMAC 1 +#define MAC_TYPE_BMAC 2 +#define MAC_TYPE_UMAC 3 +#define MAC_TYPE_XMAC 4 + + u8 phy_link_up; /* internal phy link indication */ + u8 link_up; + + u16 line_speed; + u16 duplex; + + u16 flow_ctrl; + u16 ieee_fc; + + /* The same definitions as the shmem parameter */ + u32 link_status; + u32 eee_status; + u8 fault_detected; + u8 check_kr2_recovery_cnt; +#define CHECK_KR2_RECOVERY_CNT 5 + u16 periodic_flags; +#define PERIODIC_FLAGS_LINK_EVENT 0x0001 + + u32 aeu_int_mask; + u8 rx_tx_asic_rst; + u8 turn_to_run_wc_rt; + u16 rsrv2; +}; + +/***********************************************************/ +/* Functions */ +/***********************************************************/ +int bnx2x_phy_init(struct link_params *params, struct link_vars *vars); + +/* Reset the link. 
Should be called when driver or interface goes down + Before calling phy firmware upgrade, the reset_ext_phy should be set + to 0 */ +int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, + u8 reset_ext_phy); +int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars); +/* bnx2x_link_update should be called upon link interrupt */ +int bnx2x_link_update(struct link_params *params, struct link_vars *vars); + +/* use the following phy functions to read/write from external_phy + In order to use it to read/write internal phy registers, use + DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as + the register */ +int bnx2x_phy_read(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 *ret_val); + +int bnx2x_phy_write(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 val); + +/* Reads the link_status from the shmem, + and update the link vars accordingly */ +void bnx2x_link_status_update(struct link_params *input, + struct link_vars *output); +/* returns string representing the fw_version of the external phy */ +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version, + u16 len); + +/* Set/Unset the led + Basically, the CLC takes care of the led for the link, but in case one needs + to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to + blink the led, and LED_MODE_OFF to set the led off.*/ +int bnx2x_set_led(struct link_params *params, + struct link_vars *vars, u8 mode, u32 speed); +#define LED_MODE_OFF 0 +#define LED_MODE_ON 1 +#define LED_MODE_OPER 2 +#define LED_MODE_FRONT_PANEL_OFF 3 + +/* bnx2x_handle_module_detect_int should be called upon module detection + interrupt */ +void bnx2x_handle_module_detect_int(struct link_params *params); + +/* Get the actual link status. 
In case it returns 0, link is up, + otherwise link is down*/ +int bnx2x_test_link(struct link_params *params, struct link_vars *vars, + u8 is_serdes); + +/* One-time initialization for external phy after power up */ +int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], + u32 shmem2_base_path[], u32 chip_id); + +/* Reset the external PHY using GPIO */ +void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); + +/* Reset the external of SFX7101 */ +void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); + +/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ +int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u8 dev_addr, + u16 addr, u16 byte_cnt, u8 *o_buf); + +void bnx2x_hw_reset_phy(struct link_params *params); + +/* Check swap bit and adjust PHY order */ +u32 bnx2x_phy_selection(struct link_params *params); + +/* Probe the phys on board, and populate them in "params" */ +int bnx2x_phy_probe(struct link_params *params); + +/* Checks if fan failure detection is required on one of the phys on board */ +u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, + u32 shmem2_base, u8 port); + +/* Open / close the gate between the NIG and the BRB */ +void bnx2x_set_rx_filter(struct link_params *params, u8 en); + +/* DCBX structs */ + +/* Number of maximum COS per chip */ +#define DCBX_E2E3_MAX_NUM_COS (2) +#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6) +#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3) +#define DCBX_E3B0_MAX_NUM_COS ( \ + MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \ + DCBX_E3B0_MAX_NUM_COS_PORT1)) + +#define DCBX_MAX_NUM_COS ( \ + MAXVAL(DCBX_E3B0_MAX_NUM_COS, \ + DCBX_E2E3_MAX_NUM_COS)) + +/* PFC port configuration params */ +struct bnx2x_nig_brb_pfc_port_params { + /* NIG */ + u32 pause_enable; + u32 llfc_out_en; + u32 llfc_enable; + u32 pkt_priority_to_cos; + u8 num_of_rx_cos_priority_mask; + u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; + u32 llfc_high_priority_classes; + u32 llfc_low_priority_classes; +}; + + +/* ETS port configuration params */ +struct bnx2x_ets_bw_params { + u8 bw; +}; + +struct bnx2x_ets_sp_params { + /** + * valid values are 0 - 5. 0 is highest strict priority. + * There can't be two COS's with the same pri. 
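+	 * For example (illustrative only): a configuration with two strict
+	 * COS entries would use pri = 0 for the higher-priority class and
+	 * pri = 1 for the other.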
+ */ + u8 pri; +}; + +enum bnx2x_cos_state { + bnx2x_cos_state_strict = 0, + bnx2x_cos_state_bw = 1, +}; + +struct bnx2x_ets_cos_params { + enum bnx2x_cos_state state ; + union { + struct bnx2x_ets_bw_params bw_params; + struct bnx2x_ets_sp_params sp_params; + } params; +}; + +struct bnx2x_ets_params { + u8 num_of_cos; /* Number of valid COS entries*/ + struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; +}; + +/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB + * when link is already up + */ +int bnx2x_update_pfc(struct link_params *params, + struct link_vars *vars, + struct bnx2x_nig_brb_pfc_port_params *pfc_params); + + +/* Used to configure the ETS to disable */ +int bnx2x_ets_disabled(struct link_params *params, + struct link_vars *vars); + +/* Used to configure the ETS to BW limited */ +void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, + const u32 cos1_bw); + +/* Used to configure the ETS to strict */ +int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); + + +/* Configure the COS to ETS according to BW and SP settings.*/ +int bnx2x_ets_e3b0_config(const struct link_params *params, + const struct link_vars *vars, + struct bnx2x_ets_params *ets_params); + +void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, + u32 chip_id, u32 shmem_base, u32 shmem2_base, + u8 port); + +void bnx2x_period_func(struct link_params *params, struct link_vars *vars); + +#endif /* BNX2X_LINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c new file mode 100644 index 000000000..b1405d20d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -0,0 +1,14820 @@ +/* bnx2x_main.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include /* for dev_info() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnx2x.h" +#include "bnx2x_init.h" +#include "bnx2x_init_ops.h" +#include "bnx2x_cmn.h" +#include "bnx2x_vfpf.h" +#include "bnx2x_dcb.h" +#include "bnx2x_sp.h" +#include +#include "bnx2x_fw_file_hdr.h" +/* FW files */ +/*(DEBLOBBED)*/ +#define FW_FILE_NAME_E1 "/*(DEBLOBBED)*/" +#define FW_FILE_NAME_E1H "/*(DEBLOBBED)*/" +#define FW_FILE_NAME_E2 "/*(DEBLOBBED)*/" +#define bnx2x_init_block(bp, start, end) \ + return (printk(KERN_ERR "%s: Missing Free firmware\n", bp->dev->name),\ + -EINVAL) + +/* Time in jiffies before concluding the transmitter is hung */ +#define TX_TIMEOUT (5*HZ) + +static char version[] = + "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " + DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Eliezer Tamir"); +MODULE_DESCRIPTION("Broadcom NetXtreme II " + "BCM57710/57711/57711E/" + "57712/57712_MF/57800/57800_MF/57810/57810_MF/" + "57840/57840_MF Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +/*(DEBLOBBED)*/ + +int bnx2x_num_queues; +module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO); +MODULE_PARM_DESC(num_queues, + " Set number of queues (default is as a number of CPUs)"); + +static int disable_tpa; +module_param(disable_tpa, int, S_IRUGO); +MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); + +static int int_mode; +module_param(int_mode, int, S_IRUGO); +MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " + "(1 INT#x; 2 MSI)"); + +static int dropless_fc; +module_param(dropless_fc, int, S_IRUGO); +MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); + +static int mrrs = -1; +module_param(mrrs, int, S_IRUGO); +MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); + +static int debug; +module_param(debug, int, S_IRUGO); +MODULE_PARM_DESC(debug, " Default debug msglevel"); + +static struct workqueue_struct *bnx2x_wq; +struct workqueue_struct *bnx2x_iov_wq; + +struct bnx2x_mac_vals { + u32 xmac_addr; + u32 xmac_val; + u32 emac_addr; + u32 emac_val; + u32 umac_addr[2]; + u32 umac_val[2]; + u32 bmac_addr; + u32 bmac_val[2]; +}; + +enum bnx2x_board_type { + BCM57710 = 0, + BCM57711, + BCM57711E, + BCM57712, + BCM57712_MF, + BCM57712_VF, + BCM57800, + BCM57800_MF, + BCM57800_VF, + BCM57810, + BCM57810_MF, + BCM57810_VF, + BCM57840_4_10, + BCM57840_2_20, + BCM57840_MF, + BCM57840_VF, + BCM57811, + BCM57811_MF, + BCM57840_O, + BCM57840_MFO, + BCM57811_VF +}; + +/* indexed by board_type, above */ +static struct { + char *name; +} board_info[] = { + [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, + [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, + [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, + [BCM57712] = { 
"Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, + [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, + [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" }, + [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, + [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, + [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" }, + [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, + [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, + [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" }, + [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, + [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" }, + [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }, + [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" }, + [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" }, + [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, + [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" } +}; + +#ifndef PCI_DEVICE_ID_NX2_57710 +#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711 +#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711E +#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E +#endif +#ifndef PCI_DEVICE_ID_NX2_57712 +#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 +#endif +#ifndef PCI_DEVICE_ID_NX2_57712_MF +#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57712_VF +#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF +#endif +#ifndef PCI_DEVICE_ID_NX2_57800 +#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 +#endif +#ifndef PCI_DEVICE_ID_NX2_57800_MF +#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57800_VF +#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF +#endif +#ifndef PCI_DEVICE_ID_NX2_57810 +#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 +#endif +#ifndef PCI_DEVICE_ID_NX2_57810_MF +#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_O +#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE +#endif +#ifndef PCI_DEVICE_ID_NX2_57810_VF +#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_4_10 +#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10 +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_2_20 +#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20 +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_MFO +#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_MF +#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_VF +#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF +#endif +#ifndef PCI_DEVICE_ID_NX2_57811 +#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811 +#endif +#ifndef PCI_DEVICE_ID_NX2_57811_MF +#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57811_VF +#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF 
+#endif + +static const struct pci_device_id bnx2x_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); + +/* Global resources for unloading a previously loaded device */ +#define BNX2X_PREV_WAIT_NEEDED 1 +static DEFINE_SEMAPHORE(bnx2x_prev_sem); +static LIST_HEAD(bnx2x_prev_list); + +/* Forward declaration */ +static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); + +/**************************************************************************** +* General service functions +****************************************************************************/ + +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr); + +static void __storm_memset_dma_mapping(struct bnx2x *bp, + u32 addr, dma_addr_t mapping) +{ + REG_WR(bp, addr, U64_LO(mapping)); + REG_WR(bp, addr + 4, U64_HI(mapping)); +} + +static void storm_memset_spq_addr(struct bnx2x *bp, + dma_addr_t mapping, u16 abs_fid) +{ + u32 addr = XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); + + __storm_memset_dma_mapping(bp, addr, mapping); +} + +static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); +} + +static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), + enable); +} + +static void storm_memset_eq_data(struct bnx2x *bp, + struct event_ring_data *eq_data, + u16 pfid) +{ + size_t size = sizeof(struct 
event_ring_data); + + u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid); + + __storm_memset_struct(bp, addr, size, (u32 *)eq_data); +} + +static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, + u16 pfid) +{ + u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); + REG_WR16(bp, addr, eq_prod); +} + +/* used only at init + * locking is done by mcp + */ +static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) +{ + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); + pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); +} + +static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr) +{ + u32 val; + + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); + pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val); + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); + + return val; +} + +#define DMAE_DP_SRC_GRC "grc src_addr [%08x]" +#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]" +#define DMAE_DP_DST_GRC "grc dst_addr [%08x]" +#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" +#define DMAE_DP_DST_NONE "dst_addr [none]" + +static void bnx2x_dp_dmae(struct bnx2x *bp, + struct dmae_command *dmae, int msglvl) +{ + u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; + int i; + + switch (dmae->opcode & DMAE_COMMAND_DST) { + case DMAE_CMD_DST_PCI: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%x:%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + case DMAE_CMD_DST_GRC: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src [%08x], len [%d*4], dst [%08x]\n" + "comp_addr [%x:%08x], comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->dst_addr_lo >> 2, + dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + default: + if (src_type == DMAE_CMD_SRC_PCI) + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" + "comp_addr [%x:%08x] comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + else + DP(msglvl, "DMAE: opcode 0x%08x\n" + "src_addr [%08x] len [%d * 4] dst_addr [none]\n" + "comp_addr [%x:%08x] comp_val 0x%08x\n", + dmae->opcode, dmae->src_addr_lo >> 2, + dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, + dmae->comp_val); + break; + } + + for (i = 0; i < (sizeof(struct dmae_command)/4); i++) + DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n", + i, *(((u32 *)dmae) + i)); +} + +/* copy command into DMAE command memory and set DMAE command go */ +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) +{ + u32 cmd_offset; + int i; + + 
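+	/* The DMAE command memory holds one struct dmae_command per index:
+	 * the command is copied in word by word below, and writing the
+	 * per-index "go" register then kicks off the transfer.
+	 */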
cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx); + for (i = 0; i < (sizeof(struct dmae_command)/4); i++) { + REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i)); + } + REG_WR(bp, dmae_reg_go_c[idx], 1); +} + +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type) +{ + return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) | + DMAE_CMD_C_ENABLE); +} + +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode) +{ + return opcode & ~DMAE_CMD_SRC_RESET; +} + +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type) +{ + u32 opcode = 0; + + opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) | + (dst_type << DMAE_COMMAND_DST_SHIFT)); + + opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); + + opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); + opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) | + (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT)); + opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT); + +#ifdef __BIG_ENDIAN + opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; +#else + opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; +#endif + if (with_comp) + opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type); + return opcode; +} + +void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, + struct dmae_command *dmae, + u8 src_type, u8 dst_type) +{ + memset(dmae, 0, sizeof(struct dmae_command)); + + /* set the opcode */ + dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type, + true, DMAE_COMP_PCI); + + /* fill in the completion parameters */ + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); + dmae->comp_val = DMAE_COMP_VAL; +} + +/* issue a dmae command over the init-channel and wait for completion */ +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, + u32 *comp) +{ + int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000; + int rc = 0; + + bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE); + + /* Lock the dmae channel. Disable BHs to prevent a dead-lock + * as long as this code is called both from syscall context and + * from ndo_set_rx_mode() flow that may be called from BH. 
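+	 *
+	 * Completion is signalled by the DMAE engine writing DMAE_COMP_VAL
+	 * to the completion word (*comp) supplied with the command; the
+	 * polling loop below waits for that value (or a PCI error flag).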
+ */ + + spin_lock_bh(&bp->dmae_lock); + + /* reset completion */ + *comp = 0; + + /* post the command on the channel used for initializations */ + bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); + + /* wait for completion */ + udelay(5); + while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { + + if (!cnt || + (bp->recovery_state != BNX2X_RECOVERY_DONE && + bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { + BNX2X_ERR("DMAE timeout!\n"); + rc = DMAE_TIMEOUT; + goto unlock; + } + cnt--; + udelay(50); + } + if (*comp & DMAE_PCI_ERR_FLAG) { + BNX2X_ERR("DMAE PCI error!\n"); + rc = DMAE_PCI_ERROR; + } + +unlock: + + spin_unlock_bh(&bp->dmae_lock); + + return rc; +} + +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32) +{ + int rc; + struct dmae_command dmae; + + if (!bp->dmae_ready) { + u32 *data = bnx2x_sp(bp, wb_data[0]); + + if (CHIP_IS_E1(bp)) + bnx2x_init_ind_wr(bp, dst_addr, data, len32); + else + bnx2x_init_str_wr(bp, dst_addr, data, len32); + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); + + /* fill in addresses and len */ + dmae.src_addr_lo = U64_LO(dma_addr); + dmae.src_addr_hi = U64_HI(dma_addr); + dmae.dst_addr_lo = dst_addr >> 2; + dmae.dst_addr_hi = 0; + dmae.len = len32; + + /* issue the command and wait for completion */ + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } +} + +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) +{ + int rc; + struct dmae_command dmae; + + if (!bp->dmae_ready) { + u32 *data = bnx2x_sp(bp, wb_data[0]); + int i; + + if (CHIP_IS_E1(bp)) + for (i = 0; i < len32; i++) + data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4); + else + for (i = 0; i < len32; i++) + data[i] = REG_RD(bp, src_addr + i*4); + + return; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); + + /* fill in addresses and len */ + dmae.src_addr_lo = src_addr >> 2; + dmae.src_addr_hi = 0; + dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); + dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); + dmae.len = len32; + + /* issue the command and wait for completion */ + rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); + if (rc) { + BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } +} + +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, + u32 addr, u32 len) +{ + int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); + int offset = 0; + + while (len > dmae_wr_max) { + bnx2x_write_dmae(bp, phys_addr + offset, + addr + offset, dmae_wr_max); + offset += dmae_wr_max * 4; + len -= dmae_wr_max; + } + + bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); +} + +enum storms { + XSTORM, + TSTORM, + CSTORM, + USTORM, + MAX_STORMS +}; + +#define STORMS_NUM 4 +#define REGS_IN_ENTRY 4 + +static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp, + enum storms storm, + int entry) +{ + switch (storm) { + case XSTORM: + return XSTORM_ASSERT_LIST_OFFSET(entry); + case TSTORM: + return TSTORM_ASSERT_LIST_OFFSET(entry); + case CSTORM: + return CSTORM_ASSERT_LIST_OFFSET(entry); + case USTORM: + return USTORM_ASSERT_LIST_OFFSET(entry); + case MAX_STORMS: + default: + BNX2X_ERR("unknown storm\n"); + } + return -EINVAL; +} + +static int bnx2x_mc_assert(struct bnx2x *bp) +{ + char 
last_idx; + int i, j, rc = 0; + enum storms storm; + u32 regs[REGS_IN_ENTRY]; + u32 bar_storm_intmem[STORMS_NUM] = { + BAR_XSTRORM_INTMEM, + BAR_TSTRORM_INTMEM, + BAR_CSTRORM_INTMEM, + BAR_USTRORM_INTMEM + }; + u32 storm_assert_list_index[STORMS_NUM] = { + XSTORM_ASSERT_LIST_INDEX_OFFSET, + TSTORM_ASSERT_LIST_INDEX_OFFSET, + CSTORM_ASSERT_LIST_INDEX_OFFSET, + USTORM_ASSERT_LIST_INDEX_OFFSET + }; + char *storms_string[STORMS_NUM] = { + "XSTORM", + "TSTORM", + "CSTORM", + "USTORM" + }; + + for (storm = XSTORM; storm < MAX_STORMS; storm++) { + last_idx = REG_RD8(bp, bar_storm_intmem[storm] + + storm_assert_list_index[storm]); + if (last_idx) + BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n", + storms_string[storm], last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + /* read a single assert entry */ + for (j = 0; j < REGS_IN_ENTRY; j++) + regs[j] = REG_RD(bp, bar_storm_intmem[storm] + + bnx2x_get_assert_list_entry(bp, + storm, + i) + + sizeof(u32) * j); + + /* log entry if it contains a valid assert */ + if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + storms_string[storm], i, regs[3], + regs[2], regs[1], regs[0]); + rc++; + } else { + break; + } + } + } + + BNX2X_ERR("Chip Revision: %s, /*(DEBLOBBED)*/\n", + CHIP_IS_E1(bp) ? "everest1" : + CHIP_IS_E1H(bp) ? "everest1h" : + CHIP_IS_E2(bp) ? "everest2" : "everest3"/*(DEBLOBBED)*/); + + return rc; +} + +#define MCPR_TRACE_BUFFER_SIZE (0x800) +#define SCRATCH_BUFFER_SIZE(bp) \ + (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000)) + +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) +{ + u32 addr, val; + u32 mark, offset; + __be32 data[9]; + int word; + u32 trace_shmem_base; + if (BP_NOMCP(bp)) { + BNX2X_ERR("NO MCP - can not dump\n"); + return; + } + netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff)); + + val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); + if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) + BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val); + + if (BP_PATH(bp) == 0) + trace_shmem_base = bp->common.shmem_base; + else + trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); + + /* sanity */ + if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || + trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + + SCRATCH_BUFFER_SIZE(bp)) { + BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", + trace_shmem_base); + return; + } + + addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE; + + /* validate TRCB signature */ + mark = REG_RD(bp, addr); + if (mark != MFW_TRACE_SIGNATURE) { + BNX2X_ERR("Trace buffer signature is missing."); + return ; + } + + /* read cyclic buffer pointer */ + addr += 4; + mark = REG_RD(bp, addr); + mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; + if (mark >= trace_shmem_base || mark < addr + 4) { + BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); + return; + } + printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); + + printk("%s", lvl); + + /* dump buffer after the mark */ + for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) { + for (word = 0; word < 8; word++) + data[word] = htonl(REG_RD(bp, offset + 4*word)); + data[8] = 0x0; + pr_cont("%s", (char *)data); + } + + /* dump buffer before the mark */ + for (offset = addr + 4; offset <= mark; offset += 0x8*4) { + for (word = 0; word < 8; word++) + data[word] = htonl(REG_RD(bp, offset + 4*word)); + 
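+	/* The 8 words just read are raw ASCII text from the MCP trace
+	 * buffer; data[8] is set to zero below so the 32-byte chunk is
+	 * NUL-terminated and can be printed with pr_cont("%s", ...).
+	 */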
data[8] = 0x0; + pr_cont("%s", (char *)data); + } + printk("%s" "end of fw dump\n", lvl); +} + +static void bnx2x_fw_dump(struct bnx2x *bp) +{ + bnx2x_fw_dump_lvl(bp, KERN_ERR); +} + +static void bnx2x_hc_int_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + u32 val = REG_RD(bp, addr); + + /* in E1 we must use only PCI configuration space to disable + * MSI/MSIX capability + * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block + */ + if (CHIP_IS_E1(bp)) { + /* Since IGU_PF_CONF_MSI_MSIX_EN still always on + * Use mask register to prevent from HC sending interrupts + * after we exit the function + */ + REG_WR(bp, HC_REG_INT_MASK + port*4, 0); + + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + DP(NETIF_MSG_IFDOWN, + "write %x to HC %d (addr 0x%x)\n", + val, port, addr); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, addr, val); + if (REG_RD(bp, addr) != val) + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); +} + +static void bnx2x_igu_int_disable(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + + val &= ~(IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN); + + DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val); + + /* flush all outstanding writes */ + mmiowb(); + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val) + BNX2X_ERR("BUG! Proper val not read from IGU!\n"); +} + +static void bnx2x_int_disable(struct bnx2x *bp) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_int_disable(bp); + else + bnx2x_igu_int_disable(bp); +} + +void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) +{ + int i; + u16 j; + struct hc_sp_status_block_data sp_sb_data; + int func = BP_FUNC(bp); +#ifdef BNX2X_STOP_ON_ERROR + u16 start = 0, end = 0; + u8 cos; +#endif + if (IS_PF(bp) && disable_int) + bnx2x_int_disable(bp); + + bp->stats_state = STATS_STATE_DISABLED; + bp->eth_stats.unrecoverable_error++; + DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); + + BNX2X_ERR("begin crash dump -----------------\n"); + + /* Indices */ + /* Common */ + if (IS_PF(bp)) { + struct host_sp_status_block *def_sb = bp->def_status_blk; + int data_size, cstorm_offset; + + BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", + bp->def_idx, bp->def_att_idx, bp->attn_state, + bp->spq_prod_idx, bp->stats_counter); + BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", + def_sb->atten_status_block.attn_bits, + def_sb->atten_status_block.attn_bits_ack, + def_sb->atten_status_block.status_block_id, + def_sb->atten_status_block.attn_bits_index); + BNX2X_ERR(" def ("); + for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) + pr_cont("0x%x%s", + def_sb->sp_sb.index_values[i], + (i == HC_SP_SB_MAX_INDICES - 1) ? 
") " : " "); + + data_size = sizeof(struct hc_sp_status_block_data) / + sizeof(u32); + cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func); + for (i = 0; i < data_size; i++) + *((u32 *)&sp_sb_data + i) = + REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + + i * sizeof(u32)); + + pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", + sp_sb_data.igu_sb_id, + sp_sb_data.igu_seg_id, + sp_sb_data.p_func.pf_id, + sp_sb_data.p_func.vnic_id, + sp_sb_data.p_func.vf_id, + sp_sb_data.p_func.vf_valid, + sp_sb_data.state); + } + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + int loop; + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p = + CHIP_IS_E1x(bp) ? + sb_data_e1x.common.state_machine : + sb_data_e2.common.state_machine; + struct hc_index_data *hc_index_p = + CHIP_IS_E1x(bp) ? + sb_data_e1x.index_data : + sb_data_e2.index_data; + u8 data_size, cos; + u32 *sb_data_p; + struct bnx2x_fp_txdata txdata; + + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + + /* Rx */ + BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", + i, fp->rx_bd_prod, fp->rx_bd_cons, + fp->rx_comp_prod, + fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); + BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n", + fp->rx_sge_prod, fp->last_max_sge, + le16_to_cpu(fp->fp_hc_idx)); + + /* Tx */ + for_each_cos_in_tx_queue(fp, cos) + { + if (!fp->txdata_ptr[cos]) + break; + + txdata = *fp->txdata_ptr[cos]; + + if (!txdata.tx_cons_sb) + continue; + + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", + i, txdata.tx_pkt_prod, + txdata.tx_pkt_cons, txdata.tx_bd_prod, + txdata.tx_bd_cons, + le16_to_cpu(*txdata.tx_cons_sb)); + } + + loop = CHIP_IS_E1x(bp) ? + HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; + + /* host sb data */ + + if (IS_FCOE_FP(fp)) + continue; + + BNX2X_ERR(" run indexes ("); + for (j = 0; j < HC_SB_MAX_SM; j++) + pr_cont("0x%x%s", + fp->sb_running_index[j], + (j == HC_SB_MAX_SM - 1) ? ")" : " "); + + BNX2X_ERR(" indexes ("); + for (j = 0; j < loop; j++) + pr_cont("0x%x%s", + fp->sb_index_values[j], + (j == loop - 1) ? ")" : " "); + + /* VF cannot access FW refelection for status block */ + if (IS_VF(bp)) + continue; + + /* fw sb data */ + data_size = CHIP_IS_E1x(bp) ? + sizeof(struct hc_status_block_data_e1x) : + sizeof(struct hc_status_block_data_e2); + data_size /= sizeof(u32); + sb_data_p = CHIP_IS_E1x(bp) ? 
+ (u32 *)&sb_data_e1x : + (u32 *)&sb_data_e2; + /* copy sb data in here */ + for (j = 0; j < data_size; j++) + *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + + j * sizeof(u32)); + + if (!CHIP_IS_E1x(bp)) { + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", + sb_data_e2.common.p_func.pf_id, + sb_data_e2.common.p_func.vf_id, + sb_data_e2.common.p_func.vf_valid, + sb_data_e2.common.p_func.vnic_id, + sb_data_e2.common.same_igu_sb_1b, + sb_data_e2.common.state); + } else { + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", + sb_data_e1x.common.p_func.pf_id, + sb_data_e1x.common.p_func.vf_id, + sb_data_e1x.common.p_func.vf_valid, + sb_data_e1x.common.p_func.vnic_id, + sb_data_e1x.common.same_igu_sb_1b, + sb_data_e1x.common.state); + } + + /* SB_SMs data */ + for (j = 0; j < HC_SB_MAX_SM; j++) { + pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n", + j, hc_sm_p[j].__flags, + hc_sm_p[j].igu_sb_id, + hc_sm_p[j].igu_seg_id, + hc_sm_p[j].time_to_expire, + hc_sm_p[j].timer_value); + } + + /* Indices data */ + for (j = 0; j < loop; j++) { + pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, + hc_index_p[j].flags, + hc_index_p[j].timeout); + } + } + +#ifdef BNX2X_STOP_ON_ERROR + if (IS_PF(bp)) { + /* event queue */ + BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); + for (i = 0; i < NUM_EQ_DESC; i++) { + u32 *data = (u32 *)&bp->eq_ring[i].message.data; + + BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", + i, bp->eq_ring[i].message.opcode, + bp->eq_ring[i].message.error); + BNX2X_ERR("data: %x %x %x\n", + data[0], data[1], data[2]); + } + } + + /* Rings */ + /* Rx */ + for_each_valid_rx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + + start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); + end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); + for (j = start; j != end; j = RX_BD(j + 1)) { + u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; + struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; + + BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", + i, j, rx_bd[1], rx_bd[0], sw_bd->data); + } + + start = RX_SGE(fp->rx_sge_prod); + end = RX_SGE(fp->last_max_sge); + for (j = start; j != end; j = RX_SGE(j + 1)) { + u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; + struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; + + BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", + i, j, rx_sge[1], rx_sge[0], sw_page->page); + } + + start = RCQ_BD(fp->rx_comp_cons - 10); + end = RCQ_BD(fp->rx_comp_cons + 503); + for (j = start; j != end; j = RCQ_BD(j + 1)) { + u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; + + BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", + i, j, cqe[0], cqe[1], cqe[2], cqe[3]); + } + } + + /* Tx */ + for_each_valid_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + if (!bp->fp) + break; + + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + + if (!fp->txdata_ptr[cos]) + break; + + if (!txdata->tx_cons_sb) + continue; + + start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); + end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); + for (j = start; j != end; j = TX_BD(j + 1)) { + struct sw_tx_bd *sw_bd = + &txdata->tx_buf_ring[j]; + + BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n", + i, cos, j, sw_bd->skb, + sw_bd->first_bd); + } + + start = TX_BD(txdata->tx_bd_cons - 10); + 
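+			/* Dump a fixed window of raw tx BDs around the
+			 * current consumer: 10 entries behind it up to
+			 * 254 entries ahead, with TX_BD() handling the
+			 * ring wrap-around.
+			 */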
end = TX_BD(txdata->tx_bd_cons + 254); + for (j = start; j != end; j = TX_BD(j + 1)) { + u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; + + BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n", + i, cos, j, tx_bd[0], tx_bd[1], + tx_bd[2], tx_bd[3]); + } + } + } +#endif + if (IS_PF(bp)) { + bnx2x_fw_dump(bp); + bnx2x_mc_assert(bp); + } + BNX2X_ERR("end crash dump -----------------\n"); +} + +/* + * FLR Support for E2 + * + * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW + * initialization. + */ +#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ +#define FLR_WAIT_INTERVAL 50 /* usec */ +#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ + +struct pbf_pN_buf_regs { + int pN; + u32 init_crd; + u32 crd; + u32 crd_freed; +}; + +struct pbf_pN_cmd_regs { + int pN; + u32 lines_occup; + u32 lines_freed; +}; + +static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, + struct pbf_pN_buf_regs *regs, + u32 poll_count) +{ + u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; + u32 cur_cnt = poll_count; + + crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); + crd = crd_start = REG_RD(bp, regs->crd); + init_crd = REG_RD(bp, regs->init_crd); + + DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); + DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); + + while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < + (init_crd - crd_start))) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERVAL); + crd = REG_RD(bp, regs->crd); + crd_freed = REG_RD(bp, regs->crd_freed); + } else { + DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", + regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", + regs->pN, crd_freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); +} + +static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, + struct pbf_pN_cmd_regs *regs, + u32 poll_count) +{ + u32 occup, to_free, freed, freed_start; + u32 cur_cnt = poll_count; + + occup = to_free = REG_RD(bp, regs->lines_occup); + freed = freed_start = REG_RD(bp, regs->lines_freed); + + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); + + while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERVAL); + occup = REG_RD(bp, regs->lines_occup); + freed = REG_RD(bp, regs->lines_freed); + } else { + DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", + regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", + regs->pN, freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); +} + +static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, + u32 expected, u32 poll_count) +{ + u32 cur_cnt = poll_count; + u32 val; + + while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) + udelay(FLR_WAIT_INTERVAL); + + return val; +} + +int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt) +{ + u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); + if (val != 0) { + BNX2X_ERR("%s usage count=%d\n", msg, val); + return 1; + } + return 0; +} + +/* Common routines with VF FLR cleanup */ +u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) +{ + /* adjust polling timeout */ + if 
(CHIP_REV_IS_EMUL(bp)) + return FLR_POLL_CNT * 2000; + + if (CHIP_REV_IS_FPGA(bp)) + return FLR_POLL_CNT * 120; + + return FLR_POLL_CNT; +} + +void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) +{ + struct pbf_pN_cmd_regs cmd_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q0 : + PBF_REG_P0_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q0 : + PBF_REG_P0_TQ_LINES_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q1 : + PBF_REG_P1_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q1 : + PBF_REG_P1_TQ_LINES_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_LB_Q : + PBF_REG_P4_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_LB_Q : + PBF_REG_P4_TQ_LINES_FREED_CNT} + }; + + struct pbf_pN_buf_regs buf_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q0 : + PBF_REG_P0_INIT_CRD , + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q0 : + PBF_REG_P0_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : + PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q1 : + PBF_REG_P1_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q1 : + PBF_REG_P1_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : + PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_LB_Q : + PBF_REG_P4_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_LB_Q : + PBF_REG_P4_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : + PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, + }; + + int i; + + /* Verify the command queues are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) + bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); + + /* Verify the transmission buffers are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(buf_regs); i++) + bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); +} + +#define OP_GEN_PARAM(param) \ + (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) + +#define OP_GEN_TYPE(type) \ + (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) + +#define OP_GEN_AGG_VECT(index) \ + (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) + +int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt) +{ + u32 op_gen_command = 0; + u32 comp_addr = BAR_CSTRORM_INTMEM + + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); + int ret = 0; + + if (REG_RD(bp, comp_addr)) { + BNX2X_ERR("Cleanup complete was not 0 before sending\n"); + return 1; + } + + op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen_command |= OP_GEN_AGG_VECT(clnup_func); + op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + + DP(BNX2X_MSG_SP, "sending FW Final cleanup\n"); + REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command); + + if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { + BNX2X_ERR("FW final cleanup did not succeed\n"); + DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n", + (REG_RD(bp, comp_addr))); + bnx2x_panic(); + return 1; + } + /* Zero completion for next FLR */ + REG_WR(bp, comp_addr, 0); + + return ret; +} + +u8 bnx2x_is_pcie_pending(struct pci_dev *dev) +{ + u16 status; + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); + return status & PCI_EXP_DEVSTA_TRPND; +} + +/* PF FLR specific routines +*/ +static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt) +{ + /* wait for CFC PF usage-counter to zero (includes all the 
VFs) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + CFC_REG_NUM_LCIDS_INSIDE_PF, + "CFC PF usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + DORQ_REG_PF_USAGE_CNT, + "DQ PF usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp), + "QM PF usage counter timed out", + poll_cnt)) + return 1; + + /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp), + "Timers VNIC usage counter timed out", + poll_cnt)) + return 1; + if (bnx2x_flr_clnup_poll_hw_counter(bp, + TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp), + "Timers NUM_SCANS usage counter timed out", + poll_cnt)) + return 1; + + /* Wait DMAE PF usage counter to zero */ + if (bnx2x_flr_clnup_poll_hw_counter(bp, + dmae_reg_go_c[INIT_DMAE_C(bp)], + "DMAE command register timed out", + poll_cnt)) + return 1; + + return 0; +} + +static void bnx2x_hw_enable_status(struct bnx2x *bp) +{ + u32 val; + + val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF); + DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); + + val = REG_RD(bp, PBF_REG_DISABLE_PF); + DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); + + val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK); + DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); + + val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", + val); +} + +static int bnx2x_pf_flr_clnup(struct bnx2x *bp) +{ + u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); + + DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp)); + + /* Re-enable PF target read access */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + + /* Poll HW usage counters */ + DP(BNX2X_MSG_SP, "Polling usage counters\n"); + if (bnx2x_poll_hw_usage_counters(bp, poll_cnt)) + return -EBUSY; + + /* Zero the igu 'trailing edge' and 'leading edge' */ + + /* Send the FW cleanup command */ + if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt)) + return -EBUSY; + + /* ATC cleanup */ + + /* Verify TX hw is flushed */ + bnx2x_tx_hw_flushed(bp, poll_cnt); + + /* Wait 100ms (not adjusted according to platform) */ + msleep(100); + + /* Verify no pending pci transactions */ + if (bnx2x_is_pcie_pending(bp->pdev)) + BNX2X_ERR("PCIE Transactions still pending\n"); + + /* Debug */ + bnx2x_hw_enable_status(bp); + + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function init + */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + return 0; +} + +static void bnx2x_hc_int_enable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; + u32 val = REG_RD(bp, addr); + bool msix = (bp->flags & USING_MSIX_FLAG) ? 
true : false; + bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; + bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; + + if (msix) { + val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0); + val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + if (single_msix) + val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; + } else if (msi) { + val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + } else { + val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | + HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | + HC_CONFIG_0_REG_INT_LINE_EN_0 | + HC_CONFIG_0_REG_ATTN_BIT_EN_0); + + if (!CHIP_IS_E1(bp)) { + DP(NETIF_MSG_IFUP, + "write %x to HC %d (addr 0x%x)\n", val, port, addr); + + REG_WR(bp, addr, val); + + val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; + } + } + + if (CHIP_IS_E1(bp)) + REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF); + + DP(NETIF_MSG_IFUP, + "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, + (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); + + REG_WR(bp, addr, val); + /* + * Ensure that HC_CONFIG is written before leading/trailing edge config + */ + mmiowb(); + barrier(); + + if (!CHIP_IS_E1(bp)) { + /* init leading/trailing edge */ + if (IS_MF(bp)) { + val = (0xee0f | (1 << (BP_VN(bp) + 4))); + if (bp->port.pmf) + /* enable nig and gpio3 attention */ + val |= 0x1100; + } else + val = 0xffff; + + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); + } + + /* Make sure that interrupts are indeed enabled from here on */ + mmiowb(); +} + +static void bnx2x_igu_int_enable(struct bnx2x *bp) +{ + u32 val; + bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false; + bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false; + bool msi = (bp->flags & USING_MSI_FLAG) ? true : false; + + val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + + if (msix) { + val &= ~(IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + val |= (IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN); + + if (single_msix) + val |= IGU_PF_CONF_SINGLE_ISR_EN; + } else if (msi) { + val &= ~IGU_PF_CONF_INT_LINE_EN; + val |= (IGU_PF_CONF_MSI_MSIX_EN | + IGU_PF_CONF_ATTN_BIT_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + } else { + val &= ~IGU_PF_CONF_MSI_MSIX_EN; + val |= (IGU_PF_CONF_INT_LINE_EN | + IGU_PF_CONF_ATTN_BIT_EN | + IGU_PF_CONF_SINGLE_ISR_EN); + } + + /* Clean previous status - need to configure igu prior to ack*/ + if ((!msix) || single_msix) { + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + bnx2x_ack_int(bp); + } + + val |= IGU_PF_CONF_FUNC_EN; + + DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n", + val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + + if (val & IGU_PF_CONF_INT_LINE_EN) + pci_intx(bp->pdev, true); + + barrier(); + + /* init leading/trailing edge */ + if (IS_MF(bp)) { + val = (0xee0f | (1 << (BP_VN(bp) + 4))); + if (bp->port.pmf) + /* enable nig and gpio3 attention */ + val |= 0x1100; + } else + val = 0xffff; + + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); + + /* Make sure that interrupts are indeed enabled from here on */ + mmiowb(); +} + +void bnx2x_int_enable(struct bnx2x *bp) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_int_enable(bp); + else + bnx2x_igu_int_enable(bp); +} + +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) +{ + int msix = (bp->flags & USING_MSIX_FLAG) ? 
1 : 0; + int i, offset; + + if (disable_hw) + /* prevent the HW from sending interrupts */ + bnx2x_int_disable(bp); + + /* make sure all ISRs are done */ + if (msix) { + synchronize_irq(bp->msix_table[0].vector); + offset = 1; + if (CNIC_SUPPORT(bp)) + offset++; + for_each_eth_queue(bp, i) + synchronize_irq(bp->msix_table[offset++].vector); + } else + synchronize_irq(bp->pdev->irq); + + /* make sure sp_task is not running */ + cancel_delayed_work(&bp->sp_task); + cancel_delayed_work(&bp->period_task); + flush_workqueue(bnx2x_wq); +} + +/* fast path */ + +/* + * General service functions + */ + +/* Return true if the lock was successfully acquired */ +static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, + "Trying to take a lock on resource %d\n", resource); + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, + "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return false; + } + + if (func <= 5) + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + else + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + + /* Try to acquire the lock */ + REG_WR(bp, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) + return true; + + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, + "Failed to get a lock on resource %d\n", resource); + return false; +} + +/** + * bnx2x_get_leader_lock_resource - get the recovery leader resource id + * + * @bp: driver handle + * + * Returns the recovery leader resource id according to the engine this function + * belongs to. Currently only 2 engines are supported. + */ +static int bnx2x_get_leader_lock_resource(struct bnx2x *bp) +{ + if (BP_PATH(bp)) + return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; + else + return HW_LOCK_RESOURCE_RECOVERY_LEADER_0; +} + +/** + * bnx2x_trylock_leader_lock - try to acquire a leader lock. + * + * @bp: driver handle + * + * Tries to acquire a leader lock for the current engine. + */ +static bool bnx2x_trylock_leader_lock(struct bnx2x *bp) +{ + return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); +} + +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); + +/* schedule the sp task and mark that interrupt occurred (runs from ISR) */ +static int bnx2x_schedule_sp_task(struct bnx2x *bp) +{ + /* Set the interrupt occurred bit for the sp-task to recognize it + * must ack the interrupt and transition according to the IGU + * state machine. + */ + atomic_set(&bp->interrupt_occurred, 1); + + /* The sp_task must execute only after this bit + * is set, otherwise we will get out of sync and miss all + * further interrupts. Hence, the barrier.
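+	 * (Concretely: the smp_wmb() below orders the atomic_set() above
+	 * before the queue_delayed_work() call that schedules the sp_task
+	 * on bnx2x_wq.)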
+ */ + smp_wmb(); + + /* schedule sp_task to workqueue */ + return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); +} + +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) +{ + struct bnx2x *bp = fp->bp; + int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); + int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); + enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; + struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + DP(BNX2X_MSG_SP, + "fp %d cid %d got ramrod #%d state is %x type is %d\n", + fp->index, cid, command, bp->state, + rr_cqe->ramrod_cqe.ramrod_type); + + /* If cid is within VF range, replace the slowpath object with the + * one corresponding to this VF + */ + if (cid >= BNX2X_FIRST_VF_CID && + cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS) + bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj); + + switch (command) { + case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): + DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid); + drv_cmd = BNX2X_Q_CMD_UPDATE; + break; + + case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): + DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_SETUP; + break; + + case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): + DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; + break; + + case (RAMROD_CMD_ID_ETH_HALT): + DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_HALT; + break; + + case (RAMROD_CMD_ID_ETH_TERMINATE): + DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_TERMINATE; + break; + + case (RAMROD_CMD_ID_ETH_EMPTY): + DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid); + drv_cmd = BNX2X_Q_CMD_EMPTY; + break; + + case (RAMROD_CMD_ID_ETH_TPA_UPDATE): + DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); + drv_cmd = BNX2X_Q_CMD_UPDATE_TPA; + break; + + default: + BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", + command, fp->index); + return; + } + + if ((drv_cmd != BNX2X_Q_CMD_MAX) && + q_obj->complete_cmd(bp, q_obj, drv_cmd)) + /* q_obj->complete_cmd() failure means that this was + * an unexpected completion. + * + * In this case we don't want to increase the bp->spq_left + * because apparently we haven't sent this command the first + * place. + */ +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#else + return; +#endif + + smp_mb__before_atomic(); + atomic_inc(&bp->cq_spq_left); + /* push the change in bp->spq_left and towards the memory */ + smp_mb__after_atomic(); + + DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); + + if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && + (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) { + /* if Q update ramrod is completed for last Q in AFEX vif set + * flow, then ACK MCP at the end + * + * mark pending ACK to MCP bit. + * prevent case that both bits are cleared. 
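+	 * (The code below therefore sets BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
+	 * first and only afterwards clears BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
+	 * so there is never a window in which both bits are clear.)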
+ * At the end of load/unload driver checks that + * sp_state is cleared, and this order prevents + * races + */ + smp_mb__before_atomic(); + set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); + wmb(); + clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); + smp_mb__after_atomic(); + + /* schedule the sp task as mcp ack is required */ + bnx2x_schedule_sp_task(bp); + } + + return; +} + +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) +{ + struct bnx2x *bp = netdev_priv(dev_instance); + u16 status = bnx2x_ack_int(bp); + u16 mask; + int i; + u8 cos; + + /* Return here if interrupt is shared and it's not for us */ + if (unlikely(status == 0)) { + DP(NETIF_MSG_INTR, "not our interrupt!\n"); + return IRQ_NONE; + } + DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + mask = 0x2 << (fp->index + CNIC_SUPPORT(bp)); + if (status & mask) { + /* Handle Rx or Tx according to SB id */ + for_each_cos_in_tx_queue(fp, cos) + prefetch(fp->txdata_ptr[cos]->tx_cons_sb); + prefetch(&fp->sb_running_index[SM_RX_ID]); + napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi)); + status &= ~mask; + } + } + + if (CNIC_SUPPORT(bp)) { + mask = 0x2; + if (status & (mask | 0x1)) { + struct cnic_ops *c_ops = NULL; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops && (bp->cnic_eth_dev.drv_state & + CNIC_DRV_STATE_HANDLES_IRQ)) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + + status &= ~mask; + } + } + + if (unlikely(status & 0x1)) { + + /* schedule sp task to perform default status block work, ack + * attentions and enable interrupts. + */ + bnx2x_schedule_sp_task(bp); + + status &= ~0x1; + if (!status) + return IRQ_HANDLED; + } + + if (unlikely(status)) + DP(NETIF_MSG_INTR, "got an unknown interrupt! 
(status 0x%x)\n", + status); + + return IRQ_HANDLED; +} + +/* Link */ + +/* + * General service functions + */ + +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + int cnt; + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + } + + /* Validating that the resource is not already taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) { + BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n", + lock_status, resource_bit); + return -EEXIST; + } + + /* Try for 5 second every 5ms */ + for (cnt = 0; cnt < 1000; cnt++) { + /* Try to acquire the lock */ + REG_WR(bp, hw_lock_control_reg + 4, resource_bit); + lock_status = REG_RD(bp, hw_lock_control_reg); + if (lock_status & resource_bit) + return 0; + + usleep_range(5000, 10000); + } + BNX2X_ERR("Timeout\n"); + return -EAGAIN; +} + +int bnx2x_release_leader_lock(struct bnx2x *bp) +{ + return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); +} + +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) +{ + u32 lock_status; + u32 resource_bit = (1 << resource); + int func = BP_FUNC(bp); + u32 hw_lock_control_reg; + + /* Validating that the resource is within range */ + if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { + BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", + resource, HW_LOCK_MAX_RESOURCE_VALUE); + return -EINVAL; + } + + if (func <= 5) { + hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); + } else { + hw_lock_control_reg = + (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); + } + + /* Validating that the resource is currently taken */ + lock_status = REG_RD(bp, hw_lock_control_reg); + if (!(lock_status & resource_bit)) { + BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n", + lock_status, resource_bit); + return -EFAULT; + } + + REG_WR(bp, hw_lock_control_reg, resource_bit); + return 0; +} + +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + int value; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + /* read GPIO value */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); + + /* get the requested pin value */ + if ((gpio_reg & gpio_mask) == gpio_mask) + value = 1; + else + value = 0; + + return value; +} + +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, + "Set GPIO %d (shift %d) -> output low\n", + gpio_num, gpio_shift); + /* clear FLOAT and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, + "Set GPIO %d (shift %d) -> output high\n", + gpio_num, gpio_shift); + /* clear FLOAT and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, + "Set GPIO %d (shift %d) -> input\n", + gpio_num, gpio_shift); + /* set FLOAT */ + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) +{ + u32 gpio_reg = 0; + int rc = 0; + + /* Any port swapping should be handled by caller. */ + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); + /* set CLR */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); + /* set SET */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); + /* set FLOAT */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); + rc = -EINVAL; + break; + } + + if (rc == 0) + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return rc; +} + +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) +{ + /* The GPIO should be swapped if swap register is set and active */ + int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && + REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port; + int gpio_shift = gpio_num + + (gpio_port ? 
MISC_REGISTERS_GPIO_PORT_SHIFT : 0); + u32 gpio_mask = (1 << gpio_shift); + u32 gpio_reg; + + if (gpio_num > MISC_REGISTERS_GPIO_3) { + BNX2X_ERR("Invalid GPIO %d\n", gpio_num); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO int */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT); + + switch (mode) { + case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: + DP(NETIF_MSG_LINK, + "Clear GPIO INT %d (shift %d) -> output low\n", + gpio_num, gpio_shift); + /* clear SET and set CLR */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: + DP(NETIF_MSG_LINK, + "Set GPIO INT %d (shift %d) -> output high\n", + gpio_num, gpio_shift); + /* clear CLR and set SET */ + gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); + gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return 0; +} + +static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) +{ + u32 spio_reg; + + /* Only 2 SPIOs are configurable */ + if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { + BNX2X_ERR("Invalid SPIO 0x%x\n", spio); + return -EINVAL; + } + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + /* read SPIO and mask except the float bits */ + spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT); + + switch (mode) { + case MISC_SPIO_OUTPUT_LOW: + DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio); + /* clear FLOAT and set CLR */ + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_CLR_POS); + break; + + case MISC_SPIO_OUTPUT_HIGH: + DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio); + /* clear FLOAT and set SET */ + spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); + spio_reg |= (spio << MISC_SPIO_SET_POS); + break; + + case MISC_SPIO_INPUT_HI_Z: + DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); + /* set FLOAT */ + spio_reg |= (spio << MISC_SPIO_FLOAT_POS); + break; + + default: + break; + } + + REG_WR(bp, MISC_REG_SPIO, spio_reg); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); + + return 0; +} + +void bnx2x_calc_fc_adv(struct bnx2x *bp) +{ + u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); + switch (bp->link_vars.ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: + bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + + case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: + bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; + break; + + default: + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); + break; + } +} + +static void bnx2x_set_requested_fc(struct bnx2x *bp) +{ + /* Initialize link parameters structure variables + * It is recommended to turn off RX FC for jumbo frames + * for better performance + */ + if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; + else + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; +} + +static void bnx2x_init_dropless_fc(struct bnx2x *bp) +{ + u32 pause_enabled = 0; + + if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { + if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) + pause_enabled = 1; + + 
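+		/* Publish the resulting pause state to USTORM internal memory
+		 * for this port; presumably the firmware consults it for its
+		 * dropless flow-control handling.
+		 */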
REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), + pause_enabled); + } + + DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", + pause_enabled ? "enabled" : "disabled"); +} + +int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) +{ + int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); + u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; + + if (!BP_NOMCP(bp)) { + bnx2x_set_requested_fc(bp); + bnx2x_acquire_phy_lock(bp); + + if (load_mode == LOAD_DIAG) { + struct link_params *lp = &bp->link_params; + lp->loopback_mode = LOOPBACK_XGXS; + /* do PHY loopback at 10G speed, if possible */ + if (lp->req_line_speed[cfx_idx] < SPEED_10000) { + if (lp->speed_cap_mask[cfx_idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + lp->req_line_speed[cfx_idx] = + SPEED_10000; + else + lp->req_line_speed[cfx_idx] = + SPEED_1000; + } + } + + if (load_mode == LOAD_LOOPBACK_EXT) { + struct link_params *lp = &bp->link_params; + lp->loopback_mode = LOOPBACK_EXT; + } + + rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); + + bnx2x_release_phy_lock(bp); + + bnx2x_init_dropless_fc(bp); + + bnx2x_calc_fc_adv(bp); + + if (bp->link_vars.link_up) { + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + bnx2x_link_report(bp); + } + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + bp->link_params.req_line_speed[cfx_idx] = req_line_speed; + return rc; + } + BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + return -EINVAL; +} + +void bnx2x_link_set(struct bnx2x *bp) +{ + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_phy_init(&bp->link_params, &bp->link_vars); + bnx2x_release_phy_lock(bp); + + bnx2x_init_dropless_fc(bp); + + bnx2x_calc_fc_adv(bp); + } else + BNX2X_ERR("Bootcode is missing - can not set link\n"); +} + +static void bnx2x__link_reset(struct bnx2x *bp) +{ + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); + bnx2x_release_phy_lock(bp); + } else + BNX2X_ERR("Bootcode is missing - can not reset link\n"); +} + +void bnx2x_force_link_reset(struct bnx2x *bp) +{ + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); +} + +u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) +{ + u8 rc = 0; + + if (!BP_NOMCP(bp)) { + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_test_link(&bp->link_params, &bp->link_vars, + is_serdes); + bnx2x_release_phy_lock(bp); + } else + BNX2X_ERR("Bootcode is missing - can not test link\n"); + + return rc; +} + +/* Calculates the sum of vn_min_rates. + It's needed for further normalizing of the min_rates. + Returns: + sum of vn_min_rates. + or + 0 - if all the min_rates are 0. + In the later case fairness algorithm should be deactivated. + If not all min_rates are zero then those that are zeroes will be set to 1. 
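+ Illustrative example (MIN_BW values assumed, not read from a real
+ board): fields of 25, 0, 10, 0 give vn_min_rates of 2500, DEF_MIN_RATE,
+ 1000, DEF_MIN_RATE and fairness stays enabled; if every field were 0
+ (or ETS is enabled) fairness would be disabled.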
+ */ +static void bnx2x_calc_vn_min(struct bnx2x *bp, + struct cmng_init_input *input) +{ + int all_zero = 1; + int vn; + + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + u32 vn_cfg = bp->mf_config[vn]; + u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT) * 100; + + /* Skip hidden vns */ + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) + vn_min_rate = 0; + /* If min rate is zero - set it to 1 */ + else if (!vn_min_rate) + vn_min_rate = DEF_MIN_RATE; + else + all_zero = 0; + + input->vnic_min_rate[vn] = vn_min_rate; + } + + /* if ETS or all min rates are zeros - disable fairness */ + if (BNX2X_IS_ETS_ENABLED(bp)) { + input->flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); + } else if (all_zero) { + input->flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, + "All MIN values are zeroes fairness will be disabled\n"); + } else + input->flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_FAIRNESS_VN; +} + +static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, + struct cmng_init_input *input) +{ + u16 vn_max_rate; + u32 vn_cfg = bp->mf_config[vn]; + + if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) + vn_max_rate = 0; + else { + u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); + + if (IS_MF_SI(bp)) { + /* maxCfg in percents of linkspeed */ + vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; + } else /* SD modes */ + /* maxCfg is absolute in 100Mb units */ + vn_max_rate = maxCfg * 100; + } + + DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); + + input->vnic_max_rate[vn] = vn_max_rate; +} + +static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) +{ + if (CHIP_REV_IS_SLOW(bp)) + return CMNG_FNS_NONE; + if (IS_MF(bp)) + return CMNG_FNS_MINMAX; + + return CMNG_FNS_NONE; +} + +void bnx2x_read_mf_cfg(struct bnx2x *bp) +{ + int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1); + + if (BP_NOMCP(bp)) + return; /* what should be the default value in this case */ + + /* For 2 port configuration the absolute function number formula + * is: + * abs_func = 2 * vn + BP_PORT + BP_PATH + * + * and there are 4 functions per port + * + * For 4 port configuration it is + * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH + * + * and there are 2 functions per port + */ + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp); + + if (func >= E1H_FUNC_MAX) + break; + + bp->mf_config[vn] = + MF_CFG_RD(bp, func_mf_config[func].config); + } + if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { + DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; + } else { + DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); + bp->flags &= ~MF_FUNC_DIS; + } +} + +static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) +{ + struct cmng_init_input input; + memset(&input, 0, sizeof(struct cmng_init_input)); + + input.port_rate = bp->link_vars.line_speed; + + if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { + int vn; + + /* read mf conf from shmem */ + if (read_cfg) + bnx2x_read_mf_cfg(bp); + + /* vn_weight_sum and enable fairness if not 0 */ + bnx2x_calc_vn_min(bp, &input); + + /* calculate and set min-max rate for each vn */ + if (bp->port.pmf) + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) + bnx2x_calc_vn_max(bp, vn, &input); + + /* always enable rate shaping and fairness */ + input.flags.cmng_enables |= + CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; + + bnx2x_init_cmng(&input, &bp->cmng); + return; + } + + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "rate shaping and fairness are disabled\n"); +} + +static void storm_memset_cmng(struct bnx2x *bp, + struct cmng_init *cmng, + u8 port) +{ + int vn; + size_t size = sizeof(struct cmng_struct_per_port); + + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); + + __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port); + + for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { + int func = func_by_vn(bp, vn); + + addr = BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func); + size = sizeof(struct rate_shaping_vars_per_vn); + __storm_memset_struct(bp, addr, size, + (u32 *)&cmng->vnic.vnic_max_rate[vn]); + + addr = BAR_XSTRORM_INTMEM + + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func); + size = sizeof(struct fairness_vars_per_vn); + __storm_memset_struct(bp, addr, size, + (u32 *)&cmng->vnic.vnic_min_rate[vn]); + } +} + +/* init cmng mode in HW according to local configuration */ +void bnx2x_set_local_cmng(struct bnx2x *bp) +{ + int cmng_fns = bnx2x_get_cmng_fns_mode(bp); + + if (cmng_fns != CMNG_FNS_NONE) { + bnx2x_cmng_fns_init(bp, false, cmng_fns); + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + } else { + /* rate shaping and fairness are disabled */ + DP(NETIF_MSG_IFUP, + "single function mode without fairness\n"); + } +} + +/* This function is called upon link interrupt */ +static void bnx2x_link_attn(struct bnx2x *bp) +{ + /* Make sure that we are synced with the current statistics */ + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + + bnx2x_link_update(&bp->link_params, &bp->link_vars); + + bnx2x_init_dropless_fc(bp); + + if (bp->link_vars.link_up) { + + if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { + struct host_port_stats *pstats; + + pstats = bnx2x_sp(bp, port_stats); + /* reset old mac stats */ + memset(&(pstats->mac_stx[0]), 0, + sizeof(struct mac_stx)); + } + if (bp->state == 
BNX2X_STATE_OPEN) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + } + + if (bp->link_vars.link_up && bp->link_vars.line_speed) + bnx2x_set_local_cmng(bp); + + __bnx2x_link_report(bp); + + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); +} + +void bnx2x__link_status_update(struct bnx2x *bp) +{ + if (bp->state != BNX2X_STATE_OPEN) + return; + + /* read updated dcb configuration */ + if (IS_PF(bp)) { + bnx2x_dcbx_pmf_update(bp); + bnx2x_link_status_update(&bp->link_params, &bp->link_vars); + if (bp->link_vars.link_up) + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + else + bnx2x_stats_handle(bp, STATS_EVENT_STOP); + /* indicate link status */ + bnx2x_link_report(bp); + + } else { /* VF */ + bp->port.supported[0] |= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_2500baseX_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + bp->port.advertising[0] = bp->port.supported[0]; + + bp->link_params.bp = bp; + bp->link_params.port = BP_PORT(bp); + bp->link_params.req_duplex[0] = DUPLEX_FULL; + bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE; + bp->link_params.req_line_speed[0] = SPEED_10000; + bp->link_params.speed_cap_mask[0] = 0x7f0000; + bp->link_params.switch_cfg = SWITCH_CFG_10G; + bp->link_vars.mac_type = MAC_TYPE_BMAC; + bp->link_vars.line_speed = SPEED_10000; + bp->link_vars.link_status = + (LINK_STATUS_LINK_UP | + LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); + bp->link_vars.link_up = 1; + bp->link_vars.duplex = DUPLEX_FULL; + bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE; + __bnx2x_link_report(bp); + + bnx2x_sample_bulletin(bp); + + /* if bulletin board did not have an update for link status + * __bnx2x_link_report will report current status + * but it will NOT duplicate report in case of already reported + * during sampling bulletin board. + */ + bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); + } +} + +static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid, + u16 vlan_val, u8 allowed_prio) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_afex_update_params *f_update_params = + &func_params.params.afex_update; + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE; + + /* no need to wait for RAMROD completion, so don't + * set RAMROD_COMP_WAIT flag + */ + + f_update_params->vif_id = vifid; + f_update_params->afex_default_vlan = vlan_val; + f_update_params->allowed_priorities = allowed_prio; + + /* if ramrod can not be sent, response to MCP immediately */ + if (bnx2x_func_state_change(bp, &func_params) < 0) + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + + return 0; +} + +static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type, + u16 vif_index, u8 func_bit_map) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_afex_viflists_params *update_params = + &func_params.params.afex_viflists; + int rc; + u32 drv_msg_code; + + /* validate only LIST_SET and LIST_GET are received from switch */ + if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET)) + BNX2X_ERR("BUG! 
afex_handle_vif_list_cmd invalid type 0x%x\n", + cmd_type); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS; + + /* set parameters according to cmd_type */ + update_params->afex_vif_list_command = cmd_type; + update_params->vif_list_index = vif_index; + update_params->func_bit_map = + (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map; + update_params->func_to_clear = 0; + drv_msg_code = + (cmd_type == VIF_LIST_RULE_GET) ? + DRV_MSG_CODE_AFEX_LISTGET_ACK : + DRV_MSG_CODE_AFEX_LISTSET_ACK; + + /* if ramrod can not be sent, respond to MCP immediately for + * SET and GET requests (other are not triggered from MCP) + */ + rc = bnx2x_func_state_change(bp, &func_params); + if (rc < 0) + bnx2x_fw_command(bp, drv_msg_code, 0); + + return 0; +} + +static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) +{ + struct afex_stats afex_stats; + u32 func = BP_ABS_FUNC(bp); + u32 mf_config; + u16 vlan_val; + u32 vlan_prio; + u16 vif_id; + u8 allowed_prio; + u8 vlan_mode; + u32 addr_to_write, vifid, addrs, stats_type, i; + + if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) { + vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + DP(BNX2X_MSG_MCP, + "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid); + bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0); + } + + if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) { + vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]); + DP(BNX2X_MSG_MCP, + "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n", + vifid, addrs); + bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid, + addrs); + } + + if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) { + addr_to_write = SHMEM2_RD(bp, + afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]); + stats_type = SHMEM2_RD(bp, + afex_param1_to_driver[BP_FW_MB_IDX(bp)]); + + DP(BNX2X_MSG_MCP, + "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n", + addr_to_write); + + bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type); + + /* write response to scratchpad, for MCP */ + for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++) + REG_WR(bp, addr_to_write + i*sizeof(u32), + *(((u32 *)(&afex_stats))+i)); + + /* send ack message to MCP */ + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0); + } + + if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) { + mf_config = MF_CFG_RD(bp, func_mf_config[func].config); + bp->mf_config[BP_VN(bp)] = mf_config; + DP(BNX2X_MSG_MCP, + "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n", + mf_config); + + /* if VIF_SET is "enabled" */ + if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) { + /* set rate limit directly to internal RAM */ + struct cmng_init_input cmng_input; + struct rate_shaping_vars_per_vn m_rs_vn; + size_t size = sizeof(struct rate_shaping_vars_per_vn); + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp)); + + bp->mf_config[BP_VN(bp)] = mf_config; + + bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input); + m_rs_vn.vn_counter.rate = + cmng_input.vnic_max_rate[BP_VN(bp)]; + m_rs_vn.vn_counter.quota = + (m_rs_vn.vn_counter.rate * + RS_PERIODIC_TIMEOUT_USEC) / 8; + + __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn); + + /* read relevant values from mf_cfg struct in shmem */ + vif_id = + (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK) >> + FUNC_MF_CFG_E1HOV_TAG_SHIFT; + vlan_val = + (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_AFEX_VLAN_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_SHIFT; + vlan_prio = 
(mf_config & + FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> + FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT; + vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT); + vlan_mode = + (MF_CFG_RD(bp, + func_mf_config[func].afex_config) & + FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> + FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT; + allowed_prio = + (MF_CFG_RD(bp, + func_mf_config[func].afex_config) & + FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> + FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT; + + /* send ramrod to FW, return in case of failure */ + if (bnx2x_afex_func_update(bp, vif_id, vlan_val, + allowed_prio)) + return; + + bp->afex_def_vlan_tag = vlan_val; + bp->afex_vlan_mode = vlan_mode; + } else { + /* notify link down because BP->flags is disabled */ + bnx2x_link_report(bp); + + /* send INVALID VIF ramrod to FW */ + bnx2x_afex_func_update(bp, 0xFFFF, 0, 0); + + /* Reset the default afex VLAN */ + bp->afex_def_vlan_tag = -1; + } + } +} + +static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) +{ + struct bnx2x_func_switch_update_params *switch_update_params; + struct bnx2x_func_state_params func_params; + + memset(&func_params, 0, sizeof(struct bnx2x_func_state_params)); + switch_update_params = &func_params.params.switch_update; + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + + if (IS_MF_UFP(bp)) { + int func = BP_ABS_FUNC(bp); + u32 val; + + /* Re-learn the S-tag from shmem */ + val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK; + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_ov = val; + } else { + BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n"); + goto fail; + } + + /* Configure new S-tag in LLH */ + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8, + bp->mf_ov); + + /* Send Ramrod to update FW of change */ + __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + &switch_update_params->changes); + switch_update_params->vlan = bp->mf_ov; + + if (bnx2x_func_state_change(bp, &func_params) < 0) { + BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", + bp->mf_ov); + goto fail; + } + + DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov); + + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); + + return; + } + + /* not supported by SW yet */ +fail: + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); +} + +static void bnx2x_pmf_update(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 val; + + bp->port.pmf = 1; + DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf); + + /* + * We need the mb() to ensure the ordering between the writing to + * bp->port.pmf here and reading it from the bnx2x_periodic_task(). + */ + smp_mb(); + + /* queue a periodic task */ + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + + bnx2x_dcbx_pmf_update(bp); + + /* enable nig attention */ + val = (0xff0f | (1 << (BP_VN(bp) + 4))); + if (bp->common.int_block == INT_BLOCK_HC) { + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); + } else if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); + } + + bnx2x_stats_handle(bp, STATS_EVENT_PMF); +} + +/* end of Link */ + +/* slow path */ + +/* + * General service functions + */ + +/* send the MCP a request, block until there is a reply */ +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) +{ + int mb_idx = BP_FW_MB_IDX(bp); + u32 seq; + u32 rc = 0; + u32 cnt = 1; + u8 delay = CHIP_REV_IS_SLOW(bp) ? 
100 : 10; + + mutex_lock(&bp->fw_mb_mutex); + seq = ++bp->fw_seq; + SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); + SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); + + DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", + (command | seq), param); + + do { + /* let the FW do its magic ... */ + msleep(delay); + + rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header); + + /* Give the FW up to 5 seconds (500*10ms) */ + } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); + + DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", + cnt*delay, rc, seq); + + /* is this a reply to our command? */ + if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) + rc &= FW_MSG_CODE_MASK; + else { + /* FW BUG! */ + BNX2X_ERR("FW failed to respond!\n"); + bnx2x_fw_dump(bp); + rc = 0; + } + mutex_unlock(&bp->fw_mb_mutex); + + return rc; +} + +static void storm_memset_func_cfg(struct bnx2x *bp, + struct tstorm_eth_function_common_config *tcfg, + u16 abs_fid) +{ + size_t size = sizeof(struct tstorm_eth_function_common_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); + + __storm_memset_struct(bp, addr, size, (u32 *)tcfg); +} + +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) +{ + if (CHIP_IS_E1x(bp)) { + struct tstorm_eth_function_common_config tcfg = {0}; + + storm_memset_func_cfg(bp, &tcfg, p->func_id); + } + + /* Enable the function in the FW */ + storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); + storm_memset_func_en(bp, p->func_id, 1); + + /* spq */ + if (p->func_flgs & FUNC_FLG_SPQ) { + storm_memset_spq_addr(bp, p->spq_map, p->func_id); + REG_WR(bp, XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); + } +} + +/** + * bnx2x_get_common_flags - Return common flags + * + * @bp device handle + * @fp queue handle + * @zero_stats TRUE if statistics zeroing is needed + * + * Return the flags that are common for the Tx-only and not normal connections. + */ +static unsigned long bnx2x_get_common_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool zero_stats) +{ + unsigned long flags = 0; + + /* PF driver will always initialize the Queue to an ACTIVE state */ + __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); + + /* tx only connections collect statistics (on the same index as the + * parent connection). The statistics are zeroed when the parent + * connection is initialized. 
+ */ + + __set_bit(BNX2X_Q_FLG_STATS, &flags); + if (zero_stats) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + + if (bp->flags & TX_SWITCHING) + __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags); + + __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags); + __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); + +#ifdef BNX2X_STOP_ON_ERROR + __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); +#endif + + return flags; +} + +static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool leading) +{ + unsigned long flags = 0; + + /* calculate other queue flags */ + if (IS_MF_SD(bp)) + __set_bit(BNX2X_Q_FLG_OV, &flags); + + if (IS_FCOE_FP(fp)) { + __set_bit(BNX2X_Q_FLG_FCOE, &flags); + /* For FCoE - force usage of default priority (for afex) */ + __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); + } + + if (fp->mode != TPA_MODE_DISABLED) { + __set_bit(BNX2X_Q_FLG_TPA, &flags); + __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); + if (fp->mode == TPA_MODE_GRO) + __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags); + } + + if (leading) { + __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); + __set_bit(BNX2X_Q_FLG_MCAST, &flags); + } + + /* Always set HW VLAN stripping */ + __set_bit(BNX2X_Q_FLG_VLAN, &flags); + + /* configure silent vlan removal */ + if (IS_MF_AFEX(bp)) + __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags); + + return flags | bnx2x_get_common_flags(bp, fp, true); +} + +static void bnx2x_pf_q_prep_general(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, + u8 cos) +{ + gen_init->stat_id = bnx2x_stats_id(fp); + gen_init->spcl_id = fp->cl_id; + + /* Always use mini-jumbo MTU for FCoE L2 ring */ + if (IS_FCOE_FP(fp)) + gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; + else + gen_init->mtu = bp->dev->mtu; + + gen_init->cos = cos; + + gen_init->fp_hsi = ETH_FP_HSI_VERSION; +} + +static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, + struct bnx2x_rxq_setup_params *rxq_init) +{ + u8 max_sge = 0; + u16 sge_sz = 0; + u16 tpa_agg_size = 0; + + if (fp->mode != TPA_MODE_DISABLED) { + pause->sge_th_lo = SGE_TH_LO(bp); + pause->sge_th_hi = SGE_TH_HI(bp); + + /* validate SGE ring has enough to cross high threshold */ + WARN_ON(bp->dropless_fc && + pause->sge_th_hi + FW_PREFETCH_CNT > + MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES); + + tpa_agg_size = TPA_AGG_SIZE; + max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >> + SGE_PAGE_SHIFT; + max_sge = ((max_sge + PAGES_PER_SGE - 1) & + (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; + sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff); + } + + /* pause - not for e1 */ + if (!CHIP_IS_E1(bp)) { + pause->bd_th_lo = BD_TH_LO(bp); + pause->bd_th_hi = BD_TH_HI(bp); + + pause->rcq_th_lo = RCQ_TH_LO(bp); + pause->rcq_th_hi = RCQ_TH_HI(bp); + /* + * validate that rings have enough entries to cross + * high thresholds + */ + WARN_ON(bp->dropless_fc && + pause->bd_th_hi + FW_PREFETCH_CNT > + bp->rx_ring_size); + WARN_ON(bp->dropless_fc && + pause->rcq_th_hi + FW_PREFETCH_CNT > + NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT); + + pause->pri_map = 1; + } + + /* rxq setup */ + rxq_init->dscr_map = fp->rx_desc_mapping; + rxq_init->sge_map = fp->rx_sge_mapping; + rxq_init->rcq_map = fp->rx_comp_mapping; + rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; + + /* This should be a maximum number of data bytes that may be + * placed on the BD (not including paddings). 
+ */ + rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START - + BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING; + + rxq_init->cl_qzone_id = fp->cl_qzone_id; + rxq_init->tpa_agg_sz = tpa_agg_size; + rxq_init->sge_buf_sz = sge_sz; + rxq_init->max_sges_pkt = max_sge; + rxq_init->rss_engine_id = BP_FUNC(bp); + rxq_init->mcast_engine_id = BP_FUNC(bp); + + /* Maximum number of simultaneous TPA aggregations for this Queue. + * + * For PF Clients it should be the maximum available number. + * VF driver(s) may want to define it to a smaller value. + */ + rxq_init->max_tpa_queues = MAX_AGG_QS(bp); + + rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; + rxq_init->fw_sb_id = fp->fw_sb_id; + + if (IS_FCOE_FP(fp)) + rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; + else + rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + /* configure silent vlan removal + * if multi function mode is afex, then mask default vlan + */ + if (IS_MF_AFEX(bp)) { + rxq_init->silent_removal_value = bp->afex_def_vlan_tag; + rxq_init->silent_removal_mask = VLAN_VID_MASK; + } +} + +static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, + u8 cos) +{ + txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; + txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; + txq_init->fw_sb_id = fp->fw_sb_id; + + /* + * set the tss leading client id for TX classification == + * leading RSS client id + */ + txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); + + if (IS_FCOE_FP(fp)) { + txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; + txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; + } +} + +static void bnx2x_pf_init(struct bnx2x *bp) +{ + struct bnx2x_func_init_params func_init = {0}; + struct event_ring_data eq_data = { {0} }; + u16 flags; + + if (!CHIP_IS_E1x(bp)) { + /* reset IGU PF statistics: MSIX + ATTN */ + /* PF */ + REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + BNX2X_IGU_STAS_MSG_VF_CNT*4 + + (CHIP_MODE_IS_4_PORT(bp) ? + BP_FUNC(bp) : BP_VN(bp))*4, 0); + /* ATTN */ + REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + + BNX2X_IGU_STAS_MSG_VF_CNT*4 + + BNX2X_IGU_STAS_MSG_PF_CNT*4 + + (CHIP_MODE_IS_4_PORT(bp) ? + BP_FUNC(bp) : BP_VN(bp))*4, 0); + } + + /* function setup flags */ + flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); + + /* This flag is relevant for E1x only. + * E2 doesn't have a TPA configuration in a function level. + */ + flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0; + + func_init.func_flgs = flags; + func_init.pf_id = BP_FUNC(bp); + func_init.func_id = BP_FUNC(bp); + func_init.spq_map = bp->spq_mapping; + func_init.spq_prod = bp->spq_prod_idx; + + bnx2x_func_init(bp, &func_init); + + memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); + + /* + * Congestion management values depend on the link rate. + * There is no active link so initial link rate is set to 10 Gbps. + * When the link comes up, the congestion management values are + * re-calculated according to the actual link rate. 
+ */ + bp->link_vars.line_speed = SPEED_10000; + bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); + + /* Only the PMF sets the HW */ + if (bp->port.pmf) + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); + + /* init Event Queue - PCI bus guarantees correct endianity*/ + eq_data.base_addr.hi = U64_HI(bp->eq_mapping); + eq_data.base_addr.lo = U64_LO(bp->eq_mapping); + eq_data.producer = bp->eq_prod; + eq_data.index_id = HC_SP_INDEX_EQ_CONS; + eq_data.sb_id = DEF_SB_ID; + storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp)); +} + +static void bnx2x_e1h_disable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + bnx2x_tx_disable(bp); + + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); +} + +static void bnx2x_e1h_enable(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); + + /* Tx queue should be only re-enabled */ + netif_tx_wake_all_queues(bp->dev); + + /* + * Should not call netif_carrier_on since it will be called if the link + * is up when checking for link state + */ +} + +#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 + +static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) +{ + struct eth_stats_info *ether_stat = + &bp->slowpath->drv_info_to_mcp.ether_stat; + struct bnx2x_vlan_mac_obj *mac_obj = + &bp->sp_objs->mac_obj; + int i; + + strlcpy(ether_stat->version, DRV_MODULE_VERSION, + ETH_STAT_INFO_VERSION_LEN); + + /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the + * mac_local field in ether_stat struct. The base address is offset by 2 + * bytes to account for the field being 8 bytes but a mac address is + * only 6 bytes. Likewise, the stride for the get_n_elements function is + * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes + * allocated by the ether_stat struct, so the macs will land in their + * proper positions. + */ + for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) + memset(ether_stat->mac_local + i, 0, + sizeof(ether_stat->mac_local[0])); + mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local + MAC_PAD, MAC_PAD, + ETH_ALEN); + ether_stat->mtu_size = bp->dev->mtu; + if (bp->dev->features & NETIF_F_RXCSUM) + ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; + if (bp->dev->features & NETIF_F_TSO) + ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; + ether_stat->feature_flags |= bp->common.boot_mode; + + ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; + + ether_stat->txq_size = bp->tx_ring_size; + ether_stat->rxq_size = bp->rx_ring_size; + +#ifdef CONFIG_BNX2X_SRIOV + ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; +#endif +} + +static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) +{ + struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; + struct fcoe_stats_info *fcoe_stat = + &bp->slowpath->drv_info_to_mcp.fcoe_stat; + + if (!CNIC_LOADED(bp)) + return; + + memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); + + fcoe_stat->qos_priority = + app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; + + /* insert FCoE stats from ramrod response */ + if (!NO_FCOE(bp)) { + struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. + tstorm_queue_statistics; + + struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 
+ xstorm_queue_statistics; + + struct fcoe_statistics_params *fw_fcoe_stat = + &bp->fw_stats_data->fcoe; + + ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, + fcoe_stat->rx_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + + ADD_64_LE(fcoe_stat->rx_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + fcoe_stat->rx_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); + + ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, + fcoe_stat->rx_frames_lo, + fcoe_q_tstorm_stats->rcv_mcast_pkts); + + ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, + fcoe_stat->tx_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + + ADD_64_LE(fcoe_stat->tx_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + fcoe_stat->tx_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); + + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); + + ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, + fcoe_stat->tx_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); + } + + /* ask L5 driver to add data to the struct */ + bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); +} + +static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) +{ + struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; + struct iscsi_stats_info *iscsi_stat = + &bp->slowpath->drv_info_to_mcp.iscsi_stat; + + if (!CNIC_LOADED(bp)) + return; + + memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, + ETH_ALEN); + + iscsi_stat->qos_priority = + app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; + + /* ask L5 driver to add data to the struct */ + bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); +} + +/* called due to MCP event (on pmf): + * reread new bandwidth configuration + * configure FW + * notify others function about the change + */ +static void bnx2x_config_mf_bw(struct bnx2x *bp) +{ + if (bp->link_vars.link_up) { + bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); + bnx2x_link_sync_notify(bp); + } + storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); +} + +static void bnx2x_set_mf_bw(struct bnx2x *bp) +{ + bnx2x_config_mf_bw(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); +} + +static void bnx2x_handle_eee_event(struct bnx2x *bp) +{ + DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); +} + +#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20) +#define 
BNX2X_UPDATE_DRV_INFO_IND_COUNT (25) + +static void bnx2x_handle_drv_info_req(struct bnx2x *bp) +{ + enum drv_info_opcode op_code; + u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); + bool release = false; + int wait; + + /* if drv_info version supported by MFW doesn't match - send NACK */ + if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { + bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); + return; + } + + op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> + DRV_INFO_CONTROL_OP_CODE_SHIFT; + + /* Must prevent other flows from accessing drv_info_to_mcp */ + mutex_lock(&bp->drv_info_mutex); + + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + + switch (op_code) { + case ETH_STATS_OPCODE: + bnx2x_drv_info_ether_stat(bp); + break; + case FCOE_STATS_OPCODE: + bnx2x_drv_info_fcoe_stat(bp); + break; + case ISCSI_STATS_OPCODE: + bnx2x_drv_info_iscsi_stat(bp); + break; + default: + /* if op code isn't supported - send NACK */ + bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); + goto out; + } + + /* if we got drv_info attn from MFW then these fields are defined in + * shmem2 for sure + */ + SHMEM2_WR(bp, drv_info_host_addr_lo, + U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); + SHMEM2_WR(bp, drv_info_host_addr_hi, + U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); + + bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); + + /* Since possible management wants both this and get_driver_version + * need to wait until management notifies us it finished utilizing + * the buffer. + */ + if (!SHMEM2_HAS(bp, mfw_drv_indication)) { + DP(BNX2X_MSG_MCP, "Management does not support indication\n"); + } else if (!bp->drv_info_mng_owner) { + u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); + + for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { + u32 indication = SHMEM2_RD(bp, mfw_drv_indication); + + /* Management is done; need to clear indication */ + if (indication & bit) { + SHMEM2_WR(bp, mfw_drv_indication, + indication & ~bit); + release = true; + break; + } + + msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); + } + } + if (!release) { + DP(BNX2X_MSG_MCP, "Management did not release indication\n"); + bp->drv_info_mng_owner = true; + } + +out: + mutex_unlock(&bp->drv_info_mutex); +} + +static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) +{ + u8 vals[4]; + int i = 0; + + if (bnx2x_format) { + i = sscanf(version, "1.%c%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + if (i > 0) + vals[0] -= '0'; + } else { + i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", + &vals[0], &vals[1], &vals[2], &vals[3]); + } + + while (i < 4) + vals[i++] = 0; + + return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; +} + +void bnx2x_update_mng_version(struct bnx2x *bp) +{ + u32 iscsiver = DRV_VER_NOT_LOADED; + u32 fcoever = DRV_VER_NOT_LOADED; + u32 ethver = DRV_VER_NOT_LOADED; + int idx = BP_FW_MB_IDX(bp); + u8 *version; + + if (!SHMEM2_HAS(bp, func_os_drv_ver)) + return; + + mutex_lock(&bp->drv_info_mutex); + /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ + if (bp->drv_info_mng_owner) + goto out; + + if (bp->state != BNX2X_STATE_OPEN) + goto out; + + /* Parse ethernet driver version */ + ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); + if (!CNIC_LOADED(bp)) + goto out; + + /* Try getting storage driver version via cnic */ + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_iscsi_stat(bp); + version = 
bp->slowpath->drv_info_to_mcp.iscsi_stat.version; + iscsiver = bnx2x_update_mng_version_utility(version, false); + + memset(&bp->slowpath->drv_info_to_mcp, 0, + sizeof(union drv_info_to_mcp)); + bnx2x_drv_info_fcoe_stat(bp); + version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; + fcoever = bnx2x_update_mng_version_utility(version, false); + +out: + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); + SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); + + mutex_unlock(&bp->drv_info_mutex); + + DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", + ethver, iscsiver, fcoever); +} + +static void bnx2x_oem_event(struct bnx2x *bp, u32 event) +{ + u32 cmd_ok, cmd_fail; + + /* sanity */ + if (event & DRV_STATUS_DCC_EVENT_MASK && + event & DRV_STATUS_OEM_EVENT_MASK) { + BNX2X_ERR("Received simultaneous events %08x\n", event); + return; + } + + if (event & DRV_STATUS_DCC_EVENT_MASK) { + cmd_fail = DRV_MSG_CODE_DCC_FAILURE; + cmd_ok = DRV_MSG_CODE_DCC_OK; + } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ { + cmd_fail = DRV_MSG_CODE_OEM_FAILURE; + cmd_ok = DRV_MSG_CODE_OEM_OK; + } + + DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); + + if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF | + DRV_STATUS_OEM_DISABLE_ENABLE_PF)) { + /* This is the only place besides the function initialization + * where the bp->flags can change so it is done without any + * locks + */ + if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { + DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); + bp->flags |= MF_FUNC_DIS; + + bnx2x_e1h_disable(bp); + } else { + DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); + bp->flags &= ~MF_FUNC_DIS; + + bnx2x_e1h_enable(bp); + } + event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF | + DRV_STATUS_OEM_DISABLE_ENABLE_PF); + } + + if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | + DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) { + bnx2x_config_mf_bw(bp); + event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | + DRV_STATUS_OEM_BANDWIDTH_ALLOCATION); + } + + /* Report results to MCP */ + if (event) + bnx2x_fw_command(bp, cmd_fail, 0); + else + bnx2x_fw_command(bp, cmd_ok, 0); +} + +/* must be called under the spq lock */ +static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) +{ + struct eth_spe *next_spe = bp->spq_prod_bd; + + if (bp->spq_prod_bd == bp->spq_last_bd) { + bp->spq_prod_bd = bp->spq; + bp->spq_prod_idx = 0; + DP(BNX2X_MSG_SP, "end of spq\n"); + } else { + bp->spq_prod_bd++; + bp->spq_prod_idx++; + } + return next_spe; +} + +/* must be called under the spq lock */ +static void bnx2x_sp_prod_update(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + + /* + * Make sure that BD data is updated before writing the producer: + * BD data is written to the memory, the producer is read from the + * memory, thus we need a full memory barrier to ensure the ordering. 
+ */ + mb(); + + REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), + bp->spq_prod_idx); + mmiowb(); +} + +/** + * bnx2x_is_contextless_ramrod - check if the current command ends on EQ + * + * @cmd: command to check + * @cmd_type: command type + */ +static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) +{ + if ((cmd_type == NONE_CONNECTION_TYPE) || + (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || + (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || + (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || + (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || + (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || + (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) + return true; + else + return false; +} + +/** + * bnx2x_sp_post - place a single command on an SP ring + * + * @bp: driver handle + * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) + * @cid: SW CID the command is related to + * @data_hi: command private data address (high 32 bits) + * @data_lo: command private data address (low 32 bits) + * @cmd_type: command type (e.g. NONE, ETH) + * + * SP data is handled as if it's always an address pair, thus data fields are + * not swapped to little endian in upper functions. Instead this function swaps + * data as if it's two u32 fields. + */ +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int cmd_type) +{ + struct eth_spe *spe; + u16 type; + bool common = bnx2x_is_contextless_ramrod(command, cmd_type); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) { + BNX2X_ERR("Can't post SP when there is panic\n"); + return -EIO; + } +#endif + + spin_lock_bh(&bp->spq_lock); + + if (common) { + if (!atomic_read(&bp->eq_spq_left)) { + BNX2X_ERR("BUG! EQ ring full!\n"); + spin_unlock_bh(&bp->spq_lock); + bnx2x_panic(); + return -EBUSY; + } + } else if (!atomic_read(&bp->cq_spq_left)) { + BNX2X_ERR("BUG! SPQ ring full!\n"); + spin_unlock_bh(&bp->spq_lock); + bnx2x_panic(); + return -EBUSY; + } + + spe = bnx2x_sp_get_next(bp); + + /* CID needs port number to be encoded int it */ + spe->hdr.conn_and_cmd_data = + cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | + HW_CID(bp, cid)); + + /* In some cases, type may already contain the func-id + * mainly in SRIOV related use cases, so we add it here only + * if it's not already set. + */ + if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & + SPE_HDR_CONN_TYPE; + type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID); + } else { + type = cmd_type; + } + + spe->hdr.type = cpu_to_le16(type); + + spe->data.update_data_addr.hi = cpu_to_le32(data_hi); + spe->data.update_data_addr.lo = cpu_to_le32(data_lo); + + /* + * It's ok if the actual decrement is issued towards the memory + * somewhere between the spin_lock and spin_unlock. Thus no + * more explicit memory barrier is needed. + */ + if (common) + atomic_dec(&bp->eq_spq_left); + else + atomic_dec(&bp->cq_spq_left); + + DP(BNX2X_MSG_SP, + "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", + bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), + (u32)(U64_LO(bp->spq_mapping) + + (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, + HW_CID(bp, cid), data_hi, data_lo, type, + atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); + + bnx2x_sp_prod_update(bp); + spin_unlock_bh(&bp->spq_lock); + return 0; +} + +/* acquire split MCP access lock register */ +static int bnx2x_acquire_alr(struct bnx2x *bp) +{ + u32 j, val; + int rc = 0; + + might_sleep(); + for (j = 0; j < 1000; j++) { + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); + val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); + if (val & MCPR_ACCESS_LOCK_LOCK) + break; + + usleep_range(5000, 10000); + } + if (!(val & MCPR_ACCESS_LOCK_LOCK)) { + BNX2X_ERR("Cannot acquire MCP access lock register\n"); + rc = -EBUSY; + } + + return rc; +} + +/* release split MCP access lock register */ +static void bnx2x_release_alr(struct bnx2x *bp) +{ + REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); +} + +#define BNX2X_DEF_SB_ATT_IDX 0x0001 +#define BNX2X_DEF_SB_IDX 0x0002 + +static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) +{ + struct host_sp_status_block *def_sb = bp->def_status_blk; + u16 rc = 0; + + barrier(); /* status block is written to by the chip */ + if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { + bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; + rc |= BNX2X_DEF_SB_ATT_IDX; + } + + if (bp->def_idx != def_sb->sp_sb.running_index) { + bp->def_idx = def_sb->sp_sb.running_index; + rc |= BNX2X_DEF_SB_IDX; + } + + /* Do not reorder: indices reading should complete before handling */ + barrier(); + return rc; +} + +/* + * slow path service functions + */ + +static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) +{ + int port = BP_PORT(bp); + u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : + NIG_REG_MASK_INTERRUPT_PORT0; + u32 aeu_mask; + u32 nig_mask = 0; + u32 reg_addr; + + if (bp->attn_state & asserted) + BNX2X_ERR("IGU ERROR\n"); + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, aeu_addr); + + DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", + aeu_mask, asserted); + aeu_mask &= ~(asserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + + REG_WR(bp, aeu_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state |= asserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); + + if (asserted & ATTN_HARD_WIRED_MASK) { + if (asserted & ATTN_NIG_FOR_FUNC) { + + bnx2x_acquire_phy_lock(bp); + + /* save nig interrupt mask */ + nig_mask = REG_RD(bp, nig_int_mask_addr); + + /* If nig_mask is not set, no need to call the update + * function. + */ + if (nig_mask) { + REG_WR(bp, nig_int_mask_addr, 0); + + bnx2x_link_attn(bp); + } + + /* handle unicore attn? 
*/ + } + if (asserted & ATTN_SW_TIMER_4_FUNC) + DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); + + if (asserted & GPIO_2_FUNC) + DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); + + if (asserted & GPIO_3_FUNC) + DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); + + if (asserted & GPIO_4_FUNC) + DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); + + if (port == 0) { + if (asserted & ATTN_GENERAL_ATTN_1) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_2) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_3) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); + } + } else { + if (asserted & ATTN_GENERAL_ATTN_4) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_5) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); + } + if (asserted & ATTN_GENERAL_ATTN_6) { + DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); + } + } + + } /* if hardwired */ + + if (bp->common.int_block == INT_BLOCK_HC) + reg_addr = (HC_REG_COMMAND_REG + port*32 + + COMMAND_REG_ATTN_BITS_SET); + else + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); + + DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, + (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); + REG_WR(bp, reg_addr, asserted); + + /* now set back the mask */ + if (asserted & ATTN_NIG_FOR_FUNC) { + /* Verify that IGU ack through BAR was written before restoring + * NIG mask. This loop should exit after 2-3 iterations max. + */ + if (bp->common.int_block != INT_BLOCK_HC) { + u32 cnt = 0, igu_acked; + do { + igu_acked = REG_RD(bp, + IGU_REG_ATTENTION_ACK_BITS); + } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && + (++cnt < MAX_IGU_ATTN_ACK_TO)); + if (!igu_acked) + DP(NETIF_MSG_HW, + "Failed to verify IGU ack on time\n"); + barrier(); + } + REG_WR(bp, nig_int_mask_addr, nig_mask); + bnx2x_release_phy_lock(bp); + } +} + +static void bnx2x_fan_failure(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 ext_phy_config; + /* mark the failure */ + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + + ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; + ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; + SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, + ext_phy_config); + + /* log the failure */ + netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" + "Please contact OEM Support for assistance\n"); + + /* Schedule device reset (unload) + * This is due to some boards consuming sufficient power when driver is + * up to overheat if fan fails. + */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0); +} + +static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) +{ + int port = BP_PORT(bp); + int reg_offset; + u32 val; + + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + + if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { + + val = REG_RD(bp, reg_offset); + val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("SPIO5 hw attention\n"); + + /* Fan failure attention */ + bnx2x_hw_reset_phy(&bp->link_params); + bnx2x_fan_failure(bp); + } + + if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { + bnx2x_acquire_phy_lock(bp); + bnx2x_handle_module_detect_int(&bp->link_params); + bnx2x_release_phy_lock(bp); + } + + if (attn & HW_INTERRUT_ASSERT_SET_0) { + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set0 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); + bnx2x_panic(); + } +} + +static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { + + val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); + BNX2X_ERR("DB hw attention 0x%x\n", val); + /* DORQ discard attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from DORQ\n"); + } + + if (attn & HW_INTERRUT_ASSERT_SET_1) { + + int port = BP_PORT(bp); + int reg_offset; + + reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set1 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); + bnx2x_panic(); + } +} + +static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { + + val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); + BNX2X_ERR("CFC hw attention 0x%x\n", val); + /* CFC error attention */ + if (val & 0x2) + BNX2X_ERR("FATAL error from CFC\n"); + } + + if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { + val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); + BNX2X_ERR("PXP hw attention-0 0x%x\n", val); + /* RQ_USDMDP_FIFO_OVERFLOW */ + if (val & 0x18000) + BNX2X_ERR("FATAL error from PXP\n"); + + if (!CHIP_IS_E1x(bp)) { + val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); + BNX2X_ERR("PXP hw attention-1 0x%x\n", val); + } + } + + if (attn & HW_INTERRUT_ASSERT_SET_2) { + + int port = BP_PORT(bp); + int reg_offset; + + reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); + + val = REG_RD(bp, reg_offset); + val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + REG_WR(bp, reg_offset, val); + + BNX2X_ERR("FATAL HW block attention set2 0x%x\n", + (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); + bnx2x_panic(); + } +} + +static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) +{ + u32 val; + + if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { + + if (attn & BNX2X_PMF_LINK_ASSERT) { + int func = BP_FUNC(bp); + + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + bnx2x_read_mf_cfg(bp); + bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, + func_mf_config[BP_ABS_FUNC(bp)].config); + val = SHMEM_RD(bp, + func_mb[BP_FW_MB_IDX(bp)].drv_status); + + if (val & (DRV_STATUS_DCC_EVENT_MASK | + DRV_STATUS_OEM_EVENT_MASK)) + bnx2x_oem_event(bp, + (val & (DRV_STATUS_DCC_EVENT_MASK | + DRV_STATUS_OEM_EVENT_MASK))); + + if (val & DRV_STATUS_SET_MF_BW) + bnx2x_set_mf_bw(bp); + + if (val & DRV_STATUS_DRV_INFO_REQ) + bnx2x_handle_drv_info_req(bp); + + if (val & DRV_STATUS_VF_DISABLED) + bnx2x_schedule_iov_task(bp, + BNX2X_IOV_HANDLE_FLR); + + if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) + bnx2x_pmf_update(bp); + + if (bp->port.pmf && + (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && + bp->dcbx_enabled > 0) + /* start dcbx state machine */ + bnx2x_dcbx_set_params(bp, + BNX2X_DCBX_STATE_NEG_RECEIVED); + if (val & DRV_STATUS_AFEX_EVENT_MASK) + bnx2x_handle_afex_cmd(bp, + val & DRV_STATUS_AFEX_EVENT_MASK); + if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) + bnx2x_handle_eee_event(bp); + + if (val & DRV_STATUS_OEM_UPDATE_SVID) + bnx2x_handle_update_svid_cmd(bp); + + if (bp->link_vars.periodic_flags & + PERIODIC_FLAGS_LINK_EVENT) { + /* sync with link */ + bnx2x_acquire_phy_lock(bp); + bp->link_vars.periodic_flags &= + ~PERIODIC_FLAGS_LINK_EVENT; + bnx2x_release_phy_lock(bp); + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + bnx2x_link_report(bp); + } + /* Always call it here: bnx2x_link_report() will + * prevent the link indication duplication. + */ + bnx2x__link_status_update(bp); + } else if (attn & BNX2X_MC_ASSERT_BITS) { + + BNX2X_ERR("MC assert!\n"); + bnx2x_mc_assert(bp); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); + bnx2x_panic(); + + } else if (attn & BNX2X_MCP_ASSERT) { + + BNX2X_ERR("MCP assert!\n"); + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); + bnx2x_fw_dump(bp); + + } else + BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); + } + + if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { + BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); + if (attn & BNX2X_GRC_TIMEOUT) { + val = CHIP_IS_E1(bp) ? 0 : + REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); + BNX2X_ERR("GRC time-out 0x%08x\n", val); + } + if (attn & BNX2X_GRC_RSV) { + val = CHIP_IS_E1(bp) ? 0 : + REG_RD(bp, MISC_REG_GRC_RSV_ATTN); + BNX2X_ERR("GRC reserved 0x%08x\n", val); + } + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); + } +} + +/* + * Bits map: + * 0-7 - Engine0 load counter. + * 8-15 - Engine1 load counter. + * 16 - Engine0 RESET_IN_PROGRESS bit. + * 17 - Engine1 RESET_IN_PROGRESS bit. + * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function + * on the engine + * 19 - Engine1 ONE_IS_LOADED. + * 20 - Chip reset flow bit. When set none-leader must wait for both engines + * leader to complete (check for both RESET_IN_PROGRESS bits and not for + * just the one belonging to its engine). 
+ * + */ +#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 + +#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff +#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 +#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 +#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 +#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 +#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 +#define BNX2X_GLOBAL_RESET_BIT 0x00040000 + +/* + * Set the GLOBAL_RESET bit. + * + * Should be run under rtnl lock + */ +void bnx2x_set_reset_global(struct bnx2x *bp) +{ + u32 val; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* + * Clear the GLOBAL_RESET bit. + * + * Should be run under rtnl lock + */ +static void bnx2x_clear_reset_global(struct bnx2x *bp) +{ + u32 val; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* + * Checks the GLOBAL_RESET bit. + * + * should be run under rtnl lock + */ +static bool bnx2x_reset_is_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); + return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; +} + +/* + * Clear RESET_IN_PROGRESS bit for the current engine. + * + * Should be run under rtnl lock + */ +static void bnx2x_set_reset_done(struct bnx2x *bp) +{ + u32 val; + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + /* Clear the bit */ + val &= ~bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* + * Set RESET_IN_PROGRESS for the current engine. + * + * should be run under rtnl lock + */ +void bnx2x_set_reset_in_progress(struct bnx2x *bp) +{ + u32 val; + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + /* Set the bit */ + val |= bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/* + * Checks the RESET_IN_PROGRESS bit for the given engine. + * should be run under rtnl lock + */ +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = engine ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* return false if bit is set */ + return (val & bit) ? false : true; +} + +/* + * set pf load for the current pf. + * + * should be run under rtnl lock + */ +void bnx2x_set_pf_load(struct bnx2x *bp) +{ + u32 val1, val; + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* set bit of that PF */ + val1 |= (1 << bp->pf_num); + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); +} + +/** + * bnx2x_clear_pf_load - clear pf load mark + * + * @bp: driver handle + * + * Should be run under rtnl lock. + * Decrements the load counter for the current engine. Returns + * whether other functions are still loaded + */ +bool bnx2x_clear_pf_load(struct bnx2x *bp) +{ + u32 val1, val; + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); + + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* clear bit of that PF */ + val1 &= ~(1 << bp->pf_num); + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); + return val1 != 0; +} + +/* + * Read the load status for the current engine. + * + * should be run under rtnl lock + */ +static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) +{ + u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK); + u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); + + val = (val & mask) >> shift; + + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", + engine, val); + + return val != 0; +} + +static void _print_parity(struct bnx2x *bp, u32 reg) +{ + pr_cont(" [0x%08x] ", REG_RD(bp, reg)); +} + +static void _print_next_block(int idx, const char *blk) +{ + pr_cont("%s%s", idx ? ", " : "", blk); +} + +static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, + int *par_num, bool print) +{ + u32 cur_bit; + bool res; + int i; + + res = false; + + for (i = 0; sig; i++) { + cur_bit = (0x1UL << i); + if (sig & cur_bit) { + res |= true; /* Each bit is real error! 
*/ + + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: + _print_next_block((*par_num)++, "BRB"); + _print_parity(bp, + BRB1_REG_BRB1_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: + _print_next_block((*par_num)++, + "PARSER"); + _print_parity(bp, PRS_REG_PRS_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: + _print_next_block((*par_num)++, "TSDM"); + _print_parity(bp, + TSDM_REG_TSDM_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: + _print_next_block((*par_num)++, + "SEARCHER"); + _print_parity(bp, SRC_REG_SRC_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + _print_next_block((*par_num)++, "TCM"); + _print_parity(bp, TCM_REG_TCM_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "TSEMI"); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_0); + _print_parity(bp, + TSEM_REG_TSEM_PRTY_STS_1); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, "XPB"); + _print_parity(bp, GRCBASE_XPB + + PB_REG_PB_PRTY_STS); + break; + } + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return res; +} + +static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, + bool print) +{ + u32 cur_bit; + bool res; + int i; + + res = false; + + for (i = 0; sig; i++) { + cur_bit = (0x1UL << i); + if (sig & cur_bit) { + res |= true; /* Each bit is real error! */ + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "PBF"); + _print_parity(bp, PBF_REG_PBF_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "QM"); + _print_parity(bp, QM_REG_QM_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "TM"); + _print_parity(bp, TM_REG_TM_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "XSDM"); + _print_parity(bp, + XSDM_REG_XSDM_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "XCM"); + _print_parity(bp, XCM_REG_XCM_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, + "XSEMI"); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_0); + _print_parity(bp, + XSEM_REG_XSEM_PRTY_STS_1); + } + break; + case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, + "DOORBELLQ"); + _print_parity(bp, + DORQ_REG_DORQ_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "NIG"); + if (CHIP_IS_E1x(bp)) { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS); + } else { + _print_parity(bp, + NIG_REG_NIG_PRTY_STS_0); + _print_parity(bp, + NIG_REG_NIG_PRTY_STS_1); + } + } + break; + case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: + if (print) + _print_next_block((*par_num)++, + "VAUX PCI CORE"); + *global = true; + break; + case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, + "DEBUG"); + _print_parity(bp, DBG_REG_DBG_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "USDM"); + _print_parity(bp, + USDM_REG_USDM_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: + if (print) 
{ + _print_next_block((*par_num)++, "UCM"); + _print_parity(bp, UCM_REG_UCM_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, + "USEMI"); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_0); + _print_parity(bp, + USEM_REG_USEM_PRTY_STS_1); + } + break; + case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "UPB"); + _print_parity(bp, GRCBASE_UPB + + PB_REG_PB_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "CSDM"); + _print_parity(bp, + CSDM_REG_CSDM_PRTY_STS); + } + break; + case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: + if (print) { + _print_next_block((*par_num)++, "CCM"); + _print_parity(bp, CCM_REG_CCM_PRTY_STS); + } + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return res; +} + +static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, + int *par_num, bool print) +{ + u32 cur_bit; + bool res; + int i; + + res = false; + + for (i = 0; sig; i++) { + cur_bit = (0x1UL << i); + if (sig & cur_bit) { + res = true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: + _print_next_block((*par_num)++, + "CSEMI"); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_0); + _print_parity(bp, + CSEM_REG_CSEM_PRTY_STS_1); + break; + case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: + _print_next_block((*par_num)++, "PXP"); + _print_parity(bp, PXP_REG_PXP_PRTY_STS); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_0); + _print_parity(bp, + PXP2_REG_PXP2_PRTY_STS_1); + break; + case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: + _print_next_block((*par_num)++, + "PXPPCICLOCKCLIENT"); + break; + case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: + _print_next_block((*par_num)++, "CFC"); + _print_parity(bp, + CFC_REG_CFC_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: + _print_next_block((*par_num)++, "CDU"); + _print_parity(bp, CDU_REG_CDU_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + _print_next_block((*par_num)++, "DMAE"); + _print_parity(bp, + DMAE_REG_DMAE_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: + _print_next_block((*par_num)++, "IGU"); + if (CHIP_IS_E1x(bp)) + _print_parity(bp, + HC_REG_HC_PRTY_STS); + else + _print_parity(bp, + IGU_REG_IGU_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: + _print_next_block((*par_num)++, "MISC"); + _print_parity(bp, + MISC_REG_MISC_PRTY_STS); + break; + } + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return res; +} + +static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, + int *par_num, bool *global, + bool print) +{ + bool res = false; + u32 cur_bit; + int i; + + for (i = 0; sig; i++) { + cur_bit = (0x1UL << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: + if (print) + _print_next_block((*par_num)++, + "MCP ROM"); + *global = true; + res = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: + if (print) + _print_next_block((*par_num)++, + "MCP UMP RX"); + *global = true; + res = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: + if (print) + _print_next_block((*par_num)++, + "MCP UMP TX"); + *global = true; + res = true; + break; + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: + if (print) + _print_next_block((*par_num)++, + "MCP SCPAD"); + /* clear latched SCPAD PATIRY from MCP */ + REG_WR(bp, 
MISC_REG_AEU_CLR_LATCH_SIGNAL, + 1UL << 10); + break; + } + + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return res; +} + +static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, + int *par_num, bool print) +{ + u32 cur_bit; + bool res; + int i; + + res = false; + + for (i = 0; sig; i++) { + cur_bit = (0x1UL << i); + if (sig & cur_bit) { + res = true; /* Each bit is real error! */ + if (print) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + _print_next_block((*par_num)++, + "PGLUE_B"); + _print_parity(bp, + PGLUE_B_REG_PGLUE_B_PRTY_STS); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + _print_next_block((*par_num)++, "ATC"); + _print_parity(bp, + ATC_REG_ATC_PRTY_STS); + break; + } + } + /* Clear the bit */ + sig &= ~cur_bit; + } + } + + return res; +} + +static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, + u32 *sig) +{ + bool res = false; + + if ((sig[0] & HW_PRTY_ASSERT_SET_0) || + (sig[1] & HW_PRTY_ASSERT_SET_1) || + (sig[2] & HW_PRTY_ASSERT_SET_2) || + (sig[3] & HW_PRTY_ASSERT_SET_3) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) { + int par_num = 0; + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", + sig[0] & HW_PRTY_ASSERT_SET_0, + sig[1] & HW_PRTY_ASSERT_SET_1, + sig[2] & HW_PRTY_ASSERT_SET_2, + sig[3] & HW_PRTY_ASSERT_SET_3, + sig[4] & HW_PRTY_ASSERT_SET_4); + if (print) + netdev_err(bp->dev, + "Parity errors detected in blocks: "); + res |= bnx2x_check_blocks_with_parity0(bp, + sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); + res |= bnx2x_check_blocks_with_parity1(bp, + sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity2(bp, + sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); + res |= bnx2x_check_blocks_with_parity3(bp, + sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); + res |= bnx2x_check_blocks_with_parity4(bp, + sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); + + if (print) + pr_cont("\n"); + } + + return res; +} + +/** + * bnx2x_chk_parity_attn - checks for parity attentions. + * + * @bp: driver handle + * @global: true if there was a global attention + * @print: show parity attention in syslog + */ +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) +{ + struct attn_route attn = { {0} }; + int port = BP_PORT(bp); + + attn.sig[0] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + + port*4); + attn.sig[1] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + + port*4); + attn.sig[2] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + + port*4); + attn.sig[3] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + + port*4); + /* Since MCP attentions can't be disabled inside the block, we need to + * read AEU registers to see whether they're currently disabled + */ + attn.sig[3] &= ((REG_RD(bp, + !port ? 
MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 + : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & + MISC_AEU_ENABLE_MCP_PRTY_BITS) | + ~MISC_AEU_ENABLE_MCP_PRTY_BITS); + + if (!CHIP_IS_E1x(bp)) + attn.sig[4] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + + port*4); + + return bnx2x_parity_attn(bp, global, print, attn.sig); +} + +static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) +{ + u32 val; + if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { + + val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); + BNX2X_ERR("PGLUE hw attention 0x%x\n", val); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); + if (val & + PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) + BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); + } + if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { + val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); + BNX2X_ERR("ATC hw attention 0x%x\n", val); + if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); + if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) + BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); + } + + if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { + BNX2X_ERR("FATAL parity attention set4 0x%x\n", + (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); + } +} + +static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) +{ + struct attn_route attn, *group_mask; + int port = BP_PORT(bp); + int index; + u32 reg_addr; + u32 val; + u32 aeu_mask; + bool global = false; + + /* need to take HW lock because MCP or other port might also + try to handle this event */ + bnx2x_acquire_alr(bp); + + if (bnx2x_chk_parity_attn(bp, &global, true)) { +#ifndef BNX2X_STOP_ON_ERROR + bp->recovery_state = BNX2X_RECOVERY_INIT; + schedule_delayed_work(&bp->sp_rtnl_task, 0); + /* Disable HW interrupts */ + bnx2x_int_disable(bp); + /* In case of parity errors don't handle attentions so that + * other function would "see" parity errors. 
+ */ +#else + bnx2x_panic(); +#endif + bnx2x_release_alr(bp); + return; + } + + attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); + attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); + attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); + attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); + if (!CHIP_IS_E1x(bp)) + attn.sig[4] = + REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); + else + attn.sig[4] = 0; + + DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", + attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); + + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + if (deasserted & (1 << index)) { + group_mask = &bp->attn_group[index]; + + DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", + index, + group_mask->sig[0], group_mask->sig[1], + group_mask->sig[2], group_mask->sig[3], + group_mask->sig[4]); + + bnx2x_attn_int_deasserted4(bp, + attn.sig[4] & group_mask->sig[4]); + bnx2x_attn_int_deasserted3(bp, + attn.sig[3] & group_mask->sig[3]); + bnx2x_attn_int_deasserted1(bp, + attn.sig[1] & group_mask->sig[1]); + bnx2x_attn_int_deasserted2(bp, + attn.sig[2] & group_mask->sig[2]); + bnx2x_attn_int_deasserted0(bp, + attn.sig[0] & group_mask->sig[0]); + } + } + + bnx2x_release_alr(bp); + + if (bp->common.int_block == INT_BLOCK_HC) + reg_addr = (HC_REG_COMMAND_REG + port*32 + + COMMAND_REG_ATTN_BITS_CLR); + else + reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); + + val = ~deasserted; + DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, + (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); + REG_WR(bp, reg_addr, val); + + if (~bp->attn_state & deasserted) + BNX2X_ERR("IGU ERROR\n"); + + reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + aeu_mask = REG_RD(bp, reg_addr); + + DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", + aeu_mask, deasserted); + aeu_mask |= (deasserted & 0x3ff); + DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); + + REG_WR(bp, reg_addr, aeu_mask); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); + + DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); + bp->attn_state &= ~deasserted; + DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); +} + +static void bnx2x_attn_int(struct bnx2x *bp) +{ + /* read local copy of bits */ + u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. + attn_bits); + u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 
+ attn_bits_ack); + u32 attn_state = bp->attn_state; + + /* look for changed bits */ + u32 asserted = attn_bits & ~attn_ack & ~attn_state; + u32 deasserted = ~attn_bits & attn_ack & attn_state; + + DP(NETIF_MSG_HW, + "attn_bits %x attn_ack %x asserted %x deasserted %x\n", + attn_bits, attn_ack, asserted, deasserted); + + if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) + BNX2X_ERR("BAD attention state\n"); + + /* handle bits that were raised */ + if (asserted) + bnx2x_attn_int_asserted(bp, asserted); + + if (deasserted) + bnx2x_attn_int_deasserted(bp, deasserted); +} + +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update) +{ + u32 igu_addr = bp->igu_base_addr; + igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; + bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, + igu_addr); +} + +static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) +{ + /* No memory barriers */ + storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); + mmiowb(); /* keep prod updates ordered */ +} + +static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, + union event_ring_elem *elem) +{ + u8 err = elem->message.error; + + if (!bp->cnic_eth_dev.starting_cid || + (cid < bp->cnic_eth_dev.starting_cid && + cid != bp->cnic_eth_dev.iscsi_l2_cid)) + return 1; + + DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); + + if (unlikely(err)) { + + BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", + cid); + bnx2x_panic_dump(bp, false); + } + bnx2x_cnic_cfc_comp(bp, cid, err); + return 0; +} + +static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) +{ + struct bnx2x_mcast_ramrod_params rparam; + int rc; + + memset(&rparam, 0, sizeof(rparam)); + + rparam.mcast_obj = &bp->mcast_obj; + + netif_addr_lock_bh(bp->dev); + + /* Clear pending state for the last command */ + bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); + + /* If there are pending mcast commands - send them */ + if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + if (rc < 0) + BNX2X_ERR("Failed to send pending mcast commands: %d\n", + rc); + } + + netif_addr_unlock_bh(bp->dev); +} + +static void bnx2x_handle_classification_eqe(struct bnx2x *bp, + union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + struct bnx2x_vlan_mac_obj *vlan_mac_obj; + + /* Always push next commands out, don't wait here */ + __set_bit(RAMROD_CONT, &ramrod_flags); + + switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) + >> BNX2X_SWCID_SHIFT) { + case BNX2X_FILTER_MAC_PENDING: + DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); + if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) + vlan_mac_obj = &bp->iscsi_l2_mac_obj; + else + vlan_mac_obj = &bp->sp_objs[cid].mac_obj; + + break; + case BNX2X_FILTER_MCAST_PENDING: + DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); + /* This is only relevant for 57710 where multicast MACs are + * configured as unicast MACs using the same ramrod. 
+ */ + bnx2x_handle_mcast_eqe(bp); + return; + default: + BNX2X_ERR("Unsupported classification command: %d\n", + elem->message.data.eth_event.echo); + return; + } + + rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); + + if (rc < 0) + BNX2X_ERR("Failed to schedule new commands: %d\n", rc); + else if (rc > 0) + DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); +} + +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); + +static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) +{ + netif_addr_lock_bh(bp->dev); + + clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); + + /* Send rx_mode command again if was requested */ + if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) + bnx2x_set_storm_rx_mode(bp); + else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, + &bp->sp_state)) + bnx2x_set_iscsi_eth_rx_mode(bp, true); + else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, + &bp->sp_state)) + bnx2x_set_iscsi_eth_rx_mode(bp, false); + + netif_addr_unlock_bh(bp->dev); +} + +static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, + union event_ring_elem *elem) +{ + if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { + DP(BNX2X_MSG_SP, + "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", + elem->message.data.vif_list_event.func_bit_map); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, + elem->message.data.vif_list_event.func_bit_map); + } else if (elem->message.data.vif_list_event.echo == + VIF_LIST_RULE_SET) { + DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); + } +} + +/* called with rtnl_lock */ +static void bnx2x_after_function_update(struct bnx2x *bp) +{ + int q, rc; + struct bnx2x_fastpath *fp; + struct bnx2x_queue_state_params queue_params = {NULL}; + struct bnx2x_queue_update_params *q_update_params = + &queue_params.params.update; + + /* Send Q update command with afex vlan removal values for all Qs */ + queue_params.cmd = BNX2X_Q_CMD_UPDATE; + + /* set silent vlan removal values according to vlan mode */ + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &q_update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &q_update_params->update_flags); + __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); + + /* in access mode mark mask and value are 0 to strip all vlans */ + if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { + q_update_params->silent_removal_value = 0; + q_update_params->silent_removal_mask = 0; + } else { + q_update_params->silent_removal_value = + (bp->afex_def_vlan_tag & VLAN_VID_MASK); + q_update_params->silent_removal_mask = VLAN_VID_MASK; + } + + for_each_eth_queue(bp, q) { + /* Set the appropriate Queue object */ + fp = &bp->fp[q]; + queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* send the ramrod */ + rc = bnx2x_queue_state_change(bp, &queue_params); + if (rc < 0) + BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", + q); + } + + if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { + fp = &bp->fp[FCOE_IDX(bp)]; + queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* clear pending completion bit */ + __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); + + /* mark latest Q bit */ + smp_mb__before_atomic(); + set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); + smp_mb__after_atomic(); + + /* send Q update ramrod for FCoE Q */ + rc = bnx2x_queue_state_change(bp, &queue_params); + if (rc < 0) + BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", + 
q); + } else { + /* If no FCoE ring - ACK MCP now */ + bnx2x_link_report(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + } +} + +static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( + struct bnx2x *bp, u32 cid) +{ + DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); + + if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp))) + return &bnx2x_fcoe_sp_obj(bp, q_obj); + else + return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; +} + +static void bnx2x_eq_int(struct bnx2x *bp) +{ + u16 hw_cons, sw_cons, sw_prod; + union event_ring_elem *elem; + u8 echo; + u32 cid; + u8 opcode; + int rc, spqe_cnt = 0; + struct bnx2x_queue_sp_obj *q_obj; + struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; + struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; + + hw_cons = le16_to_cpu(*bp->eq_cons_sb); + + /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. + * when we get the next-page we need to adjust so the loop + * condition below will be met. The next element is the size of a + * regular element and hence incrementing by 1 + */ + if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) + hw_cons++; + + /* This function may never run in parallel with itself for a + * specific bp, thus there is no need in "paired" read memory + * barrier here. + */ + sw_cons = bp->eq_cons; + sw_prod = bp->eq_prod; + + DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", + hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); + + for (; sw_cons != hw_cons; + sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { + + elem = &bp->eq_ring[EQ_DESC(sw_cons)]; + + rc = bnx2x_iov_eq_sp_event(bp, elem); + if (!rc) { + DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", + rc); + goto next_spqe; + } + + /* elem CID originates from FW; actually LE */ + cid = SW_CID((__force __le32) + elem->message.data.cfc_del_event.cid); + opcode = elem->message.opcode; + + /* handle eq element */ + switch (opcode) { + case EVENT_RING_OPCODE_VF_PF_CHANNEL: + bnx2x_vf_mbx_schedule(bp, + &elem->message.data.vf_pf_event); + continue; + + case EVENT_RING_OPCODE_STAT_QUERY: + DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), + "got statistics comp event %d\n", + bp->stats_comp++); + /* nothing to do with stats comp */ + goto next_spqe; + + case EVENT_RING_OPCODE_CFC_DEL: + /* handle according to cid range */ + /* + * we may want to verify here that the bp state is + * HALTING + */ + DP(BNX2X_MSG_SP, + "got delete ramrod for MULTI[%d]\n", cid); + + if (CNIC_LOADED(bp) && + !bnx2x_cnic_handle_cfc_del(bp, cid, elem)) + goto next_spqe; + + q_obj = bnx2x_cid_to_q_obj(bp, cid); + + if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) + break; + + goto next_spqe; + + case EVENT_RING_OPCODE_STOP_TRAFFIC: + DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_STOP)) + break; + goto next_spqe; + + case EVENT_RING_OPCODE_START_TRAFFIC: + DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); + bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_START)) + break; + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_UPDATE: + echo = elem->message.data.function_update_event.echo; + if (echo == SWITCH_UPDATE) { + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "got FUNC_SWITCH_UPDATE ramrod\n"); + if (f_obj->complete_cmd( + bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) + break; + + } else { + int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; + + DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, + 
"AFEX: ramrod completed FUNCTION_UPDATE\n"); + f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_AFEX_UPDATE); + + /* We will perform the Queues update from + * sp_rtnl task as all Queue SP operations + * should run under rtnl_lock. + */ + bnx2x_schedule_sp_rtnl(bp, cmd, 0); + } + + goto next_spqe; + + case EVENT_RING_OPCODE_AFEX_VIF_LISTS: + f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_AFEX_VIFLISTS); + bnx2x_after_afex_vif_lists(bp, elem); + goto next_spqe; + case EVENT_RING_OPCODE_FUNCTION_START: + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "got FUNC_START ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) + break; + + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_STOP: + DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, + "got FUNC_STOP ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) + break; + + goto next_spqe; + + case EVENT_RING_OPCODE_SET_TIMESYNC: + DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, + "got set_timesync ramrod completion\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_SET_TIMESYNC)) + break; + goto next_spqe; + } + + switch (opcode | bp->state) { + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_OPENING_WAIT4_PORT): + cid = elem->message.data.eth_event.echo & + BNX2X_SWCID_MASK; + DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", + cid); + rss_raw->clear_pending(rss_raw); + break; + + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_SET_MAC | + BNX2X_STATE_CLOSING_WAIT4_HALT): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); + bnx2x_handle_classification_eqe(bp, elem); + break; + + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got mcast ramrod\n"); + bnx2x_handle_mcast_eqe(bp); + break; + + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); + bnx2x_handle_rx_mode_eqe(bp); + break; + default: + /* unknown event log error and continue */ + BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", + elem->message.opcode, bp->state); + } +next_spqe: + spqe_cnt++; + } /* for */ + + smp_mb__before_atomic(); + atomic_add(spqe_cnt, &bp->eq_spq_left); + + bp->eq_cons = sw_cons; + bp->eq_prod = sw_prod; + /* Make sure that above mem writes were issued towards the memory */ + smp_wmb(); + + /* update producer */ + bnx2x_update_eq_prod(bp, bp->eq_prod); +} + +static void bnx2x_sp_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); + + DP(BNX2X_MSG_SP, "sp task invoked\n"); + + /* make sure the atomic interrupt_occurred has been written */ + smp_rmb(); + if (atomic_read(&bp->interrupt_occurred)) { + + /* what work needs to be performed? 
*/ + u16 status = bnx2x_update_dsb_idx(bp); + + DP(BNX2X_MSG_SP, "status %x\n", status); + DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n"); + atomic_set(&bp->interrupt_occurred, 0); + + /* HW attentions */ + if (status & BNX2X_DEF_SB_ATT_IDX) { + bnx2x_attn_int(bp); + status &= ~BNX2X_DEF_SB_ATT_IDX; + } + + /* SP events: STAT_QUERY and others */ + if (status & BNX2X_DEF_SB_IDX) { + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + + if (FCOE_INIT(bp) && + (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + /* Prevent local bottom-halves from running as + * we are going to change the local NAPI list. + */ + local_bh_disable(); + napi_schedule(&bnx2x_fcoe(bp, napi)); + local_bh_enable(); + } + + /* Handle EQ completions */ + bnx2x_eq_int(bp); + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, + le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1); + + status &= ~BNX2X_DEF_SB_IDX; + } + + /* if status is non zero then perhaps something went wrong */ + if (unlikely(status)) + DP(BNX2X_MSG_SP, + "got an unknown interrupt! (status 0x%x)\n", status); + + /* ack status block only if something was actually handled */ + bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, + le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); + } + + /* afex - poll to check if VIFSET_ACK should be sent to MFW */ + if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, + &bp->sp_state)) { + bnx2x_link_report(bp); + bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); + } +} + +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct bnx2x *bp = netdev_priv(dev); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, + IGU_INT_DISABLE, 0); + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return IRQ_HANDLED; +#endif + + if (CNIC_LOADED(bp)) { + struct cnic_ops *c_ops; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + } + + /* schedule sp task to perform default status block work, ack + * attentions and enable interrupts. + */ + bnx2x_schedule_sp_task(bp); + + return IRQ_HANDLED; +} + +/* end of slow path */ + +void bnx2x_drv_pulse(struct bnx2x *bp) +{ + SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, + bp->fw_drv_pulse_wr_seq); +} + +static void bnx2x_timer(unsigned long data) +{ + struct bnx2x *bp = (struct bnx2x *) data; + + if (!netif_running(bp->dev)) + return; + + if (IS_PF(bp) && + !BP_NOMCP(bp)) { + int mb_idx = BP_FW_MB_IDX(bp); + u16 drv_pulse; + u16 mcp_pulse; + + ++bp->fw_drv_pulse_wr_seq; + bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; + drv_pulse = bp->fw_drv_pulse_wr_seq; + bnx2x_drv_pulse(bp); + + mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & + MCP_PULSE_SEQ_MASK); + /* The delta between driver pulse and mcp response + * should not get too big. If the MFW is more than 5 pulses + * behind, we should worry about it enough to generate an error + * log. 
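+ * The subtraction below is masked with MCP_PULSE_SEQ_MASK, so wrap-around
+ * of the pulse sequence counter is handled correctly.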
+ */ + if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5) + BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n", + drv_pulse, mcp_pulse); + } + + if (bp->state == BNX2X_STATE_OPEN) + bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); + + /* sample pf vf bulletin board for new posts from pf */ + if (IS_VF(bp)) + bnx2x_timer_sriov(bp); + + mod_timer(&bp->timer, jiffies + bp->current_interval); +} + +/* end of Statistics */ + +/* nic init */ + +/* + * nic init service functions + */ + +static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) +{ + u32 i; + if (!(len%4) && !(addr%4)) + for (i = 0; i < len; i += 4) + REG_WR(bp, addr + i, fill); + else + for (i = 0; i < len; i++) + REG_WR8(bp, addr + i, fill); +} + +/* helper: writes FP SP data to FW - data_size in dwords */ +static void bnx2x_wr_fp_sb_data(struct bnx2x *bp, + int fw_sb_id, + u32 *sb_data_p, + u32 data_size) +{ + int index; + for (index = 0; index < data_size; index++) + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + + sizeof(u32)*index, + *(sb_data_p + index)); +} + +static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) +{ + u32 *sb_data_p; + u32 data_size = 0; + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + + /* disable the function first */ + if (!CHIP_IS_E1x(bp)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_DISABLED; + sb_data_e2.common.p_func.vf_valid = false; + sb_data_p = (u32 *)&sb_data_e2; + data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_DISABLED; + sb_data_e1x.common.p_func.vf_valid = false; + sb_data_p = (u32 *)&sb_data_e1x; + data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); + } + bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); + + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0, + CSTORM_STATUS_BLOCK_SIZE); + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0, + CSTORM_SYNC_BLOCK_SIZE); +} + +/* helper: writes SP SB data to FW */ +static void bnx2x_wr_sp_sb_data(struct bnx2x *bp, + struct hc_sp_status_block_data *sp_sb_data) +{ + int func = BP_FUNC(bp); + int i; + for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) + REG_WR(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + + i*sizeof(u32), + *((u32 *)sp_sb_data + i)); +} + +static void bnx2x_zero_sp_sb(struct bnx2x *bp) +{ + int func = BP_FUNC(bp); + struct hc_sp_status_block_data sp_sb_data; + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + sp_sb_data.state = SB_DISABLED; + sp_sb_data.p_func.vf_valid = false; + + bnx2x_wr_sp_sb_data(bp, &sp_sb_data); + + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0, + CSTORM_SP_STATUS_BLOCK_SIZE); + bnx2x_fill(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0, + CSTORM_SP_SYNC_BLOCK_SIZE); +} + +static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, + int igu_sb_id, int igu_seg_id) +{ + hc_sm->igu_sb_id = igu_sb_id; + hc_sm->igu_seg_id = igu_seg_id; + hc_sm->timer_value = 0xFF; + hc_sm->time_to_expire = 0xFFFFFFFF; +} + +/* allocates state machine ids. 
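+ * The RX CQ index is attached to the RX state machine (SM_RX_ID), while the
+ * OOO and per-CoS TX CQ indices are attached to the TX one (SM_TX_ID).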
*/ +static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data) +{ + /* zero out state machine indices */ + /* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; + + /* map indices */ + /* rx indices */ + index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= + SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + + /* tx indices */ + index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; + index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= + SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT; +} + +void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, + u8 vf_valid, int fw_sb_id, int igu_sb_id) +{ + int igu_seg_id; + + struct hc_status_block_data_e2 sb_data_e2; + struct hc_status_block_data_e1x sb_data_e1x; + struct hc_status_block_sm *hc_sm_p; + int data_size; + u32 *sb_data_p; + + if (CHIP_INT_MODE_IS_BC(bp)) + igu_seg_id = HC_SEG_ACCESS_NORM; + else + igu_seg_id = IGU_SEG_ACCESS_NORM; + + bnx2x_zero_fp_sb(bp, fw_sb_id); + + if (!CHIP_IS_E1x(bp)) { + memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_ENABLED; + sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); + sb_data_e2.common.p_func.vf_id = vfid; + sb_data_e2.common.p_func.vf_valid = vf_valid; + sb_data_e2.common.p_func.vnic_id = BP_VN(bp); + sb_data_e2.common.same_igu_sb_1b = true; + sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); + sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); + hc_sm_p = sb_data_e2.common.state_machine; + sb_data_p = (u32 *)&sb_data_e2; + data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); + bnx2x_map_sb_state_machines(sb_data_e2.index_data); + } else { + memset(&sb_data_e1x, 0, + sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_ENABLED; + sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); + sb_data_e1x.common.p_func.vf_id = 0xff; + sb_data_e1x.common.p_func.vf_valid = false; + sb_data_e1x.common.p_func.vnic_id = BP_VN(bp); + sb_data_e1x.common.same_igu_sb_1b = true; + sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); + sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); + hc_sm_p = sb_data_e1x.common.state_machine; + sb_data_p = (u32 *)&sb_data_e1x; + data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); + bnx2x_map_sb_state_machines(sb_data_e1x.index_data); + } + + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], + igu_sb_id, igu_seg_id); + bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], + igu_sb_id, igu_seg_id); + + DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id); + + /* write indices to HW - PCI guarantees endianity of regpairs */ + bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); +} + +static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, + u16 tx_usec, u16 rx_usec) +{ + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, + false, rx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS0, false, + tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS1, false, + 
tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS2, false, + tx_usec); +} + +static void bnx2x_init_def_sb(struct bnx2x *bp) +{ + struct host_sp_status_block *def_sb = bp->def_status_blk; + dma_addr_t mapping = bp->def_status_blk_mapping; + int igu_sp_sb_index; + int igu_seg_id; + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int reg_offset, reg_offset_en5; + u64 section; + int index; + struct hc_sp_status_block_data sp_sb_data; + memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); + + if (CHIP_INT_MODE_IS_BC(bp)) { + igu_sp_sb_index = DEF_SB_IGU_ID; + igu_seg_id = HC_SEG_ACCESS_DEF; + } else { + igu_sp_sb_index = bp->igu_dsb_id; + igu_seg_id = IGU_SEG_ACCESS_DEF; + } + + /* ATTN */ + section = ((u64)mapping) + offsetof(struct host_sp_status_block, + atten_status_block); + def_sb->atten_status_block.status_block_id = igu_sp_sb_index; + + bp->attn_state = 0; + + reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); + for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { + int sindex; + /* take care of sig[0]..sig[4] */ + for (sindex = 0; sindex < 4; sindex++) + bp->attn_group[index].sig[sindex] = + REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); + + if (!CHIP_IS_E1x(bp)) + /* + * enable5 is separate from the rest of the registers, + * and therefore the address skip is 4 + * and not 16 between the different groups + */ + bp->attn_group[index].sig[4] = REG_RD(bp, + reg_offset_en5 + 0x4*index); + else + bp->attn_group[index].sig[4] = 0; + } + + if (bp->common.int_block == INT_BLOCK_HC) { + reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : + HC_REG_ATTN_MSG0_ADDR_L); + + REG_WR(bp, reg_offset, U64_LO(section)); + REG_WR(bp, reg_offset + 4, U64_HI(section)); + } else if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); + REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); + } + + section = ((u64)mapping) + offsetof(struct host_sp_status_block, + sp_sb); + + bnx2x_zero_sp_sb(bp); + + /* PCI guarantees endianity of regpairs */ + sp_sb_data.state = SB_ENABLED; + sp_sb_data.host_sb_addr.lo = U64_LO(section); + sp_sb_data.host_sb_addr.hi = U64_HI(section); + sp_sb_data.igu_sb_id = igu_sp_sb_index; + sp_sb_data.igu_seg_id = igu_seg_id; + sp_sb_data.p_func.pf_id = func; + sp_sb_data.p_func.vnic_id = BP_VN(bp); + sp_sb_data.p_func.vf_id = 0xff; + + bnx2x_wr_sp_sb_data(bp, &sp_sb_data); + + bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); +} + +void bnx2x_update_coalesce(struct bnx2x *bp) +{ + int i; + + for_each_eth_queue(bp, i) + bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, + bp->tx_ticks, bp->rx_ticks); +} + +static void bnx2x_init_sp_ring(struct bnx2x *bp) +{ + spin_lock_init(&bp->spq_lock); + atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); + + bp->spq_prod_idx = 0; + bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; + bp->spq_prod_bd = bp->spq; + bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; +} + +static void bnx2x_init_eq_ring(struct bnx2x *bp) +{ + int i; + for (i = 1; i <= NUM_EQ_PAGES; i++) { + union event_ring_elem *elem = + &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1]; + + elem->next_page.addr.hi = + cpu_to_le32(U64_HI(bp->eq_mapping + + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); + elem->next_page.addr.lo = + cpu_to_le32(U64_LO(bp->eq_mapping + + BCM_PAGE_SIZE*(i % NUM_EQ_PAGES))); + } + bp->eq_cons = 0; + bp->eq_prod = NUM_EQ_DESC; + bp->eq_cons_sb = 
BNX2X_EQ_INDEX; + /* we want a warning message before it gets wrought... */ + atomic_set(&bp->eq_spq_left, + min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); +} + +/* called with netif_addr_lock_bh() */ +static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags) +{ + struct bnx2x_rx_mode_ramrod_params ramrod_param; + int rc; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Prepare ramrod parameters */ + ramrod_param.cid = 0; + ramrod_param.cl_id = cl_id; + ramrod_param.rx_mode_obj = &bp->rx_mode_obj; + ramrod_param.func_id = BP_FUNC(bp); + + ramrod_param.pstate = &bp->sp_state; + ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; + + ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); + ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); + + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.rx_mode_flags = rx_mode_flags; + + ramrod_param.rx_accept_flags = rx_accept_flags; + ramrod_param.tx_accept_flags = tx_accept_flags; + + rc = bnx2x_config_rx_mode(bp, &ramrod_param); + if (rc < 0) { + BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); + return rc; + } + + return 0; +} + +static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, + unsigned long *rx_accept_flags, + unsigned long *tx_accept_flags) +{ + /* Clear the flags first */ + *rx_accept_flags = 0; + *tx_accept_flags = 0; + + switch (rx_mode) { + case BNX2X_RX_MODE_NONE: + /* + * 'drop all' supersedes any accept flags that may have been + * passed to the function. + */ + break; + case BNX2X_RX_MODE_NORMAL: + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); + + break; + case BNX2X_RX_MODE_ALLMULTI: + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); + + break; + case BNX2X_RX_MODE_PROMISC: + /* According to definition of SI mode, iface in promisc mode + * should receive matched and unmatched (in resolution of port) + * unicast packets. 
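+ * That is why BNX2X_ACCEPT_UNMATCHED is set below in addition to the regular
+ * unicast/multicast/broadcast accept bits.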
+ */ + __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); + + if (IS_MF_SI(bp)) + __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags); + else + __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); + + break; + default: + BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); + return -EINVAL; + } + + /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ + if (rx_mode != BNX2X_RX_MODE_NONE) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); + } + + return 0; +} + +/* called with netif_addr_lock_bh() */ +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp) +{ + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; + int rc; + + if (!NO_FCOE(bp)) + /* Configure rx_mode of FCoE Queue */ + __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); + + rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags, + &tx_accept_flags); + if (rc) + return rc; + + __set_bit(RAMROD_RX, &ramrod_flags); + __set_bit(RAMROD_TX, &ramrod_flags); + + return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, + rx_accept_flags, tx_accept_flags, + ramrod_flags); +} + +static void bnx2x_init_internal_common(struct bnx2x *bp) +{ + int i; + + /* Zero this manually as its initialization is + currently missing in the initTool */ + for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) + REG_WR(bp, BAR_USTRORM_INTMEM + + USTORM_AGG_DATA_OFFSET + i * 4, 0); + if (!CHIP_IS_E1x(bp)) { + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, + CHIP_INT_MODE_IS_BC(bp) ? 
+ HC_IGU_BC_MODE : HC_IGU_NBC_MODE); + } +} + +static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) +{ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON: + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + bnx2x_init_internal_common(bp); + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_PORT: + /* nothing to do */ + /* no break */ + + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + /* internal memory per function is + initialized inside bnx2x_pf_init */ + break; + + default: + BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); + break; + } +} + +static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) +{ + return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp); +} + +static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) +{ + return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp); +} + +static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->bp)) + return BP_L_ID(fp->bp) + fp->index; + else /* We want Client ID to be the same as IGU SB ID for 57712 */ + return bnx2x_fp_igu_sb_id(fp); +} + +static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) +{ + struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u8 cos; + unsigned long q_type = 0; + u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; + fp->rx_queue = fp_idx; + fp->cid = fp_idx; + fp->cl_id = bnx2x_fp_cl_id(fp); + fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); + fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); + /* qZone id equals to FW (per path) client id */ + fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); + + /* init shortcut */ + fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); + + /* Setup SB indices */ + fp->rx_cons_sb = BNX2X_RX_SB_INDEX; + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); + + /* init tx data */ + for_each_cos_in_tx_queue(fp, cos) { + bnx2x_init_txdata(bp, fp->txdata_ptr[cos], + CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), + FP_COS_TO_TXQ(fp, cos, bp), + BNX2X_TX_SB_INDEX_BASE + cos, fp); + cids[cos] = fp->txdata_ptr[cos]->cid; + } + + /* nothing more for vf to do here */ + if (IS_VF(bp)) + return; + + bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, + fp->fw_sb_id, fp->igu_sb_id); + bnx2x_update_fpsb_idx(fp); + bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, + fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + /** + * Configure classification DBs: Always enable Tx switching + */ + bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); + + DP(NETIF_MSG_IFUP, + "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); +} + +static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) +{ + int i; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + *txdata->tx_cons_sb = cpu_to_le16(0); + + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); + txdata->tx_db.data.zero_fill1 = 0; + txdata->tx_db.data.prod = 0; + + txdata->tx_pkt_prod = 0; + txdata->tx_pkt_cons = 0; + txdata->tx_bd_prod = 0; + txdata->tx_bd_cons = 0; + txdata->tx_pkt = 0; +} + +static void 
bnx2x_init_tx_rings_cnic(struct bnx2x *bp) +{ + int i; + + for_each_tx_queue_cnic(bp, i) + bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]); +} + +static void bnx2x_init_tx_rings(struct bnx2x *bp) +{ + int i; + u8 cos; + + for_each_eth_queue(bp, i) + for_each_cos_in_tx_queue(&bp->fp[i], cos) + bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); +} + +static void bnx2x_init_fcoe_fp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + unsigned long q_type = 0; + + bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); + bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, + BNX2X_FCOE_ETH_CL_ID_IDX); + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); + bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; + bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; + bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; + bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, + fp); + + DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); + + /* qZone id equals to FW (per path) client id */ + bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); + /* init shortcut */ + bnx2x_fcoe(bp, ustorm_rx_prods_offset) = + bnx2x_rx_ustorm_prods_offset(fp); + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* No multi-CoS for FCoE L2 client */ + BUG_ON(fp->max_cos != 1); + + bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, + &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + DP(NETIF_MSG_IFUP, + "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", + fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); +} + +void bnx2x_nic_init_cnic(struct bnx2x *bp) +{ + if (!NO_FCOE(bp)) + bnx2x_init_fcoe_fp(bp); + + bnx2x_init_sb(bp, bp->cnic_sb_mapping, + BNX2X_VF_ID_INVALID, false, + bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); + + /* ensure status block indices were read */ + rmb(); + bnx2x_init_rx_rings_cnic(bp); + bnx2x_init_tx_rings_cnic(bp); + + /* flush all */ + mb(); + mmiowb(); +} + +void bnx2x_pre_irq_nic_init(struct bnx2x *bp) +{ + int i; + + /* Setup NIC internals and enable interrupts */ + for_each_eth_queue(bp, i) + bnx2x_init_eth_fp(bp, i); + + /* ensure status block indices were read */ + rmb(); + bnx2x_init_rx_rings(bp); + bnx2x_init_tx_rings(bp); + + if (IS_PF(bp)) { + /* Initialize MOD_ABS interrupts */ + bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, + bp->common.shmem_base, + bp->common.shmem2_base, BP_PORT(bp)); + + /* initialize the default status block and sp ring */ + bnx2x_init_def_sb(bp); + bnx2x_update_dsb_idx(bp); + bnx2x_init_sp_ring(bp); + } else { + bnx2x_memset_stats(bp); + } +} + +void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code) +{ + bnx2x_init_eq_ring(bp); + bnx2x_init_internal(bp, load_code); + bnx2x_pf_init(bp); + bnx2x_stats_init(bp); + + /* flush all before enabling interrupts */ + mb(); + mmiowb(); + + bnx2x_int_enable(bp); + + /* Check for SPIO5 */ + bnx2x_attn_int_deasserted0(bp, + REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) & + AEU_INPUTS_ATTN_BITS_SPIO5); +} + +/* gzip service functions */ +static int bnx2x_gunzip_init(struct bnx2x *bp) +{ + bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE, + &bp->gunzip_mapping, GFP_KERNEL); + if (bp->gunzip_buf == NULL) + goto gunzip_nomem1; + + bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL); + if (bp->strm == NULL) + goto gunzip_nomem2; + + 
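+ /* zlib inflate needs a caller-provided scratch workspace; its required
+ * size is reported by zlib_inflate_workspacesize()
+ */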
bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); + if (bp->strm->workspace == NULL) + goto gunzip_nomem3; + + return 0; + +gunzip_nomem3: + kfree(bp->strm); + bp->strm = NULL; + +gunzip_nomem2: + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; + +gunzip_nomem1: + BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n"); + return -ENOMEM; +} + +static void bnx2x_gunzip_end(struct bnx2x *bp) +{ + if (bp->strm) { + vfree(bp->strm->workspace); + kfree(bp->strm); + bp->strm = NULL; + } + + if (bp->gunzip_buf) { + dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf, + bp->gunzip_mapping); + bp->gunzip_buf = NULL; + } +} + +static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len) +{ + int n, rc; + + /* check gzip header */ + if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) { + BNX2X_ERR("Bad gzip header\n"); + return -EINVAL; + } + + n = 10; + +#define FNAME 0x8 + + if (zbuf[3] & FNAME) + while ((zbuf[n++] != 0) && (n < len)); + + bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n; + bp->strm->avail_in = len - n; + bp->strm->next_out = bp->gunzip_buf; + bp->strm->avail_out = FW_BUF_SIZE; + + rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); + if (rc != Z_OK) + return rc; + + rc = zlib_inflate(bp->strm, Z_FINISH); + if ((rc != Z_OK) && (rc != Z_STREAM_END)) + netdev_err(bp->dev, "Firmware decompression error: %s\n", + bp->strm->msg); + + bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); + if (bp->gunzip_outlen & 0x3) + netdev_err(bp->dev, + "Firmware decompression error: gunzip_outlen (%d) not aligned\n", + bp->gunzip_outlen); + bp->gunzip_outlen >>= 2; + + zlib_inflateEnd(bp->strm); + + if (rc == Z_STREAM_END) + return 0; + + return rc; +} + +/* nic load/unload */ + +/* + * General service functions + */ + +/* send a NIG loopback debug packet */ +static void bnx2x_lb_pckt(struct bnx2x *bp) +{ + u32 wb_write[3]; + + /* Ethernet source and destination addresses */ + wb_write[0] = 0x55555555; + wb_write[1] = 0x55555555; + wb_write[2] = 0x20; /* SOP */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); + + /* NON-IP protocol */ + wb_write[0] = 0x09000000; + wb_write[1] = 0x55555555; + wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ + REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); +} + +/* some of the internal memories + * are not directly readable from the driver + * to test them we send debug packets + */ +static int bnx2x_int_mem_test(struct bnx2x *bp) +{ + int factor; + int count, i; + u32 val = 0; + + if (CHIP_REV_IS_FPGA(bp)) + factor = 120; + else if (CHIP_REV_IS_EMUL(bp)) + factor = 200; + else + factor = 1; + + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); + + /* send Ethernet packet */ + bnx2x_lb_pckt(bp); + + /* TODO do i reset NIG statistic? 
*/ + /* Wait until NIG register shows 1 packet of size 0x10 */ + count = 1000 * factor; + while (count) { + + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + if (val == 0x10) + break; + + usleep_range(10000, 20000); + count--; + } + if (val != 0x10) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -1; + } + + /* Wait until PRS register shows 1 packet */ + count = 1000 * factor; + while (count) { + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val == 1) + break; + + usleep_range(10000, 20000); + count--; + } + if (val != 0x1) { + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + return -2; + } + + /* Reset and init BRB, PRS */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); + + DP(NETIF_MSG_HW, "part2\n"); + + /* Disable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); + REG_WR(bp, CFC_REG_DEBUG0, 0x1); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); + + /* Write 0 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); + + /* send 10 Ethernet packets */ + for (i = 0; i < 10; i++) + bnx2x_lb_pckt(bp); + + /* Wait until NIG register shows 10 + 1 + packets of size 11*0x10 = 0xb0 */ + count = 1000 * factor; + while (count) { + + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + if (val == 0xb0) + break; + + usleep_range(10000, 20000); + count--; + } + if (val != 0xb0) { + BNX2X_ERR("NIG timeout val = 0x%x\n", val); + return -3; + } + + /* Wait until PRS register shows 2 packets */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 2) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + + /* Write 1 to parser credits for CFC search request */ + REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); + + /* Wait until PRS register shows 3 packets */ + msleep(10 * factor); + /* Wait until NIG register shows 1 packet of size 0x10 */ + val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); + if (val != 3) + BNX2X_ERR("PRS timeout val = 0x%x\n", val); + + /* clear NIG EOP FIFO */ + for (i = 0; i < 11; i++) + REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); + val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); + if (val != 1) { + BNX2X_ERR("clear of NIG failed\n"); + return -4; + } + + /* Reset and init BRB, PRS, NIG */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); + msleep(50); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); + msleep(50); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); + if (!CNIC_SUPPORT(bp)) + /* set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); + + /* Enable inputs of parser neighbor blocks */ + REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); + REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); + REG_WR(bp, CFC_REG_DEBUG0, 0x0); + REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); + + DP(NETIF_MSG_HW, "done\n"); + + return 0; /* OK */ +} + +static void bnx2x_enable_blocks_attention(struct bnx2x *bp) +{ + u32 val; + + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); + else + REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); + /* + * mask read length error interrupts in brb for parser + * (parsing unit and 'checksum and crc' unit) + * 
these errors are legal (PU reads fixed length and CAC can cause + * read length error on truncated packets) + */ + REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); + REG_WR(bp, QM_REG_QM_INT_MASK, 0); + REG_WR(bp, TM_REG_TM_INT_MASK, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); + REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); + REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ + REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); + REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); + REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ +/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ + REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); + REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); + REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ +/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ + + val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | + PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | + PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN; + if (!CHIP_IS_E1x(bp)) + val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | + PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED; + REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); + + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); + REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); + REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); +/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ + + if (!CHIP_IS_E1x(bp)) + /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ + REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); + + REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); + REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); +/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ + REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ +} + +static void bnx2x_reset_common(struct bnx2x *bp) +{ + u32 val = 0x1400; + + /* reset_common */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0xd3ffff7f); + + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); +} + +static void bnx2x_setup_dmae(struct bnx2x *bp) +{ + bp->dmae_ready = 0; + spin_lock_init(&bp->dmae_lock); +} + +static void bnx2x_init_pxp(struct bnx2x *bp) +{ + u16 devctl; + int r_order, w_order; + + pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); + DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); + w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); + if (bp->mrrs == -1) + r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); + else { + DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); + r_order = bp->mrrs; + } + + bnx2x_init_pxp_arb(bp, r_order, w_order); +} + +static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) +{ + int is_required; + u32 val; + int port; + + if (BP_NOMCP(bp)) + return; + + is_required = 0; + val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & + SHARED_HW_CFG_FAN_FAILURE_MASK; + + if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) + is_required = 1; + + /* + * The fan failure mechanism is usually related to the PHY type since + * the power consumption of the board is affected by the PHY. Currently, + * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 
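+ * In the PHY-type case below, each port's PHY is queried via
+ * bnx2x_fan_failure_det_req() to decide whether detection is needed.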
+ */ + else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) + for (port = PORT_0; port < PORT_MAX; port++) { + is_required |= + bnx2x_fan_failure_det_req( + bp, + bp->common.shmem_base, + bp->common.shmem2_base, + port); + } + + DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); + + if (is_required == 0) + return; + + /* Fan failure is indicated by SPIO 5 */ + bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); + + /* set to active low mode */ + val = REG_RD(bp, MISC_REG_SPIO_INT); + val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); + REG_WR(bp, MISC_REG_SPIO_INT, val); + + /* enable interrupt to signal the IGU */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + val |= MISC_SPIO_SPIO5; + REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); +} + +void bnx2x_pf_disable(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); + val &= ~IGU_PF_CONF_FUNC_EN; + + REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); +} + +static void bnx2x__common_init_phy(struct bnx2x *bp) +{ + u32 shmem_base[2], shmem2_base[2]; + /* Avoid common init in case MFW supports LFA */ + if (SHMEM2_RD(bp, size) > + (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) + return; + shmem_base[0] = bp->common.shmem_base; + shmem2_base[0] = bp->common.shmem2_base; + if (!CHIP_IS_E1x(bp)) { + shmem_base[1] = + SHMEM2_RD(bp, other_shmem_base_addr); + shmem2_base[1] = + SHMEM2_RD(bp, other_shmem2_base_addr); + } + bnx2x_acquire_phy_lock(bp); + bnx2x_common_init_phy(bp, shmem_base, shmem2_base, + bp->common.chip_id); + bnx2x_release_phy_lock(bp); +} + +static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) +{ + REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); + REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); + + /* make sure this value is 0 */ + REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); + + REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val); + REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val); +} + +static void bnx2x_set_endianity(struct bnx2x *bp) +{ +#ifdef __BIG_ENDIAN + bnx2x_config_endianity(bp, 1); +#else + bnx2x_config_endianity(bp, 0); +#endif +} + +static void bnx2x_reset_endianity(struct bnx2x *bp) +{ + bnx2x_config_endianity(bp, 0); +} + +/** + * bnx2x_init_hw_common - initialize the HW at the COMMON phase. + * + * @bp: driver handle + */ +static int bnx2x_init_hw_common(struct bnx2x *bp) +{ + u32 val; + + DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); + + /* + * take the RESET lock to protect undi_unload flow from accessing + * registers while we're resetting the chip + */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + + bnx2x_reset_common(bp); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); + + val = 0xfffc; + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); + + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + + bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) { + u8 abs_func_id; + + /** + * 4-port mode or 2-port mode we need to turn of master-enable + * for everyone, after that, turn it back on for self. 
+ * so, we disregard multi-function or not, and always disable + * for all functions on the given path, this means 0,2,4,6 for + * path 0 and 1,3,5,7 for path 1 + */ + for (abs_func_id = BP_PATH(bp); + abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { + if (abs_func_id == BP_ABS_FUNC(bp)) { + REG_WR(bp, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, + 1); + continue; + } + + bnx2x_pretend_func(bp, abs_func_id); + /* clear pf enable */ + bnx2x_pf_disable(bp); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + } + } + + bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); + if (CHIP_IS_E1(bp)) { + /* enable HW interrupt from PXP on USDM overflow + bit 16 on INT_MASK_0 */ + REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); + } + + bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); + bnx2x_init_pxp(bp); + bnx2x_set_endianity(bp); + bnx2x_ilt_init_page_size(bp, INITOP_SET); + + if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) + REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); + + /* let the HW do it's magic ... */ + msleep(100); + /* finish PXP init */ + val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 CFG failed\n"); + return -EBUSY; + } + val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); + if (val != 1) { + BNX2X_ERR("PXP2 RD_INIT failed\n"); + return -EBUSY; + } + + /* Timers bug workaround E2 only. We need to set the entire ILT to + * have entries with value "0" and valid bit on. + * This needs to be done by the first PF that is loaded in a path + * (i.e. common phase) + */ + if (!CHIP_IS_E1x(bp)) { +/* In E2 there is a bug in the timers block that can cause function 6 / 7 + * (i.e. vnic3) to start even if it is marked as "scan-off". + * This occurs when a different function (func2,3) is being marked + * as "scan-off". Real-life scenario for example: if a driver is being + * load-unloaded while func6,7 are down. This will cause the timer to access + * the ilt, translate to a logical address and send a request to read/write. + * Since the ilt for the function that is down is not valid, this will cause + * a translation error which is unrecoverable. + * The Workaround is intended to make sure that when this happens nothing fatal + * will occur. The workaround: + * 1. First PF driver which loads on a path will: + * a. After taking the chip out of reset, by using pretend, + * it will write "0" to the following registers of + * the other vnics. + * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); + * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); + * And for itself it will write '1' to + * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable + * dmae-operations (writing to pram for example.) + * note: can be done for only function 6,7 but cleaner this + * way. + * b. Write zero+valid to the entire ILT. + * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of + * VNIC3 (of that port). The range allocated will be the + * entire ILT. This is needed to prevent ILT range error. + * 2. Any PF driver load flow: + * a. ILT update with the physical addresses of the allocated + * logical pages. + * b. Wait 20msec. - note that this timeout is needed to make + * sure there are no requests in one of the PXP internal + * queues with "old" ILT addresses. + * c. PF enable in the PGLC. + * d. Clear the was_error of the PF in the PGLC. (could have + * occurred while driver was down) + * e. PF enable in the CFC (WEAK + STRONG) + * f. Timers scan enable + * 3. PF driver unload flow: + * a. Clear the Timers scan_en. + * b. Polling for scan_on=0 for that PF. + * c. 
Clear the PF enable bit in the PXP. + * d. Clear the PF enable in the CFC (WEAK + STRONG) + * e. Write zero+valid to all ILT entries (The valid bit must + * stay set) + * f. If this is VNIC 3 of a port then also init + * first_timers_ilt_entry to zero and last_timers_ilt_entry + * to the last entry in the ILT. + * + * Notes: + * Currently the PF error in the PGLC is non recoverable. + * In the future the there will be a recovery routine for this error. + * Currently attention is masked. + * Having an MCP lock on the load/unload process does not guarantee that + * there is no Timer disable during Func6/7 enable. This is because the + * Timers scan is currently being cleared by the MCP on FLR. + * Step 2.d can be done only for PF6/7 and the driver can also check if + * there is error before clearing it. But the flow above is simpler and + * more general. + * All ILT entries are written by zero+valid and not just PF6/7 + * ILT entries since in the future the ILT entries allocation for + * PF-s might be dynamic. + */ + struct ilt_client_info ilt_cli; + struct bnx2x_ilt ilt; + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + memset(&ilt, 0, sizeof(struct bnx2x_ilt)); + + /* initialize dummy TM client */ + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + + /* Step 1: set zeroes to all ilt page entries with valid bit on + * Step 2: set the timers first/last ilt entry to point + * to the entire range to prevent ILT range error for 3rd/4th + * vnic (this code assumes existence of the vnic) + * + * both steps performed by call to bnx2x_ilt_client_init_op() + * with dummy TM client + * + * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT + * and his brother are split registers + */ + bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); + bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); + REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); + } + + REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); + REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); + + if (!CHIP_IS_E1x(bp)) { + int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : + (CHIP_REV_IS_FPGA(bp) ? 400 : 0); + bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); + + /* let the HW do it's magic ... 
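 * The do/while below polls ATC_REG_ATC_INIT_DONE every 200 ms; the retry
 * budget in 'factor' is non-zero only for emulation/FPGA, so on real
 * silicon the register is effectively checked once after a single sleep.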
*/ + do { + msleep(200); + val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); + } while (factor-- && (val != 1)); + + if (val != 1) { + BNX2X_ERR("ATC_INIT failed\n"); + return -EBUSY; + } + } + + bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); + + bnx2x_iov_init_dmae(bp); + + /* clean the DMAE memory */ + bp->dmae_ready = 1; + bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); + + bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); + + bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); + bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); + + bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); + + /* QM queues pointers table */ + bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); + + /* soft reset pulse */ + REG_WR(bp, QM_REG_SOFT_RESET, 1); + REG_WR(bp, QM_REG_SOFT_RESET, 0); + + if (CNIC_SUPPORT(bp)) + bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); + + if (!CHIP_REV_IS_SLOW(bp)) + /* enable hw interrupt from doorbell Q */ + REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); + + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); + REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); + + if (!CHIP_IS_E1(bp)) + REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); + + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure that VNTag and VLAN headers must be + * received in afex mode + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); + } else { + /* Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 7 : 6); + } + } + + bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) { + /* reset VFC memories */ + REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + + msleep(20); + } + + bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); + + /* sync semi rtc */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + 0x80000000); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, + 0x80000000); + + bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure that VNTag and VLAN headers must be + * sent in afex mode + */ + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); + REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); + REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); + REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); + REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); + } else { + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 
7 : 6); + } + } + + REG_WR(bp, SRC_REG_SOFT_RST, 1); + + bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); + + if (CNIC_SUPPORT(bp)) { + REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); + REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); + REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); + REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); + REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); + REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); + REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); + REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); + REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); + REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); + } + REG_WR(bp, SRC_REG_SOFT_RST, 0); + + if (sizeof(union cdu_context) != 1024) + /* we currently assume that a context is 1024 bytes */ + dev_alert(&bp->pdev->dev, + "please adjust the size of cdu_context(%ld)\n", + (long)sizeof(union cdu_context)); + + bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); + val = (4 << 24) + (0 << 12) + 1024; + REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); + + bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); + REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); + /* enable context validation interrupt from CFC */ + REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); + + /* set the thresholds to prevent CFC/CDU race */ + REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); + + bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); + + if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) + REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); + + bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); + + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2814, 0xffffffff); + REG_WR(bp, 0x3820, 0xffffffff); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, + (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | + PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, + (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | + PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); + REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, + (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | + PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); + } + + bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); + if (!CHIP_IS_E1(bp)) { + /* in E3 this done in per-port section */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); + } + if (CHIP_IS_E1H(bp)) + /* not applicable for E2 (and above ...) 
*/ + REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); + + if (CHIP_REV_IS_SLOW(bp)) + msleep(200); + + /* finish CFC init */ + val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC LL_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC AC_INIT failed\n"); + return -EBUSY; + } + val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); + if (val != 1) { + BNX2X_ERR("CFC CAM_INIT failed\n"); + return -EBUSY; + } + REG_WR(bp, CFC_REG_DEBUG0, 0); + + if (CHIP_IS_E1(bp)) { + /* read NIG statistic + to see if this is our first up since powerup */ + bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); + val = *bnx2x_sp(bp, wb_data[0]); + + /* do internal memory self test */ + if ((val == 0) && bnx2x_int_mem_test(bp)) { + BNX2X_ERR("internal mem self test failed\n"); + return -EBUSY; + } + } + + bnx2x_setup_fan_failure_detection(bp); + + /* clear PXP2 attentions */ + REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); + + bnx2x_enable_blocks_attention(bp); + bnx2x_enable_blocks_parity(bp); + + if (!BP_NOMCP(bp)) { + if (CHIP_IS_E1x(bp)) + bnx2x__common_init_phy(bp); + } else + BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + + return 0; +} + +/** + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. + * + * @bp: driver handle + */ +static int bnx2x_init_hw_common_chip(struct bnx2x *bp) +{ + int rc = bnx2x_init_hw_common(bp); + + if (rc) + return rc; + + /* In E2 2-PORT mode, same ext phy is used for the two paths */ + if (!BP_NOMCP(bp)) + bnx2x__common_init_phy(bp); + + return 0; +} + +static int bnx2x_init_hw_port(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; + u32 low, high; + u32 val, reg; + + DP(NETIF_MSG_HW, "starting port init port %d\n", port); + + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); + + bnx2x_init_block(bp, BLOCK_MISC, init_phase); + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); + + /* Timers bug workaround: disables the pf_master bit in pglue at + * common phase, we need to enable it here before any dmae access are + * attempted. Therefore we manually added the enable-master to the + * port phase (it also happens in the function phase) + */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); + + /* QM cid (connection) count */ + bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); + + if (CNIC_SUPPORT(bp)) { + bnx2x_init_block(bp, BLOCK_TM, init_phase); + REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); + REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); + } + + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + + if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { + + if (IS_MF(bp)) + low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); + else if (bp->dev->mtu > 4096) { + if (bp->flags & ONE_PORT_FLAG) + low = 160; + else { + val = bp->dev->mtu; + /* (24*1024 + val*4)/256 */ + low = 96 + (val/64) + + ((val % 64) ? 1 : 0); + } + } else + low = ((bp->flags & ONE_PORT_FLAG) ? 
80 : 160); + high = low + 56; /* 14*1024/256 */ + REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); + REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); + } + + if (CHIP_MODE_IS_4_PORT(bp)) + REG_WR(bp, (BP_PORT(bp) ? + BRB1_REG_MAC_GUARANTIED_1 : + BRB1_REG_MAC_GUARANTIED_0), 40); + + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + if (CHIP_IS_E3B0(bp)) { + if (IS_MF_AFEX(bp)) { + /* configure headers for AFEX mode */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : + PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_MUST_HAVE_HDRS_PORT_1 : + PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); + } else { + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (bp->path_has_ovlan ? 7 : 6)); + } + } + + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); + + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); + + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + bnx2x_init_block(bp, BLOCK_XPB, init_phase); + + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + + if (CHIP_IS_E1x(bp)) { + /* configure PBF to work without PAUSE mtu 9000 */ + REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); + + /* update threshold */ + REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); + /* update init credit */ + REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); + + /* probe changes */ + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); + udelay(50); + REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); + } + + if (CNIC_SUPPORT(bp)) + bnx2x_init_block(bp, BLOCK_SRC, init_phase); + + bnx2x_init_block(bp, BLOCK_CDU, init_phase); + bnx2x_init_block(bp, BLOCK_CFC, init_phase); + + if (CHIP_IS_E1(bp)) { + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, BLOCK_HC, init_phase); + + bnx2x_init_block(bp, BLOCK_IGU, init_phase); + + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); + /* init aeu_mask_attn_func_0/1: + * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use + * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF + * bits 4-7 are used for "per vn group attention" */ + val = IS_MF(bp) ? 0xF7 : 0x7; + /* Enable DCBX attention for all but E1 */ + val |= CHIP_IS_E1(bp) ? 0 : 0x10; + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); + + /* SCPAD_PARITY should NOT trigger close the gates */ + reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + + reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; + REG_WR(bp, reg, + REG_RD(bp, reg) & + ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + + bnx2x_init_block(bp, BLOCK_NIG, init_phase); + + if (!CHIP_IS_E1x(bp)) { + /* Bit-map indicating which L2 hdrs may appear after the + * basic Ethernet header + */ + if (IS_MF_AFEX(bp)) + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); + else + REG_WR(bp, BP_PORT(bp) ? 
+ NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(bp) ? 7 : 6); + + if (CHIP_IS_E3(bp)) + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_LLH1_MF_MODE : + NIG_REG_LLH_MF_MODE, IS_MF(bp)); + } + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); + + if (!CHIP_IS_E1(bp)) { + /* 0x2 disable mf_ov, 0x1 enable */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, + (IS_MF_SD(bp) ? 0x1 : 0x2)); + + if (!CHIP_IS_E1x(bp)) { + val = 0; + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + val = 1; + break; + case MULTI_FUNCTION_SI: + case MULTI_FUNCTION_AFEX: + val = 2; + break; + } + + REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE : + NIG_REG_LLH0_CLS_TYPE), val); + } + { + REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); + REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); + REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); + } + } + + /* If SPIO5 is set to generate interrupts, enable it for this port */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + if (val & MISC_SPIO_SPIO5) { + u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : + MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); + val = REG_RD(bp, reg_addr); + val |= AEU_INPUTS_ATTN_BITS_SPIO5; + REG_WR(bp, reg_addr, val); + } + + return 0; +} + +static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) +{ + int reg; + u32 wb_write[2]; + + if (CHIP_IS_E1(bp)) + reg = PXP2_REG_RQ_ONCHIP_AT + index*8; + else + reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; + + wb_write[0] = ONCHIP_ADDR1(addr); + wb_write[1] = ONCHIP_ADDR2(addr); + REG_WR_DMAE(bp, reg, wb_write, 2); +} + +void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf) +{ + u32 data, ctl, cnt = 100; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; + u32 sb_bit = 1 << (idu_sb_id%32); + u32 func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(bp)) + return; + + data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup + << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | + IGU_REGULAR_BCLEANUP; + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + data, igu_addr_data); + REG_WR(bp, igu_addr_data, data); + mmiowb(); + barrier(); + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); + + /* wait for clean up to finish */ + while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) + msleep(20); + + if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { + DP(NETIF_MSG_HW, + "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n", + idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); + } +} + +static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) +{ + bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); +} + +static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) +{ + u32 i, base = FUNC_ILT_BASE(func); + for (i = base; i < base + ILT_PER_FUNC; i++) + bnx2x_ilt_wr(bp, i, 0); +} + +static void bnx2x_init_searcher(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); + /* T1 hash bits value determines the T1 number of entries */ + REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); +} + +static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) +{ + int rc; + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_switch_update_params *switch_update_params = + &func_params.params.switch_update; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + + /* Function parameters */ + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes); + if (suspend) + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); + + rc = bnx2x_func_state_change(bp, &func_params); + + return rc; +} + +static int bnx2x_reset_nic_mode(struct bnx2x *bp) +{ + int rc, i, port = BP_PORT(bp); + int vlan_en = 0, mac_en[NUM_MACS]; + + /* Close input from network */ + if (bp->mf_mode == SINGLE_FUNCTION) { + bnx2x_set_rx_filter(&bp->link_params, 0); + } else { + vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN : + NIG_REG_LLH0_FUNC_EN); + REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : + NIG_REG_LLH0_FUNC_EN, 0); + for (i = 0; i < NUM_MACS; i++) { + mac_en[i] = REG_RD(bp, port ? + (NIG_REG_LLH1_FUNC_MEM_ENABLE + + 4 * i) : + (NIG_REG_LLH0_FUNC_MEM_ENABLE + + 4 * i)); + REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + + 4 * i) : + (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0); + } + } + + /* Close BMC to host */ + REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : + NIG_REG_P1_TX_MNG_HOST_ENABLE, 0); + + /* Suspend Tx switching to the PF. Completion of this ramrod + * further guarantees that all the packets of that PF / child + * VFs in BRB were processed by the Parser, so it is safe to + * change the NIC_MODE register. 
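 * Summary of the sequence below: suspend Tx switching with a switch-update
 * ramrod, write 0 to PRS_REG_NIC_MODE, restore the Rx filters / NIG LLH
 * enables saved above, re-open the BMC-to-host path and finally resume
 * Tx switching.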
+ */ + rc = bnx2x_func_switch_update(bp, 1); + if (rc) { + BNX2X_ERR("Can't suspend tx-switching!\n"); + return rc; + } + + /* Change NIC_MODE register */ + REG_WR(bp, PRS_REG_NIC_MODE, 0); + + /* Open input from network */ + if (bp->mf_mode == SINGLE_FUNCTION) { + bnx2x_set_rx_filter(&bp->link_params, 1); + } else { + REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN : + NIG_REG_LLH0_FUNC_EN, vlan_en); + for (i = 0; i < NUM_MACS; i++) { + REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE + + 4 * i) : + (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), + mac_en[i]); + } + } + + /* Enable BMC to host */ + REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE : + NIG_REG_P1_TX_MNG_HOST_ENABLE, 1); + + /* Resume Tx switching to the PF */ + rc = bnx2x_func_switch_update(bp, 0); + if (rc) { + BNX2X_ERR("Can't resume tx-switching!\n"); + return rc; + } + + DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); + return 0; +} + +int bnx2x_init_hw_func_cnic(struct bnx2x *bp) +{ + int rc; + + bnx2x_ilt_init_op_cnic(bp, INITOP_SET); + + if (CONFIGURE_NIC_MODE(bp)) { + /* Configure searcher as part of function hw init */ + bnx2x_init_searcher(bp); + + /* Reset NIC mode */ + rc = bnx2x_reset_nic_mode(bp); + if (rc) + BNX2X_ERR("Can't change NIC mode!\n"); + return rc; + } + + return 0; +} + +/* previous driver DMAE transaction may have occurred when pre-boot stage ended + * and boot began, or when kdump kernel was loaded. Either case would invalidate + * the addresses of the transaction, resulting in was-error bit set in the pci + * causing all hw-to-host pcie transactions to timeout. If this happened we want + * to clear the interrupt which detected this from the pglueb and the was done + * bit + */ +static void bnx2x_clean_pglue_errors(struct bnx2x *bp) +{ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, + 1 << BP_ABS_FUNC(bp)); +} + +static int bnx2x_init_hw_func(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int init_phase = PHASE_PF0 + func; + struct bnx2x_ilt *ilt = BP_ILT(bp); + u16 cdu_ilt_start; + u32 addr, val; + u32 main_mem_base, main_mem_size, main_mem_prty_clr; + int i, main_mem_width, rc; + + DP(NETIF_MSG_HW, "starting func init func %d\n", func); + + /* FLR cleanup - hmmm */ + if (!CHIP_IS_E1x(bp)) { + rc = bnx2x_pf_flr_clnup(bp); + if (rc) { + bnx2x_fw_dump(bp); + return rc; + } + } + + /* set MSI reconfigure capability */ + if (bp->common.int_block == INT_BLOCK_HC) { + addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); + val = REG_RD(bp, addr); + val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; + REG_WR(bp, addr, val); + } + + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); + + ilt = BP_ILT(bp); + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + + if (IS_SRIOV(bp)) + cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS; + cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start); + + /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes + * those of the VFs, so start line should be reset + */ + cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; + for (i = 0; i < L2_ILT_LINES(bp); i++) { + ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; + ilt->lines[cdu_ilt_start + i].page_mapping = + bp->context[i].cxt_mapping; + ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; + } + + bnx2x_ilt_init_op(bp, INITOP_SET); + + if (!CONFIGURE_NIC_MODE(bp)) { + bnx2x_init_searcher(bp); + REG_WR(bp, PRS_REG_NIC_MODE, 0); + DP(NETIF_MSG_IFUP, "NIC MODE disabled\n"); + } else { + /* Set NIC mode */ + REG_WR(bp, PRS_REG_NIC_MODE, 1); + DP(NETIF_MSG_IFUP, "NIC MODE configured\n"); + } + + if (!CHIP_IS_E1x(bp)) { + u32 pf_conf = IGU_PF_CONF_FUNC_EN; + + /* Turn on a single ISR mode in IGU if driver is going to use + * INT#x or MSI + */ + if (!(bp->flags & USING_MSIX_FLAG)) + pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + /* + * Timers workaround bug: function init part. + * Need to wait 20msec after initializing ILT, + * needed to make sure there are no requests in + * one of the PXP internal queues with "old" ILT addresses + */ + msleep(20); + /* + * Master enable - Due to WB DMAE writes performed before this + * register is re-initialized as part of the regular function + * init + */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); + /* Enable the function in IGU */ + REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf); + } + + bp->dmae_ready = 1; + + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); + + bnx2x_clean_pglue_errors(bp); + + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); + bnx2x_init_block(bp, BLOCK_SRC, init_phase); + bnx2x_init_block(bp, BLOCK_MISC, init_phase); + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, QM_REG_PF_EN, 1); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + } + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TM, init_phase); + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */ + + bnx2x_iov_init_dq(bp); + + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + 
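	/* Editor's note, not part of the original patch: the CDU ILT lines
	 * programmed near the top of this function back the per-connection
	 * contexts.  A minimal sketch of the cid -> (context chunk, offset)
	 * arithmetic the driver applies when it later hands a context to FW
	 * (see bnx2x_pf_q_prep_init() further down); 'cid' is a hypothetical
	 * connection id:
	 */
#if 0	/* illustrative only, never compiled */
	{
		u32 cid  = 17;			/* hypothetical L2 cid */
		u32 line = cid / ILT_PAGE_CIDS;	/* which bp->context[] chunk */
		u32 off  = cid % ILT_PAGE_CIDS;	/* same as cid - line * ILT_PAGE_CIDS */
		void *cxt = &bp->context[line].vcxt[off].eth;
	}
#endif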
bnx2x_init_block(bp, BLOCK_XPB, init_phase); + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PBF_REG_DISABLE_PF, 0); + + bnx2x_init_block(bp, BLOCK_CDU, init_phase); + + bnx2x_init_block(bp, BLOCK_CFC, init_phase); + + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); + + if (IS_MF(bp)) { + if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) { + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, + bp->mf_ov); + } + } + + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); + + /* HC init per function */ + if (bp->common.int_block == INT_BLOCK_HC) { + if (CHIP_IS_E1H(bp)) { + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } + bnx2x_init_block(bp, BLOCK_HC, init_phase); + + } else { + int num_segs, sb_idx, prod_offset; + + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); + + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + bnx2x_init_block(bp, BLOCK_IGU, init_phase); + + if (!CHIP_IS_E1x(bp)) { + int dsb_idx = 0; + /** + * Producer memory: + * E2 mode: address 0-135 match to the mapping memory; + * 136 - PF0 default prod; 137 - PF1 default prod; + * 138 - PF2 default prod; 139 - PF3 default prod; + * 140 - PF0 attn prod; 141 - PF1 attn prod; + * 142 - PF2 attn prod; 143 - PF3 attn prod; + * 144-147 reserved. + * + * E1.5 mode - In backward compatible mode; + * for non default SB; each even line in the memory + * holds the U producer and each odd line hold + * the C producer. The first 128 producers are for + * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 + * producers are for the DSB for each PF. + * Each PF has five segments: (the order inside each + * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; + * 144-147 attn prods; + */ + /* non-default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; + for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) { + prod_offset = (bp->igu_base_sb + sb_idx) * + num_segs; + + for (i = 0; i < num_segs; i++) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i) * 4; + REG_WR(bp, addr, 0); + } + /* send consumer update with value 0 */ + bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_igu_clear_sb(bp, + bp->igu_base_sb + sb_idx); + } + + /* default-status-blocks */ + num_segs = CHIP_INT_MODE_IS_BC(bp) ? + IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; + + if (CHIP_MODE_IS_4_PORT(bp)) + dsb_idx = BP_FUNC(bp); + else + dsb_idx = BP_VN(bp); + + prod_offset = (CHIP_INT_MODE_IS_BC(bp) ? 
+ IGU_BC_BASE_DSB_PROD + dsb_idx : + IGU_NORM_BASE_DSB_PROD + dsb_idx); + + /* + * igu prods come in chunks of E1HVN_MAX (4) - + * does not matters what is the current chip mode + */ + for (i = 0; i < (num_segs * E1HVN_MAX); + i += E1HVN_MAX) { + addr = IGU_REG_PROD_CONS_MEMORY + + (prod_offset + i)*4; + REG_WR(bp, addr, 0); + } + /* send consumer update with 0 */ + if (CHIP_INT_MODE_IS_BC(bp)) { + bnx2x_ack_sb(bp, bp->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + CSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + XSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + TSTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } else { + bnx2x_ack_sb(bp, bp->igu_dsb_id, + USTORM_ID, 0, IGU_INT_NOP, 1); + bnx2x_ack_sb(bp, bp->igu_dsb_id, + ATTENTION_ID, 0, IGU_INT_NOP, 1); + } + bnx2x_igu_clear_sb(bp, bp->igu_dsb_id); + + /* !!! These should become driver const once + rf-tool supports split-68 const */ + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); + } + } + + /* Reset PCIE errors for debug */ + REG_WR(bp, 0x2114, 0xffffffff); + REG_WR(bp, 0x2120, 0xffffffff); + + if (CHIP_IS_E1x(bp)) { + main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ + main_mem_base = HC_REG_MAIN_MEMORY + + BP_PORT(bp) * (main_mem_size * 4); + main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; + main_mem_width = 8; + + val = REG_RD(bp, main_mem_prty_clr); + if (val) + DP(NETIF_MSG_HW, + "Hmmm... Parity errors in HC block during function init (0x%x)!\n", + val); + + /* Clear "false" parity errors in MSI-X table */ + for (i = main_mem_base; + i < main_mem_base + main_mem_size * 4; + i += main_mem_width) { + bnx2x_read_dmae(bp, i, main_mem_width / 4); + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), + i, main_mem_width / 4); + } + /* Clear HC parity attention */ + REG_RD(bp, main_mem_prty_clr); + } + +#ifdef BNX2X_STOP_ON_ERROR + /* Enable STORMs SP logging */ + REG_WR8(bp, BAR_USTRORM_INTMEM + + USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_XSTRORM_INTMEM + + XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); +#endif + + bnx2x_phy_probe(&bp->link_params); + + return 0; +} + +void bnx2x_free_mem_cnic(struct bnx2x *bp) +{ + bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE); + + if (!CHIP_IS_E1x(bp)) + BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + else + BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + + BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); +} + +void bnx2x_free_mem(struct bnx2x *bp) +{ + int i; + + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + + if (IS_VF(bp)) + return; + + BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + + BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + + for (i = 0; i < L2_ILT_LINES(bp); i++) + BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, + bp->context[i].size); + bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); + + 
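	/* Editor's note, not part of the original patch: teardown here mirrors
	 * bnx2x_alloc_mem() below - DMA-coherent buffers are released via
	 * BNX2X_PCI_FREE() while plain kcalloc()'d arrays such as ilt->lines
	 * go through BNX2X_FREE().  A minimal sketch of the coherent
	 * alloc/free pairing those helpers presumably wrap (assumption; the
	 * snippet is illustrative and not taken from this driver):
	 */
#if 0	/* illustrative only, never compiled */
	{
		dma_addr_t mapping;
		void *buf = dma_alloc_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					       &mapping, GFP_KERNEL);

		if (buf)
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  buf, mapping);
	}
#endif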
BNX2X_FREE(bp->ilt->lines); + + BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); + + BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); + + BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ); + + bnx2x_iov_free_mem(bp); +} + +int bnx2x_alloc_mem_cnic(struct bnx2x *bp) +{ + if (!CHIP_IS_E1x(bp)) { + /* size = the status block + ramrod buffers */ + bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e2)); + if (!bp->cnic_sb.e2_sb) + goto alloc_mem_err; + } else { + bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, + sizeof(struct host_hc_status_block_e1x)); + if (!bp->cnic_sb.e1x_sb) + goto alloc_mem_err; + } + + if (CONFIGURE_NIC_MODE(bp) && !bp->t2) { + /* allocate searcher T2 table, as it wasn't allocated before */ + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } + + /* write address to which L5 should insert its values */ + bp->cnic_eth_dev.addr_drv_info_to_mcp = + &bp->slowpath->drv_info_to_mcp; + + if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC)) + goto alloc_mem_err; + + return 0; + +alloc_mem_err: + bnx2x_free_mem_cnic(bp); + BNX2X_ERR("Can't allocate memory\n"); + return -ENOMEM; +} + +int bnx2x_alloc_mem(struct bnx2x *bp) +{ + int i, allocated, context_size; + + if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) { + /* allocate searcher T2 table */ + bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); + if (!bp->t2) + goto alloc_mem_err; + } + + bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + if (!bp->def_status_blk) + goto alloc_mem_err; + + bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, + sizeof(struct bnx2x_slowpath)); + if (!bp->slowpath) + goto alloc_mem_err; + + /* Allocate memory for CDU context: + * This memory is allocated separately and not in the generic ILT + * functions because CDU differs in few aspects: + * 1. There are multiple entities allocating memory for context - + * 'regular' driver, CNIC and SRIOV driver. Each separately controls + * its own ILT lines. + * 2. Since CDU page-size is not a single 4KB page (which is the case + * for the other ILT clients), to be efficient we want to support + * allocation of sub-page-size in the last entry. + * 3. Context pointers are used by the driver to pass to FW / update + * the context (for the other ILT clients the pointers are used just to + * free the memory during unload). 
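 *
 * Worked example with hypothetical numbers (not the real constants): if
 * context_size were 9 * 1024 bytes and CDU_ILT_PAGE_SZ were 4096, the loop
 * below would allocate chunks of 4096, 4096 and 1024 bytes, the last one
 * being the sub-page-size allocation mentioned in point 2 above.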
+ */ + context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + + for (i = 0, allocated = 0; allocated < context_size; i++) { + bp->context[i].size = min(CDU_ILT_PAGE_SZ, + (context_size - allocated)); + bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, + bp->context[i].size); + if (!bp->context[i].vcxt) + goto alloc_mem_err; + allocated += bp->context[i].size; + } + bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), + GFP_KERNEL); + if (!bp->ilt->lines) + goto alloc_mem_err; + + if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) + goto alloc_mem_err; + + if (bnx2x_iov_alloc_mem(bp)) + goto alloc_mem_err; + + /* Slow path ring */ + bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); + if (!bp->spq) + goto alloc_mem_err; + + /* EQ */ + bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, + BCM_PAGE_SIZE * NUM_EQ_PAGES); + if (!bp->eq_ring) + goto alloc_mem_err; + + return 0; + +alloc_mem_err: + bnx2x_free_mem(bp); + BNX2X_ERR("Can't allocate memory\n"); + return -ENOMEM; +} + +/* + * Init service functions + */ + +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags) +{ + int rc; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + + memset(&ramrod_param, 0, sizeof(ramrod_param)); + + /* Fill general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; + + /* Fill a user request section if needed */ + if (!test_bit(RAMROD_CONT, ramrod_flags)) { + memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); + + __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); + + /* Set the command: ADD or DEL */ + if (set) + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + else + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; + } + + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + } else if (rc < 0) + BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del")); + + return rc; +} + +int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp) +{ + int rc; + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + + /* Wait for completion of requested */ + if (wait_for_comp) + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + + /* Set the mac type of addresses we want to clear */ + __set_bit(mac_type, &vlan_mac_flags); + + rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc < 0) + BNX2X_ERR("Failed to delete MACs: %d\n", rc); + + return rc; +} + +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) +{ + if (IS_PF(bp)) { + unsigned long ramrod_flags = 0; + + DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + return bnx2x_set_mac_one(bp, bp->dev->dev_addr, + &bp->sp_objs->mac_obj, set, + BNX2X_ETH_MAC, &ramrod_flags); + } else { /* vf */ + return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, + bp->fp->index, true); + } +} + +int bnx2x_setup_leading(struct bnx2x *bp) +{ + if (IS_PF(bp)) + return bnx2x_setup_queue(bp, &bp->fp[0], true); + else /* VF */ + return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); +} + +/** + * bnx2x_set_int_mode - configure interrupt mode + * + * @bp: driver handle + * + * In case of MSI-X it will also try to enable MSI-X. 
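 * Falls back in the order MSI-X -> MSI -> INTx: if MSI-X cannot be enabled
 * (and the caller is a PF rather than a VF), the switch below drops through
 * to MSI and then to legacy INTx with a single ethernet queue.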
+ */ +int bnx2x_set_int_mode(struct bnx2x *bp) +{ + int rc = 0; + + if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { + BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); + return -EINVAL; + } + + switch (int_mode) { + case BNX2X_INT_MODE_MSIX: + /* attempt to enable msix */ + rc = bnx2x_enable_msix(bp); + + /* msix attained */ + if (!rc) + return 0; + + /* vfs use only msix */ + if (rc && IS_VF(bp)) + return rc; + + /* failed to enable multiple MSI-X */ + BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", + bp->num_queues, + 1 + bp->num_cnic_queues); + + /* falling through... */ + case BNX2X_INT_MODE_MSI: + bnx2x_enable_msi(bp); + + /* falling through... */ + case BNX2X_INT_MODE_INTX: + bp->num_ethernet_queues = 1; + bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + BNX2X_DEV_INFO("set number of queues to 1\n"); + break; + default: + BNX2X_DEV_INFO("unknown value in int_mode module parameter\n"); + return -EINVAL; + } + return 0; +} + +/* must be called prior to any HW initializations */ +static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp) +{ + if (IS_SRIOV(bp)) + return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS; + return L2_ILT_LINES(bp); +} + +void bnx2x_ilt_set_info(struct bnx2x *bp) +{ + struct ilt_client_info *ilt_client; + struct bnx2x_ilt *ilt = BP_ILT(bp); + u16 line = 0; + + ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp)); + DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line); + + /* CDU */ + ilt_client = &ilt->clients[ILT_CLIENT_CDU]; + ilt_client->client_num = ILT_CLIENT_CDU; + ilt_client->page_size = CDU_ILT_PAGE_SZ; + ilt_client->flags = ILT_CLIENT_SKIP_MEM; + ilt_client->start = line; + line += bnx2x_cid_ilt_lines(bp); + + if (CNIC_SUPPORT(bp)) + line += CNIC_ILT_LINES; + ilt_client->end = line - 1; + + DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + + /* QM */ + if (QM_INIT(bp->qm_cid_count)) { + ilt_client = &ilt->clients[ILT_CLIENT_QM]; + ilt_client->client_num = ILT_CLIENT_QM; + ilt_client->page_size = QM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + + /* 4 bytes for each cid */ + line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4, + QM_ILT_PAGE_SZ); + + ilt_client->end = line - 1; + + DP(NETIF_MSG_IFUP, + "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + } + + if (CNIC_SUPPORT(bp)) { + /* SRC */ + ilt_client = &ilt->clients[ILT_CLIENT_SRC]; + ilt_client->client_num = ILT_CLIENT_SRC; + ilt_client->page_size = SRC_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += SRC_ILT_LINES; + ilt_client->end = line - 1; + + DP(NETIF_MSG_IFUP, + "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + + /* TM */ + ilt_client = &ilt->clients[ILT_CLIENT_TM]; + ilt_client->client_num = ILT_CLIENT_TM; + ilt_client->page_size = TM_ILT_PAGE_SZ; + ilt_client->flags = 0; + ilt_client->start = line; + line += TM_ILT_LINES; + ilt_client->end = line - 1; + + DP(NETIF_MSG_IFUP, + "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", + ilt_client->start, + ilt_client->end, + 
ilt_client->page_size, + ilt_client->flags, + ilog2(ilt_client->page_size >> 12)); + } + + BUG_ON(line > ILT_MAX_LINES); +} + +/** + * bnx2x_pf_q_prep_init - prepare INIT transition parameters + * + * @bp: driver handle + * @fp: pointer to fastpath + * @init_params: pointer to parameters structure + * + * parameters configured: + * - HC configuration + * - Queue's CDU context + */ +static void bnx2x_pf_q_prep_init(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) +{ + u8 cos; + int cxt_index, cxt_offset; + + /* FCoE Queue uses Default SB, thus has no HC capabilities */ + if (!IS_FCOE_FP(fp)) { + __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); + + /* If HC is supported, enable host coalescing in the transition + * to INIT state. + */ + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); + + /* HC rate */ + init_params->rx.hc_rate = bp->rx_ticks ? + (1000000 / bp->rx_ticks) : 0; + init_params->tx.hc_rate = bp->tx_ticks ? + (1000000 / bp->tx_ticks) : 0; + + /* FW SB ID */ + init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = + fp->fw_sb_id; + + /* + * CQ index among the SB indices: FCoE clients uses the default + * SB, therefore it's different. + */ + init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; + } + + /* set maximum number of COSs supported by this queue */ + init_params->max_cos = fp->max_cos; + + DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n", + fp->index, init_params->max_cos); + + /* set the context pointers queue object */ + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { + cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; + cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * + ILT_PAGE_CIDS); + init_params->cxts[cos] = + &bp->context[cxt_index].vcxt[cxt_offset].eth; + } +} + +static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct bnx2x_queue_state_params *q_params, + struct bnx2x_queue_setup_tx_only_params *tx_only_params, + int tx_index, bool leading) +{ + memset(tx_only_params, 0, sizeof(*tx_only_params)); + + /* Set the command */ + q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; + + /* Set tx-only QUEUE flags: don't zero statistics */ + tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); + + /* choose the index of the cid to send the slow path on */ + tx_only_params->cid_index = tx_index; + + /* Set general TX_ONLY_SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); + + /* Set Tx TX_ONLY_SETUP parameters */ + bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); + + DP(NETIF_MSG_IFUP, + "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n", + tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], + q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, + tx_only_params->gen_params.spcl_id, tx_only_params->flags); + + /* send the ramrod */ + return bnx2x_queue_state_change(bp, q_params); +} + +/** + * bnx2x_setup_queue - setup queue + * + * @bp: driver handle + * @fp: pointer to fastpath + * @leading: is leading + * + * This function performs 2 steps in a Queue state machine + * actually: 1) RESET->INIT 2) INIT->SETUP + */ + +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading) +{ + struct bnx2x_queue_state_params q_params 
= {NULL}; + struct bnx2x_queue_setup_params *setup_params = + &q_params.params.setup; + struct bnx2x_queue_setup_tx_only_params *tx_only_params = + &q_params.params.tx_only; + int rc; + u8 tx_index; + + DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index); + + /* reset IGU state skip FCoE L2 queue */ + if (!IS_FCOE_FP(fp)) + bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, + IGU_INT_ENABLE, 0); + + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* Prepare the INIT parameters */ + bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); + + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_INIT; + + /* Change the state to INIT */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); + return rc; + } + + DP(NETIF_MSG_IFUP, "init complete\n"); + + /* Now move the Queue to the SETUP state... */ + memset(setup_params, 0, sizeof(*setup_params)); + + /* Set QUEUE flags */ + setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); + + /* Set general SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, + FIRST_TX_COS_INDEX); + + bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, + &setup_params->rxq_params); + + bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, + FIRST_TX_COS_INDEX); + + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_SETUP; + + if (IS_FCOE_FP(fp)) + bp->fcoe_init = true; + + /* Change the state to SETUP */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); + return rc; + } + + /* loop through the relevant tx-only indices */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++) { + + /* prepare and send tx-only ramrod*/ + rc = bnx2x_setup_tx_only(bp, fp, &q_params, + tx_only_params, tx_index, leading); + if (rc) { + BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", + fp->index, tx_index); + return rc; + } + } + + return rc; +} + +static int bnx2x_stop_queue(struct bnx2x *bp, int index) +{ + struct bnx2x_fastpath *fp = &bp->fp[index]; + struct bnx2x_fp_txdata *txdata; + struct bnx2x_queue_state_params q_params = {NULL}; + int rc, tx_index; + + DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); + + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + /* close tx-only connections */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++){ + + /* ascertain this is a normal queue*/ + txdata = fp->txdata_ptr[tx_index]; + + DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", + txdata->txq_index); + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = tx_index; + + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = tx_index; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + } + /* Stop the primary connection: */ + /* ...halt the connection */ + q_params.cmd = BNX2X_Q_CMD_HALT; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* 
...terminate the connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + /* ...delete cfc entry */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; + return bnx2x_queue_state_change(bp, &q_params); +} + +static void bnx2x_reset_func(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_FUNC(bp); + int i; + + /* Disable the function in the FW */ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); + + /* FP SBs */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), + SB_DISABLED); + } + + if (CNIC_LOADED(bp)) + /* CNIC SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET + (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED); + + /* SP SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), + SB_DISABLED); + + for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) + REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), + 0); + + /* Configure IGU */ + if (bp->common.int_block == INT_BLOCK_HC) { + REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); + REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); + } else { + REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); + REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); + } + + if (CNIC_LOADED(bp)) { + /* Disable Timer scan */ + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); + /* + * Wait for at least 10ms and up to 2 second for the timers + * scan to complete + */ + for (i = 0; i < 200; i++) { + usleep_range(10000, 20000); + if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) + break; + } + } + /* Clear ILT */ + bnx2x_clear_func_ilt(bp, func); + + /* Timers workaround bug for E2: if this is vnic-3, + * we need to set the entire ilt range for this timers. + */ + if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { + struct ilt_client_info ilt_cli; + /* use dummy TM client */ + memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); + ilt_cli.start = 0; + ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; + ilt_cli.client_num = ILT_CLIENT_TM; + + bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); + } + + /* this assumes that reset_port() called before reset_func()*/ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_disable(bp); + + bp->dmae_ready = 0; +} + +static void bnx2x_reset_port(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 val; + + /* Reset physical Link */ + bnx2x__link_reset(bp); + + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); + + /* Do not rcv packets to BRB */ + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); + /* Do not direct rcv packets that are not for MCP to the BRB */ + REG_WR(bp, (port ? 
NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + + /* Configure AEU */ + REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); + + msleep(100); + /* Check for BRB port occupancy */ + val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); + if (val) + DP(NETIF_MSG_IFDOWN, + "BRB1 is not empty %d blocks are occupied\n", val); + + /* TODO: Close Doorbell port? */ +} + +static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) +{ + struct bnx2x_func_state_params func_params = {NULL}; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_HW_RESET; + + func_params.params.hw_init.load_phase = load_code; + + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_func_stop(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + int rc; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_STOP; + + /* + * Try to stop the function the 'good way'. If fails (in case + * of a parity error during bnx2x_chip_cleanup()) and we are + * not in a debug mode, perform a state transaction in order to + * enable further HW_RESET transaction. + */ + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) { +#ifdef BNX2X_STOP_ON_ERROR + return rc; +#else + BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n"); + __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); + return bnx2x_func_state_change(bp, &func_params); +#endif + } + + return 0; +} + +/** + * bnx2x_send_unload_req - request unload mode from the MCP. + * + * @bp: driver handle + * @unload_mode: requested function's unload mode + * + * Return unload mode returned by the MCP: COMMON, PORT or FUNC. + */ +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) +{ + u32 reset_code = 0; + int port = BP_PORT(bp); + + /* Select the UNLOAD request mode */ + if (unload_mode == UNLOAD_NORMAL) + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + + else if (bp->flags & NO_WOL_FLAG) + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; + + else if (bp->wol) { + u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; + u8 *mac_addr = bp->dev->dev_addr; + struct pci_dev *pdev = bp->pdev; + u32 val; + u16 pmc; + + /* The mac address is written to entries 1-4 to + * preserve entry 0 which is used by the PMF + */ + u8 entry = (BP_VN(bp) + 1)*8; + + val = (mac_addr[0] << 8) | mac_addr[1]; + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); + + val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]; + EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); + + /* Enable the PME and clear the status */ + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); + pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; + pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); + + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; + + } else + reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + + /* Send the request to the MCP */ + if (!BP_NOMCP(bp)) + reset_code = bnx2x_fw_command(bp, reset_code, 0); + else { + int path = BP_PATH(bp); + + DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + bnx2x_load_count[path][0]--; + bnx2x_load_count[path][1 + port]--; + DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", + path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], + bnx2x_load_count[path][2]); + if (bnx2x_load_count[path][0] == 0) + reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; + else if (bnx2x_load_count[path][1 + port] == 0) + reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; + else + reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; + } + + return reset_code; +} + +/** + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. + * + * @bp: driver handle + * @keep_link: true iff link should be kept up + */ +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) +{ + u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; + + /* Report UNLOAD_DONE to MCP */ + if (!BP_NOMCP(bp)) + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); +} + +static int bnx2x_func_wait_started(struct bnx2x *bp) +{ + int tout = 50; + int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; + + if (!bp->port.pmf) + return 0; + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TX disable/enable transaction + * 1. Sync IRS for default SB + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait, that TX disable/enable transaction completes + * + * 1+2 guarantee that if DCBx attention was scheduled it already changed + * pending bit of transaction from STARTED-->TX_STOPPED, if we already + * received completion for the transaction the state is TX_STOPPED. + * State will return to STARTED after completion of TX_STOPPED-->STARTED + * transaction. + */ + + /* make sure default SB ISR is done */ + if (msix) + synchronize_irq(bp->msix_table[0].vector); + else + synchronize_irq(bp->pdev->irq); + + flush_workqueue(bnx2x_wq); + flush_workqueue(bnx2x_iov_wq); + + while (bnx2x_func_get_state(bp, &bp->func_obj) != + BNX2X_F_STATE_STARTED && tout--) + msleep(20); + + if (bnx2x_func_get_state(bp, &bp->func_obj) != + BNX2X_F_STATE_STARTED) { +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("Wrong function state\n"); + return -EBUSY; +#else + /* + * Failed to complete the transaction in a "good way" + * Force both transactions with CLR bit + */ + struct bnx2x_func_state_params func_params = {NULL}; + + DP(NETIF_MSG_IFDOWN, + "Hmmm... Unexpected function state! 
Forcing STARTED-->TX_STOPPED-->STARTED\n"); + + func_params.f_obj = &bp->func_obj; + __set_bit(RAMROD_DRV_CLR_ONLY, + &func_params.ramrod_flags); + + /* STARTED-->TX_ST0PPED */ + func_params.cmd = BNX2X_F_CMD_TX_STOP; + bnx2x_func_state_change(bp, &func_params); + + /* TX_ST0PPED-->STARTED */ + func_params.cmd = BNX2X_F_CMD_TX_START; + return bnx2x_func_state_change(bp, &func_params); +#endif + } + + return 0; +} + +static void bnx2x_disable_ptp(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + /* Disable sending PTP packets to host */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); + + /* Reset PTP event detection rules */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable the PTP feature */ + REG_WR(bp, port ? NIG_REG_P1_PTP_EN : + NIG_REG_P0_PTP_EN, 0x0); +} + +/* Called during unload, to stop PTP-related stuff */ +static void bnx2x_stop_ptp(struct bnx2x *bp) +{ + /* Cancel PTP work queue. Should be done after the Tx queues are + * drained to prevent additional scheduling. + */ + cancel_work_sync(&bp->ptp_task); + + if (bp->ptp_tx_skb) { + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + } + + /* Disable PTP in HW */ + bnx2x_disable_ptp(bp); + + DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); +} + +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) +{ + int port = BP_PORT(bp); + int i, rc = 0; + u8 cos; + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + u32 reset_code; + + /* Wait until tx fastpath tasks complete */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + for_each_cos_in_tx_queue(fp, cos) + rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); +#ifdef BNX2X_STOP_ON_ERROR + if (rc) + return; +#endif + } + + /* Give HW time to discard old tx messages */ + usleep_range(1000, 2000); + + /* Clean all ETH MACs */ + rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, + false); + if (rc < 0) + BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); + + /* Clean up UC list */ + rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, + true); + if (rc < 0) + BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", + rc); + + /* Disable LLH */ + if (!CHIP_IS_E1(bp)) + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); + + /* Set "drop all" (stop Rx). + * We need to take a netif_addr_lock() here in order to prevent + * a race between the completion code and this code. + */ + netif_addr_lock_bh(bp->dev); + /* Schedule the rx_mode command */ + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) + set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + else + bnx2x_set_storm_rx_mode(bp); + + /* Cleanup multicast configuration */ + rparam.mcast_obj = &bp->mcast_obj; + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) + BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); + + netif_addr_unlock_bh(bp->dev); + + bnx2x_iov_chip_cleanup(bp); + + /* + * Send the UNLOAD_REQUEST to the MCP. This will return if + * this function should perform FUNC, PORT or COMMON HW + * reset. 
+ */ + reset_code = bnx2x_send_unload_req(bp, unload_mode); + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TX disable/enable transaction + */ + rc = bnx2x_func_wait_started(bp); + if (rc) { + BNX2X_ERR("bnx2x_func_wait_started failed\n"); +#ifdef BNX2X_STOP_ON_ERROR + return; +#endif + } + + /* Close multi and leading connections + * Completions for ramrods are collected in a synchronous way + */ + for_each_eth_queue(bp, i) + if (bnx2x_stop_queue(bp, i)) +#ifdef BNX2X_STOP_ON_ERROR + return; +#else + goto unload_error; +#endif + + if (CNIC_LOADED(bp)) { + for_each_cnic_queue(bp, i) + if (bnx2x_stop_queue(bp, i)) +#ifdef BNX2X_STOP_ON_ERROR + return; +#else + goto unload_error; +#endif + } + + /* If SP settings didn't get completed so far - something + * very wrong has happen. + */ + if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) + BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); + +#ifndef BNX2X_STOP_ON_ERROR +unload_error: +#endif + rc = bnx2x_func_stop(bp); + if (rc) { + BNX2X_ERR("Function stop failed!\n"); +#ifdef BNX2X_STOP_ON_ERROR + return; +#endif + } + + /* stop_ptp should be after the Tx queues are drained to prevent + * scheduling to the cancelled PTP work queue. It should also be after + * function stop ramrod is sent, since as part of this ramrod FW access + * PTP registers. + */ + if (bp->flags & PTP_SUPPORTED) + bnx2x_stop_ptp(bp); + + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + + /* Release IRQs */ + bnx2x_free_irq(bp); + + /* Reset the chip */ + rc = bnx2x_reset_hw(bp, reset_code); + if (rc) + BNX2X_ERR("HW_RESET failed\n"); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp, keep_link); +} + +void bnx2x_disable_close_the_gate(struct bnx2x *bp) +{ + u32 val; + + DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); + + if (CHIP_IS_E1(bp)) { + int port = BP_PORT(bp); + u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0; + + val = REG_RD(bp, addr); + val &= ~(0x300); + REG_WR(bp, addr, val); + } else { + val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); + val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | + MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); + REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); + } +} + +/* Close gates #2, #3 and #4: */ +static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) +{ + u32 val; + + /* Gates #2 and #4a are closed/opened for "not E1" only */ + if (!CHIP_IS_E1(bp)) { + /* #4 */ + REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); + /* #2 */ + REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); + } + + /* #3 */ + if (CHIP_IS_E1x(bp)) { + /* Prevent interrupts from HC on both ports */ + val = REG_RD(bp, HC_REG_CONFIG_1); + REG_WR(bp, HC_REG_CONFIG_1, + (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : + (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); + + val = REG_RD(bp, HC_REG_CONFIG_0); + REG_WR(bp, HC_REG_CONFIG_0, + (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : + (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); + } else { + /* Prevent incoming interrupts in IGU */ + val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); + + REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, + (!close) ? + (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : + (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); + } + + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", + close ? 
"closing" : "opening"); + mmiowb(); +} + +#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ + +static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) +{ + /* Do some magic... */ + u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); + *magic_val = val & SHARED_MF_CLP_MAGIC; + MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); +} + +/** + * bnx2x_clp_reset_done - restore the value of the `magic' bit. + * + * @bp: driver handle + * @magic_val: old value of the `magic' bit. + */ +static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) +{ + /* Restore the `magic' bit value... */ + u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); + MF_CFG_WR(bp, shared_mf_config.clp_mb, + (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); +} + +/** + * bnx2x_reset_mcp_prep - prepare for MCP reset. + * + * @bp: driver handle + * @magic_val: old value of 'magic' bit. + * + * Takes care of CLP configurations. + */ +static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val) +{ + u32 shmem; + u32 validity_offset; + + DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n"); + + /* Set `magic' bit in order to save MF config */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_prep(bp, magic_val); + + /* Get shmem offset */ + shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + validity_offset = + offsetof(struct shmem_region, validity_map[BP_PORT(bp)]); + + /* Clear validity map flags */ + if (shmem > 0) + REG_WR(bp, shmem + validity_offset, 0); +} + +#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ +#define MCP_ONE_TIMEOUT 100 /* 100 ms */ + +/** + * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT + * + * @bp: driver handle + */ +static void bnx2x_mcp_wait_one(struct bnx2x *bp) +{ + /* special handling for emulation and FPGA, + wait 10 times longer */ + if (CHIP_REV_IS_SLOW(bp)) + msleep(MCP_ONE_TIMEOUT*10); + else + msleep(MCP_ONE_TIMEOUT); +} + +/* + * initializes bp->common.shmem_base and waits for validity signature to appear + */ +static int bnx2x_init_shmem(struct bnx2x *bp) +{ + int cnt = 0; + u32 val = 0; + + do { + bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + if (bp->common.shmem_base) { + val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); + if (val & SHR_MEM_VALIDITY_MB) + return 0; + } + + bnx2x_mcp_wait_one(bp); + + } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); + + BNX2X_ERR("BAD MCP validity signature\n"); + + return -ENODEV; +} + +static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) +{ + int rc = bnx2x_init_shmem(bp); + + /* Restore the `magic' bit value */ + if (!CHIP_IS_E1(bp)) + bnx2x_clp_reset_done(bp, magic_val); + + return rc; +} + +static void bnx2x_pxp_prep(struct bnx2x *bp) +{ + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, PXP2_REG_RD_START_INIT, 0); + REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); + mmiowb(); + } +} + +/* + * Reset the whole chip except for: + * - PCIE core + * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by + * one reset bit) + * - IGU + * - MISC (including AEU) + * - GRC + * - RBCN, RBCP + */ +static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) +{ + u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; + u32 global_bits2, stay_reset2; + + /* + * Bits that have to be set in reset_mask2 if we want to reset 'global' + * (per chip) blocks. + */ + global_bits2 = + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; + + /* Don't reset the following blocks. 
+ * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be + * reset, as in 4 port device they might still be owned + * by the MCP (there is only one leader per path). + */ + not_reset_mask1 = + MISC_REGISTERS_RESET_REG_1_RST_HC | + MISC_REGISTERS_RESET_REG_1_RST_PXPV | + MISC_REGISTERS_RESET_REG_1_RST_PXP; + + not_reset_mask2 = + MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | + MISC_REGISTERS_RESET_REG_2_RST_RBCN | + MISC_REGISTERS_RESET_REG_2_RST_GRC | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | + MISC_REGISTERS_RESET_REG_2_RST_ATC | + MISC_REGISTERS_RESET_REG_2_PGLC | + MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | + MISC_REGISTERS_RESET_REG_2_UMAC0 | + MISC_REGISTERS_RESET_REG_2_UMAC1; + + /* + * Keep the following blocks in reset: + * - all xxMACs are handled by the bnx2x_link code. + */ + stay_reset2 = + MISC_REGISTERS_RESET_REG_2_XMAC | + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; + + /* Full reset masks according to the chip */ + reset_mask1 = 0xffffffff; + + if (CHIP_IS_E1(bp)) + reset_mask2 = 0xffff; + else if (CHIP_IS_E1H(bp)) + reset_mask2 = 0x1ffff; + else if (CHIP_IS_E2(bp)) + reset_mask2 = 0xfffff; + else /* CHIP_IS_E3 */ + reset_mask2 = 0x3ffffff; + + /* Don't reset global blocks unless we need to */ + if (!global) + reset_mask2 &= ~global_bits2; + + /* + * In case of attention in the QM, we need to reset PXP + * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM + * because otherwise QM reset would release 'close the gates' shortly + * before resetting the PXP, then the PSWRQ would send a write + * request to PGLUE. Then when PXP is reset, PGLUE would try to + * read the payload data from PSWWR, but PSWWR would not + * respond. The write queue in PGLUE would stuck, dmae commands + * would not return. Therefore it's important to reset the second + * reset register (containing the + * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the + * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM + * bit). + */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + reset_mask2 & (~not_reset_mask2)); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + reset_mask1 & (~not_reset_mask1)); + + barrier(); + mmiowb(); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + reset_mask2 & (~stay_reset2)); + + barrier(); + mmiowb(); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); + mmiowb(); +} + +/** + * bnx2x_er_poll_igu_vq - poll for pending writes bit. + * It should get cleared in no more than 1s. + * + * @bp: driver handle + * + * It should get cleared in no more than 1s. Returns 0 if + * pending writes bit gets cleared. 
+ */ +static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) +{ + u32 cnt = 1000; + u32 pend_bits = 0; + + do { + pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); + + if (pend_bits == 0) + break; + + usleep_range(1000, 2000); + } while (cnt-- > 0); + + if (cnt <= 0) { + BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", + pend_bits); + return -EBUSY; + } + + return 0; +} + +static int bnx2x_process_kill(struct bnx2x *bp, bool global) +{ + int cnt = 1000; + u32 val = 0; + u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; + u32 tags_63_32 = 0; + + /* Empty the Tetris buffer, wait for 1s */ + do { + sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); + blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); + port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); + port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); + pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); + if (CHIP_IS_E3(bp)) + tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); + + if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && + ((port_is_idle_0 & 0x1) == 0x1) && + ((port_is_idle_1 & 0x1) == 0x1) && + (pgl_exp_rom2 == 0xffffffff) && + (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) + break; + usleep_range(1000, 2000); + } while (cnt-- > 0); + + if (cnt <= 0) { + BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); + BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", + sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, + pgl_exp_rom2); + return -EAGAIN; + } + + barrier(); + + /* Close gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, true); + + /* Poll for IGU VQs for 57712 and newer chips */ + if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) + return -EAGAIN; + + /* TBD: Indicate that "process kill" is in progress to MCP */ + + /* Clear "unprepared" bit */ + REG_WR(bp, MISC_REG_UNPREPARED, 0); + barrier(); + + /* Make sure all is written to the chip before the reset */ + mmiowb(); + + /* Wait for 1ms to empty GLUE and PCI-E core queues, + * PSWHST, GRC and PSWRD Tetris buffer. + */ + usleep_range(1000, 2000); + + /* Prepare to chip reset: */ + /* MCP */ + if (global) + bnx2x_reset_mcp_prep(bp, &val); + + /* PXP */ + bnx2x_pxp_prep(bp); + barrier(); + + /* reset the chip */ + bnx2x_process_kill_chip_reset(bp, global); + barrier(); + + /* clear errors in PGB */ + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); + + /* Recover after reset: */ + /* MCP */ + if (global && bnx2x_reset_mcp_comp(bp, val)) + return -EAGAIN; + + /* TBD: Add resetting the NO_MCP mode DB here */ + + /* Open the gates #2, #3 and #4 */ + bnx2x_set_234_gates(bp, false); + + /* TBD: IGU/AEU preparation bring back the AEU/IGU to a + * reset state, re-enable attentions. 
*/ + + return 0; +} + +static int bnx2x_leader_reset(struct bnx2x *bp) +{ + int rc = 0; + bool global = bnx2x_reset_is_global(bp); + u32 load_code; + + /* if not going to reset MCP - load "fake" driver to reset HW while + * driver is owner of the HW + */ + if (!global && !BP_NOMCP(bp)) { + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EAGAIN; + goto exit_leader_reset; + } + if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && + (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { + BNX2X_ERR("MCP unexpected resp, aborting\n"); + rc = -EAGAIN; + goto exit_leader_reset2; + } + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + if (!load_code) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EAGAIN; + goto exit_leader_reset2; + } + } + + /* Try to recover after the failure */ + if (bnx2x_process_kill(bp, global)) { + BNX2X_ERR("Something bad had happen on engine %d! Aii!\n", + BP_PATH(bp)); + rc = -EAGAIN; + goto exit_leader_reset2; + } + + /* + * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver + * state. + */ + bnx2x_set_reset_done(bp); + if (global) + bnx2x_clear_reset_global(bp); + +exit_leader_reset2: + /* unload "fake driver" if it was loaded */ + if (!global && !BP_NOMCP(bp)) { + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + } +exit_leader_reset: + bp->is_leader = 0; + bnx2x_release_leader_lock(bp); + smp_mb(); + return rc; +} + +static void bnx2x_recovery_failed(struct bnx2x *bp) +{ + netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); + + /* Disconnect this device */ + netif_device_detach(bp->dev); + + /* + * Block ifup for all function on this engine until "process kill" + * or power cycle. + */ + bnx2x_set_reset_in_progress(bp); + + /* Shut down the power */ + bnx2x_set_power_state(bp, PCI_D3hot); + + bp->recovery_state = BNX2X_RECOVERY_FAILED; + + smp_mb(); +} + +/* + * Assumption: runs under rtnl lock. This together with the fact + * that it's called only from bnx2x_sp_rtnl() ensure that it + * will never be called when netif_running(bp->dev) is false. + */ +static void bnx2x_parity_recover(struct bnx2x *bp) +{ + bool global = false; + u32 error_recovered, error_unrecovered; + bool is_parity; + + DP(NETIF_MSG_HW, "Handling parity\n"); + while (1) { + switch (bp->recovery_state) { + case BNX2X_RECOVERY_INIT: + DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); + is_parity = bnx2x_chk_parity_attn(bp, &global, false); + WARN_ON(!is_parity); + + /* Try to get a LEADER_LOCK HW lock */ + if (bnx2x_trylock_leader_lock(bp)) { + bnx2x_set_reset_in_progress(bp); + /* + * Check if there is a global attention and if + * there was a global attention, set the global + * reset bit. + */ + + if (global) + bnx2x_set_reset_global(bp); + + bp->is_leader = 1; + } + + /* Stop the driver */ + /* If interface has been removed - break */ + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) + return; + + bp->recovery_state = BNX2X_RECOVERY_WAIT; + + /* Ensure "is_leader", MCP command sequence and + * "recovery_state" update values are seen on other + * CPUs. + */ + smp_mb(); + break; + + case BNX2X_RECOVERY_WAIT: + DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); + if (bp->is_leader) { + int other_engine = BP_PATH(bp) ? 
0 : 1; + bool other_load_status = + bnx2x_get_load_status(bp, other_engine); + bool load_status = + bnx2x_get_load_status(bp, BP_PATH(bp)); + global = bnx2x_reset_is_global(bp); + + /* + * In case of a parity in a global block, let + * the first leader that performs a + * leader_reset() reset the global blocks in + * order to clear global attentions. Otherwise + * the gates will remain closed for that + * engine. + */ + if (load_status || + (global && other_load_status)) { + /* Wait until all other functions get + * down. + */ + schedule_delayed_work(&bp->sp_rtnl_task, + HZ/10); + return; + } else { + /* If all other functions got down - + * try to bring the chip back to + * normal. In any case it's an exit + * point for a leader. + */ + if (bnx2x_leader_reset(bp)) { + bnx2x_recovery_failed(bp); + return; + } + + /* If we are here, means that the + * leader has succeeded and doesn't + * want to be a leader any more. Try + * to continue as a none-leader. + */ + break; + } + } else { /* non-leader */ + if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { + /* Try to get a LEADER_LOCK HW lock as + * long as a former leader may have + * been unloaded by the user or + * released a leadership by another + * reason. + */ + if (bnx2x_trylock_leader_lock(bp)) { + /* I'm a leader now! Restart a + * switch case. + */ + bp->is_leader = 1; + break; + } + + schedule_delayed_work(&bp->sp_rtnl_task, + HZ/10); + return; + + } else { + /* + * If there was a global attention, wait + * for it to be cleared. + */ + if (bnx2x_reset_is_global(bp)) { + schedule_delayed_work( + &bp->sp_rtnl_task, + HZ/10); + return; + } + + error_recovered = + bp->eth_stats.recoverable_error; + error_unrecovered = + bp->eth_stats.unrecoverable_error; + bp->recovery_state = + BNX2X_RECOVERY_NIC_LOADING; + if (bnx2x_nic_load(bp, LOAD_NORMAL)) { + error_unrecovered++; + netdev_err(bp->dev, + "Recovery failed. Power cycle needed\n"); + /* Disconnect this device */ + netif_device_detach(bp->dev); + /* Shut down the power */ + bnx2x_set_power_state( + bp, PCI_D3hot); + smp_mb(); + } else { + bp->recovery_state = + BNX2X_RECOVERY_DONE; + error_recovered++; + smp_mb(); + } + bp->eth_stats.recoverable_error = + error_recovered; + bp->eth_stats.unrecoverable_error = + error_unrecovered; + + return; + } + } + default: + return; + } + } +} + +static int bnx2x_close(struct net_device *dev); + +/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is + * scheduled on a general queue in order to prevent a dead lock. + */ +static void bnx2x_sp_rtnl_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); + + rtnl_lock(); + + if (!netif_running(bp->dev)) { + rtnl_unlock(); + return; + } + + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + /* + * Clear all pending SP commands as we are going to reset the + * function anyway. 
+ */ + bp->sp_rtnl_state = 0; + smp_mb(); + + bnx2x_parity_recover(bp); + + rtnl_unlock(); + return; + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + + /* + * Clear all pending SP commands as we are going to reset the + * function anyway. + */ + bp->sp_rtnl_state = 0; + smp_mb(); + + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); + bnx2x_nic_load(bp, LOAD_NORMAL); + + rtnl_unlock(); + return; + } +#ifdef BNX2X_STOP_ON_ERROR +sp_rtnl_not_reset: +#endif + if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) + bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); + if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state)) + bnx2x_after_function_update(bp); + /* + * in case of fan failure we need to reset id if the "stop on error" + * debug flag is set, since we trying to prevent permanent overheating + * damage + */ + if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) { + DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n"); + netif_device_detach(bp->dev); + bnx2x_close(bp->dev); + rtnl_unlock(); + return; + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, + "sending set mcast vf pf channel message from rtnl sp-task\n"); + bnx2x_vfpf_set_mcast(bp->dev); + } + if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + &bp->sp_rtnl_state)){ + if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) { + bnx2x_tx_disable(bp); + BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n"); + } + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) { + DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n"); + bnx2x_set_rx_mode_inner(bp); + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, + &bp->sp_rtnl_state)) + bnx2x_pf_set_vfs_vlan(bp); + + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) { + bnx2x_dcbx_stop_hw_tx(bp); + bnx2x_dcbx_resume_hw_tx(bp); + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, + &bp->sp_rtnl_state)) + bnx2x_update_mng_version(bp); + + /* work which needs rtnl lock not-taken (as it takes the lock itself and + * can be called from other contexts as well) + */ + rtnl_unlock(); + + /* enable SR-IOV if applicable */ + if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, + &bp->sp_rtnl_state)) { + bnx2x_disable_sriov(bp); + bnx2x_enable_sriov(bp); + } +} + +static void bnx2x_period_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); + + if (!netif_running(bp->dev)) + goto period_task_exit; + + if (CHIP_REV_IS_SLOW(bp)) { + BNX2X_ERR("period task called on emulation, ignoring\n"); + goto period_task_exit; + } + + bnx2x_acquire_phy_lock(bp); + /* + * The barrier is needed to ensure the ordering between the writing to + * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and + * the reading here. 
+ */ + smp_mb(); + if (bp->port.pmf) { + bnx2x_period_func(&bp->link_params, &bp->link_vars); + + /* Re-queue task in 1 sec */ + queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); + } + + bnx2x_release_phy_lock(bp); +period_task_exit: + return; +} + +/* + * Init service functions + */ + +static u32 bnx2x_get_pretend_reg(struct bnx2x *bp) +{ + u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; + u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; + return base + (BP_ABS_FUNC(bp)) * stride; +} + +static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp, + u8 port, u32 reset_reg, + struct bnx2x_mac_vals *vals) +{ + u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; + u32 base_addr; + + if (!(mask & reset_reg)) + return false; + + BNX2X_DEV_INFO("Disable umac Rx %02x\n", port); + base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG; + vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]); + REG_WR(bp, vals->umac_addr[port], 0); + + return true; +} + +static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, + struct bnx2x_mac_vals *vals) +{ + u32 val, base_addr, offset, mask, reset_reg; + bool mac_stopped = false; + u8 port = BP_PORT(bp); + + /* reset addresses as they also mark which values were changed */ + memset(vals, 0, sizeof(*vals)); + + reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2); + + if (!CHIP_IS_E3(bp)) { + val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); + mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; + if ((mask & reset_reg) && val) { + u32 wb_data[2]; + BNX2X_DEV_INFO("Disable bmac Rx\n"); + base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM + : NIG_REG_INGRESS_BMAC0_MEM; + offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL + : BIGMAC_REGISTER_BMAC_CONTROL; + + /* + * use rd/wr since we cannot use dmae. This is safe + * since MCP won't access the bus due to the request + * to unload, and no function on the path can be + * loaded at this time. + */ + wb_data[0] = REG_RD(bp, base_addr + offset); + wb_data[1] = REG_RD(bp, base_addr + offset + 0x4); + vals->bmac_addr = base_addr + offset; + vals->bmac_val[0] = wb_data[0]; + vals->bmac_val[1] = wb_data[1]; + wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; + REG_WR(bp, vals->bmac_addr, wb_data[0]); + REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]); + } + BNX2X_DEV_INFO("Disable emac Rx\n"); + vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4; + vals->emac_val = REG_RD(bp, vals->emac_addr); + REG_WR(bp, vals->emac_addr, 0); + mac_stopped = true; + } else { + if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { + BNX2X_DEV_INFO("Disable xmac Rx\n"); + base_addr = BP_PORT(bp) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; + val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI); + REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, + val & ~(1 << 1)); + REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI, + val | (1 << 1)); + vals->xmac_addr = base_addr + XMAC_REG_CTRL; + vals->xmac_val = REG_RD(bp, vals->xmac_addr); + REG_WR(bp, vals->xmac_addr, 0); + mac_stopped = true; + } + + mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0, + reset_reg, vals); + mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1, + reset_reg, vals); + } + + if (mac_stopped) + msleep(20); +} + +#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) +#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \ + 0x1848 + ((f) << 4)) +#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff) +#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) +#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) + +#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) +#define BCM_5710_UNDI_FW_MF_MINOR (0x08) +#define BCM_5710_UNDI_FW_MF_VERS (0x05) + +static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) +{ + /* UNDI marks its presence in DORQ - + * it initializes CID offset for normal bell to 0x7 + */ + if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & + MISC_REGISTERS_RESET_REG_1_RST_DORQ)) + return false; + + if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { + BNX2X_DEV_INFO("UNDI previously loaded\n"); + return true; + } + + return false; +} + +static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc) +{ + u16 rcq, bd; + u32 addr, tmp_reg; + + if (BP_FUNC(bp) < 2) + addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp)); + else + addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2); + + tmp_reg = REG_RD(bp, addr); + rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; + bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; + + tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); + REG_WR(bp, addr, tmp_reg); + + BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n", + BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq); +} + +static int bnx2x_prev_mcp_done(struct bnx2x *bp) +{ + u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, + DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); + if (!rc) { + BNX2X_ERR("MCP response failure, aborting\n"); + return -EBUSY; + } + + return 0; +} + +static struct bnx2x_prev_path_list * + bnx2x_prev_path_get_entry(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *tmp_list; + + list_for_each_entry(tmp_list, &bnx2x_prev_list, list) + if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && + bp->pdev->bus->number == tmp_list->bus && + BP_PATH(bp) == tmp_list->path) + return tmp_list; + + return NULL; +} + +static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *tmp_list; + int rc; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + tmp_list->aer = 1; + rc = 0; + } else { + BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", + BP_PATH(bp)); + } + + up(&bnx2x_prev_sem); + + return rc; +} + +static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *tmp_list; + bool rc = false; + + if (down_trylock(&bnx2x_prev_sem)) + return false; + + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (tmp_list->aer) { + DP(NETIF_MSG_HW, "Path %d was marked by AER\n", + BP_PATH(bp)); + } else { + rc = true; + BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", + 
BP_PATH(bp)); + } + } + + up(&bnx2x_prev_sem); + + return rc; +} + +bool bnx2x_port_after_undi(struct bnx2x *bp) +{ + struct bnx2x_prev_path_list *entry; + bool val; + + down(&bnx2x_prev_sem); + + entry = bnx2x_prev_path_get_entry(bp); + val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); + + up(&bnx2x_prev_sem); + + return val; +} + +static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) +{ + struct bnx2x_prev_path_list *tmp_list; + int rc; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + return rc; + } + + /* Check whether the entry for this path already exists */ + tmp_list = bnx2x_prev_path_get_entry(bp); + if (tmp_list) { + if (!tmp_list->aer) { + BNX2X_ERR("Re-Marking the path.\n"); + } else { + DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", + BP_PATH(bp)); + tmp_list->aer = 0; + } + up(&bnx2x_prev_sem); + return 0; + } + up(&bnx2x_prev_sem); + + /* Create an entry for this path and add it */ + tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); + if (!tmp_list) { + BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); + return -ENOMEM; + } + + tmp_list->bus = bp->pdev->bus->number; + tmp_list->slot = PCI_SLOT(bp->pdev->devfn); + tmp_list->path = BP_PATH(bp); + tmp_list->aer = 0; + tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Received %d when tried to take lock\n", rc); + kfree(tmp_list); + } else { + DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n", + BP_PATH(bp)); + list_add(&tmp_list->list, &bnx2x_prev_list); + up(&bnx2x_prev_sem); + } + + return rc; +} + +static int bnx2x_do_flr(struct bnx2x *bp) +{ + struct pci_dev *dev = bp->pdev; + + if (CHIP_IS_E1x(bp)) { + BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); + return -EINVAL; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", + bp->common.bc_ver); + return -EINVAL; + } + + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); + + BNX2X_DEV_INFO("Initiating FLR\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); + + return 0; +} + +static int bnx2x_prev_unload_uncommon(struct bnx2x *bp) +{ + int rc; + + BNX2X_DEV_INFO("Uncommon unload Flow\n"); + + /* Test if previous unload process was already finished for this path */ + if (bnx2x_prev_is_path_marked(bp)) + return bnx2x_prev_mcp_done(bp); + + BNX2X_DEV_INFO("Path is unmarked\n"); + + /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */ + if (bnx2x_prev_is_after_undi(bp)) + goto out; + + /* If function has FLR capabilities, and existing FW version matches + * the one required, then FLR will be sufficient to clean any residue + * left by previous driver + */ + rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false); + + if (!rc) { + /* fw version is good */ + BNX2X_DEV_INFO("FW version matches our own. 
Attempting FLR\n"); + rc = bnx2x_do_flr(bp); + } + + if (!rc) { + /* FLR was performed */ + BNX2X_DEV_INFO("FLR successful\n"); + return 0; + } + + BNX2X_DEV_INFO("Could not FLR\n"); + +out: + /* Close the MCP request, return failure*/ + rc = bnx2x_prev_mcp_done(bp); + if (!rc) + rc = BNX2X_PREV_WAIT_NEEDED; + + return rc; +} + +static int bnx2x_prev_unload_common(struct bnx2x *bp) +{ + u32 reset_reg, tmp_reg = 0, rc; + bool prev_undi = false; + struct bnx2x_mac_vals mac_vals; + + /* It is possible a previous function received 'common' answer, + * but hasn't loaded yet, therefore creating a scenario of + * multiple functions receiving 'common' on the same path. + */ + BNX2X_DEV_INFO("Common unload Flow\n"); + + memset(&mac_vals, 0, sizeof(mac_vals)); + + if (bnx2x_prev_is_path_marked(bp)) + return bnx2x_prev_mcp_done(bp); + + reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1); + + /* Reset should be performed after BRB is emptied */ + if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { + u32 timer_count = 1000; + + /* Close the MAC Rx to prevent BRB from filling up */ + bnx2x_prev_unload_close_mac(bp, &mac_vals); + + /* close LLH filters for both ports towards the BRB */ + bnx2x_set_rx_filter(&bp->link_params, 0); + bp->link_params.port ^= 1; + bnx2x_set_rx_filter(&bp->link_params, 0); + bp->link_params.port ^= 1; + + /* Check if the UNDI driver was previously loaded */ + if (bnx2x_prev_is_after_undi(bp)) { + prev_undi = true; + /* clear the UNDI indication */ + REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); + /* clear possible idle check errors */ + REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); + } + if (!CHIP_IS_E1x(bp)) + /* block FW from writing to host */ + REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + + /* wait until BRB is empty */ + tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); + while (timer_count) { + u32 prev_brb = tmp_reg; + + tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); + if (!tmp_reg) + break; + + BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg); + + /* reset timer as long as BRB actually gets emptied */ + if (prev_brb > tmp_reg) + timer_count = 1000; + else + timer_count--; + + /* If UNDI resides in memory, manually increment it */ + if (prev_undi) + bnx2x_prev_unload_undi_inc(bp, 1); + + udelay(10); + } + + if (!timer_count) + BNX2X_ERR("Failed to empty BRB, hope for the best\n"); + } + + /* No packets are in the pipeline, path is ready for reset */ + bnx2x_reset_common(bp); + + if (mac_vals.xmac_addr) + REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val); + if (mac_vals.umac_addr[0]) + REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]); + if (mac_vals.umac_addr[1]) + REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]); + if (mac_vals.emac_addr) + REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val); + if (mac_vals.bmac_addr) { + REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]); + REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); + } + + rc = bnx2x_prev_mark_path(bp, prev_undi); + if (rc) { + bnx2x_prev_mcp_done(bp); + return rc; + } + + return bnx2x_prev_mcp_done(bp); +} + +static int bnx2x_prev_unload(struct bnx2x *bp) +{ + int time_counter = 10; + u32 rc, fw, hw_lock_reg, hw_lock_val; + BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); + + /* clear hw from errors which may have resulted from an interrupted + * dmae transaction. + */ + bnx2x_clean_pglue_errors(bp); + + /* Release previously held locks */ + hw_lock_reg = (BP_FUNC(bp) <= 5) ? 
+ (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : + (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); + + hw_lock_val = REG_RD(bp, hw_lock_reg); + if (hw_lock_val) { + if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { + BNX2X_DEV_INFO("Release Previously held NVRAM lock\n"); + REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp))); + } + + BNX2X_DEV_INFO("Release Previously held hw lock\n"); + REG_WR(bp, hw_lock_reg, 0xffffffff); + } else + BNX2X_DEV_INFO("No need to release hw/nvram locks\n"); + + if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) { + BNX2X_DEV_INFO("Release previously held alr\n"); + bnx2x_release_alr(bp); + } + + do { + int aer = 0; + /* Lock MCP using an unload request */ + fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); + if (!fw) { + BNX2X_ERR("MCP response failure, aborting\n"); + rc = -EBUSY; + break; + } + + rc = down_interruptible(&bnx2x_prev_sem); + if (rc) { + BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n", + rc); + } else { + /* If Path is marked by EEH, ignore unload status */ + aer = !!(bnx2x_prev_path_get_entry(bp) && + bnx2x_prev_path_get_entry(bp)->aer); + up(&bnx2x_prev_sem); + } + + if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) { + rc = bnx2x_prev_unload_common(bp); + break; + } + + /* non-common reply from MCP might require looping */ + rc = bnx2x_prev_unload_uncommon(bp); + if (rc != BNX2X_PREV_WAIT_NEEDED) + break; + + msleep(20); + } while (--time_counter); + + if (!time_counter || rc) { + BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n"); + rc = -EPROBE_DEFER; + } + + /* Mark function if its port was used to boot from SAN */ + if (bnx2x_port_after_undi(bp)) + bp->link_params.feature_config_flags |= + FEATURE_CONFIG_BOOT_FROM_SAN; + + BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); + + return rc; +} + +static void bnx2x_get_common_hwinfo(struct bnx2x *bp) +{ + u32 val, val2, val3, val4, id, boot_mode; + u16 pmc; + + /* Get the chip revision id and number. */ + /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ + val = REG_RD(bp, MISC_REG_CHIP_NUM); + id = ((val & 0xffff) << 16); + val = REG_RD(bp, MISC_REG_CHIP_REV); + id |= ((val & 0xf) << 12); + + /* Metal is read from PCI regs, but we can't access >=0x400 from + * the configuration space (so we need to reg_rd) + */ + val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); + id |= (((val >> 24) & 0xf) << 4); + val = REG_RD(bp, MISC_REG_BOND_ID); + id |= (val & 0xf); + bp->common.chip_id = id; + + /* force 57811 according to MISC register */ + if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { + if (CHIP_IS_57810(bp)) + bp->common.chip_id = (CHIP_NUM_57811 << 16) | + (bp->common.chip_id & 0x0000FFFF); + else if (CHIP_IS_57810_MF(bp)) + bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | + (bp->common.chip_id & 0x0000FFFF); + bp->common.chip_id |= 0x1; + } + + /* Set doorbell size */ + bp->db_size = (1 << BNX2X_DB_SHIFT); + + if (!CHIP_IS_E1x(bp)) { + val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); + if ((val & 1) == 0) + val = REG_RD(bp, MISC_REG_PORT4MODE_EN); + else + val = (val >> 1) & 1; + BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : + "2_PORT_MODE"); + bp->common.chip_port_mode = val ? 
CHIP_4_PORT_MODE : + CHIP_2_PORT_MODE; + + if (CHIP_MODE_IS_4_PORT(bp)) + bp->pfid = (bp->pf_num >> 1); /* 0..3 */ + else + bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ + } else { + bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ + bp->pfid = bp->pf_num; /* 0..7 */ + } + + BNX2X_DEV_INFO("pf_id: %x", bp->pfid); + + bp->link_params.chip_id = bp->common.chip_id; + BNX2X_DEV_INFO("chip ID is 0x%x\n", id); + + val = (REG_RD(bp, 0x2874) & 0x55); + if ((bp->common.chip_id & 0x1) || + (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { + bp->flags |= ONE_PORT_FLAG; + BNX2X_DEV_INFO("single port device\n"); + } + + val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); + bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << + (val & MCPR_NVM_CFG4_FLASH_SIZE)); + BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", + bp->common.flash_size, bp->common.flash_size); + + bnx2x_init_shmem(bp); + + bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? + MISC_REG_GENERIC_CR_1 : + MISC_REG_GENERIC_CR_0)); + + bp->link_params.shmem_base = bp->common.shmem_base; + bp->link_params.shmem2_base = bp->common.shmem2_base; + if (SHMEM2_RD(bp, size) > + (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) + bp->link_params.lfa_base = + REG_RD(bp, bp->common.shmem2_base + + (u32)offsetof(struct shmem2_region, + lfa_host_addr[BP_PORT(bp)])); + else + bp->link_params.lfa_base = 0; + BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", + bp->common.shmem_base, bp->common.shmem2_base); + + if (!bp->common.shmem_base) { + BNX2X_DEV_INFO("MCP not active\n"); + bp->flags |= NO_MCP_FLAG; + return; + } + + bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); + BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); + + bp->link_params.hw_led_mode = ((bp->common.hw_config & + SHARED_HW_CFG_LED_MODE_MASK) >> + SHARED_HW_CFG_LED_MODE_SHIFT); + + bp->link_params.feature_config_flags = 0; + val = SHMEM_RD(bp, dev_info.shared_feature_config.config); + if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) + bp->link_params.feature_config_flags |= + FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + else + bp->link_params.feature_config_flags &= + ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; + + val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; + bp->common.bc_ver = val; + BNX2X_DEV_INFO("bc_ver %X\n", val); + if (val < BNX2X_BC_VER) { + /* for now only warn + * later we might need to enforce this */ + BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", + BNX2X_BC_VER, val); + } + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? + FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; + + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? + FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? + FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? + FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; + + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_MT_SUPPORTED) ? + FEATURE_CONFIG_MT_SUPPORT : 0; + + bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? + BC_SUPPORTS_PFC_STATS : 0; + + bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? + BC_SUPPORTS_FCOE_FEATURES : 0; + + bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? + BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; + + bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 
+ BC_SUPPORTS_RMMOD_CMD : 0; + + boot_mode = SHMEM_RD(bp, + dev_info.port_feature_config[BP_PORT(bp)].mba_config) & + PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; + switch (boot_mode) { + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; + break; + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; + break; + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; + break; + case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: + bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; + break; + } + + pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); + bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; + + BNX2X_DEV_INFO("%sWoL capable\n", + (bp->flags & NO_WOL_FLAG) ? "not " : ""); + + val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); + val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); + val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); + val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); + + dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n", + val, val2, val3, val4); +} + +#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) +#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) + +static int bnx2x_get_igu_cam_info(struct bnx2x *bp) +{ + int pfid = BP_FUNC(bp); + int igu_sb_id; + u32 val; + u8 fid, igu_sb_cnt = 0; + + bp->igu_base_sb = 0xff; + if (CHIP_INT_MODE_IS_BC(bp)) { + int vn = BP_VN(bp); + igu_sb_cnt = bp->igu_sb_cnt; + bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * + FP_SB_MAX_E1x; + + bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + + (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); + + return 0; + } + + /* IGU in normal mode - read CAM */ + for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; + igu_sb_id++) { + val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) + continue; + fid = IGU_FID(val); + if ((fid & IGU_FID_ENCODE_IS_PF)) { + if ((fid & IGU_FID_PF_NUM_MASK) != pfid) + continue; + if (IGU_VEC(val) == 0) + /* default status block */ + bp->igu_dsb_id = igu_sb_id; + else { + if (bp->igu_base_sb == 0xff) + bp->igu_base_sb = igu_sb_id; + igu_sb_cnt++; + } + } + } + +#ifdef CONFIG_PCI_MSI + /* Due to new PF resource allocation by MFW T7.4 and above, it's + * optional that number of CAM entries will not be equal to the value + * advertised in PCI. 
+ * Driver should use the minimal value of both as the actual status + * block count + */ + bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); +#endif + + if (igu_sb_cnt == 0) { + BNX2X_ERR("CAM configuration error\n"); + return -EINVAL; + } + + return 0; +} + +static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) +{ + int cfg_size = 0, idx, port = BP_PORT(bp); + + /* Aggregation of supported attributes of all external phys */ + bp->port.supported[0] = 0; + bp->port.supported[1] = 0; + switch (bp->link_params.num_phys) { + case 1: + bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported; + cfg_size = 1; + break; + case 2: + bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported; + cfg_size = 1; + break; + case 3: + if (bp->link_params.multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED) { + bp->port.supported[1] = + bp->link_params.phy[EXT_PHY1].supported; + bp->port.supported[0] = + bp->link_params.phy[EXT_PHY2].supported; + } else { + bp->port.supported[0] = + bp->link_params.phy[EXT_PHY1].supported; + bp->port.supported[1] = + bp->link_params.phy[EXT_PHY2].supported; + } + cfg_size = 2; + break; + } + + if (!(bp->port.supported[0] || bp->port.supported[1])) { + BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n", + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config), + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config2)); + return; + } + + if (CHIP_IS_E3(bp)) + bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); + else { + switch (switch_cfg) { + case SWITCH_CFG_1G: + bp->port.phy_addr = REG_RD( + bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); + break; + case SWITCH_CFG_10G: + bp->port.phy_addr = REG_RD( + bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); + break; + default: + BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", + bp->port.link_config[0]); + return; + } + } + BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); + /* mask what we support according to speed_cap_mask per configuration */ + for (idx = 0; idx < cfg_size; idx++) { + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) + bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) + bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) + bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) + bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) + bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) + bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) + bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; + + if (!(bp->link_params.speed_cap_mask[idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) + bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; + } + + BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], + bp->port.supported[1]); +} + +static void bnx2x_link_settings_requested(struct bnx2x *bp) +{ + u32 link_config, idx, cfg_size = 0; + bp->port.advertising[0] = 0; + bp->port.advertising[1] = 0; 
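+	/* Determine how many link configurations to parse: with a single
+	 * PHY chain (internal only, or one external PHY) there is one
+	 * configuration, with two external PHYs there are two. For each
+	 * one, the NVRAM link_config word is translated below into the
+	 * requested line speed, duplex and advertised capabilities.
+	 */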
+ switch (bp->link_params.num_phys) { + case 1: + case 2: + cfg_size = 1; + break; + case 3: + cfg_size = 2; + break; + } + for (idx = 0; idx < cfg_size; idx++) { + bp->link_params.req_duplex[idx] = DUPLEX_FULL; + link_config = bp->port.link_config[idx]; + switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { + case PORT_FEATURE_LINK_SPEED_AUTO: + if (bp->port.supported[idx] & SUPPORTED_Autoneg) { + bp->link_params.req_line_speed[idx] = + SPEED_AUTO_NEG; + bp->port.advertising[idx] |= + bp->port.supported[idx]; + if (bp->link_params.phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + bp->port.advertising[idx] |= + (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + } else { + /* force 10G, no AN */ + bp->link_params.req_line_speed[idx] = + SPEED_10000; + bp->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + continue; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_FULL: + if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_10; + bp->port.advertising[idx] |= + (ADVERTISED_10baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10M_HALF: + if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { + bp->link_params.req_line_speed[idx] = + SPEED_10; + bp->link_params.req_duplex[idx] = + DUPLEX_HALF; + bp->port.advertising[idx] |= + (ADVERTISED_10baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_FULL: + if (bp->port.supported[idx] & + SUPPORTED_100baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_100; + bp->port.advertising[idx] |= + (ADVERTISED_100baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_100M_HALF: + if (bp->port.supported[idx] & + SUPPORTED_100baseT_Half) { + bp->link_params.req_line_speed[idx] = + SPEED_100; + bp->link_params.req_duplex[idx] = + DUPLEX_HALF; + bp->port.advertising[idx] |= + (ADVERTISED_100baseT_Half | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_1G: + if (bp->port.supported[idx] & + SUPPORTED_1000baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_1000; + bp->port.advertising[idx] |= + (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_2_5G: + if (bp->port.supported[idx] & + SUPPORTED_2500baseX_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_2500; + bp->port.advertising[idx] |= + (ADVERTISED_2500baseX_Full | + ADVERTISED_TP); + } else { + BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + + case PORT_FEATURE_LINK_SPEED_10G_CX4: + if (bp->port.supported[idx] & + SUPPORTED_10000baseT_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_10000; + bp->port.advertising[idx] |= + (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + } else { + BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", + link_config, + bp->link_params.speed_cap_mask[idx]); + return; + } + break; + case PORT_FEATURE_LINK_SPEED_20G: + bp->link_params.req_line_speed[idx] = SPEED_20000; + + break; + default: + BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n", + link_config); + bp->link_params.req_line_speed[idx] = + SPEED_AUTO_NEG; + bp->port.advertising[idx] = + bp->port.supported[idx]; + break; + } + + bp->link_params.req_flow_ctrl[idx] = (link_config & + PORT_FEATURE_FLOW_CONTROL_MASK); + if (bp->link_params.req_flow_ctrl[idx] == + BNX2X_FLOW_CTRL_AUTO) { + if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) + bp->link_params.req_flow_ctrl[idx] = + BNX2X_FLOW_CTRL_NONE; + else + bnx2x_set_requested_fc(bp); + } + + BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", + bp->link_params.req_line_speed[idx], + bp->link_params.req_duplex[idx], + bp->link_params.req_flow_ctrl[idx], + bp->port.advertising[idx]); + } +} + +static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) +{ + __be16 mac_hi_be = cpu_to_be16(mac_hi); + __be32 mac_lo_be = cpu_to_be32(mac_lo); + memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); + memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); +} + +static void bnx2x_get_port_hwinfo(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + u32 config; + u32 ext_phy_type, ext_phy_config, eee_mode; + + bp->link_params.bp = bp; + bp->link_params.port = port; + + bp->link_params.lane_config = + SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); + + bp->link_params.speed_cap_mask[0] = + SHMEM_RD(bp, + dev_info.port_hw_config[port].speed_capability_mask) & + PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; + bp->link_params.speed_cap_mask[1] = + SHMEM_RD(bp, + dev_info.port_hw_config[port].speed_capability_mask2) & + PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; + bp->port.link_config[0] = + SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); + + bp->port.link_config[1] = + SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2); + + bp->link_params.multi_phy_config = + SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); + /* If the device is capable of WoL, set the default state according + * to the HW + */ + config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); + bp->wol = (!(bp->flags & NO_WOL_FLAG) && + (config & PORT_FEATURE_WOL_ENABLED)); + + if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == + PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) + bp->flags |= NO_ISCSI_FLAG; + if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == + PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) + bp->flags |= NO_FCOE_FLAG; + + BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", + bp->link_params.lane_config, + bp->link_params.speed_cap_mask[0], + bp->port.link_config[0]); + + bp->link_params.switch_cfg = (bp->port.link_config[0] & + PORT_FEATURE_CONNECTED_SWITCH_MASK); + bnx2x_phy_probe(&bp->link_params); + bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); + + bnx2x_link_settings_requested(bp); + 
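+	/* The port link configuration is now derived from NVRAM: lane_config,
+	 * speed_cap_mask[] and link_config[] were read from shmem, the PHYs
+	 * were probed, and the supported/requested speed, duplex and flow
+	 * control settings are populated.
+	 */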
+ /* + * If connected directly, work with the internal PHY, otherwise, work + * with the external PHY + */ + ext_phy_config = + SHMEM_RD(bp, + dev_info.port_hw_config[port].external_phy_config); + ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); + if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + bp->mdio.prtad = bp->port.phy_addr; + + else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && + (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) + bp->mdio.prtad = + XGXS_EXT_PHY_ADDR(ext_phy_config); + + /* Configure link feature according to nvram value */ + eee_mode = (((SHMEM_RD(bp, dev_info. + port_feature_config[port].eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { + bp->link_params.eee_mode = EEE_MODE_ADV_LPI | + EEE_MODE_ENABLE_LPI | + EEE_MODE_OUTPUT_TIME; + } else { + bp->link_params.eee_mode = 0; + } +} + +void bnx2x_get_iscsi_info(struct bnx2x *bp) +{ + u32 no_flags = NO_ISCSI_FLAG; + int port = BP_PORT(bp); + u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_iscsi_conn); + + if (!CNIC_SUPPORT(bp)) { + bp->flags |= no_flags; + return; + } + + /* Get the number of maximum allowed iSCSI connections */ + bp->cnic_eth_dev.max_iscsi_conn = + (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> + BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; + + BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n", + bp->cnic_eth_dev.max_iscsi_conn); + + /* + * If maximum allowed number of connections is zero - + * disable the feature. + */ + if (!bp->cnic_eth_dev.max_iscsi_conn) + bp->flags |= no_flags; +} + +static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func) +{ + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); +} + +static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp) +{ + u8 count = 0; + + if (IS_MF(bp)) { + u8 fid; + + /* iterate over absolute function ids for this path: */ + for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) { + if (IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, + func_mf_config[fid].config); + + if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) && + ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_FCOE)) + count++; + } else { + u32 cfg = MF_CFG_RD(bp, + func_ext_config[fid]. + func_cfg); + + if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) && + (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) + count++; + } + } + } else { /* SF */ + int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 
2 : 1; + + for (port = 0; port < port_cnt; port++) { + u32 lic = SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn) ^ + FW_ENCODE_32BIT_PATTERN; + if (lic) + count++; + } + } + + return count; +} + +static void bnx2x_get_fcoe_info(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int func = BP_ABS_FUNC(bp); + u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[port].max_fcoe_conn); + u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp); + + if (!CNIC_SUPPORT(bp)) { + bp->flags |= NO_FCOE_FLAG; + return; + } + + /* Get the number of maximum allowed FCoE connections */ + bp->cnic_eth_dev.max_fcoe_conn = + (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> + BNX2X_MAX_FCOE_INIT_CONN_SHIFT; + + /* Calculate the number of maximum allowed FCoE tasks */ + bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE; + + /* check if FCoE resources must be shared between different functions */ + if (num_fcoe_func) + bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func; + + /* Read the WWN: */ + if (!IS_MF(bp)) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_lower); + } else if (!IS_MF_SD(bp)) { + /* Read the WWN info only if the FCoE feature is enabled for + * this function. + */ + if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)) + bnx2x_get_ext_wwn_info(bp, func); + } else { + if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) + bnx2x_get_ext_wwn_info(bp, func); + } + + BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); + + /* + * If maximum allowed number of connections is zero - + * disable the feature. + */ + if (!bp->cnic_eth_dev.max_fcoe_conn) + bp->flags |= NO_FCOE_FLAG; +} + +static void bnx2x_get_cnic_info(struct bnx2x *bp) +{ + /* + * iSCSI may be dynamically disabled but reading + * info here we will decrease memory usage by driver + * if the feature is disabled for good + */ + bnx2x_get_iscsi_info(bp); + bnx2x_get_fcoe_info(bp); +} + +static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) +{ + u32 val, val2; + int func = BP_ABS_FUNC(bp); + int port = BP_PORT(bp); + u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; + u8 *fip_mac = bp->fip_mac; + + if (IS_MF(bp)) { + /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or + * FCoE MAC then the appropriate feature should be disabled. + * In non SD mode features configuration comes from struct + * func_ext_config. + */ + if (!IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); + if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { + val2 = MF_CFG_RD(bp, func_ext_config[func]. + iscsi_mac_addr_upper); + val = MF_CFG_RD(bp, func_ext_config[func]. + iscsi_mac_addr_lower); + bnx2x_set_mac_buf(iscsi_mac, val, val2); + BNX2X_DEV_INFO + ("Read iSCSI MAC: %pM\n", iscsi_mac); + } else { + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + } + + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + val2 = MF_CFG_RD(bp, func_ext_config[func]. + fcoe_mac_addr_upper); + val = MF_CFG_RD(bp, func_ext_config[func]. 
+ fcoe_mac_addr_lower); + bnx2x_set_mac_buf(fip_mac, val, val2); + BNX2X_DEV_INFO + ("Read FCoE L2 MAC: %pM\n", fip_mac); + } else { + bp->flags |= NO_FCOE_FLAG; + } + + bp->mf_ext_config = cfg; + + } else { /* SD MODE */ + if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { + /* use primary mac as iscsi mac */ + memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); + + BNX2X_DEV_INFO("SD ISCSI MODE\n"); + BNX2X_DEV_INFO + ("Read iSCSI MAC: %pM\n", iscsi_mac); + } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { + /* use primary mac as fip mac */ + memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); + BNX2X_DEV_INFO("SD FCoE MODE\n"); + BNX2X_DEV_INFO + ("Read FIP MAC: %pM\n", fip_mac); + } + } + + /* If this is a storage-only interface, use SAN mac as + * primary MAC. Notice that for SD this is already the case, + * as the SAN mac was copied from the primary MAC. + */ + if (IS_MF_FCOE_AFEX(bp)) + memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); + } else { + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. + iscsi_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port]. + iscsi_mac_lower); + bnx2x_set_mac_buf(iscsi_mac, val, val2); + + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_lower); + bnx2x_set_mac_buf(fip_mac, val, val2); + } + + /* Disable iSCSI OOO if MAC configuration is invalid. */ + if (!is_valid_ether_addr(iscsi_mac)) { + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + eth_zero_addr(iscsi_mac); + } + + /* Disable FCoE if MAC configuration is invalid. */ + if (!is_valid_ether_addr(fip_mac)) { + bp->flags |= NO_FCOE_FLAG; + eth_zero_addr(bp->fip_mac); + } +} + +static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) +{ + u32 val, val2; + int func = BP_ABS_FUNC(bp); + int port = BP_PORT(bp); + + /* Zero primary MAC configuration */ + eth_zero_addr(bp->dev->dev_addr); + + if (BP_NOMCP(bp)) { + BNX2X_ERROR("warning: random MAC workaround active\n"); + eth_hw_addr_random(bp->dev); + } else if (IS_MF(bp)) { + val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); + val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); + if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && + (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + + if (CNIC_SUPPORT(bp)) + bnx2x_get_cnic_mac_hwinfo(bp); + } else { + /* in SF read MACs from port configuration */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); + + if (CNIC_SUPPORT(bp)) + bnx2x_get_cnic_mac_hwinfo(bp); + } + + if (!BP_NOMCP(bp)) { + /* Read physical port identifier from shmem */ + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); + bnx2x_set_mac_buf(bp->phys_port_id, val, val2); + bp->flags |= HAS_PHYS_PORT_ID; + } + + memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); + + if (!is_valid_ether_addr(bp->dev->dev_addr)) + dev_err(&bp->pdev->dev, + "bad Ethernet MAC address configuration: %pM\n" + "change it manually before bringing up the appropriate network interface\n", + bp->dev->dev_addr); +} + +static bool bnx2x_get_dropless_info(struct bnx2x *bp) +{ + int tmp; + u32 cfg; + + if (IS_VF(bp)) + return false; + + if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { + /* Take function: tmp = func */ + tmp = BP_ABS_FUNC(bp); + cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg); + cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING); + } else { + /* 
Take port: tmp = port */ + tmp = BP_PORT(bp); + cfg = SHMEM_RD(bp, + dev_info.port_hw_config[tmp].generic_features); + cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED); + } + return cfg; +} + +static void validate_set_si_mode(struct bnx2x *bp) +{ + u8 func = BP_ABS_FUNC(bp); + u32 val; + + val = MF_CFG_RD(bp, func_mf_config[func].mac_upper); + + /* check for legal mac (upper bytes) */ + if (val != 0xffff) { + bp->mf_mode = MULTI_FUNCTION_SI; + bp->mf_config[BP_VN(bp)] = + MF_CFG_RD(bp, func_mf_config[func].config); + } else + BNX2X_DEV_INFO("illegal MAC address for SI\n"); +} + +static int bnx2x_get_hwinfo(struct bnx2x *bp) +{ + int /*abs*/func = BP_ABS_FUNC(bp); + int vn; + u32 val = 0, val2 = 0; + int rc = 0; + + /* Validate that chip access is feasible */ + if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) { + dev_err(&bp->pdev->dev, + "Chip read returns all Fs. Preventing probe from continuing\n"); + return -EINVAL; + } + + bnx2x_get_common_hwinfo(bp); + + /* + * initialize IGU parameters + */ + if (CHIP_IS_E1x(bp)) { + bp->common.int_block = INT_BLOCK_HC; + + bp->igu_dsb_id = DEF_SB_IGU_ID; + bp->igu_base_sb = 0; + } else { + bp->common.int_block = INT_BLOCK_IGU; + + /* do not allow device reset during IGU info processing */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + + val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + int tout = 5000; + + BNX2X_DEV_INFO("FORCING Normal Mode\n"); + + val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); + REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); + REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); + + while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + tout--; + usleep_range(1000, 2000); + } + + if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + dev_err(&bp->pdev->dev, + "FORCING Normal Mode failed!!!\n"); + bnx2x_release_hw_lock(bp, + HW_LOCK_RESOURCE_RESET); + return -EPERM; + } + } + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); + bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; + } else + BNX2X_DEV_INFO("IGU Normal Mode\n"); + + rc = bnx2x_get_igu_cam_info(bp); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); + if (rc) + return rc; + } + + /* + * set base FW non-default (fast path) status block id, this value is + * used to initialize the fw_sb_id saved on the fp/queue structure to + * determine the id used by the FW. + */ + if (CHIP_IS_E1x(bp)) + bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp); + else /* + * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of + * the same queue are indicated on the same IGU SB). So we prefer + * FW and IGU SBs to be the same value. + */ + bp->base_fw_ndsb = bp->igu_base_sb; + + BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n" + "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb, + bp->igu_sb_cnt, bp->base_fw_ndsb); + + /* + * Initialize MF configuration + */ + + bp->mf_ov = 0; + bp->mf_mode = 0; + bp->mf_sub_mode = 0; + vn = BP_VN(bp); + + if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { + BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", + bp->common.shmem2_base, SHMEM2_RD(bp, size), + (u32)offsetof(struct shmem2_region, mf_cfg_addr)); + + if (SHMEM2_HAS(bp, mf_cfg_addr)) + bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); + else + bp->common.mf_cfg_base = bp->common.shmem_base + + offsetof(struct shmem_region, func_mb) + + E1H_FUNC_MAX * sizeof(struct drv_func_mb); + /* + * get mf configuration: + * 1. 
Existence of MF configuration + * 2. MAC address must be legal (check only upper bytes) + * for Switch-Independent mode; + * OVLAN must be legal for Switch-Dependent mode + * 3. SF_MODE configures specific MF mode + */ + if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { + /* get mf configuration */ + val = SHMEM_RD(bp, + dev_info.shared_feature_config.config); + val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK; + + switch (val) { + case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: + validate_set_si_mode(bp); + break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: + if ((!CHIP_IS_E1x(bp)) && + (MF_CFG_RD(bp, func_mf_config[func]. + mac_upper) != 0xffff) && + (SHMEM2_HAS(bp, + afex_driver_support))) { + bp->mf_mode = MULTI_FUNCTION_AFEX; + bp->mf_config[vn] = MF_CFG_RD(bp, + func_mf_config[func].config); + } else { + BNX2X_DEV_INFO("can not configure afex mode\n"); + } + break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: + /* get OV configuration */ + val = MF_CFG_RD(bp, + func_mf_config[FUNC_0].e1hov_tag); + val &= FUNC_MF_CFG_E1HOV_TAG_MASK; + + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_config[vn] = MF_CFG_RD(bp, + func_mf_config[func].config); + } else + BNX2X_DEV_INFO("illegal OV for SD\n"); + break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_sub_mode = SUB_MF_MODE_UFP; + bp->mf_config[vn] = + MF_CFG_RD(bp, + func_mf_config[func].config); + break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: + bp->mf_config[vn] = 0; + break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE: + val2 = SHMEM_RD(bp, + dev_info.shared_hw_config.config_3); + val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK; + switch (val2) { + case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5: + validate_set_si_mode(bp); + bp->mf_sub_mode = + SUB_MF_MODE_NPAR1_DOT_5; + break; + default: + /* Unknown configuration */ + bp->mf_config[vn] = 0; + BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n", + val); + } + break; + default: + /* Unknown configuration: reset mf_config */ + bp->mf_config[vn] = 0; + BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val); + } + } + + BNX2X_DEV_INFO("%s function mode\n", + IS_MF(bp) ? 
"multi" : "single"); + + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK; + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_ov = val; + bp->path_has_ovlan = true; + + BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", + func, bp->mf_ov, bp->mf_ov); + } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) { + dev_err(&bp->pdev->dev, + "Unexpected - no valid MF OV for func %d in UFP mode\n", + func); + bp->path_has_ovlan = true; + } else { + dev_err(&bp->pdev->dev, + "No valid MF OV for func %d, aborting\n", + func); + return -EPERM; + } + break; + case MULTI_FUNCTION_AFEX: + BNX2X_DEV_INFO("func %d is in MF afex mode\n", func); + break; + case MULTI_FUNCTION_SI: + BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", + func); + break; + default: + if (vn) { + dev_err(&bp->pdev->dev, + "VN %d is in a single function mode, aborting\n", + vn); + return -EPERM; + } + break; + } + + /* check if other port on the path needs ovlan: + * Since MF configuration is shared between ports + * Possible mixed modes are only + * {SF, SI} {SF, SD} {SD, SF} {SI, SF} + */ + if (CHIP_MODE_IS_4_PORT(bp) && + !bp->path_has_ovlan && + !IS_MF(bp) && + bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) { + u8 other_port = !BP_PORT(bp); + u8 other_func = BP_PATH(bp) + 2*other_port; + val = MF_CFG_RD(bp, + func_mf_config[other_func].e1hov_tag); + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) + bp->path_has_ovlan = true; + } + } + + /* adjust igu_sb_cnt to MF for E1H */ + if (CHIP_IS_E1H(bp) && IS_MF(bp)) + bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT); + + /* port info */ + bnx2x_get_port_hwinfo(bp); + + /* Get MAC addresses */ + bnx2x_get_mac_hwinfo(bp); + + bnx2x_get_cnic_info(bp); + + return rc; +} + +static void bnx2x_read_fwinfo(struct bnx2x *bp) +{ + int cnt, i, block_end, rodi; + char vpd_start[BNX2X_VPD_LEN+1]; + char str_id_reg[VENDOR_ID_LEN+1]; + char str_id_cap[VENDOR_ID_LEN+1]; + char *vpd_data; + char *vpd_extended_data = NULL; + u8 len; + + cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); + memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); + + if (cnt < BNX2X_VPD_LEN) + goto out_not_found; + + /* VPD RO tag should be first tag after identifier string, hence + * we should be able to find it in first BNX2X_VPD_LEN chars + */ + i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, + PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + block_end = i + PCI_VPD_LRDT_TAG_SIZE + + pci_vpd_lrdt_size(&vpd_start[i]); + + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > BNX2X_VPD_LEN) { + vpd_extended_data = kmalloc(block_end, GFP_KERNEL); + if (vpd_extended_data == NULL) + goto out_not_found; + + /* read rest of vpd image into vpd_extended_data */ + memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); + cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, + block_end - BNX2X_VPD_LEN, + vpd_extended_data + BNX2X_VPD_LEN); + if (cnt < (block_end - BNX2X_VPD_LEN)) + goto out_not_found; + vpd_data = vpd_extended_data; + } else + vpd_data = vpd_start; + + /* now vpd_data holds full vpd content in both cases */ + + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (rodi < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[rodi]); + + if (len != VENDOR_ID_LEN) + goto out_not_found; + + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + + /* vendor specific info */ + snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); + snprintf(str_id_cap, VENDOR_ID_LEN + 1, 
"%04X", PCI_VENDOR_ID_DELL); + if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || + !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { + + rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (rodi >= 0) { + len = pci_vpd_info_field_size(&vpd_data[rodi]); + + rodi += PCI_VPD_INFO_FLD_HDR_SIZE; + + if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { + memcpy(bp->fw_ver, &vpd_data[rodi], len); + bp->fw_ver[len] = ' '; + } + } + kfree(vpd_extended_data); + return; + } +out_not_found: + kfree(vpd_extended_data); + return; +} + +static void bnx2x_set_modes_bitmap(struct bnx2x *bp) +{ + u32 flags = 0; + + if (CHIP_REV_IS_FPGA(bp)) + SET_FLAGS(flags, MODE_FPGA); + else if (CHIP_REV_IS_EMUL(bp)) + SET_FLAGS(flags, MODE_EMUL); + else + SET_FLAGS(flags, MODE_ASIC); + + if (CHIP_MODE_IS_4_PORT(bp)) + SET_FLAGS(flags, MODE_PORT4); + else + SET_FLAGS(flags, MODE_PORT2); + + if (CHIP_IS_E2(bp)) + SET_FLAGS(flags, MODE_E2); + else if (CHIP_IS_E3(bp)) { + SET_FLAGS(flags, MODE_E3); + if (CHIP_REV(bp) == CHIP_REV_Ax) + SET_FLAGS(flags, MODE_E3_A0); + else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ + SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); + } + + if (IS_MF(bp)) { + SET_FLAGS(flags, MODE_MF); + switch (bp->mf_mode) { + case MULTI_FUNCTION_SD: + SET_FLAGS(flags, MODE_MF_SD); + break; + case MULTI_FUNCTION_SI: + SET_FLAGS(flags, MODE_MF_SI); + break; + case MULTI_FUNCTION_AFEX: + SET_FLAGS(flags, MODE_MF_AFEX); + break; + } + } else + SET_FLAGS(flags, MODE_SF); + +#if defined(__LITTLE_ENDIAN) + SET_FLAGS(flags, MODE_LITTLE_ENDIAN); +#else /*(__BIG_ENDIAN)*/ + SET_FLAGS(flags, MODE_BIG_ENDIAN); +#endif + INIT_MODE_FLAGS(bp) = flags; +} + +static int bnx2x_init_bp(struct bnx2x *bp) +{ + int func; + int rc; + + mutex_init(&bp->port.phy_mutex); + mutex_init(&bp->fw_mb_mutex); + mutex_init(&bp->drv_info_mutex); + sema_init(&bp->stats_lock, 1); + bp->drv_info_mng_owner = false; + + INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); + INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); + INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); + INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task); + if (IS_PF(bp)) { + rc = bnx2x_get_hwinfo(bp); + if (rc) + return rc; + } else { + eth_zero_addr(bp->dev->dev_addr); + } + + bnx2x_set_modes_bitmap(bp); + + rc = bnx2x_alloc_mem_bp(bp); + if (rc) + return rc; + + bnx2x_read_fwinfo(bp); + + func = BP_FUNC(bp); + + /* need to reset chip if undi was active */ + if (IS_PF(bp) && !BP_NOMCP(bp)) { + /* init fw_seq */ + bp->fw_seq = + SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); + + rc = bnx2x_prev_unload(bp); + if (rc) { + bnx2x_free_mem_bp(bp); + return rc; + } + } + + if (CHIP_REV_IS_FPGA(bp)) + dev_err(&bp->pdev->dev, "FPGA detected\n"); + + if (BP_NOMCP(bp) && (func == 0)) + dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); + + bp->disable_tpa = disable_tpa; + bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); + /* Reduce memory usage in kdump environment by disabling TPA */ + bp->disable_tpa |= is_kdump_kernel(); + + /* Set TPA flags */ + if (bp->disable_tpa) { + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + } + + if (CHIP_IS_E1(bp)) + bp->dropless_fc = 0; + else + bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); + + bp->mrrs = mrrs; + + bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 
0 : MAX_TX_AVAIL; + if (IS_VF(bp)) + bp->rx_ring_size = MAX_RX_AVAIL; + + /* make sure that the numbers are in the right granularity */ + bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; + bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; + + bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; + + init_timer(&bp->timer); + bp->timer.expires = jiffies + bp->current_interval; + bp->timer.data = (unsigned long) bp; + bp->timer.function = bnx2x_timer; + + if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && + SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_RD(bp, dcbx_lldp_params_offset) && + SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { + bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); + bnx2x_dcbx_init_params(bp); + } else { + bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF); + } + + if (CHIP_IS_E1x(bp)) + bp->cnic_base_cl_id = FP_SB_MAX_E1x; + else + bp->cnic_base_cl_id = FP_SB_MAX_E2; + + /* multiple tx priority */ + if (IS_VF(bp)) + bp->max_cos = 1; + else if (CHIP_IS_E1x(bp)) + bp->max_cos = BNX2X_MULTI_TX_COS_E1X; + else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) + bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; + else if (CHIP_IS_E3B0(bp)) + bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; + else + BNX2X_ERR("unknown chip %x revision %x\n", + CHIP_NUM(bp), CHIP_REV(bp)); + BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); + + /* We need at least one default status block for slow-path events, + * second status block for the L2 queue, and a third status block for + * CNIC if supported. + */ + if (IS_VF(bp)) + bp->min_msix_vec_cnt = 1; + else if (CNIC_SUPPORT(bp)) + bp->min_msix_vec_cnt = 3; + else /* PF w/o cnic */ + bp->min_msix_vec_cnt = 2; + BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); + + bp->dump_preset_idx = 1; + + if (CHIP_IS_E3B0(bp)) + bp->flags |= PTP_SUPPORTED; + + return rc; +} + +/**************************************************************************** +* General service functions +****************************************************************************/ + +/* + * net_device service functions + */ + +/* called with rtnl_lock */ +static int bnx2x_open(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc; + + bp->stats_init = true; + + netif_carrier_off(dev); + + bnx2x_set_power_state(bp, PCI_D0); + + /* If parity had happen during the unload, then attentions + * and/or RECOVERY_IN_PROGRES may still be set. In this case we + * want the first function loaded on the current engine to + * complete the recovery. + * Parity recovery is only relevant for PF driver. + */ + if (IS_PF(bp)) { + int other_engine = BP_PATH(bp) ? 0 : 1; + bool other_load_status, load_status; + bool global = false; + + other_load_status = bnx2x_get_load_status(bp, other_engine); + load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); + if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || + bnx2x_chk_parity_attn(bp, &global, true)) { + do { + /* If there are attentions and they are in a + * global blocks, set the GLOBAL_RESET bit + * regardless whether it will be this function + * that will complete the recovery or not. + */ + if (global) + bnx2x_set_reset_global(bp); + + /* Only the first function on the current + * engine should try to recover in open. In case + * of attentions in global blocks only the first + * in the chip should try to recover. 
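+				 * The leader lock tried below ensures that
+				 * only one function actually runs the
+				 * recovery flow (bnx2x_leader_reset()).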
+ */ + if ((!load_status && + (!global || !other_load_status)) && + bnx2x_trylock_leader_lock(bp) && + !bnx2x_leader_reset(bp)) { + netdev_info(bp->dev, + "Recovered in open\n"); + break; + } + + /* recovery has failed... */ + bnx2x_set_power_state(bp, PCI_D3hot); + bp->recovery_state = BNX2X_RECOVERY_FAILED; + + BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" + "If you still see this message after a few retries then power cycle is required.\n"); + + return -EAGAIN; + } while (0); + } + } + + bp->recovery_state = BNX2X_RECOVERY_DONE; + rc = bnx2x_nic_load(bp, LOAD_OPEN); + if (rc) + return rc; + return 0; +} + +/* called with rtnl_lock */ +static int bnx2x_close(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* Unload the driver, release IRQs */ + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); + + return 0; +} + +static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p) +{ + int mc_count = netdev_mc_count(bp->dev); + struct bnx2x_mcast_list_elem *mc_mac = + kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC); + struct netdev_hw_addr *ha; + + if (!mc_mac) + return -ENOMEM; + + INIT_LIST_HEAD(&p->mcast_list); + + netdev_for_each_mc_addr(ha, bp->dev) { + mc_mac->mac = bnx2x_mc_addr(ha); + list_add_tail(&mc_mac->link, &p->mcast_list); + mc_mac++; + } + + p->mcast_list_len = mc_count; + + return 0; +} + +static void bnx2x_free_mcast_macs_list( + struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_mcast_list_elem *mc_mac = + list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, + link); + + WARN_ON(!mc_mac); + kfree(mc_mac); +} + +/** + * bnx2x_set_uc_list - configure a new unicast MACs list. + * + * @bp: driver handle + * + * We will use zero (0) as a MAC type for these MACs. 
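+ * Previously configured unicast MACs are scheduled for deletion first;
+ * the addresses currently on the netdev are then added and the pending
+ * commands are executed with RAMROD_CONT set.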
+ */ +static int bnx2x_set_uc_list(struct bnx2x *bp) +{ + int rc; + struct net_device *dev = bp->dev; + struct netdev_hw_addr *ha; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; + unsigned long ramrod_flags = 0; + + /* First schedule a cleanup up of old configuration */ + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false); + if (rc < 0) { + BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc); + return rc; + } + + netdev_for_each_uc_addr(ha, dev) { + rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, + BNX2X_UC_LIST_MAC, &ramrod_flags); + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, + "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + + } else if (rc < 0) { + + BNX2X_ERR("Failed to schedule ADD operations: %d\n", + rc); + return rc; + } + } + + /* Execute the pending commands */ + __set_bit(RAMROD_CONT, &ramrod_flags); + return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */, + BNX2X_UC_LIST_MAC, &ramrod_flags); +} + +static int bnx2x_set_mc_list(struct bnx2x *bp) +{ + struct net_device *dev = bp->dev; + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + int rc = 0; + + rparam.mcast_obj = &bp->mcast_obj; + + /* first, clear all configured multicast MACs */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) { + BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc); + return rc; + } + + /* then, configure a new MACs list */ + if (netdev_mc_count(dev)) { + rc = bnx2x_init_mcast_macs_list(bp, &rparam); + if (rc) { + BNX2X_ERR("Failed to create multicast MACs list: %d\n", + rc); + return rc; + } + + /* Now add the new MACs */ + rc = bnx2x_config_mcast(bp, &rparam, + BNX2X_MCAST_CMD_ADD); + if (rc < 0) + BNX2X_ERR("Failed to set a new multicast configuration: %d\n", + rc); + + bnx2x_free_mcast_macs_list(&rparam); + } + + return rc; +} + +/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ +static void bnx2x_set_rx_mode(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->state != BNX2X_STATE_OPEN) { + DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); + return; + } else { + /* Schedule an SP task to handle rest of change */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, + NETIF_MSG_IFUP); + } +} + +void bnx2x_set_rx_mode_inner(struct bnx2x *bp) +{ + u32 rx_mode = BNX2X_RX_MODE_NORMAL; + + DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags); + + netif_addr_lock_bh(bp->dev); + + if (bp->dev->flags & IFF_PROMISC) { + rx_mode = BNX2X_RX_MODE_PROMISC; + } else if ((bp->dev->flags & IFF_ALLMULTI) || + ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) && + CHIP_IS_E1(bp))) { + rx_mode = BNX2X_RX_MODE_ALLMULTI; + } else { + if (IS_PF(bp)) { + /* some multicasts */ + if (bnx2x_set_mc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_ALLMULTI; + + /* release bh lock, as bnx2x_set_uc_list might sleep */ + netif_addr_unlock_bh(bp->dev); + if (bnx2x_set_uc_list(bp) < 0) + rx_mode = BNX2X_RX_MODE_PROMISC; + netif_addr_lock_bh(bp->dev); + } else { + /* configuring mcast to a vf involves sleeping (when we + * wait for the pf's response). 
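+			 * It is therefore deferred to the sp_rtnl task
+			 * instead of being performed under the netif addr
+			 * lock held here.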
+ */ + bnx2x_schedule_sp_rtnl(bp, + BNX2X_SP_RTNL_VFPF_MCAST, 0); + } + } + + bp->rx_mode = rx_mode; + /* handle ISCSI SD mode */ + if (IS_MF_ISCSI_ONLY(bp)) + bp->rx_mode = BNX2X_RX_MODE_NONE; + + /* Schedule the rx_mode command */ + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { + set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + netif_addr_unlock_bh(bp->dev); + return; + } + + if (IS_PF(bp)) { + bnx2x_set_storm_rx_mode(bp); + netif_addr_unlock_bh(bp->dev); + } else { + /* VF will need to request the PF to make this change, and so + * the VF needs to release the bottom-half lock prior to the + * request (as it will likely require sleep on the VF side) + */ + netif_addr_unlock_bh(bp->dev); + bnx2x_vfpf_storm_rx_mode(bp); + } +} + +/* called with rtnl_lock */ +static int bnx2x_mdio_read(struct net_device *netdev, int prtad, + int devad, u16 addr) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 value; + int rc; + + DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", + prtad, devad, addr); + + /* The HW expects different devad if CL22 is used */ + devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value); + bnx2x_release_phy_lock(bp); + DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); + + if (!rc) + rc = value; + return rc; +} + +/* called with rtnl_lock */ +static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct bnx2x *bp = netdev_priv(netdev); + int rc; + + DP(NETIF_MSG_LINK, + "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n", + prtad, devad, addr, value); + + /* The HW expects different devad if CL22 is used */ + devad = (devad == MDIO_DEVAD_NONE) ? 
DEFAULT_PHY_DEV_ADDR : devad; + + bnx2x_acquire_phy_lock(bp); + rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value); + bnx2x_release_phy_lock(bp); + return rc; +} + +/* called with rtnl_lock */ +static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct bnx2x *bp = netdev_priv(dev); + struct mii_ioctl_data *mdio = if_mii(ifr); + + if (!netif_running(dev)) + return -EAGAIN; + + switch (cmd) { + case SIOCSHWTSTAMP: + return bnx2x_hwtstamp_ioctl(bp, ifr); + default: + DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", + mdio->phy_id, mdio->reg_num, mdio->val_in); + return mdio_mii_ioctl(&bp->mdio, mdio, cmd); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void poll_bnx2x(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + int i; + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + } +} +#endif + +static int bnx2x_validate_addr(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + + /* query the bulletin board for mac address configured by the PF */ + if (IS_VF(bp)) + bnx2x_sample_bulletin(bp); + + if (!is_valid_ether_addr(dev->dev_addr)) { + BNX2X_ERR("Non-valid Ethernet address\n"); + return -EADDRNOTAVAIL; + } + return 0; +} + +static int bnx2x_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid) +{ + struct bnx2x *bp = netdev_priv(netdev); + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(bp->phys_port_id); + memcpy(ppid->id, bp->phys_port_id, ppid->id_len); + + return 0; +} + +static netdev_features_t bnx2x_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + features = vlan_features_check(skb, features); + return vxlan_features_check(skb, features); +} + +static const struct net_device_ops bnx2x_netdev_ops = { + .ndo_open = bnx2x_open, + .ndo_stop = bnx2x_close, + .ndo_start_xmit = bnx2x_start_xmit, + .ndo_select_queue = bnx2x_select_queue, + .ndo_set_rx_mode = bnx2x_set_rx_mode, + .ndo_set_mac_address = bnx2x_change_mac_addr, + .ndo_validate_addr = bnx2x_validate_addr, + .ndo_do_ioctl = bnx2x_ioctl, + .ndo_change_mtu = bnx2x_change_mtu, + .ndo_fix_features = bnx2x_fix_features, + .ndo_set_features = bnx2x_set_features, + .ndo_tx_timeout = bnx2x_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_bnx2x, +#endif + .ndo_setup_tc = bnx2x_setup_tc, +#ifdef CONFIG_BNX2X_SRIOV + .ndo_set_vf_mac = bnx2x_set_vf_mac, + .ndo_set_vf_vlan = bnx2x_set_vf_vlan, + .ndo_get_vf_config = bnx2x_get_vf_config, +#endif +#ifdef NETDEV_FCOE_WWNN + .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, +#endif + +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = bnx2x_low_latency_recv, +#endif + .ndo_get_phys_port_id = bnx2x_get_phys_port_id, + .ndo_set_vf_link_state = bnx2x_set_vf_link_state, + .ndo_features_check = bnx2x_features_check, +}; + +static int bnx2x_set_coherency_mask(struct bnx2x *bp) +{ + struct device *dev = &bp->pdev->dev; + + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && + dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) { + dev_err(dev, "System does not support DMA, aborting\n"); + return -EIO; + } + + return 0; +} + +static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) +{ + if (bp->flags & AER_ENABLED) { + pci_disable_pcie_error_reporting(bp->pdev); + bp->flags &= ~AER_ENABLED; + } +} + +static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, + struct net_device *dev, 
unsigned long board_type) +{ + int rc; + u32 pci_cfg_dword; + bool chip_is_e1x = (board_type == BCM57710 || + board_type == BCM57711 || + board_type == BCM57711E); + + SET_NETDEV_DEV(dev, &pdev->dev); + + bp->dev = dev; + bp->pdev = pdev; + + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&bp->pdev->dev, + "Cannot enable PCI device, aborting\n"); + goto err_out; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword); + if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) == + PCICFG_REVESION_ID_ERROR_VAL) { + pr_err("PCI device error, probably due to fan failure, aborting\n"); + rc = -ENODEV; + goto err_out_disable; + } + + if (atomic_read(&pdev->enable_cnt) == 1) { + rc = pci_request_regions(pdev, DRV_MODULE_NAME); + if (rc) { + dev_err(&bp->pdev->dev, + "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable; + } + + pci_set_master(pdev); + pci_save_state(pdev); + } + + if (IS_PF(bp)) { + if (!pdev->pm_cap) { + dev_err(&bp->pdev->dev, + "Cannot find power management capability, aborting\n"); + rc = -EIO; + goto err_out_release; + } + } + + if (!pci_is_pcie(pdev)) { + dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); + rc = -EIO; + goto err_out_release; + } + + rc = bnx2x_set_coherency_mask(bp); + if (rc) + goto err_out_release; + + dev->mem_start = pci_resource_start(pdev, 0); + dev->base_addr = dev->mem_start; + dev->mem_end = pci_resource_end(pdev, 0); + + dev->irq = pdev->irq; + + bp->regview = pci_ioremap_bar(pdev, 0); + if (!bp->regview) { + dev_err(&bp->pdev->dev, + "Cannot map register space, aborting\n"); + rc = -ENOMEM; + goto err_out_release; + } + + /* In E1/E1H use pci device function given by kernel. + * In E2/E3 read physical function from ME register since these chips + * support Physical Device Assignment where kernel BDF maybe arbitrary + * (depending on hypervisor). + */ + if (chip_is_e1x) { + bp->pf_num = PCI_FUNC(pdev->devfn); + } else { + /* chip is E2/3*/ + pci_read_config_dword(bp->pdev, + PCICFG_ME_REGISTER, &pci_cfg_dword); + bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >> + ME_REG_ABS_PF_NUM_SHIFT); + } + BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num); + + /* clean indirect addresses */ + pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, + PCICFG_VENDOR_ID_OFFSET); + + /* Set PCIe reset type to fundamental for EEH recovery */ + pdev->needs_freset = 1; + + /* AER (Advanced Error reporting) configuration */ + rc = pci_enable_pcie_error_reporting(pdev); + if (!rc) + bp->flags |= AER_ENABLED; + else + BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc); + + /* + * Clean the following indirect addresses for all functions since it + * is not used by the driver. + */ + if (IS_PF(bp)) { + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); + + if (chip_is_e1x) { + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); + } + + /* Enable internal target-read (in case we are probed after PF + * FLR). 
Must be done prior to any BAR read access. Only for + * 57712 and up + */ + if (!chip_is_e1x) + REG_WR(bp, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); + } + + dev->watchdog_timeo = TX_TIMEOUT; + + dev->netdev_ops = &bnx2x_netdev_ops; + bnx2x_set_ethtool_ops(bp, dev); + + dev->priv_flags |= IFF_UNICAST_FLT; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | + NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; + if (!chip_is_e1x) { + dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; + dev->hw_enc_features = + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | + NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; + } + + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; + + dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_HIGHDMA; + + /* Add Loopback capability to the device */ + dev->hw_features |= NETIF_F_LOOPBACK; + +#ifdef BCM_DCBNL + dev->dcbnl_ops = &bnx2x_dcbnl_ops; +#endif + + /* get_port_hwinfo() will set prtad and mmds properly */ + bp->mdio.prtad = MDIO_PRTAD_NONE; + bp->mdio.mmds = 0; + bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + bp->mdio.dev = dev; + bp->mdio.mdio_read = bnx2x_mdio_read; + bp->mdio.mdio_write = bnx2x_mdio_write; + + return 0; + +err_out_release: + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + +err_out_disable: + pci_disable_device(pdev); + +err_out: + return rc; +} + +/*(DEBLOBBED)*/ + +static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +{ + const __be32 *source = (const __be32 *)_source; + u32 *target = (u32 *)_target; + u32 i; + + for (i = 0; i < n/4; i++) + target[i] = be32_to_cpu(source[i]); +} + +/* + Ops array is stored in the following format: + {op(8bit), offset(24bit, big endian), data(32bit, big endian)} + */ +static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) +{ + const __be32 *source = (const __be32 *)_source; + struct raw_op *target = (struct raw_op *)_target; + u32 i, j, tmp; + + for (i = 0, j = 0; i < n/8; i++, j += 2) { + tmp = be32_to_cpu(source[j]); + target[i].op = (tmp >> 24) & 0xff; + target[i].offset = tmp & 0xffffff; + target[i].raw_data = be32_to_cpu(source[j + 1]); + } +} + +/* IRO array is stored in the following format: + * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } + */ +static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) +{ + const __be32 *source = (const __be32 *)_source; + struct iro *target = (struct iro *)_target; + u32 i, j, tmp; + + for (i = 0, j = 0; i < n/sizeof(struct iro); i++) { + target[i].base = be32_to_cpu(source[j]); + j++; + tmp = be32_to_cpu(source[j]); + target[i].m1 = (tmp >> 16) & 0xffff; + target[i].m2 = tmp & 0xffff; + j++; + tmp = be32_to_cpu(source[j]); + target[i].m3 = (tmp >> 16) & 0xffff; + target[i].size = tmp & 0xffff; + j++; + } +} + +static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) +{ + const __be16 *source = (const __be16 *)_source; + u16 *target = (u16 *)_target; + u32 i; + + for (i = 0; i < n/2; i++) + target[i] = be16_to_cpu(source[i]); +} + +#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \ +do { \ + u32 len = be32_to_cpu(fw_hdr->arr.len); \ + bp->arr = kmalloc(len, GFP_KERNEL); \ + if (!bp->arr) \ + goto lbl; \ + 
func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \ + (u8 *)bp->arr, len); \ +} while (0) + +static int bnx2x_init_firmware(struct bnx2x *bp) +{ + const char *fw_file_name; + struct bnx2x_fw_file_hdr *fw_hdr; + int rc; + + if (bp->firmware) + return 0; + + if (CHIP_IS_E1(bp)) + fw_file_name = FW_FILE_NAME_E1; + else if (CHIP_IS_E1H(bp)) + fw_file_name = FW_FILE_NAME_E1H; + else if (!CHIP_IS_E1x(bp)) + fw_file_name = FW_FILE_NAME_E2; + else { + BNX2X_ERR("Unsupported chip revision\n"); + return -EINVAL; + } + BNX2X_DEV_INFO("Loading %s\n", fw_file_name); + + rc = reject_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); + if (rc) { + BNX2X_ERR("Can't load firmware file %s\n", + fw_file_name); + goto request_firmware_exit; + } + + /*(DEBLOBBED)*/ + if (rc) { + BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); + goto request_firmware_exit; + } + + fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; + + /* Initialize the pointers to the init arrays */ + /* Blob */ + BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n); + + /* Opcodes */ + BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); + + /* Offsets */ + BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, + be16_to_cpu_n); + + /* STORMs firmware */ + INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->tsem_int_table_data.offset); + INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->tsem_pram_data.offset); + INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->usem_int_table_data.offset); + INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->usem_pram_data.offset); + INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->xsem_int_table_data.offset); + INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->xsem_pram_data.offset); + INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->csem_int_table_data.offset); + INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + + be32_to_cpu(fw_hdr->csem_pram_data.offset); + /* IRO */ + BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); + + return 0; + +iro_alloc_err: + kfree(bp->init_ops_offsets); +init_offsets_alloc_err: + kfree(bp->init_ops); +init_ops_alloc_err: + kfree(bp->init_data); +request_firmware_exit: + release_firmware(bp->firmware); + bp->firmware = NULL; + + return rc; +} + +static void bnx2x_release_firmware(struct bnx2x *bp) +{ + kfree(bp->init_ops_offsets); + kfree(bp->init_ops); + kfree(bp->init_data); + release_firmware(bp->firmware); + bp->firmware = NULL; +} + +static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { + .init_hw_cmn_chip = bnx2x_init_hw_common_chip, + .init_hw_cmn = bnx2x_init_hw_common, + .init_hw_port = bnx2x_init_hw_port, + .init_hw_func = bnx2x_init_hw_func, + + .reset_hw_cmn = bnx2x_reset_common, + .reset_hw_port = bnx2x_reset_port, + .reset_hw_func = bnx2x_reset_func, + + .gunzip_init = bnx2x_gunzip_init, + .gunzip_end = bnx2x_gunzip_end, + + .init_fw = bnx2x_init_firmware, + .release_fw = bnx2x_release_firmware, +}; + +void bnx2x__init_func_obj(struct bnx2x *bp) +{ + /* Prepare DMAE related driver resources */ + bnx2x_setup_dmae(bp); + + bnx2x_init_func_obj(bp, &bp->func_obj, + bnx2x_sp(bp, func_rdata), + bnx2x_sp_mapping(bp, func_rdata), + bnx2x_sp(bp, func_afex_rdata), + bnx2x_sp_mapping(bp, func_afex_rdata), + &bnx2x_func_sp_drv); +} + +/* must be called after sriov-enable */ +static int bnx2x_set_qm_cid_count(struct bnx2x *bp) +{ + int cid_count = 
BNX2X_L2_MAX_CID(bp);
+
+	if (IS_SRIOV(bp))
+		cid_count += BNX2X_VF_CIDS;
+
+	if (CNIC_SUPPORT(bp))
+		cid_count += CNIC_CID_MAX;
+
+	return roundup(cid_count, QM_CID_ROUND);
+}
+
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev: pci device
+ * @cnic_cnt: number of status blocks reserved for CNIC
+ *
+ */
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
+{
+	int index;
+	u16 control = 0;
+
+	/*
+	 * If MSI-X is not supported - return number of SBs needed to support
+	 * one fast path queue: one FP queue + SB for CNIC
+	 */
+	if (!pdev->msix_cap) {
+		dev_info(&pdev->dev, "no msix capability found\n");
+		return 1 + cnic_cnt;
+	}
+	dev_info(&pdev->dev, "msix capability found\n");
+
+	/*
+	 * The value in the PCI configuration space is the index of the last
+	 * entry, namely one less than the actual size of the table, which is
+	 * exactly what we want to return from this function: number of all SBs
+	 * without the default SB.
+	 * For VFs there is no default SB; the caller accounts for this by
+	 * adding one.
+	 */
+	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
+
+	index = control & PCI_MSIX_FLAGS_QSIZE;
+
+	return index;
+}
+
+static int set_max_cos_est(int chip_id)
+{
+	switch (chip_id) {
+	case BCM57710:
+	case BCM57711:
+	case BCM57711E:
+		return BNX2X_MULTI_TX_COS_E1X;
+	case BCM57712:
+	case BCM57712_MF:
+		return BNX2X_MULTI_TX_COS_E2_E3A0;
+	case BCM57800:
+	case BCM57800_MF:
+	case BCM57810:
+	case BCM57810_MF:
+	case BCM57840_4_10:
+	case BCM57840_2_20:
+	case BCM57840_O:
+	case BCM57840_MFO:
+	case BCM57840_MF:
+	case BCM57811:
+	case BCM57811_MF:
+		return BNX2X_MULTI_TX_COS_E3B0;
+	case BCM57712_VF:
+	case BCM57800_VF:
+	case BCM57810_VF:
+	case BCM57840_VF:
+	case BCM57811_VF:
+		return 1;
+	default:
+		pr_err("Unknown board_type (%d), aborting\n", chip_id);
+		return -ENODEV;
+	}
+}
+
+static int set_is_vf(int chip_id)
+{
+	switch (chip_id) {
+	case BCM57712_VF:
+	case BCM57800_VF:
+	case BCM57810_VF:
+	case BCM57840_VF:
+	case BCM57811_VF:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* nig_tsgen registers relative address */
+#define tsgen_ctrl 0x0
+#define tsgen_freecount 0x10
+#define tsgen_synctime_t0 0x20
+#define tsgen_offset_t0 0x28
+#define tsgen_drift_t0 0x30
+#define tsgen_synctime_t1 0x58
+#define tsgen_offset_t1 0x60
+#define tsgen_drift_t1 0x68
+
+/* FW workaround for setting drift */
+static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
+					  int best_val, int best_period)
+{
+	struct bnx2x_func_state_params func_params = {NULL};
+	struct bnx2x_func_set_timesync_params *set_timesync_params =
+		&func_params.params.set_timesync;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+	/* Function parameters */
+	set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
+	set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+	set_timesync_params->add_sub_drift_adjust_value =
+		drift_dir ?
TS_ADD_VALUE : TS_SUB_VALUE; + set_timesync_params->drift_adjust_value = best_val; + set_timesync_params->drift_adjust_period = best_period; + + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + int rc; + int drift_dir = 1; + int val, period, period1, period2, dif, dif1, dif2; + int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; + + DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); + + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_PTP, + "PTP adjfreq called while the interface is down\n"); + return -EFAULT; + } + + if (ppb < 0) { + ppb = -ppb; + drift_dir = 0; + } + + if (ppb == 0) { + best_val = 1; + best_period = 0x1FFFFFF; + } else if (ppb >= BNX2X_MAX_PHC_DRIFT) { + best_val = 31; + best_period = 1; + } else { + /* Changed not to allow val = 8, 16, 24 as these values + * are not supported in workaround. + */ + for (val = 0; val <= 31; val++) { + if ((val & 0x7) == 0) + continue; + period1 = val * 1000000 / ppb; + period2 = period1 + 1; + if (period1 != 0) + dif1 = ppb - (val * 1000000 / period1); + else + dif1 = BNX2X_MAX_PHC_DRIFT; + if (dif1 < 0) + dif1 = -dif1; + dif2 = ppb - (val * 1000000 / period2); + if (dif2 < 0) + dif2 = -dif2; + dif = (dif1 < dif2) ? dif1 : dif2; + period = (dif1 < dif2) ? period1 : period2; + if (dif < best_dif) { + best_dif = dif; + best_val = val; + best_period = period; + } + } + } + + rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, + best_period); + if (rc) { + BNX2X_ERR("Failed to set drift\n"); + return -EFAULT; + } + + DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, + best_period); + + return 0; +} + +static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + + DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); + + timecounter_adjtime(&bp->timecounter, delta); + + return 0; +} + +static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + + ns = timecounter_read(&bp->timecounter); + + DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + + ns = timespec64_to_ns(ts); + + DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); + + /* Re-init the timecounter */ + timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); + + return 0; +} + +/* Enable (or disable) ancillary features of the phc subsystem */ +static int bnx2x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + + BNX2X_ERR("PHC ancillary features are not supported\n"); + return -ENOTSUPP; +} + +static void bnx2x_register_phc(struct bnx2x *bp) +{ + /* Fill the ptp_clock_info struct and register PTP clock*/ + bp->ptp_clock_info.owner = THIS_MODULE; + snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); + bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */ + bp->ptp_clock_info.n_alarm = 0; + bp->ptp_clock_info.n_ext_ts = 0; + bp->ptp_clock_info.n_per_out = 0; + bp->ptp_clock_info.pps = 0; + bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; + 
bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; + bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; + bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; + bp->ptp_clock_info.enable = bnx2x_ptp_enable; + + bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); + if (IS_ERR(bp->ptp_clock)) { + bp->ptp_clock = NULL; + BNX2X_ERR("PTP clock registeration failed\n"); + } +} + +static int bnx2x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct bnx2x *bp; + enum pcie_link_width pcie_width; + enum pci_bus_speed pcie_speed; + int rc, max_non_def_sbs; + int rx_count, tx_count, rss_count, doorbell_size; + int max_cos_est; + bool is_vf; + int cnic_cnt; + + /* Management FW 'remembers' living interfaces. Allow it some time + * to forget previously living interfaces, allowing a proper re-load. + */ + if (is_kdump_kernel()) { + ktime_t now = ktime_get_boottime(); + ktime_t fw_ready_time = ktime_set(5, 0); + + if (ktime_before(now, fw_ready_time)) + msleep(ktime_ms_delta(fw_ready_time, now)); + } + + /* An estimated maximum supported CoS number according to the chip + * version. + * We will try to roughly estimate the maximum number of CoSes this chip + * may support in order to minimize the memory allocated for Tx + * netdev_queue's. This number will be accurately calculated during the + * initialization of bp->max_cos based on the chip versions AND chip + * revision in the bnx2x_init_bp(). + */ + max_cos_est = set_max_cos_est(ent->driver_data); + if (max_cos_est < 0) + return max_cos_est; + is_vf = set_is_vf(ent->driver_data); + cnic_cnt = is_vf ? 0 : 1; + + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); + + /* add another SB for VF as it has no default SB */ + max_non_def_sbs += is_vf ? 1 : 0; + + /* Maximum number of RSS queues: one IGU SB goes to CNIC */ + rss_count = max_non_def_sbs - cnic_cnt; + + if (rss_count < 1) + return -EINVAL; + + /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ + rx_count = rss_count + cnic_cnt; + + /* Maximum number of netdev Tx queues: + * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 + */ + tx_count = rss_count * max_cos_est + cnic_cnt; + + /* dev zeroed in init_etherdev */ + dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); + if (!dev) + return -ENOMEM; + + bp = netdev_priv(dev); + + bp->flags = 0; + if (is_vf) + bp->flags |= IS_VF_FLAG; + + bp->igu_sb_cnt = max_non_def_sbs; + bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; + bp->msg_enable = debug; + bp->cnic_support = cnic_cnt; + bp->cnic_probe = bnx2x_cnic_probe; + + pci_set_drvdata(pdev, dev); + + rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); + if (rc < 0) { + free_netdev(dev); + return rc; + } + + BNX2X_DEV_INFO("This is a %s function\n", + IS_PF(bp) ? "physical" : "virtual"); + BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off"); + BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs); + BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", + tx_count, rx_count); + + rc = bnx2x_init_bp(bp); + if (rc) + goto init_one_exit; + + /* Map doorbells here as we need the real value of bp->max_cos which + * is initialized in bnx2x_init_bp() to determine the number of + * l2 connections. 
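+	 * A VF obtains its doorbell area from the PF (bnx2x_vf_doorbells()),
+	 * while a PF maps it directly from PCI resource 2 below.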
+ */ + if (IS_VF(bp)) { + bp->doorbells = bnx2x_vf_doorbells(bp); + rc = bnx2x_vf_pci_alloc(bp); + if (rc) + goto init_one_exit; + } else { + doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); + if (doorbell_size > pci_resource_len(pdev, 2)) { + dev_err(&bp->pdev->dev, + "Cannot map doorbells, bar size too small, aborting\n"); + rc = -ENOMEM; + goto init_one_exit; + } + bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + doorbell_size); + } + if (!bp->doorbells) { + dev_err(&bp->pdev->dev, + "Cannot map doorbell space, aborting\n"); + rc = -ENOMEM; + goto init_one_exit; + } + + if (IS_VF(bp)) { + rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); + if (rc) + goto init_one_exit; + } + + /* Enable SRIOV if capability found in configuration space */ + rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); + if (rc) + goto init_one_exit; + + /* calc qm_cid_count */ + bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); + BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); + + /* disable FCOE L2 queue for E1x*/ + if (CHIP_IS_E1x(bp)) + bp->flags |= NO_FCOE_FLAG; + + /* Set bp->num_queues for MSI-X mode*/ + bnx2x_set_num_queues(bp); + + /* Configure interrupt mode: try to enable MSI-X/MSI if + * needed. + */ + rc = bnx2x_set_int_mode(bp); + if (rc) { + dev_err(&pdev->dev, "Cannot set interrupts\n"); + goto init_one_exit; + } + BNX2X_DEV_INFO("set interrupts successfully\n"); + + /* register the net device */ + rc = register_netdev(dev); + if (rc) { + dev_err(&pdev->dev, "Cannot register net device\n"); + goto init_one_exit; + } + BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); + + if (!NO_FCOE(bp)) { + /* Add storage MAC address */ + rtnl_lock(); + dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || + pcie_speed == PCI_SPEED_UNKNOWN || + pcie_width == PCIE_LNK_WIDTH_UNKNOWN) + BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); + else + BNX2X_DEV_INFO( + "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", + board_info[ent->driver_data].name, + (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), + pcie_width, + pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" : + pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" : + pcie_speed == PCIE_SPEED_8_0GT ? 
"8.0GHz" : + "Unknown", + dev->base_addr, bp->pdev->irq, dev->dev_addr); + + bnx2x_register_phc(bp); + + return 0; + +init_one_exit: + bnx2x_disable_pcie_error_reporting(bp); + + if (bp->regview) + iounmap(bp->regview); + + if (IS_PF(bp) && bp->doorbells) + iounmap(bp->doorbells); + + free_netdev(dev); + + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); + + return rc; +} + +static void __bnx2x_remove(struct pci_dev *pdev, + struct net_device *dev, + struct bnx2x *bp, + bool remove_netdev) +{ + if (bp->ptp_clock) { + ptp_clock_unregister(bp->ptp_clock); + bp->ptp_clock = NULL; + } + + /* Delete storage MAC address */ + if (!NO_FCOE(bp)) { + rtnl_lock(); + dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + +#ifdef BCM_DCBNL + /* Delete app tlvs from dcbnl */ + bnx2x_dcbnl_update_applist(bp, true); +#endif + + if (IS_PF(bp) && + !BP_NOMCP(bp) && + (bp->flags & BC_SUPPORTS_RMMOD_CMD)) + bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); + + /* Close the interface - either directly or implicitly */ + if (remove_netdev) { + unregister_netdev(dev); + } else { + rtnl_lock(); + dev_close(dev); + rtnl_unlock(); + } + + bnx2x_iov_remove_one(bp); + + /* Power on: we can't let PCI layer write to us while we are in D3 */ + if (IS_PF(bp)) { + bnx2x_set_power_state(bp, PCI_D0); + + /* Set endianity registers to reset values in case next driver + * boots in different endianty environment. + */ + bnx2x_reset_endianity(bp); + } + + /* Disable MSI/MSI-X */ + bnx2x_disable_msi(bp); + + /* Power off */ + if (IS_PF(bp)) + bnx2x_set_power_state(bp, PCI_D3hot); + + /* Make sure RESET task is not scheduled before continuing */ + cancel_delayed_work_sync(&bp->sp_rtnl_task); + + /* send message via vfpf channel to release the resources of this vf */ + if (IS_VF(bp)) + bnx2x_vfpf_release(bp); + + /* Assumes no further PCIe PM changes will occur */ + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, bp->wol); + pci_set_power_state(pdev, PCI_D3hot); + } + + bnx2x_disable_pcie_error_reporting(bp); + if (remove_netdev) { + if (bp->regview) + iounmap(bp->regview); + + /* For vfs, doorbells are part of the regview and were unmapped + * along with it. FW is only loaded by PF. 
+ */ + if (IS_PF(bp)) { + if (bp->doorbells) + iounmap(bp->doorbells); + + bnx2x_release_firmware(bp); + } else { + bnx2x_vf_pci_dealloc(bp); + } + bnx2x_free_mem_bp(bp); + + free_netdev(dev); + + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); + } +} + +static void bnx2x_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) { + dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); + return; + } + bp = netdev_priv(dev); + + __bnx2x_remove(pdev, dev, bp, true); +} + +static int bnx2x_eeh_nic_unload(struct bnx2x *bp) +{ + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; + + bp->rx_mode = BNX2X_RX_MODE_NONE; + + if (CNIC_LOADED(bp)) + bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); + + /* Stop Tx */ + bnx2x_tx_disable(bp); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + netdev_reset_tc(bp->dev); + + del_timer_sync(&bp->timer); + cancel_delayed_work_sync(&bp->sp_task); + cancel_delayed_work_sync(&bp->period_task); + + if (!down_timeout(&bp->stats_lock, HZ / 10)) { + bp->stats_state = STATS_STATE_DISABLED; + up(&bp->stats_lock); + } + + bnx2x_save_statistics(bp); + + netif_carrier_off(bp->dev); + + return 0; +} + +/** + * bnx2x_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + rtnl_lock(); + + BNX2X_ERR("IO error detected\n"); + + netif_device_detach(dev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(dev)) + bnx2x_eeh_nic_unload(bp); + + bnx2x_prev_path_mark_eeh(bp); + + pci_disable_device(pdev); + + rtnl_unlock(); + + /* Request a slot reset */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnx2x_io_slot_reset - called after the PCI bus has been reset + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. 
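+ *
+ * Return: PCI_ERS_RESULT_DISCONNECT if the device cannot be re-enabled,
+ * PCI_ERS_RESULT_RECOVERED once the hardware has been brought back to a
+ * state from which bnx2x_io_resume() can reload the driver.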
+ */ +static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + int i; + + rtnl_lock(); + BNX2X_ERR("IO slot reset initializing...\n"); + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (netif_running(dev)) + bnx2x_set_power_state(bp, PCI_D0); + + if (netif_running(dev)) { + BNX2X_ERR("IO slot reset --> driver unload\n"); + + /* MCP should have been reset; Need to wait for validity */ + bnx2x_init_shmem(bp); + + if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { + u32 v; + + v = SHMEM2_RD(bp, + drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], + v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); + } + bnx2x_drain_tx_queues(bp); + bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); + bnx2x_netif_stop(bp, 1); + bnx2x_free_irq(bp); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp, true); + + bp->sp_state = 0; + bp->port.pmf = 0; + + bnx2x_prev_unload(bp); + + /* We should have reseted the engine, so It's fair to + * assume the FW will no longer write to the bnx2x driver. + */ + bnx2x_squeeze_objects(bp); + bnx2x_free_skbs(bp); + for_each_rx_queue(bp, i) + bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); + bnx2x_free_fp_mem(bp); + bnx2x_free_mem(bp); + + bp->state = BNX2X_STATE_CLOSED; + } + + rtnl_unlock(); + + /* If AER, perform cleanup of the PCIe registers */ + if (bp->flags & AER_ENABLED) { + if (pci_cleanup_aer_uncorrect_error_status(pdev)) + BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n"); + else + DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n"); + } + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * bnx2x_io_resume - called when traffic can start flowing again + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void bnx2x_io_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp = netdev_priv(dev); + + if (bp->recovery_state != BNX2X_RECOVERY_DONE) { + netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); + return; + } + + rtnl_lock(); + + bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + + if (netif_running(dev)) + bnx2x_nic_load(bp, LOAD_NORMAL); + + netif_device_attach(dev); + + rtnl_unlock(); +} + +static const struct pci_error_handlers bnx2x_err_handler = { + .error_detected = bnx2x_io_error_detected, + .slot_reset = bnx2x_io_slot_reset, + .resume = bnx2x_io_resume, +}; + +static void bnx2x_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnx2x *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + if (!bp) + return; + + rtnl_lock(); + netif_device_detach(dev); + rtnl_unlock(); + + /* Don't remove the netdevice, as there are scenarios which will cause + * the kernel to hang, e.g., when trying to remove bnx2i while the + * rootfs is mounted from SAN. 
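+ * Passing remove_netdev == false below makes __bnx2x_remove() only
+ * close the (already detached) interface and power the device down;
+ * unregister_netdev() and the release of the PCI resources are skipped.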
+ */ + __bnx2x_remove(pdev, dev, bp, false); +} + +static struct pci_driver bnx2x_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnx2x_pci_tbl, + .probe = bnx2x_init_one, + .remove = bnx2x_remove_one, + .suspend = bnx2x_suspend, + .resume = bnx2x_resume, + .err_handler = &bnx2x_err_handler, +#ifdef CONFIG_BNX2X_SRIOV + .sriov_configure = bnx2x_sriov_configure, +#endif + .shutdown = bnx2x_shutdown, +}; + +static int __init bnx2x_init(void) +{ + int ret; + + pr_info("%s", version); + + bnx2x_wq = create_singlethread_workqueue("bnx2x"); + if (bnx2x_wq == NULL) { + pr_err("Cannot create workqueue\n"); + return -ENOMEM; + } + bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); + if (!bnx2x_iov_wq) { + pr_err("Cannot create iov workqueue\n"); + destroy_workqueue(bnx2x_wq); + return -ENOMEM; + } + + ret = pci_register_driver(&bnx2x_pci_driver); + if (ret) { + pr_err("Cannot register driver\n"); + destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); + } + return ret; +} + +static void __exit bnx2x_cleanup(void) +{ + struct list_head *pos, *q; + + pci_unregister_driver(&bnx2x_pci_driver); + + destroy_workqueue(bnx2x_wq); + destroy_workqueue(bnx2x_iov_wq); + + /* Free globally allocated resources */ + list_for_each_safe(pos, q, &bnx2x_prev_list) { + struct bnx2x_prev_path_list *tmp = + list_entry(pos, struct bnx2x_prev_path_list, list); + list_del(pos); + kfree(tmp); + } +} + +void bnx2x_notify_link_changed(struct bnx2x *bp) +{ + REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); +} + +module_init(bnx2x_init); +module_exit(bnx2x_cleanup); + +/** + * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). + * + * @bp: driver handle + * @set: set or clear the CAM entry + * + * This function will wait until the ramrod completion returns. + * Return 0 if success, -ENODEV if ramrod doesn't return. + */ +static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) +{ + unsigned long ramrod_flags = 0; + + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, + &bp->iscsi_l2_mac_obj, true, + BNX2X_ISCSI_ETH_MAC, &ramrod_flags); +} + +/* count denotes the number of new completions we have seen */ +static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) +{ + struct eth_spe *spe; + int cxt_index, cxt_offset; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return; +#endif + + spin_lock_bh(&bp->spq_lock); + BUG_ON(bp->cnic_spq_pending < count); + bp->cnic_spq_pending -= count; + + for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { + u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) + & SPE_HDR_CONN_TYPE) >> + SPE_HDR_CONN_TYPE_SHIFT; + u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) + >> SPE_HDR_CMD_ID_SHIFT) & 0xff; + + /* Set validation for iSCSI L2 client before sending SETUP + * ramrod + */ + if (type == ETH_CONNECTION_TYPE) { + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) { + cxt_index = BNX2X_ISCSI_ETH_CID(bp) / + ILT_PAGE_CIDS; + cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - + (cxt_index * ILT_PAGE_CIDS); + bnx2x_set_ctx_validation(bp, + &bp->context[cxt_index]. + vcxt[cxt_offset].eth, + BNX2X_ISCSI_ETH_CID(bp)); + } + } + + /* + * There may be not more than 8 L2, not more than 8 L5 SPEs + * and in the air. We also check that number of outstanding + * COMMON ramrods is not more than the EQ and SPQ can + * accommodate. 
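+ *
+ * The checks below enforce this per connection type: L2 (ETH) SPEs
+ * consume bp->cq_spq_left, COMMON (NONE) SPEs consume bp->eq_spq_left,
+ * and L5 (iSCSI/FCoE) SPEs are bounded by cnic_eth_dev.max_kwqe_pending
+ * through bp->cnic_spq_pending; posting stops as soon as the relevant
+ * budget is exhausted.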
+ */ + if (type == ETH_CONNECTION_TYPE) { + if (!atomic_read(&bp->cq_spq_left)) + break; + else + atomic_dec(&bp->cq_spq_left); + } else if (type == NONE_CONNECTION_TYPE) { + if (!atomic_read(&bp->eq_spq_left)) + break; + else + atomic_dec(&bp->eq_spq_left); + } else if ((type == ISCSI_CONNECTION_TYPE) || + (type == FCOE_CONNECTION_TYPE)) { + if (bp->cnic_spq_pending >= + bp->cnic_eth_dev.max_kwqe_pending) + break; + else + bp->cnic_spq_pending++; + } else { + BNX2X_ERR("Unknown SPE type: %d\n", type); + bnx2x_panic(); + break; + } + + spe = bnx2x_sp_get_next(bp); + *spe = *bp->cnic_kwq_cons; + + DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", + bp->cnic_spq_pending, bp->cnic_kwq_pending, count); + + if (bp->cnic_kwq_cons == bp->cnic_kwq_last) + bp->cnic_kwq_cons = bp->cnic_kwq; + else + bp->cnic_kwq_cons++; + } + bnx2x_sp_prod_update(bp); + spin_unlock_bh(&bp->spq_lock); +} + +static int bnx2x_cnic_sp_queue(struct net_device *dev, + struct kwqe_16 *kwqes[], u32 count) +{ + struct bnx2x *bp = netdev_priv(dev); + int i; + +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) { + BNX2X_ERR("Can't post to SP queue while panic\n"); + return -EIO; + } +#endif + + if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && + (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { + BNX2X_ERR("Handling parity error recovery. Try again later\n"); + return -EAGAIN; + } + + spin_lock_bh(&bp->spq_lock); + + for (i = 0; i < count; i++) { + struct eth_spe *spe = (struct eth_spe *)kwqes[i]; + + if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) + break; + + *bp->cnic_kwq_prod = *spe; + + bp->cnic_kwq_pending++; + + DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", + spe->hdr.conn_and_cmd_data, spe->hdr.type, + spe->data.update_data_addr.hi, + spe->data.update_data_addr.lo, + bp->cnic_kwq_pending); + + if (bp->cnic_kwq_prod == bp->cnic_kwq_last) + bp->cnic_kwq_prod = bp->cnic_kwq; + else + bp->cnic_kwq_prod++; + } + + spin_unlock_bh(&bp->spq_lock); + + if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) + bnx2x_cnic_sp_post(bp, 0); + + return i; +} + +static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) +{ + struct cnic_ops *c_ops; + int rc = 0; + + mutex_lock(&bp->cnic_mutex); + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_mutex)); + if (c_ops) + rc = c_ops->cnic_ctl(bp->cnic_data, ctl); + mutex_unlock(&bp->cnic_mutex); + + return rc; +} + +static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) +{ + struct cnic_ops *c_ops; + int rc = 0; + + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + rc = c_ops->cnic_ctl(bp->cnic_data, ctl); + rcu_read_unlock(); + + return rc; +} + +/* + * for commands that have no data + */ +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) +{ + struct cnic_ctl_info ctl = {0}; + + ctl.cmd = cmd; + + return bnx2x_cnic_ctl_send(bp, &ctl); +} + +static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) +{ + struct cnic_ctl_info ctl = {0}; + + /* first we tell CNIC and only then we count this as a completion */ + ctl.cmd = CNIC_CTL_COMPLETION_CMD; + ctl.data.comp.cid = cid; + ctl.data.comp.error = err; + + bnx2x_cnic_ctl_send_bh(bp, &ctl); + bnx2x_cnic_sp_post(bp, 0); +} + +/* Called with netif_addr_lock_bh() taken. + * Sets an rx_mode config for an iSCSI ETH client. + * Doesn't block. + * Completion should be checked outside. 
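+ *
+ * Typical usage (as in the DRV_CTL_START_L2_CMD handler below):
+ *
+ *	netif_addr_lock_bh(dev);
+ *	bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ *	netif_addr_unlock_bh(dev);
+ *
+ *	__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ *	__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+ *	if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ *		BNX2X_ERR("rx_mode completion timed out!\n");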
+ */ +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) +{ + unsigned long accept_flags = 0, ramrod_flags = 0; + u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); + int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; + + if (start) { + /* Start accepting on iSCSI L2 ring. Accept all multicasts + * because it's the only way for UIO Queue to accept + * multicasts (in non-promiscuous mode only one Queue per + * function will receive multicast packets (leading in our + * case). + */ + __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + /* Clear STOP_PENDING bit if START is requested */ + clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); + + sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; + } else + /* Clear START_PENDING bit if STOP is requested */ + clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); + + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) + set_bit(sched_state, &bp->sp_state); + else { + __set_bit(RAMROD_RX, &ramrod_flags); + bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, + ramrod_flags); + } +} + +static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc = 0; + + switch (ctl->cmd) { + case DRV_CTL_CTXTBL_WR_CMD: { + u32 index = ctl->data.io.offset; + dma_addr_t addr = ctl->data.io.dma_addr; + + bnx2x_ilt_wr(bp, index, addr); + break; + } + + case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { + int count = ctl->data.credit.credit_count; + + bnx2x_cnic_sp_post(bp, count); + break; + } + + /* rtnl_lock is held. */ + case DRV_CTL_START_L2_CMD: { + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + unsigned long sp_bits = 0; + + /* Configure the iSCSI classification object */ + bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, + cp->iscsi_l2_client_id, + cp->iscsi_l2_cid, BP_FUNC(bp), + bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, BNX2X_OBJ_TYPE_RX, + &bp->macs_pool); + + /* Set iSCSI MAC address */ + rc = bnx2x_set_iscsi_eth_mac_addr(bp); + if (rc) + break; + + mmiowb(); + barrier(); + + /* Start accepting on iSCSI L2 ring */ + + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, true); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); + + break; + } + + /* rtnl_lock is held. 
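+ * The STOP_L2 flow below mirrors START_L2 in reverse: rx_mode on the
+ * iSCSI L2 ring is shut down first, and only after its completion is
+ * observed is the iSCSI L2 MAC deleted.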
*/ + case DRV_CTL_STOP_L2_CMD: { + unsigned long sp_bits = 0; + + /* Stop accepting on iSCSI L2 ring */ + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, false); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); + + mmiowb(); + barrier(); + + /* Unset iSCSI L2 MAC */ + rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, + BNX2X_ISCSI_ETH_MAC, true); + break; + } + case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { + int count = ctl->data.credit.credit_count; + + smp_mb__before_atomic(); + atomic_add(count, &bp->cq_spq_left); + smp_mb__after_atomic(); + break; + } + case DRV_CTL_ULP_REGISTER_CMD: { + int ulp_type = ctl->data.register_data.ulp_type; + + if (CHIP_IS_E3(bp)) { + int idx = BP_FW_MB_IDX(bp); + u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); + int path = BP_PATH(bp); + int port = BP_PORT(bp); + int i; + u32 scratch_offset; + u32 *host_addr; + + /* first write capability to shmem2 */ + if (ulp_type == CNIC_ULP_ISCSI) + cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; + else if (ulp_type == CNIC_ULP_FCOE) + cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; + SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); + + if ((ulp_type != CNIC_ULP_FCOE) || + (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || + (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) + break; + + /* if reached here - should write fcoe capabilities */ + scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); + if (!scratch_offset) + break; + scratch_offset += offsetof(struct glob_ncsi_oem_data, + fcoe_features[path][port]); + host_addr = (u32 *) &(ctl->data.register_data. + fcoe_features); + for (i = 0; i < sizeof(struct fcoe_capabilities); + i += 4) + REG_WR(bp, scratch_offset + i, + *(host_addr + i/4)); + } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + break; + } + + case DRV_CTL_ULP_UNREGISTER_CMD: { + int ulp_type = ctl->data.ulp_type; + + if (CHIP_IS_E3(bp)) { + int idx = BP_FW_MB_IDX(bp); + u32 cap; + + cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); + if (ulp_type == CNIC_ULP_ISCSI) + cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; + else if (ulp_type == CNIC_ULP_FCOE) + cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; + SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); + } + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + break; + } + + default: + BNX2X_ERR("unknown command %x\n", ctl->cmd); + rc = -EINVAL; + } + + return rc; +} + +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + if (bp->flags & USING_MSIX_FLAG) { + cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; + cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; + cp->irq_arr[0].vector = bp->msix_table[1].vector; + } else { + cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; + cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; + } + if (!CHIP_IS_E1x(bp)) + cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; + else + cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; + + cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); + cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); + cp->irq_arr[1].status_blk = bp->def_status_blk; + cp->irq_arr[1].status_blk_num = DEF_SB_ID; + cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; + + cp->num_irq = 2; +} + +void bnx2x_setup_cnic_info(struct bnx2x *bp) +{ + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + + 
bnx2x_cid_ilt_lines(bp); + cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); + + DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", + BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, + cp->iscsi_l2_cid); + + if (NO_ISCSI_OOO(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; +} + +static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, + void *data) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + int rc; + + DP(NETIF_MSG_IFUP, "Register_cnic called\n"); + + if (ops == NULL) { + BNX2X_ERR("NULL ops received\n"); + return -EINVAL; + } + + if (!CNIC_SUPPORT(bp)) { + BNX2X_ERR("Can't register CNIC when not supported\n"); + return -EOPNOTSUPP; + } + + if (!CNIC_LOADED(bp)) { + rc = bnx2x_load_cnic(bp); + if (rc) { + BNX2X_ERR("CNIC-related load failed\n"); + return rc; + } + } + + bp->cnic_enabled = true; + + bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!bp->cnic_kwq) + return -ENOMEM; + + bp->cnic_kwq_cons = bp->cnic_kwq; + bp->cnic_kwq_prod = bp->cnic_kwq; + bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; + + bp->cnic_spq_pending = 0; + bp->cnic_kwq_pending = 0; + + bp->cnic_data = data; + + cp->num_irq = 0; + cp->drv_state |= CNIC_DRV_STATE_REGD; + cp->iro_arr = bp->iro_arr; + + bnx2x_setup_cnic_irq_info(bp); + + rcu_assign_pointer(bp->cnic_ops, ops); + + /* Schedule driver to read CNIC driver versions */ + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); + + return 0; +} + +static int bnx2x_unregister_cnic(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + mutex_lock(&bp->cnic_mutex); + cp->drv_state = 0; + RCU_INIT_POINTER(bp->cnic_ops, NULL); + mutex_unlock(&bp->cnic_mutex); + synchronize_rcu(); + bp->cnic_enabled = false; + kfree(bp->cnic_kwq); + bp->cnic_kwq = NULL; + + return 0; +} + +static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + + /* If both iSCSI and FCoE are disabled - return NULL in + * order to indicate CNIC that it should not try to work + * with this device. 
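+ * Otherwise the cnic_eth_dev structure below is populated with the L2
+ * resources (doorbells, context table geometry, CIDs) and the callbacks
+ * (drv_submit_kwqes_16, drv_ctl, drv_register_cnic, ...) that the cnic
+ * module uses to drive iSCSI/FCoE offload on this device.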
+ */ + if (NO_ISCSI(bp) && NO_FCOE(bp)) + return NULL; + + cp->drv_owner = THIS_MODULE; + cp->chip_id = CHIP_ID(bp); + cp->pdev = bp->pdev; + cp->io_base = bp->regview; + cp->io_base2 = bp->doorbells; + cp->max_kwqe_pending = 8; + cp->ctx_blk_size = CDU_ILT_PAGE_SZ; + cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + + bnx2x_cid_ilt_lines(bp); + cp->ctx_tbl_len = CNIC_ILT_LINES; + cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; + cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; + cp->drv_ctl = bnx2x_drv_ctl; + cp->drv_register_cnic = bnx2x_register_cnic; + cp->drv_unregister_cnic = bnx2x_unregister_cnic; + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); + cp->iscsi_l2_client_id = + bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); + + if (NO_ISCSI_OOO(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; + + if (NO_ISCSI(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; + + if (NO_FCOE(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; + + BNX2X_DEV_INFO( + "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n", + cp->ctx_blk_size, + cp->ctx_tbl_offset, + cp->ctx_tbl_len, + cp->starting_cid); + return cp; +} + +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + u32 offset = BAR_USTRORM_INTMEM; + + if (IS_VF(bp)) + return bnx2x_vf_ustorm_prods_offset(bp, fp); + else if (!CHIP_IS_E1x(bp)) + offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + else + offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); + + return offset; +} + +/* called only on E1H or E2. + * When pretending to be PF, the pretend value is the function number 0...7 + * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID + * combination + */ +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) +{ + u32 pretend_reg; + + if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX) + return -1; + + /* get my own pretend register */ + pretend_reg = bnx2x_get_pretend_reg(bp); + REG_WR(bp, pretend_reg, pretend_func_val); + REG_RD(bp, pretend_reg); + return 0; +} + +static void bnx2x_ptp_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); + int port = BP_PORT(bp); + u32 val_seq; + u64 timestamp, ns; + struct skb_shared_hwtstamps shhwtstamps; + + /* Read Tx timestamp registers */ + val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID); + if (val_seq & 0x10000) { + /* There is a valid timestamp value */ + timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_LSB); + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? 
NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + + DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); + } else { + DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n"); + /* Reschedule to keep checking for a valid timestamp value */ + schedule_work(&bp->ptp_task); + } +} + +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) +{ + int port = BP_PORT(bp); + u64 timestamp, ns; + + timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB); + + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : + NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); + + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + + DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} + +/* Read the PHC */ +static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc) +{ + struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); + int port = BP_PORT(bp); + u32 wb_data[2]; + u64 phc_cycles; + + REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 : + NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2); + phc_cycles = wb_data[1]; + phc_cycles = (phc_cycles << 32) + wb_data[0]; + + DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); + + return phc_cycles; +} + +static void bnx2x_init_cyclecounter(struct bnx2x *bp) +{ + memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); + bp->cyclecounter.read = bnx2x_cyclecounter_read; + bp->cyclecounter.mask = CYCLECOUNTER_MASK(64); + bp->cyclecounter.shift = 1; + bp->cyclecounter.mult = 1; +} + +static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &func_params.params.set_timesync; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; + + /* Function parameters */ + set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; + set_timesync_params->offset_cmd = TS_OFFSET_KEEP; + + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_enable_ptp_packets(struct bnx2x *bp) +{ + struct bnx2x_queue_state_params q_params; + int rc, i; + + /* send queue update ramrod to enable PTP packets */ + memset(&q_params, 0, sizeof(q_params)); + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + &q_params.params.update.update_flags); + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS, + &q_params.params.update.update_flags); + + /* send the ramrod on all the queues of the PF */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Set the appropriate Queue object */ + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* Update the Queue state */ + rc = 
bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to enable PTP packets\n"); + return rc; + } + } + + return 0; +} + +int bnx2x_configure_ptp_filters(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int rc; + + if (!bp->hwtstamp_ioctl_called) + return 0; + + switch (bp->tx_type) { + case HWTSTAMP_TX_ON: + bp->flags |= TX_TIMESTAMPING_EN; + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE); + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + BNX2X_ERR("One-step timestamping is not supported\n"); + return -ERANGE; + } + + switch (bp->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + bp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE); + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + /* Initialize PTP detection L2 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF); + + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE); + break; + } + + /* Indicate to FW that this PF expects recorded PTP packets */ + rc = bnx2x_enable_ptp_packets(bp); + if (rc) + return rc; + + /* Enable sending PTP packets to host */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x1); + + return 0; +} + +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int rc; + + DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", + config.tx_type, config.rx_filter); + + if (config.flags) { + BNX2X_ERR("config.flags is reserved for future use\n"); + return -EINVAL; + } + + bp->hwtstamp_ioctl_called = 1; + bp->tx_type = config.tx_type; + bp->rx_filter = config.rx_filter; + + rc = bnx2x_configure_ptp_filters(bp); + if (rc) + return rc; + + config.rx_filter = bp->rx_filter; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +/* Configures HW for PTP */ +static int bnx2x_configure_ptp(struct bnx2x *bp) +{ + int rc, port = BP_PORT(bp); + u32 wb_data[2]; + + /* Reset PTP event detection rules - will be configured in the IOCTL */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable PTP packets to host - will be configured in the IOCTL*/ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); + + /* Enable the PTP feature */ + REG_WR(bp, port ? NIG_REG_P1_PTP_EN : + NIG_REG_P0_PTP_EN, 0x3F); + + /* Enable the free-running counter */ + wb_data[0] = 0; + wb_data[1] = 0; + REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2); + + /* Reset drift register (offset register is not reset) */ + rc = bnx2x_send_reset_timesync_ramrod(bp); + if (rc) { + BNX2X_ERR("Failed to reset PHC drift register\n"); + return -EFAULT; + } + + /* Reset possibly old timestamps */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : + NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + + return 0; +} + +/* Called during load, to initialize PTP-related stuff */ +void bnx2x_init_ptp(struct bnx2x *bp) +{ + int rc; + + /* Configure PTP in HW */ + rc = bnx2x_configure_ptp(bp); + if (rc) { + BNX2X_ERR("Stopping PTP initialization\n"); + return; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. + */ + if (!bp->timecounter_init_done) { + bnx2x_init_cyclecounter(bp); + timecounter_init(&bp->timecounter, &bp->cyclecounter, + ktime_to_ns(ktime_get_real())); + bp->timecounter_init_done = 1; + } + + DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h new file mode 100644 index 000000000..caf1aef65 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h @@ -0,0 +1,168 @@ +/* bnx2x_mfw_req.h: Broadcom Everest network driver. + * + * Copyright (c) 2012-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNX2X_MFW_REQ_H +#define BNX2X_MFW_REQ_H + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_MAX 2 +#define NVM_PATH_MAX 2 + +/* FCoE capabilities required from the driver */ +struct fcoe_capabilities { + u32 capability1; + /* Maximum number of I/Os per connection */ + #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff + #define FCOE_IOS_PER_CONNECTION_SHIFT 0 + /* Maximum number of Logins per port */ + #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000 + #define FCOE_LOGINS_PER_PORT_SHIFT 16 + + u32 capability2; + /* Maximum number of exchanges */ + #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff + #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0 + /* Maximum NPIV WWN per port */ + #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000 + #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16 + + u32 capability3; + /* Maximum number of targets supported */ + #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff + #define FCOE_TARGETS_SUPPORTED_SHIFT 0 + /* Maximum number of outstanding commands across all connections */ + #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000 + #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16 + + u32 capability4; + #define FCOE_CAPABILITY4_STATEFUL 0x00000001 + #define FCOE_CAPABILITY4_STATELESS 0x00000002 + #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004 +}; + +struct glob_ncsi_oem_data { + u32 driver_version; + u32 unused[3]; + struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX]; +}; + +/* current drv_info version */ +#define DRV_INFO_CUR_VER 2 + +/* drv_info op codes supported */ +enum drv_info_opcode { + ETH_STATS_OPCODE, + FCOE_STATS_OPCODE, + ISCSI_STATS_OPCODE +}; + +#define ETH_STAT_INFO_VERSION_LEN 12 +/* Per PCI Function Ethernet Statistics required from the driver */ +struct eth_stats_info { + /* Function's Driver Version. padded to 12 */ + u8 version[ETH_STAT_INFO_VERSION_LEN]; + /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */ + u8 mac_local[8]; + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + u32 mtu_size; /* MTU Size. Note : Negotiated MTU */ + u32 feature_flags; /* Feature_Flags. */ +#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 +#define FEATURE_ETH_LSO_MASK 0x02 +#define FEATURE_ETH_BOOTMODE_MASK 0x1C +#define FEATURE_ETH_BOOTMODE_SHIFT 2 +#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) +#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) +#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) +#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) +#define FEATURE_ETH_TOE_MASK 0x20 + u32 lso_max_size; /* LSO MaxOffloadSize. */ + u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */ + /* Num Offloaded Connections TCP_IPv4. */ + u32 ipv4_ofld_cnt; + /* Num Offloaded Connections TCP_IPv6. */ + u32 ipv6_ofld_cnt; + u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */ + u32 txq_size; /* TX Descriptors Queue Size */ + u32 rxq_size; /* RX Descriptors Queue Size */ + /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ + u32 txq_avg_depth; + /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ + u32 rxq_avg_depth; + /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ + u32 iov_offload; + /* Number of NetQueue/VMQ Config'd. */ + u32 netq_cnt; + u32 vf_cnt; /* Num VF assigned to this PF. */ +}; + +/* Per PCI Function FCOE Statistics required from the driver */ +struct fcoe_stats_info { + u8 version[12]; /* Function's Driver Version. */ + u8 mac_local[8]; /* Locally Admin Addr. */ + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. 
*/ + u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ + /* QoS Priority (per 802.1p). 0-7255 */ + u32 qos_priority; + u32 txq_size; /* FCoE TX Descriptors Queue Size. */ + u32 rxq_size; /* FCoE RX Descriptors Queue Size. */ + /* FCoE TX Descriptor Queue Avg Depth. */ + u32 txq_avg_depth; + /* FCoE RX Descriptors Queue Avg Depth. */ + u32 rxq_avg_depth; + u32 rx_frames_lo; /* FCoE RX Frames received. */ + u32 rx_frames_hi; /* FCoE RX Frames received. */ + u32 rx_bytes_lo; /* FCoE RX Bytes received. */ + u32 rx_bytes_hi; /* FCoE RX Bytes received. */ + u32 tx_frames_lo; /* FCoE TX Frames sent. */ + u32 tx_frames_hi; /* FCoE TX Frames sent. */ + u32 tx_bytes_lo; /* FCoE TX Bytes sent. */ + u32 tx_bytes_hi; /* FCoE TX Bytes sent. */ +}; + +/* Per PCI Function iSCSI Statistics required from the driver*/ +struct iscsi_stats_info { + u8 version[12]; /* Function's Driver Version. */ + u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ + u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ + /* QoS Priority (per 802.1p). 0-7255 */ + u32 qos_priority; + u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */ + u8 ww_port_name[64]; /* iSCSI World wide port name */ + u8 boot_target_name[64];/* iSCSI Boot Target Name. */ + u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */ + u32 boot_target_portal; /* iSCSI Boot Target Portal. */ + u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */ + u32 max_frame_size; /* Max Frame Size. bytes */ + u32 txq_size; /* PDU TX Descriptors Queue Size. */ + u32 rxq_size; /* PDU RX Descriptors Queue Size. */ + u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */ + u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */ + u32 rx_pdus_lo; /* iSCSI PDUs received. */ + u32 rx_pdus_hi; /* iSCSI PDUs received. */ + u32 rx_bytes_lo; /* iSCSI RX Bytes received. */ + u32 rx_bytes_hi; /* iSCSI RX Bytes received. */ + u32 tx_pdus_lo; /* iSCSI PDUs sent. */ + u32 tx_pdus_hi; /* iSCSI PDUs sent. */ + u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */ + u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */ + u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable. + * 9 nibbles, the position of each nibble + * represents the C-PCP value, the value + * of the nibble = S-PCP value. + */ +}; + +union drv_info_to_mcp { + struct eth_stats_info ether_stat; + struct fcoe_stats_info fcoe_stat; + struct iscsi_stats_info iscsi_stat; +}; +#endif /* BNX2X_MFW_REQ_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h new file mode 100644 index 000000000..49d511092 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -0,0 +1,7669 @@ +/* bnx2x_reg.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * The registers description starts with the register Access type followed + * by size in bits. For example [RW 32]. 
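+ * (i.e. a 32-bit register that may be both read and written.)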
The access types are: + * R - Read only + * RC - Clear on read + * RW - Read/Write + * ST - Statistics register (clear on read) + * W - Write only + * WB - Wide bus register - the size is over 32 bits and it should be + * read/write in consecutive 32 bits accesses + * WR - Write Clear (write 1 to clear the bit) + * + */ +#ifndef BNX2X_REG_H +#define BNX2X_REG_H + +#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) +#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) +#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5) +#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4) +#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1) +/* [RW 1] Initiate the ATC array - reset all the valid bits */ +#define ATC_REG_ATC_INIT_ARRAY 0x1100b8 +/* [R 1] ATC initialization done */ +#define ATC_REG_ATC_INIT_DONE 0x1100bc +/* [RC 6] Interrupt register #0 read clear */ +#define ATC_REG_ATC_INT_STS_CLR 0x1101c0 +/* [RW 5] Parity mask register #0 read/write */ +#define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [R 5] Parity register #0 read */ +#define ATC_REG_ATC_PRTY_STS 0x1101cc +/* [RC 5] Parity register #0 read clear */ +#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 +/* [RW 19] Interrupt mask register #0 read/write */ +#define BRB1_REG_BRB1_INT_MASK 0x60128 +/* [R 19] Interrupt register #0 read */ +#define BRB1_REG_BRB1_INT_STS 0x6011c +/* [RW 4] Parity mask register #0 read/write */ +#define BRB1_REG_BRB1_PRTY_MASK 0x60138 +/* [R 4] Parity register #0 read */ +#define BRB1_REG_BRB1_PRTY_STS 0x6012c +/* [RC 4] Parity register #0 read clear */ +#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130 +/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At + * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address + * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - + * following reset the first rbc access to this reg must be write; there can + * be no more rbc writes after the first one; there can be any number of rbc + * read following the first write; rbc access not following these rules will + * result in hang condition. */ +#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200 +/* [RW 10] The number of free blocks below which the full signal to class 0 + * is asserted */ +#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0 +#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230 +/* [RW 11] The number of free blocks above which the full signal to class 0 + * is de-asserted */ +#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4 +#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234 +/* [RW 11] The number of free blocks below which the full signal to class 1 + * is asserted */ +#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8 +#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238 +/* [RW 11] The number of free blocks above which the full signal to class 1 + * is de-asserted */ +#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc +#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c +/* [RW 11] The number of free blocks below which the full signal to the LB + * port is asserted */ +#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0 +/* [RW 10] The number of free blocks above which the full signal to the LB + * port is de-asserted */ +#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4 +/* [RW 10] The number of free blocks above which the High_llfc signal to + interface #n is de-asserted. */ +#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c +/* [RW 10] The number of free blocks below which the High_llfc signal to + interface #n is asserted. 
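+ * This is the assertion threshold paired with
+ * BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 above: the signal is raised once
+ * the number of free blocks drops below this value and released again
+ * once it climbs above the high threshold.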
*/ +#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c +/* [RW 11] The number of blocks guarantied for the LB port */ +#define BRB1_REG_LB_GUARANTIED 0x601ec +/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port + * before signaling XON. */ +#define BRB1_REG_LB_GUARANTIED_HYST 0x60264 +/* [RW 24] LL RAM data. */ +#define BRB1_REG_LL_RAM 0x61000 +/* [RW 10] The number of free blocks above which the Low_llfc signal to + interface #n is de-asserted. */ +#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c +/* [RW 10] The number of free blocks below which the Low_llfc signal to + interface #n is asserted. */ +#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c +/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244 +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254 +/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1in MAC 0 + * before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258 +/* [RW 11] The number of blocks guarantied for class 0in MAC1.The register + * is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c +/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260 +/* [RW 11] The number of blocks guarantied for the MAC port. The register is + * applicable only when per_class_guaranty_mode is reset. */ +#define BRB1_REG_MAC_GUARANTIED_0 0x601e8 +#define BRB1_REG_MAC_GUARANTIED_1 0x60240 +/* [R 24] The number of full blocks. */ +#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090 +/* [ST 32] The number of cycles that the write_full signal towards MAC #0 + was asserted. */ +#define BRB1_REG_NUM_OF_FULL_CYCLES_0 0x600c8 +#define BRB1_REG_NUM_OF_FULL_CYCLES_1 0x600cc +#define BRB1_REG_NUM_OF_FULL_CYCLES_4 0x600d8 +/* [ST 32] The number of cycles that the pause signal towards MAC #0 was + asserted. 
*/ +#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 +#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc +/* [RW 10] The number of free blocks below which the pause signal to class 0 + * is asserted */ +#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0 +#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220 +/* [RW 11] The number of free blocks above which the pause signal to class 0 + * is de-asserted */ +#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4 +#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224 +/* [RW 11] The number of free blocks below which the pause signal to class 1 + * is asserted */ +#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8 +#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228 +/* [RW 11] The number of free blocks above which the pause signal to class 1 + * is de-asserted */ +#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc +#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c +/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */ +#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 +#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c +/* [RW 10] Write client 0: Assert pause threshold. */ +#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 +/* [RW 1] Indicates if to use per-class guaranty mode (new mode) or per-MAC + * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC + * mode). 1=per-class guaranty mode (new mode). */ +#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268 +/* [R 24] The number of full blocks occpied by port. */ +#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 +/* [RW 1] Reset the design by software. */ +#define BRB1_REG_SOFT_RESET 0x600dc +/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */ +#define CCM_REG_CAM_OCCUP 0xd0188 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_CFC_IFEN 0xd003c +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_CQM_IFEN 0xd000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. + Otherwise 0 is inserted. */ +#define CCM_REG_CCM_CQM_USE_Q 0xd00c0 +/* [RW 11] Interrupt mask register #0 read/write */ +#define CCM_REG_CCM_INT_MASK 0xd01e4 +/* [R 11] Interrupt register #0 read */ +#define CCM_REG_CCM_INT_STS 0xd01d8 +/* [RW 27] Parity mask register #0 read/write */ +#define CCM_REG_CCM_PRTY_MASK 0xd01f4 +/* [R 27] Parity register #0 read */ +#define CCM_REG_CCM_PRTY_STS 0xd01e8 +/* [RC 27] Parity register #0 read clear */ +#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec +/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the input message Reg1WbFlg isn't set. */ +#define CCM_REG_CCM_REG0_SZ 0xd00c4 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_STORM0_IFEN 0xd0004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CCM_STORM1_IFEN 0xd0008 +/* [RW 1] CDU AG read Interface enable. 
If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define CCM_REG_CDU_AG_RD_IFEN 0xd0030 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define CCM_REG_CDU_AG_WR_IFEN 0xd002c +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define CCM_REG_CDU_SM_RD_IFEN 0xd0038 +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define CCM_REG_CDU_SM_WR_IFEN 0xd0034 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define CCM_REG_CFC_INIT_CRD 0xd0204 +/* [RW 2] Auxiliary counter flag Q number 1. */ +#define CCM_REG_CNT_AUX1_Q 0xd00c8 +/* [RW 2] Auxiliary counter flag Q number 2. */ +#define CCM_REG_CNT_AUX2_Q 0xd00cc +/* [RW 28] The CM header value for QM request (primary). */ +#define CCM_REG_CQM_CCM_HDR_P 0xd008c +/* [RW 28] The CM header value for QM request (secondary). */ +#define CCM_REG_CQM_CCM_HDR_S 0xd0090 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CQM_CCM_IFEN 0xd0014 +/* [RW 6] QM output initial credit. Max credit available - 32. Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define CCM_REG_CQM_INIT_CRD 0xd020c +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CQM_P_WEIGHT 0xd00b8 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CQM_S_WEIGHT 0xd00bc +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_CSDM_IFEN 0xd0018 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the SDM interface is detected. */ +#define CCM_REG_CSDM_LENGTH_MIS 0xd0170 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_CSDM_WEIGHT 0xd00b4 +/* [RW 28] The CM header for QM formatting in case of an error in the QM + inputs. */ +#define CCM_REG_ERR_CCM_HDR 0xd0094 +/* [RW 8] The Event ID in case the input message ErrorFlg is set. */ +#define CCM_REG_ERR_EVNT_ID 0xd0098 +/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define CCM_REG_FIC0_INIT_CRD 0xd0210 +/* [RW 8] FIC1 output initial credit. 
Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define CCM_REG_FIC1_INIT_CRD 0xd0214 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr; + ~ccm_registers_gr_ld0_pr.gr_ld0_pr and + ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and + outputs to STORM: aggregation; load FIC0; load FIC1 and store. */ +#define CCM_REG_GR_ARB_TYPE 0xd015c +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed; that the Store channel priority is + the compliment to 4 of the rest priorities - Aggregation channel; Load + (FIC0) channel and Load (FIC1). */ +#define CCM_REG_GR_LD0_PR 0xd0164 +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed; that the Store channel priority is + the compliment to 4 of the rest priorities - Aggregation channel; Load + (FIC0) channel and Load (FIC1). */ +#define CCM_REG_GR_LD1_PR 0xd0168 +/* [RW 2] General flags index. */ +#define CCM_REG_INV_DONE_Q 0xd0108 +/* [RW 4] The number of double REG-pairs(128 bits); loaded from the STORM + context and sent to STORM; for a specific connection type. The double + REG-pairs are used in order to align to STORM context row size of 128 + bits. The offset of these data in the STORM context is always 0. Index + _(0..15) stands for the connection type (one of 16). */ +#define CCM_REG_N_SM_CTX_LD_0 0xd004c +#define CCM_REG_N_SM_CTX_LD_1 0xd0050 +#define CCM_REG_N_SM_CTX_LD_2 0xd0054 +#define CCM_REG_N_SM_CTX_LD_3 0xd0058 +#define CCM_REG_N_SM_CTX_LD_4 0xd005c +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define CCM_REG_PBF_IFEN 0xd0028 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the pbf interface is detected. */ +#define CCM_REG_PBF_LENGTH_MIS 0xd0180 +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define CCM_REG_PBF_WEIGHT 0xd00ac +#define CCM_REG_PHYS_QNUM1_0 0xd0134 +#define CCM_REG_PHYS_QNUM1_1 0xd0138 +#define CCM_REG_PHYS_QNUM2_0 0xd013c +#define CCM_REG_PHYS_QNUM2_1 0xd0140 +#define CCM_REG_PHYS_QNUM3_0 0xd0144 +#define CCM_REG_PHYS_QNUM3_1 0xd0148 +#define CCM_REG_QOS_PHYS_QNUM0_0 0xd0114 +#define CCM_REG_QOS_PHYS_QNUM0_1 0xd0118 +#define CCM_REG_QOS_PHYS_QNUM1_0 0xd011c +#define CCM_REG_QOS_PHYS_QNUM1_1 0xd0120 +#define CCM_REG_QOS_PHYS_QNUM2_0 0xd0124 +#define CCM_REG_QOS_PHYS_QNUM2_1 0xd0128 +#define CCM_REG_QOS_PHYS_QNUM3_0 0xd012c +#define CCM_REG_QOS_PHYS_QNUM3_1 0xd0130 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define CCM_REG_STORM_CCM_IFEN 0xd0010 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the STORM interface is detected. */ +#define CCM_REG_STORM_LENGTH_MIS 0xd016c +/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin) + mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for + weight 1(least prioritised); 2 stands for weight 2 (more prioritised); + tc. 
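+ * The same encoding (0 stands for weight 8, 1 for weight 1, 2 for
+ * weight 2 and so on) is used by all the *_WEIGHT registers in this
+ * block.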
*/
+#define CCM_REG_STORM_WEIGHT 0xd009c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_TSEM_IFEN 0xd001c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the tsem interface is detected. */
+#define CCM_REG_TSEM_LENGTH_MIS 0xd0174
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_TSEM_WEIGHT 0xd00a0
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_USEM_IFEN 0xd0024
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+   the usem interface is detected. */
+#define CCM_REG_USEM_LENGTH_MIS 0xd017c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_USEM_WEIGHT 0xd00a8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_XSEM_IFEN 0xd0020
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the xsem interface is detected. */
+#define CCM_REG_XSEM_LENGTH_MIS 0xd0178
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_XSEM_WEIGHT 0xd00a4
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [12:6] - message
+   pointer; [18:13] - next pointer. */
+#define CCM_REG_XX_DESCR_TABLE 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE 24
+/* [R 7] Used to read the value of the XX protection Free counter. */
+#define CCM_REG_XX_FREE 0xd0184
+/* [RW 6] Initial value for the credit counter; responsible for fulfilling
+   of the Input Stage XX protection buffer by the XX protection pending
+   messages. Max credit available - 127. Write writes the initial credit
+   value; read returns the current value of the credit counter. Must be
+   initialized to maximum XX protected message size - 2 at start-up. */
+#define CCM_REG_XX_INIT_CRD 0xd0220
+/* [RW 7] The maximum number of pending messages; which may be stored in XX
+   protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
+   At write comprises the start value of the ~ccm_registers_xx_free.xx_free
+   counter. */
+#define CCM_REG_XX_MSG_NUM 0xd0224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define CCM_REG_XX_OVFL_EVNT_ID 0xd0044
+/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
+   header pointer. */
+#define CCM_REG_XX_TABLE 0xd0280
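[Editor's note, not part of the patch: the two XX-protection tables above pack several fields into one word. A minimal sketch of how the documented bit ranges could be unpacked; the helper names are hypothetical and only the bit positions quoted in the comments above are assumed.]

    #include <linux/types.h>

    /* CCM_REG_XX_DESCR_TABLE entry: [5:0] message length; [12:6] message
     * pointer; [18:13] next pointer (per the comment above).
     */
    static inline u32 ccm_xx_descr_msg_len(u32 entry)
    {
            return entry & 0x3f;            /* bits [5:0]   */
    }

    static inline u32 ccm_xx_descr_msg_ptr(u32 entry)
    {
            return (entry >> 6) & 0x7f;     /* bits [12:6]  */
    }

    static inline u32 ccm_xx_descr_next_ptr(u32 entry)
    {
            return (entry >> 13) & 0x3f;    /* bits [18:13] */
    }

[The 18-bit CCM_REG_XX_TABLE entries ([5:0] tail pointer, [11:6] link-list size, [17:12] header pointer) can be split the same way.]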
+#define CDU_REG_CDU_CHK_MASK0 0x101000
+#define CDU_REG_CDU_CHK_MASK1 0x101004
+#define CDU_REG_CDU_CONTROL0 0x101008
+#define CDU_REG_CDU_DEBUG 0x101010
+#define CDU_REG_CDU_GLOBAL_PARAMS 0x101020
+/* [RW 7] Interrupt mask register #0 read/write */
+#define CDU_REG_CDU_INT_MASK 0x10103c
+/* [R 7] Interrupt register #0 read */
+#define CDU_REG_CDU_INT_STS 0x101030
+/* [RW 5] Parity mask register #0 read/write */
+#define CDU_REG_CDU_PRTY_MASK 0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
+/* [RC 32] logging of error data in case of a CDU load error:
+   {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
+   active_error; type_error; actual_active; actual_compressed_context}; */
+#define CDU_REG_ERROR_DATA 0x101014
+/* [WB 216] L1TT ram access. Each entry has the following format:
+   {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
+   length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
+#define CDU_REG_L1TT 0x101800
+/* [WB 24] MATT ram access. Each entry has the following format:
+   {RegionLength[11:0]; RegionOffset[11:0]} */
+#define CDU_REG_MATT 0x101100
+/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
+#define CDU_REG_MF_MODE 0x101050
+/* [R 1] indication that the initialization of the activity counter by the
+   hardware was done. */
+#define CFC_REG_AC_INIT_DONE 0x104078
+/* [RW 13] activity counter ram access */
+#define CFC_REG_ACTIVITY_COUNTER 0x104400
+#define CFC_REG_ACTIVITY_COUNTER_SIZE 256
+/* [R 1] indication that the initialization of the CAMs by the hardware was
+   done. */
+#define CFC_REG_CAM_INIT_DONE 0x10407c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define CFC_REG_CFC_INT_MASK 0x104108
+/* [R 2] Interrupt register #0 read */
+#define CFC_REG_CFC_INT_STS 0x1040fc
+/* [RC 2] Interrupt register #0 read clear */
+#define CFC_REG_CFC_INT_STS_CLR 0x104100
+/* [RW 4] Parity mask register #0 read/write */
+#define CFC_REG_CFC_PRTY_MASK 0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
+/* [RW 21] CID CAM access ([21:1] - Data; [0] - Valid) */
+#define CFC_REG_CID_CAM 0x104800
+#define CFC_REG_CONTROL0 0x104028
+#define CFC_REG_DEBUG0 0x104050
+/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
+   vector) whether the CFC should be disabled upon it */
+#define CFC_REG_DISABLE_ON_ERROR 0x104044
+/* [RC 14] CFC error vector. When the CFC detects an internal error it will
+   set one of these bits. The bit description can be found in the CFC
+   specifications */
+#define CFC_REG_ERROR_VECTOR 0x10403c
+/* [WB 93] LCID info ram access */
+#define CFC_REG_INFO_RAM 0x105000
+#define CFC_REG_INFO_RAM_SIZE 1024
+#define CFC_REG_INIT_REG 0x10404c
+#define CFC_REG_INTERFACES 0x104058
+/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. This
+   field allows changing the priorities of the weighted-round-robin arbiter
+   which selects which CFC load client should be served next */
+#define CFC_REG_LCREQ_WEIGHTS 0x104084
+/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
+#define CFC_REG_LINK_LIST 0x104c00
+#define CFC_REG_LINK_LIST_SIZE 256
+/* [R 1] indication that the initialization of the link list by the hardware
+   was done.
*/ +#define CFC_REG_LL_INIT_DONE 0x104074 +/* [R 9] Number of allocated LCIDs which are at empty state */ +#define CFC_REG_NUM_LCIDS_ALLOC 0x104020 +/* [R 9] Number of Arriving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 +#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 +/* [R 9] Number of Leaving LCIDs in Link List Block */ +#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 +#define CFC_REG_WEAK_ENABLE_PF 0x104124 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 +#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 +#define CSDM_REG_AGG_INT_EVENT_11 0xc2064 +#define CSDM_REG_AGG_INT_EVENT_12 0xc2068 +#define CSDM_REG_AGG_INT_EVENT_13 0xc206c +#define CSDM_REG_AGG_INT_EVENT_14 0xc2070 +#define CSDM_REG_AGG_INT_EVENT_15 0xc2074 +#define CSDM_REG_AGG_INT_EVENT_16 0xc2078 +#define CSDM_REG_AGG_INT_EVENT_2 0xc2040 +#define CSDM_REG_AGG_INT_EVENT_3 0xc2044 +#define CSDM_REG_AGG_INT_EVENT_4 0xc2048 +#define CSDM_REG_AGG_INT_EVENT_5 0xc204c +#define CSDM_REG_AGG_INT_EVENT_6 0xc2050 +#define CSDM_REG_AGG_INT_EVENT_7 0xc2054 +#define CSDM_REG_AGG_INT_EVENT_8 0xc2058 +#define CSDM_REG_AGG_INT_EVENT_9 0xc205c +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define CSDM_REG_AGG_INT_MODE_10 0xc21e0 +#define CSDM_REG_AGG_INT_MODE_11 0xc21e4 +#define CSDM_REG_AGG_INT_MODE_12 0xc21e8 +#define CSDM_REG_AGG_INT_MODE_13 0xc21ec +#define CSDM_REG_AGG_INT_MODE_14 0xc21f0 +#define CSDM_REG_AGG_INT_MODE_15 0xc21f4 +#define CSDM_REG_AGG_INT_MODE_16 0xc21f8 +#define CSDM_REG_AGG_INT_MODE_6 0xc21d0 +#define CSDM_REG_AGG_INT_MODE_7 0xc21d4 +#define CSDM_REG_AGG_INT_MODE_8 0xc21d8 +#define CSDM_REG_AGG_INT_MODE_9 0xc21dc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c +/* [RW 16] The maximum value of the completion counter #1 */ +#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define CSDM_REG_CMP_COUNTER_START_ADDR 0xc200c +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSDM_REG_CSDM_INT_MASK_0 0xc229c +#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac +/* [R 32] Interrupt register #0 read */ +#define CSDM_REG_CSDM_INT_STS_0 0xc2290 +#define CSDM_REG_CSDM_INT_STS_1 0xc22a0 +/* [RW 11] Parity mask register #0 read/write */ +#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc +/* [R 11] Parity register #0 read */ +#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 +/* [RC 11] Parity register #0 read clear */ +#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 +#define CSDM_REG_ENABLE_IN1 0xc2238 +#define CSDM_REG_ENABLE_IN2 0xc223c +#define CSDM_REG_ENABLE_OUT1 0xc2240 +#define CSDM_REG_ENABLE_OUT2 0xc2244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. 
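[Editor's note, not part of the patch: several registers above carry the [RC] annotation, meaning the latched bits are returned and cleared by a read of the read-clear copy. A minimal sketch of that access pattern for the CSDM parity status, assuming 'regview' is the driver's ioremapped register BAR; the function name is hypothetical.]

    #include <linux/io.h>
    #include <linux/types.h>

    /* Fetch the latched CSDM parity bits; reading the [RC] copy also clears
     * them, per the "Parity register #0 read clear" comments above.
     */
    static u32 csdm_read_and_clear_parity(void __iomem *regview)
    {
            return readl(regview + CSDM_REG_CSDM_PRTY_STS_CLR);
    }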
*/ +#define CSDM_REG_INIT_CREDIT_PXP_CTRL 0xc24bc +/* [ST 32] The number of ACK after placement messages received */ +#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc227c +/* [ST 32] The number of packet end messages received from the parser */ +#define CSDM_REG_NUM_OF_PKT_END_MSG 0xc2274 +/* [ST 32] The number of requests received from the pxp async if */ +#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc2278 +/* [ST 32] The number of commands received in queue 0 */ +#define CSDM_REG_NUM_OF_Q0_CMD 0xc2248 +/* [ST 32] The number of commands received in queue 10 */ +#define CSDM_REG_NUM_OF_Q10_CMD 0xc226c +/* [ST 32] The number of commands received in queue 11 */ +#define CSDM_REG_NUM_OF_Q11_CMD 0xc2270 +/* [ST 32] The number of commands received in queue 1 */ +#define CSDM_REG_NUM_OF_Q1_CMD 0xc224c +/* [ST 32] The number of commands received in queue 3 */ +#define CSDM_REG_NUM_OF_Q3_CMD 0xc2250 +/* [ST 32] The number of commands received in queue 4 */ +#define CSDM_REG_NUM_OF_Q4_CMD 0xc2254 +/* [ST 32] The number of commands received in queue 5 */ +#define CSDM_REG_NUM_OF_Q5_CMD 0xc2258 +/* [ST 32] The number of commands received in queue 6 */ +#define CSDM_REG_NUM_OF_Q6_CMD 0xc225c +/* [ST 32] The number of commands received in queue 7 */ +#define CSDM_REG_NUM_OF_Q7_CMD 0xc2260 +/* [ST 32] The number of commands received in queue 8 */ +#define CSDM_REG_NUM_OF_Q8_CMD 0xc2264 +/* [ST 32] The number of commands received in queue 9 */ +#define CSDM_REG_NUM_OF_Q9_CMD 0xc2268 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define CSDM_REG_Q_COUNTER_START_ADDR 0xc2010 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc2548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_PARSER_EMPTY 0xc2550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define CSDM_REG_SYNC_SYNC_EMPTY 0xc2558 +/* [RW 32] Tick for timer counter. Applicable only when + ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define CSDM_REG_TIMER_TICK 0xc2000 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define CSEM_REG_ARB_CYCLE_SIZE 0x200034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define CSEM_REG_ARB_ELEMENT0 0x200020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 */ +#define CSEM_REG_ARB_ELEMENT1 0x200024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 + and ~csem_registers_arb_element1.arb_element1 */ +#define CSEM_REG_ARB_ELEMENT2 0x200028 +/* [RW 3] The source that is associated with arbitration element 3. 
Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~csem_registers_arb_element0.arb_element0 and + ~csem_registers_arb_element1.arb_element1 and + ~csem_registers_arb_element2.arb_element2 */ +#define CSEM_REG_ARB_ELEMENT3 0x20002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~csem_registers_arb_element0.arb_element0 + and ~csem_registers_arb_element1.arb_element1 and + ~csem_registers_arb_element2.arb_element2 and + ~csem_registers_arb_element3.arb_element3 */ +#define CSEM_REG_ARB_ELEMENT4 0x200030 +/* [RW 32] Interrupt mask register #0 read/write */ +#define CSEM_REG_CSEM_INT_MASK_0 0x200110 +#define CSEM_REG_CSEM_INT_MASK_1 0x200120 +/* [R 32] Interrupt register #0 read */ +#define CSEM_REG_CSEM_INT_STS_0 0x200104 +#define CSEM_REG_CSEM_INT_STS_1 0x200114 +/* [RW 32] Parity mask register #0 read/write */ +#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 +#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 +/* [R 32] Parity register #0 read */ +#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 +#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 +/* [RC 32] Parity register #0 read clear */ +#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 +#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 +#define CSEM_REG_ENABLE_IN 0x2000a4 +#define CSEM_REG_ENABLE_OUT 0x2000a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ +#define CSEM_REG_FAST_MEMORY 0x220000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define CSEM_REG_FIC0_DISABLE 0x200224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define CSEM_REG_FIC1_DISABLE 0x200234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define CSEM_REG_INT_TABLE 0x200400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define CSEM_REG_MSG_NUM_FIC0 0x200000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define CSEM_REG_MSG_NUM_FIC1 0x200004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define CSEM_REG_MSG_NUM_FOC0 0x200008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define CSEM_REG_MSG_NUM_FOC1 0x20000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define CSEM_REG_MSG_NUM_FOC2 0x200010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define CSEM_REG_MSG_NUM_FOC3 0x200014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define CSEM_REG_PAS_DISABLE 0x20024c +/* [WB 128] Debug only. Passive buffer memory */ +#define CSEM_REG_PASSIVE_BUFFER 0x202000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
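[Editor's note, not part of the patch: the CSEM_REG_ARB_ELEMENT0..4 comments above require each arbitration element source to differ from all earlier ones and to use one of the five documented source codes (0-4). A minimal validation sketch under only those two constraints; the helper name is hypothetical.]

    #include <linux/types.h>

    /* Check a proposed assignment before writing CSEM_REG_ARB_ELEMENT0..4:
     * sources must be in 0..4 and pairwise distinct.
     */
    static bool csem_arb_elements_valid(const u8 src[5])
    {
            int i, j;

            for (i = 0; i < 5; i++) {
                    if (src[i] > 4)                 /* only sources 0..4 exist */
                            return false;
                    for (j = 0; j < i; j++)
                            if (src[i] == src[j])   /* must not repeat earlier */
                                    return false;
            }
            return true;
    }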
*/ +#define CSEM_REG_PRAM 0x240000 +/* [R 16] Valid sleeping threads indication have bit per thread */ +#define CSEM_REG_SLEEP_THREADS_VALID 0x20026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define CSEM_REG_SLOW_EXT_STORE_EMPTY 0x2002a0 +/* [RW 16] List of free threads . There is a bit per thread. */ +#define CSEM_REG_THREADS_LIST 0x2002e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define CSEM_REG_TS_0_AS 0x200038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define CSEM_REG_TS_10_AS 0x200060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define CSEM_REG_TS_11_AS 0x200064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define CSEM_REG_TS_12_AS 0x200068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define CSEM_REG_TS_13_AS 0x20006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define CSEM_REG_TS_14_AS 0x200070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define CSEM_REG_TS_15_AS 0x200074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define CSEM_REG_TS_16_AS 0x200078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define CSEM_REG_TS_17_AS 0x20007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define CSEM_REG_TS_18_AS 0x200080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define CSEM_REG_TS_1_AS 0x20003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define CSEM_REG_TS_2_AS 0x200040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define CSEM_REG_TS_3_AS 0x200044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define CSEM_REG_TS_4_AS 0x200048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define CSEM_REG_TS_5_AS 0x20004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define CSEM_REG_TS_6_AS 0x200050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define CSEM_REG_TS_7_AS 0x200054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define CSEM_REG_TS_8_AS 0x200058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define CSEM_REG_TS_9_AS 0x20005c +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define CSEM_REG_VFPF_ERR_NUM 0x200380 +/* [RW 1] Parity mask register #0 read/write */ +#define DBG_REG_DBG_PRTY_MASK 0xc0a8 +/* [R 1] Parity register #0 read */ +#define DBG_REG_DBG_PRTY_STS 0xc09c +/* [RC 1] Parity register #0 read clear */ +#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0 +/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The + * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; + * 4.Completion function=0; 5.Error handling=0 */ +#define DMAE_REG_BACKWARD_COMP_EN 0x10207c +/* [RW 32] Commands memory. The address to command X; row Y is to calculated + as 14*X+Y. */ +#define DMAE_REG_CMD_MEM 0x102400 +#define DMAE_REG_CMD_MEM_SIZE 224 +/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c + initial value is all ones. */ +#define DMAE_REG_CRC16C_INIT 0x10201c +/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the + CRC-16 T10 initial value is all ones. */ +#define DMAE_REG_CRC16T10_INIT 0x102020 +/* [RW 2] Interrupt mask register #0 read/write */ +#define DMAE_REG_DMAE_INT_MASK 0x102054 +/* [RW 4] Parity mask register #0 read/write */ +#define DMAE_REG_DMAE_PRTY_MASK 0x102064 +/* [R 4] Parity register #0 read */ +#define DMAE_REG_DMAE_PRTY_STS 0x102058 +/* [RC 4] Parity register #0 read clear */ +#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c +/* [RW 1] Command 0 go. 
*/ +#define DMAE_REG_GO_C0 0x102080 +/* [RW 1] Command 1 go. */ +#define DMAE_REG_GO_C1 0x102084 +/* [RW 1] Command 10 go. */ +#define DMAE_REG_GO_C10 0x102088 +/* [RW 1] Command 11 go. */ +#define DMAE_REG_GO_C11 0x10208c +/* [RW 1] Command 12 go. */ +#define DMAE_REG_GO_C12 0x102090 +/* [RW 1] Command 13 go. */ +#define DMAE_REG_GO_C13 0x102094 +/* [RW 1] Command 14 go. */ +#define DMAE_REG_GO_C14 0x102098 +/* [RW 1] Command 15 go. */ +#define DMAE_REG_GO_C15 0x10209c +/* [RW 1] Command 2 go. */ +#define DMAE_REG_GO_C2 0x1020a0 +/* [RW 1] Command 3 go. */ +#define DMAE_REG_GO_C3 0x1020a4 +/* [RW 1] Command 4 go. */ +#define DMAE_REG_GO_C4 0x1020a8 +/* [RW 1] Command 5 go. */ +#define DMAE_REG_GO_C5 0x1020ac +/* [RW 1] Command 6 go. */ +#define DMAE_REG_GO_C6 0x1020b0 +/* [RW 1] Command 7 go. */ +#define DMAE_REG_GO_C7 0x1020b4 +/* [RW 1] Command 8 go. */ +#define DMAE_REG_GO_C8 0x1020b8 +/* [RW 1] Command 9 go. */ +#define DMAE_REG_GO_C9 0x1020bc +/* [RW 1] DMAE GRC Interface (Target; aster) enable. If 0 - the acknowledge + input is disregarded; valid is deasserted; all other signals are treated + as usual; if 1 - normal activity. */ +#define DMAE_REG_GRC_IFEN 0x102008 +/* [RW 1] DMAE PCI Interface (Request; ead; rite) enable. If 0 - the + acknowledge input is disregarded; valid is deasserted; full is asserted; + all other signals are treated as usual; if 1 - normal activity. */ +#define DMAE_REG_PCI_IFEN 0x102004 +/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the + initial value to the credit counter; related to the address. Read returns + the current value of the counter. */ +#define DMAE_REG_PXP_REQ_INIT_CRD 0x1020c0 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD0 0x170060 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD1 0x170064 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD2 0x170068 +/* [RW 8] Aggregation command. */ +#define DORQ_REG_AGG_CMD3 0x17006c +/* [RW 28] UCM Header. */ +#define DORQ_REG_CMHEAD_RX 0x170050 +/* [RW 32] Doorbell address for RBC doorbells (function 0). */ +#define DORQ_REG_DB_ADDR0 0x17008c +/* [RW 5] Interrupt mask register #0 read/write */ +#define DORQ_REG_DORQ_INT_MASK 0x170180 +/* [R 5] Interrupt register #0 read */ +#define DORQ_REG_DORQ_INT_STS 0x170174 +/* [RC 5] Interrupt register #0 read clear */ +#define DORQ_REG_DORQ_INT_STS_CLR 0x170178 +/* [RW 2] Parity mask register #0 read/write */ +#define DORQ_REG_DORQ_PRTY_MASK 0x170190 +/* [R 2] Parity register #0 read */ +#define DORQ_REG_DORQ_PRTY_STS 0x170184 +/* [RC 2] Parity register #0 read clear */ +#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188 +/* [RW 8] The address to write the DPM CID to STORM. */ +#define DORQ_REG_DPM_CID_ADDR 0x170044 +/* [RW 5] The DPM mode CID extraction offset. */ +#define DORQ_REG_DPM_CID_OFST 0x170030 +/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */ +#define DORQ_REG_DQ_FIFO_AFULL_TH 0x17007c +/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */ +#define DORQ_REG_DQ_FIFO_FULL_TH 0x170078 +/* [R 13] Current value of the DQ FIFO fill level according to following + pointer. The range is 0 - 256 FIFO rows; where each row stands for the + doorbell. */ +#define DORQ_REG_DQ_FILL_LVLF 0x1700a4 +/* [R 1] DQ FIFO full status. Is set; when FIFO filling level is more or + equal to full threshold; reset on full clear. */ +#define DORQ_REG_DQ_FULL_ST 0x1700c0 +/* [RW 28] The value sent to CM header in the case of CFC load error. 
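[Editor's note, not part of the patch: the DMAE command memory comment above gives the row index of dword Y of command X as 14*X+Y, and the per-command "go" registers are not laid out in numeric order, so a lookup table is the natural mapping from command index to GO register. A minimal sketch; the 4-byte spacing of 32-bit rows is an assumption, and the names below are illustrative. Note that DMAE_REG_CMD_MEM_SIZE (224) is consistent with 16 commands of 14 dwords each.]

    #include <linux/types.h>

    #define DMAE_CMD_WORDS 14       /* rows per command, per the 14*X+Y formula */

    /* Byte offset of dword 'row' of DMAE command 'idx' in the command memory
     * (assuming the usual 4-byte stride of 32-bit GRC registers).
     */
    static inline u32 dmae_cmd_mem_offset(u32 idx, u32 row)
    {
            return DMAE_REG_CMD_MEM + (DMAE_CMD_WORDS * idx + row) * 4;
    }

    /* "Command N go" registers, indexed by DMAE command number. */
    static const u32 dmae_reg_go_c[] = {
            DMAE_REG_GO_C0,  DMAE_REG_GO_C1,  DMAE_REG_GO_C2,  DMAE_REG_GO_C3,
            DMAE_REG_GO_C4,  DMAE_REG_GO_C5,  DMAE_REG_GO_C6,  DMAE_REG_GO_C7,
            DMAE_REG_GO_C8,  DMAE_REG_GO_C9,  DMAE_REG_GO_C10, DMAE_REG_GO_C11,
            DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
    };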
*/
+#define DORQ_REG_ERR_CMHEAD 0x170058
+#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec
+#define DORQ_REG_MODE_ACT 0x170008
+/* [RW 5] The normal mode CID extraction offset. */
+#define DORQ_REG_NORM_CID_OFST 0x17002c
+/* [RW 28] TCM Header when only TCP context is loaded. */
+#define DORQ_REG_NORM_CMHEAD_TX 0x17004c
+/* [RW 3] The number of simultaneous outstanding requests to the Context
+   Fetch Interface. */
+#define DORQ_REG_OUTST_REQ 0x17003c
+#define DORQ_REG_PF_USAGE_CNT 0x1701d0
+#define DORQ_REG_REGN 0x170038
+/* [R 4] Current value of response A counter credit. Initial credit is
+   configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+   register. */
+#define DORQ_REG_RSPA_CRD_CNT 0x1700ac
+/* [R 4] Current value of response B counter credit. Initial credit is
+   configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+   register. */
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+/* [RW 4] The initial credit at the Doorbell Response Interface. The write
+   writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
+   read reads this written value. */
+#define DORQ_REG_RSP_INIT_CRD 0x170048
+#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0
+#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4
+#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4
+#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8
+/* [RW 10] VF type validation mask value */
+#define DORQ_REG_VF_TYPE_MASK_0 0x170218
+/* [RW 17] VF type validation Max MCID value */
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8
+/* [RW 17] VF type validation Min MCID value */
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298
+/* [RW 10] VF type validation comp value */
+#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
+#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+
+/* [RW 4] Initial activity counter value on the load request; when the
+   shortcut is done. */
+#define DORQ_REG_SHRT_ACT_CNT 0x170070
+/* [RW 28] TCM Header when both ULP and TCP context is loaded.
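[Editor's note, not part of the patch: per the comments above, a single write to DORQ_REG_RSP_INIT_CRD loads both doorbell-response credit counters. A minimal sketch of programming it and sanity-checking the read-only counters, assuming 'regview' is the ioremapped register BAR and that the interface is idle so the counters still hold the initial value; the function name is hypothetical.]

    #include <linux/io.h>
    #include <linux/types.h>

    /* Program the 4-bit doorbell response credit and verify both per-
     * interface credit counters reflect it.
     */
    static bool dorq_set_rsp_credit(void __iomem *regview, u32 crd)
    {
            writel(crd, regview + DORQ_REG_RSP_INIT_CRD);   /* [RW 4] */

            return readl(regview + DORQ_REG_RSPA_CRD_CNT) == crd &&
                   readl(regview + DORQ_REG_RSPB_CRD_CNT) == crd;
    }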
*/ +#define DORQ_REG_SHRT_CMHEAD 0x170054 +#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4) +#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0) +#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3) +#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7) +#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) +#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) +#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) +#define DORQ_REG_VF_USAGE_CNT 0x170320 +#define HC_REG_AGG_INT_0 0x108050 +#define HC_REG_AGG_INT_1 0x108054 +#define HC_REG_ATTN_BIT 0x108120 +#define HC_REG_ATTN_IDX 0x108100 +#define HC_REG_ATTN_MSG0_ADDR_L 0x108018 +#define HC_REG_ATTN_MSG1_ADDR_L 0x108020 +#define HC_REG_ATTN_NUM_P0 0x108038 +#define HC_REG_ATTN_NUM_P1 0x10803c +#define HC_REG_COMMAND_REG 0x108180 +#define HC_REG_CONFIG_0 0x108000 +#define HC_REG_CONFIG_1 0x108004 +#define HC_REG_FUNC_NUM_P0 0x1080ac +#define HC_REG_FUNC_NUM_P1 0x1080b0 +/* [RW 3] Parity mask register #0 read/write */ +#define HC_REG_HC_PRTY_MASK 0x1080a0 +/* [R 3] Parity register #0 read */ +#define HC_REG_HC_PRTY_STS 0x108094 +/* [RC 3] Parity register #0 read clear */ +#define HC_REG_HC_PRTY_STS_CLR 0x108098 +#define HC_REG_INT_MASK 0x108108 +#define HC_REG_LEADING_EDGE_0 0x108040 +#define HC_REG_LEADING_EDGE_1 0x108048 +#define HC_REG_MAIN_MEMORY 0x108800 +#define HC_REG_MAIN_MEMORY_SIZE 152 +#define HC_REG_P0_PROD_CONS 0x108200 +#define HC_REG_P1_PROD_CONS 0x108400 +#define HC_REG_PBA_COMMAND 0x108140 +#define HC_REG_PCI_CONFIG_0 0x108010 +#define HC_REG_PCI_CONFIG_1 0x108014 +#define HC_REG_STATISTIC_COUNTERS 0x109000 +#define HC_REG_TRAILING_EDGE_0 0x108044 +#define HC_REG_TRAILING_EDGE_1 0x10804c +#define HC_REG_UC_RAM_ADDR_0 0x108028 +#define HC_REG_UC_RAM_ADDR_1 0x108030 +#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068 +#define HC_REG_VQID_0 0x108008 +#define HC_REG_VQID_1 0x10800c +#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1) +#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0) +#define IGU_REG_ATTENTION_ACK_BITS 0x130108 +/* [R 4] Debug: attn_fsm */ +#define IGU_REG_ATTN_FSM 0x130054 +#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c +#define IGU_REG_ATTN_MSG_ADDR_L 0x130120 +/* [R 4] Debug: [3] - attention write done message is pending (0-no pending; + * 1-pending). [2:0] = PFID. Pending means attention message was sent; but + * write done didn't receive. */ +#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030 +#define IGU_REG_BLOCK_CONFIGURATION 0x130000 +#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124 +#define IGU_REG_COMMAND_REG_CTRL 0x13012c +/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit + * is clear. The bits in this registers are set and clear via the producer + * command. Data valid only in addresses 0-4. all the rest are zero. */ +#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200 +/* [R 5] Debug: ctrl_fsm */ +#define IGU_REG_CTRL_FSM 0x130064 +/* [R 1] data available for error memory. If this bit is clear do not red + * from error_handling_memory. */ +#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 +/* [RW 11] Parity mask register #0 read/write */ +#define IGU_REG_IGU_PRTY_MASK 0x1300a8 +/* [R 11] Parity register #0 read */ +#define IGU_REG_IGU_PRTY_STS 0x13009c +/* [RC 11] Parity register #0 read clear */ +#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0 +/* [R 4] Debug: int_handle_fsm */ +#define IGU_REG_INT_HANDLE_FSM 0x130050 +#define IGU_REG_LEADING_EDGE_LATCH 0x130134 +/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid. 
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF + * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */ +#define IGU_REG_MAPPING_MEMORY 0x131000 +#define IGU_REG_MAPPING_MEMORY_SIZE 136 +#define IGU_REG_PBA_STATUS_LSB 0x130138 +#define IGU_REG_PBA_STATUS_MSB 0x13013c +#define IGU_REG_PCI_PF_MSI_EN 0x130140 +#define IGU_REG_PCI_PF_MSIX_EN 0x130144 +#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148 +/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no + * pending; 1 = pending. Pendings means interrupt was asserted; and write + * done was not received. Data valid only in addresses 0-4. all the rest are + * zero. */ +#define IGU_REG_PENDING_BITS_STATUS 0x130300 +#define IGU_REG_PF_CONFIGURATION 0x130154 +/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping + * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default + * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod; + * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode + * - In backward compatible mode; for non default SB; each even line in the + * memory holds the U producer and each odd line hold the C producer. The + * first 128 producer are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The + * last 20 producers are for the DSB for each PF. each PF has five segments + * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods; + * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */ +#define IGU_REG_PROD_CONS_MEMORY 0x132000 +/* [R 3] Debug: pxp_arb_fsm */ +#define IGU_REG_PXP_ARB_FSM 0x130068 +/* [RW 6] Write one for each bit will reset the appropriate memory. When the + * memory reset finished the appropriate bit will be clear. Bit 0 - mapping + * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3 + * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */ +#define IGU_REG_RESET_MEMORIES 0x130158 +/* [R 4] Debug: sb_ctrl_fsm */ +#define IGU_REG_SB_CTRL_FSM 0x13004c +#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c +#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160 +#define IGU_REG_SB_MASK_LSB 0x130164 +#define IGU_REG_SB_MASK_MSB 0x130168 +/* [RW 16] Number of command that were dropped without causing an interrupt + * due to: read access for WO BAR address; or write access for RO BAR + * address or any access for reserved address or PCI function error is set + * and address is not MSIX; PBA or cleanup */ +#define IGU_REG_SILENT_DROP 0x13016c +/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 - + * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per + * PF; 68-71 number of ATTN messages per PF */ +#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800 +/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a + * timer mask command arrives. Value must be bigger than 100. */ +#define IGU_REG_TIMER_MASKING_VALUE 0x13003c +#define IGU_REG_TRAILING_EDGE_LATCH 0x130104 +#define IGU_REG_VF_CONFIGURATION 0x130170 +/* [WB_R 32] Each bit represent write done pending bits status for that SB + * (MSI/MSIX message was sent and write done was not received yet). 0 = + * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. 
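[Editor's note, not part of the patch: the IGU mapping-CAM comment above spells out the entry layout ([0] valid, [6:1] vector number, [13:7] FID, with bit 13 selecting the PF or VF encoding). A minimal sketch of assembling such an entry; the helper names are hypothetical and only the bit positions quoted above are assumed.]

    #include <linux/types.h>

    /* Mapping entry for a VF: [13] = 0, [12:7] = VF number. */
    static inline u32 igu_map_entry_vf(u32 vector, u32 vf_num)
    {
            return 0x1 |                    /* [0]    valid                */
                   ((vector & 0x3f) << 1) | /* [6:1]  vector number        */
                   ((vf_num & 0x3f) << 7);  /* [12:7] VF number, [13] = 0  */
    }

    /* Mapping entry for a PF: [13] = 1, [12:10] = 0, [9:7] = PF number. */
    static inline u32 igu_map_entry_pf(u32 vector, u32 pf_num)
    {
            return 0x1 |
                   ((vector & 0x3f) << 1) |
                   ((pf_num & 0x7) << 7) |  /* [9:7] PF number */
                   (1u << 13);              /* [13]  PF flag   */
    }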
*/ +#define IGU_REG_WRITE_DONE_PENDING 0x130480 +#define MCP_A_REG_MCPR_SCRATCH 0x3a0000 +#define MCP_REG_MCPR_ACCESS_LOCK 0x8009c +#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c +#define MCP_REG_MCPR_GP_INPUTS 0x800c0 +#define MCP_REG_MCPR_GP_OENABLE 0x800c8 +#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4 +#define MCP_REG_MCPR_IMC_COMMAND 0x85900 +#define MCP_REG_MCPR_IMC_DATAREG0 0x85920 +#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904 +#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c +#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 +#define MCP_REG_MCPR_NVM_ADDR 0x8640c +#define MCP_REG_MCPR_NVM_CFG4 0x8642c +#define MCP_REG_MCPR_NVM_COMMAND 0x86400 +#define MCP_REG_MCPR_NVM_READ 0x86410 +#define MCP_REG_MCPR_NVM_SW_ARB 0x86420 +#define MCP_REG_MCPR_NVM_WRITE 0x86408 +#define MCP_REG_MCPR_SCRATCH 0xa0000 +#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1) +#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0) +/* [R 32] read first 32 bit after inversion of function 0. mapped as + follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; + [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9] + GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE + glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0; + [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] + MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB + Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw + interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity + error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw + interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF + Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 0xa42c +#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1 0xa430 +/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0] + NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw + interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_1_MCP 0xa434 +/* [R 32] read second 32 bit after inversion of function 0. 
mapped as + follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 0xa438 +#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1 0xa43c +/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0] + PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error; + [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; + [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] + XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */ +#define MISC_REG_AEU_AFTER_INVERT_2_MCP 0xa440 +/* [R 32] read third 32 bit after inversion of function 0. mapped as + follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity + error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5] + PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 0xa444 +#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1 0xa448 +/* [R 32] read third 32 bit after inversion of mcp. 
mapped as follows: [0] + CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP + Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient + Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity + error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw + interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14] + MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17] + Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW + timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3 + func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1 + func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW + timers attn_4 func1; [30] General attn0; [31] General attn1; */ +#define MISC_REG_AEU_AFTER_INVERT_3_MCP 0xa44c +/* [R 32] read fourth 32 bit after inversion of function 0. mapped as + follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 0xa450 +#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1 0xa454 +/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0] + General attn2; [1] General attn3; [2] General attn4; [3] General attn5; + [4] General attn6; [5] General attn7; [6] General attn8; [7] General + attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11] + General attn13; [12] General attn14; [13] General attn15; [14] General + attn16; [15] General attn17; [16] General attn18; [17] General attn19; + [18] General attn20; [19] General attn21; [20] Main power interrupt; [21] + RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24] + RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout + attention; [27] GRC Latched reserved access attention; [28] MCP Latched + rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched + ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 +/* [R 32] Read fifth 32 bit after inversion of function 0. 
Mapped as + * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */ +#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700 +/* [W 14] write to this register results with the clear of the latched + signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in + d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP + latch; one in d5 clears GRC Latched timeout attention; one in d6 clears + GRC Latched reserved access attention; one in d7 clears Latched + rom_parity; one in d8 clears Latched ump_rx_parity; one in d9 clears + Latched ump_tx_parity; one in d10 clears Latched scpad_parity (both + ports); one in d11 clears pxpv_misc_mps_attn; one in d12 clears + pxp_misc_exp_rom_attn0; one in d13 clears pxp_misc_exp_rom_attn1; read + from this register return zero */ +#define MISC_REG_AEU_CLR_LATCH_SIGNAL 0xa45c +/* [RW 32] first 32b for enabling the output for function 0 output0. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 0xa06c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1 0xa07c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2 0xa08c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3 0xa09c +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5 0xa0bc +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6 0xa0cc +#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7 0xa0dc +/* [RW 32] first 32b for enabling the output for function 1 output0. 
mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function + 1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 0xa10c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 0xa11c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 0xa12c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3 0xa13c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5 0xa15c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6 0xa16c +#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7 0xa17c +/* [RW 32] first 32b for enabling the output for close the gate nig. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_NIG_0 0xa0ec +#define MISC_REG_AEU_ENABLE1_NIG_1 0xa18c +/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped + as follows: [0] NIG attention for function0; [1] NIG attention for + function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function + 0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8] + GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X + indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; + [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] + SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] + TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] + TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_ENABLE1_PXP_0 0xa0fc +#define MISC_REG_AEU_ENABLE1_PXP_1 0xa19c +/* [RW 32] second 32b for enabling the output for function 0 output0. 
mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0 0xa070 +#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1 0xa080 +/* [RW 32] second 32b for enabling the output for function 1 output0. mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0 0xa110 +#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1 0xa120 +/* [RW 32] second 32b for enabling the output for close the gate nig. mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_NIG_0 0xa0f0 +#define MISC_REG_AEU_ENABLE2_NIG_1 0xa190 +/* [RW 32] second 32b for enabling the output for close the gate pxp. 
mapped + as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM + Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw + interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity + error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw + interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] + NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; + [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw + interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM + Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI + Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM + Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw + interrupt; */ +#define MISC_REG_AEU_ENABLE2_PXP_0 0xa100 +#define MISC_REG_AEU_ENABLE2_PXP_1 0xa1a0 +/* [RW 32] third 32b for enabling the output for function 0 output0. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0 0xa074 +#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1 0xa084 +/* [RW 32] third 32b for enabling the output for function 1 output0. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0 0xa114 +#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1 0xa124 +/* [RW 32] third 32b for enabling the output for close the gate nig. 
mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_NIG_0 0xa0f4 +#define MISC_REG_AEU_ENABLE3_NIG_1 0xa194 +/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped + as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP + Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; + [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw + interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity + error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) + Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16] + pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20] + MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23] + SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW + timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 + func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General + attn1; */ +#define MISC_REG_AEU_ENABLE3_PXP_0 0xa104 +#define MISC_REG_AEU_ENABLE3_PXP_1 0xa1a4 +/* [RW 32] fourth 32b for enabling the output for function 0 output0.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 0xa078 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2 0xa098 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4 0xa0b8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5 0xa0c8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6 0xa0d8 +#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7 0xa0e8 +/* [RW 32] fourth 32b for enabling the output for function 1 output0.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General 
attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 0xa118 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2 0xa138 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4 0xa158 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5 0xa168 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6 0xa178 +#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7 0xa188 +/* [RW 32] fourth 32b for enabling the output for close the gate nig.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_NIG_0 0xa0f8 +#define MISC_REG_AEU_ENABLE4_NIG_1 0xa198 +/* [RW 32] fourth 32b for enabling the output for close the gate pxp.mapped + as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3] + General attn5; [4] General attn6; [5] General attn7; [6] General attn8; + [7] General attn9; [8] General attn10; [9] General attn11; [10] General + attn12; [11] General attn13; [12] General attn14; [13] General attn15; + [14] General attn16; [15] General attn17; [16] General attn18; [17] + General attn19; [18] General attn20; [19] General attn21; [20] Main power + interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN + Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC + Latched timeout attention; [27] GRC Latched reserved access attention; + [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP + Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ +#define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 +#define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 +/* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 +/* [RW 32] Fifth 32b for enabling the output for function 1 output0. 
Mapped + * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC + * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] + * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 + * parity; [31-10] Reserved; */ +#define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 +/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu + 128 bit vector */ +#define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 +#define MISC_REG_AEU_GENERAL_ATTN_1 0xa004 +#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028 +#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c +#define MISC_REG_AEU_GENERAL_ATTN_12 0xa030 +#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 +#define MISC_REG_AEU_GENERAL_ATTN_3 0xa00c +#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 +#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 +#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 +#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c +#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020 +#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024 +#define MISC_REG_AEU_GENERAL_MASK 0xa61c +/* [RW 32] first 32b for inverting the input for function 0; for each bit: + 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for + function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp; + [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1; + [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event + function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP + Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14] + SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication + for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS + Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw + interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM + Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI + Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */ +#define MISC_REG_AEU_INVERTER_1_FUNC_0 0xa22c +#define MISC_REG_AEU_INVERTER_1_FUNC_1 0xa23c +/* [RW 32] second 32b for inverting the input for function 0; for each bit: + 0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity + error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw + interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM + Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw + interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12] + DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity + error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux + PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt; + [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error; + [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt; + [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error; + [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */ +#define MISC_REG_AEU_INVERTER_2_FUNC_0 0xa230 +#define MISC_REG_AEU_INVERTER_2_FUNC_1 0xa240 +/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0; + [9:8] = raserved. Zero = mask; one = unmask */ +#define MISC_REG_AEU_MASK_ATTN_FUNC_0 0xa060 +#define MISC_REG_AEU_MASK_ATTN_FUNC_1 0xa064 +/* [RW 1] If set a system kill occurred */ +#define MISC_REG_AEU_SYS_KILL_OCCURRED 0xa610 +/* [RW 32] Represent the status of the input vector to the AEU when a system + kill occurred. The register is reset in por reset. 
Mapped as follows: [0] + NIG attention for function0; [1] NIG attention for function1; [2] GPIO1 + mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; + [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10] + PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event + function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP + Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for + mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19] + BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC + Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw + interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI + Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw + interrupt; */ +#define MISC_REG_AEU_SYS_KILL_STATUS_0 0xa600 +#define MISC_REG_AEU_SYS_KILL_STATUS_1 0xa604 +#define MISC_REG_AEU_SYS_KILL_STATUS_2 0xa608 +#define MISC_REG_AEU_SYS_KILL_STATUS_3 0xa60c +/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1 + Port. */ +#define MISC_REG_BOND_ID 0xa400 +/* [R 16] These bits indicate the part number for the chip. */ +#define MISC_REG_CHIP_NUM 0xa408 +/* [R 4] These bits indicate the base revision of the chip. This value + starts at 0x0 for the A0 tape-out and increments by one for each + all-layer tape-out. */ +#define MISC_REG_CHIP_REV 0xa40c +/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11- + * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72]; + * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */ +#define MISC_REG_CHIP_TYPE 0xac60 +#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1) +#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858 +/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled + * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk + * 25MHz. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c +/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI + * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0 +/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that + * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM + * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that + * the FW command that all Queues are empty is disabled. When 0 indicates + * that the FW command that all Queues are empty is enabled. [2] - FW Early + * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early + * Exit command is disabled. When 0 indicates that the FW Early Exit command + * is enabled. This bit applicable only in the EXIT Events Mask registers. + * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication + * is disabled. When 0 indicates that the PBF Request indication is enabled. + * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF + * Request indication is disabled. When 0 indicates that the Tx Other Than + * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1 + * indicates that the RX EEE LPI Status indication is disabled. When 0 + * indicates that the RX EEE LPI Status indication is enabled. In the EXIT + * Events Masks registers; this bit masks the falling edge detect of the LPI + * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that + * the Tx Pause indication is disabled. 
When 0 indicates that the Tx Pause + * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the + * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY + * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM + * IDLE indication is disabled. When 0 indicates that the QM IDLE indication + * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When + * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0 + * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1 + * Status Mask. When 1 indicates that the L1 Status indication from the PCIE + * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication + * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this + * bit masks the falling edge detect of the L1 status (L1 is on - off). [11] + * - P0 E0 EEE EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI + * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1 + * indicates that the P0 EEE LPI REQ indication is disabled. When =0 + * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE + * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is + * disabled. When =0 indicates that the P0 EEE LPI REQ indication is + * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ + * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1 + * REQ indication is disabled. When =0 indicates that the L1 indication is + * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates + * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx + * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status + * Falling Edge Detec indication is enabled (Rx EEE LPI is on - off). This + * bit is applicable only in the EXIT Events Masks registers. [17] - L1 + * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling + * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off). + * When =0 indicates that the L1 Status Falling Edge Detect indication from + * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in + * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880 +/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates + * that the Vmain SM end state is disabled. When 0 indicates that the Vmain + * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates + * that the FW command that all Queues are empty is disabled. When 0 + * indicates that the FW command that all Queues are empty is enabled. [2] - + * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW + * Early Exit command is disabled. When 0 indicates that the FW Early Exit + * command is enabled. This bit applicable only in the EXIT Events Mask + * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request + * indication is disabled. When 0 indicates that the PBF Request indication + * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other + * Than PBF Request indication is disabled. When 0 indicates that the Tx + * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status + * Mask. 
When 1 indicates that the RX EEE LPI Status indication is disabled. + * When 0 indicates that the RX LPI Status indication is enabled. In the + * EXIT Events Masks registers; this bit masks the falling edge detect of + * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1 + * indicates that the Tx Pause indication is disabled. When 0 indicates that + * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1 + * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates + * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1 + * indicates that the QM IDLE indication is disabled. When 0 indicates that + * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9] + * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for + * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for + * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1 + * Status indication from the PCIE CORE is disabled. When 0 indicates that + * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the + * EXIT Events Masks registers; this bit masks the falling edge detect of + * the L1 status (L1 is on - off). [11] - P0 E0 EEE EEE LPI REQ Mask. When + * =1 indicates that the P0 E0 EEE EEE LPI REQ indication is disabled. When + * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1 + * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication + * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is + * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE + * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ + * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates + * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that + * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1 + * indicates that the L1 REQ indication is disabled. When =0 indicates that + * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask. + * When =1 indicates that the RX EEE LPI Status Falling Edge Detect + * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that + * the RX EEE LPI Status Falling Edge Detec indication is enabled (Rx EEE + * LPI is on - off). This bit is applicable only in the EXIT Events Masks + * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the + * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled + * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge + * Detect indication from the PCIE CORE is enabled (L1 is on - off). This + * bit is applicable only in the EXIT Events Masks registers.Clock 25MHz. + * Reset on hard reset. */ +#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888 +/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number + * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only + * register. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8 +/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number + * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only + * register. Reset on hard reset. */ +#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 0xa8bc +/* [RW 32] The following driver registers(1...16) represent 16 drivers and + 32 clients. Each client can be controlled by one driver only. 
One in each + bit represents that this driver controls the appropriate client (Ex: bit 5 + set means this driver controls client number 5). addr1 = set; addr0 = + clear; read from both addresses will give the same result = status. A write + to address 1 will set a request to control all the clients whose + appropriate bit (in the write command) is set. If the client is free (the + appropriate bit in all the other drivers is clear) one will be written to + that driver register; if the client isn't free the bit will remain zero. + If the appropriate bit is set (the driver requests to gain control of a + client it already controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW + interrupt will be asserted. A write to address 0 will set a request to + free all the clients whose appropriate bit (in the write command) is + set. If the appropriate bit is clear (the driver requests to free a client + it doesn't control) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will + be asserted. */ +#define MISC_REG_DRIVER_CONTROL_1 0xa510 +#define MISC_REG_DRIVER_CONTROL_7 0xa3c8 +/* [RW 1] e1hmf for WOL. If clear; the WOL signal to the PXP will be sent on bit 0 + only. */ +#define MISC_REG_E1HMF_MODE 0xa5f8 +/* [R 1] Status of four port mode path swap input pin. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c +/* [RW 2] 4 port path swap overwrite. [0] - Overwrite control; if it is 0 - + the path_swap output is equal to 4 port mode path swap input pin; if it + is 1 - the path_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the path_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738 +/* [R 1] Status of 4 port mode port swap input pin. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754 +/* [RW 2] 4 port port swap overwrite. [0] - Overwrite control; if it is 0 - + the port_swap output is equal to 4 port mode port swap input pin; if it + is 1 - the port_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the port_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734 +/* [RW 32] Debug only: spare RW register reset by core reset */ +#define MISC_REG_GENERIC_CR_0 0xa460 +#define MISC_REG_GENERIC_CR_1 0xa464 +/* [RW 32] Debug only: spare RW register reset by por reset */ +#define MISC_REG_GENERIC_POR_1 0xa474 +/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to + use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO + can not be configured as an output. Each output has its output enable in + the MCP register space; but this bit needs to be set to make use of that. + Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When + set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON. + When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change + the i/o to an output and will drive the TimeSync output. Bit[31:7]: + spare. Global register. Reset by hard reset. */ +#define MISC_REG_GEN_PURP_HWG 0xa9a0 +/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of + these bits is written as a '1'; the corresponding GPIO bit will turn off + its drivers and become an input. This is the reset state of all GPIO + pins. The read value of these bits will be a '1' if that last command + (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+ [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written + as a '1'; the corresponding GPIO bit will drive low. The read value of + these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for + this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port 0; + When any of these bits is written as a '1'; the corresponding GPIO + bit will drive high (if it has that capability). The read value of these + bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this + bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0; + RO; These bits indicate the read value of each of the eight GPIO pins. + This is the result value of the pin; not the drive value. Writing these + bits will have no effect. */ +#define MISC_REG_GPIO 0xa490 +/* [RW 8] These bits enable the GPIO_INTs to signal events to the + IGU/MCP according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2] + p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2; + [7] p1_gpio_3; */ +#define MISC_REG_GPIO_EVENT_EN 0xa2bc +/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a + '1' to these bits clears the corresponding bit in the #OLD_VALUE register. + This will acknowledge an interrupt on the falling edge of corresponding + GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16] OLD_SET port0; + Writing a '1' to these bits sets the corresponding bit in the #OLD_VALUE + register. This will acknowledge an interrupt on the rising edge of + corresponding GPIO input (reset value 0). [15-12] OLD_VALUE port1; [11-8] + OLD_VALUE port0; RO; These bits indicate the old value of the GPIO input + value. When the ~INT_STATE bit is set; this bit indicates the OLD value + of the pin such that if ~INT_STATE is set and this bit is '0'; then the + interrupt is due to a low to high edge. If ~INT_STATE is set and this bit + is '1'; then the interrupt is due to a high to low edge (reset value 0). + [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits indicate the + current GPIO interrupt state for each GPIO pin. This bit is cleared when + the appropriate #OLD_SET or #OLD_CLR command bit is written. This bit is + set when the GPIO input does not match the current value in #OLD_VALUE + (reset value 0). */ +#define MISC_REG_GPIO_INT 0xa494 +/* [R 28] this field holds the last information that caused reserved + attention. bits [19:0] - address; [22:20] function; [23] reserved; + [27:24] the master that caused the attention - according to the following + encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + dbu; 8 = dmae */ +#define MISC_REG_GRC_RSV_ATTN 0xa3c0 +/* [R 28] this field holds the last information that caused timeout + attention. bits [19:0] - address; [22:20] function; [23] reserved; + [27:24] the master that caused the attention - according to the following + encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = + dbu; 8 = dmae */ +#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 +/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any + access that does not finish within + ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is + cleared; this timeout is disabled. If this timeout occurs; the GRC shall + assert its attention output. */ +#define MISC_REG_GRC_TIMEOUT_EN 0xa280 +/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. 
inside order of + the bits is: [2:0] OAC reset value 001) CML output buffer bias control; + 111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl + (reset value 001) Charge pump current control; 111 for 720u; 011 for + 600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00) + Global bias control; When bit 7 is high bias current will be 10 0gh; When + bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8] + Pll_observe (reset value 010) Bits to control observability. bit 10 is + for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl + (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V + and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning + sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted + internally). [14] reserved (reset value 0) Reset for VCO sequencer is + connected to RESET input directly. [15] capRetry_en (reset value 0) + enable retry on cap search failure (inverted). [16] freqMonitor_e (reset + value 0) bit to continuously monitor vco freq (inverted). [17] + freqDetRestart_en (reset value 0) bit to enable restart when not freq + locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable + retry on freq det failure(inverted). [19] pllForceFdone_en (reset value + 0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20] + pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass + (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value + 0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0) + bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to + enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force + capPass. [26] capRestart (reset value 0) bit to force cap sequencer to + restart. [27] capSelectM_en (reset value 0) bit to enable cap select + register bits. */ +#define MISC_REG_LCPLL_CTRL_1 0xa2a4 +#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8 +/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR + * reset. */ +#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74 +/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */ +#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78 +/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR + * reset. */ +#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c +/* [RW 4] Interrupt mask register #0 read/write */ +#define MISC_REG_MISC_INT_MASK 0xa388 +/* [RW 1] Parity mask register #0 read/write */ +#define MISC_REG_MISC_PRTY_MASK 0xa398 +/* [R 1] Parity register #0 read */ +#define MISC_REG_MISC_PRTY_STS 0xa38c +/* [RC 1] Parity register #0 read clear */ +#define MISC_REG_MISC_PRTY_STS_CLR 0xa390 +#define MISC_REG_NIG_WOL_P0 0xa270 +#define MISC_REG_NIG_WOL_P1 0xa274 +/* [R 1] If set indicate that the pcie_rst_b was asserted without perst + assertion */ +#define MISC_REG_PCIE_HOT_RESET 0xa618 +/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911. 
+ inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1 + divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1 + divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2 + divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2 + divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9] + freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1] + (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value + 1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16] + Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset + value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value + 1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0); + [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25] + Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27] + testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29] + testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31] + testa_en (reset value 0); */ +#define MISC_REG_PLL_STORM_CTRL_1 0xa294 +#define MISC_REG_PLL_STORM_CTRL_2 0xa298 +#define MISC_REG_PLL_STORM_CTRL_3 0xa29c +#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0 +/* [R 1] Status of 4 port mode enable input pin. */ +#define MISC_REG_PORT4MODE_EN 0xa750 +/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 - + * the port4mode_en output is equal to 4 port mode input pin; if it is 1 - + * the port4mode_en output is equal to bit[1] of this register; [1] - + * Overwrite value. If bit[0] of this register is 1 this is the value that + * receives the port4mode_en output . */ +#define MISC_REG_PORT4MODE_EN_OVWR 0xa720 +/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset; + write/read zero = the specific block is in reset; addr 0-wr- the write + value will be written to the register; addr 1-set - one will be written + to all the bits that have the value of one in the data written (bits that + have the value of zero will not be change) ; addr 2-clear - zero will be + written to all the bits that have the value of one in the data written + (bits that have the value of zero will not be change); addr 3-ignore; + read ignore from all addr except addr 00; inside order of the bits is: + [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc; + [5] rst_mcp_n_reset_reg_hard_core; [6] rst_ mcp_n_hard_core_rst_b; [7] + rst_ mcp_n_reset_cmn_cpu; [8] rst_ mcp_n_reset_cmn_core; [9] rst_rbcn; + [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13] + Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core; 16] + rst_pxp_rq_rd_wr; 31:17] reserved */ +#define MISC_REG_RESET_REG_1 0xa580 +#define MISC_REG_RESET_REG_2 0xa590 +/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is + shared with the driver resides */ +#define MISC_REG_SHARED_MEM_ADDR 0xa2b4 +/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1'; + the corresponding SPIO bit will turn off it's drivers and become an + input. This is the reset state of all SPIO pins. The read value of these + bits will be a '1' if that last command (#SET; #CL; or #FLOAT) for this + bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits + is written as a '1'; the corresponding SPIO bit will drive low. The read + value of these bits will be a '1' if that last command (#SET; #CLR; or +#FLOAT) for this bit was a #CLR. (reset value 0). 
[15-8] SET When any of + these bits is written as a '1'; the corresponding SPIO bit will drive + high (if it has that capability). The read value of these bits will be a + '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET. + (reset value 0). [7-0] VALUE RO; These bits indicate the read value of + each of the eight SPIO pins. This is the result value of the pin; not the + drive value. Writing these bits will have not effect. Each 8 bits field + is divided as follows: [0] VAUX Enable; when pulsed low; enables supply + from VAUX. (This is an output pin only; the FLOAT field is not applicable + for this pin); [1] VAUX Disable; when pulsed low; disables supply form + VAUX. (This is an output pin only; FLOAT field is not applicable for this + pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to + select VAUX supply. (This is an output pin only; it is not controlled by + the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT + field is not applicable for this pin; only the VALUE fields is relevant - + it reflects the output value); [3] port swap [4] spio_4; [5] spio_5; [6] + Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP + device ID select; read by UMP firmware. */ +#define MISC_REG_SPIO 0xa4fc +/* [RW 8] These bits enable the SPIO_INTs to signals event to the IGU/MC. + according to the following map: [3:0] reserved; [4] spio_4 [5] spio_5; + [7:0] reserved */ +#define MISC_REG_SPIO_EVENT_EN 0xa2b8 +/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bit clears the + corresponding bit in the #OLD_VALUE register. This will acknowledge an + interrupt on the falling edge of corresponding SPIO input (reset value + 0). [23-16] OLD_SET Writing a '1' to these bit sets the corresponding bit + in the #OLD_VALUE register. This will acknowledge an interrupt on the + rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE + RO; These bits indicate the old value of the SPIO input value. When the + ~INT_STATE bit is set; this bit indicates the OLD value of the pin such + that if ~INT_STATE is set and this bit is '0'; then the interrupt is due + to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the + interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE + RO; These bits indicate the current SPIO interrupt state for each SPIO + pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR + command bit is written. This bit is set when the SPIO input does not + match the current value in #OLD_VALUE (reset value 0). */ +#define MISC_REG_SPIO_INT 0xa500 +/* [RW 32] reload value for counter 4 if reload; the value will be reload if + the counter reached zero and the reload bit + (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ +#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc +/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses + in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 - + timer 8 */ +#define MISC_REG_SW_TIMER_VAL 0xa5c0 +/* [R 1] Status of two port mode path swap input pin. */ +#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758 +/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the + path_swap output is equal to 2 port mode path swap input pin; if it is 1 + - the path_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the path_swap output. Reset on Hard reset. 
*/ +#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c +/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are + loaded; 0-prepare; 1-unprepare */ +#define MISC_REG_UNPREPARED 0xa424 +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) +/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or + * not it is the recipient of the message on the MDIO interface. The value + * is compared to the value on ctrl_md_devad. Drives output + * misc_xgxs0_phy_addr. Global register. */ +#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc +#define MISC_REG_WC0_RESET 0xac30 +/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system + side. This should be less than or equal to phy_port_mode; if some of the + ports are not used. This enables reduction of frequency on the core side. + This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 - + Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap + input for the XMAC_MP core; and should be changed only while reset is + held low. Reset on Hard reset. */ +#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964 +/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp + Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode; + 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the + XMAC_MP core; and should be changed only while reset is held low. Reset + on Hard reset. */ +#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960 +/* [RW 32] 1 [47] Packet Size = 64 Writes to this register write bits 31:0. + * Reads from this register will clear bits 31:0. */ +#define MSTAT_REG_RX_STAT_GR64_LO 0x200 +/* [RW 32] 1 [00] Tx Good Packet Count Writes to this register write bits + * 31:0. Reads from this register will clear bits 31:0. 
*/ +#define MSTAT_REG_TX_STAT_GTXPOK_LO 0 +#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) +#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) +#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS (0xf<<18) +/* [RW 1] Input enable for RX_BMAC0 IF */ +#define NIG_REG_BMAC0_IN_EN 0x100ac +/* [RW 1] output enable for TX_BMAC0 IF */ +#define NIG_REG_BMAC0_OUT_EN 0x100e0 +/* [RW 1] output enable for TX BMAC pause port 0 IF */ +#define NIG_REG_BMAC0_PAUSE_OUT_EN 0x10110 +/* [RW 1] output enable for RX_BMAC0_REGS IF */ +#define NIG_REG_BMAC0_REGS_OUT_EN 0x100e8 +/* [RW 1] output enable for RX BRB1 port0 IF */ +#define NIG_REG_BRB0_OUT_EN 0x100f8 +/* [RW 1] Input enable for TX BRB1 pause port 0 IF */ +#define NIG_REG_BRB0_PAUSE_IN_EN 0x100c4 +/* [RW 1] output enable for RX BRB1 port1 IF */ +#define NIG_REG_BRB1_OUT_EN 0x100fc +/* [RW 1] Input enable for TX BRB1 pause port 1 IF */ +#define NIG_REG_BRB1_PAUSE_IN_EN 0x100c8 +/* [RW 1] output enable for RX BRB1 LP IF */ +#define NIG_REG_BRB_LB_OUT_EN 0x10100 +/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64] + error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush; + [73:72] vnic_num; [81:74] sideband_info */ +#define NIG_REG_DEBUG_PACKET_LB 0x10800 +/* [RW 1] Input enable for TX Debug packet */ +#define NIG_REG_EGRESS_DEBUG_IN_EN 0x100dc +/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all + packets from PBF are not forwarded to the MAC and are just deleted from the FIFO. + The first packet may be deleted from the middle; and the last packet will be + always deleted till the end. */ +#define NIG_REG_EGRESS_DRAIN0_MODE 0x10060 +/* [RW 1] Output enable to EMAC0 */ +#define NIG_REG_EGRESS_EMAC0_OUT_EN 0x10120 +/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output + to emac for port0; otherwise to bmac for port0 */ +#define NIG_REG_EGRESS_EMAC0_PORT 0x10058 +/* [RW 1] Input enable for TX PBF user packet port0 IF */ +#define NIG_REG_EGRESS_PBF0_IN_EN 0x100cc +/* [RW 1] Input enable for TX PBF user packet port1 IF */ +#define NIG_REG_EGRESS_PBF1_IN_EN 0x100d0 +/* [RW 1] Input enable for TX UMP management packet port0 IF */ +#define NIG_REG_EGRESS_UMP0_IN_EN 0x100d4 +/* [RW 1] Input enable for RX_EMAC0 IF */ +#define NIG_REG_EMAC0_IN_EN 0x100a4 +/* [RW 1] output enable for TX EMAC pause port 0 IF */ +#define NIG_REG_EMAC0_PAUSE_OUT_EN 0x10118 +/* [R 1] status from emac0. This bit is set when MDINT from either the + EXT_MDINT pin or from the Copper PHY is driven low. This condition must + be cleared in the attached PHY device that is driving the MINT pin. */ +#define NIG_REG_EMAC0_STATUS_MISC_MI_INT 0x10494 +/* [WB 48] This address space contains BMAC0 registers. The BMAC registers + are described in appendix A. 
In order to access the BMAC0 registers; the + base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be + added to each BMAC register offset */ +#define NIG_REG_INGRESS_BMAC0_MEM 0x10c00 +/* [WB 48] This address space contains BMAC1 registers. The BMAC registers + are described in appendix A. In order to access the BMAC0 registers; the + base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be + added to each BMAC register offset */ +#define NIG_REG_INGRESS_BMAC1_MEM 0x11000 +/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */ +#define NIG_REG_INGRESS_EOP_LB_EMPTY 0x104e0 +/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data + packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */ +#define NIG_REG_INGRESS_EOP_LB_FIFO 0x104e4 +/* [RW 27] 0 - must be active for Everest A0; 1- for Everest B0 when latch + logic for interrupts must be used. Enable per bit of interrupt of + ~latch_status.latch_status */ +#define NIG_REG_LATCH_BC_0 0x16210 +/* [RW 27] Latch for each interrupt from Unicore.b[0] + status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete; + b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status; + b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn; + b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete; + b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status; + b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete; + b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet; + b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g; + b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact; + b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx; + b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx; + b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */ +#define NIG_REG_LATCH_STATUS_0 0x18000 +/* [RW 1] led 10g for port 0 */ +#define NIG_REG_LED_10G_P0 0x10320 +/* [RW 1] led 10g for port 1 */ +#define NIG_REG_LED_10G_P1 0x10324 +/* [RW 1] Port0: This bit is set to enable the use of the + ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field + defined below. If this bit is cleared; then the blink rate will be about + 8Hz. */ +#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 0x10318 +/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for + Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field + is reset to 0x080; giving a default blink period of approximately 8Hz. */ +#define NIG_REG_LED_CONTROL_BLINK_RATE_P0 0x10310 +/* [RW 1] Port0: If set along with the + ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0 + bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED + bit; the Traffic LED will blink with the blink rate specified in + ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and + ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 + fields. */ +#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 0x10308 +/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The + Traffic LED will then be controlled via bit ~nig_registers_ + led_control_traffic_p0.led_control_traffic_p0 and bit + ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */ +#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 0x102f8 +/* [RW 1] Port0: If set along with the led_control_override_trafic_p0 bit; + turns on the Traffic LED. 
If the led_control_blink_traffic_p0 bit is also + set; the LED will blink with blink rate specified in + ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and + ~nig_regsters_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0 + fields. */ +#define NIG_REG_LED_CONTROL_TRAFFIC_P0 0x10300 +/* [RW 4] led mode for port0: 0 MAC; 1-3 PHY1; 4 MAC2; 5-7 PHY4; 8-MAC3; + 9-11PHY7; 12 MAC4; 13-15 PHY10; */ +#define NIG_REG_LED_MODE_P0 0x102f0 +/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1- + tsdm enable; b2- usdm enable */ +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 0x16070 +#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 0x16074 +/* [RW 1] SAFC enable for port0. This register may get 1 only when + ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same + port */ +#define NIG_REG_LLFC_ENABLE_0 0x16208 +#define NIG_REG_LLFC_ENABLE_1 0x1620c +/* [RW 16] classes are high-priority for port0 */ +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058 +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c +/* [RW 16] classes are low-priority for port0 */ +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060 +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064 +/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */ +#define NIG_REG_LLFC_OUT_EN_0 0x160c8 +#define NIG_REG_LLFC_OUT_EN_1 0x160cc +#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c +#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154 +#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244 +#define NIG_REG_LLH0_BRB1_DRV_MASK_MF 0x16048 +/* [RW 1] send to BRB1 if no match on any of RMP rules. */ +#define NIG_REG_LLH0_BRB1_NOT_MCP 0x1025c +/* [RW 2] Determine the classification participants. 0: no classification.1: + classification upon VLAN id. 2: classification upon MAC address. 3: + classification upon both VLAN id & MAC addr. */ +#define NIG_REG_LLH0_CLS_TYPE 0x16080 +/* [RW 32] cm header for llh0 */ +#define NIG_REG_LLH0_CM_HEADER 0x1007c +#define NIG_REG_LLH0_DEST_IP_0_1 0x101dc +#define NIG_REG_LLH0_DEST_MAC_0_0 0x101c0 +/* [RW 16] destination TCP address 1. The LLH will look for this address in + all incoming packets. */ +#define NIG_REG_LLH0_DEST_TCP_0 0x10220 +/* [RW 16] destination UDP address 1 The LLH will look for this address in + all incoming packets. */ +#define NIG_REG_LLH0_DEST_UDP_0 0x10214 +#define NIG_REG_LLH0_ERROR_MASK 0x1008c +/* [RW 8] event id for llh0 */ +#define NIG_REG_LLH0_EVENT_ID 0x10084 +#define NIG_REG_LLH0_FUNC_EN 0x160fc +#define NIG_REG_LLH0_FUNC_MEM 0x16180 +#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140 +#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 +/* [RW 1] Determine the IP version to look for in + ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */ +#define NIG_REG_LLH0_IPV4_IPV6_0 0x10208 +/* [RW 1] t bit for llh0 */ +#define NIG_REG_LLH0_T_BIT 0x10074 +/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */ +#define NIG_REG_LLH0_VLAN_ID_0 0x1022c +/* [RW 8] init credit counter for port0 in LLH */ +#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 +#define NIG_REG_LLH0_XCM_MASK 0x10130 +#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248 +/* [RW 1] send to BRB1 if no match on any of RMP rules. */ +#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc +/* [RW 2] Determine the classification participants. 0: no classification.1: + classification upon VLAN id. 2: classification upon MAC address. 3: + classification upon both VLAN id & MAC addr. 
*/ +#define NIG_REG_LLH1_CLS_TYPE 0x16084 +/* [RW 32] cm header for llh1 */ +#define NIG_REG_LLH1_CM_HEADER 0x10080 +#define NIG_REG_LLH1_ERROR_MASK 0x10090 +/* [RW 8] event id for llh1 */ +#define NIG_REG_LLH1_EVENT_ID 0x10088 +#define NIG_REG_LLH1_FUNC_EN 0x16104 +#define NIG_REG_LLH1_FUNC_MEM 0x161c0 +#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 +#define NIG_REG_LLH1_FUNC_MEM_SIZE 16 +/* [RW 1] When this bit is set; the LLH will classify the packet before + * sending it to the BRB or calculating WoL on it. This bit controls port 1 + * only. The legacy llh_multi_function_mode bit controls port 0. */ +#define NIG_REG_LLH1_MF_MODE 0x18614 +/* [RW 8] init credit counter for port1 in LLH */ +#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 +#define NIG_REG_LLH1_XCM_MASK 0x10134 +/* [RW 1] When this bit is set; the LLH will expect all packets to be with + e1hov */ +#define NIG_REG_LLH_E1HOV_MODE 0x160d8 +/* [RW 1] When this bit is set; the LLH will classify the packet before + sending it to the BRB or calculating WoL on it. */ +#define NIG_REG_LLH_MF_MODE 0x16024 +#define NIG_REG_MASK_INTERRUPT_PORT0 0x10330 +#define NIG_REG_MASK_INTERRUPT_PORT1 0x10334 +/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */ +#define NIG_REG_NIG_EMAC0_EN 0x1003c +/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */ +#define NIG_REG_NIG_EMAC1_EN 0x10040 +/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the + EMAC0 to strip the CRC from the ingress packets. */ +#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044 +/* [R 32] Interrupt register #0 read */ +#define NIG_REG_NIG_INT_STS_0 0x103b0 +#define NIG_REG_NIG_INT_STS_1 0x103c0 +/* [RC 32] Interrupt register #0 read clear */ +#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4 +/* [R 32] Legacy E1 and E1H location for parity error mask register. */ +#define NIG_REG_NIG_PRTY_MASK 0x103dc +/* [RW 32] Parity mask register #0 read/write */ +#define NIG_REG_NIG_PRTY_MASK_0 0x183c8 +#define NIG_REG_NIG_PRTY_MASK_1 0x183d8 +/* [R 32] Legacy E1 and E1H location for parity error status register. */ +#define NIG_REG_NIG_PRTY_STS 0x103d0 +/* [R 32] Parity register #0 read */ +#define NIG_REG_NIG_PRTY_STS_0 0x183bc +#define NIG_REG_NIG_PRTY_STS_1 0x183cc +/* [R 32] Legacy E1 and E1H location for parity error status clear register. */ +#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4 +/* [RC 32] Parity register #0 read clear */ +#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0 +#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0 +#define MCPR_IMC_COMMAND_ENABLE (1L<<31) +#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16 +#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28 +#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038 +/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in + * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be + * disabled when this bit is set. */ +#define NIG_REG_P0_HWPFC_ENABLE 0x18078 +#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 +#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. 
+ */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4 +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac +/* [RW 1] Input enable for RX MAC interface. */ +#define NIG_REG_P0_MAC_IN_EN 0x185ac +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P0_MAC_OUT_EN 0x185b0 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4 +/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for + * future expansion) each priorty is to be mapped to. Bits 3:0 specify the + * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit + * priority field is extracted from the outer-most VLAN in receive packet. + * Only COS 0 and COS 1 are supported in E2. */ +#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P0_PTP_EN 0x18788 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. 
A + * priority is mapped to COS 0 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A + * priority is mapped to COS 1 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A + * priority is mapped to COS 3 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A + * priority is mapped to COS 4 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A + * priority is mapped to COS 5 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc +/* [R 1] RX FIFO for receiving data from MAC is empty. */ +/* [RW 15] Specify which of the credit registers the client is to be mapped + * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For + * clients that are not subject to WFQ credit blocking - their + * specifications here are not used. */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0 +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. 
*/ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c +/* [RW 5] Specify whether the client competes directly in the strict + * priority arbiter. The bits are mapped according to client ID (client IDs + * are defined in tx_arb_priority_client). Default value is set to enable + * strict priorities for clients 0-2 -- management and debug traffic. */ +#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8 +/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The + * bits are mapped according to client ID (client IDs are defined in + * tx_arb_priority_client). Default value is 0 for not using WFQ credit + * blocking. */ +#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec +/* [RW 32] Specify the upper bound that credit register 0 is allowed to + * reach. */ +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac +/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 + * when it is time to increment. */ +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c +/* [RW 12] Specify the number of strict priority arbitration slots between + * two round-robin arbitration slots to avoid starvation. A value of 0 means + * no strict priority cycles - the strict priority with anti-starvation + * arbiter becomes a round-robin arbiter. */ +#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4 +/* [RW 15] Specify the client number to be assigned to each priority of the + * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0] + * are for priority 0 client; bits [14:12] are for priority 4 client. The + * clients are assigned the following IDs: 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000) + * for management at priority 0; debug traffic at priorities 1 and 2; COS0 + * traffic at priority 3; and COS1 traffic at priority 4. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c +#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 +#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460a +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. 
+ */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4 +/* [RW 32] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 31:0 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680 +/* [RW 4] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 35:32 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 +/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP + * packets to BRB LB interface to forward the packet to the host. 
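/* Illustrative sketch (not part of the original header): letting port-1 PTP
 * event packets reach the host using the mask registers above. Rule 0
 * ({IPv4 DA 0; UDP DP 0}) is unmasked by clearing bit 0 of the rule mask and
 * bits 0 and 4 of the parameter mask; everything else stays masked, as in the
 * reset default. REG_WR() and struct bnx2x are assumed from the driver.
 */
static void example_enable_ptp_to_host_p1(struct bnx2x *bp)
{
	/* 11-bit parameter mask: unmask IPv4 DA 0 (bit 0) and UDP port 319 (bit 4). */
	REG_WR(bp, NIG_REG_P1_LLH_PTP_PARAM_MASK, 0x7ff & ~0x11);
	/* 14-bit rule mask: unmask rule 0 only. */
	REG_WR(bp, NIG_REG_P1_LLH_PTP_RULE_MASK, 0x3fff & ~0x1);
	/* Forward detected PTP packets to the host. */
	REG_WR(bp, NIG_REG_P1_LLH_PTP_TO_HOST, 0x1);
}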
All + * packets from MCP are forwarded to the network when this bit is cleared - + * regardless of the configured destination in tx_mng_destination register. + * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter + * for BRB LB interface is bypassed and PBF LB traffic is always selected to + * send to BRB LB. + */ +#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4 +#define NIG_REG_P1_HWPFC_ENABLE 0x181d0 +#define NIG_REG_P1_MAC_IN_EN 0x185c0 +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P1_MAC_OUT_EN 0x185c4 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8 +/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for + * future expansion) each priorty is to be mapped to. Bits 3:0 specify the + * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit + * priority field is extracted from the outer-most VLAN in receive packet. + * Only COS 0 and COS 1 are supported in E2. */ +#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P1_PTP_EN 0x187b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A + * priority is mapped to COS 0 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A + * priority is mapped to COS 1 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8 +/* [R 1] RX FIFO for receiving data from MAC is empty. */ +#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c +/* [R 1] TLLH FIFO is empty. */ +#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. 
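/* Illustrative sketch (not part of the original header): enabling TimeSync
 * detection on port 1 per the NIG_REG_P1_PTP_EN bit layout described above.
 * Bits 0/3 enable RX/TX detection and bits 2/5 select the V2 frame format;
 * the value is an example only, with REG_WR() and struct bnx2x assumed from
 * the driver.
 */
static void example_enable_timesync_p1(struct bnx2x *bp)
{
	/* RX enable | RX V2 format | TX enable | TX V2 format. */
	REG_WR(bp, NIG_REG_P1_PTP_EN,
	       (1 << 0) | (1 << 2) | (1 << 3) | (1 << 5));
}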
+ */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8 +/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 
12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec +/* [RW 9] Specify whether the client competes directly in the strict + * priority arbiter. The bits are mapped according to client ID (client IDs + * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. + * Default value is set to enable strict priorities for all clients. */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234 +/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The + * bits are mapped according to client ID (client IDs are defined in + * tx_arb_priority_client2): 0-management; 1-debug traffic from this port; + * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 + * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is + * 0 for not using WFQ credit blocking. */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4 +/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 + * when it is time to increment. 
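/* Illustrative sketch (not part of the original header): retrieving a TX
 * timestamp from the port-0 TLLH PTP buffer described earlier. Bit 16 of the
 * SEQID register reports a valid entry and is cleared by writing 1 to it;
 * the 64-bit timestamp is assembled from the MSB/LSB pair. REG_RD()/REG_WR(),
 * struct bnx2x and the u16/u32/u64 types are assumed from the driver.
 */
static int example_read_tx_timestamp_p0(struct bnx2x *bp, u16 *seqid, u64 *ts)
{
	u32 val = REG_RD(bp, NIG_REG_P0_TLLH_PTP_BUF_SEQID);

	if (!(val & (1 << 16)))
		return -EAGAIN;		/* no timestamp buffered yet */

	*seqid = val & 0xffff;
	*ts = (u64)REG_RD(bp, NIG_REG_P0_TLLH_PTP_BUF_TS_MSB) << 32 |
	      REG_RD(bp, NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);

	/* Writing 1 to bit 16 clears the one-deep buffer for the next packet. */
	REG_WR(bp, NIG_REG_P0_TLLH_PTP_BUF_SEQID, 1 << 16);
	return 0;
}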
*/ +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0 +/* [RW 12] Specify the number of strict priority arbitration slots between + two round-robin arbitration slots to avoid starvation. A value of 0 means + no strict priority cycles - the strict priority with anti-starvation + arbiter becomes a round-robin arbiter. */ +#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240 +/* [RW 32] Specify the client number to be assigned to each priority of the + strict priority arbiter. This register specifies bits 31:0 of the 36-bit + value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + client; bits [35-32] are for priority 8 client. The clients are assigned + the following IDs: 0-management; 1-debug traffic from this port; 2-debug + traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + accommodate the 9 input clients to ETS arbiter. Note that this register + is the same as the one for port 0, except that port 1 only has COS 0-2 + traffic. There is no traffic for COS 3-5 of port 1. */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0 +/* [RW 4] Specify the client number to be assigned to each priority of the + strict priority arbiter. This register specifies bits 35:32 of the 36-bit + value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + client; bits [35-32] are for priority 8 client. The clients are assigned + the following IDs: 0-management; 1-debug traffic from this port; 2-debug + traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + accommodate the 9 input clients to ETS arbiter. Note that this register + is the same as the one for port 0, except that port 1 only has COS 0-2 + traffic. There is no traffic for COS 3-5 of port 1. */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 +/* [R 1] TX FIFO for transmitting data to MAC is empty. */ +#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 +/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP + * packets to BRB LB interface to forward the packet to the host. All + * packets from MCP are forwarded to the network when this bit is cleared - + * regardless of the configured destination in tx_mng_destination register. + */ +#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8 +/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets + forwarded to the host. */ +#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 +/* [RW 32] Specify the upper bound that credit register 0 is allowed to + * reach. */ +/* [RW 1] Pause enable for port0. 
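/* Illustrative sketch (not part of the original header): the 36-bit port-1
 * strict-priority client mapping above is split across an LSB register
 * (bits 31:0) and an MSB register (bits 35:32), so writing back the documented
 * reset value 0x345678021 decomposes as shown. REG_WR() and struct bnx2x are
 * assumed from the driver.
 */
static void example_restore_p1_priority_client2(struct bnx2x *bp)
{
	REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x45678021);
	REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x3);
}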
This register may get 1 only when + ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same + port */ +#define NIG_REG_PAUSE_ENABLE_0 0x160c0 +#define NIG_REG_PAUSE_ENABLE_1 0x160c4 +/* [RW 1] Input enable for RX PBF LP IF */ +#define NIG_REG_PBF_LB_IN_EN 0x100b4 +/* [RW 1] Value of this register will be transmitted to port swap when + ~nig_registers_strap_override.strap_override =1 */ +#define NIG_REG_PORT_SWAP 0x10394 +/* [RW 1] PPP enable for port0. This register may get 1 only when + * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the + * same port */ +#define NIG_REG_PPP_ENABLE_0 0x160b0 +#define NIG_REG_PPP_ENABLE_1 0x160b4 +/* [RW 1] output enable for RX parser descriptor IF */ +#define NIG_REG_PRS_EOP_OUT_EN 0x10104 +/* [RW 1] Input enable for RX parser request IF */ +#define NIG_REG_PRS_REQ_IN_EN 0x100b8 +/* [RW 5] control to serdes - CL45 DEVAD */ +#define NIG_REG_SERDES0_CTRL_MD_DEVAD 0x10370 +/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */ +#define NIG_REG_SERDES0_CTRL_MD_ST 0x1036c +/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */ +#define NIG_REG_SERDES0_CTRL_PHY_ADDR 0x10374 +/* [R 1] status from serdes0 that inputs to interrupt logic of link status */ +#define NIG_REG_SERDES0_STATUS_LINK_STATUS 0x10578 +/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure + for port0 */ +#define NIG_REG_STAT0_BRB_DISCARD 0x105f0 +/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure + for port0 */ +#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8 +/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that + between 1024 and 1522 bytes for port0 */ +#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 +/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that + between 1523 bytes and above for port0 */ +#define NIG_REG_STAT0_EGRESS_MAC_PKT1 0x10760 +/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure + for port1 */ +#define NIG_REG_STAT1_BRB_DISCARD 0x10628 +/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that + between 1024 and 1522 bytes for port1 */ +#define NIG_REG_STAT1_EGRESS_MAC_PKT0 0x107a0 +/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that + between 1523 bytes and above for port1 */ +#define NIG_REG_STAT1_EGRESS_MAC_PKT1 0x107b0 +/* [WB_R 64] Rx statistics : User octets received for LP */ +#define NIG_REG_STAT2_BRB_OCTET 0x107e0 +#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 +#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c +/* [RW 1] port swap mux selection. If this register equal to 0 then port + swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then + ort swap is equal to ~nig_registers_port_swap.port_swap */ +#define NIG_REG_STRAP_OVERRIDE 0x10398 +/* [WB 64] Addresses for TimeSync related registers in the timesync + * generator sub-module. 
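/* Illustrative sketch (not part of the original header): per the comments
 * above, pause_enable may only be set while safc_enable and ppp_enable are
 * both 0 for the same port, so plain pause on port 0 is selected by clearing
 * PPP before setting pause. REG_WR() and struct bnx2x are assumed from the
 * driver.
 */
static void example_select_pause_p0(struct bnx2x *bp)
{
	REG_WR(bp, NIG_REG_PPP_ENABLE_0, 0);
	REG_WR(bp, NIG_REG_PAUSE_ENABLE_0, 1);
}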
+ */ +#define NIG_REG_TIMESYNC_GEN_REG 0x18800 +/* [RW 1] output enable for RX_XCM0 IF */ +#define NIG_REG_XCM0_OUT_EN 0x100f0 +/* [RW 1] output enable for RX_XCM1 IF */ +#define NIG_REG_XCM1_OUT_EN 0x100f4 +/* [RW 1] control to xgxs - remote PHY in-band MDIO */ +#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST 0x10348 +/* [RW 5] control to xgxs - CL45 DEVAD */ +#define NIG_REG_XGXS0_CTRL_MD_DEVAD 0x1033c +/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */ +#define NIG_REG_XGXS0_CTRL_MD_ST 0x10338 +/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */ +#define NIG_REG_XGXS0_CTRL_PHY_ADDR 0x10340 +/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */ +#define NIG_REG_XGXS0_STATUS_LINK10G 0x10680 +/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */ +#define NIG_REG_XGXS0_STATUS_LINK_STATUS 0x10684 +/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */ +#define NIG_REG_XGXS_LANE_SEL_P0 0x102e8 +/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */ +#define NIG_REG_XGXS_SERDES0_MODE_SEL 0x102e0 +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT (0x1<<0) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18) +#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */ +#define PBF_REG_COS0_UPPER_BOUND 0x15c05c +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 0. */ +#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 1. */ +#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4 +/* [RW 31] The weight of COS0 in the ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT 0x15c054 +/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8 +/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0 +/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */ +#define PBF_REG_COS1_UPPER_BOUND 0x15c060 +/* [RW 31] The weight of COS1 in the ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT 0x15c058 +/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac +/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4 +/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0 +/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8 +/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */ +#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4 +/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */ +#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8 +/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */ +#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc +/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_LB_Q 0x140338 +/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_Q0 0x14033c +/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte + * lines. 
*/ +#define PBF_REG_CREDIT_Q1 0x140340 +/* [RW 1] Disable processing further tasks from port 0 (after ending the + current task in process). */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c +/* [RW 1] Disable processing further tasks from port 1 (after ending the + current task in process). */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P1 0x140060 +/* [RW 1] Disable processing further tasks from port 4 (after ending the + current task in process). */ +#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c +#define PBF_REG_DISABLE_PF 0x1402e8 +#define PBF_REG_DISABLE_VF 0x1402ec +/* [RW 18] For port 0: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. For clients which are not credit blocked; their mapping + * is dont care. */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288 +/* [RW 9] For port 1: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. For clients which are not credit blocked; their mapping + * is dont care. */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c +/* [RW 6] For port 0: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278 +/* [RW 3] For port 1: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c +/* [RW 6] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280 +/* [RW 3] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284 +/* [RW 16] For port 0: The number of strict priority arbitration slots + * between 2 RR arbitration slots. A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0 +/* [RW 16] For port 1: The number of strict priority arbitration slots + * between 2 RR arbitration slots. A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4 +/* [RW 18] For port 0: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270 +/* [RW 9] For port 1: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274 +/* [RW 1] Indicates that ETS is performed between the COSes in the command + * arbiter. 
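/* Illustrative sketch (not part of the original header): mirroring an ETS
 * configuration into the PBF command arbiter for port 0 using the registers
 * above. COS0/COS1 (bits 0-1) are made subject to WFQ, no client is strict,
 * and ETS is then enabled. Values are examples only; REG_WR() and struct
 * bnx2x are assumed from the driver.
 */
static void example_pbf_ets_setup_p0(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0x3);
	REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
	/* A value of 0 would degenerate to pure round-robin (see above). */
	REG_WR(bp, PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
}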
If reset strict priority w/ anti-starvation will be performed + * w/o WFQ. */ +#define PBF_REG_ETS_ENABLED 0x15c050 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8 +/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest + * priority in the command arbiter. */ +#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c +#define PBF_REG_IF_ENABLE_REG 0x140044 +/* [RW 1] Init bit. When set the initial credits are copied to the credit + registers (except the port credits). Should be set and then reset after + the configuration of the block has ended. */ +#define PBF_REG_INIT 0x140000 +/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_LB_Q 0x15c248 +/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_Q0 0x15c230 +/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_Q1 0x15c234 +/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is + copied to the credit register. Should be set and then reset after the + configuration of the port has ended. */ +#define PBF_REG_INIT_P0 0x140004 +/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is + copied to the credit register. Should be set and then reset after the + configuration of the port has ended. */ +#define PBF_REG_INIT_P1 0x140008 +/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is + copied to the credit register. Should be set and then reset after the + configuration of the port has ended. */ +#define PBF_REG_INIT_P4 0x14000c +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * the LB queue. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 0. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 1. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c +/* [RW 1] Enable for mac interface 0. */ +#define PBF_REG_MAC_IF0_ENABLE 0x140030 +/* [RW 1] Enable for mac interface 1. */ +#define PBF_REG_MAC_IF1_ENABLE 0x140034 +/* [RW 1] Enable for the loopback interface. */ +#define PBF_REG_MAC_LB_ENABLE 0x140040 +/* [RW 6] Bit-map indicating which headers must appear in the packet */ +#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4 +/* [RW 16] The number of strict priority arbitration slots between 2 RR + * arbitration slots. A value of 0 means no strict priority cycles; i.e. the + * strict-priority w/ anti-starvation arbiter is a RR arbiter. */ +#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064 +/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause + not suppoterd. */ +#define PBF_REG_P0_ARB_THRSH 0x1400e4 +/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */ +#define PBF_REG_P0_CREDIT 0x140200 +/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte + lines. */ +#define PBF_REG_P0_INIT_CRD 0x1400d0 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 0. Reset upon init. 
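/* Illustrative sketch (not part of the original header): the PBF init bits
 * above latch the initial credit values, so configuration is followed by a
 * set/clear pulse of PBF_REG_INIT. The credit value is an arbitrary example
 * in 16-byte lines; REG_WR() and struct bnx2x are assumed from the driver.
 */
static void example_pbf_load_queue0_credit(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_INIT_CRD_Q0, 0x240);
	/* Pulse the block init bit so the initial credits are copied over. */
	REG_WR(bp, PBF_REG_INIT, 1);
	REG_WR(bp, PBF_REG_INIT, 0);
}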
*/ +#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308 +/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */ +#define PBF_REG_P0_PAUSE_ENABLE 0x140014 +/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */ +#define PBF_REG_P0_TASK_CNT 0x140204 +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 0. Reset upon init. */ +#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */ +#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc +/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port + * buffers in 16 byte lines. */ +#define PBF_REG_P1_CREDIT 0x140208 +/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port + * buffers in 16 byte lines. */ +#define PBF_REG_P1_INIT_CRD 0x1400d4 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 1. Reset upon init. */ +#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c +/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */ +#define PBF_REG_P1_TASK_CNT 0x14020c +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 1. Reset upon init. */ +#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */ +#define PBF_REG_P1_TQ_OCCUPANCY 0x140300 +/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ +#define PBF_REG_P4_CREDIT 0x140210 +/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte + lines. */ +#define PBF_REG_P4_INIT_CRD 0x1400e0 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 4. Reset upon init. */ +#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310 +/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */ +#define PBF_REG_P4_TASK_CNT 0x140214 +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 4. Reset upon init. */ +#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */ +#define PBF_REG_P4_TQ_OCCUPANCY 0x140304 +/* [RW 5] Interrupt mask register #0 read/write */ +#define PBF_REG_PBF_INT_MASK 0x1401d4 +/* [R 5] Interrupt register #0 read */ +#define PBF_REG_PBF_INT_STS 0x1401c8 +/* [RW 20] Parity mask register #0 read/write */ +#define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [R 28] Parity register #0 read */ +#define PBF_REG_PBF_PRTY_STS 0x1401d8 +/* [RC 20] Parity register #0 read clear */ +#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PBF_REG_TAG_ETHERTYPE_0 0x15c090 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity */ +#define PBF_REG_TAG_LEN_0 0x15c09c +/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task + * queue. Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c +/* [R 32] Cyclic counter for number of 8 byte lines freed from the task + * queue 0. Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390 +/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1. + * Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394 +/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB + * queue. 
*/ +#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8 +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */ +#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */ +#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 +#define PB_REG_CONTROL 0 +/* [RW 2] Interrupt mask register #0 read/write */ +#define PB_REG_PB_INT_MASK 0x28 +/* [R 2] Interrupt register #0 read */ +#define PB_REG_PB_INT_STS 0x1c +/* [RW 4] Parity mask register #0 read/write */ +#define PB_REG_PB_PRTY_MASK 0x38 +/* [R 4] Parity register #0 read */ +#define PB_REG_PB_PRTY_STS 0x2c +/* [RC 4] Parity register #0 read clear */ +#define PB_REG_PB_PRTY_STS_CLR 0x30 +#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) +#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) +#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6) +#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3) +#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5) +#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2) +/* [R 8] Config space A attention dirty bits. Each bit indicates that the + * corresponding PF generates config space A attention. Set by PXP. Reset by + * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits + * from both paths. */ +#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010 +/* [R 8] Config space B attention dirty bits. Each bit indicates that the + * corresponding PF generates config space B attention. Set by PXP. Reset by + * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits + * from both paths. */ +#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014 +/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194 +/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask; + * its[8:0]-address. Bits [1:0] must be zero (DW resolution address). */ +#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c +/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c +/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100 +/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108 +/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac +/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates + * that the FLR register of the corresponding PF was set. Set by PXP. Reset + * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits + * from both paths. */ +#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028 +/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1 + * to a bit in this register in order to clear the corresponding bit in + * flr_request_pf_7_0 register. Note: register contains bits from both + * paths. */ +#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418 +/* [R 32] FLR request attention dirty bits for VFs 96 to 127. 
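/* Illustrative sketch (not part of the original header): the FLR attention
 * bits above are read as dirty bits and acknowledged through the matching
 * *_CLR register (the comments note that in practice this clearing is done
 * by the MCP). REG_RD()/REG_WR(), struct bnx2x and u32 are assumed from the
 * driver.
 */
static void example_ack_pf_flr_requests(struct bnx2x *bp)
{
	u32 pending = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0);

	if (pending)
		REG_WR(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR, pending);
}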
Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024 +/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018 +/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c +/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit + * indicates that the FLR register of the corresponding VF was set. Set by + * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */ +#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020 +/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit + * 0 - Target memory read arrived with a correctable error. Bit 1 - Target + * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW + * arrived with a correctable error. Bit 3 - Configuration RW arrived with + * an uncorrectable error. Bit 4 - Completion with Configuration Request + * Retry Status. Bit 5 - Expansion ROM access received with a write request. + * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and + * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010; + * and pcie_rx_last not asserted. */ +#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068 +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430 +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434 +#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. */ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x943c + +/* [R 9] Interrupt register #0 read */ +#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 +/* [RC 9] Interrupt register #0 read clear */ +#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c +/* [RW 2] Parity mask register #0 read/write */ +#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4 +/* [R 2] Parity register #0 read */ +#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8 +/* [RC 2] Parity register #0 read clear */ +#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac +/* [R 13] Details of first request received with error. [2:0] - PFID. [3] - + * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion + * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 - + * completer abort. 3 - Illegal value for this field. [12] valid - indicates + * if there was a completion error since the last time this register was + * cleared. */ +#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080 +/* [R 18] Details of first ATS Translation Completion request received with + * error. [2:0] - PFID. 
[3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code - + * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 - + * unsupported request. 2 - completer abort. 3 - Illegal value for this + * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a + * completion error since the last time this register was cleared. */ +#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084 +/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to + * a bit in this register in order to clear the corresponding bit in + * shadow_bme_pf_7_0 register. MCP should never use this unless a + * work-around is needed. Note: register contains bits from both paths. */ +#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458 +/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the + * VF enable register of the corresponding PF is written to 0 and was + * previously 1. Set by PXP. Reset by MCP writing 1 to + * sr_iov_disabled_request_clr. Note: register contains bits from both + * paths. */ +#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030 +/* [R 32] Indicates the status of tags 32-63. 0 - tags is used - read + * completion did not return yet. 1 - tag is unused. Same functionality as + * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */ +#define PGLUE_B_REG_TAGS_63_32 0x9244 +/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170 +/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4 +/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc +/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0 +/* [R 32] Address [31:0] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098 +/* [R 32] Address [63:32] of first read request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c +/* [R 31] Details of first read request not submitted due to error. [4:0] + * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request. + * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] - + * VFID. */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0 +/* [R 26] Details of first read request not submitted due to error. [15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. */ +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4 +/* [R 32] Address [31:0] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088 +/* [R 32] Address [63:32] of first write request not submitted due to error */ +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c +/* [R 31] Details of first write request not submitted due to error. [4:0] + * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] + * - VFID. */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090 +/* [R 26] Details of first write request not submitted due to error. 
[15:0] + * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type - + * [21] - Indicates was_error was set; [22] - Indicates BME was cleared; + * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent + * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid - + * indicates if there was a request not submitted due to error since the + * last time this register was cleared. */ +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094 +/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask; + * its[4:0]-address relative to start_offset_a. Bits [1:0] can have any + * value (Byte resolution address). */ +#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128 +#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c +#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130 +#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134 +#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138 +#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c +#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140 +/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c +/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180 +/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184 +/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8 +/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0 +/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8 +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4 +/* [R 26] Details of first target VF request accessing VF GRC space that + * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write. + * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a + * request accessing VF GRC space that failed permission check since the + * last time this register was cleared. Permission checks are: function + * permission; R/W permission; address range permission. */ +#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234 +/* [R 31] Details of first target VF request with length violation (too many + * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address). + * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30] + * valid - indicates if there was a request with length violation since the + * last time this register was cleared. Length violations: length of more + * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and + * length is more than 1 DW. */ +#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230 +/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates + * that there was a completion with uncorrectable error for the + * corresponding PF. Set by PXP. Reset by MCP writing 1 to + * was_error_pf_7_0_clr. */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c +/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1 + * to a bit in this register in order to clear the corresponding bit in + * flr_request_pf_7_0 register. */ +#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470 +/* [R 32] Was_error indication dirty bits for VFs 96 to 127. 
Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_127_96_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078 +/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP + * writes 1 to a bit in this register in order to clear the corresponding + * bit in was_error_vf_127_96 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474 +/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_31_0_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c +/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_31_0 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478 +/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_63_32_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070 +/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_63_32 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c +/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit + * indicates that there was a completion with uncorrectable error for the + * corresponding VF. Set by PXP. Reset by MCP writing 1 to + * was_error_vf_95_64_clr. */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074 +/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes + * 1 to a bit in this register in order to clear the corresponding bit in + * was_error_vf_95_64 register. */ +#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480 +/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1 + * - enable. */ +#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188 +/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec +/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4 +/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */ +#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc +/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */ +#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8 +#define PRS_REG_A_PRSU_20 0x40134 +/* [R 8] debug only: CFC load request current credit. Transaction based. */ +#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 +/* [R 8] debug only: CFC search request current credit. Transaction based. */ +#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT 0x40168 +/* [RW 6] The initial credit for the search message to the CFC interface. + Credit is transaction based. */ +#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT 0x4011c +/* [RW 24] CID for port 0 if no match */ +#define PRS_REG_CID_PORT_0 0x400fc +/* [RW 32] The CM header for flush message where 'load existed' bit in CFC + load response is reset and packet type is 0. Used in packet start message + to TCM. 
*/ +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0 0x400dc +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1 0x400e0 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2 0x400e4 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3 0x400e8 +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4 0x400ec +#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5 0x400f0 +/* [RW 32] The CM header for flush message where 'load existed' bit in CFC + load response is set and packet type is 0. Used in packet start message + to TCM. */ +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0 0x400bc +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1 0x400c0 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2 0x400c4 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3 0x400c8 +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4 0x400cc +#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5 0x400d0 +/* [RW 32] The CM header for a match and packet type 1 for loopback port. + Used in packet start message to TCM. */ +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1 0x4009c +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2 0x400a0 +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3 0x400a4 +#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4 0x400a8 +/* [RW 32] The CM header for a match and packet type 0. Used in packet start + message to TCM. */ +#define PRS_REG_CM_HDR_TYPE_0 0x40078 +#define PRS_REG_CM_HDR_TYPE_1 0x4007c +#define PRS_REG_CM_HDR_TYPE_2 0x40080 +#define PRS_REG_CM_HDR_TYPE_3 0x40084 +#define PRS_REG_CM_HDR_TYPE_4 0x40088 +/* [RW 32] The CM header in case there was not a match on the connection */ +#define PRS_REG_CM_NO_MATCH_HDR 0x400b8 +/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */ +#define PRS_REG_E1HOV_MODE 0x401c8 +/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet + start message to TCM. */ +#define PRS_REG_EVENT_ID_1 0x40054 +#define PRS_REG_EVENT_ID_2 0x40058 +#define PRS_REG_EVENT_ID_3 0x4005c +/* [RW 16] The Ethernet type value for FCoE */ +#define PRS_REG_FCOE_TYPE 0x401d0 +/* [RW 8] Context region for flush packet with packet type 0. Used in CFC + load request message. */ +#define PRS_REG_FLUSH_REGIONS_TYPE_0 0x40004 +#define PRS_REG_FLUSH_REGIONS_TYPE_1 0x40008 +#define PRS_REG_FLUSH_REGIONS_TYPE_2 0x4000c +#define PRS_REG_FLUSH_REGIONS_TYPE_3 0x40010 +#define PRS_REG_FLUSH_REGIONS_TYPE_4 0x40014 +#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018 +#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c +#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define PRS_REG_HDRS_AFTER_BASIC 0x40238 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header for port 0 packets. 
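/* Illustrative sketch (not part of the original header): the same "which L2
 * headers may follow the basic Ethernet header" bitmap appears in several
 * blocks (parser, PBF, NIG), so a configuration helper might write one value
 * to all of them. The bitmap argument is a hypothetical example parameter;
 * REG_WR() and struct bnx2x are assumed from the driver.
 */
static void example_set_hdrs_after_basic(struct bnx2x *bp, u32 bitmap)
{
	REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, bitmap);
	REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, bitmap);
	REG_WR(bp, NIG_REG_P1_HDRS_AFTER_BASIC, bitmap);
}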
*/ +#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 +#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 +/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PRS_REG_HDRS_AFTER_TAG_0 0x40248 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for + * port 0 packets */ +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 +/* [RW 4] The increment value to send in the CFC load request message */ +#define PRS_REG_INC_VALUE 0x40048 +/* [RW 6] Bit-map indicating which headers must appear in the packet */ +#define PRS_REG_MUST_HAVE_HDRS 0x40254 +/* [RW 6] Bit-map indicating which headers must appear in the packet for + * port 0 packets */ +#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c +#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac +#define PRS_REG_NIC_MODE 0x40138 +/* [RW 8] The 8-bit event ID for cases where there is no match on the + connection. Used in packet start message to TCM. */ +#define PRS_REG_NO_MATCH_EVENT_ID 0x40070 +/* [ST 24] The number of input CFC flush packets */ +#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES 0x40128 +/* [ST 32] The number of cycles the Parser halted its operation since it + could not allocate the next serial number */ +#define PRS_REG_NUM_OF_DEAD_CYCLES 0x40130 +/* [ST 24] The number of input packets */ +#define PRS_REG_NUM_OF_PACKETS 0x40124 +/* [ST 24] The number of input transparent flush packets */ +#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES 0x4012c +/* [RW 8] Context region for received Ethernet packet with a match and + packet type 0. Used in CFC load request message */ +#define PRS_REG_PACKET_REGIONS_TYPE_0 0x40028 +#define PRS_REG_PACKET_REGIONS_TYPE_1 0x4002c +#define PRS_REG_PACKET_REGIONS_TYPE_2 0x40030 +#define PRS_REG_PACKET_REGIONS_TYPE_3 0x40034 +#define PRS_REG_PACKET_REGIONS_TYPE_4 0x40038 +#define PRS_REG_PACKET_REGIONS_TYPE_5 0x4003c +#define PRS_REG_PACKET_REGIONS_TYPE_6 0x40040 +#define PRS_REG_PACKET_REGIONS_TYPE_7 0x40044 +/* [R 2] debug only: Number of pending requests for CAC on port 0. */ +#define PRS_REG_PENDING_BRB_CAC0_RQ 0x40174 +/* [R 2] debug only: Number of pending requests for header parsing. */ +#define PRS_REG_PENDING_BRB_PRS_RQ 0x40170 +/* [R 1] Interrupt register #0 read */ +#define PRS_REG_PRS_INT_STS 0x40188 +/* [RW 8] Parity mask register #0 read/write */ +#define PRS_REG_PRS_PRTY_MASK 0x401a4 +/* [R 8] Parity register #0 read */ +#define PRS_REG_PRS_PRTY_STS 0x40198 +/* [RC 8] Parity register #0 read clear */ +#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c +/* [RW 8] Context region for pure acknowledge packets. Used in CFC load + request message */ +#define PRS_REG_PURE_REGIONS 0x40024 +/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this + serail number was released by SDM but cannot be used because a previous + serial number was not released. */ +#define PRS_REG_SERIAL_NUM_STATUS_LSB 0x40154 +/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this + serail number was released by SDM but cannot be used because a previous + serial number was not released. */ +#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 +/* [R 4] debug only: SRC current credit. Transaction based. */ +#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PRS_REG_TAG_ETHERTYPE_0 0x401d4 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity */ +#define PRS_REG_TAG_LEN_0 0x4022c +/* [R 8] debug only: TCM current credit. 
Cycle based. */ +#define PRS_REG_TCM_CURRENT_CREDIT 0x40160 +/* [R 8] debug only: TSDM current credit. Transaction based. */ +#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23) +#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24) +#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7) +#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7) +/* [R 6] Debug only: Number of used entries in the data FIFO */ +#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c +/* [R 7] Debug only: Number of used entries in the header FIFO */ +#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 +#define PXP2_REG_PGL_ADDR_88_F0 0x120534 +/* [R 32] GRC address for configuration access to PCIE config address 0x88. + * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_88_F1 0x120544 +#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 +/* [R 32] GRC address for configuration access to PCIE config address 0x8c. + * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_8C_F1 0x120548 +#define PXP2_REG_PGL_ADDR_90_F0 0x12053c +/* [R 32] GRC address for configuration access to PCIE config address 0x90. + * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_90_F1 0x12054c +#define PXP2_REG_PGL_ADDR_94_F0 0x120540 +/* [R 32] GRC address for configuration access to PCIE config address 0x94. + * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_94_F1 0x120550 +#define PXP2_REG_PGL_CONTROL0 0x120490 +#define PXP2_REG_PGL_CONTROL1 0x120514 +#define PXP2_REG_PGL_DEBUG 0x120520 +/* [RW 32] third dword data of expansion rom request. this register is + special. reading from it provides a vector outstanding read requests. 
if + a bit is zero it means that a read request on the corresponding tag did + not finish yet (not all completions have arrived for it) */ +#define PXP2_REG_PGL_EXP_ROM2 0x120808 +/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask; + its[15:0]-address */ +#define PXP2_REG_PGL_INT_CSDM_0 0x1204f4 +#define PXP2_REG_PGL_INT_CSDM_1 0x1204f8 +#define PXP2_REG_PGL_INT_CSDM_2 0x1204fc +#define PXP2_REG_PGL_INT_CSDM_3 0x120500 +#define PXP2_REG_PGL_INT_CSDM_4 0x120504 +#define PXP2_REG_PGL_INT_CSDM_5 0x120508 +#define PXP2_REG_PGL_INT_CSDM_6 0x12050c +#define PXP2_REG_PGL_INT_CSDM_7 0x120510 +/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask; + its[15:0]-address */ +#define PXP2_REG_PGL_INT_TSDM_0 0x120494 +#define PXP2_REG_PGL_INT_TSDM_1 0x120498 +#define PXP2_REG_PGL_INT_TSDM_2 0x12049c +#define PXP2_REG_PGL_INT_TSDM_3 0x1204a0 +#define PXP2_REG_PGL_INT_TSDM_4 0x1204a4 +#define PXP2_REG_PGL_INT_TSDM_5 0x1204a8 +#define PXP2_REG_PGL_INT_TSDM_6 0x1204ac +#define PXP2_REG_PGL_INT_TSDM_7 0x1204b0 +/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask; + its[15:0]-address */ +#define PXP2_REG_PGL_INT_USDM_0 0x1204b4 +#define PXP2_REG_PGL_INT_USDM_1 0x1204b8 +#define PXP2_REG_PGL_INT_USDM_2 0x1204bc +#define PXP2_REG_PGL_INT_USDM_3 0x1204c0 +#define PXP2_REG_PGL_INT_USDM_4 0x1204c4 +#define PXP2_REG_PGL_INT_USDM_5 0x1204c8 +#define PXP2_REG_PGL_INT_USDM_6 0x1204cc +#define PXP2_REG_PGL_INT_USDM_7 0x1204d0 +/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask; + its[15:0]-address */ +#define PXP2_REG_PGL_INT_XSDM_0 0x1204d4 +#define PXP2_REG_PGL_INT_XSDM_1 0x1204d8 +#define PXP2_REG_PGL_INT_XSDM_2 0x1204dc +#define PXP2_REG_PGL_INT_XSDM_3 0x1204e0 +#define PXP2_REG_PGL_INT_XSDM_4 0x1204e4 +#define PXP2_REG_PGL_INT_XSDM_5 0x1204e8 +#define PXP2_REG_PGL_INT_XSDM_6 0x1204ec +#define PXP2_REG_PGL_INT_XSDM_7 0x1204f0 +/* [RW 3] this field allows one function to pretend being another function + when accessing any BAR mapped resource within the device. the value of + the field is the number of the function that will be accessed + effectively. 
after software write to this bit it must read it in order to + know that the new value is updated */ +#define PXP2_REG_PGL_PRETEND_FUNC_F0 0x120674 +#define PXP2_REG_PGL_PRETEND_FUNC_F1 0x120678 +#define PXP2_REG_PGL_PRETEND_FUNC_F2 0x12067c +#define PXP2_REG_PGL_PRETEND_FUNC_F3 0x120680 +#define PXP2_REG_PGL_PRETEND_FUNC_F4 0x120684 +#define PXP2_REG_PGL_PRETEND_FUNC_F5 0x120688 +#define PXP2_REG_PGL_PRETEND_FUNC_F6 0x12068c +#define PXP2_REG_PGL_PRETEND_FUNC_F7 0x120690 +/* [R 1] this bit indicates that a read request was blocked because of + bus_master_en was deasserted */ +#define PXP2_REG_PGL_READ_BLOCKED 0x120568 +#define PXP2_REG_PGL_TAGS_LIMIT 0x1205a8 +/* [R 18] debug only */ +#define PXP2_REG_PGL_TXW_CDTS 0x12052c +/* [R 1] this bit indicates that a write request was blocked because of + bus_master_en was deasserted */ +#define PXP2_REG_PGL_WRITE_BLOCKED 0x120564 +#define PXP2_REG_PSWRQ_BW_ADD1 0x1201c0 +#define PXP2_REG_PSWRQ_BW_ADD10 0x1201e4 +#define PXP2_REG_PSWRQ_BW_ADD11 0x1201e8 +#define PXP2_REG_PSWRQ_BW_ADD2 0x1201c4 +#define PXP2_REG_PSWRQ_BW_ADD28 0x120228 +#define PXP2_REG_PSWRQ_BW_ADD3 0x1201c8 +#define PXP2_REG_PSWRQ_BW_ADD6 0x1201d4 +#define PXP2_REG_PSWRQ_BW_ADD7 0x1201d8 +#define PXP2_REG_PSWRQ_BW_ADD8 0x1201dc +#define PXP2_REG_PSWRQ_BW_ADD9 0x1201e0 +#define PXP2_REG_PSWRQ_BW_CREDIT 0x12032c +#define PXP2_REG_PSWRQ_BW_L1 0x1202b0 +#define PXP2_REG_PSWRQ_BW_L10 0x1202d4 +#define PXP2_REG_PSWRQ_BW_L11 0x1202d8 +#define PXP2_REG_PSWRQ_BW_L2 0x1202b4 +#define PXP2_REG_PSWRQ_BW_L28 0x120318 +#define PXP2_REG_PSWRQ_BW_L3 0x1202b8 +#define PXP2_REG_PSWRQ_BW_L6 0x1202c4 +#define PXP2_REG_PSWRQ_BW_L7 0x1202c8 +#define PXP2_REG_PSWRQ_BW_L8 0x1202cc +#define PXP2_REG_PSWRQ_BW_L9 0x1202d0 +#define PXP2_REG_PSWRQ_BW_RD 0x120324 +#define PXP2_REG_PSWRQ_BW_UB1 0x120238 +#define PXP2_REG_PSWRQ_BW_UB10 0x12025c +#define PXP2_REG_PSWRQ_BW_UB11 0x120260 +#define PXP2_REG_PSWRQ_BW_UB2 0x12023c +#define PXP2_REG_PSWRQ_BW_UB28 0x1202a0 +#define PXP2_REG_PSWRQ_BW_UB3 0x120240 +#define PXP2_REG_PSWRQ_BW_UB6 0x12024c +#define PXP2_REG_PSWRQ_BW_UB7 0x120250 +#define PXP2_REG_PSWRQ_BW_UB8 0x120254 +#define PXP2_REG_PSWRQ_BW_UB9 0x120258 +#define PXP2_REG_PSWRQ_BW_WR 0x120328 +#define PXP2_REG_PSWRQ_CDU0_L2P 0x120000 +#define PXP2_REG_PSWRQ_QM0_L2P 0x120038 +#define PXP2_REG_PSWRQ_SRC0_L2P 0x120054 +#define PXP2_REG_PSWRQ_TM0_L2P 0x12001c +#define PXP2_REG_PSWRQ_TSDM0_L2P 0x1200e0 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP2_REG_PXP2_INT_MASK_0 0x120578 +/* [R 32] Interrupt register #0 read */ +#define PXP2_REG_PXP2_INT_STS_0 0x12056c +#define PXP2_REG_PXP2_INT_STS_1 0x120608 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP2_REG_PXP2_INT_STS_CLR_0 0x120570 +/* [RW 32] Parity mask register #0 read/write */ +#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 +#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 +/* [R 32] Parity register #0 read */ +#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c +#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c +/* [RC 32] Parity register #0 read clear */ +#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 +#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 +/* [R 1] Debug only: The 'almost full' indication from each fifo (gives + indication about backpressure) */ +#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 +/* [R 8] Debug only: The blocks counter - number of unused block ids */ +#define PXP2_REG_RD_BLK_CNT 0x120418 +/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer. + Must be bigger than 6. Normally should not be changed. 
*/ +#define PXP2_REG_RD_BLK_NUM_CFG 0x12040c +/* [RW 2] CDU byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_CDURD_SWAP_MODE 0x120404 +/* [RW 1] When '1'; inputs to the PSWRD block are ignored */ +#define PXP2_REG_RD_DISABLE_INPUTS 0x120374 +/* [R 1] PSWRD internal memories initialization is done */ +#define PXP2_REG_RD_INIT_DONE 0x120370 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq10 */ +#define PXP2_REG_RD_MAX_BLKS_VQ10 0x1203a0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq11 */ +#define PXP2_REG_RD_MAX_BLKS_VQ11 0x1203a4 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq17 */ +#define PXP2_REG_RD_MAX_BLKS_VQ17 0x1203bc +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq18 */ +#define PXP2_REG_RD_MAX_BLKS_VQ18 0x1203c0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq19 */ +#define PXP2_REG_RD_MAX_BLKS_VQ19 0x1203c4 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq22 */ +#define PXP2_REG_RD_MAX_BLKS_VQ22 0x1203d0 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq25 */ +#define PXP2_REG_RD_MAX_BLKS_VQ25 0x1203dc +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq6 */ +#define PXP2_REG_RD_MAX_BLKS_VQ6 0x120390 +/* [RW 8] The maximum number of blocks in Tetris Buffer that can be + allocated for vq9 */ +#define PXP2_REG_RD_MAX_BLKS_VQ9 0x12039c +/* [RW 2] PBF byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_PBF_SWAP_MODE 0x1203f4 +/* [R 1] Debug only: Indication if delivery ports are idle */ +#define PXP2_REG_RD_PORT_IS_IDLE_0 0x12041c +#define PXP2_REG_RD_PORT_IS_IDLE_1 0x120420 +/* [RW 2] QM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_QM_SWAP_MODE 0x1203f8 +/* [R 7] Debug only: The SR counter - number of unused sub request ids */ +#define PXP2_REG_RD_SR_CNT 0x120414 +/* [RW 2] SRC byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_SRC_SWAP_MODE 0x120400 +/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must + be bigger than 1. Normally should not be changed. 
*/ +#define PXP2_REG_RD_SR_NUM_CFG 0x120408 +/* [RW 1] Signals the PSWRD block to start initializing internal memories */ +#define PXP2_REG_RD_START_INIT 0x12036c +/* [RW 2] TM byte swapping mode configuration for master read requests */ +#define PXP2_REG_RD_TM_SWAP_MODE 0x1203fc +/* [RW 10] Bandwidth addition to VQ0 write requests */ +#define PXP2_REG_RQ_BW_RD_ADD0 0x1201bc +/* [RW 10] Bandwidth addition to VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD12 0x1201ec +/* [RW 10] Bandwidth addition to VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD13 0x1201f0 +/* [RW 10] Bandwidth addition to VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD14 0x1201f4 +/* [RW 10] Bandwidth addition to VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD15 0x1201f8 +/* [RW 10] Bandwidth addition to VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD16 0x1201fc +/* [RW 10] Bandwidth addition to VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD17 0x120200 +/* [RW 10] Bandwidth addition to VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD18 0x120204 +/* [RW 10] Bandwidth addition to VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD19 0x120208 +/* [RW 10] Bandwidth addition to VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD20 0x12020c +/* [RW 10] Bandwidth addition to VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD22 0x120210 +/* [RW 10] Bandwidth addition to VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD23 0x120214 +/* [RW 10] Bandwidth addition to VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD24 0x120218 +/* [RW 10] Bandwidth addition to VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD25 0x12021c +/* [RW 10] Bandwidth addition to VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD26 0x120220 +/* [RW 10] Bandwidth addition to VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD27 0x120224 +/* [RW 10] Bandwidth addition to VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD4 0x1201cc +/* [RW 10] Bandwidth addition to VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_ADD5 0x1201d0 +/* [RW 10] Bandwidth Typical L for VQ0 Read requests */ +#define PXP2_REG_RQ_BW_RD_L0 0x1202ac +/* [RW 10] Bandwidth Typical L for VQ12 Read requests */ +#define PXP2_REG_RQ_BW_RD_L12 0x1202dc +/* [RW 10] Bandwidth Typical L for VQ13 Read requests */ +#define PXP2_REG_RQ_BW_RD_L13 0x1202e0 +/* [RW 10] Bandwidth Typical L for VQ14 Read requests */ +#define PXP2_REG_RQ_BW_RD_L14 0x1202e4 +/* [RW 10] Bandwidth Typical L for VQ15 Read requests */ +#define PXP2_REG_RQ_BW_RD_L15 0x1202e8 +/* [RW 10] Bandwidth Typical L for VQ16 Read requests */ +#define PXP2_REG_RQ_BW_RD_L16 0x1202ec +/* [RW 10] Bandwidth Typical L for VQ17 Read requests */ +#define PXP2_REG_RQ_BW_RD_L17 0x1202f0 +/* [RW 10] Bandwidth Typical L for VQ18 Read requests */ +#define PXP2_REG_RQ_BW_RD_L18 0x1202f4 +/* [RW 10] Bandwidth Typical L for VQ19 Read requests */ +#define PXP2_REG_RQ_BW_RD_L19 0x1202f8 +/* [RW 10] Bandwidth Typical L for VQ20 Read requests */ +#define PXP2_REG_RQ_BW_RD_L20 0x1202fc +/* [RW 10] Bandwidth Typical L for VQ22 Read requests */ +#define PXP2_REG_RQ_BW_RD_L22 0x120300 +/* [RW 10] Bandwidth Typical L for VQ23 Read requests */ +#define PXP2_REG_RQ_BW_RD_L23 0x120304 +/* [RW 10] Bandwidth Typical L for VQ24 Read requests */ +#define PXP2_REG_RQ_BW_RD_L24 0x120308 +/* [RW 10] Bandwidth Typical L for VQ25 Read requests */ +#define PXP2_REG_RQ_BW_RD_L25 0x12030c +/* [RW 10] Bandwidth Typical L for VQ26 Read requests */ +#define PXP2_REG_RQ_BW_RD_L26 0x120310 +/* [RW 10] Bandwidth Typical L for VQ27 Read 
requests */ +#define PXP2_REG_RQ_BW_RD_L27 0x120314 +/* [RW 10] Bandwidth Typical L for VQ4 Read requests */ +#define PXP2_REG_RQ_BW_RD_L4 0x1202bc +/* [RW 10] Bandwidth Typical L for VQ5 Read- currently not used */ +#define PXP2_REG_RQ_BW_RD_L5 0x1202c0 +/* [RW 7] Bandwidth upper bound for VQ0 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND0 0x120234 +/* [RW 7] Bandwidth upper bound for VQ12 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND12 0x120264 +/* [RW 7] Bandwidth upper bound for VQ13 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND13 0x120268 +/* [RW 7] Bandwidth upper bound for VQ14 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND14 0x12026c +/* [RW 7] Bandwidth upper bound for VQ15 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND15 0x120270 +/* [RW 7] Bandwidth upper bound for VQ16 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND16 0x120274 +/* [RW 7] Bandwidth upper bound for VQ17 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND17 0x120278 +/* [RW 7] Bandwidth upper bound for VQ18 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND18 0x12027c +/* [RW 7] Bandwidth upper bound for VQ19 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND19 0x120280 +/* [RW 7] Bandwidth upper bound for VQ20 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND20 0x120284 +/* [RW 7] Bandwidth upper bound for VQ22 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND22 0x120288 +/* [RW 7] Bandwidth upper bound for VQ23 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND23 0x12028c +/* [RW 7] Bandwidth upper bound for VQ24 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND24 0x120290 +/* [RW 7] Bandwidth upper bound for VQ25 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND25 0x120294 +/* [RW 7] Bandwidth upper bound for VQ26 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND26 0x120298 +/* [RW 7] Bandwidth upper bound for VQ27 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND27 0x12029c +/* [RW 7] Bandwidth upper bound for VQ4 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND4 0x120244 +/* [RW 7] Bandwidth upper bound for VQ5 read requests */ +#define PXP2_REG_RQ_BW_RD_UBOUND5 0x120248 +/* [RW 10] Bandwidth addition to VQ29 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD29 0x12022c +/* [RW 10] Bandwidth addition to VQ30 write requests */ +#define PXP2_REG_RQ_BW_WR_ADD30 0x120230 +/* [RW 10] Bandwidth Typical L for VQ29 Write requests */ +#define PXP2_REG_RQ_BW_WR_L29 0x12031c +/* [RW 10] Bandwidth Typical L for VQ30 Write requests */ +#define PXP2_REG_RQ_BW_WR_L30 0x120320 +/* [RW 7] Bandwidth upper bound for VQ29 */ +#define PXP2_REG_RQ_BW_WR_UBOUND29 0x1202a4 +/* [RW 7] Bandwidth upper bound for VQ30 */ +#define PXP2_REG_RQ_BW_WR_UBOUND30 0x1202a8 +/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */ +#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR 0x120008 +/* [RW 2] Endian mode for cdu */ +#define PXP2_REG_RQ_CDU_ENDIAN_M 0x1201a0 +#define PXP2_REG_RQ_CDU_FIRST_ILT 0x12061c +#define PXP2_REG_RQ_CDU_LAST_ILT 0x120620 +/* [RW 3] page size in L2P table for CDU module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_CDU_P_SIZE 0x120018 +/* [R 1] 1' indicates that the requester has finished its internal + configuration */ +#define PXP2_REG_RQ_CFG_DONE 0x1201b4 +/* [RW 2] Endian mode for debug */ +#define PXP2_REG_RQ_DBG_ENDIAN_M 0x1201a4 +/* [RW 1] When '1'; requests will enter input buffers but wont get out + towards the glue */ +#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330 +/* [RW 4] Determines alignment of write SRs when a request is split into + * several 
SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. */ +#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 +/* [RW 4] Determines alignment of read SRs when a request is split into + * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B + * aligned. 4 - 512B aligned. */ +#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c +/* [RW 1] when set the new alignment method (E2) will be applied; when reset + * the original alignment method (E1 E1H) will be applied */ +#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930 +/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will + be asserted */ +#define PXP2_REG_RQ_ELT_DISABLE 0x12066c +/* [RW 2] Endian mode for hc */ +#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 +/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for back + compatibility needs; Note that different registers are used per mode */ +#define PXP2_REG_RQ_ILT_MODE 0x1205b4 +/* [WB 53] Onchip address table */ +#define PXP2_REG_RQ_ONCHIP_AT 0x122000 +/* [WB 53] Onchip address table - B0 */ +#define PXP2_REG_RQ_ONCHIP_AT_B0 0x128000 +/* [RW 13] Pending read limiter threshold; in Dwords */ +#define PXP2_REG_RQ_PDR_LIMIT 0x12033c +/* [RW 2] Endian mode for qm */ +#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 +#define PXP2_REG_RQ_QM_FIRST_ILT 0x120634 +#define PXP2_REG_RQ_QM_LAST_ILT 0x120638 +/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_QM_P_SIZE 0x120050 +/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */ +#define PXP2_REG_RQ_RBC_DONE 0x1201b0 +/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B; + 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ +#define PXP2_REG_RQ_RD_MBS0 0x120160 +/* [RW 3] Max burst size filed for read requests port 1; 000 - 128B; + 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ +#define PXP2_REG_RQ_RD_MBS1 0x120168 +/* [RW 2] Endian mode for src */ +#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c +#define PXP2_REG_RQ_SRC_FIRST_ILT 0x12063c +#define PXP2_REG_RQ_SRC_LAST_ILT 0x120640 +/* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_SRC_P_SIZE 0x12006c +/* [RW 2] Endian mode for tm */ +#define PXP2_REG_RQ_TM_ENDIAN_M 0x120198 +#define PXP2_REG_RQ_TM_FIRST_ILT 0x120644 +#define PXP2_REG_RQ_TM_LAST_ILT 0x120648 +/* [RW 3] page size in L2P table for TM module; -4k; -8k; -16k; -32k; -64k; + -128k */ +#define PXP2_REG_RQ_TM_P_SIZE 0x120034 +/* [R 5] Number of entries in the ufifo; his fifo has l2p completions */ +#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY 0x12080c +/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */ +#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR 0x120094 +/* [R 8] Number of entries occupied by vq 0 in pswrq memory */ +#define PXP2_REG_RQ_VQ0_ENTRY_CNT 0x120810 +/* [R 8] Number of entries occupied by vq 10 in pswrq memory */ +#define PXP2_REG_RQ_VQ10_ENTRY_CNT 0x120818 +/* [R 8] Number of entries occupied by vq 11 in pswrq memory */ +#define PXP2_REG_RQ_VQ11_ENTRY_CNT 0x120820 +/* [R 8] Number of entries occupied by vq 12 in pswrq memory */ +#define PXP2_REG_RQ_VQ12_ENTRY_CNT 0x120828 +/* [R 8] Number of entries occupied by vq 13 in pswrq memory */ +#define PXP2_REG_RQ_VQ13_ENTRY_CNT 0x120830 +/* [R 8] Number of entries occupied by vq 14 in pswrq memory */ +#define PXP2_REG_RQ_VQ14_ENTRY_CNT 0x120838 +/* [R 8] Number of entries occupied by vq 15 in pswrq memory */ +#define PXP2_REG_RQ_VQ15_ENTRY_CNT 0x120840 +/* [R 8] Number of entries 
occupied by vq 16 in pswrq memory */ +#define PXP2_REG_RQ_VQ16_ENTRY_CNT 0x120848 +/* [R 8] Number of entries occupied by vq 17 in pswrq memory */ +#define PXP2_REG_RQ_VQ17_ENTRY_CNT 0x120850 +/* [R 8] Number of entries occupied by vq 18 in pswrq memory */ +#define PXP2_REG_RQ_VQ18_ENTRY_CNT 0x120858 +/* [R 8] Number of entries occupied by vq 19 in pswrq memory */ +#define PXP2_REG_RQ_VQ19_ENTRY_CNT 0x120860 +/* [R 8] Number of entries occupied by vq 1 in pswrq memory */ +#define PXP2_REG_RQ_VQ1_ENTRY_CNT 0x120868 +/* [R 8] Number of entries occupied by vq 20 in pswrq memory */ +#define PXP2_REG_RQ_VQ20_ENTRY_CNT 0x120870 +/* [R 8] Number of entries occupied by vq 21 in pswrq memory */ +#define PXP2_REG_RQ_VQ21_ENTRY_CNT 0x120878 +/* [R 8] Number of entries occupied by vq 22 in pswrq memory */ +#define PXP2_REG_RQ_VQ22_ENTRY_CNT 0x120880 +/* [R 8] Number of entries occupied by vq 23 in pswrq memory */ +#define PXP2_REG_RQ_VQ23_ENTRY_CNT 0x120888 +/* [R 8] Number of entries occupied by vq 24 in pswrq memory */ +#define PXP2_REG_RQ_VQ24_ENTRY_CNT 0x120890 +/* [R 8] Number of entries occupied by vq 25 in pswrq memory */ +#define PXP2_REG_RQ_VQ25_ENTRY_CNT 0x120898 +/* [R 8] Number of entries occupied by vq 26 in pswrq memory */ +#define PXP2_REG_RQ_VQ26_ENTRY_CNT 0x1208a0 +/* [R 8] Number of entries occupied by vq 27 in pswrq memory */ +#define PXP2_REG_RQ_VQ27_ENTRY_CNT 0x1208a8 +/* [R 8] Number of entries occupied by vq 28 in pswrq memory */ +#define PXP2_REG_RQ_VQ28_ENTRY_CNT 0x1208b0 +/* [R 8] Number of entries occupied by vq 29 in pswrq memory */ +#define PXP2_REG_RQ_VQ29_ENTRY_CNT 0x1208b8 +/* [R 8] Number of entries occupied by vq 2 in pswrq memory */ +#define PXP2_REG_RQ_VQ2_ENTRY_CNT 0x1208c0 +/* [R 8] Number of entries occupied by vq 30 in pswrq memory */ +#define PXP2_REG_RQ_VQ30_ENTRY_CNT 0x1208c8 +/* [R 8] Number of entries occupied by vq 31 in pswrq memory */ +#define PXP2_REG_RQ_VQ31_ENTRY_CNT 0x1208d0 +/* [R 8] Number of entries occupied by vq 3 in pswrq memory */ +#define PXP2_REG_RQ_VQ3_ENTRY_CNT 0x1208d8 +/* [R 8] Number of entries occupied by vq 4 in pswrq memory */ +#define PXP2_REG_RQ_VQ4_ENTRY_CNT 0x1208e0 +/* [R 8] Number of entries occupied by vq 5 in pswrq memory */ +#define PXP2_REG_RQ_VQ5_ENTRY_CNT 0x1208e8 +/* [R 8] Number of entries occupied by vq 6 in pswrq memory */ +#define PXP2_REG_RQ_VQ6_ENTRY_CNT 0x1208f0 +/* [R 8] Number of entries occupied by vq 7 in pswrq memory */ +#define PXP2_REG_RQ_VQ7_ENTRY_CNT 0x1208f8 +/* [R 8] Number of entries occupied by vq 8 in pswrq memory */ +#define PXP2_REG_RQ_VQ8_ENTRY_CNT 0x120900 +/* [R 8] Number of entries occupied by vq 9 in pswrq memory */ +#define PXP2_REG_RQ_VQ9_ENTRY_CNT 0x120908 +/* [RW 3] Max burst size filed for write requests port 0; 000 - 128B; + 001:256B; 010: 512B; */ +#define PXP2_REG_RQ_WR_MBS0 0x12015c +/* [RW 3] Max burst size filed for write requests port 1; 000 - 128B; + 001:256B; 010: 512B; */ +#define PXP2_REG_RQ_WR_MBS1 0x120164 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_CDU_MPS 0x1205f0 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_CSDM_MPS 0x1205d0 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_DBG_MPS 0x1205e8 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + 
buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_DMAE_MPS 0x1205ec +/* [RW 10] if Number of entries in dmae fifo will be higher than this + threshold then has_payload indication will be asserted; the default value + should be equal to > write MBS size! */ +#define PXP2_REG_WR_DMAE_TH 0x120368 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_HC_MPS 0x1205c8 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_QM_MPS 0x1205dc +/* [RW 1] 0 - working in A0 mode; - working in B0 mode */ +#define PXP2_REG_WR_REV_MODE 0x120670 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_SRC_MPS 0x1205e4 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_TM_MPS 0x1205e0 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_TSDM_MPS 0x1205d4 +/* [RW 10] if Number of entries in usdmdp fifo will be higher than this + threshold then has_payload indication will be asserted; the default value + should be equal to > write MBS size! */ +#define PXP2_REG_WR_USDMDP_TH 0x120348 +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_USDM_MPS 0x1205cc +/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the + buffer reaches this number has_payload will be asserted */ +#define PXP2_REG_WR_XSDM_MPS 0x1205d8 +/* [R 1] debug only: Indication if PSWHST arbiter is idle */ +#define PXP_REG_HST_ARB_IS_IDLE 0x103004 +/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means + this client is waiting for the arbiter. */ +#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 +/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue + block. Should be used for close the gates. */ +#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4 +/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit + should update according to 'hst_discard_doorbells' register when the state + machine is idle */ +#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 +/* [RW 1] When 1; new internal writes arriving to the block are discarded. + Should be used for close the gates. */ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8 +/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' + means this PSWHST is discarding inputs from this client. Each bit should + update according to 'hst_discard_internal_writes' register when the state + machine is idle. */ +#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c +/* [WB 160] Used for initialization of the inbound interrupts memory */ +#define PXP_REG_HST_INBOUND_INT 0x103800 +/* [RW 7] Indirect access to the permission table. 
The fields are : {Valid; + * VFID[5:0]} + */ +#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400 +/* [RW 32] Interrupt mask register #0 read/write */ +#define PXP_REG_PXP_INT_MASK_0 0x103074 +#define PXP_REG_PXP_INT_MASK_1 0x103084 +/* [R 32] Interrupt register #0 read */ +#define PXP_REG_PXP_INT_STS_0 0x103068 +#define PXP_REG_PXP_INT_STS_1 0x103078 +/* [RC 32] Interrupt register #0 read clear */ +#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c +#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c +/* [RW 27] Parity mask register #0 read/write */ +#define PXP_REG_PXP_PRTY_MASK 0x103094 +/* [R 26] Parity register #0 read */ +#define PXP_REG_PXP_PRTY_STS 0x103088 +/* [RC 27] Parity register #0 read clear */ +#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c +/* [RW 4] The activity counter initial increment value sent in the load + request */ +#define QM_REG_ACTCTRINITVAL_0 0x168040 +#define QM_REG_ACTCTRINITVAL_1 0x168044 +#define QM_REG_ACTCTRINITVAL_2 0x168048 +#define QM_REG_ACTCTRINITVAL_3 0x16804c +/* [RW 32] The base logical address (in bytes) of each physical queue. The + index I represents the physical queue number. The 12 lsbs are ignore and + considered zero so practically there are only 20 bits in this register; + queues 63-0 */ +#define QM_REG_BASEADDR 0x168900 +/* [RW 32] The base logical address (in bytes) of each physical queue. The + index I represents the physical queue number. The 12 lsbs are ignore and + considered zero so practically there are only 20 bits in this register; + queues 127-64 */ +#define QM_REG_BASEADDR_EXT_A 0x16e100 +/* [RW 16] The byte credit cost for each task. This value is for both ports */ +#define QM_REG_BYTECRDCOST 0x168234 +/* [RW 16] The initial byte credit value for both ports. */ +#define QM_REG_BYTECRDINITVAL 0x168238 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 31-0 */ +#define QM_REG_BYTECRDPORT_LSB 0x168228 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 95-64 */ +#define QM_REG_BYTECRDPORT_LSB_EXT_A 0x16e520 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 63-32 */ +#define QM_REG_BYTECRDPORT_MSB 0x168224 +/* [RW 32] A bit per physical queue. If the bit is cleared then the physical + queue uses port 0 else it uses port 1; queues 127-96 */ +#define QM_REG_BYTECRDPORT_MSB_EXT_A 0x16e51c +/* [RW 16] The byte credit value that if above the QM is considered almost + full */ +#define QM_REG_BYTECREDITAFULLTHR 0x168094 +/* [RW 4] The initial credit for interface */ +#define QM_REG_CMINITCRD_0 0x1680cc +#define QM_REG_BYTECRDCMDQ_0 0x16e6e8 +#define QM_REG_CMINITCRD_1 0x1680d0 +#define QM_REG_CMINITCRD_2 0x1680d4 +#define QM_REG_CMINITCRD_3 0x1680d8 +#define QM_REG_CMINITCRD_4 0x1680dc +#define QM_REG_CMINITCRD_5 0x1680e0 +#define QM_REG_CMINITCRD_6 0x1680e4 +#define QM_REG_CMINITCRD_7 0x1680e8 +/* [RW 8] A mask bit per CM interface. 
If this bit is 0 then this interface + is masked */ +#define QM_REG_CMINTEN 0x1680ec +/* [RW 12] A bit vector which indicates which one of the queues are tied to + interface 0 */ +#define QM_REG_CMINTVOQMASK_0 0x1681f4 +#define QM_REG_CMINTVOQMASK_1 0x1681f8 +#define QM_REG_CMINTVOQMASK_2 0x1681fc +#define QM_REG_CMINTVOQMASK_3 0x168200 +#define QM_REG_CMINTVOQMASK_4 0x168204 +#define QM_REG_CMINTVOQMASK_5 0x168208 +#define QM_REG_CMINTVOQMASK_6 0x16820c +#define QM_REG_CMINTVOQMASK_7 0x168210 +/* [RW 20] The number of connections divided by 16 which dictates the size + of each queue which belongs to even function number. */ +#define QM_REG_CONNNUM_0 0x168020 +/* [R 6] Keep the fill level of the fifo from write client 4 */ +#define QM_REG_CQM_WRC_FIFOLVL 0x168018 +/* [RW 8] The context regions sent in the CFC load request */ +#define QM_REG_CTXREG_0 0x168030 +#define QM_REG_CTXREG_1 0x168034 +#define QM_REG_CTXREG_2 0x168038 +#define QM_REG_CTXREG_3 0x16803c +/* [RW 12] The VOQ mask used to select the VOQs which needs to be full for + bypass enable */ +#define QM_REG_ENBYPVOQMASK 0x16823c +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 31-0 */ +#define QM_REG_ENBYTECRD_LSB 0x168220 +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 95-64 */ +#define QM_REG_ENBYTECRD_LSB_EXT_A 0x16e518 +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 63-32 */ +#define QM_REG_ENBYTECRD_MSB 0x16821c +/* [RW 32] A bit mask per each physical queue. If a bit is set then the + physical queue uses the byte credit; queues 127-96 */ +#define QM_REG_ENBYTECRD_MSB_EXT_A 0x16e514 +/* [RW 4] If cleared then the secondary interface will not be served by the + RR arbiter */ +#define QM_REG_ENSEC 0x1680f0 +/* [RW 32] NA */ +#define QM_REG_FUNCNUMSEL_LSB 0x168230 +/* [RW 32] NA */ +#define QM_REG_FUNCNUMSEL_MSB 0x16822c +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 31:0 */ +#define QM_REG_HWAEMPTYMASK_LSB 0x168218 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 95-64 */ +#define QM_REG_HWAEMPTYMASK_LSB_EXT_A 0x16e510 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 63:32 */ +#define QM_REG_HWAEMPTYMASK_MSB 0x168214 +/* [RW 32] A mask register to mask the Almost empty signals which will not + be use for the almost empty indication to the HW block; queues 127-96 */ +#define QM_REG_HWAEMPTYMASK_MSB_EXT_A 0x16e50c +/* [RW 4] The number of outstanding request to CFC */ +#define QM_REG_OUTLDREQ 0x168804 +/* [RC 1] A flag to indicate that overflow error occurred in one of the + queues. 
*/ +#define QM_REG_OVFERROR 0x16805c +/* [RC 7] the Q where the overflow occurs */ +#define QM_REG_OVFQNUM 0x168058 +/* [R 16] Pause state for physical queues 15-0 */ +#define QM_REG_PAUSESTATE0 0x168410 +/* [R 16] Pause state for physical queues 31-16 */ +#define QM_REG_PAUSESTATE1 0x168414 +/* [R 16] Pause state for physical queues 47-32 */ +#define QM_REG_PAUSESTATE2 0x16e684 +/* [R 16] Pause state for physical queues 63-48 */ +#define QM_REG_PAUSESTATE3 0x16e688 +/* [R 16] Pause state for physical queues 79-64 */ +#define QM_REG_PAUSESTATE4 0x16e68c +/* [R 16] Pause state for physical queues 95-80 */ +#define QM_REG_PAUSESTATE5 0x16e690 +/* [R 16] Pause state for physical queues 111-96 */ +#define QM_REG_PAUSESTATE6 0x16e694 +/* [R 16] Pause state for physical queues 127-112 */ +#define QM_REG_PAUSESTATE7 0x16e698 +/* [RW 2] The PCI attributes field used in the PCI request. */ +#define QM_REG_PCIREQAT 0x168054 +#define QM_REG_PF_EN 0x16e70c +/* [R 24] The number of tasks stored in the QM for the PF. only even + * functions are valid in E2 (odd I registers will be hard wired to 0) */ +#define QM_REG_PF_USG_CNT_0 0x16e040 +/* [R 16] NOT USED */ +#define QM_REG_PORT0BYTECRD 0x168300 +/* [R 16] The byte credit of port 1 */ +#define QM_REG_PORT1BYTECRD 0x168304 +/* [RW 3] pci function number of queues 15-0 */ +#define QM_REG_PQ2PCIFUNC_0 0x16e6bc +#define QM_REG_PQ2PCIFUNC_1 0x16e6c0 +#define QM_REG_PQ2PCIFUNC_2 0x16e6c4 +#define QM_REG_PQ2PCIFUNC_3 0x16e6c8 +#define QM_REG_PQ2PCIFUNC_4 0x16e6cc +#define QM_REG_PQ2PCIFUNC_5 0x16e6d0 +#define QM_REG_PQ2PCIFUNC_6 0x16e6d4 +#define QM_REG_PQ2PCIFUNC_7 0x16e6d8 +/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as follow: + ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read + bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ +#define QM_REG_PTRTBL 0x168a00 +/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as follow: + ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer; ptrtbl[5:4] read + bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write bank; */ +#define QM_REG_PTRTBL_EXT_A 0x16e200 +/* [RW 2] Interrupt mask register #0 read/write */ +#define QM_REG_QM_INT_MASK 0x168444 +/* [R 2] Interrupt register #0 read */ +#define QM_REG_QM_INT_STS 0x168438 +/* [RW 12] Parity mask register #0 read/write */ +#define QM_REG_QM_PRTY_MASK 0x168454 +/* [R 12] Parity register #0 read */ +#define QM_REG_QM_PRTY_STS 0x168448 +/* [RC 12] Parity register #0 read clear */ +#define QM_REG_QM_PRTY_STS_CLR 0x16844c +/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ +#define QM_REG_QSTATUS_HIGH 0x16802c +/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ +#define QM_REG_QSTATUS_HIGH_EXT_A 0x16e408 +/* [R 32] Current queues in pipeline: Queues from 0 to 31 */ +#define QM_REG_QSTATUS_LOW 0x168028 +/* [R 32] Current queues in pipeline: Queues from 64 to 95 */ +#define QM_REG_QSTATUS_LOW_EXT_A 0x16e404 +/* [R 24] The number of tasks queued for each queue; queues 63-0 */ +#define QM_REG_QTASKCTR_0 0x168308 +/* [R 24] The number of tasks queued for each queue; queues 127-64 */ +#define QM_REG_QTASKCTR_EXT_A_0 0x16e584 +/* [RW 4] Queue tied to VOQ */ +#define QM_REG_QVOQIDX_0 0x1680f4 +#define QM_REG_QVOQIDX_10 0x16811c +#define QM_REG_QVOQIDX_100 0x16e49c +#define QM_REG_QVOQIDX_101 0x16e4a0 +#define QM_REG_QVOQIDX_102 0x16e4a4 +#define QM_REG_QVOQIDX_103 0x16e4a8 +#define QM_REG_QVOQIDX_104 0x16e4ac +#define QM_REG_QVOQIDX_105 0x16e4b0 +#define QM_REG_QVOQIDX_106 0x16e4b4 +#define 
QM_REG_QVOQIDX_107 0x16e4b8 +#define QM_REG_QVOQIDX_108 0x16e4bc +#define QM_REG_QVOQIDX_109 0x16e4c0 +#define QM_REG_QVOQIDX_11 0x168120 +#define QM_REG_QVOQIDX_110 0x16e4c4 +#define QM_REG_QVOQIDX_111 0x16e4c8 +#define QM_REG_QVOQIDX_112 0x16e4cc +#define QM_REG_QVOQIDX_113 0x16e4d0 +#define QM_REG_QVOQIDX_114 0x16e4d4 +#define QM_REG_QVOQIDX_115 0x16e4d8 +#define QM_REG_QVOQIDX_116 0x16e4dc +#define QM_REG_QVOQIDX_117 0x16e4e0 +#define QM_REG_QVOQIDX_118 0x16e4e4 +#define QM_REG_QVOQIDX_119 0x16e4e8 +#define QM_REG_QVOQIDX_12 0x168124 +#define QM_REG_QVOQIDX_120 0x16e4ec +#define QM_REG_QVOQIDX_121 0x16e4f0 +#define QM_REG_QVOQIDX_122 0x16e4f4 +#define QM_REG_QVOQIDX_123 0x16e4f8 +#define QM_REG_QVOQIDX_124 0x16e4fc +#define QM_REG_QVOQIDX_125 0x16e500 +#define QM_REG_QVOQIDX_126 0x16e504 +#define QM_REG_QVOQIDX_127 0x16e508 +#define QM_REG_QVOQIDX_13 0x168128 +#define QM_REG_QVOQIDX_14 0x16812c +#define QM_REG_QVOQIDX_15 0x168130 +#define QM_REG_QVOQIDX_16 0x168134 +#define QM_REG_QVOQIDX_17 0x168138 +#define QM_REG_QVOQIDX_21 0x168148 +#define QM_REG_QVOQIDX_22 0x16814c +#define QM_REG_QVOQIDX_23 0x168150 +#define QM_REG_QVOQIDX_24 0x168154 +#define QM_REG_QVOQIDX_25 0x168158 +#define QM_REG_QVOQIDX_26 0x16815c +#define QM_REG_QVOQIDX_27 0x168160 +#define QM_REG_QVOQIDX_28 0x168164 +#define QM_REG_QVOQIDX_29 0x168168 +#define QM_REG_QVOQIDX_30 0x16816c +#define QM_REG_QVOQIDX_31 0x168170 +#define QM_REG_QVOQIDX_32 0x168174 +#define QM_REG_QVOQIDX_33 0x168178 +#define QM_REG_QVOQIDX_34 0x16817c +#define QM_REG_QVOQIDX_35 0x168180 +#define QM_REG_QVOQIDX_36 0x168184 +#define QM_REG_QVOQIDX_37 0x168188 +#define QM_REG_QVOQIDX_38 0x16818c +#define QM_REG_QVOQIDX_39 0x168190 +#define QM_REG_QVOQIDX_40 0x168194 +#define QM_REG_QVOQIDX_41 0x168198 +#define QM_REG_QVOQIDX_42 0x16819c +#define QM_REG_QVOQIDX_43 0x1681a0 +#define QM_REG_QVOQIDX_44 0x1681a4 +#define QM_REG_QVOQIDX_45 0x1681a8 +#define QM_REG_QVOQIDX_46 0x1681ac +#define QM_REG_QVOQIDX_47 0x1681b0 +#define QM_REG_QVOQIDX_48 0x1681b4 +#define QM_REG_QVOQIDX_49 0x1681b8 +#define QM_REG_QVOQIDX_5 0x168108 +#define QM_REG_QVOQIDX_50 0x1681bc +#define QM_REG_QVOQIDX_51 0x1681c0 +#define QM_REG_QVOQIDX_52 0x1681c4 +#define QM_REG_QVOQIDX_53 0x1681c8 +#define QM_REG_QVOQIDX_54 0x1681cc +#define QM_REG_QVOQIDX_55 0x1681d0 +#define QM_REG_QVOQIDX_56 0x1681d4 +#define QM_REG_QVOQIDX_57 0x1681d8 +#define QM_REG_QVOQIDX_58 0x1681dc +#define QM_REG_QVOQIDX_59 0x1681e0 +#define QM_REG_QVOQIDX_6 0x16810c +#define QM_REG_QVOQIDX_60 0x1681e4 +#define QM_REG_QVOQIDX_61 0x1681e8 +#define QM_REG_QVOQIDX_62 0x1681ec +#define QM_REG_QVOQIDX_63 0x1681f0 +#define QM_REG_QVOQIDX_64 0x16e40c +#define QM_REG_QVOQIDX_65 0x16e410 +#define QM_REG_QVOQIDX_69 0x16e420 +#define QM_REG_QVOQIDX_7 0x168110 +#define QM_REG_QVOQIDX_70 0x16e424 +#define QM_REG_QVOQIDX_71 0x16e428 +#define QM_REG_QVOQIDX_72 0x16e42c +#define QM_REG_QVOQIDX_73 0x16e430 +#define QM_REG_QVOQIDX_74 0x16e434 +#define QM_REG_QVOQIDX_75 0x16e438 +#define QM_REG_QVOQIDX_76 0x16e43c +#define QM_REG_QVOQIDX_77 0x16e440 +#define QM_REG_QVOQIDX_78 0x16e444 +#define QM_REG_QVOQIDX_79 0x16e448 +#define QM_REG_QVOQIDX_8 0x168114 +#define QM_REG_QVOQIDX_80 0x16e44c +#define QM_REG_QVOQIDX_81 0x16e450 +#define QM_REG_QVOQIDX_85 0x16e460 +#define QM_REG_QVOQIDX_86 0x16e464 +#define QM_REG_QVOQIDX_87 0x16e468 +#define QM_REG_QVOQIDX_88 0x16e46c +#define QM_REG_QVOQIDX_89 0x16e470 +#define QM_REG_QVOQIDX_9 0x168118 +#define QM_REG_QVOQIDX_90 0x16e474 +#define QM_REG_QVOQIDX_91 0x16e478 +#define 
QM_REG_QVOQIDX_92 0x16e47c +#define QM_REG_QVOQIDX_93 0x16e480 +#define QM_REG_QVOQIDX_94 0x16e484 +#define QM_REG_QVOQIDX_95 0x16e488 +#define QM_REG_QVOQIDX_96 0x16e48c +#define QM_REG_QVOQIDX_97 0x16e490 +#define QM_REG_QVOQIDX_98 0x16e494 +#define QM_REG_QVOQIDX_99 0x16e498 +/* [RW 1] Initialization bit command */ +#define QM_REG_SOFT_RESET 0x168428 +/* [RW 8] The credit cost per every task in the QM. A value per each VOQ */ +#define QM_REG_TASKCRDCOST_0 0x16809c +#define QM_REG_TASKCRDCOST_1 0x1680a0 +#define QM_REG_TASKCRDCOST_2 0x1680a4 +#define QM_REG_TASKCRDCOST_4 0x1680ac +#define QM_REG_TASKCRDCOST_5 0x1680b0 +/* [R 6] Keep the fill level of the fifo from write client 3 */ +#define QM_REG_TQM_WRC_FIFOLVL 0x168010 +/* [R 6] Keep the fill level of the fifo from write client 2 */ +#define QM_REG_UQM_WRC_FIFOLVL 0x168008 +/* [RC 32] Credit update error register */ +#define QM_REG_VOQCRDERRREG 0x168408 +/* [R 16] The credit value for each VOQ */ +#define QM_REG_VOQCREDIT_0 0x1682d0 +#define QM_REG_VOQCREDIT_1 0x1682d4 +#define QM_REG_VOQCREDIT_4 0x1682e0 +/* [RW 16] The credit value that if above the QM is considered almost full */ +#define QM_REG_VOQCREDITAFULLTHR 0x168090 +/* [RW 16] The init and maximum credit for each VoQ */ +#define QM_REG_VOQINITCREDIT_0 0x168060 +#define QM_REG_VOQINITCREDIT_1 0x168064 +#define QM_REG_VOQINITCREDIT_2 0x168068 +#define QM_REG_VOQINITCREDIT_4 0x168070 +#define QM_REG_VOQINITCREDIT_5 0x168074 +/* [RW 1] The port of which VOQ belongs */ +#define QM_REG_VOQPORT_0 0x1682a0 +#define QM_REG_VOQPORT_1 0x1682a4 +#define QM_REG_VOQPORT_2 0x1682a8 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_0_LSB 0x168240 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_0_LSB_EXT_A 0x16e524 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_0_MSB 0x168244 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_0_MSB_EXT_A 0x16e528 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_10_LSB 0x168290 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_10_LSB_EXT_A 0x16e574 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_10_MSB 0x168294 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_10_MSB_EXT_A 0x16e578 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_11_LSB 0x168298 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_11_LSB_EXT_A 0x16e57c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_11_MSB 0x16829c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_11_MSB_EXT_A 0x16e580 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_1_LSB 0x168248 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_1_LSB_EXT_A 0x16e52c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_1_MSB 0x16824c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define 
QM_REG_VOQQMASK_1_MSB_EXT_A 0x16e530 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_2_LSB 0x168250 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_2_LSB_EXT_A 0x16e534 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_2_MSB 0x168254 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_2_MSB_EXT_A 0x16e538 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_3_LSB 0x168258 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_3_LSB_EXT_A 0x16e53c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_3_MSB_EXT_A 0x16e540 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_4_LSB 0x168260 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_4_LSB_EXT_A 0x16e544 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_4_MSB 0x168264 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_4_MSB_EXT_A 0x16e548 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_5_LSB 0x168268 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_5_LSB_EXT_A 0x16e54c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_5_MSB 0x16826c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_5_MSB_EXT_A 0x16e550 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_6_LSB 0x168270 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_6_LSB_EXT_A 0x16e554 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_6_MSB 0x168274 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_6_MSB_EXT_A 0x16e558 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_7_LSB 0x168278 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_7_LSB_EXT_A 0x16e55c +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_7_MSB 0x16827c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_7_MSB_EXT_A 0x16e560 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_8_LSB 0x168280 +/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */ +#define QM_REG_VOQQMASK_8_LSB_EXT_A 0x16e564 +/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */ +#define QM_REG_VOQQMASK_8_MSB 0x168284 +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_8_MSB_EXT_A 0x16e568 +/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */ +#define QM_REG_VOQQMASK_9_LSB 0x168288 +/* [RW 32] The physical queue number associated with each VOQ; queues 
95-64 */ +#define QM_REG_VOQQMASK_9_LSB_EXT_A 0x16e56c +/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */ +#define QM_REG_VOQQMASK_9_MSB_EXT_A 0x16e570 +/* [RW 32] Wrr weights */ +#define QM_REG_WRRWEIGHTS_0 0x16880c +#define QM_REG_WRRWEIGHTS_1 0x168810 +#define QM_REG_WRRWEIGHTS_10 0x168814 +#define QM_REG_WRRWEIGHTS_11 0x168818 +#define QM_REG_WRRWEIGHTS_12 0x16881c +#define QM_REG_WRRWEIGHTS_13 0x168820 +#define QM_REG_WRRWEIGHTS_14 0x168824 +#define QM_REG_WRRWEIGHTS_15 0x168828 +#define QM_REG_WRRWEIGHTS_16 0x16e000 +#define QM_REG_WRRWEIGHTS_17 0x16e004 +#define QM_REG_WRRWEIGHTS_18 0x16e008 +#define QM_REG_WRRWEIGHTS_19 0x16e00c +#define QM_REG_WRRWEIGHTS_2 0x16882c +#define QM_REG_WRRWEIGHTS_20 0x16e010 +#define QM_REG_WRRWEIGHTS_21 0x16e014 +#define QM_REG_WRRWEIGHTS_22 0x16e018 +#define QM_REG_WRRWEIGHTS_23 0x16e01c +#define QM_REG_WRRWEIGHTS_24 0x16e020 +#define QM_REG_WRRWEIGHTS_25 0x16e024 +#define QM_REG_WRRWEIGHTS_26 0x16e028 +#define QM_REG_WRRWEIGHTS_27 0x16e02c +#define QM_REG_WRRWEIGHTS_28 0x16e030 +#define QM_REG_WRRWEIGHTS_29 0x16e034 +#define QM_REG_WRRWEIGHTS_3 0x168830 +#define QM_REG_WRRWEIGHTS_30 0x16e038 +#define QM_REG_WRRWEIGHTS_31 0x16e03c +#define QM_REG_WRRWEIGHTS_4 0x168834 +#define QM_REG_WRRWEIGHTS_5 0x168838 +#define QM_REG_WRRWEIGHTS_6 0x16883c +#define QM_REG_WRRWEIGHTS_7 0x168840 +#define QM_REG_WRRWEIGHTS_8 0x168844 +#define QM_REG_WRRWEIGHTS_9 0x168848 +/* [R 6] Keep the fill level of the fifo from write client 1 */ +#define QM_REG_XQM_WRC_FIFOLVL 0x168000 +/* [W 1] reset to parity interrupt */ +#define SEM_FAST_REG_PARITY_RST 0x18840 +#define SRC_REG_COUNTFREE0 0x40500 +/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two + ports. If set the searcher support 8 functions. */ +#define SRC_REG_E1HMF_ENABLE 0x404cc +#define SRC_REG_FIRSTFREE0 0x40510 +#define SRC_REG_KEYRSS0_0 0x40408 +#define SRC_REG_KEYRSS0_7 0x40424 +#define SRC_REG_KEYRSS1_9 0x40454 +#define SRC_REG_KEYSEARCH_0 0x40458 +#define SRC_REG_KEYSEARCH_1 0x4045c +#define SRC_REG_KEYSEARCH_2 0x40460 +#define SRC_REG_KEYSEARCH_3 0x40464 +#define SRC_REG_KEYSEARCH_4 0x40468 +#define SRC_REG_KEYSEARCH_5 0x4046c +#define SRC_REG_KEYSEARCH_6 0x40470 +#define SRC_REG_KEYSEARCH_7 0x40474 +#define SRC_REG_KEYSEARCH_8 0x40478 +#define SRC_REG_KEYSEARCH_9 0x4047c +#define SRC_REG_LASTFREE0 0x40530 +#define SRC_REG_NUMBER_HASH_BITS0 0x40400 +/* [RW 1] Reset internal state machines. */ +#define SRC_REG_SOFT_RST 0x4049c +/* [R 3] Interrupt register #0 read */ +#define SRC_REG_SRC_INT_STS 0x404ac +/* [RW 3] Parity mask register #0 read/write */ +#define SRC_REG_SRC_PRTY_MASK 0x404c8 +/* [R 3] Parity register #0 read */ +#define SRC_REG_SRC_PRTY_STS 0x404bc +/* [RC 3] Parity register #0 read clear */ +#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 +/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ +#define TCM_REG_CAM_OCCUP 0x5017c +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define TCM_REG_CDU_AG_RD_IFEN 0x50034 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define TCM_REG_CDU_AG_WR_IFEN 0x50030 +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. 
*/ +#define TCM_REG_CDU_SM_RD_IFEN 0x5003c +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define TCM_REG_CDU_SM_WR_IFEN 0x50038 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define TCM_REG_CFC_INIT_CRD 0x50204 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_CP_WEIGHT 0x500c0 +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_CSEM_IFEN 0x5002c +/* [RC 1] Message length mismatch (relative to last indication) at the In#9 + interface. */ +#define TCM_REG_CSEM_LENGTH_MIS 0x50174 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_CSEM_WEIGHT 0x500bc +/* [RW 8] The Event ID in case of ErrorFlg is set in the input message. */ +#define TCM_REG_ERR_EVNT_ID 0x500a0 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define TCM_REG_ERR_TCM_HDR 0x5009c +/* [RW 8] The Event ID for Timers expiration. */ +#define TCM_REG_EXPR_EVNT_ID 0x500a4 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define TCM_REG_FIC0_INIT_CRD 0x5020c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define TCM_REG_FIC1_INIT_CRD 0x50210 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr; + ~tcm_registers_gr_ld0_pr.gr_ld0_pr and + ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */ +#define TCM_REG_GR_ARB_TYPE 0x50114 +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel is the + compliment of the other 3 groups. */ +#define TCM_REG_GR_LD0_PR 0x5011c +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel is the + compliment of the other 3 groups. */ +#define TCM_REG_GR_LD1_PR 0x50120 +/* [RW 4] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. The double REG-pairs are + used to align to STORM context row size of 128 bits. The offset of these + data in the STORM context is always 0. Index _i stands for the connection + type (one of 16). */ +#define TCM_REG_N_SM_CTX_LD_0 0x50050 +#define TCM_REG_N_SM_CTX_LD_1 0x50054 +#define TCM_REG_N_SM_CTX_LD_2 0x50058 +#define TCM_REG_N_SM_CTX_LD_3 0x5005c +#define TCM_REG_N_SM_CTX_LD_4 0x50060 +#define TCM_REG_N_SM_CTX_LD_5 0x50064 +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define TCM_REG_PBF_IFEN 0x50024 +/* [RC 1] Message length mismatch (relative to last indication) at the In#7 + interface. */ +#define TCM_REG_PBF_LENGTH_MIS 0x5016c +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_PBF_WEIGHT 0x500b4 +#define TCM_REG_PHYS_QNUM0_0 0x500e0 +#define TCM_REG_PHYS_QNUM0_1 0x500e4 +#define TCM_REG_PHYS_QNUM1_0 0x500e8 +#define TCM_REG_PHYS_QNUM1_1 0x500ec +#define TCM_REG_PHYS_QNUM2_0 0x500f0 +#define TCM_REG_PHYS_QNUM2_1 0x500f4 +#define TCM_REG_PHYS_QNUM3_0 0x500f8 +#define TCM_REG_PHYS_QNUM3_1 0x500fc +/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_PRS_IFEN 0x50020 +/* [RC 1] Message length mismatch (relative to last indication) at the In#6 + interface. */ +#define TCM_REG_PRS_LENGTH_MIS 0x50168 +/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_PRS_WEIGHT 0x500b0 +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define TCM_REG_STOP_EVNT_ID 0x500a8 +/* [RC 1] Message length mismatch (relative to last indication) at the STORM + interface. */ +#define TCM_REG_STORM_LENGTH_MIS 0x50160 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_STORM_TCM_IFEN 0x50010 +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_STORM_WEIGHT 0x500ac +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_CFC_IFEN 0x50040 +/* [RW 11] Interrupt mask register #0 read/write */ +#define TCM_REG_TCM_INT_MASK 0x501dc +/* [R 11] Interrupt register #0 read */ +#define TCM_REG_TCM_INT_STS 0x501d0 +/* [RW 27] Parity mask register #0 read/write */ +#define TCM_REG_TCM_PRTY_MASK 0x501ec +/* [R 27] Parity register #0 read */ +#define TCM_REG_TCM_PRTY_STS 0x501e0 +/* [RC 27] Parity register #0 read clear */ +#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 +/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the input message Reg1WbFlg isn't set. */ +#define TCM_REG_TCM_REG0_SZ 0x500d8 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_STORM0_IFEN 0x50004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TCM_STORM1_IFEN 0x50008 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define TCM_REG_TCM_TQM_IFEN 0x5000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define TCM_REG_TCM_TQM_USE_Q 0x500d4 +/* [RW 28] The CM header for Timers expiration command. */ +#define TCM_REG_TM_TCM_HDR 0x50098 +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_TM_TCM_IFEN 0x5001c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TM_WEIGHT 0x500d0 +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define TCM_REG_TQM_INIT_CRD 0x5021c +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TQM_P_WEIGHT 0x500c8 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TQM_S_WEIGHT 0x500cc +/* [RW 28] The CM header value for QM request (primary). */ +#define TCM_REG_TQM_TCM_HDR_P 0x50090 +/* [RW 28] The CM header value for QM request (secondary). */ +#define TCM_REG_TQM_TCM_HDR_S 0x50094 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TQM_TCM_IFEN 0x50014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define TCM_REG_TSDM_IFEN 0x50018 +/* [RC 1] Message length mismatch (relative to last indication) at the SDM + interface. */ +#define TCM_REG_TSDM_LENGTH_MIS 0x50164 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_TSDM_WEIGHT 0x500c4 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define TCM_REG_USEM_IFEN 0x50028 +/* [RC 1] Message length mismatch (relative to last indication) at the In#8 + interface. */ +#define TCM_REG_USEM_LENGTH_MIS 0x50170 +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define TCM_REG_USEM_WEIGHT 0x500b8 +/* [RW 21] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - length of the message; 15:6] - message + pointer; 20:16] - next pointer. */ +#define TCM_REG_XX_DESCR_TABLE 0x50280 +#define TCM_REG_XX_DESCR_TABLE_SIZE 29 +/* [R 6] Use to read the value of XX protection Free counter. */ +#define TCM_REG_XX_FREE 0x50178 +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. 
Max credit available - 127.Write writes the initial credit + value; read returns the current value of the credit counter. Must be + initialized to 19 at start-up. */ +#define TCM_REG_XX_INIT_CRD 0x50220 +/* [RW 6] Maximum link list size (messages locked) per connection in the XX + protection. */ +#define TCM_REG_XX_MAX_LL_SZ 0x50044 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~tcm_registers_xx_free.xx_free is read on read. */ +#define TCM_REG_XX_MSG_NUM 0x50224 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define TCM_REG_XX_OVFL_EVNT_ID 0x50048 +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are:[4:0] - tail pointer; [10:5] - Link List size; 15:11] - + header pointer. */ +#define TCM_REG_XX_TABLE 0x50240 +/* [RW 4] Load value for cfc ac credit cnt. */ +#define TM_REG_CFC_AC_CRDCNT_VAL 0x164208 +/* [RW 4] Load value for cfc cld credit cnt. */ +#define TM_REG_CFC_CLD_CRDCNT_VAL 0x164210 +/* [RW 8] Client0 context region. */ +#define TM_REG_CL0_CONT_REGION 0x164030 +/* [RW 8] Client1 context region. */ +#define TM_REG_CL1_CONT_REGION 0x164034 +/* [RW 8] Client2 context region. */ +#define TM_REG_CL2_CONT_REGION 0x164038 +/* [RW 2] Client in High priority client number. */ +#define TM_REG_CLIN_PRIOR0_CLIENT 0x164024 +/* [RW 4] Load value for clout0 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT0_VAL 0x164220 +/* [RW 4] Load value for clout1 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT1_VAL 0x164228 +/* [RW 4] Load value for clout2 cred cnt. */ +#define TM_REG_CLOUT_CRDCNT2_VAL 0x164230 +/* [RW 1] Enable client0 input. */ +#define TM_REG_EN_CL0_INPUT 0x164008 +/* [RW 1] Enable client1 input. */ +#define TM_REG_EN_CL1_INPUT 0x16400c +/* [RW 1] Enable client2 input. */ +#define TM_REG_EN_CL2_INPUT 0x164010 +#define TM_REG_EN_LINEAR0_TIMER 0x164014 +/* [RW 1] Enable real time counter. */ +#define TM_REG_EN_REAL_TIME_CNT 0x1640d8 +/* [RW 1] Enable for Timers state machines. */ +#define TM_REG_EN_TIMERS 0x164000 +/* [RW 4] Load value for expiration credit cnt. CFC max number of + outstanding load requests for timers (expiration) context loading. */ +#define TM_REG_EXP_CRDCNT_VAL 0x164238 +/* [RW 32] Linear0 logic address. */ +#define TM_REG_LIN0_LOGIC_ADDR 0x164240 +/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ +#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 +/* [ST 16] Linear0 Number of scans counter. */ +#define TM_REG_LIN0_NUM_SCANS 0x1640a0 +/* [WB 64] Linear0 phy address. */ +#define TM_REG_LIN0_PHY_ADDR 0x164270 +/* [RW 1] Linear0 physical address valid. */ +#define TM_REG_LIN0_PHY_ADDR_VALID 0x164248 +#define TM_REG_LIN0_SCAN_ON 0x1640d0 +/* [RW 24] Linear0 array scan timeout. */ +#define TM_REG_LIN0_SCAN_TIME 0x16403c +#define TM_REG_LIN0_VNIC_UC 0x164128 +/* [RW 32] Linear1 logic address. */ +#define TM_REG_LIN1_LOGIC_ADDR 0x164250 +/* [WB 64] Linear1 phy address. */ +#define TM_REG_LIN1_PHY_ADDR 0x164280 +/* [RW 1] Linear1 physical address valid. */ +#define TM_REG_LIN1_PHY_ADDR_VALID 0x164258 +/* [RW 6] Linear timer set_clear fifo threshold. */ +#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR 0x164070 +/* [RW 2] Load value for pci arbiter credit cnt. */ +#define TM_REG_PCIARB_CRDCNT_VAL 0x164260 +/* [RW 20] The amount of hardware cycles for each timer tick. */ +#define TM_REG_TIMER_TICK_SIZE 0x16401c +/* [RW 8] Timers Context region. 
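The XX protection descriptor and table words above pack several sub-fields into one register value. Purely as a sketch of the layout documented for TCM_REG_XX_DESCR_TABLE ([5:0] message length, [15:6] message pointer, [20:16] next pointer), the helpers below compose and decompose such an entry; the helper names are invented for the example and only the bit positions come from the comment.

#include <linux/types.h>

#define XX_DESCR_LEN(e)         ((e) & 0x3f)            /* bits [5:0]   */
#define XX_DESCR_MSG_PTR(e)     (((e) >> 6) & 0x3ff)    /* bits [15:6]  */
#define XX_DESCR_NEXT_PTR(e)    (((e) >> 16) & 0x1f)    /* bits [20:16] */

static inline u32 xx_descr_entry(u32 len, u32 msg_ptr, u32 next_ptr)
{
        return (len & 0x3f) | ((msg_ptr & 0x3ff) << 6) |
               ((next_ptr & 0x1f) << 16);
}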
*/ +#define TM_REG_TM_CONTEXT_REGION 0x164044 +/* [RW 1] Interrupt mask register #0 read/write */ +#define TM_REG_TM_INT_MASK 0x1640fc +/* [R 1] Interrupt register #0 read */ +#define TM_REG_TM_INT_STS 0x1640f0 +/* [RW 7] Parity mask register #0 read/write */ +#define TM_REG_TM_PRTY_MASK 0x16410c +/* [R 7] Parity register #0 read */ +#define TM_REG_TM_PRTY_STS 0x164100 +/* [RC 7] Parity register #0 read clear */ +#define TM_REG_TM_PRTY_STS_CLR 0x164104 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define TSDM_REG_AGG_INT_EVENT_0 0x42038 +#define TSDM_REG_AGG_INT_EVENT_1 0x4203c +#define TSDM_REG_AGG_INT_EVENT_2 0x42040 +#define TSDM_REG_AGG_INT_EVENT_3 0x42044 +#define TSDM_REG_AGG_INT_EVENT_4 0x42048 +/* [RW 1] The T bit for aggregated interrupt 0 */ +#define TSDM_REG_AGG_INT_T_0 0x420b8 +#define TSDM_REG_AGG_INT_T_1 0x420bc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define TSDM_REG_CFC_RSP_START_ADDR 0x42008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c +/* [RW 16] The maximum value of the completion counter #1 */ +#define TSDM_REG_CMP_COUNTER_MAX1 0x42020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define TSDM_REG_CMP_COUNTER_MAX2 0x42024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define TSDM_REG_CMP_COUNTER_MAX3 0x42028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define TSDM_REG_CMP_COUNTER_START_ADDR 0x4200c +#define TSDM_REG_ENABLE_IN1 0x42238 +#define TSDM_REG_ENABLE_IN2 0x4223c +#define TSDM_REG_ENABLE_OUT1 0x42240 +#define TSDM_REG_ENABLE_OUT2 0x42244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. 
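Since the four TSDM completion-counter maximum registers above sit at consecutive 32-bit offsets (0x4201c..0x42028), they lend themselves to a small loop. The sketch below assumes an ioremapped window and a caller-chosen maximum value; it is illustrative only.

#include <linux/io.h>

static void tsdm_set_cmp_counter_max(void __iomem *bar, u32 max)
{
        int i;

        /* CMP_COUNTER_MAX0..3 are laid out with a stride of 4 bytes */
        for (i = 0; i < 4; i++)
                writel(max, bar + TSDM_REG_CMP_COUNTER_MAX0 + 4 * i);
}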
*/ +#define TSDM_REG_INIT_CREDIT_PXP_CTRL 0x424bc +/* [ST 32] The number of ACK after placement messages received */ +#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x4227c +/* [ST 32] The number of packet end messages received from the parser */ +#define TSDM_REG_NUM_OF_PKT_END_MSG 0x42274 +/* [ST 32] The number of requests received from the pxp async if */ +#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x42278 +/* [ST 32] The number of commands received in queue 0 */ +#define TSDM_REG_NUM_OF_Q0_CMD 0x42248 +/* [ST 32] The number of commands received in queue 10 */ +#define TSDM_REG_NUM_OF_Q10_CMD 0x4226c +/* [ST 32] The number of commands received in queue 11 */ +#define TSDM_REG_NUM_OF_Q11_CMD 0x42270 +/* [ST 32] The number of commands received in queue 1 */ +#define TSDM_REG_NUM_OF_Q1_CMD 0x4224c +/* [ST 32] The number of commands received in queue 3 */ +#define TSDM_REG_NUM_OF_Q3_CMD 0x42250 +/* [ST 32] The number of commands received in queue 4 */ +#define TSDM_REG_NUM_OF_Q4_CMD 0x42254 +/* [ST 32] The number of commands received in queue 5 */ +#define TSDM_REG_NUM_OF_Q5_CMD 0x42258 +/* [ST 32] The number of commands received in queue 6 */ +#define TSDM_REG_NUM_OF_Q6_CMD 0x4225c +/* [ST 32] The number of commands received in queue 7 */ +#define TSDM_REG_NUM_OF_Q7_CMD 0x42260 +/* [ST 32] The number of commands received in queue 8 */ +#define TSDM_REG_NUM_OF_Q8_CMD 0x42264 +/* [ST 32] The number of commands received in queue 9 */ +#define TSDM_REG_NUM_OF_Q9_CMD 0x42268 +/* [RW 13] The start address in the internal RAM for the packet end message */ +#define TSDM_REG_PCK_END_MSG_START_ADDR 0x42014 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define TSDM_REG_Q_COUNTER_START_ADDR 0x42010 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x42548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_PARSER_EMPTY 0x42550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define TSDM_REG_SYNC_SYNC_EMPTY 0x42558 +/* [RW 32] Tick for timer counter. Applicable only when + ~tsdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define TSDM_REG_TIMER_TICK 0x42000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSDM_REG_TSDM_INT_MASK_0 0x4229c +#define TSDM_REG_TSDM_INT_MASK_1 0x422ac +/* [R 32] Interrupt register #0 read */ +#define TSDM_REG_TSDM_INT_STS_0 0x42290 +#define TSDM_REG_TSDM_INT_STS_1 0x422a0 +/* [RW 11] Parity mask register #0 read/write */ +#define TSDM_REG_TSDM_PRTY_MASK 0x422bc +/* [R 11] Parity register #0 read */ +#define TSDM_REG_TSDM_PRTY_STS 0x422b0 +/* [RC 11] Parity register #0 read clear */ +#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define TSEM_REG_ARB_ELEMENT0 0x180020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 */ +#define TSEM_REG_ARB_ELEMENT1 0x180024 +/* [RW 3] The source that is associated with arbitration element 2. 
Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 + and ~tsem_registers_arb_element1.arb_element1 */ +#define TSEM_REG_ARB_ELEMENT2 0x180028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~tsem_registers_arb_element0.arb_element0 and + ~tsem_registers_arb_element1.arb_element1 and + ~tsem_registers_arb_element2.arb_element2 */ +#define TSEM_REG_ARB_ELEMENT3 0x18002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~tsem_registers_arb_element0.arb_element0 + and ~tsem_registers_arb_element1.arb_element1 and + ~tsem_registers_arb_element2.arb_element2 and + ~tsem_registers_arb_element3.arb_element3 */ +#define TSEM_REG_ARB_ELEMENT4 0x180030 +#define TSEM_REG_ENABLE_IN 0x1800a4 +#define TSEM_REG_ENABLE_OUT 0x1800a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ +#define TSEM_REG_FAST_MEMORY 0x1a0000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define TSEM_REG_FIC0_DISABLE 0x180224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define TSEM_REG_FIC1_DISABLE 0x180234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define TSEM_REG_INT_TABLE 0x180400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define TSEM_REG_MSG_NUM_FIC0 0x180000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define TSEM_REG_MSG_NUM_FIC1 0x180004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define TSEM_REG_MSG_NUM_FOC0 0x180008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define TSEM_REG_MSG_NUM_FOC1 0x18000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define TSEM_REG_MSG_NUM_FOC2 0x180010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define TSEM_REG_MSG_NUM_FOC3 0x180014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define TSEM_REG_PAS_DISABLE 0x18024c +/* [WB 128] Debug only. Passive buffer memory */ +#define TSEM_REG_PASSIVE_BUFFER 0x181000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */ +#define TSEM_REG_PRAM 0x1c0000 +/* [R 8] Valid sleeping threads indication have bit per thread */ +#define TSEM_REG_SLEEP_THREADS_VALID 0x18026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 +/* [RW 8] List of free threads . There is a bit per thread. 
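The comments on the TSEM arbitration elements require the five programmed sources to be pairwise distinct. A hedged sketch of that rule is shown below; the __iomem base, the writel() access and the helper itself are assumptions made for illustration.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/errno.h>

static int tsem_set_arb_elements(void __iomem *bar, const u32 src[5])
{
        static const u32 regs[5] = {
                TSEM_REG_ARB_ELEMENT0, TSEM_REG_ARB_ELEMENT1,
                TSEM_REG_ARB_ELEMENT2, TSEM_REG_ARB_ELEMENT3,
                TSEM_REG_ARB_ELEMENT4,
        };
        int i, j;

        /* sources must be pairwise different, per the comments above */
        for (i = 0; i < 5; i++)
                for (j = i + 1; j < 5; j++)
                        if (src[i] == src[j])
                                return -EINVAL;

        for (i = 0; i < 5; i++)
                writel(src[i], bar + regs[i]);
        return 0;
}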
*/ +#define TSEM_REG_THREADS_LIST 0x1802e4 +/* [RC 32] Parity register #0 read clear */ +#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 +#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define TSEM_REG_TS_0_AS 0x180038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define TSEM_REG_TS_10_AS 0x180060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define TSEM_REG_TS_11_AS 0x180064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define TSEM_REG_TS_12_AS 0x180068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define TSEM_REG_TS_13_AS 0x18006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define TSEM_REG_TS_14_AS 0x180070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define TSEM_REG_TS_15_AS 0x180074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define TSEM_REG_TS_16_AS 0x180078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define TSEM_REG_TS_17_AS 0x18007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define TSEM_REG_TS_18_AS 0x180080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define TSEM_REG_TS_1_AS 0x18003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define TSEM_REG_TS_2_AS 0x180040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define TSEM_REG_TS_3_AS 0x180044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define TSEM_REG_TS_4_AS 0x180048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define TSEM_REG_TS_5_AS 0x18004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define TSEM_REG_TS_6_AS 0x180050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define TSEM_REG_TS_7_AS 0x180054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define TSEM_REG_TS_8_AS 0x180058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define TSEM_REG_TS_9_AS 0x18005c +/* [RW 32] Interrupt mask register #0 read/write */ +#define TSEM_REG_TSEM_INT_MASK_0 0x180100 +#define TSEM_REG_TSEM_INT_MASK_1 0x180110 +/* [R 32] Interrupt register #0 read */ +#define TSEM_REG_TSEM_INT_STS_0 0x1800f4 +#define TSEM_REG_TSEM_INT_STS_1 0x180104 +/* [RW 32] Parity mask register #0 read/write */ +#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 +#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 +/* [R 32] Parity register #0 read */ +#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 +#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define TSEM_REG_VFPF_ERR_NUM 0x180380 +/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits + * [10:8] of the address should be the offset within the accessed LCID + * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 + * LCID100. The RBC address should be 12'ha64. */ +#define UCM_REG_AG_CTX 0xe2000 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define UCM_REG_CAM_OCCUP 0xe0170 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define UCM_REG_CDU_AG_RD_IFEN 0xe0038 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define UCM_REG_CDU_AG_WR_IFEN 0xe0034 +/* [RW 1] CDU STORM read Interface enable. 
If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define UCM_REG_CDU_SM_RD_IFEN 0xe0040 +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define UCM_REG_CDU_SM_WR_IFEN 0xe003c +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define UCM_REG_CFC_INIT_CRD 0xe0204 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_CP_WEIGHT 0xe00c4 +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_CSEM_IFEN 0xe0028 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the csem interface is detected. */ +#define UCM_REG_CSEM_LENGTH_MIS 0xe0160 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_CSEM_WEIGHT 0xe00b8 +/* [RW 1] Input dorq Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_DORQ_IFEN 0xe0030 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the dorq interface is detected. */ +#define UCM_REG_DORQ_LENGTH_MIS 0xe0168 +/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_DORQ_WEIGHT 0xe00c0 +/* [RW 8] The Event ID in case ErrorFlg input message bit is set. */ +#define UCM_REG_ERR_EVNT_ID 0xe00a4 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define UCM_REG_ERR_UCM_HDR 0xe00a0 +/* [RW 8] The Event ID for Timers expiration. */ +#define UCM_REG_EXPR_EVNT_ID 0xe00a8 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define UCM_REG_FIC0_INIT_CRD 0xe020c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define UCM_REG_FIC1_INIT_CRD 0xe0210 +/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr; + ~ucm_registers_gr_ld0_pr.gr_ld0_pr and + ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */ +#define UCM_REG_GR_ARB_TYPE 0xe0144 +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel group is + compliment to the others. */ +#define UCM_REG_GR_LD0_PR 0xe014c +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Store channel group is + compliment to the others. 
*/ +#define UCM_REG_GR_LD1_PR 0xe0150 +/* [RW 2] The queue index for invalidate counter flag decision. */ +#define UCM_REG_INV_CFLG_Q 0xe00e4 +/* [RW 5] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. the double REG-pairs are + used in order to align to STORM context row size of 128 bits. The offset + of these data in the STORM context is always 0. Index _i stands for the + connection type (one of 16). */ +#define UCM_REG_N_SM_CTX_LD_0 0xe0054 +#define UCM_REG_N_SM_CTX_LD_1 0xe0058 +#define UCM_REG_N_SM_CTX_LD_2 0xe005c +#define UCM_REG_N_SM_CTX_LD_3 0xe0060 +#define UCM_REG_N_SM_CTX_LD_4 0xe0064 +#define UCM_REG_N_SM_CTX_LD_5 0xe0068 +#define UCM_REG_PHYS_QNUM0_0 0xe0110 +#define UCM_REG_PHYS_QNUM0_1 0xe0114 +#define UCM_REG_PHYS_QNUM1_0 0xe0118 +#define UCM_REG_PHYS_QNUM1_1 0xe011c +#define UCM_REG_PHYS_QNUM2_0 0xe0120 +#define UCM_REG_PHYS_QNUM2_1 0xe0124 +#define UCM_REG_PHYS_QNUM3_0 0xe0128 +#define UCM_REG_PHYS_QNUM3_1 0xe012c +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define UCM_REG_STOP_EVNT_ID 0xe00ac +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the STORM interface is detected. */ +#define UCM_REG_STORM_LENGTH_MIS 0xe0154 +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_STORM_UCM_IFEN 0xe0010 +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_STORM_WEIGHT 0xe00b0 +/* [RW 4] Timers output initial credit. Max credit available - 15.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 4 at start-up. */ +#define UCM_REG_TM_INIT_CRD 0xe021c +/* [RW 28] The CM header for Timers expiration command. */ +#define UCM_REG_TM_UCM_HDR 0xe009c +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_TM_UCM_IFEN 0xe001c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_TM_WEIGHT 0xe00d4 +/* [RW 1] Input tsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_TSEM_IFEN 0xe0024 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the tsem interface is detected. */ +#define UCM_REG_TSEM_LENGTH_MIS 0xe015c +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_TSEM_WEIGHT 0xe00b4 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define UCM_REG_UCM_CFC_IFEN 0xe0044 +/* [RW 11] Interrupt mask register #0 read/write */ +#define UCM_REG_UCM_INT_MASK 0xe01d4 +/* [R 11] Interrupt register #0 read */ +#define UCM_REG_UCM_INT_STS 0xe01c8 +/* [RW 27] Parity mask register #0 read/write */ +#define UCM_REG_UCM_PRTY_MASK 0xe01e4 +/* [R 27] Parity register #0 read */ +#define UCM_REG_UCM_PRTY_STS 0xe01d8 +/* [RC 27] Parity register #0 read clear */ +#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc +/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the Reg1WbFlg isn't set. */ +#define UCM_REG_UCM_REG0_SZ 0xe00dc +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_STORM0_IFEN 0xe0004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_STORM1_IFEN 0xe0008 +/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_UCM_TM_IFEN 0xe0020 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UCM_UQM_IFEN 0xe000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define UCM_REG_UCM_UQM_USE_Q 0xe00d8 +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define UCM_REG_UQM_INIT_CRD 0xe0220 +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_UQM_P_WEIGHT 0xe00cc +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_UQM_S_WEIGHT 0xe00d0 +/* [RW 28] The CM header value for QM request (primary). */ +#define UCM_REG_UQM_UCM_HDR_P 0xe0094 +/* [RW 28] The CM header value for QM request (secondary). */ +#define UCM_REG_UQM_UCM_HDR_S 0xe0098 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_UQM_UCM_IFEN 0xe0014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define UCM_REG_USDM_IFEN 0xe0018 +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the SDM interface is detected. */ +#define UCM_REG_USDM_LENGTH_MIS 0xe0158 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_USDM_WEIGHT 0xe00c8 +/* [RW 1] Input xsem Interface enable. 
If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define UCM_REG_XSEM_IFEN 0xe002c +/* [RC 1] Set when the message length mismatch (relative to last indication) + at the xsem interface isdetected. */ +#define UCM_REG_XSEM_LENGTH_MIS 0xe0164 +/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define UCM_REG_XSEM_WEIGHT 0xe00bc +/* [RW 20] Indirect access to the descriptor table of the XX protection + mechanism. The fields are:[5:0] - message length; 14:6] - message + pointer; 19:15] - next pointer. */ +#define UCM_REG_XX_DESCR_TABLE 0xe0280 +#define UCM_REG_XX_DESCR_TABLE_SIZE 27 +/* [R 6] Use to read the XX protection Free counter. */ +#define UCM_REG_XX_FREE 0xe016c +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. Write writes the initial credit value; read returns the current + value of the credit counter. Must be initialized to 12 at start-up. */ +#define UCM_REG_XX_INIT_CRD 0xe0224 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~ucm_registers_xx_free.xx_free read on read. */ +#define UCM_REG_XX_MSG_NUM 0xe0228 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define UCM_REG_XX_OVFL_EVNT_ID 0xe004c +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - + header pointer. */ +#define UCM_REG_XX_TABLE 0xe0300 +#define UMAC_COMMAND_CONFIG_REG_HD_ENA (0x1<<10) +#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28) +#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) +#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) +#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) +#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8) +#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) +#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) +#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) +#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) +#define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE + * state from LPI state when it receives packet for transmission. The + * decrement unit is 1 micro-second. */ +#define UMAC_REG_EEE_WAKE_TIMER 0x6c +/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers + * to bit 17 of the MAC address etc. */ +#define UMAC_REG_MAC_ADDR0 0xc +/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1 + * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */ +#define UMAC_REG_MAC_ADDR1 0x10 +/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive + * logic to check frames. 
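The UMAC command_config bit masks above are typically applied with a read-modify-write. The fragment below is only a sketch of that pattern (enabling or disabling the RX and TX paths); it is not the driver's actual MAC bring-up sequence, and the umac __iomem base is an assumption.

#include <linux/types.h>
#include <linux/io.h>

static void umac_rx_tx_enable(void __iomem *umac, bool enable)
{
        u32 val = readl(umac + UMAC_REG_COMMAND_CONFIG);

        if (enable)
                val |= UMAC_COMMAND_CONFIG_REG_RX_ENA |
                       UMAC_COMMAND_CONFIG_REG_TX_ENA;
        else
                val &= ~(UMAC_COMMAND_CONFIG_REG_RX_ENA |
                         UMAC_COMMAND_CONFIG_REG_TX_ENA);

        writel(val, umac + UMAC_REG_COMMAND_CONFIG);
}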
*/ +#define UMAC_REG_MAXFR 0x14 +#define UMAC_REG_UMAC_EEE_CTRL 0x64 +#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3) +/* [RW 8] The event id for aggregated interrupt 0 */ +#define USDM_REG_AGG_INT_EVENT_0 0xc4038 +#define USDM_REG_AGG_INT_EVENT_1 0xc403c +#define USDM_REG_AGG_INT_EVENT_2 0xc4040 +#define USDM_REG_AGG_INT_EVENT_4 0xc4048 +#define USDM_REG_AGG_INT_EVENT_5 0xc404c +#define USDM_REG_AGG_INT_EVENT_6 0xc4050 +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define USDM_REG_AGG_INT_MODE_0 0xc41b8 +#define USDM_REG_AGG_INT_MODE_1 0xc41bc +#define USDM_REG_AGG_INT_MODE_4 0xc41c8 +#define USDM_REG_AGG_INT_MODE_5 0xc41cc +#define USDM_REG_AGG_INT_MODE_6 0xc41d0 +/* [RW 1] The T bit for aggregated interrupt 5 */ +#define USDM_REG_AGG_INT_T_5 0xc40cc +#define USDM_REG_AGG_INT_T_6 0xc40d0 +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define USDM_REG_CFC_RSP_START_ADDR 0xc4008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define USDM_REG_CMP_COUNTER_MAX0 0xc401c +/* [RW 16] The maximum value of the completion counter #1 */ +#define USDM_REG_CMP_COUNTER_MAX1 0xc4020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define USDM_REG_CMP_COUNTER_MAX2 0xc4024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define USDM_REG_CMP_COUNTER_MAX3 0xc4028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define USDM_REG_CMP_COUNTER_START_ADDR 0xc400c +#define USDM_REG_ENABLE_IN1 0xc4238 +#define USDM_REG_ENABLE_IN2 0xc423c +#define USDM_REG_ENABLE_OUT1 0xc4240 +#define USDM_REG_ENABLE_OUT2 0xc4244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. 
*/ +#define USDM_REG_INIT_CREDIT_PXP_CTRL 0xc44c0 +/* [ST 32] The number of ACK after placement messages received */ +#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 0xc4280 +/* [ST 32] The number of packet end messages received from the parser */ +#define USDM_REG_NUM_OF_PKT_END_MSG 0xc4278 +/* [ST 32] The number of requests received from the pxp async if */ +#define USDM_REG_NUM_OF_PXP_ASYNC_REQ 0xc427c +/* [ST 32] The number of commands received in queue 0 */ +#define USDM_REG_NUM_OF_Q0_CMD 0xc4248 +/* [ST 32] The number of commands received in queue 10 */ +#define USDM_REG_NUM_OF_Q10_CMD 0xc4270 +/* [ST 32] The number of commands received in queue 11 */ +#define USDM_REG_NUM_OF_Q11_CMD 0xc4274 +/* [ST 32] The number of commands received in queue 1 */ +#define USDM_REG_NUM_OF_Q1_CMD 0xc424c +/* [ST 32] The number of commands received in queue 2 */ +#define USDM_REG_NUM_OF_Q2_CMD 0xc4250 +/* [ST 32] The number of commands received in queue 3 */ +#define USDM_REG_NUM_OF_Q3_CMD 0xc4254 +/* [ST 32] The number of commands received in queue 4 */ +#define USDM_REG_NUM_OF_Q4_CMD 0xc4258 +/* [ST 32] The number of commands received in queue 5 */ +#define USDM_REG_NUM_OF_Q5_CMD 0xc425c +/* [ST 32] The number of commands received in queue 6 */ +#define USDM_REG_NUM_OF_Q6_CMD 0xc4260 +/* [ST 32] The number of commands received in queue 7 */ +#define USDM_REG_NUM_OF_Q7_CMD 0xc4264 +/* [ST 32] The number of commands received in queue 8 */ +#define USDM_REG_NUM_OF_Q8_CMD 0xc4268 +/* [ST 32] The number of commands received in queue 9 */ +#define USDM_REG_NUM_OF_Q9_CMD 0xc426c +/* [RW 13] The start address in the internal RAM for the packet end message */ +#define USDM_REG_PCK_END_MSG_START_ADDR 0xc4014 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define USDM_REG_Q_COUNTER_START_ADDR 0xc4010 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0xc4550 +/* [R 1] parser fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_PARSER_EMPTY 0xc4558 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define USDM_REG_SYNC_SYNC_EMPTY 0xc4560 +/* [RW 32] Tick for timer counter. Applicable only when + ~usdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define USDM_REG_TIMER_TICK 0xc4000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define USDM_REG_USDM_INT_MASK_0 0xc42a0 +#define USDM_REG_USDM_INT_MASK_1 0xc42b0 +/* [R 32] Interrupt register #0 read */ +#define USDM_REG_USDM_INT_STS_0 0xc4294 +#define USDM_REG_USDM_INT_STS_1 0xc42a4 +/* [RW 11] Parity mask register #0 read/write */ +#define USDM_REG_USDM_PRTY_MASK 0xc42c0 +/* [R 11] Parity register #0 read */ +#define USDM_REG_USDM_PRTY_STS 0xc42b4 +/* [RC 11] Parity register #0 read clear */ +#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define USEM_REG_ARB_CYCLE_SIZE 0x300034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define USEM_REG_ARB_ELEMENT0 0x300020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. 
+ Could not be equal to register ~usem_registers_arb_element0.arb_element0 */ +#define USEM_REG_ARB_ELEMENT1 0x300024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 + and ~usem_registers_arb_element1.arb_element1 */ +#define USEM_REG_ARB_ELEMENT2 0x300028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~usem_registers_arb_element0.arb_element0 and + ~usem_registers_arb_element1.arb_element1 and + ~usem_registers_arb_element2.arb_element2 */ +#define USEM_REG_ARB_ELEMENT3 0x30002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~usem_registers_arb_element0.arb_element0 + and ~usem_registers_arb_element1.arb_element1 and + ~usem_registers_arb_element2.arb_element2 and + ~usem_registers_arb_element3.arb_element3 */ +#define USEM_REG_ARB_ELEMENT4 0x300030 +#define USEM_REG_ENABLE_IN 0x3000a4 +#define USEM_REG_ENABLE_OUT 0x3000a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ +#define USEM_REG_FAST_MEMORY 0x320000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define USEM_REG_FIC0_DISABLE 0x300224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define USEM_REG_FIC1_DISABLE 0x300234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define USEM_REG_INT_TABLE 0x300400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define USEM_REG_MSG_NUM_FIC0 0x300000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define USEM_REG_MSG_NUM_FIC1 0x300004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define USEM_REG_MSG_NUM_FOC0 0x300008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define USEM_REG_MSG_NUM_FOC1 0x30000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define USEM_REG_MSG_NUM_FOC2 0x300010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define USEM_REG_MSG_NUM_FOC3 0x300014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define USEM_REG_PAS_DISABLE 0x30024c +/* [WB 128] Debug only. Passive buffer memory */ +#define USEM_REG_PASSIVE_BUFFER 0x302000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
*/ +#define USEM_REG_PRAM 0x340000 +/* [R 16] Valid sleeping threads indication have bit per thread */ +#define USEM_REG_SLEEP_THREADS_VALID 0x30026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define USEM_REG_SLOW_EXT_STORE_EMPTY 0x3002a0 +/* [RW 16] List of free threads . There is a bit per thread. */ +#define USEM_REG_THREADS_LIST 0x3002e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define USEM_REG_TS_0_AS 0x300038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define USEM_REG_TS_10_AS 0x300060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define USEM_REG_TS_11_AS 0x300064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define USEM_REG_TS_12_AS 0x300068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define USEM_REG_TS_13_AS 0x30006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define USEM_REG_TS_14_AS 0x300070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define USEM_REG_TS_15_AS 0x300074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define USEM_REG_TS_16_AS 0x300078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define USEM_REG_TS_17_AS 0x30007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define USEM_REG_TS_18_AS 0x300080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define USEM_REG_TS_1_AS 0x30003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define USEM_REG_TS_2_AS 0x300040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define USEM_REG_TS_3_AS 0x300044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define USEM_REG_TS_4_AS 0x300048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define USEM_REG_TS_5_AS 0x30004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define USEM_REG_TS_6_AS 0x300050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define USEM_REG_TS_7_AS 0x300054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define USEM_REG_TS_8_AS 0x300058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define USEM_REG_TS_9_AS 0x30005c +/* [RW 32] Interrupt mask register #0 read/write */ +#define USEM_REG_USEM_INT_MASK_0 0x300110 +#define USEM_REG_USEM_INT_MASK_1 0x300120 +/* [R 32] Interrupt register #0 read */ +#define USEM_REG_USEM_INT_STS_0 0x300104 +#define USEM_REG_USEM_INT_STS_1 0x300114 +/* [RW 32] Parity mask register #0 read/write */ +#define USEM_REG_USEM_PRTY_MASK_0 0x300130 +#define USEM_REG_USEM_PRTY_MASK_1 0x300140 +/* [R 32] Parity register #0 read */ +#define USEM_REG_USEM_PRTY_STS_0 0x300124 +#define USEM_REG_USEM_PRTY_STS_1 0x300134 +/* [RC 32] Parity register #0 read clear */ +#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 +#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ +#define USEM_REG_VFPF_ERR_NUM 0x300380 +#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0) +#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1) +#define VFC_REG_MEMORIES_RST 0x1943c +/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits + * [12:8] of the address should be the offset within the accessed LCID + * context; the bits [7:0] are the accessed LCID.Example: to write to REG10 + * LCID100. The RBC address should be 13'ha64. */ +#define XCM_REG_AG_CTX 0x28000 +/* [RW 2] The queue index for registration on Aux1 counter flag. */ +#define XCM_REG_AUX1_Q 0x20134 +/* [RW 2] Per each decision rule the queue index to register to. 
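The [R]/[RC] pairs above follow the usual pattern: the plain status register is a non-destructive read, while the _CLR copy is documented as read-clear. A minimal sketch, assuming an ioremapped base and using the USEM parity pair as the example:

#include <linux/io.h>
#include <linux/printk.h>

static u32 usem_check_parity(void __iomem *bar)
{
        u32 sts = readl(bar + USEM_REG_USEM_PRTY_STS_0);

        if (sts) {
                pr_warn("USEM parity status 0: 0x%08x\n", sts);
                /* reading the read-clear copy acknowledges the bits */
                readl(bar + USEM_REG_USEM_PRTY_STS_CLR_0);
        }
        return sts;
}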
*/ +#define XCM_REG_AUX_CNT_FLG_Q_19 0x201b0 +/* [R 5] Used to read the XX protection CAM occupancy counter. */ +#define XCM_REG_CAM_OCCUP 0x20244 +/* [RW 1] CDU AG read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define XCM_REG_CDU_AG_RD_IFEN 0x20044 +/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input + are disregarded; all other signals are treated as usual; if 1 - normal + activity. */ +#define XCM_REG_CDU_AG_WR_IFEN 0x20040 +/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is + disregarded; valid output is deasserted; all other signals are treated as + usual; if 1 - normal activity. */ +#define XCM_REG_CDU_SM_RD_IFEN 0x2004c +/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid + input is disregarded; all other signals are treated as usual; if 1 - + normal activity. */ +#define XCM_REG_CDU_SM_WR_IFEN 0x20048 +/* [RW 4] CFC output initial credit. Max credit available - 15.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 1 at start-up. */ +#define XCM_REG_CFC_INIT_CRD 0x20404 +/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_CP_WEIGHT 0x200dc +/* [RW 1] Input csem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_CSEM_IFEN 0x20028 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the csem interface. */ +#define XCM_REG_CSEM_LENGTH_MIS 0x20228 +/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_CSEM_WEIGHT 0x200c4 +/* [RW 1] Input dorq Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_DORQ_IFEN 0x20030 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the dorq interface. */ +#define XCM_REG_DORQ_LENGTH_MIS 0x20230 +/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_DORQ_WEIGHT 0x200cc +/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */ +#define XCM_REG_ERR_EVNT_ID 0x200b0 +/* [RW 28] The CM erroneous header for QM and Timers formatting. */ +#define XCM_REG_ERR_XCM_HDR 0x200ac +/* [RW 8] The Event ID for Timers expiration. */ +#define XCM_REG_EXPR_EVNT_ID 0x200b4 +/* [RW 8] FIC0 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. */ +#define XCM_REG_FIC0_INIT_CRD 0x2040c +/* [RW 8] FIC1 output initial credit. Max credit available - 255.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 64 at start-up. 
*/ +#define XCM_REG_FIC1_INIT_CRD 0x20410 +#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0 0x20118 +#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1 0x2011c +#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0 0x20108 +#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1 0x2010c +/* [RW 1] Arbitratiojn between Input Arbiter groups: 0 - fair Round-Robin; 1 + - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr; + ~xcm_registers_gr_ld0_pr.gr_ld0_pr and + ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */ +#define XCM_REG_GR_ARB_TYPE 0x2020c +/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Channel group is the + compliment of the other 3 groups. */ +#define XCM_REG_GR_LD0_PR 0x20214 +/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the + highest priority is 3. It is supposed that the Channel group is the + compliment of the other 3 groups. */ +#define XCM_REG_GR_LD1_PR 0x20218 +/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_NIG0_IFEN 0x20038 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the nig0 interface. */ +#define XCM_REG_NIG0_LENGTH_MIS 0x20238 +/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_NIG0_WEIGHT 0x200d4 +/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_NIG1_IFEN 0x2003c +/* [RC 1] Set at message length mismatch (relative to last indication) at + the nig1 interface. */ +#define XCM_REG_NIG1_LENGTH_MIS 0x2023c +/* [RW 5] The number of double REG-pairs; loaded from the STORM context and + sent to STORM; for a specific connection type. The double REG-pairs are + used in order to align to STORM context row size of 128 bits. The offset + of these data in the STORM context is always 0. Index _i stands for the + connection type (one of 16). */ +#define XCM_REG_N_SM_CTX_LD_0 0x20060 +#define XCM_REG_N_SM_CTX_LD_1 0x20064 +#define XCM_REG_N_SM_CTX_LD_2 0x20068 +#define XCM_REG_N_SM_CTX_LD_3 0x2006c +#define XCM_REG_N_SM_CTX_LD_4 0x20070 +#define XCM_REG_N_SM_CTX_LD_5 0x20074 +/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_PBF_IFEN 0x20034 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the pbf interface. */ +#define XCM_REG_PBF_LENGTH_MIS 0x20234 +/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_PBF_WEIGHT 0x200d0 +#define XCM_REG_PHYS_QNUM3_0 0x20100 +#define XCM_REG_PHYS_QNUM3_1 0x20104 +/* [RW 8] The Event ID for Timers formatting in case of stop done. */ +#define XCM_REG_STOP_EVNT_ID 0x200b8 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the STORM interface. */ +#define XCM_REG_STORM_LENGTH_MIS 0x2021c +/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. 
*/ +#define XCM_REG_STORM_WEIGHT 0x200bc +/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_STORM_XCM_IFEN 0x20010 +/* [RW 4] Timers output initial credit. Max credit available - 15.Write + writes the initial credit value; read returns the current value of the + credit counter. Must be initialized to 4 at start-up. */ +#define XCM_REG_TM_INIT_CRD 0x2041c +/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_TM_WEIGHT 0x200ec +/* [RW 28] The CM header for Timers expiration command. */ +#define XCM_REG_TM_XCM_HDR 0x200a8 +/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_TM_XCM_IFEN 0x2001c +/* [RW 1] Input tsem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_TSEM_IFEN 0x20024 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the tsem interface. */ +#define XCM_REG_TSEM_LENGTH_MIS 0x20224 +/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_TSEM_WEIGHT 0x200c0 +/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */ +#define XCM_REG_UNA_GT_NXT_Q 0x20120 +/* [RW 1] Input usem Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_USEM_IFEN 0x2002c +/* [RC 1] Message length mismatch (relative to last indication) at the usem + interface. */ +#define XCM_REG_USEM_LENGTH_MIS 0x2022c +/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_USEM_WEIGHT 0x200c8 +#define XCM_REG_WU_DA_CNT_CMD00 0x201d4 +#define XCM_REG_WU_DA_CNT_CMD01 0x201d8 +#define XCM_REG_WU_DA_CNT_CMD10 0x201dc +#define XCM_REG_WU_DA_CNT_CMD11 0x201e0 +#define XCM_REG_WU_DA_CNT_UPD_VAL00 0x201e4 +#define XCM_REG_WU_DA_CNT_UPD_VAL01 0x201e8 +#define XCM_REG_WU_DA_CNT_UPD_VAL10 0x201ec +#define XCM_REG_WU_DA_CNT_UPD_VAL11 0x201f0 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00 0x201c4 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01 0x201c8 +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10 0x201cc +#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11 0x201d0 +/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. 
*/ +#define XCM_REG_XCM_CFC_IFEN 0x20050 +/* [RW 14] Interrupt mask register #0 read/write */ +#define XCM_REG_XCM_INT_MASK 0x202b4 +/* [R 14] Interrupt register #0 read */ +#define XCM_REG_XCM_INT_STS 0x202a8 +/* [RW 30] Parity mask register #0 read/write */ +#define XCM_REG_XCM_PRTY_MASK 0x202c4 +/* [R 30] Parity register #0 read */ +#define XCM_REG_XCM_PRTY_STS 0x202b8 +/* [RC 30] Parity register #0 read clear */ +#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc + +/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS + REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). + Is used to determine the number of the AG context REG-pairs written back; + when the Reg1WbFlg isn't set. */ +#define XCM_REG_XCM_REG0_SZ 0x200f4 +/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_STORM0_IFEN 0x20004 +/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_STORM1_IFEN 0x20008 +/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is + disregarded; acknowledge output is deasserted; all other signals are + treated as usual; if 1 - normal activity. */ +#define XCM_REG_XCM_TM_IFEN 0x20020 +/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is + disregarded; valid is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XCM_XQM_IFEN 0x2000c +/* [RW 1] If set the Q index; received from the QM is inserted to event ID. */ +#define XCM_REG_XCM_XQM_USE_Q 0x200f0 +/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */ +#define XCM_REG_XQM_BYP_ACT_UPD 0x200fc +/* [RW 6] QM output initial credit. Max credit available - 32.Write writes + the initial credit value; read returns the current value of the credit + counter. Must be initialized to 32 at start-up. */ +#define XCM_REG_XQM_INIT_CRD 0x20420 +/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_XQM_P_WEIGHT 0x200e4 +/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0 + stands for weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. */ +#define XCM_REG_XQM_S_WEIGHT 0x200e8 +/* [RW 28] The CM header value for QM request (primary). */ +#define XCM_REG_XQM_XCM_HDR_P 0x200a0 +/* [RW 28] The CM header value for QM request (secondary). */ +#define XCM_REG_XQM_XCM_HDR_S 0x200a4 +/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XQM_XCM_IFEN 0x20014 +/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded; + acknowledge output is deasserted; all other signals are treated as usual; + if 1 - normal activity. */ +#define XCM_REG_XSDM_IFEN 0x20018 +/* [RC 1] Set at message length mismatch (relative to last indication) at + the SDM interface. */ +#define XCM_REG_XSDM_LENGTH_MIS 0x20220 +/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for + weight 8 (the most prioritised); 1 stands for weight 1(least + prioritised); 2 stands for weight 2; tc. 
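The comments on the XCM credit counters above state the values they must be initialised to at start-up (CFC = 1, FIC0/FIC1 = 64, Timers = 4, QM = 32). A minimal sketch of that initialisation, assuming an ioremapped base and plain writel() access rather than the driver's own init flow:

#include <linux/io.h>

static void xcm_init_output_credits(void __iomem *bar)
{
        writel(1,  bar + XCM_REG_CFC_INIT_CRD);
        writel(64, bar + XCM_REG_FIC0_INIT_CRD);
        writel(64, bar + XCM_REG_FIC1_INIT_CRD);
        writel(4,  bar + XCM_REG_TM_INIT_CRD);
        writel(32, bar + XCM_REG_XQM_INIT_CRD);
}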
*/ +#define XCM_REG_XSDM_WEIGHT 0x200e0 +/* [RW 17] Indirect access to the descriptor table of the XX protection + mechanism. The fields are: [5:0] - message length; 11:6] - message + pointer; 16:12] - next pointer. */ +#define XCM_REG_XX_DESCR_TABLE 0x20480 +#define XCM_REG_XX_DESCR_TABLE_SIZE 32 +/* [R 6] Used to read the XX protection Free counter. */ +#define XCM_REG_XX_FREE 0x20240 +/* [RW 6] Initial value for the credit counter; responsible for fulfilling + of the Input Stage XX protection buffer by the XX protection pending + messages. Max credit available - 3.Write writes the initial credit value; + read returns the current value of the credit counter. Must be initialized + to 2 at start-up. */ +#define XCM_REG_XX_INIT_CRD 0x20424 +/* [RW 6] The maximum number of pending messages; which may be stored in XX + protection. ~xcm_registers_xx_free.xx_free read on read. */ +#define XCM_REG_XX_MSG_NUM 0x20428 +/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ +#define XCM_REG_XX_OVFL_EVNT_ID 0x20058 +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) +#define XMAC_CTRL_REG_LINE_LOCAL_LPBK (0x1<<2) +#define XMAC_CTRL_REG_RX_EN (0x1<<1) +#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) +#define XMAC_CTRL_REG_TX_EN (0x1<<0) +#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB (0x1<<7) +#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) +#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) +#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1) +#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) +#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) +#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) +#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5) +#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60 +#define XMAC_REG_CTRL 0 +/* [RW 16] Upper 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC + * packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_HI 0x2c +/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC + * packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_LO 0x28 +#define XMAC_REG_EEE_CTRL 0xd8 +#define XMAC_REG_EEE_TIMERS_HI 0xe4 +#define XMAC_REG_PAUSE_CTRL 0x68 +#define XMAC_REG_PFC_CTRL 0x70 +#define XMAC_REG_PFC_CTRL_HI 0x74 +#define XMAC_REG_RX_LSS_CTRL 0x50 +#define XMAC_REG_RX_LSS_STATUS 0x58 +/* [RW 14] Maximum packet size in receive direction; exclusive of preamble & + * CRC in strip mode */ +#define XMAC_REG_RX_MAX_SIZE 0x40 +#define XMAC_REG_TX_CTRL 0x20 +#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE (0x1<<0) +#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE (0x1<<1) +/* [RW 16] Indirect access to the XX table of the XX protection mechanism. + The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] - + header pointer. 
*/ +#define XCM_REG_XX_TABLE 0x20500 +/* [RW 8] The event id for aggregated interrupt 0 */ +#define XSDM_REG_AGG_INT_EVENT_0 0x166038 +#define XSDM_REG_AGG_INT_EVENT_1 0x16603c +#define XSDM_REG_AGG_INT_EVENT_10 0x166060 +#define XSDM_REG_AGG_INT_EVENT_11 0x166064 +#define XSDM_REG_AGG_INT_EVENT_12 0x166068 +#define XSDM_REG_AGG_INT_EVENT_13 0x16606c +#define XSDM_REG_AGG_INT_EVENT_14 0x166070 +#define XSDM_REG_AGG_INT_EVENT_2 0x166040 +#define XSDM_REG_AGG_INT_EVENT_3 0x166044 +#define XSDM_REG_AGG_INT_EVENT_4 0x166048 +#define XSDM_REG_AGG_INT_EVENT_5 0x16604c +#define XSDM_REG_AGG_INT_EVENT_6 0x166050 +#define XSDM_REG_AGG_INT_EVENT_7 0x166054 +#define XSDM_REG_AGG_INT_EVENT_8 0x166058 +#define XSDM_REG_AGG_INT_EVENT_9 0x16605c +/* [RW 1] For each aggregated interrupt index whether the mode is normal (0) + or auto-mask-mode (1) */ +#define XSDM_REG_AGG_INT_MODE_0 0x1661b8 +#define XSDM_REG_AGG_INT_MODE_1 0x1661bc +/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */ +#define XSDM_REG_CFC_RSP_START_ADDR 0x166008 +/* [RW 16] The maximum value of the completion counter #0 */ +#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c +/* [RW 16] The maximum value of the completion counter #1 */ +#define XSDM_REG_CMP_COUNTER_MAX1 0x166020 +/* [RW 16] The maximum value of the completion counter #2 */ +#define XSDM_REG_CMP_COUNTER_MAX2 0x166024 +/* [RW 16] The maximum value of the completion counter #3 */ +#define XSDM_REG_CMP_COUNTER_MAX3 0x166028 +/* [RW 13] The start address in the internal RAM for the completion + counters. */ +#define XSDM_REG_CMP_COUNTER_START_ADDR 0x16600c +#define XSDM_REG_ENABLE_IN1 0x166238 +#define XSDM_REG_ENABLE_IN2 0x16623c +#define XSDM_REG_ENABLE_OUT1 0x166240 +#define XSDM_REG_ENABLE_OUT2 0x166244 +/* [RW 4] The initial number of messages that can be sent to the pxp control + interface without receiving any ACK. 
*/ +#define XSDM_REG_INIT_CREDIT_PXP_CTRL 0x1664bc +/* [ST 32] The number of ACK after placement messages received */ +#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 0x16627c +/* [ST 32] The number of packet end messages received from the parser */ +#define XSDM_REG_NUM_OF_PKT_END_MSG 0x166274 +/* [ST 32] The number of requests received from the pxp async if */ +#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ 0x166278 +/* [ST 32] The number of commands received in queue 0 */ +#define XSDM_REG_NUM_OF_Q0_CMD 0x166248 +/* [ST 32] The number of commands received in queue 10 */ +#define XSDM_REG_NUM_OF_Q10_CMD 0x16626c +/* [ST 32] The number of commands received in queue 11 */ +#define XSDM_REG_NUM_OF_Q11_CMD 0x166270 +/* [ST 32] The number of commands received in queue 1 */ +#define XSDM_REG_NUM_OF_Q1_CMD 0x16624c +/* [ST 32] The number of commands received in queue 3 */ +#define XSDM_REG_NUM_OF_Q3_CMD 0x166250 +/* [ST 32] The number of commands received in queue 4 */ +#define XSDM_REG_NUM_OF_Q4_CMD 0x166254 +/* [ST 32] The number of commands received in queue 5 */ +#define XSDM_REG_NUM_OF_Q5_CMD 0x166258 +/* [ST 32] The number of commands received in queue 6 */ +#define XSDM_REG_NUM_OF_Q6_CMD 0x16625c +/* [ST 32] The number of commands received in queue 7 */ +#define XSDM_REG_NUM_OF_Q7_CMD 0x166260 +/* [ST 32] The number of commands received in queue 8 */ +#define XSDM_REG_NUM_OF_Q8_CMD 0x166264 +/* [ST 32] The number of commands received in queue 9 */ +#define XSDM_REG_NUM_OF_Q9_CMD 0x166268 +/* [RW 13] The start address in the internal RAM for queue counters */ +#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 +/* [W 17] Generate an operation after completion; bit-16 is + * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and + * bits 4:0 are the T124Param[4:0] */ +#define XSDM_REG_OPERATION_GEN 0x1664c4 +/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ +#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 +/* [R 1] parser fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_PARSER_EMPTY 0x166550 +/* [R 1] parser serial fifo empty in sdm_sync block */ +#define XSDM_REG_SYNC_SYNC_EMPTY 0x166558 +/* [RW 32] Tick for timer counter. Applicable only when + ~xsdm_registers_timer_tick_enable.timer_tick_enable =1 */ +#define XSDM_REG_TIMER_TICK 0x166000 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSDM_REG_XSDM_INT_MASK_0 0x16629c +#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac +/* [R 32] Interrupt register #0 read */ +#define XSDM_REG_XSDM_INT_STS_0 0x166290 +#define XSDM_REG_XSDM_INT_STS_1 0x1662a0 +/* [RW 11] Parity mask register #0 read/write */ +#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc +/* [R 11] Parity register #0 read */ +#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 +/* [RC 11] Parity register #0 read clear */ +#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 +/* [RW 5] The number of time_slots in the arbitration cycle */ +#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 +/* [RW 3] The source that is associated with arbitration element 0. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2 */ +#define XSEM_REG_ARB_ELEMENT0 0x280020 +/* [RW 3] The source that is associated with arbitration element 1. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. 
+ Could not be equal to register ~xsem_registers_arb_element0.arb_element0 */ +#define XSEM_REG_ARB_ELEMENT1 0x280024 +/* [RW 3] The source that is associated with arbitration element 2. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~xsem_registers_arb_element0.arb_element0 + and ~xsem_registers_arb_element1.arb_element1 */ +#define XSEM_REG_ARB_ELEMENT2 0x280028 +/* [RW 3] The source that is associated with arbitration element 3. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2.Could + not be equal to register ~xsem_registers_arb_element0.arb_element0 and + ~xsem_registers_arb_element1.arb_element1 and + ~xsem_registers_arb_element2.arb_element2 */ +#define XSEM_REG_ARB_ELEMENT3 0x28002c +/* [RW 3] The source that is associated with arbitration element 4. Source + decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3- + sleeping thread with priority 1; 4- sleeping thread with priority 2. + Could not be equal to register ~xsem_registers_arb_element0.arb_element0 + and ~xsem_registers_arb_element1.arb_element1 and + ~xsem_registers_arb_element2.arb_element2 and + ~xsem_registers_arb_element3.arb_element3 */ +#define XSEM_REG_ARB_ELEMENT4 0x280030 +#define XSEM_REG_ENABLE_IN 0x2800a4 +#define XSEM_REG_ENABLE_OUT 0x2800a8 +/* [RW 32] This address space contains all registers and memories that are + placed in SEM_FAST block. The SEM_FAST registers are described in + appendix B. In order to access the sem_fast registers the base address + ~fast_memory.fast_memory should be added to eachsem_fast register offset. */ +#define XSEM_REG_FAST_MEMORY 0x2a0000 +/* [RW 1] Disables input messages from FIC0 May be updated during run_time + by the microcode */ +#define XSEM_REG_FIC0_DISABLE 0x280224 +/* [RW 1] Disables input messages from FIC1 May be updated during run_time + by the microcode */ +#define XSEM_REG_FIC1_DISABLE 0x280234 +/* [RW 15] Interrupt table Read and write access to it is not possible in + the middle of the work */ +#define XSEM_REG_INT_TABLE 0x280400 +/* [ST 24] Statistics register. The number of messages that entered through + FIC0 */ +#define XSEM_REG_MSG_NUM_FIC0 0x280000 +/* [ST 24] Statistics register. The number of messages that entered through + FIC1 */ +#define XSEM_REG_MSG_NUM_FIC1 0x280004 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC0 */ +#define XSEM_REG_MSG_NUM_FOC0 0x280008 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC1 */ +#define XSEM_REG_MSG_NUM_FOC1 0x28000c +/* [ST 24] Statistics register. The number of messages that were sent to + FOC2 */ +#define XSEM_REG_MSG_NUM_FOC2 0x280010 +/* [ST 24] Statistics register. The number of messages that were sent to + FOC3 */ +#define XSEM_REG_MSG_NUM_FOC3 0x280014 +/* [RW 1] Disables input messages from the passive buffer May be updated + during run_time by the microcode */ +#define XSEM_REG_PAS_DISABLE 0x28024c +/* [WB 128] Debug only. Passive buffer memory */ +#define XSEM_REG_PASSIVE_BUFFER 0x282000 +/* [WB 46] pram memory. B45 is parity; b[44:0] - data. 
*/ +#define XSEM_REG_PRAM 0x2c0000 +/* [R 16] Valid sleeping threads indication have bit per thread */ +#define XSEM_REG_SLEEP_THREADS_VALID 0x28026c +/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */ +#define XSEM_REG_SLOW_EXT_STORE_EMPTY 0x2802a0 +/* [RW 16] List of free threads . There is a bit per thread. */ +#define XSEM_REG_THREADS_LIST 0x2802e4 +/* [RW 3] The arbitration scheme of time_slot 0 */ +#define XSEM_REG_TS_0_AS 0x280038 +/* [RW 3] The arbitration scheme of time_slot 10 */ +#define XSEM_REG_TS_10_AS 0x280060 +/* [RW 3] The arbitration scheme of time_slot 11 */ +#define XSEM_REG_TS_11_AS 0x280064 +/* [RW 3] The arbitration scheme of time_slot 12 */ +#define XSEM_REG_TS_12_AS 0x280068 +/* [RW 3] The arbitration scheme of time_slot 13 */ +#define XSEM_REG_TS_13_AS 0x28006c +/* [RW 3] The arbitration scheme of time_slot 14 */ +#define XSEM_REG_TS_14_AS 0x280070 +/* [RW 3] The arbitration scheme of time_slot 15 */ +#define XSEM_REG_TS_15_AS 0x280074 +/* [RW 3] The arbitration scheme of time_slot 16 */ +#define XSEM_REG_TS_16_AS 0x280078 +/* [RW 3] The arbitration scheme of time_slot 17 */ +#define XSEM_REG_TS_17_AS 0x28007c +/* [RW 3] The arbitration scheme of time_slot 18 */ +#define XSEM_REG_TS_18_AS 0x280080 +/* [RW 3] The arbitration scheme of time_slot 1 */ +#define XSEM_REG_TS_1_AS 0x28003c +/* [RW 3] The arbitration scheme of time_slot 2 */ +#define XSEM_REG_TS_2_AS 0x280040 +/* [RW 3] The arbitration scheme of time_slot 3 */ +#define XSEM_REG_TS_3_AS 0x280044 +/* [RW 3] The arbitration scheme of time_slot 4 */ +#define XSEM_REG_TS_4_AS 0x280048 +/* [RW 3] The arbitration scheme of time_slot 5 */ +#define XSEM_REG_TS_5_AS 0x28004c +/* [RW 3] The arbitration scheme of time_slot 6 */ +#define XSEM_REG_TS_6_AS 0x280050 +/* [RW 3] The arbitration scheme of time_slot 7 */ +#define XSEM_REG_TS_7_AS 0x280054 +/* [RW 3] The arbitration scheme of time_slot 8 */ +#define XSEM_REG_TS_8_AS 0x280058 +/* [RW 3] The arbitration scheme of time_slot 9 */ +#define XSEM_REG_TS_9_AS 0x28005c +/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 + * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. 
*/ +#define XSEM_REG_VFPF_ERR_NUM 0x280380 +/* [RW 32] Interrupt mask register #0 read/write */ +#define XSEM_REG_XSEM_INT_MASK_0 0x280110 +#define XSEM_REG_XSEM_INT_MASK_1 0x280120 +/* [R 32] Interrupt register #0 read */ +#define XSEM_REG_XSEM_INT_STS_0 0x280104 +#define XSEM_REG_XSEM_INT_STS_1 0x280114 +/* [RW 32] Parity mask register #0 read/write */ +#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 +#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 +/* [R 32] Parity register #0 read */ +#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 +#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 +/* [RC 32] Parity register #0 read clear */ +#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 +#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 +#define MCPR_ACCESS_LOCK_LOCK (1L<<31) +#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) +#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) +#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) +#define MCPR_NVM_CFG4_FLASH_SIZE (0x7L<<0) +#define MCPR_NVM_COMMAND_DOIT (1L<<4) +#define MCPR_NVM_COMMAND_DONE (1L<<3) +#define MCPR_NVM_COMMAND_FIRST (1L<<7) +#define MCPR_NVM_COMMAND_LAST (1L<<8) +#define MCPR_NVM_COMMAND_WR (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_ARB1 (1L<<9) +#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1 (1L<<5) +#define MCPR_NVM_SW_ARB_ARB_REQ_SET1 (1L<<1) +#define BIGMAC_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) +#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) +#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3) +#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) +#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) +#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) +#define BIGMAC_REGISTER_TX_CONTROL (0x07<<3) +#define BIGMAC_REGISTER_TX_MAX_SIZE (0x09<<3) +#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD (0x0A<<3) +#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) +#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) +#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) +#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3) +#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3) +#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3) +#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) +#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) +#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) +#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3) +#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) +#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) +#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) +#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3) +#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3) +#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3) +#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3) +#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3) +#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3) +#define EMAC_LED_1000MB_OVERRIDE (1L<<1) +#define EMAC_LED_100MB_OVERRIDE (1L<<2) +#define EMAC_LED_10MB_OVERRIDE (1L<<3) +#define EMAC_LED_2500MB_OVERRIDE (1L<<12) +#define EMAC_LED_OVERRIDE (1L<<0) +#define EMAC_LED_TRAFFIC (1L<<6) +#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) +#define EMAC_MDIO_COMM_DATA (0xffffL<<0) +#define EMAC_MDIO_COMM_START_BUSY (1L<<29) +#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) +#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) +#define 
EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16) +#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 +#define EMAC_MDIO_STATUS_10MB (1L<<1) +#define EMAC_MODE_25G_MODE (1L<<5) +#define EMAC_MODE_HALF_DUPLEX (1L<<1) +#define EMAC_MODE_PORT_GMII (2L<<2) +#define EMAC_MODE_PORT_MII (1L<<2) +#define EMAC_MODE_PORT_MII_10M (3L<<2) +#define EMAC_MODE_RESET (1L<<0) +#define EMAC_REG_EMAC_LED 0xc +#define EMAC_REG_EMAC_MAC_MATCH 0x10 +#define EMAC_REG_EMAC_MDIO_COMM 0xac +#define EMAC_REG_EMAC_MDIO_MODE 0xb4 +#define EMAC_REG_EMAC_MDIO_STATUS 0xb0 +#define EMAC_REG_EMAC_MODE 0x0 +#define EMAC_REG_EMAC_RX_MODE 0xc8 +#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c +#define EMAC_REG_EMAC_RX_STAT_AC 0x180 +#define EMAC_REG_EMAC_RX_STAT_AC_28 0x1f4 +#define EMAC_REG_EMAC_RX_STAT_AC_COUNT 23 +#define EMAC_REG_EMAC_TX_MODE 0xbc +#define EMAC_REG_EMAC_TX_STAT_AC 0x280 +#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 +#define EMAC_REG_RX_PFC_MODE 0x320 +#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2) +#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1) +#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0) +#define EMAC_REG_RX_PFC_PARAM 0x324 +#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0 +#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330 +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c +#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0) +#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334 +#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0) +#define EMAC_RX_MODE_FLOW_EN (1L<<2) +#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3) +#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) +#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) +#define EMAC_RX_MODE_RESET (1L<<0) +#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) +#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) +#define EMAC_TX_MODE_FLOW_EN (1L<<4) +#define EMAC_TX_MODE_RESET (1L<<0) +#define MISC_REGISTERS_GPIO_0 0 +#define MISC_REGISTERS_GPIO_1 1 +#define MISC_REGISTERS_GPIO_2 2 +#define MISC_REGISTERS_GPIO_3 3 +#define MISC_REGISTERS_GPIO_CLR_POS 16 +#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24) +#define MISC_REGISTERS_GPIO_FLOAT_POS 24 +#define MISC_REGISTERS_GPIO_HIGH 1 +#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_GPIO_INT_CLR_POS 24 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR 0 +#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET 1 +#define MISC_REGISTERS_GPIO_INT_SET_POS 16 +#define MISC_REGISTERS_GPIO_LOW 0 +#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 +#define MISC_REGISTERS_GPIO_SET_POS 8 +#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 +#define MISC_REGISTERS_RESET_REG_1_RST_BRB1 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19) +#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29) +#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) +#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26) +#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) +#define MISC_REGISTERS_RESET_REG_1_RST_XSEM (0x1<<22) +#define MISC_REGISTERS_RESET_REG_1_SET 0x584 +#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 +#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) +#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25) +#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19) +#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1) 
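The EMAC_MDIO_COMM_* masks above pack an entire MDIO transaction into a single 32-bit command register: a 16-bit data field, a command field that selects a clause-22 or clause-45 access, and a START_BUSY bit that both launches the transaction and reads back as the busy flag. The sketch below shows how a clause-22 write could be composed from these masks. It is illustrative only: REG_WR()/REG_RD() are assumed to be the driver's 32-bit register accessors, and the PHY-address (bit 21) and register-address (bit 16) positions are assumptions, since only the command and data masks appear in this header.

static int example_cl22_mdio_write(struct bnx2x *bp, u32 emac_base,
				   u8 phy_addr, u8 reg, u16 val)
{
	u32 mdio_comm = emac_base + EMAC_REG_EMAC_MDIO_COMM;
	u32 tmp;
	int i;

	/* Compose address, data and command in one register write;
	 * START_BUSY kicks off the transaction.
	 */
	tmp = (phy_addr << 21) | (reg << 16) |
	      (val & EMAC_MDIO_COMM_DATA) |
	      EMAC_MDIO_COMM_COMMAND_WRITE_22 |
	      EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, mdio_comm, tmp);

	/* START_BUSY clears once the MAC has finished shifting the frame. */
	for (i = 0; i < 50; i++) {
		udelay(10);
		if (!(REG_RD(bp, mdio_comm) & EMAC_MDIO_COMM_START_BUSY))
			return 0;
	}
	return -EBUSY;
}

A caller would typically pass GRCBASE_EMAC0 or GRCBASE_EMAC1 (defined further down in this header) as emac_base, so that the EMAC_REG_* offsets resolve to the right per-port MAC block.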
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) +#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) +#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) +#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) +#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13) +#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) +#define MISC_REGISTERS_RESET_REG_2_SET 0x594 +#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20) +#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21) +#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22) +#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23) +#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW (0x1<<0) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ (0x1<<5) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN (0x1<<6) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD (0x1<<7) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) +#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) +#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 +#define MISC_REGISTERS_SPIO_4 4 +#define MISC_REGISTERS_SPIO_5 5 +#define MISC_REGISTERS_SPIO_7 7 +#define MISC_REGISTERS_SPIO_CLR_POS 16 +#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24) +#define MISC_REGISTERS_SPIO_FLOAT_POS 24 +#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2 +#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16 +#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 +#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 +#define MISC_REGISTERS_SPIO_SET_POS 8 +#define MISC_SPIO_CLR_POS 16 +#define MISC_SPIO_FLOAT (0xffL<<24) +#define MISC_SPIO_FLOAT_POS 24 +#define MISC_SPIO_INPUT_HI_Z 2 +#define MISC_SPIO_INT_OLD_SET_POS 16 +#define MISC_SPIO_OUTPUT_HIGH 1 +#define MISC_SPIO_OUTPUT_LOW 0 +#define MISC_SPIO_SET_POS 8 +#define MISC_SPIO_SPIO4 0x10 +#define MISC_SPIO_SPIO5 0x20 +#define HW_LOCK_MAX_RESOURCE_VALUE 31 +#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13 +#define HW_LOCK_RESOURCE_DRV_FLAGS 10 +#define HW_LOCK_RESOURCE_GPIO 1 +#define HW_LOCK_RESOURCE_MDIO 0 +#define HW_LOCK_RESOURCE_NVRAM 12 +#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 +#define HW_LOCK_RESOURCE_RECOVERY_REG 11 +#define HW_LOCK_RESOURCE_RESET 5 +#define HW_LOCK_RESOURCE_SPIO 2 +#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19) +#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT 
(0x1<<7) +#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1) +#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23) +#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21) +#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16) +#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10) + +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9) + +#define RESERVED_GENERAL_ATTENTION_BIT_0 0 + +#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 +#define 
EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 + +#define RESERVED_GENERAL_ATTENTION_BIT_6 6 +#define RESERVED_GENERAL_ATTENTION_BIT_7 7 +#define RESERVED_GENERAL_ATTENTION_BIT_8 8 +#define RESERVED_GENERAL_ATTENTION_BIT_9 9 +#define RESERVED_GENERAL_ATTENTION_BIT_10 10 +#define RESERVED_GENERAL_ATTENTION_BIT_11 11 +#define RESERVED_GENERAL_ATTENTION_BIT_12 12 +#define RESERVED_GENERAL_ATTENTION_BIT_13 13 +#define RESERVED_GENERAL_ATTENTION_BIT_14 14 +#define RESERVED_GENERAL_ATTENTION_BIT_15 15 +#define RESERVED_GENERAL_ATTENTION_BIT_16 16 +#define RESERVED_GENERAL_ATTENTION_BIT_17 17 +#define RESERVED_GENERAL_ATTENTION_BIT_18 18 +#define RESERVED_GENERAL_ATTENTION_BIT_19 19 +#define RESERVED_GENERAL_ATTENTION_BIT_20 20 +#define RESERVED_GENERAL_ATTENTION_BIT_21 21 + +/* storm asserts attention bits */ +#define TSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_7 +#define USTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_8 +#define CSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_9 +#define XSTORM_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_10 + +/* mcp error attention bit */ +#define MCP_FATAL_ASSERT_ATTENTION_BIT RESERVED_GENERAL_ATTENTION_BIT_11 + +/*E1H NIG status sync attention mapped to group 4-7*/ +#define LINK_SYNC_ATTENTION_BIT_FUNC_0 RESERVED_GENERAL_ATTENTION_BIT_12 +#define LINK_SYNC_ATTENTION_BIT_FUNC_1 RESERVED_GENERAL_ATTENTION_BIT_13 +#define LINK_SYNC_ATTENTION_BIT_FUNC_2 RESERVED_GENERAL_ATTENTION_BIT_14 +#define LINK_SYNC_ATTENTION_BIT_FUNC_3 RESERVED_GENERAL_ATTENTION_BIT_15 +#define LINK_SYNC_ATTENTION_BIT_FUNC_4 RESERVED_GENERAL_ATTENTION_BIT_16 +#define LINK_SYNC_ATTENTION_BIT_FUNC_5 RESERVED_GENERAL_ATTENTION_BIT_17 +#define LINK_SYNC_ATTENTION_BIT_FUNC_6 RESERVED_GENERAL_ATTENTION_BIT_18 +#define LINK_SYNC_ATTENTION_BIT_FUNC_7 RESERVED_GENERAL_ATTENTION_BIT_19 + + +#define LATCHED_ATTN_RBCR 23 +#define LATCHED_ATTN_RBCT 24 +#define LATCHED_ATTN_RBCN 25 +#define LATCHED_ATTN_RBCU 26 +#define LATCHED_ATTN_RBCP 27 +#define LATCHED_ATTN_TIMEOUT_GRC 28 +#define LATCHED_ATTN_RSVD_GRC 29 +#define LATCHED_ATTN_ROM_PARITY_MCP 30 +#define LATCHED_ATTN_UM_RX_PARITY_MCP 31 +#define LATCHED_ATTN_UM_TX_PARITY_MCP 32 +#define LATCHED_ATTN_SCPAD_PARITY_MCP 33 + +#define GENERAL_ATTEN_WORD(atten_name) ((94 + atten_name) / 32) +#define GENERAL_ATTEN_OFFSET(atten_name)\ + (1UL << ((94 + atten_name) % 32)) +/* + * This file defines GRC base address for every block. + * This file is included by chipsim, asm microcode and cpp microcode. + * These values are used in Design.xml on regBase attribute + * Use the base with the generated offsets of specific registers. 
+ */ + +#define GRCBASE_PXPCS 0x000000 +#define GRCBASE_PCICONFIG 0x002000 +#define GRCBASE_PCIREG 0x002400 +#define GRCBASE_EMAC0 0x008000 +#define GRCBASE_EMAC1 0x008400 +#define GRCBASE_DBU 0x008800 +#define GRCBASE_MISC 0x00A000 +#define GRCBASE_DBG 0x00C000 +#define GRCBASE_NIG 0x010000 +#define GRCBASE_XCM 0x020000 +#define GRCBASE_PRS 0x040000 +#define GRCBASE_SRCH 0x040400 +#define GRCBASE_TSDM 0x042000 +#define GRCBASE_TCM 0x050000 +#define GRCBASE_BRB1 0x060000 +#define GRCBASE_MCP 0x080000 +#define GRCBASE_UPB 0x0C1000 +#define GRCBASE_CSDM 0x0C2000 +#define GRCBASE_USDM 0x0C4000 +#define GRCBASE_CCM 0x0D0000 +#define GRCBASE_UCM 0x0E0000 +#define GRCBASE_CDU 0x101000 +#define GRCBASE_DMAE 0x102000 +#define GRCBASE_PXP 0x103000 +#define GRCBASE_CFC 0x104000 +#define GRCBASE_HC 0x108000 +#define GRCBASE_PXP2 0x120000 +#define GRCBASE_PBF 0x140000 +#define GRCBASE_UMAC0 0x160000 +#define GRCBASE_UMAC1 0x160400 +#define GRCBASE_XPB 0x161000 +#define GRCBASE_MSTAT0 0x162000 +#define GRCBASE_MSTAT1 0x162800 +#define GRCBASE_XMAC0 0x163000 +#define GRCBASE_XMAC1 0x163800 +#define GRCBASE_TIMERS 0x164000 +#define GRCBASE_XSDM 0x166000 +#define GRCBASE_QM 0x168000 +#define GRCBASE_DQ 0x170000 +#define GRCBASE_TSEM 0x180000 +#define GRCBASE_CSEM 0x200000 +#define GRCBASE_XSEM 0x280000 +#define GRCBASE_USEM 0x300000 +#define GRCBASE_MISC_AEU GRCBASE_MISC + + +/* offset of configuration space in the pci core register */ +#define PCICFG_OFFSET 0x2000 +#define PCICFG_VENDOR_ID_OFFSET 0x00 +#define PCICFG_DEVICE_ID_OFFSET 0x02 +#define PCICFG_COMMAND_OFFSET 0x04 +#define PCICFG_COMMAND_IO_SPACE (1<<0) +#define PCICFG_COMMAND_MEM_SPACE (1<<1) +#define PCICFG_COMMAND_BUS_MASTER (1<<2) +#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3) +#define PCICFG_COMMAND_MWI_CYCLES (1<<4) +#define PCICFG_COMMAND_VGA_SNOOP (1<<5) +#define PCICFG_COMMAND_PERR_ENA (1<<6) +#define PCICFG_COMMAND_STEPPING (1<<7) +#define PCICFG_COMMAND_SERR_ENA (1<<8) +#define PCICFG_COMMAND_FAST_B2B (1<<9) +#define PCICFG_COMMAND_INT_DISABLE (1<<10) +#define PCICFG_COMMAND_RESERVED (0x1f<<11) +#define PCICFG_STATUS_OFFSET 0x06 +#define PCICFG_REVISION_ID_OFFSET 0x08 +#define PCICFG_REVESION_ID_MASK 0xff +#define PCICFG_REVESION_ID_ERROR_VAL 0xff +#define PCICFG_CACHE_LINE_SIZE 0x0c +#define PCICFG_LATENCY_TIMER 0x0d +#define PCICFG_BAR_1_LOW 0x10 +#define PCICFG_BAR_1_HIGH 0x14 +#define PCICFG_BAR_2_LOW 0x18 +#define PCICFG_BAR_2_HIGH 0x1c +#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c +#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e +#define PCICFG_INT_LINE 0x3c +#define PCICFG_INT_PIN 0x3d +#define PCICFG_PM_CAPABILITY 0x48 +#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16) +#define PCICFG_PM_CAPABILITY_CLOCK (1<<19) +#define PCICFG_PM_CAPABILITY_RESERVED (1<<20) +#define PCICFG_PM_CAPABILITY_DSI (1<<21) +#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22) +#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25) +#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26) +#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27) +#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28) +#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30) +#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31) +#define PCICFG_PM_CSR_OFFSET 0x4c +#define PCICFG_PM_CSR_STATE (0x3<<0) +#define PCICFG_PM_CSR_PME_ENABLE (1<<8) +#define PCICFG_PM_CSR_PME_STATUS (1<<15) +#define PCICFG_MSI_CAP_ID_OFFSET 0x58 +#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16) +#define PCICFG_MSI_CONTROL_MCAP (0x7<<17) +#define PCICFG_MSI_CONTROL_MENA (0x7<<20) +#define 
PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23) +#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24) +#define PCICFG_GRC_ADDRESS 0x78 +#define PCICFG_GRC_DATA 0x80 +#define PCICFG_ME_REGISTER 0x98 +#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0 +#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16) +#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27) +#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30) +#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31) + +#define PCICFG_DEVICE_CONTROL 0xb4 +#define PCICFG_DEVICE_STATUS 0xb6 +#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0) +#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1) +#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2) +#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3) +#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4) +#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5) +#define PCICFG_LINK_CONTROL 0xbc + + +#define BAR_USTRORM_INTMEM 0x400000 +#define BAR_CSTRORM_INTMEM 0x410000 +#define BAR_XSTRORM_INTMEM 0x420000 +#define BAR_TSTRORM_INTMEM 0x430000 + +/* for accessing the IGU in case of status block ACK */ +#define BAR_IGU_INTMEM 0x440000 + +#define BAR_DOORBELL_OFFSET 0x800000 + +#define BAR_ME_REGISTER 0x450000 + +/* config_2 offset */ +#define GRC_CONFIG_2_SIZE_REG 0x408 +#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) +#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) +#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) +#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) +#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) +#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) +#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) +#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) + +/* config_3 offset */ +#define GRC_CONFIG_3_SIZE_REG 0x40c +#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) +#define PCI_CONFIG_3_FORCE_PME (1L<<24) +#define PCI_CONFIG_3_PME_STATUS (1L<<25) +#define PCI_CONFIG_3_PME_ENABLE (1L<<26) +#define PCI_CONFIG_3_PM_STATE (0x3L<<27) +#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) +#define PCI_CONFIG_3_PCI_POWER (1L<<31) + +#define GRC_BAR2_CONFIG 0x4e0 +#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) +#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) +#define 
PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) +#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) +#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) + +#define PCI_PM_DATA_A 0x410 +#define PCI_PM_DATA_B 0x414 +#define PCI_ID_VAL1 0x434 +#define PCI_ID_VAL2 0x438 +#define PCI_ID_VAL3 0x43c + +#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs \ + * programmed for each PF. + * Since registers from 0x000-0x7ff are split across functions, each PF will + * have the same location for the same 4 bits + */ + +#define PXPCS_TL_CONTROL_5 0x814 +#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/ +#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/ +#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/ +#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/ +#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/ +#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/ +#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/ +#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/ +#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/ +#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/ +#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/ + + +#define PXPCS_TL_FUNC345_STAT 0x854 +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\ + (1 << 28) /* Unsupported Request Error Status in function4, if \ + set, generate pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\ + (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\ + (1 << 26) /* Malformed TLP Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\ + (1 << 25) /* Receiver Overflow Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\ + (1 << 24) /* Unexpected Completion Status Status in function 4, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\ + (1 << 23) /* Receive UR Statusin function 4. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\ + (1 << 22) /* Completer Timeout Status Status in function 4, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 4, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\ + (1 << 20) /* Poisoned Error Status Status in function 4, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\ + (1 << 18) /* Unsupported Request Error Status in function3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\ + (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\ + (1 << 16) /* Malformed TLP Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\ + (1 << 15) /* Receiver Overflow Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\ + (1 << 14) /* Unexpected Completion Status Status in function 3, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\ + (1 << 13) /* Receive UR Statusin function 3. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\ + (1 << 12) /* Completer Timeout Status Status in function 3, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 3, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\ + (1 << 10) /* Poisoned Error Status Status in function 3, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\ + (1 << 8) /* Unsupported Request Error Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\ + (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\ + (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\ + (1 << 5) /* Receiver Overflow Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\ + (1 << 4) /* Unexpected Completion Status Status for Function 2, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\ + (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\ + (1 << 2) /* Completer Timeout Status Status for Function 2, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 2, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\ + (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ + + +#define PXPCS_TL_FUNC678_STAT 0x85C +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\ + (1 << 28) /* Unsupported Request Error Status in function7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\ + (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\ + (1 << 26) /* Malformed TLP Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\ + (1 << 25) /* Receiver Overflow Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\ + (1 << 24) /* Unexpected Completion Status Status in function 7, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\ + (1 << 23) /* Receive UR Statusin function 7. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\ + (1 << 22) /* Completer Timeout Status Status in function 7, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\ + (1 << 21) /* Flow Control Protocol Error Status Status in \ + function 7, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\ + (1 << 20) /* Poisoned Error Status Status in function 7, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\ + (1 << 18) /* Unsupported Request Error Status in function6, if \ + set, generate pcie_err_attn output when this error is seen. 
WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\ + (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\ + (1 << 16) /* Malformed TLP Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\ + (1 << 15) /* Receiver Overflow Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\ + (1 << 14) /* Unexpected Completion Status Status in function 6, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\ + (1 << 13) /* Receive UR Statusin function 6. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\ + (1 << 12) /* Completer Timeout Status Status in function 6, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\ + (1 << 11) /* Flow Control Protocol Error Status Status in \ + function 6, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\ + (1 << 10) /* Poisoned Error Status Status in function 6, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\ + (1 << 8) /* Unsupported Request Error Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\ + (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\ + (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\ + (1 << 5) /* Receiver Overflow Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen.. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\ + (1 << 4) /* Unexpected Completion Status Status for Function 5, \ + if set, generate pcie_err_attn output when this error is seen. WC \ + */ +#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\ + (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \ + pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\ + (1 << 2) /* Completer Timeout Status Status for Function 5, if \ + set, generate pcie_err_attn output when this error is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\ + (1 << 1) /* Flow Control Protocol Error Status Status for \ + Function 5, if set, generate pcie_err_attn output when this error \ + is seen. WC */ +#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\ + (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \ + generate pcie_err_attn output when this error is seen.. 
WC */ + + +#define BAR_USTRORM_INTMEM 0x400000 +#define BAR_CSTRORM_INTMEM 0x410000 +#define BAR_XSTRORM_INTMEM 0x420000 +#define BAR_TSTRORM_INTMEM 0x430000 + +/* for accessing the IGU in case of status block ACK */ +#define BAR_IGU_INTMEM 0x440000 + +#define BAR_DOORBELL_OFFSET 0x800000 + +#define BAR_ME_REGISTER 0x450000 +#define ME_REG_PF_NUM_SHIFT 0 +#define ME_REG_PF_NUM\ + (7L<> 1; + } + + /* split the crc into 8 bits */ + for (i = 0; i < 8; i++) { + C[i] = crc & 1; + crc = crc >> 1; + } + + NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^ + D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^ + C[6] ^ C[7]; + NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^ + D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ + D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ + C[6]; + NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^ + D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^ + C[0] ^ C[1] ^ C[4] ^ C[5]; + NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^ + D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^ + C[1] ^ C[2] ^ C[5] ^ C[6]; + NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^ + D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^ + C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7]; + NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^ + D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^ + C[3] ^ C[4] ^ C[7]; + NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^ + D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ + C[5]; + NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^ + D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ + C[6]; + + crc_res = 0; + for (i = 0; i < 8; i++) + crc_res |= (NewCRC[i] << i); + + return crc_res; +} + + +#endif /* BNX2X_REG_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c new file mode 100644 index 000000000..07cdf9bbf --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -0,0 +1,6060 @@ +/* bnx2x_sp.c: Broadcom Everest network driver. + * + * Copyright (c) 2011-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Ariel Elior + * Written by: Vladislav Zolotarov + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_sp.h" + +#define BNX2X_MAX_EMUL_MULTI 16 + +/**** Exe Queue interfaces ****/ + +/** + * bnx2x_exe_queue_init - init the Exe Queue object + * + * @o: pointer to the object + * @exe_len: length + * @owner: pointer to the owner + * @validate: validate function pointer + * @optimize: optimize function pointer + * @exec: execute function pointer + * @get: get function pointer + */ +static inline void bnx2x_exe_queue_init(struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o, + int exe_len, + union bnx2x_qable_obj *owner, + exe_q_validate validate, + exe_q_remove remove, + exe_q_optimize optimize, + exe_q_execute exec, + exe_q_get get) +{ + memset(o, 0, sizeof(*o)); + + INIT_LIST_HEAD(&o->exe_queue); + INIT_LIST_HEAD(&o->pending_comp); + + spin_lock_init(&o->lock); + + o->exe_chunk_len = exe_len; + o->owner = owner; + + /* Owner specific callbacks */ + o->validate = validate; + o->remove = remove; + o->optimize = optimize; + o->execute = exec; + o->get = get; + + DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n", + exe_len); +} + +static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, + struct bnx2x_exeq_elem *elem) +{ + DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n"); + kfree(elem); +} + +static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) +{ + struct bnx2x_exeq_elem *elem; + int cnt = 0; + + spin_lock_bh(&o->lock); + + list_for_each_entry(elem, &o->exe_queue, link) + cnt++; + + spin_unlock_bh(&o->lock); + + return cnt; +} + +/** + * bnx2x_exe_queue_add - add a new element to the execution queue + * + * @bp: driver handle + * @o: queue + * @cmd: new command to add + * @restore: true - do not optimize the command + * + * If the element is optimized or is illegal, frees it. + */ +static inline int bnx2x_exe_queue_add(struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem, + bool restore) +{ + int rc; + + spin_lock_bh(&o->lock); + + if (!restore) { + /* Try to cancel this element queue */ + rc = o->optimize(bp, o->owner, elem); + if (rc) + goto free_and_exit; + + /* Check if this request is ok */ + rc = o->validate(bp, o->owner, elem); + if (rc) { + DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc); + goto free_and_exit; + } + } + + /* If so, add it to the execution queue */ + list_add_tail(&elem->link, &o->exe_queue); + + spin_unlock_bh(&o->lock); + + return 0; + +free_and_exit: + bnx2x_exe_queue_free_elem(bp, elem); + + spin_unlock_bh(&o->lock); + + return rc; +} + +static inline void __bnx2x_exe_queue_reset_pending( + struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o) +{ + struct bnx2x_exeq_elem *elem; + + while (!list_empty(&o->pending_comp)) { + elem = list_first_entry(&o->pending_comp, + struct bnx2x_exeq_elem, link); + + list_del(&elem->link); + bnx2x_exe_queue_free_elem(bp, elem); + } +} + +/** + * bnx2x_exe_queue_step - execute one execution chunk atomically + * + * @bp: driver handle + * @o: queue + * @ramrod_flags: flags + * + * (Should be called while holding the exe_queue->lock). 
+ */ +static inline int bnx2x_exe_queue_step(struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o, + unsigned long *ramrod_flags) +{ + struct bnx2x_exeq_elem *elem, spacer; + int cur_len = 0, rc; + + memset(&spacer, 0, sizeof(spacer)); + + /* Next step should not be performed until the current is finished, + * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to + * properly clear object internals without sending any command to the FW + * which also implies there won't be any completion to clear the + * 'pending' list. + */ + if (!list_empty(&o->pending_comp)) { + if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n"); + __bnx2x_exe_queue_reset_pending(bp, o); + } else { + return 1; + } + } + + /* Run through the pending commands list and create a next + * execution chunk. + */ + while (!list_empty(&o->exe_queue)) { + elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, + link); + WARN_ON(!elem->cmd_len); + + if (cur_len + elem->cmd_len <= o->exe_chunk_len) { + cur_len += elem->cmd_len; + /* Prevent from both lists being empty when moving an + * element. This will allow the call of + * bnx2x_exe_queue_empty() without locking. + */ + list_add_tail(&spacer.link, &o->pending_comp); + mb(); + list_move_tail(&elem->link, &o->pending_comp); + list_del(&spacer.link); + } else + break; + } + + /* Sanity check */ + if (!cur_len) + return 0; + + rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); + if (rc < 0) + /* In case of an error return the commands back to the queue + * and reset the pending_comp. + */ + list_splice_init(&o->pending_comp, &o->exe_queue); + else if (!rc) + /* If zero is returned, means there are no outstanding pending + * completions and we may dismiss the pending list. + */ + __bnx2x_exe_queue_reset_pending(bp, o); + + return rc; +} + +static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o) +{ + bool empty = list_empty(&o->exe_queue); + + /* Don't reorder!!! */ + mb(); + + return empty && list_empty(&o->pending_comp); +} + +static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem( + struct bnx2x *bp) +{ + DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n"); + return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC); +} + +/************************ raw_obj functions ***********************************/ +static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) +{ + return !!test_bit(o->state, o->pstate); +} + +static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) +{ + smp_mb__before_atomic(); + clear_bit(o->state, o->pstate); + smp_mb__after_atomic(); +} + +static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) +{ + smp_mb__before_atomic(); + set_bit(o->state, o->pstate); + smp_mb__after_atomic(); +} + +/** + * bnx2x_state_wait - wait until the given bit(state) is cleared + * + * @bp: device handle + * @state: state which is to be cleared + * @state_p: state buffer + * + */ +static inline int bnx2x_state_wait(struct bnx2x *bp, int state, + unsigned long *pstate) +{ + /* can take a while if any port is running */ + int cnt = 5000; + + if (CHIP_REV_IS_EMUL(bp)) + cnt *= 20; + + DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state); + + might_sleep(); + while (cnt--) { + if (!test_bit(state, pstate)) { +#ifdef BNX2X_STOP_ON_ERROR + DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt); +#endif + return 0; + } + + usleep_range(1000, 2000); + + if (bp->panic) + return -EIO; + } + + /* timeout! 
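[Editor's note] The raw-object helpers above gate every ramrod on a single "pending" bit: set_pending() marks a command in flight, the completion path clears it, and bnx2x_state_wait() polls until the bit drops or a timeout budget runs out. A minimal userspace analogue of that handshake, using C11 atomics and nanosleep() in place of the kernel's set_bit()/clear_bit() and usleep_range() (all names below are invented for illustration, not part of the driver):

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_ulong state_bits = 0;       /* stands in for raw->pstate */
#define MY_PENDING_BIT 0                  /* stands in for raw->state  */

static void set_pending(void)   { atomic_fetch_or(&state_bits, 1UL << MY_PENDING_BIT); }
static void clear_pending(void) { atomic_fetch_and(&state_bits, ~(1UL << MY_PENDING_BIT)); }

/* Poll until the bit clears, like bnx2x_state_wait(): ~5000 * 1ms budget. */
static int state_wait(int bit)
{
        int cnt = 5000;
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

        while (cnt--) {
                if (!(atomic_load(&state_bits) & (1UL << bit)))
                        return 0;         /* completion arrived */
                nanosleep(&ts, NULL);
        }
        return -1;                        /* timeout; the driver logs and may panic */
}

int main(void)
{
        set_pending();
        /* a completion handler running elsewhere would call clear_pending() */
        clear_pending();
        printf("wait rc = %d\n", state_wait(MY_PENDING_BIT));
        return 0;
}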
*/ + BNX2X_ERR("timeout waiting for state %d\n", state); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + + return -EBUSY; +} + +static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) +{ + return bnx2x_state_wait(bp, raw->state, raw->pstate); +} + +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* credit handling callbacks */ +static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + WARN_ON(!mp); + + return mp->get_entry(mp, offset); +} + +static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + WARN_ON(!mp); + + return mp->get(mp, 1); +} + +static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + WARN_ON(!vp); + + return vp->get_entry(vp, offset); +} + +static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + WARN_ON(!vp); + + return vp->get(vp, 1); +} +static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + return mp->put_entry(mp, offset); +} + +static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + return mp->put(mp, 1); +} + +static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + return vp->put_entry(vp, offset); +} + +static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + return vp->put(vp, 1); +} + +/** + * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details: Non-blocking implementation; should be called under execution + * queue lock. + */ +static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (o->head_reader) { + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n"); + return -EBUSY; + } + + DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n"); + return 0; +} + +/** + * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock; notice it might release + * and reclaim it during its run. + */ +static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + unsigned long ramrod_flags = o->saved_ramrod_flags; + + DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n", + ramrod_flags); + o->head_exe_request = false; + o->saved_ramrod_flags = 0; + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); + if (rc != 0) { + BNX2X_ERR("execution of pending commands failed with rc %d\n", + rc); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } +} + +/** + * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run + * + * @bp: device handle + * @o: vlan_mac object + * @ramrod_flags: ramrod flags of missed execution + * + * @details Should be called under execution queue lock. 
+ */ +static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long ramrod_flags) +{ + o->head_exe_request = true; + o->saved_ramrod_flags = ramrod_flags; + DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n", + ramrod_flags); +} + +/** + * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would perform it - possibly releasing and + * reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* It's possible a new pending execution was added since this writer + * executed. If so, execute again. [Ad infinitum] + */ + while (o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n"); + __bnx2x_vlan_mac_h_exec_pending(bp, o); + } +} + + +/** + * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under the execution queue lock. May sleep. May + * release and reclaim execution queue lock during its run. + */ +static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + /* If we got here, we're holding lock --> no WRITER exists */ + o->head_reader++; + DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n", + o->head_reader); + + return 0; +} + +/** + * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details May sleep. Claims and releases execution queue lock during its run. + */ +int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int rc; + + spin_lock_bh(&o->exe_queue.lock); + rc = __bnx2x_vlan_mac_h_read_lock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + +/** + * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Should be called under execution queue lock. Notice if a pending + * execution exists, it would be performed if this was the last + * reader. possibly releasing and reclaiming the execution queue lock. + */ +static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + if (!o->head_reader) { + BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n"); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + } else { + o->head_reader--; + DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n", + o->head_reader); + } + + /* It's possible a new pending execution was added, and that this reader + * was last - if so we need to execute the command. + */ + if (!o->head_reader && o->head_exe_request) { + DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n"); + + /* Writer release will do the trick */ + __bnx2x_vlan_mac_h_write_unlock(bp, o); + } +} + +/** + * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock + * + * @bp: device handle + * @o: vlan_mac object + * + * @details Notice if a pending execution exists, it would be performed if this + * was the last reader. Claims and releases the execution queue lock + * during its run. 
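[Editor's note] The head-list locking above is not a generic rwlock: readers only bump a counter while holding the execution-queue spinlock, a writer merely try-locks (and fails while readers exist), and a failed writer records its request so the last reader replays it on unlock. A compact userspace sketch of that scheme, with a pthread mutex standing in for the exe_queue spinlock and a callback standing in for the pended execution step (all names here are invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct head_lock {
        pthread_mutex_t lock;        /* plays the role of exe_queue.lock   */
        int readers;                 /* head_reader                        */
        bool exe_request;            /* head_exe_request                   */
        void (*pended_step)(void);   /* the execution step that was pended */
};

static int write_trylock(struct head_lock *h)
{
        return h->readers ? -1 : 0;  /* caller already holds h->lock */
}

static void read_lock(struct head_lock *h)
{
        pthread_mutex_lock(&h->lock);
        h->readers++;
        pthread_mutex_unlock(&h->lock);
}

static void read_unlock(struct head_lock *h)
{
        pthread_mutex_lock(&h->lock);
        if (--h->readers == 0 && h->exe_request) {
                h->exe_request = false;
                h->pended_step();    /* last reader replays the deferred work */
        }
        pthread_mutex_unlock(&h->lock);
}

static void deferred_step(void) { puts("running pended execution step"); }

int main(void)
{
        struct head_lock h = { PTHREAD_MUTEX_INITIALIZER, 0, false, deferred_step };

        read_lock(&h);
        pthread_mutex_lock(&h.lock);
        if (write_trylock(&h))       /* fails: a reader is active */
                h.exe_request = true;/* pend the step instead     */
        pthread_mutex_unlock(&h.lock);
        read_unlock(&h);             /* last reader runs the pended step */
        return 0;
}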
+ */ +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + spin_lock_bh(&o->exe_queue.lock); + __bnx2x_vlan_mac_h_read_unlock(bp, o); + spin_unlock_bh(&o->exe_queue.lock); +} + +static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, + int n, u8 *base, u8 stride, u8 size) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + u8 *next = base; + int counter = 0; + int read_lock; + + DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n"); + + /* traverse list */ + list_for_each_entry(pos, &o->head, link) { + if (counter < n) { + memcpy(next, &pos->u, size); + counter++; + DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n", + counter, next); + next += stride + size; + } + } + + if (read_lock == 0) { + DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + } + + return counter * ETH_ALEN; +} + +/* check_add() callbacks */ +static int bnx2x_check_mac_add(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac); + + if (!is_valid_ether_addr(data->mac.mac)) + return -EINVAL; + + /* Check if a requested MAC already exists */ + list_for_each_entry(pos, &o->head, link) + if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) + return -EEXIST; + + return 0; +} + +static int bnx2x_check_vlan_add(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan); + + list_for_each_entry(pos, &o->head, link) + if (data->vlan.vlan == pos->u.vlan.vlan) + return -EEXIST; + + return 0; +} + +/* check_del() callbacks */ +static struct bnx2x_vlan_mac_registry_elem * + bnx2x_check_mac_del(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); + + list_for_each_entry(pos, &o->head, link) + if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) && + (data->mac.is_inner_mac == pos->u.mac.is_inner_mac)) + return pos; + + return NULL; +} + +static struct bnx2x_vlan_mac_registry_elem * + bnx2x_check_vlan_del(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan); + + list_for_each_entry(pos, &o->head, link) + if (data->vlan.vlan == pos->u.vlan.vlan) + return pos; + + return NULL; +} + +/* check_move() callback */ +static bool bnx2x_check_move(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *src_o, + struct bnx2x_vlan_mac_obj *dst_o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + int rc; + + /* Check if we can delete the requested configuration from the first + * object. 
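[Editor's note] The check_add()/check_del() callbacks above are pure registry lookups: an ADD is refused with -EEXIST when an identical MAC (or VLAN) is already listed, and a DEL returns the matching registry entry or NULL. A standalone sketch of the same duplicate check over a simple singly linked registry (types and helpers are illustrative; the driver's is_inner_mac qualifier is omitted):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct reg_elem {
        struct reg_elem *next;
        unsigned char mac[ETH_ALEN];
};

/* Refuse an ADD if the MAC is already registered, like bnx2x_check_mac_add(). */
static int check_mac_add(const struct reg_elem *head, const unsigned char *mac)
{
        for (const struct reg_elem *pos = head; pos; pos = pos->next)
                if (!memcmp(pos->mac, mac, ETH_ALEN))
                        return -EEXIST;
        return 0;
}

int main(void)
{
        struct reg_elem a = { NULL, { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc } };
        unsigned char dup[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

        printf("add duplicate -> %d\n", check_mac_add(&a, dup)); /* -EEXIST */
        return 0;
}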
+ */ + pos = src_o->check_del(bp, src_o, data); + + /* check if configuration can be added */ + rc = dst_o->check_add(bp, dst_o, data); + + /* If this classification can not be added (is already set) + * or can't be deleted - return an error. + */ + if (rc || !pos) + return false; + + return true; +} + +static bool bnx2x_check_move_always_err( + struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *src_o, + struct bnx2x_vlan_mac_obj *dst_o, + union bnx2x_classification_ramrod_data *data) +{ + return false; +} + +static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + u8 rx_tx_flag = 0; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; + + return rx_tx_flag; +} + +static void bnx2x_set_mac_in_nig(struct bnx2x *bp, + bool add, unsigned char *dev_addr, int index) +{ + u32 wb_data[2]; + u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : + NIG_REG_LLH0_FUNC_MEM; + + if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp)) + return; + + if (index > BNX2X_LLH_CAM_MAX_PF_LINE) + return; + + DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", + (add ? "ADD" : "DELETE"), index); + + if (add) { + /* LLH_FUNC_MEM is a u64 WB register */ + reg_offset += 8*index; + + wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | + (dev_addr[4] << 8) | dev_addr[5]); + wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); + + REG_WR_DMAE(bp, reg_offset, wb_data, 2); + } + + REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : + NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); +} + +/** + * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod + * + * @bp: device handle + * @o: queue for which we want to configure this rule + * @add: if true the command is an ADD command, DEL otherwise + * @opcode: CLASSIFY_RULE_OPCODE_XXX + * @hdr: pointer to a header to setup + * + */ +static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, bool add, int opcode, + struct eth_classify_cmd_header *hdr) +{ + struct bnx2x_raw_obj *raw = &o->raw; + + hdr->client_id = raw->cl_id; + hdr->func_id = raw->func_id; + + /* Rx or/and Tx (internal switching) configuration ? */ + hdr->cmd_general_data |= + bnx2x_vlan_mac_get_rx_tx_flag(o); + + if (add) + hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; + + hdr->cmd_general_data |= + (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); +} + +/** + * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header + * + * @cid: connection id + * @type: BNX2X_FILTER_XXX_PENDING + * @hdr: pointer to header to setup + * @rule_cnt: + * + * currently we always configure one rule and echo field to contain a CID and an + * opcode type. 
+ */ +static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, + struct eth_classify_header *hdr, int rule_cnt) +{ + hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) | + (type << BNX2X_SWCID_SHIFT)); + hdr->rule_cnt = (u8)rule_cnt; +} + +/* hw_config() callbacks */ +static void bnx2x_set_one_mac_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; + u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; + + /* Set LLH CAM entry: currently only iSCSI and ETH macs are + * relevant. In addition, current implementation is tuned for a + * single ETH MAC. + * + * When multiple unicast ETH MACs PF configuration in switch + * independent mode is required (NetQ, multiple netdev MACs, + * etc.), consider better utilisation of 8 per function MAC + * entries in the LLH register. There is also + * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the + * total number of CAM entries to 16. + * + * Currently we won't configure NIG for MACs other than a primary ETH + * MAC and iSCSI L2 MAC. + * + * If this MAC is moving from one Queue to another, no need to change + * NIG configuration. + */ + if (cmd != BNX2X_VLAN_MAC_MOVE) { + if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) + bnx2x_set_mac_in_nig(bp, add, mac, + BNX2X_LLH_CAM_ISCSI_ETH_LINE); + else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) + bnx2x_set_mac_in_nig(bp, add, mac, + BNX2X_LLH_CAM_ETH_LINE); + } + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Setup a command header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n", + (add ? "add" : "delete"), mac, raw->cl_id); + + /* Set a MAC itself */ + bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + /* Set a MAC itself */ + bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + rule_entry->mac.inner_mac = + cpu_to_le16(elem->cmd_data.vlan_mac. 
+ u.mac.is_inner_mac); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod + * + * @bp: device handle + * @o: queue + * @type: + * @cam_offset: offset in cam memory + * @hdr: pointer to a header to setup + * + * E1/E1H + */ +static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, + struct mac_configuration_hdr *hdr) +{ + struct bnx2x_raw_obj *r = &o->raw; + + hdr->length = 1; + hdr->offset = (u8)cam_offset; + hdr->client_id = cpu_to_le16(0xff); + hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (type << BNX2X_SWCID_SHIFT)); +} + +static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, + u16 vlan_id, struct mac_configuration_entry *cfg_entry) +{ + struct bnx2x_raw_obj *r = &o->raw; + u32 cl_bit_vec = (1 << r->cl_id); + + cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); + cfg_entry->pf_id = r->func_id; + cfg_entry->vlan_id = cpu_to_le16(vlan_id); + + if (add) { + SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); + + /* Set a MAC in a ramrod data */ + bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr, + &cfg_entry->middle_mac_addr, + &cfg_entry->lsb_mac_addr, mac); + } else + SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); +} + +static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, + u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) +{ + struct mac_configuration_entry *cfg_entry = &config->config_table[0]; + struct bnx2x_raw_obj *raw = &o->raw; + + bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, + &config->hdr); + bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, + cfg_entry); + + DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n", + (add ? "setting" : "clearing"), + mac, raw->cl_id, cam_offset); +} + +/** + * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @elem: bnx2x_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* 57710 and 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
+ true : false; + + /* Reset the ramrod data buffer */ + memset(config, 0, sizeof(*config)); + + bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state, + cam_offset, add, + elem->cmd_data.vlan_mac.u.mac.mac, 0, + ETH_VLAN_FILTER_ANY_VLAN, config); +} + +static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Set a rule header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, + &rule_entry->vlan.header); + + DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"), + vlan); + + /* Set a VLAN itself */ + rule_entry->vlan.vlan = cpu_to_le16(vlan); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_VLAN, + &rule_entry->vlan.header); + + /* Set a VLAN itself */ + rule_entry->vlan.vlan = cpu_to_le16(vlan); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element + * + * @bp: device handle + * @p: command parameters + * @ppos: pointer to the cookie + * + * reconfigure next MAC/VLAN/VLAN-MAC element from the + * previously configured elements list. + * + * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken + * into an account + * + * pointer to the cookie - that should be given back in the next call to make + * function handle the next element. If *ppos is set to NULL it will restart the + * iterator. If returned *ppos == NULL this means that the last element has been + * handled. + * + */ +static int bnx2x_vlan_mac_restore(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p, + struct bnx2x_vlan_mac_registry_elem **ppos) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + + /* If list is empty - there is nothing to do here */ + if (list_empty(&o->head)) { + *ppos = NULL; + return 0; + } + + /* make a step... 
*/ + if (*ppos == NULL) + *ppos = list_first_entry(&o->head, + struct bnx2x_vlan_mac_registry_elem, + link); + else + *ppos = list_next_entry(*ppos, link); + + pos = *ppos; + + /* If it's the last step - return NULL */ + if (list_is_last(&pos->link, &o->head)) + *ppos = NULL; + + /* Prepare a 'user_req' */ + memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); + + /* Set the command */ + p->user_req.cmd = BNX2X_VLAN_MAC_ADD; + + /* Set vlan_mac_flags */ + p->user_req.vlan_mac_flags = pos->vlan_mac_flags; + + /* Set a restore bit */ + __set_bit(RAMROD_RESTORE, &p->ramrod_flags); + + return bnx2x_config_vlan_mac(bp, p); +} + +/* bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a + * pointer to an element with a specific criteria and NULL if such an element + * hasn't been found. + */ +static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +/** + * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed + * + * @bp: device handle + * @qo: bnx2x_qable_obj + * @elem: bnx2x_exeq_elem + * + * Checks that the requested configuration can be added. If yes and if + * requested, consume CAM credit. + * + * The 'validate' is run after the 'optimize'. + * + */ +static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + int rc; + + /* Check the registry */ + rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u); + if (rc) { + DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n"); + return rc; + } + + /* Check if there is a pending ADD command for this + * MAC/VLAN/VLAN-MAC. Return an error if there is. + */ + if (exeq->get(exeq, elem)) { + DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); + return -EEXIST; + } + + /* TODO: Check the pending MOVE from other objects where this + * object is a destination object. + */ + + /* Consume the credit if not requested not to */ + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->get_credit(o))) + return -EINVAL; + + return 0; +} + +/** + * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed + * + * @bp: device handle + * @qo: quable object to check + * @elem: element that needs to be deleted + * + * Checks that the requested configuration can be deleted. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. 
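[Editor's note] bnx2x_vlan_mac_restore() above is driven through a cookie: the caller passes *ppos = NULL to start the walk, remembers the pointer it gets back, and knows the walk has finished when *ppos comes back NULL again. The sketch below reproduces that calling convention over a plain array so the restart and termination rules are visible (everything here is illustrative):

#include <stdio.h>

static const char *entries[] = { "mac0", "mac1", "mac2" };
#define N_ENTRIES 3

/* Handle one element per call; *ppos carries the position between calls. */
static int restore_one(const char ***ppos)
{
        const char **pos;

        if (*ppos == NULL)                  /* NULL cookie: (re)start the walk */
                *ppos = &entries[0];
        else
                *ppos = *ppos + 1;

        pos = *ppos;

        if (pos == &entries[N_ENTRIES - 1]) /* last element: mark the walk done */
                *ppos = NULL;

        printf("restoring %s\n", *pos);
        return 0;
}

int main(void)
{
        const char **cookie = NULL;

        do {
                restore_one(&cookie);
        } while (cookie);                   /* loop until the cookie is NULL again */
        return 0;
}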
+ */ +static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; + struct bnx2x_vlan_mac_registry_elem *pos; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + struct bnx2x_exeq_elem query_elem; + + /* If this classification can not be deleted (doesn't exist) + * - return a BNX2X_EXIST. + */ + pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); + if (!pos) { + DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n"); + return -EEXIST; + } + + /* Check if there are pending DEL or MOVE commands for this + * MAC/VLAN/VLAN-MAC. Return an error if so. + */ + memcpy(&query_elem, elem, sizeof(query_elem)); + + /* Check for MOVE commands */ + query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; + if (exeq->get(exeq, &query_elem)) { + BNX2X_ERR("There is a pending MOVE command already\n"); + return -EINVAL; + } + + /* Check for DEL commands */ + if (exeq->get(exeq, elem)) { + DP(BNX2X_MSG_SP, "There is a pending DEL command already\n"); + return -EEXIST; + } + + /* Return the credit to the credit pool if not requested not to */ + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->put_credit(o))) { + BNX2X_ERR("Failed to return a credit\n"); + return -EINVAL; + } + + return 0; +} + +/** + * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed + * + * @bp: device handle + * @qo: quable object to check (source) + * @elem: element that needs to be moved + * + * Checks that the requested configuration can be moved. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. + */ +static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; + struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; + struct bnx2x_exeq_elem query_elem; + struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; + struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; + + /* Check if we can perform this operation based on the current registry + * state. + */ + if (!src_o->check_move(bp, src_o, dest_o, + &elem->cmd_data.vlan_mac.u)) { + DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n"); + return -EINVAL; + } + + /* Check if there is an already pending DEL or MOVE command for the + * source object or ADD command for a destination object. Return an + * error if so. 
+ */ + memcpy(&query_elem, elem, sizeof(query_elem)); + + /* Check DEL on source */ + query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; + if (src_exeq->get(src_exeq, &query_elem)) { + BNX2X_ERR("There is a pending DEL command on the source queue already\n"); + return -EINVAL; + } + + /* Check MOVE on source */ + if (src_exeq->get(src_exeq, elem)) { + DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); + return -EEXIST; + } + + /* Check ADD on destination */ + query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; + if (dest_exeq->get(dest_exeq, &query_elem)) { + BNX2X_ERR("There is a pending ADD command on the destination queue already\n"); + return -EINVAL; + } + + /* Consume the credit if not requested not to */ + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + dest_o->get_credit(dest_o))) + return -EINVAL; + + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + src_o->put_credit(src_o))) { + /* return the credit taken from dest... */ + dest_o->put_credit(dest_o); + return -EINVAL; + } + + return 0; +} + +static int bnx2x_validate_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + switch (elem->cmd_data.vlan_mac.cmd) { + case BNX2X_VLAN_MAC_ADD: + return bnx2x_validate_vlan_mac_add(bp, qo, elem); + case BNX2X_VLAN_MAC_DEL: + return bnx2x_validate_vlan_mac_del(bp, qo, elem); + case BNX2X_VLAN_MAC_MOVE: + return bnx2x_validate_vlan_mac_move(bp, qo, elem); + default: + return -EINVAL; + } +} + +static int bnx2x_remove_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + int rc = 0; + + /* If consumption wasn't required, nothing to do */ + if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags)) + return 0; + + switch (elem->cmd_data.vlan_mac.cmd) { + case BNX2X_VLAN_MAC_ADD: + case BNX2X_VLAN_MAC_MOVE: + rc = qo->vlan_mac.put_credit(&qo->vlan_mac); + break; + case BNX2X_VLAN_MAC_DEL: + rc = qo->vlan_mac.get_credit(&qo->vlan_mac); + break; + default: + return -EINVAL; + } + + if (rc != true) + return -EINVAL; + + return 0; +} + +/** + * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. 
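[Editor's note] The validate/remove callbacks above keep the CAM credit pool balanced: validate consumes a credit for ADD (and for the destination of a MOVE) and returns one for DEL, while remove - the rollback of a command that was queued but never executed - does exactly the inverse. A small counting-pool sketch of that symmetry (the pool type and helpers are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct credit_pool {
        int credits;    /* free CAM entries left */
        int limit;
};

static bool pool_get(struct credit_pool *p, int n)
{
        if (p->credits < n)
                return false;
        p->credits -= n;
        return true;
}

static bool pool_put(struct credit_pool *p, int n)
{
        if (p->credits + n > p->limit)
                return false;
        p->credits += n;
        return true;
}

enum cmd { CMD_ADD, CMD_DEL };

/* validate: ADD consumes a credit, DEL returns one. */
static bool validate_cmd(struct credit_pool *p, enum cmd c)
{
        return (c == CMD_ADD) ? pool_get(p, 1) : pool_put(p, 1);
}

/* remove a still-queued command: undo whatever validate did. */
static bool rollback_cmd(struct credit_pool *p, enum cmd c)
{
        return (c == CMD_ADD) ? pool_put(p, 1) : pool_get(p, 1);
}

int main(void)
{
        struct credit_pool cam = { 2, 2 };

        validate_cmd(&cam, CMD_ADD);     /* 1 credit left             */
        rollback_cmd(&cam, CMD_ADD);     /* command dropped: 2 again  */
        printf("credits: %d\n", cam.credits);
        return 0;
}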
+ * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * + */ +static int bnx2x_wait_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int cnt = 5000, rc; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + struct bnx2x_raw_obj *raw = &o->raw; + + while (cnt--) { + /* Wait for the current command to complete */ + rc = raw->wait_comp(bp, raw); + if (rc) + return rc; + + /* Wait until there are no pending commands */ + if (!bnx2x_exe_queue_empty(exeq)) + usleep_range(1000, 2000); + else + return 0; + } + + return -EBUSY; +} + +static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *ramrod_flags) +{ + int rc = 0; + + spin_lock_bh(&o->exe_queue.lock); + + DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n"); + rc = __bnx2x_vlan_mac_h_write_trylock(bp, o); + + if (rc != 0) { + __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags); + + /* Calling function should not diffrentiate between this case + * and the case in which there is already a pending ramrod + */ + rc = 1; + } else { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + } + spin_unlock_bh(&o->exe_queue.lock); + + return rc; +} + +/** + * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @cqe: + * @cont: if true schedule next execution chunk + * + */ +static int bnx2x_complete_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags) +{ + struct bnx2x_raw_obj *r = &o->raw; + int rc; + + /* Clearing the pending list & raw state should be made + * atomically (as execution flow assumes they represent the same). + */ + spin_lock_bh(&o->exe_queue.lock); + + /* Reset pending list */ + __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); + + /* Clear pending */ + r->clear_pending(r); + + spin_unlock_bh(&o->exe_queue.lock); + + /* If ramrod failed this is most likely a SW bug */ + if (cqe->message.error) + return -EINVAL; + + /* Run the next bulk of pending commands if requested */ + if (test_bit(RAMROD_CONT, ramrod_flags)) { + rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags); + + if (rc < 0) + return rc; + } + + /* If there is more work to do return PENDING */ + if (!bnx2x_exe_queue_empty(&o->exe_queue)) + return 1; + + return 0; +} + +/** + * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands. 
+ * + * @bp: device handle + * @o: bnx2x_qable_obj + * @elem: bnx2x_exeq_elem + */ +static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem query, *pos; + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + + memcpy(&query, elem, sizeof(query)); + + switch (elem->cmd_data.vlan_mac.cmd) { + case BNX2X_VLAN_MAC_ADD: + query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; + break; + case BNX2X_VLAN_MAC_DEL: + query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; + break; + default: + /* Don't handle anything other than ADD or DEL */ + return 0; + } + + /* If we found the appropriate element - delete it */ + pos = exeq->get(exeq, &query); + if (pos) { + + /* Return the credit of the optimized command */ + if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &pos->cmd_data.vlan_mac.vlan_mac_flags)) { + if ((query.cmd_data.vlan_mac.cmd == + BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { + BNX2X_ERR("Failed to return the credit for the optimized ADD command\n"); + return -EINVAL; + } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ + BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n"); + return -EINVAL; + } + } + + DP(BNX2X_MSG_SP, "Optimizing %s command\n", + (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? + "ADD" : "DEL"); + + list_del(&pos->link); + bnx2x_exe_queue_free_elem(bp, pos); + return 1; + } + + return 0; +} + +/** + * bnx2x_vlan_mac_get_registry_elem - prepare a registry element + * + * @bp: device handle + * @o: + * @elem: + * @restore: + * @re: + * + * prepare a registry element according to the current command request. + */ +static inline int bnx2x_vlan_mac_get_registry_elem( + struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + bool restore, + struct bnx2x_vlan_mac_registry_elem **re) +{ + enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; + struct bnx2x_vlan_mac_registry_elem *reg_elem; + + /* Allocate a new registry element if needed. */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { + reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); + if (!reg_elem) + return -ENOMEM; + + /* Get a new CAM offset */ + if (!o->get_cam_offset(o, ®_elem->cam_offset)) { + /* This shall never happen, because we have checked the + * CAM availability in the 'validate'. + */ + WARN_ON(1); + kfree(reg_elem); + return -EINVAL; + } + + DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); + + /* Set a VLAN-MAC data */ + memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u, + sizeof(reg_elem->u)); + + /* Copy the flags (needed for DEL and RESTORE flows) */ + reg_elem->vlan_mac_flags = + elem->cmd_data.vlan_mac.vlan_mac_flags; + } else /* DEL, RESTORE */ + reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u); + + *re = reg_elem; + return 0; +} + +/** + * bnx2x_execute_vlan_mac - execute vlan mac command + * + * @bp: device handle + * @qo: + * @exe_chunk: + * @ramrod_flags: + * + * go and send a ramrod! 
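[Editor's note] The optimize step above cancels work before it ever reaches the firmware: an incoming DEL that finds a still-queued ADD for the same address (or vice versa) settles the credit and drops both commands. A standalone sketch of that pairing over a simple pending array (the credit accounting is omitted and all names are invented):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum cmd { CMD_ADD, CMD_DEL };

struct pending { enum cmd cmd; char key[24]; bool live; };

/* If the opposite command for the same key is still queued, drop it and
 * report the new command as optimised away (return true). */
static bool optimize(struct pending *q, int len, enum cmd cmd, const char *key)
{
        enum cmd opposite = (cmd == CMD_ADD) ? CMD_DEL : CMD_ADD;

        for (int i = 0; i < len; i++) {
                if (q[i].live && q[i].cmd == opposite && !strcmp(q[i].key, key)) {
                        q[i].live = false;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        struct pending q[] = { { CMD_ADD, "00:10:18:aa:bb:cc", true } };

        /* The DEL meets the queued ADD: both disappear, nothing reaches the FW. */
        printf("optimised: %d\n", optimize(q, 1, CMD_DEL, "00:10:18:aa:bb:cc"));
        return 0;
}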
+ */ +static int bnx2x_execute_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct list_head *exe_chunk, + unsigned long *ramrod_flags) +{ + struct bnx2x_exeq_elem *elem; + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; + struct bnx2x_raw_obj *r = &o->raw; + int rc, idx = 0; + bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); + bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); + struct bnx2x_vlan_mac_registry_elem *reg_elem; + enum bnx2x_vlan_mac_cmd cmd; + + /* If DRIVER_ONLY execution is requested, cleanup a registry + * and exit. Otherwise send a ramrod to FW. + */ + if (!drv_only) { + WARN_ON(r->check_pending(r)); + + /* Set pending */ + r->set_pending(r); + + /* Fill the ramrod data */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + /* We will add to the target object in MOVE command, so + * change the object for a CAM search. + */ + if (cmd == BNX2X_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, + elem, restore, + ®_elem); + if (rc) + goto error_exit; + + WARN_ON(!reg_elem); + + /* Push a new entry into the registry */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || + (cmd == BNX2X_VLAN_MAC_MOVE))) + list_add(®_elem->link, &cam_obj->head); + + /* Configure a single command in a ramrod data buffer */ + o->set_one_rule(bp, o, elem, idx, + reg_elem->cam_offset); + + /* MOVE command consumes 2 entries in the ramrod data */ + if (cmd == BNX2X_VLAN_MAC_MOVE) + idx += 2; + else + idx++; + } + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
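[Editor's note] The comment above argues that no explicit barrier is needed before bnx2x_sp_post() because the producer update that publishes the SPQ element already orders the preceding writes. A generic userspace illustration of that publish pattern with C11 release/acquire on a producer index - this shows the general idea only, not the driver's actual SPQ code:

#include <stdatomic.h>
#include <stdio.h>

struct slot { int payload; };

static struct slot ring[16];
static atomic_uint prod;             /* producer index: the "publish" point */

static void post(int idx, int value)
{
        ring[idx].payload = value;                        /* fill the element */
        atomic_store_explicit(&prod, idx + 1,
                              memory_order_release);      /* then publish it  */
}

static int poll_one(int idx)
{
        if (atomic_load_explicit(&prod, memory_order_acquire) > (unsigned)idx)
                return ring[idx].payload;                 /* guaranteed visible */
        return -1;
}

int main(void)
{
        post(0, 42);
        printf("consumer sees %d\n", poll_one(0));
        return 0;
}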
+ */ + + rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, + U64_HI(r->rdata_mapping), + U64_LO(r->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + goto error_exit; + } + + /* Now, when we are done with the ramrod - clean up the registry */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + if ((cmd == BNX2X_VLAN_MAC_DEL) || + (cmd == BNX2X_VLAN_MAC_MOVE)) { + reg_elem = o->check_del(bp, o, + &elem->cmd_data.vlan_mac.u); + + WARN_ON(!reg_elem); + + o->put_cam_offset(o, reg_elem->cam_offset); + list_del(®_elem->link); + kfree(reg_elem); + } + } + + if (!drv_only) + return 1; + else + return 0; + +error_exit: + r->clear_pending(r); + + /* Cleanup a registry in case of a failure */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + + if (cmd == BNX2X_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + /* Delete all newly added above entries */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || + (cmd == BNX2X_VLAN_MAC_MOVE))) { + reg_elem = o->check_del(bp, cam_obj, + &elem->cmd_data.vlan_mac.u); + if (reg_elem) { + list_del(®_elem->link); + kfree(reg_elem); + } + } + } + + return rc; +} + +static inline int bnx2x_vlan_mac_push_new_cmd( + struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) +{ + struct bnx2x_exeq_elem *elem; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); + + /* Allocate the execution queue element */ + elem = bnx2x_exe_queue_alloc_elem(bp); + if (!elem) + return -ENOMEM; + + /* Set the command 'length' */ + switch (p->user_req.cmd) { + case BNX2X_VLAN_MAC_MOVE: + elem->cmd_len = 2; + break; + default: + elem->cmd_len = 1; + } + + /* Fill the object specific info */ + memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); + + /* Try to add a new command to the pending list */ + return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); +} + +/** + * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. + * + * @bp: device handle + * @p: + * + */ +int bnx2x_config_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) +{ + int rc = 0; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + unsigned long *ramrod_flags = &p->ramrod_flags; + bool cont = test_bit(RAMROD_CONT, ramrod_flags); + struct bnx2x_raw_obj *raw = &o->raw; + + /* + * Add new elements to the execution list for commands that require it. + */ + if (!cont) { + rc = bnx2x_vlan_mac_push_new_cmd(bp, p); + if (rc) + return rc; + } + + /* If nothing will be executed further in this iteration we want to + * return PENDING if there are pending commands + */ + if (!bnx2x_exe_queue_empty(&o->exe_queue)) + rc = 1; + + if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n"); + raw->clear_pending(raw); + } + + /* Execute commands if required */ + if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || + test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { + rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj, + &p->ramrod_flags); + if (rc < 0) + return rc; + } + + /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + * then user want to wait until the last command is done. + */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + /* Wait maximum for the current exe_queue length iterations plus + * one (for the current pending command). 
+ */ + int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; + + while (!bnx2x_exe_queue_empty(&o->exe_queue) && + max_iterations--) { + + /* Wait for the current command to complete */ + rc = raw->wait_comp(bp, raw); + if (rc) + return rc; + + /* Make a next step */ + rc = __bnx2x_vlan_mac_execute_step(bp, + p->vlan_mac_obj, + &p->ramrod_flags); + if (rc < 0) + return rc; + } + + return 0; + } + + return rc; +} + +/** + * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec + * + * @bp: device handle + * @o: + * @vlan_mac_flags: + * @ramrod_flags: execution flags to be used for this deletion + * + * if the last operation has completed successfully and there are no + * more elements left, positive value if the last operation has completed + * successfully and there are more previously configured elements, negative + * value is current operation has failed. + */ +static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags) +{ + struct bnx2x_vlan_mac_registry_elem *pos = NULL; + struct bnx2x_vlan_mac_ramrod_params p; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + unsigned long flags; + int read_lock; + int rc = 0; + + /* Clear pending commands first */ + + spin_lock_bh(&exeq->lock); + + list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { + flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { + rc = exeq->remove(bp, exeq->owner, exeq_pos); + if (rc) { + BNX2X_ERR("Failed to remove command\n"); + spin_unlock_bh(&exeq->lock); + return rc; + } + list_del(&exeq_pos->link); + bnx2x_exe_queue_free_elem(bp, exeq_pos); + } + } + + spin_unlock_bh(&exeq->lock); + + /* Prepare a command request */ + memset(&p, 0, sizeof(p)); + p.vlan_mac_obj = o; + p.ramrod_flags = *ramrod_flags; + p.user_req.cmd = BNX2X_VLAN_MAC_DEL; + + /* Add all but the last VLAN-MAC to the execution queue without actually + * execution anything. 
+ */ + __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); + __clear_bit(RAMROD_EXEC, &p.ramrod_flags); + __clear_bit(RAMROD_CONT, &p.ramrod_flags); + + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n"); + read_lock = bnx2x_vlan_mac_h_read_lock(bp, o); + if (read_lock != 0) + return read_lock; + + list_for_each_entry(pos, &o->head, link) { + flags = pos->vlan_mac_flags; + if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == + BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) { + p.user_req.vlan_mac_flags = pos->vlan_mac_flags; + memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); + rc = bnx2x_config_vlan_mac(bp, &p); + if (rc < 0) { + BNX2X_ERR("Failed to add a new DEL command\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + return rc; + } + } + } + + DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n"); + bnx2x_vlan_mac_h_read_unlock(bp, o); + + p.ramrod_flags = *ramrod_flags; + __set_bit(RAMROD_CONT, &p.ramrod_flags); + + return bnx2x_config_vlan_mac(bp, &p); +} + +static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, + u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type) +{ + raw->func_id = func_id; + raw->cid = cid; + raw->cl_id = cl_id; + raw->rdata = rdata; + raw->rdata_mapping = rdata_mapping; + raw->state = state; + raw->pstate = pstate; + raw->obj_type = type; + raw->check_pending = bnx2x_raw_check_pending; + raw->clear_pending = bnx2x_raw_clear_pending; + raw->set_pending = bnx2x_raw_set_pending; + raw->wait_comp = bnx2x_raw_wait; +} + +static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, + u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + INIT_LIST_HEAD(&o->head); + o->head_reader = 0; + o->head_exe_request = false; + o->saved_ramrod_flags = 0; + + o->macs_pool = macs_pool; + o->vlans_pool = vlans_pool; + + o->delete_all = bnx2x_vlan_mac_del_all; + o->restore = bnx2x_vlan_mac_restore; + o->complete = bnx2x_complete_vlan_mac; + o->wait = bnx2x_wait_vlan_mac; + + bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, + state, pstate, type); +} + +void bnx2x_init_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool) +{ + union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; + + bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, NULL); + + /* CAM credit pool handling */ + mac_obj->get_credit = bnx2x_get_credit_mac; + mac_obj->put_credit = bnx2x_put_credit_mac; + mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; + mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; + + if (CHIP_IS_E1x(bp)) { + mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; + mac_obj->check_del = bnx2x_check_mac_del; + mac_obj->check_add = bnx2x_check_mac_add; + mac_obj->check_move = bnx2x_check_move_always_err; + mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &mac_obj->exe_queue, 1, qable_obj, + bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_mac); + } else { + mac_obj->set_one_rule = bnx2x_set_one_mac_e2; + mac_obj->check_del = 
bnx2x_check_mac_del; + mac_obj->check_add = bnx2x_check_mac_add; + mac_obj->check_move = bnx2x_check_move; + mac_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + mac_obj->get_n_elements = bnx2x_get_n_elements; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_mac); + } +} + +void bnx2x_init_vlan_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; + + bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, NULL, + vlans_pool); + + vlan_obj->get_credit = bnx2x_get_credit_vlan; + vlan_obj->put_credit = bnx2x_put_credit_vlan; + vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; + vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; + + if (CHIP_IS_E1x(bp)) { + BNX2X_ERR("Do not support chips others than E2 and newer\n"); + BUG(); + } else { + vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; + vlan_obj->check_del = bnx2x_check_vlan_del; + vlan_obj->check_add = bnx2x_check_vlan_add; + vlan_obj->check_move = bnx2x_check_move; + vlan_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + vlan_obj->get_n_elements = bnx2x_get_n_elements; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_remove_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan); + } +} + +/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ +static inline void __storm_memset_mac_filters(struct bnx2x *bp, + struct tstorm_eth_mac_filter_config *mac_filters, + u16 pf_id) +{ + size_t size = sizeof(struct tstorm_eth_mac_filter_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); + + __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); +} + +static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + /* update the bp MAC filter structure */ + u32 mask = (1 << p->cl_id); + + struct tstorm_eth_mac_filter_config *mac_filters = + (struct tstorm_eth_mac_filter_config *)p->rdata; + + /* initial setting is drop-all */ + u8 drop_all_ucast = 1, drop_all_mcast = 1; + u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; + u8 unmatched_unicast = 0; + + /* In e1x there we only take into account rx accept flag since tx switching + * isn't enabled. */ + if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) + /* accept matched ucast */ + drop_all_ucast = 0; + + if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) + /* accept matched mcast */ + drop_all_mcast = 0; + + if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { + /* accept all mcast */ + drop_all_ucast = 0; + accp_all_ucast = 1; + } + if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { + /* accept all mcast */ + drop_all_mcast = 0; + accp_all_mcast = 1; + } + if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) + /* accept (all) bcast */ + accp_all_bcast = 1; + if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) + /* accept unmatched unicasts */ + unmatched_unicast = 1; + + mac_filters->ucast_drop_all = drop_all_ucast ? 
+ mac_filters->ucast_drop_all | mask : + mac_filters->ucast_drop_all & ~mask; + + mac_filters->mcast_drop_all = drop_all_mcast ? + mac_filters->mcast_drop_all | mask : + mac_filters->mcast_drop_all & ~mask; + + mac_filters->ucast_accept_all = accp_all_ucast ? + mac_filters->ucast_accept_all | mask : + mac_filters->ucast_accept_all & ~mask; + + mac_filters->mcast_accept_all = accp_all_mcast ? + mac_filters->mcast_accept_all | mask : + mac_filters->mcast_accept_all & ~mask; + + mac_filters->bcast_accept_all = accp_all_bcast ? + mac_filters->bcast_accept_all | mask : + mac_filters->bcast_accept_all & ~mask; + + mac_filters->unmatched_unicast = unmatched_unicast ? + mac_filters->unmatched_unicast | mask : + mac_filters->unmatched_unicast & ~mask; + + DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" + "accp_mcast 0x%x\naccp_bcast 0x%x\n", + mac_filters->ucast_drop_all, mac_filters->mcast_drop_all, + mac_filters->ucast_accept_all, mac_filters->mcast_accept_all, + mac_filters->bcast_accept_all); + + /* write the MAC filter structure*/ + __storm_memset_mac_filters(bp, mac_filters, p->func_id); + + /* The operation is completed */ + clear_bit(p->state, p->pstate); + smp_mb__after_atomic(); + + return 0; +} + +/* Setup ramrod data */ +static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, + struct eth_classify_header *hdr, + u8 rule_cnt) +{ + hdr->echo = cpu_to_le32(cid); + hdr->rule_cnt = rule_cnt; +} + +static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, + unsigned long *accept_flags, + struct eth_filter_rules_cmd *cmd, + bool clear_accept_all) +{ + u16 state; + + /* start with 'drop-all' */ + state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | + ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + + if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + } + + if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) { + state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + } + + if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags)) + state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + + if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + + if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags)) + state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + + /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ + if (clear_accept_all) { + state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + + cmd->state = cpu_to_le16(state); +} + +static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + struct eth_filter_rules_ramrod_data *data = p->rdata; + int rc; + u8 rule_idx = 0; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + /* Setup ramrod data */ + + /* Tx (internal switching) */ + if (test_bit(RAMROD_TX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = p->cl_id; + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_TX_CMD; + + 
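[Editor's note] The E1x rx-mode path above updates one bit per client in each drop/accept mask with the same ternary repeated for every mask word. The pattern reduces to a small helper - set or clear the client's bit depending on a flag - sketched here standalone (names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Set or clear the client's bit in a filter mask word, like the
 * "cond ? word | mask : word & ~mask" updates in bnx2x_set_rx_mode_e1x(). */
static uint32_t update_client_bit(uint32_t mask_word, int cl_id, int enable)
{
        uint32_t bit = 1u << cl_id;

        return enable ? (mask_word | bit) : (mask_word & ~bit);
}

int main(void)
{
        uint32_t ucast_drop_all = 0;

        ucast_drop_all = update_client_bit(ucast_drop_all, 3, 1);  /* client 3 drops    */
        ucast_drop_all = update_client_bit(ucast_drop_all, 3, 0);  /* ...now it doesn't */
        printf("mask = 0x%x\n", ucast_drop_all);
        return 0;
}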
bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       false);
+	}
+
+	/* Rx */
+	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_RX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       false);
+	}
+
+	/* If FCoE Queue configuration has been requested, configure the Rx
+	 * and internal switching modes for this queue in separate rules.
+	 *
+	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
+	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+	 */
+	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+		/* Tx (internal switching) */
+		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+				ETH_FILTER_RULES_CMD_TX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+						       &(data->rules[rule_idx]),
+						       true);
+			rule_idx++;
+		}
+
+		/* Rx */
+		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+				ETH_FILTER_RULES_CMD_RX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+						       &(data->rules[rule_idx]),
+						       true);
+			rule_idx++;
+		}
+	}
+
+	/* Set the ramrod header (most importantly - number of rules to
+	 * configure).
+	 */
+	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
+	   data->header.rule_cnt, p->rx_accept_flags,
+	   p->tx_accept_flags);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+ */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid, + U64_HI(p->rdata_mapping), + U64_LO(p->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return 1; +} + +static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + return bnx2x_state_wait(bp, p->state, p->pstate); +} + +static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + /* Do nothing */ + return 0; +} + +int bnx2x_config_rx_mode(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + int rc; + + /* Configure the new classification in the chip */ + rc = p->rx_mode_obj->config_rx_mode(bp, p); + if (rc < 0) + return rc; + + /* Wait for a ramrod completion if was requested */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + rc = p->rx_mode_obj->wait_comp(bp, p); + if (rc) + return rc; + } + + return rc; +} + +void bnx2x_init_rx_mode_obj(struct bnx2x *bp, + struct bnx2x_rx_mode_obj *o) +{ + if (CHIP_IS_E1x(bp)) { + o->wait_comp = bnx2x_empty_rx_mode_wait; + o->config_rx_mode = bnx2x_set_rx_mode_e1x; + } else { + o->wait_comp = bnx2x_wait_rx_mode_comp_e2; + o->config_rx_mode = bnx2x_set_rx_mode_e2; + } +} + +/********************* Multicast verbs: SET, CLEAR ****************************/ +static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac) +{ + return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff; +} + +struct bnx2x_mcast_mac_elem { + struct list_head link; + u8 mac[ETH_ALEN]; + u8 pad[2]; /* For a natural alignment of the following buffer */ +}; + +struct bnx2x_pending_mcast_cmd { + struct list_head link; + int type; /* BNX2X_MCAST_CMD_X */ + union { + struct list_head macs_head; + u32 macs_num; /* Needed for DEL command */ + int next_bin; /* Needed for RESTORE flow with aprox match */ + } data; + + bool done; /* set to true, when the command has been handled, + * practically used in 57712 handling only, where one pending + * command may be handled in a few operations. As long as for + * other chips every operation handling is completed in a + * single ramrod, there is no need to utilize this field. + */ +}; + +static int bnx2x_mcast_wait(struct bnx2x *bp, + struct bnx2x_mcast_obj *o) +{ + if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) || + o->raw.wait_comp(bp, &o->raw)) + return -EBUSY; + + return 0; +} + +static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + int total_sz; + struct bnx2x_pending_mcast_cmd *new_cmd; + struct bnx2x_mcast_mac_elem *cur_mac = NULL; + struct bnx2x_mcast_list_elem *pos; + int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ? + p->mcast_list_len : 0); + + /* If the command is empty ("handle pending commands only"), break */ + if (!p->mcast_list_len) + return 0; + + total_sz = sizeof(*new_cmd) + + macs_list_len * sizeof(struct bnx2x_mcast_mac_elem); + + /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */ + new_cmd = kzalloc(total_sz, GFP_ATOMIC); + + if (!new_cmd) + return -ENOMEM; + + DP(BNX2X_MSG_SP, "About to enqueue a new %d command. 
macs_list_len=%d\n", + cmd, macs_list_len); + + INIT_LIST_HEAD(&new_cmd->data.macs_head); + + new_cmd->type = cmd; + new_cmd->done = false; + + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + cur_mac = (struct bnx2x_mcast_mac_elem *) + ((u8 *)new_cmd + sizeof(*new_cmd)); + + /* Push the MACs of the current command into the pending command + * MACs list: FIFO + */ + list_for_each_entry(pos, &p->mcast_list, link) { + memcpy(cur_mac->mac, pos->mac, ETH_ALEN); + list_add_tail(&cur_mac->link, &new_cmd->data.macs_head); + cur_mac++; + } + + break; + + case BNX2X_MCAST_CMD_DEL: + new_cmd->data.macs_num = p->mcast_list_len; + break; + + case BNX2X_MCAST_CMD_RESTORE: + new_cmd->data.next_bin = 0; + break; + + default: + kfree(new_cmd); + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + } + + /* Push the new pending command to the tail of the pending list: FIFO */ + list_add_tail(&new_cmd->link, &o->pending_cmds_head); + + o->set_sched(o); + + return 1; +} + +/** + * bnx2x_mcast_get_next_bin - get the next set bin (index) + * + * @o: + * @last: index to start looking from (including) + * + * returns the next found (set) bin or a negative value if none is found. + */ +static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last) +{ + int i, j, inner_start = last % BIT_VEC64_ELEM_SZ; + + for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) { + if (o->registry.aprox_match.vec[i]) + for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) { + int cur_bit = j + BIT_VEC64_ELEM_SZ * i; + if (BIT_VEC64_TEST_BIT(o->registry.aprox_match. + vec, cur_bit)) { + return cur_bit; + } + } + inner_start = 0; + } + + /* None found */ + return -1; +} + +/** + * bnx2x_mcast_clear_first_bin - find the first set bin and clear it + * + * @o: + * + * returns the index of the found bin or -1 if none is found + */ +static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) +{ + int cur_bit = bnx2x_mcast_get_next_bin(o, 0); + + if (cur_bit >= 0) + BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); + + return cur_bit; +} + +static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + u8 rx_tx_flag = 0; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; + + return rx_tx_flag; +} + +static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, + enum bnx2x_mcast_cmd cmd) +{ + struct bnx2x_raw_obj *r = &o->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + u8 func_id = r->func_id; + u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); + int bin; + + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) + rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; + + data->rules[idx].cmd_general_data |= rx_tx_add_flag; + + /* Get a bin and update a bins' vector */ + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); + break; + + case BNX2X_MCAST_CMD_DEL: + /* If there were no more bins to clear + * (bnx2x_mcast_clear_first_bin() returns -1) then we would + * clear any (0xff) bin. + * See bnx2x_mcast_validate_e2() for explanation when it may + * happen. 
+ */ + bin = bnx2x_mcast_clear_first_bin(o); + break; + + case BNX2X_MCAST_CMD_RESTORE: + bin = cfg_data->bin; + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return; + } + + DP(BNX2X_MSG_SP, "%s bin %d\n", + ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? + "Setting" : "Clearing"), bin); + + data->rules[idx].bin_id = (u8)bin; + data->rules[idx].func_id = func_id; + data->rules[idx].engine_id = o->engine_id; +} + +/** + * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry + * + * @bp: device handle + * @o: + * @start_bin: index in the registry to start from (including) + * @rdata_idx: index in the ramrod data to start from + * + * returns last handled bin index or -1 if all bins have been handled + */ +static inline int bnx2x_mcast_handle_restore_cmd_e2( + struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, + int *rdata_idx) +{ + int cur_bin, cnt = *rdata_idx; + union bnx2x_mcast_config_data cfg_data = {NULL}; + + /* go through the registry and configure the bins from it */ + for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; + cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { + + cfg_data.bin = (u8)cur_bin; + o->set_one_rule(bp, o, cnt, &cfg_data, + BNX2X_MCAST_CMD_RESTORE); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *rdata_idx = cnt; + + return cur_bin; +} + +static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; + int cnt = *line_idx; + union bnx2x_mcast_config_data cfg_data = {NULL}; + + list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, + link) { + + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", + pmac_pos->mac); + + list_del(&pmac_pos->link); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* if no more MACs to configure - we are done */ + if (list_empty(&cmd_pos->data.macs_head)) + cmd_pos->done = true; +} + +static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + int cnt = *line_idx; + + while (cmd_pos->data.macs_num) { + o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); + + cnt++; + + cmd_pos->data.macs_num--; + + DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", + cmd_pos->data.macs_num, cnt); + + /* Break if we reached the maximum + * number of rules. 
+ */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* If we cleared all bins - we are done */ + if (!cmd_pos->data.macs_num) + cmd_pos->done = true; +} + +static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, + line_idx); + + if (cmd_pos->data.next_bin < 0) + /* If o->set_restore returned -1 we are done */ + cmd_pos->done = true; + else + /* Start from the next bin next time */ + cmd_pos->data.next_bin++; +} + +static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; + int cnt = 0; + struct bnx2x_mcast_obj *o = p->mcast_obj; + + list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, + link) { + switch (cmd_pos->type) { + case BNX2X_MCAST_CMD_ADD: + bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); + break; + + case BNX2X_MCAST_CMD_DEL: + bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, + &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); + return -EINVAL; + } + + /* If the command has been completed - remove it from the list + * and free the memory + */ + if (cmd_pos->done) { + list_del(&cmd_pos->link); + kfree(cmd_pos); + } + + /* Break if we reached the maximum number of rules */ + if (cnt >= o->max_cmd_len) + break; + } + + return cnt; +} + +static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, + int *line_idx) +{ + struct bnx2x_mcast_list_elem *mlist_pos; + union bnx2x_mcast_config_data cfg_data = {NULL}; + int cnt = *line_idx; + + list_for_each_entry(mlist_pos, &p->mcast_list, link) { + cfg_data.mac = mlist_pos->mac; + o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", + mlist_pos->mac); + } + + *line_idx = cnt; +} + +static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, + int *line_idx) +{ + int cnt = *line_idx, i; + + for (i = 0; i < p->mcast_list_len; i++) { + o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL); + + cnt++; + + DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n", + p->mcast_list_len - i - 1); + } + + *line_idx = cnt; +} + +/** + * bnx2x_mcast_handle_current_cmd - + * + * @bp: device handle + * @p: + * @cmd: + * @start_cnt: first line in the ramrod data that may be used + * + * This function is called iff there is enough place for the current command in + * the ramrod data. + * Returns number of lines filled in the ramrod data in total. 
+ */ +static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd, + int start_cnt) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + int cnt = start_cnt; + + DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); + + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + bnx2x_mcast_hdl_add(bp, o, p, &cnt); + break; + + case BNX2X_MCAST_CMD_DEL: + bnx2x_mcast_hdl_del(bp, o, p, &cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + o->hdl_restore(bp, o, 0, &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + } + + /* The current command has been handled */ + p->mcast_list_len = 0; + + return cnt; +} + +static int bnx2x_mcast_validate_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + int reg_sz = o->get_registry_size(o); + + switch (cmd) { + /* DEL command deletes all currently configured MACs */ + case BNX2X_MCAST_CMD_DEL: + o->set_registry_size(o, 0); + /* Don't break */ + + /* RESTORE command will restore the entire multicast configuration */ + case BNX2X_MCAST_CMD_RESTORE: + /* Here we set the approximate amount of work to do, which in + * fact may be only less as some MACs in postponed ADD + * command(s) scheduled before this command may fall into + * the same bin and the actual number of bins set in the + * registry would be less than we estimated here. See + * bnx2x_mcast_set_one_rule_e2() for further details. + */ + p->mcast_list_len = reg_sz; + break; + + case BNX2X_MCAST_CMD_ADD: + case BNX2X_MCAST_CMD_CONT: + /* Here we assume that all new MACs will fall into new bins. + * However we will correct the real registry size after we + * handle all pending commands. + */ + o->set_registry_size(o, reg_sz + p->mcast_list_len); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + } + + /* Increase the total number of MACs pending to be configured */ + o->total_pending_num += p->mcast_list_len; + + return 0; +} + +static void bnx2x_mcast_revert_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int old_num_bins) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + + o->set_registry_size(o, old_num_bins); + o->total_pending_num -= p->mcast_list_len; +} + +/** + * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values + * + * @bp: device handle + * @p: + * @len: number of rules to handle + */ +static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + u8 len) +{ + struct bnx2x_raw_obj *r = &p->mcast_obj->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + + data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << + BNX2X_SWCID_SHIFT)); + data->header.rule_cnt = len; +} + +/** + * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins + * + * @bp: device handle + * @o: + * + * Recalculate the actual number of set bins in the registry using Brian + * Kernighan's algorithm: it's execution complexity is as a number of set bins. + * + * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1(). 
+ */ +static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o) +{ + int i, cnt = 0; + u64 elem; + + for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) { + elem = o->registry.aprox_match.vec[i]; + for (; elem; cnt++) + elem &= elem - 1; + } + + o->set_registry_size(o, cnt); + + return 0; +} + +static int bnx2x_mcast_setup_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + struct bnx2x_raw_obj *raw = &p->mcast_obj->raw; + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(raw->rdata); + int cnt = 0, rc; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p); + + /* If there are no more pending commands - clear SCHEDULED state */ + if (list_empty(&o->pending_cmds_head)) + o->clear_sched(o); + + /* The below may be true iff there was enough room in ramrod + * data for all pending commands and for the current + * command. Otherwise the current command would have been added + * to the pending commands and p->mcast_list_len would have been + * zeroed. + */ + if (p->mcast_list_len > 0) + cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt); + + /* We've pulled out some MACs - update the total number of + * outstanding. + */ + o->total_pending_num -= cnt; + + /* send a ramrod */ + WARN_ON(o->total_pending_num < 0); + WARN_ON(cnt > o->max_cmd_len); + + bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt); + + /* Update a registry size if there are no more pending operations. + * + * We don't want to change the value of the registry size if there are + * pending operations because we want it to always be equal to the + * exact or the approximate number (see bnx2x_mcast_validate_e2()) of + * set bins after the last requested operation in order to properly + * evaluate the size of the next DEL/RESTORE operation. + * + * Note that we update the registry itself during command(s) handling + * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we + * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but + * with a limited amount of update commands (per MAC/bin) and we don't + * know in this scope what the actual state of bins configuration is + * going to be after this ramrod. + */ + if (!o->total_pending_num) + bnx2x_mcast_refresh_registry_e2(bp, o); + + /* If CLEAR_ONLY was requested - don't send a ramrod and clear + * RAMROD_PENDING status immediately. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + raw->clear_pending(raw); + return 0; + } else { + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
+ */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES, + raw->cid, U64_HI(raw->rdata_mapping), + U64_LO(raw->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return 1; + } +} + +static int bnx2x_mcast_validate_e1h(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + /* Mark, that there is a work to do */ + if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE)) + p->mcast_list_len = 1; + + return 0; +} + +static void bnx2x_mcast_revert_e1h(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int old_num_bins) +{ + /* Do nothing */ +} + +#define BNX2X_57711_SET_MC_FILTER(filter, bit) \ +do { \ + (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \ +} while (0) + +static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, + struct bnx2x_mcast_ramrod_params *p, + u32 *mc_filter) +{ + struct bnx2x_mcast_list_elem *mlist_pos; + int bit; + + list_for_each_entry(mlist_pos, &p->mcast_list, link) { + bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac); + BNX2X_57711_SET_MC_FILTER(mc_filter, bit); + + DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n", + mlist_pos->mac, bit); + + /* bookkeeping... */ + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, + bit); + } +} + +static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, + u32 *mc_filter) +{ + int bit; + + for (bit = bnx2x_mcast_get_next_bin(o, 0); + bit >= 0; + bit = bnx2x_mcast_get_next_bin(o, bit + 1)) { + BNX2X_57711_SET_MC_FILTER(mc_filter, bit); + DP(BNX2X_MSG_SP, "About to set bin %d\n", bit); + } +} + +/* On 57711 we write the multicast MACs' approximate match + * table by directly into the TSTORM's internal RAM. So we don't + * really need to handle any tricks to make it work. + */ +static int bnx2x_mcast_setup_e1h(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + int i; + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *r = &o->raw; + + /* If CLEAR_ONLY has been requested - clear the registry + * and clear a pending bit. + */ + if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + u32 mc_filter[MC_HASH_SIZE] = {0}; + + /* Set the multicast filter bits before writing it into + * the internal memory. 
+ */ + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter); + break; + + case BNX2X_MCAST_CMD_DEL: + DP(BNX2X_MSG_SP, + "Invalidating multicast MACs configuration\n"); + + /* clear the registry */ + memset(o->registry.aprox_match.vec, 0, + sizeof(o->registry.aprox_match.vec)); + break; + + case BNX2X_MCAST_CMD_RESTORE: + bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + } + + /* Set the mcast filter in the internal memory */ + for (i = 0; i < MC_HASH_SIZE; i++) + REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]); + } else + /* clear the registry */ + memset(o->registry.aprox_match.vec, 0, + sizeof(o->registry.aprox_match.vec)); + + /* We are done */ + r->clear_pending(r); + + return 0; +} + +static int bnx2x_mcast_validate_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + int reg_sz = o->get_registry_size(o); + + switch (cmd) { + /* DEL command deletes all currently configured MACs */ + case BNX2X_MCAST_CMD_DEL: + o->set_registry_size(o, 0); + /* Don't break */ + + /* RESTORE command will restore the entire multicast configuration */ + case BNX2X_MCAST_CMD_RESTORE: + p->mcast_list_len = reg_sz; + DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", + cmd, p->mcast_list_len); + break; + + case BNX2X_MCAST_CMD_ADD: + case BNX2X_MCAST_CMD_CONT: + /* Multicast MACs on 57710 are configured as unicast MACs and + * there is only a limited number of CAM entries for that + * matter. + */ + if (p->mcast_list_len > o->max_cmd_len) { + BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n", + o->max_cmd_len); + return -EINVAL; + } + /* Every configured MAC should be cleared if DEL command is + * called. Only the last ADD command is relevant as long as + * every ADD commands overrides the previous configuration. + */ + DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len); + if (p->mcast_list_len > 0) + o->set_registry_size(o, p->mcast_list_len); + + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return -EINVAL; + } + + /* We want to ensure that commands are executed one by one for 57710. + * Therefore each none-empty command will consume o->max_cmd_len. + */ + if (p->mcast_list_len) + o->total_pending_num += o->max_cmd_len; + + return 0; +} + +static void bnx2x_mcast_revert_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int old_num_macs) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + + o->set_registry_size(o, old_num_macs); + + /* If current command hasn't been handled yet and we are + * here means that it's meant to be dropped and we have to + * update the number of outstanding MACs accordingly. 
+ */ + if (p->mcast_list_len) + o->total_pending_num -= o->max_cmd_len; +} + +static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, + enum bnx2x_mcast_cmd cmd) +{ + struct bnx2x_raw_obj *r = &o->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(r->rdata); + + /* copy mac */ + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { + bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, + &data->config_table[idx].middle_mac_addr, + &data->config_table[idx].lsb_mac_addr, + cfg_data->mac); + + data->config_table[idx].vlan_id = 0; + data->config_table[idx].pf_id = r->func_id; + data->config_table[idx].clients_bit_vector = + cpu_to_le32(1 << r->cl_id); + + SET_FLAG(data->config_table[idx].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + } +} + +/** + * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd + * + * @bp: device handle + * @p: + * @len: number of rules to handle + */ +static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + u8 len) +{ + struct bnx2x_raw_obj *r = &p->mcast_obj->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(r->rdata); + + u8 offset = (CHIP_REV_IS_SLOW(bp) ? + BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : + BNX2X_MAX_MULTICAST*(1 + r->func_id)); + + data->hdr.offset = offset; + data->hdr.client_id = cpu_to_le16(0xff); + data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << + BNX2X_SWCID_SHIFT)); + data->hdr.length = len; +} + +/** + * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 + * + * @bp: device handle + * @o: + * @start_idx: index in the registry to start from + * @rdata_idx: index in the ramrod data to start from + * + * restore command for 57710 is like all other commands - always a stand alone + * command - start_idx and rdata_idx will always be 0. This function will always + * succeed. + * returns -1 to comply with 57712 variant. + */ +static inline int bnx2x_mcast_handle_restore_cmd_e1( + struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, + int *rdata_idx) +{ + struct bnx2x_mcast_mac_elem *elem; + int i = 0; + union bnx2x_mcast_config_data cfg_data = {NULL}; + + /* go through the registry and configure the MACs from it. 
*/ + list_for_each_entry(elem, &o->registry.exact_match.macs, link) { + cfg_data.mac = &elem->mac[0]; + o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); + + i++; + + DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", + cfg_data.mac); + } + + *rdata_idx = i; + + return -1; +} + +static inline int bnx2x_mcast_handle_pending_cmds_e1( + struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_pending_mcast_cmd *cmd_pos; + struct bnx2x_mcast_mac_elem *pmac_pos; + struct bnx2x_mcast_obj *o = p->mcast_obj; + union bnx2x_mcast_config_data cfg_data = {NULL}; + int cnt = 0; + + /* If nothing to be done - return */ + if (list_empty(&o->pending_cmds_head)) + return 0; + + /* Handle the first command */ + cmd_pos = list_first_entry(&o->pending_cmds_head, + struct bnx2x_pending_mcast_cmd, link); + + switch (cmd_pos->type) { + case BNX2X_MCAST_CMD_ADD: + list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", + pmac_pos->mac); + } + break; + + case BNX2X_MCAST_CMD_DEL: + cnt = cmd_pos->data.macs_num; + DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + o->hdl_restore(bp, o, 0, &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); + return -EINVAL; + } + + list_del(&cmd_pos->link); + kfree(cmd_pos); + + return cnt; +} + +/** + * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). + * + * @fw_hi: + * @fw_mid: + * @fw_lo: + * @mac: + */ +static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, + __le16 *fw_lo, u8 *mac) +{ + mac[1] = ((u8 *)fw_hi)[0]; + mac[0] = ((u8 *)fw_hi)[1]; + mac[3] = ((u8 *)fw_mid)[0]; + mac[2] = ((u8 *)fw_mid)[1]; + mac[5] = ((u8 *)fw_lo)[0]; + mac[4] = ((u8 *)fw_lo)[1]; +} + +/** + * bnx2x_mcast_refresh_registry_e1 - + * + * @bp: device handle + * @cnt: + * + * Check the ramrod data first entry flag to see if it's a DELETE or ADD command + * and update the registry correspondingly: if ADD - allocate a memory and add + * the entries to the registry (list), if DELETE - clear the registry and free + * the memory. 
+ */ +static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, + struct bnx2x_mcast_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct bnx2x_mcast_mac_elem *elem; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(raw->rdata); + + /* If first entry contains a SET bit - the command was ADD, + * otherwise - DEL_ALL + */ + if (GET_FLAG(data->config_table[0].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { + int i, len = data->hdr.length; + + /* Break if it was a RESTORE command */ + if (!list_empty(&o->registry.exact_match.macs)) + return 0; + + elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC); + if (!elem) { + BNX2X_ERR("Failed to allocate registry memory\n"); + return -ENOMEM; + } + + for (i = 0; i < len; i++, elem++) { + bnx2x_get_fw_mac_addr( + &data->config_table[i].msb_mac_addr, + &data->config_table[i].middle_mac_addr, + &data->config_table[i].lsb_mac_addr, + elem->mac); + DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n", + elem->mac); + list_add_tail(&elem->link, + &o->registry.exact_match.macs); + } + } else { + elem = list_first_entry(&o->registry.exact_match.macs, + struct bnx2x_mcast_mac_elem, link); + DP(BNX2X_MSG_SP, "Deleting a registry\n"); + kfree(elem); + INIT_LIST_HEAD(&o->registry.exact_match.macs); + } + + return 0; +} + +static int bnx2x_mcast_setup_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(raw->rdata); + int cnt = 0, i, rc; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + /* First set all entries as invalid */ + for (i = 0; i < o->max_cmd_len ; i++) + SET_FLAG(data->config_table[i].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); + + /* Handle pending commands first */ + cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); + + /* If there are no more pending commands - clear SCHEDULED state */ + if (list_empty(&o->pending_cmds_head)) + o->clear_sched(o); + + /* The below may be true iff there were no pending commands */ + if (!cnt) + cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); + + /* For 57710 every command has o->max_cmd_len length to ensure that + * commands are done one at a time. + */ + o->total_pending_num -= o->max_cmd_len; + + /* send a ramrod */ + + WARN_ON(cnt > o->max_cmd_len); + + /* Set ramrod header (in particular, a number of entries to update) */ + bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); + + /* update a registry: we need the registry contents to be always up + * to date in order to be able to execute a RESTORE opcode. Here + * we use the fact that for 57710 we sent one command at a time + * hence we may take the registry update out of the command handling + * and do it in a simpler way here. + */ + rc = bnx2x_mcast_refresh_registry_e1(bp, o); + if (rc) + return rc; + + /* If CLEAR_ONLY was requested - don't send a ramrod and clear + * RAMROD_PENDING status immediately. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + raw->clear_pending(raw); + return 0; + } else { + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
+ */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, + U64_HI(raw->rdata_mapping), + U64_LO(raw->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return 1; + } +} + +static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) +{ + return o->registry.exact_match.num_macs_set; +} + +static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) +{ + return o->registry.aprox_match.num_bins_set; +} + +static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, + int n) +{ + o->registry.exact_match.num_macs_set = n; +} + +static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, + int n) +{ + o->registry.aprox_match.num_bins_set = n; +} + +int bnx2x_config_mcast(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *r = &o->raw; + int rc = 0, old_reg_size; + + /* This is needed to recover number of currently configured mcast macs + * in case of failure. + */ + old_reg_size = o->get_registry_size(o); + + /* Do some calculations and checks */ + rc = o->validate(bp, p, cmd); + if (rc) + return rc; + + /* Return if there is no work to do */ + if ((!p->mcast_list_len) && (!o->check_sched(o))) + return 0; + + DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n", + o->total_pending_num, p->mcast_list_len, o->max_cmd_len); + + /* Enqueue the current command to the pending list if we can't complete + * it in the current iteration + */ + if (r->check_pending(r) || + ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { + rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); + if (rc < 0) + goto error_exit1; + + /* As long as the current command is in a command list we + * don't need to handle it separately. 
+ */ + p->mcast_list_len = 0; + } + + if (!r->check_pending(r)) { + + /* Set 'pending' state */ + r->set_pending(r); + + /* Configure the new classification in the chip */ + rc = o->config_mcast(bp, p, cmd); + if (rc < 0) + goto error_exit2; + + /* Wait for a ramrod completion if was requested */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = o->wait_comp(bp, o); + } + + return rc; + +error_exit2: + r->clear_pending(r); + +error_exit1: + o->revert(bp, p, old_reg_size); + + return rc; +} + +static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) +{ + smp_mb__before_atomic(); + clear_bit(o->sched_state, o->raw.pstate); + smp_mb__after_atomic(); +} + +static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) +{ + smp_mb__before_atomic(); + set_bit(o->sched_state, o->raw.pstate); + smp_mb__after_atomic(); +} + +static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) +{ + return !!test_bit(o->sched_state, o->raw.pstate); +} + +static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) +{ + return o->raw.check_pending(&o->raw) || o->check_sched(o); +} + +void bnx2x_init_mcast_obj(struct bnx2x *bp, + struct bnx2x_mcast_obj *mcast_obj, + u8 mcast_cl_id, u32 mcast_cid, u8 func_id, + u8 engine_id, void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, bnx2x_obj_type type) +{ + memset(mcast_obj, 0, sizeof(*mcast_obj)); + + bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, + rdata, rdata_mapping, state, pstate, type); + + mcast_obj->engine_id = engine_id; + + INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); + + mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; + mcast_obj->check_sched = bnx2x_mcast_check_sched; + mcast_obj->set_sched = bnx2x_mcast_set_sched; + mcast_obj->clear_sched = bnx2x_mcast_clear_sched; + + if (CHIP_IS_E1(bp)) { + mcast_obj->config_mcast = bnx2x_mcast_setup_e1; + mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; + mcast_obj->hdl_restore = + bnx2x_mcast_handle_restore_cmd_e1; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + + if (CHIP_REV_IS_SLOW(bp)) + mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; + else + mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; + + mcast_obj->wait_comp = bnx2x_mcast_wait; + mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; + mcast_obj->validate = bnx2x_mcast_validate_e1; + mcast_obj->revert = bnx2x_mcast_revert_e1; + mcast_obj->get_registry_size = + bnx2x_mcast_get_registry_size_exact; + mcast_obj->set_registry_size = + bnx2x_mcast_set_registry_size_exact; + + /* 57710 is the only chip that uses the exact match for mcast + * at the moment. + */ + INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); + + } else if (CHIP_IS_E1H(bp)) { + mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; + mcast_obj->enqueue_cmd = NULL; + mcast_obj->hdl_restore = NULL; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + + /* 57711 doesn't send a ramrod, so it has unlimited credit + * for one command. 
+ */ + mcast_obj->max_cmd_len = -1; + mcast_obj->wait_comp = bnx2x_mcast_wait; + mcast_obj->set_one_rule = NULL; + mcast_obj->validate = bnx2x_mcast_validate_e1h; + mcast_obj->revert = bnx2x_mcast_revert_e1h; + mcast_obj->get_registry_size = + bnx2x_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + bnx2x_mcast_set_registry_size_aprox; + } else { + mcast_obj->config_mcast = bnx2x_mcast_setup_e2; + mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; + mcast_obj->hdl_restore = + bnx2x_mcast_handle_restore_cmd_e2; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + /* TODO: There should be a proper HSI define for this number!!! + */ + mcast_obj->max_cmd_len = 16; + mcast_obj->wait_comp = bnx2x_mcast_wait; + mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2; + mcast_obj->validate = bnx2x_mcast_validate_e2; + mcast_obj->revert = bnx2x_mcast_revert_e2; + mcast_obj->get_registry_size = + bnx2x_mcast_get_registry_size_aprox; + mcast_obj->set_registry_size = + bnx2x_mcast_set_registry_size_aprox; + } +} + +/*************************** Credit handling **********************************/ + +/** + * atomic_add_ifless - add if the result is less than a given value. + * + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...if (v + a) is less than u. + * + * returns true if (v + a) was less than u, and false otherwise. + * + */ +static inline bool __atomic_add_ifless(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + for (;;) { + if (unlikely(c + a >= u)) + return false; + + old = atomic_cmpxchg((v), c, c + a); + if (likely(old == c)) + break; + c = old; + } + + return true; +} + +/** + * atomic_dec_ifmoe - dec if the result is more or equal than a given value. + * + * @v: pointer of type atomic_t + * @a: the amount to dec from v... + * @u: ...if (v - a) is more or equal than u. + * + * returns true if (v - a) was more or equal than u, and false + * otherwise. + */ +static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u) +{ + int c, old; + + c = atomic_read(v); + for (;;) { + if (unlikely(c - a < u)) + return false; + + old = atomic_cmpxchg((v), c, c - a); + if (likely(old == c)) + break; + c = old; + } + + return true; +} + +static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt) +{ + bool rc; + + smp_mb(); + rc = __atomic_dec_ifmoe(&o->credit, cnt, 0); + smp_mb(); + + return rc; +} + +static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt) +{ + bool rc; + + smp_mb(); + + /* Don't let to refill if credit + cnt > pool_sz */ + rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1); + + smp_mb(); + + return rc; +} + +static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o) +{ + int cur_credit; + + smp_mb(); + cur_credit = atomic_read(&o->credit); + + return cur_credit; +} + +static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o, + int cnt) +{ + return true; +} + +static bool bnx2x_credit_pool_get_entry( + struct bnx2x_credit_pool_obj *o, + int *offset) +{ + int idx, vec, i; + + *offset = -1; + + /* Find "internal cam-offset" then add to base for this object... */ + for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) { + + /* Skip the current vector if there are no free entries in it */ + if (!o->pool_mirror[vec]) + continue; + + /* If we've got here we are going to find a free entry */ + for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0; + i < BIT_VEC64_ELEM_SZ; idx++, i++) + + if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) { + /* Got one!! 
*/ + BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx); + *offset = o->base_pool_offset + idx; + return true; + } + } + + return false; +} + +static bool bnx2x_credit_pool_put_entry( + struct bnx2x_credit_pool_obj *o, + int offset) +{ + if (offset < o->base_pool_offset) + return false; + + offset -= o->base_pool_offset; + + if (offset >= o->pool_sz) + return false; + + /* Return the entry to the pool */ + BIT_VEC64_SET_BIT(o->pool_mirror, offset); + + return true; +} + +static bool bnx2x_credit_pool_put_entry_always_true( + struct bnx2x_credit_pool_obj *o, + int offset) +{ + return true; +} + +static bool bnx2x_credit_pool_get_entry_always_true( + struct bnx2x_credit_pool_obj *o, + int *offset) +{ + *offset = -1; + return true; +} +/** + * bnx2x_init_credit_pool - initialize credit pool internals. + * + * @p: + * @base: Base entry in the CAM to use. + * @credit: pool size. + * + * If base is negative no CAM entries handling will be performed. + * If credit is negative pool operations will always succeed (unlimited pool). + * + */ +static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, + int base, int credit) +{ + /* Zero the object first */ + memset(p, 0, sizeof(*p)); + + /* Set the table to all 1s */ + memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror)); + + /* Init a pool as full */ + atomic_set(&p->credit, credit); + + /* The total poll size */ + p->pool_sz = credit; + + p->base_pool_offset = base; + + /* Commit the change */ + smp_mb(); + + p->check = bnx2x_credit_pool_check; + + /* if pool credit is negative - disable the checks */ + if (credit >= 0) { + p->put = bnx2x_credit_pool_put; + p->get = bnx2x_credit_pool_get; + p->put_entry = bnx2x_credit_pool_put_entry; + p->get_entry = bnx2x_credit_pool_get_entry; + } else { + p->put = bnx2x_credit_pool_always_true; + p->get = bnx2x_credit_pool_always_true; + p->put_entry = bnx2x_credit_pool_put_entry_always_true; + p->get_entry = bnx2x_credit_pool_get_entry_always_true; + } + + /* If base is negative - disable entries handling */ + if (base < 0) { + p->put_entry = bnx2x_credit_pool_put_entry_always_true; + p->get_entry = bnx2x_credit_pool_get_entry_always_true; + } +} + +void bnx2x_init_mac_credit_pool(struct bnx2x *bp, + struct bnx2x_credit_pool_obj *p, u8 func_id, + u8 func_num) +{ +/* TODO: this will be defined in consts as well... */ +#define BNX2X_CAM_SIZE_EMUL 5 + + int cam_sz; + + if (CHIP_IS_E1(bp)) { + /* In E1, Multicast is saved in cam... */ + if (!CHIP_REV_IS_SLOW(bp)) + cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST; + else + cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI; + + bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); + + } else if (CHIP_IS_E1H(bp)) { + /* CAM credit is equaly divided between all active functions + * on the PORT!. + */ + if ((func_num > 0)) { + if (!CHIP_REV_IS_SLOW(bp)) + cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num)); + else + cam_sz = BNX2X_CAM_SIZE_EMUL; + bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz); + } else { + /* this should never happen! Block MAC operations. */ + bnx2x_init_credit_pool(p, 0, 0); + } + + } else { + + /* CAM credit is equaly divided between all active functions + * on the PATH. + */ + if ((func_num > 0)) { + if (!CHIP_REV_IS_SLOW(bp)) + cam_sz = (MAX_MAC_CREDIT_E2 / func_num); + else + cam_sz = BNX2X_CAM_SIZE_EMUL; + + /* No need for CAM entries handling for 57712 and + * newer. + */ + bnx2x_init_credit_pool(p, -1, cam_sz); + } else { + /* this should never happen! Block MAC operations. 
*/ + bnx2x_init_credit_pool(p, 0, 0); + } + } +} + +void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, + struct bnx2x_credit_pool_obj *p, + u8 func_id, + u8 func_num) +{ + if (CHIP_IS_E1x(bp)) { + /* There is no VLAN credit in HW on 57710 and 57711 only + * MAC / MAC-VLAN can be set + */ + bnx2x_init_credit_pool(p, 0, -1); + } else { + /* CAM credit is equally divided between all active functions + * on the PATH. + */ + if (func_num > 0) { + int credit = MAX_VLAN_CREDIT_E2 / func_num; + bnx2x_init_credit_pool(p, func_id * credit, credit); + } else + /* this should never happen! Block VLAN operations. */ + bnx2x_init_credit_pool(p, 0, 0); + } +} + +/****************** RSS Configuration ******************/ +/** + * bnx2x_debug_print_ind_table - prints the indirection table configuration. + * + * @bp: driver handle + * @p: pointer to rss configuration + * + * Prints it when NETIF_MSG_IFUP debug level is configured. + */ +static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp, + struct bnx2x_config_rss_params *p) +{ + int i; + + DP(BNX2X_MSG_SP, "Setting indirection table to:\n"); + DP(BNX2X_MSG_SP, "0x0000: "); + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { + DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]); + + /* Print 4 bytes in a line */ + if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && + (((i + 1) & 0x3) == 0)) { + DP_CONT(BNX2X_MSG_SP, "\n"); + DP(BNX2X_MSG_SP, "0x%04x: ", i + 1); + } + } + + DP_CONT(BNX2X_MSG_SP, "\n"); +} + +/** + * bnx2x_setup_rss - configure RSS + * + * @bp: device handle + * @p: rss configuration + * + * sends on UPDATE ramrod for that matter. + */ +static int bnx2x_setup_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *p) +{ + struct bnx2x_rss_config_obj *o = p->rss_obj; + struct bnx2x_raw_obj *r = &o->raw; + struct eth_rss_update_ramrod_data *data = + (struct eth_rss_update_ramrod_data *)(r->rdata); + u16 caps = 0; + u8 rss_mode = 0; + int rc; + + memset(data, 0, sizeof(*data)); + + DP(BNX2X_MSG_SP, "Configuring RSS\n"); + + /* Set an echo field */ + data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) | + (r->state << BNX2X_SWCID_SHIFT)); + + /* RSS mode */ + if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_DISABLED; + else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) + rss_mode = ETH_RSS_MODE_REGULAR; + + data->rss_mode = rss_mode; + + DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode); + + /* RSS capabilities */ + if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + + if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; + + /* RSS keys */ + if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { + memcpy(&data->rss_key[0], &p->rss_key[0], + sizeof(data->rss_key)); + caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; + } + + data->capabilities = cpu_to_le16(caps); + + /* Hashing mask */ + data->rss_result_mask = p->rss_result_mask; + + /* RSS engine ID */ + 
data->rss_engine_id = o->engine_id; + + DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id); + + /* Indirection table */ + memcpy(data->indirection_table, p->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + + /* Remember the last configuration */ + memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + + /* Print the indirection table */ + if (netif_msg_ifup(bp)) + bnx2x_debug_print_ind_table(bp, p); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid, + U64_HI(r->rdata_mapping), + U64_LO(r->rdata_mapping), + ETH_CONNECTION_TYPE); + + if (rc < 0) + return rc; + + return 1; +} + +void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, + u8 *ind_table) +{ + memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table)); +} + +int bnx2x_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *p) +{ + int rc; + struct bnx2x_rss_config_obj *o = p->rss_obj; + struct bnx2x_raw_obj *r = &o->raw; + + /* Do nothing if only driver cleanup was requested */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", + p->ramrod_flags); + return 0; + } + + r->set_pending(r); + + rc = o->config_rss(bp, p); + if (rc < 0) { + r->clear_pending(r); + return rc; + } + + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = r->wait_comp(bp, r); + + return rc; +} + +void bnx2x_init_rss_config_obj(struct bnx2x *bp, + struct bnx2x_rss_config_obj *rss_obj, + u8 cl_id, u32 cid, u8 func_id, u8 engine_id, + void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + bnx2x_obj_type type) +{ + bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type); + + rss_obj->engine_id = engine_id; + rss_obj->config_rss = bnx2x_setup_rss; +} + +/********************** Queue state object ***********************************/ + +/** + * bnx2x_queue_state_change - perform Queue state change transition + * + * @bp: device handle + * @params: parameters to perform the transition + * + * returns 0 in case of successfully completed transition, negative error + * code in case of failure, positive (EBUSY) value if there is a completion + * to that is still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous commands). + * + */ +int bnx2x_queue_state_change(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + int rc, pending_bit; + unsigned long *pending = &o->pending; + + /* Check that the requested transition is legal */ + rc = o->check_transition(bp, o, params); + if (rc) { + BNX2X_ERR("check transition returned an error. 
rc %d\n", rc); + return -EINVAL; + } + + /* Set "pending" bit */ + DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending); + pending_bit = o->set_pending(o, params); + DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending); + + /* Don't send a command if only driver cleanup was requested */ + if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) + o->complete_cmd(bp, o, pending_bit); + else { + /* Send a ramrod */ + rc = o->send_cmd(bp, params); + if (rc) { + o->next_state = BNX2X_Q_STATE_MAX; + clear_bit(pending_bit, pending); + smp_mb__after_atomic(); + return rc; + } + + if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { + rc = o->wait_comp(bp, o, pending_bit); + if (rc) + return rc; + + return 0; + } + } + + return !!test_bit(pending_bit, pending); +} + +static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_state_params *params) +{ + enum bnx2x_queue_cmd cmd = params->cmd, bit; + + /* ACTIVATE and DEACTIVATE commands are implemented on top of + * UPDATE command. + */ + if ((cmd == BNX2X_Q_CMD_ACTIVATE) || + (cmd == BNX2X_Q_CMD_DEACTIVATE)) + bit = BNX2X_Q_CMD_UPDATE; + else + bit = cmd; + + set_bit(bit, &obj->pending); + return bit; +} + +static int bnx2x_queue_wait_comp(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + enum bnx2x_queue_cmd cmd) +{ + return bnx2x_state_wait(bp, cmd, &o->pending); +} + +/** + * bnx2x_queue_comp_cmd - complete the state change command. + * + * @bp: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int bnx2x_queue_comp_cmd(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + enum bnx2x_queue_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!test_and_clear_bit(cmd, &cur_pending)) { + BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n", + cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], + o->state, cur_pending, o->next_state); + return -EINVAL; + } + + if (o->next_tx_only >= o->max_cos) + /* >= because tx only must always be smaller than cos since the + * primary connection supports COS 0 + */ + BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d", + o->next_tx_only, o->max_cos); + + DP(BNX2X_MSG_SP, + "Completing command %d for queue %d, setting state to %d\n", + cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state); + + if (o->next_tx_only) /* print num tx-only if any exist */ + DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n", + o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only); + + o->state = o->next_state; + o->num_tx_only = o->next_tx_only; + o->next_state = BNX2X_Q_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. 
+ */ + wmb(); + + clear_bit(cmd, &o->pending); + smp_mb__after_atomic(); + + return 0; +} + +static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct client_init_ramrod_data *data) +{ + struct bnx2x_queue_setup_params *params = &cmd_params->params.setup; + + /* Rx data */ + + /* IPv6 TPA supported for E2 and above only */ + data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV6; +} + +static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + struct bnx2x_general_setup_params *params, + struct client_init_general_data *gen_data, + unsigned long *flags) +{ + gen_data->client_id = o->cl_id; + + if (test_bit(BNX2X_Q_FLG_STATS, flags)) { + gen_data->statistics_counter_id = + params->stat_id; + gen_data->statistics_en_flg = 1; + gen_data->statistics_zero_flg = + test_bit(BNX2X_Q_FLG_ZERO_STATS, flags); + } else + gen_data->statistics_counter_id = + DISABLE_STATISTIC_COUNTER_ID_VALUE; + + gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags); + gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags); + gen_data->sp_client_id = params->spcl_id; + gen_data->mtu = cpu_to_le16(params->mtu); + gen_data->func_id = o->func_id; + + gen_data->cos = params->cos; + + gen_data->traffic_type = + test_bit(BNX2X_Q_FLG_FCOE, flags) ? + LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; + + gen_data->fp_hsi_ver = params->fp_hsi; + + DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", + gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); +} + +static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, + struct bnx2x_txq_setup_params *params, + struct client_init_tx_data *tx_data, + unsigned long *flags) +{ + tx_data->enforce_security_flg = + test_bit(BNX2X_Q_FLG_TX_SEC, flags); + tx_data->default_vlan = + cpu_to_le16(params->default_vlan); + tx_data->default_vlan_flg = + test_bit(BNX2X_Q_FLG_DEF_VLAN, flags); + tx_data->tx_switching_flg = + test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); + tx_data->anti_spoofing_flg = + test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); + tx_data->force_default_pri_flg = + test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); + tx_data->refuse_outband_vlan_flg = + test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags); + tx_data->tunnel_lso_inc_ip_id = + test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); + tx_data->tunnel_non_lso_pcsum_location = + test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? 
CSUM_ON_PKT : + CSUM_ON_BD; + + tx_data->tx_status_block_id = params->fw_sb_id; + tx_data->tx_sb_index_number = params->sb_cq_index; + tx_data->tss_leading_client_id = params->tss_leading_cl_id; + + tx_data->tx_bd_page_base.lo = + cpu_to_le32(U64_LO(params->dscr_map)); + tx_data->tx_bd_page_base.hi = + cpu_to_le32(U64_HI(params->dscr_map)); + + /* Don't configure any Tx switching mode during queue SETUP */ + tx_data->state = 0; +} + +static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o, + struct rxq_pause_params *params, + struct client_init_rx_data *rx_data) +{ + /* flow control data */ + rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo); + rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi); + rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo); + rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi); + rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo); + rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi); + rx_data->rx_cos_mask = cpu_to_le16(params->pri_map); +} + +static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o, + struct bnx2x_rxq_setup_params *params, + struct client_init_rx_data *rx_data, + unsigned long *flags) +{ + rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * + CLIENT_INIT_RX_DATA_TPA_EN_IPV4; + rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) * + CLIENT_INIT_RX_DATA_TPA_MODE; + rx_data->vmqueue_mode_en_flg = 0; + + rx_data->cache_line_alignment_log_size = + params->cache_line_log; + rx_data->enable_dynamic_hc = + test_bit(BNX2X_Q_FLG_DHC, flags); + rx_data->max_sges_for_packet = params->max_sges_pkt; + rx_data->client_qzone_id = params->cl_qzone_id; + rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); + + /* Always start in DROP_ALL mode */ + rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | + CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); + + /* We don't set drop flags */ + rx_data->drop_ip_cs_err_flg = 0; + rx_data->drop_tcp_cs_err_flg = 0; + rx_data->drop_ttl0_flg = 0; + rx_data->drop_udp_cs_err_flg = 0; + rx_data->inner_vlan_removal_enable_flg = + test_bit(BNX2X_Q_FLG_VLAN, flags); + rx_data->outer_vlan_removal_enable_flg = + test_bit(BNX2X_Q_FLG_OV, flags); + rx_data->status_block_id = params->fw_sb_id; + rx_data->rx_sb_index_number = params->sb_cq_index; + rx_data->max_tpa_queues = params->max_tpa_queues; + rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); + rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); + rx_data->bd_page_base.lo = + cpu_to_le32(U64_LO(params->dscr_map)); + rx_data->bd_page_base.hi = + cpu_to_le32(U64_HI(params->dscr_map)); + rx_data->sge_page_base.lo = + cpu_to_le32(U64_LO(params->sge_map)); + rx_data->sge_page_base.hi = + cpu_to_le32(U64_HI(params->sge_map)); + rx_data->cqe_page_base.lo = + cpu_to_le32(U64_LO(params->rcq_map)); + rx_data->cqe_page_base.hi = + cpu_to_le32(U64_HI(params->rcq_map)); + rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); + + if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { + rx_data->approx_mcast_engine_id = params->mcast_engine_id; + rx_data->is_approx_mcast = 1; + } + + rx_data->rss_engine_id = params->rss_engine_id; + + /* silent vlan removal */ + rx_data->silent_vlan_removal_flg = + test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); + rx_data->silent_vlan_value = + cpu_to_le16(params->silent_removal_value); + rx_data->silent_vlan_mask = + cpu_to_le16(params->silent_removal_mask); +} + +/* initialize the general, tx and rx parts of a queue object */ +static void 
bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct client_init_ramrod_data *data) +{ + bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, + &cmd_params->params.setup.gen_params, + &data->general, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_tx_data(cmd_params->q_obj, + &cmd_params->params.setup.txq_params, + &data->tx, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_rx_data(cmd_params->q_obj, + &cmd_params->params.setup.rxq_params, + &data->rx, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_pause_data(cmd_params->q_obj, + &cmd_params->params.setup.pause_params, + &data->rx); +} + +/* initialize the general and tx parts of a tx-only queue object */ +static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct tx_queue_init_ramrod_data *data) +{ + bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, + &cmd_params->params.tx_only.gen_params, + &data->general, + &cmd_params->params.tx_only.flags); + + bnx2x_q_fill_init_tx_data(cmd_params->q_obj, + &cmd_params->params.tx_only.txq_params, + &data->tx, + &cmd_params->params.tx_only.flags); + + DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x", + cmd_params->q_obj->cids[0], + data->tx.tx_bd_page_base.lo, + data->tx.tx_bd_page_base.hi); +} + +/** + * bnx2x_q_init - init HW/FW queue + * + * @bp: device handle + * @params: + * + * HW/FW initial Queue configuration: + * - HC: Rx and Tx + * - CDU context validation + * + */ +static inline int bnx2x_q_init(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct bnx2x_queue_init_params *init = ¶ms->params.init; + u16 hc_usec; + u8 cos; + + /* Tx HC configuration */ + if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) && + test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) { + hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0; + + bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id, + init->tx.sb_cq_index, + !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags), + hc_usec); + } + + /* Rx HC configuration */ + if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) && + test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) { + hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0; + + bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id, + init->rx.sb_cq_index, + !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags), + hc_usec); + } + + /* Set CDU context validation values */ + for (cos = 0; cos < o->max_cos; cos++) { + DP(BNX2X_MSG_SP, "setting context validation. 
cid %d, cos %d\n", + o->cids[cos], cos); + DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]); + bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]); + } + + /* As no ramrod is sent, complete the command immediately */ + o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT); + + mmiowb(); + smp_mb(); + + return 0; +} + +static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_setup_data_cmn(bp, params, rdata); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct client_init_ramrod_data *rdata = + (struct client_init_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_setup_data_cmn(bp, params, rdata); + bnx2x_q_fill_setup_data_e2(bp, params, rdata); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct tx_queue_init_ramrod_data *rdata = + (struct tx_queue_init_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP; + struct bnx2x_queue_setup_tx_only_params *tx_only_params = + ¶ms->params.tx_only; + u8 cid_index = tx_only_params->cid_index; + + if (cid_index >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_index); + return -EINVAL; + } + + DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n", + tx_only_params->gen_params.cos, + tx_only_params->gen_params.spcl_id); + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_setup_tx_only(bp, params, rdata); + + DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n", + o->cids[cid_index], rdata->general.client_id, + rdata->general.sp_client_id, rdata->general.cos); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. 
If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], + U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +static void bnx2x_q_fill_update_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_update_params *params, + struct client_update_ramrod_data *data) +{ + /* Client ID of the client to update */ + data->client_id = obj->cl_id; + + /* Function ID of the client to update */ + data->func_id = obj->func_id; + + /* Default VLAN value */ + data->default_vlan = cpu_to_le16(params->def_vlan); + + /* Inner VLAN stripping */ + data->inner_vlan_removal_enable_flg = + test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags); + data->inner_vlan_removal_change_flg = + test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, + ¶ms->update_flags); + + /* Outer VLAN stripping */ + data->outer_vlan_removal_enable_flg = + test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags); + data->outer_vlan_removal_change_flg = + test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, + ¶ms->update_flags); + + /* Drop packets that have source MAC that doesn't belong to this + * Queue. + */ + data->anti_spoofing_enable_flg = + test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags); + data->anti_spoofing_change_flg = + test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags); + + /* Activate/Deactivate */ + data->activate_flg = + test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags); + data->activate_change_flg = + test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags); + + /* Enable default VLAN */ + data->default_vlan_enable_flg = + test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags); + data->default_vlan_change_flg = + test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + ¶ms->update_flags); + + /* silent vlan removal */ + data->silent_vlan_change_flg = + test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + ¶ms->update_flags); + data->silent_vlan_removal_flg = + test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags); + data->silent_vlan_value = cpu_to_le16(params->silent_removal_value); + data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask); + + /* tx switching */ + data->tx_switching_flg = + test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, ¶ms->update_flags); + data->tx_switching_change_flg = + test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + ¶ms->update_flags); + + /* PTP */ + data->handle_ptp_pkts_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS, ¶ms->update_flags); + data->handle_ptp_pkts_change_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, ¶ms->update_flags); +} + +static inline int bnx2x_q_send_update(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct client_update_ramrod_data *rdata = + (struct client_update_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_queue_update_params *update_params = + ¶ms->params.update; + u8 cid_index = update_params->cid_index; + + if (cid_index >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_index); + return -EINVAL; + } + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_update_data(bp, o, update_params, rdata); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. 
If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + o->cids[cid_index], U64_HI(data_mapping), + U64_LO(data_mapping), ETH_CONNECTION_TYPE); +} + +/** + * bnx2x_q_send_deactivate - send DEACTIVATE command + * + * @bp: device handle + * @params: + * + * implemented using the UPDATE command. + */ +static inline int bnx2x_q_send_deactivate(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_update_params *update = ¶ms->params.update; + + memset(update, 0, sizeof(*update)); + + __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return bnx2x_q_send_update(bp, params); +} + +/** + * bnx2x_q_send_activate - send ACTIVATE command + * + * @bp: device handle + * @params: + * + * implemented using the UPDATE command. + */ +static inline int bnx2x_q_send_activate(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_update_params *update = ¶ms->params.update; + + memset(update, 0, sizeof(*update)); + + __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags); + __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags); + + return bnx2x_q_send_update(bp, params); +} + +static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + struct bnx2x_queue_update_tpa_params *params, + struct tpa_update_ramrod_data *data) +{ + data->client_id = obj->cl_id; + data->complete_on_both_clients = params->complete_on_both_clients; + data->dont_verify_rings_pause_thr_flg = + params->dont_verify_thr; + data->max_agg_size = cpu_to_le16(params->max_agg_sz); + data->max_sges_for_packet = params->max_sges_pkt; + data->max_tpa_queues = params->max_tpa_queues; + data->sge_buff_size = cpu_to_le16(params->sge_buff_sz); + data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map)); + data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map)); + data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high); + data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low); + data->tpa_mode = params->tpa_mode; + data->update_ipv4 = params->update_ipv4; + data->update_ipv6 = params->update_ipv6; +} + +static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + struct tpa_update_ramrod_data *rdata = + (struct tpa_update_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_queue_update_tpa_params *update_tpa_params = + ¶ms->params.update_tpa; + u16 type; + + /* Clear the ramrod data */ + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data */ + bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata); + + /* Add the function id inside the type, so that sp post function + * doesn't automatically add the PF func-id, this is required + * for operations done by PFs on behalf of their VFs + */ + type = ETH_CONNECTION_TYPE | + ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT); + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
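A pattern worth spelling out in bnx2x_q_fill_update_data() above: every updatable feature travels as a pair of bits, the value itself (e.g. BNX2X_Q_UPDATE_ACTIVATE) and a matching *_CHNG flag, and only features whose change flag is set take effect. That is what lets bnx2x_q_send_activate()/bnx2x_q_send_deactivate() reuse the UPDATE ramrod while leaving every other setting untouched. A small illustrative helper (name invented) that prepares such a request:

static void example_request_active_change(struct bnx2x_queue_update_params *update,
					  bool active)
{
	/* Only the "active" feature is marked as changed; all other
	 * features keep their current values because their *_CHNG bits
	 * stay clear.
	 */
	if (active)
		__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
}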
+ */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE, + o->cids[BNX2X_PRIMARY_CID_INDEX], + U64_HI(data_mapping), + U64_LO(data_mapping), type); +} + +static inline int bnx2x_q_send_halt(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, + o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id, + ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + u8 cid_idx = params->params.cfc_del.cid_index; + + if (cid_idx >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_idx); + return -EINVAL; + } + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, + o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_terminate(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + u8 cid_index = params->params.terminate.cid_index; + + if (cid_index >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_index); + return -EINVAL; + } + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, + o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_empty(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, + o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0, + ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_INIT: + return bnx2x_q_init(bp, params); + case BNX2X_Q_CMD_SETUP_TX_ONLY: + return bnx2x_q_send_setup_tx_only(bp, params); + case BNX2X_Q_CMD_DEACTIVATE: + return bnx2x_q_send_deactivate(bp, params); + case BNX2X_Q_CMD_ACTIVATE: + return bnx2x_q_send_activate(bp, params); + case BNX2X_Q_CMD_UPDATE: + return bnx2x_q_send_update(bp, params); + case BNX2X_Q_CMD_UPDATE_TPA: + return bnx2x_q_send_update_tpa(bp, params); + case BNX2X_Q_CMD_HALT: + return bnx2x_q_send_halt(bp, params); + case BNX2X_Q_CMD_CFC_DEL: + return bnx2x_q_send_cfc_del(bp, params); + case BNX2X_Q_CMD_TERMINATE: + return bnx2x_q_send_terminate(bp, params); + case BNX2X_Q_CMD_EMPTY: + return bnx2x_q_send_empty(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_SETUP: + return bnx2x_q_send_setup_e1x(bp, params); + case BNX2X_Q_CMD_INIT: + case BNX2X_Q_CMD_SETUP_TX_ONLY: + case BNX2X_Q_CMD_DEACTIVATE: + case BNX2X_Q_CMD_ACTIVATE: + case BNX2X_Q_CMD_UPDATE: + case BNX2X_Q_CMD_UPDATE_TPA: + case BNX2X_Q_CMD_HALT: + case BNX2X_Q_CMD_CFC_DEL: + case BNX2X_Q_CMD_TERMINATE: + case BNX2X_Q_CMD_EMPTY: + return bnx2x_queue_send_cmd_cmn(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_SETUP: + return bnx2x_q_send_setup_e2(bp, params); + case BNX2X_Q_CMD_INIT: + case BNX2X_Q_CMD_SETUP_TX_ONLY: + case BNX2X_Q_CMD_DEACTIVATE: + case BNX2X_Q_CMD_ACTIVATE: + case BNX2X_Q_CMD_UPDATE: + case BNX2X_Q_CMD_UPDATE_TPA: + 
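/* Note (illustrative summary of the two dispatchers): the E1x and E2
 * variants differ only in how SETUP is built. bnx2x_q_send_setup_e2()
 * fills the common client-init data and then the E2-only part (the IPv6
 * TPA enable bit), while every other command is routed through
 * bnx2x_queue_send_cmd_cmn(), exactly as in the E1x dispatcher above.
 */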
case BNX2X_Q_CMD_HALT: + case BNX2X_Q_CMD_CFC_DEL: + case BNX2X_Q_CMD_TERMINATE: + case BNX2X_Q_CMD_EMPTY: + return bnx2x_queue_send_cmd_cmn(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +/** + * bnx2x_queue_chk_transition - check state machine of a regular Queue + * + * @bp: device handle + * @o: + * @params: + * + * (not Forwarding) + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * -EINVAL otherwise. + */ +static int bnx2x_queue_chk_transition(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + struct bnx2x_queue_state_params *params) +{ + enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX; + enum bnx2x_queue_cmd cmd = params->cmd; + struct bnx2x_queue_update_params *update_params = + ¶ms->params.update; + u8 next_tx_only = o->num_tx_only; + + /* Forget all pending for completion commands if a driver only state + * transition has been requested. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { + o->pending = 0; + o->next_state = BNX2X_Q_STATE_MAX; + } + + /* Don't allow a next state transition if we are in the middle of + * the previous one. + */ + if (o->pending) { + BNX2X_ERR("Blocking transition since pending was %lx\n", + o->pending); + return -EBUSY; + } + + switch (state) { + case BNX2X_Q_STATE_RESET: + if (cmd == BNX2X_Q_CMD_INIT) + next_state = BNX2X_Q_STATE_INITIALIZED; + + break; + case BNX2X_Q_STATE_INITIALIZED: + if (cmd == BNX2X_Q_CMD_SETUP) { + if (test_bit(BNX2X_Q_FLG_ACTIVE, + ¶ms->params.setup.flags)) + next_state = BNX2X_Q_STATE_ACTIVE; + else + next_state = BNX2X_Q_STATE_INACTIVE; + } + + break; + case BNX2X_Q_STATE_ACTIVE: + if (cmd == BNX2X_Q_CMD_DEACTIVATE) + next_state = BNX2X_Q_STATE_INACTIVE; + + else if ((cmd == BNX2X_Q_CMD_EMPTY) || + (cmd == BNX2X_Q_CMD_UPDATE_TPA)) + next_state = BNX2X_Q_STATE_ACTIVE; + + else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { + next_state = BNX2X_Q_STATE_MULTI_COS; + next_tx_only = 1; + } + + else if (cmd == BNX2X_Q_CMD_HALT) + next_state = BNX2X_Q_STATE_STOPPED; + + else if (cmd == BNX2X_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !test_bit(BNX2X_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = BNX2X_Q_STATE_INACTIVE; + else + next_state = BNX2X_Q_STATE_ACTIVE; + } + + break; + case BNX2X_Q_STATE_MULTI_COS: + if (cmd == BNX2X_Q_CMD_TERMINATE) + next_state = BNX2X_Q_STATE_MCOS_TERMINATED; + + else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) { + next_state = BNX2X_Q_STATE_MULTI_COS; + next_tx_only = o->num_tx_only + 1; + } + + else if ((cmd == BNX2X_Q_CMD_EMPTY) || + (cmd == BNX2X_Q_CMD_UPDATE_TPA)) + next_state = BNX2X_Q_STATE_MULTI_COS; + + else if (cmd == BNX2X_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. 
+ */ + if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + !test_bit(BNX2X_Q_UPDATE_ACTIVATE, + &update_params->update_flags)) + next_state = BNX2X_Q_STATE_INACTIVE; + else + next_state = BNX2X_Q_STATE_MULTI_COS; + } + + break; + case BNX2X_Q_STATE_MCOS_TERMINATED: + if (cmd == BNX2X_Q_CMD_CFC_DEL) { + next_tx_only = o->num_tx_only - 1; + if (next_tx_only == 0) + next_state = BNX2X_Q_STATE_ACTIVE; + else + next_state = BNX2X_Q_STATE_MULTI_COS; + } + + break; + case BNX2X_Q_STATE_INACTIVE: + if (cmd == BNX2X_Q_CMD_ACTIVATE) + next_state = BNX2X_Q_STATE_ACTIVE; + + else if ((cmd == BNX2X_Q_CMD_EMPTY) || + (cmd == BNX2X_Q_CMD_UPDATE_TPA)) + next_state = BNX2X_Q_STATE_INACTIVE; + + else if (cmd == BNX2X_Q_CMD_HALT) + next_state = BNX2X_Q_STATE_STOPPED; + + else if (cmd == BNX2X_Q_CMD_UPDATE) { + /* If "active" state change is requested, update the + * state accordingly. + */ + if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, + &update_params->update_flags) && + test_bit(BNX2X_Q_UPDATE_ACTIVATE, + &update_params->update_flags)){ + if (o->num_tx_only == 0) + next_state = BNX2X_Q_STATE_ACTIVE; + else /* tx only queues exist for this queue */ + next_state = BNX2X_Q_STATE_MULTI_COS; + } else + next_state = BNX2X_Q_STATE_INACTIVE; + } + + break; + case BNX2X_Q_STATE_STOPPED: + if (cmd == BNX2X_Q_CMD_TERMINATE) + next_state = BNX2X_Q_STATE_TERMINATED; + + break; + case BNX2X_Q_STATE_TERMINATED: + if (cmd == BNX2X_Q_CMD_CFC_DEL) + next_state = BNX2X_Q_STATE_RESET; + + break; + default: + BNX2X_ERR("Illegal state: %d\n", state); + } + + /* Transition is assured */ + if (next_state != BNX2X_Q_STATE_MAX) { + DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n", + state, cmd, next_state); + o->next_state = next_state; + o->next_tx_only = next_tx_only; + return 0; + } + + DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd); + + return -EINVAL; +} + +void bnx2x_init_queue_obj(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, + u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id, + void *rdata, + dma_addr_t rdata_mapping, unsigned long type) +{ + memset(obj, 0, sizeof(*obj)); + + /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */ + BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt); + + memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt); + obj->max_cos = cid_cnt; + obj->cl_id = cl_id; + obj->func_id = func_id; + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + obj->type = type; + obj->next_state = BNX2X_Q_STATE_MAX; + + if (CHIP_IS_E1x(bp)) + obj->send_cmd = bnx2x_queue_send_cmd_e1x; + else + obj->send_cmd = bnx2x_queue_send_cmd_e2; + + obj->check_transition = bnx2x_queue_chk_transition; + + obj->complete_cmd = bnx2x_queue_comp_cmd; + obj->wait_comp = bnx2x_queue_wait_comp; + obj->set_pending = bnx2x_queue_set_pending; +} + +/* return a queue object's logical state*/ +int bnx2x_get_q_logical_state(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj) +{ + switch (obj->state) { + case BNX2X_Q_STATE_ACTIVE: + case BNX2X_Q_STATE_MULTI_COS: + return BNX2X_Q_LOGICAL_STATE_ACTIVE; + case BNX2X_Q_STATE_RESET: + case BNX2X_Q_STATE_INITIALIZED: + case BNX2X_Q_STATE_MCOS_TERMINATED: + case BNX2X_Q_STATE_INACTIVE: + case BNX2X_Q_STATE_STOPPED: + case BNX2X_Q_STATE_TERMINATED: + case BNX2X_Q_STATE_FLRED: + return BNX2X_Q_LOGICAL_STATE_STOPPED; + default: + return -EINVAL; + } +} + +/********************** Function state object *********************************/ +enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o) +{ + /* in the middle 
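bnx2x_init_queue_obj() above wires the per-chip send_cmd callback together with the common transition checker and the completion/wait handlers. A hedged sketch of how a single-CoS L2 queue object might be initialised with it; all local names here are invented for illustration, the real callers live elsewhere in the driver:

static void example_init_l2_queue_obj(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *q_obj,
				      u8 cl_id, u32 cid, u8 func_id,
				      void *rdata, dma_addr_t rdata_mapping)
{
	u32 cids[1] = { cid };
	unsigned long q_type = 0;

	/* A regular L2 queue has both an Rx and a Tx side */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	bnx2x_init_queue_obj(bp, q_obj, cl_id, cids, ARRAY_SIZE(cids),
			     func_id, rdata, rdata_mapping, q_type);
}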
of transaction - return INVALID state */ + if (o->pending) + return BNX2X_F_STATE_MAX; + + /* unsure the order of reading of o->pending and o->state + * o->pending should be read first + */ + rmb(); + + return o->state; +} + +static int bnx2x_func_wait_comp(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd) +{ + return bnx2x_state_wait(bp, cmd, &o->pending); +} + +/** + * bnx2x_func_state_change_comp - complete the state machine transition + * + * @bp: device handle + * @o: + * @cmd: + * + * Called on state change transition. Completes the state + * machine transition only - no HW interaction. + */ +static inline int bnx2x_func_state_change_comp(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd) +{ + unsigned long cur_pending = o->pending; + + if (!test_and_clear_bit(cmd, &cur_pending)) { + BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n", + cmd, BP_FUNC(bp), o->state, + cur_pending, o->next_state); + return -EINVAL; + } + + DP(BNX2X_MSG_SP, + "Completing command %d for func %d, setting state to %d\n", + cmd, BP_FUNC(bp), o->next_state); + + o->state = o->next_state; + o->next_state = BNX2X_F_STATE_MAX; + + /* It's important that o->state and o->next_state are + * updated before o->pending. + */ + wmb(); + + clear_bit(cmd, &o->pending); + smp_mb__after_atomic(); + + return 0; +} + +/** + * bnx2x_func_comp_cmd - complete the state change command + * + * @bp: device handle + * @o: + * @cmd: + * + * Checks that the arrived completion is expected. + */ +static int bnx2x_func_comp_cmd(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd) +{ + /* Complete the state machine part first, check if it's a + * legal completion. + */ + int rc = bnx2x_func_state_change_comp(bp, o, cmd); + return rc; +} + +/** + * bnx2x_func_chk_transition - perform function state machine transition + * + * @bp: device handle + * @o: + * @params: + * + * It both checks if the requested command is legal in a current + * state and, if it's legal, sets a `next_state' in the object + * that will be used in the completion flow to set the `state' + * of the object. + * + * returns 0 if a requested command is a legal transition, + * -EINVAL otherwise. + */ +static int bnx2x_func_chk_transition(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + struct bnx2x_func_state_params *params) +{ + enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX; + enum bnx2x_func_cmd cmd = params->cmd; + + /* Forget all pending for completion commands if a driver only state + * transition has been requested. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { + o->pending = 0; + o->next_state = BNX2X_F_STATE_MAX; + } + + /* Don't allow a next state transition if we are in the middle of + * the previous one. + */ + if (o->pending) + return -EBUSY; + + switch (state) { + case BNX2X_F_STATE_RESET: + if (cmd == BNX2X_F_CMD_HW_INIT) + next_state = BNX2X_F_STATE_INITIALIZED; + + break; + case BNX2X_F_STATE_INITIALIZED: + if (cmd == BNX2X_F_CMD_START) + next_state = BNX2X_F_STATE_STARTED; + + else if (cmd == BNX2X_F_CMD_HW_RESET) + next_state = BNX2X_F_STATE_RESET; + + break; + case BNX2X_F_STATE_STARTED: + if (cmd == BNX2X_F_CMD_STOP) + next_state = BNX2X_F_STATE_INITIALIZED; + /* afex ramrods can be sent only in started mode, and only + * if not pending for function_stop ramrod completion + * for these events - next state remained STARTED. 
+ */ + else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + + else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + + /* Switch_update ramrod can be sent in either started or + * tx_stopped state, and it doesn't change the state. + */ + else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + + else if (cmd == BNX2X_F_CMD_TX_STOP) + next_state = BNX2X_F_STATE_TX_STOPPED; + + break; + case BNX2X_F_STATE_TX_STOPPED: + if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_TX_STOPPED; + + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_TX_STOPPED; + + else if (cmd == BNX2X_F_CMD_TX_START) + next_state = BNX2X_F_STATE_STARTED; + + break; + default: + BNX2X_ERR("Unknown state: %d\n", state); + } + + /* Transition is assured */ + if (next_state != BNX2X_F_STATE_MAX) { + DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n", + state, cmd, next_state); + o->next_state = next_state; + return 0; + } + + DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n", + state, cmd); + + return -EINVAL; +} + +/** + * bnx2x_func_init_func - performs HW init at function stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only + * HW blocks. + */ +static inline int bnx2x_func_init_func(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + return drv->init_hw_func(bp); +} + +/** + * bnx2x_func_init_port - performs HW init at port stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and + * FUNCTION-only HW blocks. + * + */ +static inline int bnx2x_func_init_port(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_port(bp); + if (rc) + return rc; + + return bnx2x_func_init_func(bp, drv); +} + +/** + * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP, + * PORT-only and FUNCTION-only HW blocks. + */ +static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_cmn_chip(bp); + if (rc) + return rc; + + return bnx2x_func_init_port(bp, drv); +} + +/** + * bnx2x_func_init_cmn - performs HW init at common stage + * + * @bp: device handle + * @drv: + * + * Init HW when the current phase is + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON, + * PORT-only and FUNCTION-only HW blocks. 
+ */ +static inline int bnx2x_func_init_cmn(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + int rc = drv->init_hw_cmn(bp); + if (rc) + return rc; + + return bnx2x_func_init_port(bp, drv); +} + +static int bnx2x_func_hw_init(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + u32 load_code = params->params.hw_init.load_phase; + struct bnx2x_func_sp_obj *o = params->f_obj; + const struct bnx2x_func_sp_drv_ops *drv = o->drv; + int rc = 0; + + DP(BNX2X_MSG_SP, "function %d load_code %x\n", + BP_ABS_FUNC(bp), load_code); + + /* Prepare buffers for unzipping the FW */ + rc = drv->gunzip_init(bp); + if (rc) + return rc; + + /* Prepare FW */ + rc = drv->init_fw(bp); + if (rc) { + BNX2X_ERR("Error loading firmware\n"); + goto init_err; + } + + /* Handle the beginning of COMMON_XXX pases separately... */ + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: + rc = bnx2x_func_init_cmn_chip(bp, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_COMMON: + rc = bnx2x_func_init_cmn(bp, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = bnx2x_func_init_port(bp, drv); + if (rc) + goto init_err; + + break; + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = bnx2x_func_init_func(bp, drv); + if (rc) + goto init_err; + + break; + default: + BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); + rc = -EINVAL; + } + +init_err: + drv->gunzip_end(bp); + + /* In case of success, complete the command immediately: no ramrods + * have been sent. + */ + if (!rc) + o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT); + + return rc; +} + +/** + * bnx2x_func_reset_func - reset HW at function stage + * + * @bp: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only + * FUNCTION-only HW blocks. + */ +static inline void bnx2x_func_reset_func(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + drv->reset_hw_func(bp); +} + +/** + * bnx2x_func_reset_port - reset HW at port stage + * + * @bp: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset + * FUNCTION-only and PORT-only HW blocks. + * + * !!!IMPORTANT!!! + * + * It's important to call reset_port before reset_func() as the last thing + * reset_func does is pf_disable() thus disabling PGLUE_B, which + * makes impossible any DMAE transactions. + */ +static inline void bnx2x_func_reset_port(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + drv->reset_hw_port(bp); + bnx2x_func_reset_func(bp, drv); +} + +/** + * bnx2x_func_reset_cmn - reset HW at common stage + * + * @bp: device handle + * @drv: + * + * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and + * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON, + * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks. 
+ */ +static inline void bnx2x_func_reset_cmn(struct bnx2x *bp, + const struct bnx2x_func_sp_drv_ops *drv) +{ + bnx2x_func_reset_port(bp, drv); + drv->reset_hw_cmn(bp); +} + +static inline int bnx2x_func_hw_reset(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + u32 reset_phase = params->params.hw_reset.reset_phase; + struct bnx2x_func_sp_obj *o = params->f_obj; + const struct bnx2x_func_sp_drv_ops *drv = o->drv; + + DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp), + reset_phase); + + switch (reset_phase) { + case FW_MSG_CODE_DRV_UNLOAD_COMMON: + bnx2x_func_reset_cmn(bp, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_PORT: + bnx2x_func_reset_port(bp, drv); + break; + case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: + bnx2x_func_reset_func(bp, drv); + break; + default: + BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n", + reset_phase); + break; + } + + /* Complete the command immediately: no ramrods have been sent. */ + o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET); + + return 0; +} + +static inline int bnx2x_func_send_start(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct function_start_data *rdata = + (struct function_start_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_start_params *start_params = ¶ms->params.start; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->function_mode = (u8)start_params->mf_mode; + rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); + rdata->path_id = BP_PATH(bp); + rdata->network_cos_mode = start_params->network_cos_mode; + rdata->tunnel_mode = start_params->tunnel_mode; + rdata->gre_tunnel_type = start_params->gre_tunnel_type; + rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; + rdata->vxlan_dst_port = cpu_to_le16(4789); + rdata->sd_accept_mf_clss_fail = start_params->class_fail; + if (start_params->class_fail_ethtype) { + rdata->sd_accept_mf_clss_fail_match_ethtype = 1; + rdata->sd_accept_mf_clss_fail_ethtype = + cpu_to_le16(start_params->class_fail_ethtype); + } + + rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri; + rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val; + if (start_params->sd_vlan_eth_type) + rdata->sd_vlan_eth_type = + cpu_to_le16(start_params->sd_vlan_eth_type); + else + rdata->sd_vlan_eth_type = + cpu_to_le16(0x8100); + + rdata->no_added_tags = start_params->no_added_tags; + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
+ */ + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_switch_update_params *switch_update_params = + ¶ms->params.switch_update; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes)) { + rdata->tx_switch_suspend_change_flg = 1; + rdata->tx_switch_suspend = + test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); + } + + if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_tag_change_flg = 1; + rdata->sd_vlan_tag = + cpu_to_le16(switch_update_params->vlan); + } + + if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_eth_type_change_flg = 1; + rdata->sd_vlan_eth_type = + cpu_to_le16(switch_update_params->vlan_eth_type); + } + + if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_force_pri_change_flg = 1; + if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, + &switch_update_params->changes)) + rdata->sd_vlan_force_pri_flg = 1; + rdata->sd_vlan_force_pri_flg = + switch_update_params->vlan_force_prio; + } + + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + &switch_update_params->changes)) { + rdata->update_tunn_cfg_flg = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + &switch_update_params->changes)) + rdata->tunn_clss_en = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + &switch_update_params->changes)) + rdata->inner_gre_rss_en = 1; + rdata->tunnel_mode = switch_update_params->tunnel_mode; + rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type; + rdata->vxlan_dst_port = cpu_to_le16(4789); + } + + rdata->echo = SWITCH_UPDATE; + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
+ */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct function_update_data *rdata = + (struct function_update_data *)o->afex_rdata; + dma_addr_t data_mapping = o->afex_rdata_mapping; + struct bnx2x_func_afex_update_params *afex_update_params = + ¶ms->params.afex_update; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_id_change_flg = 1; + rdata->vif_id = cpu_to_le16(afex_update_params->vif_id); + rdata->afex_default_vlan_change_flg = 1; + rdata->afex_default_vlan = + cpu_to_le16(afex_update_params->afex_default_vlan); + rdata->allowed_priorities_change_flg = 1; + rdata->allowed_priorities = afex_update_params->allowed_priorities; + rdata->echo = AFEX_UPDATE; + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). + */ + DP(BNX2X_MSG_SP, + "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", + rdata->vif_id, + rdata->afex_default_vlan, rdata->allowed_priorities); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static +inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct afex_vif_list_ramrod_data *rdata = + (struct afex_vif_list_ramrod_data *)o->afex_rdata; + struct bnx2x_func_afex_viflists_params *afex_vif_params = + ¶ms->params.afex_viflists; + u64 *p_rdata = (u64 *)rdata; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index); + rdata->func_bit_map = afex_vif_params->func_bit_map; + rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command; + rdata->func_to_clear = afex_vif_params->func_to_clear; + + /* send in echo type of sub command */ + rdata->echo = afex_vif_params->afex_vif_list_command; + + /* No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
+ */ + + DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n", + rdata->afex_vif_list_command, rdata->vif_list_index, + rdata->func_bit_map, rdata->func_to_clear); + + /* this ramrod sends data directly and not through DMA mapping */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0, + U64_HI(*p_rdata), U64_LO(*p_rdata), + NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_stop(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, + NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0, + NONE_CONNECTION_TYPE); +} +static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct flow_control_configuration *rdata = + (struct flow_control_configuration *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_tx_start_params *tx_start_params = + ¶ms->params.tx_start; + int i; + + memset(rdata, 0, sizeof(*rdata)); + + rdata->dcb_enabled = tx_start_params->dcb_enabled; + rdata->dcb_version = tx_start_params->dcb_version; + rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en; + + for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++) + rdata->traffic_type_to_priority_cos[i] = + tx_start_params->traffic_type_to_priority_cos[i]; + + /* No need for an explicit memory barrier here as long as we + * ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read. If the memory read is removed we will have to put a + * full memory barrier there (inside bnx2x_sp_post()). 
+ */ + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static inline +int bnx2x_func_send_set_timesync(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct set_timesync_ramrod_data *rdata = + (struct set_timesync_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_set_timesync_params *set_timesync_params = + ¶ms->params.set_timesync; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd; + rdata->offset_cmd = set_timesync_params->offset_cmd; + rdata->add_sub_drift_adjust_value = + set_timesync_params->add_sub_drift_adjust_value; + rdata->drift_adjust_value = set_timesync_params->drift_adjust_value; + rdata->drift_adjust_period = set_timesync_params->drift_adjust_period; + rdata->offset_delta.lo = + cpu_to_le32(U64_LO(set_timesync_params->offset_delta)); + rdata->offset_delta.hi = + cpu_to_le32(U64_HI(set_timesync_params->offset_delta)); + + DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n", + rdata->drift_adjust_cmd, rdata->offset_cmd, + rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value, + rdata->drift_adjust_period, rdata->offset_delta.lo, + rdata->offset_delta.hi); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + +static int bnx2x_func_send_cmd(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + switch (params->cmd) { + case BNX2X_F_CMD_HW_INIT: + return bnx2x_func_hw_init(bp, params); + case BNX2X_F_CMD_START: + return bnx2x_func_send_start(bp, params); + case BNX2X_F_CMD_STOP: + return bnx2x_func_send_stop(bp, params); + case BNX2X_F_CMD_HW_RESET: + return bnx2x_func_hw_reset(bp, params); + case BNX2X_F_CMD_AFEX_UPDATE: + return bnx2x_func_send_afex_update(bp, params); + case BNX2X_F_CMD_AFEX_VIFLISTS: + return bnx2x_func_send_afex_viflists(bp, params); + case BNX2X_F_CMD_TX_STOP: + return bnx2x_func_send_tx_stop(bp, params); + case BNX2X_F_CMD_TX_START: + return bnx2x_func_send_tx_start(bp, params); + case BNX2X_F_CMD_SWITCH_UPDATE: + return bnx2x_func_send_switch_update(bp, params); + case BNX2X_F_CMD_SET_TIMESYNC: + return bnx2x_func_send_set_timesync(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +void bnx2x_init_func_obj(struct bnx2x *bp, + struct bnx2x_func_sp_obj *obj, + void *rdata, dma_addr_t rdata_mapping, + void *afex_rdata, dma_addr_t afex_rdata_mapping, + struct bnx2x_func_sp_drv_ops *drv_iface) +{ + memset(obj, 0, sizeof(*obj)); + + mutex_init(&obj->one_pending_mutex); + + obj->rdata = rdata; + obj->rdata_mapping = rdata_mapping; + obj->afex_rdata = afex_rdata; + obj->afex_rdata_mapping = afex_rdata_mapping; + obj->send_cmd = bnx2x_func_send_cmd; + obj->check_transition = bnx2x_func_chk_transition; + obj->complete_cmd = bnx2x_func_comp_cmd; + obj->wait_comp = bnx2x_func_wait_comp; + + obj->drv = drv_iface; +} + +/** + * bnx2x_func_state_change - perform Function state change transition + * + * @bp: device handle + * @params: parameters to perform the transaction + * + * returns 0 in case of successfully completed transition, + * negative error code in case of failure, positive + * (EBUSY) 
value if there is a completion to that is + * still pending (possible only if RAMROD_COMP_WAIT is + * not set in params->ramrod_flags for asynchronous + * commands). + */ +int bnx2x_func_state_change(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + int rc, cnt = 300; + enum bnx2x_func_cmd cmd = params->cmd; + unsigned long *pending = &o->pending; + + mutex_lock(&o->one_pending_mutex); + + /* Check that the requested transition is legal */ + rc = o->check_transition(bp, o, params); + if ((rc == -EBUSY) && + (test_bit(RAMROD_RETRY, ¶ms->ramrod_flags))) { + while ((rc == -EBUSY) && (--cnt > 0)) { + mutex_unlock(&o->one_pending_mutex); + msleep(10); + mutex_lock(&o->one_pending_mutex); + rc = o->check_transition(bp, o, params); + } + if (rc == -EBUSY) { + mutex_unlock(&o->one_pending_mutex); + BNX2X_ERR("timeout waiting for previous ramrod completion\n"); + return rc; + } + } else if (rc) { + mutex_unlock(&o->one_pending_mutex); + return rc; + } + + /* Set "pending" bit */ + set_bit(cmd, pending); + + /* Don't send a command if only driver cleanup was requested */ + if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) { + bnx2x_func_state_change_comp(bp, o, cmd); + mutex_unlock(&o->one_pending_mutex); + } else { + /* Send a ramrod */ + rc = o->send_cmd(bp, params); + + mutex_unlock(&o->one_pending_mutex); + + if (rc) { + o->next_state = BNX2X_F_STATE_MAX; + clear_bit(cmd, pending); + smp_mb__after_atomic(); + return rc; + } + + if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) { + rc = o->wait_comp(bp, o, cmd); + if (rc) + return rc; + + return 0; + } + } + + return !!test_bit(cmd, pending); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h new file mode 100644 index 000000000..86baecb7c --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -0,0 +1,1496 @@ +/* bnx2x_sp.h: Broadcom Everest network driver. + * + * Copyright (c) 2011-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Ariel Elior + * Written by: Vladislav Zolotarov + * + */ +#ifndef BNX2X_SP_VERBS +#define BNX2X_SP_VERBS + +struct bnx2x; +struct eth_context; + +/* Bits representing general command's configuration */ +enum { + RAMROD_TX, + RAMROD_RX, + /* Wait until all pending commands complete */ + RAMROD_COMP_WAIT, + /* Don't send a ramrod, only update a registry */ + RAMROD_DRV_CLR_ONLY, + /* Configure HW according to the current object state */ + RAMROD_RESTORE, + /* Execute the next command now */ + RAMROD_EXEC, + /* Don't add a new command and continue execution of postponed + * commands. If not set a new command will be added to the + * pending commands list. + */ + RAMROD_CONT, + /* If there is another pending ramrod, wait until it finishes and + * re-try to submit this one. This flag can be set only in sleepable + * context, and should not be set from the context that completes the + * ramrods as deadlock will occur. 
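bnx2x_func_state_change() above is the single entry point for all function-level commands: callers fill a bnx2x_func_state_params, pick a BNX2X_F_CMD_* and, when they need synchronous behaviour, set RAMROD_COMP_WAIT. A minimal illustrative sketch (helper name invented, error handling reduced to the return value):

static int example_func_tx_stop(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *f_obj)
{
	struct bnx2x_func_state_params func_params = {NULL};

	func_params.f_obj = f_obj;
	func_params.cmd = BNX2X_F_CMD_TX_STOP;

	/* Sleep until the STOP_TRAFFIC ramrod completion arrives */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return bnx2x_func_state_change(bp, &func_params);
}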
+ */ + RAMROD_RETRY, +}; + +typedef enum { + BNX2X_OBJ_TYPE_RX, + BNX2X_OBJ_TYPE_TX, + BNX2X_OBJ_TYPE_RX_TX, +} bnx2x_obj_type; + +/* Public slow path states */ +enum { + BNX2X_FILTER_MAC_PENDING, + BNX2X_FILTER_VLAN_PENDING, + BNX2X_FILTER_VLAN_MAC_PENDING, + BNX2X_FILTER_RX_MODE_PENDING, + BNX2X_FILTER_RX_MODE_SCHED, + BNX2X_FILTER_ISCSI_ETH_START_SCHED, + BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, + BNX2X_FILTER_FCOE_ETH_START_SCHED, + BNX2X_FILTER_FCOE_ETH_STOP_SCHED, + BNX2X_FILTER_MCAST_PENDING, + BNX2X_FILTER_MCAST_SCHED, + BNX2X_FILTER_RSS_CONF_PENDING, + BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, + BNX2X_AFEX_PENDING_VIFSET_MCP_ACK +}; + +struct bnx2x_raw_obj { + u8 func_id; + + /* Queue params */ + u8 cl_id; + u32 cid; + + /* Ramrod data buffer params */ + void *rdata; + dma_addr_t rdata_mapping; + + /* Ramrod state params */ + int state; /* "ramrod is pending" state bit */ + unsigned long *pstate; /* pointer to state buffer */ + + bnx2x_obj_type obj_type; + + int (*wait_comp)(struct bnx2x *bp, + struct bnx2x_raw_obj *o); + + bool (*check_pending)(struct bnx2x_raw_obj *o); + void (*clear_pending)(struct bnx2x_raw_obj *o); + void (*set_pending)(struct bnx2x_raw_obj *o); +}; + +/************************* VLAN-MAC commands related parameters ***************/ +struct bnx2x_mac_ramrod_data { + u8 mac[ETH_ALEN]; + u8 is_inner_mac; +}; + +struct bnx2x_vlan_ramrod_data { + u16 vlan; +}; + +struct bnx2x_vlan_mac_ramrod_data { + u8 mac[ETH_ALEN]; + u8 is_inner_mac; + u16 vlan; +}; + +union bnx2x_classification_ramrod_data { + struct bnx2x_mac_ramrod_data mac; + struct bnx2x_vlan_ramrod_data vlan; + struct bnx2x_vlan_mac_ramrod_data vlan_mac; +}; + +/* VLAN_MAC commands */ +enum bnx2x_vlan_mac_cmd { + BNX2X_VLAN_MAC_ADD, + BNX2X_VLAN_MAC_DEL, + BNX2X_VLAN_MAC_MOVE, +}; + +struct bnx2x_vlan_mac_data { + /* Requested command: BNX2X_VLAN_MAC_XX */ + enum bnx2x_vlan_mac_cmd cmd; + /* used to contain the data related vlan_mac_flags bits from + * ramrod parameters. + */ + unsigned long vlan_mac_flags; + + /* Needed for MOVE command */ + struct bnx2x_vlan_mac_obj *target_obj; + + union bnx2x_classification_ramrod_data u; +}; + +/*************************** Exe Queue obj ************************************/ +union bnx2x_exe_queue_cmd_data { + struct bnx2x_vlan_mac_data vlan_mac; + + struct { + /* TODO */ + } mcast; +}; + +struct bnx2x_exeq_elem { + struct list_head link; + + /* Length of this element in the exe_chunk. */ + int cmd_len; + + union bnx2x_exe_queue_cmd_data cmd_data; +}; + +union bnx2x_qable_obj; + +union bnx2x_exeq_comp_elem { + union event_ring_elem *elem; +}; + +struct bnx2x_exe_queue_obj; + +typedef int (*exe_q_validate)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct bnx2x_exeq_elem *elem); + +typedef int (*exe_q_remove)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct bnx2x_exeq_elem *elem); + +/* Return positive if entry was optimized, 0 - if not, negative + * in case of an error. + */ +typedef int (*exe_q_optimize)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct bnx2x_exeq_elem *elem); +typedef int (*exe_q_execute)(struct bnx2x *bp, + union bnx2x_qable_obj *o, + struct list_head *exe_chunk, + unsigned long *ramrod_flags); +typedef struct bnx2x_exeq_elem * + (*exe_q_get)(struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem); + +struct bnx2x_exe_queue_obj { + /* Commands pending for an execution. */ + struct list_head exe_queue; + + /* Commands pending for an completion. 
*/ + struct list_head pending_comp; + + spinlock_t lock; + + /* Maximum length of commands' list for one execution */ + int exe_chunk_len; + + union bnx2x_qable_obj *owner; + + /****** Virtual functions ******/ + /** + * Called before commands execution for commands that are really + * going to be executed (after 'optimize'). + * + * Must run under exe_queue->lock + */ + exe_q_validate validate; + + /** + * Called before removing pending commands, cleaning allocated + * resources (e.g., credits from validate) + */ + exe_q_remove remove; + + /** + * This will try to cancel the current pending commands list + * considering the new command. + * + * Returns the number of optimized commands or a negative error code + * + * Must run under exe_queue->lock + */ + exe_q_optimize optimize; + + /** + * Run the next commands chunk (owner specific). + */ + exe_q_execute execute; + + /** + * Return the exe_queue element containing the specific command + * if any. Otherwise return NULL. + */ + exe_q_get get; +}; +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* + * Element in the VLAN_MAC registry list having all currently configured + * rules. + */ +struct bnx2x_vlan_mac_registry_elem { + struct list_head link; + + /* Used to store the cam offset used for the mac/vlan/vlan-mac. + * Relevant for 57710 and 57711 only. VLANs and MACs share the + * same CAM for these chips. + */ + int cam_offset; + + /* Needed for DEL and RESTORE flows */ + unsigned long vlan_mac_flags; + + union bnx2x_classification_ramrod_data u; +}; + +/* Bits representing VLAN_MAC commands specific flags */ +enum { + BNX2X_UC_LIST_MAC, + BNX2X_ETH_MAC, + BNX2X_ISCSI_ETH_MAC, + BNX2X_NETQ_ETH_MAC, + BNX2X_DONT_CONSUME_CAM_CREDIT, + BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, +}; +/* When looking for matching filters, some flags are not interesting */ +#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \ + 1 << BNX2X_ETH_MAC | \ + 1 << BNX2X_ISCSI_ETH_MAC | \ + 1 << BNX2X_NETQ_ETH_MAC) +#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ + ((flags) & BNX2X_VLAN_MAC_CMP_MASK) + +struct bnx2x_vlan_mac_ramrod_params { + /* Object to run the command from */ + struct bnx2x_vlan_mac_obj *vlan_mac_obj; + + /* General command flags: COMP_WAIT, etc. */ + unsigned long ramrod_flags; + + /* Command specific configuration request */ + struct bnx2x_vlan_mac_data user_req; +}; + +struct bnx2x_vlan_mac_obj { + struct bnx2x_raw_obj raw; + + /* Bookkeeping list: will prevent the addition of already existing + * entries. + */ + struct list_head head; + /* Implement a simple reader/writer lock on the head list. + * all these fields should only be accessed under the exe_queue lock + */ + u8 head_reader; /* Num. of readers accessing head list */ + bool head_exe_request; /* Pending execution request. */ + unsigned long saved_ramrod_flags; /* Ramrods of pending execution */ + + /* TODO: Add it's initialization in the init functions */ + struct bnx2x_exe_queue_obj exe_queue; + + /* MACs credit pool */ + struct bnx2x_credit_pool_obj *macs_pool; + + /* VLANs credit pool */ + struct bnx2x_credit_pool_obj *vlans_pool; + + /* RAMROD command to be used */ + int ramrod_cmd; + + /* copy first n elements onto preallocated buffer + * + * @param n number of elements to get + * @param buf buffer preallocated by caller into which elements + * will be copied. Note elements are 4-byte aligned + * so buffer size must be able to accommodate the + * aligned elements. 
+ * + * @return number of copied bytes + */ + int (*get_n_elements)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int n, u8 *base, + u8 stride, u8 size); + + /** + * Checks if ADD-ramrod with the given params may be performed. + * + * @return zero if the element may be added + */ + + int (*check_add)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data); + + /** + * Checks if DEL-ramrod with the given params may be performed. + * + * @return true if the element may be deleted + */ + struct bnx2x_vlan_mac_registry_elem * + (*check_del)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data); + + /** + * Checks if DEL-ramrod with the given params may be performed. + * + * @return true if the element may be deleted + */ + bool (*check_move)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *src_o, + struct bnx2x_vlan_mac_obj *dst_o, + union bnx2x_classification_ramrod_data *data); + + /** + * Update the relevant credit object(s) (consume/return + * correspondingly). + */ + bool (*get_credit)(struct bnx2x_vlan_mac_obj *o); + bool (*put_credit)(struct bnx2x_vlan_mac_obj *o); + bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset); + bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset); + + /** + * Configures one rule in the ramrod data buffer. + */ + void (*set_one_rule)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset); + + /** + * Delete all configured elements having the given + * vlan_mac_flags specification. Assumes no pending for + * execution commands. Will schedule all all currently + * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags + * specification for deletion and will use the given + * ramrod_flags for the last DEL operation. + * + * @param bp + * @param o + * @param ramrod_flags RAMROD_XX flags + * + * @return 0 if the last operation has completed successfully + * and there are no more elements left, positive value + * if there are pending for completion commands, + * negative value in case of failure. + */ + int (*delete_all)(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags); + + /** + * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously + * configured elements list. + * + * @param bp + * @param p Command parameters (RAMROD_COMP_WAIT bit in + * ramrod_flags is only taken into an account) + * @param ppos a pointer to the cookie that should be given back in the + * next call to make function handle the next element. If + * *ppos is set to NULL it will restart the iterator. + * If returned *ppos == NULL this means that the last + * element has been handled. + * + * @return int + */ + int (*restore)(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p, + struct bnx2x_vlan_mac_registry_elem **ppos); + + /** + * Should be called on a completion arrival. + * + * @param bp + * @param o + * @param cqe Completion element we are handling + * @param ramrod_flags if RAMROD_CONT is set the next bulk of + * pending commands will be executed. + * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE + * may also be set if needed. + * + * @return 0 if there are neither pending nor waiting for + * completion commands. Positive value if there are + * pending for execution or for completion commands. + * Negative value in case of an error (including an + * error in the cqe). 
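+ *
+ * For reference, a minimal ADD request through the public API, an
+ * editor's sketch mirroring the callers later in this patch ("ramrod"
+ * and "addr" are hypothetical locals):
+ *
+ *    struct bnx2x_vlan_mac_ramrod_params ramrod;
+ *
+ *    memset(&ramrod, 0, sizeof(ramrod));
+ *    ramrod.vlan_mac_obj = o;
+ *    ramrod.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *    memcpy(ramrod.user_req.u.mac.mac, addr, ETH_ALEN);
+ *    set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+ *    set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+ *    rc = bnx2x_config_vlan_mac(bp, &ramrod);
+ *
+ * With RAMROD_COMP_WAIT set, the call is expected to return only
+ * after complete() below has observed the corresponding completion.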
+ */ + int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags); + + /** + * Wait for completion of all commands. Don't schedule new ones, + * just wait. It assumes that the completion code will schedule + * for new commands. + */ + int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o); +}; + +enum { + BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0, + BNX2X_LLH_CAM_ETH_LINE, + BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 +}; + +/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ + +/* RX_MODE ramrod special flags: set in rx_mode_flags field in + * a bnx2x_rx_mode_ramrod_params. + */ +enum { + BNX2X_RX_MODE_FCOE_ETH, + BNX2X_RX_MODE_ISCSI_ETH, +}; + +enum { + BNX2X_ACCEPT_UNICAST, + BNX2X_ACCEPT_MULTICAST, + BNX2X_ACCEPT_ALL_UNICAST, + BNX2X_ACCEPT_ALL_MULTICAST, + BNX2X_ACCEPT_BROADCAST, + BNX2X_ACCEPT_UNMATCHED, + BNX2X_ACCEPT_ANY_VLAN +}; + +struct bnx2x_rx_mode_ramrod_params { + struct bnx2x_rx_mode_obj *rx_mode_obj; + unsigned long *pstate; + int state; + u8 cl_id; + u32 cid; + u8 func_id; + unsigned long ramrod_flags; + unsigned long rx_mode_flags; + + /* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to + * a tstorm_eth_mac_filter_config (e1x). + */ + void *rdata; + dma_addr_t rdata_mapping; + + /* Rx mode settings */ + unsigned long rx_accept_flags; + + /* internal switching settings */ + unsigned long tx_accept_flags; +}; + +struct bnx2x_rx_mode_obj { + int (*config_rx_mode)(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); + + int (*wait_comp)(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); +}; + +/********************** Set multicast group ***********************************/ + +struct bnx2x_mcast_list_elem { + struct list_head link; + u8 *mac; +}; + +union bnx2x_mcast_config_data { + u8 *mac; + u8 bin; /* used in a RESTORE flow */ +}; + +struct bnx2x_mcast_ramrod_params { + struct bnx2x_mcast_obj *mcast_obj; + + /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ + unsigned long ramrod_flags; + + struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */ + /** TODO: + * - rename it to macs_num. + * - Add a new command type for handling pending commands + * (remove "zero semantics"). + * + * Length of mcast_list. If zero and ADD_CONT command - post + * pending commands. + */ + int mcast_list_len; +}; + +enum bnx2x_mcast_cmd { + BNX2X_MCAST_CMD_ADD, + BNX2X_MCAST_CMD_CONT, + BNX2X_MCAST_CMD_DEL, + BNX2X_MCAST_CMD_RESTORE, +}; + +struct bnx2x_mcast_obj { + struct bnx2x_raw_obj raw; + + union { + struct { + #define BNX2X_MCAST_BINS_NUM 256 + #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64) + u64 vec[BNX2X_MCAST_VEC_SZ]; + + /** Number of BINs to clear. Should be updated + * immediately when a command arrives in order to + * properly create DEL commands. + */ + int num_bins_set; + } aprox_match; + + struct { + struct list_head macs; + int num_macs_set; + } exact_match; + } registry; + + /* Pending commands */ + struct list_head pending_cmds_head; + + /* A state that is set in raw.pstate, when there are pending commands */ + int sched_state; + + /* Maximal number of mcast MACs configured in one command */ + int max_cmd_len; + + /* Total number of currently pending MACs to configure: both + * in the pending commands list and in the current command. 
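+ *
+ * Editor's sketch of the public flow feeding this object, modelled on
+ * the callers later in this patch ("mcast", "elem", "addr" and "obj"
+ * are hypothetical locals):
+ *
+ *    struct bnx2x_mcast_ramrod_params mcast;
+ *    struct bnx2x_mcast_list_elem elem;
+ *
+ *    memset(&mcast, 0, sizeof(mcast));
+ *    mcast.mcast_obj = obj;
+ *    set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+ *    INIT_LIST_HEAD(&mcast.mcast_list);
+ *    elem.mac = addr;
+ *    list_add_tail(&elem.link, &mcast.mcast_list);
+ *    mcast.mcast_list_len = 1;
+ *    rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+ *
+ * Requests that do not fit in a single ramrod are split and the
+ * remainder is queued; total_pending_num below tracks both parts.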
+ */ + int total_pending_num; + + u8 engine_id; + + /** + * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) + */ + int (*config_mcast)(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); + + /** + * Fills the ramrod data during the RESTORE flow. + * + * @param bp + * @param o + * @param start_idx Registry index to start from + * @param rdata_idx Index in the ramrod data to start from + * + * @return -1 if we handled the whole registry or index of the last + * handled registry element. + */ + int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, + int start_bin, int *rdata_idx); + + int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); + + void (*set_one_rule)(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, + enum bnx2x_mcast_cmd cmd); + + /** Checks if there are more mcast MACs to be set or a previous + * command is still pending. + */ + bool (*check_pending)(struct bnx2x_mcast_obj *o); + + /** + * Set/Clear/Check SCHEDULED state of the object + */ + void (*set_sched)(struct bnx2x_mcast_obj *o); + void (*clear_sched)(struct bnx2x_mcast_obj *o); + bool (*check_sched)(struct bnx2x_mcast_obj *o); + + /* Wait until all pending commands complete */ + int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o); + + /** + * Handle the internal object counters needed for proper + * commands handling. Checks that the provided parameters are + * feasible. + */ + int (*validate)(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); + + /** + * Restore the values of internal counters in case of a failure. + */ + void (*revert)(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int old_num_bins); + + int (*get_registry_size)(struct bnx2x_mcast_obj *o); + void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n); +}; + +/*************************** Credit handling **********************************/ +struct bnx2x_credit_pool_obj { + + /* Current amount of credit in the pool */ + atomic_t credit; + + /* Maximum allowed credit. put() will check against it. */ + int pool_sz; + + /* Allocate a pool table statically. + * + * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272) + * + * The set bit in the table will mean that the entry is available. + */ +#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64) + u64 pool_mirror[BNX2X_POOL_VEC_SIZE]; + + /* Base pool offset (initialized differently */ + int base_pool_offset; + + /** + * Get the next free pool entry. + * + * @return true if there was a free entry in the pool + */ + bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry); + + /** + * Return the entry back to the pool. + * + * @return true if entry is legal and has been successfully + * returned to the pool. + */ + bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry); + + /** + * Get the requested amount of credit from the pool. + * + * @param cnt Amount of requested credit + * @return true if the operation is successful + */ + bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt); + + /** + * Returns the credit to the pool. + * + * @param cnt Amount of credit to return + * @return true if the operation is successful + */ + bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt); + + /** + * Reads the current amount of credit. 
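+ *
+ * Editor's sketch of typical pool usage, mirroring the VLAN credit
+ * handling later in this patch ("pool" is a hypothetical pointer to
+ * a bnx2x_credit_pool_obj):
+ *
+ *    int avail;
+ *
+ *    if (pool->get(pool, 2)) {
+ *            ...use the two reserved credits...
+ *            pool->put(pool, 2);
+ *    }
+ *    avail = pool->check(pool);
+ *
+ * get() is expected to return false, rather than block, when fewer
+ * credits than requested are available.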
+ */ + int (*check)(struct bnx2x_credit_pool_obj *o); +}; + +/*************************** RSS configuration ********************************/ +enum { + /* RSS_MODE bits are mutually exclusive */ + BNX2X_RSS_MODE_DISABLED, + BNX2X_RSS_MODE_REGULAR, + + BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ + + BNX2X_RSS_IPV4, + BNX2X_RSS_IPV4_TCP, + BNX2X_RSS_IPV4_UDP, + BNX2X_RSS_IPV6, + BNX2X_RSS_IPV6_TCP, + BNX2X_RSS_IPV6_UDP, + BNX2X_RSS_GRE_INNER_HDRS, +}; + +struct bnx2x_config_rss_params { + struct bnx2x_rss_config_obj *rss_obj; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* BNX2X_RSS_X bits */ + unsigned long rss_flags; + + /* Number hash bits to take into an account */ + u8 rss_result_mask; + + /* Indirection table */ + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + + /* RSS hash values */ + u32 rss_key[10]; + + /* valid only iff BNX2X_RSS_UPDATE_TOE is set */ + u16 toe_rss_bitmap; +}; + +struct bnx2x_rss_config_obj { + struct bnx2x_raw_obj raw; + + /* RSS engine to use */ + u8 engine_id; + + /* Last configured indirection table */ + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + + /* flags for enabling 4-tupple hash on UDP */ + u8 udp_rss_v4; + u8 udp_rss_v6; + + int (*config_rss)(struct bnx2x *bp, + struct bnx2x_config_rss_params *p); +}; + +/*********************** Queue state update ***********************************/ + +/* UPDATE command options */ +enum { + BNX2X_Q_UPDATE_IN_VLAN_REM, + BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG, + BNX2X_Q_UPDATE_OUT_VLAN_REM, + BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG, + BNX2X_Q_UPDATE_ANTI_SPOOF, + BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, + BNX2X_Q_UPDATE_ACTIVATE, + BNX2X_Q_UPDATE_ACTIVATE_CHNG, + BNX2X_Q_UPDATE_DEF_VLAN_EN, + BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + BNX2X_Q_UPDATE_SILENT_VLAN_REM, + BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + BNX2X_Q_UPDATE_TX_SWITCHING, + BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + BNX2X_Q_UPDATE_PTP_PKTS, +}; + +/* Allowed Queue states */ +enum bnx2x_q_state { + BNX2X_Q_STATE_RESET, + BNX2X_Q_STATE_INITIALIZED, + BNX2X_Q_STATE_ACTIVE, + BNX2X_Q_STATE_MULTI_COS, + BNX2X_Q_STATE_MCOS_TERMINATED, + BNX2X_Q_STATE_INACTIVE, + BNX2X_Q_STATE_STOPPED, + BNX2X_Q_STATE_TERMINATED, + BNX2X_Q_STATE_FLRED, + BNX2X_Q_STATE_MAX, +}; + +/* Allowed Queue states */ +enum bnx2x_q_logical_state { + BNX2X_Q_LOGICAL_STATE_ACTIVE, + BNX2X_Q_LOGICAL_STATE_STOPPED, +}; + +/* Allowed commands */ +enum bnx2x_queue_cmd { + BNX2X_Q_CMD_INIT, + BNX2X_Q_CMD_SETUP, + BNX2X_Q_CMD_SETUP_TX_ONLY, + BNX2X_Q_CMD_DEACTIVATE, + BNX2X_Q_CMD_ACTIVATE, + BNX2X_Q_CMD_UPDATE, + BNX2X_Q_CMD_UPDATE_TPA, + BNX2X_Q_CMD_HALT, + BNX2X_Q_CMD_CFC_DEL, + BNX2X_Q_CMD_TERMINATE, + BNX2X_Q_CMD_EMPTY, + BNX2X_Q_CMD_MAX, +}; + +/* queue SETUP + INIT flags */ +enum { + BNX2X_Q_FLG_TPA, + BNX2X_Q_FLG_TPA_IPV6, + BNX2X_Q_FLG_TPA_GRO, + BNX2X_Q_FLG_STATS, + BNX2X_Q_FLG_ZERO_STATS, + BNX2X_Q_FLG_ACTIVE, + BNX2X_Q_FLG_OV, + BNX2X_Q_FLG_VLAN, + BNX2X_Q_FLG_COS, + BNX2X_Q_FLG_HC, + BNX2X_Q_FLG_HC_EN, + BNX2X_Q_FLG_DHC, + BNX2X_Q_FLG_FCOE, + BNX2X_Q_FLG_LEADING_RSS, + BNX2X_Q_FLG_MCAST, + BNX2X_Q_FLG_DEF_VLAN, + BNX2X_Q_FLG_TX_SWITCH, + BNX2X_Q_FLG_TX_SEC, + BNX2X_Q_FLG_ANTI_SPOOF, + BNX2X_Q_FLG_SILENT_VLAN_REM, + BNX2X_Q_FLG_FORCE_DEFAULT_PRI, + BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, + BNX2X_Q_FLG_PCSUM_ON_PKT, + BNX2X_Q_FLG_TUN_INC_INNER_IP_ID +}; + +/* Queue type options: queue type may be a combination of below. */ +enum bnx2x_q_type { + /** TODO: Consider moving both these flags into the init() + * ramrod params. 
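+ *
+ * Editor's illustration of such a combination ("q_type" is a
+ * hypothetical local): the bitmap later handed to
+ * bnx2x_init_queue_obj() as its "type" argument can be built with
+ *
+ *    unsigned long q_type = 0;
+ *
+ *    __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ *    __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);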
+ */ + BNX2X_Q_TYPE_HAS_RX, + BNX2X_Q_TYPE_HAS_TX, +}; + +#define BNX2X_PRIMARY_CID_INDEX 0 +#define BNX2X_MULTI_TX_COS_E1X 3 /* QM only */ +#define BNX2X_MULTI_TX_COS_E2_E3A0 2 +#define BNX2X_MULTI_TX_COS_E3B0 3 +#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ + +#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) +/* DMAE channel to be used by FW for timesync workaroun. A driver that sends + * timesync-related ramrods must not use this DMAE command ID. + */ +#define FW_DMAE_CMD_ID 6 + +struct bnx2x_queue_init_params { + struct { + unsigned long flags; + u16 hc_rate; + u8 fw_sb_id; + u8 sb_cq_index; + } tx; + + struct { + unsigned long flags; + u16 hc_rate; + u8 fw_sb_id; + u8 sb_cq_index; + } rx; + + /* CID context in the host memory */ + struct eth_context *cxts[BNX2X_MULTI_TX_COS]; + + /* maximum number of cos supported by hardware */ + u8 max_cos; +}; + +struct bnx2x_queue_terminate_params { + /* index within the tx_only cids of this queue object */ + u8 cid_index; +}; + +struct bnx2x_queue_cfc_del_params { + /* index within the tx_only cids of this queue object */ + u8 cid_index; +}; + +struct bnx2x_queue_update_params { + unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */ + u16 def_vlan; + u16 silent_removal_value; + u16 silent_removal_mask; +/* index within the tx_only cids of this queue object */ + u8 cid_index; +}; + +struct bnx2x_queue_update_tpa_params { + dma_addr_t sge_map; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_pkt; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u8 _pad; + + u16 sge_buff_sz; + u16 max_agg_sz; + + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; +}; + +struct rxq_pause_params { + u16 bd_th_lo; + u16 bd_th_hi; + u16 rcq_th_lo; + u16 rcq_th_hi; + u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */ + u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */ + u16 pri_map; +}; + +/* general */ +struct bnx2x_general_setup_params { + /* valid iff BNX2X_Q_FLG_STATS */ + u8 stat_id; + + u8 spcl_id; + u16 mtu; + u8 cos; + + u8 fp_hsi; +}; + +struct bnx2x_rxq_setup_params { + /* dma */ + dma_addr_t dscr_map; + dma_addr_t sge_map; + dma_addr_t rcq_map; + dma_addr_t rcq_np_map; + + u16 drop_flags; + u16 buf_sz; + u8 fw_sb_id; + u8 cl_qzone_id; + + /* valid iff BNX2X_Q_FLG_TPA */ + u16 tpa_agg_sz; + u16 sge_buf_sz; + u8 max_sges_pkt; + u8 max_tpa_queues; + u8 rss_engine_id; + + /* valid iff BNX2X_Q_FLG_MCAST */ + u8 mcast_engine_id; + + u8 cache_line_log; + + u8 sb_cq_index; + + /* valid iff BXN2X_Q_FLG_SILENT_VLAN_REM */ + u16 silent_removal_value; + u16 silent_removal_mask; +}; + +struct bnx2x_txq_setup_params { + /* dma */ + dma_addr_t dscr_map; + + u8 fw_sb_id; + u8 sb_cq_index; + u8 cos; /* valid iff BNX2X_Q_FLG_COS */ + u16 traffic_type; + /* equals to the leading rss client id, used for TX classification*/ + u8 tss_leading_cl_id; + + /* valid iff BNX2X_Q_FLG_DEF_VLAN */ + u16 default_vlan; +}; + +struct bnx2x_queue_setup_params { + struct bnx2x_general_setup_params gen_params; + struct bnx2x_txq_setup_params txq_params; + struct bnx2x_rxq_setup_params rxq_params; + struct rxq_pause_params pause_params; + unsigned long flags; +}; + +struct bnx2x_queue_setup_tx_only_params { + struct bnx2x_general_setup_params gen_params; + struct bnx2x_txq_setup_params txq_params; + unsigned long flags; + /* index within the tx_only cids of this queue object */ + u8 cid_index; +}; + +struct bnx2x_queue_state_params { + struct bnx2x_queue_sp_obj *q_obj; + + /* Current command */ + enum bnx2x_queue_cmd cmd; + + /* may have 
RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* Params according to the current command */ + union { + struct bnx2x_queue_update_params update; + struct bnx2x_queue_update_tpa_params update_tpa; + struct bnx2x_queue_setup_params setup; + struct bnx2x_queue_init_params init; + struct bnx2x_queue_setup_tx_only_params tx_only; + struct bnx2x_queue_terminate_params terminate; + struct bnx2x_queue_cfc_del_params cfc_del; + } params; +}; + +struct bnx2x_viflist_params { + u8 echo_res; + u8 func_bit_map_res; +}; + +struct bnx2x_queue_sp_obj { + u32 cids[BNX2X_MULTI_TX_COS]; + u8 cl_id; + u8 func_id; + + /* number of traffic classes supported by queue. + * The primary connection of the queue supports the first traffic + * class. Any further traffic class is supported by a tx-only + * connection. + * + * Therefore max_cos is also a number of valid entries in the cids + * array. + */ + u8 max_cos; + u8 num_tx_only, next_tx_only; + + enum bnx2x_q_state state, next_state; + + /* bits from enum bnx2x_q_type */ + unsigned long type; + + /* BNX2X_Q_CMD_XX bits. This object implements "one + * pending" paradigm but for debug and tracing purposes it's + * more convenient to have different bits for different + * commands. + */ + unsigned long pending; + + /* Buffer to use as a ramrod data and its mapping */ + void *rdata; + dma_addr_t rdata_mapping; + + /** + * Performs one state change according to the given parameters. + * + * @return 0 in case of success and negative value otherwise. + */ + int (*send_cmd)(struct bnx2x *bp, + struct bnx2x_queue_state_params *params); + + /** + * Sets the pending bit according to the requested transition. + */ + int (*set_pending)(struct bnx2x_queue_sp_obj *o, + struct bnx2x_queue_state_params *params); + + /** + * Checks that the requested state transition is legal. + */ + int (*check_transition)(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + struct bnx2x_queue_state_params *params); + + /** + * Completes the pending command. + */ + int (*complete_cmd)(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + enum bnx2x_queue_cmd); + + int (*wait_comp)(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *o, + enum bnx2x_queue_cmd cmd); +}; + +/********************** Function state update *********************************/ + +/* UPDATE command options */ +enum { + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, + BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, + BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, + BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, +}; + +/* Allowed Function states */ +enum bnx2x_func_state { + BNX2X_F_STATE_RESET, + BNX2X_F_STATE_INITIALIZED, + BNX2X_F_STATE_STARTED, + BNX2X_F_STATE_TX_STOPPED, + BNX2X_F_STATE_MAX, +}; + +/* Allowed Function commands */ +enum bnx2x_func_cmd { + BNX2X_F_CMD_HW_INIT, + BNX2X_F_CMD_START, + BNX2X_F_CMD_STOP, + BNX2X_F_CMD_HW_RESET, + BNX2X_F_CMD_AFEX_UPDATE, + BNX2X_F_CMD_AFEX_VIFLISTS, + BNX2X_F_CMD_TX_STOP, + BNX2X_F_CMD_TX_START, + BNX2X_F_CMD_SWITCH_UPDATE, + BNX2X_F_CMD_SET_TIMESYNC, + BNX2X_F_CMD_MAX, +}; + +struct bnx2x_func_hw_init_params { + /* A load phase returned by MCP. + * + * May be: + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP + * FW_MSG_CODE_DRV_LOAD_COMMON + * FW_MSG_CODE_DRV_LOAD_PORT + * FW_MSG_CODE_DRV_LOAD_FUNCTION + */ + u32 load_phase; +}; + +struct bnx2x_func_hw_reset_params { + /* A load phase returned by MCP. 
+ * + * May be: + * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP + * FW_MSG_CODE_DRV_LOAD_COMMON + * FW_MSG_CODE_DRV_LOAD_PORT + * FW_MSG_CODE_DRV_LOAD_FUNCTION + */ + u32 reset_phase; +}; + +struct bnx2x_func_start_params { + /* Multi Function mode: + * - Single Function + * - Switch Dependent + * - Switch Independent + */ + u16 mf_mode; + + /* Switch Dependent mode outer VLAN tag */ + u16 sd_vlan_tag; + + /* Function cos mode */ + u8 network_cos_mode; + + /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */ + u8 tunnel_mode; + + /* tunneling classification enablement */ + u8 tunn_clss_en; + + /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ + u8 gre_tunnel_type; + + /* Enables Inner GRE RSS on the function, depends on the client RSS + * capailities + */ + u8 inner_gre_rss_en; + + /* Allows accepting of packets failing MF classification, possibly + * only matching a given ethertype + */ + u8 class_fail; + u16 class_fail_ethtype; + + /* Override priority of output packets */ + u8 sd_vlan_force_pri; + u8 sd_vlan_force_pri_val; + + /* Replace vlan's ethertype */ + u16 sd_vlan_eth_type; + + /* Prevent inner vlans from being added by FW */ + u8 no_added_tags; +}; + +struct bnx2x_func_switch_update_params { + unsigned long changes; /* BNX2X_F_UPDATE_XX bits */ + u16 vlan; + u16 vlan_eth_type; + u8 vlan_force_prio; + u8 tunnel_mode; + u8 gre_tunnel_type; +}; + +struct bnx2x_func_afex_update_params { + u16 vif_id; + u16 afex_default_vlan; + u8 allowed_priorities; +}; + +struct bnx2x_func_afex_viflists_params { + u16 vif_list_index; + u8 func_bit_map; + u8 afex_vif_list_command; + u8 func_to_clear; +}; + +struct bnx2x_func_tx_start_params { + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; + u8 dcb_enabled; + u8 dcb_version; + u8 dont_add_pri_0_en; +}; + +struct bnx2x_func_set_timesync_params { + /* Reset, set or keep the current drift value */ + u8 drift_adjust_cmd; + + /* Dec, inc or keep the current offset */ + u8 offset_cmd; + + /* Drift value direction */ + u8 add_sub_drift_adjust_value; + + /* Drift, period and offset values to be used according to the commands + * above. + */ + u8 drift_adjust_value; + u32 drift_adjust_period; + u64 offset_delta; +}; + +struct bnx2x_func_state_params { + struct bnx2x_func_sp_obj *f_obj; + + /* Current command */ + enum bnx2x_func_cmd cmd; + + /* may have RAMROD_COMP_WAIT set only */ + unsigned long ramrod_flags; + + /* Params according to the current command */ + union { + struct bnx2x_func_hw_init_params hw_init; + struct bnx2x_func_hw_reset_params hw_reset; + struct bnx2x_func_start_params start; + struct bnx2x_func_switch_update_params switch_update; + struct bnx2x_func_afex_update_params afex_update; + struct bnx2x_func_afex_viflists_params afex_viflists; + struct bnx2x_func_tx_start_params tx_start; + struct bnx2x_func_set_timesync_params set_timesync; + } params; +}; + +struct bnx2x_func_sp_drv_ops { + /* Init tool + runtime initialization: + * - Common Chip + * - Common (per Path) + * - Port + * - Function phases + */ + int (*init_hw_cmn_chip)(struct bnx2x *bp); + int (*init_hw_cmn)(struct bnx2x *bp); + int (*init_hw_port)(struct bnx2x *bp); + int (*init_hw_func)(struct bnx2x *bp); + + /* Reset Function HW: Common, Port, Function phases. 
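+ *
+ * Editor's note: these hooks are expected to be driven by the
+ * function state machine below. A reset request from the driver side
+ * might look roughly like this sketch ("f_obj", "reset_phase" and
+ * "func_params" are hypothetical locals):
+ *
+ *    struct bnx2x_func_state_params func_params;
+ *
+ *    memset(&func_params, 0, sizeof(func_params));
+ *    func_params.f_obj = f_obj;
+ *    func_params.cmd = BNX2X_F_CMD_HW_RESET;
+ *    func_params.params.hw_reset.reset_phase = reset_phase;
+ *    set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ *    rc = bnx2x_func_state_change(bp, &func_params);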
*/ + void (*reset_hw_cmn)(struct bnx2x *bp); + void (*reset_hw_port)(struct bnx2x *bp); + void (*reset_hw_func)(struct bnx2x *bp); + + /* Init/Free GUNZIP resources */ + int (*gunzip_init)(struct bnx2x *bp); + void (*gunzip_end)(struct bnx2x *bp); + + /* Prepare/Release FW resources */ + int (*init_fw)(struct bnx2x *bp); + void (*release_fw)(struct bnx2x *bp); +}; + +struct bnx2x_func_sp_obj { + enum bnx2x_func_state state, next_state; + + /* BNX2X_FUNC_CMD_XX bits. This object implements "one + * pending" paradigm but for debug and tracing purposes it's + * more convenient to have different bits for different + * commands. + */ + unsigned long pending; + + /* Buffer to use as a ramrod data and its mapping */ + void *rdata; + dma_addr_t rdata_mapping; + + /* Buffer to use as a afex ramrod data and its mapping. + * This can't be same rdata as above because afex ramrod requests + * can arrive to the object in parallel to other ramrod requests. + */ + void *afex_rdata; + dma_addr_t afex_rdata_mapping; + + /* this mutex validates that when pending flag is taken, the next + * ramrod to be sent will be the one set the pending bit + */ + struct mutex one_pending_mutex; + + /* Driver interface */ + struct bnx2x_func_sp_drv_ops *drv; + + /** + * Performs one state change according to the given parameters. + * + * @return 0 in case of success and negative value otherwise. + */ + int (*send_cmd)(struct bnx2x *bp, + struct bnx2x_func_state_params *params); + + /** + * Checks that the requested state transition is legal. + */ + int (*check_transition)(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + struct bnx2x_func_state_params *params); + + /** + * Completes the pending command. + */ + int (*complete_cmd)(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd); + + int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o, + enum bnx2x_func_cmd cmd); +}; + +/********************** Interfaces ********************************************/ +/* Queueable objects set */ +union bnx2x_qable_obj { + struct bnx2x_vlan_mac_obj vlan_mac; +}; +/************** Function state update *********/ +void bnx2x_init_func_obj(struct bnx2x *bp, + struct bnx2x_func_sp_obj *obj, + void *rdata, dma_addr_t rdata_mapping, + void *afex_rdata, dma_addr_t afex_rdata_mapping, + struct bnx2x_func_sp_drv_ops *drv_iface); + +int bnx2x_func_state_change(struct bnx2x *bp, + struct bnx2x_func_state_params *params); + +enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp, + struct bnx2x_func_sp_obj *o); +/******************* Queue State **************/ +void bnx2x_init_queue_obj(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids, + u8 cid_cnt, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, unsigned long type); + +int bnx2x_queue_state_change(struct bnx2x *bp, + struct bnx2x_queue_state_params *params); + +int bnx2x_get_q_logical_state(struct bnx2x *bp, + struct bnx2x_queue_sp_obj *obj); + +/********************* VLAN-MAC ****************/ +void bnx2x_init_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool); + +void bnx2x_init_vlan_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *vlans_pool); + +int 
bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o); +int bnx2x_config_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p); + +int bnx2x_vlan_mac_move(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p, + struct bnx2x_vlan_mac_obj *dest_o); + +/********************* RX MODE ****************/ + +void bnx2x_init_rx_mode_obj(struct bnx2x *bp, + struct bnx2x_rx_mode_obj *o); + +/** + * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. + * + * @p: Command parameters + * + * Return: 0 - if operation was successful and there is no pending completions, + * positive number - if there are pending completions, + * negative - if there were errors + */ +int bnx2x_config_rx_mode(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); + +/****************** MULTICASTS ****************/ + +void bnx2x_init_mcast_obj(struct bnx2x *bp, + struct bnx2x_mcast_obj *mcast_obj, + u8 mcast_cl_id, u32 mcast_cid, u8 func_id, + u8 engine_id, void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + bnx2x_obj_type type); + +/** + * bnx2x_config_mcast - Configure multicast MACs list. + * + * @cmd: command to execute: BNX2X_MCAST_CMD_X + * + * May configure a new list + * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up + * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current + * configuration, continue to execute the pending commands + * (BNX2X_MCAST_CMD_CONT). + * + * If previous command is still pending or if number of MACs to + * configure is more that maximum number of MACs in one command, + * the current command will be enqueued to the tail of the + * pending commands list. + * + * Return: 0 is operation was successful and there are no pending completions, + * negative if there were errors, positive if there are pending + * completions. + */ +int bnx2x_config_mcast(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + enum bnx2x_mcast_cmd cmd); + +/****************** CREDIT POOL ****************/ +void bnx2x_init_mac_credit_pool(struct bnx2x *bp, + struct bnx2x_credit_pool_obj *p, u8 func_id, + u8 func_num); +void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, + struct bnx2x_credit_pool_obj *p, u8 func_id, + u8 func_num); + +/****************** RSS CONFIGURATION ****************/ +void bnx2x_init_rss_config_obj(struct bnx2x *bp, + struct bnx2x_rss_config_obj *rss_obj, + u8 cl_id, u32 cid, u8 func_id, u8 engine_id, + void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, + bnx2x_obj_type type); + +/** + * bnx2x_config_rss - Updates RSS configuration according to provided parameters + * + * Return: 0 in case of success + */ +int bnx2x_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *p); + +/** + * bnx2x_get_rss_ind_table - Return the current ind_table configuration. + * + * @ind_table: buffer to fill with the current indirection + * table content. Should be at least + * T_ETH_INDIRECTION_TABLE_SIZE bytes long. 
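+ *
+ * Editor's sketch of the surrounding RSS flow (hypothetical locals;
+ * the flag choice is only an example):
+ *
+ *    struct bnx2x_config_rss_params params;
+ *    u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ *
+ *    memset(&params, 0, sizeof(params));
+ *    params.rss_obj = rss_obj;
+ *    set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+ *    __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+ *    __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ *    rc = bnx2x_config_rss(bp, &params);
+ *    bnx2x_get_rss_ind_table(rss_obj, ind_table);
+ *
+ * A real request would also fill rss_result_mask, the ind_table[]
+ * array and rss_key[], omitted here for brevity.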
+ */ +void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, + u8 *ind_table); + +#endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c new file mode 100644 index 000000000..f67348d16 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -0,0 +1,3140 @@ +/* bnx2x_sriov.c: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Ariel Elior + * Written by: Shmulik Ravid + * Ariel Elior + * + */ +#include "bnx2x.h" +#include "bnx2x_init.h" +#include "bnx2x_cmn.h" +#include "bnx2x_sp.h" +#include +#include + +static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, + struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin, + bool test_queue); + +/* General service functions */ +static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, + u16 pf_id) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), + pf_id); +} + +static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, + u8 enable) +{ + REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), + enable); + REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), + enable); +} + +int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) +{ + int idx; + + for_each_vf(bp, idx) + if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) + break; + return idx; +} + +static +struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) +{ + u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); + return (idx < BNX2X_NR_VIRTFN(bp)) ? 
BP_VF(bp, idx) : NULL; +} + +static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, + u8 igu_sb_id, u8 segment, u16 index, u8 op, + u8 update) +{ + /* acking a VF sb through the PF - use the GRC */ + u32 ctl; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 func_encode = vf->abs_vfid; + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr_data); + REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); + mmiowb(); + barrier(); + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); +} + +static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, + struct bnx2x_virtf *vf, + bool print_err) +{ + if (!bnx2x_leading_vfq(vf, sp_initialized)) { + if (print_err) + BNX2X_ERR("Slowpath objects not yet initialized!\n"); + else + DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); + return false; + } + return true; +} + +/* VFOP operations states */ +void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx) +{ + DP(BNX2X_MSG_IOV, + "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", + vf->abs_vfid, + q_idx, + sb_idx, + init_params->tx.sb_cq_index, + init_params->tx.hc_rate, + setup_params->flags, + setup_params->txq_params.traffic_type); +} + +void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx) +{ + struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" + "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", + vf->abs_vfid, + q_idx, + sb_idx, + init_params->rx.sb_cq_index, + init_params->rx.hc_rate, + setup_params->gen_params.mtu, + rxq_params->buf_sz, + rxq_params->sge_buf_sz, + rxq_params->max_sges_pkt, + rxq_params->tpa_agg_sz, + setup_params->flags, + rxq_params->drop_flags, + rxq_params->cache_line_log); +} + +void bnx2x_vfop_qctor_prep(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q, + struct bnx2x_vf_queue_construct_params *p, + unsigned long q_type) +{ + struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; + struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; + + /* INIT */ + + /* Enable host coalescing in the transition to INIT state */ + if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) + __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); + + if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) + __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags); + + /* FW SB ID */ + init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + + /* context */ + init_p->cxts[0] = q->cxt; + + /* SETUP */ + + /* 
Setup-op general parameters */ + setup_p->gen_params.spcl_id = vf->sp_cl_id; + setup_p->gen_params.stat_id = vfq_stat_id(vf, q); + setup_p->gen_params.fp_hsi = vf->fp_hsi; + + /* Setup-op pause params: + * Nothing to do, the pause thresholds are set by default to 0 which + * effectively turns off the feature for this queue. We don't want + * one queue (VF) to interfering with another queue (another VF) + */ + if (vf->cfg_flags & VF_CFG_FW_FC) + BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", + vf->abs_vfid); + /* Setup-op flags: + * collect statistics, zero statistics, local-switching, security, + * OV for Flex10, RSS and MCAST for leading + */ + if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); + + /* for VFs, enable tx switching, bd coherency, and mac address + * anti-spoofing + */ + __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); + __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); + __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); + + /* Setup-op rx parameters */ + if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { + struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; + + rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); + rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); + rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); + + if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) + rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; + } + + /* Setup-op tx parameters */ + if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { + setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; + setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); + } +} + +static int bnx2x_vf_queue_create(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) +{ + struct bnx2x_queue_state_params *q_params; + int rc = 0; + + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); + + /* Prepare ramrod information */ + q_params = &qctor->qstate; + q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); + + if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == + BNX2X_Q_LOGICAL_STATE_ACTIVE) { + DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n"); + goto out; + } + + /* Run Queue 'construction' ramrods */ + q_params->cmd = BNX2X_Q_CMD_INIT; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; + + memcpy(&q_params->params.setup, &qctor->prep_qsetup, + sizeof(struct bnx2x_queue_setup_params)); + q_params->cmd = BNX2X_Q_CMD_SETUP; + rc = bnx2x_queue_state_change(bp, q_params); + if (rc) + goto out; + + /* enable interrupts */ + bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), + USTORM_ID, 0, IGU_INT_ENABLE, 0); +out: + return rc; +} + +static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) +{ + enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, + BNX2X_Q_CMD_TERMINATE, + BNX2X_Q_CMD_CFC_DEL}; + struct bnx2x_queue_state_params q_params; + int rc, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Prepare ramrod information */ + memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); + q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == + BNX2X_Q_LOGICAL_STATE_STOPPED) { + DP(BNX2X_MSG_IOV, "queue was already stopped. 
Aborting gracefully\n"); + goto out; + } + + /* Run Queue 'destruction' ramrods */ + for (i = 0; i < ARRAY_SIZE(cmds); i++) { + q_params.cmd = cmds[i]; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); + return rc; + } + } +out: + /* Clean Context */ + if (bnx2x_vfq(vf, qid, cxt)) { + bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; + bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; + } + + return 0; +} + +static void +bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) +{ + struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + if (vf) { + /* the first igu entry belonging to VFs of this PF */ + if (!BP_VFDB(bp)->first_vf_igu_entry) + BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; + + /* the first igu entry belonging to this VF */ + if (!vf_sb_count(vf)) + vf->igu_base_id = igu_sb_id; + + ++vf_sb_count(vf); + ++vf->sb_count; + } + BP_VFDB(bp)->vf_sbs_pool++; +} + +static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *obj, + atomic_t *counter) +{ + struct list_head *pos; + int read_lock; + int cnt = 0; + + read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); + if (read_lock) + DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); + + list_for_each(pos, &obj->head) + cnt++; + + if (!read_lock) + bnx2x_vlan_mac_h_read_unlock(bp, obj); + + atomic_set(counter, cnt); +} + +static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, bool drv_only, bool mac) +{ + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, + mac ? "MACs" : "VLANs"); + + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (mac) { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + } else { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + } + ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; + + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + + /* Start deleting */ + rc = ramrod.vlan_mac_obj->delete_all(bp, + ramrod.vlan_mac_obj, + &ramrod.user_req.vlan_mac_flags, + &ramrod.ramrod_flags); + if (rc) { + BNX2X_ERR("Failed to delete all %s\n", + mac ? "MACs" : "VLANs"); + return rc; + } + + /* Clear the vlan counters */ + if (!mac) + atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); + + return 0; +} + +static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, + struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_mac_vlan_filter *filter, + bool drv_only) +{ + struct bnx2x_vlan_mac_ramrod_params ramrod; + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", + vf->abs_vfid, filter->add ? "Adding" : "Deleting", + filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : "VLAN"); + + /* Prepare ramrod params */ + memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); + if (filter->type == BNX2X_VF_FILTER_VLAN) { + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); + ramrod.user_req.u.vlan.vlan = filter->vid; + } else { + set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); + ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); + memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); + } + ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : + BNX2X_VLAN_MAC_DEL; + + /* Verify there are available vlan credits */ + if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && + (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= + vf_vlan_rules_cnt(vf))) { + BNX2X_ERR("No credits for vlan [%d >= %d]\n", + atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), + vf_vlan_rules_cnt(vf)); + return -ENOMEM; + } + + set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + + /* Add/Remove the filter */ + rc = bnx2x_config_vlan_mac(bp, &ramrod); + if (rc && rc != -EEXIST) { + BNX2X_ERR("Failed to %s %s\n", + filter->add ? "add" : "delete", + filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : + "VLAN"); + return rc; + } + + /* Update the vlan counters */ + if (filter->type == BNX2X_VF_FILTER_VLAN) + bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, + &bnx2x_vfq(vf, qid, vlan_count)); + + return 0; +} + +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only) +{ + int rc = 0, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* Prepare ramrod params */ + for (i = 0; i < filters->count; i++) { + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], drv_only); + if (rc) + break; + } + + /* Rollback if needed */ + if (i != filters->count) { + BNX2X_ERR("Managed only %d/%d filters - rolling back\n", + i, filters->count + 1); + while (--i >= 0) { + filters->filters[i].add = !filters->filters[i].add; + bnx2x_vf_mac_vlan_config(bp, vf, qid, + &filters->filters[i], + drv_only); + } + } + + /* It's our responsibility to free the filters */ + kfree(filters); + + return rc; +} + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor) +{ + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); + + rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); + if (rc) + goto op_err; + + /* Configure vlan0 for leading queue */ + if (!qid) { + struct bnx2x_vf_mac_vlan_filter filter; + + memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); + filter.type = BNX2X_VF_FILTER_VLAN; + filter.add = true; + filter.vid = 0; + rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); + if (rc) + goto op_err; + } + + /* Schedule the configuration of any pending vlan filters */ + vf->cfg_flags |= VF_CFG_VLAN; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, + BNX2X_MSG_IOV); + return 0; +op_err: + BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; +} + +static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid) +{ + int rc; + + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); + + /* If needed, clean the filtering data base */ + if ((qid == LEADING_IDX) && + 
bnx2x_validate_vf_sp_objs(bp, vf, false)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); + if (rc) + goto op_err; + } + + /* Terminate queue */ + if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { + struct bnx2x_queue_state_params qstate; + + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; + qstate.cmd = BNX2X_Q_CMD_TERMINATE; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) + goto op_err; + } + + return 0; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); + return rc; +} + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) +{ + struct bnx2x_mcast_list_elem *mc = NULL; + struct bnx2x_mcast_ramrod_params mcast; + int rc, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Prepare Multicast command */ + memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); + mcast.mcast_obj = &vf->mcast_obj; + if (drv_only) + set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); + else + set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); + if (mc_num) { + mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), + GFP_KERNEL); + if (!mc) { + BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n"); + return -ENOMEM; + } + } + + /* clear existing mcasts */ + mcast.mcast_list_len = vf->mcast_list_len; + vf->mcast_list_len = mc_num; + rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); + if (rc) { + BNX2X_ERR("Failed to remove multicasts\n"); + kfree(mc); + return rc; + } + + /* update mcast list on the ramrod params */ + if (mc_num) { + INIT_LIST_HEAD(&mcast.mcast_list); + for (i = 0; i < mc_num; i++) { + mc[i].mac = mcasts[i]; + list_add_tail(&mc[i].link, + &mcast.mcast_list); + } + + /* add new mcasts */ + mcast.mcast_list_len = mc_num; + rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); + if (rc) + BNX2X_ERR("Faled to add multicasts\n"); + kfree(mc); + } + + return rc; +} + +static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, + struct bnx2x_rx_mode_ramrod_params *ramrod, + struct bnx2x_virtf *vf, + unsigned long accept_flags) +{ + struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); + + memset(ramrod, 0, sizeof(*ramrod)); + ramrod->cid = vfq->cid; + ramrod->cl_id = vfq_cl_id(vf, vfq); + ramrod->rx_mode_obj = &bp->rx_mode_obj; + ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); + ramrod->rx_accept_flags = accept_flags; + ramrod->tx_accept_flags = accept_flags; + ramrod->pstate = &vf->filter_state; + ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + set_bit(RAMROD_RX, &ramrod->ramrod_flags); + set_bit(RAMROD_TX, &ramrod->ramrod_flags); + + ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); + ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); +} + +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, unsigned long accept_flags) +{ + struct bnx2x_rx_mode_ramrod_params ramrod; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); + set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; + return bnx2x_config_rx_mode(bp, &ramrod); +} + +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) +{ + int 
rc; + + DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); + + /* Remove all classification configuration for leading queue */ + if (qid == LEADING_IDX) { + rc = bnx2x_vf_rxmode(bp, vf, qid, 0); + if (rc) + goto op_err; + + /* Remove filtering if feasible */ + if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, false); + if (rc) + goto op_err; + rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, + false, true); + if (rc) + goto op_err; + rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); + if (rc) + goto op_err; + } + } + + /* Destroy queue */ + rc = bnx2x_vf_queue_destroy(bp, vf, qid); + if (rc) + goto op_err; + return rc; +op_err: + BNX2X_ERR("vf[%d:%d] error: rc %d\n", + vf->abs_vfid, qid, rc); + return rc; +} + +/* VF enable primitives + * when pretend is required the caller is responsible + * for calling pretend prior to calling these routines + */ + +/* internal vf enable - until vf is enabled internally all transactions + * are blocked. This routine should always be called last with pretend. + */ +static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) +{ + REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); +} + +/* clears vf error in all semi blocks */ +static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) +{ + REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); + REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); +} + +static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) +{ + u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; + u32 was_err_reg = 0; + + switch (was_err_group) { + case 0: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; + break; + case 1: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; + break; + case 2: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; + break; + case 3: + was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; + break; + } + REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); +} + +static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i; + u32 val; + + /* Set VF masks and configuration - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); + REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); + REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); + + val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); + val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); + if (vf->cfg_flags & VF_CFG_INT_SIMD) + val |= IGU_VF_CONF_SINGLE_ISR_EN; + val &= ~IGU_VF_CONF_PARENT_MASK; + val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; + REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); + + DP(BNX2X_MSG_IOV, + "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n", + vf->abs_vfid, val); + + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* iterate over all queues, clear sb consumer */ + for (i = 0; i < vf_sb_count(vf); i++) { + u8 igu_sb_id = vf_igu_sb(vf, i); + + /* zero prod memory */ + REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); + + /* clear sb state machine */ + bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, + false /* VF */); + + /* disable + update */ + bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, + IGU_INT_DISABLE, 1); + } +} + +void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) +{ + /* set the VF-PF association in the FW */ + storm_memset_vf_to_pf(bp, 
FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); + storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); + + /* clear vf errors*/ + bnx2x_vf_semi_clear_err(bp, abs_vfid); + bnx2x_vf_pglue_clear_err(bp, abs_vfid); + + /* internal vf-enable - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); + DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); + bnx2x_vf_enable_internal(bp, true); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + /* Reset vf in IGU interrupts are still disabled */ + bnx2x_vf_igu_reset(bp, vf); + + /* pretend to enable the vf with the PBF */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + REG_WR(bp, PBF_REG_DISABLE_VF, 0); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) +{ + struct pci_dev *dev; + struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + + if (!vf) + return false; + + dev = pci_get_bus_and_slot(vf->bus, vf->devfn); + if (dev) + return bnx2x_is_pcie_pending(dev); + return false; +} + +int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) +{ + /* Verify no pending pci transactions */ + if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) + BNX2X_ERR("PCIE Transactions still pending\n"); + + return 0; +} + +static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + int new) +{ + int num = vf_vlan_rules_cnt(vf); + int diff = new - num; + bool rc = true; + + DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", + vf->abs_vfid, new, num); + + if (diff > 0) + rc = bp->vlans_pool.get(&bp->vlans_pool, diff); + else if (diff < 0) + rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); + + if (rc) + vf_vlan_rules_cnt(vf) = new; + else + DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", + vf->abs_vfid); +} + +/* must be called after the number of PF queues and the number of VFs are + * both known + */ +static void +bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct vf_pf_resc_request *resc = &vf->alloc_resc; + u16 vlan_count = 0; + + /* will be set only during VF-ACQUIRE */ + resc->num_rxqs = 0; + resc->num_txqs = 0; + + /* no credit calculations for macs (just yet) */ + resc->num_mac_filters = 1; + + /* divvy up vlan rules */ + bnx2x_iov_re_set_vlan_filters(bp, vf, 0); + vlan_count = bp->vlans_pool.check(&bp->vlans_pool); + vlan_count = 1 << ilog2(vlan_count); + bnx2x_iov_re_set_vlan_filters(bp, vf, + vlan_count / BNX2X_NR_VIRTFN(bp)); + + /* no real limitation */ + resc->num_mc_filters = 0; + + /* num_sbs already set */ + resc->num_sbs = vf->sb_count; +} + +/* FLR routines: */ +static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + /* reset the state variables */ + bnx2x_iov_static_resc(bp, vf); + vf->state = VF_FREE; +} + +static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); + + /* DQ usage counter */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, + "DQ VF usage counter timed out", + poll_cnt); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* FW cleanup command - poll for the results */ + if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), + poll_cnt)) + BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid); + + /* verify TX hw is flushed */ + bnx2x_tx_hw_flushed(bp, poll_cnt); +} + +static void 
bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	int rc, i;
+
+	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+	/* the cleanup operations are valid if and only if the VF
+	 * was first acquired.
+	 */
+	for (i = 0; i < vf_rxq_count(vf); i++) {
+		rc = bnx2x_vf_queue_flr(bp, vf, i);
+		if (rc)
+			goto out;
+	}
+
+	/* remove multicasts */
+	bnx2x_vf_mcast(bp, vf, NULL, 0, true);
+
+	/* dispatch final cleanup and wait for HW queues to flush */
+	bnx2x_vf_flr_clnup_hw(bp, vf);
+
+	/* release VF resources */
+	bnx2x_vf_free_resc(bp, vf);
+
+	/* re-open the mailbox */
+	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+	return;
+out:
+	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+		  vf->abs_vfid, i, rc);
+}
+
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
+{
+	struct bnx2x_virtf *vf;
+	int i;
+
+	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+		/* VF should be RESET & in FLR cleanup states */
+		if (bnx2x_vf(bp, i, state) != VF_RESET ||
+		    !bnx2x_vf(bp, i, flr_clnup_stage))
+			continue;
+
+		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+		   i, BNX2X_NR_VIRTFN(bp));
+
+		vf = BP_VF(bp, i);
+
+		/* lock the vf pf channel */
+		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+		/* invoke the VF FLR SM */
+		bnx2x_vf_flr(bp, vf);
+
+		/* mark the VF to be ACKED and continue */
+		vf->flr_clnup_stage = false;
+		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+	}
+
+	/* Acknowledge the handled VFs.
+	 * We acknowledge all the VFs for which an FLR was requested, even
+	 * those we never opened, since the MCP will interrupt us immediately
+	 * again if we only ack some of the bits, resulting in an endless
+	 * loop. This can happen for example in KVM, where an 'all ones' FLR
+	 * request is sometimes given by the hypervisor.
+	 */
+	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+			  bp->vfdb->flrd_vfs[i]);
+
+	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+	/* clear the acked bits - better yet if the MCP implemented
+	 * write to clear semantics
+	 */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+	int i;
+
+	/* Read FLR'd VFs */
+	for (i = 0; i < FLRD_VFS_DWORDS; i++)
+		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+	DP(BNX2X_MSG_MCP,
+	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+	for_each_vf(bp, i) {
+		struct bnx2x_virtf *vf = BP_VF(bp, i);
+		u32 reset = 0;
+
+		if (vf->abs_vfid < 32)
+			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+		else
+			reset = bp->vfdb->flrd_vfs[1] &
+				(1 << (vf->abs_vfid - 32));
+
+		if (reset) {
+			/* set as reset and ready for cleanup */
+			vf->state = VF_RESET;
+			vf->flr_clnup_stage = true;
+
+			DP(BNX2X_MSG_IOV,
+			   "Initiating Final cleanup for VF %d\n",
+			   vf->abs_vfid);
+		}
+	}
+
+	/* do the FLR cleanup for all marked VFs */
+	bnx2x_vf_flr_clnup(bp);
+}
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+	if (!IS_SRIOV(bp))
+		return;
+
+	/* Set the DQ such that the CID reflect the abs_vfid */
+	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+	/* Set VFs starting CID.
If its > 0 the preceding CIDs are belong to + * the PF L2 queues + */ + REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); + + /* The VF window size is the log2 of the max number of CIDs per VF */ + REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); + + /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match + * the Pf doorbell size although the 2 are independent. + */ + REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); + + /* No security checks for now - + * configure single rule (out of 16) mask = 0x1, value = 0x0, + * CID range 0 - 0x1ffff + */ + REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); + REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); + REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); + REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); + + /* set the VF doorbell threshold. This threshold represents the amount + * of doorbells allowed in the main DORQ fifo for a specific VF. + */ + REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); +} + +void bnx2x_iov_init_dmae(struct bnx2x *bp) +{ + if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) + REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); +} + +static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) +{ + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + return dev->bus->number + ((dev->devfn + iov->offset + + iov->stride * vfid) >> 8); +} + +static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) +{ + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; +} + +static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i, n; + struct pci_dev *dev = bp->pdev; + struct bnx2x_sriov *iov = &bp->vfdb->sriov; + + for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { + u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); + u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); + + size /= iov->total; + vf->bars[n].bar = start + size * vf->abs_vfid; + vf->bars[n].size = size; + } +} + +static int bnx2x_ari_enabled(struct pci_dev *dev) +{ + return dev->bus->self && dev->bus->self->ari_enabled; +} + +static int +bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) +{ + int sb_id; + u32 val; + u8 fid, current_pf = 0; + + /* IGU in normal mode - read CAM */ + for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { + val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); + if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) + continue; + fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); + if (fid & IGU_FID_ENCODE_IS_PF) + current_pf = fid & IGU_FID_PF_NUM_MASK; + else if (current_pf == BP_FUNC(bp)) + bnx2x_vf_set_igu_info(bp, sb_id, + (fid & IGU_FID_VF_NUM_MASK)); + DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", + ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), + ((fid & IGU_FID_ENCODE_IS_PF) ? 
(fid & IGU_FID_PF_NUM_MASK) : + (fid & IGU_FID_VF_NUM_MASK)), sb_id, + GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); + } + DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); + return BP_VFDB(bp)->vf_sbs_pool; +} + +static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) +{ + if (bp->vfdb) { + kfree(bp->vfdb->vfqs); + kfree(bp->vfdb->vfs); + kfree(bp->vfdb); + } + bp->vfdb = NULL; +} + +static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) +{ + int pos; + struct pci_dev *dev = bp->pdev; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + BNX2X_ERR("failed to find SRIOV capability in device\n"); + return -ENODEV; + } + + iov->pos = pos; + DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); + pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); + pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); + pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + return 0; +} + +static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) +{ + u32 val; + + /* read the SRIOV capability structure + * The fields can be read via configuration read or + * directly from the device (starting at offset PCICFG_OFFSET) + */ + if (bnx2x_sriov_pci_cfg_info(bp, iov)) + return -ENODEV; + + /* get the number of SRIOV bars */ + iov->nres = 0; + + /* read the first_vfid */ + val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); + iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) + * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); + + DP(BNX2X_MSG_IOV, + "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", + BP_FUNC(bp), + iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, + iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); + + return 0; +} + +/* must be called after PF bars are mapped */ +int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, + int num_vfs_param) +{ + int err, i; + struct bnx2x_sriov *iov; + struct pci_dev *dev = bp->pdev; + + bp->vfdb = NULL; + + /* verify is pf */ + if (IS_VF(bp)) + return 0; + + /* verify sriov capability is present in configuration space */ + if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + /* verify chip revision */ + if (CHIP_IS_E1x(bp)) + return 0; + + /* check if SRIOV support is turned off */ + if (!num_vfs_param) + return 0; + + /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ + if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { + BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). 
Abort SRIOV\n", + BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); + return 0; + } + + /* SRIOV can be enabled only with MSIX */ + if (int_mode_param == BNX2X_INT_MODE_MSI || + int_mode_param == BNX2X_INT_MODE_INTX) { + BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); + return 0; + } + + err = -EIO; + /* verify ari is enabled */ + if (!bnx2x_ari_enabled(bp->pdev)) { + BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); + return 0; + } + + /* verify igu is in normal mode */ + if (CHIP_INT_MODE_IS_BC(bp)) { + BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); + return 0; + } + + /* allocate the vfs database */ + bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); + if (!bp->vfdb) { + BNX2X_ERR("failed to allocate vf database\n"); + err = -ENOMEM; + goto failed; + } + + /* get the sriov info - Linux already collected all the pertinent + * information, however the sriov structure is for the private use + * of the pci module. Also we want this information regardless + * of the hyper-visor. + */ + iov = &(bp->vfdb->sriov); + err = bnx2x_sriov_info(bp, iov); + if (err) + goto failed; + + /* SR-IOV capability was enabled but there are no VFs*/ + if (iov->total == 0) + goto failed; + + iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); + + DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", + num_vfs_param, iov->nr_virtfn); + + /* allocate the vf array */ + bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * + BNX2X_NR_VIRTFN(bp), GFP_KERNEL); + if (!bp->vfdb->vfs) { + BNX2X_ERR("failed to allocate vf array\n"); + err = -ENOMEM; + goto failed; + } + + /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ + for_each_vf(bp, i) { + bnx2x_vf(bp, i, index) = i; + bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; + bnx2x_vf(bp, i, state) = VF_FREE; + mutex_init(&bnx2x_vf(bp, i, op_mutex)); + bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; + } + + /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ + if (!bnx2x_get_vf_igu_cam_info(bp)) { + BNX2X_ERR("No entries in IGU CAM for vfs\n"); + err = -EINVAL; + goto failed; + } + + /* allocate the queue arrays for all VFs */ + bp->vfdb->vfqs = kzalloc( + BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), + GFP_KERNEL); + + if (!bp->vfdb->vfqs) { + BNX2X_ERR("failed to allocate vf queue array\n"); + err = -ENOMEM; + goto failed; + } + + /* Prepare the VFs event synchronization mechanism */ + mutex_init(&bp->vfdb->event_mutex); + + mutex_init(&bp->vfdb->bulletin_mutex); + + return 0; +failed: + DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); + __bnx2x_iov_free_vfdb(bp); + return err; +} + +void bnx2x_iov_remove_one(struct bnx2x *bp) +{ + int vf_idx; + + /* if SRIOV is not enabled there's nothing to do */ + if (!IS_SRIOV(bp)) + return; + + bnx2x_disable_sriov(bp); + + /* disable access to all VFs */ + for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { + bnx2x_pretend_func(bp, + HW_VF_HANDLE(bp, + bp->vfdb->sriov.first_vf_in_pf + + vf_idx)); + DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", + bp->vfdb->sriov.first_vf_in_pf + vf_idx); + bnx2x_vf_enable_internal(bp, 0); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + } + + /* free vf database */ + __bnx2x_iov_free_vfdb(bp); +} + +void bnx2x_iov_free_mem(struct bnx2x *bp) +{ + int i; + + if (!IS_SRIOV(bp)) + return; + + /* free vfs hw contexts */ + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *cxt = &bp->vfdb->context[i]; + BNX2X_PCI_FREE(cxt->addr, cxt->mapping, 
cxt->size); + } + + BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, + BP_VFDB(bp)->sp_dma.mapping, + BP_VFDB(bp)->sp_dma.size); + + BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, + BP_VF_MBX_DMA(bp)->mapping, + BP_VF_MBX_DMA(bp)->size); + + BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, + BP_VF_BULLETIN_DMA(bp)->mapping, + BP_VF_BULLETIN_DMA(bp)->size); +} + +int bnx2x_iov_alloc_mem(struct bnx2x *bp) +{ + size_t tot_size; + int i, rc = 0; + + if (!IS_SRIOV(bp)) + return rc; + + /* allocate vfs hw contexts */ + tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * + BNX2X_CIDS_PER_VF * sizeof(union cdu_context); + + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); + cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); + + if (cxt->size) { + cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); + if (!cxt->addr) + goto alloc_mem_err; + } else { + cxt->addr = NULL; + cxt->mapping = 0; + } + tot_size -= cxt->size; + } + + /* allocate vfs ramrods dma memory - client_init and set_mac */ + tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); + BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, + tot_size); + if (!BP_VFDB(bp)->sp_dma.addr) + goto alloc_mem_err; + BP_VFDB(bp)->sp_dma.size = tot_size; + + /* allocate mailboxes */ + tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; + BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, + tot_size); + if (!BP_VF_MBX_DMA(bp)->addr) + goto alloc_mem_err; + + BP_VF_MBX_DMA(bp)->size = tot_size; + + /* allocate local bulletin boards */ + tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; + BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, + tot_size); + if (!BP_VF_BULLETIN_DMA(bp)->addr) + goto alloc_mem_err; + + BP_VF_BULLETIN_DMA(bp)->size = tot_size; + + return 0; + +alloc_mem_err: + return -ENOMEM; +} + +static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + unsigned long q_type = 0; + + set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + + /* Queue State object */ + bnx2x_init_queue_obj(bp, &q->sp_obj, + cl_id, &q->cid, 1, func_id, + bnx2x_vf_sp(bp, vf, q_data), + bnx2x_vf_sp_map(bp, vf, q_data), + q_type); + + /* sp indication is set only when vlan/mac/etc. are initialized */ + q->sp_initialized = false; + + DP(BNX2X_MSG_IOV, + "initialized vf %d's queue object. func id set to %d. 
cid set to 0x%x\n", + vf->abs_vfid, q->sp_obj.func_id, q->cid); +} + +static int bnx2x_max_speed_cap(struct bnx2x *bp) +{ + u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; + + if (supported & + (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full)) + return 20000; + + return 10000; /* assume lowest supported speed is 10G */ +} + +int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) +{ + struct bnx2x_link_report_data *state = &bp->last_reported_link; + struct pf_vf_bulletin_content *bulletin; + struct bnx2x_virtf *vf; + bool update = true; + int rc = 0; + + /* sanity and init */ + rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); + if (rc) + return rc; + + mutex_lock(&bp->vfdb->bulletin_mutex); + + if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) { + bulletin->valid_bitmap |= 1 << LINK_VALID; + + bulletin->link_speed = state->line_speed; + bulletin->link_flags = 0; + if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &state->link_report_flags)) + bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN; + if (test_bit(BNX2X_LINK_REPORT_FD, + &state->link_report_flags)) + bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX; + if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &state->link_report_flags)) + bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON; + if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &state->link_report_flags)) + bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON; + } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE && + !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) { + bulletin->valid_bitmap |= 1 << LINK_VALID; + bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN; + } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE && + (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) { + bulletin->valid_bitmap |= 1 << LINK_VALID; + bulletin->link_speed = bnx2x_max_speed_cap(bp); + bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN; + } else { + update = false; + } + + if (update) { + DP(NETIF_MSG_LINK | BNX2X_MSG_IOV, + "vf %d mode %u speed %d flags %x\n", idx, + vf->link_cfg, bulletin->link_speed, bulletin->link_flags); + + /* Post update on VF's bulletin board */ + rc = bnx2x_post_vf_bulletin(bp, idx); + if (rc) { + BNX2X_ERR("failed to update VF[%d] bulletin\n", idx); + goto out; + } + } + +out: + mutex_unlock(&bp->vfdb->bulletin_mutex); + return rc; +} + +int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_virtf *vf = BP_VF(bp, idx); + + if (!vf) + return -EINVAL; + + if (vf->link_cfg == link_state) + return 0; /* nothing todo */ + + vf->link_cfg = link_state; + + return bnx2x_iov_link_update_vf(bp, idx); +} + +void bnx2x_iov_link_update(struct bnx2x *bp) +{ + int vfid; + + if (!IS_SRIOV(bp)) + return; + + for_each_vf(bp, vfid) + bnx2x_iov_link_update_vf(bp, vfid); +} + +/* called by bnx2x_nic_load */ +int bnx2x_iov_nic_init(struct bnx2x *bp) +{ + int vfid; + + if (!IS_SRIOV(bp)) { + DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); + return 0; + } + + DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); + + /* let FLR complete ... 
*/ + msleep(100); + + /* initialize vf database */ + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); + + int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * + BNX2X_CIDS_PER_VF; + + union cdu_context *base_cxt = (union cdu_context *) + BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + + (base_vf_cid & (ILT_PAGE_CIDS-1)); + + DP(BNX2X_MSG_IOV, + "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", + vf->abs_vfid, vf_sb_count(vf), base_vf_cid, + BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); + + /* init statically provisioned resources */ + bnx2x_iov_static_resc(bp, vf); + + /* queues are initialized during VF-ACQUIRE */ + vf->filter_state = 0; + vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); + + /* init mcast object - This object will be re-initialized + * during VF-ACQUIRE with the proper cl_id and cid. + * It needs to be initialized here so that it can be safely + * handled by a subsequent FLR flow. + */ + vf->mcast_list_len = 0; + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, + 0xFF, 0xFF, 0xFF, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* set the mailbox message addresses */ + BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) + (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * + MBX_MSG_ALIGNED_SIZE); + + BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + + vfid * MBX_MSG_ALIGNED_SIZE; + + /* Enable vf mailbox */ + bnx2x_vf_enable_mbx(bp, vf->abs_vfid); + } + + /* Final VF init */ + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); + + /* fill in the BDF and bars */ + vf->bus = bnx2x_vf_bus(bp, vfid); + vf->devfn = bnx2x_vf_devfn(bp, vfid); + bnx2x_vf_set_bars(bp, vf); + + DP(BNX2X_MSG_IOV, + "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", + vf->abs_vfid, vf->bus, vf->devfn, + (unsigned)vf->bars[0].bar, vf->bars[0].size, + (unsigned)vf->bars[1].bar, vf->bars[1].size, + (unsigned)vf->bars[2].bar, vf->bars[2].size); + } + + return 0; +} + +/* called by bnx2x_chip_cleanup */ +int bnx2x_iov_chip_cleanup(struct bnx2x *bp) +{ + int i; + + if (!IS_SRIOV(bp)) + return 0; + + /* release all the VFs */ + for_each_vf(bp, i) + bnx2x_vf_release(bp, BP_VF(bp, i)); + + return 0; +} + +/* called by bnx2x_init_hw_func, returns the next ilt line */ +int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) +{ + int i; + struct bnx2x_ilt *ilt = BP_ILT(bp); + + if (!IS_SRIOV(bp)) + return line; + + /* set vfs ilt lines */ + for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { + struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); + + ilt->lines[line+i].page = hw_cxt->addr; + ilt->lines[line+i].page_mapping = hw_cxt->mapping; + ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ + } + return line + i; +} + +static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) +{ + return ((cid >= BNX2X_FIRST_VF_CID) && + ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); +} + +static +void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, + struct bnx2x_vf_queue *vfq, + union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + + /* Always push next commands out, don't wait here */ + set_bit(RAMROD_CONT, &ramrod_flags); + + switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + case BNX2X_FILTER_MAC_PENDING: + rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, + &ramrod_flags); + break; + case BNX2X_FILTER_VLAN_PENDING: + rc = vfq->vlan_obj.complete(bp, 
&vfq->vlan_obj, elem, + &ramrod_flags); + break; + default: + BNX2X_ERR("Unsupported classification command: %d\n", + elem->message.data.eth_event.echo); + return; + } + if (rc < 0) + BNX2X_ERR("Failed to schedule new commands: %d\n", rc); + else if (rc > 0) + DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); +} + +static +void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_mcast_ramrod_params rparam = {NULL}; + int rc; + + rparam.mcast_obj = &vf->mcast_obj; + vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); + + /* If there are pending mcast commands - send them */ + if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + if (rc < 0) + BNX2X_ERR("Failed to send pending mcast commands: %d\n", + rc); + } +} + +static +void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + smp_mb__before_atomic(); + clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); + smp_mb__after_atomic(); +} + +static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); +} + +int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) +{ + struct bnx2x_virtf *vf; + int qidx = 0, abs_vfid; + u8 opcode; + u16 cid = 0xffff; + + if (!IS_SRIOV(bp)) + return 1; + + /* first get the cid - the only events we handle here are cfc-delete + * and set-mac completion + */ + opcode = elem->message.opcode; + + switch (opcode) { + case EVENT_RING_OPCODE_CFC_DEL: + cid = SW_CID((__force __le32) + elem->message.data.cfc_del_event.cid); + DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); + break; + case EVENT_RING_OPCODE_CLASSIFICATION_RULES: + case EVENT_RING_OPCODE_MULTICAST_RULES: + case EVENT_RING_OPCODE_FILTERS_RULES: + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: + cid = (elem->message.data.eth_event.echo & + BNX2X_SWCID_MASK); + DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); + break; + case EVENT_RING_OPCODE_VF_FLR: + abs_vfid = elem->message.data.vf_flr_event.vf_id; + DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", + abs_vfid); + goto get_vf; + case EVENT_RING_OPCODE_MALICIOUS_VF: + abs_vfid = elem->message.data.malicious_vf_event.vf_id; + BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", + abs_vfid, + elem->message.data.malicious_vf_event.err_id); + goto get_vf; + default: + return 1; + } + + /* check if the cid is the VF range */ + if (!bnx2x_iov_is_vf_cid(bp, cid)) { + DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); + return 1; + } + + /* extract vf and rxq index from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. 
The max number of VFs (per path) is 64 + */ + qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); + abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); +get_vf: + vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); + + if (!vf) { + BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", + cid, abs_vfid); + return 0; + } + + switch (opcode) { + case EVENT_RING_OPCODE_CFC_DEL: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", + vf->abs_vfid, qidx); + vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, + &vfq_get(vf, + qidx)->sp_obj, + BNX2X_Q_CMD_CFC_DEL); + break; + case EVENT_RING_OPCODE_CLASSIFICATION_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); + break; + case EVENT_RING_OPCODE_MULTICAST_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_mcast_eqe(bp, vf); + break; + case EVENT_RING_OPCODE_FILTERS_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_filters_eqe(bp, vf); + break; + case EVENT_RING_OPCODE_RSS_UPDATE_RULES: + DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", + vf->abs_vfid, qidx); + bnx2x_vf_handle_rss_update_eqe(bp, vf); + case EVENT_RING_OPCODE_VF_FLR: + case EVENT_RING_OPCODE_MALICIOUS_VF: + /* Do nothing for now */ + return 0; + } + + return 0; +} + +static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) +{ + /* extract the vf from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. The max number of VFs (per path) is 64 + */ + int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); + return bnx2x_vf_by_abs_fid(bp, abs_vfid); +} + +void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj) +{ + struct bnx2x_virtf *vf; + + if (!IS_SRIOV(bp)) + return; + + vf = bnx2x_vf_by_cid(bp, vf_cid); + + if (vf) { + /* extract queue index from vf_cid - relies on the following: + * 1. vfid on cid reflects the true abs_vfid + * 2. The max number of VFs (per path) is 64 + */ + int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); + *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); + } else { + BNX2X_ERR("No vf matching cid %d\n", vf_cid); + } +} + +void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) +{ + int i; + int first_queue_query_index, num_queues_req; + dma_addr_t cur_data_offset; + struct stats_query_entry *cur_query_entry; + u8 stats_count = 0; + bool is_fcoe = false; + + if (!IS_SRIOV(bp)) + return; + + if (!NO_FCOE(bp)) + is_fcoe = true; + + /* fcoe adds one global request and one queue request */ + num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; + first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - + (is_fcoe ? 0 : 1); + + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. 
Will add queries on top of that\n", + BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, + first_queue_query_index + num_queues_req); + + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats) + + num_queues_req * sizeof(struct per_queue_stats); + + cur_query_entry = &bp->fw_stats_req-> + query[first_queue_query_index + num_queues_req]; + + for_each_vf(bp, i) { + int j; + struct bnx2x_virtf *vf = BP_VF(bp, i); + + if (vf->state != VF_ENABLED) { + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "vf %d not enabled so no stats for it\n", + vf->abs_vfid); + continue; + } + + DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); + for_each_vfq(vf, j) { + struct bnx2x_vf_queue *rxq = vfq_get(vf, j); + + dma_addr_t q_stats_addr = + vf->fw_stat_map + j * vf->stats_stride; + + /* collect stats fro active queues only */ + if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == + BNX2X_Q_LOGICAL_STATE_STOPPED) + continue; + + /* create stats query entry for this queue */ + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = vfq_stat_id(vf, rxq); + cur_query_entry->funcID = + cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(q_stats_addr)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(q_stats_addr)); + DP(BNX2X_MSG_IOV, + "added address %x %x for vf %d queue %d client %d\n", + cur_query_entry->address.hi, + cur_query_entry->address.lo, cur_query_entry->funcID, + j, cur_query_entry->index); + cur_query_entry++; + cur_data_offset += sizeof(struct per_queue_stats); + stats_count++; + + /* all stats are coalesced to the leading queue */ + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + break; + } + } + bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; +} + +/* VF API helpers */ +static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, + u8 enable) +{ + u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; + u32 val = enable ? (abs_vfid | (1 << 6)) : 0; + + REG_WR(bp, reg, val); +} + +static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int i; + + for_each_vfq(vf, i) + bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, + vfq_qzone_id(vf, vfq_get(vf, i)), false); +} + +static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + u32 val; + + /* clear the VF configuration - pretend */ + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); + val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); + val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | + IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); + REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +} + +u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), + BNX2X_VF_MAX_QUEUES); +} + +static +int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vf_pf_resc_request *req_resc) +{ + u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); + u8 txq_cnt = vf_txq_count(vf) ? 
: bnx2x_vf_max_queue_cnt(bp, vf);
+
+	/* Save a vlan filter for the Hypervisor */
+	return ((req_resc->num_rxqs <= rxq_cnt) &&
+		(req_resc->num_txqs <= txq_cnt) &&
+		(req_resc->num_sbs <= vf_sb_count(vf)) &&
+		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+		     struct vf_pf_resc_request *resc)
+{
+	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+		BNX2X_CIDS_PER_VF;
+
+	union cdu_context *base_cxt = (union cdu_context *)
+		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+		(base_vf_cid & (ILT_PAGE_CIDS-1));
+	int i;
+
+	/* if state is 'acquired' the VF was not released or FLR'd, in
+	 * this case the returned resources match the already acquired
+	 * resources. Verify that the requested numbers do not exceed the
+	 * already acquired numbers.
+	 */
+	if (vf->state == VF_ACQUIRED) {
+		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+		   vf->abs_vfid);
+
+		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
+				  vf->abs_vfid);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Otherwise vf state must be 'free' or 'reset' */
+	if (vf->state != VF_FREE && vf->state != VF_RESET) {
+		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+			  vf->abs_vfid, vf->state);
+		return -EINVAL;
+	}
+
+	/* static allocation:
+	 * the global maximum numbers are fixed per VF. Fail the request if
+	 * the requested numbers exceed these globals.
+	 */
+	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+		DP(BNX2X_MSG_IOV,
+		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
+		/* set the max resource in the vf */
+		return -ENOMEM;
+	}
+
+	/* Set resources counters - 0 request means max available */
+	vf_sb_count(vf) = resc->num_sbs;
+	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+	vf_txq_count(vf) = resc->num_txqs ?
: bnx2x_vf_max_queue_cnt(bp, vf); + if (resc->num_mac_filters) + vf_mac_rules_cnt(vf) = resc->num_mac_filters; + /* Add an additional vlan filter credit for the hypervisor */ + bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); + + DP(BNX2X_MSG_IOV, + "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", + vf_sb_count(vf), vf_rxq_count(vf), + vf_txq_count(vf), vf_mac_rules_cnt(vf), + vf_vlan_rules_visible_cnt(vf)); + + /* Initialize the queues */ + if (!vf->vfqs) { + DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); + return -EINVAL; + } + + for_each_vfq(vf, i) { + struct bnx2x_vf_queue *q = vfq_get(vf, i); + + if (!q) { + BNX2X_ERR("q number %d was not allocated\n", i); + return -EINVAL; + } + + q->index = i; + q->cxt = &((base_cxt + i)->eth); + q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; + + DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", + vf->abs_vfid, i, q->index, q->cid, q->cxt); + + /* init SP objects */ + bnx2x_vfq_init(bp, vf, q); + } + vf->state = VF_ACQUIRED; + return 0; +} + +int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) +{ + struct bnx2x_func_init_params func_init = {0}; + u16 flags = 0; + int i; + + /* the sb resources are initialized at this point, do the + * FW/HW initializations + */ + for_each_vf_sb(vf, i) + bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, + vf_igu_sb(vf, i), vf_igu_sb(vf, i)); + + /* Sanity checks */ + if (vf->state != VF_ACQUIRED) { + DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", + vf->abs_vfid, vf->state); + return -EINVAL; + } + + /* let FLR complete ... */ + msleep(100); + + /* FLR cleanup epilogue */ + if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) + return -EBUSY; + + /* reset IGU VF statistics: MSIX */ + REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); + + /* vf init */ + if (vf->cfg_flags & VF_CFG_STATS) + flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); + + if (vf->cfg_flags & VF_CFG_TPA) + flags |= FUNC_FLG_TPA; + + if (is_vf_multi(vf)) + flags |= FUNC_FLG_RSS; + + /* function setup */ + func_init.func_flgs = flags; + func_init.pf_id = BP_FUNC(bp); + func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); + func_init.fw_stat_map = vf->fw_stat_map; + func_init.spq_map = vf->spq_map; + func_init.spq_prod = 0; + bnx2x_func_init(bp, &func_init); + + /* Enable the vf */ + bnx2x_vf_enable_access(bp, vf->abs_vfid); + bnx2x_vf_enable_traffic(bp, vf); + + /* queue protection table */ + for_each_vfq(vf, i) + bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, + vfq_qzone_id(vf, vfq_get(vf, i)), true); + + vf->state = VF_ENABLED; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf->index); + + return 0; +} + +struct set_vf_state_cookie { + struct bnx2x_virtf *vf; + u8 state; +}; + +static void bnx2x_set_vf_state(void *cookie) +{ + struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; + + p->vf->state = p->state; +} + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc = 0, i; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Close all queues */ + for (i = 0; i < vf_rxq_count(vf); i++) { + rc = bnx2x_vf_queue_teardown(bp, vf, i); + if (rc) + goto op_err; + } + + /* disable the interrupts */ + DP(BNX2X_MSG_IOV, "disabling igu\n"); + bnx2x_vf_igu_disable(bp, vf); + + /* disable the VF */ + DP(BNX2X_MSG_IOV, "clearing qtbl\n"); + bnx2x_vf_clr_qtbl(bp, vf); + + /* need to make sure there are no outstanding stats ramrods which may + * cause the device to access 
the VF's stats buffer which it will free + * as soon as we return from the close flow. + */ + { + struct set_vf_state_cookie cookie; + + cookie.vf = vf; + cookie.state = VF_ACQUIRED; + rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + if (rc) + goto op_err; + } + + DP(BNX2X_MSG_IOV, "set state to acquired\n"); + + return 0; +op_err: + BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); + return rc; +} + +/* VF release can be called either: 1. The VF was acquired but + * not enabled 2. the vf was enabled or in the process of being + * enabled + */ +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, + vf->state == VF_FREE ? "Free" : + vf->state == VF_ACQUIRED ? "Acquired" : + vf->state == VF_ENABLED ? "Enabled" : + vf->state == VF_RESET ? "Reset" : + "Unknown"); + + switch (vf->state) { + case VF_ENABLED: + rc = bnx2x_vf_close(bp, vf); + if (rc) + goto op_err; + /* Fallthrough to release resources */ + case VF_ACQUIRED: + DP(BNX2X_MSG_IOV, "about to free resources\n"); + bnx2x_vf_free_resc(bp, vf); + break; + + case VF_FREE: + case VF_RESET: + default: + break; + } + return 0; +op_err: + BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); + return rc; +} + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss) +{ + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); + return bnx2x_config_rss(bp, rss); +} + +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params) +{ + aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; + struct bnx2x_queue_state_params qstate; + int qid, rc = 0; + + DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + + /* Set ramrod params */ + memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); + memcpy(&qstate.params.update_tpa, params, + sizeof(struct bnx2x_queue_update_tpa_params)); + qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; + set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); + + for (qid = 0; qid < vf_rxq_count(vf); qid++) { + qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); + qstate.params.update_tpa.sge_map = sge_addr[qid]; + DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", + vf->abs_vfid, qid, U64_HI(sge_addr[qid]), + U64_LO(sge_addr[qid])); + rc = bnx2x_queue_state_change(bp, &qstate); + if (rc) { + BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", + U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), + vf->abs_vfid, qid); + return rc; + } + } + + return rc; +} + +/* VF release ~ VF close + VF release-resources + * Release is the ultimate SW shutdown and is called whenever an + * irrecoverable error is encountered. + */ +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc; + + DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); + + rc = bnx2x_vf_free(bp, vf); + if (rc) + WARN(rc, + "VF[%d] Failed to allocate resources for release op- rc=%d\n", + vf->abs_vfid, rc); + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); + return rc; +} + +void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs tlv) +{ + /* we don't lock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(tlv)) { + BNX2X_ERR("attempting to lock with unsupported tlv. 
Aborting\n"); + return; + } + + /* lock the channel */ + mutex_lock(&vf->op_mutex); + + /* record the locking op */ + vf->op_current = tlv; + + /* log the lock */ + DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", + vf->abs_vfid, tlv); +} + +void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs expected_tlv) +{ + enum channel_tlvs current_tlv; + + if (!vf) { + BNX2X_ERR("VF was %p\n", vf); + return; + } + + current_tlv = vf->op_current; + + /* we don't unlock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(expected_tlv)) + return; + + WARN(expected_tlv != vf->op_current, + "lock mismatch: expected %d found %d", expected_tlv, + vf->op_current); + + /* record the locking op */ + vf->op_current = CHANNEL_TLV_NONE; + + /* lock the channel */ + mutex_unlock(&vf->op_mutex); + + /* log the unlock */ + DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", + vf->abs_vfid, current_tlv); +} + +static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) +{ + struct bnx2x_queue_state_params q_params; + u32 prev_flags; + int i, rc; + + /* Verify changes are needed and record current Tx switching state */ + prev_flags = bp->flags; + if (enable) + bp->flags |= TX_SWITCHING; + else + bp->flags &= ~TX_SWITCHING; + if (prev_flags == bp->flags) + return 0; + + /* Verify state enables the sending of queue ramrods */ + if ((bp->state != BNX2X_STATE_OPEN) || + (bnx2x_get_q_logical_state(bp, + &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != + BNX2X_Q_LOGICAL_STATE_ACTIVE)) + return 0; + + /* send q. update ramrod to configure Tx switching */ + memset(&q_params, 0, sizeof(q_params)); + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, + &q_params.params.update.update_flags); + if (enable) + __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, + &q_params.params.update.update_flags); + else + __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, + &q_params.params.update.update_flags); + + /* send the ramrod on all the queues of the PF */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Set the appropriate Queue object */ + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure Tx switching\n"); + return rc; + } + } + + DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); + return 0; +} + +int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) +{ + struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); + + if (!IS_SRIOV(bp)) { + BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. 
Check dmesg for errors in probe stage\n"); + return -EINVAL; + } + + DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", + num_vfs_param, BNX2X_NR_VIRTFN(bp)); + + /* HW channel is only operational when PF is up */ + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); + return -EINVAL; + } + + /* we are always bound by the total_vfs in the configuration space */ + if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", + num_vfs_param, BNX2X_NR_VIRTFN(bp)); + num_vfs_param = BNX2X_NR_VIRTFN(bp); + } + + bp->requested_nr_virtfn = num_vfs_param; + if (num_vfs_param == 0) { + bnx2x_set_pf_tx_switching(bp, false); + bnx2x_disable_sriov(bp); + return 0; + } else { + return bnx2x_enable_sriov(bp); + } +} + +#define IGU_ENTRY_SIZE 4 + +int bnx2x_enable_sriov(struct bnx2x *bp) +{ + int rc = 0, req_vfs = bp->requested_nr_virtfn; + int vf_idx, sb_idx, vfq_idx, qcount, first_vf; + u32 igu_entry, address; + u16 num_vf_queues; + + if (req_vfs == 0) + return 0; + + first_vf = bp->vfdb->sriov.first_vf_in_pf; + + /* statically distribute vf sb pool between VFs */ + num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, + BP_VFDB(bp)->vf_sbs_pool / req_vfs); + + /* zero previous values learned from igu cam */ + for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + vf->sb_count = 0; + vf_sb_count(BP_VF(bp, vf_idx)) = 0; + } + bp->vfdb->vf_sbs_pool = 0; + + /* prepare IGU cam */ + sb_idx = BP_VFDB(bp)->first_vf_igu_entry; + address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { + igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | + vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | + IGU_REG_MAPPING_MEMORY_VALID; + DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", + sb_idx, vf_idx); + REG_WR(bp, address, igu_entry); + sb_idx++; + address += IGU_ENTRY_SIZE; + } + } + + /* Reinitialize vf database according to igu cam */ + bnx2x_get_vf_igu_cam_info(bp); + + DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", + BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); + + qcount = 0; + for_each_vf(bp, vf_idx) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + /* set local queue arrays */ + vf->vfqs = &bp->vfdb->vfqs[qcount]; + qcount += vf_sb_count(vf); + bnx2x_iov_static_resc(bp, vf); + } + + /* prepare msix vectors in VF configuration space - the value in the + * PCI configuration space should be the index of the last entry, + * namely one less than the actual size of the table + */ + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); + REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, + num_vf_queues - 1); + DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", + vf_idx, num_vf_queues - 1); + } + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* enable sriov. This will probe all the VFs, and consequentially cause + * the "acquire" messages to appear on the VF PF channel. 
+ */ + DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); + bnx2x_disable_sriov(bp); + + rc = bnx2x_set_pf_tx_switching(bp, true); + if (rc) + return rc; + + rc = pci_enable_sriov(bp->pdev, req_vfs); + if (rc) { + BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); + return rc; + } + DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); + return req_vfs; +} + +void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) +{ + int vfidx; + struct pf_vf_bulletin_content *bulletin; + + DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); + for_each_vf(bp, vfidx) { + bulletin = BP_VF_BULLETIN(bp, vfidx); + if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) + bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); + } +} + +void bnx2x_disable_sriov(struct bnx2x *bp) +{ + if (pci_vfs_assigned(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return; + } + + pci_disable_sriov(bp->pdev); +} + +static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, + struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin, + bool test_queue) +{ + if (bp->state != BNX2X_STATE_OPEN) { + BNX2X_ERR("PF is down - can't utilize iov-related functionality\n"); + return -EINVAL; + } + + if (!IS_SRIOV(bp)) { + BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n"); + return -EINVAL; + } + + if (vfidx >= BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n", + vfidx, BNX2X_NR_VIRTFN(bp)); + return -EINVAL; + } + + /* init members */ + *vf = BP_VF(bp, vfidx); + *bulletin = BP_VF_BULLETIN(bp, vfidx); + + if (!*vf) { + BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx); + return -EINVAL; + } + + if (test_queue && !(*vf)->vfqs) { + BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n", + vfidx); + return -EINVAL; + } + + if (!*bulletin) { + BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n", + vfidx); + return -EINVAL; + } + + return 0; +} + +int bnx2x_get_vf_config(struct net_device *dev, int vfidx, + struct ifla_vf_info *ivi) +{ + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_virtf *vf = NULL; + struct pf_vf_bulletin_content *bulletin = NULL; + struct bnx2x_vlan_mac_obj *mac_obj; + struct bnx2x_vlan_mac_obj *vlan_obj; + int rc; + + /* sanity and init */ + rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); + if (rc) + return rc; + + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); + if (!mac_obj || !vlan_obj) { + BNX2X_ERR("VF partially initialized\n"); + return -EINVAL; + } + + ivi->vf = vfidx; + ivi->qos = 0; + ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ + ivi->min_tx_rate = 0; + ivi->spoofchk = 1; /*always enabled */ + if (vf->state == VF_ENABLED) { + /* mac and vlan are in vlan_mac objects */ + if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { + mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, + 0, ETH_ALEN); + vlan_obj->get_n_elements(bp, vlan_obj, 1, + (u8 *)&ivi->vlan, 0, + VLAN_HLEN); + } + } else { + mutex_lock(&bp->vfdb->bulletin_mutex); + /* mac */ + if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) + /* mac configured by ndo so its in bulletin board */ + memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); + else + /* function has not been loaded yet. 
Show mac as 0s */ + eth_zero_addr(ivi->mac); + + /* vlan */ + if (bulletin->valid_bitmap & (1 << VLAN_VALID)) + /* vlan configured by ndo so its in bulletin board */ + memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); + else + /* function has not been loaded yet. Show vlans as 0s */ + memset(&ivi->vlan, 0, VLAN_HLEN); + + mutex_unlock(&bp->vfdb->bulletin_mutex); + } + + return 0; +} + +/* New mac for VF. Consider these cases: + * 1. VF hasn't been acquired yet - save the mac in local bulletin board and + * supply at acquire. + * 2. VF has already been acquired but has not yet initialized - store in local + * bulletin board. mac will be posted on VF bulletin board after VF init. VF + * will configure this mac when it is ready. + * 3. VF has already initialized but has not yet setup a queue - post the new + * mac on VF's bulletin board right now. VF will configure this mac when it + * is ready. + * 4. VF has already set a queue - delete any macs already configured for this + * queue and manually config the new mac. + * In any event, once this function has been called refuse any attempts by the + * VF to configure any mac for itself except for this mac. In case of a race + * where the VF fails to see the new post on its bulletin board before sending a + * mac configuration request, the PF will simply fail the request and VF can try + * again after consulting its bulletin board. + */ +int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) +{ + struct bnx2x *bp = netdev_priv(dev); + int rc, q_logical_state; + struct bnx2x_virtf *vf = NULL; + struct pf_vf_bulletin_content *bulletin = NULL; + + if (!is_valid_ether_addr(mac)) { + BNX2X_ERR("mac address invalid\n"); + return -EINVAL; + } + + /* sanity and init */ + rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); + if (rc) + return rc; + + mutex_lock(&bp->vfdb->bulletin_mutex); + + /* update PF's copy of the VF's bulletin. 
Will no longer accept mac + * configuration requests from vf unless match this mac + */ + bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; + memcpy(bulletin->mac, mac, ETH_ALEN); + + /* Post update on VF's bulletin board */ + rc = bnx2x_post_vf_bulletin(bp, vfidx); + + /* release lock before checking return code */ + mutex_unlock(&bp->vfdb->bulletin_mutex); + + if (rc) { + BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); + return rc; + } + + q_logical_state = + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); + if (vf->state == VF_ENABLED && + q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { + /* configure the mac in device on this vf's queue */ + unsigned long ramrod_flags = 0; + struct bnx2x_vlan_mac_obj *mac_obj; + + /* User should be able to see failure reason in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); + + /* remove existing eth macs */ + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); + if (rc) { + BNX2X_ERR("failed to delete eth macs\n"); + rc = -EINVAL; + goto out; + } + + /* remove existing uc list macs */ + rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); + if (rc) { + BNX2X_ERR("failed to delete uc_list macs\n"); + rc = -EINVAL; + goto out; + } + + /* configure the new mac to device */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, + BNX2X_ETH_MAC, &ramrod_flags); + +out: + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); + } + + return rc; +} + +int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) +{ + struct bnx2x_queue_state_params q_params = {NULL}; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; + struct bnx2x_queue_update_params *update_params; + struct pf_vf_bulletin_content *bulletin = NULL; + struct bnx2x_rx_mode_ramrod_params rx_ramrod; + struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_vlan_mac_obj *vlan_obj; + unsigned long vlan_mac_flags = 0; + unsigned long ramrod_flags = 0; + struct bnx2x_virtf *vf = NULL; + unsigned long accept_flags; + int rc; + + if (vlan > 4095) { + BNX2X_ERR("illegal vlan value %d\n", vlan); + return -EINVAL; + } + + DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", + vfidx, vlan, 0); + + /* sanity and init */ + rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); + if (rc) + return rc; + + /* update PF's copy of the VF's bulletin. No point in posting the vlan + * to the VF since it doesn't have anything to do with it. But it useful + * to store it here in case the VF is not up yet and we can only + * configure the vlan later when it does. Treat vlan id 0 as remove the + * Host tag. + */ + mutex_lock(&bp->vfdb->bulletin_mutex); + + if (vlan > 0) + bulletin->valid_bitmap |= 1 << VLAN_VALID; + else + bulletin->valid_bitmap &= ~(1 << VLAN_VALID); + bulletin->vlan = vlan; + + mutex_unlock(&bp->vfdb->bulletin_mutex); + + /* is vf initialized and queue set up? 
*/ + if (vf->state != VF_ENABLED || + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != + BNX2X_Q_LOGICAL_STATE_ACTIVE) + return rc; + + /* User should be able to see error in system logs */ + if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) + return -EINVAL; + + /* must lock vfpf channel to protect against vf flows */ + bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + + /* remove existing vlans */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); + rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc) { + BNX2X_ERR("failed to delete vlans\n"); + rc = -EINVAL; + goto out; + } + + /* need to remove/add the VF's accept_any_vlan bit */ + accept_flags = bnx2x_leading_vfq(vf, accept_flags); + if (vlan) + clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + else + set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + + bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, + accept_flags); + bnx2x_leading_vfq(vf, accept_flags) = accept_flags; + bnx2x_config_rx_mode(bp, &rx_ramrod); + + /* configure the new vlan to device */ + memset(&ramrod_param, 0, sizeof(ramrod_param)); + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + ramrod_param.vlan_mac_obj = vlan_obj; + ramrod_param.ramrod_flags = ramrod_flags; + set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &ramrod_param.user_req.vlan_mac_flags); + ramrod_param.user_req.u.vlan.vlan = vlan; + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc) { + BNX2X_ERR("failed to configure vlan\n"); + rc = -EINVAL; + goto out; + } + + /* send queue update ramrod to configure default vlan and silent + * vlan removal + */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); + update_params = &q_params.params.update; + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, + &update_params->update_flags); + if (vlan == 0) { + /* if vlan is 0 then we want to leave the VF traffic + * untagged, and leave the incoming traffic untouched + * (i.e. do not remove any vlan tags). + */ + __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + } else { + /* configure default vlan to vf queue and set silent + * vlan removal (the vf remains unaware of this vlan). + */ + __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, + &update_params->update_flags); + __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, + &update_params->update_flags); + update_params->def_vlan = vlan; + update_params->silent_removal_value = + vlan & VLAN_VID_MASK; + update_params->silent_removal_mask = VLAN_VID_MASK; + } + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to configure default VLAN\n"); + goto out; + } + + + /* clear the flag indicating that this VF needs its vlan + * (will only be set if the HV configured the Vlan before vf was + * up and we were called because the VF came up later + */ +out: + vf->cfg_flags &= ~VF_CFG_VLAN; + bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); + + return rc; +} + +/* crc is the first field in the bulletin board. Compute the crc over the + * entire bulletin board excluding the crc field itself. 
Use the length field + * as the Bulletin Board was posted by a PF with possibly a different version + * from the vf which will sample it. Therefore, the length is computed by the + * PF and then used blindly by the VF. + */ +u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin) +{ + return crc32(BULLETIN_CRC_SEED, + ((u8 *)bulletin) + sizeof(bulletin->crc), + bulletin->length - sizeof(bulletin->crc)); +} + +/* Check for new posts on the bulletin board */ +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) +{ + struct pf_vf_bulletin_content *bulletin; + int attempts; + + /* sampling structure in mid post may result with corrupted data + * validate crc to ensure coherency. + */ + for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { + u32 crc; + + /* sample the bulletin board */ + memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, + sizeof(union pf_vf_bulletin)); + + crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); + + if (bp->shadow_bulletin.content.crc == crc) + break; + + BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", + bp->shadow_bulletin.content.crc, crc); + } + + if (attempts >= BULLETIN_ATTEMPTS) { + BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n", + attempts); + return PFVF_BULLETIN_CRC_ERR; + } + bulletin = &bp->shadow_bulletin.content; + + /* bulletin board hasn't changed since last sample */ + if (bp->old_bulletin.version == bulletin->version) + return PFVF_BULLETIN_UNCHANGED; + + /* the mac address in bulletin board is valid and is new */ + if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID && + !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { + /* update new mac to net device */ + memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); + } + + if (bulletin->valid_bitmap & (1 << LINK_VALID)) { + DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n", + bulletin->link_speed, bulletin->link_flags); + + bp->vf_link_vars.line_speed = bulletin->link_speed; + bp->vf_link_vars.link_report_flags = 0; + /* Link is down */ + if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN) + __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, + &bp->vf_link_vars.link_report_flags); + /* Full DUPLEX */ + if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX) + __set_bit(BNX2X_LINK_REPORT_FD, + &bp->vf_link_vars.link_report_flags); + /* Rx Flow Control is ON */ + if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON) + __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, + &bp->vf_link_vars.link_report_flags); + /* Tx Flow Control is ON */ + if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON) + __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, + &bp->vf_link_vars.link_report_flags); + __bnx2x_link_report(bp); + } + + /* copy new bulletin board to bp */ + memcpy(&bp->old_bulletin, bulletin, + sizeof(struct pf_vf_bulletin_content)); + + return PFVF_BULLETIN_UPDATED; +} + +void bnx2x_timer_sriov(struct bnx2x *bp) +{ + bnx2x_sample_bulletin(bp); + + /* if channel is down we need to self destruct */ + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, + BNX2X_MSG_IOV); +} + +void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) +{ + /* vf doorbells are embedded within the regview */ + return bp->regview + PXP_VF_ADDR_DB_START; +} + +void bnx2x_vf_pci_dealloc(struct bnx2x *bp) +{ + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); +} + +int 
bnx2x_vf_pci_alloc(struct bnx2x *bp) +{ + mutex_init(&bp->vf2pf_mutex); + + /* allocate vf2pf mailbox for vf to pf channel */ + bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + if (!bp->vf2pf_mbox) + goto alloc_mem_err; + + /* allocate pf 2 vf bulletin board */ + bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); + if (!bp->pf2vf_bulletin) + goto alloc_mem_err; + + bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); + + return 0; + +alloc_mem_err: + bnx2x_vf_pci_dealloc(bp); + return -ENOMEM; +} + +void bnx2x_iov_channel_down(struct bnx2x *bp) +{ + int vf_idx; + struct pf_vf_bulletin_content *bulletin; + + if (!IS_SRIOV(bp)) + return; + + for_each_vf(bp, vf_idx) { + /* locate this VFs bulletin board and update the channel down + * bit + */ + bulletin = BP_VF_BULLETIN(bp, vf_idx); + bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; + + /* update vf bulletin board */ + bnx2x_post_vf_bulletin(bp, vf_idx); + } +} + +void bnx2x_iov_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); + + if (!netif_running(bp->dev)) + return; + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, + &bp->iov_task_state)) + bnx2x_vf_handle_flr_event(bp); + + if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, + &bp->iov_task_state)) + bnx2x_vf_mbx(bp); +} + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) +{ + smp_mb__before_atomic(); + set_bit(flag, &bp->iov_task_state); + smp_mb__after_atomic(); + DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); + queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h new file mode 100644 index 000000000..66ee62a04 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -0,0 +1,608 @@ +/* bnx2x_sriov.h: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Ariel Elior + * Written by: Shmulik Ravid + * Ariel Elior + */ +#ifndef BNX2X_SRIOV_H +#define BNX2X_SRIOV_H + +#include "bnx2x_vfpf.h" +#include "bnx2x.h" + +enum sample_bulletin_result { + PFVF_BULLETIN_UNCHANGED, + PFVF_BULLETIN_UPDATED, + PFVF_BULLETIN_CRC_ERR +}; + +#ifdef CONFIG_BNX2X_SRIOV + +extern struct workqueue_struct *bnx2x_iov_wq; + +/* The bnx2x device structure holds vfdb structure described below. + * The VF array is indexed by the relative vfid. 
+ */ +#define BNX2X_VF_MAX_QUEUES 16 +#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8 + +struct bnx2x_sriov { + u32 first_vf_in_pf; + + /* standard SRIOV capability fields, mostly for debugging */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total; /* total VFs associated with the PF */ + u16 initial; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ +}; + +/* bars */ +struct bnx2x_vf_bar { + u64 bar; + u32 size; +}; + +struct bnx2x_vf_bar_info { + struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; + u8 nr_bars; +}; + +/* vf queue (used both for rx or tx) */ +struct bnx2x_vf_queue { + struct eth_context *cxt; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* VLANs object */ + struct bnx2x_vlan_mac_obj vlan_obj; + atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ + unsigned long accept_flags; /* last accept flags configured */ + + /* Queue Slow-path State object */ + struct bnx2x_queue_sp_obj sp_obj; + + u32 cid; + u16 index; + u16 sb_idx; + bool is_leading; + bool sp_initialized; +}; + +/* struct bnx2x_vf_queue_construct_params - prepare queue construction + * parameters: q-init, q-setup and SB index + */ +struct bnx2x_vf_queue_construct_params { + struct bnx2x_queue_state_params qstate; + struct bnx2x_queue_setup_params prep_qsetup; +}; + +/* forward */ +struct bnx2x_virtf; + +/* VFOP definitions */ + +struct bnx2x_vf_mac_vlan_filter { + int type; +#define BNX2X_VF_FILTER_MAC 1 +#define BNX2X_VF_FILTER_VLAN 2 + + bool add; + u8 *mac; + u16 vid; +}; + +struct bnx2x_vf_mac_vlan_filters { + int count; + struct bnx2x_vf_mac_vlan_filter filters[]; +}; + +/* vf context */ +struct bnx2x_virtf { + u16 cfg_flags; +#define VF_CFG_STATS 0x0001 +#define VF_CFG_FW_FC 0x0002 +#define VF_CFG_TPA 0x0004 +#define VF_CFG_INT_SIMD 0x0008 +#define VF_CACHE_LINE 0x0010 +#define VF_CFG_VLAN 0x0020 +#define VF_CFG_STATS_COALESCE 0x0040 +#define VF_CFG_EXT_BULLETIN 0x0080 + u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO + * IFLA_VF_LINK_STATE_ENABLE + * IFLA_VF_LINK_STATE_DISABLE + */ + u8 state; +#define VF_FREE 0 /* VF ready to be acquired holds no resc */ +#define VF_ACQUIRED 1 /* VF acquired, but not initialized */ +#define VF_ENABLED 2 /* VF Enabled */ +#define VF_RESET 3 /* VF FLR'd, pending cleanup */ + + bool flr_clnup_stage; /* true during flr cleanup */ + + /* dma */ + dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + u16 stats_stride; + dma_addr_t spq_map; + dma_addr_t bulletin_map; + + /* Allocated resources counters. Before the VF is acquired, the + * counters hold the following values: + * + * - xxq_count = 0 as the queues memory is not allocated yet. + * + * - sb_count = The number of status blocks configured for this VF in + * the IGU CAM. Initially read during probe. + * + * - xx_rules_count = The number of rules statically and equally + * allocated for each VF, during PF load. 
+ */ + struct vf_pf_resc_request alloc_resc; +#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) +#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) +#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) +#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) +#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) +#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) + /* Hide a single vlan filter credit for the hypervisor */ +#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1) + + u8 sb_count; /* actual number of SBs */ + u8 igu_base_id; /* base igu status block id */ + + struct bnx2x_vf_queue *vfqs; +#define LEADING_IDX 0 +#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) +#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) + + u8 index; /* index in the vf array */ + u8 abs_vfid; + u8 sp_cl_id; + u32 error; /* 0 means all's-well */ + + /* BDF */ + unsigned int bus; + unsigned int devfn; + + /* bars */ + struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; + + /* set-mac ramrod state 1-pending, 0-done */ + unsigned long filter_state; + + /* leading rss client id ~~ the client id of the first rxq, must be + * set for each txq. + */ + int leading_rss; + + /* MCAST object */ + int mcast_list_len; + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* slow-path operations */ + struct mutex op_mutex; /* one vfop at a time mutex */ + enum channel_tlvs op_current; + + u8 fp_hsi; +}; + +#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) + +#define for_each_vf(bp, var) \ + for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++) + +#define for_each_vfq(vf, var) \ + for ((var) = 0; (var) < vf_rxq_count(vf); (var)++) + +#define for_each_vf_sb(vf, var) \ + for ((var) = 0; (var) < vf_sb_count(vf); (var)++) + +#define is_vf_multi(vf) (vf_rxq_count(vf) > 1) + +#define HW_VF_HANDLE(bp, abs_vfid) \ + (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4)) + +#define FW_PF_MAX_HANDLE 8 + +#define FW_VF_HANDLE(abs_vfid) \ + (abs_vfid + FW_PF_MAX_HANDLE) + +/* locking and unlocking the channel mutex */ +void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs tlv); + +void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, + enum channel_tlvs expected_tlv); + +/* VF mail box (aka vf-pf channel) */ + +/* a container for the bi-directional vf<-->pf messages. 
+ * The actual response will be placed according to the offset parameter + * provided in the request + */ + +#define MBX_MSG_ALIGN 8 +#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \ + MBX_MSG_ALIGN)) + +struct bnx2x_vf_mbx_msg { + union vfpf_tlvs req; + union pfvf_tlvs resp; +}; + +struct bnx2x_vf_mbx { + struct bnx2x_vf_mbx_msg *msg; + dma_addr_t msg_mapping; + + /* VF GPA address */ + u32 vf_addr_lo; + u32 vf_addr_hi; + + struct vfpf_first_tlv first_tlv; /* saved VF request header */ +}; + +struct bnx2x_vf_sp { + union { + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + union { + struct eth_classify_rules_ramrod_data e2; + } vlan_rdata; + + union { + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_data; + + union { + struct eth_rss_update_ramrod_data e2; + } rss_rdata; +}; + +struct hw_dma { + void *addr; + dma_addr_t mapping; + size_t size; +}; + +struct bnx2x_vfdb { +#define BP_VFDB(bp) ((bp)->vfdb) + /* vf array */ + struct bnx2x_virtf *vfs; +#define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \ + &((bp)->vfdb->vfs[idx]) : NULL) +#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) + + /* queue array - for all vfs */ + struct bnx2x_vf_queue *vfqs; + + /* vf HW contexts */ + struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; +#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[i]) + + /* SR-IOV information */ + struct bnx2x_sriov sriov; + struct hw_dma mbx_dma; +#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) + struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; +#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[vfid])) + + struct hw_dma bulletin_dma; +#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) +#define BP_VF_BULLETIN(bp, vf) \ + (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \ + + (vf)) + + struct hw_dma sp_dma; +#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \ + (vf)->index * sizeof(struct bnx2x_vf_sp) + \ + offsetof(struct bnx2x_vf_sp, field)) +#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ + (vf)->index * sizeof(struct bnx2x_vf_sp) + \ + offsetof(struct bnx2x_vf_sp, field)) + +#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) + u32 flrd_vfs[FLRD_VFS_DWORDS]; + + /* the number of msix vectors belonging to this PF designated for VFs */ + u16 vf_sbs_pool; + u16 first_vf_igu_entry; + + /* sp_rtnl synchronization */ + struct mutex event_mutex; + u64 event_occur; + + /* bulletin board update synchronization */ + struct mutex bulletin_mutex; +}; + +/* queue access */ +static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) +{ + return &(vf->vfqs[index]); +} + +/* FW ids */ +static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) +{ + return vf->igu_base_id + sb_idx; +} + +static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx) +{ + return vf_igu_sb(vf, sb_idx); +} + +static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + return vf->igu_base_id + q->index; +} + +static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + return vf->leading_rss; + else + return vfq_cl_id(vf, q); +} + +static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) +{ + return vfq_cl_id(vf, q); +} + +/* global iov routines */ +int 
bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line); +int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param); +void bnx2x_iov_remove_one(struct bnx2x *bp); +void bnx2x_iov_free_mem(struct bnx2x *bp); +int bnx2x_iov_alloc_mem(struct bnx2x *bp); +int bnx2x_iov_nic_init(struct bnx2x *bp); +int bnx2x_iov_chip_cleanup(struct bnx2x *bp); +void bnx2x_iov_init_dq(struct bnx2x *bp); +void bnx2x_iov_init_dmae(struct bnx2x *bp); +void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj); +int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); +void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); +void bnx2x_iov_storm_stats_update(struct bnx2x *bp); +/* global vf mailbox routines */ +void bnx2x_vf_mbx(struct bnx2x *bp); +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event); +void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); + +/* CORE VF API */ +typedef u8 bnx2x_mac_addr_t[ETH_ALEN]; + +/* acquire */ +int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vf_pf_resc_request *resc); +/* init */ +int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + dma_addr_t *sb_map); + +/* VFOP queue construction helpers */ +void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx); + +void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_queue_init_params *init_params, + struct bnx2x_queue_setup_params *setup_params, + u16 q_idx, u16 sb_idx); + +void bnx2x_vfop_qctor_prep(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q, + struct bnx2x_vf_queue_construct_params *p, + unsigned long q_type); + +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mac_vlan_filters *filters, + int qid, bool drv_only); + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, + struct bnx2x_vf_queue_construct_params *qctor); + +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, + bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); + +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, + int qid, unsigned long accept_flags); + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_config_rss_params *rss); + +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct vfpf_tpa_tlv *tlv, + struct bnx2x_queue_update_tpa_params *params); + +/* VF release ~ VF close + VF release-resources + * + * Release is the ultimate SW shutdown and is called whenever an + * irrecoverable error is encountered. 
+ */ +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf); +int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); +u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); + +/* FLR routines */ + +/* VF FLR helpers */ +int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); +void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); + +/* Handles an FLR (or VF_DISABLE) notification form the MCP */ +void bnx2x_vf_handle_flr_event(struct bnx2x *bp); + +bool bnx2x_tlv_supported(u16 tlvtype); + +u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin); +int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); +void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, + bool support_long); + +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); + +/* VF side vfpf channel functions */ +int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count); +int bnx2x_vfpf_release(struct bnx2x *bp); +int bnx2x_vfpf_release(struct bnx2x *bp); +int bnx2x_vfpf_init(struct bnx2x *bp); +void bnx2x_vfpf_close_vf(struct bnx2x *bp); +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading); +int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params); +int bnx2x_vfpf_set_mcast(struct net_device *dev); +int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); + +static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, + size_t buf_len) +{ + strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); +} + +static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, + struct bnx2x_fastpath *fp) +{ + return PXP_VF_ADDR_USDM_QUEUES_START + + bp->acquire_resp.resc.hw_qid[fp->index] * + sizeof(struct ustorm_queue_zone_data); +} + +enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); +void bnx2x_timer_sriov(struct bnx2x *bp); +void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); +void bnx2x_vf_pci_dealloc(struct bnx2x *bp); +int bnx2x_vf_pci_alloc(struct bnx2x *bp); +int bnx2x_enable_sriov(struct bnx2x *bp); +void bnx2x_disable_sriov(struct bnx2x *bp); +static inline int bnx2x_vf_headroom(struct bnx2x *bp) +{ + return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; +} +void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); +int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); +void bnx2x_iov_channel_down(struct bnx2x *bp); + +void bnx2x_iov_task(struct work_struct *work); + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag); + +void bnx2x_iov_link_update(struct bnx2x *bp); +int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx); + +int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state); + +#else /* CONFIG_BNX2X_SRIOV */ + +static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, + struct bnx2x_queue_sp_obj **q_obj) {} +static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} +static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, + union event_ring_elem *elem) {return 1; } +static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} +static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) {} +static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } +static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} +static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {} +static inline int 
bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} +static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, + int num_vfs_param) {return 0; } +static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} +static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; } +static inline void bnx2x_disable_sriov(struct bnx2x *bp) {} +static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, + u8 tx_count, u8 rx_count) {return 0; } +static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } +static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} +static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } +static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, + u8 vf_qid, bool set) {return 0; } +static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) {return 0; } +static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; } +static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } +static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } +static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; } +static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {} +static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, + size_t buf_len) {} +static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, + struct bnx2x_fastpath *fp) {return 0; } +static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) +{ + return PFVF_BULLETIN_UNCHANGED; +} +static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} + +static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) +{ + return NULL; +} + +static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {} +static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } +static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} +static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } +static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} + +static inline void bnx2x_iov_task(struct work_struct *work) {} +static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {} +static inline void bnx2x_iov_link_update(struct bnx2x *bp) {} +static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; } + +static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf, + int link_state) {return 0; } +struct pf_vf_bulletin_content; +static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, + bool support_long) {} + +#endif /* CONFIG_BNX2X_SRIOV */ +#endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c new file mode 100644 index 000000000..69d699f07 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -0,0 +1,2002 @@ +/* bnx2x_stats.c: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "bnx2x_stats.h" +#include "bnx2x_cmn.h" +#include "bnx2x_sriov.h" + +/* Statistics */ + +/* + * General service functions + */ + +static inline long bnx2x_hilo(u32 *hiref) +{ + u32 lo = *(hiref + 1); +#if (BITS_PER_LONG == 64) + u32 hi = *hiref; + + return HILO_U64(hi, lo); +#else + return lo; +#endif +} + +static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) +{ + u16 res = 0; + + /* 'newest' convention - shmem2 cotains the size of the port stats */ + if (SHMEM2_HAS(bp, sizeof_port_stats)) { + u32 size = SHMEM2_RD(bp, sizeof_port_stats); + if (size) + res = size; + + /* prevent newer BC from causing buffer overflow */ + if (res > sizeof(struct host_port_stats)) + res = sizeof(struct host_port_stats); + } + + /* Older convention - all BCs support the port stats' fields up until + * the 'not_used' field + */ + if (!res) { + res = offsetof(struct host_port_stats, not_used) + 4; + + /* if PFC stats are supported by the MFW, DMA them as well */ + if (bp->flags & BC_SUPPORTS_PFC_STATS) { + res += offsetof(struct host_port_stats, + pfc_frames_rx_lo) - + offsetof(struct host_port_stats, + pfc_frames_tx_hi) + 4 ; + } + } + + res >>= 2; + + WARN_ON(res > 2 * DMAE_LEN32_RD_MAX); + return res; +} + +/* + * Init service functions + */ + +static void bnx2x_dp_stats(struct bnx2x *bp) +{ + int i; + + DP(BNX2X_MSG_STATS, "dumping stats:\n" + "fw_stats_req\n" + " hdr\n" + " cmd_num %d\n" + " reserved0 %d\n" + " drv_stats_counter %d\n" + " reserved1 %d\n" + " stats_counters_addrs %x %x\n", + bp->fw_stats_req->hdr.cmd_num, + bp->fw_stats_req->hdr.reserved0, + bp->fw_stats_req->hdr.drv_stats_counter, + bp->fw_stats_req->hdr.reserved1, + bp->fw_stats_req->hdr.stats_counters_addrs.hi, + bp->fw_stats_req->hdr.stats_counters_addrs.lo); + + for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) { + DP(BNX2X_MSG_STATS, + "query[%d]\n" + " kind %d\n" + " index %d\n" + " funcID %d\n" + " reserved %d\n" + " address %x %x\n", + i, bp->fw_stats_req->query[i].kind, + bp->fw_stats_req->query[i].index, + bp->fw_stats_req->query[i].funcID, + bp->fw_stats_req->query[i].reserved, + bp->fw_stats_req->query[i].address.hi, + bp->fw_stats_req->query[i].address.lo); + } +} + +/* Post the next statistics ramrod. Protect it with the spin in + * order to ensure the strict order between statistics ramrods + * (each ramrod has a sequence number passed in a + * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be + * sent in order). 
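+ * stats_pending guards against posting a new ramrod before the previous + * one has been handled; it is cleared in bnx2x_storm_stats_update().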
+ */ +static void bnx2x_storm_stats_post(struct bnx2x *bp) +{ + int rc; + + if (bp->stats_pending) + return; + + bp->fw_stats_req->hdr.drv_stats_counter = + cpu_to_le16(bp->stats_counter++); + + DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", + le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); + + /* adjust the ramrod to include VF queues statistics */ + bnx2x_iov_adjust_stats_req(bp); + bnx2x_dp_stats(bp); + + /* send FW stats ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, + U64_HI(bp->fw_stats_req_mapping), + U64_LO(bp->fw_stats_req_mapping), + NONE_CONNECTION_TYPE); + if (rc == 0) + bp->stats_pending = 1; +} + +static void bnx2x_hw_stats_post(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + *stats_comp = DMAE_COMP_VAL; + if (CHIP_REV_IS_SLOW(bp)) + return; + + /* Update MCP's statistics if possible */ + if (bp->func_stx) + memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, + sizeof(bp->func_stats)); + + /* loader */ + if (bp->executer_idx) { + int loader_idx = PMF_DMAE_C(bp); + u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, + true, DMAE_COMP_GRC); + opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); + + memset(dmae, 0, sizeof(struct dmae_command)); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); + dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + + sizeof(struct dmae_command) * + (loader_idx + 1)) >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct dmae_command) >> 2; + if (CHIP_IS_E1(bp)) + dmae->len--; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + *stats_comp = 0; + bnx2x_post_dmae(bp, dmae, loader_idx); + + } else if (bp->func_stx) { + *stats_comp = 0; + bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp); + } +} + +static void bnx2x_stats_comp(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + int cnt = 10; + + might_sleep(); + while (*stats_comp != DMAE_COMP_VAL) { + if (!cnt) { + BNX2X_ERR("timeout waiting for stats finished\n"); + break; + } + cnt--; + usleep_range(1000, 2000); + } +} + +/* + * Statistics service functions + */ + +/* should be called under stats_sema */ +static void bnx2x_stats_pmf_update(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->port.pmf || !bp->port.port_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0); + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC); + dmae->src_addr_lo = bp->port.port_stx >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->len = DMAE_LEN32_RD_MAX; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) + + DMAE_LEN32_RD_MAX * 4); + dmae->len = 
bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX; + + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); +} + +static void bnx2x_port_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae; + int port = BP_PORT(bp); + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 mac_addr; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->link_vars.link_up || !bp->port.pmf) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + + /* MCP */ + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, + true, DMAE_COMP_GRC); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = bnx2x_get_port_stats_dma_len(bp); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* MAC */ + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, + true, DMAE_COMP_GRC); + + /* EMAC is special */ + if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { + mac_addr = (port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0); + + /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_RX_STAT_AC_28 */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); + dmae->len = 1; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (mac_addr + + EMAC_REG_EMAC_TX_STAT_AC) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); + dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + u32 tx_src_addr_lo, rx_src_addr_lo; + u16 rx_len, tx_len; + + /* configure the params according to MAC type */ + switch (bp->link_vars.mac_type) { + case MAC_TYPE_BMAC: + mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM); + + /* BIGMAC_REGISTER_TX_STAT_GTPKT .. + BIGMAC_REGISTER_TX_STAT_GTBYT */ + if (CHIP_IS_E1x(bp)) { + tx_src_addr_lo = (mac_addr + + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + rx_src_addr_lo = (mac_addr + + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + } else { + tx_src_addr_lo = (mac_addr + + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; + tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; + rx_src_addr_lo = (mac_addr + + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; + rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; + } + break; + + case MAC_TYPE_UMAC: /* handled by MSTAT */ + case MAC_TYPE_XMAC: /* handled by MSTAT */ + default: + mac_addr = port ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; + tx_src_addr_lo = (mac_addr + + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; + rx_src_addr_lo = (mac_addr + + MSTAT_REG_RX_STAT_GR64_LO) >> 2; + tx_len = sizeof(bp->slowpath-> + mac_stats.mstat_stats.stats_tx) >> 2; + rx_len = sizeof(bp->slowpath-> + mac_stats.mstat_stats.stats_rx) >> 2; + break; + } + + /* TX stats */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = tx_src_addr_lo; + dmae->src_addr_hi = 0; + dmae->len = tx_len; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* RX stats */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_hi = 0; + dmae->src_addr_lo = rx_src_addr_lo; + dmae->dst_addr_lo = + U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); + dmae->dst_addr_hi = + U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); + dmae->len = rx_len; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + /* NIG */ + if (!CHIP_IS_E3(bp)) { + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : + NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : + NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, + true, DMAE_COMP_PCI); + dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_BRB_DISCARD : + NIG_REG_STAT0_BRB_DISCARD) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); + dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; + + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +static void bnx2x_func_stats_init(struct bnx2x *bp) +{ + struct dmae_command *dmae = &bp->stats_dmae; + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + /* sanity */ + if (!bp->func_stx) { + BNX2X_ERR("BUG!\n"); + return; + } + + bp->executer_idx = 0; + memset(dmae, 0, sizeof(struct dmae_command)); + + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, + true, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; +} + +/* should be called under stats_sema */ +static void bnx2x_stats_start(struct bnx2x *bp) +{ + if (IS_PF(bp)) { + if (bp->port.pmf) + bnx2x_port_stats_init(bp); + + else if (bp->func_stx) + bnx2x_func_stats_init(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); + } +} + +static void bnx2x_stats_pmf_start(struct bnx2x *bp) +{ + bnx2x_stats_comp(bp); + bnx2x_stats_pmf_update(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_stats_restart(struct bnx2x *bp) +{ + /* vfs travel through here as part of the statistics FSM, but no action + * is required + */ + if (IS_VF(bp)) + return; + + bnx2x_stats_comp(bp); + bnx2x_stats_start(bp); +} + +static void bnx2x_bmac_stats_update(struct bnx2x *bp) +{ + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct { + u32 lo; + u32 hi; + } diff; + + if (CHIP_IS_E1x(bp)) { + struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); + + /* the macros below will use "bmac1_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, 
tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + + } else { + struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); + + /* the macros below will use "bmac2_stats" type */ + UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); + UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); + UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); + UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); + UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); + UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); + UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); + UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); + UPDATE_STAT64(tx_stat_gt127, + tx_stat_etherstatspkts65octetsto127octets); + UPDATE_STAT64(tx_stat_gt255, + tx_stat_etherstatspkts128octetsto255octets); + UPDATE_STAT64(tx_stat_gt511, + tx_stat_etherstatspkts256octetsto511octets); + UPDATE_STAT64(tx_stat_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_STAT64(tx_stat_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); + UPDATE_STAT64(tx_stat_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); + + /* collect PFC stats */ + pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; + pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; + + pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; + pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; + } + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = + pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = + pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = + pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = + pstats->pfc_frames_tx_lo; +} + +static void bnx2x_mstat_stats_update(struct bnx2x *bp) +{ + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); + + ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); + ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); + ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); + ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); + ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); + ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); + + /* collect pfc stats */ + ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, + pstats->pfc_frames_tx_lo, 
new->stats_tx.tx_gtxpp_lo); + ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, + pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); + + ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); + ADD_STAT64(stats_tx.tx_gt127, + tx_stat_etherstatspkts65octetsto127octets); + ADD_STAT64(stats_tx.tx_gt255, + tx_stat_etherstatspkts128octetsto255octets); + ADD_STAT64(stats_tx.tx_gt511, + tx_stat_etherstatspkts256octetsto511octets); + ADD_STAT64(stats_tx.tx_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + ADD_STAT64(stats_tx.tx_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); + + ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); + ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); + ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); + + ADD_STAT64(stats_tx.tx_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); + + estats->etherstatspkts1024octetsto1522octets_hi = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; + estats->etherstatspkts1024octetsto1522octets_lo = + pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; + + estats->etherstatspktsover1522octets_hi = + pstats->mac_stx[1].tx_stat_mac_2047_hi; + estats->etherstatspktsover1522octets_lo = + pstats->mac_stx[1].tx_stat_mac_2047_lo; + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_4095_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_4095_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_9216_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_9216_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + pstats->mac_stx[1].tx_stat_mac_16383_hi, + estats->etherstatspktsover1522octets_lo, + pstats->mac_stx[1].tx_stat_mac_16383_lo); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxoffsent_lo; + + estats->pfc_frames_received_hi = + pstats->pfc_frames_rx_hi; + estats->pfc_frames_received_lo = + pstats->pfc_frames_rx_lo; + estats->pfc_frames_sent_hi = + pstats->pfc_frames_tx_hi; + estats->pfc_frames_sent_lo = + pstats->pfc_frames_tx_lo; +} + +static void bnx2x_emac_stats_update(struct bnx2x *bp) +{ + struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); + UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); + UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); + UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); + UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); + UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); + UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); + UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); + UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); + UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); + UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); + UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); + UPDATE_EXTEND_STAT(tx_stat_outxonsent); + UPDATE_EXTEND_STAT(tx_stat_outxoffsent); + 
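/* widen the remaining EMAC TX counters into their 64-bit mac_stx[1] counterparts */ +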
UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); + UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); + UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); + UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); + UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); + UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); + UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; + ADD_64(estats->pause_frames_received_hi, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, + estats->pause_frames_received_lo, + pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxonsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxonsent_lo; + ADD_64(estats->pause_frames_sent_hi, + pstats->mac_stx[1].tx_stat_outxoffsent_hi, + estats->pause_frames_sent_lo, + pstats->mac_stx[1].tx_stat_outxoffsent_lo); +} + +static int bnx2x_hw_stats_update(struct bnx2x *bp) +{ + struct nig_stats *new = bnx2x_sp(bp, nig_stats); + struct nig_stats *old = &(bp->port.old_nig_stats); + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct { + u32 lo; + u32 hi; + } diff; + + switch (bp->link_vars.mac_type) { + case MAC_TYPE_BMAC: + bnx2x_bmac_stats_update(bp); + break; + + case MAC_TYPE_EMAC: + bnx2x_emac_stats_update(bp); + break; + + case MAC_TYPE_UMAC: + case MAC_TYPE_XMAC: + bnx2x_mstat_stats_update(bp); + break; + + case MAC_TYPE_NONE: /* unreached */ + DP(BNX2X_MSG_STATS, + "stats updated by DMAE but no MAC active\n"); + return -1; + + default: /* unreached */ + BNX2X_ERR("Unknown MAC type\n"); + } + + ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, + new->brb_discard - old->brb_discard); + ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, + new->brb_truncate - old->brb_truncate); + + if (!CHIP_IS_E3(bp)) { + UPDATE_STAT64_NIG(egress_mac_pkt0, + etherstatspkts1024octetsto1522octets); + UPDATE_STAT64_NIG(egress_mac_pkt1, + etherstatspktsover1522octets); + } + + memcpy(old, new, sizeof(struct nig_stats)); + + memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), + sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = pstats->brb_drop_lo; + + pstats->host_port_stats_counter++; + + if (CHIP_IS_E3(bp)) { + u32 lpi_reg = BP_PORT(bp) ? 
MISC_REG_CPMU_LP_SM_ENT_CNT_P1 + : MISC_REG_CPMU_LP_SM_ENT_CNT_P0; + estats->eee_tx_lpi += REG_RD(bp, lpi_reg); + } + + if (!BP_NOMCP(bp)) { + u32 nig_timer_max = + SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); + if (nig_timer_max != estats->nig_timer_max) { + estats->nig_timer_max = nig_timer_max; + BNX2X_ERR("NIG timer max (%u)\n", + estats->nig_timer_max); + } + } + + return 0; +} + +static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) +{ + struct stats_counter *counters = &bp->fw_stats_data->storm_counters; + u16 cur_stats_counter; + /* Make sure we use the value of the counter + * used for sending the last stats ramrod. + */ + cur_stats_counter = bp->stats_counter - 1; + + /* are storm stats valid? */ + if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, + "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->xstats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, + "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->ustats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, + "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->cstats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, + "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->tstats_counter), bp->stats_counter); + return -EAGAIN; + } + return 0; +} + +static int bnx2x_storm_stats_update(struct bnx2x *bp) +{ + struct tstorm_per_port_stats *tport = + &bp->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &bp->fw_stats_data->pf.tstorm_pf_statistics; + struct host_func_stats *fstats = &bp->func_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; + int i; + + /* vfs stat counter is managed by pf */ + if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp)) + return -EAGAIN; + + estats->error_bytes_received_hi = 0; + estats->error_bytes_received_lo = 0; + + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct tstorm_per_queue_stats *tclient = + &bp->fw_stats_data->queue_stats[i]. + tstorm_queue_statistics; + struct tstorm_per_queue_stats *old_tclient = + &bnx2x_fp_stats(bp, fp)->old_tclient; + struct ustorm_per_queue_stats *uclient = + &bp->fw_stats_data->queue_stats[i]. + ustorm_queue_statistics; + struct ustorm_per_queue_stats *old_uclient = + &bnx2x_fp_stats(bp, fp)->old_uclient; + struct xstorm_per_queue_stats *xclient = + &bp->fw_stats_data->queue_stats[i]. 
+ xstorm_queue_statistics; + struct xstorm_per_queue_stats *old_xclient = + &bnx2x_fp_stats(bp, fp)->old_xclient; + struct bnx2x_eth_q_stats *qstats = + &bnx2x_fp_stats(bp, fp)->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + + u32 diff; + + DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n", + i, xclient->ucast_pkts_sent, + xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); + + DP(BNX2X_MSG_STATS, "---------------\n"); + + UPDATE_QSTAT(tclient->rcv_bcast_bytes, + total_broadcast_bytes_received); + UPDATE_QSTAT(tclient->rcv_mcast_bytes, + total_multicast_bytes_received); + UPDATE_QSTAT(tclient->rcv_ucast_bytes, + total_unicast_bytes_received); + + /* + * sum to total_bytes_received all + * unicast/multicast/broadcast + */ + qstats->total_bytes_received_hi = + qstats->total_broadcast_bytes_received_hi; + qstats->total_bytes_received_lo = + qstats->total_broadcast_bytes_received_lo; + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_multicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(qstats->total_bytes_received_hi, + qstats->total_unicast_bytes_received_hi, + qstats->total_bytes_received_lo, + qstats->total_unicast_bytes_received_lo); + + qstats->valid_bytes_received_hi = + qstats->total_bytes_received_hi; + qstats->valid_bytes_received_lo = + qstats->total_bytes_received_lo; + + UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, + total_unicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, + total_multicast_packets_received); + UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, + etherstatsoverrsizepkts, 32); + UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16); + + SUB_EXTEND_USTAT(ucast_no_buff_pkts, + total_unicast_packets_received); + SUB_EXTEND_USTAT(mcast_no_buff_pkts, + total_multicast_packets_received); + SUB_EXTEND_USTAT(bcast_no_buff_pkts, + total_broadcast_packets_received); + UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); + UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); + + UPDATE_QSTAT(xclient->bcast_bytes_sent, + total_broadcast_bytes_transmitted); + UPDATE_QSTAT(xclient->mcast_bytes_sent, + total_multicast_bytes_transmitted); + UPDATE_QSTAT(xclient->ucast_bytes_sent, + total_unicast_bytes_transmitted); + + /* + * sum to total_bytes_transmitted all + * unicast/multicast/broadcast + */ + qstats->total_bytes_transmitted_hi = + qstats->total_unicast_bytes_transmitted_hi; + qstats->total_bytes_transmitted_lo = + qstats->total_unicast_bytes_transmitted_lo; + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_broadcast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(qstats->total_bytes_transmitted_hi, + qstats->total_multicast_bytes_transmitted_hi, + qstats->total_bytes_transmitted_lo, + qstats->total_multicast_bytes_transmitted_lo); + + UPDATE_EXTEND_XSTAT(ucast_pkts_sent, + total_unicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(mcast_pkts_sent, + total_multicast_packets_transmitted); + UPDATE_EXTEND_XSTAT(bcast_pkts_sent, + total_broadcast_packets_transmitted); + + UPDATE_EXTEND_TSTAT(checksum_discard, + total_packets_received_checksum_discarded); + UPDATE_EXTEND_TSTAT(ttl0_discard, + total_packets_received_ttl0_discarded); + + UPDATE_EXTEND_XSTAT(error_drop_pkts, + 
total_transmitted_dropped_packets_error); + + /* TPA aggregations completed */ + UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations); + /* Number of network frames aggregated by TPA */ + UPDATE_EXTEND_E_USTAT(coalesced_pkts, + total_tpa_aggregated_frames); + /* Total number of bytes in completed TPA aggregations */ + UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes); + + UPDATE_ESTAT_QSTAT_64(total_tpa_bytes); + + UPDATE_FSTAT_QSTAT(total_bytes_received); + UPDATE_FSTAT_QSTAT(total_bytes_transmitted); + UPDATE_FSTAT_QSTAT(total_unicast_packets_received); + UPDATE_FSTAT_QSTAT(total_multicast_packets_received); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); + UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); + UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); + UPDATE_FSTAT_QSTAT(valid_bytes_received); + } + + ADD_64(estats->total_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + estats->total_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + ADD_64_LE(estats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + ADD_64_LE(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + + UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); + + ADD_64(estats->error_bytes_received_hi, + estats->rx_stat_ifhcinbadoctets_hi, + estats->error_bytes_received_lo, + estats->rx_stat_ifhcinbadoctets_lo); + + if (bp->port.pmf) { + struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; + UPDATE_FW_STAT(mac_filter_discard); + UPDATE_FW_STAT(mf_tag_discard); + UPDATE_FW_STAT(brb_truncate_discard); + UPDATE_FW_STAT(mac_discard); + } + + fstats->host_func_stats_start = ++fstats->host_func_stats_end; + + bp->stats_pending = 0; + + return 0; +} + +static void bnx2x_net_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct net_device_stats *nstats = &bp->dev->stats; + unsigned long tmp; + int i; + + nstats->rx_packets = + bnx2x_hilo(&estats->total_unicast_packets_received_hi) + + bnx2x_hilo(&estats->total_multicast_packets_received_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_received_hi); + + nstats->tx_packets = + bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + + bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); + + nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); + + nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); + + tmp = estats->mac_discard; + for_each_rx_queue(bp, i) { + struct tstorm_per_queue_stats *old_tclient = + &bp->fp_stats[i].old_tclient; + tmp += le32_to_cpu(old_tclient->checksum_discard); + } + nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; + + nstats->tx_dropped = 0; + + nstats->multicast = + bnx2x_hilo(&estats->total_multicast_packets_received_hi); + + nstats->collisions = + bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi); + + nstats->rx_length_errors = + bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) + + bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi); + nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) + + bnx2x_hilo(&estats->brb_truncate_hi); + nstats->rx_crc_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi); + nstats->rx_frame_errors = + bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); + 
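+	/* Illustration (editorial sketch, not part of the driver source):
+	 * every 64-bit counter in bnx2x_eth_stats is kept as an adjacent
+	 * _hi/_lo pair of u32s, and bnx2x_hilo() is assumed to take the
+	 * address of the _hi word and fold the pair back together, roughly
+	 *
+	 *	val = ((u64)*hi_ptr << 32) | *(hi_ptr + 1);
+	 *
+	 * (on 32-bit kernels it may well return only the low 32 bits, since
+	 * the net_device_stats fields are 'unsigned long' there).
+	 */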
nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); + nstats->rx_missed_errors = 0; + + nstats->rx_errors = nstats->rx_length_errors + + nstats->rx_over_errors + + nstats->rx_crc_errors + + nstats->rx_frame_errors + + nstats->rx_fifo_errors + + nstats->rx_missed_errors; + + nstats->tx_aborted_errors = + bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) + + bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi); + nstats->tx_carrier_errors = + bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi); + nstats->tx_fifo_errors = 0; + nstats->tx_heartbeat_errors = 0; + nstats->tx_window_errors = 0; + + nstats->tx_errors = nstats->tx_aborted_errors + + nstats->tx_carrier_errors + + bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi); +} + +static void bnx2x_drv_stats_update(struct bnx2x *bp) +{ + struct bnx2x_eth_stats *estats = &bp->eth_stats; + int i; + + for_each_queue(bp, i) { + struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bp->fp_stats[i].eth_q_stats_old; + + UPDATE_ESTAT_QSTAT(driver_xoff); + UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); + UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed); + UPDATE_ESTAT_QSTAT(hw_csum_err); + UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt); + } +} + +static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) +{ + u32 val; + + if (SHMEM2_HAS(bp, edebug_driver_if[1])) { + val = SHMEM2_RD(bp, edebug_driver_if[1]); + + if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) + return true; + } + + return false; +} + +static void bnx2x_stats_update(struct bnx2x *bp) +{ + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + if (bnx2x_edebug_stats_stopped(bp)) + return; + + if (IS_PF(bp)) { + if (*stats_comp != DMAE_COMP_VAL) + return; + + if (bp->port.pmf) + bnx2x_hw_stats_update(bp); + + if (bnx2x_storm_stats_update(bp)) { + if (bp->stats_pending++ == 3) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + } + return; + } + } else { + /* vf doesn't collect HW statistics, and doesn't get completions + * perform only update + */ + bnx2x_storm_stats_update(bp); + } + + bnx2x_net_stats_update(bp); + bnx2x_drv_stats_update(bp); + + /* vf is done */ + if (IS_VF(bp)) + return; + + if (netif_msg_timer(bp)) { + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", + estats->brb_drop_lo, estats->brb_truncate_lo); + } + + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); +} + +static void bnx2x_port_stats_stop(struct bnx2x *bp) +{ + struct dmae_command *dmae; + u32 opcode; + int loader_idx = PMF_DMAE_C(bp); + u32 *stats_comp = bnx2x_sp(bp, stats_comp); + + bp->executer_idx = 0; + + opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0); + + if (bp->port.port_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + if (bp->func_stx) + dmae->opcode = bnx2x_dmae_opcode_add_comp( + opcode, DMAE_COMP_GRC); + else + dmae->opcode = bnx2x_dmae_opcode_add_comp( + opcode, DMAE_COMP_PCI); + + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); + dmae->dst_addr_lo = bp->port.port_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = bnx2x_get_port_stats_dma_len(bp); + if (bp->func_stx) { + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } else { + dmae->comp_addr_lo = + U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = + U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + 
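+			/* Illustration (editorial sketch, not driver code):
+			 * with a PCI-type completion the DMAE engine is
+			 * expected to write DMAE_COMP_VAL into the stats_comp
+			 * word once the copy to port_stx is done, so the
+			 * eventual wait (bnx2x_stats_comp() is assumed to do
+			 * something of this shape) looks roughly like
+			 *
+			 *	while (*stats_comp != DMAE_COMP_VAL && --cnt)
+			 *		usleep_range(1000, 2000);
+			 */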
dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } + } + + if (bp->func_stx) { + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = + bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); + dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); + dmae->dst_addr_lo = bp->func_stx >> 2; + dmae->dst_addr_hi = 0; + dmae->len = sizeof(struct host_func_stats) >> 2; + dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); + dmae->comp_val = DMAE_COMP_VAL; + + *stats_comp = 0; + } +} + +static void bnx2x_stats_stop(struct bnx2x *bp) +{ + bool update = false; + + bnx2x_stats_comp(bp); + + if (bp->port.pmf) + update = (bnx2x_hw_stats_update(bp) == 0); + + update |= (bnx2x_storm_stats_update(bp) == 0); + + if (update) { + bnx2x_net_stats_update(bp); + + if (bp->port.pmf) + bnx2x_port_stats_stop(bp); + + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } +} + +static void bnx2x_stats_do_nothing(struct bnx2x *bp) +{ +} + +static const struct { + void (*action)(struct bnx2x *bp); + enum bnx2x_stats_state next_state; +} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { +/* state event */ +{ +/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, +/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, +/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, +/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} +}, +{ +/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, +/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, +/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, +/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} +} +}; + +void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) +{ + enum bnx2x_stats_state state = bp->stats_state; + + if (unlikely(bp->panic)) + return; + + /* Statistics update run from timer context, and we don't want to stop + * that context in case someone is in the middle of a transition. + * For other events, wait a bit until lock is taken. 
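+	 * (Illustrative note, not original driver text: the action and next
+	 * state for each (state, event) pair come from bnx2x_stats_stm[][]
+	 * above, e.g. STATS_EVENT_STOP in STATS_STATE_ENABLED runs
+	 * bnx2x_stats_stop() and moves the machine to STATS_STATE_DISABLED.)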
+	 */
+	if (down_trylock(&bp->stats_lock)) {
+		if (event == STATS_EVENT_UPDATE)
+			return;
+
+		DP(BNX2X_MSG_STATS,
+		   "Unlikely stats' lock contention [event %d]\n", event);
+		if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+			BNX2X_ERR("Failed to take stats lock [event %d]\n",
+				  event);
+			return;
+		}
+	}
+
+	bnx2x_stats_stm[state][event].action(bp);
+	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+
+	up(&bp->stats_lock);
+
+	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+		   state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->port.pmf || !bp->port.port_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+					 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+	dmae->dst_addr_lo = bp->port.port_stx >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = bnx2x_get_port_stats_dma_len(bp);
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+/* This function will prepare the statistics ramrod data so that we will
+ * only have to increment the statistics counter and send the ramrod each
+ * time we have to.
+ */
+static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+	int i;
+	int first_queue_query_index;
+	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+	dma_addr_t cur_data_offset;
+	struct stats_query_entry *cur_query_entry;
+
+	stats_hdr->cmd_num = bp->fw_stats_num;
+	stats_hdr->drv_stats_counter = 0;
+
+	/* storm_counters struct contains the counters of completed
+	 * statistics requests per storm which are incremented by FW
+	 * each time it completes handling a statistics ramrod. We will
+	 * check these counters in the timer handler and discard a
+	 * (statistics) ramrod completion.
+	 */
+	cur_data_offset = bp->fw_stats_data_mapping +
+			  offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+	stats_hdr->stats_counters_addrs.hi =
+		cpu_to_le32(U64_HI(cur_data_offset));
+	stats_hdr->stats_counters_addrs.lo =
+		cpu_to_le32(U64_LO(cur_data_offset));
+
+	/* prepare for the first stats ramrod (will be completed with
+	 * the counters equal to zero) - init counters to something different.
+ */ + memset(&bp->fw_stats_data->storm_counters, 0xff, + sizeof(struct stats_counter)); + + /**** Port FW statistics data ****/ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, port); + + cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PORT; + /* For port query index is a DONT CARE */ + cur_query_entry->index = BP_PORT(bp); + /* For port query funcID is a DONT CARE */ + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); + + /**** PF FW statistics data ****/ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, pf); + + cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_PF; + /* For PF query index is a DONT CARE */ + cur_query_entry->index = BP_PORT(bp); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); + + /**** FCoE FW statistics data ****/ + if (!NO_FCOE(bp)) { + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, fcoe); + + cur_query_entry = + &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX]; + + cur_query_entry->kind = STATS_TYPE_FCOE; + /* For FCoE query index is a DONT CARE */ + cur_query_entry->index = BP_PORT(bp); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + } + + /**** Clients' queries ****/ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats); + + /* first queue query index depends whether FCoE offloaded request will + * be included in the ramrod + */ + if (!NO_FCOE(bp)) + first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX; + else + first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1; + + for_each_eth_queue(bp, i) { + cur_query_entry = + &bp->fw_stats_req-> + query[first_queue_query_index + i]; + + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + + cur_data_offset += sizeof(struct per_queue_stats); + } + + /* add FCoE queue query if needed */ + if (!NO_FCOE(bp)) { + cur_query_entry = + &bp->fw_stats_req-> + query[first_queue_query_index + i]; + + cur_query_entry->kind = STATS_TYPE_QUEUE; + cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]); + cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); + cur_query_entry->address.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + cur_query_entry->address.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + } +} + +void bnx2x_memset_stats(struct bnx2x *bp) +{ + int i; + + /* function stats */ + for_each_queue(bp, i) { + struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; + + memset(&fp_stats->old_tclient, 0, + sizeof(fp_stats->old_tclient)); + memset(&fp_stats->old_uclient, 0, + sizeof(fp_stats->old_uclient)); + memset(&fp_stats->old_xclient, 0, + sizeof(fp_stats->old_xclient)); + if (bp->stats_init) { + memset(&fp_stats->eth_q_stats, 0, + sizeof(fp_stats->eth_q_stats)); + 
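+			/* (illustrative note, not original driver text: the
+			 * live and "_old" per-queue blocks are wiped only on
+			 * the very first init; on later re-inits the *_old
+			 * copies are presumably left alone so they keep the
+			 * totals that bnx2x_save_statistics() preserved
+			 * across a FW reload)
+			 */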
memset(&fp_stats->eth_q_stats_old, 0, + sizeof(fp_stats->eth_q_stats_old)); + } + } + + memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); + + if (bp->stats_init) { + memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old)); + memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); + memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); + memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); + memset(&bp->func_stats, 0, sizeof(bp->func_stats)); + } + + bp->stats_state = STATS_STATE_DISABLED; + + if (bp->port.pmf && bp->port.port_stx) + bnx2x_port_stats_base_init(bp); + + /* mark the end of statistics initialization */ + bp->stats_init = false; +} + +void bnx2x_stats_init(struct bnx2x *bp) +{ + int /*abs*/port = BP_PORT(bp); + int mb_idx = BP_FW_MB_IDX(bp); + + if (IS_VF(bp)) { + bnx2x_memset_stats(bp); + return; + } + + bp->stats_pending = 0; + bp->executer_idx = 0; + bp->stats_counter = 0; + + /* port and func stats for management */ + if (!BP_NOMCP(bp)) { + bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); + bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); + + } else { + bp->port.port_stx = 0; + bp->func_stx = 0; + } + DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", + bp->port.port_stx, bp->func_stx); + + /* pmf should retrieve port statistics from SP on a non-init*/ + if (!bp->stats_init && bp->port.pmf && bp->port.port_stx) + bnx2x_stats_handle(bp, STATS_EVENT_PMF); + + port = BP_PORT(bp); + /* port stats */ + memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); + bp->port.old_nig_stats.brb_discard = + REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); + bp->port.old_nig_stats.brb_truncate = + REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + if (!CHIP_IS_E3(bp)) { + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); + REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, + &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); + } + + /* Prepare statistics ramrod data */ + bnx2x_prep_fw_stats_req(bp); + + /* Clean SP from previous statistics */ + if (bp->stats_init) { + if (bp->func_stx) { + memset(bnx2x_sp(bp, func_stats), 0, + sizeof(struct host_func_stats)); + bnx2x_func_stats_init(bp); + bnx2x_hw_stats_post(bp); + bnx2x_stats_comp(bp); + } + } + + bnx2x_memset_stats(bp); +} + +void bnx2x_save_statistics(struct bnx2x *bp) +{ + int i; + struct net_device_stats *nstats = &bp->dev->stats; + + /* save queue statistics */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = + &bnx2x_fp_stats(bp, fp)->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = + &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + + UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi); + UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo); + UPDATE_QSTAT_OLD(total_tpa_bytes_hi); + UPDATE_QSTAT_OLD(total_tpa_bytes_lo); + } + + /* save net_device_stats statistics */ + bp->net_stats_old.rx_dropped = nstats->rx_dropped; 
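+	/* Illustration (editorial note, not driver code): the UPDATE_*_OLD
+	 * macros used above are plain snapshots - UPDATE_QSTAT_OLD(f), as
+	 * defined later in bnx2x_stats.h in this patch, does little more than
+	 *
+	 *	qstats_old->f = qstats->f;
+	 *
+	 * so that after a FW reload UPDATE_QSTAT() can rebuild each total as
+	 * "preserved snapshot + whatever the freshly started FW reports"
+	 * instead of restarting the counters from zero.
+	 */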
+ + /* store port firmware statistics */ + if (bp->port.pmf && IS_MF(bp)) { + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; + UPDATE_FW_STAT_OLD(mac_filter_discard); + UPDATE_FW_STAT_OLD(mf_tag_discard); + UPDATE_FW_STAT_OLD(brb_truncate_discard); + UPDATE_FW_STAT_OLD(mac_discard); + } +} + +void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, + u32 stats_type) +{ + int i; + struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; + struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct per_queue_stats *fcoe_q_stats = + &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; + + struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = + &fcoe_q_stats->tstorm_queue_statistics; + + struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = + &fcoe_q_stats->ustorm_queue_statistics; + + struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = + &fcoe_q_stats->xstorm_queue_statistics; + + struct fcoe_statistics_params *fw_fcoe_stat = + &bp->fw_stats_data->fcoe; + + memset(afex_stats, 0, sizeof(struct afex_stats)); + + for_each_eth_queue(bp, i) { + struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + + ADD_64(afex_stats->rx_unicast_bytes_hi, + qstats->total_unicast_bytes_received_hi, + afex_stats->rx_unicast_bytes_lo, + qstats->total_unicast_bytes_received_lo); + + ADD_64(afex_stats->rx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_received_hi, + afex_stats->rx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_received_lo); + + ADD_64(afex_stats->rx_multicast_bytes_hi, + qstats->total_multicast_bytes_received_hi, + afex_stats->rx_multicast_bytes_lo, + qstats->total_multicast_bytes_received_lo); + + ADD_64(afex_stats->rx_unicast_frames_hi, + qstats->total_unicast_packets_received_hi, + afex_stats->rx_unicast_frames_lo, + qstats->total_unicast_packets_received_lo); + + ADD_64(afex_stats->rx_broadcast_frames_hi, + qstats->total_broadcast_packets_received_hi, + afex_stats->rx_broadcast_frames_lo, + qstats->total_broadcast_packets_received_lo); + + ADD_64(afex_stats->rx_multicast_frames_hi, + qstats->total_multicast_packets_received_hi, + afex_stats->rx_multicast_frames_lo, + qstats->total_multicast_packets_received_lo); + + /* sum to rx_frames_discarded all discraded + * packets due to size, ttl0 and checksum + */ + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->total_packets_received_checksum_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_checksum_discarded_lo); + + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->total_packets_received_ttl0_discarded_hi, + afex_stats->rx_frames_discarded_lo, + qstats->total_packets_received_ttl0_discarded_lo); + + ADD_64(afex_stats->rx_frames_discarded_hi, + qstats->etherstatsoverrsizepkts_hi, + afex_stats->rx_frames_discarded_lo, + qstats->etherstatsoverrsizepkts_lo); + + ADD_64(afex_stats->rx_frames_dropped_hi, + qstats->no_buff_discard_hi, + afex_stats->rx_frames_dropped_lo, + qstats->no_buff_discard_lo); + + ADD_64(afex_stats->tx_unicast_bytes_hi, + qstats->total_unicast_bytes_transmitted_hi, + afex_stats->tx_unicast_bytes_lo, + qstats->total_unicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_bytes_hi, + qstats->total_broadcast_bytes_transmitted_hi, + afex_stats->tx_broadcast_bytes_lo, + qstats->total_broadcast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_bytes_hi, + qstats->total_multicast_bytes_transmitted_hi, + afex_stats->tx_multicast_bytes_lo, + 
qstats->total_multicast_bytes_transmitted_lo); + + ADD_64(afex_stats->tx_unicast_frames_hi, + qstats->total_unicast_packets_transmitted_hi, + afex_stats->tx_unicast_frames_lo, + qstats->total_unicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_broadcast_frames_hi, + qstats->total_broadcast_packets_transmitted_hi, + afex_stats->tx_broadcast_frames_lo, + qstats->total_broadcast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_multicast_frames_hi, + qstats->total_multicast_packets_transmitted_hi, + afex_stats->tx_multicast_frames_lo, + qstats->total_multicast_packets_transmitted_lo); + + ADD_64(afex_stats->tx_frames_dropped_hi, + qstats->total_transmitted_dropped_packets_error_hi, + afex_stats->tx_frames_dropped_lo, + qstats->total_transmitted_dropped_packets_error_lo); + } + + /* now add FCoE statistics which are collected separately + * (both offloaded and non offloaded) + */ + if (!NO_FCOE(bp)) { + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + LE32_0, + afex_stats->rx_unicast_bytes_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); + + ADD_64_LE(afex_stats->rx_unicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, + afex_stats->rx_unicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); + + ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, + fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, + afex_stats->rx_broadcast_bytes_lo, + fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_multicast_bytes_hi, + fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, + afex_stats->rx_multicast_bytes_lo, + fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); + + ADD_64_LE(afex_stats->rx_unicast_frames_hi, + LE32_0, + afex_stats->rx_unicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_broadcast_frames_hi, + LE32_0, + afex_stats->rx_broadcast_frames_lo, + fcoe_q_tstorm_stats->rcv_bcast_pkts); + + ADD_64_LE(afex_stats->rx_multicast_frames_hi, + LE32_0, + afex_stats->rx_multicast_frames_lo, + fcoe_q_tstorm_stats->rcv_ucast_pkts); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->checksum_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->pkts_too_big_discard); + + ADD_64_LE(afex_stats->rx_frames_discarded_hi, + LE32_0, + afex_stats->rx_frames_discarded_lo, + fcoe_q_tstorm_stats->ttl0_discard); + + ADD_64_LE16(afex_stats->rx_frames_dropped_hi, + LE16_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_tstorm_stats->no_buff_discard); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->ucast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->mcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fcoe_q_ustorm_stats->bcast_no_buff_pkts); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->rx_frames_dropped_hi, + LE32_0, + afex_stats->rx_frames_dropped_lo, + fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + LE32_0, + afex_stats->tx_unicast_bytes_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); + + ADD_64_LE(afex_stats->tx_unicast_bytes_hi, + 
fcoe_q_xstorm_stats->ucast_bytes_sent.hi, + afex_stats->tx_unicast_bytes_lo, + fcoe_q_xstorm_stats->ucast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_broadcast_bytes_hi, + fcoe_q_xstorm_stats->bcast_bytes_sent.hi, + afex_stats->tx_broadcast_bytes_lo, + fcoe_q_xstorm_stats->bcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_multicast_bytes_hi, + fcoe_q_xstorm_stats->mcast_bytes_sent.hi, + afex_stats->tx_multicast_bytes_lo, + fcoe_q_xstorm_stats->mcast_bytes_sent.lo); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); + + ADD_64_LE(afex_stats->tx_unicast_frames_hi, + LE32_0, + afex_stats->tx_unicast_frames_lo, + fcoe_q_xstorm_stats->ucast_pkts_sent); + + ADD_64_LE(afex_stats->tx_broadcast_frames_hi, + LE32_0, + afex_stats->tx_broadcast_frames_lo, + fcoe_q_xstorm_stats->bcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_multicast_frames_hi, + LE32_0, + afex_stats->tx_multicast_frames_lo, + fcoe_q_xstorm_stats->mcast_pkts_sent); + + ADD_64_LE(afex_stats->tx_frames_dropped_hi, + LE32_0, + afex_stats->tx_frames_dropped_lo, + fcoe_q_xstorm_stats->error_drop_pkts); + } + + /* if port stats are requested, add them to the PMF + * stats, as anyway they will be accumulated by the + * MCP before sent to the switch + */ + if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) { + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->mac_filter_discard); + ADD_64(afex_stats->rx_frames_dropped_hi, + 0, + afex_stats->rx_frames_dropped_lo, + estats->brb_truncate_discard); + ADD_64(afex_stats->rx_frames_discarded_hi, + 0, + afex_stats->rx_frames_discarded_lo, + estats->mac_discard); + } +} + +int bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie) +{ + int cnt = 10, rc = 0; + + /* Wait for statistics to end [while blocking further requests], + * then run supplied function 'safely'. + */ + rc = down_timeout(&bp->stats_lock, HZ / 10); + if (unlikely(rc)) { + BNX2X_ERR("Failed to take statistics lock for safe execution\n"); + goto out_no_lock; + } + + bnx2x_stats_comp(bp); + while (bp->stats_pending && cnt--) + if (bnx2x_storm_stats_update(bp)) + usleep_range(1000, 2000); + if (bp->stats_pending) { + BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n"); + rc = -EBUSY; + goto out; + } + + func_to_exec(cookie); + +out: + /* No need to restart statistics - if they're enabled, the timer + * will restart the statistics. + */ + up(&bp->stats_lock); +out_no_lock: + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h new file mode 100644 index 000000000..965539a9d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -0,0 +1,555 @@ +/* bnx2x_stats.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Maintained by: Ariel Elior + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ +#ifndef BNX2X_STATS_H +#define BNX2X_STATS_H + +#include + +struct nig_stats { + u32 brb_discard; + u32 brb_packet; + u32 brb_truncate; + u32 flow_ctrl_discard; + u32 flow_ctrl_octets; + u32 flow_ctrl_packet; + u32 mng_discard; + u32 mng_octet_inp; + u32 mng_octet_out; + u32 mng_packet_inp; + u32 mng_packet_out; + u32 pbf_octets; + u32 pbf_packet; + u32 safc_inp; + u32 egress_mac_pkt0_lo; + u32 egress_mac_pkt0_hi; + u32 egress_mac_pkt1_lo; + u32 egress_mac_pkt1_hi; +}; + +enum bnx2x_stats_event { + STATS_EVENT_PMF = 0, + STATS_EVENT_LINK_UP, + STATS_EVENT_UPDATE, + STATS_EVENT_STOP, + STATS_EVENT_MAX +}; + +enum bnx2x_stats_state { + STATS_STATE_DISABLED = 0, + STATS_STATE_ENABLED, + STATS_STATE_MAX +}; + +struct bnx2x_eth_stats { + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; + + u32 error_bytes_received_hi; + u32 error_bytes_received_lo; + u32 etherstatsoverrsizepkts_hi; + u32 etherstatsoverrsizepkts_lo; + u32 no_buff_discard_hi; + u32 no_buff_discard_lo; + + u32 rx_stat_ifhcinbadoctets_hi; + u32 rx_stat_ifhcinbadoctets_lo; + u32 tx_stat_ifhcoutbadoctets_hi; + u32 tx_stat_ifhcoutbadoctets_lo; + u32 rx_stat_dot3statsfcserrors_hi; + u32 rx_stat_dot3statsfcserrors_lo; + u32 rx_stat_dot3statsalignmenterrors_hi; + u32 rx_stat_dot3statsalignmenterrors_lo; + u32 rx_stat_dot3statscarriersenseerrors_hi; + u32 rx_stat_dot3statscarriersenseerrors_lo; + u32 rx_stat_falsecarriererrors_hi; + u32 rx_stat_falsecarriererrors_lo; + u32 rx_stat_etherstatsundersizepkts_hi; + u32 rx_stat_etherstatsundersizepkts_lo; + u32 rx_stat_dot3statsframestoolong_hi; + u32 rx_stat_dot3statsframestoolong_lo; + u32 rx_stat_etherstatsfragments_hi; + u32 rx_stat_etherstatsfragments_lo; + u32 rx_stat_etherstatsjabbers_hi; + u32 rx_stat_etherstatsjabbers_lo; + u32 rx_stat_maccontrolframesreceived_hi; + u32 rx_stat_maccontrolframesreceived_lo; + u32 rx_stat_bmac_xpf_hi; + u32 rx_stat_bmac_xpf_lo; + u32 rx_stat_bmac_xcf_hi; + u32 rx_stat_bmac_xcf_lo; + u32 rx_stat_xoffstateentered_hi; + u32 rx_stat_xoffstateentered_lo; + u32 rx_stat_xonpauseframesreceived_hi; + u32 rx_stat_xonpauseframesreceived_lo; + u32 rx_stat_xoffpauseframesreceived_hi; + u32 rx_stat_xoffpauseframesreceived_lo; + u32 tx_stat_outxonsent_hi; + u32 tx_stat_outxonsent_lo; + u32 tx_stat_outxoffsent_hi; + u32 tx_stat_outxoffsent_lo; + u32 tx_stat_flowcontroldone_hi; + u32 tx_stat_flowcontroldone_lo; + u32 tx_stat_etherstatscollisions_hi; + u32 tx_stat_etherstatscollisions_lo; + u32 tx_stat_dot3statssinglecollisionframes_hi; + u32 tx_stat_dot3statssinglecollisionframes_lo; + u32 tx_stat_dot3statsmultiplecollisionframes_hi; + 
u32 tx_stat_dot3statsmultiplecollisionframes_lo; + u32 tx_stat_dot3statsdeferredtransmissions_hi; + u32 tx_stat_dot3statsdeferredtransmissions_lo; + u32 tx_stat_dot3statsexcessivecollisions_hi; + u32 tx_stat_dot3statsexcessivecollisions_lo; + u32 tx_stat_dot3statslatecollisions_hi; + u32 tx_stat_dot3statslatecollisions_lo; + u32 tx_stat_etherstatspkts64octets_hi; + u32 tx_stat_etherstatspkts64octets_lo; + u32 tx_stat_etherstatspkts65octetsto127octets_hi; + u32 tx_stat_etherstatspkts65octetsto127octets_lo; + u32 tx_stat_etherstatspkts128octetsto255octets_hi; + u32 tx_stat_etherstatspkts128octetsto255octets_lo; + u32 tx_stat_etherstatspkts256octetsto511octets_hi; + u32 tx_stat_etherstatspkts256octetsto511octets_lo; + u32 tx_stat_etherstatspkts512octetsto1023octets_hi; + u32 tx_stat_etherstatspkts512octetsto1023octets_lo; + u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; + u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; + u32 tx_stat_etherstatspktsover1522octets_hi; + u32 tx_stat_etherstatspktsover1522octets_lo; + u32 tx_stat_bmac_2047_hi; + u32 tx_stat_bmac_2047_lo; + u32 tx_stat_bmac_4095_hi; + u32 tx_stat_bmac_4095_lo; + u32 tx_stat_bmac_9216_hi; + u32 tx_stat_bmac_9216_lo; + u32 tx_stat_bmac_16383_hi; + u32 tx_stat_bmac_16383_lo; + u32 tx_stat_dot3statsinternalmactransmiterrors_hi; + u32 tx_stat_dot3statsinternalmactransmiterrors_lo; + u32 tx_stat_bmac_ufl_hi; + u32 tx_stat_bmac_ufl_lo; + + u32 pause_frames_received_hi; + u32 pause_frames_received_lo; + u32 pause_frames_sent_hi; + u32 pause_frames_sent_lo; + + u32 etherstatspkts1024octetsto1522octets_hi; + u32 etherstatspkts1024octetsto1522octets_lo; + u32 etherstatspktsover1522octets_hi; + u32 etherstatspktsover1522octets_lo; + + u32 brb_drop_hi; + u32 brb_drop_lo; + u32 brb_truncate_hi; + u32 brb_truncate_lo; + + u32 mac_filter_discard; + u32 mf_tag_discard; + u32 brb_truncate_discard; + u32 mac_discard; + + u32 driver_xoff; + u32 rx_err_discard_pkt; + u32 rx_skb_alloc_failed; + u32 hw_csum_err; + + u32 nig_timer_max; + + /* TPA */ + u32 total_tpa_aggregations_hi; + u32 total_tpa_aggregations_lo; + u32 total_tpa_aggregated_frames_hi; + u32 total_tpa_aggregated_frames_lo; + u32 total_tpa_bytes_hi; + u32 total_tpa_bytes_lo; + + /* PFC */ + u32 pfc_frames_received_hi; + u32 pfc_frames_received_lo; + u32 pfc_frames_sent_hi; + u32 pfc_frames_sent_lo; + + /* Recovery */ + u32 recoverable_error; + u32 unrecoverable_error; + u32 driver_filtered_tx_pkt; + /* src: Clear-on-Read register; Will not survive PMF Migration */ + u32 eee_tx_lpi; +}; + +struct bnx2x_eth_q_stats { + u32 total_unicast_bytes_received_hi; + u32 total_unicast_bytes_received_lo; + u32 total_broadcast_bytes_received_hi; + u32 total_broadcast_bytes_received_lo; + u32 total_multicast_bytes_received_hi; + u32 total_multicast_bytes_received_lo; + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; + u32 total_unicast_bytes_transmitted_hi; + u32 total_unicast_bytes_transmitted_lo; + u32 total_broadcast_bytes_transmitted_hi; + u32 total_broadcast_bytes_transmitted_lo; + u32 total_multicast_bytes_transmitted_hi; + u32 total_multicast_bytes_transmitted_lo; + u32 total_bytes_transmitted_hi; + u32 total_bytes_transmitted_lo; + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; + u32 
total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 total_packets_received_checksum_discarded_hi;
+	u32 total_packets_received_checksum_discarded_lo;
+	u32 total_packets_received_ttl0_discarded_hi;
+	u32 total_packets_received_ttl0_discarded_lo;
+	u32 total_transmitted_dropped_packets_error_hi;
+	u32 total_transmitted_dropped_packets_error_lo;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+	u32 driver_filtered_tx_pkt;
+};
+
+struct bnx2x_eth_stats_old {
+	u32 rx_stat_dot3statsframestoolong_hi;
+	u32 rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+	/* Fields to preserve over fw reset */
+	u32 total_unicast_bytes_received_hi;
+	u32 total_unicast_bytes_received_lo;
+	u32 total_broadcast_bytes_received_hi;
+	u32 total_broadcast_bytes_received_lo;
+	u32 total_multicast_bytes_received_hi;
+	u32 total_multicast_bytes_received_lo;
+	u32 total_unicast_bytes_transmitted_hi;
+	u32 total_unicast_bytes_transmitted_lo;
+	u32 total_broadcast_bytes_transmitted_hi;
+	u32 total_broadcast_bytes_transmitted_lo;
+	u32 total_multicast_bytes_transmitted_hi;
+	u32 total_multicast_bytes_transmitted_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+
+	/* Fields to preserve the last value of */
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 total_tpa_bytes_hi_old;
+	u32 total_tpa_bytes_lo_old;
+
+	u32 driver_xoff_old;
+	u32 rx_err_discard_pkt_old;
+	u32 rx_skb_alloc_failed_old;
+	u32 hw_csum_err_old;
+	u32 driver_filtered_tx_pkt_old;
+};
+
+struct bnx2x_net_stats_old {
+	u32 rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+	u32 mac_filter_discard;
+	u32 mf_tag_discard;
+	u32 brb_truncate_discard;
+	u32 mac_discard;
+};
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ?
1 : 0); \ + } while (0) + +#define LE32_0 ((__force __le32) 0) +#define LE16_0 ((__force __le16) 0) + +/* The _force is for cases where high value is 0 */ +#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le32_to_cpu(a_hi_le), \ + s_lo, le32_to_cpu(a_lo_le)) + +#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \ + ADD_64(s_hi, le16_to_cpu(a_hi_le), \ + s_lo, le16_to_cpu(a_lo_le)) + +/* difference = minuend - subtrahend */ +#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ + do { \ + if (m_lo < s_lo) { \ + /* underflow */ \ + d_hi = m_hi - s_hi; \ + if (d_hi > 0) { \ + /* we can 'loan' 1 */ \ + d_hi--; \ + d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ + } else { \ + /* m_hi <= s_hi */ \ + d_hi = 0; \ + d_lo = 0; \ + } \ + } else { \ + /* m_lo >= s_lo */ \ + if (m_hi < s_hi) { \ + d_hi = 0; \ + d_lo = 0; \ + } else { \ + /* m_hi >= s_hi */ \ + d_hi = m_hi - s_hi; \ + d_lo = m_lo - s_lo; \ + } \ + } \ + } while (0) + +#define UPDATE_STAT64(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \ + diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \ + pstats->mac_stx[0].t##_hi = new->s##_hi; \ + pstats->mac_stx[0].t##_lo = new->s##_lo; \ + ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \ + pstats->mac_stx[1].t##_lo, diff.lo); \ + } while (0) + +#define UPDATE_STAT64_NIG(s, t) \ + do { \ + DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \ + diff.lo, new->s##_lo, old->s##_lo); \ + ADD_64(estats->t##_hi, diff.hi, \ + estats->t##_lo, diff.lo); \ + } while (0) + +/* sum[hi:lo] += add */ +#define ADD_EXTEND_64(s_hi, s_lo, a) \ + do { \ + s_lo += a; \ + s_hi += (s_lo < a) ? 1 : 0; \ + } while (0) + +#define ADD_STAT64(diff, t) \ + do { \ + ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \ + pstats->mac_stx[1].t##_lo, new->diff##_lo); \ + } while (0) + +#define UPDATE_EXTEND_STAT(s) \ + do { \ + ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \ + pstats->mac_stx[1].s##_lo, \ + new->s); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT_X(s, t, size) \ + do { \ + diff = le##size##_to_cpu(tclient->s) - \ + le##size##_to_cpu(old_tclient->s); \ + old_tclient->s = tclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32) + +#define UPDATE_EXTEND_E_TSTAT(s, t, size) \ + do { \ + UPDATE_EXTEND_TSTAT_X(s, t, size); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + old_uclient->s = uclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_E_USTAT(s, t) \ + do { \ + UPDATE_EXTEND_USTAT(s, t); \ + ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \ + } while (0) + +#define UPDATE_EXTEND_XSTAT(s, t) \ + do { \ + diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \ + old_xclient->s = xclient->s; \ + ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +#define UPDATE_QSTAT(s, t) \ + do { \ + qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ + qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ + + ((qstats->t##_lo < qstats_old->t##_lo) ? 
1 : 0); \ + } while (0) + +#define UPDATE_QSTAT_OLD(f) \ + do { \ + qstats_old->f = qstats->f; \ + } while (0) + +#define UPDATE_ESTAT_QSTAT_64(s) \ + do { \ + ADD_64(estats->s##_hi, qstats->s##_hi, \ + estats->s##_lo, qstats->s##_lo); \ + SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \ + estats->s##_lo, qstats_old->s##_lo_old); \ + qstats_old->s##_hi_old = qstats->s##_hi; \ + qstats_old->s##_lo_old = qstats->s##_lo; \ + } while (0) + +#define UPDATE_ESTAT_QSTAT(s) \ + do { \ + estats->s += qstats->s; \ + estats->s -= qstats_old->s##_old; \ + qstats_old->s##_old = qstats->s; \ + } while (0) + +#define UPDATE_FSTAT_QSTAT(s) \ + do { \ + ADD_64(fstats->s##_hi, qstats->s##_hi, \ + fstats->s##_lo, qstats->s##_lo); \ + SUB_64(fstats->s##_hi, qstats_old->s##_hi, \ + fstats->s##_lo, qstats_old->s##_lo); \ + estats->s##_hi = fstats->s##_hi; \ + estats->s##_lo = fstats->s##_lo; \ + qstats_old->s##_hi = qstats->s##_hi; \ + qstats_old->s##_lo = qstats->s##_lo; \ + } while (0) + +#define UPDATE_FW_STAT(s) \ + do { \ + estats->s = le32_to_cpu(tport->s) + fwstats->s; \ + } while (0) + +#define UPDATE_FW_STAT_OLD(f) \ + do { \ + fwstats->f = estats->f; \ + } while (0) + +#define UPDATE_ESTAT(s, t) \ + do { \ + SUB_64(estats->s##_hi, estats_old->t##_hi, \ + estats->s##_lo, estats_old->t##_lo); \ + ADD_64(estats->s##_hi, estats->t##_hi, \ + estats->s##_lo, estats->t##_lo); \ + estats_old->t##_hi = estats->t##_hi; \ + estats_old->t##_lo = estats->t##_lo; \ + } while (0) + +/* minuend -= subtrahend */ +#define SUB_64(m_hi, s_hi, m_lo, s_lo) \ + do { \ + DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \ + } while (0) + +/* minuend[hi:lo] -= subtrahend */ +#define SUB_EXTEND_64(m_hi, m_lo, s) \ + do { \ + SUB_64(m_hi, 0, m_lo, s); \ + } while (0) + +#define SUB_EXTEND_USTAT(s, t) \ + do { \ + diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \ + SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \ + } while (0) + +/* forward */ +struct bnx2x; + +void bnx2x_memset_stats(struct bnx2x *bp); +void bnx2x_stats_init(struct bnx2x *bp); +void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); +int bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie); + +/** + * bnx2x_save_statistics - save statistics when unloading. + * + * @bp: driver handle + */ +void bnx2x_save_statistics(struct bnx2x *bp); + +void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, + u32 stats_type); +#endif /* BNX2X_STATS_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c new file mode 100644 index 000000000..06b8c0d8f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -0,0 +1,2168 @@ +/* bnx2x_vfpf.c: Broadcom Everest network driver. + * + * Copyright 2009-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Ariel Elior + * Written by: Shmulik Ravid + * Ariel Elior + */ + +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include + +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); + +/* place a given tlv on the tlv buffer at a given offset */ +static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, + u16 offset, u16 type, u16 length) +{ + struct channel_tlv *tl = + (struct channel_tlv *)(tlvs_list + offset); + + tl->type = type; + tl->length = length; +} + +/* Clear the mailbox and init the header of the first tlv */ +static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, + u16 type, u16 length) +{ + mutex_lock(&bp->vf2pf_mutex); + + DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n", + type); + + /* Clear mailbox */ + memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg)); + + /* init type and length */ + bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length); + + /* init first tlv header */ + first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); +} + +/* releases the mailbox */ +static void bnx2x_vfpf_finalize(struct bnx2x *bp, + struct vfpf_first_tlv *first_tlv) +{ + DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n", + first_tlv->tl.type); + + mutex_unlock(&bp->vf2pf_mutex); +} + +/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */ +static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, + enum channel_tlvs req_tlv) +{ + struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + + do { + if (tlv->type == req_tlv) + return tlv; + + if (!tlv->length) { + BNX2X_ERR("Found TLV with length 0\n"); + return NULL; + } + + tlvs_list += tlv->length; + tlv = (struct channel_tlv *)tlvs_list; + } while (tlv->type != CHANNEL_TLV_LIST_END); + + DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv); + + return NULL; +} + +/* list the types and lengths of the tlvs on the buffer */ +static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) +{ + int i = 1; + struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + + while (tlv->type != CHANNEL_TLV_LIST_END) { + /* output tlv */ + DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i, + tlv->type, tlv->length); + + /* advance to next tlv */ + tlvs_list += tlv->length; + + /* cast general tlv list pointer to channel tlv header*/ + tlv = (struct channel_tlv *)tlvs_list; + + i++; + + /* break condition for this loop */ + if (i > MAX_TLVS_IN_LIST) { + WARN(true, "corrupt tlvs"); + return; + } + } + + /* output last tlv */ + DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i, + tlv->type, tlv->length); +} + +/* test whether we support a tlv type */ +bool bnx2x_tlv_supported(u16 tlvtype) +{ + return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; +} + +static inline int bnx2x_pfvf_status_codes(int rc) +{ + switch (rc) { + case 0: + return PFVF_STATUS_SUCCESS; + case -ENOMEM: + return PFVF_STATUS_NO_RESOURCE; + default: + return PFVF_STATUS_FAILURE; + } +} + +static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) +{ + struct cstorm_vf_zone_data __iomem *zone_data = + REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START); + int tout = 100, interval = 100; /* wait for 10 seconds */ + + if (*done) { + BNX2X_ERR("done was non zero before message to pf was sent\n"); + WARN_ON(true); + return -EINVAL; + } + + /* if PF indicated channel is down avoid sending message. 
Return success + * so calling flow can continue + */ + bnx2x_sample_bulletin(bp); + if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { + DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n"); + *done = PFVF_STATUS_SUCCESS; + return -EINVAL; + } + + /* Write message address */ + writel(U64_LO(msg_mapping), + &zone_data->non_trigger.vf_pf_channel.msg_addr_lo); + writel(U64_HI(msg_mapping), + &zone_data->non_trigger.vf_pf_channel.msg_addr_hi); + + /* make sure the address is written before FW accesses it */ + wmb(); + + /* Trigger the PF FW */ + writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid); + + /* Wait for PF to complete */ + while ((tout >= 0) && (!*done)) { + msleep(interval); + tout -= 1; + + /* progress indicator - HV can take its own sweet time in + * answering VFs... + */ + DP_CONT(BNX2X_MSG_IOV, "."); + } + + if (!*done) { + BNX2X_ERR("PF response has timed out\n"); + return -EAGAIN; + } + DP(BNX2X_MSG_SP, "Got a response from PF\n"); + return 0; +} + +static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id) +{ + u32 me_reg; + int tout = 10, interval = 100; /* Wait for 1 sec */ + + do { + /* pxp traps vf read of doorbells and returns me reg value */ + me_reg = readl(bp->doorbells); + if (GOOD_ME_REG(me_reg)) + break; + + msleep(interval); + + BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?", + me_reg); + } while (tout-- > 0); + + if (!GOOD_ME_REG(me_reg)) { + BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg); + return -EINVAL; + } + + DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg); + + *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; + + return 0; +} + +int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) +{ + int rc = 0, attempts = 0; + struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; + struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; + struct vfpf_port_phys_id_resp_tlv *phys_port_resp; + struct vfpf_fp_hsi_resp_tlv *fp_hsi_resp; + u32 vf_id; + bool resources_acquired = false; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + + if (bnx2x_get_vf_id(bp, &vf_id)) { + rc = -EAGAIN; + goto out; + } + + req->vfdev_info.vf_id = vf_id; + req->vfdev_info.vf_os = 0; + req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION; + + req->resc_request.num_rxqs = rx_count; + req->resc_request.num_txqs = tx_count; + req->resc_request.num_sbs = bp->igu_sb_cnt; + req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; + req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS; + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = bp->pf2vf_bulletin_mapping; + + /* Request physical port identifier */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, + CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv)); + + /* Bulletin support for bulletin board with length > legacy length */ + req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, + req->first_tlv.tl.length + sizeof(struct channel_tlv), + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + while (!resources_acquired) { + DP(BNX2X_MSG_SP, "attempting to acquire resources\n"); + + /* send acquire request */ + rc = bnx2x_send_msg2pf(bp, + &resp->hdr.status, + bp->vf2pf_mbox_mapping); + + /* PF timeout */ + if (rc) + goto out; + + /* copy acquire response from buffer to bp */ + memcpy(&bp->acquire_resp, resp, 
sizeof(bp->acquire_resp)); + + attempts++; + + /* test whether the PF accepted our request. If not, humble + * the request and try again. + */ + if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) { + DP(BNX2X_MSG_SP, "resources acquired\n"); + resources_acquired = true; + } else if (bp->acquire_resp.hdr.status == + PFVF_STATUS_NO_RESOURCE && + attempts < VF_ACQUIRE_THRESH) { + DP(BNX2X_MSG_SP, + "PF unwilling to fulfill resource request. Try PF recommended amount\n"); + + /* humble our request */ + req->resc_request.num_txqs = + min(req->resc_request.num_txqs, + bp->acquire_resp.resc.num_txqs); + req->resc_request.num_rxqs = + min(req->resc_request.num_rxqs, + bp->acquire_resp.resc.num_rxqs); + req->resc_request.num_sbs = + min(req->resc_request.num_sbs, + bp->acquire_resp.resc.num_sbs); + req->resc_request.num_mac_filters = + min(req->resc_request.num_mac_filters, + bp->acquire_resp.resc.num_mac_filters); + req->resc_request.num_vlan_filters = + min(req->resc_request.num_vlan_filters, + bp->acquire_resp.resc.num_vlan_filters); + req->resc_request.num_mc_filters = + min(req->resc_request.num_mc_filters, + bp->acquire_resp.resc.num_mc_filters); + + /* Clear response buffer */ + memset(&bp->vf2pf_mbox->resp, 0, + sizeof(union pfvf_tlvs)); + } else { + /* Determine reason of PF failure of acquire process */ + fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_FP_HSI_SUPPORT); + if (fp_hsi_resp && !fp_hsi_resp->is_supported) + BNX2X_ERR("Old hypervisor - doesn't support current fastpath HSI version; Need to downgrade VF driver [or upgrade hypervisor]\n"); + else + BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n", + bp->acquire_resp.hdr.status); + rc = -EAGAIN; + goto out; + } + } + + /* Retrieve physical port id (if possible) */ + phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *) + bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_PHYS_PORT_ID); + if (phys_port_resp) { + memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); + bp->flags |= HAS_PHYS_PORT_ID; + } + + /* Old Hypevisors might not even support the FP_HSI_SUPPORT TLV. + * If that's the case, we need to make certain required FW was + * supported by such a hypervisor [i.e., v0-v2]. + */ + fp_hsi_resp = bnx2x_search_tlv_list(bp, resp, + CHANNEL_TLV_FP_HSI_SUPPORT); + if (!fp_hsi_resp && (ETH_FP_HSI_VERSION > ETH_FP_HSI_VER_2)) { + BNX2X_ERR("Old hypervisor - need to downgrade VF's driver\n"); + + /* Since acquire succeeded on the PF side, we need to send a + * release message in order to allow future probes. 
+ */ + bnx2x_vfpf_finalize(bp, &req->first_tlv); + bnx2x_vfpf_release(bp); + + rc = -EINVAL; + goto out; + } + + /* get HW info */ + bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff); + bp->link_params.chip_id = bp->common.chip_id; + bp->db_size = bp->acquire_resp.pfdev_info.db_size; + bp->common.int_block = INT_BLOCK_IGU; + bp->common.chip_port_mode = CHIP_2_PORT_MODE; + bp->igu_dsb_id = -1; + bp->mf_ov = 0; + bp->mf_mode = 0; + bp->common.flash_size = 0; + bp->flags |= + NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; + bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; + bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; + strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, + sizeof(bp->fw_ver)); + + if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr)) + memcpy(bp->dev->dev_addr, + bp->acquire_resp.resc.current_mac_addr, + ETH_ALEN); + +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + return rc; +} + +int bnx2x_vfpf_release(struct bnx2x *bp) +{ + struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + u32 rc, vf_id; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); + + if (bnx2x_get_vf_id(bp, &vf_id)) { + rc = -EAGAIN; + goto out; + } + + req->vf_id = vf_id; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send release request */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) + /* PF timeout */ + goto out; + + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + /* PF released us */ + DP(BNX2X_MSG_SP, "vf released\n"); + } else { + /* PF reports error */ + BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n", + resp->hdr.status); + rc = -EAGAIN; + goto out; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* Tell PF about SB addresses */ +int bnx2x_vfpf_init(struct bnx2x *bp) +{ + struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc, i; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req)); + + /* status blocks */ + for_each_eth_queue(bp, i) + req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i, + status_blk_mapping); + + /* statistics - requests only supports single queue for now */ + req->stats_addr = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, queue_stats); + + req->stats_stride = sizeof(struct per_queue_stats); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + goto out; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("INIT VF failed: %d. 
Breaking...\n", + resp->hdr.status); + rc = -EAGAIN; + goto out; + } + + DP(BNX2X_MSG_SP, "INIT VF Succeeded\n"); +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* CLOSE VF - opposite to INIT_VF */ +void bnx2x_vfpf_close_vf(struct bnx2x *bp) +{ + struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int i, rc; + u32 vf_id; + + /* If we haven't got a valid VF id, there is no sense to + * continue with sending messages + */ + if (bnx2x_get_vf_id(bp, &vf_id)) + goto free_irq; + + /* Close the queues */ + for_each_queue(bp, i) + bnx2x_vfpf_teardown_queue(bp, i); + + /* remove mac */ + bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false); + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); + + req->vf_id = vf_id; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) + BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc); + + else if (resp->hdr.status != PFVF_STATUS_SUCCESS) + BNX2X_ERR("Sending CLOSE failed: pf response was %d\n", + resp->hdr.status); + + bnx2x_vfpf_finalize(bp, &req->first_tlv); + +free_irq: + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + + /* Release IRQs */ + bnx2x_free_irq(bp); +} + +static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + + /* mac */ + bnx2x_init_mac_obj(bp, &q->mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, mac_rdata), + bnx2x_vf_sp_map(bp, vf, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->macs_pool); + /* vlan */ + bnx2x_init_vlan_obj(bp, &q->vlan_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_rdata), + bnx2x_vf_sp_map(bp, vf, vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->vlans_pool); + + /* mcast */ + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, + q->cid, func_id, func_id, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* rss */ + bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid, + func_id, func_id, + bnx2x_vf_sp(bp, vf, rss_rdata), + bnx2x_vf_sp_map(bp, vf, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + vf->leading_rss = cl_id; + q->is_leading = true; + q->sp_initialized = true; +} + +/* ask the pf to open a queue for the vf */ +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading) +{ + struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + u8 fp_idx = fp->index; + u16 tpa_agg_size = 0, flags = 0; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); + + /* select tpa mode to request */ + if (fp->mode != TPA_MODE_DISABLED) { + flags |= VFPF_QUEUE_FLG_TPA; + flags |= VFPF_QUEUE_FLG_TPA_IPV6; + if (fp->mode == TPA_MODE_GRO) + flags |= VFPF_QUEUE_FLG_TPA_GRO; + tpa_agg_size = 
TPA_AGG_SIZE; + } + + if (is_leading) + flags |= VFPF_QUEUE_FLG_LEADING_RSS; + + /* calculate queue flags */ + flags |= VFPF_QUEUE_FLG_STATS; + flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; + flags |= VFPF_QUEUE_FLG_VLAN; + + /* Common */ + req->vf_qid = fp_idx; + req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID; + + /* Rx */ + req->rxq.rcq_addr = fp->rx_comp_mapping; + req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE; + req->rxq.rxq_addr = fp->rx_desc_mapping; + req->rxq.sge_addr = fp->rx_sge_mapping; + req->rxq.vf_sb = fp_idx; + req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS; + req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0; + req->rxq.mtu = bp->dev->mtu; + req->rxq.buf_sz = fp->rx_buf_size; + req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE; + req->rxq.tpa_agg_sz = tpa_agg_size; + req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; + req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) & + (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT; + req->rxq.flags = flags; + req->rxq.drop_flags = 0; + req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT; + req->rxq.stat_id = -1; /* No stats at the moment */ + + /* Tx */ + req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping; + req->txq.vf_sb = fp_idx; + req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; + req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0; + req->txq.flags = flags; + req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n", + fp_idx); + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n", + fp_idx, resp->hdr.status); + rc = -EINVAL; + } + + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) +{ + struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q, + sizeof(*req)); + + req->vf_qid = qidx; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + + if (rc) { + BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx, + rc); + goto out; + } + + /* PF failed the transaction */ + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx, + resp->hdr.status); + rc = -EINVAL; + } + +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* request pf to add a mac for the vf */ +int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) +{ + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; + int rc = 0; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + req->flags = 
VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED; + req->vf_qid = vf_qid; + req->n_mac_vlan_filters = 1; + + req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID; + if (set) + req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC; + + /* sample bulletin board for new mac */ + bnx2x_sample_bulletin(bp); + + /* copy mac from device to request */ + memcpy(req->filters[0].mac, addr, ETH_ALEN); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + /* failure may mean PF was configured with a new mac for us */ + while (resp->hdr.status == PFVF_STATUS_FAILURE) { + DP(BNX2X_MSG_IOV, + "vfpf SET MAC failed. Check bulletin board for new posts\n"); + + /* copy mac from bulletin to device */ + memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); + + /* check if bulletin board was updated */ + if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) { + /* copy mac from device to request */ + memcpy(req->filters[0].mac, bp->dev->dev_addr, + ETH_ALEN); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, + bp->vf2pf_mbox_mapping); + } else { + /* no new info in bulletin */ + break; + } + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* request pf to config rss table for vf queues*/ +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) +{ + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; + int rc = 0; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, + sizeof(*req)); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key)); + req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + req->rss_key_size = T_ETH_RSS_KEY; + req->rss_result_mask = params->rss_result_mask; + + /* flags handled individually for backward/forward compatibility */ + if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED)) + req->rss_flags |= VFPF_RSS_MODE_DISABLED; + if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR)) + req->rss_flags |= VFPF_RSS_MODE_REGULAR; + if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH)) + req->rss_flags |= VFPF_RSS_SET_SRCH; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4)) + req->rss_flags |= VFPF_RSS_IPV4; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP)) + req->rss_flags |= VFPF_RSS_IPV4_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP)) + req->rss_flags |= VFPF_RSS_IPV4_UDP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6)) + req->rss_flags |= VFPF_RSS_IPV6; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP)) + req->rss_flags |= VFPF_RSS_IPV6_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP)) + req->rss_flags |= VFPF_RSS_IPV6_UDP; + + DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = 
bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + /* Since older drivers don't support this feature (and VF has + * no way of knowing other than failing this), don't propagate + * an error in this case. + */ + DP(BNX2X_MSG_IOV, + "Failed to send rss message to PF over VF-PF channel [%d]\n", + resp->hdr.status); + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +int bnx2x_vfpf_set_mcast(struct net_device *dev) +{ + struct bnx2x *bp = netdev_priv(dev); + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc, i = 0; + struct netdev_hw_addr *ha; + + if (bp->state != BNX2X_STATE_OPEN) { + DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); + return -EINVAL; + } + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + /* Get Rx mode requested */ + DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags); + + netdev_for_each_mc_addr(ha, dev) { + DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", + bnx2x_mc_addr(ha)); + memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN); + i++; + } + + /* We support no more than PFVF_MAX_MULTICAST_PER_VF multicast + * addresses in a single request + */ + if (i >= PFVF_MAX_MULTICAST_PER_VF) { + DP(NETIF_MSG_IFUP, + "VF supports not more than %d multicast MAC addresses\n", + PFVF_MAX_MULTICAST_PER_VF); + return -EINVAL; + } + + req->n_multicast = i; + req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED; + req->vf_qid = 0; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("Sending a message failed: %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Set Rx mode/multicast failed: %d\n", + resp->hdr.status); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return 0; +} + +int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) +{ + int mode = bp->rx_mode; + struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + int rc; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, + sizeof(*req)); + + DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode); + + /* Ignore everything except MODE_NONE */ + if (mode == BNX2X_RX_MODE_NONE) { + req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; + } else { + /* Current PF driver will not look at the specific flags, + * but they are required when working with older drivers on hv.
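+ * (The current PF side, bnx2x_vf_mbx_qfilters() below, indeed ignores the
+ * individual bits - anything other than ACCEPT_NONE is translated into a
+ * regular unicast/multicast/broadcast rx-mode.)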
+ */ + req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; + req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; + } + + req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; + req->vf_qid = 0; + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) + BNX2X_ERR("Sending a message failed: %d\n", rc); + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); + rc = -EINVAL; + } + + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return rc; +} + +/* General service functions */ +static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid) +{ + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid); + + REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY); +} + +static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) +{ + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid); + + REG_WR8(bp, addr, 1); +} + +/* enable vf_pf mailbox (aka vf-pf-channel) */ +void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) +{ + bnx2x_vf_flr_clnup_epilog(bp, abs_vfid); + + /* enable the mailbox in the FW */ + storm_memset_vf_mbx_ack(bp, abs_vfid); + storm_memset_vf_mbx_valid(bp, abs_vfid); + + /* enable the VF access to the mailbox */ + bnx2x_vf_enable_access(bp, abs_vfid); +} + +/* this works only on !E1h */ +static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf, + dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi, + u32 vf_addr_lo, u32 len32) +{ + struct dmae_command dmae; + + if (CHIP_IS_E1x(bp)) { + BNX2X_ERR("Chip revision does not support VFs\n"); + return DMAE_NOT_RDY; + } + + if (!bp->dmae_ready) { + BNX2X_ERR("DMAE is not ready, can not copy\n"); + return DMAE_NOT_RDY; + } + + /* set opcode and fixed command fields */ + bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI); + + if (from_vf) { + dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) | + (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) | + (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT); + + dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT); + + dmae.src_addr_lo = vf_addr_lo; + dmae.src_addr_hi = vf_addr_hi; + dmae.dst_addr_lo = U64_LO(pf_addr); + dmae.dst_addr_hi = U64_HI(pf_addr); + } else { + dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) | + (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) | + (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT); + + dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT); + + dmae.src_addr_lo = U64_LO(pf_addr); + dmae.src_addr_hi = U64_HI(pf_addr); + dmae.dst_addr_lo = vf_addr_lo; + dmae.dst_addr_hi = vf_addr_hi; + } + dmae.len = len32; + + /* issue the command and wait for completion */ + return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp)); +} + +static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, + struct bnx2x_virtf *vf) +{ + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); + u16 length, type; + + /* prepare response */ + type = mbx->first_tlv.tl.type; + length = type == CHANNEL_TLV_ACQUIRE ? 
+ sizeof(struct pfvf_acquire_resp_tlv) : + sizeof(struct pfvf_general_resp_tlv); + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); +} + +static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, + struct bnx2x_virtf *vf, + int vf_rc) +{ + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); + struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; + dma_addr_t pf_addr; + u64 vf_addr; + int rc; + + bnx2x_dp_tlv_list(bp, resp); + DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", + mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); + + resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc); + + /* send response */ + vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) + + mbx->first_tlv.resp_msg_offset; + pf_addr = mbx->msg_mapping + + offsetof(struct bnx2x_vf_mbx_msg, resp); + + /* Copy the response buffer. The first u64 is written afterwards, as + * the vf is sensitive to the header being written + */ + vf_addr += sizeof(u64); + pf_addr += sizeof(u64); + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + (sizeof(union pfvf_tlvs) - sizeof(u64))/4); + if (rc) { + BNX2X_ERR("Failed to copy response body to VF %d\n", + vf->abs_vfid); + goto mbx_error; + } + vf_addr -= sizeof(u64); + pf_addr -= sizeof(u64); + + /* ack the FW */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + mmiowb(); + + /* copy the response header including status-done field, + * must be last dmae, must be after FW is acked + */ + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, + U64_HI(vf_addr), + U64_LO(vf_addr), + sizeof(u64)/4); + + /* unlock channel mutex */ + bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + + if (rc) { + BNX2X_ERR("Failed to copy response status to VF %d\n", + vf->abs_vfid); + goto mbx_error; + } + return; + +mbx_error: + bnx2x_vf_release(bp, vf); +} + +static void bnx2x_vf_mbx_resp(struct bnx2x *bp, + struct bnx2x_virtf *vf, + int rc) +{ + bnx2x_vf_mbx_resp_single_tlv(bp, vf); + bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); +} + +static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, + struct bnx2x_virtf *vf, + void *buffer, + u16 *offset) +{ + struct vfpf_port_phys_id_resp_tlv *port_id; + + if (!(bp->flags & HAS_PHYS_PORT_ID)) + return; + + bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, + sizeof(struct vfpf_port_phys_id_resp_tlv)); + + port_id = (struct vfpf_port_phys_id_resp_tlv *) + (((u8 *)buffer) + *offset); + memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); + + /* Offset should continue representing the offset to the tail + * of TLV data (outside this function scope) + */ + *offset += sizeof(struct vfpf_port_phys_id_resp_tlv); +} + +static void bnx2x_vf_mbx_resp_fp_hsi_ver(struct bnx2x *bp, + struct bnx2x_virtf *vf, + void *buffer, + u16 *offset) +{ + struct vfpf_fp_hsi_resp_tlv *fp_hsi; + + bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_FP_HSI_SUPPORT, + sizeof(struct vfpf_fp_hsi_resp_tlv)); + + fp_hsi = (struct vfpf_fp_hsi_resp_tlv *) + (((u8 *)buffer) + *offset); + fp_hsi->is_supported = (vf->fp_hsi > ETH_FP_HSI_VERSION) ? 
0 : 1; + + /* Offset should continue representing the offset to the tail + * of TLV data (outside this function scope) + */ + *offset += sizeof(struct vfpf_fp_hsi_resp_tlv); +} + +static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx, int vfop_status) +{ + int i; + struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp; + struct pf_vf_resc *resc = &resp->resc; + u8 status = bnx2x_pfvf_status_codes(vfop_status); + u16 length; + + memset(resp, 0, sizeof(*resp)); + + /* fill in pfdev info */ + resp->pfdev_info.chip_num = bp->common.chip_id; + resp->pfdev_info.db_size = bp->db_size; + resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; + resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | + PFVF_CAP_TPA | + PFVF_CAP_TPA_UPDATE); + bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, + sizeof(resp->pfdev_info.fw_ver)); + + if (status == PFVF_STATUS_NO_RESOURCE || + status == PFVF_STATUS_SUCCESS) { + /* set resources numbers, if status equals NO_RESOURCE these + * are max possible numbers + */ + resc->num_rxqs = vf_rxq_count(vf) ? : + bnx2x_vf_max_queue_cnt(bp, vf); + resc->num_txqs = vf_txq_count(vf) ? : + bnx2x_vf_max_queue_cnt(bp, vf); + resc->num_sbs = vf_sb_count(vf); + resc->num_mac_filters = vf_mac_rules_cnt(vf); + resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf); + resc->num_mc_filters = 0; + + if (status == PFVF_STATUS_SUCCESS) { + /* fill in the allocated resources */ + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); + + for_each_vfq(vf, i) + resc->hw_qid[i] = + vfq_qzone_id(vf, vfq_get(vf, i)); + + for_each_vf_sb(vf, i) { + resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i); + resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i); + } + + /* if a mac has been set for this vf, supply it */ + if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { + memcpy(resc->current_mac_addr, bulletin->mac, + ETH_ALEN); + } + } + } + + DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n" + "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n", + vf->abs_vfid, + resp->pfdev_info.chip_num, + resp->pfdev_info.db_size, + resp->pfdev_info.indices_per_sb, + resp->pfdev_info.pf_cap, + resc->num_rxqs, + resc->num_txqs, + resc->num_sbs, + resc->num_mac_filters, + resc->num_vlan_filters, + resc->num_mc_filters, + resp->pfdev_info.fw_ver); + + DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ "); + for (i = 0; i < vf_rxq_count(vf); i++) + DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]); + DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ "); + for (i = 0; i < vf_sb_count(vf); i++) + DP_CONT(BNX2X_MSG_IOV, "%d:%d ", + resc->hw_sbs[i].hw_sb_id, + resc->hw_sbs[i].sb_qid); + DP_CONT(BNX2X_MSG_IOV, "]\n"); + + /* prepare response */ + length = sizeof(struct pfvf_acquire_resp_tlv); + bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length); + + /* Handle possible VF requests for physical port identifiers. + * 'length' should continue to indicate the offset of the first empty + * place in the buffer (i.e., where next TLV should be inserted) + */ + if (bnx2x_search_tlv_list(bp, &mbx->msg->req, + CHANNEL_TLV_PHYS_PORT_ID)) + bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); + + /* `New' vfs will want to know if fastpath HSI is supported, since + * if that's not the case they could print into system log the fact + * the driver version must be updated. 
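+ * Unlike the PHYS_PORT_ID TLV above, this one is added whether or not the
+ * VF asked for it; a VF that predates it simply won't search for it in the
+ * response. The reply thus ends up as a TLV chain roughly laid out as
+ * [ ACQUIRE resp | PHYS_PORT_ID (if requested) | FP_HSI_SUPPORT | LIST_END ],
+ * with 'length' always holding the offset of the first free byte.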
+ */ + bnx2x_vf_mbx_resp_fp_hsi_ver(bp, vf, &mbx->msg->resp, &length); + + bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* send the response */ + bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status); +} + +static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp, + struct vfpf_acquire_tlv *acquire) +{ + /* Windows driver does one of three things: + * 1. Old driver doesn't have bulletin board address set. + * 2. 'Middle' driver sends mc_num == 32. + * 3. New driver sets the OS field. + */ + if (!acquire->bulletin_addr || + acquire->resc_request.num_mc_filters == 32 || + ((acquire->vfdev_info.vf_os & VF_OS_MASK) == + VF_OS_WINDOWS)) + return true; + + return false; +} + +static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + /* Linux drivers which correctly set the doorbell size also + * send a physical port request + */ + if (bnx2x_search_tlv_list(bp, &mbx->msg->req, + CHANNEL_TLV_PHYS_PORT_ID)) + return 0; + + /* Issue does not exist in windows VMs */ + if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) + return 0; + + return -EOPNOTSUPP; +} + +static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int rc; + struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire; + + /* log vfdef info */ + DP(BNX2X_MSG_IOV, + "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n", + vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os, + acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs, + acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters, + acquire->resc_request.num_vlan_filters, + acquire->resc_request.num_mc_filters); + + /* Prevent VFs with old drivers from loading, since they calculate + * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover + * while being upgraded. + */ + rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx); + if (rc) { + DP(BNX2X_MSG_IOV, + "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n", + vf->abs_vfid); + goto out; + } + + /* Verify the VF fastpath HSI can be supported by the loaded FW. + * Linux vfs should be oblivious to changes between v0 and v2. 
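+ * E.g. a Linux VF reporting fp_hsi_ver 0 is treated below as
+ * ETH_FP_HSI_VER_2, while a Windows VM's reported version is taken as-is.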
+ */ + if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) + vf->fp_hsi = acquire->vfdev_info.fp_hsi_ver; + else + vf->fp_hsi = max_t(u8, acquire->vfdev_info.fp_hsi_ver, + ETH_FP_HSI_VER_2); + if (vf->fp_hsi > ETH_FP_HSI_VERSION) { + DP(BNX2X_MSG_IOV, + "VF [%d] - Can't support acquire request since VF requests a FW version which is too new [%02x > %02x]\n", + vf->abs_vfid, acquire->vfdev_info.fp_hsi_ver, + ETH_FP_HSI_VERSION); + rc = -EINVAL; + goto out; + } + + /* acquire the resources */ + rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request); + + /* store address of vf's bulletin board */ + vf->bulletin_map = acquire->bulletin_addr; + if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) { + DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n", + vf->abs_vfid); + vf->cfg_flags |= VF_CFG_EXT_BULLETIN; + } else { + vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN; + } + +out: + /* response */ + bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); +} + +static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_init_tlv *init = &mbx->msg->req.init; + int rc; + + /* record ghost addresses from vf message */ + vf->spq_map = init->spq_addr; + vf->fw_stat_map = init->stats_addr; + vf->stats_stride = init->stats_stride; + rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); + + /* set VF multiqueue statistics collection mode */ + if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) + vf->cfg_flags |= VF_CFG_STATS_COALESCE; + + /* Update VF's view of link state */ + if (vf->cfg_flags & VF_CFG_EXT_BULLETIN) + bnx2x_iov_link_update_vf(bp, vf->index); + + /* response */ + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +/* convert MBX queue-flags to standard SP queue-flags */ +static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, + unsigned long *sp_q_flags) +{ + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA) + __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6) + __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO) + __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_STATS) + __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN) + __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_COS) + __set_bit(BNX2X_Q_FLG_COS, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_HC) + __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) + __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS) + __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags); + + /* outer vlan removal is set according to PF's multi function mode */ + if (IS_MF_SD(bp)) + __set_bit(BNX2X_Q_FLG_OV, sp_q_flags); +} + +static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; + struct bnx2x_vf_queue_construct_params qctor; + int rc = 0; + + /* verify vf_qid */ + if (setup_q->vf_qid >= vf_rxq_count(vf)) { + BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n", + setup_q->vf_qid, vf_rxq_count(vf)); + rc = -EINVAL; + goto response; + } + + /* tx queues must be setup alongside rx queues thus if the rx queue + * is not marked as valid there's nothing to do. 
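+ * Note that the Linux VF side (bnx2x_vfpf_setup_q() above) always sets both
+ * VFPF_RXQ_VALID and VFPF_TXQ_VALID in param_valid, so for such VFs both
+ * the Rx and Tx branches below are taken for every queue.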
+ */ + if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) { + struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid); + unsigned long q_type = 0; + + struct bnx2x_queue_init_params *init_p; + struct bnx2x_queue_setup_params *setup_p; + + if (bnx2x_vfq_is_leading(q)) + bnx2x_leading_vfq_init(bp, vf, q); + + /* re-init the VF operation context */ + memset(&qctor, 0 , + sizeof(struct bnx2x_vf_queue_construct_params)); + setup_p = &qctor.prep_qsetup; + init_p = &qctor.qstate.params.init; + + /* activate immediately */ + __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); + + if (setup_q->param_valid & VFPF_TXQ_VALID) { + struct bnx2x_txq_setup_params *txq_params = + &setup_p->txq_params; + + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* save sb resource index */ + q->sb_idx = setup_q->txq.vf_sb; + + /* tx init */ + init_p->tx.hc_rate = setup_q->txq.hc_rate; + init_p->tx.sb_cq_index = setup_q->txq.sb_index; + + bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, + &init_p->tx.flags); + + /* tx setup - flags */ + bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags, + &setup_p->flags); + + /* tx setup - general, nothing */ + + /* tx setup - tx */ + txq_params->dscr_map = setup_q->txq.txq_addr; + txq_params->sb_cq_index = setup_q->txq.sb_index; + txq_params->traffic_type = setup_q->txq.traffic_type; + + bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p, + q->index, q->sb_idx); + } + + if (setup_q->param_valid & VFPF_RXQ_VALID) { + struct bnx2x_rxq_setup_params *rxq_params = + &setup_p->rxq_params; + + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + + /* Note: there is no support for different SBs + * for TX and RX + */ + q->sb_idx = setup_q->rxq.vf_sb; + + /* rx init */ + init_p->rx.hc_rate = setup_q->rxq.hc_rate; + init_p->rx.sb_cq_index = setup_q->rxq.sb_index; + bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, + &init_p->rx.flags); + + /* rx setup - flags */ + bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, + &setup_p->flags); + + /* rx setup - general */ + setup_p->gen_params.mtu = setup_q->rxq.mtu; + + /* rx setup - rx */ + rxq_params->drop_flags = setup_q->rxq.drop_flags; + rxq_params->dscr_map = setup_q->rxq.rxq_addr; + rxq_params->sge_map = setup_q->rxq.sge_addr; + rxq_params->rcq_map = setup_q->rxq.rcq_addr; + rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr; + rxq_params->buf_sz = setup_q->rxq.buf_sz; + rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz; + rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt; + rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz; + rxq_params->cache_line_log = + setup_q->rxq.cache_line_log; + rxq_params->sb_cq_index = setup_q->rxq.sb_index; + + /* rx setup - multicast engine */ + if (bnx2x_vfq_is_leading(q)) { + u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid); + + rxq_params->mcast_engine_id = mcast_id; + __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); + } + + bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p, + q->index, q->sb_idx); + } + /* complete the preparations */ + bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); + + rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); + if (rc) + goto response; + } +response: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *tlv, + struct bnx2x_vf_mac_vlan_filters **pfl, + u32 type_flag) +{ + int i, j; + struct bnx2x_vf_mac_vlan_filters *fl = NULL; + size_t fsz; + + fsz = tlv->n_mac_vlan_filters * + sizeof(struct bnx2x_vf_mac_vlan_filter) + + sizeof(struct bnx2x_vf_mac_vlan_filters); + + fl = kzalloc(fsz, GFP_KERNEL); + if 
(!fl) + return -ENOMEM; + + for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) { + struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; + + if ((msg_filter->flags & type_flag) != type_flag) + continue; + if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { + fl->filters[j].mac = msg_filter->mac; + fl->filters[j].type = BNX2X_VF_FILTER_MAC; + } else { + fl->filters[j].vid = msg_filter->vlan_tag; + fl->filters[j].type = BNX2X_VF_FILTER_VLAN; + } + fl->filters[j].add = + (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ? + true : false; + fl->count++; + } + if (!fl->count) + kfree(fl); + else + *pfl = fl; + + return 0; +} + +static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, + struct vfpf_q_mac_vlan_filter *filter) +{ + DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags); + if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID) + DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag); + if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID) + DP_CONT(msglvl, ", MAC=%pM", filter->mac); + DP_CONT(msglvl, "\n"); +} + +static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, + struct vfpf_set_q_filters_tlv *filters) +{ + int i; + + if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) + for (i = 0; i < filters->n_mac_vlan_filters; i++) + bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i, + &filters->filters[i]); + + if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) + DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask); + + if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) + for (i = 0; i < filters->n_multicast; i++) + DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]); +} + +#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID +#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID + +static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + int rc = 0; + + struct vfpf_set_q_filters_tlv *msg = + &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; + + /* check for any mac/vlan changes */ + if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { + /* build mac list */ + struct bnx2x_vf_mac_vlan_filters *fl = NULL; + + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_MAC_FILTER); + if (rc) + goto op_err; + + if (fl) { + + /* set mac list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) + goto op_err; + } + + /* build vlan list */ + fl = NULL; + + rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, + VFPF_VLAN_FILTER); + if (rc) + goto op_err; + + if (fl) { + /* set vlan list */ + rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, + msg->vf_qid, + false); + if (rc) + goto op_err; + } + } + + if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { + unsigned long accept = 0; + struct pf_vf_bulletin_content *bulletin = + BP_VF_BULLETIN(bp, vf->index); + + /* Ignore VF requested mode; instead set a regular mode */ + if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) { + __set_bit(BNX2X_ACCEPT_UNICAST, &accept); + __set_bit(BNX2X_ACCEPT_MULTICAST, &accept); + __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); + } + + /* A packet arriving the vf's mac should be accepted + * with any vlan, unless a vlan has already been + * configured. 
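+ * I.e. once the hypervisor has posted a vlan in the bulletin board
+ * (VLAN_VALID), BNX2X_ACCEPT_ANY_VLAN is left out here, and
+ * bnx2x_filters_validate_vlan() below likewise rejects vlan filters
+ * requested by the VF itself.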
+ */ + if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); + + /* set rx-mode */ + rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); + if (rc) + goto op_err; + } + + if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { + /* set mcasts */ + rc = bnx2x_vf_mcast(bp, vf, msg->multicast, + msg->n_multicast, false); + if (rc) + goto op_err; + } +op_err: + if (rc) + BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", + vf->abs_vfid, msg->vf_qid, rc); + return rc; +} + +static int bnx2x_filters_validate_mac(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); + int rc = 0; + + /* if a mac was already set for this VF via the set vf mac ndo, we only + * accept mac configurations of that mac. Why accept them at all? + * because PF may have been unable to configure the mac at the time + * since queue was not set up. + */ + if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { + /* once a mac was set by ndo can only accept a single mac... */ + if (filters->n_mac_vlan_filters > 1) { + BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", + vf->abs_vfid); + rc = -EPERM; + goto response; + } + + /* ...and only the mac set by the ndo */ + if (filters->n_mac_vlan_filters == 1 && + !ether_addr_equal(filters->filters->mac, bulletin->mac)) { + BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", + vf->abs_vfid); + + rc = -EPERM; + goto response; + } + } + +response: + return rc; +} + +static int bnx2x_filters_validate_vlan(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct vfpf_set_q_filters_tlv *filters) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); + int rc = 0; + + /* if vlan was set by hypervisor we don't allow guest to config vlan */ + if (bulletin->valid_bitmap & 1 << VLAN_VALID) { + int i; + + /* search for vlan filters */ + for (i = 0; i < filters->n_mac_vlan_filters; i++) { + if (filters->filters[i].flags & + VFPF_Q_FILTER_VLAN_TAG_VALID) { + BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. 
Aborting request\n", + vf->abs_vfid); + rc = -EPERM; + goto response; + } + } + } + + /* verify vf_qid */ + if (filters->vf_qid > vf_rxq_count(vf)) { + rc = -EPERM; + goto response; + } + +response: + return rc; +} + +static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; + int rc; + + rc = bnx2x_filters_validate_mac(bp, vf, filters); + if (rc) + goto response; + + rc = bnx2x_filters_validate_vlan(bp, vf, filters); + if (rc) + goto response; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", + vf->abs_vfid, + filters->vf_qid); + + /* print q_filter message */ + bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); + + rc = bnx2x_vf_mbx_qfilters(bp, vf); +response: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int qid = mbx->msg->req.q_op.vf_qid; + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n", + vf->abs_vfid, qid); + + rc = bnx2x_vf_queue_teardown(bp, vf, qid); + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); + + rc = bnx2x_vf_close(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int rc; + + DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); + + rc = bnx2x_vf_free(bp, vf); + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_config_rss_params rss; + struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; + int rc = 0; + + if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || + rss_tlv->rss_key_size != T_ETH_RSS_KEY) { + BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", + vf->index); + rc = -EINVAL; + goto mbx_resp; + } + + memset(&rss, 0, sizeof(struct bnx2x_config_rss_params)); + + /* set vfop params according to rss tlv */ + memcpy(rss.ind_table, rss_tlv->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key)); + rss.rss_obj = &vf->rss_conf_obj; + rss.rss_result_mask = rss_tlv->rss_result_mask; + + /* flags handled individually for backward/forward compatibility */ + rss.rss_flags = 0; + rss.ramrod_flags = 0; + + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) + __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) + __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) + __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4) + __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) + __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) + __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6) + __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) + __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) + __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags); + + if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || + 
(!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { + BNX2X_ERR("about to hit a FW assert. aborting...\n"); + rc = -EINVAL; + goto mbx_resp; + } + + rc = bnx2x_vf_rss_update(bp, vf, &rss); +mbx_resp: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static int bnx2x_validate_tpa_params(struct bnx2x *bp, + struct vfpf_tpa_tlv *tpa_tlv) +{ + int rc = 0; + + if (tpa_tlv->tpa_client_info.max_sges_for_packet > + U_ETH_MAX_SGES_FOR_PACKET) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_sges received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_sges_for_packet, + U_ETH_MAX_SGES_FOR_PACKET); + } + + if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { + rc = -EINVAL; + BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n", + tpa_tlv->tpa_client_info.max_tpa_queues, + MAX_AGG_QS(bp)); + } + + return rc; +} + +static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_queue_update_tpa_params vf_op_params; + struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; + int rc = 0; + + memset(&vf_op_params, 0, sizeof(vf_op_params)); + + if (bnx2x_validate_tpa_params(bp, tpa_tlv)) + goto mbx_resp; + + vf_op_params.complete_on_both_clients = + tpa_tlv->tpa_client_info.complete_on_both_clients; + vf_op_params.dont_verify_thr = + tpa_tlv->tpa_client_info.dont_verify_thr; + vf_op_params.max_agg_sz = + tpa_tlv->tpa_client_info.max_agg_size; + vf_op_params.max_sges_pkt = + tpa_tlv->tpa_client_info.max_sges_for_packet; + vf_op_params.max_tpa_queues = + tpa_tlv->tpa_client_info.max_tpa_queues; + vf_op_params.sge_buff_sz = + tpa_tlv->tpa_client_info.sge_buff_size; + vf_op_params.sge_pause_thr_high = + tpa_tlv->tpa_client_info.sge_pause_thr_high; + vf_op_params.sge_pause_thr_low = + tpa_tlv->tpa_client_info.sge_pause_thr_low; + vf_op_params.tpa_mode = + tpa_tlv->tpa_client_info.tpa_mode; + vf_op_params.update_ipv4 = + tpa_tlv->tpa_client_info.update_ipv4; + vf_op_params.update_ipv6 = + tpa_tlv->tpa_client_info.update_ipv6; + + rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params); + +mbx_resp: + bnx2x_vf_mbx_resp(bp, vf, rc); +} + +/* dispatch request */ +static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + int i; + + /* check if tlv type is known */ + if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { + /* Lock the per vf op mutex and note the locker's identity. + * The unlock will take place in mbx response. + */ + bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + + /* switch on the opcode */ + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + bnx2x_vf_mbx_acquire(bp, vf, mbx); + return; + case CHANNEL_TLV_INIT: + bnx2x_vf_mbx_init_vf(bp, vf, mbx); + return; + case CHANNEL_TLV_SETUP_Q: + bnx2x_vf_mbx_setup_q(bp, vf, mbx); + return; + case CHANNEL_TLV_SET_Q_FILTERS: + bnx2x_vf_mbx_set_q_filters(bp, vf, mbx); + return; + case CHANNEL_TLV_TEARDOWN_Q: + bnx2x_vf_mbx_teardown_q(bp, vf, mbx); + return; + case CHANNEL_TLV_CLOSE: + bnx2x_vf_mbx_close_vf(bp, vf, mbx); + return; + case CHANNEL_TLV_RELEASE: + bnx2x_vf_mbx_release_vf(bp, vf, mbx); + return; + case CHANNEL_TLV_UPDATE_RSS: + bnx2x_vf_mbx_update_rss(bp, vf, mbx); + return; + case CHANNEL_TLV_UPDATE_TPA: + bnx2x_vf_mbx_update_tpa(bp, vf, mbx); + return; + } + + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. 
Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length, + vf->state); + for (i = 0; i < 20; i++) + DP_CONT(BNX2X_MSG_IOV, "%x ", + mbx->msg->req.tlv_buf_size.tlv_buffer[i]); + } + + /* can we respond to VF (do we have an address for it?) */ + if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { + /* notify the VF that we do not support this request */ + bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED); + } else { + /* can't send a response since this VF is unknown to us + * just ack the FW to release the mailbox and unlock + * the channel. + */ + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + /* Firmware ack should be written before unlocking channel */ + mmiowb(); + bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type); + } +} + +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, + struct vf_pf_event_data *vfpf_event) +{ + u8 vf_idx; + + DP(BNX2X_MSG_IOV, + "vf pf event received: vfid %d, address_hi %x, address lo %x", + vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo); + /* Sanity checks consider removing later */ + + /* check if the vf_id is valid */ + if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf > + BNX2X_NR_VIRTFN(bp)) { + BNX2X_ERR("Illegal vf_id %d max allowed: %d\n", + vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); + return; + } + + vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); + + /* Update VFDB with current message and schedule its handling */ + mutex_lock(&BP_VFDB(bp)->event_mutex); + BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; + BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; + BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); + mutex_unlock(&BP_VFDB(bp)->event_mutex); + + bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); +} + +/* handle new vf-pf messages */ +void bnx2x_vf_mbx(struct bnx2x *bp) +{ + struct bnx2x_vfdb *vfdb = BP_VFDB(bp); + u64 events; + u8 vf_idx; + int rc; + + if (!vfdb) + return; + + mutex_lock(&vfdb->event_mutex); + events = vfdb->event_occur; + vfdb->event_occur = 0; + mutex_unlock(&vfdb->event_mutex); + + for_each_vf(bp, vf_idx) { + struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + /* Handle VFs which have pending events */ + if (!(events & (1ULL << vf_idx))) + continue; + + DP(BNX2X_MSG_IOV, + "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n", + vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo, + mbx->first_tlv.resp_msg_offset); + + /* dmae to get the VF request */ + rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, + vf->abs_vfid, mbx->vf_addr_hi, + mbx->vf_addr_lo, + sizeof(union vfpf_tlvs)/4); + if (rc) { + BNX2X_ERR("Failed to copy request VF %d\n", + vf->abs_vfid); + bnx2x_vf_release(bp, vf); + return; + } + + /* process the VF message header */ + mbx->first_tlv = mbx->msg->req.first_tlv; + + /* Clean response buffer to refrain from falsely + * seeing chains. + */ + memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + + /* dispatch the request (will prepare the response) */ + bnx2x_vf_mbx_request(bp, vf, mbx); + } +} + +void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, + bool support_long) +{ + /* Older VFs contain a bug where they can't check CRC for bulletin + * boards of length greater than legacy size. + */ + bulletin->length = support_long ? 
BULLETIN_CONTENT_SIZE : + BULLETIN_CONTENT_LEGACY_SIZE; + bulletin->crc = bnx2x_crc_vf_bulletin(bulletin); +} + +/* propagate local bulletin board to vf */ +int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf) +{ + struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf); + dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping + + vf * BULLETIN_CONTENT_SIZE; + dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map); + int rc; + + /* can only update vf after init took place */ + if (bnx2x_vf(bp, vf, state) != VF_ENABLED && + bnx2x_vf(bp, vf, state) != VF_ACQUIRED) + return 0; + + /* increment bulletin board version and compute crc */ + bulletin->version++; + bnx2x_vf_bulletin_finalize(bulletin, + (bnx2x_vf(bp, vf, cfg_flags) & + VF_CFG_EXT_BULLETIN) ? true : false); + + /* propagate bulletin board via dmae to vm memory */ + rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, + bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr), + U64_LO(vf_addr), bulletin->length / 4); + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h new file mode 100644 index 000000000..b86479fc0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -0,0 +1,462 @@ +/* bnx2x_vfpf.h: Broadcom Everest network driver. + * + * Copyright (c) 2011-2013 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. 
+ * + * Maintained by: Ariel Elior + * Written by: Ariel Elior + */ +#ifndef VF_PF_IF_H +#define VF_PF_IF_H + +#ifdef CONFIG_BNX2X_SRIOV + +/* Common definitions for all HVs */ +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; /* No limit so superfluous */ +}; + +struct hw_sb_info { + u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */ + u8 sb_qid; /* used to update DHC for sb */ +}; + +/* HW VF-PF channel definitions + * A.K.A VF-PF mailbox + */ +#define TLV_BUFFER_SIZE 1024 +#define PF_VF_BULLETIN_SIZE 512 + +#define VFPF_QUEUE_FLG_TPA 0x0001 +#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002 +#define VFPF_QUEUE_FLG_TPA_GRO 0x0004 +#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008 +#define VFPF_QUEUE_FLG_STATS 0x0010 +#define VFPF_QUEUE_FLG_OV 0x0020 +#define VFPF_QUEUE_FLG_VLAN 0x0040 +#define VFPF_QUEUE_FLG_COS 0x0080 +#define VFPF_QUEUE_FLG_HC 0x0100 +#define VFPF_QUEUE_FLG_DHC 0x0200 +#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400 + +#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) +#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) +#define VFPF_QUEUE_DROP_TTL0 (1 << 2) +#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3) + +#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000 +#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001 +#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002 +#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 +#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 +#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 +#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) +#define BULLETIN_CONTENT_LEGACY_SIZE (32) +#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ +#define BULLETIN_CRC_SEED 0 + +enum { + PFVF_STATUS_WAITING = 0, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE +}; + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate response + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 resp_msg_offset; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_general_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + +/* Acquire */ +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { + /* the following fields are for debug purposes */ + u8 vf_id; /* ME register value */ + u8 vf_os; /* e.g. 
Linux, W2K8 */ +#define VF_OS_SUBVERSION_MASK (0x1f) +#define VF_OS_MASK (0xe0) +#define VF_OS_SHIFT (5) +#define VF_OS_UNDEFINED (0 << VF_OS_SHIFT) +#define VF_OS_WINDOWS (1 << VF_OS_SHIFT) + + u8 fp_hsi_ver; + u8 caps; +#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0) + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + aligned_u64 bulletin_addr; +}; + +/* simple operation request on queue */ +struct vfpf_q_op_tlv { + struct vfpf_first_tlv first_tlv; + u8 vf_qid; + u8 padding[3]; +}; + +/* receive side scaling tlv */ +struct vfpf_rss_tlv { + struct vfpf_first_tlv first_tlv; + u32 rss_flags; +#define VFPF_RSS_MODE_DISABLED (1 << 0) +#define VFPF_RSS_MODE_REGULAR (1 << 1) +#define VFPF_RSS_SET_SRCH (1 << 2) +#define VFPF_RSS_IPV4 (1 << 3) +#define VFPF_RSS_IPV4_TCP (1 << 4) +#define VFPF_RSS_IPV4_UDP (1 << 5) +#define VFPF_RSS_IPV6 (1 << 6) +#define VFPF_RSS_IPV6_TCP (1 << 7) +#define VFPF_RSS_IPV6_UDP (1 << 8) + u8 rss_result_mask; + u8 ind_table_size; + u8 rss_key_size; + u8 padding; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY]; /* hash values */ +}; + +/* acquire response tlv - carries the allocated resources */ +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + struct pf_vf_pfdev_info { + u32 chip_num; + u32 pf_cap; +#define PFVF_CAP_RSS 0x00000001 +#define PFVF_CAP_DHC 0x00000002 +#define PFVF_CAP_TPA 0x00000004 +#define PFVF_CAP_TPA_UPDATE 0x00000008 + char fw_ver[32]; + u16 db_size; + u8 indices_per_sb; + u8 padding; + } pfdev_info; + struct pf_vf_resc { + /* in case of status NO_RESOURCE in message hdr, pf will fill + * this struct with suggested amount of resources for next + * acquire request + */ +#define PFVF_MAX_QUEUES_PER_VF 16 +#define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 permanent_mac_addr[ETH_ALEN]; + u8 current_mac_addr[ETH_ALEN]; + u8 padding[2]; + } resc; +}; + +struct vfpf_port_phys_id_resp_tlv { + struct channel_tlv tl; + u8 id[ETH_ALEN]; + u8 padding[2]; +}; + +struct vfpf_fp_hsi_resp_tlv { + struct channel_tlv tl; + u8 is_supported; + u8 padding[3]; +}; + +#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues + * stats will be coalesced on + * the leading RSS queue + */ + +/* Init VF */ +struct vfpf_init_tlv { + struct vfpf_first_tlv first_tlv; + aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ + aligned_u64 spq_addr; + aligned_u64 stats_addr; + u16 stats_stride; + u32 flags; + u32 padding[2]; +}; + +/* Setup Queue */ +struct vfpf_setup_q_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_rxq_params { + /* physical addresses */ + aligned_u64 rcq_addr; + aligned_u64 rcq_np_addr; + aligned_u64 rxq_addr; + aligned_u64 sge_addr; + + /* sb + hc info */ + u8 vf_sb; /* index in hw_sbs[] */ + u8 sb_index; /* Index in the SB */ + u16 hc_rate; /* desired interrupts per sec. 
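(the Linux VF fills this as 1000000/bp->rx_ticks; see bnx2x_vfpf_setup_q())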
*/ + /* valid iff VFPF_QUEUE_FLG_HC */ + /* rx buffer info */ + u16 mtu; + u16 buf_sz; + u16 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */ + + /* valid iff VFPF_QUEUE_FLG_TPA */ + u16 sge_buf_sz; + u16 tpa_agg_sz; + u8 max_sge_pkt; + + u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs + * all the flags are turned off + */ + + u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */ + u8 padding; + } rxq; + + struct vf_pf_txq_params { + /* physical addresses */ + aligned_u64 txq_addr; + + /* sb + hc info */ + u8 vf_sb; /* index in hw_sbs[] */ + u8 sb_index; /* Index in the SB */ + u16 hc_rate; /* desired interrupts per sec. */ + /* valid iff VFPF_QUEUE_FLG_HC */ + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */ + u8 traffic_type; /* see in setup_context() */ + u8 padding; + } txq; + + u8 vf_qid; /* index in hw_qid[] */ + u8 param_valid; +#define VFPF_RXQ_VALID 0x01 +#define VFPF_TXQ_VALID 0x02 + u8 padding[2]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; +#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 +#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 +#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + u8 mac[ETH_ALEN]; + u16 vlan_tag; +}; + +/* configure queue filters */ +struct vfpf_set_q_filters_tlv { + struct vfpf_first_tlv first_tlv; + + u32 flags; +#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01 +#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02 +#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04 + + u8 vf_qid; /* index in hw_qid[] */ + u8 n_mac_vlan_filters; + u8 n_multicast; + u8 padding; + +#define PFVF_MAX_MAC_FILTERS 16 +#define PFVF_MAX_VLAN_FILTERS 16 +#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\ + PFVF_MAX_VLAN_FILTERS) + struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS]; + +#define PFVF_MAX_MULTICAST_PER_VF 32 + u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN]; + + u32 rx_mask; /* see mask constants at the top of the file */ +}; + +struct vfpf_tpa_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_tpa_client_info { + aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF]; + u8 update_ipv4; + u8 update_ipv6; + u8 max_tpa_queues; + u8 max_sges_for_packet; + u8 complete_on_both_clients; + u8 dont_verify_thr; + u8 tpa_mode; + u16 sge_buff_size; + u16 max_agg_size; + u16 sge_pause_thr_low; + u16 sge_pause_thr_high; + } tpa_client_info; +}; + +/* close VF (disable VF) */ +struct vfpf_close_tlv { + struct vfpf_first_tlv first_tlv; + u16 vf_id; /* for debug */ + u8 padding[2]; +}; + +/* release the VF's acquired resources */ +struct vfpf_release_tlv { + struct vfpf_first_tlv first_tlv; + u16 vf_id; + u8 padding[2]; +}; + +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; + struct vfpf_init_tlv init; + struct vfpf_close_tlv close; + struct vfpf_q_op_tlv q_op; + struct vfpf_setup_q_tlv setup_q; + struct vfpf_set_q_filters_tlv set_q_filters; + struct vfpf_release_tlv release; + struct vfpf_rss_tlv update_rss; + struct vfpf_tpa_tlv update_tpa; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct pfvf_general_resp_tlv general_resp; + struct pfvf_acquire_resp_tlv acquire_resp; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +/* This is a structure which is allocated in the VF, which the PF may update + * when it deems it necessary to do so. 
The bulletin board is sampled + * periodically by the VF. A copy per VF is maintained in the PF (to prevent + * loss of data upon multiple updates (or the need for read modify write)). + */ +struct pf_vf_bulletin_size { + u8 size[PF_VF_BULLETIN_SIZE]; +}; + +struct pf_vf_bulletin_content { + u32 crc; /* crc of structure to ensure is not in + * mid-update + */ + u16 version; + u16 length; + + aligned_u64 valid_bitmap; /* bitmap indicating which fields + * hold valid values + */ + +#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address + * is available for it + */ +#define VLAN_VALID 1 /* when set, the vf should not access + * the vfpf channel + */ +#define CHANNEL_DOWN 2 /* vfpf channel is disabled. VFs are not + * to attempt to send messages on the + * channel after this bit is set + */ +#define LINK_VALID 3 /* alert the VF that a new link status + * update is available for it + */ + u8 mac[ETH_ALEN]; + u8 mac_padding[2]; + + u16 vlan; + u8 vlan_padding[6]; + + u16 link_speed; /* Effective line speed */ + u8 link_speed_padding[6]; + u32 link_flags; /* VFPF_LINK_REPORT_XXX flags */ +#define VFPF_LINK_REPORT_LINK_DOWN (1 << 0) +#define VFPF_LINK_REPORT_FULL_DUPLEX (1 << 1) +#define VFPF_LINK_REPORT_RX_FC_ON (1 << 2) +#define VFPF_LINK_REPORT_TX_FC_ON (1 << 3) + u8 link_flags_padding[4]; +}; + +union pf_vf_bulletin { + struct pf_vf_bulletin_content content; + struct pf_vf_bulletin_size size; +}; + +#define MAX_TLVS_IN_LIST 50 + +enum channel_tlvs { + CHANNEL_TLV_NONE, + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_INIT, + CHANNEL_TLV_SETUP_Q, + CHANNEL_TLV_SET_Q_FILTERS, + CHANNEL_TLV_ACTIVATE_Q, + CHANNEL_TLV_DEACTIVATE_Q, + CHANNEL_TLV_TEARDOWN_Q, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_UPDATE_RSS_DEPRECATED, + CHANNEL_TLV_PF_RELEASE_VF, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_FLR, + CHANNEL_TLV_PF_SET_MAC, + CHANNEL_TLV_PF_SET_VLAN, + CHANNEL_TLV_UPDATE_RSS, + CHANNEL_TLV_PHYS_PORT_ID, + CHANNEL_TLV_UPDATE_TPA, + CHANNEL_TLV_FP_HSI_SUPPORT, + CHANNEL_TLV_MAX +}; + +#endif /* CONFIG_BNX2X_SRIOV */ +#endif /* VF_PF_IF_H */ diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c new file mode 100644 index 000000000..17c145fdf --- /dev/null +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -0,0 +1,5759 @@ +/* cnic.c: QLogic CNIC core network driver. + * + * Copyright (c) 2006-2014 Broadcom Corporation + * Copyright (c) 2014-2015 QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation.
+ * + * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) + * Previously modified and maintained by: Michael Chan <mchan@broadcom.com> + * Maintained By: Dept-HSGLinuxNICDev@qlogic.com + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/uio_driver.h> +#include <linux/in.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/prefetch.h> +#include <linux/random.h> +#if IS_ENABLED(CONFIG_VLAN_8021Q) +#define BCM_VLAN 1 +#endif +#include <net/ip.h> +#include <net/tcp.h> +#include <net/route.h> +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/ip6_checksum.h> +#include <scsi/iscsi_if.h> + +#define BCM_CNIC 1 +#include "cnic_if.h" +#include "bnx2.h" +#include "bnx2x/bnx2x.h" +#include "bnx2x/bnx2x_reg.h" +#include "bnx2x/bnx2x_fw_defs.h" +#include "bnx2x/bnx2x_hsi.h" +#include "../../../scsi/bnx2i/57xx_iscsi_constants.h" +#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h" +#include "../../../scsi/bnx2fc/bnx2fc_constants.h" +#include "cnic.h" +#include "cnic_defs.h" + +#define CNIC_MODULE_NAME "cnic" + +static char version[] = + "QLogic " CNIC_MODULE_NAME "Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " + "Chen (zongxi@broadcom.com"); +MODULE_DESCRIPTION("QLogic cnic Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(CNIC_MODULE_VERSION); + +/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */ +static LIST_HEAD(cnic_dev_list); +static LIST_HEAD(cnic_udev_list); +static DEFINE_RWLOCK(cnic_dev_lock); +static DEFINE_MUTEX(cnic_lock); + +static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; + +/* helper function, assuming cnic_lock is held */ +static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type) +{ + return rcu_dereference_protected(cnic_ulp_tbl[type], + lockdep_is_held(&cnic_lock)); +} + +static int cnic_service_bnx2(void *, void *); +static int cnic_service_bnx2x(void *, void *); +static int cnic_ctl(void *, struct cnic_ctl_info *); + +static struct cnic_ops cnic_bnx2_ops = { + .cnic_owner = THIS_MODULE, + .cnic_handler = cnic_service_bnx2, + .cnic_ctl = cnic_ctl, +}; + +static struct cnic_ops cnic_bnx2x_ops = { + .cnic_owner = THIS_MODULE, + .cnic_handler = cnic_service_bnx2x, + .cnic_ctl = cnic_ctl, +}; + +static struct workqueue_struct *cnic_wq; + +static void cnic_shutdown_rings(struct cnic_dev *); +static void cnic_init_rings(struct cnic_dev *); +static int cnic_cm_set_pg(struct cnic_sock *); + +static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_uio_dev *udev = uinfo->priv; + struct cnic_dev *dev; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (udev->uio_dev != -1) + return -EBUSY; + + rtnl_lock(); + dev = udev->dev; + + if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + rtnl_unlock(); + return -ENODEV; + } + + udev->uio_dev = iminor(inode); + + cnic_shutdown_rings(dev); + cnic_init_rings(dev); + rtnl_unlock(); + + return 0; +} + +static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) +{ + struct cnic_uio_dev *udev = uinfo->priv; + + udev->uio_dev = -1; + return 0; +} + +static inline void cnic_hold(struct cnic_dev *dev) +{ + atomic_inc(&dev->ref_count); +} + +static inline void cnic_put(struct cnic_dev *dev) +{ + atomic_dec(&dev->ref_count); +} + +static inline void csk_hold(struct cnic_sock *csk) +{ + atomic_inc(&csk->ref_count); +} + +static inline void csk_put(struct cnic_sock *csk) +{ + atomic_dec(&csk->ref_count); +} + +static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) +{ + struct cnic_dev *cdev; + + read_lock(&cnic_dev_lock); +
list_for_each_entry(cdev, &cnic_dev_list, list) { + if (netdev == cdev->netdev) { + cnic_hold(cdev); + read_unlock(&cnic_dev_lock); + return cdev; + } + } + read_unlock(&cnic_dev_lock); + return NULL; +} + +static inline void ulp_get(struct cnic_ulp_ops *ulp_ops) +{ + atomic_inc(&ulp_ops->ref_count); +} + +static inline void ulp_put(struct cnic_ulp_ops *ulp_ops) +{ + atomic_dec(&ulp_ops->ref_count); +} + +static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_CTX_WR_CMD; + io->cid_addr = cid_addr; + io->offset = off; + io->data = val; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_CTXTBL_WR_CMD; + io->offset = off; + io->dma_addr = addr; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_l2_ring *ring = &info.data.ring; + + if (start) + info.cmd = DRV_CTL_START_L2_CMD; + else + info.cmd = DRV_CTL_STOP_L2_CMD; + + ring->cid = cid; + ring->client_id = cl_id; + ethdev->drv_ctl(dev->netdev, &info); +} + +static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_WR_CMD; + io->offset = off; + io->data = val; + ethdev->drv_ctl(dev->netdev, &info); +} + +static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct drv_ctl_io *io = &info.data.io; + + info.cmd = DRV_CTL_IO_RD_CMD; + io->offset = off; + ethdev->drv_ctl(dev->netdev, &info); + return io->data; +} + +static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + struct fcoe_capabilities *fcoe_cap = + &info.data.register_data.fcoe_features; + + if (reg) { + info.cmd = DRV_CTL_ULP_REGISTER_CMD; + if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap) + memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap)); + } else { + info.cmd = DRV_CTL_ULP_UNREGISTER_CMD; + } + + info.data.ulp_type = ulp_type; + ethdev->drv_ctl(dev->netdev, &info); +} + +static int cnic_in_use(struct cnic_sock *csk) +{ + return test_bit(SK_F_INUSE, &csk->flags); +} + +static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct drv_ctl_info info; + + info.cmd = cmd; + info.data.credit.credit_count = count; + ethdev->drv_ctl(dev->netdev, &info); +} + +static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) +{ + u32 i; + + if (!cp->ctx_tbl) + return -EINVAL; + + for (i = 0; i < cp->max_cid_space; i++) { + if (cp->ctx_tbl[i].cid == cid) { + *l5_cid = i; + return 0; + } + } + return -EINVAL; +} + +static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, + struct 
cnic_sock *csk) +{ + struct iscsi_path path_req; + char *buf = NULL; + u16 len = 0; + u32 msg_type = ISCSI_KEVENT_IF_DOWN; + struct cnic_ulp_ops *ulp_ops; + struct cnic_uio_dev *udev = cp->udev; + int rc = 0, retry = 0; + + if (!udev || udev->uio_dev == -1) + return -ENODEV; + + if (csk) { + len = sizeof(path_req); + buf = (char *) &path_req; + memset(&path_req, 0, len); + + msg_type = ISCSI_KEVENT_PATH_REQ; + path_req.handle = (u64) csk->l5_cid; + if (test_bit(SK_F_IPV6, &csk->flags)) { + memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0], + sizeof(struct in6_addr)); + path_req.ip_addr_len = 16; + } else { + memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0], + sizeof(struct in_addr)); + path_req.ip_addr_len = 4; + } + path_req.vlan_id = csk->vlan_id; + path_req.pmtu = csk->mtu; + } + + while (retry < 3) { + rc = 0; + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); + if (ulp_ops) + rc = ulp_ops->iscsi_nl_send_msg( + cp->ulp_handle[CNIC_ULP_ISCSI], + msg_type, buf, len); + rcu_read_unlock(); + if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ) + break; + + msleep(100); + retry++; + } + return rc; +} + +static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8); + +static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, + char *buf, u16 len) +{ + int rc = -EINVAL; + + switch (msg_type) { + case ISCSI_UEVENT_PATH_UPDATE: { + struct cnic_local *cp; + u32 l5_cid; + struct cnic_sock *csk; + struct iscsi_path *path_resp; + + if (len < sizeof(*path_resp)) + break; + + path_resp = (struct iscsi_path *) buf; + cp = dev->cnic_priv; + l5_cid = (u32) path_resp->handle; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + break; + + if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { + rc = -ENODEV; + break; + } + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + if (cnic_in_use(csk) && + test_bit(SK_F_CONNECT_START, &csk->flags)) { + + csk->vlan_id = path_resp->vlan_id; + + memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN); + if (test_bit(SK_F_IPV6, &csk->flags)) + memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, + sizeof(struct in6_addr)); + else + memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, + sizeof(struct in_addr)); + + if (is_valid_ether_addr(csk->ha)) { + cnic_cm_set_pg(csk); + } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) && + !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + + cnic_cm_upcall(cp, csk, + L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); + clear_bit(SK_F_CONNECT_START, &csk->flags); + } + } + csk_put(csk); + rc = 0; + } + } + + return rc; +} + +static int cnic_offld_prep(struct cnic_sock *csk) +{ + if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + return 0; + + if (!test_bit(SK_F_CONNECT_START, &csk->flags)) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + return 0; + } + + return 1; +} + +static int cnic_close_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + smp_mb__after_atomic(); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + return 1; + } + return 0; +} + +static int cnic_abort_prep(struct cnic_sock *csk) +{ + clear_bit(SK_F_CONNECT_START, &csk->flags); + smp_mb__after_atomic(); + + while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) + msleep(1); + + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; + return 1; + } + + return 0; +} + +int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) +{ + struct cnic_dev *dev; + + if (ulp_type < 0 || ulp_type >= 
MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad type %d\n", __func__, ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl_prot(ulp_type)) { + pr_err("%s: Type %d has already been registered\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); + } + read_unlock(&cnic_dev_lock); + + atomic_set(&ulp_ops->ref_count, 0); + rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); + mutex_unlock(&cnic_lock); + + /* Prevent race conditions with netdev_event */ + rtnl_lock(); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_init(dev); + } + rtnl_unlock(); + + return 0; +} + +int cnic_unregister_driver(int ulp_type) +{ + struct cnic_dev *dev; + struct cnic_ulp_ops *ulp_ops; + int i = 0; + + if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad type %d\n", __func__, ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl_prot(ulp_type); + if (!ulp_ops) { + pr_err("%s: Type %d has not been registered\n", + __func__, ulp_type); + goto out_unlock; + } + read_lock(&cnic_dev_lock); + list_for_each_entry(dev, &cnic_dev_list, list) { + struct cnic_local *cp = dev->cnic_priv; + + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { + pr_err("%s: Type %d still has devices registered\n", + __func__, ulp_type); + read_unlock(&cnic_dev_lock); + goto out_unlock; + } + } + read_unlock(&cnic_dev_lock); + + RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL); + + mutex_unlock(&cnic_lock); + synchronize_rcu(); + while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) { + msleep(100); + i++; + } + + if (atomic_read(&ulp_ops->ref_count) != 0) + pr_warn("%s: Failed waiting for ref count to go to zero\n", + __func__); + return 0; + +out_unlock: + mutex_unlock(&cnic_lock); + return -EINVAL; +} + +static int cnic_start_hw(struct cnic_dev *); +static void cnic_stop_hw(struct cnic_dev *); + +static int cnic_register_device(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + + if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad type %d\n", __func__, ulp_type); + return -EINVAL; + } + mutex_lock(&cnic_lock); + if (cnic_ulp_tbl_prot(ulp_type) == NULL) { + pr_err("%s: Driver with type %d has not been registered\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EAGAIN; + } + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { + pr_err("%s: Type %d has already been registered to this device\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EBUSY; + } + + clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); + cp->ulp_handle[ulp_type] = ulp_ctx; + ulp_ops = cnic_ulp_tbl_prot(ulp_type); + rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); + cnic_hold(dev); + + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) + ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); + + mutex_unlock(&cnic_lock); + + cnic_ulp_ctl(dev, ulp_type, true); + + return 0; + +} +EXPORT_SYMBOL(cnic_register_driver); + +static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) +{ + struct cnic_local *cp = dev->cnic_priv; + int i = 0; + + if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { + pr_err("%s: Bad 
type %d\n", __func__, ulp_type); + return -EINVAL; + } + + if (ulp_type == CNIC_ULP_ISCSI) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + + mutex_lock(&cnic_lock); + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { + RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); + cnic_put(dev); + } else { + pr_err("%s: device not registered to this ulp type %d\n", + __func__, ulp_type); + mutex_unlock(&cnic_lock); + return -EINVAL; + } + mutex_unlock(&cnic_lock); + + if (ulp_type == CNIC_ULP_FCOE) + dev->fcoe_cap = NULL; + + synchronize_rcu(); + + while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && + i < 20) { + msleep(100); + i++; + } + if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) + netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); + + cnic_ulp_ctl(dev, ulp_type, false); + + return 0; +} +EXPORT_SYMBOL(cnic_unregister_driver); + +static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id, + u32 next) +{ + id_tbl->start = start_id; + id_tbl->max = size; + id_tbl->next = next; + spin_lock_init(&id_tbl->lock); + id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); + if (!id_tbl->table) + return -ENOMEM; + + return 0; +} + +static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) +{ + kfree(id_tbl->table); + id_tbl->table = NULL; +} + +static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + int ret = -1; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return ret; + + spin_lock(&id_tbl->lock); + if (!test_bit(id, id_tbl->table)) { + set_bit(id, id_tbl->table); + ret = 0; + } + spin_unlock(&id_tbl->lock); + return ret; +} + +/* Returns -1 if not successful */ +static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) +{ + u32 id; + + spin_lock(&id_tbl->lock); + id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); + if (id >= id_tbl->max) { + id = -1; + if (id_tbl->next != 0) { + id = find_first_zero_bit(id_tbl->table, id_tbl->next); + if (id >= id_tbl->next) + id = -1; + } + } + + if (id < id_tbl->max) { + set_bit(id, id_tbl->table); + id_tbl->next = (id + 1) & (id_tbl->max - 1); + id += id_tbl->start; + } + + spin_unlock(&id_tbl->lock); + + return id; +} + +static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) +{ + if (id == -1) + return; + + id -= id_tbl->start; + if (id >= id_tbl->max) + return; + + clear_bit(id, id_tbl->table); +} + +static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + + if (!dma->pg_arr) + return; + + for (i = 0; i < dma->num_pages; i++) { + if (dma->pg_arr[i]) { + dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE, + dma->pg_arr[i], dma->pg_map_arr[i]); + dma->pg_arr[i] = NULL; + } + } + if (dma->pgtbl) { + dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size, + dma->pgtbl, dma->pgtbl_map); + dma->pgtbl = NULL; + } + kfree(dma->pg_arr); + dma->pg_arr = NULL; + dma->num_pages = 0; +} + +static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + __le32 *page_table = (__le32 *) dma->pgtbl; + + for (i = 0; i < dma->num_pages; i++) { + /* Each entry needs to be in big endian format. */ + *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); + page_table++; + *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); + page_table++; + } +} + +static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) +{ + int i; + __le32 *page_table = (__le32 *) dma->pgtbl; + + for (i = 0; i < dma->num_pages; i++) { + /* Each entry needs to be in little endian format. 
*/ + *page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff); + page_table++; + *page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32); + page_table++; + } +} + +static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, + int pages, int use_pg_tbl) +{ + int i, size; + struct cnic_local *cp = dev->cnic_priv; + + size = pages * (sizeof(void *) + sizeof(dma_addr_t)); + dma->pg_arr = kzalloc(size, GFP_ATOMIC); + if (dma->pg_arr == NULL) + return -ENOMEM; + + dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); + dma->num_pages = pages; + + for (i = 0; i < pages; i++) { + dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, + CNIC_PAGE_SIZE, + &dma->pg_map_arr[i], + GFP_ATOMIC); + if (dma->pg_arr[i] == NULL) + goto error; + } + if (!use_pg_tbl) + return 0; + + dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) & + ~(CNIC_PAGE_SIZE - 1); + dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, + &dma->pgtbl_map, GFP_ATOMIC); + if (dma->pgtbl == NULL) + goto error; + + cp->setup_pgtbl(dev, dma); + + return 0; + +error: + cnic_free_dma(dev, dma); + return -ENOMEM; +} + +static void cnic_free_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i; + + for (i = 0; i < cp->ctx_blks; i++) { + if (cp->ctx_arr[i].ctx) { + dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, + cp->ctx_arr[i].ctx, + cp->ctx_arr[i].mapping); + cp->ctx_arr[i].ctx = NULL; + } + } +} + +static void __cnic_free_uio_rings(struct cnic_uio_dev *udev) +{ + if (udev->l2_buf) { + dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, + udev->l2_buf, udev->l2_buf_map); + udev->l2_buf = NULL; + } + + if (udev->l2_ring) { + dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, + udev->l2_ring, udev->l2_ring_map); + udev->l2_ring = NULL; + } + +} + +static void __cnic_free_uio(struct cnic_uio_dev *udev) +{ + uio_unregister_device(&udev->cnic_uinfo); + + __cnic_free_uio_rings(udev); + + pci_dev_put(udev->pdev); + kfree(udev); +} + +static void cnic_free_uio(struct cnic_uio_dev *udev) +{ + if (!udev) + return; + + write_lock(&cnic_dev_lock); + list_del_init(&udev->list); + write_unlock(&cnic_dev_lock); + __cnic_free_uio(udev); +} + +static void cnic_free_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + + if (udev) { + udev->dev = NULL; + cp->udev = NULL; + if (udev->uio_dev == -1) + __cnic_free_uio_rings(udev); + } + + cnic_free_context(dev); + kfree(cp->ctx_arr); + cp->ctx_arr = NULL; + cp->ctx_blks = 0; + + cnic_free_dma(dev, &cp->gbl_buf_info); + cnic_free_dma(dev, &cp->kwq_info); + cnic_free_dma(dev, &cp->kwq_16_data_info); + cnic_free_dma(dev, &cp->kcq2.dma); + cnic_free_dma(dev, &cp->kcq1.dma); + kfree(cp->iscsi_tbl); + cp->iscsi_tbl = NULL; + kfree(cp->ctx_tbl); + cp->ctx_tbl = NULL; + + cnic_free_id_tbl(&cp->fcoe_cid_tbl); + cnic_free_id_tbl(&cp->cid_tbl); +} + +static int cnic_alloc_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { + int i, k, arr_size; + + cp->ctx_blk_size = CNIC_PAGE_SIZE; + cp->cids_per_blk = CNIC_PAGE_SIZE / 128; + arr_size = BNX2_MAX_CID / cp->cids_per_blk * + sizeof(struct cnic_ctx); + cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); + if (cp->ctx_arr == NULL) + return -ENOMEM; + + k = 0; + for (i = 0; i < 2; i++) { + u32 j, reg, off, lo, hi; + + if (i == 0) + off = BNX2_PG_CTX_MAP; + else + off = BNX2_ISCSI_CTX_MAP; + + reg = cnic_reg_rd_ind(dev, off); + lo = reg >> 16; + hi = reg & 0xffff; + for (j = lo; j < 
hi; j += cp->cids_per_blk, k++) + cp->ctx_arr[k].cid = j; + } + + cp->ctx_blks = k; + if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { + cp->ctx_blks = 0; + return -ENOMEM; + } + + for (i = 0; i < cp->ctx_blks; i++) { + cp->ctx_arr[i].ctx = + dma_alloc_coherent(&dev->pcidev->dev, + CNIC_PAGE_SIZE, + &cp->ctx_arr[i].mapping, + GFP_KERNEL); + if (cp->ctx_arr[i].ctx == NULL) + return -ENOMEM; + } + } + return 0; +} + +static u16 cnic_bnx2_next_idx(u16 idx) +{ + return idx + 1; +} + +static u16 cnic_bnx2_hw_idx(u16 idx) +{ + return idx; +} + +static u16 cnic_bnx2x_next_idx(u16 idx) +{ + idx++; + if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) + idx++; + + return idx; +} + +static u16 cnic_bnx2x_hw_idx(u16 idx) +{ + if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) + idx++; + return idx; +} + +static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, + bool use_pg_tbl) +{ + int err, i, use_page_tbl = 0; + struct kcqe **kcq; + + if (use_pg_tbl) + use_page_tbl = 1; + + err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl); + if (err) + return err; + + kcq = (struct kcqe **) info->dma.pg_arr; + info->kcq = kcq; + + info->next_idx = cnic_bnx2_next_idx; + info->hw_idx = cnic_bnx2_hw_idx; + if (use_pg_tbl) + return 0; + + info->next_idx = cnic_bnx2x_next_idx; + info->hw_idx = cnic_bnx2x_hw_idx; + + for (i = 0; i < KCQ_PAGE_CNT; i++) { + struct bnx2x_bd_chain_next *next = + (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT]; + int j = i + 1; + + if (j >= KCQ_PAGE_CNT) + j = 0; + next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32; + next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff; + } + return 0; +} + +static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) +{ + struct cnic_local *cp = udev->dev->cnic_priv; + + if (udev->l2_ring) + return 0; + + udev->l2_ring_size = pages * CNIC_PAGE_SIZE; + udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, + &udev->l2_ring_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_ring) + return -ENOMEM; + + udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; + udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size); + udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, + &udev->l2_buf_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_buf) { + __cnic_free_uio_rings(udev); + return -ENOMEM; + } + + return 0; + +} + +static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev; + + list_for_each_entry(udev, &cnic_udev_list, list) { + if (udev->pdev == dev->pcidev) { + udev->dev = dev; + if (__cnic_alloc_uio_rings(udev, pages)) { + udev->dev = NULL; + return -ENOMEM; + } + cp->udev = udev; + return 0; + } + } + + udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC); + if (!udev) + return -ENOMEM; + + udev->uio_dev = -1; + + udev->dev = dev; + udev->pdev = dev->pcidev; + + if (__cnic_alloc_uio_rings(udev, pages)) + goto err_udev; + + list_add(&udev->list, &cnic_udev_list); + + pci_dev_get(udev->pdev); + + cp->udev = udev; + + return 0; + + err_udev: + kfree(udev); + return -ENOMEM; +} + +static int cnic_init_uio(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + struct uio_info *uinfo; + int ret = 0; + + if (!udev) + return -ENOMEM; + + uinfo = &udev->cnic_uinfo; + + uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0); + uinfo->mem[0].internal_addr = dev->regview; + uinfo->mem[0].memtype = UIO_MEM_PHYS; + + if (test_bit(CNIC_F_BNX2_CLASS, 
&dev->flags)) { + uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + + TX_MAX_TSS_RINGS + 1); + uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & + CNIC_PAGE_MASK; + if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) + uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; + else + uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; + + uinfo->name = "bnx2_cnic"; + } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); + + uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & + CNIC_PAGE_MASK; + uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); + + uinfo->name = "bnx2x_cnic"; + } + + uinfo->mem[1].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[2].addr = (unsigned long) udev->l2_ring; + uinfo->mem[2].size = udev->l2_ring_size; + uinfo->mem[2].memtype = UIO_MEM_LOGICAL; + + uinfo->mem[3].addr = (unsigned long) udev->l2_buf; + uinfo->mem[3].size = udev->l2_buf_size; + uinfo->mem[3].memtype = UIO_MEM_LOGICAL; + + uinfo->version = CNIC_MODULE_VERSION; + uinfo->irq = UIO_IRQ_CUSTOM; + + uinfo->open = cnic_uio_open; + uinfo->release = cnic_uio_close; + + if (udev->uio_dev == -1) { + if (!uinfo->priv) { + uinfo->priv = udev; + + ret = uio_register_device(&udev->pdev->dev, uinfo); + } + } else { + cnic_init_rings(dev); + } + + return ret; +} + +static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int ret; + + ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); + if (ret) + goto error; + cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; + + ret = cnic_alloc_kcq(dev, &cp->kcq1, true); + if (ret) + goto error; + + ret = cnic_alloc_context(dev); + if (ret) + goto error; + + ret = cnic_alloc_uio_rings(dev, 2); + if (ret) + goto error; + + ret = cnic_init_uio(dev); + if (ret) + goto error; + + return 0; + +error: + cnic_free_resc(dev); + return ret; +} + +static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + int ctx_blk_size = cp->ethdev->ctx_blk_size; + int total_mem, blks, i; + + total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; + blks = total_mem / ctx_blk_size; + if (total_mem % ctx_blk_size) + blks++; + + if (blks > cp->ethdev->ctx_tbl_len) + return -ENOMEM; + + cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL); + if (cp->ctx_arr == NULL) + return -ENOMEM; + + cp->ctx_blks = blks; + cp->ctx_blk_size = ctx_blk_size; + if (!CHIP_IS_E1(bp)) + cp->ctx_align = 0; + else + cp->ctx_align = ctx_blk_size; + + cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; + + for (i = 0; i < blks; i++) { + cp->ctx_arr[i].ctx = + dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, + &cp->ctx_arr[i].mapping, + GFP_KERNEL); + if (cp->ctx_arr[i].ctx == NULL) + return -ENOMEM; + + if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { + if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { + cnic_free_context(dev); + cp->ctx_blk_size += cp->ctx_align; + i = -1; + continue; + } + } + } + return 0; +} + +static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 start_cid = ethdev->starting_cid; + int i, j, n, ret, pages; + struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; + + cp->max_cid_space = MAX_ISCSI_TBL_SZ; + cp->iscsi_start_cid = start_cid; + cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; + + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { + 
cp->max_cid_space += dev->max_fcoe_conn; + cp->fcoe_init_cid = ethdev->fcoe_init_cid; + if (!cp->fcoe_init_cid) + cp->fcoe_init_cid = 0x10; + } + + cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, + GFP_KERNEL); + if (!cp->iscsi_tbl) + goto error; + + cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * + cp->max_cid_space, GFP_KERNEL); + if (!cp->ctx_tbl) + goto error; + + for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { + cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; + cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; + } + + for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) + cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; + + pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / + CNIC_PAGE_SIZE; + + ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); + if (ret) + return -ENOMEM; + + n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; + for (i = 0, j = 0; i < cp->max_cid_space; i++) { + long off = CNIC_KWQ16_DATA_SIZE * (i % n); + + cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; + cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + + off; + + if ((i % n) == (n - 1)) + j++; + } + + ret = cnic_alloc_kcq(dev, &cp->kcq1, false); + if (ret) + goto error; + + if (CNIC_SUPPORTS_FCOE(bp)) { + ret = cnic_alloc_kcq(dev, &cp->kcq2, true); + if (ret) + goto error; + } + + pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE; + ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); + if (ret) + goto error; + + ret = cnic_alloc_bnx2x_context(dev); + if (ret) + goto error; + + if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + return 0; + + cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; + + cp->l2_rx_ring_size = 15; + + ret = cnic_alloc_uio_rings(dev, 4); + if (ret) + goto error; + + ret = cnic_init_uio(dev); + if (ret) + goto error; + + return 0; + +error: + cnic_free_resc(dev); + return -ENOMEM; +} + +static inline u32 cnic_kwq_avail(struct cnic_local *cp) +{ + return cp->max_kwq_idx - + ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); +} + +static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes) +{ + struct cnic_local *cp = dev->cnic_priv; + struct kwqe *prod_qe; + u16 prod, sw_prod, i; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + spin_lock_bh(&cp->cnic_ulp_lock); + if (num_wqes > cnic_kwq_avail(cp) && + !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) { + spin_unlock_bh(&cp->cnic_ulp_lock); + return -EAGAIN; + } + + clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); + + prod = cp->kwq_prod_idx; + sw_prod = prod & MAX_KWQ_IDX; + for (i = 0; i < num_wqes; i++) { + prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; + memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); + prod++; + sw_prod = prod & MAX_KWQ_IDX; + } + cp->kwq_prod_idx = prod; + + CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); + + spin_unlock_bh(&cp->cnic_ulp_lock); + return 0; +} + +static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, + union l5cm_specific_data *l5_data) +{ + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + dma_addr_t map; + + map = ctx->kwqe_data_mapping; + l5_data->phy_address.lo = (u64) map & 0xffffffff; + l5_data->phy_address.hi = (u64) map >> 32; + return ctx->kwqe_data; +} + +static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, + u32 type, union l5cm_specific_data *l5_data) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct l5cm_spe kwqe; + struct kwqe_16 
*kwq[1]; + u16 type_16; + int ret; + + kwqe.hdr.conn_and_cmd_data = + cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | + BNX2X_HW_CID(bp, cid))); + + type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; + type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) & + SPE_HDR_FUNCTION_ID; + + kwqe.hdr.type = cpu_to_le16(type_16); + kwqe.hdr.reserved1 = 0; + kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); + kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); + + kwq[0] = (struct kwqe_16 *) &kwqe; + + spin_lock_bh(&cp->cnic_ulp_lock); + ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); + spin_unlock_bh(&cp->cnic_ulp_lock); + + if (ret == 1) + return 0; + + return ret; +} + +static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, + struct kcqe *cqes[], u32 num_cqes) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (likely(ulp_ops)) { + ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], + cqes, num_cqes); + } + rcu_read_unlock(); +} + +static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps, + int en_tcp_dack) +{ + struct bnx2x *bp = netdev_priv(dev->netdev); + u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; + u16 tstorm_flags = 0; + + if (time_stamps) { + xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; + } + if (en_tcp_dack) + tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN; + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags); + + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags); +} + +static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; + int hq_bds, pages; + u32 pfid = bp->pfid; + + cp->num_iscsi_tasks = req1->num_tasks_per_conn; + cp->num_ccells = req1->num_ccells_per_conn; + cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * + cp->num_iscsi_tasks; + cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * + BNX2X_ISCSI_R2TQE_SIZE; + cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; + pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; + hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); + cp->num_cqs = req1->num_cqs; + + if (!dev->max_iscsi_conn) + return 0; + + /* init Tstorm RAM */ + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), + req1->rq_num_wqes); + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + CNIC_PAGE_SIZE); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); + CNIC_WR16(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + + /* init Ustorm RAM */ + CNIC_WR16(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), + req1->rq_buffer_size); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + CNIC_PAGE_SIZE); + CNIC_WR8(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid), + req1->rq_num_wqes); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + 
USTORM_ISCSI_CQ_SIZE_OFFSET(pfid), + req1->cq_num_wqes); + CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), + cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); + + /* init Xstorm RAM */ + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + CNIC_PAGE_SIZE); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), + hq_bds); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid), + cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); + + /* init Cstorm RAM */ + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), + CNIC_PAGE_SIZE); + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), + req1->num_tasks_per_conn); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid), + req1->cq_num_wqes); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid), + hq_bds); + + cnic_bnx2x_set_tcp_options(dev, + req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE, + req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE); + + return 0; +} + +static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 pfid = bp->pfid; + struct iscsi_kcqe kcqe; + struct kcqe *cqes[1]; + + memset(&kcqe, 0, sizeof(kcqe)); + if (!dev->max_iscsi_conn) { + kcqe.completion_status = + ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; + goto done; + } + + CNIC_WR(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); + CNIC_WR(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, + req2->error_bit_map[1]); + + CNIC_WR16(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]); + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4, + req2->error_bit_map[1]); + + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn); + + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; + +done: + kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); + + return 0; +} + +static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { + struct cnic_iscsi *iscsi = ctx->proto.iscsi; + + cnic_free_dma(dev, &iscsi->hq_info); + cnic_free_dma(dev, &iscsi->r2tq_info); + cnic_free_dma(dev, &iscsi->task_array_info); + cnic_free_id(&cp->cid_tbl, ctx->cid); + } else { + cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid); + } + + ctx->cid = 0; +} + +static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) +{ + u32 cid; + int ret, pages; + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + struct cnic_iscsi *iscsi = ctx->proto.iscsi; + + if 
(ctx->ulp_proto_id == CNIC_ULP_FCOE) { + cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl); + if (cid == -1) { + ret = -ENOMEM; + goto error; + } + ctx->cid = cid; + return 0; + } + + cid = cnic_alloc_new_id(&cp->cid_tbl); + if (cid == -1) { + ret = -ENOMEM; + goto error; + } + + ctx->cid = cid; + pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE; + + ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); + if (ret) + goto error; + + pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE; + ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); + if (ret) + goto error; + + pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; + ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); + if (ret) + goto error; + + return 0; + +error: + cnic_free_bnx2x_conn_resc(dev, l5_cid); + return ret; +} + +static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, + struct regpair *ctx_addr) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; + int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; + unsigned long align_off = 0; + dma_addr_t ctx_map; + void *ctx; + + if (cp->ctx_align) { + unsigned long mask = cp->ctx_align - 1; + + if (cp->ctx_arr[blk].mapping & mask) + align_off = cp->ctx_align - + (cp->ctx_arr[blk].mapping & mask); + } + ctx_map = cp->ctx_arr[blk].mapping + align_off + + (off * BNX2X_CONTEXT_MEM_SIZE); + ctx = cp->ctx_arr[blk].ctx + align_off + + (off * BNX2X_CONTEXT_MEM_SIZE); + if (init) + memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); + + ctx_addr->lo = ctx_map & 0xffffffff; + ctx_addr->hi = (u64) ctx_map >> 32; + return ctx; +} + +static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct iscsi_kwqe_conn_offload1 *req1 = + (struct iscsi_kwqe_conn_offload1 *) wqes[0]; + struct iscsi_kwqe_conn_offload2 *req2 = + (struct iscsi_kwqe_conn_offload2 *) wqes[1]; + struct iscsi_kwqe_conn_offload3 *req3; + struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; + struct cnic_iscsi *iscsi = ctx->proto.iscsi; + u32 cid = ctx->cid; + u32 hw_cid = BNX2X_HW_CID(bp, cid); + struct iscsi_context *ictx; + struct regpair context_addr; + int i, j, n = 2, n_max; + u8 port = BP_PORT(bp); + + ctx->ctx_flags = 0; + if (!req2->num_additional_wqes) + return -EINVAL; + + n_max = req2->num_additional_wqes + 2; + + ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); + if (ictx == NULL) + return -ENOMEM; + + req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; + + ictx->xstorm_ag_context.hq_prod = 1; + + ictx->xstorm_st_context.iscsi.first_burst_length = + ISCSI_DEF_FIRST_BURST_LEN; + ictx->xstorm_st_context.iscsi.max_send_pdu_length = + ISCSI_DEF_MAX_RECV_SEG_LEN; + ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = + req1->sq_page_table_addr_lo; + ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = + req1->sq_page_table_addr_hi; + ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; + ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; + ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = + iscsi->hq_info.pgtbl_map & 0xffffffff; + ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = + (u64) iscsi->hq_info.pgtbl_map >> 32; + ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = + iscsi->hq_info.pgtbl[0]; + ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = + iscsi->hq_info.pgtbl[1]; + ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = + 
iscsi->r2tq_info.pgtbl_map & 0xffffffff; + ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = + (u64) iscsi->r2tq_info.pgtbl_map >> 32; + ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = + iscsi->r2tq_info.pgtbl[0]; + ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = + iscsi->r2tq_info.pgtbl[1]; + ictx->xstorm_st_context.iscsi.task_pbl_base.lo = + iscsi->task_array_info.pgtbl_map & 0xffffffff; + ictx->xstorm_st_context.iscsi.task_pbl_base.hi = + (u64) iscsi->task_array_info.pgtbl_map >> 32; + ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = + BNX2X_ISCSI_PBL_NOT_CACHED; + ictx->xstorm_st_context.iscsi.flags.flags |= + XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; + ictx->xstorm_st_context.iscsi.flags.flags |= + XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; + ictx->xstorm_st_context.common.ethernet.reserved_vlan_type = + ETH_P_8021Q; + if (BNX2X_CHIP_IS_E2_PLUS(bp) && + bp->common.chip_port_mode == CHIP_2_PORT_MODE) { + + port = 0; + } + ictx->xstorm_st_context.common.flags = + 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT; + ictx->xstorm_st_context.common.flags = + port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT; + + ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; + /* TSTORM requires the base address of RQ DB & not PTE */ + ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = + req2->rq_page_table_addr_lo & CNIC_PAGE_MASK; + ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = + req2->rq_page_table_addr_hi; + ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; + ictx->tstorm_st_context.tcp.cwnd = 0x5A8; + ictx->tstorm_st_context.tcp.flags2 |= + TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; + ictx->tstorm_st_context.tcp.ooo_support_mode = + TCP_TSTORM_OOO_DROP_AND_PROC_ACK; + + ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; + + ictx->ustorm_st_context.ring.rq.pbl_base.lo = + req2->rq_page_table_addr_lo; + ictx->ustorm_st_context.ring.rq.pbl_base.hi = + req2->rq_page_table_addr_hi; + ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; + ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; + ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = + iscsi->r2tq_info.pgtbl_map & 0xffffffff; + ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = + (u64) iscsi->r2tq_info.pgtbl_map >> 32; + ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = + iscsi->r2tq_info.pgtbl[0]; + ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = + iscsi->r2tq_info.pgtbl[1]; + ictx->ustorm_st_context.ring.cq_pbl_base.lo = + req1->cq_page_table_addr_lo; + ictx->ustorm_st_context.ring.cq_pbl_base.hi = + req1->cq_page_table_addr_hi; + ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; + ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; + ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; + ictx->ustorm_st_context.task_pbe_cache_index = + BNX2X_ISCSI_PBL_NOT_CACHED; + ictx->ustorm_st_context.task_pdu_cache_index = + BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; + + for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { + if (j == 3) { + if (n >= n_max) + break; + req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; + j = 0; + } + ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; + ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = + req3->qp_first_pte[j].hi; + ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = + req3->qp_first_pte[j].lo; + } + + ictx->ustorm_st_context.task_pbl_base.lo = + iscsi->task_array_info.pgtbl_map & 0xffffffff; + ictx->ustorm_st_context.task_pbl_base.hi = + (u64) 
iscsi->task_array_info.pgtbl_map >> 32; + ictx->ustorm_st_context.tce_phy_addr.lo = + iscsi->task_array_info.pgtbl[0]; + ictx->ustorm_st_context.tce_phy_addr.hi = + iscsi->task_array_info.pgtbl[1]; + ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; + ictx->ustorm_st_context.num_cqs = cp->num_cqs; + ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; + ictx->ustorm_st_context.negotiated_rx_and_flags |= + ISCSI_DEF_MAX_BURST_LEN; + ictx->ustorm_st_context.negotiated_rx |= + ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << + USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; + + ictx->cstorm_st_context.hq_pbl_base.lo = + iscsi->hq_info.pgtbl_map & 0xffffffff; + ictx->cstorm_st_context.hq_pbl_base.hi = + (u64) iscsi->hq_info.pgtbl_map >> 32; + ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; + ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; + ictx->cstorm_st_context.task_pbl_base.lo = + iscsi->task_array_info.pgtbl_map & 0xffffffff; + ictx->cstorm_st_context.task_pbl_base.hi = + (u64) iscsi->task_array_info.pgtbl_map >> 32; + /* CSTORM and USTORM initialization is different, CSTORM requires + * CQ DB base & not PTE addr */ + ictx->cstorm_st_context.cq_db_base.lo = + req1->cq_page_table_addr_lo & CNIC_PAGE_MASK; + ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; + ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; + ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; + for (i = 0; i < cp->num_cqs; i++) { + ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = + ISCSI_INITIAL_SN; + ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = + ISCSI_INITIAL_SN; + } + + ictx->xstorm_ag_context.cdu_reserved = + CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, + ISCSI_CONNECTION_TYPE); + ictx->ustorm_ag_context.cdu_usage = + CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, + ISCSI_CONNECTION_TYPE); + return 0; + +} + +static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num, int *work) +{ + struct iscsi_kwqe_conn_offload1 *req1; + struct iscsi_kwqe_conn_offload2 *req2; + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct cnic_context *ctx; + struct iscsi_kcqe kcqe; + struct kcqe *cqes[1]; + u32 l5_cid; + int ret = 0; + + if (num < 2) { + *work = num; + return -EINVAL; + } + + req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; + req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; + if ((num - 2) < req2->num_additional_wqes) { + *work = num; + return -EINVAL; + } + *work = 2 + req2->num_additional_wqes; + + l5_cid = req1->iscsi_conn_id; + if (l5_cid >= MAX_ISCSI_TBL_SZ) + return -EINVAL; + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; + kcqe.iscsi_conn_id = l5_cid; + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; + + ctx = &cp->ctx_tbl[l5_cid]; + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) { + kcqe.completion_status = + ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY; + goto done; + } + + if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { + atomic_dec(&cp->iscsi_conn); + goto done; + } + ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); + if (ret) { + atomic_dec(&cp->iscsi_conn); + ret = 0; + goto done; + } + ret = cnic_setup_bnx2x_ctx(dev, wqes, num); + if (ret < 0) { + cnic_free_bnx2x_conn_resc(dev, l5_cid); + atomic_dec(&cp->iscsi_conn); + goto done; + } + + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; + kcqe.iscsi_conn_context_id 
= BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid); + +done: + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); + return 0; +} + + +static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct iscsi_kwqe_conn_update *req = + (struct iscsi_kwqe_conn_update *) kwqe; + void *data; + union l5cm_specific_data l5_data; + u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); + int ret; + + if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) + return -EINVAL; + + data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!data) + return -ENOMEM; + + memcpy(data, kwqe, sizeof(struct kwqe)); + + ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, + req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + union l5cm_specific_data l5_data; + int ret; + u32 hw_cid; + + init_waitqueue_head(&ctx->waitq); + ctx->wait_cond = 0; + memset(&l5_data, 0, sizeof(l5_data)); + hw_cid = BNX2X_HW_CID(bp, ctx->cid); + + ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, + hw_cid, NONE_CONNECTION_TYPE, &l5_data); + + if (ret == 0) { + wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); + if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags))) + return -EBUSY; + } + + return 0; +} + +static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct iscsi_kwqe_conn_destroy *req = + (struct iscsi_kwqe_conn_destroy *) kwqe; + u32 l5_cid = req->reserved0; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + int ret = 0; + struct iscsi_kcqe kcqe; + struct kcqe *cqes[1]; + + if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + goto skip_cfc_delete; + + if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { + unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies; + + if (delta > (2 * HZ)) + delta = 0; + + set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); + queue_delayed_work(cnic_wq, &cp->delete_task, delta); + goto destroy_reply; + } + + ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid); + +skip_cfc_delete: + cnic_free_bnx2x_conn_resc(dev, l5_cid); + + if (!ret) { + atomic_dec(&cp->iscsi_conn); + clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + } + +destroy_reply: + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; + kcqe.iscsi_conn_id = l5_cid; + kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; + kcqe.iscsi_conn_context_id = req->context_id; + + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); + + return 0; +} + +static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, + struct l4_kwq_connect_req1 *kwqe1, + struct l4_kwq_connect_req3 *kwqe3, + struct l5cm_active_conn_buffer *conn_buf) +{ + struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; + struct l5cm_xstorm_conn_buffer *xstorm_buf = + &conn_buf->xstorm_conn_buffer; + struct l5cm_tstorm_conn_buffer *tstorm_buf = + &conn_buf->tstorm_conn_buffer; + struct regpair context_addr; + u32 cid = BNX2X_SW_CID(kwqe1->cid); + struct in6_addr src_ip, dst_ip; + int i; + u32 *addrp; + + addrp = (u32 *) &conn_addr->local_ip_addr; + for (i = 0; i < 4; i++, addrp++) + src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); + + addrp = (u32 *) &conn_addr->remote_ip_addr; + for (i = 0; 
i < 4; i++, addrp++) + dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); + + cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); + + xstorm_buf->context_addr.hi = context_addr.hi; + xstorm_buf->context_addr.lo = context_addr.lo; + xstorm_buf->mss = 0xffff; + xstorm_buf->rcv_buf = kwqe3->rcv_buf; + if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) + xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; + xstorm_buf->pseudo_header_checksum = + swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); + + if (kwqe3->ka_timeout) { + tstorm_buf->ka_enable = 1; + tstorm_buf->ka_timeout = kwqe3->ka_timeout; + tstorm_buf->ka_interval = kwqe3->ka_interval; + tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; + } + tstorm_buf->max_rt_time = 0xffffffff; +} + +static void cnic_init_bnx2x_mac(struct cnic_dev *dev) +{ + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 pfid = bp->pfid; + u8 *mac = dev->mac_addr; + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]); + + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, + mac[4]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, + mac[2]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]); + CNIC_WR8(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, + mac[0]); +} + +static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num, int *work) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct l4_kwq_connect_req1 *kwqe1 = + (struct l4_kwq_connect_req1 *) wqes[0]; + struct l4_kwq_connect_req3 *kwqe3; + struct l5cm_active_conn_buffer *conn_buf; + struct l5cm_conn_addr_params *conn_addr; + union l5cm_specific_data l5_data; + u32 l5_cid = kwqe1->pg_cid; + struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + int ret; + + if (num < 2) { + *work = num; + return -EINVAL; + } + + if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) + *work = 3; + else + *work = 2; + + if (num < *work) { + *work = num; + return -EINVAL; + } + + if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "conn_buf size too big\n"); + return -ENOMEM; + } + conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!conn_buf) + return -ENOMEM; + + memset(conn_buf, 0, sizeof(*conn_buf)); + + conn_addr = &conn_buf->conn_addr_buf; + conn_addr->remote_addr_0 = csk->ha[0]; + conn_addr->remote_addr_1 = csk->ha[1]; + conn_addr->remote_addr_2 = csk->ha[2]; + conn_addr->remote_addr_3 = csk->ha[3]; + conn_addr->remote_addr_4 = csk->ha[4]; + conn_addr->remote_addr_5 = csk->ha[5]; + + if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { + struct l4_kwq_connect_req2 *kwqe2 = + 
(struct l4_kwq_connect_req2 *) wqes[1]; + + conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; + conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; + conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; + + conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; + conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; + conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; + conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; + } + kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; + + conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; + conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; + conn_addr->local_tcp_port = kwqe1->src_port; + conn_addr->remote_tcp_port = kwqe1->dst_port; + + conn_addr->pmtu = kwqe3->pmtu; + cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); + + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id); + + ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, + kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); + if (!ret) + set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + + return ret; +} + +static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; + union l5cm_specific_data l5_data; + int ret; + + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, + req->cid, ISCSI_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; + union l5cm_specific_data l5_data; + int ret; + + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, + req->cid, ISCSI_CONNECTION_TYPE, &l5_data); + return ret; +} +static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; + struct l4_kcq kcqe; + struct kcqe *cqes[1]; + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.pg_host_opaque = req->host_opaque; + kcqe.pg_cid = req->host_opaque; + kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); + return 0; +} + +static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; + struct l4_kcq kcqe; + struct kcqe *cqes[1]; + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.pg_host_opaque = req->pg_host_opaque; + kcqe.pg_cid = req->pg_cid; + kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); + return 0; +} + +static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_stat *req; + struct fcoe_stat_ramrod_params *fcoe_stat; + union l5cm_specific_data l5_data; + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + int ret; + u32 cid; + + req = (struct fcoe_kwqe_stat *) kwqe; + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); + + fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); + if (!fcoe_stat) + return -ENOMEM; + + memset(fcoe_stat, 0, sizeof(*fcoe_stat)); + memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); + + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe 
*wqes[], + u32 num, int *work) +{ + int ret; + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 cid; + struct fcoe_init_ramrod_params *fcoe_init; + struct fcoe_kwqe_init1 *req1; + struct fcoe_kwqe_init2 *req2; + struct fcoe_kwqe_init3 *req3; + union l5cm_specific_data l5_data; + + if (num < 3) { + *work = num; + return -EINVAL; + } + req1 = (struct fcoe_kwqe_init1 *) wqes[0]; + req2 = (struct fcoe_kwqe_init2 *) wqes[1]; + req3 = (struct fcoe_kwqe_init3 *) wqes[2]; + if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { + *work = 1; + return -EINVAL; + } + if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { + *work = 2; + return -EINVAL; + } + + if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_init size too big\n"); + return -ENOMEM; + } + fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); + if (!fcoe_init) + return -ENOMEM; + + memset(fcoe_init, 0, sizeof(*fcoe_init)); + memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); + memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); + memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); + fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; + fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; + fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; + + fcoe_init->sb_num = cp->status_blk_num; + fcoe_init->eq_prod = MAX_KCQ_IDX; + fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; + cp->kcq2.sw_prod_idx = 0; + + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, + FCOE_CONNECTION_TYPE, &l5_data); + *work = 3; + return ret; +} + +static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num, int *work) +{ + int ret = 0; + u32 cid = -1, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct fcoe_kwqe_conn_offload1 *req1; + struct fcoe_kwqe_conn_offload2 *req2; + struct fcoe_kwqe_conn_offload3 *req3; + struct fcoe_kwqe_conn_offload4 *req4; + struct fcoe_conn_offload_ramrod_params *fcoe_offload; + struct cnic_context *ctx; + struct fcoe_context *fctx; + struct regpair ctx_addr; + union l5cm_specific_data l5_data; + struct fcoe_kcqe kcqe; + struct kcqe *cqes[1]; + + if (num < 4) { + *work = num; + return -EINVAL; + } + req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; + req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; + req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; + req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3]; + + *work = 4; + + l5_cid = req1->fcoe_conn_id; + if (l5_cid >= dev->max_fcoe_conn) + goto err_reply; + + l5_cid += BNX2X_FCOE_L5_CID_BASE; + + ctx = &cp->ctx_tbl[l5_cid]; + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + goto err_reply; + + ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); + if (ret) { + ret = 0; + goto err_reply; + } + cid = ctx->cid; + + fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); + if (fctx) { + u32 hw_cid = BNX2X_HW_CID(bp, cid); + u32 val; + + val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, + FCOE_CONNECTION_TYPE); + fctx->xstorm_ag_context.cdu_reserved = val; + val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, + FCOE_CONNECTION_TYPE); + fctx->ustorm_ag_context.cdu_usage = val; + } + if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_offload size too big\n"); + goto err_reply; + } + fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!fcoe_offload) + goto err_reply; + + memset(fcoe_offload, 
0, sizeof(*fcoe_offload)); + memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); + memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); + memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); + memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); + + cid = BNX2X_HW_CID(bp, cid); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, + FCOE_CONNECTION_TYPE, &l5_data); + if (!ret) + set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + + return ret; + +err_reply: + if (cid != -1) + cnic_free_bnx2x_conn_resc(dev, l5_cid); + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; + kcqe.fcoe_conn_id = req1->fcoe_conn_id; + kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; + + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); + return ret; +} + +static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_conn_enable_disable *req; + struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable; + union l5cm_specific_data l5_data; + int ret; + u32 cid, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + + req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; + cid = req->context_id; + l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; + + if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_enable size too big\n"); + return -ENOMEM; + } + fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!fcoe_enable) + return -ENOMEM; + + memset(fcoe_enable, 0, sizeof(*fcoe_enable)); + memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_conn_enable_disable *req; + struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; + union l5cm_specific_data l5_data; + int ret; + u32 cid, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + + req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; + cid = req->context_id; + l5_cid = req->conn_id; + if (l5_cid >= dev->max_fcoe_conn) + return -EINVAL; + + l5_cid += BNX2X_FCOE_L5_CID_BASE; + + if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { + netdev_err(dev->netdev, "fcoe_disable size too big\n"); + return -ENOMEM; + } + fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); + if (!fcoe_disable) + return -ENOMEM; + + memset(fcoe_disable, 0, sizeof(*fcoe_disable)); + memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_conn_destroy *req; + union l5cm_specific_data l5_data; + int ret; + u32 cid, l5_cid; + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx; + struct fcoe_kcqe kcqe; + struct kcqe *cqes[1]; + + req = (struct fcoe_kwqe_conn_destroy *) kwqe; + cid = req->context_id; + l5_cid = req->conn_id; + if (l5_cid >= dev->max_fcoe_conn) + return -EINVAL; + + l5_cid += BNX2X_FCOE_L5_CID_BASE; + + ctx = &cp->ctx_tbl[l5_cid]; + + init_waitqueue_head(&ctx->waitq); + ctx->wait_cond = 0; + + memset(&kcqe, 0, sizeof(kcqe)); + kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR; + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, + 
FCOE_CONNECTION_TYPE, &l5_data); + if (ret == 0) { + wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); + if (ctx->wait_cond) + kcqe.completion_status = 0; + } + + set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); + queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000)); + + kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; + kcqe.fcoe_conn_id = req->conn_id; + kcqe.fcoe_conn_context_id = cid; + + cqes[0] = (struct kcqe *) &kcqe; + cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); + return ret; +} + +static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 i; + + for (i = start_cid; i < cp->max_cid_space; i++) { + struct cnic_context *ctx = &cp->ctx_tbl[i]; + int j; + + while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) + msleep(10); + + for (j = 0; j < 5; j++) { + if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + break; + msleep(20); + } + + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + netdev_warn(dev->netdev, "CID %x not deleted\n", + ctx->cid); + } +} + +static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct fcoe_kwqe_destroy *req; + union l5cm_specific_data l5_data; + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + int ret; + u32 cid; + + cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); + + req = (struct fcoe_kwqe_destroy *) kwqe; + cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); + + memset(&l5_data, 0, sizeof(l5_data)); + ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, + FCOE_CONNECTION_TYPE, &l5_data); + return ret; +} + +static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct kcqe kcqe; + struct kcqe *cqes[1]; + u32 cid; + u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); + u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK; + u32 kcqe_op; + int ulp_type; + + cid = kwqe->kwqe_info0; + memset(&kcqe, 0, sizeof(kcqe)); + + if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) { + u32 l5_cid = 0; + + ulp_type = CNIC_ULP_FCOE; + if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) { + struct fcoe_kwqe_conn_enable_disable *req; + + req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; + kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN; + cid = req->context_id; + l5_cid = req->conn_id; + } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) { + kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC; + } else { + return; + } + kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT; + kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE; + kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR; + kcqe.kcqe_info2 = cid; + kcqe.kcqe_info0 = l5_cid; + + } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) { + ulp_type = CNIC_ULP_ISCSI; + if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN) + cid = kwqe->kwqe_info1; + + kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT; + kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI; + kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR; + kcqe.kcqe_info2 = cid; + cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0); + + } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) { + struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe; + + ulp_type = CNIC_ULP_L4; + if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1) + kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE; + else if (opcode == L4_KWQE_OPCODE_VALUE_RESET) + kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP; + else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE) + kcqe_op = 
L4_KCQE_OPCODE_VALUE_CLOSE_COMP; + else + return; + + kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) | + KCQE_FLAGS_LAYER_MASK_L4; + l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR; + l4kcqe->cid = cid; + cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id); + } else { + return; + } + + cqes[0] = &kcqe; + cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); +} + +static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, + struct kwqe *wqes[], u32 num_wqes) +{ + int i, work, ret; + u32 opcode; + struct kwqe *kwqe; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + for (i = 0; i < num_wqes; ) { + kwqe = wqes[i]; + opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); + work = 1; + + switch (opcode) { + case ISCSI_KWQE_OPCODE_INIT1: + ret = cnic_bnx2x_iscsi_init1(dev, kwqe); + break; + case ISCSI_KWQE_OPCODE_INIT2: + ret = cnic_bnx2x_iscsi_init2(dev, kwqe); + break; + case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: + ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], + num_wqes - i, &work); + break; + case ISCSI_KWQE_OPCODE_UPDATE_CONN: + ret = cnic_bnx2x_iscsi_update(dev, kwqe); + break; + case ISCSI_KWQE_OPCODE_DESTROY_CONN: + ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_CONNECT1: + ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, + &work); + break; + case L4_KWQE_OPCODE_VALUE_CLOSE: + ret = cnic_bnx2x_close(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_RESET: + ret = cnic_bnx2x_reset(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: + ret = cnic_bnx2x_offload_pg(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_UPDATE_PG: + ret = cnic_bnx2x_update_pg(dev, kwqe); + break; + case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: + ret = 0; + break; + default: + ret = 0; + netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", + opcode); + break; + } + if (ret < 0) { + netdev_err(dev->netdev, "KWQE(0x%x) failed\n", + opcode); + + /* Possibly bnx2x parity error, send completion + * to ulp drivers with error code to speed up + * cleanup and reset recovery. 
+ */ + if (ret == -EIO || ret == -EAGAIN) + cnic_bnx2x_kwqe_err(dev, kwqe); + } + i += work; + } + return 0; +} + +static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, + struct kwqe *wqes[], u32 num_wqes) +{ + struct bnx2x *bp = netdev_priv(dev->netdev); + int i, work, ret; + u32 opcode; + struct kwqe *kwqe; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2 is down */ + + if (!BNX2X_CHIP_IS_E2_PLUS(bp)) + return -EINVAL; + + for (i = 0; i < num_wqes; ) { + kwqe = wqes[i]; + opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); + work = 1; + + switch (opcode) { + case FCOE_KWQE_OPCODE_INIT1: + ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], + num_wqes - i, &work); + break; + case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: + ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], + num_wqes - i, &work); + break; + case FCOE_KWQE_OPCODE_ENABLE_CONN: + ret = cnic_bnx2x_fcoe_enable(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_DISABLE_CONN: + ret = cnic_bnx2x_fcoe_disable(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_DESTROY_CONN: + ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_DESTROY: + ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); + break; + case FCOE_KWQE_OPCODE_STAT: + ret = cnic_bnx2x_fcoe_stat(dev, kwqe); + break; + default: + ret = 0; + netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", + opcode); + break; + } + if (ret < 0) { + netdev_err(dev->netdev, "KWQE(0x%x) failed\n", + opcode); + + /* Possibly bnx2x parity error, send completion + * to ulp drivers with error code to speed up + * cleanup and reset recovery. + */ + if (ret == -EIO || ret == -EAGAIN) + cnic_bnx2x_kwqe_err(dev, kwqe); + } + i += work; + } + return 0; +} + +static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes) +{ + int ret = -EINVAL; + u32 layer_code; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EAGAIN; /* bnx2x is down */ + + if (!num_wqes) + return 0; + + layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; + switch (layer_code) { + case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: + case KWQE_FLAGS_LAYER_MASK_L4: + case KWQE_FLAGS_LAYER_MASK_L2: + ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); + break; + + case KWQE_FLAGS_LAYER_MASK_L5_FCOE: + ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); + break; + } + return ret; +} + +static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) +{ + if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) + return KCQE_FLAGS_LAYER_MASK_L4; + + return opflag & KCQE_FLAGS_LAYER_MASK; +} + +static void service_kcqes(struct cnic_dev *dev, int num_cqes) +{ + struct cnic_local *cp = dev->cnic_priv; + int i, j, comp = 0; + + i = 0; + j = 1; + while (num_cqes) { + struct cnic_ulp_ops *ulp_ops; + int ulp_type; + u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; + u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); + + if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) + comp++; + + while (j < num_cqes) { + u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; + + if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) + break; + + if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) + comp++; + j++; + } + + if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) + ulp_type = CNIC_ULP_RDMA; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) + ulp_type = CNIC_ULP_ISCSI; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) + ulp_type = CNIC_ULP_FCOE; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) + ulp_type = CNIC_ULP_L4; + else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) + goto end; + 
else { + netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", + kcqe_op_flag); + goto end; + } + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (likely(ulp_ops)) { + ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], + cp->completed_kcq + i, j); + } + rcu_read_unlock(); +end: + num_cqes -= j; + i += j; + j = 1; + } + if (unlikely(comp)) + cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); +} + +static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) +{ + struct cnic_local *cp = dev->cnic_priv; + u16 i, ri, hw_prod, last; + struct kcqe *kcqe; + int kcqe_cnt = 0, last_cnt = 0; + + i = ri = last = info->sw_prod_idx; + ri &= MAX_KCQ_IDX; + hw_prod = *info->hw_prod_idx_ptr; + hw_prod = info->hw_idx(hw_prod); + + while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { + kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; + cp->completed_kcq[kcqe_cnt++] = kcqe; + i = info->next_idx(i); + ri = i & MAX_KCQ_IDX; + if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { + last_cnt = kcqe_cnt; + last = i; + } + } + + info->sw_prod_idx = last; + return last_cnt; +} + +static int cnic_l2_completion(struct cnic_local *cp) +{ + u16 hw_cons, sw_cons; + struct cnic_uio_dev *udev = cp->udev; + union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) + (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); + u32 cmd; + int comp = 0; + + if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) + return 0; + + hw_cons = *cp->rx_cons_ptr; + if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) + hw_cons++; + + sw_cons = cp->rx_cons; + while (sw_cons != hw_cons) { + u8 cqe_fp_flags; + + cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; + cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; + if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { + cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); + cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || + cmd == RAMROD_CMD_ID_ETH_HALT) + comp++; + } + sw_cons = BNX2X_NEXT_RCQE(sw_cons); + } + return comp; +} + +static void cnic_chk_pkt_rings(struct cnic_local *cp) +{ + u16 rx_cons, tx_cons; + int comp = 0; + + if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) + return; + + rx_cons = *cp->rx_cons_ptr; + tx_cons = *cp->tx_cons_ptr; + if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { + if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) + comp = cnic_l2_completion(cp); + + cp->tx_cons = tx_cons; + cp->rx_cons = rx_cons; + + if (cp->udev) + uio_event_notify(&cp->udev->cnic_uinfo); + } + if (comp) + clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); +} + +static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; + int kcqe_cnt; + + /* status block index must be read before reading other fields */ + rmb(); + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + + while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that status_blk fields can change. 
*/ + barrier(); + status_idx = (u16) *cp->kcq1.status_idx_ptr; + /* status block index must be read first */ + rmb(); + cp->kwq_con_idx = *cp->kwq_con_idx_ptr; + } + + CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); + + cnic_chk_pkt_rings(cp); + + return status_idx; +} + +static int cnic_service_bnx2(void *data, void *status_blk) +{ + struct cnic_dev *dev = data; + + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { + struct status_block *sblk = status_blk; + + return sblk->status_idx; + } + + return cnic_service_bnx2_queues(dev); +} + +static void cnic_service_bnx2_msix(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; + + cp->last_status_idx = cnic_service_bnx2_queues(dev); + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static void cnic_doirq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { + u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; + + prefetch(cp->status_blk.gen); + prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); + + tasklet_schedule(&cp->cnic_irq_task); + } +} + +static irqreturn_t cnic_irq(int irq, void *dev_instance) +{ + struct cnic_dev *dev = dev_instance; + struct cnic_local *cp = dev->cnic_priv; + + if (cp->ack_int) + cp->ack_int(dev); + + cnic_doirq(dev); + + return IRQ_HANDLED; +} + +static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, + u16 index, u8 op, u8 update) +{ + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); +} + +static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update) +{ + struct igu_regular cmd_data; + u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; + + cmd_data.sb_id_and_flags = + (index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT); + + + CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); +} + +static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, + IGU_INT_DISABLE, 0); +} + +static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, + IGU_INT_DISABLE, 0); +} + +static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx, + IGU_INT_ENABLE, 1); +} + +static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx, + IGU_INT_ENABLE, 1); +} + +static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) +{ + u32 last_status = *info->status_idx_ptr; + int kcqe_cnt; + + /* status block index must be read before reading the KCQ */ + rmb(); 
+ while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { + + service_kcqes(dev, kcqe_cnt); + + /* Tell compiler that sblk fields can change. */ + barrier(); + + last_status = *info->status_idx_ptr; + /* status block index must be read before reading the KCQ */ + rmb(); + } + return last_status; +} + +static void cnic_service_bnx2x_bh(unsigned long data) +{ + struct cnic_dev *dev = (struct cnic_dev *) data; + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 status_idx, new_status_idx; + + if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) + return; + + while (1) { + status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); + + CNIC_WR16(dev, cp->kcq1.io_addr, + cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); + + if (!CNIC_SUPPORTS_FCOE(bp)) { + cp->arm_int(dev, status_idx); + break; + } + + new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); + + if (new_status_idx != status_idx) + continue; + + CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + + MAX_KCQ_IDX); + + cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, + status_idx, IGU_INT_ENABLE, 1); + + break; + } +} + +static int cnic_service_bnx2x(void *data, void *status_blk) +{ + struct cnic_dev *dev = data; + struct cnic_local *cp = dev->cnic_priv; + + if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + cnic_doirq(dev); + + cnic_chk_pkt_rings(cp); + + return 0; +} + +static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) +{ + struct cnic_ulp_ops *ulp_ops; + + if (if_type == CNIC_ULP_ISCSI) + cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); + + mutex_lock(&cnic_lock); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], + lockdep_is_held(&cnic_lock)); + if (!ulp_ops) { + mutex_unlock(&cnic_lock); + return; + } + set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + mutex_unlock(&cnic_lock); + + if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_stop(cp->ulp_handle[if_type]); + + clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); +} + +static void cnic_ulp_stop(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) + cnic_ulp_stop_one(cp, if_type); +} + +static void cnic_ulp_start(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int if_type; + + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + + mutex_lock(&cnic_lock); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], + lockdep_is_held(&cnic_lock)); + if (!ulp_ops || !ulp_ops->cnic_start) { + mutex_unlock(&cnic_lock); + continue; + } + set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + mutex_unlock(&cnic_lock); + + if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) + ulp_ops->cnic_start(cp->ulp_handle[if_type]); + + clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + } +} + +static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_ulp_ops *ulp_ops; + int rc; + + mutex_lock(&cnic_lock); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], + lockdep_is_held(&cnic_lock)); + if (ulp_ops && ulp_ops->cnic_get_stats) + rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); + else + rc = -ENODEV; + mutex_unlock(&cnic_lock); + return rc; +} + +static int cnic_ctl(void *data, struct cnic_ctl_info *info) +{ + struct cnic_dev *dev = data; + int ulp_type = CNIC_ULP_ISCSI; + + switch (info->cmd) { + case CNIC_CTL_STOP_CMD: + cnic_hold(dev); 
+ + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + + cnic_put(dev); + break; + case CNIC_CTL_START_CMD: + cnic_hold(dev); + + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + + cnic_put(dev); + break; + case CNIC_CTL_STOP_ISCSI_CMD: { + struct cnic_local *cp = dev->cnic_priv; + set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); + queue_delayed_work(cnic_wq, &cp->delete_task, 0); + break; + } + case CNIC_CTL_COMPLETION_CMD: { + struct cnic_ctl_completion *comp = &info->data.comp; + u32 cid = BNX2X_SW_CID(comp->cid); + u32 l5_cid; + struct cnic_local *cp = dev->cnic_priv; + + if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) + break; + + if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + if (unlikely(comp->error)) { + set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); + netdev_err(dev->netdev, + "CID %x CFC delete comp error %x\n", + cid, comp->error); + } + + ctx->wait_cond = 1; + wake_up(&ctx->waitq); + } + break; + } + case CNIC_CTL_FCOE_STATS_GET_CMD: + ulp_type = CNIC_ULP_FCOE; + /* fall through */ + case CNIC_CTL_ISCSI_STATS_GET_CMD: + cnic_hold(dev); + cnic_copy_ulp_stats(dev, ulp_type); + cnic_put(dev); + break; + + default: + return -EINVAL; + } + return 0; +} + +static void cnic_ulp_init(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl_prot(i); + if (!ulp_ops || !ulp_ops->cnic_init) { + mutex_unlock(&cnic_lock); + continue; + } + ulp_get(ulp_ops); + mutex_unlock(&cnic_lock); + + if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_init(dev); + + ulp_put(ulp_ops); + } +} + +static void cnic_ulp_exit(struct cnic_dev *dev) +{ + int i; + struct cnic_local *cp = dev->cnic_priv; + + for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { + struct cnic_ulp_ops *ulp_ops; + + mutex_lock(&cnic_lock); + ulp_ops = cnic_ulp_tbl_prot(i); + if (!ulp_ops || !ulp_ops->cnic_exit) { + mutex_unlock(&cnic_lock); + continue; + } + ulp_get(ulp_ops); + mutex_unlock(&cnic_lock); + + if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) + ulp_ops->cnic_exit(dev); + + ulp_put(ulp_ops); + } +} + +static int cnic_cm_offload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_offload_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; + l4kwqe->l2hdr_nbytes = ETH_HLEN; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->sa0 = dev->mac_addr[0]; + l4kwqe->sa1 = dev->mac_addr[1]; + l4kwqe->sa2 = dev->mac_addr[2]; + l4kwqe->sa3 = dev->mac_addr[3]; + l4kwqe->sa4 = dev->mac_addr[4]; + l4kwqe->sa5 = dev->mac_addr[5]; + + l4kwqe->etype = ETH_P_IP; + l4kwqe->ipid_start = DEF_IPID_START; + l4kwqe->host_opaque = csk->l5_cid; + + if (csk->vlan_id) { + l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; + l4kwqe->vlan_tag = csk->vlan_id; + l4kwqe->l2hdr_nbytes += 4; + } + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_update_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_update_pg *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; + 
memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; + l4kwqe->pg_cid = csk->pg_cid; + + l4kwqe->da0 = csk->ha[0]; + l4kwqe->da1 = csk->ha[1]; + l4kwqe->da2 = csk->ha[2]; + l4kwqe->da3 = csk->ha[3]; + l4kwqe->da4 = csk->ha[4]; + l4kwqe->da5 = csk->ha[5]; + + l4kwqe->pg_host_opaque = csk->l5_cid; + l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_upload_pg(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_upload *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; + l4kwqe->flags = + L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->pg_cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_conn_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_connect_req1 *l4kwqe1; + struct l4_kwq_connect_req2 *l4kwqe2; + struct l4_kwq_connect_req3 *l4kwqe3; + struct kwqe *wqes[3]; + u8 tcp_flags = 0; + int num_wqes = 2; + + l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; + l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; + l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; + memset(l4kwqe1, 0, sizeof(*l4kwqe1)); + memset(l4kwqe2, 0, sizeof(*l4kwqe2)); + memset(l4kwqe3, 0, sizeof(*l4kwqe3)); + + l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; + l4kwqe3->flags = + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; + l4kwqe3->ka_timeout = csk->ka_timeout; + l4kwqe3->ka_interval = csk->ka_interval; + l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; + l4kwqe3->tos = csk->tos; + l4kwqe3->ttl = csk->ttl; + l4kwqe3->snd_seq_scale = csk->snd_seq_scale; + l4kwqe3->pmtu = csk->mtu; + l4kwqe3->rcv_buf = csk->rcv_buf; + l4kwqe3->snd_buf = csk->snd_buf; + l4kwqe3->seed = csk->seed; + + wqes[0] = (struct kwqe *) l4kwqe1; + if (test_bit(SK_F_IPV6, &csk->flags)) { + wqes[1] = (struct kwqe *) l4kwqe2; + wqes[2] = (struct kwqe *) l4kwqe3; + num_wqes = 3; + + l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; + l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; + l4kwqe2->flags = + L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | + L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; + l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); + l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); + l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); + l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); + l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); + l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - + sizeof(struct tcphdr); + } else { + wqes[1] = (struct kwqe *) l4kwqe3; + l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - + sizeof(struct tcphdr); + } + + l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; + l4kwqe1->flags = + (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | + L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; + l4kwqe1->cid = csk->cid; + l4kwqe1->pg_cid = csk->pg_cid; + l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); + l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); + l4kwqe1->src_port = be16_to_cpu(csk->src_port); + l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); + if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; + if (csk->tcp_flags & 
SK_TCP_KEEP_ALIVE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; + if (csk->tcp_flags & SK_TCP_NAGLE) + tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; + if (csk->tcp_flags & SK_TCP_TIMESTAMP) + tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; + if (csk->tcp_flags & SK_TCP_SACK) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; + if (csk->tcp_flags & SK_TCP_SEG_SCALING) + tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; + + l4kwqe1->tcp_flags = tcp_flags; + + return dev->submit_kwqes(dev, wqes, num_wqes); +} + +static int cnic_cm_close_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_close_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_abort_req(struct cnic_sock *csk) +{ + struct cnic_dev *dev = csk->dev; + struct l4_kwq_reset_req *l4kwqe; + struct kwqe *wqes[1]; + + l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; + memset(l4kwqe, 0, sizeof(*l4kwqe)); + wqes[0] = (struct kwqe *) l4kwqe; + + l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; + l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; + l4kwqe->cid = csk->cid; + + return dev->submit_kwqes(dev, wqes, 1); +} + +static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, + u32 l5_cid, struct cnic_sock **csk, void *context) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_sock *csk1; + + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return -EINVAL; + + if (cp->ctx_tbl) { + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) + return -EAGAIN; + } + + csk1 = &cp->csk_tbl[l5_cid]; + if (atomic_read(&csk1->ref_count)) + return -EAGAIN; + + if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) + return -EBUSY; + + csk1->dev = dev; + csk1->cid = cid; + csk1->l5_cid = l5_cid; + csk1->ulp_type = ulp_type; + csk1->context = context; + + csk1->ka_timeout = DEF_KA_TIMEOUT; + csk1->ka_interval = DEF_KA_INTERVAL; + csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; + csk1->tos = DEF_TOS; + csk1->ttl = DEF_TTL; + csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; + csk1->rcv_buf = DEF_RCV_BUF; + csk1->snd_buf = DEF_SND_BUF; + csk1->seed = DEF_SEED; + csk1->tcp_flags = 0; + + *csk = csk1; + return 0; +} + +static void cnic_cm_cleanup(struct cnic_sock *csk) +{ + if (csk->src_port) { + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); + csk->src_port = 0; + } +} + +static void cnic_close_conn(struct cnic_sock *csk) +{ + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { + cnic_cm_upload_pg(csk); + clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + } + cnic_cm_cleanup(csk); +} + +static int cnic_cm_destroy(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + csk_hold(csk); + clear_bit(SK_F_INUSE, &csk->flags); + smp_mb__after_atomic(); + while (atomic_read(&csk->ref_count) != 1) + msleep(1); + cnic_cm_cleanup(csk); + + csk->flags = 0; + csk_put(csk); + return 0; +} + +static inline u16 cnic_get_vlan(struct net_device *dev, + struct net_device **vlan_dev) +{ + if (dev->priv_flags & IFF_802_1Q_VLAN) { + *vlan_dev = vlan_dev_real_dev(dev); + return vlan_dev_vlan_id(dev); + } + *vlan_dev = dev; + return 0; +} + +static int cnic_get_v4_route(struct 
sockaddr_in *dst_addr, + struct dst_entry **dst) +{ +#if defined(CONFIG_INET) + struct rtable *rt; + + rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); + if (!IS_ERR(rt)) { + *dst = &rt->dst; + return 0; + } + return PTR_ERR(rt); +#else + return -ENETUNREACH; +#endif +} + +static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, + struct dst_entry **dst) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct flowi6 fl6; + + memset(&fl6, 0, sizeof(fl6)); + fl6.daddr = dst_addr->sin6_addr; + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = dst_addr->sin6_scope_id; + + *dst = ip6_route_output(&init_net, NULL, &fl6); + if ((*dst)->error) { + dst_release(*dst); + *dst = NULL; + return -ENETUNREACH; + } else + return 0; +#endif + + return -ENETUNREACH; +} + +static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, + int ulp_type) +{ + struct cnic_dev *dev = NULL; + struct dst_entry *dst; + struct net_device *netdev = NULL; + int err = -ENETUNREACH; + + if (dst_addr->sin_family == AF_INET) + err = cnic_get_v4_route(dst_addr, &dst); + else if (dst_addr->sin_family == AF_INET6) { + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *) dst_addr; + + err = cnic_get_v6_route(dst_addr6, &dst); + } else + return NULL; + + if (err) + return NULL; + + if (!dst->dev) + goto done; + + cnic_get_vlan(dst->dev, &netdev); + + dev = cnic_from_netdev(netdev); + +done: + dst_release(dst); + if (dev) + cnic_put(dev); + return dev; +} + +static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); +} + +static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + int is_v6, rc = 0; + struct dst_entry *dst = NULL; + struct net_device *realdev; + __be16 local_port; + u32 port_id; + + if (saddr->local.v6.sin6_family == AF_INET6 && + saddr->remote.v6.sin6_family == AF_INET6) + is_v6 = 1; + else if (saddr->local.v4.sin_family == AF_INET && + saddr->remote.v4.sin_family == AF_INET) + is_v6 = 0; + else + return -EINVAL; + + clear_bit(SK_F_IPV6, &csk->flags); + + if (is_v6) { + set_bit(SK_F_IPV6, &csk->flags); + cnic_get_v6_route(&saddr->remote.v6, &dst); + + memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, + sizeof(struct in6_addr)); + csk->dst_port = saddr->remote.v6.sin6_port; + local_port = saddr->local.v6.sin6_port; + + } else { + cnic_get_v4_route(&saddr->remote.v4, &dst); + + csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; + csk->dst_port = saddr->remote.v4.sin_port; + local_port = saddr->local.v4.sin_port; + } + + csk->vlan_id = 0; + csk->mtu = dev->netdev->mtu; + if (dst && dst->dev) { + u16 vlan = cnic_get_vlan(dst->dev, &realdev); + if (realdev == dev->netdev) { + csk->vlan_id = vlan; + csk->mtu = dst_mtu(dst); + } + } + + port_id = be16_to_cpu(local_port); + if (port_id >= CNIC_LOCAL_PORT_MIN && + port_id < CNIC_LOCAL_PORT_MAX) { + if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) + port_id = 0; + } else + port_id = 0; + + if (!port_id) { + port_id = cnic_alloc_new_id(&cp->csk_port_tbl); + if (port_id == -1) { + rc = -ENOMEM; + goto err_out; + } + local_port = cpu_to_be16(port_id); + } + csk->src_port = local_port; + +err_out: + dst_release(dst); + return rc; +} + +static void cnic_init_csk_state(struct cnic_sock *csk) +{ + csk->state = 0; + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + 
clear_bit(SK_F_CLOSING, &csk->flags); +} + +static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) +{ + struct cnic_local *cp = csk->dev->cnic_priv; + int err = 0; + + if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + return -EOPNOTSUPP; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) + return -EINVAL; + + cnic_init_csk_state(csk); + + err = cnic_get_route(csk, saddr); + if (err) + goto err_out; + + err = cnic_resolve_addr(csk, saddr); + if (!err) + return 0; + +err_out: + clear_bit(SK_F_CONNECT_START, &csk->flags); + return err; +} + +static int cnic_cm_abort(struct cnic_sock *csk) +{ + struct cnic_local *cp = csk->dev->cnic_priv; + u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; + + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_abort_prep(csk)) + return cnic_cm_abort_req(csk); + + /* Getting here means that we haven't started connect, or + * connect was not successful, or it has been reset by the target. + */ + + cp->close_conn(csk, opcode); + if (csk->state != opcode) { + /* Wait for remote reset sequence to complete */ + while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + msleep(1); + + return -EALREADY; + } + + return 0; +} + +static int cnic_cm_close(struct cnic_sock *csk) +{ + if (!cnic_in_use(csk)) + return -EINVAL; + + if (cnic_close_prep(csk)) { + csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; + return cnic_cm_close_req(csk); + } else { + /* Wait for remote reset sequence to complete */ + while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + msleep(1); + + return -EALREADY; + } + return 0; +} + +static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, + u8 opcode) +{ + struct cnic_ulp_ops *ulp_ops; + int ulp_type = csk->ulp_type; + + rcu_read_lock(); + ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); + if (ulp_ops) { + if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) + ulp_ops->cm_connect_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) + ulp_ops->cm_close_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) + ulp_ops->cm_remote_abort(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) + ulp_ops->cm_abort_complete(csk); + else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) + ulp_ops->cm_remote_close(csk); + } + rcu_read_unlock(); +} + +static int cnic_cm_set_pg(struct cnic_sock *csk) +{ + if (cnic_offld_prep(csk)) { + if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + cnic_cm_update_pg(csk); + else + cnic_cm_offload_pg(csk); + } + return 0; +} + +static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 l5_cid = kcqe->pg_host_opaque; + u8 opcode = kcqe->op_code; + struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; + + csk_hold(csk); + if (!cnic_in_use(csk)) + goto done; + + if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + goto done; + } + /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ + if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, + L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); + goto done; + } + + csk->pg_cid = kcqe->pg_cid; + set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); + cnic_cm_conn_req(csk); + +done: + csk_put(csk); +} + +static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct fcoe_kcqe *fc_kcqe = (struct 
fcoe_kcqe *) kcqe; + u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; + struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; + + ctx->timestamp = jiffies; + ctx->wait_cond = 1; + wake_up(&ctx->waitq); +} + +static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) +{ + struct cnic_local *cp = dev->cnic_priv; + struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; + u8 opcode = l4kcqe->op_code; + u32 l5_cid; + struct cnic_sock *csk; + + if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) { + cnic_process_fcoe_term_conn(dev, kcqe); + return; + } + if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || + opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { + cnic_cm_process_offld_pg(dev, l4kcqe); + return; + } + + l5_cid = l4kcqe->conn_id; + if (opcode & 0x80) + l5_cid = l4kcqe->cid; + if (l5_cid >= MAX_CM_SK_TBL_SZ) + return; + + csk = &cp->csk_tbl[l5_cid]; + csk_hold(csk); + + if (!cnic_in_use(csk)) { + csk_put(csk); + return; + } + + switch (opcode) { + case L5CM_RAMROD_CMD_ID_TCP_CONNECT: + if (l4kcqe->status != 0) { + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, + L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); + } + break; + case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: + if (l4kcqe->status == 0) + set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); + else if (l4kcqe->status == + L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) + set_bit(SK_F_HW_ERR, &csk->flags); + + smp_mb__before_atomic(); + clear_bit(SK_F_OFFLD_SCHED, &csk->flags); + cnic_cm_upcall(cp, csk, opcode); + break; + + case L5CM_RAMROD_CMD_ID_CLOSE: { + struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe; + + if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) { + netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n", + l4kcqe->status, l5kcqe->completion_status); + opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; + /* Fall through */ + } else { + break; + } + } + case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: + case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: + case L4_KCQE_OPCODE_VALUE_RESET_COMP: + case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: + case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: + if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) + set_bit(SK_F_HW_ERR, &csk->flags); + + cp->close_conn(csk, opcode); + break; + + case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: + /* after we already sent CLOSE_REQ */ + if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) && + !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) && + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) + cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP); + else + cnic_cm_upcall(cp, csk, opcode); + break; + } + csk_put(csk); +} + +static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) +{ + struct cnic_dev *dev = data; + int i; + + for (i = 0; i < num; i++) + cnic_cm_process_kcqe(dev, kcqe[i]); +} + +static struct cnic_ulp_ops cm_ulp_ops = { + .indicate_kcqes = cnic_cm_indicate_kcqe, +}; + +static void cnic_cm_free_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + kfree(cp->csk_tbl); + cp->csk_tbl = NULL; + cnic_free_id_tbl(&cp->csk_port_tbl); +} + +static int cnic_cm_alloc_mem(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 port_id; + + cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, + GFP_KERNEL); + if (!cp->csk_tbl) + return -ENOMEM; + + port_id = prandom_u32(); + port_id %= CNIC_LOCAL_PORT_RANGE; + if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, + CNIC_LOCAL_PORT_MIN, port_id)) { + cnic_cm_free_mem(dev); + return -ENOMEM; + } + return 0; +} + 
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) +{ + if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { + /* Unsolicited RESET_COMP or RESET_RECEIVED */ + opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; + csk->state = opcode; + } + + /* 1. If event opcode matches the expected event in csk->state + * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any + * event + * 3. If the expected event is 0, meaning the connection was never + * never established, we accept the opcode from cm_abort. + */ + if (opcode == csk->state || csk->state == 0 || + csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP || + csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) { + if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { + if (csk->state == 0) + csk->state = opcode; + return 1; + } + } + return 0; +} + +static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + + if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { + cnic_cm_upcall(cp, csk, opcode); + return; + } + + clear_bit(SK_F_CONNECT_START, &csk->flags); + cnic_close_conn(csk); + csk->state = opcode; + cnic_cm_upcall(cp, csk, opcode); +} + +static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) +{ +} + +static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) +{ + u32 seed; + + seed = prandom_u32(); + cnic_ctx_wr(dev, 45, 0, seed); + return 0; +} + +static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) +{ + struct cnic_dev *dev = csk->dev; + struct cnic_local *cp = dev->cnic_priv; + struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; + union l5cm_specific_data l5_data; + u32 cmd = 0; + int close_complete = 0; + + switch (opcode) { + case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: + case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: + case L4_KCQE_OPCODE_VALUE_RESET_COMP: + if (cnic_ready_to_close(csk, opcode)) { + if (test_bit(SK_F_HW_ERR, &csk->flags)) + close_complete = 1; + else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) + cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; + else + close_complete = 1; + } + break; + case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: + cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; + break; + case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: + close_complete = 1; + break; + } + if (cmd) { + memset(&l5_data, 0, sizeof(l5_data)); + + cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, + &l5_data); + } else if (close_complete) { + ctx->timestamp = jiffies; + cnic_close_conn(csk); + cnic_cm_upcall(cp, csk, csk->state); + } +} + +static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + + if (!cp->ctx_tbl) + return; + + if (!netif_running(dev->netdev)) + return; + + cnic_bnx2x_delete_wait(dev, 0); + + cancel_delayed_work(&cp->delete_task); + flush_workqueue(cnic_wq); + + if (atomic_read(&cp->iscsi_conn) != 0) + netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n", + atomic_read(&cp->iscsi_conn)); +} + +static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) +{ + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 pfid = bp->pfid; + u32 port = BP_PORT(bp); + + cnic_init_bnx2x_mac(dev); + cnic_bnx2x_set_tcp_options(dev, 0, 1); + + CNIC_WR16(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0); + + CNIC_WR(dev, BAR_XSTRORM_INTMEM + + XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); + CNIC_WR(dev, BAR_XSTRORM_INTMEM + + XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), + DEF_MAX_DA_COUNT); + + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + 
XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); + CNIC_WR8(dev, BAR_XSTRORM_INTMEM + + XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); + CNIC_WR(dev, BAR_XSTRORM_INTMEM + + XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); + + CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), + DEF_MAX_CWND); + return 0; +} + +static void cnic_delete_task(struct work_struct *work) +{ + struct cnic_local *cp; + struct cnic_dev *dev; + u32 i; + int need_resched = 0; + + cp = container_of(work, struct cnic_local, delete_task.work); + dev = cp->dev; + + if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { + struct drv_ctl_info info; + + cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); + + info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; + cp->ethdev->drv_ctl(dev->netdev, &info); + } + + for (i = 0; i < cp->max_cid_space; i++) { + struct cnic_context *ctx = &cp->ctx_tbl[i]; + int err; + + if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || + !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) + continue; + + if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { + need_resched = 1; + continue; + } + + if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) + continue; + + err = cnic_bnx2x_destroy_ramrod(dev, i); + + cnic_free_bnx2x_conn_resc(dev, i); + if (!err) { + if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) + atomic_dec(&cp->iscsi_conn); + + clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); + } + } + + if (need_resched) + queue_delayed_work(cnic_wq, &cp->delete_task, + msecs_to_jiffies(10)); + +} + +static int cnic_cm_open(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int err; + + err = cnic_cm_alloc_mem(dev); + if (err) + return err; + + err = cp->start_cm(dev); + + if (err) + goto err_out; + + INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); + + dev->cm_create = cnic_cm_create; + dev->cm_destroy = cnic_cm_destroy; + dev->cm_connect = cnic_cm_connect; + dev->cm_abort = cnic_cm_abort; + dev->cm_close = cnic_cm_close; + dev->cm_select_dev = cnic_cm_select_dev; + + cp->ulp_handle[CNIC_ULP_L4] = dev; + rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); + return 0; + +err_out: + cnic_cm_free_mem(dev); + return err; +} + +static int cnic_cm_shutdown(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + int i; + + if (!cp->csk_tbl) + return 0; + + for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { + struct cnic_sock *csk = &cp->csk_tbl[i]; + + clear_bit(SK_F_INUSE, &csk->flags); + cnic_cm_cleanup(csk); + } + cnic_cm_free_mem(dev); + + return 0; +} + +static void cnic_init_context(struct cnic_dev *dev, u32 cid) +{ + u32 cid_addr; + int i; + + cid_addr = GET_CID_ADDR(cid); + + for (i = 0; i < CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr, i, 0); +} + +static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) +{ + struct cnic_local *cp = dev->cnic_priv; + int ret = 0, i; + u32 valid_bit = valid ? 
BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; + + if (BNX2_CHIP(cp) != BNX2_CHIP_5709) + return 0; + + for (i = 0; i < cp->ctx_blks; i++) { + int j; + u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; + u32 val; + + memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE); + + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, + (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, + (u64) cp->ctx_arr[i].mapping >> 32); + CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | + BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); + for (j = 0; j < 10; j++) { + + val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); + if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) + break; + udelay(5); + } + if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { + ret = -EBUSY; + break; + } + } + return ret; +} + +static void cnic_free_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + cp->disable_int_sync(dev); + tasklet_kill(&cp->cnic_irq_task); + free_irq(ethdev->irq_arr[0].vector, dev); + } +} + +static int cnic_request_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); + if (err) + tasklet_disable(&cp->cnic_irq_task); + + return err; +} + +static int cnic_init_bnx2_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + int err, i = 0; + int sblk_num = cp->status_blk_num; + u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + + BNX2_HC_SB_CONFIG_1; + + CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); + + CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); + CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); + CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); + + cp->last_status_idx = cp->status_blk.bnx2->status_idx; + tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, + (unsigned long) dev); + err = cnic_request_irq(dev); + if (err) + return err; + + while (cp->status_blk.bnx2->status_completion_producer_index && + i < 10) { + CNIC_WR(dev, BNX2_HC_COALESCE_NOW, + 1 << (11 + sblk_num)); + udelay(10); + i++; + barrier(); + } + if (cp->status_blk.bnx2->status_completion_producer_index) { + cnic_free_irq(dev); + goto failed; + } + + } else { + struct status_block *sblk = cp->status_blk.gen; + u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); + int i = 0; + + while (sblk->status_completion_producer_index && i < 10) { + CNIC_WR(dev, BNX2_HC_COMMAND, + hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); + udelay(10); + i++; + barrier(); + } + if (sblk->status_completion_producer_index) + goto failed; + + } + return 0; + +failed: + netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); + return -EBUSY; +} + +static void cnic_enable_bnx2_int(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); +} + +static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) + return; + + CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, 
cp->int_num | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT); + CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); + synchronize_irq(ethdev->irq_arr[0].vector); +} + +static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct cnic_uio_dev *udev = cp->udev; + u32 cid_addr, tx_cid, sb_id; + u32 val, offset0, offset1, offset2, offset3; + int i; + struct bnx2_tx_bd *txbd; + dma_addr_t buf_map, ring_map = udev->l2_ring_map; + struct status_block *s_blk = cp->status_blk.gen; + + sb_id = cp->status_blk_num; + tx_cid = 20; + cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk.bnx2; + + tx_cid = TX_TSS_CID + sb_id - 1; + CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | + (TX_TSS_CID << 7)); + cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; + } + cp->tx_cons = *cp->tx_cons_ptr; + + cid_addr = GET_CID_ADDR(tx_cid); + if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { + u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; + + for (i = 0; i < PHY_CTX_SIZE; i += 4) + cnic_ctx_wr(dev, cid_addr2, i, 0); + + offset0 = BNX2_L2CTX_TYPE_XI; + offset1 = BNX2_L2CTX_CMD_TYPE_XI; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; + } else { + cnic_init_context(dev, tx_cid); + cnic_init_context(dev, tx_cid + 1); + + offset0 = BNX2_L2CTX_TYPE; + offset1 = BNX2_L2CTX_CMD_TYPE; + offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; + offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; + } + val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; + cnic_ctx_wr(dev, cid_addr, offset0, val); + + val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); + cnic_ctx_wr(dev, cid_addr, offset1, val); + + txbd = udev->l2_ring; + + buf_map = udev->l2_buf_map; + for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) { + txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; + txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; + } + val = (u64) ring_map >> 32; + cnic_ctx_wr(dev, cid_addr, offset2, val); + txbd->tx_bd_haddr_hi = val; + + val = (u64) ring_map & 0xffffffff; + cnic_ctx_wr(dev, cid_addr, offset3, val); + txbd->tx_bd_haddr_lo = val; +} + +static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct cnic_uio_dev *udev = cp->udev; + u32 cid_addr, sb_id, val, coal_reg, coal_val; + int i; + struct bnx2_rx_bd *rxbd; + struct status_block *s_blk = cp->status_blk.gen; + dma_addr_t ring_map = udev->l2_ring_map; + + sb_id = cp->status_blk_num; + cnic_init_context(dev, 2); + cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; + coal_reg = BNX2_HC_COMMAND; + coal_val = CNIC_RD(dev, coal_reg); + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *sblk = cp->status_blk.bnx2; + + cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; + coal_reg = BNX2_HC_COALESCE_NOW; + coal_val = 1 << (11 + sb_id); + } + i = 0; + while (!(*cp->rx_cons_ptr != 0) && i < 10) { + CNIC_WR(dev, coal_reg, coal_val); + udelay(10); + i++; + barrier(); + } + cp->rx_cons = *cp->rx_cons_ptr; + + cid_addr = GET_CID_ADDR(2); + val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | + BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); + + if (sb_id == 0) + val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; + else + val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); + + rxbd = udev->l2_ring + 
CNIC_PAGE_SIZE; + for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { + dma_addr_t buf_map; + int n = (i % cp->l2_rx_ring_size) + 1; + + buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); + rxbd->rx_bd_len = cp->l2_single_buf_size; + rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; + rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; + rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; + } + val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); + rxbd->rx_bd_haddr_hi = val; + + val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; + cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); + rxbd->rx_bd_haddr_lo = val; + + val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); + cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); +} + +static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) +{ + struct kwqe *wqes[1], l2kwqe; + + memset(&l2kwqe, 0, sizeof(l2kwqe)); + wqes[0] = &l2kwqe; + l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | + (L2_KWQE_OPCODE_VALUE_FLUSH << + KWQE_OPCODE_SHIFT) | 2; + dev->submit_kwqes(dev, wqes, 1); +} + +static void cnic_set_bnx2_mac(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + u32 val; + + val = cp->func << 2; + + cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); + + val = cnic_reg_rd_ind(dev, cp->shmem_base + + BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); + dev->mac_addr[0] = (u8) (val >> 8); + dev->mac_addr[1] = (u8) val; + + CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); + + val = cnic_reg_rd_ind(dev, cp->shmem_base + + BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); + dev->mac_addr[2] = (u8) (val >> 24); + dev->mac_addr[3] = (u8) (val >> 16); + dev->mac_addr[4] = (u8) (val >> 8); + dev->mac_addr[5] = (u8) val; + + CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); + + val = 4 | BNX2_RPM_SORT_USER2_BC_EN; + if (BNX2_CHIP(cp) != BNX2_CHIP_5709) + val |= BNX2_RPM_SORT_USER2_PROM_VLAN; + + CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); + CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); + CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); +} + +static int cnic_start_bnx2_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + struct status_block *sblk = cp->status_blk.gen; + u32 val, kcq_cid_addr, kwq_cid_addr; + int err; + + cnic_set_bnx2_mac(dev); + + val = CNIC_RD(dev, BNX2_MQ_CONFIG); + val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; + if (CNIC_PAGE_BITS > 12) + val |= (12 - 8) << 4; + else + val |= (CNIC_PAGE_BITS - 8) << 4; + + CNIC_WR(dev, BNX2_MQ_CONFIG, val); + + CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); + CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); + CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); + + err = cnic_setup_5709_context(dev, 1); + if (err) + return err; + + cnic_init_context(dev, KWQ_CID); + cnic_init_context(dev, KCQ_CID); + + kwq_cid_addr = GET_CID_ADDR(KWQ_CID); + cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; + + cp->max_kwq_idx = MAX_KWQ_IDX; + cp->kwq_prod_idx = 0; + cp->kwq_con_idx = 0; + set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); + + if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708) + cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; + else + cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; + + /* Initialize the kernel work queue context. 
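+ * The KWQ spans KWQ_PAGE_CNT host pages of struct kwqe entries; the
+ * context words written below publish the entries-per-page count and
+ * the DMA address of the page table so the chip can walk the ring.
+ * (Illustration, assuming the usual 4 KiB CNIC page and the 32-byte
+ * KWQE implied by the KWQ_PG() shift of BNX2_PAGE_BITS - 5: each page
+ * then holds 128 entries, so L5_KRNLQ_QE_SELF_SEQ_MAX is written with
+ * (128 - 1) << 16 and L5_KRNLQ_PGTBL_NPAGES with (128 << 16) | 4.)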
*/ + val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | + (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); + + val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); + + val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); + + val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); + + val = (u32) cp->kwq_info.pgtbl_map; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); + + kcq_cid_addr = GET_CID_ADDR(KCQ_CID); + cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; + + cp->kcq1.sw_prod_idx = 0; + cp->kcq1.hw_prod_idx_ptr = + &sblk->status_completion_producer_index; + + cp->kcq1.status_idx_ptr = &sblk->status_idx; + + /* Initialize the kernel complete queue context. */ + val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | + (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); + + val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); + + val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); + + val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); + + val = (u32) cp->kcq1.dma.pgtbl_map; + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); + + cp->int_num = 0; + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { + struct status_block_msix *msblk = cp->status_blk.bnx2; + u32 sb_id = cp->status_blk_num; + u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); + + cp->kcq1.hw_prod_idx_ptr = + &msblk->status_completion_producer_index; + cp->kcq1.status_idx_ptr = &msblk->status_idx; + cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index; + cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; + cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); + cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); + } + + /* Enable Commnad Scheduler notification when we write to the + * host producer index of the kernel contexts. */ + CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2); + + /* Enable Command Scheduler notification when we write to either + * the Send Queue or Receive Queue producer indexes of the kernel + * bypass contexts. */ + CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7); + CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7); + + /* Notify COM when the driver post an application buffer. */ + CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000); + + /* Set the CP and COM doorbells. These two processors polls the + * doorbell for a non zero value before running. This must be done + * after setting up the kernel queue contexts. 
*/ + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); + + cnic_init_bnx2_tx_ring(dev); + cnic_init_bnx2_rx_ring(dev); + + err = cnic_init_bnx2_irq(dev); + if (err) { + netdev_err(dev->netdev, "cnic_init_irq failed\n"); + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); + return err; + } + + ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ; + + return 0; +} + +static void cnic_setup_bnx2x_context(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + u32 start_offset = ethdev->ctx_tbl_offset; + int i; + + for (i = 0; i < cp->ctx_blks; i++) { + struct cnic_ctx *ctx = &cp->ctx_arr[i]; + dma_addr_t map = ctx->mapping; + + if (cp->ctx_align) { + unsigned long mask = cp->ctx_align - 1; + + map = (map + mask) & ~mask; + } + + cnic_ctx_tbl_wr(dev, start_offset + i, map); + } +} + +static int cnic_init_bnx2x_irq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err = 0; + + tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, + (unsigned long) dev); + if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) + err = cnic_request_irq(dev); + + return err; +} + +static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev, + u16 sb_id, u8 sb_index, + u8 disable) +{ + struct bnx2x *bp = netdev_priv(dev->netdev); + + u32 addr = BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + + offsetof(struct hc_status_block_data_e1x, index_data) + + sizeof(struct hc_index_data)*sb_index + + offsetof(struct hc_index_data, flags); + u16 flags = CNIC_RD16(dev, addr); + /* clear and set */ + flags &= ~HC_INDEX_DATA_HC_ENABLED; + flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) & + HC_INDEX_DATA_HC_ENABLED); + CNIC_WR16(dev, addr, flags); +} + +static void cnic_enable_bnx2x_int(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + u8 sb_id = cp->status_blk_num; + + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + + offsetof(struct hc_status_block_data_e1x, index_data) + + sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS + + offsetof(struct hc_index_data, timeout), 64 / 4); + cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0); +} + +static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) +{ +} + +static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, + struct client_init_ramrod_data *data) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct cnic_uio_dev *udev = cp->udev; + union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; + dma_addr_t buf_map, ring_map = udev->l2_ring_map; + struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; + int i; + u32 cli = cp->ethdev->iscsi_l2_client_id; + u32 val; + + memset(txbd, 0, CNIC_PAGE_SIZE); + + buf_map = udev->l2_buf_map; + for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { + struct eth_tx_start_bd *start_bd = &txbd->start_bd; + struct eth_tx_parse_bd_e1x *pbd_e1x = + &((txbd + 1)->parse_bd_e1x); + struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2); + struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); + + start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); + start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); + reg_bd->addr_hi = start_bd->addr_hi; + reg_bd->addr_lo = start_bd->addr_lo + 0x10; + start_bd->nbytes = 
cpu_to_le16(0x10); + start_bd->nbd = cpu_to_le16(3); + start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS; + start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); + + if (BNX2X_CHIP_IS_E2_PLUS(bp)) + pbd_e2->parsing_data = (UNICAST_ADDRESS << + ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); + else + pbd_e1x->global_data = (UNICAST_ADDRESS << + ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT); + } + + val = (u64) ring_map >> 32; + txbd->next_bd.addr_hi = cpu_to_le32(val); + + data->tx.tx_bd_page_base.hi = cpu_to_le32(val); + + val = (u64) ring_map & 0xffffffff; + txbd->next_bd.addr_lo = cpu_to_le32(val); + + data->tx.tx_bd_page_base.lo = cpu_to_le32(val); + + /* Other ramrod params */ + data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; + data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; + + /* reset xstorm per client statistics */ + if (cli < MAX_STAT_COUNTER_ID) { + data->general.statistics_zero_flg = 1; + data->general.statistics_en_flg = 1; + data->general.statistics_counter_id = cli; + } + + cp->tx_cons_ptr = + &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; +} + +static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, + struct client_init_ramrod_data *data) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct cnic_uio_dev *udev = cp->udev; + struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + + CNIC_PAGE_SIZE); + struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) + (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); + struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; + int i; + u32 cli = cp->ethdev->iscsi_l2_client_id; + int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); + u32 val; + dma_addr_t ring_map = udev->l2_ring_map; + + /* General data */ + data->general.client_id = cli; + data->general.activate_flg = 1; + data->general.sp_client_id = cli; + data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); + data->general.func_id = bp->pfid; + + for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { + dma_addr_t buf_map; + int n = (i % cp->l2_rx_ring_size) + 1; + + buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); + rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); + rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); + } + + val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; + rxbd->addr_hi = cpu_to_le32(val); + data->rx.bd_page_base.hi = cpu_to_le32(val); + + val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; + rxbd->addr_lo = cpu_to_le32(val); + data->rx.bd_page_base.lo = cpu_to_le32(val); + + rxcqe += BNX2X_MAX_RCQ_DESC_CNT; + val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32; + rxcqe->addr_hi = cpu_to_le32(val); + data->rx.cqe_page_base.hi = cpu_to_le32(val); + + val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff; + rxcqe->addr_lo = cpu_to_le32(val); + data->rx.cqe_page_base.lo = cpu_to_le32(val); + + /* Other ramrod params */ + data->rx.client_qzone_id = cl_qzone_id; + data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; + data->rx.status_block_id = BNX2X_DEF_SB_ID; + + data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; + + data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); + data->rx.outer_vlan_removal_enable_flg = 1; + data->rx.silent_vlan_removal_flg = 1; + data->rx.silent_vlan_value = 0; + data->rx.silent_vlan_mask = 0xffff; + + cp->rx_cons_ptr = + &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; + cp->rx_cons = *cp->rx_cons_ptr; +} + +static void 
cnic_init_bnx2x_kcq(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 pfid = bp->pfid; + + cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); + cp->kcq1.sw_prod_idx = 0; + + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { + struct host_hc_status_block_e2 *sb = cp->status_blk.gen; + + cp->kcq1.hw_prod_idx_ptr = + &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; + cp->kcq1.status_idx_ptr = + &sb->sb.running_index[SM_RX_ID]; + } else { + struct host_hc_status_block_e1x *sb = cp->status_blk.gen; + + cp->kcq1.hw_prod_idx_ptr = + &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; + cp->kcq1.status_idx_ptr = + &sb->sb.running_index[SM_RX_ID]; + } + + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { + struct host_hc_status_block_e2 *sb = cp->status_blk.gen; + + cp->kcq2.io_addr = BAR_USTRORM_INTMEM + + USTORM_FCOE_EQ_PROD_OFFSET(pfid); + cp->kcq2.sw_prod_idx = 0; + cp->kcq2.hw_prod_idx_ptr = + &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; + cp->kcq2.status_idx_ptr = + &sb->sb.running_index[SM_RX_ID]; + } +} + +static int cnic_start_bnx2x_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct cnic_eth_dev *ethdev = cp->ethdev; + int func, ret; + u32 pfid; + + dev->stats_addr = ethdev->addr_drv_info_to_mcp; + cp->func = bp->pf_num; + + func = CNIC_FUNC(cp); + pfid = bp->pfid; + + ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, + cp->iscsi_start_cid, 0); + + if (ret) + return -ENOMEM; + + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { + ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, + cp->fcoe_start_cid, 0); + + if (ret) + return -ENOMEM; + } + + cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; + + cnic_init_bnx2x_kcq(dev); + + /* Only 1 EQ */ + CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), + cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, + (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0), + cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, + (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); + CNIC_WR8(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), + HC_INDEX_ISCSI_EQ_CONS); + + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), + cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); + CNIC_WR(dev, BAR_USTRORM_INTMEM + + USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, + (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); + + CNIC_WR(dev, BAR_TSTRORM_INTMEM + + TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); + + cnic_setup_bnx2x_context(dev); + + ret = cnic_init_bnx2x_irq(dev); + if (ret) + return ret; + + ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ; + return 0; +} + +static void cnic_init_rings(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + struct cnic_uio_dev *udev = cp->udev; + + if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 
+ return; + + if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { + cnic_init_bnx2_tx_ring(dev); + cnic_init_bnx2_rx_ring(dev); + set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); + } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + u32 cli = cp->ethdev->iscsi_l2_client_id; + u32 cid = cp->ethdev->iscsi_l2_cid; + u32 cl_qzone_id; + struct client_init_ramrod_data *data; + union l5cm_specific_data l5_data; + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 off, i, *cid_ptr; + + rx_prods.bd_prod = 0; + rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; + barrier(); + + cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); + + off = BAR_USTRORM_INTMEM + + (BNX2X_CHIP_IS_E2_PLUS(bp) ? + USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : + USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli)); + + for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) + CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); + + set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); + + data = udev->l2_buf; + cid_ptr = udev->l2_buf + 12; + + memset(data, 0, sizeof(*data)); + + cnic_init_bnx2x_tx_ring(dev, data); + cnic_init_bnx2x_rx_ring(dev, data); + + l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; + l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; + + set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); + + cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, + cid, ETH_CONNECTION_TYPE, &l5_data); + + i = 0; + while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && + ++i < 10) + msleep(1); + + if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) + netdev_err(dev->netdev, + "iSCSI CLIENT_SETUP did not complete\n"); + cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); + cnic_ring_ctl(dev, cid, cli, 1); + *cid_ptr = cid >> 4; + *(cid_ptr + 1) = cid * bp->db_size; + *(cid_ptr + 2) = UIO_USE_TX_DOORBELL; + } +} + +static void cnic_shutdown_rings(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_uio_dev *udev = cp->udev; + void *rx_ring; + + if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) + return; + + if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { + cnic_shutdown_bnx2_rx_ring(dev); + } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + u32 cli = cp->ethdev->iscsi_l2_client_id; + u32 cid = cp->ethdev->iscsi_l2_cid; + union l5cm_specific_data l5_data; + int i; + + cnic_ring_ctl(dev, cid, cli, 0); + + set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); + + l5_data.phy_address.lo = cli; + l5_data.phy_address.hi = 0; + cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, + cid, ETH_CONNECTION_TYPE, &l5_data); + i = 0; + while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && + ++i < 10) + msleep(1); + + if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) + netdev_err(dev->netdev, + "iSCSI CLIENT_HALT did not complete\n"); + cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); + + memset(&l5_data, 0, sizeof(l5_data)); + cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, + cid, NONE_CONNECTION_TYPE, &l5_data); + msleep(10); + } + clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); + rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; + memset(rx_ring, 0, CNIC_PAGE_SIZE); +} + +static int cnic_register_netdev(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + if (!ethdev) + return -ENODEV; + + if (ethdev->drv_state & CNIC_DRV_STATE_REGD) + return 0; + + err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); + if (err) + netdev_err(dev->netdev, 
"register_cnic failed\n"); + + /* Read iSCSI config again. On some bnx2x device, iSCSI config + * can change after firmware is downloaded. + */ + dev->max_iscsi_conn = ethdev->max_iscsi_conn; + if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + dev->max_iscsi_conn = 0; + + return err; +} + +static void cnic_unregister_netdev(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + + if (!ethdev) + return; + + ethdev->drv_unregister_cnic(dev->netdev); +} + +static int cnic_start_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct cnic_eth_dev *ethdev = cp->ethdev; + int err; + + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) + return -EALREADY; + + dev->regview = ethdev->io_base; + pci_dev_get(dev->pcidev); + cp->func = PCI_FUNC(dev->pcidev->devfn); + cp->status_blk.gen = ethdev->irq_arr[0].status_blk; + cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; + + err = cp->alloc_resc(dev); + if (err) { + netdev_err(dev->netdev, "allocate resource failure\n"); + goto err1; + } + + err = cp->start_hw(dev); + if (err) + goto err1; + + err = cnic_cm_open(dev); + if (err) + goto err1; + + set_bit(CNIC_F_CNIC_UP, &dev->flags); + + cp->enable_int(dev); + + return 0; + +err1: + cp->free_resc(dev); + pci_dev_put(dev->pcidev); + return err; +} + +static void cnic_stop_bnx2_hw(struct cnic_dev *dev) +{ + cnic_disable_bnx2_int_sync(dev); + + cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); + cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); + + cnic_init_context(dev, KWQ_CID); + cnic_init_context(dev, KCQ_CID); + + cnic_setup_5709_context(dev, 0); + cnic_free_irq(dev); + + cnic_free_resc(dev); +} + + +static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) +{ + struct cnic_local *cp = dev->cnic_priv; + struct bnx2x *bp = netdev_priv(dev->netdev); + u32 hc_index = HC_INDEX_ISCSI_EQ_CONS; + u32 sb_id = cp->status_blk_num; + u32 idx_off, syn_off; + + cnic_free_irq(dev); + + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { + idx_off = offsetof(struct hc_status_block_e2, index_values) + + (hc_index * sizeof(u16)); + + syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id); + } else { + idx_off = offsetof(struct hc_status_block_e1x, index_values) + + (hc_index * sizeof(u16)); + + syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id); + } + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0); + CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) + + idx_off, 0); + + *cp->kcq1.hw_prod_idx_ptr = 0; + CNIC_WR(dev, BAR_CSTRORM_INTMEM + + CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0); + CNIC_WR16(dev, cp->kcq1.io_addr, 0); + cnic_free_resc(dev); +} + +static void cnic_stop_hw(struct cnic_dev *dev) +{ + if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { + struct cnic_local *cp = dev->cnic_priv; + int i = 0; + + /* Need to wait for the ring shutdown event to complete + * before clearing the CNIC_UP flag. 
+ */ + while (cp->udev && cp->udev->uio_dev != -1 && i < 15) { + msleep(100); + i++; + } + cnic_shutdown_rings(dev); + cp->stop_cm(dev); + cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ; + clear_bit(CNIC_F_CNIC_UP, &dev->flags); + RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); + synchronize_rcu(); + cnic_cm_shutdown(dev); + cp->stop_hw(dev); + pci_dev_put(dev->pcidev); + } +} + +static void cnic_free_dev(struct cnic_dev *dev) +{ + int i = 0; + + while ((atomic_read(&dev->ref_count) != 0) && i < 10) { + msleep(100); + i++; + } + if (atomic_read(&dev->ref_count) != 0) + netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); + + netdev_info(dev->netdev, "Removed CNIC device\n"); + dev_put(dev->netdev); + kfree(dev); +} + +static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, + struct pci_dev *pdev) +{ + struct cnic_dev *cdev; + struct cnic_local *cp; + int alloc_size; + + alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); + + cdev = kzalloc(alloc_size, GFP_KERNEL); + if (cdev == NULL) + return NULL; + + cdev->netdev = dev; + cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); + cdev->register_device = cnic_register_device; + cdev->unregister_device = cnic_unregister_device; + cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; + + cp = cdev->cnic_priv; + cp->dev = cdev; + cp->l2_single_buf_size = 0x400; + cp->l2_rx_ring_size = 3; + + spin_lock_init(&cp->cnic_ulp_lock); + + netdev_info(dev, "Added CNIC device\n"); + + return cdev; +} + +static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) +{ + struct pci_dev *pdev; + struct cnic_dev *cdev; + struct cnic_local *cp; + struct bnx2 *bp = netdev_priv(dev); + struct cnic_eth_dev *ethdev = NULL; + + if (bp->cnic_probe) + ethdev = (bp->cnic_probe)(dev); + + if (!ethdev) + return NULL; + + pdev = ethdev->pdev; + if (!pdev) + return NULL; + + dev_hold(dev); + pci_dev_get(pdev); + if ((pdev->device == PCI_DEVICE_ID_NX2_5709 || + pdev->device == PCI_DEVICE_ID_NX2_5709S) && + (pdev->revision < 0x10)) { + pci_dev_put(pdev); + goto cnic_err; + } + pci_dev_put(pdev); + + cdev = cnic_alloc_dev(dev, pdev); + if (cdev == NULL) + goto cnic_err; + + set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); + cdev->submit_kwqes = cnic_submit_bnx2_kwqes; + + cp = cdev->cnic_priv; + cp->ethdev = ethdev; + cdev->pcidev = pdev; + cp->chip_id = ethdev->chip_id; + + cdev->max_iscsi_conn = ethdev->max_iscsi_conn; + + cp->cnic_ops = &cnic_bnx2_ops; + cp->start_hw = cnic_start_bnx2_hw; + cp->stop_hw = cnic_stop_bnx2_hw; + cp->setup_pgtbl = cnic_setup_page_tbl; + cp->alloc_resc = cnic_alloc_bnx2_resc; + cp->free_resc = cnic_free_resc; + cp->start_cm = cnic_cm_init_bnx2_hw; + cp->stop_cm = cnic_cm_stop_bnx2_hw; + cp->enable_int = cnic_enable_bnx2_int; + cp->disable_int_sync = cnic_disable_bnx2_int_sync; + cp->close_conn = cnic_close_bnx2_conn; + return cdev; + +cnic_err: + dev_put(dev); + return NULL; +} + +static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) +{ + struct pci_dev *pdev; + struct cnic_dev *cdev; + struct cnic_local *cp; + struct bnx2x *bp = netdev_priv(dev); + struct cnic_eth_dev *ethdev = NULL; + + if (bp->cnic_probe) + ethdev = bp->cnic_probe(dev); + + if (!ethdev) + return NULL; + + pdev = ethdev->pdev; + if (!pdev) + return NULL; + + dev_hold(dev); + cdev = cnic_alloc_dev(dev, pdev); + if (cdev == NULL) { + dev_put(dev); + return NULL; + } + + set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); + cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; + + cp = cdev->cnic_priv; + cp->ethdev = ethdev; + cdev->pcidev = 
pdev; + cp->chip_id = ethdev->chip_id; + + cdev->stats_addr = ethdev->addr_drv_info_to_mcp; + + if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) + cdev->max_iscsi_conn = ethdev->max_iscsi_conn; + if (CNIC_SUPPORTS_FCOE(bp)) { + cdev->max_fcoe_conn = ethdev->max_fcoe_conn; + cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges; + } + + if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) + cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; + + memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN); + + cp->cnic_ops = &cnic_bnx2x_ops; + cp->start_hw = cnic_start_bnx2x_hw; + cp->stop_hw = cnic_stop_bnx2x_hw; + cp->setup_pgtbl = cnic_setup_page_tbl_le; + cp->alloc_resc = cnic_alloc_bnx2x_resc; + cp->free_resc = cnic_free_resc; + cp->start_cm = cnic_cm_init_bnx2x_hw; + cp->stop_cm = cnic_cm_stop_bnx2x_hw; + cp->enable_int = cnic_enable_bnx2x_int; + cp->disable_int_sync = cnic_disable_bnx2x_int_sync; + if (BNX2X_CHIP_IS_E2_PLUS(bp)) { + cp->ack_int = cnic_ack_bnx2x_e2_msix; + cp->arm_int = cnic_arm_bnx2x_e2_msix; + } else { + cp->ack_int = cnic_ack_bnx2x_msix; + cp->arm_int = cnic_arm_bnx2x_msix; + } + cp->close_conn = cnic_close_bnx2x_conn; + return cdev; +} + +static struct cnic_dev *is_cnic_dev(struct net_device *dev) +{ + struct ethtool_drvinfo drvinfo; + struct cnic_dev *cdev = NULL; + + if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + dev->ethtool_ops->get_drvinfo(dev, &drvinfo); + + if (!strcmp(drvinfo.driver, "bnx2")) + cdev = init_bnx2_cnic(dev); + if (!strcmp(drvinfo.driver, "bnx2x")) + cdev = init_bnx2x_cnic(dev); + if (cdev) { + write_lock(&cnic_dev_lock); + list_add(&cdev->list, &cnic_dev_list); + write_unlock(&cnic_dev_lock); + } + } + return cdev; +} + +static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, + u16 vlan_id) +{ + int if_type; + + for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { + struct cnic_ulp_ops *ulp_ops; + void *ctx; + + mutex_lock(&cnic_lock); + ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], + lockdep_is_held(&cnic_lock)); + if (!ulp_ops || !ulp_ops->indicate_netevent) { + mutex_unlock(&cnic_lock); + continue; + } + + ctx = cp->ulp_handle[if_type]; + + set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + mutex_unlock(&cnic_lock); + + ulp_ops->indicate_netevent(ctx, event, vlan_id); + + clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); + } +} + +/* netdev event handler */ +static int cnic_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct cnic_dev *dev; + int new_dev = 0; + + dev = cnic_from_netdev(netdev); + + if (!dev && event == NETDEV_REGISTER) { + /* Check for the hot-plug device */ + dev = is_cnic_dev(netdev); + if (dev) { + new_dev = 1; + cnic_hold(dev); + } + } + if (dev) { + struct cnic_local *cp = dev->cnic_priv; + + if (new_dev) + cnic_ulp_init(dev); + else if (event == NETDEV_UNREGISTER) + cnic_ulp_exit(dev); + + if (event == NETDEV_UP) { + if (cnic_register_netdev(dev) != 0) { + cnic_put(dev); + goto done; + } + if (!cnic_start_hw(dev)) + cnic_ulp_start(dev); + } + + cnic_rcv_netevent(cp, event, 0); + + if (event == NETDEV_GOING_DOWN) { + cnic_ulp_stop(dev); + cnic_stop_hw(dev); + cnic_unregister_netdev(dev); + } else if (event == NETDEV_UNREGISTER) { + write_lock(&cnic_dev_lock); + list_del_init(&dev->list); + write_unlock(&cnic_dev_lock); + + cnic_put(dev); + cnic_free_dev(dev); + goto done; + } + cnic_put(dev); + } else { + struct net_device 
*realdev; + u16 vid; + + vid = cnic_get_vlan(netdev, &realdev); + if (realdev) { + dev = cnic_from_netdev(realdev); + if (dev) { + vid |= VLAN_TAG_PRESENT; + cnic_rcv_netevent(dev->cnic_priv, event, vid); + cnic_put(dev); + } + } + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block cnic_netdev_notifier = { + .notifier_call = cnic_netdev_event +}; + +static void cnic_release(void) +{ + struct cnic_uio_dev *udev; + + while (!list_empty(&cnic_udev_list)) { + udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, + list); + cnic_free_uio(udev); + } +} + +static int __init cnic_init(void) +{ + int rc = 0; + + pr_info("%s", version); + + rc = register_netdevice_notifier(&cnic_netdev_notifier); + if (rc) { + cnic_release(); + return rc; + } + + cnic_wq = create_singlethread_workqueue("cnic_wq"); + if (!cnic_wq) { + cnic_release(); + unregister_netdevice_notifier(&cnic_netdev_notifier); + return -ENOMEM; + } + + return 0; +} + +static void __exit cnic_exit(void) +{ + unregister_netdevice_notifier(&cnic_netdev_notifier); + cnic_release(); + destroy_workqueue(cnic_wq); +} + +module_init(cnic_init); +module_exit(cnic_exit); diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h new file mode 100644 index 000000000..4baea81ba --- /dev/null +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -0,0 +1,427 @@ +/* cnic.h: QLogic CNIC core network driver. + * + * Copyright (c) 2006-2014 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + + +#ifndef CNIC_H +#define CNIC_H + +#define HC_INDEX_ISCSI_EQ_CONS 6 + +#define HC_INDEX_FCOE_EQ_CONS 3 + +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + +#define KWQ_PAGE_CNT 4 +#define KCQ_PAGE_CNT 16 + +#define KWQ_CID 24 +#define KCQ_CID 25 + +/* + * krnlq_context definition + */ +#define L5_KRNLQ_FLAGS 0x00000000 +#define L5_KRNLQ_SIZE 0x00000000 +#define L5_KRNLQ_TYPE 0x00000000 +#define KRNLQ_FLAGS_PG_SZ (0xf<<0) +#define KRNLQ_FLAGS_PG_SZ_256 (0<<0) +#define KRNLQ_FLAGS_PG_SZ_512 (1<<0) +#define KRNLQ_FLAGS_PG_SZ_1K (2<<0) +#define KRNLQ_FLAGS_PG_SZ_2K (3<<0) +#define KRNLQ_FLAGS_PG_SZ_4K (4<<0) +#define KRNLQ_FLAGS_PG_SZ_8K (5<<0) +#define KRNLQ_FLAGS_PG_SZ_16K (6<<0) +#define KRNLQ_FLAGS_PG_SZ_32K (7<<0) +#define KRNLQ_FLAGS_PG_SZ_64K (8<<0) +#define KRNLQ_FLAGS_PG_SZ_128K (9<<0) +#define KRNLQ_FLAGS_PG_SZ_256K (10<<0) +#define KRNLQ_FLAGS_PG_SZ_512K (11<<0) +#define KRNLQ_FLAGS_PG_SZ_1M (12<<0) +#define KRNLQ_FLAGS_PG_SZ_2M (13<<0) +#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15) +#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16) +#define KRNLQ_TYPE_TYPE (0xf<<28) +#define KRNLQ_TYPE_TYPE_EMPTY (0<<28) +#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28) + +#define L5_KRNLQ_HOST_QIDX 0x00000004 +#define L5_KRNLQ_HOST_FW_QIDX 0x00000008 +#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c +#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c +#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010 +#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014 +#define L5_KRNLQ_PGTBL_PGIDX 0x00000018 +#define L5_KRNLQ_NX_PG_QIDX 0x00000018 +#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c +#define L5_KRNLQ_QIDX_INCR 0x0000001c +#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020 +#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024 + +#define BNX2_PG_CTX_MAP 0x1a0034 +#define BNX2_ISCSI_CTX_MAP 0x1a0074 + +#define MAX_COMPLETED_KCQE 64 + +#define 
MAX_CNIC_L5_CONTEXT 256 + +#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT + +#define MAX_ISCSI_TBL_SZ 256 + +#define CNIC_LOCAL_PORT_MIN 60000 +#define CNIC_LOCAL_PORT_MAX 61024 +#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) + +#define KWQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kwqe)) +#define KCQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kcqe)) +#define MAX_KWQE_CNT (KWQE_CNT - 1) +#define MAX_KCQE_CNT (KCQE_CNT - 1) + +#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1) +#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1) + +#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BNX2_PAGE_BITS - 5)) +#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT) + +#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BNX2_PAGE_BITS - 5)) +#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT) + +#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \ + (MAX_KCQE_CNT - 1)) ? \ + (x) + 2 : (x) + 1 + +#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp) +#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp) +#define BNX2X_KWQ_DATA(cp, x) \ + &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)] + +#define DEF_IPID_START 0x8000 + +#define DEF_KA_TIMEOUT 10000 +#define DEF_KA_INTERVAL 300000 +#define DEF_KA_MAX_PROBE_COUNT 3 +#define DEF_TOS 0 +#define DEF_TTL 0xfe +#define DEF_SND_SEQ_SCALE 0 +#define DEF_RCV_BUF 0xffff +#define DEF_SND_BUF 0xffff +#define DEF_SEED 0 +#define DEF_MAX_RT_TIME 500 +#define DEF_MAX_DA_COUNT 2 +#define DEF_SWS_TIMER 1000 +#define DEF_MAX_CWND 0xffff + +struct cnic_ctx { + u32 cid; + void *ctx; + dma_addr_t mapping; +}; + +#define BNX2_MAX_CID 0x2000 + +struct cnic_dma { + int num_pages; + void **pg_arr; + dma_addr_t *pg_map_arr; + int pgtbl_size; + u32 *pgtbl; + dma_addr_t pgtbl_map; +}; + +struct cnic_id_tbl { + spinlock_t lock; + u32 start; + u32 max; + u32 next; + unsigned long *table; +}; + +#define CNIC_KWQ16_DATA_SIZE 128 + +struct kwqe_16_data { + u8 data[CNIC_KWQ16_DATA_SIZE]; +}; + +struct cnic_iscsi { + struct cnic_dma task_array_info; + struct cnic_dma r2tq_info; + struct cnic_dma hq_info; +}; + +struct cnic_context { + u32 cid; + struct kwqe_16_data *kwqe_data; + dma_addr_t kwqe_data_mapping; + wait_queue_head_t waitq; + int wait_cond; + unsigned long timestamp; + unsigned long ctx_flags; +#define CTX_FL_OFFLD_START 0 +#define CTX_FL_DELETE_WAIT 1 +#define CTX_FL_CID_ERROR 2 + u8 ulp_proto_id; + union { + struct cnic_iscsi *iscsi; + } proto; +}; + +struct kcq_info { + struct cnic_dma dma; + struct kcqe **kcq; + + u16 *hw_prod_idx_ptr; + u16 sw_prod_idx; + u16 *status_idx_ptr; + u32 io_addr; + + u16 (*next_idx)(u16); + u16 (*hw_idx)(u16); +}; + +#define UIO_USE_TX_DOORBELL 0x017855DB + +struct cnic_uio_dev { + struct uio_info cnic_uinfo; + u32 uio_dev; + + int l2_ring_size; + void *l2_ring; + dma_addr_t l2_ring_map; + + int l2_buf_size; + void *l2_buf; + dma_addr_t l2_buf_map; + + struct cnic_dev *dev; + struct pci_dev *pdev; + struct list_head list; +}; + +struct cnic_local { + + spinlock_t cnic_ulp_lock; + void *ulp_handle[MAX_CNIC_ULP_TYPE]; + unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; +#define ULP_F_INIT 0 +#define ULP_F_START 1 +#define ULP_F_CALL_PENDING 2 + struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE]; + + unsigned long cnic_local_flags; +#define CNIC_LCL_FL_KWQ_INIT 0x0 +#define CNIC_LCL_FL_L2_WAIT 0x1 +#define CNIC_LCL_FL_RINGS_INITED 0x2 +#define CNIC_LCL_FL_STOP_ISCSI 0x4 + + struct cnic_dev *dev; + + struct cnic_eth_dev *ethdev; + + struct cnic_uio_dev *udev; + + int l2_rx_ring_size; + int l2_single_buf_size; + + u16 
*rx_cons_ptr; + u16 *tx_cons_ptr; + u16 rx_cons; + u16 tx_cons; + + struct cnic_dma kwq_info; + struct kwqe **kwq; + + struct cnic_dma kwq_16_data_info; + + u16 max_kwq_idx; + + u16 kwq_prod_idx; + u32 kwq_io_addr; + + u16 *kwq_con_idx_ptr; + u16 kwq_con_idx; + + struct kcq_info kcq1; + struct kcq_info kcq2; + + union { + void *gen; + struct status_block_msix *bnx2; + struct host_hc_status_block_e1x *bnx2x_e1x; + /* index values - which counter to update */ + #define SM_RX_ID 0 + #define SM_TX_ID 1 + } status_blk; + + struct host_sp_status_block *bnx2x_def_status_blk; + + u32 status_blk_num; + u32 bnx2x_igu_sb_id; + u32 int_num; + u32 last_status_idx; + struct tasklet_struct cnic_irq_task; + + struct kcqe *completed_kcq[MAX_COMPLETED_KCQE]; + + struct cnic_sock *csk_tbl; + struct cnic_id_tbl csk_port_tbl; + + struct cnic_dma gbl_buf_info; + + struct cnic_iscsi *iscsi_tbl; + struct cnic_context *ctx_tbl; + struct cnic_id_tbl cid_tbl; + atomic_t iscsi_conn; + u32 iscsi_start_cid; + + u32 fcoe_init_cid; + u32 fcoe_start_cid; + struct cnic_id_tbl fcoe_cid_tbl; + + u32 max_cid_space; + + /* per connection parameters */ + int num_iscsi_tasks; + int num_ccells; + int task_array_size; + int r2tq_size; + int hq_size; + int num_cqs; + + struct delayed_work delete_task; + + struct cnic_ctx *ctx_arr; + int ctx_blks; + int ctx_blk_size; + unsigned long ctx_align; + int cids_per_blk; + + u32 chip_id; + int func; + + u32 shmem_base; + + struct cnic_ops *cnic_ops; + int (*start_hw)(struct cnic_dev *); + void (*stop_hw)(struct cnic_dev *); + void (*setup_pgtbl)(struct cnic_dev *, + struct cnic_dma *); + int (*alloc_resc)(struct cnic_dev *); + void (*free_resc)(struct cnic_dev *); + int (*start_cm)(struct cnic_dev *); + void (*stop_cm)(struct cnic_dev *); + void (*enable_int)(struct cnic_dev *); + void (*disable_int_sync)(struct cnic_dev *); + void (*ack_int)(struct cnic_dev *); + void (*arm_int)(struct cnic_dev *, u32 index); + void (*close_conn)(struct cnic_sock *, u32 opcode); +}; + +struct bnx2x_bd_chain_next { + u32 addr_lo; + u32 addr_hi; + u8 reserved[8]; +}; + +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) + +#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN) +#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT) + +#define CDU_REGION_NUMBER_XCM_AG 2 +#define CDU_REGION_NUMBER_UCM_AG 4 + +#define CDU_VALID_DATA(_cid, _region, _type) \ + (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf))) + +#define CDU_CRC8(_cid, _region, _type) \ + (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff)) + +#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \ + (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f)) + +#define BNX2X_CONTEXT_MEM_SIZE 1024 +#define BNX2X_FCOE_CID 16 + +#define BNX2X_ISCSI_START_CID 18 +#define BNX2X_ISCSI_NUM_CONNECTIONS 128 +#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128 +#define BNX2X_ISCSI_MAX_PENDING_R2TS 4 +#define BNX2X_ISCSI_R2TQE_SIZE 8 +#define BNX2X_ISCSI_HQ_BD_SIZE 64 +#define BNX2X_ISCSI_GLB_BUF_SIZE 64 +#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff +#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff + +#define BNX2X_FCOE_NUM_CONNECTIONS 1024 + +#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ + +#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) + +#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \ + sizeof(struct eth_rx_bd)) +#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2) +#define BNX2X_RCQ_DESC_CNT (BNX2_PAGE_SIZE / \ + sizeof(union eth_rx_cqe)) +#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1) + +#define BNX2X_NEXT_RCQE(x) (((x) & 
BNX2X_MAX_RCQ_DESC_CNT) == \ + (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \ + ((x) + 2) : ((x) + 1) + +#define BNX2X_DEF_SB_ID HC_SP_SB_ID + +#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4 + +#define BNX2X_SHMEM_ADDR(base, field) (base + \ + offsetof(struct shmem_region, field)) + +#define BNX2X_SHMEM2_ADDR(base, field) (base + \ + offsetof(struct shmem2_region, field)) + +#define BNX2X_SHMEM2_HAS(base, field) \ + ((base) && \ + (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \ + offsetof(struct shmem2_region, field))) + +#define BNX2X_MF_CFG_ADDR(base, field) \ + ((base) + offsetof(struct mf_cfg, field)) + +#ifndef ETH_MAX_RX_CLIENTS_E2 +#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H +#endif + +#define CNIC_FUNC(cp) ((cp)->func) + +#define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ + (BP_VN(bp) << 17) | (x)) + +#define BNX2X_SW_CID(x) (x & 0x1ffff) + +#define BNX2X_CL_QZONE_ID(bp, cli) \ + (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \ + cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H)) + +#ifndef MAX_STAT_COUNTER_ID +#define MAX_STAT_COUNTER_ID \ + (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \ + ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \ + MAX_STAT_COUNTER_ID_E1)) +#endif + +#define CNIC_SUPPORTS_FCOE(cp) \ + (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp)) + +#define CNIC_RAMROD_TMO (HZ / 4) + +#endif + diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h new file mode 100644 index 000000000..b38499774 --- /dev/null +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -0,0 +1,5462 @@ + +/* cnic.c: QLogic CNIC core network driver. + * + * Copyright (c) 2006-2014 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
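+ * This header carries the firmware interface definitions shared with
+ * cnic.c: KWQ/KCQ opcode values, ramrod command IDs and the
+ * endian-aware KWQE/KCQE structure layouts that follow.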
+ * + */ + +#ifndef CNIC_DEFS_H +#define CNIC_DEFS_H + +/* KWQ (kernel work queue) request op codes */ +#define L2_KWQE_OPCODE_VALUE_FLUSH (4) +#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8) + +#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50) +#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51) +#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52) +#define L4_KWQE_OPCODE_VALUE_RESET (53) +#define L4_KWQE_OPCODE_VALUE_CLOSE (54) +#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60) +#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61) + +#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1) +#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9) +#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14) + +#define L5CM_RAMROD_CMD_ID_BASE (0x80) +#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3) +#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12) +#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13) +#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14) +#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15) + +#define FCOE_RAMROD_CMD_ID_INIT_FUNC (FCOE_KCQE_OPCODE_INIT_FUNC) +#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC (FCOE_KCQE_OPCODE_DESTROY_FUNC) +#define FCOE_RAMROD_CMD_ID_STAT_FUNC (FCOE_KCQE_OPCODE_STAT_FUNC) +#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN) +#define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN) +#define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN) +#define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN) +#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81) + +/* KCQ (kernel completion queue) response op codes */ +#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53) +#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54) +#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55) +#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56) +#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57) +#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58) +#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61) + +#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1) +#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9) +#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14) + +/* KCQ (kernel completion queue) completion status */ +#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) +#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR (4) +#define L4_KCQE_COMPLETION_STATUS_PARITY_ERROR (0x81) +#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) + +#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83) +#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89) + +#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0) +#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1) + +#define L4_LAYER_CODE (4) +#define L2_LAYER_CODE (2) + +/* + * L4 KCQ CQE + */ +struct l4_kcq { + u32 cid; + u32 pg_cid; + u32 conn_id; + u32 pg_host_opaque; +#if defined(__BIG_ENDIAN) + u16 status; + u16 reserved1; +#elif defined(__LITTLE_ENDIAN) + u16 reserved1; + u16 status; +#endif + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KCQ_RESERVED3 (0x7<<0) +#define L4_KCQ_RESERVED3_SHIFT 0 +#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ +#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 +#define L4_KCQ_LAYER_CODE (0x7<<4) +#define L4_KCQ_LAYER_CODE_SHIFT 4 +#define L4_KCQ_RESERVED4 (0x1<<7) +#define L4_KCQ_RESERVED4_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define L4_KCQ_RESERVED3 (0xF<<0) +#define L4_KCQ_RESERVED3_SHIFT 0 +#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ +#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 +#define 
L4_KCQ_LAYER_CODE (0x7<<4) +#define L4_KCQ_LAYER_CODE_SHIFT 4 +#define L4_KCQ_RESERVED4 (0x1<<7) +#define L4_KCQ_RESERVED4_SHIFT 7 +#endif +}; + + +/* + * L4 KCQ CQE PG upload + */ +struct l4_kcq_upload_pg { + u32 pg_cid; +#if defined(__BIG_ENDIAN) + u16 pg_status; + u16 pg_ipid_count; +#elif defined(__LITTLE_ENDIAN) + u16 pg_ipid_count; + u16 pg_status; +#endif + u32 reserved1[5]; +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) +#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 +#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) +#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) +#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 +#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) +#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 +#endif +}; + + +/* + * Gracefully close the connection request + */ +struct l4_kwq_close_req { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + + +/* + * The first request to be passed in order to establish connection in option2 + */ +struct l4_kwq_connect_req1 { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u8 reserved0; + u8 conn_flags; +#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0) +#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) +#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 conn_flags; +#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0) +#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) +#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 + u8 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) +#define 
L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 pg_cid; + u32 src_ip; + u32 dst_ip; +#if defined(__BIG_ENDIAN) + u16 dst_port; + u16 src_port; +#elif defined(__LITTLE_ENDIAN) + u16 src_port; + u16 dst_port; +#endif +#if defined(__BIG_ENDIAN) + u8 rsrv1[3]; + u8 tcp_flags; +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 +#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) +#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 +#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) +#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 tcp_flags; +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) +#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) +#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) +#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) +#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 +#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) +#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) +#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 +#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) +#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 + u8 rsrv1[3]; +#endif + u32 rsrv2; +}; + + +/* + * The second ( optional )request to be passed in order to establish + * connection in option2 - for IPv6 only + */ +struct l4_kwq_connect_req2 { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u8 reserved0; + u8 rsrv; +#elif defined(__LITTLE_ENDIAN) + u8 rsrv; + u8 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 reserved2; + u32 src_ip_v6_2; + u32 src_ip_v6_3; + u32 src_ip_v6_4; + u32 dst_ip_v6_2; + u32 dst_ip_v6_3; + u32 dst_ip_v6_4; +}; + + +/* + * The third ( and last )request to be passed in order to establish + * connection in option2 + */ +struct l4_kwq_connect_req3 { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) +#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) +#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 +#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) +#define 
L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 ka_timeout; + u32 ka_interval ; +#if defined(__BIG_ENDIAN) + u8 snd_seq_scale; + u8 ttl; + u8 tos; + u8 ka_max_probe_count; +#elif defined(__LITTLE_ENDIAN) + u8 ka_max_probe_count; + u8 tos; + u8 ttl; + u8 snd_seq_scale; +#endif +#if defined(__BIG_ENDIAN) + u16 pmtu; + u16 mss; +#elif defined(__LITTLE_ENDIAN) + u16 mss; + u16 pmtu; +#endif + u32 rcv_buf; + u32 snd_buf; + u32 seed; +}; + + +/* + * a KWQE request to offload a PG connection + */ +struct l4_kwq_offload_pg { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 +#endif +#if defined(__BIG_ENDIAN) + u8 l2hdr_nbytes; + u8 pg_flags; +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 +#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) +#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 + u8 da0; + u8 da1; +#elif defined(__LITTLE_ENDIAN) + u8 da1; + u8 da0; + u8 pg_flags; +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) +#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) +#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 +#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) +#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 + u8 l2hdr_nbytes; +#endif +#if defined(__BIG_ENDIAN) + u8 da2; + u8 da3; + u8 da4; + u8 da5; +#elif defined(__LITTLE_ENDIAN) + u8 da5; + u8 da4; + u8 da3; + u8 da2; +#endif +#if defined(__BIG_ENDIAN) + u8 sa0; + u8 sa1; + u8 sa2; + u8 sa3; +#elif defined(__LITTLE_ENDIAN) + u8 sa3; + u8 sa2; + u8 sa1; + u8 sa0; +#endif +#if defined(__BIG_ENDIAN) + u8 sa4; + u8 sa5; + u16 etype; +#elif defined(__LITTLE_ENDIAN) + u16 etype; + u8 sa5; + u8 sa4; +#endif +#if defined(__BIG_ENDIAN) + u16 vlan_tag; + u16 ipid_start; +#elif defined(__LITTLE_ENDIAN) + u16 ipid_start; + u16 vlan_tag; +#endif +#if defined(__BIG_ENDIAN) + u16 ipid_count; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 ipid_count; +#endif + u32 host_opaque; +}; + + +/* + * Abortively close the connection request + */ +struct l4_kwq_reset_req { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_code; + u8 flags; +#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) +#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 +#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) +#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 +#define 
L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + + +/* + * a KWQE request to update a PG connection + */ +struct l4_kwq_update_pg { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 + u8 opcode; + u16 oper16; +#elif defined(__LITTLE_ENDIAN) + u16 oper16; + u8 opcode; + u8 flags; +#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) +#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 +#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 pg_cid; + u32 pg_host_opaque; +#if defined(__BIG_ENDIAN) + u8 pg_valids; +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 +#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) +#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 +#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) +#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 + u8 pg_unused_a; + u16 pg_ipid_count; +#elif defined(__LITTLE_ENDIAN) + u16 pg_ipid_count; + u8 pg_unused_a; + u8 pg_valids; +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) +#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 +#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) +#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 +#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) +#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 +#endif +#if defined(__BIG_ENDIAN) + u16 reserverd3; + u8 da0; + u8 da1; +#elif defined(__LITTLE_ENDIAN) + u8 da1; + u8 da0; + u16 reserverd3; +#endif +#if defined(__BIG_ENDIAN) + u8 da2; + u8 da3; + u8 da4; + u8 da5; +#elif defined(__LITTLE_ENDIAN) + u8 da5; + u8 da4; + u8 da3; + u8 da2; +#endif + u32 reserved4; + u32 reserved5; +}; + + +/* + * a KWQE request to upload a PG or L4 context + */ +struct l4_kwq_upload { +#if defined(__BIG_ENDIAN) + u8 flags; +#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) +#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 +#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 + u8 opcode; + u16 oper16; +#elif defined(__LITTLE_ENDIAN) + u16 oper16; + u8 opcode; + u8 flags; +#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) +#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 +#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) +#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) +#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 +#endif + u32 cid; + u32 reserved2[6]; +}; + +/* + * bnx2x structures + */ + +/* + * The iscsi aggregative context of Cstorm + */ +struct cstorm_iscsi_ag_context { + u32 agg_vars1; +#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0) +#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0 +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8) +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8 +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9) +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9 +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10) +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10 +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11) +#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11 +#define 
__CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12) +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12 +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13) +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13 +#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14) +#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14 +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16) +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16 +#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18) +#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18 +#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19) +#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19 +#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20) +#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20 +#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21) +#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21 +#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22) +#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22 +#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23) +#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23 +#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26) +#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26 +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28) +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28 +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30) +#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30 +#if defined(__BIG_ENDIAN) + u8 __aux1_th; + u8 __aux1_val; + u16 __agg_vars2; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_vars2; + u8 __aux1_val; + u8 __aux1_th; +#endif + u32 rel_seq; + u32 rel_seq_th; +#if defined(__BIG_ENDIAN) + u16 hq_cons; + u16 hq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 hq_prod; + u16 hq_cons; +#endif +#if defined(__BIG_ENDIAN) + u8 __reserved62; + u8 __reserved61; + u8 __reserved60; + u8 __reserved59; +#elif defined(__LITTLE_ENDIAN) + u8 __reserved59; + u8 __reserved60; + u8 __reserved61; + u8 __reserved62; +#endif +#if defined(__BIG_ENDIAN) + u16 __reserved64; + u16 cq_u_prod; +#elif defined(__LITTLE_ENDIAN) + u16 cq_u_prod; + u16 __reserved64; +#endif + u32 __cq_u_prod1; +#if defined(__BIG_ENDIAN) + u16 __agg_vars3; + u16 cq_u_pend; +#elif defined(__LITTLE_ENDIAN) + u16 cq_u_pend; + u16 __agg_vars3; +#endif +#if defined(__BIG_ENDIAN) + u16 __aux2_th; + u16 aux2_val; +#elif defined(__LITTLE_ENDIAN) + u16 aux2_val; + u16 __aux2_th; +#endif +}; + +/* + * The fcoe extra aggregative context section of Tstorm + */ +struct tstorm_fcoe_extra_ag_context_section { + u32 __agg_val1; +#if defined(__BIG_ENDIAN) + u8 __tcp_agg_vars2; + u8 __agg_val3; + u16 __agg_val2; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val2; + u8 __agg_val3; + u8 __tcp_agg_vars2; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val5; + u8 __agg_val6; + u8 __tcp_agg_vars3; +#elif defined(__LITTLE_ENDIAN) + u8 __tcp_agg_vars3; + u8 __agg_val6; + u16 __agg_val5; +#endif + u32 __lcq_prod; + u32 rtt_seq; + u32 rtt_time; + u32 __reserved66; + u32 wnd_right_edge; + u32 tcp_agg_vars1; +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 +#define 
TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9) +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) +#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) +#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 + u32 snd_max; + u32 __lcq_cons; + u32 __reserved2; +}; + +/* + * The fcoe aggregative context of Tstorm + */ +struct tstorm_fcoe_ag_context { +#if defined(__BIG_ENDIAN) + u16 ulp_credit; + u8 agg_vars1; +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 +#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) +#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define 
TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4) +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4 +#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6) +#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6 +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7) +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7 + u16 ulp_credit; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val4; + u16 agg_vars2; +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 +#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) +#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 +#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) +#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 +#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) +#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 +#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) +#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 +#elif defined(__LITTLE_ENDIAN) + u16 agg_vars2; +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0) +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0 +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1) +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1 +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2) +#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2 +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4) +#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4 +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6) +#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6 +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8) +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8 +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10) +#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10 +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11) +#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11 +#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12) +#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 +#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13) +#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 +#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14) +#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 +#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15) +#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 + u16 __agg_val4; +#endif + struct tstorm_fcoe_extra_ag_context_section __extra_section; +}; + + + +/* + * The tcp aggregative context section of Tstorm + */ +struct 
tstorm_tcp_tcp_ag_context_section { + u32 __agg_val1; +#if defined(__BIG_ENDIAN) + u8 __tcp_agg_vars2; + u8 __agg_val3; + u16 __agg_val2; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val2; + u8 __agg_val3; + u8 __tcp_agg_vars2; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val5; + u8 __agg_val6; + u8 __tcp_agg_vars3; +#elif defined(__LITTLE_ENDIAN) + u8 __tcp_agg_vars3; + u8 __agg_val6; + u16 __agg_val5; +#endif + u32 snd_nxt; + u32 rtt_seq; + u32 rtt_time; + u32 wnd_right_edge_local; + u32 wnd_right_edge; + u32 tcp_agg_vars1; +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18 +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19 +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20 +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21 +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22) +#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24 +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28) +#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28 + u32 snd_max; + u32 snd_una; + u32 __reserved2; +}; + +/* + * The iscsi aggregative context of Tstorm + */ +struct tstorm_iscsi_ag_context { +#if defined(__BIG_ENDIAN) + u16 ulp_credit; + u8 agg_vars1; +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define 
TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7) +#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7 + u16 ulp_credit; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val4; + u16 agg_vars2; +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 +#elif defined(__LITTLE_ENDIAN) + u16 agg_vars2; +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0) +#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0 +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1) +#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2) +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF 
(0x3<<4) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) +#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11) +#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11 +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12) +#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12 +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13) +#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13 +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) +#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) +#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 + u16 __agg_val4; +#endif + struct tstorm_tcp_tcp_ag_context_section tcp; +}; + + + +/* + * The fcoe aggregative context of Ustorm + */ +struct ustorm_fcoe_ag_context { +#if defined(__BIG_ENDIAN) + u8 __aux_counter_flags; + u8 agg_vars2; +#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 agg_vars1; +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 agg_vars2; +#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 __aux_counter_flags; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_usage; + u8 agg_misc2; + u16 
pbf_tx_seq_ack; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_tx_seq_ack; + u8 agg_misc2; + u8 cdu_usage; +#endif + u32 agg_misc4; +#if defined(__BIG_ENDIAN) + u8 agg_val3_th; + u8 agg_val3; + u16 agg_misc3; +#elif defined(__LITTLE_ENDIAN) + u16 agg_misc3; + u8 agg_val3; + u8 agg_val3_th; +#endif + u32 expired_task_id; + u32 agg_misc4_th; +#if defined(__BIG_ENDIAN) + u16 cq_prod; + u16 cq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 cq_cons; + u16 cq_prod; +#endif +#if defined(__BIG_ENDIAN) + u16 __reserved2; + u8 decision_rules; +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 + u8 decision_rule_enable_bits; +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 decision_rule_enable_bits; +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0 +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4) +#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4 +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5) +#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5 +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 decision_rules; +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0) +#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0 +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6) +#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6 +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7 + u16 __reserved2; +#endif +}; + + +/* + * The iscsi aggregative context of Ustorm + */ +struct ustorm_iscsi_ag_context { +#if defined(__BIG_ENDIAN) + u8 __aux_counter_flags; + u8 agg_vars2; +#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 +#define 
__USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 agg_vars1; +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6 + u8 agg_vars2; +#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0 +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2 +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4) +#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7 + u8 __aux_counter_flags; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_usage; + u8 agg_misc2; + u16 __cq_local_comp_itt_val; +#elif defined(__LITTLE_ENDIAN) + u16 __cq_local_comp_itt_val; + u8 agg_misc2; + u8 cdu_usage; +#endif + u32 agg_misc4; +#if defined(__BIG_ENDIAN) + u8 agg_val3_th; + u8 agg_val3; + u16 agg_misc3; +#elif defined(__LITTLE_ENDIAN) + u16 agg_misc3; + u8 agg_val3; + u8 agg_val3_th; +#endif + u32 agg_val1; + u32 agg_misc4_th; +#if defined(__BIG_ENDIAN) + u16 agg_val2_th; + u16 agg_val2; +#elif defined(__LITTLE_ENDIAN) + u16 agg_val2; + u16 agg_val2_th; +#endif +#if defined(__BIG_ENDIAN) + u16 __reserved2; + u8 decision_rules; +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 + u8 decision_rule_enable_bits; +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define 
USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 decision_rule_enable_bits; +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0) +#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0 +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1) +#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1 +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2) +#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2 +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3) +#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3 +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4) +#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4 +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5) +#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5 +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6) +#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 decision_rules; +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) +#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) +#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) +#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 + u16 __reserved2; +#endif +}; + + +/* + * The fcoe aggregative context section of Xstorm + */ +struct xstorm_fcoe_extra_ag_context_section { +#if defined(__BIG_ENDIAN) + u8 tcp_agg_vars1; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 + u8 __reserved_da_cnt; + u16 __mtu; +#elif defined(__LITTLE_ENDIAN) + u16 __mtu; + u8 __reserved_da_cnt; + u8 tcp_agg_vars1; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) 
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7 +#endif + u32 snd_nxt; + u32 __xfrqe_bd_addr_lo; + u32 __xfrqe_bd_addr_hi; + u32 __xfrqe_data1; +#if defined(__BIG_ENDIAN) + u8 __agg_val8_th; + u8 __tx_dest; + u16 tcp_agg_vars2; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 tcp_agg_vars2; +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define 
XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 + u8 __tx_dest; + u8 __agg_val8_th; +#endif + u32 __sq_base_addr_lo; + u32 __sq_base_addr_hi; + u32 __xfrq_base_addr_lo; + u32 __xfrq_base_addr_hi; +#if defined(__BIG_ENDIAN) + u16 __xfrq_cons; + u16 __xfrq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 __xfrq_prod; + u16 __xfrq_cons; +#endif +#if defined(__BIG_ENDIAN) + u8 __tcp_agg_vars5; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars3; + u8 __reserved_force_pure_ack_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __reserved_force_pure_ack_cnt; + u8 __tcp_agg_vars3; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars5; +#endif + u32 __tcp_agg_vars6; +#if defined(__BIG_ENDIAN) + u16 __xfrqe_mng; + u16 __tcp_agg_vars7; +#elif defined(__LITTLE_ENDIAN) + u16 __tcp_agg_vars7; + u16 __xfrqe_mng; +#endif + u32 __xfrqe_data0; + u32 __agg_val10_th; +#if defined(__BIG_ENDIAN) + u16 __reserved3; + u8 __reserved2; + u8 __da_only_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __da_only_cnt; + u8 __reserved2; + u16 __reserved3; +#endif +}; + +/* + * The fcoe aggregative context of Xstorm + */ +struct xstorm_fcoe_ag_context { +#if defined(__BIG_ENDIAN) + u16 agg_val1; + u8 agg_vars1; +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7 + u8 __state; +#elif defined(__LITTLE_ENDIAN) + u8 __state; + u8 agg_vars1; +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define 
XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7 + u16 agg_val1; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 __agg_vars4; + u8 agg_vars3; +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6 + u8 agg_vars2; +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars2; +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 agg_vars3; +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6 + u8 __agg_vars4; + u8 cdu_reserved; +#endif + u32 more_to_send; +#if defined(__BIG_ENDIAN) + u16 agg_vars5; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14) +#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14 + u16 sq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 sq_cons; + u16 agg_vars5; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14) +#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14 +#endif + struct xstorm_fcoe_extra_ag_context_section __extra_section; +#if defined(__BIG_ENDIAN) + u16 agg_vars7; +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE 
(0x7<<0) +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4) +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10 +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15) +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15 + u8 agg_val3_th; + u8 agg_vars6; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3) +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars6; +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3) +#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6) +#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6 + u8 agg_val3_th; + u16 agg_vars7; +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4) +#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4 +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10) +#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10 +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15) +#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15 +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val11_th; + u16 __agg_val11; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val11; + u16 __agg_val11_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __reserved1; + u8 __agg_val6_th; + u16 __agg_val9; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val9; + u8 __agg_val6_th; + u8 __reserved1; +#endif +#if defined(__BIG_ENDIAN) + u16 confq_cons; + u16 confq_prod; +#elif 
defined(__LITTLE_ENDIAN) + u16 confq_prod; + u16 confq_cons; +#endif + u32 agg_vars8; +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0 +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24) +#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 __cache_wqe_db; + u16 sq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 sq_prod; + u16 __cache_wqe_db; +#endif +#if defined(__BIG_ENDIAN) + u8 agg_val3; + u8 agg_val6; + u8 agg_val5_th; + u8 agg_val5; +#elif defined(__LITTLE_ENDIAN) + u8 agg_val5; + u8 agg_val5_th; + u8 agg_val6; + u8 agg_val3; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_misc1; + u16 agg_limit1; +#elif defined(__LITTLE_ENDIAN) + u16 agg_limit1; + u16 __agg_misc1; +#endif + u32 completion_seq; + u32 confq_pbl_base_lo; + u32 confq_pbl_base_hi; +}; + + + +/* + * The tcp aggregative context section of Xstorm + */ +struct xstorm_tcp_tcp_ag_context_section { +#if defined(__BIG_ENDIAN) + u8 tcp_agg_vars1; +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 + u8 __da_cnt; + u16 mss; +#elif defined(__LITTLE_ENDIAN) + u16 mss; + u8 __da_cnt; + u8 tcp_agg_vars1; +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7 +#endif + u32 snd_nxt; + u32 tx_wnd; + u32 snd_una; + u32 local_adv_wnd; +#if defined(__BIG_ENDIAN) + u8 __agg_val8_th; + u8 __tx_dest; + u16 tcp_agg_vars2; +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define 
__XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 tcp_agg_vars2; +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14 + u8 __tx_dest; + u8 __agg_val8_th; +#endif + u32 ack_to_far_end; + u32 rto_timer; + u32 ka_timer; + u32 ts_to_echo; +#if defined(__BIG_ENDIAN) + u16 __agg_val7_th; + u16 __agg_val7; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val7; + u16 __agg_val7_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __tcp_agg_vars5; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars3; + u8 __force_pure_ack_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __force_pure_ack_cnt; + u8 __tcp_agg_vars3; + u8 __tcp_agg_vars4; + u8 __tcp_agg_vars5; +#endif + u32 tcp_agg_vars6; +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1 
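+/*
+ * Note on the MASK/SHIFT define pairs used throughout these HSI
+ * definitions: each mask value (e.g. 0x3<<14) is already positioned at
+ * the field's bit offset, and the companion *_SHIFT constant gives that
+ * offset.  A typical read of such a packed field is
+ *	val = (vars & FIELD) >> FIELD_SHIFT;
+ * and a typical read-modify-write is
+ *	vars = (vars & ~FIELD) | ((val << FIELD_SHIFT) & FIELD);
+ * where FIELD/FIELD_SHIFT are placeholders for any of the define pairs
+ * in this header.
+ */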
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24 +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26) +#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30 +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31) +#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31 +#if defined(__BIG_ENDIAN) + u16 __agg_misc6; + u16 __tcp_agg_vars7; +#elif defined(__LITTLE_ENDIAN) + u16 __tcp_agg_vars7; + u16 __agg_misc6; +#endif + u32 __agg_val10; + u32 __agg_val10_th; +#if defined(__BIG_ENDIAN) + u16 __reserved3; + u8 __reserved2; + u8 __da_only_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 __da_only_cnt; + u8 __reserved2; + u16 __reserved3; +#endif +}; + +/* + * The iscsi aggregative context of Xstorm + */ +struct xstorm_iscsi_ag_context { +#if defined(__BIG_ENDIAN) + u16 agg_val1; + u8 agg_vars1; +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) 
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u16 agg_val1; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 __agg_vars4; + u8 agg_vars3; +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 agg_vars2; +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars2; +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7) +#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7 + u8 agg_vars3; +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 __agg_vars4; + u8 cdu_reserved; +#endif + u32 more_to_send; +#if defined(__BIG_ENDIAN) + u16 agg_vars5; +#define 
XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 + u16 sq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 sq_cons; + u16 agg_vars5; +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14 +#endif + struct xstorm_tcp_tcp_ag_context_section tcp; +#if defined(__BIG_ENDIAN) + u16 agg_vars7; +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 + u8 agg_val3_th; + u8 agg_vars6; +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars6; +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6 + u8 agg_val3_th; + u16 agg_vars7; +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define 
__XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val11_th; + u16 __gen_data; +#elif defined(__LITTLE_ENDIAN) + u16 __gen_data; + u16 __agg_val11_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __reserved1; + u8 __agg_val6_th; + u16 __agg_val9; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val9; + u8 __agg_val6_th; + u8 __reserved1; +#endif +#if defined(__BIG_ENDIAN) + u16 hq_prod; + u16 hq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 hq_cons; + u16 hq_prod; +#endif + u32 agg_vars8; +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0 +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24) +#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 r2tq_prod; + u16 sq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 sq_prod; + u16 r2tq_prod; +#endif +#if defined(__BIG_ENDIAN) + u8 agg_val3; + u8 agg_val6; + u8 agg_val5_th; + u8 agg_val5; +#elif defined(__LITTLE_ENDIAN) + u8 agg_val5; + u8 agg_val5_th; + u8 agg_val6; + u8 agg_val3; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_misc1; + u16 agg_limit1; +#elif defined(__LITTLE_ENDIAN) + u16 agg_limit1; + u16 __agg_misc1; +#endif + u32 hq_cons_tcp_seq; + u32 exp_stat_sn; + u32 rst_seq_num; +}; + + +/* + * The L5cm aggregative context of XStorm + */ +struct xstorm_l5cm_ag_context { +#if defined(__BIG_ENDIAN) + u16 agg_val1; + u8 agg_vars1; +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u8 state; +#elif defined(__LITTLE_ENDIAN) + u8 state; + u8 agg_vars1; +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0) +#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1 +#define 
XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) +#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5) +#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6 +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7) +#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7 + u16 agg_val1; +#endif +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 __agg_vars4; + u8 agg_vars3; +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 agg_vars2; +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars2; +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2) +#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2 +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4) +#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5 +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7) +#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7 + u8 agg_vars3; +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6) +#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6 + u8 __agg_vars4; + u8 cdu_reserved; +#endif + u32 more_to_send; +#if defined(__BIG_ENDIAN) + u16 agg_vars5; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 + u16 agg_val4_th; +#elif defined(__LITTLE_ENDIAN) + u16 agg_val4_th; + u16 agg_vars5; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2) +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2 +#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8) +#define 
XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14 +#endif + struct xstorm_tcp_tcp_ag_context_section tcp; +#if defined(__BIG_ENDIAN) + u16 agg_vars7; +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 + u8 agg_val3_th; + u8 agg_vars6; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 +#elif defined(__LITTLE_ENDIAN) + u8 agg_vars6; +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6 + u8 agg_val3_th; + u16 agg_vars7; +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0) +#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3) +#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3 +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4) +#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4 +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6) +#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6 +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8) +#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8 +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10) +#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10 +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11) +#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11 +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12) +#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12 +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13) +#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13 +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14) +#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14 +#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15) +#define 
__XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15 +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_val11_th; + u16 __gen_data; +#elif defined(__LITTLE_ENDIAN) + u16 __gen_data; + u16 __agg_val11_th; +#endif +#if defined(__BIG_ENDIAN) + u8 __reserved1; + u8 __agg_val6_th; + u16 __agg_val9; +#elif defined(__LITTLE_ENDIAN) + u16 __agg_val9; + u8 __agg_val6_th; + u8 __reserved1; +#endif +#if defined(__BIG_ENDIAN) + u16 agg_val2_th; + u16 agg_val2; +#elif defined(__LITTLE_ENDIAN) + u16 agg_val2; + u16 agg_val2_th; +#endif + u32 agg_vars8; +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0) +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0 +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24) +#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 agg_misc0; + u16 agg_val4; +#elif defined(__LITTLE_ENDIAN) + u16 agg_val4; + u16 agg_misc0; +#endif +#if defined(__BIG_ENDIAN) + u8 agg_val3; + u8 agg_val6; + u8 agg_val5_th; + u8 agg_val5; +#elif defined(__LITTLE_ENDIAN) + u8 agg_val5; + u8 agg_val5_th; + u8 agg_val6; + u8 agg_val3; +#endif +#if defined(__BIG_ENDIAN) + u16 __agg_misc1; + u16 agg_limit1; +#elif defined(__LITTLE_ENDIAN) + u16 agg_limit1; + u16 __agg_misc1; +#endif + u32 completion_seq; + u32 agg_misc4; + u32 rst_seq_num; +}; + +/* + * ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_info { + __le16 aborted_task_id; + __le16 reserved0; + __le32 reserved1; +}; + + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_rsp_union { + u8 r_ctl; + u8 rsrv[3]; + __le32 abts_rsp_payload[7]; +}; + + +/* + * 4 regs size $$KEEP_ENDIANNESS$$ + */ +struct fcoe_bd_ctx { + __le32 buf_addr_hi; + __le32 buf_addr_lo; + __le16 buf_len; + __le16 rsrv0; + __le16 flags; + __le16 rsrv1; +}; + + +/* + * FCoE cached sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cached_sge_ctx { + struct regpair cur_buf_addr; + __le16 cur_buf_rem; + __le16 second_buf_rem; + struct regpair second_buf_addr; +}; + + +/* + * Cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cleanup_info { + __le16 cleaned_task_id; + __le16 rolled_tx_seq_cnt; + __le32 rolled_tx_data_offset; +}; + + +/* + * Fcp RSP flags $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + +/* + * Fcp RSP payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_payload { + struct regpair reserved0; + __le32 fcp_resid; + u8 scsi_status_code; + struct fcoe_fcp_rsp_flags fcp_flags; + __le16 retry_delay_timer; + __le32 fcp_rsp_len; + __le32 fcp_sns_len; +}; + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_union { + struct fcoe_fcp_rsp_payload payload; + struct regpair reserved0; +}; + +/* + * FC header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_hdr { + u8 s_id[3]; + u8 cs_ctl; + u8 d_id[3]; + u8 r_ctl; + __le16 seq_cnt; + u8 df_ctl; + u8 seq_id; + u8 f_ctl[3]; + u8 type; + 
__le32 parameters; + __le16 rx_id; + __le16 ox_id; +}; + +/* + * FC header union $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mp_rsp_union { + struct fcoe_fc_hdr fc_hdr; + __le32 mp_payload_len; + __le32 rsrv; +}; + +/* + * Completion information $$KEEP_ENDIANNESS$$ + */ +union fcoe_comp_flow_info { + struct fcoe_fcp_rsp_union fcp_rsp; + struct fcoe_abts_rsp_union abts_rsp; + struct fcoe_mp_rsp_union mp_rsp; + __le32 opaque[8]; +}; + + +/* + * External ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_abts_info { + __le32 rsrv0[6]; + struct fcoe_abts_info ctx; +}; + + +/* + * External cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_cleanup_info { + __le32 rsrv0[6]; + struct fcoe_cleanup_info ctx; +}; + + +/* + * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fw_tx_seq_ctx { + __le32 data_offset; + __le16 seq_cnt; + __le16 rsrv0; +}; + +/* + * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_fw_tx_seq_ctx { + __le32 rsrv0[6]; + struct fcoe_fw_tx_seq_ctx ctx; +}; + + +/* + * FCoE multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mul_sges_ctx { + struct regpair cur_sge_addr; + __le16 cur_sge_off; + u8 cur_sge_idx; + u8 sgl_size; +}; + +/* + * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_mul_sges_ctx { + struct fcoe_mul_sges_ctx mul_sgl; + struct regpair rsrv0; +}; + + +/* + * FCP CMD payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + + + + + +/* + * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_xfr_rdy_payload { + __le32 burst_len; + __le32 data_ro; +}; + + +/* + * FC frame $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_frame { + struct fcoe_fc_hdr fc_hdr; + __le32 reserved0[2]; +}; + + + + +/* + * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ + */ +union fcoe_kcqe_params { + __le32 reserved0[4]; +}; + +/* + * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kcqe { + __le32 fcoe_conn_id; + __le32 completion_status; + __le32 fcoe_conn_context_id; + union fcoe_kcqe_params params; + __le16 qe_self_seq; + u8 op_code; + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 +}; + + + +/* + * FCoE KWQE header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_header { + u8 op_code; + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init1 { + __le16 num_tasks; + struct fcoe_kwqe_header hdr; + __le32 task_list_pbl_addr_lo; + __le32 task_list_pbl_addr_hi; + __le32 dummy_buffer_addr_lo; + __le32 dummy_buffer_addr_hi; + __le16 sq_num_wqes; + __le16 rq_num_wqes; + __le16 rq_buffer_log_size; + __le16 cq_num_wqes; + __le16 mtu; + u8 num_sessions_log; + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware 
init request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init2 { + u8 hsi_major_version; + u8 hsi_minor_version; + struct fcoe_kwqe_header hdr; + __le32 hash_tbl_pbl_addr_lo; + __le32 hash_tbl_pbl_addr_hi; + __le32 t2_hash_tbl_addr_lo; + __le32 t2_hash_tbl_addr_hi; + __le32 t2_ptr_hash_tbl_addr_lo; + __le32 t2_ptr_hash_tbl_addr_hi; + __le32 free_list_count; +}; + +/* + * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init3 { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 error_bit_map_lo; + __le32 error_bit_map_hi; + u8 perf_config; + u8 reserved21[3]; + __le32 reserved2[4]; +}; + +/* + * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload1 { + __le16 fcoe_conn_id; + struct fcoe_kwqe_header hdr; + __le32 sq_addr_lo; + __le32 sq_addr_hi; + __le32 rq_pbl_addr_lo; + __le32 rq_pbl_addr_hi; + __le32 rq_first_pbe_addr_lo; + __le32 rq_first_pbe_addr_hi; + __le16 rq_prod; + __le16 reserved0; +}; + +/* + * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload2 { + __le16 tx_max_fc_pay_len; + struct fcoe_kwqe_header hdr; + __le32 cq_addr_lo; + __le32 cq_addr_hi; + __le32 xferq_addr_lo; + __le32 xferq_addr_hi; + __le32 conn_db_addr_lo; + __le32 conn_db_addr_hi; + __le32 reserved1; +}; + +/* + * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload3 { + __le16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 + struct fcoe_kwqe_header hdr; + u8 s_id[3]; + u8 tx_max_conc_seqs_c3; + u8 d_id[3]; + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 + __le32 reserved; + __le32 confq_first_pbe_addr_lo; + __le32 confq_first_pbe_addr_hi; + __le16 tx_total_conc_seqs; + __le16 rx_max_fc_pay_len; + __le16 rx_total_conc_seqs; + u8 rx_max_conc_seqs_c3; + u8 rx_open_seqs_exch_c3; +}; + +/* + * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload4 { + u8 e_d_tov_timer_val; + u8 reserved2; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u8 dst_mac_addr_hi[2]; + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + __le32 lcq_addr_lo; + __le32 lcq_addr_hi; + __le32 confq_pbl_base_addr_lo; + __le32 confq_pbl_base_addr_hi; +}; + +/* + * FCoE connection enable request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_enable_disable { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 
src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + u8 dst_mac_addr_hi[2]; + __le16 reserved1; + u8 s_id[3]; + u8 vlan_flag; + u8 d_id[3]; + u8 reserved3; + __le32 context_id; + __le32 conn_id; + __le32 reserved4; +}; + +/* + * FCoE connection destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 context_id; + __le32 conn_id; + __le32 reserved1[5]; +}; + +/* + * FCoe destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 reserved1[7]; +}; + +/* + * FCoe statistics request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_stat { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 stat_params_addr_lo; + __le32 stat_params_addr_hi; + __le32 reserved1[5]; +}; + +/* + * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ + */ +union fcoe_kwqe { + struct fcoe_kwqe_init1 init1; + struct fcoe_kwqe_init2 init2; + struct fcoe_kwqe_init3 init3; + struct fcoe_kwqe_conn_offload1 conn_offload1; + struct fcoe_kwqe_conn_offload2 conn_offload2; + struct fcoe_kwqe_conn_offload3 conn_offload3; + struct fcoe_kwqe_conn_offload4 conn_offload4; + struct fcoe_kwqe_conn_enable_disable conn_enable_disable; + struct fcoe_kwqe_conn_destroy conn_destroy; + struct fcoe_kwqe_destroy destroy; + struct fcoe_kwqe_stat statistics; +}; + + + + + + + + + + + + + + + + +/* + * TX SGL context $$KEEP_ENDIANNESS$$ + */ +union fcoe_sgl_union_ctx { + struct fcoe_cached_sge_ctx cached_sge; + struct fcoe_ext_mul_sges_ctx sgl; + __le32 opaque[5]; +}; + +/* + * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ + */ +struct fcoe_read_flow_info { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0[3]; +}; + + +/* + * Fcoe stat context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_s_stat_ctx { + u8 flags; +#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) +#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 +#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) +#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_S_STAT_CTX_P_RJT (0x1<<4) +#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 +#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) +#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 +#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) +#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 +}; + +/* + * Fcoe rx seq context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_seq_ctx { + u8 seq_id; + struct fcoe_s_stat_ctx s_stat; + __le16 seq_cnt; + __le32 low_exp_ro; + __le32 high_exp_ro; +}; + + +/* + * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ + */ +union fcoe_rx_wr_union_ctx { + struct fcoe_read_flow_info read_info; + union fcoe_comp_flow_info comp_info; + __le32 opaque[8]; +}; + + + +/* + * FCoE SQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_sqe { + __le16 wqe; +#define FCOE_SQE_TASK_ID (0x7FFF<<0) +#define FCOE_SQE_TASK_ID_SHIFT 0 +#define FCOE_SQE_TOGGLE_BIT (0x1<<15) +#define FCOE_SQE_TOGGLE_BIT_SHIFT 15 +}; + + + +/* + * 14 regs $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_only { + union 
fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0; +}; + +/* + * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ + */ +union fcoe_tx_wr_rx_rd_union_ctx { + struct fcoe_fc_frame tx_frame; + struct fcoe_fcp_cmd_payload fcp_cmd; + struct fcoe_ext_cleanup_info cleanup; + struct fcoe_ext_abts_info abts; + struct fcoe_ext_fw_tx_seq_ctx tx_seq; + __le32 opaque[8]; +}; + +/* + * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd_const { + u8 init_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 + u8 tx_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_COMP_TRNS (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_COMP_TRNS_SHIFT 7 + __le16 rsrv3; + __le32 verify_tx_seq; +}; + +/* + * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd { + union fcoe_tx_wr_rx_rd_union_ctx union_ctx; + struct fcoe_tce_tx_wr_rx_rd_const const_ctx; +}; + +/* + * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_const { + __le32 data_2_trns; + __le32 init_flags; +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 +}; + +/* + * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_var { + __le16 rx_flags; +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12) +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 + __le16 rx_id; + struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; +}; + +/* + * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd { + struct fcoe_tce_rx_wr_tx_rd_const const_ctx; + struct fcoe_tce_rx_wr_tx_rd_var var_ctx; +}; + +/* + * tce_rx_only $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_only { + struct fcoe_rx_seq_ctx rx_seq_ctx; + union fcoe_rx_wr_union_ctx union_ctx; +}; + +/* + * task_ctx_entry 
$$KEEP_ENDIANNESS$$ + */ +struct fcoe_task_ctx_entry { + struct fcoe_tce_tx_only txwr_only; + struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; + struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; + struct fcoe_tce_rx_only rxwr_only; +}; + + + + + + + + + + +/* + * FCoE XFRQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_xfrqe { + __le16 wqe; +#define FCOE_XFRQE_TASK_ID (0x7FFF<<0) +#define FCOE_XFRQE_TASK_ID_SHIFT 0 +#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) +#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * Cached SGEs $$KEEP_ENDIANNESS$$ + */ +struct common_fcoe_sgl { + struct fcoe_bd_ctx sge[3]; +}; + + +/* + * FCoE SQ\XFRQ element + */ +struct fcoe_cached_wqe { + struct fcoe_sqe sqe; + struct fcoe_xfrqe xfrqe; +}; + + +/* + * FCoE connection enable\disable params passed by driver to FW in FCoE enable + * ramrod $$KEEP_ENDIANNESS$$ + */ +struct fcoe_conn_enable_disable_ramrod_params { + struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe; +}; + + +/* + * FCoE connection offload params passed by driver to FW in FCoE offload ramrod + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_conn_offload_ramrod_params { + struct fcoe_kwqe_conn_offload1 offload_kwqe1; + struct fcoe_kwqe_conn_offload2 offload_kwqe2; + struct fcoe_kwqe_conn_offload3 offload_kwqe3; + struct fcoe_kwqe_conn_offload4 offload_kwqe4; +}; + + +struct ustorm_fcoe_mng_ctx { +#if defined(__BIG_ENDIAN) + u8 mid_seq_proc_flag; + u8 tce_in_cam_flag; + u8 tce_on_ior_flag; + u8 en_cached_tce_flag; +#elif defined(__LITTLE_ENDIAN) + u8 en_cached_tce_flag; + u8 tce_on_ior_flag; + u8 tce_in_cam_flag; + u8 mid_seq_proc_flag; +#endif +#if defined(__BIG_ENDIAN) + u8 tce_cam_addr; + u8 cached_conn_flag; + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 cached_conn_flag; + u8 tce_cam_addr; +#endif +#if defined(__BIG_ENDIAN) + u16 dma_tce_ram_addr; + u16 tce_ram_addr; +#elif defined(__LITTLE_ENDIAN) + u16 tce_ram_addr; + u16 dma_tce_ram_addr; +#endif +#if defined(__BIG_ENDIAN) + u16 ox_id; + u16 wr_done_seq; +#elif defined(__LITTLE_ENDIAN) + u16 wr_done_seq; + u16 ox_id; +#endif + struct regpair task_addr; +}; + +/* + * Parameters initialized during offloaded according to FLOGI/PLOGI/PRLI and + * used in FCoE context section + */ +struct ustorm_fcoe_params { +#if defined(__BIG_ENDIAN) + u16 fcoe_conn_id; + u16 flags; +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) +#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 +#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) +#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 +#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) +#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u16 flags; +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0) +#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0 +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1) +#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1 +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3) +#define 
USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3 +#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4) +#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4 +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5) +#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5 +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6) +#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6 +#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7) +#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7 + u16 fcoe_conn_id; +#endif +#if defined(__BIG_ENDIAN) + u8 hc_csdm_byte_en; + u8 func_id; + u8 port_id; + u8 vnic_id; +#elif defined(__LITTLE_ENDIAN) + u8 vnic_id; + u8 port_id; + u8 func_id; + u8 hc_csdm_byte_en; +#endif +#if defined(__BIG_ENDIAN) + u16 rx_total_conc_seqs; + u16 rx_max_fc_pay_len; +#elif defined(__LITTLE_ENDIAN) + u16 rx_max_fc_pay_len; + u16 rx_total_conc_seqs; +#endif +#if defined(__BIG_ENDIAN) + u8 task_pbe_idx_off; + u8 task_in_page_log_size; + u16 rx_max_conc_seqs; +#elif defined(__LITTLE_ENDIAN) + u16 rx_max_conc_seqs; + u8 task_in_page_log_size; + u8 task_pbe_idx_off; +#endif +}; + +/* + * FCoE 16-bits index structure + */ +struct fcoe_idx16_fields { + u16 fields; +#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0) +#define FCOE_IDX16_FIELDS_IDX_SHIFT 0 +#define FCOE_IDX16_FIELDS_MSB (0x1<<15) +#define FCOE_IDX16_FIELDS_MSB_SHIFT 15 +}; + +/* + * FCoE 16-bits index union + */ +union fcoe_idx16_field_union { + struct fcoe_idx16_fields fields; + u16 val; +}; + +/* + * Parameters required for placement according to SGL + */ +struct ustorm_fcoe_data_place_mng { +#if defined(__BIG_ENDIAN) + u16 sge_off; + u8 num_sges; + u8 sge_idx; +#elif defined(__LITTLE_ENDIAN) + u8 sge_idx; + u8 num_sges; + u16 sge_off; +#endif +}; + +/* + * Parameters required for placement according to SGL + */ +struct ustorm_fcoe_data_place { + struct ustorm_fcoe_data_place_mng cached_mng; + struct fcoe_bd_ctx cached_sge[2]; +}; + +/* + * TX processing shall write and RX processing shall read from this section + */ +union fcoe_u_tce_tx_wr_rx_rd_union { + struct fcoe_abts_info abts; + struct fcoe_cleanup_info cleanup; + struct fcoe_fw_tx_seq_ctx tx_seq_ctx; + u32 opaque[2]; +}; + +/* + * TX processing shall write and RX processing shall read from this section + */ +struct fcoe_u_tce_tx_wr_rx_rd { + union fcoe_u_tce_tx_wr_rx_rd_union union_ctx; + struct fcoe_tce_tx_wr_rx_rd_const const_ctx; +}; + +struct ustorm_fcoe_tce { + struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd; + struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; + struct fcoe_tce_rx_only rxwr; +}; + +struct ustorm_fcoe_cache_ctx { + u32 rsrv0; + struct ustorm_fcoe_data_place data_place; + struct ustorm_fcoe_tce tce; +}; + +/* + * Ustorm FCoE Storm Context + */ +struct ustorm_fcoe_st_context { + struct ustorm_fcoe_mng_ctx mng_ctx; + struct ustorm_fcoe_params fcoe_params; + struct regpair cq_base_addr; + struct regpair rq_pbl_base; + struct regpair rq_cur_page_addr; + struct regpair confq_pbl_base_addr; + struct regpair conn_db_base; + struct regpair xfrq_base_addr; + struct regpair lcq_base_addr; +#if defined(__BIG_ENDIAN) + union fcoe_idx16_field_union rq_cons; + union fcoe_idx16_field_union rq_prod; +#elif defined(__LITTLE_ENDIAN) + union fcoe_idx16_field_union rq_prod; + union fcoe_idx16_field_union rq_cons; +#endif +#if defined(__BIG_ENDIAN) + u16 xfrq_prod; + u16 cq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 cq_cons; + u16 xfrq_prod; +#endif +#if defined(__BIG_ENDIAN) + u16 lcq_cons; + u16 hc_cram_address; +#elif defined(__LITTLE_ENDIAN) + u16 hc_cram_address; + u16 lcq_cons; +#endif +#if defined(__BIG_ENDIAN) + u16 
sq_xfrq_lcq_confq_size; + u16 confq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 confq_prod; + u16 sq_xfrq_lcq_confq_size; +#endif +#if defined(__BIG_ENDIAN) + u8 hc_csdm_agg_int; + u8 rsrv2; + u8 available_rqes; + u8 sp_q_flush_cnt; +#elif defined(__LITTLE_ENDIAN) + u8 sp_q_flush_cnt; + u8 available_rqes; + u8 rsrv2; + u8 hc_csdm_agg_int; +#endif +#if defined(__BIG_ENDIAN) + u16 num_pend_tasks; + u16 pbf_ack_ram_addr; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_ack_ram_addr; + u16 num_pend_tasks; +#endif + struct ustorm_fcoe_cache_ctx cache_ctx; +}; + +/* + * The FCoE non-aggregative context of Tstorm + */ +struct tstorm_fcoe_st_context { + struct regpair reserved0; + struct regpair reserved1; +}; + +/* + * Ethernet context section + */ +struct xstorm_fcoe_eth_context_section { +#if defined(__BIG_ENDIAN) + u8 remote_addr_4; + u8 remote_addr_5; + u8 local_addr_0; + u8 local_addr_1; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; +#endif +#if defined(__BIG_ENDIAN) + u8 remote_addr_0; + u8 remote_addr_1; + u8 remote_addr_2; + u8 remote_addr_3; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved_vlan_type; + u16 params; +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 +#elif defined(__LITTLE_ENDIAN) + u16 params; +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 + u16 reserved_vlan_type; +#endif +#if defined(__BIG_ENDIAN) + u8 local_addr_2; + u8 local_addr_3; + u8 local_addr_4; + u8 local_addr_5; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; +#endif +}; + +/* + * Flags used in FCoE context section - 1 byte + */ +struct xstorm_fcoe_context_flags { + u8 flags; +#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6 +#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7) +#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7 +}; + +struct xstorm_fcoe_tce { + struct fcoe_tce_tx_only txwr; + struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; +}; + +/* + * FCP_DATA parameters required for transmission + */ +struct xstorm_fcoe_fcp_data { + u32 io_rem; +#if defined(__BIG_ENDIAN) + u16 cached_sge_off; + u8 cached_num_sges; + u8 cached_sge_idx; +#elif defined(__LITTLE_ENDIAN) + u8 cached_sge_idx; + u8 
cached_num_sges; + u16 cached_sge_off; +#endif + u32 buf_addr_hi_0; + u32 buf_addr_lo_0; +#if defined(__BIG_ENDIAN) + u16 num_of_pending_tasks; + u16 buf_len_0; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len_0; + u16 num_of_pending_tasks; +#endif + u32 buf_addr_hi_1; + u32 buf_addr_lo_1; +#if defined(__BIG_ENDIAN) + u16 task_pbe_idx_off; + u16 buf_len_1; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len_1; + u16 task_pbe_idx_off; +#endif + u32 buf_addr_hi_2; + u32 buf_addr_lo_2; +#if defined(__BIG_ENDIAN) + u16 ox_id; + u16 buf_len_2; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len_2; + u16 ox_id; +#endif +}; + +/* + * vlan configuration + */ +struct xstorm_fcoe_vlan_conf { + u8 vlan_conf; +#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0) +#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0 +#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3) +#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3 +#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4) +#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4 +}; + +/* + * FCoE 16-bits vlan structure + */ +struct fcoe_vlan_fields { + u16 fields; +#define FCOE_VLAN_FIELDS_VID (0xFFF<<0) +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI (0x1<<12) +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI (0x7<<13) +#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 +}; + +/* + * FCoE 16-bits vlan union + */ +union fcoe_vlan_field_union { + struct fcoe_vlan_fields fields; + u16 val; +}; + +/* + * FCoE 16-bits vlan, vif union + */ +union fcoe_vlan_vif_field_union { + union fcoe_vlan_field_union vlan; + u16 vif; +}; + +/* + * FCoE context section + */ +struct xstorm_fcoe_context_section { +#if defined(__BIG_ENDIAN) + u8 cs_ctl; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 cs_ctl; +#endif +#if defined(__BIG_ENDIAN) + u8 rctl; + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 rctl; +#endif +#if defined(__BIG_ENDIAN) + u16 sq_xfrq_lcq_confq_size; + u16 tx_max_fc_pay_len; +#elif defined(__LITTLE_ENDIAN) + u16 tx_max_fc_pay_len; + u16 sq_xfrq_lcq_confq_size; +#endif + u32 lcq_prod; +#if defined(__BIG_ENDIAN) + u8 port_id; + u8 func_id; + u8 seq_id; + struct xstorm_fcoe_context_flags tx_flags; +#elif defined(__LITTLE_ENDIAN) + struct xstorm_fcoe_context_flags tx_flags; + u8 seq_id; + u8 func_id; + u8 port_id; +#endif +#if defined(__BIG_ENDIAN) + u16 mtu; + u8 func_mode; + u8 vnic_id; +#elif defined(__LITTLE_ENDIAN) + u8 vnic_id; + u8 func_mode; + u16 mtu; +#endif + struct regpair confq_curr_page_addr; + struct fcoe_cached_wqe cached_wqe[8]; + struct regpair lcq_base_addr; + struct xstorm_fcoe_tce tce; + struct xstorm_fcoe_fcp_data fcp_data; +#if defined(__BIG_ENDIAN) + u8 tx_max_conc_seqs_c3; + u8 vlan_flag; + u8 dcb_val; + u8 data_pb_cmd_size; +#elif defined(__LITTLE_ENDIAN) + u8 data_pb_cmd_size; + u8 dcb_val; + u8 vlan_flag; + u8 tx_max_conc_seqs_c3; +#endif +#if defined(__BIG_ENDIAN) + u16 fcoe_tx_stat_params_ram_addr; + u16 fcoe_tx_fc_seq_ram_addr; +#elif defined(__LITTLE_ENDIAN) + u16 fcoe_tx_fc_seq_ram_addr; + u16 fcoe_tx_stat_params_ram_addr; +#endif +#if defined(__BIG_ENDIAN) + u8 fcp_cmd_line_credit; + u8 eth_hdr_size; + u16 pbf_addr; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_addr; + u8 eth_hdr_size; + u8 fcp_cmd_line_credit; +#endif +#if defined(__BIG_ENDIAN) + union fcoe_vlan_vif_field_union multi_func_val; + u8 page_log_size; + struct xstorm_fcoe_vlan_conf orig_vlan_conf; +#elif defined(__LITTLE_ENDIAN) + struct xstorm_fcoe_vlan_conf orig_vlan_conf; + u8 page_log_size; + union fcoe_vlan_vif_field_union 
multi_func_val; +#endif +#if defined(__BIG_ENDIAN) + u16 fcp_cmd_frame_size; + u16 pbf_addr_ff; +#elif defined(__LITTLE_ENDIAN) + u16 pbf_addr_ff; + u16 fcp_cmd_frame_size; +#endif +#if defined(__BIG_ENDIAN) + u8 vlan_num; + u8 cos; + u8 cache_xfrq_cons; + u8 cache_sq_cons; +#elif defined(__LITTLE_ENDIAN) + u8 cache_sq_cons; + u8 cache_xfrq_cons; + u8 cos; + u8 vlan_num; +#endif + u32 verify_tx_seq; +}; + +/* + * Xstorm FCoE Storm Context + */ +struct xstorm_fcoe_st_context { + struct xstorm_fcoe_eth_context_section eth; + struct xstorm_fcoe_context_section fcoe; +}; + +/* + * Fcoe connection context + */ +struct fcoe_context { + struct ustorm_fcoe_st_context ustorm_st_context; + struct tstorm_fcoe_st_context tstorm_st_context; + struct xstorm_fcoe_ag_context xstorm_ag_context; + struct tstorm_fcoe_ag_context tstorm_ag_context; + struct ustorm_fcoe_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct xstorm_fcoe_st_context xstorm_st_context; +}; + +/* + * FCoE init params passed by driver to FW in FCoE init ramrod + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_init_ramrod_params { + struct fcoe_kwqe_init1 init_kwqe1; + struct fcoe_kwqe_init2 init_kwqe2; + struct fcoe_kwqe_init3 init_kwqe3; + struct regpair eq_pbl_base; + __le32 eq_pbl_size; + __le32 reserved2; + __le16 eq_prod; + __le16 sb_num; + u8 sb_id; + u8 reserved0; + __le16 reserved1; +}; + +/* + * FCoE statistics params buffer passed by driver to FW in FCoE statistics + * ramrod $$KEEP_ENDIANNESS$$ + */ +struct fcoe_stat_ramrod_params { + struct fcoe_kwqe_stat stat_kwqe; +}; + +/* + * CQ DB CQ producer and pending completion counter + */ +struct iscsi_cq_db_prod_pnd_cmpltn_cnt { +#if defined(__BIG_ENDIAN) + u16 cntr; + u16 prod; +#elif defined(__LITTLE_ENDIAN) + u16 prod; + u16 cntr; +#endif +}; + +/* + * CQ DB pending completion ITT array + */ +struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr { + struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8]; +}; + +/* + * Cstorm CQ sequence to notify array, updated by driver + */ +struct iscsi_cq_db_sqn_2_notify_arr { + u16 sqn[8]; +}; + +/* + * Cstorm iSCSI Storm Context + */ +struct cstorm_iscsi_st_context { + struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr; + struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr; + struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr; + struct regpair hq_pbl_base; + struct regpair hq_curr_pbe; + struct regpair task_pbl_base; + struct regpair cq_db_base; +#if defined(__BIG_ENDIAN) + u16 hq_bd_itt; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + u16 hq_bd_itt; +#endif + u32 hq_bd_data_segment_len; + u32 hq_bd_buffer_offset; +#if defined(__BIG_ENDIAN) + u8 rsrv; + u8 cq_proc_en_bit_map; + u8 cq_pend_comp_itt_valid_bit_map; + u8 hq_bd_opcode; +#elif defined(__LITTLE_ENDIAN) + u8 hq_bd_opcode; + u8 cq_pend_comp_itt_valid_bit_map; + u8 cq_proc_en_bit_map; + u8 rsrv; +#endif + u32 hq_tcp_seq; +#if defined(__BIG_ENDIAN) + u16 flags; +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) +#define 
CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 + u16 hq_cons; +#elif defined(__LITTLE_ENDIAN) + u16 hq_cons; + u16 flags; +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0) +#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0 +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1) +#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3 +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4) +#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4 +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5) +#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5 +#endif + struct regpair rsrv1; +}; + + +/* + * SCSI read/write SQ WQE + */ +struct iscsi_cmd_pdu_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 expected_data_transfer_length; + u32 cmd_sn; + u32 exp_stat_sn; + u32 scsi_command_block[4]; +}; + + +/* + * Buffer per connection, used in Tstorm + */ +struct iscsi_conn_buf { + struct regpair reserved[8]; +}; + + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_rq_db { + struct regpair pbl_base; + struct regpair curr_pbe; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_r2tq_db { + struct regpair pbl_base; + struct regpair curr_pbe; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_cq_db { +#if defined(__BIG_ENDIAN) + u16 cq_sn; + u16 prod; +#elif defined(__LITTLE_ENDIAN) + u16 prod; + u16 cq_sn; +#endif + struct regpair curr_pbe; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct rings_db { + struct 
ustorm_iscsi_rq_db rq; + struct ustorm_iscsi_r2tq_db r2tq; + struct ustorm_iscsi_cq_db cq[8]; +#if defined(__BIG_ENDIAN) + u16 rq_prod; + u16 r2tq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 r2tq_prod; + u16 rq_prod; +#endif + struct regpair cq_pbl_base; +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct ustorm_iscsi_placement_db { + u32 sgl_base_lo; + u32 sgl_base_hi; + u32 local_sge_0_address_hi; + u32 local_sge_0_address_lo; +#if defined(__BIG_ENDIAN) + u16 curr_sge_offset; + u16 local_sge_0_size; +#elif defined(__LITTLE_ENDIAN) + u16 local_sge_0_size; + u16 curr_sge_offset; +#endif + u32 local_sge_1_address_hi; + u32 local_sge_1_address_lo; +#if defined(__BIG_ENDIAN) + u8 exp_padding_2b; + u8 nal_len_3b; + u16 local_sge_1_size; +#elif defined(__LITTLE_ENDIAN) + u16 local_sge_1_size; + u8 nal_len_3b; + u8 exp_padding_2b; +#endif +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 local_sge_index_2b; + u16 reserved7; +#elif defined(__LITTLE_ENDIAN) + u16 reserved7; + u8 local_sge_index_2b; + u8 sgl_size; +#endif + u32 rem_pdu; + u32 place_db_bitfield_1; +#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0) +#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0 +#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24) +#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24 + u32 place_db_bitfield_2; +#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0) +#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0 +#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24) +#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24 + u32 nal; +#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0) +#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0 +#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24) +#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24 +}; + +/* + * Ustorm iSCSI Storm Context + */ +struct ustorm_iscsi_st_context { + u32 exp_stat_sn; + u32 exp_data_sn; + struct rings_db ring; + struct regpair task_pbl_base; + struct regpair tce_phy_addr; + struct ustorm_iscsi_placement_db place_db; + u32 reserved8; + u32 rem_rcv_len; +#if defined(__BIG_ENDIAN) + u16 hdr_itt; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + u16 hdr_itt; +#endif + u32 nal_bytes; +#if defined(__BIG_ENDIAN) + u8 hdr_second_byte_union; + u8 bitfield_0; +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0) +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2) +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2 +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3) +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3 + u8 task_pdu_cache_index; + u8 task_pbe_cache_index; +#elif defined(__LITTLE_ENDIAN) + u8 task_pbe_cache_index; + u8 task_pdu_cache_index; + u8 bitfield_0; +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0) +#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) +#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2) +#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2 +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3) +#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3 + u8 hdr_second_byte_union; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 reserved2; + u8 acDecrement; +#elif defined(__LITTLE_ENDIAN) + u8 acDecrement; + u8 reserved2; + u16 reserved3; +#endif + u32 
task_stat; +#if defined(__BIG_ENDIAN) + u8 hdr_opcode; + u8 num_cqs; + u16 reserved5; +#elif defined(__LITTLE_ENDIAN) + u16 reserved5; + u8 num_cqs; + u8 hdr_opcode; +#endif + u32 negotiated_rx; +#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0) +#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24) +#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24 + u32 negotiated_rx_and_flags; +#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0) +#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0 +#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24) +#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24 +#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25) +#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25 +#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26) +#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26 +#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27) +#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27 +#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28) +#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28 +#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29) +#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29 +#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31) +#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31 +}; + +/* + * TCP context region, shared in TOE, RDMA and ISCSI + */ +struct tstorm_tcp_st_context_section { + u32 flags1; +#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0) +#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0 +#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24) +#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24 +#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25) +#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25 +#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26) +#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26 +#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27) +#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27 +#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28) +#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28 +#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29) +#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29 +#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30) +#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30 +#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31) +#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31 + u32 flags2; +#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0) +#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0 +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24) +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24 +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25) +#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27 +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28) +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28 +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS 
(0x1<<29) +#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30 +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31) +#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31 +#if defined(__BIG_ENDIAN) + u16 mss; + u8 tcp_sm_state; + u8 rto_exp; +#elif defined(__LITTLE_ENDIAN) + u8 rto_exp; + u8 tcp_sm_state; + u16 mss; +#endif + u32 rcv_nxt; + u32 timestamp_recent; + u32 timestamp_recent_time; + u32 cwnd; + u32 ss_thresh; + u32 cwnd_accum; + u32 prev_seg_seq; + u32 expected_rel_seq; + u32 recover; +#if defined(__BIG_ENDIAN) + u8 retransmit_count; + u8 ka_max_probe_count; + u8 persist_probe_count; + u8 ka_probe_count; +#elif defined(__LITTLE_ENDIAN) + u8 ka_probe_count; + u8 persist_probe_count; + u8 ka_max_probe_count; + u8 retransmit_count; +#endif +#if defined(__BIG_ENDIAN) + u8 statistics_counter_id; + u8 ooo_support_mode; + u8 snd_wnd_scale; + u8 dup_ack_count; +#elif defined(__LITTLE_ENDIAN) + u8 dup_ack_count; + u8 snd_wnd_scale; + u8 ooo_support_mode; + u8 statistics_counter_id; +#endif + u32 retransmit_start_time; + u32 ka_timeout; + u32 ka_interval; + u32 isle_start_seq; + u32 isle_end_seq; +#if defined(__BIG_ENDIAN) + u16 second_isle_address; + u16 recent_seg_wnd; +#elif defined(__LITTLE_ENDIAN) + u16 recent_seg_wnd; + u16 second_isle_address; +#endif +#if defined(__BIG_ENDIAN) + u8 max_isles_ever_happened; + u8 isles_number; + u16 last_isle_address; +#elif defined(__LITTLE_ENDIAN) + u16 last_isle_address; + u8 isles_number; + u8 max_isles_ever_happened; +#endif + u32 max_rt_time; +#if defined(__BIG_ENDIAN) + u16 lsb_mac_address; + u16 vlan_id; +#elif defined(__LITTLE_ENDIAN) + u16 vlan_id; + u16 lsb_mac_address; +#endif +#if defined(__BIG_ENDIAN) + u16 msb_mac_address; + u16 mid_mac_address; +#elif defined(__LITTLE_ENDIAN) + u16 mid_mac_address; + u16 msb_mac_address; +#endif + u32 rightmost_received_seq; +}; + +/* + * Termination variables + */ +struct iscsi_term_vars { + u8 BitMap; +#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0) +#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0 +#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4) +#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4 +#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5) +#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5 +#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6) +#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6 +#define ISCSI_TERM_VARS_RSRV (0x1<<7) +#define ISCSI_TERM_VARS_RSRV_SHIFT 7 +}; + +/* + * iSCSI context region, used only in iSCSI + */ +struct tstorm_iscsi_st_context_section { + u32 nalPayload; + u32 b2nh; +#if defined(__BIG_ENDIAN) + u16 rq_cons; + u8 flags; +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 
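/*
 * Illustrative sketch of how the MASK/SHIFT define pairs in these HSI
 * headers are typically consumed; the variable name "sec" is hypothetical
 * and the snippet is not part of the imported firmware interface.
 *
 *	struct tstorm_iscsi_st_context_section sec = { 0 };
 *
 *	u8 hdr_digest_en =
 *		(sec.flags & TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN) >>
 *		TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT;
 *
 *	sec.flags &= ~TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN;
 *	sec.flags |= 1 << TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT;
 */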
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 + u8 hdr_bytes_2_fetch; +#elif defined(__LITTLE_ENDIAN) + u8 hdr_bytes_2_fetch; + u8 flags; +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5 +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7) +#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7 + u16 rq_cons; +#endif + struct regpair rq_db_phy_addr; +#if defined(__BIG_ENDIAN) + struct iscsi_term_vars term_vars; + u8 rsrv1; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + u8 rsrv1; + struct iscsi_term_vars term_vars; +#endif + u32 process_nxt; +}; + +/* + * The iSCSI non-aggregative context of Tstorm + */ +struct tstorm_iscsi_st_context { + struct tstorm_tcp_st_context_section tcp; + struct tstorm_iscsi_st_context_section iscsi; +}; + +/* + * Ethernet context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_eth_context_section { +#if defined(__BIG_ENDIAN) + u8 remote_addr_4; + u8 remote_addr_5; + u8 local_addr_0; + u8 local_addr_1; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; +#endif +#if defined(__BIG_ENDIAN) + u8 remote_addr_0; + u8 remote_addr_1; + u8 remote_addr_2; + u8 remote_addr_3; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved_vlan_type; + u16 vlan_params; +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 +#elif defined(__LITTLE_ENDIAN) + u16 vlan_params; +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0) +#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0 +#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12) +#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12 +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13) +#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13 + u16 reserved_vlan_type; +#endif +#if defined(__BIG_ENDIAN) + u8 local_addr_2; + u8 local_addr_3; + u8 local_addr_4; + u8 local_addr_5; +#elif defined(__LITTLE_ENDIAN) + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; +#endif +}; + +/* + * IpV4 context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_ip_v4_context_section { +#if defined(__BIG_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_id; + u16 __pbf_hdr_cmd_rsvd_flags_offset; +#elif defined(__LITTLE_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_flags_offset; + u16 __pbf_hdr_cmd_rsvd_id; +#endif +#if defined(__BIG_ENDIAN) + u8 __pbf_hdr_cmd_rsvd_ver_ihl; + u8 tos; + u16 __pbf_hdr_cmd_rsvd_length; +#elif 
defined(__LITTLE_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_length; + u8 tos; + u8 __pbf_hdr_cmd_rsvd_ver_ihl; +#endif + u32 ip_local_addr; +#if defined(__BIG_ENDIAN) + u8 ttl; + u8 __pbf_hdr_cmd_rsvd_protocol; + u16 __pbf_hdr_cmd_rsvd_csum; +#elif defined(__LITTLE_ENDIAN) + u16 __pbf_hdr_cmd_rsvd_csum; + u8 __pbf_hdr_cmd_rsvd_protocol; + u8 ttl; +#endif + u32 __pbf_hdr_cmd_rsvd_1; + u32 ip_remote_addr; +}; + +/* + * context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_padded_ip_v4_context_section { + struct xstorm_ip_v4_context_section ip_v4; + u32 reserved1[4]; +}; + +/* + * IpV6 context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_ip_v6_context_section { +#if defined(__BIG_ENDIAN) + u16 pbf_hdr_cmd_rsvd_payload_len; + u8 pbf_hdr_cmd_rsvd_nxt_hdr; + u8 hop_limit; +#elif defined(__LITTLE_ENDIAN) + u8 hop_limit; + u8 pbf_hdr_cmd_rsvd_nxt_hdr; + u16 pbf_hdr_cmd_rsvd_payload_len; +#endif + u32 priority_flow_label; +#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0) +#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0 +#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20) +#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20 +#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28) +#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28 + u32 ip_local_addr_lo_hi; + u32 ip_local_addr_lo_lo; + u32 ip_local_addr_hi_hi; + u32 ip_local_addr_hi_lo; + u32 ip_remote_addr_lo_hi; + u32 ip_remote_addr_lo_lo; + u32 ip_remote_addr_hi_hi; + u32 ip_remote_addr_hi_lo; +}; + +union xstorm_ip_context_section_types { + struct xstorm_padded_ip_v4_context_section padded_ip_v4; + struct xstorm_ip_v6_context_section ip_v6; +}; + +/* + * TCP context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_tcp_context_section { + u32 snd_max; +#if defined(__BIG_ENDIAN) + u16 remote_port; + u16 local_port; +#elif defined(__LITTLE_ENDIAN) + u16 local_port; + u16 remote_port; +#endif +#if defined(__BIG_ENDIAN) + u8 original_nagle_1b; + u8 ts_enabled; + u16 tcp_params; +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8) +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8 +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9) +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 +#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11) +#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11 +#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) +#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 tcp_params; +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) +#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8) +#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8 +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9) +#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) +#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 +#define 
XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11) +#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11 +#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) +#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) +#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) +#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 + u8 ts_enabled; + u8 original_nagle_1b; +#endif +#if defined(__BIG_ENDIAN) + u16 pseudo_csum; + u16 window_scaling_factor; +#elif defined(__LITTLE_ENDIAN) + u16 window_scaling_factor; + u16 pseudo_csum; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved2; + u8 statistics_counter_id; + u8 statistics_params; +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 +#elif defined(__LITTLE_ENDIAN) + u8 statistics_params; +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0 +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1) +#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2) +#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2 + u8 statistics_counter_id; + u16 reserved2; +#endif + u32 ts_time_diff; + u32 __next_timer_expir; +}; + +/* + * Common context section, shared in TOE, RDMA and ISCSI + */ +struct xstorm_common_context_section { + struct xstorm_eth_context_section ethernet; + union xstorm_ip_context_section_types ip_union; + struct xstorm_tcp_context_section tcp; +#if defined(__BIG_ENDIAN) + u8 __dcb_val; + u8 flags; +#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) +#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 + u8 reserved; + u8 ip_version_1b; +#elif defined(__LITTLE_ENDIAN) + u8 ip_version_1b; + u8 reserved; + u8 flags; +#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0) +#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0 +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1) +#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1 +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4) +#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4 +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5) +#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5 + u8 __dcb_val; +#endif +}; + +/* + * Flags used in ISCSI context section + */ +struct xstorm_iscsi_context_flags { + u8 flags; +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2) +#define 
XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5 +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6) +#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6 +#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7) +#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7 +}; + +struct iscsi_task_context_entry_x { + u32 data_out_buffer_offset; + u32 itt; + u32 data_sn; +}; + +struct iscsi_task_context_entry_xuc_x_write_only { + u32 tx_r2t_sn; +}; + +struct iscsi_task_context_entry_xuc_xu_write_both { + u32 sgl_base_lo; + u32 sgl_base_hi; +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 sge_index; + u16 sge_offset; +#elif defined(__LITTLE_ENDIAN) + u16 sge_offset; + u8 sge_index; + u8 sgl_size; +#endif +}; + +/* + * iSCSI context section + */ +struct xstorm_iscsi_context_section { + u32 first_burst_length; + u32 max_send_pdu_length; + struct regpair sq_pbl_base; + struct regpair sq_curr_pbe; + struct regpair hq_pbl_base; + struct regpair hq_curr_pbe_base; + struct regpair r2tq_pbl_base; + struct regpair r2tq_curr_pbe_base; + struct regpair task_pbl_base; +#if defined(__BIG_ENDIAN) + u16 data_out_count; + struct xstorm_iscsi_context_flags flags; + u8 task_pbl_cache_idx; +#elif defined(__LITTLE_ENDIAN) + u8 task_pbl_cache_idx; + struct xstorm_iscsi_context_flags flags; + u16 data_out_count; +#endif + u32 seq_more_2_send; + u32 pdu_more_2_send; + struct iscsi_task_context_entry_x temp_tce_x; + struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr; + struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr; + struct regpair lun; + u32 exp_data_transfer_len_ttt; + u32 pdu_data_2_rxmit; + u32 rxmit_bytes_2_dr; +#if defined(__BIG_ENDIAN) + u16 rxmit_sge_offset; + u16 hq_rxmit_cons; +#elif defined(__LITTLE_ENDIAN) + u16 hq_rxmit_cons; + u16 rxmit_sge_offset; +#endif +#if defined(__BIG_ENDIAN) + u16 r2tq_cons; + u8 rxmit_flags; +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7 + u8 rxmit_sge_idx; +#elif defined(__LITTLE_ENDIAN) + u8 rxmit_sge_idx; + u8 rxmit_flags; +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2) +#define 
XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5 +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7) +#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7 + u16 r2tq_cons; +#endif + u32 hq_rxmit_tcp_seq; +}; + +/* + * Xstorm iSCSI Storm Context + */ +struct xstorm_iscsi_st_context { + struct xstorm_common_context_section common; + struct xstorm_iscsi_context_section iscsi; +}; + +/* + * Iscsi connection context + */ +struct iscsi_context { + struct ustorm_iscsi_st_context ustorm_st_context; + struct tstorm_iscsi_st_context tstorm_st_context; + struct xstorm_iscsi_ag_context xstorm_ag_context; + struct tstorm_iscsi_ag_context tstorm_ag_context; + struct cstorm_iscsi_ag_context cstorm_ag_context; + struct ustorm_iscsi_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct regpair upb_context; + struct xstorm_iscsi_st_context xstorm_st_context; + struct regpair xpb_context; + struct cstorm_iscsi_st_context cstorm_st_context; +}; + + +/* + * PDU header of an iSCSI DATA-OUT + */ +struct iscsi_data_pdu_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 ttt; + u32 rsrv2; + u32 exp_stat_sn; + u32 rsrv3; + u32 data_sn; + u32 buffer_offset; + u32 rsrv4; +}; + + +/* + * PDU header of an iSCSI login request + */ +struct iscsi_login_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_min; +#elif defined(__LITTLE_ENDIAN) + u8 version_min; + u8 version_max; + u8 op_attr; +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG 
(0x3<<2) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif + u32 itt; +#if defined(__BIG_ENDIAN) + u16 cid; + u16 rsrv1; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv1; + u16 cid; +#endif + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv2[4]; +}; + +/* + * PDU header of an iSCSI logout request + */ +struct iscsi_logout_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0 +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + u32 rsrv2[2]; + u32 itt; +#if defined(__BIG_ENDIAN) + u16 cid; + u16 rsrv1; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv1; + u16 cid; +#endif + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv3[4]; +}; + +/* + * PDU header of an iSCSI TMF request + */ +struct iscsi_tmf_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 referenced_task_tag; + u32 cmd_sn; + u32 exp_stat_sn; + u32 ref_cmd_sn; + u32 exp_data_sn; + u32 rsrv2[2]; +}; + +/* + * PDU header of an iSCSI Text request + */ +struct 
iscsi_text_req_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 ttt; + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv3[4]; +}; + +/* + * PDU header of an iSCSI Nop-Out + */ +struct iscsi_nop_out_hdr_little_endian { +#if defined(__BIG_ENDIAN) + u8 opcode; + u8 op_attr; +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7 + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u8 op_attr; +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7 + u8 opcode; +#endif + u32 data_fields; +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24) +#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24 + struct regpair lun; + u32 itt; + u32 ttt; + u32 cmd_sn; + u32 exp_stat_sn; + u32 rsrv3[4]; +}; + +/* + * iscsi pdu headers in little endian form. 
+ */ +union iscsi_pdu_headers_little_endian { + u32 fullHeaderSize[12]; + struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr; + struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr; + struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr; + struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr; + struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr; + struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr; + struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr; +}; + +struct iscsi_hq_bd { + union iscsi_pdu_headers_little_endian pdu_header; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u16 lcl_cmp_flg; +#elif defined(__LITTLE_ENDIAN) + u16 lcl_cmp_flg; + u16 reserved1; +#endif + u32 sgl_base_lo; + u32 sgl_base_hi; +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 sge_index; + u16 sge_offset; +#elif defined(__LITTLE_ENDIAN) + u16 sge_offset; + u8 sge_index; + u8 sgl_size; +#endif +}; + + +/* + * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$ + */ +struct iscsi_l2_ooo_data { + __le32 iscsi_cid; + u8 drop_isle; + u8 drop_size; + u8 ooo_opcode; + u8 ooo_isle; + u8 reserved[8]; +}; + + + + + + +struct iscsi_task_context_entry_xuc_c_write_only { + u32 total_data_acked; +}; + +struct iscsi_task_context_r2t_table_entry { + u32 ttt; + u32 desired_data_len; +}; + +struct iscsi_task_context_entry_xuc_u_write_only { + u32 exp_r2t_sn; + struct iscsi_task_context_r2t_table_entry r2t_table[4]; +#if defined(__BIG_ENDIAN) + u16 data_in_count; + u8 cq_id; + u8 valid_1b; +#elif defined(__LITTLE_ENDIAN) + u8 valid_1b; + u8 cq_id; + u16 data_in_count; +#endif +}; + +struct iscsi_task_context_entry_xuc { + struct iscsi_task_context_entry_xuc_c_write_only write_c; + u32 exp_data_transfer_len; + struct iscsi_task_context_entry_xuc_x_write_only write_x; + u32 lun_lo; + struct iscsi_task_context_entry_xuc_xu_write_both write_xu; + u32 lun_hi; + struct iscsi_task_context_entry_xuc_u_write_only write_u; +}; + +struct iscsi_task_context_entry_u { + u32 exp_r2t_buff_offset; + u32 rem_rcv_len; + u32 exp_data_sn; +}; + +struct iscsi_task_context_entry { + struct iscsi_task_context_entry_x tce_x; +#if defined(__BIG_ENDIAN) + u16 data_out_count; + u16 rsrv0; +#elif defined(__LITTLE_ENDIAN) + u16 rsrv0; + u16 data_out_count; +#endif + struct iscsi_task_context_entry_xuc tce_xuc; + struct iscsi_task_context_entry_u tce_u; + u32 rsrv1[7]; +}; + + + + + + + + +struct iscsi_task_context_entry_xuc_x_init_only { + struct regpair lun; + u32 exp_data_transfer_len; +}; + + + + + + + + + + + + + + + + + +/* + * ipv6 structure + */ +struct ip_v6_addr { + u32 ip_addr_lo_lo; + u32 ip_addr_lo_hi; + u32 ip_addr_hi_lo; + u32 ip_addr_hi_hi; +}; + + + +/* + * l5cm- connection identification params + */ +struct l5cm_conn_addr_params { + u32 pmtu; +#if defined(__BIG_ENDIAN) + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_0; + u8 remote_addr_1; + u8 remote_addr_2; + u8 remote_addr_3; +#endif +#if defined(__BIG_ENDIAN) + u16 params; +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0) +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0 +#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1) +#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1 + u8 remote_addr_5; + u8 remote_addr_4; +#elif defined(__LITTLE_ENDIAN) + u8 remote_addr_4; + u8 remote_addr_5; + u16 params; +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0) +#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0 +#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1) +#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1 
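/*
 * Sketch of filling in the connection address block, with hedged
 * assumptions: the IP_VERSION bit is taken here to select IPv6 (its exact
 * encoding is defined by the firmware), and 3260, the well-known iSCSI
 * port, is used purely as an example value. The mirrored
 * __BIG_ENDIAN/__LITTLE_ENDIAN member order used throughout these headers
 * keeps the 32-bit word layout seen by the chip identical on either host
 * byte order.
 *
 *	struct l5cm_conn_addr_params addr = { 0 };
 *
 *	addr.pmtu = 1500;
 *	addr.local_tcp_port = 3260;
 *	addr.remote_tcp_port = 3260;
 *	addr.params |= 1 << L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT;
 */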
+#endif + struct ip_v6_addr local_ip_addr; + struct ip_v6_addr remote_ip_addr; + u32 ipv6_flow_label_20b; + u32 reserved1; +#if defined(__BIG_ENDIAN) + u16 remote_tcp_port; + u16 local_tcp_port; +#elif defined(__LITTLE_ENDIAN) + u16 local_tcp_port; + u16 remote_tcp_port; +#endif +}; + +/* + * l5cm-xstorm connection buffer + */ +struct l5cm_xstorm_conn_buffer { +#if defined(__BIG_ENDIAN) + u16 rsrv1; + u16 params; +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0) +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0 +#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1 +#elif defined(__LITTLE_ENDIAN) + u16 params; +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0) +#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0 +#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1 + u16 rsrv1; +#endif +#if defined(__BIG_ENDIAN) + u16 mss; + u16 pseudo_header_checksum; +#elif defined(__LITTLE_ENDIAN) + u16 pseudo_header_checksum; + u16 mss; +#endif + u32 rcv_buf; + u32 rsrv2; + struct regpair context_addr; +}; + +/* + * l5cm-tstorm connection buffer + */ +struct l5cm_tstorm_conn_buffer { + u32 rsrv1[2]; +#if defined(__BIG_ENDIAN) + u16 params; +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0 +#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1 + u8 ka_max_probe_count; + u8 ka_enable; +#elif defined(__LITTLE_ENDIAN) + u8 ka_enable; + u8 ka_max_probe_count; + u16 params; +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0) +#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0 +#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1) +#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1 +#endif + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; +}; + +/* + * l5cm connection buffer for active side + */ +struct l5cm_active_conn_buffer { + struct l5cm_conn_addr_params conn_addr_buf; + struct l5cm_xstorm_conn_buffer xstorm_conn_buffer; + struct l5cm_tstorm_conn_buffer tstorm_conn_buffer; +}; + + + +/* + * The l5cm opaque buffer passed in add new connection ramrod passive side + */ +struct l5cm_hash_input_string { + u32 __opaque1; +#if defined(__BIG_ENDIAN) + u16 __opaque3; + u16 __opaque2; +#elif defined(__LITTLE_ENDIAN) + u16 __opaque2; + u16 __opaque3; +#endif + struct ip_v6_addr __opaque4; + struct ip_v6_addr __opaque5; + u32 __opaque6; + u32 __opaque7[5]; +}; + + +/* + * syn cookie component + */ +struct l5cm_syn_cookie_comp { + u32 __opaque; +}; + +/* + * data related to listeners of a TCP port + */ +struct l5cm_port_listener_data { + u8 params; +#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0) +#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0 +#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1) +#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1 +#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5) +#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5 +#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6) +#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6 +#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7) +#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7 +}; + +/* + * Opaque structure passed from U to X when final ack arrives + */ +struct l5cm_opaque_buf { + u32 __opaque1; + u32 __opaque2; + u32 __opaque3; + u32 __opaque4; + struct l5cm_syn_cookie_comp __opaque5; +#if defined(__BIG_ENDIAN) + u16 rsrv2; + u8 rsrv; + struct l5cm_port_listener_data __opaque6; +#elif defined(__LITTLE_ENDIAN) + struct 
l5cm_port_listener_data __opaque6; + u8 rsrv; + u16 rsrv2; +#endif +}; + + +/* + * l5cm slow path element + */ +struct l5cm_packet_size { + u32 size; + u32 rsrv; +}; + + +/* + * The final-ack union structure in PCS entry after final ack arrived + */ +struct l5cm_pcse_ack { + struct l5cm_xstorm_conn_buffer tx_socket_params; + struct l5cm_opaque_buf opaque_buf; + struct l5cm_tstorm_conn_buffer rx_socket_params; +}; + + +/* + * The syn union structure in PCS entry after syn arrived + */ +struct l5cm_pcse_syn { + struct l5cm_opaque_buf opaque_buf; + u32 rsrv[12]; +}; + + +/* + * pcs entry data for passive connections + */ +struct l5cm_pcs_attributes { +#if defined(__BIG_ENDIAN) + u16 pcs_id; + u8 status; + u8 flags; +#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) +#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 +#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) +#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0) +#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0 +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1) +#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1 +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2) +#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2 +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3) +#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3 +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4) +#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4 +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5) +#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5 +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6) +#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6 +#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7) +#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7 + u8 status; + u16 pcs_id; +#endif +}; + + +union l5cm_seg_params { + struct l5cm_pcse_syn syn_seg_params; + struct l5cm_pcse_ack ack_seg_params; +}; + +/* + * pcs entry data for passive connections + */ +struct l5cm_pcs_hdr { + struct l5cm_hash_input_string hash_input_string; + struct l5cm_conn_addr_params conn_addr_buf; + u32 cid; + u32 hash_result; + union l5cm_seg_params seg_params; + struct l5cm_pcs_attributes att; +#if defined(__BIG_ENDIAN) + u16 rsrv; + u16 rx_seg_size; +#elif defined(__LITTLE_ENDIAN) + u16 rx_seg_size; + u16 rsrv; +#endif +}; + +/* + * pcs entry for passive connections + */ +struct l5cm_pcs_entry { + struct l5cm_pcs_hdr hdr; + u8 rx_segment[1516]; +}; + + + + +/* + * l5cm connection parameters + */ +union l5cm_reduce_param_union { + u32 opaque1; + u32 opaque2; +}; + +/* + * l5cm connection parameters + */ +struct l5cm_reduce_conn { + union l5cm_reduce_param_union opaque1; + u32 opaque2; +}; + +/* + * l5cm slow path element + */ +union l5cm_specific_data { + u8 protocol_data[8]; + struct regpair phy_address; + struct l5cm_packet_size packet_size; + struct l5cm_reduce_conn reduced_conn; +}; + +/* + * l5 slow path 
element + */ +struct l5cm_spe { + struct spe_hdr hdr; + union l5cm_specific_data data; +}; + + + + +/* + * Termination variables + */ +struct l5cm_term_vars { + u8 BitMap; +#define L5CM_TERM_VARS_TCP_STATE (0xF<<0) +#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0 +#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4) +#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4 +#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5) +#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5 +#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6) +#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6 +#define L5CM_TERM_VARS_RSRV (0x1<<7) +#define L5CM_TERM_VARS_RSRV_SHIFT 7 +}; + + + + +/* + * Tstorm Tcp flags + */ +struct tstorm_l5cm_tcp_flags { + u16 flags; +#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0) +#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0 +#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12) +#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12 +#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13) +#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13 +#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14) +#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14 +}; + + +/* + * Xstorm Tcp flags + */ +struct xstorm_l5cm_tcp_flags { + u8 flags; +#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0) +#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0 +#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1) +#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1 +#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2) +#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2 +#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3) +#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3 +}; + + + +/* + * Out-of-order states + */ +enum tcp_ooo_event { + TCP_EVENT_ADD_PEN = 0, + TCP_EVENT_ADD_NEW_ISLE = 1, + TCP_EVENT_ADD_ISLE_RIGHT = 2, + TCP_EVENT_ADD_ISLE_LEFT = 3, + TCP_EVENT_JOIN = 4, + TCP_EVENT_NOP = 5, + MAX_TCP_OOO_EVENT +}; + + +/* + * OOO support modes + */ +enum tcp_tstorm_ooo { + TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0, + TCP_TSTORM_OOO_SEND_PURE_ACK = 1, + TCP_TSTORM_OOO_SUPPORTED = 2, + MAX_TCP_TSTORM_OOO +}; + + + + + + + + + +#endif /* __5710_HSI_CNIC_LE__ */ diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h new file mode 100644 index 000000000..ef6125b0e --- /dev/null +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -0,0 +1,371 @@ +/* cnic_if.h: QLogic cnic core network driver. + * + * Copyright (c) 2006-2014 Broadcom Corporation + * Copyright (c) 2014-2015 QLogic Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + + +#ifndef CNIC_IF_H +#define CNIC_IF_H + +#include "bnx2x/bnx2x_mfw_req.h" + +#define CNIC_MODULE_VERSION "2.5.21" +#define CNIC_MODULE_RELDATE "January 29, 2015" + +#define CNIC_ULP_RDMA 0 +#define CNIC_ULP_ISCSI 1 +#define CNIC_ULP_FCOE 2 +#define CNIC_ULP_L4 3 +#define MAX_CNIC_ULP_TYPE_EXT 3 +#define MAX_CNIC_ULP_TYPE 4 + +/* Use CPU native page size up to 16K for cnic ring sizes. 
*/ +#if (PAGE_SHIFT > 14) +#define CNIC_PAGE_BITS 14 +#else +#define CNIC_PAGE_BITS PAGE_SHIFT +#endif +#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS)) +#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE) +#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1)) + +struct kwqe { + u32 kwqe_op_flag; + +#define KWQE_QID_SHIFT 8 +#define KWQE_OPCODE_MASK 0x00ff0000 +#define KWQE_OPCODE_SHIFT 16 +#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) +#define KWQE_LAYER_MASK 0x70000000 +#define KWQE_LAYER_SHIFT 28 +#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28) +#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28) +#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28) +#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) +#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) +#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28) + + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; + u32 kwqe_info4; + u32 kwqe_info5; + u32 kwqe_info6; +}; + +struct kwqe_16 { + u32 kwqe_info0; + u32 kwqe_info1; + u32 kwqe_info2; + u32 kwqe_info3; +}; + +struct kcqe { + u32 kcqe_info0; + u32 kcqe_info1; + u32 kcqe_info2; + u32 kcqe_info3; + u32 kcqe_info4; + u32 kcqe_info5; + u32 kcqe_info6; + u32 kcqe_op_flag; + #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */ + #define KCQE_FLAGS_LAYER_MASK (0x7<<28) + #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28) + #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28) + #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28) + #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) + #define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28) + #define KCQE_FLAGS_NEXT (1<<31) + #define KCQE_FLAGS_OPCODE_MASK (0xff<<16) + #define KCQE_FLAGS_OPCODE_SHIFT (16) + #define KCQE_OPCODE(op) \ + (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT) +}; + +#define MAX_CNIC_CTL_DATA 64 +#define MAX_DRV_CTL_DATA 64 + +#define CNIC_CTL_STOP_CMD 1 +#define CNIC_CTL_START_CMD 2 +#define CNIC_CTL_COMPLETION_CMD 3 +#define CNIC_CTL_STOP_ISCSI_CMD 4 +#define CNIC_CTL_FCOE_STATS_GET_CMD 5 +#define CNIC_CTL_ISCSI_STATS_GET_CMD 6 + +#define DRV_CTL_IO_WR_CMD 0x101 +#define DRV_CTL_IO_RD_CMD 0x102 +#define DRV_CTL_CTX_WR_CMD 0x103 +#define DRV_CTL_CTXTBL_WR_CMD 0x104 +#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105 +#define DRV_CTL_START_L2_CMD 0x106 +#define DRV_CTL_STOP_L2_CMD 0x107 +#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c +#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d +#define DRV_CTL_ULP_REGISTER_CMD 0x10e +#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f + +struct cnic_ctl_completion { + u32 cid; + u8 opcode; + u8 error; +}; + +struct cnic_ctl_info { + int cmd; + union { + struct cnic_ctl_completion comp; + char bytes[MAX_CNIC_CTL_DATA]; + } data; +}; + +struct drv_ctl_spq_credit { + u32 credit_count; +}; + +struct drv_ctl_io { + u32 cid_addr; + u32 offset; + u32 data; + dma_addr_t dma_addr; +}; + +struct drv_ctl_l2_ring { + u32 client_id; + u32 cid; +}; + +struct drv_ctl_register_data { + int ulp_type; + struct fcoe_capabilities fcoe_features; +}; + +struct drv_ctl_info { + int cmd; + union { + struct drv_ctl_spq_credit credit; + struct drv_ctl_io io; + struct drv_ctl_l2_ring ring; + int ulp_type; + struct drv_ctl_register_data register_data; + char bytes[MAX_DRV_CTL_DATA]; + } data; +}; + +struct cnic_ops { + struct module *cnic_owner; + /* Calls to these functions are protected by RCU. When + * unregistering, we wait for any calls to complete before + * continuing. 
+ */ + int (*cnic_handler)(void *, void *); + int (*cnic_ctl)(void *, struct cnic_ctl_info *); +}; + +#define MAX_CNIC_VEC 8 + +struct cnic_irq { + unsigned int vector; + void *status_blk; + u32 status_blk_num; + u32 status_blk_num2; + u32 irq_flags; +#define CNIC_IRQ_FL_MSIX 0x00000001 +}; + +struct cnic_eth_dev { + struct module *drv_owner; + u32 drv_state; +#define CNIC_DRV_STATE_REGD 0x00000001 +#define CNIC_DRV_STATE_USING_MSIX 0x00000002 +#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004 +#define CNIC_DRV_STATE_NO_ISCSI 0x00000008 +#define CNIC_DRV_STATE_NO_FCOE 0x00000010 +#define CNIC_DRV_STATE_HANDLES_IRQ 0x00000020 + u32 chip_id; + u32 max_kwqe_pending; + struct pci_dev *pdev; + void __iomem *io_base; + void __iomem *io_base2; + const void *iro_arr; + + u32 ctx_tbl_offset; + u32 ctx_tbl_len; + int ctx_blk_size; + u32 starting_cid; + u32 max_iscsi_conn; + u32 max_fcoe_conn; + u32 max_rdma_conn; + u32 fcoe_init_cid; + u32 max_fcoe_exchanges; + u32 fcoe_wwn_port_name_hi; + u32 fcoe_wwn_port_name_lo; + u32 fcoe_wwn_node_name_hi; + u32 fcoe_wwn_node_name_lo; + + u16 iscsi_l2_client_id; + u16 iscsi_l2_cid; + u8 iscsi_mac[ETH_ALEN]; + + int num_irq; + struct cnic_irq irq_arr[MAX_CNIC_VEC]; + int (*drv_register_cnic)(struct net_device *, + struct cnic_ops *, void *); + int (*drv_unregister_cnic)(struct net_device *); + int (*drv_submit_kwqes_32)(struct net_device *, + struct kwqe *[], u32); + int (*drv_submit_kwqes_16)(struct net_device *, + struct kwqe_16 *[], u32); + int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); + unsigned long reserved1[2]; + union drv_info_to_mcp *addr_drv_info_to_mcp; +}; + +struct cnic_sockaddr { + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } local; + union { + struct sockaddr_in v4; + struct sockaddr_in6 v6; + } remote; +}; + +struct cnic_sock { + struct cnic_dev *dev; + void *context; + u32 src_ip[4]; + u32 dst_ip[4]; + u16 src_port; + u16 dst_port; + u16 vlan_id; + unsigned char old_ha[ETH_ALEN]; + unsigned char ha[ETH_ALEN]; + u32 mtu; + u32 cid; + u32 l5_cid; + u32 pg_cid; + int ulp_type; + + u32 ka_timeout; + u32 ka_interval; + u8 ka_max_probe_count; + u8 tos; + u8 ttl; + u8 snd_seq_scale; + u32 rcv_buf; + u32 snd_buf; + u32 seed; + + unsigned long tcp_flags; +#define SK_TCP_NO_DELAY_ACK 0x1 +#define SK_TCP_KEEP_ALIVE 0x2 +#define SK_TCP_NAGLE 0x4 +#define SK_TCP_TIMESTAMP 0x8 +#define SK_TCP_SACK 0x10 +#define SK_TCP_SEG_SCALING 0x20 + unsigned long flags; +#define SK_F_INUSE 0 +#define SK_F_OFFLD_COMPLETE 1 +#define SK_F_OFFLD_SCHED 2 +#define SK_F_PG_OFFLD_COMPLETE 3 +#define SK_F_CONNECT_START 4 +#define SK_F_IPV6 5 +#define SK_F_CLOSING 7 +#define SK_F_HW_ERR 8 + + atomic_t ref_count; + u32 state; + struct kwqe kwqe1; + struct kwqe kwqe2; + struct kwqe kwqe3; +}; + +struct cnic_dev { + struct net_device *netdev; + struct pci_dev *pcidev; + void __iomem *regview; + struct list_head list; + + int (*register_device)(struct cnic_dev *dev, int ulp_type, + void *ulp_ctx); + int (*unregister_device)(struct cnic_dev *dev, int ulp_type); + int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[], + u32 num_wqes); + int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[], + u32 num_wqes); + + int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **, + void *); + int (*cm_destroy)(struct cnic_sock *); + int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *); + int (*cm_abort)(struct cnic_sock *); + int (*cm_close)(struct cnic_sock *); + struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int 
ulp_type); + int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type, + char *data, u16 data_size); + unsigned long flags; +#define CNIC_F_CNIC_UP 1 +#define CNIC_F_BNX2_CLASS 3 +#define CNIC_F_BNX2X_CLASS 4 + atomic_t ref_count; + u8 mac_addr[ETH_ALEN]; + + int max_iscsi_conn; + int max_fcoe_conn; + int max_rdma_conn; + + int max_fcoe_exchanges; + + union drv_info_to_mcp *stats_addr; + struct fcoe_capabilities *fcoe_cap; + + void *cnic_priv; +}; + +#define CNIC_WR(dev, off, val) writel(val, dev->regview + off) +#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off) +#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off) +#define CNIC_RD(dev, off) readl(dev->regview + off) +#define CNIC_RD16(dev, off) readw(dev->regview + off) + +struct cnic_ulp_ops { + /* Calls to these functions are protected by RCU. When + * unregistering, we wait for any calls to complete before + * continuing. + */ + + void (*cnic_init)(struct cnic_dev *dev); + void (*cnic_exit)(struct cnic_dev *dev); + void (*cnic_start)(void *ulp_ctx); + void (*cnic_stop)(void *ulp_ctx); + void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[], + u32 num_cqes); + void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid); + void (*cm_connect_complete)(struct cnic_sock *); + void (*cm_close_complete)(struct cnic_sock *); + void (*cm_abort_complete)(struct cnic_sock *); + void (*cm_remote_close)(struct cnic_sock *); + void (*cm_remote_abort)(struct cnic_sock *); + int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type, + char *data, u16 data_size); + int (*cnic_get_stats)(void *ulp_ctx); + struct module *owner; + atomic_t ref_count; +}; + +int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); + +int cnic_unregister_driver(int ulp_type); + +#endif diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile new file mode 100644 index 000000000..9b6885efa --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BCMGENET) += genet.o +genet-objs := bcmgenet.o bcmmii.o bcmgenet_wol.o diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c new file mode 100644 index 000000000..6043734ea --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -0,0 +1,3365 @@ +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) "bcmgenet: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bcmgenet.h" + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY 0 + +#define GENET_Q16_RX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) +#define GENET_Q16_TX_BD_CNT \ + (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) + +#define RX_BUF_LENGTH 2048 +#define SKB_ALIGNMENT 32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) +#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ + TOTAL_DESC * DMA_DESC_SIZE) + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, + void __iomem *d, u32 value) +{ + __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, + void __iomem *d) +{ + return __raw_readl(d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, + void __iomem *d, + dma_addr_t addr) +{ + __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, + void __iomem *d, dma_addr_t addr, u32 val) +{ + dmadesc_set_length_status(priv, d, val); + dmadesc_set_addr(priv, d, addr); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, + void __iomem *d) +{ + dma_addr_t addr; + + addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); + + /* Register writes to GISB bus can take couple hundred nanoseconds + * and are done for each packet, save these expensive writes unless + * the platform is explicitly configured for 64-bits/LPAE. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (priv->hw_params->flags & GENET_HAS_40BITS) + addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif + return addr; +} + +#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); + else + return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); + else + bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. 
Only those currently being used + * by driver are defined. + */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); + else + return __raw_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); + else + __raw_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv)) + return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); + else + return __raw_readl(priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ + if (GENET_IS_V1(priv)) + bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); + else + __raw_writel(val, priv->base + + priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { + DMA_RING_CFG = 0, + DMA_CTRL, + DMA_STATUS, + DMA_SCB_BURST_SIZE, + DMA_ARB_CTRL, + DMA_PRIORITY_0, + DMA_PRIORITY_1, + DMA_PRIORITY_2, + DMA_INDEX2RING_0, + DMA_INDEX2RING_1, + DMA_INDEX2RING_2, + DMA_INDEX2RING_3, + DMA_INDEX2RING_4, + DMA_INDEX2RING_5, + DMA_INDEX2RING_6, + DMA_INDEX2RING_7, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x2C, + [DMA_PRIORITY_0] = 0x30, + [DMA_PRIORITY_1] = 0x34, + [DMA_PRIORITY_2] = 0x38, + [DMA_INDEX2RING_0] = 0x70, + [DMA_INDEX2RING_1] = 0x74, + [DMA_INDEX2RING_2] = 0x78, + [DMA_INDEX2RING_3] = 0x7C, + [DMA_INDEX2RING_4] = 0x80, + [DMA_INDEX2RING_5] = 0x84, + [DMA_INDEX2RING_6] = 0x88, + [DMA_INDEX2RING_7] = 0x8C, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { + [DMA_RING_CFG] = 0x00, + [DMA_CTRL] = 0x04, + [DMA_STATUS] = 0x08, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { + [DMA_CTRL] = 0x00, + [DMA_STATUS] = 0x04, + [DMA_SCB_BURST_SIZE] = 0x0C, + [DMA_ARB_CTRL] = 0x30, + [DMA_PRIORITY_0] = 0x34, + [DMA_PRIORITY_1] = 0x38, + [DMA_PRIORITY_2] = 0x3C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ + return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return __raw_readl(priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, + enum dma_reg r) +{ + return __raw_readl(priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, + u32 val, enum dma_reg r) +{ + __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { 
+ TDMA_READ_PTR = 0, + RDMA_WRITE_PTR = TDMA_READ_PTR, + TDMA_READ_PTR_HI, + RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, + TDMA_CONS_INDEX, + RDMA_PROD_INDEX = TDMA_CONS_INDEX, + TDMA_PROD_INDEX, + RDMA_CONS_INDEX = TDMA_PROD_INDEX, + DMA_RING_BUF_SIZE, + DMA_START_ADDR, + DMA_START_ADDR_HI, + DMA_END_ADDR, + DMA_END_ADDR_HI, + DMA_MBUF_DONE_THRESH, + TDMA_FLOW_PERIOD, + RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, + TDMA_WRITE_PTR, + RDMA_READ_PTR = TDMA_WRITE_PTR, + TDMA_WRITE_PTR_HI, + RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. + */ +static const u8 genet_dma_ring_regs_v4[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_READ_PTR_HI] = 0x04, + [TDMA_CONS_INDEX] = 0x08, + [TDMA_PROD_INDEX] = 0x0C, + [DMA_RING_BUF_SIZE] = 0x10, + [DMA_START_ADDR] = 0x14, + [DMA_START_ADDR_HI] = 0x18, + [DMA_END_ADDR] = 0x1C, + [DMA_END_ADDR_HI] = 0x20, + [DMA_MBUF_DONE_THRESH] = 0x24, + [TDMA_FLOW_PERIOD] = 0x28, + [TDMA_WRITE_PTR] = 0x2C, + [TDMA_WRITE_PTR_HI] = 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { + [TDMA_READ_PTR] = 0x00, + [TDMA_CONS_INDEX] = 0x04, + [TDMA_PROD_INDEX] = 0x08, + [DMA_RING_BUF_SIZE] = 0x0C, + [DMA_START_ADDR] = 0x10, + [DMA_END_ADDR] = 0x14, + [DMA_MBUF_DONE_THRESH] = 0x18, + [TDMA_FLOW_PERIOD] = 0x1C, + [TDMA_WRITE_PTR] = 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return __raw_readl(priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, + unsigned int ring, + enum dma_ring_reg r) +{ + return __raw_readl(priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, + unsigned int ring, u32 val, + enum dma_ring_reg r) +{ + __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + + (DMA_RING_SIZE * ring) + + genet_dma_ring_regs[r]); +} + +static int bcmgenet_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int bcmgenet_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int bcmgenet_set_rx_csum(struct net_device *dev, + netdev_features_t wanted) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 rbuf_chk_ctrl; + bool rx_csum_en; + + rx_csum_en = !!(wanted & NETIF_F_RXCSUM); + + rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + + /* enable rx checksumming */ + if (rx_csum_en) + rbuf_chk_ctrl |= RBUF_RXCHK_EN; + else + rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; + priv->desc_rxchk_en = rx_csum_en; + + /* If UniMAC forwards CRC, we need to skip over it to get + * a valid CHK bit to 
be set in the per-packet status word + */ + if (rx_csum_en && priv->crc_fwd_en) + rbuf_chk_ctrl |= RBUF_SKIP_FCS; + else + rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; + + bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); + + return 0; +} + +static int bcmgenet_set_tx_csum(struct net_device *dev, + netdev_features_t wanted) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + bool desc_64b_en; + u32 tbuf_ctrl, rbuf_ctrl; + + tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); + rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + + desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + + /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ + if (desc_64b_en) { + tbuf_ctrl |= RBUF_64B_EN; + rbuf_ctrl |= RBUF_64B_EN; + } else { + tbuf_ctrl &= ~RBUF_64B_EN; + rbuf_ctrl &= ~RBUF_64B_EN; + } + priv->desc_64b_en = desc_64b_en; + + bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); + bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); + + return 0; +} + +static int bcmgenet_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ dev->features; + netdev_features_t wanted = dev->wanted_features; + int ret = 0; + + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) + ret = bcmgenet_set_tx_csum(dev, wanted); + if (changed & (NETIF_F_RXCSUM)) + ret = bcmgenet_set_rx_csum(dev, wanted); + + return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + priv->msg_enable = level; +} + +/* standard ethtool support functions. */ +enum bcmgenet_stat_type { + BCMGENET_STAT_NETDEV = -1, + BCMGENET_STAT_MIB_RX, + BCMGENET_STAT_MIB_TX, + BCMGENET_STAT_RUNT, + BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, +}; + +struct bcmgenet_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_sizeof; + int stat_offset; + enum bcmgenet_stat_type type; + /* reg offset from UMAC base for misc counters */ + u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ + .stat_offset = offsetof(struct net_device_stats, m), \ + .type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) + +#define STAT_GENET_MISC(str, m, offset) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ + .stat_offset = offsetof(struct bcmgenet_priv, m), \ + .type = BCMGENET_STAT_MISC, \ + .reg_offset = offset, \ +} + + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET 0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { + /* general stats */ + STAT_NETDEV(rx_packets), + 
STAT_NETDEV(tx_packets), + STAT_NETDEV(rx_bytes), + STAT_NETDEV(tx_bytes), + STAT_NETDEV(rx_errors), + STAT_NETDEV(tx_errors), + STAT_NETDEV(rx_dropped), + STAT_NETDEV(tx_dropped), + STAT_NETDEV(multicast), + /* UniMAC RSV counters */ + STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), + STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), + STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), + STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), + STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), + STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), + STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), + STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), + STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), + STAT_GENET_MIB_RX("rx_control", mib.rx.cf), + STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), + STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), + STAT_GENET_MIB_RX("rx_align", mib.rx.aln), + STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), + STAT_GENET_MIB_RX("rx_code", mib.rx.cde), + STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), + STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), + STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), + STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), + STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), + STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), + STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), + STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), + /* UniMAC TSV counters */ + STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), + STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), + STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), + STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), + STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), + STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), + STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), + STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), + STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), + STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), + STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), + STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), + STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), + STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), + STAT_GENET_MIB_TX("tx_control", mib.tx.cf), + STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), + STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), + STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), + STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), + STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), + STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), + STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), + STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), + STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), + STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), + STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), + STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), + STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), + STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), + /* UniMAC RUNT counters */ + STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), + STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), + STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), + 
STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), + /* Misc UniMAC counters */ + STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, + UMAC_RBUF_OVFL_CNT), + STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), + STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), +}; + +#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); + strlcpy(info->version, "v2.0", sizeof(info->version)); + info->n_stats = BCMGENET_STATS_LEN; +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMGENET_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmgenet_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmgenet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ + int i, j = 0; + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + u8 offset = 0; + u32 val = 0; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + switch (s->type) { + case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: + continue; + case BCMGENET_STAT_MIB_RX: + case BCMGENET_STAT_MIB_TX: + case BCMGENET_STAT_RUNT: + if (s->type != BCMGENET_STAT_MIB_RX) + offset = BCMGENET_STAT_OFFSET; + val = bcmgenet_umac_readl(priv, + UMAC_MIB_START + j + offset); + break; + case BCMGENET_STAT_MISC: + val = bcmgenet_umac_readl(priv, s->reg_offset); + /* clear if overflowed */ + if (val == ~0) + bcmgenet_umac_writel(priv, 0, s->reg_offset); + break; + } + + j += s->stat_sizeof; + p = (char *)priv + s->stat_offset; + *(u32 *)p = val; + } +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_running(dev)) + bcmgenet_update_mib_counters(priv); + + for (i = 0; i < BCMGENET_STATS_LEN; i++) { + const struct bcmgenet_stats *s; + char *p; + + s = &bcmgenet_gstrings_stats[i]; + if (s->type == BCMGENET_STAT_NETDEV) + p = (char *)&dev->stats; + else + p = (char *)priv; + p += s->stat_offset; + data[i] = *(u32 *)p; + } +} + +static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; + u32 reg; + + if (enable && !priv->clk_eee_enabled) { + clk_prepare_enable(priv->clk_eee); + priv->clk_eee_enabled = true; + } + + reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL); + + /* Enable EEE and switch to a 27Mhz clock automatically */ + reg = __raw_readl(priv->base + off); + if (enable) + reg |= TBUF_EEE_EN | TBUF_PM_EN; + else + reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); + __raw_writel(reg, priv->base + off); + + /* Do the same for thing for RBUF */ + reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL); + if (enable) + reg |= RBUF_EEE_EN | RBUF_PM_EN; + else + reg &= 
~(RBUF_EEE_EN | RBUF_PM_EN); + bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL); + + if (!enable && priv->clk_eee_enabled) { + clk_disable_unprepare(priv->clk_eee); + priv->clk_eee_enabled = false; + } + + priv->eee.eee_enabled = enable; + priv->eee.eee_active = enable; +} + +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(priv->phydev, e); +} + +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct ethtool_eee *p = &priv->eee; + int ret = 0; + + if (GENET_IS_V1(priv)) + return -EOPNOTSUPP; + + p->eee_enabled = e->eee_enabled; + + if (!p->eee_enabled) { + bcmgenet_eee_enable_set(dev, false); + } else { + ret = phy_init_eee(priv->phydev, 0); + if (ret) { + netif_err(priv, hw, dev, "EEE initialization failed\n"); + return ret; + } + + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); + bcmgenet_eee_enable_set(dev, true); + } + + return phy_ethtool_set_eee(priv->phydev, e); +} + +static int bcmgenet_nway_reset(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + return genphy_restart_aneg(priv->phydev); +} + +/* standard ethtool support functions. */ +static struct ethtool_ops bcmgenet_ethtool_ops = { + .get_strings = bcmgenet_get_strings, + .get_sset_count = bcmgenet_get_sset_count, + .get_ethtool_stats = bcmgenet_get_ethtool_stats, + .get_settings = bcmgenet_get_settings, + .set_settings = bcmgenet_set_settings, + .get_drvinfo = bcmgenet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = bcmgenet_get_msglevel, + .set_msglevel = bcmgenet_set_msglevel, + .get_wol = bcmgenet_get_wol, + .set_wol = bcmgenet_set_wol, + .get_eee = bcmgenet_get_eee, + .set_eee = bcmgenet_set_eee, + .nway_reset = bcmgenet_nway_reset, +}; + +/* Power down the unimac, based on mode. 
*/ +static int bcmgenet_power_down(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + int ret = 0; + u32 reg; + + switch (mode) { + case GENET_POWER_CABLE_SENSE: + phy_detach(priv->phydev); + break; + + case GENET_POWER_WOL_MAGIC: + ret = bcmgenet_wol_power_down_cfg(priv, mode); + break; + + case GENET_POWER_PASSIVE: + /* Power down LED */ + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= (EXT_PWR_DOWN_PHY | + EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + bcmgenet_phy_power_set(priv->dev, false); + } + break; + default: + break; + } + + return 0; +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 reg; + + if (!(priv->hw_params->flags & GENET_HAS_EXT)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + + switch (mode) { + case GENET_POWER_PASSIVE: + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | + EXT_PWR_DOWN_BIAS); + /* fallthrough */ + case GENET_POWER_CABLE_SENSE: + /* enable APD */ + reg |= EXT_PWR_DN_EN_LD; + break; + case GENET_POWER_WOL_MAGIC: + bcmgenet_wol_power_up_cfg(priv, mode); + return; + default: + break; + } + + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + + if (mode == GENET_POWER_PASSIVE) + bcmgenet_mii_reset(priv->dev); +} + +/* ioctl handle special commands that are not present in ethtool. */ +static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int val = 0; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + if (!priv->phydev) + val = -ENODEV; + else + val = phy_mii_ioctl(priv->phydev, rq, cmd); + break; + + default: + val = -EINVAL; + break; + } + + return val; +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) +{ + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Advancing local write pointer */ + if (ring->write_ptr == ring->end_ptr) + ring->write_ptr = ring->cb_ptr; + else + ring->write_ptr++; + + return tx_cb_ptr; +} + +/* Simple helper to free a control block's resources */ +static void bcmgenet_free_cb(struct enet_cb *cb) +{ + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + dma_unmap_addr_set(cb, dma_addr, 0); +} + +static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, + 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index), + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, + INTRL2_CPU_MASK_CLEAR); +} + +static 
inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) +{ + bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, + INTRL2_CPU_MASK_SET); +} + +/* Unlocked version of the reclaim routine */ +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct enet_cb *tx_cb_ptr; + struct netdev_queue *txq; + unsigned int pkts_compl = 0; + unsigned int c_index; + unsigned int txbds_ready; + unsigned int txbds_processed = 0; + + /* Compute how many buffers are transmitted since last xmit call */ + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + c_index &= DMA_C_INDEX_MASK; + + if (likely(c_index >= ring->c_index)) + txbds_ready = c_index - ring->c_index; + else + txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index; + + netif_dbg(priv, tx_done, dev, + "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + __func__, ring->index, ring->c_index, c_index, txbds_ready); + + /* Reclaim transmitted buffers */ + while (txbds_processed < txbds_ready) { + tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; + if (tx_cb_ptr->skb) { + pkts_compl++; + dev->stats.tx_packets++; + dev->stats.tx_bytes += tx_cb_ptr->skb->len; + dma_unmap_single(&dev->dev, + dma_unmap_addr(tx_cb_ptr, dma_addr), + tx_cb_ptr->skb->len, + DMA_TO_DEVICE); + bcmgenet_free_cb(tx_cb_ptr); + } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { + dev->stats.tx_bytes += + dma_unmap_len(tx_cb_ptr, dma_len); + dma_unmap_page(&dev->dev, + dma_unmap_addr(tx_cb_ptr, dma_addr), + dma_unmap_len(tx_cb_ptr, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); + } + + txbds_processed++; + if (likely(ring->clean_ptr < ring->end_ptr)) + ring->clean_ptr++; + else + ring->clean_ptr = ring->cb_ptr; + } + + ring->free_bds += txbds_processed; + ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK; + + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + txq = netdev_get_tx_queue(dev, ring->queue); + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + } + + return pkts_compl; +} + +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) +{ + unsigned int released; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + released = __bcmgenet_tx_reclaim(dev, ring); + spin_unlock_irqrestore(&ring->lock, flags); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + + work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring); + + return 0; + } + + return budget; +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int i; + + if (netif_is_multiqueue(dev)) { + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); + } + + bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Transmits a single SKB (either head of a fragment or a single SKB) + * caller must hold priv->lock + */ +static int bcmgenet_xmit_single(struct net_device *dev, + struct sk_buff *skb, + u16 dma_desc_flags, + struct bcmgenet_tx_ring *ring) +{ + struct 
bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct enet_cb *tx_cb_ptr; + unsigned int skb_len; + dma_addr_t mapping; + u32 length_status; + int ret; + + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + if (unlikely(!tx_cb_ptr)) + BUG(); + + tx_cb_ptr->skb = skb; + + skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); + + mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); + dev_kfree_skb(skb); + return ret; + } + + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); + length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | + DMA_TX_APPEND_CRC; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + length_status |= DMA_TX_DO_CSUM; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); + + return 0; +} + +/* Transmit a SKB fragment */ +static int bcmgenet_xmit_frag(struct net_device *dev, + skb_frag_t *frag, + u16 dma_desc_flags, + struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + struct enet_cb *tx_cb_ptr; + dma_addr_t mapping; + int ret; + + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + + if (unlikely(!tx_cb_ptr)) + BUG(); + tx_cb_ptr->skb = NULL; + + mapping = skb_frag_dma_map(kdev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + ret = dma_mapping_error(kdev, mapping); + if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", + __func__); + return ret; + } + + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, + (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); + + return 0; +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, + struct sk_buff *skb) +{ + struct status_64 *status = NULL; + struct sk_buff *new_skb; + u16 offset; + u8 ip_proto; + u16 ip_ver; + u32 tx_csum_info; + + if (unlikely(skb_headroom(skb) < sizeof(*status))) { + /* If 64 byte status block enabled, must make sure skb has + * enough headroom for us to insert 64B status block. + */ + new_skb = skb_realloc_headroom(skb, sizeof(*status)); + dev_kfree_skb(skb); + if (!new_skb) { + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + return NULL; + } + skb = new_skb; + } + + skb_push(skb, sizeof(*status)); + status = (struct status_64 *)skb->data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ip_ver = htons(skb->protocol); + switch (ip_ver) { + case ETH_P_IP: + ip_proto = ip_hdr(skb)->protocol; + break; + case ETH_P_IPV6: + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return skb; + } + + offset = skb_checksum_start_offset(skb) - sizeof(*status); + tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | + (offset + skb->csum_offset); + + /* Set the length valid bit for TCP and UDP and just set + * the special UDP flag for IPv4, else just set to 0. 
+ */ + if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { + tx_csum_info |= STATUS_TX_CSUM_LV; + if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; + } else { + tx_csum_info = 0; + } + + status->tx_csum_info = tx_csum_info; + } + + return skb; +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_tx_ring *ring = NULL; + struct netdev_queue *txq; + unsigned long flags = 0; + int nr_frags, index; + u16 dma_desc_flags; + int ret; + int i; + + index = skb_get_queue_mapping(skb); + /* Mapping strategy: + * queue_mapping = 0, unclassified, packet xmited through ring16 + * queue_mapping = 1, goes to ring 0. (highest priority queue + * queue_mapping = 2, goes to ring 1. + * queue_mapping = 3, goes to ring 2. + * queue_mapping = 4, goes to ring 3. + */ + if (index == 0) + index = DESC_INDEX; + else + index -= 1; + + nr_frags = skb_shinfo(skb)->nr_frags; + ring = &priv->tx_rings[index]; + txq = netdev_get_tx_queue(dev, ring->queue); + + spin_lock_irqsave(&ring->lock, flags); + if (ring->free_bds <= nr_frags + 1) { + netif_tx_stop_queue(txq); + netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + ret = NETDEV_TX_BUSY; + goto out; + } + + if (skb_padto(skb, ETH_ZLEN)) { + ret = NETDEV_TX_OK; + goto out; + } + + /* set the SKB transmit checksum */ + if (priv->desc_64b_en) { + skb = bcmgenet_put_tx_csum(dev, skb); + if (!skb) { + ret = NETDEV_TX_OK; + goto out; + } + } + + dma_desc_flags = DMA_SOP; + if (nr_frags == 0) + dma_desc_flags |= DMA_EOP; + + /* Transmit single SKB or head of fragment list */ + ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); + if (ret) { + ret = NETDEV_TX_OK; + goto out; + } + + /* xmit fragment */ + for (i = 0; i < nr_frags; i++) { + ret = bcmgenet_xmit_frag(dev, + &skb_shinfo(skb)->frags[i], + (i == nr_frags - 1) ? 
DMA_EOP : 0, + ring); + if (ret) { + ret = NETDEV_TX_OK; + goto out; + } + } + + skb_tx_timestamp(skb); + + /* Decrement total BD count and advance our write pointer */ + ring->free_bds -= nr_frags + 1; + ring->prod_index += nr_frags + 1; + ring->prod_index &= DMA_P_INDEX_MASK; + + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) + netif_tx_stop_queue(txq); + + if (!skb->xmit_more || netif_xmit_stopped(txq)) + /* Packets are ready, update producer index */ + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); +out: + spin_unlock_irqrestore(&ring->lock, flags); + + return ret; +} + +static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, + struct enet_cb *cb) +{ + struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; + struct sk_buff *rx_skb; + dma_addr_t mapping; + + /* Allocate a new Rx skb */ + skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb allocation failed\n", __func__); + return NULL; + } + + /* DMA-map the new Rx skb */ + mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, mapping)) { + priv->mib.rx_dma_failed++; + dev_kfree_skb_any(skb); + netif_err(priv, rx_err, priv->dev, + "%s: Rx skb DMA mapping failed\n", __func__); + return NULL; + } + + /* Grab the current Rx skb from the ring and DMA-unmap it */ + rx_skb = cb->skb; + if (likely(rx_skb)) + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), + priv->rx_buf_len, DMA_FROM_DEVICE); + + /* Put the new Rx skb on the ring */ + cb->skb = skb; + dma_unmap_addr_set(cb, dma_addr, mapping); + dmadesc_set_addr(priv, cb->bd_addr, mapping); + + /* Return the current Rx skb to caller */ + return rx_skb; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. 
+ */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, + unsigned int budget) +{ + struct bcmgenet_priv *priv = ring->priv; + struct net_device *dev = priv->dev; + struct enet_cb *cb; + struct sk_buff *skb; + u32 dma_length_status; + unsigned long dma_flag; + int len; + unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int p_index; + unsigned int discards; + unsigned int chksum_ok = 0; + + p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); + + discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & + DMA_P_INDEX_DISCARD_CNT_MASK; + if (discards > ring->old_discards) { + discards = discards - ring->old_discards; + dev->stats.rx_missed_errors += discards; + dev->stats.rx_errors += discards; + ring->old_discards += discards; + + /* Clear HW register when we reach 75% of maximum 0xFFFF */ + if (ring->old_discards >= 0xC000) { + ring->old_discards = 0; + bcmgenet_rdma_ring_writel(priv, ring->index, 0, + RDMA_PROD_INDEX); + } + } + + p_index &= DMA_P_INDEX_MASK; + + if (likely(p_index >= ring->c_index)) + rxpkttoprocess = p_index - ring->c_index; + else + rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index + + p_index; + + netif_dbg(priv, rx_status, dev, + "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + + while ((rxpktprocessed < rxpkttoprocess) && + (rxpktprocessed < budget)) { + cb = &priv->rx_cbs[ring->read_ptr]; + skb = bcmgenet_rx_refill(priv, cb); + + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + dev->stats.rx_errors++; + goto next; + } + + if (!priv->desc_64b_en) { + dma_length_status = + dmadesc_get_length_status(priv, cb->bd_addr); + } else { + struct status_64 *status; + + status = (struct status_64 *)skb->data; + dma_length_status = status->length_status; + } + + /* DMA flags and length are still valid no matter how + * we got the Receive Status Vector (64B RSB or register) + */ + dma_flag = dma_length_status & 0xffff; + len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + + netif_dbg(priv, rx_status, dev, + "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", + __func__, p_index, ring->c_index, + ring->read_ptr, dma_length_status); + + if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { + netif_err(priv, rx_status, dev, + "dropping fragmented packet!\n"); + dev->stats.rx_dropped++; + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } + + /* report errors */ + if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | + DMA_RX_OV | + DMA_RX_NO | + DMA_RX_LG | + DMA_RX_RXER))) { + netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", + (unsigned int)dma_flag); + if (dma_flag & DMA_RX_CRC_ERROR) + dev->stats.rx_crc_errors++; + if (dma_flag & DMA_RX_OV) + dev->stats.rx_over_errors++; + if (dma_flag & DMA_RX_NO) + dev->stats.rx_frame_errors++; + if (dma_flag & DMA_RX_LG) + dev->stats.rx_length_errors++; + dev->stats.rx_dropped++; + dev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } /* error packet */ + + chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && + priv->desc_rxchk_en; + + skb_put(skb, len); + if (priv->desc_64b_en) { + skb_pull(skb, 64); + len -= 64; + } + + if (likely(chksum_ok)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* remove hardware 2bytes added for IP alignment */ + skb_pull(skb, 2); + len -= 2; + + if (priv->crc_fwd_en) { + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + /*Finish setting up the received SKB and send it to the kernel*/ + skb->protocol = eth_type_trans(skb, priv->dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + if (dma_flag & DMA_RX_MULT) + 
dev->stats.multicast++; + + /* Notify kernel */ + napi_gro_receive(&ring->napi, skb); + netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +next: + rxpktprocessed++; + if (likely(ring->read_ptr < ring->end_ptr)) + ring->read_ptr++; + else + ring->read_ptr = ring->cb_ptr; + + ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; + bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); + } + + return rxpktprocessed; +} + +/* Rx NAPI polling method */ +static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_rx_ring *ring = container_of(napi, + struct bcmgenet_rx_ring, napi); + unsigned int work_done; + + work_done = bcmgenet_desc_rx(ring, budget); + + if (work_done < budget) { + napi_complete(napi); + ring->int_enable(ring); + } + + return work_done; +} + +/* Assign skb to RX DMA descriptor. */ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, + struct bcmgenet_rx_ring *ring) +{ + struct enet_cb *cb; + struct sk_buff *skb; + int i; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* loop here for each buffer needing assign */ + for (i = 0; i < ring->size; i++) { + cb = ring->cbs + i; + skb = bcmgenet_rx_refill(priv, cb); + if (skb) + dev_kfree_skb_any(skb); + if (!cb->skb) + return -ENOMEM; + } + + return 0; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ + struct enet_cb *cb; + int i; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = &priv->rx_cbs[i]; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(&priv->dev->dev, + dma_unmap_addr(cb, dma_addr), + priv->rx_buf_len, DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + if (cb->skb) + bcmgenet_free_cb(cb); + } +} + +static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) +{ + u32 reg; + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-size packet + * to be processed + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static int reset_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + unsigned int timeout = 0; + u32 reg; + + /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ + bcmgenet_rbuf_ctrl_set(priv, 0); + udelay(10); + + /* disable MAC while updating its registers */ + bcmgenet_umac_writel(priv, 0, UMAC_CMD); + + /* issue soft reset, wait for it to complete */ + bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); + while (timeout++ < 1000) { + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (!(reg & CMD_SW_RESET)) + return 0; + + udelay(1); + } + + if (timeout == 1000) { + dev_err(kdev, + "timeout waiting for MAC to come out of reset\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) +{ + /* Mask all interrupts.*/ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); +} + +static int init_umac(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + int ret; + u32 reg; + u32 int0_enable = 0; + u32 int1_enable = 0; + int i; + + dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); + + 
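+ /* Soft-reset the UniMAC and wait for CMD_SW_RESET to self-clear before programming it */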
ret = reset_umac(priv); + if (ret) + return ret; + + bcmgenet_umac_writel(priv, 0, UMAC_CMD); + /* clear tx/rx counter */ + bcmgenet_umac_writel(priv, + MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, + UMAC_MIB_CTRL); + bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + + bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + + /* init rx registers, enable ip header optimization */ + reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + reg |= RBUF_ALIGN_2B; + bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) + bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + + bcmgenet_intr_disable(priv); + + /* Enable Rx default queue 16 interrupts */ + int0_enable |= UMAC_IRQ_RXDMA_DONE; + + /* Enable Tx default queue 16 interrupts */ + int0_enable |= UMAC_IRQ_TXDMA_DONE; + + /* Monitor cable plug/unplugged event for internal PHY */ + if (phy_is_internal(priv->phydev)) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->ext_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + + reg = bcmgenet_bp_mc_get(priv); + reg |= BIT(priv->hw_params->bp_in_en_shift); + + /* bp_mask: back pressure mask */ + if (netif_is_multiqueue(priv->dev)) + reg |= priv->hw_params->bp_in_mask; + else + reg &= ~priv->hw_params->bp_in_mask; + bcmgenet_bp_mc_set(priv, reg); + } + + /* Enable MDIO interrupts on GENET v3+ */ + if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) + int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + + /* Enable Rx priority queue interrupts */ + for (i = 0; i < priv->hw_params->rx_queues; ++i) + int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i)); + + /* Enable Tx priority queue interrupts */ + for (i = 0; i < priv->hw_params->tx_queues; ++i) + int1_enable |= (1 << i); + + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + /* Enable rx/tx engine.*/ + dev_dbg(kdev, "done init umac\n"); + + return 0; +} + +/* Initialize a Tx ring along with corresponding hardware registers */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + u32 flow_period_val = 0; + + spin_lock_init(&ring->lock); + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->queue = 0; + ring->int_enable = bcmgenet_tx_ring16_int_enable; + ring->int_disable = bcmgenet_tx_ring16_int_disable; + } else { + ring->queue = index + 1; + ring->int_enable = bcmgenet_tx_ring_int_enable; + ring->int_disable = bcmgenet_tx_ring_int_disable; + } + ring->cbs = priv->tx_cbs + start_ptr; + ring->size = size; + ring->clean_ptr = start_ptr; + ring->c_index = 0; + ring->free_bds = size; + ring->write_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + ring->prod_index = 0; + + /* Set flow period for ring != 16 */ + if (index != DESC_INDEX) + flow_period_val = ENET_MAX_MTU_SIZE << 16; + + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); + bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + /* Disable rate control for now */ + bcmgenet_tdma_ring_writel(priv, index, flow_period_val, + TDMA_FLOW_PERIOD); + 
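+ /* Ring size and buffer length are packed into the single DMA_RING_BUF_SIZE register */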
bcmgenet_tdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + + /* Set start and end address, read and write pointers */ + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_READ_PTR); + bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, + TDMA_WRITE_PTR); + bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, + unsigned int index, unsigned int size, + unsigned int start_ptr, unsigned int end_ptr) +{ + struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; + u32 words_per_bd = WORDS_PER_BD(priv); + int ret; + + ring->priv = priv; + ring->index = index; + if (index == DESC_INDEX) { + ring->int_enable = bcmgenet_rx_ring16_int_enable; + ring->int_disable = bcmgenet_rx_ring16_int_disable; + } else { + ring->int_enable = bcmgenet_rx_ring_int_enable; + ring->int_disable = bcmgenet_rx_ring_int_disable; + } + ring->cbs = priv->rx_cbs + start_ptr; + ring->size = size; + ring->c_index = 0; + ring->read_ptr = start_ptr; + ring->cb_ptr = start_ptr; + ring->end_ptr = end_ptr - 1; + + ret = bcmgenet_alloc_rx_buffers(priv, ring); + if (ret) + return ret; + + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); + bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); + bcmgenet_rdma_ring_writel(priv, index, + ((size << DMA_RING_SIZE_SHIFT) | + RX_BUF_LENGTH), DMA_RING_BUF_SIZE); + bcmgenet_rdma_ring_writel(priv, index, + (DMA_FC_THRESH_LO << + DMA_XOFF_THRESHOLD_SHIFT) | + DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); + + /* Set start and end address, read and write pointers */ + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + DMA_START_ADDR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_READ_PTR); + bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, + RDMA_WRITE_PTR); + bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, + DMA_END_ADDR); + + return ret; +} + +static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); +} + +static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_enable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_enable(&ring->napi); +} + +static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_tx_ring *ring; + + for (i = 0; i < priv->hw_params->tx_queues; ++i) { + ring = &priv->tx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = &priv->tx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Tx 
queues + * + * Queues 0-3 are priority-based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * Queue 16 is the default Tx queue with + * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. + * + * The transmit control block pool is then partitioned as follows: + * - Tx queue 0 uses tx_cbs[0..31] + * - Tx queue 1 uses tx_cbs[32..63] + * - Tx queue 2 uses tx_cbs[64..95] + * - Tx queue 3 uses tx_cbs[96..127] + * - Tx queue 16 uses tx_cbs[128..255] + */ +static void bcmgenet_init_tx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i, dma_enable; + u32 dma_ctrl, ring_cfg; + u32 dma_priority[3] = {0, 0, 0}; + + dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Enable strict priority arbiter mode */ + bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + + /* Initialize Tx priority queues */ + for (i = 0; i < priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, + i * priv->hw_params->tx_bds_per_q, + (i + 1) * priv->hw_params->tx_bds_per_q); + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(i)] |= + ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + } + + /* Initialize Tx default queue 16 */ + bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, + priv->hw_params->tx_queues * + priv->hw_params->tx_bds_per_q, + TOTAL_DESC); + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= + ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << + DMA_PRIO_REG_SHIFT(DESC_INDEX)); + + /* Set Tx queue priorities */ + bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); + bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); + bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); + + /* Initialize Tx NAPI */ + bcmgenet_init_tx_napi(priv); + + /* Enable Tx queues */ + bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Enable Tx DMA */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); +} + +static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); + } + + ring = &priv->rx_rings[DESC_INDEX]; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); +} + +static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_enable(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_enable(&ring->napi); +} + +static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + napi_disable(&ring->napi); + } + + ring = &priv->rx_rings[DESC_INDEX]; + napi_disable(&ring->napi); +} + +static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) +{ + unsigned int i; + struct bcmgenet_rx_ring *ring; + + for (i = 0; i < priv->hw_params->rx_queues; ++i) { + ring = &priv->rx_rings[i]; + netif_napi_del(&ring->napi); + } + + ring = 
&priv->rx_rings[DESC_INDEX]; + netif_napi_del(&ring->napi); +} + +/* Initialize Rx queues + * + * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be + * used to direct traffic to these queues. + * + * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. + */ +static int bcmgenet_init_rx_queues(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 i; + u32 dma_enable; + u32 dma_ctrl; + u32 ring_cfg; + int ret; + + dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); + dma_enable = dma_ctrl & DMA_EN; + dma_ctrl &= ~DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + dma_ctrl = 0; + ring_cfg = 0; + + /* Initialize Rx priority queues */ + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, + priv->hw_params->rx_bds_per_q, + i * priv->hw_params->rx_bds_per_q, + (i + 1) * + priv->hw_params->rx_bds_per_q); + if (ret) + return ret; + + ring_cfg |= (1 << i); + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + } + + /* Initialize Rx default queue 16 */ + ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, + priv->hw_params->rx_queues * + priv->hw_params->rx_bds_per_q, + TOTAL_DESC); + if (ret) + return ret; + + ring_cfg |= (1 << DESC_INDEX); + dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); + + /* Initialize Rx NAPI */ + bcmgenet_init_rx_napi(priv); + + /* Enable rings */ + bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + + /* Configure ring as descriptor ring and re-enable DMA if enabled */ + if (dma_enable) + dma_ctrl |= DMA_EN; + bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + + return 0; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + return ret; +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + int i; + + bcmgenet_fini_rx_napi(priv); + bcmgenet_fini_tx_napi(priv); + + /* disable DMA */ + bcmgenet_dma_teardown(priv); + + for (i = 0; i < priv->num_tx_bds; i++) { + if (priv->tx_cbs[i].skb != NULL) { + dev_kfree_skb(priv->tx_cbs[i].skb); + priv->tx_cbs[i].skb = NULL; + } + } + + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ + int ret; + unsigned int i; + struct enet_cb *cb; + + netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + + /* Initialize common Rx ring structures */ + 
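
A standalone sketch of the arithmetic behind the Tx queue layout described in the bcmgenet_init_tx_queues() comment above: four priority queues of 32 descriptors carved out of the 256-entry pool, the remainder going to default queue 16, and one 5-bit priority field per queue packed six-to-a-register into DMA_PRIORITY_0..2. GENET_Q0_PRIORITY is not defined in this hunk and is assumed to be 0 below; this is an illustration, not driver code.

#include <stdio.h>
#include <stdint.h>

#define TOTAL_DESC		256
#define DESC_INDEX		16
#define TX_QUEUES		4	/* priority queues 0..3 */
#define TX_BDS_PER_Q		32

#define PRIO_BITS		5	/* DMA_RING_BUF_PRIORITY_SHIFT */
#define PRIO_REG_INDEX(q)	((q) / 6)		/* DMA_PRIO_REG_INDEX */
#define PRIO_REG_SHIFT(q)	(((q) % 6) * PRIO_BITS)	/* DMA_PRIO_REG_SHIFT */

int main(void)
{
	uint32_t dma_priority[3] = { 0, 0, 0 };
	unsigned int q;

	/* Priority queues 0..3 take 32 control blocks each, back to back */
	for (q = 0; q < TX_QUEUES; q++) {
		printf("TXq %u: tx_cbs[%u..%u]\n", q,
		       q * TX_BDS_PER_Q, (q + 1) * TX_BDS_PER_Q - 1);
		/* GENET_Q0_PRIORITY assumed to be 0 */
		dma_priority[PRIO_REG_INDEX(q)] |= q << PRIO_REG_SHIFT(q);
	}

	/* Default queue 16 takes the remaining 128 control blocks */
	printf("TXq %u: tx_cbs[%u..%u]\n", DESC_INDEX,
	       TX_QUEUES * TX_BDS_PER_Q, TOTAL_DESC - 1);
	dma_priority[PRIO_REG_INDEX(DESC_INDEX)] |=
		(uint32_t)TX_QUEUES << PRIO_REG_SHIFT(DESC_INDEX);

	/* Queue 16 lands in the third register (16 / 6 == 2), shifted by 20 */
	printf("DMA_PRIORITY_0..2 = 0x%08x 0x%08x 0x%08x\n",
	       dma_priority[0], dma_priority[1], dma_priority[2]);
	return 0;
}
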
priv->rx_bds = priv->base + priv->hw_params->rdma_offset; + priv->num_rx_bds = TOTAL_DESC; + priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->rx_cbs) + return -ENOMEM; + + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; + } + + /* Initialize common TX ring structures */ + priv->tx_bds = priv->base + priv->hw_params->tdma_offset; + priv->num_tx_bds = TOTAL_DESC; + priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), + GFP_KERNEL); + if (!priv->tx_cbs) { + kfree(priv->rx_cbs); + return -ENOMEM; + } + + for (i = 0; i < priv->num_tx_bds; i++) { + cb = priv->tx_cbs + i; + cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; + } + + /* Init rDma */ + bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + + /* Initialize Rx queues */ + ret = bcmgenet_init_rx_queues(priv->dev); + if (ret) { + netdev_err(priv->dev, "failed to initialize Rx queues\n"); + bcmgenet_free_rx_buffers(priv); + kfree(priv->rx_cbs); + kfree(priv->tx_cbs); + return ret; + } + + /* Init tDma */ + bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + + /* Initialize Tx queues */ + bcmgenet_init_tx_queues(priv->dev); + + return 0; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ + struct bcmgenet_priv *priv = container_of( + work, struct bcmgenet_priv, bcmgenet_irq_work); + + netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + + if (priv->irq0_stat & UMAC_IRQ_MPD_R) { + priv->irq0_stat &= ~UMAC_IRQ_MPD_R; + netif_dbg(priv, wol, priv->dev, + "magic packet detected, waking up\n"); + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + } + + /* Link UP/DOWN event */ + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) { + phy_mac_interrupt(priv->phydev, + !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); + priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; + } +} + +/* bcmgenet_isr1: handle Rx and Tx priority queues */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + unsigned int index; + + /* Save irq status for bottom-half processing. 
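
The control blocks allocated in bcmgenet_init_dma() above mirror the descriptor words in register space: cb->bd_addr advances by one descriptor stride per index, while the ring pointer registers (DMA_START_ADDR, the READ/WRITE pointers, DMA_END_ADDR) are programmed in 32-bit words, as in bcmgenet_init_tx_ring()/bcmgenet_init_rx_ring(). A minimal sketch, assuming DMA_DESC_SIZE is words_per_bd * 4 bytes (its definition is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

/* Byte offset of descriptor i from the start of the TDMA/RDMA window,
 * assuming DMA_DESC_SIZE == words_per_bd * sizeof(u32) */
static uint32_t desc_byte_off(unsigned int i, unsigned int words_per_bd)
{
	return i * words_per_bd * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
	unsigned int words_per_bd = 3;	/* GENETv4; v1-v3 use 2 */
	unsigned int start_ptr = 32;	/* e.g. Tx queue 1 */
	unsigned int end_ptr = 64;

	/* cb->bd_addr of the ring's first descriptor, relative to tx_bds */
	printf("bd_addr offset  = 0x%x\n", desc_byte_off(start_ptr, words_per_bd));
	/* ring pointer registers count words, not bytes */
	printf("DMA_START_ADDR  = %u\n", start_ptr * words_per_bd);
	printf("DMA_END_ADDR    = %u\n", end_ptr * words_per_bd - 1);
	return 0;
}
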
*/ + priv->irq1_stat = + bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); + + /* Check Rx priority queue interrupts */ + for (index = 0; index < priv->hw_params->rx_queues; index++) { + if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) + continue; + + rx_ring = &priv->rx_rings[index]; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule(&rx_ring->napi); + } + } + + /* Check Tx priority queue interrupts */ + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(priv->irq1_stat & BIT(index))) + continue; + + tx_ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule(&tx_ring->napi); + } + } + + return IRQ_HANDLED; +} + +/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_rx_ring *rx_ring; + struct bcmgenet_tx_ring *tx_ring; + + /* Save irq status for bottom-half processing. */ + priv->irq0_stat = + bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & + ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + + /* clear interrupts */ + bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); + + netif_dbg(priv, intr, priv->dev, + "IRQ=0x%x\n", priv->irq0_stat); + + if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) { + rx_ring = &priv->rx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&rx_ring->napi))) { + rx_ring->int_disable(rx_ring); + __napi_schedule(&rx_ring->napi); + } + } + + if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) { + tx_ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&tx_ring->napi))) { + tx_ring->int_disable(tx_ring); + __napi_schedule(&tx_ring->napi); + } + } + + if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | + UMAC_IRQ_PHY_DET_F | + UMAC_IRQ_LINK_EVENT | + UMAC_IRQ_HFB_SM | + UMAC_IRQ_HFB_MM | + UMAC_IRQ_MPD_R)) { + /* all other interested interrupts handled in bottom half */ + schedule_work(&priv->bcmgenet_irq_work); + } + + if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && + priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + wake_up(&priv->wq); + } + + return IRQ_HANDLED; +} + +static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) +{ + struct bcmgenet_priv *priv = dev_id; + + pm_wakeup_event(&priv->pdev->dev, 0); + + return IRQ_HANDLED; +} + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ + u32 reg; + + reg = bcmgenet_rbuf_ctrl_get(priv); + reg |= BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + + reg &= ~BIT(1); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, + unsigned char *addr) +{ + bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | + (addr[2] << 8) | addr[3], UMAC_MAC0); + bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ + u32 reg; + u32 dma_ctrl; + + /* disable DMA */ + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= 
~dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ + u32 reg; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= dma_ctrl; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, + u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + return !!(reg & (1 << (f_index % 32))); +} + +static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, + u32 f_index, u32 rx_queue) +{ + u32 offset; + u32 reg; + + offset = f_index / 8; + reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); + reg &= ~(0xF << (4 * (f_index % 8))); + reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); + bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); +} + +static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, + u32 f_index, u32 f_length) +{ + u32 offset; + u32 reg; + + offset = HFB_FLT_LEN_V3PLUS + + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * + sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg &= ~(0xFF << (8 * (f_index % 4))); + reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); + bcmgenet_hfb_reg_writel(priv, reg, offset); +} + +static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) +{ + u32 f_index; + + for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++) + if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) + return f_index; + + return -ENOMEM; +} + +/* bcmgenet_hfb_add_filter + * + * Add new filter to Hardware Filter Block to match and direct Rx traffic to + * desired Rx queue. 
+ * + * f_data is an array of unsigned 32-bit integers where each 32-bit integer + * provides filter data for 2 bytes (4 nibbles) of Rx frame: + * + * bits 31:20 - unused + * bit 19 - nibble 0 match enable + * bit 18 - nibble 1 match enable + * bit 17 - nibble 2 match enable + * bit 16 - nibble 3 match enable + * bits 15:12 - nibble 0 data + * bits 11:8 - nibble 1 data + * bits 7:4 - nibble 2 data + * bits 3:0 - nibble 3 data + * + * Example: + * In order to match: + * - Ethernet frame type = 0x0800 (IP) + * - IP version field = 4 + * - IP protocol field = 0x11 (UDP) + * + * The following filter is needed: + * u32 hfb_filter_ipv4_udp[] = { + * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, + * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, + * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, + * }; + * + * To add the filter to HFB and direct the traffic to Rx queue 0, call: + * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, + * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); + */ +int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, + u32 f_length, u32 rx_queue) +{ + int f_index; + u32 i; + + f_index = bcmgenet_hfb_find_unused_filter(priv); + if (f_index < 0) + return -ENOMEM; + + if (f_length > priv->hw_params->hfb_filter_size) + return -EINVAL; + + for (i = 0; i < f_length; i++) + bcmgenet_hfb_writel(priv, f_data[i], + (f_index * priv->hw_params->hfb_filter_size + i) * + sizeof(u32)); + + bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); + bcmgenet_hfb_enable_filter(priv, f_index); + bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL); + + return 0; +} + +/* bcmgenet_hfb_clear + * + * Clear Hardware Filter Block and disable all filtering. 
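
The nibble encoding described in the bcmgenet_hfb_add_filter() comment above can be reproduced with a small helper that packs two frame bytes and a 4-bit per-nibble enable mask into one filter word; the sample values below regenerate two of the words from the hfb_filter_ipv4_udp example. This is a standalone sketch, not driver code.

#include <stdint.h>
#include <stdio.h>

/* One HFB filter word covers two Rx frame bytes: bits 19:16 are the
 * per-nibble match enables (bit 19 = high nibble of b0, bit 16 = low
 * nibble of b1), bits 15:0 hold the nibble data. */
static uint32_t hfb_word(uint8_t b0, uint8_t b1, uint8_t nibble_en)
{
	return ((uint32_t)(nibble_en & 0xF) << 16) |
	       ((uint32_t)b0 << 8) | b1;
}

int main(void)
{
	/* EtherType 0x0800 (IPv4), all four nibbles significant */
	printf("0x%08x\n", hfb_word(0x08, 0x00, 0xF));	/* 0x000f0800 */
	/* First IP header byte: match only the version nibble (4) */
	printf("0x%08x\n", hfb_word(0x40, 0x00, 0x8));	/* 0x00084000 */
	return 0;
}
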
+ */ +static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) +{ + u32 i; + + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); + + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0x0, i); + + for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) + bcmgenet_hfb_reg_writel(priv, 0x0, + HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + + for (i = 0; i < priv->hw_params->hfb_filter_cnt * + priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); +} + +static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) +{ + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + + bcmgenet_hfb_clear(priv); +} + +static void bcmgenet_netif_start(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + /* Start the network engine */ + bcmgenet_enable_rx_napi(priv); + bcmgenet_enable_tx_napi(priv); + + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); + + netif_tx_start_all_queues(dev); + + phy_start(priv->phydev); +} + +static int bcmgenet_open(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + u32 reg; + int ret; + + netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + + /* Turn on the clock */ + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (phy_is_internal(priv->phydev)) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); + + ret = init_umac(priv); + if (ret) + goto err_clk_disable; + + /* disable ethernet MAC while updating its registers */ + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); + + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + if (phy_is_internal(priv->phydev)) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= EXT_ENERGY_DET_MASK; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto err_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + /* HFB init */ + bcmgenet_hfb_init(priv); + + ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq0); + goto err_fini_dma; + } + + ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, + dev->name, priv); + if (ret < 0) { + netdev_err(dev, "can't request IRQ %d\n", priv->irq1); + goto err_irq0; + } + + /* Re-configure the port multiplexer towards the PHY device */ + bcmgenet_mii_config(priv->dev, false); + + phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, + priv->phy_interface); + + bcmgenet_netif_start(dev); + + return 0; + +err_irq0: + free_irq(priv->irq0, dev); +err_fini_dma: + bcmgenet_fini_dma(priv); +err_clk_disable: + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + return ret; +} + +static void bcmgenet_netif_stop(struct net_device *dev) +{ + struct bcmgenet_priv *priv = 
netdev_priv(dev); + + netif_tx_stop_all_queues(dev); + phy_stop(priv->phydev); + bcmgenet_intr_disable(priv); + bcmgenet_disable_rx_napi(priv); + bcmgenet_disable_tx_napi(priv); + + /* Wait for pending work items to complete. Since interrupts are + * disabled no new work will be scheduled. + */ + cancel_work_sync(&priv->bcmgenet_irq_work); + + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; +} + +static int bcmgenet_close(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + + bcmgenet_netif_stop(dev); + + /* Really kill the PHY state machine and disconnect from it */ + phy_disconnect(priv->phydev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + ret = bcmgenet_dma_teardown(priv); + if (ret) + return ret; + + /* Disable MAC transmit. TX DMA disabled have to done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); + + free_irq(priv->irq0, priv); + free_irq(priv->irq1, priv); + + if (phy_is_internal(priv->phydev)) + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void bcmgenet_timeout(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + + dev->trans_start = jiffies; + + dev->stats.tx_errors++; + + netif_tx_wake_all_queues(dev); +} + +#define MAX_MC_COUNT 16 + +static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, + unsigned char *addr, + int *i, + int *mc) +{ + u32 reg; + + bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], + UMAC_MDF_ADDR + (*i * 4)); + bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | + addr[4] << 8 | addr[5], + UMAC_MDF_ADDR + ((*i + 1) * 4)); + reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); + reg |= (1 << (MAX_MC_COUNT - *mc)); + bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); + *i += 2; + (*mc)++; +} + +static void bcmgenet_set_rx_mode(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, mc; + u32 reg; + + netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); + + /* Promiscuous mode */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (dev->flags & IFF_PROMISC) { + reg |= CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); + return; + } else { + reg &= ~CMD_PROMISC; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } + + /* UniMac doesn't support ALLMULTI */ + if (dev->flags & IFF_ALLMULTI) { + netdev_warn(dev, "ALLMULTI is not supported\n"); + return; + } + + /* update MDF filter */ + i = 0; + mc = 0; + /* Broadcast */ + bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); + /* my own address.*/ + bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); + /* Unicast list*/ + if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) + return; + + if (!netdev_uc_empty(dev)) + netdev_for_each_uc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); + /* Multicast */ + if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) + return; + + netdev_for_each_mc_addr(ha, dev) + bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); +} + +/* Set the hardware MAC address. 
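
For reference, the two address packings used above differ: bcmgenet_set_hw_addr() splits the station address 4+2 bytes across UMAC_MAC0/UMAC_MAC1, while each MDF slot written by bcmgenet_set_mdf_addr() splits it 2+4. A small sketch with an arbitrary sample address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t a[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef }; /* sample */

	/* UMAC_MAC0 / UMAC_MAC1 layout used by bcmgenet_set_hw_addr() */
	uint32_t mac0 = (uint32_t)a[0] << 24 | a[1] << 16 | a[2] << 8 | a[3];
	uint32_t mac1 = (uint32_t)a[4] << 8 | a[5];

	/* MDF slots take 2 bytes in the first word, 4 in the second */
	uint32_t mdf_hi = (uint32_t)a[0] << 8 | a[1];
	uint32_t mdf_lo = (uint32_t)a[2] << 24 | a[3] << 16 | a[4] << 8 | a[5];

	printf("MAC0=0x%08x MAC1=0x%08x\n", mac0, mac1);
	printf("MDF : 0x%08x 0x%08x\n", mdf_hi, mdf_lo);
	return 0;
}
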
*/ +static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + /* Setting the MAC address at the hardware level is not possible + * without disabling the UniMAC RX/TX enable bits. + */ + if (netif_running(dev)) + return -EBUSY; + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { + .ndo_open = bcmgenet_open, + .ndo_stop = bcmgenet_close, + .ndo_start_xmit = bcmgenet_xmit, + .ndo_tx_timeout = bcmgenet_timeout, + .ndo_set_rx_mode = bcmgenet_set_rx_mode, + .ndo_set_mac_address = bcmgenet_set_mac_addr, + .ndo_do_ioctl = bcmgenet_ioctl, + .ndo_set_features = bcmgenet_set_features, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { + [GENET_V1] = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, + }, + [GENET_V2] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT, + }, + [GENET_V3] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + GENET_HAS_MOCA_LINK_DET, + }, + [GENET_V4] = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, + }, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ + struct bcmgenet_hw_params *params; + u32 reg; + u8 major; + u16 gphy_rev; + + if (GENET_IS_V4(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; + priv->version = GENET_V4; + } else if (GENET_IS_V3(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; + priv->version = GENET_V3; + } else if (GENET_IS_V2(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v2; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + priv->dma_rx_chk_bit = DMA_RX_CHK_V12; + priv->version = GENET_V2; + } else if (GENET_IS_V1(priv)) { + bcmgenet_dma_regs = bcmgenet_dma_regs_v1; + genet_dma_ring_regs = genet_dma_ring_regs_v123; + priv->dma_rx_chk_bit = DMA_RX_CHK_V12; + priv->version = GENET_V1; + } + + /* enum genet_version starts at 1 */ + priv->hw_params = &bcmgenet_hw_params[priv->version]; + params = priv->hw_params; + + /* Read GENET HW 
version */ + reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); + major = (reg >> 24 & 0x0f); + if (major == 5) + major = 4; + else if (major == 0) + major = 1; + if (major != priv->version) { + dev_err(&priv->pdev->dev, + "GENET version mismatch, got: %d, configured for: %d\n", + major, priv->version); + } + + /* Print the GENET core version */ + dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, + major, (reg >> 16) & 0x0f, reg & 0xffff); + + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + * + * On newer chips, starting with PHY revision G0, a new scheme is + * deployed similar to the Starfighter 2 switch with GPHY major + * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 + * is reserved as well as special value 0x01ff, we have a small + * heuristic to check for the new GPHY revision and re-arrange things + * so the GPHY driver is happy. + */ + gphy_rev = reg & 0xffff; + + /* This is the good old scheme, just GPHY major, no minor nor patch */ + if ((gphy_rev & 0xf0) != 0) + priv->gphy_rev = gphy_rev << 8; + + /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ + else if ((gphy_rev & 0xff00) != 0) + priv->gphy_rev = gphy_rev; + + /* This is reserved so should require special treatment */ + else if (gphy_rev == 0 || gphy_rev == 0x01ff) { + pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); + return; + } + +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!(params->flags & GENET_HAS_40BITS)) + pr_warn("GENET does not support 40-bits PA\n"); +#endif + + pr_debug("Configuration for version: %d\n" + "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" + "BP << en: %2d, BP msk: 0x%05x\n" + "HFB count: %2d, QTAQ msk: 0x%05x\n" + "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" + "RDMA: 0x%05x, TDMA: 0x%05x\n" + "Words/BD: %d\n", + priv->version, + params->tx_queues, params->tx_bds_per_q, + params->rx_queues, params->rx_bds_per_q, + params->bp_in_en_shift, params->bp_in_mask, + params->hfb_filter_cnt, params->qtag_mask, + params->tbuf_offset, params->hfb_offset, + params->hfb_reg_offset, + params->rdma_offset, params->tdma_offset, + params->words_per_bd); +} + +static const struct of_device_id bcmgenet_match[] = { + { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, + { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, + { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, + { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, + { }, +}; + +static int bcmgenet_probe(struct platform_device *pdev) +{ + struct bcmgenet_platform_data *pd = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + struct bcmgenet_priv *priv; + struct net_device *dev; + const void *macaddr; + struct resource *r; + int err = -EIO; + + /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ + dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, + GENET_MAX_MQ_CNT + 1); + if (!dev) { + dev_err(&pdev->dev, "can't allocate net device\n"); + return -ENOMEM; + } + + if (dn) { + of_id = of_match_node(bcmgenet_match, dn); + if (!of_id) + return -EINVAL; + } + + priv = netdev_priv(dev); + priv->irq0 = platform_get_irq(pdev, 0); + priv->irq1 = platform_get_irq(pdev, 1); + priv->wol_irq = platform_get_irq(pdev, 2); + if (!priv->irq0 || !priv->irq1) { + dev_err(&pdev->dev, "can't 
find IRQs\n"); + err = -EINVAL; + goto err; + } + + if (dn) { + macaddr = of_get_mac_address(dn); + if (!macaddr) { + dev_err(&pdev->dev, "can't find MAC address\n"); + err = -EINVAL; + goto err; + } + } else { + macaddr = pd->mac_address; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->base)) { + err = PTR_ERR(priv->base); + goto err; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev_set_drvdata(&pdev->dev, dev); + ether_addr_copy(dev->dev_addr, macaddr); + dev->watchdog_timeo = 2 * HZ; + dev->ethtool_ops = &bcmgenet_ethtool_ops; + dev->netdev_ops = &bcmgenet_netdev_ops; + + priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + + /* Set hardware features */ + dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + + /* Request the WOL interrupt and advertise suspend if available */ + priv->wol_irq_disabled = true; + err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, + dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + + /* Set the needed headroom to account for any possible + * features enabling/disabling at runtime + */ + dev->needed_headroom += 64; + + netdev_boot_setup_check(dev); + + priv->dev = dev; + priv->pdev = pdev; + if (of_id) + priv->version = (enum bcmgenet_version)of_id->data; + else + priv->version = pd->genet_version; + + priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); + if (IS_ERR(priv->clk)) + dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); + + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); + + bcmgenet_set_hw_params(priv); + + /* Mii wait queue */ + init_waitqueue_head(&priv->wq); + /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ + priv->rx_buf_len = RX_BUF_LENGTH; + INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + + priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); + if (IS_ERR(priv->clk_wol)) + dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); + + priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); + if (IS_ERR(priv->clk_eee)) { + dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); + priv->clk_eee = NULL; + } + + err = reset_umac(priv); + if (err) + goto err_clk_disable; + + err = bcmgenet_mii_init(dev); + if (err) + goto err_clk_disable; + + /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues + * just the ring 16 descriptor based TX + */ + netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); + netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + + /* libphy will determine the link state */ + netif_carrier_off(dev); + + /* Turn off the main clock, WOL clock is handled separately */ + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + + err = register_netdev(dev); + if (err) + goto err; + + return err; + +err_clk_disable: + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); +err: + free_netdev(dev); + return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ + struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + + dev_set_drvdata(&pdev->dev, NULL); + unregister_netdev(priv->dev); + bcmgenet_mii_exit(priv->dev); + free_netdev(priv->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + if (!netif_running(dev)) + return 0; + + bcmgenet_netif_stop(dev); + + 
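
The GPHY revision heuristic described in bcmgenet_set_hw_params() above can be restated as a standalone function; this sketch keeps the same decision order (old scheme first, then new scheme, then the reserved zero value) and uses arbitrary sample inputs.

#include <stdint.h>
#include <stdio.h>

/* Normalize SYS_REV_CTRL[15:0] the way bcmgenet_set_hw_params() does:
 * old-scheme parts carry the major revision in bits 7:0 and are shifted
 * into bits 15:8; new-scheme parts already use major.patch layout. */
static uint16_t gphy_rev_normalize(uint16_t gphy_rev)
{
	if (gphy_rev & 0x00f0)		/* old scheme: major only */
		return gphy_rev << 8;
	if (gphy_rev & 0xff00)		/* new scheme: major in 15:8 */
		return gphy_rev;
	return 0;			/* 0x0000 is reserved */
}

int main(void)
{
	printf("0x%04x\n", gphy_rev_normalize(0x00a0));	/* old scheme -> 0xa000 */
	printf("0x%04x\n", gphy_rev_normalize(0x1002));	/* new scheme, kept as-is */
	printf("0x%04x\n", gphy_rev_normalize(0x0000));	/* reserved   -> 0 */
	return 0;
}
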
phy_suspend(priv->phydev); + + netif_device_detach(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + ret = bcmgenet_dma_teardown(priv); + if (ret) + return ret; + + /* Disable MAC transmit. TX DMA disabled have to done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) { + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + clk_prepare_enable(priv->clk_wol); + } else if (phy_is_internal(priv->phydev)) { + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + } + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + return ret; +} + +static int bcmgenet_resume(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + unsigned long dma_ctrl; + int ret; + u32 reg; + + if (!netif_running(dev)) + return 0; + + /* Turn on the clock */ + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (phy_is_internal(priv->phydev)) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + bcmgenet_umac_reset(priv); + + ret = init_umac(priv); + if (ret) + goto out_clk_disable; + + /* From WOL-enabled suspend, switch to regular clock */ + if (priv->wolopts) + clk_disable_unprepare(priv->clk_wol); + + phy_init_hw(priv->phydev); + /* Speed settings must be restored */ + bcmgenet_mii_config(priv->dev, false); + + /* disable ethernet MAC while updating its registers */ + umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); + + bcmgenet_set_hw_addr(priv, dev->dev_addr); + + if (phy_is_internal(priv->phydev)) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= EXT_ENERGY_DET_MASK; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + + if (priv->wolopts) + bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); + + /* Disable RX/TX DMA and flush TX queues */ + dma_ctrl = bcmgenet_dma_disable(priv); + + /* Reinitialize TDMA and RDMA and SW housekeeping */ + ret = bcmgenet_init_dma(priv); + if (ret) { + netdev_err(dev, "failed to initialize DMA\n"); + goto out_clk_disable; + } + + /* Always enable ring 16 - descriptor ring */ + bcmgenet_enable_dma(priv, dma_ctrl); + + netif_device_attach(dev); + + phy_resume(priv->phydev); + + if (priv->eee.eee_enabled) + bcmgenet_eee_enable_set(dev, true); + + bcmgenet_netif_start(dev); + + return 0; + +out_clk_disable: + clk_disable_unprepare(priv->clk); + return ret; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); + +static struct platform_driver bcmgenet_driver = { + .probe = bcmgenet_probe, + .remove = bcmgenet_remove, + .driver = { + .name = "bcmgenet", + .of_match_table = bcmgenet_match, + .pm = &bcmgenet_pm_ops, + }, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); +MODULE_ALIAS("platform:bcmgenet"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h new file mode 100644 index 000000000..6f2887a5e --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -0,0 +1,686 @@ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * This program is 
free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include +#include +#include +#include +#include +#include +#include + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC 256 + +/* which ring is descriptor based */ +#define DESC_INDEX 16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN 6 +#define ENET_PAD 8 +#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ + ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH 0x10 + +/* misc. configuration */ +#define CLEAR_ALL_HFB 0xFF +#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO 5 + +/* 64B receive/transmit status block */ +struct status_64 { + u32 length_status; /* length and peripheral status */ + u32 ext_status; /* Extended status*/ + u32 rx_csum; /* partial rx checksum */ + u32 unused1[9]; /* unused */ + u32 tx_csum_info; /* Tx checksum info. */ + u32 unused2[3]; /* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK 0x1FFFFF +#define STATUS_RX_CSUM_MASK 0xFFFF +#define STATUS_RX_CSUM_OK 0x10000 +#define STATUS_RX_CSUM_FR 0x20000 +#define STATUS_RX_PROTO_TCP 0 +#define STATUS_RX_PROTO_UDP 1 +#define STATUS_RX_PROTO_ICMP 2 +#define STATUS_RX_PROTO_OTHER 3 +#define STATUS_RX_PROTO_MASK 3 +#define STATUS_RX_PROTO_SHIFT 18 +#define STATUS_FILTER_INDEX_MASK 0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK 0X7FFF +#define STATUS_TX_CSUM_START_SHIFT 16 +#define STATUS_TX_CSUM_PROTO_UDP 0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF +#define STATUS_TX_CSUM_LV 0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { + u32 cnt_64; /* RO Received/Transmited 64 bytes packet */ + u32 cnt_127; /* RO Rx/Tx 127 bytes packet */ + u32 cnt_255; /* RO Rx/Tx 65-255 bytes packet */ + u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */ + u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */ + u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */ + u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */ + u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet*/ + u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet*/ + u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkt; /* RO (0x428) Received pkt count*/ + u32 bytes; /* RO Received byte count */ + u32 mca; /* RO # of Received multicast pkt */ + u32 bca; /* RO # of Receive broadcast pkt */ + u32 fcs; /* RO # of Received FCS error */ + u32 cf; /* RO # of Received control frame pkt*/ + u32 pf; /* RO # of Received pause frame pkt */ + u32 uo; /* RO # of unknown op code pkt */ + u32 aln; /* RO # of alignment error count */ + u32 flr; /* RO # of frame length out of range count */ + u32 cde; /* RO # of code error pkt */ + u32 fcr; /* RO # of carrier sense error pkt */ + u32 ovr; /* RO # of oversize pkt*/ + u32 jbr; /* RO # of jabber count */ + u32 mtue; /* RO # of MTU error pkt*/ + u32 pok; /* RO # of Received good pkt */ + u32 uc; /* RO # of unicast pkt */ + u32 ppp; /* RO # of PPP pkt */ + u32 rcrc; /* 
RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { + struct bcmgenet_pkt_counters pkt_cnt; + u32 pkts; /* RO (0x4a8) Transmited pkt */ + u32 mca; /* RO # of xmited multicast pkt */ + u32 bca; /* RO # of xmited broadcast pkt */ + u32 pf; /* RO # of xmited pause frame count */ + u32 cf; /* RO # of xmited control frame count */ + u32 fcs; /* RO # of xmited FCS error count */ + u32 ovr; /* RO # of xmited oversize pkt */ + u32 drf; /* RO # of xmited deferral pkt */ + u32 edf; /* RO # of xmited Excessive deferral pkt*/ + u32 scl; /* RO # of xmited single collision pkt */ + u32 mcl; /* RO # of xmited multiple collision pkt*/ + u32 lcl; /* RO # of xmited late collision pkt */ + u32 ecl; /* RO # of xmited excessive collision pkt*/ + u32 frg; /* RO # of xmited fragments pkt*/ + u32 ncl; /* RO # of xmited total collision count */ + u32 jbr; /* RO # of xmited jabber count*/ + u32 bytes; /* RO # of xmited byte count */ + u32 pok; /* RO # of xmited good pkt */ + u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { + struct bcmgenet_rx_counters rx; + struct bcmgenet_tx_counters tx; + u32 rx_runt_cnt; + u32 rx_runt_fcs; + u32 rx_runt_fcs_align; + u32 rx_runt_bytes; + u32 rbuf_ovflow_cnt; + u32 rbuf_err_cnt; + u32 mdf_err_cnt; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 tx_dma_failed; +}; + +#define UMAC_HD_BKP_CTRL 0x004 +#define HD_FC_EN (1 << 0) +#define HD_FC_BKOFF_OK (1 << 1) +#define IPG_CONFIG_RX_SHIFT 2 +#define IPG_CONFIG_RX_MASK 0x1F + +#define UMAC_CMD 0x008 +#define CMD_TX_EN (1 << 0) +#define CMD_RX_EN (1 << 1) +#define UMAC_SPEED_10 0 +#define UMAC_SPEED_100 1 +#define UMAC_SPEED_1000 2 +#define UMAC_SPEED_2500 3 +#define CMD_SPEED_SHIFT 2 +#define CMD_SPEED_MASK 3 +#define CMD_PROMISC (1 << 4) +#define CMD_PAD_EN (1 << 5) +#define CMD_CRC_FWD (1 << 6) +#define CMD_PAUSE_FWD (1 << 7) +#define CMD_RX_PAUSE_IGNORE (1 << 8) +#define CMD_TX_ADDR_INS (1 << 9) +#define CMD_HD_EN (1 << 10) +#define CMD_SW_RESET (1 << 13) +#define CMD_LCL_LOOP_EN (1 << 15) +#define CMD_AUTO_CONFIG (1 << 22) +#define CMD_CNTL_FRM_EN (1 << 23) +#define CMD_NO_LEN_CHK (1 << 24) +#define CMD_RMT_LOOP_EN (1 << 25) +#define CMD_PRBL_EN (1 << 27) +#define CMD_TX_PAUSE_IGNORE (1 << 28) +#define CMD_TX_RX_EN (1 << 29) +#define CMD_RUNT_FILTER_DIS (1 << 30) + +#define UMAC_MAC0 0x00C +#define UMAC_MAC1 0x010 +#define UMAC_MAX_FRAME_LEN 0x014 + +#define UMAC_EEE_CTRL 0x064 +#define EN_LPI_RX_PAUSE (1 << 0) +#define EN_LPI_TX_PFC (1 << 1) +#define EN_LPI_TX_PAUSE (1 << 2) +#define EEE_EN (1 << 3) +#define RX_FIFO_CHECK (1 << 4) +#define EEE_TX_CLK_DIS (1 << 5) +#define DIS_EEE_10M (1 << 6) +#define LP_IDLE_PREDICTION_MODE (1 << 7) + +#define UMAC_EEE_LPI_TIMER 0x068 +#define UMAC_EEE_WAKE_TIMER 0x06C +#define UMAC_EEE_REF_COUNT 0x070 +#define EEE_REFERENCE_COUNT_MASK 0xffff + +#define UMAC_TX_FLUSH 0x334 + +#define UMAC_MIB_START 0x400 + +#define UMAC_MDIO_CMD 0x614 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define UMAC_RBUF_OVFL_CNT 0x61C + +#define UMAC_MPD_CTRL 0x620 +#define MPD_EN (1 << 0) +#define MPD_PW_EN (1 << 27) +#define MPD_MSEQ_LEN_SHIFT 16 +#define MPD_MSEQ_LEN_MASK 0xFF + +#define UMAC_MPD_PW_MS 0x624 +#define UMAC_MPD_PW_LS 0x628 +#define UMAC_RBUF_ERR_CNT 0x634 +#define UMAC_MDF_ERR_CNT 0x638 +#define UMAC_MDF_CTRL 0x650 
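
The UMAC_MDIO_CMD fields defined above compose into a single command word; bcmgenet_mii_read()/bcmgenet_mii_write() in bcmmii.c later in this patch build it exactly this way before setting MDIO_START_BUSY. A standalone sketch with an arbitrary PHY address, register, and write value:

#include <stdint.h>
#include <stdio.h>

#define MDIO_START_BUSY	(1u << 29)
#define MDIO_RD		(2u << 26)
#define MDIO_WR		(1u << 26)
#define MDIO_PMD_SHIFT	21
#define MDIO_REG_SHIFT	16

int main(void)
{
	uint32_t phy_id = 1, location = 2;	/* arbitrary PHY address/register */
	uint16_t val = 0x1940;			/* arbitrary value for the write */

	uint32_t rd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
		      (location << MDIO_REG_SHIFT);
	uint32_t wr = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
		      (location << MDIO_REG_SHIFT) | val;

	/* the driver then ORs in MDIO_START_BUSY to kick off the transfer */
	printf("read  cmd = 0x%08x\n", rd);	/* 0x08220000 */
	printf("write cmd = 0x%08x\n", wr);	/* 0x04221940 */
	printf("start bit = 0x%08x\n", MDIO_START_BUSY);
	return 0;
}
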
+#define UMAC_MDF_ADDR 0x654 +#define UMAC_MIB_CTRL 0x580 +#define MIB_RESET_RX (1 << 0) +#define MIB_RESET_RUNT (1 << 1) +#define MIB_RESET_TX (1 << 2) + +#define RBUF_CTRL 0x00 +#define RBUF_64B_EN (1 << 0) +#define RBUF_ALIGN_2B (1 << 1) +#define RBUF_BAD_DIS (1 << 2) + +#define RBUF_STATUS 0x0C +#define RBUF_STATUS_WOL (1 << 0) +#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1) +#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2) + +#define RBUF_CHK_CTRL 0x14 +#define RBUF_RXCHK_EN (1 << 0) +#define RBUF_SKIP_FCS (1 << 4) + +#define RBUF_ENERGY_CTRL 0x9c +#define RBUF_EEE_EN (1 << 0) +#define RBUF_PM_EN (1 << 1) + +#define RBUF_TBUF_SIZE_CTRL 0xb4 + +#define RBUF_HFB_CTRL_V1 0x38 +#define RBUF_HFB_FILTER_EN_SHIFT 16 +#define RBUF_HFB_FILTER_EN_MASK 0xffff0000 +#define RBUF_HFB_EN (1 << 0) +#define RBUF_HFB_256B (1 << 1) +#define RBUF_ACPI_EN (1 << 2) + +#define RBUF_HFB_LEN_V1 0x3C +#define RBUF_FLTR_LEN_MASK 0xFF +#define RBUF_FLTR_LEN_SHIFT 8 + +#define TBUF_CTRL 0x00 +#define TBUF_BP_MC 0x0C +#define TBUF_ENERGY_CTRL 0x14 +#define TBUF_EEE_EN (1 << 0) +#define TBUF_PM_EN (1 << 1) + +#define TBUF_CTRL_V1 0x80 +#define TBUF_BP_MC_V1 0xA0 + +#define HFB_CTRL 0x00 +#define HFB_FLT_ENABLE_V3PLUS 0x04 +#define HFB_FLT_LEN_V2 0x04 +#define HFB_FLT_LEN_V3PLUS 0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0C +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB (1 << 0) +#define UMAC_IRQ_EPHY (1 << 1) +#define UMAC_IRQ_PHY_DET_R (1 << 2) +#define UMAC_IRQ_PHY_DET_F (1 << 3) +#define UMAC_IRQ_LINK_UP (1 << 4) +#define UMAC_IRQ_LINK_DOWN (1 << 5) +#define UMAC_IRQ_LINK_EVENT (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN) +#define UMAC_IRQ_UMAC (1 << 6) +#define UMAC_IRQ_UMAC_TSV (1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9) +#define UMAC_IRQ_HFB_SM (1 << 10) +#define UMAC_IRQ_HFB_MM (1 << 11) +#define UMAC_IRQ_MPD_R (1 << 12) +#define UMAC_IRQ_RXDMA_MBDONE (1 << 13) +#define UMAC_IRQ_RXDMA_PDONE (1 << 14) +#define UMAC_IRQ_RXDMA_BDONE (1 << 15) +#define UMAC_IRQ_RXDMA_DONE (UMAC_IRQ_RXDMA_PDONE | \ + UMAC_IRQ_RXDMA_BDONE) +#define UMAC_IRQ_TXDMA_MBDONE (1 << 16) +#define UMAC_IRQ_TXDMA_PDONE (1 << 17) +#define UMAC_IRQ_TXDMA_BDONE (1 << 18) +#define UMAC_IRQ_TXDMA_DONE (UMAC_IRQ_TXDMA_PDONE | \ + UMAC_IRQ_TXDMA_BDONE) +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE (1 << 23) +#define UMAC_IRQ_MDIO_ERROR (1 << 24) + +/* INTRL2 instance 1 definitions */ +#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_MASK 0xFFFF +#define UMAC_IRQ1_RX_INTR_SHIFT 16 + +/* Register block offsets */ +#define GENET_SYS_OFF 0x0000 +#define GENET_GR_BRIDGE_OFF 0x0040 +#define GENET_EXT_OFF 0x0080 +#define GENET_INTRL2_0_OFF 0x0200 +#define GENET_INTRL2_1_OFF 0x0240 +#define GENET_RBUF_OFF 0x0300 +#define GENET_UMAC_OFF 0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL 0x00 +#define SYS_PORT_CTRL 0x04 +#define PORT_MODE_INT_EPHY 0 +#define PORT_MODE_INT_GPHY 1 +#define PORT_MODE_EXT_EPHY 2 +#define PORT_MODE_EXT_GPHY 3 +#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4)) +#define PORT_MODE_EXT_RVMII_50 4 +#define LED_ACT_SOURCE_MAC (1 << 9) + +#define SYS_RBUF_FLUSH_CTRL 0x08 +#define SYS_TBUF_FLUSH_CTRL 0x0C +#define RBUF_FLUSH_CTRL_V1 0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT 0x00 +#define EXT_PWR_DOWN_BIAS (1 
<< 0) +#define EXT_PWR_DOWN_DLL (1 << 1) +#define EXT_PWR_DOWN_PHY (1 << 2) +#define EXT_PWR_DN_EN_LD (1 << 3) +#define EXT_ENERGY_DET (1 << 4) +#define EXT_IDDQ_FROM_PHY (1 << 5) +#define EXT_PHY_RESET (1 << 8) +#define EXT_ENERGY_DET_MASK (1 << 12) + +#define EXT_RGMII_OOB_CTRL 0x0C +#define RGMII_LINK (1 << 4) +#define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) +#define ID_MODE_DIS (1 << 16) + +#define EXT_GPHY_CTRL 0x1C +#define EXT_CFG_IDDQ_BIAS (1 << 0) +#define EXT_CFG_PWR_DOWN (1 << 1) +#define EXT_CK25_DIS (1 << 4) +#define EXT_GPHY_RESET (1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE (0x40) +#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK 0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16 +#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT 16 +#define DMA_P_INDEX_MASK 0xFFFF +#define DMA_C_INDEX_MASK 0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK 0xFFFF +#define DMA_RING_SIZE_SHIFT 16 +#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK 0x00FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_MASK 0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT 16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_MASK 0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT 16 + + +/* DMA control register */ +#define DMA_EN (1 << 0) +#define DMA_RING_BUF_EN_SHIFT 0x01 +#define DMA_RING_BUF_EN_MASK 0xFFFF +#define DMA_TSB_SWAP_EN (1 << 20) + +/* DMA status register */ +#define DMA_DISABLED (1 << 0) +#define DMA_DESC_RAM_INIT_BUSY (1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK 0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK 0x1FFFF +#define DMA_PFC_ENABLE (1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE (1 << 0) +#define DMA_REGISTER_MODE (1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK 0xFFFF +#define DMA_TIMEOUT_VAL 5000 /* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK 0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK 0x03 +#define DMA_RING_BUF_PRIORITY_MASK 0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT 5 +#define DMA_PRIO_REG_INDEX(q) ((q) / 6) +#define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT) +#define DMA_RATE_ADJ_MASK 0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK 0x0fff +#define DMA_BUFLENGTH_SHIFT 16 +#define DMA_OWN 0x8000 +#define DMA_EOP 0x4000 +#define DMA_SOP 0x2000 +#define DMA_WRAP 0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN 0x0200 +#define DMA_TX_APPEND_CRC 0x0040 +#define DMA_TX_OW_CRC 0x0020 +#define DMA_TX_DO_CSUM 0x0010 +#define DMA_TX_QTAG_SHIFT 7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS 0x8000 +#define DMA_RX_CHK_V12 0x1000 +#define DMA_RX_BRDCAST 0x0040 +#define DMA_RX_MULT 0x0020 +#define DMA_RX_LG 0x0010 +#define DMA_RX_NO 0x0008 +#define DMA_RX_RXER 0x0004 +#define DMA_RX_CRC_ERROR 0x0002 +#define DMA_RX_OV 0x0001 +#define DMA_RX_FI_MASK 0x001F +#define DMA_RX_FI_SHIFT 0x0007 +#define DMA_DESC_ALLOC_MASK 0x00FF + +#define 
DMA_ARBITER_RR 0x00 +#define DMA_ARBITER_WRR 0x01 +#define DMA_ARBITER_SP 0x02 + +struct enet_cb { + struct sk_buff *skb; + void __iomem *bd_addr; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { + GENET_POWER_CABLE_SENSE = 0, + GENET_POWER_PASSIVE, + GENET_POWER_WOL_MAGIC, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { + GENET_V1 = 1, + GENET_V2, + GENET_V3, + GENET_V4 +}; + +#define GENET_IS_V1(p) ((p)->version == GENET_V1) +#define GENET_IS_V2(p) ((p)->version == GENET_V2) +#define GENET_IS_V3(p) ((p)->version == GENET_V3) +#define GENET_IS_V4(p) ((p)->version == GENET_V4) + +/* Hardware flags */ +#define GENET_HAS_40BITS (1 << 0) +#define GENET_HAS_EXT (1 << 1) +#define GENET_HAS_MDIO_INTR (1 << 2) +#define GENET_HAS_MOCA_LINK_DET (1 << 3) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { + u8 tx_queues; + u8 tx_bds_per_q; + u8 rx_queues; + u8 rx_bds_per_q; + u8 bp_in_en_shift; + u32 bp_in_mask; + u8 hfb_filter_cnt; + u8 hfb_filter_size; + u8 qtag_mask; + u16 tbuf_offset; + u32 hfb_offset; + u32 hfb_reg_offset; + u32 rdma_offset; + u32 tdma_offset; + u32 words_per_bd; + u32 flags; +}; + +struct bcmgenet_tx_ring { + spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ + unsigned int index; /* ring index */ + unsigned int queue; /* queue index */ + struct enet_cb *cbs; /* tx ring buffer control block*/ + unsigned int size; /* size of each tx ring */ + unsigned int clean_ptr; /* Tx ring clean pointer */ + unsigned int c_index; /* last consumer index of each ring*/ + unsigned int free_bds; /* # of free bds for each ring */ + unsigned int write_ptr; /* Tx ring write pointer SW copy */ + unsigned int prod_index; /* Tx ring producer index SW copy */ + unsigned int cb_ptr; /* Tx ring initial CB ptr */ + unsigned int end_ptr; /* Tx ring end CB ptr */ + void (*int_enable)(struct bcmgenet_tx_ring *); + void (*int_disable)(struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; +}; + +struct bcmgenet_rx_ring { + struct napi_struct napi; /* Rx NAPI struct */ + unsigned int index; /* Rx ring index */ + struct enet_cb *cbs; /* Rx ring buffer control block */ + unsigned int size; /* Rx ring size */ + unsigned int c_index; /* Rx last consumer index */ + unsigned int read_ptr; /* Rx ring read pointer */ + unsigned int cb_ptr; /* Rx ring initial CB ptr */ + unsigned int end_ptr; /* Rx ring end CB ptr */ + unsigned int old_discards; + void (*int_enable)(struct bcmgenet_rx_ring *); + void (*int_disable)(struct bcmgenet_rx_ring *); + struct bcmgenet_priv *priv; +}; + +/* device context */ +struct bcmgenet_priv { + void __iomem *base; + enum bcmgenet_version version; + struct net_device *dev; + + /* transmit variables */ + void __iomem *tx_bds; + struct enet_cb *tx_cbs; + unsigned int num_tx_bds; + + struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + + /* receive variables */ + void __iomem *rx_bds; + struct enet_cb *rx_cbs; + unsigned int num_rx_bds; + unsigned int rx_buf_len; + + struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + + /* other misc variables */ + struct bcmgenet_hw_params *hw_params; + + /* MDIO bus variables */ + wait_queue_head_t wq; + struct phy_device *phydev; + struct device_node *phy_dn; + struct mii_bus *mii_bus; + u16 gphy_rev; + struct clk *clk_eee; 
+ bool clk_eee_enabled; + + /* PHY device variables */ + int old_link; + int old_speed; + int old_duplex; + int old_pause; + phy_interface_t phy_interface; + int phy_addr; + int ext_phy; + + /* Interrupt variables */ + struct work_struct bcmgenet_irq_work; + int irq0; + int irq1; + unsigned int irq0_stat; + unsigned int irq1_stat; + int wol_irq; + bool wol_irq_disabled; + + /* HW descriptors/checksum variables */ + bool desc_64b_en; + bool desc_rxchk_en; + bool crc_fwd_en; + + unsigned int dma_rx_chk_bit; + + u32 msg_enable; + + struct clk *clk; + struct platform_device *pdev; + + /* WOL */ + struct clk *clk_wol; + u32 wolopts; + + struct bcmgenet_mib_counters mib; + + struct ethtool_eee eee; +}; + +#define GENET_IO_MACRO(name, offset) \ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ + u32 off) \ +{ \ + return __raw_readl(priv->base + offset + off); \ +} \ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \ + u32 val, u32 off) \ +{ \ + __raw_writel(val, priv->base + offset + off); \ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev, bool init); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_mii_reset(struct net_device *dev); +void bcmgenet_phy_power_set(struct net_device *dev, bool enable); +void bcmgenet_mii_setup(struct net_device *dev); + +/* Wake-on-LAN routines */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); + +#endif /* __BCMGENET_H__ */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c new file mode 100644 index 000000000..b97122926 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -0,0 +1,210 @@ +/* + * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support + * + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) "bcmgenet_wol: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet + * Detection is supported through ethtool + */ +void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + + wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE; + wol->wolopts = priv->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS); + put_unaligned_be16(reg, &wol->sopass[0]); + reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS); + put_unaligned_be32(reg, &wol->sopass[2]); + } +} + +/* ethtool function - set WOL (Wake on LAN) settings. + * Only for magic packet detection mode. + */ +int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + u32 reg; + + if (!device_can_wakeup(kdev)) + return -ENOTSUPP; + + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) + return -EINVAL; + + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (wol->wolopts & WAKE_MAGICSECURE) { + bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), + UMAC_MPD_PW_MS); + bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), + UMAC_MPD_PW_LS); + reg |= MPD_PW_EN; + } else { + reg &= ~MPD_PW_EN; + } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + + /* Flag the device and relevant IRQ as wakeup capable */ + if (wol->wolopts) { + device_set_wakeup_enable(kdev, 1); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; + } else { + device_set_wakeup_enable(kdev, 0); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; + } + + priv->wolopts = wol->wolopts; + + return 0; +} + +static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv) +{ + struct net_device *dev = priv->dev; + int retries = 0; + + while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) + & RBUF_STATUS_WOL)) { + retries++; + if (retries > 5) { + netdev_crit(dev, "polling wol mode timeout\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return retries; +} + +int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + struct net_device *dev = priv->dev; + u32 cpu_mask_clear; + int retries = 0; + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + /* disable RX */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + mdelay(10); + + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg |= MPD_EN; + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + + /* Do not leave UniMAC in MPD mode only */ + retries = bcmgenet_poll_wol_status(priv); + if (retries < 0) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~MPD_EN; + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + return retries; + } + + netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", + retries); + + /* Enable CRC forward */ + reg = 
bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = 1; + reg |= CMD_CRC_FWD; + + /* Receiver must be enabled for WOL MP detection */ + reg |= CMD_RX_EN; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg &= ~EXT_ENERGY_DET_MASK; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + + /* Enable the MPD interrupt */ + cpu_mask_clear = UMAC_IRQ_MPD_R; + + bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); + + return 0; +} + +void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) +{ + u32 cpu_mask_set; + u32 reg; + + if (mode != GENET_POWER_WOL_MAGIC) { + netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); + return; + } + + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + reg &= ~MPD_EN; + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + + /* Disable CRC Forward */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~CMD_CRC_FWD; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + priv->crc_fwd_en = 0; + + /* Stop monitoring magic packet IRQ */ + cpu_mask_set = UMAC_IRQ_MPD_R; + + /* Stop monitoring magic packet IRQ */ + bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET); +} diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c new file mode 100644 index 000000000..420949cc5 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -0,0 +1,592 @@ +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
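bcmgenet_wol_power_down_cfg() and bcmgenet_wol_power_up_cfg() above are mirror images: every bit set when arming magic-packet wake (MPD_EN, CMD_CRC_FWD, the unmasked UMAC_IRQ_MPD_R interrupt) is cleared again on resume. A small userspace sketch of that symmetric read-modify-write discipline follows, with plain variables standing in for UMAC_MPD_CTRL and UMAC_CMD and made-up bit positions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only, not the hardware's. */
#define FAKE_MPD_EN     (1u << 0)
#define FAKE_CRC_FWD    (1u << 6)

static uint32_t fake_mpd_ctrl;  /* stands in for UMAC_MPD_CTRL */
static uint32_t fake_umac_cmd;  /* stands in for UMAC_CMD */

static void wol_arm(void)
{
        fake_mpd_ctrl |= FAKE_MPD_EN;   /* enable magic-packet detection */
        fake_umac_cmd |= FAKE_CRC_FWD;  /* mirrors the CMD_CRC_FWD toggle above */
}

static void wol_disarm(void)
{
        fake_mpd_ctrl &= ~FAKE_MPD_EN;  /* undo exactly what wol_arm() set */
        fake_umac_cmd &= ~FAKE_CRC_FWD;
}

int main(void)
{
        wol_arm();
        printf("armed:    mpd=%#x cmd=%#x\n",
               (unsigned int)fake_mpd_ctrl, (unsigned int)fake_umac_cmd);
        wol_disarm();
        printf("disarmed: mpd=%#x cmd=%#x\n",
               (unsigned int)fake_mpd_ctrl, (unsigned int)fake_umac_cmd);
        return 0;
}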
+ */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bcmgenet.h" + +/* read a value from the MII */ +static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) +{ + int ret; + struct net_device *dev = bus->priv; + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + + bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | + (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); + /* Start MDIO transaction*/ + reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); + reg |= MDIO_START_BUSY; + bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); + + if (ret & MDIO_READ_FAIL) + return -EIO; + + return ret & 0xffff; +} + +/* write a value to the MII */ +static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, + int location, u16 val) +{ + struct net_device *dev = bus->priv; + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + + bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | + (location << MDIO_REG_SHIFT) | (0xffff & val)), + UMAC_MDIO_CMD); + reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); + reg |= MDIO_START_BUSY; + bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & + MDIO_START_BUSY), + HZ / 100); + + return 0; +} + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + u32 reg, cmd_bits = 0; + bool status_changed = false; + + if (priv->old_link != phydev->link) { + status_changed = true; + priv->old_link = phydev->link; + } + + if (phydev->link) { + /* check speed/duplex/pause changes */ + if (priv->old_speed != phydev->speed) { + status_changed = true; + priv->old_speed = phydev->speed; + } + + if (priv->old_duplex != phydev->duplex) { + status_changed = true; + priv->old_duplex = phydev->duplex; + } + + if (priv->old_pause != phydev->pause) { + status_changed = true; + priv->old_pause = phydev->pause; + } + + /* done if nothing has changed */ + if (!status_changed) + return; + + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = UMAC_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = UMAC_SPEED_100; + else + cmd_bits = UMAC_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; + + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) + cmd_bits |= CMD_HD_EN; + + /* pause capability */ + if (!phydev->pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + + /* + * Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
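bcmgenet_mii_setup() caches the last link, speed, duplex and pause values it saw and reprograms UMAC_CMD only when one of them changed, so the PHY library's periodic callback costs nothing when there is no news. A compact, standalone sketch of that change-detection pattern; the struct and field names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

struct link_state {
        int link, speed, duplex, pause;
};

/* Update the cache and report whether anything actually changed. */
static bool link_state_changed(struct link_state *cached,
                               const struct link_state *now)
{
        bool changed = cached->link != now->link ||
                       cached->speed != now->speed ||
                       cached->duplex != now->duplex ||
                       cached->pause != now->pause;

        if (changed)
                *cached = *now;
        return changed;
}

int main(void)
{
        struct link_state cached = { -1, -1, -1, -1 };  /* "unknown", like the driver's old_* init to -1 */
        struct link_state report = { 1, 1000, 1, 0 };

        printf("first callback:  %s\n",
               link_state_changed(&cached, &report) ? "reprogram MAC" : "nothing to do");
        printf("second callback: %s\n",
               link_state_changed(&cached, &report) ? "reprogram MAC" : "nothing to do");
        return 0;
}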
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + bcmgenet_umac_writel(priv, reg, UMAC_CMD); + } else { + /* done if nothing has changed */ + if (!status_changed) + return; + + /* needed for MoCA fixed PHY to reflect correct link status */ + netif_carrier_off(dev); + } + + phy_print_status(phydev); +} + +void bcmgenet_mii_reset(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (priv->phydev) { + phy_init_hw(priv->phydev); + phy_start_aneg(priv->phydev); + } +} + +void bcmgenet_phy_power_set(struct net_device *dev, bool enable) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg = 0; + + /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ + if (!GENET_IS_V4(priv)) + return; + + reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); + if (enable) { + reg &= ~EXT_CK25_DIS; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); + reg |= EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + + reg &= ~EXT_GPHY_RESET; + } else { + reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET; + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + mdelay(1); + reg |= EXT_CK25_DIS; + } + bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); + udelay(60); +} + +static void bcmgenet_internal_phy_setup(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + + /* Power up PHY */ + bcmgenet_phy_power_set(dev, true); + /* enable APD */ + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= EXT_PWR_DN_EN_LD; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + bcmgenet_mii_reset(dev); +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ + u32 reg; + + /* Speed settings are set in bcmgenet_mii_setup() */ + reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); + reg |= LED_ACT_SOURCE_MAC; + bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); +} + +int bcmgenet_mii_config(struct net_device *dev, bool init) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + struct device *kdev = &priv->pdev->dev; + const char *phy_name = NULL; + u32 id_mode_dis = 0; + u32 port_ctrl; + u32 reg; + + priv->ext_phy = !phy_is_internal(priv->phydev) && + (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + + if (phy_is_internal(priv->phydev)) + priv->phy_interface = PHY_INTERFACE_MODE_NA; + + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_NA: + case PHY_INTERFACE_MODE_MOCA: + /* Irrespective of the actually configured PHY speed (100 or + * 1000) GENETv4 only has an internal GPHY so we will just end + * up masking the Gigabit features from what we support, not + * switching to the EPHY + */ + if (GENET_IS_V4(priv)) + port_ctrl = PORT_MODE_INT_GPHY; + else + port_ctrl = PORT_MODE_INT_EPHY; + + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + + if (phy_is_internal(priv->phydev)) { + phy_name = "internal PHY"; + bcmgenet_internal_phy_setup(dev); + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + phy_name = "MoCA"; + bcmgenet_moca_phy_setup(priv); + } + break; + + case PHY_INTERFACE_MODE_MII: + phy_name = "external MII"; + phydev->supported &= PHY_BASIC_FEATURES; + bcmgenet_sys_writel(priv, + PORT_MODE_EXT_EPHY, 
SYS_PORT_CTRL); + break; + + case PHY_INTERFACE_MODE_REVMII: + phy_name = "external RvMII"; + /* of_mdiobus_register took care of reading the 'max-speed' + * PHY property for us, effectively limiting the PHY supported + * capabilities, use that knowledge to also configure the + * Reverse MII interface correctly. + */ + if ((priv->phydev->supported & PHY_BASIC_FEATURES) == + PHY_BASIC_FEATURES) + port_ctrl = PORT_MODE_EXT_RVMII_25; + else + port_ctrl = PORT_MODE_EXT_RVMII_50; + bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + break; + + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * RGMII: Add 2ns delay on TXC (90 degree shift) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. + */ + id_mode_dis = BIT(16); + /* fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + if (id_mode_dis) + phy_name = "external RGMII (no delay)"; + else + phy_name = "external RGMII (TX delay)"; + bcmgenet_sys_writel(priv, + PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); + break; + default: + dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); + return -EINVAL; + } + + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg |= RGMII_MODE_EN | id_mode_dis; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + + if (init) + dev_info(kdev, "configuring instance for %s\n", phy_name); + + return 0; +} + +static int bcmgenet_mii_probe(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + struct phy_device *phydev; + u32 phy_flags; + int ret; + + /* Communicate the integrated PHY revision */ + phy_flags = priv->gphy_rev; + + /* Initialize link state variables that bcmgenet_mii_setup() uses */ + priv->old_link = -1; + priv->old_speed = -1; + priv->old_duplex = -1; + priv->old_pause = -1; + + if (dn) { + if (priv->phydev) { + pr_info("PHY already attached\n"); + return 0; + } + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); + if (!phydev) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } else { + phydev = priv->phydev; + phydev->dev_flags = phy_flags; + + ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, + priv->phy_interface); + if (ret) { + pr_err("could not attach to PHY\n"); + return -ENODEV; + } + } + + priv->phydev = phydev; + + /* Configure port multiplexer based on what the probed PHY device since + * reading the 'max-speed' property determines the maximum supported + * PHY speed which is needed for bcmgenet_mii_config() to configure + * things appropriately. 
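The RGMII handling above relies on a deliberate switch fall-through: PHY_INTERFACE_MODE_RGMII sets id_mode_dis and then reuses the PHY_INTERFACE_MODE_RGMII_TXID programming, so only the delay flag differs between the two cases. A reduced sketch of that structure; the enum and strings are invented for illustration.

#include <stdio.h>

enum fake_phy_mode { FAKE_RGMII, FAKE_RGMII_TXID, FAKE_MII };

static const char *describe(enum fake_phy_mode mode)
{
        int no_delay = 0;

        switch (mode) {
        case FAKE_RGMII:
                no_delay = 1;   /* TXC not delayed; the board must provide the skew */
                /* fall through */
        case FAKE_RGMII_TXID:
                return no_delay ? "external RGMII (no delay)"
                                : "external RGMII (TX delay)";
        case FAKE_MII:
                return "external MII";
        }
        return "unknown";
}

int main(void)
{
        printf("%s\n", describe(FAKE_RGMII));
        printf("%s\n", describe(FAKE_RGMII_TXID));
        return 0;
}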
+ */ + ret = bcmgenet_mii_config(dev, true); + if (ret) { + phy_disconnect(priv->phydev); + return ret; + } + + phydev->advertising = phydev->supported; + + /* The internal PHY has its link interrupts routed to the + * Ethernet MAC ISRs + */ + if (phy_is_internal(priv->phydev)) + priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; + else + priv->mii_bus->irq[phydev->addr] = PHY_POLL; + + pr_info("attached PHY at address %d [%s]\n", + phydev->addr, phydev->drv->name); + + return 0; +} + +static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) +{ + struct mii_bus *bus; + + if (priv->mii_bus) + return 0; + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + pr_err("failed to allocate\n"); + return -ENOMEM; + } + + bus = priv->mii_bus; + bus->priv = priv->dev; + bus->name = "bcmgenet MII bus"; + bus->parent = &priv->pdev->dev; + bus->read = bcmgenet_mii_read; + bus->write = bcmgenet_mii_write; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", + priv->pdev->name, priv->pdev->id); + + bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (!bus->irq) { + mdiobus_free(priv->mii_bus); + return -ENOMEM; + } + + return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + struct device_node *mdio_dn; + char *compat; + int ret; + + compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); + if (!compat) + return -ENOMEM; + + mdio_dn = of_find_compatible_node(dn, NULL, compat); + kfree(compat); + if (!mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); + return -ENODEV; + } + + ret = of_mdiobus_register(priv->mii_bus, mdio_dn); + if (ret) { + dev_err(kdev, "failed to register MDIO bus\n"); + return ret; + } + + /* Fetch the PHY phandle */ + priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + + /* Get the link mode */ + priv->phy_interface = of_get_phy_mode(dn); + + return 0; +} + +static int bcmgenet_fixed_phy_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + if (dev && dev->phydev && status) + status->link = dev->phydev->link; + + return 0; +} + +static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + struct mii_bus *mdio = priv->mii_bus; + struct phy_device *phydev; + int ret; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + mdio->phy_mask = ~(1 << pd->phy_address); + else + mdio->phy_mask = 0; + + ret = mdiobus_register(mdio); + if (ret) { + dev_err(kdev, "failed to register MDIO bus\n"); + return ret; + } + + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + phydev = mdio->phy_map[pd->phy_address]; + else + phydev = phy_find_first(mdio); + + if (!phydev) { + dev_err(kdev, "failed to register PHY device\n"); + mdiobus_unregister(mdio); + return -ENODEV; + } + } else { + /* + * MoCA port or no MDIO access. + * Use fixed PHY to represent the link layer. 
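In the platform-data path just above, mdio->phy_mask = ~(1 << pd->phy_address) restricts the MDIO scan to a single address: a set bit in phy_mask means that address is skipped. A tiny sketch of the mask arithmetic; the address is hypothetical and only the first eight of the 32 possible addresses are printed.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int phy_address = 5;                            /* hypothetical strap address */
        uint32_t phy_mask = ~(1u << phy_address);       /* set bits = addresses to skip */
        int addr;

        for (addr = 0; addr < 8; addr++)
                printf("addr %d: %s\n", addr,
                       (phy_mask & (1u << addr)) ? "skipped" : "probed");
        return 0;
}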
+ */ + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = pd->phy_speed, + .duplex = pd->phy_duplex, + .pause = 0, + .asym_pause = 0, + }; + + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); + if (!phydev || IS_ERR(phydev)) { + dev_err(kdev, "failed to register fixed PHY device\n"); + return -ENODEV; + } + + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) { + ret = fixed_phy_set_link_update( + phydev, bcmgenet_fixed_phy_link_update); + if (!ret) + phydev->link = 0; + } + } + + priv->phydev = phydev; + priv->phy_interface = pd->phy_interface; + + return 0; +} + +static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + + if (dn) + return bcmgenet_mii_of_init(priv); + else + return bcmgenet_mii_pd_init(priv); +} + +int bcmgenet_mii_init(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret; + + ret = bcmgenet_mii_alloc(priv); + if (ret) + return ret; + + ret = bcmgenet_mii_bus_init(priv); + if (ret) + goto out_free; + + ret = bcmgenet_mii_probe(dev); + if (ret) + goto out; + + return 0; + +out: + of_node_put(priv->phy_dn); + mdiobus_unregister(priv->mii_bus); +out_free: + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); + return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + of_node_put(priv->phy_dn); + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); +} diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c new file mode 100644 index 000000000..ac27e2426 --- /dev/null +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -0,0 +1,2670 @@ +/* + * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation + * Copyright (c) 2006, 2007 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * + * This driver is designed for the Broadcom SiByte SOC built-in + * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp. + * + * Updated to the driver model and the PHY abstraction layer + * by Maciej W. Rozycki. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include /* Processor type for cache alignment. */ + +/* Operational parameters that usually are not changed. */ + +#define CONFIG_SBMAC_COALESCE + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + + +MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)"); +MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver"); + +/* A few user-configurable values which may be modified when a driver + module is loaded. */ + +/* 1 normal messages, 0 quiet .. 7 verbose. 
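bcmgenet_mii_init() above unwinds with gotos in reverse order of acquisition: a PHY probe failure drops the OF node reference and unregisters the MDIO bus before falling through to the frees, while a bus-init failure only frees the allocations. A minimal userspace sketch of that acquire/unwind shape, using malloc()ed placeholders instead of real resources (they are freed on success too, only so the demo itself does not leak).

#include <stdio.h>
#include <stdlib.h>

static int demo_init(int fail_at_step)
{
        char *bus, *irq_table;

        bus = malloc(16);               /* step 1: the "MDIO bus" */
        if (!bus)
                return -1;

        irq_table = malloc(64);         /* step 2: the "irq table" */
        if (!irq_table)
                goto out_free_bus;

        if (fail_at_step == 3)          /* step 3: pretend the PHY probe failed */
                goto out_free_irq;

        free(irq_table);                /* success path (demo only) */
        free(bus);
        return 0;

out_free_irq:
        free(irq_table);
out_free_bus:
        free(bus);
        return -1;
}

int main(void)
{
        printf("fail at step 3 -> %d\n", demo_init(3));
        printf("no failure     -> %d\n", demo_init(0));
        return 0;
}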
*/ +static int debug = 1; +module_param(debug, int, S_IRUGO); +MODULE_PARM_DESC(debug, "Debug messages"); + +#ifdef CONFIG_SBMAC_COALESCE +static int int_pktcnt_tx = 255; +module_param(int_pktcnt_tx, int, S_IRUGO); +MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count"); + +static int int_timeout_tx = 255; +module_param(int_timeout_tx, int, S_IRUGO); +MODULE_PARM_DESC(int_timeout_tx, "TX timeout value"); + +static int int_pktcnt_rx = 64; +module_param(int_pktcnt_rx, int, S_IRUGO); +MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count"); + +static int int_timeout_rx = 64; +module_param(int_timeout_rx, int, S_IRUGO); +MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); +#endif + +#include +#include +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) +#include +#include +#define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST +#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) +#include +#include +#else +#error invalid SiByte MAC configuration +#endif +#include +#include +#include + +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) +#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2)) +#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) +#define UNIT_INT(n) (K_INT_MAC_0 + (n)) +#else +#error invalid SiByte MAC configuration +#endif + +#ifdef K_INT_PHY +#define SBMAC_PHY_INT K_INT_PHY +#else +#define SBMAC_PHY_INT PHY_POLL +#endif + +/********************************************************************** + * Simple types + ********************************************************************* */ + +enum sbmac_speed { + sbmac_speed_none = 0, + sbmac_speed_10 = SPEED_10, + sbmac_speed_100 = SPEED_100, + sbmac_speed_1000 = SPEED_1000, +}; + +enum sbmac_duplex { + sbmac_duplex_none = -1, + sbmac_duplex_half = DUPLEX_HALF, + sbmac_duplex_full = DUPLEX_FULL, +}; + +enum sbmac_fc { + sbmac_fc_none, + sbmac_fc_disabled, + sbmac_fc_frame, + sbmac_fc_collision, + sbmac_fc_carrier, +}; + +enum sbmac_state { + sbmac_state_uninit, + sbmac_state_off, + sbmac_state_on, + sbmac_state_broken, +}; + + +/********************************************************************** + * Macros + ********************************************************************* */ + + +#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \ + (d)->sbdma_dscrtable : (d)->f+1) + + +#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) + +#define SBMAC_MAX_TXDESCR 256 +#define SBMAC_MAX_RXDESCR 256 + +#define ENET_PACKET_SIZE 1518 +/*#define ENET_PACKET_SIZE 9216 */ + +/********************************************************************** + * DMA Descriptor structure + ********************************************************************* */ + +struct sbdmadscr { + uint64_t dscr_a; + uint64_t dscr_b; +}; + +/********************************************************************** + * DMA Controller structure + ********************************************************************* */ + +struct sbmacdma { + + /* + * This stuff is used to identify the channel and the registers + * associated with it. 
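SBDMA_NEXTBUF above advances a descriptor pointer and wraps it back to the start of the table, and the add-buffer paths further down treat "next slot == remove pointer" as ring full, deliberately leaving one descriptor unused so full and empty stay distinguishable. A standalone sketch of that ring arithmetic using indices instead of pointers; the ring size is kept tiny for the printout.

#include <stdio.h>

#define RING_SIZE 4     /* the driver uses 256 descriptors per ring */

static int ring_next(int idx)
{
        return (idx + 1 == RING_SIZE) ? 0 : idx + 1;    /* wrap like SBDMA_NEXTBUF */
}

int main(void)
{
        int add = 0, rem = 0, queued = 0;

        /* Add until the "next == remove pointer" full test trips. */
        while (ring_next(add) != rem) {
                add = ring_next(add);
                queued++;
        }
        printf("queued %d buffers in a %d-entry ring (one slot kept free)\n",
               queued, RING_SIZE);
        return 0;
}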
+ */ + struct sbmac_softc *sbdma_eth; /* back pointer to associated + MAC */ + int sbdma_channel; /* channel number */ + int sbdma_txdir; /* direction (1=transmit) */ + int sbdma_maxdescr; /* total # of descriptors + in ring */ +#ifdef CONFIG_SBMAC_COALESCE + int sbdma_int_pktcnt; + /* # descriptors rx/tx + before interrupt */ + int sbdma_int_timeout; + /* # usec rx/tx interrupt */ +#endif + void __iomem *sbdma_config0; /* DMA config register 0 */ + void __iomem *sbdma_config1; /* DMA config register 1 */ + void __iomem *sbdma_dscrbase; + /* descriptor base address */ + void __iomem *sbdma_dscrcnt; /* descriptor count register */ + void __iomem *sbdma_curdscr; /* current descriptor + address */ + void __iomem *sbdma_oodpktlost; + /* pkt drop (rx only) */ + + /* + * This stuff is for maintenance of the ring + */ + void *sbdma_dscrtable_unaligned; + struct sbdmadscr *sbdma_dscrtable; + /* base of descriptor table */ + struct sbdmadscr *sbdma_dscrtable_end; + /* end of descriptor table */ + struct sk_buff **sbdma_ctxtable; + /* context table, one + per descr */ + dma_addr_t sbdma_dscrtable_phys; + /* and also the phys addr */ + struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ + struct sbdmadscr *sbdma_remptr; /* next dscr for sw + to remove */ +}; + + +/********************************************************************** + * Ethernet softc structure + ********************************************************************* */ + +struct sbmac_softc { + + /* + * Linux-specific things + */ + struct net_device *sbm_dev; /* pointer to linux device */ + struct napi_struct napi; + struct phy_device *phy_dev; /* the associated PHY device */ + struct mii_bus *mii_bus; /* the MII bus */ + int phy_irq[PHY_MAX_ADDR]; + spinlock_t sbm_lock; /* spin lock */ + int sbm_devflags; /* current device flags */ + + /* + * Controller-specific things + */ + void __iomem *sbm_base; /* MAC's base address */ + enum sbmac_state sbm_state; /* current state */ + + void __iomem *sbm_macenable; /* MAC Enable Register */ + void __iomem *sbm_maccfg; /* MAC Config Register */ + void __iomem *sbm_fifocfg; /* FIFO Config Register */ + void __iomem *sbm_framecfg; /* Frame Config Register */ + void __iomem *sbm_rxfilter; /* Receive Filter Register */ + void __iomem *sbm_isr; /* Interrupt Status Register */ + void __iomem *sbm_imr; /* Interrupt Mask Register */ + void __iomem *sbm_mdio; /* MDIO Register */ + + enum sbmac_speed sbm_speed; /* current speed */ + enum sbmac_duplex sbm_duplex; /* current duplex */ + enum sbmac_fc sbm_fc; /* cur. 
flow control setting */ + int sbm_pause; /* current pause setting */ + int sbm_link; /* current link state */ + + unsigned char sbm_hwaddr[ETH_ALEN]; + + struct sbmacdma sbm_txdma; /* only channel 0 for now */ + struct sbmacdma sbm_rxdma; + int rx_hw_checksum; + int sbe_idx; +}; + + +/********************************************************************** + * Externs + ********************************************************************* */ + +/********************************************************************** + * Prototypes + ********************************************************************* */ + +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr); +static void sbdma_channel_start(struct sbmacdma *d, int rxtx); +static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, + struct sk_buff *m); +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); +static void sbdma_emptyring(struct sbmacdma *d); +static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll); +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll); +static int sbmac_initctx(struct sbmac_softc *s); +static void sbmac_channel_start(struct sbmac_softc *s); +static void sbmac_channel_stop(struct sbmac_softc *s); +static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, + enum sbmac_state); +static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); +static uint64_t sbmac_addr2reg(unsigned char *ptr); +static irqreturn_t sbmac_intr(int irq, void *dev_instance); +static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); +static void sbmac_setmulti(struct sbmac_softc *sc); +static int sbmac_init(struct platform_device *pldev, long long base); +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc); + +static int sbmac_open(struct net_device *dev); +static void sbmac_tx_timeout (struct net_device *dev); +static void sbmac_set_rx_mode(struct net_device *dev); +static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int sbmac_close(struct net_device *dev); +static int sbmac_poll(struct napi_struct *napi, int budget); + +static void sbmac_mii_poll(struct net_device *dev); +static int sbmac_mii_probe(struct net_device *dev); + +static void sbmac_mii_sync(void __iomem *sbm_mdio); +static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, + int bitcnt); +static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx); +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val); + + +/********************************************************************** + * Globals + ********************************************************************* */ + +static char sbmac_string[] = "sb1250-mac"; + +static char sbmac_mdio_string[] = "sb1250-mac-mdio"; + + +/********************************************************************** + * MDIO constants + ********************************************************************* */ + +#define MII_COMMAND_START 0x01 +#define MII_COMMAND_READ 0x02 +#define MII_COMMAND_WRITE 0x01 +#define MII_COMMAND_ACK 0x02 + +#define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */ + +#define ENABLE 1 +#define DISABLE 0 + 
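The MII_COMMAND_* values above are the field contents of an IEEE 802.3 Clause-22 MDIO frame, which the bit-banging helpers below shift onto the wire a couple of bits at a time. As a side illustration, this sketch packs the 14 header bits (start, opcode, PHY address, register index) into one integer so the framing is easier to see; the packing itself is not something the driver does.

#include <stdio.h>

/* Same field values as the MII_COMMAND_* constants above. */
#define ST_START        0x01    /* 2 bits: 01 */
#define OP_READ         0x02    /* 2 bits: 10 */
#define OP_WRITE        0x01    /* 2 bits: 01 */

/* Pack ST, OP, PHYAD and REGAD into the 14 header bits of a Clause-22 frame. */
static unsigned int mdio_c22_header(unsigned int op, unsigned int phyaddr,
                                    unsigned int regidx)
{
        return (ST_START << 12) | (op << 10) |
               ((phyaddr & 0x1f) << 5) | (regidx & 0x1f);
}

int main(void)
{
        printf("read  PHY 1, reg 2: header = 0x%04x\n", mdio_c22_header(OP_READ, 1, 2));
        printf("write PHY 1, reg 0: header = 0x%04x\n", mdio_c22_header(OP_WRITE, 1, 0));
        return 0;
}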
+/********************************************************************** + * SBMAC_MII_SYNC(sbm_mdio) + * + * Synchronize with the MII - send a pattern of bits to the MII + * that will guarantee that it is ready to accept a command. + * + * Input parameters: + * sbm_mdio - address of the MAC's MDIO register + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbmac_mii_sync(void __iomem *sbm_mdio) +{ + int cnt; + uint64_t bits; + int mac_mdio_genc; + + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; + + bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; + + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); + + for (cnt = 0; cnt < 32; cnt++) { + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); + } +} + +/********************************************************************** + * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt) + * + * Send some bits to the MII. The bits to be sent are right- + * justified in the 'data' parameter. + * + * Input parameters: + * sbm_mdio - address of the MAC's MDIO register + * data - data to send + * bitcnt - number of bits to send + ********************************************************************* */ + +static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, + int bitcnt) +{ + int i; + uint64_t bits; + unsigned int curmask; + int mac_mdio_genc; + + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; + + bits = M_MAC_MDIO_DIR_OUTPUT; + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); + + curmask = 1 << (bitcnt - 1); + + for (i = 0; i < bitcnt; i++) { + if (data & curmask) + bits |= M_MAC_MDIO_OUT; + else bits &= ~M_MAC_MDIO_OUT; + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); + curmask >>= 1; + } +} + + + +/********************************************************************** + * SBMAC_MII_READ(bus, phyaddr, regidx) + * Read a PHY register. + * + * Input parameters: + * bus - MDIO bus handle + * phyaddr - PHY's address + * regnum - index of register to read + * + * Return value: + * value read, or 0xffff if an error occurred. + ********************************************************************* */ + +static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx) +{ + struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; + void __iomem *sbm_mdio = sc->sbm_mdio; + int idx; + int error; + int regval; + int mac_mdio_genc; + + /* + * Synchronize ourselves so that the PHY knows the next + * thing coming down is a command + */ + sbmac_mii_sync(sbm_mdio); + + /* + * Send the data to the PHY. The sequence is + * a "start" command (2 bits) + * a "read" command (2 bits) + * the PHY addr (5 bits) + * the register index (5 bits) + */ + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2); + sbmac_mii_senddata(sbm_mdio, phyaddr, 5); + sbmac_mii_senddata(sbm_mdio, regidx, 5); + + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; + + /* + * Switch the port around without a clock transition. 
+ */ + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + + /* + * Send out a clock pulse to signal we want the status + */ + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + + /* + * If an error occurred, the PHY will signal '1' back + */ + error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN; + + /* + * Issue an 'idle' clock pulse, but keep the direction + * the same. + */ + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + + regval = 0; + + for (idx = 0; idx < 16; idx++) { + regval <<= 1; + + if (error == 0) { + if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN) + regval |= 1; + } + + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); + } + + /* Switch back to output */ + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); + + if (error == 0) + return regval; + return 0xffff; +} + + +/********************************************************************** + * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) + * + * Write a value to a PHY register. + * + * Input parameters: + * bus - MDIO bus handle + * phyaddr - PHY to use + * regidx - register within the PHY + * regval - data to write to register + * + * Return value: + * 0 for success + ********************************************************************* */ + +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 regval) +{ + struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; + void __iomem *sbm_mdio = sc->sbm_mdio; + int mac_mdio_genc; + + sbmac_mii_sync(sbm_mdio); + + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2); + sbmac_mii_senddata(sbm_mdio, phyaddr, 5); + sbmac_mii_senddata(sbm_mdio, regidx, 5); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2); + sbmac_mii_senddata(sbm_mdio, regval, 16); + + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; + + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); + + return 0; +} + + + +/********************************************************************** + * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) + * + * Initialize a DMA channel context. Since there are potentially + * eight DMA channels per MAC, it's nice to do this in a standard + * way. 
+ * + * Input parameters: + * d - struct sbmacdma (DMA channel context) + * s - struct sbmac_softc (pointer to a MAC) + * chan - channel number (0..1 right now) + * txrx - Identifies DMA_TX or DMA_RX for channel direction + * maxdescr - number of descriptors + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr) +{ +#ifdef CONFIG_SBMAC_COALESCE + int int_pktcnt, int_timeout; +#endif + + /* + * Save away interesting stuff in the structure + */ + + d->sbdma_eth = s; + d->sbdma_channel = chan; + d->sbdma_txdir = txrx; + +#if 0 + /* RMON clearing */ + s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; +#endif + + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR); + + /* + * initialize register pointers + */ + + d->sbdma_config0 = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0); + d->sbdma_config1 = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1); + d->sbdma_dscrbase = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE); + d->sbdma_dscrcnt = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT); + d->sbdma_curdscr = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR); + if (d->sbdma_txdir) + d->sbdma_oodpktlost = NULL; + else + d->sbdma_oodpktlost = + s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX); + + /* + * Allocate memory for the ring + */ + + d->sbdma_maxdescr = maxdescr; + + d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1, + sizeof(*d->sbdma_dscrtable), + GFP_KERNEL); + + /* + * The descriptor table must be aligned to at least 16 bytes or the + * MAC will corrupt it. + */ + d->sbdma_dscrtable = (struct sbdmadscr *) + ALIGN((unsigned long)d->sbdma_dscrtable_unaligned, + sizeof(*d->sbdma_dscrtable)); + + d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; + + d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable); + + /* + * And context table + */ + + d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr, + sizeof(*d->sbdma_ctxtable), GFP_KERNEL); + +#ifdef CONFIG_SBMAC_COALESCE + /* + * Setup Rx/Tx DMA coalescing defaults + */ + + int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx; + if ( int_pktcnt ) { + d->sbdma_int_pktcnt = int_pktcnt; + } else { + d->sbdma_int_pktcnt = 1; + } + + int_timeout = (txrx == DMA_TX) ? 
int_timeout_tx : int_timeout_rx; + if ( int_timeout ) { + d->sbdma_int_timeout = int_timeout; + } else { + d->sbdma_int_timeout = 0; + } +#endif + +} + +/********************************************************************** + * SBDMA_CHANNEL_START(d) + * + * Initialize the hardware registers for a DMA channel. + * + * Input parameters: + * d - DMA channel to init (context must be previously init'd + * rxtx - DMA_RX or DMA_TX depending on what type of channel + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_channel_start(struct sbmacdma *d, int rxtx) +{ + /* + * Turn on the DMA channel + */ + +#ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | + 0, d->sbdma_config1); + __raw_writeq(M_DMA_EOP_INT_EN | + V_DMA_RINGSZ(d->sbdma_maxdescr) | + V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) | + 0, d->sbdma_config0); +#else + __raw_writeq(0, d->sbdma_config1); + __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) | + 0, d->sbdma_config0); +#endif + + __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase); + + /* + * Initialize ring pointers + */ + + d->sbdma_addptr = d->sbdma_dscrtable; + d->sbdma_remptr = d->sbdma_dscrtable; +} + +/********************************************************************** + * SBDMA_CHANNEL_STOP(d) + * + * Initialize the hardware registers for a DMA channel. + * + * Input parameters: + * d - DMA channel to init (context must be previously init'd + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_channel_stop(struct sbmacdma *d) +{ + /* + * Turn off the DMA channel + */ + + __raw_writeq(0, d->sbdma_config1); + + __raw_writeq(0, d->sbdma_dscrbase); + + __raw_writeq(0, d->sbdma_config0); + + /* + * Zero ring pointers + */ + + d->sbdma_addptr = NULL; + d->sbdma_remptr = NULL; +} + +static inline void sbdma_align_skb(struct sk_buff *skb, + unsigned int power2, unsigned int offset) +{ + unsigned char *addr = skb->data; + unsigned char *newaddr = PTR_ALIGN(addr, power2); + + skb_reserve(skb, newaddr - addr + offset); +} + + +/********************************************************************** + * SBDMA_ADD_RCVBUFFER(d,sb) + * + * Add a buffer to the specified DMA channel. For receive channels, + * this queues a buffer for inbound packets. + * + * Input parameters: + * sc - softc structure + * d - DMA channel descriptor + * sb - sk_buff to add, or NULL if we should allocate one + * + * Return value: + * 0 if buffer could not be added (ring is full) + * 1 if buffer added successfully + ********************************************************************* */ + + +static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, + struct sk_buff *sb) +{ + struct net_device *dev = sc->sbm_dev; + struct sbdmadscr *dsc; + struct sbdmadscr *nextdsc; + struct sk_buff *sb_new = NULL; + int pktsize = ENET_PACKET_SIZE; + + /* get pointer to our current place in the ring */ + + dsc = d->sbdma_addptr; + nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); + + /* + * figure out if the ring is full - if the next descriptor + * is the same as the one that we're going to remove from + * the ring, the ring is full + */ + + if (nextdsc == d->sbdma_remptr) { + return -ENOSPC; + } + + /* + * Allocate a sk_buff if we don't already have one. + * If we do have an sk_buff, reset it so that it's empty. + * + * Note: sk_buffs don't seem to be guaranteed to have any sort + * of alignment when they are allocated. 
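sbdma_align_skb() above reserves just enough headroom to push skb->data up to a cache-line boundary plus a small offset, which is why the receive path below over-allocates by two cache lines plus NET_IP_ALIGN. A standalone sketch of the same pointer arithmetic on an ordinary malloc()ed buffer; the 32-byte line size and 2-byte offset are assumptions for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_CACHE_LINE 32      /* stand-in for SMP_CACHE_BYTES */
#define FAKE_IP_ALIGN   2       /* stand-in for NET_IP_ALIGN */

int main(void)
{
        size_t pktsize = 1518;
        /* Over-allocate so the alignment can always be satisfied. */
        unsigned char *buf = malloc(pktsize + 2 * FAKE_CACHE_LINE + FAKE_IP_ALIGN);
        uintptr_t addr, aligned;
        unsigned char *data;

        if (!buf)
                return 1;

        addr = (uintptr_t)buf;
        aligned = (addr + FAKE_CACHE_LINE - 1) & ~(uintptr_t)(FAKE_CACHE_LINE - 1);
        data = (unsigned char *)(aligned + FAKE_IP_ALIGN);

        /* data + 14-byte Ethernet header now leaves the IP header 4-byte aligned. */
        printf("raw %p -> packet data %p (headroom %ld bytes)\n",
               (void *)buf, (void *)data, (long)(data - buf));
        free(buf);
        return 0;
}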
Therefore, allocate enough + * extra space to make sure that: + * + * 1. the data does not start in the middle of a cache line. + * 2. The data does not end in the middle of a cache line + * 3. The buffer can be aligned such that the IP addresses are + * naturally aligned. + * + * Remember, the SOCs MAC writes whole cache lines at a time, + * without reading the old contents first. So, if the sk_buff's + * data portion starts in the middle of a cache line, the SOC + * DMA will trash the beginning (and ending) portions. + */ + + if (sb == NULL) { + sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + + SMP_CACHE_BYTES * 2 + + NET_IP_ALIGN); + if (sb_new == NULL) + return -ENOBUFS; + + sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); + } + else { + sb_new = sb; + /* + * nothing special to reinit buffer, it's already aligned + * and sb->data already points to a good place. + */ + } + + /* + * fill in the descriptor + */ + +#ifdef CONFIG_SBMAC_COALESCE + /* + * Do not interrupt per DMA transfer. + */ + dsc->dscr_a = virt_to_phys(sb_new->data) | + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; +#else + dsc->dscr_a = virt_to_phys(sb_new->data) | + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | + M_DMA_DSCRA_INTERRUPT; +#endif + + /* receiving: no options */ + dsc->dscr_b = 0; + + /* + * fill in the context + */ + + d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new; + + /* + * point at next packet + */ + + d->sbdma_addptr = nextdsc; + + /* + * Give the buffer to the DMA engine. + */ + + __raw_writeq(1, d->sbdma_dscrcnt); + + return 0; /* we did it */ +} + +/********************************************************************** + * SBDMA_ADD_TXBUFFER(d,sb) + * + * Add a transmit buffer to the specified DMA channel, causing a + * transmit to start. + * + * Input parameters: + * d - DMA channel descriptor + * sb - sk_buff to add + * + * Return value: + * 0 transmit queued successfully + * otherwise error code + ********************************************************************* */ + + +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb) +{ + struct sbdmadscr *dsc; + struct sbdmadscr *nextdsc; + uint64_t phys; + uint64_t ncb; + int length; + + /* get pointer to our current place in the ring */ + + dsc = d->sbdma_addptr; + nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); + + /* + * figure out if the ring is full - if the next descriptor + * is the same as the one that we're going to remove from + * the ring, the ring is full + */ + + if (nextdsc == d->sbdma_remptr) { + return -ENOSPC; + } + + /* + * Under Linux, it's not necessary to copy/coalesce buffers + * like it is on NetBSD. We think they're all contiguous, + * but that may not be true for GBE. + */ + + length = sb->len; + + /* + * fill in the descriptor. Note that the number of cache + * blocks in the descriptor is the number of blocks + * *spanned*, so we need to add in the offset (if any) + * while doing the calculation. 
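The descriptor size written just below is measured in cache blocks *spanned*, so a buffer that starts part-way into a cache line needs its starting offset folded in before rounding up, exactly what the ncb calculation does with (phys & (SMP_CACHE_BYTES - 1)). A quick arithmetic sketch of the difference, assuming 32-byte lines for the example:

#include <stdio.h>

#define CACHE_BYTES 32
#define NUMCACHEBLKS(x) (((x) + CACHE_BYTES - 1) / CACHE_BYTES)

int main(void)
{
        unsigned long phys = 0x1010;    /* hypothetical address, 16 bytes into a line */
        int length = 60;                /* a short frame length for the demo */

        int by_length = NUMCACHEBLKS(length);
        int spanned   = NUMCACHEBLKS(length + (phys & (CACHE_BYTES - 1)));

        printf("blocks by length alone: %d, cache blocks actually spanned: %d\n",
               by_length, spanned);
        return 0;
}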
+ */ + + phys = virt_to_phys(sb->data); + ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1))); + + dsc->dscr_a = phys | + V_DMA_DSCRA_A_SIZE(ncb) | +#ifndef CONFIG_SBMAC_COALESCE + M_DMA_DSCRA_INTERRUPT | +#endif + M_DMA_ETHTX_SOP; + + /* transmitting: set outbound options and length */ + + dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | + V_DMA_DSCRB_PKT_SIZE(length); + + /* + * fill in the context + */ + + d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb; + + /* + * point at next packet + */ + + d->sbdma_addptr = nextdsc; + + /* + * Give the buffer to the DMA engine. + */ + + __raw_writeq(1, d->sbdma_dscrcnt); + + return 0; /* we did it */ +} + + + + +/********************************************************************** + * SBDMA_EMPTYRING(d) + * + * Free all allocated sk_buffs on the specified DMA channel; + * + * Input parameters: + * d - DMA channel + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_emptyring(struct sbmacdma *d) +{ + int idx; + struct sk_buff *sb; + + for (idx = 0; idx < d->sbdma_maxdescr; idx++) { + sb = d->sbdma_ctxtable[idx]; + if (sb) { + dev_kfree_skb(sb); + d->sbdma_ctxtable[idx] = NULL; + } + } +} + + +/********************************************************************** + * SBDMA_FILLRING(d) + * + * Fill the specified DMA channel (must be receive channel) + * with sk_buffs + * + * Input parameters: + * sc - softc structure + * d - DMA channel + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) +{ + int idx; + + for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { + if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) + break; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sbmac_netpoll(struct net_device *netdev) +{ + struct sbmac_softc *sc = netdev_priv(netdev); + int irq = sc->sbm_dev->irq; + + __raw_writeq(0, sc->sbm_imr); + + sbmac_intr(irq, netdev); + +#ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | + ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), + sc->sbm_imr); +#else + __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | + (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); +#endif +} +#endif + +/********************************************************************** + * SBDMA_RX_PROCESS(sc,d,work_to_do,poll) + * + * Process "completed" receive buffers on the specified DMA channel. + * + * Input parameters: + * sc - softc structure + * d - DMA channel context + * work_to_do - no. 
of packets to process before enabling interrupt + * again (for NAPI) + * poll - 1: using polling (for NAPI) + * + * Return value: + * nothing + ********************************************************************* */ + +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll) +{ + struct net_device *dev = sc->sbm_dev; + int curidx; + int hwidx; + struct sbdmadscr *dsc; + struct sk_buff *sb; + int len; + int work_done = 0; + int dropped = 0; + + prefetch(d); + +again: + /* Check if the HW dropped any frames */ + dev->stats.rx_fifo_errors + += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; + __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); + + while (work_to_do-- > 0) { + /* + * figure out where we are (as an index) and where + * the hardware is (also as an index) + * + * This could be done faster if (for example) the + * descriptor table was page-aligned and contiguous in + * both virtual and physical memory -- you could then + * just compare the low-order bits of the virtual address + * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) + */ + + dsc = d->sbdma_remptr; + curidx = dsc - d->sbdma_dscrtable; + + prefetch(dsc); + prefetch(&d->sbdma_ctxtable[curidx]); + + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / + sizeof(*d->sbdma_dscrtable); + + /* + * If they're the same, that means we've processed all + * of the descriptors up to (but not including) the one that + * the hardware is working on right now. + */ + + if (curidx == hwidx) + goto done; + + /* + * Otherwise, get the packet's sk_buff ptr back + */ + + sb = d->sbdma_ctxtable[curidx]; + d->sbdma_ctxtable[curidx] = NULL; + + len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4; + + /* + * Check packet status. If good, process it. + * If not, silently drop it and put it back on the + * receive ring. + */ + + if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) { + + /* + * Add a new buffer to replace the old one. If we fail + * to allocate a buffer, we're going to drop this + * packet and put it right back on the receive ring. + */ + + if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == + -ENOBUFS)) { + dev->stats.rx_dropped++; + /* Re-add old buffer */ + sbdma_add_rcvbuffer(sc, d, sb); + /* No point in continuing at the moment */ + printk(KERN_ERR "dropped packet (1)\n"); + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + goto done; + } else { + /* + * Set length into the packet + */ + skb_put(sb,len); + + /* + * Buffer has been replaced on the + * receive ring. Pass the buffer to + * the kernel + */ + sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev); + /* Check hw IPv4/TCP checksum if supported */ + if (sc->rx_hw_checksum == ENABLE) { + if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) && + !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) { + sb->ip_summed = CHECKSUM_UNNECESSARY; + /* don't need to set sb->csum */ + } else { + skb_checksum_none_assert(sb); + } + } + prefetch(sb->data); + prefetch((const void *)(((char *)sb->data)+32)); + if (poll) + dropped = netif_receive_skb(sb); + else + dropped = netif_rx(sb); + + if (dropped == NET_RX_DROP) { + dev->stats.rx_dropped++; + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + goto done; + } + else { + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + } + } + } else { + /* + * Packet was mangled somehow. Just drop it and + * put it back on the receive ring. + */ + dev->stats.rx_errors++; + sbdma_add_rcvbuffer(sc, d, sb); + } + + + /* + * .. and advance to the next buffer. 
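Both the receive loop above and the transmit reclaim below recover the hardware's position by subtracting the descriptor table's physical base from the current-descriptor CSR and dividing by the descriptor size, then compare that index against the software pointer. A standalone sketch of that index recovery; the addresses are made up, and the 16-byte descriptor mirrors struct sbdmadscr.

#include <stdint.h>
#include <stdio.h>

struct dscr {
        uint64_t a, b;          /* 16 bytes, like struct sbdmadscr */
};

int main(void)
{
        uint64_t table_phys = 0x80001000ULL;    /* hypothetical table base */
        /* What the current-descriptor CSR might report: five entries in. */
        uint64_t curdscr = table_phys + 5 * sizeof(struct dscr);

        unsigned int hwidx = (unsigned int)((curdscr - table_phys) / sizeof(struct dscr));
        unsigned int swidx = 3;                 /* where software left off */

        printf("hardware is at descriptor %u; software may process %u..%u\n",
               hwidx, swidx, hwidx - 1);
        return 0;
}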
+ */ + + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + work_done++; + } + if (!poll) { + work_to_do = 32; + goto again; /* collect fifo drop statistics again */ + } +done: + return work_done; +} + +/********************************************************************** + * SBDMA_TX_PROCESS(sc,d) + * + * Process "completed" transmit buffers on the specified DMA channel. + * This is normally called within the interrupt service routine. + * Note that this isn't really ideal for priority channels, since + * it processes all of the packets on a given channel before + * returning. + * + * Input parameters: + * sc - softc structure + * d - DMA channel context + * poll - 1: using polling (for NAPI) + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll) +{ + struct net_device *dev = sc->sbm_dev; + int curidx; + int hwidx; + struct sbdmadscr *dsc; + struct sk_buff *sb; + unsigned long flags; + int packets_handled = 0; + + spin_lock_irqsave(&(sc->sbm_lock), flags); + + if (d->sbdma_remptr == d->sbdma_addptr) + goto end_unlock; + + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); + + for (;;) { + /* + * figure out where we are (as an index) and where + * the hardware is (also as an index) + * + * This could be done faster if (for example) the + * descriptor table was page-aligned and contiguous in + * both virtual and physical memory -- you could then + * just compare the low-order bits of the virtual address + * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) + */ + + curidx = d->sbdma_remptr - d->sbdma_dscrtable; + + /* + * If they're the same, that means we've processed all + * of the descriptors up to (but not including) the one that + * the hardware is working on right now. + */ + + if (curidx == hwidx) + break; + + /* + * Otherwise, get the packet's sk_buff ptr back + */ + + dsc = &(d->sbdma_dscrtable[curidx]); + sb = d->sbdma_ctxtable[curidx]; + d->sbdma_ctxtable[curidx] = NULL; + + /* + * Stats + */ + + dev->stats.tx_bytes += sb->len; + dev->stats.tx_packets++; + + /* + * for transmits, we just free buffers. + */ + + dev_kfree_skb_irq(sb); + + /* + * .. and advance to the next buffer. + */ + + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); + + packets_handled++; + + } + + /* + * Decide if we should wake up the protocol or not. + * Other drivers seem to do this when we reach a low + * watermark on the transmit queue. + */ + + if (packets_handled) + netif_wake_queue(d->sbdma_eth->sbm_dev); + +end_unlock: + spin_unlock_irqrestore(&(sc->sbm_lock), flags); + +} + + + +/********************************************************************** + * SBMAC_INITCTX(s) + * + * Initialize an Ethernet context structure - this is called + * once per MAC on the 1250. 
Memory is allocated here, so don't + * call it again from inside the ioctl routines that bring the + * interface up/down + * + * Input parameters: + * s - sbmac context structure + * + * Return value: + * 0 + ********************************************************************* */ + +static int sbmac_initctx(struct sbmac_softc *s) +{ + + /* + * figure out the addresses of some ports + */ + + s->sbm_macenable = s->sbm_base + R_MAC_ENABLE; + s->sbm_maccfg = s->sbm_base + R_MAC_CFG; + s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG; + s->sbm_framecfg = s->sbm_base + R_MAC_FRAMECFG; + s->sbm_rxfilter = s->sbm_base + R_MAC_ADFILTER_CFG; + s->sbm_isr = s->sbm_base + R_MAC_STATUS; + s->sbm_imr = s->sbm_base + R_MAC_INT_MASK; + s->sbm_mdio = s->sbm_base + R_MAC_MDIO; + + /* + * Initialize the DMA channels. Right now, only one per MAC is used + * Note: Only do this _once_, as it allocates memory from the kernel! + */ + + sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR); + sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR); + + /* + * initial state is OFF + */ + + s->sbm_state = sbmac_state_off; + + return 0; +} + + +static void sbdma_uninitctx(struct sbmacdma *d) +{ + if (d->sbdma_dscrtable_unaligned) { + kfree(d->sbdma_dscrtable_unaligned); + d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL; + } + + if (d->sbdma_ctxtable) { + kfree(d->sbdma_ctxtable); + d->sbdma_ctxtable = NULL; + } +} + + +static void sbmac_uninitctx(struct sbmac_softc *sc) +{ + sbdma_uninitctx(&(sc->sbm_txdma)); + sbdma_uninitctx(&(sc->sbm_rxdma)); +} + + +/********************************************************************** + * SBMAC_CHANNEL_START(s) + * + * Start packet processing on this MAC. + * + * Input parameters: + * s - sbmac structure + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbmac_channel_start(struct sbmac_softc *s) +{ + uint64_t reg; + void __iomem *port; + uint64_t cfg,fifo,framecfg; + int idx, th_value; + + /* + * Don't do this if running + */ + + if (s->sbm_state == sbmac_state_on) + return; + + /* + * Bring the controller out of reset, but leave it off. + */ + + __raw_writeq(0, s->sbm_macenable); + + /* + * Ignore all received packets + */ + + __raw_writeq(0, s->sbm_rxfilter); + + /* + * Calculate values for various control registers. + */ + + cfg = M_MAC_RETRY_EN | + M_MAC_TX_HOLD_SOP_EN | + V_MAC_TX_PAUSE_CNT_16K | + M_MAC_AP_STAT_EN | + M_MAC_FAST_SYNC | + M_MAC_SS_EN | + 0; + + /* + * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars + * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above + * Use a larger RD_THRSH for gigabit + */ + if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) + th_value = 28; + else + th_value = 64; + + fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ + ((s->sbm_speed == sbmac_speed_1000) + ? 
V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) | + V_MAC_TX_RL_THRSH(4) | + V_MAC_RX_PL_THRSH(4) | + V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ + V_MAC_RX_RL_THRSH(8) | + 0; + + framecfg = V_MAC_MIN_FRAMESZ_DEFAULT | + V_MAC_MAX_FRAMESZ_DEFAULT | + V_MAC_BACKOFF_SEL(1); + + /* + * Clear out the hash address map + */ + + port = s->sbm_base + R_MAC_HASH_BASE; + for (idx = 0; idx < MAC_HASH_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + /* + * Clear out the exact-match table + */ + + port = s->sbm_base + R_MAC_ADDR_BASE; + for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + /* + * Clear out the DMA Channel mapping table registers + */ + + port = s->sbm_base + R_MAC_CHUP0_BASE; + for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + + port = s->sbm_base + R_MAC_CHLO0_BASE; + for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { + __raw_writeq(0, port); + port += sizeof(uint64_t); + } + + /* + * Program the hardware address. It goes into the hardware-address + * register as well as the first filter register. + */ + + reg = sbmac_addr2reg(s->sbm_hwaddr); + + port = s->sbm_base + R_MAC_ADDR_BASE; + __raw_writeq(reg, port); + port = s->sbm_base + R_MAC_ETHERNET_ADDR; + +#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS + /* + * Pass1 SOCs do not receive packets addressed to the + * destination address in the R_MAC_ETHERNET_ADDR register. + * Set the value to zero. + */ + __raw_writeq(0, port); +#else + __raw_writeq(reg, port); +#endif + + /* + * Set the receive filter for no packets, and write values + * to the various config registers + */ + + __raw_writeq(0, s->sbm_rxfilter); + __raw_writeq(0, s->sbm_imr); + __raw_writeq(framecfg, s->sbm_framecfg); + __raw_writeq(fifo, s->sbm_fifocfg); + __raw_writeq(cfg, s->sbm_maccfg); + + /* + * Initialize DMA channels (rings should be ok now) + */ + + sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); + sbdma_channel_start(&(s->sbm_txdma), DMA_TX); + + /* + * Configure the speed, duplex, and flow control + */ + + sbmac_set_speed(s,s->sbm_speed); + sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); + + /* + * Fill the receive ring + */ + + sbdma_fillring(s, &(s->sbm_rxdma)); + + /* + * Turn on the rest of the bits in the enable register + */ + +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) + __raw_writeq(M_MAC_RXDMA_EN0 | + M_MAC_TXDMA_EN0, s->sbm_macenable); +#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X) + __raw_writeq(M_MAC_RXDMA_EN0 | + M_MAC_TXDMA_EN0 | + M_MAC_RX_ENABLE | + M_MAC_TX_ENABLE, s->sbm_macenable); +#else +#error invalid SiByte MAC configuration +#endif + +#ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | + ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr); +#else + __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | + (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr); +#endif + + /* + * Enable receiving unicasts and broadcasts + */ + + __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter); + + /* + * we're running now. 
+ */ + + s->sbm_state = sbmac_state_on; + + /* + * Program multicast addresses + */ + + sbmac_setmulti(s); + + /* + * If channel was in promiscuous mode before, turn that on + */ + + if (s->sbm_devflags & IFF_PROMISC) { + sbmac_promiscuous_mode(s,1); + } + +} + + +/********************************************************************** + * SBMAC_CHANNEL_STOP(s) + * + * Stop packet processing on this MAC. + * + * Input parameters: + * s - sbmac structure + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbmac_channel_stop(struct sbmac_softc *s) +{ + /* don't do this if already stopped */ + + if (s->sbm_state == sbmac_state_off) + return; + + /* don't accept any packets, disable all interrupts */ + + __raw_writeq(0, s->sbm_rxfilter); + __raw_writeq(0, s->sbm_imr); + + /* Turn off ticker */ + + /* XXX */ + + /* turn off receiver and transmitter */ + + __raw_writeq(0, s->sbm_macenable); + + /* We're stopped now. */ + + s->sbm_state = sbmac_state_off; + + /* + * Stop DMA channels (rings should be ok now) + */ + + sbdma_channel_stop(&(s->sbm_rxdma)); + sbdma_channel_stop(&(s->sbm_txdma)); + + /* Empty the receive and transmit rings */ + + sbdma_emptyring(&(s->sbm_rxdma)); + sbdma_emptyring(&(s->sbm_txdma)); + +} + +/********************************************************************** + * SBMAC_SET_CHANNEL_STATE(state) + * + * Set the channel's state ON or OFF + * + * Input parameters: + * state - new state + * + * Return value: + * old state + ********************************************************************* */ +static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, + enum sbmac_state state) +{ + enum sbmac_state oldstate = sc->sbm_state; + + /* + * If same as previous state, return + */ + + if (state == oldstate) { + return oldstate; + } + + /* + * If new state is ON, turn channel on + */ + + if (state == sbmac_state_on) { + sbmac_channel_start(sc); + } + else { + sbmac_channel_stop(sc); + } + + /* + * Return previous state + */ + + return oldstate; +} + + +/********************************************************************** + * SBMAC_PROMISCUOUS_MODE(sc,onoff) + * + * Turn on or off promiscuous mode + * + * Input parameters: + * sc - softc + * onoff - 1 to turn on, 0 to turn off + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) +{ + uint64_t reg; + + if (sc->sbm_state != sbmac_state_on) + return; + + if (onoff) { + reg = __raw_readq(sc->sbm_rxfilter); + reg |= M_MAC_ALLPKT_EN; + __raw_writeq(reg, sc->sbm_rxfilter); + } + else { + reg = __raw_readq(sc->sbm_rxfilter); + reg &= ~M_MAC_ALLPKT_EN; + __raw_writeq(reg, sc->sbm_rxfilter); + } +} + +/********************************************************************** + * SBMAC_SETIPHDR_OFFSET(sc,onoff) + * + * Set the iphdr offset to 15 assuming ethernet encapsulation + * + * Input parameters: + * sc - softc + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) +{ + uint64_t reg; + + /* Hard code the offset to 15 for now */ + reg = __raw_readq(sc->sbm_rxfilter); + reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); + __raw_writeq(reg, sc->sbm_rxfilter); + + /* BCM1250 pass1 didn't have hardware checksum. Everything + later does.
*/ + if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) { + sc->rx_hw_checksum = DISABLE; + } else { + sc->rx_hw_checksum = ENABLE; + } +} + + +/********************************************************************** + * SBMAC_ADDR2REG(ptr) + * + * Convert six bytes into the 64-bit register value that + * we typically write into the SBMAC's address/mcast registers + * + * Input parameters: + * ptr - pointer to 6 bytes + * + * Return value: + * register value + ********************************************************************* */ + +static uint64_t sbmac_addr2reg(unsigned char *ptr) +{ + uint64_t reg = 0; + + ptr += 6; + + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + reg <<= 8; + reg |= (uint64_t) *(--ptr); + + return reg; +} + + +/********************************************************************** + * SBMAC_SET_SPEED(s,speed) + * + * Configure LAN speed for the specified MAC. + * Warning: must be called when MAC is off! + * + * Input parameters: + * s - sbmac structure + * speed - speed to set MAC to (see enum sbmac_speed) + * + * Return value: + * 1 if successful + * 0 indicates invalid parameters + ********************************************************************* */ + +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed) +{ + uint64_t cfg; + uint64_t framecfg; + + /* + * Save new current values + */ + + s->sbm_speed = speed; + + if (s->sbm_state == sbmac_state_on) + return 0; /* save for next restart */ + + /* + * Read current register values + */ + + cfg = __raw_readq(s->sbm_maccfg); + framecfg = __raw_readq(s->sbm_framecfg); + + /* + * Mask out the stuff we want to change + */ + + cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); + framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | + M_MAC_SLOT_SIZE); + + /* + * Now add in the new bits + */ + + switch (speed) { + case sbmac_speed_10: + framecfg |= V_MAC_IFG_RX_10 | + V_MAC_IFG_TX_10 | + K_MAC_IFG_THRSH_10 | + V_MAC_SLOT_SIZE_10; + cfg |= V_MAC_SPEED_SEL_10MBPS; + break; + + case sbmac_speed_100: + framecfg |= V_MAC_IFG_RX_100 | + V_MAC_IFG_TX_100 | + V_MAC_IFG_THRSH_100 | + V_MAC_SLOT_SIZE_100; + cfg |= V_MAC_SPEED_SEL_100MBPS ; + break; + + case sbmac_speed_1000: + framecfg |= V_MAC_IFG_RX_1000 | + V_MAC_IFG_TX_1000 | + V_MAC_IFG_THRSH_1000 | + V_MAC_SLOT_SIZE_1000; + cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; + break; + + default: + return 0; + } + + /* + * Send the bits back to the hardware + */ + + __raw_writeq(framecfg, s->sbm_framecfg); + __raw_writeq(cfg, s->sbm_maccfg); + + return 1; +} + +/********************************************************************** + * SBMAC_SET_DUPLEX(s,duplex,fc) + * + * Set Ethernet duplex and flow control options for this MAC + * Warning: must be called when MAC is off! 
+ * + * Input parameters: + * s - sbmac structure + * duplex - duplex setting (see enum sbmac_duplex) + * fc - flow control setting (see enum sbmac_fc) + * + * Return value: + * 1 if ok + * 0 if an invalid parameter combination was specified + ********************************************************************* */ + +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc) +{ + uint64_t cfg; + + /* + * Save new current values + */ + + s->sbm_duplex = duplex; + s->sbm_fc = fc; + + if (s->sbm_state == sbmac_state_on) + return 0; /* save for next restart */ + + /* + * Read current register values + */ + + cfg = __raw_readq(s->sbm_maccfg); + + /* + * Mask off the stuff we're about to change + */ + + cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); + + + switch (duplex) { + case sbmac_duplex_half: + switch (fc) { + case sbmac_fc_disabled: + cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; + break; + + case sbmac_fc_collision: + cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; + break; + + case sbmac_fc_carrier: + cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; + break; + + case sbmac_fc_frame: /* not valid in half duplex */ + default: /* invalid selection */ + return 0; + } + break; + + case sbmac_duplex_full: + switch (fc) { + case sbmac_fc_disabled: + cfg |= V_MAC_FC_CMD_DISABLED; + break; + + case sbmac_fc_frame: + cfg |= V_MAC_FC_CMD_ENABLED; + break; + + case sbmac_fc_collision: /* not valid in full duplex */ + case sbmac_fc_carrier: /* not valid in full duplex */ + default: + return 0; + } + break; + default: + return 0; + } + + /* + * Send the bits back to the hardware + */ + + __raw_writeq(cfg, s->sbm_maccfg); + + return 1; +} + + + + +/********************************************************************** + * SBMAC_INTR() + * + * Interrupt handler for MAC interrupts + * + * Input parameters: + * MAC structure + * + * Return value: + * nothing + ********************************************************************* */ +static irqreturn_t sbmac_intr(int irq,void *dev_instance) +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct sbmac_softc *sc = netdev_priv(dev); + uint64_t isr; + int handled = 0; + + /* + * Read the ISR (this clears the bits in the real + * register, except for counter addr) + */ + + isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; + + if (isr == 0) + return IRQ_RETVAL(0); + handled = 1; + + /* + * Transmits on channel 0 + */ + + if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) + sbdma_tx_process(sc,&(sc->sbm_txdma), 0); + + if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { + if (napi_schedule_prep(&sc->napi)) { + __raw_writeq(0, sc->sbm_imr); + __napi_schedule(&sc->napi); + /* Depend on the exit from poll to reenable intr */ + } + else { + /* may leave some packets behind */ + sbdma_rx_process(sc,&(sc->sbm_rxdma), + SBMAC_MAX_RXDESCR * 2, 0); + } + } + return IRQ_RETVAL(handled); +} + +/********************************************************************** + * SBMAC_START_TX(skb,dev) + * + * Start output on the specified interface. Basically, we + * queue as many buffers as we can until the ring fills up, or + * we run off the end of the queue, whichever comes first. 
+ * + * Input parameters: + * + * + * Return value: + * nothing + ********************************************************************* */ +static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + unsigned long flags; + + /* lock eth irq */ + spin_lock_irqsave(&sc->sbm_lock, flags); + + /* + * Put the buffer on the transmit ring. If we + * don't have room, stop the queue. + */ + + if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { + /* XXX save skb that we could not send */ + netif_stop_queue(dev); + spin_unlock_irqrestore(&sc->sbm_lock, flags); + + return NETDEV_TX_BUSY; + } + + spin_unlock_irqrestore(&sc->sbm_lock, flags); + + return NETDEV_TX_OK; +} + +/********************************************************************** + * SBMAC_SETMULTI(sc) + * + * Reprogram the multicast table into the hardware, given + * the list of multicasts associated with the interface + * structure. + * + * Input parameters: + * sc - softc + * + * Return value: + * nothing + ********************************************************************* */ + +static void sbmac_setmulti(struct sbmac_softc *sc) +{ + uint64_t reg; + void __iomem *port; + int idx; + struct netdev_hw_addr *ha; + struct net_device *dev = sc->sbm_dev; + + /* + * Clear out entire multicast table. We do this by nuking + * the entire hash table and all the direct matches except + * the first one, which is used for our station address + */ + + for (idx = 1; idx < MAC_ADDR_COUNT; idx++) { + port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)); + __raw_writeq(0, port); + } + + for (idx = 0; idx < MAC_HASH_COUNT; idx++) { + port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t)); + __raw_writeq(0, port); + } + + /* + * Clear the filter to say we don't want any multicasts. + */ + + reg = __raw_readq(sc->sbm_rxfilter); + reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN); + __raw_writeq(reg, sc->sbm_rxfilter); + + if (dev->flags & IFF_ALLMULTI) { + /* + * Enable ALL multicasts. Do this by inverting the + * multicast enable bit. + */ + reg = __raw_readq(sc->sbm_rxfilter); + reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN); + __raw_writeq(reg, sc->sbm_rxfilter); + return; + } + + + /* + * Program new multicast entries. For now, only use the + * perfect filter. In the future we'll need to use the + * hash filter if the perfect filter overflows + */ + + /* XXX only using perfect filter for now, need to use hash + * XXX if the table overflows */ + + idx = 1; /* skip station address */ + netdev_for_each_mc_addr(ha, dev) { + if (idx == MAC_ADDR_COUNT) + break; + reg = sbmac_addr2reg(ha->addr); + port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); + __raw_writeq(reg, port); + idx++; + } + + /* + * Enable the "accept multicast bits" if we programmed at least one + * multicast.
+ */ + + if (idx > 1) { + reg = __raw_readq(sc->sbm_rxfilter); + reg |= M_MAC_MCAST_EN; + __raw_writeq(reg, sc->sbm_rxfilter); + } +} + +static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) +{ + if (new_mtu > ENET_PACKET_SIZE) + return -EINVAL; + _dev->mtu = new_mtu; + pr_info("changing the mtu to %d\n", new_mtu); + return 0; +} + +static const struct net_device_ops sbmac_netdev_ops = { + .ndo_open = sbmac_open, + .ndo_stop = sbmac_close, + .ndo_start_xmit = sbmac_start_tx, + .ndo_set_rx_mode = sbmac_set_rx_mode, + .ndo_tx_timeout = sbmac_tx_timeout, + .ndo_do_ioctl = sbmac_mii_ioctl, + .ndo_change_mtu = sb1250_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sbmac_netpoll, +#endif +}; + +/********************************************************************** + * SBMAC_INIT(dev) + * + * Attach routine - init hardware and hook ourselves into linux + * + * Input parameters: + * dev - net_device structure + * + * Return value: + * status + ********************************************************************* */ + +static int sbmac_init(struct platform_device *pldev, long long base) +{ + struct net_device *dev = platform_get_drvdata(pldev); + int idx = pldev->id; + struct sbmac_softc *sc = netdev_priv(dev); + unsigned char *eaddr; + uint64_t ea_reg; + int i; + int err; + + sc->sbm_dev = dev; + sc->sbe_idx = idx; + + eaddr = sc->sbm_hwaddr; + + /* + * Read the ethernet address. The firmware left this programmed + * for us in the ethernet address register for each mac. + */ + + ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR); + __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR); + for (i = 0; i < 6; i++) { + eaddr[i] = (uint8_t) (ea_reg & 0xFF); + ea_reg >>= 8; + } + + for (i = 0; i < 6; i++) { + dev->dev_addr[i] = eaddr[i]; + } + + /* + * Initialize context (get pointers to registers and stuff), then + * allocate the memory for the descriptor tables. 
+ */ + + sbmac_initctx(sc); + + /* + * Set up Linux device callins + */ + + spin_lock_init(&(sc->sbm_lock)); + + dev->netdev_ops = &sbmac_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + netif_napi_add(dev, &sc->napi, sbmac_poll, 16); + + dev->irq = UNIT_INT(idx); + + /* This is needed for PASS2 for Rx H/W checksum feature */ + sbmac_set_iphdr_offset(sc); + + sc->mii_bus = mdiobus_alloc(); + if (sc->mii_bus == NULL) { + err = -ENOMEM; + goto uninit_ctx; + } + + sc->mii_bus->name = sbmac_mdio_string; + snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pldev->name, idx); + sc->mii_bus->priv = sc; + sc->mii_bus->read = sbmac_mii_read; + sc->mii_bus->write = sbmac_mii_write; + sc->mii_bus->irq = sc->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + sc->mii_bus->irq[i] = SBMAC_PHY_INT; + + sc->mii_bus->parent = &pldev->dev; + /* + * Probe PHY address + */ + err = mdiobus_register(sc->mii_bus); + if (err) { + printk(KERN_ERR "%s: unable to register MDIO bus\n", + dev->name); + goto free_mdio; + } + platform_set_drvdata(pldev, sc->mii_bus); + + err = register_netdev(dev); + if (err) { + printk(KERN_ERR "%s.%d: unable to register netdev\n", + sbmac_string, idx); + goto unreg_mdio; + } + + pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); + + if (sc->rx_hw_checksum == ENABLE) + pr_info("%s: enabling TCP rcv checksum\n", dev->name); + + /* + * Display Ethernet address (this is called during the config + * process so we need to finish off the config message that + * was being displayed) + */ + pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", + dev->name, base, eaddr); + + return 0; +unreg_mdio: + mdiobus_unregister(sc->mii_bus); +free_mdio: + mdiobus_free(sc->mii_bus); +uninit_ctx: + sbmac_uninitctx(sc); + return err; +} + + +static int sbmac_open(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + int err; + + if (debug > 1) + pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); + + /* + * map/route interrupt (clear status first, in case something + * weird is pending; we haven't initialized the mac registers + * yet) + */ + + __raw_readq(sc->sbm_isr); + err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev); + if (err) { + printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, + dev->irq); + goto out_err; + } + + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_none; + sc->sbm_pause = -1; + sc->sbm_link = 0; + + /* + * Attach to the PHY + */ + err = sbmac_mii_probe(dev); + if (err) + goto out_unregister; + + /* + * Turn on the channel + */ + + sbmac_set_channel_state(sc,sbmac_state_on); + + netif_start_queue(dev); + + sbmac_set_rx_mode(dev); + + phy_start(sc->phy_dev); + + napi_enable(&sc->napi); + + return 0; + +out_unregister: + free_irq(dev->irq, dev); +out_err: + return err; +} + +static int sbmac_mii_probe(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + struct phy_device *phy_dev; + int i; + + for (i = 0; i < PHY_MAX_ADDR; i++) { + phy_dev = sc->mii_bus->phy_map[i]; + if (phy_dev) + break; + } + if (!phy_dev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENXIO; + } + + phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, + PHY_INTERFACE_MODE_GMII); + if (IS_ERR(phy_dev)) { + printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); + return PTR_ERR(phy_dev); + } + + /* Remove any features not supported by the controller */ + phy_dev->supported &= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + 
SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + phy_dev->advertising = phy_dev->supported; + + pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + dev->name, phy_dev->drv->name, + dev_name(&phy_dev->dev), phy_dev->irq); + + sc->phy_dev = phy_dev; + + return 0; +} + + +static void sbmac_mii_poll(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + struct phy_device *phy_dev = sc->phy_dev; + unsigned long flags; + enum sbmac_fc fc; + int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; + + link_chg = (sc->sbm_link != phy_dev->link); + speed_chg = (sc->sbm_speed != phy_dev->speed); + duplex_chg = (sc->sbm_duplex != phy_dev->duplex); + pause_chg = (sc->sbm_pause != phy_dev->pause); + + if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) + return; /* Hmmm... */ + + if (!phy_dev->link) { + if (link_chg) { + sc->sbm_link = phy_dev->link; + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_disabled; + sc->sbm_pause = -1; + pr_info("%s: link unavailable\n", dev->name); + } + return; + } + + if (phy_dev->duplex == DUPLEX_FULL) { + if (phy_dev->pause) + fc = sbmac_fc_frame; + else + fc = sbmac_fc_disabled; + } else + fc = sbmac_fc_collision; + fc_chg = (sc->sbm_fc != fc); + + pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, + phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H'); + + spin_lock_irqsave(&sc->sbm_lock, flags); + + sc->sbm_speed = phy_dev->speed; + sc->sbm_duplex = phy_dev->duplex; + sc->sbm_fc = fc; + sc->sbm_pause = phy_dev->pause; + sc->sbm_link = phy_dev->link; + + if ((speed_chg || duplex_chg || fc_chg) && + sc->sbm_state != sbmac_state_off) { + /* + * something changed, restart the channel + */ + if (debug > 1) + pr_debug("%s: restarting channel " + "because PHY state changed\n", dev->name); + sbmac_channel_stop(sc); + sbmac_channel_start(sc); + } + + spin_unlock_irqrestore(&sc->sbm_lock, flags); +} + + +static void sbmac_tx_timeout (struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&sc->sbm_lock, flags); + + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + + spin_unlock_irqrestore(&sc->sbm_lock, flags); + + printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); +} + + + + +static void sbmac_set_rx_mode(struct net_device *dev) +{ + unsigned long flags; + struct sbmac_softc *sc = netdev_priv(dev); + + spin_lock_irqsave(&sc->sbm_lock, flags); + if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) { + /* + * Promiscuous changed. + */ + + if (dev->flags & IFF_PROMISC) { + sbmac_promiscuous_mode(sc,1); + } + else { + sbmac_promiscuous_mode(sc,0); + } + } + spin_unlock_irqrestore(&sc->sbm_lock, flags); + + /* + * Program the multicasts. Do this every time. 
+ */ + + sbmac_setmulti(sc); + +} + +static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct sbmac_softc *sc = netdev_priv(dev); + + if (!netif_running(dev) || !sc->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(sc->phy_dev, rq, cmd); +} + +static int sbmac_close(struct net_device *dev) +{ + struct sbmac_softc *sc = netdev_priv(dev); + + napi_disable(&sc->napi); + + phy_stop(sc->phy_dev); + + sbmac_set_channel_state(sc, sbmac_state_off); + + netif_stop_queue(dev); + + if (debug > 1) + pr_debug("%s: Shutting down ethercard\n", dev->name); + + phy_disconnect(sc->phy_dev); + sc->phy_dev = NULL; + free_irq(dev->irq, dev); + + sbdma_emptyring(&(sc->sbm_txdma)); + sbdma_emptyring(&(sc->sbm_rxdma)); + + return 0; +} + +static int sbmac_poll(struct napi_struct *napi, int budget) +{ + struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); + int work_done; + + work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); + sbdma_tx_process(sc, &(sc->sbm_txdma), 1); + + if (work_done < budget) { + napi_complete(napi); + +#ifdef CONFIG_SBMAC_COALESCE + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | + ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), + sc->sbm_imr); +#else + __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | + (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr); +#endif + } + + return work_done; +} + + +static int sbmac_probe(struct platform_device *pldev) +{ + struct net_device *dev; + struct sbmac_softc *sc; + void __iomem *sbm_base; + struct resource *res; + u64 sbmac_orig_hwaddr; + int err; + + res = platform_get_resource(pldev, IORESOURCE_MEM, 0); + BUG_ON(!res); + sbm_base = ioremap_nocache(res->start, resource_size(res)); + if (!sbm_base) { + printk(KERN_ERR "%s: unable to map device registers\n", + dev_name(&pldev->dev)); + err = -ENOMEM; + goto out_out; + } + + /* + * The R_MAC_ETHERNET_ADDR register will be set to some nonzero + * value for us by the firmware if we're going to use this MAC. + * If we find a zero, skip this MAC. + */ + sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); + pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev), + sbmac_orig_hwaddr ? "" : "not ", (long long)res->start); + if (sbmac_orig_hwaddr == 0) { + err = 0; + goto out_unmap; + } + + /* + * Okay, cool. Initialize this MAC. 
+ */ + dev = alloc_etherdev(sizeof(struct sbmac_softc)); + if (!dev) { + err = -ENOMEM; + goto out_unmap; + } + + platform_set_drvdata(pldev, dev); + SET_NETDEV_DEV(dev, &pldev->dev); + + sc = netdev_priv(dev); + sc->sbm_base = sbm_base; + + err = sbmac_init(pldev, res->start); + if (err) + goto out_kfree; + + return 0; + +out_kfree: + free_netdev(dev); + __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); + +out_unmap: + iounmap(sbm_base); + +out_out: + return err; +} + +static int __exit sbmac_remove(struct platform_device *pldev) +{ + struct net_device *dev = platform_get_drvdata(pldev); + struct sbmac_softc *sc = netdev_priv(dev); + + unregister_netdev(dev); + sbmac_uninitctx(sc); + mdiobus_unregister(sc->mii_bus); + mdiobus_free(sc->mii_bus); + iounmap(sc->sbm_base); + free_netdev(dev); + + return 0; +} + +static struct platform_driver sbmac_driver = { + .probe = sbmac_probe, + .remove = __exit_p(sbmac_remove), + .driver = { + .name = sbmac_string, + }, +}; + +module_platform_driver(sbmac_driver); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c new file mode 100644 index 000000000..6eb005247 --- /dev/null +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -0,0 +1,18271 @@ +/* + * tg3.c: Broadcom Tigon3 ethernet driver. + * + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2005-2014 Broadcom Corporation. + * +/*(DEBLOBBED)*/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#ifdef CONFIG_SPARC +#include +#include +#endif + +#define BAR_0 0 +#define BAR_2 2 + +#include "tg3.h" + +/* Functions & macros to verify TG3_FLAGS types */ + +static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) +{ + return test_bit(flag, bits); +} + +static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) +{ + set_bit(flag, bits); +} + +static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) +{ + clear_bit(flag, bits); +} + +#define tg3_flag(tp, flag) \ + _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_set(tp, flag) \ + _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_clear(tp, flag) \ + _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) + +#define DRV_MODULE_NAME "tg3" +#define TG3_MAJ_NUM 3 +#define TG3_MIN_NUM 137 +#define DRV_MODULE_VERSION \ + __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) +#define DRV_MODULE_RELDATE "May 11, 2014" + +#define RESET_KIND_SHUTDOWN 0 +#define RESET_KIND_INIT 1 +#define RESET_KIND_SUSPEND 2 + +#define TG3_DEF_RX_MODE 0 +#define TG3_DEF_TX_MODE 0 +#define TG3_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +#define TG3_GRC_LCLCTL_PWRSW_DELAY 100 + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ + +#define TG3_TX_TIMEOUT (5 * HZ) + +/* hardware minimum and maximum for a single frame's data payload */ +#define TG3_MIN_MTU 60 +#define TG3_MAX_MTU(tp) \ + (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) + +/* These numbers seem to be hard coded in the NIC firmware somehow. + * You can't change the ring sizes, but you can change where you place + * them in the NIC onboard memory. + */ +#define TG3_RX_STD_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) +#define TG3_DEF_RX_RING_PENDING 200 +#define TG3_RX_JMB_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) +#define TG3_DEF_RX_JUMBO_RING_PENDING 100 + +/* Do not place this n-ring entries value into the tp struct itself, + * we really want to expose these constants to GCC so that modulo et + * al. operations are done with shifts and masks instead of with + * hw multiply/modulo instructions. Another solution would be to + * replace things like '% foo' with '& (foo - 1)'. + */ + +#define TG3_TX_RING_SIZE 512 +#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) + +#define TG3_RX_STD_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) +#define TG3_RX_JMB_RING_BYTES(tp) \ + (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) +#define TG3_RX_RCB_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) +#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ + TG3_TX_RING_SIZE) +#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) + +#define TG3_DMA_BYTE_ENAB 64 + +#define TG3_RX_STD_DMA_SZ 1536 +#define TG3_RX_JMB_DMA_SZ 9046 + +#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) + +#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) +#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) + +#define TG3_RX_STD_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) + +#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) + +/* Due to a hardware bug, the 5701 can only DMA to memory addresses + * that are at least dword aligned when used in PCIX mode. The driver + * works around this bug by double copying the packet. This workaround + * is built into the normal double copy length check for efficiency. + * + * However, the double copy is only necessary on those architectures + * where unaligned memory accesses are inefficient. For those architectures + * where unaligned memory accesses incur little penalty, we can reintegrate + * the 5701 in the normal rx path. Doing so saves a device structure + * dereference by hardcoding the double copy threshold in place. 
+ */ +#define TG3_RX_COPY_THRESHOLD 256 +#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD +#else + #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) +#endif + +#if (NET_IP_ALIGN != 0) +#define TG3_RX_OFFSET(tp) ((tp)->rx_offset) +#else +#define TG3_RX_OFFSET(tp) (NET_SKB_PAD) +#endif + +/* minimum number of free TX descriptors required to wake up TX process */ +#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) +#define TG3_TX_BD_DMA_MAX_2K 2048 +#define TG3_TX_BD_DMA_MAX_4K 4096 + +#define TG3_RAW_IP_ALIGN 2 + +#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3) +#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1) + +#define TG3_FW_UPDATE_TIMEOUT_SEC 5 +#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2) + +#define FIRMWARE_TG3 "/*(DEBLOBBED)*/" +#define FIRMWARE_TG357766 "/*(DEBLOBBED)*/" +#define FIRMWARE_TG3TSO "/*(DEBLOBBED)*/" +#define FIRMWARE_TG3TSO5 "/*(DEBLOBBED)*/" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; + +MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); +MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +/*(DEBLOBBED)*/ + +static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ +module_param(tg3_debug, int, 0); +MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); + +#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001 +#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002 + +static const struct pci_device_id tg3_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY | + TG3_DRV_DATA_FLAG_5705_10_100}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY | + TG3_DRV_DATA_FLAG_5705_10_100}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY | + TG3_DRV_DATA_FLAG_5705_10_100}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5722)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M, + PCI_VENDOR_ID_LENOVO, + TG3PCI_SUBDEVICE_ID_LENOVO_5787M), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780, + PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780, + PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, + 
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795), + .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, + {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, + {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ + {} +}; + +MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); + +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "rx_octets" }, + { "rx_fragments" }, + { "rx_ucast_packets" }, + { "rx_mcast_packets" }, + { "rx_bcast_packets" }, + { "rx_fcs_errors" }, + { "rx_align_errors" }, + { "rx_xon_pause_rcvd" }, + { "rx_xoff_pause_rcvd" }, + { "rx_mac_ctrl_rcvd" }, + { "rx_xoff_entered" }, + { "rx_frame_too_long_errors" }, + { "rx_jabbers" }, + { "rx_undersize_packets" }, + { "rx_in_length_errors" }, + { "rx_out_length_errors" }, + { "rx_64_or_less_octet_packets" }, + { "rx_65_to_127_octet_packets" }, + { "rx_128_to_255_octet_packets" }, + { "rx_256_to_511_octet_packets" }, + { "rx_512_to_1023_octet_packets" }, + { "rx_1024_to_1522_octet_packets" }, + { "rx_1523_to_2047_octet_packets" }, + { "rx_2048_to_4095_octet_packets" }, + { "rx_4096_to_8191_octet_packets" }, + { "rx_8192_to_9022_octet_packets" }, + + { "tx_octets" }, + { "tx_collisions" }, + + { "tx_xon_sent" }, + { "tx_xoff_sent" }, + { "tx_flow_control" }, + { "tx_mac_errors" }, + { "tx_single_collisions" }, + { "tx_mult_collisions" }, + { "tx_deferred" }, + { "tx_excessive_collisions" }, + { "tx_late_collisions" }, + { "tx_collide_2times" }, + { "tx_collide_3times" }, + { "tx_collide_4times" }, + { "tx_collide_5times" }, + { "tx_collide_6times" }, + { "tx_collide_7times" }, + { "tx_collide_8times" }, + { "tx_collide_9times" }, + { "tx_collide_10times" }, + { "tx_collide_11times" }, + { "tx_collide_12times" }, + { "tx_collide_13times" }, + { 
"tx_collide_14times" }, + { "tx_collide_15times" }, + { "tx_ucast_packets" }, + { "tx_mcast_packets" }, + { "tx_bcast_packets" }, + { "tx_carrier_sense_errors" }, + { "tx_discards" }, + { "tx_errors" }, + + { "dma_writeq_full" }, + { "dma_write_prioq_full" }, + { "rxbds_empty" }, + { "rx_discards" }, + { "rx_errors" }, + { "rx_threshold_hit" }, + + { "dma_readq_full" }, + { "dma_read_prioq_full" }, + { "tx_comp_queue_full" }, + + { "ring_set_send_prod_index" }, + { "ring_status_update" }, + { "nic_irqs" }, + { "nic_avoided_irqs" }, + { "nic_tx_threshold_hit" }, + + { "mbuf_lwm_thresh_hit" }, +}; + +#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) +#define TG3_NVRAM_TEST 0 +#define TG3_LINK_TEST 1 +#define TG3_REGISTER_TEST 2 +#define TG3_MEMORY_TEST 3 +#define TG3_MAC_LOOPB_TEST 4 +#define TG3_PHY_LOOPB_TEST 5 +#define TG3_EXT_LOOPB_TEST 6 +#define TG3_INTERRUPT_TEST 7 + + +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_test_keys[] = { + [TG3_NVRAM_TEST] = { "nvram test (online) " }, + [TG3_LINK_TEST] = { "link test (online) " }, + [TG3_REGISTER_TEST] = { "register test (offline)" }, + [TG3_MEMORY_TEST] = { "memory test (offline)" }, + [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" }, + [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" }, + [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" }, + [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" }, +}; + +#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) + + +static void tg3_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); +} + +static u32 tg3_read32(struct tg3 *tp, u32 off) +{ + return readl(tp->regs + off); +} + +static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->aperegs + off); +} + +static u32 tg3_ape_read32(struct tg3 *tp, u32 off) +{ + return readl(tp->aperegs + off); +} + +static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); + readl(tp->regs + off); +} + +static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + +static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { + pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + if (off == TG3_RX_STD_PROD_IDX_REG) { + pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + + /* In indirect mode when disabling interrupts, we also need + * to clear the interrupt bit in the GRC local ctrl register. 
+ */ + if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && + (val == 0x1)) { + pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, + tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); + } +} + +static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + +/* usec_wait specifies the wait time in usec when writing to certain registers + * where it is unsafe to read back the register without some delay. + * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. + * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. + */ +static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) +{ + if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) + /* Non-posted methods */ + tp->write32(tp, off, val); + else { + /* Posted method */ + tg3_write32(tp, off, val); + if (usec_wait) + udelay(usec_wait); + tp->read32(tp, off); + } + /* Wait again after the read for the posted method to guarantee that + * the wait time is met. + */ + if (usec_wait) + udelay(usec_wait); +} + +static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) +{ + tp->write32_mbox(tp, off, val); + if (tg3_flag(tp, FLUSH_POSTED_WRITES) || + (!tg3_flag(tp, MBOX_WRITE_REORDER) && + !tg3_flag(tp, ICH_WORKAROUND))) + tp->read32_mbox(tp, off); +} + +static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) +{ + void __iomem *mbox = tp->regs + off; + writel(val, mbox); + if (tg3_flag(tp, TXD_MBOX_HWBUG)) + writel(val, mbox); + if (tg3_flag(tp, MBOX_WRITE_REORDER) || + tg3_flag(tp, FLUSH_POSTED_WRITES)) + readl(mbox); +} + +static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) +{ + return readl(tp->regs + off + GRCMBOX_BASE); +} + +static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off + GRCMBOX_BASE); +} + +#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) +#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) +#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) +#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) +#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) + +#define tw32(reg, val) tp->write32(tp, reg, val) +#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) +#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) +#define tr32(reg) tp->read32(tp, reg) + +static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + if (tg3_asic_rev(tp) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) + return; + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + tw32_f(TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. 
*/ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) +{ + unsigned long flags; + + if (tg3_asic_rev(tp) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { + *val = 0; + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + *val = tr32(TG3PCI_MEM_WIN_DATA); + + /* Always leave this as zero. */ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_ape_lock_init(struct tg3 *tp) +{ + int i; + u32 regbase, bit; + + if (tg3_asic_rev(tp) == ASIC_REV_5761) + regbase = TG3_APE_LOCK_GRANT; + else + regbase = TG3_APE_PER_LOCK_GRANT; + + /* Make sure the driver hasn't any stale locks. */ + for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) { + switch (i) { + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_GRANT_DRIVER; + break; + default: + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + } + tg3_ape_write32(tp, regbase + 4 * i, bit); + } + +} + +static int tg3_ape_lock(struct tg3 *tp, int locknum) +{ + int i, off; + int ret = 0; + u32 status, req, gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return 0; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (tg3_asic_rev(tp) == ASIC_REV_5761) + return 0; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + if (!tp->pci_fn) + bit = APE_LOCK_REQ_DRIVER; + else + bit = 1 << tp->pci_fn; + break; + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_REQ_DRIVER; + break; + default: + return -EINVAL; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5761) { + req = TG3_APE_LOCK_REQ; + gnt = TG3_APE_LOCK_GRANT; + } else { + req = TG3_APE_PER_LOCK_REQ; + gnt = TG3_APE_PER_LOCK_GRANT; + } + + off = 4 * locknum; + + tg3_ape_write32(tp, req + off, bit); + + /* Wait for up to 1 millisecond to acquire lock. */ + for (i = 0; i < 100; i++) { + status = tg3_ape_read32(tp, gnt + off); + if (status == bit) + break; + if (pci_channel_offline(tp->pdev)) + break; + + udelay(10); + } + + if (status != bit) { + /* Revoke the lock request. 
*/ + tg3_ape_write32(tp, gnt + off, bit); + ret = -EBUSY; + } + + return ret; +} + +static void tg3_ape_unlock(struct tg3 *tp, int locknum) +{ + u32 gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (tg3_asic_rev(tp) == ASIC_REV_5761) + return; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + break; + case TG3_APE_LOCK_PHY0: + case TG3_APE_LOCK_PHY1: + case TG3_APE_LOCK_PHY2: + case TG3_APE_LOCK_PHY3: + bit = APE_LOCK_GRANT_DRIVER; + break; + default: + return; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5761) + gnt = TG3_APE_LOCK_GRANT; + else + gnt = TG3_APE_PER_LOCK_GRANT; + + tg3_ape_write32(tp, gnt + 4 * locknum, bit); +} + +static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us) +{ + u32 apedata; + + while (timeout_us) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return -EBUSY; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + udelay(10); + timeout_us -= (timeout_us > 10) ? 10 : timeout_us; + } + + return timeout_us ? 0 : -EBUSY; +} + +static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us) +{ + u32 i, apedata; + + for (i = 0; i < timeout_us / 10; i++) { + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(10); + } + + return i == timeout_us / 10; +} + +static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, + u32 len) +{ + int err; + u32 i, bufoff, msgoff, maxlen, apedata; + + if (!tg3_flag(tp, APE_HAS_NCSI)) + return 0; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return -ENODEV; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return -EAGAIN; + + bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) + + TG3_APE_SHMEM_BASE; + msgoff = bufoff + 2 * sizeof(u32); + maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN); + + while (len) { + u32 length; + + /* Cap xfer sizes to scratchpad limits. */ + length = (len > maxlen) ? maxlen : len; + len -= length; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return -EAGAIN; + + /* Wait for up to 1 msec for APE to service previous event. */ + err = tg3_ape_event_lock(tp, 1000); + if (err) + return err; + + apedata = APE_EVENT_STATUS_DRIVER_EVNT | + APE_EVENT_STATUS_SCRTCHPD_READ | + APE_EVENT_STATUS_EVENT_PENDING; + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata); + + tg3_ape_write32(tp, bufoff, base_off); + tg3_ape_write32(tp, bufoff + sizeof(u32), length); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); + + base_off += length; + + if (tg3_ape_wait_for_event(tp, 30000)) + return -EAGAIN; + + for (i = 0; length; i += 4, length -= 4) { + u32 val = tg3_ape_read32(tp, msgoff + i); + memcpy(data, &val, sizeof(u32)); + data++; + } + } + + return 0; +} + +static int tg3_ape_send_event(struct tg3 *tp, u32 event) +{ + int err; + u32 apedata; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return -EAGAIN; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return -EAGAIN; + + /* Wait for up to 1 millisecond for APE to service previous event. 
*/ + err = tg3_ape_event_lock(tp, 1000); + if (err) + return err; + + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); + + return 0; +} + +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) +{ + u32 event; + u32 apedata; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. + */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); +} + +static void tg3_disable_ints(struct tg3 *tp) +{ + int i; + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); + for (i = 0; i < tp->irq_max; i++) + tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); +} + +static void tg3_enable_ints(struct tg3 *tp) +{ + int i; + + tp->irq_sync = 0; + wmb(); + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); + + tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + if (tg3_flag(tp, 1SHOT_MSI)) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + tp->coal_now |= tnapi->coal_now; + } + + /* Force an initial interrupt */ + if (!tg3_flag(tp, TAGGED_STATUS) && + (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coal_now); + + tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); +} + +static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int work_exists = 0; + + /* check for phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + if (sblk->status & SD_STATUS_LINK_CHG) + work_exists = 1; + } + + /* check for TX work to do */ + if (sblk->idx[0].tx_consumer != tnapi->tx_cons) + work_exists = 1; + + /* check for RX work to do */ + if (tnapi->rx_rcb_prod_idx && + *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_exists = 1; + + return work_exists; +} + +/* tg3_int_reenable + * similar to tg3_enable_ints, but it accurately determines whether there + * is new work 
pending and can return without flushing the PIO write + * which reenables interrupts + */ +static void tg3_int_reenable(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + + /* When doing tagged status, this work check is unnecessary. + * The last_tag we write above tells the chip which piece of + * work we've completed. + */ + if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | tnapi->coal_now); +} + +static void tg3_switch_clocks(struct tg3 *tp) +{ + u32 clock_ctrl; + u32 orig_clock_ctrl; + + if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) + return; + + clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); + + orig_clock_ctrl = clock_ctrl; + clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | + CLOCK_CTRL_CLKRUN_OENABLE | + 0x1f); + tp->pci_clock_ctrl = clock_ctrl; + + if (tg3_flag(tp, 5705_PLUS)) { + if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | CLOCK_CTRL_625_CORE, 40); + } + } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | + (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), + 40); + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | (CLOCK_CTRL_ALTCLK), + 40); + } + tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); +} + +#define PHY_BUSY_LOOPS 5000 + +static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg, + u32 *val) +{ + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + tg3_ape_lock(tp, tp->phy_ape_lock); + + *val = 0x0; + + frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (MI_COM_CMD_READ | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) { + *val = frame_val & MI_COM_DATA_MASK; + ret = 0; + } + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + tg3_ape_unlock(tp, tp->phy_ape_lock); + + return ret; +} + +static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) +{ + return __tg3_readphy(tp, tp->phy_addr, reg, val); +} + +static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg, + u32 val) +{ + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) + return 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + tg3_ape_lock(tp, tp->phy_ape_lock); + + frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (val & MI_COM_DATA_MASK); + frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) + ret = 0; + + if ((tp->mi_mode 
& MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + tg3_ape_unlock(tp, tp->phy_ape_lock); + + return ret; +} + +static int tg3_writephy(struct tg3 *tp, int reg, u32 val) +{ + return __tg3_writephy(tp, tp->phy_addr, reg, val); +} + +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_AUX_CTRL, + (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | + MII_TG3_AUXCTL_SHDWSEL_MISC); + if (!err) + err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); + + return err; +} + +static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) +{ + if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) + set |= MII_TG3_AUXCTL_MISC_WREN; + + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); +} + +static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) +{ + u32 val; + int err; + + err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + + if (err) + return err; + + if (enable) + val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; + else + val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; + + err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_TX_6DB); + + return err; +} + +static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val) +{ + return tg3_writephy(tp, MII_TG3_MISC_SHDW, + reg | val | MII_TG3_MISC_SHDW_WREN); +} + +static int tg3_bmcr_reset(struct tg3 *tp) +{ + u32 phy_control; + int limit, err; + + /* OK, reset it, and poll the BMCR_RESET bit until it + * clears or we time out. 
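+ * (The loop below gives the reset up to 5000 polls of 10 usec each,
+ * i.e. roughly 50 ms, before giving up with -EBUSY.)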
+ */ + phy_control = BMCR_RESET; + err = tg3_writephy(tp, MII_BMCR, phy_control); + if (err != 0) + return -EBUSY; + + limit = 5000; + while (limit--) { + err = tg3_readphy(tp, MII_BMCR, &phy_control); + if (err != 0) + return -EBUSY; + + if ((phy_control & BMCR_RESET) == 0) { + udelay(40); + break; + } + udelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) +{ + struct tg3 *tp = bp->priv; + u32 val; + + spin_lock_bh(&tp->lock); + + if (__tg3_readphy(tp, mii_id, reg, &val)) + val = -EIO; + + spin_unlock_bh(&tp->lock); + + return val; +} + +static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) +{ + struct tg3 *tp = bp->priv; + u32 ret = 0; + + spin_lock_bh(&tp->lock); + + if (__tg3_writephy(tp, mii_id, reg, val)) + ret = -EIO; + + spin_unlock_bh(&tp->lock); + + return ret; +} + +static void tg3_mdio_config_5785(struct tg3 *tp) +{ + u32 val; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + val = MAC_PHYCFG2_50610_LED_MODES; + break; + case PHY_ID_BCMAC131: + val = MAC_PHYCFG2_AC131_LED_MODES; + break; + case PHY_ID_RTL8211C: + val = MAC_PHYCFG2_RTL8211C_LED_MODES; + break; + case PHY_ID_RTL8201E: + val = MAC_PHYCFG2_RTL8201E_LED_MODES; + break; + default: + return; + } + + if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; + tw32(MAC_PHYCFG1, val); + + return; + } + + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) + val |= MAC_PHYCFG2_EMODE_MASK_MASK | + MAC_PHYCFG2_FMODE_MASK_MASK | + MAC_PHYCFG2_GMODE_MASK_MASK | + MAC_PHYCFG2_ACT_MASK_MASK | + MAC_PHYCFG2_QUAL_MASK_MASK | + MAC_PHYCFG2_INBAND_ENABLE; + + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | + MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; + } + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | + MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; + tw32(MAC_PHYCFG1, val); + + val = tr32(MAC_EXT_RGMII_MODE); + val &= ~(MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET | + MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET; + } + tw32(MAC_EXT_RGMII_MODE, val); +} + +static void tg3_mdio_start(struct tg3 *tp) +{ + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + + if (tg3_flag(tp, MDIOBUS_INITED) && + tg3_asic_rev(tp) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); +} + +static int tg3_mdio_init(struct tg3 *tp) +{ + int i; + u32 reg; + struct phy_device *phydev; + + if (tg3_flag(tp, 5717_PLUS)) { + u32 is_serdes; + + 
tp->phy_addr = tp->pci_fn + 1; + + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) + is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; + else + is_serdes = tr32(TG3_CPMU_PHY_STRAP) & + TG3_CPMU_PHY_STRAP_IS_SERDES; + if (is_serdes) + tp->phy_addr += 7; + } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) { + int addr; + + addr = ssb_gige_get_phyaddr(tp->pdev); + if (addr < 0) + return addr; + tp->phy_addr = addr; + } else + tp->phy_addr = TG3_PHY_MII_ADDR; + + tg3_mdio_start(tp); + + if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) + return 0; + + tp->mdio_bus = mdiobus_alloc(); + if (tp->mdio_bus == NULL) + return -ENOMEM; + + tp->mdio_bus->name = "tg3 mdio bus"; + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", + (tp->pdev->bus->number << 8) | tp->pdev->devfn); + tp->mdio_bus->priv = tp; + tp->mdio_bus->parent = &tp->pdev->dev; + tp->mdio_bus->read = &tg3_mdio_read; + tp->mdio_bus->write = &tg3_mdio_write; + tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr); + tp->mdio_bus->irq = &tp->mdio_irq[0]; + + for (i = 0; i < PHY_MAX_ADDR; i++) + tp->mdio_bus->irq[i] = PHY_POLL; + + /* The bus registration will look for all the PHYs on the mdio bus. + * Unfortunately, it does not ensure the PHY is powered up before + * accessing the PHY ID registers. A chip reset is the + * quickest way to bring the device back to an operational state.. + */ + if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) + tg3_bmcr_reset(tp); + + i = mdiobus_register(tp->mdio_bus); + if (i) { + dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); + mdiobus_free(tp->mdio_bus); + return i; + } + + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + + if (!phydev || !phydev->drv) { + dev_warn(&tp->pdev->dev, "No PHY devices\n"); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + return -ENODEV; + } + + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM57780: + phydev->interface = PHY_INTERFACE_MODE_GMII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + break; + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | + PHY_BRCM_RX_REFCLK_UNUSED | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_AUTO_PWRDWN_ENABLE; + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) + phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; + /* fallthru */ + case PHY_ID_RTL8211C: + phydev->interface = PHY_INTERFACE_MODE_RGMII; + break; + case PHY_ID_RTL8201E: + case PHY_ID_BCMAC131: + phydev->interface = PHY_INTERFACE_MODE_MII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + tp->phy_flags |= TG3_PHYFLG_IS_FET; + break; + } + + tg3_flag_set(tp, MDIOBUS_INITED); + + if (tg3_asic_rev(tp) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); + + return 0; +} + +static void tg3_mdio_fini(struct tg3 *tp) +{ + if (tg3_flag(tp, MDIOBUS_INITED)) { + tg3_flag_clear(tp, MDIOBUS_INITED); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + } +} + +/* tp->lock is held. */ +static inline void tg3_generate_fw_event(struct tg3 *tp) +{ + u32 val; + + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); + + tp->last_event_jiffies = jiffies; +} + +#define TG3_FW_EVENT_TIMEOUT_USEC 2500 + +/* tp->lock is held.
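+ * tg3_wait_for_event_ack() below gives the firmware up to
+ * TG3_FW_EVENT_TIMEOUT_USEC to acknowledge the previously generated
+ * driver event, polling GRC_RX_CPU_EVENT in 8 usec steps, skipping the
+ * wait entirely if enough time has already passed and bailing out early
+ * if the PCI channel has gone offline.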
*/ +static void tg3_wait_for_event_ack(struct tg3 *tp) +{ + int i; + unsigned int delay_cnt; + long time_remain; + + /* If enough time has passed, no wait is necessary. */ + time_remain = (long)(tp->last_event_jiffies + 1 + + usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - + (long)jiffies; + if (time_remain < 0) + return; + + /* Check if we can shorten the wait time. */ + delay_cnt = jiffies_to_usecs(time_remain); + if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) + delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; + delay_cnt = (delay_cnt >> 3) + 1; + + for (i = 0; i < delay_cnt; i++) { + if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) + break; + if (pci_channel_offline(tp->pdev)) + break; + + udelay(8); + } +} + +/* tp->lock is held. */ +static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data) +{ + u32 reg, val; + + val = 0; + if (!tg3_readphy(tp, MII_BMCR, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_BMSR, &reg)) + val |= (reg & 0xffff); + *data++ = val; + + val = 0; + if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_LPA, &reg)) + val |= (reg & 0xffff); + *data++ = val; + + val = 0; + if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { + if (!tg3_readphy(tp, MII_CTRL1000, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_STAT1000, &reg)) + val |= (reg & 0xffff); + } + *data++ = val; + + if (!tg3_readphy(tp, MII_PHYADDR, &reg)) + val = reg << 16; + else + val = 0; + *data++ = val; +} + +/* tp->lock is held. */ +static void tg3_ump_link_report(struct tg3 *tp) +{ + u32 data[4]; + + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) + return; + + tg3_phy_gather_ump_data(tp, data); + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]); + + tg3_generate_fw_event(tp); +} + +/* tp->lock is held. */ +static void tg3_stop_fw(struct tg3 *tp) +{ + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + /* Wait for RX cpu to ACK the previous event. */ + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); + + tg3_generate_fw_event(tp); + + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); + } +} + +/* tp->lock is held. */ +static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) +{ + tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC1); + + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } +} + +/* tp->lock is held. */ +static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START_DONE); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD_DONE); + break; + + default: + break; + } + } +} + +/* tp->lock is held.
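+ * tg3_write_sig_legacy() below writes the same DRV_STATE_* values as
+ * tg3_write_sig_pre_reset(), but only when ENABLE_ASF is set, without
+ * the ASF_NEW_HANDSHAKE check and without touching the firmware
+ * mailbox magic.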
*/ +static void tg3_write_sig_legacy(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ENABLE_ASF)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } +} + +static int tg3_poll_fw(struct tg3 *tp) +{ + int i; + u32 val; + + if (tg3_flag(tp, NO_FWARE_REPORTED)) + return 0; + + if (tg3_flag(tp, IS_SSB_CORE)) { + /* We don't use firmware. */ + return 0; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + /* Wait up to 20ms for init done. */ + for (i = 0; i < 200; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + if (pci_channel_offline(tp->pdev)) + return -ENODEV; + + udelay(100); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete. */ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + if (pci_channel_offline(tp->pdev)) { + if (!tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + netdev_info(tp->dev, "No firmware running\n"); + } + + break; + } + + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + + netdev_info(tp->dev, "No firmware running\n"); + } + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { + /* The 57765 A0 needs a little more + * time to do some important work. + */ + mdelay(10); + } + + return 0; +} + +static void tg3_link_report(struct tg3 *tp) +{ + if (!netif_carrier_ok(tp->dev)) { + netif_info(tp, link, tp->dev, "Link is down\n"); + tg3_ump_link_report(tp); + } else if (netif_msg_link(tp)) { + netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", + (tp->link_config.active_speed == SPEED_1000 ? + 1000 : + (tp->link_config.active_speed == SPEED_100 ? + 100 : 10)), + (tp->link_config.active_duplex == DUPLEX_FULL ? + "full" : "half")); + + netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", + (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? + "on" : "off", + (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? + "on" : "off"); + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + netdev_info(tp->dev, "EEE is %s\n", + tp->setlpicnt ? 
"enabled" : "disabled"); + + tg3_ump_link_report(tp); + } + + tp->link_up = netif_carrier_ok(tp->dev); +} + +static u32 tg3_decode_flowctrl_1000T(u32 adv) +{ + u32 flowctrl = 0; + + if (adv & ADVERTISE_PAUSE_CAP) { + flowctrl |= FLOW_CTRL_RX; + if (!(adv & ADVERTISE_PAUSE_ASYM)) + flowctrl |= FLOW_CTRL_TX; + } else if (adv & ADVERTISE_PAUSE_ASYM) + flowctrl |= FLOW_CTRL_TX; + + return flowctrl; +} + +static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_1000XPAUSE; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_1000XPSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + else + miireg = 0; + + return miireg; +} + +static u32 tg3_decode_flowctrl_1000X(u32 adv) +{ + u32 flowctrl = 0; + + if (adv & ADVERTISE_1000XPAUSE) { + flowctrl |= FLOW_CTRL_RX; + if (!(adv & ADVERTISE_1000XPSE_ASYM)) + flowctrl |= FLOW_CTRL_TX; + } else if (adv & ADVERTISE_1000XPSE_ASYM) + flowctrl |= FLOW_CTRL_TX; + + return flowctrl; +} + +static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) { + if (lcladv & ADVERTISE_1000XPAUSE) + cap = FLOW_CTRL_RX; + if (rmtadv & ADVERTISE_1000XPAUSE) + cap = FLOW_CTRL_TX; + } + + return cap; +} + +static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) +{ + u8 autoneg; + u8 flowctrl = 0; + u32 old_rx_mode = tp->rx_mode; + u32 old_tx_mode = tp->tx_mode; + + if (tg3_flag(tp, USE_PHYLIB)) + autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg; + else + autoneg = tp->link_config.autoneg; + + if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); + else + flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + } else + flowctrl = tp->link_config.flowctrl; + + tp->link_config.active_flowctrl = flowctrl; + + if (flowctrl & FLOW_CTRL_RX) + tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; + else + tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; + + if (old_rx_mode != tp->rx_mode) + tw32_f(MAC_RX_MODE, tp->rx_mode); + + if (flowctrl & FLOW_CTRL_TX) + tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; + else + tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; + + if (old_tx_mode != tp->tx_mode) + tw32_f(MAC_TX_MODE, tp->tx_mode); +} + +static void tg3_adjust_link(struct net_device *dev) +{ + u8 oldflowctrl, linkmesg = 0; + u32 mac_mode, lcl_adv, rmt_adv; + struct tg3 *tp = netdev_priv(dev); + struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + + spin_lock_bh(&tp->lock); + + mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | + MAC_MODE_HALF_DUPLEX); + + oldflowctrl = tp->link_config.active_flowctrl; + + if (phydev->link) { + lcl_adv = 0; + rmt_adv = 0; + + if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) + mac_mode |= MAC_MODE_PORT_MODE_MII; + else if (phydev->speed == SPEED_1000 || + tg3_asic_rev(tp) != ASIC_REV_5785) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (phydev->duplex == DUPLEX_HALF) + mac_mode |= MAC_MODE_HALF_DUPLEX; + else { + lcl_adv = mii_advertise_flowctrl( + tp->link_config.flowctrl); + + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + } + + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } else + mac_mode |= 
MAC_MODE_PORT_MODE_GMII; + + if (mac_mode != tp->mac_mode) { + tp->mac_mode = mac_mode; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5785) { + if (phydev->speed == SPEED_10) + tw32(MAC_MI_STAT, + MAC_MI_STAT_10MBPS_MODE | + MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + else + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + } + + if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + else + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + + if (phydev->link != tp->old_link || + phydev->speed != tp->link_config.active_speed || + phydev->duplex != tp->link_config.active_duplex || + oldflowctrl != tp->link_config.active_flowctrl) + linkmesg = 1; + + tp->old_link = phydev->link; + tp->link_config.active_speed = phydev->speed; + tp->link_config.active_duplex = phydev->duplex; + + spin_unlock_bh(&tp->lock); + + if (linkmesg) + tg3_link_report(tp); +} + +static int tg3_phy_init(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) + return 0; + + /* Bring the PHY back to a known state. */ + tg3_bmcr_reset(tp); + + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + + /* Attach the MAC to the PHY. */ + phydev = phy_connect(tp->dev, dev_name(&phydev->dev), + tg3_adjust_link, phydev->interface); + if (IS_ERR(phydev)) { + dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* Mask with MAC supported features. */ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + phydev->supported &= (PHY_GBIT_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + } + /* fallthru */ + case PHY_INTERFACE_MODE_MII: + phydev->supported &= (PHY_BASIC_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + return -EINVAL; + } + + tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; + + phydev->advertising = phydev->supported; + + return 0; +} + +static void tg3_phy_start(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + phydev->speed = tp->link_config.speed; + phydev->duplex = tp->link_config.duplex; + phydev->autoneg = tp->link_config.autoneg; + phydev->advertising = tp->link_config.advertising; + } + + phy_start(phydev); + + phy_start_aneg(phydev); +} + +static void tg3_phy_stop(struct tg3 *tp) +{ + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]); +} + +static void tg3_phy_fini(struct tg3 *tp) +{ + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]); + tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; + } +} + +static int tg3_phy_set_extloopbk(struct tg3 *tp) +{ + int err; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + return 0; + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + MII_TG3_AUXCTL_ACTL_EXTLOOPBK | + 0x4c20); + goto done; + } + + err = tg3_phy_auxctl_read(tp, + 
MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (err) + return err; + + val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); + +done: + return err; +} + +static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 phytest; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; + else + phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; + tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } +} + +static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 reg; + + if (!tg3_flag(tp, 5705_PLUS) || + (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + tg3_phy_fet_toggle_apd(tp, enable); + return; + } + + reg = MII_TG3_MISC_SHDW_SCR5_LPED | + MII_TG3_MISC_SHDW_SCR5_DLPTLM | + MII_TG3_MISC_SHDW_SCR5_SDTL | + MII_TG3_MISC_SHDW_SCR5_C125OE; + if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable) + reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; + + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); + + + reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS; + if (enable) + reg |= MII_TG3_MISC_SHDW_APD_ENABLE; + + tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg); +} + +static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) +{ + u32 phy; + + if (!tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 ephy; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { + u32 reg = MII_TG3_FET_SHDW_MISCCTRL; + + tg3_writephy(tp, MII_TG3_FET_TEST, + ephy | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, reg, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; + else + phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; + tg3_writephy(tp, reg, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, ephy); + } + } else { + int ret; + + ret = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); + if (!ret) { + if (enable) + phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + else + phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, phy); + } + } +} + +static void tg3_phy_set_wirespeed(struct tg3 *tp) +{ + int ret; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) + return; + + ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (!ret) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, + val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); +} + +static void tg3_phy_apply_otp(struct tg3 *tp) +{ + u32 otp, phy; + + if (!tp->phy_otp) + return; + + otp = tp->phy_otp; + + if (tg3_phy_toggle_auxctl_smdsp(tp, true)) + return; + + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); + phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); + + phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | + ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); + + phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); + phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); + + phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); + + phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); + tg3_phydsp_write(tp, 
MII_TG3_DSP_EXP96, phy); + + phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + + tg3_phy_toggle_auxctl_smdsp(tp, false); +} + +static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) +{ + u32 val; + struct ethtool_eee *dest = &tp->eee; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + if (eee) + dest = eee; + + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) + return; + + /* Pull eee_active */ + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { + dest->eee_active = 1; + } else + dest->eee_active = 0; + + /* Pull lp advertised settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) + return; + dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull advertised and eee_enabled settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) + return; + dest->eee_enabled = !!val; + dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull tx_lpi_enabled */ + val = tr32(TG3_CPMU_EEE_MODE); + dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); + + /* Pull lpi timer value */ + dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; +} + +static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) +{ + u32 val; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + tp->setlpicnt = 0; + + if (tp->link_config.autoneg == AUTONEG_ENABLE && + current_link_up && + tp->link_config.active_duplex == DUPLEX_FULL && + (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_1000)) { + u32 eeectl; + + if (tp->link_config.active_speed == SPEED_1000) + eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; + else + eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; + + tw32(TG3_CPMU_EEE_CTRL, eeectl); + + tg3_eee_pull_config(tp, NULL); + if (tp->eee.eee_active) + tp->setlpicnt = 2; + } + + if (!tp->setlpicnt) { + if (current_link_up && + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); + tg3_phy_toggle_auxctl_smdsp(tp, false); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); + } +} + +static void tg3_phy_eee_enable(struct tg3 *tp) +{ + u32 val; + + if (tp->link_config.active_speed == SPEED_1000 && + (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_flag(tp, 57765_CLASS)) && + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + tg3_phy_toggle_auxctl_smdsp(tp, false); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); +} + +static int tg3_wait_macro_done(struct tg3 *tp) +{ + int limit = 100; + + while (limit--) { + u32 tmp32; + + if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { + if ((tmp32 & 0x1000) == 0) + break; + } + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) +{ + static const u32 test_pat[4][6] = { + { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, + { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, + { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, + { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } + }; + int chan; + + for (chan = 0; chan < 4; chan++) { + 
int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, + test_pat[chan][i]); + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + for (i = 0; i < 6; i += 2) { + u32 low, high; + + if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) || + tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + low &= 0x7fff; + high &= 0x000f; + if (low != test_pat[chan][i] || + high != test_pat[chan][i+1]) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); + + return -EBUSY; + } + } + } + + return 0; +} + +static int tg3_phy_reset_chanpat(struct tg3 *tp) +{ + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) + return -EBUSY; + } + + return 0; +} + +static int tg3_phy_reset_5703_4_5(struct tg3 *tp) +{ + u32 reg32, phy9_orig; + int retries, do_phy_reset, err; + + retries = 10; + do_phy_reset = 1; + do { + if (do_phy_reset) { + err = tg3_bmcr_reset(tp); + if (err) + return err; + do_phy_reset = 0; + } + + /* Disable transmitter and interrupt. */ + if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) + continue; + + reg32 |= 0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + + /* Set full-duplex, 1000 mbps. */ + tg3_writephy(tp, MII_BMCR, + BMCR_FULLDPLX | BMCR_SPEED1000); + + /* Set to master mode. */ + if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) + continue; + + tg3_writephy(tp, MII_CTRL1000, + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + + err = tg3_phy_toggle_auxctl_smdsp(tp, true); + if (err) + return err; + + /* Block the PHY control access. */ + tg3_phydsp_write(tp, 0x8005, 0x0800); + + err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); + if (!err) + break; + } while (--retries); + + err = tg3_phy_reset_chanpat(tp); + if (err) + return err; + + tg3_phydsp_write(tp, 0x8005, 0x0000); + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); + + tg3_phy_toggle_auxctl_smdsp(tp, false); + + tg3_writephy(tp, MII_CTRL1000, phy9_orig); + + err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); + if (err) + return err; + + reg32 &= ~0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + + return 0; +} + +static void tg3_carrier_off(struct tg3 *tp) +{ + netif_carrier_off(tp->dev); + tp->link_up = false; +} + +static void tg3_warn_mgmt_link_flap(struct tg3 *tp) +{ + if (tg3_flag(tp, ENABLE_ASF)) + netdev_warn(tp->dev, + "Management side-band traffic will be interrupted during phy settings change\n"); +} + +/* This will reset the tigon3 PHY if there is no valid + * link unless the FORCE argument is non-zero.
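+ * (As implemented below, tg3_phy_reset() takes no FORCE argument; the
+ * reset is attempted whenever the function is called.)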
+ */ +static int tg3_phy_reset(struct tg3 *tp) +{ + u32 val, cpmuctrl; + int err; + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + } + err = tg3_readphy(tp, MII_BMSR, &val); + err |= tg3_readphy(tp, MII_BMSR, &val); + if (err != 0) + return -EBUSY; + + if (netif_running(tp->dev) && tp->link_up) { + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_asic_rev(tp) == ASIC_REV_5705) { + err = tg3_phy_reset_5703_4_5(tp); + if (err) + return err; + goto out; + } + + cpmuctrl = 0; + if (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != CHIPREV_5784_AX) { + cpmuctrl = tr32(TG3_CPMU_CTRL); + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); + } + + err = tg3_bmcr_reset(tp); + if (err) + return err; + + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { + val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); + + tw32(TG3_CPMU_CTRL, cpmuctrl); + } + + if (tg3_chip_rev(tp) == CHIPREV_5784_AX || + tg3_chip_rev(tp) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == + CPMU_LSPD_1000MB_MACCLK_12_5) { + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + udelay(40); + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + } + + if (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) + return 0; + + tg3_phy_apply_otp(tp); + + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + else + tg3_phy_toggle_apd(tp, false); + +out: + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); + tg3_phy_toggle_auxctl_smdsp(tp, false); + } + + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + } + + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); + tg3_phy_toggle_auxctl_smdsp(tp, false); + } + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); + tg3_writephy(tp, MII_TG3_TEST1, + MII_TG3_TEST1_TRIM_EN | 0x4); + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); + + tg3_phy_toggle_auxctl_smdsp(tp, false); + } + } + + /* Set Extended packet length bit (bit 14) on all chips that */ + /* support jumbo frames */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + } else if (tg3_flag(tp, JUMBO_CAPABLE)) { + /* Set bit 14 with read-modify-write to preserve other bits */ + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (!err) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); + } + + /* Set phy register 0x10 bit 0 to high fifo elasticity to support + * jumbo frames transmission. 
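+ * (The register written below is MII_TG3_EXT_CTRL and the bit set is
+ * MII_TG3_EXT_CTRL_FIFO_ELASTIC.)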
+ */ + if (tg3_flag(tp, JUMBO_CAPABLE)) { + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + /* adjust output voltage */ + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); + } + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0) + tg3_phydsp_write(tp, 0xffb, 0x4000); + + tg3_phy_toggle_automdix(tp, true); + tg3_phy_set_wirespeed(tp); + return 0; +} + +#define TG3_GPIO_MSG_DRVR_PRES 0x00000001 +#define TG3_GPIO_MSG_NEED_VAUX 0x00000002 +#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ + TG3_GPIO_MSG_NEED_VAUX) +#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ + ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ + (TG3_GPIO_MSG_DRVR_PRES << 4) | \ + (TG3_GPIO_MSG_DRVR_PRES << 8) | \ + (TG3_GPIO_MSG_DRVR_PRES << 12)) + +#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ + ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ + (TG3_GPIO_MSG_NEED_VAUX << 4) | \ + (TG3_GPIO_MSG_NEED_VAUX << 8) | \ + (TG3_GPIO_MSG_NEED_VAUX << 12)) + +static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) +{ + u32 status, shift; + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719) + status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); + else + status = tr32(TG3_CPMU_DRV_STATUS); + + shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; + status &= ~(TG3_GPIO_MSG_MASK << shift); + status |= (newstat << shift); + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719) + tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); + else + tw32(TG3_CPMU_DRV_STATUS, status); + + return status >> TG3_APE_GPIO_MSG_SHIFT; +} + +static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return 0; + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return -EIO; + + tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); + + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } else { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + return 0; +} + +static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) +{ + u32 grc_local_ctrl; + + if (!tg3_flag(tp, IS_NIC) || + tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) + return; + + grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); +} + +static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return; + + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + (GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1), + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. 
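+ * Because of that swap, the sequence below first drives OUTPUT0 and
+ * OUTPUT1, then raises OUTPUT2, and finally releases OUTPUT0, waiting
+ * TG3_GRC_LCLCTL_PWRSW_DELAY after each write.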
*/ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else { + u32 no_gpio2; + u32 grc_local_ctrl = 0; + + /* Workaround to prevent overdrawing Amps. */ + if (tg3_asic_rev(tp) == ASIC_REV_5714) { + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + /* On 5753 and variants, GPIO2 cannot be used. */ + no_gpio2 = tp->nic_sram_data_cfg & + NIC_SRAM_DATA_CFG_NO_GPIO2; + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + GRC_LCLCTRL_GPIO_OUTPUT2; + if (no_gpio2) { + grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT2); + } + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; + + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + if (!no_gpio2) { + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + } +} + +static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) +{ + u32 msg = 0; + + /* Serialize power state transitions */ + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return; + + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) + msg = TG3_GPIO_MSG_NEED_VAUX; + + msg = tg3_set_function_status(tp, msg); + + if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) + goto done; + + if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + +done: + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); +} + +static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) +{ + bool need_vaux = false; + + /* The GPIOs do something completely different on 57765. */ + if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS)) + return; + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { + tg3_frob_aux_power_5717(tp, include_wol ? + tg3_flag(tp, WOL_ENABLE) != 0 : 0); + return; + } + + if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { + struct net_device *dev_peer; + + dev_peer = pci_get_drvdata(tp->pdev_peer); + + /* remove_one() may have been run on the peer. 
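+ * If so, pci_get_drvdata() above returned NULL, so the peer's flags
+ * are only consulted when a device is still present.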
*/ + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tg3_flag(tp_peer, INIT_COMPLETE)) + return; + + if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || + tg3_flag(tp_peer, ENABLE_ASF)) + need_vaux = true; + } + } + + if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || + tg3_flag(tp, ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); +} + +static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) +{ + if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) + return 1; + else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { + if (speed != SPEED_10) + return 1; + } else if (speed == SPEED_10) + return 1; + + return 0; +} + +static bool tg3_phy_power_bug(struct tg3 *tp) +{ + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5700: + case ASIC_REV_5704: + return true; + case ASIC_REV_5780: + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + return true; + return false; + case ASIC_REV_5717: + if (!tp->pci_fn) + return true; + return false; + case ASIC_REV_5719: + case ASIC_REV_5720: + if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tp->pci_fn) + return true; + return false; + } + + return false; +} + +static bool tg3_phy_led_bug(struct tg3 *tp) +{ + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5719: + case ASIC_REV_5720: + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + !tp->pci_fn) + return true; + return false; + } + + return false; +} + +static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) +{ + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) + return; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if (tg3_asic_rev(tp) == ASIC_REV_5704) { + u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); + u32 serdes_cfg = tr32(MAC_SERDES_CFG); + + sg_dig_ctrl |= + SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; + tw32(SG_DIG_CTRL, sg_dig_ctrl); + tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); + } + return; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + tg3_bmcr_reset(tp); + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + return; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 phytest; + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_ADVERTISE, 0); + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { + phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; + tg3_writephy(tp, + MII_TG3_FET_SHDW_AUXMODE4, + phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + return; + } else if (do_low_power) { + if (!tg3_phy_led_bug(tp)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); + + val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); + } + + /* The PHY should not be powered down on some chips because + * of bugs. + */ + if (tg3_phy_power_bug(tp)) + return; + + if (tg3_chip_rev(tp) == CHIPREV_5784_AX || + tg3_chip_rev(tp) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + val |= CPMU_LSPD_1000MB_MACCLK_12_5; + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + + tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +/* tp->lock is held. 
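+ * tg3_nvram_lock() below claims the NVRAM software arbitration by
+ * setting SWARB_REQ_SET1 and polling for SWARB_GNT1, and nests via
+ * tp->nvram_lock_cnt so that only the outermost tg3_nvram_unlock()
+ * releases the grant.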
*/ +static int tg3_nvram_lock(struct tg3 *tp) +{ + if (tg3_flag(tp, NVRAM)) { + int i; + + if (tp->nvram_lock_cnt == 0) { + tw32(NVRAM_SWARB, SWARB_REQ_SET1); + for (i = 0; i < 8000; i++) { + if (tr32(NVRAM_SWARB) & SWARB_GNT1) + break; + udelay(20); + } + if (i == 8000) { + tw32(NVRAM_SWARB, SWARB_REQ_CLR1); + return -ENODEV; + } + } + tp->nvram_lock_cnt++; + } + return 0; +} + +/* tp->lock is held. */ +static void tg3_nvram_unlock(struct tg3 *tp) +{ + if (tg3_flag(tp, NVRAM)) { + if (tp->nvram_lock_cnt > 0) + tp->nvram_lock_cnt--; + if (tp->nvram_lock_cnt == 0) + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); + } +} + +/* tp->lock is held. */ +static void tg3_enable_nvram_access(struct tg3 *tp) +{ + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); + } +} + +/* tp->lock is held. */ +static void tg3_disable_nvram_access(struct tg3 *tp) +{ + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); + } +} + +static int tg3_nvram_read_using_eeprom(struct tg3 *tp, + u32 offset, u32 *val) +{ + u32 tmp; + int i; + + if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) + return -EINVAL; + + tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | + EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, + tmp | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + ((offset << EEPROM_ADDR_ADDR_SHIFT) & + EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_READ | EEPROM_ADDR_START); + + for (i = 0; i < 1000; i++) { + tmp = tr32(GRC_EEPROM_ADDR); + + if (tmp & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(tmp & EEPROM_ADDR_COMPLETE)) + return -EBUSY; + + tmp = tr32(GRC_EEPROM_DATA); + + /* + * The data will always be opposite the native endian + * format. Perform a blind byteswap to compensate. + */ + *val = swab32(tmp); + + return 0; +} + +#define NVRAM_CMD_TIMEOUT 5000 + +static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) +{ + int i; + + tw32(NVRAM_CMD, nvram_cmd); + for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { + usleep_range(10, 40); + if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { + udelay(10); + break; + } + } + + if (i == NVRAM_CMD_TIMEOUT) + return -EBUSY; + + return 0; +} + +static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) +{ + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr / tp->nvram_pagesize) << + ATMEL_AT45DB0X1B_PAGE_POS) + + (addr % tp->nvram_pagesize); + + return addr; +} + +static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) +{ + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * + tp->nvram_pagesize) + + (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); + + return addr; +} + +/* NOTE: Data read in from NVRAM is byteswapped according to + * the byteswapping settings for all other register accesses. + * tg3 devices are BE devices, so on a BE machine, the data + * returned will be exactly as it is seen in NVRAM. On a LE + * machine, the 32-bit value will be byteswapped. 
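+ * tg3_nvram_read_be32() below exists for callers that want the NVRAM
+ * contents as a big-endian bytestream regardless of host endianness;
+ * it simply feeds the value returned here through cpu_to_be32().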
+ */ +static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) +{ + int ret; + + if (!tg3_flag(tp, NVRAM)) + return tg3_nvram_read_using_eeprom(tp, offset, val); + + offset = tg3_nvram_phys_addr(tp, offset); + + if (offset > NVRAM_ADDR_MSK) + return -EINVAL; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + + tw32(NVRAM_ADDR, offset); + ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); + + if (ret == 0) + *val = tr32(NVRAM_RDDATA); + + tg3_disable_nvram_access(tp); + + tg3_nvram_unlock(tp); + + return ret; +} + +/* Ensures NVRAM data is in bytestream format. */ +static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) +{ + u32 v; + int res = tg3_nvram_read(tp, offset, &v); + if (!res) + *val = cpu_to_be32(v); + return res; +} + +static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, + u32 offset, u32 len, u8 *buf) +{ + int i, j, rc = 0; + u32 val; + + for (i = 0; i < len; i += 4) { + u32 addr; + __be32 data; + + addr = offset + i; + + memcpy(&data, buf + i, 4); + + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). + */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); + + val = tr32(GRC_EEPROM_ADDR); + tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); + + val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, val | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + (addr & EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_START | + EEPROM_ADDR_WRITE); + + for (j = 0; j < 1000; j++) { + val = tr32(GRC_EEPROM_ADDR); + + if (val & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(val & EEPROM_ADDR_COMPLETE)) { + rc = -EBUSY; + break; + } + } + + return rc; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) +{ + int ret = 0; + u32 pagesize = tp->nvram_pagesize; + u32 pagemask = pagesize - 1; + u32 nvram_cmd; + u8 *tmp; + + tmp = kmalloc(pagesize, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + + while (len) { + int j; + u32 phy_addr, page_off, size; + + phy_addr = offset & ~pagemask; + + for (j = 0; j < pagesize; j += 4) { + ret = tg3_nvram_read_be32(tp, phy_addr + j, + (__be32 *) (tmp + j)); + if (ret) + break; + } + if (ret) + break; + + page_off = offset & pagemask; + size = pagesize; + if (len < size) + size = len; + + len -= size; + + memcpy(tmp + page_off, buf, size); + + offset = offset + (pagesize - page_off); + + tg3_enable_nvram_access(tp); + + /* + * Before we can erase the flash page, we need + * to issue a special "write enable" command. + */ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Erase the target page */ + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Issue another write enable to start the write. 
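+ * (Each dword of the page buffer is then written with NVRAM_CMD_WR,
+ * with NVRAM_CMD_FIRST set on the first word and NVRAM_CMD_LAST on the
+ * last word of the page.)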
*/ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + for (j = 0; j < pagesize; j += 4) { + __be32 data; + + data = *((__be32 *) (tmp + j)); + + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + tw32(NVRAM_ADDR, phy_addr + j); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | + NVRAM_CMD_WR; + + if (j == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + else if (j == (pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + ret = tg3_nvram_exec_cmd(tp, nvram_cmd); + if (ret) + break; + } + if (ret) + break; + } + + nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; + tg3_nvram_exec_cmd(tp, nvram_cmd); + + kfree(tmp); + + return ret; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) +{ + int i, ret = 0; + + for (i = 0; i < len; i += 4, offset += 4) { + u32 page_off, phy_addr, nvram_cmd; + __be32 data; + + memcpy(&data, buf + i, 4); + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + page_off = offset % tp->nvram_pagesize; + + phy_addr = tg3_nvram_phys_addr(tp, offset); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; + + if (page_off == 0 || i == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + if (page_off == (tp->nvram_pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (i == (len - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if ((nvram_cmd & NVRAM_CMD_FIRST) || + !tg3_flag(tp, FLASH) || + !tg3_flag(tp, 57765_PLUS)) + tw32(NVRAM_ADDR, phy_addr); + + if (tg3_asic_rev(tp) != ASIC_REV_5752 && + !tg3_flag(tp, 5755_PLUS) && + (tp->nvram_jedecnum == JEDEC_ST) && + (nvram_cmd & NVRAM_CMD_FIRST)) { + u32 cmd; + + cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + ret = tg3_nvram_exec_cmd(tp, cmd); + if (ret) + break; + } + if (!tg3_flag(tp, FLASH)) { + /* We always do complete word writes to eeprom. */ + nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); + } + + ret = tg3_nvram_exec_cmd(tp, nvram_cmd); + if (ret) + break; + } + return ret; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) +{ + int ret; + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & + ~GRC_LCLCTRL_GPIO_OUTPUT1); + udelay(40); + } + + if (!tg3_flag(tp, NVRAM)) { + ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); + } else { + u32 grc_mode; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) + tw32(NVRAM_WRITE1, 0x406); + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); + + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { + ret = tg3_nvram_write_block_buffered(tp, offset, len, + buf); + } else { + ret = tg3_nvram_write_block_unbuffered(tp, offset, len, + buf); + } + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + } + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(40); + } + + return ret; +} + +#define RX_CPU_SCRATCH_BASE 0x30000 +#define RX_CPU_SCRATCH_SIZE 0x04000 +#define TX_CPU_SCRATCH_BASE 0x34000 +#define TX_CPU_SCRATCH_SIZE 0x04000 + +/* tp->lock is held. 
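+ * tg3_pause_cpu() below halts the RX or TX RISC processor by setting
+ * CPU_MODE_HALT and polling up to 10000 times for the halt to take
+ * effect, returning -EBUSY on timeout or if the PCI channel has gone
+ * offline.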
*/ +static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base) +{ + int i; + const int iters = 10000; + + for (i = 0; i < iters; i++) { + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT) + break; + if (pci_channel_offline(tp->pdev)) + return -EBUSY; + } + + return (i == iters) ? -EBUSY : 0; +} + +/* tp->lock is held. */ +static int tg3_rxcpu_pause(struct tg3 *tp) +{ + int rc = tg3_pause_cpu(tp, RX_CPU_BASE); + + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + udelay(10); + + return rc; +} + +/* tp->lock is held. */ +static int tg3_txcpu_pause(struct tg3 *tp) +{ + return tg3_pause_cpu(tp, TX_CPU_BASE); +} + +/* tp->lock is held. */ +static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base) +{ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); +} + +/* tp->lock is held. */ +static void tg3_rxcpu_resume(struct tg3 *tp) +{ + tg3_resume_cpu(tp, RX_CPU_BASE); +} + +/* tp->lock is held. */ +static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base) +{ + int rc; + + BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } + if (cpu_base == RX_CPU_BASE) { + rc = tg3_rxcpu_pause(tp); + } else { + /* + * There is only an Rx CPU for the 5750 derivative in the + * BCM4785. + */ + if (tg3_flag(tp, IS_SSB_CORE)) + return 0; + + rc = tg3_txcpu_pause(tp); + } + + if (rc) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } + + /* Clear firmware's nvram arbitration. */ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; +} + +static int tg3_fw_data_len(struct tg3 *tp, + const struct tg3_firmware_hdr *fw_hdr) +{ + int fw_len; + + /* Non fragmented firmware have one firmware header followed by a + * contiguous chunk of data to be written. The length field in that + * header is not the length of data to be written but the complete + * length of the bss. The data length is determined based on + * tp->fw->size minus headers. + * + * Fragmented firmware have a main header followed by multiple + * fragments. Each fragment is identical to non fragmented firmware + * with a firmware header followed by a contiguous chunk of data. In + * the main header, the length field is unused and set to 0xffffffff. + * In each fragment header the length is the entire size of that + * fragment i.e. fragment data + header length. Data length is + * therefore length field in the header minus TG3_FW_HDR_LEN. + */ + if (tp->fw_len == 0xffffffff) + fw_len = be32_to_cpu(fw_hdr->len); + else + fw_len = tp->fw->size; + + return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32); +} + +/* tp->lock is held. 
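+ * tg3_load_firmware_cpu() below halts the target CPU (taking the NVRAM
+ * lock first, since bootcode may still be running), clears its scratch
+ * memory, and then copies each firmware fragment into place one word at
+ * a time; on the 57766 the halt/clear step is skipped.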
*/ +static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, + u32 cpu_scratch_base, int cpu_scratch_size, + const struct tg3_firmware_hdr *fw_hdr) +{ + int err, i; + void (*write_op)(struct tg3 *, u32, u32); + int total_len = tp->fw->size; + + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); + return -EINVAL; + } + + if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; + + if (tg3_asic_rev(tp) != ASIC_REV_57766) { + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + int lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; + + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, + tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); + } else { + /* Subtract additional main header for fragmented firmware and + * advance to the first fragment + */ + total_len -= TG3_FW_HDR_LEN; + fw_hdr++; + } + + do { + u32 *fw_data = (u32 *)(fw_hdr + 1); + for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++) + write_op(tp, cpu_scratch_base + + (be32_to_cpu(fw_hdr->base_addr) & 0xffff) + + (i * sizeof(u32)), + be32_to_cpu(fw_data[i])); + + total_len -= be32_to_cpu(fw_hdr->len); + + /* Advance to next fragment */ + fw_hdr = (struct tg3_firmware_hdr *) + ((void *)fw_hdr + be32_to_cpu(fw_hdr->len)); + } while (total_len > 0); + + err = 0; + +out: + return err; +} + +/* tp->lock is held. */ +static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) +{ + int i; + const int iters = 5; + + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, pc); + + for (i = 0; i < iters; i++) { + if (tr32(cpu_base + CPU_PC) == pc) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, pc); + udelay(1000); + } + + return (i == iters) ? -EBUSY : 0; +} + +/* tp->lock is held. */ +static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) +{ + const struct tg3_firmware_hdr *fw_hdr; + int err; + + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + fw_hdr); + if (err) + return err; + + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + fw_hdr); + if (err) + return err; + + /* Now startup only the RX cpu. */ + err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, + be32_to_cpu(fw_hdr->base_addr)); + if (err) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), + be32_to_cpu(fw_hdr->base_addr)); + return -ENODEV; + } + + tg3_rxcpu_resume(tp); + + return 0; +} + +static int tg3_validate_rxcpu_state(struct tg3 *tp) +{ + const int iters = 1000; + int i; + u32 val; + + /* Wait for boot code to complete initialization and enter service + * loop. 
It is then safe to download service patches + */ + for (i = 0; i < iters; i++) { + if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP) + break; + + udelay(10); + } + + if (i == iters) { + netdev_err(tp->dev, "Boot code not ready for service patches\n"); + return -EBUSY; + } + + val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE); + if (val & 0xff) { + netdev_warn(tp->dev, + "Other patches exist. Not downloading EEE patch\n"); + return -EEXIST; + } + + return 0; +} + +/* tp->lock is held. */ +static void tg3_load_57766_firmware(struct tg3 *tp) +{ + struct tg3_firmware_hdr *fw_hdr; + + if (!tg3_flag(tp, NO_NVRAM)) + return; + + if (tg3_validate_rxcpu_state(tp)) + return; + + if (!tp->fw) + return; + + /* This firmware blob has a different format than older firmware + * releases as given below. The main difference is we have fragmented + * data to be written to non-contiguous locations. + * + * In the beginning we have a firmware header identical to other + * firmware which consists of version, base addr and length. The length + * here is unused and set to 0xffffffff. + * + * This is followed by a series of firmware fragments which are + * individually identical to previous firmware. i.e. they have the + * firmware header and followed by data for that fragment. The version + * field of the individual fragment header is unused. + */ + + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; + if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR) + return; + + if (tg3_rxcpu_pause(tp)) + return; + + /* tg3_load_firmware_cpu() will always succeed for the 57766 */ + tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr); + + tg3_rxcpu_resume(tp); +} + +/* tp->lock is held. */ +static int tg3_load_tso_firmware(struct tg3 *tp) +{ + const struct tg3_firmware_hdr *fw_hdr; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err; + + if (!tg3_flag(tp, FW_TSO)) + return 0; + + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + cpu_scratch_size = tp->fw_len; + + if (tg3_asic_rev(tp) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } + + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + fw_hdr); + if (err) + return err; + + /* Now startup the cpu. */ + err = tg3_pause_cpu_and_set_pc(tp, cpu_base, + be32_to_cpu(fw_hdr->base_addr)); + if (err) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), + be32_to_cpu(fw_hdr->base_addr)); + return -ENODEV; + } + + tg3_resume_cpu(tp, cpu_base); + return 0; +} + +/* tp->lock is held. */ +static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) +{ + u32 addr_high, addr_low; + + addr_high = ((mac_addr[0] << 8) | mac_addr[1]); + addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]); + + if (index < 4) { + tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); + } else { + index -= 4; + tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); + } +} + +/* tp->lock is held. 
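+ * Each MAC address slot is programmed with the device address (slot 1 may be + * skipped), including the extended slots on 5703/5704; the byte sum of the + * address seeds MAC_TX_BACKOFF_SEED. 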
*/ +static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1) +{ + u32 addr_high; + int i; + + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704) { + for (i = 4; i < 16; i++) + __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i); + } + + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); +} + +static void tg3_enable_register_access(struct tg3 *tp) +{ + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. + */ + pci_write_config_dword(tp->pdev, + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); +} + +static int tg3_power_up(struct tg3 *tp) +{ + int err; + + tg3_enable_register_access(tp); + + err = pci_set_power_state(tp->pdev, PCI_D0); + if (!err) { + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + } else { + netdev_err(tp->dev, "Transition to D0 failed\n"); + } + + return err; +} + +static int tg3_setup_phy(struct tg3 *, bool); + +static int tg3_power_down_prepare(struct tg3 *tp) +{ + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; + + tg3_enable_register_access(tp); + + /* Restore the CLKREQ setting. */ + if (tg3_flag(tp, CLKREQ_BUG)) + pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); + + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + tw32(TG3PCI_MISC_HOST_CTRL, + misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); + + if (tg3_flag(tp, USE_PHYLIB)) { + do_low_power = false; + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + struct phy_device *phydev; + u32 phyid, advertising; + + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + + tp->link_config.speed = phydev->speed; + tp->link_config.duplex = phydev->duplex; + tp->link_config.autoneg = phydev->autoneg; + tp->link_config.advertising = phydev->advertising; + + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; + + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) + advertising |= + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full; + else + advertising |= ADVERTISED_10baseT_Full; + } + + phydev->advertising = advertising; + + phy_start_aneg(phydev); + + phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (phyid != PHY_ID_BCMAC131) { + phyid &= PHY_BCM_OUI_MASK; + if (phyid == PHY_BCM_OUI_1 || + phyid == PHY_BCM_OUI_2 || + phyid == PHY_BCM_OUI_3) + do_low_power = true; + } + } + } else { + do_low_power = true; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tg3_setup_phy(tp, false); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + u32 val; + + val = tr32(GRC_VCPU_EXT_CTRL); + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); + } else if (!tg3_flag(tp, ENABLE_ASF)) { + int i; + u32 val; + + for (i = 0; i < 200; i++) { + tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + msleep(1); + } + } + if (tg3_flag(tp, WOL_CAP)) + tg3_write_mem(tp, 
NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | + WOL_DRV_STATE_SHUTDOWN | + WOL_DRV_WOL | + WOL_SET_MAGIC_PKT); + + if (device_should_wake) { + u32 mac_mode; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + if (do_low_power && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); + udelay(40); + } + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else if (tp->phy_flags & + TG3_PHYFLG_KEEP_LINK_ON_PWRDN) { + if (tp->link_config.active_speed == SPEED_1000) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; + } else + mac_mode = MAC_MODE_PORT_MODE_MII; + + mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; + if (tg3_asic_rev(tp) == ASIC_REV_5700) { + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? + SPEED_100 : SPEED_10; + if (tg3_5700_link_polarity(tp, speed)) + mac_mode |= MAC_MODE_LINK_POLARITY; + else + mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + } else { + mac_mode = MAC_MODE_PORT_MODE_TBI; + } + + if (!tg3_flag(tp, 5750_PLUS)) + tw32(MAC_LED_CTRL, tp->led_ctrl); + + mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) + mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; + + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; + + tw32_f(MAC_MODE, mac_mode); + udelay(100); + + tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); + udelay(10); + } + + if (!tg3_flag(tp, WOL_SPEED_100MB) && + (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701)) { + u32 base_val; + + base_val = tp->pci_clock_ctrl; + base_val |= (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | + CLOCK_CTRL_PWRDOWN_PLL133, 40); + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + tg3_asic_rev(tp) == ASIC_REV_5906) { + /* do nothing */ + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { + u32 newbits1, newbits2; + + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { + newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_ALTCLK); + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } else if (tg3_flag(tp, 5705_PLUS)) { + newbits1 = CLOCK_CTRL_625_CORE; + newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; + } else { + newbits1 = CLOCK_CTRL_ALTCLK; + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, + 40); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, + 40); + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 newbits3; + + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { + newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_44MHZ_CORE); + } else { + newbits3 = CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, + tp->pci_clock_ctrl | newbits3, 40); + } + } + + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) + tg3_power_down_phy(tp, do_low_power); + + tg3_frob_aux_power(tp, true); + + /* Workaround for unstable PLL clock */ + if ((!tg3_flag(tp, IS_SSB_CORE)) && + ((tg3_chip_rev(tp) == CHIPREV_5750_AX) || + (tg3_chip_rev(tp) == CHIPREV_5750_BX))) { + u32 val = tr32(0x7d00); + + val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); + tw32(0x7d00, val); + if 
(!tg3_flag(tp, ENABLE_ASF)) { + int err; + + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + } + } + + tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + + tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); + + return 0; +} + +static void tg3_power_down(struct tg3 *tp) +{ + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); + pci_set_power_state(tp->pdev, PCI_D3hot); +} + +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) +{ + switch (val & MII_TG3_AUX_STAT_SPDMASK) { + case MII_TG3_AUX_STAT_10HALF: + *speed = SPEED_10; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_10FULL: + *speed = SPEED_10; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_100HALF: + *speed = SPEED_100; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_100FULL: + *speed = SPEED_100; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_1000HALF: + *speed = SPEED_1000; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_1000FULL: + *speed = SPEED_1000; + *duplex = DUPLEX_FULL; + break; + + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : + SPEED_10; + *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : + DUPLEX_HALF; + break; + } + *speed = SPEED_UNKNOWN; + *duplex = DUPLEX_UNKNOWN; + break; + } +} + +static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) +{ + int err = 0; + u32 val, new_adv; + + new_adv = ADVERTISE_CSMA; + new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; + new_adv |= mii_advertise_flowctrl(flowctrl); + + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; + } + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; + + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + + err = tg3_phy_toggle_auxctl_smdsp(tp, true); + if (!err) { + u32 err2; + + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + + if (!tp->eee.eee_enabled) { + val = 0; + tp->eee.advertised = 0; + } else { + tp->eee.advertised = advertise & + (ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } + + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (err) + val = 0; + + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + case ASIC_REV_57766: + case ASIC_REV_5719: + /* If we advertised any eee advertisements above... 
*/ + if (val) + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + /* Fall through */ + case ASIC_REV_5720: + case ASIC_REV_5762: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + } + + err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); + if (!err) + err = err2; + } + +done: + return err; +} + +static void tg3_phy_copper_begin(struct tg3 *tp) +{ + if (tp->link_config.autoneg == AUTONEG_ENABLE || + (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + u32 adv, fc; + + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { + adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { + if (!(tp->phy_flags & + TG3_PHYFLG_DISABLE_1G_HD_ADV)) + adv |= ADVERTISED_1000baseT_Half; + adv |= ADVERTISED_1000baseT_Full; + } + + fc = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else { + adv = tp->link_config.advertising; + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + adv &= ~(ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + fc = tp->link_config.flowctrl; + } + + tg3_phy_autoneg_cfg(tp, adv, fc); + + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) { + /* Normally during power down we want to autonegotiate + * the lowest possible speed for WOL. However, to avoid + * link flap, we leave it untouched. + */ + return; + } + + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } else { + int i; + u32 bmcr, orig_bmcr; + + tp->link_config.active_speed = tp->link_config.speed; + tp->link_config.active_duplex = tp->link_config.duplex; + + if (tg3_asic_rev(tp) == ASIC_REV_5714) { + /* With autoneg disabled, 5715 only links up when the + * advertisement register has the configured speed + * enabled. 
+ */ + tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); + } + + bmcr = 0; + switch (tp->link_config.speed) { + default: + case SPEED_10: + break; + + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + + case SPEED_1000: + bmcr |= BMCR_SPEED1000; + break; + } + + if (tp->link_config.duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + + if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && + (bmcr != orig_bmcr)) { + tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); + for (i = 0; i < 1500; i++) { + u32 tmp; + + udelay(10); + if (tg3_readphy(tp, MII_BMSR, &tmp) || + tg3_readphy(tp, MII_BMSR, &tmp)) + continue; + if (!(tmp & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + tg3_writephy(tp, MII_BMCR, bmcr); + udelay(40); + } + } +} + +static int tg3_phy_pull_config(struct tg3 *tp) +{ + int err; + u32 val; + + err = tg3_readphy(tp, MII_BMCR, &val); + if (err) + goto done; + + if (!(val & BMCR_ANENABLE)) { + tp->link_config.autoneg = AUTONEG_DISABLE; + tp->link_config.advertising = 0; + tg3_flag_clear(tp, PAUSE_AUTONEG); + + err = -EIO; + + switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) { + case 0: + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + goto done; + + tp->link_config.speed = SPEED_10; + break; + case BMCR_SPEED100: + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + goto done; + + tp->link_config.speed = SPEED_100; + break; + case BMCR_SPEED1000: + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + tp->link_config.speed = SPEED_1000; + break; + } + /* Fall through */ + default: + goto done; + } + + if (val & BMCR_FULLDPLX) + tp->link_config.duplex = DUPLEX_FULL; + else + tp->link_config.duplex = DUPLEX_HALF; + + tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; + + err = 0; + goto done; + } + + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.advertising = ADVERTISED_Autoneg; + tg3_flag_set(tp, PAUSE_AUTONEG); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + u32 adv; + + err = tg3_readphy(tp, MII_ADVERTISE, &val); + if (err) + goto done; + + adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL); + tp->link_config.advertising |= adv | ADVERTISED_TP; + + tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val); + } else { + tp->link_config.advertising |= ADVERTISED_FIBRE; + } + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 adv; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + err = tg3_readphy(tp, MII_CTRL1000, &val); + if (err) + goto done; + + adv = mii_ctrl1000_to_ethtool_adv_t(val); + } else { + err = tg3_readphy(tp, MII_ADVERTISE, &val); + if (err) + goto done; + + adv = tg3_decode_flowctrl_1000X(val); + tp->link_config.flowctrl = adv; + + val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL); + adv = mii_adv_to_ethtool_adv_x(val); + } + + tp->link_config.advertising |= adv; + } + +done: + return err; +} + +static int tg3_init_5401phy_dsp(struct tg3 *tp) +{ + int err; + + /* Turn off tap power management. 
*/ + /* Set Extended packet length bit */ + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); + + udelay(40); + + return err; +} + +static bool tg3_phy_eee_config_ok(struct tg3 *tp) +{ + struct ethtool_eee eee; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return true; + + tg3_eee_pull_config(tp, &eee); + + if (tp->eee.eee_enabled) { + if (tp->eee.advertised != eee.advertised || + tp->eee.tx_lpi_timer != eee.tx_lpi_timer || + tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) + return false; + } else { + /* EEE is disabled but we're advertising */ + if (eee.advertised) + return false; + } + + return true; +} + +static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) +{ + u32 advmsk, tgtadv, advertising; + + advertising = tp->link_config.advertising; + tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; + + advmsk = ADVERTISE_ALL; + if (tp->link_config.active_duplex == DUPLEX_FULL) { + tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); + advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + } + + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return false; + + if ((*lcladv & advmsk) != tgtadv) + return false; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 tg3_ctrl; + + tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); + + if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) + return false; + + if (tgtadv && + (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) { + tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + } else { + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); + } + + if (tg3_ctrl != tgtadv) + return false; + } + + return true; +} + +static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) +{ + u32 lpeth = 0; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 val; + + if (tg3_readphy(tp, MII_STAT1000, &val)) + return false; + + lpeth = mii_stat1000_to_ethtool_lpa_t(val); + } + + if (tg3_readphy(tp, MII_LPA, rmtadv)) + return false; + + lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); + tp->link_config.rmt_adv = lpeth; + + return true; +} + +static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up) +{ + if (curr_link_up != tp->link_up) { + if (curr_link_up) { + netif_carrier_on(tp->dev); + } else { + netif_carrier_off(tp->dev); + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + + tg3_link_report(tp); + return true; + } + + return false; +} + +static void tg3_clear_mac_status(struct tg3 *tp) +{ + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED); + udelay(40); +} + +static void tg3_setup_eee(struct tg3 *tp) +{ + u32 val; + + val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) + val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; + + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + (tp->eee.tx_lpi_enabled ? 
TG3_CPMU_EEEMD_LPI_IN_TX : 0) | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (tg3_asic_rev(tp) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + (tp->eee.tx_lpi_timer & 0xffff)); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); +} + +static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) +{ + bool current_link_up; + u32 bmsr, val; + u32 lcl_adv, rmt_adv; + u16 current_speed; + u8 current_duplex; + int i, err; + + tg3_clear_mac_status(tp); + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); + + /* Some third-party PHYs need to be reset on link going + * down. + */ + if ((tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_asic_rev(tp) == ASIC_REV_5705) && + tp->link_up) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + !(bmsr & BMSR_LSTATUS)) + force_reset = true; + } + if (force_reset) + tg3_phy_reset(tp); + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_readphy(tp, MII_BMSR, &bmsr) || + !tg3_flag(tp, INIT_COMPLETE)) + bmsr = 0; + + if (!(bmsr & BMSR_LSTATUS)) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + tg3_readphy(tp, MII_BMSR, &bmsr); + for (i = 0; i < 1000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + + if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == + TG3_PHY_REV_BCM5401_B0 && + !(bmsr & BMSR_LSTATUS) && + tp->link_config.active_speed == SPEED_1000) { + err = tg3_phy_reset(tp); + if (!err) + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + } + } + } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) { + /* 5701 {A0,B0} CRC bug workaround */ + tg3_writephy(tp, 0x15, 0x0a75); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + } + + /* Clear pending interrupts... 
*/ + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); + + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) + tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); + else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) + tg3_writephy(tp, MII_TG3_IMASK, ~0); + + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { + if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + else + tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); + } + + current_link_up = false; + current_speed = SPEED_UNKNOWN; + current_duplex = DUPLEX_UNKNOWN; + tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; + tp->link_config.rmt_adv = 0; + + if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + &val); + if (!err && !(val & (1 << 10))) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + val | (1 << 10)); + goto relink; + } + } + + bmsr = 0; + for (i = 0; i < 100; i++) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + break; + udelay(40); + } + + if (bmsr & BMSR_LSTATUS) { + u32 aux_stat, bmcr; + + tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); + for (i = 0; i < 2000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && + aux_stat) + break; + } + + tg3_aux_stat_to_speed_duplex(tp, aux_stat, + &current_speed, + &current_duplex); + + bmcr = 0; + for (i = 0; i < 200; i++) { + tg3_readphy(tp, MII_BMCR, &bmcr); + if (tg3_readphy(tp, MII_BMCR, &bmcr)) + continue; + if (bmcr && bmcr != 0x7fff) + break; + udelay(10); + } + + lcl_adv = 0; + rmt_adv = 0; + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + bool eee_config_ok = tg3_phy_eee_config_ok(tp); + + if ((bmcr & BMCR_ANENABLE) && + eee_config_ok && + tg3_phy_copper_an_config_ok(tp, &lcl_adv) && + tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) + current_link_up = true; + + /* EEE settings changes take effect only after a phy + * reset. If we have skipped a reset due to Link Flap + * Avoidance being enabled, do it now. 
+ */ + if (!eee_config_ok && + (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && + !force_reset) { + tg3_setup_eee(tp); + tg3_phy_reset(tp); + } + } else { + if (!(bmcr & BMCR_ANENABLE) && + tp->link_config.speed == current_speed && + tp->link_config.duplex == current_duplex) { + current_link_up = true; + } + } + + if (current_link_up && + tp->link_config.active_duplex == DUPLEX_FULL) { + u32 reg, bit; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + reg = MII_TG3_FET_GEN_STAT; + bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; + } else { + reg = MII_TG3_EXT_STAT; + bit = MII_TG3_EXT_STAT_MDIX; + } + + if (!tg3_readphy(tp, reg, &val) && (val & bit)) + tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; + + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } + } + +relink: + if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tg3_phy_copper_begin(tp); + + if (tg3_flag(tp, ROBOSWITCH)) { + current_link_up = true; + /* FIXME: when BCM5325 switch is used use 100 MBit/s */ + current_speed = SPEED_1000; + current_duplex = DUPLEX_FULL; + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + } + + tg3_readphy(tp, MII_BMSR, &bmsr); + if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || + (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + current_link_up = true; + } + + tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; + if (current_link_up) { + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + + /* In order for the 5750 core in BCM4785 chip to work properly + * in RGMII mode, the Led Control Register must be set up. + */ + if (tg3_flag(tp, RGMII_MODE)) { + u32 led_ctrl = tr32(MAC_LED_CTRL); + led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON); + + if (tp->link_config.active_speed == SPEED_10) + led_ctrl |= LED_CTRL_LNKLED_OVERRIDE; + else if (tp->link_config.active_speed == SPEED_100) + led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_100MBPS_ON); + else if (tp->link_config.active_speed == SPEED_1000) + led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON); + + tw32(MAC_LED_CTRL, led_ctrl); + udelay(40); + } + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + if (tg3_asic_rev(tp) == ASIC_REV_5700) { + if (current_link_up && + tg3_5700_link_polarity(tp, tp->link_config.active_speed)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + else + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + /* ??? Without this setting Netgear GA302T PHY does not + * ??? send/receive packets... + */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && + tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) { + tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tg3_phy_eee_adjust(tp, current_link_up); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + /* Polled via timer. 
*/ + tw32_f(MAC_EVENT, 0); + } else { + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + } + udelay(40); + + if (tg3_asic_rev(tp) == ASIC_REV_5700 && + current_link_up && + tp->link_config.active_speed == SPEED_1000 && + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { + udelay(120); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + tg3_write_mem(tp, + NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC2); + } + + /* Prevent send BD corruption. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); + else + pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); + } + + tg3_test_and_report_link_chg(tp, current_link_up); + + return 0; +} + +struct tg3_fiber_aneginfo { + int state; +#define ANEG_STATE_UNKNOWN 0 +#define ANEG_STATE_AN_ENABLE 1 +#define ANEG_STATE_RESTART_INIT 2 +#define ANEG_STATE_RESTART 3 +#define ANEG_STATE_DISABLE_LINK_OK 4 +#define ANEG_STATE_ABILITY_DETECT_INIT 5 +#define ANEG_STATE_ABILITY_DETECT 6 +#define ANEG_STATE_ACK_DETECT_INIT 7 +#define ANEG_STATE_ACK_DETECT 8 +#define ANEG_STATE_COMPLETE_ACK_INIT 9 +#define ANEG_STATE_COMPLETE_ACK 10 +#define ANEG_STATE_IDLE_DETECT_INIT 11 +#define ANEG_STATE_IDLE_DETECT 12 +#define ANEG_STATE_LINK_OK 13 +#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 +#define ANEG_STATE_NEXT_PAGE_WAIT 15 + + u32 flags; +#define MR_AN_ENABLE 0x00000001 +#define MR_RESTART_AN 0x00000002 +#define MR_AN_COMPLETE 0x00000004 +#define MR_PAGE_RX 0x00000008 +#define MR_NP_LOADED 0x00000010 +#define MR_TOGGLE_TX 0x00000020 +#define MR_LP_ADV_FULL_DUPLEX 0x00000040 +#define MR_LP_ADV_HALF_DUPLEX 0x00000080 +#define MR_LP_ADV_SYM_PAUSE 0x00000100 +#define MR_LP_ADV_ASYM_PAUSE 0x00000200 +#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 +#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 +#define MR_LP_ADV_NEXT_PAGE 0x00001000 +#define MR_TOGGLE_RX 0x00002000 +#define MR_NP_RX 0x00004000 + +#define MR_LINK_OK 0x80000000 + + unsigned long link_time, cur_time; + + u32 ability_match_cfg; + int ability_match_count; + + char ability_match, idle_match, ack_match; + + u32 txconfig, rxconfig; +#define ANEG_CFG_NP 0x00000080 +#define ANEG_CFG_ACK 0x00000040 +#define ANEG_CFG_RF2 0x00000020 +#define ANEG_CFG_RF1 0x00000010 +#define ANEG_CFG_PS2 0x00000001 +#define ANEG_CFG_PS1 0x00008000 +#define ANEG_CFG_HD 0x00004000 +#define ANEG_CFG_FD 0x00002000 +#define ANEG_CFG_INVAL 0x00001f06 + +}; +#define ANEG_OK 0 +#define ANEG_DONE 1 +#define ANEG_TIMER_ENAB 2 +#define ANEG_FAILED -1 + +#define ANEG_STATE_SETTLE_TIME 10000 + +static int tg3_fiber_aneg_smachine(struct tg3 *tp, + struct tg3_fiber_aneginfo *ap) +{ + u16 flowctrl; + unsigned long delta; + u32 rx_cfg_reg; + int ret; + + if (ap->state == ANEG_STATE_UNKNOWN) { + ap->rxconfig = 0; + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + } + ap->cur_time++; + + if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { + rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); + + if (rx_cfg_reg != ap->ability_match_cfg) { + ap->ability_match_cfg = rx_cfg_reg; + ap->ability_match = 0; + ap->ability_match_count = 0; + } else { + if (++ap->ability_match_count > 1) { + ap->ability_match = 1; + ap->ability_match_cfg = rx_cfg_reg; + } + } + if (rx_cfg_reg & ANEG_CFG_ACK) + ap->ack_match = 1; + else + ap->ack_match = 
0; + + ap->idle_match = 0; + } else { + ap->idle_match = 1; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->ack_match = 0; + + rx_cfg_reg = 0; + } + + ap->rxconfig = rx_cfg_reg; + ret = ANEG_OK; + + switch (ap->state) { + case ANEG_STATE_UNKNOWN: + if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) + ap->state = ANEG_STATE_AN_ENABLE; + + /* fallthru */ + case ANEG_STATE_AN_ENABLE: + ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); + if (ap->flags & MR_AN_ENABLE) { + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + + ap->state = ANEG_STATE_RESTART_INIT; + } else { + ap->state = ANEG_STATE_DISABLE_LINK_OK; + } + break; + + case ANEG_STATE_RESTART_INIT: + ap->link_time = ap->cur_time; + ap->flags &= ~(MR_NP_LOADED); + ap->txconfig = 0; + tw32(MAC_TX_AUTO_NEG, 0); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ret = ANEG_TIMER_ENAB; + ap->state = ANEG_STATE_RESTART; + + /* fallthru */ + case ANEG_STATE_RESTART: + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) + ap->state = ANEG_STATE_ABILITY_DETECT_INIT; + else + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_DISABLE_LINK_OK: + ret = ANEG_DONE; + break; + + case ANEG_STATE_ABILITY_DETECT_INIT: + ap->flags &= ~(MR_TOGGLE_TX); + ap->txconfig = ANEG_CFG_FD; + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + ap->txconfig |= ANEG_CFG_PS1; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + ap->txconfig |= ANEG_CFG_PS2; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ABILITY_DETECT; + break; + + case ANEG_STATE_ABILITY_DETECT: + if (ap->ability_match != 0 && ap->rxconfig != 0) + ap->state = ANEG_STATE_ACK_DETECT_INIT; + break; + + case ANEG_STATE_ACK_DETECT_INIT: + ap->txconfig |= ANEG_CFG_ACK; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ACK_DETECT; + + /* fallthru */ + case ANEG_STATE_ACK_DETECT: + if (ap->ack_match != 0) { + if ((ap->rxconfig & ~ANEG_CFG_ACK) == + (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { + ap->state = ANEG_STATE_COMPLETE_ACK_INIT; + } else { + ap->state = ANEG_STATE_AN_ENABLE; + } + } else if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + } + break; + + case ANEG_STATE_COMPLETE_ACK_INIT: + if (ap->rxconfig & ANEG_CFG_INVAL) { + ret = ANEG_FAILED; + break; + } + ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | + MR_LP_ADV_HALF_DUPLEX | + MR_LP_ADV_SYM_PAUSE | + MR_LP_ADV_ASYM_PAUSE | + MR_LP_ADV_REMOTE_FAULT1 | + MR_LP_ADV_REMOTE_FAULT2 | + MR_LP_ADV_NEXT_PAGE | + MR_TOGGLE_RX | + MR_NP_RX); + if (ap->rxconfig & ANEG_CFG_FD) + ap->flags |= MR_LP_ADV_FULL_DUPLEX; + if (ap->rxconfig & ANEG_CFG_HD) + ap->flags |= MR_LP_ADV_HALF_DUPLEX; + if (ap->rxconfig & ANEG_CFG_PS1) + ap->flags |= MR_LP_ADV_SYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_PS2) + ap->flags |= MR_LP_ADV_ASYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_RF1) + ap->flags |= MR_LP_ADV_REMOTE_FAULT1; + if (ap->rxconfig & ANEG_CFG_RF2) + ap->flags |= MR_LP_ADV_REMOTE_FAULT2; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_LP_ADV_NEXT_PAGE; + + ap->link_time = ap->cur_time; + + ap->flags ^= (MR_TOGGLE_TX); + if (ap->rxconfig & 0x0008) + ap->flags |= 
MR_TOGGLE_RX; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_NP_RX; + ap->flags |= MR_PAGE_RX; + + ap->state = ANEG_STATE_COMPLETE_ACK; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_COMPLETE_ACK: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + if ((ap->txconfig & ANEG_CFG_NP) == 0 && + !(ap->flags & MR_NP_RX)) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + ret = ANEG_FAILED; + } + } + } + break; + + case ANEG_STATE_IDLE_DETECT_INIT: + ap->link_time = ap->cur_time; + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_IDLE_DETECT; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_IDLE_DETECT: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + /* XXX another gem from the Broadcom driver :( */ + ap->state = ANEG_STATE_LINK_OK; + } + break; + + case ANEG_STATE_LINK_OK: + ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); + ret = ANEG_DONE; + break; + + case ANEG_STATE_NEXT_PAGE_WAIT_INIT: + /* ??? unimplemented */ + break; + + case ANEG_STATE_NEXT_PAGE_WAIT: + /* ??? unimplemented */ + break; + + default: + ret = ANEG_FAILED; + break; + } + + return ret; +} + +static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) +{ + int res = 0; + struct tg3_fiber_aneginfo aninfo; + int status = ANEG_FAILED; + unsigned int tick; + u32 tmp; + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); + udelay(40); + + memset(&aninfo, 0, sizeof(aninfo)); + aninfo.flags |= MR_AN_ENABLE; + aninfo.state = ANEG_STATE_UNKNOWN; + aninfo.cur_time = 0; + tick = 0; + while (++tick < 195000) { + status = tg3_fiber_aneg_smachine(tp, &aninfo); + if (status == ANEG_DONE || status == ANEG_FAILED) + break; + + udelay(1); + } + + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + *txflags = aninfo.txconfig; + *rxflags = aninfo.flags; + + if (status == ANEG_DONE && + (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | + MR_LP_ADV_FULL_DUPLEX))) + res = 1; + + return res; +} + +static void tg3_init_bcm8002(struct tg3 *tp) +{ + u32 mac_status = tr32(MAC_STATUS); + int i; + + /* Reset when initting first time or we have a link. */ + if (tg3_flag(tp, INIT_COMPLETE) && + !(mac_status & MAC_STATUS_PCS_SYNCED)) + return; + + /* Set PLL lock range. */ + tg3_writephy(tp, 0x16, 0x8007); + + /* SW reset */ + tg3_writephy(tp, MII_BMCR, BMCR_RESET); + + /* Wait for reset to complete. */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 500; i++) + udelay(10); + + /* Config mode; select PMA/Ch 1 regs. */ + tg3_writephy(tp, 0x10, 0x8411); + + /* Enable auto-lock and comdet, select txclk for tx. */ + tg3_writephy(tp, 0x11, 0x0a10); + + tg3_writephy(tp, 0x18, 0x00a0); + tg3_writephy(tp, 0x16, 0x41ff); + + /* Assert and deassert POR. */ + tg3_writephy(tp, 0x13, 0x0400); + udelay(40); + tg3_writephy(tp, 0x13, 0x0000); + + tg3_writephy(tp, 0x11, 0x0a50); + udelay(40); + tg3_writephy(tp, 0x11, 0x0a10); + + /* Wait for signal to stabilize */ + /* XXX schedule_timeout() ... 
*/ + for (i = 0; i < 15000; i++) + udelay(10); + + /* Deselect the channel register so we can read the PHYID + * later. + */ + tg3_writephy(tp, 0x10, 0x8011); +} + +static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) +{ + u16 flowctrl; + bool current_link_up; + u32 sg_dig_ctrl, sg_dig_status; + u32 serdes_cfg, expected_sg_dig_ctrl; + int workaround, port_a; + + serdes_cfg = 0; + expected_sg_dig_ctrl = 0; + workaround = 0; + port_a = 1; + current_link_up = false; + + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) { + workaround = 1; + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + port_a = 0; + + /* preserve bits 0-11,13,14 for signal pre-emphasis */ + /* preserve bits 20-23 for voltage regulator */ + serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; + } + + sg_dig_ctrl = tr32(SG_DIG_CTRL); + + if (tp->link_config.autoneg != AUTONEG_ENABLE) { + if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + } + if (mac_status & MAC_STATUS_PCS_SYNCED) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = true; + } + goto out; + } + + /* Want auto-negotiation. */ + expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; + + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; + + if (sg_dig_ctrl != expected_sg_dig_ctrl) { + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && + tp->serdes_counter && + ((mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_RCVD_CFG)) == + MAC_STATUS_PCS_SYNCED)) { + tp->serdes_counter--; + current_link_up = true; + goto out; + } +restart_autoneg: + if (workaround) + tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); + udelay(5); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); + + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + sg_dig_status = tr32(SG_DIG_STATUS); + mac_status = tr32(MAC_STATUS); + + if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && + (mac_status & MAC_STATUS_PCS_SYNCED)) { + u32 local_adv = 0, remote_adv = 0; + + if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) + local_adv |= ADVERTISE_1000XPAUSE; + if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) + remote_adv |= LPA_1000XPAUSE; + if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + + tg3_setup_flow_control(tp, local_adv, remote_adv); + current_link_up = true; + tp->serdes_counter = 0; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { + if (tp->serdes_counter) + tp->serdes_counter--; + else { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + udelay(40); + + /* Link parallel detection - link is up */ + /* only if we have PCS_SYNC and not */ + /* receiving config code words */ + mac_status = tr32(MAC_STATUS); + if 
((mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = true; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; + tp->serdes_counter = + SERDES_PARALLEL_DET_TIMEOUT; + } else + goto restart_autoneg; + } + } + } else { + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + +out: + return current_link_up; +} + +static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) +{ + bool current_link_up = false; + + if (!(mac_status & MAC_STATUS_PCS_SYNCED)) + goto out; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 txflags, rxflags; + int i; + + if (fiber_autoneg(tp, &txflags, &rxflags)) { + u32 local_adv = 0, remote_adv = 0; + + if (txflags & ANEG_CFG_PS1) + local_adv |= ADVERTISE_1000XPAUSE; + if (txflags & ANEG_CFG_PS2) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (rxflags & MR_LP_ADV_SYM_PAUSE) + remote_adv |= LPA_1000XPAUSE; + if (rxflags & MR_LP_ADV_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + + tg3_setup_flow_control(tp, local_adv, remote_adv); + + current_link_up = true; + } + for (i = 0; i < 30; i++) { + udelay(20); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + if ((tr32(MAC_STATUS) & + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)) == 0) + break; + } + + mac_status = tr32(MAC_STATUS); + if (!current_link_up && + (mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) + current_link_up = true; + } else { + tg3_setup_flow_control(tp, 0, 0); + + /* Forcing 1000FD link up. */ + current_link_up = true; + + tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + +out: + return current_link_up; +} + +static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset) +{ + u32 orig_pause_cfg; + u16 orig_active_speed; + u8 orig_active_duplex; + u32 mac_status; + bool current_link_up; + int i; + + orig_pause_cfg = tp->link_config.active_flowctrl; + orig_active_speed = tp->link_config.active_speed; + orig_active_duplex = tp->link_config.active_duplex; + + if (!tg3_flag(tp, HW_AUTONEG) && + tp->link_up && + tg3_flag(tp, INIT_COMPLETE)) { + mac_status = tr32(MAC_STATUS); + mac_status &= (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_RCVD_CFG); + if (mac_status == (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + return 0; + } + } + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + if (tp->phy_id == TG3_PHY_ID_BCM8002) + tg3_init_bcm8002(tp); + + /* Enable link change event even when serdes polling. 
*/ + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + udelay(40); + + current_link_up = false; + tp->link_config.rmt_adv = 0; + mac_status = tr32(MAC_STATUS); + + if (tg3_flag(tp, HW_AUTONEG)) + current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); + else + current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); + + tp->napi[0].hw_status->status = + (SD_STATUS_UPDATED | + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); + + for (i = 0; i < 100; i++) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(5); + if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_LNKSTATE_CHANGED)) == 0) + break; + } + + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { + current_link_up = false; + if (tp->link_config.autoneg == AUTONEG_ENABLE && + tp->serdes_counter == 0) { + tw32_f(MAC_MODE, (tp->mac_mode | + MAC_MODE_SEND_CONFIGS)); + udelay(1); + tw32_f(MAC_MODE, tp->mac_mode); + } + } + + if (current_link_up) { + tp->link_config.active_speed = SPEED_1000; + tp->link_config.active_duplex = DUPLEX_FULL; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON)); + } else { + tp->link_config.active_speed = SPEED_UNKNOWN; + tp->link_config.active_duplex = DUPLEX_UNKNOWN; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE)); + } + + if (!tg3_test_and_report_link_chg(tp, current_link_up)) { + u32 now_pause_cfg = tp->link_config.active_flowctrl; + if (orig_pause_cfg != now_pause_cfg || + orig_active_speed != tp->link_config.active_speed || + orig_active_duplex != tp->link_config.active_duplex) + tg3_link_report(tp); + } + + return 0; +} + +static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset) +{ + int err = 0; + u32 bmsr, bmcr; + u16 current_speed = SPEED_UNKNOWN; + u8 current_duplex = DUPLEX_UNKNOWN; + bool current_link_up = false; + u32 local_adv, remote_adv, sgsr; + + if ((tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) && + !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) && + (sgsr & SERDES_TG3_SGMII_MODE)) { + + if (force_reset) + tg3_phy_reset(tp); + + tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; + + if (!(sgsr & SERDES_TG3_LINK_UP)) { + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else { + current_link_up = true; + if (sgsr & SERDES_TG3_SPEED_1000) { + current_speed = SPEED_1000; + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else if (sgsr & SERDES_TG3_SPEED_100) { + current_speed = SPEED_100; + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + } else { + current_speed = SPEED_10; + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + } + + if (sgsr & SERDES_TG3_FULL_DUPLEX) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + } + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tg3_clear_mac_status(tp); + + goto fiber_setup_done; + } + + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tg3_clear_mac_status(tp); + + if (force_reset) + tg3_phy_reset(tp); + + tp->link_config.rmt_adv = 0; + + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_asic_rev(tp) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + + err |= tg3_readphy(tp, MII_BMCR, &bmcr); + + if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + /* do nothing, just check for link up at 
the end */ + } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 adv, newadv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); + + newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); + + if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, newadv); + bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; + tg3_writephy(tp, MII_BMCR, bmcr); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + return err; + } + } else { + u32 new_bmcr; + + bmcr &= ~BMCR_SPEED1000; + new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); + + if (tp->link_config.duplex == DUPLEX_FULL) + new_bmcr |= BMCR_FULLDPLX; + + if (new_bmcr != bmcr) { + /* BMCR_SPEED1000 is a reserved bit that needs + * to be set on write. + */ + new_bmcr |= BMCR_SPEED1000; + + /* Force a linkdown */ + if (tp->link_up) { + u32 adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + adv &= ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF | + ADVERTISE_SLCT); + tg3_writephy(tp, MII_ADVERTISE, adv); + tg3_writephy(tp, MII_BMCR, bmcr | + BMCR_ANRESTART | + BMCR_ANENABLE); + udelay(10); + tg3_carrier_off(tp); + } + tg3_writephy(tp, MII_BMCR, new_bmcr); + bmcr = new_bmcr; + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_asic_rev(tp) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + } + + if (bmsr & BMSR_LSTATUS) { + current_speed = SPEED_1000; + current_link_up = true; + if (bmcr & BMCR_FULLDPLX) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + + local_adv = 0; + remote_adv = 0; + + if (bmcr & BMCR_ANENABLE) { + u32 common; + + err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); + err |= tg3_readphy(tp, MII_LPA, &remote_adv); + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | + ADVERTISE_1000XFULL)) { + if (common & ADVERTISE_1000XFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + } else if (!tg3_flag(tp, 5780_CLASS)) { + /* Link is up via parallel detect */ + } else { + current_link_up = false; + } + } + } + +fiber_setup_done: + if (current_link_up && current_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, local_adv, remote_adv); + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + tg3_test_and_report_link_chg(tp, current_link_up); + return err; +} + +static void tg3_serdes_parallel_detect(struct tg3 *tp) +{ + if (tp->serdes_counter) { + /* Give autoneg time to complete. 
*/ + tp->serdes_counter--; + return; + } + + if (!tp->link_up && + (tp->link_config.autoneg == AUTONEG_ENABLE)) { + u32 bmcr; + + tg3_readphy(tp, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 phy1, phy2; + + /* Select shadow register 0x1f */ + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + + if ((phy1 & 0x10) && !(phy2 & 0x20)) { + /* We have signal detect and not receiving + * config code words, link is up by parallel + * detection. + */ + + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + tg3_writephy(tp, MII_BMCR, bmcr); + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; + } + } + } else if (tp->link_up && + (tp->link_config.autoneg == AUTONEG_ENABLE) && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + u32 phy2; + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + if (phy2 & 0x20) { + u32 bmcr; + + /* Config code words received, turn on autoneg. */ + tg3_readphy(tp, MII_BMCR, &bmcr); + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); + + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + } + } +} + +static int tg3_setup_phy(struct tg3 *tp, bool force_reset) +{ + u32 val; + int err; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + err = tg3_setup_fiber_phy(tp, force_reset); + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + err = tg3_setup_fiber_mii_phy(tp, force_reset); + else + err = tg3_setup_copper_phy(tp, force_reset); + + if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { + u32 scale; + + val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; + if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) + scale = 65; + else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) + scale = 6; + else + scale = 12; + + val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; + val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + } + + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + if (tp->link_config.active_speed == SPEED_1000 && + tp->link_config.active_duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); + else + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + + if (!tg3_flag(tp, 5705_PLUS)) { + if (tp->link_up) { + tw32(HOSTCC_STAT_COAL_TICKS, + tp->coal.stats_block_coalesce_usecs); + } else { + tw32(HOSTCC_STAT_COAL_TICKS, 0); + } + } + + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); + if (!tp->link_up) + val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | + tp->pwrmgmt_thresh; + else + val |= PCIE_PWR_MGMT_L1_THRESH_MSK; + tw32(PCIE_PWR_MGMT_THRESH, val); + } + + return err; +} + +/* tp->lock must be held */ +static u64 tg3_refclk_read(struct tg3 *tp) +{ + u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB); + return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; +} + +/* tp->lock must be held */ +static void tg3_refclk_write(struct tg3 *tp, u64 newval) +{ + u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); + + tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP); + tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff); + 
tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32); + tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME); +} + +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); +static inline void tg3_full_unlock(struct tg3 *tp); +static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +{ + struct tg3 *tp = netdev_priv(dev); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + if (tg3_flag(tp, PTP_CAPABLE)) { + info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + } + + if (tp->ptp_clock) + info->phc_index = ptp_clock_index(tp->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + return 0; +} + +static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + bool neg_adj = false; + u32 correction = 0; + + if (ppb < 0) { + neg_adj = true; + ppb = -ppb; + } + + /* Frequency adjustment is performed using hardware with a 24 bit + * accumulator and a programmable correction value. On each clk, the + * correction value gets added to the accumulator and when it + * overflows, the time counter is incremented/decremented. + * + * So conversion from ppb to correction value is + * ppb * (1 << 24) / 1000000000 + */ + correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) & + TG3_EAV_REF_CLK_CORRECT_MASK; + + tg3_full_lock(tp, 0); + + if (correction) + tw32(TG3_EAV_REF_CLK_CORRECT_CTL, + TG3_EAV_REF_CLK_CORRECT_EN | + (neg_adj ? 
TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction); + else + tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0); + + tg3_full_unlock(tp); + + return 0; +} + +static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + + tg3_full_lock(tp, 0); + tp->ptp_adjust += delta; + tg3_full_unlock(tp); + + return 0; +} + +static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + u64 ns; + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + + tg3_full_lock(tp, 0); + ns = tg3_refclk_read(tp); + ns += tp->ptp_adjust; + tg3_full_unlock(tp); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int tg3_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + + ns = timespec64_to_ns(ts); + + tg3_full_lock(tp, 0); + tg3_refclk_write(tp, ns); + tp->ptp_adjust = 0; + tg3_full_unlock(tp); + + return 0; +} + +static int tg3_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); + u32 clock_ctl; + int rval = 0; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + if (rq->perout.index != 0) + return -EINVAL; + + tg3_full_lock(tp, 0); + clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL); + clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK; + + if (on) { + u64 nsec; + + nsec = rq->perout.start.sec * 1000000000ULL + + rq->perout.start.nsec; + + if (rq->perout.period.sec || rq->perout.period.nsec) { + netdev_warn(tp->dev, + "Device supports only a one-shot timesync output, period must be 0\n"); + rval = -EINVAL; + goto err_out; + } + + if (nsec & (1ULL << 63)) { + netdev_warn(tp->dev, + "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n"); + rval = -EINVAL; + goto err_out; + } + + tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff)); + tw32(TG3_EAV_WATCHDOG0_MSB, + TG3_EAV_WATCHDOG0_EN | + ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK)); + + tw32(TG3_EAV_REF_CLCK_CTL, + clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0); + } else { + tw32(TG3_EAV_WATCHDOG0_MSB, 0); + tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl); + } + +err_out: + tg3_full_unlock(tp); + return rval; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info tg3_ptp_caps = { + .owner = THIS_MODULE, + .name = "tg3 clock", + .max_adj = 250000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 1, + .n_pins = 0, + .pps = 0, + .adjfreq = tg3_ptp_adjfreq, + .adjtime = tg3_ptp_adjtime, + .gettime64 = tg3_ptp_gettime, + .settime64 = tg3_ptp_settime, + .enable = tg3_ptp_enable, +}; + +static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock, + struct skb_shared_hwtstamps *timestamp) +{ + memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps)); + timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + + tp->ptp_adjust); +} + +/* tp->lock must be held */ +static void tg3_ptp_init(struct tg3 *tp) +{ + if (!tg3_flag(tp, PTP_CAPABLE)) + return; + + /* Initialize the hardware clock to the system time. 
*/ + tg3_refclk_write(tp, ktime_to_ns(ktime_get_real())); + tp->ptp_adjust = 0; + tp->ptp_info = tg3_ptp_caps; +} + +/* tp->lock must be held */ +static void tg3_ptp_resume(struct tg3 *tp) +{ + if (!tg3_flag(tp, PTP_CAPABLE)) + return; + + tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust); + tp->ptp_adjust = 0; +} + +static void tg3_ptp_fini(struct tg3 *tp) +{ + if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock) + return; + + ptp_clock_unregister(tp->ptp_clock); + tp->ptp_clock = NULL; + tp->ptp_adjust = 0; +} + +static inline int tg3_irq_sync(struct tg3 *tp) +{ + return tp->irq_sync; +} + +static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) +{ + int i; + + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); +} + +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) +{ + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); + tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } + + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); + + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); +} + +static void tg3_dump_state(struct tg3 *tp) +{ + int i; + u32 *regs; + + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if (!regs) + return; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); + + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; + + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } + + kfree(regs); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + /* SW status block */ + netdev_err(tp->dev, + 
"%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); + + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); + } +} + +/* This is called whenever we suspect that the system chipset is re- + * ordering the sequence of MMIO to the tx send mailbox. The symptom + * is bogus tx completions. We try to recover by setting the + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later + * in the workqueue. + */ +static void tg3_tx_recover(struct tg3 *tp) +{ + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || + tp->write32_tx_mbox == tg3_write_indirect_mbox); + + netdev_warn(tp->dev, + "The system may be re-ordering memory-mapped I/O " + "cycles to the network device, attempting to recover. " + "Please report the problem to the driver maintainer " + "and include system chipset information.\n"); + + tg3_flag_set(tp, TX_RECOVERY_PENDING); +} + +static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) +{ + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + return tnapi->tx_pending - + ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); +} + +/* Tigon3 never reports partial packet sends. So we do not + * need special logic to handle SKBs that have not had all + * of their frags sent yet, like SunGEM does. 
+ */
+static void tg3_tx(struct tg3_napi *tnapi)
+{
+ struct tg3 *tp = tnapi->tp;
+ u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+ u32 sw_idx = tnapi->tx_cons;
+ struct netdev_queue *txq;
+ int index = tnapi - tp->napi;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ if (tg3_flag(tp, ENABLE_TSS))
+ index--;
+
+ txq = netdev_get_tx_queue(tp->dev, index);
+
+ while (sw_idx != hw_idx) {
+ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ struct sk_buff *skb = ri->skb;
+ int i, tx_bug = 0;
+
+ if (unlikely(skb == NULL)) {
+ tg3_tx_recover(tp);
+ return;
+ }
+
+ if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
+ struct skb_shared_hwtstamps timestamp;
+ u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
+ hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
+
+ tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+
+ skb_tstamp_tx(skb, &timestamp);
+ }
+
+ pci_unmap_single(tp->pdev,
+ dma_unmap_addr(ri, mapping),
+ skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+
+ ri->skb = NULL;
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
+ sw_idx = NEXT_TX(sw_idx);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ ri = &tnapi->tx_buffers[sw_idx];
+ if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+ tx_bug = 1;
+
+ pci_unmap_page(tp->pdev,
+ dma_unmap_addr(ri, mapping),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ PCI_DMA_TODEVICE);
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
+ sw_idx = NEXT_TX(sw_idx);
+ }
+
+ pkts_compl++;
+ bytes_compl += skb->len;
+
+ dev_kfree_skb_any(skb);
+
+ if (unlikely(tx_bug)) {
+ tg3_tx_recover(tp);
+ return;
+ }
+ }
+
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
+ tnapi->tx_cons = sw_idx;
+
+ /* Need to make the tx_cons update visible to tg3_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that tg3_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
+ (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static void tg3_frag_free(bool is_frag, void *data)
+{
+ if (is_frag)
+ put_page(virt_to_head_page(data));
+ else
+ kfree(data);
+}
+
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+ unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (!ri->data)
+ return;
+
+ pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
+ map_sz, PCI_DMA_FROMDEVICE);
+ tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
+ ri->data = NULL;
+}
+
+
+/* Returns size of skb allocated or < 0 on error.
+ *
+ * We only need to fill in the address because the other members
+ * of the RX descriptor are invariant, see tg3_init_rings.
+ *
+ * Note the purposeful asymmetry of cpu vs. chip accesses. For
+ * posting buffers we only dirty the first cache line of the RX
+ * descriptor (containing the address). Whereas for the RX status
+ * buffers the cpu only reads the last cacheline of the RX descriptor
+ * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
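+ * The buffer is sized with TG3_RX_OFFSET() headroom plus room for a
+ * trailing struct skb_shared_info so that tg3_rx() can hand it to
+ * build_skb() instead of copying the payload.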
+ */ +static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, + u32 opaque_key, u32 dest_idx_unmasked, + unsigned int *frag_size) +{ + struct tg3_rx_buffer_desc *desc; + struct ring_info *map; + u8 *data; + dma_addr_t mapping; + int skb_size, data_size, dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; + data_size = tp->rx_pkt_map_sz; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; + data_size = TG3_RX_JMB_MAP_SZ; + break; + + default: + return -EINVAL; + } + + /* Do not overwrite any of the map or rp information + * until we are sure we can commit to a new buffer. + * + * Callers depend upon this behavior and assume that + * we leave everything unchanged if we fail. + */ + skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + if (skb_size <= PAGE_SIZE) { + data = netdev_alloc_frag(skb_size); + *frag_size = skb_size; + } else { + data = kmalloc(skb_size, GFP_ATOMIC); + *frag_size = 0; + } + if (!data) + return -ENOMEM; + + mapping = pci_map_single(tp->pdev, + data + TG3_RX_OFFSET(tp), + data_size, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) { + tg3_frag_free(skb_size <= PAGE_SIZE, data); + return -EIO; + } + + map->data = data; + dma_unmap_addr_set(map, mapping, mapping); + + desc->addr_hi = ((u64)mapping >> 32); + desc->addr_lo = ((u64)mapping & 0xffffffff); + + return data_size; +} + +/* We only need to move over in the address because the other + * members of the RX descriptor are invariant. See notes above + * tg3_alloc_rx_data for full details. + */ +static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *dpr, + u32 opaque_key, int src_idx, + u32 dest_idx_unmasked) +{ + struct tg3 *tp = tnapi->tp; + struct tg3_rx_buffer_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; + int dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + dest_desc = &dpr->rx_std[dest_idx]; + dest_map = &dpr->rx_std_buffers[dest_idx]; + src_desc = &spr->rx_std[src_idx]; + src_map = &spr->rx_std_buffers[src_idx]; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + dest_desc = &dpr->rx_jmb[dest_idx].std; + dest_map = &dpr->rx_jmb_buffers[dest_idx]; + src_desc = &spr->rx_jmb[src_idx].std; + src_map = &spr->rx_jmb_buffers[src_idx]; + break; + + default: + return; + } + + dest_map->data = src_map->data; + dma_unmap_addr_set(dest_map, mapping, + dma_unmap_addr(src_map, mapping)); + dest_desc->addr_hi = src_desc->addr_hi; + dest_desc->addr_lo = src_desc->addr_lo; + + /* Ensure that the update to the skb happens after the physical + * addresses have been transferred to the new BD location. + */ + smp_wmb(); + + src_map->data = NULL; +} + +/* The RX ring scheme is composed of multiple rings which post fresh + * buffers to the chip, and one special ring the chip uses to report + * status back to the host. + * + * The special ring reports the status of received packets to the + * host. The chip does not write into the original descriptor the + * RX buffer was obtained from. 
The chip simply takes the original + * descriptor as provided by the host, updates the status and length + * field, then writes this into the next status ring entry. + * + * Each ring the host uses to post buffers to the chip is described + * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives, + * it is first placed into the on-chip ram. When the packet's length + * is known, it walks down the TG3_BDINFO entries to select the ring. + * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO + * which is within the range of the new packet's length is chosen. + * + * The "separate ring for rx status" scheme may sound queer, but it makes + * sense from a cache coherency perspective. If only the host writes + * to the buffer post rings, and only the chip writes to the rx status + * rings, then cache lines never move beyond shared-modified state. + * If both the host and chip were to write into the same ring, cache line + * eviction could occur since both entities want it in an exclusive state. + */ +static int tg3_rx(struct tg3_napi *tnapi, int budget) +{ + struct tg3 *tp = tnapi->tp; + u32 work_mask, rx_std_posted = 0; + u32 std_prod_idx, jmb_prod_idx; + u32 sw_idx = tnapi->rx_rcb_ptr; + u16 hw_idx; + int received; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; + + hw_idx = *(tnapi->rx_rcb_prod_idx); + /* + * We need to order the read of hw_idx and the read of + * the opaque cookie. + */ + rmb(); + work_mask = 0; + received = 0; + std_prod_idx = tpr->rx_std_prod_idx; + jmb_prod_idx = tpr->rx_jmb_prod_idx; + while (sw_idx != hw_idx && budget > 0) { + struct ring_info *ri; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; + unsigned int len; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 opaque_key, desc_idx, *post_ptr; + u8 *data; + u64 tstamp = 0; + + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + if (opaque_key == RXD_OPAQUE_RING_STD) { + ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + data = ri->data; + post_ptr = &std_prod_idx; + rx_std_posted++; + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + data = ri->data; + post_ptr = &jmb_prod_idx; + } else + goto next_pkt_nopost; + + work_mask |= opaque_key; + + if (desc->err_vlan & RXD_ERR_MASK) { + drop_it: + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + drop_it_no_recycle: + /* Other statistics kept track of by card. */ + tp->rx_dropped++; + goto next_pkt; + } + + prefetch(data + TG3_RX_OFFSET(tp)); + len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - + ETH_FCS_LEN; + + if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == + RXD_FLAG_PTPSTAT_PTPV1 || + (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) == + RXD_FLAG_PTPSTAT_PTPV2) { + tstamp = tr32(TG3_RX_TSTAMP_LSB); + tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32; + } + + if (len > TG3_RX_COPY_THRESH(tp)) { + int skb_size; + unsigned int frag_size; + + skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, + *post_ptr, &frag_size); + if (skb_size < 0) + goto drop_it; + + pci_unmap_single(tp->pdev, dma_addr, skb_size, + PCI_DMA_FROMDEVICE); + + /* Ensure that the update to the data happens + * after the usage of the old DMA mapping. 
+ */ + smp_wmb(); + + ri->data = NULL; + + skb = build_skb(data, frag_size); + if (!skb) { + tg3_frag_free(frag_size != 0, data); + goto drop_it_no_recycle; + } + skb_reserve(skb, TG3_RX_OFFSET(tp)); + } else { + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + + skb = netdev_alloc_skb(tp->dev, + len + TG3_RAW_IP_ALIGN); + if (skb == NULL) + goto drop_it_no_recycle; + + skb_reserve(skb, TG3_RAW_IP_ALIGN); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + memcpy(skb->data, + data + TG3_RX_OFFSET(tp), + len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + } + + skb_put(skb, len); + if (tstamp) + tg3_hwclock_to_timestamp(tp, tstamp, + skb_hwtstamps(skb)); + + if ((tp->dev->features & NETIF_F_RXCSUM) && + (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT) == 0xffff)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb->protocol = eth_type_trans(skb, tp->dev); + + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) { + dev_kfree_skb_any(skb); + goto drop_it_no_recycle; + } + + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + desc->err_vlan & RXD_VLAN_MASK); + + napi_gro_receive(&tnapi->napi, skb); + + received++; + budget--; + +next_pkt: + (*post_ptr)++; + + if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + work_mask &= ~RXD_OPAQUE_RING_STD; + rx_std_posted = 0; + } +next_pkt_nopost: + sw_idx++; + sw_idx &= tp->rx_ret_ring_mask; + + /* Refresh hw_idx to see if there is new work */ + if (sw_idx == hw_idx) { + hw_idx = *(tnapi->rx_rcb_prod_idx); + rmb(); + } + } + + /* ACK the status ring. */ + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); + + /* Refill RX ring(s). */ + if (!tg3_flag(tp, ENABLE_RSS)) { + /* Sync BD data before updating mailbox */ + wmb(); + + if (work_mask & RXD_OPAQUE_RING_STD) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + tpr->rx_jmb_prod_idx = jmb_prod_idx & + tp->rx_jmb_ring_mask; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + tpr->rx_jmb_prod_idx); + } + mmiowb(); + } else if (work_mask) { + /* rx_std_buffers[] and rx_jmb_buffers[] entries must be + * updated before the producer indices can be updated. 
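+ * This smp_wmb() pairs with the smp_rmb() in tg3_rx_prodring_xfer(),
+ * which runs from the tp->napi[1] poll scheduled below.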
+ */ + smp_wmb(); + + tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; + tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; + + if (tnapi != &tp->napi[1]) { + tp->rx_refill = true; + napi_schedule(&tp->napi[1].napi); + } + } + + return received; +} + +static void tg3_poll_link(struct tg3 *tp) +{ + /* handle link change and other phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + struct tg3_hw_status *sblk = tp->napi[0].hw_status; + + if (sblk->status & SD_STATUS_LINK_CHG) { + sblk->status = SD_STATUS_UPDATED | + (sblk->status & ~SD_STATUS_LINK_CHG); + spin_lock(&tp->lock); + if (tg3_flag(tp, USE_PHYLIB)) { + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + } else + tg3_setup_phy(tp, false); + spin_unlock(&tp->lock); + } + } +} + +static int tg3_rx_prodring_xfer(struct tg3 *tp, + struct tg3_rx_prodring_set *dpr, + struct tg3_rx_prodring_set *spr) +{ + u32 si, di, cpycnt, src_prod_idx; + int i, err = 0; + + while (1) { + src_prod_idx = spr->rx_std_prod_idx; + + /* Make sure updates to the rx_std_buffers[] entries and the + * standard producer index are seen in the correct order. + */ + smp_rmb(); + + if (spr->rx_std_cons_idx == src_prod_idx) + break; + + if (spr->rx_std_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_std_cons_idx; + else + cpycnt = tp->rx_std_ring_mask + 1 - + spr->rx_std_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); + + si = spr->rx_std_cons_idx; + di = dpr->rx_std_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_std_buffers[i].data) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_std_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_std_buffers[di], + &spr->rx_std_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_std[si]; + dbd = &dpr->rx_std[di]; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & + tp->rx_std_ring_mask; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & + tp->rx_std_ring_mask; + } + + while (1) { + src_prod_idx = spr->rx_jmb_prod_idx; + + /* Make sure updates to the rx_jmb_buffers[] entries and + * the jumbo producer index are seen in the correct order. + */ + smp_rmb(); + + if (spr->rx_jmb_cons_idx == src_prod_idx) + break; + + if (spr->rx_jmb_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; + else + cpycnt = tp->rx_jmb_ring_mask + 1 - + spr->rx_jmb_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); + + si = spr->rx_jmb_cons_idx; + di = dpr->rx_jmb_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_jmb_buffers[i].data) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_jmb_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. 
+ */ + smp_rmb(); + + memcpy(&dpr->rx_jmb_buffers[di], + &spr->rx_jmb_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_jmb[si].std; + dbd = &dpr->rx_jmb[di].std; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & + tp->rx_jmb_ring_mask; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & + tp->rx_jmb_ring_mask; + } + + return err; +} + +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) +{ + struct tg3 *tp = tnapi->tp; + + /* run TX completion thread */ + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + return work_done; + } + + if (!tnapi->rx_rcb_prod_idx) + return work_done; + + /* run RX thread, within the bounds set by NAPI. + * All RX "locking" is done by ensuring outside + * code synchronizes with tg3->napi.poll() + */ + if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); + + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; + int i, err = 0; + u32 std_prod_idx = dpr->rx_std_prod_idx; + u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; + + tp->rx_refill = false; + for (i = 1; i <= tp->rxq_cnt; i++) + err |= tg3_rx_prodring_xfer(tp, dpr, + &tp->napi[i].prodring); + + wmb(); + + if (std_prod_idx != dpr->rx_std_prod_idx) + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + dpr->rx_std_prod_idx); + + if (jmb_prod_idx != dpr->rx_jmb_prod_idx) + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + dpr->rx_jmb_prod_idx); + + mmiowb(); + + if (err) + tw32_f(HOSTCC_MODE, tp->coal_now); + } + + return work_done; +} + +static inline void tg3_reset_task_schedule(struct tg3 *tp) +{ + if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + schedule_work(&tp->reset_task); +} + +static inline void tg3_reset_task_cancel(struct tg3 *tp) +{ + cancel_work_sync(&tp->reset_task); + tg3_flag_clear(tp, RESET_TASK_PENDING); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); +} + +static int tg3_poll_msix(struct napi_struct *napi, int budget) +{ + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + + /* check for RX/TX work to do */ + if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { + + /* This test here is not race free, but will reduce + * the number of interrupts by looping again. + */ + if (tnapi == &tp->napi[1] && tp->rx_refill) + continue; + + napi_complete_done(napi, work_done); + /* Reenable interrupts. */ + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + + /* This test here is synchronized by napi_schedule() + * and napi_complete() to close the race condition. 
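+ * If tp->rx_refill was set after napi_complete_done(), force the
+ * coalescing engine to raise another interrupt so the refill work
+ * is not lost.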
+ */ + if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) { + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | + tnapi->coal_now); + } + mmiowb(); + break; + } + } + + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + tg3_reset_task_schedule(tp); + return work_done; +} + +static void tg3_process_error(struct tg3 *tp) +{ + u32 val; + bool real_error = false; + + if (tg3_flag(tp, ERROR_PROCESSED)) + return; + + /* Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); + real_error = true; + } + + if (!real_error) + return; + + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + tg3_reset_task_schedule(tp); +} + +static int tg3_poll(struct napi_struct *napi, int budget) +{ + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + + tg3_poll_link(tp); + + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + if (tg3_flag(tp, TAGGED_STATUS)) { + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; + + if (likely(!tg3_has_work(tnapi))) { + napi_complete_done(napi, work_done); + tg3_int_reenable(tnapi); + break; + } + } + + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. 
*/ + napi_complete(napi); + tg3_reset_task_schedule(tp); + return work_done; +} + +static void tg3_napi_disable(struct tg3 *tp) +{ + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); +} + +static void tg3_napi_enable(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); +} + +static void tg3_napi_init(struct tg3 *tp) +{ + int i; + + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); +} + +static void tg3_napi_fini(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); +} + +static inline void tg3_netif_stop(struct tg3 *tp) +{ + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_carrier_off(tp->dev); + netif_tx_disable(tp->dev); +} + +/* tp->lock must be held */ +static inline void tg3_netif_start(struct tg3 *tp) +{ + tg3_ptp_resume(tp); + + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); + + if (tp->link_up) + netif_carrier_on(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); +} + +static void tg3_irq_quiesce(struct tg3 *tp) + __releases(tp->lock) + __acquires(tp->lock) +{ + int i; + + BUG_ON(tp->irq_sync); + + tp->irq_sync = 1; + smp_mb(); + + spin_unlock_bh(&tp->lock); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + + spin_lock_bh(&tp->lock); +} + +/* Fully shutdown all tg3 driver activity elsewhere in the system. + * If irq_sync is non-zero, then the IRQ handler must be synchronized + * with as well. Most of the time, this is not necessary except when + * shutting down the device. + */ +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) +{ + spin_lock_bh(&tp->lock); + if (irq_sync) + tg3_irq_quiesce(tp); +} + +static inline void tg3_full_unlock(struct tg3 *tp) +{ + spin_unlock_bh(&tp->lock); +} + +/* One-shot MSI handler - Chip automatically disables interrupt + * after sending MSI so driver doesn't have to do it. + */ +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_HANDLED; +} + +/* MSI ISR - No need to check for interrupt sharing and no need to + * flush status block and interrupt mailbox. PCI ordering rules + * guarantee that MSI will arrive after the status block. + */ +static irqreturn_t tg3_msi(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. 
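+ * The interrupt stays masked until the NAPI poll path re-enables it
+ * by writing last_tag << 24 back to this mailbox.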
+ */ + tw32_mailbox(tnapi->int_mbox, 0x00000001); + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_RETVAL(1); +} + +static irqreturn_t tg3_interrupt(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (tg3_irq_sync(tp)) + goto out; + sblk->status &= ~SD_STATUS_UPDATED; + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); + } else { + /* No work, shared interrupt perhaps? re-enable + * interrupts, and flush that PCI write + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000000); + } +out: + return IRQ_RETVAL(handled); +} + +static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + + /* + * In a shared interrupt configuration, sometimes other devices' + * interrupts will scream. We record the current status tag here + * so that the above check can report that the screaming interrupts + * are unhandled. Eventually they will be silenced. 
+ */ + tnapi->last_irq_tag = sblk->status_tag; + + if (tg3_irq_sync(tp)) + goto out; + + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + napi_schedule(&tnapi->napi); + +out: + return IRQ_RETVAL(handled); +} + +/* ISR for interrupt test */ +static irqreturn_t tg3_test_isr(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + tg3_disable_ints(tp); + return IRQ_RETVAL(1); + } + return IRQ_RETVAL(0); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void tg3_poll_controller(struct net_device *dev) +{ + int i; + struct tg3 *tp = netdev_priv(dev); + + if (tg3_irq_sync(tp)) + return; + + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); +} +#endif + +static void tg3_tx_timeout(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (netif_msg_tx_err(tp)) { + netdev_err(dev, "transmit timed out, resetting\n"); + tg3_dump_state(tp); + } + + tg3_reset_task_schedule(tp); +} + +/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ +static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) +{ + u32 base = (u32) mapping & 0xffffffff; + + return base + len + 8 < base; +} + +/* Test for TSO DMA buffers that cross into regions which are within MSS bytes + * of any 4GB boundaries: 4G, 8G, etc + */ +static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, + u32 len, u32 mss) +{ + if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { + u32 base = (u32) mapping & 0xffffffff; + + return ((base + len + (mss & 0x3fff)) < base); + } + return 0; +} + +/* Test for DMA addresses > 40-bit */ +static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, + int len) +{ +#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); + return 0; +#else + return 0; +#endif +} + +static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, + dma_addr_t mapping, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + txbd->addr_hi = ((u64) mapping >> 32); + txbd->addr_lo = ((u64) mapping & 0xffffffff); + txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); + txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); +} + +static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, + dma_addr_t map, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; + bool hwbug = false; + + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) + hwbug = true; + + if (tg3_4g_overflow_test(map, len)) + hwbug = true; + + if (tg3_4g_tso_overflow_test(tp, map, len, mss)) + hwbug = true; + + if (tg3_40bit_overflow_test(tp, map, len)) + hwbug = true; + + if (tp->dma_limit) { + u32 prvidx = *entry; + u32 tmp_flag = flags & ~TXD_FLAG_END; + while (len > tp->dma_limit && *budget) { + u32 frag_len = tp->dma_limit; + len -= tp->dma_limit; + + /* Avoid the 8byte DMA problem */ + if (len <= 8) { + len += tp->dma_limit / 2; + frag_len = tp->dma_limit / 2; + } + + tnapi->tx_buffers[*entry].fragmented = true; + + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + frag_len, tmp_flag, mss, vlan); + *budget -= 1; + prvidx = *entry; + *entry = NEXT_TX(*entry); + + map += frag_len; + } + + if (len) { + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *budget -= 1; + *entry = NEXT_TX(*entry); + } else { + hwbug = 
true; + tnapi->tx_buffers[prvidx].fragmented = false; + } + } + } else { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *entry = NEXT_TX(*entry); + } + + return hwbug; +} + +static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) +{ + int i; + struct sk_buff *skb; + struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; + + skb = txb->skb; + txb->skb = NULL; + + pci_unmap_single(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + + for (i = 0; i <= last; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + + pci_unmap_page(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + } +} + +/* Workaround 4GB and 40-bit hardware DMA bugs. */ +static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, + struct sk_buff **pskb, + u32 *entry, u32 *budget, + u32 base_flags, u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; + struct sk_buff *new_skb, *skb = *pskb; + dma_addr_t new_addr = 0; + int ret = 0; + + if (tg3_asic_rev(tp) != ASIC_REV_5701) + new_skb = skb_copy(skb, GFP_ATOMIC); + else { + int more_headroom = 4 - ((unsigned long)skb->data & 3); + + new_skb = skb_copy_expand(skb, + skb_headroom(skb) + more_headroom, + skb_tailroom(skb), GFP_ATOMIC); + } + + if (!new_skb) { + ret = -1; + } else { + /* New SKB is guaranteed to be linear. */ + new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, + PCI_DMA_TODEVICE); + /* Make sure the mapping succeeded */ + if (pci_dma_mapping_error(tp->pdev, new_addr)) { + dev_kfree_skb_any(new_skb); + ret = -1; + } else { + u32 save_entry = *entry; + + base_flags |= TXD_FLAG_END; + + tnapi->tx_buffers[*entry].skb = new_skb; + dma_unmap_addr_set(&tnapi->tx_buffers[*entry], + mapping, new_addr); + + if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, + new_skb->len, base_flags, + mss, vlan)) { + tg3_tx_skb_unmap(tnapi, save_entry, -1); + dev_kfree_skb_any(new_skb); + ret = -1; + } + } + } + + dev_kfree_skb_any(skb); + *pskb = new_skb; + return ret; +} + +static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); + +/* Use GSO to workaround all TSO packets that meet HW bug conditions + * indicated in tg3_tx_frag_set() + */ +static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, + struct netdev_queue *txq, struct sk_buff *skb) +{ + struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; + + /* Estimate the number of fragments in the worst case */ + if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). 
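+ * The smp_mb() below pairs with the barrier in tg3_tx() after
+ * tx_cons is updated.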
+ */ + smp_mb(); + if (tg3_tx_avail(tnapi) <= frag_cnt_est) + return NETDEV_TX_BUSY; + + netif_tx_wake_queue(txq); + } + + segs = skb_gso_segment(skb, tp->dev->features & + ~(NETIF_F_TSO | NETIF_F_TSO6)); + if (IS_ERR(segs) || !segs) + goto tg3_tso_bug_end; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + tg3_start_xmit(nskb, tp->dev); + } while (segs); + +tg3_tso_bug_end: + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +/* hard_start_xmit for all devices */ +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + u32 len, entry, base_flags, mss, vlan = 0; + u32 budget; + int i = -1, would_hit_hwbug; + dma_addr_t mapping; + struct tg3_napi *tnapi; + struct netdev_queue *txq; + unsigned int last; + struct iphdr *iph = NULL; + struct tcphdr *tcph = NULL; + __sum16 tcp_csum = 0, ip_csum = 0; + __be16 ip_tot_len = 0; + + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + tnapi = &tp->napi[skb_get_queue_mapping(skb)]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + budget = tg3_tx_avail(tnapi); + + /* We are running in BH disabled context with netif_tx_lock + * and TX reclaim runs via tp->napi.poll inside of a software + * interrupt. Furthermore, IRQ processing runs lockless so we have + * no IRQ context deadlocks to worry about either. Rejoice! + */ + if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, + "BUG! Tx Ring full when queue awake!\n"); + } + return NETDEV_TX_BUSY; + } + + entry = tnapi->tx_prod; + base_flags = 0; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + u32 tcp_opt_len, hdr_len; + + if (skb_cow_head(skb, 0)) + goto drop; + + iph = ip_hdr(skb); + tcp_opt_len = tcp_optlen(skb); + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + + /* HW/FW can not correctly segment packets that have been + * vlan encapsulated. + */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + return tg3_tso_bug(tp, tnapi, txq, skb); + + if (!skb_is_gso_v6(skb)) { + if (unlikely((ETH_HLEN + hdr_len) > 80) && + tg3_flag(tp, TSO_BUG)) + return tg3_tso_bug(tp, tnapi, txq, skb); + + ip_csum = iph->check; + ip_tot_len = iph->tot_len; + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); + } + + base_flags |= (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + tcph = tcp_hdr(skb); + tcp_csum = tcph->check; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + tcph->check = 0; + base_flags &= ~TXD_FLAG_TCPUDP_CSUM; + } else { + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + } + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + tg3_asic_rev(tp) == ASIC_REV_5705) { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + mss |= (tsflags << 11); + } + } else { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + base_flags |= tsflags << 12; + } + } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* HW/FW can not correctly checksum packets that have been + * vlan encapsulated. 
+ */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) { + if (skb_checksum_help(skb)) + goto drop; + } else { + base_flags |= TXD_FLAG_TCPUDP_CSUM; + } + } + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + + if (skb_vlan_tag_present(skb)) { + base_flags |= TXD_FLAG_VLAN; + vlan = skb_vlan_tag_get(skb); + } + + if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) && + tg3_flag(tp, TX_TSTAMP_EN)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + base_flags |= TXD_FLAG_HWTSTAMP; + } + + len = skb_headlen(skb); + + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) + goto drop; + + + tnapi->tx_buffers[entry].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); + + would_hit_hwbug = 0; + + if (tg3_flag(tp, 5701_DMA_BUG)) + would_hit_hwbug = 1; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | + ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), + mss, vlan)) { + would_hit_hwbug = 1; + } else if (skb_shinfo(skb)->nr_frags > 0) { + u32 tmp_mss = mss; + + if (!tg3_flag(tp, HW_TSO_1) && + !tg3_flag(tp, HW_TSO_2) && + !tg3_flag(tp, HW_TSO_3)) + tmp_mss = 0; + + /* Now loop through additional data + * fragments, and queue them. + */ + last = skb_shinfo(skb)->nr_frags - 1; + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, + len, DMA_TO_DEVICE); + + tnapi->tx_buffers[entry].skb = NULL; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); + if (dma_mapping_error(&tp->pdev->dev, mapping)) + goto dma_error; + + if (!budget || + tg3_tx_frag_set(tnapi, &entry, &budget, mapping, + len, base_flags | + ((i == last) ? TXD_FLAG_END : 0), + tmp_mss, vlan)) { + would_hit_hwbug = 1; + break; + } + } + } + + if (would_hit_hwbug) { + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + + if (mss) { + /* If it's a TSO packet, do GSO instead of + * allocating and copying to a large linear SKB + */ + if (ip_tot_len) { + iph->check = ip_csum; + iph->tot_len = ip_tot_len; + } + tcph->check = tcp_csum; + return tg3_tso_bug(tp, tnapi, txq, skb); + } + + /* If the workaround fails due to memory/mapping + * failure, silently drop this packet. + */ + entry = tnapi->tx_prod; + budget = tg3_tx_avail(tnapi); + if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, + base_flags, mss, vlan)) + goto drop_nofree; + } + + skb_tx_timestamp(skb); + netdev_tx_sent_queue(txq, skb->len); + + /* Sync BD data before updating mailbox */ + wmb(); + + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) + netif_tx_wake_queue(txq); + } + + if (!skb->xmit_more || netif_xmit_stopped(txq)) { + /* Packets are ready, update Tx producer idx on card. 
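+ * The write is deferred while xmit_more is set and the queue is not
+ * stopped, so a burst of packets needs only one mailbox update.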
*/ + tw32_tx_mbox(tnapi->prodmbox, entry); + mmiowb(); + } + + return NETDEV_TX_OK; + +dma_error: + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; +drop: + dev_kfree_skb_any(skb); +drop_nofree: + tp->tx_dropped++; + return NETDEV_TX_OK; +} + +static void tg3_mac_loopback(struct tg3 *tp, bool enable) +{ + if (enable) { + tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | + MAC_MODE_PORT_MODE_MASK); + + tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; + + if (!tg3_flag(tp, 5705_PLUS)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else { + tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; + + if (tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || + tg3_asic_rev(tp) == ASIC_REV_5700) + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + tw32(MAC_MODE, tp->mac_mode); + udelay(40); +} + +static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) +{ + u32 val, bmcr, mac_mode, ptest = 0; + + tg3_phy_toggle_apd(tp, false); + tg3_phy_toggle_automdix(tp, false); + + if (extlpbk && tg3_phy_set_extloopbk(tp)) + return -EIO; + + bmcr = BMCR_FULLDPLX; + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + speed = SPEED_100; + bmcr |= BMCR_SPEED100; + } else { + speed = SPEED_1000; + bmcr |= BMCR_SPEED1000; + } + } + + if (extlpbk) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_readphy(tp, MII_CTRL1000, &val); + val |= CTL1000_AS_MASTER | + CTL1000_ENABLE_MASTER; + tg3_writephy(tp, MII_CTRL1000, val); + } else { + ptest = MII_TG3_FET_PTEST_TRIM_SEL | + MII_TG3_FET_PTEST_TRIM_2; + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); + } + } else + bmcr |= BMCR_LOOPBACK; + + tg3_writephy(tp, MII_BMCR, bmcr); + + /* The write needs to be flushed for the FETs */ + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tg3_readphy(tp, MII_BMCR, &bmcr); + + udelay(40); + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + tg3_asic_rev(tp) == ASIC_REV_5785) { + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | + MII_TG3_FET_PTEST_FRC_TX_LINK | + MII_TG3_FET_PTEST_FRC_TX_LOCK); + + /* The write needs to be flushed for the AC131 */ + tg3_readphy(tp, MII_TG3_FET_PTEST, &val); + } + + /* Reset to prevent losing 1st rx packet intermittently */ + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + if (speed == SPEED_1000) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (tg3_asic_rev(tp) == ASIC_REV_5700) { + u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; + + if (masked_phy_id == TG3_PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if (masked_phy_id == TG3_PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; + + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + } + + tw32(MAC_MODE, mac_mode); + udelay(40); + + return 0; +} + +static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) { + if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, true); + netif_carrier_on(tp->dev); + spin_unlock_bh(&tp->lock); + 
netdev_info(dev, "Internal MAC loopback mode enabled.\n"); + } else { + if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, false); + /* Force link status check */ + tg3_setup_phy(tp, true); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode disabled.\n"); + } +} + +static netdev_features_t tg3_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static int tg3_set_features(struct net_device *dev, netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + + if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) + tg3_set_loopback(dev, features); + + return 0; +} + +static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + int i; + + if (tpr != &tp->napi[0].prodring) { + for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; + i = (i + 1) & tp->rx_std_ring_mask) + tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE)) { + for (i = tpr->rx_jmb_cons_idx; + i != tpr->rx_jmb_prod_idx; + i = (i + 1) & tp->rx_jmb_ring_mask) { + tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + return; + } + + for (i = 0; i <= tp->rx_std_ring_mask; i++) + tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) + tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } +} + +/* Initialize rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + u32 i, rx_pkt_dma_sz; + + tpr->rx_std_cons_idx = 0; + tpr->rx_std_prod_idx = 0; + tpr->rx_jmb_cons_idx = 0; + tpr->rx_jmb_prod_idx = 0; + + if (tpr != &tp->napi[0].prodring) { + memset(&tpr->rx_std_buffers[0], 0, + TG3_RX_STD_BUFF_RING_SIZE(tp)); + if (tpr->rx_jmb_buffers) + memset(&tpr->rx_jmb_buffers[0], 0, + TG3_RX_JMB_BUFF_RING_SIZE(tp)); + goto done; + } + + /* Zero out all descriptors. */ + memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); + + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; + if (tg3_flag(tp, 5780_CLASS) && + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); + + /* Initialize invariants of the rings, we only set this + * stuff once. This works because the card does not + * write into the rx buffer posting rings. + */ + for (i = 0; i <= tp->rx_std_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); + rxd->opaque = (RXD_OPAQUE_RING_STD | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + /* Now allocate fresh SKBs for each rx ring. */ + for (i = 0; i < tp->rx_pending; i++) { + unsigned int frag_size; + + if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i, + &frag_size) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX standard ring. 
Only " + "%d out of %d buffers were allocated " + "successfully\n", i, tp->rx_pending); + if (i == 0) + goto initfail; + tp->rx_pending = i; + break; + } + } + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); + + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) + goto done; + + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; + rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | + RXD_FLAG_JUMBO; + rxd->opaque = (RXD_OPAQUE_RING_JUMBO | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + for (i = 0; i < tp->rx_jumbo_pending; i++) { + unsigned int frag_size; + + if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i, + &frag_size) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX jumbo ring. Only %d " + "out of %d buffers were allocated " + "successfully\n", i, tp->rx_jumbo_pending); + if (i == 0) + goto initfail; + tp->rx_jumbo_pending = i; + break; + } + } + +done: + return 0; + +initfail: + tg3_rx_prodring_free(tp, tpr); + return -ENOMEM; +} + +static void tg3_rx_prodring_fini(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + kfree(tpr->rx_std_buffers); + tpr->rx_std_buffers = NULL; + kfree(tpr->rx_jmb_buffers); + tpr->rx_jmb_buffers = NULL; + if (tpr->rx_std) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), + tpr->rx_std, tpr->rx_std_mapping); + tpr->rx_std = NULL; + } + if (tpr->rx_jmb) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), + tpr->rx_jmb, tpr->rx_jmb_mapping); + tpr->rx_jmb = NULL; + } +} + +static int tg3_rx_prodring_init(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_std_buffers) + return -ENOMEM; + + tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_STD_RING_BYTES(tp), + &tpr->rx_std_mapping, + GFP_KERNEL); + if (!tpr->rx_std) + goto err_out; + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_jmb_buffers) + goto err_out; + + tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_JMB_RING_BYTES(tp), + &tpr->rx_jmb_mapping, + GFP_KERNEL); + if (!tpr->rx_jmb) + goto err_out; + } + + return 0; + +err_out: + tg3_rx_prodring_fini(tp, tpr); + return -ENOMEM; +} + +/* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock is not held and we are not + * in an interrupt context and thus may sleep. + */ +static void tg3_free_rings(struct tg3 *tp) +{ + int i, j; + + for (j = 0; j < tp->irq_cnt; j++) { + struct tg3_napi *tnapi = &tp->napi[j]; + + tg3_rx_prodring_free(tp, &tnapi->prodring); + + if (!tnapi->tx_buffers) + continue; + + for (i = 0; i < TG3_TX_RING_SIZE; i++) { + struct sk_buff *skb = tnapi->tx_buffers[i].skb; + + if (!skb) + continue; + + tg3_tx_skb_unmap(tnapi, i, + skb_shinfo(skb)->nr_frags - 1); + + dev_kfree_skb_any(skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j)); + } +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. 
+ */ +static int tg3_init_rings(struct tg3 *tp) +{ + int i; + + /* Free up all the SKBs. */ + tg3_free_rings(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + tnapi->tx_prod = 0; + tnapi->tx_cons = 0; + if (tnapi->tx_ring) + memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); + + tnapi->rx_rcb_ptr = 0; + if (tnapi->rx_rcb) + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + + if (tnapi->prodring.rx_std && + tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + tg3_free_rings(tp); + return -ENOMEM; + } + } + + return 0; +} + +static void tg3_mem_tx_release(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tnapi->tx_ring) { + dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, + tnapi->tx_ring, tnapi->tx_desc_mapping); + tnapi->tx_ring = NULL; + } + + kfree(tnapi->tx_buffers); + tnapi->tx_buffers = NULL; + } +} + +static int tg3_mem_tx_acquire(struct tg3 *tp) +{ + int i; + struct tg3_napi *tnapi = &tp->napi[0]; + + /* If multivector TSS is enabled, vector 0 does not handle + * tx interrupts. Don't allocate any resources for it. + */ + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + for (i = 0; i < tp->txq_cnt; i++, tnapi++) { + tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) * + TG3_TX_RING_SIZE, GFP_KERNEL); + if (!tnapi->tx_buffers) + goto err_out; + + tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping, + GFP_KERNEL); + if (!tnapi->tx_ring) + goto err_out; + } + + return 0; + +err_out: + tg3_mem_tx_release(tp); + return -ENOMEM; +} + +static void tg3_mem_rx_release(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tg3_rx_prodring_fini(tp, &tnapi->prodring); + + if (!tnapi->rx_rcb) + continue; + + dma_free_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); + tnapi->rx_rcb = NULL; + } +} + +static int tg3_mem_rx_acquire(struct tg3 *tp) +{ + unsigned int i, limit; + + limit = tp->rxq_cnt; + + /* If RSS is enabled, we need a (dummy) producer ring + * set on vector zero. This is the true hw prodring. + */ + if (tg3_flag(tp, ENABLE_RSS)) + limit++; + + for (i = 0; i < limit; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_rx_prodring_init(tp, &tnapi->prodring)) + goto err_out; + + /* If multivector RSS is enabled, vector 0 + * does not handle rx or tx interrupts. + * Don't allocate any resources for it. + */ + if (!i && tg3_flag(tp, ENABLE_RSS)) + continue; + + tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); + if (!tnapi->rx_rcb) + goto err_out; + } + + return 0; + +err_out: + tg3_mem_rx_release(tp); + return -ENOMEM; +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. 
+ */ +static void tg3_free_consistent(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tnapi->hw_status) { + dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); + tnapi->hw_status = NULL; + } + } + + tg3_mem_rx_release(tp); + tg3_mem_tx_release(tp); + + if (tp->hw_stats) { + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), + tp->hw_stats, tp->stats_mapping); + tp->hw_stats = NULL; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. Can sleep. + */ +static int tg3_alloc_consistent(struct tg3 *tp) +{ + int i; + + tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, GFP_KERNEL); + if (!tp->hw_stats) + goto err_out; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + struct tg3_hw_status *sblk; + + tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); + if (!tnapi->hw_status) + goto err_out; + + sblk = tnapi->hw_status; + + if (tg3_flag(tp, ENABLE_RSS)) { + u16 *prodptr = NULL; + + /* + * When RSS is enabled, the status block format changes + * slightly. The "rx_jumbo_consumer", "reserved", + * and "rx_mini_consumer" members get mapped to the + * other three rx return ring producer indexes. + */ + switch (i) { + case 1: + prodptr = &sblk->idx[0].rx_producer; + break; + case 2: + prodptr = &sblk->rx_jumbo_consumer; + break; + case 3: + prodptr = &sblk->reserved; + break; + case 4: + prodptr = &sblk->rx_mini_consumer; + break; + } + tnapi->rx_rcb_prod_idx = prodptr; + } else { + tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; + } + } + + if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp)) + goto err_out; + + return 0; + +err_out: + tg3_free_consistent(tp); + return -ENOMEM; +} + +#define MAX_WAIT_CNT 1000 + +/* To stop a block, clear the enable bit and poll till it + * clears. tp->lock is held. + */ +static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent) +{ + unsigned int i; + u32 val; + + if (tg3_flag(tp, 5705_PLUS)) { + switch (ofs) { + case RCVLSC_MODE: + case DMAC_MODE: + case MBFREE_MODE: + case BUFMGR_MODE: + case MEMARB_MODE: + /* We can't enable/disable these bits of the + * 5705/5750, just say success. + */ + return 0; + + default: + break; + } + } + + val = tr32(ofs); + val &= ~enable_bit; + tw32_f(ofs, val); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + if (pci_channel_offline(tp->pdev)) { + dev_err(&tp->pdev->dev, + "tg3_stop_block device offline, " + "ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + + udelay(100); + val = tr32(ofs); + if ((val & enable_bit) == 0) + break; + } + + if (i == MAX_WAIT_CNT && !silent) { + dev_err(&tp->pdev->dev, + "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + + return 0; +} + +/* tp->lock is held. 
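+ * tg3_abort_hw() quiesces the receive path first, then the send and
+ * DMA blocks; with 'silent' set, per-block stop timeouts are not
+ * reported.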
*/ +static int tg3_abort_hw(struct tg3 *tp, bool silent) +{ + int i, err; + + tg3_disable_ints(tp); + + if (pci_channel_offline(tp->pdev)) { + tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE); + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + err = -ENODEV; + goto err_no_dev; + } + + tp->rx_mode &= ~RX_MODE_ENABLE; + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); + + err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); + + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tp->tx_mode &= ~TX_MODE_ENABLE; + tw32_f(MAC_TX_MODE, tp->tx_mode); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) + break; + } + if (i >= MAX_WAIT_CNT) { + dev_err(&tp->pdev->dev, + "%s timed out, TX_MODE_ENABLE will not clear " + "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); + err |= -ENODEV; + } + + err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); + + tw32(FTQ_RESET, 0xffffffff); + tw32(FTQ_RESET, 0x00000000); + + err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); + +err_no_dev: + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } + + return err; +} + +/* Save PCI command register before chip reset */ +static void tg3_save_pci_state(struct tg3 *tp) +{ + pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); +} + +/* Restore PCI state after chip reset */ +static void tg3_restore_pci_state(struct tg3 *tp) +{ + u32 val; + + /* Re-enable indirect register accesses. */ + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + /* Set MAX PCI retry to zero. */ + val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) + val |= PCISTATE_RETRY_SAME_DMA; + /* Allow reads and writes to the APE register and memory space. */ + if (tg3_flag(tp, ENABLE_APE)) + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); + + pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); + + if (!tg3_flag(tp, PCI_EXPRESS)) { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + + /* Make sure PCI-X relaxed ordering bit is clear. 
*/ + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + pcix_cmd &= ~PCI_X_CMD_ERO; + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + if (tg3_flag(tp, 5780_CLASS)) { + + /* Chip reset on 5780 will reset MSI enable bit, + * so need to restore it. + */ + if (tg3_flag(tp, USING_MSI)) { + u16 ctrl; + + pci_read_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + &ctrl); + pci_write_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + ctrl | PCI_MSI_FLAGS_ENABLE); + val = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); + } + } +} + +static void tg3_override_clk(struct tg3 *tp) +{ + u32 val; + + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5717: + val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); + tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | + TG3_CPMU_MAC_ORIDE_ENABLE); + break; + + case ASIC_REV_5719: + case ASIC_REV_5720: + tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + break; + + default: + return; + } +} + +static void tg3_restore_clk(struct tg3 *tp) +{ + u32 val; + + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5717: + val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); + tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, + val & ~TG3_CPMU_MAC_ORIDE_ENABLE); + break; + + case ASIC_REV_5719: + case ASIC_REV_5720: + val = tr32(TG3_CPMU_CLCK_ORIDE); + tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + break; + + default: + return; + } +} + +/* tp->lock is held. */ +static int tg3_chip_reset(struct tg3 *tp) + __releases(tp->lock) + __acquires(tp->lock) +{ + u32 val; + void (*write_op)(struct tg3 *, u32, u32); + int i, err; + + if (!pci_device_is_present(tp->pdev)) + return -ENODEV; + + tg3_nvram_lock(tp); + + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); + + /* No matching tg3_nvram_unlock() after this because + * chip reset below will undo the nvram lock. + */ + tp->nvram_lock_cnt = 0; + + /* GRC_MISC_CFG core clock reset will clear the memory + * enable bit in PCI register 4 and the MSI enable bit + * on some chips, so we save relevant registers here. + */ + tg3_save_pci_state(tp); + + if (tg3_asic_rev(tp) == ASIC_REV_5752 || + tg3_flag(tp, 5755_PLUS)) + tw32(GRC_FASTBOOT_PC, 0); + + /* + * We must avoid the readl() that normally takes place. + * It locks machines, causes machine checks, and other + * fun things. So, temporarily disable the 5701 + * hardware workaround, while we do the reset. + */ + write_op = tp->write32; + if (write_op == tg3_write_flush_reg32) + tp->write32 = tg3_write32; + + /* Prevent the irq handler from reading or writing PCI registers + * during chip reset when the memory enable bit in the PCI command + * register may be cleared. The chip does not generate interrupt + * at this time, but the irq handler may still be called due to irq + * sharing or irqpoll. 
+ */ + tg3_flag_set(tp, CHIP_RESETTING); + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) { + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + } + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + } + smp_mb(); + + tg3_full_unlock(tp); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + + tg3_full_lock(tp, 0); + + if (tg3_asic_rev(tp) == ASIC_REV_57780) { + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + /* do the reset */ + val = GRC_MISC_CFG_CORECLK_RESET; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Force PCIe 1.0a mode */ + if (tg3_asic_rev(tp) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS) && + tr32(TG3_PCIE_PHY_TSTCTL) == + (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) + tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); + + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) { + tw32(GRC_MISC_CFG, (1 << 29)); + val |= (1 << 29); + } + } + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); + tw32(GRC_VCPU_EXT_CTRL, + tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); + } + + /* Set the clock to the highest frequency to avoid timeouts. With link + * aware mode, the clock speed could be slow and bootcode does not + * complete within the expected time. Override the clock to allow the + * bootcode to finish sooner and then restore it. + */ + tg3_override_clk(tp); + + /* Manage gphy power for all CPMU absent PCIe devices. */ + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) + val |= GRC_MISC_CFG_KEEP_GPHY_POWER; + + tw32(GRC_MISC_CFG, val); + + /* restore 5701 hardware bug workaround write method */ + tp->write32 = write_op; + + /* Unfortunately, we have to delay before the PCI read back. + * Some 575X chips even will not respond to a PCI cfg access + * when the reset command is given to the chip. + * + * How do these hardware designers expect things to work + * properly if the PCI write is posted for a long period + * of time? It is always necessary to have some method by + * which a register read back can occur to push the write + * out which does the reset. + * + * For most tg3 variants the trick below was working. + * Ho hum... + */ + udelay(120); + + /* Flush PCI posted writes. The normal MMIO registers + * are inaccessible at this time so this is the only + * way to make this reliably (actually, this is no longer + * the case, see above). I tried to use indirect + * register read/write but this upset some 5701 variants. + */ + pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); + + udelay(120); + + if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) { + u16 val16; + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) { + int j; + u32 cfg_val; + + /* Wait for link training to complete. */ + for (j = 0; j < 5000; j++) + udelay(100); + + pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); + pci_write_config_dword(tp->pdev, 0xc4, + cfg_val | (1 << 15)); + } + + /* Clear the "no snoop" and "relaxed ordering" bits. */ + val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN; + /* + * Older PCIe devices only support the 128 byte + * MPS setting. Enforce the restriction. 
+ */ + if (!tg3_flag(tp, CPMU_PRESENT)) + val16 |= PCI_EXP_DEVCTL_PAYLOAD; + pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16); + + /* Clear error status */ + pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_URD); + } + + tg3_restore_pci_state(tp); + + tg3_flag_clear(tp, CHIP_RESETTING); + tg3_flag_clear(tp, ERROR_PROCESSED); + + val = 0; + if (tg3_flag(tp, 5780_CLASS)) + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) { + tg3_stop_fw(tp); + tw32(0x5000, 0x400); + } + + if (tg3_flag(tp, IS_SSB_CORE)) { + /* + * BCM4785: In order to avoid repercussions from using + * potentially defective internal ROM, stop the Rx RISC CPU, + * which is not required. + */ + tg3_stop_fw(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + } + + err = tg3_poll_fw(tp); + if (err) + return err; + + tw32(GRC_MODE, tp->grc_mode); + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { + val = tr32(0xc4); + + tw32(0xc4, val | (1 << 15)); + } + + if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && + tg3_asic_rev(tp) == ASIC_REV_5705) { + tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) + tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; + tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_TBI; + val = tp->mac_mode; + } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_GMII; + val = tp->mac_mode; + } else + val = 0; + + tw32_f(MAC_MODE, val); + udelay(40); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); + + tg3_mdio_start(tp); + + if (tg3_flag(tp, PCI_EXPRESS) && + tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && + tg3_asic_rev(tp) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + val = tr32(0x7c00); + + tw32(0x7c00, val | (1 << 25)); + } + + tg3_restore_clk(tp); + + /* Reprobe ASF enable state. */ + tg3_flag_clear(tp, ENABLE_ASF); + tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | + TG3_PHYFLG_KEEP_LINK_ON_PWRDN); + + tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + tp->last_event_jiffies = jiffies; + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg); + if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK) + tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; + if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID) + tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; + } + } + + return 0; +} + +static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *); +static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); +static void __tg3_set_rx_mode(struct net_device *); + +/* tp->lock is held. */ +static int tg3_halt(struct tg3 *tp, int kind, bool silent) +{ + int err; + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, kind); + + tg3_abort_hw(tp, silent); + err = tg3_chip_reset(tp); + + __tg3_set_mac_addr(tp, false); + + tg3_write_sig_legacy(tp, kind); + tg3_write_sig_post_reset(tp, kind); + + if (tp->hw_stats) { + /* Save the stats across chip resets... 
*/ + tg3_get_nstats(tp, &tp->net_stats_prev); + tg3_get_estats(tp, &tp->estats_prev); + + /* And make sure the next sample is new data */ + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + } + + return err; +} + +static int tg3_set_mac_addr(struct net_device *dev, void *p) +{ + struct tg3 *tp = netdev_priv(dev); + struct sockaddr *addr = p; + int err = 0; + bool skip_mac_1 = false; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (!netif_running(dev)) + return 0; + + if (tg3_flag(tp, ENABLE_ASF)) { + u32 addr0_high, addr0_low, addr1_high, addr1_low; + + addr0_high = tr32(MAC_ADDR_0_HIGH); + addr0_low = tr32(MAC_ADDR_0_LOW); + addr1_high = tr32(MAC_ADDR_1_HIGH); + addr1_low = tr32(MAC_ADDR_1_LOW); + + /* Skip MAC addr 1 if ASF is using it. */ + if ((addr0_high != addr1_high || addr0_low != addr1_low) && + !(addr1_high == 0 && addr1_low == 0)) + skip_mac_1 = true; + } + spin_lock_bh(&tp->lock); + __tg3_set_mac_addr(tp, skip_mac_1); + __tg3_set_rx_mode(dev); + spin_unlock_bh(&tp->lock); + + return err; +} + +/* tp->lock is held. */ +static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, + dma_addr_t mapping, u32 maxlen_flags, + u32 nic_addr) +{ + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), + ((u64) mapping >> 32)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), + ((u64) mapping & 0xffffffff)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), + maxlen_flags); + + if (!tg3_flag(tp, 5705_PLUS)) + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_NIC_ADDR), + nic_addr); +} + + +static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec) +{ + int i = 0; + + if (!tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); + tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); + tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_TXCOL_TICKS, 0); + tw32(HOSTCC_TXMAX_FRAMES, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT, 0); + + for (; i < tp->txq_cnt; i++) { + u32 reg; + + reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->tx_coalesce_usecs); + reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames); + reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames_irq); + } + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } +} + +static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec) +{ + int i = 0; + u32 limit = tp->rxq_cnt; + + if (!tg3_flag(tp, ENABLE_RSS)) { + tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); + tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + limit--; + } else { + tw32(HOSTCC_RXCOL_TICKS, 0); + tw32(HOSTCC_RXMAX_FRAMES, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT, 0); + } + + for (; i < limit; i++) { + u32 reg; + + reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->rx_coalesce_usecs); + reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames); + reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames_irq); + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } +} + +static 
void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) +{ + tg3_coal_tx_init(tp, ec); + tg3_coal_rx_init(tp, ec); + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 val = ec->stats_block_coalesce_usecs; + + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + + if (!tp->link_up) + val = 0; + + tw32(HOSTCC_STAT_COAL_TICKS, val); + } +} + +/* tp->lock is held. */ +static void tg3_tx_rcbs_disable(struct tg3 *tp) +{ + u32 txrcb, limit; + + /* Disable all transmit rings but the first. */ + if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; + else if (tg3_flag(tp, 57765_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_tx_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 txrcb = NIC_SRAM_SEND_RCB; + + if (tg3_flag(tp, ENABLE_TSS)) + i++; + + for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->tx_ring) + continue; + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + } +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) +{ + u32 rxrcb, limit; + + /* Disable all receive return rings but the first. */ + if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; + else if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5762 || + tg3_flag(tp, 57765_CLASS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; + else + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + + for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 rxrcb = NIC_SRAM_RCV_RET_RCB; + + if (tg3_flag(tp, ENABLE_RSS)) + i++; + + for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->rx_rcb) + continue; + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk; + struct tg3_napi *tnapi = &tp->napi[0]; + + tg3_tx_rcbs_disable(tp); + + tg3_rx_ret_rcbs_disable(tp); + + /* Disable interrupts */ + tw32_mailbox_f(tp->napi[0].int_mbox, 1); + tp->napi[0].chk_msi_cnt = 0; + tp->napi[0].last_rx_cons = 0; + tp->napi[0].last_tx_cons = 0; + + /* Zero mailbox registers. 
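+ * Producer and consumer mailboxes for every vector are rewound to zero
+ * so host and hardware indices agree after the reset; each interrupt
+ * mailbox is written with 1, which leaves that vector masked.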
*/ + if (tg3_flag(tp, SUPPORT_MSIX)) { + for (i = 1; i < tp->irq_max; i++) { + tp->napi[i].tx_prod = 0; + tp->napi[i].tx_cons = 0; + if (tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[i].prodmbox, 0); + tw32_rx_mbox(tp->napi[i].consmbox, 0); + tw32_mailbox_f(tp->napi[i].int_mbox, 1); + tp->napi[i].chk_msi_cnt = 0; + tp->napi[i].last_rx_cons = 0; + tp->napi[i].last_tx_cons = 0; + } + if (!tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[0].prodmbox, 0); + } else { + tp->napi[0].tx_prod = 0; + tp->napi[0].tx_cons = 0; + tw32_mailbox(tp->napi[0].prodmbox, 0); + tw32_rx_mbox(tp->napi[0].consmbox, 0); + } + + /* Make sure the NIC-based send BD rings are disabled. */ + if (!tg3_flag(tp, 5705_PLUS)) { + u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < 16; i++) + tw32_tx_mbox(mbox + i * 8, 0); + } + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + /* Set status block DMA address */ + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tnapi->status_mapping >> 32)); + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tnapi->status_mapping & 0xffffffff)); + + stblk = HOSTCC_STATBLCK_RING1; + + for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { + u64 mapping = (u64)tnapi->status_mapping; + tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); + tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + stblk += 8; + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } + + tg3_tx_rcbs_init(tp); + tg3_rx_ret_rcbs_init(tp); +} + +static void tg3_setup_rxbd_thresholds(struct tg3 *tp) +{ + u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; + + if (!tg3_flag(tp, 5750_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5750 || + tg3_asic_rev(tp) == ASIC_REV_5752 || + tg3_flag(tp, 57765_PLUS)) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; + else if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5787) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; + else + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; + + nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); + host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); + + val = min(nic_rep_thresh, host_rep_thresh); + tw32(RCVBDI_STD_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt); + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + return; + + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + + host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); + + val = min(bdcache_maxcnt / 2, host_rep_thresh); + tw32(RCVBDI_JUMBO_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); +} + +static inline u32 calc_crc(unsigned char *buf, int len) +{ + u32 reg; + u32 tmp; + int j, k; + + reg = 0xffffffff; + + for (j = 0; j < len; j++) { + reg ^= buf[j]; + + for (k = 0; k < 8; k++) { + tmp = reg & 0x01; + + reg >>= 1; + + if (tmp) + reg ^= 0xedb88320; + } + } + + return ~reg; +} + +static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) +{ + /* accept or reject all multicast frames */ + tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_3, accept_all ? 
0xffffffff : 0); +} + +static void __tg3_set_rx_mode(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + u32 rx_mode; + + rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | + RX_MODE_KEEP_VLAN_TAG); + +#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) + /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG + * flag clear. + */ + if (!tg3_flag(tp, ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; +#endif + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= RX_MODE_PROMISC; + } else if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast. */ + tg3_set_multi(tp, 1); + } else if (netdev_mc_empty(dev)) { + /* Reject all multicast. */ + tg3_set_multi(tp, 0); + } else { + /* Accept one or more multicast(s). */ + struct netdev_hw_addr *ha; + u32 mc_filter[4] = { 0, }; + u32 regidx; + u32 bit; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); + bit = ~crc & 0x7f; + regidx = (bit & 0x60) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + tw32(MAC_HASH_REG_0, mc_filter[0]); + tw32(MAC_HASH_REG_1, mc_filter[1]); + tw32(MAC_HASH_REG_2, mc_filter[2]); + tw32(MAC_HASH_REG_3, mc_filter[3]); + } + + if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) { + rx_mode |= RX_MODE_PROMISC; + } else if (!(dev->flags & IFF_PROMISC)) { + /* Add all entries into to the mac addr filter list */ + int i = 0; + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, dev) { + __tg3_set_one_mac_addr(tp, ha->addr, + i + TG3_UCAST_ADDR_IDX(tp)); + i++; + } + } + + if (rx_mode != tp->rx_mode) { + tp->rx_mode = rx_mode; + tw32_f(MAC_RX_MODE, rx_mode); + udelay(10); + } +} + +static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt) +{ + int i; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt); +} + +static void tg3_rss_check_indir_tbl(struct tg3 *tp) +{ + int i; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return; + + if (tp->rxq_cnt == 1) { + memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); + return; + } + + /* Validate table against current IRQ count */ + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { + if (tp->rss_ind_tbl[i] >= tp->rxq_cnt) + break; + } + + if (i != TG3_RSS_INDIR_TBL_SIZE) + tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt); +} + +static void tg3_rss_write_indir_tbl(struct tg3 *tp) +{ + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + u32 val = tp->rss_ind_tbl[i]; + i++; + for (; i % 8; i++) { + val <<= 4; + val |= tp->rss_ind_tbl[i]; + } + tw32(reg, val); + reg += 4; + } +} + +static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp) +{ + if (tg3_asic_rev(tp) == ASIC_REV_5719) + return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719; + else + return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720; +} + +/* tp->lock is held. 
*/ +static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) +{ + u32 val, rdmac_mode; + int i, err, limit; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tg3_disable_ints(tp); + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); + + if (tg3_flag(tp, INIT_COMPLETE)) + tg3_abort_hw(tp, 1); + + if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && + !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { + tg3_phy_pull_config(tp); + tg3_eee_pull_config(tp, NULL); + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + } + + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + tg3_setup_eee(tp); + + if (reset_phy) + tg3_phy_reset(tp); + + err = tg3_chip_reset(tp); + if (err) + return err; + + tg3_write_sig_legacy(tp, RESET_KIND_INIT); + + if (tg3_chip_rev(tp) == CHIPREV_5784_AX) { + val = tr32(TG3_CPMU_CTRL); + val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); + tw32(TG3_CPMU_CTRL, val); + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + + val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); + val &= ~CPMU_LNK_AWARE_MACCLK_MASK; + val |= CPMU_LNK_AWARE_MACCLK_6_25; + tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); + + val = tr32(TG3_CPMU_HST_ACC); + val &= ~CPMU_HST_ACC_MACCLK_MASK; + val |= CPMU_HST_ACC_MACCLK_6_25; + tw32(TG3_CPMU_HST_ACC, val); + } + + if (tg3_asic_rev(tp) == ASIC_REV_57780) { + val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; + val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | + PCIE_PWR_MGMT_L1_THRESH_4MS; + tw32(PCIE_PWR_MGMT_THRESH, val); + + val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; + tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); + + tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); + + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + if (tg3_flag(tp, L1PLLPD_EN)) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, + val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); + + tw32(GRC_MODE, grc_mode); + } + + if (tg3_flag(tp, 57765_CLASS)) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_PL_LO_PHYCTL5); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, + val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); + + tw32(GRC_MODE, grc_mode); + } + + if (tg3_chip_rev(tp) != CHIPREV_57765_AX) { + u32 grc_mode; + + /* Fix transmit hangs */ + val = tr32(TG3_CPMU_PADRNG_CTL); + val |= TG3_CPMU_PADRNG_CTL_RDIV2; + tw32(TG3_CPMU_PADRNG_CTL, val); + + grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of DL PCIE block registers. 
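+ * GRC_MODE_PCIE_DL_SEL temporarily selects the data-link register
+ * window; the saved grc_mode is written back once FTSMAX is updated.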
*/
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT +
+ TG3_PCIE_DL_LO_FTSMAX);
+ val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
+ val | TG3_PCIE_DL_LO_FTSMAX_VAL);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+ }
+
+ /* This works around an issue with Athlon chipsets on
+ * B3 tigon3 silicon. This bit has no effect on any
+ * other revision. But do not set this on PCI Express
+ * chips and don't even touch the clocks if the CPMU is present.
+ */
+ if (!tg3_flag(tp, CPMU_PRESENT)) {
+ if (!tg3_flag(tp, PCI_EXPRESS))
+ tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
+ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ }
+
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
+ tg3_flag(tp, PCIX_MODE)) {
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_RETRY_SAME_DMA;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
+ if (tg3_flag(tp, ENABLE_APE)) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+ */
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR |
+ PCISTATE_ALLOW_APE_PSPACE_WR;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
+ if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
+ /* Enable some hw fixes. */
+ val = tr32(TG3PCI_MSI_DATA);
+ val |= (1 << 26) | (1 << 28) | (1 << 29);
+ tw32(TG3PCI_MSI_DATA, val);
+ }
+
+ /* Descriptor ring init may make accesses to the
+ * NIC SRAM area to setup the TX descriptors, so we
+ * can only do this after the hardware has been
+ * successfully reset.
+ */
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
+
+ if (tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(TG3PCI_DMA_RW_CTRL) &
+ ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
+ val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
+ if (!tg3_flag(tp, 57765_CLASS) &&
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762)
+ val |= DMA_RWCTRL_TAGGED_STAT_WA;
+ tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
+ tg3_asic_rev(tp) != ASIC_REV_5761) {
+ /* This value is determined during the probe time DMA
+ * engine test, tg3_test_dma.
+ */
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ }
+
+ tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
+ GRC_MODE_4X_NIC_SEND_RINGS |
+ GRC_MODE_NO_TX_PHDR_CSUM |
+ GRC_MODE_NO_RX_PHDR_CSUM);
+ tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+
+ /* Pseudo-header checksum is done by hardware logic and not
+ * the offload processors, so make the chip do the pseudo-
+ * header checksums on receive. For transmit it is more
+ * convenient to do the pseudo-header checksum in software
+ * as Linux does that on transmit for us in all cases.
+ */
+ tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+
+ val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
+ if (tp->rxptpctl)
+ tw32(TG3_RX_PTP_CTL,
+ tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
+
+ if (tg3_flag(tp, PTP_CAPABLE))
+ val |= GRC_MODE_TIME_SYNC_ENABLE;
+
+ tw32(GRC_MODE, tp->grc_mode | val);
+
+ /* Setup the timer prescaler register. Clock is always 66 MHz. */
+ val = tr32(GRC_MISC_CFG);
+ val &= ~0xff;
+ val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+
+ /* Initialize MBUF/DESC pool. */
+ if (tg3_flag(tp, 5750_PLUS)) {
+ /* Do nothing.
*/ + } else if (tg3_asic_rev(tp) != ASIC_REV_5705) { + tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); + if (tg3_asic_rev(tp) == ASIC_REV_5704) + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); + else + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); + tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); + tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); + } else if (tg3_flag(tp, TSO_CAPABLE)) { + int fw_len; + + fw_len = tp->fw_len; + fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); + tw32(BUFMGR_MB_POOL_ADDR, + NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); + tw32(BUFMGR_MB_POOL_SIZE, + NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); + } + + if (tp->dev->mtu <= ETH_DATA_LEN) { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water); + } else { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water_jumbo); + } + tw32(BUFMGR_DMA_LOW_WATER, + tp->bufmgr_config.dma_low_water); + tw32(BUFMGR_DMA_HIGH_WATER, + tp->bufmgr_config.dma_high_water); + + val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; + if (tg3_asic_rev(tp) == ASIC_REV_5719) + val |= BUFMGR_MODE_NO_TX_UNDERRUN; + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5762 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) + val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; + tw32(BUFMGR_MODE, val); + for (i = 0; i < 2000; i++) { + if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) + break; + udelay(10); + } + if (i >= 2000) { + netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); + return -ENODEV; + } + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); + + tg3_setup_rxbd_thresholds(tp); + + /* Initialize TG3_BDINFO's at: + * RCVDBDI_STD_BD: standard eth size rx ring + * RCVDBDI_JUMBO_BD: jumbo frame rx ring + * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) + * + * like so: + * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring + * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | + * ring attribute flags + * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM + * + * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. + * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. + * + * The size of each ring is fixed in the firmware, but the location is + * configurable. + */ + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_std_mapping >> 32)); + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_std_mapping & 0xffffffff)); + if (!tg3_flag(tp, 5717_PLUS)) + tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_BUFFER_DESC); + + /* Disable the mini ring */ + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Program the jumbo buffer descriptor ring control + * blocks on those devices that have them. 
+ */ + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || + (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_jmb_mapping >> 32)); + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_jmb_mapping & 0xffffffff)); + val = TG3_RX_JMB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT; + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + val | BDINFO_FLAGS_USE_EXT_RECV); + if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || + tg3_flag(tp, 57765_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_JUMBO_BUFFER_DESC); + } else { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + } + + if (tg3_flag(tp, 57765_PLUS)) { + val = TG3_RX_STD_RING_SIZE(tp); + val <<= BDINFO_FLAGS_MAXLEN_SHIFT; + val |= (TG3_RX_STD_DMA_SZ << 2); + } else + val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; + } else + val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; + + tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); + + tpr->rx_std_prod_idx = tp->rx_pending; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); + + tpr->rx_jmb_prod_idx = + tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); + + tg3_rings_reset(tp); + + /* Initialize MAC address and backoff seed. */ + __tg3_set_mac_addr(tp, false); + + /* MTU + ethernet header + FCS + optional VLAN tag */ + tw32(MAC_RX_MTU_SIZE, + tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + /* The slot time is changed by tg3_setup_phy if we + * run at gigabit with half duplex. + */ + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT); + + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + tw32(MAC_TX_LENGTHS, val); + + /* Receive rules. */ + tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); + tw32(RCVLPC_CONFIG, 0x0181); + + /* Calculate RDMAC_MODE setting early, we need it to determine + * the RCVLPC_STATE_ENABLE mask. 
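+ * (whether RDMAC_MODE_FIFO_SIZE_128 ends up set decides how the
+ * receive list placement statistics enables are trimmed below)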
+ */ + rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | + RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | + RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | + RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | + RDMAC_MODE_LNGREAD_ENAB); + + if (tg3_asic_rev(tp) == ASIC_REV_5717) + rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; + + if (tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | + RDMAC_MODE_MBUF_RBD_CRPT_ENAB | + RDMAC_MODE_MBUF_SBD_CRPT_ENAB; + + if (tg3_asic_rev(tp) == ASIC_REV_5705 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_asic_rev(tp) == ASIC_REV_5705) { + rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + } + } + + if (tg3_flag(tp, PCI_EXPRESS)) + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + tp->dma_limit = 0; + if (tp->dev->mtu <= ETH_DATA_LEN) { + rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; + tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; + } + } + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; + + if (tg3_flag(tp, 57765_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; + + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) + rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; + + if (tg3_asic_rev(tp) == ASIC_REV_5761 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) { + u32 tgtreg; + + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tgtreg = TG3_RDMA_RSRVCTRL_REG2; + else + tgtreg = TG3_RDMA_RSRVCTRL_REG; + + val = tr32(tgtreg); + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || + tg3_asic_rev(tp) == ASIC_REV_5762) { + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; + } + tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) { + u32 tgtreg; + + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2; + else + tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL; + + val = tr32(tgtreg); + tw32(tgtreg, val | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); + } + + /* Receive/send statistics. */ + if (tg3_flag(tp, 5750_PLUS)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_DACK_FIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && + tg3_flag(tp, TSO_CAPABLE)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else { + tw32(RCVLPC_STATS_ENABLE, 0xffffff); + } + tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); + tw32(SNDDATAI_STATSENAB, 0xffffff); + tw32(SNDDATAI_STATSCTRL, + (SNDDATAI_SCTRL_ENABLE | + SNDDATAI_SCTRL_FASTUPD)); + + /* Setup host coalescing engine. 
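+ * The engine is disabled and polled until HOSTCC_MODE_ENABLE clears
+ * before the new coalescing parameters are written; it is re-enabled
+ * further down together with tp->coalesce_mode.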
*/ + tw32(HOSTCC_MODE, 0); + for (i = 0; i < 2000; i++) { + if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) + break; + udelay(10); + } + + __tg3_set_coalesce(tp, &tp->coal); + + if (!tg3_flag(tp, 5705_PLUS)) { + /* Status/statistics block address. See tg3_timer, + * the tg3_periodic_fetch_stats call there, and + * tg3_get_stats to see how this works for 5705/5750 chips. + */ + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tp->stats_mapping >> 32)); + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tp->stats_mapping & 0xffffffff)); + tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); + + tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); + + /* Clear statistics and status block memory areas */ + for (i = NIC_SRAM_STATS_BLK; + i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; + i += sizeof(u32)) { + tg3_write_mem(tp, i, 0); + udelay(40); + } + } + + tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); + + tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); + tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + /* reset to prevent losing 1st rx packet intermittently */ + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + + tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | + MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | + MAC_MODE_FHDE_ENABLE; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (!tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + tg3_asic_rev(tp) != ASIC_REV_5700) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); + udelay(40); + + /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). + * If TG3_FLAG_IS_NIC is zero, we should read the + * register to preserve the GPIO settings for LOMs. The GPIOs, + * whether used as inputs or outputs, are set by boot code after + * reset. 
+ */ + if (!tg3_flag(tp, IS_NIC)) { + u32 gpio_mask; + + gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; + + if (tg3_asic_rev(tp) == ASIC_REV_5752) + gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | + GRC_LCLCTRL_GPIO_OUTPUT3; + + if (tg3_asic_rev(tp) == ASIC_REV_5755) + gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; + + tp->grc_local_ctrl &= ~gpio_mask; + tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; + + /* GPIO1 must be driven high for eeprom write protect */ + if (tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + } + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(100); + + if (tg3_flag(tp, USING_MSIX)) { + val = tr32(MSGINT_MODE); + val |= MSGINT_MODE_ENABLE; + if (tp->irq_cnt > 1) + val |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + val |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); + udelay(40); + } + + val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | + WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | + WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | + WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | + WDMAC_MODE_LNGREAD_ENAB); + + if (tg3_asic_rev(tp) == ASIC_REV_5705 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) { + /* nothing */ + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + val |= WDMAC_MODE_RX_ACCEL; + } + } + + /* Enable host coalescing bug fix */ + if (tg3_flag(tp, 5755_PLUS)) + val |= WDMAC_MODE_STATUS_TAG_FIX; + + if (tg3_asic_rev(tp) == ASIC_REV_5785) + val |= WDMAC_MODE_BURST_ALL_DATA; + + tw32_f(WDMAC_MODE, val); + udelay(40); + + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + if (tg3_asic_rev(tp) == ASIC_REV_5703) { + pcix_cmd &= ~PCI_X_CMD_MAX_READ; + pcix_cmd |= PCI_X_CMD_READ_2K; + } else if (tg3_asic_rev(tp) == ASIC_REV_5704) { + pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); + pcix_cmd |= PCI_X_CMD_READ_2K; + } + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + tw32_f(RDMAC_MODE, rdmac_mode); + udelay(40); + + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { + for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) { + if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp)) + break; + } + if (i < TG3_NUM_RDMA_CHANNELS) { + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + val |= tg3_lso_rd_dma_workaround_bit(tp); + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); + tg3_flag_set(tp, 5719_5720_RDMA_BUG); + } + } + + tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); + + if (tg3_asic_rev(tp) == ASIC_REV_5761) + tw32(SNDDATAC_MODE, + SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); + else + tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + + tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); + tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); + val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + val |= RCVDBDI_MODE_LRG_RING_SZ; + tw32(RCVDBDI_MODE, val); + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); + if (tg3_flag(tp, 
HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); + val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; + if (tg3_flag(tp, ENABLE_TSS)) + val |= SNDBDI_MODE_MULTI_TXQ_EN; + tw32(SNDBDI_MODE, val); + tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { + err = tg3_load_5701_a0_firmware_fix(tp); + if (err) + return err; + } + + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + /* Ignore any errors for the firmware download. If download + * fails, the device will operate with EEE disabled + */ + tg3_load_57766_firmware(tp); + } + + if (tg3_flag(tp, TSO_CAPABLE)) { + err = tg3_load_tso_firmware(tp); + if (err) + return err; + } + + tp->tx_mode = TX_MODE_ENABLE; + + if (tg3_flag(tp, 5755_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5906) + tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; + + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) { + val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; + tp->tx_mode &= ~val; + tp->tx_mode |= tr32(MAC_TX_MODE) & val; + } + + tw32_f(MAC_TX_MODE, tp->tx_mode); + udelay(100); + + if (tg3_flag(tp, ENABLE_RSS)) { + u32 rss_key[10]; + + tg3_rss_write_indir_tbl(tp); + + netdev_rss_key_fill(rss_key, 10 * sizeof(u32)); + + for (i = 0; i < 10 ; i++) + tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]); + } + + tp->rx_mode = RX_MODE_ENABLE; + if (tg3_flag(tp, 5755_PLUS)) + tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX; + + if (tg3_flag(tp, ENABLE_RSS)) + tp->rx_mode |= RX_MODE_RSS_ENABLE | + RX_MODE_RSS_ITBL_HASH_BITS_7 | + RX_MODE_RSS_IPV6_HASH_EN | + RX_MODE_RSS_TCP_IPV6_HASH_EN | + RX_MODE_RSS_IPV4_HASH_EN | + RX_MODE_RSS_TCP_IPV4_HASH_EN; + + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + tw32(MAC_LED_CTRL, tp->led_ctrl); + + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if ((tg3_asic_rev(tp) == ASIC_REV_5704) && + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { + /* Set drive transmission level to 1.2V */ + /* only if the signal pre-emphasis bit is not set */ + val = tr32(MAC_SERDES_CFG); + val &= 0xfffff000; + val |= 0x880; + tw32(MAC_SERDES_CFG, val); + } + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) + tw32(MAC_SERDES_CFG, 0x616000); + } + + /* Prevent chip from dropping frames when flow control + * is enabled. + */ + if (tg3_flag(tp, 57765_CLASS)) + val = 1; + else + val = 2; + tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); + + if (tg3_asic_rev(tp) == ASIC_REV_5704 && + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + /* Use hardware link auto-negotiation */ + tg3_flag_set(tp, HW_AUTONEG); + } + + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_asic_rev(tp) == ASIC_REV_5714) { + u32 tmp; + + tmp = tr32(SERDES_RX_CTRL); + tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); + tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; + tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + } + + if (!tg3_flag(tp, USE_PHYLIB)) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + + err = tg3_setup_phy(tp, false); + if (err) + return err; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + u32 tmp; + + /* Clear CRC stats. 
*/ + if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { + tg3_writephy(tp, MII_TG3_TEST1, + tmp | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); + } + } + } + + __tg3_set_rx_mode(tp->dev); + + /* Initialize receive rules. */ + tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); + + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) + limit = 8; + else + limit = 16; + if (tg3_flag(tp, ENABLE_ASF)) + limit -= 4; + switch (limit) { + case 16: + tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + case 15: + tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + case 14: + tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + case 13: + tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + case 12: + tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + case 11: + tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + case 10: + tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + case 9: + tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + case 8: + tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + case 7: + tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + case 6: + tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + case 5: + tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + case 4: + /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ + case 3: + /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ + case 2: + case 1: + + default: + break; + } + + if (tg3_flag(tp, ENABLE_APE)) + /* Write our heartbeat update interval to APE. */ + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + + tg3_write_sig_post_reset(tp, RESET_KIND_INIT); + + return 0; +} + +/* Called at device open time to get the chip ready for + * packet processing. Invoked with tp->lock held. + */ +static int tg3_init_hw(struct tg3 *tp, bool reset_phy) +{ + /* Chip may have been just powered on. If so, the boot code may still + * be running initialization. Wait for it to finish to avoid races in + * accessing the hardware. 
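+ * tg3_poll_fw() below performs that wait.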
+ */ + tg3_enable_register_access(tp); + tg3_poll_fw(tp); + + tg3_switch_clocks(tp); + + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + return tg3_reset_hw(tp, reset_phy); +} + +static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir) +{ + int i; + + for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) { + u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN; + + tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len); + off += len; + + if (ocir->signature != TG3_OCIR_SIG_MAGIC || + !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE)) + memset(ocir, 0, TG3_OCIR_LEN); + } +} + +/* sysfs attributes for hwmon */ +static ssize_t tg3_show_temp(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct tg3 *tp = dev_get_drvdata(dev); + u32 temperature; + + spin_lock_bh(&tp->lock); + tg3_ape_scratchpad_read(tp, &temperature, attr->index, + sizeof(temperature)); + spin_unlock_bh(&tp->lock); + return sprintf(buf, "%u\n", temperature); +} + + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL, + TG3_TEMP_SENSOR_OFFSET); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL, + TG3_TEMP_CAUTION_OFFSET); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL, + TG3_TEMP_MAX_OFFSET); + +static struct attribute *tg3_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(tg3); + +static void tg3_hwmon_close(struct tg3 *tp) +{ + if (tp->hwmon_dev) { + hwmon_device_unregister(tp->hwmon_dev); + tp->hwmon_dev = NULL; + } +} + +static void tg3_hwmon_open(struct tg3 *tp) +{ + int i; + u32 size = 0; + struct pci_dev *pdev = tp->pdev; + struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; + + tg3_sd_scan_scratchpad(tp, ocirs); + + for (i = 0; i < TG3_SD_NUM_RECS; i++) { + if (!ocirs[i].src_data_length) + continue; + + size += ocirs[i].src_hdr_length; + size += ocirs[i].src_data_length; + } + + if (!size) + return; + + tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", + tp, tg3_groups); + if (IS_ERR(tp->hwmon_dev)) { + tp->hwmon_dev = NULL; + dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); + } +} + + +#define TG3_STAT_ADD32(PSTAT, REG) \ +do { u32 __val = tr32(REG); \ + (PSTAT)->low += __val; \ + if ((PSTAT)->low < __val) \ + (PSTAT)->high += 1; \ +} while (0) + +static void tg3_periodic_fetch_stats(struct tg3 *tp) +{ + struct tg3_hw_stats *sp = tp->hw_stats; + + if (!tp->link_up) + return; + + TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT); + TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT); + TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS); + TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS); + TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED); + TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL); + TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL); + TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST); + TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST); + TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST); + if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) && + (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low + + sp->tx_bcast_packets.low) > 
TG3_NUM_RDMA_CHANNELS)) { + u32 val; + + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + val &= ~tg3_lso_rd_dma_workaround_bit(tp); + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val); + tg3_flag_clear(tp, 5719_5720_RDMA_BUG); + } + + TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS); + TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS); + TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST); + TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST); + TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST); + TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS); + TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS); + TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD); + TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD); + TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED); + TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG); + TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS); + TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE); + + TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT); + if (tg3_asic_rev(tp) != ASIC_REV_5717 && + tg3_asic_rev(tp) != ASIC_REV_5762 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) { + TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); + } else { + u32 val = tr32(HOSTCC_FLOW_ATTN); + val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0; + if (val) { + tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); + sp->rx_discards.low += val; + if (sp->rx_discards.low < val) + sp->rx_discards.high += 1; + } + sp->mbuf_lwm_thresh_hit = sp->rx_discards; + } + TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); +} + +static void tg3_chk_missed_msi(struct tg3 *tp) +{ + u32 i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_has_work(tnapi)) { + if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && + tnapi->last_tx_cons == tnapi->tx_cons) { + if (tnapi->chk_msi_cnt < 1) { + tnapi->chk_msi_cnt++; + return; + } + tg3_msi(0, tnapi); + } + } + tnapi->chk_msi_cnt = 0; + tnapi->last_rx_cons = tnapi->rx_rcb_ptr; + tnapi->last_tx_cons = tnapi->tx_cons; + } +} + +static void tg3_timer(unsigned long __opaque) +{ + struct tg3 *tp = (struct tg3 *) __opaque; + + spin_lock(&tp->lock); + + if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { + spin_unlock(&tp->lock); + goto restart_timer; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_flag(tp, 57765_CLASS)) + tg3_chk_missed_msi(tp); + + if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { + /* BCM4785: Flush posted writes from GbE to host memory. */ + tr32(HOSTCC_MODE); + } + + if (!tg3_flag(tp, TAGGED_STATUS)) { + /* All of this garbage is because when using non-tagged + * IRQ status the mailbox/status_block protocol the chip + * uses with the cpu is race prone. + */ + if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { + tw32(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + } else { + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); + } + + if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + spin_unlock(&tp->lock); + tg3_reset_task_schedule(tp); + goto restart_timer; + } + } + + /* This part only runs once per second. 
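+ * tp->timer_counter counts timer ticks down from tp->timer_multiplier, which tg3_timer_init() derives as HZ / timer_offset.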
*/ + if (!--tp->timer_counter) { + if (tg3_flag(tp, 5705_PLUS)) + tg3_periodic_fetch_stats(tp); + + if (tp->setlpicnt && !--tp->setlpicnt) + tg3_phy_eee_enable(tp); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + u32 mac_stat; + int phy_event; + + mac_stat = tr32(MAC_STATUS); + + phy_event = 0; + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { + if (mac_stat & MAC_STATUS_MI_INTERRUPT) + phy_event = 1; + } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) + phy_event = 1; + + if (phy_event) + tg3_setup_phy(tp, false); + } else if (tg3_flag(tp, POLL_SERDES)) { + u32 mac_stat = tr32(MAC_STATUS); + int need_setup = 0; + + if (tp->link_up && + (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { + need_setup = 1; + } + if (!tp->link_up && + (mac_stat & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET))) { + need_setup = 1; + } + if (need_setup) { + if (!tp->serdes_counter) { + tw32_f(MAC_MODE, + (tp->mac_mode & + ~MAC_MODE_PORT_MODE_MASK)); + udelay(40); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + tg3_setup_phy(tp, false); + } + } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tg3_serdes_parallel_detect(tp); + } else if (tg3_flag(tp, POLL_CPMU_LINK)) { + u32 cpmu = tr32(TG3_CPMU_STATUS); + bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == + TG3_CPMU_STATUS_LINK_MASK); + + if (link_up != tp->link_up) + tg3_setup_phy(tp, false); + } + + tp->timer_counter = tp->timer_multiplier; + } + + /* Heartbeat is only sent once every 2 seconds. + * + * The heartbeat is to tell the ASF firmware that the host + * driver is still alive. In the event that the OS crashes, + * ASF needs to reset the hardware to free up the FIFO space + * that may be filled with rx packets destined for the host. + * If the FIFO is full, ASF will no longer function properly. + * + * Unintended resets have been reported on real time kernels + * where the timer doesn't run on time. Netpoll will also have + * same problem. + * + * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware + * to check the ring condition when the heartbeat is expiring + * before doing the reset. This will prevent most unintended + * resets. + */ + if (!--tp->asf_counter) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, + FWCMD_NICDRV_ALIVE3); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, + TG3_FW_UPDATE_TIMEOUT_SEC); + + tg3_generate_fw_event(tp); + } + tp->asf_counter = tp->asf_multiplier; + } + + spin_unlock(&tp->lock); + +restart_timer: + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); +} + +static void tg3_timer_init(struct tg3 *tp) +{ + if (tg3_flag(tp, TAGGED_STATUS) && + tg3_asic_rev(tp) != ASIC_REV_5717 && + !tg3_flag(tp, 57765_CLASS)) + tp->timer_offset = HZ; + else + tp->timer_offset = HZ / 10; + + BUG_ON(tp->timer_offset > HZ); + + tp->timer_multiplier = (HZ / tp->timer_offset); + tp->asf_multiplier = (HZ / tp->timer_offset) * + TG3_FW_UPDATE_FREQ_SEC; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) tp; + tp->timer.function = tg3_timer; +} + +static void tg3_timer_start(struct tg3 *tp) +{ + tp->asf_counter = tp->asf_multiplier; + tp->timer_counter = tp->timer_multiplier; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); +} + +static void tg3_timer_stop(struct tg3 *tp) +{ + del_timer_sync(&tp->timer); +} + +/* Restart hardware after configuration changes, self-test, etc. 
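+ * On failure the device is halted, the timer is stopped, and the interface is closed.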
+ * Invoked with tp->lock held. + */ +static int tg3_restart_hw(struct tg3 *tp, bool reset_phy) + __releases(tp->lock) + __acquires(tp->lock) +{ + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + netdev_err(tp->dev, + "Failed to re-initialize device, aborting\n"); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + tg3_timer_stop(tp); + tp->irq_sync = 0; + tg3_napi_enable(tp); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; +} + +static void tg3_reset_task(struct work_struct *work) +{ + struct tg3 *tp = container_of(work, struct tg3, reset_task); + int err; + + rtnl_lock(); + tg3_full_lock(tp, 0); + + if (!netif_running(tp->dev)) { + tg3_flag_clear(tp, RESET_TASK_PENDING); + tg3_full_unlock(tp); + rtnl_unlock(); + return; + } + + tg3_full_unlock(tp); + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + tp->write32_rx_mbox = tg3_write_flush_reg32; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + } + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + err = tg3_init_hw(tp, true); + if (err) + goto out; + + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + tg3_flag_clear(tp, RESET_TASK_PENDING); + rtnl_unlock(); +} + +static int tg3_request_irq(struct tg3 *tp, int irq_num) +{ + irq_handler_t fn; + unsigned long flags; + char *name; + struct tg3_napi *tnapi = &tp->napi[irq_num]; + + if (tp->irq_cnt == 1) + name = tp->dev->name; + else { + name = &tnapi->irq_lbl[0]; + if (tnapi->tx_buffers && tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-txrx-%d", tp->dev->name, irq_num); + else if (tnapi->tx_buffers) + snprintf(name, IFNAMSIZ, + "%s-tx-%d", tp->dev->name, irq_num); + else if (tnapi->rx_rcb) + snprintf(name, IFNAMSIZ, + "%s-rx-%d", tp->dev->name, irq_num); + else + snprintf(name, IFNAMSIZ, + "%s-%d", tp->dev->name, irq_num); + name[IFNAMSIZ-1] = 0; + } + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + fn = tg3_msi; + if (tg3_flag(tp, 1SHOT_MSI)) + fn = tg3_msi_1shot; + flags = 0; + } else { + fn = tg3_interrupt; + if (tg3_flag(tp, TAGGED_STATUS)) + fn = tg3_interrupt_tagged; + flags = IRQF_SHARED; + } + + return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); +} + +static int tg3_test_interrupt(struct tg3 *tp) +{ + struct tg3_napi *tnapi = &tp->napi[0]; + struct net_device *dev = tp->dev; + int err, i, intr_ok = 0; + u32 val; + + if (!netif_running(dev)) + return -ENODEV; + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + /* + * Turn off MSI one shot mode. Otherwise this test has no + * observable way to know whether the interrupt was delivered. 
+ */ + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + err = request_irq(tnapi->irq_vec, tg3_test_isr, + IRQF_SHARED, dev->name, tnapi); + if (err) + return err; + + tnapi->hw_status->status &= ~SD_STATUS_UPDATED; + tg3_enable_ints(tp); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + tnapi->coal_now); + + for (i = 0; i < 5; i++) { + u32 int_mbox, misc_host_ctrl; + + int_mbox = tr32_mailbox(tnapi->int_mbox); + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + + if ((int_mbox != 0) || + (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { + intr_ok = 1; + break; + } + + if (tg3_flag(tp, 57765_PLUS) && + tnapi->hw_status->status_tag != tnapi->last_tag) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + msleep(10); + } + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + err = tg3_request_irq(tp, 0); + + if (err) + return err; + + if (intr_ok) { + /* Reenable MSI one shot mode. */ + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { + val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + return 0; + } + + return -EIO; +} + +/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is + * successfully restored + */ +static int tg3_test_msi(struct tg3 *tp) +{ + int err; + u16 pci_cmd; + + if (!tg3_flag(tp, USING_MSI)) + return 0; + + /* Turn off SERR reporting in case MSI terminates with Master + * Abort. + */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_write_config_word(tp->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = tg3_test_interrupt(tp); + + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + if (!err) + return 0; + + /* other failures */ + if (err != -EIO) + return err; + + /* MSI test failed, go back to INTx mode */ + netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " + "to INTx mode. Please report this failure to the PCI " + "maintainer and include system chipset information\n"); + + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + pci_disable_msi(tp->pdev); + + tg3_flag_clear(tp, USING_MSI); + tp->napi[0].irq_vec = tp->pdev->irq; + + err = tg3_request_irq(tp, 0); + if (err) + return err; + + /* Need to reset the chip because the MSI cycle may have terminated + * with Master Abort. + */ + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_init_hw(tp, true); + + tg3_full_unlock(tp); + + if (err) + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + return err; +} + +static int tg3_request_firmware(struct tg3 *tp) +{ + const struct tg3_firmware_hdr *fw_hdr; + + if (reject_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { + netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", + tp->fw_needed); + return -ENOENT; + } + + fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + * start address and _full_ length including BSS sections + * (which must be longer than the actual data, of course + */ + + tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */ + if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) { + netdev_err(tp->dev, "bogus length %d in \"%s\"\n", + tp->fw_len, tp->fw_needed); + release_firmware(tp->fw); + tp->fw = NULL; + return -EINVAL; + } + + /* We no longer need firmware; we have it. 
*/ + tp->fw_needed = NULL; + return 0; +} + +static u32 tg3_irq_count(struct tg3 *tp) +{ + u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt); + + if (irq_cnt > 1) { + /* We want as many rx rings enabled as there are cpus. + * In multiqueue MSI-X mode, the first MSI-X vector + * only deals with link interrupts, etc, so we add + * one to the number of vectors we are requesting. + */ + irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max); + } + + return irq_cnt; +} + +static bool tg3_enable_msix(struct tg3 *tp) +{ + int i, rc; + struct msix_entry msix_ent[TG3_IRQ_MAX_VECS]; + + tp->txq_cnt = tp->txq_req; + tp->rxq_cnt = tp->rxq_req; + if (!tp->rxq_cnt) + tp->rxq_cnt = netif_get_num_default_rss_queues(); + if (tp->rxq_cnt > tp->rxq_max) + tp->rxq_cnt = tp->rxq_max; + + /* Disable multiple TX rings by default. Simple round-robin hardware + * scheduling of the TX rings can cause starvation of rings with + * small packets when other rings have TSO or jumbo packets. + */ + if (!tp->txq_req) + tp->txq_cnt = 1; + + tp->irq_cnt = tg3_irq_count(tp); + + for (i = 0; i < tp->irq_max; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt); + if (rc < 0) { + return false; + } else if (rc < tp->irq_cnt) { + netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", + tp->irq_cnt, rc); + tp->irq_cnt = rc; + tp->rxq_cnt = max(rc - 1, 1); + if (tp->txq_cnt) + tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max); + } + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].irq_vec = msix_ent[i].vector; + + if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) { + pci_disable_msix(tp->pdev); + return false; + } + + if (tp->irq_cnt == 1) + return true; + + tg3_flag_set(tp, ENABLE_RSS); + + if (tp->txq_cnt > 1) + tg3_flag_set(tp, ENABLE_TSS); + + netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt); + + return true; +} + +static void tg3_ints_init(struct tg3 *tp) +{ + if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && + !tg3_flag(tp, TAGGED_STATUS)) { + /* All MSI supporting chips should support tagged + * status. Assert that this is the case. + */ + netdev_warn(tp->dev, + "MSI without TAGGED_STATUS? 
Not using MSI\n"); + goto defcfg; + } + + if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) + tg3_flag_set(tp, USING_MSIX); + else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) + tg3_flag_set(tp, USING_MSI); + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + u32 msi_mode = tr32(MSGINT_MODE); + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) + msi_mode |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); + } +defcfg: + if (!tg3_flag(tp, USING_MSIX)) { + tp->irq_cnt = 1; + tp->napi[0].irq_vec = tp->pdev->irq; + } + + if (tp->irq_cnt == 1) { + tp->txq_cnt = 1; + tp->rxq_cnt = 1; + netif_set_real_num_tx_queues(tp->dev, 1); + netif_set_real_num_rx_queues(tp->dev, 1); + } +} + +static void tg3_ints_fini(struct tg3 *tp) +{ + if (tg3_flag(tp, USING_MSIX)) + pci_disable_msix(tp->pdev); + else if (tg3_flag(tp, USING_MSI)) + pci_disable_msi(tp->pdev); + tg3_flag_clear(tp, USING_MSI); + tg3_flag_clear(tp, USING_MSIX); + tg3_flag_clear(tp, ENABLE_RSS); + tg3_flag_clear(tp, ENABLE_TSS); +} + +static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq, + bool init) +{ + struct net_device *dev = tp->dev; + int i, err; + + /* + * Setup interrupts first so we know how + * many NAPI resources to allocate + */ + tg3_ints_init(tp); + + tg3_rss_check_indir_tbl(tp); + + /* The placement of this call is tied + * to the setup and use of Host TX descriptors. + */ + err = tg3_alloc_consistent(tp); + if (err) + goto out_ints_fini; + + tg3_napi_init(tp); + + tg3_napi_enable(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + err = tg3_request_irq(tp, i); + if (err) { + for (i--; i >= 0; i--) { + tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + goto out_napi_fini; + } + } + + tg3_full_lock(tp, 0); + + if (init) + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + + err = tg3_init_hw(tp, reset_phy); + if (err) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + } + + tg3_full_unlock(tp); + + if (err) + goto out_free_irq; + + if (test_irq && tg3_flag(tp, USING_MSI)) { + err = tg3_test_msi(tp); + + if (err) { + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_full_unlock(tp); + + goto out_napi_fini; + } + + if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { + u32 val = tr32(PCIE_TRANSACTION_CFG); + + tw32(PCIE_TRANSACTION_CFG, + val | PCIE_TRANS_CFG_1SHOT_MSI); + } + } + + tg3_phy_start(tp); + + tg3_hwmon_open(tp); + + tg3_full_lock(tp, 0); + + tg3_timer_start(tp); + tg3_flag_set(tp, INIT_COMPLETE); + tg3_enable_ints(tp); + + tg3_ptp_resume(tp); + + tg3_full_unlock(tp); + + netif_tx_start_all_queues(dev); + + /* + * Reset loopback feature if it was turned on while the device was down + * make sure that it's installed properly now. 
+ */ + if (dev->features & NETIF_F_LOOPBACK) + tg3_set_loopback(dev, dev->features); + + return 0; + +out_free_irq: + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + +out_napi_fini: + tg3_napi_disable(tp); + tg3_napi_fini(tp); + tg3_free_consistent(tp); + +out_ints_fini: + tg3_ints_fini(tp); + + return err; +} + +static void tg3_stop(struct tg3 *tp) +{ + int i; + + tg3_reset_task_cancel(tp); + tg3_netif_stop(tp); + + tg3_timer_stop(tp); + + tg3_hwmon_close(tp); + + tg3_phy_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_disable_ints(tp); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + + tg3_ints_fini(tp); + + tg3_napi_fini(tp); + + tg3_free_consistent(tp); +} + +static int tg3_open(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + int err; + + if (tp->pcierr_recovery) { + netdev_err(dev, "Failed to open device. PCI error recovery " + "in progress\n"); + return -EAGAIN; + } + + if (tp->fw_needed) { + err = tg3_request_firmware(tp); + if (tg3_asic_rev(tp) == ASIC_REV_57766) { + if (err) { + netdev_warn(tp->dev, "EEE capability disabled\n"); + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "EEE capability restored\n"); + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + } + } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { + if (err) + return err; + } else if (err) { + netdev_warn(tp->dev, "TSO capability disabled\n"); + tg3_flag_clear(tp, TSO_CAPABLE); + } else if (!tg3_flag(tp, TSO_CAPABLE)) { + netdev_notice(tp->dev, "TSO capability restored\n"); + tg3_flag_set(tp, TSO_CAPABLE); + } + } + + tg3_carrier_off(tp); + + err = tg3_power_up(tp); + if (err) + return err; + + tg3_full_lock(tp, 0); + + tg3_disable_ints(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + err = tg3_start(tp, + !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN), + true, true); + if (err) { + tg3_frob_aux_power(tp, false); + pci_set_power_state(tp->pdev, PCI_D3hot); + } + + return err; +} + +static int tg3_close(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tp->pcierr_recovery) { + netdev_err(dev, "Failed to close device. 
PCI error recovery " + "in progress\n"); + return -EAGAIN; + } + + tg3_stop(tp); + + /* Clear stats across close / open calls */ + memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev)); + memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); + + if (pci_device_is_present(tp->pdev)) { + tg3_power_down_prepare(tp); + + tg3_carrier_off(tp); + } + return 0; +} + +static inline u64 get_stat64(tg3_stat64_t *val) +{ + return ((u64)val->high << 32) | ((u64)val->low); +} + +static u64 tg3_calc_crc_errors(struct tg3 *tp) +{ + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701)) { + u32 val; + + if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { + tg3_writephy(tp, MII_TG3_TEST1, + val | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); + } else + val = 0; + + tp->phy_crc_errors += val; + + return tp->phy_crc_errors; + } + + return get_stat64(&hw_stats->rx_fcs_errors); +} + +#define ESTAT_ADD(member) \ + estats->member = old_estats->member + \ + get_stat64(&hw_stats->member) + +static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats) +{ + struct tg3_ethtool_stats *old_estats = &tp->estats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + ESTAT_ADD(rx_octets); + ESTAT_ADD(rx_fragments); + ESTAT_ADD(rx_ucast_packets); + ESTAT_ADD(rx_mcast_packets); + ESTAT_ADD(rx_bcast_packets); + ESTAT_ADD(rx_fcs_errors); + ESTAT_ADD(rx_align_errors); + ESTAT_ADD(rx_xon_pause_rcvd); + ESTAT_ADD(rx_xoff_pause_rcvd); + ESTAT_ADD(rx_mac_ctrl_rcvd); + ESTAT_ADD(rx_xoff_entered); + ESTAT_ADD(rx_frame_too_long_errors); + ESTAT_ADD(rx_jabbers); + ESTAT_ADD(rx_undersize_packets); + ESTAT_ADD(rx_in_length_errors); + ESTAT_ADD(rx_out_length_errors); + ESTAT_ADD(rx_64_or_less_octet_packets); + ESTAT_ADD(rx_65_to_127_octet_packets); + ESTAT_ADD(rx_128_to_255_octet_packets); + ESTAT_ADD(rx_256_to_511_octet_packets); + ESTAT_ADD(rx_512_to_1023_octet_packets); + ESTAT_ADD(rx_1024_to_1522_octet_packets); + ESTAT_ADD(rx_1523_to_2047_octet_packets); + ESTAT_ADD(rx_2048_to_4095_octet_packets); + ESTAT_ADD(rx_4096_to_8191_octet_packets); + ESTAT_ADD(rx_8192_to_9022_octet_packets); + + ESTAT_ADD(tx_octets); + ESTAT_ADD(tx_collisions); + ESTAT_ADD(tx_xon_sent); + ESTAT_ADD(tx_xoff_sent); + ESTAT_ADD(tx_flow_control); + ESTAT_ADD(tx_mac_errors); + ESTAT_ADD(tx_single_collisions); + ESTAT_ADD(tx_mult_collisions); + ESTAT_ADD(tx_deferred); + ESTAT_ADD(tx_excessive_collisions); + ESTAT_ADD(tx_late_collisions); + ESTAT_ADD(tx_collide_2times); + ESTAT_ADD(tx_collide_3times); + ESTAT_ADD(tx_collide_4times); + ESTAT_ADD(tx_collide_5times); + ESTAT_ADD(tx_collide_6times); + ESTAT_ADD(tx_collide_7times); + ESTAT_ADD(tx_collide_8times); + ESTAT_ADD(tx_collide_9times); + ESTAT_ADD(tx_collide_10times); + ESTAT_ADD(tx_collide_11times); + ESTAT_ADD(tx_collide_12times); + ESTAT_ADD(tx_collide_13times); + ESTAT_ADD(tx_collide_14times); + ESTAT_ADD(tx_collide_15times); + ESTAT_ADD(tx_ucast_packets); + ESTAT_ADD(tx_mcast_packets); + ESTAT_ADD(tx_bcast_packets); + ESTAT_ADD(tx_carrier_sense_errors); + ESTAT_ADD(tx_discards); + ESTAT_ADD(tx_errors); + + ESTAT_ADD(dma_writeq_full); + ESTAT_ADD(dma_write_prioq_full); + ESTAT_ADD(rxbds_empty); + ESTAT_ADD(rx_discards); + ESTAT_ADD(rx_errors); + ESTAT_ADD(rx_threshold_hit); + + ESTAT_ADD(dma_readq_full); + ESTAT_ADD(dma_read_prioq_full); + ESTAT_ADD(tx_comp_queue_full); + + ESTAT_ADD(ring_set_send_prod_index); + ESTAT_ADD(ring_status_update); + 
ESTAT_ADD(nic_irqs); + ESTAT_ADD(nic_avoided_irqs); + ESTAT_ADD(nic_tx_threshold_hit); + + ESTAT_ADD(mbuf_lwm_thresh_hit); +} + +static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats) +{ + struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + stats->rx_packets = old_stats->rx_packets + + get_stat64(&hw_stats->rx_ucast_packets) + + get_stat64(&hw_stats->rx_mcast_packets) + + get_stat64(&hw_stats->rx_bcast_packets); + + stats->tx_packets = old_stats->tx_packets + + get_stat64(&hw_stats->tx_ucast_packets) + + get_stat64(&hw_stats->tx_mcast_packets) + + get_stat64(&hw_stats->tx_bcast_packets); + + stats->rx_bytes = old_stats->rx_bytes + + get_stat64(&hw_stats->rx_octets); + stats->tx_bytes = old_stats->tx_bytes + + get_stat64(&hw_stats->tx_octets); + + stats->rx_errors = old_stats->rx_errors + + get_stat64(&hw_stats->rx_errors); + stats->tx_errors = old_stats->tx_errors + + get_stat64(&hw_stats->tx_errors) + + get_stat64(&hw_stats->tx_mac_errors) + + get_stat64(&hw_stats->tx_carrier_sense_errors) + + get_stat64(&hw_stats->tx_discards); + + stats->multicast = old_stats->multicast + + get_stat64(&hw_stats->rx_mcast_packets); + stats->collisions = old_stats->collisions + + get_stat64(&hw_stats->tx_collisions); + + stats->rx_length_errors = old_stats->rx_length_errors + + get_stat64(&hw_stats->rx_frame_too_long_errors) + + get_stat64(&hw_stats->rx_undersize_packets); + + stats->rx_frame_errors = old_stats->rx_frame_errors + + get_stat64(&hw_stats->rx_align_errors); + stats->tx_aborted_errors = old_stats->tx_aborted_errors + + get_stat64(&hw_stats->tx_discards); + stats->tx_carrier_errors = old_stats->tx_carrier_errors + + get_stat64(&hw_stats->tx_carrier_sense_errors); + + stats->rx_crc_errors = old_stats->rx_crc_errors + + tg3_calc_crc_errors(tp); + + stats->rx_missed_errors = old_stats->rx_missed_errors + + get_stat64(&hw_stats->rx_discards); + + stats->rx_dropped = tp->rx_dropped; + stats->tx_dropped = tp->tx_dropped; +} + +static int tg3_get_regs_len(struct net_device *dev) +{ + return TG3_REG_BLK_SIZE; +} + +static void tg3_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) +{ + struct tg3 *tp = netdev_priv(dev); + + regs->version = 0; + + memset(_p, 0, TG3_REG_BLK_SIZE); + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return; + + tg3_full_lock(tp, 0); + + tg3_dump_legacy_regs(tp, (u32 *)_p); + + tg3_full_unlock(tp); +} + +static int tg3_get_eeprom_len(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + return tp->nvram_size; +} + +static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct tg3 *tp = netdev_priv(dev); + int ret, cpmu_restore = 0; + u8 *pd; + u32 i, offset, len, b_offset, b_count, cpmu_val = 0; + __be32 val; + + if (tg3_flag(tp, NO_NVRAM)) + return -EINVAL; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = TG3_EEPROM_MAGIC; + + /* Override clock, link aware and link idle modes */ + if (tg3_flag(tp, CPMU_PRESENT)) { + cpmu_val = tr32(TG3_CPMU_CTRL); + if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | + CPMU_CTRL_LINK_IDLE_MODE)) { + tw32(TG3_CPMU_CTRL, cpmu_val & + ~(CPMU_CTRL_LINK_AWARE_MODE | + CPMU_CTRL_LINK_IDLE_MODE)); + cpmu_restore = 1; + } + } + tg3_override_clk(tp); + + if (offset & 3) { + /* adjustments to start on required 4 byte boundary */ + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) { + /* i.e. 
offset=1 len=2 */ + b_count = len; + } + ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); + if (ret) + goto eeprom_done; + memcpy(data, ((char *)&val) + b_offset, b_count); + len -= b_count; + offset += b_count; + eeprom->len += b_count; + } + + /* read bytes up to the last 4 byte boundary */ + pd = &data[eeprom->len]; + for (i = 0; i < (len - (len & 3)); i += 4) { + ret = tg3_nvram_read_be32(tp, offset + i, &val); + if (ret) { + if (i) + i -= 4; + eeprom->len += i; + goto eeprom_done; + } + memcpy(pd + i, &val, 4); + if (need_resched()) { + if (signal_pending(current)) { + eeprom->len += i; + ret = -EINTR; + goto eeprom_done; + } + cond_resched(); + } + } + eeprom->len += i; + + if (len & 3) { + /* read last bytes not ending on 4 byte boundary */ + pd = &data[eeprom->len]; + b_count = len & 3; + b_offset = offset + len - b_count; + ret = tg3_nvram_read_be32(tp, b_offset, &val); + if (ret) + goto eeprom_done; + memcpy(pd, &val, b_count); + eeprom->len += b_count; + } + ret = 0; + +eeprom_done: + /* Restore clock, link aware and link idle modes */ + tg3_restore_clk(tp); + if (cpmu_restore) + tw32(TG3_CPMU_CTRL, cpmu_val); + + return ret; +} + +static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct tg3 *tp = netdev_priv(dev); + int ret; + u32 offset, len, b_offset, odd_len; + u8 *buf; + __be32 start, end; + + if (tg3_flag(tp, NO_NVRAM) || + eeprom->magic != TG3_EEPROM_MAGIC) + return -EINVAL; + + offset = eeprom->offset; + len = eeprom->len; + + if ((b_offset = (offset & 3))) { + /* adjustments to start on required 4 byte boundary */ + ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); + if (ret) + return ret; + len += b_offset; + offset &= ~3; + if (len < 4) + len = 4; + } + + odd_len = 0; + if (len & 3) { + /* adjustments to end on required 4 byte boundary */ + odd_len = 1; + len = (len + 3) & ~3; + ret = tg3_nvram_read_be32(tp, offset+len-4, &end); + if (ret) + return ret; + } + + buf = data; + if (b_offset || odd_len) { + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (b_offset) + memcpy(buf, &start, 4); + if (odd_len) + memcpy(buf+len-4, &end, 4); + memcpy(buf + b_offset, data, eeprom->len); + } + + ret = tg3_nvram_write_block(tp, offset, len, buf); + + if (buf != data) + kfree(buf); + + return ret; +} + +static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + return phy_ethtool_gset(phydev, cmd); + } + + cmd->supported = (SUPPORTED_Autoneg); + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + cmd->supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP); + cmd->port = PORT_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } + + cmd->advertising = tp->link_config.advertising; + if (tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->link_config.flowctrl & FLOW_CTRL_RX) { + if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Pause; + } else { + cmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } + } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Asym_Pause; + } + } 
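+ /* Report live speed, duplex and MDI-X state only while the interface is running with link up; otherwise they are reported as unknown. */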
+ if (netif_running(dev) && tp->link_up) { + ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); + cmd->duplex = tp->link_config.active_duplex; + cmd->lp_advertising = tp->link_config.rmt_adv; + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE) + cmd->eth_tp_mdix = ETH_TP_MDI_X; + else + cmd->eth_tp_mdix = ETH_TP_MDI; + } + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + } + cmd->phy_address = tp->phy_addr; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = tp->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tg3 *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + return phy_ethtool_sset(phydev, cmd); + } + + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE && + cmd->duplex != DUPLEX_FULL && + cmd->duplex != DUPLEX_HALF) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 mask = ADVERTISED_Autoneg | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + mask |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + mask |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + mask |= ADVERTISED_FIBRE; + + if (cmd->advertising & ~mask) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + cmd->advertising &= mask; + } else { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { + if (speed != SPEED_1000) + return -EINVAL; + + if (cmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + if (speed != SPEED_100 && + speed != SPEED_10) + return -EINVAL; + } + } + + tg3_full_lock(tp, 0); + + tp->link_config.autoneg = cmd->autoneg; + if (cmd->autoneg == AUTONEG_ENABLE) { + tp->link_config.advertising = (cmd->advertising | + ADVERTISED_Autoneg); + tp->link_config.speed = SPEED_UNKNOWN; + tp->link_config.duplex = DUPLEX_UNKNOWN; + } else { + tp->link_config.advertising = 0; + tp->link_config.speed = speed; + tp->link_config.duplex = cmd->duplex; + } + + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + + tg3_warn_mgmt_link_flap(tp); + + if (netif_running(dev)) + tg3_setup_phy(tp, true); + + tg3_full_unlock(tp); + + return 0; +} + +static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct tg3 *tp = netdev_priv(dev); + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info)); +} + +static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + wol->wolopts = 0; + if (tg3_flag(tp, WOL_ENABLE) && 
device_can_wakeup(&tp->pdev->dev)) + wol->wolopts = WAKE_MAGIC; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct tg3 *tp = netdev_priv(dev); + struct device *dp = &tp->pdev->dev; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + if ((wol->wolopts & WAKE_MAGIC) && + !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) + return -EINVAL; + + device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); + + if (device_may_wakeup(dp)) + tg3_flag_set(tp, WOL_ENABLE); + else + tg3_flag_clear(tp, WOL_ENABLE); + + return 0; +} + +static u32 tg3_get_msglevel(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + return tp->msg_enable; +} + +static void tg3_set_msglevel(struct net_device *dev, u32 value) +{ + struct tg3 *tp = netdev_priv(dev); + tp->msg_enable = value; +} + +static int tg3_nway_reset(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + int r; + + if (!netif_running(dev)) + return -EAGAIN; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + return -EINVAL; + + tg3_warn_mgmt_link_flap(tp); + + if (tg3_flag(tp, USE_PHYLIB)) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]); + } else { + u32 bmcr; + + spin_lock_bh(&tp->lock); + r = -EINVAL; + tg3_readphy(tp, MII_BMCR, &bmcr); + if (!tg3_readphy(tp, MII_BMCR, &bmcr) && + ((bmcr & BMCR_ANENABLE) || + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + r = 0; + } + spin_unlock_bh(&tp->lock); + } + + return r; +} + +static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct tg3 *tp = netdev_priv(dev); + + ering->rx_max_pending = tp->rx_std_ring_mask; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; + else + ering->rx_jumbo_max_pending = 0; + + ering->tx_max_pending = TG3_TX_RING_SIZE - 1; + + ering->rx_pending = tp->rx_pending; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_pending = tp->rx_jumbo_pending; + else + ering->rx_jumbo_pending = 0; + + ering->tx_pending = tp->napi[0].tx_pending; +} + +static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct tg3 *tp = netdev_priv(dev); + int i, irq_sync = 0, err = 0; + + if ((ering->rx_pending > tp->rx_std_ring_mask) || + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || + (ering->tx_pending > TG3_TX_RING_SIZE - 1) || + (ering->tx_pending <= MAX_SKB_FRAGS) || + (tg3_flag(tp, TSO_BUG) && + (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) + return -EINVAL; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tp->rx_pending = ering->rx_pending; + + if (tg3_flag(tp, MAX_RXPEND_64) && + tp->rx_pending > 63) + tp->rx_pending = 63; + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + tp->rx_jumbo_pending = ering->rx_jumbo_pending; + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].tx_pending = ering->tx_pending; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, false); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err) + tg3_phy_start(tp); + + return err; +} + +static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct tg3 *tp = netdev_priv(dev); + + epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); + + if 
(tp->link_config.flowctrl & FLOW_CTRL_RX) + epause->rx_pause = 1; + else + epause->rx_pause = 0; + + if (tp->link_config.flowctrl & FLOW_CTRL_TX) + epause->tx_pause = 1; + else + epause->tx_pause = 0; +} + +static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct tg3 *tp = netdev_priv(dev); + int err = 0; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) + tg3_warn_mgmt_link_flap(tp); + + if (tg3_flag(tp, USE_PHYLIB)) { + u32 newadv; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + + if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + (epause->rx_pause != epause->tx_pause))) + return -EINVAL; + + tp->link_config.flowctrl = 0; + if (epause->rx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_RX; + + if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Pause; + } else + newadv = ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + u32 oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + if (phydev->autoneg) { + /* + * Always renegotiate the link to + * inform our link partner of our + * flow control settings, even if the + * flow control is forced. Let + * tg3_adjust_link() do the final + * flow control setup. + */ + return phy_start_aneg(phydev); + } + } + + if (!epause->autoneg) + tg3_setup_flow_control(tp, 0, 0); + } else { + tp->link_config.advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + tp->link_config.advertising |= newadv; + } + } else { + int irq_sync = 0; + + if (netif_running(dev)) { + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + if (epause->rx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_RX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_RX; + if (epause->tx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_TX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_TX; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, false); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + } + + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + + return err; +} + +static int tg3_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return TG3_NUM_TEST; + case ETH_SS_STATS: + return TG3_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + if (netif_running(tp->dev)) + info->data = tp->rxq_cnt; + else { + info->data = num_online_cpus(); + if (info->data > TG3_RSS_MAX_NUM_QS) + info->data = TG3_RSS_MAX_NUM_QS; + } + + /* The first interrupt vector only + * handles link interrupts. 
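+ * Exclude it from the RX ring count reported to user space.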
+ */ + info->data -= 1; + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static u32 tg3_get_rxfh_indir_size(struct net_device *dev) +{ + u32 size = 0; + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, SUPPORT_MSIX)) + size = TG3_RSS_INDIR_TBL_SIZE; + + return size; +} + +static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct tg3 *tp = netdev_priv(dev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + indir[i] = tp->rss_ind_tbl[i]; + + return 0; +} + +static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct tg3 *tp = netdev_priv(dev); + size_t i; + + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + tp->rss_ind_tbl[i] = indir[i]; + + if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS)) + return 0; + + /* It is legal to write the indirection + * table while the device is running. + */ + tg3_full_lock(tp, 0); + tg3_rss_write_indir_tbl(tp); + tg3_full_unlock(tp); + + return 0; +} + +static void tg3_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct tg3 *tp = netdev_priv(dev); + u32 deflt_qs = netif_get_num_default_rss_queues(); + + channel->max_rx = tp->rxq_max; + channel->max_tx = tp->txq_max; + + if (netif_running(dev)) { + channel->rx_count = tp->rxq_cnt; + channel->tx_count = tp->txq_cnt; + } else { + if (tp->rxq_req) + channel->rx_count = tp->rxq_req; + else + channel->rx_count = min(deflt_qs, tp->rxq_max); + + if (tp->txq_req) + channel->tx_count = tp->txq_req; + else + channel->tx_count = min(deflt_qs, tp->txq_max); + } +} + +static int tg3_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return -EOPNOTSUPP; + + if (channel->rx_count > tp->rxq_max || + channel->tx_count > tp->txq_max) + return -EINVAL; + + tp->rxq_req = channel->rx_count; + tp->txq_req = channel->tx_count; + + if (!netif_running(dev)) + return 0; + + tg3_stop(tp); + + tg3_carrier_off(tp); + + tg3_start(tp, true, false, false); + + return 0; +} + +static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); + break; + case ETH_SS_TEST: + memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); + break; + default: + WARN_ON(1); /* we need a WARN() */ + break; + } +} + +static int tg3_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(tp->dev)) + return -EAGAIN; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON | + LED_CTRL_100MBPS_ON | + LED_CTRL_10MBPS_ON | + LED_CTRL_TRAFFIC_OVERRIDE | + LED_CTRL_TRAFFIC_BLINK | + LED_CTRL_TRAFFIC_LED); + break; + + case ETHTOOL_ID_OFF: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE); + break; + + case ETHTOOL_ID_INACTIVE: + tw32(MAC_LED_CTRL, tp->led_ctrl); + break; + } + + return 0; +} + +static void tg3_get_ethtool_stats(struct net_device *dev, +
struct ethtool_stats *estats, u64 *tmp_stats) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tp->hw_stats) + tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); + else + memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); +} + +static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) +{ + int i; + __be32 *buf; + u32 offset = 0, len = 0; + u32 magic, val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return NULL; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == + TG3_NVM_DIRTYPE_EXTVPD) + break; + } + + if (offset != TG3_NVM_DIR_END) { + len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; + if (tg3_nvram_read(tp, offset + 4, &offset)) + return NULL; + + offset = tg3_nvram_logical_addr(tp, offset); + } + } + + if (!offset || !len) { + offset = TG3_NVM_VPD_OFF; + len = TG3_NVM_VPD_LEN; + } + + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < len; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. + */ + if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) + goto error; + } + } else { + u8 *ptr; + ssize_t cnt; + unsigned int pos = 0; + + ptr = (u8 *)&buf[0]; + for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + len - pos, ptr); + if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto error; + } + if (pos != len) + goto error; + } + + *vpdlen = len; + + return buf; + +error: + kfree(buf); + return NULL; +} + +#define NVRAM_TEST_SIZE 0x100 +#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 +#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 +#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c +#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 +#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 +#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 +#define NVRAM_SELFBOOT_HW_SIZE 0x20 +#define NVRAM_SELFBOOT_DATA_SIZE 0x1c + +static int tg3_test_nvram(struct tg3 *tp) +{ + u32 csum, magic, len; + __be32 *buf; + int i, j, k, err = 0, size; + + if (tg3_flag(tp, NO_NVRAM)) + return 0; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return -EIO; + + if (magic == TG3_EEPROM_MAGIC) + size = NVRAM_TEST_SIZE; + else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { + if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == + TG3_EEPROM_SB_FORMAT_1) { + switch (magic & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; + break; + case TG3_EEPROM_SB_REVISION_2: + size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; + break; + case TG3_EEPROM_SB_REVISION_3: + size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; + break; + case TG3_EEPROM_SB_REVISION_4: + size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; + break; + case TG3_EEPROM_SB_REVISION_5: + size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; + break; + case TG3_EEPROM_SB_REVISION_6: + size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; + break; + default: + return -EIO; + } + } else + return 0; + } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + size = NVRAM_SELFBOOT_HW_SIZE; + else + return -EIO; + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + err = -EIO; + for (i = 0, j = 0; i < size; i += 4, j++) { + err = tg3_nvram_read_be32(tp, i, &buf[j]); + if (err) + break; + } + if (i < size) + goto out; + + /* Selfboot format */ + 
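/* Firmware self-boot images carry a simple 8-bit checksum (format 1 rev 2 excludes the MBA word); hardware self-boot images store per-byte parity bits. Both are verified below. */ +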
magic = be32_to_cpu(buf[0]); + if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == + TG3_EEPROM_MAGIC_FW) { + u8 *buf8 = (u8 *) buf, csum8 = 0; + + if ((magic & TG3_EEPROM_SB_REVISION_MASK) == + TG3_EEPROM_SB_REVISION_2) { + /* For rev 2, the csum doesn't include the MBA. */ + for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) + csum8 += buf8[i]; + for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) + csum8 += buf8[i]; + } else { + for (i = 0; i < size; i++) + csum8 += buf8[i]; + } + + if (csum8 == 0) { + err = 0; + goto out; + } + + err = -EIO; + goto out; + } + + if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == + TG3_EEPROM_MAGIC_HW) { + u8 data[NVRAM_SELFBOOT_DATA_SIZE]; + u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; + u8 *buf8 = (u8 *) buf; + + /* Separate the parity bits and the data bytes. */ + for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { + if ((i == 0) || (i == 8)) { + int l; + u8 msk; + + for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } else if (i == 16) { + int l; + u8 msk; + + for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + + for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } + data[j++] = buf8[i]; + } + + err = -EIO; + for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { + u8 hw8 = hweight8(data[i]); + + if ((hw8 & 0x1) && parity[i]) + goto out; + else if (!(hw8 & 0x1) && !parity[i]) + goto out; + } + err = 0; + goto out; + } + + err = -EIO; + + /* Bootstrap checksum at offset 0x10 */ + csum = calc_crc((unsigned char *) buf, 0x10); + if (csum != le32_to_cpu(buf[0x10/4])) + goto out; + + /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ + csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); + if (csum != le32_to_cpu(buf[0xfc/4])) + goto out; + + kfree(buf); + + buf = tg3_vpd_readblock(tp, &len); + if (!buf) + return -ENOMEM; + + i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + + err = 0; + +out: + kfree(buf); + return err; +} + +#define TG3_SERDES_TIMEOUT_SEC 2 +#define TG3_COPPER_TIMEOUT_SEC 6 + +static int tg3_test_link(struct tg3 *tp) +{ + int i, max; + + if (!netif_running(tp->dev)) + return -ENODEV; + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + max = TG3_SERDES_TIMEOUT_SEC; + else + max = TG3_COPPER_TIMEOUT_SEC; + + for (i = 0; i < max; i++) { + if (tp->link_up) + return 0; + + if (msleep_interruptible(1000)) + break; + } + + return -EIO; +} + +/* Only test the commonly used registers */ +static int tg3_test_registers(struct tg3 *tp) +{ + int i, is_5705, is_5750; + u32 offset, read_mask, write_mask, val, save_val, read_val; + static struct { + u16 offset; + u16 flags; +#define TG3_FL_5705 0x1 +#define TG3_FL_NOT_5705 0x2 +#define TG3_FL_NOT_5788 0x4 +#define TG3_FL_NOT_5750 0x8 + u32 read_mask; + u32 write_mask; + } reg_tbl[] = { + /* MAC Control Registers */ + { MAC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00ef6f8c }, + { MAC_MODE, TG3_FL_5705, + 0x00000000, 0x01ef6b8c }, + { MAC_STATUS, TG3_FL_NOT_5705, + 0x03800107, 0x00000000 }, + { MAC_STATUS, TG3_FL_5705, + 0x03800100, 0x00000000 }, + { MAC_ADDR_0_HIGH, 0x0000, + 0x00000000, 
0x0000ffff }, + { MAC_ADDR_0_LOW, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_RX_MTU_SIZE, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_TX_MODE, 0x0000, + 0x00000000, 0x00000070 }, + { MAC_TX_LENGTHS, 0x0000, + 0x00000000, 0x00003fff }, + { MAC_RX_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x000007fc }, + { MAC_RX_MODE, TG3_FL_5705, + 0x00000000, 0x000007dc }, + { MAC_HASH_REG_0, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_1, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_2, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_3, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive Data and Receive BD Initiator Control Registers. */ + { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, + 0x00000000, 0x00000003 }, + { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+0, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+4, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+8, 0x0000, + 0x00000000, 0xffff0002 }, + { RCVDBDI_STD_BD+0xc, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive BD Initiator Control Registers. */ + { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVBDI_STD_THRESH, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + + /* Host Coalescing Control Registers. */ + { HOSTCC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00000004 }, + { HOSTCC_MODE, TG3_FL_5705, + 0x00000000, 0x000000f6 }, + { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + + /* Buffer Manager Control Registers. 
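+  * The MB pool address/size entries below are flagged TG3_FL_NOT_5750
+  * and are skipped on 5750 and newer chips.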
*/ + { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, + 0x00000000, 0x007fff80 }, + { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, + 0x00000000, 0x007fffff }, + { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, + 0x00000000, 0x0000003f }, + { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_MB_HIGH_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + + /* Mailbox Registers */ + { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, + 0x00000000, 0x000007ff }, + { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, + 0x00000000, 0x000001ff }, + + { 0xffff, 0x0000, 0x00000000, 0x00000000 }, + }; + + is_5705 = is_5750 = 0; + if (tg3_flag(tp, 5705_PLUS)) { + is_5705 = 1; + if (tg3_flag(tp, 5750_PLUS)) + is_5750 = 1; + } + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) + continue; + + if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) + continue; + + if (tg3_flag(tp, IS_5788) && + (reg_tbl[i].flags & TG3_FL_NOT_5788)) + continue; + + if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) + continue; + + offset = (u32) reg_tbl[i].offset; + read_mask = reg_tbl[i].read_mask; + write_mask = reg_tbl[i].write_mask; + + /* Save the original register content */ + save_val = tr32(offset); + + /* Determine the read-only value. */ + read_val = save_val & read_mask; + + /* Write zero to the register, then make sure the read-only bits + * are not changed and the read/write bits are all zeros. + */ + tw32(offset, 0); + + val = tr32(offset); + + /* Test the read-only and read/write bits. */ + if (((val & read_mask) != read_val) || (val & write_mask)) + goto out; + + /* Write ones to all the bits defined by RdMask and WrMask, then + * make sure the read-only bits are not changed and the + * read/write bits are all ones. + */ + tw32(offset, read_mask | write_mask); + + val = tr32(offset); + + /* Test the read-only bits. */ + if ((val & read_mask) != read_val) + goto out; + + /* Test the read/write bits. 
*/ + if ((val & write_mask) != write_mask) + goto out; + + tw32(offset, save_val); + } + + return 0; + +out: + if (netif_msg_hw(tp)) + netdev_err(tp->dev, + "Register test failed at offset %x\n", offset); + tw32(offset, save_val); + return -EIO; +} + +static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) +{ + static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; + int i; + u32 j; + + for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { + for (j = 0; j < len; j += 4) { + u32 val; + + tg3_write_mem(tp, offset + j, test_pattern[i]); + tg3_read_mem(tp, offset + j, &val); + if (val != test_pattern[i]) + return -EIO; + } + } + return 0; +} + +static int tg3_test_memory(struct tg3 *tp) +{ + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_570x[] = { + { 0x00000000, 0x00b50}, + { 0x00002000, 0x1c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5705[] = { + { 0x00000100, 0x0000c}, + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x01000}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0e000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5755[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x00800}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5906[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00400}, + { 0x00006000, 0x00400}, + { 0x00008000, 0x01000}, + { 0x00010000, 0x01000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5717[] = { + { 0x00000200, 0x00008}, + { 0x00010000, 0x0a000}, + { 0x00020000, 0x13c00}, + { 0xffffffff, 0x00000} + }, mem_tbl_57765[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x09800}, + { 0x00010000, 0x0a000}, + { 0xffffffff, 0x00000} + }; + struct mem_entry *mem_tbl; + int err = 0; + int i; + + if (tg3_flag(tp, 5717_PLUS)) + mem_tbl = mem_tbl_5717; + else if (tg3_flag(tp, 57765_CLASS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + mem_tbl = mem_tbl_57765; + else if (tg3_flag(tp, 5755_PLUS)) + mem_tbl = mem_tbl_5755; + else if (tg3_asic_rev(tp) == ASIC_REV_5906) + mem_tbl = mem_tbl_5906; + else if (tg3_flag(tp, 5705_PLUS)) + mem_tbl = mem_tbl_5705; + else + mem_tbl = mem_tbl_570x; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); + if (err) + break; + } + + return err; +} + +#define TG3_TSO_MSS 500 + +#define TG3_TSO_IP_HDR_LEN 20 +#define TG3_TSO_TCP_HDR_LEN 20 +#define TG3_TSO_TCP_OPT_LEN 12 + +static const u8 tg3_tso_header[] = { +0x08, 0x00, +0x45, 0x00, 0x00, 0x00, +0x00, 0x00, 0x40, 0x00, +0x40, 0x06, 0x00, 0x00, +0x0a, 0x00, 0x00, 0x01, +0x0a, 0x00, 0x00, 0x02, +0x0d, 0x00, 0xe0, 0x00, +0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x02, 0x00, +0x80, 0x10, 0x10, 0x00, +0x14, 0x09, 0x00, 0x00, +0x01, 0x01, 0x08, 0x0a, +0x11, 0x11, 0x11, 0x11, +0x11, 0x11, 0x11, 0x11, +}; + +static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) +{ + u32 rx_start_idx, rx_idx, tx_idx, opaque_key; + u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; + u32 budget; + struct sk_buff *skb; + u8 *tx_data, *rx_data; + dma_addr_t map; + int num_pkts, tx_len, rx_len, i, err; + struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + if (tp->irq_cnt > 1) { + if (tg3_flag(tp, ENABLE_RSS)) + rnapi = &tp->napi[1]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi = &tp->napi[1]; + } + coal_now = tnapi->coal_now | rnapi->coal_now; + + err = -EIO; + + tx_len = pktsz; + 
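+ /* Build a self-addressed test frame: the destination MAC is the
+  * device's own address and the payload is filled with an incrementing
+  * byte pattern that the receive check below verifies.
+  */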
skb = netdev_alloc_skb(tp->dev, tx_len); + if (!skb) + return -ENOMEM; + + tx_data = skb_put(skb, tx_len); + memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); + memset(tx_data + ETH_ALEN, 0x0, 8); + + tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); + + if (tso_loopback) { + struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; + + u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + + TG3_TSO_TCP_OPT_LEN; + + memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, + sizeof(tg3_tso_header)); + mss = TG3_TSO_MSS; + + val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); + num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); + + /* Set the total length field in the IP header */ + iph->tot_len = htons((u16)(mss + hdr_len)); + + base_flags = (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + struct tcphdr *th; + val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; + th = (struct tcphdr *)&tx_data[val]; + th->check = 0; + } else + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + tg3_asic_rev(tp) == ASIC_REV_5705) { + mss |= (TG3_TSO_TCP_OPT_LEN << 9); + } else { + base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); + } + + data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); + } else { + num_pkts = 1; + data_off = ETH_HLEN; + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + tx_len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + } + + for (i = data_off; i < tx_len; i++) + tx_data[i] = (u8) (i & 0xff); + + map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + + val = tnapi->tx_prod; + tnapi->tx_buffers[val].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + rnapi->coal_now); + + udelay(10); + + rx_start_idx = rnapi->hw_status->idx[0].rx_producer; + + budget = tg3_tx_avail(tnapi); + if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, + base_flags | TXD_FLAG_END, mss, 0)) { + tnapi->tx_buffers[val].skb = NULL; + dev_kfree_skb(skb); + return -EIO; + } + + tnapi->tx_prod++; + + /* Sync BD data before updating mailbox */ + wmb(); + + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + tr32_mailbox(tnapi->prodmbox); + + udelay(10); + + /* 350 usec to allow enough time on some 10/100 Mbps devices. 
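+  * The loop below polls up to 35 times with a 10 usec delay per pass.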
*/ + for (i = 0; i < 35; i++) { + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + coal_now); + + udelay(10); + + tx_idx = tnapi->hw_status->idx[0].tx_consumer; + rx_idx = rnapi->hw_status->idx[0].rx_producer; + if ((tx_idx == tnapi->tx_prod) && + (rx_idx == (rx_start_idx + num_pkts))) + break; + } + + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); + dev_kfree_skb(skb); + + if (tx_idx != tnapi->tx_prod) + goto out; + + if (rx_idx != rx_start_idx + num_pkts) + goto out; + + val = data_off; + while (rx_idx != rx_start_idx) { + desc = &rnapi->rx_rcb[rx_start_idx++]; + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) + goto out; + + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) + - ETH_FCS_LEN; + + if (!tso_loopback) { + if (rx_len != tx_len) + goto out; + + if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { + if (opaque_key != RXD_OPAQUE_RING_STD) + goto out; + } else { + if (opaque_key != RXD_OPAQUE_RING_JUMBO) + goto out; + } + } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT != 0xffff) { + goto out; + } + + if (opaque_key == RXD_OPAQUE_RING_STD) { + rx_data = tpr->rx_std_buffers[desc_idx].data; + map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], + mapping); + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + rx_data = tpr->rx_jmb_buffers[desc_idx].data; + map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], + mapping); + } else + goto out; + + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, + PCI_DMA_FROMDEVICE); + + rx_data += TG3_RX_OFFSET(tp); + for (i = data_off; i < rx_len; i++, val++) { + if (*(rx_data + i) != (u8) (val & 0xff)) + goto out; + } + } + + err = 0; + + /* tg3_free_rings will unmap and free the rx_data */ +out: + return err; +} + +#define TG3_STD_LOOPBACK_FAILED 1 +#define TG3_JMB_LOOPBACK_FAILED 2 +#define TG3_TSO_LOOPBACK_FAILED 4 +#define TG3_LOOPBACK_FAILED \ + (TG3_STD_LOOPBACK_FAILED | \ + TG3_JMB_LOOPBACK_FAILED | \ + TG3_TSO_LOOPBACK_FAILED) + +static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) +{ + int err = -EIO; + u32 eee_cap; + u32 jmb_pkt_sz = 9000; + + if (tp->dma_limit) + jmb_pkt_sz = tp->dma_limit - ETH_HLEN; + + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + + if (!netif_running(tp->dev)) { + data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; + data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; + goto done; + } + + err = tg3_reset_hw(tp, true); + if (err) { + data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED; + data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED; + goto done; + } + + if (tg3_flag(tp, ENABLE_RSS)) { + int i; + + /* Reroute all rx packets to the 1st queue */ + for (i = MAC_RSS_INDIR_TBL_0; + i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) + tw32(i, 0x0); + } + + /* HW errata - mac loopback fails in some cases on 5780. + * Normal traffic and PHY loopback are not affected by + * errata. Also, the MAC loopback test is deprecated for + * all newer ASIC revisions. 
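+  * (The CPMU_PRESENT check below skips the MAC loopback test on those
+  * parts.)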
+ */ + if (tg3_asic_rev(tp) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) { + tg3_mac_loopback(tp, true); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; + + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) + data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; + + tg3_mac_loopback(tp, false); + } + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tg3_flag(tp, USE_PHYLIB)) { + int i; + + tg3_phy_lpbk_set(tp, 0, false); + + /* Wait for link */ + for (i = 0; i < 100; i++) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + break; + mdelay(1); + } + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) + data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED; + + if (do_extlpbk) { + tg3_phy_lpbk_set(tp, 0, true); + + /* All link indications report up, but the hardware + * isn't really ready for about 20 msec. Double it + * to be sure. + */ + mdelay(40); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[TG3_EXT_LOOPB_TEST] |= + TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[TG3_EXT_LOOPB_TEST] |= + TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) + data[TG3_EXT_LOOPB_TEST] |= + TG3_JMB_LOOPBACK_FAILED; + } + + /* Re-enable gphy autopowerdown. */ + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + } + + err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] | + data[TG3_EXT_LOOPB_TEST]) ? 
-EIO : 0; + +done: + tp->phy_flags |= eee_cap; + + return err; +} + +static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, + u64 *data) +{ + struct tg3 *tp = netdev_priv(dev); + bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + if (tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + } + + memset(data, 0, sizeof(u64) * TG3_NUM_TEST); + + if (tg3_test_nvram(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[TG3_NVRAM_TEST] = 1; + } + if (!doextlpbk && tg3_test_link(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + data[TG3_LINK_TEST] = 1; + } + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int err, err2 = 0, irq_sync = 0; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + tg3_halt(tp, RESET_KIND_SUSPEND, 1); + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!tg3_flag(tp, 5705_PLUS)) + tg3_halt_cpu(tp, TX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + tg3_phy_reset(tp); + + if (tg3_test_registers(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[TG3_REGISTER_TEST] = 1; + } + + if (tg3_test_memory(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[TG3_MEMORY_TEST] = 1; + } + + if (doextlpbk) + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + + if (tg3_test_loopback(tp, data, doextlpbk)) + etest->flags |= ETH_TEST_FL_FAILED; + + tg3_full_unlock(tp); + + if (tg3_test_interrupt(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[TG3_INTERRUPT_TEST] = 1; + } + + tg3_full_lock(tp, 0); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + if (netif_running(dev)) { + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, true); + if (!err2) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err2) + tg3_phy_start(tp); + } + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + tg3_power_down_prepare(tp); + +} + +static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct tg3 *tp = netdev_priv(dev); + struct hwtstamp_config stmpconf; + + if (!tg3_flag(tp, PTP_CAPABLE)) + return -EOPNOTSUPP; + + if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) + return -EFAULT; + + if (stmpconf.flags) + return -EINVAL; + + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) + return -ERANGE; + + switch (stmpconf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tp->rxptpctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | + TG3_RX_PTP_CTL_ALL_V1_EVENTS; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | + TG3_RX_PTP_CTL_ALL_V2_EVENTS; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | + TG3_RX_PTP_CTL_ALL_V2_EVENTS; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | + TG3_RX_PTP_CTL_ALL_V2_EVENTS; + break; + case HWTSTAMP_FILTER_PTP_V2_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + case 
HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | + TG3_RX_PTP_CTL_SYNC_EVNT; + break; + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | + TG3_RX_PTP_CTL_DELAY_REQ; + break; + default: + return -ERANGE; + } + + if (netif_running(dev) && tp->rxptpctl) + tw32(TG3_RX_PTP_CTL, + tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); + + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + tg3_flag_set(tp, TX_TSTAMP_EN); + else + tg3_flag_clear(tp, TX_TSTAMP_EN); + + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? + -EFAULT : 0; +} + +static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct tg3 *tp = netdev_priv(dev); + struct hwtstamp_config stmpconf; + + if (!tg3_flag(tp, PTP_CAPABLE)) + return -EOPNOTSUPP; + + stmpconf.flags = 0; + stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? + HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); + + switch (tp->rxptpctl) { + case 0: + stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; + break; + case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + break; + case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; + break; + case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: + stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + break; + default: + WARN_ON_ONCE(1); + return -ERANGE; + } + + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
+ -EFAULT : 0; +} + +static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + return phy_mii_ioctl(phydev, ifr, cmd); + } + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = tp->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u32 mii_regval; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = __tg3_readphy(tp, data->phy_id & 0x1f, + data->reg_num & 0x1f, &mii_regval); + spin_unlock_bh(&tp->lock); + + data->val_out = mii_regval; + + return err; + } + + case SIOCSMIIREG: + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = __tg3_writephy(tp, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); + spin_unlock_bh(&tp->lock); + + return err; + + case SIOCSHWTSTAMP: + return tg3_hwtstamp_set(dev, ifr); + + case SIOCGHWTSTAMP: + return tg3_hwtstamp_get(dev, ifr); + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct tg3 *tp = netdev_priv(dev); + + memcpy(ec, &tp->coal, sizeof(*ec)); + return 0; +} + +static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct tg3 *tp = netdev_priv(dev); + u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; + u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; + + if (!tg3_flag(tp, 5705_PLUS)) { + max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; + max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; + max_stat_coal_ticks = MAX_STAT_COAL_TICKS; + min_stat_coal_ticks = MIN_STAT_COAL_TICKS; + } + + if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || + (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || + (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || + (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || + (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || + (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || + (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || + (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || + (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) + return -EINVAL; + + /* No rx interrupts will be generated if both are zero */ + if ((ec->rx_coalesce_usecs == 0) && + (ec->rx_max_coalesced_frames == 0)) + return -EINVAL; + + /* No tx interrupts will be generated if both are zero */ + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + /* Only copy relevant parameters, ignore all others. 
*/ + tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; + tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; + tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; + tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; + tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; + + if (netif_running(dev)) { + tg3_full_lock(tp, 0); + __tg3_set_coalesce(tp, &tp->coal); + tg3_full_unlock(tp); + } + return 0; +} + +static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + if (edata->advertised != tp->eee.advertised) { + netdev_warn(tp->dev, + "Direct manipulation of EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { + netdev_warn(tp->dev, + "Maximal Tx Lpi timer supported is %#x(u)\n", + TG3_CPMU_DBTMR1_LNKIDLE_MAX); + return -EINVAL; + } + + tp->eee = *edata; + + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + tg3_warn_mgmt_link_flap(tp); + + if (netif_running(tp->dev)) { + tg3_full_lock(tp, 0); + tg3_setup_eee(tp); + tg3_phy_reset(tp); + tg3_full_unlock(tp); + } + + return 0; +} + +static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, + "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + *edata = tp->eee; + return 0; +} + +static const struct ethtool_ops tg3_ethtool_ops = { + .get_settings = tg3_get_settings, + .set_settings = tg3_set_settings, + .get_drvinfo = tg3_get_drvinfo, + .get_regs_len = tg3_get_regs_len, + .get_regs = tg3_get_regs, + .get_wol = tg3_get_wol, + .set_wol = tg3_set_wol, + .get_msglevel = tg3_get_msglevel, + .set_msglevel = tg3_set_msglevel, + .nway_reset = tg3_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = tg3_get_eeprom_len, + .get_eeprom = tg3_get_eeprom, + .set_eeprom = tg3_set_eeprom, + .get_ringparam = tg3_get_ringparam, + .set_ringparam = tg3_set_ringparam, + .get_pauseparam = tg3_get_pauseparam, + .set_pauseparam = tg3_set_pauseparam, + .self_test = tg3_self_test, + .get_strings = tg3_get_strings, + .set_phys_id = tg3_set_phys_id, + .get_ethtool_stats = tg3_get_ethtool_stats, + .get_coalesce = tg3_get_coalesce, + .set_coalesce = tg3_set_coalesce, + .get_sset_count = tg3_get_sset_count, + .get_rxnfc = tg3_get_rxnfc, + .get_rxfh_indir_size = tg3_get_rxfh_indir_size, + .get_rxfh = tg3_get_rxfh, + .set_rxfh = tg3_set_rxfh, + .get_channels = tg3_get_channels, + .set_channels = tg3_set_channels, + .get_ts_info = tg3_get_ts_info, + .get_eee = tg3_get_eee, + .set_eee = tg3_set_eee, +}; + +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct tg3 *tp = netdev_priv(dev); + + spin_lock_bh(&tp->lock); + if (!tp->hw_stats) { + *stats = tp->net_stats_prev; + spin_unlock_bh(&tp->lock); + return stats; + } + + tg3_get_nstats(tp, stats); + spin_unlock_bh(&tp->lock); + + return stats; +} + +static void tg3_set_rx_mode(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if 
(!netif_running(dev)) + return; + + tg3_full_lock(tp, 0); + __tg3_set_rx_mode(dev); + tg3_full_unlock(tp); +} + +static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, + int new_mtu) +{ + dev->mtu = new_mtu; + + if (new_mtu > ETH_DATA_LEN) { + if (tg3_flag(tp, 5780_CLASS)) { + netdev_update_features(dev); + tg3_flag_clear(tp, TSO_CAPABLE); + } else { + tg3_flag_set(tp, JUMBO_RING_ENABLE); + } + } else { + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_set(tp, TSO_CAPABLE); + netdev_update_features(dev); + } + tg3_flag_clear(tp, JUMBO_RING_ENABLE); + } +} + +static int tg3_change_mtu(struct net_device *dev, int new_mtu) +{ + struct tg3 *tp = netdev_priv(dev); + int err; + bool reset_phy = false; + + if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) + return -EINVAL; + + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. + */ + tg3_set_mtu(dev, tp, new_mtu); + return 0; + } + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_set_mtu(dev, tp, new_mtu); + + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + + /* Reset PHY, otherwise the read DMA engine will be in a mode that + * breaks all requests to 256 bytes. + */ + if (tg3_asic_rev(tp) == ASIC_REV_57766) + reset_phy = true; + + err = tg3_restart_hw(tp, reset_phy); + + if (!err) + tg3_netif_start(tp); + + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; +} + +static const struct net_device_ops tg3_netdev_ops = { + .ndo_open = tg3_open, + .ndo_stop = tg3_close, + .ndo_start_xmit = tg3_start_xmit, + .ndo_get_stats64 = tg3_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = tg3_set_rx_mode, + .ndo_set_mac_address = tg3_set_mac_addr, + .ndo_do_ioctl = tg3_ioctl, + .ndo_tx_timeout = tg3_tx_timeout, + .ndo_change_mtu = tg3_change_mtu, + .ndo_fix_features = tg3_fix_features, + .ndo_set_features = tg3_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tg3_poll_controller, +#endif +}; + +static void tg3_get_eeprom_size(struct tg3 *tp) +{ + u32 cursize, val, magic; + + tp->nvram_size = EEPROM_CHIP_SIZE; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return; + + if ((magic != TG3_EEPROM_MAGIC) && + ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && + ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) + return; + + /* + * Size the chip by reading offsets at increasing powers of two. + * When we encounter our validation signature, we know the addressing + * has wrapped around, and thus have our chip size. + */ + cursize = 0x10; + + while (cursize < tp->nvram_size) { + if (tg3_nvram_read(tp, cursize, &val) != 0) + return; + + if (val == magic) + break; + + cursize <<= 1; + } + + tp->nvram_size = cursize; +} + +static void tg3_get_nvram_size(struct tg3 *tp) +{ + u32 val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) + return; + + /* Selfboot format */ + if (val != TG3_EEPROM_MAGIC) { + tg3_get_eeprom_size(tp); + return; + } + + if (tg3_nvram_read(tp, 0xf0, &val) == 0) { + if (val != 0) { + /* This is confusing. We want to operate on the + * 16-bit value at offset 0xf2. The tg3_nvram_read() + * call will read from NVRAM and byteswap the data + * according to the byteswapping settings for all + * other register accesses. This ensures the data we + * want will always reside in the lower 16-bits. + * However, the data in NVRAM is in LE format, which + * means the data from the NVRAM read will always be + * opposite the endianness of the CPU. 
The 16-bit + * byteswap then brings the data to CPU endianness. + */ + tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; + return; + } + } + tp->nvram_size = TG3_NVRAM_SIZE_512KB; +} + +static void tg3_get_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { + tg3_flag_set(tp, FLASH); + } else { + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { + switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; + break; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; + break; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; + break; + } + } else { + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + } +} + +static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) +{ + switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize = 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; + case FLASH_5752PAGE_SIZE_528: + tp->nvram_pagesize = 528; + break; + } +} + +static void tg3_get_5752_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tg3_flag_set(tp, PROTECTED_NVRAM); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + } + + if (tg3_flag(tp, FLASH)) { + tg3_nvram_get_pagesize(tp, nvcfg1); + } else { + /* For eeprom, set pagesize to maximum eeprom size */ + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } +} + +static void tg3_get_5755_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch 
(nvcfg1) { + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; + } +} + +static void tg3_get_5787_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } +} + +static void tg3_get_5761_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } + + if 
(protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + } + } +} + +static void tg3_get_5906_nvram_info(struct tg3 *tp) +{ + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; +} + +static void tg3_get_57780_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ST_M45PE10: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_5752VENDOR_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + + +static void tg3_get_5717_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_EEPROM: + case FLASH_5717VENDOR_MICRO_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= 
~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5717VENDOR_ATMEL_MDB011D: + case FLASH_5717VENDOR_ATMEL_ADB011B: + case FLASH_5717VENDOR_ATMEL_ADB011D: + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + case FLASH_5717VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5717VENDOR_ST_M_M25PE10: + case FLASH_5717VENDOR_ST_A_M25PE10: + case FLASH_5717VENDOR_ST_M_M45PE10: + case FLASH_5717VENDOR_ST_A_M45PE10: + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + case FLASH_5717VENDOR_ST_25USPT: + case FLASH_5717VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + +static void tg3_get_5720_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, nvmpinstrp; + + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) { + tg3_flag_set(tp, NO_NVRAM); + return; + } + + switch (nvmpinstrp) { + case FLASH_5762_EEPROM_HD: + nvmpinstrp = FLASH_5720_EEPROM_HD; + break; + case FLASH_5762_EEPROM_LD: + nvmpinstrp = FLASH_5720_EEPROM_LD; + break; + case FLASH_5720VENDOR_M_ST_M45PE20: + /* This pinstrap supports multiple sizes, so force it + * to read the actual size from location 0xf0. 
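+  * With no size set here, tg3_get_nvram_size() later reads the
+  * actual size from offset 0xf0.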
+ */ + nvmpinstrp = FLASH_5720VENDOR_ST_45USPT; + break; + } + } + + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + if (tg3_asic_rev(tp) != ASIC_REV_5762) + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + if (tg3_asic_rev(tp) != ASIC_REV_5762) + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + + if (tg3_asic_rev(tp) == ASIC_REV_5762) { + u32 val; + + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val != TG3_EEPROM_MAGIC && + (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) + 
tg3_flag_set(tp, NO_NVRAM); + } +} + +/* Chips other than 5700/5701 use the NVRAM for fetching info. */ +static void tg3_nvram_init(struct tg3 *tp) +{ + if (tg3_flag(tp, IS_SSB_CORE)) { + /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */ + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tw32_f(GRC_EEPROM_ADDR, + (EEPROM_ADDR_FSM_RESET | + (EEPROM_DEFAULT_CLOCK_PERIOD << + EEPROM_ADDR_CLKPERD_SHIFT))); + + msleep(1); + + /* Enable seeprom accesses. */ + tw32_f(GRC_LOCAL_CTRL, + tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); + udelay(100); + + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701) { + tg3_flag_set(tp, NVRAM); + + if (tg3_nvram_lock(tp)) { + netdev_warn(tp->dev, + "Cannot get nvram lock, %s failed\n", + __func__); + return; + } + tg3_enable_nvram_access(tp); + + tp->nvram_size = 0; + + if (tg3_asic_rev(tp) == ASIC_REV_5752) + tg3_get_5752_nvram_info(tp); + else if (tg3_asic_rev(tp) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); + else if (tg3_asic_rev(tp) == ASIC_REV_5787 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5785) + tg3_get_5787_nvram_info(tp); + else if (tg3_asic_rev(tp) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); + else if (tg3_asic_rev(tp) == ASIC_REV_5906) + tg3_get_5906_nvram_info(tp); + else if (tg3_asic_rev(tp) == ASIC_REV_57780 || + tg3_flag(tp, 57765_CLASS)) + tg3_get_57780_nvram_info(tp); + else if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719) + tg3_get_5717_nvram_info(tp); + else if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) + tg3_get_5720_nvram_info(tp); + else + tg3_get_nvram_info(tp); + + if (tp->nvram_size == 0) + tg3_get_nvram_size(tp); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + + } else { + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); + + tg3_get_eeprom_size(tp); + } +} + +struct subsys_tbl_ent { + u16 subsys_vendor, subsys_devid; + u32 phy_id; +}; + +static struct subsys_tbl_ent subsys_id_to_phy_id[] = { + /* Broadcom boards. */ + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, + + /* 3com boards. 
*/ + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, + + /* DELL boards. */ + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, + + /* Compaq boards. */ + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, + + /* IBM boards. */ + { TG3PCI_SUBVENDOR_ID_IBM, + TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } +}; + +static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { + if ((subsys_id_to_phy_id[i].subsys_vendor == + tp->pdev->subsystem_vendor) && + (subsys_id_to_phy_id[i].subsys_devid == + tp->pdev->subsystem_device)) + return &subsys_id_to_phy_id[i]; + } + return NULL; +} + +static void tg3_get_eeprom_hw_cfg(struct tg3 *tp) +{ + u32 val; + + tp->phy_id = TG3_PHY_ID_INVALID; + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + /* Assume an onboard device and WOL capable by default. 
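+  * The NIC SRAM config words read below override these defaults when
+  * a valid signature is present.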
*/ + tg3_flag_set(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, WOL_CAP); + + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + val = tr32(VCPU_CFGSHDW); + if (val & VCPU_CFGSHDW_ASPM_DBNC) + tg3_flag_set(tp, ASPM_WORKAROUND); + if ((val & VCPU_CFGSHDW_WOL_ENABLE) && + (val & VCPU_CFGSHDW_WOL_MAGPKT)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + goto done; + } + + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg, led_cfg; + u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; + u32 nic_phy_id, ver, eeprom_phy_id; + int eeprom_phy_serdes = 0; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + tp->nic_sram_data_cfg = nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); + ver >>= NIC_SRAM_DATA_VER_SHIFT; + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701 && + tg3_asic_rev(tp) != ASIC_REV_5703 && + (ver > 0) && (ver < 0x100)) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); + + if (tg3_asic_rev(tp) == ASIC_REV_5785) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); + + if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == + NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) + eeprom_phy_serdes = 1; + + tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); + if (nic_phy_id != 0) { + u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; + u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; + + eeprom_phy_id = (id1 >> 16) << 10; + eeprom_phy_id |= (id2 & 0xfc00) << 16; + eeprom_phy_id |= (id2 & 0x03ff) << 0; + } else + eeprom_phy_id = 0; + + tp->phy_id = eeprom_phy_id; + if (eeprom_phy_serdes) { + if (!tg3_flag(tp, 5705_PLUS)) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags |= TG3_PHYFLG_MII_SERDES; + } + + if (tg3_flag(tp, 5750_PLUS)) + led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | + SHASTA_EXT_LED_MODE_MASK); + else + led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; + + switch (led_cfg) { + default: + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_MAC: + tp->led_ctrl = LED_CTRL_MODE_MAC; + + /* Default to PHY_1_MODE if 0 (MAC_MODE) is + * read on some older 5700/5701 bootcode. 
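[Editorial aside: the eeprom_phy_id assembly a little earlier in this hunk splices two halves of the NIC_SRAM_DATA_PHY_ID word into one PHY ID. The sketch below mirrors those exact shifts; the mask literals assume ID1 occupies the upper halfword and ID2 the lower, which is what `id1 >> 16` implies but is not confirmed here.]

#include <stdint.h>
#include <stdio.h>

static uint32_t eeprom_phy_id_from_sram(uint32_t nic_phy_id)
{
	uint32_t id1 = nic_phy_id & 0xffff0000;	/* NIC_SRAM_DATA_PHY_ID1_MASK (assumed value) */
	uint32_t id2 = nic_phy_id & 0x0000ffff;	/* NIC_SRAM_DATA_PHY_ID2_MASK (assumed value) */
	uint32_t id;

	/* Same shift-and-merge as the driver code above. */
	id  = (id1 >> 16) << 10;
	id |= (id2 & 0xfc00) << 16;
	id |= (id2 & 0x03ff) << 0;
	return id;
}

int main(void)
{
	printf("0x%08x\n", eeprom_phy_id_from_sram(0x01410c00));	/* arbitrary sample word */
	return 0;
}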
+ */ + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + break; + + case SHASTA_EXT_LED_SHARED: + tp->led_ctrl = LED_CTRL_MODE_SHARED; + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + + if (tg3_flag(tp, 5717_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | + LED_CTRL_BLINK_RATE_MASK; + + break; + + case SHASTA_EXT_LED_MAC: + tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; + break; + + case SHASTA_EXT_LED_COMBO: + tp->led_ctrl = LED_CTRL_MODE_COMBO; + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + break; + + } + + if ((tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) && + tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + + if (tg3_chip_rev(tp) == CHIPREV_5784_AX) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { + tg3_flag_set(tp, EEPROM_WRITE_PROT); + if ((tp->pdev->subsystem_vendor == + PCI_VENDOR_ID_ARIMA) && + (tp->pdev->subsystem_device == 0x205a || + tp->pdev->subsystem_device == 0x2063)) + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + } else { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + + if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ENABLE_APE); + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && + !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) + tg3_flag_clear(tp, WOL_CAP); + + if (tg3_flag(tp, WOL_CAP) && + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + + if (cfg2 & (1 << 17)) + tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; + + /* serdes signal pre-emphasis in register 0x590 set by */ + /* bootcode if bit 18 is set */ + if (cfg2 & (1 << 18)) + tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; + + if ((tg3_flag(tp, 57765_PLUS) || + (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != CHIPREV_5784_AX)) && + (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) + tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; + + if (tg3_flag(tp, PCI_EXPRESS)) { + u32 cfg3; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); + if (tg3_asic_rev(tp) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS) && + (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)) + tg3_flag_set(tp, ASPM_WORKAROUND); + if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID) + tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN; + if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK) + tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK; + } + + if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) + tg3_flag_set(tp, RGMII_INBAND_DISABLE); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); + + if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) + tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV; + } +done: + if (tg3_flag(tp, WOL_CAP)) + device_set_wakeup_enable(&tp->pdev->dev, + tg3_flag(tp, WOL_ENABLE)); + else + device_set_wakeup_capable(&tp->pdev->dev, false); +} + +static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val) +{ + int i, err; + u32 val2, off = offset * 8; + + err = tg3_nvram_lock(tp); + if (err) + return err; + + 
tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE); + tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN | + APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START); + tg3_ape_read32(tp, TG3_APE_OTP_CTRL); + udelay(10); + + for (i = 0; i < 100; i++) { + val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS); + if (val2 & APE_OTP_STATUS_CMD_DONE) { + *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA); + break; + } + udelay(10); + } + + tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0); + + tg3_nvram_unlock(tp); + if (val2 & APE_OTP_STATUS_CMD_DONE) + return 0; + + return -EBUSY; +} + +static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd) +{ + int i; + u32 val; + + tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); + tw32(OTP_CTRL, cmd); + + /* Wait for up to 1 ms for command to execute. */ + for (i = 0; i < 100; i++) { + val = tr32(OTP_STATUS); + if (val & OTP_STATUS_CMD_DONE) + break; + udelay(10); + } + + return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY; +} + +/* Read the gphy configuration from the OTP region of the chip. The gphy + * configuration is a 32-bit value that straddles the alignment boundary. + * We do two 32-bit reads and then shift and merge the results. + */ +static u32 tg3_read_otp_phycfg(struct tg3 *tp) +{ + u32 bhalf_otp, thalf_otp; + + tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) + return 0; + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + thalf_otp = tr32(OTP_READ_DATA); + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + bhalf_otp = tr32(OTP_READ_DATA); + + return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); +} + +static void tg3_phy_init_link_config(struct tg3 *tp) +{ + u32 adv = ADVERTISED_Autoneg; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) + adv |= ADVERTISED_1000baseT_Half; + adv |= ADVERTISED_1000baseT_Full; + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + adv |= ADVERTISED_FIBRE; + + tp->link_config.advertising = adv; + tp->link_config.speed = SPEED_UNKNOWN; + tp->link_config.duplex = DUPLEX_UNKNOWN; + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.active_speed = SPEED_UNKNOWN; + tp->link_config.active_duplex = DUPLEX_UNKNOWN; + + tp->old_link = -1; +} + +static int tg3_phy_probe(struct tg3 *tp) +{ + u32 hw_phy_id_1, hw_phy_id_2; + u32 hw_phy_id, hw_phy_id_masked; + int err; + + /* flow control autonegotiation is default behavior */ + tg3_flag_set(tp, PAUSE_AUTONEG); + tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + + if (tg3_flag(tp, ENABLE_APE)) { + switch (tp->pci_fn) { + case 0: + tp->phy_ape_lock = TG3_APE_LOCK_PHY0; + break; + case 1: + tp->phy_ape_lock = TG3_APE_LOCK_PHY1; + break; + case 2: + tp->phy_ape_lock = TG3_APE_LOCK_PHY2; + break; + case 3: + tp->phy_ape_lock = TG3_APE_LOCK_PHY3; + break; + } + } + + if (!tg3_flag(tp, ENABLE_ASF) && + !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | + TG3_PHYFLG_KEEP_LINK_ON_PWRDN); + + if (tg3_flag(tp, USE_PHYLIB)) + return tg3_phy_init(tp); + + /* Reading the PHY ID register can conflict with ASF + * firmware access to the PHY hardware. 
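[Editorial aside: tg3_read_otp_phycfg() earlier in this hunk notes that the gphy configuration straddles a 32-bit alignment boundary, so two aligned words are read and their interesting halves spliced together. A self-contained sketch of just that splice, with arbitrary sample words:]

#include <stdint.h>
#include <stdio.h>

/* Keep the low half of the first word and the high half of the second,
 * exactly as the return statement of tg3_read_otp_phycfg() does. */
static uint32_t splice_otp_halves(uint32_t thalf, uint32_t bhalf)
{
	return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}

int main(void)
{
	/* 0xaaaa1234 / 0x5678bbbb -> 0x12345678 */
	printf("0x%08x\n", splice_otp_halves(0xaaaa1234u, 0x5678bbbbu));
	return 0;
}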
+ */ + err = 0; + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { + hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; + } else { + /* Now read the physical PHY_ID from the chip and verify + * that it is sane. If it doesn't look good, we fall back + * to either the hard-coded table based PHY_ID and failing + * that the value found in the eeprom area. + */ + err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); + err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); + + hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; + hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; + hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; + + hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; + } + + if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { + tp->phy_id = hw_phy_id; + if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; + } else { + if (tp->phy_id != TG3_PHY_ID_INVALID) { + /* Do nothing, phy ID already set up in + * tg3_get_eeprom_hw_cfg(). + */ + } else { + struct subsys_tbl_ent *p; + + /* No eeprom signature? Try the hardcoded + * subsys device table. + */ + p = tg3_lookup_by_subsys(tp); + if (p) { + tp->phy_id = p->phy_id; + } else if (!tg3_flag(tp, IS_SSB_CORE)) { + /* For now we saw the IDs 0xbc050cd0, + * 0xbc050f80 and 0xbc050c30 on devices + * connected to an BCM4785 and there are + * probably more. Just assume that the phy is + * supported when it is connected to a SSB core + * for now. + */ + return -ENODEV; + } + + if (!tp->phy_id || + tp->phy_id == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + } + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_57766 || + tg3_asic_rev(tp) == ASIC_REV_5762 || + (tg3_asic_rev(tp) == ASIC_REV_5717 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || + (tg3_asic_rev(tp) == ASIC_REV_57765 && + tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + + tp->eee.supported = SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full; + tp->eee.advertised = ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full; + tp->eee.eee_enabled = 1; + tp->eee.tx_lpi_enabled = 1; + tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; + } + + tg3_phy_init_link_config(tp); + + if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && + !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !tg3_flag(tp, ENABLE_APE) && + !tg3_flag(tp, ENABLE_ASF)) { + u32 bmsr, dummy; + + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + goto skip_phy_reset; + + err = tg3_phy_reset(tp); + if (err) + return err; + + tg3_phy_set_wirespeed(tp); + + if (!tg3_phy_copper_an_config_ok(tp, &dummy)) { + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } + } + +skip_phy_reset: + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + err = tg3_init_5401phy_dsp(tp); + } + + return err; +} + +static void tg3_read_vpd(struct tg3 *tp) +{ + u8 *vpd_data; + unsigned int block_end, rosize, len; + u32 vpdlen; + int j, i = 0; + + vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); + if (!vpd_data) + goto out_no_vpd; + + i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + rosize = pci_vpd_lrdt_size(&vpd_data[i]); + block_end = i + 
PCI_VPD_LRDT_TAG_SIZE + rosize; + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > vpdlen) + goto out_not_found; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j > 0) { + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&vpd_data[j], "1028", 4)) + goto partno; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto partno; + + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end) + goto partno; + + if (len >= sizeof(tp->fw_ver)) + len = sizeof(tp->fw_ver) - 1; + memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); + snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, + &vpd_data[j]); + } + +partno: + i = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_PARTNO); + if (i < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[i]); + + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len > TG3_BPN_SIZE || + (len + i) > vpdlen) + goto out_not_found; + + memcpy(tp->board_part_number, &vpd_data[i], len); + +out_not_found: + kfree(vpd_data); + if (tp->board_part_number[0]) + return; + +out_no_vpd: + if (tg3_asic_rev(tp) == ASIC_REV_5717) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) + strcpy(tp->board_part_number, "BCM5717"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) + strcpy(tp->board_part_number, "BCM5718"); + else + goto nomatch; + } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + strcpy(tp->board_part_number, "BCM57780"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else + goto nomatch; + } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) + strcpy(tp->board_part_number, "BCM57761"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) + strcpy(tp->board_part_number, "BCM57765"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) + strcpy(tp->board_part_number, "BCM57781"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) + strcpy(tp->board_part_number, "BCM57785"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) + strcpy(tp->board_part_number, "BCM57791"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + strcpy(tp->board_part_number, "BCM57795"); + else + goto nomatch; + } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) + strcpy(tp->board_part_number, "BCM57762"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) + strcpy(tp->board_part_number, "BCM57766"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) + strcpy(tp->board_part_number, "BCM57782"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) + strcpy(tp->board_part_number, "BCM57786"); + else + goto nomatch; + } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { + strcpy(tp->board_part_number, "BCM95906"); + } else { +nomatch: + strcpy(tp->board_part_number, "none"); + } +} + +static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) +{ + u32 val; + + if (tg3_nvram_read(tp, offset, &val) || + (val & 0xfc000000) != 0x0c000000 
|| + tg3_nvram_read(tp, offset + 4, &val) || + val != 0) + return 0; + + return 1; +} + +static void tg3_read_bc_ver(struct tg3 *tp) +{ + u32 val, offset, start, ver_offset; + int i, dst_off; + bool newver = false; + + if (tg3_nvram_read(tp, 0xc, &offset) || + tg3_nvram_read(tp, 0x4, &start)) + return; + + offset = tg3_nvram_logical_addr(tp, offset); + + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + if (tg3_nvram_read(tp, offset + 4, &val)) + return; + + if (val == 0) + newver = true; + } + + dst_off = strlen(tp->fw_ver); + + if (newver) { + if (TG3_VER_SIZE - dst_off < 16 || + tg3_nvram_read(tp, offset + 8, &ver_offset)) + return; + + offset = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset + i, &v)) + return; + + memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); + } + } else { + u32 major, minor; + + if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) + return; + + major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> + TG3_NVM_BCVER_MAJSFT; + minor = ver_offset & TG3_NVM_BCVER_MINMSK; + snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, + "v%d.%02d", major, minor); + } +} + +static void tg3_read_hwsb_ver(struct tg3 *tp) +{ + u32 val, major, minor; + + /* Use native endian representation */ + if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) + return; + + major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> + TG3_NVM_HWSB_CFG1_MAJSFT; + minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> + TG3_NVM_HWSB_CFG1_MINSFT; + + snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); +} + +static void tg3_read_sb_ver(struct tg3 *tp, u32 val) +{ + u32 offset, major, minor, build; + + strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); + + if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) + return; + + switch (val & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + offset = TG3_EEPROM_SB_F1R0_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_2: + offset = TG3_EEPROM_SB_F1R2_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_3: + offset = TG3_EEPROM_SB_F1R3_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_4: + offset = TG3_EEPROM_SB_F1R4_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_5: + offset = TG3_EEPROM_SB_F1R5_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_6: + offset = TG3_EEPROM_SB_F1R6_EDH_OFF; + break; + default: + return; + } + + if (tg3_nvram_read(tp, offset, &val)) + return; + + build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> + TG3_EEPROM_SB_EDH_BLD_SHFT; + major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> + TG3_EEPROM_SB_EDH_MAJ_SHFT; + minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; + + if (minor > 99 || build > 26) + return; + + offset = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, + " v%d.%02d", major, minor); + + if (build > 0) { + offset = strlen(tp->fw_ver); + if (offset < TG3_VER_SIZE - 1) + tp->fw_ver[offset] = 'a' + build - 1; + } +} + +static void tg3_read_mgmtfw_ver(struct tg3 *tp) +{ + u32 val, offset, start; + int i, vlen; + + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) + break; + } + + if (offset == TG3_NVM_DIR_END) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + start = 0x08000000; + else if (tg3_nvram_read(tp, offset - 4, &start)) + return; + + if (tg3_nvram_read(tp, offset + 4, &offset) || + !tg3_fw_img_is_valid(tp, offset) || + tg3_nvram_read(tp, offset 
+ 8, &val)) + return; + + offset += val - start; + + vlen = strlen(tp->fw_ver); + + tp->fw_ver[vlen++] = ','; + tp->fw_ver[vlen++] = ' '; + + for (i = 0; i < 4; i++) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset, &v)) + return; + + offset += sizeof(v); + + if (vlen > TG3_VER_SIZE - sizeof(v)) { + memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); + break; + } + + memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); + vlen += sizeof(v); + } +} + +static void tg3_probe_ncsi(struct tg3 *tp) +{ + u32 apedata; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) + tg3_flag_set(tp, APE_HAS_NCSI); +} + +static void tg3_read_dash_ver(struct tg3 *tp) +{ + int vlen; + u32 apedata; + char *fwtype; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); + + if (tg3_flag(tp, APE_HAS_NCSI)) + fwtype = "NCSI"; + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725) + fwtype = "SMASH"; + else + fwtype = "DASH"; + + vlen = strlen(tp->fw_ver); + + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", + fwtype, + (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, + (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, + (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, + (apedata & APE_FW_VERSION_BLDMSK)); +} + +static void tg3_read_otp_ver(struct tg3 *tp) +{ + u32 val, val2; + + if (tg3_asic_rev(tp) != ASIC_REV_5762) + return; + + if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) && + !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) && + TG3_OTP_MAGIC0_VALID(val)) { + u64 val64 = (u64) val << 32 | val2; + u32 ver = 0; + int i, vlen; + + for (i = 0; i < 7; i++) { + if ((val64 & 0xff) == 0) + break; + ver = val64 & 0xff; + val64 >>= 8; + } + vlen = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver); + } +} + +static void tg3_read_fw_ver(struct tg3 *tp) +{ + u32 val; + bool vpd_vers = false; + + if (tp->fw_ver[0] != 0) + vpd_vers = true; + + if (tg3_flag(tp, NO_NVRAM)) { + strcat(tp->fw_ver, "sb"); + tg3_read_otp_ver(tp); + return; + } + + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val == TG3_EEPROM_MAGIC) + tg3_read_bc_ver(tp); + else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) + tg3_read_sb_ver(tp, val); + else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + tg3_read_hwsb_ver(tp); + + if (tg3_flag(tp, ENABLE_ASF)) { + if (tg3_flag(tp, ENABLE_APE)) { + tg3_probe_ncsi(tp); + if (!vpd_vers) + tg3_read_dash_ver(tp); + } else if (!vpd_vers) { + tg3_read_mgmtfw_ver(tp); + } + } + + tp->fw_ver[TG3_VER_SIZE - 1] = 0; +} + +static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) +{ + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + return TG3_RX_RET_MAX_SIZE_5717; + else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) + return TG3_RX_RET_MAX_SIZE_5700; + else + return TG3_RX_RET_MAX_SIZE_5705; +} + +static const struct pci_device_id tg3_write_reorder_chipsets[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, + { }, +}; + +static struct pci_dev *tg3_find_peer(struct tg3 *tp) +{ + struct pci_dev *peer; + unsigned int func, devnr = tp->pdev->devfn & ~7; + + for (func = 0; func < 8; func++) { + peer = pci_get_slot(tp->pdev->bus, devnr 
| func); + if (peer && peer != tp->pdev) + break; + pci_dev_put(peer); + } + /* 5704 can be configured in single-port mode, set peer to + * tp->pdev in that case. + */ + if (!peer) { + peer = tp->pdev; + return peer; + } + + /* + * We don't need to keep the refcount elevated; there's no way + * to remove one half of this device without removing the other + */ + pci_dev_put(peer); + + return peer; +} + +static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) +{ + tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; + if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) { + u32 reg; + + /* All devices that use the alternate + * ASIC REV location have a CPMU. + */ + tg3_flag_set(tp, CPMU_PRESENT); + + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) + reg = TG3PCI_GEN2_PRODID_ASICREV; + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) + reg = TG3PCI_GEN15_PRODID_ASICREV; + else + reg = TG3PCI_PRODID_ASICREV; + + pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); + } + + /* Wrong chip ID in 5752 A0. This code can be removed later + * as A0 is not in production. 
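[Editorial aside: tg3_detect_asic_rev() above derives the packed chip revision id from the MISC_HOST_CTRL register and then treats narrower prefixes of it as the ASIC and chip revisions. The sketch below only illustrates that layering; the shift widths are placeholders and are not guaranteed to match tg3.h.]

#include <stdint.h>
#include <stdio.h>

#define SKETCH_CHIPREV_SHIFT	16	/* stand-in for MISC_HOST_CTRL_CHIPREV_SHIFT */

static uint32_t chip_rev_id(uint32_t misc_host_ctrl)
{
	return misc_host_ctrl >> SKETCH_CHIPREV_SHIFT;
}

static uint32_t asic_rev(uint32_t rev_id) { return rev_id >> 12; }	/* assumed width */
static uint32_t chip_rev(uint32_t rev_id) { return rev_id >> 8; }	/* assumed width */

int main(void)
{
	uint32_t id = chip_rev_id(0x70000000u);	/* arbitrary sample register value */

	printf("rev_id=0x%04x asic=0x%x chip=0x%x\n", id, asic_rev(id), chip_rev(id));
	return 0;
}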
+ */ + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW) + tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0) + tp->pci_chip_rev_id = CHIPREV_ID_5720_A0; + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (tg3_asic_rev(tp) == ASIC_REV_57765 || + tg3_asic_rev(tp) == ASIC_REV_57766) + tg3_flag_set(tp, 57765_CLASS); + + if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5762) + tg3_flag_set(tp, 57765_PLUS); + + /* Intentionally exclude ASIC_REV_5906 */ + if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5787 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5761 || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); + + if (tg3_asic_rev(tp) == ASIC_REV_5780 || + tg3_asic_rev(tp) == ASIC_REV_5714) + tg3_flag_set(tp, 5780_CLASS); + + if (tg3_asic_rev(tp) == ASIC_REV_5750 || + tg3_asic_rev(tp) == ASIC_REV_5752 || + tg3_asic_rev(tp) == ASIC_REV_5906 || + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); + + if (tg3_asic_rev(tp) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); +} + +static bool tg3_10_100_only_device(struct tg3 *tp, + const struct pci_device_id *ent) +{ + u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK; + + if ((tg3_asic_rev(tp) == ASIC_REV_5703 && + (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || + (tp->phy_flags & TG3_PHYFLG_IS_FET)) + return true; + + if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { + if (tg3_asic_rev(tp) == ASIC_REV_5705) { + if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) + return true; + } else { + return true; + } + } + + return false; +} + +static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) +{ + u32 misc_ctrl_reg; + u32 pci_state_reg, grc_misc_cfg; + u32 val; + u16 pci_cmd; + int err; + + /* Force memory write invalidate off. If we leave it on, + * then on 5700_BX chips we have to enable a workaround. + * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary + * to match the cacheline size. The Broadcom driver have this + * workaround but turns MWI off all the times so never uses + * it. This seems to suggest that the workaround is insufficient. + */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + /* Important! -- Make sure register accesses are byteswapped + * correctly. Also, for those chips that require it, make + * sure that indirect register accesses are enabled before + * the first operation. + */ + pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + &misc_ctrl_reg); + tp->misc_host_ctrl |= (misc_ctrl_reg & + MISC_HOST_CTRL_CHIPREV); + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + tg3_detect_asic_rev(tp, misc_ctrl_reg); + + /* If we have 5702/03 A1 or A2 on certain ICH chipsets, + * we need to disable memory and use config. cycles + * only to access all registers. The 5702/03 chips + * can mistakenly decode the special cycles from the + * ICH chipsets as memory write cycles, causing corruption + * of register and memory space. 
Only certain ICH bridges + * will drive special cycles with non-zero data during the + * address phase which can fall within the 5703's address + * range. This is not an ICH bug as the PCI spec allows + * non-zero address during special cycles. However, only + * these ICH bridges are known to drive non-zero addresses + * during special cycles. + * + * Since special cycles do not cross PCI bridges, we only + * enable this workaround if the 5703 is on the secondary + * bus of these ICH bridges. + */ + if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) || + (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + u32 rev; + } ich_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, + 0xa }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, + PCI_ANY_ID }, + { }, + }; + struct tg3_dev_id *pci_id = &ich_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (pci_id->rev != PCI_ANY_ID) { + if (bridge->revision > pci_id->rev) + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number == + tp->pdev->bus->number)) { + tg3_flag_set(tp, ICH_WORKAROUND); + pci_dev_put(bridge); + break; + } + } + } + + if (tg3_asic_rev(tp) == ASIC_REV_5701) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + } bridge_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, + { }, + }; + struct tg3_dev_id *pci_id = &bridge_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, + pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->busn_res.end >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 5701_DMA_BUG); + pci_dev_put(bridge); + break; + } + } + } + + /* The EPB bridge inside 5714, 5715, and 5780 cannot support + * DMA addresses > 40-bit. This bridge may have other additional + * 57xx devices behind it in some 4-port NIC designs for example. + * Any tg3 device found behind the bridge will also need the 40-bit + * DMA workaround. + */ + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_set(tp, 40BIT_DMA_BUG); + tp->msi_cap = tp->pdev->msi_cap; + } else { + struct pci_dev *bridge = NULL; + + do { + bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_EPB, + bridge); + if (bridge && bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->busn_res.end >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 40BIT_DMA_BUG); + pci_dev_put(bridge); + break; + } + } while (bridge); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_asic_rev(tp) == ASIC_REV_5714) + tp->pdev_peer = tg3_find_peer(tp); + + /* Determine TSO capabilities */ + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0) + ; /* Do nothing. HW bug. 
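[Editorial aside: the TSO determination that follows is a cascade, newest chips first: the latest hardware TSO engine, then older hardware engines, and finally firmware TSO for the oldest parts, with one revision excluded outright for a hardware bug. A toy sketch of that tiering; the boolean predicates stand in for the tg3_flag()/tg3_asic_rev() checks and are not the driver's API.]

#include <stdio.h>
#include <stdbool.h>

enum tso_mode { TSO_NONE, TSO_FW, TSO_HW_1, TSO_HW_2, TSO_HW_3 };

static enum tso_mode pick_tso(bool hw_bug, bool is_57765_plus,
			      bool is_5755_plus, bool is_5750_plus,
			      bool too_old_for_fw_tso)
{
	if (hw_bug)
		return TSO_NONE;	/* e.g. the 5719 A0 case below */
	if (is_57765_plus)
		return TSO_HW_3;
	if (is_5755_plus)
		return TSO_HW_2;
	if (is_5750_plus)
		return TSO_HW_1;
	return too_old_for_fw_tso ? TSO_NONE : TSO_FW;
}

int main(void)
{
	printf("%d\n", pick_tso(false, false, true, true, false));	/* prints TSO_HW_2 */
	return 0;
}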
*/ + else if (tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, HW_TSO_3); + else if (tg3_flag(tp, 5755_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5906) + tg3_flag_set(tp, HW_TSO_2); + else if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, HW_TSO_1); + tg3_flag_set(tp, TSO_BUG); + if (tg3_asic_rev(tp) == ASIC_REV_5750 && + tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2) + tg3_flag_clear(tp, TSO_BUG); + } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701 && + tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { + tg3_flag_set(tp, FW_TSO); + tg3_flag_set(tp, TSO_BUG); + if (tg3_asic_rev(tp) == ASIC_REV_5705) + tp->fw_needed = FIRMWARE_TG3TSO5; + else + tp->fw_needed = FIRMWARE_TG3TSO; + } + + /* Selectively allow TSO based on operating conditions */ + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3) || + tg3_flag(tp, FW_TSO)) { + /* For firmware TSO, assume ASF is disabled. + * We'll disable TSO later if we discover ASF + * is enabled in tg3_get_eeprom_hw_cfg(). + */ + tg3_flag_set(tp, TSO_CAPABLE); + } else { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) + tp->fw_needed = FIRMWARE_TG3; + + if (tg3_asic_rev(tp) == ASIC_REV_57766) + tp->fw_needed = FIRMWARE_TG357766; + + tp->irq_max = 1; + + if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSI); + if (tg3_chip_rev(tp) == CHIPREV_5750_AX || + tg3_chip_rev(tp) == CHIPREV_5750_BX || + (tg3_asic_rev(tp) == ASIC_REV_5714 && + tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 && + tp->pdev_peer == tp->pdev)) + tg3_flag_clear(tp, SUPPORT_MSI); + + if (tg3_flag(tp, 5755_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5906) { + tg3_flag_set(tp, 1SHOT_MSI); + } + + if (tg3_flag(tp, 57765_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSIX); + tp->irq_max = TG3_IRQ_MAX_VECS; + } + } + + tp->txq_max = 1; + tp->rxq_max = 1; + if (tp->irq_max > 1) { + tp->rxq_max = TG3_RSS_MAX_NUM_QS; + tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS); + + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) + tp->txq_max = tp->irq_max - 1; + } + + if (tg3_flag(tp, 5755_PLUS) || + tg3_asic_rev(tp) == ASIC_REV_5906) + tg3_flag_set(tp, SHORT_DMA_BUG); + + if (tg3_asic_rev(tp) == ASIC_REV_5719) + tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; + + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) + tg3_flag_set(tp, LRG_PROD_RING_CAP); + + if (tg3_flag(tp, 57765_PLUS) && + tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0) + tg3_flag_set(tp, USE_JUMBO_BDFLAG); + + if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, USE_JUMBO_BDFLAG)) + tg3_flag_set(tp, JUMBO_CAPABLE); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + + if (pci_is_pcie(tp->pdev)) { + u16 lnkctl; + + tg3_flag_set(tp, PCI_EXPRESS); + + pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + tg3_flag_clear(tp, HW_TSO_2); + tg3_flag_clear(tp, TSO_CAPABLE); + } + if (tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5761 || + tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1) + tg3_flag_set(tp, CLKREQ_BUG); + } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) { + tg3_flag_set(tp, L1PLLPD_EN); + } + } else if (tg3_asic_rev(tp) == ASIC_REV_5785) { + /* BCM5785 devices are 
effectively PCIe devices, and should + * follow PCIe codepaths, but do not have a PCIe capabilities + * section. + */ + tg3_flag_set(tp, PCI_EXPRESS); + } else if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS)) { + tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); + if (!tp->pcix_cap) { + dev_err(&tp->pdev->dev, + "Cannot find PCI-X capability, aborting\n"); + return -EIO; + } + + if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE)) + tg3_flag_set(tp, PCIX_MODE); + } + + /* If we have an AMD 762 or VIA K8T800 chipset, write + * reordering to the mailbox registers done by the host + * controller can cause major troubles. We read back from + * every mailbox register write to force the writes to be + * posted to the chip in order. + */ + if (pci_dev_present(tg3_write_reorder_chipsets) && + !tg3_flag(tp, PCI_EXPRESS)) + tg3_flag_set(tp, MBOX_WRITE_REORDER); + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + &tp->pci_cacheline_sz); + pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, + &tp->pci_lat_timer); + if (tg3_asic_rev(tp) == ASIC_REV_5703 && + tp->pci_lat_timer < 64) { + tp->pci_lat_timer = 64; + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + + /* Important! -- It is critical that the PCI-X hw workaround + * situation is decided before the first MMIO register access. + */ + if (tg3_chip_rev(tp) == CHIPREV_5700_BX) { + /* 5700 BX chips need to have their TX producer index + * mailboxes written twice to workaround a bug. + */ + tg3_flag_set(tp, TXD_MBOX_HWBUG); + + /* If we are in PCI-X mode, enable register write workaround. + * + * The workaround is to use indirect register accesses + * for all chip writes not to mailbox registers. + */ + if (tg3_flag(tp, PCIX_MODE)) { + u32 pm_reg; + + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + + /* The chip can have it's power management PCI config + * space registers clobbered due to this bug. + * So explicitly force the chip into D0 here. + */ + pci_read_config_dword(tp->pdev, + tp->pdev->pm_cap + PCI_PM_CTRL, + &pm_reg); + pm_reg &= ~PCI_PM_CTRL_STATE_MASK; + pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; + pci_write_config_dword(tp->pdev, + tp->pdev->pm_cap + PCI_PM_CTRL, + pm_reg); + + /* Also, force SERR#/PERR# in PCI command. */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + } + + if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) + tg3_flag_set(tp, PCI_HIGH_SPEED); + if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) + tg3_flag_set(tp, PCI_32BIT); + + /* Chip-specific fixup from Broadcom driver */ + if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) && + (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) { + pci_state_reg |= PCISTATE_RETRY_SAME_DMA; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg); + } + + /* Default fast path register access methods */ + tp->read32 = tg3_read32; + tp->write32 = tg3_write32; + tp->read32_mbox = tg3_read32; + tp->write32_mbox = tg3_write32; + tp->write32_tx_mbox = tg3_write32; + tp->write32_rx_mbox = tg3_write32; + + /* Various workaround register access methods */ + if (tg3_flag(tp, PCIX_TARGET_HWBUG)) + tp->write32 = tg3_write_indirect_reg32; + else if (tg3_asic_rev(tp) == ASIC_REV_5701 || + (tg3_flag(tp, PCI_EXPRESS) && + tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) { + /* + * Back to back register writes can cause problems on these + * chips, the workaround is to read back all reg writes + * except those to mailbox regs. 
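[Editorial aside: the read-back workaround described here is the classic "flush a posted write by reading the register back" pattern used by tg3_write_flush_reg32(). A minimal user-space sketch of that pattern; the base pointer, offset, and fake register array are purely illustrative.]

#include <stdint.h>

static inline void write_flush_reg32(volatile uint32_t *base,
				     unsigned int off, uint32_t val)
{
	base[off / 4] = val;
	(void)base[off / 4];	/* read back so the write is not left posted */
}

int main(void)
{
	static uint32_t fake_regs[16];	/* stand-in for a mapped register window */

	write_flush_reg32(fake_regs, 0x8, 0x1234);
	return 0;
}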
+ * + * See tg3_write_indirect_reg32(). + */ + tp->write32 = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + tp->write32_rx_mbox = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, ICH_WORKAROUND)) { + tp->read32 = tg3_read_indirect_reg32; + tp->write32 = tg3_write_indirect_reg32; + tp->read32_mbox = tg3_read_indirect_mbox; + tp->write32_mbox = tg3_write_indirect_mbox; + tp->write32_tx_mbox = tg3_write_indirect_mbox; + tp->write32_rx_mbox = tg3_write_indirect_mbox; + + iounmap(tp->regs); + tp->regs = NULL; + + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_MEMORY; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + tp->read32_mbox = tg3_read32_mbox_5906; + tp->write32_mbox = tg3_write32_mbox_5906; + tp->write32_tx_mbox = tg3_write32_mbox_5906; + tp->write32_rx_mbox = tg3_write32_mbox_5906; + } + + if (tp->write32 == tg3_write_indirect_reg32 || + (tg3_flag(tp, PCIX_MODE) && + (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701))) + tg3_flag_set(tp, SRAM_USE_CONFIG); + + /* The memory arbiter has to be enabled in order for SRAM accesses + * to succeed. Normally on powerup the tg3 chip firmware will make + * sure it is enabled, but other entities such as system netboot + * code might disable it. + */ + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; + if (tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { + if (tg3_flag(tp, PCIX_MODE)) { + pci_read_config_dword(tp->pdev, + tp->pcix_cap + PCI_X_STATUS, + &val); + tp->pci_fn = val & 0x7; + } + } else if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) { + tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); + if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG) + val = tr32(TG3_CPMU_STATUS); + + if (tg3_asic_rev(tp) == ASIC_REV_5717) + tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0; + else + tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> + TG3_CPMU_STATUS_FSHFT_5719; + } + + if (tg3_flag(tp, FLUSH_POSTED_WRITES)) { + tp->write32_tx_mbox = tg3_write_flush_reg32; + tp->write32_rx_mbox = tg3_write_flush_reg32; + } + + /* Get eeprom hw config before calling tg3_set_power_state(). + * In particular, the TG3_FLAG_IS_NIC flag must be + * determined before calling tg3_set_power_state() so that + * we know whether or not to switch out of Vaux power. + * When the flag is set, it means that GPIO1 is used for eeprom + * write protect and also implies that it is a LOM where GPIOs + * are not used to switch power. + */ + tg3_get_eeprom_hw_cfg(tp); + + if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, + pci_state_reg); + + tg3_ape_lock_init(tp); + } + + /* Set up tp->grc_local_ctrl before calling + * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high + * will bring 5700's external PHY out of reset. + * It is also used as eeprom write protect on LOMs. 
+ */ + tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + /* Unused GPIO3 must be driven as output on 5752 because there + * are no pull-up resistors on unused GPIO pins. + */ + else if (tg3_asic_rev(tp) == ASIC_REV_5752) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + + if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_57780 || + tg3_flag(tp, 57765_CLASS)) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* Turn off the debug UART. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + if (tg3_flag(tp, IS_NIC)) + /* Keep VMain power. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OUTPUT0; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5762) + tp->grc_local_ctrl |= + tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL; + + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + + /* Derive initial jumbo mode from MTU assigned in + * ether_setup() via the alloc_etherdev() call + */ + if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, JUMBO_RING_ENABLE); + + /* Determine WakeOnLan speed to use. */ + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) { + tg3_flag_clear(tp, WOL_SPEED_100MB); + } else { + tg3_flag_set(tp, WOL_SPEED_100MB); + } + + if (tg3_asic_rev(tp) == ASIC_REV_5906) + tp->phy_flags |= TG3_PHYFLG_IS_FET; + + /* A few boards don't want Ethernet@WireSpeed phy feature */ + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + (tg3_asic_rev(tp) == ASIC_REV_5705 && + (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) && + (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) || + (tp->phy_flags & TG3_PHYFLG_IS_FET) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; + + if (tg3_chip_rev(tp) == CHIPREV_5703_AX || + tg3_chip_rev(tp) == CHIPREV_5704_AX) + tp->phy_flags |= TG3_PHYFLG_ADC_BUG; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) + tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; + + if (tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET) && + tg3_asic_rev(tp) != ASIC_REV_5785 && + tg3_asic_rev(tp) != ASIC_REV_57780 && + !tg3_flag(tp, 57765_PLUS)) { + if (tg3_asic_rev(tp) == ASIC_REV_5755 || + tg3_asic_rev(tp) == ASIC_REV_5787 || + tg3_asic_rev(tp) == ASIC_REV_5784 || + tg3_asic_rev(tp) == ASIC_REV_5761) { + if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && + tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) + tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) + tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; + } else + tp->phy_flags |= TG3_PHYFLG_BER_BUG; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != CHIPREV_5784_AX) { + tp->phy_otp = tg3_read_otp_phycfg(tp); + if (tp->phy_otp == 0) + tp->phy_otp = TG3_OTP_DEFAULT; + } + + if (tg3_flag(tp, CPMU_PRESENT)) + tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; + else + tp->mi_mode = MAC_MI_MODE_BASE; + + tp->coalesce_mode = 0; + if (tg3_chip_rev(tp) != CHIPREV_5700_AX && + tg3_chip_rev(tp) != CHIPREV_5700_BX) + tp->coalesce_mode |= HOSTCC_MODE_32BYTE; + + /* Set these bits to enable statistics workaround. 
*/ + if (tg3_asic_rev(tp) == ASIC_REV_5717 || + tg3_asic_rev(tp) == ASIC_REV_5762 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) { + tp->coalesce_mode |= HOSTCC_MODE_ATTN; + tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; + } + + if (tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) + tg3_flag_set(tp, USE_PHYLIB); + + err = tg3_mdio_init(tp); + if (err) + return err; + + /* Initialize data/descriptor byte/word swapping. */ + val = tr32(GRC_MODE); + if (tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) + val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | + GRC_MODE_WORD_SWAP_B2HRX_DATA | + GRC_MODE_B2HRX_ENABLE | + GRC_MODE_HTX2B_ENABLE | + GRC_MODE_HOST_STACKUP); + else + val &= GRC_MODE_HOST_STACKUP; + + tw32(GRC_MODE, val | tp->grc_mode); + + tg3_switch_clocks(tp); + + /* Clear this out for sanity. */ + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ + tw32(TG3PCI_REG_BASE_ADDR, 0); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && + !tg3_flag(tp, PCIX_TARGET_HWBUG)) { + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 || + tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) { + void __iomem *sram_base; + + /* Write some dummy words into the SRAM status block + * area, see if it reads back correctly. If the return + * value is bad, force enable the PCIX workaround. + */ + sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; + + writel(0x00000000, sram_base); + writel(0x00000000, sram_base + 4); + writel(0xffffffff, sram_base + 4); + if (readl(sram_base) != 0x00000000) + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + } + } + + udelay(50); + tg3_nvram_init(tp); + + /* If the device has an NVRAM, no need to load patch firmware */ + if (tg3_asic_rev(tp) == ASIC_REV_57766 && + !tg3_flag(tp, NO_NVRAM)) + tp->fw_needed = NULL; + + grc_misc_cfg = tr32(GRC_MISC_CFG); + grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; + + if (tg3_asic_rev(tp) == ASIC_REV_5705 && + (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || + grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) + tg3_flag_set(tp, IS_5788); + + if (!tg3_flag(tp, IS_5788) && + tg3_asic_rev(tp) != ASIC_REV_5700) + tg3_flag_set(tp, TAGGED_STATUS); + if (tg3_flag(tp, TAGGED_STATUS)) { + tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD); + + tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + } + + /* Preserve the APE MAC_MODE bits */ + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + else + tp->mac_mode = 0; + + if (tg3_10_100_only_device(tp, ent)) + tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; + + err = tg3_phy_probe(tp); + if (err) { + dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err); + /* ... but do not return immediately ... */ + tg3_mdio_fini(tp); + } + + tg3_read_vpd(tp); + tg3_read_fw_ver(tp); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } else { + if (tg3_asic_rev(tp) == ASIC_REV_5700) + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + else + tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT; + } + + /* 5700 {AX,BX} chips have a broken status block link + * change bit implementation, so we must use the + * status register in those cases. 
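[Editorial aside: returning to the SRAM probe a little earlier in this hunk: dummy words are written into the status block area and read back, and a bad read-back forces the PCI-X target workaround on. A self-contained sketch of that probe, with a plain array standing in for the mapped SRAM window; the "corrupted neighbour" interpretation is an assumption drawn from the write pattern, not from the driver comments.]

#include <stdint.h>
#include <stdio.h>

static int sram_probe_needs_workaround(volatile uint32_t *sram)
{
	sram[0] = 0x00000000;
	sram[1] = 0x00000000;
	sram[1] = 0xffffffff;
	return sram[0] != 0x00000000;	/* word 0 disturbed => enable workaround */
}

int main(void)
{
	static uint32_t fake_sram[2];

	printf("workaround needed: %d\n", sram_probe_needs_workaround(fake_sram));
	return 0;
}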
+ */ + if (tg3_asic_rev(tp) == ASIC_REV_5700) + tg3_flag_set(tp, USE_LINKCHG_REG); + else + tg3_flag_clear(tp, USE_LINKCHG_REG); + + /* The led_ctrl is set during tg3_phy_probe, here we might + * have to force the link status polling mechanism based + * upon subsystem IDs. + */ + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + tg3_asic_rev(tp) == ASIC_REV_5701 && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT; + tg3_flag_set(tp, USE_LINKCHG_REG); + } + + /* For all SERDES we poll the MAC status register. */ + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + tg3_flag_set(tp, POLL_SERDES); + else + tg3_flag_clear(tp, POLL_SERDES); + + if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) + tg3_flag_set(tp, POLL_CPMU_LINK); + + tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN; + tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD; + if (tg3_asic_rev(tp) == ASIC_REV_5701 && + tg3_flag(tp, PCIX_MODE)) { + tp->rx_offset = NET_SKB_PAD; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + tp->rx_copy_thresh = ~(u16)0; +#endif + } + + tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1; + tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1; + tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1; + + tp->rx_std_max_post = tp->rx_std_ring_mask + 1; + + /* Increment the rx prod index on the rx std ring by at most + * 8 for these chips to workaround hw errata. + */ + if (tg3_asic_rev(tp) == ASIC_REV_5750 || + tg3_asic_rev(tp) == ASIC_REV_5752 || + tg3_asic_rev(tp) == ASIC_REV_5755) + tp->rx_std_max_post = 8; + + if (tg3_flag(tp, ASPM_WORKAROUND)) + tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & + PCIE_PWR_MGMT_L1_THRESH_MSK; + + return err; +} + +#ifdef CONFIG_SPARC +static int tg3_get_macaddr_sparc(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + struct pci_dev *pdev = tp->pdev; + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + int len; + + addr = of_get_property(dp, "local-mac-address", &len); + if (addr && len == ETH_ALEN) { + memcpy(dev->dev_addr, addr, ETH_ALEN); + return 0; + } + return -ENODEV; +} + +static int tg3_get_default_macaddr_sparc(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + return 0; +} +#endif + +static int tg3_get_device_address(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + u32 hi, lo, mac_offset; + int addr_ok = 0; + int err; + +#ifdef CONFIG_SPARC + if (!tg3_get_macaddr_sparc(tp)) + return 0; +#endif + + if (tg3_flag(tp, IS_SSB_CORE)) { + err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); + if (!err && is_valid_ether_addr(&dev->dev_addr[0])) + return 0; + } + + mac_offset = 0x7c; + if (tg3_asic_rev(tp) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + mac_offset = 0xcc; + if (tg3_nvram_lock(tp)) + tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); + else + tg3_nvram_unlock(tp); + } else if (tg3_flag(tp, 5717_PLUS)) { + if (tp->pci_fn & 1) + mac_offset = 0xcc; + if (tp->pci_fn > 1) + mac_offset += 0x18c; + } else if (tg3_asic_rev(tp) == ASIC_REV_5906) + mac_offset = 0x10; + + /* First try to get it from MAC address mailbox. 
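[Editorial aside: in the mailbox decode that follows, the high word carries a signature in its upper half (0x484b, the ASCII bytes "HK") plus the first two address bytes, and the low word carries the remaining four. A self-contained sketch of that unpacking with arbitrary sample words:]

#include <stdint.h>
#include <stdio.h>

static int mac_from_mailbox(uint32_t hi, uint32_t lo, uint8_t addr[6])
{
	if ((hi >> 16) != 0x484b)
		return -1;		/* no signature; the driver tries NVRAM next */
	addr[0] = (hi >> 8) & 0xff;
	addr[1] = (hi >> 0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >> 8) & 0xff;
	addr[5] = (lo >> 0) & 0xff;
	return 0;
}

int main(void)
{
	uint8_t a[6];

	if (!mac_from_mailbox(0x484b0010u, 0x18a2b3c4u, a))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       a[0], a[1], a[2], a[3], a[4], a[5]);
	return 0;
}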
*/ + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); + if ((hi >> 16) == 0x484b) { + dev->dev_addr[0] = (hi >> 8) & 0xff; + dev->dev_addr[1] = (hi >> 0) & 0xff; + + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[5] = (lo >> 0) & 0xff; + + /* Some old bootcode may report a 0 MAC address in SRAM */ + addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); + } + if (!addr_ok) { + /* Next, try NVRAM. */ + if (!tg3_flag(tp, NO_NVRAM) && + !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && + !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { + memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); + memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + } + /* Finally just fetch it out of the MAC control regs. */ + else { + hi = tr32(MAC_ADDR_0_HIGH); + lo = tr32(MAC_ADDR_0_LOW); + + dev->dev_addr[5] = lo & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[1] = hi & 0xff; + dev->dev_addr[0] = (hi >> 8) & 0xff; + } + } + + if (!is_valid_ether_addr(&dev->dev_addr[0])) { +#ifdef CONFIG_SPARC + if (!tg3_get_default_macaddr_sparc(tp)) + return 0; +#endif + return -EINVAL; + } + return 0; +} + +#define BOUNDARY_SINGLE_CACHELINE 1 +#define BOUNDARY_MULTI_CACHELINE 2 + +static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val) +{ + int cacheline_size; + u8 byte; + int goal; + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + /* On 5703 and later chips, the boundary bits have no + * effect. + */ + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701 && + !tg3_flag(tp, PCI_EXPRESS)) + goto out; + +#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) + goal = BOUNDARY_MULTI_CACHELINE; +#else +#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) + goal = BOUNDARY_SINGLE_CACHELINE; +#else + goal = 0; +#endif +#endif + + if (tg3_flag(tp, 57765_PLUS)) { + val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + goto out; + } + + if (!goal) + goto out; + + /* PCI controllers on most RISC systems tend to disconnect + * when a device tries to burst across a cache-line boundary. + * Therefore, letting tg3 do so just wastes PCI bandwidth. + * + * Unfortunately, for PCI-E there are only limited + * write-side controls for this, and thus for reads + * we will still get the disconnects. We'll also waste + * these PCI cycles for both read and write for chips + * other than 5700 and 5701 which do not implement the + * boundary bits. 
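[Editorial aside: a sketch of the first half of tg3_calc_dma_bndry() above. The PCI cache line size register counts 32-bit words (zero meaning unknown, treated as 1024 bytes), and the condition for skipping boundary tuning is reproduced as written; the per-bus switch that picks the actual DMA_RWCTRL_* encodings is deliberately omitted, so this is only an outline, not the driver's full logic.]

#include <stdio.h>
#include <stdbool.h>

static int cacheline_bytes(unsigned char cls_reg)
{
	return cls_reg ? cls_reg * 4 : 1024;
}

/* Mirrors the early-out above: tuning only happens on 5700/5701 or when
 * the device is PCI Express. */
static bool boundary_bits_matter(bool is_5700_or_5701, bool is_pci_express)
{
	return is_5700_or_5701 || is_pci_express;
}

int main(void)
{
	printf("%d bytes, tune=%d\n", cacheline_bytes(16),
	       boundary_bits_matter(false, true));
	return 0;
}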
+ */ + if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | + DMA_RWCTRL_WRITE_BNDRY_128_PCIX); + } else { + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + } + break; + + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | + DMA_RWCTRL_WRITE_BNDRY_256_PCIX); + break; + + default: + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + break; + } + } else if (tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; + break; + } + /* fallthrough */ + case 128: + default: + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; + break; + } + } else { + switch (cacheline_size) { + case 16: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_16 | + DMA_RWCTRL_WRITE_BNDRY_16); + break; + } + /* fallthrough */ + case 32: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_32 | + DMA_RWCTRL_WRITE_BNDRY_32); + break; + } + /* fallthrough */ + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_64 | + DMA_RWCTRL_WRITE_BNDRY_64); + break; + } + /* fallthrough */ + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128 | + DMA_RWCTRL_WRITE_BNDRY_128); + break; + } + /* fallthrough */ + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256 | + DMA_RWCTRL_WRITE_BNDRY_256); + break; + case 512: + val |= (DMA_RWCTRL_READ_BNDRY_512 | + DMA_RWCTRL_WRITE_BNDRY_512); + break; + case 1024: + default: + val |= (DMA_RWCTRL_READ_BNDRY_1024 | + DMA_RWCTRL_WRITE_BNDRY_1024); + break; + } + } + +out: + return val; +} + +static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, + int size, bool to_device) +{ + struct tg3_internal_buffer_desc test_desc; + u32 sram_dma_descs; + int i, ret; + + sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; + + tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); + tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); + tw32(RDMAC_STATUS, 0); + tw32(WDMAC_STATUS, 0); + + tw32(BUFMGR_MODE, 0); + tw32(FTQ_RESET, 0); + + test_desc.addr_hi = ((u64) buf_dma) >> 32; + test_desc.addr_lo = buf_dma & 0xffffffff; + test_desc.nic_mbuf = 0x00002100; + test_desc.len = size; + + /* + * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz + * the *second* time the tg3 driver was getting loaded after an + * initial scan. + * + * Broadcom tells me: + * ...the DMA engine is connected to the GRC block and a DMA + * reset may affect the GRC block in some unpredictable way... + * The behavior of resets to individual blocks has not been tested. + * + * Broadcom noted the GRC reset will also reset all sub-components. 
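[Editorial aside: the copy loop that follows pushes the test descriptor into NIC SRAM one 32-bit word at a time through the TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA pair. A toy sketch of that word-by-word serialization; plain arrays stand in for config space and SRAM, and the struct is trimmed down from tg3_internal_buffer_desc.]

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct toy_desc {
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint32_t nic_mbuf;
	uint32_t len_flags;
};

static uint32_t toy_sram[64];

/* Models "program the window base, then write the data register". */
static void window_write(uint32_t sram_addr, uint32_t val)
{
	toy_sram[sram_addr / 4] = val;
}

static void push_desc(uint32_t sram_base, const struct toy_desc *desc)
{
	uint32_t words[sizeof(*desc) / sizeof(uint32_t)];
	size_t i;

	memcpy(words, desc, sizeof(words));	/* word view without aliasing casts */
	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		window_write(sram_base + i * sizeof(uint32_t), words[i]);
}

int main(void)
{
	struct toy_desc d = { 0, 0x1000, 0x00002100, 0x2000 };

	push_desc(0x40, &d);
	printf("0x%08x\n", toy_sram[0x40 / 4 + 2]);	/* the nic_mbuf word */
	return 0;
}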
+ */ + if (to_device) { + test_desc.cqid_sqid = (13 << 8) | 2; + + tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); + udelay(40); + } else { + test_desc.cqid_sqid = (16 << 8) | 7; + + tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); + udelay(40); + } + test_desc.flags = 0x00000005; + + for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { + u32 val; + + val = *(((u32 *)&test_desc) + i); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, + sram_dma_descs + (i * sizeof(u32))); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + } + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + + if (to_device) + tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); + else + tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); + + ret = -ENODEV; + for (i = 0; i < 40; i++) { + u32 val; + + if (to_device) + val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); + else + val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); + if ((val & 0xffff) == sram_dma_descs) { + ret = 0; + break; + } + + udelay(100); + } + + return ret; +} + +#define TEST_BUFFER_SIZE 0x2000 + +static const struct pci_device_id tg3_dma_wait_state_chipsets[] = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, +}; + +static int tg3_test_dma(struct tg3 *tp) +{ + dma_addr_t buf_dma; + u32 *buf, saved_dma_rwctrl; + int ret = 0; + + buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, + &buf_dma, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_nofree; + } + + tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | + (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); + + tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); + + if (tg3_flag(tp, 57765_PLUS)) + goto out; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* DMA read watermark not used on PCIE */ + tp->dma_rwctrl |= 0x00180000; + } else if (!tg3_flag(tp, PCIX_MODE)) { + if (tg3_asic_rev(tp) == ASIC_REV_5705 || + tg3_asic_rev(tp) == ASIC_REV_5750) + tp->dma_rwctrl |= 0x003f0000; + else + tp->dma_rwctrl |= 0x003f000f; + } else { + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704) { + u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); + u32 read_water = 0x7; + + /* If the 5704 is behind the EPB bridge, we can + * do the less restrictive ONE_DMA workaround for + * better performance. + */ + if (tg3_flag(tp, 40BIT_DMA_BUG) && + tg3_asic_rev(tp) == ASIC_REV_5704) + tp->dma_rwctrl |= 0x8000; + else if (ccval == 0x6 || ccval == 0x7) + tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; + + if (tg3_asic_rev(tp) == ASIC_REV_5703) + read_water = 4; + /* Set bit 23 to enable PCIX hw bug fix */ + tp->dma_rwctrl |= + (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | + (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | + (1 << 23); + } else if (tg3_asic_rev(tp) == ASIC_REV_5780) { + /* 5780 always in PCIX mode */ + tp->dma_rwctrl |= 0x00144000; + } else if (tg3_asic_rev(tp) == ASIC_REV_5714) { + /* 5714 always in PCIX mode */ + tp->dma_rwctrl |= 0x00148000; + } else { + tp->dma_rwctrl |= 0x001b000f; + } + } + if (tg3_flag(tp, ONE_DMA_AT_ONCE)) + tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; + + if (tg3_asic_rev(tp) == ASIC_REV_5703 || + tg3_asic_rev(tp) == ASIC_REV_5704) + tp->dma_rwctrl &= 0xfffffff0; + + if (tg3_asic_rev(tp) == ASIC_REV_5700 || + tg3_asic_rev(tp) == ASIC_REV_5701) { + /* Remove this if it causes problems for some boards. */ + tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; + + /* On 5700/5701 chips, we need to set this bit. + * Otherwise the chip will issue cacheline transactions + * to streamable DMA memory with not all the byte + * enables turned on. 
This is an error on several + * RISC PCI controllers, in particular sparc64. + * + * On 5703/5704 chips, this bit has been reassigned + * a different meaning. In particular, it is used + * on those chips to enable a PCI-X workaround. + */ + tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + + if (tg3_asic_rev(tp) != ASIC_REV_5700 && + tg3_asic_rev(tp) != ASIC_REV_5701) + goto out; + + /* It is best to perform DMA test with maximum write burst size + * to expose the 5700/5701 write DMA bug. + */ + saved_dma_rwctrl = tp->dma_rwctrl; + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + while (1) { + u32 *p = buf, i; + + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) + p[i] = i; + + /* Send the buffer to the chip. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true); + if (ret) { + dev_err(&tp->pdev->dev, + "%s: Buffer write failed. err = %d\n", + __func__, ret); + break; + } + + /* Now read it back. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false); + if (ret) { + dev_err(&tp->pdev->dev, "%s: Buffer read failed. " + "err = %d\n", __func__, ret); + break; + } + + /* Verify it. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + if (p[i] == i) + continue; + + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + break; + } else { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on read back! " + "(%d != %d)\n", __func__, p[i], i); + ret = -ENODEV; + goto out; + } + } + + if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { + /* Success. */ + ret = 0; + break; + } + } + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + /* DMA test passed without adjusting DMA boundary, + * now look for chipsets that are known to expose the + * DMA bug without failing the test. + */ + if (pci_dev_present(tg3_dma_wait_state_chipsets)) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + } else { + /* Safe to use the calculated DMA boundary. 
*/ + tp->dma_rwctrl = saved_dma_rwctrl; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + +out: + dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); +out_nofree: + return ret; +} + +static void tg3_init_bufmgr_config(struct tg3 *tp) +{ + if (tg3_flag(tp, 57765_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_57765; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_57765; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_57765; + } else if (tg3_flag(tp, 5705_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5705; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5705; + if (tg3_asic_rev(tp) == ASIC_REV_5906) { + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5906; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5906; + } + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_5780; + } else { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO; + } + + tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; + tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; +} + +static char *tg3_phy_string(struct tg3 *tp) +{ + switch (tp->phy_id & TG3_PHY_ID_MASK) { + case TG3_PHY_ID_BCM5400: return "5400"; + case TG3_PHY_ID_BCM5401: return "5401"; + case TG3_PHY_ID_BCM5411: return "5411"; + case TG3_PHY_ID_BCM5701: return "5701"; + case TG3_PHY_ID_BCM5703: return "5703"; + case TG3_PHY_ID_BCM5704: return "5704"; + case TG3_PHY_ID_BCM5705: return "5705"; + case TG3_PHY_ID_BCM5750: return "5750"; + case TG3_PHY_ID_BCM5752: return "5752"; + case TG3_PHY_ID_BCM5714: return "5714"; + case TG3_PHY_ID_BCM5780: return "5780"; + case TG3_PHY_ID_BCM5755: return "5755"; + case TG3_PHY_ID_BCM5787: return "5787"; + case TG3_PHY_ID_BCM5784: return "5784"; + case TG3_PHY_ID_BCM5756: return "5722/5756"; + case TG3_PHY_ID_BCM5906: return "5906"; + case TG3_PHY_ID_BCM5761: return "5761"; + case TG3_PHY_ID_BCM5718C: return "5718C"; + case TG3_PHY_ID_BCM5718S: return "5718S"; + case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM5720C: return "5720C"; + case TG3_PHY_ID_BCM5762: return "5762C"; + case TG3_PHY_ID_BCM8002: return "8002/serdes"; + case 0: return "serdes"; + default: return "unknown"; + } +} + +static char *tg3_bus_string(struct tg3 *tp, char *str) +{ + if (tg3_flag(tp, PCI_EXPRESS)) { + strcpy(str, "PCI Express"); + return str; + } else if (tg3_flag(tp, PCIX_MODE)) { + u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; + + strcpy(str, "PCIX:"); + 
+ if ((clock_ctrl == 7) || + ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == + GRC_MISC_CFG_BOARD_ID_5704CIOBE)) + strcat(str, "133MHz"); + else if (clock_ctrl == 0) + strcat(str, "33MHz"); + else if (clock_ctrl == 2) + strcat(str, "50MHz"); + else if (clock_ctrl == 4) + strcat(str, "66MHz"); + else if (clock_ctrl == 6) + strcat(str, "100MHz"); + } else { + strcpy(str, "PCI:"); + if (tg3_flag(tp, PCI_HIGH_SPEED)) + strcat(str, "66MHz"); + else + strcat(str, "33MHz"); + } + if (tg3_flag(tp, PCI_32BIT)) + strcat(str, ":32-bit"); + else + strcat(str, ":64-bit"); + return str; +} + +static void tg3_init_coal(struct tg3 *tp) +{ + struct ethtool_coalesce *ec = &tp->coal; + + memset(ec, 0, sizeof(*ec)); + ec->cmd = ETHTOOL_GCOALESCE; + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; + ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; + ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; + ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; + ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; + ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; + + if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD)) { + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; + } + + if (tg3_flag(tp, 5705_PLUS)) { + ec->rx_coalesce_usecs_irq = 0; + ec->tx_coalesce_usecs_irq = 0; + ec->stats_block_coalesce_usecs = 0; + } +} + +static int tg3_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + struct tg3 *tp; + int i, err; + u32 sndmbx, rcvmbx, intmbx; + char str[40]; + u64 dma_mask, persist_dma_mask; + netdev_features_t features = 0; + + printk_once(KERN_INFO "%s\n", version); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); + if (!dev) { + err = -ENOMEM; + goto err_out_free_res; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pdev = pdev; + tp->dev = dev; + tp->rx_mode = TG3_DEF_RX_MODE; + tp->tx_mode = TG3_DEF_TX_MODE; + tp->irq_sync = 1; + tp->pcierr_recovery = false; + + if (tg3_debug > 0) + tp->msg_enable = tg3_debug; + else + tp->msg_enable = TG3_DEF_MSG_ENABLE; + + if (pdev_is_ssb_gige_core(pdev)) { + tg3_flag_set(tp, IS_SSB_CORE); + if (ssb_gige_must_flush_posted_writes(pdev)) + tg3_flag_set(tp, FLUSH_POSTED_WRITES); + if (ssb_gige_one_dma_at_once(pdev)) + tg3_flag_set(tp, ONE_DMA_AT_ONCE); + if (ssb_gige_have_roboswitch(pdev)) { + tg3_flag_set(tp, USE_PHYLIB); + tg3_flag_set(tp, ROBOSWITCH); + } + if (ssb_gige_is_rgmii(pdev)) + tg3_flag_set(tp, RGMII_MODE); + } + + /* The word/byte swap controls here control register access byte + * swapping. DMA data byte swapping is controlled in the GRC_MODE + * setting below. 
+ */ + tp->misc_host_ctrl = + MISC_HOST_CTRL_MASK_PCI_INT | + MISC_HOST_CTRL_WORD_SWAP | + MISC_HOST_CTRL_INDIR_ACCESS | + MISC_HOST_CTRL_PCISTATE_RW; + + /* The NONFRM (non-frame) byte/word swap controls take effect + * on descriptor entries, anything which isn't packet data. + * + * The StrongARM chips on the board (one for tx, one for rx) + * are running in big-endian mode. + */ + tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | + GRC_MODE_WSWAP_NONFRM_DATA); +#ifdef __BIG_ENDIAN + tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; +#endif + spin_lock_init(&tp->lock); + spin_lock_init(&tp->indirect_lock); + INIT_WORK(&tp->reset_task, tg3_reset_task); + + tp->regs = pci_ioremap_bar(pdev, BAR_0); + if (!tp->regs) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_dev; + } + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) { + tg3_flag_set(tp, ENABLE_APE); + tp->aperegs = pci_ioremap_bar(pdev, BAR_2); + if (!tp->aperegs) { + dev_err(&pdev->dev, + "Cannot map APE registers, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + } + + tp->rx_pending = TG3_DEF_RX_RING_PENDING; + tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; + + dev->ethtool_ops = &tg3_ethtool_ops; + dev->watchdog_timeo = TG3_TX_TIMEOUT; + dev->netdev_ops = &tg3_netdev_ops; + dev->irq = pdev->irq; + + err = tg3_get_invariants(tp, ent); + if (err) { + dev_err(&pdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_apeunmap; + } + + /* The EPB bridge inside 5714, 5715, and 5780 and any + * device behind the EPB cannot support DMA addresses > 40-bit. + * On 64-bit systems with IOMMU, use 40-bit dma_mask. + * On 64-bit systems without IOMMU, use 64-bit dma_mask and + * do DMA address check in tg3_start_xmit(). + */ + if (tg3_flag(tp, IS_5788)) + persist_dma_mask = dma_mask = DMA_BIT_MASK(32); + else if (tg3_flag(tp, 40BIT_DMA_BUG)) { + persist_dma_mask = dma_mask = DMA_BIT_MASK(40); +#ifdef CONFIG_HIGHMEM + dma_mask = DMA_BIT_MASK(64); +#endif + } else + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + + /* Configure DMA attributes. */ + if (dma_mask > DMA_BIT_MASK(32)) { + err = pci_set_dma_mask(pdev, dma_mask); + if (!err) { + features |= NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, + persist_dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "Unable to obtain 64 bit " + "DMA for consistent allocations\n"); + goto err_out_apeunmap; + } + } + } + if (err || dma_mask == DMA_BIT_MASK(32)) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_out_apeunmap; + } + } + + tg3_init_bufmgr_config(tp); + + /* 5700 B0 chips do not support checksumming correctly due + * to hardware bugs. 
+ */ + if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) { + features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + + if (tg3_flag(tp, 5755_PLUS)) + features |= NETIF_F_IPV6_CSUM; + } + + /* TSO is on by default on chips that support hardware TSO. + * Firmware TSO on older chips gives lower performance, so it + * is off by default, but can be enabled using ethtool. + */ + if ((tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) && + (features & NETIF_F_IP_CSUM)) + features |= NETIF_F_TSO; + if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) { + if (features & NETIF_F_IPV6_CSUM) + features |= NETIF_F_TSO6; + if (tg3_flag(tp, HW_TSO_3) || + tg3_asic_rev(tp) == ASIC_REV_5761 || + (tg3_asic_rev(tp) == ASIC_REV_5784 && + tg3_chip_rev(tp) != CHIPREV_5784_AX) || + tg3_asic_rev(tp) == ASIC_REV_5785 || + tg3_asic_rev(tp) == ASIC_REV_57780) + features |= NETIF_F_TSO_ECN; + } + + dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features |= features; + + /* + * Add loopback capability only for a subset of devices that support + * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY + * loopback for the remaining devices. + */ + if (tg3_asic_rev(tp) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) + /* Add the loopback capability */ + features |= NETIF_F_LOOPBACK; + + dev->hw_features |= features; + dev->priv_flags |= IFF_UNICAST_FLT; + + if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 && + !tg3_flag(tp, TSO_CAPABLE) && + !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { + tg3_flag_set(tp, MAX_RXPEND_64); + tp->rx_pending = 63; + } + + err = tg3_get_device_address(tp); + if (err) { + dev_err(&pdev->dev, + "Could not obtain valid ethernet address, aborting\n"); + goto err_out_apeunmap; + } + + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->tp = tp; + tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; + + tnapi->int_mbox = intmbx; + if (i <= 4) + intmbx += 0x8; + else + intmbx += 0x4; + + tnapi->consmbox = rcvmbx; + tnapi->prodmbox = sndmbx; + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else + tnapi->coal_now = HOSTCC_MODE_NOW; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + break; + + /* + * If we support MSIX, we'll be using RSS. If we're using + * RSS, the first vector only handles link interrupts and the + * remaining vectors handle rx and tx interrupts. Reuse the + * mailbox values for the next iteration. The values we setup + * above are still useful for the single vectored mode. + */ + if (!i) + continue; + + rcvmbx += 0x8; + + if (sndmbx & 0x4) + sndmbx -= 0x4; + else + sndmbx += 0xc; + } + + /* + * Reset chip in case UNDI or EFI driver did not shutdown + * DMA self test will enable WDMAC and we'll see (spurious) + * pending DMA on the PCI bus at that point. 
+ */ + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tg3_full_lock(tp, 0); + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + } + + err = tg3_test_dma(tp); + if (err) { + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); + goto err_out_apeunmap; + } + + tg3_init_coal(tp); + + pci_set_drvdata(pdev, dev); + + if (tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720 || + tg3_asic_rev(tp) == ASIC_REV_5762) + tg3_flag_set(tp, PTP_CAPABLE); + + tg3_timer_init(tp); + + tg3_carrier_off(tp); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); + goto err_out_apeunmap; + } + + if (tg3_flag(tp, PTP_CAPABLE)) { + tg3_ptp_init(tp); + tp->ptp_clock = ptp_clock_register(&tp->ptp_info, + &tp->pdev->dev); + if (IS_ERR(tp->ptp_clock)) + tp->ptp_clock = NULL; + } + + netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", + tp->board_part_number, + tg3_chip_rev_id(tp), + tg3_bus_string(tp, str), + dev->dev_addr); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + struct phy_device *phydev; + phydev = tp->mdio_bus->phy_map[tp->phy_addr]; + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + } else { + char *ethtype; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + ethtype = "10/100Base-TX"; + else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + ethtype = "1000Base-SX"; + else + ethtype = "10/100/1000Base-T"; + + netdev_info(dev, "attached PHY is %s (%s Ethernet) " + "(WireSpeed[%d], EEE[%d])\n", + tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, + (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); + } + + netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", + (dev->features & NETIF_F_RXCSUM) != 0, + tg3_flag(tp, USE_LINKCHG_REG) != 0, + (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, + tg3_flag(tp, ENABLE_ASF) != 0, + tg3_flag(tp, TSO_CAPABLE) != 0); + netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", + tp->dma_rwctrl, + pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : + ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); + + pci_save_state(pdev); + + return 0; + +err_out_apeunmap: + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + +err_out_iounmap: + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + +err_out_free_dev: + free_netdev(dev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + return err; +} + +static void tg3_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct tg3 *tp = netdev_priv(dev); + + tg3_ptp_fini(tp); + + release_firmware(tp->fw); + + tg3_reset_task_cancel(tp); + + if (tg3_flag(tp, USE_PHYLIB)) { + tg3_phy_fini(tp); + tg3_mdio_fini(tp); + } + + unregister_netdev(dev); + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#ifdef CONFIG_PM_SLEEP +static int tg3_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err = 0; + + rtnl_lock(); + + if (!netif_running(dev)) + goto unlock; + + tg3_reset_task_cancel(tp); + tg3_phy_stop(tp); + tg3_netif_stop(tp); + + tg3_timer_stop(tp); + + tg3_full_lock(tp, 1); + tg3_disable_ints(tp); + tg3_full_unlock(tp); + + netif_device_detach(dev); + + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_flag_clear(tp, INIT_COMPLETE); + tg3_full_unlock(tp); + + err = tg3_power_down_prepare(tp); + if (err) { + int err2; + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, true); + if (err2) + goto out; + + tg3_timer_start(tp); + + netif_device_attach(dev); + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err2) + tg3_phy_start(tp); + } + +unlock: + rtnl_unlock(); + return err; +} + +static int tg3_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err = 0; + + rtnl_lock(); + + if (!netif_running(dev)) + goto unlock; + + netif_device_attach(dev); + + tg3_full_lock(tp, 0); + + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, + !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); + if (err) + goto out; + + tg3_timer_start(tp); + + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + +unlock: + rtnl_unlock(); + return err; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); + +static void tg3_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + + rtnl_lock(); + netif_device_detach(dev); + + if (netif_running(dev)) + dev_close(dev); + + if (system_state == SYSTEM_POWER_OFF) + tg3_power_down(tp); + + rtnl_unlock(); +} + +/** + * tg3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET; + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + + /* We needn't recover from permanent error */ + if (state == pci_channel_io_frozen) + tp->pcierr_recovery = true; + + /* We probably don't have netdev yet */ + if (!netdev || !netif_running(netdev)) + goto done; + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_timer_stop(tp); + + /* Want to make sure that the reset task doesn't run */ + tg3_reset_task_cancel(tp); + + netif_device_detach(netdev); + + /* Clean up software state, even if MMIO is blocked */ + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + tg3_full_unlock(tp); + +done: + if (state == pci_channel_io_perm_failure) { + if (netdev) { + tg3_napi_enable(tp); + dev_close(netdev); + } + err = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_disable_device(pdev); + } + + rtnl_unlock(); + + return err; +} + +/** + * tg3_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has experienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + int err; + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + goto done; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!netdev || !netif_running(netdev)) { + rc = PCI_ERS_RESULT_RECOVERED; + goto done; + } + + err = tg3_power_up(tp); + if (err) + goto done; + + rc = PCI_ERS_RESULT_RECOVERED; + +done: + if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { + tg3_napi_enable(tp); + dev_close(netdev); + } + rtnl_unlock(); + + return rc; +} + +/** + * tg3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that it's OK to resume normal operation.
+ */ +static void tg3_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + int err; + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_full_lock(tp, 0); + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, true); + if (err) { + tg3_full_unlock(tp); + netdev_err(netdev, "Cannot restart hardware after reset.\n"); + goto done; + } + + netif_device_attach(netdev); + + tg3_timer_start(tp); + + tg3_netif_start(tp); + + tg3_full_unlock(tp); + + tg3_phy_start(tp); + +done: + tp->pcierr_recovery = false; + rtnl_unlock(); +} + +static const struct pci_error_handlers tg3_err_handler = { + .error_detected = tg3_io_error_detected, + .slot_reset = tg3_io_slot_reset, + .resume = tg3_io_resume +}; + +static struct pci_driver tg3_driver = { + .name = DRV_MODULE_NAME, + .id_table = tg3_pci_tbl, + .probe = tg3_init_one, + .remove = tg3_remove_one, + .err_handler = &tg3_err_handler, + .driver.pm = &tg3_pm_ops, + .shutdown = tg3_shutdown, +}; + +module_pci_driver(tg3_driver); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h new file mode 100644 index 000000000..31c9f8295 --- /dev/null +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -0,0 +1,3427 @@ +/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $ + * tg3.h: Definitions for Broadcom Tigon3 ethernet driver. + * + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2007-2014 Broadcom Corporation. + */ + +#ifndef _T3_H +#define _T3_H + +#define TG3_64BIT_REG_HIGH 0x00UL +#define TG3_64BIT_REG_LOW 0x04UL + +/* Descriptor block info. */ +#define TG3_BDINFO_HOST_ADDR 0x0UL /* 64-bit */ +#define TG3_BDINFO_MAXLEN_FLAGS 0x8UL /* 32-bit */ +#define BDINFO_FLAGS_USE_EXT_RECV 0x00000001 /* ext rx_buffer_desc */ +#define BDINFO_FLAGS_DISABLED 0x00000002 +#define BDINFO_FLAGS_MAXLEN_MASK 0xffff0000 +#define BDINFO_FLAGS_MAXLEN_SHIFT 16 +#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ +#define TG3_BDINFO_SIZE 0x10UL + +#define TG3_RX_STD_MAX_SIZE_5700 512 +#define TG3_RX_STD_MAX_SIZE_5717 2048 +#define TG3_RX_JMB_MAX_SIZE_5700 256 +#define TG3_RX_JMB_MAX_SIZE_5717 1024 +#define TG3_RX_RET_MAX_SIZE_5700 1024 +#define TG3_RX_RET_MAX_SIZE_5705 512 +#define TG3_RX_RET_MAX_SIZE_5717 4096 + +#define TG3_RSS_INDIR_TBL_SIZE 128 + +/* First 256 bytes are a mirror of PCI config space. 
*/ +#define TG3PCI_VENDOR 0x00000000 +#define TG3PCI_VENDOR_BROADCOM 0x14e4 +#define TG3PCI_DEVICE 0x00000002 +#define TG3PCI_DEVICE_TIGON3_1 0x1644 /* BCM5700 */ +#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */ +#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */ +#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */ +#define TG3PCI_DEVICE_TIGON3_5761S 0x1688 +#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689 +#define TG3PCI_DEVICE_TIGON3_57780 0x1692 +#define TG3PCI_DEVICE_TIGON3_5787M 0x1693 +#define TG3PCI_DEVICE_TIGON3_57760 0x1690 +#define TG3PCI_DEVICE_TIGON3_57790 0x1694 +#define TG3PCI_DEVICE_TIGON3_57788 0x1691 +#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ +#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ +#define TG3PCI_DEVICE_TIGON3_5717 0x1655 +#define TG3PCI_DEVICE_TIGON3_5717_C 0x1665 +#define TG3PCI_DEVICE_TIGON3_5718 0x1656 +#define TG3PCI_DEVICE_TIGON3_57781 0x16b1 +#define TG3PCI_DEVICE_TIGON3_57785 0x16b5 +#define TG3PCI_DEVICE_TIGON3_57761 0x16b0 +#define TG3PCI_DEVICE_TIGON3_57765 0x16b4 +#define TG3PCI_DEVICE_TIGON3_57791 0x16b2 +#define TG3PCI_DEVICE_TIGON3_57795 0x16b6 +#define TG3PCI_DEVICE_TIGON3_5719 0x1657 +#define TG3PCI_DEVICE_TIGON3_5720 0x165f +#define TG3PCI_DEVICE_TIGON3_57762 0x1682 +#define TG3PCI_DEVICE_TIGON3_57766 0x1686 +#define TG3PCI_DEVICE_TIGON3_57786 0x16b3 +#define TG3PCI_DEVICE_TIGON3_57782 0x16b7 +#define TG3PCI_DEVICE_TIGON3_5762 0x1687 +#define TG3PCI_DEVICE_TIGON3_5725 0x1643 +#define TG3PCI_DEVICE_TIGON3_5727 0x16f3 +#define TG3PCI_DEVICE_TIGON3_57764 0x1642 +#define TG3PCI_DEVICE_TIGON3_57767 0x1683 +#define TG3PCI_DEVICE_TIGON3_57787 0x1641 +/* 0x04 --> 0x2c unused */ +#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009 +#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009 +#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM +#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000 +#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006 +#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004 +#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007 +#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008 +#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL +#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1 +#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 +#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 +#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a +#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ +#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c +#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a +#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d +#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085 +#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099 +#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM +#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281 +#define TG3PCI_SUBDEVICE_ID_ACER_57780_A 0x0601 +#define TG3PCI_SUBDEVICE_ID_ACER_57780_B 0x0612 +#define TG3PCI_SUBDEVICE_ID_LENOVO_5787M 0x3056 + +/* 0x30 --> 0x64 unused */ +#define TG3PCI_MSI_DATA 0x00000064 +/* 0x66 --> 0x68 unused */ +#define TG3PCI_MISC_HOST_CTRL 
0x00000068 +#define MISC_HOST_CTRL_CLEAR_INT 0x00000001 +#define MISC_HOST_CTRL_MASK_PCI_INT 0x00000002 +#define MISC_HOST_CTRL_BYTE_SWAP 0x00000004 +#define MISC_HOST_CTRL_WORD_SWAP 0x00000008 +#define MISC_HOST_CTRL_PCISTATE_RW 0x00000010 +#define MISC_HOST_CTRL_CLKREG_RW 0x00000020 +#define MISC_HOST_CTRL_REGWORD_SWAP 0x00000040 +#define MISC_HOST_CTRL_INDIR_ACCESS 0x00000080 +#define MISC_HOST_CTRL_IRQ_MASK_MODE 0x00000100 +#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200 +#define MISC_HOST_CTRL_CHIPREV 0xffff0000 +#define MISC_HOST_CTRL_CHIPREV_SHIFT 16 + +#define CHIPREV_ID_5700_A0 0x7000 +#define CHIPREV_ID_5700_A1 0x7001 +#define CHIPREV_ID_5700_B0 0x7100 +#define CHIPREV_ID_5700_B1 0x7101 +#define CHIPREV_ID_5700_B3 0x7102 +#define CHIPREV_ID_5700_ALTIMA 0x7104 +#define CHIPREV_ID_5700_C0 0x7200 +#define CHIPREV_ID_5701_A0 0x0000 +#define CHIPREV_ID_5701_B0 0x0100 +#define CHIPREV_ID_5701_B2 0x0102 +#define CHIPREV_ID_5701_B5 0x0105 +#define CHIPREV_ID_5703_A0 0x1000 +#define CHIPREV_ID_5703_A1 0x1001 +#define CHIPREV_ID_5703_A2 0x1002 +#define CHIPREV_ID_5703_A3 0x1003 +#define CHIPREV_ID_5704_A0 0x2000 +#define CHIPREV_ID_5704_A1 0x2001 +#define CHIPREV_ID_5704_A2 0x2002 +#define CHIPREV_ID_5704_A3 0x2003 +#define CHIPREV_ID_5705_A0 0x3000 +#define CHIPREV_ID_5705_A1 0x3001 +#define CHIPREV_ID_5705_A2 0x3002 +#define CHIPREV_ID_5705_A3 0x3003 +#define CHIPREV_ID_5750_A0 0x4000 +#define CHIPREV_ID_5750_A1 0x4001 +#define CHIPREV_ID_5750_A3 0x4003 +#define CHIPREV_ID_5750_C2 0x4202 +#define CHIPREV_ID_5752_A0_HW 0x5000 +#define CHIPREV_ID_5752_A0 0x6000 +#define CHIPREV_ID_5752_A1 0x6001 +#define CHIPREV_ID_5714_A2 0x9002 +#define CHIPREV_ID_5906_A1 0xc001 +#define CHIPREV_ID_57780_A0 0x57780000 +#define CHIPREV_ID_57780_A1 0x57780001 +#define CHIPREV_ID_5717_A0 0x05717000 +#define CHIPREV_ID_5717_C0 0x05717200 +#define CHIPREV_ID_57765_A0 0x57785000 +#define CHIPREV_ID_5719_A0 0x05719000 +#define CHIPREV_ID_5720_A0 0x05720000 +#define CHIPREV_ID_5762_A0 0x05762000 + +#define ASIC_REV_5700 0x07 +#define ASIC_REV_5701 0x00 +#define ASIC_REV_5703 0x01 +#define ASIC_REV_5704 0x02 +#define ASIC_REV_5705 0x03 +#define ASIC_REV_5750 0x04 +#define ASIC_REV_5752 0x06 +#define ASIC_REV_5780 0x08 +#define ASIC_REV_5714 0x09 +#define ASIC_REV_5755 0x0a +#define ASIC_REV_5787 0x0b +#define ASIC_REV_5906 0x0c +#define ASIC_REV_USE_PROD_ID_REG 0x0f +#define ASIC_REV_5784 0x5784 +#define ASIC_REV_5761 0x5761 +#define ASIC_REV_5785 0x5785 +#define ASIC_REV_57780 0x57780 +#define ASIC_REV_5717 0x5717 +#define ASIC_REV_57765 0x57785 +#define ASIC_REV_5719 0x5719 +#define ASIC_REV_5720 0x5720 +#define ASIC_REV_57766 0x57766 +#define ASIC_REV_5762 0x5762 +#define CHIPREV_5700_AX 0x70 +#define CHIPREV_5700_BX 0x71 +#define CHIPREV_5700_CX 0x72 +#define CHIPREV_5701_AX 0x00 +#define CHIPREV_5703_AX 0x10 +#define CHIPREV_5704_AX 0x20 +#define CHIPREV_5704_BX 0x21 +#define CHIPREV_5750_AX 0x40 +#define CHIPREV_5750_BX 0x41 +#define CHIPREV_5784_AX 0x57840 +#define CHIPREV_5761_AX 0x57610 +#define CHIPREV_57765_AX 0x577650 +#define METAL_REV_A0 0x00 +#define METAL_REV_A1 0x01 +#define METAL_REV_B0 0x00 +#define METAL_REV_B1 0x01 +#define METAL_REV_B2 0x02 +#define TG3PCI_DMA_RW_CTRL 0x0000006c +#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 +#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080 +#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380 +#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 +#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 +#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 +#define 
DMA_RWCTRL_READ_BNDRY_128_PCIX 0x00000100 +#define DMA_RWCTRL_READ_BNDRY_32 0x00000200 +#define DMA_RWCTRL_READ_BNDRY_256_PCIX 0x00000200 +#define DMA_RWCTRL_READ_BNDRY_64 0x00000300 +#define DMA_RWCTRL_READ_BNDRY_384_PCIX 0x00000300 +#define DMA_RWCTRL_READ_BNDRY_128 0x00000400 +#define DMA_RWCTRL_READ_BNDRY_256 0x00000500 +#define DMA_RWCTRL_READ_BNDRY_512 0x00000600 +#define DMA_RWCTRL_READ_BNDRY_1024 0x00000700 +#define DMA_RWCTRL_WRITE_BNDRY_MASK 0x00003800 +#define DMA_RWCTRL_WRITE_BNDRY_DISAB 0x00000000 +#define DMA_RWCTRL_WRITE_BNDRY_16 0x00000800 +#define DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800 +#define DMA_RWCTRL_WRITE_BNDRY_32 0x00001000 +#define DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000 +#define DMA_RWCTRL_WRITE_BNDRY_64 0x00001800 +#define DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800 +#define DMA_RWCTRL_WRITE_BNDRY_128 0x00002000 +#define DMA_RWCTRL_WRITE_BNDRY_256 0x00002800 +#define DMA_RWCTRL_WRITE_BNDRY_512 0x00003000 +#define DMA_RWCTRL_WRITE_BNDRY_1024 0x00003800 +#define DMA_RWCTRL_ONE_DMA 0x00004000 +#define DMA_RWCTRL_READ_WATER 0x00070000 +#define DMA_RWCTRL_READ_WATER_SHIFT 16 +#define DMA_RWCTRL_WRITE_WATER 0x00380000 +#define DMA_RWCTRL_WRITE_WATER_SHIFT 19 +#define DMA_RWCTRL_USE_MEM_READ_MULT 0x00400000 +#define DMA_RWCTRL_ASSERT_ALL_BE 0x00800000 +#define DMA_RWCTRL_PCI_READ_CMD 0x0f000000 +#define DMA_RWCTRL_PCI_READ_CMD_SHIFT 24 +#define DMA_RWCTRL_PCI_WRITE_CMD 0xf0000000 +#define DMA_RWCTRL_PCI_WRITE_CMD_SHIFT 28 +#define DMA_RWCTRL_WRITE_BNDRY_64_PCIE 0x10000000 +#define DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000 +#define DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000 +#define TG3PCI_PCISTATE 0x00000070 +#define PCISTATE_FORCE_RESET 0x00000001 +#define PCISTATE_INT_NOT_ACTIVE 0x00000002 +#define PCISTATE_CONV_PCI_MODE 0x00000004 +#define PCISTATE_BUS_SPEED_HIGH 0x00000008 +#define PCISTATE_BUS_32BIT 0x00000010 +#define PCISTATE_ROM_ENABLE 0x00000020 +#define PCISTATE_ROM_RETRY_ENABLE 0x00000040 +#define PCISTATE_FLAT_VIEW 0x00000100 +#define PCISTATE_RETRY_SAME_DMA 0x00002000 +#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 +#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 +#define PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 +#define TG3PCI_CLOCK_CTRL 0x00000074 +#define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 +#define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 +#define CLOCK_CTRL_TXCLK_DISABLE 0x00000800 +#define CLOCK_CTRL_ALTCLK 0x00001000 +#define CLOCK_CTRL_PWRDOWN_PLL133 0x00008000 +#define CLOCK_CTRL_44MHZ_CORE 0x00040000 +#define CLOCK_CTRL_625_CORE 0x00100000 +#define CLOCK_CTRL_FORCE_CLKRUN 0x00200000 +#define CLOCK_CTRL_CLKRUN_OENABLE 0x00400000 +#define CLOCK_CTRL_DELAY_PCI_GRANT 0x80000000 +#define TG3PCI_REG_BASE_ADDR 0x00000078 +#define TG3PCI_MEM_WIN_BASE_ADDR 0x0000007c +#define TG3PCI_REG_DATA 0x00000080 +#define TG3PCI_MEM_WIN_DATA 0x00000084 +#define TG3PCI_MISC_LOCAL_CTRL 0x00000090 +/* 0x94 --> 0x98 unused */ +#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ +#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ +/* 0xa8 --> 0xb8 unused */ +#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 +#define DUAL_MAC_CTRL_CH_MASK 0x00000003 +#define DUAL_MAC_CTRL_ID 0x00000004 +#define TG3PCI_PRODID_ASICREV 0x000000bc +#define PROD_ID_ASIC_REV_MASK 0x0fffffff +/* 0xc0 --> 0xf4 unused */ + +#define TG3PCI_GEN2_PRODID_ASICREV 0x000000f4 +#define TG3PCI_GEN15_PRODID_ASICREV 0x000000fc +/* 0xf8 --> 0x200 unused */ + +#define TG3_CORR_ERR_STAT 0x00000110 +#define TG3_CORR_ERR_STAT_CLEAR 0xffffffff +/* 0x114 --> 0x200 unused */ + +/* Mailbox registers */ 
+#define MAILBOX_INTERRUPT_0 0x00000200 /* 64-bit */ +#define MAILBOX_INTERRUPT_1 0x00000208 /* 64-bit */ +#define MAILBOX_INTERRUPT_2 0x00000210 /* 64-bit */ +#define MAILBOX_INTERRUPT_3 0x00000218 /* 64-bit */ +#define MAILBOX_GENERAL_0 0x00000220 /* 64-bit */ +#define MAILBOX_GENERAL_1 0x00000228 /* 64-bit */ +#define MAILBOX_GENERAL_2 0x00000230 /* 64-bit */ +#define MAILBOX_GENERAL_3 0x00000238 /* 64-bit */ +#define MAILBOX_GENERAL_4 0x00000240 /* 64-bit */ +#define MAILBOX_GENERAL_5 0x00000248 /* 64-bit */ +#define MAILBOX_GENERAL_6 0x00000250 /* 64-bit */ +#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ +#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ +#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ +#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \ + TG3_64BIT_REG_LOW) +#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ +#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \ + TG3_64BIT_REG_LOW) +#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_2 0x00000290 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_3 0x00000298 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_4 0x000002a0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_5 0x000002a8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_6 0x000002b0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_7 0x000002b8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_8 0x000002c0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_9 0x000002c8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_10 0x000002d0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_11 0x000002d8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_12 0x000002e0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_13 0x000002e8 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_14 0x000002f0 /* 64-bit */ +#define MAILBOX_RCVRET_CON_IDX_15 0x000002f8 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_0 0x00000300 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_1 0x00000308 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_2 0x00000310 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_3 0x00000318 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_4 0x00000320 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_5 0x00000328 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_6 0x00000330 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_7 0x00000338 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_8 0x00000340 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_9 0x00000348 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_10 0x00000350 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_11 0x00000358 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_12 0x00000360 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_13 0x00000368 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_14 0x00000370 /* 64-bit */ +#define MAILBOX_SNDHOST_PROD_IDX_15 0x00000378 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_0 0x00000380 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_1 0x00000388 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_2 0x00000390 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_3 0x00000398 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_4 0x000003a0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_5 0x000003a8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_6 0x000003b0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_7 0x000003b8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_8 0x000003c0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_9 0x000003c8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_10 
0x000003d0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_11 0x000003d8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_12 0x000003e0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_13 0x000003e8 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_14 0x000003f0 /* 64-bit */ +#define MAILBOX_SNDNIC_PROD_IDX_15 0x000003f8 /* 64-bit */ + +/* MAC control registers */ +#define MAC_MODE 0x00000400 +#define MAC_MODE_RESET 0x00000001 +#define MAC_MODE_HALF_DUPLEX 0x00000002 +#define MAC_MODE_PORT_MODE_MASK 0x0000000c +#define MAC_MODE_PORT_MODE_TBI 0x0000000c +#define MAC_MODE_PORT_MODE_GMII 0x00000008 +#define MAC_MODE_PORT_MODE_MII 0x00000004 +#define MAC_MODE_PORT_MODE_NONE 0x00000000 +#define MAC_MODE_PORT_INT_LPBACK 0x00000010 +#define MAC_MODE_TAGGED_MAC_CTRL 0x00000080 +#define MAC_MODE_TX_BURSTING 0x00000100 +#define MAC_MODE_MAX_DEFER 0x00000200 +#define MAC_MODE_LINK_POLARITY 0x00000400 +#define MAC_MODE_RXSTAT_ENABLE 0x00000800 +#define MAC_MODE_RXSTAT_CLEAR 0x00001000 +#define MAC_MODE_RXSTAT_FLUSH 0x00002000 +#define MAC_MODE_TXSTAT_ENABLE 0x00004000 +#define MAC_MODE_TXSTAT_CLEAR 0x00008000 +#define MAC_MODE_TXSTAT_FLUSH 0x00010000 +#define MAC_MODE_SEND_CONFIGS 0x00020000 +#define MAC_MODE_MAGIC_PKT_ENABLE 0x00040000 +#define MAC_MODE_ACPI_ENABLE 0x00080000 +#define MAC_MODE_MIP_ENABLE 0x00100000 +#define MAC_MODE_TDE_ENABLE 0x00200000 +#define MAC_MODE_RDE_ENABLE 0x00400000 +#define MAC_MODE_FHDE_ENABLE 0x00800000 +#define MAC_MODE_KEEP_FRAME_IN_WOL 0x01000000 +#define MAC_MODE_APE_RX_EN 0x08000000 +#define MAC_MODE_APE_TX_EN 0x10000000 +#define MAC_STATUS 0x00000404 +#define MAC_STATUS_PCS_SYNCED 0x00000001 +#define MAC_STATUS_SIGNAL_DET 0x00000002 +#define MAC_STATUS_RCVD_CFG 0x00000004 +#define MAC_STATUS_CFG_CHANGED 0x00000008 +#define MAC_STATUS_SYNC_CHANGED 0x00000010 +#define MAC_STATUS_PORT_DEC_ERR 0x00000400 +#define MAC_STATUS_LNKSTATE_CHANGED 0x00001000 +#define MAC_STATUS_MI_COMPLETION 0x00400000 +#define MAC_STATUS_MI_INTERRUPT 0x00800000 +#define MAC_STATUS_AP_ERROR 0x01000000 +#define MAC_STATUS_ODI_ERROR 0x02000000 +#define MAC_STATUS_RXSTAT_OVERRUN 0x04000000 +#define MAC_STATUS_TXSTAT_OVERRUN 0x08000000 +#define MAC_EVENT 0x00000408 +#define MAC_EVENT_PORT_DECODE_ERR 0x00000400 +#define MAC_EVENT_LNKSTATE_CHANGED 0x00001000 +#define MAC_EVENT_MI_COMPLETION 0x00400000 +#define MAC_EVENT_MI_INTERRUPT 0x00800000 +#define MAC_EVENT_AP_ERROR 0x01000000 +#define MAC_EVENT_ODI_ERROR 0x02000000 +#define MAC_EVENT_RXSTAT_OVERRUN 0x04000000 +#define MAC_EVENT_TXSTAT_OVERRUN 0x08000000 +#define MAC_LED_CTRL 0x0000040c +#define LED_CTRL_LNKLED_OVERRIDE 0x00000001 +#define LED_CTRL_1000MBPS_ON 0x00000002 +#define LED_CTRL_100MBPS_ON 0x00000004 +#define LED_CTRL_10MBPS_ON 0x00000008 +#define LED_CTRL_TRAFFIC_OVERRIDE 0x00000010 +#define LED_CTRL_TRAFFIC_BLINK 0x00000020 +#define LED_CTRL_TRAFFIC_LED 0x00000040 +#define LED_CTRL_1000MBPS_STATUS 0x00000080 +#define LED_CTRL_100MBPS_STATUS 0x00000100 +#define LED_CTRL_10MBPS_STATUS 0x00000200 +#define LED_CTRL_TRAFFIC_STATUS 0x00000400 +#define LED_CTRL_MODE_MAC 0x00000000 +#define LED_CTRL_MODE_PHY_1 0x00000800 +#define LED_CTRL_MODE_PHY_2 0x00001000 +#define LED_CTRL_MODE_SHASTA_MAC 0x00002000 +#define LED_CTRL_MODE_SHARED 0x00004000 +#define LED_CTRL_MODE_COMBO 0x00008000 +#define LED_CTRL_BLINK_RATE_MASK 0x7ff80000 +#define LED_CTRL_BLINK_RATE_SHIFT 19 +#define LED_CTRL_BLINK_PER_OVERRIDE 0x00080000 +#define LED_CTRL_BLINK_RATE_OVERRIDE 0x80000000 +#define MAC_ADDR_0_HIGH 0x00000410 /* upper 2 bytes */ +#define MAC_ADDR_0_LOW 0x00000414 
/* lower 4 bytes */ +#define MAC_ADDR_1_HIGH 0x00000418 /* upper 2 bytes */ +#define MAC_ADDR_1_LOW 0x0000041c /* lower 4 bytes */ +#define MAC_ADDR_2_HIGH 0x00000420 /* upper 2 bytes */ +#define MAC_ADDR_2_LOW 0x00000424 /* lower 4 bytes */ +#define MAC_ADDR_3_HIGH 0x00000428 /* upper 2 bytes */ +#define MAC_ADDR_3_LOW 0x0000042c /* lower 4 bytes */ +#define MAC_ACPI_MBUF_PTR 0x00000430 +#define MAC_ACPI_LEN_OFFSET 0x00000434 +#define ACPI_LENOFF_LEN_MASK 0x0000ffff +#define ACPI_LENOFF_LEN_SHIFT 0 +#define ACPI_LENOFF_OFF_MASK 0x0fff0000 +#define ACPI_LENOFF_OFF_SHIFT 16 +#define MAC_TX_BACKOFF_SEED 0x00000438 +#define TX_BACKOFF_SEED_MASK 0x000003ff +#define MAC_RX_MTU_SIZE 0x0000043c +#define RX_MTU_SIZE_MASK 0x0000ffff +#define MAC_PCS_TEST 0x00000440 +#define PCS_TEST_PATTERN_MASK 0x000fffff +#define PCS_TEST_PATTERN_SHIFT 0 +#define PCS_TEST_ENABLE 0x00100000 +#define MAC_TX_AUTO_NEG 0x00000444 +#define TX_AUTO_NEG_MASK 0x0000ffff +#define TX_AUTO_NEG_SHIFT 0 +#define MAC_RX_AUTO_NEG 0x00000448 +#define RX_AUTO_NEG_MASK 0x0000ffff +#define RX_AUTO_NEG_SHIFT 0 +#define MAC_MI_COM 0x0000044c +#define MI_COM_CMD_MASK 0x0c000000 +#define MI_COM_CMD_WRITE 0x04000000 +#define MI_COM_CMD_READ 0x08000000 +#define MI_COM_READ_FAILED 0x10000000 +#define MI_COM_START 0x20000000 +#define MI_COM_BUSY 0x20000000 +#define MI_COM_PHY_ADDR_MASK 0x03e00000 +#define MI_COM_PHY_ADDR_SHIFT 21 +#define MI_COM_REG_ADDR_MASK 0x001f0000 +#define MI_COM_REG_ADDR_SHIFT 16 +#define MI_COM_DATA_MASK 0x0000ffff +#define MAC_MI_STAT 0x00000450 +#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001 +#define MAC_MI_STAT_10MBPS_MODE 0x00000002 +#define MAC_MI_MODE 0x00000454 +#define MAC_MI_MODE_CLK_10MHZ 0x00000001 +#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002 +#define MAC_MI_MODE_AUTO_POLL 0x00000010 +#define MAC_MI_MODE_500KHZ_CONST 0x00008000 +#define MAC_MI_MODE_BASE 0x000c0000 /* XXX magic values XXX */ +#define MAC_AUTO_POLL_STATUS 0x00000458 +#define MAC_AUTO_POLL_ERROR 0x00000001 +#define MAC_TX_MODE 0x0000045c +#define TX_MODE_RESET 0x00000001 +#define TX_MODE_ENABLE 0x00000002 +#define TX_MODE_FLOW_CTRL_ENABLE 0x00000010 +#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020 +#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040 +#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100 +#define TX_MODE_JMB_FRM_LEN 0x00400000 +#define TX_MODE_CNT_DN_MODE 0x00800000 +#define MAC_TX_STATUS 0x00000460 +#define TX_STATUS_XOFFED 0x00000001 +#define TX_STATUS_SENT_XOFF 0x00000002 +#define TX_STATUS_SENT_XON 0x00000004 +#define TX_STATUS_LINK_UP 0x00000008 +#define TX_STATUS_ODI_UNDERRUN 0x00000010 +#define TX_STATUS_ODI_OVERRUN 0x00000020 +#define MAC_TX_LENGTHS 0x00000464 +#define TX_LENGTHS_SLOT_TIME_MASK 0x000000ff +#define TX_LENGTHS_SLOT_TIME_SHIFT 0 +#define TX_LENGTHS_IPG_MASK 0x00000f00 +#define TX_LENGTHS_IPG_SHIFT 8 +#define TX_LENGTHS_IPG_CRS_MASK 0x00003000 +#define TX_LENGTHS_IPG_CRS_SHIFT 12 +#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000 +#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000 +#define MAC_RX_MODE 0x00000468 +#define RX_MODE_RESET 0x00000001 +#define RX_MODE_ENABLE 0x00000002 +#define RX_MODE_FLOW_CTRL_ENABLE 0x00000004 +#define RX_MODE_KEEP_MAC_CTRL 0x00000008 +#define RX_MODE_KEEP_PAUSE 0x00000010 +#define RX_MODE_ACCEPT_OVERSIZED 0x00000020 +#define RX_MODE_ACCEPT_RUNTS 0x00000040 +#define RX_MODE_LEN_CHECK 0x00000080 +#define RX_MODE_PROMISC 0x00000100 +#define RX_MODE_NO_CRC_CHECK 0x00000200 +#define RX_MODE_KEEP_VLAN_TAG 0x00000400 +#define RX_MODE_RSS_IPV4_HASH_EN 0x00010000 +#define RX_MODE_RSS_TCP_IPV4_HASH_EN 
0x00020000 +#define RX_MODE_RSS_IPV6_HASH_EN 0x00040000 +#define RX_MODE_RSS_TCP_IPV6_HASH_EN 0x00080000 +#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000 +#define RX_MODE_RSS_ENABLE 0x00800000 +#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000 +#define RX_MODE_IPV4_FRAG_FIX 0x02000000 +#define MAC_RX_STATUS 0x0000046c +#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001 +#define RX_STATUS_XOFF_RCVD 0x00000002 +#define RX_STATUS_XON_RCVD 0x00000004 +#define MAC_HASH_REG_0 0x00000470 +#define MAC_HASH_REG_1 0x00000474 +#define MAC_HASH_REG_2 0x00000478 +#define MAC_HASH_REG_3 0x0000047c +#define MAC_RCV_RULE_0 0x00000480 +#define MAC_RCV_VALUE_0 0x00000484 +#define MAC_RCV_RULE_1 0x00000488 +#define MAC_RCV_VALUE_1 0x0000048c +#define MAC_RCV_RULE_2 0x00000490 +#define MAC_RCV_VALUE_2 0x00000494 +#define MAC_RCV_RULE_3 0x00000498 +#define MAC_RCV_VALUE_3 0x0000049c +#define MAC_RCV_RULE_4 0x000004a0 +#define MAC_RCV_VALUE_4 0x000004a4 +#define MAC_RCV_RULE_5 0x000004a8 +#define MAC_RCV_VALUE_5 0x000004ac +#define MAC_RCV_RULE_6 0x000004b0 +#define MAC_RCV_VALUE_6 0x000004b4 +#define MAC_RCV_RULE_7 0x000004b8 +#define MAC_RCV_VALUE_7 0x000004bc +#define MAC_RCV_RULE_8 0x000004c0 +#define MAC_RCV_VALUE_8 0x000004c4 +#define MAC_RCV_RULE_9 0x000004c8 +#define MAC_RCV_VALUE_9 0x000004cc +#define MAC_RCV_RULE_10 0x000004d0 +#define MAC_RCV_VALUE_10 0x000004d4 +#define MAC_RCV_RULE_11 0x000004d8 +#define MAC_RCV_VALUE_11 0x000004dc +#define MAC_RCV_RULE_12 0x000004e0 +#define MAC_RCV_VALUE_12 0x000004e4 +#define MAC_RCV_RULE_13 0x000004e8 +#define MAC_RCV_VALUE_13 0x000004ec +#define MAC_RCV_RULE_14 0x000004f0 +#define MAC_RCV_VALUE_14 0x000004f4 +#define MAC_RCV_RULE_15 0x000004f8 +#define MAC_RCV_VALUE_15 0x000004fc +#define RCV_RULE_DISABLE_MASK 0x7fffffff +#define MAC_RCV_RULE_CFG 0x00000500 +#define RCV_RULE_CFG_DEFAULT_CLASS 0x00000008 +#define MAC_LOW_WMARK_MAX_RX_FRAME 0x00000504 +/* 0x508 --> 0x520 unused */ +#define MAC_HASHREGU_0 0x00000520 +#define MAC_HASHREGU_1 0x00000524 +#define MAC_HASHREGU_2 0x00000528 +#define MAC_HASHREGU_3 0x0000052c +#define MAC_EXTADDR_0_HIGH 0x00000530 +#define MAC_EXTADDR_0_LOW 0x00000534 +#define MAC_EXTADDR_1_HIGH 0x00000538 +#define MAC_EXTADDR_1_LOW 0x0000053c +#define MAC_EXTADDR_2_HIGH 0x00000540 +#define MAC_EXTADDR_2_LOW 0x00000544 +#define MAC_EXTADDR_3_HIGH 0x00000548 +#define MAC_EXTADDR_3_LOW 0x0000054c +#define MAC_EXTADDR_4_HIGH 0x00000550 +#define MAC_EXTADDR_4_LOW 0x00000554 +#define MAC_EXTADDR_5_HIGH 0x00000558 +#define MAC_EXTADDR_5_LOW 0x0000055c +#define MAC_EXTADDR_6_HIGH 0x00000560 +#define MAC_EXTADDR_6_LOW 0x00000564 +#define MAC_EXTADDR_7_HIGH 0x00000568 +#define MAC_EXTADDR_7_LOW 0x0000056c +#define MAC_EXTADDR_8_HIGH 0x00000570 +#define MAC_EXTADDR_8_LOW 0x00000574 +#define MAC_EXTADDR_9_HIGH 0x00000578 +#define MAC_EXTADDR_9_LOW 0x0000057c +#define MAC_EXTADDR_10_HIGH 0x00000580 +#define MAC_EXTADDR_10_LOW 0x00000584 +#define MAC_EXTADDR_11_HIGH 0x00000588 +#define MAC_EXTADDR_11_LOW 0x0000058c +#define MAC_SERDES_CFG 0x00000590 +#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 +#define MAC_SERDES_STAT 0x00000594 +/* 0x598 --> 0x5a0 unused */ +#define MAC_PHYCFG1 0x000005a0 +#define MAC_PHYCFG1_RGMII_INT 0x00000001 +#define MAC_PHYCFG1_RXCLK_TO_MASK 0x00001ff0 +#define MAC_PHYCFG1_RXCLK_TIMEOUT 0x00001000 +#define MAC_PHYCFG1_TXCLK_TO_MASK 0x01ff0000 +#define MAC_PHYCFG1_TXCLK_TIMEOUT 0x01000000 +#define MAC_PHYCFG1_RGMII_EXT_RX_DEC 0x02000000 +#define MAC_PHYCFG1_RGMII_SND_STAT_EN 0x04000000 +#define MAC_PHYCFG1_TXC_DRV 0x20000000 +#define 
MAC_PHYCFG2 0x000005a4 +#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001 +#define MAC_PHYCFG2_EMODE_MASK_MASK 0x000001c0 +#define MAC_PHYCFG2_EMODE_MASK_AC131 0x000000c0 +#define MAC_PHYCFG2_EMODE_MASK_50610 0x00000100 +#define MAC_PHYCFG2_EMODE_MASK_RT8211 0x00000000 +#define MAC_PHYCFG2_EMODE_MASK_RT8201 0x000001c0 +#define MAC_PHYCFG2_EMODE_COMP_MASK 0x00000e00 +#define MAC_PHYCFG2_EMODE_COMP_AC131 0x00000600 +#define MAC_PHYCFG2_EMODE_COMP_50610 0x00000400 +#define MAC_PHYCFG2_EMODE_COMP_RT8211 0x00000800 +#define MAC_PHYCFG2_EMODE_COMP_RT8201 0x00000000 +#define MAC_PHYCFG2_FMODE_MASK_MASK 0x00007000 +#define MAC_PHYCFG2_FMODE_MASK_AC131 0x00006000 +#define MAC_PHYCFG2_FMODE_MASK_50610 0x00004000 +#define MAC_PHYCFG2_FMODE_MASK_RT8211 0x00000000 +#define MAC_PHYCFG2_FMODE_MASK_RT8201 0x00007000 +#define MAC_PHYCFG2_FMODE_COMP_MASK 0x00038000 +#define MAC_PHYCFG2_FMODE_COMP_AC131 0x00030000 +#define MAC_PHYCFG2_FMODE_COMP_50610 0x00008000 +#define MAC_PHYCFG2_FMODE_COMP_RT8211 0x00038000 +#define MAC_PHYCFG2_FMODE_COMP_RT8201 0x00000000 +#define MAC_PHYCFG2_GMODE_MASK_MASK 0x001c0000 +#define MAC_PHYCFG2_GMODE_MASK_AC131 0x001c0000 +#define MAC_PHYCFG2_GMODE_MASK_50610 0x00100000 +#define MAC_PHYCFG2_GMODE_MASK_RT8211 0x00000000 +#define MAC_PHYCFG2_GMODE_MASK_RT8201 0x001c0000 +#define MAC_PHYCFG2_GMODE_COMP_MASK 0x00e00000 +#define MAC_PHYCFG2_GMODE_COMP_AC131 0x00e00000 +#define MAC_PHYCFG2_GMODE_COMP_50610 0x00000000 +#define MAC_PHYCFG2_GMODE_COMP_RT8211 0x00200000 +#define MAC_PHYCFG2_GMODE_COMP_RT8201 0x00000000 +#define MAC_PHYCFG2_ACT_MASK_MASK 0x03000000 +#define MAC_PHYCFG2_ACT_MASK_AC131 0x03000000 +#define MAC_PHYCFG2_ACT_MASK_50610 0x01000000 +#define MAC_PHYCFG2_ACT_MASK_RT8211 0x03000000 +#define MAC_PHYCFG2_ACT_MASK_RT8201 0x01000000 +#define MAC_PHYCFG2_ACT_COMP_MASK 0x0c000000 +#define MAC_PHYCFG2_ACT_COMP_AC131 0x00000000 +#define MAC_PHYCFG2_ACT_COMP_50610 0x00000000 +#define MAC_PHYCFG2_ACT_COMP_RT8211 0x00000000 +#define MAC_PHYCFG2_ACT_COMP_RT8201 0x08000000 +#define MAC_PHYCFG2_QUAL_MASK_MASK 0x30000000 +#define MAC_PHYCFG2_QUAL_MASK_AC131 0x30000000 +#define MAC_PHYCFG2_QUAL_MASK_50610 0x30000000 +#define MAC_PHYCFG2_QUAL_MASK_RT8211 0x30000000 +#define MAC_PHYCFG2_QUAL_MASK_RT8201 0x30000000 +#define MAC_PHYCFG2_QUAL_COMP_MASK 0xc0000000 +#define MAC_PHYCFG2_QUAL_COMP_AC131 0x00000000 +#define MAC_PHYCFG2_QUAL_COMP_50610 0x00000000 +#define MAC_PHYCFG2_QUAL_COMP_RT8211 0x00000000 +#define MAC_PHYCFG2_QUAL_COMP_RT8201 0x00000000 +#define MAC_PHYCFG2_50610_LED_MODES \ + (MAC_PHYCFG2_EMODE_MASK_50610 | \ + MAC_PHYCFG2_EMODE_COMP_50610 | \ + MAC_PHYCFG2_FMODE_MASK_50610 | \ + MAC_PHYCFG2_FMODE_COMP_50610 | \ + MAC_PHYCFG2_GMODE_MASK_50610 | \ + MAC_PHYCFG2_GMODE_COMP_50610 | \ + MAC_PHYCFG2_ACT_MASK_50610 | \ + MAC_PHYCFG2_ACT_COMP_50610 | \ + MAC_PHYCFG2_QUAL_MASK_50610 | \ + MAC_PHYCFG2_QUAL_COMP_50610) +#define MAC_PHYCFG2_AC131_LED_MODES \ + (MAC_PHYCFG2_EMODE_MASK_AC131 | \ + MAC_PHYCFG2_EMODE_COMP_AC131 | \ + MAC_PHYCFG2_FMODE_MASK_AC131 | \ + MAC_PHYCFG2_FMODE_COMP_AC131 | \ + MAC_PHYCFG2_GMODE_MASK_AC131 | \ + MAC_PHYCFG2_GMODE_COMP_AC131 | \ + MAC_PHYCFG2_ACT_MASK_AC131 | \ + MAC_PHYCFG2_ACT_COMP_AC131 | \ + MAC_PHYCFG2_QUAL_MASK_AC131 | \ + MAC_PHYCFG2_QUAL_COMP_AC131) +#define MAC_PHYCFG2_RTL8211C_LED_MODES \ + (MAC_PHYCFG2_EMODE_MASK_RT8211 | \ + MAC_PHYCFG2_EMODE_COMP_RT8211 | \ + MAC_PHYCFG2_FMODE_MASK_RT8211 | \ + MAC_PHYCFG2_FMODE_COMP_RT8211 | \ + MAC_PHYCFG2_GMODE_MASK_RT8211 | \ + MAC_PHYCFG2_GMODE_COMP_RT8211 | \ + MAC_PHYCFG2_ACT_MASK_RT8211 | \ 
+ MAC_PHYCFG2_ACT_COMP_RT8211 | \ + MAC_PHYCFG2_QUAL_MASK_RT8211 | \ + MAC_PHYCFG2_QUAL_COMP_RT8211) +#define MAC_PHYCFG2_RTL8201E_LED_MODES \ + (MAC_PHYCFG2_EMODE_MASK_RT8201 | \ + MAC_PHYCFG2_EMODE_COMP_RT8201 | \ + MAC_PHYCFG2_FMODE_MASK_RT8201 | \ + MAC_PHYCFG2_FMODE_COMP_RT8201 | \ + MAC_PHYCFG2_GMODE_MASK_RT8201 | \ + MAC_PHYCFG2_GMODE_COMP_RT8201 | \ + MAC_PHYCFG2_ACT_MASK_RT8201 | \ + MAC_PHYCFG2_ACT_COMP_RT8201 | \ + MAC_PHYCFG2_QUAL_MASK_RT8201 | \ + MAC_PHYCFG2_QUAL_COMP_RT8201) +#define MAC_EXT_RGMII_MODE 0x000005a8 +#define MAC_RGMII_MODE_TX_ENABLE 0x00000001 +#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002 +#define MAC_RGMII_MODE_TX_RESET 0x00000004 +#define MAC_RGMII_MODE_RX_INT_B 0x00000100 +#define MAC_RGMII_MODE_RX_QUALITY 0x00000200 +#define MAC_RGMII_MODE_RX_ACTIVITY 0x00000400 +#define MAC_RGMII_MODE_RX_ENG_DET 0x00000800 +/* 0x5ac --> 0x5b0 unused */ +#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */ +#define SERDES_RX_SIG_DETECT 0x00000400 +#define SG_DIG_CTRL 0x000005b0 +#define SG_DIG_USING_HW_AUTONEG 0x80000000 +#define SG_DIG_SOFT_RESET 0x40000000 +#define SG_DIG_DISABLE_LINKRDY 0x20000000 +#define SG_DIG_CRC16_CLEAR_N 0x01000000 +#define SG_DIG_EN10B 0x00800000 +#define SG_DIG_CLEAR_STATUS 0x00400000 +#define SG_DIG_LOCAL_DUPLEX_STATUS 0x00200000 +#define SG_DIG_LOCAL_LINK_STATUS 0x00100000 +#define SG_DIG_SPEED_STATUS_MASK 0x000c0000 +#define SG_DIG_SPEED_STATUS_SHIFT 18 +#define SG_DIG_JUMBO_PACKET_DISABLE 0x00020000 +#define SG_DIG_RESTART_AUTONEG 0x00010000 +#define SG_DIG_FIBER_MODE 0x00008000 +#define SG_DIG_REMOTE_FAULT_MASK 0x00006000 +#define SG_DIG_PAUSE_MASK 0x00001800 +#define SG_DIG_PAUSE_CAP 0x00000800 +#define SG_DIG_ASYM_PAUSE 0x00001000 +#define SG_DIG_GBIC_ENABLE 0x00000400 +#define SG_DIG_CHECK_END_ENABLE 0x00000200 +#define SG_DIG_SGMII_AUTONEG_TIMER 0x00000100 +#define SG_DIG_CLOCK_PHASE_SELECT 0x00000080 +#define SG_DIG_GMII_INPUT_SELECT 0x00000040 +#define SG_DIG_MRADV_CRC16_SELECT 0x00000020 +#define SG_DIG_COMMA_DETECT_ENABLE 0x00000010 +#define SG_DIG_AUTONEG_TIMER_REDUCE 0x00000008 +#define SG_DIG_AUTONEG_LOW_ENABLE 0x00000004 +#define SG_DIG_REMOTE_LOOPBACK 0x00000002 +#define SG_DIG_LOOPBACK 0x00000001 +#define SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \ + SG_DIG_LOCAL_DUPLEX_STATUS | \ + SG_DIG_LOCAL_LINK_STATUS | \ + (0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \ + SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE) +#define SG_DIG_STATUS 0x000005b4 +#define SG_DIG_CRC16_BUS_MASK 0xffff0000 +#define SG_DIG_PARTNER_FAULT_MASK 0x00600000 /* If !MRADV_CRC16_SELECT */ +#define SG_DIG_PARTNER_ASYM_PAUSE 0x00100000 /* If !MRADV_CRC16_SELECT */ +#define SG_DIG_PARTNER_PAUSE_CAPABLE 0x00080000 /* If !MRADV_CRC16_SELECT */ +#define SG_DIG_PARTNER_HALF_DUPLEX 0x00040000 /* If !MRADV_CRC16_SELECT */ +#define SG_DIG_PARTNER_FULL_DUPLEX 0x00020000 /* If !MRADV_CRC16_SELECT */ +#define SG_DIG_PARTNER_NEXT_PAGE 0x00010000 /* If !MRADV_CRC16_SELECT */ +#define SG_DIG_AUTONEG_STATE_MASK 0x00000ff0 +#define SG_DIG_IS_SERDES 0x00000100 +#define SG_DIG_COMMA_DETECTOR 0x00000008 +#define SG_DIG_MAC_ACK_STATUS 0x00000004 +#define SG_DIG_AUTONEG_COMPLETE 0x00000002 +#define SG_DIG_AUTONEG_ERROR 0x00000001 +#define TG3_TX_TSTAMP_LSB 0x000005c0 +#define TG3_TX_TSTAMP_MSB 0x000005c4 +#define TG3_TSTAMP_MASK 0x7fffffffffffffffLL +/* 0x5c8 --> 0x600 unused */ +#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */ +#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */ +/* 0x624 --> 0x670 unused */ + +#define MAC_RSS_INDIR_TBL_0 0x00000630 + +#define MAC_RSS_HASH_KEY_0 
0x00000670 +#define MAC_RSS_HASH_KEY_1 0x00000674 +#define MAC_RSS_HASH_KEY_2 0x00000678 +#define MAC_RSS_HASH_KEY_3 0x0000067c +#define MAC_RSS_HASH_KEY_4 0x00000680 +#define MAC_RSS_HASH_KEY_5 0x00000684 +#define MAC_RSS_HASH_KEY_6 0x00000688 +#define MAC_RSS_HASH_KEY_7 0x0000068c +#define MAC_RSS_HASH_KEY_8 0x00000690 +#define MAC_RSS_HASH_KEY_9 0x00000694 +/* 0x698 --> 0x6b0 unused */ + +#define TG3_RX_TSTAMP_LSB 0x000006b0 +#define TG3_RX_TSTAMP_MSB 0x000006b4 +/* 0x6b8 --> 0x6c8 unused */ + +#define TG3_RX_PTP_CTL 0x000006c8 +#define TG3_RX_PTP_CTL_SYNC_EVNT 0x00000001 +#define TG3_RX_PTP_CTL_DELAY_REQ 0x00000002 +#define TG3_RX_PTP_CTL_PDLAY_REQ 0x00000004 +#define TG3_RX_PTP_CTL_PDLAY_RES 0x00000008 +#define TG3_RX_PTP_CTL_ALL_V1_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \ + TG3_RX_PTP_CTL_DELAY_REQ) +#define TG3_RX_PTP_CTL_ALL_V2_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \ + TG3_RX_PTP_CTL_DELAY_REQ | \ + TG3_RX_PTP_CTL_PDLAY_REQ | \ + TG3_RX_PTP_CTL_PDLAY_RES) +#define TG3_RX_PTP_CTL_FOLLOW_UP 0x00000100 +#define TG3_RX_PTP_CTL_DELAY_RES 0x00000200 +#define TG3_RX_PTP_CTL_PDRES_FLW_UP 0x00000400 +#define TG3_RX_PTP_CTL_ANNOUNCE 0x00000800 +#define TG3_RX_PTP_CTL_SIGNALING 0x00001000 +#define TG3_RX_PTP_CTL_MANAGEMENT 0x00002000 +#define TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN 0x00800000 +#define TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN 0x01000000 +#define TG3_RX_PTP_CTL_RX_PTP_V2_EN (TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | \ + TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN) +#define TG3_RX_PTP_CTL_RX_PTP_V1_EN 0x02000000 +#define TG3_RX_PTP_CTL_HWTS_INTERLOCK 0x04000000 +/* 0x6cc --> 0x800 unused */ + +#define MAC_TX_STATS_OCTETS 0x00000800 +#define MAC_TX_STATS_RESV1 0x00000804 +#define MAC_TX_STATS_COLLISIONS 0x00000808 +#define MAC_TX_STATS_XON_SENT 0x0000080c +#define MAC_TX_STATS_XOFF_SENT 0x00000810 +#define MAC_TX_STATS_RESV2 0x00000814 +#define MAC_TX_STATS_MAC_ERRORS 0x00000818 +#define MAC_TX_STATS_SINGLE_COLLISIONS 0x0000081c +#define MAC_TX_STATS_MULT_COLLISIONS 0x00000820 +#define MAC_TX_STATS_DEFERRED 0x00000824 +#define MAC_TX_STATS_RESV3 0x00000828 +#define MAC_TX_STATS_EXCESSIVE_COL 0x0000082c +#define MAC_TX_STATS_LATE_COL 0x00000830 +#define MAC_TX_STATS_RESV4_1 0x00000834 +#define MAC_TX_STATS_RESV4_2 0x00000838 +#define MAC_TX_STATS_RESV4_3 0x0000083c +#define MAC_TX_STATS_RESV4_4 0x00000840 +#define MAC_TX_STATS_RESV4_5 0x00000844 +#define MAC_TX_STATS_RESV4_6 0x00000848 +#define MAC_TX_STATS_RESV4_7 0x0000084c +#define MAC_TX_STATS_RESV4_8 0x00000850 +#define MAC_TX_STATS_RESV4_9 0x00000854 +#define MAC_TX_STATS_RESV4_10 0x00000858 +#define MAC_TX_STATS_RESV4_11 0x0000085c +#define MAC_TX_STATS_RESV4_12 0x00000860 +#define MAC_TX_STATS_RESV4_13 0x00000864 +#define MAC_TX_STATS_RESV4_14 0x00000868 +#define MAC_TX_STATS_UCAST 0x0000086c +#define MAC_TX_STATS_MCAST 0x00000870 +#define MAC_TX_STATS_BCAST 0x00000874 +#define MAC_TX_STATS_RESV5_1 0x00000878 +#define MAC_TX_STATS_RESV5_2 0x0000087c +#define MAC_RX_STATS_OCTETS 0x00000880 +#define MAC_RX_STATS_RESV1 0x00000884 +#define MAC_RX_STATS_FRAGMENTS 0x00000888 +#define MAC_RX_STATS_UCAST 0x0000088c +#define MAC_RX_STATS_MCAST 0x00000890 +#define MAC_RX_STATS_BCAST 0x00000894 +#define MAC_RX_STATS_FCS_ERRORS 0x00000898 +#define MAC_RX_STATS_ALIGN_ERRORS 0x0000089c +#define MAC_RX_STATS_XON_PAUSE_RECVD 0x000008a0 +#define MAC_RX_STATS_XOFF_PAUSE_RECVD 0x000008a4 +#define MAC_RX_STATS_MAC_CTRL_RECVD 0x000008a8 +#define MAC_RX_STATS_XOFF_ENTERED 0x000008ac +#define MAC_RX_STATS_FRAME_TOO_LONG 0x000008b0 +#define MAC_RX_STATS_JABBERS 0x000008b4 +#define 
MAC_RX_STATS_UNDERSIZE 0x000008b8 +/* 0x8bc --> 0xc00 unused */ + +/* Send data initiator control registers */ +#define SNDDATAI_MODE 0x00000c00 +#define SNDDATAI_MODE_RESET 0x00000001 +#define SNDDATAI_MODE_ENABLE 0x00000002 +#define SNDDATAI_MODE_STAT_OFLOW_ENAB 0x00000004 +#define SNDDATAI_STATUS 0x00000c04 +#define SNDDATAI_STATUS_STAT_OFLOW 0x00000004 +#define SNDDATAI_STATSCTRL 0x00000c08 +#define SNDDATAI_SCTRL_ENABLE 0x00000001 +#define SNDDATAI_SCTRL_FASTUPD 0x00000002 +#define SNDDATAI_SCTRL_CLEAR 0x00000004 +#define SNDDATAI_SCTRL_FLUSH 0x00000008 +#define SNDDATAI_SCTRL_FORCE_ZERO 0x00000010 +#define SNDDATAI_STATSENAB 0x00000c0c +#define SNDDATAI_STATSINCMASK 0x00000c10 +#define ISO_PKT_TX 0x00000c20 +/* 0xc24 --> 0xc80 unused */ +#define SNDDATAI_COS_CNT_0 0x00000c80 +#define SNDDATAI_COS_CNT_1 0x00000c84 +#define SNDDATAI_COS_CNT_2 0x00000c88 +#define SNDDATAI_COS_CNT_3 0x00000c8c +#define SNDDATAI_COS_CNT_4 0x00000c90 +#define SNDDATAI_COS_CNT_5 0x00000c94 +#define SNDDATAI_COS_CNT_6 0x00000c98 +#define SNDDATAI_COS_CNT_7 0x00000c9c +#define SNDDATAI_COS_CNT_8 0x00000ca0 +#define SNDDATAI_COS_CNT_9 0x00000ca4 +#define SNDDATAI_COS_CNT_10 0x00000ca8 +#define SNDDATAI_COS_CNT_11 0x00000cac +#define SNDDATAI_COS_CNT_12 0x00000cb0 +#define SNDDATAI_COS_CNT_13 0x00000cb4 +#define SNDDATAI_COS_CNT_14 0x00000cb8 +#define SNDDATAI_COS_CNT_15 0x00000cbc +#define SNDDATAI_DMA_RDQ_FULL_CNT 0x00000cc0 +#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT 0x00000cc4 +#define SNDDATAI_SDCQ_FULL_CNT 0x00000cc8 +#define SNDDATAI_NICRNG_SSND_PIDX_CNT 0x00000ccc +#define SNDDATAI_STATS_UPDATED_CNT 0x00000cd0 +#define SNDDATAI_INTERRUPTS_CNT 0x00000cd4 +#define SNDDATAI_AVOID_INTERRUPTS_CNT 0x00000cd8 +#define SNDDATAI_SND_THRESH_HIT_CNT 0x00000cdc +/* 0xce0 --> 0x1000 unused */ + +/* Send data completion control registers */ +#define SNDDATAC_MODE 0x00001000 +#define SNDDATAC_MODE_RESET 0x00000001 +#define SNDDATAC_MODE_ENABLE 0x00000002 +#define SNDDATAC_MODE_CDELAY 0x00000010 +/* 0x1004 --> 0x1400 unused */ + +/* Send BD ring selector */ +#define SNDBDS_MODE 0x00001400 +#define SNDBDS_MODE_RESET 0x00000001 +#define SNDBDS_MODE_ENABLE 0x00000002 +#define SNDBDS_MODE_ATTN_ENABLE 0x00000004 +#define SNDBDS_STATUS 0x00001404 +#define SNDBDS_STATUS_ERROR_ATTN 0x00000004 +#define SNDBDS_HWDIAG 0x00001408 +/* 0x140c --> 0x1440 */ +#define SNDBDS_SEL_CON_IDX_0 0x00001440 +#define SNDBDS_SEL_CON_IDX_1 0x00001444 +#define SNDBDS_SEL_CON_IDX_2 0x00001448 +#define SNDBDS_SEL_CON_IDX_3 0x0000144c +#define SNDBDS_SEL_CON_IDX_4 0x00001450 +#define SNDBDS_SEL_CON_IDX_5 0x00001454 +#define SNDBDS_SEL_CON_IDX_6 0x00001458 +#define SNDBDS_SEL_CON_IDX_7 0x0000145c +#define SNDBDS_SEL_CON_IDX_8 0x00001460 +#define SNDBDS_SEL_CON_IDX_9 0x00001464 +#define SNDBDS_SEL_CON_IDX_10 0x00001468 +#define SNDBDS_SEL_CON_IDX_11 0x0000146c +#define SNDBDS_SEL_CON_IDX_12 0x00001470 +#define SNDBDS_SEL_CON_IDX_13 0x00001474 +#define SNDBDS_SEL_CON_IDX_14 0x00001478 +#define SNDBDS_SEL_CON_IDX_15 0x0000147c +/* 0x1480 --> 0x1800 unused */ + +/* Send BD initiator control registers */ +#define SNDBDI_MODE 0x00001800 +#define SNDBDI_MODE_RESET 0x00000001 +#define SNDBDI_MODE_ENABLE 0x00000002 +#define SNDBDI_MODE_ATTN_ENABLE 0x00000004 +#define SNDBDI_MODE_MULTI_TXQ_EN 0x00000020 +#define SNDBDI_STATUS 0x00001804 +#define SNDBDI_STATUS_ERROR_ATTN 0x00000004 +#define SNDBDI_IN_PROD_IDX_0 0x00001808 +#define SNDBDI_IN_PROD_IDX_1 0x0000180c +#define SNDBDI_IN_PROD_IDX_2 0x00001810 +#define SNDBDI_IN_PROD_IDX_3 0x00001814 +#define 
SNDBDI_IN_PROD_IDX_4 0x00001818 +#define SNDBDI_IN_PROD_IDX_5 0x0000181c +#define SNDBDI_IN_PROD_IDX_6 0x00001820 +#define SNDBDI_IN_PROD_IDX_7 0x00001824 +#define SNDBDI_IN_PROD_IDX_8 0x00001828 +#define SNDBDI_IN_PROD_IDX_9 0x0000182c +#define SNDBDI_IN_PROD_IDX_10 0x00001830 +#define SNDBDI_IN_PROD_IDX_11 0x00001834 +#define SNDBDI_IN_PROD_IDX_12 0x00001838 +#define SNDBDI_IN_PROD_IDX_13 0x0000183c +#define SNDBDI_IN_PROD_IDX_14 0x00001840 +#define SNDBDI_IN_PROD_IDX_15 0x00001844 +/* 0x1848 --> 0x1c00 unused */ + +/* Send BD completion control registers */ +#define SNDBDC_MODE 0x00001c00 +#define SNDBDC_MODE_RESET 0x00000001 +#define SNDBDC_MODE_ENABLE 0x00000002 +#define SNDBDC_MODE_ATTN_ENABLE 0x00000004 +/* 0x1c04 --> 0x2000 unused */ + +/* Receive list placement control registers */ +#define RCVLPC_MODE 0x00002000 +#define RCVLPC_MODE_RESET 0x00000001 +#define RCVLPC_MODE_ENABLE 0x00000002 +#define RCVLPC_MODE_CLASS0_ATTN_ENAB 0x00000004 +#define RCVLPC_MODE_MAPOOR_AATTN_ENAB 0x00000008 +#define RCVLPC_MODE_STAT_OFLOW_ENAB 0x00000010 +#define RCVLPC_STATUS 0x00002004 +#define RCVLPC_STATUS_CLASS0 0x00000004 +#define RCVLPC_STATUS_MAPOOR 0x00000008 +#define RCVLPC_STATUS_STAT_OFLOW 0x00000010 +#define RCVLPC_LOCK 0x00002008 +#define RCVLPC_LOCK_REQ_MASK 0x0000ffff +#define RCVLPC_LOCK_REQ_SHIFT 0 +#define RCVLPC_LOCK_GRANT_MASK 0xffff0000 +#define RCVLPC_LOCK_GRANT_SHIFT 16 +#define RCVLPC_NON_EMPTY_BITS 0x0000200c +#define RCVLPC_NON_EMPTY_BITS_MASK 0x0000ffff +#define RCVLPC_CONFIG 0x00002010 +#define RCVLPC_STATSCTRL 0x00002014 +#define RCVLPC_STATSCTRL_ENABLE 0x00000001 +#define RCVLPC_STATSCTRL_FASTUPD 0x00000002 +#define RCVLPC_STATS_ENABLE 0x00002018 +#define RCVLPC_STATSENAB_ASF_FIX 0x00000002 +#define RCVLPC_STATSENAB_DACK_FIX 0x00040000 +#define RCVLPC_STATSENAB_LNGBRST_RFIX 0x00400000 +#define RCVLPC_STATS_INCMASK 0x0000201c +/* 0x2020 --> 0x2100 unused */ +#define RCVLPC_SELLST_BASE 0x00002100 /* 16 16-byte entries */ +#define SELLST_TAIL 0x00000004 +#define SELLST_CONT 0x00000008 +#define SELLST_UNUSED 0x0000000c +#define RCVLPC_COS_CNTL_BASE 0x00002200 /* 16 4-byte entries */ +#define RCVLPC_DROP_FILTER_CNT 0x00002240 +#define RCVLPC_DMA_WQ_FULL_CNT 0x00002244 +#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT 0x00002248 +#define RCVLPC_NO_RCV_BD_CNT 0x0000224c +#define RCVLPC_IN_DISCARDS_CNT 0x00002250 +#define RCVLPC_IN_ERRORS_CNT 0x00002254 +#define RCVLPC_RCV_THRESH_HIT_CNT 0x00002258 +/* 0x225c --> 0x2400 unused */ + +/* Receive Data and Receive BD Initiator Control */ +#define RCVDBDI_MODE 0x00002400 +#define RCVDBDI_MODE_RESET 0x00000001 +#define RCVDBDI_MODE_ENABLE 0x00000002 +#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004 +#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008 +#define RCVDBDI_MODE_INV_RING_SZ 0x00000010 +#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000 +#define RCVDBDI_STATUS 0x00002404 +#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004 +#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008 +#define RCVDBDI_STATUS_INV_RING_SZ 0x00000010 +#define RCVDBDI_SPLIT_FRAME_MINSZ 0x00002408 +/* 0x240c --> 0x2440 unused */ +#define RCVDBDI_JUMBO_BD 0x00002440 /* TG3_BDINFO_... */ +#define RCVDBDI_STD_BD 0x00002450 /* TG3_BDINFO_... */ +#define RCVDBDI_MINI_BD 0x00002460 /* TG3_BDINFO_... 
*/ +#define RCVDBDI_JUMBO_CON_IDX 0x00002470 +#define RCVDBDI_STD_CON_IDX 0x00002474 +#define RCVDBDI_MINI_CON_IDX 0x00002478 +/* 0x247c --> 0x2480 unused */ +#define RCVDBDI_BD_PROD_IDX_0 0x00002480 +#define RCVDBDI_BD_PROD_IDX_1 0x00002484 +#define RCVDBDI_BD_PROD_IDX_2 0x00002488 +#define RCVDBDI_BD_PROD_IDX_3 0x0000248c +#define RCVDBDI_BD_PROD_IDX_4 0x00002490 +#define RCVDBDI_BD_PROD_IDX_5 0x00002494 +#define RCVDBDI_BD_PROD_IDX_6 0x00002498 +#define RCVDBDI_BD_PROD_IDX_7 0x0000249c +#define RCVDBDI_BD_PROD_IDX_8 0x000024a0 +#define RCVDBDI_BD_PROD_IDX_9 0x000024a4 +#define RCVDBDI_BD_PROD_IDX_10 0x000024a8 +#define RCVDBDI_BD_PROD_IDX_11 0x000024ac +#define RCVDBDI_BD_PROD_IDX_12 0x000024b0 +#define RCVDBDI_BD_PROD_IDX_13 0x000024b4 +#define RCVDBDI_BD_PROD_IDX_14 0x000024b8 +#define RCVDBDI_BD_PROD_IDX_15 0x000024bc +#define RCVDBDI_HWDIAG 0x000024c0 +/* 0x24c4 --> 0x2800 unused */ + +/* Receive Data Completion Control */ +#define RCVDCC_MODE 0x00002800 +#define RCVDCC_MODE_RESET 0x00000001 +#define RCVDCC_MODE_ENABLE 0x00000002 +#define RCVDCC_MODE_ATTN_ENABLE 0x00000004 +/* 0x2804 --> 0x2c00 unused */ + +/* Receive BD Initiator Control Registers */ +#define RCVBDI_MODE 0x00002c00 +#define RCVBDI_MODE_RESET 0x00000001 +#define RCVBDI_MODE_ENABLE 0x00000002 +#define RCVBDI_MODE_RCB_ATTN_ENAB 0x00000004 +#define RCVBDI_STATUS 0x00002c04 +#define RCVBDI_STATUS_RCB_ATTN 0x00000004 +#define RCVBDI_JUMBO_PROD_IDX 0x00002c08 +#define RCVBDI_STD_PROD_IDX 0x00002c0c +#define RCVBDI_MINI_PROD_IDX 0x00002c10 +#define RCVBDI_MINI_THRESH 0x00002c14 +#define RCVBDI_STD_THRESH 0x00002c18 +#define RCVBDI_JUMBO_THRESH 0x00002c1c +/* 0x2c20 --> 0x2d00 unused */ + +#define STD_REPLENISH_LWM 0x00002d00 +#define JMB_REPLENISH_LWM 0x00002d04 +/* 0x2d08 --> 0x3000 unused */ + +/* Receive BD Completion Control Registers */ +#define RCVCC_MODE 0x00003000 +#define RCVCC_MODE_RESET 0x00000001 +#define RCVCC_MODE_ENABLE 0x00000002 +#define RCVCC_MODE_ATTN_ENABLE 0x00000004 +#define RCVCC_STATUS 0x00003004 +#define RCVCC_STATUS_ERROR_ATTN 0x00000004 +#define RCVCC_JUMP_PROD_IDX 0x00003008 +#define RCVCC_STD_PROD_IDX 0x0000300c +#define RCVCC_MINI_PROD_IDX 0x00003010 +/* 0x3014 --> 0x3400 unused */ + +/* Receive list selector control registers */ +#define RCVLSC_MODE 0x00003400 +#define RCVLSC_MODE_RESET 0x00000001 +#define RCVLSC_MODE_ENABLE 0x00000002 +#define RCVLSC_MODE_ATTN_ENABLE 0x00000004 +#define RCVLSC_STATUS 0x00003404 +#define RCVLSC_STATUS_ERROR_ATTN 0x00000004 +/* 0x3408 --> 0x3600 unused */ + +#define TG3_CPMU_DRV_STATUS 0x0000344c + +/* CPMU registers */ +#define TG3_CPMU_CTRL 0x00003600 +#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200 +#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400 +#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000 +#define CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000 +#define TG3_CPMU_LSPD_10MB_CLK 0x00003604 +#define CPMU_LSPD_10MB_MACCLK_MASK 0x001f0000 +#define CPMU_LSPD_10MB_MACCLK_6_25 0x00130000 +/* 0x3608 --> 0x360c unused */ + +#define TG3_CPMU_LSPD_1000MB_CLK 0x0000360c +#define CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000 +#define CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000 +#define CPMU_LSPD_1000MB_MACCLK_MASK 0x001f0000 +#define TG3_CPMU_LNK_AWARE_PWRMD 0x00003610 +#define CPMU_LNK_AWARE_MACCLK_MASK 0x001f0000 +#define CPMU_LNK_AWARE_MACCLK_6_25 0x00130000 +/* 0x3614 --> 0x361c unused */ + +#define TG3_CPMU_HST_ACC 0x0000361c +#define CPMU_HST_ACC_MACCLK_MASK 0x001f0000 +#define CPMU_HST_ACC_MACCLK_6_25 0x00130000 +/* 0x3620 --> 0x3630 unused */ + +#define TG3_CPMU_CLCK_ORIDE 0x00003624 
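The CPMU clock-speed registers above are mask/value field pairs (CPMU_LSPD_10MB_MACCLK_MASK selects a 5-bit field, CPMU_LSPD_10MB_MACCLK_6_25 is one encoding of it). A minimal sketch of the read-modify-write such pairs imply; the helper name and the "regs" pointer are hypothetical and this is not code from the patch:

#include <linux/io.h>		/* readl(), writel() */
#include <linux/types.h>	/* u32 */
/* plus the register definitions from this header */

/* Hypothetical helper: "regs" is an ioremap()'d view of the register BAR. */
static inline void tg3_example_select_10mb_macclk(void __iomem *regs)
{
	u32 val = readl(regs + TG3_CPMU_LSPD_10MB_CLK);

	val &= ~CPMU_LSPD_10MB_MACCLK_MASK;	/* clear the 5-bit clock field */
	val |= CPMU_LSPD_10MB_MACCLK_6_25;	/* select the 6.25 MHz encoding */
	writel(val, regs + TG3_CPMU_LSPD_10MB_CLK);
}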
+#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 + +#define TG3_CPMU_CLCK_ORIDE_ENABLE 0x00003628 +#define TG3_CPMU_MAC_ORIDE_ENABLE (1 << 13) + +#define TG3_CPMU_STATUS 0x0000362c +#define TG3_CPMU_STATUS_FMSK_5717 0x20000000 +#define TG3_CPMU_STATUS_FMSK_5719 0xc0000000 +#define TG3_CPMU_STATUS_FSHFT_5719 30 +#define TG3_CPMU_STATUS_LINK_MASK 0x180000 + +#define TG3_CPMU_CLCK_STAT 0x00003630 +#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 +#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 +#define CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000 +#define CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000 +/* 0x3634 --> 0x365c unused */ + +#define TG3_CPMU_MUTEX_REQ 0x0000365c +#define CPMU_MUTEX_REQ_DRIVER 0x00001000 +#define TG3_CPMU_MUTEX_GNT 0x00003660 +#define CPMU_MUTEX_GNT_DRIVER 0x00001000 +#define TG3_CPMU_PHY_STRAP 0x00003664 +#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 +#define TG3_CPMU_PADRNG_CTL 0x00003668 +#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000 +/* 0x3664 --> 0x36b0 unused */ + +#define TG3_CPMU_EEE_MODE 0x000036b0 +#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004 +#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 +#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040 +#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 +#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 +#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 +#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 +#define TG3_CPMU_EEE_DBTMR1 0x000036b4 +#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 +#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff +#define TG3_CPMU_DBTMR1_LNKIDLE_MAX 0x0000ffff +#define TG3_CPMU_EEE_DBTMR2 0x000036b8 +#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 +#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff +#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc +#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 +#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 +#define TG3_CPMU_EEE_LNKIDL_APE_TX_MT 0x00000002 +/* 0x36c0 --> 0x36d0 unused */ + +#define TG3_CPMU_EEE_CTRL 0x000036d0 +#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d +#define TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384 +#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8 +/* 0x36d4 --> 0x3800 unused */ + +/* Mbuf cluster free registers */ +#define MBFREE_MODE 0x00003800 +#define MBFREE_MODE_RESET 0x00000001 +#define MBFREE_MODE_ENABLE 0x00000002 +#define MBFREE_STATUS 0x00003804 +/* 0x3808 --> 0x3c00 unused */ + +/* Host coalescing control registers */ +#define HOSTCC_MODE 0x00003c00 +#define HOSTCC_MODE_RESET 0x00000001 +#define HOSTCC_MODE_ENABLE 0x00000002 +#define HOSTCC_MODE_ATTN 0x00000004 +#define HOSTCC_MODE_NOW 0x00000008 +#define HOSTCC_MODE_FULL_STATUS 0x00000000 +#define HOSTCC_MODE_64BYTE 0x00000080 +#define HOSTCC_MODE_32BYTE 0x00000100 +#define HOSTCC_MODE_CLRTICK_RXBD 0x00000200 +#define HOSTCC_MODE_CLRTICK_TXBD 0x00000400 +#define HOSTCC_MODE_NOINT_ON_NOW 0x00000800 +#define HOSTCC_MODE_NOINT_ON_FORCE 0x00001000 +#define HOSTCC_MODE_COAL_VEC1_NOW 0x00002000 +#define HOSTCC_STATUS 0x00003c04 +#define HOSTCC_STATUS_ERROR_ATTN 0x00000004 +#define HOSTCC_RXCOL_TICKS 0x00003c08 +#define LOW_RXCOL_TICKS 0x00000032 +#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014 +#define DEFAULT_RXCOL_TICKS 0x00000048 +#define HIGH_RXCOL_TICKS 0x00000096 +#define MAX_RXCOL_TICKS 0x000003ff +#define HOSTCC_TXCOL_TICKS 0x00003c0c +#define LOW_TXCOL_TICKS 0x00000096 +#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048 +#define DEFAULT_TXCOL_TICKS 0x0000012c +#define HIGH_TXCOL_TICKS 0x00000145 +#define MAX_TXCOL_TICKS 0x000003ff +#define HOSTCC_RXMAX_FRAMES 0x00003c10 +#define LOW_RXMAX_FRAMES 0x00000005 
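The host-coalescing block pairs a tick threshold with a frame-count threshold per direction, and the LOW_/DEFAULT_/HIGH_/MAX_ constants above are canned operating points for those registers. A minimal sketch of loading a couple of them; the helper is hypothetical and real initialisation touches more registers than this:

#include <linux/io.h>
/* plus the register definitions from this header */

/* Hypothetical helper writing canned coalescing thresholds. */
static inline void tg3_example_coal_thresholds(void __iomem *regs)
{
	writel(DEFAULT_RXCOL_TICKS, regs + HOSTCC_RXCOL_TICKS);
	writel(DEFAULT_TXCOL_TICKS, regs + HOSTCC_TXCOL_TICKS);
	writel(LOW_RXMAX_FRAMES, regs + HOSTCC_RXMAX_FRAMES);
	/* the TXMAX_FRAMES and *_COAL_*_INT registers follow the same pattern */
}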
+#define DEFAULT_RXMAX_FRAMES 0x00000008 +#define HIGH_RXMAX_FRAMES 0x00000012 +#define MAX_RXMAX_FRAMES 0x000000ff +#define HOSTCC_TXMAX_FRAMES 0x00003c14 +#define LOW_TXMAX_FRAMES 0x00000035 +#define DEFAULT_TXMAX_FRAMES 0x0000004b +#define HIGH_TXMAX_FRAMES 0x00000052 +#define MAX_TXMAX_FRAMES 0x000000ff +#define HOSTCC_RXCOAL_TICK_INT 0x00003c18 +#define DEFAULT_RXCOAL_TICK_INT 0x00000019 +#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014 +#define MAX_RXCOAL_TICK_INT 0x000003ff +#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c +#define DEFAULT_TXCOAL_TICK_INT 0x00000019 +#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014 +#define MAX_TXCOAL_TICK_INT 0x000003ff +#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20 +#define DEFAULT_RXCOAL_MAXF_INT 0x00000005 +#define MAX_RXCOAL_MAXF_INT 0x000000ff +#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24 +#define DEFAULT_TXCOAL_MAXF_INT 0x00000005 +#define MAX_TXCOAL_MAXF_INT 0x000000ff +#define HOSTCC_STAT_COAL_TICKS 0x00003c28 +#define DEFAULT_STAT_COAL_TICKS 0x000f4240 +#define MAX_STAT_COAL_TICKS 0xd693d400 +#define MIN_STAT_COAL_TICKS 0x00000064 +/* 0x3c2c --> 0x3c30 unused */ +#define HOSTCC_STATS_BLK_HOST_ADDR 0x00003c30 /* 64-bit */ +#define HOSTCC_STATUS_BLK_HOST_ADDR 0x00003c38 /* 64-bit */ +#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40 +#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44 +#define HOSTCC_FLOW_ATTN 0x00003c48 +#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040 +/* 0x3c4c --> 0x3c50 unused */ +#define HOSTCC_JUMBO_CON_IDX 0x00003c50 +#define HOSTCC_STD_CON_IDX 0x00003c54 +#define HOSTCC_MINI_CON_IDX 0x00003c58 +/* 0x3c5c --> 0x3c80 unused */ +#define HOSTCC_RET_PROD_IDX_0 0x00003c80 +#define HOSTCC_RET_PROD_IDX_1 0x00003c84 +#define HOSTCC_RET_PROD_IDX_2 0x00003c88 +#define HOSTCC_RET_PROD_IDX_3 0x00003c8c +#define HOSTCC_RET_PROD_IDX_4 0x00003c90 +#define HOSTCC_RET_PROD_IDX_5 0x00003c94 +#define HOSTCC_RET_PROD_IDX_6 0x00003c98 +#define HOSTCC_RET_PROD_IDX_7 0x00003c9c +#define HOSTCC_RET_PROD_IDX_8 0x00003ca0 +#define HOSTCC_RET_PROD_IDX_9 0x00003ca4 +#define HOSTCC_RET_PROD_IDX_10 0x00003ca8 +#define HOSTCC_RET_PROD_IDX_11 0x00003cac +#define HOSTCC_RET_PROD_IDX_12 0x00003cb0 +#define HOSTCC_RET_PROD_IDX_13 0x00003cb4 +#define HOSTCC_RET_PROD_IDX_14 0x00003cb8 +#define HOSTCC_RET_PROD_IDX_15 0x00003cbc +#define HOSTCC_SND_CON_IDX_0 0x00003cc0 +#define HOSTCC_SND_CON_IDX_1 0x00003cc4 +#define HOSTCC_SND_CON_IDX_2 0x00003cc8 +#define HOSTCC_SND_CON_IDX_3 0x00003ccc +#define HOSTCC_SND_CON_IDX_4 0x00003cd0 +#define HOSTCC_SND_CON_IDX_5 0x00003cd4 +#define HOSTCC_SND_CON_IDX_6 0x00003cd8 +#define HOSTCC_SND_CON_IDX_7 0x00003cdc +#define HOSTCC_SND_CON_IDX_8 0x00003ce0 +#define HOSTCC_SND_CON_IDX_9 0x00003ce4 +#define HOSTCC_SND_CON_IDX_10 0x00003ce8 +#define HOSTCC_SND_CON_IDX_11 0x00003cec +#define HOSTCC_SND_CON_IDX_12 0x00003cf0 +#define HOSTCC_SND_CON_IDX_13 0x00003cf4 +#define HOSTCC_SND_CON_IDX_14 0x00003cf8 +#define HOSTCC_SND_CON_IDX_15 0x00003cfc +#define HOSTCC_STATBLCK_RING1 0x00003d00 +/* 0x3d00 --> 0x3d80 unused */ + +#define HOSTCC_RXCOL_TICKS_VEC1 0x00003d80 +#define HOSTCC_TXCOL_TICKS_VEC1 0x00003d84 +#define HOSTCC_RXMAX_FRAMES_VEC1 0x00003d88 +#define HOSTCC_TXMAX_FRAMES_VEC1 0x00003d8c +#define HOSTCC_RXCOAL_MAXF_INT_VEC1 0x00003d90 +#define HOSTCC_TXCOAL_MAXF_INT_VEC1 0x00003d94 +/* 0x3d98 --> 0x4000 unused */ + +/* Memory arbiter control registers */ +#define MEMARB_MODE 0x00004000 +#define MEMARB_MODE_RESET 0x00000001 +#define MEMARB_MODE_ENABLE 0x00000002 +#define MEMARB_STATUS 0x00004004 +#define MEMARB_TRAP_ADDR_LOW 0x00004008 
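Each functional block defined above and below (SNDDATAI, HOSTCC, MEMARB, and so on) exposes a MODE register with RESET and ENABLE bits. The sketch below shows the disable-and-poll idiom such ENABLE bits invite when a block must be quiesced before reconfiguration; whether a given block is actually torn down this way, and the helper name and timeout, are assumptions of the example:

#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>	/* udelay() */
#include <linux/errno.h>	/* EBUSY */
/* plus the register definitions from this header */

/* Hypothetical sketch: clear a block's ENABLE bit and wait for it to drop. */
static inline int tg3_example_quiesce_block(void __iomem *regs, u32 mode_reg,
					    u32 enable_bit)
{
	int i;

	writel(readl(regs + mode_reg) & ~enable_bit, regs + mode_reg);
	for (i = 0; i < 1000; i++) {
		if (!(readl(regs + mode_reg) & enable_bit))
			return 0;	/* block reports itself idle */
		udelay(10);
	}
	return -EBUSY;
}

/* e.g. tg3_example_quiesce_block(regs, HOSTCC_MODE, HOSTCC_MODE_ENABLE) */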
+#define MEMARB_TRAP_ADDR_HIGH 0x0000400c +/* 0x4010 --> 0x4400 unused */ + +/* Buffer manager control registers */ +#define BUFMGR_MODE 0x00004400 +#define BUFMGR_MODE_RESET 0x00000001 +#define BUFMGR_MODE_ENABLE 0x00000002 +#define BUFMGR_MODE_ATTN_ENABLE 0x00000004 +#define BUFMGR_MODE_BM_TEST 0x00000008 +#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010 +#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000 +#define BUFMGR_STATUS 0x00004404 +#define BUFMGR_STATUS_ERROR 0x00000004 +#define BUFMGR_STATUS_MBLOW 0x00000010 +#define BUFMGR_MB_POOL_ADDR 0x00004408 +#define BUFMGR_MB_POOL_SIZE 0x0000440c +#define BUFMGR_MB_RDMA_LOW_WATER 0x00004410 +#define DEFAULT_MB_RDMA_LOW_WATER 0x00000050 +#define DEFAULT_MB_RDMA_LOW_WATER_5705 0x00000000 +#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130 +#define DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780 0x00000000 +#define BUFMGR_MB_MACRX_LOW_WATER 0x00004414 +#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020 +#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010 +#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004 +#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a +#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098 +#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b +#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e +#define BUFMGR_MB_HIGH_WATER 0x00004418 +#define DEFAULT_MB_HIGH_WATER 0x00000060 +#define DEFAULT_MB_HIGH_WATER_5705 0x00000060 +#define DEFAULT_MB_HIGH_WATER_5906 0x00000010 +#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0 +#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c +#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096 +#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea +#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c +#define BUFMGR_MB_ALLOC_BIT 0x10000000 +#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420 +#define BUFMGR_TX_MB_ALLOC_REQ 0x00004424 +#define BUFMGR_TX_MB_ALLOC_RESP 0x00004428 +#define BUFMGR_DMA_DESC_POOL_ADDR 0x0000442c +#define BUFMGR_DMA_DESC_POOL_SIZE 0x00004430 +#define BUFMGR_DMA_LOW_WATER 0x00004434 +#define DEFAULT_DMA_LOW_WATER 0x00000005 +#define BUFMGR_DMA_HIGH_WATER 0x00004438 +#define DEFAULT_DMA_HIGH_WATER 0x0000000a +#define BUFMGR_RX_DMA_ALLOC_REQ 0x0000443c +#define BUFMGR_RX_DMA_ALLOC_RESP 0x00004440 +#define BUFMGR_TX_DMA_ALLOC_REQ 0x00004444 +#define BUFMGR_TX_DMA_ALLOC_RESP 0x00004448 +#define BUFMGR_HWDIAG_0 0x0000444c +#define BUFMGR_HWDIAG_1 0x00004450 +#define BUFMGR_HWDIAG_2 0x00004454 +/* 0x4458 --> 0x4800 unused */ + +/* Read DMA control registers */ +#define RDMAC_MODE 0x00004800 +#define RDMAC_MODE_RESET 0x00000001 +#define RDMAC_MODE_ENABLE 0x00000002 +#define RDMAC_MODE_TGTABORT_ENAB 0x00000004 +#define RDMAC_MODE_MSTABORT_ENAB 0x00000008 +#define RDMAC_MODE_PARITYERR_ENAB 0x00000010 +#define RDMAC_MODE_ADDROFLOW_ENAB 0x00000020 +#define RDMAC_MODE_FIFOOFLOW_ENAB 0x00000040 +#define RDMAC_MODE_FIFOURUN_ENAB 0x00000080 +#define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100 +#define RDMAC_MODE_LNGREAD_ENAB 0x00000200 +#define RDMAC_MODE_SPLIT_ENABLE 0x00000800 +#define RDMAC_MODE_BD_SBD_CRPT_ENAB 0x00000800 +#define RDMAC_MODE_SPLIT_RESET 0x00001000 +#define RDMAC_MODE_MBUF_RBD_CRPT_ENAB 0x00001000 +#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 +#define RDMAC_MODE_FIFO_SIZE_128 0x00020000 +#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 +#define RDMAC_MODE_JMB_2K_MMRR 0x00800000 +#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000 +#define RDMAC_MODE_IPV4_LSO_EN 0x08000000 +#define RDMAC_MODE_IPV6_LSO_EN 0x10000000 +#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000 +#define RDMAC_STATUS 0x00004804 +#define 
RDMAC_STATUS_TGTABORT 0x00000004 +#define RDMAC_STATUS_MSTABORT 0x00000008 +#define RDMAC_STATUS_PARITYERR 0x00000010 +#define RDMAC_STATUS_ADDROFLOW 0x00000020 +#define RDMAC_STATUS_FIFOOFLOW 0x00000040 +#define RDMAC_STATUS_FIFOURUN 0x00000080 +#define RDMAC_STATUS_FIFOOREAD 0x00000100 +#define RDMAC_STATUS_LNGREAD 0x00000200 +/* 0x4808 --> 0x4890 unused */ + +#define TG3_RDMA_RSRVCTRL_REG2 0x00004890 +#define TG3_LSO_RD_DMA_CRPTEN_CTRL2 0x000048a0 + +#define TG3_RDMA_RSRVCTRL_REG 0x00004900 +#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 +#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00 +#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0 +#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000 +#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000 +#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 +#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 +/* 0x4904 --> 0x4910 unused */ + +#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 +#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 +#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000 +#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000 +#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000 +/* 0x4914 --> 0x4be0 unused */ + +#define TG3_NUM_RDMA_CHANNELS 4 +#define TG3_RDMA_LENGTH 0x00004be0 + +/* Write DMA control registers */ +#define WDMAC_MODE 0x00004c00 +#define WDMAC_MODE_RESET 0x00000001 +#define WDMAC_MODE_ENABLE 0x00000002 +#define WDMAC_MODE_TGTABORT_ENAB 0x00000004 +#define WDMAC_MODE_MSTABORT_ENAB 0x00000008 +#define WDMAC_MODE_PARITYERR_ENAB 0x00000010 +#define WDMAC_MODE_ADDROFLOW_ENAB 0x00000020 +#define WDMAC_MODE_FIFOOFLOW_ENAB 0x00000040 +#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080 +#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 +#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 +#define WDMAC_MODE_RX_ACCEL 0x00000400 +#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000 +#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000 +#define WDMAC_STATUS 0x00004c04 +#define WDMAC_STATUS_TGTABORT 0x00000004 +#define WDMAC_STATUS_MSTABORT 0x00000008 +#define WDMAC_STATUS_PARITYERR 0x00000010 +#define WDMAC_STATUS_ADDROFLOW 0x00000020 +#define WDMAC_STATUS_FIFOOFLOW 0x00000040 +#define WDMAC_STATUS_FIFOURUN 0x00000080 +#define WDMAC_STATUS_FIFOOREAD 0x00000100 +#define WDMAC_STATUS_LNGREAD 0x00000200 +/* 0x4c08 --> 0x5000 unused */ + +/* Per-cpu register offsets (arm9) */ +#define CPU_MODE 0x00000000 +#define CPU_MODE_RESET 0x00000001 +#define CPU_MODE_HALT 0x00000400 +#define CPU_STATE 0x00000004 +#define CPU_EVTMASK 0x00000008 +/* 0xc --> 0x1c reserved */ +#define CPU_PC 0x0000001c +#define CPU_INSN 0x00000020 +#define CPU_SPAD_UFLOW 0x00000024 +#define CPU_WDOG_CLEAR 0x00000028 +#define CPU_WDOG_VECTOR 0x0000002c +#define CPU_WDOG_PC 0x00000030 +#define CPU_HW_BP 0x00000034 +/* 0x38 --> 0x44 unused */ +#define CPU_WDOG_SAVED_STATE 0x00000044 +#define CPU_LAST_BRANCH_ADDR 0x00000048 +#define CPU_SPAD_UFLOW_SET 0x0000004c +/* 0x50 --> 0x200 unused */ +#define CPU_R0 0x00000200 +#define CPU_R1 0x00000204 +#define CPU_R2 0x00000208 +#define CPU_R3 0x0000020c +#define CPU_R4 0x00000210 +#define CPU_R5 0x00000214 +#define CPU_R6 0x00000218 +#define CPU_R7 0x0000021c +#define CPU_R8 0x00000220 +#define CPU_R9 0x00000224 +#define CPU_R10 0x00000228 +#define CPU_R11 0x0000022c +#define CPU_R12 0x00000230 +#define CPU_R13 0x00000234 +#define CPU_R14 0x00000238 +#define CPU_R15 0x0000023c +#define CPU_R16 0x00000240 +#define CPU_R17 0x00000244 +#define CPU_R18 0x00000248 +#define CPU_R19 0x0000024c +#define CPU_R20 0x00000250 +#define CPU_R21 0x00000254 +#define 
CPU_R22 0x00000258 +#define CPU_R23 0x0000025c +#define CPU_R24 0x00000260 +#define CPU_R25 0x00000264 +#define CPU_R26 0x00000268 +#define CPU_R27 0x0000026c +#define CPU_R28 0x00000270 +#define CPU_R29 0x00000274 +#define CPU_R30 0x00000278 +#define CPU_R31 0x0000027c +/* 0x280 --> 0x400 unused */ + +#define RX_CPU_BASE 0x00005000 +#define RX_CPU_MODE 0x00005000 +#define RX_CPU_STATE 0x00005004 +#define RX_CPU_PGMCTR 0x0000501c +#define RX_CPU_HWBKPT 0x00005034 +#define TX_CPU_BASE 0x00005400 +#define TX_CPU_MODE 0x00005400 +#define TX_CPU_STATE 0x00005404 +#define TX_CPU_PGMCTR 0x0000541c + +#define VCPU_STATUS 0x00005100 +#define VCPU_STATUS_INIT_DONE 0x04000000 +#define VCPU_STATUS_DRV_RESET 0x08000000 + +#define VCPU_CFGSHDW 0x00005104 +#define VCPU_CFGSHDW_WOL_ENABLE 0x00000001 +#define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004 +#define VCPU_CFGSHDW_ASPM_DBNC 0x00001000 + +/* Mailboxes */ +#define GRCMBOX_BASE 0x00005600 +#define GRCMBOX_INTERRUPT_0 0x00005800 /* 64-bit */ +#define GRCMBOX_INTERRUPT_1 0x00005808 /* 64-bit */ +#define GRCMBOX_INTERRUPT_2 0x00005810 /* 64-bit */ +#define GRCMBOX_INTERRUPT_3 0x00005818 /* 64-bit */ +#define GRCMBOX_GENERAL_0 0x00005820 /* 64-bit */ +#define GRCMBOX_GENERAL_1 0x00005828 /* 64-bit */ +#define GRCMBOX_GENERAL_2 0x00005830 /* 64-bit */ +#define GRCMBOX_GENERAL_3 0x00005838 /* 64-bit */ +#define GRCMBOX_GENERAL_4 0x00005840 /* 64-bit */ +#define GRCMBOX_GENERAL_5 0x00005848 /* 64-bit */ +#define GRCMBOX_GENERAL_6 0x00005850 /* 64-bit */ +#define GRCMBOX_GENERAL_7 0x00005858 /* 64-bit */ +#define GRCMBOX_RELOAD_STAT 0x00005860 /* 64-bit */ +#define GRCMBOX_RCVSTD_PROD_IDX 0x00005868 /* 64-bit */ +#define GRCMBOX_RCVJUMBO_PROD_IDX 0x00005870 /* 64-bit */ +#define GRCMBOX_RCVMINI_PROD_IDX 0x00005878 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_0 0x00005880 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_1 0x00005888 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_2 0x00005890 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_3 0x00005898 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_4 0x000058a0 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_5 0x000058a8 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_6 0x000058b0 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_7 0x000058b8 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_8 0x000058c0 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_9 0x000058c8 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_10 0x000058d0 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_11 0x000058d8 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_12 0x000058e0 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_13 0x000058e8 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_14 0x000058f0 /* 64-bit */ +#define GRCMBOX_RCVRET_CON_IDX_15 0x000058f8 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_0 0x00005900 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_1 0x00005908 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_2 0x00005910 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_3 0x00005918 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_4 0x00005920 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_5 0x00005928 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_6 0x00005930 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_7 0x00005938 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_8 0x00005940 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_9 0x00005948 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_10 0x00005950 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_11 0x00005958 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_12 0x00005960 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_13 0x00005968 /* 
64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_14 0x00005970 /* 64-bit */ +#define GRCMBOX_SNDHOST_PROD_IDX_15 0x00005978 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_0 0x00005980 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_1 0x00005988 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_2 0x00005990 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_3 0x00005998 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_4 0x000059a0 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_5 0x000059a8 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_6 0x000059b0 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_7 0x000059b8 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_8 0x000059c0 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_9 0x000059c8 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_10 0x000059d0 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_11 0x000059d8 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_12 0x000059e0 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_13 0x000059e8 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_14 0x000059f0 /* 64-bit */ +#define GRCMBOX_SNDNIC_PROD_IDX_15 0x000059f8 /* 64-bit */ +#define GRCMBOX_HIGH_PRIO_EV_VECTOR 0x00005a00 +#define GRCMBOX_HIGH_PRIO_EV_MASK 0x00005a04 +#define GRCMBOX_LOW_PRIO_EV_VEC 0x00005a08 +#define GRCMBOX_LOW_PRIO_EV_MASK 0x00005a0c +/* 0x5a10 --> 0x5c00 */ + +/* Flow Through queues */ +#define FTQ_RESET 0x00005c00 +/* 0x5c04 --> 0x5c10 unused */ +#define FTQ_DMA_NORM_READ_CTL 0x00005c10 +#define FTQ_DMA_NORM_READ_FULL_CNT 0x00005c14 +#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ 0x00005c18 +#define FTQ_DMA_NORM_READ_WRITE_PEEK 0x00005c1c +#define FTQ_DMA_HIGH_READ_CTL 0x00005c20 +#define FTQ_DMA_HIGH_READ_FULL_CNT 0x00005c24 +#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ 0x00005c28 +#define FTQ_DMA_HIGH_READ_WRITE_PEEK 0x00005c2c +#define FTQ_DMA_COMP_DISC_CTL 0x00005c30 +#define FTQ_DMA_COMP_DISC_FULL_CNT 0x00005c34 +#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ 0x00005c38 +#define FTQ_DMA_COMP_DISC_WRITE_PEEK 0x00005c3c +#define FTQ_SEND_BD_COMP_CTL 0x00005c40 +#define FTQ_SEND_BD_COMP_FULL_CNT 0x00005c44 +#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ 0x00005c48 +#define FTQ_SEND_BD_COMP_WRITE_PEEK 0x00005c4c +#define FTQ_SEND_DATA_INIT_CTL 0x00005c50 +#define FTQ_SEND_DATA_INIT_FULL_CNT 0x00005c54 +#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ 0x00005c58 +#define FTQ_SEND_DATA_INIT_WRITE_PEEK 0x00005c5c +#define FTQ_DMA_NORM_WRITE_CTL 0x00005c60 +#define FTQ_DMA_NORM_WRITE_FULL_CNT 0x00005c64 +#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ 0x00005c68 +#define FTQ_DMA_NORM_WRITE_WRITE_PEEK 0x00005c6c +#define FTQ_DMA_HIGH_WRITE_CTL 0x00005c70 +#define FTQ_DMA_HIGH_WRITE_FULL_CNT 0x00005c74 +#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ 0x00005c78 +#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK 0x00005c7c +#define FTQ_SWTYPE1_CTL 0x00005c80 +#define FTQ_SWTYPE1_FULL_CNT 0x00005c84 +#define FTQ_SWTYPE1_FIFO_ENQDEQ 0x00005c88 +#define FTQ_SWTYPE1_WRITE_PEEK 0x00005c8c +#define FTQ_SEND_DATA_COMP_CTL 0x00005c90 +#define FTQ_SEND_DATA_COMP_FULL_CNT 0x00005c94 +#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ 0x00005c98 +#define FTQ_SEND_DATA_COMP_WRITE_PEEK 0x00005c9c +#define FTQ_HOST_COAL_CTL 0x00005ca0 +#define FTQ_HOST_COAL_FULL_CNT 0x00005ca4 +#define FTQ_HOST_COAL_FIFO_ENQDEQ 0x00005ca8 +#define FTQ_HOST_COAL_WRITE_PEEK 0x00005cac +#define FTQ_MAC_TX_CTL 0x00005cb0 +#define FTQ_MAC_TX_FULL_CNT 0x00005cb4 +#define FTQ_MAC_TX_FIFO_ENQDEQ 0x00005cb8 +#define FTQ_MAC_TX_WRITE_PEEK 0x00005cbc +#define FTQ_MB_FREE_CTL 0x00005cc0 +#define FTQ_MB_FREE_FULL_CNT 0x00005cc4 +#define FTQ_MB_FREE_FIFO_ENQDEQ 0x00005cc8 +#define FTQ_MB_FREE_WRITE_PEEK 
0x00005ccc +#define FTQ_RCVBD_COMP_CTL 0x00005cd0 +#define FTQ_RCVBD_COMP_FULL_CNT 0x00005cd4 +#define FTQ_RCVBD_COMP_FIFO_ENQDEQ 0x00005cd8 +#define FTQ_RCVBD_COMP_WRITE_PEEK 0x00005cdc +#define FTQ_RCVLST_PLMT_CTL 0x00005ce0 +#define FTQ_RCVLST_PLMT_FULL_CNT 0x00005ce4 +#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ 0x00005ce8 +#define FTQ_RCVLST_PLMT_WRITE_PEEK 0x00005cec +#define FTQ_RCVDATA_INI_CTL 0x00005cf0 +#define FTQ_RCVDATA_INI_FULL_CNT 0x00005cf4 +#define FTQ_RCVDATA_INI_FIFO_ENQDEQ 0x00005cf8 +#define FTQ_RCVDATA_INI_WRITE_PEEK 0x00005cfc +#define FTQ_RCVDATA_COMP_CTL 0x00005d00 +#define FTQ_RCVDATA_COMP_FULL_CNT 0x00005d04 +#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ 0x00005d08 +#define FTQ_RCVDATA_COMP_WRITE_PEEK 0x00005d0c +#define FTQ_SWTYPE2_CTL 0x00005d10 +#define FTQ_SWTYPE2_FULL_CNT 0x00005d14 +#define FTQ_SWTYPE2_FIFO_ENQDEQ 0x00005d18 +#define FTQ_SWTYPE2_WRITE_PEEK 0x00005d1c +/* 0x5d20 --> 0x6000 unused */ + +/* Message signaled interrupt registers */ +#define MSGINT_MODE 0x00006000 +#define MSGINT_MODE_RESET 0x00000001 +#define MSGINT_MODE_ENABLE 0x00000002 +#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020 +#define MSGINT_MODE_MULTIVEC_EN 0x00000080 +#define MSGINT_STATUS 0x00006004 +#define MSGINT_STATUS_MSI_REQ 0x00000001 +#define MSGINT_FIFO 0x00006008 +/* 0x600c --> 0x6400 unused */ + +/* DMA completion registers */ +#define DMAC_MODE 0x00006400 +#define DMAC_MODE_RESET 0x00000001 +#define DMAC_MODE_ENABLE 0x00000002 +/* 0x6404 --> 0x6800 unused */ + +/* GRC registers */ +#define GRC_MODE 0x00006800 +#define GRC_MODE_UPD_ON_COAL 0x00000001 +#define GRC_MODE_BSWAP_NONFRM_DATA 0x00000002 +#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004 +#define GRC_MODE_BSWAP_DATA 0x00000010 +#define GRC_MODE_WSWAP_DATA 0x00000020 +#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040 +#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080 +#define GRC_MODE_SPLITHDR 0x00000100 +#define GRC_MODE_NOFRM_CRACKING 0x00000200 +#define GRC_MODE_INCL_CRC 0x00000400 +#define GRC_MODE_ALLOW_BAD_FRMS 0x00000800 +#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000 +#define GRC_MODE_NOIRQ_ON_RCV 0x00004000 +#define GRC_MODE_FORCE_PCI32BIT 0x00008000 +#define GRC_MODE_B2HRX_ENABLE 0x00008000 +#define GRC_MODE_HOST_STACKUP 0x00010000 +#define GRC_MODE_HOST_SENDBDS 0x00020000 +#define GRC_MODE_HTX2B_ENABLE 0x00040000 +#define GRC_MODE_TIME_SYNC_ENABLE 0x00080000 +#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 +#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 +#define GRC_MODE_PCIE_TL_SEL 0x00000000 +#define GRC_MODE_PCIE_PL_SEL 0x00400000 +#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000 +#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000 +#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000 +#define GRC_MODE_IRQ_ON_MAC_ATTN 0x04000000 +#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000 +#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000 +#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000 +#define GRC_MODE_PCIE_DL_SEL 0x20000000 +#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000 +#define GRC_MODE_PCIE_HI_1K_EN 0x80000000 +#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \ + GRC_MODE_PCIE_PL_SEL | \ + GRC_MODE_PCIE_DL_SEL | \ + GRC_MODE_PCIE_HI_1K_EN) +#define GRC_MISC_CFG 0x00006804 +#define GRC_MISC_CFG_CORECLK_RESET 0x00000001 +#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe +#define GRC_MISC_CFG_PRESCALAR_SHIFT 1 +#define GRC_MISC_CFG_BOARD_ID_MASK 0x0001e000 +#define GRC_MISC_CFG_BOARD_ID_5700 0x0001e000 +#define GRC_MISC_CFG_BOARD_ID_5701 0x00000000 +#define GRC_MISC_CFG_BOARD_ID_5702FE 0x00004000 +#define GRC_MISC_CFG_BOARD_ID_5703 0x00000000 +#define 
GRC_MISC_CFG_BOARD_ID_5703S 0x00002000 +#define GRC_MISC_CFG_BOARD_ID_5704 0x00000000 +#define GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000 +#define GRC_MISC_CFG_BOARD_ID_5704_A2 0x00008000 +#define GRC_MISC_CFG_BOARD_ID_5788 0x00010000 +#define GRC_MISC_CFG_BOARD_ID_5788M 0x00018000 +#define GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000 +#define GRC_MISC_CFG_EPHY_IDDQ 0x00200000 +#define GRC_MISC_CFG_KEEP_GPHY_POWER 0x04000000 +#define GRC_LOCAL_CTRL 0x00006808 +#define GRC_LCLCTRL_INT_ACTIVE 0x00000001 +#define GRC_LCLCTRL_CLEARINT 0x00000002 +#define GRC_LCLCTRL_SETINT 0x00000004 +#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 +#define GRC_LCLCTRL_GPIO_UART_SEL 0x00000010 /* 5755 only */ +#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */ +#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */ +#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 +#define GRC_LCLCTRL_GPIO_OE3 0x00000040 +#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080 +#define GRC_LCLCTRL_GPIO_INPUT0 0x00000100 +#define GRC_LCLCTRL_GPIO_INPUT1 0x00000200 +#define GRC_LCLCTRL_GPIO_INPUT2 0x00000400 +#define GRC_LCLCTRL_GPIO_OE0 0x00000800 +#define GRC_LCLCTRL_GPIO_OE1 0x00001000 +#define GRC_LCLCTRL_GPIO_OE2 0x00002000 +#define GRC_LCLCTRL_GPIO_OUTPUT0 0x00004000 +#define GRC_LCLCTRL_GPIO_OUTPUT1 0x00008000 +#define GRC_LCLCTRL_GPIO_OUTPUT2 0x00010000 +#define GRC_LCLCTRL_EXTMEM_ENABLE 0x00020000 +#define GRC_LCLCTRL_MEMSZ_MASK 0x001c0000 +#define GRC_LCLCTRL_MEMSZ_256K 0x00000000 +#define GRC_LCLCTRL_MEMSZ_512K 0x00040000 +#define GRC_LCLCTRL_MEMSZ_1M 0x00080000 +#define GRC_LCLCTRL_MEMSZ_2M 0x000c0000 +#define GRC_LCLCTRL_MEMSZ_4M 0x00100000 +#define GRC_LCLCTRL_MEMSZ_8M 0x00140000 +#define GRC_LCLCTRL_MEMSZ_16M 0x00180000 +#define GRC_LCLCTRL_BANK_SELECT 0x00200000 +#define GRC_LCLCTRL_SSRAM_TYPE 0x00400000 +#define GRC_LCLCTRL_AUTO_SEEPROM 0x01000000 +#define GRC_TIMER 0x0000680c +#define GRC_RX_CPU_EVENT 0x00006810 +#define GRC_RX_CPU_DRIVER_EVENT 0x00004000 +#define GRC_RX_TIMER_REF 0x00006814 +#define GRC_RX_CPU_SEM 0x00006818 +#define GRC_REMOTE_RX_CPU_ATTN 0x0000681c +#define GRC_TX_CPU_EVENT 0x00006820 +#define GRC_TX_TIMER_REF 0x00006824 +#define GRC_TX_CPU_SEM 0x00006828 +#define GRC_REMOTE_TX_CPU_ATTN 0x0000682c +#define GRC_MEM_POWER_UP 0x00006830 /* 64-bit */ +#define GRC_EEPROM_ADDR 0x00006838 +#define EEPROM_ADDR_WRITE 0x00000000 +#define EEPROM_ADDR_READ 0x80000000 +#define EEPROM_ADDR_COMPLETE 0x40000000 +#define EEPROM_ADDR_FSM_RESET 0x20000000 +#define EEPROM_ADDR_DEVID_MASK 0x1c000000 +#define EEPROM_ADDR_DEVID_SHIFT 26 +#define EEPROM_ADDR_START 0x02000000 +#define EEPROM_ADDR_CLKPERD_SHIFT 16 +#define EEPROM_ADDR_ADDR_MASK 0x0000ffff +#define EEPROM_ADDR_ADDR_SHIFT 0 +#define EEPROM_DEFAULT_CLOCK_PERIOD 0x60 +#define EEPROM_CHIP_SIZE (64 * 1024) +#define GRC_EEPROM_DATA 0x0000683c +#define GRC_EEPROM_CTRL 0x00006840 +#define GRC_MDI_CTRL 0x00006844 +#define GRC_SEEPROM_DELAY 0x00006848 +/* 0x684c --> 0x6890 unused */ +#define GRC_VCPU_EXT_CTRL 0x00006890 +#define GRC_VCPU_EXT_CTRL_HALT_CPU 0x00400000 +#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000 +#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */ + +#define TG3_EAV_REF_CLCK_LSB 0x00006900 +#define TG3_EAV_REF_CLCK_MSB 0x00006904 +#define TG3_EAV_REF_CLCK_CTL 0x00006908 +#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002 +#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004 +#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16) +#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17) + +#define TG3_EAV_WATCHDOG0_LSB 0x00006918 +#define TG3_EAV_WATCHDOG0_MSB 
0x0000691c +#define TG3_EAV_WATCHDOG0_EN (1 << 31) +#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff + +#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928 +#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31) +#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30) + +#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff + +/* 0x692c --> 0x7000 unused */ + +/* NVRAM Control registers */ +#define NVRAM_CMD 0x00007000 +#define NVRAM_CMD_RESET 0x00000001 +#define NVRAM_CMD_DONE 0x00000008 +#define NVRAM_CMD_GO 0x00000010 +#define NVRAM_CMD_WR 0x00000020 +#define NVRAM_CMD_RD 0x00000000 +#define NVRAM_CMD_ERASE 0x00000040 +#define NVRAM_CMD_FIRST 0x00000080 +#define NVRAM_CMD_LAST 0x00000100 +#define NVRAM_CMD_WREN 0x00010000 +#define NVRAM_CMD_WRDI 0x00020000 +#define NVRAM_STAT 0x00007004 +#define NVRAM_WRDATA 0x00007008 +#define NVRAM_ADDR 0x0000700c +#define NVRAM_ADDR_MSK 0x00ffffff +#define NVRAM_RDDATA 0x00007010 +#define NVRAM_CFG1 0x00007014 +#define NVRAM_CFG1_FLASHIF_ENAB 0x00000001 +#define NVRAM_CFG1_BUFFERED_MODE 0x00000002 +#define NVRAM_CFG1_PASS_THRU 0x00000004 +#define NVRAM_CFG1_STATUS_BITS 0x00000070 +#define NVRAM_CFG1_BIT_BANG 0x00000008 +#define NVRAM_CFG1_FLASH_SIZE 0x02000000 +#define NVRAM_CFG1_COMPAT_BYPASS 0x80000000 +#define NVRAM_CFG1_VENDOR_MASK 0x03000003 +#define FLASH_VENDOR_ATMEL_EEPROM 0x02000000 +#define FLASH_VENDOR_ATMEL_FLASH_BUFFERED 0x02000003 +#define FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED 0x00000003 +#define FLASH_VENDOR_ST 0x03000001 +#define FLASH_VENDOR_SAIFUN 0x01000003 +#define FLASH_VENDOR_SST_SMALL 0x00000001 +#define FLASH_VENDOR_SST_LARGE 0x02000001 +#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003 +#define NVRAM_CFG1_5762VENDOR_MASK 0x03e00003 +#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000 +#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000 +#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003 +#define FLASH_5752VENDOR_ST_M45PE10 0x02400000 +#define FLASH_5752VENDOR_ST_M45PE20 0x02400002 +#define FLASH_5752VENDOR_ST_M45PE40 0x02400001 +#define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001 +#define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 +#define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 +#define FLASH_5755VENDOR_ATMEL_FLASH_4 0x00000003 +#define FLASH_5755VENDOR_ATMEL_FLASH_5 0x02000003 +#define FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ 0x03c00003 +#define FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ 0x03c00002 +#define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 +#define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 +#define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 +#define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000 +#define FLASH_5761VENDOR_ATMEL_MDB021D 0x00800003 +#define FLASH_5761VENDOR_ATMEL_MDB041D 0x00800000 +#define FLASH_5761VENDOR_ATMEL_MDB081D 0x00800002 +#define FLASH_5761VENDOR_ATMEL_MDB161D 0x00800001 +#define FLASH_5761VENDOR_ATMEL_ADB021D 0x00000003 +#define FLASH_5761VENDOR_ATMEL_ADB041D 0x00000000 +#define FLASH_5761VENDOR_ATMEL_ADB081D 0x00000002 +#define FLASH_5761VENDOR_ATMEL_ADB161D 0x00000001 +#define FLASH_5761VENDOR_ST_M_M45PE20 0x02800001 +#define FLASH_5761VENDOR_ST_M_M45PE40 0x02800000 +#define FLASH_5761VENDOR_ST_M_M45PE80 0x02800002 +#define FLASH_5761VENDOR_ST_M_M45PE16 0x02800003 +#define FLASH_5761VENDOR_ST_A_M45PE20 0x02000001 +#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000 +#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002 +#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003 +#define FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000 +#define FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000 +#define FLASH_57780VENDOR_ATMEL_AT45DB021D 
0x00400002 +#define FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002 +#define FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001 +#define FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001 +#define FLASH_5717VENDOR_ATMEL_EEPROM 0x02000001 +#define FLASH_5717VENDOR_MICRO_EEPROM 0x02000003 +#define FLASH_5717VENDOR_ATMEL_MDB011D 0x01000001 +#define FLASH_5717VENDOR_ATMEL_MDB021D 0x01000003 +#define FLASH_5717VENDOR_ST_M_M25PE10 0x02000000 +#define FLASH_5717VENDOR_ST_M_M25PE20 0x02000002 +#define FLASH_5717VENDOR_ST_M_M45PE10 0x00000001 +#define FLASH_5717VENDOR_ST_M_M45PE20 0x00000003 +#define FLASH_5717VENDOR_ATMEL_ADB011B 0x01400000 +#define FLASH_5717VENDOR_ATMEL_ADB021B 0x01400002 +#define FLASH_5717VENDOR_ATMEL_ADB011D 0x01400001 +#define FLASH_5717VENDOR_ATMEL_ADB021D 0x01400003 +#define FLASH_5717VENDOR_ST_A_M25PE10 0x02400000 +#define FLASH_5717VENDOR_ST_A_M25PE20 0x02400002 +#define FLASH_5717VENDOR_ST_A_M45PE10 0x02400001 +#define FLASH_5717VENDOR_ST_A_M45PE20 0x02400003 +#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000 +#define FLASH_5717VENDOR_ST_25USPT 0x03400002 +#define FLASH_5717VENDOR_ST_45USPT 0x03400001 +#define FLASH_5720_EEPROM_HD 0x00000001 +#define FLASH_5720_EEPROM_LD 0x00000003 +#define FLASH_5762_EEPROM_HD 0x02000001 +#define FLASH_5762_EEPROM_LD 0x02000003 +#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000 +#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002 +#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001 +#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003 +#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000 +#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002 +#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001 +#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003 +#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000 +#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002 +#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001 +#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003 +#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000 +#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002 +#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001 +#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000 +#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002 +#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001 +#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003 +#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000 +#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002 +#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001 +#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003 +#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000 +#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002 +#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001 +#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003 +#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000 +#define FLASH_5720VENDOR_ST_25USPT 0x03c00002 +#define FLASH_5720VENDOR_ST_45USPT 0x03c00001 +#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 +#define FLASH_5752PAGE_SIZE_256 0x00000000 +#define FLASH_5752PAGE_SIZE_512 0x10000000 +#define FLASH_5752PAGE_SIZE_1K 0x20000000 +#define FLASH_5752PAGE_SIZE_2K 0x30000000 +#define FLASH_5752PAGE_SIZE_4K 0x40000000 +#define FLASH_5752PAGE_SIZE_264 0x50000000 +#define FLASH_5752PAGE_SIZE_528 0x60000000 +#define NVRAM_CFG2 0x00007018 +#define NVRAM_CFG3 0x0000701c +#define NVRAM_SWARB 0x00007020 +#define SWARB_REQ_SET0 0x00000001 +#define SWARB_REQ_SET1 0x00000002 +#define SWARB_REQ_SET2 0x00000004 +#define SWARB_REQ_SET3 0x00000008 +#define SWARB_REQ_CLR0 0x00000010 +#define SWARB_REQ_CLR1 0x00000020 +#define SWARB_REQ_CLR2 0x00000040 +#define SWARB_REQ_CLR3 0x00000080 +#define SWARB_GNT0 
0x00000100 +#define SWARB_GNT1 0x00000200 +#define SWARB_GNT2 0x00000400 +#define SWARB_GNT3 0x00000800 +#define SWARB_REQ0 0x00001000 +#define SWARB_REQ1 0x00002000 +#define SWARB_REQ2 0x00004000 +#define SWARB_REQ3 0x00008000 +#define NVRAM_ACCESS 0x00007024 +#define ACCESS_ENABLE 0x00000001 +#define ACCESS_WR_ENABLE 0x00000002 +#define NVRAM_WRITE1 0x00007028 +/* 0x702c unused */ + +#define NVRAM_ADDR_LOCKOUT 0x00007030 +/* 0x7034 --> 0x7500 unused */ + +#define OTP_MODE 0x00007500 +#define OTP_MODE_OTP_THRU_GRC 0x00000001 +#define OTP_CTRL 0x00007504 +#define OTP_CTRL_OTP_PROG_ENABLE 0x00200000 +#define OTP_CTRL_OTP_CMD_READ 0x00000000 +#define OTP_CTRL_OTP_CMD_INIT 0x00000008 +#define OTP_CTRL_OTP_CMD_START 0x00000001 +#define OTP_STATUS 0x00007508 +#define OTP_STATUS_CMD_DONE 0x00000001 +#define OTP_ADDRESS 0x0000750c +#define OTP_ADDRESS_MAGIC1 0x000000a0 +#define OTP_ADDRESS_MAGIC2 0x00000080 +/* 0x7510 unused */ + +#define OTP_READ_DATA 0x00007514 +/* 0x7518 --> 0x7c04 unused */ + +#define PCIE_TRANSACTION_CFG 0x00007c04 +#define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000 +#define PCIE_TRANS_CFG_LOM 0x00000020 +/* 0x7c08 --> 0x7d28 unused */ + +#define PCIE_PWR_MGMT_THRESH 0x00007d28 +#define PCIE_PWR_MGMT_L1_THRESH_MSK 0x0000ff00 +#define PCIE_PWR_MGMT_L1_THRESH_4MS 0x0000ff00 +#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN 0x01000000 +/* 0x7d2c --> 0x7d54 unused */ + +#define TG3_PCIE_LNKCTL 0x00007d54 +#define TG3_PCIE_LNKCTL_L1_PLL_PD_EN 0x00000008 +#define TG3_PCIE_LNKCTL_L1_PLL_PD_DIS 0x00000080 +/* 0x7d58 --> 0x7e70 unused */ + +#define TG3_PCIE_PHY_TSTCTL 0x00007e2c +#define TG3_PCIE_PHY_TSTCTL_PCIE10 0x00000040 +#define TG3_PCIE_PHY_TSTCTL_PSCRAM 0x00000020 + +#define TG3_PCIE_EIDLE_DELAY 0x00007e70 +#define TG3_PCIE_EIDLE_DELAY_MASK 0x0000001f +#define TG3_PCIE_EIDLE_DELAY_13_CLKS 0x0000000c +/* 0x7e74 --> 0x8000 unused */ + + +/* Alternate PCIE definitions */ +#define TG3_PCIE_TLDLPL_PORT 0x00007c00 +#define TG3_PCIE_DL_LO_FTSMAX 0x0000000c +#define TG3_PCIE_DL_LO_FTSMAX_MSK 0x000000ff +#define TG3_PCIE_DL_LO_FTSMAX_VAL 0x0000002c +#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004 +#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000 +#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014 +#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000 + +#define TG3_REG_BLK_SIZE 0x00008000 + +/* OTP bit definitions */ +#define TG3_OTP_AGCTGT_MASK 0x000000e0 +#define TG3_OTP_AGCTGT_SHIFT 1 +#define TG3_OTP_HPFFLTR_MASK 0x00000300 +#define TG3_OTP_HPFFLTR_SHIFT 1 +#define TG3_OTP_HPFOVER_MASK 0x00000400 +#define TG3_OTP_HPFOVER_SHIFT 1 +#define TG3_OTP_LPFDIS_MASK 0x00000800 +#define TG3_OTP_LPFDIS_SHIFT 11 +#define TG3_OTP_VDAC_MASK 0xff000000 +#define TG3_OTP_VDAC_SHIFT 24 +#define TG3_OTP_10BTAMP_MASK 0x0000f000 +#define TG3_OTP_10BTAMP_SHIFT 8 +#define TG3_OTP_ROFF_MASK 0x00e00000 +#define TG3_OTP_ROFF_SHIFT 11 +#define TG3_OTP_RCOFF_MASK 0x001c0000 +#define TG3_OTP_RCOFF_SHIFT 16 + +#define TG3_OTP_DEFAULT 0x286c1640 + + +/* Hardware Legacy NVRAM layout */ +#define TG3_NVM_VPD_OFF 0x100 +#define TG3_NVM_VPD_LEN 256 + +/* Hardware Selfboot NVRAM layout */ +#define TG3_NVM_HWSB_CFG1 0x00000004 +#define TG3_NVM_HWSB_CFG1_MAJMSK 0xf8000000 +#define TG3_NVM_HWSB_CFG1_MAJSFT 27 +#define TG3_NVM_HWSB_CFG1_MINMSK 0x07c00000 +#define TG3_NVM_HWSB_CFG1_MINSFT 22 + +#define TG3_EEPROM_MAGIC 0x669955aa +#define TG3_EEPROM_MAGIC_FW 0xa5000000 +#define TG3_EEPROM_MAGIC_FW_MSK 0xff000000 +#define TG3_EEPROM_SB_FORMAT_MASK 0x00e00000 +#define TG3_EEPROM_SB_FORMAT_1 0x00200000 +#define TG3_EEPROM_SB_REVISION_MASK 0x001f0000 
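The TG3_NVM_HWSB_CFG1_* masks and shifts above carve a major.minor version out of the hardware-selfboot configuration word. A hedged sketch of the decode, assuming cfg1 is the 32-bit word read from NVRAM offset TG3_NVM_HWSB_CFG1 (the NVRAM read itself is out of scope here) and the helper name is hypothetical:

#include <linux/types.h>
/* plus the register definitions from this header */

/* Hypothetical helper: split the selfboot config word into major/minor. */
static inline void tg3_example_hwsb_version(u32 cfg1, u32 *major, u32 *minor)
{
	*major = (cfg1 & TG3_NVM_HWSB_CFG1_MAJMSK) >> TG3_NVM_HWSB_CFG1_MAJSFT;
	*minor = (cfg1 & TG3_NVM_HWSB_CFG1_MINMSK) >> TG3_NVM_HWSB_CFG1_MINSFT;
}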
+#define TG3_EEPROM_SB_REVISION_0 0x00000000 +#define TG3_EEPROM_SB_REVISION_2 0x00020000 +#define TG3_EEPROM_SB_REVISION_3 0x00030000 +#define TG3_EEPROM_SB_REVISION_4 0x00040000 +#define TG3_EEPROM_SB_REVISION_5 0x00050000 +#define TG3_EEPROM_SB_REVISION_6 0x00060000 +#define TG3_EEPROM_MAGIC_HW 0xabcd +#define TG3_EEPROM_MAGIC_HW_MSK 0xffff + +#define TG3_NVM_DIR_START 0x18 +#define TG3_NVM_DIR_END 0x78 +#define TG3_NVM_DIRENT_SIZE 0xc +#define TG3_NVM_DIRTYPE_SHIFT 24 +#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff +#define TG3_NVM_DIRTYPE_ASFINI 1 +#define TG3_NVM_DIRTYPE_EXTVPD 20 +#define TG3_NVM_PTREV_BCVER 0x94 +#define TG3_NVM_BCVER_MAJMSK 0x0000ff00 +#define TG3_NVM_BCVER_MAJSFT 8 +#define TG3_NVM_BCVER_MINMSK 0x000000ff + +#define TG3_EEPROM_SB_F1R0_EDH_OFF 0x10 +#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14 +#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 +#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18 +#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c +#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20 +#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c +#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700 +#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8 +#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff +#define TG3_EEPROM_SB_EDH_BLD_MASK 0x0000f800 +#define TG3_EEPROM_SB_EDH_BLD_SHFT 11 + + +/* 32K Window into NIC internal memory */ +#define NIC_SRAM_WIN_BASE 0x00008000 + +/* Offsets into first 32k of NIC internal memory. */ +#define NIC_SRAM_PAGE_ZERO 0x00000000 +#define NIC_SRAM_SEND_RCB 0x00000100 /* 16 * TG3_BDINFO_... */ +#define NIC_SRAM_RCV_RET_RCB 0x00000200 /* 16 * TG3_BDINFO_... */ +#define NIC_SRAM_STATS_BLK 0x00000300 +#define NIC_SRAM_STATUS_BLK 0x00000b00 + +#define NIC_SRAM_FIRMWARE_MBOX 0x00000b50 +#define NIC_SRAM_FIRMWARE_MBOX_MAGIC1 0x4B657654 +#define NIC_SRAM_FIRMWARE_MBOX_MAGIC2 0x4861764b /* !dma on linkchg */ + +#define NIC_SRAM_DATA_SIG 0x00000b54 +#define NIC_SRAM_DATA_SIG_MAGIC 0x4b657654 /* ascii for 'KevT' */ + +#define NIC_SRAM_DATA_CFG 0x00000b58 +#define NIC_SRAM_DATA_CFG_LED_MODE_MASK 0x0000000c +#define NIC_SRAM_DATA_CFG_LED_MODE_MAC 0x00000000 +#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_1 0x00000004 +#define NIC_SRAM_DATA_CFG_LED_MODE_PHY_2 0x00000008 +#define NIC_SRAM_DATA_CFG_PHY_TYPE_MASK 0x00000030 +#define NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN 0x00000000 +#define NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER 0x00000010 +#define NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER 0x00000020 +#define NIC_SRAM_DATA_CFG_WOL_ENABLE 0x00000040 +#define NIC_SRAM_DATA_CFG_ASF_ENABLE 0x00000080 +#define NIC_SRAM_DATA_CFG_EEPROM_WP 0x00000100 +#define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000 +#define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000 +#define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000 +#define NIC_SRAM_DATA_CFG_APE_ENABLE 0x00200000 + +#define NIC_SRAM_DATA_VER 0x00000b5c +#define NIC_SRAM_DATA_VER_SHIFT 16 + +#define NIC_SRAM_DATA_PHY_ID 0x00000b74 +#define NIC_SRAM_DATA_PHY_ID1_MASK 0xffff0000 +#define NIC_SRAM_DATA_PHY_ID2_MASK 0x0000ffff + +#define NIC_SRAM_FW_CMD_MBOX 0x00000b78 +#define FWCMD_NICDRV_ALIVE 0x00000001 +#define FWCMD_NICDRV_PAUSE_FW 0x00000002 +#define FWCMD_NICDRV_IPV4ADDR_CHG 0x00000003 +#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004 +#define FWCMD_NICDRV_FIX_DMAR 0x00000005 +#define FWCMD_NICDRV_FIX_DMAW 0x00000006 +#define FWCMD_NICDRV_LINK_UPDATE 0x0000000c +#define FWCMD_NICDRV_ALIVE2 0x0000000d +#define FWCMD_NICDRV_ALIVE3 0x0000000e +#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c +#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80 +#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00 +#define NIC_SRAM_FW_DRV_STATE_MBOX 0x00000c04 +#define 
DRV_STATE_START 0x00000001 +#define DRV_STATE_START_DONE 0x80000001 +#define DRV_STATE_UNLOAD 0x00000002 +#define DRV_STATE_UNLOAD_DONE 0x80000002 +#define DRV_STATE_WOL 0x00000003 +#define DRV_STATE_SUSPEND 0x00000004 + +#define NIC_SRAM_FW_RESET_TYPE_MBOX 0x00000c08 + +#define NIC_SRAM_MAC_ADDR_HIGH_MBOX 0x00000c14 +#define NIC_SRAM_MAC_ADDR_LOW_MBOX 0x00000c18 + +#define NIC_SRAM_WOL_MBOX 0x00000d30 +#define WOL_SIGNATURE 0x474c0000 +#define WOL_DRV_STATE_SHUTDOWN 0x00000001 +#define WOL_DRV_WOL 0x00000002 +#define WOL_SET_MAGIC_PKT 0x00000004 + +#define NIC_SRAM_DATA_CFG_2 0x00000d38 + +#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00004000 +#define SHASTA_EXT_LED_MODE_MASK 0x00018000 +#define SHASTA_EXT_LED_LEGACY 0x00000000 +#define SHASTA_EXT_LED_SHARED 0x00008000 +#define SHASTA_EXT_LED_MAC 0x00010000 +#define SHASTA_EXT_LED_COMBO 0x00018000 + +#define NIC_SRAM_DATA_CFG_3 0x00000d3c +#define NIC_SRAM_ASPM_DEBOUNCE 0x00000002 +#define NIC_SRAM_LNK_FLAP_AVOID 0x00400000 +#define NIC_SRAM_1G_ON_VAUX_OK 0x00800000 + +#define NIC_SRAM_DATA_CFG_4 0x00000d60 +#define NIC_SRAM_GMII_MODE 0x00000002 +#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004 +#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 +#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 + +#define NIC_SRAM_CPMU_STATUS 0x00000e00 +#define NIC_SRAM_CPMUSTAT_SIG 0x0000362c +#define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff + +#define NIC_SRAM_DATA_CFG_5 0x00000e0c +#define NIC_SRAM_DISABLE_1G_HALF_ADV 0x00000002 + +#define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 + +#define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 +#define NIC_SRAM_DMA_DESC_POOL_SIZE 0x00002000 +#define NIC_SRAM_TX_BUFFER_DESC 0x00004000 /* 512 entries */ +#define NIC_SRAM_RX_BUFFER_DESC 0x00006000 /* 256 entries */ +#define NIC_SRAM_RX_JUMBO_BUFFER_DESC 0x00007000 /* 256 entries */ +#define NIC_SRAM_MBUF_POOL_BASE 0x00008000 +#define NIC_SRAM_MBUF_POOL_SIZE96 0x00018000 +#define NIC_SRAM_MBUF_POOL_SIZE64 0x00010000 +#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 +#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 + +#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000 +#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000 +#define TG3_57766_FW_BASE_ADDR 0x00030000 +#define TG3_57766_FW_HANDSHAKE 0x0003fccc +#define TG3_SBROM_IN_SERVICE_LOOP 0x51 + +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 +#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 + +#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64 +#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16 + + +/* Currently this is fixed. */ +#define TG3_PHY_MII_ADDR 0x01 + + +/*** Tigon3 specific PHY MII registers. 
***/ +#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */ +#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000 +#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */ + +#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */ +#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001 +#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002 +#define MII_TG3_EXT_CTRL_FORCE_LED_OFF 0x0008 +#define MII_TG3_EXT_CTRL_TBI 0x8000 + +#define MII_TG3_EXT_STAT 0x11 /* Extended status register */ +#define MII_TG3_EXT_STAT_MDIX 0x2000 +#define MII_TG3_EXT_STAT_LPASS 0x0100 + +#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */ +#define MII_TG3_DSP_RW_PORT 0x15 /* DSP coefficient read/write port */ +#define MII_TG3_DSP_CONTROL 0x16 /* DSP control register */ +#define MII_TG3_DSP_ADDRESS 0x17 /* DSP address register */ + +#define MII_TG3_DSP_TAP1 0x0001 +#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 +#define MII_TG3_DSP_TAP26 0x001a +#define MII_TG3_DSP_TAP26_ALNOKO 0x0001 +#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002 +#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004 +#define MII_TG3_DSP_AADJ1CH0 0x001f +#define MII_TG3_DSP_CH34TP2 0x4022 +#define MII_TG3_DSP_CH34TP2_HIBW01 0x01ff +#define MII_TG3_DSP_AADJ1CH3 0x601f +#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01 +#define MII_TG3_DSP_EXP8 0x0f08 +#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 +#define MII_TG3_DSP_EXP8_AEDW 0x0200 +#define MII_TG3_DSP_EXP75 0x0f75 +#define MII_TG3_DSP_EXP96 0x0f96 +#define MII_TG3_DSP_EXP97 0x0f97 + +#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */ + +#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000 +#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400 +#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800 +#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000 +#define MII_TG3_AUXCTL_ACTL_EXTLOOPBK 0x8000 + +#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002 +#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008 +#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010 +#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020 +#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040 +#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180 + +#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004 + +#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007 +#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010 +#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200 +#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12 +#define MII_TG3_AUXCTL_MISC_WREN 0x8000 + + +#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */ +#define MII_TG3_AUX_STAT_LPASS 0x0004 +#define MII_TG3_AUX_STAT_SPDMASK 0x0700 +#define MII_TG3_AUX_STAT_10HALF 0x0100 +#define MII_TG3_AUX_STAT_10FULL 0x0200 +#define MII_TG3_AUX_STAT_100HALF 0x0300 +#define MII_TG3_AUX_STAT_100_4 0x0400 +#define MII_TG3_AUX_STAT_100FULL 0x0500 +#define MII_TG3_AUX_STAT_1000HALF 0x0600 +#define MII_TG3_AUX_STAT_1000FULL 0x0700 +#define MII_TG3_AUX_STAT_100 0x0008 +#define MII_TG3_AUX_STAT_FULL 0x0001 + +#define MII_TG3_ISTAT 0x1a /* IRQ status register */ +#define MII_TG3_IMASK 0x1b /* IRQ mask register */ + +/* ISTAT/IMASK event bits */ +#define MII_TG3_INT_LINKCHG 0x0002 +#define MII_TG3_INT_SPEEDCHG 0x0004 +#define MII_TG3_INT_DUPLEXCHG 0x0008 +#define MII_TG3_INT_ANEG_PAGE_RX 0x0400 + +#define MII_TG3_MISC_SHDW 0x1c +#define MII_TG3_MISC_SHDW_WREN 0x8000 + +#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001 +#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020 +#define MII_TG3_MISC_SHDW_APD_SEL 0x2800 + +#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001 +#define MII_TG3_MISC_SHDW_SCR5_DLLAPD 0x0002 +#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004 +#define 
MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008 +#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010 +#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400 + +#define MII_TG3_TEST1 0x1e +#define MII_TG3_TEST1_TRIM_EN 0x0010 +#define MII_TG3_TEST1_CRC_EN 0x8000 + +/* Clause 45 expansion registers */ +#define TG3_CL45_D7_EEERES_STAT 0x803e +#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002 +#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004 + + +/* Fast Ethernet Tranceiver definitions */ +#define MII_TG3_FET_PTEST 0x17 +#define MII_TG3_FET_PTEST_TRIM_SEL 0x0010 +#define MII_TG3_FET_PTEST_TRIM_2 0x0002 +#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000 +#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800 + +#define MII_TG3_FET_GEN_STAT 0x1c +#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000 + +#define MII_TG3_FET_TEST 0x1f +#define MII_TG3_FET_SHADOW_EN 0x0080 + +#define MII_TG3_FET_SHDW_MISCCTRL 0x10 +#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 + +#define MII_TG3_FET_SHDW_AUXMODE4 0x1a +#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008 + +#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b +#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020 + +/* Serdes PHY Register Definitions */ +#define SERDES_TG3_1000X_STATUS 0x14 +#define SERDES_TG3_SGMII_MODE 0x0001 +#define SERDES_TG3_LINK_UP 0x0002 +#define SERDES_TG3_FULL_DUPLEX 0x0004 +#define SERDES_TG3_SPEED_100 0x0008 +#define SERDES_TG3_SPEED_1000 0x0010 + +/* APE registers. Accessible through BAR1 */ +#define TG3_APE_GPIO_MSG 0x0008 +#define TG3_APE_GPIO_MSG_SHIFT 4 +#define TG3_APE_EVENT 0x000c +#define APE_EVENT_1 0x00000001 +#define TG3_APE_LOCK_REQ 0x002c +#define APE_LOCK_REQ_DRIVER 0x00001000 +#define TG3_APE_LOCK_GRANT 0x004c +#define APE_LOCK_GRANT_DRIVER 0x00001000 +#define TG3_APE_OTP_CTRL 0x00e8 +#define APE_OTP_CTRL_PROG_EN 0x200000 +#define APE_OTP_CTRL_CMD_RD 0x000000 +#define APE_OTP_CTRL_START 0x000001 +#define TG3_APE_OTP_STATUS 0x00ec +#define APE_OTP_STATUS_CMD_DONE 0x000001 +#define TG3_APE_OTP_ADDR 0x00f0 +#define APE_OTP_ADDR_CPU_ENABLE 0x80000000 +#define TG3_APE_OTP_RD_DATA 0x00f8 + +#define OTP_ADDRESS_MAGIC0 0x00000050 +#define TG3_OTP_MAGIC0_VALID(val) \ + ((((val) & 0xf0000000) == 0xa0000000) ||\ + (((val) & 0x0f000000) == 0x0a000000)) + +/* APE shared memory. 
Accessible through BAR1 */ +#define TG3_APE_SHMEM_BASE 0x4000 +#define TG3_APE_SEG_SIG 0x4000 +#define APE_SEG_SIG_MAGIC 0x41504521 +#define TG3_APE_FW_STATUS 0x400c +#define APE_FW_STATUS_READY 0x00000100 +#define TG3_APE_FW_FEATURES 0x4010 +#define TG3_APE_FW_FEATURE_NCSI 0x00000002 +#define TG3_APE_FW_VERSION 0x4018 +#define APE_FW_VERSION_MAJMSK 0xff000000 +#define APE_FW_VERSION_MAJSFT 24 +#define APE_FW_VERSION_MINMSK 0x00ff0000 +#define APE_FW_VERSION_MINSFT 16 +#define APE_FW_VERSION_REVMSK 0x0000ff00 +#define APE_FW_VERSION_REVSFT 8 +#define APE_FW_VERSION_BLDMSK 0x000000ff +#define TG3_APE_SEG_MSG_BUF_OFF 0x401c +#define TG3_APE_SEG_MSG_BUF_LEN 0x4020 +#define TG3_APE_HOST_SEG_SIG 0x4200 +#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 +#define TG3_APE_HOST_SEG_LEN 0x4204 +#define APE_HOST_SEG_LEN_MAGIC 0x00000020 +#define TG3_APE_HOST_INIT_COUNT 0x4208 +#define TG3_APE_HOST_DRIVER_ID 0x420c +#define APE_HOST_DRIVER_ID_LINUX 0xf0000000 +#define APE_HOST_DRIVER_ID_MAGIC(maj, min) \ + (APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8) +#define TG3_APE_HOST_BEHAVIOR 0x4210 +#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 +#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214 +#define APE_HOST_HEARTBEAT_INT_DISABLE 0 +#define APE_HOST_HEARTBEAT_INT_5SEC 5000 +#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218 +#define TG3_APE_HOST_DRVR_STATE 0x421c +#define TG3_APE_HOST_DRVR_STATE_START 0x00000001 +#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002 +#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003 +#define TG3_APE_HOST_WOL_SPEED 0x4224 +#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000 + +#define TG3_APE_EVENT_STATUS 0x4300 + +#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 +#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500 +#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600 +#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700 +#define APE_EVENT_STATUS_STATE_START 0x00010000 +#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 +#define APE_EVENT_STATUS_STATE_WOL 0x00030000 +#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 +#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 + +#define TG3_APE_PER_LOCK_REQ 0x8400 +#define APE_LOCK_PER_REQ_DRIVER 0x00001000 +#define TG3_APE_PER_LOCK_GRANT 0x8420 +#define APE_PER_LOCK_GRANT_DRIVER 0x00001000 + +/* APE convenience enumerations. */ +#define TG3_APE_LOCK_PHY0 0 +#define TG3_APE_LOCK_GRC 1 +#define TG3_APE_LOCK_PHY1 2 +#define TG3_APE_LOCK_PHY2 3 +#define TG3_APE_LOCK_MEM 4 +#define TG3_APE_LOCK_PHY3 5 +#define TG3_APE_LOCK_GPIO 7 + +#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 + + +/* There are two ways to manage the TX descriptors on the tigon3. + * Either the descriptors are in host DMA'able memory, or they + * exist only in the cards on-chip SRAM. All 16 send bds are under + * the same mode, they may not be configured individually. + * + * This driver always uses host memory TX descriptors. + * + * To use host memory TX descriptors: + * 1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register. + * Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear. + * 2) Allocate DMA'able memory. + * 3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM: + * a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory + * obtained in step 2 + * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC. + * c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number + * of TX descriptors. Leave flags field clear. + * 4) Access TX descriptors via host memory. The chip + * will refetch into local SRAM as needed when producer + * index mailboxes are updated. 
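+ *
+ *    A minimal sketch of steps 1) - 3) above (illustrative only; tw32()
+ *    and tg3_set_bdinfo() are assumed to be the register-write and
+ *    SEND_RCB setup helpers that tg3.c uses, and the ring-size macros
+ *    are taken from earlier in this header):
+ *
+ *	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+ *	tw32(GRC_MODE, tp->grc_mode);
+ *	txd = dma_alloc_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
+ *				 &mapping, GFP_KERNEL);
+ *	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, mapping,
+ *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
+ *		       NIC_SRAM_TX_BUFFER_DESC);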
+ * + * To use on-chip TX descriptors: + * 1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register. + * Make sure GRC_MODE_HOST_SENDBDS is clear. + * 2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM: + * a) Set TG3_BDINFO_HOST_ADDR to zero. + * b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC + * c) TG3_BDINFO_MAXLEN_FLAGS is don't care. + * 3) Access TX descriptors directly in on-chip SRAM + * using normal {read,write}l(). (and not using + * pointer dereferencing of ioremap()'d memory like + * the broken Broadcom driver does) + * + * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of + * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices. + */ +struct tg3_tx_buffer_desc { + u32 addr_hi; + u32 addr_lo; + + u32 len_flags; +#define TXD_FLAG_TCPUDP_CSUM 0x0001 +#define TXD_FLAG_IP_CSUM 0x0002 +#define TXD_FLAG_END 0x0004 +#define TXD_FLAG_IP_FRAG 0x0008 +#define TXD_FLAG_JMB_PKT 0x0008 +#define TXD_FLAG_IP_FRAG_END 0x0010 +#define TXD_FLAG_HWTSTAMP 0x0020 +#define TXD_FLAG_VLAN 0x0040 +#define TXD_FLAG_COAL_NOW 0x0080 +#define TXD_FLAG_CPU_PRE_DMA 0x0100 +#define TXD_FLAG_CPU_POST_DMA 0x0200 +#define TXD_FLAG_ADD_SRC_ADDR 0x1000 +#define TXD_FLAG_CHOOSE_SRC_ADDR 0x6000 +#define TXD_FLAG_NO_CRC 0x8000 +#define TXD_LEN_SHIFT 16 + + u32 vlan_tag; +#define TXD_VLAN_TAG_SHIFT 0 +#define TXD_MSS_SHIFT 16 +}; + +#define TXD_ADDR 0x00UL /* 64-bit */ +#define TXD_LEN_FLAGS 0x08UL /* 32-bit (upper 16-bits are len) */ +#define TXD_VLAN_TAG 0x0cUL /* 32-bit (upper 16-bits are tag) */ +#define TXD_SIZE 0x10UL + +struct tg3_rx_buffer_desc { + u32 addr_hi; + u32 addr_lo; + + u32 idx_len; +#define RXD_IDX_MASK 0xffff0000 +#define RXD_IDX_SHIFT 16 +#define RXD_LEN_MASK 0x0000ffff +#define RXD_LEN_SHIFT 0 + + u32 type_flags; +#define RXD_TYPE_SHIFT 16 +#define RXD_FLAGS_SHIFT 0 + +#define RXD_FLAG_END 0x0004 +#define RXD_FLAG_MINI 0x0800 +#define RXD_FLAG_JUMBO 0x0020 +#define RXD_FLAG_VLAN 0x0040 +#define RXD_FLAG_ERROR 0x0400 +#define RXD_FLAG_IP_CSUM 0x1000 +#define RXD_FLAG_TCPUDP_CSUM 0x2000 +#define RXD_FLAG_IS_TCP 0x4000 +#define RXD_FLAG_PTPSTAT_MASK 0x0210 +#define RXD_FLAG_PTPSTAT_PTPV1 0x0010 +#define RXD_FLAG_PTPSTAT_PTPV2 0x0200 + + u32 ip_tcp_csum; +#define RXD_IPCSUM_MASK 0xffff0000 +#define RXD_IPCSUM_SHIFT 16 +#define RXD_TCPCSUM_MASK 0x0000ffff +#define RXD_TCPCSUM_SHIFT 0 + + u32 err_vlan; + +#define RXD_VLAN_MASK 0x0000ffff + +#define RXD_ERR_BAD_CRC 0x00010000 +#define RXD_ERR_COLLISION 0x00020000 +#define RXD_ERR_LINK_LOST 0x00040000 +#define RXD_ERR_PHY_DECODE 0x00080000 +#define RXD_ERR_ODD_NIBBLE_RCVD_MII 0x00100000 +#define RXD_ERR_MAC_ABRT 0x00200000 +#define RXD_ERR_TOO_SMALL 0x00400000 +#define RXD_ERR_NO_RESOURCES 0x00800000 +#define RXD_ERR_HUGE_FRAME 0x01000000 + +#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \ + RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \ + RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \ + RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME) + + u32 reserved; + u32 opaque; +#define RXD_OPAQUE_INDEX_MASK 0x0000ffff +#define RXD_OPAQUE_INDEX_SHIFT 0 +#define RXD_OPAQUE_RING_STD 0x00010000 +#define RXD_OPAQUE_RING_JUMBO 0x00020000 +#define RXD_OPAQUE_RING_MINI 0x00040000 +#define RXD_OPAQUE_RING_MASK 0x00070000 +}; + +struct tg3_ext_rx_buffer_desc { + struct { + u32 addr_hi; + u32 addr_lo; + } addrlist[3]; + u32 len2_len1; + u32 resv_len3; + struct tg3_rx_buffer_desc std; +}; + +/* We only use this when testing out the DMA engine + * at probe time. 
This is the internal format of buffer + * descriptors used by the chip at NIC_SRAM_DMA_DESCS. + */ +struct tg3_internal_buffer_desc { + u32 addr_hi; + u32 addr_lo; + u32 nic_mbuf; + /* XXX FIX THIS */ +#ifdef __BIG_ENDIAN + u16 cqid_sqid; + u16 len; +#else + u16 len; + u16 cqid_sqid; +#endif + u32 flags; + u32 __cookie1; + u32 __cookie2; + u32 __cookie3; +}; + +#define TG3_HW_STATUS_SIZE 0x50 +struct tg3_hw_status { + u32 status; +#define SD_STATUS_UPDATED 0x00000001 +#define SD_STATUS_LINK_CHG 0x00000002 +#define SD_STATUS_ERROR 0x00000004 + + u32 status_tag; + +#ifdef __BIG_ENDIAN + u16 rx_consumer; + u16 rx_jumbo_consumer; +#else + u16 rx_jumbo_consumer; + u16 rx_consumer; +#endif + +#ifdef __BIG_ENDIAN + u16 reserved; + u16 rx_mini_consumer; +#else + u16 rx_mini_consumer; + u16 reserved; +#endif + struct { +#ifdef __BIG_ENDIAN + u16 tx_consumer; + u16 rx_producer; +#else + u16 rx_producer; + u16 tx_consumer; +#endif + } idx[16]; +}; + +typedef struct { + u32 high, low; +} tg3_stat64_t; + +struct tg3_hw_stats { + u8 __reserved0[0x400-0x300]; + + /* Statistics maintained by Receive MAC. */ + tg3_stat64_t rx_octets; + u64 __reserved1; + tg3_stat64_t rx_fragments; + tg3_stat64_t rx_ucast_packets; + tg3_stat64_t rx_mcast_packets; + tg3_stat64_t rx_bcast_packets; + tg3_stat64_t rx_fcs_errors; + tg3_stat64_t rx_align_errors; + tg3_stat64_t rx_xon_pause_rcvd; + tg3_stat64_t rx_xoff_pause_rcvd; + tg3_stat64_t rx_mac_ctrl_rcvd; + tg3_stat64_t rx_xoff_entered; + tg3_stat64_t rx_frame_too_long_errors; + tg3_stat64_t rx_jabbers; + tg3_stat64_t rx_undersize_packets; + tg3_stat64_t rx_in_length_errors; + tg3_stat64_t rx_out_length_errors; + tg3_stat64_t rx_64_or_less_octet_packets; + tg3_stat64_t rx_65_to_127_octet_packets; + tg3_stat64_t rx_128_to_255_octet_packets; + tg3_stat64_t rx_256_to_511_octet_packets; + tg3_stat64_t rx_512_to_1023_octet_packets; + tg3_stat64_t rx_1024_to_1522_octet_packets; + tg3_stat64_t rx_1523_to_2047_octet_packets; + tg3_stat64_t rx_2048_to_4095_octet_packets; + tg3_stat64_t rx_4096_to_8191_octet_packets; + tg3_stat64_t rx_8192_to_9022_octet_packets; + + u64 __unused0[37]; + + /* Statistics maintained by Transmit MAC. */ + tg3_stat64_t tx_octets; + u64 __reserved2; + tg3_stat64_t tx_collisions; + tg3_stat64_t tx_xon_sent; + tg3_stat64_t tx_xoff_sent; + tg3_stat64_t tx_flow_control; + tg3_stat64_t tx_mac_errors; + tg3_stat64_t tx_single_collisions; + tg3_stat64_t tx_mult_collisions; + tg3_stat64_t tx_deferred; + u64 __reserved3; + tg3_stat64_t tx_excessive_collisions; + tg3_stat64_t tx_late_collisions; + tg3_stat64_t tx_collide_2times; + tg3_stat64_t tx_collide_3times; + tg3_stat64_t tx_collide_4times; + tg3_stat64_t tx_collide_5times; + tg3_stat64_t tx_collide_6times; + tg3_stat64_t tx_collide_7times; + tg3_stat64_t tx_collide_8times; + tg3_stat64_t tx_collide_9times; + tg3_stat64_t tx_collide_10times; + tg3_stat64_t tx_collide_11times; + tg3_stat64_t tx_collide_12times; + tg3_stat64_t tx_collide_13times; + tg3_stat64_t tx_collide_14times; + tg3_stat64_t tx_collide_15times; + tg3_stat64_t tx_ucast_packets; + tg3_stat64_t tx_mcast_packets; + tg3_stat64_t tx_bcast_packets; + tg3_stat64_t tx_carrier_sense_errors; + tg3_stat64_t tx_discards; + tg3_stat64_t tx_errors; + + u64 __unused1[31]; + + /* Statistics maintained by Receive List Placement. 
*/ + tg3_stat64_t COS_rx_packets[16]; + tg3_stat64_t COS_rx_filter_dropped; + tg3_stat64_t dma_writeq_full; + tg3_stat64_t dma_write_prioq_full; + tg3_stat64_t rxbds_empty; + tg3_stat64_t rx_discards; + tg3_stat64_t rx_errors; + tg3_stat64_t rx_threshold_hit; + + u64 __unused2[9]; + + /* Statistics maintained by Send Data Initiator. */ + tg3_stat64_t COS_out_packets[16]; + tg3_stat64_t dma_readq_full; + tg3_stat64_t dma_read_prioq_full; + tg3_stat64_t tx_comp_queue_full; + + /* Statistics maintained by Host Coalescing. */ + tg3_stat64_t ring_set_send_prod_index; + tg3_stat64_t ring_status_update; + tg3_stat64_t nic_irqs; + tg3_stat64_t nic_avoided_irqs; + tg3_stat64_t nic_tx_threshold_hit; + + /* NOT a part of the hardware statistics block format. + * These stats are here as storage for tg3_periodic_fetch_stats(). + */ + tg3_stat64_t mbuf_lwm_thresh_hit; + + u8 __reserved4[0xb00-0x9c8]; +}; + +#define TG3_SD_NUM_RECS 3 +#define TG3_OCIR_LEN (sizeof(struct tg3_ocir)) +#define TG3_OCIR_SIG_MAGIC 0x5253434f +#define TG3_OCIR_FLAG_ACTIVE 0x00000001 + +#define TG3_TEMP_CAUTION_OFFSET 0xc8 +#define TG3_TEMP_MAX_OFFSET 0xcc +#define TG3_TEMP_SENSOR_OFFSET 0xd4 + + +struct tg3_ocir { + u32 signature; + u16 version_flags; + u16 refresh_int; + u32 refresh_tmr; + u32 update_tmr; + u32 dst_base_addr; + u16 src_hdr_offset; + u16 src_hdr_length; + u16 src_data_offset; + u16 src_data_length; + u16 dst_hdr_offset; + u16 dst_data_offset; + u16 dst_reg_upd_offset; + u16 dst_sem_offset; + u32 reserved1[2]; + u32 port0_flags; + u32 port1_flags; + u32 port2_flags; + u32 port3_flags; + u32 reserved2[1]; +}; + + +/* 'mapping' is superfluous as the chip does not write into + * the tx/rx post rings so we could just fetch it from there. + * But the cache behavior is better how we are doing it now. + * + * This driver uses new build_skb() API : + * RX ring buffer contains pointer to kmalloc() data only, + * skb are built only after Hardware filled the frame. + */ +struct ring_info { + u8 *data; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct tg3_tx_ring_info { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + bool fragmented; +}; + +struct tg3_link_config { + /* Describes what we're trying to get. */ + u32 advertising; + u16 speed; + u8 duplex; + u8 autoneg; + u8 flowctrl; + + /* Describes what we actually have. */ + u8 active_flowctrl; + + u8 active_duplex; + u16 active_speed; + u32 rmt_adv; +}; + +struct tg3_bufmgr_config { + u32 mbuf_read_dma_low_water; + u32 mbuf_mac_rx_low_water; + u32 mbuf_high_water; + + u32 mbuf_read_dma_low_water_jumbo; + u32 mbuf_mac_rx_low_water_jumbo; + u32 mbuf_high_water_jumbo; + + u32 dma_low_water; + u32 dma_high_water; +}; + +struct tg3_ethtool_stats { + /* Statistics maintained by Receive MAC. */ + u64 rx_octets; + u64 rx_fragments; + u64 rx_ucast_packets; + u64 rx_mcast_packets; + u64 rx_bcast_packets; + u64 rx_fcs_errors; + u64 rx_align_errors; + u64 rx_xon_pause_rcvd; + u64 rx_xoff_pause_rcvd; + u64 rx_mac_ctrl_rcvd; + u64 rx_xoff_entered; + u64 rx_frame_too_long_errors; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_in_length_errors; + u64 rx_out_length_errors; + u64 rx_64_or_less_octet_packets; + u64 rx_65_to_127_octet_packets; + u64 rx_128_to_255_octet_packets; + u64 rx_256_to_511_octet_packets; + u64 rx_512_to_1023_octet_packets; + u64 rx_1024_to_1522_octet_packets; + u64 rx_1523_to_2047_octet_packets; + u64 rx_2048_to_4095_octet_packets; + u64 rx_4096_to_8191_octet_packets; + u64 rx_8192_to_9022_octet_packets; + + /* Statistics maintained by Transmit MAC. 
*/ + u64 tx_octets; + u64 tx_collisions; + u64 tx_xon_sent; + u64 tx_xoff_sent; + u64 tx_flow_control; + u64 tx_mac_errors; + u64 tx_single_collisions; + u64 tx_mult_collisions; + u64 tx_deferred; + u64 tx_excessive_collisions; + u64 tx_late_collisions; + u64 tx_collide_2times; + u64 tx_collide_3times; + u64 tx_collide_4times; + u64 tx_collide_5times; + u64 tx_collide_6times; + u64 tx_collide_7times; + u64 tx_collide_8times; + u64 tx_collide_9times; + u64 tx_collide_10times; + u64 tx_collide_11times; + u64 tx_collide_12times; + u64 tx_collide_13times; + u64 tx_collide_14times; + u64 tx_collide_15times; + u64 tx_ucast_packets; + u64 tx_mcast_packets; + u64 tx_bcast_packets; + u64 tx_carrier_sense_errors; + u64 tx_discards; + u64 tx_errors; + + /* Statistics maintained by Receive List Placement. */ + u64 dma_writeq_full; + u64 dma_write_prioq_full; + u64 rxbds_empty; + u64 rx_discards; + u64 rx_errors; + u64 rx_threshold_hit; + + /* Statistics maintained by Send Data Initiator. */ + u64 dma_readq_full; + u64 dma_read_prioq_full; + u64 tx_comp_queue_full; + + /* Statistics maintained by Host Coalescing. */ + u64 ring_set_send_prod_index; + u64 ring_status_update; + u64 nic_irqs; + u64 nic_avoided_irqs; + u64 nic_tx_threshold_hit; + + u64 mbuf_lwm_thresh_hit; +}; + +struct tg3_rx_prodring_set { + u32 rx_std_prod_idx; + u32 rx_std_cons_idx; + u32 rx_jmb_prod_idx; + u32 rx_jmb_cons_idx; + struct tg3_rx_buffer_desc *rx_std; + struct tg3_ext_rx_buffer_desc *rx_jmb; + struct ring_info *rx_std_buffers; + struct ring_info *rx_jmb_buffers; + dma_addr_t rx_std_mapping; + dma_addr_t rx_jmb_mapping; +}; + +#define TG3_RSS_MAX_NUM_QS 4 +#define TG3_IRQ_MAX_VECS_RSS (TG3_RSS_MAX_NUM_QS + 1) +#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS + +struct tg3_napi { + struct napi_struct napi ____cacheline_aligned; + struct tg3 *tp; + struct tg3_hw_status *hw_status; + + u32 chk_msi_cnt; + u32 last_tag; + u32 last_irq_tag; + u32 int_mbox; + u32 coal_now; + + u32 consmbox ____cacheline_aligned; + u32 rx_rcb_ptr; + u32 last_rx_cons; + u16 *rx_rcb_prod_idx; + struct tg3_rx_prodring_set prodring; + struct tg3_rx_buffer_desc *rx_rcb; + + u32 tx_prod ____cacheline_aligned; + u32 tx_cons; + u32 tx_pending; + u32 last_tx_cons; + u32 prodmbox; + struct tg3_tx_buffer_desc *tx_ring; + struct tg3_tx_ring_info *tx_buffers; + + dma_addr_t status_mapping; + dma_addr_t rx_rcb_mapping; + dma_addr_t tx_desc_mapping; + + char irq_lbl[IFNAMSIZ]; + unsigned int irq_vec; +}; + +enum TG3_FLAGS { + TG3_FLAG_TAGGED_STATUS = 0, + TG3_FLAG_TXD_MBOX_HWBUG, + TG3_FLAG_USE_LINKCHG_REG, + TG3_FLAG_ERROR_PROCESSED, + TG3_FLAG_ENABLE_ASF, + TG3_FLAG_ASPM_WORKAROUND, + TG3_FLAG_POLL_SERDES, + TG3_FLAG_POLL_CPMU_LINK, + TG3_FLAG_MBOX_WRITE_REORDER, + TG3_FLAG_PCIX_TARGET_HWBUG, + TG3_FLAG_WOL_SPEED_100MB, + TG3_FLAG_WOL_ENABLE, + TG3_FLAG_EEPROM_WRITE_PROT, + TG3_FLAG_NVRAM, + TG3_FLAG_NVRAM_BUFFERED, + TG3_FLAG_SUPPORT_MSI, + TG3_FLAG_SUPPORT_MSIX, + TG3_FLAG_USING_MSI, + TG3_FLAG_USING_MSIX, + TG3_FLAG_PCIX_MODE, + TG3_FLAG_PCI_HIGH_SPEED, + TG3_FLAG_PCI_32BIT, + TG3_FLAG_SRAM_USE_CONFIG, + TG3_FLAG_TX_RECOVERY_PENDING, + TG3_FLAG_WOL_CAP, + TG3_FLAG_JUMBO_RING_ENABLE, + TG3_FLAG_PAUSE_AUTONEG, + TG3_FLAG_CPMU_PRESENT, + TG3_FLAG_40BIT_DMA_BUG, + TG3_FLAG_BROKEN_CHECKSUMS, + TG3_FLAG_JUMBO_CAPABLE, + TG3_FLAG_CHIP_RESETTING, + TG3_FLAG_INIT_COMPLETE, + TG3_FLAG_MAX_RXPEND_64, + TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ + TG3_FLAG_ASF_NEW_HANDSHAKE, + TG3_FLAG_HW_AUTONEG, + TG3_FLAG_IS_NIC, + TG3_FLAG_FLASH, + TG3_FLAG_FW_TSO, + 
TG3_FLAG_HW_TSO_1, + TG3_FLAG_HW_TSO_2, + TG3_FLAG_HW_TSO_3, + TG3_FLAG_TSO_CAPABLE, + TG3_FLAG_TSO_BUG, + TG3_FLAG_ICH_WORKAROUND, + TG3_FLAG_1SHOT_MSI, + TG3_FLAG_NO_FWARE_REPORTED, + TG3_FLAG_NO_NVRAM_ADDR_TRANS, + TG3_FLAG_ENABLE_APE, + TG3_FLAG_PROTECTED_NVRAM, + TG3_FLAG_5701_DMA_BUG, + TG3_FLAG_USE_PHYLIB, + TG3_FLAG_MDIOBUS_INITED, + TG3_FLAG_LRG_PROD_RING_CAP, + TG3_FLAG_RGMII_INBAND_DISABLE, + TG3_FLAG_RGMII_EXT_IBND_RX_EN, + TG3_FLAG_RGMII_EXT_IBND_TX_EN, + TG3_FLAG_CLKREQ_BUG, + TG3_FLAG_NO_NVRAM, + TG3_FLAG_ENABLE_RSS, + TG3_FLAG_ENABLE_TSS, + TG3_FLAG_SHORT_DMA_BUG, + TG3_FLAG_USE_JUMBO_BDFLAG, + TG3_FLAG_L1PLLPD_EN, + TG3_FLAG_APE_HAS_NCSI, + TG3_FLAG_TX_TSTAMP_EN, + TG3_FLAG_4K_FIFO_LIMIT, + TG3_FLAG_5719_5720_RDMA_BUG, + TG3_FLAG_RESET_TASK_PENDING, + TG3_FLAG_PTP_CAPABLE, + TG3_FLAG_5705_PLUS, + TG3_FLAG_IS_5788, + TG3_FLAG_5750_PLUS, + TG3_FLAG_5780_CLASS, + TG3_FLAG_5755_PLUS, + TG3_FLAG_57765_PLUS, + TG3_FLAG_57765_CLASS, + TG3_FLAG_5717_PLUS, + TG3_FLAG_IS_SSB_CORE, + TG3_FLAG_FLUSH_POSTED_WRITES, + TG3_FLAG_ROBOSWITCH, + TG3_FLAG_ONE_DMA_AT_ONCE, + TG3_FLAG_RGMII_MODE, + + /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ + TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ +}; + +struct tg3_firmware_hdr { + __be32 version; /* unused for fragments */ + __be32 base_addr; + __be32 len; +}; +#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr)) + +struct tg3 { + /* begin "general, frequently-used members" cacheline section */ + + /* If the IRQ handler (which runs lockless) needs to be + * quiesced, the following bitmask state is used. The + * SYNC flag is set by non-IRQ context code to initiate + * the quiescence. + * + * When the IRQ handler notices that SYNC is set, it + * disables interrupts and returns. + * + * When all outstanding IRQ handlers have returned after + * the SYNC flag has been set, the setter can be assured + * that interrupts will no longer get run. + * + * In this way all SMP driver locks are never acquired + * in hw IRQ context, only sw IRQ context or lower. + */ + unsigned int irq_sync; + + /* SMP locking strategy: + * + * lock: Held during reset, PHY access, timer, and when + * updating tg3_flags. + * + * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds + * netif_tx_lock when it needs to call + * netif_wake_queue. + * + * Both of these locks are to be held with BH safety. + * + * Because the IRQ handler, tg3_poll, and tg3_start_xmit + * are running lockless, it is necessary to completely + * quiesce the chip with tg3_netif_stop and tg3_full_lock + * before reconfiguring the device. + * + * indirect_lock: Held when accessing registers indirectly + * with IRQ disabling. 
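+ *
+ * A sketch of the quiesce/reconfigure pattern this implies (the
+ * tg3_netif_stop(), tg3_full_lock(), tg3_full_unlock() and
+ * tg3_netif_start() helpers referenced here live in tg3.c):
+ *
+ *	tg3_netif_stop(tp);
+ *	tg3_full_lock(tp, 1);		irq_sync = 1: wait out IRQ handlers
+ *	... reconfigure the device ...
+ *	tg3_full_unlock(tp);
+ *	tg3_netif_start(tp);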
+ */ + spinlock_t lock; + spinlock_t indirect_lock; + + u32 (*read32) (struct tg3 *, u32); + void (*write32) (struct tg3 *, u32, u32); + u32 (*read32_mbox) (struct tg3 *, u32); + void (*write32_mbox) (struct tg3 *, u32, + u32); + void __iomem *regs; + void __iomem *aperegs; + struct net_device *dev; + struct pci_dev *pdev; + + u32 coal_now; + u32 msg_enable; + + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + s64 ptp_adjust; + + /* begin "tx thread" cacheline section */ + void (*write32_tx_mbox) (struct tg3 *, u32, + u32); + u32 dma_limit; + u32 txq_req; + u32 txq_cnt; + u32 txq_max; + + /* begin "rx thread" cacheline section */ + struct tg3_napi napi[TG3_IRQ_MAX_VECS]; + void (*write32_rx_mbox) (struct tg3 *, u32, + u32); + u32 rx_copy_thresh; + u32 rx_std_ring_mask; + u32 rx_jmb_ring_mask; + u32 rx_ret_ring_mask; + u32 rx_pending; + u32 rx_jumbo_pending; + u32 rx_std_max_post; + u32 rx_offset; + u32 rx_pkt_map_sz; + u32 rxq_req; + u32 rxq_cnt; + u32 rxq_max; + bool rx_refill; + + + /* begin "everything else" cacheline(s) section */ + unsigned long rx_dropped; + unsigned long tx_dropped; + struct rtnl_link_stats64 net_stats_prev; + struct tg3_ethtool_stats estats_prev; + + DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS); + + union { + unsigned long phy_crc_errors; + unsigned long last_event_jiffies; + }; + + struct timer_list timer; + u16 timer_counter; + u16 timer_multiplier; + u32 timer_offset; + u16 asf_counter; + u16 asf_multiplier; + + /* 1 second counter for transient serdes link events */ + u32 serdes_counter; +#define SERDES_AN_TIMEOUT_5704S 2 +#define SERDES_PARALLEL_DET_TIMEOUT 1 +#define SERDES_AN_TIMEOUT_5714S 1 + + struct tg3_link_config link_config; + struct tg3_bufmgr_config bufmgr_config; + + /* cache h/w values, often passed straight to h/w */ + u32 rx_mode; + u32 tx_mode; + u32 mac_mode; + u32 mi_mode; + u32 misc_host_ctrl; + u32 grc_mode; + u32 grc_local_ctrl; + u32 dma_rwctrl; + u32 coalesce_mode; + u32 pwrmgmt_thresh; + u32 rxptpctl; + + /* PCI block */ + u32 pci_chip_rev_id; + u16 pci_cmd; + u8 pci_cacheline_sz; + u8 pci_lat_timer; + + int pci_fn; + int msi_cap; + int pcix_cap; + int pcie_readrq; + + struct mii_bus *mdio_bus; + int mdio_irq[PHY_MAX_ADDR]; + int old_link; + + u8 phy_addr; + u8 phy_ape_lock; + + /* PHY info */ + u32 phy_id; +#define TG3_PHY_ID_MASK 0xfffffff0 +#define TG3_PHY_ID_BCM5400 0x60008040 +#define TG3_PHY_ID_BCM5401 0x60008050 +#define TG3_PHY_ID_BCM5411 0x60008070 +#define TG3_PHY_ID_BCM5701 0x60008110 +#define TG3_PHY_ID_BCM5703 0x60008160 +#define TG3_PHY_ID_BCM5704 0x60008190 +#define TG3_PHY_ID_BCM5705 0x600081a0 +#define TG3_PHY_ID_BCM5750 0x60008180 +#define TG3_PHY_ID_BCM5752 0x60008100 +#define TG3_PHY_ID_BCM5714 0x60008340 +#define TG3_PHY_ID_BCM5780 0x60008350 +#define TG3_PHY_ID_BCM5755 0xbc050cc0 +#define TG3_PHY_ID_BCM5787 0xbc050ce0 +#define TG3_PHY_ID_BCM5756 0xbc050ed0 +#define TG3_PHY_ID_BCM5784 0xbc050fa0 +#define TG3_PHY_ID_BCM5761 0xbc050fd0 +#define TG3_PHY_ID_BCM5718C 0x5c0d8a00 +#define TG3_PHY_ID_BCM5718S 0xbc050ff0 +#define TG3_PHY_ID_BCM57765 0x5c0d8a40 +#define TG3_PHY_ID_BCM5719C 0x5c0d8a20 +#define TG3_PHY_ID_BCM5720C 0x5c0d8b60 +#define TG3_PHY_ID_BCM5762 0x85803780 +#define TG3_PHY_ID_BCM5906 0xdc00ac40 +#define TG3_PHY_ID_BCM8002 0x60010140 +#define TG3_PHY_ID_INVALID 0xffffffff + +#define PHY_ID_RTL8211C 0x001cc910 +#define PHY_ID_RTL8201E 0x00008200 + +#define TG3_PHY_ID_REV_MASK 0x0000000f +#define TG3_PHY_REV_BCM5401_B0 0x1 + + /* This macro assumes the passed PHY ID is + * already 
masked with TG3_PHY_ID_MASK. + */ +#define TG3_KNOWN_PHY_ID(X) \ + ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \ + (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \ + (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \ + (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \ + (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \ + (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \ + (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \ + (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \ + (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \ + (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \ + (X) == TG3_PHY_ID_BCM5720C || (X) == TG3_PHY_ID_BCM5762 || \ + (X) == TG3_PHY_ID_BCM8002) + + u32 phy_flags; +#define TG3_PHYFLG_IS_LOW_POWER 0x00000001 +#define TG3_PHYFLG_IS_CONNECTED 0x00000002 +#define TG3_PHYFLG_USE_MI_INTERRUPT 0x00000004 +#define TG3_PHYFLG_USER_CONFIGURED 0x00000008 +#define TG3_PHYFLG_PHY_SERDES 0x00000010 +#define TG3_PHYFLG_MII_SERDES 0x00000020 +#define TG3_PHYFLG_ANY_SERDES (TG3_PHYFLG_PHY_SERDES | \ + TG3_PHYFLG_MII_SERDES) +#define TG3_PHYFLG_IS_FET 0x00000040 +#define TG3_PHYFLG_10_100_ONLY 0x00000080 +#define TG3_PHYFLG_ENABLE_APD 0x00000100 +#define TG3_PHYFLG_CAPACITIVE_COUPLING 0x00000200 +#define TG3_PHYFLG_NO_ETH_WIRE_SPEED 0x00000400 +#define TG3_PHYFLG_JITTER_BUG 0x00000800 +#define TG3_PHYFLG_ADJUST_TRIM 0x00001000 +#define TG3_PHYFLG_ADC_BUG 0x00002000 +#define TG3_PHYFLG_5704_A0_BUG 0x00004000 +#define TG3_PHYFLG_BER_BUG 0x00008000 +#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 +#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 +#define TG3_PHYFLG_EEE_CAP 0x00040000 +#define TG3_PHYFLG_1G_ON_VAUX_OK 0x00080000 +#define TG3_PHYFLG_KEEP_LINK_ON_PWRDN 0x00100000 +#define TG3_PHYFLG_MDIX_STATE 0x00200000 +#define TG3_PHYFLG_DISABLE_1G_HD_ADV 0x00400000 + + u32 led_ctrl; + u32 phy_otp; + u32 setlpicnt; + u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE]; + +#define TG3_BPN_SIZE 24 + char board_part_number[TG3_BPN_SIZE]; +#define TG3_VER_SIZE ETHTOOL_FWVERS_LEN + char fw_ver[TG3_VER_SIZE]; + u32 nic_sram_data_cfg; + u32 pci_clock_ctrl; + struct pci_dev *pdev_peer; + + struct tg3_hw_stats *hw_stats; + dma_addr_t stats_mapping; + struct work_struct reset_task; + + int nvram_lock_cnt; + u32 nvram_size; +#define TG3_NVRAM_SIZE_2KB 0x00000800 +#define TG3_NVRAM_SIZE_64KB 0x00010000 +#define TG3_NVRAM_SIZE_128KB 0x00020000 +#define TG3_NVRAM_SIZE_256KB 0x00040000 +#define TG3_NVRAM_SIZE_512KB 0x00080000 +#define TG3_NVRAM_SIZE_1MB 0x00100000 +#define TG3_NVRAM_SIZE_2MB 0x00200000 + + u32 nvram_pagesize; + u32 nvram_jedecnum; + +#define JEDEC_ATMEL 0x1f +#define JEDEC_ST 0x20 +#define JEDEC_SAIFUN 0x4f +#define JEDEC_SST 0xbf + +#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB +#define ATMEL_AT24C02_PAGE_SIZE (8) + +#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB +#define ATMEL_AT24C64_PAGE_SIZE (32) + +#define ATMEL_AT24C512_CHIP_SIZE TG3_NVRAM_SIZE_512KB +#define ATMEL_AT24C512_PAGE_SIZE (128) + +#define ATMEL_AT45DB0X1B_PAGE_POS 9 +#define ATMEL_AT45DB0X1B_PAGE_SIZE 264 + +#define ATMEL_AT25F512_PAGE_SIZE 256 + +#define ST_M45PEX0_PAGE_SIZE 256 + +#define SAIFUN_SA25F0XX_PAGE_SIZE 256 + +#define SST_25VF0X0_PAGE_SIZE 4098 + + unsigned int irq_max; + unsigned int irq_cnt; + + struct ethtool_coalesce coal; + struct ethtool_eee eee; + + /* firmware info */ + const char *fw_needed; + const struct firmware *fw; + u32 fw_len; /* includes BSS */ + + struct device *hwmon_dev; + bool link_up; 
+ bool pcierr_recovery; +}; + +/* Accessor macros for chip and asic attributes + * + * nb: Using static inlines equivalent to the accessor macros generates + * larger object code with gcc 4.7. + * Using statement expression macros to check tp with + * typecheck(struct tg3 *, tp) also creates larger objects. + */ +#define tg3_chip_rev_id(tp) \ + ((tp)->pci_chip_rev_id) +#define tg3_asic_rev(tp) \ + ((tp)->pci_chip_rev_id >> 12) +#define tg3_chip_rev(tp) \ + ((tp)->pci_chip_rev_id >> 8) + +#endif /* !(_T3_H) */ diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig new file mode 100644 index 000000000..4e8c0b6c5 --- /dev/null +++ b/drivers/net/ethernet/brocade/Kconfig @@ -0,0 +1,23 @@ +# +# QLogic BR-series device configuration +# + +config NET_VENDOR_BROCADE + bool "QLogic BR-series devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about QLogic BR-series cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_BROCADE + +source "drivers/net/ethernet/brocade/bna/Kconfig" + +endif # NET_VENDOR_BROCADE diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile new file mode 100644 index 000000000..fec10f9b4 --- /dev/null +++ b/drivers/net/ethernet/brocade/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the QLogic BR-series device drivers. +# + +obj-$(CONFIG_BNA) += bna/ diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig new file mode 100644 index 000000000..fe01279a8 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/Kconfig @@ -0,0 +1,17 @@ +# +# QLogic BR-series network device configuration +# + +config BNA + tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support" + depends on PCI + ---help--- + This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable + Ethernet cards. + To compile this driver as a module, choose M here: the module + will be called bna. + + For general information and support, go to the QLogic support + website at: + + diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile new file mode 100644 index 000000000..6e10b9973 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -0,0 +1,13 @@ +# +# Copyright (c) 2005-2014 Brocade Communications Systems, Inc. +# Copyright (c) 2014-2015 QLogic Corporation. +# All rights reserved. +# + +obj-$(CONFIG_BNA) += bna.o + +bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o +bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o +bna-objs += cna_fwimg.o + +EXTRA_CFLAGS := -Idrivers/net/bna diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c new file mode 100644 index 000000000..cf9f3956f --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -0,0 +1,288 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "bfa_cee.h" +#include "bfi_cna.h" +#include "bfa_ioc.h" + +static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg); +static void bfa_cee_format_cee_cfg(void *buffer); + +static void +bfa_cee_format_cee_cfg(void *buffer) +{ + struct bfa_cee_attr *cee_cfg = buffer; + bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote); +} + +static void +bfa_cee_stats_swap(struct bfa_cee_stats *stats) +{ + u32 *buffer = (u32 *)stats; + int i; + + for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32)); + i++) { + buffer[i] = ntohl(buffer[i]); + } +} + +static void +bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg) +{ + lldp_cfg->time_to_live = + ntohs(lldp_cfg->time_to_live); + lldp_cfg->enabled_system_cap = + ntohs(lldp_cfg->enabled_system_cap); +} + +/** + * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes + */ +static u32 +bfa_cee_attr_meminfo(void) +{ + return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ); +} +/** + * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats + */ +static u32 +bfa_cee_stats_meminfo(void) +{ + return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ); +} + +/** + * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w + * + * @cee: Pointer to the CEE module + * @status: Return status from the f/w + */ +static void +bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->get_attr_status = status; + if (status == BFA_STATUS_OK) { + memcpy(cee->attr, cee->attr_dma.kva, + sizeof(struct bfa_cee_attr)); + bfa_cee_format_cee_cfg(cee->attr); + } + cee->get_attr_pending = false; + if (cee->cbfn.get_attr_cbfn) + cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status); +} + +/** + * bfa_cee_get_attr_isr - CEE ISR for get-stats responses from f/w + * + * @cee: Pointer to the CEE module + * @status: Return status from the f/w + */ +static void +bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->get_stats_status = status; + if (status == BFA_STATUS_OK) { + memcpy(cee->stats, cee->stats_dma.kva, + sizeof(struct bfa_cee_stats)); + bfa_cee_stats_swap(cee->stats); + } + cee->get_stats_pending = false; + if (cee->cbfn.get_stats_cbfn) + cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status); +} + +/** + * bfa_cee_get_attr_isr() + * + * @brief CEE ISR for reset-stats responses from f/w + * + * @param[in] cee - Pointer to the CEE module + * status - Return status from the f/w + * + * @return void + */ +static void +bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->reset_stats_status = status; + cee->reset_stats_pending = false; + if (cee->cbfn.reset_stats_cbfn) + cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); +} +/** + * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module + */ +u32 +bfa_nw_cee_meminfo(void) +{ + return bfa_cee_attr_meminfo() + 
bfa_cee_stats_meminfo(); +} + +/** + * bfa_nw_cee_mem_claim - Initialized CEE DMA Memory + * + * @cee: CEE module pointer + * @dma_kva: Kernel Virtual Address of CEE DMA Memory + * @dma_pa: Physical Address of CEE DMA Memory + */ +void +bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) +{ + cee->attr_dma.kva = dma_kva; + cee->attr_dma.pa = dma_pa; + cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo(); + cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo(); + cee->attr = (struct bfa_cee_attr *) dma_kva; + cee->stats = (struct bfa_cee_stats *) + (dma_kva + bfa_cee_attr_meminfo()); +} + +/** + * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes. + * + * @cee: Pointer to the CEE module data structure. + * + * Return: status + */ +enum bfa_status +bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_get_req *cmd; + + BUG_ON(!((cee != NULL) && (cee->ioc != NULL))); + if (!bfa_nw_ioc_is_operational(cee->ioc)) + return BFA_STATUS_IOC_FAILURE; + + if (cee->get_attr_pending) + return BFA_STATUS_DEVBUSY; + + cee->get_attr_pending = true; + cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg; + cee->attr = attr; + cee->cbfn.get_attr_cbfn = cbfn; + cee->cbfn.get_attr_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, + bfa_ioc_portid(cee->ioc)); + bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); + bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL); + + return BFA_STATUS_OK; +} + +/** + * bfa_cee_isrs - Handles Mail-box interrupts for CEE module. + */ + +static void +bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m) +{ + union bfi_cee_i2h_msg_u *msg; + struct bfi_cee_get_rsp *get_rsp; + struct bfa_cee *cee = (struct bfa_cee *) cbarg; + msg = (union bfi_cee_i2h_msg_u *) m; + get_rsp = (struct bfi_cee_get_rsp *) m; + switch (msg->mh.msg_id) { + case BFI_CEE_I2H_GET_CFG_RSP: + bfa_cee_get_attr_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_GET_STATS_RSP: + bfa_cee_get_stats_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_RESET_STATS_RSP: + bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status); + break; + default: + BUG_ON(1); + } +} + +/** + * bfa_cee_notify - CEE module heart-beat failure handler. + * + * @event: IOC event type + */ + +static void +bfa_cee_notify(void *arg, enum bfa_ioc_event event) +{ + struct bfa_cee *cee; + cee = (struct bfa_cee *) arg; + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (cee->get_attr_pending) { + cee->get_attr_status = BFA_STATUS_FAILED; + cee->get_attr_pending = false; + if (cee->cbfn.get_attr_cbfn) { + cee->cbfn.get_attr_cbfn( + cee->cbfn.get_attr_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->get_stats_pending) { + cee->get_stats_status = BFA_STATUS_FAILED; + cee->get_stats_pending = false; + if (cee->cbfn.get_stats_cbfn) { + cee->cbfn.get_stats_cbfn( + cee->cbfn.get_stats_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->reset_stats_pending) { + cee->reset_stats_status = BFA_STATUS_FAILED; + cee->reset_stats_pending = false; + if (cee->cbfn.reset_stats_cbfn) { + cee->cbfn.reset_stats_cbfn( + cee->cbfn.reset_stats_cbarg, + BFA_STATUS_FAILED); + } + } + break; + + default: + break; + } +} + +/** + * bfa_nw_cee_attach - CEE module-attach API + * + * @cee: Pointer to the CEE module data structure + * @ioc: Pointer to the ioc module data structure + * @dev: Pointer to the device driver module data structure. 
+ * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. + */ +void +bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, + void *dev) +{ + BUG_ON(!(cee != NULL)); + cee->dev = dev; + cee->ioc = ioc; + + bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); + bfa_q_qe_init(&cee->ioc_notify); + bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee); + bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify); +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h new file mode 100644 index 000000000..d04eef5d5 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -0,0 +1,66 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_CEE_H__ +#define __BFA_CEE_H__ + +#include "bfa_defs_cna.h" +#include "bfa_ioc.h" + +typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status); +typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status); +typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status); + +struct bfa_cee_cbfn { + bfa_cee_get_attr_cbfn_t get_attr_cbfn; + void *get_attr_cbarg; + bfa_cee_get_stats_cbfn_t get_stats_cbfn; + void *get_stats_cbarg; + bfa_cee_reset_stats_cbfn_t reset_stats_cbfn; + void *reset_stats_cbarg; +}; + +struct bfa_cee { + void *dev; + bool get_attr_pending; + bool get_stats_pending; + bool reset_stats_pending; + enum bfa_status get_attr_status; + enum bfa_status get_stats_status; + enum bfa_status reset_stats_status; + struct bfa_cee_cbfn cbfn; + struct bfa_ioc_notify ioc_notify; + struct bfa_cee_attr *attr; + struct bfa_cee_stats *stats; + struct bfa_dma attr_dma; + struct bfa_dma stats_dma; + struct bfa_ioc *ioc; + struct bfa_mbox_cmd get_cfg_mb; + struct bfa_mbox_cmd get_stats_mb; + struct bfa_mbox_cmd reset_stats_mb; +}; + +u32 bfa_nw_cee_meminfo(void); +void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, + u64 dma_pa); +void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev); +enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee, + struct bfa_cee_attr *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); +#endif /* __BFA_CEE_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h new file mode 100644 index 000000000..af25d8e8f --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -0,0 +1,125 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* BFA common services */ + +#ifndef __BFA_CS_H__ +#define __BFA_CS_H__ + +#include "cna.h" + +/* BFA state machine interfaces */ + +typedef void (*bfa_sm_t)(void *sm, int event); + +/* oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc + * etype - object type, eg. enum ioc_event + */ +#define bfa_sm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event) + +#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) +#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) +#define bfa_sm_get_state(_sm) ((_sm)->sm) +#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) + +/* For converting from state machine function to state encoding. */ +struct bfa_sm_table { + bfa_sm_t sm; /*!< state machine function */ + int state; /*!< state machine encoding */ + char *name; /*!< state name for display */ +}; +#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) + +/* State machine with entry actions. */ +typedef void (*bfa_fsm_t)(void *fsm, int event); + +/* oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc + * etype - object type, eg. enum ioc_event + */ +#define bfa_fsm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event); \ + static void oc ## _sm_ ## st ## _entry(otype * fsm) + +#define bfa_fsm_set_state(_fsm, _state) do { \ + (_fsm)->fsm = (bfa_fsm_t)(_state); \ + _state ## _entry(_fsm); \ +} while (0) + +#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event))) +#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm) +#define bfa_fsm_cmp_state(_fsm, _state) \ + ((_fsm)->fsm == (bfa_fsm_t)(_state)) + +static inline int +bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +/* Generic wait counter. */ + +typedef void (*bfa_wc_resume_t) (void *cbarg); + +struct bfa_wc { + bfa_wc_resume_t wc_resume; + void *wc_cbarg; + int wc_count; +}; + +static inline void +bfa_wc_up(struct bfa_wc *wc) +{ + wc->wc_count++; +} + +static inline void +bfa_wc_down(struct bfa_wc *wc) +{ + wc->wc_count--; + if (wc->wc_count == 0) + wc->wc_resume(wc->wc_cbarg); +} + +/* Initialize a waiting counter. 
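+ *
+ * Typical usage, as a sketch (resume_fn and arg are placeholder names):
+ *
+ *	bfa_wc_init(&wc, resume_fn, arg);	count becomes 1 (init reference)
+ *	bfa_wc_up(&wc);				once per outstanding operation
+ *	...
+ *	bfa_wc_down(&wc);			from each completion path
+ *	bfa_wc_wait(&wc);			drops the init reference; resume_fn
+ *						runs when the count reaches zero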
*/ +static inline void +bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) +{ + wc->wc_resume = wc_resume; + wc->wc_cbarg = wc_cbarg; + wc->wc_count = 0; + bfa_wc_up(wc); +} + +/* Wait for counter to reach zero */ +static inline void +bfa_wc_wait(struct bfa_wc *wc) +{ + bfa_wc_down(wc); +} + +#endif /* __BFA_CS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h new file mode 100644 index 000000000..3bfd9da92 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -0,0 +1,296 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_DEFS_H__ +#define __BFA_DEFS_H__ + +#include "cna.h" +#include "bfa_defs_status.h" +#include "bfa_defs_mfg_comm.h" + +#define BFA_STRING_32 32 +#define BFA_VERSION_LEN 64 + +/* ---------------------- adapter definitions ------------ */ + +/* BFA adapter level attributes. */ +enum { + BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), + /* + *!< adapter serial num length + */ + BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */ + BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */ + BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */ + BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */ + BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */ +}; + +struct bfa_adapter_attr { + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + u32 card_type; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; + u64 pwwn; + char node_symname[FC_SYMNAME_MAX]; + char hw_ver[BFA_VERSION_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + char os_type[BFA_ADAPTER_OS_TYPE_LEN]; + struct bfa_mfg_vpd vpd; + struct mac mac; + + u8 nports; + u8 max_speed; + u8 prototype; + char asic_rev; + + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 cna_capable; + + u8 is_mezz; + u8 trunk_capable; +}; + +/* ---------------------- IOC definitions ------------ */ + +enum { + BFA_IOC_DRIVER_LEN = 16, + BFA_IOC_CHIP_REV_LEN = 8, +}; + +/* Driver and firmware versions. 
*/ +struct bfa_ioc_driver_attr { + char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */ + char driver_ver[BFA_VERSION_LEN]; /*!< driver version */ + char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */ + char bios_ver[BFA_VERSION_LEN]; /*!< bios version */ + char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */ + char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */ +}; + +/* IOC PCI device attributes */ +struct bfa_ioc_pci_attr { + u16 vendor_id; /*!< PCI vendor ID */ + u16 device_id; /*!< PCI device ID */ + u16 ssid; /*!< subsystem ID */ + u16 ssvid; /*!< subsystem vendor ID */ + u32 pcifn; /*!< PCI device function */ + u32 rsvd; /* padding */ + char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */ +}; + +/* IOC states */ +enum bfa_ioc_state { + BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ + BFA_IOC_RESET = 2, /*!< IOC is in reset state */ + BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */ + BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */ + BFA_IOC_GETATTR = 5, /*!< IOC is being configured */ + BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */ + BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */ + BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */ + BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */ + BFA_IOC_DISABLED = 10, /*!< IOC is disabled */ + BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */ + BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */ + BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */ +}; + +/* IOC firmware stats */ +struct bfa_fw_ioc_stats { + u32 enable_reqs; + u32 disable_reqs; + u32 get_attr_reqs; + u32 dbg_sync; + u32 dbg_dump; + u32 unknown_reqs; +}; + +/* IOC driver stats */ +struct bfa_ioc_drv_stats { + u32 ioc_isrs; + u32 ioc_enables; + u32 ioc_disables; + u32 ioc_hbfails; + u32 ioc_boots; + u32 stats_tmos; + u32 hb_count; + u32 disable_reqs; + u32 enable_reqs; + u32 disable_replies; + u32 enable_replies; + u32 rsvd; +}; + +/* IOC statistics */ +struct bfa_ioc_stats { + struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */ + struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */ +}; + +enum bfa_ioc_type { + BFA_IOC_TYPE_FC = 1, + BFA_IOC_TYPE_FCoE = 2, + BFA_IOC_TYPE_LL = 3, +}; + +/* IOC attributes returned in queries */ +struct bfa_ioc_attr { + enum bfa_ioc_type ioc_type; + enum bfa_ioc_state state; /*!< IOC state */ + struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */ + struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */ + struct bfa_ioc_pci_attr pci_attr; + u8 port_id; /*!< port number */ + u8 port_mode; /*!< enum bfa_mode */ + u8 cap_bm; /*!< capability */ + u8 port_mode_cfg; /*!< enum bfa_mode */ + u8 def_fn; /*!< 1 if default fn */ + u8 rsvd[3]; /*!< 64bit align */ +}; + +/* Adapter capability mask definition */ +enum { + BFA_CM_HBA = 0x01, + BFA_CM_CNA = 0x02, + BFA_CM_NIC = 0x04, +}; + +/* ---------------------- mfg definitions ------------ */ + +/* Checksum size */ +#define BFA_MFG_CHKSUM_SIZE 16 + +#define BFA_MFG_PARTNUM_SIZE 14 +#define BFA_MFG_SUPPLIER_ID_SIZE 10 +#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 + +#pragma pack(1) + +/* BFA adapter manufacturing block definition. + * + * All numerical fields are in big-endian format. 
+ */ +struct bfa_mfg_block { + u8 version; /* manufacturing block version */ + u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */ + u16 mfgsize; /* mfg block size */ + u16 u16_chksum; /* old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ + u64 mfg_wwn; /* wwn base for this adapter */ + u8 num_wwn; /* number of wwns assigned */ + u8 mfg_speeds; /* speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + mac_t mfg_mac; /* base mac address */ + u8 num_mac; /* number of mac addresses */ + u8 rsv2; + u32 card_type; /* card type */ + char cap_nic; /* capability nic */ + char cap_cna; /* capability cna */ + char cap_hba; /* capability hba */ + char cap_fc16g; /* capability fc 16g */ + char cap_sriov; /* capability sriov */ + char cap_mezz; /* capability mezz */ + u8 rsv3; + u8 mfg_nports; /* number of ports */ + char media[8]; /* xfi/xaui */ + char initial_mode[8]; /* initial mode: hba/cna/nic */ + u8 rsv4[84]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */ +}; + +#pragma pack() + +/* ---------------------- pci definitions ------------ */ + +/* + * PCI device ID information + */ +enum { + BFA_PCI_DEVICE_ID_CT2 = 0x22, +}; + +#define bfa_asic_id_ct(device) \ + ((device) == PCI_DEVICE_ID_BROCADE_CT || \ + (device) == PCI_DEVICE_ID_BROCADE_CT_FC) +#define bfa_asic_id_ct2(device) \ + ((device) == BFA_PCI_DEVICE_ID_CT2) +#define bfa_asic_id_ctc(device) \ + (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device)) + +/* PCI sub-system device and vendor ID information */ +enum { + BFA_PCI_FCOE_SSDEVICE_ID = 0x14, + BFA_PCI_CT2_SSID_FCoE = 0x22, + BFA_PCI_CT2_SSID_ETH = 0x23, + BFA_PCI_CT2_SSID_FC = 0x24, +}; + +enum bfa_mode { + BFA_MODE_HBA = 1, + BFA_MODE_CNA = 2, + BFA_MODE_NIC = 3 +}; + +/* + * Flash module specific + */ +#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */ +#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */ +#define BFA_TOTAL_FLASH_SIZE 0x400000 +#define BFA_FLASH_PART_FWIMG 2 +#define BFA_FLASH_PART_MFG 7 + +/* + * flash partition attributes + */ +struct bfa_flash_part_attr { + u32 part_type; /* partition type */ + u32 part_instance; /* partition instance */ + u32 part_off; /* partition offset */ + u32 part_size; /* partition size */ + u32 part_len; /* partition content length */ + u32 part_status; /* partition status */ + char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24]; +}; + +/* + * flash attributes + */ +struct bfa_flash_attr { + u32 status; /* flash overall status */ + u32 npart; /* num of partitions */ + struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX]; +}; + +#endif /* __BFA_DEFS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h new file mode 100644 index 000000000..a37326d44 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -0,0 +1,221 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFA_DEFS_CNA_H__ +#define __BFA_DEFS_CNA_H__ + +#include "bfa_defs.h" + +/* FC physical port statistics. */ +struct bfa_port_fc_stats { + u64 secs_reset; /*!< Seconds since stats is reset */ + u64 tx_frames; /*!< Tx frames */ + u64 tx_words; /*!< Tx words */ + u64 tx_lip; /*!< Tx LIP */ + u64 tx_nos; /*!< Tx NOS */ + u64 tx_ols; /*!< Tx OLS */ + u64 tx_lr; /*!< Tx LR */ + u64 tx_lrr; /*!< Tx LRR */ + u64 rx_frames; /*!< Rx frames */ + u64 rx_words; /*!< Rx words */ + u64 lip_count; /*!< Rx LIP */ + u64 nos_count; /*!< Rx NOS */ + u64 ols_count; /*!< Rx OLS */ + u64 lr_count; /*!< Rx LR */ + u64 lrr_count; /*!< Rx LRR */ + u64 invalid_crcs; /*!< Rx CRC err frames */ + u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */ + u64 undersized_frm; /*!< Rx undersized frames */ + u64 oversized_frm; /*!< Rx oversized frames */ + u64 bad_eof_frm; /*!< Rx frames with bad EOF */ + u64 error_frames; /*!< Errored frames */ + u64 dropped_frames; /*!< Dropped frames */ + u64 link_failures; /*!< Link Failure (LF) count */ + u64 loss_of_syncs; /*!< Loss of sync count */ + u64 loss_of_signals; /*!< Loss of signal count */ + u64 primseq_errs; /*!< Primitive sequence protocol err. */ + u64 bad_os_count; /*!< Invalid ordered sets */ + u64 err_enc_out; /*!< Encoding err nonframe_8b10b */ + u64 err_enc; /*!< Encoding err frame_8b10b */ + u64 bbsc_frames_lost; /*!< Credit Recovery-Frames Lost */ + u64 bbsc_credits_lost; /*!< Credit Recovery-Credits Lost */ + u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */ +}; + +/* Eth Physical Port statistics. 
*/ +struct bfa_port_eth_stats { + u64 secs_reset; /*!< Seconds since stats is reset */ + u64 frame_64; /*!< Frames 64 bytes */ + u64 frame_65_127; /*!< Frames 65-127 bytes */ + u64 frame_128_255; /*!< Frames 128-255 bytes */ + u64 frame_256_511; /*!< Frames 256-511 bytes */ + u64 frame_512_1023; /*!< Frames 512-1023 bytes */ + u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */ + u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */ + u64 tx_bytes; /*!< Tx bytes */ + u64 tx_packets; /*!< Tx packets */ + u64 tx_mcast_packets; /*!< Tx multicast packets */ + u64 tx_bcast_packets; /*!< Tx broadcast packets */ + u64 tx_control_frame; /*!< Tx control frame */ + u64 tx_drop; /*!< Tx drops */ + u64 tx_jabber; /*!< Tx jabber */ + u64 tx_fcs_error; /*!< Tx FCS errors */ + u64 tx_fragments; /*!< Tx fragments */ + u64 rx_bytes; /*!< Rx bytes */ + u64 rx_packets; /*!< Rx packets */ + u64 rx_mcast_packets; /*!< Rx multicast packets */ + u64 rx_bcast_packets; /*!< Rx broadcast packets */ + u64 rx_control_frames; /*!< Rx control frames */ + u64 rx_unknown_opcode; /*!< Rx unknown opcode */ + u64 rx_drop; /*!< Rx drops */ + u64 rx_jabber; /*!< Rx jabber */ + u64 rx_fcs_error; /*!< Rx FCS errors */ + u64 rx_alignment_error; /*!< Rx alignment errors */ + u64 rx_frame_length_error; /*!< Rx frame len errors */ + u64 rx_code_error; /*!< Rx code errors */ + u64 rx_fragments; /*!< Rx fragments */ + u64 rx_pause; /*!< Rx pause */ + u64 rx_zero_pause; /*!< Rx zero pause */ + u64 tx_pause; /*!< Tx pause */ + u64 tx_zero_pause; /*!< Tx zero pause */ + u64 rx_fcoe_pause; /*!< Rx FCoE pause */ + u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */ + u64 tx_fcoe_pause; /*!< Tx FCoE pause */ + u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */ + u64 rx_iscsi_pause; /*!< Rx iSCSI pause */ + u64 rx_iscsi_zero_pause; /*!< Rx iSCSI zero pause */ + u64 tx_iscsi_pause; /*!< Tx iSCSI pause */ + u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */ +}; + +/* Port statistics. 
*/ +union bfa_port_stats_u { + struct bfa_port_fc_stats fc; + struct bfa_port_eth_stats eth; +}; + +#pragma pack(1) + +#define BFA_CEE_LLDP_MAX_STRING_LEN (128) +#define BFA_CEE_DCBX_MAX_PRIORITY (8) +#define BFA_CEE_DCBX_MAX_PGID (8) + +#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 +#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 +#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 +#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008 +#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010 +#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020 +#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040 +#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080 +#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100 +#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200 +#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400 + +/* LLDP string type */ +struct bfa_cee_lldp_str { + u8 sub_type; + u8 len; + u8 rsvd[2]; + u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; +}; + +/* LLDP parameters */ +struct bfa_cee_lldp_cfg { + struct bfa_cee_lldp_str chassis_id; + struct bfa_cee_lldp_str port_id; + struct bfa_cee_lldp_str port_desc; + struct bfa_cee_lldp_str sys_name; + struct bfa_cee_lldp_str sys_desc; + struct bfa_cee_lldp_str mgmt_addr; + u16 time_to_live; + u16 enabled_system_cap; +}; + +enum bfa_cee_dcbx_version { + DCBX_PROTOCOL_PRECEE = 1, + DCBX_PROTOCOL_CEE = 2, +}; + +enum bfa_cee_lls { + /* LLS is down because the TLV not sent by the peer */ + CEE_LLS_DOWN_NO_TLV = 0, + /* LLS is down as advertised by the peer */ + CEE_LLS_DOWN = 1, + CEE_LLS_UP = 2, +}; + +/* CEE/DCBX parameters */ +struct bfa_cee_dcbx_cfg { + u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY]; + u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID]; + u8 pfc_primap; /* bitmap of priorties with PFC enabled */ + u8 fcoe_primap; /* bitmap of priorities used for FcoE traffic */ + u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */ + u8 dcbx_version; /* operating version:CEE or preCEE */ + u8 lls_fcoe; /* FCoE Logical Link Status */ + u8 lls_lan; /* LAN Logical Link Status */ + u8 rsvd[2]; +}; + +/* CEE status */ +/* Making this to tri-state for the benefit of port list command */ +enum bfa_cee_status { + CEE_UP = 0, + CEE_PHY_UP = 1, + CEE_LOOPBACK = 2, + CEE_PHY_DOWN = 3, +}; + +/* CEE Query */ +struct bfa_cee_attr { + u8 cee_status; + u8 error_reason; + struct bfa_cee_lldp_cfg lldp_remote; + struct bfa_cee_dcbx_cfg dcbx_remote; + mac_t src_mac; + u8 link_speed; + u8 nw_priority; + u8 filler[2]; +}; + +/* LLDP/DCBX/CEE Statistics */ +struct bfa_cee_stats { + u32 lldp_tx_frames; /*!< LLDP Tx Frames */ + u32 lldp_rx_frames; /*!< LLDP Rx Frames */ + u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */ + u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */ + u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */ + u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */ + u32 lldp_info_aged_out; /*!< LLDP remote info aged out */ + u32 dcbx_phylink_ups; /*!< DCBX phy link ups */ + u32 dcbx_phylink_downs; /*!< DCBX phy link downs */ + u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */ + u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */ + u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */ + u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */ + u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */ + u32 cee_status_down; /*!< CEE status down */ + u32 cee_status_up; /*!< CEE status up */ + u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */ + u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */ +}; + +#pragma pack() + +#endif /* __BFA_DEFS_CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h 
b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h new file mode 100644 index 000000000..7a45cd0b5 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -0,0 +1,155 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFA_DEFS_MFG_COMM_H__ +#define __BFA_DEFS_MFG_COMM_H__ + +#include "bfa_defs.h" + +/* Manufacturing block version */ +#define BFA_MFG_VERSION 3 +#define BFA_MFG_VERSION_UNINIT 0xFF + +/* Manufacturing block encrypted version */ +#define BFA_MFG_ENC_VER 2 + +/* Manufacturing block version 1 length */ +#define BFA_MFG_VER1_LEN 128 + +/* Manufacturing block header length */ +#define BFA_MFG_HDR_LEN 4 + +#define BFA_MFG_SERIALNUM_SIZE 11 +#define STRSZ(_n) (((_n) + 4) & ~3) + +/* Manufacturing card type */ +enum { + BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */ + BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */ + BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */ + BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */ + BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */ + BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */ + BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */ + BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */ + BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */ + BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */ + BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */ + BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */ + BFA_MFG_TYPE_PROWLER_F = 1560, /*!< Prowler FC only cards */ + BFA_MFG_TYPE_PROWLER_N = 1410, /*!< Prowler NIC only cards */ + BFA_MFG_TYPE_PROWLER_C = 1710, /*!< Prowler CNA only cards */ + BFA_MFG_TYPE_PROWLER_D = 1860, /*!< Prowler Dual cards */ + BFA_MFG_TYPE_CHINOOK = 1867, /*!< Chinook cards */ + BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */ +}; + +#pragma pack(1) + +/* Check if Mezz card */ +#define bfa_mfg_is_mezz(type) (( \ + (type) == BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE || \ + (type) == BFA_MFG_TYPE_ASTRA || \ + (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ + (type) == BFA_MFG_TYPE_LIGHTNING || \ + (type) == BFA_MFG_TYPE_CHINOOK)) + +enum { + CB_GPIO_TTV = (1), /*!< TTV debug capable cards */ + CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */ + CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */ + CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */ + CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */ + CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */ + CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */ +}; + +#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \ +do { \ + if ((gpio) & CB_GPIO_PROTO) { \ + (prop) |= BFI_ADAPTER_PROTO; \ + (gpio) &= ~CB_GPIO_PROTO; \ + } \ + switch ((gpio)) { \ + case CB_GPIO_TTV: \ + (prop) |= BFI_ADAPTER_TTV; \ + case CB_GPIO_DFLY: \ + case CB_GPIO_FC8P2: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \ + (card_type) = 
BFA_MFG_TYPE_FC8P2; \ + break; \ + case CB_GPIO_FC8P1: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \ + (card_type) = BFA_MFG_TYPE_FC8P1; \ + break; \ + case CB_GPIO_FC4P2: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \ + (card_type) = BFA_MFG_TYPE_FC4P2; \ + break; \ + case CB_GPIO_FC4P1: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \ + (card_type) = BFA_MFG_TYPE_FC4P1; \ + break; \ + default: \ + (prop) |= BFI_ADAPTER_UNSUPP; \ + (card_type) = BFA_MFG_TYPE_INVALID; \ + } \ +} while (0) + +/* VPD data length */ +#define BFA_MFG_VPD_LEN 512 +#define BFA_MFG_VPD_LEN_INVALID 0 + +#define BFA_MFG_VPD_PCI_HDR_OFF 137 +#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */ +#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */ + +/* VPD vendor tag */ +enum { + BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */ + BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */ + BFA_MFG_VPD_HP = 2, /*!< vendor HP */ + BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */ + BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */ + BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */ + BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */ + BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */ +}; + +/* BFA adapter flash vpd data definition. + * + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_vpd { + u8 version; /*!< vpd data version */ + u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */ + u8 chksum; /*!< u8 checksum */ + u8 vendor; /*!< vendor */ + u8 len; /*!< vpd data length excluding header */ + u8 rsv; + u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */ +}; + +#pragma pack() + +#endif /* __BFA_DEFS_MFG_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h new file mode 100644 index 000000000..a43b56002 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -0,0 +1,216 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFA_DEFS_STATUS_H__ +#define __BFA_DEFS_STATUS_H__ + +/* API status return values + * + * NOTE: The error msgs are auto generated from the comments. 
Only singe line + * comments are supported + */ +enum bfa_status { + BFA_STATUS_OK = 0, + BFA_STATUS_FAILED = 1, + BFA_STATUS_EINVAL = 2, + BFA_STATUS_ENOMEM = 3, + BFA_STATUS_ENOSYS = 4, + BFA_STATUS_ETIMER = 5, + BFA_STATUS_EPROTOCOL = 6, + BFA_STATUS_ENOFCPORTS = 7, + BFA_STATUS_NOFLASH = 8, + BFA_STATUS_BADFLASH = 9, + BFA_STATUS_SFP_UNSUPP = 10, + BFA_STATUS_UNKNOWN_VFID = 11, + BFA_STATUS_DATACORRUPTED = 12, + BFA_STATUS_DEVBUSY = 13, + BFA_STATUS_ABORTED = 14, + BFA_STATUS_NODEV = 15, + BFA_STATUS_HDMA_FAILED = 16, + BFA_STATUS_FLASH_BAD_LEN = 17, + BFA_STATUS_UNKNOWN_LWWN = 18, + BFA_STATUS_UNKNOWN_RWWN = 19, + BFA_STATUS_FCPT_LS_RJT = 20, + BFA_STATUS_VPORT_EXISTS = 21, + BFA_STATUS_VPORT_MAX = 22, + BFA_STATUS_UNSUPP_SPEED = 23, + BFA_STATUS_INVLD_DFSZ = 24, + BFA_STATUS_CNFG_FAILED = 25, + BFA_STATUS_CMD_NOTSUPP = 26, + BFA_STATUS_NO_ADAPTER = 27, + BFA_STATUS_LINKDOWN = 28, + BFA_STATUS_FABRIC_RJT = 29, + BFA_STATUS_UNKNOWN_VWWN = 30, + BFA_STATUS_NSLOGIN_FAILED = 31, + BFA_STATUS_NO_RPORTS = 32, + BFA_STATUS_NSQUERY_FAILED = 33, + BFA_STATUS_PORT_OFFLINE = 34, + BFA_STATUS_RPORT_OFFLINE = 35, + BFA_STATUS_TGTOPEN_FAILED = 36, + BFA_STATUS_BAD_LUNS = 37, + BFA_STATUS_IO_FAILURE = 38, + BFA_STATUS_NO_FABRIC = 39, + BFA_STATUS_EBADF = 40, + BFA_STATUS_EINTR = 41, + BFA_STATUS_EIO = 42, + BFA_STATUS_ENOTTY = 43, + BFA_STATUS_ENXIO = 44, + BFA_STATUS_EFOPEN = 45, + BFA_STATUS_VPORT_WWN_BP = 46, + BFA_STATUS_PORT_NOT_DISABLED = 47, + BFA_STATUS_BADFRMHDR = 48, + BFA_STATUS_BADFRMSZ = 49, + BFA_STATUS_MISSINGFRM = 50, + BFA_STATUS_LINKTIMEOUT = 51, + BFA_STATUS_NO_FCPIM_NEXUS = 52, + BFA_STATUS_CHECKSUM_FAIL = 53, + BFA_STATUS_GZME_FAILED = 54, + BFA_STATUS_SCSISTART_REQD = 55, + BFA_STATUS_IOC_FAILURE = 56, + BFA_STATUS_INVALID_WWN = 57, + BFA_STATUS_MISMATCH = 58, + BFA_STATUS_IOC_ENABLED = 59, + BFA_STATUS_ADAPTER_ENABLED = 60, + BFA_STATUS_IOC_NON_OP = 61, + BFA_STATUS_ADDR_MAP_FAILURE = 62, + BFA_STATUS_SAME_NAME = 63, + BFA_STATUS_PENDING = 64, + BFA_STATUS_8G_SPD = 65, + BFA_STATUS_4G_SPD = 66, + BFA_STATUS_AD_IS_ENABLE = 67, + BFA_STATUS_EINVAL_TOV = 68, + BFA_STATUS_EINVAL_QDEPTH = 69, + BFA_STATUS_VERSION_FAIL = 70, + BFA_STATUS_DIAG_BUSY = 71, + BFA_STATUS_BEACON_ON = 72, + BFA_STATUS_BEACON_OFF = 73, + BFA_STATUS_LBEACON_ON = 74, + BFA_STATUS_LBEACON_OFF = 75, + BFA_STATUS_PORT_NOT_INITED = 76, + BFA_STATUS_RPSC_ENABLED = 77, + BFA_STATUS_ENOFSAVE = 78, + BFA_STATUS_BAD_FILE = 79, + BFA_STATUS_RLIM_EN = 80, + BFA_STATUS_RLIM_DIS = 81, + BFA_STATUS_IOC_DISABLED = 82, + BFA_STATUS_ADAPTER_DISABLED = 83, + BFA_STATUS_BIOS_DISABLED = 84, + BFA_STATUS_AUTH_ENABLED = 85, + BFA_STATUS_AUTH_DISABLED = 86, + BFA_STATUS_ERROR_TRL_ENABLED = 87, + BFA_STATUS_ERROR_QOS_ENABLED = 88, + BFA_STATUS_NO_SFP_DEV = 89, + BFA_STATUS_MEMTEST_FAILED = 90, + BFA_STATUS_INVALID_DEVID = 91, + BFA_STATUS_QOS_ENABLED = 92, + BFA_STATUS_QOS_DISABLED = 93, + BFA_STATUS_INCORRECT_DRV_CONFIG = 94, + BFA_STATUS_REG_FAIL = 95, + BFA_STATUS_IM_INV_CODE = 96, + BFA_STATUS_IM_INV_VLAN = 97, + BFA_STATUS_IM_INV_ADAPT_NAME = 98, + BFA_STATUS_IM_LOW_RESOURCES = 99, + BFA_STATUS_IM_VLANID_IS_PVID = 100, + BFA_STATUS_IM_VLANID_EXISTS = 101, + BFA_STATUS_IM_FW_UPDATE_FAIL = 102, + BFA_STATUS_PORTLOG_ENABLED = 103, + BFA_STATUS_PORTLOG_DISABLED = 104, + BFA_STATUS_FILE_NOT_FOUND = 105, + BFA_STATUS_QOS_FC_ONLY = 106, + BFA_STATUS_RLIM_FC_ONLY = 107, + BFA_STATUS_CT_SPD = 108, + BFA_STATUS_LEDTEST_OP = 109, + BFA_STATUS_CEE_NOT_DN = 110, + BFA_STATUS_10G_SPD = 111, + BFA_STATUS_IM_INV_TEAM_NAME = 112, + 
BFA_STATUS_IM_DUP_TEAM_NAME = 113, + BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, + BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, + BFA_STATUS_IM_PVID_MISMATCH = 116, + BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, + BFA_STATUS_IM_MTU_MISMATCH = 118, + BFA_STATUS_IM_RSS_MISMATCH = 119, + BFA_STATUS_IM_HDS_MISMATCH = 120, + BFA_STATUS_IM_OFFLOAD_MISMATCH = 121, + BFA_STATUS_IM_PORT_PARAMS = 122, + BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123, + BFA_STATUS_IM_CANNOT_REM_PRI = 124, + BFA_STATUS_IM_MAX_PORTS_REACHED = 125, + BFA_STATUS_IM_LAST_PORT_DELETE = 126, + BFA_STATUS_IM_NO_DRIVER = 127, + BFA_STATUS_IM_MAX_VLANS_REACHED = 128, + BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129, + BFA_STATUS_NO_MINPORT_DRIVER = 130, + BFA_STATUS_CARD_TYPE_MISMATCH = 131, + BFA_STATUS_BAD_ASICBLK = 132, + BFA_STATUS_NO_DRIVER = 133, + BFA_STATUS_INVALID_MAC = 134, + BFA_STATUS_IM_NO_VLAN = 135, + BFA_STATUS_IM_ETH_LB_FAILED = 136, + BFA_STATUS_IM_PVID_REMOVE = 137, + BFA_STATUS_IM_PVID_EDIT = 138, + BFA_STATUS_CNA_NO_BOOT = 139, + BFA_STATUS_IM_PVID_NON_ZERO = 140, + BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, + BFA_STATUS_IM_GET_INETCFG_FAILED = 142, + BFA_STATUS_IM_NOT_BOUND = 143, + BFA_STATUS_INSUFFICIENT_PERMS = 144, + BFA_STATUS_IM_INV_VLAN_NAME = 145, + BFA_STATUS_CMD_NOTSUPP_CNA = 146, + BFA_STATUS_IM_PASSTHRU_EDIT = 147, + BFA_STATUS_IM_BIND_FAILED = 148, + BFA_STATUS_IM_UNBIND_FAILED = 149, + BFA_STATUS_IM_PORT_IN_TEAM = 150, + BFA_STATUS_IM_VLAN_NOT_FOUND = 151, + BFA_STATUS_IM_TEAM_NOT_FOUND = 152, + BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, + BFA_STATUS_PBC = 154, + BFA_STATUS_DEVID_MISSING = 155, + BFA_STATUS_BAD_FWCFG = 156, + BFA_STATUS_CREATE_FILE = 157, + BFA_STATUS_INVALID_VENDOR = 158, + BFA_STATUS_SFP_NOT_READY = 159, + BFA_STATUS_FLASH_UNINIT = 160, + BFA_STATUS_FLASH_EMPTY = 161, + BFA_STATUS_FLASH_CKFAIL = 162, + BFA_STATUS_TRUNK_UNSUPP = 163, + BFA_STATUS_TRUNK_ENABLED = 164, + BFA_STATUS_TRUNK_DISABLED = 165, + BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166, + BFA_STATUS_BOOT_CODE_UPDATED = 167, + BFA_STATUS_BOOT_VERSION = 168, + BFA_STATUS_CARDTYPE_MISSING = 169, + BFA_STATUS_INVALID_CARDTYPE = 170, + BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171, + BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172, + BFA_STATUS_ETHBOOT_ENABLED = 173, + BFA_STATUS_ETHBOOT_DISABLED = 174, + BFA_STATUS_IOPROFILE_OFF = 175, + BFA_STATUS_NO_PORT_INSTANCE = 176, + BFA_STATUS_BOOT_CODE_TIMEDOUT = 177, + BFA_STATUS_NO_VPORT_LOCK = 178, + BFA_STATUS_VPORT_NO_CNFG = 179, + BFA_STATUS_MAX_VAL +}; + +enum bfa_eproto_status { + BFA_EPROTO_BAD_ACCEPT = 0, + BFA_EPROTO_UNKNOWN_RSP = 1 +}; + +#endif /* __BFA_DEFS_STATUS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c new file mode 100644 index 000000000..68f3c13c9 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -0,0 +1,3399 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "bfa_ioc.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +/* IOC local definitions */ + +#define bfa_ioc_state_disabled(__sm) \ + (((__sm) == BFI_IOC_UNINIT) || \ + ((__sm) == BFI_IOC_INITING) || \ + ((__sm) == BFI_IOC_HWINIT) || \ + ((__sm) == BFI_IOC_DISABLED) || \ + ((__sm) == BFI_IOC_FAIL) || \ + ((__sm) == BFI_IOC_CFG_DISABLED)) + +/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ + +#define bfa_ioc_firmware_lock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) +#define bfa_ioc_firmware_unlock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) +#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) +#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) +#define bfa_ioc_notify_fail(__ioc) \ + ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) +#define bfa_ioc_sync_start(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) +#define bfa_ioc_sync_join(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) +#define bfa_ioc_sync_leave(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) +#define bfa_ioc_sync_ack(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) +#define bfa_ioc_sync_complete(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) +#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) +#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc)) + +#define bfa_ioc_mbox_cmd_pending(__ioc) \ + (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ + readl((__ioc)->ioc_regs.hfn_mbox_cmd)) + +static bool bfa_nw_auto_recover = true; + +/* + * forward declarations + */ +static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc); +static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc); +static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc); +static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force); +static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc); +static void bfa_ioc_send_enable(struct bfa_ioc *ioc); +static void bfa_ioc_send_disable(struct bfa_ioc *ioc); +static void bfa_ioc_send_getattr(struct bfa_ioc *ioc); +static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc); +static void bfa_ioc_hb_stop(struct bfa_ioc *ioc); +static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force); +static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); +static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); +static void bfa_ioc_recover(struct bfa_ioc *ioc); +static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); +static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); +static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); +static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc); +static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); +static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); +static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); +static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); +static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); +static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); +static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc, + enum bfi_fwboot_type boot_type, u32 boot_param); +static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); +static void bfa_ioc_get_adapter_serial_num(struct 
bfa_ioc *ioc, + char *serial_num); +static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, + char *fw_ver); +static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, + char *chip_rev); +static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, + char *optrom_ver); +static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, + char *manufacturer); +static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); +static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); + +/* IOC state machine definitions/declarations */ +enum ioc_event { + IOC_E_RESET = 1, /*!< IOC reset request */ + IOC_E_ENABLE = 2, /*!< IOC enable request */ + IOC_E_DISABLE = 3, /*!< IOC disable request */ + IOC_E_DETACH = 4, /*!< driver detach cleanup */ + IOC_E_ENABLED = 5, /*!< f/w enabled */ + IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */ + IOC_E_DISABLED = 7, /*!< f/w disabled */ + IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */ + IOC_E_HBFAIL = 9, /*!< heartbeat failure */ + IOC_E_HWERROR = 10, /*!< hardware error interrupt */ + IOC_E_TIMEOUT = 11, /*!< timeout */ + IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */ +}; + +bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event); + +static struct bfa_sm_table ioc_sm_table[] = { + {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, + {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, + {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, + {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, + {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, + {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, + {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, + {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, + {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, + {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, +}; + +/* + * Forward declareations for iocpf state machine + */ +static void bfa_iocpf_enable(struct bfa_ioc *ioc); +static void bfa_iocpf_disable(struct bfa_ioc *ioc); +static void bfa_iocpf_fail(struct bfa_ioc *ioc); +static void bfa_iocpf_initfail(struct bfa_ioc *ioc); +static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); +static void bfa_iocpf_stop(struct bfa_ioc *ioc); + +/* IOCPF state machine events */ +enum iocpf_event { + IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ + IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ + IOCPF_E_STOP = 3, /*!< stop on driver detach */ + IOCPF_E_FWREADY = 4, /*!< f/w initialization done */ + IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */ + IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */ + IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */ + IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */ + IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */ + IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ + IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */ + IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ +}; + +/* IOCPF states */ +enum bfa_iocpf_state { + 
BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ + BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ + BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */ + BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */ + BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */ + BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */ + BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */ + BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */ + BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */ +}; + +bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); + +static struct bfa_sm_table iocpf_sm_table[] = { + {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, + {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, + {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, + {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, +}; + +/* IOC State Machine */ + +/* Beginning state. IOC uninit state. */ +static void +bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) +{ +} + +/* IOC is in uninit state. */ +static void +bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_RESET: + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/* Reset entry actions -- initialize state machine */ +static void +bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) +{ + bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); +} + +/* IOC is in reset state. */ +static void +bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + bfa_ioc_disable_comp(ioc); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) +{ + bfa_iocpf_enable(ioc); +} + +/* Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. 
+ */ +static void +bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + /* !!! fall through !!! */ + case IOC_E_HWERROR: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_initfail(ioc); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +/* Semaphore should be acquired for version check. */ +static void +bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) +{ + mod_timer(&ioc->ioc_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + bfa_ioc_send_getattr(ioc); +} + +/* IOC configuration in progress. Timer is active. */ +static void +bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_FWRSP_GETATTR: + del_timer(&ioc->ioc_timer); + bfa_fsm_set_state(ioc, bfa_ioc_sm_op); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + del_timer(&ioc->ioc_timer); + /* fall through */ + case IOC_E_TIMEOUT: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_getattrfail(ioc); + break; + + case IOC_E_DISABLE: + del_timer(&ioc->ioc_timer); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) +{ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); + bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); + bfa_ioc_hb_monitor(ioc); +} + +static void +bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_ioc_hb_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + bfa_ioc_hb_stop(ioc); + /* !!! fall through !!! */ + case IOC_E_HBFAIL: + if (ioc->iocpf.auto_recover) + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); + else + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + + bfa_ioc_fail_notify(ioc); + + if (event != IOC_E_PFFAILED) + bfa_iocpf_fail(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) +{ + bfa_iocpf_disable(ioc); +} + +/* IOC is being disabled */ +static void +bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_DISABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + break; + + case IOC_E_HWERROR: + /* + * No state change. Will move to disabled state + * after iocpf sm completes failure processing and + * moves to disabled state. + */ + bfa_iocpf_fail(ioc); + break; + + case IOC_E_HWFAILED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + bfa_ioc_disable_comp(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +/* IOC disable completion entry. 
*/ +static void +bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) +{ + bfa_ioc_disable_comp(ioc); +} + +static void +bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) +{ +} + +/* Hardware initialization retry. */ +static void +bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + /** + * Initialization retry failed. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_initfail(ioc); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) +{ +} + +/* IOC failure. */ +static void +bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + case IOC_E_HWERROR: + /* HB failure notification, ignore. */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc) +{ +} + +/* IOC failure. */ +static void +bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(event); + } +} + +/* IOCPF State Machine */ + +/* Reset entry actions -- initialize state machine */ +static void +bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) +{ + iocpf->fw_mismatch_notified = false; + iocpf->auto_recover = bfa_nw_auto_recover; +} + +/* Beginning state. IOC is in reset state. */ +static void +bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_STOP: + break; + + default: + bfa_sm_fault(event); + } +} + +/* Semaphore should be acquired for version check. */ +static void +bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_init(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* Awaiting h/w semaphore to continue with version check. 
*/ +static void +bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_firmware_lock(ioc)) { + if (bfa_ioc_sync_start(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_ioc_firmware_unlock(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); + } + } else { + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; + + case IOCPF_E_STOP: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/* Notify enable completion callback */ +static void +bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) +{ + /* Call only the first time sm enters fwmismatch state. */ + if (!iocpf->fw_mismatch_notified) + bfa_ioc_pf_fwmismatch(iocpf->ioc); + + iocpf->fw_mismatch_notified = true; + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); +} + +/* Awaiting firmware version match. */ +static void +bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_TIMEOUT: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; + + case IOCPF_E_STOP: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/* Request for semaphore. */ +static void +bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* Awaiting semaphore for h/w initialzation. */ +static void +bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_sync_complete(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_nw_ioc_hw_sem_release(ioc); + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) +{ + iocpf->poll_time = 0; + bfa_ioc_reset(iocpf->ioc, false); +} + +/* Hardware is being initialized. Interrupts are enabled. + * Holding hardware semaphore lock. 
+ */ +static void +bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWREADY: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); + break; + + case IOCPF_E_TIMEOUT: + bfa_nw_ioc_hw_sem_release(ioc); + bfa_ioc_pf_failed(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_ioc_sync_leave(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) +{ + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + /** + * Enable Interrupts before sending fw IOC ENABLE cmd. + */ + iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); + bfa_ioc_send_enable(iocpf->ioc); +} + +/* Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. + */ +static void +bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWRSP_ENABLE: + del_timer(&ioc->iocpf_timer); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); + break; + + case IOCPF_E_INITFAIL: + del_timer(&ioc->iocpf_timer); + /* + * !!! fall through !!! + */ + case IOCPF_E_TIMEOUT: + bfa_nw_ioc_hw_sem_release(ioc); + if (event == IOCPF_E_TIMEOUT) + bfa_ioc_pf_failed(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_pf_enabled(iocpf->ioc); +} + +static void +bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + case IOCPF_E_GETATTRFAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_FAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) +{ + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + bfa_ioc_send_disable(iocpf->ioc); +} + +/* IOC is being disabled */ +static void +bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWRSP_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + del_timer(&ioc->iocpf_timer); + /* + * !!! fall through !!! + */ + + case IOCPF_E_TIMEOUT: + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FWRSP_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* IOC hb ack request is being removed. 
*/ +static void +bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_leave(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +/* IOC disable completion entry. */ +static void +bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_mbox_flush(iocpf->ioc); + bfa_ioc_pf_disabled(iocpf->ioc); +} + +static void +bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) +{ + bfa_nw_ioc_debug_save_ftrc(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* Hardware initialization failed. */ +static void +bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_notify_fail(ioc); + bfa_ioc_sync_leave(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_STOP: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) +{ +} + +/* Hardware initialization failed. */ +static void +bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) +{ + /** + * Mark IOC as failed in hardware and stop firmware. + */ + bfa_ioc_lpu_stop(iocpf->ioc); + + /** + * Flush any queued up mailbox requests. + */ + bfa_ioc_mbox_flush(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* IOC is in failed state. 
*/ +static void +bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_ack(ioc); + bfa_ioc_notify_fail(ioc); + if (!iocpf->auto_recover) { + bfa_ioc_sync_leave(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + } else { + if (bfa_ioc_sync_complete(ioc)) + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + else { + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + } + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) +{ +} + +/* IOC is in failed state. */ +static void +bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(event); + } +} + +/* BFA IOC private functions */ + +/* Notify common modules registered for notification. */ +static void +bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) +{ + struct bfa_ioc_notify *notify; + struct list_head *qe; + + list_for_each(qe, &ioc->notify_q) { + notify = (struct bfa_ioc_notify *)qe; + notify->cbfn(notify->cbarg, event); + } +} + +static void +bfa_ioc_disable_comp(struct bfa_ioc *ioc) +{ + ioc->cbfn->disable_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED); +} + +bool +bfa_nw_ioc_sem_get(void __iomem *sem_reg) +{ + u32 r32; + int cnt = 0; +#define BFA_SEM_SPINCNT 3000 + + r32 = readl(sem_reg); + + while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) { + cnt++; + udelay(2); + r32 = readl(sem_reg); + } + + if (!(r32 & 1)) + return true; + + return false; +} + +void +bfa_nw_ioc_sem_release(void __iomem *sem_reg) +{ + readl(sem_reg); + writel(1, sem_reg); +} + +/* Clear fwver hdr */ +static void +bfa_ioc_fwver_clear(struct bfa_ioc *ioc) +{ + u32 pgnum, pgoff, loff = 0; + int i; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + pgoff = PSS_SMEM_PGOFF(loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) { + writel(0, ioc->ioc_regs.smem_page_start + loff); + loff += sizeof(u32); + } +} + + +static void +bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) +{ + struct bfi_ioc_image_hdr fwhdr; + u32 fwstate, r32; + + /* Spin on init semaphore to serialize. */ + r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); + while (r32 & 0x1) { + udelay(20); + r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); + } + + fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + if (fwstate == BFI_IOC_UNINIT) { + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + return; + } + + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + + if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + return; + } + + bfa_ioc_fwver_clear(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); + + /* + * Try to lock and then unlock the semaphore. 
+ */ + readl(ioc->ioc_regs.ioc_sem_reg); + writel(1, ioc->ioc_regs.ioc_sem_reg); + + /* Unlock init semaphore */ + writel(1, ioc->ioc_regs.ioc_init_sem_reg); +} + +static void +bfa_ioc_hw_sem_get(struct bfa_ioc *ioc) +{ + u32 r32; + + /** + * First read to the semaphore register will return 0, subsequent reads + * will return 1. Semaphore is released by writing 1 to the register + */ + r32 = readl(ioc->ioc_regs.ioc_sem_reg); + if (r32 == ~0) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); + return; + } + if (!(r32 & 1)) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); + return; + } + + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); +} + +void +bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc) +{ + writel(1, ioc->ioc_regs.ioc_sem_reg); +} + +static void +bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) +{ + del_timer(&ioc->sem_timer); +} + +/* Initialize LPU local memory (aka secondary memory / SRAM) */ +static void +bfa_ioc_lmem_init(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + int i; +#define PSS_LMEM_INIT_TIME 10000 + + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LMEM_RESET; + pss_ctl |= __PSS_LMEM_INIT_EN; + + /* + * i2c workaround 12.5khz clock + */ + pss_ctl |= __PSS_I2C_CLK_DIV(3UL); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); + + /** + * wait for memory initialization to be complete + */ + i = 0; + do { + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + i++; + } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); + + /** + * If memory initialization is not successful, IOC timeout will catch + * such failures. + */ + BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE)); + + pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_start(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + + /** + * Take processor out of reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LPU0_RESET; + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_stop(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + + /** + * Put processors in reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +/* Get driver and firmware versions. */ +void +bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) +{ + u32 pgnum; + u32 loff = 0; + int i; + u32 *fwsig = (u32 *) fwhdr; + + pgnum = bfa_ioc_smem_pgnum(ioc, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); + i++) { + fwsig[i] = + swab32(readl((loff) + (ioc->ioc_regs.smem_page_start))); + loff += sizeof(u32); + } +} + +static bool +bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1, + struct bfi_ioc_image_hdr *fwhdr_2) +{ + int i; + + for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { + if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) + return false; + } + + return true; +} + +/* Returns TRUE if major minor and maintenance are same. + * If patch version are same, check for MD5 Checksum to be same. 
+ */ +static bool +bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr, + struct bfi_ioc_image_hdr *fwhdr_to_cmp) +{ + if (drv_fwhdr->signature != fwhdr_to_cmp->signature) + return false; + if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) + return false; + if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) + return false; + if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) + return false; + if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && + drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && + drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) + return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); + + return true; +} + +static bool +bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr) +{ + if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) + return false; + + return true; +} + +static bool +fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr) +{ + if (fwhdr->fwver.phase == 0 && + fwhdr->fwver.build == 0) + return false; + + return true; +} + +/* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */ +static enum bfi_ioc_img_ver_cmp +bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr, + struct bfi_ioc_image_hdr *fwhdr_to_cmp) +{ + if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false) + return BFI_IOC_IMG_VER_INCOMP; + + if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_OLD; + + /* GA takes priority over internal builds of the same patch stream. + * At this point major minor maint and patch numbers are same. + */ + if (fwhdr_is_ga(base_fwhdr) == true) + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_SAME; + else + return BFI_IOC_IMG_VER_OLD; + else + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_BETTER; + + if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_OLD; + + if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_OLD; + + /* All Version Numbers are equal. + * Md5 check to be done as a part of compatibility check. 
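+ *
+ * For example (hypothetical numbers): comparing a 3.2.3.0 base image
+ * against 3.2.3.1 yields BFI_IOC_IMG_VER_BETTER, against 3.2.2.9 it
+ * yields BFI_IOC_IMG_VER_INCOMP (maintenance differs), and only when
+ * patch, phase and build all match does control reach this point.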
+ */ + return BFI_IOC_IMG_VER_SAME; +} + +/* register definitions */ +#define FLI_CMD_REG 0x0001d000 +#define FLI_WRDATA_REG 0x0001d00c +#define FLI_RDDATA_REG 0x0001d010 +#define FLI_ADDR_REG 0x0001d004 +#define FLI_DEV_STATUS_REG 0x0001d014 + +#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ +#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ +#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ +#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ + +#define NFC_STATE_RUNNING 0x20000001 +#define NFC_STATE_PAUSED 0x00004560 +#define NFC_VER_VALID 0x147 + +enum bfa_flash_cmd { + BFA_FLASH_FAST_READ = 0x0b, /* fast read */ + BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */ + BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */ + BFA_FLASH_WRITE = 0x02, /* write */ + BFA_FLASH_READ_STATUS = 0x05, /* read status */ +}; + +/* hardware error definition */ +enum bfa_flash_err { + BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ + BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ + BFA_FLASH_BAD = -3, /*!< flash bad */ + BFA_FLASH_BUSY = -4, /*!< flash busy */ + BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ + BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ + BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ + BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ + BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ +}; + +/* flash command register data structure */ +union bfa_flash_cmd_reg { + struct { +#ifdef __BIG_ENDIAN + u32 act:1; + u32 rsv:1; + u32 write_cnt:9; + u32 read_cnt:9; + u32 addr_cnt:4; + u32 cmd:8; +#else + u32 cmd:8; + u32 addr_cnt:4; + u32 read_cnt:9; + u32 write_cnt:9; + u32 rsv:1; + u32 act:1; +#endif + } r; + u32 i; +}; + +/* flash device status register data structure */ +union bfa_flash_dev_status_reg { + struct { +#ifdef __BIG_ENDIAN + u32 rsv:21; + u32 fifo_cnt:6; + u32 busy:1; + u32 init_status:1; + u32 present:1; + u32 bad:1; + u32 good:1; +#else + u32 good:1; + u32 bad:1; + u32 present:1; + u32 init_status:1; + u32 busy:1; + u32 fifo_cnt:6; + u32 rsv:21; +#endif + } r; + u32 i; +}; + +/* flash address register data structure */ +union bfa_flash_addr_reg { + struct { +#ifdef __BIG_ENDIAN + u32 addr:24; + u32 dummy:8; +#else + u32 dummy:8; + u32 addr:24; +#endif + } r; + u32 i; +}; + +/* Flash raw private functions */ +static void +bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt, + u8 rd_cnt, u8 ad_cnt, u8 op) +{ + union bfa_flash_cmd_reg cmd; + + cmd.i = 0; + cmd.r.act = 1; + cmd.r.write_cnt = wr_cnt; + cmd.r.read_cnt = rd_cnt; + cmd.r.addr_cnt = ad_cnt; + cmd.r.cmd = op; + writel(cmd.i, (pci_bar + FLI_CMD_REG)); +} + +static void +bfa_flash_set_addr(void __iomem *pci_bar, u32 address) +{ + union bfa_flash_addr_reg addr; + + addr.r.addr = address & 0x00ffffff; + addr.r.dummy = 0; + writel(addr.i, (pci_bar + FLI_ADDR_REG)); +} + +static int +bfa_flash_cmd_act_check(void __iomem *pci_bar) +{ + union bfa_flash_cmd_reg cmd; + + cmd.i = readl(pci_bar + FLI_CMD_REG); + + if (cmd.r.act) + return BFA_FLASH_ERR_CMD_ACT; + + return 0; +} + +/* Flush FLI data fifo. */ +static u32 +bfa_flash_fifo_flush(void __iomem *pci_bar) +{ + u32 i; + u32 t; + union bfa_flash_dev_status_reg dev_status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + + if (!dev_status.r.fifo_cnt) + return 0; + + /* fifo counter in terms of words */ + for (i = 0; i < dev_status.r.fifo_cnt; i++) + t = readl(pci_bar + FLI_RDDATA_REG); + + /* Check the device status. It may take some time. 
*/ + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + break; + } + + if (dev_status.r.fifo_cnt) + return BFA_FLASH_ERR_FIFO_CNT; + + return 0; +} + +/* Read flash status. */ +static u32 +bfa_flash_status_read(void __iomem *pci_bar) +{ + union bfa_flash_dev_status_reg dev_status; + u32 status; + u32 ret_status; + int i; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); + + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + status = bfa_flash_cmd_act_check(pci_bar); + if (!status) + break; + } + + if (status) + return status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + return BFA_FLASH_BUSY; + + ret_status = readl(pci_bar + FLI_RDDATA_REG); + ret_status >>= 24; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + return ret_status; +} + +/* Start flash read operation. */ +static u32 +bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, + char *buf) +{ + u32 status; + + /* len must be mutiple of 4 and not exceeding fifo size */ + if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) + return BFA_FLASH_ERR_LEN; + + /* check status */ + status = bfa_flash_status_read(pci_bar); + if (status == BFA_FLASH_BUSY) + status = bfa_flash_status_read(pci_bar); + + if (status < 0) + return status; + + /* check if write-in-progress bit is cleared */ + if (status & BFA_FLASH_WIP_MASK) + return BFA_FLASH_ERR_WIP; + + bfa_flash_set_addr(pci_bar, offset); + + bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); + + return 0; +} + +/* Check flash read operation. */ +static u32 +bfa_flash_read_check(void __iomem *pci_bar) +{ + if (bfa_flash_cmd_act_check(pci_bar)) + return 1; + + return 0; +} + +/* End flash read operation. */ +static void +bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) +{ + u32 i; + + /* read data fifo up to 32 words */ + for (i = 0; i < len; i += 4) { + u32 w = readl(pci_bar + FLI_RDDATA_REG); + *((u32 *)(buf + i)) = swab32(w); + } + + bfa_flash_fifo_flush(pci_bar); +} + +/* Perform flash raw read. 
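+ *
+ * The raw read below takes the flash semaphore and pulls the data out
+ * in FIFO-sized pieces (at most BFA_FLASH_FIFO_SIZE bytes per
+ * bfa_flash_read_start()/bfa_flash_read_end() cycle).  A minimal caller
+ * sketch with a made-up buffer and offset; the length must be a
+ * multiple of four bytes:
+ *
+ *      char buf[256];
+ *      enum bfa_status st;
+ *
+ *      st = bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, off,
+ *                              buf, sizeof(buf));
+ *      if (st != BFA_STATUS_OK)
+ *              return st;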
*/ + +#define FLASH_BLOCKING_OP_MAX 500 +#define FLASH_SEM_LOCK_REG 0x18820 + +static int +bfa_raw_sem_get(void __iomem *bar) +{ + int locked; + + locked = readl((bar + FLASH_SEM_LOCK_REG)); + + return !locked; +} + +static enum bfa_status +bfa_flash_sem_get(void __iomem *bar) +{ + u32 n = FLASH_BLOCKING_OP_MAX; + + while (!bfa_raw_sem_get(bar)) { + if (--n <= 0) + return BFA_STATUS_BADFLASH; + mdelay(10); + } + return BFA_STATUS_OK; +} + +static void +bfa_flash_sem_put(void __iomem *bar) +{ + writel(0, (bar + FLASH_SEM_LOCK_REG)); +} + +static enum bfa_status +bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, + u32 len) +{ + u32 n, status; + u32 off, l, s, residue, fifo_sz; + + residue = len; + off = 0; + fifo_sz = BFA_FLASH_FIFO_SIZE; + status = bfa_flash_sem_get(pci_bar); + if (status != BFA_STATUS_OK) + return status; + + while (residue) { + s = offset + off; + n = s / fifo_sz; + l = (n + 1) * fifo_sz - s; + if (l > residue) + l = residue; + + status = bfa_flash_read_start(pci_bar, offset + off, l, + &buf[off]); + if (status < 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + + n = BFA_FLASH_BLOCKING_OP_MAX; + while (bfa_flash_read_check(pci_bar)) { + if (--n <= 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + } + + bfa_flash_read_end(pci_bar, l, &buf[off]); + + residue -= l; + off += l; + } + bfa_flash_sem_put(pci_bar); + + return BFA_STATUS_OK; +} + +#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ + +static enum bfa_status +bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off, + u32 *fwimg) +{ + return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, + BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), + (char *)fwimg, BFI_FLASH_CHUNK_SZ); +} + +static enum bfi_ioc_img_ver_cmp +bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *base_fwhdr) +{ + struct bfi_ioc_image_hdr *flash_fwhdr; + enum bfa_status status; + u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; + + status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg); + if (status != BFA_STATUS_OK) + return BFI_IOC_IMG_VER_INCOMP; + + flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg; + if (bfa_ioc_flash_fwver_valid(flash_fwhdr)) + return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); + else + return BFI_IOC_IMG_VER_INCOMP; +} + +/** + * Returns TRUE if driver is willing to work with current smem f/w version. + */ +bool +bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) +{ + struct bfi_ioc_image_hdr *drv_fwhdr; + enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp; + + drv_fwhdr = (struct bfi_ioc_image_hdr *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + + /* If smem is incompatible or old, driver should not work with it. */ + drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr); + if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || + drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { + return false; + } + + /* IF Flash has a better F/W than smem do not work with smem. + * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it. + * If Flash is old or incomp work with smem iff smem f/w == drv f/w. + */ + smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr); + + if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) + return false; + else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) + return true; + else + return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? + true : false; +} + +/* Return true if current running version is valid. Firmware signature and + * execution context (driver/bios) must match. 
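+ *
+ * Taken together with bfa_nw_ioc_fwver_cmp() above, the decision is:
+ *
+ *      boot environment mismatch                -> not valid
+ *      smem f/w older/incompatible than driver  -> not valid
+ *      flash f/w strictly better than smem      -> not valid (flash wins)
+ *      flash f/w same as smem                   -> valid
+ *      flash f/w old or incompatible            -> valid only when smem
+ *                                                  f/w equals driver f/w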
+ */ +static bool +bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) +{ + struct bfi_ioc_image_hdr fwhdr; + + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + if (swab32(fwhdr.bootenv) != boot_env) + return false; + + return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); +} + +/* Conditionally flush any pending message from firmware at start. */ +static void +bfa_ioc_msgflush(struct bfa_ioc *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if (r32) + writel(1, ioc->ioc_regs.lpu_mbox_cmd); +} + +static void +bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) +{ + enum bfi_ioc_state ioc_fwstate; + bool fwvalid; + u32 boot_env; + + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + + if (force) + ioc_fwstate = BFI_IOC_UNINIT; + + boot_env = BFI_FWBOOT_ENV_OS; + + /** + * check if firmware is valid + */ + fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? + false : bfa_ioc_fwver_valid(ioc, boot_env); + + if (!fwvalid) { + if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == + BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); + + return; + } + + /** + * If hardware initialization is in progress (initialized by other IOC), + * just wait for an initialization completion interrupt. + */ + if (ioc_fwstate == BFI_IOC_INITING) { + bfa_ioc_poll_fwinit(ioc); + return; + } + + /** + * If IOC function is disabled and firmware version is same, + * just re-enable IOC. + */ + if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { + /** + * When using MSI-X any pending firmware ready event should + * be flushed. Otherwise MSI-X interrupts are not delivered. + */ + bfa_ioc_msgflush(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + /** + * Initialize the h/w for any other states. + */ + if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == + BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); +} + +void +bfa_nw_ioc_timeout(void *ioc_arg) +{ + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + + bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); +} + +static void +bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len) +{ + u32 *msgp = (u32 *) ioc_msg; + u32 i; + + BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX)); + + /* + * first write msg to mailbox registers + */ + for (i = 0; i < len / sizeof(u32); i++) + writel(cpu_to_le32(msgp[i]), + ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) + writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + /* + * write 1 to mailbox CMD to trigger LPU event + */ + writel(1, ioc->ioc_regs.hfn_mbox_cmd); + (void) readl(ioc->ioc_regs.hfn_mbox_cmd); +} + +static void +bfa_ioc_send_enable(struct bfa_ioc *ioc) +{ + struct bfi_ioc_ctrl_req enable_req; + struct timeval tv; + + bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, + bfa_ioc_portid(ioc)); + enable_req.clscode = htons(ioc->clscode); + do_gettimeofday(&tv); + enable_req.tv_sec = ntohl(tv.tv_sec); + bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); +} + +static void +bfa_ioc_send_disable(struct bfa_ioc *ioc) +{ + struct bfi_ioc_ctrl_req disable_req; + + bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, + bfa_ioc_portid(ioc)); + bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); +} + +static void +bfa_ioc_send_getattr(struct bfa_ioc *ioc) +{ + struct bfi_ioc_getattr_req attr_req; + + bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, + bfa_ioc_portid(ioc)); + bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); + bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); +} + +void 
+bfa_nw_ioc_hb_check(void *cbarg) +{ + struct bfa_ioc *ioc = cbarg; + u32 hb_count; + + hb_count = readl(ioc->ioc_regs.heartbeat); + if (ioc->hb_count == hb_count) { + bfa_ioc_recover(ioc); + return; + } else { + ioc->hb_count = hb_count; + } + + bfa_ioc_mbox_poll(ioc); + mod_timer(&ioc->hb_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HB_TOV)); +} + +static void +bfa_ioc_hb_monitor(struct bfa_ioc *ioc) +{ + ioc->hb_count = readl(ioc->ioc_regs.heartbeat); + mod_timer(&ioc->hb_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HB_TOV)); +} + +static void +bfa_ioc_hb_stop(struct bfa_ioc *ioc) +{ + del_timer(&ioc->hb_timer); +} + +/* Initiate a full firmware download. */ +static enum bfa_status +bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, + u32 boot_env) +{ + u32 *fwimg; + u32 pgnum; + u32 loff = 0; + u32 chunkno = 0; + u32 i; + u32 asicmode; + u32 fwimg_size; + u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; + enum bfa_status status; + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); + + status = bfa_nw_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + + pgnum = bfa_ioc_smem_pgnum(ioc, loff); + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < fwimg_size; i++) { + if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { + chunkno = BFA_IOC_FLASH_CHUNK_NO(i); + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + status = bfa_nw_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), + fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg = bfa_cb_image_get_chunk( + bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + } + + /** + * write smem + */ + writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])), + ((ioc->ioc_regs.smem_page_start) + (loff))); + + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, + ioc->ioc_regs.host_page_num_fn); + } + } + + writel(bfa_ioc_smem_pgnum(ioc, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * Set boot type, env and device mode at the end. + */ + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + boot_type = BFI_FWBOOT_TYPE_NORMAL; + } + asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, + ioc->port0_mode, ioc->port1_mode); + writel(asicmode, ((ioc->ioc_regs.smem_page_start) + + BFI_FWBOOT_DEVMODE_OFF)); + writel(boot_type, ((ioc->ioc_regs.smem_page_start) + + (BFI_FWBOOT_TYPE_OFF))); + writel(boot_env, ((ioc->ioc_regs.smem_page_start) + + (BFI_FWBOOT_ENV_OFF))); + return BFA_STATUS_OK; +} + +static void +bfa_ioc_reset(struct bfa_ioc *ioc, bool force) +{ + bfa_ioc_hwinit(ioc, force); +} + +/* BFA ioc enable reply by firmware */ +static void +bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, + u8 cap_bm) +{ + struct bfa_iocpf *iocpf = &ioc->iocpf; + + ioc->port_mode = ioc->port_mode_cfg = port_mode; + ioc->ad_cap_bm = cap_bm; + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); +} + +/* Update BFA configuration from firmware configuration. 
*/ +static void +bfa_ioc_getattr_reply(struct bfa_ioc *ioc) +{ + struct bfi_ioc_attr *attr = ioc->attr; + + attr->adapter_prop = ntohl(attr->adapter_prop); + attr->card_type = ntohl(attr->card_type); + attr->maxfrsize = ntohs(attr->maxfrsize); + + bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); +} + +/* Attach time initialization of mbox logic. */ +static void +bfa_ioc_mbox_attach(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + int mc; + + INIT_LIST_HEAD(&mod->cmd_q); + for (mc = 0; mc < BFI_MC_MAX; mc++) { + mod->mbhdlr[mc].cbfn = NULL; + mod->mbhdlr[mc].cbarg = ioc->bfa; + } +} + +/* Mbox poll timer -- restarts any pending mailbox requests. */ +static void +bfa_ioc_mbox_poll(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd *cmd; + bfa_mbox_cmd_cbfn_t cbfn; + void *cbarg; + u32 stat; + + /** + * If no command pending, do nothing + */ + if (list_empty(&mod->cmd_q)) + return; + + /** + * If previous command is not yet fetched by firmware, do nothing + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) + return; + + /** + * Enqueue command to firmware. + */ + bfa_q_deq(&mod->cmd_q, &cmd); + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); + + /** + * Give a callback to the client, indicating that the command is sent + */ + if (cmd->cbfn) { + cbfn = cmd->cbfn; + cbarg = cmd->cbarg; + cmd->cbfn = NULL; + cbfn(cbarg); + } +} + +/* Cleanup any pending requests. */ +static void +bfa_ioc_mbox_flush(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd *cmd; + + while (!list_empty(&mod->cmd_q)) + bfa_q_deq(&mod->cmd_q, &cmd); +} + +/** + * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap + * + * @ioc: memory for IOC + * @tbuf: app memory to store data from smem + * @soff: smem offset + * @sz: size of smem in bytes + */ +static int +bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) +{ + u32 pgnum, loff, r32; + int i, len; + u32 *buf = tbuf; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); + loff = PSS_SMEM_PGOFF(soff); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0) + return 1; + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + len = sz/sizeof(u32); + for (i = 0; i < len; i++) { + r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start))); + buf[i] = be32_to_cpu(r32); + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + } + } + + writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * release semaphore + */ + readl(ioc->ioc_regs.ioc_init_sem_reg); + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + return 0; +} + +/* Retrieve saved firmware trace from a prior IOC failure. */ +int +bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) +{ + u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id; + int tlen, status = 0; + + tlen = *trclen; + if (tlen > BNA_DBG_FWTRC_LEN) + tlen = BNA_DBG_FWTRC_LEN; + + status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen); + *trclen = tlen; + return status; +} + +/* Save firmware trace if configured. 
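+ *
+ * "Configured" means a save buffer was handed over at setup time through
+ * bfa_nw_ioc_debug_memclaim(); its length is non-zero only when auto
+ * recovery is enabled.  A hedged setup sketch (the allocation shown with
+ * kzalloc is just an example, any suitably sized buffer will do):
+ *
+ *      u8 *fwsave = kzalloc(BNA_DBG_FWTRC_LEN, GFP_KERNEL);
+ *
+ *      if (fwsave)
+ *              bfa_nw_ioc_debug_memclaim(ioc, fwsave);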
*/ +static void +bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) +{ + int tlen; + + if (ioc->dbg_fwsave_once) { + ioc->dbg_fwsave_once = 0; + if (ioc->dbg_fwsave_len) { + tlen = ioc->dbg_fwsave_len; + bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); + } + } +} + +/* Retrieve saved firmware trace from a prior IOC failure. */ +int +bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) +{ + int tlen; + + if (ioc->dbg_fwsave_len == 0) + return BFA_STATUS_ENOFSAVE; + + tlen = *trclen; + if (tlen > ioc->dbg_fwsave_len) + tlen = ioc->dbg_fwsave_len; + + memcpy(trcdata, ioc->dbg_fwsave, tlen); + *trclen = tlen; + return BFA_STATUS_OK; +} + +static void +bfa_ioc_fail_notify(struct bfa_ioc *ioc) +{ + /** + * Notify driver and common modules registered for notification. + */ + ioc->cbfn->hbfail_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); + bfa_nw_ioc_debug_save_ftrc(ioc); +} + +/* IOCPF to IOC interface */ +static void +bfa_ioc_pf_enabled(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_ENABLED); +} + +static void +bfa_ioc_pf_disabled(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_DISABLED); +} + +static void +bfa_ioc_pf_failed(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_PFFAILED); +} + +static void +bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); +} + +static void +bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) +{ + /** + * Provide enable completion callback and AEN notification. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); +} + +/* IOC public */ +static enum bfa_status +bfa_ioc_pll_init(struct bfa_ioc *ioc) +{ + /* + * Hold semaphore so that nobody can access the chip during init. + */ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + + bfa_ioc_pll_init_asic(ioc); + + ioc->pllinit = true; + + /* Initialize LMEM */ + bfa_ioc_lmem_init(ioc); + + /* + * release semaphore. + */ + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + + return BFA_STATUS_OK; +} + +/* Interface used by diag module to do firmware boot with memory test + * as the entry vector. + */ +static enum bfa_status +bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, + u32 boot_env) +{ + struct bfi_ioc_image_hdr *drv_fwhdr; + enum bfa_status status; + bfa_ioc_stats(ioc, ioc_boots); + + if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) + return BFA_STATUS_FAILED; + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_NORMAL) { + drv_fwhdr = (struct bfi_ioc_image_hdr *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + /* Work with Flash iff flash f/w is better than driver f/w. + * Otherwise push drivers firmware. + */ + if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == + BFI_IOC_IMG_VER_BETTER) + boot_type = BFI_FWBOOT_TYPE_FLASH; + } + + /** + * Initialize IOC state of all functions on a chip reset. + */ + if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + } else { + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); + } + + bfa_ioc_msgflush(ioc); + status = bfa_ioc_download_fw(ioc, boot_type, boot_env); + if (status == BFA_STATUS_OK) + bfa_ioc_lpu_start(ioc); + else + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); + + return status; +} + +/* Enable/disable IOC failure auto recovery. 
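+ *
+ * The flag set here is picked up as iocpf.auto_recover, which decides
+ * whether the fail-sync handling above re-attempts hardware init and
+ * whether bfa_nw_ioc_debug_memclaim() sizes a firmware-trace buffer.
+ * Typical use is a single call while the driver loads, for instance:
+ *
+ *      bfa_nw_ioc_auto_recover(true);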
*/ +void +bfa_nw_ioc_auto_recover(bool auto_recover) +{ + bfa_nw_auto_recover = auto_recover; +} + +static bool +bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) +{ + u32 *msgp = mbmsg; + u32 r32; + int i; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if ((r32 & 1) == 0) + return false; + + /** + * read the MBOX msg + */ + for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); + i++) { + r32 = readl(ioc->ioc_regs.lpu_mbox + + i * sizeof(u32)); + msgp[i] = htonl(r32); + } + + /** + * turn off mailbox interrupt by clearing mailbox status + */ + writel(1, ioc->ioc_regs.lpu_mbox_cmd); + readl(ioc->ioc_regs.lpu_mbox_cmd); + + return true; +} + +static void +bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) +{ + union bfi_ioc_i2h_msg_u *msg; + struct bfa_iocpf *iocpf = &ioc->iocpf; + + msg = (union bfi_ioc_i2h_msg_u *) m; + + bfa_ioc_stats(ioc, ioc_isrs); + + switch (msg->mh.msg_id) { + case BFI_IOC_I2H_HBEAT: + break; + + case BFI_IOC_I2H_ENABLE_REPLY: + bfa_ioc_enable_reply(ioc, + (enum bfa_mode)msg->fw_event.port_mode, + msg->fw_event.cap_bm); + break; + + case BFI_IOC_I2H_DISABLE_REPLY: + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); + break; + + case BFI_IOC_I2H_GETATTR_REPLY: + bfa_ioc_getattr_reply(ioc); + break; + + default: + BUG_ON(1); + } +} + +/** + * bfa_nw_ioc_attach - IOC attach time initialization and setup. + * + * @ioc: memory for IOC + * @bfa: driver instance structure + */ +void +bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) +{ + ioc->bfa = bfa; + ioc->cbfn = cbfn; + ioc->fcmode = false; + ioc->pllinit = false; + ioc->dbg_fwsave_once = true; + ioc->iocpf.ioc = ioc; + + bfa_ioc_mbox_attach(ioc); + INIT_LIST_HEAD(&ioc->notify_q); + + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(ioc, IOC_E_RESET); +} + +/* Driver detach time IOC cleanup. */ +void +bfa_nw_ioc_detach(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_DETACH); + + /* Done with detach, empty the notify_q. */ + INIT_LIST_HEAD(&ioc->notify_q); +} + +/** + * bfa_nw_ioc_pci_init - Setup IOC PCI properties. + * + * @pcidev: PCI device information for this IOC + */ +void +bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, + enum bfi_pcifn_class clscode) +{ + ioc->clscode = clscode; + ioc->pcidev = *pcidev; + + /** + * Initialize IOC and device personality + */ + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; + ioc->asic_mode = BFI_ASIC_MODE_FC; + + switch (pcidev->device_id) { + case PCI_DEVICE_ID_BROCADE_CT: + ioc->asic_gen = BFI_ASIC_GEN_CT; + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + break; + + case BFA_PCI_DEVICE_ID_CT2: + ioc->asic_gen = BFI_ASIC_GEN_CT2; + if (clscode == BFI_PCIFN_CLASS_FC && + pcidev->ssid == BFA_PCI_CT2_SSID_FC) { + ioc->asic_mode = BFI_ASIC_MODE_FC16; + ioc->fcmode = true; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + } else { + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + } else { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_NIC; + ioc->ad_cap_bm = BFA_CM_NIC; + } + } + break; + + default: + BUG_ON(1); + } + + /** + * Set asic specific interfaces. 
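+ *
+ * Each ASIC generation supplies its own ops table (struct bfa_ioc_hwif,
+ * declared in bfa_ioc.h) and the generic IOC code dispatches through it.
+ * The pll-init hook illustrates the pattern (macro quoted from the
+ * header for reference):
+ *
+ *      #define bfa_ioc_pll_init_asic(__ioc)                            \
+ *              ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+ *                                               (__ioc)->asic_mode))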
+ */ + if (ioc->asic_gen == BFI_ASIC_GEN_CT) + bfa_nw_ioc_set_ct_hwif(ioc); + else { + WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); + bfa_nw_ioc_set_ct2_hwif(ioc); + bfa_nw_ioc_ct2_poweron(ioc); + } + + bfa_ioc_map_port(ioc); + bfa_ioc_reg_init(ioc); +} + +/** + * bfa_nw_ioc_mem_claim - Initialize IOC dma memory + * + * @dm_kva: kernel virtual address of IOC dma memory + * @dm_pa: physical address of IOC dma memory + */ +void +bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) +{ + /** + * dma memory for firmware attribute + */ + ioc->attr_dma.kva = dm_kva; + ioc->attr_dma.pa = dm_pa; + ioc->attr = (struct bfi_ioc_attr *) dm_kva; +} + +/* Return size of dma memory required. */ +u32 +bfa_nw_ioc_meminfo(void) +{ + return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); +} + +void +bfa_nw_ioc_enable(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_enables); + ioc->dbg_fwsave_once = true; + + bfa_fsm_send_event(ioc, IOC_E_ENABLE); +} + +void +bfa_nw_ioc_disable(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_disables); + bfa_fsm_send_event(ioc, IOC_E_DISABLE); +} + +/* Initialize memory for saving firmware trace. */ +void +bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) +{ + ioc->dbg_fwsave = dbg_fwsave; + ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0; +} + +static u32 +bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) +{ + return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); +} + +/* Register mailbox message handler function, to be called by common modules */ +void +bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + + mod->mbhdlr[mc].cbfn = cbfn; + mod->mbhdlr[mc].cbarg = cbarg; +} + +/** + * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware. + * + * @ioc: IOC instance + * @cmd: Mailbox command + * + * Waits if mailbox is busy. Responsibility of caller to serialize + */ +bool +bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, + bfa_mbox_cmd_cbfn_t cbfn, void *cbarg) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + u32 stat; + + cmd->cbfn = cbfn; + cmd->cbarg = cbarg; + + /** + * If a previous command is pending, queue new command + */ + if (!list_empty(&mod->cmd_q)) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return true; + } + + /** + * If mailbox is busy, queue command for poll timer + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return true; + } + + /** + * mailbox is free -- queue command to firmware + */ + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); + + return false; +} + +/* Handle mailbox interrupts */ +void +bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfi_mbmsg m; + int mc; + + if (bfa_ioc_msgget(ioc, &m)) { + /** + * Treat IOC message class as special. 
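+ *
+ * Messages of class BFI_MC_IOC are consumed by bfa_ioc_isr() directly;
+ * every other class is routed to the handler registered through
+ * bfa_nw_ioc_mbox_regisr().  The flash module, for instance, hooks in
+ * with:
+ *
+ *      bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH,
+ *                             bfa_flash_intr, flash);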
+ */ + mc = m.mh.msg_class; + if (mc == BFI_MC_IOC) { + bfa_ioc_isr(ioc, &m); + return; + } + + if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) + return; + + mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); + } + + bfa_ioc_lpu_read_stat(ioc); + + /** + * Try to send pending mailbox commands + */ + bfa_ioc_mbox_poll(ioc); +} + +void +bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_hbfails); + bfa_ioc_stats_hb_count(ioc, ioc->hb_count); + bfa_fsm_send_event(ioc, IOC_E_HWERROR); +} + +/* return true if IOC is disabled */ +bool +bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || + bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); +} + +/* return true if IOC is operational */ +bool +bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); +} + +/* Add to IOC heartbeat failure notification queue. To be used by common + * modules such as cee, port, diag. + */ +void +bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, + struct bfa_ioc_notify *notify) +{ + list_add_tail(¬ify->qe, &ioc->notify_q); +} + +#define BFA_MFG_NAME "QLogic" +static void +bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, + struct bfa_adapter_attr *ad_attr) +{ + struct bfi_ioc_attr *ioc_attr; + + ioc_attr = ioc->attr; + + bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); + bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); + bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); + bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); + memcpy(&ad_attr->vpd, &ioc_attr->vpd, + sizeof(struct bfa_mfg_vpd)); + + ad_attr->nports = bfa_ioc_get_nports(ioc); + ad_attr->max_speed = bfa_ioc_speed_sup(ioc); + + bfa_ioc_get_adapter_model(ioc, ad_attr->model); + /* For now, model descr uses same model string */ + bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); + + ad_attr->card_type = ioc_attr->card_type; + ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); + + if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) + ad_attr->prototype = 1; + else + ad_attr->prototype = 0; + + ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); + ad_attr->mac = bfa_nw_ioc_get_mac(ioc); + + ad_attr->pcie_gen = ioc_attr->pcie_gen; + ad_attr->pcie_lanes = ioc_attr->pcie_lanes; + ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; + ad_attr->asic_rev = ioc_attr->asic_rev; + + bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); +} + +static enum bfa_ioc_type +bfa_ioc_get_type(struct bfa_ioc *ioc) +{ + if (ioc->clscode == BFI_PCIFN_CLASS_ETH) + return BFA_IOC_TYPE_LL; + + BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC)); + + return (ioc->attr->port_mode == BFI_PORT_MODE_FC) + ? 
BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; +} + +static void +bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) +{ + memcpy(serial_num, + (void *)ioc->attr->brcd_serialnum, + BFA_ADAPTER_SERIAL_NUM_LEN); +} + +static void +bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) +{ + memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); +} + +static void +bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) +{ + BUG_ON(!(chip_rev)); + + memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN); + + chip_rev[0] = 'R'; + chip_rev[1] = 'e'; + chip_rev[2] = 'v'; + chip_rev[3] = '-'; + chip_rev[4] = ioc->attr->asic_rev; + chip_rev[5] = '\0'; +} + +static void +bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) +{ + memcpy(optrom_ver, ioc->attr->optrom_version, + BFA_VERSION_LEN); +} + +static void +bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) +{ + memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); +} + +static void +bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) +{ + struct bfi_ioc_attr *ioc_attr; + + BUG_ON(!(model)); + memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN); + + ioc_attr = ioc->attr; + + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", + BFA_MFG_NAME, ioc_attr->card_type); +} + +static enum bfa_ioc_state +bfa_ioc_get_state(struct bfa_ioc *ioc) +{ + enum bfa_iocpf_state iocpf_st; + enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); + + if (ioc_st == BFA_IOC_ENABLING || + ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { + + iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + + switch (iocpf_st) { + case BFA_IOCPF_SEMWAIT: + ioc_st = BFA_IOC_SEMWAIT; + break; + + case BFA_IOCPF_HWINIT: + ioc_st = BFA_IOC_HWINIT; + break; + + case BFA_IOCPF_FWMISMATCH: + ioc_st = BFA_IOC_FWMISMATCH; + break; + + case BFA_IOCPF_FAIL: + ioc_st = BFA_IOC_FAIL; + break; + + case BFA_IOCPF_INITFAIL: + ioc_st = BFA_IOC_INITFAIL; + break; + + default: + break; + } + } + return ioc_st; +} + +void +bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) +{ + memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); + + ioc_attr->state = bfa_ioc_get_state(ioc); + ioc_attr->port_id = bfa_ioc_portid(ioc); + ioc_attr->port_mode = ioc->port_mode; + + ioc_attr->port_mode_cfg = ioc->port_mode_cfg; + ioc_attr->cap_bm = ioc->ad_cap_bm; + + ioc_attr->ioc_type = bfa_ioc_get_type(ioc); + + bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); + + ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); + ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); + ioc_attr->def_fn = bfa_ioc_is_default(ioc); + bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); +} + +/* WWN public */ +static u64 +bfa_ioc_get_pwwn(struct bfa_ioc *ioc) +{ + return ioc->attr->pwwn; +} + +mac_t +bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) +{ + return ioc->attr->mac; +} + +/* Firmware failure detected. Start recovery actions. 
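+ *
+ * Reached from bfa_nw_ioc_hb_check() when the firmware heartbeat counter
+ * stops advancing between two samples taken BFA_IOC_HB_TOV (500 ms)
+ * apart.  Hypothetical timeline:
+ *
+ *      t         : heartbeat = 41530, remember it, re-arm the timer
+ *      t + 500 ms: heartbeat = 41544, still moving, re-arm the timer
+ *      t + 1 s   : heartbeat = 41544, unchanged -> bfa_ioc_recover()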
*/ +static void +bfa_ioc_recover(struct bfa_ioc *ioc) +{ + pr_crit("Heart Beat of IOC has failed\n"); + bfa_ioc_stats(ioc, ioc_hbfails); + bfa_ioc_stats_hb_count(ioc, ioc->hb_count); + bfa_fsm_send_event(ioc, IOC_E_HBFAIL); +} + +/* BFA IOC PF private functions */ + +static void +bfa_iocpf_enable(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); +} + +static void +bfa_iocpf_disable(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); +} + +static void +bfa_iocpf_fail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); +} + +static void +bfa_iocpf_initfail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); +} + +static void +bfa_iocpf_getattrfail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); +} + +static void +bfa_iocpf_stop(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); +} + +void +bfa_nw_iocpf_timeout(void *ioc_arg) +{ + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + enum bfa_iocpf_state iocpf_st; + + iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + + if (iocpf_st == BFA_IOCPF_HWINIT) + bfa_ioc_poll_fwinit(ioc); + else + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); +} + +void +bfa_nw_iocpf_sem_timeout(void *ioc_arg) +{ + struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; + + bfa_ioc_hw_sem_get(ioc); +} + +static void +bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) +{ + u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + + if (fwstate == BFI_IOC_DISABLED) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); + } else { + ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; + mod_timer(&ioc->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_POLL_TOV)); + } +} + +/* + * Flash module specific + */ + +/* + * FLASH DMA buffer should be big enough to hold both MFG block and + * asic block(64k) at the same time and also should be 2k aligned to + * avoid write segement to cross sector boundary. + */ +#define BFA_FLASH_SEG_SZ 2048 +#define BFA_FLASH_DMA_BUF_SZ \ + roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ) + +static void +bfa_flash_cb(struct bfa_flash *flash) +{ + flash->op_busy = 0; + if (flash->cbfn) + flash->cbfn(flash->cbarg, flash->status); +} + +static void +bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) +{ + struct bfa_flash *flash = cbarg; + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (flash->op_busy) { + flash->status = BFA_STATUS_IOC_FAILURE; + flash->cbfn(flash->cbarg, flash->status); + flash->op_busy = 0; + } + break; + default: + break; + } +} + +/* + * Send flash write request. + */ +static void +bfa_flash_write_send(struct bfa_flash *flash) +{ + struct bfi_flash_write_req *msg = + (struct bfi_flash_write_req *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + + /* indicate if it's the last msg of the whole write operation */ + msg->last = (len == flash->residue) ? 
1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); + + flash->residue -= len; + flash->offset += len; +} + +/** + * bfa_flash_read_send - Send flash read request. + * + * @cbarg: callback argument + */ +static void +bfa_flash_read_send(void *cbarg) +{ + struct bfa_flash *flash = cbarg; + struct bfi_flash_read_req *msg = + (struct bfi_flash_read_req *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); +} + +/** + * bfa_flash_intr - Process flash response messages upon receiving interrupts. + * + * @flasharg: flash structure + * @msg: message structure + */ +static void +bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) +{ + struct bfa_flash *flash = flasharg; + u32 status; + + union { + struct bfi_flash_query_rsp *query; + struct bfi_flash_write_rsp *write; + struct bfi_flash_read_rsp *read; + struct bfi_mbmsg *msg; + } m; + + m.msg = msg; + + /* receiving response after ioc failure */ + if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) + return; + + switch (msg->mh.msg_id) { + case BFI_FLASH_I2H_QUERY_RSP: + status = be32_to_cpu(m.query->status); + if (status == BFA_STATUS_OK) { + u32 i; + struct bfa_flash_attr *attr, *f; + + attr = (struct bfa_flash_attr *) flash->ubuf; + f = (struct bfa_flash_attr *) flash->dbuf_kva; + attr->status = be32_to_cpu(f->status); + attr->npart = be32_to_cpu(f->npart); + for (i = 0; i < attr->npart; i++) { + attr->part[i].part_type = + be32_to_cpu(f->part[i].part_type); + attr->part[i].part_instance = + be32_to_cpu(f->part[i].part_instance); + attr->part[i].part_off = + be32_to_cpu(f->part[i].part_off); + attr->part[i].part_size = + be32_to_cpu(f->part[i].part_size); + attr->part[i].part_len = + be32_to_cpu(f->part[i].part_len); + attr->part[i].part_status = + be32_to_cpu(f->part[i].part_status); + } + } + flash->status = status; + bfa_flash_cb(flash); + break; + case BFI_FLASH_I2H_WRITE_RSP: + status = be32_to_cpu(m.write->status); + if (status != BFA_STATUS_OK || flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_write_send(flash); + break; + case BFI_FLASH_I2H_READ_RSP: + status = be32_to_cpu(m.read->status); + if (status != BFA_STATUS_OK) { + flash->status = status; + bfa_flash_cb(flash); + } else { + u32 len = be32_to_cpu(m.read->length); + memcpy(flash->ubuf + flash->offset, + flash->dbuf_kva, len); + flash->residue -= len; + flash->offset += len; + if (flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_read_send(flash); + } + break; + case BFI_FLASH_I2H_BOOT_VER_RSP: + case BFI_FLASH_I2H_EVENT: + break; + default: + WARN_ON(1); + } +} + +/* + * Flash memory info API. + */ +u32 +bfa_nw_flash_meminfo(void) +{ + return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/** + * bfa_nw_flash_attach - Flash attach API. 
+ * + * @flash: flash structure + * @ioc: ioc structure + * @dev: device structure + */ +void +bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) +{ + flash->ioc = ioc; + flash->cbfn = NULL; + flash->cbarg = NULL; + flash->op_busy = 0; + + bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); + bfa_q_qe_init(&flash->ioc_notify); + bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); + list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); +} + +/** + * bfa_nw_flash_memclaim - Claim memory for flash + * + * @flash: flash structure + * @dm_kva: pointer to virtual memory address + * @dm_pa: physical memory address + */ +void +bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) +{ + flash->dbuf_kva = dm_kva; + flash->dbuf_pa = dm_pa; + memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); + dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/** + * bfa_nw_flash_get_attr - Get flash attribute. + * + * @flash: flash structure + * @attr: flash attribute structure + * @cbfn: callback function + * @cbarg: callback argument + * + * Return status. + */ +enum bfa_status +bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, + bfa_cb_flash cbfn, void *cbarg) +{ + struct bfi_flash_query_req *msg = + (struct bfi_flash_query_req *) flash->mb.msg; + + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->ubuf = (u8 *) attr; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); + + return BFA_STATUS_OK; +} + +/** + * bfa_nw_flash_update_part - Update flash partition. + * + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: update data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument + * + * Return status. + */ +enum bfa_status +bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg) +{ + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + */ + if (!len || (len & 0x03)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (type == BFA_FLASH_PART_MFG) + return BFA_STATUS_EINVAL; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_write_send(flash); + + return BFA_STATUS_OK; +} + +/** + * bfa_nw_flash_read_part - Read flash partition. + * + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: read data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument + * + * Return status. 
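+ *
+ * Illustrative call (partition type, buffer and callback names are
+ * placeholders; @len must be a multiple of 4 and completion is reported
+ * asynchronously through @cbfn once the mailbox exchange finishes):
+ *
+ *      status = bfa_nw_flash_read_part(flash, part_type, 0, buf, 2048, 0,
+ *                                      my_read_done, my_cbarg);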
+ */ +enum bfa_status +bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg) +{ + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + */ + if (!len || (len & 0x03)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_read_send(flash); + + return BFA_STATUS_OK; +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h new file mode 100644 index 000000000..effb7156e --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -0,0 +1,369 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_IOC_H__ +#define __BFA_IOC_H__ + +#include "bfa_cs.h" +#include "bfi.h" +#include "cna.h" + +#define BFA_IOC_TOV 3000 /* msecs */ +#define BFA_IOC_HWSEM_TOV 500 /* msecs */ +#define BFA_IOC_HB_TOV 500 /* msecs */ +#define BFA_IOC_POLL_TOV 200 /* msecs */ +#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ + BFI_IOC_TRC_HDR_SZ) + +/* PCI device information required by IOC */ +struct bfa_pcidev { + int pci_slot; + u8 pci_func; + u16 device_id; + u16 ssid; + void __iomem *pci_bar_kva; +}; + +/* Structure used to remember the DMA-able memory block's KVA and Physical + * Address + */ +struct bfa_dma { + void *kva; /* ! Kernel virtual address */ + u64 pa; /* ! Physical address */ +}; + +#define BFA_DMA_ALIGN_SZ 256 + +/* smem size for Crossbow and Catapult */ +#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ +#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ + +/* BFA dma address assignment macro. 
(big endian format) */ +#define bfa_dma_be_addr_set(dma_addr, pa) \ + __bfa_dma_be_addr_set(&dma_addr, (u64)pa) +static inline void +__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) +{ + dma_addr->a32.addr_lo = (u32) htonl(pa); + dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa)); +} + +#define bfa_alen_set(__alen, __len, __pa) \ + __bfa_alen_set(__alen, __len, (u64)__pa) + +static inline void +__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa) +{ + alen->al_len = cpu_to_be32(len); + bfa_dma_be_addr_set(alen->al_addr, pa); +} + +struct bfa_ioc_regs { + void __iomem *hfn_mbox_cmd; + void __iomem *hfn_mbox; + void __iomem *lpu_mbox_cmd; + void __iomem *lpu_mbox; + void __iomem *lpu_read_stat; + void __iomem *pss_ctl_reg; + void __iomem *pss_err_status_reg; + void __iomem *app_pll_fast_ctl_reg; + void __iomem *app_pll_slow_ctl_reg; + void __iomem *ioc_sem_reg; + void __iomem *ioc_usage_sem_reg; + void __iomem *ioc_init_sem_reg; + void __iomem *ioc_usage_reg; + void __iomem *host_page_num_fn; + void __iomem *heartbeat; + void __iomem *ioc_fwstate; + void __iomem *alt_ioc_fwstate; + void __iomem *ll_halt; + void __iomem *alt_ll_halt; + void __iomem *err_set; + void __iomem *ioc_fail_sync; + void __iomem *shirq_isr_next; + void __iomem *shirq_msk_next; + void __iomem *smem_page_start; + u32 smem_pg0; +}; + +/* IOC Mailbox structures */ +typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg); +struct bfa_mbox_cmd { + struct list_head qe; + bfa_mbox_cmd_cbfn_t cbfn; + void *cbarg; + u32 msg[BFI_IOC_MSGSZ]; +}; + +/* IOC mailbox module */ +typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m); +struct bfa_ioc_mbox_mod { + struct list_head cmd_q; /*!< pending mbox queue */ + int nmclass; /*!< number of handlers */ + struct { + bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */ + void *cbarg; + } mbhdlr[BFI_MC_MAX]; +}; + +/* IOC callback function interfaces */ +typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); +typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa); +struct bfa_ioc_cbfn { + bfa_ioc_enable_cbfn_t enable_cbfn; + bfa_ioc_disable_cbfn_t disable_cbfn; + bfa_ioc_hbfail_cbfn_t hbfail_cbfn; + bfa_ioc_reset_cbfn_t reset_cbfn; +}; + +/* IOC event notification mechanism. 
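+ *
+ * A hedged usage sketch (callback and argument names are placeholders):
+ *
+ *      static void my_ioc_notify(void *cbarg, enum bfa_ioc_event event)
+ *      {
+ *              ... react to BFA_IOC_E_ENABLED, _DISABLED or _FAILED ...
+ *      }
+ *
+ *      struct bfa_ioc_notify notify;
+ *
+ *      bfa_ioc_notify_init(&notify, my_ioc_notify, my_cbarg);
+ *      bfa_nw_ioc_notify_register(ioc, &notify);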
*/ +enum bfa_ioc_event { + BFA_IOC_E_ENABLED = 1, + BFA_IOC_E_DISABLED = 2, + BFA_IOC_E_FAILED = 3, +}; + +typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event); + +struct bfa_ioc_notify { + struct list_head qe; + bfa_ioc_notify_cbfn_t cbfn; + void *cbarg; +}; + +/* Initialize a IOC event notification structure */ +#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ + (__notify)->cbfn = (__cbfn); \ + (__notify)->cbarg = (__cbarg); \ +} while (0) + +struct bfa_iocpf { + bfa_fsm_t fsm; + struct bfa_ioc *ioc; + bool fw_mismatch_notified; + bool auto_recover; + u32 poll_time; +}; + +struct bfa_ioc { + bfa_fsm_t fsm; + struct bfa *bfa; + struct bfa_pcidev pcidev; + struct timer_list ioc_timer; + struct timer_list iocpf_timer; + struct timer_list sem_timer; + struct timer_list hb_timer; + u32 hb_count; + struct list_head notify_q; + void *dbg_fwsave; + int dbg_fwsave_len; + bool dbg_fwsave_once; + enum bfi_pcifn_class clscode; + struct bfa_ioc_regs ioc_regs; + struct bfa_ioc_drv_stats stats; + bool fcmode; + bool pllinit; + bool stats_busy; /*!< outstanding stats */ + u8 port_id; + + struct bfa_dma attr_dma; + struct bfi_ioc_attr *attr; + struct bfa_ioc_cbfn *cbfn; + struct bfa_ioc_mbox_mod mbox_mod; + const struct bfa_ioc_hwif *ioc_hwif; + struct bfa_iocpf iocpf; + enum bfi_asic_gen asic_gen; + enum bfi_asic_mode asic_mode; + enum bfi_port_mode port0_mode; + enum bfi_port_mode port1_mode; + enum bfa_mode port_mode; + u8 ad_cap_bm; /*!< adapter cap bit mask */ + u8 port_mode_cfg; /*!< config port mode */ +}; + +struct bfa_ioc_hwif { + enum bfa_status (*ioc_pll_init) (void __iomem *rb, + enum bfi_asic_mode m); + bool (*ioc_firmware_lock) (struct bfa_ioc *ioc); + void (*ioc_firmware_unlock) (struct bfa_ioc *ioc); + void (*ioc_reg_init) (struct bfa_ioc *ioc); + void (*ioc_map_port) (struct bfa_ioc *ioc); + void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, + bool msix); + void (*ioc_notify_fail) (struct bfa_ioc *ioc); + void (*ioc_ownership_reset) (struct bfa_ioc *ioc); + bool (*ioc_sync_start) (struct bfa_ioc *ioc); + void (*ioc_sync_join) (struct bfa_ioc *ioc); + void (*ioc_sync_leave) (struct bfa_ioc *ioc); + void (*ioc_sync_ack) (struct bfa_ioc *ioc); + bool (*ioc_sync_complete) (struct bfa_ioc *ioc); + bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc); + void (*ioc_set_fwstate) (struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc); + void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc); + +}; + +#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) +#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) +#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) +#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) +#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_is_default(__ioc) \ + (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc)) +#define bfa_ioc_fetch_stats(__ioc, __stats) \ + (((__stats)->drv_stats) = (__ioc)->stats) +#define bfa_ioc_clr_stats(__ioc) \ + memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) +#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) +#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) +#define bfa_ioc_speed_sup(__ioc) \ + BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) +#define bfa_ioc_get_nports(__ioc) \ + BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) + +#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) +#define 
bfa_ioc_stats_hb_count(_ioc, _hb_count) \ + ((_ioc)->stats.hb_count = (_hb_count)) +#define BFA_IOC_FWIMG_MINSZ (16 * 1024) +#define BFA_IOC_FW_SMEM_SIZE(__ioc) \ + ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \ + ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE) +#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) + +/* IOC mailbox interface */ +bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, + struct bfa_mbox_cmd *cmd, + bfa_mbox_cmd_cbfn_t cbfn, void *cbarg); +void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc); +void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); + +/* IOC interfaces */ + +#define bfa_ioc_pll_init_asic(__ioc) \ + ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ + (__ioc)->asic_mode)) + +#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \ + if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \ + ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \ +} while (0) +#define bfa_ioc_ownership_reset(__ioc) \ + ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) + +#define bfa_ioc_lpu_read_stat(__ioc) do { \ + if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \ + ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \ +} while (0) + +void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc); +void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc); +void bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc); + +void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, + struct bfa_ioc_cbfn *cbfn); +void bfa_nw_ioc_auto_recover(bool auto_recover); +void bfa_nw_ioc_detach(struct bfa_ioc *ioc); +void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, + enum bfi_pcifn_class clscode); +u32 bfa_nw_ioc_meminfo(void); +void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa); +void bfa_nw_ioc_enable(struct bfa_ioc *ioc); +void bfa_nw_ioc_disable(struct bfa_ioc *ioc); + +void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); +bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); +bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); +void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); +enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc); +void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, + struct bfa_ioc_notify *notify); +bool bfa_nw_ioc_sem_get(void __iomem *sem_reg); +void bfa_nw_ioc_sem_release(void __iomem *sem_reg); +void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc); +void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *fwhdr); +bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *fwhdr); +mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc); +void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave); +int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen); +int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen); + +/* + * Timeout APIs + */ +void bfa_nw_ioc_timeout(void *ioc); +void bfa_nw_ioc_hb_check(void *ioc); +void bfa_nw_iocpf_timeout(void *ioc); +void bfa_nw_iocpf_sem_timeout(void *ioc); + +/* + * F/W Image Size & Chunk + */ +u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off); +u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen); + +/* + * Flash module specific + */ +typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status); + +struct bfa_flash { + struct bfa_ioc *ioc; /* back pointer to ioc */ + u32 type; /* partition type 
*/ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 op_busy; /* operation busy flag */ + u32 residue; /* residual length */ + u32 offset; /* offset */ + enum bfa_status status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + bfa_cb_flash cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + u32 addr_off; /* partition address offset */ + struct bfa_mbox_cmd mb; /* mailbox */ + struct bfa_ioc_notify ioc_notify; /* ioc event notify */ +}; + +enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash, + struct bfa_flash_attr *attr, + bfa_cb_flash cbfn, void *cbarg); +enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash, + u32 type, u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg); +enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash, + u32 type, u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg); +u32 bfa_nw_flash_meminfo(void); +void bfa_nw_flash_attach(struct bfa_flash *flash, + struct bfa_ioc *ioc, void *dev); +void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa); + +#endif /* __BFA_IOC_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c new file mode 100644 index 000000000..2e72445db --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -0,0 +1,945 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "bfa_ioc.h" +#include "cna.h" +#include "bfi.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +#define bfa_ioc_ct_sync_pos(__ioc) \ + ((u32) (1 << bfa_ioc_pcifn(__ioc))) +#define BFA_IOC_SYNC_REQD_SH 16 +#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) +#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) +#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) +#define bfa_ioc_ct_sync_reqd_pos(__ioc) \ + (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) + +/* + * forward declarations + */ +static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc); +static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); +static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); +static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc); +static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); +static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc); +static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); +static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); +static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); +static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); +static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); +static void bfa_ioc_ct_set_cur_ioc_fwstate( + struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc); +static void bfa_ioc_ct_set_alt_ioc_fwstate( + struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc); +static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, + enum bfi_asic_mode asic_mode); +static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, + enum bfi_asic_mode asic_mode); +static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc); + +static const struct bfa_ioc_hwif nw_hwif_ct = { + .ioc_pll_init = bfa_ioc_ct_pll_init, + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, + .ioc_reg_init = bfa_ioc_ct_reg_init, + .ioc_map_port = bfa_ioc_ct_map_port, + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set, + .ioc_notify_fail = bfa_ioc_ct_notify_fail, + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, + .ioc_sync_start = bfa_ioc_ct_sync_start, + .ioc_sync_join = bfa_ioc_ct_sync_join, + .ioc_sync_leave = bfa_ioc_ct_sync_leave, + .ioc_sync_ack = bfa_ioc_ct_sync_ack, + .ioc_sync_complete = bfa_ioc_ct_sync_complete, + .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, + .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, + .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, + .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, +}; + +static const struct bfa_ioc_hwif nw_hwif_ct2 = { + .ioc_pll_init = bfa_ioc_ct2_pll_init, + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, + .ioc_reg_init = bfa_ioc_ct2_reg_init, + .ioc_map_port = bfa_ioc_ct2_map_port, + .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat, + .ioc_isr_mode_set = NULL, + .ioc_notify_fail = bfa_ioc_ct_notify_fail, + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, + .ioc_sync_start = bfa_ioc_ct_sync_start, + .ioc_sync_join = bfa_ioc_ct_sync_join, + .ioc_sync_leave = bfa_ioc_ct_sync_leave, + .ioc_sync_ack = bfa_ioc_ct_sync_ack, + 
.ioc_sync_complete = bfa_ioc_ct_sync_complete, + .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, + .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, + .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, + .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, +}; + +/* Called from bfa_ioc_attach() to map asic specific calls. */ +void +bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) +{ + ioc->ioc_hwif = &nw_hwif_ct; +} + +void +bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) +{ + ioc->ioc_hwif = &nw_hwif_ct2; +} + +/* Return true if firmware of current driver matches the running firmware. */ +static bool +bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) +{ + enum bfi_ioc_state ioc_fwstate; + u32 usecnt; + struct bfi_ioc_image_hdr fwhdr; + + /** + * If bios boot (flash based) -- do not increment usage count + */ + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < + BFA_IOC_FWIMG_MINSZ) + return true; + + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + + /** + * If usage count is 0, always return TRUE. + */ + if (usecnt == 0) { + writel(1, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_fail_sync); + return true; + } + + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + /** + * Use count cannot be non-zero and chip in uninitialized state. + */ + BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT)); + + /** + * Check if another driver with a different firmware is active + */ + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + return false; + } + + /** + * Same firmware version. Increment the reference count. + */ + usecnt++; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + return true; +} + +static void +bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) +{ + u32 usecnt; + + /** + * If bios boot (flash based) -- do not decrement usage count + */ + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < + BFA_IOC_FWIMG_MINSZ) + return; + + /** + * decrement usage count + */ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + BUG_ON(!(usecnt > 0)); + + usecnt--; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); +} + +/* Notify other functions on HB failure. 
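 *
 * Editor's note (inferred from bfa_ioc_ct_notify_fail() below, not part
 * of the original patch): the halt bit is written to both the local and
 * the alternate LL halt registers so the partner PCI function also sees
 * the failure, and each register is read back once only to flush the
 * posted writes.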
*/ +static void +bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) +{ + writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); + writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); + /* Wait for halt to take effect */ + readl(ioc->ioc_regs.ll_halt); + readl(ioc->ioc_regs.alt_ll_halt); +} + +/* Host to LPU mailbox message addresses */ +static const struct { + u32 hfn_mbox; + u32 lpu_mbox; + u32 hfn_pgn; +} ct_fnreg[] = { + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, + { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, + { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } +}; + +/* Host <-> LPU mailbox command/status registers - port 0 */ +static const struct { + u32 hfn; + u32 lpu; +} ct_p0reg[] = { + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } +}; + +/* Host <-> LPU mailbox command/status registers - port 1 */ +static const struct { + u32 hfn; + u32 lpu; +} ct_p1reg[] = { + { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } +}; + +static const struct { + u32 hfn_mbox; + u32 lpu_mbox; + u32 hfn_pgn; + u32 hfn; + u32 lpu; + u32 lpu_read; +} ct2_reg[] = { + { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU0_READ_STAT}, + { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU1_READ_STAT}, +}; + +static void +bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) +{ + void __iomem *rb; + int pcifn = bfa_ioc_pcifn(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; + + if (ioc->port_id == 0) { + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; + ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; + ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; + ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; + ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; + ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; + ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; + ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC; + + /** + * 
sram memory access + */ + ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + +static void +bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) +{ + void __iomem *rb; + int port = bfa_ioc_portid(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; + ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; + + if (port == 0) { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; + ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; + ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; + ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; + ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; + ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; + ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; + ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; + + /** + * sram memory access + */ + ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = rb + ERR_SET_REG; +} + +/* Initialize IOC to port mapping. 
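 *
 * Editor's sketch, derived from bfa_ioc_ct_map_port() below and not part
 * of the original patch: each PCI function owns an 8-bit field of the
 * personality register, so for function "fn" the port id is recovered as
 *
 *	r32     = readl(rb + FNC_PERS_REG) >> FNC_PERS_FN_SHIFT(fn);
 *	port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;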
*/ + +#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) +static void +bfa_ioc_ct_map_port(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + /** + * For catapult, base port id on personality register and IOC type + */ + r32 = readl(rb + FNC_PERS_REG); + r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); + ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; + +} + +static void +bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); + ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); +} + +/* Set interrupt mode for a function: INTX or MSIX */ +static void +bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32, mode; + + r32 = readl(rb + FNC_PERS_REG); + + mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & + __F0_INTX_STATUS; + + /** + * If already in desired mode, do not change anything + */ + if ((!msix && mode) || (msix && !mode)) + return; + + if (msix) + mode = __F0_INTX_STATUS_MSIX; + else + mode = __F0_INTX_STATUS_INTA; + + r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + + writel(r32, rb + FNC_PERS_REG); +} + +static bool +bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_read_stat); + if (r32) { + writel(1, ioc->ioc_regs.lpu_read_stat); + return true; + } + + return false; +} + +/* MSI-X resource allocation for 1860 with no asic block */ +#define HOSTFN_MSIX_DEFAULT 64 +#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 +#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c +#define __MSIX_VT_NUMVT__MK 0x003ff800 +#define __MSIX_VT_NUMVT__SH 11 +#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) +#define __MSIX_VT_OFST_ 0x000007ff +void +bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); + if (r32 & __MSIX_VT_NUMVT__MK) { + writel(r32 & __MSIX_VT_OFST_, + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); + return; + } + + writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | + HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_OFST_NUMVT); + writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); +} + +/* Cleanup hw semaphore and usecnt registers */ +static void +bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) +{ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + + /* + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. + */ + readl(ioc->ioc_regs.ioc_sem_reg); + bfa_nw_ioc_hw_sem_release(ioc); +} + +/* Synchronized IOC failure processing routines */ +static bool +bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + + /* + * Driver load time. If the sync required bit for this PCI fn + * is set, it is due to an unclean exit by the driver for this + * PCI fn in the previous incarnation. Whoever comes here first + * should clean it up, no matter which PCI fn. 
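 *
 * Editor's sketch (from the bfa_ioc_ct_sync_* macros at the top of this
 * file; not part of the original patch): ioc_fail_sync packs two 16-bit
 * bitmaps with one bit per PCI function -- sync-acked in the low half,
 * sync-required in the high half:
 *
 *	sync_ackd = r32 & 0x0000ffff;              -- get_sync_ackd
 *	sync_reqd = r32 >> BFA_IOC_SYNC_REQD_SH;   -- get_sync_reqd
 *	this_fn   = 1 << bfa_ioc_pcifn(ioc);       -- sync_pos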
+ */ + + if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { + writel(0, ioc->ioc_regs.ioc_fail_sync); + writel(1, ioc->ioc_regs.ioc_usage_reg); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + return true; + } + + return bfa_ioc_ct_sync_complete(ioc); +} +/* Synchronized IOC failure processing routines */ +static void +bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); + + writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | + bfa_ioc_ct_sync_pos(ioc); + + writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + + writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync); +} + +static bool +bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); + u32 tmp_ackd; + + if (sync_ackd == 0) + return true; + + /** + * The check below is to see whether any other PCI fn + * has reinitialized the ASIC (reset sync_ackd bits) + * and failed again while this IOC was waiting for hw + * semaphore (in bfa_iocpf_sm_semwait()). + */ + tmp_ackd = sync_ackd; + if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && + !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) + sync_ackd |= bfa_ioc_ct_sync_pos(ioc); + + if (sync_reqd == sync_ackd) { + writel(bfa_ioc_ct_clear_sync_ackd(r32), + ioc->ioc_regs.ioc_fail_sync); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); + return true; + } + + /** + * If another PCI fn reinitialized and failed again while + * this IOC was waiting for hw sem, the sync_ackd bit for + * this IOC need to be set again to allow reinitialization. 
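 *
 * Editor's note (inferred from the code above, not part of the original
 * patch): reinitialization proceeds only once every function that set a
 * sync-required bit has also acked it, i.e. when sync_reqd == sync_ackd;
 * at that point the acked half is cleared and both fwstate registers are
 * forced to BFI_IOC_FAIL.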
+ */ + if (tmp_ackd != sync_ackd) + writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); + + return false; +} + +static void +bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfa_status +bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) +{ + u32 pll_sclk, pll_fclk, r32; + bool fcmode = (asic_mode == BFI_ASIC_MODE_FC); + + pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST | + __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) | + __APP_PLL_SCLK_JITLMT0_1(3U) | + __APP_PLL_SCLK_CNTLMT0_1(1U); + pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST | + __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | + __APP_PLL_LCLK_JITLMT0_1(3U) | + __APP_PLL_LCLK_CNTLMT0_1(1U); + + if (fcmode) { + writel(0, (rb + OP_MODE)); + writel(__APP_EMS_CMLCKSEL | + __APP_EMS_REFCKBUFEN2 | + __APP_EMS_CHANNEL_SEL, + (rb + ETH_MAC_SER_REG)); + } else { + writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); + writel(__APP_EMS_REFCKBUFEN1, + (rb + ETH_MAC_SER_REG)); + } + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(pll_sclk | + __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); + writel(pll_sclk | + __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE, + rb + APP_PLL_LCLK_CTL_REG); + readl(rb + HOSTFN0_INT_MSK); + udelay(2000); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(pll_sclk | + __APP_PLL_SCLK_ENABLE, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_ENABLE, + rb + APP_PLL_LCLK_CTL_REG); + + if (!fcmode) { + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); + } + r32 = readl((rb + PSS_CTL_REG)); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + if (!fcmode) { + writel(0, (rb + PMM_1T_RESET_REG_P0)); + writel(0, (rb + PMM_1T_RESET_REG_P1)); + } + + writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); + udelay(1000); + r32 = readl((rb + MBIST_STAT_REG)); + writel(0, (rb + MBIST_CTL_REG)); + return BFA_STATUS_OK; +} + +static void +bfa_ioc_ct2_sclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put s_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); + r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | + __APP_PLL_SCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * Ignore mode and program 
for the max clock (which is FC16) + * Firmware/NFC will do the PLL init appropriately + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * while doing PLL init dont clock gate ethernet subsystem + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel((r32 | __ETH_CLK_ENABLE_PORT0), + (rb + CT2_CHIP_MISC_PRG)); + + r32 = readl((rb + CT2_PCIE_MISC_REG)); + writel((r32 | __ETH_CLK_ENABLE_PORT1), + (rb + CT2_PCIE_MISC_REG)); + + /* + * set sclk value + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | + __APP_PLL_SCLK_CLK_DIV2); + writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); + + /* + * Dont do clock gating for ethernet subsystem, firmware/NFC will + * do this appropriately + */ +} + +static void +bfa_ioc_ct2_lclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put l_clk PLL and PLL FSM in reset + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); + r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | + __APP_PLL_LCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set LPU speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_CHIP_MISC_PRG)); + writel(r32, (rb + CT2_CHIP_MISC_PRG)); + + /* + * set LPU half speed (set for FC16 which will work for other modes) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * set lclk for mode (set for FC16) + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); + r32 |= 0x20c1731b; + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); +} + +static void +bfa_ioc_ct2_mem_init(void __iomem *rb) +{ + u32 r32; + + r32 = readl((rb + PSS_CTL_REG)); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + + writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG)); + udelay(1000); + writel(0, (rb + CT2_MBIST_CTL_REG)); +} + +static void +bfa_ioc_ct2_mac_reset(void __iomem *rb) +{ + volatile u32 r32; + + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET), + (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET), + (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* put port0, port1 MAC & AHB in reset */ + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + (rb + CT2_CSI_MAC_CONTROL_REG(0))); + writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET), + (rb + CT2_CSI_MAC_CONTROL_REG(1))); +} + +#define CT2_NFC_MAX_DELAY 1000 +#define CT2_NFC_VER_VALID 0x143 +#define BFA_IOC_PLL_POLL 1000000 + +static bool +bfa_ioc_ct2_nfc_halted(void __iomem *rb) +{ + volatile u32 r32; + + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + return true; + + return false; +} + +static void +bfa_ioc_ct2_nfc_resume(void __iomem *rb) +{ + volatile u32 r32; + int i; + + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (!(r32 & 
__NFC_CONTROLLER_HALTED)) + return; + udelay(1000); + } + BUG_ON(1); +} + +static enum bfa_status +bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) +{ + volatile u32 wgn, r32; + u32 nfc_ver, i; + + wgn = readl(rb + CT2_WGN_STATUS); + + nfc_ver = readl(rb + CT2_RSC_GPR15_REG); + + if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) && + (nfc_ver >= CT2_NFC_VER_VALID)) { + if (bfa_ioc_ct2_nfc_halted(rb)) + bfa_ioc_ct2_nfc_resume(rb); + writel(__RESET_AND_START_SCLK_LCLK_PLLS, + rb + CT2_CSI_FW_CTL_SET_REG); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) + break; + } + BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) + break; + } + BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + udelay(1000); + + r32 = readl(rb + CT2_CSI_FW_CTL_REG); + BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + } else { + writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + break; + udelay(1000); + } + + bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* release soft reset on s_clk & l_clk */ + r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_SCLK_CTL_REG); + r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG)); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_LCLK_CTL_REG); + } + + /* Announce flash device presence, if flash was corrupted. */ + if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { + r32 = readl((rb + PSS_GPIO_OUT_REG)); + writel(r32 & ~1, rb + PSS_GPIO_OUT_REG); + r32 = readl((rb + PSS_GPIO_OE_REG)); + writel(r32 | 1, rb + PSS_GPIO_OE_REG); + } + + /* + * Mask the interrupts and clear any + * pending interrupts left by BIOS/EFI + */ + writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); + writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); + + /* For first time initialization, no need to clear interrupts */ + r32 = readl(rb + HOST_SEM5_REG); + if (r32 & 0x1) { + r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); + } + r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + if (r32 == 1) { + writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); + readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); + } + } + + bfa_ioc_ct2_mem_init(rb); + + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); + return BFA_STATUS_OK; +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c new file mode 100644 index 000000000..c07d5b937 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -0,0 +1,668 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* MSGQ module source file. */ + +#include "bfi.h" +#include "bfa_msgq.h" +#include "bfa_ioc.h" + +#define call_cmdq_ent_cbfn(_cmdq_ent, _status) \ +{ \ + bfa_msgq_cmdcbfn_t cbfn; \ + void *cbarg; \ + cbfn = (_cmdq_ent)->cbfn; \ + cbarg = (_cmdq_ent)->cbarg; \ + (_cmdq_ent)->cbfn = NULL; \ + (_cmdq_ent)->cbarg = NULL; \ + if (cbfn) { \ + cbfn(cbarg, (_status)); \ + } \ +} + +static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq); +static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq); + +enum cmdq_event { + CMDQ_E_START = 1, + CMDQ_E_STOP = 2, + CMDQ_E_FAIL = 3, + CMDQ_E_POST = 4, + CMDQ_E_INIT_RESP = 5, + CMDQ_E_DB_READY = 6, +}; + +bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq, + enum cmdq_event); + +static void +cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq) +{ + struct bfa_msgq_cmd_entry *cmdq_ent; + + cmdq->producer_index = 0; + cmdq->consumer_index = 0; + cmdq->flags = 0; + cmdq->token = 0; + cmdq->offset = 0; + cmdq->bytes_to_copy = 0; + while (!list_empty(&cmdq->pending_q)) { + bfa_q_deq(&cmdq->pending_q, &cmdq_ent); + bfa_q_qe_init(&cmdq_ent->qe); + call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED); + } +} + +static void +cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_START: + bfa_fsm_set_state(cmdq, cmdq_sm_init_wait); + break; + + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + /* No-op */ + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq) +{ + bfa_wc_down(&cmdq->msgq->init_wc); +} + +static void +cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + case CMDQ_E_INIT_RESP: + if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) { + cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE; + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + } else + bfa_fsm_set_state(cmdq, cmdq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq) +{ +} + +static void +cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq) +{ + bfa_msgq_cmdq_dbell(cmdq); +} + +static void +cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + case CMDQ_E_DB_READY: + if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) { + cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE; + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + } else + bfa_fsm_set_state(cmdq, 
cmdq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_msgq_cmdq_dbell_ready(void *arg) +{ + struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg; + bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY); +} + +static void +bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq) +{ + struct bfi_msgq_h2i_db *dbell = + (struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]); + + memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db)); + bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0); + dbell->mh.mtag.i2htok = 0; + dbell->idx.cmdq_pi = htons(cmdq->producer_index); + + if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb, + bfa_msgq_cmdq_dbell_ready, cmdq)) { + bfa_msgq_cmdq_dbell_ready(cmdq); + } +} + +static void +__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd) +{ + size_t len = cmd->msg_size; + int num_entries = 0; + size_t to_copy; + u8 *src, *dst; + + src = (u8 *)cmd->msg_hdr; + dst = (u8 *)cmdq->addr.kva; + dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE); + + while (len) { + to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ? + len : BFI_MSGQ_CMD_ENTRY_SIZE; + memcpy(dst, src, to_copy); + len -= to_copy; + src += BFI_MSGQ_CMD_ENTRY_SIZE; + BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth); + dst = (u8 *)cmdq->addr.kva; + dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE); + num_entries++; + } + +} + +static void +bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb; + struct bfa_msgq_cmd_entry *cmd; + int posted = 0; + + cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci); + + /* Walk through pending list to see if the command can be posted */ + while (!list_empty(&cmdq->pending_q)) { + cmd = + (struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q); + if (ntohs(cmd->msg_hdr->num_entries) <= + BFA_MSGQ_FREE_CNT(cmdq)) { + list_del(&cmd->qe); + __cmd_copy(cmdq, cmd); + posted = 1; + call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK); + } else { + break; + } + } + + if (posted) + bfa_fsm_send_event(cmdq, CMDQ_E_POST); +} + +static void +bfa_msgq_cmdq_copy_next(void *arg) +{ + struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg; + + if (cmdq->bytes_to_copy) + bfa_msgq_cmdq_copy_rsp(cmdq); +} + +static void +bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_cmdq_copy_req *req = + (struct bfi_msgq_i2h_cmdq_copy_req *)mb; + + cmdq->token = 0; + cmdq->offset = ntohs(req->offset); + cmdq->bytes_to_copy = ntohs(req->len); + bfa_msgq_cmdq_copy_rsp(cmdq); +} + +static void +bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq) +{ + struct bfi_msgq_h2i_cmdq_copy_rsp *rsp = + (struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0]; + int copied; + u8 *addr = (u8 *)cmdq->addr.kva; + + memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp)); + bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0); + rsp->mh.mtag.i2htok = htons(cmdq->token); + copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? 
BFI_CMD_COPY_SZ : + cmdq->bytes_to_copy; + addr += cmdq->offset; + memcpy(rsp->data, addr, copied); + + cmdq->token++; + cmdq->offset += copied; + cmdq->bytes_to_copy -= copied; + + if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb, + bfa_msgq_cmdq_copy_next, cmdq)) { + bfa_msgq_cmdq_copy_next(cmdq); + } +} + +static void +bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq) +{ + cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY; + INIT_LIST_HEAD(&cmdq->pending_q); + cmdq->msgq = msgq; + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); +} + +static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq); + +enum rspq_event { + RSPQ_E_START = 1, + RSPQ_E_STOP = 2, + RSPQ_E_FAIL = 3, + RSPQ_E_RESP = 4, + RSPQ_E_INIT_RESP = 5, + RSPQ_E_DB_READY = 6, +}; + +bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event); +bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq, + enum rspq_event); +bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event); +bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq, + enum rspq_event); + +static void +rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq) +{ + rspq->producer_index = 0; + rspq->consumer_index = 0; + rspq->flags = 0; +} + +static void +rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_START: + bfa_fsm_set_state(rspq, rspq_sm_init_wait); + break; + + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq) +{ + bfa_wc_down(&rspq->msgq->init_wc); +} + +static void +rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_FAIL: + case RSPQ_E_STOP: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_INIT_RESP: + bfa_fsm_set_state(rspq, rspq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq) +{ +} + +static void +rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_RESP: + bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq) +{ + if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc)) + bfa_msgq_rspq_dbell(rspq); +} + +static void +rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_RESP: + rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE; + break; + + case RSPQ_E_DB_READY: + if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) { + rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE; + bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); + } else + bfa_fsm_set_state(rspq, rspq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_msgq_rspq_dbell_ready(void *arg) +{ + struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg; + bfa_fsm_send_event(rspq, RSPQ_E_DB_READY); +} + +static void +bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq) +{ + struct bfi_msgq_h2i_db *dbell = + (struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]); + + memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db)); + bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0); + dbell->mh.mtag.i2htok = 0; + dbell->idx.rspq_ci = htons(rspq->consumer_index); + + 
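	/*
	 * Editor's note, inferred from the pattern used throughout this file
	 * and not part of the original patch: bfa_nw_ioc_mbox_queue() is
	 * treated as returning true only when the command had to be queued
	 * for deferred posting; when it is written out immediately the
	 * "doorbell ready" callback below is invoked inline so the FSM still
	 * receives its DB_READY event.
	 */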
if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb, + bfa_msgq_rspq_dbell_ready, rspq)) { + bfa_msgq_rspq_dbell_ready(rspq); + } +} + +static void +bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb; + struct bfi_msgq_mhdr *msghdr; + int num_entries; + int mc; + u8 *rspq_qe; + + rspq->producer_index = ntohs(dbell->idx.rspq_pi); + + while (rspq->consumer_index != rspq->producer_index) { + rspq_qe = (u8 *)rspq->addr.kva; + rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE); + msghdr = (struct bfi_msgq_mhdr *)rspq_qe; + + mc = msghdr->msg_class; + num_entries = ntohs(msghdr->num_entries); + + if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL)) + break; + + (rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr); + + BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries, + rspq->depth); + } + + bfa_fsm_send_event(rspq, RSPQ_E_RESP); +} + +static void +bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq) +{ + rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY; + rspq->msgq = msgq; + bfa_fsm_set_state(rspq, rspq_sm_stopped); +} + +static void +bfa_msgq_init_rsp(struct bfa_msgq *msgq, + struct bfi_mbmsg *mb) +{ + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP); +} + +static void +bfa_msgq_init(void *arg) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)arg; + struct bfi_msgq_cfg_req *msgq_cfg = + (struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0]; + + memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req)); + bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0); + msgq_cfg->mh.mtag.i2htok = 0; + + bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa); + msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth); + bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa); + msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth); + + bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL); +} + +static void +bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg; + + switch (msg->mh.msg_id) { + case BFI_MSGQ_I2H_INIT_RSP: + bfa_msgq_init_rsp(msgq, msg); + break; + + case BFI_MSGQ_I2H_DOORBELL_PI: + bfa_msgq_rspq_pi_update(&msgq->rspq, msg); + break; + + case BFI_MSGQ_I2H_DOORBELL_CI: + bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg); + break; + + case BFI_MSGQ_I2H_CMDQ_COPY_REQ: + bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg); + break; + + default: + BUG_ON(1); + } +} + +static void +bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg; + + switch (event) { + case BFA_IOC_E_ENABLED: + bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq); + bfa_wc_up(&msgq->init_wc); + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START); + bfa_wc_up(&msgq->init_wc); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START); + bfa_wc_wait(&msgq->init_wc); + break; + + case BFA_IOC_E_DISABLED: + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP); + break; + + case BFA_IOC_E_FAILED: + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL); + break; + + default: + break; + } +} + +u32 +bfa_msgq_meminfo(void) +{ + return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) + + roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ); +} + +void +bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa) +{ + msgq->cmdq.addr.kva = kva; + msgq->cmdq.addr.pa = pa; + + kva += roundup(BFA_MSGQ_CMDQ_SIZE, 
BFA_DMA_ALIGN_SZ); + pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ); + + msgq->rspq.addr.kva = kva; + msgq->rspq.addr.pa = pa; +} + +void +bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc) +{ + msgq->ioc = ioc; + + bfa_msgq_cmdq_attach(&msgq->cmdq, msgq); + bfa_msgq_rspq_attach(&msgq->rspq, msgq); + + bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq); + bfa_q_qe_init(&msgq->ioc_notify); + bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq); + bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify); +} + +void +bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc, + bfa_msgq_mcfunc_t cbfn, void *cbarg) +{ + msgq->rspq.rsphdlr[mc].cbfn = cbfn; + msgq->rspq.rsphdlr[mc].cbarg = cbarg; +} + +void +bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd) +{ + if (ntohs(cmd->msg_hdr->num_entries) <= + BFA_MSGQ_FREE_CNT(&msgq->cmdq)) { + __cmd_copy(&msgq->cmdq, cmd); + call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK); + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST); + } else { + list_add_tail(&cmd->qe, &msgq->cmdq.pending_q); + } +} + +void +bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len) +{ + struct bfa_msgq_rspq *rspq = &msgq->rspq; + size_t len = buf_len; + size_t to_copy; + int ci; + u8 *src, *dst; + + ci = rspq->consumer_index; + src = (u8 *)rspq->addr.kva; + src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE); + dst = buf; + + while (len) { + to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ? + len : BFI_MSGQ_RSP_ENTRY_SIZE; + memcpy(dst, src, to_copy); + len -= to_copy; + dst += BFI_MSGQ_RSP_ENTRY_SIZE; + BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth); + src = (u8 *)rspq->addr.kva; + src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE); + } +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h new file mode 100644 index 000000000..66bc8b5ac --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h @@ -0,0 +1,131 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_MSGQ_H__ +#define __BFA_MSGQ_H__ + +#include "bfa_defs.h" +#include "bfi.h" +#include "bfa_ioc.h" +#include "bfa_cs.h" + +#define BFA_MSGQ_FREE_CNT(_q) \ + (((_q)->consumer_index - (_q)->producer_index - 1) & ((_q)->depth - 1)) + +#define BFA_MSGQ_INDX_ADD(_q_indx, _qe_num, _q_depth) \ + ((_q_indx) = (((_q_indx) + (_qe_num)) & ((_q_depth) - 1))) + +#define BFA_MSGQ_CMDQ_NUM_ENTRY 128 +#define BFA_MSGQ_CMDQ_SIZE \ + (BFI_MSGQ_CMD_ENTRY_SIZE * BFA_MSGQ_CMDQ_NUM_ENTRY) + +#define BFA_MSGQ_RSPQ_NUM_ENTRY 128 +#define BFA_MSGQ_RSPQ_SIZE \ + (BFI_MSGQ_RSP_ENTRY_SIZE * BFA_MSGQ_RSPQ_NUM_ENTRY) + +#define bfa_msgq_cmd_set(_cmd, _cbfn, _cbarg, _msg_size, _msg_hdr) \ +do { \ + (_cmd)->cbfn = (_cbfn); \ + (_cmd)->cbarg = (_cbarg); \ + (_cmd)->msg_size = (_msg_size); \ + (_cmd)->msg_hdr = (_msg_hdr); \ +} while (0) + +struct bfa_msgq; + +typedef void (*bfa_msgq_cmdcbfn_t)(void *cbarg, enum bfa_status status); + +struct bfa_msgq_cmd_entry { + struct list_head qe; + bfa_msgq_cmdcbfn_t cbfn; + void *cbarg; + size_t msg_size; + struct bfi_msgq_mhdr *msg_hdr; +}; + +enum bfa_msgq_cmdq_flags { + BFA_MSGQ_CMDQ_F_DB_UPDATE = 1, +}; + +struct bfa_msgq_cmdq { + bfa_fsm_t fsm; + enum bfa_msgq_cmdq_flags flags; + + u16 producer_index; + u16 consumer_index; + u16 depth; /* FW Q depth is 16 bits */ + struct bfa_dma addr; + struct bfa_mbox_cmd dbell_mb; + + u16 token; + int offset; + int bytes_to_copy; + struct bfa_mbox_cmd copy_mb; + + struct list_head pending_q; /* pending command queue */ + + struct bfa_msgq *msgq; +}; + +enum bfa_msgq_rspq_flags { + BFA_MSGQ_RSPQ_F_DB_UPDATE = 1, +}; + +typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr); + +struct bfa_msgq_rspq { + bfa_fsm_t fsm; + enum bfa_msgq_rspq_flags flags; + + u16 producer_index; + u16 consumer_index; + u16 depth; /* FW Q depth is 16 bits */ + struct bfa_dma addr; + struct bfa_mbox_cmd dbell_mb; + + int nmclass; + struct { + bfa_msgq_mcfunc_t cbfn; + void *cbarg; + } rsphdlr[BFI_MC_MAX]; + + struct bfa_msgq *msgq; +}; + +struct bfa_msgq { + struct bfa_msgq_cmdq cmdq; + struct bfa_msgq_rspq rspq; + + struct bfa_wc init_wc; + struct bfa_mbox_cmd init_mb; + + struct bfa_ioc_notify ioc_notify; + struct bfa_ioc *ioc; +}; + +u32 bfa_msgq_meminfo(void); +void bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa); +void bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc); +void bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc, + bfa_msgq_mcfunc_t cbfn, void *cbarg); +void bfa_msgq_cmd_post(struct bfa_msgq *msgq, + struct bfa_msgq_cmd_entry *cmd); +void bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len); + +#endif diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h new file mode 100644 index 000000000..2bcde4042 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -0,0 +1,571 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFI_H__ +#define __BFI_H__ + +#include "bfa_defs.h" + +#pragma pack(1) + +/* BFI FW image type */ +#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ +#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) +#define BFI_FLASH_IMAGE_SZ 0x100000 + +/* Msg header common to all msgs */ +struct bfi_mhdr { + u8 msg_class; /*!< @ref enum bfi_mclass */ + u8 msg_id; /*!< msg opcode with in the class */ + union { + struct { + u8 qid; + u8 fn_lpu; /*!< msg destination */ + } h2i; + u16 i2htok; /*!< token in msgs to host */ + } mtag; +}; + +#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu)) +#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1) +#define bfi_mhdr_2_qid(_mh) ((_mh)->mtag.h2i.qid) + +#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \ +} while (0) + +#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.i2htok = (_i2htok); \ +} while (0) + +/* + * Message opcodes: 0-127 to firmware, 128-255 to host + */ +#define BFI_I2H_OPCODE_BASE 128 +#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) + +/**************************************************************************** + * + * Scatter Gather Element and Page definition + * + **************************************************************************** + */ + +/* DMA addresses */ +union bfi_addr_u { + struct { + u32 addr_lo; + u32 addr_hi; + } a32; +}; + +/* Generic DMA addr-len pair. */ +struct bfi_alen { + union bfi_addr_u al_addr; /* DMA addr of buffer */ + u32 al_len; /* length of buffer */ +}; + +/* + * Large Message structure - 128 Bytes size Msgs + */ +#define BFI_LMSG_SZ 128 +#define BFI_LMSG_PL_WSZ \ + ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) + +/* Mailbox message structure */ +#define BFI_MBMSG_SZ 7 +struct bfi_mbmsg { + struct bfi_mhdr mh; + u32 pl[BFI_MBMSG_SZ]; +}; + +/* Supported PCI function class codes (personality) */ +enum bfi_pcifn_class { + BFI_PCIFN_CLASS_FC = 0x0c04, + BFI_PCIFN_CLASS_ETH = 0x0200, +}; + +/* Message Classes */ +enum bfi_mclass { + BFI_MC_IOC = 1, /*!< IO Controller (IOC) */ + BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */ + BFI_MC_FLASH = 3, /*!< Flash message class */ + BFI_MC_CEE = 4, /*!< CEE */ + BFI_MC_FCPORT = 5, /*!< FC port */ + BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */ + BFI_MC_LL = 7, /*!< Link Layer */ + BFI_MC_UF = 8, /*!< Unsolicited frame receive */ + BFI_MC_FCXP = 9, /*!< FC Transport */ + BFI_MC_LPS = 10, /*!< lport fc login services */ + BFI_MC_RPORT = 11, /*!< Remote port */ + BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */ + BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */ + BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */ + BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */ + BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */ + BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */ + BFI_MC_TSKIM = 18, /*!< Initiator Task management */ + BFI_MC_SBOOT = 19, /*!< SAN boot services */ + BFI_MC_IPFC = 20, /*!< IP over FC Msgs */ + BFI_MC_PORT = 21, /*!< Physical port */ + BFI_MC_SFP = 22, /*!< SFP module */ + BFI_MC_MSGQ = 23, /*!< MSGQ */ + BFI_MC_ENET = 24, /*!< ENET commands/responses */ + BFI_MC_PHY = 25, /*!< External PHY message class */ + BFI_MC_NBOOT = 26, /*!< Network Boot */ + BFI_MC_TIO_READ = 27, /*!< 
read IO (Target mode) */ + BFI_MC_TIO_WRITE = 28, /*!< write IO (Target mode) */ + BFI_MC_TIO_DATA_XFERED = 29, /*!< ds transferred (target mode) */ + BFI_MC_TIO_IO = 30, /*!< IO (Target mode) */ + BFI_MC_TIO = 31, /*!< IO (target mode) */ + BFI_MC_MFG = 32, /*!< MFG/ASIC block commands */ + BFI_MC_EDMA = 33, /*!< EDMA copy commands */ + BFI_MC_MAX = 34 +}; + +#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ + +#define BFI_FWBOOT_ENV_OS 0 + +/*---------------------------------------------------------------------- + * IOC + *---------------------------------------------------------------------- + */ + +/* Different asic generations */ +enum bfi_asic_gen { + BFI_ASIC_GEN_CB = 1, + BFI_ASIC_GEN_CT = 2, + BFI_ASIC_GEN_CT2 = 3, +}; + +enum bfi_asic_mode { + BFI_ASIC_MODE_FC = 1, /* FC up to 8G speed */ + BFI_ASIC_MODE_FC16 = 2, /* FC up to 16G speed */ + BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */ + BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */ +}; + +enum bfi_ioc_h2i_msgs { + BFI_IOC_H2I_ENABLE_REQ = 1, + BFI_IOC_H2I_DISABLE_REQ = 2, + BFI_IOC_H2I_GETATTR_REQ = 3, + BFI_IOC_H2I_DBG_SYNC = 4, + BFI_IOC_H2I_DBG_DUMP = 5, +}; + +enum bfi_ioc_i2h_msgs { + BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), + BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2), + BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), + BFI_IOC_I2H_HBEAT = BFA_I2HM(4), +}; + +/* BFI_IOC_H2I_GETATTR_REQ message */ +struct bfi_ioc_getattr_req { + struct bfi_mhdr mh; + union bfi_addr_u attr_addr; +}; + +struct bfi_ioc_attr { + u64 mfg_pwwn; /*!< Mfg port wwn */ + u64 mfg_nwwn; /*!< Mfg node wwn */ + mac_t mfg_mac; /*!< Mfg mac */ + u8 port_mode; /* enum bfi_port_mode */ + u8 rsvd_a; + u64 pwwn; + u64 nwwn; + mac_t mac; /*!< PBC or Mfg mac */ + u16 rsvd_b; + mac_t fcoe_mac; + u16 rsvd_c; + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 rx_bbcredit; /*!< receive buffer credits */ + u32 adapter_prop; /*!< adapter properties */ + u16 maxfrsize; /*!< max receive frame size */ + char asic_rev; + u8 rsvd_d; + char fw_version[BFA_VERSION_LEN]; + char optrom_version[BFA_VERSION_LEN]; + struct bfa_mfg_vpd vpd; + u32 card_type; /*!< card type */ +}; + +/* BFI_IOC_I2H_GETATTR_REPLY message */ +struct bfi_ioc_getattr_reply { + struct bfi_mhdr mh; /*!< Common msg header */ + u8 status; /*!< cfg reply status */ + u8 rsvd[3]; +}; + +/* Firmware memory page offsets */ +#define BFI_IOC_SMEM_PG0_CB (0x40) +#define BFI_IOC_SMEM_PG0_CT (0x180) + +/* Firmware statistic offset */ +#define BFI_IOC_FWSTATS_OFF (0x6B40) +#define BFI_IOC_FWSTATS_SZ (4096) + +/* Firmware trace offset */ +#define BFI_IOC_TRC_OFF (0x4b00) +#define BFI_IOC_TRC_ENTS 256 +#define BFI_IOC_TRC_ENT_SZ 16 +#define BFI_IOC_TRC_HDR_SZ 32 + +#define BFI_IOC_FW_SIGNATURE (0xbfadbfad) +#define BFI_IOC_FW_INV_SIGN (0xdeaddead) +#define BFI_IOC_MD5SUM_SZ 4 + +struct bfi_ioc_fwver { +#ifdef __BIG_ENDIAN + u8 patch; + u8 maint; + u8 minor; + u8 major; + u8 rsvd[2]; + u8 build; + u8 phase; +#else + u8 major; + u8 minor; + u8 maint; + u8 patch; + u8 phase; + u8 build; + u8 rsvd[2]; +#endif +}; + +struct bfi_ioc_image_hdr { + u32 signature; /*!< constant signature */ + u8 asic_gen; /*!< asic generation */ + u8 asic_mode; + u8 port0_mode; /*!< device mode for port 0 */ + u8 port1_mode; /*!< device mode for port 1 */ + u32 exec; /*!< exec vector */ + u32 bootenv; /*!< firmware boot env */ + u32 rsvd_b[2]; + struct bfi_ioc_fwver fwver; + u32 md5sum[BFI_IOC_MD5SUM_SZ]; +}; + +enum bfi_ioc_img_ver_cmp { + BFI_IOC_IMG_VER_INCOMP, + BFI_IOC_IMG_VER_OLD, + 
BFI_IOC_IMG_VER_SAME, + BFI_IOC_IMG_VER_BETTER +}; + +#define BFI_FWBOOT_DEVMODE_OFF 4 +#define BFI_FWBOOT_TYPE_OFF 8 +#define BFI_FWBOOT_ENV_OFF 12 +#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \ + (((u32)(__asic_gen)) << 24 | \ + ((u32)(__asic_mode)) << 16 | \ + ((u32)(__p0_mode)) << 8 | \ + ((u32)(__p1_mode))) + +enum bfi_fwboot_type { + BFI_FWBOOT_TYPE_NORMAL = 0, + BFI_FWBOOT_TYPE_FLASH = 1, + BFI_FWBOOT_TYPE_MEMTEST = 2, +}; + +enum bfi_port_mode { + BFI_PORT_MODE_FC = 1, + BFI_PORT_MODE_ETH = 2, +}; + +struct bfi_ioc_hbeat { + struct bfi_mhdr mh; /*!< common msg header */ + u32 hb_count; /*!< current heart beat count */ +}; + +/* IOC hardware/firmware state */ +enum bfi_ioc_state { + BFI_IOC_UNINIT = 0, /*!< not initialized */ + BFI_IOC_INITING = 1, /*!< h/w is being initialized */ + BFI_IOC_HWINIT = 2, /*!< h/w is initialized */ + BFI_IOC_CFG = 3, /*!< IOC configuration in progress */ + BFI_IOC_OP = 4, /*!< IOC is operational */ + BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */ + BFI_IOC_DISABLED = 6, /*!< IOC is disabled */ + BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */ + BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */ + BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */ +}; + +#define BFI_IOC_ENDIAN_SIG 0x12345678 + +enum { + BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */ + BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */ + BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */ + BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */ + BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */ + BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */ + BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */ + BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapaters */ + BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */ + BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */ +}; + +#define BFI_ADAPTER_GETP(__prop, __adap_prop) \ + (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ + BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_SETP(__prop, __val) \ + ((__val) << BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_IS_PROTO(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_PROTO) +#define BFI_ADAPTER_IS_TTV(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_TTV) +#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_UNSUPP) +#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \ + ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ + BFI_ADAPTER_UNSUPP)) + +/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */ +struct bfi_ioc_ctrl_req { + struct bfi_mhdr mh; + u16 clscode; + u16 rsvd; + u32 tv_sec; +}; + +/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */ +struct bfi_ioc_ctrl_reply { + struct bfi_mhdr mh; /*!< Common msg header */ + u8 status; /*!< enable/disable status */ + u8 port_mode; /*!< enum bfa_mode */ + u8 cap_bm; /*!< capability bit mask */ + u8 rsvd; +}; + +#define BFI_IOC_MSGSZ 8 +/* H2I Messages */ +union bfi_ioc_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_ioc_ctrl_req enable_req; + struct bfi_ioc_ctrl_req disable_req; + struct bfi_ioc_getattr_req getattr_req; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + +/* I2H Messages */ +union bfi_ioc_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_ioc_ctrl_reply fw_event; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + +/*---------------------------------------------------------------------- + * MSGQ + *---------------------------------------------------------------------- + */ + +enum bfi_msgq_h2i_msgs { + BFI_MSGQ_H2I_INIT_REQ = 
1, + BFI_MSGQ_H2I_DOORBELL_PI = 2, + BFI_MSGQ_H2I_DOORBELL_CI = 3, + BFI_MSGQ_H2I_CMDQ_COPY_RSP = 4, +}; + +enum bfi_msgq_i2h_msgs { + BFI_MSGQ_I2H_INIT_RSP = BFA_I2HM(BFI_MSGQ_H2I_INIT_REQ), + BFI_MSGQ_I2H_DOORBELL_PI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_PI), + BFI_MSGQ_I2H_DOORBELL_CI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_CI), + BFI_MSGQ_I2H_CMDQ_COPY_REQ = BFA_I2HM(BFI_MSGQ_H2I_CMDQ_COPY_RSP), +}; + +/* Messages(commands/responsed/AENS will have the following header */ +struct bfi_msgq_mhdr { + u8 msg_class; + u8 msg_id; + u16 msg_token; + u16 num_entries; + u8 enet_id; + u8 rsvd[1]; +}; + +#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_mid); \ + (_mh).msg_token = (_tok); \ + (_mh).enet_id = (_enet_id); \ +} while (0) + +/* + * Mailbox for messaging interface + */ +#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */ +#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */ + +#define bfi_msgq_num_cmd_entries(_size) \ + (((_size) + BFI_MSGQ_CMD_ENTRY_SIZE - 1) / BFI_MSGQ_CMD_ENTRY_SIZE) + +struct bfi_msgq { + union bfi_addr_u addr; + u16 q_depth; /* Total num of entries in the queue */ + u8 rsvd[2]; +}; + +/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */ +struct bfi_msgq_cfg_req { + struct bfi_mhdr mh; + struct bfi_msgq cmdq; + struct bfi_msgq rspq; +}; + +/* BFI_ENET_MSGQ_CFG_RSP */ +struct bfi_msgq_cfg_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* BFI_MSGQ_H2I_DOORBELL */ +struct bfi_msgq_h2i_db { + struct bfi_mhdr mh; + union { + u16 cmdq_pi; + u16 rspq_ci; + } idx; +}; + +/* BFI_MSGQ_I2H_DOORBELL */ +struct bfi_msgq_i2h_db { + struct bfi_mhdr mh; + union { + u16 rspq_pi; + u16 cmdq_ci; + } idx; +}; + +#define BFI_CMD_COPY_SZ 28 + +/* BFI_MSGQ_H2I_CMD_COPY_RSP */ +struct bfi_msgq_h2i_cmdq_copy_rsp { + struct bfi_mhdr mh; + u8 data[BFI_CMD_COPY_SZ]; +}; + +/* BFI_MSGQ_I2H_CMD_COPY_REQ */ +struct bfi_msgq_i2h_cmdq_copy_req { + struct bfi_mhdr mh; + u16 offset; + u16 len; +}; + +/* + * FLASH module specific + */ +enum bfi_flash_h2i_msgs { + BFI_FLASH_H2I_QUERY_REQ = 1, + BFI_FLASH_H2I_ERASE_REQ = 2, + BFI_FLASH_H2I_WRITE_REQ = 3, + BFI_FLASH_H2I_READ_REQ = 4, + BFI_FLASH_H2I_BOOT_VER_REQ = 5, +}; + +enum bfi_flash_i2h_msgs { + BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1), + BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2), + BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4), + BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5), + BFI_FLASH_I2H_EVENT = BFA_I2HM(127), +}; + +/* + * Flash query request + */ +struct bfi_flash_query_req { + struct bfi_mhdr mh; /* Common msg header */ + struct bfi_alen alen; +}; + +/* + * Flash write request + */ +struct bfi_flash_write_req { + struct bfi_mhdr mh; /* Common msg header */ + struct bfi_alen alen; + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 last; + u8 rsv[2]; + u32 offset; + u32 length; +}; + +/* + * Flash read request + */ +struct bfi_flash_read_req { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen alen; +}; + +/* + * Flash query response + */ +struct bfi_flash_query_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 status; +}; + +/* + * Flash read response + */ +struct bfi_flash_read_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + +/* + * Flash write response + */ +struct bfi_flash_write_rsp { 
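	/*
	 * Usage sketch for the MSGQ helpers earlier in this header; it
	 * mirrors how the driver builds command headers in bna_enet.c and is
	 * shown here only for illustration:
	 *
	 *	struct bfi_enet_enable_req *req = <command buffer>;
	 *
	 *	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
	 *			  BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	 *	req->mh.num_entries = htons(
	 *		bfi_msgq_num_cmd_entries(sizeof(*req)));
	 *
	 * With 64-byte command entries, bfi_msgq_num_cmd_entries() rounds
	 * the message size up: a request of up to 64 bytes occupies one
	 * entry, 65..128 bytes two entries, and so on.
	 */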
+ struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +}; + +#pragma pack() + +#endif /* __BFI_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h new file mode 100644 index 000000000..bd605bee7 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -0,0 +1,164 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFI_CNA_H__ +#define __BFI_CNA_H__ + +#include "bfi.h" +#include "bfa_defs_cna.h" + +#pragma pack(1) + +enum bfi_port_h2i { + BFI_PORT_H2I_ENABLE_REQ = (1), + BFI_PORT_H2I_DISABLE_REQ = (2), + BFI_PORT_H2I_GET_STATS_REQ = (3), + BFI_PORT_H2I_CLEAR_STATS_REQ = (4), +}; + +enum bfi_port_i2h { + BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3), + BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), +}; + +/* Generic REQ type */ +struct bfi_port_generic_req { + struct bfi_mhdr mh; /*!< msg header */ + u32 msgtag; /*!< msgtag for reply */ + u32 rsvd; +}; + +/* Generic RSP type */ +struct bfi_port_generic_rsp { + struct bfi_mhdr mh; /*!< common msg header */ + u8 status; /*!< port enable status */ + u8 rsvd[3]; + u32 msgtag; /*!< msgtag for reply */ +}; + +/* BFI_PORT_H2I_GET_STATS_REQ */ +struct bfi_port_get_stats_req { + struct bfi_mhdr mh; /*!< common msg header */ + union bfi_addr_u dma_addr; +}; + +union bfi_port_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_port_generic_req enable_req; + struct bfi_port_generic_req disable_req; + struct bfi_port_get_stats_req getstats_req; + struct bfi_port_generic_req clearstats_req; +}; + +union bfi_port_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_port_generic_rsp enable_rsp; + struct bfi_port_generic_rsp disable_rsp; + struct bfi_port_generic_rsp getstats_rsp; + struct bfi_port_generic_rsp clearstats_rsp; +}; + +/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */ +enum bfi_cee_h2i_msgs { + BFI_CEE_H2I_GET_CFG_REQ = 1, + BFI_CEE_H2I_RESET_STATS = 2, + BFI_CEE_H2I_GET_STATS_REQ = 3, +}; + +/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */ +enum bfi_cee_i2h_msgs { + BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1), + BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2), + BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3), +}; + +/* Data structures */ + +/* + * @brief H2I command structure for resetting the stats. + * BFI_CEE_H2I_RESET_STATS + */ +struct bfi_lldp_reset_stats { + struct bfi_mhdr mh; +}; + +/* + * @brief H2I command structure for resetting the stats. 
+ * BFI_CEE_H2I_RESET_STATS + */ +struct bfi_cee_reset_stats { + struct bfi_mhdr mh; +}; + +/* + * @brief get configuration command from host + * BFI_CEE_H2I_GET_CFG_REQ + */ +struct bfi_cee_get_req { + struct bfi_mhdr mh; + union bfi_addr_u dma_addr; +}; + +/* + * @brief reply message from firmware + * BFI_CEE_I2H_GET_CFG_RSP + */ +struct bfi_cee_get_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* + * @brief get configuration command from host + * BFI_CEE_H2I_GET_STATS_REQ + */ +struct bfi_cee_stats_req { + struct bfi_mhdr mh; + union bfi_addr_u dma_addr; +}; + +/* + * @brief reply message from firmware + * BFI_CEE_I2H_GET_STATS_RSP + */ +struct bfi_cee_stats_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +}; + +/* @brief mailbox command structures from host to firmware */ +union bfi_cee_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_cee_get_req get_req; + struct bfi_cee_stats_req stats_req; +}; + +/* @brief mailbox message structures from firmware to host */ +union bfi_cee_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_cee_get_rsp get_rsp; + struct bfi_cee_stats_rsp stats_rsp; +}; + +#pragma pack() + +#endif /* __BFI_CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h new file mode 100644 index 000000000..bccca3bba --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -0,0 +1,859 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* BNA Hardware and Firmware Interface */ + +/* Skipping statistics collection to avoid clutter. + * Command is no longer needed: + * MTU + * TxQ Stop + * RxQ Stop + * RxF Enable/Disable + * + * HDS-off request is dynamic + * keep structures as multiple of 32-bit fields for alignment. + * All values must be written in big-endian. + */ +#ifndef __BFI_ENET_H__ +#define __BFI_ENET_H__ + +#include "bfa_defs.h" +#include "bfi.h" + +#pragma pack(1) + +#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */ + +#define BFI_ENET_TXQ_PRIO_MAX 8 +#define BFI_ENET_RX_QSET_MAX 16 +#define BFI_ENET_TXQ_WI_VECT_MAX 4 + +#define BFI_ENET_VLAN_ID_MAX 4096 +#define BFI_ENET_VLAN_BLOCK_SIZE 512 /* in bits */ +#define BFI_ENET_VLAN_BLOCKS_MAX \ + (BFI_ENET_VLAN_ID_MAX / BFI_ENET_VLAN_BLOCK_SIZE) +#define BFI_ENET_VLAN_WORD_SIZE 32 /* in bits */ +#define BFI_ENET_VLAN_WORDS_MAX \ + (BFI_ENET_VLAN_BLOCK_SIZE / BFI_ENET_VLAN_WORD_SIZE) + +#define BFI_ENET_RSS_RIT_MAX 64 /* entries */ +#define BFI_ENET_RSS_KEY_LEN 10 /* 32-bit words */ + +union bfi_addr_be_u { + struct { + u32 addr_hi; /* Most Significant 32-bits */ + u32 addr_lo; /* Least Significant 32-Bits */ + } a32; +}; + +/* T X Q U E U E D E F I N E S */ +/* TxQ Vector (a.k.a. 
Tx-Buffer Descriptor) */ +/* TxQ Entry Opcodes */ +#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ +#define BFI_ENET_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */ +#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */ + +/* TxQ Entry Control Flags */ +#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8) +#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5) +#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4) +#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3) +#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2) +#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1) +#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0) + +struct bfi_enet_txq_wi_base { + u8 reserved; + u8 num_vectors; /* number of vectors present */ + u16 opcode; + /* BFI_ENET_TXQ_WI_SEND or BFI_ENET_TXQ_WI_SEND_LSO */ + u16 flags; /* OR of all the flags */ + u16 l4_hdr_size_n_offset; + u16 vlan_tag; + u16 lso_mss; /* Only 14 LSB are valid */ + u32 frame_length; /* Only 24 LSB are valid */ +}; + +struct bfi_enet_txq_wi_ext { + u16 reserved; + u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */ + u32 reserved2[3]; +}; + +struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */ + u16 reserved; + u16 length; /* Only 14 LSB are valid */ + union bfi_addr_be_u addr; +}; + +/* TxQ Entry Structure */ +struct bfi_enet_txq_entry { + union { + struct bfi_enet_txq_wi_base base; + struct bfi_enet_txq_wi_ext ext; + } wi; + struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX]; +}; + +#define wi_hdr wi.base +#define wi_ext_hdr wi.ext + +#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ + (((_hdr_size) << 10) | ((_offset) & 0x3FF)) + +/* R X Q U E U E D E F I N E S */ +struct bfi_enet_rxq_entry { + union bfi_addr_be_u rx_buffer; +}; + +/* R X C O M P L E T I O N Q U E U E D E F I N E S */ +/* CQ Entry Flags */ +#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0) +#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1) +#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2) +#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3) + +#define BFI_ENET_CQ_EF_RSVD1 (1 << 4) +#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5) +#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6) +#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7) + +#define BFI_ENET_CQ_EF_UDP (1 << 8) +#define BFI_ENET_CQ_EF_TCP (1 << 9) +#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10) +#define BFI_ENET_CQ_EF_IPV6 (1 << 11) + +#define BFI_ENET_CQ_EF_IPV4 (1 << 12) +#define BFI_ENET_CQ_EF_VLAN (1 << 13) +#define BFI_ENET_CQ_EF_RSS (1 << 14) +#define BFI_ENET_CQ_EF_RSVD2 (1 << 15) + +#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16) +#define BFI_ENET_CQ_EF_MCAST (1 << 17) +#define BFI_ENET_CQ_EF_BCAST (1 << 18) +#define BFI_ENET_CQ_EF_REMOTE (1 << 19) + +#define BFI_ENET_CQ_EF_LOCAL (1 << 20) + +/* CQ Entry Structure */ +struct bfi_enet_cq_entry { + u32 flags; + u16 vlan_tag; + u16 length; + u32 rss_hash; + u8 valid; + u8 reserved1; + u8 reserved2; + u8 rxq_id; +}; + +/* E N E T C O N T R O L P A T H C O M M A N D S */ +struct bfi_enet_q { + union bfi_addr_u pg_tbl; + union bfi_addr_u first_entry; + u16 pages; /* # of pages */ + u16 page_sz; +}; + +struct bfi_enet_txq { + struct bfi_enet_q q; + u8 priority; + u8 rsvd[3]; +}; + +struct bfi_enet_rxq { + struct bfi_enet_q q; + u16 rx_buffer_size; + u16 rsvd; +}; + +struct bfi_enet_cq { + struct bfi_enet_q q; +}; + +struct bfi_enet_ib_cfg { + u8 int_pkt_dma; + u8 int_enabled; + u8 int_pkt_enabled; + u8 continuous_coalescing; + u8 msix; + u8 rsvd[3]; + u32 coalescing_timeout; + u32 inter_pkt_timeout; + u8 inter_pkt_count; + u8 rsvd1[3]; +}; + +struct bfi_enet_ib { + union bfi_addr_u index_addr; + union { + u16 
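	/*
	 * Worked example for BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET() above: the
	 * macro keeps the offset in the low 10 bits (so it must stay below
	 * 1024) and packs the L4 header size into the bits above it, e.g.
	 *
	 *	BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(5, 54)
	 *		= (5 << 10) | (54 & 0x3FF) = 0x1436
	 *
	 * (illustrative numbers only; the units of the header-size field are
	 * defined by the firmware interface, not by this macro).
	 */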
msix_index; + u16 intx_bitmask; + } intr; + u16 rsvd; +}; + +/* ENET command messages */ +enum bfi_enet_h2i_msgs { + /* Rx Commands */ + BFI_ENET_H2I_RX_CFG_SET_REQ = 1, + BFI_ENET_H2I_RX_CFG_CLR_REQ = 2, + + BFI_ENET_H2I_RIT_CFG_REQ = 3, + BFI_ENET_H2I_RSS_CFG_REQ = 4, + BFI_ENET_H2I_RSS_ENABLE_REQ = 5, + BFI_ENET_H2I_RX_PROMISCUOUS_REQ = 6, + BFI_ENET_H2I_RX_DEFAULT_REQ = 7, + + BFI_ENET_H2I_MAC_UCAST_SET_REQ = 8, + BFI_ENET_H2I_MAC_UCAST_CLR_REQ = 9, + BFI_ENET_H2I_MAC_UCAST_ADD_REQ = 10, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ = 11, + + BFI_ENET_H2I_MAC_MCAST_ADD_REQ = 12, + BFI_ENET_H2I_MAC_MCAST_DEL_REQ = 13, + BFI_ENET_H2I_MAC_MCAST_FILTER_REQ = 14, + + BFI_ENET_H2I_RX_VLAN_SET_REQ = 15, + BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ = 16, + + /* Tx Commands */ + BFI_ENET_H2I_TX_CFG_SET_REQ = 17, + BFI_ENET_H2I_TX_CFG_CLR_REQ = 18, + + /* Port Commands */ + BFI_ENET_H2I_PORT_ADMIN_UP_REQ = 19, + BFI_ENET_H2I_SET_PAUSE_REQ = 20, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ = 21, + + /* Get Attributes Command */ + BFI_ENET_H2I_GET_ATTR_REQ = 22, + + /* Statistics Commands */ + BFI_ENET_H2I_STATS_GET_REQ = 23, + BFI_ENET_H2I_STATS_CLR_REQ = 24, + + BFI_ENET_H2I_WOL_MAGIC_REQ = 25, + BFI_ENET_H2I_WOL_FRAME_REQ = 26, + + BFI_ENET_H2I_MAX = 27, +}; + +enum bfi_enet_i2h_msgs { + /* Rx Responses */ + BFI_ENET_I2H_RX_CFG_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_CFG_SET_REQ), + BFI_ENET_I2H_RX_CFG_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_CFG_CLR_REQ), + + BFI_ENET_I2H_RIT_CFG_RSP = + BFA_I2HM(BFI_ENET_H2I_RIT_CFG_REQ), + BFI_ENET_I2H_RSS_CFG_RSP = + BFA_I2HM(BFI_ENET_H2I_RSS_CFG_REQ), + BFI_ENET_I2H_RSS_ENABLE_RSP = + BFA_I2HM(BFI_ENET_H2I_RSS_ENABLE_REQ), + BFI_ENET_I2H_RX_PROMISCUOUS_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_PROMISCUOUS_REQ), + BFI_ENET_I2H_RX_DEFAULT_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_DEFAULT_REQ), + + BFI_ENET_I2H_MAC_UCAST_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_SET_REQ), + BFI_ENET_I2H_MAC_UCAST_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_CLR_REQ), + BFI_ENET_I2H_MAC_UCAST_ADD_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_ADD_REQ), + BFI_ENET_I2H_MAC_UCAST_DEL_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_DEL_REQ), + + BFI_ENET_I2H_MAC_MCAST_ADD_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_ADD_REQ), + BFI_ENET_I2H_MAC_MCAST_DEL_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_DEL_REQ), + BFI_ENET_I2H_MAC_MCAST_FILTER_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_FILTER_REQ), + + BFI_ENET_I2H_RX_VLAN_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_VLAN_SET_REQ), + + BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ), + + /* Tx Responses */ + BFI_ENET_I2H_TX_CFG_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_TX_CFG_SET_REQ), + BFI_ENET_I2H_TX_CFG_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_TX_CFG_CLR_REQ), + + /* Port Responses */ + BFI_ENET_I2H_PORT_ADMIN_RSP = + BFA_I2HM(BFI_ENET_H2I_PORT_ADMIN_UP_REQ), + + BFI_ENET_I2H_SET_PAUSE_RSP = + BFA_I2HM(BFI_ENET_H2I_SET_PAUSE_REQ), + BFI_ENET_I2H_DIAG_LOOPBACK_RSP = + BFA_I2HM(BFI_ENET_H2I_DIAG_LOOPBACK_REQ), + + /* Attributes Response */ + BFI_ENET_I2H_GET_ATTR_RSP = + BFA_I2HM(BFI_ENET_H2I_GET_ATTR_REQ), + + /* Statistics Responses */ + BFI_ENET_I2H_STATS_GET_RSP = + BFA_I2HM(BFI_ENET_H2I_STATS_GET_REQ), + BFI_ENET_I2H_STATS_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_STATS_CLR_REQ), + + BFI_ENET_I2H_WOL_MAGIC_RSP = + BFA_I2HM(BFI_ENET_H2I_WOL_MAGIC_REQ), + BFI_ENET_I2H_WOL_FRAME_RSP = + BFA_I2HM(BFI_ENET_H2I_WOL_FRAME_REQ), + + /* AENs */ + BFI_ENET_I2H_LINK_DOWN_AEN = BFA_I2HM(BFI_ENET_H2I_MAX), + BFI_ENET_I2H_LINK_UP_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 1), + + BFI_ENET_I2H_PORT_ENABLE_AEN = 
BFA_I2HM(BFI_ENET_H2I_MAX + 2), + BFI_ENET_I2H_PORT_DISABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 3), + + BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4), +}; + +/* The following error codes can be returned by the enet commands */ +enum bfi_enet_err { + BFI_ENET_CMD_OK = 0, + BFI_ENET_CMD_FAIL = 1, + BFI_ENET_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */ + BFI_ENET_CMD_CAM_FULL = 3, /* !< CAM is full */ + BFI_ENET_CMD_NOT_OWNER = 4, /* !< Not permitted, b'cos not owner */ + BFI_ENET_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */ + BFI_ENET_CMD_WAITING = 6, /* !< Waiting for completion */ + BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */ +}; + +/* Generic Request + * + * bfi_enet_req is used by: + * BFI_ENET_H2I_RX_CFG_CLR_REQ + * BFI_ENET_H2I_TX_CFG_CLR_REQ + */ +struct bfi_enet_req { + struct bfi_msgq_mhdr mh; +}; + +/* Enable/Disable Request + * + * bfi_enet_enable_req is used by: + * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_PROMISCUOUS_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_DEFAULT_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_MAC_MCAST_FILTER_REQ + * BFI_ENET_H2I_PORT_ADMIN_UP_REQ (enet_id must be zero) + */ +struct bfi_enet_enable_req { + struct bfi_msgq_mhdr mh; + u8 enable; /* 1 = enable; 0 = disable */ + u8 rsvd[3]; +}; + +/* Generic Response */ +struct bfi_enet_rsp { + struct bfi_msgq_mhdr mh; + u8 error; /*!< if error see cmd_offset */ + u8 rsvd; + u16 cmd_offset; /*!< offset to invalid parameter */ +}; + +/* GLOBAL CONFIGURATION */ + +/* bfi_enet_attr_req is used by: + * BFI_ENET_H2I_GET_ATTR_REQ + */ +struct bfi_enet_attr_req { + struct bfi_msgq_mhdr mh; +}; + +/* bfi_enet_attr_rsp is used by: + * BFI_ENET_I2H_GET_ATTR_RSP + */ +struct bfi_enet_attr_rsp { + struct bfi_msgq_mhdr mh; + u8 error; /*!< if error see cmd_offset */ + u8 rsvd; + u16 cmd_offset; /*!< offset to invalid parameter */ + u32 max_cfg; + u32 max_ucmac; + u32 rit_size; +}; + +/* Tx Configuration + * + * bfi_enet_tx_cfg is used by: + * BFI_ENET_H2I_TX_CFG_SET_REQ + */ +enum bfi_enet_tx_vlan_mode { + BFI_ENET_TX_VLAN_NOP = 0, + BFI_ENET_TX_VLAN_INS = 1, + BFI_ENET_TX_VLAN_WI = 2, +}; + +struct bfi_enet_tx_cfg { + u8 vlan_mode; /*!< processing mode */ + u8 rsvd; + u16 vlan_id; + u8 admit_tagged_frame; + u8 apply_vlan_filter; + u8 add_to_vswitch; + u8 rsvd1[1]; +}; + +struct bfi_enet_tx_cfg_req { + struct bfi_msgq_mhdr mh; + u8 num_queues; /* # of Tx Queues */ + u8 rsvd[3]; + + struct { + struct bfi_enet_txq q; + struct bfi_enet_ib ib; + } q_cfg[BFI_ENET_TXQ_PRIO_MAX]; + + struct bfi_enet_ib_cfg ib_cfg; + + struct bfi_enet_tx_cfg tx_cfg; +}; + +struct bfi_enet_tx_cfg_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 hw_id; /* For debugging */ + u8 rsvd[2]; + struct { + u32 q_dbell; /* PCI base address offset */ + u32 i_dbell; /* PCI base address offset */ + u8 hw_qid; /* For debugging */ + u8 rsvd[3]; + } q_handles[BFI_ENET_TXQ_PRIO_MAX]; +}; + +/* Rx Configuration + * + * bfi_enet_rx_cfg is used by: + * BFI_ENET_H2I_RX_CFG_SET_REQ + */ +enum bfi_enet_rxq_type { + BFI_ENET_RXQ_SINGLE = 1, + BFI_ENET_RXQ_LARGE_SMALL = 2, + BFI_ENET_RXQ_HDS = 3, + BFI_ENET_RXQ_HDS_OPT_BASED = 4, +}; + +enum bfi_enet_hds_type { + BFI_ENET_HDS_FORCED = 0x01, + BFI_ENET_HDS_IPV6_UDP = 0x02, + BFI_ENET_HDS_IPV6_TCP = 0x04, + BFI_ENET_HDS_IPV4_TCP = 0x08, + BFI_ENET_HDS_IPV4_UDP = 0x10, +}; + +struct bfi_enet_rx_cfg { + u8 rxq_type; + u8 rsvd[1]; + u16 frame_size; + + struct { + u8 max_header_size; + u8 force_offset; + u8 type; + u8 rsvd1; + } hds; + + u8 
multi_buffer; + u8 strip_vlan; + u8 drop_untagged; + u8 rsvd2; +}; + +/* + * Multicast frames are received on the ql of q-set index zero. + * On the completion queue. RxQ ID = even is for large/data buffer queues + * and RxQ ID = odd is for small/header buffer queues. + */ +struct bfi_enet_rx_cfg_req { + struct bfi_msgq_mhdr mh; + u8 num_queue_sets; /* # of Rx Queue Sets */ + u8 rsvd[3]; + + struct { + struct bfi_enet_rxq ql; /* large/data/single buffers */ + struct bfi_enet_rxq qs; /* small/header buffers */ + struct bfi_enet_cq cq; + struct bfi_enet_ib ib; + } q_cfg[BFI_ENET_RX_QSET_MAX]; + + struct bfi_enet_ib_cfg ib_cfg; + + struct bfi_enet_rx_cfg rx_cfg; +}; + +struct bfi_enet_rx_cfg_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 hw_id; /* For debugging */ + u8 rsvd[2]; + struct { + u32 ql_dbell; /* PCI base address offset */ + u32 qs_dbell; /* PCI base address offset */ + u32 i_dbell; /* PCI base address offset */ + u8 hw_lqid; /* For debugging */ + u8 hw_sqid; /* For debugging */ + u8 hw_cqid; /* For debugging */ + u8 rsvd; + } q_handles[BFI_ENET_RX_QSET_MAX]; +}; + +/* RIT + * + * bfi_enet_rit_req is used by: + * BFI_ENET_H2I_RIT_CFG_REQ + */ +struct bfi_enet_rit_req { + struct bfi_msgq_mhdr mh; + u16 size; /* number of table-entries used */ + u8 rsvd[2]; + u8 table[BFI_ENET_RSS_RIT_MAX]; +}; + +/* RSS + * + * bfi_enet_rss_cfg_req is used by: + * BFI_ENET_H2I_RSS_CFG_REQ + */ +enum bfi_enet_rss_type { + BFI_ENET_RSS_IPV6 = 0x01, + BFI_ENET_RSS_IPV6_TCP = 0x02, + BFI_ENET_RSS_IPV4 = 0x04, + BFI_ENET_RSS_IPV4_TCP = 0x08 +}; + +struct bfi_enet_rss_cfg { + u8 type; + u8 mask; + u8 rsvd[2]; + u32 key[BFI_ENET_RSS_KEY_LEN]; +}; + +struct bfi_enet_rss_cfg_req { + struct bfi_msgq_mhdr mh; + struct bfi_enet_rss_cfg cfg; +}; + +/* MAC Unicast + * + * bfi_enet_rx_vlan_req is used by: + * BFI_ENET_H2I_MAC_UCAST_SET_REQ + * BFI_ENET_H2I_MAC_UCAST_CLR_REQ + * BFI_ENET_H2I_MAC_UCAST_ADD_REQ + * BFI_ENET_H2I_MAC_UCAST_DEL_REQ + */ +struct bfi_enet_ucast_req { + struct bfi_msgq_mhdr mh; + mac_t mac_addr; + u8 rsvd[2]; +}; + +/* MAC Unicast + VLAN */ +struct bfi_enet_mac_n_vlan_req { + struct bfi_msgq_mhdr mh; + u16 vlan_id; + mac_t mac_addr; +}; + +/* MAC Multicast + * + * bfi_enet_mac_mfilter_add_req is used by: + * BFI_ENET_H2I_MAC_MCAST_ADD_REQ + */ +struct bfi_enet_mcast_add_req { + struct bfi_msgq_mhdr mh; + mac_t mac_addr; + u8 rsvd[2]; +}; + +/* bfi_enet_mac_mfilter_add_rsp is used by: + * BFI_ENET_I2H_MAC_MCAST_ADD_RSP + */ +struct bfi_enet_mcast_add_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 rsvd; + u16 cmd_offset; + u16 handle; + u8 rsvd1[2]; +}; + +/* bfi_enet_mac_mfilter_del_req is used by: + * BFI_ENET_H2I_MAC_MCAST_DEL_REQ + */ +struct bfi_enet_mcast_del_req { + struct bfi_msgq_mhdr mh; + u16 handle; + u8 rsvd[2]; +}; + +/* VLAN + * + * bfi_enet_rx_vlan_req is used by: + * BFI_ENET_H2I_RX_VLAN_SET_REQ + */ +struct bfi_enet_rx_vlan_req { + struct bfi_msgq_mhdr mh; + u8 block_idx; + u8 rsvd[3]; + u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX]; +}; + +/* PAUSE + * + * bfi_enet_set_pause_req is used by: + * BFI_ENET_H2I_SET_PAUSE_REQ + */ +struct bfi_enet_set_pause_req { + struct bfi_msgq_mhdr mh; + u8 rsvd[2]; + u8 tx_pause; /* 1 = enable; 0 = disable */ + u8 rx_pause; /* 1 = enable; 0 = disable */ +}; + +/* DIAGNOSTICS + * + * bfi_enet_diag_lb_req is used by: + * BFI_ENET_H2I_DIAG_LOOPBACK + */ +struct bfi_enet_diag_lb_req { + struct bfi_msgq_mhdr mh; + u8 rsvd[2]; + u8 mode; /* cable or Serdes */ + u8 enable; /* 1 = enable; 0 = disable */ +}; + +/* enum for Loopback opmodes */ +enum { 
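	/*
	 * VLAN table layout implied by the BFI_ENET_VLAN_* constants earlier
	 * in this header: the 4096-entry VLAN table is programmed in
	 * 512-bit blocks of sixteen 32-bit words, so (illustrative mapping)
	 *
	 *	block_idx = vlan_id / BFI_ENET_VLAN_BLOCK_SIZE;    (0..7)
	 *	word      = (vlan_id % BFI_ENET_VLAN_BLOCK_SIZE) / 32;
	 *	bit       = vlan_id % 32;
	 *
	 * e.g. VLAN 4095 lands in block 7, word 15, bit 31 of
	 * bfi_enet_rx_vlan_req.bit_mask[].
	 */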
+ BFI_ENET_DIAG_LB_OPMODE_EXT = 0, + BFI_ENET_DIAG_LB_OPMODE_CBL = 1, +}; + +/* STATISTICS + * + * bfi_enet_stats_req is used by: + * BFI_ENET_H2I_STATS_GET_REQ + * BFI_ENET_I2H_STATS_CLR_REQ + */ +struct bfi_enet_stats_req { + struct bfi_msgq_mhdr mh; + u16 stats_mask; + u8 rsvd[2]; + u32 rx_enet_mask; + u32 tx_enet_mask; + union bfi_addr_u host_buffer; +}; + +/* defines for "stats_mask" above. */ +#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */ +#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */ +#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */ +#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */ +#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */ + +#define BFI_ENET_STATS_ALL 0x1f + +/* TxF Frame Statistics */ +struct bfi_enet_stats_txf { + u64 ucast_octets; + u64 ucast; + u64 ucast_vlan; + + u64 mcast_octets; + u64 mcast; + u64 mcast_vlan; + + u64 bcast_octets; + u64 bcast; + u64 bcast_vlan; + + u64 errors; + u64 filter_vlan; /* frames filtered due to VLAN */ + u64 filter_mac_sa; /* frames filtered due to SA check */ +}; + +/* RxF Frame Statistics */ +struct bfi_enet_stats_rxf { + u64 ucast_octets; + u64 ucast; + u64 ucast_vlan; + + u64 mcast_octets; + u64 mcast; + u64 mcast_vlan; + + u64 bcast_octets; + u64 bcast; + u64 bcast_vlan; + u64 frame_drops; +}; + +/* FC Tx Frame Statistics */ +struct bfi_enet_stats_fc_tx { + u64 txf_ucast_octets; + u64 txf_ucast; + u64 txf_ucast_vlan; + + u64 txf_mcast_octets; + u64 txf_mcast; + u64 txf_mcast_vlan; + + u64 txf_bcast_octets; + u64 txf_bcast; + u64 txf_bcast_vlan; + + u64 txf_parity_errors; + u64 txf_timeout; + u64 txf_fid_parity_errors; +}; + +/* FC Rx Frame Statistics */ +struct bfi_enet_stats_fc_rx { + u64 rxf_ucast_octets; + u64 rxf_ucast; + u64 rxf_ucast_vlan; + + u64 rxf_mcast_octets; + u64 rxf_mcast; + u64 rxf_mcast_vlan; + + u64 rxf_bcast_octets; + u64 rxf_bcast; + u64 rxf_bcast_vlan; +}; + +/* RAD Frame Statistics */ +struct bfi_enet_stats_rad { + u64 rx_frames; + u64 rx_octets; + u64 rx_vlan_frames; + + u64 rx_ucast; + u64 rx_ucast_octets; + u64 rx_ucast_vlan; + + u64 rx_mcast; + u64 rx_mcast_octets; + u64 rx_mcast_vlan; + + u64 rx_bcast; + u64 rx_bcast_octets; + u64 rx_bcast_vlan; + + u64 rx_drops; +}; + +/* BPC Tx Registers */ +struct bfi_enet_stats_bpc { + /* transmit stats */ + u64 tx_pause[8]; + u64 tx_zero_pause[8]; /*!< Pause cancellation */ + /*!> 15)) +#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) + +#endif /* __BFI_REG_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h new file mode 100644 index 000000000..8ba72b1f3 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -0,0 +1,565 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BNA_H__ +#define __BNA_H__ + +#include "bfa_defs.h" +#include "bfa_ioc.h" +#include "bfi_enet.h" +#include "bna_types.h" + +extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; + +/* Macros and constants */ + +#define BNA_IOC_TIMER_FREQ 200 + +/* Log string size */ +#define BNA_MESSAGE_SIZE 256 + +#define bna_is_small_rxq(_id) ((_id) & 0x1) + +#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \ + (!memcmp((_mac1), (_mac2), sizeof(mac_t))) + +#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0) + +#define BNA_TO_POWER_OF_2(x) \ +do { \ + int _shift = 0; \ + while ((x) && (x) != 1) { \ + (x) >>= 1; \ + _shift++; \ + } \ + (x) <<= _shift; \ +} while (0) + +#define BNA_TO_POWER_OF_2_HIGH(x) \ +do { \ + int n = 1; \ + while (n < (x)) \ + n <<= 1; \ + (x) = n; \ +} while (0) + +/* + * input : _addr-> os dma addr in host endian format, + * output : _bna_dma_addr-> pointer to hw dma addr + */ +#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \ +do { \ + u64 tmp_addr = \ + cpu_to_be64((u64)(_addr)); \ + (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \ + (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \ +} while (0) + +/* + * input : _bna_dma_addr-> pointer to hw dma addr + * output : _addr-> os dma addr in host endian format + */ +#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \ +do { \ + (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \ + | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \ +} while (0) + +#define containing_rec(addr, type, field) \ + ((type *)((unsigned char *)(addr) - \ + (unsigned char *)(&((type *)0)->field))) + +#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2) + +/* TxQ element is 64 bytes */ +#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6) +#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6) + +#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \ +{ \ + unsigned int page_index; /* index within a page */ \ + void *page_addr; \ + page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \ + (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \ + page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\ + (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \ +} + +/* RxQ element is 8 bytes */ +#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3) +#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3) + +#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \ +{ \ + unsigned int page_index; /* index within a page */ \ + void *page_addr; \ + page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \ + (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \ + page_addr = (_qpt_ptr)[((_qe_idx) >> \ + BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \ + (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \ +} + +/* CQ element is 16 bytes */ +#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4) +#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4) + +#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \ +{ \ + unsigned int page_index; /* index within a page */ \ + void *page_addr; \ + \ + page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \ + (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \ + page_addr = (_qpt_ptr)[((_qe_idx) >> \ + BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \ + (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\ +} + +#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \ + (&((_cast *)(_q_base))[(_qe_idx)]) + +#define BNA_QE_INDX_RANGE(_qe_idx, 
_q_depth) ((_q_depth) - (_qe_idx)) + +#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \ + ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1)) + +#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth) + +#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \ + (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1)) + +#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \ + (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \ + ((_q_depth) - 1)) + +#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \ + ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \ + (_q_depth - 1)) + +#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index) + +#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index) + +#define BNA_Q_PI_ADD(_q_ptr, _num) \ + (_q_ptr)->q.producer_index = \ + (((_q_ptr)->q.producer_index + (_num)) & \ + ((_q_ptr)->q.q_depth - 1)) + +#define BNA_Q_CI_ADD(_q_ptr, _num) \ + (_q_ptr)->q.consumer_index = \ + (((_q_ptr)->q.consumer_index + (_num)) \ + & ((_q_ptr)->q.q_depth - 1)) + +#define BNA_Q_FREE_COUNT(_q_ptr) \ + (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth)) + +#define BNA_Q_IN_USE_COUNT(_q_ptr) \ + (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth)) + +#define BNA_LARGE_PKT_SIZE 1000 + +#define BNA_UPDATE_PKT_CNT(_pkt, _len) \ +do { \ + if ((_len) > BNA_LARGE_PKT_SIZE) { \ + (_pkt)->large_pkt_cnt++; \ + } else { \ + (_pkt)->small_pkt_cnt++; \ + } \ +} while (0) + +#define call_rxf_stop_cbfn(rxf) \ +do { \ + if ((rxf)->stop_cbfn) { \ + void (*cbfn)(struct bna_rx *); \ + struct bna_rx *cbarg; \ + cbfn = (rxf)->stop_cbfn; \ + cbarg = (rxf)->stop_cbarg; \ + (rxf)->stop_cbfn = NULL; \ + (rxf)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_rxf_start_cbfn(rxf) \ +do { \ + if ((rxf)->start_cbfn) { \ + void (*cbfn)(struct bna_rx *); \ + struct bna_rx *cbarg; \ + cbfn = (rxf)->start_cbfn; \ + cbarg = (rxf)->start_cbarg; \ + (rxf)->start_cbfn = NULL; \ + (rxf)->start_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_rxf_cam_fltr_cbfn(rxf) \ +do { \ + if ((rxf)->cam_fltr_cbfn) { \ + void (*cbfn)(struct bnad *, struct bna_rx *); \ + struct bnad *cbarg; \ + cbfn = (rxf)->cam_fltr_cbfn; \ + cbarg = (rxf)->cam_fltr_cbarg; \ + (rxf)->cam_fltr_cbfn = NULL; \ + (rxf)->cam_fltr_cbarg = NULL; \ + cbfn(cbarg, rxf->rx); \ + } \ +} while (0) + +#define call_rxf_pause_cbfn(rxf) \ +do { \ + if ((rxf)->oper_state_cbfn) { \ + void (*cbfn)(struct bnad *, struct bna_rx *); \ + struct bnad *cbarg; \ + cbfn = (rxf)->oper_state_cbfn; \ + cbarg = (rxf)->oper_state_cbarg; \ + (rxf)->oper_state_cbfn = NULL; \ + (rxf)->oper_state_cbarg = NULL; \ + cbfn(cbarg, rxf->rx); \ + } \ +} while (0) + +#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf) + +#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx)) + +#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx)) + +#define xxx_enable(mode, bitmask, xxx) \ +do { \ + bitmask |= xxx; \ + mode |= xxx; \ +} while (0) + +#define xxx_disable(mode, bitmask, xxx) \ +do { \ + bitmask |= xxx; \ + mode &= ~xxx; \ +} while (0) + +#define xxx_inactive(mode, bitmask, xxx) \ +do { \ + bitmask &= ~xxx; \ + mode &= ~xxx; \ +} while (0) + +#define is_promisc_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define is_promisc_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_disable(mode, 
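/*
 * Worked example for the page/index macros above, assuming 4 KiB pages: a
 * TxQ entry is 64 bytes, so BNA_TXQ_PAGE_INDEX_MAX = 4096 >> 6 = 64 entries
 * per page, and TxQ entry 130 lives in page 130 >> 6 = 2 at page index
 * 130 & 63 = 2. RxQ entries (8 bytes) and CQ entries (16 bytes) follow the
 * same scheme with 512 and 256 entries per page respectively.
 * (Illustrative numbers only; the actual page size comes from PAGE_SIZE.)
 */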
bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC) + +#define is_default_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define is_default_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define is_allmulti_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define is_allmulti_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define GET_RXQS(rxp, q0, q1) do { \ + switch ((rxp)->type) { \ + case BNA_RXP_SINGLE: \ + (q0) = rxp->rxq.single.only; \ + (q1) = NULL; \ + break; \ + case BNA_RXP_SLR: \ + (q0) = rxp->rxq.slr.large; \ + (q1) = rxp->rxq.slr.small; \ + break; \ + case BNA_RXP_HDS: \ + (q0) = rxp->rxq.hds.data; \ + (q1) = rxp->rxq.hds.hdr; \ + break; \ + } \ +} while (0) + +#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask) + +#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask) + +#define bna_tx_from_rid(_bna, _rid, _tx) \ +do { \ + struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \ + struct bna_tx *__tx; \ + struct list_head *qe; \ + _tx = NULL; \ + list_for_each(qe, &__tx_mod->tx_active_q) { \ + __tx = (struct bna_tx *)qe; \ + if (__tx->rid == (_rid)) { \ + (_tx) = __tx; \ + break; \ + } \ + } \ +} while (0) + +#define bna_rx_from_rid(_bna, _rid, _rx) \ +do { \ + struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \ + struct bna_rx *__rx; \ + struct list_head *qe; \ + _rx = NULL; \ + list_for_each(qe, &__rx_mod->rx_active_q) { \ + __rx = (struct bna_rx *)qe; \ + if (__rx->rid == (_rid)) { \ + (_rx) = __rx; \ + break; \ + } \ + } \ +} while (0) + +#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q) + +#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q) + +#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q) + +#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q) + +/* Inline functions */ + +static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) +{ + struct bna_mac *mac = NULL; + struct list_head *qe; + list_for_each(qe, q) { + if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) { + mac = (struct bna_mac *)qe; + break; + } + } + return mac; +} + +#define bna_attr(_bna) (&(_bna)->ioceth.attr) + +/* Function prototypes */ + +/* BNA */ + +/* FW response handlers */ +void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr); + +/* APIs for BNAD */ +void bna_res_req(struct bna_res_info *res_info); +void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info); +void bna_init(struct bna *bna, struct bnad *bnad, + struct bfa_pcidev *pcidev, + struct bna_res_info *res_info); +void bna_mod_init(struct bna *bna, struct bna_res_info *res_info); +void bna_uninit(struct bna *bna); +int bna_num_txq_set(struct bna *bna, int num_txq); +int bna_num_rxp_set(struct bna *bna, int num_rxp); +void bna_hw_stats_get(struct bna *bna); + +/* APIs for RxF */ 
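/*
 * Self-contained sketch of the producer/consumer arithmetic behind
 * BNA_QE_IN_USE_CNT()/BNA_QE_FREE_CNT() above, written out as plain
 * functions for illustration (hypothetical helpers, not used by the
 * driver; they assume q_depth is a power of two, as the driver enforces
 * with BNA_TO_POWER_OF_2_HIGH()).
 */
static inline u32 bna_example_q_in_use(u32 pi, u32 ci, u32 q_depth)
{
	/* entries the producer has published but the consumer not reaped */
	return (pi - ci) & (q_depth - 1);
}

static inline u32 bna_example_q_free(u32 pi, u32 ci, u32 q_depth)
{
	/* one slot is always left empty to distinguish full from empty */
	return (ci - pi - 1) & (q_depth - 1);
}

/*
 * e.g. with q_depth = 64, pi = 10, ci = 5: 5 entries are in use and
 * (5 - 10 - 1) & 63 = 58 are free.
 */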
+struct bna_mac *bna_cam_mod_mac_get(struct list_head *head); +void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac); +struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod); +void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, + struct bna_mcam_handle *handle); + +/* MBOX */ + +/* API for BNAD */ +void bna_mbox_handler(struct bna *bna, u32 intr_status); + +/* ETHPORT */ + +/* Callbacks for RX */ +void bna_ethport_cb_rx_started(struct bna_ethport *ethport); +void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport); + +/* TX MODULE AND TX */ + +/* FW response handelrs */ +void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod); + +/* APIs for BNA */ +void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, + struct bna_res_info *res_info); +void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod); + +/* APIs for ENET */ +void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type); +void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type); +void bna_tx_mod_fail(struct bna_tx_mod *tx_mod); + +/* APIs for BNAD */ +void bna_tx_res_req(int num_txq, int txq_depth, + struct bna_res_info *res_info); +struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad, + struct bna_tx_config *tx_cfg, + const struct bna_tx_event_cbfn *tx_cbfn, + struct bna_res_info *res_info, void *priv); +void bna_tx_destroy(struct bna_tx *tx); +void bna_tx_enable(struct bna_tx *tx); +void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_tx *)); +void bna_tx_cleanup_complete(struct bna_tx *tx); +void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); + +/* RX MODULE, RX, RXF */ + +/* FW response handlers */ +void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); + +/* APIs for BNA */ +void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, + struct bna_res_info *res_info); +void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod); + +/* APIs for ENET */ +void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type); +void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type); +void bna_rx_mod_fail(struct bna_rx_mod *rx_mod); + +/* APIs for BNAD */ +void bna_rx_res_req(struct bna_rx_config *rx_config, + struct bna_res_info *res_info); +struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad, + struct bna_rx_config *rx_cfg, + const struct bna_rx_event_cbfn *rx_cbfn, + struct bna_res_info *res_info, void *priv); +void bna_rx_destroy(struct bna_rx *rx); +void bna_rx_enable(struct bna_rx *rx); +void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_rx *)); +void bna_rx_cleanup_complete(struct bna_rx *rx); +void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo); +void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]); +void bna_rx_dim_update(struct bna_ccb *ccb); +enum bna_cb_status +bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, + void (*cbfn)(struct bnad 
*, struct bna_rx *)); +enum bna_cb_status +bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac, + void (*cbfn)(struct bnad *, struct bna_rx *)); +void +bna_rx_mcast_delall(struct bna_rx *rx, + void (*cbfn)(struct bnad *, struct bna_rx *)); +enum bna_cb_status +bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, + enum bna_rxmode bitmask, + void (*cbfn)(struct bnad *, struct bna_rx *)); +void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); +void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); +void bna_rx_vlanfilter_enable(struct bna_rx *rx); +void bna_rx_vlan_strip_enable(struct bna_rx *rx); +void bna_rx_vlan_strip_disable(struct bna_rx *rx); +/* ENET */ + +/* API for RX */ +int bna_enet_mtu_get(struct bna_enet *enet); + +/* Callbacks for TX, RX */ +void bna_enet_cb_tx_stopped(struct bna_enet *enet); +void bna_enet_cb_rx_stopped(struct bna_enet *enet); + +/* API for BNAD */ +void bna_enet_enable(struct bna_enet *enet); +void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type, + void (*cbfn)(void *)); +void bna_enet_pause_config(struct bna_enet *enet, + struct bna_pause_config *pause_config, + void (*cbfn)(struct bnad *)); +void bna_enet_mtu_set(struct bna_enet *enet, int mtu, + void (*cbfn)(struct bnad *)); +void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac); + +/* IOCETH */ + +/* APIs for BNAD */ +void bna_ioceth_enable(struct bna_ioceth *ioceth); +void bna_ioceth_disable(struct bna_ioceth *ioceth, + enum bna_cleanup_type type); + +/* BNAD */ + +/* Callbacks for ENET */ +void bnad_cb_ethport_link_status(struct bnad *bnad, + enum bna_link_status status); + +/* Callbacks for IOCETH */ +void bnad_cb_ioceth_ready(struct bnad *bnad); +void bnad_cb_ioceth_failed(struct bnad *bnad); +void bnad_cb_ioceth_disabled(struct bnad *bnad); +void bnad_cb_mbox_intr_enable(struct bnad *bnad); +void bnad_cb_mbox_intr_disable(struct bnad *bnad); + +/* Callbacks for BNA */ +void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, + struct bna_stats *stats); + +#endif /* __BNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c new file mode 100644 index 000000000..deb8da6ab --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -0,0 +1,2159 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include "bna.h" + +static inline int +ethport_can_be_up(struct bna_ethport *ethport) +{ + int ready = 0; + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && + (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && + (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); + else + ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && + (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && + !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); + return ready; +} + +#define ethport_is_up ethport_can_be_up + +enum bna_ethport_event { + ETHPORT_E_START = 1, + ETHPORT_E_STOP = 2, + ETHPORT_E_FAIL = 3, + ETHPORT_E_UP = 4, + ETHPORT_E_DOWN = 5, + ETHPORT_E_FWRESP_UP_OK = 6, + ETHPORT_E_FWRESP_DOWN = 7, + ETHPORT_E_FWRESP_UP_FAIL = 8, +}; + +enum bna_enet_event { + ENET_E_START = 1, + ENET_E_STOP = 2, + ENET_E_FAIL = 3, + ENET_E_PAUSE_CFG = 4, + ENET_E_MTU_CFG = 5, + ENET_E_FWRESP_PAUSE = 6, + ENET_E_CHLD_STOPPED = 7, +}; + +enum bna_ioceth_event { + IOCETH_E_ENABLE = 1, + IOCETH_E_DISABLE = 2, + IOCETH_E_IOC_RESET = 3, + IOCETH_E_IOC_FAILED = 4, + IOCETH_E_IOC_READY = 5, + IOCETH_E_ENET_ATTR_RESP = 6, + IOCETH_E_ENET_STOPPED = 7, + IOCETH_E_IOC_DISABLED = 8, +}; + +#define bna_stats_copy(_name, _type) \ +do { \ + count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \ + stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \ + stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \ + for (i = 0; i < count; i++) \ + stats_dst[i] = be64_to_cpu(stats_src[i]); \ +} while (0) \ + +/* + * FW response handlers + */ + +static void +bna_bfi_ethport_enable_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport_can_be_up(ethport)) + bfa_fsm_send_event(ethport, ETHPORT_E_UP); +} + +static void +bna_bfi_ethport_disable_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + int ethport_up = ethport_is_up(ethport); + + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport_up) + bfa_fsm_send_event(ethport, ETHPORT_E_DOWN); +} + +static void +bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_enable_req *admin_req = + ðport->bfi_enet_cmd.admin_req; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); + + switch (admin_req->enable) { + case BNA_STATUS_T_ENABLED: + if (rsp->error == BFI_ENET_CMD_OK) + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK); + else { + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL); + } + break; + + case BNA_STATUS_T_DISABLED: + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN); + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); + break; + } +} + +static void +bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_diag_lb_req *diag_lb_req = + ðport->bfi_enet_cmd.lpbk_req; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); + + switch (diag_lb_req->enable) { + case BNA_STATUS_T_ENABLED: + if (rsp->error == BFI_ENET_CMD_OK) + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK); + else { + ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP; + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL); + } + break; + + case BNA_STATUS_T_DISABLED: + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN); + 
break; + } +} + +static void +bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE); +} + +static void +bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_attr_rsp *rsp = + container_of(msghdr, struct bfi_enet_attr_rsp, mh); + + /** + * Store only if not set earlier, since BNAD can override the HW + * attributes + */ + if (!ioceth->attr.fw_query_complete) { + ioceth->attr.num_txq = ntohl(rsp->max_cfg); + ioceth->attr.num_rxp = ntohl(rsp->max_cfg); + ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac); + ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = ntohl(rsp->rit_size); + ioceth->attr.fw_query_complete = true; + } + + bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP); +} + +static void +bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get; + u64 *stats_src; + u64 *stats_dst; + u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask); + u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask); + int count; + int i; + + bna_stats_copy(mac, mac); + bna_stats_copy(bpc, bpc); + bna_stats_copy(rad, rad); + bna_stats_copy(rlb, rad); + bna_stats_copy(fc_rx, fc_rx); + bna_stats_copy(fc_tx, fc_tx); + + stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]); + + /* Copy Rxf stats to SW area, scatter them while copying */ + for (i = 0; i < BFI_ENET_CFG_MAX; i++) { + stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]); + memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf)); + if (rx_enet_mask & ((u32)(1 << i))) { + int k; + count = sizeof(struct bfi_enet_stats_rxf) / + sizeof(u64); + for (k = 0; k < count; k++) { + stats_dst[k] = be64_to_cpu(*stats_src); + stats_src++; + } + } + } + + /* Copy Txf stats to SW area, scatter them while copying */ + for (i = 0; i < BFI_ENET_CFG_MAX; i++) { + stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]); + memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf)); + if (tx_enet_mask & ((u32)(1 << i))) { + int k; + count = sizeof(struct bfi_enet_stats_txf) / + sizeof(u64); + for (k = 0; k < count; k++) { + stats_dst[k] = be64_to_cpu(*stats_src); + stats_src++; + } + } + } + + bna->stats_mod.stats_get_busy = false; + bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats); +} + +static void +bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->link_status = BNA_LINK_UP; + + /* Dispatch events */ + ethport->link_cbfn(ethport->bna->bnad, ethport->link_status); +} + +static void +bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->link_status = BNA_LINK_DOWN; + + /* Dispatch events */ + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); +} + +static void +bna_err_handler(struct bna *bna, u32 intr_status) +{ + if (BNA_IS_HALT_INTR(bna, intr_status)) + bna_halt_clear(bna); + + bfa_nw_ioc_error_isr(&bna->ioceth.ioc); +} + +void +bna_mbox_handler(struct bna *bna, u32 intr_status) +{ + if (BNA_IS_ERR_INTR(bna, intr_status)) { + bna_err_handler(bna, intr_status); + return; + } + if (BNA_IS_MBOX_INTR(bna, intr_status)) + bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc); +} + +static void +bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) +{ + struct bna *bna = (struct bna *)arg; + struct bna_tx *tx; + struct bna_rx *rx; + + switch (msghdr->msg_id) { + case BFI_ENET_I2H_RX_CFG_SET_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + 
bna_bfi_rx_enet_start_rsp(rx, msghdr); + break; + + case BFI_ENET_I2H_RX_CFG_CLR_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rx_enet_stop_rsp(rx, msghdr); + break; + + case BFI_ENET_I2H_RIT_CFG_RSP: + case BFI_ENET_I2H_RSS_CFG_RSP: + case BFI_ENET_I2H_RSS_ENABLE_RSP: + case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: + case BFI_ENET_I2H_RX_DEFAULT_RSP: + case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: + case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: + case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: + case BFI_ENET_I2H_MAC_MCAST_DEL_RSP: + case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP: + case BFI_ENET_I2H_RX_VLAN_SET_RSP: + case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_MAC_UCAST_SET_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_TX_CFG_SET_RSP: + bna_tx_from_rid(bna, msghdr->enet_id, tx); + if (tx) + bna_bfi_tx_enet_start_rsp(tx, msghdr); + break; + + case BFI_ENET_I2H_TX_CFG_CLR_RSP: + bna_tx_from_rid(bna, msghdr->enet_id, tx); + if (tx) + bna_bfi_tx_enet_stop_rsp(tx, msghdr); + break; + + case BFI_ENET_I2H_PORT_ADMIN_RSP: + bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_DIAG_LOOPBACK_RSP: + bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_SET_PAUSE_RSP: + bna_bfi_pause_set_rsp(&bna->enet, msghdr); + break; + + case BFI_ENET_I2H_GET_ATTR_RSP: + bna_bfi_attr_get_rsp(&bna->ioceth, msghdr); + break; + + case BFI_ENET_I2H_STATS_GET_RSP: + bna_bfi_stats_get_rsp(bna, msghdr); + break; + + case BFI_ENET_I2H_STATS_CLR_RSP: + /* No-op */ + break; + + case BFI_ENET_I2H_LINK_UP_AEN: + bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_LINK_DOWN_AEN: + bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_PORT_ENABLE_AEN: + bna_bfi_ethport_enable_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_PORT_DISABLE_AEN: + bna_bfi_ethport_disable_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_BW_UPDATE_AEN: + bna_bfi_bw_update_aen(&bna->tx_mod); + break; + + default: + break; + } +} + +/* ETHPORT */ + +#define call_ethport_stop_cbfn(_ethport) \ +do { \ + if ((_ethport)->stop_cbfn) { \ + void (*cbfn)(struct bna_enet *); \ + cbfn = (_ethport)->stop_cbfn; \ + (_ethport)->stop_cbfn = NULL; \ + cbfn(&(_ethport)->bna->enet); \ + } \ +} while (0) + +#define call_ethport_adminup_cbfn(ethport, status) \ +do { \ + if ((ethport)->adminup_cbfn) { \ + void (*cbfn)(struct bnad *, enum bna_cb_status); \ + cbfn = (ethport)->adminup_cbfn; \ + (ethport)->adminup_cbfn = NULL; \ + cbfn((ethport)->bna->bnad, status); \ + } \ +} while (0) + +static void +bna_bfi_ethport_admin_up(struct bna_ethport *ethport) +{ + struct bfi_enet_enable_req *admin_up_req = + ðport->bfi_enet_cmd.admin_req; + + bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0); + admin_up_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + admin_up_req->enable = BNA_STATUS_T_ENABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &admin_up_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_admin_down(struct 
bna_ethport *ethport) +{ + struct bfi_enet_enable_req *admin_down_req = + ðport->bfi_enet_cmd.admin_req; + + bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0); + admin_down_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + admin_down_req->enable = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &admin_down_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport) +{ + struct bfi_enet_diag_lb_req *lpbk_up_req = + ðport->bfi_enet_cmd.lpbk_req; + + bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0); + lpbk_up_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req))); + lpbk_up_req->mode = (ethport->bna->enet.type == + BNA_ENET_T_LOOPBACK_INTERNAL) ? + BFI_ENET_DIAG_LB_OPMODE_EXT : + BFI_ENET_DIAG_LB_OPMODE_CBL; + lpbk_up_req->enable = BNA_STATUS_T_ENABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport) +{ + struct bfi_enet_diag_lb_req *lpbk_down_req = + ðport->bfi_enet_cmd.lpbk_req; + + bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0); + lpbk_down_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req))); + lpbk_down_req->enable = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_up(struct bna_ethport *ethport) +{ + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + bna_bfi_ethport_admin_up(ethport); + else + bna_bfi_ethport_lpbk_up(ethport); +} + +static void +bna_bfi_ethport_down(struct bna_ethport *ethport) +{ + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + bna_bfi_ethport_admin_down(ethport); + else + bna_bfi_ethport_lpbk_down(ethport); +} + +bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport, + enum bna_ethport_event); + +static void +bna_ethport_sm_stopped_entry(struct bna_ethport *ethport) +{ + call_ethport_stop_cbfn(ethport); +} + +static void +bna_ethport_sm_stopped(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_START: + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + case ETHPORT_E_STOP: + call_ethport_stop_cbfn(ethport); + break; + + case ETHPORT_E_FAIL: + /* No-op */ + break; + + case ETHPORT_E_DOWN: + /* This event is received due to Rx objects failing */ + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_down_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_down(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case 
ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_UP: + bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait); + bna_bfi_ethport_up(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + break; + + case ETHPORT_E_FAIL: + call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL); + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT); + bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait); + break; + + case ETHPORT_E_FWRESP_UP_OK: + call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS); + bfa_fsm_set_state(ethport, bna_ethport_sm_up); + break; + + case ETHPORT_E_FWRESP_UP_FAIL: + call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL); + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + case ETHPORT_E_FWRESP_DOWN: + /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */ + bna_bfi_ethport_up(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport) +{ + /** + * NOTE: Do not call bna_bfi_ethport_down() here. That will over step + * mbox due to up_resp_wait -> down_resp_wait transition on event + * ETHPORT_E_DOWN + */ +} + +static void +bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_UP: + bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait); + break; + + case ETHPORT_E_FWRESP_UP_OK: + /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */ + bna_bfi_ethport_down(ethport); + break; + + case ETHPORT_E_FWRESP_UP_FAIL: + case ETHPORT_E_FWRESP_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_up_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_up(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + bna_bfi_ethport_down(ethport); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait); + bna_bfi_ethport_down(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + /** + * This event is received due to Rx objects stopping in + * parallel to ethport + */ + /* No-op */ + break; + + case ETHPORT_E_FWRESP_UP_OK: + /* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */ + bna_bfi_ethport_down(ethport); + break; + + case 
ETHPORT_E_FWRESP_UP_FAIL: + case ETHPORT_E_FWRESP_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_init(struct bna_ethport *ethport, struct bna *bna) +{ + ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED); + ethport->bna = bna; + + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn = bnad_cb_ethport_link_status; + + ethport->rx_started_count = 0; + + ethport->stop_cbfn = NULL; + ethport->adminup_cbfn = NULL; + + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); +} + +static void +bna_ethport_uninit(struct bna_ethport *ethport) +{ + ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP; + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + + ethport->bna = NULL; +} + +static void +bna_ethport_start(struct bna_ethport *ethport) +{ + bfa_fsm_send_event(ethport, ETHPORT_E_START); +} + +static void +bna_enet_cb_ethport_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +static void +bna_ethport_stop(struct bna_ethport *ethport) +{ + ethport->stop_cbfn = bna_enet_cb_ethport_stopped; + bfa_fsm_send_event(ethport, ETHPORT_E_STOP); +} + +static void +bna_ethport_fail(struct bna_ethport *ethport) +{ + /* Reset the physical port status to enabled */ + ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport->link_status != BNA_LINK_DOWN) { + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); + } + bfa_fsm_send_event(ethport, ETHPORT_E_FAIL); +} + +/* Should be called only when ethport is disabled */ +void +bna_ethport_cb_rx_started(struct bna_ethport *ethport) +{ + ethport->rx_started_count++; + + if (ethport->rx_started_count == 1) { + ethport->flags |= BNA_ETHPORT_F_RX_STARTED; + + if (ethport_can_be_up(ethport)) + bfa_fsm_send_event(ethport, ETHPORT_E_UP); + } +} + +void +bna_ethport_cb_rx_stopped(struct bna_ethport *ethport) +{ + int ethport_up = ethport_is_up(ethport); + + ethport->rx_started_count--; + + if (ethport->rx_started_count == 0) { + ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED; + + if (ethport_up) + bfa_fsm_send_event(ethport, ETHPORT_E_DOWN); + } +} + +/* ENET */ + +#define bna_enet_chld_start(enet) \ +do { \ + enum bna_tx_type tx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bna_ethport_start(&(enet)->bna->ethport); \ + bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \ + bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \ +} while (0) + +#define bna_enet_chld_stop(enet) \ +do { \ + enum bna_tx_type tx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? 
\ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_ethport_stop(&(enet)->bna->ethport); \ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \ + bfa_wc_wait(&(enet)->chld_stop_wc); \ +} while (0) + +#define bna_enet_chld_fail(enet) \ +do { \ + bna_ethport_fail(&(enet)->bna->ethport); \ + bna_tx_mod_fail(&(enet)->bna->tx_mod); \ + bna_rx_mod_fail(&(enet)->bna->rx_mod); \ +} while (0) + +#define bna_enet_rx_start(enet) \ +do { \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \ +} while (0) + +#define bna_enet_rx_stop(enet) \ +do { \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \ + bfa_wc_wait(&(enet)->chld_stop_wc); \ +} while (0) + +#define call_enet_stop_cbfn(enet) \ +do { \ + if ((enet)->stop_cbfn) { \ + void (*cbfn)(void *); \ + void *cbarg; \ + cbfn = (enet)->stop_cbfn; \ + cbarg = (enet)->stop_cbarg; \ + (enet)->stop_cbfn = NULL; \ + (enet)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_enet_pause_cbfn(enet) \ +do { \ + if ((enet)->pause_cbfn) { \ + void (*cbfn)(struct bnad *); \ + cbfn = (enet)->pause_cbfn; \ + (enet)->pause_cbfn = NULL; \ + cbfn((enet)->bna->bnad); \ + } \ +} while (0) + +#define call_enet_mtu_cbfn(enet) \ +do { \ + if ((enet)->mtu_cbfn) { \ + void (*cbfn)(struct bnad *); \ + cbfn = (enet)->mtu_cbfn; \ + (enet)->mtu_cbfn = NULL; \ + cbfn((enet)->bna->bnad); \ + } \ +} while (0) + +static void bna_enet_cb_chld_stopped(void *arg); +static void bna_bfi_pause_set(struct bna_enet *enet); + +bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, started, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet, + enum bna_enet_event); + +static void +bna_enet_sm_stopped_entry(struct bna_enet *enet) +{ + call_enet_pause_cbfn(enet); + call_enet_mtu_cbfn(enet); + call_enet_stop_cbfn(enet); +} + +static void +bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event) +{ + switch (event) { + case ENET_E_START: + bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait); + break; + + case ENET_E_STOP: + call_enet_stop_cbfn(enet); + break; + + case ENET_E_FAIL: + /* No-op */ + break; + + case ENET_E_PAUSE_CFG: + call_enet_pause_cbfn(enet); + break; + + case ENET_E_MTU_CFG: + call_enet_mtu_cbfn(enet); + break; + + case ENET_E_CHLD_STOPPED: + /** + * This event is received due to Ethport, Tx and Rx objects + * failing + */ + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet) +{ + bna_bfi_pause_set(enet); +} + +static void 
+bna_enet_sm_pause_init_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait); + break; + + case ENET_E_FAIL: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + case ENET_E_PAUSE_CFG: + enet->flags |= BNA_ENET_F_PAUSE_CHANGED; + break; + + case ENET_E_MTU_CFG: + /* No-op */ + break; + + case ENET_E_FWRESP_PAUSE: + if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bna_bfi_pause_set(enet); + } else { + bfa_fsm_set_state(enet, bna_enet_sm_started); + bna_enet_chld_start(enet); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; +} + +static void +bna_enet_sm_last_resp_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + case ENET_E_FWRESP_PAUSE: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_started_entry(struct bna_enet *enet) +{ + /** + * NOTE: Do not call bna_enet_chld_start() here, since it will be + * inadvertently called during cfg_wait->started transition as well + */ + call_enet_pause_cbfn(enet); + call_enet_mtu_cbfn(enet); +} + +static void +bna_enet_sm_started(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait); + break; + + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_PAUSE_CFG: + bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait); + bna_bfi_pause_set(enet); + break; + + case ENET_E_MTU_CFG: + bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait); + bna_enet_rx_stop(enet); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_cfg_wait_entry(struct bna_enet *enet) +{ +} + +static void +bna_enet_sm_cfg_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait); + break; + + case ENET_E_FAIL: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_PAUSE_CFG: + enet->flags |= BNA_ENET_F_PAUSE_CHANGED; + break; + + case ENET_E_MTU_CFG: + enet->flags |= BNA_ENET_F_MTU_CHANGED; + break; + + case ENET_E_CHLD_STOPPED: + bna_enet_rx_start(enet); + /* Fall through */ + case ENET_E_FWRESP_PAUSE: + if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bna_bfi_pause_set(enet); + } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) { + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bna_enet_rx_stop(enet); + } else { + bfa_fsm_set_state(enet, bna_enet_sm_started); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; +} + +static void +bna_enet_sm_cfg_stop_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case 
ENET_E_FWRESP_PAUSE: + case ENET_E_CHLD_STOPPED: + bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet) +{ + bna_enet_chld_stop(enet); +} + +static void +bna_enet_sm_chld_stop_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_CHLD_STOPPED: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_pause_set(struct bna_enet *enet) +{ + struct bfi_enet_set_pause_req *pause_req = &enet->pause_req; + + bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0); + pause_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req))); + pause_req->tx_pause = enet->pause_config.tx_pause; + pause_req->rx_pause = enet->pause_config.rx_pause; + + bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_set_pause_req), &pause_req->mh); + bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd); +} + +static void +bna_enet_cb_chld_stopped(void *arg) +{ + struct bna_enet *enet = (struct bna_enet *)arg; + + bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED); +} + +static void +bna_enet_init(struct bna_enet *enet, struct bna *bna) +{ + enet->bna = bna; + enet->flags = 0; + enet->mtu = 0; + enet->type = BNA_ENET_T_REGULAR; + + enet->stop_cbfn = NULL; + enet->stop_cbarg = NULL; + + enet->pause_cbfn = NULL; + + enet->mtu_cbfn = NULL; + + bfa_fsm_set_state(enet, bna_enet_sm_stopped); +} + +static void +bna_enet_uninit(struct bna_enet *enet) +{ + enet->flags = 0; + + enet->bna = NULL; +} + +static void +bna_enet_start(struct bna_enet *enet) +{ + enet->flags |= BNA_ENET_F_IOCETH_READY; + if (enet->flags & BNA_ENET_F_ENABLED) + bfa_fsm_send_event(enet, ENET_E_START); +} + +static void +bna_ioceth_cb_enet_stopped(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED); +} + +static void +bna_enet_stop(struct bna_enet *enet) +{ + enet->stop_cbfn = bna_ioceth_cb_enet_stopped; + enet->stop_cbarg = &enet->bna->ioceth; + + enet->flags &= ~BNA_ENET_F_IOCETH_READY; + bfa_fsm_send_event(enet, ENET_E_STOP); +} + +static void +bna_enet_fail(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_IOCETH_READY; + bfa_fsm_send_event(enet, ENET_E_FAIL); +} + +void +bna_enet_cb_tx_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +void +bna_enet_cb_rx_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +int +bna_enet_mtu_get(struct bna_enet *enet) +{ + return enet->mtu; +} + +void +bna_enet_enable(struct bna_enet *enet) +{ + if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped) + return; + + enet->flags |= BNA_ENET_F_ENABLED; + + if (enet->flags & BNA_ENET_F_IOCETH_READY) + bfa_fsm_send_event(enet, ENET_E_START); +} + +void +bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type, + void (*cbfn)(void *)) +{ + if (type == BNA_SOFT_CLEANUP) { + (*cbfn)(enet->bna->bnad); + return; + } + + enet->stop_cbfn = cbfn; + enet->stop_cbarg = enet->bna->bnad; + + enet->flags &= ~BNA_ENET_F_ENABLED; + + bfa_fsm_send_event(enet, ENET_E_STOP); +} + +void +bna_enet_pause_config(struct bna_enet *enet, + struct bna_pause_config *pause_config, + void (*cbfn)(struct bnad *)) +{ + enet->pause_config = *pause_config; + + enet->pause_cbfn = 
cbfn; + + bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG); +} + +void +bna_enet_mtu_set(struct bna_enet *enet, int mtu, + void (*cbfn)(struct bnad *)) +{ + enet->mtu = mtu; + + enet->mtu_cbfn = cbfn; + + bfa_fsm_send_event(enet, ENET_E_MTU_CFG); +} + +void +bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac) +{ + *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc); +} + +/* IOCETH */ + +#define enable_mbox_intr(_ioceth) \ +do { \ + u32 intr_status; \ + bna_intr_status_get((_ioceth)->bna, intr_status); \ + bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \ + bna_mbox_intr_enable((_ioceth)->bna); \ +} while (0) + +#define disable_mbox_intr(_ioceth) \ +do { \ + bna_mbox_intr_disable((_ioceth)->bna); \ + bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \ +} while (0) + +#define call_ioceth_stop_cbfn(_ioceth) \ +do { \ + if ((_ioceth)->stop_cbfn) { \ + void (*cbfn)(struct bnad *); \ + struct bnad *cbarg; \ + cbfn = (_ioceth)->stop_cbfn; \ + cbarg = (_ioceth)->stop_cbarg; \ + (_ioceth)->stop_cbfn = NULL; \ + (_ioceth)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define bna_stats_mod_uninit(_stats_mod) \ +do { \ +} while (0) + +#define bna_stats_mod_start(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = true; \ +} while (0) + +#define bna_stats_mod_stop(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = false; \ +} while (0) + +#define bna_stats_mod_fail(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = false; \ + (_stats_mod)->stats_get_busy = false; \ + (_stats_mod)->stats_clr_busy = false; \ +} while (0) + +static void bna_bfi_attr_get(struct bna_ioceth *ioceth); + +bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth, + enum bna_ioceth_event); + +static void +bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth) +{ + call_ioceth_stop_cbfn(ioceth); +} + +static void +bna_ioceth_sm_stopped(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_ENABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait); + bfa_nw_ioc_enable(&ioceth->ioc); + break; + + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth) +{ + /** + * Do not call bfa_nw_ioc_enable() here. It must be called in the + * previous state due to failed -> ioc_ready_wait transition. 
+ */ +} + +static void +bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + case IOCETH_E_IOC_READY: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth) +{ + bna_bfi_attr_get(ioceth); +} + +static void +bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + case IOCETH_E_ENET_ATTR_RESP: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth) +{ + bna_enet_start(&ioceth->bna->enet); + bna_stats_mod_start(&ioceth->bna->stats_mod); + bnad_cb_ioceth_ready(ioceth->bna->bnad); +} + +static void +bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bna_enet_fail(&ioceth->bna->enet); + bna_stats_mod_fail(&ioceth->bna->stats_mod); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth) +{ +} + +static void +bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_FAILED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + disable_mbox_intr(ioceth); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_ENET_ATTR_RESP: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth) +{ + bna_stats_mod_stop(&ioceth->bna->stats_mod); + bna_enet_stop(&ioceth->bna->enet); +} + +static void +bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_FAILED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + disable_mbox_intr(ioceth); + bna_enet_fail(&ioceth->bna->enet); + bna_stats_mod_fail(&ioceth->bna->stats_mod); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_ENET_STOPPED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth) +{ +} + +static void +bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_DISABLED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); + break; + + case IOCETH_E_ENET_STOPPED: + /* This event is received due to enet failing */ + /* No-op */ + break; + + 
default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth) +{ + bnad_cb_ioceth_failed(ioceth->bna->bnad); +} + +static void +bna_ioceth_sm_failed(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait); + break; + + case IOCETH_E_IOC_FAILED: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_attr_get(struct bna_ioceth *ioceth) +{ + struct bfi_enet_attr_req *attr_req = &ioceth->attr_req; + + bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_GET_ATTR_REQ, 0, 0); + attr_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req))); + bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_attr_req), &attr_req->mh); + bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd); +} + +/* IOC callback functions */ + +static void +bna_cb_ioceth_enable(void *arg, enum bfa_status error) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + if (error) + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); + else + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY); +} + +static void +bna_cb_ioceth_disable(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED); +} + +static void +bna_cb_ioceth_hbfail(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); +} + +static void +bna_cb_ioceth_reset(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET); +} + +static struct bfa_ioc_cbfn bna_ioceth_cbfn = { + bna_cb_ioceth_enable, + bna_cb_ioceth_disable, + bna_cb_ioceth_hbfail, + bna_cb_ioceth_reset +}; + +static void bna_attr_init(struct bna_ioceth *ioceth) +{ + ioceth->attr.num_txq = BFI_ENET_DEF_TXQ; + ioceth->attr.num_rxp = BFI_ENET_DEF_RXP; + ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM; + ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ; + ioceth->attr.fw_query_complete = false; +} + +static void +bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, + struct bna_res_info *res_info) +{ + u64 dma; + u8 *kva; + + ioceth->bna = bna; + + /** + * Attach IOC and claim: + * 1. DMA memory for IOC attributes + * 2. Kernel memory for FW trace + */ + bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn); + bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH); + + BNA_GET_DMA_ADDR( + &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma); + kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva; + bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma); + + kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; + bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva); + + /** + * Attach common modules (Diag, SFP, CEE, Port) and claim respective + * DMA memory. 
+ */ + BNA_GET_DMA_ADDR( + &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); + kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; + bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna); + bfa_nw_cee_mem_claim(&bna->cee, kva, dma); + kva += bfa_nw_cee_meminfo(); + dma += bfa_nw_cee_meminfo(); + + bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna); + bfa_nw_flash_memclaim(&bna->flash, kva, dma); + kva += bfa_nw_flash_meminfo(); + dma += bfa_nw_flash_meminfo(); + + bfa_msgq_attach(&bna->msgq, &ioceth->ioc); + bfa_msgq_memclaim(&bna->msgq, kva, dma); + bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna); + kva += bfa_msgq_meminfo(); + dma += bfa_msgq_meminfo(); + + ioceth->stop_cbfn = NULL; + ioceth->stop_cbarg = NULL; + + bna_attr_init(ioceth); + + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); +} + +static void +bna_ioceth_uninit(struct bna_ioceth *ioceth) +{ + bfa_nw_ioc_detach(&ioceth->ioc); + + ioceth->bna = NULL; +} + +void +bna_ioceth_enable(struct bna_ioceth *ioceth) +{ + if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) { + bnad_cb_ioceth_ready(ioceth->bna->bnad); + return; + } + + if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped) + bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE); +} + +void +bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type) +{ + if (type == BNA_SOFT_CLEANUP) { + bnad_cb_ioceth_disabled(ioceth->bna->bnad); + return; + } + + ioceth->stop_cbfn = bnad_cb_ioceth_disabled; + ioceth->stop_cbarg = ioceth->bna->bnad; + + bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE); +} + +static void +bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + ucam_mod->ucmac = (struct bna_mac *) + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&ucam_mod->free_q); + for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) { + bfa_q_qe_init(&ucam_mod->ucmac[i].qe); + list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); + } + + /* A separate queue to allow synchronous setting of a list of MACs */ + INIT_LIST_HEAD(&ucam_mod->del_q); + for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) { + bfa_q_qe_init(&ucam_mod->ucmac[i].qe); + list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q); + } + + ucam_mod->bna = bna; +} + +static void +bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) +{ + struct list_head *qe; + int i; + + i = 0; + list_for_each(qe, &ucam_mod->free_q) + i++; + + i = 0; + list_for_each(qe, &ucam_mod->del_q) + i++; + + ucam_mod->bna = NULL; +} + +static void +bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + mcam_mod->mcmac = (struct bna_mac *) + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&mcam_mod->free_q); + for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) { + bfa_q_qe_init(&mcam_mod->mcmac[i].qe); + list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q); + } + + mcam_mod->mchandle = (struct bna_mcam_handle *) + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&mcam_mod->free_handle_q); + for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) { + bfa_q_qe_init(&mcam_mod->mchandle[i].qe); + list_add_tail(&mcam_mod->mchandle[i].qe, + &mcam_mod->free_handle_q); + } + + /* A separate queue to allow synchronous setting of a list of MACs */ + INIT_LIST_HEAD(&mcam_mod->del_q); + for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) { + bfa_q_qe_init(&mcam_mod->mcmac[i].qe); + 
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q); + } + + mcam_mod->bna = bna; +} + +static void +bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod) +{ + struct list_head *qe; + int i; + + i = 0; + list_for_each(qe, &mcam_mod->free_q) i++; + + i = 0; + list_for_each(qe, &mcam_mod->del_q) i++; + + i = 0; + list_for_each(qe, &mcam_mod->free_handle_q) i++; + + mcam_mod->bna = NULL; +} + +static void +bna_bfi_stats_get(struct bna *bna) +{ + struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get; + + bna->stats_mod.stats_get_busy = true; + + bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_STATS_GET_REQ, 0, 0); + stats_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req))); + stats_req->stats_mask = htons(BFI_ENET_STATS_ALL); + stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask); + stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask); + stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb; + stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb; + + bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL, + sizeof(struct bfi_enet_stats_req), &stats_req->mh); + bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd); +} + +void +bna_res_req(struct bna_res_info *res_info) +{ + /* DMA memory for COMMON_MODULE */ + res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( + (bfa_nw_cee_meminfo() + + bfa_nw_flash_meminfo() + + bfa_msgq_meminfo()), PAGE_SIZE); + + /* DMA memory for retrieving IOC attributes */ + res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len = + ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE); + + /* Virtual memory for retreiving fw_trc */ + res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN; + + /* DMA memory for retreiving stats */ + res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len = + ALIGN(sizeof(struct bfi_enet_stats), + PAGE_SIZE); +} + +void +bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info) +{ + struct bna_attr *attr = &bna->ioceth.attr; + + /* Virtual memory for Tx objects - stored by Tx module */ + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len = + attr->num_txq * sizeof(struct bna_tx); + + /* Virtual memory for TxQ - stored by Tx module */ + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len = + attr->num_txq * sizeof(struct bna_txq); + + /* Virtual memory for Rx objects - stored by Rx module */ + 
res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len = + attr->num_rxp * sizeof(struct bna_rx); + + /* Virtual memory for RxPath - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len = + attr->num_rxp * sizeof(struct bna_rxp); + + /* Virtual memory for RxQ - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len = + (attr->num_rxp * 2) * sizeof(struct bna_rxq); + + /* Virtual memory for Unicast MAC address - stored by ucam module */ + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = + (attr->num_ucmac * 2) * sizeof(struct bna_mac); + + /* Virtual memory for Multicast MAC address - stored by mcam module */ + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = + (attr->num_mcmac * 2) * sizeof(struct bna_mac); + + /* Virtual memory for Multicast handle - stored by mcam module */ + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len = + attr->num_mcmac * sizeof(struct bna_mcam_handle); +} + +void +bna_init(struct bna *bna, struct bnad *bnad, + struct bfa_pcidev *pcidev, struct bna_res_info *res_info) +{ + bna->bnad = bnad; + bna->pcidev = *pcidev; + + bna->stats.hw_stats_kva = (struct bfi_enet_stats *) + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva; + bna->stats.hw_stats_dma.msb = + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb; + bna->stats.hw_stats_dma.lsb = + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb; + + bna_reg_addr_init(bna, &bna->pcidev); + + /* Also initializes diag, cee, sfp, phy_port, msgq */ + bna_ioceth_init(&bna->ioceth, bna, res_info); + + bna_enet_init(&bna->enet, bna); + bna_ethport_init(&bna->ethport, bna); +} + +void +bna_mod_init(struct bna *bna, struct bna_res_info *res_info) +{ + bna_tx_mod_init(&bna->tx_mod, bna, res_info); + + bna_rx_mod_init(&bna->rx_mod, bna, res_info); + + bna_ucam_mod_init(&bna->ucam_mod, bna, res_info); + + bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); + + bna->default_mode_rid = BFI_INVALID_RID; + bna->promisc_rid = BFI_INVALID_RID; + + bna->mod_flags |= BNA_MOD_F_INIT_DONE; +} + +void +bna_uninit(struct bna *bna) +{ + if (bna->mod_flags & BNA_MOD_F_INIT_DONE) { + bna_mcam_mod_uninit(&bna->mcam_mod); + 
bna_ucam_mod_uninit(&bna->ucam_mod); + bna_rx_mod_uninit(&bna->rx_mod); + bna_tx_mod_uninit(&bna->tx_mod); + bna->mod_flags &= ~BNA_MOD_F_INIT_DONE; + } + + bna_stats_mod_uninit(&bna->stats_mod); + bna_ethport_uninit(&bna->ethport); + bna_enet_uninit(&bna->enet); + + bna_ioceth_uninit(&bna->ioceth); + + bna->bnad = NULL; +} + +int +bna_num_txq_set(struct bna *bna, int num_txq) +{ + if (bna->ioceth.attr.fw_query_complete && + (num_txq <= bna->ioceth.attr.num_txq)) { + bna->ioceth.attr.num_txq = num_txq; + return BNA_CB_SUCCESS; + } + + return BNA_CB_FAIL; +} + +int +bna_num_rxp_set(struct bna *bna, int num_rxp) +{ + if (bna->ioceth.attr.fw_query_complete && + (num_rxp <= bna->ioceth.attr.num_rxp)) { + bna->ioceth.attr.num_rxp = num_rxp; + return BNA_CB_SUCCESS; + } + + return BNA_CB_FAIL; +} + +struct bna_mac * +bna_cam_mod_mac_get(struct list_head *head) +{ + struct list_head *qe; + + if (list_empty(head)) + return NULL; + + bfa_q_deq(head, &qe); + return (struct bna_mac *)qe; +} + +void +bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac) +{ + list_add_tail(&mac->qe, tail); +} + +struct bna_mcam_handle * +bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod) +{ + struct list_head *qe; + + if (list_empty(&mcam_mod->free_handle_q)) + return NULL; + + bfa_q_deq(&mcam_mod->free_handle_q, &qe); + + return (struct bna_mcam_handle *)qe; +} + +void +bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, + struct bna_mcam_handle *handle) +{ + list_add_tail(&handle->qe, &mcam_mod->free_handle_q); +} + +void +bna_hw_stats_get(struct bna *bna) +{ + if (!bna->stats_mod.ioc_ready) { + bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); + return; + } + if (bna->stats_mod.stats_get_busy) { + bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats); + return; + } + + bna_bfi_stats_get(bna); +} diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h new file mode 100644 index 000000000..174af0e9d --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -0,0 +1,411 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* File for interrupt macros and functions */ + +#ifndef __BNA_HW_DEFS_H__ +#define __BNA_HW_DEFS_H__ + +#include "bfi_reg.h" + +/* SW imposed limits */ + +#define BFI_ENET_DEF_TXQ 1 +#define BFI_ENET_DEF_RXP 1 +#define BFI_ENET_DEF_UCAM 1 +#define BFI_ENET_DEF_RITSZ 1 + +#define BFI_ENET_MAX_MCAM 256 + +#define BFI_INVALID_RID -1 + +#define BFI_IBIDX_SIZE 4 + +#define BFI_VLAN_WORD_SHIFT 5 /* 32 bits */ +#define BFI_VLAN_WORD_MASK 0x1F +#define BFI_VLAN_BLOCK_SHIFT 9 /* 512 bits */ +#define BFI_VLAN_BMASK_ALL 0xFF + +#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */ +#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */ +#define BFI_MAX_INTERPKT_COUNT 0xFF +#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */ +#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */ +#define BFI_TX_INTERPKT_COUNT 12 /* Pkt Cnt = 12 */ +#define BFI_TX_INTERPKT_TIMEO 15 /* 15 * 0.5 = 7.5us */ +#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */ +#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */ +#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */ + +#define BFI_TXQ_WI_SIZE 64 /* bytes */ +#define BFI_RXQ_WI_SIZE 8 /* bytes */ +#define BFI_CQ_WI_SIZE 16 /* bytes */ +#define BFI_TX_MAX_WRR_QUOTA 0xFFF + +#define BFI_TX_MAX_VECTORS_PER_WI 4 +#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF +#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF +#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF + +/* Small Q buffer size */ +#define BFI_SMALL_RXBUF_SIZE 128 + +#define BFI_TX_MAX_PRIO 8 +#define BFI_TX_PRIO_MAP_ALL 0xFF + +/* + * + * Register definitions and macros + * + */ + +#define BNA_PCI_REG_CT_ADDRSZ (0x40000) + +#define ct_reg_addr_init(_bna, _pcidev) \ +{ \ + struct bna_reg_offset reg_offset[] = \ + {{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK}, \ + {HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, \ + {HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, \ + {HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK} }; \ + \ + (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ + reg_offset[(_pcidev)->pci_func].fn_int_status;\ + (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ + reg_offset[(_pcidev)->pci_func].fn_int_mask;\ +} + +#define ct_bit_defn_init(_bna, _pcidev) \ +{ \ + (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0 | \ + __HFN_INT_MBOX_LPU1); \ + (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0 | \ + __HFN_INT_MBOX_LPU1); \ + (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \ + (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \ + (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \ + (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \ +} + +#define ct2_reg_addr_init(_bna, _pcidev) \ +{ \ + (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ + CT2_HOSTFN_INT_STATUS; \ + (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ + CT2_HOSTFN_INTR_MASK; \ +} + +#define ct2_bit_defn_init(_bna, _pcidev) \ +{ \ + (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ + __HFN_INT_MBOX_LPU1_CT2); \ + (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ + __HFN_INT_MBOX_LPU1_CT2); \ + (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK_CT2); \ + (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK_CT2); \ + (_bna)->bits.halt_status_bits = __HFN_INT_CPQ_HALT_CT2; \ + (_bna)->bits.halt_mask_bits = __HFN_INT_CPQ_HALT_CT2; \ +} + +#define bna_reg_addr_init(_bna, _pcidev) \ +{ \ + switch ((_pcidev)->device_id) { \ + case PCI_DEVICE_ID_BROCADE_CT: \ + ct_reg_addr_init((_bna), (_pcidev)); \ + ct_bit_defn_init((_bna), (_pcidev)); \ + break; \ + case 
BFA_PCI_DEVICE_ID_CT2: \ + ct2_reg_addr_init((_bna), (_pcidev)); \ + ct2_bit_defn_init((_bna), (_pcidev)); \ + break; \ + } \ +} + +#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) + +/* Interrupt related bits, flags and macros */ + +#define IB_STATUS_BITS 0x0000ffff + +#define BNA_IS_MBOX_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.mbox_status_bits) + +#define BNA_IS_HALT_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.halt_status_bits) + +#define BNA_IS_ERR_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.error_status_bits) + +#define BNA_IS_MBOX_ERR_INTR(_bna, _intr_status) \ + (BNA_IS_MBOX_INTR(_bna, _intr_status) | \ + BNA_IS_ERR_INTR(_bna, _intr_status)) + +#define BNA_IS_INTX_DATA_INTR(_intr_status) \ + ((_intr_status) & IB_STATUS_BITS) + +#define bna_halt_clear(_bna) \ +do { \ + u32 init_halt; \ + init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ + init_halt &= ~__FW_INIT_HALT_P; \ + writel(init_halt, (_bna)->ioceth.ioc.ioc_regs.ll_halt); \ + init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ +} while (0) + +#define bna_intx_disable(_bna, _cur_mask) \ +{ \ + (_cur_mask) = readl((_bna)->regs.fn_int_mask); \ + writel(0xffffffff, (_bna)->regs.fn_int_mask); \ +} + +#define bna_intx_enable(bna, new_mask) \ + writel((new_mask), (bna)->regs.fn_int_mask) +#define bna_mbox_intr_disable(bna) \ +do { \ + u32 mask; \ + mask = readl((bna)->regs.fn_int_mask); \ + writel((mask | (bna)->bits.mbox_mask_bits | \ + (bna)->bits.error_mask_bits), (bna)->regs.fn_int_mask); \ + mask = readl((bna)->regs.fn_int_mask); \ +} while (0) + +#define bna_mbox_intr_enable(bna) \ +do { \ + u32 mask; \ + mask = readl((bna)->regs.fn_int_mask); \ + writel((mask & ~((bna)->bits.mbox_mask_bits | \ + (bna)->bits.error_mask_bits)), (bna)->regs.fn_int_mask);\ + mask = readl((bna)->regs.fn_int_mask); \ +} while (0) + +#define bna_intr_status_get(_bna, _status) \ +{ \ + (_status) = readl((_bna)->regs.fn_int_status); \ + if (_status) { \ + writel(((_status) & ~(_bna)->bits.mbox_status_bits), \ + (_bna)->regs.fn_int_status); \ + } \ +} + +/* + * MAX ACK EVENTS : No. of acks that can be accumulated in driver, + * before acking to h/w. The no. of bits is 16 in the doorbell register, + * however we keep this limited to 15 bits. + * This is because around the edge of 64K boundary (16 bits), one + * single poll can make the accumulated ACK counter cross the 64K boundary, + * causing problems, when we try to ack with a value greater than 64K. + * 15 bits (32K) should be large enough to accumulate, anyways, and the max. + * acked events to h/w can be (32K + max poll weight) (currently 64). 
+ */ +#define BNA_IB_MAX_ACK_EVENTS (1 << 15) + +/* These macros build the data portion of the TxQ/RxQ doorbell */ +#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi)) +#define BNA_DOORBELL_Q_STOP (0x40000000) + +/* These macros build the data portion of the IB doorbell */ +#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \ + (0x80000000 | ((_timeout) << 16) | (_events)) +#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000) + +/* Set the coalescing timer for the given ib */ +#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \ + ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0)); + +/* Acks 'events' # of events for a given ib while disabling interrupts */ +#define bna_ib_ack_disable_irq(_i_dbell, _events) \ + (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \ + (_i_dbell)->doorbell_addr)); + +/* Acks 'events' # of events for a given ib */ +#define bna_ib_ack(_i_dbell, _events) \ + (writel(((_i_dbell)->doorbell_ack | (_events)), \ + (_i_dbell)->doorbell_addr)); + +#define bna_ib_start(_bna, _ib, _is_regular) \ +{ \ + u32 intx_mask; \ + struct bna_ib *ib = _ib; \ + if ((ib->intr_type == BNA_INTR_T_INTX)) { \ + bna_intx_disable((_bna), intx_mask); \ + intx_mask &= ~(ib->intr_vector); \ + bna_intx_enable((_bna), intx_mask); \ + } \ + bna_ib_coalescing_timer_set(&ib->door_bell, \ + ib->coalescing_timeo); \ + if (_is_regular) \ + bna_ib_ack(&ib->door_bell, 0); \ +} + +#define bna_ib_stop(_bna, _ib) \ +{ \ + u32 intx_mask; \ + struct bna_ib *ib = _ib; \ + writel(BNA_DOORBELL_IB_INT_DISABLE, \ + ib->door_bell.doorbell_addr); \ + if (ib->intr_type == BNA_INTR_T_INTX) { \ + bna_intx_disable((_bna), intx_mask); \ + intx_mask |= ib->intr_vector; \ + bna_intx_enable((_bna), intx_mask); \ + } \ +} + +#define bna_txq_prod_indx_doorbell(_tcb) \ + (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \ + (_tcb)->q_dbell)); + +#define bna_rxq_prod_indx_doorbell(_rcb) \ + (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ + (_rcb)->q_dbell)); + +/* TxQ, RxQ, CQ related bits, offsets, macros */ + +/* TxQ Entry Opcodes */ +#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ +#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */ +#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */ + +/* TxQ Entry Control Flags */ +#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8) +#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5) +#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4) +#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3) +#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2) +#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1) +#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0) + +#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ + (((_hdr_size) << 10) | ((_offset) & 0x3FF)) + +/* + * Completion Q defines + */ +/* CQ Entry Flags */ +#define BNA_CQ_EF_MAC_ERROR (1 << 0) +#define BNA_CQ_EF_FCS_ERROR (1 << 1) +#define BNA_CQ_EF_TOO_LONG (1 << 2) +#define BNA_CQ_EF_FC_CRC_OK (1 << 3) + +#define BNA_CQ_EF_RSVD1 (1 << 4) +#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5) +#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6) +#define BNA_CQ_EF_HDS_HEADER (1 << 7) + +#define BNA_CQ_EF_UDP (1 << 8) +#define BNA_CQ_EF_TCP (1 << 9) +#define BNA_CQ_EF_IP_OPTIONS (1 << 10) +#define BNA_CQ_EF_IPV6 (1 << 11) + +#define BNA_CQ_EF_IPV4 (1 << 12) +#define BNA_CQ_EF_VLAN (1 << 13) +#define BNA_CQ_EF_RSS (1 << 14) +#define BNA_CQ_EF_RSVD2 (1 << 15) + +#define BNA_CQ_EF_MCAST_MATCH (1 << 16) +#define BNA_CQ_EF_MCAST (1 << 17) +#define BNA_CQ_EF_BCAST (1 << 18) +#define BNA_CQ_EF_REMOTE (1 << 19) + +#define BNA_CQ_EF_LOCAL (1 << 20) +/* CAT2 ASIC does not use bit 21 
as per the SPEC. + * Bit 31 is set in every end of frame completion + */ +#define BNA_CQ_EF_EOP (1 << 31) + +/* Data structures */ + +struct bna_reg_offset { + u32 fn_int_status; + u32 fn_int_mask; +}; + +struct bna_bit_defn { + u32 mbox_status_bits; + u32 mbox_mask_bits; + u32 error_status_bits; + u32 error_mask_bits; + u32 halt_status_bits; + u32 halt_mask_bits; +}; + +struct bna_reg { + void __iomem *fn_int_status; + void __iomem *fn_int_mask; +}; + +/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */ +struct bna_dma_addr { + u32 msb; + u32 lsb; +}; + +struct bna_txq_wi_vector { + u16 reserved; + u16 length; /* Only 14 LSB are valid */ + struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ +}; + +/* TxQ Entry Structure + * + * BEWARE: Load values into this structure with correct endianness. + */ +struct bna_txq_entry { + union { + struct { + u8 reserved; + u8 num_vectors; /* number of vectors present */ + u16 opcode; /* Either */ + /* BNA_TXQ_WI_SEND or */ + /* BNA_TXQ_WI_SEND_LSO */ + u16 flags; /* OR of all the flags */ + u16 l4_hdr_size_n_offset; + u16 vlan_tag; + u16 lso_mss; /* Only 14 LSB are valid */ + u32 frame_length; /* Only 24 LSB are valid */ + } wi; + + struct { + u16 reserved; + u16 opcode; /* Must be */ + /* BNA_TXQ_WI_EXTENSION */ + u32 reserved2[3]; /* Place holder for */ + /* removed vector (12 bytes) */ + } wi_ext; + } hdr; + struct bna_txq_wi_vector vector[4]; +}; + +/* RxQ Entry Structure */ +struct bna_rxq_entry { /* Rx-Buffer */ + struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */ +}; + +/* CQ Entry Structure */ +struct bna_cq_entry { + u32 flags; + u16 vlan_tag; + u16 length; + u32 rss_hash; + u8 valid; + u8 reserved1; + u8 reserved2; + u8 rxq_id; +}; + +#endif /* __BNA_HW_DEFS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c new file mode 100644 index 000000000..8ab3a5f62 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -0,0 +1,4018 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include "bna.h" +#include "bfi.h" + +/* IB */ +static void +bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) +{ + ib->coalescing_timeo = coalescing_timeo; + ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK( + (u32)ib->coalescing_timeo, 0); +} + +/* RXF */ + +#define bna_rxf_vlan_cfg_soft_reset(rxf) \ +do { \ + (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \ + (rxf)->vlan_strip_pending = true; \ +} while (0) + +#define bna_rxf_rss_cfg_soft_reset(rxf) \ +do { \ + if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \ + (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \ + BNA_RSS_F_CFG_PENDING | \ + BNA_RSS_F_STATUS_PENDING); \ +} while (0) + +static int bna_rxf_cfg_apply(struct bna_rxf *rxf); +static void bna_rxf_cfg_reset(struct bna_rxf *rxf); +static int bna_rxf_fltr_clear(struct bna_rxf *rxf); +static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); +static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); +static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); + +bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf, + enum bna_rxf_event); + +static void +bna_rxf_sm_stopped_entry(struct bna_rxf *rxf) +{ + call_rxf_stop_cbfn(rxf); +} + +static void +bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_START: + if (rxf->flags & BNA_RXF_F_PAUSED) { + bfa_fsm_set_state(rxf, bna_rxf_sm_paused); + call_rxf_start_cbfn(rxf); + } else + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + case RXF_E_STOP: + call_rxf_stop_cbfn(rxf); + break; + + case RXF_E_FAIL: + /* No-op */ + break; + + case RXF_E_CONFIG: + call_rxf_cam_fltr_cbfn(rxf); + break; + + case RXF_E_PAUSE: + rxf->flags |= BNA_RXF_F_PAUSED; + call_rxf_pause_cbfn(rxf); + break; + + case RXF_E_RESUME: + rxf->flags &= ~BNA_RXF_F_PAUSED; + call_rxf_resume_cbfn(rxf); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_paused_entry(struct bna_rxf *rxf) +{ + call_rxf_pause_cbfn(rxf); +} + +static void +bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + case RXF_E_FAIL: + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + call_rxf_cam_fltr_cbfn(rxf); + break; + + case RXF_E_RESUME: + rxf->flags &= ~BNA_RXF_F_PAUSED; + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf) +{ + if (!bna_rxf_cfg_apply(rxf)) { + /* No more pending config updates */ + bfa_fsm_set_state(rxf, bna_rxf_sm_started); + } +} + +static void +bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + 
bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait); + break; + + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + call_rxf_start_cbfn(rxf); + call_rxf_cam_fltr_cbfn(rxf); + call_rxf_resume_cbfn(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + /* No-op */ + break; + + case RXF_E_PAUSE: + rxf->flags |= BNA_RXF_F_PAUSED; + call_rxf_start_cbfn(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait); + break; + + case RXF_E_FW_RESP: + if (!bna_rxf_cfg_apply(rxf)) { + /* No more pending config updates */ + bfa_fsm_set_state(rxf, bna_rxf_sm_started); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_started_entry(struct bna_rxf *rxf) +{ + call_rxf_start_cbfn(rxf); + call_rxf_cam_fltr_cbfn(rxf); + call_rxf_resume_cbfn(rxf); +} + +static void +bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + case RXF_E_PAUSE: + rxf->flags |= BNA_RXF_F_PAUSED; + if (!bna_rxf_fltr_clear(rxf)) + bfa_fsm_set_state(rxf, bna_rxf_sm_paused); + else + bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf) +{ +} + +static void +bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + call_rxf_pause_cbfn(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_FW_RESP: + if (!bna_rxf_fltr_clear(rxf)) { + /* No more pending CAM entries to clear */ + bfa_fsm_set_state(rxf, bna_rxf_sm_paused); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf) +{ +} + +static void +bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_FAIL: + case RXF_E_FW_RESP: + bna_rxf_cfg_reset(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac, + enum bfi_enet_h2i_msgs req_type) +{ + struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req))); + memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t)); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_ucast_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac) +{ + struct bfi_enet_mcast_add_req *req = + &rxf->bfi_enet_cmd.mcast_add_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ, + 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req))); + memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t)); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_mcast_add_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle) +{ + struct bfi_enet_mcast_del_req *req = + &rxf->bfi_enet_cmd.mcast_del_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ, 
+ 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req))); + req->handle = htons(handle); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_mcast_del_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx) +{ + struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req; + int i; + int j; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req))); + req->block_idx = block_idx; + for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) { + j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) + req->bit_mask[i] = + htonl(rxf->vlan_filter_table[j]); + else + req->bit_mask[i] = 0xFFFFFFFF; + } + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rx_vlan_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_vlan_strip_enable(struct bna_rxf *rxf) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = rxf->vlan_strip_status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rit_cfg(struct bna_rxf *rxf) +{ + struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req))); + req->size = htons(rxf->rit_size); + memcpy(&req->table[0], rxf->rit, rxf->rit_size); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rit_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rss_cfg(struct bna_rxf *rxf) +{ + struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req; + int i; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req))); + 
req->cfg.type = rxf->rss_cfg.hash_type; + req->cfg.mask = rxf->rss_cfg.hash_mask; + for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++) + req->cfg.key[i] = + htonl(rxf->rss_cfg.toeplitz_hash_key[i]); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rss_cfg_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rss_enable(struct bna_rxf *rxf) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = rxf->rss_status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +/* This function gets the multicast MAC that has already been added to CAM */ +static struct bna_mac * +bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr) +{ + struct bna_mac *mac; + struct list_head *qe; + + list_for_each(qe, &rxf->mcast_active_q) { + mac = (struct bna_mac *)qe; + if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr)) + return mac; + } + + list_for_each(qe, &rxf->mcast_pending_del_q) { + mac = (struct bna_mac *)qe; + if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr)) + return mac; + } + + return NULL; +} + +static struct bna_mcam_handle * +bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle) +{ + struct bna_mcam_handle *mchandle; + struct list_head *qe; + + list_for_each(qe, &rxf->mcast_handle_q) { + mchandle = (struct bna_mcam_handle *)qe; + if (mchandle->handle == handle) + return mchandle; + } + + return NULL; +} + +static void +bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle) +{ + struct bna_mac *mcmac; + struct bna_mcam_handle *mchandle; + + mcmac = bna_rxf_mcmac_get(rxf, mac_addr); + mchandle = bna_rxf_mchandle_get(rxf, handle); + if (mchandle == NULL) { + mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod); + mchandle->handle = handle; + mchandle->refcnt = 0; + list_add_tail(&mchandle->qe, &rxf->mcast_handle_q); + } + mchandle->refcnt++; + mcmac->handle = mchandle; +} + +static int +bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac, + enum bna_cleanup_type cleanup) +{ + struct bna_mcam_handle *mchandle; + int ret = 0; + + mchandle = mac->handle; + if (mchandle == NULL) + return ret; + + mchandle->refcnt--; + if (mchandle->refcnt == 0) { + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_del_req(rxf, mchandle->handle); + ret = 1; + } + list_del(&mchandle->qe); + bfa_q_qe_init(&mchandle->qe); + bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle); + } + mac->handle = NULL; + + return ret; +} + +static int +bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf) +{ + struct bna_mac *mac = NULL; + struct list_head *qe; + int ret; + + /* First delete multicast entries to maintain the count */ + while (!list_empty(&rxf->mcast_pending_del_q)) { + bfa_q_deq(&rxf->mcast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP); + bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac); + if (ret) + return ret; + } + + /* Add multicast entries */ + if (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + list_add_tail(&mac->qe, &rxf->mcast_active_q); + bna_bfi_mcast_add_req(rxf, mac); + return 1; + } + + return 0; +} + +static int +bna_rxf_vlan_cfg_apply(struct 
bna_rxf *rxf) +{ + u8 vlan_pending_bitmask; + int block_idx = 0; + + if (rxf->vlan_pending_bitmask) { + vlan_pending_bitmask = rxf->vlan_pending_bitmask; + while (!(vlan_pending_bitmask & 0x1)) { + block_idx++; + vlan_pending_bitmask >>= 1; + } + rxf->vlan_pending_bitmask &= ~(1 << block_idx); + bna_bfi_rx_vlan_filter_set(rxf, block_idx); + return 1; + } + + return 0; +} + +static int +bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct list_head *qe; + struct bna_mac *mac; + int ret; + + /* Throw away delete pending mcast entries */ + while (!list_empty(&rxf->mcast_pending_del_q)) { + bfa_q_deq(&rxf->mcast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + ret = bna_rxf_mcast_del(rxf, mac, cleanup); + bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac); + if (ret) + return ret; + } + + /* Move active mcast entries to pending_add_q */ + while (!list_empty(&rxf->mcast_active_q)) { + bfa_q_deq(&rxf->mcast_active_q, &qe); + bfa_q_qe_init(qe); + list_add_tail(qe, &rxf->mcast_pending_add_q); + mac = (struct bna_mac *)qe; + if (bna_rxf_mcast_del(rxf, mac, cleanup)) + return 1; + } + + return 0; +} + +static int +bna_rxf_rss_cfg_apply(struct bna_rxf *rxf) +{ + if (rxf->rss_pending) { + if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING; + bna_bfi_rit_cfg(rxf); + return 1; + } + + if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING; + bna_bfi_rss_cfg(rxf); + return 1; + } + + if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING; + bna_bfi_rss_enable(rxf); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_cfg_apply(struct bna_rxf *rxf) +{ + if (bna_rxf_ucast_cfg_apply(rxf)) + return 1; + + if (bna_rxf_mcast_cfg_apply(rxf)) + return 1; + + if (bna_rxf_promisc_cfg_apply(rxf)) + return 1; + + if (bna_rxf_allmulti_cfg_apply(rxf)) + return 1; + + if (bna_rxf_vlan_cfg_apply(rxf)) + return 1; + + if (bna_rxf_vlan_strip_cfg_apply(rxf)) + return 1; + + if (bna_rxf_rss_cfg_apply(rxf)) + return 1; + + return 0; +} + +/* Only software reset */ +static int +bna_rxf_fltr_clear(struct bna_rxf *rxf) +{ + if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP)) + return 1; + + return 0; +} + +static void +bna_rxf_cfg_reset(struct bna_rxf *rxf) +{ + bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_vlan_cfg_soft_reset(rxf); + bna_rxf_rss_cfg_soft_reset(rxf); +} + +static void +bna_rit_init(struct bna_rxf *rxf, int rit_size) +{ + struct bna_rx *rx = rxf->rx; + struct bna_rxp *rxp; + struct list_head *qe; + int offset = 0; + + rxf->rit_size = rit_size; + list_for_each(qe, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe; + rxf->rit[offset] = rxp->cq.ccb->id; + offset++; + } + +} + +void +bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +void +bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); + + if (rsp->error) { + /* Clear ucast from cache */ + rxf->ucast_active_set = 0; + } + + 
bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +void +bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_mcast_add_req *req = + &rxf->bfi_enet_cmd.mcast_add_req; + struct bfi_enet_mcast_add_rsp *rsp = + container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh); + + bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr, + ntohs(rsp->handle)); + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +static void +bna_rxf_init(struct bna_rxf *rxf, + struct bna_rx *rx, + struct bna_rx_config *q_config, + struct bna_res_info *res_info) +{ + rxf->rx = rx; + + INIT_LIST_HEAD(&rxf->ucast_pending_add_q); + INIT_LIST_HEAD(&rxf->ucast_pending_del_q); + rxf->ucast_pending_set = 0; + rxf->ucast_active_set = 0; + INIT_LIST_HEAD(&rxf->ucast_active_q); + rxf->ucast_pending_mac = NULL; + + INIT_LIST_HEAD(&rxf->mcast_pending_add_q); + INIT_LIST_HEAD(&rxf->mcast_pending_del_q); + INIT_LIST_HEAD(&rxf->mcast_active_q); + INIT_LIST_HEAD(&rxf->mcast_handle_q); + + if (q_config->paused) + rxf->flags |= BNA_RXF_F_PAUSED; + + rxf->rit = (u8 *) + res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva; + bna_rit_init(rxf, q_config->num_paths); + + rxf->rss_status = q_config->rss_status; + if (rxf->rss_status == BNA_STATUS_T_ENABLED) { + rxf->rss_cfg = q_config->rss_config; + rxf->rss_pending |= BNA_RSS_F_CFG_PENDING; + rxf->rss_pending |= BNA_RSS_F_RIT_PENDING; + rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING; + } + + rxf->vlan_filter_status = BNA_STATUS_T_DISABLED; + memset(rxf->vlan_filter_table, 0, + (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32))); + rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */ + rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; + + rxf->vlan_strip_status = q_config->vlan_strip_status; + + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); +} + +static void +bna_rxf_uninit(struct bna_rxf *rxf) +{ + struct bna_mac *mac; + + rxf->ucast_pending_set = 0; + rxf->ucast_active_set = 0; + + while (!list_empty(&rxf->ucast_pending_add_q)) { + bfa_q_deq(&rxf->ucast_pending_add_q, &mac); + bfa_q_qe_init(&mac->qe); + bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac); + } + + if (rxf->ucast_pending_mac) { + bfa_q_qe_init(&rxf->ucast_pending_mac->qe); + bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), + rxf->ucast_pending_mac); + rxf->ucast_pending_mac = NULL; + } + + while (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &mac); + bfa_q_qe_init(&mac->qe); + bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); + } + + rxf->rxmode_pending = 0; + rxf->rxmode_pending_bitmask = 0; + if (rxf->rx->bna->promisc_rid == rxf->rx->rid) + rxf->rx->bna->promisc_rid = BFI_INVALID_RID; + if (rxf->rx->bna->default_mode_rid == rxf->rx->rid) + rxf->rx->bna->default_mode_rid = BFI_INVALID_RID; + + rxf->rss_pending = 0; + rxf->vlan_strip_pending = false; + + rxf->flags = 0; + + rxf->rx = NULL; +} + +static void +bna_rx_cb_rxf_started(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_RXF_STARTED); +} + +static void +bna_rxf_start(struct bna_rxf *rxf) +{ + rxf->start_cbfn = bna_rx_cb_rxf_started; + rxf->start_cbarg = rxf->rx; + bfa_fsm_send_event(rxf, RXF_E_START); +} + +static void +bna_rx_cb_rxf_stopped(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); +} + +static void +bna_rxf_stop(struct bna_rxf *rxf) +{ + rxf->stop_cbfn = bna_rx_cb_rxf_stopped; + rxf->stop_cbarg = rxf->rx; + bfa_fsm_send_event(rxf, RXF_E_STOP); +} + +static void +bna_rxf_fail(struct bna_rxf *rxf) +{ + bfa_fsm_send_event(rxf, RXF_E_FAIL); +} 
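+
+/*
+ * RXF configuration entry points. Each routine below records the requested
+ * unicast/multicast/VLAN change in the rxf pending state (pending add/del
+ * queues and pending bitmasks) and, when a hardware update is needed,
+ * raises RXF_E_CONFIG so that the rxf state machine pushes the change to
+ * firmware through the bna_bfi_*_req() helpers.
+ */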
+ +enum bna_cb_status +bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->ucast_pending_mac == NULL) { + rxf->ucast_pending_mac = + bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna)); + if (rxf->ucast_pending_mac == NULL) + return BNA_CB_UCAST_CAM_FULL; + bfa_q_qe_init(&rxf->ucast_pending_mac->qe); + } + + memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN); + rxf->ucast_pending_set = 1; + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; +} + +enum bna_cb_status +bna_rx_mcast_add(struct bna_rx *rx, u8 *addr, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + struct bna_mac *mac; + + /* Check if already added or pending addition */ + if (bna_mac_find(&rxf->mcast_active_q, addr) || + bna_mac_find(&rxf->mcast_pending_add_q, addr)) { + if (cbfn) + cbfn(rx->bna->bnad, rx); + return BNA_CB_SUCCESS; + } + + mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna)); + if (mac == NULL) + return BNA_CB_MCAST_LIST_FULL; + bfa_q_qe_init(&mac->qe); + memcpy(mac->addr, addr, ETH_ALEN); + list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); + + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; +} + +enum bna_cb_status +bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; + struct bna_rxf *rxf = &rx->rxf; + struct list_head list_head; + struct list_head *qe; + u8 *mcaddr; + struct bna_mac *mac, *del_mac; + int i; + + /* Purge the pending_add_q */ + while (!list_empty(&rxf->ucast_pending_add_q)) { + bfa_q_deq(&rxf->ucast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + bna_cam_mod_mac_put(&ucam_mod->free_q, mac); + } + + /* Schedule active_q entries for deletion */ + while (!list_empty(&rxf->ucast_active_q)) { + bfa_q_deq(&rxf->ucast_active_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + + del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q); + memcpy(del_mac, mac, sizeof(*del_mac)); + list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q); + bna_cam_mod_mac_put(&ucam_mod->free_q, mac); + } + + /* Allocate nodes */ + INIT_LIST_HEAD(&list_head); + for (i = 0, mcaddr = uclist; i < count; i++) { + mac = bna_cam_mod_mac_get(&ucam_mod->free_q); + if (mac == NULL) + goto err_return; + bfa_q_qe_init(&mac->qe); + memcpy(mac->addr, mcaddr, ETH_ALEN); + list_add_tail(&mac->qe, &list_head); + mcaddr += ETH_ALEN; + } + + /* Add the new entries */ + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + list_add_tail(&mac->qe, &rxf->ucast_pending_add_q); + } + + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; + +err_return: + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + bna_cam_mod_mac_put(&ucam_mod->free_q, mac); + } + + return BNA_CB_UCAST_CAM_FULL; +} + +enum bna_cb_status +bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod; + struct bna_rxf *rxf = &rx->rxf; + struct list_head list_head; + struct list_head *qe; + u8 
*mcaddr; + struct bna_mac *mac, *del_mac; + int i; + + /* Purge the pending_add_q */ + while (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + bna_cam_mod_mac_put(&mcam_mod->free_q, mac); + } + + /* Schedule active_q entries for deletion */ + while (!list_empty(&rxf->mcast_active_q)) { + bfa_q_deq(&rxf->mcast_active_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + + del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q); + + memcpy(del_mac, mac, sizeof(*del_mac)); + list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); + mac->handle = NULL; + bna_cam_mod_mac_put(&mcam_mod->free_q, mac); + } + + /* Allocate nodes */ + INIT_LIST_HEAD(&list_head); + for (i = 0, mcaddr = mclist; i < count; i++) { + mac = bna_cam_mod_mac_get(&mcam_mod->free_q); + if (mac == NULL) + goto err_return; + bfa_q_qe_init(&mac->qe); + memcpy(mac->addr, mcaddr, ETH_ALEN); + list_add_tail(&mac->qe, &list_head); + + mcaddr += ETH_ALEN; + } + + /* Add the new entries */ + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); + } + + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; + +err_return: + while (!list_empty(&list_head)) { + bfa_q_deq(&list_head, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + bna_cam_mod_mac_put(&mcam_mod->free_q, mac); + } + + return BNA_CB_MCAST_LIST_FULL; +} + +void +bna_rx_mcast_delall(struct bna_rx *rx, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + struct list_head *qe; + struct bna_mac *mac, *del_mac; + int need_hw_config = 0; + + /* Purge all entries from pending_add_q */ + while (!list_empty(&rxf->mcast_pending_add_q)) { + bfa_q_deq(&rxf->mcast_pending_add_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); + } + + /* Schedule all entries in active_q for deletion */ + while (!list_empty(&rxf->mcast_active_q)) { + bfa_q_deq(&rxf->mcast_active_q, &qe); + mac = (struct bna_mac *)qe; + bfa_q_qe_init(&mac->qe); + + del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna)); + + memcpy(del_mac, mac, sizeof(*del_mac)); + list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); + mac->handle = NULL; + bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac); + need_hw_config = 1; + } + + if (need_hw_config) { + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + return; + } + + if (cbfn) + (*cbfn)(rx->bna->bnad, rx); +} + +void +bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) +{ + struct bna_rxf *rxf = &rx->rxf; + int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); + int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK)); + int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); + + rxf->vlan_filter_table[index] |= bit; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { + rxf->vlan_pending_bitmask |= (1 << group_id); + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_vlan_del(struct bna_rx *rx, int vlan_id) +{ + struct bna_rxf *rxf = &rx->rxf; + int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); + int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK)); + int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); + + rxf->vlan_filter_table[index] &= ~bit; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { + 
rxf->vlan_pending_bitmask |= (1 << group_id); + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +static int +bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) +{ + struct bna_mac *mac = NULL; + struct list_head *qe; + + /* Delete MAC addresses previously added */ + if (!list_empty(&rxf->ucast_pending_del_q)) { + bfa_q_deq(&rxf->ucast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac); + return 1; + } + + /* Set default unicast MAC */ + if (rxf->ucast_pending_set) { + rxf->ucast_pending_set = 0; + memcpy(rxf->ucast_active_mac.addr, + rxf->ucast_pending_mac->addr, ETH_ALEN); + rxf->ucast_active_set = 1; + bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, + BFI_ENET_H2I_MAC_UCAST_SET_REQ); + return 1; + } + + /* Add additional MAC entries */ + if (!list_empty(&rxf->ucast_pending_add_q)) { + bfa_q_deq(&rxf->ucast_pending_add_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + list_add_tail(&mac->qe, &rxf->ucast_active_q); + bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); + return 1; + } + + return 0; +} + +static int +bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct list_head *qe; + struct bna_mac *mac; + + /* Throw away delete pending ucast entries */ + while (!list_empty(&rxf->ucast_pending_del_q)) { + bfa_q_deq(&rxf->ucast_pending_del_q, &qe); + bfa_q_qe_init(qe); + mac = (struct bna_mac *)qe; + if (cleanup == BNA_SOFT_CLEANUP) + bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), + mac); + else { + bna_bfi_ucast_req(rxf, mac, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), + mac); + return 1; + } + } + + /* Move active ucast entries to pending_add_q */ + while (!list_empty(&rxf->ucast_active_q)) { + bfa_q_deq(&rxf->ucast_active_q, &qe); + bfa_q_qe_init(qe); + list_add_tail(qe, &rxf->ucast_pending_add_q); + if (cleanup == BNA_HARD_CLEANUP) { + mac = (struct bna_mac *)qe; + bna_bfi_ucast_req(rxf, mac, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + return 1; + } + } + + if (rxf->ucast_active_set) { + rxf->ucast_pending_set = 1; + rxf->ucast_active_set = 0; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, + BFI_ENET_H2I_MAC_UCAST_CLR_REQ); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + + /* Enable/disable promiscuous mode */ + if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move promisc configuration from pending -> active */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active |= BNA_RXMODE_PROMISC; + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } else if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move promisc configuration from pending -> active */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + bna->promisc_rid = BFI_INVALID_RID; + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + + return 0; +} + +static int +bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct bna *bna = rxf->rx->bna; + + /* Clear pending promisc mode disable */ + if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask);
+ rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + bna->promisc_rid = BFI_INVALID_RID; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + } + + /* Move promisc mode config from active -> pending */ + if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { + promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf) +{ + /* Enable/disable allmulti mode */ + if (is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move allmulti configuration from pending -> active */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active |= BNA_RXMODE_ALLMULTI; + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } else if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move allmulti configuration from pending -> active */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + + return 0; +} + +static int +bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + /* Clear pending allmulti mode disable */ + if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + } + + /* Move allmulti mode config from active -> pending */ + if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { + allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_promisc_enable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (rxf->rxmode_active & BNA_RXMODE_PROMISC)) { + /* Do nothing if pending enable or already enabled */ + } else if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending disable command */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else { + /* Schedule enable */ + promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + bna->promisc_rid = rxf->rx->rid; + ret = 1; + } + + return ret; +} + +static int +bna_rxf_promisc_disable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) { + /* Do nothing if pending disable or already disabled */ + } else if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending enable command */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + bna->promisc_rid = BFI_INVALID_RID; + } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { + /* Schedule disable */ + promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int 
+bna_rxf_allmulti_enable(struct bna_rxf *rxf) +{ + int ret = 0; + + if (is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) { + /* Do nothing if pending enable or already enabled */ + } else if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending disable command */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else { + /* Schedule enable */ + allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_allmulti_disable(struct bna_rxf *rxf) +{ + int ret = 0; + + if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) { + /* Do nothing if pending disable or already disabled */ + } else if (is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending enable command */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { + /* Schedule disable */ + allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf) +{ + if (rxf->vlan_strip_pending) { + rxf->vlan_strip_pending = false; + bna_bfi_vlan_strip_enable(rxf); + return 1; + } + + return 0; +} + +/* RX */ + +#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ + (qcfg)->num_paths : ((qcfg)->num_paths * 2)) + +#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\ + (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) + +#define call_rx_stop_cbfn(rx) \ +do { \ + if ((rx)->stop_cbfn) { \ + void (*cbfn)(void *, struct bna_rx *); \ + void *cbarg; \ + cbfn = (rx)->stop_cbfn; \ + cbarg = (rx)->stop_cbarg; \ + (rx)->stop_cbfn = NULL; \ + (rx)->stop_cbarg = NULL; \ + cbfn(cbarg, rx); \ + } \ +} while (0) + +#define call_rx_stall_cbfn(rx) \ +do { \ + if ((rx)->rx_stall_cbfn) \ + (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \ +} while (0) + +#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \ +do { \ + struct bna_dma_addr cur_q_addr = \ + *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \ + (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \ + (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \ + (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \ + (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \ + (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \ + (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\ +} while (0) + +static void bna_bfi_rx_enet_start(struct bna_rx *rx); +static void bna_rx_enet_stop(struct bna_rx *rx); +static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx); + +bfa_fsm_state_decl(bna_rx, stopped, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, start_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, start_stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, rxf_start_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, started, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, rxf_stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, cleanup_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, failed, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, quiesce_wait, 
+ struct bna_rx, enum bna_rx_event); + +static void bna_rx_sm_stopped_entry(struct bna_rx *rx) +{ + call_rx_stop_cbfn(rx); +} + +static void bna_rx_sm_stopped(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_START: + bfa_fsm_set_state(rx, bna_rx_sm_start_wait); + break; + + case RX_E_STOP: + call_rx_stop_cbfn(rx); + break; + + case RX_E_FAIL: + /* no-op */ + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_start_wait_entry(struct bna_rx *rx) +{ + bna_bfi_rx_enet_start(rx); +} + +static void +bna_rx_sm_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_STARTED: + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_start_wait(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + case RX_E_STARTED: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) +{ + rx->rx_post_cbfn(rx->bna->bnad, rx); + bna_rxf_start(&rx->rxf); +} + +static void +bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_RXF_STARTED: + bna_rxf_stop(&rx->rxf); + break; + + case RX_E_RXF_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); + call_rx_stall_cbfn(rx); + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + break; + } + +} + +static void +bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + case RX_E_STARTED: + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rx_sm_started_entry(struct bna_rx *rx) +{ + struct bna_rxp *rxp; + struct list_head *qe_rxp; + int is_regular = (rx->type == BNA_RX_T_REGULAR); + + /* Start IB */ + list_for_each(qe_rxp, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe_rxp; + bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); + } + + bna_ethport_cb_rx_started(&rx->bna->ethport); +} + +static void +bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); + bna_ethport_cb_rx_stopped(&rx->bna->ethport); + bna_rxf_stop(&rx->rxf); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + bna_ethport_cb_rx_stopped(&rx->bna->ethport); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); + break; + + case RX_E_FAIL: + 
bfa_fsm_set_state(rx, bna_rx_sm_failed); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_RXF_STARTED: + bfa_fsm_set_state(rx, bna_rx_sm_started); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_RXF_STOPPED: + /* No-op */ + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_rx_sm_failed_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_START: + bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait); + break; + + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + break; + + case RX_E_FAIL: + case RX_E_RXF_STARTED: + case RX_E_RXF_STOPPED: + /* No-op */ + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + break; +} } + +static void +bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_start_wait); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_bfi_rx_enet_start(struct bna_rx *rx) +{ + struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; + struct bna_rxp *rxp = NULL; + struct bna_rxq *q0 = NULL, *q1 = NULL; + struct list_head *rxp_qe; + int i; + + bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); + cfg_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); + + cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet); + cfg_req->num_queue_sets = rx->num_paths; + for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); + i < rx->num_paths; + i++, rxp_qe = bfa_q_next(rxp_qe)) { + rxp = (struct bna_rxp *)rxp_qe; + + GET_RXQS(rxp, q0, q1); + switch (rxp->type) { + case BNA_RXP_SLR: + case BNA_RXP_HDS: + /* Small RxQ */ + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, + &q1->qpt); + cfg_req->q_cfg[i].qs.rx_buffer_size = + htons((u16)q1->buffer_size); + /* Fall through */ + + case BNA_RXP_SINGLE: + /* Large/Single RxQ */ + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, + &q0->qpt); + if (q0->multi_buffer) + /* multi-buffer is enabled by allocating + * a new rx with new set of resources. + * q0->buffer_size should be initialized to + * fragment size. 
+ */ + cfg_req->rx_cfg.multi_buffer = + BNA_STATUS_T_ENABLED; + else + q0->buffer_size = + bna_enet_mtu_get(&rx->bna->enet); + cfg_req->q_cfg[i].ql.rx_buffer_size = + htons((u16)q0->buffer_size); + break; + + default: + BUG_ON(1); + } + + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, + &rxp->cq.qpt); + + cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = + rxp->cq.ib.ib_seg_host_addr.lsb; + cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = + rxp->cq.ib.ib_seg_host_addr.msb; + cfg_req->q_cfg[i].ib.intr.msix_index = + htons((u16)rxp->cq.ib.intr_vector); + } + + cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) + ? BNA_STATUS_T_ENABLED : + BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.coalescing_timeout = + htonl((u32)rxp->cq.ib.coalescing_timeo); + cfg_req->ib_cfg.inter_pkt_timeout = + htonl((u32)rxp->cq.ib.interpkt_timeo); + cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; + + switch (rxp->type) { + case BNA_RXP_SLR: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; + break; + + case BNA_RXP_HDS: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; + cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; + cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; + cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; + break; + + case BNA_RXP_SINGLE: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; + break; + + default: + BUG_ON(1); + } + cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; + + bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); + bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); +} + +static void +bna_bfi_rx_enet_stop(struct bna_rx *rx) +{ + struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); + bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), + &req->mh); + bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); +} + +static void +bna_rx_enet_stop(struct bna_rx *rx) +{ + struct bna_rxp *rxp; + struct list_head *qe_rxp; + + /* Stop IB */ + list_for_each(qe_rxp, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe_rxp; + bna_ib_stop(rx->bna, &rxp->cq.ib); + } + + bna_bfi_rx_enet_stop(rx); +} + +static int +bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg) +{ + if ((rx_mod->rx_free_count == 0) || + (rx_mod->rxp_free_count == 0) || + (rx_mod->rxq_free_count == 0)) + return 0; + + if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { + if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || + (rx_mod->rxq_free_count < rx_cfg->num_paths)) + return 0; + } else { + if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || + (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) + return 0; + } + + return 1; +} + +static struct bna_rxq * +bna_rxq_get(struct bna_rx_mod *rx_mod) +{ + struct bna_rxq *rxq = NULL; + struct list_head *qe = NULL; + + bfa_q_deq(&rx_mod->rxq_free_q, &qe); + rx_mod->rxq_free_count--; + rxq = (struct bna_rxq *)qe; + bfa_q_qe_init(&rxq->qe); + + return rxq; +} + +static void +bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) +{ + bfa_q_qe_init(&rxq->qe); + list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); + rx_mod->rxq_free_count++; +} + +static struct bna_rxp * 
+bna_rxp_get(struct bna_rx_mod *rx_mod) +{ + struct list_head *qe = NULL; + struct bna_rxp *rxp = NULL; + + bfa_q_deq(&rx_mod->rxp_free_q, &qe); + rx_mod->rxp_free_count--; + rxp = (struct bna_rxp *)qe; + bfa_q_qe_init(&rxp->qe); + + return rxp; +} + +static void +bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) +{ + bfa_q_qe_init(&rxp->qe); + list_add_tail(&rxp->qe, &rx_mod->rxp_free_q); + rx_mod->rxp_free_count++; +} + +static struct bna_rx * +bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct list_head *qe = NULL; + struct bna_rx *rx = NULL; + + if (type == BNA_RX_T_REGULAR) { + bfa_q_deq(&rx_mod->rx_free_q, &qe); + } else + bfa_q_deq_tail(&rx_mod->rx_free_q, &qe); + + rx_mod->rx_free_count--; + rx = (struct bna_rx *)qe; + bfa_q_qe_init(&rx->qe); + list_add_tail(&rx->qe, &rx_mod->rx_active_q); + rx->type = type; + + return rx; +} + +static void +bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx) +{ + struct list_head *prev_qe = NULL; + struct list_head *qe; + + bfa_q_qe_init(&rx->qe); + + list_for_each(qe, &rx_mod->rx_free_q) { + if (((struct bna_rx *)qe)->rid < rx->rid) + prev_qe = qe; + else + break; + } + + if (prev_qe == NULL) { + /* This is the first entry */ + bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe); + } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) { + /* This is the last entry */ + list_add_tail(&rx->qe, &rx_mod->rx_free_q); + } else { + /* Somewhere in the middle */ + bfa_q_next(&rx->qe) = bfa_q_next(prev_qe); + bfa_q_prev(&rx->qe) = prev_qe; + bfa_q_next(prev_qe) = &rx->qe; + bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe; + } + + rx_mod->rx_free_count++; +} + +static void +bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0, + struct bna_rxq *q1) +{ + switch (rxp->type) { + case BNA_RXP_SINGLE: + rxp->rxq.single.only = q0; + rxp->rxq.single.reserved = NULL; + break; + case BNA_RXP_SLR: + rxp->rxq.slr.large = q0; + rxp->rxq.slr.small = q1; + break; + case BNA_RXP_HDS: + rxp->rxq.hds.data = q0; + rxp->rxq.hds.hdr = q1; + break; + default: + break; + } +} + +static void +bna_rxq_qpt_setup(struct bna_rxq *rxq, + struct bna_rxp *rxp, + u32 page_count, + u32 page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; + int i; + + rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + rxq->qpt.kv_qpt_ptr = qpt_mem->kva; + rxq->qpt.page_count = page_count; + rxq->qpt.page_size = page_size; + + rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; + rxq->rcb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); + + for (i = 0; i < rxq->qpt.page_count; i++) { + rxq->rcb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + + BNA_SET_DMA_ADDR(dma, &bna_dma); + ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = + bna_dma.lsb; + ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = + bna_dma.msb; + dma += PAGE_SIZE; + } +} + +static void +bna_rxp_cqpt_setup(struct bna_rxp *rxp, + u32 page_count, + u32 page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; + int i; + + rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; + rxp->cq.qpt.page_count = page_count; + rxp->cq.qpt.page_size = page_size; + + rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; + rxp->cq.ccb->sw_q = page_mem->kva; + + 
kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); + + for (i = 0; i < rxp->cq.qpt.page_count; i++) { + rxp->cq.ccb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + + BNA_SET_DMA_ADDR(dma, &bna_dma); + ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = + bna_dma.lsb; + ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = + bna_dma.msb; + dma += PAGE_SIZE; + } +} + +static void +bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) +{ + struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; + + bfa_wc_down(&rx_mod->rx_stop_wc); +} + +static void +bna_rx_mod_cb_rx_stopped_all(void *arg) +{ + struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; + + if (rx_mod->stop_cbfn) + rx_mod->stop_cbfn(&rx_mod->bna->enet); + rx_mod->stop_cbfn = NULL; +} + +static void +bna_rx_start(struct bna_rx *rx) +{ + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + if (rx->rx_flags & BNA_RX_F_ENABLED) + bfa_fsm_send_event(rx, RX_E_START); +} + +static void +bna_rx_stop(struct bna_rx *rx) +{ + rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; + if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) + bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); + else { + rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; + rx->stop_cbarg = &rx->bna->rx_mod; + bfa_fsm_send_event(rx, RX_E_STOP); + } +} + +static void +bna_rx_fail(struct bna_rx *rx) +{ + /* Indicate Enet is not enabled, and failed */ + rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; + bfa_fsm_send_event(rx, RX_E_FAIL); +} + +void +bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct bna_rx *rx; + struct list_head *qe; + + rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; + if (type == BNA_RX_T_LOOPBACK) + rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &rx_mod->rx_active_q) { + rx = (struct bna_rx *)qe; + if (rx->type == type) + bna_rx_start(rx); + } +} + +void +bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct bna_rx *rx; + struct list_head *qe; + + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; + + rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; + + bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); + + list_for_each(qe, &rx_mod->rx_active_q) { + rx = (struct bna_rx *)qe; + if (rx->type == type) { + bfa_wc_up(&rx_mod->rx_stop_wc); + bna_rx_stop(rx); + } + } + + bfa_wc_wait(&rx_mod->rx_stop_wc); +} + +void +bna_rx_mod_fail(struct bna_rx_mod *rx_mod) +{ + struct bna_rx *rx; + struct list_head *qe; + + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &rx_mod->rx_active_q) { + rx = (struct bna_rx *)qe; + bna_rx_fail(rx); + } +} + +void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int index; + struct bna_rx *rx_ptr; + struct bna_rxp *rxp_ptr; + struct bna_rxq *rxq_ptr; + + rx_mod->bna = bna; + rx_mod->flags = 0; + + rx_mod->rx = (struct bna_rx *) + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; + rx_mod->rxp = (struct bna_rxp *) + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; + rx_mod->rxq = (struct bna_rxq *) + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; + + /* Initialize the queues */ + INIT_LIST_HEAD(&rx_mod->rx_free_q); + rx_mod->rx_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rxq_free_q); + rx_mod->rxq_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rxp_free_q); + rx_mod->rxp_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rx_active_q); + + /* Build RX queues */ + for (index = 0; index < 
bna->ioceth.attr.num_rxp; index++) { + rx_ptr = &rx_mod->rx[index]; + + bfa_q_qe_init(&rx_ptr->qe); + INIT_LIST_HEAD(&rx_ptr->rxp_q); + rx_ptr->bna = NULL; + rx_ptr->rid = index; + rx_ptr->stop_cbfn = NULL; + rx_ptr->stop_cbarg = NULL; + + list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); + rx_mod->rx_free_count++; + } + + /* build RX-path queue */ + for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { + rxp_ptr = &rx_mod->rxp[index]; + bfa_q_qe_init(&rxp_ptr->qe); + list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); + rx_mod->rxp_free_count++; + } + + /* build RXQ queue */ + for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { + rxq_ptr = &rx_mod->rxq[index]; + bfa_q_qe_init(&rxq_ptr->qe); + list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); + rx_mod->rxq_free_count++; + } +} + +void +bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) +{ + struct list_head *qe; + int i; + + i = 0; + list_for_each(qe, &rx_mod->rx_free_q) + i++; + + i = 0; + list_for_each(qe, &rx_mod->rxp_free_q) + i++; + + i = 0; + list_for_each(qe, &rx_mod->rxq_free_q) + i++; + + rx_mod->bna = NULL; +} + +void +bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; + struct bna_rxp *rxp = NULL; + struct bna_rxq *q0 = NULL, *q1 = NULL; + struct list_head *rxp_qe; + int i; + + bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, + sizeof(struct bfi_enet_rx_cfg_rsp)); + + rx->hw_id = cfg_rsp->hw_id; + + for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); + i < rx->num_paths; + i++, rxp_qe = bfa_q_next(rxp_qe)) { + rxp = (struct bna_rxp *)rxp_qe; + GET_RXQS(rxp, q0, q1); + + /* Setup doorbells */ + rxp->cq.ccb->i_dbell->doorbell_addr = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].i_dbell); + rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; + q0->rcb->q_dbell = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].ql_dbell); + q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; + if (q1) { + q1->rcb->q_dbell = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].qs_dbell); + q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; + } + + /* Initialize producer/consumer indexes */ + (*rxp->cq.ccb->hw_producer_index) = 0; + rxp->cq.ccb->producer_index = 0; + q0->rcb->producer_index = q0->rcb->consumer_index = 0; + if (q1) + q1->rcb->producer_index = q1->rcb->consumer_index = 0; + } + + bfa_fsm_send_event(rx, RX_E_STARTED); +} + +void +bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(rx, RX_E_STOPPED); +} + +void +bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) +{ + u32 cq_size, hq_size, dq_size; + u32 cpage_count, hpage_count, dpage_count; + struct bna_mem_info *mem_info; + u32 cq_depth; + u32 hq_depth; + u32 dq_depth; + + dq_depth = q_cfg->q0_depth; + hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 
0 : q_cfg->q1_depth); + cq_depth = dq_depth + hq_depth; + + BNA_TO_POWER_OF_2_HIGH(cq_depth); + cq_size = cq_depth * BFI_CQ_WI_SIZE; + cq_size = ALIGN(cq_size, PAGE_SIZE); + cpage_count = SIZE_TO_PAGES(cq_size); + + BNA_TO_POWER_OF_2_HIGH(dq_depth); + dq_size = dq_depth * BFI_RXQ_WI_SIZE; + dq_size = ALIGN(dq_size, PAGE_SIZE); + dpage_count = SIZE_TO_PAGES(dq_size); + + if (BNA_RXP_SINGLE != q_cfg->rxp_type) { + BNA_TO_POWER_OF_2_HIGH(hq_depth); + hq_size = hq_depth * BFI_RXQ_WI_SIZE; + hq_size = ALIGN(hq_size, PAGE_SIZE); + hpage_count = SIZE_TO_PAGES(hq_size); + } else + hpage_count = 0; + + res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_ccb); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_rcb); + mem_info->num = BNA_GET_RXQS(q_cfg); + + res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = cpage_count * sizeof(struct bna_dma_addr); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = cpage_count * sizeof(void *); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * cpage_count; + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = dpage_count * sizeof(struct bna_dma_addr); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = dpage_count * sizeof(void *); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * dpage_count; + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = hpage_count * sizeof(struct bna_dma_addr); + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = hpage_count * sizeof(void *); + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * hpage_count; + mem_info->num = (hpage_count ? 
q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = BFI_IBIDX_SIZE; + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = BFI_ENET_RSS_RIT_MAX; + mem_info->num = 1; + + res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; + res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; + res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; +} + +struct bna_rx * +bna_rx_create(struct bna *bna, struct bnad *bnad, + struct bna_rx_config *rx_cfg, + const struct bna_rx_event_cbfn *rx_cbfn, + struct bna_res_info *res_info, + void *priv) +{ + struct bna_rx_mod *rx_mod = &bna->rx_mod; + struct bna_rx *rx; + struct bna_rxp *rxp; + struct bna_rxq *q0; + struct bna_rxq *q1; + struct bna_intr_info *intr_info; + struct bna_mem_descr *hqunmap_mem; + struct bna_mem_descr *dqunmap_mem; + struct bna_mem_descr *ccb_mem; + struct bna_mem_descr *rcb_mem; + struct bna_mem_descr *cqpt_mem; + struct bna_mem_descr *cswqpt_mem; + struct bna_mem_descr *cpage_mem; + struct bna_mem_descr *hqpt_mem; + struct bna_mem_descr *dqpt_mem; + struct bna_mem_descr *hsqpt_mem; + struct bna_mem_descr *dsqpt_mem; + struct bna_mem_descr *hpage_mem; + struct bna_mem_descr *dpage_mem; + u32 dpage_count, hpage_count; + u32 hq_idx, dq_idx, rcb_idx; + u32 cq_depth, i; + u32 page_count; + + if (!bna_rx_res_check(rx_mod, rx_cfg)) + return NULL; + + intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; + ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; + rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; + dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; + hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; + cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; + cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; + cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; + hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; + dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; + hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; + dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; + hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; + dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; + + page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / + PAGE_SIZE; + + dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len / + PAGE_SIZE; + + hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len / + PAGE_SIZE; + + rx = bna_rx_get(rx_mod, rx_cfg->rx_type); + rx->bna = bna; + rx->rx_flags = 0; + INIT_LIST_HEAD(&rx->rxp_q); + rx->stop_cbfn = NULL; + rx->stop_cbarg = NULL; + rx->priv = priv; + + rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; + rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; + rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; + rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; + rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; + /* Following callbacks are mandatory */ + rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; + rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; + + if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { + switch (rx->type) { + 
case BNA_RX_T_REGULAR: + if (!(rx->bna->rx_mod.flags & + BNA_RX_MOD_F_ENET_LOOPBACK)) + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + break; + case BNA_RX_T_LOOPBACK: + if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + break; + } + } + + rx->num_paths = rx_cfg->num_paths; + for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; + i < rx->num_paths; i++) { + rxp = bna_rxp_get(rx_mod); + list_add_tail(&rxp->qe, &rx->rxp_q); + rxp->type = rx_cfg->rxp_type; + rxp->rx = rx; + rxp->cq.rx = rx; + + q0 = bna_rxq_get(rx_mod); + if (BNA_RXP_SINGLE == rx_cfg->rxp_type) + q1 = NULL; + else + q1 = bna_rxq_get(rx_mod); + + if (1 == intr_info->num) + rxp->vector = intr_info->idl[0].vector; + else + rxp->vector = intr_info->idl[i].vector; + + /* Setup IB */ + + rxp->cq.ib.ib_seg_host_addr.lsb = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; + rxp->cq.ib.ib_seg_host_addr.msb = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; + rxp->cq.ib.ib_seg_host_addr_kva = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; + rxp->cq.ib.intr_type = intr_info->intr_type; + if (intr_info->intr_type == BNA_INTR_T_MSIX) + rxp->cq.ib.intr_vector = rxp->vector; + else + rxp->cq.ib.intr_vector = (1 << rxp->vector); + rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; + rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; + rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; + + bna_rxp_add_rxqs(rxp, q0, q1); + + /* Setup large Q */ + + q0->rx = rx; + q0->rxp = rxp; + + q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; + q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; + rcb_idx++; dq_idx++; + q0->rcb->q_depth = rx_cfg->q0_depth; + q0->q_depth = rx_cfg->q0_depth; + q0->multi_buffer = rx_cfg->q0_multi_buf; + q0->buffer_size = rx_cfg->q0_buf_size; + q0->num_vecs = rx_cfg->q0_num_vecs; + q0->rcb->rxq = q0; + q0->rcb->bnad = bna->bnad; + q0->rcb->id = 0; + q0->rx_packets = q0->rx_bytes = 0; + q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; + + bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, + &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); + + if (rx->rcb_setup_cbfn) + rx->rcb_setup_cbfn(bnad, q0->rcb); + + /* Setup small Q */ + + if (q1) { + q1->rx = rx; + q1->rxp = rxp; + + q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; + q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; + rcb_idx++; hq_idx++; + q1->rcb->q_depth = rx_cfg->q1_depth; + q1->q_depth = rx_cfg->q1_depth; + q1->multi_buffer = BNA_STATUS_T_DISABLED; + q1->num_vecs = 1; + q1->rcb->rxq = q1; + q1->rcb->bnad = bna->bnad; + q1->rcb->id = 1; + q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? + rx_cfg->hds_config.forced_offset + : rx_cfg->q1_buf_size; + q1->rx_packets = q1->rx_bytes = 0; + q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; + + bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, + &hqpt_mem[i], &hsqpt_mem[i], + &hpage_mem[i]); + + if (rx->rcb_setup_cbfn) + rx->rcb_setup_cbfn(bnad, q1->rcb); + } + + /* Setup CQ */ + + rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; + cq_depth = rx_cfg->q0_depth + + ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? 
+ 0 : rx_cfg->q1_depth); + /* if multi-buffer is enabled sum of q0_depth + * and q1_depth need not be a power of 2 + */ + BNA_TO_POWER_OF_2_HIGH(cq_depth); + rxp->cq.ccb->q_depth = cq_depth; + rxp->cq.ccb->cq = &rxp->cq; + rxp->cq.ccb->rcb[0] = q0->rcb; + q0->rcb->ccb = rxp->cq.ccb; + if (q1) { + rxp->cq.ccb->rcb[1] = q1->rcb; + q1->rcb->ccb = rxp->cq.ccb; + } + rxp->cq.ccb->hw_producer_index = + (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; + rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; + rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; + rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; + rxp->cq.ccb->rx_coalescing_timeo = + rxp->cq.ib.coalescing_timeo; + rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; + rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; + rxp->cq.ccb->bnad = bna->bnad; + rxp->cq.ccb->id = i; + + bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, + &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]); + + if (rx->ccb_setup_cbfn) + rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); + } + + rx->hds_cfg = rx_cfg->hds_config; + + bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); + + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + + rx_mod->rid_mask |= (1 << rx->rid); + + return rx; +} + +void +bna_rx_destroy(struct bna_rx *rx) +{ + struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; + struct bna_rxq *q0 = NULL; + struct bna_rxq *q1 = NULL; + struct bna_rxp *rxp; + struct list_head *qe; + + bna_rxf_uninit(&rx->rxf); + + while (!list_empty(&rx->rxp_q)) { + bfa_q_deq(&rx->rxp_q, &rxp); + GET_RXQS(rxp, q0, q1); + if (rx->rcb_destroy_cbfn) + rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); + q0->rcb = NULL; + q0->rxp = NULL; + q0->rx = NULL; + bna_rxq_put(rx_mod, q0); + + if (q1) { + if (rx->rcb_destroy_cbfn) + rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); + q1->rcb = NULL; + q1->rxp = NULL; + q1->rx = NULL; + bna_rxq_put(rx_mod, q1); + } + rxp->rxq.slr.large = NULL; + rxp->rxq.slr.small = NULL; + + if (rx->ccb_destroy_cbfn) + rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); + rxp->cq.ccb = NULL; + rxp->rx = NULL; + bna_rxp_put(rx_mod, rxp); + } + + list_for_each(qe, &rx_mod->rx_active_q) { + if (qe == &rx->qe) { + list_del(&rx->qe); + bfa_q_qe_init(&rx->qe); + break; + } + } + + rx_mod->rid_mask &= ~(1 << rx->rid); + + rx->bna = NULL; + rx->priv = NULL; + bna_rx_put(rx_mod, rx); +} + +void +bna_rx_enable(struct bna_rx *rx) +{ + if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) + return; + + rx->rx_flags |= BNA_RX_F_ENABLED; + if (rx->rx_flags & BNA_RX_F_ENET_STARTED) + bfa_fsm_send_event(rx, RX_E_START); +} + +void +bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_rx *)) +{ + if (type == BNA_SOFT_CLEANUP) { + /* h/w should not be accessed. 
Treat we're stopped */ + (*cbfn)(rx->bna->bnad, rx); + } else { + rx->stop_cbfn = cbfn; + rx->stop_cbarg = rx->bna->bnad; + + rx->rx_flags &= ~BNA_RX_F_ENABLED; + + bfa_fsm_send_event(rx, RX_E_STOP); + } +} + +void +bna_rx_cleanup_complete(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); +} + +void +bna_rx_vlan_strip_enable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { + rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; + rxf->vlan_strip_pending = true; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_vlan_strip_disable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { + rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; + rxf->vlan_strip_pending = true; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +enum bna_cb_status +bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, + enum bna_rxmode bitmask, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + int need_hw_config = 0; + + /* Error checks */ + + if (is_promisc_enable(new_mode, bitmask)) { + /* If promisc mode is already enabled elsewhere in the system */ + if ((rx->bna->promisc_rid != BFI_INVALID_RID) && + (rx->bna->promisc_rid != rxf->rx->rid)) + goto err_return; + + /* If default mode is already enabled in the system */ + if (rx->bna->default_mode_rid != BFI_INVALID_RID) + goto err_return; + + /* Trying to enable promiscuous and default mode together */ + if (is_default_enable(new_mode, bitmask)) + goto err_return; + } + + if (is_default_enable(new_mode, bitmask)) { + /* If default mode is already enabled elsewhere in the system */ + if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && + (rx->bna->default_mode_rid != rxf->rx->rid)) { + goto err_return; + } + + /* If promiscuous mode is already enabled in the system */ + if (rx->bna->promisc_rid != BFI_INVALID_RID) + goto err_return; + } + + /* Process the commands */ + + if (is_promisc_enable(new_mode, bitmask)) { + if (bna_rxf_promisc_enable(rxf)) + need_hw_config = 1; + } else if (is_promisc_disable(new_mode, bitmask)) { + if (bna_rxf_promisc_disable(rxf)) + need_hw_config = 1; + } + + if (is_allmulti_enable(new_mode, bitmask)) { + if (bna_rxf_allmulti_enable(rxf)) + need_hw_config = 1; + } else if (is_allmulti_disable(new_mode, bitmask)) { + if (bna_rxf_allmulti_disable(rxf)) + need_hw_config = 1; + } + + /* Trigger h/w if needed */ + + if (need_hw_config) { + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } else if (cbfn) + (*cbfn)(rx->bna->bnad, rx); + + return BNA_CB_SUCCESS; + +err_return: + return BNA_CB_FAIL; +} + +void +bna_rx_vlanfilter_enable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { + rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; + rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) +{ + struct bna_rxp *rxp; + struct list_head *qe; + + list_for_each(qe, &rx->rxp_q) { + rxp = (struct bna_rxp *)qe; + rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; + bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); + } +} + +void +bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) +{ + int i, j; + + for (i = 0; i < BNA_LOAD_T_MAX; i++) + for (j = 0; j < BNA_BIAS_T_MAX; j++) + 
bna->rx_mod.dim_vector[i][j] = vector[i][j]; +} + +void +bna_rx_dim_update(struct bna_ccb *ccb) +{ + struct bna *bna = ccb->cq->rx->bna; + u32 load, bias; + u32 pkt_rt, small_rt, large_rt; + u8 coalescing_timeo; + + if ((ccb->pkt_rate.small_pkt_cnt == 0) && + (ccb->pkt_rate.large_pkt_cnt == 0)) + return; + + /* Arrive at preconfigured coalescing timeo value based on pkt rate */ + + small_rt = ccb->pkt_rate.small_pkt_cnt; + large_rt = ccb->pkt_rate.large_pkt_cnt; + + pkt_rt = small_rt + large_rt; + + if (pkt_rt < BNA_PKT_RATE_10K) + load = BNA_LOAD_T_LOW_4; + else if (pkt_rt < BNA_PKT_RATE_20K) + load = BNA_LOAD_T_LOW_3; + else if (pkt_rt < BNA_PKT_RATE_30K) + load = BNA_LOAD_T_LOW_2; + else if (pkt_rt < BNA_PKT_RATE_40K) + load = BNA_LOAD_T_LOW_1; + else if (pkt_rt < BNA_PKT_RATE_50K) + load = BNA_LOAD_T_HIGH_1; + else if (pkt_rt < BNA_PKT_RATE_60K) + load = BNA_LOAD_T_HIGH_2; + else if (pkt_rt < BNA_PKT_RATE_80K) + load = BNA_LOAD_T_HIGH_3; + else + load = BNA_LOAD_T_HIGH_4; + + if (small_rt > (large_rt << 1)) + bias = 0; + else + bias = 1; + + ccb->pkt_rate.small_pkt_cnt = 0; + ccb->pkt_rate.large_pkt_cnt = 0; + + coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; + ccb->rx_coalescing_timeo = coalescing_timeo; + + /* Set it to IB */ + bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); +} + +const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { + {12, 12}, + {6, 10}, + {5, 10}, + {4, 8}, + {3, 6}, + {3, 6}, + {2, 4}, + {1, 2}, +}; + +/* TX */ + +#define call_tx_stop_cbfn(tx) \ +do { \ + if ((tx)->stop_cbfn) { \ + void (*cbfn)(void *, struct bna_tx *); \ + void *cbarg; \ + cbfn = (tx)->stop_cbfn; \ + cbarg = (tx)->stop_cbarg; \ + (tx)->stop_cbfn = NULL; \ + (tx)->stop_cbarg = NULL; \ + cbfn(cbarg, (tx)); \ + } \ +} while (0) + +#define call_tx_prio_change_cbfn(tx) \ +do { \ + if ((tx)->prio_change_cbfn) { \ + void (*cbfn)(struct bnad *, struct bna_tx *); \ + cbfn = (tx)->prio_change_cbfn; \ + (tx)->prio_change_cbfn = NULL; \ + cbfn((tx)->bna->bnad, (tx)); \ + } \ +} while (0) + +static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); +static void bna_bfi_tx_enet_start(struct bna_tx *tx); +static void bna_tx_enet_stop(struct bna_tx *tx); + +enum bna_tx_event { + TX_E_START = 1, + TX_E_STOP = 2, + TX_E_FAIL = 3, + TX_E_STARTED = 4, + TX_E_STOPPED = 5, + TX_E_PRIO_CHANGE = 6, + TX_E_CLEANUP_DONE = 7, + TX_E_BW_UPDATE = 8, +}; + +bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, + enum bna_tx_event); + +static void +bna_tx_sm_stopped_entry(struct bna_tx *tx) +{ + call_tx_stop_cbfn(tx); +} + +static void +bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_START: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + case TX_E_STOP: + call_tx_stop_cbfn(tx); + break; + + case TX_E_FAIL: + /* No-op */ + break; + + case TX_E_PRIO_CHANGE: + call_tx_prio_change_cbfn(tx); + break; + + case TX_E_BW_UPDATE: + /* 
No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_start_wait_entry(struct bna_tx *tx) +{ + bna_bfi_tx_enet_start(tx); +} + +static void +bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + break; + + case TX_E_FAIL: + tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + case TX_E_STARTED: + if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) { + tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | + BNA_TX_F_BW_UPDATED); + bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); + } else + bfa_fsm_set_state(tx, bna_tx_sm_started); + break; + + case TX_E_PRIO_CHANGE: + tx->flags |= BNA_TX_F_PRIO_CHANGED; + break; + + case TX_E_BW_UPDATE: + tx->flags |= BNA_TX_F_BW_UPDATED; + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_started_entry(struct bna_tx *tx) +{ + struct bna_txq *txq; + struct list_head *qe; + int is_regular = (tx->type == BNA_TX_T_REGULAR); + + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + txq->tcb->priority = txq->priority; + /* Start IB */ + bna_ib_start(tx->bna, &txq->ib, is_regular); + } + tx->tx_resume_cbfn(tx->bna->bnad, tx); +} + +static void +bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + tx->tx_stall_cbfn(tx->bna->bnad, tx); + bna_tx_enet_stop(tx); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + tx->tx_stall_cbfn(tx->bna->bnad, tx); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_stop_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_FAIL: + case TX_E_STOPPED: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_STARTED: + /** + * We are here due to start_wait -> stop_wait transition on + * TX_E_STOP event + */ + bna_tx_enet_stop(tx); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_FAIL: + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) +{ + tx->tx_stall_cbfn(tx->bna->bnad, tx); + bna_tx_enet_stop(tx); +} + +static void +bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + call_tx_prio_change_cbfn(tx); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_STOPPED: + bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void 
+bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) +{ + call_tx_prio_change_cbfn(tx); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); +} + +static void +bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + break; + + case TX_E_PRIO_CHANGE: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_failed_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_START: + bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); + break; + + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_tx_enet_start(struct bna_tx *tx) +{ + struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; + struct bna_txq *txq = NULL; + struct list_head *qe; + int i; + + bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); + cfg_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); + + cfg_req->num_queues = tx->num_txq; + for (i = 0, qe = bfa_q_first(&tx->txq_q); + i < tx->num_txq; + i++, qe = bfa_q_next(qe)) { + txq = (struct bna_txq *)qe; + + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); + cfg_req->q_cfg[i].q.priority = txq->priority; + + cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = + txq->ib.ib_seg_host_addr.lsb; + cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = + txq->ib.ib_seg_host_addr.msb; + cfg_req->q_cfg[i].ib.intr.msix_index = + htons((u16)txq->ib.intr_vector); + } + + cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) + ? 
BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.coalescing_timeout = + htonl((u32)txq->ib.coalescing_timeo); + cfg_req->ib_cfg.inter_pkt_timeout = + htonl((u32)txq->ib.interpkt_timeo); + cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; + + cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; + cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); + cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; + cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); + bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); +} + +static void +bna_bfi_tx_enet_stop(struct bna_tx *tx) +{ + struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); + bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), + &req->mh); + bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); +} + +static void +bna_tx_enet_stop(struct bna_tx *tx) +{ + struct bna_txq *txq; + struct list_head *qe; + + /* Stop IB */ + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + bna_ib_stop(tx->bna, &txq->ib); + } + + bna_bfi_tx_enet_stop(tx); +} + +static void +bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; + int i; + + txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + txq->qpt.kv_qpt_ptr = qpt_mem->kva; + txq->qpt.page_count = page_count; + txq->qpt.page_size = page_size; + + txq->tcb->sw_qpt = (void **) swqpt_mem->kva; + txq->tcb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); + + for (i = 0; i < page_count; i++) { + txq->tcb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + + BNA_SET_DMA_ADDR(dma, &bna_dma); + ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = + bna_dma.lsb; + ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = + bna_dma.msb; + dma += PAGE_SIZE; + } +} + +static struct bna_tx * +bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct list_head *qe = NULL; + struct bna_tx *tx = NULL; + + if (list_empty(&tx_mod->tx_free_q)) + return NULL; + if (type == BNA_TX_T_REGULAR) { + bfa_q_deq(&tx_mod->tx_free_q, &qe); + } else { + bfa_q_deq_tail(&tx_mod->tx_free_q, &qe); + } + tx = (struct bna_tx *)qe; + bfa_q_qe_init(&tx->qe); + tx->type = type; + + return tx; +} + +static void +bna_tx_free(struct bna_tx *tx) +{ + struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; + struct bna_txq *txq; + struct list_head *prev_qe; + struct list_head *qe; + + while (!list_empty(&tx->txq_q)) { + bfa_q_deq(&tx->txq_q, &txq); + bfa_q_qe_init(&txq->qe); + txq->tcb = NULL; + txq->tx = NULL; + list_add_tail(&txq->qe, &tx_mod->txq_free_q); + } + + list_for_each(qe, &tx_mod->tx_active_q) { + if (qe == &tx->qe) { + list_del(&tx->qe); + bfa_q_qe_init(&tx->qe); + break; + } + } + + tx->bna = NULL; + tx->priv = NULL; + + prev_qe = NULL; + list_for_each(qe, &tx_mod->tx_free_q) { + if (((struct bna_tx *)qe)->rid < tx->rid) + prev_qe = qe; + else { + break; + } + } + + if (prev_qe == NULL) { + /* This is the first entry */ + bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe); + } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) { + /* This is the last entry */ + 
list_add_tail(&tx->qe, &tx_mod->tx_free_q); + } else { + /* Somewhere in the middle */ + bfa_q_next(&tx->qe) = bfa_q_next(prev_qe); + bfa_q_prev(&tx->qe) = prev_qe; + bfa_q_next(prev_qe) = &tx->qe; + bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe; + } +} + +static void +bna_tx_start(struct bna_tx *tx) +{ + tx->flags |= BNA_TX_F_ENET_STARTED; + if (tx->flags & BNA_TX_F_ENABLED) + bfa_fsm_send_event(tx, TX_E_START); +} + +static void +bna_tx_stop(struct bna_tx *tx) +{ + tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; + tx->stop_cbarg = &tx->bna->tx_mod; + + tx->flags &= ~BNA_TX_F_ENET_STARTED; + bfa_fsm_send_event(tx, TX_E_STOP); +} + +static void +bna_tx_fail(struct bna_tx *tx) +{ + tx->flags &= ~BNA_TX_F_ENET_STARTED; + bfa_fsm_send_event(tx, TX_E_FAIL); +} + +void +bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; + struct bna_txq *txq = NULL; + struct list_head *qe; + int i; + + bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, + sizeof(struct bfi_enet_tx_cfg_rsp)); + + tx->hw_id = cfg_rsp->hw_id; + + for (i = 0, qe = bfa_q_first(&tx->txq_q); + i < tx->num_txq; i++, qe = bfa_q_next(qe)) { + txq = (struct bna_txq *)qe; + + /* Setup doorbells */ + txq->tcb->i_dbell->doorbell_addr = + tx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].i_dbell); + txq->tcb->q_dbell = + tx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].q_dbell); + txq->hw_id = cfg_rsp->q_handles[i].hw_qid; + + /* Initialize producer/consumer indexes */ + (*txq->tcb->hw_consumer_index) = 0; + txq->tcb->producer_index = txq->tcb->consumer_index = 0; + } + + bfa_fsm_send_event(tx, TX_E_STARTED); +} + +void +bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(tx, TX_E_STOPPED); +} + +void +bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod) +{ + struct bna_tx *tx; + struct list_head *qe; + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + bfa_fsm_send_event(tx, TX_E_BW_UPDATE); + } +} + +void +bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) +{ + u32 q_size; + u32 page_count; + struct bna_mem_info *mem_info; + + res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_tcb); + mem_info->num = num_txq; + + q_size = txq_depth * BFI_TXQ_WI_SIZE; + q_size = ALIGN(q_size, PAGE_SIZE); + page_count = q_size >> PAGE_SHIFT; + + res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = page_count * sizeof(struct bna_dma_addr); + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = page_count * sizeof(void *); + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * page_count; + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = BFI_IBIDX_SIZE; + mem_info->num = num_txq; + + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; + 
res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = + BNA_INTR_T_MSIX; + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; +} + +struct bna_tx * +bna_tx_create(struct bna *bna, struct bnad *bnad, + struct bna_tx_config *tx_cfg, + const struct bna_tx_event_cbfn *tx_cbfn, + struct bna_res_info *res_info, void *priv) +{ + struct bna_intr_info *intr_info; + struct bna_tx_mod *tx_mod = &bna->tx_mod; + struct bna_tx *tx; + struct bna_txq *txq; + struct list_head *qe; + int page_count; + int i; + + intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; + page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) / + PAGE_SIZE; + + /** + * Get resources + */ + + if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) + return NULL; + + /* Tx */ + + tx = bna_tx_get(tx_mod, tx_cfg->tx_type); + if (!tx) + return NULL; + tx->bna = bna; + tx->priv = priv; + + /* TxQs */ + + INIT_LIST_HEAD(&tx->txq_q); + for (i = 0; i < tx_cfg->num_txq; i++) { + if (list_empty(&tx_mod->txq_free_q)) + goto err_return; + + bfa_q_deq(&tx_mod->txq_free_q, &txq); + bfa_q_qe_init(&txq->qe); + list_add_tail(&txq->qe, &tx->txq_q); + txq->tx = tx; + } + + /* + * Initialize + */ + + /* Tx */ + + tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; + tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; + /* Following callbacks are mandatory */ + tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; + tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; + tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; + + list_add_tail(&tx->qe, &tx_mod->tx_active_q); + + tx->num_txq = tx_cfg->num_txq; + + tx->flags = 0; + if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { + switch (tx->type) { + case BNA_TX_T_REGULAR: + if (!(tx->bna->tx_mod.flags & + BNA_TX_MOD_F_ENET_LOOPBACK)) + tx->flags |= BNA_TX_F_ENET_STARTED; + break; + case BNA_TX_T_LOOPBACK: + if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) + tx->flags |= BNA_TX_F_ENET_STARTED; + break; + } + } + + /* TxQ */ + + i = 0; + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + txq->tcb = (struct bna_tcb *) + res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; + txq->tx_packets = 0; + txq->tx_bytes = 0; + + /* IB */ + txq->ib.ib_seg_host_addr.lsb = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; + txq->ib.ib_seg_host_addr.msb = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; + txq->ib.ib_seg_host_addr_kva = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; + txq->ib.intr_type = intr_info->intr_type; + txq->ib.intr_vector = (intr_info->num == 1) ? + intr_info->idl[0].vector : + intr_info->idl[i].vector; + if (intr_info->intr_type == BNA_INTR_T_INTX) + txq->ib.intr_vector = (1 << txq->ib.intr_vector); + txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; + txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO; + txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; + + /* TCB */ + + txq->tcb->q_depth = tx_cfg->txq_depth; + txq->tcb->unmap_q = (void *) + res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; + txq->tcb->hw_consumer_index = + (u32 *)txq->ib.ib_seg_host_addr_kva; + txq->tcb->i_dbell = &txq->ib.door_bell; + txq->tcb->intr_type = txq->ib.intr_type; + txq->tcb->intr_vector = txq->ib.intr_vector; + txq->tcb->txq = txq; + txq->tcb->bnad = bnad; + txq->tcb->id = i; + + /* QPT, SWQPT, Pages */ + bna_txq_qpt_setup(txq, page_count, PAGE_SIZE, + &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], + &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], + &res_info[BNA_TX_RES_MEM_T_PAGE]. 
+ res_u.mem_info.mdl[i]); + + /* Callback to bnad for setting up TCB */ + if (tx->tcb_setup_cbfn) + (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); + + if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) + txq->priority = txq->tcb->id; + else + txq->priority = tx_mod->default_prio; + + i++; + } + + tx->txf_vlan_id = 0; + + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + + tx_mod->rid_mask |= (1 << tx->rid); + + return tx; + +err_return: + bna_tx_free(tx); + return NULL; +} + +void +bna_tx_destroy(struct bna_tx *tx) +{ + struct bna_txq *txq; + struct list_head *qe; + + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + if (tx->tcb_destroy_cbfn) + (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); + } + + tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid); + bna_tx_free(tx); +} + +void +bna_tx_enable(struct bna_tx *tx) +{ + if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) + return; + + tx->flags |= BNA_TX_F_ENABLED; + + if (tx->flags & BNA_TX_F_ENET_STARTED) + bfa_fsm_send_event(tx, TX_E_START); +} + +void +bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_tx *)) +{ + if (type == BNA_SOFT_CLEANUP) { + (*cbfn)(tx->bna->bnad, tx); + return; + } + + tx->stop_cbfn = cbfn; + tx->stop_cbarg = tx->bna->bnad; + + tx->flags &= ~BNA_TX_F_ENABLED; + + bfa_fsm_send_event(tx, TX_E_STOP); +} + +void +bna_tx_cleanup_complete(struct bna_tx *tx) +{ + bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); +} + +static void +bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) +{ + struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; + + bfa_wc_down(&tx_mod->tx_stop_wc); +} + +static void +bna_tx_mod_cb_tx_stopped_all(void *arg) +{ + struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; + + if (tx_mod->stop_cbfn) + tx_mod->stop_cbfn(&tx_mod->bna->enet); + tx_mod->stop_cbfn = NULL; +} + +void +bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + tx_mod->bna = bna; + tx_mod->flags = 0; + + tx_mod->tx = (struct bna_tx *) + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; + tx_mod->txq = (struct bna_txq *) + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&tx_mod->tx_free_q); + INIT_LIST_HEAD(&tx_mod->tx_active_q); + + INIT_LIST_HEAD(&tx_mod->txq_free_q); + + for (i = 0; i < bna->ioceth.attr.num_txq; i++) { + tx_mod->tx[i].rid = i; + bfa_q_qe_init(&tx_mod->tx[i].qe); + list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); + bfa_q_qe_init(&tx_mod->txq[i].qe); + list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); + } + + tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; + tx_mod->default_prio = 0; + tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; + tx_mod->iscsi_prio = -1; +} + +void +bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) +{ + struct list_head *qe; + int i; + + i = 0; + list_for_each(qe, &tx_mod->tx_free_q) + i++; + + i = 0; + list_for_each(qe, &tx_mod->txq_free_q) + i++; + + tx_mod->bna = NULL; +} + +void +bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct bna_tx *tx; + struct list_head *qe; + + tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; + if (type == BNA_TX_T_LOOPBACK) + tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + if (tx->type == type) + bna_tx_start(tx); + } +} + +void +bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct bna_tx *tx; + struct list_head *qe; + + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; + + 
tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; + + bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + if (tx->type == type) { + bfa_wc_up(&tx_mod->tx_stop_wc); + bna_tx_stop(tx); + } + } + + bfa_wc_wait(&tx_mod->tx_stop_wc); +} + +void +bna_tx_mod_fail(struct bna_tx_mod *tx_mod) +{ + struct bna_tx *tx; + struct list_head *qe; + + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; + + list_for_each(qe, &tx_mod->tx_active_q) { + tx = (struct bna_tx *)qe; + bna_tx_fail(tx); + } +} + +void +bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) +{ + struct bna_txq *txq; + struct list_head *qe; + + list_for_each(qe, &tx->txq_q) { + txq = (struct bna_txq *)qe; + bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); + } +} diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h new file mode 100644 index 000000000..d0a7a566f --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -0,0 +1,957 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BNA_TYPES_H__ +#define __BNA_TYPES_H__ + +#include "cna.h" +#include "bna_hw_defs.h" +#include "bfa_cee.h" +#include "bfa_msgq.h" + +/* Forward declarations */ + +struct bna_mcam_handle; +struct bna_txq; +struct bna_tx; +struct bna_rxq; +struct bna_cq; +struct bna_rx; +struct bna_rxf; +struct bna_enet; +struct bna; +struct bnad; + +/* Enums, primitive data types */ + +enum bna_status { + BNA_STATUS_T_DISABLED = 0, + BNA_STATUS_T_ENABLED = 1 +}; + +enum bna_cleanup_type { + BNA_HARD_CLEANUP = 0, + BNA_SOFT_CLEANUP = 1 +}; + +enum bna_cb_status { + BNA_CB_SUCCESS = 0, + BNA_CB_FAIL = 1, + BNA_CB_INTERRUPT = 2, + BNA_CB_BUSY = 3, + BNA_CB_INVALID_MAC = 4, + BNA_CB_MCAST_LIST_FULL = 5, + BNA_CB_UCAST_CAM_FULL = 6, + BNA_CB_WAITING = 7, + BNA_CB_NOT_EXEC = 8 +}; + +enum bna_res_type { + BNA_RES_T_MEM = 1, + BNA_RES_T_INTR = 2 +}; + +enum bna_mem_type { + BNA_MEM_T_KVA = 1, + BNA_MEM_T_DMA = 2 +}; + +enum bna_intr_type { + BNA_INTR_T_INTX = 1, + BNA_INTR_T_MSIX = 2 +}; + +enum bna_res_req_type { + BNA_RES_MEM_T_COM = 0, + BNA_RES_MEM_T_ATTR = 1, + BNA_RES_MEM_T_FWTRC = 2, + BNA_RES_MEM_T_STATS = 3, + BNA_RES_T_MAX +}; + +enum bna_mod_res_req_type { + BNA_MOD_RES_MEM_T_TX_ARRAY = 0, + BNA_MOD_RES_MEM_T_TXQ_ARRAY = 1, + BNA_MOD_RES_MEM_T_RX_ARRAY = 2, + BNA_MOD_RES_MEM_T_RXP_ARRAY = 3, + BNA_MOD_RES_MEM_T_RXQ_ARRAY = 4, + BNA_MOD_RES_MEM_T_UCMAC_ARRAY = 5, + BNA_MOD_RES_MEM_T_MCMAC_ARRAY = 6, + BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7, + BNA_MOD_RES_T_MAX +}; + +enum bna_tx_res_req_type { + BNA_TX_RES_MEM_T_TCB = 0, + BNA_TX_RES_MEM_T_UNMAPQ = 1, + BNA_TX_RES_MEM_T_QPT = 2, + BNA_TX_RES_MEM_T_SWQPT = 3, + BNA_TX_RES_MEM_T_PAGE = 4, + BNA_TX_RES_MEM_T_IBIDX = 5, + BNA_TX_RES_INTR_T_TXCMPL = 6, + BNA_TX_RES_T_MAX, 
+}; + +enum bna_rx_mem_type { + BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */ + BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */ + BNA_RX_RES_MEM_T_UNMAPHQ = 2, + BNA_RX_RES_MEM_T_UNMAPDQ = 3, + BNA_RX_RES_MEM_T_CQPT = 4, + BNA_RX_RES_MEM_T_CSWQPT = 5, + BNA_RX_RES_MEM_T_CQPT_PAGE = 6, + BNA_RX_RES_MEM_T_HQPT = 7, + BNA_RX_RES_MEM_T_DQPT = 8, + BNA_RX_RES_MEM_T_HSWQPT = 9, + BNA_RX_RES_MEM_T_DSWQPT = 10, + BNA_RX_RES_MEM_T_DPAGE = 11, + BNA_RX_RES_MEM_T_HPAGE = 12, + BNA_RX_RES_MEM_T_IBIDX = 13, + BNA_RX_RES_MEM_T_RIT = 14, + BNA_RX_RES_T_INTR = 15, + BNA_RX_RES_T_MAX = 16 +}; + +enum bna_tx_type { + BNA_TX_T_REGULAR = 0, + BNA_TX_T_LOOPBACK = 1, +}; + +enum bna_tx_flags { + BNA_TX_F_ENET_STARTED = 1, + BNA_TX_F_ENABLED = 2, + BNA_TX_F_PRIO_CHANGED = 4, + BNA_TX_F_BW_UPDATED = 8, +}; + +enum bna_tx_mod_flags { + BNA_TX_MOD_F_ENET_STARTED = 1, + BNA_TX_MOD_F_ENET_LOOPBACK = 2, +}; + +enum bna_rx_type { + BNA_RX_T_REGULAR = 0, + BNA_RX_T_LOOPBACK = 1, +}; + +enum bna_rxp_type { + BNA_RXP_SINGLE = 1, + BNA_RXP_SLR = 2, + BNA_RXP_HDS = 3 +}; + +enum bna_rxmode { + BNA_RXMODE_PROMISC = 1, + BNA_RXMODE_DEFAULT = 2, + BNA_RXMODE_ALLMULTI = 4 +}; + +enum bna_rx_event { + RX_E_START = 1, + RX_E_STOP = 2, + RX_E_FAIL = 3, + RX_E_STARTED = 4, + RX_E_STOPPED = 5, + RX_E_RXF_STARTED = 6, + RX_E_RXF_STOPPED = 7, + RX_E_CLEANUP_DONE = 8, +}; + +enum bna_rx_flags { + BNA_RX_F_ENET_STARTED = 1, + BNA_RX_F_ENABLED = 2, +}; + +enum bna_rx_mod_flags { + BNA_RX_MOD_F_ENET_STARTED = 1, + BNA_RX_MOD_F_ENET_LOOPBACK = 2, +}; + +enum bna_rxf_flags { + BNA_RXF_F_PAUSED = 1, +}; + +enum bna_rxf_event { + RXF_E_START = 1, + RXF_E_STOP = 2, + RXF_E_FAIL = 3, + RXF_E_CONFIG = 4, + RXF_E_PAUSE = 5, + RXF_E_RESUME = 6, + RXF_E_FW_RESP = 7, +}; + +enum bna_enet_type { + BNA_ENET_T_REGULAR = 0, + BNA_ENET_T_LOOPBACK_INTERNAL = 1, + BNA_ENET_T_LOOPBACK_EXTERNAL = 2, +}; + +enum bna_link_status { + BNA_LINK_DOWN = 0, + BNA_LINK_UP = 1, + BNA_CEE_UP = 2 +}; + +enum bna_ethport_flags { + BNA_ETHPORT_F_ADMIN_UP = 1, + BNA_ETHPORT_F_PORT_ENABLED = 2, + BNA_ETHPORT_F_RX_STARTED = 4, +}; + +enum bna_enet_flags { + BNA_ENET_F_IOCETH_READY = 1, + BNA_ENET_F_ENABLED = 2, + BNA_ENET_F_PAUSE_CHANGED = 4, + BNA_ENET_F_MTU_CHANGED = 8 +}; + +enum bna_rss_flags { + BNA_RSS_F_RIT_PENDING = 1, + BNA_RSS_F_CFG_PENDING = 2, + BNA_RSS_F_STATUS_PENDING = 4, +}; + +enum bna_mod_flags { + BNA_MOD_F_INIT_DONE = 1, +}; + +enum bna_pkt_rates { + BNA_PKT_RATE_10K = 10000, + BNA_PKT_RATE_20K = 20000, + BNA_PKT_RATE_30K = 30000, + BNA_PKT_RATE_40K = 40000, + BNA_PKT_RATE_50K = 50000, + BNA_PKT_RATE_60K = 60000, + BNA_PKT_RATE_70K = 70000, + BNA_PKT_RATE_80K = 80000, +}; + +enum bna_dim_load_types { + BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */ + BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */ + BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */ + BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */ + BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */ + BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */ + BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */ + BNA_LOAD_T_LOW_4 = 7, /* r < 10K */ + BNA_LOAD_T_MAX = 8 +}; + +enum bna_dim_bias_types { + BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */ + BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */ + BNA_BIAS_T_MAX = 2 +}; + +#define BNA_MAX_NAME_SIZE 64 +struct bna_ident { + int id; + char name[BNA_MAX_NAME_SIZE]; +}; + +struct bna_mac { + /* This should be the first one */ + struct list_head qe; + u8 addr[ETH_ALEN]; + struct bna_mcam_handle *handle; +}; + +struct bna_mem_descr { + u32 len; + void *kva; + struct bna_dma_addr dma; +}; + +struct 
bna_mem_info { + enum bna_mem_type mem_type; + u32 len; + u32 num; + u32 align_sz; /* 0/1 = no alignment */ + struct bna_mem_descr *mdl; + void *cookie; /* For bnad to unmap dma later */ +}; + +struct bna_intr_descr { + int vector; +}; + +struct bna_intr_info { + enum bna_intr_type intr_type; + int num; + struct bna_intr_descr *idl; +}; + +union bna_res_u { + struct bna_mem_info mem_info; + struct bna_intr_info intr_info; +}; + +struct bna_res_info { + enum bna_res_type res_type; + union bna_res_u res_u; +}; + +/* HW QPT */ +struct bna_qpt { + struct bna_dma_addr hw_qpt_ptr; + void *kv_qpt_ptr; + u32 page_count; + u32 page_size; +}; + +struct bna_attr { + bool fw_query_complete; + int num_txq; + int num_rxp; + int num_ucmac; + int num_mcmac; + int max_rit_size; +}; + +/* IOCEth */ + +struct bna_ioceth { + bfa_fsm_t fsm; + struct bfa_ioc ioc; + + struct bna_attr attr; + struct bfa_msgq_cmd_entry msgq_cmd; + struct bfi_enet_attr_req attr_req; + + void (*stop_cbfn)(struct bnad *bnad); + struct bnad *stop_cbarg; + + struct bna *bna; +}; + +/* Enet */ + +/* Pause configuration */ +struct bna_pause_config { + enum bna_status tx_pause; + enum bna_status rx_pause; +}; + +struct bna_enet { + bfa_fsm_t fsm; + enum bna_enet_flags flags; + + enum bna_enet_type type; + + struct bna_pause_config pause_config; + int mtu; + + /* Callback for bna_enet_disable(), enet_stop() */ + void (*stop_cbfn)(void *); + void *stop_cbarg; + + /* Callback for bna_enet_pause_config() */ + void (*pause_cbfn)(struct bnad *); + + /* Callback for bna_enet_mtu_set() */ + void (*mtu_cbfn)(struct bnad *); + + struct bfa_wc chld_stop_wc; + + struct bfa_msgq_cmd_entry msgq_cmd; + struct bfi_enet_set_pause_req pause_req; + + struct bna *bna; +}; + +/* Ethport */ + +struct bna_ethport { + bfa_fsm_t fsm; + enum bna_ethport_flags flags; + + enum bna_link_status link_status; + + int rx_started_count; + + void (*stop_cbfn)(struct bna_enet *); + + void (*adminup_cbfn)(struct bnad *, enum bna_cb_status); + + void (*link_cbfn)(struct bnad *, enum bna_link_status); + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_enable_req admin_req; + struct bfi_enet_diag_lb_req lpbk_req; + } bfi_enet_cmd; + + struct bna *bna; +}; + +/* Interrupt Block */ + +/* Doorbell structure */ +struct bna_ib_dbell { + void __iomem *doorbell_addr; + u32 doorbell_ack; +}; + +/* IB structure */ +struct bna_ib { + struct bna_dma_addr ib_seg_host_addr; + void *ib_seg_host_addr_kva; + + struct bna_ib_dbell door_bell; + + enum bna_intr_type intr_type; + int intr_vector; + + u8 coalescing_timeo; /* Unit is 5usec. 
*/ + + int interpkt_count; + int interpkt_timeo; +}; + +/* Tx object */ + +/* Tx datapath control structure */ +#define BNA_Q_NAME_SIZE 16 +struct bna_tcb { + /* Fast path */ + void **sw_qpt; + void *sw_q; + void *unmap_q; + u32 producer_index; + u32 consumer_index; + volatile u32 *hw_consumer_index; + u32 q_depth; + void __iomem *q_dbell; + struct bna_ib_dbell *i_dbell; + /* Control path */ + struct bna_txq *txq; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + enum bna_intr_type intr_type; + int intr_vector; + u8 priority; /* Current priority */ + unsigned long flags; /* Used by bnad as required */ + int id; + char name[BNA_Q_NAME_SIZE]; +}; + +/* TxQ QPT and configuration */ +struct bna_txq { + /* This should be the first one */ + struct list_head qe; + + u8 priority; + + struct bna_qpt qpt; + struct bna_tcb *tcb; + struct bna_ib ib; + + struct bna_tx *tx; + + int hw_id; + + u64 tx_packets; + u64 tx_bytes; +}; + +/* Tx object */ +struct bna_tx { + /* This should be the first one */ + struct list_head qe; + int rid; + int hw_id; + + bfa_fsm_t fsm; + enum bna_tx_flags flags; + + enum bna_tx_type type; + int num_txq; + + struct list_head txq_q; + u16 txf_vlan_id; + + /* Tx event handlers */ + void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *); + void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *); + void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *); + + /* callback for bna_tx_disable(), bna_tx_stop() */ + void (*stop_cbfn)(void *arg, struct bna_tx *tx); + void *stop_cbarg; + + /* callback for bna_tx_prio_set() */ + void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx); + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_tx_cfg_req cfg_req; + struct bfi_enet_req req; + struct bfi_enet_tx_cfg_rsp cfg_rsp; + } bfi_enet_cmd; + + struct bna *bna; + void *priv; /* bnad's cookie */ +}; + +/* Tx object configuration used during creation */ +struct bna_tx_config { + int num_txq; + int txq_depth; + int coalescing_timeo; + enum bna_tx_type tx_type; +}; + +struct bna_tx_event_cbfn { + /* Optional */ + void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *); + void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *); + /* Mandatory */ + void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *); +}; + +/* Tx module - keeps track of free, active tx objects */ +struct bna_tx_mod { + struct bna_tx *tx; /* BFI_MAX_TXQ entries */ + struct bna_txq *txq; /* BFI_MAX_TXQ entries */ + + struct list_head tx_free_q; + struct list_head tx_active_q; + + struct list_head txq_free_q; + + /* callback for bna_tx_mod_stop() */ + void (*stop_cbfn)(struct bna_enet *enet); + + struct bfa_wc tx_stop_wc; + + enum bna_tx_mod_flags flags; + + u8 prio_map; + int default_prio; + int iscsi_over_cee; + int iscsi_prio; + int prio_reconfigured; + + u32 rid_mask; + + struct bna *bna; +}; + +/* Rx object */ + +/* Rx datapath control structure */ +struct bna_rcb { + /* Fast path */ + void **sw_qpt; + void *sw_q; + void *unmap_q; + u32 producer_index; + u32 consumer_index; + u32 q_depth; + void __iomem *q_dbell; + /* Control path */ + struct bna_rxq *rxq; + struct bna_ccb *ccb; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + unsigned long flags; + int id; +}; + +/* RxQ structure - QPT, configuration */ +struct bna_rxq { + struct list_head qe; + + int 
buffer_size; + int q_depth; + u32 num_vecs; + enum bna_status multi_buffer; + + struct bna_qpt qpt; + struct bna_rcb *rcb; + + struct bna_rxp *rxp; + struct bna_rx *rx; + + int hw_id; + + u64 rx_packets; + u64 rx_bytes; + u64 rx_packets_with_error; + u64 rxbuf_alloc_failed; +}; + +/* RxQ pair */ +union bna_rxq_u { + struct { + struct bna_rxq *hdr; + struct bna_rxq *data; + } hds; + struct { + struct bna_rxq *small; + struct bna_rxq *large; + } slr; + struct { + struct bna_rxq *only; + struct bna_rxq *reserved; + } single; +}; + +/* Packet rate for Dynamic Interrupt Moderation */ +struct bna_pkt_rate { + u32 small_pkt_cnt; + u32 large_pkt_cnt; +}; + +/* Completion control structure */ +struct bna_ccb { + /* Fast path */ + void **sw_qpt; + void *sw_q; + u32 producer_index; + volatile u32 *hw_producer_index; + u32 q_depth; + struct bna_ib_dbell *i_dbell; + struct bna_rcb *rcb[2]; + void *ctrl; /* For bnad */ + struct bna_pkt_rate pkt_rate; + u32 pkts_una; + u32 bytes_per_intr; + + /* Control path */ + struct bna_cq *cq; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + enum bna_intr_type intr_type; + int intr_vector; + u8 rx_coalescing_timeo; /* For NAPI */ + int id; + char name[BNA_Q_NAME_SIZE]; +}; + +/* CQ QPT, configuration */ +struct bna_cq { + struct bna_qpt qpt; + struct bna_ccb *ccb; + + struct bna_ib ib; + + struct bna_rx *rx; +}; + +struct bna_rss_config { + enum bfi_enet_rss_type hash_type; + u8 hash_mask; + u32 toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN]; +}; + +struct bna_hds_config { + enum bfi_enet_hds_type hdr_type; + int forced_offset; +}; + +/* Rx object configuration used during creation */ +struct bna_rx_config { + enum bna_rx_type rx_type; + int num_paths; + enum bna_rxp_type rxp_type; + int paused; + int coalescing_timeo; + /* + * Small/Large (or Header/Data) buffer size to be configured + * for SLR and HDS queue type. 
+ */ + u32 frame_size; + + /* header or small queue */ + u32 q1_depth; + u32 q1_buf_size; + + /* data or large queue */ + u32 q0_depth; + u32 q0_buf_size; + u32 q0_num_vecs; + enum bna_status q0_multi_buf; + + enum bna_status rss_status; + struct bna_rss_config rss_config; + + struct bna_hds_config hds_config; + + enum bna_status vlan_strip_status; +}; + +/* Rx Path structure - one per MSIX vector/CPU */ +struct bna_rxp { + /* This should be the first one */ + struct list_head qe; + + enum bna_rxp_type type; + union bna_rxq_u rxq; + struct bna_cq cq; + + struct bna_rx *rx; + + /* MSI-x vector number for configuring RSS */ + int vector; + int hw_id; +}; + +/* RxF structure (hardware Rx Function) */ +struct bna_rxf { + bfa_fsm_t fsm; + enum bna_rxf_flags flags; + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_enable_req req; + struct bfi_enet_rss_cfg_req rss_req; + struct bfi_enet_rit_req rit_req; + struct bfi_enet_rx_vlan_req vlan_req; + struct bfi_enet_mcast_add_req mcast_add_req; + struct bfi_enet_mcast_del_req mcast_del_req; + struct bfi_enet_ucast_req ucast_req; + } bfi_enet_cmd; + + /* callback for bna_rxf_start() */ + void (*start_cbfn) (struct bna_rx *rx); + struct bna_rx *start_cbarg; + + /* callback for bna_rxf_stop() */ + void (*stop_cbfn) (struct bna_rx *rx); + struct bna_rx *stop_cbarg; + + /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */ + void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx); + struct bnad *oper_state_cbarg; + + /** + * callback for: + * bna_rxf_ucast_set() + * bna_rxf_{ucast/mcast}_add(), + * bna_rxf_{ucast/mcast}_del(), + * bna_rxf_mode_set() + */ + void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx); + struct bnad *cam_fltr_cbarg; + + /* List of unicast addresses yet to be applied to h/w */ + struct list_head ucast_pending_add_q; + struct list_head ucast_pending_del_q; + struct bna_mac *ucast_pending_mac; + int ucast_pending_set; + /* ucast addresses applied to the h/w */ + struct list_head ucast_active_q; + struct bna_mac ucast_active_mac; + int ucast_active_set; + + /* List of multicast addresses yet to be applied to h/w */ + struct list_head mcast_pending_add_q; + struct list_head mcast_pending_del_q; + /* multicast addresses applied to the h/w */ + struct list_head mcast_active_q; + struct list_head mcast_handle_q; + + /* Rx modes yet to be applied to h/w */ + enum bna_rxmode rxmode_pending; + enum bna_rxmode rxmode_pending_bitmask; + /* Rx modes applied to h/w */ + enum bna_rxmode rxmode_active; + + u8 vlan_pending_bitmask; + enum bna_status vlan_filter_status; + u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32]; + bool vlan_strip_pending; + enum bna_status vlan_strip_status; + + enum bna_rss_flags rss_pending; + enum bna_status rss_status; + struct bna_rss_config rss_cfg; + u8 *rit; + int rit_size; + + struct bna_rx *rx; +}; + +/* Rx object */ +struct bna_rx { + /* This should be the first one */ + struct list_head qe; + int rid; + int hw_id; + + bfa_fsm_t fsm; + + enum bna_rx_type type; + + int num_paths; + struct list_head rxp_q; + + struct bna_hds_config hds_cfg; + + struct bna_rxf rxf; + + enum bna_rx_flags rx_flags; + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_rx_cfg_req cfg_req; + struct bfi_enet_req req; + struct bfi_enet_rx_cfg_rsp cfg_rsp; + } bfi_enet_cmd; + + /* Rx event handlers */ + void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *); + void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); + void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); + 
void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); + void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); + + /* callback for bna_rx_disable(), bna_rx_stop() */ + void (*stop_cbfn)(void *arg, struct bna_rx *rx); + void *stop_cbarg; + + struct bna *bna; + void *priv; /* bnad's cookie */ +}; + +struct bna_rx_event_cbfn { + /* Optional */ + void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *); + void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); + void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); + void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); + void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *); + /* Mandatory */ + void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); +}; + +/* Rx module - keeps track of free, active rx objects */ +struct bna_rx_mod { + struct bna *bna; /* back pointer to parent */ + struct bna_rx *rx; /* BFI_MAX_RXQ entries */ + struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */ + struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */ + + struct list_head rx_free_q; + struct list_head rx_active_q; + int rx_free_count; + + struct list_head rxp_free_q; + int rxp_free_count; + + struct list_head rxq_free_q; + int rxq_free_count; + + enum bna_rx_mod_flags flags; + + /* callback for bna_rx_mod_stop() */ + void (*stop_cbfn)(struct bna_enet *enet); + + struct bfa_wc rx_stop_wc; + u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX]; + u32 rid_mask; +}; + +/* CAM */ + +struct bna_ucam_mod { + struct bna_mac *ucmac; /* num_ucmac * 2 entries */ + struct list_head free_q; + struct list_head del_q; + + struct bna *bna; +}; + +struct bna_mcam_handle { + /* This should be the first one */ + struct list_head qe; + int handle; + int refcnt; +}; + +struct bna_mcam_mod { + struct bna_mac *mcmac; /* num_mcmac * 2 entries */ + struct bna_mcam_handle *mchandle; /* num_mcmac entries */ + struct list_head free_q; + struct list_head del_q; + struct list_head free_handle_q; + + struct bna *bna; +}; + +/* Statistics */ + +struct bna_stats { + struct bna_dma_addr hw_stats_dma; + struct bfi_enet_stats *hw_stats_kva; + struct bfi_enet_stats hw_stats; +}; + +struct bna_stats_mod { + bool ioc_ready; + bool stats_get_busy; + bool stats_clr_busy; + struct bfa_msgq_cmd_entry stats_get_cmd; + struct bfa_msgq_cmd_entry stats_clr_cmd; + struct bfi_enet_stats_req stats_get; + struct bfi_enet_stats_req stats_clr; +}; + +/* BNA */ + +struct bna { + struct bna_ident ident; + struct bfa_pcidev pcidev; + + struct bna_reg regs; + struct bna_bit_defn bits; + + struct bna_stats stats; + + struct bna_ioceth ioceth; + struct bfa_cee cee; + struct bfa_flash flash; + struct bfa_msgq msgq; + + struct bna_ethport ethport; + struct bna_enet enet; + struct bna_stats_mod stats_mod; + + struct bna_tx_mod tx_mod; + struct bna_rx_mod rx_mod; + struct bna_ucam_mod ucam_mod; + struct bna_mcam_mod mcam_mod; + + enum bna_mod_flags mod_flags; + + int default_mode_rid; + int promisc_rid; + + struct bnad *bnad; +}; +#endif /* __BNA_TYPES_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c new file mode 100644 index 000000000..b69b8715a --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -0,0 +1,3896 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnad.h" +#include "bna.h" +#include "cna.h" + +static DEFINE_MUTEX(bnad_fwimg_mutex); + +/* + * Module params + */ +static uint bnad_msix_disable; +module_param(bnad_msix_disable, uint, 0444); +MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode"); + +static uint bnad_ioc_auto_recover = 1; +module_param(bnad_ioc_auto_recover, uint, 0444); +MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); + +static uint bna_debugfs_enable = 1; +module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," + " Range[false:0|true:1]"); + +/* + * Global variables + */ +static u32 bnad_rxqs_per_cq = 2; +static u32 bna_id; +static struct mutex bnad_list_mutex; +static LIST_HEAD(bnad_list); +static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +/* + * Local MACROS + */ +#define BNAD_GET_MBOX_IRQ(_bnad) \ + (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \ + ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ + ((_bnad)->pcidev->irq)) + +#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \ +do { \ + (_res_info)->res_type = BNA_RES_T_MEM; \ + (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ + (_res_info)->res_u.mem_info.num = (_num); \ + (_res_info)->res_u.mem_info.len = (_size); \ +} while (0) + +static void +bnad_add_to_list(struct bnad *bnad) +{ + mutex_lock(&bnad_list_mutex); + list_add_tail(&bnad->list_entry, &bnad_list); + bnad->id = bna_id++; + mutex_unlock(&bnad_list_mutex); +} + +static void +bnad_remove_from_list(struct bnad *bnad) +{ + mutex_lock(&bnad_list_mutex); + list_del(&bnad->list_entry); + mutex_unlock(&bnad_list_mutex); +} + +/* + * Reinitialize completions in CQ, once Rx is taken down + */ +static void +bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bna_cq_entry *cmpl; + int i; + + for (i = 0; i < ccb->q_depth; i++) { + cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i]; + cmpl->valid = 0; + } +} + +/* Tx Datapath functions */ + + +/* Caller should ensure that the entry at unmap_q[index] is valid */ +static u32 +bnad_tx_buff_unmap(struct bnad *bnad, + struct bnad_tx_unmap *unmap_q, + u32 q_depth, u32 index) +{ + struct bnad_tx_unmap *unmap; + struct sk_buff *skb; + int vector, nvecs; + + unmap = &unmap_q[index]; + nvecs = unmap->nvecs; + + skb = unmap->skb; + unmap->skb = NULL; + unmap->nvecs = 0; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vectors[0], dma_addr), + skb_headlen(skb), DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); + nvecs--; + + vector = 0; + while (nvecs) { + vector++; + if (vector == BFI_TX_MAX_VECTORS_PER_WI) { + vector = 0; + BNA_QE_INDX_INC(index, q_depth); + unmap = &unmap_q[index]; + } + + dma_unmap_page(&bnad->pcidev->dev, + 
dma_unmap_addr(&unmap->vectors[vector], dma_addr), + dma_unmap_len(&unmap->vectors[vector], dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); + nvecs--; + } + + BNA_QE_INDX_INC(index, q_depth); + + return index; +} + +/* + * Frees all pending Tx Bufs + * At this point no activity is expected on the Q, + * so DMA unmap & freeing is fine. + */ +static void +bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_unmap *unmap_q = tcb->unmap_q; + struct sk_buff *skb; + int i; + + for (i = 0; i < tcb->q_depth; i++) { + skb = unmap_q[i].skb; + if (!skb) + continue; + bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); + + dev_kfree_skb_any(skb); + } +} + +/* + * bnad_txcmpl_process : Frees the Tx bufs on Tx completion + * Can be called in a) Interrupt context + * b) Sending context + */ +static u32 +bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) +{ + u32 sent_packets = 0, sent_bytes = 0; + u32 wis, unmap_wis, hw_cons, cons, q_depth; + struct bnad_tx_unmap *unmap_q = tcb->unmap_q; + struct bnad_tx_unmap *unmap; + struct sk_buff *skb; + + /* Just return if TX is stopped */ + if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + return 0; + + hw_cons = *(tcb->hw_consumer_index); + cons = tcb->consumer_index; + q_depth = tcb->q_depth; + + wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); + BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); + + while (wis) { + unmap = &unmap_q[cons]; + + skb = unmap->skb; + + sent_packets++; + sent_bytes += skb->len; + + unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); + wis -= unmap_wis; + + cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); + dev_kfree_skb_any(skb); + } + + /* Update consumer pointers. */ + tcb->consumer_index = hw_cons; + + tcb->txq->tx_packets += sent_packets; + tcb->txq->tx_bytes += sent_bytes; + + return sent_packets; +} + +static u32 +bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct net_device *netdev = bnad->netdev; + u32 sent = 0; + + if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) + return 0; + + sent = bnad_txcmpl_process(bnad, tcb); + if (sent) { + if (netif_queue_stopped(netdev) && + netif_carrier_ok(netdev) && + BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= + BNAD_NETIF_WAKE_THRESHOLD) { + if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + } + + if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + bna_ib_ack(tcb->i_dbell, sent); + + smp_mb__before_atomic(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + + return sent; +} + +/* MSIX Tx Completion Handler */ +static irqreturn_t +bnad_msix_tx(int irq, void *data) +{ + struct bna_tcb *tcb = (struct bna_tcb *)data; + struct bnad *bnad = tcb->bnad; + + bnad_tx_complete(bnad, tcb); + + return IRQ_HANDLED; +} + +static inline void +bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + + unmap_q->reuse_pi = -1; + unmap_q->alloc_order = -1; + unmap_q->map_size = 0; + unmap_q->type = BNAD_RXBUF_NONE; +} + +/* Default is page-based allocation. 
Multi-buffer support - TBD */ +static int +bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + int order; + + bnad_rxq_alloc_uninit(bnad, rcb); + + order = get_order(rcb->rxq->buffer_size); + + unmap_q->type = BNAD_RXBUF_PAGE; + + if (bna_is_small_rxq(rcb->id)) { + unmap_q->alloc_order = 0; + unmap_q->map_size = rcb->rxq->buffer_size; + } else { + if (rcb->rxq->multi_buffer) { + unmap_q->alloc_order = 0; + unmap_q->map_size = rcb->rxq->buffer_size; + unmap_q->type = BNAD_RXBUF_MULTI_BUFF; + } else { + unmap_q->alloc_order = order; + unmap_q->map_size = + (rcb->rxq->buffer_size > 2048) ? + PAGE_SIZE << order : 2048; + } + } + + BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size)); + + return 0; +} + +static inline void +bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) +{ + if (!unmap->page) + return; + + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + put_page(unmap->page); + unmap->page = NULL; + dma_unmap_addr_set(&unmap->vector, dma_addr, 0); + unmap->vector.len = 0; +} + +static inline void +bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) +{ + if (!unmap->skb) + return; + + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + dev_kfree_skb_any(unmap->skb); + unmap->skb = NULL; + dma_unmap_addr_set(&unmap->vector, dma_addr, 0); + unmap->vector.len = 0; +} + +static void +bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + int i; + + for (i = 0; i < rcb->q_depth; i++) { + struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_cleanup_skb(bnad, unmap); + else + bnad_rxq_cleanup_page(bnad, unmap); + } + bnad_rxq_alloc_uninit(bnad, rcb); +} + +static u32 +bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) +{ + u32 alloced, prod, q_depth; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_rx_unmap *unmap, *prev; + struct bna_rxq_entry *rxent; + struct page *page; + u32 page_offset, alloc_size; + dma_addr_t dma_addr; + + prod = rcb->producer_index; + q_depth = rcb->q_depth; + + alloc_size = PAGE_SIZE << unmap_q->alloc_order; + alloced = 0; + + while (nalloc--) { + unmap = &unmap_q->unmap[prod]; + + if (unmap_q->reuse_pi < 0) { + page = alloc_pages(GFP_ATOMIC | __GFP_COMP, + unmap_q->alloc_order); + page_offset = 0; + } else { + prev = &unmap_q->unmap[unmap_q->reuse_pi]; + page = prev->page; + page_offset = prev->page_offset + unmap_q->map_size; + get_page(page); + } + + if (unlikely(!page)) { + BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); + rcb->rxq->rxbuf_alloc_failed++; + goto finishing; + } + + dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, + unmap_q->map_size, DMA_FROM_DEVICE); + + unmap->page = page; + unmap->page_offset = page_offset; + dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); + unmap->vector.len = unmap_q->map_size; + page_offset += unmap_q->map_size; + + if (page_offset < alloc_size) + unmap_q->reuse_pi = prod; + else + unmap_q->reuse_pi = -1; + + rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; + BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); + BNA_QE_INDX_INC(prod, q_depth); + alloced++; + } + +finishing: + if (likely(alloced)) { + rcb->producer_index = prod; + smp_mb(); + if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) + bna_rxq_prod_indx_doorbell(rcb); + } + + 
return alloced; +} + +static u32 +bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) +{ + u32 alloced, prod, q_depth, buff_sz; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_rx_unmap *unmap; + struct bna_rxq_entry *rxent; + struct sk_buff *skb; + dma_addr_t dma_addr; + + buff_sz = rcb->rxq->buffer_size; + prod = rcb->producer_index; + q_depth = rcb->q_depth; + + alloced = 0; + while (nalloc--) { + unmap = &unmap_q->unmap[prod]; + + skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); + + if (unlikely(!skb)) { + BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); + rcb->rxq->rxbuf_alloc_failed++; + goto finishing; + } + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + buff_sz, DMA_FROM_DEVICE); + + unmap->skb = skb; + dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); + unmap->vector.len = buff_sz; + + rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; + BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); + BNA_QE_INDX_INC(prod, q_depth); + alloced++; + } + +finishing: + if (likely(alloced)) { + rcb->producer_index = prod; + smp_mb(); + if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) + bna_rxq_prod_indx_doorbell(rcb); + } + + return alloced; +} + +static inline void +bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + u32 to_alloc; + + to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); + if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) + return; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_refill_skb(bnad, rcb, to_alloc); + else + bnad_rxq_refill_page(bnad, rcb, to_alloc); +} + +#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \ + BNA_CQ_EF_L4_CKSUM_OK) + +#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_tcp6 (BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_udp6 (BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) + +static void +bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, + u32 sop_ci, u32 nvecs) +{ + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap; + u32 ci, vec; + + unmap_q = rcb->unmap_q; + for (vec = 0, ci = sop_ci; vec < nvecs; vec++) { + unmap = &unmap_q->unmap[ci]; + BNA_QE_INDX_INC(ci, rcb->q_depth); + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_cleanup_skb(bnad, unmap); + else + bnad_rxq_cleanup_page(bnad, unmap); + } +} + +static void +bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb, + u32 sop_ci, u32 nvecs, u32 last_fraglen) +{ + struct bnad *bnad; + u32 ci, vec, len, totlen = 0; + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap; + + unmap_q = rcb->unmap_q; + bnad = rcb->bnad; + + /* prefetch header */ + prefetch(page_address(unmap_q->unmap[sop_ci].page) + + unmap_q->unmap[sop_ci].page_offset); + + for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) { + unmap = &unmap_q->unmap[ci]; + BNA_QE_INDX_INC(ci, rcb->q_depth); + + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + + len = (vec == nvecs) ? 
+ last_fraglen : unmap->vector.len; + skb->truesize += unmap->vector.len; + totlen += len; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + unmap->page, unmap->page_offset, len); + + unmap->page = NULL; + unmap->vector.len = 0; + } + + skb->len += totlen; + skb->data_len += totlen; +} + +static inline void +bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, + struct bnad_rx_unmap *unmap, u32 len) +{ + prefetch(skb->data); + + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, bnad->netdev); + + unmap->skb = NULL; + unmap->vector.len = 0; +} + +static u32 +bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) +{ + struct bna_cq_entry *cq, *cmpl, *next_cmpl; + struct bna_rcb *rcb = NULL; + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap = NULL; + struct sk_buff *skb = NULL; + struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; + struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; + u32 packets = 0, len = 0, totlen = 0; + u32 pi, vec, sop_ci = 0, nvecs = 0; + u32 flags, masked_flags; + + prefetch(bnad->netdev); + + cq = ccb->sw_q; + + while (packets < budget) { + cmpl = &cq[ccb->producer_index]; + if (!cmpl->valid) + break; + /* The 'valid' field is set by the adapter, only after writing + * the other fields of completion entry. Hence, do not load + * other fields of completion entry *before* the 'valid' is + * loaded. Adding the rmb() here prevents the compiler and/or + * CPU from reordering the reads which would potentially result + * in reading stale values in completion entry. + */ + rmb(); + + BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); + + if (bna_is_small_rxq(cmpl->rxq_id)) + rcb = ccb->rcb[1]; + else + rcb = ccb->rcb[0]; + + unmap_q = rcb->unmap_q; + + /* start of packet ci */ + sop_ci = rcb->consumer_index; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { + unmap = &unmap_q->unmap[sop_ci]; + skb = unmap->skb; + } else { + skb = napi_get_frags(&rx_ctrl->napi); + if (unlikely(!skb)) + break; + } + prefetch(skb); + + flags = ntohl(cmpl->flags); + len = ntohs(cmpl->length); + totlen = len; + nvecs = 1; + + /* Check all the completions for this frame. + * busy-wait doesn't help much, break here. + */ + if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && + (flags & BNA_CQ_EF_EOP) == 0) { + pi = ccb->producer_index; + do { + BNA_QE_INDX_INC(pi, ccb->q_depth); + next_cmpl = &cq[pi]; + + if (!next_cmpl->valid) + break; + /* The 'valid' field is set by the adapter, only + * after writing the other fields of completion + * entry. Hence, do not load other fields of + * completion entry *before* the 'valid' is + * loaded. Adding the rmb() here prevents the + * compiler and/or CPU from reordering the reads + * which would potentially result in reading + * stale values in completion entry. + */ + rmb(); + + len = ntohs(next_cmpl->length); + flags = ntohl(next_cmpl->flags); + + nvecs++; + totlen += len; + } while ((flags & BNA_CQ_EF_EOP) == 0); + + if (!next_cmpl->valid) + break; + } + + /* TODO: BNA_CQ_EF_LOCAL ? 
*/ + if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | + BNA_CQ_EF_FCS_ERROR | + BNA_CQ_EF_TOO_LONG))) { + bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); + rcb->rxq->rx_packets_with_error++; + + goto next; + } + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_cq_setup_skb(bnad, skb, unmap, len); + else + bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); + + packets++; + rcb->rxq->rx_packets++; + rcb->rxq->rx_bytes += totlen; + ccb->bytes_per_intr += totlen; + + masked_flags = flags & flags_cksum_prot_mask; + + if (likely + ((bnad->netdev->features & NETIF_F_RXCSUM) && + ((masked_flags == flags_tcp4) || + (masked_flags == flags_udp4) || + (masked_flags == flags_tcp6) || + (masked_flags == flags_udp6)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + if ((flags & BNA_CQ_EF_VLAN) && + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + netif_receive_skb(skb); + else + napi_gro_frags(&rx_ctrl->napi); + +next: + BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); + for (vec = 0; vec < nvecs; vec++) { + cmpl = &cq[ccb->producer_index]; + cmpl->valid = 0; + BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); + } + cmpl = &cq[ccb->producer_index]; + } + + napi_gro_flush(&rx_ctrl->napi, false); + if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) + bna_ib_ack_disable_irq(ccb->i_dbell, packets); + + bnad_rxq_post(bnad, ccb->rcb[0]); + if (ccb->rcb[1]) + bnad_rxq_post(bnad, ccb->rcb[1]); + + return packets; +} + +static void +bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); + struct napi_struct *napi = &rx_ctrl->napi; + + if (likely(napi_schedule_prep(napi))) { + __napi_schedule(napi); + rx_ctrl->rx_schedule++; + } +} + +/* MSIX Rx Path Handler */ +static irqreturn_t +bnad_msix_rx(int irq, void *data) +{ + struct bna_ccb *ccb = (struct bna_ccb *)data; + + if (ccb) { + ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++; + bnad_netif_rx_schedule_poll(ccb->bnad, ccb); + } + + return IRQ_HANDLED; +} + +/* Interrupt handlers */ + +/* Mbox Interrupt Handlers */ +static irqreturn_t +bnad_msix_mbox_handler(int irq, void *data) +{ + u32 intr_status; + unsigned long flags; + struct bnad *bnad = (struct bnad *)data; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_HANDLED; + } + + bna_intr_status_get(&bnad->bna, intr_status); + + if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) + bna_mbox_handler(&bnad->bna, intr_status); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t +bnad_isr(int irq, void *data) +{ + int i, j; + u32 intr_status; + unsigned long flags; + struct bnad *bnad = (struct bnad *)data; + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + struct bna_tcb *tcb = NULL; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_NONE; + } + + bna_intr_status_get(&bnad->bna, intr_status); + + if (unlikely(!intr_status)) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_NONE; + } + + if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) + bna_mbox_handler(&bnad->bna, intr_status); + + 
spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (!BNA_IS_INTX_DATA_INTR(intr_status)) + return IRQ_HANDLED; + + /* Process data interrupts */ + /* Tx processing */ + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + tcb = bnad->tx_info[i].tcb[j]; + if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); + } + } + /* Rx processing */ + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (rx_ctrl->ccb) + bnad_netif_rx_schedule_poll(bnad, + rx_ctrl->ccb); + } + } + return IRQ_HANDLED; +} + +/* + * Called in interrupt / callback context + * with bna_lock held, so cfg_flags access is OK + */ +static void +bnad_enable_mbox_irq(struct bnad *bnad) +{ + clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); +} + +/* + * Called with bnad->bna_lock held b'cos of + * bnad->cfg_flags access. + */ +static void +bnad_disable_mbox_irq(struct bnad *bnad) +{ + set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); +} + +static void +bnad_set_netdev_perm_addr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + + memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len); + if (is_zero_ether_addr(netdev->dev_addr)) + memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len); +} + +/* Control Path Handlers */ + +/* Callbacks */ +void +bnad_cb_mbox_intr_enable(struct bnad *bnad) +{ + bnad_enable_mbox_irq(bnad); +} + +void +bnad_cb_mbox_intr_disable(struct bnad *bnad) +{ + bnad_disable_mbox_irq(bnad); +} + +void +bnad_cb_ioceth_ready(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.ioc_comp); +} + +void +bnad_cb_ioceth_failed(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; + complete(&bnad->bnad_completions.ioc_comp); +} + +void +bnad_cb_ioceth_disabled(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.ioc_comp); +} + +static void +bnad_cb_enet_disabled(void *arg) +{ + struct bnad *bnad = (struct bnad *)arg; + + netif_carrier_off(bnad->netdev); + complete(&bnad->bnad_completions.enet_comp); +} + +void +bnad_cb_ethport_link_status(struct bnad *bnad, + enum bna_link_status link_status) +{ + bool link_up = false; + + link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); + + if (link_status == BNA_CEE_UP) { + if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) + BNAD_UPDATE_CTR(bnad, cee_toggle); + set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); + } else { + if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) + BNAD_UPDATE_CTR(bnad, cee_toggle); + clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); + } + + if (link_up) { + if (!netif_carrier_ok(bnad->netdev)) { + uint tx_id, tcb_id; + printk(KERN_WARNING "bna: %s link up\n", + bnad->netdev->name); + netif_carrier_on(bnad->netdev); + BNAD_UPDATE_CTR(bnad, link_toggle); + for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { + for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; + tcb_id++) { + struct bna_tcb *tcb = + bnad->tx_info[tx_id].tcb[tcb_id]; + u32 txq_id; + if (!tcb) + continue; + + txq_id = tcb->id; + + if (test_bit(BNAD_TXQ_TX_STARTED, + &tcb->flags)) { + /* + * Force an immediate + * Transmit Schedule */ + printk(KERN_INFO "bna: %s %d " + 
"TXQ_STARTED\n", + bnad->netdev->name, + txq_id); + netif_wake_subqueue( + bnad->netdev, + txq_id); + BNAD_UPDATE_CTR(bnad, + netif_queue_wakeup); + } else { + netif_stop_subqueue( + bnad->netdev, + txq_id); + BNAD_UPDATE_CTR(bnad, + netif_queue_stop); + } + } + } + } + } else { + if (netif_carrier_ok(bnad->netdev)) { + printk(KERN_WARNING "bna: %s link down\n", + bnad->netdev->name); + netif_carrier_off(bnad->netdev); + BNAD_UPDATE_CTR(bnad, link_toggle); + } + } +} + +static void +bnad_cb_tx_disabled(void *arg, struct bna_tx *tx) +{ + struct bnad *bnad = (struct bnad *)arg; + + complete(&bnad->bnad_completions.tx_comp); +} + +static void +bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_info *tx_info = + (struct bnad_tx_info *)tcb->txq->tx->priv; + + tcb->priv = tcb; + tx_info->tcb[tcb->id] = tcb; +} + +static void +bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_info *tx_info = + (struct bnad_tx_info *)tcb->txq->tx->priv; + + tx_info->tcb[tcb->id] = NULL; + tcb->priv = NULL; +} + +static void +bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_info *rx_info = + (struct bnad_rx_info *)ccb->cq->rx->priv; + + rx_info->rx_ctrl[ccb->id].ccb = ccb; + ccb->ctrl = &rx_info->rx_ctrl[ccb->id]; +} + +static void +bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_info *rx_info = + (struct bnad_rx_info *)ccb->cq->rx->priv; + + rx_info->rx_ctrl[ccb->id].ccb = NULL; +} + +static void +bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = + (struct bnad_tx_info *)tx->priv; + struct bna_tcb *tcb; + u32 txq_id; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + txq_id = tcb->id; + clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + netif_stop_subqueue(bnad->netdev, txq_id); + printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n", + bnad->netdev->name, txq_id); + } +} + +static void +bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; + struct bna_tcb *tcb; + u32 txq_id; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + txq_id = tcb->id; + + BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)); + set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + BUG_ON(*(tcb->hw_consumer_index) != 0); + + if (netif_carrier_ok(bnad->netdev)) { + printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", + bnad->netdev->name, txq_id); + netif_wake_subqueue(bnad->netdev, txq_id); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + + /* + * Workaround for first ioceth enable failure & we + * get a 0 MAC address. We try to get the MAC address + * again here. + */ + if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) { + bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr); + bnad_set_netdev_perm_addr(bnad); + } +} + +/* + * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. 
+ */ +static void +bnad_tx_cleanup(struct delayed_work *work) +{ + struct bnad_tx_info *tx_info = + container_of(work, struct bnad_tx_info, tx_cleanup_work); + struct bnad *bnad = NULL; + struct bna_tcb *tcb; + unsigned long flags; + u32 i, pending = 0; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + + bnad = tcb->bnad; + + if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { + pending++; + continue; + } + + bnad_txq_cleanup(bnad, tcb); + + smp_mb__before_atomic(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } + + if (pending) { + queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, + msecs_to_jiffies(1)); + return; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_cleanup_complete(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; + struct bna_tcb *tcb; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + } + + queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); +} + +static void +bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bna_ccb *ccb; + struct bnad_rx_ctrl *rx_ctrl; + int i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); + + if (ccb->rcb[1]) + clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); + } +} + +/* + * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. + */ +static void +bnad_rx_cleanup(void *work) +{ + struct bnad_rx_info *rx_info = + container_of(work, struct bnad_rx_info, rx_cleanup_work); + struct bnad_rx_ctrl *rx_ctrl; + struct bnad *bnad = NULL; + unsigned long flags; + u32 i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + + if (!rx_ctrl->ccb) + continue; + + bnad = rx_ctrl->ccb->bnad; + + /* + * Wait till the poll handler has exited + * and nothing can be scheduled anymore + */ + napi_disable(&rx_ctrl->napi); + + bnad_cq_cleanup(bnad, rx_ctrl->ccb); + bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); + if (rx_ctrl->ccb->rcb[1]) + bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_cleanup_complete(rx_info->rx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bna_ccb *ccb; + struct bnad_rx_ctrl *rx_ctrl; + int i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); + + if (ccb->rcb[1]) + clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); + } + + queue_work(bnad->work_q, &rx_info->rx_cleanup_work); +} + +static void +bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; + struct bna_ccb *ccb; + struct bna_rcb *rcb; + struct bnad_rx_ctrl *rx_ctrl; + int i, j; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + napi_enable(&rx_ctrl->napi); + + for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { + rcb = ccb->rcb[j]; + if (!rcb) + continue; + + 
bnad_rxq_alloc_init(bnad, rcb); + set_bit(BNAD_RXQ_STARTED, &rcb->flags); + set_bit(BNAD_RXQ_POST_OK, &rcb->flags); + bnad_rxq_post(bnad, rcb); + } + } +} + +static void +bnad_cb_rx_disabled(void *arg, struct bna_rx *rx) +{ + struct bnad *bnad = (struct bnad *)arg; + + complete(&bnad->bnad_completions.rx_comp); +} + +static void +bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) +{ + bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.mcast_comp); +} + +void +bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, + struct bna_stats *stats) +{ + if (status == BNA_CB_SUCCESS) + BNAD_UPDATE_CTR(bnad, hw_stats_updates); + + if (!netif_running(bnad->netdev) || + !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + return; + + mod_timer(&bnad->stats_timer, + jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); +} + +static void +bnad_cb_enet_mtu_set(struct bnad *bnad) +{ + bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.mtu_comp); +} + +void +bnad_cb_completion(void *arg, enum bfa_status status) +{ + struct bnad_iocmd_comp *iocmd_comp = + (struct bnad_iocmd_comp *)arg; + + iocmd_comp->comp_status = (u32) status; + complete(&iocmd_comp->comp); +} + +/* Resource allocation, free functions */ + +static void +bnad_mem_free(struct bnad *bnad, + struct bna_mem_info *mem_info) +{ + int i; + dma_addr_t dma_pa; + + if (mem_info->mdl == NULL) + return; + + for (i = 0; i < mem_info->num; i++) { + if (mem_info->mdl[i].kva != NULL) { + if (mem_info->mem_type == BNA_MEM_T_DMA) { + BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), + dma_pa); + dma_free_coherent(&bnad->pcidev->dev, + mem_info->mdl[i].len, + mem_info->mdl[i].kva, dma_pa); + } else + kfree(mem_info->mdl[i].kva); + } + } + kfree(mem_info->mdl); + mem_info->mdl = NULL; +} + +static int +bnad_mem_alloc(struct bnad *bnad, + struct bna_mem_info *mem_info) +{ + int i; + dma_addr_t dma_pa; + + if ((mem_info->num == 0) || (mem_info->len == 0)) { + mem_info->mdl = NULL; + return 0; + } + + mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr), + GFP_KERNEL); + if (mem_info->mdl == NULL) + return -ENOMEM; + + if (mem_info->mem_type == BNA_MEM_T_DMA) { + for (i = 0; i < mem_info->num; i++) { + mem_info->mdl[i].len = mem_info->len; + mem_info->mdl[i].kva = + dma_alloc_coherent(&bnad->pcidev->dev, + mem_info->len, &dma_pa, + GFP_KERNEL); + if (mem_info->mdl[i].kva == NULL) + goto err_return; + + BNA_SET_DMA_ADDR(dma_pa, + &(mem_info->mdl[i].dma)); + } + } else { + for (i = 0; i < mem_info->num; i++) { + mem_info->mdl[i].len = mem_info->len; + mem_info->mdl[i].kva = kzalloc(mem_info->len, + GFP_KERNEL); + if (mem_info->mdl[i].kva == NULL) + goto err_return; + } + } + + return 0; + +err_return: + bnad_mem_free(bnad, mem_info); + return -ENOMEM; +} + +/* Free IRQ for Mailbox */ +static void +bnad_mbox_irq_free(struct bnad *bnad) +{ + int irq; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_disable_mbox_irq(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + irq = BNAD_GET_MBOX_IRQ(bnad); + free_irq(irq, bnad); +} + +/* + * Allocates IRQ for Mailbox, but keep it disabled + * This will be enabled once we get the mbox enable callback + * from bna + */ +static int +bnad_mbox_irq_alloc(struct bnad *bnad) +{ + int err = 0; + unsigned long irq_flags, flags; + u32 irq; + irq_handler_t irq_handler; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_MSIX) { + irq_handler = 
(irq_handler_t)bnad_msix_mbox_handler; + irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; + irq_flags = 0; + } else { + irq_handler = (irq_handler_t)bnad_isr; + irq = bnad->pcidev->irq; + irq_flags = IRQF_SHARED; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); + + /* + * Set the Mbox IRQ disable flag, so that the IRQ handler + * called from request_irq() for SHARED IRQs do not execute + */ + set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); + + err = request_irq(irq, irq_handler, irq_flags, + bnad->mbox_irq_name, bnad); + + return err; +} + +static void +bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) +{ + kfree(intr_info->idl); + intr_info->idl = NULL; +} + +/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */ +static int +bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, + u32 txrx_id, struct bna_intr_info *intr_info) +{ + int i, vector_start = 0; + u32 cfg_flags; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + cfg_flags = bnad->cfg_flags; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (cfg_flags & BNAD_CF_MSIX) { + intr_info->intr_type = BNA_INTR_T_MSIX; + intr_info->idl = kcalloc(intr_info->num, + sizeof(struct bna_intr_descr), + GFP_KERNEL); + if (!intr_info->idl) + return -ENOMEM; + + switch (src) { + case BNAD_INTR_TX: + vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id; + break; + + case BNAD_INTR_RX: + vector_start = BNAD_MAILBOX_MSIX_VECTORS + + (bnad->num_tx * bnad->num_txq_per_tx) + + txrx_id; + break; + + default: + BUG(); + } + + for (i = 0; i < intr_info->num; i++) + intr_info->idl[i].vector = vector_start + i; + } else { + intr_info->intr_type = BNA_INTR_T_INTX; + intr_info->num = 1; + intr_info->idl = kcalloc(intr_info->num, + sizeof(struct bna_intr_descr), + GFP_KERNEL); + if (!intr_info->idl) + return -ENOMEM; + + switch (src) { + case BNAD_INTR_TX: + intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK; + break; + + case BNAD_INTR_RX: + intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK; + break; + } + } + return 0; +} + +/* NOTE: Should be called for MSIX only + * Unregisters Tx MSIX vector(s) from the kernel + */ +static void +bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, + int num_txqs) +{ + int i; + int vector_num; + + for (i = 0; i < num_txqs; i++) { + if (tx_info->tcb[i] == NULL) + continue; + + vector_num = tx_info->tcb[i]->intr_vector; + free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); + } +} + +/* NOTE: Should be called for MSIX only + * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel + */ +static int +bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, + u32 tx_id, int num_txqs) +{ + int i; + int err; + int vector_num; + + for (i = 0; i < num_txqs; i++) { + vector_num = tx_info->tcb[i]->intr_vector; + sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); + err = request_irq(bnad->msix_table[vector_num].vector, + (irq_handler_t)bnad_msix_tx, 0, + tx_info->tcb[i]->name, + tx_info->tcb[i]); + if (err) + goto err_return; + } + + return 0; + +err_return: + if (i > 0) + bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); + return -1; +} + +/* NOTE: Should be called for MSIX only + * Unregisters Rx MSIX vector(s) from the kernel + */ +static void +bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, + int num_rxps) +{ + int i; + int 
vector_num; + + for (i = 0; i < num_rxps; i++) { + if (rx_info->rx_ctrl[i].ccb == NULL) + continue; + + vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; + free_irq(bnad->msix_table[vector_num].vector, + rx_info->rx_ctrl[i].ccb); + } +} + +/* NOTE: Should be called for MSIX only + * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel + */ +static int +bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, + u32 rx_id, int num_rxps) +{ + int i; + int err; + int vector_num; + + for (i = 0; i < num_rxps; i++) { + vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; + sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", + bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); + err = request_irq(bnad->msix_table[vector_num].vector, + (irq_handler_t)bnad_msix_rx, 0, + rx_info->rx_ctrl[i].ccb->name, + rx_info->rx_ctrl[i].ccb); + if (err) + goto err_return; + } + + return 0; + +err_return: + if (i > 0) + bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); + return -1; +} + +/* Free Tx object Resources */ +static void +bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) +{ + int i; + + for (i = 0; i < BNA_TX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); + } +} + +/* Allocates memory and interrupt resources for Tx object */ +static int +bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + u32 tx_id) +{ + int i, err = 0; + + for (i = 0; i < BNA_TX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + err = bnad_mem_alloc(bnad, + &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, + &res_info[i].res_u.intr_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_tx_res_free(bnad, res_info); + return err; +} + +/* Free Rx object Resources */ +static void +bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) +{ + int i; + + for (i = 0; i < BNA_RX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); + } +} + +/* Allocates memory and interrupt resources for Rx object */ +static int +bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + uint rx_id) +{ + int i, err = 0; + + /* All memory needs to be allocated before setup_ccbs */ + for (i = 0; i < BNA_RX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + err = bnad_mem_alloc(bnad, + &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, + &res_info[i].res_u.intr_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_rx_res_free(bnad, res_info); + return err; +} + +/* Timer callbacks */ +/* a) IOC timer */ +static void +bnad_ioc_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_ioc_hb_check(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc); + 
spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_iocpf_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_iocpf_sem_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * All timer routines use bnad->bna_lock to protect against + * the following race, which may occur in case of no locking: + * Time CPU m CPU n + * 0 1 = test_bit + * 1 clear_bit + * 2 del_timer_sync + * 3 mod_timer + */ + +/* b) Dynamic Interrupt Moderation Timer */ +static void +bnad_dim_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + int i, j; + unsigned long flags; + + if (!netif_carrier_ok(bnad->netdev)) + return; + + spin_lock_irqsave(&bnad->bna_lock, flags); + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (!rx_ctrl->ccb) + continue; + bna_rx_dim_update(rx_ctrl->ccb); + } + } + + /* Check for BNAD_CF_DIM_ENABLED, does not eleminate a race */ + if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) + mod_timer(&bnad->dim_timer, + jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* c) Statistics Timer */ +static void +bnad_stats_timeout(unsigned long data) +{ + struct bnad *bnad = (struct bnad *)data; + unsigned long flags; + + if (!netif_running(bnad->netdev) || + !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + return; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_hw_stats_get(&bnad->bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * Set up timer for DIM + * Called with bnad->bna_lock held + */ +void +bnad_dim_timer_start(struct bnad *bnad) +{ + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { + setup_timer(&bnad->dim_timer, bnad_dim_timeout, + (unsigned long)bnad); + set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); + mod_timer(&bnad->dim_timer, + jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); + } +} + +/* + * Set up timer for statistics + * Called with mutex_lock(&bnad->conf_mutex) held + */ +static void +bnad_stats_timer_start(struct bnad *bnad) +{ + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { + setup_timer(&bnad->stats_timer, bnad_stats_timeout, + (unsigned long)bnad); + mod_timer(&bnad->stats_timer, + jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * Stops the stats timer + * Called with mutex_lock(&bnad->conf_mutex) held + */ +static void +bnad_stats_timer_stop(struct bnad *bnad) +{ + int to_del = 0; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + to_del = 1; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->stats_timer); +} + +/* Utilities */ + +static void 
+bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list) +{ + int i = 1; /* Index 0 has broadcast address */ + struct netdev_hw_addr *mc_addr; + + netdev_for_each_mc_addr(mc_addr, netdev) { + memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0], + ETH_ALEN); + i++; + } +} + +static int +bnad_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct bnad_rx_ctrl *rx_ctrl = + container_of(napi, struct bnad_rx_ctrl, napi); + struct bnad *bnad = rx_ctrl->bnad; + int rcvd = 0; + + rx_ctrl->rx_poll_ctr++; + + if (!netif_carrier_ok(bnad->netdev)) + goto poll_exit; + + rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); + if (rcvd >= budget) + return rcvd; + +poll_exit: + napi_complete(napi); + + rx_ctrl->rx_complete++; + + if (rx_ctrl->ccb) + bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); + + return rcvd; +} + +#define BNAD_NAPI_POLL_QUOTA 64 +static void +bnad_napi_add(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_ctrl *rx_ctrl; + int i; + + /* Initialize & enable NAPI */ + for (i = 0; i < bnad->num_rxp_per_rx; i++) { + rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; + netif_napi_add(bnad->netdev, &rx_ctrl->napi, + bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); + } +} + +static void +bnad_napi_delete(struct bnad *bnad, u32 rx_id) +{ + int i; + + /* First disable and then clean up */ + for (i = 0; i < bnad->num_rxp_per_rx; i++) + netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); +} + +/* Should be held with conf_lock held */ +void +bnad_destroy_tx(struct bnad *bnad, u32 tx_id) +{ + struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; + struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; + unsigned long flags; + + if (!tx_info->tx) + return; + + init_completion(&bnad->bnad_completions.tx_comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&bnad->bnad_completions.tx_comp); + + if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) + bnad_tx_msix_unregister(bnad, tx_info, + bnad->num_txq_per_tx); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_destroy(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + tx_info->tx = NULL; + tx_info->tx_id = 0; + + bnad_tx_res_free(bnad, res_info); +} + +/* Should be held with conf_lock held */ +int +bnad_setup_tx(struct bnad *bnad, u32 tx_id) +{ + int err; + struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; + struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; + struct bna_intr_info *intr_info = + &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; + struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; + static const struct bna_tx_event_cbfn tx_cbfn = { + .tcb_setup_cbfn = bnad_cb_tcb_setup, + .tcb_destroy_cbfn = bnad_cb_tcb_destroy, + .tx_stall_cbfn = bnad_cb_tx_stall, + .tx_resume_cbfn = bnad_cb_tx_resume, + .tx_cleanup_cbfn = bnad_cb_tx_cleanup, + }; + + struct bna_tx *tx; + unsigned long flags; + + tx_info->tx_id = tx_id; + + /* Initialize the Tx object configuration */ + tx_config->num_txq = bnad->num_txq_per_tx; + tx_config->txq_depth = bnad->txq_depth; + tx_config->tx_type = BNA_TX_T_REGULAR; + tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; + + /* Get BNA's resource requirement for one tx object */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_res_req(bnad->num_txq_per_tx, + bnad->txq_depth, res_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Fill Unmap Q memory requirements */ + 
BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ], + bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * + bnad->txq_depth)); + + /* Allocate resources */ + err = bnad_tx_res_alloc(bnad, res_info, tx_id); + if (err) + return err; + + /* Ask BNA to create one Tx object, supplying required resources */ + spin_lock_irqsave(&bnad->bna_lock, flags); + tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, + tx_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (!tx) { + err = -ENOMEM; + goto err_return; + } + tx_info->tx = tx; + + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, + (work_func_t)bnad_tx_cleanup); + + /* Register ISR for the Tx object */ + if (intr_info->intr_type == BNA_INTR_T_MSIX) { + err = bnad_tx_msix_register(bnad, tx_info, + tx_id, bnad->num_txq_per_tx); + if (err) + goto cleanup_tx; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_enable(tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return 0; + +cleanup_tx: + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_destroy(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + tx_info->tx = NULL; + tx_info->tx_id = 0; +err_return: + bnad_tx_res_free(bnad, res_info); + return err; +} + +/* Setup the rx config for bna_rx_create */ +/* bnad decides the configuration */ +static void +bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) +{ + memset(rx_config, 0, sizeof(*rx_config)); + rx_config->rx_type = BNA_RX_T_REGULAR; + rx_config->num_paths = bnad->num_rxp_per_rx; + rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; + + if (bnad->num_rxp_per_rx > 1) { + rx_config->rss_status = BNA_STATUS_T_ENABLED; + rx_config->rss_config.hash_type = + (BFI_ENET_RSS_IPV6 | + BFI_ENET_RSS_IPV6_TCP | + BFI_ENET_RSS_IPV4 | + BFI_ENET_RSS_IPV4_TCP); + rx_config->rss_config.hash_mask = + bnad->num_rxp_per_rx - 1; + netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key, + sizeof(rx_config->rss_config.toeplitz_hash_key)); + } else { + rx_config->rss_status = BNA_STATUS_T_DISABLED; + memset(&rx_config->rss_config, 0, + sizeof(rx_config->rss_config)); + } + + rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); + rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; + + /* BNA_RXP_SINGLE - one data-buffer queue + * BNA_RXP_SLR - one small-buffer and one large-buffer queues + * BNA_RXP_HDS - one header-buffer and one data-buffer queues + */ + /* TODO: configurable param for queue type */ + rx_config->rxp_type = BNA_RXP_SLR; + + if (BNAD_PCI_DEV_IS_CAT2(bnad) && + rx_config->frame_size > 4096) { + /* though size_routing_enable is set in SLR, + * small packets may get routed to same rxq. + * set buf_size to 2048 instead of PAGE_SIZE. + */ + rx_config->q0_buf_size = 2048; + /* this should be in multiples of 2 */ + rx_config->q0_num_vecs = 4; + rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; + rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; + } else { + rx_config->q0_buf_size = rx_config->frame_size; + rx_config->q0_num_vecs = 1; + rx_config->q0_depth = bnad->rxq_depth; + } + + /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */ + if (rx_config->rxp_type == BNA_RXP_SLR) { + rx_config->q1_depth = bnad->rxq_depth; + rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; + } + + rx_config->vlan_strip_status = + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? 
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; +} + +static void +bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + int i; + + for (i = 0; i < bnad->num_rxp_per_rx; i++) + rx_info->rx_ctrl[i].bnad = bnad; +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +static u32 +bnad_reinit_rx(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + u32 err = 0, current_err = 0; + u32 rx_id = 0, count = 0; + unsigned long flags; + + /* destroy and create new rx objects */ + for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { + if (!bnad->rx_info[rx_id].rx) + continue; + bnad_destroy_rx(bnad, rx_id); + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, + BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { + count++; + current_err = bnad_setup_rx(bnad, rx_id); + if (current_err && !err) { + err = current_err; + pr_err("RXQ:%u setup failed\n", rx_id); + } + } + + /* restore rx configuration */ + if (bnad->rx_info[0].rx && !err) { + bnad_restore_vlans(bnad, 0); + bnad_enable_default_bcast(bnad); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad_set_rx_mode(netdev); + } + + return count; +} + +/* Called with bnad_conf_lock() held */ +void +bnad_destroy_rx(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; + struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; + unsigned long flags; + int to_del = 0; + + if (!rx_info->rx) + return; + + if (0 == rx_id) { + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { + clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); + to_del = 1; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->dim_timer); + } + + init_completion(&bnad->bnad_completions.rx_comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&bnad->bnad_completions.rx_comp); + + if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) + bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); + + bnad_napi_delete(bnad, rx_id); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_destroy(rx_info->rx); + + rx_info->rx = NULL; + rx_info->rx_id = 0; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad_rx_res_free(bnad, res_info); +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +int +bnad_setup_rx(struct bnad *bnad, u32 rx_id) +{ + int err; + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; + struct bna_intr_info *intr_info = + &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; + struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; + static const struct bna_rx_event_cbfn rx_cbfn = { + .rcb_setup_cbfn = NULL, + .rcb_destroy_cbfn = NULL, + .ccb_setup_cbfn = bnad_cb_ccb_setup, + .ccb_destroy_cbfn = bnad_cb_ccb_destroy, + .rx_stall_cbfn = bnad_cb_rx_stall, + .rx_cleanup_cbfn = bnad_cb_rx_cleanup, + .rx_post_cbfn = bnad_cb_rx_post, + }; + struct bna_rx *rx; + unsigned long flags; + + rx_info->rx_id = rx_id; + + /* 
Initialize the Rx object configuration */ + bnad_init_rx_config(bnad, rx_config); + + /* Get BNA's resource requirement for one Rx object */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_res_req(rx_config, res_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Fill Unmap Q memory requirements */ + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ], + rx_config->num_paths, + (rx_config->q0_depth * + sizeof(struct bnad_rx_unmap)) + + sizeof(struct bnad_rx_unmap_q)); + + if (rx_config->rxp_type != BNA_RXP_SINGLE) { + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ], + rx_config->num_paths, + (rx_config->q1_depth * + sizeof(struct bnad_rx_unmap) + + sizeof(struct bnad_rx_unmap_q))); + } + /* Allocate resource */ + err = bnad_rx_res_alloc(bnad, res_info, rx_id); + if (err) + return err; + + bnad_rx_ctrl_init(bnad, rx_id); + + /* Ask BNA to create one Rx object, supplying required resources */ + spin_lock_irqsave(&bnad->bna_lock, flags); + rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, + rx_info); + if (!rx) { + err = -ENOMEM; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto err_return; + } + rx_info->rx = rx; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + INIT_WORK(&rx_info->rx_cleanup_work, + (work_func_t)(bnad_rx_cleanup)); + + /* + * Init NAPI, so that state is set to NAPI_STATE_SCHED, + * so that IRQ handler cannot schedule NAPI at this point. + */ + bnad_napi_add(bnad, rx_id); + + /* Register ISR for the Rx object */ + if (intr_info->intr_type == BNA_INTR_T_MSIX) { + err = bnad_rx_msix_register(bnad, rx_info, rx_id, + rx_config->num_paths); + if (err) + goto err_return; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (0 == rx_id) { + /* Set up Dynamic Interrupt Moderation Vector */ + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) + bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); + + /* Enable VLAN filtering only on the default Rx */ + bna_rx_vlanfilter_enable(rx); + + /* Start the DIM timer */ + bnad_dim_timer_start(bnad); + } + + bna_rx_enable(rx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return 0; + +err_return: + bnad_destroy_rx(bnad, rx_id); + return err; +} + +/* Called with conf_lock & bnad->bna_lock held */ +void +bnad_tx_coalescing_timeo_set(struct bnad *bnad) +{ + struct bnad_tx_info *tx_info; + + tx_info = &bnad->tx_info[0]; + if (!tx_info->tx) + return; + + bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); +} + +/* Called with conf_lock & bnad->bna_lock held */ +void +bnad_rx_coalescing_timeo_set(struct bnad *bnad) +{ + struct bnad_rx_info *rx_info; + int i; + + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + bna_rx_coalescing_timeo_set(rx_info->rx, + bnad->rx_coalescing_timeo); + } +} + +/* + * Called with bnad->bna_lock held + */ +int +bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr) +{ + int ret; + + if (!is_valid_ether_addr(mac_addr)) + return -EADDRNOTAVAIL; + + /* If datapath is down, pretend everything went through */ + if (!bnad->rx_info[0].rx) + return 0; + + ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL); + if (ret != BNA_CB_SUCCESS) + return -EADDRNOTAVAIL; + + return 0; +} + +/* Should be called with conf_lock held */ +int +bnad_enable_default_bcast(struct bnad *bnad) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[0]; + int ret; + unsigned long flags; + + init_completion(&bnad->bnad_completions.mcast_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = 
bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr, + bnad_cb_rx_mcast_add); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (ret == BNA_CB_SUCCESS) + wait_for_completion(&bnad->bnad_completions.mcast_comp); + else + return -ENODEV; + + if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) + return -ENODEV; + + return 0; +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +void +bnad_restore_vlans(struct bnad *bnad, u32 rx_id) +{ + u16 vid; + unsigned long flags; + + for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } +} + +/* Statistics utilities */ +void +bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) +{ + int i, j; + + for (i = 0; i < bnad->num_rx; i++) { + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + if (bnad->rx_info[i].rx_ctrl[j].ccb) { + stats->rx_packets += bnad->rx_info[i]. + rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; + stats->rx_bytes += bnad->rx_info[i]. + rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + stats->rx_packets += + bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]->rxq->rx_packets; + stats->rx_bytes += + bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]->rxq->rx_bytes; + } + } + } + } + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + if (bnad->tx_info[i].tcb[j]) { + stats->tx_packets += + bnad->tx_info[i].tcb[j]->txq->tx_packets; + stats->tx_bytes += + bnad->tx_info[i].tcb[j]->txq->tx_bytes; + } + } + } +} + +/* + * Must be called with the bna_lock held. + */ +void +bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) +{ + struct bfi_enet_stats_mac *mac_stats; + u32 bmap; + int i; + + mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; + stats->rx_errors = + mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + + mac_stats->rx_frame_length_error + mac_stats->rx_code_error + + mac_stats->rx_undersize; + stats->tx_errors = mac_stats->tx_fcs_error + + mac_stats->tx_undersize; + stats->rx_dropped = mac_stats->rx_drop; + stats->tx_dropped = mac_stats->tx_drop; + stats->multicast = mac_stats->rx_multicast; + stats->collisions = mac_stats->tx_total_collision; + + stats->rx_length_errors = mac_stats->rx_frame_length_error; + + /* receive ring buffer overflow ?? */ + + stats->rx_crc_errors = mac_stats->rx_fcs_error; + stats->rx_frame_errors = mac_stats->rx_alignment_error; + /* recv'r fifo overrun */ + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats->rx_fifo_errors += + bnad->stats.bna_stats-> + hw_stats.rxf_stats[i].frame_drops; + break; + } + bmap >>= 1; + } +} + +static void +bnad_mbox_irq_sync(struct bnad *bnad) +{ + u32 irq; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_MSIX) + irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; + else + irq = bnad->pcidev->irq; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + synchronize_irq(irq); +} + +/* Utility used by bnad_start_xmit, for doing TSO */ +static int +bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) { + BNAD_UPDATE_CTR(bnad, tso_err); + return err; + } + + /* + * For TSO, the TCP checksum field is seeded with pseudo-header sum + * excluding the length field. 
+ */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + /* Do we really need these? */ + iph->tot_len = 0; + iph->check = 0; + + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, + IPPROTO_TCP, 0); + BNAD_UPDATE_CTR(bnad, tso4); + } else { + struct ipv6hdr *ipv6h = ipv6_hdr(skb); + + ipv6h->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, + IPPROTO_TCP, 0); + BNAD_UPDATE_CTR(bnad, tso6); + } + + return 0; +} + +/* + * Initialize Q numbers depending on Rx Paths + * Called with bnad->bna_lock held, because of cfg_flags + * access. + */ +static void +bnad_q_num_init(struct bnad *bnad) +{ + int rxps; + + rxps = min((uint)num_online_cpus(), + (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); + + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) + rxps = 1; /* INTx */ + + bnad->num_rx = 1; + bnad->num_tx = 1; + bnad->num_rxp_per_rx = rxps; + bnad->num_txq_per_tx = BNAD_TXQ_NUM; +} + +/* + * Adjusts the Q numbers, given a number of msix vectors + * Give preference to RSS as opposed to Tx priority Queues, + * in such a case, just use 1 Tx Q + * Called with bnad->bna_lock held b'cos of cfg_flags access + */ +static void +bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) +{ + bnad->num_txq_per_tx = 1; + if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + + bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && + (bnad->cfg_flags & BNAD_CF_MSIX)) { + bnad->num_rxp_per_rx = msix_vectors - + (bnad->num_tx * bnad->num_txq_per_tx) - + BNAD_MAILBOX_MSIX_VECTORS; + } else + bnad->num_rxp_per_rx = 1; +} + +/* Enable / disable ioceth */ +static int +bnad_ioceth_disable(struct bnad *bnad) +{ + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + init_completion(&bnad->bnad_completions.ioc_comp); + bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, + msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); + + err = bnad->bnad_completions.ioc_comp_status; + return err; +} + +static int +bnad_ioceth_enable(struct bnad *bnad) +{ + int err = 0; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + init_completion(&bnad->bnad_completions.ioc_comp); + bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; + bna_ioceth_enable(&bnad->bna.ioceth); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, + msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); + + err = bnad->bnad_completions.ioc_comp_status; + + return err; +} + +/* Free BNA resources */ +static void +bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, + u32 res_val_max) +{ + int i; + + for (i = 0; i < res_val_max; i++) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); +} + +/* Allocates memory and interrupt resources for BNA */ +static int +bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + u32 res_val_max) +{ + int i, err; + + for (i = 0; i < res_val_max; i++) { + err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_res_free(bnad, res_info, res_val_max); + return err; +} + +/* Interrupt enable / disable */ +static void +bnad_enable_msix(struct bnad *bnad) +{ + int i, ret; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return; + } + 
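+ /* + * MSI-X path: allocate one msix_entry per Tx queue, per Rx path and + * one for the mailbox, then request that many vectors from the PCI + * core. pci_enable_msix_range() may grant fewer vectors; in that case + * the queue counts are trimmed below and, if the reduced request still + * cannot be met, the driver falls back to INTx mode. + */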
spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (bnad->msix_table) + return; + + bnad->msix_table = + kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); + + if (!bnad->msix_table) + goto intx_mode; + + for (i = 0; i < bnad->msix_num; i++) + bnad->msix_table[i].entry = i; + + ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, + 1, bnad->msix_num); + if (ret < 0) { + goto intx_mode; + } else if (ret < bnad->msix_num) { + pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n", + ret, bnad->msix_num); + + spin_lock_irqsave(&bnad->bna_lock, flags); + /* ret = #of vectors that we got */ + bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, + (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + + BNAD_MAILBOX_MSIX_VECTORS; + + if (bnad->msix_num > ret) { + pci_disable_msix(bnad->pcidev); + goto intx_mode; + } + } + + pci_intx(bnad->pcidev, 0); + + return; + +intx_mode: + pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n"); + + kfree(bnad->msix_table); + bnad->msix_table = NULL; + bnad->msix_num = 0; + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad->cfg_flags &= ~BNAD_CF_MSIX; + bnad_q_num_init(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_disable_msix(struct bnad *bnad) +{ + u32 cfg_flags; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + cfg_flags = bnad->cfg_flags; + if (bnad->cfg_flags & BNAD_CF_MSIX) + bnad->cfg_flags &= ~BNAD_CF_MSIX; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (cfg_flags & BNAD_CF_MSIX) { + pci_disable_msix(bnad->pcidev); + kfree(bnad->msix_table); + bnad->msix_table = NULL; + } +} + +/* Netdev entry points */ +static int +bnad_open(struct net_device *netdev) +{ + int err; + struct bnad *bnad = netdev_priv(netdev); + struct bna_pause_config pause_config; + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + + /* Tx */ + err = bnad_setup_tx(bnad, 0); + if (err) + goto err_return; + + /* Rx */ + err = bnad_setup_rx(bnad, 0); + if (err) + goto cleanup_tx; + + /* Port */ + pause_config.tx_pause = 0; + pause_config.rx_pause = 0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, + BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); + bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); + bna_enet_enable(&bnad->bna.enet); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Enable broadcast */ + bnad_enable_default_bcast(bnad); + + /* Restore VLANs, if any */ + bnad_restore_vlans(bnad, 0); + + /* Set the UCAST address */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Start the stats timer */ + bnad_stats_timer_start(bnad); + + mutex_unlock(&bnad->conf_mutex); + + return 0; + +cleanup_tx: + bnad_destroy_tx(bnad, 0); + +err_return: + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static int +bnad_stop(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + + /* Stop the stats timer */ + bnad_stats_timer_stop(bnad); + + init_completion(&bnad->bnad_completions.enet_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, + bnad_cb_enet_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion(&bnad->bnad_completions.enet_comp); + + bnad_destroy_tx(bnad, 0); + 
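+ /* + * bnad_destroy_rx() undoes what bnad_setup_rx() built in bnad_open(): + * the Rx object itself, its NAPI instances and Rx MSI-X vectors, and + * the queue resources allocated for it. + */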
bnad_destroy_rx(bnad, 0); + + /* Synchronize mailbox IRQ */ + bnad_mbox_irq_sync(bnad); + + mutex_unlock(&bnad->conf_mutex); + + return 0; +} + +/* TX */ +/* Returns 0 for success */ +static int +bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, + struct sk_buff *skb, struct bna_txq_entry *txqent) +{ + u16 flags = 0; + u32 gso_size; + u16 vlan_tag = 0; + + if (skb_vlan_tag_present(skb)) { + vlan_tag = (u16)skb_vlan_tag_get(skb); + flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); + } + if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { + vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT) + | (vlan_tag & 0x1fff); + flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); + } + txqent->hdr.wi.vlan_tag = htons(vlan_tag); + + if (skb_is_gso(skb)) { + gso_size = skb_shinfo(skb)->gso_size; + if (unlikely(gso_size > bnad->netdev->mtu)) { + BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); + return -EINVAL; + } + if (unlikely((gso_size + skb_transport_offset(skb) + + tcp_hdrlen(skb)) >= skb->len)) { + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.lso_mss = 0; + BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); + } else { + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO); + txqent->hdr.wi.lso_mss = htons(gso_size); + } + + if (bnad_tso_prepare(bnad, skb)) { + BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); + return -EINVAL; + } + + flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( + tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); + } else { + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.lso_mss = 0; + + if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { + BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); + return -EINVAL; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + __be16 net_proto = vlan_get_protocol(skb); + u8 proto = 0; + + if (net_proto == htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; +#ifdef NETIF_F_IPV6_CSUM + else if (net_proto == htons(ETH_P_IPV6)) { + /* nexthdr may not be TCP immediately. 
*/ + proto = ipv6_hdr(skb)->nexthdr; + } +#endif + if (proto == IPPROTO_TCP) { + flags |= BNA_TXQ_WI_CF_TCP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, tcpcsum_offload); + + if (unlikely(skb_headlen(skb) < + skb_transport_offset(skb) + + tcp_hdrlen(skb))) { + BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); + return -EINVAL; + } + } else if (proto == IPPROTO_UDP) { + flags |= BNA_TXQ_WI_CF_UDP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, udpcsum_offload); + if (unlikely(skb_headlen(skb) < + skb_transport_offset(skb) + + sizeof(struct udphdr))) { + BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); + return -EINVAL; + } + } else { + + BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); + return -EINVAL; + } + } else + txqent->hdr.wi.l4_hdr_size_n_offset = 0; + } + + txqent->hdr.wi.flags = htons(flags); + txqent->hdr.wi.frame_length = htonl(skb->len); + + return 0; +} + +/* + * bnad_start_xmit : Netdev entry point for Transmit + * Called under lock held by net_device + */ +static netdev_tx_t +bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + u32 txq_id = 0; + struct bna_tcb *tcb = NULL; + struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; + u32 prod, q_depth, vect_id; + u32 wis, vectors, len; + int i; + dma_addr_t dma_addr; + struct bna_txq_entry *txqent; + + len = skb_headlen(skb); + + /* Sanity checks for the skb */ + + if (unlikely(skb->len <= ETH_HLEN)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_too_short); + return NETDEV_TX_OK; + } + if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); + return NETDEV_TX_OK; + } + if (unlikely(len == 0)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); + return NETDEV_TX_OK; + } + + tcb = bnad->tx_info[0].tcb[txq_id]; + + /* + * Takes care of the Tx that is scheduled between clearing the flag + * and the netif_tx_stop_all_queues() call. + */ + if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_stopping); + return NETDEV_TX_OK; + } + + q_depth = tcb->q_depth; + prod = tcb->producer_index; + unmap_q = tcb->unmap_q; + + vectors = 1 + skb_shinfo(skb)->nr_frags; + wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ + + if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); + return NETDEV_TX_OK; + } + + /* Check for available TxQ resources */ + if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { + if ((*tcb->hw_consumer_index != tcb->consumer_index) && + !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { + u32 sent; + sent = bnad_txcmpl_process(bnad, tcb); + if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + bna_ib_ack(tcb->i_dbell, sent); + smp_mb__before_atomic(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } else { + netif_stop_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + } + + smp_mb(); + /* + * Check again to deal with race condition between + * netif_stop_queue here, and netif_wake_queue in + * interrupt handler which is not inside netif tx lock. 
+ */ + if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + return NETDEV_TX_BUSY; + } else { + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + + txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; + head_unmap = &unmap_q[prod]; + + /* Program the opcode, flags, frame_len, num_vectors in WI */ + if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + txqent->hdr.wi.reserved = 0; + txqent->hdr.wi.num_vectors = vectors; + + head_unmap->skb = skb; + head_unmap->nvecs = 0; + + /* Program the vectors */ + unmap = head_unmap; + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + len, DMA_TO_DEVICE); + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); + txqent->vector[0].length = htons(len); + dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); + head_unmap->nvecs++; + + for (i = 0, vect_id = 0; i < vectors - 1; i++) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + u32 size = skb_frag_size(frag); + + if (unlikely(size == 0)) { + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, + tcb->producer_index); + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); + return NETDEV_TX_OK; + } + + len += size; + + vect_id++; + if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) { + vect_id = 0; + BNA_QE_INDX_INC(prod, q_depth); + txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; + txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); + unmap = &unmap_q[prod]; + } + + dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, + 0, size, DMA_TO_DEVICE); + dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); + txqent->vector[vect_id].length = htons(size); + dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, + dma_addr); + head_unmap->nvecs++; + } + + if (unlikely(len != skb->len)) { + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); + return NETDEV_TX_OK; + } + + BNA_QE_INDX_INC(prod, q_depth); + tcb->producer_index = prod; + + smp_mb(); + + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + return NETDEV_TX_OK; + + skb_tx_timestamp(skb); + + bna_txq_prod_indx_doorbell(tcb); + smp_mb(); + + return NETDEV_TX_OK; +} + +/* + * Used spin_lock to synchronize reading of stats structures, which + * is written by BNA under the same lock. 
+ */ +static struct rtnl_link_stats64 * +bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + bnad_netdev_qstats_fill(bnad, stats); + bnad_netdev_hwstats_fill(bnad, stats); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return stats; +} + +static void +bnad_set_rx_ucast_fltr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + int uc_count = netdev_uc_count(netdev); + enum bna_cb_status ret; + u8 *mac_list; + struct netdev_hw_addr *ha; + int entry; + + if (netdev_uc_empty(bnad->netdev)) { + bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); + return; + } + + if (uc_count > bna_attr(&bnad->bna)->num_ucmac) + goto mode_default; + + mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC); + if (mac_list == NULL) + goto mode_default; + + entry = 0; + netdev_for_each_uc_addr(ha, netdev) { + memcpy(&mac_list[entry * ETH_ALEN], + &ha->addr[0], ETH_ALEN); + entry++; + } + + ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, + mac_list, NULL); + kfree(mac_list); + + if (ret != BNA_CB_SUCCESS) + goto mode_default; + + return; + + /* ucast packets not in UCAM are routed to default function */ +mode_default: + bnad->cfg_flags |= BNAD_CF_DEFAULT; + bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL); +} + +static void +bnad_set_rx_mcast_fltr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + int mc_count = netdev_mc_count(netdev); + enum bna_cb_status ret; + u8 *mac_list; + + if (netdev->flags & IFF_ALLMULTI) + goto mode_allmulti; + + if (netdev_mc_empty(netdev)) + return; + + if (mc_count > bna_attr(&bnad->bna)->num_mcmac) + goto mode_allmulti; + + mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC); + + if (mac_list == NULL) + goto mode_allmulti; + + memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN); + + /* copy rest of the MCAST addresses */ + bnad_netdev_mc_list_get(netdev, mac_list); + ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, + mac_list, NULL); + kfree(mac_list); + + if (ret != BNA_CB_SUCCESS) + goto mode_allmulti; + + return; + +mode_allmulti: + bnad->cfg_flags |= BNAD_CF_ALLMULTI; + bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL); +} + +void +bnad_set_rx_mode(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + enum bna_rxmode new_mode, mode_mask; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + if (bnad->rx_info[0].rx == NULL) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return; + } + + /* clear bnad flags to update it with new settings */ + bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | + BNAD_CF_ALLMULTI); + + new_mode = 0; + if (netdev->flags & IFF_PROMISC) { + new_mode |= BNAD_RXMODE_PROMISC_DEFAULT; + bnad->cfg_flags |= BNAD_CF_PROMISC; + } else { + bnad_set_rx_mcast_fltr(bnad); + + if (bnad->cfg_flags & BNAD_CF_ALLMULTI) + new_mode |= BNA_RXMODE_ALLMULTI; + + bnad_set_rx_ucast_fltr(bnad); + + if (bnad->cfg_flags & BNAD_CF_DEFAULT) + new_mode |= BNA_RXMODE_DEFAULT; + } + + mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT | + BNA_RXMODE_ALLMULTI; + bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * bna_lock is used to sync writes to netdev->addr + * conf_lock cannot be used since this call may be made + * in a non-blocking context. 
+ */ +static int +bnad_set_mac_address(struct net_device *netdev, void *mac_addr) +{ + int err; + struct bnad *bnad = netdev_priv(netdev); + struct sockaddr *sa = (struct sockaddr *)mac_addr; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + err = bnad_mac_addr_set_locked(bnad, sa->sa_data); + + if (!err) + memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return err; +} + +static int +bnad_mtu_set(struct bnad *bnad, int frame_size) +{ + unsigned long flags; + + init_completion(&bnad->bnad_completions.mtu_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion(&bnad->bnad_completions.mtu_comp); + + return bnad->bnad_completions.mtu_comp_status; +} + +static int +bnad_change_mtu(struct net_device *netdev, int new_mtu) +{ + int err, mtu; + struct bnad *bnad = netdev_priv(netdev); + u32 rx_count = 0, frame, new_frame; + + if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + + mtu = netdev->mtu; + netdev->mtu = new_mtu; + + frame = BNAD_FRAME_SIZE(mtu); + new_frame = BNAD_FRAME_SIZE(new_mtu); + + /* check if multi-buffer needs to be enabled */ + if (BNAD_PCI_DEV_IS_CAT2(bnad) && + netif_running(bnad->netdev)) { + /* only when transition is over 4K */ + if ((frame <= 4096 && new_frame > 4096) || + (frame > 4096 && new_frame <= 4096)) + rx_count = bnad_reinit_rx(bnad); + } + + /* rx_count > 0 - new rx created + * - Linux set err = 0 and return + */ + err = bnad_mtu_set(bnad, new_frame); + if (err) + err = -EBUSY; + + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static int +bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + if (!bnad->rx_info[0].rx) + return 0; + + mutex_lock(&bnad->conf_mutex); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_vlan_add(bnad->rx_info[0].rx, vid); + set_bit(vid, bnad->active_vlans); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + + return 0; +} + +static int +bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + if (!bnad->rx_info[0].rx) + return 0; + + mutex_lock(&bnad->conf_mutex); + + spin_lock_irqsave(&bnad->bna_lock, flags); + clear_bit(vid, bnad->active_vlans); + bna_rx_vlan_del(bnad->rx_info[0].rx, vid); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + + return 0; +} + +static int bnad_set_features(struct net_device *dev, netdev_features_t features) +{ + struct bnad *bnad = netdev_priv(dev); + netdev_features_t changed = features ^ dev->features; + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); + else + bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +bnad_netpoll(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + u32 curr_mask; + int i, j; + + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { + 
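+ /* + * INTx path: bna_intx_disable() hands back the currently programmed + * interrupt mask through curr_mask (an output argument, so it is not + * read uninitialized here), the ISR is invoked directly, and + * bna_intx_enable() then restores the saved mask. + */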
bna_intx_disable(&bnad->bna, curr_mask); + bnad_isr(bnad->pcidev->irq, netdev); + bna_intx_enable(&bnad->bna, curr_mask); + } else { + /* + * Tx processing may happen in sending context, so no need + * to explicitly process completions here + */ + + /* Rx processing */ + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (rx_ctrl->ccb) + bnad_netif_rx_schedule_poll(bnad, + rx_ctrl->ccb); + } + } + } +} +#endif + +static const struct net_device_ops bnad_netdev_ops = { + .ndo_open = bnad_open, + .ndo_stop = bnad_stop, + .ndo_start_xmit = bnad_start_xmit, + .ndo_get_stats64 = bnad_get_stats64, + .ndo_set_rx_mode = bnad_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnad_set_mac_address, + .ndo_change_mtu = bnad_change_mtu, + .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, + .ndo_set_features = bnad_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bnad_netpoll +#endif +}; + +static void +bnad_netdev_init(struct bnad *bnad, bool using_dac) +{ + struct net_device *netdev = bnad->netdev; + + netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; + + if (using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->mem_start = bnad->mmio_start; + netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; + + netdev->netdev_ops = &bnad_netdev_ops; + bnad_set_ethtool_ops(netdev); +} + +/* + * 1. Initialize the bnad structure + * 2. Setup netdev pointer in pci_dev + * 3. Initialize no. of TxQ & CQs & MSIX vectors + * 4. Initialize work queue. + */ +static int +bnad_init(struct bnad *bnad, + struct pci_dev *pdev, struct net_device *netdev) +{ + unsigned long flags; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + bnad->netdev = netdev; + bnad->pcidev = pdev; + bnad->mmio_start = pci_resource_start(pdev, 0); + bnad->mmio_len = pci_resource_len(pdev, 0); + bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); + if (!bnad->bar0) { + dev_err(&pdev->dev, "ioremap for bar0 failed\n"); + return -ENOMEM; + } + pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, + (unsigned long long) bnad->mmio_len); + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!bnad_msix_disable) + bnad->cfg_flags = BNAD_CF_MSIX; + + bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; + + bnad_q_num_init(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + + (bnad->num_rx * bnad->num_rxp_per_rx) + + BNAD_MAILBOX_MSIX_VECTORS; + + bnad->txq_depth = BNAD_TXQ_DEPTH; + bnad->rxq_depth = BNAD_RXQ_DEPTH; + + bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; + bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; + + sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); + bnad->work_q = create_singlethread_workqueue(bnad->wq_name); + if (!bnad->work_q) { + iounmap(bnad->bar0); + return -ENOMEM; + } + + return 0; +} + +/* + * Must be called after bnad_pci_uninit() + * so that iounmap() and pci_set_drvdata(NULL) + * happens only after PCI uninitialization. 
+ */ +static void +bnad_uninit(struct bnad *bnad) +{ + if (bnad->work_q) { + flush_workqueue(bnad->work_q); + destroy_workqueue(bnad->work_q); + bnad->work_q = NULL; + } + + if (bnad->bar0) + iounmap(bnad->bar0); +} + +/* + * Initialize locks + a) Per ioceth mutex used for serializing configuration + changes from OS interface + b) spin lock used to protect bna state machine + */ +static void +bnad_lock_init(struct bnad *bnad) +{ + spin_lock_init(&bnad->bna_lock); + mutex_init(&bnad->conf_mutex); + mutex_init(&bnad_list_mutex); +} + +static void +bnad_lock_uninit(struct bnad *bnad) +{ + mutex_destroy(&bnad->conf_mutex); + mutex_destroy(&bnad_list_mutex); +} + +/* PCI Initialization */ +static int +bnad_pci_init(struct bnad *bnad, + struct pci_dev *pdev, bool *using_dac) +{ + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + err = pci_request_regions(pdev, BNAD_NAME); + if (err) + goto disable_device; + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + *using_dac = true; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto release_regions; + *using_dac = false; + } + pci_set_master(pdev); + return 0; + +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); + + return err; +} + +static void +bnad_pci_uninit(struct pci_dev *pdev) +{ + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int +bnad_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pcidev_id) +{ + bool using_dac; + int err; + struct bnad *bnad; + struct bna *bna; + struct net_device *netdev; + struct bfa_pcidev pcidev_info; + unsigned long flags; + + pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n", + pdev, pcidev_id, PCI_FUNC(pdev->devfn)); + + mutex_lock(&bnad_fwimg_mutex); + if (!cna_get_firmware_buf(pdev)) { + mutex_unlock(&bnad_fwimg_mutex); + pr_warn("Failed to load Firmware Image!\n"); + return -ENODEV; + } + mutex_unlock(&bnad_fwimg_mutex); + + /* + * Allocates sizeof(struct net_device + struct bnad) + * bnad = netdev->priv + */ + netdev = alloc_etherdev(sizeof(struct bnad)); + if (!netdev) { + err = -ENOMEM; + return err; + } + bnad = netdev_priv(netdev); + bnad_lock_init(bnad); + bnad_add_to_list(bnad); + + mutex_lock(&bnad->conf_mutex); + /* + * PCI initialization + * Output : using_dac = 1 for 64 bit DMA + * = 0 for 32 bit DMA + */ + using_dac = false; + err = bnad_pci_init(bnad, pdev, &using_dac); + if (err) + goto unlock_mutex; + + /* + * Initialize bnad structure + * Setup relation between pci_dev & netdev + */ + err = bnad_init(bnad, pdev, netdev); + if (err) + goto pci_uninit; + + /* Initialize netdev structure, set up ethtool ops */ + bnad_netdev_init(bnad, using_dac); + + /* Set link to down state */ + netif_carrier_off(netdev); + + /* Setup the debugfs node for this bnad */ + if (bna_debugfs_enable) + bnad_debugfs_init(bnad); + + /* Get resource requirement from bna */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_res_req(&bnad->res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Allocate resources from bna */ + err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); + if (err) + goto drv_uninit; + + bna = &bnad->bna; + + /* Setup pcidev_info for bna_init() */ + pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); + pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); + pcidev_info.device_id = bnad->pcidev->device; + pcidev_info.pci_bar_kva = bnad->bar0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_init(bna, bnad, 
&pcidev_info, &bnad->res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->stats.bna_stats = &bna->stats; + + bnad_enable_msix(bnad); + err = bnad_mbox_irq_alloc(bnad); + if (err) + goto res_free; + + /* Set up timers */ + setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, + ((unsigned long)bnad)); + setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, + ((unsigned long)bnad)); + setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, + ((unsigned long)bnad)); + setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, + ((unsigned long)bnad)); + + /* + * Start the chip + * If the callback comes with error, we bail out. + * This is a catastrophic error. + */ + err = bnad_ioceth_enable(bnad); + if (err) { + pr_err("BNA: Initialization failed err=%d\n", + err); + goto probe_success; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || + bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) { + bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, + bna_attr(bna)->num_rxp - 1); + if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || + bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) + err = -EIO; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (err) + goto disable_ioceth; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); + if (err) { + err = -EIO; + goto disable_ioceth; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Get the burnt-in mac */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr); + bnad_set_netdev_perm_addr(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + + /* Finally, register with net_device layer */ + err = register_netdev(netdev); + if (err) { + pr_err("BNA : Registering with netdev failed\n"); + goto probe_uninit; + } + set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); + + return 0; + +probe_success: + mutex_unlock(&bnad->conf_mutex); + return 0; + +probe_uninit: + mutex_lock(&bnad->conf_mutex); + bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); +disable_ioceth: + bnad_ioceth_disable(bnad); + del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_uninit(bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad_mbox_irq_free(bnad); + bnad_disable_msix(bnad); +res_free: + bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); +drv_uninit: + /* Remove the debugfs node for this bnad */ + kfree(bnad->regdata); + bnad_debugfs_uninit(bnad); + bnad_uninit(bnad); +pci_uninit: + bnad_pci_uninit(pdev); +unlock_mutex: + mutex_unlock(&bnad->conf_mutex); + bnad_remove_from_list(bnad); + bnad_lock_uninit(bnad); + free_netdev(netdev); + return err; +} + +static void +bnad_pci_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnad *bnad; + struct bna *bna; + unsigned long flags; + + if (!netdev) + return; + + pr_info("%s bnad_pci_remove\n", netdev->name); + bnad = netdev_priv(netdev); + bna = &bnad->bna; + + if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) + unregister_netdev(netdev); + + 
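+ /* + * Teardown mirrors the probe error path: disable the ioceth, delete + * the IOC timers, uninitialize the BNA layer, release BNA resources, + * free the mailbox IRQ and MSI-X vectors, and finally undo the PCI + * and driver-private setup before freeing the netdev. + */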
mutex_lock(&bnad->conf_mutex); + bnad_ioceth_disable(bnad); + del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_uninit(bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); + bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); + bnad_mbox_irq_free(bnad); + bnad_disable_msix(bnad); + bnad_pci_uninit(pdev); + mutex_unlock(&bnad->conf_mutex); + bnad_remove_from_list(bnad); + bnad_lock_uninit(bnad); + /* Remove the debugfs node for this bnad */ + kfree(bnad->regdata); + bnad_debugfs_uninit(bnad); + bnad_uninit(bnad); + free_netdev(netdev); +} + +static const struct pci_device_id bnad_pci_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_BROCADE, + PCI_DEVICE_ID_BROCADE_CT), + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00 + }, + { + PCI_DEVICE(PCI_VENDOR_ID_BROCADE, + BFA_PCI_DEVICE_ID_CT2), + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00 + }, + {0, }, +}; + +MODULE_DEVICE_TABLE(pci, bnad_pci_id_table); + +static struct pci_driver bnad_pci_driver = { + .name = BNAD_NAME, + .id_table = bnad_pci_id_table, + .probe = bnad_pci_probe, + .remove = bnad_pci_remove, +}; + +static int __init +bnad_module_init(void) +{ + int err; + + pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n", + BNAD_VERSION); + + bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); + + err = pci_register_driver(&bnad_pci_driver); + if (err < 0) { + pr_err("bna : PCI registration failed in module init " + "(%d)\n", err); + return err; + } + + return 0; +} + +static void __exit +bnad_module_exit(void) +{ + pci_unregister_driver(&bnad_pci_driver); + release_firmware(bfi_fw); +} + +module_init(bnad_module_init); +module_exit(bnad_module_exit); + +MODULE_AUTHOR("Brocade"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); +MODULE_VERSION(BNAD_VERSION); +/*(DEBLOBBED)*/ diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h new file mode 100644 index 000000000..7ead6c23e --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -0,0 +1,432 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BNAD_H__ +#define __BNAD_H__ + +#include +#include +#include +#include +#include +#include +#include + +/* Fix for IA64 */ +#include +#include + +#include +#include + +#include "bna.h" + +#define BNAD_TXQ_DEPTH 2048 +#define BNAD_RXQ_DEPTH 2048 + +#define BNAD_MAX_TX 1 +#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */ +#define BNAD_TXQ_NUM 1 + +#define BNAD_MAX_RX 1 +#define BNAD_MAX_RXP_PER_RX 16 +#define BNAD_MAX_RXQ_PER_RXP 2 + +/* + * Control structure pointed to ccb->ctrl, which + * determines the NAPI / LRO behavior CCB + * There is 1:1 corres. between ccb & ctrl + */ +struct bnad_rx_ctrl { + struct bna_ccb *ccb; + struct bnad *bnad; + unsigned long flags; + struct napi_struct napi; + u64 rx_intr_ctr; + u64 rx_poll_ctr; + u64 rx_schedule; + u64 rx_keep_poll; + u64 rx_complete; +}; + +#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC + +/* + * GLOBAL #defines (CONSTANTS) + */ +#define BNAD_NAME "bna" +#define BNAD_NAME_LEN 64 + +#define BNAD_VERSION "3.2.25.1" + +#define BNAD_MAILBOX_MSIX_INDEX 0 +#define BNAD_MAILBOX_MSIX_VECTORS 1 +#define BNAD_INTX_TX_IB_BITMASK 0x1 +#define BNAD_INTX_RX_IB_BITMASK 0x2 + +#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */ +#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */ + +#define BNAD_IOCETH_TIMEOUT 10000 + +#define BNAD_MIN_Q_DEPTH 512 +#define BNAD_MAX_RXQ_DEPTH 16384 +#define BNAD_MAX_TXQ_DEPTH 2048 + +#define BNAD_JUMBO_MTU 9000 + +#define BNAD_NETIF_WAKE_THRESHOLD 8 + +#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3 + +/* Bit positions for tcb->flags */ +#define BNAD_TXQ_FREE_SENT 0 +#define BNAD_TXQ_TX_STARTED 1 + +/* Bit positions for rcb->flags */ +#define BNAD_RXQ_STARTED 0 +#define BNAD_RXQ_POST_OK 1 + +/* Resource limits */ +#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) +#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx) + +#define BNAD_FRAME_SIZE(_mtu) \ + (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN) + +/* + * DATA STRUCTURES + */ + +/* enums */ +enum bnad_intr_source { + BNAD_INTR_TX = 1, + BNAD_INTR_RX = 2 +}; + +enum bnad_link_state { + BNAD_LS_DOWN = 0, + BNAD_LS_UP = 1 +}; + +struct bnad_iocmd_comp { + struct bnad *bnad; + struct completion comp; + int comp_status; +}; + +struct bnad_completion { + struct completion ioc_comp; + struct completion ucast_comp; + struct completion mcast_comp; + struct completion tx_comp; + struct completion rx_comp; + struct completion stats_comp; + struct completion enet_comp; + struct completion mtu_comp; + + u8 ioc_comp_status; + u8 ucast_comp_status; + u8 mcast_comp_status; + u8 tx_comp_status; + u8 rx_comp_status; + u8 stats_comp_status; + u8 port_comp_status; + u8 mtu_comp_status; +}; + +/* Tx Rx Control Stats */ +struct bnad_drv_stats { + u64 netif_queue_stop; + u64 netif_queue_wakeup; + u64 netif_queue_stopped; + u64 tso4; + u64 tso6; + u64 tso_err; + u64 tcpcsum_offload; + u64 udpcsum_offload; + u64 csum_help; + u64 tx_skb_too_short; + u64 tx_skb_stopping; + u64 tx_skb_max_vectors; + u64 tx_skb_mss_too_long; + u64 tx_skb_tso_too_short; + u64 tx_skb_tso_prepare; + u64 tx_skb_non_tso_too_long; + u64 tx_skb_tcp_hdr; + u64 tx_skb_udp_hdr; + u64 tx_skb_csum_err; + u64 tx_skb_headlen_too_long; + u64 tx_skb_headlen_zero; + u64 tx_skb_frag_zero; + u64 tx_skb_len_mismatch; + + u64 hw_stats_updates; + u64 netif_rx_dropped; + + u64 link_toggle; + u64 cee_toggle; + + u64 rxp_info_alloc_failed; + u64 mbox_intr_disabled; + u64 mbox_intr_enabled; + u64 tx_unmap_q_alloc_failed; + 
u64 rx_unmap_q_alloc_failed; + + u64 rxbuf_alloc_failed; +}; + +/* Complete driver stats */ +struct bnad_stats { + struct bnad_drv_stats drv_stats; + struct bna_stats *bna_stats; +}; + +/* Tx / Rx Resources */ +struct bnad_tx_res_info { + struct bna_res_info res_info[BNA_TX_RES_T_MAX]; +}; + +struct bnad_rx_res_info { + struct bna_res_info res_info[BNA_RX_RES_T_MAX]; +}; + +struct bnad_tx_info { + struct bna_tx *tx; /* 1:1 between tx_info & tx */ + struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; + u32 tx_id; + struct delayed_work tx_cleanup_work; +} ____cacheline_aligned; + +struct bnad_rx_info { + struct bna_rx *rx; /* 1:1 between rx_info & rx */ + + struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; + u32 rx_id; + struct work_struct rx_cleanup_work; +} ____cacheline_aligned; + +struct bnad_tx_vector { + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +struct bnad_tx_unmap { + struct sk_buff *skb; + u32 nvecs; + struct bnad_tx_vector vectors[BFI_TX_MAX_VECTORS_PER_WI]; +}; + +struct bnad_rx_vector { + DEFINE_DMA_UNMAP_ADDR(dma_addr); + u32 len; +}; + +struct bnad_rx_unmap { + struct page *page; + struct sk_buff *skb; + struct bnad_rx_vector vector; + u32 page_offset; +}; + +enum bnad_rxbuf_type { + BNAD_RXBUF_NONE = 0, + BNAD_RXBUF_SK_BUFF = 1, + BNAD_RXBUF_PAGE = 2, + BNAD_RXBUF_MULTI_BUFF = 3 +}; + +#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF) +#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF) + +struct bnad_rx_unmap_q { + int reuse_pi; + int alloc_order; + u32 map_size; + enum bnad_rxbuf_type type; + struct bnad_rx_unmap unmap[0] ____cacheline_aligned; +}; + +#define BNAD_PCI_DEV_IS_CAT2(_bnad) \ + ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2) + +/* Bit mask values for bnad->cfg_flags */ +#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ +#define BNAD_CF_PROMISC 0x02 +#define BNAD_CF_ALLMULTI 0x04 +#define BNAD_CF_DEFAULT 0x08 +#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */ + +/* Defines for run_flags bit-mask */ +/* Set, tested & cleared using xxx_bit() functions */ +/* Values indicated bit positions */ +#define BNAD_RF_CEE_RUNNING 0 +#define BNAD_RF_MTU_SET 1 +#define BNAD_RF_MBOX_IRQ_DISABLED 2 +#define BNAD_RF_NETDEV_REGISTERED 3 +#define BNAD_RF_DIM_TIMER_RUNNING 4 +#define BNAD_RF_STATS_TIMER_RUNNING 5 +#define BNAD_RF_TX_PRIO_SET 6 + +struct bnad { + struct net_device *netdev; + u32 id; + struct list_head list_entry; + + /* Data path */ + struct bnad_tx_info tx_info[BNAD_MAX_TX]; + struct bnad_rx_info rx_info[BNAD_MAX_RX]; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* + * These q numbers are global only because + * they are used to calculate MSIx vectors. + * Actually the exact # of queues are per Tx/Rx + * object. 
+ */ + u32 num_tx; + u32 num_rx; + u32 num_txq_per_tx; + u32 num_rxp_per_rx; + + u32 txq_depth; + u32 rxq_depth; + + u8 tx_coalescing_timeo; + u8 rx_coalescing_timeo; + + struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned; + struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned; + + void __iomem *bar0; /* BAR0 address */ + + struct bna bna; + + u32 cfg_flags; + unsigned long run_flags; + + struct pci_dev *pcidev; + u64 mmio_start; + u64 mmio_len; + + u32 msix_num; + struct msix_entry *msix_table; + + struct mutex conf_mutex; + spinlock_t bna_lock ____cacheline_aligned; + + /* Timers */ + struct timer_list ioc_timer; + struct timer_list dim_timer; + struct timer_list stats_timer; + + /* Control path resources, memory & irq */ + struct bna_res_info res_info[BNA_RES_T_MAX]; + struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX]; + struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX]; + struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX]; + + struct bnad_completion bnad_completions; + + /* Burnt in MAC address */ + mac_t perm_addr; + + struct workqueue_struct *work_q; + + /* Statistics */ + struct bnad_stats stats; + + struct bnad_diag *diag; + + char adapter_name[BNAD_NAME_LEN]; + char port_name[BNAD_NAME_LEN]; + char mbox_irq_name[BNAD_NAME_LEN]; + char wq_name[BNAD_NAME_LEN]; + + /* debugfs specific data */ + char *regdata; + u32 reglen; + struct dentry *bnad_dentry_files[5]; + struct dentry *port_debugfs_root; +}; + +struct bnad_drvinfo { + struct bfa_ioc_attr ioc_attr; + struct bfa_cee_attr cee_attr; + struct bfa_flash_attr flash_attr; + u32 cee_status; + u32 flash_status; +}; + +/* + * EXTERN VARIABLES + */ +extern const struct firmware *bfi_fw; + +/* + * EXTERN PROTOTYPES + */ +u32 *cna_get_firmware_buf(struct pci_dev *pdev); +/* Netdev entry point prototypes */ +void bnad_set_rx_mode(struct net_device *netdev); +struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev); +int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr); +int bnad_enable_default_bcast(struct bnad *bnad); +void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); +void bnad_set_ethtool_ops(struct net_device *netdev); +void bnad_cb_completion(void *arg, enum bfa_status status); + +/* Configuration & setup */ +void bnad_tx_coalescing_timeo_set(struct bnad *bnad); +void bnad_rx_coalescing_timeo_set(struct bnad *bnad); + +int bnad_setup_rx(struct bnad *bnad, u32 rx_id); +int bnad_setup_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); + +/* Timer start/stop protos */ +void bnad_dim_timer_start(struct bnad *bnad); + +/* Statistics */ +void bnad_netdev_qstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); +void bnad_netdev_hwstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); + +/* Debugfs */ +void bnad_debugfs_init(struct bnad *bnad); +void bnad_debugfs_uninit(struct bnad *bnad); + +/* MACROS */ +/* To set & get the stats counters */ +#define BNAD_UPDATE_CTR(_bnad, _ctr) \ + (((_bnad)->stats.drv_stats._ctr)++) + +#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr) + +#define bnad_enable_rx_irq_unsafe(_ccb) \ +{ \ + if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\ + bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ + (_ccb)->rx_coalescing_timeo); \ + bna_ib_ack((_ccb)->i_dbell, 0); \ + } \ +} + +#endif /* __BNAD_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c new file 
mode 100644 index 000000000..72c895504 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -0,0 +1,589 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include +#include +#include "bnad.h" + +/* + * BNA debugfs interface + * + * To access the interface, debugfs file system should be mounted + * if not already mounted using: + * mount -t debugfs none /sys/kernel/debug + * + * BNA Hierarchy: + * - bna/pci_dev:<pci_name> + * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna + * + * Debugging service available per pci_dev: + * fwtrc: To collect current firmware trace. + * fwsave: To collect last saved fw trace as a result of firmware crash. + * regwr: To write one word to chip register + * regrd: To read one or more words from chip register. + */ + +struct bnad_debug_info { + char *debug_buffer; + void *i_private; + int buffer_len; +}; + +static int +bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; + + fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + fw_debug = NULL; + return -ENOMEM; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (rc != BFA_STATUS_OK) { + kfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + fw_debug = NULL; + pr_warn("bnad %s: Failed to collect fwtrc\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int +bnad_debugfs_open_fwsave(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; + + fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + fw_debug = NULL; + return -ENOMEM; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) { + kfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + fw_debug = NULL; + pr_warn("bna %s: Failed to collect fwsave\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int 
+bnad_debugfs_open_reg(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *reg_debug; + + reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!reg_debug) + return -ENOMEM; + + reg_debug->i_private = inode->i_private; + + file->private_data = reg_debug; + + return 0; +} + +static int +bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len) +{ + struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer; + struct bnad_iocmd_comp fcomp; + unsigned long flags = 0; + int ret = BFA_STATUS_FAILED; + + /* Get IOC info */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Retrieve CEE related info */ + fcomp.bnad = bnad; + fcomp.comp_status = 0; + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto out; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + drvinfo->cee_status = fcomp.comp_status; + + /* Retrieve flash partition info */ + fcomp.comp_status = 0; + reinit_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto out; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + drvinfo->flash_status = fcomp.comp_status; +out: + return ret; +} + +static int +bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *drv_info; + int rc; + + drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!drv_info) + return -ENOMEM; + + drv_info->buffer_len = sizeof(struct bnad_drvinfo); + + drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL); + if (!drv_info->debug_buffer) { + kfree(drv_info); + drv_info = NULL; + return -ENOMEM; + } + + mutex_lock(&bnad->conf_mutex); + rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer, + drv_info->buffer_len); + mutex_unlock(&bnad->conf_mutex); + if (rc != BFA_STATUS_OK) { + kfree(drv_info->debug_buffer); + drv_info->debug_buffer = NULL; + kfree(drv_info); + drv_info = NULL; + pr_warn("bna %s: Failed to collect drvinfo\n", + pci_name(bnad->pcidev)); + return -ENOMEM; + } + + file->private_data = drv_info; + + return 0; +} + +/* Changes the current file position */ +static loff_t +bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return -EINVAL; + + return fixed_size_llseek(file, offset, orig, debug->buffer_len); +} + +static ssize_t +bnad_debugfs_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug || !debug->debug_buffer) + return 0; + + return simple_read_from_buffer(buf, nbytes, pos, + debug->debug_buffer, debug->buffer_len); +} + +#define BFA_REG_CT_ADDRSZ (0x40000) +#define BFA_REG_CB_ADDRSZ (0x20000) +#define BFA_REG_ADDRSZ(__ioc) \ + ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? 
\ + BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) +#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) + +/* + * Function to check if the register offset passed is valid. + */ +static int +bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len) +{ + u8 area; + + /* check [16:15] */ + area = (offset >> 15) & 0x7; + if (area == 0) { + /* PCIe core register */ + if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else if (area == 0x1) { + /* CB 32 KB memory page */ + if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else { + /* CB register space 64KB */ + if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc)) + return BFA_STATUS_EINVAL; + } + return BFA_STATUS_OK; +} + +static ssize_t +bnad_debugfs_read_regrd(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bnad_debug_info *regrd_debug = file->private_data; + struct bnad *bnad = (struct bnad *)regrd_debug->i_private; + ssize_t rc; + + if (!bnad->regdata) + return 0; + + rc = simple_read_from_buffer(buf, nbytes, pos, + bnad->regdata, bnad->reglen); + + if ((*pos + nbytes) >= bnad->reglen) { + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + } + + return rc; +} + +static ssize_t +bnad_debugfs_write_regrd(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bnad_debug_info *regrd_debug = file->private_data; + struct bnad *bnad = (struct bnad *)regrd_debug->i_private; + struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; + int addr, len, rc, i; + u32 *regbuf; + void __iomem *rb, *reg_addr; + unsigned long flags; + void *kern_buf; + + /* Allocate memory to store the user space buf */ + kern_buf = kzalloc(nbytes, GFP_KERNEL); + if (!kern_buf) + return -ENOMEM; + + if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { + kfree(kern_buf); + return -ENOMEM; + } + + rc = sscanf(kern_buf, "%x:%x", &addr, &len); + if (rc < 2) { + pr_warn("bna %s: Failed to read user buffer\n", + pci_name(bnad->pcidev)); + kfree(kern_buf); + return -EINVAL; + } + + kfree(kern_buf); + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + + bnad->regdata = kzalloc(len << 2, GFP_KERNEL); + if (!bnad->regdata) + return -ENOMEM; + + bnad->reglen = len << 2; + rb = bfa_ioc_bar0(ioc); + addr &= BFA_REG_ADDRMSK(ioc); + + /* offset and len sanity check */ + rc = bna_reg_offset_check(ioc, addr, len); + if (rc) { + pr_warn("bna %s: Failed reg offset check\n", + pci_name(bnad->pcidev)); + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + return -EINVAL; + } + + reg_addr = rb + addr; + regbuf = (u32 *)bnad->regdata; + spin_lock_irqsave(&bnad->bna_lock, flags); + for (i = 0; i < len; i++) { + *regbuf = readl(reg_addr); + regbuf++; + reg_addr += sizeof(u32); + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return nbytes; +} + +static ssize_t +bnad_debugfs_write_regwr(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bnad_debug_info *debug = file->private_data; + struct bnad *bnad = (struct bnad *)debug->i_private; + struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; + int addr, val, rc; + void __iomem *reg_addr; + unsigned long flags; + void *kern_buf; + + /* Allocate memory to store the user space buf */ + kern_buf = kzalloc(nbytes, GFP_KERNEL); + if (!kern_buf) + return -ENOMEM; + + if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { + kfree(kern_buf); + return -ENOMEM; + } + + rc = sscanf(kern_buf, "%x:%x", &addr, &val); + if (rc < 2) { + 
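+ /* expected input is "<offset>:<value>", both in hex */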
pr_warn("bna %s: Failed to read user buffer\n", + pci_name(bnad->pcidev)); + kfree(kern_buf); + return -EINVAL; + } + kfree(kern_buf); + + addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ + + /* offset and len sanity check */ + rc = bna_reg_offset_check(ioc, addr, 1); + if (rc) { + pr_warn("bna %s: Failed reg offset check\n", + pci_name(bnad->pcidev)); + return -EINVAL; + } + + reg_addr = (bfa_ioc_bar0(ioc)) + addr; + spin_lock_irqsave(&bnad->bna_lock, flags); + writel(val, reg_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return nbytes; +} + +static int +bnad_debugfs_release(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return 0; + + file->private_data = NULL; + kfree(debug); + return 0; +} + +static int +bnad_debugfs_buffer_release(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return 0; + + kfree(debug->debug_buffer); + + file->private_data = NULL; + kfree(debug); + debug = NULL; + return 0; +} + +static const struct file_operations bnad_debugfs_op_fwtrc = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_fwtrc, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +static const struct file_operations bnad_debugfs_op_fwsave = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_fwsave, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +static const struct file_operations bnad_debugfs_op_regrd = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_reg, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read_regrd, + .write = bnad_debugfs_write_regrd, + .release = bnad_debugfs_release, +}; + +static const struct file_operations bnad_debugfs_op_regwr = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_reg, + .llseek = bnad_debugfs_lseek, + .write = bnad_debugfs_write_regwr, + .release = bnad_debugfs_release, +}; + +static const struct file_operations bnad_debugfs_op_drvinfo = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_drvinfo, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +struct bnad_debugfs_entry { + const char *name; + umode_t mode; + const struct file_operations *fops; +}; + +static const struct bnad_debugfs_entry bnad_debugfs_files[] = { + { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, }, + { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, }, + { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, }, + { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, }, + { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, }, +}; + +static struct dentry *bna_debugfs_root; +static atomic_t bna_debugfs_port_count; + +/* Initialize debugfs interface for BNA */ +void +bnad_debugfs_init(struct bnad *bnad) +{ + const struct bnad_debugfs_entry *file; + char name[64]; + int i; + + /* Setup the BNA debugfs root directory*/ + if (!bna_debugfs_root) { + bna_debugfs_root = debugfs_create_dir("bna", NULL); + atomic_set(&bna_debugfs_port_count, 0); + if (!bna_debugfs_root) { + pr_warn("BNA: debugfs root dir creation failed\n"); + return; + } + } + + /* Setup the pci_dev debugfs directory for the port */ + snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev)); + if (!bnad->port_debugfs_root) { + bnad->port_debugfs_root = + debugfs_create_dir(name, bna_debugfs_root); + if (!bnad->port_debugfs_root) { + pr_warn("bna 
pci_dev %s: root dir creation failed\n", + pci_name(bnad->pcidev)); + return; + } + + atomic_inc(&bna_debugfs_port_count); + + for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { + file = &bnad_debugfs_files[i]; + bnad->bnad_dentry_files[i] = + debugfs_create_file(file->name, + file->mode, + bnad->port_debugfs_root, + bnad, + file->fops); + if (!bnad->bnad_dentry_files[i]) { + pr_warn( + "BNA pci_dev:%s: create %s entry failed\n", + pci_name(bnad->pcidev), file->name); + return; + } + } + } +} + +/* Uninitialize debugfs interface for BNA */ +void +bnad_debugfs_uninit(struct bnad *bnad) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { + if (bnad->bnad_dentry_files[i]) { + debugfs_remove(bnad->bnad_dentry_files[i]); + bnad->bnad_dentry_files[i] = NULL; + } + } + + /* Remove the pci_dev debugfs directory for the port */ + if (bnad->port_debugfs_root) { + debugfs_remove(bnad->port_debugfs_root); + bnad->port_debugfs_root = NULL; + atomic_dec(&bna_debugfs_port_count); + } + + /* Remove the BNA debugfs root directory */ + if (atomic_read(&bna_debugfs_port_count) == 0) { + debugfs_remove(bna_debugfs_root); + bna_debugfs_root = NULL; + } +} diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c new file mode 100644 index 000000000..2a1a1d21d --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -0,0 +1,1140 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
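/*
 * The table-driven debugfs setup in bnad_debugfs_init() above is a common
 * kernel pattern: one static array of {name, mode, fops} triples and one loop
 * that creates each file under a per-device directory. A minimal, hedged
 * sketch of that pattern follows; my_debugfs_*, my_fops and the entry names
 * are hypothetical placeholders, not part of the bna driver.
 */
#include <linux/debugfs.h>
#include <linux/kernel.h>

struct my_debugfs_entry {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
};

extern const struct file_operations my_fops;	/* hypothetical fops */

static const struct my_debugfs_entry my_debugfs_files[] = {
	{ "stats",   0444, &my_fops },
	{ "control", 0644, &my_fops },
};

static struct dentry *my_debugfs_root;

static void my_debugfs_init(void *priv)
{
	int i;

	my_debugfs_root = debugfs_create_dir("my_dev", NULL);
	if (!my_debugfs_root)
		return;

	for (i = 0; i < ARRAY_SIZE(my_debugfs_files); i++)
		debugfs_create_file(my_debugfs_files[i].name,
				    my_debugfs_files[i].mode,
				    my_debugfs_root, priv,
				    my_debugfs_files[i].fops);
}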
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "cna.h" + +#include +#include +#include +#include + +#include "bna.h" + +#include "bnad.h" + +#define BNAD_NUM_TXF_COUNTERS 12 +#define BNAD_NUM_RXF_COUNTERS 10 +#define BNAD_NUM_CQ_COUNTERS (3 + 5) +#define BNAD_NUM_RXQ_COUNTERS 6 +#define BNAD_NUM_TXQ_COUNTERS 5 + +#define BNAD_ETHTOOL_STATS_NUM \ + (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \ + sizeof(struct bnad_drv_stats) / sizeof(u64) + \ + offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64)) + +static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", + "tx_errors", + "rx_dropped", + "tx_dropped", + "multicast", + "collisions", + + "rx_length_errors", + "rx_over_errors", + "rx_crc_errors", + "rx_frame_errors", + "rx_fifo_errors", + "rx_missed_errors", + + "tx_aborted_errors", + "tx_carrier_errors", + "tx_fifo_errors", + "tx_heartbeat_errors", + "tx_window_errors", + + "rx_compressed", + "tx_compressed", + + "netif_queue_stop", + "netif_queue_wakeup", + "netif_queue_stopped", + "tso4", + "tso6", + "tso_err", + "tcpcsum_offload", + "udpcsum_offload", + "csum_help", + "tx_skb_too_short", + "tx_skb_stopping", + "tx_skb_max_vectors", + "tx_skb_mss_too_long", + "tx_skb_tso_too_short", + "tx_skb_tso_prepare", + "tx_skb_non_tso_too_long", + "tx_skb_tcp_hdr", + "tx_skb_udp_hdr", + "tx_skb_csum_err", + "tx_skb_headlen_too_long", + "tx_skb_headlen_zero", + "tx_skb_frag_zero", + "tx_skb_len_mismatch", + "hw_stats_updates", + "netif_rx_dropped", + + "link_toggle", + "cee_toggle", + + "rxp_info_alloc_failed", + "mbox_intr_disabled", + "mbox_intr_enabled", + "tx_unmap_q_alloc_failed", + "rx_unmap_q_alloc_failed", + "rxbuf_alloc_failed", + + "mac_stats_clr_cnt", + "mac_frame_64", + "mac_frame_65_127", + "mac_frame_128_255", + "mac_frame_256_511", + "mac_frame_512_1023", + "mac_frame_1024_1518", + "mac_frame_1518_1522", + "mac_rx_bytes", + "mac_rx_packets", + "mac_rx_fcs_error", + "mac_rx_multicast", + "mac_rx_broadcast", + "mac_rx_control_frames", + "mac_rx_pause", + "mac_rx_unknown_opcode", + "mac_rx_alignment_error", + "mac_rx_frame_length_error", + "mac_rx_code_error", + "mac_rx_carrier_sense_error", + "mac_rx_undersize", + "mac_rx_oversize", + "mac_rx_fragments", + "mac_rx_jabber", + "mac_rx_drop", + + "mac_tx_bytes", + "mac_tx_packets", + "mac_tx_multicast", + "mac_tx_broadcast", + "mac_tx_pause", + "mac_tx_deferral", + "mac_tx_excessive_deferral", + "mac_tx_single_collision", + "mac_tx_muliple_collision", + "mac_tx_late_collision", + "mac_tx_excessive_collision", + "mac_tx_total_collision", + "mac_tx_pause_honored", + "mac_tx_drop", + "mac_tx_jabber", + "mac_tx_fcs_error", + "mac_tx_control_frame", + "mac_tx_oversize", + "mac_tx_undersize", + "mac_tx_fragments", + + "bpc_tx_pause_0", + "bpc_tx_pause_1", + "bpc_tx_pause_2", + "bpc_tx_pause_3", + "bpc_tx_pause_4", + "bpc_tx_pause_5", + "bpc_tx_pause_6", + "bpc_tx_pause_7", + "bpc_tx_zero_pause_0", + "bpc_tx_zero_pause_1", + "bpc_tx_zero_pause_2", + "bpc_tx_zero_pause_3", + "bpc_tx_zero_pause_4", + "bpc_tx_zero_pause_5", + "bpc_tx_zero_pause_6", + "bpc_tx_zero_pause_7", + "bpc_tx_first_pause_0", + "bpc_tx_first_pause_1", + "bpc_tx_first_pause_2", + "bpc_tx_first_pause_3", + "bpc_tx_first_pause_4", + "bpc_tx_first_pause_5", + "bpc_tx_first_pause_6", + "bpc_tx_first_pause_7", + + "bpc_rx_pause_0", + "bpc_rx_pause_1", + "bpc_rx_pause_2", + "bpc_rx_pause_3", + "bpc_rx_pause_4", + 
"bpc_rx_pause_5", + "bpc_rx_pause_6", + "bpc_rx_pause_7", + "bpc_rx_zero_pause_0", + "bpc_rx_zero_pause_1", + "bpc_rx_zero_pause_2", + "bpc_rx_zero_pause_3", + "bpc_rx_zero_pause_4", + "bpc_rx_zero_pause_5", + "bpc_rx_zero_pause_6", + "bpc_rx_zero_pause_7", + "bpc_rx_first_pause_0", + "bpc_rx_first_pause_1", + "bpc_rx_first_pause_2", + "bpc_rx_first_pause_3", + "bpc_rx_first_pause_4", + "bpc_rx_first_pause_5", + "bpc_rx_first_pause_6", + "bpc_rx_first_pause_7", + + "rad_rx_frames", + "rad_rx_octets", + "rad_rx_vlan_frames", + "rad_rx_ucast", + "rad_rx_ucast_octets", + "rad_rx_ucast_vlan", + "rad_rx_mcast", + "rad_rx_mcast_octets", + "rad_rx_mcast_vlan", + "rad_rx_bcast", + "rad_rx_bcast_octets", + "rad_rx_bcast_vlan", + "rad_rx_drops", + + "rlb_rad_rx_frames", + "rlb_rad_rx_octets", + "rlb_rad_rx_vlan_frames", + "rlb_rad_rx_ucast", + "rlb_rad_rx_ucast_octets", + "rlb_rad_rx_ucast_vlan", + "rlb_rad_rx_mcast", + "rlb_rad_rx_mcast_octets", + "rlb_rad_rx_mcast_vlan", + "rlb_rad_rx_bcast", + "rlb_rad_rx_bcast_octets", + "rlb_rad_rx_bcast_vlan", + "rlb_rad_rx_drops", + + "fc_rx_ucast_octets", + "fc_rx_ucast", + "fc_rx_ucast_vlan", + "fc_rx_mcast_octets", + "fc_rx_mcast", + "fc_rx_mcast_vlan", + "fc_rx_bcast_octets", + "fc_rx_bcast", + "fc_rx_bcast_vlan", + + "fc_tx_ucast_octets", + "fc_tx_ucast", + "fc_tx_ucast_vlan", + "fc_tx_mcast_octets", + "fc_tx_mcast", + "fc_tx_mcast_vlan", + "fc_tx_bcast_octets", + "fc_tx_bcast", + "fc_tx_bcast_vlan", + "fc_tx_parity_errors", + "fc_tx_timeout", + "fc_tx_fid_parity_errors", +}; + +static int +bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + cmd->supported = SUPPORTED_10000baseT_Full; + cmd->advertising = ADVERTISED_10000baseT_Full; + cmd->autoneg = AUTONEG_DISABLE; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->phy_address = 0; + + if (netif_carrier_ok(netdev)) { + ethtool_cmd_speed_set(cmd, SPEED_10000); + cmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } + cmd->transceiver = XCVR_EXTERNAL; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + + return 0; +} + +static int +bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + /* 10G full duplex setting supported only */ + if (cmd->autoneg == AUTONEG_ENABLE) + return -EOPNOTSUPP; else { + if ((ethtool_cmd_speed(cmd) == SPEED_10000) + && (cmd->duplex == DUPLEX_FULL)) + return 0; + } + + return -EOPNOTSUPP; +} + +static void +bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bfa_ioc_attr *ioc_attr; + unsigned long flags; + + strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version)); + + ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); + if (ioc_attr) { + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, + sizeof(drvinfo->fw_version)); + kfree(ioc_attr); + } + + strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev), + sizeof(drvinfo->bus_info)); +} + +static void +bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) +{ + wolinfo->supported = 0; + wolinfo->wolopts = 0; +} + +static int +bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +{ + struct bnad *bnad = netdev_priv(netdev); + 
unsigned long flags; + + /* Lock rqd. to access bnad->bna_lock */ + spin_lock_irqsave(&bnad->bna_lock, flags); + coalesce->use_adaptive_rx_coalesce = + (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * + BFI_COALESCING_TIMER_UNIT; + coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * + BFI_COALESCING_TIMER_UNIT; + coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT; + + return 0; +} + +static int +bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + int to_del = 0; + + if (coalesce->rx_coalesce_usecs == 0 || + coalesce->rx_coalesce_usecs > + BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) + return -EINVAL; + + if (coalesce->tx_coalesce_usecs == 0 || + coalesce->tx_coalesce_usecs > + BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + /* + * Do not need to store rx_coalesce_usecs here + * Every time DIM is disabled, we can get it from the + * stack. + */ + spin_lock_irqsave(&bnad->bna_lock, flags); + if (coalesce->use_adaptive_rx_coalesce) { + if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) { + bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; + bnad_dim_timer_start(bnad); + } + } else { + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { + bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, + &bnad->run_flags)) { + clear_bit(BNAD_RF_DIM_TIMER_RUNNING, + &bnad->run_flags); + to_del = 1; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->dim_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_rx_coalescing_timeo_set(bnad); + } + } + if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT) { + bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT; + bnad_tx_coalescing_timeo_set(bnad); + } + + if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT) { + bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT; + + if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) + bnad_rx_coalescing_timeo_set(bnad); + + } + + /* Add Tx Inter-pkt DMA count? 
*/ + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + return 0; +} + +static void +bnad_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct bnad *bnad = netdev_priv(netdev); + + ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH; + ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH; + + ringparam->rx_pending = bnad->rxq_depth; + ringparam->tx_pending = bnad->txq_depth; +} + +static int +bnad_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + int i, current_err, err = 0; + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + if (ringparam->rx_pending == bnad->rxq_depth && + ringparam->tx_pending == bnad->txq_depth) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH || + ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH || + !BNA_POWER_OF_2(ringparam->rx_pending)) { + mutex_unlock(&bnad->conf_mutex); + return -EINVAL; + } + if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH || + ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH || + !BNA_POWER_OF_2(ringparam->tx_pending)) { + mutex_unlock(&bnad->conf_mutex); + return -EINVAL; + } + + if (ringparam->rx_pending != bnad->rxq_depth) { + bnad->rxq_depth = ringparam->rx_pending; + if (!netif_running(netdev)) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + bnad_destroy_rx(bnad, i); + current_err = bnad_setup_rx(bnad, i); + if (current_err && !err) + err = current_err; + } + + if (!err && bnad->rx_info[0].rx) { + /* restore rx configuration */ + bnad_restore_vlans(bnad, 0); + bnad_enable_default_bcast(bnad); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | + BNAD_CF_PROMISC); + bnad_set_rx_mode(netdev); + } + } + if (ringparam->tx_pending != bnad->txq_depth) { + bnad->txq_depth = ringparam->tx_pending; + if (!netif_running(netdev)) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + bnad_destroy_tx(bnad, i); + current_err = bnad_setup_tx(bnad, i); + if (current_err && !err) + err = current_err; + } + } + + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static void +bnad_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct bnad *bnad = netdev_priv(netdev); + + pauseparam->autoneg = 0; + pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause; + pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause; +} + +static int +bnad_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bna_pause_config pause_config; + unsigned long flags; + + if (pauseparam->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause || + pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) { + pause_config.rx_pause = pauseparam->rx_pause; + pause_config.tx_pause = pauseparam->tx_pause; + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } + mutex_unlock(&bnad->conf_mutex); + return 0; +} + +static void +bnad_get_strings(struct net_device *netdev, u32 
stringset, u8 *string) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, q_num; + u32 bmap; + + mutex_lock(&bnad->conf_mutex); + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { + BUG_ON(!(strlen(bnad_net_stats_strings[i]) < + ETH_GSTRING_LEN)); + memcpy(string, bnad_net_stats_strings[i], + ETH_GSTRING_LEN); + string += ETH_GSTRING_LEN; + } + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + sprintf(string, "txf%d_ucast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_ucast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_ucast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_mcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_mcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_mcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_bcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_bcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_bcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_errors", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_filter_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "txf%d_filter_mac_sa", i); + string += ETH_GSTRING_LEN; + } + bmap >>= 1; + } + + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + sprintf(string, "rxf%d_ucast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_ucast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_ucast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_mcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_mcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_mcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_bcast_octets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_bcast", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_bcast_vlan", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rxf%d_frame_drops", i); + string += ETH_GSTRING_LEN; + } + bmap >>= 1; + } + + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + sprintf(string, "cq%d_producer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_consumer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_hw_producer_index", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_intr", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_poll", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_schedule", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_keep_poll", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "cq%d_complete", q_num); + string += ETH_GSTRING_LEN; + q_num++; + } + } + + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + sprintf(string, "rxq%d_packets", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_bytes", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_packets_with_error", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_allocbuf_failed", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_producer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_consumer_index", q_num); + string += ETH_GSTRING_LEN; + q_num++; + if 
(bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + sprintf(string, "rxq%d_packets", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_bytes", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, + "rxq%d_packets_with_error", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_allocbuf_failed", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_producer_index", + q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_consumer_index", + q_num); + string += ETH_GSTRING_LEN; + q_num++; + } + } + } + + q_num = 0; + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) { + sprintf(string, "txq%d_packets", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_bytes", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_producer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_consumer_index", q_num); + string += ETH_GSTRING_LEN; + sprintf(string, "txq%d_hw_consumer_index", + q_num); + string += ETH_GSTRING_LEN; + q_num++; + } + } + + break; + + default: + break; + } + + mutex_unlock(&bnad->conf_mutex); +} + +static int +bnad_get_stats_count_locked(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0; + u32 bmap; + + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + txf_active_num++; + bmap >>= 1; + } + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + rxf_active_num++; + bmap >>= 1; + } + count = BNAD_ETHTOOL_STATS_NUM + + txf_active_num * BNAD_NUM_TXF_COUNTERS + + rxf_active_num * BNAD_NUM_RXF_COUNTERS; + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; + count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) + count += BNAD_NUM_RXQ_COUNTERS; + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; + } + return count; +} + +static int +bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) +{ + int i, j; + struct bna_rcb *rcb = NULL; + struct bna_tcb *tcb = NULL; + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { + buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. + ccb->producer_index; + buf[bi++] = 0; /* ccb->consumer_index */ + buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. + ccb->hw_producer_index); + + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_intr_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_poll_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_schedule; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_keep_poll; + buf[bi++] = bnad->rx_info[i]. 
+ rx_ctrl[j].rx_complete; + } + } + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb) { + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[0]->rxq) { + rcb = bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[0]; + buf[bi++] = rcb->rxq->rx_packets; + buf[bi++] = rcb->rxq->rx_bytes; + buf[bi++] = rcb->rxq-> + rx_packets_with_error; + buf[bi++] = rcb->rxq-> + rxbuf_alloc_failed; + buf[bi++] = rcb->producer_index; + buf[bi++] = rcb->consumer_index; + } + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + rcb = bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]; + buf[bi++] = rcb->rxq->rx_packets; + buf[bi++] = rcb->rxq->rx_bytes; + buf[bi++] = rcb->rxq-> + rx_packets_with_error; + buf[bi++] = rcb->rxq-> + rxbuf_alloc_failed; + buf[bi++] = rcb->producer_index; + buf[bi++] = rcb->consumer_index; + } + } + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) + if (bnad->tx_info[i].tcb[j] && + bnad->tx_info[i].tcb[j]->txq) { + tcb = bnad->tx_info[i].tcb[j]; + buf[bi++] = tcb->txq->tx_packets; + buf[bi++] = tcb->txq->tx_bytes; + buf[bi++] = tcb->producer_index; + buf[bi++] = tcb->consumer_index; + buf[bi++] = *(tcb->hw_consumer_index); + } + } + + return bi; +} + +static void +bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *buf) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, bi; + unsigned long flags; + struct rtnl_link_stats64 *net_stats64; + u64 *stats64; + u32 bmap; + + mutex_lock(&bnad->conf_mutex); + if (bnad_get_stats_count_locked(netdev) != stats->n_stats) { + mutex_unlock(&bnad->conf_mutex); + return; + } + + /* + * Used bna_lock to sync reads from bna_stats, which is written + * under the same lock + */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bi = 0; + memset(buf, 0, stats->n_stats * sizeof(u64)); + + net_stats64 = (struct rtnl_link_stats64 *)buf; + bnad_netdev_qstats_fill(bnad, net_stats64); + bnad_netdev_hwstats_fill(bnad, net_stats64); + + bi = sizeof(*net_stats64) / sizeof(u64); + + /* Get netif_queue_stopped from stack */ + bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); + + /* Fill driver stats into ethtool buffers */ + stats64 = (u64 *)&bnad->stats.drv_stats; + for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) + buf[bi++] = stats64[i]; + + /* Fill hardware stats excluding the rxf/txf into ethtool bufs */ + stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats; + for (i = 0; + i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / + sizeof(u64); + i++) + buf[bi++] = stats64[i]; + + /* Fill txf stats into ethtool buffers */ + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats64 = (u64 *)&bnad->stats.bna_stats-> + hw_stats.txf_stats[i]; + for (j = 0; j < sizeof(struct bfi_enet_stats_txf) / + sizeof(u64); j++) + buf[bi++] = stats64[j]; + } + bmap >>= 1; + } + + /* Fill rxf stats into ethtool buffers */ + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats64 = (u64 *)&bnad->stats.bna_stats-> + hw_stats.rxf_stats[i]; + for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) / + sizeof(u64); j++) + buf[bi++] = stats64[j]; + } + bmap >>= 1; + } + + /* Fill per Q stats into ethtool buffers */ + bi = bnad_per_q_stats_fill(bnad, buf, bi); + + spin_unlock_irqrestore(&bnad->bna_lock, 
flags); + + mutex_unlock(&bnad->conf_mutex); +} + +static int +bnad_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return bnad_get_stats_count_locked(netdev); + default: + return -EOPNOTSUPP; + } +} + +static u32 +bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset, + u32 *base_offset) +{ + struct bfa_flash_attr *flash_attr; + struct bnad_iocmd_comp fcomp; + u32 i, flash_part = 0, ret; + unsigned long flags = 0; + + flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL); + if (!flash_attr) + return 0; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + kfree(flash_attr); + return 0; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; + + /* Check for the flash type & base offset value */ + if (ret == BFA_STATUS_OK) { + for (i = 0; i < flash_attr->npart; i++) { + if (offset >= flash_attr->part[i].part_off && + offset < (flash_attr->part[i].part_off + + flash_attr->part[i].part_size)) { + flash_part = flash_attr->part[i].part_type; + *base_offset = flash_attr->part[i].part_off; + break; + } + } + } + kfree(flash_attr); + return flash_part; +} + +static int +bnad_get_eeprom_len(struct net_device *netdev) +{ + return BFA_TOTAL_FLASH_SIZE; +} + +static int +bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + u32 flash_part = 0, base_offset = 0; + unsigned long flags = 0; + int ret = 0; + + /* Fill the magic value */ + eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16); + + /* Query the flash partition based on the offset */ + flash_part = bnad_get_flash_partition_by_offset(bnad, + eeprom->offset, &base_offset); + if (flash_part == 0) + return -EFAULT; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part, + bnad->id, bytes, eeprom->len, + eeprom->offset - base_offset, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto done; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; +done: + return ret; +} + +static int +bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + u32 flash_part = 0, base_offset = 0; + unsigned long flags = 0; + int ret = 0; + + /* Check if the flash update request is valid */ + if (eeprom->magic != (bnad->pcidev->vendor | + (bnad->pcidev->device << 16))) + return -EINVAL; + + /* Query the flash partition based on the offset */ + flash_part = bnad_get_flash_partition_by_offset(bnad, + eeprom->offset, &base_offset); + if (flash_part == 0) + return -EFAULT; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part, + bnad->id, bytes, eeprom->len, + eeprom->offset - base_offset, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + 
spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto done; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; +done: + return ret; +} + +static int +bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + const struct firmware *fw; + int ret = 0; + + ret = reject_firmware(&fw, eflash->data, &bnad->pcidev->dev); + if (ret) { + pr_err("BNA: Can't locate firmware %s\n", eflash->data); + goto out; + } + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irq(&bnad->bna_lock); + ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG, + bnad->id, (u8 *)fw->data, fw->size, 0, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + pr_warn("BNA: Flash update failed with err: %d\n", ret); + ret = -EIO; + spin_unlock_irq(&bnad->bna_lock); + goto out; + } + + spin_unlock_irq(&bnad->bna_lock); + wait_for_completion(&fcomp.comp); + if (fcomp.comp_status != BFA_STATUS_OK) { + ret = -EIO; + pr_warn("BNA: Firmware image update to flash failed with: %d\n", + fcomp.comp_status); + } +out: + release_firmware(fw); + return ret; +} + +static const struct ethtool_ops bnad_ethtool_ops = { + .get_settings = bnad_get_settings, + .set_settings = bnad_set_settings, + .get_drvinfo = bnad_get_drvinfo, + .get_wol = bnad_get_wol, + .get_link = ethtool_op_get_link, + .get_coalesce = bnad_get_coalesce, + .set_coalesce = bnad_set_coalesce, + .get_ringparam = bnad_get_ringparam, + .set_ringparam = bnad_set_ringparam, + .get_pauseparam = bnad_get_pauseparam, + .set_pauseparam = bnad_set_pauseparam, + .get_strings = bnad_get_strings, + .get_ethtool_stats = bnad_get_ethtool_stats, + .get_sset_count = bnad_get_sset_count, + .get_eeprom_len = bnad_get_eeprom_len, + .get_eeprom = bnad_get_eeprom, + .set_eeprom = bnad_set_eeprom, + .flash_device = bnad_flash_device, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void +bnad_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &bnad_ethtool_ops; +} diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h new file mode 100644 index 000000000..1901ff1c6 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -0,0 +1,107 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2006-2014 Brocade Communications Systems, Inc. 
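/*
 * bnad_get_eeprom(), bnad_set_eeprom() and bnad_flash_device() above all use
 * the same wait-for-callback idiom: seed a struct completion, submit the
 * asynchronous flash request with a completion callback, then sleep in
 * wait_for_completion() and read back the status. A stripped-down, hedged
 * sketch of that idiom; the my_* names and the submit() callback signature
 * are hypothetical, not the driver's own.
 */
#include <linux/completion.h>

struct my_async_cmd {
	struct completion done;
	int status;
};

/* The lower layer calls this when the asynchronous operation finishes. */
static void my_cmd_complete(void *arg, int status)
{
	struct my_async_cmd *cmd = arg;

	cmd->status = status;
	complete(&cmd->done);
}

static int my_run_async_cmd(int (*submit)(void (*cb)(void *, int), void *arg))
{
	struct my_async_cmd cmd;
	int ret;

	init_completion(&cmd.done);
	ret = submit(my_cmd_complete, &cmd);	/* queues the work and returns */
	if (ret)
		return ret;

	wait_for_completion(&cmd.done);		/* sleeps until my_cmd_complete() runs */
	return cmd.status;
}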
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __CNA_H__ +#define __CNA_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define bfa_sm_fault(__event) do { \ + pr_err("SM Assertion failure: %s: %d: event = %d\n", \ + __FILE__, __LINE__, __event); \ +} while (0) + +extern char bfa_version[]; + +#define CNA_FW_FILE_CT "/*(DEBLOBBED)*/" +#define CNA_FW_FILE_CT2 "/*(DEBLOBBED)*/" +#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ + +#pragma pack(1) + +typedef struct mac { u8 mac[ETH_ALEN]; } mac_t; + +#pragma pack() + +#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) +#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) +#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev) + +/* + * bfa_q_qe_init - to initialize a queue element + */ +#define bfa_q_qe_init(_qe) { \ + bfa_q_next(_qe) = (struct list_head *) NULL; \ + bfa_q_prev(_qe) = (struct list_head *) NULL; \ +} + +/* + * bfa_q_deq - dequeue an element from head of the queue + */ +#define bfa_q_deq(_q, _qe) { \ + if (!list_empty(_q)) { \ + (*((struct list_head **) (_qe))) = bfa_q_next(_q); \ + bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ + (struct list_head *) (_q); \ + bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \ + bfa_q_qe_init(*((struct list_head **) _qe)); \ + } else { \ + *((struct list_head **)(_qe)) = NULL; \ + } \ +} + +/* + * bfa_q_deq_tail - dequeue an element from tail of the queue + */ +#define bfa_q_deq_tail(_q, _qe) { \ + if (!list_empty(_q)) { \ + *((struct list_head **) (_qe)) = bfa_q_prev(_q); \ + bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \ + (struct list_head *) (_q); \ + bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\ + bfa_q_qe_init(*((struct list_head **) _qe)); \ + } else { \ + *((struct list_head **) (_qe)) = (struct list_head *) NULL; \ + } \ +} + +/* + * bfa_add_tail_head - enqueue an element at the head of queue + */ +#define bfa_q_enq_head(_q, _qe) { \ + if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)) \ + pr_err("Assertion failure: %s:%d: %d", \ + __FILE__, __LINE__, \ + (bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\ + bfa_q_next(_qe) = bfa_q_next(_q); \ + bfa_q_prev(_qe) = (struct list_head *) (_q); \ + bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \ + bfa_q_next(_q) = (struct list_head *) (_qe); \ +} + +#endif /* __CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c new file mode 100644 index 000000000..a59f221bd --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -0,0 +1,97 @@ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
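/*
 * The bfa_q_deq() macro in cna.h above hand-codes "pop the first element and
 * re-initialise its links" on top of struct list_head. The same operation can
 * be expressed with the standard <linux/list.h> helpers; a hedged sketch,
 * with struct my_qe and its 'qe' member as hypothetical stand-ins for
 * whatever structure is actually queued.
 */
#include <linux/list.h>

struct my_qe {
	struct list_head qe;
	/* payload ... */
};

static struct my_qe *my_q_deq(struct list_head *q)
{
	struct my_qe *e;

	e = list_first_entry_or_null(q, struct my_qe, qe);
	if (e)
		list_del_init(&e->qe);	/* unlink + re-init, like bfa_q_qe_init() */
	return e;
}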
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include +#include "bnad.h" +#include "bfi.h" +#include "cna.h" + +const struct firmware *bfi_fw; +static u32 *bfi_image_ct_cna, *bfi_image_ct2_cna; +static u32 bfi_image_ct_cna_size, bfi_image_ct2_cna_size; + +static u32 * +cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, + u32 *bfi_image_size, char *fw_name) +{ + const struct firmware *fw; + u32 n; + + if (reject_firmware(&fw, fw_name, &pdev->dev)) { + pr_alert("Can't locate firmware %s\n", fw_name); + goto error; + } + + *bfi_image = (u32 *)fw->data; + *bfi_image_size = fw->size/sizeof(u32); + bfi_fw = fw; + + /* Convert loaded firmware to host order as it is stored in file + * as sequence of LE32 integers. + */ + for (n = 0; n < *bfi_image_size; n++) + le32_to_cpus(*bfi_image + n); + + return *bfi_image; +error: + return NULL; +} + +u32 * +cna_get_firmware_buf(struct pci_dev *pdev) +{ + if (pdev->device == BFA_PCI_DEVICE_ID_CT2) { + if (bfi_image_ct2_cna_size == 0) + cna_read_firmware(pdev, &bfi_image_ct2_cna, + &bfi_image_ct2_cna_size, CNA_FW_FILE_CT2); + return bfi_image_ct2_cna; + } else if (bfa_asic_id_ct(pdev->device)) { + if (bfi_image_ct_cna_size == 0) + cna_read_firmware(pdev, &bfi_image_ct_cna, + &bfi_image_ct_cna_size, CNA_FW_FILE_CT); + return bfi_image_ct_cna; + } + + return NULL; +} + +u32 * +bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off) +{ + switch (asic_gen) { + case BFI_ASIC_GEN_CT: + return (bfi_image_ct_cna + off); + case BFI_ASIC_GEN_CT2: + return (bfi_image_ct2_cna + off); + default: + return NULL; + } +} + +u32 +bfa_cb_image_get_size(enum bfi_asic_gen asic_gen) +{ + switch (asic_gen) { + case BFI_ASIC_GEN_CT: + return bfi_image_ct_cna_size; + case BFI_ASIC_GEN_CT2: + return bfi_image_ct2_cna_size; + default: + return 0; + } +} diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig new file mode 100644 index 000000000..1ba3e3a67 --- /dev/null +++ b/drivers/net/ethernet/cadence/Kconfig @@ -0,0 +1,36 @@ +# +# Atmel device configuration +# + +config NET_CADENCE + bool "Cadence devices" + depends on HAS_IOMEM + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y. + Make sure you know the name of your card. Read the Ethernet-HOWTO, + available from . + + If unsure, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the remaining Atmel network card questions. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_CADENCE + +config MACB + tristate "Cadence MACB/GEM support" + depends on HAS_DMA + select PHYLIB + ---help--- + The Cadence MACB ethernet interface is found on many Atmel AT32 and + AT91 parts. This driver also supports the Cadence GEM (Gigabit + Ethernet MAC found in some ARM SoC devices). Say Y to include + support for the MACB/GEM chip. + + To compile this driver as a module, choose M here: the module + will be called macb. + +endif # NET_CADENCE diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile new file mode 100644 index 000000000..91f79b1f0 --- /dev/null +++ b/drivers/net/ethernet/cadence/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Atmel network device drivers. 
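/*
 * cna_read_firmware() above treats the firmware blob as an array of
 * little-endian 32-bit words and converts each word to host byte order in
 * place with le32_to_cpus(). A small userspace sketch of the same idea,
 * assembling each word from bytes so it is correct on any host endianness;
 * the blob contents are made-up example data.
 */
#include <stdint.h>
#include <stdio.h>

/* Decode one little-endian 32-bit word, independent of host byte order. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Two LE32 words, 0x11223344 and 0x000000ff, stored byte-reversed. */
	const uint8_t blob[] = { 0x44, 0x33, 0x22, 0x11, 0xff, 0x00, 0x00, 0x00 };
	uint32_t image[2];
	size_t n;

	for (n = 0; n < sizeof(blob) / sizeof(uint32_t); n++)
		image[n] = le32_to_host(blob + n * 4);

	printf("0x%08x 0x%08x\n", image[0], image[1]);	/* 0x11223344 0x000000ff */
	return 0;
}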
+# + +obj-$(CONFIG_MACB) += macb.o diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c new file mode 100644 index 000000000..fc646a41d --- /dev/null +++ b/drivers/net/ethernet/cadence/macb.c @@ -0,0 +1,2935 @@ +/* + * Cadence MACB/GEM Ethernet Controller driver + * + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "macb.h" + +#define MACB_RX_BUFFER_SIZE 128 +#define RX_BUFFER_MULTIPLE 64 /* bytes */ +#define RX_RING_SIZE 512 /* must be power of 2 */ +#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) + +#define TX_RING_SIZE 128 /* must be power of 2 */ +#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) + +/* level of occupied TX descriptors under which we wake up TX process */ +#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) + +#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ + | MACB_BIT(ISR_ROVR)) +#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ + | MACB_BIT(ISR_RLE) \ + | MACB_BIT(TXERR)) +#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) + +#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1)) +#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1)) + +/* + * Graceful stop timeouts in us. We should allow up to + * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) + */ +#define MACB_HALT_TIMEOUT 1230 + +/* Ring buffer accessors */ +static unsigned int macb_tx_ring_wrap(unsigned int index) +{ + return index & (TX_RING_SIZE - 1); +} + +static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, + unsigned int index) +{ + return &queue->tx_ring[macb_tx_ring_wrap(index)]; +} + +static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, + unsigned int index) +{ + return &queue->tx_skb[macb_tx_ring_wrap(index)]; +} + +static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) +{ + dma_addr_t offset; + + offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc); + + return queue->tx_ring_dma + offset; +} + +static unsigned int macb_rx_ring_wrap(unsigned int index) +{ + return index & (RX_RING_SIZE - 1); +} + +static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) +{ + return &bp->rx_ring[macb_rx_ring_wrap(index)]; +} + +static void *macb_rx_buffer(struct macb *bp, unsigned int index) +{ + return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); +} + +static void macb_set_hwaddr(struct macb *bp) +{ + u32 bottom; + u16 top; + + bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); + macb_or_gem_writel(bp, SA1B, bottom); + top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); + macb_or_gem_writel(bp, SA1T, top); + + /* Clear unused address register sets */ + macb_or_gem_writel(bp, SA2B, 0); + macb_or_gem_writel(bp, SA2T, 0); + macb_or_gem_writel(bp, SA3B, 0); + macb_or_gem_writel(bp, SA3T, 0); + macb_or_gem_writel(bp, SA4B, 0); + macb_or_gem_writel(bp, SA4T, 0); +} + +static void macb_get_hwaddr(struct macb *bp) +{ + struct macb_platform_data *pdata; + u32 bottom; + u16 top; + u8 addr[6]; + int i; + + pdata = 
dev_get_platdata(&bp->pdev->dev); + + /* Check all 4 address register for vaild address */ + for (i = 0; i < 4; i++) { + bottom = macb_or_gem_readl(bp, SA1B + i * 8); + top = macb_or_gem_readl(bp, SA1T + i * 8); + + if (pdata && pdata->rev_eth_addr) { + addr[5] = bottom & 0xff; + addr[4] = (bottom >> 8) & 0xff; + addr[3] = (bottom >> 16) & 0xff; + addr[2] = (bottom >> 24) & 0xff; + addr[1] = top & 0xff; + addr[0] = (top & 0xff00) >> 8; + } else { + addr[0] = bottom & 0xff; + addr[1] = (bottom >> 8) & 0xff; + addr[2] = (bottom >> 16) & 0xff; + addr[3] = (bottom >> 24) & 0xff; + addr[4] = top & 0xff; + addr[5] = (top >> 8) & 0xff; + } + + if (is_valid_ether_addr(addr)) { + memcpy(bp->dev->dev_addr, addr, sizeof(addr)); + return; + } + } + + netdev_info(bp->dev, "invalid hw address, using random\n"); + eth_hw_addr_random(bp->dev); +} + +static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct macb *bp = bus->priv; + int value; + + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) + | MACB_BF(RW, MACB_MAN_READ) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) + | MACB_BF(CODE, MACB_MAN_CODE))); + + /* wait for end of transfer */ + while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) + cpu_relax(); + + value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); + + return value; +} + +static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct macb *bp = bus->priv; + + macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) + | MACB_BF(RW, MACB_MAN_WRITE) + | MACB_BF(PHYA, mii_id) + | MACB_BF(REGA, regnum) + | MACB_BF(CODE, MACB_MAN_CODE) + | MACB_BF(DATA, value))); + + /* wait for end of transfer */ + while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) + cpu_relax(); + + return 0; +} + +/** + * macb_set_tx_clk() - Set a clock to a new frequency + * @clk Pointer to the clock to change + * @rate New frequency in Hz + * @dev Pointer to the struct net_device + */ +static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) +{ + long ferr, rate, rate_rounded; + + if (!clk) + return; + + switch (speed) { + case SPEED_10: + rate = 2500000; + break; + case SPEED_100: + rate = 25000000; + break; + case SPEED_1000: + rate = 125000000; + break; + default: + return; + } + + rate_rounded = clk_round_rate(clk, rate); + if (rate_rounded < 0) + return; + + /* RGMII allows 50 ppm frequency error. Test and warn if this limit + * is not satisfied. 
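/*
 * The 50 ppm test that follows packs the tolerance check into two integer
 * operations: rate / 100000 is one hundred-thousandth of the target rate,
 * i.e. 10 ppm, so rounding the absolute error up in those units and comparing
 * against 5 flags any deviation above 50 ppm. A small userspace sketch of the
 * same arithmetic; the rate_rounded value is a made-up example, not output
 * from a real clk_round_rate() call.
 */
#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* ceil(n/d), as in the kernel */

int main(void)
{
	long rate = 125000000;		/* target tx_clk for SPEED_1000 */
	long rate_rounded = 125007000;	/* hypothetical rounded rate: +7 kHz */
	long ferr;

	ferr = labs(rate_rounded - rate);		/* absolute error in Hz */
	ferr = DIV_ROUND_UP(ferr, rate / 100000);	/* error in units of 10 ppm */

	if (ferr > 5)					/* i.e. more than 50 ppm */
		printf("would warn: ~%ld0 ppm error\n", ferr);
	else
		printf("within tolerance: ~%ld0 ppm\n", ferr);
	return 0;
}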
+ */ + ferr = abs(rate_rounded - rate); + ferr = DIV_ROUND_UP(ferr, rate / 100000); + if (ferr > 5) + netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", + rate); + + if (clk_set_rate(clk, rate_rounded)) + netdev_err(dev, "adjusting tx_clk failed.\n"); +} + +static void macb_handle_link_change(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + unsigned long flags; + + int status_change = 0; + + spin_lock_irqsave(&bp->lock, flags); + + if (phydev->link) { + if ((bp->speed != phydev->speed) || + (bp->duplex != phydev->duplex)) { + u32 reg; + + reg = macb_readl(bp, NCFGR); + reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); + if (macb_is_gem(bp)) + reg &= ~GEM_BIT(GBE); + + if (phydev->duplex) + reg |= MACB_BIT(FD); + if (phydev->speed == SPEED_100) + reg |= MACB_BIT(SPD); + if (phydev->speed == SPEED_1000 && + bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) + reg |= GEM_BIT(GBE); + + macb_or_gem_writel(bp, NCFGR, reg); + + bp->speed = phydev->speed; + bp->duplex = phydev->duplex; + status_change = 1; + } + } + + if (phydev->link != bp->link) { + if (!phydev->link) { + bp->speed = 0; + bp->duplex = -1; + } + bp->link = phydev->link; + + status_change = 1; + } + + spin_unlock_irqrestore(&bp->lock, flags); + + if (status_change) { + if (phydev->link) { + /* Update the TX clock rate if and only if the link is + * up and there has been a link change. + */ + macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); + + netif_carrier_on(dev); + netdev_info(dev, "link up (%d/%s)\n", + phydev->speed, + phydev->duplex == DUPLEX_FULL ? + "Full" : "Half"); + } else { + netif_carrier_off(dev); + netdev_info(dev, "link down\n"); + } + } +} + +/* based on au1000_eth. c*/ +static int macb_mii_probe(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + struct macb_platform_data *pdata; + struct phy_device *phydev; + int phy_irq; + int ret; + + phydev = phy_find_first(bp->mii_bus); + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENXIO; + } + + pdata = dev_get_platdata(&bp->pdev->dev); + if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { + ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); + if (!ret) { + phy_irq = gpio_to_irq(pdata->phy_irq_pin); + phydev->irq = (phy_irq < 0) ? 
PHY_POLL : phy_irq; + } + } + + /* attach the mac to the phy */ + ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, + bp->phy_interface); + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + /* mask with MAC supported features */ + if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) + phydev->supported &= PHY_GBIT_FEATURES; + else + phydev->supported &= PHY_BASIC_FEATURES; + + if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) + phydev->supported &= ~SUPPORTED_1000baseT_Half; + + phydev->advertising = phydev->supported; + + bp->link = 0; + bp->speed = 0; + bp->duplex = -1; + bp->phy_dev = phydev; + + return 0; +} + +static int macb_mii_init(struct macb *bp) +{ + struct macb_platform_data *pdata; + struct device_node *np; + int err = -ENXIO, i; + + /* Enable management port */ + macb_writel(bp, NCR, MACB_BIT(MPE)); + + bp->mii_bus = mdiobus_alloc(); + if (bp->mii_bus == NULL) { + err = -ENOMEM; + goto err_out; + } + + bp->mii_bus->name = "MACB_mii_bus"; + bp->mii_bus->read = &macb_mdio_read; + bp->mii_bus->write = &macb_mdio_write; + snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + bp->pdev->name, bp->pdev->id); + bp->mii_bus->priv = bp; + bp->mii_bus->parent = &bp->dev->dev; + pdata = dev_get_platdata(&bp->pdev->dev); + + bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + if (!bp->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; + } + + dev_set_drvdata(&bp->dev->dev, bp->mii_bus); + + np = bp->pdev->dev.of_node; + if (np) { + /* try dt phy registration */ + err = of_mdiobus_register(bp->mii_bus, np); + + /* fallback to standard phy registration if no phy were + found during dt phy registration */ + if (!err && !phy_find_first(bp->mii_bus)) { + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *phydev; + + phydev = mdiobus_scan(bp->mii_bus, i); + if (IS_ERR(phydev)) { + err = PTR_ERR(phydev); + break; + } + } + + if (err) + goto err_out_unregister_bus; + } + } else { + for (i = 0; i < PHY_MAX_ADDR; i++) + bp->mii_bus->irq[i] = PHY_POLL; + + if (pdata) + bp->mii_bus->phy_mask = pdata->phy_mask; + + err = mdiobus_register(bp->mii_bus); + } + + if (err) + goto err_out_free_mdio_irq; + + err = macb_mii_probe(bp->dev); + if (err) + goto err_out_unregister_bus; + + return 0; + +err_out_unregister_bus: + mdiobus_unregister(bp->mii_bus); +err_out_free_mdio_irq: + kfree(bp->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(bp->mii_bus); +err_out: + return err; +} + +static void macb_update_stats(struct macb *bp) +{ + u32 __iomem *reg = bp->regs + MACB_PFR; + u32 *p = &bp->hw_stats.macb.rx_pause_frames; + u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; + + WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); + + for(; p < end; p++, reg++) + *p += readl_relaxed(reg); +} + +static int macb_halt_tx(struct macb *bp) +{ + unsigned long halt_time, timeout; + u32 status; + + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); + + timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); + do { + halt_time = jiffies; + status = macb_readl(bp, TSR); + if (!(status & MACB_BIT(TGO))) + return 0; + + usleep_range(10, 250); + } while (time_before(halt_time, timeout)); + + return -ETIMEDOUT; +} + +static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) +{ + if (tx_skb->mapping) { + if (tx_skb->mapped_as_page) + dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, + tx_skb->size, DMA_TO_DEVICE); + else + dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, + tx_skb->size, DMA_TO_DEVICE); 
+ tx_skb->mapping = 0; + } + + if (tx_skb->skb) { + dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + } +} + +static void macb_tx_error_task(struct work_struct *work) +{ + struct macb_queue *queue = container_of(work, struct macb_queue, + tx_error_task); + struct macb *bp = queue->bp; + struct macb_tx_skb *tx_skb; + struct macb_dma_desc *desc; + struct sk_buff *skb; + unsigned int tail; + unsigned long flags; + + netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", + (unsigned int)(queue - bp->queues), + queue->tx_tail, queue->tx_head); + + /* Prevent the queue IRQ handlers from running: each of them may call + * macb_tx_interrupt(), which in turn may call netif_wake_subqueue(). + * As explained below, we have to halt the transmission before updating + * TBQP registers so we call netif_tx_stop_all_queues() to notify the + * network engine about the macb/gem being halted. + */ + spin_lock_irqsave(&bp->lock, flags); + + /* Make sure nobody is trying to queue up new packets */ + netif_tx_stop_all_queues(bp->dev); + + /* + * Stop transmission now + * (in case we have just queued new packets) + * macb/gem must be halted to write TBQP register + */ + if (macb_halt_tx(bp)) + /* Just complain for now, reinitializing TX path can be good */ + netdev_err(bp->dev, "BUG: halt tx timed out\n"); + + /* + * Treat frames in TX queue including the ones that caused the error. + * Free transmit buffers in upper layer. + */ + for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { + u32 ctrl; + + desc = macb_tx_desc(queue, tail); + ctrl = desc->ctrl; + tx_skb = macb_tx_skb(queue, tail); + skb = tx_skb->skb; + + if (ctrl & MACB_BIT(TX_USED)) { + /* skb is set for the last buffer of the frame */ + while (!skb) { + macb_tx_unmap(bp, tx_skb); + tail++; + tx_skb = macb_tx_skb(queue, tail); + skb = tx_skb->skb; + } + + /* ctrl still refers to the first buffer descriptor + * since it's the only one written back by the hardware + */ + if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) { + netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", + macb_tx_ring_wrap(tail), skb->data); + bp->stats.tx_packets++; + bp->stats.tx_bytes += skb->len; + } + } else { + /* + * "Buffers exhausted mid-frame" errors may only happen + * if the driver is buggy, so complain loudly about those. + * Statistics are updated by hardware. 
+ */ + if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) + netdev_err(bp->dev, + "BUG: TX buffers exhausted mid-frame\n"); + + desc->ctrl = ctrl | MACB_BIT(TX_USED); + } + + macb_tx_unmap(bp, tx_skb); + } + + /* Set end of TX queue */ + desc = macb_tx_desc(queue, 0); + desc->addr = 0; + desc->ctrl = MACB_BIT(TX_USED); + + /* Make descriptor updates visible to hardware */ + wmb(); + + /* Reinitialize the TX desc queue */ + queue_writel(queue, TBQP, queue->tx_ring_dma); + /* Make TX ring reflect state of hardware */ + queue->tx_head = 0; + queue->tx_tail = 0; + + /* Housework before enabling TX IRQ */ + macb_writel(bp, TSR, macb_readl(bp, TSR)); + queue_writel(queue, IER, MACB_TX_INT_FLAGS); + + /* Now we are ready to start transmission again */ + netif_tx_start_all_queues(bp->dev); + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); + + spin_unlock_irqrestore(&bp->lock, flags); +} + +static void macb_tx_interrupt(struct macb_queue *queue) +{ + unsigned int tail; + unsigned int head; + u32 status; + struct macb *bp = queue->bp; + u16 queue_index = queue - bp->queues; + + status = macb_readl(bp, TSR); + macb_writel(bp, TSR, status); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(TCOMP)); + + netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", + (unsigned long)status); + + head = queue->tx_head; + for (tail = queue->tx_tail; tail != head; tail++) { + struct macb_tx_skb *tx_skb; + struct sk_buff *skb; + struct macb_dma_desc *desc; + u32 ctrl; + + desc = macb_tx_desc(queue, tail); + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + ctrl = desc->ctrl; + + /* TX_USED bit is only set by hardware on the very first buffer + * descriptor of the transmitted frame. + */ + if (!(ctrl & MACB_BIT(TX_USED))) + break; + + /* Process all buffers of the current transmitted frame */ + for (;; tail++) { + tx_skb = macb_tx_skb(queue, tail); + skb = tx_skb->skb; + + /* First, update TX stats if needed */ + if (skb) { + netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", + macb_tx_ring_wrap(tail), skb->data); + bp->stats.tx_packets++; + bp->stats.tx_bytes += skb->len; + } + + /* Now we can safely release resources */ + macb_tx_unmap(bp, tx_skb); + + /* skb is set only for the last buffer of the frame. + * WARNING: at this point skb has been freed by + * macb_tx_unmap(). 
+ */ + if (skb) + break; + } + } + + queue->tx_tail = tail; + if (__netif_subqueue_stopped(bp->dev, queue_index) && + CIRC_CNT(queue->tx_head, queue->tx_tail, + TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) + netif_wake_subqueue(bp->dev, queue_index); +} + +static void gem_rx_refill(struct macb *bp) +{ + unsigned int entry; + struct sk_buff *skb; + dma_addr_t paddr; + + while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { + entry = macb_rx_ring_wrap(bp->rx_prepared_head); + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + bp->rx_prepared_head++; + + if (bp->rx_skbuff[entry] == NULL) { + /* allocate sk_buff for this free entry in ring */ + skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); + if (unlikely(skb == NULL)) { + netdev_err(bp->dev, + "Unable to allocate sk_buff\n"); + break; + } + + /* now fill corresponding descriptor entry */ + paddr = dma_map_single(&bp->pdev->dev, skb->data, + bp->rx_buffer_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, paddr)) { + dev_kfree_skb(skb); + break; + } + + bp->rx_skbuff[entry] = skb; + + if (entry == RX_RING_SIZE - 1) + paddr |= MACB_BIT(RX_WRAP); + bp->rx_ring[entry].addr = paddr; + bp->rx_ring[entry].ctrl = 0; + + /* properly align Ethernet header */ + skb_reserve(skb, NET_IP_ALIGN); + } else { + bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); + bp->rx_ring[entry].ctrl = 0; + } + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", + bp->rx_prepared_head, bp->rx_tail); +} + +/* Mark DMA descriptors from begin up to and not including end as unused */ +static void discard_partial_frame(struct macb *bp, unsigned int begin, + unsigned int end) +{ + unsigned int frag; + + for (frag = begin; frag != end; frag++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + /* + * When this happens, the hardware stats registers for + * whatever caused this is updated, so we don't have to record + * anything. 
+ */ +} + +static int gem_rx(struct macb *bp, int budget) +{ + unsigned int len; + unsigned int entry; + struct sk_buff *skb; + struct macb_dma_desc *desc; + int count = 0; + + while (count < budget) { + u32 addr, ctrl; + + entry = macb_rx_ring_wrap(bp->rx_tail); + desc = &bp->rx_ring[entry]; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + + if (!(addr & MACB_BIT(RX_USED))) + break; + + bp->rx_tail++; + count++; + + if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { + netdev_err(bp->dev, + "not whole frame pointed by descriptor\n"); + bp->stats.rx_dropped++; + break; + } + skb = bp->rx_skbuff[entry]; + if (unlikely(!skb)) { + netdev_err(bp->dev, + "inconsistent Rx descriptor chain\n"); + bp->stats.rx_dropped++; + break; + } + /* now everything is ready for receiving packet */ + bp->rx_skbuff[entry] = NULL; + len = MACB_BFEXT(RX_FRMLEN, ctrl); + + netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); + + skb_put(skb, len); + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); + dma_unmap_single(&bp->pdev->dev, addr, + bp->rx_buffer_size, DMA_FROM_DEVICE); + + skb->protocol = eth_type_trans(skb, bp->dev); + skb_checksum_none_assert(skb); + if (bp->dev->features & NETIF_F_RXCSUM && + !(bp->dev->flags & IFF_PROMISC) && + GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + bp->stats.rx_packets++; + bp->stats.rx_bytes += skb->len; + +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", + skb->len, skb->csum); + print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb_mac_header(skb), 16, true); + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, + skb->data, 32, true); +#endif + + netif_receive_skb(skb); + } + + gem_rx_refill(bp); + + return count; +} + +static int macb_rx_frame(struct macb *bp, unsigned int first_frag, + unsigned int last_frag) +{ + unsigned int len; + unsigned int frag; + unsigned int offset; + struct sk_buff *skb; + struct macb_dma_desc *desc; + + desc = macb_rx_desc(bp, last_frag); + len = MACB_BFEXT(RX_FRMLEN, desc->ctrl); + + netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", + macb_rx_ring_wrap(first_frag), + macb_rx_ring_wrap(last_frag), len); + + /* + * The ethernet header starts NET_IP_ALIGN bytes into the + * first buffer. Since the header is 14 bytes, this makes the + * payload word-aligned. + * + * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy + * the two padding bytes into the skb so that we avoid hitting + * the slowpath in memcpy(), and pull them off afterwards. 
+ */ + skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); + if (!skb) { + bp->stats.rx_dropped++; + for (frag = first_frag; ; frag++) { + desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + if (frag == last_frag) + break; + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + return 1; + } + + offset = 0; + len += NET_IP_ALIGN; + skb_checksum_none_assert(skb); + skb_put(skb, len); + + for (frag = first_frag; ; frag++) { + unsigned int frag_len = bp->rx_buffer_size; + + if (offset + frag_len > len) { + BUG_ON(frag != last_frag); + frag_len = len - offset; + } + skb_copy_to_linear_data_offset(skb, offset, + macb_rx_buffer(bp, frag), frag_len); + offset += bp->rx_buffer_size; + desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); + + if (frag == last_frag) + break; + } + + /* Make descriptor updates visible to hardware */ + wmb(); + + __skb_pull(skb, NET_IP_ALIGN); + skb->protocol = eth_type_trans(skb, bp->dev); + + bp->stats.rx_packets++; + bp->stats.rx_bytes += skb->len; + netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", + skb->len, skb->csum); + netif_receive_skb(skb); + + return 0; +} + +static int macb_rx(struct macb *bp, int budget) +{ + int received = 0; + unsigned int tail; + int first_frag = -1; + + for (tail = bp->rx_tail; budget > 0; tail++) { + struct macb_dma_desc *desc = macb_rx_desc(bp, tail); + u32 addr, ctrl; + + /* Make hw descriptor updates visible to CPU */ + rmb(); + + addr = desc->addr; + ctrl = desc->ctrl; + + if (!(addr & MACB_BIT(RX_USED))) + break; + + if (ctrl & MACB_BIT(RX_SOF)) { + if (first_frag != -1) + discard_partial_frame(bp, first_frag, tail); + first_frag = tail; + } + + if (ctrl & MACB_BIT(RX_EOF)) { + int dropped; + BUG_ON(first_frag == -1); + + dropped = macb_rx_frame(bp, first_frag, tail); + first_frag = -1; + if (!dropped) { + received++; + budget--; + } + } + } + + if (first_frag != -1) + bp->rx_tail = first_frag; + else + bp->rx_tail = tail; + + return received; +} + +static int macb_poll(struct napi_struct *napi, int budget) +{ + struct macb *bp = container_of(napi, struct macb, napi); + int work_done; + u32 status; + + status = macb_readl(bp, RSR); + macb_writel(bp, RSR, status); + + work_done = 0; + + netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", + (unsigned long)status, budget); + + work_done = bp->macbgem_ops.mog_rx(bp, budget); + if (work_done < budget) { + napi_complete(napi); + + /* Packets received while interrupts were disabled */ + status = macb_readl(bp, RSR); + if (status) { + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RCOMP)); + napi_reschedule(napi); + } else { + macb_writel(bp, IER, MACB_RX_INT_FLAGS); + } + } + + /* TODO: Handle errors */ + + return work_done; +} + +static irqreturn_t macb_interrupt(int irq, void *dev_id) +{ + struct macb_queue *queue = dev_id; + struct macb *bp = queue->bp; + struct net_device *dev = bp->dev; + u32 status, ctrl; + + status = queue_readl(queue, ISR); + + if (unlikely(!status)) + return IRQ_NONE; + + spin_lock(&bp->lock); + + while (status) { + /* close possible race with dev_close */ + if (unlikely(!netif_running(dev))) { + queue_writel(queue, IDR, -1); + break; + } + + netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", + (unsigned int)(queue - bp->queues), + (unsigned long)status); + + if (status & MACB_RX_INT_FLAGS) { + /* + * There's no point taking any more interrupts + * until we have processed the buffers. 
The + * scheduling call may fail if the poll routine + * is already scheduled, so disable interrupts + * now. + */ + queue_writel(queue, IDR, MACB_RX_INT_FLAGS); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(RCOMP)); + + if (napi_schedule_prep(&bp->napi)) { + netdev_vdbg(bp->dev, "scheduling RX softirq\n"); + __napi_schedule(&bp->napi); + } + } + + if (unlikely(status & (MACB_TX_ERR_FLAGS))) { + queue_writel(queue, IDR, MACB_TX_INT_FLAGS); + schedule_work(&queue->tx_error_task); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); + + break; + } + + if (status & MACB_BIT(TCOMP)) + macb_tx_interrupt(queue); + + /* + * Link change detection isn't possible with RMII, so we'll + * add that if/when we get our hands on a full-blown MII PHY. + */ + + /* There is a hardware issue under heavy load where DMA can + * stop, this causes endless "used buffer descriptor read" + * interrupts but it can be cleared by re-enabling RX. See + * the at91 manual, section 41.3.1 or the Zynq manual + * section 16.7.4 for details. + */ + if (status & MACB_BIT(RXUBR)) { + ctrl = macb_readl(bp, NCR); + macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); + macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RXUBR)); + } + + if (status & MACB_BIT(ISR_ROVR)) { + /* We missed at least one packet */ + if (macb_is_gem(bp)) + bp->hw_stats.gem.rx_overruns++; + else + bp->hw_stats.macb.rx_overruns++; + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); + } + + if (status & MACB_BIT(HRESP)) { + /* + * TODO: Reset the hardware, and maybe move the + * netdev_err to a lower-priority context as well + * (work queue?) + */ + netdev_err(dev, "DMA bus error: HRESP not OK\n"); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(HRESP)); + } + + status = queue_readl(queue, ISR); + } + + spin_unlock(&bp->lock); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. 
+ */ +static void macb_poll_controller(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + struct macb_queue *queue; + unsigned long flags; + unsigned int q; + + local_irq_save(flags); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) + macb_interrupt(dev->irq, queue); + local_irq_restore(flags); +} +#endif + +static inline unsigned int macb_count_tx_descriptors(struct macb *bp, + unsigned int len) +{ + return (len + bp->max_tx_length - 1) / bp->max_tx_length; +} + +static unsigned int macb_tx_map(struct macb *bp, + struct macb_queue *queue, + struct sk_buff *skb) +{ + dma_addr_t mapping; + unsigned int len, entry, i, tx_head = queue->tx_head; + struct macb_tx_skb *tx_skb = NULL; + struct macb_dma_desc *desc; + unsigned int offset, size, count = 0; + unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int eof = 1; + u32 ctrl; + + /* First, map non-paged data */ + len = skb_headlen(skb); + offset = 0; + while (len) { + size = min(len, bp->max_tx_length); + entry = macb_tx_ring_wrap(tx_head); + tx_skb = &queue->tx_skb[entry]; + + mapping = dma_map_single(&bp->pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) + goto dma_error; + + /* Save info to properly release resources */ + tx_skb->skb = NULL; + tx_skb->mapping = mapping; + tx_skb->size = size; + tx_skb->mapped_as_page = false; + + len -= size; + offset += size; + count++; + tx_head++; + } + + /* Then, map paged data from fragments */ + for (f = 0; f < nr_frags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + len = skb_frag_size(frag); + offset = 0; + while (len) { + size = min(len, bp->max_tx_length); + entry = macb_tx_ring_wrap(tx_head); + tx_skb = &queue->tx_skb[entry]; + + mapping = skb_frag_dma_map(&bp->pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) + goto dma_error; + + /* Save info to properly release resources */ + tx_skb->skb = NULL; + tx_skb->mapping = mapping; + tx_skb->size = size; + tx_skb->mapped_as_page = true; + + len -= size; + offset += size; + count++; + tx_head++; + } + } + + /* Should never happen */ + if (unlikely(tx_skb == NULL)) { + netdev_err(bp->dev, "BUG! empty skb!\n"); + return 0; + } + + /* This is the last buffer of the frame: save socket buffer */ + tx_skb->skb = skb; + + /* Update TX ring: update buffer descriptors in reverse order + * to avoid race condition + */ + + /* Set 'TX_USED' bit in buffer descriptor at tx_head position + * to set the end of TX queue + */ + i = tx_head; + entry = macb_tx_ring_wrap(i); + ctrl = MACB_BIT(TX_USED); + desc = &queue->tx_ring[entry]; + desc->ctrl = ctrl; + + do { + i--; + entry = macb_tx_ring_wrap(i); + tx_skb = &queue->tx_skb[entry]; + desc = &queue->tx_ring[entry]; + + ctrl = (u32)tx_skb->size; + if (eof) { + ctrl |= MACB_BIT(TX_LAST); + eof = 0; + } + if (unlikely(entry == (TX_RING_SIZE - 1))) + ctrl |= MACB_BIT(TX_WRAP); + + /* Set TX buffer descriptor */ + desc->addr = tx_skb->mapping; + /* desc->addr must be visible to hardware before clearing + * 'TX_USED' bit in desc->ctrl. 
+ */ + wmb(); + desc->ctrl = ctrl; + } while (i != queue->tx_head); + + queue->tx_head = tx_head; + + return count; + +dma_error: + netdev_err(bp->dev, "TX DMA map failed\n"); + + for (i = queue->tx_head; i != tx_head; i++) { + tx_skb = macb_tx_skb(queue, i); + + macb_tx_unmap(bp, tx_skb); + } + + return 0; +} + +static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u16 queue_index = skb_get_queue_mapping(skb); + struct macb *bp = netdev_priv(dev); + struct macb_queue *queue = &bp->queues[queue_index]; + unsigned long flags; + unsigned int count, nr_frags, frag_size, f; + +#if defined(DEBUG) && defined(VERBOSE_DEBUG) + netdev_vdbg(bp->dev, + "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", + queue_index, skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb)); + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, 16, true); +#endif + + /* Count how many TX buffer descriptors are needed to send this + * socket buffer: skb fragments of jumbo frames may need to be + * splitted into many buffer descriptors. + */ + count = macb_count_tx_descriptors(bp, skb_headlen(skb)); + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) { + frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); + count += macb_count_tx_descriptors(bp, frag_size); + } + + spin_lock_irqsave(&bp->lock, flags); + + /* This is a hard error, log it. */ + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) { + netif_stop_subqueue(dev, queue_index); + spin_unlock_irqrestore(&bp->lock, flags); + netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", + queue->tx_head, queue->tx_tail); + return NETDEV_TX_BUSY; + } + + /* Map socket buffer for DMA transfer */ + if (!macb_tx_map(bp, queue, skb)) { + dev_kfree_skb_any(skb); + goto unlock; + } + + /* Make newly initialized descriptor visible to hardware */ + wmb(); + + skb_tx_timestamp(skb); + + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); + + if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1) + netif_stop_subqueue(dev, queue_index); + +unlock: + spin_unlock_irqrestore(&bp->lock, flags); + + return NETDEV_TX_OK; +} + +static void macb_init_rx_buffer_size(struct macb *bp, size_t size) +{ + if (!macb_is_gem(bp)) { + bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; + } else { + bp->rx_buffer_size = size; + + if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { + netdev_dbg(bp->dev, + "RX buffer must be multiple of %d bytes, expanding\n", + RX_BUFFER_MULTIPLE); + bp->rx_buffer_size = + roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); + } + } + + netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n", + bp->dev->mtu, bp->rx_buffer_size); +} + +static void gem_free_rx_buffers(struct macb *bp) +{ + struct sk_buff *skb; + struct macb_dma_desc *desc; + dma_addr_t addr; + int i; + + if (!bp->rx_skbuff) + return; + + for (i = 0; i < RX_RING_SIZE; i++) { + skb = bp->rx_skbuff[i]; + + if (skb == NULL) + continue; + + desc = &bp->rx_ring[i]; + addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); + dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + skb = NULL; + } + + kfree(bp->rx_skbuff); + bp->rx_skbuff = NULL; +} + +static void macb_free_rx_buffers(struct macb *bp) +{ + if (bp->rx_buffers) { + dma_free_coherent(&bp->pdev->dev, + RX_RING_SIZE * bp->rx_buffer_size, + bp->rx_buffers, bp->rx_buffers_dma); + bp->rx_buffers = NULL; + } +} + +static void macb_free_consistent(struct macb *bp) +{ + struct 
macb_queue *queue; + unsigned int q; + + bp->macbgem_ops.mog_free_rx_buffers(bp); + if (bp->rx_ring) { + dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, + bp->rx_ring, bp->rx_ring_dma); + bp->rx_ring = NULL; + } + + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + kfree(queue->tx_skb); + queue->tx_skb = NULL; + if (queue->tx_ring) { + dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, + queue->tx_ring, queue->tx_ring_dma); + queue->tx_ring = NULL; + } + } +} + +static int gem_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * sizeof(struct sk_buff *); + bp->rx_skbuff = kzalloc(size, GFP_KERNEL); + if (!bp->rx_skbuff) + return -ENOMEM; + else + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + RX_RING_SIZE, bp->rx_skbuff); + return 0; +} + +static int macb_alloc_rx_buffers(struct macb *bp) +{ + int size; + + size = RX_RING_SIZE * bp->rx_buffer_size; + bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, + &bp->rx_buffers_dma, GFP_KERNEL); + if (!bp->rx_buffers) + return -ENOMEM; + else + netdev_dbg(bp->dev, + "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + return 0; +} + +static int macb_alloc_consistent(struct macb *bp) +{ + struct macb_queue *queue; + unsigned int q; + int size; + + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + size = TX_RING_BYTES; + queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, + &queue->tx_ring_dma, + GFP_KERNEL); + if (!queue->tx_ring) + goto out_err; + netdev_dbg(bp->dev, + "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", + q, size, (unsigned long)queue->tx_ring_dma, + queue->tx_ring); + + size = TX_RING_SIZE * sizeof(struct macb_tx_skb); + queue->tx_skb = kmalloc(size, GFP_KERNEL); + if (!queue->tx_skb) + goto out_err; + } + + size = RX_RING_BYTES; + bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, + &bp->rx_ring_dma, GFP_KERNEL); + if (!bp->rx_ring) + goto out_err; + netdev_dbg(bp->dev, + "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); + + if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) + goto out_err; + + return 0; + +out_err: + macb_free_consistent(bp); + return -ENOMEM; +} + +static void gem_init_rings(struct macb *bp) +{ + struct macb_queue *queue; + unsigned int q; + int i; + + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + for (i = 0; i < TX_RING_SIZE; i++) { + queue->tx_ring[i].addr = 0; + queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); + } + queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + queue->tx_head = 0; + queue->tx_tail = 0; + } + + bp->rx_tail = 0; + bp->rx_prepared_head = 0; + + gem_rx_refill(bp); +} + +static void macb_init_rings(struct macb *bp) +{ + int i; + dma_addr_t addr; + + addr = bp->rx_buffers_dma; + for (i = 0; i < RX_RING_SIZE; i++) { + bp->rx_ring[i].addr = addr; + bp->rx_ring[i].ctrl = 0; + addr += bp->rx_buffer_size; + } + bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); + + for (i = 0; i < TX_RING_SIZE; i++) { + bp->queues[0].tx_ring[i].addr = 0; + bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); + } + bp->queues[0].tx_head = 0; + bp->queues[0].tx_tail = 0; + bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + + bp->rx_tail = 0; +} + +static void macb_reset_hw(struct macb *bp) +{ + struct macb_queue *queue; + unsigned int q; + + /* + * Disable RX and TX (XXX: Should we halt the transmission + * more gracefully?) 
+ */ + macb_writel(bp, NCR, 0); + + /* Clear the stats registers (XXX: Update stats first?) */ + macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); + + /* Clear all status flags */ + macb_writel(bp, TSR, -1); + macb_writel(bp, RSR, -1); + + /* Disable all interrupts */ + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + queue_writel(queue, IDR, -1); + queue_readl(queue, ISR); + } +} + +static u32 gem_mdc_clk_div(struct macb *bp) +{ + u32 config; + unsigned long pclk_hz = clk_get_rate(bp->pclk); + + if (pclk_hz <= 20000000) + config = GEM_BF(CLK, GEM_CLK_DIV8); + else if (pclk_hz <= 40000000) + config = GEM_BF(CLK, GEM_CLK_DIV16); + else if (pclk_hz <= 80000000) + config = GEM_BF(CLK, GEM_CLK_DIV32); + else if (pclk_hz <= 120000000) + config = GEM_BF(CLK, GEM_CLK_DIV48); + else if (pclk_hz <= 160000000) + config = GEM_BF(CLK, GEM_CLK_DIV64); + else + config = GEM_BF(CLK, GEM_CLK_DIV96); + + return config; +} + +static u32 macb_mdc_clk_div(struct macb *bp) +{ + u32 config; + unsigned long pclk_hz; + + if (macb_is_gem(bp)) + return gem_mdc_clk_div(bp); + + pclk_hz = clk_get_rate(bp->pclk); + if (pclk_hz <= 20000000) + config = MACB_BF(CLK, MACB_CLK_DIV8); + else if (pclk_hz <= 40000000) + config = MACB_BF(CLK, MACB_CLK_DIV16); + else if (pclk_hz <= 80000000) + config = MACB_BF(CLK, MACB_CLK_DIV32); + else + config = MACB_BF(CLK, MACB_CLK_DIV64); + + return config; +} + +/* + * Get the DMA bus width field of the network configuration register that we + * should program. We find the width from decoding the design configuration + * register to find the maximum supported data bus width. + */ +static u32 macb_dbw(struct macb *bp) +{ + if (!macb_is_gem(bp)) + return 0; + + switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { + case 4: + return GEM_BF(DBW, GEM_DBW128); + case 2: + return GEM_BF(DBW, GEM_DBW64); + case 1: + default: + return GEM_BF(DBW, GEM_DBW32); + } +} + +/* + * Configure the receive DMA engine + * - use the correct receive buffer size + * - set best burst length for DMA operations + * (if not supported by FIFO, it will fallback to default) + * - set both rx/tx packet buffers to full memory size + * These are configurable parameters for GEM. + */ +static void macb_configure_dma(struct macb *bp) +{ + u32 dmacfg; + u32 tmp, ncr; + + if (macb_is_gem(bp)) { + dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); + dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); + if (bp->dma_burst_length) + dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); + dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); + dmacfg &= ~GEM_BIT(ENDIA_PKT); + + /* Find the CPU endianness by using the loopback bit of net_ctrl + * register. save it first. When the CPU is in big endian we + * need to program swaped mode for management descriptor access. 
+ */ + ncr = macb_readl(bp, NCR); + __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); + tmp = __raw_readl(bp->regs + MACB_NCR); + + if (tmp == MACB_BIT(LLB)) + dmacfg &= ~GEM_BIT(ENDIA_DESC); + else + dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ + + /* Restore net_ctrl */ + macb_writel(bp, NCR, ncr); + + if (bp->dev->features & NETIF_F_HW_CSUM) + dmacfg |= GEM_BIT(TXCOEN); + else + dmacfg &= ~GEM_BIT(TXCOEN); + netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", + dmacfg); + gem_writel(bp, DMACFG, dmacfg); + } +} + +static void macb_init_hw(struct macb *bp) +{ + struct macb_queue *queue; + unsigned int q; + + u32 config; + + macb_reset_hw(bp); + macb_set_hwaddr(bp); + + config = macb_mdc_clk_div(bp); + config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ + config |= MACB_BIT(PAE); /* PAuse Enable */ + config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ + config |= MACB_BIT(BIG); /* Receive oversized frames */ + if (bp->dev->flags & IFF_PROMISC) + config |= MACB_BIT(CAF); /* Copy All Frames */ + else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) + config |= GEM_BIT(RXCOEN); + if (!(bp->dev->flags & IFF_BROADCAST)) + config |= MACB_BIT(NBC); /* No BroadCast */ + config |= macb_dbw(bp); + macb_writel(bp, NCFGR, config); + bp->speed = SPEED_10; + bp->duplex = DUPLEX_HALF; + + macb_configure_dma(bp); + + /* Initialize TX and RX buffers */ + macb_writel(bp, RBQP, bp->rx_ring_dma); + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + queue_writel(queue, TBQP, queue->tx_ring_dma); + + /* Enable interrupts */ + queue_writel(queue, IER, + MACB_RX_INT_FLAGS | + MACB_TX_INT_FLAGS | + MACB_BIT(HRESP)); + } + + /* Enable TX and RX */ + macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); +} + +/* + * The hash address register is 64 bits long and takes up two + * locations in the memory map. The least significant bits are stored + * in EMAC_HSL and the most significant bits in EMAC_HSH. + * + * The unicast hash enable and the multicast hash enable bits in the + * network configuration register enable the reception of hash matched + * frames. The destination address is reduced to a 6 bit index into + * the 64 bit hash register using the following hash function. The + * hash function is an exclusive or of every sixth bit of the + * destination address. + * + * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] + * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] + * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] + * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] + * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] + * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] + * + * da[0] represents the least significant bit of the first byte + * received, that is, the multicast/unicast indicator, and da[47] + * represents the most significant bit of the last byte received. If + * the hash index, hi[n], points to a bit that is set in the hash + * register then the frame will be matched according to whether the + * frame is multicast or unicast. A multicast match will be signalled + * if the multicast hash enable bit is set, da[0] is 1 and the hash + * index points to a bit set in the hash register. A unicast match + * will be signalled if the unicast hash enable bit is set, da[0] is 0 + * and the hash index points to a bit set in the hash register. 
To + * receive all multicast frames, the hash register should be set with + * all ones and the multicast hash enable bit should be set in the + * network configuration register. + */ + +static inline int hash_bit_value(int bitnr, __u8 *addr) +{ + if (addr[bitnr / 8] & (1 << (bitnr % 8))) + return 1; + return 0; +} + +/* + * Return the hash index value for the specified address. + */ +static int hash_get_index(__u8 *addr) +{ + int i, j, bitval; + int hash_index = 0; + + for (j = 0; j < 6; j++) { + for (i = 0, bitval = 0; i < 8; i++) + bitval ^= hash_bit_value(i * 6 + j, addr); + + hash_index |= (bitval << j); + } + + return hash_index; +} + +/* + * Add multicast addresses to the internal multicast-hash table. + */ +static void macb_sethashtable(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + unsigned long mc_filter[2]; + unsigned int bitnr; + struct macb *bp = netdev_priv(dev); + + mc_filter[0] = mc_filter[1] = 0; + + netdev_for_each_mc_addr(ha, dev) { + bitnr = hash_get_index(ha->addr); + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); + } + + macb_or_gem_writel(bp, HRB, mc_filter[0]); + macb_or_gem_writel(bp, HRT, mc_filter[1]); +} + +/* + * Enable/Disable promiscuous and multicast modes. + */ +static void macb_set_rx_mode(struct net_device *dev) +{ + unsigned long cfg; + struct macb *bp = netdev_priv(dev); + + cfg = macb_readl(bp, NCFGR); + + if (dev->flags & IFF_PROMISC) { + /* Enable promiscuous mode */ + cfg |= MACB_BIT(CAF); + + /* Disable RX checksum offload */ + if (macb_is_gem(bp)) + cfg &= ~GEM_BIT(RXCOEN); + } else { + /* Disable promiscuous mode */ + cfg &= ~MACB_BIT(CAF); + + /* Enable RX checksum offload only if requested */ + if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) + cfg |= GEM_BIT(RXCOEN); + } + + if (dev->flags & IFF_ALLMULTI) { + /* Enable all multicast mode */ + macb_or_gem_writel(bp, HRB, -1); + macb_or_gem_writel(bp, HRT, -1); + cfg |= MACB_BIT(NCFGR_MTI); + } else if (!netdev_mc_empty(dev)) { + /* Enable specific multicasts */ + macb_sethashtable(dev); + cfg |= MACB_BIT(NCFGR_MTI); + } else if (dev->flags & (~IFF_ALLMULTI)) { + /* Disable all multicast mode */ + macb_or_gem_writel(bp, HRB, 0); + macb_or_gem_writel(bp, HRT, 0); + cfg &= ~MACB_BIT(NCFGR_MTI); + } + + macb_writel(bp, NCFGR, cfg); +} + +static int macb_open(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; + int err; + + netdev_dbg(bp->dev, "open\n"); + + /* carrier starts down */ + netif_carrier_off(dev); + + /* if the phy is not yet register, retry later*/ + if (!bp->phy_dev) + return -EAGAIN; + + /* RX buffers initialization */ + macb_init_rx_buffer_size(bp, bufsz); + + err = macb_alloc_consistent(bp); + if (err) { + netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", + err); + return err; + } + + napi_enable(&bp->napi); + + bp->macbgem_ops.mog_init_rings(bp); + macb_init_hw(bp); + + /* schedule a link state check */ + phy_start(bp->phy_dev); + + netif_tx_start_all_queues(dev); + + return 0; +} + +static int macb_close(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + unsigned long flags; + + netif_tx_stop_all_queues(dev); + napi_disable(&bp->napi); + + if (bp->phy_dev) + phy_stop(bp->phy_dev); + + spin_lock_irqsave(&bp->lock, flags); + macb_reset_hw(bp); + netif_carrier_off(dev); + spin_unlock_irqrestore(&bp->lock, flags); + + macb_free_consistent(bp); + + return 0; +} + +static void gem_update_stats(struct macb *bp) +{ + int i; + u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 
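+	/* Walk the GEM statistics registers in the order defined by
+	 * gem_statistics[], adding each 32-bit hardware counter into both
+	 * the ethtool snapshot and the running hw_stats copy; the 64-bit
+	 * octet counters are read as a low/high register pair in the loop
+	 * below.
+	 */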
+ + for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { + u32 offset = gem_statistics[i].offset; + u64 val = readl_relaxed(bp->regs + offset); + + bp->ethtool_stats[i] += val; + *p += val; + + if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { + /* Add GEM_OCTTXH, GEM_OCTRXH */ + val = readl_relaxed(bp->regs + offset + 4); + bp->ethtool_stats[i] += ((u64)val) << 32; + *(++p) += val; + } + } +} + +static struct net_device_stats *gem_get_stats(struct macb *bp) +{ + struct gem_stats *hwstat = &bp->hw_stats.gem; + struct net_device_stats *nstat = &bp->stats; + + gem_update_stats(bp); + + nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + + hwstat->rx_alignment_errors + + hwstat->rx_resource_errors + + hwstat->rx_overruns + + hwstat->rx_oversize_frames + + hwstat->rx_jabbers + + hwstat->rx_undersized_frames + + hwstat->rx_length_field_frame_errors); + nstat->tx_errors = (hwstat->tx_late_collisions + + hwstat->tx_excessive_collisions + + hwstat->tx_underrun + + hwstat->tx_carrier_sense_errors); + nstat->multicast = hwstat->rx_multicast_frames; + nstat->collisions = (hwstat->tx_single_collision_frames + + hwstat->tx_multiple_collision_frames + + hwstat->tx_excessive_collisions); + nstat->rx_length_errors = (hwstat->rx_oversize_frames + + hwstat->rx_jabbers + + hwstat->rx_undersized_frames + + hwstat->rx_length_field_frame_errors); + nstat->rx_over_errors = hwstat->rx_resource_errors; + nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; + nstat->rx_frame_errors = hwstat->rx_alignment_errors; + nstat->rx_fifo_errors = hwstat->rx_overruns; + nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; + nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; + nstat->tx_fifo_errors = hwstat->tx_underrun; + + return nstat; +} + +static void gem_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct macb *bp; + + bp = netdev_priv(dev); + gem_update_stats(bp); + memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN); +} + +static int gem_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return GEM_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) +{ + int i; + + switch (sset) { + case ETH_SS_STATS: + for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN) + memcpy(p, gem_statistics[i].stat_string, + ETH_GSTRING_LEN); + break; + } +} + +static struct net_device_stats *macb_get_stats(struct net_device *dev) +{ + struct macb *bp = netdev_priv(dev); + struct net_device_stats *nstat = &bp->stats; + struct macb_stats *hwstat = &bp->hw_stats.macb; + + if (macb_is_gem(bp)) + return gem_get_stats(bp); + + /* read stats from hardware */ + macb_update_stats(bp); + + /* Convert HW stats into netdevice stats */ + nstat->rx_errors = (hwstat->rx_fcs_errors + + hwstat->rx_align_errors + + hwstat->rx_resource_errors + + hwstat->rx_overruns + + hwstat->rx_oversize_pkts + + hwstat->rx_jabbers + + hwstat->rx_undersize_pkts + + hwstat->rx_length_mismatch); + nstat->tx_errors = (hwstat->tx_late_cols + + hwstat->tx_excessive_cols + + hwstat->tx_underruns + + hwstat->tx_carrier_errors + + hwstat->sqe_test_errors); + nstat->collisions = (hwstat->tx_single_cols + + hwstat->tx_multiple_cols + + hwstat->tx_excessive_cols); + nstat->rx_length_errors = (hwstat->rx_oversize_pkts + + hwstat->rx_jabbers + + hwstat->rx_undersize_pkts + + hwstat->rx_length_mismatch); + nstat->rx_over_errors = hwstat->rx_resource_errors + + hwstat->rx_overruns; + 
nstat->rx_crc_errors = hwstat->rx_fcs_errors; + nstat->rx_frame_errors = hwstat->rx_align_errors; + nstat->rx_fifo_errors = hwstat->rx_overruns; + /* XXX: What does "missed" mean? */ + nstat->tx_aborted_errors = hwstat->tx_excessive_cols; + nstat->tx_carrier_errors = hwstat->tx_carrier_errors; + nstat->tx_fifo_errors = hwstat->tx_underruns; + /* Don't know about heartbeat or window errors... */ + + return nstat; +} + +static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static int macb_get_regs_len(struct net_device *netdev) +{ + return MACB_GREGS_NBR * sizeof(u32); +} + +static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct macb *bp = netdev_priv(dev); + unsigned int tail, head; + u32 *regs_buff = p; + + regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) + | MACB_GREGS_VERSION; + + tail = macb_tx_ring_wrap(bp->queues[0].tx_tail); + head = macb_tx_ring_wrap(bp->queues[0].tx_head); + + regs_buff[0] = macb_readl(bp, NCR); + regs_buff[1] = macb_or_gem_readl(bp, NCFGR); + regs_buff[2] = macb_readl(bp, NSR); + regs_buff[3] = macb_readl(bp, TSR); + regs_buff[4] = macb_readl(bp, RBQP); + regs_buff[5] = macb_readl(bp, TBQP); + regs_buff[6] = macb_readl(bp, RSR); + regs_buff[7] = macb_readl(bp, IMR); + + regs_buff[8] = tail; + regs_buff[9] = head; + regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); + regs_buff[11] = macb_tx_dma(&bp->queues[0], head); + + regs_buff[12] = macb_or_gem_readl(bp, USRIO); + if (macb_is_gem(bp)) { + regs_buff[13] = gem_readl(bp, DMACFG); + } +} + +static const struct ethtool_ops macb_ethtool_ops = { + .get_settings = macb_get_settings, + .set_settings = macb_set_settings, + .get_regs_len = macb_get_regs_len, + .get_regs = macb_get_regs, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct ethtool_ops gem_ethtool_ops = { + .get_settings = macb_get_settings, + .set_settings = macb_set_settings, + .get_regs_len = macb_get_regs_len, + .get_regs = macb_get_regs, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, + .get_ethtool_stats = gem_get_ethtool_stats, + .get_strings = gem_get_ethtool_strings, + .get_sset_count = gem_get_sset_count, +}; + +static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct macb *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +static int macb_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct macb *bp = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + /* TX checksum offload */ + if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) { + u32 dmacfg; + + dmacfg = gem_readl(bp, DMACFG); + if (features & NETIF_F_HW_CSUM) + dmacfg |= GEM_BIT(TXCOEN); + else + dmacfg &= ~GEM_BIT(TXCOEN); + gem_writel(bp, DMACFG, dmacfg); + } + + /* RX checksum offload */ + if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) { + u32 netcfg; + + netcfg = gem_readl(bp, NCFGR); 
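+		/* Keep RXCOEN consistent with macb_init_hw() and
+		 * macb_set_rx_mode(): RX checksum offload is only enabled
+		 * while the interface is not promiscuous.
+		 */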
+ if (features & NETIF_F_RXCSUM && + !(netdev->flags & IFF_PROMISC)) + netcfg |= GEM_BIT(RXCOEN); + else + netcfg &= ~GEM_BIT(RXCOEN); + gem_writel(bp, NCFGR, netcfg); + } + + return 0; +} + +static const struct net_device_ops macb_netdev_ops = { + .ndo_open = macb_open, + .ndo_stop = macb_close, + .ndo_start_xmit = macb_start_xmit, + .ndo_set_rx_mode = macb_set_rx_mode, + .ndo_get_stats = macb_get_stats, + .ndo_do_ioctl = macb_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = macb_poll_controller, +#endif + .ndo_set_features = macb_set_features, +}; + +/* + * Configure peripheral capabilities according to device tree + * and integration options used + */ +static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) +{ + u32 dcfg; + + if (dt_conf) + bp->caps = dt_conf->caps; + + if (macb_is_gem_hw(bp->regs)) { + bp->caps |= MACB_CAPS_MACB_IS_GEM; + + dcfg = gem_readl(bp, DCFG1); + if (GEM_BFEXT(IRQCOR, dcfg) == 0) + bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; + dcfg = gem_readl(bp, DCFG2); + if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) + bp->caps |= MACB_CAPS_FIFO_MODE; + } + + netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); +} + +static void macb_probe_queues(void __iomem *mem, + unsigned int *queue_mask, + unsigned int *num_queues) +{ + unsigned int hw_q; + + *queue_mask = 0x1; + *num_queues = 1; + + /* is it macb or gem ? + * + * We need to read directly from the hardware here because + * we are early in the probe process and don't have the + * MACB_CAPS_MACB_IS_GEM flag positioned + */ + if (!macb_is_gem_hw(mem)) + return; + + /* bit 0 is never set but queue 0 always exists */ + *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; + + *queue_mask |= 0x1; + + for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) + if (*queue_mask & (1 << hw_q)) + (*num_queues)++; +} + +static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, + struct clk **hclk, struct clk **tx_clk) +{ + int err; + + *pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(*pclk)) { + err = PTR_ERR(*pclk); + dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); + return err; + } + + *hclk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(*hclk)) { + err = PTR_ERR(*hclk); + dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); + return err; + } + + *tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); + if (IS_ERR(*tx_clk)) + *tx_clk = NULL; + + err = clk_prepare_enable(*pclk); + if (err) { + dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); + return err; + } + + err = clk_prepare_enable(*hclk); + if (err) { + dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); + goto err_disable_pclk; + } + + err = clk_prepare_enable(*tx_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); + goto err_disable_hclk; + } + + return 0; + +err_disable_hclk: + clk_disable_unprepare(*hclk); + +err_disable_pclk: + clk_disable_unprepare(*pclk); + + return err; +} + +static int macb_init(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + unsigned int hw_q, q; + struct macb *bp = netdev_priv(dev); + struct macb_queue *queue; + int err; + u32 val; + + /* set the queue register mapping once for all: queue0 has a special + * register mapping but we don't want to test the queue index then + * compute the corresponding register offset at run time. 
+ */ + for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { + if (!(bp->queue_mask & (1 << hw_q))) + continue; + + queue = &bp->queues[q]; + queue->bp = bp; + if (hw_q) { + queue->ISR = GEM_ISR(hw_q - 1); + queue->IER = GEM_IER(hw_q - 1); + queue->IDR = GEM_IDR(hw_q - 1); + queue->IMR = GEM_IMR(hw_q - 1); + queue->TBQP = GEM_TBQP(hw_q - 1); + } else { + /* queue0 uses legacy registers */ + queue->ISR = MACB_ISR; + queue->IER = MACB_IER; + queue->IDR = MACB_IDR; + queue->IMR = MACB_IMR; + queue->TBQP = MACB_TBQP; + } + + /* get irq: here we use the linux queue index, not the hardware + * queue index. the queue irq definitions in the device tree + * must remove the optional gaps that could exist in the + * hardware queue mask. + */ + queue->irq = platform_get_irq(pdev, q); + err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, + IRQF_SHARED, dev->name, queue); + if (err) { + dev_err(&pdev->dev, + "Unable to request IRQ %d (error %d)\n", + queue->irq, err); + return err; + } + + INIT_WORK(&queue->tx_error_task, macb_tx_error_task); + q++; + } + + dev->netdev_ops = &macb_netdev_ops; + netif_napi_add(dev, &bp->napi, macb_poll, 64); + + /* setup appropriated routines according to adapter type */ + if (macb_is_gem(bp)) { + bp->max_tx_length = GEM_MAX_TX_LEN; + bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = gem_init_rings; + bp->macbgem_ops.mog_rx = gem_rx; + dev->ethtool_ops = &gem_ethtool_ops; + } else { + bp->max_tx_length = MACB_MAX_TX_LEN; + bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; + bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; + bp->macbgem_ops.mog_init_rings = macb_init_rings; + bp->macbgem_ops.mog_rx = macb_rx; + dev->ethtool_ops = &macb_ethtool_ops; + } + + /* Set features */ + dev->hw_features = NETIF_F_SG; + /* Checksum offload is only available on gem with packet buffer */ + if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) + dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + if (bp->caps & MACB_CAPS_SG_DISABLED) + dev->hw_features &= ~NETIF_F_SG; + dev->features = dev->hw_features; + + val = 0; + if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + val = GEM_BIT(RGMII); + else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(RMII); + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + val = MACB_BIT(MII); + + if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) + val |= MACB_BIT(CLKEN); + + macb_or_gem_writel(bp, USRIO, val); + + /* Set MII management clock divider */ + val = macb_mdc_clk_div(bp); + val |= macb_dbw(bp); + macb_writel(bp, NCFGR, val); + + return 0; +} + +#if defined(CONFIG_OF) +/* 1518 rounded up */ +#define AT91ETHER_MAX_RBUFF_SZ 0x600 +/* max number of receive buffers */ +#define AT91ETHER_MAX_RX_DESCR 9 + +/* Initialize and start the Receiver and Transmit subsystems */ +static int at91ether_start(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + dma_addr_t addr; + u32 ctl; + int i; + + lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, + (AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc)), + &lp->rx_ring_dma, GFP_KERNEL); + if (!lp->rx_ring) + return -ENOMEM; + + lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + AT91ETHER_MAX_RBUFF_SZ, + &lp->rx_buffers_dma, GFP_KERNEL); + if (!lp->rx_buffers) { + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + sizeof(struct 
macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + return -ENOMEM; + } + + addr = lp->rx_buffers_dma; + for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { + lp->rx_ring[i].addr = addr; + lp->rx_ring[i].ctrl = 0; + addr += AT91ETHER_MAX_RBUFF_SZ; + } + + /* Set the Wrap bit on the last descriptor */ + lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); + + /* Reset buffer index */ + lp->rx_tail = 0; + + /* Program address of descriptor list in Rx Buffer Queue register */ + macb_writel(lp, RBQP, lp->rx_ring_dma); + + /* Enable Receive and Transmit */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); + + return 0; +} + +/* Open the ethernet interface */ +static int at91ether_open(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + u32 ctl; + int ret; + + /* Clear internal statistics */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT)); + + macb_set_hwaddr(lp); + + ret = at91ether_start(dev); + if (ret) + return ret; + + /* Enable MAC interrupts */ + macb_writel(lp, IER, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + /* schedule a link state check */ + phy_start(lp->phy_dev); + + netif_start_queue(dev); + + return 0; +} + +/* Close the interface */ +static int at91ether_close(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + u32 ctl; + + /* Disable Receiver and Transmitter */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); + + /* Disable MAC interrupts */ + macb_writel(lp, IDR, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + netif_stop_queue(dev); + + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + sizeof(struct macb_dma_desc), + lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, + lp->rx_buffers, lp->rx_buffers_dma); + lp->rx_buffers = NULL; + + return 0; +} + +/* Transmit packet */ +static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + + if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { + netif_stop_queue(dev); + + /* Store packet information (to free when Tx completed) */ + lp->skb = skb; + lp->skb_length = skb->len; + lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, + DMA_TO_DEVICE); + + /* Set address of the data in the Transmit Address register */ + macb_writel(lp, TAR, lp->skb_physaddr); + /* Set length of the packet in the Transmit Control register */ + macb_writel(lp, TCR, skb->len); + + } else { + netdev_err(dev, "%s called, but device is busy!\n", __func__); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +/* Extract received frame from buffer descriptors and sent to upper layers. 
+ * (Called from interrupt context) + */ +static void at91ether_rx(struct net_device *dev) +{ + struct macb *lp = netdev_priv(dev); + unsigned char *p_recv; + struct sk_buff *skb; + unsigned int pktlen; + + while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { + p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; + pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); + skb = netdev_alloc_skb(dev, pktlen + 2); + if (skb) { + skb_reserve(skb, 2); + memcpy(skb_put(skb, pktlen), p_recv, pktlen); + + skb->protocol = eth_type_trans(skb, dev); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pktlen; + netif_rx(skb); + } else { + lp->stats.rx_dropped++; + } + + if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) + lp->stats.multicast++; + + /* reset ownership bit */ + lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); + + /* wrap after last buffer */ + if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) + lp->rx_tail = 0; + else + lp->rx_tail++; + } +} + +/* MAC interrupt handler */ +static irqreturn_t at91ether_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct macb *lp = netdev_priv(dev); + u32 intstatus, ctl; + + /* MAC Interrupt Status register indicates what interrupts are pending. + * It is automatically cleared once read. + */ + intstatus = macb_readl(lp, ISR); + + /* Receive complete */ + if (intstatus & MACB_BIT(RCOMP)) + at91ether_rx(dev); + + /* Transmit complete */ + if (intstatus & MACB_BIT(TCOMP)) { + /* The TCOM bit is set even if the transmission failed */ + if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) + lp->stats.tx_errors++; + + if (lp->skb) { + dev_kfree_skb_irq(lp->skb); + lp->skb = NULL; + dma_unmap_single(NULL, lp->skb_physaddr, + lp->skb_length, DMA_TO_DEVICE); + lp->stats.tx_packets++; + lp->stats.tx_bytes += lp->skb_length; + } + netif_wake_queue(dev); + } + + /* Work-around for EMAC Errata section 41.3.1 */ + if (intstatus & MACB_BIT(RXUBR)) { + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); + macb_writel(lp, NCR, ctl | MACB_BIT(RE)); + } + + if (intstatus & MACB_BIT(ISR_ROVR)) + netdev_err(dev, "ROVR error\n"); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void at91ether_poll_controller(struct net_device *dev) +{ + unsigned long flags; + + local_irq_save(flags); + at91ether_interrupt(dev->irq, dev); + local_irq_restore(flags); +} +#endif + +static const struct net_device_ops at91ether_netdev_ops = { + .ndo_open = at91ether_open, + .ndo_stop = at91ether_close, + .ndo_start_xmit = at91ether_start_xmit, + .ndo_get_stats = macb_get_stats, + .ndo_set_rx_mode = macb_set_rx_mode, + .ndo_set_mac_address = eth_mac_addr, + .ndo_do_ioctl = macb_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = at91ether_poll_controller, +#endif +}; + +static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, + struct clk **hclk, struct clk **tx_clk) +{ + int err; + + *hclk = NULL; + *tx_clk = NULL; + + *pclk = devm_clk_get(&pdev->dev, "ether_clk"); + if (IS_ERR(*pclk)) + return PTR_ERR(*pclk); + + err = clk_prepare_enable(*pclk); + if (err) { + dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); + return err; + } + + return 0; +} + +static int at91ether_init(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(dev); + int err; + u32 reg; + + dev->netdev_ops = &at91ether_netdev_ops; + 
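+	/* The RM9200 EMAC variant has its own netdev ops but reuses the
+	 * plain MACB ethtool ops assigned below.
+	 */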
dev->ethtool_ops = &macb_ethtool_ops; + + err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, + 0, dev->name, dev); + if (err) + return err; + + macb_writel(bp, NCR, 0); + + reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG); + if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) + reg |= MACB_BIT(RM9200_RMII); + + macb_writel(bp, NCFGR, reg); + + return 0; +} + +static const struct macb_config at91sam9260_config = { + .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config pc302gem_config = { + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config sama5d3_config = { + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config sama5d4_config = { + .caps = 0, + .dma_burst_length = 4, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct macb_config emac_config = { + .clk_init = at91ether_clk_init, + .init = at91ether_init, +}; + +static const struct macb_config zynq_config = { + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | + MACB_CAPS_NO_GIGABIT_HALF, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, +}; + +static const struct of_device_id macb_dt_ids[] = { + { .compatible = "cdns,at32ap7000-macb" }, + { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, + { .compatible = "cdns,macb" }, + { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, + { .compatible = "cdns,gem", .data = &pc302gem_config }, + { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, + { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, + { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, + { .compatible = "cdns,emac", .data = &emac_config }, + { .compatible = "cdns,zynq-gem", .data = &zynq_config }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, macb_dt_ids); +#endif /* CONFIG_OF */ + +static int macb_probe(struct platform_device *pdev) +{ + int (*clk_init)(struct platform_device *, struct clk **, + struct clk **, struct clk **) + = macb_clk_init; + int (*init)(struct platform_device *) = macb_init; + struct device_node *np = pdev->dev.of_node; + const struct macb_config *macb_config = NULL; + struct clk *pclk, *hclk, *tx_clk; + unsigned int queue_mask, num_queues; + struct macb_platform_data *pdata; + struct phy_device *phydev; + struct net_device *dev; + struct resource *regs; + void __iomem *mem; + const char *mac; + struct macb *bp; + int err; + + if (np) { + const struct of_device_id *match; + + match = of_match_node(macb_dt_ids, np); + if (match && match->data) { + macb_config = match->data; + clk_init = macb_config->clk_init; + init = macb_config->init; + } + } + + err = clk_init(pdev, &pclk, &hclk, &tx_clk); + if (err) + return err; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mem = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(mem)) { + err = PTR_ERR(mem); + goto err_disable_clocks; + } + + macb_probe_queues(mem, &queue_mask, &num_queues); + dev = alloc_etherdev_mq(sizeof(*bp), num_queues); + if (!dev) { + err = -ENOMEM; + goto err_disable_clocks; + } + + dev->base_addr = regs->start; + + SET_NETDEV_DEV(dev, &pdev->dev); + + bp = netdev_priv(dev); + bp->pdev = pdev; + bp->dev = dev; + bp->regs = mem; 
+ bp->num_queues = num_queues; + bp->queue_mask = queue_mask; + if (macb_config) + bp->dma_burst_length = macb_config->dma_burst_length; + bp->pclk = pclk; + bp->hclk = hclk; + bp->tx_clk = tx_clk; + spin_lock_init(&bp->lock); + + /* setup capabilities */ + macb_configure_caps(bp, macb_config); + + platform_set_drvdata(pdev, dev); + + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq < 0) { + err = dev->irq; + goto err_disable_clocks; + } + + mac = of_get_mac_address(np); + if (mac) + memcpy(bp->dev->dev_addr, mac, ETH_ALEN); + else + macb_get_hwaddr(bp); + + err = of_get_phy_mode(np); + if (err < 0) { + pdata = dev_get_platdata(&pdev->dev); + if (pdata && pdata->is_rmii) + bp->phy_interface = PHY_INTERFACE_MODE_RMII; + else + bp->phy_interface = PHY_INTERFACE_MODE_MII; + } else { + bp->phy_interface = err; + } + + /* IP specific init */ + err = init(pdev); + if (err) + goto err_out_free_netdev; + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_unregister_netdev; + } + + err = macb_mii_init(bp); + if (err) + goto err_out_unregister_netdev; + + netif_carrier_off(dev); + + netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", + macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), + dev->base_addr, dev->irq, dev->dev_addr); + + phydev = bp->phy_dev; + netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + return 0; + +err_out_unregister_netdev: + unregister_netdev(dev); + +err_out_free_netdev: + free_netdev(dev); + +err_disable_clocks: + clk_disable_unprepare(tx_clk); + clk_disable_unprepare(hclk); + clk_disable_unprepare(pclk); + + return err; +} + +static int macb_remove(struct platform_device *pdev) +{ + struct net_device *dev; + struct macb *bp; + + dev = platform_get_drvdata(pdev); + + if (dev) { + bp = netdev_priv(dev); + if (bp->phy_dev) + phy_disconnect(bp->phy_dev); + mdiobus_unregister(bp->mii_bus); + kfree(bp->mii_bus->irq); + mdiobus_free(bp->mii_bus); + unregister_netdev(dev); + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + free_netdev(dev); + } + + return 0; +} + +static int __maybe_unused macb_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *netdev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(netdev); + + netif_carrier_off(netdev); + netif_device_detach(netdev); + + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + + return 0; +} + +static int __maybe_unused macb_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *netdev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(netdev); + + clk_prepare_enable(bp->pclk); + clk_prepare_enable(bp->hclk); + clk_prepare_enable(bp->tx_clk); + + netif_device_attach(netdev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); + +static struct platform_driver macb_driver = { + .probe = macb_probe, + .remove = macb_remove, + .driver = { + .name = "macb", + .of_match_table = of_match_ptr(macb_dt_ids), + .pm = &macb_pm_ops, + }, +}; + +module_platform_driver(macb_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); +MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); +MODULE_ALIAS("platform:macb"); diff --git a/drivers/net/ethernet/cadence/macb.h 
b/drivers/net/ethernet/cadence/macb.h new file mode 100644 index 000000000..24b1d9bcd --- /dev/null +++ b/drivers/net/ethernet/cadence/macb.h @@ -0,0 +1,842 @@ +/* + * Atmel MACB Ethernet Controller driver + * + * Copyright (C) 2004-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _MACB_H +#define _MACB_H + +#define MACB_GREGS_NBR 16 +#define MACB_GREGS_VERSION 2 +#define MACB_MAX_QUEUES 8 + +/* MACB register offsets */ +#define MACB_NCR 0x0000 /* Network Control */ +#define MACB_NCFGR 0x0004 /* Network Config */ +#define MACB_NSR 0x0008 /* Network Status */ +#define MACB_TAR 0x000c /* AT91RM9200 only */ +#define MACB_TCR 0x0010 /* AT91RM9200 only */ +#define MACB_TSR 0x0014 /* Transmit Status */ +#define MACB_RBQP 0x0018 /* RX Q Base Address */ +#define MACB_TBQP 0x001c /* TX Q Base Address */ +#define MACB_RSR 0x0020 /* Receive Status */ +#define MACB_ISR 0x0024 /* Interrupt Status */ +#define MACB_IER 0x0028 /* Interrupt Enable */ +#define MACB_IDR 0x002c /* Interrupt Disable */ +#define MACB_IMR 0x0030 /* Interrupt Mask */ +#define MACB_MAN 0x0034 /* PHY Maintenance */ +#define MACB_PTR 0x0038 +#define MACB_PFR 0x003c +#define MACB_FTO 0x0040 +#define MACB_SCF 0x0044 +#define MACB_MCF 0x0048 +#define MACB_FRO 0x004c +#define MACB_FCSE 0x0050 +#define MACB_ALE 0x0054 +#define MACB_DTF 0x0058 +#define MACB_LCOL 0x005c +#define MACB_EXCOL 0x0060 +#define MACB_TUND 0x0064 +#define MACB_CSE 0x0068 +#define MACB_RRE 0x006c +#define MACB_ROVR 0x0070 +#define MACB_RSE 0x0074 +#define MACB_ELE 0x0078 +#define MACB_RJA 0x007c +#define MACB_USF 0x0080 +#define MACB_STE 0x0084 +#define MACB_RLE 0x0088 +#define MACB_TPF 0x008c +#define MACB_HRB 0x0090 +#define MACB_HRT 0x0094 +#define MACB_SA1B 0x0098 +#define MACB_SA1T 0x009c +#define MACB_SA2B 0x00a0 +#define MACB_SA2T 0x00a4 +#define MACB_SA3B 0x00a8 +#define MACB_SA3T 0x00ac +#define MACB_SA4B 0x00b0 +#define MACB_SA4T 0x00b4 +#define MACB_TID 0x00b8 +#define MACB_TPQ 0x00bc +#define MACB_USRIO 0x00c0 +#define MACB_WOL 0x00c4 +#define MACB_MID 0x00fc + +/* GEM register offsets. 
*/ +#define GEM_NCFGR 0x0004 /* Network Config */ +#define GEM_USRIO 0x000c /* User IO */ +#define GEM_DMACFG 0x0010 /* DMA Configuration */ +#define GEM_HRB 0x0080 /* Hash Bottom */ +#define GEM_HRT 0x0084 /* Hash Top */ +#define GEM_SA1B 0x0088 /* Specific1 Bottom */ +#define GEM_SA1T 0x008C /* Specific1 Top */ +#define GEM_SA2B 0x0090 /* Specific2 Bottom */ +#define GEM_SA2T 0x0094 /* Specific2 Top */ +#define GEM_SA3B 0x0098 /* Specific3 Bottom */ +#define GEM_SA3T 0x009C /* Specific3 Top */ +#define GEM_SA4B 0x00A0 /* Specific4 Bottom */ +#define GEM_SA4T 0x00A4 /* Specific4 Top */ +#define GEM_OTX 0x0100 /* Octets transmitted */ +#define GEM_OCTTXL 0x0100 /* Octets transmitted [31:0] */ +#define GEM_OCTTXH 0x0104 /* Octets transmitted [47:32] */ +#define GEM_TXCNT 0x0108 /* Frames Transmitted counter */ +#define GEM_TXBCCNT 0x010c /* Broadcast Frames counter */ +#define GEM_TXMCCNT 0x0110 /* Multicast Frames counter */ +#define GEM_TXPAUSECNT 0x0114 /* Pause Frames Transmitted Counter */ +#define GEM_TX64CNT 0x0118 /* 64 byte Frames TX counter */ +#define GEM_TX65CNT 0x011c /* 65-127 byte Frames TX counter */ +#define GEM_TX128CNT 0x0120 /* 128-255 byte Frames TX counter */ +#define GEM_TX256CNT 0x0124 /* 256-511 byte Frames TX counter */ +#define GEM_TX512CNT 0x0128 /* 512-1023 byte Frames TX counter */ +#define GEM_TX1024CNT 0x012c /* 1024-1518 byte Frames TX counter */ +#define GEM_TX1519CNT 0x0130 /* 1519+ byte Frames TX counter */ +#define GEM_TXURUNCNT 0x0134 /* TX under run error counter */ +#define GEM_SNGLCOLLCNT 0x0138 /* Single Collision Frame Counter */ +#define GEM_MULTICOLLCNT 0x013c /* Multiple Collision Frame Counter */ +#define GEM_EXCESSCOLLCNT 0x0140 /* Excessive Collision Frame Counter */ +#define GEM_LATECOLLCNT 0x0144 /* Late Collision Frame Counter */ +#define GEM_TXDEFERCNT 0x0148 /* Deferred Transmission Frame Counter */ +#define GEM_TXCSENSECNT 0x014c /* Carrier Sense Error Counter */ +#define GEM_ORX 0x0150 /* Octets received */ +#define GEM_OCTRXL 0x0150 /* Octets received [31:0] */ +#define GEM_OCTRXH 0x0154 /* Octets received [47:32] */ +#define GEM_RXCNT 0x0158 /* Frames Received Counter */ +#define GEM_RXBROADCNT 0x015c /* Broadcast Frames Received Counter */ +#define GEM_RXMULTICNT 0x0160 /* Multicast Frames Received Counter */ +#define GEM_RXPAUSECNT 0x0164 /* Pause Frames Received Counter */ +#define GEM_RX64CNT 0x0168 /* 64 byte Frames RX Counter */ +#define GEM_RX65CNT 0x016c /* 65-127 byte Frames RX Counter */ +#define GEM_RX128CNT 0x0170 /* 128-255 byte Frames RX Counter */ +#define GEM_RX256CNT 0x0174 /* 256-511 byte Frames RX Counter */ +#define GEM_RX512CNT 0x0178 /* 512-1023 byte Frames RX Counter */ +#define GEM_RX1024CNT 0x017c /* 1024-1518 byte Frames RX Counter */ +#define GEM_RX1519CNT 0x0180 /* 1519+ byte Frames RX Counter */ +#define GEM_RXUNDRCNT 0x0184 /* Undersize Frames Received Counter */ +#define GEM_RXOVRCNT 0x0188 /* Oversize Frames Received Counter */ +#define GEM_RXJABCNT 0x018c /* Jabbers Received Counter */ +#define GEM_RXFCSCNT 0x0190 /* Frame Check Sequence Error Counter */ +#define GEM_RXLENGTHCNT 0x0194 /* Length Field Error Counter */ +#define GEM_RXSYMBCNT 0x0198 /* Symbol Error Counter */ +#define GEM_RXALIGNCNT 0x019c /* Alignment Error Counter */ +#define GEM_RXRESERRCNT 0x01a0 /* Receive Resource Error Counter */ +#define GEM_RXORCNT 0x01a4 /* Receive Overrun Counter */ +#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */ +#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */ +#define 
GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */ +#define GEM_DCFG1 0x0280 /* Design Config 1 */ +#define GEM_DCFG2 0x0284 /* Design Config 2 */ +#define GEM_DCFG3 0x0288 /* Design Config 3 */ +#define GEM_DCFG4 0x028c /* Design Config 4 */ +#define GEM_DCFG5 0x0290 /* Design Config 5 */ +#define GEM_DCFG6 0x0294 /* Design Config 6 */ +#define GEM_DCFG7 0x0298 /* Design Config 7 */ + +#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) +#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) +#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) +#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) +#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) +#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2)) + +/* Bitfields in NCR */ +#define MACB_LB_OFFSET 0 /* reserved */ +#define MACB_LB_SIZE 1 +#define MACB_LLB_OFFSET 1 /* Loop back local */ +#define MACB_LLB_SIZE 1 +#define MACB_RE_OFFSET 2 /* Receive enable */ +#define MACB_RE_SIZE 1 +#define MACB_TE_OFFSET 3 /* Transmit enable */ +#define MACB_TE_SIZE 1 +#define MACB_MPE_OFFSET 4 /* Management port enable */ +#define MACB_MPE_SIZE 1 +#define MACB_CLRSTAT_OFFSET 5 /* Clear stats regs */ +#define MACB_CLRSTAT_SIZE 1 +#define MACB_INCSTAT_OFFSET 6 /* Incremental stats regs */ +#define MACB_INCSTAT_SIZE 1 +#define MACB_WESTAT_OFFSET 7 /* Write enable stats regs */ +#define MACB_WESTAT_SIZE 1 +#define MACB_BP_OFFSET 8 /* Back pressure */ +#define MACB_BP_SIZE 1 +#define MACB_TSTART_OFFSET 9 /* Start transmission */ +#define MACB_TSTART_SIZE 1 +#define MACB_THALT_OFFSET 10 /* Transmit halt */ +#define MACB_THALT_SIZE 1 +#define MACB_NCR_TPF_OFFSET 11 /* Transmit pause frame */ +#define MACB_NCR_TPF_SIZE 1 +#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ +#define MACB_TZQ_SIZE 1 + +/* Bitfields in NCFGR */ +#define MACB_SPD_OFFSET 0 /* Speed */ +#define MACB_SPD_SIZE 1 +#define MACB_FD_OFFSET 1 /* Full duplex */ +#define MACB_FD_SIZE 1 +#define MACB_BIT_RATE_OFFSET 2 /* Discard non-VLAN frames */ +#define MACB_BIT_RATE_SIZE 1 +#define MACB_JFRAME_OFFSET 3 /* reserved */ +#define MACB_JFRAME_SIZE 1 +#define MACB_CAF_OFFSET 4 /* Copy all frames */ +#define MACB_CAF_SIZE 1 +#define MACB_NBC_OFFSET 5 /* No broadcast */ +#define MACB_NBC_SIZE 1 +#define MACB_NCFGR_MTI_OFFSET 6 /* Multicast hash enable */ +#define MACB_NCFGR_MTI_SIZE 1 +#define MACB_UNI_OFFSET 7 /* Unicast hash enable */ +#define MACB_UNI_SIZE 1 +#define MACB_BIG_OFFSET 8 /* Receive 1536 byte frames */ +#define MACB_BIG_SIZE 1 +#define MACB_EAE_OFFSET 9 /* External address match enable */ +#define MACB_EAE_SIZE 1 +#define MACB_CLK_OFFSET 10 +#define MACB_CLK_SIZE 2 +#define MACB_RTY_OFFSET 12 /* Retry test */ +#define MACB_RTY_SIZE 1 +#define MACB_PAE_OFFSET 13 /* Pause enable */ +#define MACB_PAE_SIZE 1 +#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */ +#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */ +#define MACB_RBOF_OFFSET 14 /* Receive buffer offset */ +#define MACB_RBOF_SIZE 2 +#define MACB_RLCE_OFFSET 16 /* Length field error frame discard */ +#define MACB_RLCE_SIZE 1 +#define MACB_DRFCS_OFFSET 17 /* FCS remove */ +#define MACB_DRFCS_SIZE 1 +#define MACB_EFRHD_OFFSET 18 +#define MACB_EFRHD_SIZE 1 +#define MACB_IRXFCS_OFFSET 19 +#define MACB_IRXFCS_SIZE 1 + +/* GEM specific NCFGR bitfields. 
*/ +#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ +#define GEM_GBE_SIZE 1 +#define GEM_CLK_OFFSET 18 /* MDC clock division */ +#define GEM_CLK_SIZE 3 +#define GEM_DBW_OFFSET 21 /* Data bus width */ +#define GEM_DBW_SIZE 2 +#define GEM_RXCOEN_OFFSET 24 +#define GEM_RXCOEN_SIZE 1 + +/* Constants for data bus width. */ +#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ +#define GEM_DBW64 1 /* 64 bit AMBA AHB data bus width */ +#define GEM_DBW128 2 /* 128 bit AMBA AHB data bus width */ + +/* Bitfields in DMACFG. */ +#define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */ +#define GEM_FBLDO_SIZE 5 +#define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */ +#define GEM_ENDIA_DESC_SIZE 1 +#define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */ +#define GEM_ENDIA_PKT_SIZE 1 +#define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */ +#define GEM_RXBMS_SIZE 2 +#define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */ +#define GEM_TXPBMS_SIZE 1 +#define GEM_TXCOEN_OFFSET 11 /* TX IP/TCP/UDP checksum gen offload */ +#define GEM_TXCOEN_SIZE 1 +#define GEM_RXBS_OFFSET 16 /* DMA receive buffer size */ +#define GEM_RXBS_SIZE 8 +#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */ +#define GEM_DDRP_SIZE 1 + + +/* Bitfields in NSR */ +#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */ +#define MACB_NSR_LINK_SIZE 1 +#define MACB_MDIO_OFFSET 1 /* status of the mdio_in pin */ +#define MACB_MDIO_SIZE 1 +#define MACB_IDLE_OFFSET 2 /* The PHY management logic is idle */ +#define MACB_IDLE_SIZE 1 + +/* Bitfields in TSR */ +#define MACB_UBR_OFFSET 0 /* Used bit read */ +#define MACB_UBR_SIZE 1 +#define MACB_COL_OFFSET 1 /* Collision occurred */ +#define MACB_COL_SIZE 1 +#define MACB_TSR_RLE_OFFSET 2 /* Retry limit exceeded */ +#define MACB_TSR_RLE_SIZE 1 +#define MACB_TGO_OFFSET 3 /* Transmit go */ +#define MACB_TGO_SIZE 1 +#define MACB_BEX_OFFSET 4 /* TX frame corruption due to AHB error */ +#define MACB_BEX_SIZE 1 +#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */ +#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */ +#define MACB_COMP_OFFSET 5 /* Transmit complete */ +#define MACB_COMP_SIZE 1 +#define MACB_UND_OFFSET 6 /* Transmit under run */ +#define MACB_UND_SIZE 1 + +/* Bitfields in RSR */ +#define MACB_BNA_OFFSET 0 /* Buffer not available */ +#define MACB_BNA_SIZE 1 +#define MACB_REC_OFFSET 1 /* Frame received */ +#define MACB_REC_SIZE 1 +#define MACB_OVR_OFFSET 2 /* Receive overrun */ +#define MACB_OVR_SIZE 1 + +/* Bitfields in ISR/IER/IDR/IMR */ +#define MACB_MFD_OFFSET 0 /* Management frame sent */ +#define MACB_MFD_SIZE 1 +#define MACB_RCOMP_OFFSET 1 /* Receive complete */ +#define MACB_RCOMP_SIZE 1 +#define MACB_RXUBR_OFFSET 2 /* RX used bit read */ +#define MACB_RXUBR_SIZE 1 +#define MACB_TXUBR_OFFSET 3 /* TX used bit read */ +#define MACB_TXUBR_SIZE 1 +#define MACB_ISR_TUND_OFFSET 4 /* Enable TX buffer under run interrupt */ +#define MACB_ISR_TUND_SIZE 1 +#define MACB_ISR_RLE_OFFSET 5 /* EN retry exceeded/late coll interrupt */ +#define MACB_ISR_RLE_SIZE 1 +#define MACB_TXERR_OFFSET 6 /* EN TX frame corrupt from error interrupt */ +#define MACB_TXERR_SIZE 1 +#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete interrupt */ +#define MACB_TCOMP_SIZE 1 +#define MACB_ISR_LINK_OFFSET 9 /* Enable link change interrupt */ +#define MACB_ISR_LINK_SIZE 1 +#define MACB_ISR_ROVR_OFFSET 10 /* Enable receive overrun interrupt */ +#define MACB_ISR_ROVR_SIZE 1 +#define MACB_HRESP_OFFSET 11 /* Enable hresp not OK
interrupt */ +#define MACB_HRESP_SIZE 1 +#define MACB_PFR_OFFSET 12 /* Enable pause frame w/ quantum interrupt */ +#define MACB_PFR_SIZE 1 +#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */ +#define MACB_PTZ_SIZE 1 + +/* Bitfields in MAN */ +#define MACB_DATA_OFFSET 0 /* data */ +#define MACB_DATA_SIZE 16 +#define MACB_CODE_OFFSET 16 /* Must be written to 10 */ +#define MACB_CODE_SIZE 2 +#define MACB_REGA_OFFSET 18 /* Register address */ +#define MACB_REGA_SIZE 5 +#define MACB_PHYA_OFFSET 23 /* PHY address */ +#define MACB_PHYA_SIZE 5 +#define MACB_RW_OFFSET 28 /* Operation. 10 is read. 01 is write. */ +#define MACB_RW_SIZE 2 +#define MACB_SOF_OFFSET 30 /* Must be written to 1 for Clause 22 */ +#define MACB_SOF_SIZE 2 + +/* Bitfields in USRIO (AVR32) */ +#define MACB_MII_OFFSET 0 +#define MACB_MII_SIZE 1 +#define MACB_EAM_OFFSET 1 +#define MACB_EAM_SIZE 1 +#define MACB_TX_PAUSE_OFFSET 2 +#define MACB_TX_PAUSE_SIZE 1 +#define MACB_TX_PAUSE_ZERO_OFFSET 3 +#define MACB_TX_PAUSE_ZERO_SIZE 1 + +/* Bitfields in USRIO (AT91) */ +#define MACB_RMII_OFFSET 0 +#define MACB_RMII_SIZE 1 +#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */ +#define GEM_RGMII_SIZE 1 +#define MACB_CLKEN_OFFSET 1 +#define MACB_CLKEN_SIZE 1 + +/* Bitfields in WOL */ +#define MACB_IP_OFFSET 0 +#define MACB_IP_SIZE 16 +#define MACB_MAG_OFFSET 16 +#define MACB_MAG_SIZE 1 +#define MACB_ARP_OFFSET 17 +#define MACB_ARP_SIZE 1 +#define MACB_SA1_OFFSET 18 +#define MACB_SA1_SIZE 1 +#define MACB_WOL_MTI_OFFSET 19 +#define MACB_WOL_MTI_SIZE 1 + +/* Bitfields in MID */ +#define MACB_IDNUM_OFFSET 16 +#define MACB_IDNUM_SIZE 12 +#define MACB_REV_OFFSET 0 +#define MACB_REV_SIZE 16 + +/* Bitfields in DCFG1. */ +#define GEM_IRQCOR_OFFSET 23 +#define GEM_IRQCOR_SIZE 1 +#define GEM_DBWDEF_OFFSET 25 +#define GEM_DBWDEF_SIZE 3 + +/* Bitfields in DCFG2. */ +#define GEM_RX_PKT_BUFF_OFFSET 20 +#define GEM_RX_PKT_BUFF_SIZE 1 +#define GEM_TX_PKT_BUFF_OFFSET 21 +#define GEM_TX_PKT_BUFF_SIZE 1 + +/* Constants for CLK */ +#define MACB_CLK_DIV8 0 +#define MACB_CLK_DIV16 1 +#define MACB_CLK_DIV32 2 +#define MACB_CLK_DIV64 3 + +/* GEM specific constants for CLK. 
*/ +#define GEM_CLK_DIV8 0 +#define GEM_CLK_DIV16 1 +#define GEM_CLK_DIV32 2 +#define GEM_CLK_DIV48 3 +#define GEM_CLK_DIV64 4 +#define GEM_CLK_DIV96 5 + +/* Constants for MAN register */ +#define MACB_MAN_SOF 1 +#define MACB_MAN_WRITE 1 +#define MACB_MAN_READ 2 +#define MACB_MAN_CODE 2 + +/* Capability mask bits */ +#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 +#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 +#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 +#define MACB_CAPS_FIFO_MODE 0x10000000 +#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 +#define MACB_CAPS_SG_DISABLED 0x40000000 +#define MACB_CAPS_MACB_IS_GEM 0x80000000 + +/* Bit manipulation macros */ +#define MACB_BIT(name) \ + (1 << MACB_##name##_OFFSET) +#define MACB_BF(name,value) \ + (((value) & ((1 << MACB_##name##_SIZE) - 1)) \ + << MACB_##name##_OFFSET) +#define MACB_BFEXT(name,value)\ + (((value) >> MACB_##name##_OFFSET) \ + & ((1 << MACB_##name##_SIZE) - 1)) +#define MACB_BFINS(name,value,old) \ + (((old) & ~(((1 << MACB_##name##_SIZE) - 1) \ + << MACB_##name##_OFFSET)) \ + | MACB_BF(name,value)) + +#define GEM_BIT(name) \ + (1 << GEM_##name##_OFFSET) +#define GEM_BF(name, value) \ + (((value) & ((1 << GEM_##name##_SIZE) - 1)) \ + << GEM_##name##_OFFSET) +#define GEM_BFEXT(name, value)\ + (((value) >> GEM_##name##_OFFSET) \ + & ((1 << GEM_##name##_SIZE) - 1)) +#define GEM_BFINS(name, value, old) \ + (((old) & ~(((1 << GEM_##name##_SIZE) - 1) \ + << GEM_##name##_OFFSET)) \ + | GEM_BF(name, value)) + +/* Register access macros */ +#define macb_readl(port,reg) \ + readl_relaxed((port)->regs + MACB_##reg) +#define macb_writel(port,reg,value) \ + writel_relaxed((value), (port)->regs + MACB_##reg) +#define gem_readl(port, reg) \ + readl_relaxed((port)->regs + GEM_##reg) +#define gem_writel(port, reg, value) \ + writel_relaxed((value), (port)->regs + GEM_##reg) +#define queue_readl(queue, reg) \ + readl_relaxed((queue)->bp->regs + (queue)->reg) +#define queue_writel(queue, reg, value) \ + writel_relaxed((value), (queue)->bp->regs + (queue)->reg) + +/* Conditional GEM/MACB macros. These perform the operation to the correct + * register dependent on whether the device is a GEM or a MACB. For registers + * and bitfields that are common across both devices, use macb_{read,write}l + * to avoid the cost of the conditional. 
+ */ +#define macb_or_gem_writel(__bp, __reg, __value) \ + ({ \ + if (macb_is_gem((__bp))) \ + gem_writel((__bp), __reg, __value); \ + else \ + macb_writel((__bp), __reg, __value); \ + }) + +#define macb_or_gem_readl(__bp, __reg) \ + ({ \ + u32 __v; \ + if (macb_is_gem((__bp))) \ + __v = gem_readl((__bp), __reg); \ + else \ + __v = macb_readl((__bp), __reg); \ + __v; \ + }) + +/* struct macb_dma_desc - Hardware DMA descriptor + * @addr: DMA address of data buffer + * @ctrl: Control and status bits + */ +struct macb_dma_desc { + u32 addr; + u32 ctrl; +}; + +/* DMA descriptor bitfields */ +#define MACB_RX_USED_OFFSET 0 +#define MACB_RX_USED_SIZE 1 +#define MACB_RX_WRAP_OFFSET 1 +#define MACB_RX_WRAP_SIZE 1 +#define MACB_RX_WADDR_OFFSET 2 +#define MACB_RX_WADDR_SIZE 30 + +#define MACB_RX_FRMLEN_OFFSET 0 +#define MACB_RX_FRMLEN_SIZE 12 +#define MACB_RX_OFFSET_OFFSET 12 +#define MACB_RX_OFFSET_SIZE 2 +#define MACB_RX_SOF_OFFSET 14 +#define MACB_RX_SOF_SIZE 1 +#define MACB_RX_EOF_OFFSET 15 +#define MACB_RX_EOF_SIZE 1 +#define MACB_RX_CFI_OFFSET 16 +#define MACB_RX_CFI_SIZE 1 +#define MACB_RX_VLAN_PRI_OFFSET 17 +#define MACB_RX_VLAN_PRI_SIZE 3 +#define MACB_RX_PRI_TAG_OFFSET 20 +#define MACB_RX_PRI_TAG_SIZE 1 +#define MACB_RX_VLAN_TAG_OFFSET 21 +#define MACB_RX_VLAN_TAG_SIZE 1 +#define MACB_RX_TYPEID_MATCH_OFFSET 22 +#define MACB_RX_TYPEID_MATCH_SIZE 1 +#define MACB_RX_SA4_MATCH_OFFSET 23 +#define MACB_RX_SA4_MATCH_SIZE 1 +#define MACB_RX_SA3_MATCH_OFFSET 24 +#define MACB_RX_SA3_MATCH_SIZE 1 +#define MACB_RX_SA2_MATCH_OFFSET 25 +#define MACB_RX_SA2_MATCH_SIZE 1 +#define MACB_RX_SA1_MATCH_OFFSET 26 +#define MACB_RX_SA1_MATCH_SIZE 1 +#define MACB_RX_EXT_MATCH_OFFSET 28 +#define MACB_RX_EXT_MATCH_SIZE 1 +#define MACB_RX_UHASH_MATCH_OFFSET 29 +#define MACB_RX_UHASH_MATCH_SIZE 1 +#define MACB_RX_MHASH_MATCH_OFFSET 30 +#define MACB_RX_MHASH_MATCH_SIZE 1 +#define MACB_RX_BROADCAST_OFFSET 31 +#define MACB_RX_BROADCAST_SIZE 1 + +/* RX checksum offload disabled: bit 24 clear in NCFGR */ +#define GEM_RX_TYPEID_MATCH_OFFSET 22 +#define GEM_RX_TYPEID_MATCH_SIZE 2 + +/* RX checksum offload enabled: bit 24 set in NCFGR */ +#define GEM_RX_CSUM_OFFSET 22 +#define GEM_RX_CSUM_SIZE 2 + +#define MACB_TX_FRMLEN_OFFSET 0 +#define MACB_TX_FRMLEN_SIZE 11 +#define MACB_TX_LAST_OFFSET 15 +#define MACB_TX_LAST_SIZE 1 +#define MACB_TX_NOCRC_OFFSET 16 +#define MACB_TX_NOCRC_SIZE 1 +#define MACB_TX_BUF_EXHAUSTED_OFFSET 27 +#define MACB_TX_BUF_EXHAUSTED_SIZE 1 +#define MACB_TX_UNDERRUN_OFFSET 28 +#define MACB_TX_UNDERRUN_SIZE 1 +#define MACB_TX_ERROR_OFFSET 29 +#define MACB_TX_ERROR_SIZE 1 +#define MACB_TX_WRAP_OFFSET 30 +#define MACB_TX_WRAP_SIZE 1 +#define MACB_TX_USED_OFFSET 31 +#define MACB_TX_USED_SIZE 1 + +#define GEM_TX_FRMLEN_OFFSET 0 +#define GEM_TX_FRMLEN_SIZE 14 + +/* Buffer descriptor constants */ +#define GEM_RX_CSUM_NONE 0 +#define GEM_RX_CSUM_IP_ONLY 1 +#define GEM_RX_CSUM_IP_TCP 2 +#define GEM_RX_CSUM_IP_UDP 3 + +/* limit RX checksum offload to TCP and UDP packets */ +#define GEM_RX_CSUM_CHECKED_MASK 2 + +/* struct macb_tx_skb - data about an skb which is being transmitted + * @skb: skb currently being transmitted, only set for the last buffer + * of the frame + * @mapping: DMA address of the skb's fragment buffer + * @size: size of the DMA mapped buffer + * @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(), + * false when buffer was mapped with dma_map_single() + */ +struct macb_tx_skb { + struct sk_buff *skb; + dma_addr_t mapping; + size_t size; + bool mapped_as_page; +}; + +/* 
Hardware-collected statistics. Used when updating the network + * device stats by a periodic timer. + */ +struct macb_stats { + u32 rx_pause_frames; + u32 tx_ok; + u32 tx_single_cols; + u32 tx_multiple_cols; + u32 rx_ok; + u32 rx_fcs_errors; + u32 rx_align_errors; + u32 tx_deferred; + u32 tx_late_cols; + u32 tx_excessive_cols; + u32 tx_underruns; + u32 tx_carrier_errors; + u32 rx_resource_errors; + u32 rx_overruns; + u32 rx_symbol_errors; + u32 rx_oversize_pkts; + u32 rx_jabbers; + u32 rx_undersize_pkts; + u32 sqe_test_errors; + u32 rx_length_mismatch; + u32 tx_pause_frames; +}; + +struct gem_stats { + u32 tx_octets_31_0; + u32 tx_octets_47_32; + u32 tx_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; + u32 tx_64_byte_frames; + u32 tx_65_127_byte_frames; + u32 tx_128_255_byte_frames; + u32 tx_256_511_byte_frames; + u32 tx_512_1023_byte_frames; + u32 tx_1024_1518_byte_frames; + u32 tx_greater_than_1518_byte_frames; + u32 tx_underrun; + u32 tx_single_collision_frames; + u32 tx_multiple_collision_frames; + u32 tx_excessive_collisions; + u32 tx_late_collisions; + u32 tx_deferred_frames; + u32 tx_carrier_sense_errors; + u32 rx_octets_31_0; + u32 rx_octets_47_32; + u32 rx_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_pause_frames; + u32 rx_64_byte_frames; + u32 rx_65_127_byte_frames; + u32 rx_128_255_byte_frames; + u32 rx_256_511_byte_frames; + u32 rx_512_1023_byte_frames; + u32 rx_1024_1518_byte_frames; + u32 rx_greater_than_1518_byte_frames; + u32 rx_undersized_frames; + u32 rx_oversize_frames; + u32 rx_jabbers; + u32 rx_frame_check_sequence_errors; + u32 rx_length_field_frame_errors; + u32 rx_symbol_errors; + u32 rx_alignment_errors; + u32 rx_resource_errors; + u32 rx_overruns; + u32 rx_ip_header_checksum_errors; + u32 rx_tcp_checksum_errors; + u32 rx_udp_checksum_errors; +}; + +/* Describes the name and offset of an individual statistic register, as + * returned by `ethtool -S`. Also describes which net_device_stats statistics + * this register should contribute to. + */ +struct gem_statistic { + char stat_string[ETH_GSTRING_LEN]; + int offset; + u32 stat_bits; +}; + +/* Bitfield defs for net_device_stat statistics */ +#define GEM_NDS_RXERR_OFFSET 0 +#define GEM_NDS_RXLENERR_OFFSET 1 +#define GEM_NDS_RXOVERERR_OFFSET 2 +#define GEM_NDS_RXCRCERR_OFFSET 3 +#define GEM_NDS_RXFRAMEERR_OFFSET 4 +#define GEM_NDS_RXFIFOERR_OFFSET 5 +#define GEM_NDS_TXERR_OFFSET 6 +#define GEM_NDS_TXABORTEDERR_OFFSET 7 +#define GEM_NDS_TXCARRIERERR_OFFSET 8 +#define GEM_NDS_TXFIFOERR_OFFSET 9 +#define GEM_NDS_COLLISIONS_OFFSET 10 + +#define GEM_STAT_TITLE(name, title) GEM_STAT_TITLE_BITS(name, title, 0) +#define GEM_STAT_TITLE_BITS(name, title, bits) { \ + .stat_string = title, \ + .offset = GEM_##name, \ + .stat_bits = bits \ +} + +/* list of gem statistic registers. The names MUST match the + * corresponding GEM_* definitions. 
+ */ +static const struct gem_statistic gem_statistics[] = { + GEM_STAT_TITLE(OCTTXL, "tx_octets"), /* OCTTXH combined with OCTTXL */ + GEM_STAT_TITLE(TXCNT, "tx_frames"), + GEM_STAT_TITLE(TXBCCNT, "tx_broadcast_frames"), + GEM_STAT_TITLE(TXMCCNT, "tx_multicast_frames"), + GEM_STAT_TITLE(TXPAUSECNT, "tx_pause_frames"), + GEM_STAT_TITLE(TX64CNT, "tx_64_byte_frames"), + GEM_STAT_TITLE(TX65CNT, "tx_65_127_byte_frames"), + GEM_STAT_TITLE(TX128CNT, "tx_128_255_byte_frames"), + GEM_STAT_TITLE(TX256CNT, "tx_256_511_byte_frames"), + GEM_STAT_TITLE(TX512CNT, "tx_512_1023_byte_frames"), + GEM_STAT_TITLE(TX1024CNT, "tx_1024_1518_byte_frames"), + GEM_STAT_TITLE(TX1519CNT, "tx_greater_than_1518_byte_frames"), + GEM_STAT_TITLE_BITS(TXURUNCNT, "tx_underrun", + GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_TXFIFOERR)), + GEM_STAT_TITLE_BITS(SNGLCOLLCNT, "tx_single_collision_frames", + GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)), + GEM_STAT_TITLE_BITS(MULTICOLLCNT, "tx_multiple_collision_frames", + GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)), + GEM_STAT_TITLE_BITS(EXCESSCOLLCNT, "tx_excessive_collisions", + GEM_BIT(NDS_TXERR)| + GEM_BIT(NDS_TXABORTEDERR)| + GEM_BIT(NDS_COLLISIONS)), + GEM_STAT_TITLE_BITS(LATECOLLCNT, "tx_late_collisions", + GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)), + GEM_STAT_TITLE(TXDEFERCNT, "tx_deferred_frames"), + GEM_STAT_TITLE_BITS(TXCSENSECNT, "tx_carrier_sense_errors", + GEM_BIT(NDS_TXERR)|GEM_BIT(NDS_COLLISIONS)), + GEM_STAT_TITLE(OCTRXL, "rx_octets"), /* OCTRXH combined with OCTRXL */ + GEM_STAT_TITLE(RXCNT, "rx_frames"), + GEM_STAT_TITLE(RXBROADCNT, "rx_broadcast_frames"), + GEM_STAT_TITLE(RXMULTICNT, "rx_multicast_frames"), + GEM_STAT_TITLE(RXPAUSECNT, "rx_pause_frames"), + GEM_STAT_TITLE(RX64CNT, "rx_64_byte_frames"), + GEM_STAT_TITLE(RX65CNT, "rx_65_127_byte_frames"), + GEM_STAT_TITLE(RX128CNT, "rx_128_255_byte_frames"), + GEM_STAT_TITLE(RX256CNT, "rx_256_511_byte_frames"), + GEM_STAT_TITLE(RX512CNT, "rx_512_1023_byte_frames"), + GEM_STAT_TITLE(RX1024CNT, "rx_1024_1518_byte_frames"), + GEM_STAT_TITLE(RX1519CNT, "rx_greater_than_1518_byte_frames"), + GEM_STAT_TITLE_BITS(RXUNDRCNT, "rx_undersized_frames", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)), + GEM_STAT_TITLE_BITS(RXOVRCNT, "rx_oversize_frames", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)), + GEM_STAT_TITLE_BITS(RXJABCNT, "rx_jabbers", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXLENERR)), + GEM_STAT_TITLE_BITS(RXFCSCNT, "rx_frame_check_sequence_errors", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXCRCERR)), + GEM_STAT_TITLE_BITS(RXLENGTHCNT, "rx_length_field_frame_errors", + GEM_BIT(NDS_RXERR)), + GEM_STAT_TITLE_BITS(RXSYMBCNT, "rx_symbol_errors", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFRAMEERR)), + GEM_STAT_TITLE_BITS(RXALIGNCNT, "rx_alignment_errors", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)), + GEM_STAT_TITLE_BITS(RXRESERRCNT, "rx_resource_errors", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXOVERERR)), + GEM_STAT_TITLE_BITS(RXORCNT, "rx_overruns", + GEM_BIT(NDS_RXERR)|GEM_BIT(NDS_RXFIFOERR)), + GEM_STAT_TITLE_BITS(RXIPCCNT, "rx_ip_header_checksum_errors", + GEM_BIT(NDS_RXERR)), + GEM_STAT_TITLE_BITS(RXTCPCCNT, "rx_tcp_checksum_errors", + GEM_BIT(NDS_RXERR)), + GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors", + GEM_BIT(NDS_RXERR)), +}; + +#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics) + +struct macb; + +struct macb_or_gem_ops { + int (*mog_alloc_rx_buffers)(struct macb *bp); + void (*mog_free_rx_buffers)(struct macb *bp); + void (*mog_init_rings)(struct macb *bp); + int (*mog_rx)(struct macb *bp, int budget); +}; + +struct macb_config { + u32 caps; + 
unsigned int dma_burst_length; + int (*clk_init)(struct platform_device *pdev, struct clk **pclk, + struct clk **hclk, struct clk **tx_clk); + int (*init)(struct platform_device *pdev); +}; + +struct macb_queue { + struct macb *bp; + int irq; + + unsigned int ISR; + unsigned int IER; + unsigned int IDR; + unsigned int IMR; + unsigned int TBQP; + + unsigned int tx_head, tx_tail; + struct macb_dma_desc *tx_ring; + struct macb_tx_skb *tx_skb; + dma_addr_t tx_ring_dma; + struct work_struct tx_error_task; +}; + +struct macb { + void __iomem *regs; + + unsigned int rx_tail; + unsigned int rx_prepared_head; + struct macb_dma_desc *rx_ring; + struct sk_buff **rx_skbuff; + void *rx_buffers; + size_t rx_buffer_size; + + unsigned int num_queues; + unsigned int queue_mask; + struct macb_queue queues[MACB_MAX_QUEUES]; + + spinlock_t lock; + struct platform_device *pdev; + struct clk *pclk; + struct clk *hclk; + struct clk *tx_clk; + struct net_device *dev; + struct napi_struct napi; + struct net_device_stats stats; + union { + struct macb_stats macb; + struct gem_stats gem; + } hw_stats; + + dma_addr_t rx_ring_dma; + dma_addr_t rx_buffers_dma; + + struct macb_or_gem_ops macbgem_ops; + + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + unsigned int link; + unsigned int speed; + unsigned int duplex; + + u32 caps; + unsigned int dma_burst_length; + + phy_interface_t phy_interface; + + /* AT91RM9200 transmit */ + struct sk_buff *skb; /* holds skb until xmit interrupt completes */ + dma_addr_t skb_physaddr; /* phys addr from pci_map_single */ + int skb_length; /* saved skb length for pci_unmap_single */ + unsigned int max_tx_length; + + u64 ethtool_stats[GEM_STATS_LEN]; +}; + +static inline bool macb_is_gem(struct macb *bp) +{ + return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); +} + +static inline bool macb_is_gem_hw(void __iomem *addr) +{ + return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2); +} + +#endif /* _MACB_H */ diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig new file mode 100644 index 000000000..07d220153 --- /dev/null +++ b/drivers/net/ethernet/calxeda/Kconfig @@ -0,0 +1,8 @@ +config NET_CALXEDA_XGMAC + tristate "Calxeda 1G/10G XGMAC Ethernet driver" + depends on HAS_IOMEM && HAS_DMA + depends on ARCH_HIGHBANK || COMPILE_TEST + select CRC32 + help + This is the driver for the XGMAC Ethernet IP block found on Calxeda + Highbank platforms. diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile new file mode 100644 index 000000000..f0ef08067 --- /dev/null +++ b/drivers/net/ethernet/calxeda/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c new file mode 100644 index 000000000..63efa0dc4 --- /dev/null +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -0,0 +1,1949 @@ +/* + * Copyright 2010-2011 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* XGMAC Register definitions */ +#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */ +#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */ +#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */ +#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */ +#define XGMAC_VERSION 0x00000020 /* Version */ +#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */ +#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */ +#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */ +#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */ +#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */ +#define XGMAC_DEBUG 0x00000038 /* Debug */ +#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */ +#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8)) +#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8)) +#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */ +#define XGMAC_NUM_HASH 16 +#define XGMAC_OMR 0x00000400 +#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */ +#define XGMAC_PMT 0x00000704 /* PMT Control and Status */ +#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */ +#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */ +#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */ +#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */ +#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */ + +/* Hardware TX Statistics Counters */ +#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814 +#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818 +#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C +#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820 +#define XGMAC_MMC_TXBCFRAME_G 0x00000824 +#define XGMAC_MMC_TXMCFRAME_G 0x0000082C +#define XGMAC_MMC_TXUCFRAME_GB 0x00000864 +#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C +#define XGMAC_MMC_TXBCFRAME_GB 0x00000874 +#define XGMAC_MMC_TXUNDERFLOW 0x0000087C +#define XGMAC_MMC_TXOCTET_G_LO 0x00000884 +#define XGMAC_MMC_TXOCTET_G_HI 0x00000888 +#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C +#define XGMAC_MMC_TXFRAME_G_HI 0x00000890 +#define XGMAC_MMC_TXPAUSEFRAME 0x00000894 +#define XGMAC_MMC_TXVLANFRAME 0x0000089C + +/* Hardware RX Statistics Counters */ +#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900 +#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904 +#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908 +#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C +#define XGMAC_MMC_RXOCTET_G_LO 0x00000910 +#define XGMAC_MMC_RXOCTET_G_HI 0x00000914 +#define XGMAC_MMC_RXBCFRAME_G 0x00000918 +#define XGMAC_MMC_RXMCFRAME_G 0x00000920 +#define XGMAC_MMC_RXCRCERR 0x00000928 +#define XGMAC_MMC_RXRUNT 0x00000930 +#define XGMAC_MMC_RXJABBER 0x00000934 +#define XGMAC_MMC_RXUCFRAME_G 0x00000970 +#define XGMAC_MMC_RXLENGTHERR 0x00000978 +#define XGMAC_MMC_RXPAUSEFRAME 0x00000988 +#define XGMAC_MMC_RXOVERFLOW 0x00000990 +#define XGMAC_MMC_RXVLANFRAME 0x00000998 +#define XGMAC_MMC_RXWATCHDOG 0x000009a0 + +/* DMA Control and Status Registers */ +#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */ +#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */ +#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */ +#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */ +#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */ +#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */ +#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */ +#define XGMAC_DMA_INTR_ENA 
0x00000f1c /* Interrupt Enable */ +#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */ +#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */ +#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */ +#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */ +#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */ + +#define XGMAC_ADDR_AE 0x80000000 + +/* PMT Control and Status */ +#define XGMAC_PMT_POINTER_RESET 0x80000000 +#define XGMAC_PMT_GLBL_UNICAST 0x00000200 +#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040 +#define XGMAC_PMT_MAGIC_PKT 0x00000020 +#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004 +#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002 +#define XGMAC_PMT_POWERDOWN 0x00000001 + +#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */ +#define XGMAC_CONTROL_SPD_MASK 0x60000000 +#define XGMAC_CONTROL_SPD_1G 0x60000000 +#define XGMAC_CONTROL_SPD_2_5G 0x40000000 +#define XGMAC_CONTROL_SPD_10G 0x00000000 +#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */ +#define XGMAC_CONTROL_SARK_MASK 0x18000000 +#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */ +#define XGMAC_CONTROL_CAR_MASK 0x06000000 +#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */ +#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */ +#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */ +#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ +#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */ +#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */ +#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +/* XGMAC Frame Filter defines */ +#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */ +#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */ +#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ + +/* XGMAC FLOW CTRL defines */ +#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define XGMAC_FLOW_CTRL_PT_SHIFT 16 +#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */ +#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */ +#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */ +#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */ +#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ + +/* XGMAC_INT_STAT reg */ +#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ +#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ +#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ + +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ + +/* Programmable burst length */ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ +#define DMA_BUS_MODE_RPBL_SHIFT 17 +#define DMA_BUS_MODE_USP 0x00800000 +#define DMA_BUS_MODE_8PBL 0x01000000 +#define DMA_BUS_MODE_AAL 0x02000000 + +/* DMA Bus Mode register defines */ +#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ +#define DMA_BUS_PR_RATIO_SHIFT 14 +#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ + +/* DMA Control register defines */ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ +#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ +#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */ + +/* DMA Normal interrupt */ +#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ +#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ +#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ +#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ +#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ +#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ +#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ +#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ +#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ +#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ +#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ +#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */ +#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ +#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ + +#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ + DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE) + +#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ + DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ + DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \ + DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \ + DMA_INTR_ENA_TSE) + +/* DMA default interrupt mask */ +#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) + +/* DMA Status register defines */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define 
DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ + +/* Common MAC defines */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */ + +/* XGMAC Operation Mode Register */ +#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */ +#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */ +#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */ +#define XGMAC_OMR_TTC_MASK 0x00030000 +#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */ +#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */ +#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */ +#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */ +#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */ +#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */ +#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */ +#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */ +#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */ +#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */ + +/* XGMAC HW Features Register */ +#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */ + +#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008 + +/* XGMAC Descriptor Defines */ +#define MAX_DESC_BUF_SZ (0x2000 - 8) + +#define RXDESC_EXT_STATUS 0x00000001 +#define RXDESC_CRC_ERR 0x00000002 +#define RXDESC_RX_ERR 0x00000008 +#define RXDESC_RX_WDOG 0x00000010 +#define RXDESC_FRAME_TYPE 0x00000020 +#define RXDESC_GIANT_FRAME 0x00000080 +#define RXDESC_LAST_SEG 0x00000100 +#define RXDESC_FIRST_SEG 0x00000200 +#define RXDESC_VLAN_FRAME 0x00000400 +#define RXDESC_OVERFLOW_ERR 0x00000800 +#define RXDESC_LENGTH_ERR 0x00001000 +#define RXDESC_SA_FILTER_FAIL 0x00002000 +#define RXDESC_DESCRIPTOR_ERR 0x00004000 +#define RXDESC_ERROR_SUMMARY 0x00008000 +#define RXDESC_FRAME_LEN_OFFSET 16 +#define RXDESC_FRAME_LEN_MASK 0x3fff0000 +#define RXDESC_DA_FILTER_FAIL 0x40000000 + +#define RXDESC1_END_RING 0x00008000 + +#define RXDESC_IP_PAYLOAD_MASK 0x00000003 +#define RXDESC_IP_PAYLOAD_UDP 0x00000001 +#define RXDESC_IP_PAYLOAD_TCP 0x00000002 +#define RXDESC_IP_PAYLOAD_ICMP 0x00000003 +#define RXDESC_IP_HEADER_ERR 0x00000008 +#define RXDESC_IP_PAYLOAD_ERR 0x00000010 +#define RXDESC_IPV4_PACKET 0x00000040 +#define RXDESC_IPV6_PACKET 0x00000080 +#define TXDESC_UNDERFLOW_ERR 0x00000001 +#define TXDESC_JABBER_TIMEOUT 0x00000002 +#define TXDESC_LOCAL_FAULT 0x00000004 +#define TXDESC_REMOTE_FAULT 0x00000008 +#define TXDESC_VLAN_FRAME 0x00000010 +#define TXDESC_FRAME_FLUSHED 0x00000020 +#define TXDESC_IP_HEADER_ERR 0x00000040 +#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080 +#define TXDESC_ERROR_SUMMARY 0x00008000 +#define TXDESC_SA_CTRL_INSERT 0x00040000 +#define TXDESC_SA_CTRL_REPLACE 0x00080000 +#define TXDESC_2ND_ADDR_CHAINED 0x00100000 
+#define TXDESC_END_RING 0x00200000 +#define TXDESC_CSUM_IP 0x00400000 +#define TXDESC_CSUM_IP_PAYLD 0x00800000 +#define TXDESC_CSUM_ALL 0x00C00000 +#define TXDESC_CRC_EN_REPLACE 0x01000000 +#define TXDESC_CRC_EN_APPEND 0x02000000 +#define TXDESC_DISABLE_PAD 0x04000000 +#define TXDESC_FIRST_SEG 0x10000000 +#define TXDESC_LAST_SEG 0x20000000 +#define TXDESC_INTERRUPT 0x40000000 + +#define DESC_OWN 0x80000000 +#define DESC_BUFFER1_SZ_MASK 0x00001fff +#define DESC_BUFFER2_SZ_MASK 0x1fff0000 +#define DESC_BUFFER2_SZ_OFFSET 16 + +struct xgmac_dma_desc { + __le32 flags; + __le32 buf_size; + __le32 buf1_addr; /* Buffer 1 Address Pointer */ + __le32 buf2_addr; /* Buffer 2 Address Pointer */ + __le32 ext_status; + __le32 res[3]; +}; + +struct xgmac_extra_stats { + /* Transmit errors */ + unsigned long tx_jabber; + unsigned long tx_frame_flushed; + unsigned long tx_payload_error; + unsigned long tx_ip_header_error; + unsigned long tx_local_fault; + unsigned long tx_remote_fault; + /* Receive errors */ + unsigned long rx_watchdog; + unsigned long rx_da_filter_fail; + unsigned long rx_payload_error; + unsigned long rx_ip_header_error; + /* Tx/Rx IRQ errors */ + unsigned long tx_process_stopped; + unsigned long rx_buf_unav; + unsigned long rx_process_stopped; + unsigned long tx_early; + unsigned long fatal_bus_error; +}; + +struct xgmac_priv { + struct xgmac_dma_desc *dma_rx; + struct sk_buff **rx_skbuff; + unsigned int rx_tail; + unsigned int rx_head; + + struct xgmac_dma_desc *dma_tx; + struct sk_buff **tx_skbuff; + unsigned int tx_head; + unsigned int tx_tail; + int tx_irq_cnt; + + void __iomem *base; + unsigned int dma_buf_sz; + dma_addr_t dma_rx_phy; + dma_addr_t dma_tx_phy; + + struct net_device *dev; + struct device *device; + struct napi_struct napi; + + int max_macs; + struct xgmac_extra_stats xstats; + + spinlock_t stats_lock; + int pmt_irq; + char rx_pause; + char tx_pause; + int wolopts; + struct work_struct tx_timeout_work; +}; + +/* XGMAC Configuration Settings */ +#define MAX_MTU 9000 +#define PAUSE_TIME 0x400 + +#define DMA_RX_RING_SZ 256 +#define DMA_TX_RING_SZ 128 +/* minimum number of free TX descriptors required to wake up TX process */ +#define TX_THRESH (DMA_TX_RING_SZ/4) + +/* DMA descriptor ring helpers */ +#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) +#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) +#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) + +#define tx_dma_ring_space(p) \ + dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ) + +/* XGMAC Descriptor Access Helpers */ +static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) +{ + if (buf_sz > MAX_DESC_BUF_SZ) + p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ | + (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET); + else + p->buf_size = cpu_to_le32(buf_sz); +} + +static inline int desc_get_buf_len(struct xgmac_dma_desc *p) +{ + u32 len = le32_to_cpu(p->buf_size); + return (len & DESC_BUFFER1_SZ_MASK) + + ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); +} + +static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size, + int buf_sz) +{ + struct xgmac_dma_desc *end = p + ring_size - 1; + + memset(p, 0, sizeof(*p) * ring_size); + + for (; p <= end; p++) + desc_set_buf_len(p, buf_sz); + + end->buf_size |= cpu_to_le32(RXDESC1_END_RING); +} + +static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size) +{ + memset(p, 0, sizeof(*p) * ring_size); + p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING); +} + +static inline int desc_get_owner(struct 
xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & DESC_OWN; +} + +static inline void desc_set_rx_owner(struct xgmac_dma_desc *p) +{ + /* Clear all fields and set the owner */ + p->flags = cpu_to_le32(DESC_OWN); +} + +static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) +{ + u32 tmpflags = le32_to_cpu(p->flags); + tmpflags &= TXDESC_END_RING; + tmpflags |= flags | DESC_OWN; + p->flags = cpu_to_le32(tmpflags); +} + +static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p) +{ + u32 tmpflags = le32_to_cpu(p->flags); + tmpflags &= TXDESC_END_RING; + p->flags = cpu_to_le32(tmpflags); +} + +static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; +} + +static inline int desc_get_tx_fs(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG; +} + +static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->buf1_addr); +} + +static inline void desc_set_buf_addr(struct xgmac_dma_desc *p, + u32 paddr, int len) +{ + p->buf1_addr = cpu_to_le32(paddr); + if (len > MAX_DESC_BUF_SZ) + p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ); +} + +static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p, + u32 paddr, int len) +{ + desc_set_buf_len(p, len); + desc_set_buf_addr(p, paddr, len); +} + +static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p) +{ + u32 data = le32_to_cpu(p->flags); + u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET; + if (data & RXDESC_FRAME_TYPE) + len -= ETH_FCS_LEN; + + return len; +} + +static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr) +{ + int timeout = 1000; + u32 reg = readl(ioaddr + XGMAC_OMR); + writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR); + + while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF) + udelay(1); +} + +static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) +{ + struct xgmac_extra_stats *x = &priv->xstats; + u32 status = le32_to_cpu(p->flags); + + if (!(status & TXDESC_ERROR_SUMMARY)) + return 0; + + netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status); + if (status & TXDESC_JABBER_TIMEOUT) + x->tx_jabber++; + if (status & TXDESC_FRAME_FLUSHED) + x->tx_frame_flushed++; + if (status & TXDESC_UNDERFLOW_ERR) + xgmac_dma_flush_tx_fifo(priv->base); + if (status & TXDESC_IP_HEADER_ERR) + x->tx_ip_header_error++; + if (status & TXDESC_LOCAL_FAULT) + x->tx_local_fault++; + if (status & TXDESC_REMOTE_FAULT) + x->tx_remote_fault++; + if (status & TXDESC_PAYLOAD_CSUM_ERR) + x->tx_payload_error++; + + return -1; +} + +static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p) +{ + struct xgmac_extra_stats *x = &priv->xstats; + int ret = CHECKSUM_UNNECESSARY; + u32 status = le32_to_cpu(p->flags); + u32 ext_status = le32_to_cpu(p->ext_status); + + if (status & RXDESC_DA_FILTER_FAIL) { + netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n"); + x->rx_da_filter_fail++; + return -1; + } + + /* All frames should fit into a single buffer */ + if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG)) + return -1; + + /* Check if packet has checksum already */ + if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) && + !(ext_status & RXDESC_IP_PAYLOAD_MASK)) + ret = CHECKSUM_NONE; + + netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n", + (status & RXDESC_FRAME_TYPE) ? 
1 : 0, ret, ext_status); + + if (!(status & RXDESC_ERROR_SUMMARY)) + return ret; + + /* Handle any errors */ + if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR | + RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR)) + return -1; + + if (status & RXDESC_EXT_STATUS) { + if (ext_status & RXDESC_IP_HEADER_ERR) + x->rx_ip_header_error++; + if (ext_status & RXDESC_IP_PAYLOAD_ERR) + x->rx_payload_error++; + netdev_dbg(priv->dev, "IP checksum error - stat %08x\n", + ext_status); + return CHECKSUM_NONE; + } + + return ret; +} + +static inline void xgmac_mac_enable(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_CONTROL); + value |= MAC_ENABLE_RX | MAC_ENABLE_TX; + writel(value, ioaddr + XGMAC_CONTROL); + + value = readl(ioaddr + XGMAC_DMA_CONTROL); + value |= DMA_CONTROL_ST | DMA_CONTROL_SR; + writel(value, ioaddr + XGMAC_DMA_CONTROL); +} + +static inline void xgmac_mac_disable(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + XGMAC_DMA_CONTROL); + value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); + writel(value, ioaddr + XGMAC_DMA_CONTROL); + + value = readl(ioaddr + XGMAC_CONTROL); + value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX); + writel(value, ioaddr + XGMAC_CONTROL); +} + +static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, + int num) +{ + u32 data; + + if (addr) { + data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); + writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + XGMAC_ADDR_LOW(num)); + } else { + writel(0, ioaddr + XGMAC_ADDR_HIGH(num)); + writel(0, ioaddr + XGMAC_ADDR_LOW(num)); + } +} + +static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + int num) +{ + u32 hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num)); + lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num)); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} + +static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx) +{ + u32 reg; + unsigned int flow = 0; + + priv->rx_pause = rx; + priv->tx_pause = tx; + + if (rx || tx) { + if (rx) + flow |= XGMAC_FLOW_CTRL_RFE; + if (tx) + flow |= XGMAC_FLOW_CTRL_TFE; + + flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP; + flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT); + + writel(flow, priv->base + XGMAC_FLOW_CTRL); + + reg = readl(priv->base + XGMAC_OMR); + reg |= XGMAC_OMR_EFC; + writel(reg, priv->base + XGMAC_OMR); + } else { + writel(0, priv->base + XGMAC_FLOW_CTRL); + + reg = readl(priv->base + XGMAC_OMR); + reg &= ~XGMAC_OMR_EFC; + writel(reg, priv->base + XGMAC_OMR); + } + + return 0; +} + +static void xgmac_rx_refill(struct xgmac_priv *priv) +{ + struct xgmac_dma_desc *p; + dma_addr_t paddr; + int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { + int entry = priv->rx_head; + struct sk_buff *skb; + + p = priv->dma_rx + entry; + + if (priv->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb_ip_align(priv->dev, bufsz); + if (unlikely(skb == NULL)) + break; + + paddr = dma_map_single(priv->device, skb->data, + priv->dma_buf_sz - NET_IP_ALIGN, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, paddr)) { + dev_kfree_skb_any(skb); + break; + } + 
priv->rx_skbuff[entry] = skb; + desc_set_buf_addr(p, paddr, priv->dma_buf_sz); + } + + netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n", + priv->rx_head, priv->rx_tail); + + priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ); + desc_set_rx_owner(p); + } +} + +/** + * init_xgmac_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. + */ +static int xgmac_dma_desc_rings_init(struct net_device *dev) +{ + struct xgmac_priv *priv = netdev_priv(dev); + unsigned int bfsize; + + /* Set the Buffer size according to the MTU; + * The total buffer size including any IP offset must be a multiple + * of 8 bytes. + */ + bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8); + + netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); + + priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ, + GFP_KERNEL); + if (!priv->rx_skbuff) + return -ENOMEM; + + priv->dma_rx = dma_alloc_coherent(priv->device, + DMA_RX_RING_SZ * + sizeof(struct xgmac_dma_desc), + &priv->dma_rx_phy, + GFP_KERNEL); + if (!priv->dma_rx) + goto err_dma_rx; + + priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ, + GFP_KERNEL); + if (!priv->tx_skbuff) + goto err_tx_skb; + + priv->dma_tx = dma_alloc_coherent(priv->device, + DMA_TX_RING_SZ * + sizeof(struct xgmac_dma_desc), + &priv->dma_tx_phy, + GFP_KERNEL); + if (!priv->dma_tx) + goto err_dma_tx; + + netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, " + "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", + priv->dma_rx, priv->dma_tx, + (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); + + priv->rx_tail = 0; + priv->rx_head = 0; + priv->dma_buf_sz = bfsize; + desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz); + xgmac_rx_refill(priv); + + priv->tx_tail = 0; + priv->tx_head = 0; + desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); + + writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); + writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR); + + return 0; + +err_dma_tx: + kfree(priv->tx_skbuff); +err_tx_skb: + dma_free_coherent(priv->device, + DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), + priv->dma_rx, priv->dma_rx_phy); +err_dma_rx: + kfree(priv->rx_skbuff); + return -ENOMEM; +} + +static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) +{ + int i; + struct xgmac_dma_desc *p; + + if (!priv->rx_skbuff) + return; + + for (i = 0; i < DMA_RX_RING_SZ; i++) { + struct sk_buff *skb = priv->rx_skbuff[i]; + if (skb == NULL) + continue; + + p = priv->dma_rx + i; + dma_unmap_single(priv->device, desc_get_buf_addr(p), + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + priv->rx_skbuff[i] = NULL; + } +} + +static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) +{ + int i; + struct xgmac_dma_desc *p; + + if (!priv->tx_skbuff) + return; + + for (i = 0; i < DMA_TX_RING_SZ; i++) { + if (priv->tx_skbuff[i] == NULL) + continue; + + p = priv->dma_tx + i; + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else + dma_unmap_page(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + + if (desc_get_tx_ls(p)) + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + } +} + +static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) +{ + /* Release the DMA TX/RX socket buffers */ + xgmac_free_rx_skbufs(priv); + 
xgmac_free_tx_skbufs(priv); + + /* Free the consistent memory allocated for descriptor rings */ + if (priv->dma_tx) { + dma_free_coherent(priv->device, + DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc), + priv->dma_tx, priv->dma_tx_phy); + priv->dma_tx = NULL; + } + if (priv->dma_rx) { + dma_free_coherent(priv->device, + DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc), + priv->dma_rx, priv->dma_rx_phy); + priv->dma_rx = NULL; + } + kfree(priv->rx_skbuff); + priv->rx_skbuff = NULL; + kfree(priv->tx_skbuff); + priv->tx_skbuff = NULL; +} + +/** + * xgmac_tx: + * @priv: private driver structure + * Description: it reclaims resources after transmission completes. + */ +static void xgmac_tx_complete(struct xgmac_priv *priv) +{ + while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { + unsigned int entry = priv->tx_tail; + struct sk_buff *skb = priv->tx_skbuff[entry]; + struct xgmac_dma_desc *p = priv->dma_tx + entry; + + /* Check if the descriptor is owned by the DMA. */ + if (desc_get_owner(p)) + break; + + netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", + priv->tx_head, priv->tx_tail); + + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else + dma_unmap_page(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + + /* Check tx error on the last segment */ + if (desc_get_tx_ls(p)) { + desc_get_tx_status(priv, p); + dev_consume_skb_any(skb); + } + + priv->tx_skbuff[entry] = NULL; + priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); + } + + /* Ensure tx_tail is visible to xgmac_xmit */ + smp_mb(); + if (unlikely(netif_queue_stopped(priv->dev) && + (tx_dma_ring_space(priv) > MAX_SKB_FRAGS))) + netif_wake_queue(priv->dev); +} + +static void xgmac_tx_timeout_work(struct work_struct *work) +{ + u32 reg, value; + struct xgmac_priv *priv = + container_of(work, struct xgmac_priv, tx_timeout_work); + + napi_disable(&priv->napi); + + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + + netif_tx_lock(priv->dev); + + reg = readl(priv->base + XGMAC_DMA_CONTROL); + writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); + do { + value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000; + } while (value && (value != 0x600000)); + + xgmac_free_tx_skbufs(priv); + desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ); + priv->tx_tail = 0; + priv->tx_head = 0; + writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR); + writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); + + writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, + priv->base + XGMAC_DMA_STATUS); + + netif_tx_unlock(priv->dev); + netif_wake_queue(priv->dev); + + napi_enable(&priv->napi); + + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); +} + +static int xgmac_hw_init(struct net_device *dev) +{ + u32 value, ctrl; + int limit; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + + /* Save the ctrl register value */ + ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK; + + /* SW reset */ + value = DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + XGMAC_DMA_BUS_MODE); + limit = 15000; + while (limit-- && + (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + cpu_relax(); + if (limit < 0) + return -EBUSY; + + value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) | + (0x10 << DMA_BUS_MODE_RPBL_SHIFT) | + DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; + 
writel(value, ioaddr + XGMAC_DMA_BUS_MODE); + + writel(0, ioaddr + XGMAC_DMA_INTR_ENA); + + /* Mask power mgt interrupt */ + writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); + + /* XGMAC requires AXI bus init. This is a 'magic number' for now */ + writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); + + ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS | + XGMAC_CONTROL_CAR; + if (dev->features & NETIF_F_RXCSUM) + ctrl |= XGMAC_CONTROL_IPC; + writel(ctrl, ioaddr + XGMAC_CONTROL); + + writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL); + + /* Set the HW DMA mode and the COE */ + writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA | + XGMAC_OMR_RTC_256, + ioaddr + XGMAC_OMR); + + /* Reset the MMC counters */ + writel(1, ioaddr + XGMAC_MMC_CTRL); + return 0; +} + +/** + * xgmac_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int xgmac_open(struct net_device *dev) +{ + int ret; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + + /* Check that the MAC address is valid. If its not, refuse + * to bring the device up. The user must specify an + * address using the following linux command: + * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ + if (!is_valid_ether_addr(dev->dev_addr)) { + eth_hw_addr_random(dev); + netdev_dbg(priv->dev, "generated random MAC address %pM\n", + dev->dev_addr); + } + + memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats)); + + /* Initialize the XGMAC and descriptors */ + xgmac_hw_init(dev); + xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); + xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause); + + ret = xgmac_dma_desc_rings_init(dev); + if (ret < 0) + return ret; + + /* Enable the MAC Rx/Tx */ + xgmac_mac_enable(ioaddr); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + + return 0; +} + +/** + * xgmac_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. + */ +static int xgmac_stop(struct net_device *dev) +{ + struct xgmac_priv *priv = netdev_priv(dev); + + if (readl(priv->base + XGMAC_DMA_INTR_ENA)) + napi_disable(&priv->napi); + + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + + netif_tx_disable(dev); + + /* Disable the MAC core */ + xgmac_mac_disable(priv->base); + + /* Release and free the Rx/Tx resources */ + xgmac_free_dma_desc_rings(priv); + + return 0; +} + +/** + * xgmac_xmit: + * @skb : the socket buffer + * @dev : device pointer + * Description : Tx entry point of the driver. + */ +static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xgmac_priv *priv = netdev_priv(dev); + unsigned int entry; + int i; + u32 irq_flag; + int nfrags = skb_shinfo(skb)->nr_frags; + struct xgmac_dma_desc *desc, *first; + unsigned int desc_flags; + unsigned int len; + dma_addr_t paddr; + + priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1); + irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT; + + desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 
+ TXDESC_CSUM_ALL : 0; + entry = priv->tx_head; + desc = priv->dma_tx + entry; + first = desc; + + len = skb_headlen(skb); + paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, paddr)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + priv->tx_skbuff[entry] = skb; + desc_set_buf_addr_and_size(desc, paddr, len); + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = frag->size; + + paddr = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, paddr)) + goto dma_err; + + entry = dma_ring_incr(entry, DMA_TX_RING_SZ); + desc = priv->dma_tx + entry; + priv->tx_skbuff[entry] = skb; + + desc_set_buf_addr_and_size(desc, paddr, len); + if (i < (nfrags - 1)) + desc_set_tx_owner(desc, desc_flags); + } + + /* Interrupt on completition only for the latest segment */ + if (desc != first) + desc_set_tx_owner(desc, desc_flags | + TXDESC_LAST_SEG | irq_flag); + else + desc_flags |= TXDESC_LAST_SEG | irq_flag; + + /* Set owner on first desc last to avoid race condition */ + wmb(); + desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); + + writel(1, priv->base + XGMAC_DMA_TX_POLL); + + priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); + + /* Ensure tx_head update is visible to tx completion */ + smp_mb(); + if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) { + netif_stop_queue(dev); + /* Ensure netif_stop_queue is visible to tx completion */ + smp_mb(); + if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS) + netif_start_queue(dev); + } + return NETDEV_TX_OK; + +dma_err: + entry = priv->tx_head; + for ( ; i > 0; i--) { + entry = dma_ring_incr(entry, DMA_TX_RING_SZ); + desc = priv->dma_tx + entry; + priv->tx_skbuff[entry] = NULL; + dma_unmap_page(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + desc_clear_tx_owner(desc); + } + desc = first; + dma_unmap_single(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int xgmac_rx(struct xgmac_priv *priv, int limit) +{ + unsigned int entry; + unsigned int count = 0; + struct xgmac_dma_desc *p; + + while (count < limit) { + int ip_checksum; + struct sk_buff *skb; + int frame_len; + + if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) + break; + + entry = priv->rx_tail; + p = priv->dma_rx + entry; + if (desc_get_owner(p)) + break; + + count++; + priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ); + + /* read the status of the incoming frame */ + ip_checksum = desc_get_rx_status(priv, p); + if (ip_checksum < 0) + continue; + + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n"); + break; + } + priv->rx_skbuff[entry] = NULL; + + frame_len = desc_get_rx_frame_len(p); + netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n", + frame_len, ip_checksum); + + skb_put(skb, frame_len); + dma_unmap_single(priv->device, desc_get_buf_addr(p), + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); + + skb->protocol = eth_type_trans(skb, priv->dev); + skb->ip_summed = ip_checksum; + if (ip_checksum == CHECKSUM_NONE) + netif_receive_skb(skb); + else + napi_gro_receive(&priv->napi, skb); + } + + xgmac_rx_refill(priv); + + return count; +} + +/** + * xgmac_poll - xgmac poll method (NAPI) + * @napi : pointer to the napi structure. 
+ * @budget : maximum number of packets that the current CPU can receive from
+ *	all interfaces.
+ * Description :
+ *	This function implements the reception process.
+ *	Also it runs the TX completion thread
+ */
+static int xgmac_poll(struct napi_struct *napi, int budget)
+{
+	struct xgmac_priv *priv = container_of(napi,
+				       struct xgmac_priv, napi);
+	int work_done = 0;
+
+	xgmac_tx_complete(priv);
+	work_done = xgmac_rx(priv, budget);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+	}
+	return work_done;
+}
+
+/**
+ * xgmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ *   complete within a reasonable time. The driver will mark the error in the
+ *   netdev structure and arrange for the device to be reset to a sane state
+ *   in order to transmit a new packet.
+ */
+static void xgmac_tx_timeout(struct net_device *dev)
+{
+	struct xgmac_priv *priv = netdev_priv(dev);
+	schedule_work(&priv->tx_timeout_work);
+}
+
+/**
+ * xgmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+	int i;
+	struct xgmac_priv *priv = netdev_priv(dev);
+	void __iomem *ioaddr = priv->base;
+	unsigned int value = 0;
+	u32 hash_filter[XGMAC_NUM_HASH];
+	int reg = 1;
+	struct netdev_hw_addr *ha;
+	bool use_hash = false;
+
+	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
+		   netdev_mc_count(dev), netdev_uc_count(dev));
+
+	if (dev->flags & IFF_PROMISC)
+		value |= XGMAC_FRAME_FILTER_PR;
+
+	memset(hash_filter, 0, sizeof(hash_filter));
+
+	if (netdev_uc_count(dev) > priv->max_macs) {
+		use_hash = true;
+		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
+	}
+	netdev_for_each_uc_addr(ha, dev) {
+		if (use_hash) {
+			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+			/* The most significant 4 bits determine the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register. */
+			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		} else {
+			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+			reg++;
+		}
+	}
+
+	if (dev->flags & IFF_ALLMULTI) {
+		value |= XGMAC_FRAME_FILTER_PM;
+		goto out;
+	}
+
+	if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
+		use_hash = true;
+		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+	} else {
+		use_hash = false;
+	}
+	netdev_for_each_mc_addr(ha, dev) {
+		if (use_hash) {
+			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+			/* The most significant 4 bits determine the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register. */
+			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		} else {
+			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+			reg++;
+		}
+	}
+
+out:
+	for (i = reg; i <= priv->max_macs; i++)
+		xgmac_set_mac_addr(ioaddr, NULL, i);
+	for (i = 0; i < XGMAC_NUM_HASH; i++)
+		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
+
+	writel(value, ioaddr + XGMAC_FRAME_FILTER);
+}
+
+/**
+ * xgmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission.
Ethernet has an MTU of 1500 octets + * (ETH_DATA_LEN). This value can be changed with ifconfig. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int xgmac_change_mtu(struct net_device *dev, int new_mtu) +{ + struct xgmac_priv *priv = netdev_priv(dev); + int old_mtu; + + if ((new_mtu < 46) || (new_mtu > MAX_MTU)) { + netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU); + return -EINVAL; + } + + old_mtu = dev->mtu; + + /* return early if the buffer sizes will not change */ + if (old_mtu == new_mtu) + return 0; + + /* Stop everything, get ready to change the MTU */ + if (!netif_running(dev)) + return 0; + + /* Bring interface down, change mtu and bring interface back up */ + xgmac_stop(dev); + dev->mtu = new_mtu; + return xgmac_open(dev); +} + +static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) +{ + u32 intr_status; + struct net_device *dev = (struct net_device *)dev_id; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + + intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT); + if (intr_status & XGMAC_INT_STAT_PMT) { + netdev_dbg(priv->dev, "received Magic frame\n"); + /* clear the PMT bits 5 and 6 by reading the PMT */ + readl(ioaddr + XGMAC_PMT); + } + return IRQ_HANDLED; +} + +static irqreturn_t xgmac_interrupt(int irq, void *dev_id) +{ + u32 intr_status; + struct net_device *dev = (struct net_device *)dev_id; + struct xgmac_priv *priv = netdev_priv(dev); + struct xgmac_extra_stats *x = &priv->xstats; + + /* read the status register (CSR5) */ + intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS); + intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA); + __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS); + + /* It displays the DMA process states (CSR5 register) */ + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + if (intr_status & DMA_STATUS_TJT) { + netdev_err(priv->dev, "transmit jabber\n"); + x->tx_jabber++; + } + if (intr_status & DMA_STATUS_RU) + x->rx_buf_unav++; + if (intr_status & DMA_STATUS_RPS) { + netdev_err(priv->dev, "receive process stopped\n"); + x->rx_process_stopped++; + } + if (intr_status & DMA_STATUS_ETI) { + netdev_err(priv->dev, "transmit early interrupt\n"); + x->tx_early++; + } + if (intr_status & DMA_STATUS_TPS) { + netdev_err(priv->dev, "transmit process stopped\n"); + x->tx_process_stopped++; + schedule_work(&priv->tx_timeout_work); + } + if (intr_status & DMA_STATUS_FBI) { + netdev_err(priv->dev, "fatal bus error\n"); + x->fatal_bus_error++; + } + } + + /* TX/RX NORMAL interrupts */ + if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) { + __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling receive - used by NETCONSOLE and other diagnostic tools + * to allow network I/O with interrupts disabled. 
*/ +static void xgmac_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + xgmac_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static struct rtnl_link_stats64 * +xgmac_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *base = priv->base; + u32 count; + + spin_lock_bh(&priv->stats_lock); + writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL); + + storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO); + storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32; + + storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO); + storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G); + storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR); + storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR); + storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW); + + storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO); + storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32; + + count = readl(base + XGMAC_MMC_TXFRAME_GB_LO); + storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO); + storage->tx_packets = count; + storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW); + + writel(0, base + XGMAC_MMC_CTRL); + spin_unlock_bh(&priv->stats_lock); + return storage; +} + +static int xgmac_set_mac_address(struct net_device *dev, void *p) +{ + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0); + + return 0; +} + +static int xgmac_set_features(struct net_device *dev, netdev_features_t features) +{ + u32 ctrl; + struct xgmac_priv *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + netdev_features_t changed = dev->features ^ features; + + if (!(changed & NETIF_F_RXCSUM)) + return 0; + + ctrl = readl(ioaddr + XGMAC_CONTROL); + if (features & NETIF_F_RXCSUM) + ctrl |= XGMAC_CONTROL_IPC; + else + ctrl &= ~XGMAC_CONTROL_IPC; + writel(ctrl, ioaddr + XGMAC_CONTROL); + + return 0; +} + +static const struct net_device_ops xgmac_netdev_ops = { + .ndo_open = xgmac_open, + .ndo_start_xmit = xgmac_xmit, + .ndo_stop = xgmac_stop, + .ndo_change_mtu = xgmac_change_mtu, + .ndo_set_rx_mode = xgmac_set_rx_mode, + .ndo_tx_timeout = xgmac_tx_timeout, + .ndo_get_stats64 = xgmac_get_stats64, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = xgmac_poll_controller, +#endif + .ndo_set_mac_address = xgmac_set_mac_address, + .ndo_set_features = xgmac_set_features, +}; + +static int xgmac_ethtool_getsettings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + cmd->autoneg = 0; + cmd->duplex = DUPLEX_FULL; + ethtool_cmd_speed_set(cmd, 10000); + cmd->supported = 0; + cmd->advertising = 0; + cmd->transceiver = XCVR_INTERNAL; + return 0; +} + +static void xgmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct xgmac_priv *priv = netdev_priv(netdev); + + pause->rx_pause = priv->rx_pause; + pause->tx_pause = priv->tx_pause; +} + +static int xgmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct xgmac_priv *priv = netdev_priv(netdev); + + if (pause->autoneg) + return -EINVAL; + + return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause); +} + +struct xgmac_stats { + char stat_string[ETH_GSTRING_LEN]; + 
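+	/* stat_offset below is either a byte offset into struct xgmac_priv
+	 * (for the software counters kept in xstats) or, when is_reg is set,
+	 * an MMC register offset read directly from the MAC; see the
+	 * XGMAC_STAT/XGMAC_HW_STAT macros and xgmac_get_ethtool_stats().
+	 */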
int stat_offset; + bool is_reg; +}; + +#define XGMAC_STAT(m) \ + { #m, offsetof(struct xgmac_priv, xstats.m), false } +#define XGMAC_HW_STAT(m, reg_offset) \ + { #m, reg_offset, true } + +static const struct xgmac_stats xgmac_gstrings_stats[] = { + XGMAC_STAT(tx_frame_flushed), + XGMAC_STAT(tx_payload_error), + XGMAC_STAT(tx_ip_header_error), + XGMAC_STAT(tx_local_fault), + XGMAC_STAT(tx_remote_fault), + XGMAC_STAT(tx_early), + XGMAC_STAT(tx_process_stopped), + XGMAC_STAT(tx_jabber), + XGMAC_STAT(rx_buf_unav), + XGMAC_STAT(rx_process_stopped), + XGMAC_STAT(rx_payload_error), + XGMAC_STAT(rx_ip_header_error), + XGMAC_STAT(rx_da_filter_fail), + XGMAC_STAT(fatal_bus_error), + XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), + XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), + XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME), + XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME), + XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME), +}; +#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats) + +static void xgmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, + u64 *data) +{ + struct xgmac_priv *priv = netdev_priv(dev); + void *p = priv; + int i; + + for (i = 0; i < XGMAC_STATS_LEN; i++) { + if (xgmac_gstrings_stats[i].is_reg) + *data++ = readl(priv->base + + xgmac_gstrings_stats[i].stat_offset); + else + *data++ = *(u32 *)(p + + xgmac_gstrings_stats[i].stat_offset); + } +} + +static int xgmac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return XGMAC_STATS_LEN; + default: + return -EINVAL; + } +} + +static void xgmac_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < XGMAC_STATS_LEN; i++) { + memcpy(p, xgmac_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } +} + +static void xgmac_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct xgmac_priv *priv = netdev_priv(dev); + + if (device_can_wakeup(priv->device)) { + wol->supported = WAKE_MAGIC | WAKE_UCAST; + wol->wolopts = priv->wolopts; + } +} + +static int xgmac_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct xgmac_priv *priv = netdev_priv(dev); + u32 support = WAKE_MAGIC | WAKE_UCAST; + + if (!device_can_wakeup(priv->device)) + return -ENOTSUPP; + + if (wol->wolopts & ~support) + return -EINVAL; + + priv->wolopts = wol->wolopts; + + if (wol->wolopts) { + device_set_wakeup_enable(priv->device, 1); + enable_irq_wake(dev->irq); + } else { + device_set_wakeup_enable(priv->device, 0); + disable_irq_wake(dev->irq); + } + + return 0; +} + +static const struct ethtool_ops xgmac_ethtool_ops = { + .get_settings = xgmac_ethtool_getsettings, + .get_link = ethtool_op_get_link, + .get_pauseparam = xgmac_get_pauseparam, + .set_pauseparam = xgmac_set_pauseparam, + .get_ethtool_stats = xgmac_get_ethtool_stats, + .get_strings = xgmac_get_strings, + .get_wol = xgmac_get_wol, + .set_wol = xgmac_set_wol, + .get_sset_count = xgmac_get_sset_count, +}; + +/** + * xgmac_probe + * @pdev: platform device pointer + * Description: the driver is initialized through platform_device. 
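+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.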
+ */ +static int xgmac_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + struct net_device *ndev = NULL; + struct xgmac_priv *priv = NULL; + u32 uid; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) + return -EBUSY; + + ndev = alloc_etherdev(sizeof(struct xgmac_priv)); + if (!ndev) { + ret = -ENOMEM; + goto err_alloc; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + priv = netdev_priv(ndev); + platform_set_drvdata(pdev, ndev); + ndev->netdev_ops = &xgmac_netdev_ops; + ndev->ethtool_ops = &xgmac_ethtool_ops; + spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); + + priv->device = &pdev->dev; + priv->dev = ndev; + priv->rx_pause = 1; + priv->tx_pause = 1; + + priv->base = ioremap(res->start, resource_size(res)); + if (!priv->base) { + netdev_err(ndev, "ioremap failed\n"); + ret = -ENOMEM; + goto err_io; + } + + uid = readl(priv->base + XGMAC_VERSION); + netdev_info(ndev, "h/w version is 0x%x\n", uid); + + /* Figure out how many valid mac address filter registers we have */ + writel(1, priv->base + XGMAC_ADDR_HIGH(31)); + if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1) + priv->max_macs = 31; + else + priv->max_macs = 7; + + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq == -ENXIO) { + netdev_err(ndev, "No irq resource\n"); + ret = ndev->irq; + goto err_irq; + } + + ret = request_irq(ndev->irq, xgmac_interrupt, 0, + dev_name(&pdev->dev), ndev); + if (ret < 0) { + netdev_err(ndev, "Could not request irq %d - ret %d)\n", + ndev->irq, ret); + goto err_irq; + } + + priv->pmt_irq = platform_get_irq(pdev, 1); + if (priv->pmt_irq == -ENXIO) { + netdev_err(ndev, "No pmt irq resource\n"); + ret = priv->pmt_irq; + goto err_pmt_irq; + } + + ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0, + dev_name(&pdev->dev), ndev); + if (ret < 0) { + netdev_err(ndev, "Could not request irq %d - ret %d)\n", + priv->pmt_irq, ret); + goto err_pmt_irq; + } + + device_set_wakeup_capable(&pdev->dev, 1); + if (device_can_wakeup(priv->device)) + priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ + + ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; + if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) + ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + ndev->features |= ndev->hw_features; + ndev->priv_flags |= IFF_UNICAST_FLT; + + /* Get the MAC address */ + xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0); + if (!is_valid_ether_addr(ndev->dev_addr)) + netdev_warn(ndev, "MAC address %pM not valid", + ndev->dev_addr); + + netif_napi_add(ndev, &priv->napi, xgmac_poll, 64); + ret = register_netdev(ndev); + if (ret) + goto err_reg; + + return 0; + +err_reg: + netif_napi_del(&priv->napi); + free_irq(priv->pmt_irq, ndev); +err_pmt_irq: + free_irq(ndev->irq, ndev); +err_irq: + iounmap(priv->base); +err_io: + free_netdev(ndev); +err_alloc: + release_mem_region(res->start, resource_size(res)); + return ret; +} + +/** + * xgmac_dvr_remove + * @pdev: platform device pointer + * Description: this function resets the TX/RX processes, disables the MAC RX/TX + * changes the link status, releases the DMA descriptor rings, + * unregisters the MDIO bus and unmaps the allocated memory. 
+ */ +static int xgmac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct xgmac_priv *priv = netdev_priv(ndev); + struct resource *res; + + xgmac_mac_disable(priv->base); + + /* Free the IRQ lines */ + free_irq(ndev->irq, ndev); + free_irq(priv->pmt_irq, ndev); + + unregister_netdev(ndev); + netif_napi_del(&priv->napi); + + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) +{ + unsigned int pmt = 0; + + if (mode & WAKE_MAGIC) + pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; + if (mode & WAKE_UCAST) + pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; + + writel(pmt, ioaddr + XGMAC_PMT); +} + +static int xgmac_suspend(struct device *dev) +{ + struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); + struct xgmac_priv *priv = netdev_priv(ndev); + u32 value; + + if (!ndev || !netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + napi_disable(&priv->napi); + writel(0, priv->base + XGMAC_DMA_INTR_ENA); + + if (device_may_wakeup(priv->device)) { + /* Stop TX/RX DMA Only */ + value = readl(priv->base + XGMAC_DMA_CONTROL); + value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR); + writel(value, priv->base + XGMAC_DMA_CONTROL); + + xgmac_pmt(priv->base, priv->wolopts); + } else + xgmac_mac_disable(priv->base); + + return 0; +} + +static int xgmac_resume(struct device *dev) +{ + struct net_device *ndev = platform_get_drvdata(to_platform_device(dev)); + struct xgmac_priv *priv = netdev_priv(ndev); + void __iomem *ioaddr = priv->base; + + if (!netif_running(ndev)) + return 0; + + xgmac_pmt(ioaddr, 0); + + /* Enable the MAC and DMA */ + xgmac_mac_enable(ioaddr); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + + netif_device_attach(ndev); + napi_enable(&priv->napi); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume); + +static const struct of_device_id xgmac_of_match[] = { + { .compatible = "calxeda,hb-xgmac", }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgmac_of_match); + +static struct platform_driver xgmac_driver = { + .driver = { + .name = "calxedaxgmac", + .of_match_table = xgmac_of_match, + }, + .probe = xgmac_probe, + .remove = xgmac_remove, + .driver.pm = &xgmac_pm_ops, +}; + +module_platform_driver(xgmac_driver); + +MODULE_AUTHOR("Calxeda, Inc."); +MODULE_DESCRIPTION("Calxeda 10G XGMAC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig new file mode 100644 index 000000000..7daa088a9 --- /dev/null +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -0,0 +1,130 @@ +# +# Chelsio device configuration +# + +config NET_VENDOR_CHELSIO + bool "Chelsio devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Chelsio devices. If you say Y, you will be asked for + your specific card in the following questions. 
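+# As an illustration (a minimal sketch, not a complete configuration), a
+# .config fragment that keeps this vendor guard enabled and builds the T1
+# driver below as a module would contain:
+#   CONFIG_NET_VENDOR_CHELSIO=y
+#   CONFIG_CHELSIO_T1=m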
+ +if NET_VENDOR_CHELSIO + +config CHELSIO_T1 + tristate "Chelsio 10Gb Ethernet support" + depends on PCI + select CRC32 + select MDIO + ---help--- + This driver supports Chelsio gigabit and 10-gigabit + Ethernet cards. More information about adapter features and + performance tuning is in . + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module, choose M here: the module + will be called cxgb. + +config CHELSIO_T1_1G + bool "Chelsio gigabit Ethernet support" + depends on CHELSIO_T1 + ---help--- + Enables support for Chelsio's gigabit Ethernet PCI cards. If you + are using only 10G cards say 'N' here. + +config CHELSIO_T3 + tristate "Chelsio Communications T3 10Gb Ethernet support" + depends on PCI && INET + select FW_LOADER + select MDIO + ---help--- + This driver supports Chelsio T3-based gigabit and 10Gb Ethernet + adapters. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module, choose M here: the module + will be called cxgb3. + +config CHELSIO_T4 + tristate "Chelsio Communications T4/T5 Ethernet support" + depends on PCI && (IPV6 || IPV6=n) + select FW_LOADER + select MDIO + ---help--- + This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet + adapter and T5 based 40Gb Ethernet adapter. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module choose M here; the module + will be called cxgb4. + +config CHELSIO_T4_DCB + bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards" + default n + depends on CHELSIO_T4 && DCB + ---help--- + Enable DCB support through rtNetlink interface. + Say Y here if you want to enable Data Center Bridging (DCB) support + in the driver. + + If unsure, say N. + +config CHELSIO_T4_FCOE + bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards" + default n + depends on CHELSIO_T4 && CHELSIO_T4_DCB && FCOE + ---help--- + Enable FCoE offload features. + Say Y here if you want to enable Fibre Channel over Ethernet (FCoE) support + in the driver. + + If unsure, say N. + +config CHELSIO_T4VF + tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support" + depends on PCI + ---help--- + This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet + adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual + Functions. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module choose M here; the module + will be called cxgb4vf. + +endif # NET_VENDOR_CHELSIO diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile new file mode 100644 index 000000000..390510b5e --- /dev/null +++ b/drivers/net/ethernet/chelsio/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Chelsio network device drivers. 
+# + +obj-$(CONFIG_CHELSIO_T1) += cxgb/ +obj-$(CONFIG_CHELSIO_T3) += cxgb3/ +obj-$(CONFIG_CHELSIO_T4) += cxgb4/ +obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/ diff --git a/drivers/net/ethernet/chelsio/cxgb/Makefile b/drivers/net/ethernet/chelsio/cxgb/Makefile new file mode 100644 index 000000000..57a4b262f --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/Makefile @@ -0,0 +1,9 @@ +# +# Chelsio T1 driver +# + +obj-$(CONFIG_CHELSIO_T1) += cxgb.o + +cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o +cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \ + mv88x201x.o my3126.o $(cxgb-y) diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h new file mode 100644 index 000000000..53b1f9478 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/common.h @@ -0,0 +1,351 @@ +/***************************************************************************** + * * + * File: common.h * + * $Revision: 1.21 $ * + * $Date: 2005/06/22 00:43:25 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#define pr_fmt(fmt) "cxgb: " fmt + +#ifndef _CXGB_COMMON_H_ +#define _CXGB_COMMON_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver" +#define DRV_NAME "cxgb" +#define DRV_VERSION "2.2" + +#define CH_DEVICE(devid, ssid, idx) \ + { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } + +#define SUPPORTED_PAUSE (1 << 13) +#define SUPPORTED_LOOPBACK (1 << 15) + +#define ADVERTISED_PAUSE (1 << 13) +#define ADVERTISED_ASYM_PAUSE (1 << 14) + +typedef struct adapter adapter_t; + +struct t1_rx_mode { + struct net_device *dev; +}; + +#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC) +#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI) +#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev)) +#define t1_get_netdev(rm) (rm->dev) + +#define MAX_NPORTS 4 +#define PORT_MASK ((1 << MAX_NPORTS) - 1) +#define NMTUS 8 +#define TCB_SIZE 128 + +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff + +enum { + CHBT_BOARD_N110, + CHBT_BOARD_N210, + CHBT_BOARD_7500, + CHBT_BOARD_8000, + CHBT_BOARD_CHT101, + CHBT_BOARD_CHT110, + CHBT_BOARD_CHT210, + CHBT_BOARD_CHT204, + CHBT_BOARD_CHT204V, + CHBT_BOARD_CHT204E, + CHBT_BOARD_CHN204, + CHBT_BOARD_COUGAR, + CHBT_BOARD_6800, + CHBT_BOARD_SIMUL, +}; + +enum { + CHBT_TERM_FPGA, + CHBT_TERM_T1, + CHBT_TERM_T2, + CHBT_TERM_T3 +}; + +enum { + CHBT_MAC_CHELSIO_A, + CHBT_MAC_IXF1010, + CHBT_MAC_PM3393, + CHBT_MAC_VSC7321, + 
CHBT_MAC_DUMMY +}; + +enum { + CHBT_PHY_88E1041, + CHBT_PHY_88E1111, + CHBT_PHY_88X2010, + CHBT_PHY_XPAK, + CHBT_PHY_MY3126, + CHBT_PHY_8244, + CHBT_PHY_DUMMY +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +/* Revisions of T1 chip */ +enum { + TERM_T1A = 0, + TERM_T1B = 1, + TERM_T2 = 3 +}; + +struct sge_params { + unsigned int cmdQ_size[2]; + unsigned int freelQ_size[2]; + unsigned int large_buf_capacity; + unsigned int rx_coalesce_usecs; + unsigned int last_rx_coalesce_raw; + unsigned int default_rx_coalesce_usecs; + unsigned int sample_interval_usecs; + unsigned int coalesce_enable; + unsigned int polling; +}; + +struct chelsio_pci_params { + unsigned short speed; + unsigned char width; + unsigned char is_pcix; +}; + +struct tp_params { + unsigned int pm_size; + unsigned int cm_size; + unsigned int pm_rx_base; + unsigned int pm_tx_base; + unsigned int pm_rx_pg_size; + unsigned int pm_tx_pg_size; + unsigned int pm_rx_num_pgs; + unsigned int pm_tx_num_pgs; + unsigned int rx_coalescing_size; + unsigned int use_5tuple_mode; +}; + +struct mc5_params { + unsigned int mode; /* selects MC5 width */ + unsigned int nservers; /* size of server region */ + unsigned int nroutes; /* size of routing region */ +}; + +/* Default MC5 region sizes */ +#define DEFAULT_SERVER_REGION_LEN 256 +#define DEFAULT_RT_REGION_LEN 1024 + +struct adapter_params { + struct sge_params sge; + struct mc5_params mc5; + struct tp_params tp; + struct chelsio_pci_params pci; + + const struct board_info *brd_info; + + unsigned short mtus[NMTUS]; + unsigned int nports; /* # of ethernet ports */ + unsigned int stats_update_period; + unsigned short chip_revision; + unsigned char chip_version; + unsigned char is_asic; + unsigned char has_msi; +}; + +struct link_config { + unsigned int supported; /* link capabilities */ + unsigned int advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_duplex; /* duplex user has requested */ + unsigned char duplex; /* actual link duplex */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ +}; + +struct cmac; +struct cphy; + +struct port_info { + struct net_device *dev; + struct cmac *mac; + struct cphy *phy; + struct link_config link_config; + struct net_device_stats netstats; +}; + +struct sge; +struct peespi; + +struct adapter { + u8 __iomem *regs; + struct pci_dev *pdev; + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + + const char *name; + int msg_enable; + u32 mmio_len; + + struct work_struct ext_intr_handler_task; + struct adapter_params params; + + /* Terminator modules. 
*/ + struct sge *sge; + struct peespi *espi; + struct petp *tp; + + struct napi_struct napi; + struct port_info port[MAX_NPORTS]; + struct delayed_work stats_update_task; + struct timer_list stats_update_timer; + + spinlock_t tpi_lock; + spinlock_t work_lock; + spinlock_t mac_lock; + + /* guards async operations */ + spinlock_t async_lock ____cacheline_aligned; + u32 slow_intr_mask; + int t1powersave; +}; + +enum { /* adapter flags */ + FULL_INIT_DONE = 1 << 0, +}; + +struct mdio_ops; +struct gmac; +struct gphy; + +struct board_info { + unsigned char board; + unsigned char port_number; + unsigned long caps; + unsigned char chip_term; + unsigned char chip_mac; + unsigned char chip_phy; + unsigned int clock_core; + unsigned int clock_mc3; + unsigned int clock_mc4; + unsigned int espi_nports; + unsigned int clock_elmer0; + unsigned char mdio_mdien; + unsigned char mdio_mdiinv; + unsigned char mdio_mdc; + unsigned char mdio_phybaseaddr; + const struct gmac *gmac; + const struct gphy *gphy; + const struct mdio_ops *mdio_ops; + const char *desc; +}; + +static inline int t1_is_asic(const adapter_t *adapter) +{ + return adapter->params.is_asic; +} + +extern const struct pci_device_id t1_pci_tbl[]; + +static inline int adapter_matches_type(const adapter_t *adapter, + int version, int revision) +{ + return adapter->params.chip_version == version && + adapter->params.chip_revision == revision; +} + +#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B) +#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2) + +/* Returns true if an adapter supports VLAN acceleration and TSO */ +static inline int vlan_tso_capable(const adapter_t *adapter) +{ + return !t1_is_T1B(adapter); +} + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +#define board_info(adapter) ((adapter)->params.brd_info) +#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full) + +static inline unsigned int core_ticks_per_usec(const adapter_t *adap) +{ + return board_info(adap)->clock_core / 1000000; +} + +int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp); +int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); +int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value); +int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value); + +void t1_interrupts_enable(adapter_t *adapter); +void t1_interrupts_disable(adapter_t *adapter); +void t1_interrupts_clear(adapter_t *adapter); +int t1_elmer0_ext_intr_handler(adapter_t *adapter); +void t1_elmer0_ext_intr(adapter_t *adapter); +int t1_slow_intr_handler(adapter_t *adapter); + +int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); +const struct board_info *t1_get_board_info(unsigned int board_id); +const struct board_info *t1_get_board_info_from_ids(unsigned int devid, + unsigned short ssid); +int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data); +int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, + struct adapter_params *p); +int t1_init_hw_modules(adapter_t *adapter); +int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi); +void t1_free_sw_modules(adapter_t *adapter); +void t1_fatal_err(adapter_t *adapter); +void t1_link_changed(adapter_t *adapter, int port_id); +void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat, + int speed, int duplex, int pause); +#endif /* _CXGB_COMMON_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h new file 
mode 100644 index 000000000..a4d2a4c08 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h @@ -0,0 +1,174 @@ +/***************************************************************************** + * * + * File: cphy.h * + * $Revision: 1.7 $ * + * $Date: 2005/06/21 18:29:47 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_CPHY_H_ +#define _CXGB_CPHY_H_ + +#include "common.h" + +struct mdio_ops { + void (*init)(adapter_t *adapter, const struct board_info *bi); + int (*read)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr); + int (*write)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val); + unsigned mode_support; +}; + +/* PHY interrupt types */ +enum { + cphy_cause_link_change = 0x1, + cphy_cause_error = 0x2, + cphy_cause_fifo_error = 0x3 +}; + +enum { + PHY_LINK_UP = 0x1, + PHY_AUTONEG_RDY = 0x2, + PHY_AUTONEG_EN = 0x4 +}; + +struct cphy; + +/* PHY operations */ +struct cphy_ops { + void (*destroy)(struct cphy *); + int (*reset)(struct cphy *, int wait); + + int (*interrupt_enable)(struct cphy *); + int (*interrupt_disable)(struct cphy *); + int (*interrupt_clear)(struct cphy *); + int (*interrupt_handler)(struct cphy *); + + int (*autoneg_enable)(struct cphy *); + int (*autoneg_disable)(struct cphy *); + int (*autoneg_restart)(struct cphy *); + + int (*advertise)(struct cphy *phy, unsigned int advertise_map); + int (*set_loopback)(struct cphy *, int on); + int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); + int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc); + + u32 mmds; +}; + +/* A PHY instance */ +struct cphy { + int state; /* Link status state machine */ + adapter_t *adapter; /* associated adapter */ + + struct delayed_work phy_update; + + u16 bmsr; + int count; + int act_count; + int act_on; + + u32 elmer_gpo; + + const struct cphy_ops *ops; /* PHY operations */ + struct mdio_if_info mdio; + struct cphy_instance *instance; +}; + +/* Convenience MDIO read/write wrappers */ +static inline int cphy_mdio_read(struct cphy *cphy, int mmd, int reg, + unsigned int *valp) +{ + int rc = cphy->mdio.mdio_read(cphy->mdio.dev, cphy->mdio.prtad, mmd, + reg); + *valp = (rc >= 0) ? rc : -1; + return (rc >= 0) ? 
0 : rc; +} + +static inline int cphy_mdio_write(struct cphy *cphy, int mmd, int reg, + unsigned int val) +{ + return cphy->mdio.mdio_write(cphy->mdio.dev, cphy->mdio.prtad, mmd, + reg, val); +} + +static inline int simple_mdio_read(struct cphy *cphy, int reg, + unsigned int *valp) +{ + return cphy_mdio_read(cphy, MDIO_DEVAD_NONE, reg, valp); +} + +static inline int simple_mdio_write(struct cphy *cphy, int reg, + unsigned int val) +{ + return cphy_mdio_write(cphy, MDIO_DEVAD_NONE, reg, val); +} + +/* Convenience initializer */ +static inline void cphy_init(struct cphy *phy, struct net_device *dev, + int phy_addr, struct cphy_ops *phy_ops, + const struct mdio_ops *mdio_ops) +{ + struct adapter *adapter = netdev_priv(dev); + phy->adapter = adapter; + phy->ops = phy_ops; + if (mdio_ops) { + phy->mdio.prtad = phy_addr; + phy->mdio.mmds = phy_ops->mmds; + phy->mdio.mode_support = mdio_ops->mode_support; + phy->mdio.mdio_read = mdio_ops->read; + phy->mdio.mdio_write = mdio_ops->write; + } + phy->mdio.dev = dev; +} + +/* Operations of the PHY-instance factory */ +struct gphy { + /* Construct a PHY instance with the given PHY address */ + struct cphy *(*create)(struct net_device *dev, int phy_addr, + const struct mdio_ops *mdio_ops); + + /* + * Reset the PHY chip. This resets the whole PHY chip, not individual + * ports. + */ + int (*reset)(adapter_t *adapter); +}; + +extern const struct gphy t1_my3126_ops; +extern const struct gphy t1_mv88e1xxx_ops; +extern const struct gphy t1_vsc8244_ops; +extern const struct gphy t1_mv88x201x_ops; + +#endif /* _CXGB_CPHY_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h new file mode 100644 index 000000000..5249686af --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h @@ -0,0 +1,638 @@ +/***************************************************************************** + * * + * File: cpl5_cmd.h * + * $Revision: 1.6 $ * + * $Date: 2005/06/21 18:29:47 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_CPL5_CMD_H_ +#define _CXGB_CPL5_CMD_H_ + +#include + +#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD) +#error "Adjust your defines" +#endif + +enum CPL_opcode { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_OPEN_RPL = 0x2, + CPL_PASS_ESTABLISH = 0x3, + CPL_PASS_ACCEPT_REQ = 0xE, + CPL_PASS_ACCEPT_RPL = 0x4, + CPL_ACT_OPEN_REQ = 0x5, + CPL_ACT_OPEN_RPL = 0x6, + CPL_CLOSE_CON_REQ = 0x7, + CPL_CLOSE_CON_RPL = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_CLOSE_LISTSRV_RPL = 0xA, + CPL_ABORT_REQ = 0xB, + CPL_ABORT_RPL = 0xC, + CPL_PEER_CLOSE = 0xD, + CPL_ACT_ESTABLISH = 0x17, + + CPL_GET_TCB = 0x24, + CPL_GET_TCB_RPL = 0x25, + CPL_SET_TCB = 0x26, + CPL_SET_TCB_FIELD = 0x27, + CPL_SET_TCB_RPL = 0x28, + CPL_PCMD = 0x29, + + CPL_PCMD_READ = 0x31, + CPL_PCMD_READ_RPL = 0x32, + + + CPL_RX_DATA = 0xA0, + CPL_RX_DATA_DDP = 0xA1, + CPL_RX_DATA_ACK = 0xA3, + CPL_RX_PKT = 0xAD, + CPL_RX_ISCSI_HDR = 0xAF, + CPL_TX_DATA_ACK = 0xB0, + CPL_TX_DATA = 0xB1, + CPL_TX_PKT = 0xB2, + CPL_TX_PKT_LSO = 0xB6, + + CPL_RTE_DELETE_REQ = 0xC0, + CPL_RTE_DELETE_RPL = 0xC1, + CPL_RTE_WRITE_REQ = 0xC2, + CPL_RTE_WRITE_RPL = 0xD3, + CPL_RTE_READ_REQ = 0xC3, + CPL_RTE_READ_RPL = 0xC4, + CPL_L2T_WRITE_REQ = 0xC5, + CPL_L2T_WRITE_RPL = 0xD4, + CPL_L2T_READ_REQ = 0xC6, + CPL_L2T_READ_RPL = 0xC7, + CPL_SMT_WRITE_REQ = 0xC8, + CPL_SMT_WRITE_RPL = 0xD5, + CPL_SMT_READ_REQ = 0xC9, + CPL_SMT_READ_RPL = 0xCA, + CPL_ARP_MISS_REQ = 0xCD, + CPL_ARP_MISS_RPL = 0xCE, + CPL_MIGRATE_C2T_REQ = 0xDC, + CPL_MIGRATE_C2T_RPL = 0xDD, + CPL_ERROR = 0xD7, + + /* internal: driver -> TOM */ + CPL_MSS_CHANGE = 0xE1 +}; + +#define NUM_CPL_CMDS 256 + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_PARITY = 1, + CPL_ERR_TCAM_FULL = 3, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_GENERAL = 99 +}; + +enum { + CPL_CONN_POLICY_AUTO = 0, + CPL_CONN_POLICY_ASK = 1, + CPL_CONN_POLICY_DENY = 3 +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_TCPDDP = 1, + ULP_MODE_ISCSI = 2, + ULP_MODE_IWARP = 3, + ULP_MODE_SSL = 4 +}; + +enum { + CPL_PASS_OPEN_ACCEPT, + CPL_PASS_OPEN_REJECT +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, + CPL_ABORT_POST_CLOSE_REQ = 2 +}; + +enum { // TX_PKT_LSO ethernet types + CPL_ETH_II, + CPL_ETH_II_VLAN, + CPL_ETH_802_3, + CPL_ETH_802_3_VLAN +}; + +union opcode_tid { + u32 opcode_tid; + u8 opcode; +}; + +#define S_OPCODE 24 +#define V_OPCODE(x) ((x) << S_OPCODE) +#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) +#define G_TID(x) ((x) & 0xFFFFFF) + +/* tid is assumed to be 24-bits */ +#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) + +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) + +/* extract the TID from a CPL command */ +#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd)))) + +struct tcp_options { + u16 mss; + u8 wsf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd:4; + u8 ecn:1; + u8 sack:1; + u8 tstamp:1; +#else + u8 tstamp:1; + u8 sack:1; + u8 ecn:1; + u8 rsvd:4; +#endif +}; + +struct cpl_pass_open_req { + union opcode_tid ot; + u16 
local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 opt0h; + u32 opt0l; + u32 peer_netmask; + u32 opt1; +}; + +struct cpl_pass_open_rpl { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u8 resvd[7]; + u8 status; +}; + +struct cpl_pass_establish { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 tos_tid; + u8 l2t_idx; + u8 rsvd[3]; + u32 snd_isn; + u32 rcv_isn; +}; + +struct cpl_pass_accept_req { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 tos_tid; + struct tcp_options tcp_options; + u8 dst_mac[6]; + u16 vlan_tag; + u8 src_mac[6]; + u8 rsvd[2]; + u32 rcv_isn; + u32 unknown_tcp_options; +}; + +struct cpl_pass_accept_rpl { + union opcode_tid ot; + u32 rsvd0; + u32 rsvd1; + u32 peer_ip; + u32 opt0h; + union { + u32 opt0l; + struct { + u8 rsvd[3]; + u8 status; + }; + }; +}; + +struct cpl_act_open_req { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 opt0h; + u32 opt0l; + u32 iff_vlantag; + u32 rsvd; +}; + +struct cpl_act_open_rpl { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 new_tid; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_act_establish { + union opcode_tid ot; + u16 local_port; + u16 peer_port; + u32 local_ip; + u32 peer_ip; + u32 tos_tid; + u32 rsvd; + u32 snd_isn; + u32 rcv_isn; +}; + +struct cpl_get_tcb { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_get_tcb_rpl { + union opcode_tid ot; + u16 len; + u8 rsvd; + u8 status; +}; + +struct cpl_set_tcb { + union opcode_tid ot; + u16 len; + u16 rsvd; +}; + +struct cpl_set_tcb_field { + union opcode_tid ot; + u8 rsvd[3]; + u8 offset; + u32 mask; + u32 val; +}; + +struct cpl_set_tcb_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_pcmd { + union opcode_tid ot; + u16 dlen_in; + u16 dlen_out; + u32 pcmd_parm[2]; +}; + +struct cpl_pcmd_read { + union opcode_tid ot; + u32 rsvd1; + u16 rsvd2; + u32 addr; + u16 len; +}; + +struct cpl_pcmd_read_rpl { + union opcode_tid ot; + u16 len; +}; + +struct cpl_close_con_req { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_close_con_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; + u32 snd_nxt; + u32 rcv_nxt; +}; + +struct cpl_close_listserv_req { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_close_listserv_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req { + union opcode_tid ot; + u32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_abort_rpl { + union opcode_tid ot; + u32 rsvd0; + u8 rsvd1; + u8 status; + u8 rsvd2[6]; +}; + +struct cpl_peer_close { + union opcode_tid ot; + u32 rsvd; +}; + +struct cpl_tx_data { + union opcode_tid ot; + u32 len; + u32 rsvd0; + u16 urg; + u16 flags; +}; + +struct cpl_tx_data_ack { + union opcode_tid ot; + u32 ack_seq; +}; + +struct cpl_rx_data { + union opcode_tid ot; + u32 len; + u32 seq; + u16 urg; + u8 rsvd; + u8 status; +}; + +struct cpl_rx_data_ack { + union opcode_tid ot; + u32 credit; +}; + +struct cpl_rx_data_ddp { + union opcode_tid ot; + u32 len; + u32 seq; + u32 nxt_seq; + u32 ulp_crc; + u16 ddp_status; + u8 rsvd; + u8 status; +}; + +/* + * We want this header's alignment to be no more stringent than 2-byte aligned. + * All fields are u8 or u16 except for the length. However that field is not + * used so we break it into 2 16-bit parts to easily meet our alignment needs. 
+ */ +struct cpl_tx_pkt { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 ip_csum_dis:1; + u8 l4_csum_dis:1; + u8 vlan_valid:1; + u8 rsvd:1; +#else + u8 rsvd:1; + u8 vlan_valid:1; + u8 l4_csum_dis:1; + u8 ip_csum_dis:1; + u8 iff:4; +#endif + u16 vlan; + u16 len_hi; + u16 len_lo; +}; + +struct cpl_tx_pkt_lso { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 ip_csum_dis:1; + u8 l4_csum_dis:1; + u8 vlan_valid:1; + u8 :1; +#else + u8 :1; + u8 vlan_valid:1; + u8 l4_csum_dis:1; + u8 ip_csum_dis:1; + u8 iff:4; +#endif + u16 vlan; + __be32 len; + + u8 rsvd[5]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 tcp_hdr_words:4; + u8 ip_hdr_words:4; +#else + u8 ip_hdr_words:4; + u8 tcp_hdr_words:4; +#endif + __be16 eth_type_mss; +}; + +struct cpl_rx_pkt { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 csum_valid:1; + u8 bad_pkt:1; + u8 vlan_valid:1; + u8 rsvd:1; +#else + u8 rsvd:1; + u8 vlan_valid:1; + u8 bad_pkt:1; + u8 csum_valid:1; + u8 iff:4; +#endif + u16 csum; + u16 vlan; + u16 len; +}; + +struct cpl_l2t_write_req { + union opcode_tid ot; + u32 params; + u8 rsvd1[2]; + u8 dst_mac[6]; +}; + +struct cpl_l2t_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_l2t_read_req { + union opcode_tid ot; + u8 rsvd[3]; + u8 l2t_idx; +}; + +struct cpl_l2t_read_rpl { + union opcode_tid ot; + u32 params; + u8 rsvd1[2]; + u8 dst_mac[6]; +}; + +struct cpl_smt_write_req { + union opcode_tid ot; + u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:1; + u8 mtu_idx:3; + u8 iff:4; +#else + u8 iff:4; + u8 mtu_idx:3; + u8 rsvd1:1; +#endif + u16 rsvd2; + u16 rsvd3; + u8 src_mac1[6]; + u16 rsvd4; + u8 src_mac0[6]; +}; + +struct cpl_smt_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_smt_read_req { + union opcode_tid ot; + u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:4; + u8 iff:4; +#else + u8 iff:4; + u8 rsvd1:4; +#endif + u16 rsvd2; +}; + +struct cpl_smt_read_rpl { + union opcode_tid ot; + u8 status; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:1; + u8 mtu_idx:3; + u8 rsvd0:4; +#else + u8 rsvd0:4; + u8 mtu_idx:3; + u8 rsvd1:1; +#endif + u16 rsvd2; + u16 rsvd3; + u8 src_mac1[6]; + u16 rsvd4; + u8 src_mac0[6]; +}; + +struct cpl_rte_delete_req { + union opcode_tid ot; + u32 params; +}; + +struct cpl_rte_delete_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rte_write_req { + union opcode_tid ot; + u32 params; + u32 netmask; + u32 faddr; +}; + +struct cpl_rte_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rte_read_req { + union opcode_tid ot; + u32 params; +}; + +struct cpl_rte_read_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd0[2]; + u8 l2t_idx; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 rsvd1:7; + u8 select:1; +#else + u8 select:1; + u8 rsvd1:7; +#endif + u8 rsvd2[3]; + u32 addr; +}; + +struct cpl_mss_change { + union opcode_tid ot; + u32 mss; +}; + +#endif /* _CXGB_CPL5_CMD_H_ */ + diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c new file mode 100644 index 000000000..f5f1b0b51 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -0,0 +1,1357 @@ +/***************************************************************************** + * * + * File: cxgb2.c * + * $Revision: 1.25 $ * + * $Date: 2005/06/22 00:43:25 $ * + * Description: * + * Chelsio 10Gb Ethernet Driver. 
* + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "common.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cpl5_cmd.h" +#include "regs.h" +#include "gmac.h" +#include "cphy.h" +#include "sge.h" +#include "tp.h" +#include "espi.h" +#include "elmer0.h" + +#include + +static inline void schedule_mac_stats_update(struct adapter *ap, int secs) +{ + schedule_delayed_work(&ap->stats_update_task, secs * HZ); +} + +static inline void cancel_mac_stats_update(struct adapter *ap) +{ + cancel_delayed_work(&ap->stats_update_task); +} + +#define MAX_CMDQ_ENTRIES 16384 +#define MAX_CMDQ1_ENTRIES 1024 +#define MAX_RX_BUFFERS 16384 +#define MAX_RX_JUMBO_BUFFERS 16384 +#define MAX_TX_BUFFERS_HIGH 16384U +#define MAX_TX_BUFFERS_LOW 1536U +#define MAX_TX_BUFFERS 1460U +#define MIN_FL_ENTRIES 32 + +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +/* + * The EEPROM is actually bigger but only the first few bytes are used so we + * only report those. + */ +#define EEPROM_SIZE 32 + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("GPL"); + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap"); + +#define HCLOCK 0x0 +#define LCLOCK 0x1 + +/* T1 cards powersave mode */ +static int t1_clock(struct adapter *adapter, int mode); +static int t1powersave = 1; /* HW default is powersave mode. */ + +module_param(t1powersave, int, 0); +MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode"); + +static int disable_msi = 0; +module_param(disable_msi, int, 0); +MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + +static const char pci_speed[][4] = { + "33", "66", "100", "133" +}; + +/* + * Setup MAC to receive the types of packets we want. 
+ */ +static void t1_set_rxmode(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + struct cmac *mac = adapter->port[dev->if_port].mac; + struct t1_rx_mode rm; + + rm.dev = dev; + mac->ops->set_rx_mode(mac, &rm); +} + +static void link_report(struct port_info *p) +{ + if (!netif_carrier_ok(p->dev)) + netdev_info(p->dev, "link down\n"); + else { + const char *s = "10Mbps"; + + switch (p->link_config.speed) { + case SPEED_10000: s = "10Gbps"; break; + case SPEED_1000: s = "1000Mbps"; break; + case SPEED_100: s = "100Mbps"; break; + } + + netdev_info(p->dev, "link up, %s, %s-duplex\n", + s, p->link_config.duplex == DUPLEX_FULL + ? "full" : "half"); + } +} + +void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat, + int speed, int duplex, int pause) +{ + struct port_info *p = &adapter->port[port_id]; + + if (link_stat != netif_carrier_ok(p->dev)) { + if (link_stat) + netif_carrier_on(p->dev); + else + netif_carrier_off(p->dev); + link_report(p); + + /* multi-ports: inform toe */ + if ((speed > 0) && (adapter->params.nports > 1)) { + unsigned int sched_speed = 10; + switch (speed) { + case SPEED_1000: + sched_speed = 1000; + break; + case SPEED_100: + sched_speed = 100; + break; + case SPEED_10: + sched_speed = 10; + break; + } + t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed); + } + } +} + +static void link_start(struct port_info *p) +{ + struct cmac *mac = p->mac; + + mac->ops->reset(mac); + if (mac->ops->macaddress_set) + mac->ops->macaddress_set(mac, p->dev->dev_addr); + t1_set_rxmode(p->dev); + t1_link_start(p->phy, mac, &p->link_config); + mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); +} + +static void enable_hw_csum(struct adapter *adapter) +{ + if (adapter->port[0].dev->hw_features & NETIF_F_TSO) + t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */ + t1_tp_set_tcp_checksum_offload(adapter->tp, 1); +} + +/* + * Things to do upon first use of a card. + * This must run with the rtnl lock held. + */ +static int cxgb_up(struct adapter *adapter) +{ + int err = 0; + + if (!(adapter->flags & FULL_INIT_DONE)) { + err = t1_init_hw_modules(adapter); + if (err) + goto out_err; + + enable_hw_csum(adapter); + adapter->flags |= FULL_INIT_DONE; + } + + t1_interrupts_clear(adapter); + + adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev); + err = request_irq(adapter->pdev->irq, t1_interrupt, + adapter->params.has_msi ? 0 : IRQF_SHARED, + adapter->name, adapter); + if (err) { + if (adapter->params.has_msi) + pci_disable_msi(adapter->pdev); + + goto out_err; + } + + t1_sge_start(adapter->sge); + t1_interrupts_enable(adapter); +out_err: + return err; +} + +/* + * Release resources when all the ports have been stopped. 
+ */ +static void cxgb_down(struct adapter *adapter) +{ + t1_sge_stop(adapter->sge); + t1_interrupts_disable(adapter); + free_irq(adapter->pdev->irq, adapter); + if (adapter->params.has_msi) + pci_disable_msi(adapter->pdev); +} + +static int cxgb_open(struct net_device *dev) +{ + int err; + struct adapter *adapter = dev->ml_priv; + int other_ports = adapter->open_device_map & PORT_MASK; + + napi_enable(&adapter->napi); + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { + napi_disable(&adapter->napi); + return err; + } + + __set_bit(dev->if_port, &adapter->open_device_map); + link_start(&adapter->port[dev->if_port]); + netif_start_queue(dev); + if (!other_ports && adapter->params.stats_update_period) + schedule_mac_stats_update(adapter, + adapter->params.stats_update_period); + + t1_vlan_mode(adapter, dev->features); + return 0; +} + +static int cxgb_close(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct cmac *mac = p->mac; + + netif_stop_queue(dev); + napi_disable(&adapter->napi); + mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); + netif_carrier_off(dev); + + clear_bit(dev->if_port, &adapter->open_device_map); + if (adapter->params.stats_update_period && + !(adapter->open_device_map & PORT_MASK)) { + /* Stop statistics accumulation. */ + smp_mb__after_atomic(); + spin_lock(&adapter->work_lock); /* sync with update task */ + spin_unlock(&adapter->work_lock); + cancel_mac_stats_update(adapter); + } + + if (!adapter->open_device_map) + cxgb_down(adapter); + return 0; +} + +static struct net_device_stats *t1_get_stats(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct net_device_stats *ns = &p->netstats; + const struct cmac_statistics *pstats; + + /* Do a full update of the MAC stats */ + pstats = p->mac->ops->statistics_update(p->mac, + MAC_STATS_UPDATE_FULL); + + ns->tx_packets = pstats->TxUnicastFramesOK + + pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; + + ns->rx_packets = pstats->RxUnicastFramesOK + + pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK; + + ns->tx_bytes = pstats->TxOctetsOK; + ns->rx_bytes = pstats->RxOctetsOK; + + ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors + + pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions; + ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors + + pstats->RxFCSErrors + pstats->RxAlignErrors + + pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors + + pstats->RxSymbolErrors + pstats->RxRuntErrors; + + ns->multicast = pstats->RxMulticastFramesOK; + ns->collisions = pstats->TxTotalCollisions; + + /* detailed rx_errors */ + ns->rx_length_errors = pstats->RxFrameTooLongErrors + + pstats->RxJabberErrors; + ns->rx_over_errors = 0; + ns->rx_crc_errors = pstats->RxFCSErrors; + ns->rx_frame_errors = pstats->RxAlignErrors; + ns->rx_fifo_errors = 0; + ns->rx_missed_errors = 0; + + /* detailed tx_errors */ + ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = pstats->TxUnderrun; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = pstats->TxLateCollisions; + return ns; +} + +static u32 get_msglevel(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + + return adapter->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + struct adapter *adapter = dev->ml_priv; + + adapter->msg_enable = val; +} + 
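[Editor's note] cxgb_open() and cxgb_close() above use open_device_map as a per-port bitmap so that the shared adapter is brought up by the first port to open and torn down by the last one to close. A minimal userspace model of that bookkeeping follows; the real code uses atomic set_bit/clear_bit on the shared map.

#include <stdio.h>

static unsigned long open_device_map;   /* one bit per port */

static void port_open(int port)
{
        if (!open_device_map)
                printf("first port opening: bring adapter up\n");
        open_device_map |= 1UL << port;
}

static void port_close(int port)
{
        open_device_map &= ~(1UL << port);
        if (!open_device_map)
                printf("last port closed: take adapter down\n");
}

int main(void)
{
        port_open(0);
        port_open(1);
        port_close(0);                  /* adapter stays up */
        port_close(1);                  /* now it comes down */
        return 0;
}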
+static const char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK", + "TxOctetsBad", + "TxUnicastFramesOK", + "TxMulticastFramesOK", + "TxBroadcastFramesOK", + "TxPauseFrames", + "TxFramesWithDeferredXmissions", + "TxLateCollisions", + "TxTotalCollisions", + "TxFramesAbortedDueToXSCollisions", + "TxUnderrun", + "TxLengthErrors", + "TxInternalMACXmitError", + "TxFramesWithExcessiveDeferral", + "TxFCSErrors", + "TxJumboFramesOk", + "TxJumboOctetsOk", + + "RxOctetsOK", + "RxOctetsBad", + "RxUnicastFramesOK", + "RxMulticastFramesOK", + "RxBroadcastFramesOK", + "RxPauseFrames", + "RxFCSErrors", + "RxAlignErrors", + "RxSymbolErrors", + "RxDataErrors", + "RxSequenceErrors", + "RxRuntErrors", + "RxJabberErrors", + "RxInternalMACRcvError", + "RxInRangeLengthErrors", + "RxOutOfRangeLengthField", + "RxFrameTooLongErrors", + "RxJumboFramesOk", + "RxJumboOctetsOk", + + /* Port stats */ + "RxCsumGood", + "TxCsumOffload", + "TxTso", + "RxVlan", + "TxVlan", + "TxNeedHeadroom", + + /* Interrupt stats */ + "rx drops", + "pure_rsps", + "unhandled irqs", + "respQ_empty", + "respQ_overflow", + "freelistQ_empty", + "pkt_too_big", + "pkt_mismatch", + "cmdQ_full0", + "cmdQ_full1", + + "espi_DIP2ParityErr", + "espi_DIP4Err", + "espi_RxDrops", + "espi_TxDrops", + "espi_RxOvfl", + "espi_ParityErr" +}; + +#define T2_REGMAP_SIZE (3 * 1024) + +static int get_regs_len(struct net_device *dev) +{ + return T2_REGMAP_SIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = dev->ml_priv; + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); +} + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct adapter *adapter = dev->ml_priv; + struct cmac *mac = adapter->port[dev->if_port].mac; + const struct cmac_statistics *s; + const struct sge_intr_counts *t; + struct sge_port_stats ss; + + s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); + t = t1_sge_get_intr_counts(adapter->sge); + t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); + + *data++ = s->TxOctetsOK; + *data++ = s->TxOctetsBad; + *data++ = s->TxUnicastFramesOK; + *data++ = s->TxMulticastFramesOK; + *data++ = s->TxBroadcastFramesOK; + *data++ = s->TxPauseFrames; + *data++ = s->TxFramesWithDeferredXmissions; + *data++ = s->TxLateCollisions; + *data++ = s->TxTotalCollisions; + *data++ = s->TxFramesAbortedDueToXSCollisions; + *data++ = s->TxUnderrun; + *data++ = s->TxLengthErrors; + *data++ = s->TxInternalMACXmitError; + *data++ = s->TxFramesWithExcessiveDeferral; + *data++ = s->TxFCSErrors; + *data++ = s->TxJumboFramesOK; + *data++ = s->TxJumboOctetsOK; + + *data++ = s->RxOctetsOK; + *data++ = s->RxOctetsBad; + *data++ = s->RxUnicastFramesOK; + *data++ = s->RxMulticastFramesOK; + *data++ = s->RxBroadcastFramesOK; + *data++ = s->RxPauseFrames; + *data++ = s->RxFCSErrors; + *data++ = s->RxAlignErrors; + *data++ = s->RxSymbolErrors; + *data++ = s->RxDataErrors; + *data++ = s->RxSequenceErrors; + *data++ = s->RxRuntErrors; + *data++ = s->RxJabberErrors; + 
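[Editor's note] The *data++ sequence of get_stats() continues below; its only contract with stats_strings[] above is positional, because ethtool -S pairs the names from get_strings() with the values from get_ethtool_stats() purely by index. A small illustrative model of that pairing, with hypothetical counter values:

#include <stdio.h>

int main(void)
{
        /* hypothetical snapshot, for illustration only */
        const char *names[] = { "TxOctetsOK", "TxOctetsBad", "TxUnicastFramesOK" };
        unsigned long long values[] = { 123456789ULL, 0ULL, 1024ULL };
        size_t n = sizeof(names) / sizeof(names[0]);

        for (size_t i = 0; i < n; i++)  /* roughly what `ethtool -S` prints */
                printf("     %s: %llu\n", names[i], values[i]);
        return 0;
}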
*data++ = s->RxInternalMACRcvError; + *data++ = s->RxInRangeLengthErrors; + *data++ = s->RxOutOfRangeLengthField; + *data++ = s->RxFrameTooLongErrors; + *data++ = s->RxJumboFramesOK; + *data++ = s->RxJumboOctetsOK; + + *data++ = ss.rx_cso_good; + *data++ = ss.tx_cso; + *data++ = ss.tx_tso; + *data++ = ss.vlan_xtract; + *data++ = ss.vlan_insert; + *data++ = ss.tx_need_hdrroom; + + *data++ = t->rx_drops; + *data++ = t->pure_rsps; + *data++ = t->unhandled_irqs; + *data++ = t->respQ_empty; + *data++ = t->respQ_overflow; + *data++ = t->freelistQ_empty; + *data++ = t->pkt_too_big; + *data++ = t->pkt_mismatch; + *data++ = t->cmdQ_full[0]; + *data++ = t->cmdQ_full[1]; + + if (adapter->espi) { + const struct espi_intr_counts *e; + + e = t1_espi_get_intr_counts(adapter->espi); + *data++ = e->DIP2_parity_err; + *data++ = e->DIP4_err; + *data++ = e->rx_drops; + *data++ = e->tx_drops; + *data++ = e->rx_ovflw; + *data++ = e->parity_err; + } +} + +static inline void reg_block_dump(struct adapter *ap, void *buf, + unsigned int start, unsigned int end) +{ + u32 *p = buf + start; + + for ( ; start <= end; start += sizeof(u32)) + *p++ = readl(ap->regs + start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct adapter *ap = dev->ml_priv; + + /* + * Version scheme: bits 0..9: chip version, bits 10..15: chip revision + */ + regs->version = 2; + + memset(buf, 0, T2_REGMAP_SIZE); + reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); + reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE); + reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR); + reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); + reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); + reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); + reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); + reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL); + reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE); + reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + + cmd->supported = p->link_config.supported; + cmd->advertising = p->link_config.advertising; + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, p->link_config.speed); + cmd->duplex = p->link_config.duplex; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } + + cmd->port = (cmd->supported & SUPPORTED_TP) ? 
PORT_TP : PORT_FIBRE; + cmd->phy_address = p->phy->mdio.prtad; + cmd->transceiver = XCVR_EXTERNAL; + cmd->autoneg = p->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int speed_duplex_to_caps(int speed, int duplex) +{ + int cap = 0; + + switch (speed) { + case SPEED_10: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10baseT_Full; + else + cap = SUPPORTED_10baseT_Half; + break; + case SPEED_100: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_100baseT_Full; + else + cap = SUPPORTED_100baseT_Half; + break; + case SPEED_1000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_1000baseT_Full; + else + cap = SUPPORTED_1000baseT_Half; + break; + case SPEED_10000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10000baseT_Full; + } + return cap; +} + +#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ + ADVERTISED_10000baseT_Full) + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct link_config *lc = &p->link_config; + + if (!(lc->supported & SUPPORTED_Autoneg)) + return -EOPNOTSUPP; /* can't change speed/duplex */ + + if (cmd->autoneg == AUTONEG_DISABLE) { + u32 speed = ethtool_cmd_speed(cmd); + int cap = speed_duplex_to_caps(speed, cmd->duplex); + + if (!(lc->supported & cap) || (speed == SPEED_1000)) + return -EINVAL; + lc->requested_speed = speed; + lc->requested_duplex = cmd->duplex; + lc->advertising = 0; + } else { + cmd->advertising &= ADVERTISED_MASK; + if (cmd->advertising & (cmd->advertising - 1)) + cmd->advertising = lc->supported; + cmd->advertising &= lc->supported; + if (!cmd->advertising) + return -EINVAL; + lc->requested_speed = SPEED_INVALID; + lc->requested_duplex = DUPLEX_INVALID; + lc->advertising = cmd->advertising | ADVERTISED_Autoneg; + } + lc->autoneg = cmd->autoneg; + if (netif_running(dev)) + t1_link_start(p->phy, p->mac, lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + + epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct adapter *adapter = dev->ml_priv; + struct port_info *p = &adapter->port[dev->if_port]; + struct link_config *lc = &p->link_config; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & SUPPORTED_Autoneg) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (lc->autoneg == AUTONEG_ENABLE) { + if (netif_running(dev)) + t1_link_start(p->phy, p->mac, lc); + } else { + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + if (netif_running(dev)) + p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1, + lc->fc); + } + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct adapter *adapter = dev->ml_priv; + int jumbo_fl = t1_is_T1B(adapter) ? 
1 : 0; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; + e->tx_max_pending = MAX_CMDQ_ENTRIES; + + e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl]; + e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl]; + e->tx_pending = adapter->params.sge.cmdQ_size[0]; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct adapter *adapter = dev->ml_priv; + int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending || + e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || + e->tx_pending > MAX_CMDQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || + e->rx_jumbo_pending < MIN_FL_ENTRIES || + e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1)) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; + adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; + adapter->params.sge.cmdQ_size[0] = e->tx_pending; + adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ? + MAX_CMDQ1_ENTRIES : e->tx_pending; + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct adapter *adapter = dev->ml_priv; + + adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; + adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; + adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; + t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); + return 0; +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct adapter *adapter = dev->ml_priv; + + c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; + c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; + c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable; + return 0; +} + +static int get_eeprom_len(struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + + return t1_is_asic(adapter) ? 
EEPROM_SIZE : 0; +} + +#define EEPROM_MAGIC(ap) \ + (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16)) + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i; + u8 buf[EEPROM_SIZE] __attribute__((aligned(4))); + struct adapter *adapter = dev->ml_priv; + + e->magic = EEPROM_MAGIC(adapter); + for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32)) + t1_seeprom_read(adapter, i, (__le32 *)&buf[i]); + memcpy(data, buf + e->offset, e->len); + return 0; +} + +static const struct ethtool_ops t1_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, +}; + +static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct adapter *adapter = dev->ml_priv; + struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio; + + return mdio_mii_ioctl(mdio, if_mii(req), cmd); +} + +static int t1_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct adapter *adapter = dev->ml_priv; + struct cmac *mac = adapter->port[dev->if_port].mac; + + if (!mac->ops->set_mtu) + return -EOPNOTSUPP; + if (new_mtu < 68) + return -EINVAL; + if ((ret = mac->ops->set_mtu(mac, new_mtu))) + return ret; + dev->mtu = new_mtu; + return 0; +} + +static int t1_set_mac_addr(struct net_device *dev, void *p) +{ + struct adapter *adapter = dev->ml_priv; + struct cmac *mac = adapter->port[dev->if_port].mac; + struct sockaddr *addr = p; + + if (!mac->ops->macaddress_set) + return -EOPNOTSUPP; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + mac->ops->macaddress_set(mac, dev->dev_addr); + return 0; +} + +static netdev_features_t t1_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int t1_set_features(struct net_device *dev, netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct adapter *adapter = dev->ml_priv; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + t1_vlan_mode(adapter, features); + + return 0; +} +#ifdef CONFIG_NET_POLL_CONTROLLER +static void t1_netpoll(struct net_device *dev) +{ + unsigned long flags; + struct adapter *adapter = dev->ml_priv; + + local_irq_save(flags); + t1_interrupt(adapter->pdev->irq, adapter); + local_irq_restore(flags); +} +#endif + +/* + * Periodic accumulation of MAC statistics. This is used only if the MAC + * does not have any other way to prevent stats counter overflow. 
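[Editor's note] get_eeprom() above always reads whole 32-bit words starting at offset & ~3 and then copies out just the slice the caller asked for. A standalone model of that windowing, with a fake 32-byte EEPROM standing in for t1_seeprom_read():

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EEPROM_SIZE 32

static uint8_t fake_eeprom[EEPROM_SIZE];

static void fake_seeprom_read(unsigned int addr, uint32_t *val)
{
        memcpy(val, &fake_eeprom[addr], sizeof(*val));
}

static void eeprom_window(unsigned int offset, unsigned int len, uint8_t *out)
{
        uint8_t buf[EEPROM_SIZE] __attribute__((aligned(4)));
        unsigned int i;

        for (i = offset & ~3u; i < offset + len; i += sizeof(uint32_t))
                fake_seeprom_read(i, (uint32_t *)&buf[i]);
        memcpy(out, buf + offset, len);
}

int main(void)
{
        uint8_t out[8];
        int i;

        for (i = 0; i < EEPROM_SIZE; i++)
                fake_eeprom[i] = (uint8_t)i;
        eeprom_window(5, 6, out);       /* deliberately unaligned request */
        for (i = 0; i < 6; i++)
                printf("%02x ", out[i]);        /* 05 06 07 08 09 0a */
        printf("\n");
        return 0;
}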
+ */ +static void mac_stats_task(struct work_struct *work) +{ + int i; + struct adapter *adapter = + container_of(work, struct adapter, stats_update_task.work); + + for_each_port(adapter, i) { + struct port_info *p = &adapter->port[i]; + + if (netif_running(p->dev)) + p->mac->ops->statistics_update(p->mac, + MAC_STATS_UPDATE_FAST); + } + + /* Schedule the next statistics update if any port is active. */ + spin_lock(&adapter->work_lock); + if (adapter->open_device_map & PORT_MASK) + schedule_mac_stats_update(adapter, + adapter->params.stats_update_period); + spin_unlock(&adapter->work_lock); +} + +/* + * Processes elmer0 external interrupts in process context. + */ +static void ext_intr_task(struct work_struct *work) +{ + struct adapter *adapter = + container_of(work, struct adapter, ext_intr_handler_task); + + t1_elmer0_ext_intr_handler(adapter); + + /* Now reenable external interrupts */ + spin_lock_irq(&adapter->async_lock); + adapter->slow_intr_mask |= F_PL_INTR_EXT; + writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE); + writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, + adapter->regs + A_PL_ENABLE); + spin_unlock_irq(&adapter->async_lock); +} + +/* + * Interrupt-context handler for elmer0 external interrupts. + */ +void t1_elmer0_ext_intr(struct adapter *adapter) +{ + /* + * Schedule a task to handle external interrupts as we require + * a process context. We disable EXT interrupts in the interim + * and let the task reenable them when it's done. + */ + adapter->slow_intr_mask &= ~F_PL_INTR_EXT; + writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA, + adapter->regs + A_PL_ENABLE); + schedule_work(&adapter->ext_intr_handler_task); +} + +void t1_fatal_err(struct adapter *adapter) +{ + if (adapter->flags & FULL_INIT_DONE) { + t1_sge_stop(adapter->sge); + t1_interrupts_disable(adapter); + } + pr_alert("%s: encountered fatal error, operation suspended\n", + adapter->name); +} + +static const struct net_device_ops cxgb_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t1_start_xmit, + .ndo_get_stats = t1_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = t1_set_rxmode, + .ndo_do_ioctl = t1_ioctl, + .ndo_change_mtu = t1_change_mtu, + .ndo_set_mac_address = t1_set_mac_addr, + .ndo_fix_features = t1_fix_features, + .ndo_set_features = t1_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = t1_netpoll, +#endif +}; + +static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int i, err, pci_using_dac = 0; + unsigned long mmio_start, mmio_len; + const struct board_info *bi; + struct adapter *adapter = NULL; + struct port_info *pi; + + pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION); + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + pr_err("%s: cannot find PCI device memory base address\n", + pci_name(pdev)); + err = -ENODEV; + goto out_disable_pdev; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + pr_err("%s: unable to obtain 64-bit DMA for " + "consistent allocations\n", pci_name(pdev)); + err = -ENODEV; + goto out_disable_pdev; + } + + } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + pr_err("%s: no usable DMA configuration\n", pci_name(pdev)); + goto out_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev)); 
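[Editor's note] t1_elmer0_ext_intr() and ext_intr_task() above split the handling of a slow external interrupt: the hard-irq path drops the EXT bit from the cached enable mask and defers to a work item, which re-arms it once the source has been serviced. A standalone model of that mask bookkeeping; the bit positions here are illustrative, not the real A_PL_ENABLE layout.

#include <stdio.h>

#define F_PL_INTR_EXT      (1u << 5)    /* illustrative bit positions only */
#define F_PL_INTR_SGE_DATA (1u << 1)

static unsigned int slow_intr_mask = F_PL_INTR_EXT | F_PL_INTR_SGE_DATA;
static unsigned int pl_enable_reg;      /* stands in for the A_PL_ENABLE write */

static void hard_irq_saw_ext(void)      /* cf. t1_elmer0_ext_intr() */
{
        slow_intr_mask &= ~F_PL_INTR_EXT;
        pl_enable_reg = slow_intr_mask | F_PL_INTR_SGE_DATA;
}

static void ext_work_done(void)         /* cf. the end of ext_intr_task() */
{
        slow_intr_mask |= F_PL_INTR_EXT;
        pl_enable_reg = slow_intr_mask | F_PL_INTR_SGE_DATA;
}

int main(void)
{
        hard_irq_saw_ext();
        printf("EXT masked:   enable = %#x\n", pl_enable_reg);
        ext_work_done();
        printf("EXT re-armed: enable = %#x\n", pl_enable_reg);
        return 0;
}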
+ goto out_disable_pdev; + } + + pci_set_master(pdev); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + bi = t1_get_board_info(ent->driver_data); + + for (i = 0; i < bi->port_number; ++i) { + struct net_device *netdev; + + netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter)); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (!adapter) { + adapter = netdev_priv(netdev); + adapter->pdev = pdev; + adapter->port[0].dev = netdev; /* so we don't leak it */ + + adapter->regs = ioremap(mmio_start, mmio_len); + if (!adapter->regs) { + pr_err("%s: cannot map device registers\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_free_dev; + } + + if (t1_get_board_rev(adapter, bi, &adapter->params)) { + err = -ENODEV; /* Can't handle this chip rev */ + goto out_free_dev; + } + + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + adapter->mmio_len = mmio_len; + + spin_lock_init(&adapter->tpi_lock); + spin_lock_init(&adapter->work_lock); + spin_lock_init(&adapter->async_lock); + spin_lock_init(&adapter->mac_lock); + + INIT_WORK(&adapter->ext_intr_handler_task, + ext_intr_task); + INIT_DELAYED_WORK(&adapter->stats_update_task, + mac_stats_task); + + pci_set_drvdata(pdev, netdev); + } + + pi = &adapter->port[i]; + pi->dev = netdev; + netif_carrier_off(netdev); + netdev->irq = pdev->irq; + netdev->if_port = i; + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len - 1; + netdev->ml_priv = adapter; + netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM; + netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_LLTX; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + if (vlan_tso_capable(adapter)) { + netdev->features |= + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + + /* T204: disable TSO */ + if (!(is_T2(adapter)) || bi->port_number != 4) { + netdev->hw_features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO; + } + } + + netdev->netdev_ops = &cxgb_netdev_ops; + netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ? + sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); + + netif_napi_add(netdev, &adapter->napi, t1_poll, 64); + + netdev->ethtool_ops = &t1_ethtool_ops; + } + + if (t1_init_sw_modules(adapter, bi) < 0) { + err = -ENODEV; + goto out_free_dev; + } + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. + */ + for (i = 0; i < bi->port_number; ++i) { + err = register_netdev(adapter->port[i].dev); + if (err) + pr_warn("%s: cannot register net device %s, skipping\n", + pci_name(pdev), adapter->port[i].dev->name); + else { + /* + * Change the name we use for messages to the name of + * the first successfully registered interface. + */ + if (!adapter->registered_device_map) + adapter->name = adapter->port[i].dev->name; + + __set_bit(i, &adapter->registered_device_map); + } + } + if (!adapter->registered_device_map) { + pr_err("%s: could not register any net devices\n", + pci_name(pdev)); + goto out_release_adapter_res; + } + + pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n", + adapter->name, bi->desc, adapter->params.chip_revision, + adapter->params.pci.is_pcix ? 
"PCIX" : "PCI", + adapter->params.pci.speed, adapter->params.pci.width); + + /* + * Set the T1B ASIC and memory clocks. + */ + if (t1powersave) + adapter->t1powersave = LCLOCK; /* HW default is powersave mode. */ + else + adapter->t1powersave = HCLOCK; + if (t1_is_T1B(adapter)) + t1_clock(adapter, t1powersave); + + return 0; + +out_release_adapter_res: + t1_free_sw_modules(adapter); +out_free_dev: + if (adapter) { + if (adapter->regs) + iounmap(adapter->regs); + for (i = bi->port_number - 1; i >= 0; --i) + if (adapter->port[i].dev) + free_netdev(adapter->port[i].dev); + } + pci_release_regions(pdev); +out_disable_pdev: + pci_disable_device(pdev); + return err; +} + +static void bit_bang(struct adapter *adapter, int bitdata, int nbits) +{ + int data; + int i; + u32 val; + + enum { + S_CLOCK = 1 << 3, + S_DATA = 1 << 4 + }; + + for (i = (nbits - 1); i > -1; i--) { + + udelay(50); + + data = ((bitdata >> i) & 0x1); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + + if (data) + val |= S_DATA; + else + val &= ~S_DATA; + + udelay(50); + + /* Set SCLOCK low */ + val &= ~S_CLOCK; + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + + udelay(50); + + /* Write SCLOCK high */ + val |= S_CLOCK; + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + + } +} + +static int t1_clock(struct adapter *adapter, int mode) +{ + u32 val; + int M_CORE_VAL; + int M_MEM_VAL; + + enum { + M_CORE_BITS = 9, + T_CORE_VAL = 0, + T_CORE_BITS = 2, + N_CORE_VAL = 0, + N_CORE_BITS = 2, + M_MEM_BITS = 9, + T_MEM_VAL = 0, + T_MEM_BITS = 2, + N_MEM_VAL = 0, + N_MEM_BITS = 2, + NP_LOAD = 1 << 17, + S_LOAD_MEM = 1 << 5, + S_LOAD_CORE = 1 << 6, + S_CLOCK = 1 << 3 + }; + + if (!t1_is_T1B(adapter)) + return -ENODEV; /* Can't re-clock this chip. */ + + if (mode & 2) + return 0; /* show current mode. */ + + if ((adapter->t1powersave & 1) == (mode & 1)) + return -EALREADY; /* ASIC already running in mode. */ + + if ((mode & 1) == HCLOCK) { + M_CORE_VAL = 0x14; + M_MEM_VAL = 0x18; + adapter->t1powersave = HCLOCK; /* overclock */ + } else { + M_CORE_VAL = 0xe; + M_MEM_VAL = 0x10; + adapter->t1powersave = LCLOCK; /* underclock */ + } + + /* Don't interrupt this serial stream! 
*/ + spin_lock(&adapter->tpi_lock); + + /* Initialize for ASIC core */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= NP_LOAD; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_CORE; + val &= ~S_CLOCK; + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + + /* Serial program the ASIC clock synthesizer */ + bit_bang(adapter, T_CORE_VAL, T_CORE_BITS); + bit_bang(adapter, N_CORE_VAL, N_CORE_BITS); + bit_bang(adapter, M_CORE_VAL, M_CORE_BITS); + udelay(50); + + /* Finish ASIC core */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= S_LOAD_CORE; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_CORE; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + + /* Initialize for memory */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= NP_LOAD; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_MEM; + val &= ~S_CLOCK; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + + /* Serial program the memory clock synthesizer */ + bit_bang(adapter, T_MEM_VAL, T_MEM_BITS); + bit_bang(adapter, N_MEM_VAL, N_MEM_BITS); + bit_bang(adapter, M_MEM_VAL, M_MEM_BITS); + udelay(50); + + /* Finish memory */ + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= S_LOAD_MEM; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(50); + __t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~S_LOAD_MEM; + udelay(50); + __t1_tpi_write(adapter, A_ELMER0_GPO, val); + + spin_unlock(&adapter->tpi_lock); + + return 0; +} + +static inline void t1_sw_reset(struct pci_dev *pdev) +{ + pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); + pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0); +} + +static void remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct adapter *adapter = dev->ml_priv; + int i; + + for_each_port(adapter, i) { + if (test_bit(i, &adapter->registered_device_map)) + unregister_netdev(adapter->port[i].dev); + } + + t1_free_sw_modules(adapter); + iounmap(adapter->regs); + + while (--i >= 0) { + if (adapter->port[i].dev) + free_netdev(adapter->port[i].dev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + t1_sw_reset(pdev); +} + +static struct pci_driver cxgb_pci_driver = { + .name = DRV_NAME, + .id_table = t1_pci_tbl, + .probe = init_one, + .remove = remove_one, +}; + +module_pci_driver(cxgb_pci_driver); diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h new file mode 100644 index 000000000..81526ad36 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h @@ -0,0 +1,157 @@ +/***************************************************************************** + * * + * File: elmer0.h * + * $Revision: 1.6 $ * + * $Date: 2005/06/21 22:49:43 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . 
* + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_ELMER0_H_ +#define _CXGB_ELMER0_H_ + +/* ELMER0 flavors */ +enum { + ELMER0_XC2S300E_6FT256_C, + ELMER0_XC2S100E_6TQ144_C +}; + +/* ELMER0 registers */ +#define A_ELMER0_VERSION 0x100000 +#define A_ELMER0_PHY_CFG 0x100004 +#define A_ELMER0_INT_ENABLE 0x100008 +#define A_ELMER0_INT_CAUSE 0x10000c +#define A_ELMER0_GPI_CFG 0x100010 +#define A_ELMER0_GPI_STAT 0x100014 +#define A_ELMER0_GPO 0x100018 +#define A_ELMER0_PORT0_MI1_CFG 0x400000 + +#define S_MI1_MDI_ENABLE 0 +#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE) +#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U) + +#define S_MI1_MDI_INVERT 1 +#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT) +#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U) + +#define S_MI1_PREAMBLE_ENABLE 2 +#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE) +#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U) + +#define S_MI1_SOF 3 +#define M_MI1_SOF 0x3 +#define V_MI1_SOF(x) ((x) << S_MI1_SOF) +#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF) + +#define S_MI1_CLK_DIV 5 +#define M_MI1_CLK_DIV 0xff +#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV) +#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV) + +#define A_ELMER0_PORT0_MI1_ADDR 0x400004 + +#define S_MI1_REG_ADDR 0 +#define M_MI1_REG_ADDR 0x1f +#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR) +#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR) + +#define S_MI1_PHY_ADDR 5 +#define M_MI1_PHY_ADDR 0x1f +#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR) +#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR) + +#define A_ELMER0_PORT0_MI1_DATA 0x400008 + +#define S_MI1_DATA 0 +#define M_MI1_DATA 0xffff +#define V_MI1_DATA(x) ((x) << S_MI1_DATA) +#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA) + +#define A_ELMER0_PORT0_MI1_OP 0x40000c + +#define S_MI1_OP 0 +#define M_MI1_OP 0x3 +#define V_MI1_OP(x) ((x) << S_MI1_OP) +#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP) + +#define S_MI1_ADDR_AUTOINC 2 +#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC) +#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U) + +#define S_MI1_OP_BUSY 31 +#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY) +#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U) + +#define A_ELMER0_PORT1_MI1_CFG 0x500000 +#define A_ELMER0_PORT1_MI1_ADDR 0x500004 +#define A_ELMER0_PORT1_MI1_DATA 0x500008 +#define A_ELMER0_PORT1_MI1_OP 0x50000c +#define A_ELMER0_PORT2_MI1_CFG 0x600000 +#define A_ELMER0_PORT2_MI1_ADDR 0x600004 +#define A_ELMER0_PORT2_MI1_DATA 0x600008 +#define A_ELMER0_PORT2_MI1_OP 0x60000c +#define A_ELMER0_PORT3_MI1_CFG 0x700000 +#define A_ELMER0_PORT3_MI1_ADDR 0x700004 +#define A_ELMER0_PORT3_MI1_DATA 0x700008 +#define A_ELMER0_PORT3_MI1_OP 0x70000c + +/* Simple bit definition for GPI and GP0 registers. 
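[Editor's note] elmer0.h above follows the register-header convention used throughout this driver: S_* is the field's shift, M_* its width mask, V_*(x) places a value into the field, F_* is the single-bit flag form, and G_*(x) extracts the field from a register value. A standalone demonstration using the MI1 definitions from above:

#include <stdio.h>

#define S_MI1_CLK_DIV 5
#define M_MI1_CLK_DIV 0xff
#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)

#define S_MI1_PREAMBLE_ENABLE 2
#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)

int main(void)
{
        unsigned int cfg = V_MI1_CLK_DIV(0x20) | F_MI1_PREAMBLE_ENABLE;

        /* prints cfg = 0x404, clk_div = 0x20 */
        printf("cfg = %#x, clk_div = %#x\n", cfg, G_MI1_CLK_DIV(cfg));
        return 0;
}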
*/ +#define ELMER0_GP_BIT0 0x0001 +#define ELMER0_GP_BIT1 0x0002 +#define ELMER0_GP_BIT2 0x0004 +#define ELMER0_GP_BIT3 0x0008 +#define ELMER0_GP_BIT4 0x0010 +#define ELMER0_GP_BIT5 0x0020 +#define ELMER0_GP_BIT6 0x0040 +#define ELMER0_GP_BIT7 0x0080 +#define ELMER0_GP_BIT8 0x0100 +#define ELMER0_GP_BIT9 0x0200 +#define ELMER0_GP_BIT10 0x0400 +#define ELMER0_GP_BIT11 0x0800 +#define ELMER0_GP_BIT12 0x1000 +#define ELMER0_GP_BIT13 0x2000 +#define ELMER0_GP_BIT14 0x4000 +#define ELMER0_GP_BIT15 0x8000 +#define ELMER0_GP_BIT16 0x10000 +#define ELMER0_GP_BIT17 0x20000 +#define ELMER0_GP_BIT18 0x40000 +#define ELMER0_GP_BIT19 0x80000 + +#define MI1_OP_DIRECT_WRITE 1 +#define MI1_OP_DIRECT_READ 2 + +#define MI1_OP_INDIRECT_ADDRESS 0 +#define MI1_OP_INDIRECT_WRITE 1 +#define MI1_OP_INDIRECT_READ_INC 2 +#define MI1_OP_INDIRECT_READ 3 + +#endif /* _CXGB_ELMER0_H_ */ + diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c new file mode 100644 index 000000000..3e182eee7 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/espi.c @@ -0,0 +1,372 @@ +/***************************************************************************** + * * + * File: espi.c * + * $Revision: 1.14 $ * + * $Date: 2005/05/14 00:59:32 $ * + * Description: * + * Ethernet SPI functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "common.h" +#include "regs.h" +#include "espi.h" + +struct peespi { + adapter_t *adapter; + struct espi_intr_counts intr_cnt; + u32 misc_ctrl; + spinlock_t lock; +}; + +#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \ + F_RAMPARITYERR | F_DIP2PARITYERR) +#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \ + | F_MONITORED_INTERFACE) + +#define TRICN_CNFG 14 +#define TRICN_CMD_READ 0x11 +#define TRICN_CMD_WRITE 0x21 +#define TRICN_CMD_ATTEMPTS 10 + +static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr, + int ch_addr, int reg_offset, u32 wr_data) +{ + int busy, attempts = TRICN_CMD_ATTEMPTS; + + writel(V_WRITE_DATA(wr_data) | + V_REGISTER_OFFSET(reg_offset) | + V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) | + V_BUNDLE_ADDR(bundle_addr) | + V_SPI4_COMMAND(TRICN_CMD_WRITE), + adapter->regs + A_ESPI_CMD_ADDR); + writel(0, adapter->regs + A_ESPI_GOSTAT); + + do { + busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY; + } while (busy && --attempts); + + if (busy) + pr_err("%s: TRICN write timed out\n", adapter->name); + + return busy; +} + +static int tricn_init(adapter_t *adapter) +{ + int i, sme = 1; + + if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) { + pr_err("%s: ESPI clock not ready\n", adapter->name); + return -1; + } + + writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET); + + if (sme) { + tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81); + tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81); + tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81); + } + for (i = 1; i <= 8; i++) + tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1); + for (i = 1; i <= 2; i++) + tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1); + for (i = 1; i <= 3; i++) + tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1); + tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1); + tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1); + tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1); + tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80); + tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1); + + writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST, + adapter->regs + A_ESPI_RX_RESET); + + return 0; +} + +void t1_espi_intr_enable(struct peespi *espi) +{ + u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); + + /* + * Cannot enable ESPI interrupts on T1B because HW asserts the + * interrupt incorrectly, namely the driver gets ESPI interrupts + * but no data is actually dropped (can verify this reading the ESPI + * drop registers). Also, once the ESPI interrupt is asserted it + * cannot be cleared (HW bug). + */ + enable = t1_is_T1B(espi->adapter) ? 
0 : ESPI_INTR_MASK; + writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE); + writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); +} + +void t1_espi_intr_clear(struct peespi *espi) +{ + readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); + writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS); + writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE); +} + +void t1_espi_intr_disable(struct peespi *espi) +{ + u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE); + + writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE); + writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE); +} + +int t1_espi_intr_handler(struct peespi *espi) +{ + u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS); + + if (status & F_DIP4ERR) + espi->intr_cnt.DIP4_err++; + if (status & F_RXDROP) + espi->intr_cnt.rx_drops++; + if (status & F_TXDROP) + espi->intr_cnt.tx_drops++; + if (status & F_RXOVERFLOW) + espi->intr_cnt.rx_ovflw++; + if (status & F_RAMPARITYERR) + espi->intr_cnt.parity_err++; + if (status & F_DIP2PARITYERR) { + espi->intr_cnt.DIP2_parity_err++; + + /* + * Must read the error count to clear the interrupt + * that it causes. + */ + readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT); + } + + /* + * For T1B we need to write 1 to clear ESPI interrupts. For T2+ we + * write the status as is. + */ + if (status && t1_is_T1B(espi->adapter)) + status = 1; + writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS); + return 0; +} + +const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi) +{ + return &espi->intr_cnt; +} + +static void espi_setup_for_pm3393(adapter_t *adapter) +{ + u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200; + + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3); + writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH); + writel(0x08000008, adapter->regs + A_ESPI_TRAIN); + writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG); +} + +static void espi_setup_for_vsc7321(adapter_t *adapter) +{ + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); + writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); + writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); + writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); + writel(V_RX_NPORTS(4) | V_TX_NPORTS(4), adapter->regs + A_PORT_CONFIG); + + writel(0x08000008, adapter->regs + A_ESPI_TRAIN); +} + +/* + * Note that T1B requires at least 2 ports for IXF1010 due to a HW bug. 
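[Editor's note] tricn_write() above uses a bounded busy-wait: it polls the command-busy flag a fixed number of times and reports a timeout rather than spinning forever. A standalone model of that loop, with a fake status register that stays busy for a few polls:

#include <stdio.h>

#define CMD_ATTEMPTS 10

static int busy_polls_remaining = 3;    /* fake hardware state */

static int read_busy_flag(void)
{
        return busy_polls_remaining-- > 0;
}

static int wait_cmd_done(void)
{
        int busy, attempts = CMD_ATTEMPTS;

        do {
                busy = read_busy_flag();
        } while (busy && --attempts);

        if (busy)
                printf("command timed out\n");
        return busy;                    /* non-zero means timeout, as in tricn_write() */
}

int main(void)
{
        printf("result = %d\n", wait_cmd_done());
        return 0;
}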
+ */ +static void espi_setup_for_ixf1010(adapter_t *adapter, int nports) +{ + writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); + if (nports == 4) { + if (is_T2(adapter)) { + writel(0xf00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x3c0, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + } else { + writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + } + } else { + writel(0x1fff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); + writel(0x7ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); + } + writel(V_RX_NPORTS(nports) | V_TX_NPORTS(nports), adapter->regs + A_PORT_CONFIG); + +} + +int t1_espi_init(struct peespi *espi, int mac_type, int nports) +{ + u32 status_enable_extra = 0; + adapter_t *adapter = espi->adapter; + + /* Disable ESPI training. MACs that can handle it enable it below. */ + writel(0, adapter->regs + A_ESPI_TRAIN); + + if (is_T2(adapter)) { + writel(V_OUT_OF_SYNC_COUNT(4) | + V_DIP2_PARITY_ERR_THRES(3) | + V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); + writel(nports == 4 ? 0x200040 : 0x1000080, + adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); + } else + writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); + + if (mac_type == CHBT_MAC_PM3393) + espi_setup_for_pm3393(adapter); + else if (mac_type == CHBT_MAC_VSC7321) + espi_setup_for_vsc7321(adapter); + else if (mac_type == CHBT_MAC_IXF1010) { + status_enable_extra = F_INTEL1010MODE; + espi_setup_for_ixf1010(adapter, nports); + } else + return -1; + + writel(status_enable_extra | F_RXSTATUSENABLE, + adapter->regs + A_ESPI_FIFO_STATUS_ENABLE); + + if (is_T2(adapter)) { + tricn_init(adapter); + /* + * Always position the control at the 1st port egress IN + * (sop,eop) counter to reduce PIOs for T/N210 workaround. + */ + espi->misc_ctrl = readl(adapter->regs + A_ESPI_MISC_CONTROL); + espi->misc_ctrl &= ~MON_MASK; + espi->misc_ctrl |= F_MONITORED_DIRECTION; + if (adapter->params.nports == 1) + espi->misc_ctrl |= F_MONITORED_INTERFACE; + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + spin_lock_init(&espi->lock); + } + + return 0; +} + +void t1_espi_destroy(struct peespi *espi) +{ + kfree(espi); +} + +struct peespi *t1_espi_create(adapter_t *adapter) +{ + struct peespi *espi = kzalloc(sizeof(*espi), GFP_KERNEL); + + if (espi) + espi->adapter = adapter; + return espi; +} + +#if 0 +void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val) +{ + struct peespi *espi = adapter->espi; + + if (!is_T2(adapter)) + return; + spin_lock(&espi->lock); + espi->misc_ctrl = (val & ~MON_MASK) | + (espi->misc_ctrl & MON_MASK); + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + spin_unlock(&espi->lock); +} +#endif /* 0 */ + +u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait) +{ + struct peespi *espi = adapter->espi; + u32 sel; + + if (!is_T2(adapter)) + return 0; + + sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2); + if (!wait) { + if (!spin_trylock(&espi->lock)) + return 0; + } else + spin_lock(&espi->lock); + + if ((sel != (espi->misc_ctrl & MON_MASK))) { + writel(((espi->misc_ctrl & ~MON_MASK) | sel), + adapter->regs + A_ESPI_MISC_CONTROL); + sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + } else + sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3); + spin_unlock(&espi->lock); + return sel; +} + +/* + * This function is for T204 only. 
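[Editor's note] t1_espi_get_mon() above takes the ESPI lock unconditionally only when the caller can afford to wait; otherwise it trylocks and simply returns a default on contention. A userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int read_monitor(int wait)
{
        unsigned int val = 0;

        if (!wait) {
                if (pthread_mutex_trylock(&lock))
                        return 0;       /* contended: give up immediately */
        } else {
                pthread_mutex_lock(&lock);
        }

        val = 42;                       /* stands in for the register read */
        pthread_mutex_unlock(&lock);
        return val;
}

int main(void)
{
        printf("%u\n", read_monitor(1));
        return 0;
}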
+ * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in + * one shot, since there is no per port counter on the out side. + */ +int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait) +{ + struct peespi *espi = adapter->espi; + u8 i, nport = (u8)adapter->params.nports; + + if (!wait) { + if (!spin_trylock(&espi->lock)) + return -1; + } else + spin_lock(&espi->lock); + + if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) { + espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) | + F_MONITORED_DIRECTION; + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + } + for (i = 0 ; i < nport; i++, valp++) { + if (i) { + writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i), + adapter->regs + A_ESPI_MISC_CONTROL); + } + *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3); + } + + writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); + spin_unlock(&espi->lock); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h new file mode 100644 index 000000000..162de5259 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/espi.h @@ -0,0 +1,67 @@ +/***************************************************************************** + * * + * File: espi.h * + * $Revision: 1.7 $ * + * $Date: 2005/06/21 18:29:47 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_ESPI_H_ +#define _CXGB_ESPI_H_ + +#include "common.h" + +struct espi_intr_counts { + unsigned int DIP4_err; + unsigned int rx_drops; + unsigned int tx_drops; + unsigned int rx_ovflw; + unsigned int parity_err; + unsigned int DIP2_parity_err; +}; + +struct peespi; + +struct peespi *t1_espi_create(adapter_t *adapter); +void t1_espi_destroy(struct peespi *espi); +int t1_espi_init(struct peespi *espi, int mac_type, int nports); + +void t1_espi_intr_enable(struct peespi *); +void t1_espi_intr_clear(struct peespi *); +void t1_espi_intr_disable(struct peespi *); +int t1_espi_intr_handler(struct peespi *); +const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi); + +u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait); +int t1_espi_get_mon_t204(adapter_t *, u32 *, u8); + +#endif /* _CXGB_ESPI_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h new file mode 100644 index 000000000..ccdb2bc9a --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h @@ -0,0 +1,232 @@ +/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */ + +/* + * FPGA specific definitions + */ + +#ifndef __CHELSIO_FPGA_DEFS_H__ +#define __CHELSIO_FPGA_DEFS_H__ + +#define FPGA_PCIX_ADDR_VERSION 0xA08 +#define FPGA_PCIX_ADDR_STAT 0xA0C + +/* FPGA master interrupt Cause/Enable bits */ +#define FPGA_PCIX_INTERRUPT_SGE_ERROR 0x1 +#define FPGA_PCIX_INTERRUPT_SGE_DATA 0x2 +#define FPGA_PCIX_INTERRUPT_TP 0x4 +#define FPGA_PCIX_INTERRUPT_MC3 0x8 +#define FPGA_PCIX_INTERRUPT_GMAC 0x10 +#define FPGA_PCIX_INTERRUPT_PCIX 0x20 + +/* TP interrupt register addresses */ +#define FPGA_TP_ADDR_INTERRUPT_ENABLE 0xA10 +#define FPGA_TP_ADDR_INTERRUPT_CAUSE 0xA14 +#define FPGA_TP_ADDR_VERSION 0xA18 + +/* TP interrupt Cause/Enable bits */ +#define FPGA_TP_INTERRUPT_MC4 0x1 +#define FPGA_TP_INTERRUPT_MC5 0x2 + +/* + * PM interrupt register addresses + */ +#define FPGA_MC3_REG_INTRENABLE 0xA20 +#define FPGA_MC3_REG_INTRCAUSE 0xA24 +#define FPGA_MC3_REG_VERSION 0xA28 + +/* + * GMAC interrupt register addresses + */ +#define FPGA_GMAC_ADDR_INTERRUPT_ENABLE 0xA30 +#define FPGA_GMAC_ADDR_INTERRUPT_CAUSE 0xA34 +#define FPGA_GMAC_ADDR_VERSION 0xA38 + +/* GMAC Cause/Enable bits */ +#define FPGA_GMAC_INTERRUPT_PORT0 0x1 +#define FPGA_GMAC_INTERRUPT_PORT1 0x2 +#define FPGA_GMAC_INTERRUPT_PORT2 0x4 +#define FPGA_GMAC_INTERRUPT_PORT3 0x8 + +/* MI0 registers */ +#define A_MI0_CLK 0xb00 + +#define S_MI0_CLK_DIV 0 +#define M_MI0_CLK_DIV 0xff +#define V_MI0_CLK_DIV(x) ((x) << S_MI0_CLK_DIV) +#define G_MI0_CLK_DIV(x) (((x) >> S_MI0_CLK_DIV) & M_MI0_CLK_DIV) + +#define S_MI0_CLK_CNT 8 +#define M_MI0_CLK_CNT 0xff +#define V_MI0_CLK_CNT(x) ((x) << S_MI0_CLK_CNT) +#define G_MI0_CLK_CNT(x) (((x) >> S_MI0_CLK_CNT) & M_MI0_CLK_CNT) + +#define A_MI0_CSR 0xb04 + +#define S_MI0_CSR_POLL 0 +#define V_MI0_CSR_POLL(x) ((x) << S_MI0_CSR_POLL) +#define F_MI0_CSR_POLL V_MI0_CSR_POLL(1U) + +#define S_MI0_PREAMBLE 1 +#define V_MI0_PREAMBLE(x) ((x) << S_MI0_PREAMBLE) +#define F_MI0_PREAMBLE V_MI0_PREAMBLE(1U) + +#define S_MI0_INTR_ENABLE 2 +#define V_MI0_INTR_ENABLE(x) ((x) << S_MI0_INTR_ENABLE) +#define F_MI0_INTR_ENABLE V_MI0_INTR_ENABLE(1U) + +#define S_MI0_BUSY 3 
+#define V_MI0_BUSY(x) ((x) << S_MI0_BUSY) +#define F_MI0_BUSY V_MI0_BUSY(1U) + +#define S_MI0_MDIO 4 +#define V_MI0_MDIO(x) ((x) << S_MI0_MDIO) +#define F_MI0_MDIO V_MI0_MDIO(1U) + +#define A_MI0_ADDR 0xb08 + +#define S_MI0_PHY_REG_ADDR 0 +#define M_MI0_PHY_REG_ADDR 0x1f +#define V_MI0_PHY_REG_ADDR(x) ((x) << S_MI0_PHY_REG_ADDR) +#define G_MI0_PHY_REG_ADDR(x) (((x) >> S_MI0_PHY_REG_ADDR) & M_MI0_PHY_REG_ADDR) + +#define S_MI0_PHY_ADDR 5 +#define M_MI0_PHY_ADDR 0x1f +#define V_MI0_PHY_ADDR(x) ((x) << S_MI0_PHY_ADDR) +#define G_MI0_PHY_ADDR(x) (((x) >> S_MI0_PHY_ADDR) & M_MI0_PHY_ADDR) + +#define A_MI0_DATA_EXT 0xb0c +#define A_MI0_DATA_INT 0xb10 + +/* GMAC registers */ +#define A_GMAC_MACID_LO 0x28 +#define A_GMAC_MACID_HI 0x2c +#define A_GMAC_CSR 0x30 + +#define S_INTERFACE 0 +#define M_INTERFACE 0x3 +#define V_INTERFACE(x) ((x) << S_INTERFACE) +#define G_INTERFACE(x) (((x) >> S_INTERFACE) & M_INTERFACE) + +#define S_MAC_TX_ENABLE 2 +#define V_MAC_TX_ENABLE(x) ((x) << S_MAC_TX_ENABLE) +#define F_MAC_TX_ENABLE V_MAC_TX_ENABLE(1U) + +#define S_MAC_RX_ENABLE 3 +#define V_MAC_RX_ENABLE(x) ((x) << S_MAC_RX_ENABLE) +#define F_MAC_RX_ENABLE V_MAC_RX_ENABLE(1U) + +#define S_MAC_LB_ENABLE 4 +#define V_MAC_LB_ENABLE(x) ((x) << S_MAC_LB_ENABLE) +#define F_MAC_LB_ENABLE V_MAC_LB_ENABLE(1U) + +#define S_MAC_SPEED 5 +#define M_MAC_SPEED 0x3 +#define V_MAC_SPEED(x) ((x) << S_MAC_SPEED) +#define G_MAC_SPEED(x) (((x) >> S_MAC_SPEED) & M_MAC_SPEED) + +#define S_MAC_HD_FC_ENABLE 7 +#define V_MAC_HD_FC_ENABLE(x) ((x) << S_MAC_HD_FC_ENABLE) +#define F_MAC_HD_FC_ENABLE V_MAC_HD_FC_ENABLE(1U) + +#define S_MAC_HALF_DUPLEX 8 +#define V_MAC_HALF_DUPLEX(x) ((x) << S_MAC_HALF_DUPLEX) +#define F_MAC_HALF_DUPLEX V_MAC_HALF_DUPLEX(1U) + +#define S_MAC_PROMISC 9 +#define V_MAC_PROMISC(x) ((x) << S_MAC_PROMISC) +#define F_MAC_PROMISC V_MAC_PROMISC(1U) + +#define S_MAC_MC_ENABLE 10 +#define V_MAC_MC_ENABLE(x) ((x) << S_MAC_MC_ENABLE) +#define F_MAC_MC_ENABLE V_MAC_MC_ENABLE(1U) + +#define S_MAC_RESET 11 +#define V_MAC_RESET(x) ((x) << S_MAC_RESET) +#define F_MAC_RESET V_MAC_RESET(1U) + +#define S_MAC_RX_PAUSE_ENABLE 12 +#define V_MAC_RX_PAUSE_ENABLE(x) ((x) << S_MAC_RX_PAUSE_ENABLE) +#define F_MAC_RX_PAUSE_ENABLE V_MAC_RX_PAUSE_ENABLE(1U) + +#define S_MAC_TX_PAUSE_ENABLE 13 +#define V_MAC_TX_PAUSE_ENABLE(x) ((x) << S_MAC_TX_PAUSE_ENABLE) +#define F_MAC_TX_PAUSE_ENABLE V_MAC_TX_PAUSE_ENABLE(1U) + +#define S_MAC_LWM_ENABLE 14 +#define V_MAC_LWM_ENABLE(x) ((x) << S_MAC_LWM_ENABLE) +#define F_MAC_LWM_ENABLE V_MAC_LWM_ENABLE(1U) + +#define S_MAC_MAGIC_PKT_ENABLE 15 +#define V_MAC_MAGIC_PKT_ENABLE(x) ((x) << S_MAC_MAGIC_PKT_ENABLE) +#define F_MAC_MAGIC_PKT_ENABLE V_MAC_MAGIC_PKT_ENABLE(1U) + +#define S_MAC_ISL_ENABLE 16 +#define V_MAC_ISL_ENABLE(x) ((x) << S_MAC_ISL_ENABLE) +#define F_MAC_ISL_ENABLE V_MAC_ISL_ENABLE(1U) + +#define S_MAC_JUMBO_ENABLE 17 +#define V_MAC_JUMBO_ENABLE(x) ((x) << S_MAC_JUMBO_ENABLE) +#define F_MAC_JUMBO_ENABLE V_MAC_JUMBO_ENABLE(1U) + +#define S_MAC_RX_PAD_ENABLE 18 +#define V_MAC_RX_PAD_ENABLE(x) ((x) << S_MAC_RX_PAD_ENABLE) +#define F_MAC_RX_PAD_ENABLE V_MAC_RX_PAD_ENABLE(1U) + +#define S_MAC_RX_CRC_ENABLE 19 +#define V_MAC_RX_CRC_ENABLE(x) ((x) << S_MAC_RX_CRC_ENABLE) +#define F_MAC_RX_CRC_ENABLE V_MAC_RX_CRC_ENABLE(1U) + +#define A_GMAC_IFS 0x34 + +#define S_MAC_IFS2 0 +#define M_MAC_IFS2 0x3f +#define V_MAC_IFS2(x) ((x) << S_MAC_IFS2) +#define G_MAC_IFS2(x) (((x) >> S_MAC_IFS2) & M_MAC_IFS2) + +#define S_MAC_IFS1 8 +#define M_MAC_IFS1 0x7f +#define V_MAC_IFS1(x) ((x) << S_MAC_IFS1) +#define 
G_MAC_IFS1(x) (((x) >> S_MAC_IFS1) & M_MAC_IFS1) + +#define A_GMAC_JUMBO_FRAME_LEN 0x38 +#define A_GMAC_LNK_DLY 0x3c +#define A_GMAC_PAUSETIME 0x40 +#define A_GMAC_MCAST_LO 0x44 +#define A_GMAC_MCAST_HI 0x48 +#define A_GMAC_MCAST_MASK_LO 0x4c +#define A_GMAC_MCAST_MASK_HI 0x50 +#define A_GMAC_RMT_CNT 0x54 +#define A_GMAC_RMT_DATA 0x58 +#define A_GMAC_BACKOFF_SEED 0x5c +#define A_GMAC_TXF_THRES 0x60 + +#define S_TXF_READ_THRESHOLD 0 +#define M_TXF_READ_THRESHOLD 0xff +#define V_TXF_READ_THRESHOLD(x) ((x) << S_TXF_READ_THRESHOLD) +#define G_TXF_READ_THRESHOLD(x) (((x) >> S_TXF_READ_THRESHOLD) & M_TXF_READ_THRESHOLD) + +#define S_TXF_WRITE_THRESHOLD 16 +#define M_TXF_WRITE_THRESHOLD 0xff +#define V_TXF_WRITE_THRESHOLD(x) ((x) << S_TXF_WRITE_THRESHOLD) +#define G_TXF_WRITE_THRESHOLD(x) (((x) >> S_TXF_WRITE_THRESHOLD) & M_TXF_WRITE_THRESHOLD) + +#define MAC_REG_BASE 0x600 +#define MAC_REG_ADDR(idx, reg) (MAC_REG_BASE + (idx) * 128 + (reg)) + +#define MAC_REG_IDLO(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_LO) +#define MAC_REG_IDHI(idx) MAC_REG_ADDR(idx, A_GMAC_MACID_HI) +#define MAC_REG_CSR(idx) MAC_REG_ADDR(idx, A_GMAC_CSR) +#define MAC_REG_IFS(idx) MAC_REG_ADDR(idx, A_GMAC_IFS) +#define MAC_REG_LARGEFRAMELENGTH(idx) MAC_REG_ADDR(idx, A_GMAC_JUMBO_FRAME_LEN) +#define MAC_REG_LINKDLY(idx) MAC_REG_ADDR(idx, A_GMAC_LNK_DLY) +#define MAC_REG_PAUSETIME(idx) MAC_REG_ADDR(idx, A_GMAC_PAUSETIME) +#define MAC_REG_CASTLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_LO) +#define MAC_REG_MCASTHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_HI) +#define MAC_REG_CASTMASKLO(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_LO) +#define MAC_REG_MCASTMASKHI(idx) MAC_REG_ADDR(idx, A_GMAC_MCAST_MASK_HI) +#define MAC_REG_RMCNT(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_CNT) +#define MAC_REG_RMDATA(idx) MAC_REG_ADDR(idx, A_GMAC_RMT_DATA) +#define MAC_REG_GMRANDBACKOFFSEED(idx) MAC_REG_ADDR(idx, A_GMAC_BACKOFF_SEED) +#define MAC_REG_TXFTHRESHOLDS(idx) MAC_REG_ADDR(idx, A_GMAC_TXF_THRES) + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h new file mode 100644 index 000000000..dfa77491a --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h @@ -0,0 +1,141 @@ +/***************************************************************************** + * * + * File: gmac.h * + * $Revision: 1.6 $ * + * $Date: 2005/06/21 18:29:47 $ * + * Description: * + * Generic MAC functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. 
* + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_GMAC_H_ +#define _CXGB_GMAC_H_ + +#include "common.h" + +enum { + MAC_STATS_UPDATE_FAST, + MAC_STATS_UPDATE_FULL +}; + +enum { + MAC_DIRECTION_RX = 1, + MAC_DIRECTION_TX = 2 +}; + +struct cmac_statistics { + /* Transmit */ + u64 TxOctetsOK; + u64 TxOctetsBad; + u64 TxUnicastFramesOK; + u64 TxMulticastFramesOK; + u64 TxBroadcastFramesOK; + u64 TxPauseFrames; + u64 TxFramesWithDeferredXmissions; + u64 TxLateCollisions; + u64 TxTotalCollisions; + u64 TxFramesAbortedDueToXSCollisions; + u64 TxUnderrun; + u64 TxLengthErrors; + u64 TxInternalMACXmitError; + u64 TxFramesWithExcessiveDeferral; + u64 TxFCSErrors; + u64 TxJumboFramesOK; + u64 TxJumboOctetsOK; + + /* Receive */ + u64 RxOctetsOK; + u64 RxOctetsBad; + u64 RxUnicastFramesOK; + u64 RxMulticastFramesOK; + u64 RxBroadcastFramesOK; + u64 RxPauseFrames; + u64 RxFCSErrors; + u64 RxAlignErrors; + u64 RxSymbolErrors; + u64 RxDataErrors; + u64 RxSequenceErrors; + u64 RxRuntErrors; + u64 RxJabberErrors; + u64 RxInternalMACRcvError; + u64 RxInRangeLengthErrors; + u64 RxOutOfRangeLengthField; + u64 RxFrameTooLongErrors; + u64 RxJumboFramesOK; + u64 RxJumboOctetsOK; +}; + +struct cmac_ops { + void (*destroy)(struct cmac *); + int (*reset)(struct cmac *); + int (*interrupt_enable)(struct cmac *); + int (*interrupt_disable)(struct cmac *); + int (*interrupt_clear)(struct cmac *); + int (*interrupt_handler)(struct cmac *); + + int (*enable)(struct cmac *, int); + int (*disable)(struct cmac *, int); + + int (*loopback_enable)(struct cmac *); + int (*loopback_disable)(struct cmac *); + + int (*set_mtu)(struct cmac *, int mtu); + int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm); + + int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc); + int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex, + int *fc); + + const struct cmac_statistics *(*statistics_update)(struct cmac *, int); + + int (*macaddress_get)(struct cmac *, u8 mac_addr[6]); + int (*macaddress_set)(struct cmac *, u8 mac_addr[6]); +}; + +typedef struct _cmac_instance cmac_instance; + +struct cmac { + struct cmac_statistics stats; + adapter_t *adapter; + const struct cmac_ops *ops; + cmac_instance *instance; +}; + +struct gmac { + unsigned int stats_update_period; + struct cmac *(*create)(adapter_t *adapter, int index); + int (*reset)(adapter_t *); +}; + +extern const struct gmac t1_pm3393_ops; +extern const struct gmac t1_vsc7326_ops; + +#endif /* _CXGB_GMAC_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c new file mode 100644 index 000000000..71018a4fd --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c @@ -0,0 +1,397 @@ +/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */ +#include "common.h" +#include "mv88e1xxx.h" +#include "cphy.h" +#include "elmer0.h" + +/* MV88E1XXX MDI crossover register values */ +#define CROSSOVER_MDI 0 +#define CROSSOVER_MDIX 1 +#define CROSSOVER_AUTO 3 + +#define INTR_ENABLE_MASK 0x6CA0 + +/* + * Set the bits given by 'bitval' in PHY register 'reg'. 
+ */ +static void mdio_set_bit(struct cphy *cphy, int reg, u32 bitval) +{ + u32 val; + + (void) simple_mdio_read(cphy, reg, &val); + (void) simple_mdio_write(cphy, reg, val | bitval); +} + +/* + * Clear the bits given by 'bitval' in PHY register 'reg'. + */ +static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval) +{ + u32 val; + + (void) simple_mdio_read(cphy, reg, &val); + (void) simple_mdio_write(cphy, reg, val & ~bitval); +} + +/* + * NAME: phy_reset + * + * DESC: Reset the given PHY's port. NOTE: This is not a global + * chip reset. + * + * PARAMS: cphy - Pointer to PHY instance data. + * + * RETURN: 0 - Successful reset. + * -1 - Timeout. + */ +static int mv88e1xxx_reset(struct cphy *cphy, int wait) +{ + u32 ctl; + int time_out = 1000; + + mdio_set_bit(cphy, MII_BMCR, BMCR_RESET); + + do { + (void) simple_mdio_read(cphy, MII_BMCR, &ctl); + ctl &= BMCR_RESET; + if (ctl) + udelay(1); + } while (ctl && --time_out); + + return ctl ? -1 : 0; +} + +static int mv88e1xxx_interrupt_enable(struct cphy *cphy) +{ + /* Enable PHY interrupts. */ + (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, + INTR_ENABLE_MASK); + + /* Enable Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + u32 elmer; + + t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); + elmer |= ELMER0_GP_BIT1; + if (is_T2(cphy->adapter)) + elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); + } + return 0; +} + +static int mv88e1xxx_interrupt_disable(struct cphy *cphy) +{ + /* Disable all phy interrupts. */ + (void) simple_mdio_write(cphy, MV88E1XXX_INTERRUPT_ENABLE_REGISTER, 0); + + /* Disable Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + u32 elmer; + + t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); + elmer &= ~ELMER0_GP_BIT1; + if (is_T2(cphy->adapter)) + elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); + t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); + } + return 0; +} + +static int mv88e1xxx_interrupt_clear(struct cphy *cphy) +{ + u32 elmer; + + /* Clear PHY interrupts by reading the register. */ + (void) simple_mdio_read(cphy, + MV88E1XXX_INTERRUPT_STATUS_REGISTER, &elmer); + + /* Clear Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); + elmer |= ELMER0_GP_BIT1; + if (is_T2(cphy->adapter)) + elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); + } + return 0; +} + +/* + * Set the PHY speed and duplex. This also disables auto-negotiation, except + * for 1Gb/s, where auto-negotiation is mandatory. 
+ */ +static int mv88e1xxx_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + u32 ctl; + + (void) simple_mdio_read(phy, MII_BMCR, &ctl); + if (speed >= 0) { + ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + if (speed == SPEED_100) + ctl |= BMCR_SPEED100; + else if (speed == SPEED_1000) + ctl |= BMCR_SPEED1000; + } + if (duplex >= 0) { + ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); + if (duplex == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + } + if (ctl & BMCR_SPEED1000) /* auto-negotiation required for 1Gb/s */ + ctl |= BMCR_ANENABLE; + (void) simple_mdio_write(phy, MII_BMCR, ctl); + return 0; +} + +static int mv88e1xxx_crossover_set(struct cphy *cphy, int crossover) +{ + u32 data32; + + (void) simple_mdio_read(cphy, + MV88E1XXX_SPECIFIC_CNTRL_REGISTER, &data32); + data32 &= ~V_PSCR_MDI_XOVER_MODE(M_PSCR_MDI_XOVER_MODE); + data32 |= V_PSCR_MDI_XOVER_MODE(crossover); + (void) simple_mdio_write(cphy, + MV88E1XXX_SPECIFIC_CNTRL_REGISTER, data32); + return 0; +} + +static int mv88e1xxx_autoneg_enable(struct cphy *cphy) +{ + u32 ctl; + + (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_AUTO); + + (void) simple_mdio_read(cphy, MII_BMCR, &ctl); + /* restart autoneg for change to take effect */ + ctl |= BMCR_ANENABLE | BMCR_ANRESTART; + (void) simple_mdio_write(cphy, MII_BMCR, ctl); + return 0; +} + +static int mv88e1xxx_autoneg_disable(struct cphy *cphy) +{ + u32 ctl; + + /* + * Crossover *must* be set to manual in order to disable auto-neg. + * The Alaska FAQs document highlights this point. + */ + (void) mv88e1xxx_crossover_set(cphy, CROSSOVER_MDI); + + /* + * Must include autoneg reset when disabling auto-neg. This + * is described in the Alaska FAQ document. + */ + (void) simple_mdio_read(cphy, MII_BMCR, &ctl); + ctl &= ~BMCR_ANENABLE; + (void) simple_mdio_write(cphy, MII_BMCR, ctl | BMCR_ANRESTART); + return 0; +} + +static int mv88e1xxx_autoneg_restart(struct cphy *cphy) +{ + mdio_set_bit(cphy, MII_BMCR, BMCR_ANRESTART); + return 0; +} + +static int mv88e1xxx_advertise(struct cphy *phy, unsigned int advertise_map) +{ + u32 val = 0; + + if (advertise_map & + (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) { + (void) simple_mdio_read(phy, MII_GBCR, &val); + val &= ~(GBCR_ADV_1000HALF | GBCR_ADV_1000FULL); + if (advertise_map & ADVERTISED_1000baseT_Half) + val |= GBCR_ADV_1000HALF; + if (advertise_map & ADVERTISED_1000baseT_Full) + val |= GBCR_ADV_1000FULL; + } + (void) simple_mdio_write(phy, MII_GBCR, val); + + val = 1; + if (advertise_map & ADVERTISED_10baseT_Half) + val |= ADVERTISE_10HALF; + if (advertise_map & ADVERTISED_10baseT_Full) + val |= ADVERTISE_10FULL; + if (advertise_map & ADVERTISED_100baseT_Half) + val |= ADVERTISE_100HALF; + if (advertise_map & ADVERTISED_100baseT_Full) + val |= ADVERTISE_100FULL; + if (advertise_map & ADVERTISED_PAUSE) + val |= ADVERTISE_PAUSE; + if (advertise_map & ADVERTISED_ASYM_PAUSE) + val |= ADVERTISE_PAUSE_ASYM; + (void) simple_mdio_write(phy, MII_ADVERTISE, val); + return 0; +} + +static int mv88e1xxx_set_loopback(struct cphy *cphy, int on) +{ + if (on) + mdio_set_bit(cphy, MII_BMCR, BMCR_LOOPBACK); + else + mdio_clear_bit(cphy, MII_BMCR, BMCR_LOOPBACK); + return 0; +} + +static int mv88e1xxx_get_link_status(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + u32 status; + int sp = -1, dplx = -1, pause = 0; + + (void) simple_mdio_read(cphy, + MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); + if ((status & V_PSSR_STATUS_RESOLVED) != 0) { + if (status & V_PSSR_RX_PAUSE) + pause |= PAUSE_RX; + if (status & 
V_PSSR_TX_PAUSE) + pause |= PAUSE_TX; + dplx = (status & V_PSSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; + sp = G_PSSR_SPEED(status); + if (sp == 0) + sp = SPEED_10; + else if (sp == 1) + sp = SPEED_100; + else + sp = SPEED_1000; + } + if (link_ok) + *link_ok = (status & V_PSSR_LINK) != 0; + if (speed) + *speed = sp; + if (duplex) + *duplex = dplx; + if (fc) + *fc = pause; + return 0; +} + +static int mv88e1xxx_downshift_set(struct cphy *cphy, int downshift_enable) +{ + u32 val; + + (void) simple_mdio_read(cphy, + MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, &val); + + /* + * Set the downshift counter to 2 so we try to establish Gb link + * twice before downshifting. + */ + val &= ~(V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(M_DOWNSHIFT_CNT)); + + if (downshift_enable) + val |= V_DOWNSHIFT_ENABLE | V_DOWNSHIFT_CNT(2); + (void) simple_mdio_write(cphy, + MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER, val); + return 0; +} + +static int mv88e1xxx_interrupt_handler(struct cphy *cphy) +{ + int cphy_cause = 0; + u32 status; + + /* + * Loop until cause reads zero. Need to handle bouncing interrupts. + */ + while (1) { + u32 cause; + + (void) simple_mdio_read(cphy, + MV88E1XXX_INTERRUPT_STATUS_REGISTER, + &cause); + cause &= INTR_ENABLE_MASK; + if (!cause) + break; + + if (cause & MV88E1XXX_INTR_LINK_CHNG) { + (void) simple_mdio_read(cphy, + MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); + + if (status & MV88E1XXX_INTR_LINK_CHNG) + cphy->state |= PHY_LINK_UP; + else { + cphy->state &= ~PHY_LINK_UP; + if (cphy->state & PHY_AUTONEG_EN) + cphy->state &= ~PHY_AUTONEG_RDY; + cphy_cause |= cphy_cause_link_change; + } + } + + if (cause & MV88E1XXX_INTR_AUTONEG_DONE) + cphy->state |= PHY_AUTONEG_RDY; + + if ((cphy->state & (PHY_LINK_UP | PHY_AUTONEG_RDY)) == + (PHY_LINK_UP | PHY_AUTONEG_RDY)) + cphy_cause |= cphy_cause_link_change; + } + return cphy_cause; +} + +static void mv88e1xxx_destroy(struct cphy *cphy) +{ + kfree(cphy); +} + +static struct cphy_ops mv88e1xxx_ops = { + .destroy = mv88e1xxx_destroy, + .reset = mv88e1xxx_reset, + .interrupt_enable = mv88e1xxx_interrupt_enable, + .interrupt_disable = mv88e1xxx_interrupt_disable, + .interrupt_clear = mv88e1xxx_interrupt_clear, + .interrupt_handler = mv88e1xxx_interrupt_handler, + .autoneg_enable = mv88e1xxx_autoneg_enable, + .autoneg_disable = mv88e1xxx_autoneg_disable, + .autoneg_restart = mv88e1xxx_autoneg_restart, + .advertise = mv88e1xxx_advertise, + .set_loopback = mv88e1xxx_set_loopback, + .set_speed_duplex = mv88e1xxx_set_speed_duplex, + .get_link_status = mv88e1xxx_get_link_status, +}; + +static struct cphy *mv88e1xxx_phy_create(struct net_device *dev, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + struct adapter *adapter = netdev_priv(dev); + struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); + + if (!cphy) + return NULL; + + cphy_init(cphy, dev, phy_addr, &mv88e1xxx_ops, mdio_ops); + + /* Configure particular PHY's to run in a different mode. */ + if ((board_info(adapter)->caps & SUPPORTED_TP) && + board_info(adapter)->chip_phy == CHBT_PHY_88E1111) { + /* + * Configure the PHY transmitter as class A to reduce EMI. 
+ */ + (void) simple_mdio_write(cphy, + MV88E1XXX_EXTENDED_ADDR_REGISTER, 0xB); + (void) simple_mdio_write(cphy, + MV88E1XXX_EXTENDED_REGISTER, 0x8004); + } + (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */ + + /* LED */ + if (is_T2(adapter)) { + (void) simple_mdio_write(cphy, + MV88E1XXX_LED_CONTROL_REGISTER, 0x1); + } + + return cphy; +} + +static int mv88e1xxx_phy_reset(adapter_t* adapter) +{ + return 0; +} + +const struct gphy t1_mv88e1xxx_ops = { + .create = mv88e1xxx_phy_create, + .reset = mv88e1xxx_phy_reset +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h new file mode 100644 index 000000000..967cc4286 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h @@ -0,0 +1,127 @@ +/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */ +#ifndef CHELSIO_MV8E1XXX_H +#define CHELSIO_MV8E1XXX_H + +#ifndef BMCR_SPEED1000 +# define BMCR_SPEED1000 0x40 +#endif + +#ifndef ADVERTISE_PAUSE +# define ADVERTISE_PAUSE 0x400 +#endif +#ifndef ADVERTISE_PAUSE_ASYM +# define ADVERTISE_PAUSE_ASYM 0x800 +#endif + +/* Gigabit MII registers */ +#define MII_GBCR 9 /* 1000Base-T control register */ +#define MII_GBSR 10 /* 1000Base-T status register */ + +/* 1000Base-T control register fields */ +#define GBCR_ADV_1000HALF 0x100 +#define GBCR_ADV_1000FULL 0x200 +#define GBCR_PREFER_MASTER 0x400 +#define GBCR_MANUAL_AS_MASTER 0x800 +#define GBCR_MANUAL_CONFIG_ENABLE 0x1000 + +/* 1000Base-T status register fields */ +#define GBSR_LP_1000HALF 0x400 +#define GBSR_LP_1000FULL 0x800 +#define GBSR_REMOTE_OK 0x1000 +#define GBSR_LOCAL_OK 0x2000 +#define GBSR_LOCAL_MASTER 0x4000 +#define GBSR_MASTER_FAULT 0x8000 + +/* Marvell PHY interrupt status bits. */ +#define MV88E1XXX_INTR_JABBER 0x0001 +#define MV88E1XXX_INTR_POLARITY_CHNG 0x0002 +#define MV88E1XXX_INTR_ENG_DETECT_CHNG 0x0010 +#define MV88E1XXX_INTR_DOWNSHIFT 0x0020 +#define MV88E1XXX_INTR_MDI_XOVER_CHNG 0x0040 +#define MV88E1XXX_INTR_FIFO_OVER_UNDER 0x0080 +#define MV88E1XXX_INTR_FALSE_CARRIER 0x0100 +#define MV88E1XXX_INTR_SYMBOL_ERROR 0x0200 +#define MV88E1XXX_INTR_LINK_CHNG 0x0400 +#define MV88E1XXX_INTR_AUTONEG_DONE 0x0800 +#define MV88E1XXX_INTR_PAGE_RECV 0x1000 +#define MV88E1XXX_INTR_DUPLEX_CHNG 0x2000 +#define MV88E1XXX_INTR_SPEED_CHNG 0x4000 +#define MV88E1XXX_INTR_AUTONEG_ERR 0x8000 + +/* Marvell PHY specific registers. 
*/ +#define MV88E1XXX_SPECIFIC_CNTRL_REGISTER 16 +#define MV88E1XXX_SPECIFIC_STATUS_REGISTER 17 +#define MV88E1XXX_INTERRUPT_ENABLE_REGISTER 18 +#define MV88E1XXX_INTERRUPT_STATUS_REGISTER 19 +#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_REGISTER 20 +#define MV88E1XXX_RECV_ERR_CNTR_REGISTER 21 +#define MV88E1XXX_RES_REGISTER 22 +#define MV88E1XXX_GLOBAL_STATUS_REGISTER 23 +#define MV88E1XXX_LED_CONTROL_REGISTER 24 +#define MV88E1XXX_MANUAL_LED_OVERRIDE_REGISTER 25 +#define MV88E1XXX_EXT_PHY_SPECIFIC_CNTRL_2_REGISTER 26 +#define MV88E1XXX_EXT_PHY_SPECIFIC_STATUS_REGISTER 27 +#define MV88E1XXX_VIRTUAL_CABLE_TESTER_REGISTER 28 +#define MV88E1XXX_EXTENDED_ADDR_REGISTER 29 +#define MV88E1XXX_EXTENDED_REGISTER 30 + +/* PHY specific control register fields */ +#define S_PSCR_MDI_XOVER_MODE 5 +#define M_PSCR_MDI_XOVER_MODE 0x3 +#define V_PSCR_MDI_XOVER_MODE(x) ((x) << S_PSCR_MDI_XOVER_MODE) +#define G_PSCR_MDI_XOVER_MODE(x) (((x) >> S_PSCR_MDI_XOVER_MODE) & M_PSCR_MDI_XOVER_MODE) + +/* Extended PHY specific control register fields */ +#define S_DOWNSHIFT_ENABLE 8 +#define V_DOWNSHIFT_ENABLE (1 << S_DOWNSHIFT_ENABLE) + +#define S_DOWNSHIFT_CNT 9 +#define M_DOWNSHIFT_CNT 0x7 +#define V_DOWNSHIFT_CNT(x) ((x) << S_DOWNSHIFT_CNT) +#define G_DOWNSHIFT_CNT(x) (((x) >> S_DOWNSHIFT_CNT) & M_DOWNSHIFT_CNT) + +/* PHY specific status register fields */ +#define S_PSSR_JABBER 0 +#define V_PSSR_JABBER (1 << S_PSSR_JABBER) + +#define S_PSSR_POLARITY 1 +#define V_PSSR_POLARITY (1 << S_PSSR_POLARITY) + +#define S_PSSR_RX_PAUSE 2 +#define V_PSSR_RX_PAUSE (1 << S_PSSR_RX_PAUSE) + +#define S_PSSR_TX_PAUSE 3 +#define V_PSSR_TX_PAUSE (1 << S_PSSR_TX_PAUSE) + +#define S_PSSR_ENERGY_DETECT 4 +#define V_PSSR_ENERGY_DETECT (1 << S_PSSR_ENERGY_DETECT) + +#define S_PSSR_DOWNSHIFT_STATUS 5 +#define V_PSSR_DOWNSHIFT_STATUS (1 << S_PSSR_DOWNSHIFT_STATUS) + +#define S_PSSR_MDI 6 +#define V_PSSR_MDI (1 << S_PSSR_MDI) + +#define S_PSSR_CABLE_LEN 7 +#define M_PSSR_CABLE_LEN 0x7 +#define V_PSSR_CABLE_LEN(x) ((x) << S_PSSR_CABLE_LEN) +#define G_PSSR_CABLE_LEN(x) (((x) >> S_PSSR_CABLE_LEN) & M_PSSR_CABLE_LEN) + +#define S_PSSR_LINK 10 +#define V_PSSR_LINK (1 << S_PSSR_LINK) + +#define S_PSSR_STATUS_RESOLVED 11 +#define V_PSSR_STATUS_RESOLVED (1 << S_PSSR_STATUS_RESOLVED) + +#define S_PSSR_PAGE_RECEIVED 12 +#define V_PSSR_PAGE_RECEIVED (1 << S_PSSR_PAGE_RECEIVED) + +#define S_PSSR_DUPLEX 13 +#define V_PSSR_DUPLEX (1 << S_PSSR_DUPLEX) + +#define S_PSSR_SPEED 14 +#define M_PSSR_SPEED 0x3 +#define V_PSSR_SPEED(x) ((x) << S_PSSR_SPEED) +#define G_PSSR_SPEED(x) (((x) >> S_PSSR_SPEED) & M_PSSR_SPEED) + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c new file mode 100644 index 000000000..d0cf61155 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c @@ -0,0 +1,259 @@ +/***************************************************************************** + * * + * File: mv88x201x.c * + * $Revision: 1.12 $ * + * $Date: 2005/04/15 19:27:14 $ * + * Description: * + * Marvell PHY (mv88x201x) functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . 
* + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "cphy.h" +#include "elmer0.h" + +/* + * The 88x2010 Rev C. requires some link status registers * to be read + * twice in order to get the right values. Future * revisions will fix + * this problem and then this macro * can disappear. + */ +#define MV88x2010_LINK_STATUS_BUGS 1 + +static int led_init(struct cphy *cphy) +{ + /* Setup the LED registers so we can turn on/off. + * Writing these bits maps control to another + * register. mmd(0x1) addr(0x7) + */ + cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd); + return 0; +} + +static int led_link(struct cphy *cphy, u32 do_enable) +{ + u32 led = 0; +#define LINK_ENABLE_BIT 0x1 + + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led); + + if (do_enable & LINK_ENABLE_BIT) { + led |= LINK_ENABLE_BIT; + cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led); + } else { + led &= ~LINK_ENABLE_BIT; + cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led); + } + return 0; +} + +/* Port Reset */ +static int mv88x201x_reset(struct cphy *cphy, int wait) +{ + /* This can be done through registers. It is not required since + * a full chip reset is used. + */ + return 0; +} + +static int mv88x201x_interrupt_enable(struct cphy *cphy) +{ + /* Enable PHY LASI interrupts. */ + cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, + MDIO_PMA_LASI_LSALARM); + + /* Enable Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + u32 elmer; + + t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); + elmer |= ELMER0_GP_BIT6; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); + } + return 0; +} + +static int mv88x201x_interrupt_disable(struct cphy *cphy) +{ + /* Disable PHY LASI interrupts. */ + cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0); + + /* Disable Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + u32 elmer; + + t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); + elmer &= ~ELMER0_GP_BIT6; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); + } + return 0; +} + +static int mv88x201x_interrupt_clear(struct cphy *cphy) +{ + u32 elmer; + u32 val; + +#ifdef MV88x2010_LINK_STATUS_BUGS + /* Required to read twice before clear takes affect. */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val); + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val); + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); + + /* Read this register after the others above it else + * the register doesn't clear correctly. + */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); +#endif + + /* Clear link status. */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); + /* Clear PHY LASI interrupts. */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); + +#ifdef MV88x2010_LINK_STATUS_BUGS + /* Do it again. 
*/ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val); + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val); +#endif + + /* Clear Marvell interrupts through Elmer0. */ + if (t1_is_asic(cphy->adapter)) { + t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); + elmer |= ELMER0_GP_BIT6; + t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); + } + return 0; +} + +static int mv88x201x_interrupt_handler(struct cphy *cphy) +{ + /* Clear interrupts */ + mv88x201x_interrupt_clear(cphy); + + /* We have only enabled link change interrupts and so + * cphy_cause must be a link change interrupt. + */ + return cphy_cause_link_change; +} + +static int mv88x201x_set_loopback(struct cphy *cphy, int on) +{ + return 0; +} + +static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + u32 val = 0; + + if (link_ok) { + /* Read link status. */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); + val &= MDIO_STAT1_LSTATUS; + *link_ok = (val == MDIO_STAT1_LSTATUS); + /* Turn on/off Link LED */ + led_link(cphy, *link_ok); + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + if (fc) + *fc = PAUSE_RX | PAUSE_TX; + return 0; +} + +static void mv88x201x_destroy(struct cphy *cphy) +{ + kfree(cphy); +} + +static struct cphy_ops mv88x201x_ops = { + .destroy = mv88x201x_destroy, + .reset = mv88x201x_reset, + .interrupt_enable = mv88x201x_interrupt_enable, + .interrupt_disable = mv88x201x_interrupt_disable, + .interrupt_clear = mv88x201x_interrupt_clear, + .interrupt_handler = mv88x201x_interrupt_handler, + .get_link_status = mv88x201x_get_link_status, + .set_loopback = mv88x201x_set_loopback, + .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | + MDIO_DEVS_PHYXS | MDIO_DEVS_WIS), +}; + +static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + u32 val; + struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); + + if (!cphy) + return NULL; + + cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops); + + /* Commands the PHY to enable XFP's clock. */ + cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val); + cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1); + + /* Clear link status. Required because of a bug in the PHY. */ + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val); + cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val); + + /* Allows for Link,Ack LED turn on/off */ + led_init(cphy); + return cphy; +} + +/* Chip Reset */ +static int mv88x201x_phy_reset(adapter_t *adapter) +{ + u32 val; + + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~4; + t1_tpi_write(adapter, A_ELMER0_GPO, val); + msleep(100); + + t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); + msleep(1000); + + /* Now lets enable the Laser. 
Delay 100us */ + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= 0x8000; + t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(100); + return 0; +} + +const struct gphy t1_mv88x201x_ops = { + .create = mv88x201x_phy_create, + .reset = mv88x201x_phy_reset +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c new file mode 100644 index 000000000..a683fd3bb --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c @@ -0,0 +1,209 @@ +/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */ +#include "cphy.h" +#include "elmer0.h" +#include "suni1x10gexp_regs.h" + +/* Port Reset */ +static int my3126_reset(struct cphy *cphy, int wait) +{ + /* + * This can be done through registers. It is not required since + * a full chip reset is used. + */ + return 0; +} + +static int my3126_interrupt_enable(struct cphy *cphy) +{ + schedule_delayed_work(&cphy->phy_update, HZ/30); + t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); + return 0; +} + +static int my3126_interrupt_disable(struct cphy *cphy) +{ + cancel_delayed_work_sync(&cphy->phy_update); + return 0; +} + +static int my3126_interrupt_clear(struct cphy *cphy) +{ + return 0; +} + +#define OFFSET(REG_ADDR) (REG_ADDR << 2) + +static int my3126_interrupt_handler(struct cphy *cphy) +{ + u32 val; + u16 val16; + u16 status; + u32 act_count; + adapter_t *adapter; + adapter = cphy->adapter; + + if (cphy->count == 50) { + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); + val16 = (u16) val; + status = cphy->bmsr ^ val16; + + if (status & MDIO_STAT1_LSTATUS) + t1_link_changed(adapter, 0); + cphy->bmsr = val16; + + /* We have only enabled link change interrupts so it + must be that + */ + cphy->count = 0; + } + + t1_tpi_write(adapter, OFFSET(SUNI1x10GEXP_REG_MSTAT_CONTROL), + SUNI1x10GEXP_BITMSK_MSTAT_SNAP); + t1_tpi_read(adapter, + OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW), &act_count); + t1_tpi_read(adapter, + OFFSET(SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW), &val); + act_count += val; + + /* Populate elmer_gpo with the register value */ + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + cphy->elmer_gpo = val; + + if ( (val & (1 << 8)) || (val & (1 << 19)) || + (cphy->act_count == act_count) || cphy->act_on ) { + if (is_T2(adapter)) + val |= (1 << 9); + else if (t1_is_T1B(adapter)) + val |= (1 << 20); + cphy->act_on = 0; + } else { + if (is_T2(adapter)) + val &= ~(1 << 9); + else if (t1_is_T1B(adapter)) + val &= ~(1 << 20); + cphy->act_on = 1; + } + + t1_tpi_write(adapter, A_ELMER0_GPO, val); + + cphy->elmer_gpo = val; + cphy->act_count = act_count; + cphy->count++; + + return cphy_cause_link_change; +} + +static void my3216_poll(struct work_struct *work) +{ + struct cphy *cphy = container_of(work, struct cphy, phy_update.work); + + my3126_interrupt_handler(cphy); +} + +static int my3126_set_loopback(struct cphy *cphy, int on) +{ + return 0; +} + +/* To check the activity LED */ +static int my3126_get_link_status(struct cphy *cphy, + int *link_ok, int *speed, int *duplex, int *fc) +{ + u32 val; + u16 val16; + adapter_t *adapter; + + adapter = cphy->adapter; + cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); + val16 = (u16) val; + + /* Populate elmer_gpo with the register value */ + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + cphy->elmer_gpo = val; + + *link_ok = (val16 & MDIO_STAT1_LSTATUS); + + if (*link_ok) { + /* Turn on the LED. */ + if (is_T2(adapter)) + val &= ~(1 << 8); + else if (t1_is_T1B(adapter)) + val &= ~(1 << 19); + } else { + /* Turn off the LED. 
*/ + if (is_T2(adapter)) + val |= (1 << 8); + else if (t1_is_T1B(adapter)) + val |= (1 << 19); + } + + t1_tpi_write(adapter, A_ELMER0_GPO, val); + cphy->elmer_gpo = val; + *speed = SPEED_10000; + *duplex = DUPLEX_FULL; + + /* need to add flow control */ + if (fc) + *fc = PAUSE_RX | PAUSE_TX; + + return 0; +} + +static void my3126_destroy(struct cphy *cphy) +{ + kfree(cphy); +} + +static struct cphy_ops my3126_ops = { + .destroy = my3126_destroy, + .reset = my3126_reset, + .interrupt_enable = my3126_interrupt_enable, + .interrupt_disable = my3126_interrupt_disable, + .interrupt_clear = my3126_interrupt_clear, + .interrupt_handler = my3126_interrupt_handler, + .get_link_status = my3126_get_link_status, + .set_loopback = my3126_set_loopback, + .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | + MDIO_DEVS_PHYXS), +}; + +static struct cphy *my3126_phy_create(struct net_device *dev, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); + + if (!cphy) + return NULL; + + cphy_init(cphy, dev, phy_addr, &my3126_ops, mdio_ops); + INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); + cphy->bmsr = 0; + + return cphy; +} + +/* Chip Reset */ +static int my3126_phy_reset(adapter_t * adapter) +{ + u32 val; + + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~4; + t1_tpi_write(adapter, A_ELMER0_GPO, val); + msleep(100); + + t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); + msleep(1000); + + /* Now lets enable the Laser. Delay 100us */ + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val |= 0x8000; + t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(100); + return 0; +} + +const struct gphy t1_my3126_ops = { + .create = my3126_phy_create, + .reset = my3126_phy_reset +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c new file mode 100644 index 000000000..ec5e05052 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c @@ -0,0 +1,795 @@ +/***************************************************************************** + * * + * File: pm3393.c * + * $Revision: 1.16 $ * + * $Date: 2005/05/14 00:59:32 $ * + * Description: * + * PMC/SIERRA (pm3393) MAC-PHY functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "common.h" +#include "regs.h" +#include "gmac.h" +#include "elmer0.h" +#include "suni1x10gexp_regs.h" + +#include +#include + +#define OFFSET(REG_ADDR) ((REG_ADDR) << 2) + +/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. 
*/ +#define MAX_FRAME_SIZE 9600 + +#define IPG 12 +#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \ + SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \ + SUNI1x10GEXP_BITMSK_TXXG_PADEN) +#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \ + SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP) + +/* Update statistics every 15 minutes */ +#define STATS_TICK_SECS (15 * 60) + +enum { /* RMON registers */ + RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW, + RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW, + RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW, + RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW, + RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW, + RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW, + RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW, + RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW, + RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW, + RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW, + RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW, + RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW, + RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW, + RxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW, + RxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW, + + TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW, + TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW, + TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW, + TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW, + TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW, + TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW, + TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW, + TxJumboFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW, + TxJumboOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW +}; + +struct _cmac_instance { + u8 enabled; + u8 fc; + u8 mac_addr[6]; +}; + +static int pmread(struct cmac *cmac, u32 reg, u32 * data32) +{ + t1_tpi_read(cmac->adapter, OFFSET(reg), data32); + return 0; +} + +static int pmwrite(struct cmac *cmac, u32 reg, u32 data32) +{ + t1_tpi_write(cmac->adapter, OFFSET(reg), data32); + return 0; +} + +/* Port reset. */ +static int pm3393_reset(struct cmac *cmac) +{ + return 0; +} + +/* + * Enable interrupts for the PM3393 + * + * 1. Enable PM3393 BLOCK interrupts. + * 2. Enable PM3393 Master Interrupt bit(INTE) + * 3. Enable ELMER's PM3393 bit. + * 4. Enable Terminator external interrupt. + */ +static int pm3393_interrupt_enable(struct cmac *cmac) +{ + u32 pl_intr; + + /* PM3393 - Enabling all hardware block interrupts. 
+ */ + pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff); + + /* Don't interrupt on statistics overflow, we are polling */ + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0); + + pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff); + + /* PM3393 - Global interrupt enable + */ + /* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */ + pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, + 0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ ); + + /* TERMINATOR - PL_INTERUPTS_EXT */ + pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE); + pl_intr |= F_PL_INTR_EXT; + writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE); + return 0; +} + +static int pm3393_interrupt_disable(struct cmac *cmac) +{ + u32 elmer; + + /* PM3393 - Enabling HW interrupt blocks. */ + pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0); + pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0); + + /* PM3393 - Global interrupt enable */ + pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0); + + /* ELMER - External chip interrupts. */ + t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer); + elmer &= ~ELMER0_GP_BIT1; + t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer); + + /* TERMINATOR - PL_INTERUPTS_EXT */ + /* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP + * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level. + */ + + return 0; +} + +static int pm3393_interrupt_clear(struct cmac *cmac) +{ + u32 elmer; + u32 pl_intr; + u32 val32; + + /* PM3393 - Clearing HW interrupt blocks. Note, this assumes + * bit WCIMODE=0 for a clear-on-read. 
+ */ + pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32); + pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION, + &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32); + pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32); + + /* PM3393 - Global interrupt status + */ + pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32); + + /* ELMER - External chip interrupts. + */ + t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer); + elmer |= ELMER0_GP_BIT1; + t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer); + + /* TERMINATOR - PL_INTERUPTS_EXT + */ + pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE); + pl_intr |= F_PL_INTR_EXT; + writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE); + + return 0; +} + +/* Interrupt handler */ +static int pm3393_interrupt_handler(struct cmac *cmac) +{ + u32 master_intr_status; + + /* Read the master interrupt status register. */ + pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, + &master_intr_status); + if (netif_msg_intr(cmac->adapter)) + dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n", + master_intr_status); + + /* TBD XXX Lets just clear everything for now */ + pm3393_interrupt_clear(cmac); + + return 0; +} + +static int pm3393_enable(struct cmac *cmac, int which) +{ + if (which & MAC_DIRECTION_RX) + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, + (RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN)); + + if (which & MAC_DIRECTION_TX) { + u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0; + + if (cmac->instance->fc & PAUSE_RX) + val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX; + if (cmac->instance->fc & PAUSE_TX) + val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX; + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val); + } + + cmac->instance->enabled |= which; + return 0; +} + +static int pm3393_enable_port(struct cmac *cmac, int which) +{ + /* Clear port statistics */ + pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL, + SUNI1x10GEXP_BITMSK_MSTAT_CLEAR); + udelay(2); + memset(&cmac->stats, 0, sizeof(struct cmac_statistics)); + + pm3393_enable(cmac, which); + + /* + * XXX This should be done by the PHY and preferably not at all. + * The PHY doesn't give us link status indication on its own so have + * the link management code query it instead. + */ + t1_link_changed(cmac->adapter, 0); + return 0; +} + +static int pm3393_disable(struct cmac *cmac, int which) +{ + if (which & MAC_DIRECTION_RX) + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL); + if (which & MAC_DIRECTION_TX) + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL); + + /* + * The disable is graceful. Give the PM3393 time. Can't wait very + * long here, we may be holding locks. 
+ */ + udelay(20); + + cmac->instance->enabled &= ~which; + return 0; +} + +static int pm3393_loopback_enable(struct cmac *cmac) +{ + return 0; +} + +static int pm3393_loopback_disable(struct cmac *cmac) +{ + return 0; +} + +static int pm3393_set_mtu(struct cmac *cmac, int mtu) +{ + int enabled = cmac->instance->enabled; + + /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */ + mtu += 14 + 4; + if (mtu > MAX_FRAME_SIZE) + return -EINVAL; + + /* Disable Rx/Tx MAC before configuring it. */ + if (enabled) + pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); + + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu); + + if (enabled) + pm3393_enable(cmac, enabled); + return 0; +} + +static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) +{ + int enabled = cmac->instance->enabled & MAC_DIRECTION_RX; + u32 rx_mode; + + /* Disable MAC RX before reconfiguring it */ + if (enabled) + pm3393_disable(cmac, MAC_DIRECTION_RX); + + pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode); + rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE | + SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, + (u16)rx_mode); + + if (t1_rx_mode_promisc(rm)) { + /* Promiscuous mode. */ + rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE; + } + if (t1_rx_mode_allmulti(rm)) { + /* Accept all multicast. */ + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff); + rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; + } else if (t1_rx_mode_mc_cnt(rm)) { + /* Accept one or more multicast(s). 
*/ + struct netdev_hw_addr *ha; + int bit; + u16 mc_filter[4] = { 0, }; + + netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) { + /* bit[23:28] */ + bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f; + mc_filter[bit >> 4] |= 1 << (bit & 0xf); + } + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]); + rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; + } + + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode); + + if (enabled) + pm3393_enable(cmac, MAC_DIRECTION_RX); + + return 0; +} + +static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed, + int *duplex, int *fc) +{ + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + if (fc) + *fc = cmac->instance->fc; + return 0; +} + +static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, + int fc) +{ + if (speed >= 0 && speed != SPEED_10000) + return -1; + if (duplex >= 0 && duplex != DUPLEX_FULL) + return -1; + if (fc & ~(PAUSE_TX | PAUSE_RX)) + return -1; + + if (fc != cmac->instance->fc) { + cmac->instance->fc = (u8) fc; + if (cmac->instance->enabled & MAC_DIRECTION_TX) + pm3393_enable(cmac, MAC_DIRECTION_TX); + } + return 0; +} + +#define RMON_UPDATE(mac, name, stat_name) \ +{ \ + t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \ + t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \ + (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \ + ((u64)(val1 & 0xffff) << 16) | \ + ((u64)(val2 & 0xff) << 32) | \ + ((mac)->stats.stat_name & \ + 0xffffff0000000000ULL); \ + if (ro & \ + (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ + (mac)->stats.stat_name += 1ULL << 40; \ +} + +static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, + int flag) +{ + u64 ro; + u32 val0, val1, val2, val3; + + /* Snap the counters */ + pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, + SUNI1x10GEXP_BITMSK_MSTAT_SNAP); + + /* Counter rollover, clear on read */ + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0); + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1); + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2); + pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3); + ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | + (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); + + /* Rx stats */ + RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); + RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); + RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); + RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); + RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); + RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); + RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, + RxInternalMACRcvError); + RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); + RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); + RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); + RMON_UPDATE(mac, RxJabbers, RxJabberErrors); + RMON_UPDATE(mac, RxFragments, RxRuntErrors); + RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); + RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); + RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); 
+ + /* Tx stats */ + RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); + RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, + TxInternalMACXmitError); + RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); + RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); + RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); + RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); + RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); + RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); + RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); + + return &mac->stats; +} + +static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6]) +{ + memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN); + return 0; +} + +static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6]) +{ + u32 val, lo, mid, hi, enabled = cmac->instance->enabled; + + /* + * MAC addr: 00:07:43:00:13:09 + * + * ma[5] = 0x09 + * ma[4] = 0x13 + * ma[3] = 0x00 + * ma[2] = 0x43 + * ma[1] = 0x07 + * ma[0] = 0x00 + * + * The PM3393 requires byte swapping and reverse order entry + * when programming MAC addresses: + * + * low_bits[15:0] = ma[1]:ma[0] + * mid_bits[31:16] = ma[3]:ma[2] + * high_bits[47:32] = ma[5]:ma[4] + */ + + /* Store local copy */ + memcpy(cmac->instance->mac_addr, ma, ETH_ALEN); + + lo = ((u32) ma[1] << 8) | (u32) ma[0]; + mid = ((u32) ma[3] << 8) | (u32) ma[2]; + hi = ((u32) ma[5] << 8) | (u32) ma[4]; + + /* Disable Rx/Tx MAC before configuring it. */ + if (enabled) + pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); + + /* Set RXXG Station Address */ + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi); + + /* Set TXXG Station Address */ + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid); + pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi); + + /* Setup Exact Match Filter 1 with our MAC address + * + * Must disable exact match filter before configuring it. 
+ */ + pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val); + val &= 0xff0f; + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); + + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid); + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi); + + val |= 0x0090; + pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val); + + if (enabled) + pm3393_enable(cmac, enabled); + return 0; +} + +static void pm3393_destroy(struct cmac *cmac) +{ + kfree(cmac); +} + +static struct cmac_ops pm3393_ops = { + .destroy = pm3393_destroy, + .reset = pm3393_reset, + .interrupt_enable = pm3393_interrupt_enable, + .interrupt_disable = pm3393_interrupt_disable, + .interrupt_clear = pm3393_interrupt_clear, + .interrupt_handler = pm3393_interrupt_handler, + .enable = pm3393_enable_port, + .disable = pm3393_disable, + .loopback_enable = pm3393_loopback_enable, + .loopback_disable = pm3393_loopback_disable, + .set_mtu = pm3393_set_mtu, + .set_rx_mode = pm3393_set_rx_mode, + .get_speed_duplex_fc = pm3393_get_speed_duplex_fc, + .set_speed_duplex_fc = pm3393_set_speed_duplex_fc, + .statistics_update = pm3393_update_statistics, + .macaddress_get = pm3393_macaddress_get, + .macaddress_set = pm3393_macaddress_set +}; + +static struct cmac *pm3393_mac_create(adapter_t *adapter, int index) +{ + struct cmac *cmac; + + cmac = kzalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL); + if (!cmac) + return NULL; + + cmac->ops = &pm3393_ops; + cmac->instance = (cmac_instance *) (cmac + 1); + cmac->adapter = adapter; + cmac->instance->fc = PAUSE_TX | PAUSE_RX; + + t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000); + t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000); + t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800); + t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */ + t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800); + t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00); + t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */ + + t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */ + t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */ + t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */ + t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */ + t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */ + t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */ + t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */ + t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */ + 
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */ + t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */ + t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */ + t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt The other bit are undocumented */ + t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */ + + t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */ + t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */ + t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */ + t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */ + t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */ + t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */ + t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */ + t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */ + t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */ + t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */ + t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bit are undocumented */ + + t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */ + t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */ + t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */ + t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */ + t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */ + t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */ + + t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */ + t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */ + + t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */ + t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */ + + t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */ + /* For T1 use timer based Mac flow control. */ + t1_tpi_write(adapter, OFFSET(0x304d), 0x8000); + t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */ + t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */ + t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */ + + /* Setup Exact Match Filter 0 to allow broadcast packets. 
+	 */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0000);	/* # Disable Match Enable bit */
+	t1_tpi_write(adapter, OFFSET(0x204a), 0xffff);	/* # low addr */
+	t1_tpi_write(adapter, OFFSET(0x204b), 0xffff);	/* # mid addr */
+	t1_tpi_write(adapter, OFFSET(0x204c), 0xffff);	/* # high addr */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0009);	/* # Enable Match Enable bit */
+
+	t1_tpi_write(adapter, OFFSET(0x0003), 0x0000);	/* # NO SOP/ PAD_EN setup */
+	t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0);	/* # RXEQB disabled */
+	t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f);	/* # No Preemphasis */
+
+	return cmac;
+}
+
+static int pm3393_mac_reset(adapter_t * adapter)
+{
+	u32 val;
+	u32 x;
+	u32 is_pl4_reset_finished;
+	u32 is_pl4_outof_lock;
+	u32 is_xaui_mabc_pll_locked;
+	u32 successful_reset;
+	int i;
+
+	/* The following steps are required to properly reset
+	 * the PM3393. This information is provided in the
+	 * PM3393 datasheet (Issue 2: November 2002)
+	 * section 13.1 -- Device Reset.
+	 *
+	 * The PM3393 has three types of components that are
+	 * individually reset:
+	 *
+	 * DRESETB - Digital circuitry
+	 * PL4_ARESETB - PL4 analog circuitry
+	 * XAUI_ARESETB - XAUI bus analog circuitry
+	 *
+	 * Steps to reset PM3393 using RSTB pin:
+	 *
+	 * 1. Assert RSTB pin low ( write 0 )
+	 * 2. Wait at least 1ms to initiate a complete initialization of device.
+	 * 3. Wait until all external clocks and REFSEL are stable.
+	 * 4. Wait a minimum of 1ms (after external clocks and REFSEL are stable).
+	 * 5. De-assert RSTB ( write 1 )
+	 * 6. Wait until the internal timers expire, after ~14ms.
+	 *    - Allows analog clock synthesizer (PL4CSU) to stabilize to
+	 *      selected reference frequency before allowing the digital
+	 *      portion of the device to operate.
+	 * 7. Wait at least 200us for XAUI interface to stabilize.
+	 * 8. Verify the PM3393 came out of reset successfully.
+	 *    Set the successful-reset flag if everything worked, else try again
+	 *    a few more times.
+	 */
+
+	successful_reset = 0;
+	for (i = 0; i < 3 && !successful_reset; i++) {
+		/* 1 */
+		t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+		val &= ~1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 2 */
+		msleep(1);
+
+		/* 3 */
+		msleep(1);
+
+		/* 4 */
+		msleep(2 /*1 extra ms for safety */ );
+
+		/* 5 */
+		val |= 1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 6 */
+		msleep(15 /*1 extra ms for safety */ );
+
+		/* 7 */
+		msleep(1);
+
+		/* 8 */
+
+		/* Has PL4 analog block come out of reset correctly? */
+		t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
+		is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
+
+		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later in the init sequence;
+		 * figure out why. */
+
+		/* Have all PL4 block clocks locked? */
+		x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
+		     /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
+		is_pl4_outof_lock = (val & x);
+
+		/* ??? If this fails, might be able to software reset the XAUI part
+		 *     and try to recover... thus saving us from doing another HW reset */
+		/* Has the XAUI MABC PLL circuitry stabilized?
*/ + is_xaui_mabc_pll_locked = + (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED); + + successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock + && is_xaui_mabc_pll_locked); + + if (netif_msg_hw(adapter)) + dev_dbg(&adapter->pdev->dev, + "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, " + "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n", + i, is_pl4_reset_finished, val, + is_pl4_outof_lock, is_xaui_mabc_pll_locked); + } + return successful_reset ? 0 : 1; +} + +const struct gmac t1_pm3393_ops = { + .stats_update_period = STATS_TICK_SECS, + .create = pm3393_mac_create, + .reset = pm3393_mac_reset, +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h new file mode 100644 index 000000000..964ce59ee --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/regs.h @@ -0,0 +1,2167 @@ +/***************************************************************************** + * * + * File: regs.h * + * $Revision: 1.8 $ * + * $Date: 2005/06/21 18:29:48 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_REGS_H_ +#define _CXGB_REGS_H_ + +/* SGE registers */ +#define A_SG_CONTROL 0x0 + +#define S_CMDQ0_ENABLE 0 +#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE) +#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U) + +#define S_CMDQ1_ENABLE 1 +#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE) +#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U) + +#define S_FL0_ENABLE 2 +#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE) +#define F_FL0_ENABLE V_FL0_ENABLE(1U) + +#define S_FL1_ENABLE 3 +#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE) +#define F_FL1_ENABLE V_FL1_ENABLE(1U) + +#define S_CPL_ENABLE 4 +#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE) +#define F_CPL_ENABLE V_CPL_ENABLE(1U) + +#define S_RESPONSE_QUEUE_ENABLE 5 +#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE) +#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U) + +#define S_CMDQ_PRIORITY 6 +#define M_CMDQ_PRIORITY 0x3 +#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY) +#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY) + +#define S_DISABLE_CMDQ0_GTS 8 +#define V_DISABLE_CMDQ0_GTS(x) ((x) << S_DISABLE_CMDQ0_GTS) +#define F_DISABLE_CMDQ0_GTS V_DISABLE_CMDQ0_GTS(1U) + +#define S_DISABLE_CMDQ1_GTS 9 +#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS) +#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U) + +#define S_DISABLE_FL0_GTS 10 +#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS) +#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U) + +#define S_DISABLE_FL1_GTS 11 +#define V_DISABLE_FL1_GTS(x) ((x) 
<< S_DISABLE_FL1_GTS) +#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U) + +#define S_ENABLE_BIG_ENDIAN 12 +#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN) +#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U) + +#define S_FL_SELECTION_CRITERIA 13 +#define V_FL_SELECTION_CRITERIA(x) ((x) << S_FL_SELECTION_CRITERIA) +#define F_FL_SELECTION_CRITERIA V_FL_SELECTION_CRITERIA(1U) + +#define S_ISCSI_COALESCE 14 +#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE) +#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U) + +#define S_RX_PKT_OFFSET 15 +#define M_RX_PKT_OFFSET 0x7 +#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET) +#define G_RX_PKT_OFFSET(x) (((x) >> S_RX_PKT_OFFSET) & M_RX_PKT_OFFSET) + +#define S_VLAN_XTRACT 18 +#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT) +#define F_VLAN_XTRACT V_VLAN_XTRACT(1U) + +#define A_SG_DOORBELL 0x4 +#define A_SG_CMD0BASELWR 0x8 +#define A_SG_CMD0BASEUPR 0xc +#define A_SG_CMD1BASELWR 0x10 +#define A_SG_CMD1BASEUPR 0x14 +#define A_SG_FL0BASELWR 0x18 +#define A_SG_FL0BASEUPR 0x1c +#define A_SG_FL1BASELWR 0x20 +#define A_SG_FL1BASEUPR 0x24 +#define A_SG_CMD0SIZE 0x28 + +#define S_CMDQ0_SIZE 0 +#define M_CMDQ0_SIZE 0x1ffff +#define V_CMDQ0_SIZE(x) ((x) << S_CMDQ0_SIZE) +#define G_CMDQ0_SIZE(x) (((x) >> S_CMDQ0_SIZE) & M_CMDQ0_SIZE) + +#define A_SG_FL0SIZE 0x2c + +#define S_FL0_SIZE 0 +#define M_FL0_SIZE 0x1ffff +#define V_FL0_SIZE(x) ((x) << S_FL0_SIZE) +#define G_FL0_SIZE(x) (((x) >> S_FL0_SIZE) & M_FL0_SIZE) + +#define A_SG_RSPSIZE 0x30 + +#define S_RESPQ_SIZE 0 +#define M_RESPQ_SIZE 0x1ffff +#define V_RESPQ_SIZE(x) ((x) << S_RESPQ_SIZE) +#define G_RESPQ_SIZE(x) (((x) >> S_RESPQ_SIZE) & M_RESPQ_SIZE) + +#define A_SG_RSPBASELWR 0x34 +#define A_SG_RSPBASEUPR 0x38 +#define A_SG_FLTHRESHOLD 0x3c + +#define S_FL_THRESHOLD 0 +#define M_FL_THRESHOLD 0xffff +#define V_FL_THRESHOLD(x) ((x) << S_FL_THRESHOLD) +#define G_FL_THRESHOLD(x) (((x) >> S_FL_THRESHOLD) & M_FL_THRESHOLD) + +#define A_SG_RSPQUEUECREDIT 0x40 + +#define S_RESPQ_CREDIT 0 +#define M_RESPQ_CREDIT 0x1ffff +#define V_RESPQ_CREDIT(x) ((x) << S_RESPQ_CREDIT) +#define G_RESPQ_CREDIT(x) (((x) >> S_RESPQ_CREDIT) & M_RESPQ_CREDIT) + +#define A_SG_SLEEPING 0x48 + +#define S_SLEEPING 0 +#define M_SLEEPING 0xffff +#define V_SLEEPING(x) ((x) << S_SLEEPING) +#define G_SLEEPING(x) (((x) >> S_SLEEPING) & M_SLEEPING) + +#define A_SG_INTRTIMER 0x4c + +#define S_INTERRUPT_TIMER_COUNT 0 +#define M_INTERRUPT_TIMER_COUNT 0xffffff +#define V_INTERRUPT_TIMER_COUNT(x) ((x) << S_INTERRUPT_TIMER_COUNT) +#define G_INTERRUPT_TIMER_COUNT(x) (((x) >> S_INTERRUPT_TIMER_COUNT) & M_INTERRUPT_TIMER_COUNT) + +#define A_SG_CMD0PTR 0x50 + +#define S_CMDQ0_POINTER 0 +#define M_CMDQ0_POINTER 0xffff +#define V_CMDQ0_POINTER(x) ((x) << S_CMDQ0_POINTER) +#define G_CMDQ0_POINTER(x) (((x) >> S_CMDQ0_POINTER) & M_CMDQ0_POINTER) + +#define S_CURRENT_GENERATION_BIT 16 +#define V_CURRENT_GENERATION_BIT(x) ((x) << S_CURRENT_GENERATION_BIT) +#define F_CURRENT_GENERATION_BIT V_CURRENT_GENERATION_BIT(1U) + +#define A_SG_CMD1PTR 0x54 + +#define S_CMDQ1_POINTER 0 +#define M_CMDQ1_POINTER 0xffff +#define V_CMDQ1_POINTER(x) ((x) << S_CMDQ1_POINTER) +#define G_CMDQ1_POINTER(x) (((x) >> S_CMDQ1_POINTER) & M_CMDQ1_POINTER) + +#define A_SG_FL0PTR 0x58 + +#define S_FL0_POINTER 0 +#define M_FL0_POINTER 0xffff +#define V_FL0_POINTER(x) ((x) << S_FL0_POINTER) +#define G_FL0_POINTER(x) (((x) >> S_FL0_POINTER) & M_FL0_POINTER) + +#define A_SG_FL1PTR 0x5c + +#define S_FL1_POINTER 0 +#define M_FL1_POINTER 0xffff +#define V_FL1_POINTER(x) ((x) << S_FL1_POINTER) 
+#define G_FL1_POINTER(x) (((x) >> S_FL1_POINTER) & M_FL1_POINTER) + +#define A_SG_VERSION 0x6c + +#define S_DAY 0 +#define M_DAY 0x1f +#define V_DAY(x) ((x) << S_DAY) +#define G_DAY(x) (((x) >> S_DAY) & M_DAY) + +#define S_MONTH 5 +#define M_MONTH 0xf +#define V_MONTH(x) ((x) << S_MONTH) +#define G_MONTH(x) (((x) >> S_MONTH) & M_MONTH) + +#define A_SG_CMD1SIZE 0xb0 + +#define S_CMDQ1_SIZE 0 +#define M_CMDQ1_SIZE 0x1ffff +#define V_CMDQ1_SIZE(x) ((x) << S_CMDQ1_SIZE) +#define G_CMDQ1_SIZE(x) (((x) >> S_CMDQ1_SIZE) & M_CMDQ1_SIZE) + +#define A_SG_FL1SIZE 0xb4 + +#define S_FL1_SIZE 0 +#define M_FL1_SIZE 0x1ffff +#define V_FL1_SIZE(x) ((x) << S_FL1_SIZE) +#define G_FL1_SIZE(x) (((x) >> S_FL1_SIZE) & M_FL1_SIZE) + +#define A_SG_INT_ENABLE 0xb8 + +#define S_RESPQ_EXHAUSTED 0 +#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED) +#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U) + +#define S_RESPQ_OVERFLOW 1 +#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW) +#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U) + +#define S_FL_EXHAUSTED 2 +#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED) +#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U) + +#define S_PACKET_TOO_BIG 3 +#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG) +#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U) + +#define S_PACKET_MISMATCH 4 +#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH) +#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U) + +#define A_SG_INT_CAUSE 0xbc +#define A_SG_RESPACCUTIMER 0xc0 + +/* MC3 registers */ +#define A_MC3_CFG 0x100 + +#define S_CLK_ENABLE 0 +#define V_CLK_ENABLE(x) ((x) << S_CLK_ENABLE) +#define F_CLK_ENABLE V_CLK_ENABLE(1U) + +#define S_READY 1 +#define V_READY(x) ((x) << S_READY) +#define F_READY V_READY(1U) + +#define S_READ_TO_WRITE_DELAY 2 +#define M_READ_TO_WRITE_DELAY 0x7 +#define V_READ_TO_WRITE_DELAY(x) ((x) << S_READ_TO_WRITE_DELAY) +#define G_READ_TO_WRITE_DELAY(x) (((x) >> S_READ_TO_WRITE_DELAY) & M_READ_TO_WRITE_DELAY) + +#define S_WRITE_TO_READ_DELAY 5 +#define M_WRITE_TO_READ_DELAY 0x7 +#define V_WRITE_TO_READ_DELAY(x) ((x) << S_WRITE_TO_READ_DELAY) +#define G_WRITE_TO_READ_DELAY(x) (((x) >> S_WRITE_TO_READ_DELAY) & M_WRITE_TO_READ_DELAY) + +#define S_MC3_BANK_CYCLE 8 +#define M_MC3_BANK_CYCLE 0xf +#define V_MC3_BANK_CYCLE(x) ((x) << S_MC3_BANK_CYCLE) +#define G_MC3_BANK_CYCLE(x) (((x) >> S_MC3_BANK_CYCLE) & M_MC3_BANK_CYCLE) + +#define S_REFRESH_CYCLE 12 +#define M_REFRESH_CYCLE 0xf +#define V_REFRESH_CYCLE(x) ((x) << S_REFRESH_CYCLE) +#define G_REFRESH_CYCLE(x) (((x) >> S_REFRESH_CYCLE) & M_REFRESH_CYCLE) + +#define S_PRECHARGE_CYCLE 16 +#define M_PRECHARGE_CYCLE 0x3 +#define V_PRECHARGE_CYCLE(x) ((x) << S_PRECHARGE_CYCLE) +#define G_PRECHARGE_CYCLE(x) (((x) >> S_PRECHARGE_CYCLE) & M_PRECHARGE_CYCLE) + +#define S_ACTIVE_TO_READ_WRITE_DELAY 18 +#define V_ACTIVE_TO_READ_WRITE_DELAY(x) ((x) << S_ACTIVE_TO_READ_WRITE_DELAY) +#define F_ACTIVE_TO_READ_WRITE_DELAY V_ACTIVE_TO_READ_WRITE_DELAY(1U) + +#define S_ACTIVE_TO_PRECHARGE_DELAY 19 +#define M_ACTIVE_TO_PRECHARGE_DELAY 0x7 +#define V_ACTIVE_TO_PRECHARGE_DELAY(x) ((x) << S_ACTIVE_TO_PRECHARGE_DELAY) +#define G_ACTIVE_TO_PRECHARGE_DELAY(x) (((x) >> S_ACTIVE_TO_PRECHARGE_DELAY) & M_ACTIVE_TO_PRECHARGE_DELAY) + +#define S_WRITE_RECOVERY_DELAY 22 +#define M_WRITE_RECOVERY_DELAY 0x3 +#define V_WRITE_RECOVERY_DELAY(x) ((x) << S_WRITE_RECOVERY_DELAY) +#define G_WRITE_RECOVERY_DELAY(x) (((x) >> S_WRITE_RECOVERY_DELAY) & M_WRITE_RECOVERY_DELAY) + +#define S_DENSITY 24 +#define M_DENSITY 0x3 +#define V_DENSITY(x) ((x) << S_DENSITY) +#define G_DENSITY(x) 
(((x) >> S_DENSITY) & M_DENSITY) + +#define S_ORGANIZATION 26 +#define V_ORGANIZATION(x) ((x) << S_ORGANIZATION) +#define F_ORGANIZATION V_ORGANIZATION(1U) + +#define S_BANKS 27 +#define V_BANKS(x) ((x) << S_BANKS) +#define F_BANKS V_BANKS(1U) + +#define S_UNREGISTERED 28 +#define V_UNREGISTERED(x) ((x) << S_UNREGISTERED) +#define F_UNREGISTERED V_UNREGISTERED(1U) + +#define S_MC3_WIDTH 29 +#define M_MC3_WIDTH 0x3 +#define V_MC3_WIDTH(x) ((x) << S_MC3_WIDTH) +#define G_MC3_WIDTH(x) (((x) >> S_MC3_WIDTH) & M_MC3_WIDTH) + +#define S_MC3_SLOW 31 +#define V_MC3_SLOW(x) ((x) << S_MC3_SLOW) +#define F_MC3_SLOW V_MC3_SLOW(1U) + +#define A_MC3_MODE 0x104 + +#define S_MC3_MODE 0 +#define M_MC3_MODE 0x3fff +#define V_MC3_MODE(x) ((x) << S_MC3_MODE) +#define G_MC3_MODE(x) (((x) >> S_MC3_MODE) & M_MC3_MODE) + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define A_MC3_EXT_MODE 0x108 + +#define S_MC3_EXTENDED_MODE 0 +#define M_MC3_EXTENDED_MODE 0x3fff +#define V_MC3_EXTENDED_MODE(x) ((x) << S_MC3_EXTENDED_MODE) +#define G_MC3_EXTENDED_MODE(x) (((x) >> S_MC3_EXTENDED_MODE) & M_MC3_EXTENDED_MODE) + +#define A_MC3_PRECHARG 0x10c +#define A_MC3_REFRESH 0x110 + +#define S_REFRESH_ENABLE 0 +#define V_REFRESH_ENABLE(x) ((x) << S_REFRESH_ENABLE) +#define F_REFRESH_ENABLE V_REFRESH_ENABLE(1U) + +#define S_REFRESH_DIVISOR 1 +#define M_REFRESH_DIVISOR 0x3fff +#define V_REFRESH_DIVISOR(x) ((x) << S_REFRESH_DIVISOR) +#define G_REFRESH_DIVISOR(x) (((x) >> S_REFRESH_DIVISOR) & M_REFRESH_DIVISOR) + +#define A_MC3_STROBE 0x114 + +#define S_MASTER_DLL_RESET 0 +#define V_MASTER_DLL_RESET(x) ((x) << S_MASTER_DLL_RESET) +#define F_MASTER_DLL_RESET V_MASTER_DLL_RESET(1U) + +#define S_MASTER_DLL_TAP_COUNT 1 +#define M_MASTER_DLL_TAP_COUNT 0xff +#define V_MASTER_DLL_TAP_COUNT(x) ((x) << S_MASTER_DLL_TAP_COUNT) +#define G_MASTER_DLL_TAP_COUNT(x) (((x) >> S_MASTER_DLL_TAP_COUNT) & M_MASTER_DLL_TAP_COUNT) + +#define S_MASTER_DLL_LOCKED 9 +#define V_MASTER_DLL_LOCKED(x) ((x) << S_MASTER_DLL_LOCKED) +#define F_MASTER_DLL_LOCKED V_MASTER_DLL_LOCKED(1U) + +#define S_MASTER_DLL_MAX_TAP_COUNT 10 +#define V_MASTER_DLL_MAX_TAP_COUNT(x) ((x) << S_MASTER_DLL_MAX_TAP_COUNT) +#define F_MASTER_DLL_MAX_TAP_COUNT V_MASTER_DLL_MAX_TAP_COUNT(1U) + +#define S_MASTER_DLL_TAP_COUNT_OFFSET 11 +#define M_MASTER_DLL_TAP_COUNT_OFFSET 0x3f +#define V_MASTER_DLL_TAP_COUNT_OFFSET(x) ((x) << S_MASTER_DLL_TAP_COUNT_OFFSET) +#define G_MASTER_DLL_TAP_COUNT_OFFSET(x) (((x) >> S_MASTER_DLL_TAP_COUNT_OFFSET) & M_MASTER_DLL_TAP_COUNT_OFFSET) + +#define S_SLAVE_DLL_RESET 11 +#define V_SLAVE_DLL_RESET(x) ((x) << S_SLAVE_DLL_RESET) +#define F_SLAVE_DLL_RESET V_SLAVE_DLL_RESET(1U) + +#define S_SLAVE_DLL_DELTA 12 +#define M_SLAVE_DLL_DELTA 0xf +#define V_SLAVE_DLL_DELTA(x) ((x) << S_SLAVE_DLL_DELTA) +#define G_SLAVE_DLL_DELTA(x) (((x) >> S_SLAVE_DLL_DELTA) & M_SLAVE_DLL_DELTA) + +#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 17 +#define M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT 0x3f +#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) +#define G_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) & M_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT) + +#define S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE 23 +#define V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(x) ((x) << S_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE) +#define F_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE V_SLAVE_DELAY_LINE_MANUAL_TAP_COUNT_ENABLE(1U) + +#define S_SLAVE_DELAY_LINE_TAP_COUNT 24 +#define 
M_SLAVE_DELAY_LINE_TAP_COUNT 0x3f +#define V_SLAVE_DELAY_LINE_TAP_COUNT(x) ((x) << S_SLAVE_DELAY_LINE_TAP_COUNT) +#define G_SLAVE_DELAY_LINE_TAP_COUNT(x) (((x) >> S_SLAVE_DELAY_LINE_TAP_COUNT) & M_SLAVE_DELAY_LINE_TAP_COUNT) + +#define A_MC3_ECC_CNTL 0x118 + +#define S_ECC_GENERATION_ENABLE 0 +#define V_ECC_GENERATION_ENABLE(x) ((x) << S_ECC_GENERATION_ENABLE) +#define F_ECC_GENERATION_ENABLE V_ECC_GENERATION_ENABLE(1U) + +#define S_ECC_CHECK_ENABLE 1 +#define V_ECC_CHECK_ENABLE(x) ((x) << S_ECC_CHECK_ENABLE) +#define F_ECC_CHECK_ENABLE V_ECC_CHECK_ENABLE(1U) + +#define S_CORRECTABLE_ERROR_COUNT 2 +#define M_CORRECTABLE_ERROR_COUNT 0xff +#define V_CORRECTABLE_ERROR_COUNT(x) ((x) << S_CORRECTABLE_ERROR_COUNT) +#define G_CORRECTABLE_ERROR_COUNT(x) (((x) >> S_CORRECTABLE_ERROR_COUNT) & M_CORRECTABLE_ERROR_COUNT) + +#define S_UNCORRECTABLE_ERROR_COUNT 10 +#define M_UNCORRECTABLE_ERROR_COUNT 0xff +#define V_UNCORRECTABLE_ERROR_COUNT(x) ((x) << S_UNCORRECTABLE_ERROR_COUNT) +#define G_UNCORRECTABLE_ERROR_COUNT(x) (((x) >> S_UNCORRECTABLE_ERROR_COUNT) & M_UNCORRECTABLE_ERROR_COUNT) + +#define A_MC3_CE_ADDR 0x11c + +#define S_MC3_CE_ADDR 4 +#define M_MC3_CE_ADDR 0xfffffff +#define V_MC3_CE_ADDR(x) ((x) << S_MC3_CE_ADDR) +#define G_MC3_CE_ADDR(x) (((x) >> S_MC3_CE_ADDR) & M_MC3_CE_ADDR) + +#define A_MC3_CE_DATA0 0x120 +#define A_MC3_CE_DATA1 0x124 +#define A_MC3_CE_DATA2 0x128 +#define A_MC3_CE_DATA3 0x12c +#define A_MC3_CE_DATA4 0x130 +#define A_MC3_UE_ADDR 0x134 + +#define S_MC3_UE_ADDR 4 +#define M_MC3_UE_ADDR 0xfffffff +#define V_MC3_UE_ADDR(x) ((x) << S_MC3_UE_ADDR) +#define G_MC3_UE_ADDR(x) (((x) >> S_MC3_UE_ADDR) & M_MC3_UE_ADDR) + +#define A_MC3_UE_DATA0 0x138 +#define A_MC3_UE_DATA1 0x13c +#define A_MC3_UE_DATA2 0x140 +#define A_MC3_UE_DATA3 0x144 +#define A_MC3_UE_DATA4 0x148 +#define A_MC3_BD_ADDR 0x14c +#define A_MC3_BD_DATA0 0x150 +#define A_MC3_BD_DATA1 0x154 +#define A_MC3_BD_DATA2 0x158 +#define A_MC3_BD_DATA3 0x15c +#define A_MC3_BD_DATA4 0x160 +#define A_MC3_BD_OP 0x164 + +#define S_BACK_DOOR_OPERATION 0 +#define V_BACK_DOOR_OPERATION(x) ((x) << S_BACK_DOOR_OPERATION) +#define F_BACK_DOOR_OPERATION V_BACK_DOOR_OPERATION(1U) + +#define A_MC3_BIST_ADDR_BEG 0x168 +#define A_MC3_BIST_ADDR_END 0x16c +#define A_MC3_BIST_DATA 0x170 +#define A_MC3_BIST_OP 0x174 + +#define S_OP 0 +#define V_OP(x) ((x) << S_OP) +#define F_OP V_OP(1U) + +#define S_DATA_PATTERN 1 +#define M_DATA_PATTERN 0x3 +#define V_DATA_PATTERN(x) ((x) << S_DATA_PATTERN) +#define G_DATA_PATTERN(x) (((x) >> S_DATA_PATTERN) & M_DATA_PATTERN) + +#define S_CONTINUOUS 3 +#define V_CONTINUOUS(x) ((x) << S_CONTINUOUS) +#define F_CONTINUOUS V_CONTINUOUS(1U) + +#define A_MC3_INT_ENABLE 0x178 + +#define S_MC3_CORR_ERR 0 +#define V_MC3_CORR_ERR(x) ((x) << S_MC3_CORR_ERR) +#define F_MC3_CORR_ERR V_MC3_CORR_ERR(1U) + +#define S_MC3_UNCORR_ERR 1 +#define V_MC3_UNCORR_ERR(x) ((x) << S_MC3_UNCORR_ERR) +#define F_MC3_UNCORR_ERR V_MC3_UNCORR_ERR(1U) + +#define S_MC3_PARITY_ERR 2 +#define M_MC3_PARITY_ERR 0xff +#define V_MC3_PARITY_ERR(x) ((x) << S_MC3_PARITY_ERR) +#define G_MC3_PARITY_ERR(x) (((x) >> S_MC3_PARITY_ERR) & M_MC3_PARITY_ERR) + +#define S_MC3_ADDR_ERR 10 +#define V_MC3_ADDR_ERR(x) ((x) << S_MC3_ADDR_ERR) +#define F_MC3_ADDR_ERR V_MC3_ADDR_ERR(1U) + +#define A_MC3_INT_CAUSE 0x17c + +/* MC4 registers */ +#define A_MC4_CFG 0x180 + +#define S_POWER_UP 0 +#define V_POWER_UP(x) ((x) << S_POWER_UP) +#define F_POWER_UP V_POWER_UP(1U) + +#define S_MC4_BANK_CYCLE 8 +#define M_MC4_BANK_CYCLE 0x7 +#define V_MC4_BANK_CYCLE(x) ((x) << 
S_MC4_BANK_CYCLE) +#define G_MC4_BANK_CYCLE(x) (((x) >> S_MC4_BANK_CYCLE) & M_MC4_BANK_CYCLE) + +#define S_MC4_NARROW 24 +#define V_MC4_NARROW(x) ((x) << S_MC4_NARROW) +#define F_MC4_NARROW V_MC4_NARROW(1U) + +#define S_MC4_SLOW 25 +#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW) +#define F_MC4_SLOW V_MC4_SLOW(1U) + +#define S_MC4A_WIDTH 24 +#define M_MC4A_WIDTH 0x3 +#define V_MC4A_WIDTH(x) ((x) << S_MC4A_WIDTH) +#define G_MC4A_WIDTH(x) (((x) >> S_MC4A_WIDTH) & M_MC4A_WIDTH) + +#define S_MC4A_SLOW 26 +#define V_MC4A_SLOW(x) ((x) << S_MC4A_SLOW) +#define F_MC4A_SLOW V_MC4A_SLOW(1U) + +#define A_MC4_MODE 0x184 + +#define S_MC4_MODE 0 +#define M_MC4_MODE 0x7fff +#define V_MC4_MODE(x) ((x) << S_MC4_MODE) +#define G_MC4_MODE(x) (((x) >> S_MC4_MODE) & M_MC4_MODE) + +#define A_MC4_EXT_MODE 0x188 + +#define S_MC4_EXTENDED_MODE 0 +#define M_MC4_EXTENDED_MODE 0x7fff +#define V_MC4_EXTENDED_MODE(x) ((x) << S_MC4_EXTENDED_MODE) +#define G_MC4_EXTENDED_MODE(x) (((x) >> S_MC4_EXTENDED_MODE) & M_MC4_EXTENDED_MODE) + +#define A_MC4_REFRESH 0x190 +#define A_MC4_STROBE 0x194 +#define A_MC4_ECC_CNTL 0x198 +#define A_MC4_CE_ADDR 0x19c + +#define S_MC4_CE_ADDR 4 +#define M_MC4_CE_ADDR 0xffffff +#define V_MC4_CE_ADDR(x) ((x) << S_MC4_CE_ADDR) +#define G_MC4_CE_ADDR(x) (((x) >> S_MC4_CE_ADDR) & M_MC4_CE_ADDR) + +#define A_MC4_CE_DATA0 0x1a0 +#define A_MC4_CE_DATA1 0x1a4 +#define A_MC4_CE_DATA2 0x1a8 +#define A_MC4_CE_DATA3 0x1ac +#define A_MC4_CE_DATA4 0x1b0 +#define A_MC4_UE_ADDR 0x1b4 + +#define S_MC4_UE_ADDR 4 +#define M_MC4_UE_ADDR 0xffffff +#define V_MC4_UE_ADDR(x) ((x) << S_MC4_UE_ADDR) +#define G_MC4_UE_ADDR(x) (((x) >> S_MC4_UE_ADDR) & M_MC4_UE_ADDR) + +#define A_MC4_UE_DATA0 0x1b8 +#define A_MC4_UE_DATA1 0x1bc +#define A_MC4_UE_DATA2 0x1c0 +#define A_MC4_UE_DATA3 0x1c4 +#define A_MC4_UE_DATA4 0x1c8 +#define A_MC4_BD_ADDR 0x1cc + +#define S_MC4_BACK_DOOR_ADDR 0 +#define M_MC4_BACK_DOOR_ADDR 0xfffffff +#define V_MC4_BACK_DOOR_ADDR(x) ((x) << S_MC4_BACK_DOOR_ADDR) +#define G_MC4_BACK_DOOR_ADDR(x) (((x) >> S_MC4_BACK_DOOR_ADDR) & M_MC4_BACK_DOOR_ADDR) + +#define A_MC4_BD_DATA0 0x1d0 +#define A_MC4_BD_DATA1 0x1d4 +#define A_MC4_BD_DATA2 0x1d8 +#define A_MC4_BD_DATA3 0x1dc +#define A_MC4_BD_DATA4 0x1e0 +#define A_MC4_BD_OP 0x1e4 + +#define S_OPERATION 0 +#define V_OPERATION(x) ((x) << S_OPERATION) +#define F_OPERATION V_OPERATION(1U) + +#define A_MC4_BIST_ADDR_BEG 0x1e8 +#define A_MC4_BIST_ADDR_END 0x1ec +#define A_MC4_BIST_DATA 0x1f0 +#define A_MC4_BIST_OP 0x1f4 +#define A_MC4_INT_ENABLE 0x1f8 + +#define S_MC4_CORR_ERR 0 +#define V_MC4_CORR_ERR(x) ((x) << S_MC4_CORR_ERR) +#define F_MC4_CORR_ERR V_MC4_CORR_ERR(1U) + +#define S_MC4_UNCORR_ERR 1 +#define V_MC4_UNCORR_ERR(x) ((x) << S_MC4_UNCORR_ERR) +#define F_MC4_UNCORR_ERR V_MC4_UNCORR_ERR(1U) + +#define S_MC4_ADDR_ERR 2 +#define V_MC4_ADDR_ERR(x) ((x) << S_MC4_ADDR_ERR) +#define F_MC4_ADDR_ERR V_MC4_ADDR_ERR(1U) + +#define A_MC4_INT_CAUSE 0x1fc + +/* TPI registers */ +#define A_TPI_ADDR 0x280 + +#define S_TPI_ADDRESS 0 +#define M_TPI_ADDRESS 0xffffff +#define V_TPI_ADDRESS(x) ((x) << S_TPI_ADDRESS) +#define G_TPI_ADDRESS(x) (((x) >> S_TPI_ADDRESS) & M_TPI_ADDRESS) + +#define A_TPI_WR_DATA 0x284 +#define A_TPI_RD_DATA 0x288 +#define A_TPI_CSR 0x28c + +#define S_TPIWR 0 +#define V_TPIWR(x) ((x) << S_TPIWR) +#define F_TPIWR V_TPIWR(1U) + +#define S_TPIRDY 1 +#define V_TPIRDY(x) ((x) << S_TPIRDY) +#define F_TPIRDY V_TPIRDY(1U) + +#define S_INT_DIR 31 +#define V_INT_DIR(x) ((x) << S_INT_DIR) +#define F_INT_DIR V_INT_DIR(1U) + +#define A_TPI_PAR 0x29c + +#define 
S_TPIPAR 0 +#define M_TPIPAR 0x7f +#define V_TPIPAR(x) ((x) << S_TPIPAR) +#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR) + + +/* TP registers */ +#define A_TP_IN_CONFIG 0x300 + +#define S_TP_IN_CSPI_TUNNEL 0 +#define V_TP_IN_CSPI_TUNNEL(x) ((x) << S_TP_IN_CSPI_TUNNEL) +#define F_TP_IN_CSPI_TUNNEL V_TP_IN_CSPI_TUNNEL(1U) + +#define S_TP_IN_CSPI_ETHERNET 1 +#define V_TP_IN_CSPI_ETHERNET(x) ((x) << S_TP_IN_CSPI_ETHERNET) +#define F_TP_IN_CSPI_ETHERNET V_TP_IN_CSPI_ETHERNET(1U) + +#define S_TP_IN_CSPI_CPL 3 +#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL) +#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U) + +#define S_TP_IN_CSPI_POS 4 +#define V_TP_IN_CSPI_POS(x) ((x) << S_TP_IN_CSPI_POS) +#define F_TP_IN_CSPI_POS V_TP_IN_CSPI_POS(1U) + +#define S_TP_IN_CSPI_CHECK_IP_CSUM 5 +#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM) +#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U) + +#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6 +#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM) +#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U) + +#define S_TP_IN_ESPI_TUNNEL 7 +#define V_TP_IN_ESPI_TUNNEL(x) ((x) << S_TP_IN_ESPI_TUNNEL) +#define F_TP_IN_ESPI_TUNNEL V_TP_IN_ESPI_TUNNEL(1U) + +#define S_TP_IN_ESPI_ETHERNET 8 +#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET) +#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U) + +#define S_TP_IN_ESPI_CPL 10 +#define V_TP_IN_ESPI_CPL(x) ((x) << S_TP_IN_ESPI_CPL) +#define F_TP_IN_ESPI_CPL V_TP_IN_ESPI_CPL(1U) + +#define S_TP_IN_ESPI_POS 11 +#define V_TP_IN_ESPI_POS(x) ((x) << S_TP_IN_ESPI_POS) +#define F_TP_IN_ESPI_POS V_TP_IN_ESPI_POS(1U) + +#define S_TP_IN_ESPI_CHECK_IP_CSUM 12 +#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM) +#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U) + +#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13 +#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM) +#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U) + +#define S_OFFLOAD_DISABLE 14 +#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE) +#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U) + +#define A_TP_OUT_CONFIG 0x304 + +#define S_TP_OUT_C_ETH 0 +#define V_TP_OUT_C_ETH(x) ((x) << S_TP_OUT_C_ETH) +#define F_TP_OUT_C_ETH V_TP_OUT_C_ETH(1U) + +#define S_TP_OUT_CSPI_CPL 2 +#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL) +#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U) + +#define S_TP_OUT_CSPI_POS 3 +#define V_TP_OUT_CSPI_POS(x) ((x) << S_TP_OUT_CSPI_POS) +#define F_TP_OUT_CSPI_POS V_TP_OUT_CSPI_POS(1U) + +#define S_TP_OUT_CSPI_GENERATE_IP_CSUM 4 +#define V_TP_OUT_CSPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_IP_CSUM) +#define F_TP_OUT_CSPI_GENERATE_IP_CSUM V_TP_OUT_CSPI_GENERATE_IP_CSUM(1U) + +#define S_TP_OUT_CSPI_GENERATE_TCP_CSUM 5 +#define V_TP_OUT_CSPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_CSPI_GENERATE_TCP_CSUM) +#define F_TP_OUT_CSPI_GENERATE_TCP_CSUM V_TP_OUT_CSPI_GENERATE_TCP_CSUM(1U) + +#define S_TP_OUT_ESPI_ETHERNET 6 +#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET) +#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U) + +#define S_TP_OUT_ESPI_TAG_ETHERNET 7 +#define V_TP_OUT_ESPI_TAG_ETHERNET(x) ((x) << S_TP_OUT_ESPI_TAG_ETHERNET) +#define F_TP_OUT_ESPI_TAG_ETHERNET V_TP_OUT_ESPI_TAG_ETHERNET(1U) + +#define S_TP_OUT_ESPI_CPL 8 +#define V_TP_OUT_ESPI_CPL(x) ((x) << S_TP_OUT_ESPI_CPL) +#define F_TP_OUT_ESPI_CPL V_TP_OUT_ESPI_CPL(1U) + +#define S_TP_OUT_ESPI_POS 9 +#define 
V_TP_OUT_ESPI_POS(x) ((x) << S_TP_OUT_ESPI_POS) +#define F_TP_OUT_ESPI_POS V_TP_OUT_ESPI_POS(1U) + +#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10 +#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM) +#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U) + +#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11 +#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM) +#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U) + +#define A_TP_GLOBAL_CONFIG 0x308 + +#define S_IP_TTL 0 +#define M_IP_TTL 0xff +#define V_IP_TTL(x) ((x) << S_IP_TTL) +#define G_IP_TTL(x) (((x) >> S_IP_TTL) & M_IP_TTL) + +#define S_TCAM_SERVER_REGION_USAGE 8 +#define M_TCAM_SERVER_REGION_USAGE 0x3 +#define V_TCAM_SERVER_REGION_USAGE(x) ((x) << S_TCAM_SERVER_REGION_USAGE) +#define G_TCAM_SERVER_REGION_USAGE(x) (((x) >> S_TCAM_SERVER_REGION_USAGE) & M_TCAM_SERVER_REGION_USAGE) + +#define S_QOS_MAPPING 10 +#define V_QOS_MAPPING(x) ((x) << S_QOS_MAPPING) +#define F_QOS_MAPPING V_QOS_MAPPING(1U) + +#define S_TCP_CSUM 11 +#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM) +#define F_TCP_CSUM V_TCP_CSUM(1U) + +#define S_UDP_CSUM 12 +#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM) +#define F_UDP_CSUM V_UDP_CSUM(1U) + +#define S_IP_CSUM 13 +#define V_IP_CSUM(x) ((x) << S_IP_CSUM) +#define F_IP_CSUM V_IP_CSUM(1U) + +#define S_IP_ID_SPLIT 14 +#define V_IP_ID_SPLIT(x) ((x) << S_IP_ID_SPLIT) +#define F_IP_ID_SPLIT V_IP_ID_SPLIT(1U) + +#define S_PATH_MTU 15 +#define V_PATH_MTU(x) ((x) << S_PATH_MTU) +#define F_PATH_MTU V_PATH_MTU(1U) + +#define S_5TUPLE_LOOKUP 17 +#define M_5TUPLE_LOOKUP 0x3 +#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP) +#define G_5TUPLE_LOOKUP(x) (((x) >> S_5TUPLE_LOOKUP) & M_5TUPLE_LOOKUP) + +#define S_IP_FRAGMENT_DROP 19 +#define V_IP_FRAGMENT_DROP(x) ((x) << S_IP_FRAGMENT_DROP) +#define F_IP_FRAGMENT_DROP V_IP_FRAGMENT_DROP(1U) + +#define S_PING_DROP 20 +#define V_PING_DROP(x) ((x) << S_PING_DROP) +#define F_PING_DROP V_PING_DROP(1U) + +#define S_PROTECT_MODE 21 +#define V_PROTECT_MODE(x) ((x) << S_PROTECT_MODE) +#define F_PROTECT_MODE V_PROTECT_MODE(1U) + +#define S_SYN_COOKIE_ALGORITHM 22 +#define V_SYN_COOKIE_ALGORITHM(x) ((x) << S_SYN_COOKIE_ALGORITHM) +#define F_SYN_COOKIE_ALGORITHM V_SYN_COOKIE_ALGORITHM(1U) + +#define S_ATTACK_FILTER 23 +#define V_ATTACK_FILTER(x) ((x) << S_ATTACK_FILTER) +#define F_ATTACK_FILTER V_ATTACK_FILTER(1U) + +#define S_INTERFACE_TYPE 24 +#define V_INTERFACE_TYPE(x) ((x) << S_INTERFACE_TYPE) +#define F_INTERFACE_TYPE V_INTERFACE_TYPE(1U) + +#define S_DISABLE_RX_FLOW_CONTROL 25 +#define V_DISABLE_RX_FLOW_CONTROL(x) ((x) << S_DISABLE_RX_FLOW_CONTROL) +#define F_DISABLE_RX_FLOW_CONTROL V_DISABLE_RX_FLOW_CONTROL(1U) + +#define S_SYN_COOKIE_PARAMETER 26 +#define M_SYN_COOKIE_PARAMETER 0x3f +#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER) +#define G_SYN_COOKIE_PARAMETER(x) (((x) >> S_SYN_COOKIE_PARAMETER) & M_SYN_COOKIE_PARAMETER) + +#define A_TP_GLOBAL_RX_CREDITS 0x30c +#define A_TP_CM_SIZE 0x310 +#define A_TP_CM_MM_BASE 0x314 + +#define S_CM_MEMMGR_BASE 0 +#define M_CM_MEMMGR_BASE 0xfffffff +#define V_CM_MEMMGR_BASE(x) ((x) << S_CM_MEMMGR_BASE) +#define G_CM_MEMMGR_BASE(x) (((x) >> S_CM_MEMMGR_BASE) & M_CM_MEMMGR_BASE) + +#define A_TP_CM_TIMER_BASE 0x318 + +#define S_CM_TIMER_BASE 0 +#define M_CM_TIMER_BASE 0xfffffff +#define V_CM_TIMER_BASE(x) ((x) << S_CM_TIMER_BASE) +#define G_CM_TIMER_BASE(x) (((x) >> S_CM_TIMER_BASE) & M_CM_TIMER_BASE) + +#define A_TP_PM_SIZE 0x31c +#define 
A_TP_PM_TX_BASE 0x320 +#define A_TP_PM_DEFRAG_BASE 0x324 +#define A_TP_PM_RX_BASE 0x328 +#define A_TP_PM_RX_PG_SIZE 0x32c +#define A_TP_PM_RX_MAX_PGS 0x330 +#define A_TP_PM_TX_PG_SIZE 0x334 +#define A_TP_PM_TX_MAX_PGS 0x338 +#define A_TP_TCP_OPTIONS 0x340 + +#define S_TIMESTAMP 0 +#define M_TIMESTAMP 0x3 +#define V_TIMESTAMP(x) ((x) << S_TIMESTAMP) +#define G_TIMESTAMP(x) (((x) >> S_TIMESTAMP) & M_TIMESTAMP) + +#define S_WINDOW_SCALE 2 +#define M_WINDOW_SCALE 0x3 +#define V_WINDOW_SCALE(x) ((x) << S_WINDOW_SCALE) +#define G_WINDOW_SCALE(x) (((x) >> S_WINDOW_SCALE) & M_WINDOW_SCALE) + +#define S_SACK 4 +#define M_SACK 0x3 +#define V_SACK(x) ((x) << S_SACK) +#define G_SACK(x) (((x) >> S_SACK) & M_SACK) + +#define S_ECN 6 +#define M_ECN 0x3 +#define V_ECN(x) ((x) << S_ECN) +#define G_ECN(x) (((x) >> S_ECN) & M_ECN) + +#define S_SACK_ALGORITHM 8 +#define M_SACK_ALGORITHM 0x3 +#define V_SACK_ALGORITHM(x) ((x) << S_SACK_ALGORITHM) +#define G_SACK_ALGORITHM(x) (((x) >> S_SACK_ALGORITHM) & M_SACK_ALGORITHM) + +#define S_MSS 10 +#define V_MSS(x) ((x) << S_MSS) +#define F_MSS V_MSS(1U) + +#define S_DEFAULT_PEER_MSS 16 +#define M_DEFAULT_PEER_MSS 0xffff +#define V_DEFAULT_PEER_MSS(x) ((x) << S_DEFAULT_PEER_MSS) +#define G_DEFAULT_PEER_MSS(x) (((x) >> S_DEFAULT_PEER_MSS) & M_DEFAULT_PEER_MSS) + +#define A_TP_DACK_CONFIG 0x344 + +#define S_DACK_MODE 0 +#define V_DACK_MODE(x) ((x) << S_DACK_MODE) +#define F_DACK_MODE V_DACK_MODE(1U) + +#define S_DACK_AUTO_MGMT 1 +#define V_DACK_AUTO_MGMT(x) ((x) << S_DACK_AUTO_MGMT) +#define F_DACK_AUTO_MGMT V_DACK_AUTO_MGMT(1U) + +#define S_DACK_AUTO_CAREFUL 2 +#define V_DACK_AUTO_CAREFUL(x) ((x) << S_DACK_AUTO_CAREFUL) +#define F_DACK_AUTO_CAREFUL V_DACK_AUTO_CAREFUL(1U) + +#define S_DACK_MSS_SELECTOR 3 +#define M_DACK_MSS_SELECTOR 0x3 +#define V_DACK_MSS_SELECTOR(x) ((x) << S_DACK_MSS_SELECTOR) +#define G_DACK_MSS_SELECTOR(x) (((x) >> S_DACK_MSS_SELECTOR) & M_DACK_MSS_SELECTOR) + +#define S_DACK_BYTE_THRESHOLD 5 +#define M_DACK_BYTE_THRESHOLD 0xfffff +#define V_DACK_BYTE_THRESHOLD(x) ((x) << S_DACK_BYTE_THRESHOLD) +#define G_DACK_BYTE_THRESHOLD(x) (((x) >> S_DACK_BYTE_THRESHOLD) & M_DACK_BYTE_THRESHOLD) + +#define A_TP_PC_CONFIG 0x348 + +#define S_TP_ACCESS_LATENCY 0 +#define M_TP_ACCESS_LATENCY 0xf +#define V_TP_ACCESS_LATENCY(x) ((x) << S_TP_ACCESS_LATENCY) +#define G_TP_ACCESS_LATENCY(x) (((x) >> S_TP_ACCESS_LATENCY) & M_TP_ACCESS_LATENCY) + +#define S_HELD_FIN_DISABLE 4 +#define V_HELD_FIN_DISABLE(x) ((x) << S_HELD_FIN_DISABLE) +#define F_HELD_FIN_DISABLE V_HELD_FIN_DISABLE(1U) + +#define S_DDP_FC_ENABLE 5 +#define V_DDP_FC_ENABLE(x) ((x) << S_DDP_FC_ENABLE) +#define F_DDP_FC_ENABLE V_DDP_FC_ENABLE(1U) + +#define S_RDMA_ERR_ENABLE 6 +#define V_RDMA_ERR_ENABLE(x) ((x) << S_RDMA_ERR_ENABLE) +#define F_RDMA_ERR_ENABLE V_RDMA_ERR_ENABLE(1U) + +#define S_FAST_PDU_DELIVERY 7 +#define V_FAST_PDU_DELIVERY(x) ((x) << S_FAST_PDU_DELIVERY) +#define F_FAST_PDU_DELIVERY V_FAST_PDU_DELIVERY(1U) + +#define S_CLEAR_FIN 8 +#define V_CLEAR_FIN(x) ((x) << S_CLEAR_FIN) +#define F_CLEAR_FIN V_CLEAR_FIN(1U) + +#define S_DIS_TX_FILL_WIN_PUSH 12 +#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH) +#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U) + +#define S_TP_PC_REV 30 +#define M_TP_PC_REV 0x3 +#define V_TP_PC_REV(x) ((x) << S_TP_PC_REV) +#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV) + +#define A_TP_BACKOFF0 0x350 + +#define S_ELEMENT0 0 +#define M_ELEMENT0 0xff +#define V_ELEMENT0(x) ((x) << S_ELEMENT0) +#define G_ELEMENT0(x) (((x) >> S_ELEMENT0) 
& M_ELEMENT0) + +#define S_ELEMENT1 8 +#define M_ELEMENT1 0xff +#define V_ELEMENT1(x) ((x) << S_ELEMENT1) +#define G_ELEMENT1(x) (((x) >> S_ELEMENT1) & M_ELEMENT1) + +#define S_ELEMENT2 16 +#define M_ELEMENT2 0xff +#define V_ELEMENT2(x) ((x) << S_ELEMENT2) +#define G_ELEMENT2(x) (((x) >> S_ELEMENT2) & M_ELEMENT2) + +#define S_ELEMENT3 24 +#define M_ELEMENT3 0xff +#define V_ELEMENT3(x) ((x) << S_ELEMENT3) +#define G_ELEMENT3(x) (((x) >> S_ELEMENT3) & M_ELEMENT3) + +#define A_TP_BACKOFF1 0x354 +#define A_TP_BACKOFF2 0x358 +#define A_TP_BACKOFF3 0x35c +#define A_TP_PARA_REG0 0x360 + +#define S_VAR_MULT 0 +#define M_VAR_MULT 0xf +#define V_VAR_MULT(x) ((x) << S_VAR_MULT) +#define G_VAR_MULT(x) (((x) >> S_VAR_MULT) & M_VAR_MULT) + +#define S_VAR_GAIN 4 +#define M_VAR_GAIN 0xf +#define V_VAR_GAIN(x) ((x) << S_VAR_GAIN) +#define G_VAR_GAIN(x) (((x) >> S_VAR_GAIN) & M_VAR_GAIN) + +#define S_SRTT_GAIN 8 +#define M_SRTT_GAIN 0xf +#define V_SRTT_GAIN(x) ((x) << S_SRTT_GAIN) +#define G_SRTT_GAIN(x) (((x) >> S_SRTT_GAIN) & M_SRTT_GAIN) + +#define S_RTTVAR_INIT 12 +#define M_RTTVAR_INIT 0xf +#define V_RTTVAR_INIT(x) ((x) << S_RTTVAR_INIT) +#define G_RTTVAR_INIT(x) (((x) >> S_RTTVAR_INIT) & M_RTTVAR_INIT) + +#define S_DUP_THRESH 20 +#define M_DUP_THRESH 0xf +#define V_DUP_THRESH(x) ((x) << S_DUP_THRESH) +#define G_DUP_THRESH(x) (((x) >> S_DUP_THRESH) & M_DUP_THRESH) + +#define S_INIT_CONG_WIN 24 +#define M_INIT_CONG_WIN 0x7 +#define V_INIT_CONG_WIN(x) ((x) << S_INIT_CONG_WIN) +#define G_INIT_CONG_WIN(x) (((x) >> S_INIT_CONG_WIN) & M_INIT_CONG_WIN) + +#define A_TP_PARA_REG1 0x364 + +#define S_INITIAL_SLOW_START_THRESHOLD 0 +#define M_INITIAL_SLOW_START_THRESHOLD 0xffff +#define V_INITIAL_SLOW_START_THRESHOLD(x) ((x) << S_INITIAL_SLOW_START_THRESHOLD) +#define G_INITIAL_SLOW_START_THRESHOLD(x) (((x) >> S_INITIAL_SLOW_START_THRESHOLD) & M_INITIAL_SLOW_START_THRESHOLD) + +#define S_RECEIVE_BUFFER_SIZE 16 +#define M_RECEIVE_BUFFER_SIZE 0xffff +#define V_RECEIVE_BUFFER_SIZE(x) ((x) << S_RECEIVE_BUFFER_SIZE) +#define G_RECEIVE_BUFFER_SIZE(x) (((x) >> S_RECEIVE_BUFFER_SIZE) & M_RECEIVE_BUFFER_SIZE) + +#define A_TP_PARA_REG2 0x368 + +#define S_RX_COALESCE_SIZE 0 +#define M_RX_COALESCE_SIZE 0xffff +#define V_RX_COALESCE_SIZE(x) ((x) << S_RX_COALESCE_SIZE) +#define G_RX_COALESCE_SIZE(x) (((x) >> S_RX_COALESCE_SIZE) & M_RX_COALESCE_SIZE) + +#define S_MAX_RX_SIZE 16 +#define M_MAX_RX_SIZE 0xffff +#define V_MAX_RX_SIZE(x) ((x) << S_MAX_RX_SIZE) +#define G_MAX_RX_SIZE(x) (((x) >> S_MAX_RX_SIZE) & M_MAX_RX_SIZE) + +#define A_TP_PARA_REG3 0x36c + +#define S_RX_COALESCING_PSH_DELIVER 0 +#define V_RX_COALESCING_PSH_DELIVER(x) ((x) << S_RX_COALESCING_PSH_DELIVER) +#define F_RX_COALESCING_PSH_DELIVER V_RX_COALESCING_PSH_DELIVER(1U) + +#define S_RX_COALESCING_ENABLE 1 +#define V_RX_COALESCING_ENABLE(x) ((x) << S_RX_COALESCING_ENABLE) +#define F_RX_COALESCING_ENABLE V_RX_COALESCING_ENABLE(1U) + +#define S_TAHOE_ENABLE 2 +#define V_TAHOE_ENABLE(x) ((x) << S_TAHOE_ENABLE) +#define F_TAHOE_ENABLE V_TAHOE_ENABLE(1U) + +#define S_MAX_REORDER_FRAGMENTS 12 +#define M_MAX_REORDER_FRAGMENTS 0x7 +#define V_MAX_REORDER_FRAGMENTS(x) ((x) << S_MAX_REORDER_FRAGMENTS) +#define G_MAX_REORDER_FRAGMENTS(x) (((x) >> S_MAX_REORDER_FRAGMENTS) & M_MAX_REORDER_FRAGMENTS) + +#define A_TP_TIMER_RESOLUTION 0x390 + +#define S_DELAYED_ACK_TIMER_RESOLUTION 0 +#define M_DELAYED_ACK_TIMER_RESOLUTION 0x3f +#define V_DELAYED_ACK_TIMER_RESOLUTION(x) ((x) << S_DELAYED_ACK_TIMER_RESOLUTION) +#define G_DELAYED_ACK_TIMER_RESOLUTION(x) (((x) >> 
S_DELAYED_ACK_TIMER_RESOLUTION) & M_DELAYED_ACK_TIMER_RESOLUTION) + +#define S_GENERIC_TIMER_RESOLUTION 16 +#define M_GENERIC_TIMER_RESOLUTION 0x3f +#define V_GENERIC_TIMER_RESOLUTION(x) ((x) << S_GENERIC_TIMER_RESOLUTION) +#define G_GENERIC_TIMER_RESOLUTION(x) (((x) >> S_GENERIC_TIMER_RESOLUTION) & M_GENERIC_TIMER_RESOLUTION) + +#define A_TP_2MSL 0x394 + +#define S_2MSL 0 +#define M_2MSL 0x3fffffff +#define V_2MSL(x) ((x) << S_2MSL) +#define G_2MSL(x) (((x) >> S_2MSL) & M_2MSL) + +#define A_TP_RXT_MIN 0x398 + +#define S_RETRANSMIT_TIMER_MIN 0 +#define M_RETRANSMIT_TIMER_MIN 0xffff +#define V_RETRANSMIT_TIMER_MIN(x) ((x) << S_RETRANSMIT_TIMER_MIN) +#define G_RETRANSMIT_TIMER_MIN(x) (((x) >> S_RETRANSMIT_TIMER_MIN) & M_RETRANSMIT_TIMER_MIN) + +#define A_TP_RXT_MAX 0x39c + +#define S_RETRANSMIT_TIMER_MAX 0 +#define M_RETRANSMIT_TIMER_MAX 0x3fffffff +#define V_RETRANSMIT_TIMER_MAX(x) ((x) << S_RETRANSMIT_TIMER_MAX) +#define G_RETRANSMIT_TIMER_MAX(x) (((x) >> S_RETRANSMIT_TIMER_MAX) & M_RETRANSMIT_TIMER_MAX) + +#define A_TP_PERS_MIN 0x3a0 + +#define S_PERSIST_TIMER_MIN 0 +#define M_PERSIST_TIMER_MIN 0xffff +#define V_PERSIST_TIMER_MIN(x) ((x) << S_PERSIST_TIMER_MIN) +#define G_PERSIST_TIMER_MIN(x) (((x) >> S_PERSIST_TIMER_MIN) & M_PERSIST_TIMER_MIN) + +#define A_TP_PERS_MAX 0x3a4 + +#define S_PERSIST_TIMER_MAX 0 +#define M_PERSIST_TIMER_MAX 0x3fffffff +#define V_PERSIST_TIMER_MAX(x) ((x) << S_PERSIST_TIMER_MAX) +#define G_PERSIST_TIMER_MAX(x) (((x) >> S_PERSIST_TIMER_MAX) & M_PERSIST_TIMER_MAX) + +#define A_TP_KEEP_IDLE 0x3ac + +#define S_KEEP_ALIVE_IDLE_TIME 0 +#define M_KEEP_ALIVE_IDLE_TIME 0x3fffffff +#define V_KEEP_ALIVE_IDLE_TIME(x) ((x) << S_KEEP_ALIVE_IDLE_TIME) +#define G_KEEP_ALIVE_IDLE_TIME(x) (((x) >> S_KEEP_ALIVE_IDLE_TIME) & M_KEEP_ALIVE_IDLE_TIME) + +#define A_TP_KEEP_INTVL 0x3b0 + +#define S_KEEP_ALIVE_INTERVAL_TIME 0 +#define M_KEEP_ALIVE_INTERVAL_TIME 0x3fffffff +#define V_KEEP_ALIVE_INTERVAL_TIME(x) ((x) << S_KEEP_ALIVE_INTERVAL_TIME) +#define G_KEEP_ALIVE_INTERVAL_TIME(x) (((x) >> S_KEEP_ALIVE_INTERVAL_TIME) & M_KEEP_ALIVE_INTERVAL_TIME) + +#define A_TP_INIT_SRTT 0x3b4 + +#define S_INITIAL_SRTT 0 +#define M_INITIAL_SRTT 0xffff +#define V_INITIAL_SRTT(x) ((x) << S_INITIAL_SRTT) +#define G_INITIAL_SRTT(x) (((x) >> S_INITIAL_SRTT) & M_INITIAL_SRTT) + +#define A_TP_DACK_TIME 0x3b8 + +#define S_DELAYED_ACK_TIME 0 +#define M_DELAYED_ACK_TIME 0x7ff +#define V_DELAYED_ACK_TIME(x) ((x) << S_DELAYED_ACK_TIME) +#define G_DELAYED_ACK_TIME(x) (((x) >> S_DELAYED_ACK_TIME) & M_DELAYED_ACK_TIME) + +#define A_TP_FINWAIT2_TIME 0x3bc + +#define S_FINWAIT2_TIME 0 +#define M_FINWAIT2_TIME 0x3fffffff +#define V_FINWAIT2_TIME(x) ((x) << S_FINWAIT2_TIME) +#define G_FINWAIT2_TIME(x) (((x) >> S_FINWAIT2_TIME) & M_FINWAIT2_TIME) + +#define A_TP_FAST_FINWAIT2_TIME 0x3c0 + +#define S_FAST_FINWAIT2_TIME 0 +#define M_FAST_FINWAIT2_TIME 0x3fffffff +#define V_FAST_FINWAIT2_TIME(x) ((x) << S_FAST_FINWAIT2_TIME) +#define G_FAST_FINWAIT2_TIME(x) (((x) >> S_FAST_FINWAIT2_TIME) & M_FAST_FINWAIT2_TIME) + +#define A_TP_SHIFT_CNT 0x3c4 + +#define S_KEEPALIVE_MAX 0 +#define M_KEEPALIVE_MAX 0xff +#define V_KEEPALIVE_MAX(x) ((x) << S_KEEPALIVE_MAX) +#define G_KEEPALIVE_MAX(x) (((x) >> S_KEEPALIVE_MAX) & M_KEEPALIVE_MAX) + +#define S_WINDOWPROBE_MAX 8 +#define M_WINDOWPROBE_MAX 0xff +#define V_WINDOWPROBE_MAX(x) ((x) << S_WINDOWPROBE_MAX) +#define G_WINDOWPROBE_MAX(x) (((x) >> S_WINDOWPROBE_MAX) & M_WINDOWPROBE_MAX) + +#define S_RETRANSMISSION_MAX 16 +#define M_RETRANSMISSION_MAX 0xff +#define V_RETRANSMISSION_MAX(x) 
((x) << S_RETRANSMISSION_MAX) +#define G_RETRANSMISSION_MAX(x) (((x) >> S_RETRANSMISSION_MAX) & M_RETRANSMISSION_MAX) + +#define S_SYN_MAX 24 +#define M_SYN_MAX 0xff +#define V_SYN_MAX(x) ((x) << S_SYN_MAX) +#define G_SYN_MAX(x) (((x) >> S_SYN_MAX) & M_SYN_MAX) + +#define A_TP_QOS_REG0 0x3e0 + +#define S_L3_VALUE 0 +#define M_L3_VALUE 0x3f +#define V_L3_VALUE(x) ((x) << S_L3_VALUE) +#define G_L3_VALUE(x) (((x) >> S_L3_VALUE) & M_L3_VALUE) + +#define A_TP_QOS_REG1 0x3e4 +#define A_TP_QOS_REG2 0x3e8 +#define A_TP_QOS_REG3 0x3ec +#define A_TP_QOS_REG4 0x3f0 +#define A_TP_QOS_REG5 0x3f4 +#define A_TP_QOS_REG6 0x3f8 +#define A_TP_QOS_REG7 0x3fc +#define A_TP_MTU_REG0 0x404 +#define A_TP_MTU_REG1 0x408 +#define A_TP_MTU_REG2 0x40c +#define A_TP_MTU_REG3 0x410 +#define A_TP_MTU_REG4 0x414 +#define A_TP_MTU_REG5 0x418 +#define A_TP_MTU_REG6 0x41c +#define A_TP_MTU_REG7 0x420 +#define A_TP_RESET 0x44c + +#define S_TP_RESET 0 +#define V_TP_RESET(x) ((x) << S_TP_RESET) +#define F_TP_RESET V_TP_RESET(1U) + +#define S_CM_MEMMGR_INIT 1 +#define V_CM_MEMMGR_INIT(x) ((x) << S_CM_MEMMGR_INIT) +#define F_CM_MEMMGR_INIT V_CM_MEMMGR_INIT(1U) + +#define A_TP_MIB_INDEX 0x450 +#define A_TP_MIB_DATA 0x454 +#define A_TP_SYNC_TIME_HI 0x458 +#define A_TP_SYNC_TIME_LO 0x45c +#define A_TP_CM_MM_RX_FLST_BASE 0x460 + +#define S_CM_MEMMGR_RX_FREE_LIST_BASE 0 +#define M_CM_MEMMGR_RX_FREE_LIST_BASE 0xfffffff +#define V_CM_MEMMGR_RX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_RX_FREE_LIST_BASE) +#define G_CM_MEMMGR_RX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_RX_FREE_LIST_BASE) & M_CM_MEMMGR_RX_FREE_LIST_BASE) + +#define A_TP_CM_MM_TX_FLST_BASE 0x464 + +#define S_CM_MEMMGR_TX_FREE_LIST_BASE 0 +#define M_CM_MEMMGR_TX_FREE_LIST_BASE 0xfffffff +#define V_CM_MEMMGR_TX_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_TX_FREE_LIST_BASE) +#define G_CM_MEMMGR_TX_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_TX_FREE_LIST_BASE) & M_CM_MEMMGR_TX_FREE_LIST_BASE) + +#define A_TP_CM_MM_P_FLST_BASE 0x468 + +#define S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0 +#define M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE 0xfffffff +#define V_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) ((x) << S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) +#define G_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE(x) (((x) >> S_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) & M_CM_MEMMGR_PSTRUCT_FREE_LIST_BASE) + +#define A_TP_CM_MM_MAX_P 0x46c + +#define S_CM_MEMMGR_MAX_PSTRUCT 0 +#define M_CM_MEMMGR_MAX_PSTRUCT 0xfffffff +#define V_CM_MEMMGR_MAX_PSTRUCT(x) ((x) << S_CM_MEMMGR_MAX_PSTRUCT) +#define G_CM_MEMMGR_MAX_PSTRUCT(x) (((x) >> S_CM_MEMMGR_MAX_PSTRUCT) & M_CM_MEMMGR_MAX_PSTRUCT) + +#define A_TP_INT_ENABLE 0x470 + +#define S_TX_FREE_LIST_EMPTY 0 +#define V_TX_FREE_LIST_EMPTY(x) ((x) << S_TX_FREE_LIST_EMPTY) +#define F_TX_FREE_LIST_EMPTY V_TX_FREE_LIST_EMPTY(1U) + +#define S_RX_FREE_LIST_EMPTY 1 +#define V_RX_FREE_LIST_EMPTY(x) ((x) << S_RX_FREE_LIST_EMPTY) +#define F_RX_FREE_LIST_EMPTY V_RX_FREE_LIST_EMPTY(1U) + +#define A_TP_INT_CAUSE 0x474 +#define A_TP_TIMER_SEPARATOR 0x4a4 + +#define S_DISABLE_PAST_TIMER_INSERTION 0 +#define V_DISABLE_PAST_TIMER_INSERTION(x) ((x) << S_DISABLE_PAST_TIMER_INSERTION) +#define F_DISABLE_PAST_TIMER_INSERTION V_DISABLE_PAST_TIMER_INSERTION(1U) + +#define S_MODULATION_TIMER_SEPARATOR 1 +#define M_MODULATION_TIMER_SEPARATOR 0x7fff +#define V_MODULATION_TIMER_SEPARATOR(x) ((x) << S_MODULATION_TIMER_SEPARATOR) +#define G_MODULATION_TIMER_SEPARATOR(x) (((x) >> S_MODULATION_TIMER_SEPARATOR) & M_MODULATION_TIMER_SEPARATOR) + +#define S_GLOBAL_TIMER_SEPARATOR 16 +#define M_GLOBAL_TIMER_SEPARATOR 0xffff +#define 
V_GLOBAL_TIMER_SEPARATOR(x) ((x) << S_GLOBAL_TIMER_SEPARATOR) +#define G_GLOBAL_TIMER_SEPARATOR(x) (((x) >> S_GLOBAL_TIMER_SEPARATOR) & M_GLOBAL_TIMER_SEPARATOR) + +#define A_TP_CM_FC_MODE 0x4b0 +#define A_TP_PC_CONGESTION_CNTL 0x4b4 +#define A_TP_TX_DROP_CONFIG 0x4b8 + +#define S_ENABLE_TX_DROP 31 +#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP) +#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U) + +#define S_ENABLE_TX_ERROR 30 +#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR) +#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U) + +#define S_DROP_TICKS_CNT 4 +#define M_DROP_TICKS_CNT 0x3ffffff +#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT) +#define G_DROP_TICKS_CNT(x) (((x) >> S_DROP_TICKS_CNT) & M_DROP_TICKS_CNT) + +#define S_NUM_PKTS_DROPPED 0 +#define M_NUM_PKTS_DROPPED 0xf +#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED) +#define G_NUM_PKTS_DROPPED(x) (((x) >> S_NUM_PKTS_DROPPED) & M_NUM_PKTS_DROPPED) + +#define A_TP_TX_DROP_COUNT 0x4bc + +/* RAT registers */ +#define A_RAT_ROUTE_CONTROL 0x580 + +#define S_USE_ROUTE_TABLE 0 +#define V_USE_ROUTE_TABLE(x) ((x) << S_USE_ROUTE_TABLE) +#define F_USE_ROUTE_TABLE V_USE_ROUTE_TABLE(1U) + +#define S_ENABLE_CSPI 1 +#define V_ENABLE_CSPI(x) ((x) << S_ENABLE_CSPI) +#define F_ENABLE_CSPI V_ENABLE_CSPI(1U) + +#define S_ENABLE_PCIX 2 +#define V_ENABLE_PCIX(x) ((x) << S_ENABLE_PCIX) +#define F_ENABLE_PCIX V_ENABLE_PCIX(1U) + +#define A_RAT_ROUTE_TABLE_INDEX 0x584 + +#define S_ROUTE_TABLE_INDEX 0 +#define M_ROUTE_TABLE_INDEX 0xf +#define V_ROUTE_TABLE_INDEX(x) ((x) << S_ROUTE_TABLE_INDEX) +#define G_ROUTE_TABLE_INDEX(x) (((x) >> S_ROUTE_TABLE_INDEX) & M_ROUTE_TABLE_INDEX) + +#define A_RAT_ROUTE_TABLE_DATA 0x588 +#define A_RAT_NO_ROUTE 0x58c + +#define S_CPL_OPCODE 0 +#define M_CPL_OPCODE 0xff +#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE) +#define G_CPL_OPCODE(x) (((x) >> S_CPL_OPCODE) & M_CPL_OPCODE) + +#define A_RAT_INTR_ENABLE 0x590 + +#define S_ZEROROUTEERROR 0 +#define V_ZEROROUTEERROR(x) ((x) << S_ZEROROUTEERROR) +#define F_ZEROROUTEERROR V_ZEROROUTEERROR(1U) + +#define S_CSPIFRAMINGERROR 1 +#define V_CSPIFRAMINGERROR(x) ((x) << S_CSPIFRAMINGERROR) +#define F_CSPIFRAMINGERROR V_CSPIFRAMINGERROR(1U) + +#define S_SGEFRAMINGERROR 2 +#define V_SGEFRAMINGERROR(x) ((x) << S_SGEFRAMINGERROR) +#define F_SGEFRAMINGERROR V_SGEFRAMINGERROR(1U) + +#define S_TPFRAMINGERROR 3 +#define V_TPFRAMINGERROR(x) ((x) << S_TPFRAMINGERROR) +#define F_TPFRAMINGERROR V_TPFRAMINGERROR(1U) + +#define A_RAT_INTR_CAUSE 0x594 + +/* CSPI registers */ +#define A_CSPI_RX_AE_WM 0x810 +#define A_CSPI_RX_AF_WM 0x814 +#define A_CSPI_CALENDAR_LEN 0x818 + +#define S_CALENDARLENGTH 0 +#define M_CALENDARLENGTH 0xffff +#define V_CALENDARLENGTH(x) ((x) << S_CALENDARLENGTH) +#define G_CALENDARLENGTH(x) (((x) >> S_CALENDARLENGTH) & M_CALENDARLENGTH) + +#define A_CSPI_FIFO_STATUS_ENABLE 0x820 + +#define S_FIFOSTATUSENABLE 0 +#define V_FIFOSTATUSENABLE(x) ((x) << S_FIFOSTATUSENABLE) +#define F_FIFOSTATUSENABLE V_FIFOSTATUSENABLE(1U) + +#define A_CSPI_MAXBURST1_MAXBURST2 0x828 + +#define S_MAXBURST1 0 +#define M_MAXBURST1 0xffff +#define V_MAXBURST1(x) ((x) << S_MAXBURST1) +#define G_MAXBURST1(x) (((x) >> S_MAXBURST1) & M_MAXBURST1) + +#define S_MAXBURST2 16 +#define M_MAXBURST2 0xffff +#define V_MAXBURST2(x) ((x) << S_MAXBURST2) +#define G_MAXBURST2(x) (((x) >> S_MAXBURST2) & M_MAXBURST2) + +#define A_CSPI_TRAIN 0x82c + +#define S_CSPI_TRAIN_ALPHA 0 +#define M_CSPI_TRAIN_ALPHA 0xffff +#define V_CSPI_TRAIN_ALPHA(x) ((x) << S_CSPI_TRAIN_ALPHA) +#define 
G_CSPI_TRAIN_ALPHA(x) (((x) >> S_CSPI_TRAIN_ALPHA) & M_CSPI_TRAIN_ALPHA) + +#define S_CSPI_TRAIN_DATA_MAXT 16 +#define M_CSPI_TRAIN_DATA_MAXT 0xffff +#define V_CSPI_TRAIN_DATA_MAXT(x) ((x) << S_CSPI_TRAIN_DATA_MAXT) +#define G_CSPI_TRAIN_DATA_MAXT(x) (((x) >> S_CSPI_TRAIN_DATA_MAXT) & M_CSPI_TRAIN_DATA_MAXT) + +#define A_CSPI_INTR_STATUS 0x848 + +#define S_DIP4ERR 0 +#define V_DIP4ERR(x) ((x) << S_DIP4ERR) +#define F_DIP4ERR V_DIP4ERR(1U) + +#define S_RXDROP 1 +#define V_RXDROP(x) ((x) << S_RXDROP) +#define F_RXDROP V_RXDROP(1U) + +#define S_TXDROP 2 +#define V_TXDROP(x) ((x) << S_TXDROP) +#define F_TXDROP V_TXDROP(1U) + +#define S_RXOVERFLOW 3 +#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW) +#define F_RXOVERFLOW V_RXOVERFLOW(1U) + +#define S_RAMPARITYERR 4 +#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR) +#define F_RAMPARITYERR V_RAMPARITYERR(1U) + +#define A_CSPI_INTR_ENABLE 0x84c + +/* ESPI registers */ +#define A_ESPI_SCH_TOKEN0 0x880 + +#define S_SCHTOKEN0 0 +#define M_SCHTOKEN0 0xffff +#define V_SCHTOKEN0(x) ((x) << S_SCHTOKEN0) +#define G_SCHTOKEN0(x) (((x) >> S_SCHTOKEN0) & M_SCHTOKEN0) + +#define A_ESPI_SCH_TOKEN1 0x884 + +#define S_SCHTOKEN1 0 +#define M_SCHTOKEN1 0xffff +#define V_SCHTOKEN1(x) ((x) << S_SCHTOKEN1) +#define G_SCHTOKEN1(x) (((x) >> S_SCHTOKEN1) & M_SCHTOKEN1) + +#define A_ESPI_SCH_TOKEN2 0x888 + +#define S_SCHTOKEN2 0 +#define M_SCHTOKEN2 0xffff +#define V_SCHTOKEN2(x) ((x) << S_SCHTOKEN2) +#define G_SCHTOKEN2(x) (((x) >> S_SCHTOKEN2) & M_SCHTOKEN2) + +#define A_ESPI_SCH_TOKEN3 0x88c + +#define S_SCHTOKEN3 0 +#define M_SCHTOKEN3 0xffff +#define V_SCHTOKEN3(x) ((x) << S_SCHTOKEN3) +#define G_SCHTOKEN3(x) (((x) >> S_SCHTOKEN3) & M_SCHTOKEN3) + +#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890 + +#define S_ALMOSTEMPTY 0 +#define M_ALMOSTEMPTY 0xffff +#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY) +#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY) + +#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894 + +#define S_ALMOSTFULL 0 +#define M_ALMOSTFULL 0xffff +#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL) +#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL) + +#define A_ESPI_CALENDAR_LENGTH 0x898 +#define A_PORT_CONFIG 0x89c + +#define S_RX_NPORTS 0 +#define M_RX_NPORTS 0xff +#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS) +#define G_RX_NPORTS(x) (((x) >> S_RX_NPORTS) & M_RX_NPORTS) + +#define S_TX_NPORTS 8 +#define M_TX_NPORTS 0xff +#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS) +#define G_TX_NPORTS(x) (((x) >> S_TX_NPORTS) & M_TX_NPORTS) + +#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0 + +#define S_RXSTATUSENABLE 0 +#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE) +#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U) + +#define S_TXDROPENABLE 1 +#define V_TXDROPENABLE(x) ((x) << S_TXDROPENABLE) +#define F_TXDROPENABLE V_TXDROPENABLE(1U) + +#define S_RXENDIANMODE 2 +#define V_RXENDIANMODE(x) ((x) << S_RXENDIANMODE) +#define F_RXENDIANMODE V_RXENDIANMODE(1U) + +#define S_TXENDIANMODE 3 +#define V_TXENDIANMODE(x) ((x) << S_TXENDIANMODE) +#define F_TXENDIANMODE V_TXENDIANMODE(1U) + +#define S_INTEL1010MODE 4 +#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE) +#define F_INTEL1010MODE V_INTEL1010MODE(1U) + +#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8 +#define A_ESPI_TRAIN 0x8ac + +#define S_MAXTRAINALPHA 0 +#define M_MAXTRAINALPHA 0xffff +#define V_MAXTRAINALPHA(x) ((x) << S_MAXTRAINALPHA) +#define G_MAXTRAINALPHA(x) (((x) >> S_MAXTRAINALPHA) & M_MAXTRAINALPHA) + +#define S_MAXTRAINDATA 16 +#define M_MAXTRAINDATA 0xffff +#define V_MAXTRAINDATA(x) ((x) << 
S_MAXTRAINDATA) +#define G_MAXTRAINDATA(x) (((x) >> S_MAXTRAINDATA) & M_MAXTRAINDATA) + +#define A_RAM_STATUS 0x8b0 + +#define S_RXFIFOPARITYERROR 0 +#define M_RXFIFOPARITYERROR 0x3ff +#define V_RXFIFOPARITYERROR(x) ((x) << S_RXFIFOPARITYERROR) +#define G_RXFIFOPARITYERROR(x) (((x) >> S_RXFIFOPARITYERROR) & M_RXFIFOPARITYERROR) + +#define S_TXFIFOPARITYERROR 10 +#define M_TXFIFOPARITYERROR 0x3ff +#define V_TXFIFOPARITYERROR(x) ((x) << S_TXFIFOPARITYERROR) +#define G_TXFIFOPARITYERROR(x) (((x) >> S_TXFIFOPARITYERROR) & M_TXFIFOPARITYERROR) + +#define S_RXFIFOOVERFLOW 20 +#define M_RXFIFOOVERFLOW 0x3ff +#define V_RXFIFOOVERFLOW(x) ((x) << S_RXFIFOOVERFLOW) +#define G_RXFIFOOVERFLOW(x) (((x) >> S_RXFIFOOVERFLOW) & M_RXFIFOOVERFLOW) + +#define A_TX_DROP_COUNT0 0x8b4 + +#define S_TXPORT0DROPCNT 0 +#define M_TXPORT0DROPCNT 0xffff +#define V_TXPORT0DROPCNT(x) ((x) << S_TXPORT0DROPCNT) +#define G_TXPORT0DROPCNT(x) (((x) >> S_TXPORT0DROPCNT) & M_TXPORT0DROPCNT) + +#define S_TXPORT1DROPCNT 16 +#define M_TXPORT1DROPCNT 0xffff +#define V_TXPORT1DROPCNT(x) ((x) << S_TXPORT1DROPCNT) +#define G_TXPORT1DROPCNT(x) (((x) >> S_TXPORT1DROPCNT) & M_TXPORT1DROPCNT) + +#define A_TX_DROP_COUNT1 0x8b8 + +#define S_TXPORT2DROPCNT 0 +#define M_TXPORT2DROPCNT 0xffff +#define V_TXPORT2DROPCNT(x) ((x) << S_TXPORT2DROPCNT) +#define G_TXPORT2DROPCNT(x) (((x) >> S_TXPORT2DROPCNT) & M_TXPORT2DROPCNT) + +#define S_TXPORT3DROPCNT 16 +#define M_TXPORT3DROPCNT 0xffff +#define V_TXPORT3DROPCNT(x) ((x) << S_TXPORT3DROPCNT) +#define G_TXPORT3DROPCNT(x) (((x) >> S_TXPORT3DROPCNT) & M_TXPORT3DROPCNT) + +#define A_RX_DROP_COUNT0 0x8bc + +#define S_RXPORT0DROPCNT 0 +#define M_RXPORT0DROPCNT 0xffff +#define V_RXPORT0DROPCNT(x) ((x) << S_RXPORT0DROPCNT) +#define G_RXPORT0DROPCNT(x) (((x) >> S_RXPORT0DROPCNT) & M_RXPORT0DROPCNT) + +#define S_RXPORT1DROPCNT 16 +#define M_RXPORT1DROPCNT 0xffff +#define V_RXPORT1DROPCNT(x) ((x) << S_RXPORT1DROPCNT) +#define G_RXPORT1DROPCNT(x) (((x) >> S_RXPORT1DROPCNT) & M_RXPORT1DROPCNT) + +#define A_RX_DROP_COUNT1 0x8c0 + +#define S_RXPORT2DROPCNT 0 +#define M_RXPORT2DROPCNT 0xffff +#define V_RXPORT2DROPCNT(x) ((x) << S_RXPORT2DROPCNT) +#define G_RXPORT2DROPCNT(x) (((x) >> S_RXPORT2DROPCNT) & M_RXPORT2DROPCNT) + +#define S_RXPORT3DROPCNT 16 +#define M_RXPORT3DROPCNT 0xffff +#define V_RXPORT3DROPCNT(x) ((x) << S_RXPORT3DROPCNT) +#define G_RXPORT3DROPCNT(x) (((x) >> S_RXPORT3DROPCNT) & M_RXPORT3DROPCNT) + +#define A_DIP4_ERROR_COUNT 0x8c4 + +#define S_DIP4ERRORCNT 0 +#define M_DIP4ERRORCNT 0xfff +#define V_DIP4ERRORCNT(x) ((x) << S_DIP4ERRORCNT) +#define G_DIP4ERRORCNT(x) (((x) >> S_DIP4ERRORCNT) & M_DIP4ERRORCNT) + +#define S_DIP4ERRORCNTSHADOW 12 +#define M_DIP4ERRORCNTSHADOW 0xfff +#define V_DIP4ERRORCNTSHADOW(x) ((x) << S_DIP4ERRORCNTSHADOW) +#define G_DIP4ERRORCNTSHADOW(x) (((x) >> S_DIP4ERRORCNTSHADOW) & M_DIP4ERRORCNTSHADOW) + +#define S_TRICN_RX_TRAIN_ERR 24 +#define V_TRICN_RX_TRAIN_ERR(x) ((x) << S_TRICN_RX_TRAIN_ERR) +#define F_TRICN_RX_TRAIN_ERR V_TRICN_RX_TRAIN_ERR(1U) + +#define S_TRICN_RX_TRAINING 25 +#define V_TRICN_RX_TRAINING(x) ((x) << S_TRICN_RX_TRAINING) +#define F_TRICN_RX_TRAINING V_TRICN_RX_TRAINING(1U) + +#define S_TRICN_RX_TRAIN_OK 26 +#define V_TRICN_RX_TRAIN_OK(x) ((x) << S_TRICN_RX_TRAIN_OK) +#define F_TRICN_RX_TRAIN_OK V_TRICN_RX_TRAIN_OK(1U) + +#define A_ESPI_INTR_STATUS 0x8c8 + +#define S_DIP2PARITYERR 5 +#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR) +#define F_DIP2PARITYERR V_DIP2PARITYERR(1U) + +#define A_ESPI_INTR_ENABLE 0x8cc +#define A_RX_DROP_THRESHOLD 0x8d0 
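Every register in this header follows the same field-macro convention: S_<FIELD> is the bit offset, M_<FIELD> the field mask, V_<FIELD>() shifts a value into place, G_<FIELD>() extracts it from a 32-bit register word, and F_<BIT> is a ready-made single-bit flag. A minimal sketch of how the CSPI interrupt status and DIP4 error counter defined above would be consumed; the readl()-based accessor and the "regs" base pointer are illustrative assumptions, not part of this header:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: read back the CSPI interrupt status and the running DIP4
 * error count, using the F_ flag and G_ extractor macros defined above.
 * "regs" stands in for a hypothetical ioremap()ed base of this block. */
static u32 example_dip4_error_count(void __iomem *regs, bool *dip4_intr)
{
	u32 status = readl(regs + A_CSPI_INTR_STATUS);
	u32 count = readl(regs + A_DIP4_ERROR_COUNT);

	*dip4_intr = (status & F_DIP4ERR) != 0;	/* single-bit flag test */
	return G_DIP4ERRORCNT(count);		/* bits 11:0 of the counter */
}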
+#define A_ESPI_RX_RESET 0x8ec + +#define S_ESPI_RX_LNK_RST 0 +#define V_ESPI_RX_LNK_RST(x) ((x) << S_ESPI_RX_LNK_RST) +#define F_ESPI_RX_LNK_RST V_ESPI_RX_LNK_RST(1U) + +#define S_ESPI_RX_CORE_RST 1 +#define V_ESPI_RX_CORE_RST(x) ((x) << S_ESPI_RX_CORE_RST) +#define F_ESPI_RX_CORE_RST V_ESPI_RX_CORE_RST(1U) + +#define S_RX_CLK_STATUS 2 +#define V_RX_CLK_STATUS(x) ((x) << S_RX_CLK_STATUS) +#define F_RX_CLK_STATUS V_RX_CLK_STATUS(1U) + +#define A_ESPI_MISC_CONTROL 0x8f0 + +#define S_OUT_OF_SYNC_COUNT 0 +#define M_OUT_OF_SYNC_COUNT 0xf +#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT) +#define G_OUT_OF_SYNC_COUNT(x) (((x) >> S_OUT_OF_SYNC_COUNT) & M_OUT_OF_SYNC_COUNT) + +#define S_DIP2_COUNT_MODE_ENABLE 4 +#define V_DIP2_COUNT_MODE_ENABLE(x) ((x) << S_DIP2_COUNT_MODE_ENABLE) +#define F_DIP2_COUNT_MODE_ENABLE V_DIP2_COUNT_MODE_ENABLE(1U) + +#define S_DIP2_PARITY_ERR_THRES 5 +#define M_DIP2_PARITY_ERR_THRES 0xf +#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES) +#define G_DIP2_PARITY_ERR_THRES(x) (((x) >> S_DIP2_PARITY_ERR_THRES) & M_DIP2_PARITY_ERR_THRES) + +#define S_DIP4_THRES 9 +#define M_DIP4_THRES 0xfff +#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES) +#define G_DIP4_THRES(x) (((x) >> S_DIP4_THRES) & M_DIP4_THRES) + +#define S_DIP4_THRES_ENABLE 21 +#define V_DIP4_THRES_ENABLE(x) ((x) << S_DIP4_THRES_ENABLE) +#define F_DIP4_THRES_ENABLE V_DIP4_THRES_ENABLE(1U) + +#define S_FORCE_DISABLE_STATUS 22 +#define V_FORCE_DISABLE_STATUS(x) ((x) << S_FORCE_DISABLE_STATUS) +#define F_FORCE_DISABLE_STATUS V_FORCE_DISABLE_STATUS(1U) + +#define S_DYNAMIC_DESKEW 23 +#define V_DYNAMIC_DESKEW(x) ((x) << S_DYNAMIC_DESKEW) +#define F_DYNAMIC_DESKEW V_DYNAMIC_DESKEW(1U) + +#define S_MONITORED_PORT_NUM 25 +#define M_MONITORED_PORT_NUM 0x3 +#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM) +#define G_MONITORED_PORT_NUM(x) (((x) >> S_MONITORED_PORT_NUM) & M_MONITORED_PORT_NUM) + +#define S_MONITORED_DIRECTION 27 +#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION) +#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U) + +#define S_MONITORED_INTERFACE 28 +#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE) +#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U) + +#define A_ESPI_DIP2_ERR_COUNT 0x8f4 + +#define S_DIP2_ERR_CNT 0 +#define M_DIP2_ERR_CNT 0xf +#define V_DIP2_ERR_CNT(x) ((x) << S_DIP2_ERR_CNT) +#define G_DIP2_ERR_CNT(x) (((x) >> S_DIP2_ERR_CNT) & M_DIP2_ERR_CNT) + +#define A_ESPI_CMD_ADDR 0x8f8 + +#define S_WRITE_DATA 0 +#define M_WRITE_DATA 0xff +#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA) +#define G_WRITE_DATA(x) (((x) >> S_WRITE_DATA) & M_WRITE_DATA) + +#define S_REGISTER_OFFSET 8 +#define M_REGISTER_OFFSET 0xf +#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET) +#define G_REGISTER_OFFSET(x) (((x) >> S_REGISTER_OFFSET) & M_REGISTER_OFFSET) + +#define S_CHANNEL_ADDR 12 +#define M_CHANNEL_ADDR 0xf +#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR) +#define G_CHANNEL_ADDR(x) (((x) >> S_CHANNEL_ADDR) & M_CHANNEL_ADDR) + +#define S_MODULE_ADDR 16 +#define M_MODULE_ADDR 0x3 +#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR) +#define G_MODULE_ADDR(x) (((x) >> S_MODULE_ADDR) & M_MODULE_ADDR) + +#define S_BUNDLE_ADDR 20 +#define M_BUNDLE_ADDR 0x3 +#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR) +#define G_BUNDLE_ADDR(x) (((x) >> S_BUNDLE_ADDR) & M_BUNDLE_ADDR) + +#define S_SPI4_COMMAND 24 +#define M_SPI4_COMMAND 0xff +#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND) +#define G_SPI4_COMMAND(x) (((x) >> S_SPI4_COMMAND) & M_SPI4_COMMAND) + 
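A_ESPI_CMD_ADDR above packs the write data, target register offset, channel, module and bundle addresses, and the SPI-4 command code into one 32-bit word. A minimal sketch of how such a command word could be assembled with the V_ macros; the opcode value and the writel()-based accessor are illustrative assumptions, not something this header specifies:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical opcode -- the real SPI-4 command encodings are not spelled
 * out in this header. */
#define EXAMPLE_SPI4_WR_CMD 0x1

/* Sketch only: compose an ESPI command word field by field and post it.
 * "regs" stands in for a hypothetical ioremap()ed base of this block. */
static void example_espi_cmd(void __iomem *regs, u8 data, u8 reg_off,
			     u8 chan, u8 module, u8 bundle)
{
	u32 cmd = V_WRITE_DATA(data & M_WRITE_DATA) |
		  V_REGISTER_OFFSET(reg_off & M_REGISTER_OFFSET) |
		  V_CHANNEL_ADDR(chan & M_CHANNEL_ADDR) |
		  V_MODULE_ADDR(module & M_MODULE_ADDR) |
		  V_BUNDLE_ADDR(bundle & M_BUNDLE_ADDR) |
		  V_SPI4_COMMAND(EXAMPLE_SPI4_WR_CMD);

	writel(cmd, regs + A_ESPI_CMD_ADDR);
}

Completion and read-back would then presumably be tracked through the A_ESPI_GOSTAT register defined next.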
+#define A_ESPI_GOSTAT 0x8fc + +#define S_READ_DATA 0 +#define M_READ_DATA 0xff +#define V_READ_DATA(x) ((x) << S_READ_DATA) +#define G_READ_DATA(x) (((x) >> S_READ_DATA) & M_READ_DATA) + +#define S_ESPI_CMD_BUSY 8 +#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY) +#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U) + +#define S_ERROR_ACK 9 +#define V_ERROR_ACK(x) ((x) << S_ERROR_ACK) +#define F_ERROR_ACK V_ERROR_ACK(1U) + +#define S_UNMAPPED_ERR 10 +#define V_UNMAPPED_ERR(x) ((x) << S_UNMAPPED_ERR) +#define F_UNMAPPED_ERR V_UNMAPPED_ERR(1U) + +#define S_TRANSACTION_TIMER 16 +#define M_TRANSACTION_TIMER 0xff +#define V_TRANSACTION_TIMER(x) ((x) << S_TRANSACTION_TIMER) +#define G_TRANSACTION_TIMER(x) (((x) >> S_TRANSACTION_TIMER) & M_TRANSACTION_TIMER) + + +/* ULP registers */ +#define A_ULP_ULIMIT 0x980 +#define A_ULP_TAGMASK 0x984 +#define A_ULP_HREG_INDEX 0x988 +#define A_ULP_HREG_DATA 0x98c +#define A_ULP_INT_ENABLE 0x990 +#define A_ULP_INT_CAUSE 0x994 + +#define S_HREG_PAR_ERR 0 +#define V_HREG_PAR_ERR(x) ((x) << S_HREG_PAR_ERR) +#define F_HREG_PAR_ERR V_HREG_PAR_ERR(1U) + +#define S_EGRS_DATA_PAR_ERR 1 +#define V_EGRS_DATA_PAR_ERR(x) ((x) << S_EGRS_DATA_PAR_ERR) +#define F_EGRS_DATA_PAR_ERR V_EGRS_DATA_PAR_ERR(1U) + +#define S_INGRS_DATA_PAR_ERR 2 +#define V_INGRS_DATA_PAR_ERR(x) ((x) << S_INGRS_DATA_PAR_ERR) +#define F_INGRS_DATA_PAR_ERR V_INGRS_DATA_PAR_ERR(1U) + +#define S_PM_INTR 3 +#define V_PM_INTR(x) ((x) << S_PM_INTR) +#define F_PM_INTR V_PM_INTR(1U) + +#define S_PM_E2C_SYNC_ERR 4 +#define V_PM_E2C_SYNC_ERR(x) ((x) << S_PM_E2C_SYNC_ERR) +#define F_PM_E2C_SYNC_ERR V_PM_E2C_SYNC_ERR(1U) + +#define S_PM_C2E_SYNC_ERR 5 +#define V_PM_C2E_SYNC_ERR(x) ((x) << S_PM_C2E_SYNC_ERR) +#define F_PM_C2E_SYNC_ERR V_PM_C2E_SYNC_ERR(1U) + +#define S_PM_E2C_EMPTY_ERR 6 +#define V_PM_E2C_EMPTY_ERR(x) ((x) << S_PM_E2C_EMPTY_ERR) +#define F_PM_E2C_EMPTY_ERR V_PM_E2C_EMPTY_ERR(1U) + +#define S_PM_C2E_EMPTY_ERR 7 +#define V_PM_C2E_EMPTY_ERR(x) ((x) << S_PM_C2E_EMPTY_ERR) +#define F_PM_C2E_EMPTY_ERR V_PM_C2E_EMPTY_ERR(1U) + +#define S_PM_PAR_ERR 8 +#define M_PM_PAR_ERR 0xffff +#define V_PM_PAR_ERR(x) ((x) << S_PM_PAR_ERR) +#define G_PM_PAR_ERR(x) (((x) >> S_PM_PAR_ERR) & M_PM_PAR_ERR) + +#define S_PM_E2C_WRT_FULL 24 +#define V_PM_E2C_WRT_FULL(x) ((x) << S_PM_E2C_WRT_FULL) +#define F_PM_E2C_WRT_FULL V_PM_E2C_WRT_FULL(1U) + +#define S_PM_C2E_WRT_FULL 25 +#define V_PM_C2E_WRT_FULL(x) ((x) << S_PM_C2E_WRT_FULL) +#define F_PM_C2E_WRT_FULL V_PM_C2E_WRT_FULL(1U) + +#define A_ULP_PIO_CTRL 0x998 + +/* PL registers */ +#define A_PL_ENABLE 0xa00 + +#define S_PL_INTR_SGE_ERR 0 +#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR) +#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U) + +#define S_PL_INTR_SGE_DATA 1 +#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA) +#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U) + +#define S_PL_INTR_MC3 2 +#define V_PL_INTR_MC3(x) ((x) << S_PL_INTR_MC3) +#define F_PL_INTR_MC3 V_PL_INTR_MC3(1U) + +#define S_PL_INTR_MC4 3 +#define V_PL_INTR_MC4(x) ((x) << S_PL_INTR_MC4) +#define F_PL_INTR_MC4 V_PL_INTR_MC4(1U) + +#define S_PL_INTR_MC5 4 +#define V_PL_INTR_MC5(x) ((x) << S_PL_INTR_MC5) +#define F_PL_INTR_MC5 V_PL_INTR_MC5(1U) + +#define S_PL_INTR_RAT 5 +#define V_PL_INTR_RAT(x) ((x) << S_PL_INTR_RAT) +#define F_PL_INTR_RAT V_PL_INTR_RAT(1U) + +#define S_PL_INTR_TP 6 +#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP) +#define F_PL_INTR_TP V_PL_INTR_TP(1U) + +#define S_PL_INTR_ULP 7 +#define V_PL_INTR_ULP(x) ((x) << S_PL_INTR_ULP) +#define F_PL_INTR_ULP V_PL_INTR_ULP(1U) + +#define 
S_PL_INTR_ESPI 8 +#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI) +#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U) + +#define S_PL_INTR_CSPI 9 +#define V_PL_INTR_CSPI(x) ((x) << S_PL_INTR_CSPI) +#define F_PL_INTR_CSPI V_PL_INTR_CSPI(1U) + +#define S_PL_INTR_PCIX 10 +#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX) +#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U) + +#define S_PL_INTR_EXT 11 +#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT) +#define F_PL_INTR_EXT V_PL_INTR_EXT(1U) + +#define A_PL_CAUSE 0xa04 + +/* MC5 registers */ +#define A_MC5_CONFIG 0xc04 + +#define S_MODE 0 +#define V_MODE(x) ((x) << S_MODE) +#define F_MODE V_MODE(1U) + +#define S_TCAM_RESET 1 +#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET) +#define F_TCAM_RESET V_TCAM_RESET(1U) + +#define S_TCAM_READY 2 +#define V_TCAM_READY(x) ((x) << S_TCAM_READY) +#define F_TCAM_READY V_TCAM_READY(1U) + +#define S_DBGI_ENABLE 4 +#define V_DBGI_ENABLE(x) ((x) << S_DBGI_ENABLE) +#define F_DBGI_ENABLE V_DBGI_ENABLE(1U) + +#define S_M_BUS_ENABLE 5 +#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE) +#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U) + +#define S_PARITY_ENABLE 6 +#define V_PARITY_ENABLE(x) ((x) << S_PARITY_ENABLE) +#define F_PARITY_ENABLE V_PARITY_ENABLE(1U) + +#define S_SYN_ISSUE_MODE 7 +#define M_SYN_ISSUE_MODE 0x3 +#define V_SYN_ISSUE_MODE(x) ((x) << S_SYN_ISSUE_MODE) +#define G_SYN_ISSUE_MODE(x) (((x) >> S_SYN_ISSUE_MODE) & M_SYN_ISSUE_MODE) + +#define S_BUILD 16 +#define V_BUILD(x) ((x) << S_BUILD) +#define F_BUILD V_BUILD(1U) + +#define S_COMPRESSION_ENABLE 17 +#define V_COMPRESSION_ENABLE(x) ((x) << S_COMPRESSION_ENABLE) +#define F_COMPRESSION_ENABLE V_COMPRESSION_ENABLE(1U) + +#define S_NUM_LIP 18 +#define M_NUM_LIP 0x3f +#define V_NUM_LIP(x) ((x) << S_NUM_LIP) +#define G_NUM_LIP(x) (((x) >> S_NUM_LIP) & M_NUM_LIP) + +#define S_TCAM_PART_CNT 24 +#define M_TCAM_PART_CNT 0x3 +#define V_TCAM_PART_CNT(x) ((x) << S_TCAM_PART_CNT) +#define G_TCAM_PART_CNT(x) (((x) >> S_TCAM_PART_CNT) & M_TCAM_PART_CNT) + +#define S_TCAM_PART_TYPE 26 +#define M_TCAM_PART_TYPE 0x3 +#define V_TCAM_PART_TYPE(x) ((x) << S_TCAM_PART_TYPE) +#define G_TCAM_PART_TYPE(x) (((x) >> S_TCAM_PART_TYPE) & M_TCAM_PART_TYPE) + +#define S_TCAM_PART_SIZE 28 +#define M_TCAM_PART_SIZE 0x3 +#define V_TCAM_PART_SIZE(x) ((x) << S_TCAM_PART_SIZE) +#define G_TCAM_PART_SIZE(x) (((x) >> S_TCAM_PART_SIZE) & M_TCAM_PART_SIZE) + +#define S_TCAM_PART_TYPE_HI 30 +#define V_TCAM_PART_TYPE_HI(x) ((x) << S_TCAM_PART_TYPE_HI) +#define F_TCAM_PART_TYPE_HI V_TCAM_PART_TYPE_HI(1U) + +#define A_MC5_SIZE 0xc08 + +#define S_SIZE 0 +#define M_SIZE 0x3fffff +#define V_SIZE(x) ((x) << S_SIZE) +#define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE) + +#define A_MC5_ROUTING_TABLE_INDEX 0xc0c + +#define S_START_OF_ROUTING_TABLE 0 +#define M_START_OF_ROUTING_TABLE 0x3fffff +#define V_START_OF_ROUTING_TABLE(x) ((x) << S_START_OF_ROUTING_TABLE) +#define G_START_OF_ROUTING_TABLE(x) (((x) >> S_START_OF_ROUTING_TABLE) & M_START_OF_ROUTING_TABLE) + +#define A_MC5_SERVER_INDEX 0xc14 + +#define S_START_OF_SERVER_INDEX 0 +#define M_START_OF_SERVER_INDEX 0x3fffff +#define V_START_OF_SERVER_INDEX(x) ((x) << S_START_OF_SERVER_INDEX) +#define G_START_OF_SERVER_INDEX(x) (((x) >> S_START_OF_SERVER_INDEX) & M_START_OF_SERVER_INDEX) + +#define A_MC5_LIP_RAM_ADDR 0xc18 + +#define S_LOCAL_IP_RAM_ADDR 0 +#define M_LOCAL_IP_RAM_ADDR 0x3f +#define V_LOCAL_IP_RAM_ADDR(x) ((x) << S_LOCAL_IP_RAM_ADDR) +#define G_LOCAL_IP_RAM_ADDR(x) (((x) >> S_LOCAL_IP_RAM_ADDR) & M_LOCAL_IP_RAM_ADDR) + +#define S_RAM_WRITE_ENABLE 8 +#define 
V_RAM_WRITE_ENABLE(x) ((x) << S_RAM_WRITE_ENABLE) +#define F_RAM_WRITE_ENABLE V_RAM_WRITE_ENABLE(1U) + +#define A_MC5_LIP_RAM_DATA 0xc1c +#define A_MC5_RSP_LATENCY 0xc20 + +#define S_SEARCH_RESPONSE_LATENCY 0 +#define M_SEARCH_RESPONSE_LATENCY 0x1f +#define V_SEARCH_RESPONSE_LATENCY(x) ((x) << S_SEARCH_RESPONSE_LATENCY) +#define G_SEARCH_RESPONSE_LATENCY(x) (((x) >> S_SEARCH_RESPONSE_LATENCY) & M_SEARCH_RESPONSE_LATENCY) + +#define S_LEARN_RESPONSE_LATENCY 8 +#define M_LEARN_RESPONSE_LATENCY 0x1f +#define V_LEARN_RESPONSE_LATENCY(x) ((x) << S_LEARN_RESPONSE_LATENCY) +#define G_LEARN_RESPONSE_LATENCY(x) (((x) >> S_LEARN_RESPONSE_LATENCY) & M_LEARN_RESPONSE_LATENCY) + +#define A_MC5_PARITY_LATENCY 0xc24 + +#define S_SRCHLAT 0 +#define M_SRCHLAT 0x1f +#define V_SRCHLAT(x) ((x) << S_SRCHLAT) +#define G_SRCHLAT(x) (((x) >> S_SRCHLAT) & M_SRCHLAT) + +#define S_PARLAT 8 +#define M_PARLAT 0x1f +#define V_PARLAT(x) ((x) << S_PARLAT) +#define G_PARLAT(x) (((x) >> S_PARLAT) & M_PARLAT) + +#define A_MC5_WR_LRN_VERIFY 0xc28 + +#define S_POVEREN 0 +#define V_POVEREN(x) ((x) << S_POVEREN) +#define F_POVEREN V_POVEREN(1U) + +#define S_LRNVEREN 1 +#define V_LRNVEREN(x) ((x) << S_LRNVEREN) +#define F_LRNVEREN V_LRNVEREN(1U) + +#define S_VWVEREN 2 +#define V_VWVEREN(x) ((x) << S_VWVEREN) +#define F_VWVEREN V_VWVEREN(1U) + +#define A_MC5_PART_ID_INDEX 0xc2c + +#define S_IDINDEX 0 +#define M_IDINDEX 0xf +#define V_IDINDEX(x) ((x) << S_IDINDEX) +#define G_IDINDEX(x) (((x) >> S_IDINDEX) & M_IDINDEX) + +#define A_MC5_RESET_MAX 0xc30 + +#define S_RSTMAX 0 +#define M_RSTMAX 0x1ff +#define V_RSTMAX(x) ((x) << S_RSTMAX) +#define G_RSTMAX(x) (((x) >> S_RSTMAX) & M_RSTMAX) + +#define A_MC5_INT_ENABLE 0xc40 + +#define S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR 0 +#define V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR) +#define F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR V_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR(1U) + +#define S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR 1 +#define V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_ACTIVE_REGION_ERR) +#define F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR V_MC5_INT_HIT_IN_ACTIVE_REGION_ERR(1U) + +#define S_MC5_INT_HIT_IN_RT_REGION_ERR 2 +#define V_MC5_INT_HIT_IN_RT_REGION_ERR(x) ((x) << S_MC5_INT_HIT_IN_RT_REGION_ERR) +#define F_MC5_INT_HIT_IN_RT_REGION_ERR V_MC5_INT_HIT_IN_RT_REGION_ERR(1U) + +#define S_MC5_INT_MISS_ERR 3 +#define V_MC5_INT_MISS_ERR(x) ((x) << S_MC5_INT_MISS_ERR) +#define F_MC5_INT_MISS_ERR V_MC5_INT_MISS_ERR(1U) + +#define S_MC5_INT_LIP0_ERR 4 +#define V_MC5_INT_LIP0_ERR(x) ((x) << S_MC5_INT_LIP0_ERR) +#define F_MC5_INT_LIP0_ERR V_MC5_INT_LIP0_ERR(1U) + +#define S_MC5_INT_LIP_MISS_ERR 5 +#define V_MC5_INT_LIP_MISS_ERR(x) ((x) << S_MC5_INT_LIP_MISS_ERR) +#define F_MC5_INT_LIP_MISS_ERR V_MC5_INT_LIP_MISS_ERR(1U) + +#define S_MC5_INT_PARITY_ERR 6 +#define V_MC5_INT_PARITY_ERR(x) ((x) << S_MC5_INT_PARITY_ERR) +#define F_MC5_INT_PARITY_ERR V_MC5_INT_PARITY_ERR(1U) + +#define S_MC5_INT_ACTIVE_REGION_FULL 7 +#define V_MC5_INT_ACTIVE_REGION_FULL(x) ((x) << S_MC5_INT_ACTIVE_REGION_FULL) +#define F_MC5_INT_ACTIVE_REGION_FULL V_MC5_INT_ACTIVE_REGION_FULL(1U) + +#define S_MC5_INT_NFA_SRCH_ERR 8 +#define V_MC5_INT_NFA_SRCH_ERR(x) ((x) << S_MC5_INT_NFA_SRCH_ERR) +#define F_MC5_INT_NFA_SRCH_ERR V_MC5_INT_NFA_SRCH_ERR(1U) + +#define S_MC5_INT_SYN_COOKIE 9 +#define V_MC5_INT_SYN_COOKIE(x) ((x) << S_MC5_INT_SYN_COOKIE) +#define F_MC5_INT_SYN_COOKIE V_MC5_INT_SYN_COOKIE(1U) + +#define S_MC5_INT_SYN_COOKIE_BAD 10 +#define V_MC5_INT_SYN_COOKIE_BAD(x) ((x) << 
S_MC5_INT_SYN_COOKIE_BAD) +#define F_MC5_INT_SYN_COOKIE_BAD V_MC5_INT_SYN_COOKIE_BAD(1U) + +#define S_MC5_INT_SYN_COOKIE_OFF 11 +#define V_MC5_INT_SYN_COOKIE_OFF(x) ((x) << S_MC5_INT_SYN_COOKIE_OFF) +#define F_MC5_INT_SYN_COOKIE_OFF V_MC5_INT_SYN_COOKIE_OFF(1U) + +#define S_MC5_INT_UNKNOWN_CMD 15 +#define V_MC5_INT_UNKNOWN_CMD(x) ((x) << S_MC5_INT_UNKNOWN_CMD) +#define F_MC5_INT_UNKNOWN_CMD V_MC5_INT_UNKNOWN_CMD(1U) + +#define S_MC5_INT_REQUESTQ_PARITY_ERR 16 +#define V_MC5_INT_REQUESTQ_PARITY_ERR(x) ((x) << S_MC5_INT_REQUESTQ_PARITY_ERR) +#define F_MC5_INT_REQUESTQ_PARITY_ERR V_MC5_INT_REQUESTQ_PARITY_ERR(1U) + +#define S_MC5_INT_DISPATCHQ_PARITY_ERR 17 +#define V_MC5_INT_DISPATCHQ_PARITY_ERR(x) ((x) << S_MC5_INT_DISPATCHQ_PARITY_ERR) +#define F_MC5_INT_DISPATCHQ_PARITY_ERR V_MC5_INT_DISPATCHQ_PARITY_ERR(1U) + +#define S_MC5_INT_DEL_ACT_EMPTY 18 +#define V_MC5_INT_DEL_ACT_EMPTY(x) ((x) << S_MC5_INT_DEL_ACT_EMPTY) +#define F_MC5_INT_DEL_ACT_EMPTY V_MC5_INT_DEL_ACT_EMPTY(1U) + +#define A_MC5_INT_CAUSE 0xc44 +#define A_MC5_INT_TID 0xc48 +#define A_MC5_INT_PTID 0xc4c +#define A_MC5_DBGI_CONFIG 0xc74 +#define A_MC5_DBGI_REQ_CMD 0xc78 + +#define S_CMDMODE 0 +#define M_CMDMODE 0x7 +#define V_CMDMODE(x) ((x) << S_CMDMODE) +#define G_CMDMODE(x) (((x) >> S_CMDMODE) & M_CMDMODE) + +#define S_SADRSEL 4 +#define V_SADRSEL(x) ((x) << S_SADRSEL) +#define F_SADRSEL V_SADRSEL(1U) + +#define S_WRITE_BURST_SIZE 22 +#define M_WRITE_BURST_SIZE 0x3ff +#define V_WRITE_BURST_SIZE(x) ((x) << S_WRITE_BURST_SIZE) +#define G_WRITE_BURST_SIZE(x) (((x) >> S_WRITE_BURST_SIZE) & M_WRITE_BURST_SIZE) + +#define A_MC5_DBGI_REQ_ADDR0 0xc7c +#define A_MC5_DBGI_REQ_ADDR1 0xc80 +#define A_MC5_DBGI_REQ_ADDR2 0xc84 +#define A_MC5_DBGI_REQ_DATA0 0xc88 +#define A_MC5_DBGI_REQ_DATA1 0xc8c +#define A_MC5_DBGI_REQ_DATA2 0xc90 +#define A_MC5_DBGI_REQ_DATA3 0xc94 +#define A_MC5_DBGI_REQ_DATA4 0xc98 +#define A_MC5_DBGI_REQ_MASK0 0xc9c +#define A_MC5_DBGI_REQ_MASK1 0xca0 +#define A_MC5_DBGI_REQ_MASK2 0xca4 +#define A_MC5_DBGI_REQ_MASK3 0xca8 +#define A_MC5_DBGI_REQ_MASK4 0xcac +#define A_MC5_DBGI_RSP_STATUS 0xcb0 + +#define S_DBGI_RSP_VALID 0 +#define V_DBGI_RSP_VALID(x) ((x) << S_DBGI_RSP_VALID) +#define F_DBGI_RSP_VALID V_DBGI_RSP_VALID(1U) + +#define S_DBGI_RSP_HIT 1 +#define V_DBGI_RSP_HIT(x) ((x) << S_DBGI_RSP_HIT) +#define F_DBGI_RSP_HIT V_DBGI_RSP_HIT(1U) + +#define S_DBGI_RSP_ERR 2 +#define V_DBGI_RSP_ERR(x) ((x) << S_DBGI_RSP_ERR) +#define F_DBGI_RSP_ERR V_DBGI_RSP_ERR(1U) + +#define S_DBGI_RSP_ERR_REASON 8 +#define M_DBGI_RSP_ERR_REASON 0x7 +#define V_DBGI_RSP_ERR_REASON(x) ((x) << S_DBGI_RSP_ERR_REASON) +#define G_DBGI_RSP_ERR_REASON(x) (((x) >> S_DBGI_RSP_ERR_REASON) & M_DBGI_RSP_ERR_REASON) + +#define A_MC5_DBGI_RSP_DATA0 0xcb4 +#define A_MC5_DBGI_RSP_DATA1 0xcb8 +#define A_MC5_DBGI_RSP_DATA2 0xcbc +#define A_MC5_DBGI_RSP_DATA3 0xcc0 +#define A_MC5_DBGI_RSP_DATA4 0xcc4 +#define A_MC5_DBGI_RSP_LAST_CMD 0xcc8 +#define A_MC5_POPEN_DATA_WR_CMD 0xccc +#define A_MC5_POPEN_MASK_WR_CMD 0xcd0 +#define A_MC5_AOPEN_SRCH_CMD 0xcd4 +#define A_MC5_AOPEN_LRN_CMD 0xcd8 +#define A_MC5_SYN_SRCH_CMD 0xcdc +#define A_MC5_SYN_LRN_CMD 0xce0 +#define A_MC5_ACK_SRCH_CMD 0xce4 +#define A_MC5_ACK_LRN_CMD 0xce8 +#define A_MC5_ILOOKUP_CMD 0xcec +#define A_MC5_ELOOKUP_CMD 0xcf0 +#define A_MC5_DATA_WRITE_CMD 0xcf4 +#define A_MC5_DATA_READ_CMD 0xcf8 +#define A_MC5_MASK_WRITE_CMD 0xcfc + +/* PCICFG registers */ +#define A_PCICFG_PM_CSR 0x44 +#define A_PCICFG_VPD_ADDR 0x4a + +#define S_VPD_ADDR 0 +#define M_VPD_ADDR 0x7fff +#define V_VPD_ADDR(x) ((x) << 
S_VPD_ADDR) +#define G_VPD_ADDR(x) (((x) >> S_VPD_ADDR) & M_VPD_ADDR) + +#define S_VPD_OP_FLAG 15 +#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG) +#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U) + +#define A_PCICFG_VPD_DATA 0x4c +#define A_PCICFG_PCIX_CMD 0x60 +#define A_PCICFG_INTR_ENABLE 0xf4 + +#define S_MASTER_PARITY_ERR 0 +#define V_MASTER_PARITY_ERR(x) ((x) << S_MASTER_PARITY_ERR) +#define F_MASTER_PARITY_ERR V_MASTER_PARITY_ERR(1U) + +#define S_SIG_TARGET_ABORT 1 +#define V_SIG_TARGET_ABORT(x) ((x) << S_SIG_TARGET_ABORT) +#define F_SIG_TARGET_ABORT V_SIG_TARGET_ABORT(1U) + +#define S_RCV_TARGET_ABORT 2 +#define V_RCV_TARGET_ABORT(x) ((x) << S_RCV_TARGET_ABORT) +#define F_RCV_TARGET_ABORT V_RCV_TARGET_ABORT(1U) + +#define S_RCV_MASTER_ABORT 3 +#define V_RCV_MASTER_ABORT(x) ((x) << S_RCV_MASTER_ABORT) +#define F_RCV_MASTER_ABORT V_RCV_MASTER_ABORT(1U) + +#define S_SIG_SYS_ERR 4 +#define V_SIG_SYS_ERR(x) ((x) << S_SIG_SYS_ERR) +#define F_SIG_SYS_ERR V_SIG_SYS_ERR(1U) + +#define S_DET_PARITY_ERR 5 +#define V_DET_PARITY_ERR(x) ((x) << S_DET_PARITY_ERR) +#define F_DET_PARITY_ERR V_DET_PARITY_ERR(1U) + +#define S_PIO_PARITY_ERR 6 +#define V_PIO_PARITY_ERR(x) ((x) << S_PIO_PARITY_ERR) +#define F_PIO_PARITY_ERR V_PIO_PARITY_ERR(1U) + +#define S_WF_PARITY_ERR 7 +#define V_WF_PARITY_ERR(x) ((x) << S_WF_PARITY_ERR) +#define F_WF_PARITY_ERR V_WF_PARITY_ERR(1U) + +#define S_RF_PARITY_ERR 8 +#define M_RF_PARITY_ERR 0x3 +#define V_RF_PARITY_ERR(x) ((x) << S_RF_PARITY_ERR) +#define G_RF_PARITY_ERR(x) (((x) >> S_RF_PARITY_ERR) & M_RF_PARITY_ERR) + +#define S_CF_PARITY_ERR 10 +#define M_CF_PARITY_ERR 0x3 +#define V_CF_PARITY_ERR(x) ((x) << S_CF_PARITY_ERR) +#define G_CF_PARITY_ERR(x) (((x) >> S_CF_PARITY_ERR) & M_CF_PARITY_ERR) + +#define A_PCICFG_INTR_CAUSE 0xf8 +#define A_PCICFG_MODE 0xfc + +#define S_PCI_MODE_64BIT 0 +#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT) +#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U) + +#define S_PCI_MODE_66MHZ 1 +#define V_PCI_MODE_66MHZ(x) ((x) << S_PCI_MODE_66MHZ) +#define F_PCI_MODE_66MHZ V_PCI_MODE_66MHZ(1U) + +#define S_PCI_MODE_PCIX_INITPAT 2 +#define M_PCI_MODE_PCIX_INITPAT 0x7 +#define V_PCI_MODE_PCIX_INITPAT(x) ((x) << S_PCI_MODE_PCIX_INITPAT) +#define G_PCI_MODE_PCIX_INITPAT(x) (((x) >> S_PCI_MODE_PCIX_INITPAT) & M_PCI_MODE_PCIX_INITPAT) + +#define S_PCI_MODE_PCIX 5 +#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX) +#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U) + +#define S_PCI_MODE_CLK 6 +#define M_PCI_MODE_CLK 0x3 +#define V_PCI_MODE_CLK(x) ((x) << S_PCI_MODE_CLK) +#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK) + +#endif /* _CXGB_REGS_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c new file mode 100644 index 000000000..526ea74e8 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -0,0 +1,2124 @@ +/***************************************************************************** + * * + * File: sge.c * + * $Revision: 1.26 $ * + * $Date: 2005/06/21 18:29:48 $ * + * Description: * + * DMA engine. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . 
* + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "common.h" + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/ktime.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/skbuff.h> +#include <linux/mm.h> +#include <linux/tcp.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/if_arp.h> +#include <linux/slab.h> +#include <linux/prefetch.h> + +#include "cpl5_cmd.h" +#include "sge.h" +#include "regs.h" +#include "espi.h" + +/* This belongs in if_ether.h */ +#define ETH_P_CPL5 0xf + +#define SGE_CMDQ_N 2 +#define SGE_FREELQ_N 2 +#define SGE_CMDQ0_E_N 1024 +#define SGE_CMDQ1_E_N 128 +#define SGE_FREEL_SIZE 4096 +#define SGE_JUMBO_FREEL_SIZE 512 +#define SGE_FREEL_REFILL_THRESH 16 +#define SGE_RESPQ_E_N 1024 +#define SGE_INTRTIMER_NRES 1000 +#define SGE_RX_SM_BUF_SIZE 1536 +#define SGE_TX_DESC_MAX_PLEN 16384 + +#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) + +/* + * Period of the TX buffer reclaim timer. This timer does not need to run + * frequently as TX buffers are usually reclaimed by new TX packets. + */ +#define TX_RECLAIM_PERIOD (HZ / 4) + +#define M_CMD_LEN 0x7fffffff +#define V_CMD_LEN(v) (v) +#define G_CMD_LEN(v) ((v) & M_CMD_LEN) +#define V_CMD_GEN1(v) ((v) << 31) +#define V_CMD_GEN2(v) (v) +#define F_CMD_DATAVALID (1 << 1) +#define F_CMD_SOP (1 << 2) +#define V_CMD_EOP(v) ((v) << 3) + +/* + * Command queue, receive buffer list, and response queue descriptors.
+ */ +#if defined(__BIG_ENDIAN_BITFIELD) +struct cmdQ_e { + u32 addr_lo; + u32 len_gen; + u32 flags; + u32 addr_hi; +}; + +struct freelQ_e { + u32 addr_lo; + u32 len_gen; + u32 gen2; + u32 addr_hi; +}; + +struct respQ_e { + u32 Qsleeping : 4; + u32 Cmdq1CreditReturn : 5; + u32 Cmdq1DmaComplete : 5; + u32 Cmdq0CreditReturn : 5; + u32 Cmdq0DmaComplete : 5; + u32 FreelistQid : 2; + u32 CreditValid : 1; + u32 DataValid : 1; + u32 Offload : 1; + u32 Eop : 1; + u32 Sop : 1; + u32 GenerationBit : 1; + u32 BufferLength; +}; +#elif defined(__LITTLE_ENDIAN_BITFIELD) +struct cmdQ_e { + u32 len_gen; + u32 addr_lo; + u32 addr_hi; + u32 flags; +}; + +struct freelQ_e { + u32 len_gen; + u32 addr_lo; + u32 addr_hi; + u32 gen2; +}; + +struct respQ_e { + u32 BufferLength; + u32 GenerationBit : 1; + u32 Sop : 1; + u32 Eop : 1; + u32 Offload : 1; + u32 DataValid : 1; + u32 CreditValid : 1; + u32 FreelistQid : 2; + u32 Cmdq0DmaComplete : 5; + u32 Cmdq0CreditReturn : 5; + u32 Cmdq1DmaComplete : 5; + u32 Cmdq1CreditReturn : 5; + u32 Qsleeping : 4; +} ; +#endif + +/* + * SW Context Command and Freelist Queue Descriptors + */ +struct cmdQ_ce { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +struct freelQ_ce { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* + * SW command, freelist and response rings + */ +struct cmdQ { + unsigned long status; /* HW DMA fetch status */ + unsigned int in_use; /* # of in-use command descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int processed; /* total # of descs HW has processed */ + unsigned int cleaned; /* total # of descs SW has reclaimed */ + unsigned int stop_thres; /* SW TX queue suspend threshold */ + u16 pidx; /* producer index (SW) */ + u16 cidx; /* consumer index (HW) */ + u8 genbit; /* current generation (=valid) bit */ + u8 sop; /* is next entry start of packet? 
*/ + struct cmdQ_e *entries; /* HW command descriptor Q */ + struct cmdQ_ce *centries; /* SW command context descriptor Q */ + dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ + spinlock_t lock; /* Lock to protect cmdQ enqueuing */ +}; + +struct freelQ { + unsigned int credits; /* # of available RX buffers */ + unsigned int size; /* free list capacity */ + u16 pidx; /* producer index (SW) */ + u16 cidx; /* consumer index (HW) */ + u16 rx_buffer_size; /* Buffer size on this free list */ + u16 dma_offset; /* DMA offset to align IP headers */ + u16 recycleq_idx; /* skb recycle q to use */ + u8 genbit; /* current generation (=valid) bit */ + struct freelQ_e *entries; /* HW freelist descriptor Q */ + struct freelQ_ce *centries; /* SW freelist context descriptor Q */ + dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ +}; + +struct respQ { + unsigned int credits; /* credits to be returned to SGE */ + unsigned int size; /* # of response Q descriptors */ + u16 cidx; /* consumer index (SW) */ + u8 genbit; /* current generation(=valid) bit */ + struct respQ_e *entries; /* HW response descriptor Q */ + dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ +}; + +/* Bit flags for cmdQ.status */ +enum { + CMDQ_STAT_RUNNING = 1, /* fetch engine is running */ + CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ +}; + +/* T204 TX SW scheduler */ + +/* Per T204 TX port */ +struct sched_port { + unsigned int avail; /* available bits - quota */ + unsigned int drain_bits_per_1024ns; /* drain rate */ + unsigned int speed; /* drain rate, mbps */ + unsigned int mtu; /* mtu size */ + struct sk_buff_head skbq; /* pending skbs */ +}; + +/* Per T204 device */ +struct sched { + ktime_t last_updated; /* last time quotas were computed */ + unsigned int max_avail; /* max bits to be sent to any port */ + unsigned int port; /* port index (round robin ports) */ + unsigned int num; /* num skbs in per port queues */ + struct sched_port p[MAX_NPORTS]; + struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ +}; +static void restart_sched(unsigned long); + + +/* + * Main SGE data structure + * + * Interrupts are handled by a single CPU and it is likely that on a MP system + * the application is migrated to another CPU. In that scenario, we try to + * separate the RX(in irq context) and TX state in order to decrease memory + * contention. 
+ */ +struct sge { + struct adapter *adapter; /* adapter backpointer */ + struct net_device *netdev; /* netdevice backpointer */ + struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ + struct respQ respQ; /* response Q */ + unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ + unsigned int rx_pkt_pad; /* RX padding for L2 packets */ + unsigned int jumbo_fl; /* jumbo freelist Q index */ + unsigned int intrtimer_nres; /* no-resource interrupt timer */ + unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ + struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ + struct timer_list espibug_timer; + unsigned long espibug_timeout; + struct sk_buff *espibug_skb[MAX_NPORTS]; + u32 sge_control; /* shadow value of sge control reg */ + struct sge_intr_counts stats; + struct sge_port_stats __percpu *port_stats[MAX_NPORTS]; + struct sched *tx_sched; + struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; +}; + +static const u8 ch_mac_addr[ETH_ALEN] = { + 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 +}; + +/* + * stop tasklet and free all pending skb's + */ +static void tx_sched_stop(struct sge *sge) +{ + struct sched *s = sge->tx_sched; + int i; + + tasklet_kill(&s->sched_tsk); + + for (i = 0; i < MAX_NPORTS; i++) + __skb_queue_purge(&s->p[s->port].skbq); +} + +/* + * t1_sched_update_parms() is called when the MTU or link speed changes. It + * re-computes scheduler parameters to scope with the change. + */ +unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, + unsigned int mtu, unsigned int speed) +{ + struct sched *s = sge->tx_sched; + struct sched_port *p = &s->p[port]; + unsigned int max_avail_segs; + + pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed); + if (speed) + p->speed = speed; + if (mtu) + p->mtu = mtu; + + if (speed || mtu) { + unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40); + do_div(drain, (p->mtu + 50) * 1000); + p->drain_bits_per_1024ns = (unsigned int) drain; + + if (p->speed < 1000) + p->drain_bits_per_1024ns = + 90 * p->drain_bits_per_1024ns / 100; + } + + if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { + p->drain_bits_per_1024ns -= 16; + s->max_avail = max(4096U, p->mtu + 16 + 14 + 4); + max_avail_segs = max(1U, 4096 / (p->mtu - 40)); + } else { + s->max_avail = 16384; + max_avail_segs = max(1U, 9000 / (p->mtu - 40)); + } + + pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u " + "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu, + p->speed, s->max_avail, max_avail_segs, + p->drain_bits_per_1024ns); + + return max_avail_segs * (p->mtu - 40); +} + +#if 0 + +/* + * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of + * data that can be pushed per port. + */ +void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val) +{ + struct sched *s = sge->tx_sched; + unsigned int i; + + s->max_avail = val; + for (i = 0; i < MAX_NPORTS; i++) + t1_sched_update_parms(sge, i, 0, 0); +} + +/* + * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port + * is draining. + */ +void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port, + unsigned int val) +{ + struct sched *s = sge->tx_sched; + struct sched_port *p = &s->p[port]; + p->drain_bits_per_1024ns = val * 1024 / 1000; + t1_sched_update_parms(sge, port, 0, 0); +} + +#endif /* 0 */ + +/* + * tx_sched_init() allocates resources and does basic initialization. 
+ */ +static int tx_sched_init(struct sge *sge) +{ + struct sched *s; + int i; + + s = kzalloc(sizeof (struct sched), GFP_KERNEL); + if (!s) + return -ENOMEM; + + pr_debug("tx_sched_init\n"); + tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); + sge->tx_sched = s; + + for (i = 0; i < MAX_NPORTS; i++) { + skb_queue_head_init(&s->p[i].skbq); + t1_sched_update_parms(sge, i, 1500, 1000); + } + + return 0; +} + +/* + * sched_update_avail() computes the delta since the last time it was called + * and updates the per port quota (number of bits that can be sent to the any + * port). + */ +static inline int sched_update_avail(struct sge *sge) +{ + struct sched *s = sge->tx_sched; + ktime_t now = ktime_get(); + unsigned int i; + long long delta_time_ns; + + delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); + + pr_debug("sched_update_avail delta=%lld\n", delta_time_ns); + if (delta_time_ns < 15000) + return 0; + + for (i = 0; i < MAX_NPORTS; i++) { + struct sched_port *p = &s->p[i]; + unsigned int delta_avail; + + delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; + p->avail = min(p->avail + delta_avail, s->max_avail); + } + + s->last_updated = now; + + return 1; +} + +/* + * sched_skb() is called from two different places. In the tx path, any + * packet generating load on an output port will call sched_skb() + * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq + * context (skb == NULL). + * The scheduler only returns a skb (which will then be sent) if the + * length of the skb is <= the current quota of the output port. + */ +static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, + unsigned int credits) +{ + struct sched *s = sge->tx_sched; + struct sk_buff_head *skbq; + unsigned int i, len, update = 1; + + pr_debug("sched_skb %p\n", skb); + if (!skb) { + if (!s->num) + return NULL; + } else { + skbq = &s->p[skb->dev->if_port].skbq; + __skb_queue_tail(skbq, skb); + s->num++; + skb = NULL; + } + + if (credits < MAX_SKB_FRAGS + 1) + goto out; + +again: + for (i = 0; i < MAX_NPORTS; i++) { + s->port = (s->port + 1) & (MAX_NPORTS - 1); + skbq = &s->p[s->port].skbq; + + skb = skb_peek(skbq); + + if (!skb) + continue; + + len = skb->len; + if (len <= s->p[s->port].avail) { + s->p[s->port].avail -= len; + s->num--; + __skb_unlink(skb, skbq); + goto out; + } + skb = NULL; + } + + if (update-- && sched_update_avail(sge)) + goto again; + +out: + /* If there are more pending skbs, we use the hardware to schedule us + * again. + */ + if (s->num && !skb) { + struct cmdQ *q = &sge->cmdQ[0]; + clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { + set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); + } + } + pr_debug("sched_skb ret %p\n", skb); + + return skb; +} + +/* + * PIO to indicate that memory mapped Q contains valid descriptor(s). + */ +static inline void doorbell_pio(struct adapter *adapter, u32 val) +{ + wmb(); + writel(val, adapter->regs + A_SG_DOORBELL); +} + +/* + * Frees all RX buffers on the freelist Q. The caller must make sure that + * the SGE is turned off before calling this function. 
+ */ +static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) +{ + unsigned int cidx = q->cidx; + + while (q->credits--) { + struct freelQ_ce *ce = &q->centries[cidx]; + + pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(ce->skb); + ce->skb = NULL; + if (++cidx == q->size) + cidx = 0; + } +} + +/* + * Free RX free list and response queue resources. + */ +static void free_rx_resources(struct sge *sge) +{ + struct pci_dev *pdev = sge->adapter->pdev; + unsigned int size, i; + + if (sge->respQ.entries) { + size = sizeof(struct respQ_e) * sge->respQ.size; + pci_free_consistent(pdev, size, sge->respQ.entries, + sge->respQ.dma_addr); + } + + for (i = 0; i < SGE_FREELQ_N; i++) { + struct freelQ *q = &sge->freelQ[i]; + + if (q->centries) { + free_freelQ_buffers(pdev, q); + kfree(q->centries); + } + if (q->entries) { + size = sizeof(struct freelQ_e) * q->size; + pci_free_consistent(pdev, size, q->entries, + q->dma_addr); + } + } +} + +/* + * Allocates basic RX resources, consisting of memory mapped freelist Qs and a + * response queue. + */ +static int alloc_rx_resources(struct sge *sge, struct sge_params *p) +{ + struct pci_dev *pdev = sge->adapter->pdev; + unsigned int size, i; + + for (i = 0; i < SGE_FREELQ_N; i++) { + struct freelQ *q = &sge->freelQ[i]; + + q->genbit = 1; + q->size = p->freelQ_size[i]; + q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; + size = sizeof(struct freelQ_e) * q->size; + q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); + if (!q->entries) + goto err_no_mem; + + size = sizeof(struct freelQ_ce) * q->size; + q->centries = kzalloc(size, GFP_KERNEL); + if (!q->centries) + goto err_no_mem; + } + + /* + * Calculate the buffer sizes for the two free lists. FL0 accommodates + * regular sized Ethernet frames, FL1 is sized not to exceed 16K, + * including all the sk_buff overhead. + * + * Note: For T2 FL0 and FL1 are reversed. + */ + sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + + sizeof(struct cpl_rx_data) + + sge->freelQ[!sge->jumbo_fl].dma_offset; + + size = (16 * 1024) - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; + + /* + * Setup which skb recycle Q should be used when recycling buffers from + * each free list. + */ + sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; + sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; + + sge->respQ.genbit = 1; + sge->respQ.size = SGE_RESPQ_E_N; + sge->respQ.credits = 0; + size = sizeof(struct respQ_e) * sge->respQ.size; + sge->respQ.entries = + pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); + if (!sge->respQ.entries) + goto err_no_mem; + return 0; + +err_no_mem: + free_rx_resources(sge); + return -ENOMEM; +} + +/* + * Reclaims n TX descriptors and frees the buffers associated with them. + */ +static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) +{ + struct cmdQ_ce *ce; + struct pci_dev *pdev = sge->adapter->pdev; + unsigned int cidx = q->cidx; + + q->in_use -= n; + ce = &q->centries[cidx]; + while (n--) { + if (likely(dma_unmap_len(ce, dma_len))) { + pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), + PCI_DMA_TODEVICE); + if (q->sop) + q->sop = 0; + } + if (ce->skb) { + dev_kfree_skb_any(ce->skb); + q->sop = 1; + } + ce++; + if (++cidx == q->size) { + cidx = 0; + ce = q->centries; + } + } + q->cidx = cidx; +} + +/* + * Free TX resources. 
+ * + * Assumes that SGE is stopped and all interrupts are disabled. + */ +static void free_tx_resources(struct sge *sge) +{ + struct pci_dev *pdev = sge->adapter->pdev; + unsigned int size, i; + + for (i = 0; i < SGE_CMDQ_N; i++) { + struct cmdQ *q = &sge->cmdQ[i]; + + if (q->centries) { + if (q->in_use) + free_cmdQ_buffers(sge, q, q->in_use); + kfree(q->centries); + } + if (q->entries) { + size = sizeof(struct cmdQ_e) * q->size; + pci_free_consistent(pdev, size, q->entries, + q->dma_addr); + } + } +} + +/* + * Allocates basic TX resources, consisting of memory mapped command Qs. + */ +static int alloc_tx_resources(struct sge *sge, struct sge_params *p) +{ + struct pci_dev *pdev = sge->adapter->pdev; + unsigned int size, i; + + for (i = 0; i < SGE_CMDQ_N; i++) { + struct cmdQ *q = &sge->cmdQ[i]; + + q->genbit = 1; + q->sop = 1; + q->size = p->cmdQ_size[i]; + q->in_use = 0; + q->status = 0; + q->processed = q->cleaned = 0; + q->stop_thres = 0; + spin_lock_init(&q->lock); + size = sizeof(struct cmdQ_e) * q->size; + q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); + if (!q->entries) + goto err_no_mem; + + size = sizeof(struct cmdQ_ce) * q->size; + q->centries = kzalloc(size, GFP_KERNEL); + if (!q->centries) + goto err_no_mem; + } + + /* + * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE + * only. For queue 0 set the stop threshold so we can handle one more + * packet from each port, plus reserve an additional 24 entries for + * Ethernet packets only. Queue 1 never suspends nor do we reserve + * space for Ethernet packets. + */ + sge->cmdQ[0].stop_thres = sge->adapter->params.nports * + (MAX_SKB_FRAGS + 1); + return 0; + +err_no_mem: + free_tx_resources(sge); + return -ENOMEM; +} + +static inline void setup_ring_params(struct adapter *adapter, u64 addr, + u32 size, int base_reg_lo, + int base_reg_hi, int size_reg) +{ + writel((u32)addr, adapter->regs + base_reg_lo); + writel(addr >> 32, adapter->regs + base_reg_hi); + writel(size, adapter->regs + size_reg); +} + +/* + * Enable/disable VLAN acceleration. + */ +void t1_vlan_mode(struct adapter *adapter, netdev_features_t features) +{ + struct sge *sge = adapter->sge; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + sge->sge_control |= F_VLAN_XTRACT; + else + sge->sge_control &= ~F_VLAN_XTRACT; + if (adapter->open_device_map) { + writel(sge->sge_control, adapter->regs + A_SG_CONTROL); + readl(adapter->regs + A_SG_CONTROL); /* flush */ + } +} + +/* + * Programs the various SGE registers. However, the engine is not yet enabled, + * but sge->sge_control is setup and ready to go. + */ +static void configure_sge(struct sge *sge, struct sge_params *p) +{ + struct adapter *ap = sge->adapter; + + writel(0, ap->regs + A_SG_CONTROL); + setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, + A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); + setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, + A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); + setup_ring_params(ap, sge->freelQ[0].dma_addr, + sge->freelQ[0].size, A_SG_FL0BASELWR, + A_SG_FL0BASEUPR, A_SG_FL0SIZE); + setup_ring_params(ap, sge->freelQ[1].dma_addr, + sge->freelQ[1].size, A_SG_FL1BASELWR, + A_SG_FL1BASEUPR, A_SG_FL1SIZE); + + /* The threshold comparison uses <. 
*/ + writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD); + + setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, + A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); + writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); + + sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | + F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | + V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | + V_RX_PKT_OFFSET(sge->rx_pkt_pad); + +#if defined(__BIG_ENDIAN_BITFIELD) + sge->sge_control |= F_ENABLE_BIG_ENDIAN; +#endif + + /* Initialize no-resource timer */ + sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); + + t1_sge_set_coalesce_params(sge, p); +} + +/* + * Return the payload capacity of the jumbo free-list buffers. + */ +static inline unsigned int jumbo_payload_capacity(const struct sge *sge) +{ + return sge->freelQ[sge->jumbo_fl].rx_buffer_size - + sge->freelQ[sge->jumbo_fl].dma_offset - + sizeof(struct cpl_rx_data); +} + +/* + * Frees all SGE related resources and the sge structure itself + */ +void t1_sge_destroy(struct sge *sge) +{ + int i; + + for_each_port(sge->adapter, i) + free_percpu(sge->port_stats[i]); + + kfree(sge->tx_sched); + free_tx_resources(sge); + free_rx_resources(sge); + kfree(sge); +} + +/* + * Allocates new RX buffers on the freelist Q (and tracks them on the freelist + * context Q) until the Q is full or alloc_skb fails. + * + * It is possible that the generation bits already match, indicating that the + * buffer is already valid and nothing needs to be done. This happens when we + * copied a received buffer into a new sk_buff during the interrupt processing. + * + * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), + * we specify a RX_OFFSET in order to make sure that the IP header is 4B + * aligned. + */ +static void refill_free_list(struct sge *sge, struct freelQ *q) +{ + struct pci_dev *pdev = sge->adapter->pdev; + struct freelQ_ce *ce = &q->centries[q->pidx]; + struct freelQ_e *e = &q->entries[q->pidx]; + unsigned int dma_len = q->rx_buffer_size - q->dma_offset; + + while (q->credits < q->size) { + struct sk_buff *skb; + dma_addr_t mapping; + + skb = dev_alloc_skb(q->rx_buffer_size); + if (!skb) + break; + + skb_reserve(skb, q->dma_offset); + mapping = pci_map_single(pdev, skb->data, dma_len, + PCI_DMA_FROMDEVICE); + skb_reserve(skb, sge->rx_pkt_pad); + + ce->skb = skb; + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, dma_len); + e->addr_lo = (u32)mapping; + e->addr_hi = (u64)mapping >> 32; + e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); + wmb(); + e->gen2 = V_CMD_GEN2(q->genbit); + + e++; + ce++; + if (++q->pidx == q->size) { + q->pidx = 0; + q->genbit ^= 1; + ce = q->centries; + e = q->entries; + } + q->credits++; + } +} + +/* + * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 + * of both rings, we go into 'few interrupt mode' in order to give the system + * time to free up resources. 
+ */ +static void freelQs_empty(struct sge *sge) +{ + struct adapter *adapter = sge->adapter; + u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); + u32 irqholdoff_reg; + + refill_free_list(sge, &sge->freelQ[0]); + refill_free_list(sge, &sge->freelQ[1]); + + if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && + sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { + irq_reg |= F_FL_EXHAUSTED; + irqholdoff_reg = sge->fixed_intrtimer; + } else { + /* Clear the F_FL_EXHAUSTED interrupts for now */ + irq_reg &= ~F_FL_EXHAUSTED; + irqholdoff_reg = sge->intrtimer_nres; + } + writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); + writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); + + /* We reenable the Qs to force a freelist GTS interrupt later */ + doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); +} + +#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) +#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) +#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ + F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) + +/* + * Disable SGE Interrupts + */ +void t1_sge_intr_disable(struct sge *sge) +{ + u32 val = readl(sge->adapter->regs + A_PL_ENABLE); + + writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); + writel(0, sge->adapter->regs + A_SG_INT_ENABLE); +} + +/* + * Enable SGE interrupts. + */ +void t1_sge_intr_enable(struct sge *sge) +{ + u32 en = SGE_INT_ENABLE; + u32 val = readl(sge->adapter->regs + A_PL_ENABLE); + + if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO) + en &= ~F_PACKET_TOO_BIG; + writel(en, sge->adapter->regs + A_SG_INT_ENABLE); + writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); +} + +/* + * Clear SGE interrupts. + */ +void t1_sge_intr_clear(struct sge *sge) +{ + writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); + writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); +} + +/* + * SGE 'Error' interrupt handler + */ +int t1_sge_intr_error_handler(struct sge *sge) +{ + struct adapter *adapter = sge->adapter; + u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); + + if (adapter->port[0].dev->hw_features & NETIF_F_TSO) + cause &= ~F_PACKET_TOO_BIG; + if (cause & F_RESPQ_EXHAUSTED) + sge->stats.respQ_empty++; + if (cause & F_RESPQ_OVERFLOW) { + sge->stats.respQ_overflow++; + pr_alert("%s: SGE response queue overflow\n", + adapter->name); + } + if (cause & F_FL_EXHAUSTED) { + sge->stats.freelistQ_empty++; + freelQs_empty(sge); + } + if (cause & F_PACKET_TOO_BIG) { + sge->stats.pkt_too_big++; + pr_alert("%s: SGE max packet size exceeded\n", + adapter->name); + } + if (cause & F_PACKET_MISMATCH) { + sge->stats.pkt_mismatch++; + pr_alert("%s: SGE packet mismatch\n", adapter->name); + } + if (cause & SGE_INT_FATAL) + t1_fatal_err(adapter); + + writel(cause, adapter->regs + A_SG_INT_CAUSE); + return 0; +} + +const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) +{ + return &sge->stats; +} + +void t1_sge_get_port_stats(const struct sge *sge, int port, + struct sge_port_stats *ss) +{ + int cpu; + + memset(ss, 0, sizeof(*ss)); + for_each_possible_cpu(cpu) { + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); + + ss->rx_cso_good += st->rx_cso_good; + ss->tx_cso += st->tx_cso; + ss->tx_tso += st->tx_tso; + ss->tx_need_hdrroom += st->tx_need_hdrroom; + ss->vlan_xtract += st->vlan_xtract; + ss->vlan_insert += st->vlan_insert; + } +} + +/** + * recycle_fl_buf - recycle a free list buffer + * @fl: the free list + * @idx: index of buffer to 
recycle + * + * Recycles the specified buffer on the given free list by adding it at + * the next available slot on the list. + */ +static void recycle_fl_buf(struct freelQ *fl, int idx) +{ + struct freelQ_e *from = &fl->entries[idx]; + struct freelQ_e *to = &fl->entries[fl->pidx]; + + fl->centries[fl->pidx] = fl->centries[idx]; + to->addr_lo = from->addr_lo; + to->addr_hi = from->addr_hi; + to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); + wmb(); + to->gen2 = V_CMD_GEN2(fl->genbit); + fl->credits++; + + if (++fl->pidx == fl->size) { + fl->pidx = 0; + fl->genbit ^= 1; + } +} + +static int copybreak __read_mostly = 256; +module_param(copybreak, int, 0); +MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +/** + * get_packet - return the next ingress packet buffer + * @adapter: the adapter that received the packet + * @fl: the SGE free list holding the packet + * @len: the actual packet length, excluding any SGE padding + * + * Get the next packet from a free list and complete setup of the + * sk_buff. If the packet is small we make a copy and recycle the + * original buffer, otherwise we use the original buffer itself. If a + * positive drop threshold is supplied packets are dropped and their + * buffers recycled if (a) the number of remaining buffers is under the + * threshold and the packet is too big to copy, or (b) the packet should + * be copied but there is no memory for the copy. + */ +static inline struct sk_buff *get_packet(struct adapter *adapter, + struct freelQ *fl, unsigned int len) +{ + const struct freelQ_ce *ce = &fl->centries[fl->cidx]; + struct pci_dev *pdev = adapter->pdev; + struct sk_buff *skb; + + if (len < copybreak) { + skb = napi_alloc_skb(&adapter->napi, len); + if (!skb) + goto use_orig_buf; + + skb_put(skb, len); + pci_dma_sync_single_for_cpu(pdev, + dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), + PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(ce->skb, skb->data, len); + pci_dma_sync_single_for_device(pdev, + dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), + PCI_DMA_FROMDEVICE); + recycle_fl_buf(fl, fl->cidx); + return skb; + } + +use_orig_buf: + if (fl->credits < 2) { + recycle_fl_buf(fl, fl->cidx); + return NULL; + } + + pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); + skb = ce->skb; + prefetch(skb->data); + + skb_put(skb, len); + return skb; +} + +/** + * unexpected_offload - handle an unexpected offload packet + * @adapter: the adapter + * @fl: the free list that received the packet + * + * Called when we receive an unexpected offload packet (e.g., the TOE + * function is disabled or the card is a NIC). Prints a message and + * recycles the buffer. + */ +static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) +{ + struct freelQ_ce *ce = &fl->centries[fl->cidx]; + struct sk_buff *skb = ce->skb; + + pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr), + dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); + pr_err("%s: unexpected offload packet, cmd %u\n", + adapter->name, *skb->data); + recycle_fl_buf(fl, fl->cidx); +} + +/* + * T1/T2 SGE limits the maximum DMA size per TX descriptor to + * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the + * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner. + * Note that the *_large_page_tx_descs stuff will be optimized out when + * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. 
+ * + * compute_large_page_descs() computes how many additional descriptors are + * required to break down the stack's request. + */ +static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) +{ + unsigned int count = 0; + + if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int i, len = skb_headlen(skb); + while (len > SGE_TX_DESC_MAX_PLEN) { + count++; + len -= SGE_TX_DESC_MAX_PLEN; + } + for (i = 0; nfrags--; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + len = skb_frag_size(frag); + while (len > SGE_TX_DESC_MAX_PLEN) { + count++; + len -= SGE_TX_DESC_MAX_PLEN; + } + } + } + return count; +} + +/* + * Write a cmdQ entry. + * + * Since this function writes the 'flags' field, it must not be used to + * write the first cmdQ entry. + */ +static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, + unsigned int len, unsigned int gen, + unsigned int eop) +{ + BUG_ON(len > SGE_TX_DESC_MAX_PLEN); + + e->addr_lo = (u32)mapping; + e->addr_hi = (u64)mapping >> 32; + e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); + e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); +} + +/* + * See comment for previous function. + * + * write_tx_descs_large_page() writes additional SGE tx descriptors if + * *desc_len exceeds HW's capability. + */ +static inline unsigned int write_large_page_tx_descs(unsigned int pidx, + struct cmdQ_e **e, + struct cmdQ_ce **ce, + unsigned int *gen, + dma_addr_t *desc_mapping, + unsigned int *desc_len, + unsigned int nfrags, + struct cmdQ *q) +{ + if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { + struct cmdQ_e *e1 = *e; + struct cmdQ_ce *ce1 = *ce; + + while (*desc_len > SGE_TX_DESC_MAX_PLEN) { + *desc_len -= SGE_TX_DESC_MAX_PLEN; + write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, + *gen, nfrags == 0 && *desc_len == 0); + ce1->skb = NULL; + dma_unmap_len_set(ce1, dma_len, 0); + *desc_mapping += SGE_TX_DESC_MAX_PLEN; + if (*desc_len) { + ce1++; + e1++; + if (++pidx == q->size) { + pidx = 0; + *gen ^= 1; + ce1 = q->centries; + e1 = q->entries; + } + } + } + *e = e1; + *ce = ce1; + } + return pidx; +} + +/* + * Write the command descriptors to transmit the given skb starting at + * descriptor pidx with the given generation. + */ +static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, + unsigned int pidx, unsigned int gen, + struct cmdQ *q) +{ + dma_addr_t mapping, desc_mapping; + struct cmdQ_e *e, *e1; + struct cmdQ_ce *ce; + unsigned int i, flags, first_desc_len, desc_len, + nfrags = skb_shinfo(skb)->nr_frags; + + e = e1 = &q->entries[pidx]; + ce = &q->centries[pidx]; + + mapping = pci_map_single(adapter->pdev, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + + desc_mapping = mapping; + desc_len = skb_headlen(skb); + + flags = F_CMD_DATAVALID | F_CMD_SOP | + V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | + V_CMD_GEN2(gen); + first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? 
+ desc_len : SGE_TX_DESC_MAX_PLEN; + e->addr_lo = (u32)desc_mapping; + e->addr_hi = (u64)desc_mapping >> 32; + e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); + ce->skb = NULL; + dma_unmap_len_set(ce, dma_len, 0); + + if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && + desc_len > SGE_TX_DESC_MAX_PLEN) { + desc_mapping += first_desc_len; + desc_len -= first_desc_len; + e1++; + ce++; + if (++pidx == q->size) { + pidx = 0; + gen ^= 1; + e1 = q->entries; + ce = q->centries; + } + pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, + &desc_mapping, &desc_len, + nfrags, q); + + if (likely(desc_len)) + write_tx_desc(e1, desc_mapping, desc_len, gen, + nfrags == 0); + } + + ce->skb = NULL; + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, skb_headlen(skb)); + + for (i = 0; nfrags--; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + e1++; + ce++; + if (++pidx == q->size) { + pidx = 0; + gen ^= 1; + e1 = q->entries; + ce = q->centries; + } + + mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + desc_mapping = mapping; + desc_len = skb_frag_size(frag); + + pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, + &desc_mapping, &desc_len, + nfrags, q); + if (likely(desc_len)) + write_tx_desc(e1, desc_mapping, desc_len, gen, + nfrags == 0); + ce->skb = NULL; + dma_unmap_addr_set(ce, dma_addr, mapping); + dma_unmap_len_set(ce, dma_len, skb_frag_size(frag)); + } + ce->skb = skb; + wmb(); + e->flags = flags; +} + +/* + * Clean up completed Tx buffers. + */ +static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) +{ + unsigned int reclaim = q->processed - q->cleaned; + + if (reclaim) { + pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n", + q->processed, q->cleaned); + free_cmdQ_buffers(sge, q, reclaim); + q->cleaned += reclaim; + } +} + +/* + * Called from tasklet. Checks the scheduler for any + * pending skbs that can be sent. + */ +static void restart_sched(unsigned long arg) +{ + struct sge *sge = (struct sge *) arg; + struct adapter *adapter = sge->adapter; + struct cmdQ *q = &sge->cmdQ[0]; + struct sk_buff *skb; + unsigned int credits, queued_skb = 0; + + spin_lock(&q->lock); + reclaim_completed_tx(sge, q); + + credits = q->size - q->in_use; + pr_debug("restart_sched credits=%d\n", credits); + while ((skb = sched_skb(sge, NULL, credits)) != NULL) { + unsigned int genbit, pidx, count; + count = 1 + skb_shinfo(skb)->nr_frags; + count += compute_large_page_tx_descs(skb); + q->in_use += count; + genbit = q->genbit; + pidx = q->pidx; + q->pidx += count; + if (q->pidx >= q->size) { + q->pidx -= q->size; + q->genbit ^= 1; + } + write_tx_descs(adapter, skb, pidx, genbit, q); + credits = q->size - q->in_use; + queued_skb = 1; + } + + if (queued_skb) { + clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { + set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); + } + } + spin_unlock(&q->lock); +} + +/** + * sge_rx - process an ingress ethernet packet + * @sge: the sge structure + * @fl: the free list that contains the packet buffer + * @len: the packet length + * + * Process an ingress ethernet pakcet and deliver it to the stack. 
+ */ +static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) +{ + struct sk_buff *skb; + const struct cpl_rx_pkt *p; + struct adapter *adapter = sge->adapter; + struct sge_port_stats *st; + struct net_device *dev; + + skb = get_packet(adapter, fl, len - sge->rx_pkt_pad); + if (unlikely(!skb)) { + sge->stats.rx_drops++; + return; + } + + p = (const struct cpl_rx_pkt *) skb->data; + if (p->iff >= adapter->params.nports) { + kfree_skb(skb); + return; + } + __skb_pull(skb, sizeof(*p)); + + st = this_cpu_ptr(sge->port_stats[p->iff]); + dev = adapter->port[p->iff].dev; + + skb->protocol = eth_type_trans(skb, dev); + if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff && + skb->protocol == htons(ETH_P_IP) && + (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { + ++st->rx_cso_good; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb_checksum_none_assert(skb); + + if (p->vlan_valid) { + st->vlan_xtract++; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); + } + netif_receive_skb(skb); +} + +/* + * Returns true if a command queue has enough available descriptors that + * we can resume Tx operation after temporarily disabling its packet queue. + */ +static inline int enough_free_Tx_descs(const struct cmdQ *q) +{ + unsigned int r = q->processed - q->cleaned; + + return q->in_use - r < (q->size >> 1); +} + +/* + * Called when sufficient space has become available in the SGE command queues + * after the Tx packet schedulers have been suspended to restart the Tx path. + */ +static void restart_tx_queues(struct sge *sge) +{ + struct adapter *adap = sge->adapter; + int i; + + if (!enough_free_Tx_descs(&sge->cmdQ[0])) + return; + + for_each_port(adap, i) { + struct net_device *nd = adap->port[i].dev; + + if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && + netif_running(nd)) { + sge->stats.cmdQ_restarted[2]++; + netif_wake_queue(nd); + } + } +} + +/* + * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 + * information. + */ +static unsigned int update_tx_info(struct adapter *adapter, + unsigned int flags, + unsigned int pr0) +{ + struct sge *sge = adapter->sge; + struct cmdQ *cmdq = &sge->cmdQ[0]; + + cmdq->processed += pr0; + if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) { + freelQs_empty(sge); + flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE); + } + if (flags & F_CMDQ0_ENABLE) { + clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); + + if (cmdq->cleaned + cmdq->in_use != cmdq->processed && + !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { + set_bit(CMDQ_STAT_RUNNING, &cmdq->status); + writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); + } + if (sge->tx_sched) + tasklet_hi_schedule(&sge->tx_sched->sched_tsk); + + flags &= ~F_CMDQ0_ENABLE; + } + + if (unlikely(sge->stopped_tx_queues != 0)) + restart_tx_queues(sge); + + return flags; +} + +/* + * Process SGE responses, up to the supplied budget. Returns the number of + * responses processed. A negative budget is effectively unlimited. 
+ */ +static int process_responses(struct adapter *adapter, int budget) +{ + struct sge *sge = adapter->sge; + struct respQ *q = &sge->respQ; + struct respQ_e *e = &q->entries[q->cidx]; + int done = 0; + unsigned int flags = 0; + unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; + + while (done < budget && e->GenerationBit == q->genbit) { + flags |= e->Qsleeping; + + cmdq_processed[0] += e->Cmdq0CreditReturn; + cmdq_processed[1] += e->Cmdq1CreditReturn; + + /* We batch updates to the TX side to avoid cacheline + * ping-pong of TX state information on MP where the sender + * might run on a different CPU than this function... + */ + if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) { + flags = update_tx_info(adapter, flags, cmdq_processed[0]); + cmdq_processed[0] = 0; + } + + if (unlikely(cmdq_processed[1] > 16)) { + sge->cmdQ[1].processed += cmdq_processed[1]; + cmdq_processed[1] = 0; + } + + if (likely(e->DataValid)) { + struct freelQ *fl = &sge->freelQ[e->FreelistQid]; + + BUG_ON(!e->Sop || !e->Eop); + if (unlikely(e->Offload)) + unexpected_offload(adapter, fl); + else + sge_rx(sge, fl, e->BufferLength); + + ++done; + + /* + * Note: this depends on each packet consuming a + * single free-list buffer; cf. the BUG above. + */ + if (++fl->cidx == fl->size) + fl->cidx = 0; + prefetch(fl->centries[fl->cidx].skb); + + if (unlikely(--fl->credits < + fl->size - SGE_FREEL_REFILL_THRESH)) + refill_free_list(sge, fl); + } else + sge->stats.pure_rsps++; + + e++; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->genbit ^= 1; + e = q->entries; + } + prefetch(e); + + if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { + writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); + q->credits = 0; + } + } + + flags = update_tx_info(adapter, flags, cmdq_processed[0]); + sge->cmdQ[1].processed += cmdq_processed[1]; + + return done; +} + +static inline int responses_pending(const struct adapter *adapter) +{ + const struct respQ *Q = &adapter->sge->respQ; + const struct respQ_e *e = &Q->entries[Q->cidx]; + + return e->GenerationBit == Q->genbit; +} + +/* + * A simpler version of process_responses() that handles only pure (i.e., + * non data-carrying) responses. Such respones are too light-weight to justify + * calling a softirq when using NAPI, so we handle them specially in hard + * interrupt context. The function is called with a pointer to a response, + * which the caller must ensure is a valid pure response. Returns 1 if it + * encounters a valid data-carrying response, 0 otherwise. 
+ */ +static int process_pure_responses(struct adapter *adapter) +{ + struct sge *sge = adapter->sge; + struct respQ *q = &sge->respQ; + struct respQ_e *e = &q->entries[q->cidx]; + const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; + unsigned int flags = 0; + unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; + + prefetch(fl->centries[fl->cidx].skb); + if (e->DataValid) + return 1; + + do { + flags |= e->Qsleeping; + + cmdq_processed[0] += e->Cmdq0CreditReturn; + cmdq_processed[1] += e->Cmdq1CreditReturn; + + e++; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->genbit ^= 1; + e = q->entries; + } + prefetch(e); + + if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { + writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); + q->credits = 0; + } + sge->stats.pure_rsps++; + } while (e->GenerationBit == q->genbit && !e->DataValid); + + flags = update_tx_info(adapter, flags, cmdq_processed[0]); + sge->cmdQ[1].processed += cmdq_processed[1]; + + return e->GenerationBit == q->genbit; +} + +/* + * Handler for new data events when using NAPI. This does not need any locking + * or protection from interrupts as data interrupts are off at this point and + * other adapter interrupts do not interfere. + */ +int t1_poll(struct napi_struct *napi, int budget) +{ + struct adapter *adapter = container_of(napi, struct adapter, napi); + int work_done = process_responses(adapter, budget); + + if (likely(work_done < budget)) { + napi_complete(napi); + writel(adapter->sge->respQ.cidx, + adapter->regs + A_SG_SLEEPING); + } + return work_done; +} + +irqreturn_t t1_interrupt(int irq, void *data) +{ + struct adapter *adapter = data; + struct sge *sge = adapter->sge; + int handled; + + if (likely(responses_pending(adapter))) { + writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); + + if (napi_schedule_prep(&adapter->napi)) { + if (process_pure_responses(adapter)) + __napi_schedule(&adapter->napi); + else { + /* no data, no NAPI needed */ + writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); + /* undo schedule_prep */ + napi_enable(&adapter->napi); + } + } + return IRQ_HANDLED; + } + + spin_lock(&adapter->async_lock); + handled = t1_slow_intr_handler(adapter); + spin_unlock(&adapter->async_lock); + + if (!handled) + sge->stats.unhandled_irqs++; + + return IRQ_RETVAL(handled != 0); +} + +/* + * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. + * + * The code figures out how many entries the sk_buff will require in the + * cmdQ and updates the cmdQ data structure with the state once the enqueue + * has complete. Then, it doesn't access the global structure anymore, but + * uses the corresponding fields on the stack. In conjunction with a spinlock + * around that code, we can make the function reentrant without holding the + * lock when we actually enqueue (which might be expensive, especially on + * architectures with IO MMUs). + * + * This runs with softirqs disabled. 
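+ *
+ * In outline, the locked section only reserves descriptors and advances
+ * pidx/genbit; the potentially expensive write_tx_descs() then runs
+ * outside the lock on the slots that were reserved:
+ *
+ *	spin_trylock(&q->lock);
+ *	... reserve count descriptors, snapshot pidx and genbit ...
+ *	spin_unlock(&q->lock);
+ *	write_tx_descs(adapter, skb, pidx, genbit, q);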
+ */ +static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, + unsigned int qid, struct net_device *dev) +{ + struct sge *sge = adapter->sge; + struct cmdQ *q = &sge->cmdQ[qid]; + unsigned int credits, pidx, genbit, count, use_sched_skb = 0; + + if (!spin_trylock(&q->lock)) + return NETDEV_TX_LOCKED; + + reclaim_completed_tx(sge, q); + + pidx = q->pidx; + credits = q->size - q->in_use; + count = 1 + skb_shinfo(skb)->nr_frags; + count += compute_large_page_tx_descs(skb); + + /* Ethernet packet */ + if (unlikely(credits < count)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + set_bit(dev->if_port, &sge->stopped_tx_queues); + sge->stats.cmdQ_full[2]++; + pr_err("%s: Tx ring full while queue awake!\n", + adapter->name); + } + spin_unlock(&q->lock); + return NETDEV_TX_BUSY; + } + + if (unlikely(credits - count < q->stop_thres)) { + netif_stop_queue(dev); + set_bit(dev->if_port, &sge->stopped_tx_queues); + sge->stats.cmdQ_full[2]++; + } + + /* T204 cmdQ0 skbs that are destined for a certain port have to go + * through the scheduler. + */ + if (sge->tx_sched && !qid && skb->dev) { +use_sched: + use_sched_skb = 1; + /* Note that the scheduler might return a different skb than + * the one passed in. + */ + skb = sched_skb(sge, skb, credits); + if (!skb) { + spin_unlock(&q->lock); + return NETDEV_TX_OK; + } + pidx = q->pidx; + count = 1 + skb_shinfo(skb)->nr_frags; + count += compute_large_page_tx_descs(skb); + } + + q->in_use += count; + genbit = q->genbit; + pidx = q->pidx; + q->pidx += count; + if (q->pidx >= q->size) { + q->pidx -= q->size; + q->genbit ^= 1; + } + spin_unlock(&q->lock); + + write_tx_descs(adapter, skb, pidx, genbit, q); + + /* + * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring + * the doorbell if the Q is asleep. There is a natural race, where + * the hardware is going to sleep just after we checked, however, + * then the interrupt handler will detect the outstanding TX packet + * and ring the doorbell for us. + */ + if (qid) + doorbell_pio(adapter, F_CMDQ1_ENABLE); + else { + clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { + set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); + writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); + } + } + + if (use_sched_skb) { + if (spin_trylock(&q->lock)) { + credits = q->size - q->in_use; + skb = NULL; + goto use_sched; + } + } + return NETDEV_TX_OK; +} + +#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) + +/* + * eth_hdr_len - return the length of an Ethernet header + * @data: pointer to the start of the Ethernet header + * + * Returns the length of an Ethernet header, including optional VLAN tag. + */ +static inline int eth_hdr_len(const void *data) +{ + const struct ethhdr *e = data; + + return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; +} + +/* + * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. + */ +netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct adapter *adapter = dev->ml_priv; + struct sge *sge = adapter->sge; + struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); + struct cpl_tx_pkt *cpl; + struct sk_buff *orig_skb = skb; + int ret; + + if (skb->protocol == htons(ETH_P_CPL5)) + goto send; + + /* + * We are using a non-standard hard_header_len. + * Allocate more header room in the rare cases it is not big enough. 
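+ * (The extra room is consumed by the CPL header pushed further below;
+ * the largest case is sizeof(struct cpl_tx_pkt_lso) for TSO frames,
+ * which is also what skb_realloc_headroom() reserves here.)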
+ */ + if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { + skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); + ++st->tx_need_hdrroom; + dev_kfree_skb_any(orig_skb); + if (!skb) + return NETDEV_TX_OK; + } + + if (skb_shinfo(skb)->gso_size) { + int eth_type; + struct cpl_tx_pkt_lso *hdr; + + ++st->tx_tso; + + eth_type = skb_network_offset(skb) == ETH_HLEN ? + CPL_ETH_II : CPL_ETH_II_VLAN; + + hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); + hdr->opcode = CPL_TX_PKT_LSO; + hdr->ip_csum_dis = hdr->l4_csum_dis = 0; + hdr->ip_hdr_words = ip_hdr(skb)->ihl; + hdr->tcp_hdr_words = tcp_hdr(skb)->doff; + hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, + skb_shinfo(skb)->gso_size)); + hdr->len = htonl(skb->len - sizeof(*hdr)); + cpl = (struct cpl_tx_pkt *)hdr; + } else { + /* + * Packets shorter than ETH_HLEN can break the MAC, drop them + * early. Also, we may get oversized packets because some + * parts of the kernel don't handle our unusual hard_header_len + * right, drop those too. + */ + if (unlikely(skb->len < ETH_HLEN || + skb->len > dev->mtu + eth_hdr_len(skb->data))) { + netdev_dbg(dev, "packet size %d hdr %d mtu%d\n", + skb->len, eth_hdr_len(skb->data), dev->mtu); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL && + ip_hdr(skb)->protocol == IPPROTO_UDP) { + if (unlikely(skb_checksum_help(skb))) { + netdev_dbg(dev, "unable to do udp checksum\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + /* Hmmm, assuming to catch the gratious arp... and we'll use + * it to flush out stuck espi packets... + */ + if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { + if (skb->protocol == htons(ETH_P_ARP) && + arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { + adapter->sge->espibug_skb[dev->if_port] = skb; + /* We want to re-use this skb later. We + * simply bump the reference count and it + * will not be freed... + */ + skb = skb_get(skb); + } + } + + cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); + cpl->opcode = CPL_TX_PKT; + cpl->ip_csum_dis = 1; /* SW calculates IP csum */ + cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1; + /* the length field isn't used so don't bother setting it */ + + st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL); + } + cpl->iff = dev->if_port; + + if (skb_vlan_tag_present(skb)) { + cpl->vlan_valid = 1; + cpl->vlan = htons(skb_vlan_tag_get(skb)); + st->vlan_insert++; + } else + cpl->vlan_valid = 0; + +send: + ret = t1_sge_tx(skb, adapter, 0, dev); + + /* If transmit busy, and we reallocated skb's due to headroom limit, + * then silently discard to avoid leak. + */ + if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { + dev_kfree_skb_any(skb); + ret = NETDEV_TX_OK; + } + return ret; +} + +/* + * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. + */ +static void sge_tx_reclaim_cb(unsigned long data) +{ + int i; + struct sge *sge = (struct sge *)data; + + for (i = 0; i < SGE_CMDQ_N; ++i) { + struct cmdQ *q = &sge->cmdQ[i]; + + if (!spin_trylock(&q->lock)) + continue; + + reclaim_completed_tx(sge, q); + if (i == 0 && q->in_use) { /* flush pending credits */ + writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); + } + spin_unlock(&q->lock); + } + mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); +} + +/* + * Propagate changes of the SGE coalescing parameters to the HW. 
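+ *
+ * The conversion performed below is simply
+ *
+ *	fixed_intrtimer = rx_coalesce_usecs * core_ticks_per_usec(adapter)
+ *
+ * with the result written to A_SG_INTRTIMER.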
+ */ +int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) +{ + sge->fixed_intrtimer = p->rx_coalesce_usecs * + core_ticks_per_usec(sge->adapter); + writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); + return 0; +} + +/* + * Allocates both RX and TX resources and configures the SGE. However, + * the hardware is not enabled yet. + */ +int t1_sge_configure(struct sge *sge, struct sge_params *p) +{ + if (alloc_rx_resources(sge, p)) + return -ENOMEM; + if (alloc_tx_resources(sge, p)) { + free_rx_resources(sge); + return -ENOMEM; + } + configure_sge(sge, p); + + /* + * Now that we have sized the free lists calculate the payload + * capacity of the large buffers. Other parts of the driver use + * this to set the max offload coalescing size so that RX packets + * do not overflow our large buffers. + */ + p->large_buf_capacity = jumbo_payload_capacity(sge); + return 0; +} + +/* + * Disables the DMA engine. + */ +void t1_sge_stop(struct sge *sge) +{ + int i; + writel(0, sge->adapter->regs + A_SG_CONTROL); + readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ + + if (is_T2(sge->adapter)) + del_timer_sync(&sge->espibug_timer); + + del_timer_sync(&sge->tx_reclaim_timer); + if (sge->tx_sched) + tx_sched_stop(sge); + + for (i = 0; i < MAX_NPORTS; i++) + kfree_skb(sge->espibug_skb[i]); +} + +/* + * Enables the DMA engine. + */ +void t1_sge_start(struct sge *sge) +{ + refill_free_list(sge, &sge->freelQ[0]); + refill_free_list(sge, &sge->freelQ[1]); + + writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); + doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); + readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ + + mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); + + if (is_T2(sge->adapter)) + mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); +} + +/* + * Callback for the T2 ESPI 'stuck packet feature' workaorund + */ +static void espibug_workaround_t204(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *sge = adapter->sge; + unsigned int nports = adapter->params.nports; + u32 seop[MAX_NPORTS]; + + if (adapter->open_device_map & PORT_MASK) { + int i; + + if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) + return; + + for (i = 0; i < nports; i++) { + struct sk_buff *skb = sge->espibug_skb[i]; + + if (!netif_running(adapter->port[i].dev) || + netif_queue_stopped(adapter->port[i].dev) || + !seop[i] || ((seop[i] & 0xfff) != 0) || !skb) + continue; + + if (!skb->cb[0]) { + skb_copy_to_linear_data_offset(skb, + sizeof(struct cpl_tx_pkt), + ch_mac_addr, + ETH_ALEN); + skb_copy_to_linear_data_offset(skb, + skb->len - 10, + ch_mac_addr, + ETH_ALEN); + skb->cb[0] = 0xff; + } + + /* bump the reference count to avoid freeing of + * the skb once the DMA has completed. 
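+ * (skb_get() takes an extra reference, so the skb kept in
+ * sge->espibug_skb[] survives the completion-time free and can be
+ * resubmitted on a later timer tick.)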
+ */ + skb = skb_get(skb); + t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); + } + } + mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); +} + +static void espibug_workaround(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *sge = adapter->sge; + + if (netif_running(adapter->port[0].dev)) { + struct sk_buff *skb = sge->espibug_skb[0]; + u32 seop = t1_espi_get_mon(adapter, 0x930, 0); + + if ((seop & 0xfff0fff) == 0xfff && skb) { + if (!skb->cb[0]) { + skb_copy_to_linear_data_offset(skb, + sizeof(struct cpl_tx_pkt), + ch_mac_addr, + ETH_ALEN); + skb_copy_to_linear_data_offset(skb, + skb->len - 10, + ch_mac_addr, + ETH_ALEN); + skb->cb[0] = 0xff; + } + + /* bump the reference count to avoid freeing of the + * skb once the DMA has completed. + */ + skb = skb_get(skb); + t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); + } + } + mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); +} + +/* + * Creates a t1_sge structure and returns suggested resource parameters. + */ +struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) +{ + struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); + int i; + + if (!sge) + return NULL; + + sge->adapter = adapter; + sge->netdev = adapter->port[0].dev; + sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; + sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; + + for_each_port(adapter, i) { + sge->port_stats[i] = alloc_percpu(struct sge_port_stats); + if (!sge->port_stats[i]) + goto nomem_port; + } + + init_timer(&sge->tx_reclaim_timer); + sge->tx_reclaim_timer.data = (unsigned long)sge; + sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; + + if (is_T2(sge->adapter)) { + init_timer(&sge->espibug_timer); + + if (adapter->params.nports > 1) { + tx_sched_init(sge); + sge->espibug_timer.function = espibug_workaround_t204; + } else + sge->espibug_timer.function = espibug_workaround; + sge->espibug_timer.data = (unsigned long)sge->adapter; + + sge->espibug_timeout = 1; + /* for T204, every 10ms */ + if (adapter->params.nports > 1) + sge->espibug_timeout = HZ/100; + } + + + p->cmdQ_size[0] = SGE_CMDQ0_E_N; + p->cmdQ_size[1] = SGE_CMDQ1_E_N; + p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; + p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; + if (sge->tx_sched) { + if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) + p->rx_coalesce_usecs = 15; + else + p->rx_coalesce_usecs = 50; + } else + p->rx_coalesce_usecs = 50; + + p->coalesce_enable = 0; + p->sample_interval_usecs = 0; + + return sge; +nomem_port: + while (i >= 0) { + free_percpu(sge->port_stats[i]); + --i; + } + kfree(sge); + return NULL; + +} diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h new file mode 100644 index 000000000..a1ba591b3 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/sge.h @@ -0,0 +1,93 @@ +/***************************************************************************** + * * + * File: sge.h * + * $Revision: 1.11 $ * + * $Date: 2005/06/21 22:10:55 $ * + * Description: * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . 
* + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_SGE_H_ +#define _CXGB_SGE_H_ + +#include +#include +#include + +struct sge_intr_counts { + unsigned int rx_drops; /* # of packets dropped due to no mem */ + unsigned int pure_rsps; /* # of non-payload responses */ + unsigned int unhandled_irqs; /* # of unhandled interrupts */ + unsigned int respQ_empty; /* # times respQ empty */ + unsigned int respQ_overflow; /* # respQ overflow (fatal) */ + unsigned int freelistQ_empty; /* # times freelist empty */ + unsigned int pkt_too_big; /* packet too large (fatal) */ + unsigned int pkt_mismatch; + unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */ + unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */ +}; + +struct sge_port_stats { + u64 rx_cso_good; /* # of successful RX csum offloads */ + u64 tx_cso; /* # of TX checksum offloads */ + u64 tx_tso; /* # of TSO requests */ + u64 vlan_xtract; /* # of VLAN tag extractions */ + u64 vlan_insert; /* # of VLAN tag insertions */ + u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */ +}; + +struct sk_buff; +struct net_device; +struct adapter; +struct sge_params; +struct sge; + +struct sge *t1_sge_create(struct adapter *, struct sge_params *); +int t1_sge_configure(struct sge *, struct sge_params *); +int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); +void t1_sge_destroy(struct sge *); +irqreturn_t t1_interrupt(int irq, void *cookie); +int t1_poll(struct napi_struct *, int); + +netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev); +void t1_vlan_mode(struct adapter *adapter, netdev_features_t features); +void t1_sge_start(struct sge *); +void t1_sge_stop(struct sge *); +int t1_sge_intr_error_handler(struct sge *); +void t1_sge_intr_enable(struct sge *); +void t1_sge_intr_disable(struct sge *); +void t1_sge_intr_clear(struct sge *); +const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge); +void t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *); +unsigned int t1_sched_update_parms(struct sge *, unsigned int, unsigned int, + unsigned int); + +#endif /* _CXGB_SGE_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c new file mode 100644 index 000000000..ea0f8741d --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/subr.c @@ -0,0 +1,1128 @@ +/***************************************************************************** + * * + * File: subr.c * + * $Revision: 1.27 $ * + * $Date: 2005/06/22 01:08:36 $ * + * Description: * + * Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. 
* + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * + * All rights reserved. * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: Dimitrios Michailidis * + * Tina Yang * + * Felix Marti * + * Scott Bardone * + * Kurt Ottaway * + * Frank DiMambro * + * * + * History: * + * * + ****************************************************************************/ + +#include "common.h" +#include "elmer0.h" +#include "regs.h" +#include "gmac.h" +#include "cphy.h" +#include "sge.h" +#include "tp.h" +#include "espi.h" + +/** + * t1_wait_op_done - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. Returns %0 if the operation completes and %1 + * otherwise. + */ +static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity, + int attempts, int delay) +{ + while (1) { + u32 val = readl(adapter->regs + reg) & mask; + + if (!!val == polarity) + return 0; + if (--attempts == 0) + return 1; + if (delay) + udelay(delay); + } +} + +#define TPI_ATTEMPTS 50 + +/* + * Write a register over the TPI interface (unlocked and locked versions). + */ +int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) +{ + int tpi_busy; + + writel(addr, adapter->regs + A_TPI_ADDR); + writel(value, adapter->regs + A_TPI_WR_DATA); + writel(F_TPIWR, adapter->regs + A_TPI_CSR); + + tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, + TPI_ATTEMPTS, 3); + if (tpi_busy) + pr_alert("%s: TPI write to 0x%x failed\n", + adapter->name, addr); + return tpi_busy; +} + +int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value) +{ + int ret; + + spin_lock(&adapter->tpi_lock); + ret = __t1_tpi_write(adapter, addr, value); + spin_unlock(&adapter->tpi_lock); + return ret; +} + +/* + * Read a register over the TPI interface (unlocked and locked versions). + */ +int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) +{ + int tpi_busy; + + writel(addr, adapter->regs + A_TPI_ADDR); + writel(0, adapter->regs + A_TPI_CSR); + + tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1, + TPI_ATTEMPTS, 3); + if (tpi_busy) + pr_alert("%s: TPI read from 0x%x failed\n", + adapter->name, addr); + else + *valp = readl(adapter->regs + A_TPI_RD_DATA); + return tpi_busy; +} + +int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp) +{ + int ret; + + spin_lock(&adapter->tpi_lock); + ret = __t1_tpi_read(adapter, addr, valp); + spin_unlock(&adapter->tpi_lock); + return ret; +} + +/* + * Set a TPI parameter. + */ +static void t1_tpi_par(adapter_t *adapter, u32 value) +{ + writel(V_TPIPAR(value), adapter->regs + A_TPI_PAR); +} + +/* + * Called when a port's link settings change to propagate the new values to the + * associated PHY and MAC. After performing the common tasks it invokes an + * OS-specific handler. 
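+ *
+ * In outline: query the PHY via get_link_status(), mirror the result in
+ * the port's link_config, sync the MAC with set_speed_duplex_fc() when
+ * autonegotiation is in use, and finally call t1_link_negotiated(), the
+ * OS-specific handler mentioned above.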
+ */ +void t1_link_changed(adapter_t *adapter, int port_id) +{ + int link_ok, speed, duplex, fc; + struct cphy *phy = adapter->port[port_id].phy; + struct link_config *lc = &adapter->port[port_id].link_config; + + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); + + lc->speed = speed < 0 ? SPEED_INVALID : speed; + lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; + if (!(lc->requested_fc & PAUSE_AUTONEG)) + fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { + /* Set MAC speed, duplex, and flow control to match PHY. */ + struct cmac *mac = adapter->port[port_id].mac; + + mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc); + lc->fc = (unsigned char)fc; + } + t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc); +} + +static int t1_pci_intr_handler(adapter_t *adapter) +{ + u32 pcix_cause; + + pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause); + + if (pcix_cause) { + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, + pcix_cause); + t1_fatal_err(adapter); /* PCI errors are fatal */ + } + return 0; +} + +#ifdef CONFIG_CHELSIO_T1_1G +#include "fpga_defs.h" + +/* + * PHY interrupt handler for FPGA boards. + */ +static int fpga_phy_intr_handler(adapter_t *adapter) +{ + int p; + u32 cause = readl(adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); + + for_each_port(adapter, p) + if (cause & (1 << p)) { + struct cphy *phy = adapter->port[p].phy; + int phy_cause = phy->ops->interrupt_handler(phy); + + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, p); + } + writel(cause, adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); + return 0; +} + +/* + * Slow path interrupt handler for FPGAs. + */ +static int fpga_slow_intr(adapter_t *adapter) +{ + u32 cause = readl(adapter->regs + A_PL_CAUSE); + + cause &= ~F_PL_INTR_SGE_DATA; + if (cause & F_PL_INTR_SGE_ERR) + t1_sge_intr_error_handler(adapter->sge); + + if (cause & FPGA_PCIX_INTERRUPT_GMAC) + fpga_phy_intr_handler(adapter); + + if (cause & FPGA_PCIX_INTERRUPT_TP) { + /* + * FPGA doesn't support MC4 interrupts and it requires + * this odd layer of indirection for MC5. + */ + u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); + + /* Clear TP interrupt */ + writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); + } + if (cause & FPGA_PCIX_INTERRUPT_PCIX) + t1_pci_intr_handler(adapter); + + /* Clear the interrupts just processed. */ + if (cause) + writel(cause, adapter->regs + A_PL_CAUSE); + + return cause != 0; +} +#endif + +/* + * Wait until Elmer's MI1 interface is ready for new operations. + */ +static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg) +{ + int attempts = 100, busy; + + do { + u32 val; + + __t1_tpi_read(adapter, mi1_reg, &val); + busy = val & F_MI1_OP_BUSY; + if (busy) + udelay(10); + } while (busy && --attempts); + if (busy) + pr_alert("%s: MDIO operation timed out\n", adapter->name); + return busy; +} + +/* + * MI1 MDIO initialization. + */ +static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi) +{ + u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1; + u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) | + V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv); + + if (!(bi->caps & SUPPORTED_10000baseT_Full)) + val |= V_MI1_SOF(1); + t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val); +} + +#if defined(CONFIG_CHELSIO_T1_1G) +/* + * Elmer MI1 MDIO read/write operations. 
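+ *
+ * A clause-22 (direct) read, as implemented by mi1_mdio_read() below,
+ * is a short TPI sequence:
+ *
+ *	write A_ELMER0_PORT0_MI1_ADDR   (PHY address | register address)
+ *	write A_ELMER0_PORT0_MI1_OP     (MI1_OP_DIRECT_READ)
+ *	poll  A_ELMER0_PORT0_MI1_OP     until F_MI1_OP_BUSY clears
+ *	read  A_ELMER0_PORT0_MI1_DATA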
+ */ +static int mi1_mdio_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); + unsigned int val; + + spin_lock(&adapter->tpi_lock); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, + A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_READ); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); + spin_unlock(&adapter->tpi_lock); + return val; +} + +static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(reg_addr) | V_MI1_PHY_ADDR(phy_addr); + + spin_lock(&adapter->tpi_lock); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); + __t1_tpi_write(adapter, + A_ELMER0_PORT0_MI1_OP, MI1_OP_DIRECT_WRITE); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + spin_unlock(&adapter->tpi_lock); + return 0; +} + +static const struct mdio_ops mi1_mdio_ops = { + .init = mi1_mdio_init, + .read = mi1_mdio_read, + .write = mi1_mdio_write, + .mode_support = MDIO_SUPPORTS_C22 +}; + +#endif + +static int mi1_mdio_ext_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); + unsigned int val; + + spin_lock(&adapter->tpi_lock); + + /* Write the address we want. */ + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, + MI1_OP_INDIRECT_ADDRESS); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + + /* Write the operation we want. */ + __t1_tpi_write(adapter, + A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + + /* Read the data. */ + __t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, &val); + spin_unlock(&adapter->tpi_lock); + return val; +} + +static int mi1_mdio_ext_write(struct net_device *dev, int phy_addr, + int mmd_addr, u16 reg_addr, u16 val) +{ + struct adapter *adapter = dev->ml_priv; + u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr); + + spin_lock(&adapter->tpi_lock); + + /* Write the address we want. */ + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, + MI1_OP_INDIRECT_ADDRESS); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + + /* Write the data. 
*/ + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val); + __t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE); + mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP); + spin_unlock(&adapter->tpi_lock); + return 0; +} + +static const struct mdio_ops mi1_mdio_ext_ops = { + .init = mi1_mdio_init, + .read = mi1_mdio_ext_read, + .write = mi1_mdio_ext_write, + .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22 +}; + +enum { + CH_BRD_T110_1CU, + CH_BRD_N110_1F, + CH_BRD_N210_1F, + CH_BRD_T210_1F, + CH_BRD_T210_1CU, + CH_BRD_N204_4CU, +}; + +static const struct board_info t1_board[] = { + { + .board = CHBT_BOARD_CHT110, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full, + .chip_term = CHBT_TERM_T1, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_MY3126, + .clock_core = 125000000, + .clock_mc3 = 150000000, + .clock_mc4 = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 1, + .mdio_mdiinv = 1, + .mdio_mdc = 1, + .mdio_phybaseaddr = 1, + .gmac = &t1_pm3393_ops, + .gphy = &t1_my3126_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio T110 1x10GBase-CX4 TOE", + }, + + { + .board = CHBT_BOARD_N110, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, + .chip_term = CHBT_TERM_T1, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_88X2010, + .clock_core = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 1, + .mdio_phybaseaddr = 0, + .gmac = &t1_pm3393_ops, + .gphy = &t1_mv88x201x_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio N110 1x10GBaseX NIC", + }, + + { + .board = CHBT_BOARD_N210, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_88X2010, + .clock_core = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 1, + .mdio_phybaseaddr = 0, + .gmac = &t1_pm3393_ops, + .gphy = &t1_mv88x201x_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio N210 1x10GBaseX NIC", + }, + + { + .board = CHBT_BOARD_CHT210, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_88X2010, + .clock_core = 125000000, + .clock_mc3 = 133000000, + .clock_mc4 = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 1, + .mdio_phybaseaddr = 0, + .gmac = &t1_pm3393_ops, + .gphy = &t1_mv88x201x_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio T210 1x10GBaseX TOE", + }, + + { + .board = CHBT_BOARD_CHT210, + .port_number = 1, + .caps = SUPPORTED_10000baseT_Full, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_PM3393, + .chip_phy = CHBT_PHY_MY3126, + .clock_core = 125000000, + .clock_mc3 = 133000000, + .clock_mc4 = 125000000, + .espi_nports = 1, + .clock_elmer0 = 44, + .mdio_mdien = 1, + .mdio_mdiinv = 1, + .mdio_mdc = 1, + .mdio_phybaseaddr = 1, + .gmac = &t1_pm3393_ops, + .gphy = &t1_my3126_ops, + .mdio_ops = &mi1_mdio_ext_ops, + .desc = "Chelsio T210 1x10GBase-CX4 TOE", + }, + +#ifdef CONFIG_CHELSIO_T1_1G + { + .board = CHBT_BOARD_CHN204, + .port_number = 4, + .caps = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full + | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_PAUSE | SUPPORTED_TP, + .chip_term = CHBT_TERM_T2, + .chip_mac = CHBT_MAC_VSC7321, + .chip_phy = CHBT_PHY_88E1111, + .clock_core = 100000000, + 
.espi_nports = 4, + .clock_elmer0 = 44, + .mdio_mdien = 0, + .mdio_mdiinv = 0, + .mdio_mdc = 0, + .mdio_phybaseaddr = 4, + .gmac = &t1_vsc7326_ops, + .gphy = &t1_mv88e1xxx_ops, + .mdio_ops = &mi1_mdio_ops, + .desc = "Chelsio N204 4x100/1000BaseT NIC", + }, +#endif + +}; + +const struct pci_device_id t1_pci_tbl[] = { + CH_DEVICE(8, 0, CH_BRD_T110_1CU), + CH_DEVICE(8, 1, CH_BRD_T110_1CU), + CH_DEVICE(7, 0, CH_BRD_N110_1F), + CH_DEVICE(10, 1, CH_BRD_N210_1F), + CH_DEVICE(11, 1, CH_BRD_T210_1F), + CH_DEVICE(14, 1, CH_BRD_T210_1CU), + CH_DEVICE(16, 1, CH_BRD_N204_4CU), + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, t1_pci_tbl); + +/* + * Return the board_info structure with a given index. Out-of-range indices + * return NULL. + */ +const struct board_info *t1_get_board_info(unsigned int board_id) +{ + return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL; +} + +struct chelsio_vpd_t { + u32 format_version; + u8 serial_number[16]; + u8 mac_base_address[6]; + u8 pad[2]; /* make multiple-of-4 size requirement explicit */ +}; + +#define EEPROMSIZE (8 * 1024) +#define EEPROM_MAX_POLL 4 + +/* + * Read SEEPROM. A zero is written to the flag register when the address is + * written to the Control register. The hardware device will set the flag to a + * one when 4B have been transferred to the Data register. + */ +int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data) +{ + int i = EEPROM_MAX_POLL; + u16 val; + u32 v; + + if (addr >= EEPROMSIZE || (addr & 3)) + return -EINVAL; + + pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr); + do { + udelay(50); + pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val); + } while (!(val & F_VPD_OP_FLAG) && --i); + + if (!(val & F_VPD_OP_FLAG)) { + pr_err("%s: reading EEPROM address 0x%x failed\n", + adapter->name, addr); + return -EIO; + } + pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, &v); + *data = cpu_to_le32(v); + return 0; +} + +static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd) +{ + int addr, ret = 0; + + for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32)) + ret = t1_seeprom_read(adapter, addr, + (__le32 *)((u8 *)vpd + addr)); + + return ret; +} + +/* + * Read a port's MAC address from the VPD ROM. + */ +static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[]) +{ + struct chelsio_vpd_t vpd; + + if (t1_eeprom_vpd_get(adapter, &vpd)) + return 1; + memcpy(mac_addr, vpd.mac_base_address, 5); + mac_addr[5] = vpd.mac_base_address[5] + index; + return 0; +} + +/* + * Set up the MAC/PHY according to the requested link settings. + * + * If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired and reset. + * + * If the PHY does not auto-negotiate we just reset it. + * + * If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. 
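+ *
+ * In outline: when the PHY supports autonegotiation the requested pause
+ * settings are folded into lc->advertising and either autoneg is enabled
+ * (and the PHY reset) or, for a forced link, MAC and PHY are programmed
+ * directly; without autoneg support the MAC is set to the requested flow
+ * control and the PHY simply reset.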
+ */ +int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) +{ + unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE); + if (fc) { + if (fc == ((PAUSE_RX | PAUSE_TX) & + (mac->adapter->params.nports < 2))) + lc->advertising |= ADVERTISED_PAUSE; + else { + lc->advertising |= ADVERTISED_ASYM_PAUSE; + if (fc == PAUSE_RX) + lc->advertising |= ADVERTISED_PAUSE; + } + } + phy->ops->advertise(phy, lc->advertising); + + if (lc->autoneg == AUTONEG_DISABLE) { + lc->speed = lc->requested_speed; + lc->duplex = lc->requested_duplex; + lc->fc = (unsigned char)fc; + mac->ops->set_speed_duplex_fc(mac, lc->speed, + lc->duplex, fc); + /* Also disables autoneg */ + phy->state = PHY_AUTONEG_RDY; + phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); + phy->ops->reset(phy, 0); + } else { + phy->state = PHY_AUTONEG_EN; + phy->ops->autoneg_enable(phy); /* also resets PHY */ + } + } else { + phy->state = PHY_AUTONEG_RDY; + mac->ops->set_speed_duplex_fc(mac, -1, -1, fc); + lc->fc = (unsigned char)fc; + phy->ops->reset(phy, 0); + } + return 0; +} + +/* + * External interrupt handler for boards using elmer0. + */ +int t1_elmer0_ext_intr_handler(adapter_t *adapter) +{ + struct cphy *phy; + int phy_cause; + u32 cause; + + t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause); + + switch (board_info(adapter)->board) { +#ifdef CONFIG_CHELSIO_T1_1G + case CHBT_BOARD_CHT204: + case CHBT_BOARD_CHT204E: + case CHBT_BOARD_CHN204: + case CHBT_BOARD_CHT204V: { + int i, port_bit; + for_each_port(adapter, i) { + port_bit = i + 1; + if (!(cause & (1 << port_bit))) + continue; + + phy = adapter->port[i].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, i); + } + break; + } + case CHBT_BOARD_CHT101: + if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ + phy = adapter->port[0].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, 0); + } + break; + case CHBT_BOARD_7500: { + int p; + /* + * Elmer0's interrupt cause isn't useful here because there is + * only one bit that can be set for all 4 ports. This means + * we are forced to check every PHY's interrupt status + * register to see who initiated the interrupt. + */ + for_each_port(adapter, p) { + phy = adapter->port[p].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, p); + } + break; + } +#endif + case CHBT_BOARD_CHT210: + case CHBT_BOARD_N210: + case CHBT_BOARD_N110: + if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */ + phy = adapter->port[0].phy; + phy_cause = phy->ops->interrupt_handler(phy); + if (phy_cause & cphy_cause_link_change) + t1_link_changed(adapter, 0); + } + break; + case CHBT_BOARD_8000: + case CHBT_BOARD_CHT110: + if (netif_msg_intr(adapter)) + dev_dbg(&adapter->pdev->dev, + "External interrupt cause 0x%x\n", cause); + if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ + struct cmac *mac = adapter->port[0].mac; + + mac->ops->interrupt_handler(mac); + } + if (cause & ELMER0_GP_BIT5) { /* XPAK MOD_DETECT */ + u32 mod_detect; + + t1_tpi_read(adapter, + A_ELMER0_GPI_STAT, &mod_detect); + if (netif_msg_link(adapter)) + dev_info(&adapter->pdev->dev, "XPAK %s\n", + mod_detect ? 
"removed" : "inserted"); + } + break; + } + t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause); + return 0; +} + +/* Enables all interrupts. */ +void t1_interrupts_enable(adapter_t *adapter) +{ + unsigned int i; + + adapter->slow_intr_mask = F_PL_INTR_SGE_ERR | F_PL_INTR_TP; + + t1_sge_intr_enable(adapter->sge); + t1_tp_intr_enable(adapter->tp); + if (adapter->espi) { + adapter->slow_intr_mask |= F_PL_INTR_ESPI; + t1_espi_intr_enable(adapter->espi); + } + + /* Enable MAC/PHY interrupts for each port. */ + for_each_port(adapter, i) { + adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac); + adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy); + } + + /* Enable PCIX & external chip interrupts on ASIC boards. */ + if (t1_is_asic(adapter)) { + u32 pl_intr = readl(adapter->regs + A_PL_ENABLE); + + /* PCI-X interrupts */ + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, + 0xffffffff); + + adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX; + pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX; + writel(pl_intr, adapter->regs + A_PL_ENABLE); + } +} + +/* Disables all interrupts. */ +void t1_interrupts_disable(adapter_t* adapter) +{ + unsigned int i; + + t1_sge_intr_disable(adapter->sge); + t1_tp_intr_disable(adapter->tp); + if (adapter->espi) + t1_espi_intr_disable(adapter->espi); + + /* Disable MAC/PHY interrupts for each port. */ + for_each_port(adapter, i) { + adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac); + adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy); + } + + /* Disable PCIX & external chip interrupts. */ + if (t1_is_asic(adapter)) + writel(0, adapter->regs + A_PL_ENABLE); + + /* PCI-X interrupts */ + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); + + adapter->slow_intr_mask = 0; +} + +/* Clears all interrupts */ +void t1_interrupts_clear(adapter_t* adapter) +{ + unsigned int i; + + t1_sge_intr_clear(adapter->sge); + t1_tp_intr_clear(adapter->tp); + if (adapter->espi) + t1_espi_intr_clear(adapter->espi); + + /* Clear MAC/PHY interrupts for each port. */ + for_each_port(adapter, i) { + adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac); + adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy); + } + + /* Enable interrupts for external devices. */ + if (t1_is_asic(adapter)) { + u32 pl_intr = readl(adapter->regs + A_PL_CAUSE); + + writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX, + adapter->regs + A_PL_CAUSE); + } + + /* PCI-X interrupts */ + pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff); +} + +/* + * Slow path interrupt handler for ASICs. + */ +static int asic_slow_intr(adapter_t *adapter) +{ + u32 cause = readl(adapter->regs + A_PL_CAUSE); + + cause &= adapter->slow_intr_mask; + if (!cause) + return 0; + if (cause & F_PL_INTR_SGE_ERR) + t1_sge_intr_error_handler(adapter->sge); + if (cause & F_PL_INTR_TP) + t1_tp_intr_handler(adapter->tp); + if (cause & F_PL_INTR_ESPI) + t1_espi_intr_handler(adapter->espi); + if (cause & F_PL_INTR_PCIX) + t1_pci_intr_handler(adapter); + if (cause & F_PL_INTR_EXT) + t1_elmer0_ext_intr(adapter); + + /* Clear the interrupts just processed. */ + writel(cause, adapter->regs + A_PL_CAUSE); + readl(adapter->regs + A_PL_CAUSE); /* flush writes */ + return 1; +} + +int t1_slow_intr_handler(adapter_t *adapter) +{ +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(adapter)) + return fpga_slow_intr(adapter); +#endif + return asic_slow_intr(adapter); +} + +/* Power sequencing is a work-around for Intel's XPAKs. 
*/ +static void power_sequence_xpak(adapter_t* adapter) +{ + u32 mod_detect; + u32 gpo; + + /* Check for XPAK */ + t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); + if (!(ELMER0_GP_BIT5 & mod_detect)) { + /* XPAK is present */ + t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); + gpo |= ELMER0_GP_BIT18; + t1_tpi_write(adapter, A_ELMER0_GPO, gpo); + } +} + +int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, + struct adapter_params *p) +{ + p->chip_version = bi->chip_term; + p->is_asic = (p->chip_version != CHBT_TERM_FPGA); + if (p->chip_version == CHBT_TERM_T1 || + p->chip_version == CHBT_TERM_T2 || + p->chip_version == CHBT_TERM_FPGA) { + u32 val = readl(adapter->regs + A_TP_PC_CONFIG); + + val = G_TP_PC_REV(val); + if (val == 2) + p->chip_revision = TERM_T1B; + else if (val == 3) + p->chip_revision = TERM_T2; + else + return -1; + } else + return -1; + return 0; +} + +/* + * Enable board components other than the Chelsio chip, such as external MAC + * and PHY. + */ +static int board_init(adapter_t *adapter, const struct board_info *bi) +{ + switch (bi->board) { + case CHBT_BOARD_8000: + case CHBT_BOARD_N110: + case CHBT_BOARD_N210: + case CHBT_BOARD_CHT210: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); + break; + case CHBT_BOARD_CHT110: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); + + /* TBD XXX Might not need. This fixes a problem + * described in the Intel SR XPAK errata. + */ + power_sequence_xpak(adapter); + break; +#ifdef CONFIG_CHELSIO_T1_1G + case CHBT_BOARD_CHT204E: + /* add config space write here */ + case CHBT_BOARD_CHT204: + case CHBT_BOARD_CHT204V: + case CHBT_BOARD_CHN204: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); + break; + case CHBT_BOARD_CHT101: + case CHBT_BOARD_7500: + t1_tpi_par(adapter, 0xf); + t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); + break; +#endif + } + return 0; +} + +/* + * Initialize and configure the Terminator HW modules. Note that external + * MAC and PHYs are initialized separately. + */ +int t1_init_hw_modules(adapter_t *adapter) +{ + int err = -EIO; + const struct board_info *bi = board_info(adapter); + + if (!bi->clock_mc4) { + u32 val = readl(adapter->regs + A_MC4_CFG); + + writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG); + writel(F_M_BUS_ENABLE | F_TCAM_RESET, + adapter->regs + A_MC5_CONFIG); + } + + if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac, + bi->espi_nports)) + goto out_err; + + if (t1_tp_reset(adapter->tp, &adapter->params.tp, bi->clock_core)) + goto out_err; + + err = t1_sge_configure(adapter->sge, &adapter->params.sge); + if (err) + goto out_err; + + err = 0; +out_err: + return err; +} + +/* + * Determine a card's PCI mode. + */ +static void get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) +{ + static const unsigned short speed_map[] = { 33, 66, 100, 133 }; + u32 pci_mode; + + pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode); + p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)]; + p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32; + p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0; +} + +/* + * Release the structures holding the SW per-Terminator-HW-module state. 
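+ *
+ * Per-port MAC and PHY objects are destroyed first, then the SGE, TP and
+ * ESPI modules, each only if it was actually allocated.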
+ */ +void t1_free_sw_modules(adapter_t *adapter) +{ + unsigned int i; + + for_each_port(adapter, i) { + struct cmac *mac = adapter->port[i].mac; + struct cphy *phy = adapter->port[i].phy; + + if (mac) + mac->ops->destroy(mac); + if (phy) + phy->ops->destroy(phy); + } + + if (adapter->sge) + t1_sge_destroy(adapter->sge); + if (adapter->tp) + t1_tp_destroy(adapter->tp); + if (adapter->espi) + t1_espi_destroy(adapter->espi); +} + +static void init_link_config(struct link_config *lc, + const struct board_info *bi) +{ + lc->supported = bi->caps; + lc->requested_speed = lc->speed = SPEED_INVALID; + lc->requested_duplex = lc->duplex = DUPLEX_INVALID; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising = lc->supported; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/* + * Allocate and initialize the data structures that hold the SW state of + * the Terminator HW modules. + */ +int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi) +{ + unsigned int i; + + adapter->params.brd_info = bi; + adapter->params.nports = bi->port_number; + adapter->params.stats_update_period = bi->gmac->stats_update_period; + + adapter->sge = t1_sge_create(adapter, &adapter->params.sge); + if (!adapter->sge) { + pr_err("%s: SGE initialization failed\n", + adapter->name); + goto error; + } + + if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) { + pr_err("%s: ESPI initialization failed\n", + adapter->name); + goto error; + } + + adapter->tp = t1_tp_create(adapter, &adapter->params.tp); + if (!adapter->tp) { + pr_err("%s: TP initialization failed\n", + adapter->name); + goto error; + } + + board_init(adapter, bi); + bi->mdio_ops->init(adapter, bi); + if (bi->gphy->reset) + bi->gphy->reset(adapter); + if (bi->gmac->reset) + bi->gmac->reset(adapter); + + for_each_port(adapter, i) { + u8 hw_addr[6]; + struct cmac *mac; + int phy_addr = bi->mdio_phybaseaddr + i; + + adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev, + phy_addr, bi->mdio_ops); + if (!adapter->port[i].phy) { + pr_err("%s: PHY %d initialization failed\n", + adapter->name, i); + goto error; + } + + adapter->port[i].mac = mac = bi->gmac->create(adapter, i); + if (!mac) { + pr_err("%s: MAC %d initialization failed\n", + adapter->name, i); + goto error; + } + + /* + * Get the port's MAC addresses either from the EEPROM if one + * exists or the one hardcoded in the MAC. 
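+ * (vpd_macaddress_get() above derives per-port addresses from the VPD
+ * base MAC by adding the port index to its last octet.)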
+ */ + if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY) + mac->ops->macaddress_get(mac, hw_addr); + else if (vpd_macaddress_get(adapter, i, hw_addr)) { + pr_err("%s: could not read MAC address from VPD ROM\n", + adapter->port[i].dev->name); + goto error; + } + memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN); + init_link_config(&adapter->port[i].link_config, bi); + } + + get_pci_mode(adapter, &adapter->params.pci); + t1_interrupts_clear(adapter); + return 0; + +error: + t1_free_sw_modules(adapter); + return -1; +} diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h new file mode 100644 index 000000000..7f79cc7ce --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h @@ -0,0 +1,1642 @@ +/***************************************************************************** + * * + * File: suni1x10gexp_regs.h * + * $Revision: 1.9 $ * + * $Date: 2005/06/22 00:17:04 $ * + * Description: * + * PMC/SIERRA (pm3393) MAC-PHY functionality. * + * part of the Chelsio 10Gb Ethernet Driver. * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License, version 2, as * + * published by the Free Software Foundation. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, see . * + * * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * + * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * + * * + * http://www.chelsio.com * + * * + * Maintainers: maintainers@chelsio.com * + * * + * Authors: PMC/SIERRA * + * * + * History: * + * * + ****************************************************************************/ + +#ifndef _CXGB_SUNI1x10GEXP_REGS_H_ +#define _CXGB_SUNI1x10GEXP_REGS_H_ + +/* +** Space allocated for each Exact Match Filter +** There are 8 filter configurations +*/ +#define SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER 0x0003 + +#define mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_FILTER ) + +/* +** Space allocated for VLAN-Id Filter +** There are 8 filter configurations +*/ +#define SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER 0x0001 + +#define mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId) ( (filterId) * SUNI1x10GEXP_REG_SIZEOF_MAC_VID_FILTER ) + +/* +** Space allocated for each MSTAT Counter +*/ +#define SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT 0x0004 + +#define mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId) ( (countId) * SUNI1x10GEXP_REG_SIZEOF_MSTAT_COUNT ) + + +/******************************************************************************/ +/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/ +/******************************************************************************/ +/* Refer to the Register Bit Masks bellow for the naming of each register and */ +/* to the S/UNI-1x10GE-XP Data Sheet for the signification of each bit */ +/******************************************************************************/ + + +#define SUNI1x10GEXP_REG_IDENTIFICATION 0x0000 +#define SUNI1x10GEXP_REG_PRODUCT_REVISION 0x0001 +#define SUNI1x10GEXP_REG_CONFIG_AND_RESET_CONTROL 0x0002 +#define SUNI1x10GEXP_REG_LOOPBACK_MISC_CTRL 0x0003 +#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004 +#define SUNI1x10GEXP_REG_GLOBAL_PERFORMANCE_MONITOR_UPDATE 0x0005 + +#define SUNI1x10GEXP_REG_MDIO_COMMAND 0x0006 +#define SUNI1x10GEXP_REG_MDIO_INTERRUPT_ENABLE 0x0007 +#define 
SUNI1x10GEXP_REG_MDIO_INTERRUPT_STATUS 0x0008 +#define SUNI1x10GEXP_REG_MMD_PHY_ADDRESS 0x0009 +#define SUNI1x10GEXP_REG_MMD_CONTROL_ADDRESS_DATA 0x000A +#define SUNI1x10GEXP_REG_MDIO_READ_STATUS_DATA 0x000B + +#define SUNI1x10GEXP_REG_OAM_INTF_CTRL 0x000C +#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D +#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E +#define SUNI1x10GEXP_REG_FREE 0x000F + +#define SUNI1x10GEXP_REG_XTEF_MISC_CTRL 0x0010 +#define SUNI1x10GEXP_REG_XRF_MISC_CTRL 0x0011 + +#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_1 0x0100 +#define SUNI1x10GEXP_REG_SERDES_3125_CONFIG_2 0x0101 +#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102 +#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_VISIBLE 0x0103 +#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104 +#define SUNI1x10GEXP_REG_SERDES_3125_TEST_CONFIG 0x0107 + +#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040 +#define SUNI1x10GEXP_REG_RXXG_CONFIG_2 0x2041 +#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042 +#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043 +#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045 +#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046 +#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047 +#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048 +#define SUNI1x10GEXP_REG_RXXG_RECEIVE_FIFO_THRESHOLD 0x2049 +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId)) +#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId)) +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW 0x204A +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID 0x204B +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH 0x204C +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_LOW 0x2050 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_MID 0x2051 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_2_HIGH 0x2052 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_LOW 0x2053 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_MID 0x2054 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_3_HIGH 0x2055 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_LOW 0x2056 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_MID 0x2057 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_4_HIGH 0x2058 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_LOW 0x2059 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_MID 0x205A +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_5_HIGH 0x205B +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_LOW 0x205C +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_MID 0x205D +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_6_HIGH 0x205E +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_LOW 0x205F +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_MID 0x2060 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_7_HIGH 0x2061 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_0 0x2062 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_1 0x2063 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_2 0x2064 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_3 0x2065 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_4 0x2066 +#define 
SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_5 0x2067 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_6 0x2068 +#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID_7 0x2069 +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C +#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D +#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E +#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_1 0x206F +#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070 + +#define SUNI1x10GEXP_REG_XRF_PATTERN_GEN_CTRL 0x2081 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_0 0x2084 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_1 0x2085 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_2 0x2086 +#define SUNI1x10GEXP_REG_XRF_8BTB_ERR_COUNT_LANE_3 0x2087 +#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088 +#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089 +#define SUNI1x10GEXP_REG_XRF_ERR_STATUS 0x208A +#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B +#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C +#define SUNI1x10GEXP_REG_XRF_CODE_ERR_THRES 0x2092 + +#define SUNI1x10GEXP_REG_RXOAM_CONFIG 0x20C0 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_CONFIG 0x20C1 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_CONFIG 0x20C2 +#define SUNI1x10GEXP_REG_RXOAM_CONFIG_2 0x20C3 +#define SUNI1x10GEXP_REG_RXOAM_HEC_CONFIG 0x20C4 +#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_THRES 0x20C5 +#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7 +#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8 +#define SUNI1x10GEXP_REG_RXOAM_STATUS 0x20C9 +#define SUNI1x10GEXP_REG_RXOAM_HEC_ERR_COUNT 0x20CA +#define SUNI1x10GEXP_REG_RXOAM_FIFO_OVERFLOW_COUNT 0x20CB +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_LSB 0x20CC +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_COUNT_MSB 0x20CD +#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_LSB 0x20CE +#define SUNI1x10GEXP_REG_RXOAM_FILTER_1_MISMATCH_COUNT_MSB 0x20CF +#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_LSB 0x20D0 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_2_MISMATCH_COUNT_MSB 0x20D1 +#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_LSB 0x20D2 +#define SUNI1x10GEXP_REG_RXOAM_OAM_EXTRACT_COUNT_MSB 0x20D3 +#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_LSB 0x20D4 +#define SUNI1x10GEXP_REG_RXOAM_MINI_PACKET_COUNT_MSB 0x20D5 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_LSB 0x20D6 +#define SUNI1x10GEXP_REG_RXOAM_FILTER_MISMATCH_THRES_MSB 0x20D7 + +#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107 +#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_ADDRESS 0x2109 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_LOW 0x210A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_MIDDLE 0x210B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_WRITE_DATA_HIGH 0x210C +#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId) (0x2110 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) +#define mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId) (0x2111 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) +#define 
mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId) (0x2112 + mSUNI1x10GEXP_MSTAT_COUNT_OFFSET(countId)) +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_MID 0x2111 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_HIGH 0x2112 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_RESVD 0x2113 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_MID 0x2115 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_HIGH 0x2116 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_RESVD 0x2117 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_LOW 0x2118 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_MID 0x2119 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_HIGH 0x211A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_2_RESVD 0x211B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_LOW 0x211C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_MID 0x211D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_HIGH 0x211E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_3_RESVD 0x211F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_MID 0x2121 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_HIGH 0x2122 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_RESVD 0x2123 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_MID 0x2125 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_HIGH 0x2126 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_RESVD 0x2127 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_MID 0x2129 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_HIGH 0x212A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_RESVD 0x212B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_LOW 0x212C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_MID 0x212D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_HIGH 0x212E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_7_RESVD 0x212F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_MID 0x2131 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_HIGH 0x2132 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_RESVD 0x2133 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_LOW 0x2134 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_MID 0x2135 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_HIGH 0x2136 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_9_RESVD 0x2137 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_MID 0x2139 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_HIGH 0x213A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_RESVD 0x213B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_MID 0x213D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_HIGH 0x213E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_RESVD 0x213F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_MID 0x2141 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_HIGH 0x2142 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_RESVD 0x2143 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_MID 0x2145 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_HIGH 0x2146 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_RESVD 0x2147 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_LOW 0x2148 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_MID 0x2149 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_HIGH 0x214A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_14_RESVD 0x214B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_MID 0x214D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_HIGH 0x214E +#define 
SUNI1x10GEXP_REG_MSTAT_COUNTER_15_RESVD 0x214F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_MID 0x2151 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_HIGH 0x2152 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_RESVD 0x2153 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_MID 0x2155 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_HIGH 0x2156 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_RESVD 0x2157 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_MID 0x2159 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_HIGH 0x215A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_RESVD 0x215B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_LOW 0x215C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_MID 0x215D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_HIGH 0x215E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_19_RESVD 0x215F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_LOW 0x2160 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_MID 0x2161 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_HIGH 0x2162 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_20_RESVD 0x2163 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_LOW 0x2164 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_MID 0x2165 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_HIGH 0x2166 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_21_RESVD 0x2167 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_LOW 0x2168 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_MID 0x2169 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_HIGH 0x216A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_22_RESVD 0x216B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_LOW 0x216C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_MID 0x216D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_HIGH 0x216E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_23_RESVD 0x216F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_LOW 0x2170 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_MID 0x2171 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_HIGH 0x2172 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_24_RESVD 0x2173 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_LOW 0x2174 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_MID 0x2175 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_HIGH 0x2176 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_25_RESVD 0x2177 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_LOW 0x2178 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_MID 0x2179 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_HIGH 0x217a +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_26_RESVD 0x217b +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_LOW 0x217c +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_MID 0x217d +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_HIGH 0x217e +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_27_RESVD 0x217f +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_LOW 0x2180 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_MID 0x2181 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_HIGH 0x2182 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_28_RESVD 0x2183 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_LOW 0x2184 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_MID 0x2185 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_HIGH 0x2186 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_29_RESVD 0x2187 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_LOW 0x2188 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_MID 0x2189 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_HIGH 0x218A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_30_RESVD 0x218B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_LOW 0x218C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_MID 0x218D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_HIGH 0x218E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_31_RESVD 0x218F 
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_LOW 0x2190 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_MID 0x2191 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_HIGH 0x2192 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_32_RESVD 0x2193 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_MID 0x2195 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_HIGH 0x2196 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_RESVD 0x2197 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_LOW 0x2198 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_MID 0x2199 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_HIGH 0x219A +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_34_RESVD 0x219B +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_MID 0x219D +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_HIGH 0x219E +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_RESVD 0x219F +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_MID 0x21A1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_HIGH 0x21A2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_RESVD 0x21A3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_LOW 0x21A4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_MID 0x21A5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_HIGH 0x21A6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_37_RESVD 0x21A7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_MID 0x21A9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_HIGH 0x21AA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_RESVD 0x21AB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_LOW 0x21AC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_MID 0x21AD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_HIGH 0x21AE +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_39_RESVD 0x21AF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_MID 0x21B1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_HIGH 0x21B2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_RESVD 0x21B3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_LOW 0x21B4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_MID 0x21B5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_HIGH 0x21B6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_41_RESVD 0x21B7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_MID 0x21B9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_HIGH 0x21BA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_RESVD 0x21BB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_MID 0x21BD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_HIGH 0x21BE +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_RESVD 0x21BF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_LOW 0x21C0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_MID 0x21C1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_HIGH 0x21C2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_44_RESVD 0x21C3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_LOW 0x21C4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_MID 0x21C5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_HIGH 0x21C6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_45_RESVD 0x21C7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_LOW 0x21C8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_MID 0x21C9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_HIGH 0x21CA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_46_RESVD 0x21CB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_LOW 0x21CC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_MID 0x21CD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_HIGH 0x21CE +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_47_RESVD 0x21CF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_LOW 
0x21D0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_MID 0x21D1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_HIGH 0x21D2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_48_RESVD 0x21D3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_LOW 0x21D4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_MID 0x21D5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_HIGH 0x21D6 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_49_RESVD 0x21D7 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_LOW 0x21D8 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_MID 0x21D9 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_HIGH 0x21DA +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_50_RESVD 0x21DB +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_LOW 0x21DC +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_MID 0x21DD +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_HIGH 0x21DE +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_51_RESVD 0x21DF +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_LOW 0x21E0 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_MID 0x21E1 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_HIGH 0x21E2 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_52_RESVD 0x21E3 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_LOW 0x21E4 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_MID 0x21E5 +#define SUNI1x10GEXP_REG_MSTAT_COUNTER_53_HIGH 0x21E6 +#define SUNI1x10GEXP_CNTR_MAC_ETHERNET_NUM 51 + +#define SUNI1x10GEXP_REG_IFLX_GLOBAL_CONFIG 0x2200 +#define SUNI1x10GEXP_REG_IFLX_CHANNEL_PROVISION 0x2201 +#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209 +#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A +#define SUNI1x10GEXP_REG_IFLX_INDIR_CHANNEL_ADDRESS 0x220D +#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_LOW_LIMIT_PROVISION 0x220E +#define SUNI1x10GEXP_REG_IFLX_INDIR_LOGICAL_FIFO_HIGH_LIMIT 0x220F +#define SUNI1x10GEXP_REG_IFLX_INDIR_FULL_ALMOST_FULL_STATUS_LIMIT 0x2210 +#define SUNI1x10GEXP_REG_IFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_LIMIT 0x2211 + +#define SUNI1x10GEXP_REG_PL4MOS_CONFIG 0x2240 +#define SUNI1x10GEXP_REG_PL4MOS_MASK 0x2241 +#define SUNI1x10GEXP_REG_PL4MOS_FAIRNESS_MASKING 0x2242 +#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST1 0x2243 +#define SUNI1x10GEXP_REG_PL4MOS_MAXBURST2 0x2244 +#define SUNI1x10GEXP_REG_PL4MOS_TRANSFER_SIZE 0x2245 + +#define SUNI1x10GEXP_REG_PL4ODP_CONFIG 0x2280 +#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282 +#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283 +#define SUNI1x10GEXP_REG_PL4ODP_CONFIG_MAX_T 0x2284 + +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300 +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301 +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302 +#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_LIMITS 0x2303 +#define SUNI1x10GEXP_REG_PL4IO_CALENDAR_REPETITIONS 0x2304 +#define SUNI1x10GEXP_REG_PL4IO_CONFIG 0x2305 + +#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040 +#define SUNI1x10GEXP_REG_TXXG_CONFIG_2 0x3041 +#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042 +#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043 +#define SUNI1x10GEXP_REG_TXXG_STATUS 0x3044 +#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045 +#define SUNI1x10GEXP_REG_TXXG_MIN_FRAME_SIZE 0x3046 +#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047 +#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048 +#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049 +#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER 0x304D +#define SUNI1x10GEXP_REG_TXXG_PAUSE_TIMER_INTERVAL 0x304E +#define SUNI1x10GEXP_REG_TXXG_FILTER_ERROR_COUNTER 0x3051 +#define SUNI1x10GEXP_REG_TXXG_PAUSE_QUANTUM_CONFIG 0x3052 + +#define SUNI1x10GEXP_REG_XTEF_CTRL 0x3080 +#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084 +#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 
0x3085 +#define SUNI1x10GEXP_REG_XTEF_VISIBILITY 0x3086 + +#define SUNI1x10GEXP_REG_TXOAM_OAM_CONFIG 0x30C0 +#define SUNI1x10GEXP_REG_TXOAM_MINI_RATE_CONFIG 0x30C1 +#define SUNI1x10GEXP_REG_TXOAM_MINI_GAP_FIFO_CONFIG 0x30C2 +#define SUNI1x10GEXP_REG_TXOAM_P1P2_STATIC_VALUES 0x30C3 +#define SUNI1x10GEXP_REG_TXOAM_P3P4_STATIC_VALUES 0x30C4 +#define SUNI1x10GEXP_REG_TXOAM_P5P6_STATIC_VALUES 0x30C5 +#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6 +#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7 +#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_LSB 0x30C8 +#define SUNI1x10GEXP_REG_TXOAM_INSERT_COUNT_MSB 0x30C9 +#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_LSB 0x30CA +#define SUNI1x10GEXP_REG_TXOAM_OAM_MINI_COUNT_MSB 0x30CB +#define SUNI1x10GEXP_REG_TXOAM_P1P2_MINI_MASK 0x30CC +#define SUNI1x10GEXP_REG_TXOAM_P3P4_MINI_MASK 0x30CD +#define SUNI1x10GEXP_REG_TXOAM_P5P6_MINI_MASK 0x30CE +#define SUNI1x10GEXP_REG_TXOAM_COSET 0x30CF +#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_LSB 0x30D0 +#define SUNI1x10GEXP_REG_TXOAM_EMPTY_FIFO_INS_OP_CNT_MSB 0x30D1 +#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_LSB 0x30D2 +#define SUNI1x10GEXP_REG_TXOAM_STATIC_VALUE_MINI_COUNT_MSB 0x30D3 + + +#define SUNI1x10GEXP_REG_EFLX_GLOBAL_CONFIG 0x3200 +#define SUNI1x10GEXP_REG_EFLX_ERCU_GLOBAL_STATUS 0x3201 +#define SUNI1x10GEXP_REG_EFLX_INDIR_CHANNEL_ADDRESS 0x3202 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_LOW_LIMIT 0x3203 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_HIGH_LIMIT 0x3204 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FULL_ALMOST_FULL_STATUS_AND_LIMIT 0x3205 +#define SUNI1x10GEXP_REG_EFLX_INDIR_EMPTY_ALMOST_EMPTY_STATUS_AND_LIMIT 0x3206 +#define SUNI1x10GEXP_REG_EFLX_INDIR_FIFO_CUT_THROUGH_THRESHOLD 0x3207 +#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C +#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D +#define SUNI1x10GEXP_REG_EFLX_CHANNEL_PROVISION 0x3210 + +#define SUNI1x10GEXP_REG_PL4IDU_CONFIG 0x3280 +#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282 +#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283 + + +/*----------------------------------------*/ +#define SUNI1x10GEXP_REG_MAX_OFFSET 0x3480 + +/******************************************************************************/ +/* -- End register offset definitions -- */ +/******************************************************************************/ + +/******************************************************************************/ +/** SUNI-1x10GE-XP REGISTER BIT MASKS **/ +/******************************************************************************/ + +#define SUNI1x10GEXP_BITMSK_BITS_1 0x00001 +#define SUNI1x10GEXP_BITMSK_BITS_2 0x00003 +#define SUNI1x10GEXP_BITMSK_BITS_3 0x00007 +#define SUNI1x10GEXP_BITMSK_BITS_4 0x0000f +#define SUNI1x10GEXP_BITMSK_BITS_5 0x0001f +#define SUNI1x10GEXP_BITMSK_BITS_6 0x0003f +#define SUNI1x10GEXP_BITMSK_BITS_7 0x0007f +#define SUNI1x10GEXP_BITMSK_BITS_8 0x000ff +#define SUNI1x10GEXP_BITMSK_BITS_9 0x001ff +#define SUNI1x10GEXP_BITMSK_BITS_10 0x003ff +#define SUNI1x10GEXP_BITMSK_BITS_11 0x007ff +#define SUNI1x10GEXP_BITMSK_BITS_12 0x00fff +#define SUNI1x10GEXP_BITMSK_BITS_13 0x01fff +#define SUNI1x10GEXP_BITMSK_BITS_14 0x03fff +#define SUNI1x10GEXP_BITMSK_BITS_15 0x07fff +#define SUNI1x10GEXP_BITMSK_BITS_16 0x0ffff + +#define mSUNI1x10GEXP_CLR_MSBITS_1(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_15) +#define mSUNI1x10GEXP_CLR_MSBITS_2(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_14) +#define mSUNI1x10GEXP_CLR_MSBITS_3(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_13) +#define 
mSUNI1x10GEXP_CLR_MSBITS_4(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_12) +#define mSUNI1x10GEXP_CLR_MSBITS_5(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_11) +#define mSUNI1x10GEXP_CLR_MSBITS_6(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_10) +#define mSUNI1x10GEXP_CLR_MSBITS_7(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_9) +#define mSUNI1x10GEXP_CLR_MSBITS_8(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_8) +#define mSUNI1x10GEXP_CLR_MSBITS_9(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_7) +#define mSUNI1x10GEXP_CLR_MSBITS_10(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_6) +#define mSUNI1x10GEXP_CLR_MSBITS_11(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_5) +#define mSUNI1x10GEXP_CLR_MSBITS_12(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_4) +#define mSUNI1x10GEXP_CLR_MSBITS_13(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_3) +#define mSUNI1x10GEXP_CLR_MSBITS_14(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_2) +#define mSUNI1x10GEXP_CLR_MSBITS_15(v) ((v) & SUNI1x10GEXP_BITMSK_BITS_1) + +#define mSUNI1x10GEXP_GET_BIT(val, bitMsk) (((val)&(bitMsk)) ? 1:0) + + + +/*---------------------------------------------------------------------------- + * Register 0x0001: S/UNI-1x10GE-XP Product Revision + * Bit 3-0 REVISION + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_REVISION 0x000F + +/*---------------------------------------------------------------------------- + * Register 0x0002: S/UNI-1x10GE-XP Configuration and Reset Control + * Bit 2 XAUI_ARESETB + * Bit 1 PL4_ARESETB + * Bit 0 DRESETB + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XAUI_ARESET 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4_ARESET 0x0002 +#define SUNI1x10GEXP_BITMSK_DRESETB 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0003: S/UNI-1x10GE-XP Loop Back and Miscellaneous Control + * Bit 11 PL4IO_OUTCLKSEL + * Bit 9 SYSPCSLB + * Bit 8 LINEPCSLB + * Bit 7 MSTAT_BYPASS + * Bit 6 RXXG_BYPASS + * Bit 5 TXXG_BYPASS + * Bit 4 SOP_PAD_EN + * Bit 1 LOS_INV + * Bit 0 OVERRIDE_LOS + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_OUTCLKSEL 0x0800 +#define SUNI1x10GEXP_BITMSK_SYSPCSLB 0x0200 +#define SUNI1x10GEXP_BITMSK_LINEPCSLB 0x0100 +#define SUNI1x10GEXP_BITMSK_MSTAT_BYPASS 0x0080 +#define SUNI1x10GEXP_BITMSK_RXXG_BYPASS 0x0040 +#define SUNI1x10GEXP_BITMSK_TXXG_BYPASS 0x0020 +#define SUNI1x10GEXP_BITMSK_SOP_PAD_EN 0x0010 +#define SUNI1x10GEXP_BITMSK_LOS_INV 0x0002 +#define SUNI1x10GEXP_BITMSK_OVERRIDE_LOS 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0004: S/UNI-1x10GE-XP Device Status + * Bit 9 TOP_SXRA_EXPIRED + * Bit 8 TOP_MDIO_BUSY + * Bit 7 TOP_DTRB + * Bit 6 TOP_EXPIRED + * Bit 5 TOP_PAUSED + * Bit 4 TOP_PL4_ID_DOOL + * Bit 3 TOP_PL4_IS_DOOL + * Bit 2 TOP_PL4_ID_ROOL + * Bit 1 TOP_PL4_IS_ROOL + * Bit 0 TOP_PL4_OUT_ROOL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200 +#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY 0x0100 +#define SUNI1x10GEXP_BITMSK_TOP_DTRB 0x0080 +#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040 +#define SUNI1x10GEXP_BITMSK_TOP_PAUSED 0x0020 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002 +#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001 + 
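+/*----------------------------------------------------------------------------
+ * Usage sketch (not part of the imported header): the mSUNI1x10GEXP_GET_BIT()
+ * helper above is normally combined with a register read and one of the
+ * SUNI1x10GEXP_BITMSK_* constants to test a single status flag.  The example
+ * below polls TOP_MDIO_BUSY in the Device Status register (offset 0x0004,
+ * documented above).  suni_read_reg() is a hypothetical accessor standing in
+ * for whatever register-access routine the driver really provides, and the
+ * kernel u32 type is assumed to come from <linux/types.h>.
+ *----------------------------------------------------------------------------*/
+extern u32 suni_read_reg(void *dev, u32 offset);	/* assumed accessor, not in this header */
+
+static inline int suni_wait_mdio_idle(void *dev)
+{
+	int attempts = 1000;			/* bounded busy-wait */
+
+	while (attempts--) {
+		u32 status = suni_read_reg(dev, 0x0004);
+
+		if (!mSUNI1x10GEXP_GET_BIT(status,
+					   SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY))
+			return 0;		/* MDIO block is idle */
+	}
+	return -1;				/* still busy after polling */
+}
+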
+/*---------------------------------------------------------------------------- + * Register 0x0005: Global Performance Update and Clock Monitors + * Bit 15 TIP + * Bit 8 XAUI_REF_CLKA + * Bit 7 RXLANE3CLKA + * Bit 6 RXLANE2CLKA + * Bit 5 RXLANE1CLKA + * Bit 4 RXLANE0CLKA + * Bit 3 CSUCLKA + * Bit 2 TDCLKA + * Bit 1 RSCLKA + * Bit 0 RDCLKA + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TIP 0x8000 +#define SUNI1x10GEXP_BITMSK_XAUI_REF_CLKA 0x0100 +#define SUNI1x10GEXP_BITMSK_RXLANE3CLKA 0x0080 +#define SUNI1x10GEXP_BITMSK_RXLANE2CLKA 0x0040 +#define SUNI1x10GEXP_BITMSK_RXLANE1CLKA 0x0020 +#define SUNI1x10GEXP_BITMSK_RXLANE0CLKA 0x0010 +#define SUNI1x10GEXP_BITMSK_CSUCLKA 0x0008 +#define SUNI1x10GEXP_BITMSK_TDCLKA 0x0004 +#define SUNI1x10GEXP_BITMSK_RSCLKA 0x0002 +#define SUNI1x10GEXP_BITMSK_RDCLKA 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0006: MDIO Command + * Bit 4 MDIO_RDINC + * Bit 3 MDIO_RSTAT + * Bit 2 MDIO_LCTLD + * Bit 1 MDIO_LCTLA + * Bit 0 MDIO_SPRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_RDINC 0x0010 +#define SUNI1x10GEXP_BITMSK_MDIO_RSTAT 0x0008 +#define SUNI1x10GEXP_BITMSK_MDIO_LCTLD 0x0004 +#define SUNI1x10GEXP_BITMSK_MDIO_LCTLA 0x0002 +#define SUNI1x10GEXP_BITMSK_MDIO_SPRE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0007: MDIO Interrupt Enable + * Bit 0 MDIO_BUSY_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_BUSY_EN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0008: MDIO Interrupt Status + * Bit 0 MDIO_BUSYI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_BUSYI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0009: MMD PHY Address + * Bit 12-8 MDIO_DEVADR + * Bit 4-0 MDIO_PRTADR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDIO_DEVADR 0x1F00 +#define SUNI1x10GEXP_BITOFF_MDIO_DEVADR 8 +#define SUNI1x10GEXP_BITMSK_MDIO_PRTADR 0x001F +#define SUNI1x10GEXP_BITOFF_MDIO_PRTADR 0 + +/*---------------------------------------------------------------------------- + * Register 0x000C: OAM Interface Control + * Bit 6 MDO_OD_ENB + * Bit 5 MDI_INV + * Bit 4 MDI_SEL + * Bit 3 RXOAMEN + * Bit 2 RXOAMCLKEN + * Bit 1 TXOAMEN + * Bit 0 TXOAMCLKEN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MDO_OD_ENB 0x0040 +#define SUNI1x10GEXP_BITMSK_MDI_INV 0x0020 +#define SUNI1x10GEXP_BITMSK_MDI_SEL 0x0010 +#define SUNI1x10GEXP_BITMSK_RXOAMEN 0x0008 +#define SUNI1x10GEXP_BITMSK_RXOAMCLKEN 0x0004 +#define SUNI1x10GEXP_BITMSK_TXOAMEN 0x0002 +#define SUNI1x10GEXP_BITMSK_TXOAMCLKEN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x000D: S/UNI-1x10GE-XP Master Interrupt Status + * Bit 15 TOP_PL4IO_INT + * Bit 14 TOP_IRAM_INT + * Bit 13 TOP_ERAM_INT + * Bit 12 TOP_XAUI_INT + * Bit 11 TOP_MSTAT_INT + * Bit 10 TOP_RXXG_INT + * Bit 9 TOP_TXXG_INT + * Bit 8 TOP_XRF_INT + * Bit 7 TOP_XTEF_INT + * Bit 6 TOP_MDIO_BUSY_INT + * Bit 5 TOP_RXOAM_INT + * Bit 4 TOP_TXOAM_INT + * Bit 3 TOP_IFLX_INT + * Bit 2 TOP_EFLX_INT + * Bit 1 
TOP_PL4ODP_INT + * Bit 0 TOP_PL4IDU_INT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TOP_PL4IO_INT 0x8000 +#define SUNI1x10GEXP_BITMSK_TOP_IRAM_INT 0x4000 +#define SUNI1x10GEXP_BITMSK_TOP_ERAM_INT 0x2000 +#define SUNI1x10GEXP_BITMSK_TOP_XAUI_INT 0x1000 +#define SUNI1x10GEXP_BITMSK_TOP_MSTAT_INT 0x0800 +#define SUNI1x10GEXP_BITMSK_TOP_RXXG_INT 0x0400 +#define SUNI1x10GEXP_BITMSK_TOP_TXXG_INT 0x0200 +#define SUNI1x10GEXP_BITMSK_TOP_XRF_INT 0x0100 +#define SUNI1x10GEXP_BITMSK_TOP_XTEF_INT 0x0080 +#define SUNI1x10GEXP_BITMSK_TOP_MDIO_BUSY_INT 0x0040 +#define SUNI1x10GEXP_BITMSK_TOP_RXOAM_INT 0x0020 +#define SUNI1x10GEXP_BITMSK_TOP_TXOAM_INT 0x0010 +#define SUNI1x10GEXP_BITMSK_TOP_IFLX_INT 0x0008 +#define SUNI1x10GEXP_BITMSK_TOP_EFLX_INT 0x0004 +#define SUNI1x10GEXP_BITMSK_TOP_PL4ODP_INT 0x0002 +#define SUNI1x10GEXP_BITMSK_TOP_PL4IDU_INT 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x000E:PM3393 Global interrupt enable + * Bit 15 TOP_INTE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000 + +/*---------------------------------------------------------------------------- + * Register 0x0010: XTEF Miscellaneous Control + * Bit 7 RF_VAL + * Bit 6 RF_OVERRIDE + * Bit 5 LF_VAL + * Bit 4 LF_OVERRIDE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RF_VAL 0x0080 +#define SUNI1x10GEXP_BITMSK_RF_OVERRIDE 0x0040 +#define SUNI1x10GEXP_BITMSK_LF_VAL 0x0020 +#define SUNI1x10GEXP_BITMSK_LF_OVERRIDE 0x0010 +#define SUNI1x10GEXP_BITMSK_LFRF_OVERRIDE_VAL 0x00F0 + +/*---------------------------------------------------------------------------- + * Register 0x0011: XRF Miscellaneous Control + * Bit 6-4 EN_IDLE_REP + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EN_IDLE_REP 0x0070 + +/*---------------------------------------------------------------------------- + * Register 0x0100: SERDES 3125 Configuration Register 1 + * Bit 10 RXEQB_3 + * Bit 8 RXEQB_2 + * Bit 6 RXEQB_1 + * Bit 4 RXEQB_0 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXEQB 0x0FF0 +#define SUNI1x10GEXP_BITOFF_RXEQB_3 10 +#define SUNI1x10GEXP_BITOFF_RXEQB_2 8 +#define SUNI1x10GEXP_BITOFF_RXEQB_1 6 +#define SUNI1x10GEXP_BITOFF_RXEQB_0 4 + +/*---------------------------------------------------------------------------- + * Register 0x0101: SERDES 3125 Configuration Register 2 + * Bit 12 YSEL + * Bit 7 PRE_EMPH_3 + * Bit 6 PRE_EMPH_2 + * Bit 5 PRE_EMPH_1 + * Bit 4 PRE_EMPH_0 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_YSEL 0x1000 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH 0x00F0 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_3 0x0080 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_2 0x0040 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_1 0x0020 +#define SUNI1x10GEXP_BITMSK_PRE_EMPH_0 0x0010 + +/*---------------------------------------------------------------------------- + * Register 0x0102: SERDES 3125 Interrupt Enable Register + * Bit 3 LASIE + * Bit 2 SPLL_RAE + * Bit 1 MPLL_RAE + * Bit 0 PLL_LOCKE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LASIE 0x0008 +#define SUNI1x10GEXP_BITMSK_SPLL_RAE 0x0004 +#define SUNI1x10GEXP_BITMSK_MPLL_RAE 0x0002 +#define 
SUNI1x10GEXP_BITMSK_PLL_LOCKE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0103: SERDES 3125 Interrupt Visibility Register + * Bit 3 LASIV + * Bit 2 SPLL_RAV + * Bit 1 MPLL_RAV + * Bit 0 PLL_LOCKV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LASIV 0x0008 +#define SUNI1x10GEXP_BITMSK_SPLL_RAV 0x0004 +#define SUNI1x10GEXP_BITMSK_MPLL_RAV 0x0002 +#define SUNI1x10GEXP_BITMSK_PLL_LOCKV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0104: SERDES 3125 Interrupt Status Register + * Bit 3 LASII + * Bit 2 SPLL_RAI + * Bit 1 MPLL_RAI + * Bit 0 PLL_LOCKI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LASII 0x0008 +#define SUNI1x10GEXP_BITMSK_SPLL_RAI 0x0004 +#define SUNI1x10GEXP_BITMSK_MPLL_RAI 0x0002 +#define SUNI1x10GEXP_BITMSK_PLL_LOCKI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x0107: SERDES 3125 Test Configuration + * Bit 12 DUALTX + * Bit 10 HC_1 + * Bit 9 HC_0 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_DUALTX 0x1000 +#define SUNI1x10GEXP_BITMSK_HC 0x0600 +#define SUNI1x10GEXP_BITOFF_HC_0 9 + +/*---------------------------------------------------------------------------- + * Register 0x2040: RXXG Configuration 1 + * Bit 15 RXXG_RXEN + * Bit 14 RXXG_ROCF + * Bit 13 RXXG_PAD_STRIP + * Bit 10 RXXG_PUREP + * Bit 9 RXXG_LONGP + * Bit 8 RXXG_PARF + * Bit 7 RXXG_FLCHK + * Bit 5 RXXG_PASS_CTRL + * Bit 3 RXXG_CRC_STRIP + * Bit 2-0 RXXG_MIFG + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000 +#define SUNI1x10GEXP_BITMSK_RXXG_ROCF 0x4000 +#define SUNI1x10GEXP_BITMSK_RXXG_PAD_STRIP 0x2000 +#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400 +#define SUNI1x10GEXP_BITMSK_RXXG_LONGP 0x0200 +#define SUNI1x10GEXP_BITMSK_RXXG_PARF 0x0100 +#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080 +#define SUNI1x10GEXP_BITMSK_RXXG_PASS_CTRL 0x0020 +#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008 + +/*---------------------------------------------------------------------------- + * Register 0x02041: RXXG Configuration 2 + * Bit 7-0 RXXG_HDRSIZE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_HDRSIZE 0x00FF + +/*---------------------------------------------------------------------------- + * Register 0x2042: RXXG Configuration 3 + * Bit 15 RXXG_MIN_LERRE + * Bit 14 RXXG_MAX_LERRE + * Bit 12 RXXG_LINE_ERRE + * Bit 10 RXXG_RX_OVRE + * Bit 9 RXXG_ADR_FILTERE + * Bit 8 RXXG_ERR_FILTERE + * Bit 5 RXXG_PRMB_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRE 0x8000 +#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRE 0x4000 +#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRE 0x1000 +#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRE 0x0400 +#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERE 0x0200 +#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERRE 0x0100 +#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 + +/*---------------------------------------------------------------------------- + * Register 0x2043: RXXG Interrupt + * Bit 15 RXXG_MIN_LERRI + * Bit 14 RXXG_MAX_LERRI + * Bit 12 RXXG_LINE_ERRI + * Bit 10 RXXG_RX_OVRI + * Bit 9 RXXG_ADR_FILTERI + * Bit 8 RXXG_ERR_FILTERI + * Bit 5 
RXXG_PRMB_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_MIN_LERRI 0x8000 +#define SUNI1x10GEXP_BITMSK_RXXG_MAX_LERRI 0x4000 +#define SUNI1x10GEXP_BITMSK_RXXG_LINE_ERRI 0x1000 +#define SUNI1x10GEXP_BITMSK_RXXG_RX_OVRI 0x0400 +#define SUNI1x10GEXP_BITMSK_RXXG_ADR_FILTERI 0x0200 +#define SUNI1x10GEXP_BITMSK_RXXG_ERR_FILTERI 0x0100 +#define SUNI1x10GEXP_BITMSK_RXXG_PRMB_ERRE 0x0020 + +/*---------------------------------------------------------------------------- + * Register 0x2049: RXXG Receive FIFO Threshold + * Bit 2-0 RXXG_CUT_THRU + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_CUT_THRU 0x0007 +#define SUNI1x10GEXP_BITOFF_RXXG_CUT_THRU 0 + +/*---------------------------------------------------------------------------- + * Register 0x2062H - 0x2069: RXXG Exact Match VID + * Bit 11-0 RXXG_VID_MATCH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_VID_MATCH 0x0FFF +#define SUNI1x10GEXP_BITOFF_RXXG_VID_MATCH 0 + +/*---------------------------------------------------------------------------- + * Register 0x206EH - 0x206F: RXXG Address Filter Control + * Bit 3 RXXG_FORWARD_ENABLE + * Bit 2 RXXG_VLAN_ENABLE + * Bit 1 RXXG_SRC_ADDR + * Bit 0 RXXG_MATCH_ENABLE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_FORWARD_ENABLE 0x0008 +#define SUNI1x10GEXP_BITMSK_RXXG_VLAN_ENABLE 0x0004 +#define SUNI1x10GEXP_BITMSK_RXXG_SRC_ADDR 0x0002 +#define SUNI1x10GEXP_BITMSK_RXXG_MATCH_ENABLE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2070: RXXG Address Filter Control 2 + * Bit 1 RXXG_PMODE + * Bit 0 RXXG_MHASH_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002 +#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2081: XRF Control Register 2 + * Bit 6 EN_PKT_GEN + * Bit 4-2 PATT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EN_PKT_GEN 0x0040 +#define SUNI1x10GEXP_BITMSK_PATT 0x001C +#define SUNI1x10GEXP_BITOFF_PATT 2 + +/*---------------------------------------------------------------------------- + * Register 0x2088: XRF Interrupt Enable + * Bit 12-9 LANE_HICERE + * Bit 8-5 HS_SD_LANEE + * Bit 4 ALIGN_STATUS_ERRE + * Bit 3-0 LANE_SYNC_STAT_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_HICERE 0x1E00 +#define SUNI1x10GEXP_BITOFF_LANE_HICERE 9 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANEE 0x01E0 +#define SUNI1x10GEXP_BITOFF_HS_SD_LANEE 5 +#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRE 0x0010 +#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRE 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRE 0 + +/*---------------------------------------------------------------------------- + * Register 0x2089: XRF Interrupt Status + * Bit 12-9 LANE_HICERI + * Bit 8-5 HS_SD_LANEI + * Bit 4 ALIGN_STATUS_ERRI + * Bit 3-0 LANE_SYNC_STAT_ERRI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_HICERI 0x1E00 +#define SUNI1x10GEXP_BITOFF_LANE_HICERI 9 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANEI 0x01E0 +#define 
SUNI1x10GEXP_BITOFF_HS_SD_LANEI 5 +#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERRI 0x0010 +#define SUNI1x10GEXP_BITMSK_LANE_SYNC_STAT_ERRI 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_SYNC_STAT_ERRI 0 + +/*---------------------------------------------------------------------------- + * Register 0x208A: XRF Error Status + * Bit 8-5 HS_SD_LANE + * Bit 4 ALIGN_STATUS_ERR + * Bit 3-0 LANE_SYNC_STAT_ERR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE3 0x0100 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE2 0x0080 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE1 0x0040 +#define SUNI1x10GEXP_BITMSK_HS_SD_LANE0 0x0020 +#define SUNI1x10GEXP_BITMSK_ALIGN_STATUS_ERR 0x0010 +#define SUNI1x10GEXP_BITMSK_LANE3_SYNC_STAT_ERR 0x0008 +#define SUNI1x10GEXP_BITMSK_LANE2_SYNC_STAT_ERR 0x0004 +#define SUNI1x10GEXP_BITMSK_LANE1_SYNC_STAT_ERR 0x0002 +#define SUNI1x10GEXP_BITMSK_LANE0_SYNC_STAT_ERR 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x208B: XRF Diagnostic Interrupt Enable + * Bit 7-4 LANE_OVERRUNE + * Bit 3-0 LANE_UNDERRUNE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNE 0x00F0 +#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNE 4 +#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNE 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNE 0 + +/*---------------------------------------------------------------------------- + * Register 0x208C: XRF Diagnostic Interrupt Status + * Bit 7-4 LANE_OVERRUNI + * Bit 3-0 LANE_UNDERRUNI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_LANE_OVERRUNI 0x00F0 +#define SUNI1x10GEXP_BITOFF_LANE_OVERRUNI 4 +#define SUNI1x10GEXP_BITMSK_LANE_UNDERRUNI 0x000F +#define SUNI1x10GEXP_BITOFF_LANE_UNDERRUNI 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C0: RXOAM Configuration + * Bit 15 RXOAM_BUSY + * Bit 14-12 RXOAM_F2_SEL + * Bit 10-8 RXOAM_F1_SEL + * Bit 7-6 RXOAM_FILTER_CTRL + * Bit 5-0 RXOAM_PX_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_BUSY 0x8000 +#define SUNI1x10GEXP_BITMSK_RXOAM_F2_SEL 0x7000 +#define SUNI1x10GEXP_BITOFF_RXOAM_F2_SEL 12 +#define SUNI1x10GEXP_BITMSK_RXOAM_F1_SEL 0x0700 +#define SUNI1x10GEXP_BITOFF_RXOAM_F1_SEL 8 +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_CTRL 0x00C0 +#define SUNI1x10GEXP_BITOFF_RXOAM_FILTER_CTRL 6 +#define SUNI1x10GEXP_BITMSK_RXOAM_PX_EN 0x003F +#define SUNI1x10GEXP_BITOFF_RXOAM_PX_EN 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C1,0x20C2: RXOAM Filter Configuration + * Bit 15-8 RXOAM_FX_MASK + * Bit 7-0 RXOAM_FX_VAL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FX_MASK 0xFF00 +#define SUNI1x10GEXP_BITOFF_RXOAM_FX_MASK 8 +#define SUNI1x10GEXP_BITMSK_RXOAM_FX_VAL 0x00FF +#define SUNI1x10GEXP_BITOFF_RXOAM_FX_VAl 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C3: RXOAM Configuration Register 2 + * Bit 13 RXOAM_REC_BYTE_VAL + * Bit 11-10 RXOAM_BYPASS_MODE + * Bit 5-0 RXOAM_PX_CLEAR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_REC_BYTE_VAL 0x2000 +#define SUNI1x10GEXP_BITMSK_RXOAM_BYPASS_MODE 0x0C00 +#define SUNI1x10GEXP_BITOFF_RXOAM_BYPASS_MODE 
10 +#define SUNI1x10GEXP_BITMSK_RXOAM_PX_CLEAR 0x003F +#define SUNI1x10GEXP_BITOFF_RXOAM_PX_CLEAR 0 + +/*---------------------------------------------------------------------------- + * Register 0x20C4: RXOAM HEC Configuration + * Bit 15-8 RXOAM_COSET + * Bit 2 RXOAM_HEC_ERR_PKT + * Bit 0 RXOAM_HEC_EN + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_COSET 0xFF00 +#define SUNI1x10GEXP_BITOFF_RXOAM_COSET 8 +#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_ERR_PKT 0x0004 +#define SUNI1x10GEXP_BITMSK_RXOAM_HEC_EN 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x20C7: RXOAM Interrupt Enable + * Bit 10 RXOAM_FILTER_THRSHE + * Bit 9 RXOAM_OAM_ERRE + * Bit 8 RXOAM_HECE_THRSHE + * Bit 7 RXOAM_SOPE + * Bit 6 RXOAM_RFE + * Bit 5 RXOAM_LFE + * Bit 4 RXOAM_DV_ERRE + * Bit 3 RXOAM_DATA_INVALIDE + * Bit 2 RXOAM_FILTER_DROPE + * Bit 1 RXOAM_HECE + * Bit 0 RXOAM_OFLE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHE 0x0400 +#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRE 0x0200 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHE 0x0100 +#define SUNI1x10GEXP_BITMSK_RXOAM_SOPE 0x0080 +#define SUNI1x10GEXP_BITMSK_RXOAM_RFE 0x0040 +#define SUNI1x10GEXP_BITMSK_RXOAM_LFE 0x0020 +#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRE 0x0010 +#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDE 0x0008 +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPE 0x0004 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE 0x0002 +#define SUNI1x10GEXP_BITMSK_RXOAM_OFLE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x20C8: RXOAM Interrupt Status + * Bit 10 RXOAM_FILTER_THRSHI + * Bit 9 RXOAM_OAM_ERRI + * Bit 8 RXOAM_HECE_THRSHI + * Bit 7 RXOAM_SOPI + * Bit 6 RXOAM_RFI + * Bit 5 RXOAM_LFI + * Bit 4 RXOAM_DV_ERRI + * Bit 3 RXOAM_DATA_INVALIDI + * Bit 2 RXOAM_FILTER_DROPI + * Bit 1 RXOAM_HECI + * Bit 0 RXOAM_OFLI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHI 0x0400 +#define SUNI1x10GEXP_BITMSK_RXOAM_OAM_ERRI 0x0200 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHI 0x0100 +#define SUNI1x10GEXP_BITMSK_RXOAM_SOPI 0x0080 +#define SUNI1x10GEXP_BITMSK_RXOAM_RFI 0x0040 +#define SUNI1x10GEXP_BITMSK_RXOAM_LFI 0x0020 +#define SUNI1x10GEXP_BITMSK_RXOAM_DV_ERRI 0x0010 +#define SUNI1x10GEXP_BITMSK_RXOAM_DATA_INVALIDI 0x0008 +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_DROPI 0x0004 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECI 0x0002 +#define SUNI1x10GEXP_BITMSK_RXOAM_OFLI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x20C9: RXOAM Status + * Bit 10 RXOAM_FILTER_THRSHV + * Bit 8 RXOAM_HECE_THRSHV + * Bit 6 RXOAM_RFV + * Bit 5 RXOAM_LFV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_RXOAM_FILTER_THRSHV 0x0400 +#define SUNI1x10GEXP_BITMSK_RXOAM_HECE_THRSHV 0x0100 +#define SUNI1x10GEXP_BITMSK_RXOAM_RFV 0x0040 +#define SUNI1x10GEXP_BITMSK_RXOAM_LFV 0x0020 + +/*---------------------------------------------------------------------------- + * Register 0x2100: MSTAT Control + * Bit 2 MSTAT_WRITE + * Bit 1 MSTAT_CLEAR + * Bit 0 MSTAT_SNAP + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE 0x0004 +#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002 +#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001 + 
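+/*----------------------------------------------------------------------------
+ * Usage sketch (not part of the imported header): each MSTAT counter is split
+ * across 16-bit LOW/MID/HIGH registers, and the per-counter
+ * mSUNI1x10GEXP_REG_MSTAT_COUNTER_*() macros above compute their offsets.
+ * A typical read latches the counters with MSTAT_SNAP and then stitches the
+ * three halves back together, as sketched below.  suni_read_reg() and
+ * suni_write_reg() are hypothetical accessors (the real driver supplies its
+ * own register I/O routines), and the kernel u32/u64 types are assumed.
+ *----------------------------------------------------------------------------*/
+extern u32 suni_read_reg(void *dev, u32 offset);		/* assumed accessor */
+extern void suni_write_reg(void *dev, u32 offset, u32 val);	/* assumed accessor */
+
+static inline u64 suni_read_mstat_counter(void *dev, unsigned int countId)
+{
+	u64 lo, mid, hi;
+
+	/* Snapshot the counters so LOW/MID/HIGH are captured coherently. */
+	suni_write_reg(dev, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+		       SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+
+	lo  = suni_read_reg(dev, mSUNI1x10GEXP_REG_MSTAT_COUNTER_LOW(countId)) & 0xFFFF;
+	mid = suni_read_reg(dev, mSUNI1x10GEXP_REG_MSTAT_COUNTER_MID(countId)) & 0xFFFF;
+	hi  = suni_read_reg(dev, mSUNI1x10GEXP_REG_MSTAT_COUNTER_HIGH(countId)) & 0xFFFF;
+
+	return (hi << 32) | (mid << 16) | lo;	/* 48-bit counter value */
+}
+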
+/*---------------------------------------------------------------------------- + * Register 0x2109: MSTAT Counter Write Address + * Bit 5-0 MSTAT_WRITE_ADDRESS + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_MSTAT_WRITE_ADDRESS 0x003F +#define SUNI1x10GEXP_BITOFF_MSTAT_WRITE_ADDRESS 0 + +/*---------------------------------------------------------------------------- + * Register 0x2200: IFLX Global Configuration Register + * Bit 15 IFLX_IRCU_ENABLE + * Bit 14 IFLX_IDSWT_ENABLE + * Bit 13-0 IFLX_IFD_CNT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_IRCU_ENABLE 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_IDSWT_ENABLE 0x4000 +#define SUNI1x10GEXP_BITMSK_IFLX_IFD_CNT 0x3FFF +#define SUNI1x10GEXP_BITOFF_IFLX_IFD_CNT 0 + +/*---------------------------------------------------------------------------- + * Register 0x2209: IFLX FIFO Overflow Enable + * Bit 0 IFLX_OVFE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_OVFE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x220A: IFLX FIFO Overflow Interrupt + * Bit 0 IFLX_OVFI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_OVFI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x220D: IFLX Indirect Channel Address + * Bit 15 IFLX_BUSY + * Bit 14 IFLX_RWB + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_BUSY 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_RWB 0x4000 + +/*---------------------------------------------------------------------------- + * Register 0x220E: IFLX Indirect Logical FIFO Low Limit & Provision + * Bit 9-0 IFLX_LOLIM + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_LOLIM 0x03FF +#define SUNI1x10GEXP_BITOFF_IFLX_LOLIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x220F: IFLX Indirect Logical FIFO High Limit + * Bit 9-0 IFLX_HILIM + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_HILIM 0x03FF +#define SUNI1x10GEXP_BITOFF_IFLX_HILIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x2210: IFLX Indirect Full/Almost Full Status & Limit + * Bit 15 IFLX_FULL + * Bit 14 IFLX_AFULL + * Bit 13-0 IFLX_AFTH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_FULL 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_AFULL 0x4000 +#define SUNI1x10GEXP_BITMSK_IFLX_AFTH 0x3FFF +#define SUNI1x10GEXP_BITOFF_IFLX_AFTH 0 + +/*---------------------------------------------------------------------------- + * Register 0x2211: IFLX Indirect Empty/Almost Empty Status & Limit + * Bit 15 IFLX_EMPTY + * Bit 14 IFLX_AEMPTY + * Bit 13-0 IFLX_AETH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_IFLX_EMPTY 0x8000 +#define SUNI1x10GEXP_BITMSK_IFLX_AEMPTY 0x4000 +#define SUNI1x10GEXP_BITMSK_IFLX_AETH 0x3FFF +#define SUNI1x10GEXP_BITOFF_IFLX_AETH 0 + +/*---------------------------------------------------------------------------- + * Register 0x2240: PL4MOS Configuration Register + * Bit 3 
PL4MOS_RE_INIT + * Bit 2 PL4MOS_EN + * Bit 1 PL4MOS_NO_STATUS + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_RE_INIT 0x0008 +#define SUNI1x10GEXP_BITMSK_PL4MOS_EN 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4MOS_NO_STATUS 0x0002 + +/*---------------------------------------------------------------------------- + * Register 0x2243: PL4MOS MaxBurst1 Register + * Bit 11-0 PL4MOS_MAX_BURST1 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST1 0x0FFF +#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST1 0 + +/*---------------------------------------------------------------------------- + * Register 0x2244: PL4MOS MaxBurst2 Register + * Bit 11-0 PL4MOS_MAX_BURST2 + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_BURST2 0x0FFF +#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_BURST2 0 + +/*---------------------------------------------------------------------------- + * Register 0x2245: PL4MOS Transfer Size Register + * Bit 7-0 PL4MOS_MAX_TRANSFER + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4MOS_MAX_TRANSFER 0x00FF +#define SUNI1x10GEXP_BITOFF_PL4MOS_MAX_TRANSFER 0 + +/*---------------------------------------------------------------------------- + * Register 0x2280: PL4ODP Configuration + * Bit 15-12 PL4ODP_REPEAT_T + * Bit 8 PL4ODP_SOP_RULE + * Bit 1 PL4ODP_EN_PORTS + * Bit 0 PL4ODP_EN_DFWD + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4ODP_REPEAT_T 0xF000 +#define SUNI1x10GEXP_BITOFF_PL4ODP_REPEAT_T 12 +#define SUNI1x10GEXP_BITMSK_PL4ODP_SOP_RULE 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_PORTS 0x0002 +#define SUNI1x10GEXP_BITMSK_PL4ODP_EN_DFWD 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2282: PL4ODP Interrupt Mask + * Bit 0 PL4ODP_OUT_DISE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISE 0x0001 + + + +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBE 0x0080 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPE 0x0040 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPE 0x0008 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPE 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRE 0x0002 + + +/*---------------------------------------------------------------------------- + * Register 0x2283: PL4ODP Interrupt + * Bit 0 PL4ODP_OUT_DISI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4ODP_OUT_DISI 0x0001 + + + +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_EOPEOBI 0x0080 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_ERREOPI 0x0040 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MEOPI 0x0008 +#define SUNI1x10GEXP_BITMSK_PL4ODP_PPE_MSOPI 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4ODP_ES_OVRI 0x0002 + +/*---------------------------------------------------------------------------- + * Register 0x2300: PL4IO Lock Detect Status + * Bit 15 PL4IO_OUT_ROOLV + * Bit 12 PL4IO_IS_ROOLV + * Bit 11 PL4IO_DIP2_ERRV + * Bit 8 PL4IO_ID_ROOLV + * Bit 4 PL4IO_IS_DOOLV + * Bit 0 PL4IO_ID_DOOLV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLV 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLV 0x1000 +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRV 0x0800 +#define 
SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLV 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLV 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2301: PL4IO Lock Detect Change + * Bit 15 PL4IO_OUT_ROOLI + * Bit 12 PL4IO_IS_ROOLI + * Bit 11 PL4IO_DIP2_ERRI + * Bit 8 PL4IO_ID_ROOLI + * Bit 4 PL4IO_IS_DOOLI + * Bit 0 PL4IO_ID_DOOLI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLI 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLI 0x1000 +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRI 0x0800 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLI 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLI 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2302: PL4IO Lock Detect Mask + * Bit 15 PL4IO_OUT_ROOLE + * Bit 12 PL4IO_IS_ROOLE + * Bit 11 PL4IO_DIP2_ERRE + * Bit 8 PL4IO_ID_ROOLE + * Bit 4 PL4IO_IS_DOOLE + * Bit 0 PL4IO_ID_DOOLE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_ROOLE 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_ROOLE 0x1000 +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERRE 0x0800 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_ROOLE 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_IS_DOOLE 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_ID_DOOLE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x2303: PL4IO Lock Detect Limits + * Bit 15-8 PL4IO_REF_LIMIT + * Bit 7-0 PL4IO_TRAN_LIMIT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_REF_LIMIT 0xFF00 +#define SUNI1x10GEXP_BITOFF_PL4IO_REF_LIMIT 8 +#define SUNI1x10GEXP_BITMSK_PL4IO_TRAN_LIMIT 0x00FF +#define SUNI1x10GEXP_BITOFF_PL4IO_TRAN_LIMIT 0 + +/*---------------------------------------------------------------------------- + * Register 0x2304: PL4IO Calendar Repetitions + * Bit 15-8 PL4IO_IN_MUL + * Bit 7-0 PL4IO_OUT_MUL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_IN_MUL 0xFF00 +#define SUNI1x10GEXP_BITOFF_PL4IO_IN_MUL 8 +#define SUNI1x10GEXP_BITMSK_PL4IO_OUT_MUL 0x00FF +#define SUNI1x10GEXP_BITOFF_PL4IO_OUT_MUL 0 + +/*---------------------------------------------------------------------------- + * Register 0x2305: PL4IO Configuration + * Bit 15 PL4IO_DIP2_ERR_CHK + * Bit 11 PL4IO_ODAT_DIS + * Bit 10 PL4IO_TRAIN_DIS + * Bit 9 PL4IO_OSTAT_DIS + * Bit 8 PL4IO_ISTAT_DIS + * Bit 7 PL4IO_NO_ISTAT + * Bit 6 PL4IO_STAT_OUTSEL + * Bit 5 PL4IO_INSEL + * Bit 4 PL4IO_DLSEL + * Bit 1-0 PL4IO_OUTSEL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IO_DIP2_ERR_CHK 0x8000 +#define SUNI1x10GEXP_BITMSK_PL4IO_ODAT_DIS 0x0800 +#define SUNI1x10GEXP_BITMSK_PL4IO_TRAIN_DIS 0x0400 +#define SUNI1x10GEXP_BITMSK_PL4IO_OSTAT_DIS 0x0200 +#define SUNI1x10GEXP_BITMSK_PL4IO_ISTAT_DIS 0x0100 +#define SUNI1x10GEXP_BITMSK_PL4IO_NO_ISTAT 0x0080 +#define SUNI1x10GEXP_BITMSK_PL4IO_STAT_OUTSEL 0x0040 +#define SUNI1x10GEXP_BITMSK_PL4IO_INSEL 0x0020 +#define SUNI1x10GEXP_BITMSK_PL4IO_DLSEL 0x0010 +#define SUNI1x10GEXP_BITMSK_PL4IO_OUTSEL 0x0003 +#define SUNI1x10GEXP_BITOFF_PL4IO_OUTSEL 0 + +/*---------------------------------------------------------------------------- + * Register 0x3040: TXXG Configuration 
Register 1 + * Bit 15 TXXG_TXEN0 + * Bit 13 TXXG_HOSTPAUSE + * Bit 12-7 TXXG_IPGT + * Bit 5 TXXG_32BIT_ALIGN + * Bit 4 TXXG_CRCEN + * Bit 3 TXXG_FCTX + * Bit 2 TXXG_FCRX + * Bit 1 TXXG_PADEN + * Bit 0 TXXG_SPRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000 +#define SUNI1x10GEXP_BITMSK_TXXG_HOSTPAUSE 0x2000 +#define SUNI1x10GEXP_BITMSK_TXXG_IPGT 0x1F80 +#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7 +#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020 +#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010 +#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008 +#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004 +#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002 +#define SUNI1x10GEXP_BITMSK_TXXG_SPRE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3041: TXXG Configuration Register 2 + * Bit 7-0 TXXG_HDRSIZE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_HDRSIZE 0x00FF + +/*---------------------------------------------------------------------------- + * Register 0x3042: TXXG Configuration Register 3 + * Bit 15 TXXG_FIFO_ERRE + * Bit 14 TXXG_FIFO_UDRE + * Bit 13 TXXG_MAX_LERRE + * Bit 12 TXXG_MIN_LERRE + * Bit 11 TXXG_XFERE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRE 0x8000 +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRE 0x4000 +#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRE 0x2000 +#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRE 0x1000 +#define SUNI1x10GEXP_BITMSK_TXXG_XFERE 0x0800 + +/*---------------------------------------------------------------------------- + * Register 0x3043: TXXG Interrupt + * Bit 15 TXXG_FIFO_ERRI + * Bit 14 TXXG_FIFO_UDRI + * Bit 13 TXXG_MAX_LERRI + * Bit 12 TXXG_MIN_LERRI + * Bit 11 TXXG_XFERI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_ERRI 0x8000 +#define SUNI1x10GEXP_BITMSK_TXXG_FIFO_UDRI 0x4000 +#define SUNI1x10GEXP_BITMSK_TXXG_MAX_LERRI 0x2000 +#define SUNI1x10GEXP_BITMSK_TXXG_MIN_LERRI 0x1000 +#define SUNI1x10GEXP_BITMSK_TXXG_XFERI 0x0800 + +/*---------------------------------------------------------------------------- + * Register 0x3044: TXXG Status Register + * Bit 1 TXXG_TXACTIVE + * Bit 0 TXXG_PAUSED + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_TXACTIVE 0x0002 +#define SUNI1x10GEXP_BITMSK_TXXG_PAUSED 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3046: TXXG TX_MINFR - Transmit Min Frame Size Register + * Bit 7-0 TXXG_TX_MINFR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_TX_MINFR 0x00FF +#define SUNI1x10GEXP_BITOFF_TXXG_TX_MINFR 0 + +/*---------------------------------------------------------------------------- + * Register 0x3052: TXXG Pause Quantum Value Configuration Register + * Bit 7-0 TXXG_FC_PAUSE_QNTM + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXXG_FC_PAUSE_QNTM 0x00FF +#define SUNI1x10GEXP_BITOFF_TXXG_FC_PAUSE_QNTM 0 + +/*---------------------------------------------------------------------------- + * Register 0x3080: XTEF Control + * Bit 3-0 XTEF_FORCE_PARITY_ERR + *----------------------------------------------------------------------------*/ +#define 
SUNI1x10GEXP_BITMSK_XTEF_FORCE_PARITY_ERR 0x000F +#define SUNI1x10GEXP_BITOFF_XTEF_FORCE_PARITY_ERR 0 + +/*---------------------------------------------------------------------------- + * Register 0x3084: XTEF Interrupt Event Register + * Bit 0 XTEF_LOST_SYNCI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3085: XTEF Interrupt Enable Register + * Bit 0 XTEF_LOST_SYNCE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3086: XTEF Visibility Register + * Bit 0 XTEF_LOST_SYNCV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_XTEF_LOST_SYNCV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x30C0: TXOAM OAM Configuration + * Bit 15 TXOAM_HEC_EN + * Bit 14 TXOAM_EMPTYCODE_EN + * Bit 13 TXOAM_FORCE_IDLE + * Bit 12 TXOAM_IGNORE_IDLE + * Bit 11-6 TXOAM_PX_OVERWRITE + * Bit 5-0 TXOAM_PX_SEL + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_HEC_EN 0x8000 +#define SUNI1x10GEXP_BITMSK_TXOAM_EMPTYCODE_EN 0x4000 +#define SUNI1x10GEXP_BITMSK_TXOAM_FORCE_IDLE 0x2000 +#define SUNI1x10GEXP_BITMSK_TXOAM_IGNORE_IDLE 0x1000 +#define SUNI1x10GEXP_BITMSK_TXOAM_PX_OVERWRITE 0x0FC0 +#define SUNI1x10GEXP_BITOFF_TXOAM_PX_OVERWRITE 6 +#define SUNI1x10GEXP_BITMSK_TXOAM_PX_SEL 0x003F +#define SUNI1x10GEXP_BITOFF_TXOAM_PX_SEL 0 + +/*---------------------------------------------------------------------------- + * Register 0x30C1: TXOAM Mini-Packet Rate Configuration + * Bit 15 TXOAM_MINIDIS + * Bit 14 TXOAM_BUSY + * Bit 13 TXOAM_TRANS_EN + * Bit 10-0 TXOAM_MINIRATE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIDIS 0x8000 +#define SUNI1x10GEXP_BITMSK_TXOAM_BUSY 0x4000 +#define SUNI1x10GEXP_BITMSK_TXOAM_TRANS_EN 0x2000 +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIRATE 0x07FF + +/*---------------------------------------------------------------------------- + * Register 0x30C2: TXOAM Mini-Packet Gap and FIFO Configuration + * Bit 13-10 TXOAM_FTHRESH + * Bit 9-6 TXOAM_MINIPOST + * Bit 5-0 TXOAM_MINIPRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_FTHRESH 0x3C00 +#define SUNI1x10GEXP_BITOFF_TXOAM_FTHRESH 10 +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPOST 0x03C0 +#define SUNI1x10GEXP_BITOFF_TXOAM_MINIPOST 6 +#define SUNI1x10GEXP_BITMSK_TXOAM_MINIPRE 0x003F + +/*---------------------------------------------------------------------------- + * Register 0x30C6: TXOAM Interrupt Enable + * Bit 2 TXOAM_SOP_ERRE + * Bit 1 TXOAM_OFLE + * Bit 0 TXOAM_ERRE + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRE 0x0004 +#define SUNI1x10GEXP_BITMSK_TXOAM_OFLE 0x0002 +#define SUNI1x10GEXP_BITMSK_TXOAM_ERRE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x30C7: TXOAM Interrupt Status + * Bit 2 TXOAM_SOP_ERRI + * Bit 1 TXOAM_OFLI + * Bit 0 TXOAM_ERRI + *----------------------------------------------------------------------------*/ +#define 
SUNI1x10GEXP_BITMSK_TXOAM_SOP_ERRI 0x0004 +#define SUNI1x10GEXP_BITMSK_TXOAM_OFLI 0x0002 +#define SUNI1x10GEXP_BITMSK_TXOAM_ERRI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x30CF: TXOAM Coset + * Bit 7-0 TXOAM_COSET + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_TXOAM_COSET 0x00FF + +/*---------------------------------------------------------------------------- + * Register 0x3200: EFLX Global Configuration + * Bit 15 EFLX_ERCU_EN + * Bit 7 EFLX_EN_EDSWT + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_ERCU_EN 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_EN_EDSWT 0x0080 + +/*---------------------------------------------------------------------------- + * Register 0x3201: EFLX ERCU Global Status + * Bit 13 EFLX_OVF_ERR + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_OVF_ERR 0x2000 + +/*---------------------------------------------------------------------------- + * Register 0x3202: EFLX Indirect Channel Address + * Bit 15 EFLX_BUSY + * Bit 14 EFLX_RDWRB + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_BUSY 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_RDWRB 0x4000 + +/*---------------------------------------------------------------------------- + * Register 0x3203: EFLX Indirect Logical FIFO Low Limit + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_LOLIM 0x03FF +#define SUNI1x10GEXP_BITOFF_EFLX_LOLIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x3204: EFLX Indirect Logical FIFO High Limit + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_HILIM 0x03FF +#define SUNI1x10GEXP_BITOFF_EFLX_HILIM 0 + +/*---------------------------------------------------------------------------- + * Register 0x3205: EFLX Indirect Full/Almost-Full Status and Limit + * Bit 15 EFLX_FULL + * Bit 14 EFLX_AFULL + * Bit 13-0 EFLX_AFTH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_FULL 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_AFULL 0x4000 +#define SUNI1x10GEXP_BITMSK_EFLX_AFTH 0x3FFF +#define SUNI1x10GEXP_BITOFF_EFLX_AFTH 0 + +/*---------------------------------------------------------------------------- + * Register 0x3206: EFLX Indirect Empty/Almost-Empty Status and Limit + * Bit 15 EFLX_EMPTY + * Bit 14 EFLX_AEMPTY + * Bit 13-0 EFLX_AETH + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_EMPTY 0x8000 +#define SUNI1x10GEXP_BITMSK_EFLX_AEMPTY 0x4000 +#define SUNI1x10GEXP_BITMSK_EFLX_AETH 0x3FFF +#define SUNI1x10GEXP_BITOFF_EFLX_AETH 0 + +/*---------------------------------------------------------------------------- + * Register 0x3207: EFLX Indirect FIFO Cut-Through Threshold + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_CUT_THRU 0x3FFF +#define SUNI1x10GEXP_BITOFF_EFLX_CUT_THRU 0 + +/*---------------------------------------------------------------------------- + * Register 0x320C: EFLX FIFO Overflow Error Enable + * Bit 0 EFLX_OVFE + *----------------------------------------------------------------------------*/ +#define 
SUNI1x10GEXP_BITMSK_EFLX_OVFE 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x320D: EFLX FIFO Overflow Error Indication + * Bit 0 EFLX_OVFI + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_OVFI 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3210: EFLX Channel Provision + * Bit 0 EFLX_PROV + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_EFLX_PROV 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3280: PL4IDU Configuration + * Bit 2 PL4IDU_SYNCH_ON_TRAIN + * Bit 1 PL4IDU_EN_PORTS + * Bit 0 PL4IDU_EN_DFWD + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IDU_SYNCH_ON_TRAIN 0x0004 +#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_PORTS 0x0002 +#define SUNI1x10GEXP_BITMSK_PL4IDU_EN_DFWD 0x0001 + +/*---------------------------------------------------------------------------- + * Register 0x3282: PL4IDU Interrupt Mask + * Bit 1 PL4IDU_DIP4E + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4E 0x0002 + +/*---------------------------------------------------------------------------- + * Register 0x3283: PL4IDU Interrupt + * Bit 1 PL4IDU_DIP4I + *----------------------------------------------------------------------------*/ +#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002 + +#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */ + diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c new file mode 100644 index 000000000..b146acabf --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/tp.c @@ -0,0 +1,171 @@ +/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */ +#include "common.h" +#include "regs.h" +#include "tp.h" +#ifdef CONFIG_CHELSIO_T1_1G +#include "fpga_defs.h" +#endif + +struct petp { + adapter_t *adapter; +}; + +/* Pause deadlock avoidance parameters */ +#define DROP_MSEC 16 +#define DROP_PKTS_CNT 1 + +static void tp_init(adapter_t * ap, const struct tp_params *p, + unsigned int tp_clk) +{ + u32 val; + + if (!t1_is_asic(ap)) + return; + + val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM | + F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET; + if (!p->pm_size) + val |= F_OFFLOAD_DISABLE; + else + val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM; + writel(val, ap->regs + A_TP_IN_CONFIG); + writel(F_TP_OUT_CSPI_CPL | + F_TP_OUT_ESPI_ETHERNET | + F_TP_OUT_ESPI_GENERATE_IP_CSUM | + F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG); + writel(V_IP_TTL(64) | + F_PATH_MTU /* IP DF bit */ | + V_5TUPLE_LOOKUP(p->use_5tuple_mode) | + V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG); + /* + * Enable pause frame deadlock prevention. 
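+ * DROP_MSEC and DROP_PKTS_CNT above parameterize this: tp_clk / 1000 is
+ * core-clock ticks per millisecond (assuming tp_clk is the TP clock in
+ * Hz), so drop_ticks spans DROP_MSEC milliseconds.  As a worked example
+ * with a purely illustrative 200 MHz TP clock:
+ *
+ *	drop_ticks = DROP_MSEC * (tp_clk / 1000)
+ *	           = 16 * 200000 = 3200000 ticks  (16 ms)
+ *
+ * After that interval the TP may drop up to DROP_PKTS_CNT packet(s) to
+ * break a sustained pause deadlock on multi-port T2 adapters.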
+ */ + if (is_T2(ap) && ap->params.nports > 1) { + u32 drop_ticks = DROP_MSEC * (tp_clk / 1000); + + writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR | + V_DROP_TICKS_CNT(drop_ticks) | + V_NUM_PKTS_DROPPED(DROP_PKTS_CNT), + ap->regs + A_TP_TX_DROP_CONFIG); + } +} + +void t1_tp_destroy(struct petp *tp) +{ + kfree(tp); +} + +struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p) +{ + struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); + + if (!tp) + return NULL; + + tp->adapter = adapter; + + return tp; +} + +void t1_tp_intr_enable(struct petp *tp) +{ + u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); + +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(tp->adapter)) { + /* FPGA */ + writel(0xffffffff, + tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); + writel(tp_intr | FPGA_PCIX_INTERRUPT_TP, + tp->adapter->regs + A_PL_ENABLE); + } else +#endif + { + /* We don't use any TP interrupts */ + writel(0, tp->adapter->regs + A_TP_INT_ENABLE); + writel(tp_intr | F_PL_INTR_TP, + tp->adapter->regs + A_PL_ENABLE); + } +} + +void t1_tp_intr_disable(struct petp *tp) +{ + u32 tp_intr = readl(tp->adapter->regs + A_PL_ENABLE); + +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(tp->adapter)) { + /* FPGA */ + writel(0, tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_ENABLE); + writel(tp_intr & ~FPGA_PCIX_INTERRUPT_TP, + tp->adapter->regs + A_PL_ENABLE); + } else +#endif + { + writel(0, tp->adapter->regs + A_TP_INT_ENABLE); + writel(tp_intr & ~F_PL_INTR_TP, + tp->adapter->regs + A_PL_ENABLE); + } +} + +void t1_tp_intr_clear(struct petp *tp) +{ +#ifdef CONFIG_CHELSIO_T1_1G + if (!t1_is_asic(tp->adapter)) { + writel(0xffffffff, + tp->adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); + writel(FPGA_PCIX_INTERRUPT_TP, tp->adapter->regs + A_PL_CAUSE); + return; + } +#endif + writel(0xffffffff, tp->adapter->regs + A_TP_INT_CAUSE); + writel(F_PL_INTR_TP, tp->adapter->regs + A_PL_CAUSE); +} + +int t1_tp_intr_handler(struct petp *tp) +{ + u32 cause; + +#ifdef CONFIG_CHELSIO_T1_1G + /* FPGA doesn't support TP interrupts. */ + if (!t1_is_asic(tp->adapter)) + return 1; +#endif + + cause = readl(tp->adapter->regs + A_TP_INT_CAUSE); + writel(cause, tp->adapter->regs + A_TP_INT_CAUSE); + return 0; +} + +static void set_csum_offload(struct petp *tp, u32 csum_bit, int enable) +{ + u32 val = readl(tp->adapter->regs + A_TP_GLOBAL_CONFIG); + + if (enable) + val |= csum_bit; + else + val &= ~csum_bit; + writel(val, tp->adapter->regs + A_TP_GLOBAL_CONFIG); +} + +void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable) +{ + set_csum_offload(tp, F_IP_CSUM, enable); +} + +void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable) +{ + set_csum_offload(tp, F_TCP_CSUM, enable); +} + +/* + * Initialize TP state. tp_params contains initial settings for some TP + * parameters, particularly the one-time PM and CM settings. 
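+ *
+ * A minimal usage sketch (the surrounding variable names are
+ * illustrative; the actual call sites live elsewhere in the driver's
+ * init and teardown paths):
+ *
+ *	struct petp *tp = t1_tp_create(adapter, &tp_params);
+ *	if (tp) {
+ *		t1_tp_reset(tp, &tp_params, tp_clk_hz);
+ *		t1_tp_intr_enable(tp);
+ *		...
+ *		t1_tp_intr_disable(tp);
+ *		t1_tp_destroy(tp);
+ *	}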
+ */ +int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk) +{ + adapter_t *adapter = tp->adapter; + + tp_init(adapter, p, tp_clk); + writel(F_TP_RESET, adapter->regs + A_TP_RESET); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h new file mode 100644 index 000000000..dfd8ce251 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/tp.h @@ -0,0 +1,72 @@ +/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */ +#ifndef CHELSIO_TP_H +#define CHELSIO_TP_H + +#include "common.h" + +#define TP_MAX_RX_COALESCING_SIZE 16224U + +struct tp_mib_statistics { + + /* IP */ + u32 ipInReceive_hi; + u32 ipInReceive_lo; + u32 ipInHdrErrors_hi; + u32 ipInHdrErrors_lo; + u32 ipInAddrErrors_hi; + u32 ipInAddrErrors_lo; + u32 ipInUnknownProtos_hi; + u32 ipInUnknownProtos_lo; + u32 ipInDiscards_hi; + u32 ipInDiscards_lo; + u32 ipInDelivers_hi; + u32 ipInDelivers_lo; + u32 ipOutRequests_hi; + u32 ipOutRequests_lo; + u32 ipOutDiscards_hi; + u32 ipOutDiscards_lo; + u32 ipOutNoRoutes_hi; + u32 ipOutNoRoutes_lo; + u32 ipReasmTimeout; + u32 ipReasmReqds; + u32 ipReasmOKs; + u32 ipReasmFails; + + u32 reserved[8]; + + /* TCP */ + u32 tcpActiveOpens; + u32 tcpPassiveOpens; + u32 tcpAttemptFails; + u32 tcpEstabResets; + u32 tcpOutRsts; + u32 tcpCurrEstab; + u32 tcpInSegs_hi; + u32 tcpInSegs_lo; + u32 tcpOutSegs_hi; + u32 tcpOutSegs_lo; + u32 tcpRetransSeg_hi; + u32 tcpRetransSeg_lo; + u32 tcpInErrs_hi; + u32 tcpInErrs_lo; + u32 tcpRtoMin; + u32 tcpRtoMax; +}; + +struct petp; +struct tp_params; + +struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p); +void t1_tp_destroy(struct petp *tp); + +void t1_tp_intr_disable(struct petp *tp); +void t1_tp_intr_enable(struct petp *tp); +void t1_tp_intr_clear(struct petp *tp); +int t1_tp_intr_handler(struct petp *tp); + +void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps); +void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable); +void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable); +int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size); +int t1_tp_reset(struct petp *tp, struct tp_params *p, unsigned int tp_clk); +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c new file mode 100644 index 000000000..b0cb388f5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -0,0 +1,730 @@ +/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */ + +/* Driver for Vitesse VSC7326 (Schaumburg) MAC */ + +#include "gmac.h" +#include "elmer0.h" +#include "vsc7326_reg.h" + +/* Update fast changing statistics every 15 seconds */ +#define STATS_TICK_SECS 15 +/* 30 minutes for full statistics update */ +#define MAJOR_UPDATE_TICKS (1800 / STATS_TICK_SECS) + +#define MAX_MTU 9600 + +/* The egress WM value 0x01a01fff should be used only when the + * interface is down (MAC port disabled). This is a workaround + * for disabling the T2/MAC flow-control. When the interface is + * enabled, the WM value should be set to 0x014a03F0. 
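+ *
+ * In this driver that means the per-port init tables below (re-run by
+ * mac_reset()/mac_disable()) leave REG_HIGH_LOW_WM(1, port) at
+ * WM_DISABLE, while mac_enable() rewrites it just before turning the
+ * RX/TX paths back on:
+ *
+ *	vsc_write(mac->adapter, REG_HIGH_LOW_WM(1, port), WM_ENABLE);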
+ */ +#define WM_DISABLE 0x01a01fff +#define WM_ENABLE 0x014a03F0 + +struct init_table { + u32 addr; + u32 data; +}; + +struct _cmac_instance { + u32 index; + u32 ticks; +}; + +#define INITBLOCK_SLEEP 0xffffffff + +static void vsc_read(adapter_t *adapter, u32 addr, u32 *val) +{ + u32 status, vlo, vhi; + int i; + + spin_lock_bh(&adapter->mac_lock); + t1_tpi_read(adapter, (addr << 2) + 4, &vlo); + i = 0; + do { + t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo); + t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi); + status = (vhi << 16) | vlo; + i++; + } while (((status & 1) == 0) && (i < 50)); + if (i == 50) + pr_err("Invalid tpi read from MAC, breaking loop.\n"); + + t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo); + t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi); + + *val = (vhi << 16) | vlo; + + /* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", + ((addr&0xe000)>>13), ((addr&0x1e00)>>9), + ((addr&0x01fe)>>1), *val); */ + spin_unlock_bh(&adapter->mac_lock); +} + +static void vsc_write(adapter_t *adapter, u32 addr, u32 data) +{ + spin_lock_bh(&adapter->mac_lock); + t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF); + t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF); + /* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n", + ((addr&0xe000)>>13), ((addr&0x1e00)>>9), + ((addr&0x01fe)>>1), data); */ + spin_unlock_bh(&adapter->mac_lock); +} + +/* Hard reset the MAC. This wipes out *all* configuration. */ +static void vsc7326_full_reset(adapter_t* adapter) +{ + u32 val; + u32 result = 0xffff; + + t1_tpi_read(adapter, A_ELMER0_GPO, &val); + val &= ~1; + t1_tpi_write(adapter, A_ELMER0_GPO, val); + udelay(2); + val |= 0x1; /* Enable mac MAC itself */ + val |= 0x800; /* Turn off the red LED */ + t1_tpi_write(adapter, A_ELMER0_GPO, val); + mdelay(1); + vsc_write(adapter, REG_SW_RESET, 0x80000001); + do { + mdelay(1); + vsc_read(adapter, REG_SW_RESET, &result); + } while (result != 0x0); +} + +static struct init_table vsc7326_reset[] = { + { REG_IFACE_MODE, 0x00000000 }, + { REG_CRC_CFG, 0x00000020 }, + { REG_PLL_CLK_SPEED, 0x00050c00 }, + { REG_PLL_CLK_SPEED, 0x00050c00 }, + { REG_MSCH, 0x00002f14 }, + { REG_SPI4_MISC, 0x00040409 }, + { REG_SPI4_DESKEW, 0x00080000 }, + { REG_SPI4_ING_SETUP2, 0x08080004 }, + { REG_SPI4_ING_SETUP0, 0x04111004 }, + { REG_SPI4_EGR_SETUP0, 0x80001a04 }, + { REG_SPI4_ING_SETUP1, 0x02010000 }, + { REG_AGE_INC(0), 0x00000000 }, + { REG_AGE_INC(1), 0x00000000 }, + { REG_ING_CONTROL, 0x0a200011 }, + { REG_EGR_CONTROL, 0xa0010091 }, +}; + +static struct init_table vsc7326_portinit[4][22] = { + { /* Port 0 */ + /* FIFO setup */ + { REG_DBG(0), 0x000004f0 }, + { REG_HDX(0), 0x00073101 }, + { REG_TEST(0,0), 0x00000022 }, + { REG_TEST(1,0), 0x00000022 }, + { REG_TOP_BOTTOM(0,0), 0x003f0000 }, + { REG_TOP_BOTTOM(1,0), 0x00120000 }, + { REG_HIGH_LOW_WM(0,0), 0x07460757 }, + { REG_HIGH_LOW_WM(1,0), WM_DISABLE }, + { REG_CT_THRHLD(0,0), 0x00000000 }, + { REG_CT_THRHLD(1,0), 0x00000000 }, + { REG_BUCKE(0), 0x0002ffff }, + { REG_BUCKI(0), 0x0002ffff }, + { REG_TEST(0,0), 0x00000020 }, + { REG_TEST(1,0), 0x00000020 }, + /* Port config */ + { REG_MAX_LEN(0), 0x00002710 }, + { REG_PORT_FAIL(0), 0x00000002 }, + { REG_NORMALIZER(0), 0x00000a64 }, + { REG_DENORM(0), 0x00000010 }, + { REG_STICK_BIT(0), 0x03baa370 }, + { REG_DEV_SETUP(0), 0x00000083 }, + { REG_DEV_SETUP(0), 0x00000082 }, + { REG_MODE_CFG(0), 0x0200259f }, + }, + { /* Port 1 */ + /* FIFO setup */ + { REG_DBG(1), 0x000004f0 }, + { REG_HDX(1), 0x00073101 }, + { REG_TEST(0,1), 
0x00000022 }, + { REG_TEST(1,1), 0x00000022 }, + { REG_TOP_BOTTOM(0,1), 0x007e003f }, + { REG_TOP_BOTTOM(1,1), 0x00240012 }, + { REG_HIGH_LOW_WM(0,1), 0x07460757 }, + { REG_HIGH_LOW_WM(1,1), WM_DISABLE }, + { REG_CT_THRHLD(0,1), 0x00000000 }, + { REG_CT_THRHLD(1,1), 0x00000000 }, + { REG_BUCKE(1), 0x0002ffff }, + { REG_BUCKI(1), 0x0002ffff }, + { REG_TEST(0,1), 0x00000020 }, + { REG_TEST(1,1), 0x00000020 }, + /* Port config */ + { REG_MAX_LEN(1), 0x00002710 }, + { REG_PORT_FAIL(1), 0x00000002 }, + { REG_NORMALIZER(1), 0x00000a64 }, + { REG_DENORM(1), 0x00000010 }, + { REG_STICK_BIT(1), 0x03baa370 }, + { REG_DEV_SETUP(1), 0x00000083 }, + { REG_DEV_SETUP(1), 0x00000082 }, + { REG_MODE_CFG(1), 0x0200259f }, + }, + { /* Port 2 */ + /* FIFO setup */ + { REG_DBG(2), 0x000004f0 }, + { REG_HDX(2), 0x00073101 }, + { REG_TEST(0,2), 0x00000022 }, + { REG_TEST(1,2), 0x00000022 }, + { REG_TOP_BOTTOM(0,2), 0x00bd007e }, + { REG_TOP_BOTTOM(1,2), 0x00360024 }, + { REG_HIGH_LOW_WM(0,2), 0x07460757 }, + { REG_HIGH_LOW_WM(1,2), WM_DISABLE }, + { REG_CT_THRHLD(0,2), 0x00000000 }, + { REG_CT_THRHLD(1,2), 0x00000000 }, + { REG_BUCKE(2), 0x0002ffff }, + { REG_BUCKI(2), 0x0002ffff }, + { REG_TEST(0,2), 0x00000020 }, + { REG_TEST(1,2), 0x00000020 }, + /* Port config */ + { REG_MAX_LEN(2), 0x00002710 }, + { REG_PORT_FAIL(2), 0x00000002 }, + { REG_NORMALIZER(2), 0x00000a64 }, + { REG_DENORM(2), 0x00000010 }, + { REG_STICK_BIT(2), 0x03baa370 }, + { REG_DEV_SETUP(2), 0x00000083 }, + { REG_DEV_SETUP(2), 0x00000082 }, + { REG_MODE_CFG(2), 0x0200259f }, + }, + { /* Port 3 */ + /* FIFO setup */ + { REG_DBG(3), 0x000004f0 }, + { REG_HDX(3), 0x00073101 }, + { REG_TEST(0,3), 0x00000022 }, + { REG_TEST(1,3), 0x00000022 }, + { REG_TOP_BOTTOM(0,3), 0x00fc00bd }, + { REG_TOP_BOTTOM(1,3), 0x00480036 }, + { REG_HIGH_LOW_WM(0,3), 0x07460757 }, + { REG_HIGH_LOW_WM(1,3), WM_DISABLE }, + { REG_CT_THRHLD(0,3), 0x00000000 }, + { REG_CT_THRHLD(1,3), 0x00000000 }, + { REG_BUCKE(3), 0x0002ffff }, + { REG_BUCKI(3), 0x0002ffff }, + { REG_TEST(0,3), 0x00000020 }, + { REG_TEST(1,3), 0x00000020 }, + /* Port config */ + { REG_MAX_LEN(3), 0x00002710 }, + { REG_PORT_FAIL(3), 0x00000002 }, + { REG_NORMALIZER(3), 0x00000a64 }, + { REG_DENORM(3), 0x00000010 }, + { REG_STICK_BIT(3), 0x03baa370 }, + { REG_DEV_SETUP(3), 0x00000083 }, + { REG_DEV_SETUP(3), 0x00000082 }, + { REG_MODE_CFG(3), 0x0200259f }, + }, +}; + +static void run_table(adapter_t *adapter, struct init_table *ib, int len) +{ + int i; + + for (i = 0; i < len; i++) { + if (ib[i].addr == INITBLOCK_SLEEP) { + udelay( ib[i].data ); + pr_err("sleep %d us\n",ib[i].data); + } else + vsc_write( adapter, ib[i].addr, ib[i].data ); + } +} + +static int bist_rd(adapter_t *adapter, int moduleid, int address) +{ + int data = 0; + u32 result = 0; + + if ((address != 0x0) && + (address != 0x1) && + (address != 0x2) && + (address != 0xd) && + (address != 0xe)) + pr_err("No bist address: 0x%x\n", address); + + data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | + ((moduleid & 0xff) << 0)); + vsc_write(adapter, REG_RAM_BIST_CMD, data); + + udelay(10); + + vsc_read(adapter, REG_RAM_BIST_RESULT, &result); + if ((result & (1 << 9)) != 0x0) + pr_err("Still in bist read: 0x%x\n", result); + else if ((result & (1 << 8)) != 0x0) + pr_err("bist read error: 0x%x\n", result); + + return result & 0xff; +} + +static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) +{ + int data = 0; + u32 result = 0; + + if ((address != 0x0) && + (address != 0x1) && + (address != 0x2) && + (address != 
0xd) && + (address != 0xe)) + pr_err("No bist address: 0x%x\n", address); + + if (value > 255) + pr_err("Suspicious write out of range value: 0x%x\n", value); + + data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | + ((moduleid & 0xff) << 0)); + vsc_write(adapter, REG_RAM_BIST_CMD, data); + + udelay(5); + + vsc_read(adapter, REG_RAM_BIST_CMD, &result); + if ((result & (1 << 27)) != 0x0) + pr_err("Still in bist write: 0x%x\n", result); + else if ((result & (1 << 26)) != 0x0) + pr_err("bist write error: 0x%x\n", result); + + return 0; +} + +static int run_bist(adapter_t *adapter, int moduleid) +{ + /*run bist*/ + (void) bist_wr(adapter,moduleid, 0x00, 0x02); + (void) bist_wr(adapter,moduleid, 0x01, 0x01); + + return 0; +} + +static int check_bist(adapter_t *adapter, int moduleid) +{ + int result=0; + int column=0; + /*check bist*/ + result = bist_rd(adapter,moduleid, 0x02); + column = ((bist_rd(adapter,moduleid, 0x0e)<<8) + + (bist_rd(adapter,moduleid, 0x0d))); + if ((result & 3) != 0x3) + pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", + result, moduleid, column); + return 0; +} + +static int enable_mem(adapter_t *adapter, int moduleid) +{ + /*enable mem*/ + (void) bist_wr(adapter,moduleid, 0x00, 0x00); + return 0; +} + +static int run_bist_all(adapter_t *adapter) +{ + int port = 0; + u32 val = 0; + + vsc_write(adapter, REG_MEM_BIST, 0x5); + vsc_read(adapter, REG_MEM_BIST, &val); + + for (port = 0; port < 12; port++) + vsc_write(adapter, REG_DEV_SETUP(port), 0x0); + + udelay(300); + vsc_write(adapter, REG_SPI4_MISC, 0x00040409); + udelay(300); + + (void) run_bist(adapter,13); + (void) run_bist(adapter,14); + (void) run_bist(adapter,20); + (void) run_bist(adapter,21); + mdelay(200); + (void) check_bist(adapter,13); + (void) check_bist(adapter,14); + (void) check_bist(adapter,20); + (void) check_bist(adapter,21); + udelay(100); + (void) enable_mem(adapter,13); + (void) enable_mem(adapter,14); + (void) enable_mem(adapter,20); + (void) enable_mem(adapter,21); + udelay(300); + vsc_write(adapter, REG_SPI4_MISC, 0x60040400); + udelay(300); + for (port = 0; port < 12; port++) + vsc_write(adapter, REG_DEV_SETUP(port), 0x1); + + udelay(300); + vsc_write(adapter, REG_MEM_BIST, 0x0); + mdelay(10); + return 0; +} + +static int mac_intr_handler(struct cmac *mac) +{ + return 0; +} + +static int mac_intr_enable(struct cmac *mac) +{ + return 0; +} + +static int mac_intr_disable(struct cmac *mac) +{ + return 0; +} + +static int mac_intr_clear(struct cmac *mac) +{ + return 0; +} + +/* Expect MAC address to be in network byte order. 
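+ * The six bytes are split across two registers: addr[0..2] (the OUI) go
+ * into REG_MAC_HIGH_ADDR and addr[3..5] into REG_MAC_LOW_ADDR; the same
+ * bytes are also mirrored into the ingress frame-filter mask registers.
+ * For example, the illustrative address 00:07:43:12:34:56 programs
+ * HIGH_ADDR = 0x000743 and LOW_ADDR = 0x123456.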
*/ +static int mac_set_address(struct cmac* mac, u8 addr[6]) +{ + u32 val; + int port = mac->instance->index; + + vsc_write(mac->adapter, REG_MAC_LOW_ADDR(port), + (addr[3] << 16) | (addr[4] << 8) | addr[5]); + vsc_write(mac->adapter, REG_MAC_HIGH_ADDR(port), + (addr[0] << 16) | (addr[1] << 8) | addr[2]); + + vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &val); + val &= ~0xf0000000; + vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, val | (port << 28)); + + vsc_write(mac->adapter, REG_ING_FFILT_MASK0, + 0xffff0000 | (addr[4] << 8) | addr[5]); + vsc_write(mac->adapter, REG_ING_FFILT_MASK1, + 0xffff0000 | (addr[2] << 8) | addr[3]); + vsc_write(mac->adapter, REG_ING_FFILT_MASK2, + 0xffff0000 | (addr[0] << 8) | addr[1]); + return 0; +} + +static int mac_get_address(struct cmac *mac, u8 addr[6]) +{ + u32 addr_lo, addr_hi; + int port = mac->instance->index; + + vsc_read(mac->adapter, REG_MAC_LOW_ADDR(port), &addr_lo); + vsc_read(mac->adapter, REG_MAC_HIGH_ADDR(port), &addr_hi); + + addr[0] = (u8) (addr_hi >> 16); + addr[1] = (u8) (addr_hi >> 8); + addr[2] = (u8) addr_hi; + addr[3] = (u8) (addr_lo >> 16); + addr[4] = (u8) (addr_lo >> 8); + addr[5] = (u8) addr_lo; + return 0; +} + +/* This is intended to reset a port, not the whole MAC */ +static int mac_reset(struct cmac *mac) +{ + int index = mac->instance->index; + + run_table(mac->adapter, vsc7326_portinit[index], + ARRAY_SIZE(vsc7326_portinit[index])); + + return 0; +} + +static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) +{ + u32 v; + int port = mac->instance->index; + + vsc_read(mac->adapter, REG_ING_FFILT_UM_EN, &v); + v |= 1 << 12; + + if (t1_rx_mode_promisc(rm)) + v &= ~(1 << (port + 16)); + else + v |= 1 << (port + 16); + + vsc_write(mac->adapter, REG_ING_FFILT_UM_EN, v); + return 0; +} + +static int mac_set_mtu(struct cmac *mac, int mtu) +{ + int port = mac->instance->index; + + if (mtu > MAX_MTU) + return -EINVAL; + + /* max_len includes header and FCS */ + vsc_write(mac->adapter, REG_MAX_LEN(port), mtu + 14 + 4); + return 0; +} + +static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, + int fc) +{ + u32 v; + int enable, port = mac->instance->index; + + if (speed >= 0 && speed != SPEED_10 && speed != SPEED_100 && + speed != SPEED_1000) + return -1; + if (duplex > 0 && duplex != DUPLEX_FULL) + return -1; + + if (speed >= 0) { + vsc_read(mac->adapter, REG_MODE_CFG(port), &v); + enable = v & 3; /* save tx/rx enables */ + v &= ~0xf; + v |= 4; /* full duplex */ + if (speed == SPEED_1000) + v |= 8; /* GigE */ + enable |= v; + vsc_write(mac->adapter, REG_MODE_CFG(port), v); + + if (speed == SPEED_1000) + v = 0x82; + else if (speed == SPEED_100) + v = 0x84; + else /* SPEED_10 */ + v = 0x86; + vsc_write(mac->adapter, REG_DEV_SETUP(port), v | 1); /* reset */ + vsc_write(mac->adapter, REG_DEV_SETUP(port), v); + vsc_read(mac->adapter, REG_DBG(port), &v); + v &= ~0xff00; + if (speed == SPEED_1000) + v |= 0x400; + else if (speed == SPEED_100) + v |= 0x2000; + else /* SPEED_10 */ + v |= 0xff00; + vsc_write(mac->adapter, REG_DBG(port), v); + + vsc_write(mac->adapter, REG_TX_IFG(port), + speed == SPEED_1000 ? 
5 : 0x11); + if (duplex == DUPLEX_HALF) + enable = 0x0; /* 100 or 10 */ + else if (speed == SPEED_1000) + enable = 0xc; + else /* SPEED_100 or 10 */ + enable = 0x4; + enable |= 0x9 << 10; /* IFG1 */ + enable |= 0x6 << 6; /* IFG2 */ + enable |= 0x1 << 4; /* VLAN */ + enable |= 0x3; /* RX/TX EN */ + vsc_write(mac->adapter, REG_MODE_CFG(port), enable); + + } + + vsc_read(mac->adapter, REG_PAUSE_CFG(port), &v); + v &= 0xfff0ffff; + v |= 0x20000; /* xon/xoff */ + if (fc & PAUSE_RX) + v |= 0x40000; + if (fc & PAUSE_TX) + v |= 0x80000; + if (fc == (PAUSE_RX | PAUSE_TX)) + v |= 0x10000; + vsc_write(mac->adapter, REG_PAUSE_CFG(port), v); + return 0; +} + +static int mac_enable(struct cmac *mac, int which) +{ + u32 val; + int port = mac->instance->index; + + /* Write the correct WM value when the port is enabled. */ + vsc_write(mac->adapter, REG_HIGH_LOW_WM(1,port), WM_ENABLE); + + vsc_read(mac->adapter, REG_MODE_CFG(port), &val); + if (which & MAC_DIRECTION_RX) + val |= 0x2; + if (which & MAC_DIRECTION_TX) + val |= 1; + vsc_write(mac->adapter, REG_MODE_CFG(port), val); + return 0; +} + +static int mac_disable(struct cmac *mac, int which) +{ + u32 val; + int i, port = mac->instance->index; + + /* Reset the port, this also writes the correct WM value */ + mac_reset(mac); + + vsc_read(mac->adapter, REG_MODE_CFG(port), &val); + if (which & MAC_DIRECTION_RX) + val &= ~0x2; + if (which & MAC_DIRECTION_TX) + val &= ~0x1; + vsc_write(mac->adapter, REG_MODE_CFG(port), val); + vsc_read(mac->adapter, REG_MODE_CFG(port), &val); + + /* Clear stats */ + for (i = 0; i <= 0x3a; ++i) + vsc_write(mac->adapter, CRA(4, port, i), 0); + + /* Clear software counters */ + memset(&mac->stats, 0, sizeof(struct cmac_statistics)); + + return 0; +} + +static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat) +{ + u32 v, lo; + + vsc_read(mac->adapter, addr, &v); + lo = *stat; + *stat = *stat - lo + v; + + if (v == 0) + return; + + if (v < lo) + *stat += (1ULL << 32); +} + +static void port_stats_update(struct cmac *mac) +{ + struct { + unsigned int reg; + unsigned int offset; + } hw_stats[] = { + +#define HW_STAT(reg, stat_name) \ + { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } + + /* Rx stats */ + HW_STAT(RxUnicast, RxUnicastFramesOK), + HW_STAT(RxMulticast, RxMulticastFramesOK), + HW_STAT(RxBroadcast, RxBroadcastFramesOK), + HW_STAT(Crc, RxFCSErrors), + HW_STAT(RxAlignment, RxAlignErrors), + HW_STAT(RxOversize, RxFrameTooLongErrors), + HW_STAT(RxPause, RxPauseFrames), + HW_STAT(RxJabbers, RxJabberErrors), + HW_STAT(RxFragments, RxRuntErrors), + HW_STAT(RxUndersize, RxRuntErrors), + HW_STAT(RxSymbolCarrier, RxSymbolErrors), + HW_STAT(RxSize1519ToMax, RxJumboFramesOK), + + /* Tx stats (skip collision stats as we are full-duplex only) */ + HW_STAT(TxUnicast, TxUnicastFramesOK), + HW_STAT(TxMulticast, TxMulticastFramesOK), + HW_STAT(TxBroadcast, TxBroadcastFramesOK), + HW_STAT(TxPause, TxPauseFrames), + HW_STAT(TxUnderrun, TxUnderrun), + HW_STAT(TxSize1519ToMax, TxJumboFramesOK), + }, *p = hw_stats; + unsigned int port = mac->instance->index; + u64 *stats = (u64 *)&mac->stats; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(hw_stats); i++) + rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset); + + rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK); + rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); + rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); +} + +/* + * This function is called periodically to accumulate the current values of the 
+ * RMON counters into the port statistics. Since the counters are only 32 bits + * some of them can overflow in less than a minute at GigE speeds, so this + * function should be called every 30 seconds or so. + * + * To cut down on reading costs we update only the octet counters at each tick + * and do a full update at major ticks, which can be every 30 minutes or more. + */ +static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, + int flag) +{ + if (flag == MAC_STATS_UPDATE_FULL || + mac->instance->ticks >= MAJOR_UPDATE_TICKS) { + port_stats_update(mac); + mac->instance->ticks = 0; + } else { + int port = mac->instance->index; + + rmon_update(mac, REG_RX_OK_BYTES(port), + &mac->stats.RxOctetsOK); + rmon_update(mac, REG_RX_BAD_BYTES(port), + &mac->stats.RxOctetsBad); + rmon_update(mac, REG_TX_OK_BYTES(port), + &mac->stats.TxOctetsOK); + mac->instance->ticks++; + } + return &mac->stats; +} + +static void mac_destroy(struct cmac *mac) +{ + kfree(mac); +} + +static struct cmac_ops vsc7326_ops = { + .destroy = mac_destroy, + .reset = mac_reset, + .interrupt_handler = mac_intr_handler, + .interrupt_enable = mac_intr_enable, + .interrupt_disable = mac_intr_disable, + .interrupt_clear = mac_intr_clear, + .enable = mac_enable, + .disable = mac_disable, + .set_mtu = mac_set_mtu, + .set_rx_mode = mac_set_rx_mode, + .set_speed_duplex_fc = mac_set_speed_duplex_fc, + .statistics_update = mac_update_statistics, + .macaddress_get = mac_get_address, + .macaddress_set = mac_set_address, +}; + +static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index) +{ + struct cmac *mac; + u32 val; + int i; + + mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); + if (!mac) + return NULL; + + mac->ops = &vsc7326_ops; + mac->instance = (cmac_instance *)(mac + 1); + mac->adapter = adapter; + + mac->instance->index = index; + mac->instance->ticks = 0; + + i = 0; + do { + u32 vhi, vlo; + + vhi = vlo = 0; + t1_tpi_read(adapter, (REG_LOCAL_STATUS << 2) + 4, &vlo); + udelay(1); + t1_tpi_read(adapter, REG_LOCAL_STATUS << 2, &vhi); + udelay(5); + val = (vhi << 16) | vlo; + } while ((++i < 10000) && (val == 0xffffffff)); + + return mac; +} + +static int vsc7326_mac_reset(adapter_t *adapter) +{ + vsc7326_full_reset(adapter); + (void) run_bist_all(adapter); + run_table(adapter, vsc7326_reset, ARRAY_SIZE(vsc7326_reset)); + return 0; +} + +const struct gmac t1_vsc7326_ops = { + .stats_update_period = STATS_TICK_SECS, + .create = vsc7326_mac_create, + .reset = vsc7326_mac_reset, +}; diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h new file mode 100644 index 000000000..479edbcab --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h @@ -0,0 +1,297 @@ +/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */ +#ifndef _VSC7321_REG_H_ +#define _VSC7321_REG_H_ + +/* Register definitions for Vitesse VSC7321 (Meigs II) MAC + * + * Straight off the data sheet, VMDS-10038 Rev 2.0 and + * PD0011-01-14-Meigs-II 2002-12-12 + */ + +/* Just 'cause it's in here doesn't mean it's used. 
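+ *
+ * Register addresses are built with CRA(block, subblock, address) below,
+ * which packs the three fields into bits 15:13, 12:9 and 8:1 of a 16-bit
+ * value.  For example:
+ *
+ *	CRA(0x7, 0xf, 0xff) = (0x7 << 13) | (0xf << 9) | (0xff << 1)
+ *	                    = 0xe000 | 0x1e00 | 0x01fe = 0xfffe
+ *
+ * which is REG_LOCAL_STATUS.  vsc_read()/vsc_write() in vsc7326.c shift
+ * the result left by two more bits and split the access into two 16-bit
+ * TPI operations (high half at addr << 2, low half at (addr << 2) + 4).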
*/ + +#define CRA(blk,sub,adr) ((((blk) & 0x7) << 13) | (((sub) & 0xf) << 9) | (((adr) & 0xff) << 1)) + +/* System and CPU comm's registers */ +#define REG_CHIP_ID CRA(0x7,0xf,0x00) /* Chip ID */ +#define REG_BLADE_ID CRA(0x7,0xf,0x01) /* Blade ID */ +#define REG_SW_RESET CRA(0x7,0xf,0x02) /* Global Soft Reset */ +#define REG_MEM_BIST CRA(0x7,0xf,0x04) /* mem */ +#define REG_IFACE_MODE CRA(0x7,0xf,0x07) /* Interface mode */ +#define REG_MSCH CRA(0x7,0x2,0x06) /* CRC error count */ +#define REG_CRC_CNT CRA(0x7,0x2,0x0a) /* CRC error count */ +#define REG_CRC_CFG CRA(0x7,0x2,0x0b) /* CRC config */ +#define REG_SI_TRANSFER_SEL CRA(0x7,0xf,0x18) /* SI Transfer Select */ +#define REG_PLL_CLK_SPEED CRA(0x7,0xf,0x19) /* Clock Speed Selection */ +#define REG_SYS_CLK_SELECT CRA(0x7,0xf,0x1c) /* System Clock Select */ +#define REG_GPIO_CTRL CRA(0x7,0xf,0x1d) /* GPIO Control */ +#define REG_GPIO_OUT CRA(0x7,0xf,0x1e) /* GPIO Out */ +#define REG_GPIO_IN CRA(0x7,0xf,0x1f) /* GPIO In */ +#define REG_CPU_TRANSFER_SEL CRA(0x7,0xf,0x20) /* CPU Transfer Select */ +#define REG_LOCAL_DATA CRA(0x7,0xf,0xfe) /* Local CPU Data Register */ +#define REG_LOCAL_STATUS CRA(0x7,0xf,0xff) /* Local CPU Status Register */ + +/* Aggregator registers */ +#define REG_AGGR_SETUP CRA(0x7,0x1,0x00) /* Aggregator Setup */ +#define REG_PMAP_TABLE CRA(0x7,0x1,0x01) /* Port map table */ +#define REG_MPLS_BIT0 CRA(0x7,0x1,0x08) /* MPLS bit0 position */ +#define REG_MPLS_BIT1 CRA(0x7,0x1,0x09) /* MPLS bit1 position */ +#define REG_MPLS_BIT2 CRA(0x7,0x1,0x0a) /* MPLS bit2 position */ +#define REG_MPLS_BIT3 CRA(0x7,0x1,0x0b) /* MPLS bit3 position */ +#define REG_MPLS_BITMASK CRA(0x7,0x1,0x0c) /* MPLS bit mask */ +#define REG_PRE_BIT0POS CRA(0x7,0x1,0x10) /* Preamble bit0 position */ +#define REG_PRE_BIT1POS CRA(0x7,0x1,0x11) /* Preamble bit1 position */ +#define REG_PRE_BIT2POS CRA(0x7,0x1,0x12) /* Preamble bit2 position */ +#define REG_PRE_BIT3POS CRA(0x7,0x1,0x13) /* Preamble bit3 position */ +#define REG_PRE_ERR_CNT CRA(0x7,0x1,0x14) /* Preamble parity error count */ + +/* BIST registers */ +/*#define REG_RAM_BIST_CMD CRA(0x7,0x2,0x00)*/ /* RAM BIST Command Register */ +/*#define REG_RAM_BIST_RESULT CRA(0x7,0x2,0x01)*/ /* RAM BIST Read Status/Result */ +#define REG_RAM_BIST_CMD CRA(0x7,0x1,0x00) /* RAM BIST Command Register */ +#define REG_RAM_BIST_RESULT CRA(0x7,0x1,0x01) /* RAM BIST Read Status/Result */ +#define BIST_PORT_SELECT 0x00 /* BIST port select */ +#define BIST_COMMAND 0x01 /* BIST enable/disable */ +#define BIST_STATUS 0x02 /* BIST operation status */ +#define BIST_ERR_CNT_LSB 0x03 /* BIST error count lo 8b */ +#define BIST_ERR_CNT_MSB 0x04 /* BIST error count hi 8b */ +#define BIST_ERR_SEL_LSB 0x05 /* BIST error select lo 8b */ +#define BIST_ERR_SEL_MSB 0x06 /* BIST error select hi 8b */ +#define BIST_ERROR_STATE 0x07 /* BIST engine internal state */ +#define BIST_ERR_ADR0 0x08 /* BIST error address lo 8b */ +#define BIST_ERR_ADR1 0x09 /* BIST error address lomid 8b */ +#define BIST_ERR_ADR2 0x0a /* BIST error address himid 8b */ +#define BIST_ERR_ADR3 0x0b /* BIST error address hi 8b */ + +/* FIFO registers + * ie = 0 for ingress, 1 for egress + * fn = FIFO number, 0-9 + */ +#define REG_TEST(ie,fn) CRA(0x2,ie&1,0x00+fn) /* Mode & Test Register */ +#define REG_TOP_BOTTOM(ie,fn) CRA(0x2,ie&1,0x10+fn) /* FIFO Buffer Top & Bottom */ +#define REG_TAIL(ie,fn) CRA(0x2,ie&1,0x20+fn) /* FIFO Write Pointer */ +#define REG_HEAD(ie,fn) CRA(0x2,ie&1,0x30+fn) /* FIFO Read Pointer */ +#define REG_HIGH_LOW_WM(ie,fn) 
CRA(0x2,ie&1,0x40+fn) /* Flow Control Water Marks */ +#define REG_CT_THRHLD(ie,fn) CRA(0x2,ie&1,0x50+fn) /* Cut Through Threshold */ +#define REG_FIFO_DROP_CNT(ie,fn) CRA(0x2,ie&1,0x60+fn) /* Drop & CRC Error Counter */ +#define REG_DEBUG_BUF_CNT(ie,fn) CRA(0x2,ie&1,0x70+fn) /* Input Side Debug Counter */ +#define REG_BUCKI(fn) CRA(0x2,2,0x20+fn) /* Input Side Debug Counter */ +#define REG_BUCKE(fn) CRA(0x2,3,0x20+fn) /* Input Side Debug Counter */ + +/* Traffic shaper buckets + * ie = 0 for ingress, 1 for egress + * bn = bucket number 0-10 (yes, 11 buckets) + */ +/* OK, this one's kinda ugly. Some hardware designers are perverse. */ +#define REG_TRAFFIC_SHAPER_BUCKET(ie,bn) CRA(0x2,ie&1,0x0a + (bn>7) | ((bn&7)<<4)) +#define REG_TRAFFIC_SHAPER_CONTROL(ie) CRA(0x2,ie&1,0x3b) + +#define REG_SRAM_ADR(ie) CRA(0x2,ie&1,0x0e) /* FIFO SRAM address */ +#define REG_SRAM_WR_STRB(ie) CRA(0x2,ie&1,0x1e) /* FIFO SRAM write strobe */ +#define REG_SRAM_RD_STRB(ie) CRA(0x2,ie&1,0x2e) /* FIFO SRAM read strobe */ +#define REG_SRAM_DATA_0(ie) CRA(0x2,ie&1,0x3e) /* FIFO SRAM data lo 8b */ +#define REG_SRAM_DATA_1(ie) CRA(0x2,ie&1,0x4e) /* FIFO SRAM data lomid 8b */ +#define REG_SRAM_DATA_2(ie) CRA(0x2,ie&1,0x5e) /* FIFO SRAM data himid 8b */ +#define REG_SRAM_DATA_3(ie) CRA(0x2,ie&1,0x6e) /* FIFO SRAM data hi 8b */ +#define REG_SRAM_DATA_BLK_TYPE(ie) CRA(0x2,ie&1,0x7e) /* FIFO SRAM tag */ +/* REG_ING_CONTROL equals REG_CONTROL with ie = 0, likewise REG_EGR_CONTROL is ie = 1 */ +#define REG_CONTROL(ie) CRA(0x2,ie&1,0x0f) /* FIFO control */ +#define REG_ING_CONTROL CRA(0x2,0x0,0x0f) /* Ingress control (alias) */ +#define REG_EGR_CONTROL CRA(0x2,0x1,0x0f) /* Egress control (alias) */ +#define REG_AGE_TIMER(ie) CRA(0x2,ie&1,0x1f) /* Aging timer */ +#define REG_AGE_INC(ie) CRA(0x2,ie&1,0x2f) /* Aging increment */ +#define DEBUG_OUT(ie) CRA(0x2,ie&1,0x3f) /* Output debug counter control */ +#define DEBUG_CNT(ie) CRA(0x2,ie&1,0x4f) /* Output debug counter */ + +/* SPI4 interface */ +#define REG_SPI4_MISC CRA(0x5,0x0,0x00) /* Misc Register */ +#define REG_SPI4_STATUS CRA(0x5,0x0,0x01) /* CML Status */ +#define REG_SPI4_ING_SETUP0 CRA(0x5,0x0,0x02) /* Ingress Status Channel Setup */ +#define REG_SPI4_ING_SETUP1 CRA(0x5,0x0,0x03) /* Ingress Data Training Setup */ +#define REG_SPI4_ING_SETUP2 CRA(0x5,0x0,0x04) /* Ingress Data Burst Size Setup */ +#define REG_SPI4_EGR_SETUP0 CRA(0x5,0x0,0x05) /* Egress Status Channel Setup */ +#define REG_SPI4_DBG_CNT(n) CRA(0x5,0x0,0x10+n) /* Debug counters 0-9 */ +#define REG_SPI4_DBG_SETUP CRA(0x5,0x0,0x1A) /* Debug counters setup */ +#define REG_SPI4_TEST CRA(0x5,0x0,0x20) /* Test Setup Register */ +#define REG_TPGEN_UP0 CRA(0x5,0x0,0x21) /* Test Pattern generator user pattern 0 */ +#define REG_TPGEN_UP1 CRA(0x5,0x0,0x22) /* Test Pattern generator user pattern 1 */ +#define REG_TPCHK_UP0 CRA(0x5,0x0,0x23) /* Test Pattern checker user pattern 0 */ +#define REG_TPCHK_UP1 CRA(0x5,0x0,0x24) /* Test Pattern checker user pattern 1 */ +#define REG_TPSAM_P0 CRA(0x5,0x0,0x25) /* Sampled pattern 0 */ +#define REG_TPSAM_P1 CRA(0x5,0x0,0x26) /* Sampled pattern 1 */ +#define REG_TPERR_CNT CRA(0x5,0x0,0x27) /* Pattern checker error counter */ +#define REG_SPI4_STICKY CRA(0x5,0x0,0x30) /* Sticky bits register */ +#define REG_SPI4_DBG_INH CRA(0x5,0x0,0x31) /* Core egress & ingress inhibit */ +#define REG_SPI4_DBG_STATUS CRA(0x5,0x0,0x32) /* Sampled ingress status */ +#define REG_SPI4_DBG_GRANT CRA(0x5,0x0,0x33) /* Ingress cranted credit value */ + +#define REG_SPI4_DESKEW CRA(0x5,0x0,0x43) /* 
Ingress cranted credit value */ + +/* 10GbE MAC Block Registers */ +/* Note that those registers that are exactly the same for 10GbE as for + * tri-speed are only defined with the version that needs a port number. + * Pass 0xa in those cases. + * + * Also note that despite the presence of a MAC address register, this part + * does no ingress MAC address filtering. That register is used only for + * pause frame detection and generation. + */ +/* 10GbE specific, and different from tri-speed */ +#define REG_MISC_10G CRA(0x1,0xa,0x00) /* Misc 10GbE setup */ +#define REG_PAUSE_10G CRA(0x1,0xa,0x01) /* Pause register */ +#define REG_NORMALIZER_10G CRA(0x1,0xa,0x05) /* 10G normalizer */ +#define REG_STICKY_RX CRA(0x1,0xa,0x06) /* RX debug register */ +#define REG_DENORM_10G CRA(0x1,0xa,0x07) /* Denormalizer */ +#define REG_STICKY_TX CRA(0x1,0xa,0x08) /* TX sticky bits */ +#define REG_MAX_RXHIGH CRA(0x1,0xa,0x0a) /* XGMII lane 0-3 debug */ +#define REG_MAX_RXLOW CRA(0x1,0xa,0x0b) /* XGMII lane 4-7 debug */ +#define REG_MAC_TX_STICKY CRA(0x1,0xa,0x0c) /* MAC Tx state sticky debug */ +#define REG_MAC_TX_RUNNING CRA(0x1,0xa,0x0d) /* MAC Tx state running debug */ +#define REG_TX_ABORT_AGE CRA(0x1,0xa,0x14) /* Aged Tx frames discarded */ +#define REG_TX_ABORT_SHORT CRA(0x1,0xa,0x15) /* Short Tx frames discarded */ +#define REG_TX_ABORT_TAXI CRA(0x1,0xa,0x16) /* Taxi error frames discarded */ +#define REG_TX_ABORT_UNDERRUN CRA(0x1,0xa,0x17) /* Tx Underrun abort counter */ +#define REG_TX_DENORM_DISCARD CRA(0x1,0xa,0x18) /* Tx denormalizer discards */ +#define REG_XAUI_STAT_A CRA(0x1,0xa,0x20) /* XAUI status A */ +#define REG_XAUI_STAT_B CRA(0x1,0xa,0x21) /* XAUI status B */ +#define REG_XAUI_STAT_C CRA(0x1,0xa,0x22) /* XAUI status C */ +#define REG_XAUI_CONF_A CRA(0x1,0xa,0x23) /* XAUI configuration A */ +#define REG_XAUI_CONF_B CRA(0x1,0xa,0x24) /* XAUI configuration B */ +#define REG_XAUI_CODE_GRP_CNT CRA(0x1,0xa,0x25) /* XAUI code group error count */ +#define REG_XAUI_CONF_TEST_A CRA(0x1,0xa,0x26) /* XAUI test register A */ +#define REG_PDERRCNT CRA(0x1,0xa,0x27) /* XAUI test register B */ + +/* pn = port number 0-9 for tri-speed, 10 for 10GbE */ +/* Both tri-speed and 10GbE */ +#define REG_MAX_LEN(pn) CRA(0x1,pn,0x02) /* Max length */ +#define REG_MAC_HIGH_ADDR(pn) CRA(0x1,pn,0x03) /* Upper 24 bits of MAC addr */ +#define REG_MAC_LOW_ADDR(pn) CRA(0x1,pn,0x04) /* Lower 24 bits of MAC addr */ + +/* tri-speed only + * pn = port number, 0-9 + */ +#define REG_MODE_CFG(pn) CRA(0x1,pn,0x00) /* Mode configuration */ +#define REG_PAUSE_CFG(pn) CRA(0x1,pn,0x01) /* Pause configuration */ +#define REG_NORMALIZER(pn) CRA(0x1,pn,0x05) /* Normalizer */ +#define REG_TBI_STATUS(pn) CRA(0x1,pn,0x06) /* TBI status */ +#define REG_PCS_STATUS_DBG(pn) CRA(0x1,pn,0x07) /* PCS status debug */ +#define REG_PCS_CTRL(pn) CRA(0x1,pn,0x08) /* PCS control */ +#define REG_TBI_CONFIG(pn) CRA(0x1,pn,0x09) /* TBI configuration */ +#define REG_STICK_BIT(pn) CRA(0x1,pn,0x0a) /* Sticky bits */ +#define REG_DEV_SETUP(pn) CRA(0x1,pn,0x0b) /* MAC clock/reset setup */ +#define REG_DROP_CNT(pn) CRA(0x1,pn,0x0c) /* Drop counter */ +#define REG_PORT_POS(pn) CRA(0x1,pn,0x0d) /* Preamble port position */ +#define REG_PORT_FAIL(pn) CRA(0x1,pn,0x0e) /* Preamble port position */ +#define REG_SERDES_CONF(pn) CRA(0x1,pn,0x0f) /* SerDes configuration */ +#define REG_SERDES_TEST(pn) CRA(0x1,pn,0x10) /* SerDes test */ +#define REG_SERDES_STAT(pn) CRA(0x1,pn,0x11) /* SerDes status */ +#define REG_SERDES_COM_CNT(pn) CRA(0x1,pn,0x12) /* SerDes comma 
counter */ +#define REG_DENORM(pn) CRA(0x1,pn,0x15) /* Frame denormalization */ +#define REG_DBG(pn) CRA(0x1,pn,0x16) /* Device 1G debug */ +#define REG_TX_IFG(pn) CRA(0x1,pn,0x18) /* Tx IFG config */ +#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ + +/* Statistics */ +/* CRA(0x4,pn,reg) */ +/* reg below */ +/* pn = port number, 0-a, a = 10GbE */ + +enum { + RxInBytes = 0x00, // # Rx in octets + RxSymbolCarrier = 0x01, // Frames w/ symbol errors + RxPause = 0x02, // # pause frames received + RxUnsupOpcode = 0x03, // # control frames with unsupported opcode + RxOkBytes = 0x04, // # octets in good frames + RxBadBytes = 0x05, // # octets in bad frames + RxUnicast = 0x06, // # good unicast frames + RxMulticast = 0x07, // # good multicast frames + RxBroadcast = 0x08, // # good broadcast frames + Crc = 0x09, // # frames w/ bad CRC only + RxAlignment = 0x0a, // # frames w/ alignment err + RxUndersize = 0x0b, // # frames undersize + RxFragments = 0x0c, // # frames undersize w/ crc err + RxInRangeLengthError = 0x0d, // # frames with length error + RxOutOfRangeError = 0x0e, // # frames with illegal length field + RxOversize = 0x0f, // # frames oversize + RxJabbers = 0x10, // # frames oversize w/ crc err + RxSize64 = 0x11, // # frames 64 octets long + RxSize65To127 = 0x12, // # frames 65-127 octets + RxSize128To255 = 0x13, // # frames 128-255 + RxSize256To511 = 0x14, // # frames 256-511 + RxSize512To1023 = 0x15, // # frames 512-1023 + RxSize1024To1518 = 0x16, // # frames 1024-1518 + RxSize1519ToMax = 0x17, // # frames 1519-max + + TxOutBytes = 0x18, // # octets tx + TxPause = 0x19, // # pause frames sent + TxOkBytes = 0x1a, // # octets tx OK + TxUnicast = 0x1b, // # frames unicast + TxMulticast = 0x1c, // # frames multicast + TxBroadcast = 0x1d, // # frames broadcast + TxMultipleColl = 0x1e, // # frames tx after multiple collisions + TxLateColl = 0x1f, // # late collisions detected + TxXcoll = 0x20, // # frames lost, excessive collisions + TxDefer = 0x21, // # frames deferred on first tx attempt + TxXdefer = 0x22, // # frames excessively deferred + TxCsense = 0x23, // carrier sense errors at frame end + TxSize64 = 0x24, // # frames 64 octets long + TxSize65To127 = 0x25, // # frames 65-127 octets + TxSize128To255 = 0x26, // # frames 128-255 + TxSize256To511 = 0x27, // # frames 256-511 + TxSize512To1023 = 0x28, // # frames 512-1023 + TxSize1024To1518 = 0x29, // # frames 1024-1518 + TxSize1519ToMax = 0x2a, // # frames 1519-max + TxSingleColl = 0x2b, // # frames tx after single collision + TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions + TxBackoff3 = 0x2d, // after 3 backoffs/collisions + TxBackoff4 = 0x2e, // after 4 + TxBackoff5 = 0x2f, // after 5 + TxBackoff6 = 0x30, // after 6 + TxBackoff7 = 0x31, // after 7 + TxBackoff8 = 0x32, // after 8 + TxBackoff9 = 0x33, // after 9 + TxBackoff10 = 0x34, // after 10 + TxBackoff11 = 0x35, // after 11 + TxBackoff12 = 0x36, // after 12 + TxBackoff13 = 0x37, // after 13 + TxBackoff14 = 0x38, // after 14 + TxBackoff15 = 0x39, // after 15 + TxUnderrun = 0x3a, // # frames dropped from underrun + // Hole. See REG_RX_XGMII_PROT_ERR below. + RxIpgShrink = 0x3c, // # of IPG shrinks detected + // Duplicate. See REG_STAT_STICKY10G below. 
+ StatSticky1G = 0x3e, // tri-speed sticky bits + StatInit = 0x3f // Clear all statistics +}; + +#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */ +#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */ + +#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes) +#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes) +#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes) + +/* MII-Management Block registers */ +/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If + * we hooked up to the one with separate directions, the middle 0x0 needs to + * change to 0x1. And the current errata states that MII-M 1 doesn't work. + */ + +#define REG_MIIM_STATUS CRA(0x3,0x0,0x00) /* MII-M Status */ +#define REG_MIIM_CMD CRA(0x3,0x0,0x01) /* MII-M Command */ +#define REG_MIIM_DATA CRA(0x3,0x0,0x02) /* MII-M Data */ +#define REG_MIIM_PRESCALE CRA(0x3,0x0,0x03) /* MII-M MDC Prescale */ + +#define REG_ING_FFILT_UM_EN CRA(0x2, 0, 0xd) +#define REG_ING_FFILT_BE_EN CRA(0x2, 0, 0x1d) +#define REG_ING_FFILT_VAL0 CRA(0x2, 0, 0x2d) +#define REG_ING_FFILT_VAL1 CRA(0x2, 0, 0x3d) +#define REG_ING_FFILT_MASK0 CRA(0x2, 0, 0x4d) +#define REG_ING_FFILT_MASK1 CRA(0x2, 0, 0x5d) +#define REG_ING_FFILT_MASK2 CRA(0x2, 0, 0x6d) +#define REG_ING_FFILT_ETYPE CRA(0x2, 0, 0x7d) + + +/* Whew. */ + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/Makefile b/drivers/net/ethernet/chelsio/cxgb3/Makefile new file mode 100644 index 000000000..29aff78c7 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/Makefile @@ -0,0 +1,8 @@ +# +# Chelsio T3 driver +# + +obj-$(CONFIG_CHELSIO_T3) += cxgb3.o + +cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \ + xgmac.o sge.o l2t.o cxgb3_offload.o aq100x.o diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h new file mode 100644 index 000000000..8b395b537 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* This file should not be included directly. Include common.h instead. 
*/ + +#ifndef __T3_ADAPTER_H__ +#define __T3_ADAPTER_H__ + +#include +#include +#include +#include +#include +#include +#include +#include "t3cdev.h" +#include + +struct adapter; +struct sge_qset; +struct port_info; + +enum mac_idx_types { + LAN_MAC_IDX = 0, + SAN_MAC_IDX, + + MAX_MAC_IDX +}; + +struct iscsi_config { + __u8 mac_addr[ETH_ALEN]; + __u32 flags; + int (*send)(struct port_info *pi, struct sk_buff **skb); + int (*recv)(struct port_info *pi, struct sk_buff *skb); +}; + +struct port_info { + struct adapter *adapter; + struct sge_qset *qs; + u8 port_id; + u8 nqsets; + u8 first_qset; + struct cphy phy; + struct cmac mac; + struct link_config link_config; + struct net_device_stats netstats; + int activity; + __be32 iscsi_ipv4addr; + struct iscsi_config iscsic; + + int link_fault; /* link fault was detected */ +}; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + QUEUES_BOUND = (1 << 3), + TP_PARITY_INIT = (1 << 4), + NAPI_INIT = (1 << 5), +}; + +struct fl_pg_chunk { + struct page *page; + void *va; + unsigned int offset; + unsigned long *p_cnt; + dma_addr_t mapping; +}; + +struct rx_desc; +struct rx_sw_desc; + +struct sge_fl { /* SGE per free-buffer list state */ + unsigned int buf_size; /* size of each Rx buffer */ + unsigned int credits; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int size; /* capacity of free list */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned int gen; /* free list generation */ + struct fl_pg_chunk pg_chunk;/* page chunk cache */ + unsigned int use_pages; /* whether FL uses pages or sk_buffs */ + unsigned int order; /* order of page allocations */ + unsigned int alloc_size; /* size of allocated buffer */ + struct rx_desc *desc; /* address of HW Rx descriptor ring */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + dma_addr_t phys_addr; /* physical address of HW ring start */ + unsigned int cntxt_id; /* SGE context id for the free list */ + unsigned long empty; /* # of times queue ran out of buffers */ + unsigned long alloc_failed; /* # of times buffer allocation failed */ +}; + +/* + * Bundle size for grouping offload RX packets for delivery to the stack. + * Don't make this too big as we do prefetch on each packet in a bundle. + */ +# define RX_BUNDLE_SIZE 8 + +struct rsp_desc; + +struct sge_rspq { /* state for an SGE response queue */ + unsigned int credits; /* # of pending response credits */ + unsigned int size; /* capacity of response queue */ + unsigned int cidx; /* consumer index */ + unsigned int gen; /* current generation bit */ + unsigned int polling; /* is the queue serviced through NAPI? 
*/ + unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */ + unsigned int next_holdoff; /* holdoff time for next interrupt */ + unsigned int rx_recycle_buf; /* whether recycling occurred + within current sop-eop */ + struct rsp_desc *desc; /* address of HW response ring */ + dma_addr_t phys_addr; /* physical address of the ring */ + unsigned int cntxt_id; /* SGE context id for the response q */ + spinlock_t lock; /* guards response processing */ + struct sk_buff_head rx_queue; /* offload packet receive queue */ + struct sk_buff *pg_skb; /* used to build frag list in napi handler */ + + unsigned long offload_pkts; + unsigned long offload_bundles; + unsigned long eth_pkts; /* # of ethernet packets */ + unsigned long pure_rsps; /* # of pure (non-data) responses */ + unsigned long imm_data; /* responses with immediate data */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ + unsigned long async_notif; /* # of asynchronous notification events */ + unsigned long empty; /* # of times queue ran out of credits */ + unsigned long nomem; /* # of responses deferred due to no mem */ + unsigned long unhandled_irqs; /* # of spurious intrs */ + unsigned long starved; + unsigned long restarted; +}; + +struct tx_desc; +struct tx_sw_desc; + +struct sge_txq { /* state for an SGE Tx queue */ + unsigned long flags; /* HW DMA fetch status */ + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int processed; /* total # of descs HW has processed */ + unsigned int cleaned; /* total # of descs SW has reclaimed */ + unsigned int stop_thres; /* SW TX queue suspend threshold */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned int gen; /* current value of generation bit */ + unsigned int unacked; /* Tx descriptors used since last COMPL */ + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + spinlock_t lock; /* guards enqueueing of new packets */ + unsigned int token; /* WR token */ + dma_addr_t phys_addr; /* physical address of the ring */ + struct sk_buff_head sendq; /* List of backpressured offload packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + unsigned int cntxt_id; /* SGE context id for the Tx q */ + unsigned long stops; /* # of times q has been stopped */ + unsigned long restarts; /* # of queue restarts */ +}; + +enum { /* per port SGE statistics */ + SGE_PSTAT_TSO, /* # of TSO requests */ + SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */ + SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */ + SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */ + SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */ + + SGE_PSTAT_MAX /* must be last */ +}; + +struct napi_gro_fraginfo; + +struct sge_qset { /* an SGE queue set */ + struct adapter *adap; + struct napi_struct napi; + struct sge_rspq rspq; + struct sge_fl fl[SGE_RXQ_PER_SET]; + struct sge_txq txq[SGE_TXQ_PER_SET]; + int nomem; + void *lro_va; + struct net_device *netdev; + struct netdev_queue *tx_q; /* associated netdev TX queue */ + unsigned long txq_stopped; /* which Tx queues are stopped */ + struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ + struct timer_list rx_reclaim_timer; /* reclaims RX buffers */ + unsigned long port_stats[SGE_PSTAT_MAX]; +} ____cacheline_aligned; + +struct sge { + struct sge_qset qs[SGE_QSETS]; + spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */ +}; + +struct 
adapter { + struct t3cdev tdev; + struct list_head adapter_list; + void __iomem *regs; + struct pci_dev *pdev; + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + + const char *name; + int msg_enable; + unsigned int mmio_len; + + struct adapter_params params; + unsigned int slow_intr_mask; + unsigned long irq_stats[IRQ_NUM_STATS]; + + int msix_nvectors; + struct { + unsigned short vec; + char desc[22]; + } msix_info[SGE_QSETS + 1]; + + /* T3 modules */ + struct sge sge; + struct mc7 pmrx; + struct mc7 pmtx; + struct mc7 cm; + struct mc5 mc5; + + struct net_device *port[MAX_NPORTS]; + unsigned int check_task_cnt; + struct delayed_work adap_check_task; + struct work_struct ext_intr_handler_task; + struct work_struct fatal_error_handler_task; + struct work_struct link_fault_handler_task; + + struct work_struct db_full_task; + struct work_struct db_empty_task; + struct work_struct db_drop_task; + + struct dentry *debugfs_root; + + struct mutex mdio_lock; + spinlock_t stats_lock; + spinlock_t work_lock; + + struct sk_buff *nofail_skb; +}; + +static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr) +{ + u32 val = readl(adapter->regs + reg_addr); + + CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val); + return val; +} + +static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) +{ + CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val); + writel(val, adapter->regs + reg_addr); +} + +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return netdev_priv(adap->port[idx]); +} + +static inline int phy2portid(struct cphy *phy) +{ + struct adapter *adap = phy->adapter; + struct port_info *port0 = adap2pinfo(adap, 0); + + return &port0->phy == phy ? 
0 : 1; +} + +#define OFFLOAD_DEVMAP_BIT 15 + +#define tdev2adap(d) container_of(d, struct adapter, tdev) + +static inline int offload_running(struct adapter *adapter) +{ + return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); +} + +int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb); + +void t3_os_ext_intr_handler(struct adapter *adapter); +void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status, + int speed, int duplex, int fc); +void t3_os_phymod_changed(struct adapter *adap, int port_id); +void t3_os_link_fault(struct adapter *adapter, int port_id, int state); +void t3_os_link_fault_handler(struct adapter *adapter, int port_id); + +void t3_sge_start(struct adapter *adap); +void t3_sge_stop(struct adapter *adap); +void t3_start_sge_timers(struct adapter *adap); +void t3_stop_sge_timers(struct adapter *adap); +void t3_free_sge_resources(struct adapter *adap); +void t3_sge_err_intr_handler(struct adapter *adapter); +irq_handler_t t3_intr_handler(struct adapter *adap, int polling); +netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev); +int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb); +void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); +int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, + int irq_vec_idx, const struct qset_params *p, + int ntxq, struct net_device *dev, + struct netdev_queue *netdevq); +extern struct workqueue_struct *cxgb3_wq; + +int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); + +#endif /* __T3_ADAPTER_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c new file mode 100644 index 000000000..2028da95a --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c @@ -0,0 +1,941 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "common.h" +#include "regs.h" + +enum { + AEL100X_TX_CONFIG1 = 0xc002, + AEL1002_PWR_DOWN_HI = 0xc011, + AEL1002_PWR_DOWN_LO = 0xc012, + AEL1002_XFI_EQL = 0xc015, + AEL1002_LB_EN = 0xc017, + AEL_OPT_SETTINGS = 0xc017, + AEL_I2C_CTRL = 0xc30a, + AEL_I2C_DATA = 0xc30b, + AEL_I2C_STAT = 0xc30c, + AEL2005_GPIO_CTRL = 0xc214, + AEL2005_GPIO_STAT = 0xc215, + + AEL2020_GPIO_INTR = 0xc103, /* Latch High (LH) */ + AEL2020_GPIO_CTRL = 0xc108, /* Store Clear (SC) */ + AEL2020_GPIO_STAT = 0xc10c, /* Read Only (RO) */ + AEL2020_GPIO_CFG = 0xc110, /* Read Write (RW) */ + + AEL2020_GPIO_SDA = 0, /* IN: i2c serial data */ + AEL2020_GPIO_MODDET = 1, /* IN: Module Detect */ + AEL2020_GPIO_0 = 3, /* IN: unassigned */ + AEL2020_GPIO_1 = 2, /* OUT: unassigned */ + AEL2020_GPIO_LSTAT = AEL2020_GPIO_1, /* wired to link status LED */ +}; + +enum { edc_none, edc_sr, edc_twinax }; + +/* PHY module I2C device address */ +enum { + MODULE_DEV_ADDR = 0xa0, + SFF_DEV_ADDR = 0xa2, +}; + +/* PHY transceiver type */ +enum { + phy_transtype_unknown = 0, + phy_transtype_sfp = 3, + phy_transtype_xfp = 6, +}; + +#define AEL2005_MODDET_IRQ 4 + +struct reg_val { + unsigned short mmd_addr; + unsigned short reg_addr; + unsigned short clear_bits; + unsigned short set_bits; +}; + +static int set_phy_regs(struct cphy *phy, const struct reg_val *rv) +{ + int err; + + for (err = 0; rv->mmd_addr && !err; rv++) { + if (rv->clear_bits == 0xffff) + err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr, + rv->set_bits); + else + err = t3_mdio_change_bits(phy, rv->mmd_addr, + rv->reg_addr, rv->clear_bits, + rv->set_bits); + } + return err; +} + +static void ael100x_txon(struct cphy *phy) +{ + int tx_on_gpio = + phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL; + + msleep(100); + t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio); + msleep(30); +} + +/* + * Read an 8-bit word from a device attached to the PHY's i2c bus. + */ +static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr) +{ + int i, err; + unsigned int stat, data; + + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL, + (dev_addr << 8) | (1 << 8) | word_addr); + if (err) + return err; + + for (i = 0; i < 200; i++) { + msleep(1); + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat); + if (err) + return err; + if ((stat & 3) == 1) { + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA, + &data); + if (err) + return err; + return data >> 8; + } + } + CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n", + phy->mdio.prtad, dev_addr, word_addr); + return -ETIMEDOUT; +} + +static int ael1002_power_down(struct cphy *phy, int enable) +{ + int err; + + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable); + if (!err) + err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER, enable); + return err; +} + +static int ael1002_reset(struct cphy *phy, int wait) +{ + int err; + + if ((err = ael1002_power_down(phy, 0)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL100X_TX_CONFIG1, 1)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_HI, 0)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_PWR_DOWN_LO, 0)) || + (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL1002_XFI_EQL, 0x18)) || + (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL1002_LB_EN, + 0, 1 << 5))) + return err; + return 0; +} + +static int ael1002_intr_noop(struct cphy *phy) +{ + return 0; +} + +/* + * Get link status for a 10GBASE-R device. 
+ */ +static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc) +{ + if (link_ok) { + unsigned int stat0, stat1, stat2; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, + MDIO_PMA_RXDET, &stat0); + + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PCS, + MDIO_PCS_10GBRT_STAT1, &stat1); + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PHYXS, + MDIO_PHYXS_LNSTAT, &stat2); + if (err) + return err; + *link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1; + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + return 0; +} + +static struct cphy_ops ael1002_ops = { + .reset = ael1002_reset, + .intr_enable = ael1002_intr_noop, + .intr_disable = ael1002_intr_noop, + .intr_clear = ael1002_intr_noop, + .intr_handler = ael1002_intr_noop, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, + "10GBASE-R"); + ael100x_txon(phy); + return 0; +} + +static int ael1006_reset(struct cphy *phy, int wait) +{ + return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait); +} + +static struct cphy_ops ael1006_ops = { + .reset = ael1006_reset, + .intr_enable = t3_phy_lasi_intr_enable, + .intr_disable = t3_phy_lasi_intr_disable, + .intr_clear = t3_phy_lasi_intr_clear, + .intr_handler = t3_phy_lasi_intr_handler, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE, + "10GBASE-SR"); + ael100x_txon(phy); + return 0; +} + +/* + * Decode our module type. + */ +static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms) +{ + int v; + + if (delay_ms) + msleep(delay_ms); + + /* see SFF-8472 for below */ + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3); + if (v < 0) + return v; + + if (v == 0x10) + return phy_modtype_sr; + if (v == 0x20) + return phy_modtype_lr; + if (v == 0x40) + return phy_modtype_lrm; + + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6); + if (v < 0) + return v; + if (v != 4) + goto unknown; + + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10); + if (v < 0) + return v; + + if (v & 0x80) { + v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12); + if (v < 0) + return v; + return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax; + } +unknown: + return phy_modtype_unknown; +} + +/* + * Code to support the Aeluros/NetLogic 2005 10Gb PHY. 
+ */ +static int ael2005_setup_sr_edc(struct cphy *phy) +{ + static const struct reg_val regs[] = { + { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 }, + { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a }, + { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 }, + { 0, 0, 0, 0 } + }; + + int i, err; + + err = set_phy_regs(phy, regs); + if (err) + return err; + + msleep(50); + + if (phy->priv != edc_sr) + err = t3_get_edc_fw(phy, EDC_OPT_AEL2005, + EDC_OPT_AEL2005_SIZE); + if (err) + return err; + + for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2) + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, + phy->phy_cache[i], + phy->phy_cache[i + 1]); + if (!err) + phy->priv = edc_sr; + return err; +} + +static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype) +{ + static const struct reg_val regs[] = { + { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 }, + { 0, 0, 0, 0 } + }; + static const struct reg_val preemphasis[] = { + { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 }, + { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 }, + { 0, 0, 0, 0 } + }; + int i, err; + + err = set_phy_regs(phy, regs); + if (!err && modtype == phy_modtype_twinax_long) + err = set_phy_regs(phy, preemphasis); + if (err) + return err; + + msleep(50); + + if (phy->priv != edc_twinax) + err = t3_get_edc_fw(phy, EDC_TWX_AEL2005, + EDC_TWX_AEL2005_SIZE); + if (err) + return err; + + for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2) + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, + phy->phy_cache[i], + phy->phy_cache[i + 1]); + if (!err) + phy->priv = edc_twinax; + return err; +} + +static int ael2005_get_module_type(struct cphy *phy, int delay_ms) +{ + int v; + unsigned int stat; + + v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat); + if (v) + return v; + + if (stat & (1 << 8)) /* module absent */ + return phy_modtype_none; + + return ael2xxx_get_module_type(phy, delay_ms); +} + +static int ael2005_intr_enable(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200); + return err ? err : t3_phy_lasi_intr_enable(phy); +} + +static int ael2005_intr_disable(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100); + return err ? err : t3_phy_lasi_intr_disable(phy); +} + +static int ael2005_intr_clear(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00); + return err ? 
err : t3_phy_lasi_intr_clear(phy); +} + +static int ael2005_reset(struct cphy *phy, int wait) +{ + static const struct reg_val regs0[] = { + { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 }, + { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 }, + { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, + { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 }, + { 0, 0, 0, 0 } + }; + static const struct reg_val regs1[] = { + { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 }, + { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 }, + { 0, 0, 0, 0 } + }; + + int err; + unsigned int lasi_ctrl; + + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, + &lasi_ctrl); + if (err) + return err; + + err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0); + if (err) + return err; + + msleep(125); + phy->priv = edc_none; + err = set_phy_regs(phy, regs0); + if (err) + return err; + + msleep(50); + + err = ael2005_get_module_type(phy, 0); + if (err < 0) + return err; + phy->modtype = err; + + if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) + err = ael2005_setup_twinax_edc(phy, err); + else + err = ael2005_setup_sr_edc(phy); + if (err) + return err; + + err = set_phy_regs(phy, regs1); + if (err) + return err; + + /* reset wipes out interrupts, reenable them if they were on */ + if (lasi_ctrl & 1) + err = ael2005_intr_enable(phy); + return err; +} + +static int ael2005_intr_handler(struct cphy *phy) +{ + unsigned int stat; + int ret, edc_needed, cause = 0; + + ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat); + if (ret) + return ret; + + if (stat & AEL2005_MODDET_IRQ) { + ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, + 0xd00); + if (ret) + return ret; + + /* modules have max 300 ms init time after hot plug */ + ret = ael2005_get_module_type(phy, 300); + if (ret < 0) + return ret; + + phy->modtype = ret; + if (ret == phy_modtype_none) + edc_needed = phy->priv; /* on unplug retain EDC */ + else if (ret == phy_modtype_twinax || + ret == phy_modtype_twinax_long) + edc_needed = edc_twinax; + else + edc_needed = edc_sr; + + if (edc_needed != phy->priv) { + ret = ael2005_reset(phy, 0); + return ret ? ret : cphy_cause_module_change; + } + cause = cphy_cause_module_change; + } + + ret = t3_phy_lasi_intr_handler(phy); + if (ret < 0) + return ret; + + ret |= cause; + return ret ? ret : cphy_cause_link_change; +} + +static struct cphy_ops ael2005_ops = { + .reset = ael2005_reset, + .intr_enable = ael2005_intr_enable, + .intr_disable = ael2005_intr_disable, + .intr_clear = ael2005_intr_clear, + .intr_handler = ael2005_intr_handler, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | + SUPPORTED_IRQ, "10GBASE-R"); + msleep(125); + return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0, + 1 << 5); +} + +/* + * Setup EDC and other parameters for operation with an optical module. 
+ */ +static int ael2020_setup_sr_edc(struct cphy *phy) +{ + static const struct reg_val regs[] = { + /* set CDR offset to 10 */ + { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a }, + + /* adjust 10G RX bias current */ + { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 }, + { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 }, + { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 }, + + /* end */ + { 0, 0, 0, 0 } + }; + int err; + + err = set_phy_regs(phy, regs); + msleep(50); + if (err) + return err; + + phy->priv = edc_sr; + return 0; +} + +/* + * Setup EDC and other parameters for operation with an TWINAX module. + */ +static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype) +{ + /* set uC to 40MHz */ + static const struct reg_val uCclock40MHz[] = { + { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 }, + { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 }, + { 0, 0, 0, 0 } + }; + + /* activate uC clock */ + static const struct reg_val uCclockActivate[] = { + { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 }, + { 0, 0, 0, 0 } + }; + + /* set PC to start of SRAM and activate uC */ + static const struct reg_val uCactivate[] = { + { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 }, + { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 }, + { 0, 0, 0, 0 } + }; + int i, err; + + /* set uC clock and activate it */ + err = set_phy_regs(phy, uCclock40MHz); + msleep(500); + if (err) + return err; + err = set_phy_regs(phy, uCclockActivate); + msleep(500); + if (err) + return err; + + if (phy->priv != edc_twinax) + err = t3_get_edc_fw(phy, EDC_TWX_AEL2020, + EDC_TWX_AEL2020_SIZE); + if (err) + return err; + + for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2) + err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, + phy->phy_cache[i], + phy->phy_cache[i + 1]); + /* activate uC */ + err = set_phy_regs(phy, uCactivate); + if (!err) + phy->priv = edc_twinax; + return err; +} + +/* + * Return Module Type. + */ +static int ael2020_get_module_type(struct cphy *phy, int delay_ms) +{ + int v; + unsigned int stat; + + v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat); + if (v) + return v; + + if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) { + /* module absent */ + return phy_modtype_none; + } + + return ael2xxx_get_module_type(phy, delay_ms); +} + +/* + * Enable PHY interrupts. We enable "Module Detection" interrupts (on any + * state transition) and then generic Link Alarm Status Interrupt (LASI). + */ +static int ael2020_intr_enable(struct cphy *phy) +{ + static const struct reg_val regs[] = { + /* output Module's Loss Of Signal (LOS) to LED */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT, + 0xffff, 0x4 }, + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) }, + + /* enable module detect status change interrupts */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) }, + + /* end */ + { 0, 0, 0, 0 } + }; + int err, link_ok = 0; + + /* set up "link status" LED and enable module change interrupts */ + err = set_phy_regs(phy, regs); + if (err) + return err; + + err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL); + if (err) + return err; + if (link_ok) + t3_link_changed(phy->adapter, + phy2portid(phy)); + + err = t3_phy_lasi_intr_enable(phy); + if (err) + return err; + + return 0; +} + +/* + * Disable PHY interrupts. The mirror of the above ... 
+ */ +static int ael2020_intr_disable(struct cphy *phy) +{ + static const struct reg_val regs[] = { + /* reset "link status" LED to "off" */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) }, + + /* disable module detect status change interrupts */ + { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, + 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) }, + + /* end */ + { 0, 0, 0, 0 } + }; + int err; + + /* turn off "link status" LED and disable module change interrupts */ + err = set_phy_regs(phy, regs); + if (err) + return err; + + return t3_phy_lasi_intr_disable(phy); +} + +/* + * Clear PHY interrupt state. + */ +static int ael2020_intr_clear(struct cphy *phy) +{ + /* + * The GPIO Interrupt register on the AEL2020 is a "Latching High" + * (LH) register which is cleared to the current state when it's read. + * Thus, we simply read the register and discard the result. + */ + unsigned int stat; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); + return err ? err : t3_phy_lasi_intr_clear(phy); +} + +static const struct reg_val ael2020_reset_regs[] = { + /* Erratum #2: CDRLOL asserted, causing PMA link down status */ + { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 }, + + /* force XAUI to send LF when RX_LOS is asserted */ + { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 }, + + /* allow writes to transceiver module EEPROM on i2c bus */ + { MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 }, + { MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 }, + { MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 }, + + /* end */ + { 0, 0, 0, 0 } +}; +/* + * Reset the PHY and put it into a canonical operating state. + */ +static int ael2020_reset(struct cphy *phy, int wait) +{ + int err; + unsigned int lasi_ctrl; + + /* grab current interrupt state */ + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, + &lasi_ctrl); + if (err) + return err; + + err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125); + if (err) + return err; + msleep(100); + + /* basic initialization for all module types */ + phy->priv = edc_none; + err = set_phy_regs(phy, ael2020_reset_regs); + if (err) + return err; + + /* determine module type and perform appropriate initialization */ + err = ael2020_get_module_type(phy, 0); + if (err < 0) + return err; + phy->modtype = (u8)err; + if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) + err = ael2020_setup_twinax_edc(phy, err); + else + err = ael2020_setup_sr_edc(phy); + if (err) + return err; + + /* reset wipes out interrupts, reenable them if they were on */ + if (lasi_ctrl & 1) + err = ael2005_intr_enable(phy); + return err; +} + +/* + * Handle a PHY interrupt. + */ +static int ael2020_intr_handler(struct cphy *phy) +{ + unsigned int stat; + int ret, edc_needed, cause = 0; + + ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); + if (ret) + return ret; + + if (stat & (0x1 << AEL2020_GPIO_MODDET)) { + /* modules have max 300 ms init time after hot plug */ + ret = ael2020_get_module_type(phy, 300); + if (ret < 0) + return ret; + + phy->modtype = (u8)ret; + if (ret == phy_modtype_none) + edc_needed = phy->priv; /* on unplug retain EDC */ + else if (ret == phy_modtype_twinax || + ret == phy_modtype_twinax_long) + edc_needed = edc_twinax; + else + edc_needed = edc_sr; + + if (edc_needed != phy->priv) { + ret = ael2020_reset(phy, 0); + return ret ? ret : cphy_cause_module_change; + } + cause = cphy_cause_module_change; + } + + ret = t3_phy_lasi_intr_handler(phy); + if (ret < 0) + return ret; + + ret |= cause; + return ret ? 
ret : cphy_cause_link_change; +} + +static struct cphy_ops ael2020_ops = { + .reset = ael2020_reset, + .intr_enable = ael2020_intr_enable, + .intr_disable = ael2020_intr_disable, + .intr_clear = ael2020_intr_clear, + .intr_handler = ael2020_intr_handler, + .get_link_status = get_link_status_r, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + int err; + + cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | + SUPPORTED_IRQ, "10GBASE-R"); + msleep(125); + + err = set_phy_regs(phy, ael2020_reset_regs); + if (err) + return err; + return 0; +} + +/* + * Get link status for a 10GBASE-X device. + */ +static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc) +{ + if (link_ok) { + unsigned int stat0, stat1, stat2; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, + MDIO_PMA_RXDET, &stat0); + + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PCS, + MDIO_PCS_10GBX_STAT1, &stat1); + if (!err) + err = t3_mdio_read(phy, MDIO_MMD_PHYXS, + MDIO_PHYXS_LNSTAT, &stat2); + if (err) + return err; + *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1; + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + return 0; +} + +static struct cphy_ops qt2045_ops = { + .reset = ael1006_reset, + .intr_enable = t3_phy_lasi_intr_enable, + .intr_disable = t3_phy_lasi_intr_disable, + .intr_clear = t3_phy_lasi_intr_clear, + .intr_handler = t3_phy_lasi_intr_handler, + .get_link_status = get_link_status_x, + .power_down = ael1002_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + unsigned int stat; + + cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, + "10GBASE-CX4"); + + /* + * Some cards where the PHY is supposed to be at address 0 actually + * have it at 1. 
+ */ + if (!phy_addr && + !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) && + stat == 0xffff) + phy->mdio.prtad = 1; + return 0; +} + +static int xaui_direct_reset(struct cphy *phy, int wait) +{ + return 0; +} + +static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + if (link_ok) { + unsigned int status; + int prtad = phy->mdio.prtad; + + status = t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT0, prtad)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT1, prtad)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT2, prtad)) | + t3_read_reg(phy->adapter, + XGM_REG(A_XGM_SERDES_STAT3, prtad)); + *link_ok = !(status & F_LOWSIG0); + } + if (speed) + *speed = SPEED_10000; + if (duplex) + *duplex = DUPLEX_FULL; + return 0; +} + +static int xaui_direct_power_down(struct cphy *phy, int enable) +{ + return 0; +} + +static struct cphy_ops xaui_direct_ops = { + .reset = xaui_direct_reset, + .intr_enable = ael1002_intr_noop, + .intr_disable = ael1002_intr_noop, + .intr_clear = ael1002_intr_noop, + .intr_handler = ael1002_intr_noop, + .get_link_status = xaui_direct_get_link_status, + .power_down = xaui_direct_power_down, +}; + +int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops, + SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, + "10GBASE-CX4"); + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/aq100x.c b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c new file mode 100644 index 000000000..341b7ef15 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/aq100x.c @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "common.h" +#include "regs.h" + +enum { + /* MDIO_DEV_PMA_PMD registers */ + AQ_LINK_STAT = 0xe800, + AQ_IMASK_PMA = 0xf000, + + /* MDIO_DEV_XGXS registers */ + AQ_XAUI_RX_CFG = 0xc400, + AQ_XAUI_TX_CFG = 0xe400, + + /* MDIO_DEV_ANEG registers */ + AQ_1G_CTRL = 0xc400, + AQ_ANEG_STAT = 0xc800, + + /* MDIO_DEV_VEND1 registers */ + AQ_FW_VERSION = 0x0020, + AQ_IFLAG_GLOBAL = 0xfc00, + AQ_IMASK_GLOBAL = 0xff00, +}; + +enum { + IMASK_PMA = 1 << 2, + IMASK_GLOBAL = 1 << 15, + ADV_1G_FULL = 1 << 15, + ADV_1G_HALF = 1 << 14, + ADV_10G_FULL = 1 << 12, + AQ_RESET = (1 << 14) | (1 << 15), + AQ_LOWPOWER = 1 << 12, +}; + +static int aq100x_reset(struct cphy *phy, int wait) +{ + /* + * Ignore the caller specified wait time; always wait for the reset to + * complete. Can take up to 3s. + */ + int err = t3_phy_reset(phy, MDIO_MMD_VEND1, 3000); + + if (err) + CH_WARN(phy->adapter, "PHY%d: reset failed (0x%x).\n", + phy->mdio.prtad, err); + + return err; +} + +static int aq100x_intr_enable(struct cphy *phy) +{ + int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AQ_IMASK_PMA, IMASK_PMA); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, IMASK_GLOBAL); + return err; +} + +static int aq100x_intr_disable(struct cphy *phy) +{ + return t3_mdio_write(phy, MDIO_MMD_VEND1, AQ_IMASK_GLOBAL, 0); +} + +static int aq100x_intr_clear(struct cphy *phy) +{ + unsigned int v; + + t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &v); + t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); + + return 0; +} + +static int aq100x_intr_handler(struct cphy *phy) +{ + int err; + unsigned int cause, v; + + err = t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_IFLAG_GLOBAL, &cause); + if (err) + return err; + + /* Read (and reset) the latching version of the status */ + t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &v); + + return cphy_cause_link_change; +} + +static int aq100x_power_down(struct cphy *phy, int off) +{ + return mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_CTRL1_LPOWER, off); +} + +static int aq100x_autoneg_enable(struct cphy *phy) +{ + int err; + + err = aq100x_power_down(phy, 0); + if (!err) + err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_AN, MDIO_CTRL1, + BMCR_ANENABLE | BMCR_ANRESTART, 1); + + return err; +} + +static int aq100x_autoneg_restart(struct cphy *phy) +{ + int err; + + err = aq100x_power_down(phy, 0); + if (!err) + err = mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_AN, MDIO_CTRL1, + BMCR_ANENABLE | BMCR_ANRESTART, 1); + + return err; +} + +static int aq100x_advertise(struct cphy *phy, unsigned int advertise_map) +{ + unsigned int adv; + int err; + + /* 10G advertisement */ + adv = 0; + if (advertise_map & ADVERTISED_10000baseT_Full) + adv |= ADV_10G_FULL; + err = t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + ADV_10G_FULL, adv); + if (err) + return err; + + /* 1G advertisement */ + adv = 0; + if (advertise_map & ADVERTISED_1000baseT_Full) + adv |= ADV_1G_FULL; + if (advertise_map & ADVERTISED_1000baseT_Half) + adv |= ADV_1G_HALF; + err = t3_mdio_change_bits(phy, MDIO_MMD_AN, AQ_1G_CTRL, + ADV_1G_FULL | ADV_1G_HALF, adv); + if (err) + return err; + + /* 100M, pause advertisement */ + adv = 0; + if (advertise_map & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise_map & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (advertise_map & ADVERTISED_Pause) + adv |= ADVERTISE_PAUSE_CAP; + if (advertise_map & ADVERTISED_Asym_Pause) + adv |= ADVERTISE_PAUSE_ASYM; + err = 
t3_mdio_change_bits(phy, MDIO_MMD_AN, MDIO_AN_ADVERTISE, + 0xfe0, adv); + + return err; +} + +static int aq100x_set_loopback(struct cphy *phy, int mmd, int dir, int enable) +{ + return mdio_set_flag(&phy->mdio, phy->mdio.prtad, + MDIO_MMD_PMAPMD, MDIO_CTRL1, + BMCR_LOOPBACK, enable); +} + +static int aq100x_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + /* no can do */ + return -1; +} + +static int aq100x_get_link_status(struct cphy *phy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + int err; + unsigned int v; + + if (link_ok) { + err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AQ_LINK_STAT, &v); + if (err) + return err; + + *link_ok = v & 1; + if (!*link_ok) + return 0; + } + + err = t3_mdio_read(phy, MDIO_MMD_AN, AQ_ANEG_STAT, &v); + if (err) + return err; + + if (speed) { + switch (v & 0x6) { + case 0x6: + *speed = SPEED_10000; + break; + case 0x4: + *speed = SPEED_1000; + break; + case 0x2: + *speed = SPEED_100; + break; + case 0x0: + *speed = SPEED_10; + break; + } + } + + if (duplex) + *duplex = v & 1 ? DUPLEX_FULL : DUPLEX_HALF; + + return 0; +} + +static struct cphy_ops aq100x_ops = { + .reset = aq100x_reset, + .intr_enable = aq100x_intr_enable, + .intr_disable = aq100x_intr_disable, + .intr_clear = aq100x_intr_clear, + .intr_handler = aq100x_intr_handler, + .autoneg_enable = aq100x_autoneg_enable, + .autoneg_restart = aq100x_autoneg_restart, + .advertise = aq100x_advertise, + .set_loopback = aq100x_set_loopback, + .set_speed_duplex = aq100x_set_speed_duplex, + .get_link_status = aq100x_get_link_status, + .power_down = aq100x_power_down, + .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, +}; + +int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, + const struct mdio_ops *mdio_ops) +{ + unsigned int v, v2, gpio, wait; + int err; + + cphy_init(phy, adapter, phy_addr, &aq100x_ops, mdio_ops, + SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | + SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_AUI, + "1000/10GBASE-T"); + + /* + * The PHY has been out of reset ever since the system powered up. So + * we do a hard reset over here. + */ + gpio = phy_addr ? F_GPIO10_OUT_VAL : F_GPIO6_OUT_VAL; + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, 0); + msleep(1); + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, gpio, gpio); + + /* + * Give it enough time to load the firmware and get ready for mdio. + */ + msleep(1000); + wait = 500; /* in 10ms increments */ + do { + err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); + if (err || v == 0xffff) { + + /* Allow prep_adapter to succeed when ffff is read */ + + CH_WARN(adapter, "PHY%d: reset failed (0x%x, 0x%x).\n", + phy_addr, err, v); + goto done; + } + + v &= AQ_RESET; + if (v) + msleep(10); + } while (v && --wait); + if (v) { + CH_WARN(adapter, "PHY%d: reset timed out (0x%x).\n", + phy_addr, v); + + goto done; /* let prep_adapter succeed */ + } + + /* Datasheet says 3s max but this has been observed */ + wait = (500 - wait) * 10 + 1000; + if (wait > 3000) + CH_WARN(adapter, "PHY%d: reset took %ums\n", phy_addr, wait); + + /* Firmware version check. */ + t3_mdio_read(phy, MDIO_MMD_VEND1, AQ_FW_VERSION, &v); + if (v != 101) + CH_WARN(adapter, "PHY%d: unsupported firmware %d\n", + phy_addr, v); + + /* + * The PHY should start in really-low-power mode. Prepare it for normal + * operations. 
+ */ + err = t3_mdio_read(phy, MDIO_MMD_VEND1, MDIO_CTRL1, &v); + if (err) + return err; + if (v & AQ_LOWPOWER) { + err = t3_mdio_change_bits(phy, MDIO_MMD_VEND1, MDIO_CTRL1, + AQ_LOWPOWER, 0); + if (err) + return err; + msleep(10); + } else + CH_WARN(adapter, "PHY%d does not start in low power mode.\n", + phy_addr); + + /* + * Verify XAUI settings, but let prep succeed no matter what. + */ + v = v2 = 0; + t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_RX_CFG, &v); + t3_mdio_read(phy, MDIO_MMD_PHYXS, AQ_XAUI_TX_CFG, &v2); + if (v != 0x1b || v2 != 0x1b) + CH_WARN(adapter, + "PHY%d: incorrect XAUI settings (0x%x, 0x%x).\n", + phy_addr, v, v2); + +done: + return err; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h new file mode 100644 index 000000000..442480982 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@ -0,0 +1,773 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __CHELSIO_COMMON_H +#define __CHELSIO_COMMON_H + +#include +#include +#include +#include +#include +#include +#include +#include "version.h" + +#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__) +#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__) +#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__) + +/* + * More powerful macro that selectively prints messages based on msg_enable. + * For info and debugging messages. + */ +#define CH_MSG(adapter, level, category, fmt, ...) do { \ + if ((adapter)->msg_enable & NETIF_MSG_##category) \ + dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \ + ## __VA_ARGS__); \ +} while (0) + +#ifdef DEBUG +# define CH_DBG(adapter, category, fmt, ...) \ + CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__) +#else +# define CH_DBG(adapter, category, fmt, ...) 
+#endif + +/* Additional NETIF_MSG_* categories */ +#define NETIF_MSG_MMIO 0x8000000 + +enum { + MAX_NPORTS = 2, /* max # of ports */ + MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */ + EEPROMSIZE = 8192, /* Serial EEPROM size */ + SERNUM_LEN = 16, /* Serial # length */ + RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */ + TCB_SIZE = 128, /* TCB size */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + PROTO_SRAM_LINES = 128, /* size of TP sram */ +}; + +#define MAX_RX_COALESCING_LEN 12288U + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +enum { + SUPPORTED_IRQ = 1 << 24 +}; + +enum { /* adapter interrupt-maintained statistics */ + STAT_ULP_CH0_PBL_OOB, + STAT_ULP_CH1_PBL_OOB, + STAT_PCI_CORR_ECC, + + IRQ_NUM_STATS /* keep last */ +}; + +#define TP_VERSION_MAJOR 1 +#define TP_VERSION_MINOR 1 +#define TP_VERSION_MICRO 0 + +#define S_TP_VERSION_MAJOR 16 +#define M_TP_VERSION_MAJOR 0xFF +#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR) +#define G_TP_VERSION_MAJOR(x) \ + (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR) + +#define S_TP_VERSION_MINOR 8 +#define M_TP_VERSION_MINOR 0xFF +#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR) +#define G_TP_VERSION_MINOR(x) \ + (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR) + +#define S_TP_VERSION_MICRO 0 +#define M_TP_VERSION_MICRO 0xFF +#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO) +#define G_TP_VERSION_MICRO(x) \ + (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO) + +enum { + SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */ + SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */ + SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */ +}; + +enum sge_context_type { /* SGE egress context types */ + SGE_CNTXT_RDMA = 0, + SGE_CNTXT_ETH = 2, + SGE_CNTXT_OFLD = 4, + SGE_CNTXT_CTRL = 5 +}; + +enum { + AN_PKT_SIZE = 32, /* async notification packet size */ + IMMED_PKT_SIZE = 48 /* packet size for immediate data */ +}; + +struct sg_ent { /* SGE scatter/gather entry */ + __be32 len[2]; + __be64 addr[2]; +}; + +#ifndef SGE_NUM_GENBITS +/* Must be 1 or 2 */ +# define SGE_NUM_GENBITS 2 +#endif + +#define TX_DESC_FLITS 16U +#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS) + +struct cphy; +struct adapter; + +struct mdio_ops { + int (*read)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr); + int (*write)(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val); + unsigned mode_support; +}; + +struct adapter_info { + unsigned char nports0; /* # of ports on channel 0 */ + unsigned char nports1; /* # of ports on channel 1 */ + unsigned char phy_base_addr; /* MDIO PHY base address */ + unsigned int gpio_out; /* GPIO output settings */ + unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */ + unsigned long caps; /* adapter capabilities */ + const struct mdio_ops *mdio_ops; /* MDIO operations */ + const char *desc; /* product description */ +}; + +struct mc5_stats { + unsigned long parity_err; + unsigned long active_rgn_full; + unsigned long nfa_srch_err; + unsigned long unknown_cmd; + unsigned long reqq_parity_err; + unsigned long dispq_parity_err; + unsigned long del_act_empty; +}; + +struct mc7_stats { + unsigned long corr_err; + unsigned long uncorr_err; + unsigned long parity_err; + unsigned long addr_err; +}; + +struct mac_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_octets_bad; /* total # of octets in error frames */ + u64 tx_frames; /* all 
good frames */ + u64 tx_mcast_frames; /* good multicast frames */ + u64 tx_bcast_frames; /* good broadcast frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_deferred; /* frames with deferred transmissions */ + u64 tx_late_collisions; /* # of late collisions */ + u64 tx_total_collisions; /* # of total collisions */ + u64 tx_excess_collisions; /* frame errors from excessive collissions */ + u64 tx_underrun; /* # of Tx FIFO underruns */ + u64 tx_len_errs; /* # of Tx length errors */ + u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */ + u64 tx_excess_deferral; /* # of frames with excessive deferral */ + u64 tx_fcs_errs; /* # of frames with bad FCS */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_octets_bad; /* total # of octets in error frames */ + u64 rx_frames; /* all good frames */ + u64 rx_mcast_frames; /* good multicast frames */ + u64 rx_bcast_frames; /* good broadcast frames */ + u64 rx_pause; /* # of received pause frames */ + u64 rx_fcs_errs; /* # of received frames with bad FCS */ + u64 rx_align_errs; /* alignment errors */ + u64 rx_symbol_errs; /* symbol errors */ + u64 rx_data_errs; /* data errors */ + u64 rx_sequence_errs; /* sequence errors */ + u64 rx_runt; /* # of runt frames */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_short; /* # of short frames */ + u64 rx_too_long; /* # of oversized frames */ + u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */ + + unsigned long tx_fifo_parity_err; + unsigned long rx_fifo_parity_err; + unsigned long tx_fifo_urun; + unsigned long rx_fifo_ovfl; + unsigned long serdes_signal_loss; + unsigned long xaui_pcs_ctc_err; + unsigned long xaui_pcs_align_change; + + unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */ + unsigned long num_resets; /* # times reset due to stuck TX */ + + unsigned long link_faults; /* # detected link faults */ +}; + +struct tp_mib_stats { + u32 ipInReceive_hi; + u32 ipInReceive_lo; + u32 ipInHdrErrors_hi; + u32 ipInHdrErrors_lo; + u32 ipInAddrErrors_hi; + u32 ipInAddrErrors_lo; + u32 ipInUnknownProtos_hi; + u32 ipInUnknownProtos_lo; + u32 ipInDiscards_hi; + u32 ipInDiscards_lo; + u32 ipInDelivers_hi; + u32 ipInDelivers_lo; + u32 ipOutRequests_hi; + u32 ipOutRequests_lo; + u32 ipOutDiscards_hi; + u32 ipOutDiscards_lo; + u32 ipOutNoRoutes_hi; + u32 ipOutNoRoutes_lo; + u32 ipReasmTimeout; + u32 ipReasmReqds; + u32 ipReasmOKs; + u32 ipReasmFails; + + u32 reserved[8]; + + u32 tcpActiveOpens; + u32 tcpPassiveOpens; + u32 tcpAttemptFails; + u32 tcpEstabResets; + u32 tcpOutRsts; + u32 tcpCurrEstab; + u32 tcpInSegs_hi; + u32 tcpInSegs_lo; + u32 tcpOutSegs_hi; + u32 tcpOutSegs_lo; + u32 tcpRetransSeg_hi; + u32 tcpRetransSeg_lo; + u32 tcpInErrs_hi; + u32 tcpInErrs_lo; + u32 tcpRtoMin; + u32 tcpRtoMax; +}; + +struct tp_params { + unsigned int nchan; /* # of channels */ + unsigned int pmrx_size; /* total PMRX capacity */ + unsigned int pmtx_size; /* total PMTX capacity */ + unsigned int cm_size; /* total CM capacity */ + unsigned int chan_rx_size; /* per 
channel Rx size */ + unsigned int chan_tx_size; /* per channel Tx size */ + unsigned int rx_pg_size; /* Rx page size */ + unsigned int tx_pg_size; /* Tx page size */ + unsigned int rx_num_pgs; /* # of Rx pages */ + unsigned int tx_num_pgs; /* # of Tx pages */ + unsigned int ntimer_qs; /* # of timer queues */ +}; + +struct qset_params { /* SGE queue set parameters */ + unsigned int polling; /* polling/interrupt service for rspq */ + unsigned int coalesce_usecs; /* irq coalescing timer */ + unsigned int rspq_size; /* # of entries in response queue */ + unsigned int fl_size; /* # of entries in regular free list */ + unsigned int jumbo_size; /* # of entries in jumbo free list */ + unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */ + unsigned int cong_thres; /* FL congestion threshold */ + unsigned int vector; /* Interrupt (line or vector) number */ +}; + +struct sge_params { + unsigned int max_pkt_size; /* max offload pkt size */ + struct qset_params qset[SGE_QSETS]; +}; + +struct mc5_params { + unsigned int mode; /* selects MC5 width */ + unsigned int nservers; /* size of server region */ + unsigned int nfilters; /* size of filter region */ + unsigned int nroutes; /* size of routing region */ +}; + +/* Default MC5 region sizes */ +enum { + DEFAULT_NSERVERS = 512, + DEFAULT_NFILTERS = 128 +}; + +/* MC5 modes, these must be non-0 */ +enum { + MC5_MODE_144_BIT = 1, + MC5_MODE_72_BIT = 2 +}; + +/* MC5 min active region size */ +enum { MC5_MIN_TIDS = 16 }; + +struct vpd_params { + unsigned int cclk; + unsigned int mclk; + unsigned int uclk; + unsigned int mdc; + unsigned int mem_timing; + u8 sn[SERNUM_LEN + 1]; + u8 eth_base[6]; + u8 port_type[MAX_NPORTS]; + unsigned short xauicfg[2]; +}; + +struct pci_params { + unsigned int vpd_cap_addr; + unsigned short speed; + unsigned char width; + unsigned char variant; +}; + +enum { + PCI_VARIANT_PCI, + PCI_VARIANT_PCIX_MODE1_PARITY, + PCI_VARIANT_PCIX_MODE1_ECC, + PCI_VARIANT_PCIX_266_MODE2, + PCI_VARIANT_PCIE +}; + +struct adapter_params { + struct sge_params sge; + struct mc5_params mc5; + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + + const struct adapter_info *info; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned int nports; /* # of ethernet ports */ + unsigned int chan_map; /* bitmap of in-use Tx channels */ + unsigned int stats_update_period; /* MAC stats accumulation period */ + unsigned int linkpoll_period; /* link poll period in 0.1s */ + unsigned int rev; /* chip revision */ + unsigned int offload; +}; + +enum { /* chip revisions */ + T3_REV_A = 0, + T3_REV_B = 2, + T3_REV_B2 = 3, + T3_REV_C = 4, +}; + +struct trace_params { + u32 sip; + u32 sip_mask; + u32 dip; + u32 dip_mask; + u16 sport; + u16 sport_mask; + u16 dport; + u16 dport_mask; + u32 vlan:12; + u32 vlan_mask:12; + u32 intf:4; + u32 intf_mask:4; + u8 proto; + u8 proto_mask; +}; + +struct link_config { + unsigned int supported; /* link capabilities */ + unsigned int advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_duplex; /* duplex user has requested */ + unsigned char duplex; /* actual link duplex */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned int link_ok; /* link up? 
*/ +}; + +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff + +struct mc5 { + struct adapter *adapter; + unsigned int tcam_size; + unsigned char part_type; + unsigned char parity_enabled; + unsigned char mode; + struct mc5_stats stats; +}; + +static inline unsigned int t3_mc5_size(const struct mc5 *p) +{ + return p->tcam_size; +} + +struct mc7 { + struct adapter *adapter; /* backpointer to adapter */ + unsigned int size; /* memory size in bytes */ + unsigned int width; /* MC7 interface width */ + unsigned int offset; /* register address offset for MC7 instance */ + const char *name; /* name of MC7 instance */ + struct mc7_stats stats; /* MC7 statistics */ +}; + +static inline unsigned int t3_mc7_size(const struct mc7 *p) +{ + return p->size; +} + +struct cmac { + struct adapter *adapter; + unsigned int offset; + unsigned int nucast; /* # of address filters for unicast MACs */ + unsigned int tx_tcnt; + unsigned int tx_xcnt; + u64 tx_mcnt; + unsigned int rx_xcnt; + unsigned int rx_ocnt; + u64 rx_mcnt; + unsigned int toggle_cnt; + unsigned int txen; + u64 rx_pause; + struct mac_stats stats; +}; + +enum { + MAC_DIRECTION_RX = 1, + MAC_DIRECTION_TX = 2, + MAC_RXFIFO_SIZE = 32768 +}; + +/* PHY loopback direction */ +enum { + PHY_LOOPBACK_TX = 1, + PHY_LOOPBACK_RX = 2 +}; + +/* PHY interrupt types */ +enum { + cphy_cause_link_change = 1, + cphy_cause_fifo_error = 2, + cphy_cause_module_change = 4, +}; + +/* PHY module types */ +enum { + phy_modtype_none, + phy_modtype_sr, + phy_modtype_lr, + phy_modtype_lrm, + phy_modtype_twinax, + phy_modtype_twinax_long, + phy_modtype_unknown +}; + +/* PHY operations */ +struct cphy_ops { + int (*reset)(struct cphy *phy, int wait); + + int (*intr_enable)(struct cphy *phy); + int (*intr_disable)(struct cphy *phy); + int (*intr_clear)(struct cphy *phy); + int (*intr_handler)(struct cphy *phy); + + int (*autoneg_enable)(struct cphy *phy); + int (*autoneg_restart)(struct cphy *phy); + + int (*advertise)(struct cphy *phy, unsigned int advertise_map); + int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable); + int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex); + int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed, + int *duplex, int *fc); + int (*power_down)(struct cphy *phy, int enable); + + u32 mmds; +}; +enum { + EDC_OPT_AEL2005 = 0, + EDC_OPT_AEL2005_SIZE = 1084, + EDC_TWX_AEL2005 = 1, + EDC_TWX_AEL2005_SIZE = 1464, + EDC_TWX_AEL2020 = 2, + EDC_TWX_AEL2020_SIZE = 1628, + EDC_MAX_SIZE = EDC_TWX_AEL2020_SIZE, /* Max cache size */ +}; + +/* A PHY instance */ +struct cphy { + u8 modtype; /* PHY module type */ + short priv; /* scratch pad */ + unsigned int caps; /* PHY capabilities */ + struct adapter *adapter; /* associated adapter */ + const char *desc; /* PHY description */ + unsigned long fifo_errors; /* FIFO over/under-flows */ + const struct cphy_ops *ops; /* PHY operations */ + struct mdio_if_info mdio; + u16 phy_cache[EDC_MAX_SIZE]; /* EDC cache */ +}; + +/* Convenience MDIO read/write wrappers */ +static inline int t3_mdio_read(struct cphy *phy, int mmd, int reg, + unsigned int *valp) +{ + int rc = phy->mdio.mdio_read(phy->mdio.dev, phy->mdio.prtad, mmd, reg); + *valp = (rc >= 0) ? rc : -1; + return (rc >= 0) ? 
0 : rc; +} + +static inline int t3_mdio_write(struct cphy *phy, int mmd, int reg, + unsigned int val) +{ + return phy->mdio.mdio_write(phy->mdio.dev, phy->mdio.prtad, mmd, + reg, val); +} + +/* Convenience initializer */ +static inline void cphy_init(struct cphy *phy, struct adapter *adapter, + int phy_addr, struct cphy_ops *phy_ops, + const struct mdio_ops *mdio_ops, + unsigned int caps, const char *desc) +{ + phy->caps = caps; + phy->adapter = adapter; + phy->desc = desc; + phy->ops = phy_ops; + if (mdio_ops) { + phy->mdio.prtad = phy_addr; + phy->mdio.mmds = phy_ops->mmds; + phy->mdio.mode_support = mdio_ops->mode_support; + phy->mdio.mdio_read = mdio_ops->read; + phy->mdio.mdio_write = mdio_ops->write; + } +} + +/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */ +#define MAC_STATS_ACCUM_SECS 180 + +#define XGM_REG(reg_addr, idx) \ + ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR)) + +struct addr_val_pair { + unsigned int reg_addr; + unsigned int val; +}; + +#include "adapter.h" + +#ifndef PCI_VENDOR_ID_CHELSIO +# define PCI_VENDOR_ID_CHELSIO 0x1425 +#endif + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +#define adapter_info(adap) ((adap)->params.info) + +static inline int uses_xaui(const struct adapter *adap) +{ + return adapter_info(adap)->caps & SUPPORTED_AUI; +} + +static inline int is_10G(const struct adapter *adap) +{ + return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full; +} + +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int is_pcie(const struct adapter *adap) +{ + return adap->params.pci.variant == PCI_VARIANT_PCIE; +} + +void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); +void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p, + int n, unsigned int offset); +int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp); +static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} +int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, + unsigned int set); +int t3_phy_reset(struct cphy *phy, int mmd, int wait); +int t3_phy_advertise(struct cphy *phy, unsigned int advert); +int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert); +int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex); +int t3_phy_lasi_intr_enable(struct cphy *phy); +int t3_phy_lasi_intr_disable(struct cphy *phy); +int t3_phy_lasi_intr_clear(struct cphy *phy); +int t3_phy_lasi_intr_handler(struct cphy *phy); + +void t3_intr_enable(struct adapter *adapter); +void t3_intr_disable(struct adapter *adapter); +void t3_intr_clear(struct adapter *adapter); +void t3_xgm_intr_enable(struct adapter *adapter, int idx); +void t3_xgm_intr_disable(struct adapter *adapter, int idx); +void t3_port_intr_enable(struct adapter *adapter, int idx); +void t3_port_intr_disable(struct adapter *adapter, int idx); +int t3_slow_intr_handler(struct adapter *adapter); +int t3_phy_intr_handler(struct adapter *adapter); + +void t3_link_changed(struct adapter *adapter, int port_id); +void t3_link_fault(struct adapter *adapter, int port_id); 
+int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); +const struct adapter_info *t3_get_adapter_info(unsigned int board_id); +int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data); +int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data); +int t3_seeprom_wp(struct adapter *adapter, int enable); +int t3_get_tp_version(struct adapter *adapter, u32 *vers); +int t3_check_tpsram_version(struct adapter *adapter); +int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram, + unsigned int size); +int t3_set_proto_sram(struct adapter *adap, const u8 *data); +int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); +int t3_get_fw_version(struct adapter *adapter, u32 *vers); +int t3_check_fw_version(struct adapter *adapter); +int t3_init_hw(struct adapter *adapter, u32 fw_params); +int t3_reset_adapter(struct adapter *adapter); +int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, + int reset); +int t3_replay_prep_adapter(struct adapter *adapter); +void t3_led_ready(struct adapter *adapter); +void t3_fatal_err(struct adapter *adapter); +void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); +void t3_config_rss(struct adapter *adapter, unsigned int rss_config, + const u8 * cpus, const u16 *rspq); +int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr, + unsigned int n, unsigned int *valp); +int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, + u64 *buf); + +int t3_mac_reset(struct cmac *mac); +void t3b_pcs_reset(struct cmac *mac); +void t3_mac_disable_exact_filters(struct cmac *mac); +void t3_mac_enable_exact_filters(struct cmac *mac); +int t3_mac_enable(struct cmac *mac, int which); +int t3_mac_disable(struct cmac *mac, int which); +int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); +int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev); +int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); +int t3_mac_set_num_ucast(struct cmac *mac, int n); +const struct mac_stats *t3_mac_update_stats(struct cmac *mac); +int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); +int t3b2_mac_watchdog_task(struct cmac *mac); + +void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode); +int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, + unsigned int nroutes); +void t3_mc5_intr_handler(struct mc5 *mc5); + +void t3_tp_set_offload_mode(struct adapter *adap, int enable); +void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps); +void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS], + unsigned short alpha[NCCTRL_WIN], + unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap); +void t3_config_trace_filter(struct adapter *adapter, + const struct trace_params *tp, int filter_index, + int invert, int enable); +int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched); + +void t3_sge_prep(struct adapter *adap, struct sge_params *p); +void t3_sge_init(struct adapter *adap, struct sge_params *p); +int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable, + enum sge_context_type type, int respq, u64 base_addr, + unsigned int size, unsigned int token, int gen, + unsigned int cidx); +int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id, + int gts_enable, u64 base_addr, unsigned int size, + unsigned int esize, unsigned int cong_thres, int gen, + unsigned int cidx); +int t3_sge_init_rspcntxt(struct 
adapter *adapter, unsigned int id, + int irq_vec_idx, u64 base_addr, unsigned int size, + unsigned int fl_thres, int gen, unsigned int cidx); +int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, + unsigned int size, int rspq, int ovfl_mode, + unsigned int credits, unsigned int credit_thres); +int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable); +int t3_sge_disable_fl(struct adapter *adapter, unsigned int id); +int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id); +int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id); +int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, + unsigned int credits); + +int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, + const struct mdio_ops *mdio_ops); +int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops); +#endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h new file mode 100644 index 000000000..369fe711f --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ctl_defs.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H +#define _CXGB3_OFFLOAD_CTL_DEFS_H + +enum { + GET_MAX_OUTSTANDING_WR = 0, + GET_TX_MAX_CHUNK = 1, + GET_TID_RANGE = 2, + GET_STID_RANGE = 3, + GET_RTBL_RANGE = 4, + GET_L2T_CAPACITY = 5, + GET_MTUS = 6, + GET_WR_LEN = 7, + GET_IFF_FROM_MAC = 8, + GET_DDP_PARAMS = 9, + GET_PORTS = 10, + + ULP_ISCSI_GET_PARAMS = 11, + ULP_ISCSI_SET_PARAMS = 12, + + RDMA_GET_PARAMS = 13, + RDMA_CQ_OP = 14, + RDMA_CQ_SETUP = 15, + RDMA_CQ_DISABLE = 16, + RDMA_CTRL_QP_SETUP = 17, + RDMA_GET_MEM = 18, + RDMA_GET_MIB = 19, + + GET_RX_PAGE_INFO = 50, + GET_ISCSI_IPV4ADDR = 51, + + GET_EMBEDDED_INFO = 70, +}; + +/* + * Structure used to describe a TID range. Valid TIDs are [base, base+num). + */ +struct tid_range { + unsigned int base; /* first TID */ + unsigned int num; /* number of TIDs in range */ +}; + +/* + * Structure used to request the size and contents of the MTU table. + */ +struct mtutab { + unsigned int size; /* # of entries in the MTU table */ + const unsigned short *mtus; /* the MTU table values */ +}; + +struct net_device; + +/* + * Structure used to request the adapter net_device owning a given MAC address. + */ +struct iff_mac { + struct net_device *dev; /* the net_device */ + const unsigned char *mac_addr; /* MAC address to lookup */ + u16 vlan_tag; +}; + +/* Structure used to request a port's iSCSI IPv4 address */ +struct iscsi_ipv4addr { + struct net_device *dev; /* the net_device */ + __be32 ipv4addr; /* the return iSCSI IPv4 address */ +}; + +struct pci_dev; + +/* + * Structure used to request the TCP DDP parameters. + */ +struct ddp_params { + unsigned int llimit; /* TDDP region start address */ + unsigned int ulimit; /* TDDP region end address */ + unsigned int tag_mask; /* TDDP tag mask */ + struct pci_dev *pdev; +}; + +struct adap_ports { + unsigned int nports; /* number of ports on this adapter */ + struct net_device *lldevs[2]; +}; + +/* + * Structure used to return information to the iscsi layer. + */ +struct ulp_iscsi_info { + unsigned int offset; + unsigned int llimit; + unsigned int ulimit; + unsigned int tagmask; + u8 pgsz_factor[4]; + unsigned int max_rxsz; + unsigned int max_txsz; + struct pci_dev *pdev; +}; + +/* + * Structure used to return information to the RDMA layer. + */ +struct rdma_info { + unsigned int tpt_base; /* TPT base address */ + unsigned int tpt_top; /* TPT last entry address */ + unsigned int pbl_base; /* PBL base address */ + unsigned int pbl_top; /* PBL last entry address */ + unsigned int rqt_base; /* RQT base address */ + unsigned int rqt_top; /* RQT last entry address */ + unsigned int udbell_len; /* user doorbell region length */ + unsigned long udbell_physbase; /* user doorbell physical start addr */ + void __iomem *kdb_addr; /* kernel doorbell register address */ + struct pci_dev *pdev; /* associated PCI device */ +}; + +/* + * Structure used to request an operation on an RDMA completion queue. + */ +struct rdma_cq_op { + unsigned int id; + unsigned int op; + unsigned int credits; +}; + +/* + * Structure used to setup RDMA completion queues. + */ +struct rdma_cq_setup { + unsigned int id; + unsigned long long base_addr; + unsigned int size; + unsigned int credits; + unsigned int credit_thres; + unsigned int ovfl_mode; +}; + +/* + * Structure used to setup the RDMA control egress context. + */ +struct rdma_ctrlqp_setup { + unsigned long long base_addr; + unsigned int size; +}; + +/* + * Offload TX/RX page information. 
+ */ +struct ofld_page_info { + unsigned int page_size; /* Page size, should be a power of 2 */ + unsigned int num; /* Number of pages */ +}; + +/* + * Structure used to get firmware and protocol engine versions. + */ +struct ch_embedded_info { + u32 fw_vers; + u32 tp_vers; +}; +#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h new file mode 100644 index 000000000..920d918ed --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_defs.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _CHELSIO_DEFS_H +#define _CHELSIO_DEFS_H + +#include +#include + +#include "t3cdev.h" + +#include "cxgb3_offload.h" + +#define VALIDATE_TID 1 + +void *cxgb_alloc_mem(unsigned long size); +void cxgb_free_mem(void *addr); + +/* + * Map an ATID or STID to their entries in the corresponding TID tables. + */ +static inline union active_open_entry *atid2entry(const struct tid_info *t, + unsigned int atid) +{ + return &t->atid_tab[atid - t->atid_base]; +} + +static inline union listen_entry *stid2entry(const struct tid_info *t, + unsigned int stid) +{ + return &t->stid_tab[stid - t->stid_base]; +} + +/* + * Find the connection corresponding to a TID. + */ +static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, + unsigned int tid) +{ + struct t3c_tid_entry *t3c_tid = tid < t->ntids ? + &(t->tid_tab[tid]) : NULL; + + return (t3c_tid && t3c_tid->client) ? t3c_tid : NULL; +} + +/* + * Find the connection corresponding to a server TID. + */ +static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t, + unsigned int tid) +{ + union listen_entry *e; + + if (tid < t->stid_base || tid >= t->stid_base + t->nstids) + return NULL; + + e = stid2entry(t, tid); + if ((void *)e->next >= (void *)t->tid_tab && + (void *)e->next < (void *)&t->atid_tab[t->natids]) + return NULL; + + return &e->t3c_tid; +} + +/* + * Find the connection corresponding to an active-open TID. 
+ */ +static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t, + unsigned int tid) +{ + union active_open_entry *e; + + if (tid < t->atid_base || tid >= t->atid_base + t->natids) + return NULL; + + e = atid2entry(t, tid); + if ((void *)e->next >= (void *)t->tid_tab && + (void *)e->next < (void *)&t->atid_tab[t->natids]) + return NULL; + + return &e->t3c_tid; +} + +int attach_t3cdev(struct t3cdev *dev); +void detach_t3cdev(struct t3cdev *dev); +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h new file mode 100644 index 000000000..b19e4376b --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_ioctl.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __CHIOCTL_H__ +#define __CHIOCTL_H__ + +/* + * Ioctl commands specific to this driver. 
+ */ +enum { + CHELSIO_GETMTUTAB = 1029, + CHELSIO_SETMTUTAB = 1030, + CHELSIO_SET_PM = 1032, + CHELSIO_GET_PM = 1033, + CHELSIO_GET_MEM = 1038, + CHELSIO_LOAD_FW = 1041, + CHELSIO_SET_TRACE_FILTER = 1044, + CHELSIO_SET_QSET_PARAMS = 1045, + CHELSIO_GET_QSET_PARAMS = 1046, + CHELSIO_SET_QSET_NUM = 1047, + CHELSIO_GET_QSET_NUM = 1048, +}; + +struct ch_reg { + uint32_t cmd; + uint32_t addr; + uint32_t val; +}; + +struct ch_cntxt { + uint32_t cmd; + uint32_t cntxt_type; + uint32_t cntxt_id; + uint32_t data[4]; +}; + +/* context types */ +enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ }; + +struct ch_desc { + uint32_t cmd; + uint32_t queue_num; + uint32_t idx; + uint32_t size; + uint8_t data[128]; +}; + +struct ch_mem_range { + uint32_t cmd; + uint32_t mem_id; + uint32_t addr; + uint32_t len; + uint32_t version; + uint8_t buf[0]; +}; + +struct ch_qset_params { + uint32_t cmd; + uint32_t qset_idx; + int32_t txq_size[3]; + int32_t rspq_size; + int32_t fl_size[2]; + int32_t intr_lat; + int32_t polling; + int32_t lro; + int32_t cong_thres; + int32_t vector; + int32_t qnum; +}; + +struct ch_pktsched_params { + uint32_t cmd; + uint8_t sched; + uint8_t idx; + uint8_t min; + uint8_t max; + uint8_t binding; +}; + +#ifndef TCB_SIZE +# define TCB_SIZE 128 +#endif + +/* TCB size in 32-bit words */ +#define TCB_WORDS (TCB_SIZE / 4) + +enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */ + +struct ch_mtus { + uint32_t cmd; + uint32_t nmtus; + uint16_t mtus[NMTUS]; +}; + +struct ch_pm { + uint32_t cmd; + uint32_t tx_pg_sz; + uint32_t tx_num_pg; + uint32_t rx_pg_sz; + uint32_t rx_num_pg; + uint32_t pm_total; +}; + +struct ch_tcam { + uint32_t cmd; + uint32_t tcam_size; + uint32_t nservers; + uint32_t nroutes; + uint32_t nfilters; +}; + +struct ch_tcb { + uint32_t cmd; + uint32_t tcb_index; + uint32_t tcb_data[TCB_WORDS]; +}; + +struct ch_tcam_word { + uint32_t cmd; + uint32_t addr; + uint32_t buf[3]; +}; + +struct ch_trace { + uint32_t cmd; + uint32_t sip; + uint32_t sip_mask; + uint32_t dip; + uint32_t dip_mask; + uint16_t sport; + uint16_t sport_mask; + uint16_t dport; + uint16_t dport_mask; + uint32_t vlan:12; + uint32_t vlan_mask:12; + uint32_t intf:4; + uint32_t intf_mask:4; + uint8_t proto; + uint8_t proto_mask; + uint8_t invert_match:1; + uint8_t config_tx:1; + uint8_t config_rx:1; + uint8_t trace_tx:1; + uint8_t trace_rx:1; +}; + +#define SIOCCHIOCTL SIOCDEVPRIVATE + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c new file mode 100644 index 000000000..c9761ceb4 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -0,0 +1,3431 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "cxgb3_ioctl.h" +#include "regs.h" +#include "cxgb3_offload.h" +#include "version.h" + +#include "cxgb3_ctl_defs.h" +#include "t3_cpl.h" +#include "firmware_exports.h" + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_CTRL_TXQ_ENTRIES = 1024, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + MAX_RX_JUMBO_BUFFERS = 16384, + MIN_TXQ_ENTRIES = 4, + MIN_CTRL_TXQ_ENTRIES = 4, + MIN_RSPQ_ENTRIES = 32, + MIN_FL_ENTRIES = 32 +}; + +#define PORT_MASK ((1 << MAX_NPORTS) - 1) + +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +#define EEPROM_MAGIC 0x38E2F10C + +#define CH_DEVICE(devid, idx) \ + { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx } + +static const struct pci_device_id cxgb3_pci_tbl[] = { + CH_DEVICE(0x20, 0), /* PE9000 */ + CH_DEVICE(0x21, 1), /* T302E */ + CH_DEVICE(0x22, 2), /* T310E */ + CH_DEVICE(0x23, 3), /* T320X */ + CH_DEVICE(0x24, 1), /* T302X */ + CH_DEVICE(0x25, 3), /* T320E */ + CH_DEVICE(0x26, 2), /* T310X */ + CH_DEVICE(0x30, 2), /* T3B10 */ + CH_DEVICE(0x31, 3), /* T3B20 */ + CH_DEVICE(0x32, 1), /* T3B02 */ + CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */ + CH_DEVICE(0x36, 3), /* S320E-CR */ + CH_DEVICE(0x37, 7), /* N320E-G2 */ + {0,} +}; + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl); + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X, MSI, legacy pin interrupts. This parameter determines which + * of these schemes the driver may consider as follows: + * + * msi = 2: choose from among all three options + * msi = 1: only consider MSI and pin interrupts + * msi = 0: force pin interrupts + */ +static int msi = 2; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X"); + +/* + * The driver enables offload as a default. + * To disable it, use ofld_disable = 1. + */ + +static int ofld_disable = 0; + +module_param(ofld_disable, int, 0644); +MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not"); + +/* + * We have work elements that we need to cancel when an interface is taken + * down. 
Normally the work elements would be executed by keventd but that + * can deadlock because of linkwatch. If our close method takes the rtnl + * lock and linkwatch is ahead of our work elements in keventd, linkwatch + * will block keventd as it needs the rtnl lock, and we'll deadlock waiting + * for our work to complete. Get our own work queue to solve this. + */ +struct workqueue_struct *cxgb3_wq; + +/** + * link_report - show link status and link speed/duplex + * @dev: the port whose settings are to be reported + * + * Shows the link status, speed, and duplex of a port. + */ +static void link_report(struct net_device *dev) +{ + if (!netif_carrier_ok(dev)) + netdev_info(dev, "link down\n"); + else { + const char *s = "10Mbps"; + const struct port_info *p = netdev_priv(dev); + + switch (p->link_config.speed) { + case SPEED_10000: + s = "10Gbps"; + break; + case SPEED_1000: + s = "1000Mbps"; + break; + case SPEED_100: + s = "100Mbps"; + break; + } + + netdev_info(dev, "link up, %s, %s-duplex\n", + s, p->link_config.duplex == DUPLEX_FULL + ? "full" : "half"); + } +} + +static void enable_tx_fifo_drain(struct adapter *adapter, + struct port_info *pi) +{ + t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0, + F_ENDROPPKT); + t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0); + t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN); + t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN); +} + +static void disable_tx_fifo_drain(struct adapter *adapter, + struct port_info *pi) +{ + t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, + F_ENDROPPKT, 0); +} + +void t3_os_link_fault(struct adapter *adap, int port_id, int state) +{ + struct net_device *dev = adap->port[port_id]; + struct port_info *pi = netdev_priv(dev); + + if (state == netif_carrier_ok(dev)) + return; + + if (state) { + struct cmac *mac = &pi->mac; + + netif_carrier_on(dev); + + disable_tx_fifo_drain(adap, pi); + + /* Clear local faults */ + t3_xgm_intr_disable(adap, pi->port_id); + t3_read_reg(adap, A_XGM_INT_STATUS + + pi->mac.offset); + t3_write_reg(adap, + A_XGM_INT_CAUSE + pi->mac.offset, + F_XGM_INT); + + t3_set_reg_field(adap, + A_XGM_INT_ENABLE + + pi->mac.offset, + F_XGM_INT, F_XGM_INT); + t3_xgm_intr_enable(adap, pi->port_id); + + t3_mac_enable(mac, MAC_DIRECTION_TX); + } else { + netif_carrier_off(dev); + + /* Flush TX FIFO */ + enable_tx_fifo_drain(adap, pi); + } + link_report(dev); +} + +/** + * t3_os_link_changed - handle link status changes + * @adapter: the adapter associated with the link change + * @port_id: the port index whose link status has changed + * @link_stat: the new status of the link + * @speed: the new speed setting + * @duplex: the new duplex setting + * @pause: the new flow-control setting + * + * This is the OS-dependent handler for link status changes. The OS + * neutral handler takes care of most of the processing for these events, + * then calls this handler for any OS-specific processing. + */ +void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat, + int speed, int duplex, int pause) +{ + struct net_device *dev = adapter->port[port_id]; + struct port_info *pi = netdev_priv(dev); + struct cmac *mac = &pi->mac; + + /* Skip changes from disabled ports. 
*/ + if (!netif_running(dev)) + return; + + if (link_stat != netif_carrier_ok(dev)) { + if (link_stat) { + disable_tx_fifo_drain(adapter, pi); + + t3_mac_enable(mac, MAC_DIRECTION_RX); + + /* Clear local faults */ + t3_xgm_intr_disable(adapter, pi->port_id); + t3_read_reg(adapter, A_XGM_INT_STATUS + + pi->mac.offset); + t3_write_reg(adapter, + A_XGM_INT_CAUSE + pi->mac.offset, + F_XGM_INT); + + t3_set_reg_field(adapter, + A_XGM_INT_ENABLE + pi->mac.offset, + F_XGM_INT, F_XGM_INT); + t3_xgm_intr_enable(adapter, pi->port_id); + + netif_carrier_on(dev); + } else { + netif_carrier_off(dev); + + t3_xgm_intr_disable(adapter, pi->port_id); + t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); + t3_set_reg_field(adapter, + A_XGM_INT_ENABLE + pi->mac.offset, + F_XGM_INT, 0); + + if (is_10G(adapter)) + pi->phy.ops->power_down(&pi->phy, 1); + + t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); + t3_mac_disable(mac, MAC_DIRECTION_RX); + t3_link_start(&pi->phy, mac, &pi->link_config); + + /* Flush TX FIFO */ + enable_tx_fifo_drain(adapter, pi); + } + + link_report(dev); + } +} + +/** + * t3_os_phymod_changed - handle PHY module changes + * @phy: the PHY reporting the module change + * @mod_type: new module type + * + * This is the OS-dependent handler for PHY module changes. It is + * invoked when a PHY module is removed or inserted for any OS-specific + * processing. + */ +void t3_os_phymod_changed(struct adapter *adap, int port_id) +{ + static const char *mod_str[] = { + NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown" + }; + + const struct net_device *dev = adap->port[port_id]; + const struct port_info *pi = netdev_priv(dev); + + if (pi->phy.modtype == phy_modtype_none) + netdev_info(dev, "PHY module unplugged\n"); + else + netdev_info(dev, "%s PHY module inserted\n", + mod_str[pi->phy.modtype]); +} + +static void cxgb_set_rxmode(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + t3_mac_set_rx_mode(&pi->mac, dev); +} + +/** + * link_start - enable a port + * @dev: the device to enable + * + * Performs the MAC and PHY actions needed to enable a port. + */ +static void link_start(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct cmac *mac = &pi->mac; + + t3_mac_reset(mac); + t3_mac_set_num_ucast(mac, MAX_MAC_IDX); + t3_mac_set_mtu(mac, dev->mtu); + t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr); + t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr); + t3_mac_set_rx_mode(mac, dev); + t3_link_start(&pi->phy, mac, &pi->link_config); + t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); +} + +static inline void cxgb_disable_msi(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } +} + +/* + * Interrupt handler for asynchronous events used with MSI-X. + */ +static irqreturn_t t3_async_intr_handler(int irq, void *cookie) +{ + t3_slow_intr_handler(cookie); + return IRQ_HANDLED; +} + +/* + * Name the MSI-X interrupts. 
+ */ +static void name_msix_vecs(struct adapter *adap) +{ + int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1; + + snprintf(adap->msix_info[0].desc, n, "%s", adap->name); + adap->msix_info[0].desc[n] = 0; + + for_each_port(adap, j) { + struct net_device *d = adap->port[j]; + const struct port_info *pi = netdev_priv(d); + + for (i = 0; i < pi->nqsets; i++, msi_idx++) { + snprintf(adap->msix_info[msi_idx].desc, n, + "%s-%d", d->name, pi->first_qset + i); + adap->msix_info[msi_idx].desc[n] = 0; + } + } +} + +static int request_msix_data_irqs(struct adapter *adap) +{ + int i, j, err, qidx = 0; + + for_each_port(adap, i) { + int nqsets = adap2pinfo(adap, i)->nqsets; + + for (j = 0; j < nqsets; ++j) { + err = request_irq(adap->msix_info[qidx + 1].vec, + t3_intr_handler(adap, + adap->sge.qs[qidx]. + rspq.polling), 0, + adap->msix_info[qidx + 1].desc, + &adap->sge.qs[qidx]); + if (err) { + while (--qidx >= 0) + free_irq(adap->msix_info[qidx + 1].vec, + &adap->sge.qs[qidx]); + return err; + } + qidx++; + } + } + return 0; +} + +static void free_irq_resources(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + int i, n = 0; + + free_irq(adapter->msix_info[0].vec, adapter); + for_each_port(adapter, i) + n += adap2pinfo(adapter, i)->nqsets; + + for (i = 0; i < n; ++i) + free_irq(adapter->msix_info[i + 1].vec, + &adapter->sge.qs[i]); + } else + free_irq(adapter->pdev->irq, adapter); +} + +static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, + unsigned long n) +{ + int attempts = 10; + + while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { + if (!--attempts) + return -ETIMEDOUT; + msleep(10); + } + return 0; +} + +static int init_tp_parity(struct adapter *adap) +{ + int i; + struct sk_buff *skb; + struct cpl_set_tcb_field *greq; + unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; + + t3_tp_set_offload_mode(adap, 1); + + for (i = 0; i < 16; i++) { + struct cpl_smt_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); + req->mtu_idx = NMTUS - 1; + req->iff = i; + t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + await_mgmt_replies(adap, cnt, i + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!adap->nofail_skb) + goto alloc_skb_fail; + } + } + + for (i = 0; i < 2048; i++) { + struct cpl_l2t_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); + req->params = htonl(V_L2T_W_IDX(i)); + t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + await_mgmt_replies(adap, cnt, 16 + i + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!adap->nofail_skb) + goto alloc_skb_fail; + } + } + + for (i = 0; i < 2048; i++) { + struct cpl_rte_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + req->wr.wr_hi = 
htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); + req->l2t_idx = htonl(V_L2T_W_IDX(i)); + t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!adap->nofail_skb) + goto alloc_skb_fail; + } + } + + skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + goto alloc_skb_fail; + + greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq)); + memset(greq, 0, sizeof(*greq)); + greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); + greq->mask = cpu_to_be64(1); + t3_mgmt_tx(adap, skb); + + i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); + if (skb == adap->nofail_skb) { + i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); + adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); + } + + t3_tp_set_offload_mode(adap, 0); + return i; + +alloc_skb_fail: + t3_tp_set_offload_mode(adap, 0); + return -ENOMEM; +} + +/** + * setup_rss - configure RSS + * @adap: the adapter + * + * Sets up RSS to distribute packets to multiple receive queues. We + * configure the RSS CPU lookup table to distribute to the number of HW + * receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each port. + * We always configure the RSS mapping for two ports since the mapping + * table has plenty of entries. + */ +static void setup_rss(struct adapter *adap) +{ + int i; + unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; + unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; + u8 cpus[SGE_QSETS + 1]; + u16 rspq_map[RSS_TABLE_SIZE]; + + for (i = 0; i < SGE_QSETS; ++i) + cpus[i] = i; + cpus[SGE_QSETS] = 0xff; /* terminator */ + + for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { + rspq_map[i] = i % nq0; + rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; + } + + t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | + F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | + V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); +} + +static void ring_dbs(struct adapter *adap) +{ + int i, j; + + for (i = 0; i < SGE_QSETS; i++) { + struct sge_qset *qs = &adap->sge.qs[i]; + + if (qs->adap) + for (j = 0; j < SGE_TXQ_PER_SET; j++) + t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); + } +} + +static void init_napi(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; i++) { + struct sge_qset *qs = &adap->sge.qs[i]; + + if (qs->adap) + netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, + 64); + } + + /* + * netif_napi_add() can be called only once per napi_struct because it + * adds each new napi_struct to a list. Be careful not to call it a + * second time, e.g., during EEH recovery, by making a note of it. + */ + adap->flags |= NAPI_INIT; +} + +/* + * Wait until all NAPI handlers are descheduled. This includes the handlers of + * both netdevices representing interfaces and the dummy ones for the extra + * queues. 
+ */ +static void quiesce_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_disable(&adap->sge.qs[i].napi); +} + +static void enable_all_napi(struct adapter *adap) +{ + int i; + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_enable(&adap->sge.qs[i].napi); +} + +/** + * setup_sge_qsets - configure SGE Tx/Rx/response queues + * @adap: the adapter + * + * Determines how many sets of SGE queues to use and initializes them. + * We support multiple queue sets per port if we have MSI-X, otherwise + * just one queue set per port. + */ +static int setup_sge_qsets(struct adapter *adap) +{ + int i, j, err, irq_idx = 0, qset_idx = 0; + unsigned int ntxq = SGE_TXQ_PER_SET; + + if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) + irq_idx = -1; + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + struct port_info *pi = netdev_priv(dev); + + pi->qs = &adap->sge.qs[pi->first_qset]; + for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { + err = t3_sge_alloc_qset(adap, qset_idx, 1, + (adap->flags & USING_MSIX) ? qset_idx + 1 : + irq_idx, + &adap->params.sge.qset[qset_idx], ntxq, dev, + netdev_get_tx_queue(dev, j)); + if (err) { + t3_free_sge_resources(adap); + return err; + } + } + } + + return 0; +} + +static ssize_t attr_show(struct device *d, char *buf, + ssize_t(*format) (struct net_device *, char *)) +{ + ssize_t len; + + /* Synchronize with ioctls that may shut down the device */ + rtnl_lock(); + len = (*format) (to_net_dev(d), buf); + rtnl_unlock(); + return len; +} + +static ssize_t attr_store(struct device *d, + const char *buf, size_t len, + ssize_t(*set) (struct net_device *, unsigned int), + unsigned int min_val, unsigned int max_val) +{ + char *endp; + ssize_t ret; + unsigned int val; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + val = simple_strtoul(buf, &endp, 0); + if (endp == buf || val < min_val || val > max_val) + return -EINVAL; + + rtnl_lock(); + ret = (*set) (to_net_dev(d), val); + if (!ret) + ret = len; + rtnl_unlock(); + return ret; +} + +#define CXGB3_SHOW(name, val_expr) \ +static ssize_t format_##name(struct net_device *dev, char *buf) \ +{ \ + struct port_info *pi = netdev_priv(dev); \ + struct adapter *adap = pi->adapter; \ + return sprintf(buf, "%u\n", val_expr); \ +} \ +static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ + char *buf) \ +{ \ + return attr_show(d, buf, format_##name); \ +} + +static ssize_t set_nfilters(struct net_device *dev, unsigned int val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + int min_tids = is_offload(adap) ? 
MC5_MIN_TIDS : 0; + + if (adap->flags & FULL_INIT_DONE) + return -EBUSY; + if (val && adap->params.rev == 0) + return -EINVAL; + if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - + min_tids) + return -EINVAL; + adap->params.mc5.nfilters = val; + return 0; +} + +static ssize_t store_nfilters(struct device *d, struct device_attribute *attr, + const char *buf, size_t len) +{ + return attr_store(d, buf, len, set_nfilters, 0, ~0); +} + +static ssize_t set_nservers(struct net_device *dev, unsigned int val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (adap->flags & FULL_INIT_DONE) + return -EBUSY; + if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters - + MC5_MIN_TIDS) + return -EINVAL; + adap->params.mc5.nservers = val; + return 0; +} + +static ssize_t store_nservers(struct device *d, struct device_attribute *attr, + const char *buf, size_t len) +{ + return attr_store(d, buf, len, set_nservers, 0, ~0); +} + +#define CXGB3_ATTR_R(name, val_expr) \ +CXGB3_SHOW(name, val_expr) \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) + +#define CXGB3_ATTR_RW(name, val_expr, store_method) \ +CXGB3_SHOW(name, val_expr) \ +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method) + +CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)); +CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters); +CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers); + +static struct attribute *cxgb3_attrs[] = { + &dev_attr_cam_size.attr, + &dev_attr_nfilters.attr, + &dev_attr_nservers.attr, + NULL +}; + +static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; + +static ssize_t tm_attr_show(struct device *d, + char *buf, int sched) +{ + struct port_info *pi = netdev_priv(to_net_dev(d)); + struct adapter *adap = pi->adapter; + unsigned int v, addr, bpt, cpt; + ssize_t len; + + addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; + rtnl_lock(); + t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); + v = t3_read_reg(adap, A_TP_TM_PIO_DATA); + if (sched & 1) + v >>= 16; + bpt = (v >> 8) & 0xff; + cpt = v & 0xff; + if (!cpt) + len = sprintf(buf, "disabled\n"); + else { + v = (adap->params.vpd.cclk * 1000) / cpt; + len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125); + } + rtnl_unlock(); + return len; +} + +static ssize_t tm_attr_store(struct device *d, + const char *buf, size_t len, int sched) +{ + struct port_info *pi = netdev_priv(to_net_dev(d)); + struct adapter *adap = pi->adapter; + unsigned int val; + char *endp; + ssize_t ret; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + val = simple_strtoul(buf, &endp, 0); + if (endp == buf || val > 10000000) + return -EINVAL; + + rtnl_lock(); + ret = t3_config_sched(adap, val, sched); + if (!ret) + ret = len; + rtnl_unlock(); + return ret; +} + +#define TM_ATTR(name, sched) \ +static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ + char *buf) \ +{ \ + return tm_attr_show(d, buf, sched); \ +} \ +static ssize_t store_##name(struct device *d, struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return tm_attr_store(d, buf, len, sched); \ +} \ +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name) + +TM_ATTR(sched0, 0); +TM_ATTR(sched1, 1); +TM_ATTR(sched2, 2); +TM_ATTR(sched3, 3); +TM_ATTR(sched4, 4); +TM_ATTR(sched5, 5); +TM_ATTR(sched6, 6); +TM_ATTR(sched7, 7); + +static struct attribute *offload_attrs[] = { + &dev_attr_sched0.attr, + &dev_attr_sched1.attr, + &dev_attr_sched2.attr, + &dev_attr_sched3.attr, + 
&dev_attr_sched4.attr, + &dev_attr_sched5.attr, + &dev_attr_sched6.attr, + &dev_attr_sched7.attr, + NULL +}; + +static struct attribute_group offload_attr_group = {.attrs = offload_attrs }; + +/* + * Sends an sk_buff to an offload queue driver + * after dealing with any active network taps. + */ +static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = t3_offload_tx(tdev, skb); + local_bh_enable(); + return ret; +} + +static int write_smt_entry(struct adapter *adapter, int idx) +{ + struct cpl_smt_write_req *req; + struct port_info *pi = netdev_priv(adapter->port[idx]); + struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL); + + if (!skb) + return -ENOMEM; + + req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); + req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ + req->iff = idx; + memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN); + memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN); + skb->priority = 1; + offload_tx(&adapter->tdev, skb); + return 0; +} + +static int init_smt(struct adapter *adapter) +{ + int i; + + for_each_port(adapter, i) + write_smt_entry(adapter, i); + return 0; +} + +static void init_port_mtus(struct adapter *adapter) +{ + unsigned int mtus = adapter->port[0]->mtu; + + if (adapter->port[1]) + mtus |= adapter->port[1]->mtu << 16; + t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus); +} + +static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, + int hi, int port) +{ + struct sk_buff *skb; + struct mngt_pktsched_wr *req; + int ret; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + skb = adap->nofail_skb; + if (!skb) + return -ENOMEM; + + req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req)); + req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); + req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; + req->sched = sched; + req->idx = qidx; + req->min = lo; + req->max = hi; + req->binding = port; + ret = t3_mgmt_tx(adap, skb); + if (skb == adap->nofail_skb) { + adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field), + GFP_KERNEL); + if (!adap->nofail_skb) + ret = -ENOMEM; + } + + return ret; +} + +static int bind_qsets(struct adapter *adap) +{ + int i, j, err = 0; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + + for (j = 0; j < pi->nqsets; ++j) { + int ret = send_pktsched_cmd(adap, 1, + pi->first_qset + j, -1, + -1, i); + if (ret) + err = ret; + } + } + + return err; +} + +/*(DEBLOBBED)*/ +#define FW_FNAME "/*(DEBLOBBED)*/" +/*(DEBLOBBED)*/ +#define TPSRAM_NAME "/*(DEBLOBBED)*/" +#define AEL2005_OPT_EDC_NAME "/*(DEBLOBBED)*/" +#define AEL2005_TWX_EDC_NAME "/*(DEBLOBBED)*/" +#define AEL2020_TWX_EDC_NAME "/*(DEBLOBBED)*/" +/*(DEBLOBBED)*/ + +static inline const char *get_edc_fw_name(int edc_idx) +{ + const char *fw_name = NULL; + + switch (edc_idx) { + case EDC_OPT_AEL2005: + fw_name = AEL2005_OPT_EDC_NAME; + break; + case EDC_TWX_AEL2005: + fw_name = AEL2005_TWX_EDC_NAME; + break; + case EDC_TWX_AEL2020: + fw_name = AEL2020_TWX_EDC_NAME; + break; + } + return fw_name; +} + +int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size) +{ + struct adapter *adapter = phy->adapter; + const struct firmware *fw; + char buf[64]; + u32 csum; + const __be32 *p; + u16 *cache = phy->phy_cache; + int i, ret; + + snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx)); + + ret = 
reject_firmware(&fw, buf, &adapter->pdev->dev); + if (ret < 0) { + dev_err(&adapter->pdev->dev, + "could not upgrade firmware: unable to load %s\n", + buf); + return ret; + } + + /* check size, take checksum in account */ + if (fw->size > size + 4) { + CH_ERR(adapter, "firmware image too large %u, expected %d\n", + (unsigned int)fw->size, size + 4); + ret = -EINVAL; + } + + /* compute checksum */ + p = (const __be32 *)fw->data; + for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + CH_ERR(adapter, "corrupted firmware image, checksum %u\n", + csum); + ret = -EINVAL; + } + + for (i = 0; i < size / 4 ; i++) { + *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16; + *cache++ = be32_to_cpu(p[i]) & 0xffff; + } + + release_firmware(fw); + + return ret; +} + +static int upgrade_fw(struct adapter *adap) +{ + int ret; + const struct firmware *fw; + struct device *dev = &adap->pdev->dev; + + ret = reject_firmware(&fw, FW_FNAME, dev); + if (ret < 0) { + dev_err(dev, "could not upgrade firmware: unable to load %s\n", + FW_FNAME); + return ret; + } + ret = t3_load_fw(adap, fw->data, fw->size); + release_firmware(fw); + + if (ret == 0) + dev_info(dev, "successful upgrade to firmware %d.%d.%d\n", + FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); + else + dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n", + FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); + + return ret; +} + +static inline char t3rev2char(struct adapter *adapter) +{ + char rev = 0; + + switch(adapter->params.rev) { + case T3_REV_B: + case T3_REV_B2: + rev = 'b'; + break; + case T3_REV_C: + rev = 'c'; + break; + } + return rev; +} + +static int update_tpsram(struct adapter *adap) +{ + const struct firmware *tpsram; + char buf[64]; + struct device *dev = &adap->pdev->dev; + int ret; + char rev; + + rev = t3rev2char(adap); + if (!rev) + return 0; + + snprintf(buf, sizeof(buf), TPSRAM_NAME, rev); + + ret = reject_firmware(&tpsram, buf, dev); + if (ret < 0) { + dev_err(dev, "could not load TP SRAM: unable to load %s\n", + buf); + return ret; + } + + ret = t3_check_tpsram(adap, tpsram->data, tpsram->size); + if (ret) + goto release_tpsram; + + ret = t3_set_proto_sram(adap, tpsram->data); + if (ret == 0) + dev_info(dev, + "successful update of protocol engine " + "to %d.%d.%d\n", + TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); + else + dev_err(dev, "failed to update of protocol engine %d.%d.%d\n", + TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); + if (ret) + dev_err(dev, "loading protocol SRAM failed\n"); + +release_tpsram: + release_firmware(tpsram); + + return ret; +} + +/** + * t3_synchronize_rx - wait for current Rx processing on a port to complete + * @adap: the adapter + * @p: the port + * + * Ensures that current Rx processing on any of the queues associated with + * the given port completes before returning. We do this by acquiring and + * releasing the locks of the response queues associated with the port. 
+ */ +static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) +{ + int i; + + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { + struct sge_rspq *q = &adap->sge.qs[i].rspq; + + spin_lock_irq(&q->lock); + spin_unlock_irq(&q->lock); + } +} + +static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (adapter->params.rev > 0) { + t3_set_vlan_accel(adapter, 1 << pi->port_id, + features & NETIF_F_HW_VLAN_CTAG_RX); + } else { + /* single control for all ports */ + unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX; + + for_each_port(adapter, i) + have_vlans |= + adapter->port[i]->features & + NETIF_F_HW_VLAN_CTAG_RX; + + t3_set_vlan_accel(adapter, 1, have_vlans); + } + t3_synchronize_rx(adapter, pi); +} + +/** + * cxgb_up - enable the adapter + * @adapter: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + * + * Must be called with the rtnl lock held. + */ +static int cxgb_up(struct adapter *adap) +{ + int i, err; + + if (!(adap->flags & FULL_INIT_DONE)) { + err = t3_check_fw_version(adap); + if (err == -EINVAL) { + err = upgrade_fw(adap); + CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n", + FW_VERSION_MAJOR, FW_VERSION_MINOR, + FW_VERSION_MICRO, err ? "failed" : "succeeded"); + } + + err = t3_check_tpsram_version(adap); + if (err == -EINVAL) { + err = update_tpsram(adap); + CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n", + TP_VERSION_MAJOR, TP_VERSION_MINOR, + TP_VERSION_MICRO, err ? "failed" : "succeeded"); + } + + /* + * Clear interrupts now to catch errors if t3_init_hw fails. + * We clear them again later as initialization may trigger + * conditions that can interrupt. + */ + t3_intr_clear(adap); + + err = t3_init_hw(adap, 0); + if (err) + goto out; + + t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); + t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); + + err = setup_sge_qsets(adap); + if (err) + goto out; + + for_each_port(adap, i) + cxgb_vlan_mode(adap->port[i], adap->port[i]->features); + + setup_rss(adap); + if (!(adap->flags & NAPI_INIT)) + init_napi(adap); + + t3_start_sge_timers(adap); + adap->flags |= FULL_INIT_DONE; + } + + t3_intr_clear(adap); + + if (adap->flags & USING_MSIX) { + name_msix_vecs(adap); + err = request_irq(adap->msix_info[0].vec, + t3_async_intr_handler, 0, + adap->msix_info[0].desc, adap); + if (err) + goto irq_err; + + err = request_msix_data_irqs(adap); + if (err) { + free_irq(adap->msix_info[0].vec, adap); + goto irq_err; + } + } else if ((err = request_irq(adap->pdev->irq, + t3_intr_handler(adap, + adap->sge.qs[0].rspq. + polling), + (adap->flags & USING_MSI) ? 
+ 0 : IRQF_SHARED, + adap->name, adap))) + goto irq_err; + + enable_all_napi(adap); + t3_sge_start(adap); + t3_intr_enable(adap); + + if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) && + is_offload(adap) && init_tp_parity(adap) == 0) + adap->flags |= TP_PARITY_INIT; + + if (adap->flags & TP_PARITY_INIT) { + t3_write_reg(adap, A_TP_INT_CAUSE, + F_CMCACHEPERR | F_ARPLUTPERR); + t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff); + } + + if (!(adap->flags & QUEUES_BOUND)) { + int ret = bind_qsets(adap); + + if (ret < 0) { + CH_ERR(adap, "failed to bind qsets, err %d\n", ret); + t3_intr_disable(adap); + free_irq_resources(adap); + err = ret; + goto out; + } + adap->flags |= QUEUES_BOUND; + } + +out: + return err; +irq_err: + CH_ERR(adap, "request_irq failed, err %d\n", err); + goto out; +} + +/* + * Release resources when all the ports and offloading have been stopped. + */ +static void cxgb_down(struct adapter *adapter, int on_wq) +{ + t3_sge_stop(adapter); + spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */ + t3_intr_disable(adapter); + spin_unlock_irq(&adapter->work_lock); + + free_irq_resources(adapter); + quiesce_rx(adapter); + t3_sge_stop(adapter); + if (!on_wq) + flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */ +} + +static void schedule_chk_task(struct adapter *adap) +{ + unsigned int timeo; + + timeo = adap->params.linkpoll_period ? + (HZ * adap->params.linkpoll_period) / 10 : + adap->params.stats_update_period * HZ; + if (timeo) + queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo); +} + +static int offload_open(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct t3cdev *tdev = dev2t3cdev(dev); + int adap_up = adapter->open_device_map & PORT_MASK; + int err; + + if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) + return 0; + + if (!adap_up && (err = cxgb_up(adapter)) < 0) + goto out; + + t3_tp_set_offload_mode(adapter, 1); + tdev->lldev = adapter->port[0]; + err = cxgb3_offload_activate(adapter); + if (err) + goto out; + + init_port_mtus(adapter); + t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd, + adapter->params.b_wnd, + adapter->params.rev == 0 ? 
+ adapter->port[0]->mtu : 0xffff); + init_smt(adapter); + + if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group)) + dev_dbg(&dev->dev, "cannot create sysfs group\n"); + + /* Call back all registered clients */ + cxgb3_add_clients(tdev); + +out: + /* restore them in case the offload module has changed them */ + if (err) { + t3_tp_set_offload_mode(adapter, 0); + clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); + cxgb3_set_dummy_ops(tdev); + } + return err; +} + +static int offload_close(struct t3cdev *tdev) +{ + struct adapter *adapter = tdev2adap(tdev); + struct t3c_data *td = T3C_DATA(tdev); + + if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) + return 0; + + /* Call back all registered clients */ + cxgb3_remove_clients(tdev); + + sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); + + /* Flush work scheduled while releasing TIDs */ + flush_work(&td->tid_release_task); + + tdev->lldev = NULL; + cxgb3_set_dummy_ops(tdev); + t3_tp_set_offload_mode(adapter, 0); + clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); + + if (!adapter->open_device_map) + cxgb_down(adapter, 0); + + cxgb3_offload_deactivate(adapter); + return 0; +} + +static int cxgb_open(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int other_ports = adapter->open_device_map & PORT_MASK; + int err; + + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + return err; + + set_bit(pi->port_id, &adapter->open_device_map); + if (is_offload(adapter) && !ofld_disable) { + err = offload_open(dev); + if (err) + pr_warn("Could not initialize offload capabilities\n"); + } + + netif_set_real_num_tx_queues(dev, pi->nqsets); + err = netif_set_real_num_rx_queues(dev, pi->nqsets); + if (err) + return err; + link_start(dev); + t3_port_intr_enable(adapter, pi->port_id); + netif_tx_start_all_queues(dev); + if (!other_ports) + schedule_chk_task(adapter); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id); + return 0; +} + +static int __cxgb_close(struct net_device *dev, int on_wq) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + + if (!adapter->open_device_map) + return 0; + + /* Stop link fault interrupts */ + t3_xgm_intr_disable(adapter, pi->port_id); + t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset); + + t3_port_intr_disable(adapter, pi->port_id); + netif_tx_stop_all_queues(dev); + pi->phy.ops->power_down(&pi->phy, 1); + netif_carrier_off(dev); + t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); + + spin_lock_irq(&adapter->work_lock); /* sync with update task */ + clear_bit(pi->port_id, &adapter->open_device_map); + spin_unlock_irq(&adapter->work_lock); + + if (!(adapter->open_device_map & PORT_MASK)) + cancel_delayed_work_sync(&adapter->adap_check_task); + + if (!adapter->open_device_map) + cxgb_down(adapter, on_wq); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); + return 0; +} + +static int cxgb_close(struct net_device *dev) +{ + return __cxgb_close(dev, 0); +} + +static struct net_device_stats *cxgb_get_stats(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct net_device_stats *ns = &pi->netstats; + const struct mac_stats *pstats; + + spin_lock(&adapter->stats_lock); + pstats = t3_mac_update_stats(&pi->mac); + spin_unlock(&adapter->stats_lock); + + ns->tx_bytes = pstats->tx_octets; + ns->tx_packets = pstats->tx_frames; + ns->rx_bytes = 
pstats->rx_octets; + ns->rx_packets = pstats->rx_frames; + ns->multicast = pstats->rx_mcast_frames; + + ns->tx_errors = pstats->tx_underrun; + ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs + + pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short + + pstats->rx_fifo_ovfl; + + /* detailed rx_errors */ + ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long; + ns->rx_over_errors = 0; + ns->rx_crc_errors = pstats->rx_fcs_errs; + ns->rx_frame_errors = pstats->rx_symbol_errs; + ns->rx_fifo_errors = pstats->rx_fifo_ovfl; + ns->rx_missed_errors = pstats->rx_cong_drops; + + /* detailed tx_errors */ + ns->tx_aborted_errors = 0; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = pstats->tx_underrun; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = 0; + return ns; +} + +static u32 get_msglevel(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + return adapter->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + adapter->msg_enable = val; +} + +static const char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxMulticastFramesOK", + "TxBroadcastFramesOK", + "TxPauseFrames ", + "TxUnderrun ", + "TxExtUnderrun ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxMulticastFramesOK", + "RxBroadcastFramesOK", + "RxPauseFrames ", + "RxFCSErrors ", + "RxSymbolErrors ", + "RxShortErrors ", + "RxJabberErrors ", + "RxLengthErrors ", + "RxFIFOoverflow ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "PhyFIFOErrors ", + "TSO ", + "VLANextractions ", + "VLANinsertions ", + "TxCsumOffload ", + "RxCsumGood ", + "LroAggregated ", + "LroFlushed ", + "LroNoDesc ", + "RxDrops ", + + "CheckTXEnToggled ", + "CheckResets ", + + "LinkFaults ", +}; + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +#define T3_REGMAP_SIZE (3 * 1024) + +static int get_regs_len(struct net_device *dev) +{ + return T3_REGMAP_SIZE; +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u32 fw_vers = 0; + u32 tp_vers = 0; + + spin_lock(&adapter->stats_lock); + t3_get_fw_version(adapter, &fw_vers); + t3_get_tp_version(adapter, &tp_vers); + spin_unlock(&adapter->stats_lock); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); + if (fw_vers) + snprintf(info->fw_version, sizeof(info->fw_version), + "%s %u.%u.%u TP %u.%u.%u", + G_FW_VERSION_TYPE(fw_vers) ? 
"T" : "N", + G_FW_VERSION_MAJOR(fw_vers), + G_FW_VERSION_MINOR(fw_vers), + G_FW_VERSION_MICRO(fw_vers), + G_TP_VERSION_MAJOR(tp_vers), + G_TP_VERSION_MINOR(tp_vers), + G_TP_VERSION_MICRO(tp_vers)); +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +static unsigned long collect_sge_port_stats(struct adapter *adapter, + struct port_info *p, int idx) +{ + int i; + unsigned long tot = 0; + + for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i) + tot += adapter->sge.qs[i].port_stats[idx]; + return tot; +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + const struct mac_stats *s; + + spin_lock(&adapter->stats_lock); + s = t3_mac_update_stats(&pi->mac); + spin_unlock(&adapter->stats_lock); + + *data++ = s->tx_octets; + *data++ = s->tx_frames; + *data++ = s->tx_mcast_frames; + *data++ = s->tx_bcast_frames; + *data++ = s->tx_pause; + *data++ = s->tx_underrun; + *data++ = s->tx_fifo_urun; + + *data++ = s->tx_frames_64; + *data++ = s->tx_frames_65_127; + *data++ = s->tx_frames_128_255; + *data++ = s->tx_frames_256_511; + *data++ = s->tx_frames_512_1023; + *data++ = s->tx_frames_1024_1518; + *data++ = s->tx_frames_1519_max; + + *data++ = s->rx_octets; + *data++ = s->rx_frames; + *data++ = s->rx_mcast_frames; + *data++ = s->rx_bcast_frames; + *data++ = s->rx_pause; + *data++ = s->rx_fcs_errs; + *data++ = s->rx_symbol_errs; + *data++ = s->rx_short; + *data++ = s->rx_jabber; + *data++ = s->rx_too_long; + *data++ = s->rx_fifo_ovfl; + + *data++ = s->rx_frames_64; + *data++ = s->rx_frames_65_127; + *data++ = s->rx_frames_128_255; + *data++ = s->rx_frames_256_511; + *data++ = s->rx_frames_512_1023; + *data++ = s->rx_frames_1024_1518; + *data++ = s->rx_frames_1519_max; + + *data++ = pi->phy.fifo_errors; + + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); + *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); + *data++ = 0; + *data++ = 0; + *data++ = 0; + *data++ = s->rx_cong_drops; + + *data++ = s->num_toggled; + *data++ = s->num_resets; + + *data++ = s->link_faults; +} + +static inline void reg_block_dump(struct adapter *ap, void *buf, + unsigned int start, unsigned int end) +{ + u32 *p = buf + start; + + for (; start <= end; start += sizeof(u32)) + *p++ = t3_read_reg(ap, start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *ap = pi->adapter; + + /* + * Version scheme: + * bits 0..9: chip version + * bits 10..15: chip revision + * bit 31: set for PCIe cards + */ + regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31); + + /* + * We skip the MAC statistics registers because they are clear-on-read. + * Also reading multi-register stats would need to synchronize with the + * periodic mac stats accumulation. Hard to justify the complexity. 
+ */ + memset(buf, 0, T3_REGMAP_SIZE); + reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN); + reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); + reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); + reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); + reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); + reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0, + XGM_REG(A_XGM_SERDES_STAT3, 1)); + reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), + XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_config.autoneg != AUTONEG_ENABLE) + return -EINVAL; + p->phy.ops->autoneg_restart(&p->phy); + return 0; +} + +static int set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_OFF: + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0); + break; + + case ETHTOOL_ID_ON: + case ETHTOOL_ID_INACTIVE: + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, + F_GPIO0_OUT_VAL); + } + + return 0; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct port_info *p = netdev_priv(dev); + + cmd->supported = p->link_config.supported; + cmd->advertising = p->link_config.advertising; + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, p->link_config.speed); + cmd->duplex = p->link_config.duplex; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } + + cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; + cmd->phy_address = p->phy.mdio.prtad; + cmd->transceiver = XCVR_EXTERNAL; + cmd->autoneg = p->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int speed_duplex_to_caps(int speed, int duplex) +{ + int cap = 0; + + switch (speed) { + case SPEED_10: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10baseT_Full; + else + cap = SUPPORTED_10baseT_Half; + break; + case SPEED_100: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_100baseT_Full; + else + cap = SUPPORTED_100baseT_Half; + break; + case SPEED_1000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_1000baseT_Full; + else + cap = SUPPORTED_1000baseT_Half; + break; + case SPEED_10000: + if (duplex == DUPLEX_FULL) + cap = SUPPORTED_10000baseT_Full; + } + return cap; +} + +#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ + ADVERTISED_10000baseT_Full) + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_config; + + if (!(lc->supported & SUPPORTED_Autoneg)) { + /* + * PHY offers a single speed/duplex. See if that's what's + * being requested. 
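+ * Any other forced speed/duplex, or a request to autonegotiate, is rejected with -EINVAL.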
+ */ + if (cmd->autoneg == AUTONEG_DISABLE) { + u32 speed = ethtool_cmd_speed(cmd); + int cap = speed_duplex_to_caps(speed, cmd->duplex); + if (lc->supported & cap) + return 0; + } + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + u32 speed = ethtool_cmd_speed(cmd); + int cap = speed_duplex_to_caps(speed, cmd->duplex); + + if (!(lc->supported & cap) || (speed == SPEED_1000)) + return -EINVAL; + lc->requested_speed = speed; + lc->requested_duplex = cmd->duplex; + lc->advertising = 0; + } else { + cmd->advertising &= ADVERTISED_MASK; + cmd->advertising &= lc->supported; + if (!cmd->advertising) + return -EINVAL; + lc->requested_speed = SPEED_INVALID; + lc->requested_duplex = DUPLEX_INVALID; + lc->advertising = cmd->advertising | ADVERTISED_Autoneg; + } + lc->autoneg = cmd->autoneg; + if (netif_running(dev)) + t3_link_start(&p->phy, &p->mac, lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_config; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & SUPPORTED_Autoneg) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (lc->autoneg == AUTONEG_ENABLE) { + if (netif_running(dev)) + t3_link_start(&p->phy, &p->mac, lc); + } else { + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + if (netif_running(dev)) + t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc); + } + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = q->fl_size; + e->rx_mini_pending = q->rspq_size; + e->rx_jumbo_pending = q->jumbo_size; + e->tx_pending = q->txq_size[0]; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct qset_params *q; + int i; + + if (e->rx_pending > MAX_RX_BUFFERS || + e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || + e->rx_jumbo_pending < MIN_FL_ENTRIES || + e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + q = &adapter->params.sge.qset[pi->first_qset]; + for (i = 0; i < pi->nqsets; ++i, ++q) { + q->rspq_size = e->rx_mini_pending; + q->fl_size = e->rx_pending; + q->jumbo_size = e->rx_jumbo_pending; + q->txq_size[0] = e->tx_pending; + q->txq_size[1] = e->tx_pending; + q->txq_size[2] = e->tx_pending; + } + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct port_info *pi = netdev_priv(dev); + struct 
adapter *adapter = pi->adapter; + struct qset_params *qsp; + struct sge_qset *qs; + int i; + + if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) + return -EINVAL; + + for (i = 0; i < pi->nqsets; i++) { + qsp = &adapter->params.sge.qset[i]; + qs = &adapter->sge.qs[i]; + qsp->coalesce_usecs = c->rx_coalesce_usecs; + t3_update_qset_coalesce(qs, qsp); + } + + return 0; +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct qset_params *q = adapter->params.sge.qset; + + c->rx_coalesce_usecs = q->coalesce_usecs; + return 0; +} + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 * data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int i, err = 0; + + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 * data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u32 aligned_offset, aligned_len; + __le32 *p; + u8 *buf; + int err; + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf); + if (!err && aligned_len > 4) + err = t3_seeprom_read(adapter, + aligned_offset + aligned_len - 4, + (__le32 *) & buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else + buf = data; + + err = t3_seeprom_wp(adapter, 0); + if (err) + goto out; + + for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) { + err = t3_seeprom_write(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t3_seeprom_wp(adapter, 1); +out: + if (buf != data) + kfree(buf); + return err; +} + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static const struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .set_phys_id = set_phys_id, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, +}; + +static int in_range(int val, int lo, int hi) +{ + return val < 0 || (val <= hi && val >= lo); +} + +static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter 
*adapter = pi->adapter; + u32 cmd; + int ret; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + switch (cmd) { + case CHELSIO_SET_QSET_PARAMS:{ + int i; + struct qset_params *q; + struct ch_qset_params t; + int q1 = pi->first_qset; + int nqsets = pi->nqsets; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + if (t.qset_idx >= SGE_QSETS) + return -EINVAL; + if (!in_range(t.intr_lat, 0, M_NEWTIMER) || + !in_range(t.cong_thres, 0, 255) || + !in_range(t.txq_size[0], MIN_TXQ_ENTRIES, + MAX_TXQ_ENTRIES) || + !in_range(t.txq_size[1], MIN_TXQ_ENTRIES, + MAX_TXQ_ENTRIES) || + !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES, + MAX_CTRL_TXQ_ENTRIES) || + !in_range(t.fl_size[0], MIN_FL_ENTRIES, + MAX_RX_BUFFERS) || + !in_range(t.fl_size[1], MIN_FL_ENTRIES, + MAX_RX_JUMBO_BUFFERS) || + !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, + MAX_RSPQ_ENTRIES)) + return -EINVAL; + + if ((adapter->flags & FULL_INIT_DONE) && + (t.rspq_size >= 0 || t.fl_size[0] >= 0 || + t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || + t.txq_size[1] >= 0 || t.txq_size[2] >= 0 || + t.polling >= 0 || t.cong_thres >= 0)) + return -EBUSY; + + /* Allow setting of any available qset when offload enabled */ + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + q1 = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + nqsets += pi->first_qset + pi->nqsets; + } + } + + if (t.qset_idx < q1) + return -EINVAL; + if (t.qset_idx > q1 + nqsets - 1) + return -EINVAL; + + q = &adapter->params.sge.qset[t.qset_idx]; + + if (t.rspq_size >= 0) + q->rspq_size = t.rspq_size; + if (t.fl_size[0] >= 0) + q->fl_size = t.fl_size[0]; + if (t.fl_size[1] >= 0) + q->jumbo_size = t.fl_size[1]; + if (t.txq_size[0] >= 0) + q->txq_size[0] = t.txq_size[0]; + if (t.txq_size[1] >= 0) + q->txq_size[1] = t.txq_size[1]; + if (t.txq_size[2] >= 0) + q->txq_size[2] = t.txq_size[2]; + if (t.cong_thres >= 0) + q->cong_thres = t.cong_thres; + if (t.intr_lat >= 0) { + struct sge_qset *qs = + &adapter->sge.qs[t.qset_idx]; + + q->coalesce_usecs = t.intr_lat; + t3_update_qset_coalesce(qs, q); + } + if (t.polling >= 0) { + if (adapter->flags & USING_MSIX) + q->polling = t.polling; + else { + /* No polling with INTx for T3A */ + if (adapter->params.rev == 0 && + !(adapter->flags & USING_MSI)) + t.polling = 0; + + for (i = 0; i < SGE_QSETS; i++) { + q = &adapter->params.sge. 
+ qset[i]; + q->polling = t.polling; + } + } + } + + if (t.lro >= 0) { + if (t.lro) + dev->wanted_features |= NETIF_F_GRO; + else + dev->wanted_features &= ~NETIF_F_GRO; + netdev_update_features(dev); + } + + break; + } + case CHELSIO_GET_QSET_PARAMS:{ + struct qset_params *q; + struct ch_qset_params t; + int q1 = pi->first_qset; + int nqsets = pi->nqsets; + int i; + + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + + /* Display qsets for all ports when offload enabled */ + if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + q1 = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + nqsets = pi->first_qset + pi->nqsets; + } + } + + if (t.qset_idx >= nqsets) + return -EINVAL; + + q = &adapter->params.sge.qset[q1 + t.qset_idx]; + t.rspq_size = q->rspq_size; + t.txq_size[0] = q->txq_size[0]; + t.txq_size[1] = q->txq_size[1]; + t.txq_size[2] = q->txq_size[2]; + t.fl_size[0] = q->fl_size; + t.fl_size[1] = q->jumbo_size; + t.polling = q->polling; + t.lro = !!(dev->features & NETIF_F_GRO); + t.intr_lat = q->coalesce_usecs; + t.cong_thres = q->cong_thres; + t.qnum = q1; + + if (adapter->flags & USING_MSIX) + t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec; + else + t.vector = adapter->pdev->irq; + + if (copy_to_user(useraddr, &t, sizeof(t))) + return -EFAULT; + break; + } + case CHELSIO_SET_QSET_NUM:{ + struct ch_reg edata; + unsigned int i, first_qset = 0, other_qsets = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + if (edata.val < 1 || + (edata.val > 1 && !(adapter->flags & USING_MSIX))) + return -EINVAL; + + for_each_port(adapter, i) + if (adapter->port[i] && adapter->port[i] != dev) + other_qsets += adap2pinfo(adapter, i)->nqsets; + + if (edata.val + other_qsets > SGE_QSETS) + return -EINVAL; + + pi->nqsets = edata.val; + + for_each_port(adapter, i) + if (adapter->port[i]) { + pi = adap2pinfo(adapter, i); + pi->first_qset = first_qset; + first_qset += pi->nqsets; + } + break; + } + case CHELSIO_GET_QSET_NUM:{ + struct ch_reg edata; + + memset(&edata, 0, sizeof(struct ch_reg)); + + edata.cmd = CHELSIO_GET_QSET_NUM; + edata.val = pi->nqsets; + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + break; + } + case CHELSIO_LOAD_FW:{ + u8 *fw_data; + struct ch_mem_range t; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + /* Check t.len sanity ? 
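+ * memdup_user() copies exactly t.len bytes and returns an ERR_PTR on failure, which is propagated below.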
*/ + fw_data = memdup_user(useraddr + sizeof(t), t.len); + if (IS_ERR(fw_data)) + return PTR_ERR(fw_data); + + ret = t3_load_fw(adapter, fw_data, t.len); + kfree(fw_data); + if (ret) + return ret; + break; + } + case CHELSIO_SETMTUTAB:{ + struct ch_mtus m; + int i; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (offload_running(adapter)) + return -EBUSY; + if (copy_from_user(&m, useraddr, sizeof(m))) + return -EFAULT; + if (m.nmtus != NMTUS) + return -EINVAL; + if (m.mtus[0] < 81) /* accommodate SACK */ + return -EINVAL; + + /* MTUs must be in ascending order */ + for (i = 1; i < NMTUS; ++i) + if (m.mtus[i] < m.mtus[i - 1]) + return -EINVAL; + + memcpy(adapter->params.mtus, m.mtus, + sizeof(adapter->params.mtus)); + break; + } + case CHELSIO_GET_PM:{ + struct tp_params *p = &adapter->params.tp; + struct ch_pm m = {.cmd = CHELSIO_GET_PM }; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + m.tx_pg_sz = p->tx_pg_size; + m.tx_num_pg = p->tx_num_pgs; + m.rx_pg_sz = p->rx_pg_size; + m.rx_num_pg = p->rx_num_pgs; + m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; + if (copy_to_user(useraddr, &m, sizeof(m))) + return -EFAULT; + break; + } + case CHELSIO_SET_PM:{ + struct ch_pm m; + struct tp_params *p = &adapter->params.tp; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + if (copy_from_user(&m, useraddr, sizeof(m))) + return -EFAULT; + if (!is_power_of_2(m.rx_pg_sz) || + !is_power_of_2(m.tx_pg_sz)) + return -EINVAL; /* not power of 2 */ + if (!(m.rx_pg_sz & 0x14000)) + return -EINVAL; /* not 16KB or 64KB */ + if (!(m.tx_pg_sz & 0x1554000)) + return -EINVAL; + if (m.tx_num_pg == -1) + m.tx_num_pg = p->tx_num_pgs; + if (m.rx_num_pg == -1) + m.rx_num_pg = p->rx_num_pgs; + if (m.tx_num_pg % 24 || m.rx_num_pg % 24) + return -EINVAL; + if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size || + m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size) + return -EINVAL; + p->rx_pg_size = m.rx_pg_sz; + p->tx_pg_size = m.tx_pg_sz; + p->rx_num_pgs = m.rx_num_pg; + p->tx_num_pgs = m.tx_num_pg; + break; + } + case CHELSIO_GET_MEM:{ + struct ch_mem_range t; + struct mc7 *mem; + u64 buf[32]; + + if (!is_offload(adapter)) + return -EOPNOTSUPP; + if (!(adapter->flags & FULL_INIT_DONE)) + return -EIO; /* need the memory controllers */ + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + if ((t.addr & 7) || (t.len & 7)) + return -EINVAL; + if (t.mem_id == MEM_CM) + mem = &adapter->cm; + else if (t.mem_id == MEM_PMRX) + mem = &adapter->pmrx; + else if (t.mem_id == MEM_PMTX) + mem = &adapter->pmtx; + else + return -EINVAL; + + /* + * Version scheme: + * bits 0..9: chip version + * bits 10..15: chip revision + */ + t.version = 3 | (adapter->params.rev << 10); + if (copy_to_user(useraddr, &t, sizeof(t))) + return -EFAULT; + + /* + * Read 256 bytes at a time as len can be large and we don't + * want to use huge intermediate buffers. 
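+ * (The on-stack buf[] above is 32 * 8 = 256 bytes; min_t() below caps each chunk to that size.)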
+ */ + useraddr += sizeof(t); /* advance to start of buffer */ + while (t.len) { + unsigned int chunk = + min_t(unsigned int, t.len, sizeof(buf)); + + ret = + t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, + buf); + if (ret) + return ret; + if (copy_to_user(useraddr, buf, chunk)) + return -EFAULT; + useraddr += chunk; + t.addr += chunk; + t.len -= chunk; + } + break; + } + case CHELSIO_SET_TRACE_FILTER:{ + struct ch_trace t; + const struct trace_params *tp; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (!offload_running(adapter)) + return -EAGAIN; + if (copy_from_user(&t, useraddr, sizeof(t))) + return -EFAULT; + + tp = (const struct trace_params *)&t.sip; + if (t.config_tx) + t3_config_trace_filter(adapter, tp, 0, + t.invert_match, + t.trace_tx); + if (t.config_rx) + t3_config_trace_filter(adapter, tp, 1, + t.invert_match, + t.trace_rx); + break; + } + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct mii_ioctl_data *data = if_mii(req); + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + switch (cmd) { + case SIOCGMIIREG: + case SIOCSMIIREG: + /* Convert phy_id from older PRTAD/DEVAD format */ + if (is_10G(adapter) && + !mdio_phy_id_is_c45(data->phy_id) && + (data->phy_id & 0x1f00) && + !(data->phy_id & 0xe0e0)) + data->phy_id = mdio_phy_id_c45(data->phy_id >> 8, + data->phy_id & 0x1f); + /* FALLTHRU */ + case SIOCGMIIPHY: + return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); + case SIOCCHIOCTL: + return cxgb_extension_ioctl(dev, req->ifr_data); + default: + return -EOPNOTSUPP; + } +} + +static int cxgb_change_mtu(struct net_device *dev, int new_mtu) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + + if (new_mtu < 81) /* accommodate SACK */ + return -EINVAL; + if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu))) + return ret; + dev->mtu = new_mtu; + init_port_mtus(adapter); + if (adapter->params.rev == 0 && offload_running(adapter)) + t3_load_mtus(adapter, adapter->params.mtus, + adapter->params.a_wnd, adapter->params.b_wnd, + adapter->port[0]->mtu); + return 0; +} + +static int cxgb_set_mac_addr(struct net_device *dev, void *p) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); + if (offload_running(adapter)) + write_smt_entry(adapter, pi->port_id); + return 0; +} + +static netdev_features_t cxgb_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. 
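+ * That is, NETIF_F_HW_VLAN_CTAG_TX is forced to track NETIF_F_HW_VLAN_CTAG_RX below.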
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int cxgb_set_features(struct net_device *dev, netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + cxgb_vlan_mode(dev, features); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cxgb_netpoll(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int qidx; + + for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) { + struct sge_qset *qs = &adapter->sge.qs[qidx]; + void *source; + + if (adapter->flags & USING_MSIX) + source = qs; + else + source = adapter; + + t3_intr_handler(adapter, qs->rspq.polling) (0, source); + } +} +#endif + +/* + * Periodic accumulation of MAC statistics. + */ +static void mac_stats_update(struct adapter *adapter) +{ + int i; + + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + + if (netif_running(dev)) { + spin_lock(&adapter->stats_lock); + t3_mac_update_stats(&p->mac); + spin_unlock(&adapter->stats_lock); + } + } +} + +static void check_link_status(struct adapter *adapter) +{ + int i; + + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + int link_fault; + + spin_lock_irq(&adapter->work_lock); + link_fault = p->link_fault; + spin_unlock_irq(&adapter->work_lock); + + if (link_fault) { + t3_link_fault(adapter, i); + continue; + } + + if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { + t3_xgm_intr_disable(adapter, i); + t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); + + t3_link_changed(adapter, i); + t3_xgm_intr_enable(adapter, i); + } + } +} + +static void check_t3b2_mac(struct adapter *adapter) +{ + int i; + + if (!rtnl_trylock()) /* synchronize with ifdown */ + return; + + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + int status; + + if (!netif_running(dev)) + continue; + + status = 0; + if (netif_running(dev) && netif_carrier_ok(dev)) + status = t3b2_mac_watchdog_task(&p->mac); + if (status == 1) + p->mac.stats.num_toggled++; + else if (status == 2) { + struct cmac *mac = &p->mac; + + t3_mac_set_mtu(mac, dev->mtu); + t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr); + cxgb_set_rxmode(dev); + t3_link_start(&p->phy, mac, &p->link_config); + t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); + t3_port_intr_enable(adapter, p->port_id); + p->mac.stats.num_resets++; + } + } + rtnl_unlock(); +} + + +static void t3_adap_check_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + adap_check_task.work); + const struct adapter_params *p = &adapter->params; + int port; + unsigned int v, status, reset; + + adapter->check_task_cnt++; + + check_link_status(adapter); + + /* Accumulate MAC stats if needed */ + if (!p->linkpoll_period || + (adapter->check_task_cnt * p->linkpoll_period) / 10 >= + p->stats_update_period) { + mac_stats_update(adapter); + adapter->check_task_cnt = 0; + } + + if (p->rev == T3_REV_B2) + check_t3b2_mac(adapter); + + /* + * Scan the XGMAC's to check for various conditions which we want to + * monitor in a periodic polling manner rather than via an interrupt + * condition. 
This is used for conditions which would otherwise flood + * the system with interrupts and we only really need to know that the + * conditions are "happening" ... For each condition we count the + * detection of the condition and reset it for the next polling loop. + */ + for_each_port(adapter, port) { + struct cmac *mac = &adap2pinfo(adapter, port)->mac; + u32 cause; + + cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset); + reset = 0; + if (cause & F_RXFIFO_OVERFLOW) { + mac->stats.rx_fifo_ovfl++; + reset |= F_RXFIFO_OVERFLOW; + } + + t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset); + } + + /* + * We do the same as above for FL_EMPTY interrupts. + */ + status = t3_read_reg(adapter, A_SG_INT_CAUSE); + reset = 0; + + if (status & F_FLEMPTY) { + struct sge_qset *qs = &adapter->sge.qs[0]; + int i = 0; + + reset |= F_FLEMPTY; + + v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) & + 0xffff; + + while (v) { + qs->fl[i].empty += (v & 1); + if (i) + qs++; + i ^= 1; + v >>= 1; + } + } + + t3_write_reg(adapter, A_SG_INT_CAUSE, reset); + + /* Schedule the next check update if any port is active. */ + spin_lock_irq(&adapter->work_lock); + if (adapter->open_device_map & PORT_MASK) + schedule_chk_task(adapter); + spin_unlock_irq(&adapter->work_lock); +} + +static void db_full_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_full_task); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0); +} + +static void db_empty_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_empty_task); + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0); +} + +static void db_drop_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + db_drop_task); + unsigned long delay = 1000; + unsigned short r; + + cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0); + + /* + * Sleep a while before ringing the driver qset dbs. + * The delay is between 1000-2023 usecs. + */ + get_random_bytes(&r, 2); + delay += r & 1023; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(delay)); + ring_dbs(adapter); +} + +/* + * Processes external (PHY) interrupts in process context. + */ +static void ext_intr_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + ext_intr_handler_task); + int i; + + /* Disable link fault interrupts */ + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + struct port_info *p = netdev_priv(dev); + + t3_xgm_intr_disable(adapter, i); + t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); + } + + /* Re-enable link fault interrupts */ + t3_phy_intr_handler(adapter); + + for_each_port(adapter, i) + t3_xgm_intr_enable(adapter, i); + + /* Now reenable external interrupts */ + spin_lock_irq(&adapter->work_lock); + if (adapter->slow_intr_mask) { + adapter->slow_intr_mask |= F_T3DBG; + t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG); + t3_write_reg(adapter, A_PL_INT_ENABLE0, + adapter->slow_intr_mask); + } + spin_unlock_irq(&adapter->work_lock); +} + +/* + * Interrupt-context handler for external (PHY) interrupts. + */ +void t3_os_ext_intr_handler(struct adapter *adapter) +{ + /* + * Schedule a task to handle external interrupts as they may be slow + * and we use a mutex to protect MDIO registers. We disable PHY + * interrupts in the meantime and let the task reenable them when + * it's done. 
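+ * (ext_intr_task() above re-adds F_T3DBG to slow_intr_mask and rewrites A_PL_INT_ENABLE0 once the PHY has been serviced.)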
+ */ + spin_lock(&adapter->work_lock); + if (adapter->slow_intr_mask) { + adapter->slow_intr_mask &= ~F_T3DBG; + t3_write_reg(adapter, A_PL_INT_ENABLE0, + adapter->slow_intr_mask); + queue_work(cxgb3_wq, &adapter->ext_intr_handler_task); + } + spin_unlock(&adapter->work_lock); +} + +void t3_os_link_fault_handler(struct adapter *adapter, int port_id) +{ + struct net_device *netdev = adapter->port[port_id]; + struct port_info *pi = netdev_priv(netdev); + + spin_lock(&adapter->work_lock); + pi->link_fault = 1; + spin_unlock(&adapter->work_lock); +} + +static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq) +{ + int i, ret = 0; + + if (is_offload(adapter) && + test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { + cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); + offload_close(&adapter->tdev); + } + + /* Stop all ports */ + for_each_port(adapter, i) { + struct net_device *netdev = adapter->port[i]; + + if (netif_running(netdev)) + __cxgb_close(netdev, on_wq); + } + + /* Stop SGE timers */ + t3_stop_sge_timers(adapter); + + adapter->flags &= ~FULL_INIT_DONE; + + if (reset) + ret = t3_reset_adapter(adapter); + + pci_disable_device(adapter->pdev); + + return ret; +} + +static int t3_reenable_adapter(struct adapter *adapter) +{ + if (pci_enable_device(adapter->pdev)) { + dev_err(&adapter->pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + goto err; + } + pci_set_master(adapter->pdev); + pci_restore_state(adapter->pdev); + pci_save_state(adapter->pdev); + + /* Free sge resources */ + t3_free_sge_resources(adapter); + + if (t3_replay_prep_adapter(adapter)) + goto err; + + return 0; +err: + return -1; +} + +static void t3_resume_ports(struct adapter *adapter) +{ + int i; + + /* Restart the ports */ + for_each_port(adapter, i) { + struct net_device *netdev = adapter->port[i]; + + if (netif_running(netdev)) { + if (cxgb_open(netdev)) { + dev_err(&adapter->pdev->dev, + "can't bring device back up" + " after reset\n"); + continue; + } + } + } + + if (is_offload(adapter) && !ofld_disable) + cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); +} + +/* + * processes a fatal error. + * Bring the ports down, reset the chip, bring the ports back up. + */ +static void fatal_error_task(struct work_struct *work) +{ + struct adapter *adapter = container_of(work, struct adapter, + fatal_error_handler_task); + int err = 0; + + rtnl_lock(); + err = t3_adapter_error(adapter, 1, 1); + if (!err) + err = t3_reenable_adapter(adapter); + if (!err) + t3_resume_ports(adapter); + + CH_ALERT(adapter, "adapter reset %s\n", err ? 
"failed" : "succeeded"); + rtnl_unlock(); +} + +void t3_fatal_err(struct adapter *adapter) +{ + unsigned int fw_status[4]; + + if (adapter->flags & FULL_INIT_DONE) { + t3_sge_stop(adapter); + t3_write_reg(adapter, A_XGM_TX_CTRL, 0); + t3_write_reg(adapter, A_XGM_RX_CTRL, 0); + t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0); + t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0); + + spin_lock(&adapter->work_lock); + t3_intr_disable(adapter); + queue_work(cxgb3_wq, &adapter->fatal_error_handler_task); + spin_unlock(&adapter->work_lock); + } + CH_ALERT(adapter, "encountered fatal error, operation suspended\n"); + if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status)) + CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n", + fw_status[0], fw_status[1], + fw_status[2], fw_status[3]); +} + +/** + * t3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + t3_adapter_error(adapter, 0, 0); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * t3_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + if (!t3_reenable_adapter(adapter)) + return PCI_ERS_RESULT_RECOVERED; + + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * t3_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void t3_io_resume(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", + t3_read_reg(adapter, A_PCIE_PEX_ERR)); + + rtnl_lock(); + t3_resume_ports(adapter); + rtnl_unlock(); +} + +static const struct pci_error_handlers t3_err_handler = { + .error_detected = t3_io_error_detected, + .slot_reset = t3_io_slot_reset, + .resume = t3_io_resume, +}; + +/* + * Set the number of qsets based on the number of CPUs and the number of ports, + * not to exceed the number of available qsets, assuming there are enough qsets + * per port in HW. 
+ */ +static void set_nqsets(struct adapter *adap) +{ + int i, j = 0; + int num_cpus = netif_get_num_default_rss_queues(); + int hwports = adap->params.nports; + int nqsets = adap->msix_nvectors - 1; + + if (adap->params.rev > 0 && adap->flags & USING_MSIX) { + if (hwports == 2 && + (hwports * nqsets > SGE_QSETS || + num_cpus >= nqsets / hwports)) + nqsets /= hwports; + if (nqsets > num_cpus) + nqsets = num_cpus; + if (nqsets < 1 || hwports == 4) + nqsets = 1; + } else + nqsets = 1; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = j; + pi->nqsets = nqsets; + j = pi->first_qset + nqsets; + + dev_info(&adap->pdev->dev, + "Port %d using %d queue sets.\n", i, nqsets); + } +} + +static int cxgb_enable_msix(struct adapter *adap) +{ + struct msix_entry entries[SGE_QSETS + 1]; + int vectors; + int i; + + vectors = ARRAY_SIZE(entries); + for (i = 0; i < vectors; ++i) + entries[i].entry = i; + + vectors = pci_enable_msix_range(adap->pdev, entries, + adap->params.nports + 1, vectors); + if (vectors < 0) + return vectors; + + for (i = 0; i < vectors; ++i) + adap->msix_info[i].vec = entries[i].vector; + adap->msix_nvectors = vectors; + + return 0; +} + +static void print_port_info(struct adapter *adap, const struct adapter_info *ai) +{ + static const char *pci_variant[] = { + "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express" + }; + + int i; + char buf[80]; + + if (is_pcie(adap)) + snprintf(buf, sizeof(buf), "%s x%d", + pci_variant[adap->params.pci.variant], + adap->params.pci.width); + else + snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit", + pci_variant[adap->params.pci.variant], + adap->params.pci.speed, adap->params.pci.width); + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + const struct port_info *pi = netdev_priv(dev); + + if (!test_bit(i, &adap->registered_device_map)) + continue; + netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n", + ai->desc, pi->phy.desc, + is_offload(adap) ? "R" : "", adap->params.rev, buf, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? 
" MSI" : ""); + if (adap->name == dev->name && adap->params.vpd.mclk) + pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", + adap->name, t3_mc7_size(&adap->cm) >> 20, + t3_mc7_size(&adap->pmtx) >> 20, + t3_mc7_size(&adap->pmrx) >> 20, + adap->params.vpd.sn); + } +} + +static const struct net_device_ops cxgb_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t3_eth_xmit, + .ndo_get_stats = cxgb_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = cxgb_set_rxmode, + .ndo_do_ioctl = cxgb_ioctl, + .ndo_change_mtu = cxgb_change_mtu, + .ndo_set_mac_address = cxgb_set_mac_addr, + .ndo_fix_features = cxgb_fix_features, + .ndo_set_features = cxgb_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb_netpoll, +#endif +}; + +static void cxgb3_init_iscsi_mac(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + + memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN); + pi->iscsic.mac_addr[3] |= 0x80; +} + +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ + NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) +static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int i, err, pci_using_dac = 0; + resource_size_t mmio_start, mmio_len; + const struct adapter_info *ai; + struct adapter *adapter = NULL; + struct port_info *pi; + + pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION); + + if (!cxgb3_wq) { + cxgb3_wq = create_singlethread_workqueue(DRV_NAME); + if (!cxgb3_wq) { + pr_err("cannot initialize work queue\n"); + return -ENOMEM; + } + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + /* Just info, some other driver may have claimed the device. 
*/ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + goto out_disable_device; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " + "coherent allocations\n"); + goto out_release_regions; + } + } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_release_regions; + } + + pci_set_master(pdev); + pci_save_state(pdev); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + ai = t3_get_adapter_info(ent->driver_data); + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_release_regions; + } + + adapter->nofail_skb = + alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL); + if (!adapter->nofail_skb) { + dev_err(&pdev->dev, "cannot allocate nofail buffer\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->regs = ioremap_nocache(mmio_start, mmio_len); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->pdev = pdev; + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + adapter->mmio_len = mmio_len; + + mutex_init(&adapter->mdio_lock); + spin_lock_init(&adapter->work_lock); + spin_lock_init(&adapter->stats_lock); + + INIT_LIST_HEAD(&adapter->adapter_list); + INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); + INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); + + INIT_WORK(&adapter->db_full_task, db_full_task); + INIT_WORK(&adapter->db_empty_task, db_empty_task); + INIT_WORK(&adapter->db_drop_task, db_drop_task); + + INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); + + for (i = 0; i < ai->nports0 + ai->nports1; ++i) { + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->port_id = i; + netif_carrier_off(netdev); + netdev->irq = pdev->irq; + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len - 1; + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; + netdev->features |= netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; + netdev->vlan_features |= netdev->features & VLAN_FEAT; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->netdev_ops = &cxgb_netdev_ops; + netdev->ethtool_ops = &cxgb_ethtool_ops; + } + + pci_set_drvdata(pdev, adapter); + if (t3_prep_adapter(adapter, ai, 1) < 0) { + err = -ENODEV; + goto out_free_dev; + } + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. + */ + for_each_port(adapter, i) { + err = register_netdev(adapter->port[i]); + if (err) + dev_warn(&pdev->dev, + "cannot register net device %s, skipping\n", + adapter->port[i]->name); + else { + /* + * Change the name we use for messages to the name of + * the first successfully registered interface. 
+ */ + if (!adapter->registered_device_map) + adapter->name = adapter->port[i]->name; + + __set_bit(i, &adapter->registered_device_map); + } + } + if (!adapter->registered_device_map) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto out_free_dev; + } + + for_each_port(adapter, i) + cxgb3_init_iscsi_mac(adapter->port[i]); + + /* Driver's ready. Reflect it on LEDs */ + t3_led_ready(adapter); + + if (is_offload(adapter)) { + __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); + cxgb3_adapter_ofld(adapter); + } + + /* See what interrupts we'll be using */ + if (msi > 1 && cxgb_enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + set_nqsets(adapter); + + err = sysfs_create_group(&adapter->port[0]->dev.kobj, + &cxgb3_attr_group); + + print_port_info(adapter, ai); + return 0; + +out_free_dev: + iounmap(adapter->regs); + for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + +out_free_adapter: + kfree(adapter); + +out_release_regions: + pci_release_regions(pdev); +out_disable_device: + pci_disable_device(pdev); +out: + return err; +} + +static void remove_one(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + if (adapter) { + int i; + + t3_sge_stop(adapter); + sysfs_remove_group(&adapter->port[0]->dev.kobj, + &cxgb3_attr_group); + + if (is_offload(adapter)) { + cxgb3_adapter_unofld(adapter); + if (test_bit(OFFLOAD_DEVMAP_BIT, + &adapter->open_device_map)) + offload_close(&adapter->tdev); + } + + for_each_port(adapter, i) + if (test_bit(i, &adapter->registered_device_map)) + unregister_netdev(adapter->port[i]); + + t3_stop_sge_timers(adapter); + t3_free_sge_resources(adapter); + cxgb_disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + + iounmap(adapter->regs); + if (adapter->nofail_skb) + kfree_skb(adapter->nofail_skb); + kfree(adapter); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +static struct pci_driver driver = { + .name = DRV_NAME, + .id_table = cxgb3_pci_tbl, + .probe = init_one, + .remove = remove_one, + .err_handler = &t3_err_handler, +}; + +static int __init cxgb3_init_module(void) +{ + int ret; + + cxgb3_offload_init(); + + ret = pci_register_driver(&driver); + return ret; +} + +static void __exit cxgb3_cleanup_module(void) +{ + pci_unregister_driver(&driver); + if (cxgb3_wq) + destroy_workqueue(cxgb3_wq); +} + +module_init(cxgb3_init_module); +module_exit(cxgb3_cleanup_module); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c new file mode 100644 index 000000000..b0cbb2b7f --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -0,0 +1,1427 @@ +/* + * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "regs.h" +#include "cxgb3_ioctl.h" +#include "cxgb3_ctl_defs.h" +#include "cxgb3_defs.h" +#include "l2t.h" +#include "firmware_exports.h" +#include "cxgb3_offload.h" + +static LIST_HEAD(client_list); +static LIST_HEAD(ofld_dev_list); +static DEFINE_MUTEX(cxgb3_db_lock); + +static DEFINE_RWLOCK(adapter_list_lock); +static LIST_HEAD(adapter_list); + +static const unsigned int MAX_ATIDS = 64 * 1024; +static const unsigned int ATID_BASE = 0x10000; + +static void cxgb_neigh_update(struct neighbour *neigh); +static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new, + struct neighbour *neigh, const void *daddr); + +static inline int offload_activated(struct t3cdev *tdev) +{ + const struct adapter *adapter = tdev2adap(tdev); + + return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); +} + +/** + * cxgb3_register_client - register an offload client + * @client: the client + * + * Add the client to the client list, + * and call backs the client for each activated offload device + */ +void cxgb3_register_client(struct cxgb3_client *client) +{ + struct t3cdev *tdev; + + mutex_lock(&cxgb3_db_lock); + list_add_tail(&client->client_list, &client_list); + + if (client->add) { + list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) { + if (offload_activated(tdev)) + client->add(tdev); + } + } + mutex_unlock(&cxgb3_db_lock); +} + +EXPORT_SYMBOL(cxgb3_register_client); + +/** + * cxgb3_unregister_client - unregister an offload client + * @client: the client + * + * Remove the client to the client list, + * and call backs the client for each activated offload device. 
+ */ +void cxgb3_unregister_client(struct cxgb3_client *client) +{ + struct t3cdev *tdev; + + mutex_lock(&cxgb3_db_lock); + list_del(&client->client_list); + + if (client->remove) { + list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) { + if (offload_activated(tdev)) + client->remove(tdev); + } + } + mutex_unlock(&cxgb3_db_lock); +} + +EXPORT_SYMBOL(cxgb3_unregister_client); + +/** + * cxgb3_add_clients - activate registered clients for an offload device + * @tdev: the offload device + * + * Call backs all registered clients once a offload device is activated + */ +void cxgb3_add_clients(struct t3cdev *tdev) +{ + struct cxgb3_client *client; + + mutex_lock(&cxgb3_db_lock); + list_for_each_entry(client, &client_list, client_list) { + if (client->add) + client->add(tdev); + } + mutex_unlock(&cxgb3_db_lock); +} + +/** + * cxgb3_remove_clients - deactivates registered clients + * for an offload device + * @tdev: the offload device + * + * Call backs all registered clients once a offload device is deactivated + */ +void cxgb3_remove_clients(struct t3cdev *tdev) +{ + struct cxgb3_client *client; + + mutex_lock(&cxgb3_db_lock); + list_for_each_entry(client, &client_list, client_list) { + if (client->remove) + client->remove(tdev); + } + mutex_unlock(&cxgb3_db_lock); +} + +void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port) +{ + struct cxgb3_client *client; + + mutex_lock(&cxgb3_db_lock); + list_for_each_entry(client, &client_list, client_list) { + if (client->event_handler) + client->event_handler(tdev, event, port); + } + mutex_unlock(&cxgb3_db_lock); +} + +static struct net_device *get_iff_from_mac(struct adapter *adapter, + const unsigned char *mac, + unsigned int vlan) +{ + int i; + + for_each_port(adapter, i) { + struct net_device *dev = adapter->port[i]; + + if (ether_addr_equal(dev->dev_addr, mac)) { + rcu_read_lock(); + if (vlan && vlan != VLAN_VID_MASK) { + dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan); + } else if (netif_is_bond_slave(dev)) { + struct net_device *upper_dev; + + while ((upper_dev = + netdev_master_upper_dev_get_rcu(dev))) + dev = upper_dev; + } + rcu_read_unlock(); + return dev; + } + } + return NULL; +} + +static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, + void *data) +{ + int i; + int ret = 0; + unsigned int val = 0; + struct ulp_iscsi_info *uiip = data; + + switch (req) { + case ULP_ISCSI_GET_PARAMS: + uiip->pdev = adapter->pdev; + uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT); + uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT); + uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK); + + val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ); + for (i = 0; i < 4; i++, val >>= 8) + uiip->pgsz_factor[i] = val & 0xFF; + + val = t3_read_reg(adapter, A_TP_PARA_REG7); + uiip->max_txsz = + uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0, + (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1); + /* + * On tx, the iscsi pdu has to be <= tx page size and has to + * fit into the Tx PM FIFO. 
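+ * Both limits are enforced by the min() calls that follow.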
+ */ + val = min(adapter->params.tp.tx_pg_size, + t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); + uiip->max_txsz = min(val, uiip->max_txsz); + + /* set MaxRxData to 16224 */ + val = t3_read_reg(adapter, A_TP_PARA_REG2); + if ((val >> S_MAXRXDATA) != 0x3f60) { + val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE); + val |= V_MAXRXDATA(0x3f60); + pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n", + adapter->name, val); + t3_write_reg(adapter, A_TP_PARA_REG2, val); + } + + /* + * on rx, the iscsi pdu has to be < rx page size and the + * the max rx data length programmed in TP + */ + val = min(adapter->params.tp.rx_pg_size, + ((t3_read_reg(adapter, A_TP_PARA_REG2)) >> + S_MAXRXDATA) & M_MAXRXDATA); + uiip->max_rxsz = min(val, uiip->max_rxsz); + break; + case ULP_ISCSI_SET_PARAMS: + t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); + /* program the ddp page sizes */ + for (i = 0; i < 4; i++) + val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); + if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) { + pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n", + adapter->name, val, uiip->pgsz_factor[0], + uiip->pgsz_factor[1], uiip->pgsz_factor[2], + uiip->pgsz_factor[3]); + t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val); + } + break; + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +/* Response queue used for RDMA events. */ +#define ASYNC_NOTIF_RSPQ 0 + +static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) +{ + int ret = 0; + + switch (req) { + case RDMA_GET_PARAMS: { + struct rdma_info *rdma = data; + struct pci_dev *pdev = adapter->pdev; + + rdma->udbell_physbase = pci_resource_start(pdev, 2); + rdma->udbell_len = pci_resource_len(pdev, 2); + rdma->tpt_base = + t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); + rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); + rdma->pbl_base = + t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); + rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); + rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); + rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); + rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; + rdma->pdev = pdev; + break; + } + case RDMA_CQ_OP:{ + unsigned long flags; + struct rdma_cq_op *rdma = data; + + /* may be called in any context */ + spin_lock_irqsave(&adapter->sge.reg_lock, flags); + ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op, + rdma->credits); + spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); + break; + } + case RDMA_GET_MEM:{ + struct ch_mem_range *t = data; + struct mc7 *mem; + + if ((t->addr & 7) || (t->len & 7)) + return -EINVAL; + if (t->mem_id == MEM_CM) + mem = &adapter->cm; + else if (t->mem_id == MEM_PMRX) + mem = &adapter->pmrx; + else if (t->mem_id == MEM_PMTX) + mem = &adapter->pmtx; + else + return -EINVAL; + + ret = + t3_mc7_bd_read(mem, t->addr / 8, t->len / 8, + (u64 *) t->buf); + if (ret) + return ret; + break; + } + case RDMA_CQ_SETUP:{ + struct rdma_cq_setup *rdma = data; + + spin_lock_irq(&adapter->sge.reg_lock); + ret = + t3_sge_init_cqcntxt(adapter, rdma->id, + rdma->base_addr, rdma->size, + ASYNC_NOTIF_RSPQ, + rdma->ovfl_mode, rdma->credits, + rdma->credit_thres); + spin_unlock_irq(&adapter->sge.reg_lock); + break; + } + case RDMA_CQ_DISABLE: + spin_lock_irq(&adapter->sge.reg_lock); + ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data); + spin_unlock_irq(&adapter->sge.reg_lock); + break; + case RDMA_CTRL_QP_SETUP:{ + struct rdma_ctrlqp_setup *rdma = data; + + spin_lock_irq(&adapter->sge.reg_lock); + ret = t3_sge_init_ecntxt(adapter, 
FW_RI_SGEEC_START, 0, + SGE_CNTXT_RDMA, + ASYNC_NOTIF_RSPQ, + rdma->base_addr, rdma->size, + FW_RI_TID_START, 1, 0); + spin_unlock_irq(&adapter->sge.reg_lock); + break; + } + case RDMA_GET_MIB: { + spin_lock(&adapter->stats_lock); + t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data); + spin_unlock(&adapter->stats_lock); + break; + } + default: + ret = -EOPNOTSUPP; + } + return ret; +} + +static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) +{ + struct adapter *adapter = tdev2adap(tdev); + struct tid_range *tid; + struct mtutab *mtup; + struct iff_mac *iffmacp; + struct ddp_params *ddpp; + struct adap_ports *ports; + struct ofld_page_info *rx_page_info; + struct tp_params *tp = &adapter->params.tp; + int i; + + switch (req) { + case GET_MAX_OUTSTANDING_WR: + *(unsigned int *)data = FW_WR_NUM; + break; + case GET_WR_LEN: + *(unsigned int *)data = WR_FLITS; + break; + case GET_TX_MAX_CHUNK: + *(unsigned int *)data = 1 << 20; /* 1MB */ + break; + case GET_TID_RANGE: + tid = data; + tid->num = t3_mc5_size(&adapter->mc5) - + adapter->params.mc5.nroutes - + adapter->params.mc5.nfilters - adapter->params.mc5.nservers; + tid->base = 0; + break; + case GET_STID_RANGE: + tid = data; + tid->num = adapter->params.mc5.nservers; + tid->base = t3_mc5_size(&adapter->mc5) - tid->num - + adapter->params.mc5.nfilters - adapter->params.mc5.nroutes; + break; + case GET_L2T_CAPACITY: + *(unsigned int *)data = 2048; + break; + case GET_MTUS: + mtup = data; + mtup->size = NMTUS; + mtup->mtus = adapter->params.mtus; + break; + case GET_IFF_FROM_MAC: + iffmacp = data; + iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr, + iffmacp->vlan_tag & + VLAN_VID_MASK); + break; + case GET_DDP_PARAMS: + ddpp = data; + ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT); + ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT); + ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK); + break; + case GET_PORTS: + ports = data; + ports->nports = adapter->params.nports; + for_each_port(adapter, i) + ports->lldevs[i] = adapter->port[i]; + break; + case ULP_ISCSI_GET_PARAMS: + case ULP_ISCSI_SET_PARAMS: + if (!offload_running(adapter)) + return -EAGAIN; + return cxgb_ulp_iscsi_ctl(adapter, req, data); + case RDMA_GET_PARAMS: + case RDMA_CQ_OP: + case RDMA_CQ_SETUP: + case RDMA_CQ_DISABLE: + case RDMA_CTRL_QP_SETUP: + case RDMA_GET_MEM: + case RDMA_GET_MIB: + if (!offload_running(adapter)) + return -EAGAIN; + return cxgb_rdma_ctl(adapter, req, data); + case GET_RX_PAGE_INFO: + rx_page_info = data; + rx_page_info->page_size = tp->rx_pg_size; + rx_page_info->num = tp->rx_num_pgs; + break; + case GET_ISCSI_IPV4ADDR: { + struct iscsi_ipv4addr *p = data; + struct port_info *pi = netdev_priv(p->dev); + p->ipv4addr = pi->iscsi_ipv4addr; + break; + } + case GET_EMBEDDED_INFO: { + struct ch_embedded_info *e = data; + + spin_lock(&adapter->stats_lock); + t3_get_fw_version(adapter, &e->fw_vers); + t3_get_tp_version(adapter, &e->tp_vers); + spin_unlock(&adapter->stats_lock); + break; + } + default: + return -EOPNOTSUPP; + } + return 0; +} + +/* + * Dummy handler for Rx offload packets in case we get an offload packet before + * proper processing is setup. This complains and drops the packet as it isn't + * normal to get offload packets at this stage. 
+ */ +static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs, + int n) +{ + while (n--) + dev_kfree_skb_any(skbs[n]); + return 0; +} + +static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh) +{ +} + +void cxgb3_set_dummy_ops(struct t3cdev *dev) +{ + dev->recv = rx_offload_blackhole; + dev->neigh_update = dummy_neigh_update; +} + +/* + * Free an active-open TID. + */ +void *cxgb3_free_atid(struct t3cdev *tdev, int atid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + union active_open_entry *p = atid2entry(t, atid); + void *ctx = p->t3c_tid.ctx; + + spin_lock_bh(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + spin_unlock_bh(&t->atid_lock); + + return ctx; +} + +EXPORT_SYMBOL(cxgb3_free_atid); + +/* + * Free a server TID and return it to the free pool. + */ +void cxgb3_free_stid(struct t3cdev *tdev, int stid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + union listen_entry *p = stid2entry(t, stid); + + spin_lock_bh(&t->stid_lock); + p->next = t->sfree; + t->sfree = p; + t->stids_in_use--; + spin_unlock_bh(&t->stid_lock); +} + +EXPORT_SYMBOL(cxgb3_free_stid); + +void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client, + void *ctx, unsigned int tid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + t->tid_tab[tid].client = client; + t->tid_tab[tid].ctx = ctx; + atomic_inc(&t->tids_in_use); +} + +EXPORT_SYMBOL(cxgb3_insert_tid); + +/* + * Populate a TID_RELEASE WR. The skb must be already propely sized. + */ +static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid) +{ + struct cpl_tid_release *req; + + skb->priority = CPL_PRIORITY_SETUP; + req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); +} + +static void t3_process_tid_release_list(struct work_struct *work) +{ + struct t3c_data *td = container_of(work, struct t3c_data, + tid_release_task); + struct sk_buff *skb; + struct t3cdev *tdev = td->dev; + + + spin_lock_bh(&td->tid_release_lock); + while (td->tid_release_list) { + struct t3c_tid_entry *p = td->tid_release_list; + + td->tid_release_list = p->ctx; + spin_unlock_bh(&td->tid_release_lock); + + skb = alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL); + if (!skb) + skb = td->nofail_skb; + if (!skb) { + spin_lock_bh(&td->tid_release_lock); + p->ctx = (void *)td->tid_release_list; + td->tid_release_list = p; + break; + } + mk_tid_release(skb, p - td->tid_maps.tid_tab); + cxgb3_ofld_send(tdev, skb); + p->ctx = NULL; + if (skb == td->nofail_skb) + td->nofail_skb = + alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL); + spin_lock_bh(&td->tid_release_lock); + } + td->release_list_incomplete = (td->tid_release_list == NULL) ? 
0 : 1; + spin_unlock_bh(&td->tid_release_lock); + + if (!td->nofail_skb) + td->nofail_skb = + alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL); +} + +/* use ctx as a next pointer in the tid release list */ +void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid) +{ + struct t3c_data *td = T3C_DATA(tdev); + struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid]; + + spin_lock_bh(&td->tid_release_lock); + p->ctx = (void *)td->tid_release_list; + p->client = NULL; + td->tid_release_list = p; + if (!p->ctx || td->release_list_incomplete) + schedule_work(&td->tid_release_task); + spin_unlock_bh(&td->tid_release_lock); +} + +EXPORT_SYMBOL(cxgb3_queue_tid_release); + +/* + * Remove a tid from the TID table. A client may defer processing its last + * CPL message if it is locked at the time it arrives, and while the message + * sits in the client's backlog the TID may be reused for another connection. + * To handle this we atomically switch the TID association if it still points + * to the original client context. + */ +void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid) +{ + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + BUG_ON(tid >= t->ntids); + if (tdev->type == T3A) + (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL); + else { + struct sk_buff *skb; + + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); + if (likely(skb)) { + mk_tid_release(skb, tid); + cxgb3_ofld_send(tdev, skb); + t->tid_tab[tid].ctx = NULL; + } else + cxgb3_queue_tid_release(tdev, tid); + } + atomic_dec(&t->tids_in_use); +} + +EXPORT_SYMBOL(cxgb3_remove_tid); + +int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client, + void *ctx) +{ + int atid = -1; + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + spin_lock_bh(&t->atid_lock); + if (t->afree && + t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <= + t->ntids) { + union active_open_entry *p = t->afree; + + atid = (p - t->atid_tab) + t->atid_base; + t->afree = p->next; + p->t3c_tid.ctx = ctx; + p->t3c_tid.client = client; + t->atids_in_use++; + } + spin_unlock_bh(&t->atid_lock); + return atid; +} + +EXPORT_SYMBOL(cxgb3_alloc_atid); + +int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client, + void *ctx) +{ + int stid = -1; + struct tid_info *t = &(T3C_DATA(tdev))->tid_maps; + + spin_lock_bh(&t->stid_lock); + if (t->sfree) { + union listen_entry *p = t->sfree; + + stid = (p - t->stid_tab) + t->stid_base; + t->sfree = p->next; + p->t3c_tid.ctx = ctx; + p->t3c_tid.client = client; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} + +EXPORT_SYMBOL(cxgb3_alloc_stid); + +/* Get the t3cdev associated with a net_device */ +struct t3cdev *dev2t3cdev(struct net_device *dev) +{ + const struct port_info *pi = netdev_priv(dev); + + return (struct t3cdev *)pi->adapter; +} + +EXPORT_SYMBOL(dev2t3cdev); + +static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_smt_write_rpl *rpl = cplhdr(skb); + + if (rpl->status != CPL_ERR_NONE) + pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n", + rpl->status, GET_TID(rpl)); + + return CPL_RET_BUF_DONE; +} + +static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_l2t_write_rpl *rpl = cplhdr(skb); + + if (rpl->status != CPL_ERR_NONE) + pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, GET_TID(rpl)); + + return CPL_RET_BUF_DONE; +} + +static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_rte_write_rpl *rpl = 
cplhdr(skb); + + if (rpl->status != CPL_ERR_NONE) + pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n", + rpl->status, GET_TID(rpl)); + + return CPL_RET_BUF_DONE; +} + +static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_act_open_rpl *rpl = cplhdr(skb); + unsigned int atid = G_TID(ntohl(rpl->atid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client && + t3c_tid->client->handlers && + t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) { + return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb, + t3c_tid-> + ctx); + } else { + pr_err("%s: received clientless CPL command 0x%x\n", + dev->name, CPL_ACT_OPEN_RPL); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + union opcode_tid *p = cplhdr(skb); + unsigned int stid = G_TID(ntohl(p->opcode_tid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[p->opcode]) { + return t3c_tid->client->handlers[p->opcode] (dev, skb, + t3c_tid->ctx); + } else { + pr_err("%s: received clientless CPL command 0x%x\n", + dev->name, p->opcode); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb) +{ + union opcode_tid *p = cplhdr(skb); + unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[p->opcode]) { + return t3c_tid->client->handlers[p->opcode] + (dev, skb, t3c_tid->ctx); + } else { + pr_err("%s: received clientless CPL command 0x%x\n", + dev->name, p->opcode); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_cr(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_pass_accept_req *req = cplhdr(skb); + unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; + struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); + + if (unlikely(tid >= t->ntids)) { + printk("%s: passive open TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_stid(t, stid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { + return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] + (dev, skb, t3c_tid->ctx); + } else { + pr_err("%s: received clientless CPL command 0x%x\n", + dev->name, CPL_PASS_ACCEPT_REQ); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +/* + * Returns an sk_buff for a reply CPL message of size len. If the input + * sk_buff has no other users it is trimmed and reused, otherwise a new buffer + * is allocated. The input skb must be of size at least len. Note that this + * operation does not destroy the original skb data even if it decides to reuse + * the buffer. 
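+ * In the reuse case the skb's reference count is bumped (skb_get()), so the
+ * caller's eventual free of the original buffer does not release the reply.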
+ */ +static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, + gfp_t gfp) +{ + if (likely(!skb_cloned(skb))) { + BUG_ON(skb->len < len); + __skb_trim(skb, len); + skb_get(skb); + } else { + skb = alloc_skb(len, gfp); + if (skb) + __skb_put(skb, len); + } + return skb; +} + +static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb) +{ + union opcode_tid *p = cplhdr(skb); + unsigned int hwtid = G_TID(ntohl(p->opcode_tid)); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[p->opcode]) { + return t3c_tid->client->handlers[p->opcode] + (dev, skb, t3c_tid->ctx); + } else { + struct cpl_abort_req_rss *req = cplhdr(skb); + struct cpl_abort_rpl *rpl; + struct sk_buff *reply_skb; + unsigned int tid = GET_TID(req); + u8 cmd = req->status; + + if (req->status == CPL_ERR_RTX_NEG_ADVICE || + req->status == CPL_ERR_PERSIST_NEG_ADVICE) + goto out; + + reply_skb = cxgb3_get_cpl_reply_skb(skb, + sizeof(struct + cpl_abort_rpl), + GFP_ATOMIC); + + if (!reply_skb) { + printk("do_abort_req_rss: couldn't get skb!\n"); + goto out; + } + reply_skb->priority = CPL_PRIORITY_DATA; + __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); + rpl = cplhdr(reply_skb); + rpl->wr.wr_hi = + htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); + rpl->wr.wr_lo = htonl(V_WR_TID(tid)); + OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); + rpl->cmd = cmd; + cxgb3_ofld_send(dev, reply_skb); +out: + return CPL_RET_BUF_DONE; + } +} + +static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_act_establish *req = cplhdr(skb); + unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; + struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); + + if (unlikely(tid >= t->ntids)) { + printk("%s: active establish TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_atid(t, atid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { + return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] + (dev, skb, t3c_tid->ctx); + } else { + pr_err("%s: received clientless CPL command 0x%x\n", + dev->name, CPL_ACT_ESTABLISH); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int do_trace(struct t3cdev *dev, struct sk_buff *skb) +{ + struct cpl_trace_pkt *p = cplhdr(skb); + + skb->protocol = htons(0xffff); + skb->dev = dev->lldev; + skb_pull(skb, sizeof(*p)); + skb_reset_mac_header(skb); + netif_receive_skb(skb); + return 0; +} + +/* + * That skb would better have come from process_responses() where we abuse + * ->priority and ->csum to carry our data. NB: if we get to per-arch + * ->csum, the things might get really interesting here. 
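+ * get_hwtid() below recovers the 20-bit hardware TID from bits 8-27 of
+ * ->priority, and get_opcode() pulls the CPL opcode out of ->csum.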
+ */ + +static inline u32 get_hwtid(struct sk_buff *skb) +{ + return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff; +} + +static inline u32 get_opcode(struct sk_buff *skb) +{ + return G_OPCODE(ntohl((__force __be32)skb->csum)); +} + +static int do_term(struct t3cdev *dev, struct sk_buff *skb) +{ + unsigned int hwtid = get_hwtid(skb); + unsigned int opcode = get_opcode(skb); + struct t3c_tid_entry *t3c_tid; + + t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && + t3c_tid->client->handlers[opcode]) { + return t3c_tid->client->handlers[opcode] (dev, skb, + t3c_tid->ctx); + } else { + pr_err("%s: received clientless CPL command 0x%x\n", + dev->name, opcode); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; + } +} + +static int nb_callback(struct notifier_block *self, unsigned long event, + void *ctx) +{ + switch (event) { + case (NETEVENT_NEIGH_UPDATE):{ + cxgb_neigh_update((struct neighbour *)ctx); + break; + } + case (NETEVENT_REDIRECT):{ + struct netevent_redirect *nr = ctx; + cxgb_redirect(nr->old, nr->new, nr->neigh, + nr->daddr); + cxgb_neigh_update(nr->neigh); + break; + } + default: + break; + } + return 0; +} + +static struct notifier_block nb = { + .notifier_call = nb_callback +}; + +/* + * Process a received packet with an unknown/unexpected CPL opcode. + */ +static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb) +{ + pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data); + return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; +} + +/* + * Handlers for each CPL opcode + */ +static cpl_handler_func cpl_handlers[NUM_CPL_CMDS]; + +/* + * Add a new handler to the CPL dispatch table. A NULL handler may be supplied + * to unregister an existing handler. + */ +void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h) +{ + if (opcode < NUM_CPL_CMDS) + cpl_handlers[opcode] = h ? h : do_bad_cpl; + else + pr_err("T3C: handler registration for opcode %x failed\n", + opcode); +} + +EXPORT_SYMBOL(t3_register_cpl_handler); + +/* + * T3CDEV's receive method. + */ +static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) +{ + while (n--) { + struct sk_buff *skb = *skbs++; + unsigned int opcode = get_opcode(skb); + int ret = cpl_handlers[opcode] (dev, skb); + +#if VALIDATE_TID + if (ret & CPL_RET_UNKNOWN_TID) { + union opcode_tid *p = cplhdr(skb); + + pr_err("%s: CPL message (opcode %u) had unknown TID %u\n", + dev->name, opcode, G_TID(ntohl(p->opcode_tid))); + } +#endif + if (ret & CPL_RET_BUF_DONE) + kfree_skb(skb); + } + return 0; +} + +/* + * Sends an sk_buff to a T3C driver after dealing with any active network taps. 
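+ * Bottom halves are disabled around the call to dev->send().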
+ */ +int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb) +{ + int r; + + local_bh_disable(); + r = dev->send(dev, skb); + local_bh_enable(); + return r; +} + +EXPORT_SYMBOL(cxgb3_ofld_send); + +static int is_offloading(struct net_device *dev) +{ + struct adapter *adapter; + int i; + + read_lock_bh(&adapter_list_lock); + list_for_each_entry(adapter, &adapter_list, adapter_list) { + for_each_port(adapter, i) { + if (dev == adapter->port[i]) { + read_unlock_bh(&adapter_list_lock); + return 1; + } + } + } + read_unlock_bh(&adapter_list_lock); + return 0; +} + +static void cxgb_neigh_update(struct neighbour *neigh) +{ + struct net_device *dev; + + if (!neigh) + return; + dev = neigh->dev; + if (dev && (is_offloading(dev))) { + struct t3cdev *tdev = dev2t3cdev(dev); + + BUG_ON(!tdev); + t3_l2t_update(tdev, neigh); + } +} + +static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) { + pr_err("%s: cannot allocate skb!\n", __func__); + return; + } + skb->priority = CPL_PRIORITY_CONTROL; + req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply = 0; + req->cpu_idx = 0; + req->word = htons(W_TCB_L2T_IX); + req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX)); + req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx)); + tdev->send(tdev, skb); +} + +static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new, + struct neighbour *neigh, + const void *daddr) +{ + struct net_device *dev; + struct tid_info *ti; + struct t3cdev *tdev; + u32 tid; + int update_tcb; + struct l2t_entry *e; + struct t3c_tid_entry *te; + + dev = neigh->dev; + + if (!is_offloading(dev)) + return; + tdev = dev2t3cdev(dev); + BUG_ON(!tdev); + + /* Add new L2T entry */ + e = t3_l2t_get(tdev, new, dev, daddr); + if (!e) { + pr_err("%s: couldn't allocate new l2t entry!\n", __func__); + return; + } + + /* Walk tid table and notify clients of dst change. */ + ti = &(T3C_DATA(tdev))->tid_maps; + for (tid = 0; tid < ti->ntids; tid++) { + te = lookup_tid(ti, tid); + BUG_ON(!te); + if (te && te->ctx && te->client && te->client->redirect) { + update_tcb = te->client->redirect(te->ctx, old, new, e); + if (update_tcb) { + rcu_read_lock(); + l2t_hold(L2DATA(tdev), e); + rcu_read_unlock(); + set_l2t_ix(tdev, tid, e); + } + } + } + l2t_release(tdev, e); +} + +/* + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. + * The allocated memory is cleared. + */ +void *cxgb_alloc_mem(unsigned long size) +{ + void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + + if (!p) + p = vzalloc(size); + return p; +} + +/* + * Free memory allocated through t3_alloc_mem(). + */ +void cxgb_free_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +/* + * Allocate and initialize the TID tables. Returns 0 on success. 
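+ * All three tables are carved out of a single cxgb_alloc_mem() allocation:
+ * tid_tab first, followed by stid_tab and then atid_tab.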
+ */ +static int init_tid_tabs(struct tid_info *t, unsigned int ntids, + unsigned int natids, unsigned int nstids, + unsigned int atid_base, unsigned int stid_base) +{ + unsigned long size = ntids * sizeof(*t->tid_tab) + + natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab); + + t->tid_tab = cxgb_alloc_mem(size); + if (!t->tid_tab) + return -ENOMEM; + + t->stid_tab = (union listen_entry *)&t->tid_tab[ntids]; + t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids]; + t->ntids = ntids; + t->nstids = nstids; + t->stid_base = stid_base; + t->sfree = NULL; + t->natids = natids; + t->atid_base = atid_base; + t->afree = NULL; + t->stids_in_use = t->atids_in_use = 0; + atomic_set(&t->tids_in_use, 0); + spin_lock_init(&t->stid_lock); + spin_lock_init(&t->atid_lock); + + /* + * Setup the free lists for stid_tab and atid_tab. + */ + if (nstids) { + while (--nstids) + t->stid_tab[nstids - 1].next = &t->stid_tab[nstids]; + t->sfree = t->stid_tab; + } + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + return 0; +} + +static void free_tid_maps(struct tid_info *t) +{ + cxgb_free_mem(t->tid_tab); +} + +static inline void add_adapter(struct adapter *adap) +{ + write_lock_bh(&adapter_list_lock); + list_add_tail(&adap->adapter_list, &adapter_list); + write_unlock_bh(&adapter_list_lock); +} + +static inline void remove_adapter(struct adapter *adap) +{ + write_lock_bh(&adapter_list_lock); + list_del(&adap->adapter_list); + write_unlock_bh(&adapter_list_lock); +} + +int cxgb3_offload_activate(struct adapter *adapter) +{ + struct t3cdev *dev = &adapter->tdev; + int natids, err; + struct t3c_data *t; + struct tid_range stid_range, tid_range; + struct mtutab mtutab; + unsigned int l2t_capacity; + struct l2t_data *l2td; + + t = kzalloc(sizeof(*t), GFP_KERNEL); + if (!t) + return -ENOMEM; + + err = -EOPNOTSUPP; + if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 || + dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 || + dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 || + dev->ctl(dev, GET_MTUS, &mtutab) < 0 || + dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 || + dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0) + goto out_free; + + err = -ENOMEM; + l2td = t3_init_l2t(l2t_capacity); + if (!l2td) + goto out_free; + + natids = min(tid_range.num / 2, MAX_ATIDS); + err = init_tid_tabs(&t->tid_maps, tid_range.num, natids, + stid_range.num, ATID_BASE, stid_range.base); + if (err) + goto out_free_l2t; + + t->mtus = mtutab.mtus; + t->nmtus = mtutab.size; + + INIT_WORK(&t->tid_release_task, t3_process_tid_release_list); + spin_lock_init(&t->tid_release_lock); + INIT_LIST_HEAD(&t->list_node); + t->dev = dev; + + RCU_INIT_POINTER(dev->l2opt, l2td); + T3C_DATA(dev) = t; + dev->recv = process_rx; + dev->neigh_update = t3_l2t_update; + + /* Register netevent handler once */ + if (list_empty(&adapter_list)) + register_netevent_notifier(&nb); + + t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL); + t->release_list_incomplete = 0; + + add_adapter(adapter); + return 0; + +out_free_l2t: + t3_free_l2t(l2td); +out_free: + kfree(t); + return err; +} + +static void clean_l2_data(struct rcu_head *head) +{ + struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); + t3_free_l2t(d); +} + + +void cxgb3_offload_deactivate(struct adapter *adapter) +{ + struct t3cdev *tdev = &adapter->tdev; + struct t3c_data *t = T3C_DATA(tdev); + struct l2t_data *d; + + remove_adapter(adapter); + if (list_empty(&adapter_list)) + 
unregister_netevent_notifier(&nb); + + free_tid_maps(&t->tid_maps); + T3C_DATA(tdev) = NULL; + rcu_read_lock(); + d = L2DATA(tdev); + rcu_read_unlock(); + RCU_INIT_POINTER(tdev->l2opt, NULL); + call_rcu(&d->rcu_head, clean_l2_data); + if (t->nofail_skb) + kfree_skb(t->nofail_skb); + kfree(t); +} + +static inline void register_tdev(struct t3cdev *tdev) +{ + static int unit; + + mutex_lock(&cxgb3_db_lock); + snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++); + list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list); + mutex_unlock(&cxgb3_db_lock); +} + +static inline void unregister_tdev(struct t3cdev *tdev) +{ + mutex_lock(&cxgb3_db_lock); + list_del(&tdev->ofld_dev_list); + mutex_unlock(&cxgb3_db_lock); +} + +static inline int adap2type(struct adapter *adapter) +{ + int type = 0; + + switch (adapter->params.rev) { + case T3_REV_A: + type = T3A; + break; + case T3_REV_B: + case T3_REV_B2: + type = T3B; + break; + case T3_REV_C: + type = T3C; + break; + } + return type; +} + +void cxgb3_adapter_ofld(struct adapter *adapter) +{ + struct t3cdev *tdev = &adapter->tdev; + + INIT_LIST_HEAD(&tdev->ofld_dev_list); + + cxgb3_set_dummy_ops(tdev); + tdev->send = t3_offload_tx; + tdev->ctl = cxgb_offload_ctl; + tdev->type = adap2type(adapter); + + register_tdev(tdev); +} + +void cxgb3_adapter_unofld(struct adapter *adapter) +{ + struct t3cdev *tdev = &adapter->tdev; + + tdev->recv = NULL; + tdev->neigh_update = NULL; + + unregister_tdev(tdev); +} + +void __init cxgb3_offload_init(void) +{ + int i; + + for (i = 0; i < NUM_CPL_CMDS; ++i) + cpl_handlers[i] = do_bad_cpl; + + t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl); + t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl); + t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl); + t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl); + t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl); + t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr); + t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl); + t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl); + t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl); + t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl); + t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss); + t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish); + t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term); + t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl); + t3_register_cpl_handler(CPL_TRACE_PKT, do_trace); + t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl); + t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl); + t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl); +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h new file mode 100644 index 000000000..929c29811 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _CXGB3_OFFLOAD_H +#define _CXGB3_OFFLOAD_H + +#include +#include + +#include "l2t.h" + +#include "t3cdev.h" +#include "t3_cpl.h" + +struct adapter; + +void cxgb3_offload_init(void); + +void cxgb3_adapter_ofld(struct adapter *adapter); +void cxgb3_adapter_unofld(struct adapter *adapter); +int cxgb3_offload_activate(struct adapter *adapter); +void cxgb3_offload_deactivate(struct adapter *adapter); + +void cxgb3_set_dummy_ops(struct t3cdev *dev); + +struct t3cdev *dev2t3cdev(struct net_device *dev); + +/* + * Client registration. Users of T3 driver must register themselves. + * The T3 driver will call the add function of every client for each T3 + * adapter activated, passing up the t3cdev ptr. Each client fills out an + * array of callback functions to process CPL messages. + */ + +void cxgb3_register_client(struct cxgb3_client *client); +void cxgb3_unregister_client(struct cxgb3_client *client); +void cxgb3_add_clients(struct t3cdev *tdev); +void cxgb3_remove_clients(struct t3cdev *tdev); +void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port); + +typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, + struct sk_buff *skb, void *ctx); + +enum { + OFFLOAD_STATUS_UP, + OFFLOAD_STATUS_DOWN, + OFFLOAD_PORT_DOWN, + OFFLOAD_PORT_UP, + OFFLOAD_DB_FULL, + OFFLOAD_DB_EMPTY, + OFFLOAD_DB_DROP +}; + +struct cxgb3_client { + char *name; + void (*add) (struct t3cdev *); + void (*remove) (struct t3cdev *); + cxgb3_cpl_handler_func *handlers; + int (*redirect)(void *ctx, struct dst_entry *old, + struct dst_entry *new, struct l2t_entry *l2t); + struct list_head client_list; + void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port); +}; + +/* + * TID allocation services. 
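+ * atids identify active-open (outgoing) connections while they are being
+ * established, stids identify listening servers, and plain tids identify
+ * established hardware connections.
+ *
+ * A rough, hypothetical active-open sequence (names illustrative only):
+ *
+ *	atid = cxgb3_alloc_atid(tdev, &my_client, my_ctx);
+ *	... send CPL_ACT_OPEN_REQ carrying the atid ...
+ *	... CPL_ACT_ESTABLISH arrives with the hardware TID hwtid ...
+ *	cxgb3_insert_tid(tdev, &my_client, my_ctx, hwtid);
+ *	cxgb3_free_atid(tdev, atid);
+ *	... on connection teardown ...
+ *	cxgb3_remove_tid(tdev, my_ctx, hwtid);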
+ */ +int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client, + void *ctx); +int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client, + void *ctx); +void *cxgb3_free_atid(struct t3cdev *dev, int atid); +void cxgb3_free_stid(struct t3cdev *dev, int stid); +void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client, + void *ctx, unsigned int tid); +void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid); +void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid); + +struct t3c_tid_entry { + struct cxgb3_client *client; + void *ctx; +}; + +/* CPL message priority levels */ +enum { + CPL_PRIORITY_DATA = 0, /* data messages */ + CPL_PRIORITY_SETUP = 1, /* connection setup messages */ + CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ + CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ + CPL_PRIORITY_ACK = 1, /* RX ACK messages */ + CPL_PRIORITY_CONTROL = 1 /* offload control messages */ +}; + +/* Flags for return value of CPL message handlers */ +enum { + CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */ + CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */ + CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */ +}; + +typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb); + +/* + * Returns a pointer to the first byte of the CPL header in an sk_buff that + * contains a CPL message. + */ +static inline void *cplhdr(struct sk_buff *skb) +{ + return skb->data; +} + +void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h); + +union listen_entry { + struct t3c_tid_entry t3c_tid; + union listen_entry *next; +}; + +union active_open_entry { + struct t3c_tid_entry t3c_tid; + union active_open_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of the TID, server TID, + * and active-open TID tables for a offload device. + * The tables themselves are allocated dynamically. + */ +struct tid_info { + struct t3c_tid_entry *tid_tab; + unsigned int ntids; + atomic_t tids_in_use; + + union listen_entry *stid_tab; + unsigned int nstids; + unsigned int stid_base; + + union active_open_entry *atid_tab; + unsigned int natids; + unsigned int atid_base; + + /* + * The following members are accessed R/W so we put them in their own + * cache lines. + * + * XXX We could combine the atid fields above with the lock here since + * atids are use once (unlike other tids). OTOH the above fields are + * usually in cache due to tid_tab. 
+ */ + spinlock_t atid_lock ____cacheline_aligned_in_smp; + union active_open_entry *afree; + unsigned int atids_in_use; + + spinlock_t stid_lock ____cacheline_aligned; + union listen_entry *sfree; + unsigned int stids_in_use; +}; + +struct t3c_data { + struct list_head list_node; + struct t3cdev *dev; + unsigned int tx_max_chunk; /* max payload for TX_DATA */ + unsigned int max_wrs; /* max in-flight WRs per connection */ + unsigned int nmtus; + const unsigned short *mtus; + struct tid_info tid_maps; + + struct t3c_tid_entry *tid_release_list; + spinlock_t tid_release_lock; + struct work_struct tid_release_task; + + struct sk_buff *nofail_skb; + unsigned int release_list_incomplete; +}; + +/* + * t3cdev -> t3c_data accessor + */ +#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt) + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h new file mode 100644 index 000000000..0d9b0e6dc --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/firmware_exports.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _FIRMWARE_EXPORTS_H_ +#define _FIRMWARE_EXPORTS_H_ + +/* WR OPCODES supported by the firmware. 
+ */ +#define FW_WROPCODE_FORWARD 0x01 +#define FW_WROPCODE_BYPASS 0x05 + +#define FW_WROPCODE_TUNNEL_TX_PKT 0x03 + +#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00 +#define FW_WROPCODE_ULPTX_MEM_READ 0x02 +#define FW_WROPCODE_ULPTX_PKT 0x04 +#define FW_WROPCODE_ULPTX_INVALIDATE 0x06 + +#define FW_WROPCODE_TUNNEL_RX_PKT 0x07 + +#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08 +#define FW_WROPCODE_OFLD_CLOSE_CON 0x09 +#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A +#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F +#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B +#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C +#define FW_WROPCODE_OFLD_TX_DATA 0x0D +#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E + +#define FW_WROPCODE_RI_RDMA_INIT 0x10 +#define FW_WROPCODE_RI_RDMA_WRITE 0x11 +#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12 +#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13 +#define FW_WROPCODE_RI_SEND 0x14 +#define FW_WROPCODE_RI_TERMINATE 0x15 +#define FW_WROPCODE_RI_RDMA_READ 0x16 +#define FW_WROPCODE_RI_RECEIVE 0x17 +#define FW_WROPCODE_RI_BIND_MW 0x18 +#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19 +#define FW_WROPCODE_RI_LOCAL_INV 0x1A +#define FW_WROPCODE_RI_MODIFY_QP 0x1B +#define FW_WROPCODE_RI_BYPASS 0x1C + +#define FW_WROPOCDE_RSVD 0x1E + +#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F + +#define FW_WROPCODE_MNGT 0x1D +#define FW_MNGTOPCODE_PKTSCHED_SET 0x00 + +/* Maximum size of a WR sent from the host, limited by the SGE. + * + * Note: WR coming from ULP or TP are only limited by CIM. + */ +#define FW_WR_SIZE 128 + +/* Maximum number of outstanding WRs sent from the host. Value must be + * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by + * offload modules to limit the number of WRs per connection. + */ +#define FW_T3_WR_NUM 16 +#define FW_N3_WR_NUM 7 + +#ifndef N3 +# define FW_WR_NUM FW_T3_WR_NUM +#else +# define FW_WR_NUM FW_N3_WR_NUM +#endif + +/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These + * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must + * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START. + * + * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent + * to RESP Queue[i]. + */ +#define FW_TUNNEL_NUM 8 +#define FW_TUNNEL_SGEEC_START 8 +#define FW_TUNNEL_TID_START 65544 + +/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues + * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID' + * (or 'uP Token') FW_CTRL_TID_START. + * + * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i]. + */ +#define FW_CTRL_NUM 8 +#define FW_CTRL_SGEEC_START 65528 +#define FW_CTRL_TID_START 65536 + +/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These + * queues must start at SGE Egress Context FW_OFLD_SGEEC_START. + * + * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for + * OFFLOAD Queues, as the host is responsible for providing the correct TID in + * every WR. + * + * Ingress Trafffic for OFFLOAD Queue[i] is sent to RESP Queue[i]. + */ +#define FW_OFLD_NUM 8 +#define FW_OFLD_SGEEC_START 0 + +/* + * + */ +#define FW_RI_NUM 1 +#define FW_RI_SGEEC_START 65527 +#define FW_RI_TID_START 65552 + +/* + * The RX_PKT_TID + */ +#define FW_RX_PKT_NUM 1 +#define FW_RX_PKT_TID_START 65553 + +/* FW_WRC_NUM corresponds to the number of Work Request Context that supported + * by the firmware. + */ +#define FW_WRC_NUM \ + (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM) + +/* + * FW type and version. 
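+ * The 32-bit version word is laid out as type[31:28], major[27:16],
+ * minor[15:8] and micro[7:0].  The G_ macros below extract each field,
+ * e.g. G_FW_VERSION_MAJOR(0x12345678) == 0x234.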
+ */ +#define S_FW_VERSION_TYPE 28 +#define M_FW_VERSION_TYPE 0xF +#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE) +#define G_FW_VERSION_TYPE(x) \ + (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE) + +#define S_FW_VERSION_MAJOR 16 +#define M_FW_VERSION_MAJOR 0xFFF +#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR) +#define G_FW_VERSION_MAJOR(x) \ + (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR) + +#define S_FW_VERSION_MINOR 8 +#define M_FW_VERSION_MINOR 0xFF +#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR) +#define G_FW_VERSION_MINOR(x) \ + (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR) + +#define S_FW_VERSION_MICRO 0 +#define M_FW_VERSION_MICRO 0xFF +#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO) +#define G_FW_VERSION_MICRO(x) \ + (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO) + +#endif /* _FIRMWARE_EXPORTS_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c new file mode 100644 index 000000000..5f226eda8 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "t3cdev.h" +#include "cxgb3_defs.h" +#include "l2t.h" +#include "t3_cpl.h" +#include "firmware_exports.h" + +#define VLAN_NONE 0xfff + +/* + * Module locking notes: There is a RW lock protecting the L2 table as a + * whole plus a spinlock per L2T entry. Entry lookups and allocations happen + * under the protection of the table lock, individual entry changes happen + * while holding that entry's spinlock. The table lock nests outside the + * entry locks. Allocations of new entries take the table lock as writers so + * no other lookups can happen while allocating new entries. Entry updates + * take the table lock as readers so multiple entries can be updated in + * parallel. An L2T entry can be dropped by decrementing its reference count + * and therefore can happen in parallel with entry allocation but no entry + * can change state or increment its ref count during allocation as both of + * these perform lookups. 
+ */ + +static inline unsigned int vlan_prio(const struct l2t_entry *e) +{ + return e->vlan >> 13; +} + +static inline unsigned int arp_hash(u32 key, int ifindex, + const struct l2t_data *d) +{ + return jhash_2words(key, ifindex, 0) & (d->nentries - 1); +} + +static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n) +{ + neigh_hold(n); + if (e->neigh) + neigh_release(e->neigh); + e->neigh = n; +} + +/* + * Set up an L2T entry and send any packets waiting in the arp queue. The + * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the + * entry locked. + */ +static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct cpl_l2t_write_req *req; + struct sk_buff *tmp; + + if (!skb) { + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + } + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx)); + req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) | + V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) | + V_L2T_W_PRIO(vlan_prio(e))); + memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); + memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); + skb->priority = CPL_PRIORITY_CONTROL; + cxgb3_ofld_send(dev, skb); + + skb_queue_walk_safe(&e->arpq, skb, tmp) { + __skb_unlink(skb, &e->arpq); + cxgb3_ofld_send(dev, skb); + } + e->state = L2T_STATE_VALID; + + return 0; +} + +/* + * Add a packet to the an L2T entry's queue of packets awaiting resolution. + * Must be called with the entry's lock held. + */ +static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +{ + __skb_queue_tail(&e->arpq, skb); +} + +int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) + e->state = L2T_STATE_VALID; + spin_unlock_bh(&e->lock); + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return cxgb3_ofld_send(dev, skb); + case L2T_STATE_RESOLVING: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_RESOLVING) { + /* ARP already completed */ + spin_unlock_bh(&e->lock); + goto again; + } + arpq_enqueue(e, skb); + spin_unlock_bh(&e->lock); + + /* + * Only the first packet added to the arpq should kick off + * resolution. However, because the alloc_skb below can fail, + * we allow each packet added to the arpq to retry resolution + * as a way of recovering from transient memory exhaustion. + * A better way would be to use a work request to retry L2T + * entries when there's no memory. 
+ */ + if (!neigh_event_send(e->neigh, NULL)) { + skb = alloc_skb(sizeof(struct cpl_l2t_write_req), + GFP_ATOMIC); + if (!skb) + break; + + spin_lock_bh(&e->lock); + if (!skb_queue_empty(&e->arpq)) + setup_l2e_send_pending(dev, skb, e); + else /* we lost the race */ + __kfree_skb(skb); + spin_unlock_bh(&e->lock); + } + } + return 0; +} + +EXPORT_SYMBOL(t3_l2t_send_slow); + +void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e) +{ +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) { + e->state = L2T_STATE_VALID; + } + spin_unlock_bh(&e->lock); + return; + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return; + case L2T_STATE_RESOLVING: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_RESOLVING) { + /* ARP already completed */ + spin_unlock_bh(&e->lock); + goto again; + } + spin_unlock_bh(&e->lock); + + /* + * Only the first packet added to the arpq should kick off + * resolution. However, because the alloc_skb below can fail, + * we allow each packet added to the arpq to retry resolution + * as a way of recovering from transient memory exhaustion. + * A better way would be to use a work request to retry L2T + * entries when there's no memory. + */ + neigh_event_send(e->neigh, NULL); + } +} + +EXPORT_SYMBOL(t3_l2t_send_event); + +/* + * Allocate a free L2T entry. Must be called with l2t_data.lock held. + */ +static struct l2t_entry *alloc_l2e(struct l2t_data *d) +{ + struct l2t_entry *end, *e, **p; + + if (!atomic_read(&d->nfree)) + return NULL; + + /* there's definitely a free entry */ + for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e) + if (atomic_read(&e->refcnt) == 0) + goto found; + + for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ; +found: + d->rover = e + 1; + atomic_dec(&d->nfree); + + /* + * The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state != L2T_STATE_UNUSED) { + int hash = arp_hash(e->addr, e->ifindex, d); + + for (p = &d->l2tab[hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + break; + } + e->state = L2T_STATE_UNUSED; + } + return e; +} + +/* + * Called when an L2T entry has no more users. The entry is left in the hash + * table since it is likely to be reused but we also bump nfree to indicate + * that the entry can be reallocated for a different neighbor. We also drop + * the existing neighbor reference in case the neighbor is going away and is + * waiting on our reference. + * + * Because entries can be reallocated to other neighbors once their ref count + * drops to 0 we need to take the entry's lock to avoid races with a new + * incarnation. + */ +void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e) +{ + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + } + spin_unlock_bh(&e->lock); + atomic_inc(&d->nfree); +} + +EXPORT_SYMBOL(t3_l2e_free); + +/* + * Update an L2T entry that was previously used for the same next hop as neigh. + * Must be called with softirqs disabled. 
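+ * (The entry lock is taken with plain spin_lock() rather than spin_lock_bh(),
+ * hence the requirement that the caller already has softirqs off.)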
+ */ +static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +{ + unsigned int nud_state; + + spin_lock(&e->lock); /* avoid race with t3_l2t_free */ + + if (neigh != e->neigh) + neigh_replace(e, neigh); + nud_state = neigh->nud_state; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || + !(nud_state & NUD_VALID)) + e->state = L2T_STATE_RESOLVING; + else if (nud_state & NUD_CONNECTED) + e->state = L2T_STATE_VALID; + else + e->state = L2T_STATE_STALE; + spin_unlock(&e->lock); +} + +struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, + struct net_device *dev, const void *daddr) +{ + struct l2t_entry *e = NULL; + struct neighbour *neigh; + struct port_info *p; + struct l2t_data *d; + int hash; + u32 addr; + int ifidx; + int smt_idx; + + rcu_read_lock(); + neigh = dst_neigh_lookup(dst, daddr); + if (!neigh) + goto done_rcu; + + addr = *(u32 *) neigh->primary_key; + ifidx = neigh->dev->ifindex; + + if (!dev) + dev = neigh->dev; + p = netdev_priv(dev); + smt_idx = p->port_id; + + d = L2DATA(cdev); + if (!d) + goto done_rcu; + + hash = arp_hash(addr, ifidx, d); + + write_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (e->addr == addr && e->ifindex == ifidx && + e->smt_idx == smt_idx) { + l2t_hold(d, e); + if (atomic_read(&e->refcnt) == 1) + reuse_entry(e, neigh); + goto done_unlock; + } + + /* Need to allocate a new entry */ + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t3_l2t_free */ + e->next = d->l2tab[hash].first; + d->l2tab[hash].first = e; + e->state = L2T_STATE_RESOLVING; + e->addr = addr; + e->ifindex = ifidx; + e->smt_idx = smt_idx; + atomic_set(&e->refcnt, 1); + neigh_replace(e, neigh); + if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + e->vlan = vlan_dev_vlan_id(neigh->dev); + else + e->vlan = VLAN_NONE; + spin_unlock(&e->lock); + } +done_unlock: + write_unlock_bh(&d->lock); +done_rcu: + if (neigh) + neigh_release(neigh); + rcu_read_unlock(); + return e; +} + +EXPORT_SYMBOL(t3_l2t_get); + +/* + * Called when address resolution fails for an L2T entry to handle packets + * on the arpq head. If a packet specifies a failure handler it is invoked, + * otherwise the packets is sent to the offload device. + * + * XXX: maybe we should abandon the latter behavior and just require a failure + * handler. + */ +static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq) +{ + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(arpq, skb, tmp) { + struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + __skb_unlink(skb, arpq); + if (cb->arp_failure_handler) + cb->arp_failure_handler(dev, skb); + else + cxgb3_ofld_send(dev, skb); + } +} + +/* + * Called when the host's ARP layer makes a change to some entry that is + * loaded into the HW L2 table. 
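+ * Depending on the neighbour state this either hands the packets queued on
+ * the entry to handle_failed_resolution() (resolution failed) or rewrites
+ * the hardware entry via setup_l2e_send_pending().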
+ */ +void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh) +{ + struct sk_buff_head arpq; + struct l2t_entry *e; + struct l2t_data *d = L2DATA(dev); + u32 addr = *(u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = arp_hash(addr, ifidx, d); + + read_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (e->addr == addr && e->ifindex == ifidx) { + spin_lock(&e->lock); + goto found; + } + read_unlock_bh(&d->lock); + return; + +found: + __skb_queue_head_init(&arpq); + + read_unlock(&d->lock); + if (atomic_read(&e->refcnt)) { + if (neigh != e->neigh) + neigh_replace(e, neigh); + + if (e->state == L2T_STATE_RESOLVING) { + if (neigh->nud_state & NUD_FAILED) { + skb_queue_splice_init(&e->arpq, &arpq); + } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) + setup_l2e_send_pending(dev, NULL, e); + } else { + e->state = neigh->nud_state & NUD_CONNECTED ? + L2T_STATE_VALID : L2T_STATE_STALE; + if (!ether_addr_equal(e->dmac, neigh->ha)) + setup_l2e_send_pending(dev, NULL, e); + } + } + spin_unlock_bh(&e->lock); + + if (!skb_queue_empty(&arpq)) + handle_failed_resolution(dev, &arpq); +} + +struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) +{ + struct l2t_data *d; + int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry); + + d = cxgb_alloc_mem(size); + if (!d) + return NULL; + + d->nentries = l2t_capacity; + d->rover = &d->l2tab[1]; /* entry 0 is not used */ + atomic_set(&d->nfree, l2t_capacity - 1); + rwlock_init(&d->lock); + + for (i = 0; i < l2t_capacity; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + __skb_queue_head_init(&d->l2tab[i].arpq); + spin_lock_init(&d->l2tab[i].lock); + atomic_set(&d->l2tab[i].refcnt, 0); + } + return d; +} + +void t3_free_l2t(struct l2t_data *d) +{ + cxgb_free_mem(d); +} + diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h new file mode 100644 index 000000000..8cffcdfd5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _CHELSIO_L2T_H +#define _CHELSIO_L2T_H + +#include +#include "t3cdev.h" +#include + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +struct neighbour; +struct sk_buff; + +/* + * Each L2T entry plays multiple roles. First of all, it keeps state for the + * corresponding entry of the HW L2 table and maintains a queue of offload + * packets awaiting address resolution. Second, it is a node of a hash table + * chain, where the nodes of the chain are linked together through their next + * pointer. Finally, each node is a bucket of a hash table, pointing to the + * first element in its chain through its first pointer. + */ +struct l2t_entry { + u16 state; /* entry state */ + u16 idx; /* entry index */ + u32 addr; /* dest IP address */ + int ifindex; /* neighbor's net_device's ifindex */ + u16 smt_idx; /* SMT index */ + u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ + struct neighbour *neigh; /* associated neighbour */ + struct l2t_entry *first; /* start of hash chain */ + struct l2t_entry *next; /* next l2t_entry on chain */ + struct sk_buff_head arpq; /* queue of packets awaiting resolution */ + spinlock_t lock; + atomic_t refcnt; /* entry reference count */ + u8 dmac[6]; /* neighbour's MAC address */ +}; + +struct l2t_data { + unsigned int nentries; /* number of entries */ + struct l2t_entry *rover; /* starting point for next allocation */ + atomic_t nfree; /* number of free entries */ + rwlock_t lock; + struct l2t_entry l2tab[0]; + struct rcu_head rcu_head; /* to handle rcu cleanup */ +}; + +typedef void (*arp_failure_handler_func)(struct t3cdev * dev, + struct sk_buff * skb); + +/* + * Callback stored in an skb to handle address resolution failure. + */ +struct l2t_skb_cb { + arp_failure_handler_func arp_failure_handler; +}; + +#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +static inline void set_arp_failure_handler(struct sk_buff *skb, + arp_failure_handler_func hnd) +{ + L2T_SKB_CB(skb)->arp_failure_handler = hnd; +} + +/* + * Getting to the L2 data from an offload device. 
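+ * L2DATA() uses rcu_dereference(), so callers must hold rcu_read_lock()
+ * (as l2t_release() below does).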
+ */ +#define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) + +#define W_TCB_L2T_IX 0 +#define S_TCB_L2T_IX 7 +#define M_TCB_L2T_IX 0x7ffULL +#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX) + +void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); +void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); +struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, + struct net_device *dev, const void *daddr); +int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, + struct l2t_entry *e); +void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); +struct l2t_data *t3_init_l2t(unsigned int l2t_capacity); +void t3_free_l2t(struct l2t_data *d); + +int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb); + +static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + if (likely(e->state == L2T_STATE_VALID)) + return cxgb3_ofld_send(dev, skb); + return t3_l2t_send_slow(dev, skb, e); +} + +static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) +{ + struct l2t_data *d; + + rcu_read_lock(); + d = L2DATA(t); + + if (atomic_dec_and_test(&e->refcnt) && d) + t3_l2e_free(d, e); + + rcu_read_unlock(); +} + +static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) +{ + if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + atomic_dec(&d->nfree); +} + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/mc5.c b/drivers/net/ethernet/chelsio/cxgb3/mc5.c new file mode 100644 index 000000000..338301b11 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/mc5.c @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "common.h" +#include "regs.h" + +enum { + IDT75P52100 = 4, + IDT75N43102 = 5 +}; + +/* DBGI command mode */ +enum { + DBGI_MODE_MBUS = 0, + DBGI_MODE_IDT52100 = 5 +}; + +/* IDT 75P52100 commands */ +#define IDT_CMD_READ 0 +#define IDT_CMD_WRITE 1 +#define IDT_CMD_SEARCH 2 +#define IDT_CMD_LEARN 3 + +/* IDT LAR register address and value for 144-bit mode (low 32 bits) */ +#define IDT_LAR_ADR0 0x180006 +#define IDT_LAR_MODE144 0xffff0000 + +/* IDT SCR and SSR addresses (low 32 bits) */ +#define IDT_SCR_ADR0 0x180000 +#define IDT_SSR0_ADR0 0x180002 +#define IDT_SSR1_ADR0 0x180004 + +/* IDT GMR base address (low 32 bits) */ +#define IDT_GMR_BASE_ADR0 0x180020 + +/* IDT data and mask array base addresses (low 32 bits) */ +#define IDT_DATARY_BASE_ADR0 0 +#define IDT_MSKARY_BASE_ADR0 0x80000 + +/* IDT 75N43102 commands */ +#define IDT4_CMD_SEARCH144 3 +#define IDT4_CMD_WRITE 4 +#define IDT4_CMD_READ 5 + +/* IDT 75N43102 SCR address (low 32 bits) */ +#define IDT4_SCR_ADR0 0x3 + +/* IDT 75N43102 GMR base addresses (low 32 bits) */ +#define IDT4_GMR_BASE0 0x10 +#define IDT4_GMR_BASE1 0x20 +#define IDT4_GMR_BASE2 0x30 + +/* IDT 75N43102 data and mask array base addresses (low 32 bits) */ +#define IDT4_DATARY_BASE_ADR0 0x1000000 +#define IDT4_MSKARY_BASE_ADR0 0x2000000 + +#define MAX_WRITE_ATTEMPTS 5 + +#define MAX_ROUTES 2048 + +/* + * Issue a command to the TCAM and wait for its completion. The address and + * any data required by the command must have been setup by the caller. + */ +static int mc5_cmd_write(struct adapter *adapter, u32 cmd) +{ + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd); + return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS, + F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1); +} + +static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2, + u32 v3) +{ + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1); + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2); + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3); +} + +/* + * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM + * command cmd. The data to be written must have been set up by the caller. + * Returns -1 on failure, 0 on success. + */ +static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd) +{ + t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo); + if (mc5_cmd_write(adapter, cmd) == 0) + return 0; + CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n", + addr_lo); + return -1; +} + +static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base, + u32 data_array_base, u32 write_cmd, + int addr_shift) +{ + unsigned int i; + struct adapter *adap = mc5->adapter; + + /* + * We need the size of the TCAM data and mask arrays in terms of + * 72-bit entries. + */ + unsigned int size72 = mc5->tcam_size; + unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX); + + if (mc5->mode == MC5_MODE_144_BIT) { + size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */ + server_base *= 2; + } + + /* Clear the data array */ + dbgi_wr_data3(adap, 0, 0, 0); + for (i = 0; i < size72; i++) + if (mc5_write(adap, data_array_base + (i << addr_shift), + write_cmd)) + return -1; + + /* Initialize the mask array. */ + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); + for (i = 0; i < size72; i++) { + if (i == server_base) /* entering server or routing region */ + t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0, + mc5->mode == MC5_MODE_144_BIT ? 
+ 0xfffffff9 : 0xfffffffd); + if (mc5_write(adap, mask_array_base + (i << addr_shift), + write_cmd)) + return -1; + } + return 0; +} + +static int init_idt52100(struct mc5 *mc5) +{ + int i; + struct adapter *adap = mc5->adapter; + + t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, + V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15)); + t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2); + + /* + * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and + * GMRs 8-9 for ACK- and AOPEN searches. + */ + t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH); + t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN); + t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000); + t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN); + t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH); + t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN); + t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH); + t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000); + t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ); + + /* Set DBGI command mode for IDT TCAM. */ + t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); + + /* Set up LAR */ + dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0); + if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE)) + goto err; + + /* Set up SSRs */ + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0); + if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) || + mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE)) + goto err; + + /* Set up GMRs */ + for (i = 0; i < 32; ++i) { + if (i >= 12 && i < 15) + dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); + else if (i == 15) + dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); + else + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); + + if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE)) + goto err; + } + + /* Set up SCR */ + dbgi_wr_data3(adap, 1, 0, 0); + if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE)) + goto err; + + return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0, + IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0); +err: + return -EIO; +} + +static int init_idt43102(struct mc5 *mc5) +{ + int i; + struct adapter *adap = mc5->adapter; + + t3_write_reg(adap, A_MC5_DB_RSP_LATENCY, + adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) : + V_RDLAT(0xd) | V_SRCHLAT(0x12)); + + /* + * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask + * for ACK- and AOPEN searches. + */ + t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, + IDT4_CMD_SEARCH144 | 0x3800); + t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144); + t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800); + t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800); + t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800); + t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE); + t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ); + + t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3); + + /* Set DBGI command mode for IDT TCAM. 
*/ + t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100); + + /* Set up GMRs */ + dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff); + for (i = 0; i < 7; ++i) + if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE)) + goto err; + + for (i = 0; i < 4; ++i) + if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE)) + goto err; + + dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff); + if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) || + mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) || + mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE)) + goto err; + + dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff); + if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE)) + goto err; + + /* Set up SCR */ + dbgi_wr_data3(adap, 0xf0000000, 0, 0); + if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE)) + goto err; + + return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0, + IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1); +err: + return -EIO; +} + +/* Put MC5 in DBGI mode. */ +static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5) +{ + t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, + V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN); +} + +/* Put MC5 in M-Bus mode. */ +static void mc5_dbgi_mode_disable(const struct mc5 *mc5) +{ + t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG, + V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | + V_COMPEN(mc5->mode == MC5_MODE_72_BIT) | + V_PRTYEN(mc5->parity_enabled) | F_MBUSEN); +} + +/* + * Initialization that requires the OS and protocol layers to already + * be initialized goes here. + */ +int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, + unsigned int nroutes) +{ + u32 cfg; + int err; + unsigned int tcam_size = mc5->tcam_size; + struct adapter *adap = mc5->adapter; + + if (!tcam_size) + return 0; + + if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size) + return -EINVAL; + + /* Reset the TCAM */ + cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE; + cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST; + t3_write_reg(adap, A_MC5_DB_CONFIG, cfg); + if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) { + CH_ERR(adap, "TCAM reset timed out\n"); + return -1; + } + + t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes); + t3_write_reg(adap, A_MC5_DB_FILTER_TABLE, + tcam_size - nroutes - nfilters); + t3_write_reg(adap, A_MC5_DB_SERVER_INDEX, + tcam_size - nroutes - nfilters - nservers); + + mc5->parity_enabled = 1; + + /* All the TCAM addresses we access have only the low 32 bits non 0 */ + t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0); + t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0); + + mc5_dbgi_mode_enable(mc5); + + switch (mc5->part_type) { + case IDT75P52100: + err = init_idt52100(mc5); + break; + case IDT75N43102: + err = init_idt43102(mc5); + break; + default: + CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type); + err = -EINVAL; + break; + } + + mc5_dbgi_mode_disable(mc5); + return err; +} + + +#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR) + +/* + * MC5 interrupt handler + */ +void t3_mc5_intr_handler(struct mc5 *mc5) +{ + struct adapter *adap = mc5->adapter; + u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE); + + if ((cause & F_PARITYERR) && mc5->parity_enabled) { + CH_ALERT(adap, "MC5 parity error\n"); + mc5->stats.parity_err++; + } + + if (cause & F_REQQPARERR) { + CH_ALERT(adap, "MC5 request queue parity error\n"); + mc5->stats.reqq_parity_err++; + } + + if (cause & F_DISPQPARERR) { + CH_ALERT(adap, "MC5 dispatch queue parity error\n"); + 
mc5->stats.dispq_parity_err++; + } + + if (cause & F_ACTRGNFULL) + mc5->stats.active_rgn_full++; + if (cause & F_NFASRCHFAIL) + mc5->stats.nfa_srch_err++; + if (cause & F_UNKNOWNCMD) + mc5->stats.unknown_cmd++; + if (cause & F_DELACTEMPTY) + mc5->stats.del_act_empty++; + if (cause & MC5_INT_FATAL) + t3_fatal_err(adap); + + t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); +} + +void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) +{ +#define K * 1024 + + static unsigned int tcam_part_size[] = { /* in K 72-bit entries */ + 64 K, 128 K, 256 K, 32 K + }; + +#undef K + + u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG); + + mc5->adapter = adapter; + mc5->mode = (unsigned char)mode; + mc5->part_type = (unsigned char)G_TMTYPE(cfg); + if (cfg & F_TMTYPEHI) + mc5->part_type |= 4; + + mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)]; + if (mode == MC5_MODE_144_BIT) + mc5->tcam_size /= 2; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h new file mode 100644 index 000000000..81029b872 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h @@ -0,0 +1,2563 @@ +#define A_SG_CONTROL 0x0 + +#define S_CONGMODE 29 +#define V_CONGMODE(x) ((x) << S_CONGMODE) +#define F_CONGMODE V_CONGMODE(1U) + +#define S_TNLFLMODE 28 +#define V_TNLFLMODE(x) ((x) << S_TNLFLMODE) +#define F_TNLFLMODE V_TNLFLMODE(1U) + +#define S_FATLPERREN 27 +#define V_FATLPERREN(x) ((x) << S_FATLPERREN) +#define F_FATLPERREN V_FATLPERREN(1U) + +#define S_DROPPKT 20 +#define V_DROPPKT(x) ((x) << S_DROPPKT) +#define F_DROPPKT V_DROPPKT(1U) + +#define S_EGRGENCTRL 19 +#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL) +#define F_EGRGENCTRL V_EGRGENCTRL(1U) + +#define S_USERSPACESIZE 14 +#define M_USERSPACESIZE 0x1f +#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE) + +#define S_HOSTPAGESIZE 11 +#define M_HOSTPAGESIZE 0x7 +#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE) + +#define S_FLMODE 9 +#define V_FLMODE(x) ((x) << S_FLMODE) +#define F_FLMODE V_FLMODE(1U) + +#define S_PKTSHIFT 6 +#define M_PKTSHIFT 0x7 +#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT) + +#define S_ONEINTMULTQ 5 +#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ) +#define F_ONEINTMULTQ V_ONEINTMULTQ(1U) + +#define S_BIGENDIANINGRESS 2 +#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS) +#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U) + +#define S_ISCSICOALESCING 1 +#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING) +#define F_ISCSICOALESCING V_ISCSICOALESCING(1U) + +#define S_GLOBALENABLE 0 +#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE) +#define F_GLOBALENABLE V_GLOBALENABLE(1U) + +#define S_AVOIDCQOVFL 24 +#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL) +#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U) + +#define S_OPTONEINTMULTQ 23 +#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ) +#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U) + +#define S_CQCRDTCTRL 22 +#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL) +#define F_CQCRDTCTRL V_CQCRDTCTRL(1U) + +#define A_SG_KDOORBELL 0x4 + +#define S_SELEGRCNTX 31 +#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX) +#define F_SELEGRCNTX V_SELEGRCNTX(1U) + +#define S_EGRCNTX 0 +#define M_EGRCNTX 0xffff +#define V_EGRCNTX(x) ((x) << S_EGRCNTX) + +#define A_SG_GTS 0x8 + +#define S_RSPQ 29 +#define M_RSPQ 0x7 +#define V_RSPQ(x) ((x) << S_RSPQ) +#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ) + +#define S_NEWTIMER 16 +#define M_NEWTIMER 0x1fff +#define V_NEWTIMER(x) ((x) << S_NEWTIMER) + +#define S_NEWINDEX 0 +#define M_NEWINDEX 0xffff +#define V_NEWINDEX(x) ((x) << S_NEWINDEX) + 
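For orientation, a minimal, hypothetical sketch of how the S_/M_/V_/G_ shift-and-mask convention in regs.h is composed and decoded: V_FOO() shifts a field value into place, M_FOO is the field mask before shifting, and G_FOO() extracts a field from a full register word. The helper name and field values below are illustrative only; struct adapter, t3_write_reg() and the A_SG_GTS/RSPQ/NEWTIMER/NEWINDEX definitions are the ones appearing in this patch.

#include "common.h"	/* struct adapter, t3_read_reg()/t3_write_reg() (assumed from this driver) */
#include "regs.h"

/* Illustrative only: compose an SG GTS doorbell word and decode one field back. */
static void example_sg_gts_write(struct adapter *adap, unsigned int qid,
				 unsigned int timer, unsigned int cidx)
{
	/* Mask each value to its field width, then shift it into place. */
	u32 val = V_RSPQ(qid & M_RSPQ) |
		  V_NEWTIMER(timer & M_NEWTIMER) |
		  V_NEWINDEX(cidx & M_NEWINDEX);

	/* G_RSPQ() recovers the queue id from the composed word. */
	unsigned int decoded_qid = G_RSPQ(val);

	(void)decoded_qid;	/* decoded_qid == (qid & M_RSPQ) */
	t3_write_reg(adap, A_SG_GTS, val);
}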
+#define A_SG_CONTEXT_CMD 0xc + +#define S_CONTEXT_CMD_OPCODE 28 +#define M_CONTEXT_CMD_OPCODE 0xf +#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE) + +#define S_CONTEXT_CMD_BUSY 27 +#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY) +#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U) + +#define S_CQ_CREDIT 20 + +#define M_CQ_CREDIT 0x7f + +#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT) + +#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT) + +#define S_CQ 19 + +#define V_CQ(x) ((x) << S_CQ) +#define F_CQ V_CQ(1U) + +#define S_RESPONSEQ 18 +#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ) +#define F_RESPONSEQ V_RESPONSEQ(1U) + +#define S_EGRESS 17 +#define V_EGRESS(x) ((x) << S_EGRESS) +#define F_EGRESS V_EGRESS(1U) + +#define S_FREELIST 16 +#define V_FREELIST(x) ((x) << S_FREELIST) +#define F_FREELIST V_FREELIST(1U) + +#define S_CONTEXT 0 +#define M_CONTEXT 0xffff +#define V_CONTEXT(x) ((x) << S_CONTEXT) + +#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT) + +#define A_SG_CONTEXT_DATA0 0x10 + +#define A_SG_CONTEXT_DATA1 0x14 + +#define A_SG_CONTEXT_DATA2 0x18 + +#define A_SG_CONTEXT_DATA3 0x1c + +#define A_SG_CONTEXT_MASK0 0x20 + +#define A_SG_CONTEXT_MASK1 0x24 + +#define A_SG_CONTEXT_MASK2 0x28 + +#define A_SG_CONTEXT_MASK3 0x2c + +#define A_SG_RSPQ_CREDIT_RETURN 0x30 + +#define S_CREDITS 0 +#define M_CREDITS 0xffff +#define V_CREDITS(x) ((x) << S_CREDITS) + +#define A_SG_DATA_INTR 0x34 + +#define S_ERRINTR 31 +#define V_ERRINTR(x) ((x) << S_ERRINTR) +#define F_ERRINTR V_ERRINTR(1U) + +#define A_SG_HI_DRB_HI_THRSH 0x38 + +#define A_SG_HI_DRB_LO_THRSH 0x3c + +#define A_SG_LO_DRB_HI_THRSH 0x40 + +#define A_SG_LO_DRB_LO_THRSH 0x44 + +#define A_SG_RSPQ_FL_STATUS 0x4c + +#define S_RSPQ0DISABLED 8 + +#define S_FL0EMPTY 16 +#define V_FL0EMPTY(x) ((x) << S_FL0EMPTY) +#define F_FL0EMPTY V_FL0EMPTY(1U) + +#define A_SG_EGR_RCQ_DRB_THRSH 0x54 + +#define S_HIRCQDRBTHRSH 16 +#define M_HIRCQDRBTHRSH 0x7ff +#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH) + +#define S_LORCQDRBTHRSH 0 +#define M_LORCQDRBTHRSH 0x7ff +#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH) + +#define A_SG_EGR_CNTX_BADDR 0x58 + +#define A_SG_INT_CAUSE 0x5c + +#define S_HIRCQPARITYERROR 31 +#define V_HIRCQPARITYERROR(x) ((x) << S_HIRCQPARITYERROR) +#define F_HIRCQPARITYERROR V_HIRCQPARITYERROR(1U) + +#define S_LORCQPARITYERROR 30 +#define V_LORCQPARITYERROR(x) ((x) << S_LORCQPARITYERROR) +#define F_LORCQPARITYERROR V_LORCQPARITYERROR(1U) + +#define S_HIDRBPARITYERROR 29 +#define V_HIDRBPARITYERROR(x) ((x) << S_HIDRBPARITYERROR) +#define F_HIDRBPARITYERROR V_HIDRBPARITYERROR(1U) + +#define S_LODRBPARITYERROR 28 +#define V_LODRBPARITYERROR(x) ((x) << S_LODRBPARITYERROR) +#define F_LODRBPARITYERROR V_LODRBPARITYERROR(1U) + +#define S_FLPARITYERROR 22 +#define M_FLPARITYERROR 0x3f +#define V_FLPARITYERROR(x) ((x) << S_FLPARITYERROR) +#define G_FLPARITYERROR(x) (((x) >> S_FLPARITYERROR) & M_FLPARITYERROR) + +#define S_ITPARITYERROR 20 +#define M_ITPARITYERROR 0x3 +#define V_ITPARITYERROR(x) ((x) << S_ITPARITYERROR) +#define G_ITPARITYERROR(x) (((x) >> S_ITPARITYERROR) & M_ITPARITYERROR) + +#define S_IRPARITYERROR 19 +#define V_IRPARITYERROR(x) ((x) << S_IRPARITYERROR) +#define F_IRPARITYERROR V_IRPARITYERROR(1U) + +#define S_RCPARITYERROR 18 +#define V_RCPARITYERROR(x) ((x) << S_RCPARITYERROR) +#define F_RCPARITYERROR V_RCPARITYERROR(1U) + +#define S_OCPARITYERROR 17 +#define V_OCPARITYERROR(x) ((x) << S_OCPARITYERROR) +#define F_OCPARITYERROR V_OCPARITYERROR(1U) + +#define S_CPPARITYERROR 16 +#define 
V_CPPARITYERROR(x) ((x) << S_CPPARITYERROR) +#define F_CPPARITYERROR V_CPPARITYERROR(1U) + +#define S_R_REQ_FRAMINGERROR 15 +#define V_R_REQ_FRAMINGERROR(x) ((x) << S_R_REQ_FRAMINGERROR) +#define F_R_REQ_FRAMINGERROR V_R_REQ_FRAMINGERROR(1U) + +#define S_UC_REQ_FRAMINGERROR 14 +#define V_UC_REQ_FRAMINGERROR(x) ((x) << S_UC_REQ_FRAMINGERROR) +#define F_UC_REQ_FRAMINGERROR V_UC_REQ_FRAMINGERROR(1U) + +#define S_HICTLDRBDROPERR 13 +#define V_HICTLDRBDROPERR(x) ((x) << S_HICTLDRBDROPERR) +#define F_HICTLDRBDROPERR V_HICTLDRBDROPERR(1U) + +#define S_LOCTLDRBDROPERR 12 +#define V_LOCTLDRBDROPERR(x) ((x) << S_LOCTLDRBDROPERR) +#define F_LOCTLDRBDROPERR V_LOCTLDRBDROPERR(1U) + +#define S_HIPIODRBDROPERR 11 +#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR) +#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U) + +#define S_LOPIODRBDROPERR 10 +#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) +#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) + +#define S_HIPRIORITYDBFULL 7 +#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL) +#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U) + +#define S_HIPRIORITYDBEMPTY 6 +#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY) +#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U) + +#define S_LOPRIORITYDBFULL 5 +#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL) +#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U) + +#define S_LOPRIORITYDBEMPTY 4 +#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY) +#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U) + +#define S_RSPQDISABLED 3 +#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) +#define F_RSPQDISABLED V_RSPQDISABLED(1U) + +#define S_RSPQCREDITOVERFOW 2 +#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW) +#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U) + +#define S_FLEMPTY 1 +#define V_FLEMPTY(x) ((x) << S_FLEMPTY) +#define F_FLEMPTY V_FLEMPTY(1U) + +#define A_SG_INT_ENABLE 0x60 + +#define A_SG_CMDQ_CREDIT_TH 0x64 + +#define S_TIMEOUT 8 +#define M_TIMEOUT 0xffffff +#define V_TIMEOUT(x) ((x) << S_TIMEOUT) + +#define S_THRESHOLD 0 +#define M_THRESHOLD 0xff +#define V_THRESHOLD(x) ((x) << S_THRESHOLD) + +#define A_SG_TIMER_TICK 0x68 + +#define A_SG_CQ_CONTEXT_BADDR 0x6c + +#define A_SG_OCO_BASE 0x70 + +#define S_BASE1 16 +#define M_BASE1 0xffff +#define V_BASE1(x) ((x) << S_BASE1) + +#define A_SG_DRB_PRI_THRESH 0x74 + +#define A_PCIX_INT_ENABLE 0x80 + +#define S_MSIXPARERR 22 +#define M_MSIXPARERR 0x7 + +#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR) + +#define S_CFPARERR 18 +#define M_CFPARERR 0xf + +#define V_CFPARERR(x) ((x) << S_CFPARERR) + +#define S_RFPARERR 14 +#define M_RFPARERR 0xf + +#define V_RFPARERR(x) ((x) << S_RFPARERR) + +#define S_WFPARERR 12 +#define M_WFPARERR 0x3 + +#define V_WFPARERR(x) ((x) << S_WFPARERR) + +#define S_PIOPARERR 11 +#define V_PIOPARERR(x) ((x) << S_PIOPARERR) +#define F_PIOPARERR V_PIOPARERR(1U) + +#define S_DETUNCECCERR 10 +#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR) +#define F_DETUNCECCERR V_DETUNCECCERR(1U) + +#define S_DETCORECCERR 9 +#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR) +#define F_DETCORECCERR V_DETCORECCERR(1U) + +#define S_RCVSPLCMPERR 8 +#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR) +#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U) + +#define S_UNXSPLCMP 7 +#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP) +#define F_UNXSPLCMP V_UNXSPLCMP(1U) + +#define S_SPLCMPDIS 6 +#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS) +#define F_SPLCMPDIS V_SPLCMPDIS(1U) + +#define S_DETPARERR 5 +#define V_DETPARERR(x) ((x) << S_DETPARERR) 
+#define F_DETPARERR V_DETPARERR(1U) + +#define S_SIGSYSERR 4 +#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR) +#define F_SIGSYSERR V_SIGSYSERR(1U) + +#define S_RCVMSTABT 3 +#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT) +#define F_RCVMSTABT V_RCVMSTABT(1U) + +#define S_RCVTARABT 2 +#define V_RCVTARABT(x) ((x) << S_RCVTARABT) +#define F_RCVTARABT V_RCVTARABT(1U) + +#define S_SIGTARABT 1 +#define V_SIGTARABT(x) ((x) << S_SIGTARABT) +#define F_SIGTARABT V_SIGTARABT(1U) + +#define S_MSTDETPARERR 0 +#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR) +#define F_MSTDETPARERR V_MSTDETPARERR(1U) + +#define A_PCIX_INT_CAUSE 0x84 + +#define A_PCIX_CFG 0x88 + +#define S_DMASTOPEN 19 +#define V_DMASTOPEN(x) ((x) << S_DMASTOPEN) +#define F_DMASTOPEN V_DMASTOPEN(1U) + +#define S_CLIDECEN 18 +#define V_CLIDECEN(x) ((x) << S_CLIDECEN) +#define F_CLIDECEN V_CLIDECEN(1U) + +#define A_PCIX_MODE 0x8c + +#define S_PCLKRANGE 6 +#define M_PCLKRANGE 0x3 +#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE) +#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE) + +#define S_PCIXINITPAT 2 +#define M_PCIXINITPAT 0xf +#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT) +#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT) + +#define S_64BIT 0 +#define V_64BIT(x) ((x) << S_64BIT) +#define F_64BIT V_64BIT(1U) + +#define A_PCIE_INT_ENABLE 0x80 + +#define S_BISTERR 15 +#define M_BISTERR 0xff + +#define V_BISTERR(x) ((x) << S_BISTERR) + +#define S_TXPARERR 18 +#define V_TXPARERR(x) ((x) << S_TXPARERR) +#define F_TXPARERR V_TXPARERR(1U) + +#define S_RXPARERR 17 +#define V_RXPARERR(x) ((x) << S_RXPARERR) +#define F_RXPARERR V_RXPARERR(1U) + +#define S_RETRYLUTPARERR 16 +#define V_RETRYLUTPARERR(x) ((x) << S_RETRYLUTPARERR) +#define F_RETRYLUTPARERR V_RETRYLUTPARERR(1U) + +#define S_RETRYBUFPARERR 15 +#define V_RETRYBUFPARERR(x) ((x) << S_RETRYBUFPARERR) +#define F_RETRYBUFPARERR V_RETRYBUFPARERR(1U) + +#define S_PCIE_MSIXPARERR 12 +#define M_PCIE_MSIXPARERR 0x7 + +#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR) + +#define S_PCIE_CFPARERR 11 +#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR) +#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U) + +#define S_PCIE_RFPARERR 10 +#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR) +#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U) + +#define S_PCIE_WFPARERR 9 +#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR) +#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U) + +#define S_PCIE_PIOPARERR 8 +#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR) +#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U) + +#define S_UNXSPLCPLERRC 7 +#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC) +#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U) + +#define S_UNXSPLCPLERRR 6 +#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR) +#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U) + +#define S_PEXERR 0 +#define V_PEXERR(x) ((x) << S_PEXERR) +#define F_PEXERR V_PEXERR(1U) + +#define A_PCIE_INT_CAUSE 0x84 + +#define S_PCIE_DMASTOPEN 24 +#define V_PCIE_DMASTOPEN(x) ((x) << S_PCIE_DMASTOPEN) +#define F_PCIE_DMASTOPEN V_PCIE_DMASTOPEN(1U) + +#define A_PCIE_CFG 0x88 + +#define S_ENABLELINKDWNDRST 21 +#define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST) +#define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U) + +#define S_ENABLELINKDOWNRST 20 +#define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST) +#define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U) + +#define S_PCIE_CLIDECEN 16 +#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN) +#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U) + +#define S_CRSTWRMMODE 0 +#define V_CRSTWRMMODE(x) ((x) 
<< S_CRSTWRMMODE) +#define F_CRSTWRMMODE V_CRSTWRMMODE(1U) + +#define A_PCIE_MODE 0x8c + +#define S_NUMFSTTRNSEQRX 10 +#define M_NUMFSTTRNSEQRX 0xff +#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX) +#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX) + +#define A_PCIE_PEX_CTRL0 0x98 + +#define S_NUMFSTTRNSEQ 22 +#define M_NUMFSTTRNSEQ 0xff +#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ) +#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ) + +#define S_REPLAYLMT 2 +#define M_REPLAYLMT 0xfffff + +#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT) + +#define A_PCIE_PEX_CTRL1 0x9c + +#define S_T3A_ACKLAT 0 +#define M_T3A_ACKLAT 0x7ff + +#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT) + +#define S_ACKLAT 0 +#define M_ACKLAT 0x1fff + +#define V_ACKLAT(x) ((x) << S_ACKLAT) + +#define A_PCIE_PEX_ERR 0xa4 + +#define A_T3DBG_GPIO_EN 0xd0 + +#define S_GPIO11_OEN 27 +#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN) +#define F_GPIO11_OEN V_GPIO11_OEN(1U) + +#define S_GPIO10_OEN 26 +#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN) +#define F_GPIO10_OEN V_GPIO10_OEN(1U) + +#define S_GPIO7_OEN 23 +#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN) +#define F_GPIO7_OEN V_GPIO7_OEN(1U) + +#define S_GPIO6_OEN 22 +#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN) +#define F_GPIO6_OEN V_GPIO6_OEN(1U) + +#define S_GPIO5_OEN 21 +#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN) +#define F_GPIO5_OEN V_GPIO5_OEN(1U) + +#define S_GPIO4_OEN 20 +#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN) +#define F_GPIO4_OEN V_GPIO4_OEN(1U) + +#define S_GPIO2_OEN 18 +#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN) +#define F_GPIO2_OEN V_GPIO2_OEN(1U) + +#define S_GPIO1_OEN 17 +#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN) +#define F_GPIO1_OEN V_GPIO1_OEN(1U) + +#define S_GPIO0_OEN 16 +#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN) +#define F_GPIO0_OEN V_GPIO0_OEN(1U) + +#define S_GPIO10_OUT_VAL 10 +#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL) +#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U) + +#define S_GPIO7_OUT_VAL 7 +#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL) +#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U) + +#define S_GPIO6_OUT_VAL 6 +#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL) +#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U) + +#define S_GPIO5_OUT_VAL 5 +#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL) +#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U) + +#define S_GPIO4_OUT_VAL 4 +#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL) +#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U) + +#define S_GPIO2_OUT_VAL 2 +#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL) +#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U) + +#define S_GPIO1_OUT_VAL 1 +#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL) +#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U) + +#define S_GPIO0_OUT_VAL 0 +#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL) +#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U) + +#define A_T3DBG_INT_ENABLE 0xd8 + +#define S_GPIO11 11 +#define V_GPIO11(x) ((x) << S_GPIO11) +#define F_GPIO11 V_GPIO11(1U) + +#define S_GPIO10 10 +#define V_GPIO10(x) ((x) << S_GPIO10) +#define F_GPIO10 V_GPIO10(1U) + +#define S_GPIO9 9 +#define V_GPIO9(x) ((x) << S_GPIO9) +#define F_GPIO9 V_GPIO9(1U) + +#define S_GPIO7 7 +#define V_GPIO7(x) ((x) << S_GPIO7) +#define F_GPIO7 V_GPIO7(1U) + +#define S_GPIO6 6 +#define V_GPIO6(x) ((x) << S_GPIO6) +#define F_GPIO6 V_GPIO6(1U) + +#define S_GPIO5 5 +#define V_GPIO5(x) ((x) << S_GPIO5) +#define F_GPIO5 V_GPIO5(1U) + +#define S_GPIO4 4 +#define V_GPIO4(x) ((x) << S_GPIO4) +#define F_GPIO4 V_GPIO4(1U) + +#define 
S_GPIO3 3 +#define V_GPIO3(x) ((x) << S_GPIO3) +#define F_GPIO3 V_GPIO3(1U) + +#define S_GPIO2 2 +#define V_GPIO2(x) ((x) << S_GPIO2) +#define F_GPIO2 V_GPIO2(1U) + +#define S_GPIO1 1 +#define V_GPIO1(x) ((x) << S_GPIO1) +#define F_GPIO1 V_GPIO1(1U) + +#define S_GPIO0 0 +#define V_GPIO0(x) ((x) << S_GPIO0) +#define F_GPIO0 V_GPIO0(1U) + +#define A_T3DBG_INT_CAUSE 0xdc + +#define A_T3DBG_GPIO_ACT_LOW 0xf0 + +#define MC7_PMRX_BASE_ADDR 0x100 + +#define A_MC7_CFG 0x100 + +#define S_IFEN 13 +#define V_IFEN(x) ((x) << S_IFEN) +#define F_IFEN V_IFEN(1U) + +#define S_TERM150 11 +#define V_TERM150(x) ((x) << S_TERM150) +#define F_TERM150 V_TERM150(1U) + +#define S_SLOW 10 +#define V_SLOW(x) ((x) << S_SLOW) +#define F_SLOW V_SLOW(1U) + +#define S_WIDTH 8 +#define M_WIDTH 0x3 +#define V_WIDTH(x) ((x) << S_WIDTH) +#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH) + +#define S_BKS 6 +#define V_BKS(x) ((x) << S_BKS) +#define F_BKS V_BKS(1U) + +#define S_ORG 5 +#define V_ORG(x) ((x) << S_ORG) +#define F_ORG V_ORG(1U) + +#define S_DEN 2 +#define M_DEN 0x7 +#define V_DEN(x) ((x) << S_DEN) +#define G_DEN(x) (((x) >> S_DEN) & M_DEN) + +#define S_RDY 1 +#define V_RDY(x) ((x) << S_RDY) +#define F_RDY V_RDY(1U) + +#define S_CLKEN 0 +#define V_CLKEN(x) ((x) << S_CLKEN) +#define F_CLKEN V_CLKEN(1U) + +#define A_MC7_MODE 0x104 + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define A_MC7_EXT_MODE1 0x108 + +#define A_MC7_EXT_MODE2 0x10c + +#define A_MC7_EXT_MODE3 0x110 + +#define A_MC7_PRE 0x114 + +#define A_MC7_REF 0x118 + +#define S_PREREFDIV 1 +#define M_PREREFDIV 0x3fff +#define V_PREREFDIV(x) ((x) << S_PREREFDIV) + +#define S_PERREFEN 0 +#define V_PERREFEN(x) ((x) << S_PERREFEN) +#define F_PERREFEN V_PERREFEN(1U) + +#define A_MC7_DLL 0x11c + +#define S_DLLENB 1 +#define V_DLLENB(x) ((x) << S_DLLENB) +#define F_DLLENB V_DLLENB(1U) + +#define S_DLLRST 0 +#define V_DLLRST(x) ((x) << S_DLLRST) +#define F_DLLRST V_DLLRST(1U) + +#define A_MC7_PARM 0x120 + +#define S_ACTTOPREDLY 26 +#define M_ACTTOPREDLY 0xf +#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY) + +#define S_ACTTORDWRDLY 23 +#define M_ACTTORDWRDLY 0x7 +#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY) + +#define S_PRECYC 20 +#define M_PRECYC 0x7 +#define V_PRECYC(x) ((x) << S_PRECYC) + +#define S_REFCYC 13 +#define M_REFCYC 0x7f +#define V_REFCYC(x) ((x) << S_REFCYC) + +#define S_BKCYC 8 +#define M_BKCYC 0x1f +#define V_BKCYC(x) ((x) << S_BKCYC) + +#define S_WRTORDDLY 4 +#define M_WRTORDDLY 0xf +#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY) + +#define S_RDTOWRDLY 0 +#define M_RDTOWRDLY 0xf +#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY) + +#define A_MC7_CAL 0x128 + +#define S_CAL_FAULT 30 +#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT) +#define F_CAL_FAULT V_CAL_FAULT(1U) + +#define S_SGL_CAL_EN 20 +#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN) +#define F_SGL_CAL_EN V_SGL_CAL_EN(1U) + +#define A_MC7_ERR_ADDR 0x12c + +#define A_MC7_ECC 0x130 + +#define S_ECCCHKEN 1 +#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN) +#define F_ECCCHKEN V_ECCCHKEN(1U) + +#define S_ECCGENEN 0 +#define V_ECCGENEN(x) ((x) << S_ECCGENEN) +#define F_ECCGENEN V_ECCGENEN(1U) + +#define A_MC7_CE_ADDR 0x134 + +#define A_MC7_CE_DATA0 0x138 + +#define A_MC7_CE_DATA1 0x13c + +#define A_MC7_CE_DATA2 0x140 + +#define S_DATA 0 +#define M_DATA 0xff + +#define G_DATA(x) (((x) >> S_DATA) & M_DATA) + +#define A_MC7_UE_ADDR 0x144 + +#define A_MC7_UE_DATA0 0x148 + +#define A_MC7_UE_DATA1 0x14c + +#define A_MC7_UE_DATA2 0x150 + +#define A_MC7_BD_ADDR 0x154 + +#define S_ADDR 3 
+ +#define M_ADDR 0x1fffffff + +#define A_MC7_BD_DATA0 0x158 + +#define A_MC7_BD_DATA1 0x15c + +#define A_MC7_BD_OP 0x164 + +#define S_OP 0 + +#define V_OP(x) ((x) << S_OP) +#define F_OP V_OP(1U) + +#define A_MC7_BIST_ADDR_BEG 0x168 + +#define A_MC7_BIST_ADDR_END 0x16c + +#define A_MC7_BIST_DATA 0x170 + +#define A_MC7_BIST_OP 0x174 + +#define S_CONT 3 +#define V_CONT(x) ((x) << S_CONT) +#define F_CONT V_CONT(1U) + +#define A_MC7_INT_ENABLE 0x178 + +#define S_AE 17 +#define V_AE(x) ((x) << S_AE) +#define F_AE V_AE(1U) + +#define S_PE 2 +#define M_PE 0x7fff + +#define V_PE(x) ((x) << S_PE) + +#define G_PE(x) (((x) >> S_PE) & M_PE) + +#define S_UE 1 +#define V_UE(x) ((x) << S_UE) +#define F_UE V_UE(1U) + +#define S_CE 0 +#define V_CE(x) ((x) << S_CE) +#define F_CE V_CE(1U) + +#define A_MC7_INT_CAUSE 0x17c + +#define MC7_PMTX_BASE_ADDR 0x180 + +#define MC7_CM_BASE_ADDR 0x200 + +#define A_CIM_BOOT_CFG 0x280 + +#define S_BOOTADDR 2 +#define M_BOOTADDR 0x3fffffff +#define V_BOOTADDR(x) ((x) << S_BOOTADDR) + +#define A_CIM_SDRAM_BASE_ADDR 0x28c + +#define A_CIM_SDRAM_ADDR_SIZE 0x290 + +#define A_CIM_HOST_INT_ENABLE 0x298 + +#define S_DTAGPARERR 28 +#define V_DTAGPARERR(x) ((x) << S_DTAGPARERR) +#define F_DTAGPARERR V_DTAGPARERR(1U) + +#define S_ITAGPARERR 27 +#define V_ITAGPARERR(x) ((x) << S_ITAGPARERR) +#define F_ITAGPARERR V_ITAGPARERR(1U) + +#define S_IBQTPPARERR 26 +#define V_IBQTPPARERR(x) ((x) << S_IBQTPPARERR) +#define F_IBQTPPARERR V_IBQTPPARERR(1U) + +#define S_IBQULPPARERR 25 +#define V_IBQULPPARERR(x) ((x) << S_IBQULPPARERR) +#define F_IBQULPPARERR V_IBQULPPARERR(1U) + +#define S_IBQSGEHIPARERR 24 +#define V_IBQSGEHIPARERR(x) ((x) << S_IBQSGEHIPARERR) +#define F_IBQSGEHIPARERR V_IBQSGEHIPARERR(1U) + +#define S_IBQSGELOPARERR 23 +#define V_IBQSGELOPARERR(x) ((x) << S_IBQSGELOPARERR) +#define F_IBQSGELOPARERR V_IBQSGELOPARERR(1U) + +#define S_OBQULPLOPARERR 22 +#define V_OBQULPLOPARERR(x) ((x) << S_OBQULPLOPARERR) +#define F_OBQULPLOPARERR V_OBQULPLOPARERR(1U) + +#define S_OBQULPHIPARERR 21 +#define V_OBQULPHIPARERR(x) ((x) << S_OBQULPHIPARERR) +#define F_OBQULPHIPARERR V_OBQULPHIPARERR(1U) + +#define S_OBQSGEPARERR 20 +#define V_OBQSGEPARERR(x) ((x) << S_OBQSGEPARERR) +#define F_OBQSGEPARERR V_OBQSGEPARERR(1U) + +#define S_DCACHEPARERR 19 +#define V_DCACHEPARERR(x) ((x) << S_DCACHEPARERR) +#define F_DCACHEPARERR V_DCACHEPARERR(1U) + +#define S_ICACHEPARERR 18 +#define V_ICACHEPARERR(x) ((x) << S_ICACHEPARERR) +#define F_ICACHEPARERR V_ICACHEPARERR(1U) + +#define S_DRAMPARERR 17 +#define V_DRAMPARERR(x) ((x) << S_DRAMPARERR) +#define F_DRAMPARERR V_DRAMPARERR(1U) + +#define A_CIM_HOST_INT_CAUSE 0x29c + +#define S_BLKWRPLINT 12 +#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT) +#define F_BLKWRPLINT V_BLKWRPLINT(1U) + +#define S_BLKRDPLINT 11 +#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT) +#define F_BLKRDPLINT V_BLKRDPLINT(1U) + +#define S_BLKWRCTLINT 10 +#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT) +#define F_BLKWRCTLINT V_BLKWRCTLINT(1U) + +#define S_BLKRDCTLINT 9 +#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT) +#define F_BLKRDCTLINT V_BLKRDCTLINT(1U) + +#define S_BLKWRFLASHINT 8 +#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT) +#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U) + +#define S_BLKRDFLASHINT 7 +#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT) +#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U) + +#define S_SGLWRFLASHINT 6 +#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT) +#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U) + +#define S_WRBLKFLASHINT 5 +#define V_WRBLKFLASHINT(x) ((x) 
<< S_WRBLKFLASHINT) +#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U) + +#define S_BLKWRBOOTINT 4 +#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT) +#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U) + +#define S_FLASHRANGEINT 2 +#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT) +#define F_FLASHRANGEINT V_FLASHRANGEINT(1U) + +#define S_SDRAMRANGEINT 1 +#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT) +#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U) + +#define S_RSVDSPACEINT 0 +#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT) +#define F_RSVDSPACEINT V_RSVDSPACEINT(1U) + +#define A_CIM_HOST_ACC_CTRL 0x2b0 + +#define S_HOSTBUSY 17 +#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY) +#define F_HOSTBUSY V_HOSTBUSY(1U) + +#define A_CIM_HOST_ACC_DATA 0x2b4 + +#define A_CIM_IBQ_DBG_CFG 0x2c0 + +#define S_IBQDBGADDR 16 +#define M_IBQDBGADDR 0x1ff +#define V_IBQDBGADDR(x) ((x) << S_IBQDBGADDR) +#define G_IBQDBGADDR(x) (((x) >> S_IBQDBGADDR) & M_IBQDBGADDR) + +#define S_IBQDBGQID 3 +#define M_IBQDBGQID 0x3 +#define V_IBQDBGQID(x) ((x) << S_IBQDBGQID) +#define G_IBQDBGQID(x) (((x) >> S_IBQDBGQID) & M_IBQDBGQID) + +#define S_IBQDBGWR 2 +#define V_IBQDBGWR(x) ((x) << S_IBQDBGWR) +#define F_IBQDBGWR V_IBQDBGWR(1U) + +#define S_IBQDBGBUSY 1 +#define V_IBQDBGBUSY(x) ((x) << S_IBQDBGBUSY) +#define F_IBQDBGBUSY V_IBQDBGBUSY(1U) + +#define S_IBQDBGEN 0 +#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN) +#define F_IBQDBGEN V_IBQDBGEN(1U) + +#define A_CIM_IBQ_DBG_DATA 0x2c8 + +#define A_TP_IN_CONFIG 0x300 + +#define S_RXFBARBPRIO 25 +#define V_RXFBARBPRIO(x) ((x) << S_RXFBARBPRIO) +#define F_RXFBARBPRIO V_RXFBARBPRIO(1U) + +#define S_TXFBARBPRIO 24 +#define V_TXFBARBPRIO(x) ((x) << S_TXFBARBPRIO) +#define F_TXFBARBPRIO V_TXFBARBPRIO(1U) + +#define S_NICMODE 14 +#define V_NICMODE(x) ((x) << S_NICMODE) +#define F_NICMODE V_NICMODE(1U) + +#define S_IPV6ENABLE 15 +#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE) +#define F_IPV6ENABLE V_IPV6ENABLE(1U) + +#define A_TP_OUT_CONFIG 0x304 + +#define S_VLANEXTRACTIONENABLE 12 + +#define A_TP_GLOBAL_CONFIG 0x308 + +#define S_TXPACINGENABLE 24 +#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE) +#define F_TXPACINGENABLE V_TXPACINGENABLE(1U) + +#define S_PATHMTU 15 +#define V_PATHMTU(x) ((x) << S_PATHMTU) +#define F_PATHMTU V_PATHMTU(1U) + +#define S_IPCHECKSUMOFFLOAD 13 +#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD) +#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U) + +#define S_UDPCHECKSUMOFFLOAD 12 +#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD) +#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U) + +#define S_TCPCHECKSUMOFFLOAD 11 +#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD) +#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U) + +#define S_IPTTL 0 +#define M_IPTTL 0xff +#define V_IPTTL(x) ((x) << S_IPTTL) + +#define A_TP_CMM_MM_BASE 0x314 + +#define A_TP_CMM_TIMER_BASE 0x318 + +#define S_CMTIMERMAXNUM 28 +#define M_CMTIMERMAXNUM 0x3 +#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM) + +#define A_TP_PMM_SIZE 0x31c + +#define A_TP_PMM_TX_BASE 0x320 + +#define A_TP_PMM_RX_BASE 0x328 + +#define A_TP_PMM_RX_PAGE_SIZE 0x32c + +#define A_TP_PMM_RX_MAX_PAGE 0x330 + +#define A_TP_PMM_TX_PAGE_SIZE 0x334 + +#define A_TP_PMM_TX_MAX_PAGE 0x338 + +#define A_TP_TCP_OPTIONS 0x340 + +#define S_MTUDEFAULT 16 +#define M_MTUDEFAULT 0xffff +#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT) + +#define S_MTUENABLE 10 +#define V_MTUENABLE(x) ((x) << S_MTUENABLE) +#define F_MTUENABLE V_MTUENABLE(1U) + +#define S_SACKRX 8 +#define V_SACKRX(x) ((x) << S_SACKRX) +#define 
F_SACKRX V_SACKRX(1U) + +#define S_SACKMODE 4 + +#define M_SACKMODE 0x3 + +#define V_SACKMODE(x) ((x) << S_SACKMODE) + +#define S_WINDOWSCALEMODE 2 +#define M_WINDOWSCALEMODE 0x3 +#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE) + +#define S_TIMESTAMPSMODE 0 + +#define M_TIMESTAMPSMODE 0x3 + +#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE) + +#define A_TP_DACK_CONFIG 0x344 + +#define S_AUTOSTATE3 30 +#define M_AUTOSTATE3 0x3 +#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3) + +#define S_AUTOSTATE2 28 +#define M_AUTOSTATE2 0x3 +#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2) + +#define S_AUTOSTATE1 26 +#define M_AUTOSTATE1 0x3 +#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1) + +#define S_BYTETHRESHOLD 5 +#define M_BYTETHRESHOLD 0xfffff +#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD) + +#define S_MSSTHRESHOLD 3 +#define M_MSSTHRESHOLD 0x3 +#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD) + +#define S_AUTOCAREFUL 2 +#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL) +#define F_AUTOCAREFUL V_AUTOCAREFUL(1U) + +#define S_AUTOENABLE 1 +#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE) +#define F_AUTOENABLE V_AUTOENABLE(1U) + +#define S_DACK_MODE 0 +#define V_DACK_MODE(x) ((x) << S_DACK_MODE) +#define F_DACK_MODE V_DACK_MODE(1U) + +#define A_TP_PC_CONFIG 0x348 + +#define S_TXTOSQUEUEMAPMODE 26 +#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE) +#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U) + +#define S_ENABLEEPCMDAFULL 23 +#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL) +#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U) + +#define S_MODULATEUNIONMODE 22 +#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE) +#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U) + +#define S_TXDEFERENABLE 20 +#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE) +#define F_TXDEFERENABLE V_TXDEFERENABLE(1U) + +#define S_RXCONGESTIONMODE 19 +#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE) +#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U) + +#define S_HEARBEATDACK 16 +#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK) +#define F_HEARBEATDACK V_HEARBEATDACK(1U) + +#define S_TXCONGESTIONMODE 15 +#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE) +#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U) + +#define S_ENABLEOCSPIFULL 30 +#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL) +#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U) + +#define S_LOCKTID 28 +#define V_LOCKTID(x) ((x) << S_LOCKTID) +#define F_LOCKTID V_LOCKTID(1U) + +#define S_TABLELATENCYDELTA 0 +#define M_TABLELATENCYDELTA 0xf +#define V_TABLELATENCYDELTA(x) ((x) << S_TABLELATENCYDELTA) +#define G_TABLELATENCYDELTA(x) \ + (((x) >> S_TABLELATENCYDELTA) & M_TABLELATENCYDELTA) + +#define A_TP_PC_CONFIG2 0x34c + +#define S_DISBLEDAPARBIT0 15 +#define V_DISBLEDAPARBIT0(x) ((x) << S_DISBLEDAPARBIT0) +#define F_DISBLEDAPARBIT0 V_DISBLEDAPARBIT0(1U) + +#define S_ENABLEARPMISS 13 +#define V_ENABLEARPMISS(x) ((x) << S_ENABLEARPMISS) +#define F_ENABLEARPMISS V_ENABLEARPMISS(1U) + +#define S_ENABLENONOFDTNLSYN 12 +#define V_ENABLENONOFDTNLSYN(x) ((x) << S_ENABLENONOFDTNLSYN) +#define F_ENABLENONOFDTNLSYN V_ENABLENONOFDTNLSYN(1U) + +#define S_ENABLEIPV6RSS 11 +#define V_ENABLEIPV6RSS(x) ((x) << S_ENABLEIPV6RSS) +#define F_ENABLEIPV6RSS V_ENABLEIPV6RSS(1U) + +#define S_CHDRAFULL 4 +#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL) +#define F_CHDRAFULL V_CHDRAFULL(1U) + +#define A_TP_TCP_BACKOFF_REG0 0x350 + +#define A_TP_TCP_BACKOFF_REG1 0x354 + +#define A_TP_TCP_BACKOFF_REG2 0x358 + +#define A_TP_TCP_BACKOFF_REG3 0x35c + 
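Multi-bit fields such as TABLELATENCYDELTA above are typically updated with a read-modify-write: clear the field using its mask, then OR in the new value. A minimal sketch under that assumption, reusing t3_read_reg()/t3_write_reg() from elsewhere in this patch; the helper name and the choice of field are purely illustrative.

/* Illustrative only: rewrite one multi-bit field of A_TP_PC_CONFIG. */
static void example_set_table_latency(struct adapter *adap, unsigned int delta)
{
	u32 cfg = t3_read_reg(adap, A_TP_PC_CONFIG);

	cfg &= ~V_TABLELATENCYDELTA(M_TABLELATENCYDELTA);	 /* clear the old field */
	cfg |= V_TABLELATENCYDELTA(delta & M_TABLELATENCYDELTA); /* install the new value */

	t3_write_reg(adap, A_TP_PC_CONFIG, cfg);
}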
+#define A_TP_PARA_REG2 0x368 + +#define S_MAXRXDATA 16 +#define M_MAXRXDATA 0xffff +#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA) + +#define S_RXCOALESCESIZE 0 +#define M_RXCOALESCESIZE 0xffff +#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE) + +#define A_TP_PARA_REG3 0x36c + +#define S_TXDATAACKIDX 16 +#define M_TXDATAACKIDX 0xf + +#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX) + +#define S_TXPACEAUTOSTRICT 10 +#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT) +#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U) + +#define S_TXPACEFIXED 9 +#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED) +#define F_TXPACEFIXED V_TXPACEFIXED(1U) + +#define S_TXPACEAUTO 8 +#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO) +#define F_TXPACEAUTO V_TXPACEAUTO(1U) + +#define S_RXCOALESCEENABLE 1 +#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE) +#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U) + +#define S_RXCOALESCEPSHEN 0 +#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN) +#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U) + +#define A_TP_PARA_REG4 0x370 + +#define A_TP_PARA_REG5 0x374 + +#define S_RXDDPOFFINIT 3 +#define V_RXDDPOFFINIT(x) ((x) << S_RXDDPOFFINIT) +#define F_RXDDPOFFINIT V_RXDDPOFFINIT(1U) + +#define A_TP_PARA_REG6 0x378 + +#define S_T3A_ENABLEESND 13 +#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND) +#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U) + +#define S_ENABLEESND 11 +#define V_ENABLEESND(x) ((x) << S_ENABLEESND) +#define F_ENABLEESND V_ENABLEESND(1U) + +#define A_TP_PARA_REG7 0x37c + +#define S_PMMAXXFERLEN1 16 +#define M_PMMAXXFERLEN1 0xffff +#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1) + +#define S_PMMAXXFERLEN0 0 +#define M_PMMAXXFERLEN0 0xffff +#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0) + +#define A_TP_TIMER_RESOLUTION 0x390 + +#define S_TIMERRESOLUTION 16 +#define M_TIMERRESOLUTION 0xff +#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION) + +#define S_TIMESTAMPRESOLUTION 8 +#define M_TIMESTAMPRESOLUTION 0xff +#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION) + +#define S_DELAYEDACKRESOLUTION 0 +#define M_DELAYEDACKRESOLUTION 0xff +#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION) + +#define A_TP_MSL 0x394 + +#define A_TP_RXT_MIN 0x398 + +#define A_TP_RXT_MAX 0x39c + +#define A_TP_PERS_MIN 0x3a0 + +#define A_TP_PERS_MAX 0x3a4 + +#define A_TP_KEEP_IDLE 0x3a8 + +#define A_TP_KEEP_INTVL 0x3ac + +#define A_TP_INIT_SRTT 0x3b0 + +#define A_TP_DACK_TIMER 0x3b4 + +#define A_TP_FINWAIT2_TIMER 0x3b8 + +#define A_TP_SHIFT_CNT 0x3c0 + +#define S_SYNSHIFTMAX 24 + +#define M_SYNSHIFTMAX 0xff + +#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX) + +#define S_RXTSHIFTMAXR1 20 + +#define M_RXTSHIFTMAXR1 0xf + +#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1) + +#define S_RXTSHIFTMAXR2 16 + +#define M_RXTSHIFTMAXR2 0xf + +#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2) + +#define S_PERSHIFTBACKOFFMAX 12 +#define M_PERSHIFTBACKOFFMAX 0xf +#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX) + +#define S_PERSHIFTMAX 8 +#define M_PERSHIFTMAX 0xf +#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX) + +#define S_KEEPALIVEMAX 0 + +#define M_KEEPALIVEMAX 0xff + +#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX) + +#define A_TP_MTU_PORT_TABLE 0x3d0 + +#define A_TP_CCTRL_TABLE 0x3dc + +#define A_TP_MTU_TABLE 0x3e4 + +#define A_TP_RSS_MAP_TABLE 0x3e8 + +#define A_TP_RSS_LKP_TABLE 0x3ec + +#define A_TP_RSS_CONFIG 0x3f0 + +#define S_TNL4TUPEN 29 +#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN) +#define F_TNL4TUPEN V_TNL4TUPEN(1U) + 
+#define S_TNL2TUPEN 28 +#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN) +#define F_TNL2TUPEN V_TNL2TUPEN(1U) + +#define S_TNLPRTEN 26 +#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN) +#define F_TNLPRTEN V_TNLPRTEN(1U) + +#define S_TNLMAPEN 25 +#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN) +#define F_TNLMAPEN V_TNLMAPEN(1U) + +#define S_TNLLKPEN 24 +#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN) +#define F_TNLLKPEN V_TNLLKPEN(1U) + +#define S_RRCPLMAPEN 7 +#define V_RRCPLMAPEN(x) ((x) << S_RRCPLMAPEN) +#define F_RRCPLMAPEN V_RRCPLMAPEN(1U) + +#define S_RRCPLCPUSIZE 4 +#define M_RRCPLCPUSIZE 0x7 +#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE) + +#define S_RQFEEDBACKENABLE 3 +#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE) +#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U) + +#define S_HASHTOEPLITZ 2 +#define V_HASHTOEPLITZ(x) ((x) << S_HASHTOEPLITZ) +#define F_HASHTOEPLITZ V_HASHTOEPLITZ(1U) + +#define S_DISABLE 0 + +#define A_TP_TM_PIO_ADDR 0x418 + +#define A_TP_TM_PIO_DATA 0x41c + +#define A_TP_TX_MOD_QUE_TABLE 0x420 + +#define A_TP_TX_RESOURCE_LIMIT 0x424 + +#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428 + +#define S_TX_MOD_QUEUE_REQ_MAP 0 +#define M_TX_MOD_QUEUE_REQ_MAP 0xff +#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP) + +#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c + +#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430 + +#define A_TP_MOD_CHANNEL_WEIGHT 0x434 + +#define A_TP_MOD_RATE_LIMIT 0x438 + +#define A_TP_PIO_ADDR 0x440 + +#define A_TP_PIO_DATA 0x444 + +#define A_TP_RESET 0x44c + +#define S_FLSTINITENABLE 1 +#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE) +#define F_FLSTINITENABLE V_FLSTINITENABLE(1U) + +#define S_TPRESET 0 +#define V_TPRESET(x) ((x) << S_TPRESET) +#define F_TPRESET V_TPRESET(1U) + +#define A_TP_CMM_MM_RX_FLST_BASE 0x460 + +#define A_TP_CMM_MM_TX_FLST_BASE 0x464 + +#define A_TP_CMM_MM_PS_FLST_BASE 0x468 + +#define A_TP_MIB_INDEX 0x450 + +#define A_TP_MIB_RDATA 0x454 + +#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c + +#define A_TP_INT_ENABLE 0x470 + +#define S_FLMTXFLSTEMPTY 30 +#define V_FLMTXFLSTEMPTY(x) ((x) << S_FLMTXFLSTEMPTY) +#define F_FLMTXFLSTEMPTY V_FLMTXFLSTEMPTY(1U) + +#define S_FLMRXFLSTEMPTY 29 +#define V_FLMRXFLSTEMPTY(x) ((x) << S_FLMRXFLSTEMPTY) +#define F_FLMRXFLSTEMPTY V_FLMRXFLSTEMPTY(1U) + +#define S_ARPLUTPERR 26 +#define V_ARPLUTPERR(x) ((x) << S_ARPLUTPERR) +#define F_ARPLUTPERR V_ARPLUTPERR(1U) + +#define S_CMCACHEPERR 24 +#define V_CMCACHEPERR(x) ((x) << S_CMCACHEPERR) +#define F_CMCACHEPERR V_CMCACHEPERR(1U) + +#define A_TP_INT_CAUSE 0x474 + +#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8 + +#define A_TP_TX_DROP_CFG_CH0 0x12b + +#define A_TP_TX_DROP_MODE 0x12f + +#define A_TP_EGRESS_CONFIG 0x145 + +#define S_REWRITEFORCETOSIZE 0 +#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE) +#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U) + +#define A_TP_TX_TRC_KEY0 0x20 + +#define A_TP_RX_TRC_KEY0 0x120 + +#define A_TP_TX_DROP_CNT_CH0 0x12d + +#define S_TXDROPCNTCH0RCVD 0 +#define M_TXDROPCNTCH0RCVD 0xffff +#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD) +#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \ + M_TXDROPCNTCH0RCVD) + +#define A_TP_PROXY_FLOW_CNTL 0x4b0 + +#define A_TP_EMBED_OP_FIELD0 0x4e8 +#define A_TP_EMBED_OP_FIELD1 0x4ec +#define A_TP_EMBED_OP_FIELD2 0x4f0 +#define A_TP_EMBED_OP_FIELD3 0x4f4 +#define A_TP_EMBED_OP_FIELD4 0x4f8 +#define A_TP_EMBED_OP_FIELD5 0x4fc + +#define A_ULPRX_CTL 0x500 + +#define S_ROUND_ROBIN 4 +#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN) +#define F_ROUND_ROBIN V_ROUND_ROBIN(1U) + 
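Single-bit fields use the same convention with a pre-shifted F_ mask, so setting or clearing them is a plain OR / AND-NOT on the register word. A hypothetical sketch using A_ULPRX_CTL and F_ROUND_ROBIN from just above; whether the driver actually toggles this particular bit at runtime is not shown in this patch.

/* Illustrative only: set or clear a single-bit F_ flag. */
static void example_toggle_ulprx_round_robin(struct adapter *adap, int enable)
{
	u32 ctl = t3_read_reg(adap, A_ULPRX_CTL);

	if (enable)
		ctl |= F_ROUND_ROBIN;
	else
		ctl &= ~F_ROUND_ROBIN;

	t3_write_reg(adap, A_ULPRX_CTL, ctl);
}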
+#define A_ULPRX_INT_ENABLE 0x504 + +#define S_DATASELFRAMEERR0 7 +#define V_DATASELFRAMEERR0(x) ((x) << S_DATASELFRAMEERR0) +#define F_DATASELFRAMEERR0 V_DATASELFRAMEERR0(1U) + +#define S_DATASELFRAMEERR1 6 +#define V_DATASELFRAMEERR1(x) ((x) << S_DATASELFRAMEERR1) +#define F_DATASELFRAMEERR1 V_DATASELFRAMEERR1(1U) + +#define S_PCMDMUXPERR 5 +#define V_PCMDMUXPERR(x) ((x) << S_PCMDMUXPERR) +#define F_PCMDMUXPERR V_PCMDMUXPERR(1U) + +#define S_ARBFPERR 4 +#define V_ARBFPERR(x) ((x) << S_ARBFPERR) +#define F_ARBFPERR V_ARBFPERR(1U) + +#define S_ARBPF0PERR 3 +#define V_ARBPF0PERR(x) ((x) << S_ARBPF0PERR) +#define F_ARBPF0PERR V_ARBPF0PERR(1U) + +#define S_ARBPF1PERR 2 +#define V_ARBPF1PERR(x) ((x) << S_ARBPF1PERR) +#define F_ARBPF1PERR V_ARBPF1PERR(1U) + +#define S_PARERRPCMD 1 +#define V_PARERRPCMD(x) ((x) << S_PARERRPCMD) +#define F_PARERRPCMD V_PARERRPCMD(1U) + +#define S_PARERRDATA 0 +#define V_PARERRDATA(x) ((x) << S_PARERRDATA) +#define F_PARERRDATA V_PARERRDATA(1U) + +#define A_ULPRX_INT_CAUSE 0x508 + +#define A_ULPRX_ISCSI_LLIMIT 0x50c + +#define A_ULPRX_ISCSI_ULIMIT 0x510 + +#define A_ULPRX_ISCSI_TAGMASK 0x514 + +#define A_ULPRX_ISCSI_PSZ 0x518 + +#define A_ULPRX_TDDP_LLIMIT 0x51c + +#define A_ULPRX_TDDP_ULIMIT 0x520 +#define A_ULPRX_TDDP_PSZ 0x528 + +#define S_HPZ0 0 +#define M_HPZ0 0xf +#define V_HPZ0(x) ((x) << S_HPZ0) +#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) + +#define A_ULPRX_STAG_LLIMIT 0x52c + +#define A_ULPRX_STAG_ULIMIT 0x530 + +#define A_ULPRX_RQ_LLIMIT 0x534 + +#define A_ULPRX_RQ_ULIMIT 0x538 + +#define A_ULPRX_PBL_LLIMIT 0x53c + +#define A_ULPRX_PBL_ULIMIT 0x540 + +#define A_ULPRX_TDDP_TAGMASK 0x524 + +#define A_ULPTX_CONFIG 0x580 + +#define S_CFG_CQE_SOP_MASK 1 +#define V_CFG_CQE_SOP_MASK(x) ((x) << S_CFG_CQE_SOP_MASK) +#define F_CFG_CQE_SOP_MASK V_CFG_CQE_SOP_MASK(1U) + +#define S_CFG_RR_ARB 0 +#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB) +#define F_CFG_RR_ARB V_CFG_RR_ARB(1U) + +#define A_ULPTX_INT_ENABLE 0x584 + +#define S_PBL_BOUND_ERR_CH1 1 +#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1) +#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U) + +#define S_PBL_BOUND_ERR_CH0 0 +#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0) +#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U) + +#define A_ULPTX_INT_CAUSE 0x588 + +#define A_ULPTX_TPT_LLIMIT 0x58c + +#define A_ULPTX_TPT_ULIMIT 0x590 + +#define A_ULPTX_PBL_LLIMIT 0x594 + +#define A_ULPTX_PBL_ULIMIT 0x598 + +#define A_ULPTX_DMA_WEIGHT 0x5ac + +#define S_D1_WEIGHT 16 +#define M_D1_WEIGHT 0xffff +#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT) + +#define S_D0_WEIGHT 0 +#define M_D0_WEIGHT 0xffff +#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT) + +#define A_PM1_RX_CFG 0x5c0 +#define A_PM1_RX_MODE 0x5c4 + +#define A_PM1_RX_INT_ENABLE 0x5d8 + +#define S_ZERO_E_CMD_ERROR 18 +#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR) +#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U) + +#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17 +#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR) +#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16 +#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR) +#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_IESPI0_RX_FRAMING_ERROR 15 +#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR) +#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U) + +#define 
S_IESPI1_RX_FRAMING_ERROR 14 +#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR) +#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U) + +#define S_IESPI0_TX_FRAMING_ERROR 13 +#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR) +#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U) + +#define S_IESPI1_TX_FRAMING_ERROR 12 +#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR) +#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U) + +#define S_OCSPI0_RX_FRAMING_ERROR 11 +#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR) +#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U) + +#define S_OCSPI1_RX_FRAMING_ERROR 10 +#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR) +#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U) + +#define S_OCSPI0_TX_FRAMING_ERROR 9 +#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR) +#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U) + +#define S_OCSPI1_TX_FRAMING_ERROR 8 +#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR) +#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U) + +#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7 +#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR) +#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6 +#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) +#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_IESPI_PAR_ERROR 3 +#define M_IESPI_PAR_ERROR 0x7 + +#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR) + +#define S_OCSPI_PAR_ERROR 0 +#define M_OCSPI_PAR_ERROR 0x7 + +#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR) + +#define A_PM1_RX_INT_CAUSE 0x5dc + +#define A_PM1_TX_CFG 0x5e0 +#define A_PM1_TX_MODE 0x5e4 + +#define A_PM1_TX_INT_ENABLE 0x5f8 + +#define S_ZERO_C_CMD_ERROR 18 +#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR) +#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U) + +#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17 +#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR) +#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16 +#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR) +#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U) + +#define S_ICSPI0_RX_FRAMING_ERROR 15 +#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR) +#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U) + +#define S_ICSPI1_RX_FRAMING_ERROR 14 +#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR) +#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U) + +#define S_ICSPI0_TX_FRAMING_ERROR 13 +#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR) +#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U) + +#define S_ICSPI1_TX_FRAMING_ERROR 12 +#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR) +#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U) + +#define S_OESPI0_RX_FRAMING_ERROR 11 +#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR) +#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U) + +#define S_OESPI1_RX_FRAMING_ERROR 10 +#define 
V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR) +#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U) + +#define S_OESPI0_TX_FRAMING_ERROR 9 +#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR) +#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U) + +#define S_OESPI1_TX_FRAMING_ERROR 8 +#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR) +#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U) + +#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7 +#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR) +#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6 +#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR) +#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U) + +#define S_ICSPI_PAR_ERROR 3 +#define M_ICSPI_PAR_ERROR 0x7 + +#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR) + +#define S_OESPI_PAR_ERROR 0 +#define M_OESPI_PAR_ERROR 0x7 + +#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR) + +#define A_PM1_TX_INT_CAUSE 0x5fc + +#define A_MPS_CFG 0x600 + +#define S_TPRXPORTEN 4 +#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN) +#define F_TPRXPORTEN V_TPRXPORTEN(1U) + +#define S_TPTXPORT1EN 3 +#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN) +#define F_TPTXPORT1EN V_TPTXPORT1EN(1U) + +#define S_TPTXPORT0EN 2 +#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN) +#define F_TPTXPORT0EN V_TPTXPORT0EN(1U) + +#define S_PORT1ACTIVE 1 +#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE) +#define F_PORT1ACTIVE V_PORT1ACTIVE(1U) + +#define S_PORT0ACTIVE 0 +#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE) +#define F_PORT0ACTIVE V_PORT0ACTIVE(1U) + +#define S_ENFORCEPKT 11 +#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT) +#define F_ENFORCEPKT V_ENFORCEPKT(1U) + +#define A_MPS_INT_ENABLE 0x61c + +#define S_MCAPARERRENB 6 +#define M_MCAPARERRENB 0x7 + +#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB) + +#define S_RXTPPARERRENB 4 +#define M_RXTPPARERRENB 0x3 + +#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB) + +#define S_TX1TPPARERRENB 2 +#define M_TX1TPPARERRENB 0x3 + +#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB) + +#define S_TX0TPPARERRENB 0 +#define M_TX0TPPARERRENB 0x3 + +#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB) + +#define A_MPS_INT_CAUSE 0x620 + +#define S_MCAPARERR 6 +#define M_MCAPARERR 0x7 + +#define V_MCAPARERR(x) ((x) << S_MCAPARERR) + +#define S_RXTPPARERR 4 +#define M_RXTPPARERR 0x3 + +#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR) + +#define S_TX1TPPARERR 2 +#define M_TX1TPPARERR 0x3 + +#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR) + +#define S_TX0TPPARERR 0 +#define M_TX0TPPARERR 0x3 + +#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR) + +#define A_CPL_SWITCH_CNTRL 0x640 + +#define A_CPL_INTR_ENABLE 0x650 + +#define S_CIM_OP_MAP_PERR 5 +#define V_CIM_OP_MAP_PERR(x) ((x) << S_CIM_OP_MAP_PERR) +#define F_CIM_OP_MAP_PERR V_CIM_OP_MAP_PERR(1U) + +#define S_CIM_OVFL_ERROR 4 +#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR) +#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U) + +#define S_TP_FRAMING_ERROR 3 +#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR) +#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U) + +#define S_SGE_FRAMING_ERROR 2 +#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR) +#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U) + +#define S_CIM_FRAMING_ERROR 1 +#define V_CIM_FRAMING_ERROR(x) ((x) << 
S_CIM_FRAMING_ERROR) +#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U) + +#define S_ZERO_SWITCH_ERROR 0 +#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR) +#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U) + +#define A_CPL_INTR_CAUSE 0x654 + +#define A_CPL_MAP_TBL_DATA 0x65c + +#define A_SMB_GLOBAL_TIME_CFG 0x660 + +#define A_I2C_CFG 0x6a0 + +#define S_I2C_CLKDIV 0 +#define M_I2C_CLKDIV 0xfff +#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV) + +#define A_MI1_CFG 0x6b0 + +#define S_CLKDIV 5 +#define M_CLKDIV 0xff +#define V_CLKDIV(x) ((x) << S_CLKDIV) + +#define S_ST 3 + +#define M_ST 0x3 + +#define V_ST(x) ((x) << S_ST) + +#define G_ST(x) (((x) >> S_ST) & M_ST) + +#define S_PREEN 2 +#define V_PREEN(x) ((x) << S_PREEN) +#define F_PREEN V_PREEN(1U) + +#define S_MDIINV 1 +#define V_MDIINV(x) ((x) << S_MDIINV) +#define F_MDIINV V_MDIINV(1U) + +#define S_MDIEN 0 +#define V_MDIEN(x) ((x) << S_MDIEN) +#define F_MDIEN V_MDIEN(1U) + +#define A_MI1_ADDR 0x6b4 + +#define S_PHYADDR 5 +#define M_PHYADDR 0x1f +#define V_PHYADDR(x) ((x) << S_PHYADDR) + +#define S_REGADDR 0 +#define M_REGADDR 0x1f +#define V_REGADDR(x) ((x) << S_REGADDR) + +#define A_MI1_DATA 0x6b8 + +#define A_MI1_OP 0x6bc + +#define S_MDI_OP 0 +#define M_MDI_OP 0x3 +#define V_MDI_OP(x) ((x) << S_MDI_OP) + +#define A_SF_DATA 0x6d8 + +#define A_SF_OP 0x6dc + +#define S_BYTECNT 1 +#define M_BYTECNT 0x3 +#define V_BYTECNT(x) ((x) << S_BYTECNT) + +#define A_PL_INT_ENABLE0 0x6e0 + +#define S_T3DBG 23 +#define V_T3DBG(x) ((x) << S_T3DBG) +#define F_T3DBG V_T3DBG(1U) + +#define S_XGMAC0_1 20 +#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1) +#define F_XGMAC0_1 V_XGMAC0_1(1U) + +#define S_XGMAC0_0 19 +#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0) +#define F_XGMAC0_0 V_XGMAC0_0(1U) + +#define S_MC5A 18 +#define V_MC5A(x) ((x) << S_MC5A) +#define F_MC5A V_MC5A(1U) + +#define S_CPL_SWITCH 12 +#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH) +#define F_CPL_SWITCH V_CPL_SWITCH(1U) + +#define S_MPS0 11 +#define V_MPS0(x) ((x) << S_MPS0) +#define F_MPS0 V_MPS0(1U) + +#define S_PM1_TX 10 +#define V_PM1_TX(x) ((x) << S_PM1_TX) +#define F_PM1_TX V_PM1_TX(1U) + +#define S_PM1_RX 9 +#define V_PM1_RX(x) ((x) << S_PM1_RX) +#define F_PM1_RX V_PM1_RX(1U) + +#define S_ULP2_TX 8 +#define V_ULP2_TX(x) ((x) << S_ULP2_TX) +#define F_ULP2_TX V_ULP2_TX(1U) + +#define S_ULP2_RX 7 +#define V_ULP2_RX(x) ((x) << S_ULP2_RX) +#define F_ULP2_RX V_ULP2_RX(1U) + +#define S_TP1 6 +#define V_TP1(x) ((x) << S_TP1) +#define F_TP1 V_TP1(1U) + +#define S_CIM 5 +#define V_CIM(x) ((x) << S_CIM) +#define F_CIM V_CIM(1U) + +#define S_MC7_CM 4 +#define V_MC7_CM(x) ((x) << S_MC7_CM) +#define F_MC7_CM V_MC7_CM(1U) + +#define S_MC7_PMTX 3 +#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX) +#define F_MC7_PMTX V_MC7_PMTX(1U) + +#define S_MC7_PMRX 2 +#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX) +#define F_MC7_PMRX V_MC7_PMRX(1U) + +#define S_PCIM0 1 +#define V_PCIM0(x) ((x) << S_PCIM0) +#define F_PCIM0 V_PCIM0(1U) + +#define S_SGE3 0 +#define V_SGE3(x) ((x) << S_SGE3) +#define F_SGE3 V_SGE3(1U) + +#define A_PL_INT_CAUSE0 0x6e4 + +#define A_PL_RST 0x6f0 + +#define S_FATALPERREN 4 +#define V_FATALPERREN(x) ((x) << S_FATALPERREN) +#define F_FATALPERREN V_FATALPERREN(1U) + +#define S_CRSTWRM 1 +#define V_CRSTWRM(x) ((x) << S_CRSTWRM) +#define F_CRSTWRM V_CRSTWRM(1U) + +#define A_PL_REV 0x6f4 + +#define A_PL_CLI 0x6f8 + +#define A_MC5_DB_CONFIG 0x704 + +#define S_TMTYPEHI 30 +#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI) +#define F_TMTYPEHI V_TMTYPEHI(1U) + +#define S_TMPARTSIZE 28 +#define M_TMPARTSIZE 0x3 
+#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE) +#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE) + +#define S_TMTYPE 26 +#define M_TMTYPE 0x3 +#define V_TMTYPE(x) ((x) << S_TMTYPE) +#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE) + +#define S_COMPEN 17 +#define V_COMPEN(x) ((x) << S_COMPEN) +#define F_COMPEN V_COMPEN(1U) + +#define S_PRTYEN 6 +#define V_PRTYEN(x) ((x) << S_PRTYEN) +#define F_PRTYEN V_PRTYEN(1U) + +#define S_MBUSEN 5 +#define V_MBUSEN(x) ((x) << S_MBUSEN) +#define F_MBUSEN V_MBUSEN(1U) + +#define S_DBGIEN 4 +#define V_DBGIEN(x) ((x) << S_DBGIEN) +#define F_DBGIEN V_DBGIEN(1U) + +#define S_TMRDY 2 +#define V_TMRDY(x) ((x) << S_TMRDY) +#define F_TMRDY V_TMRDY(1U) + +#define S_TMRST 1 +#define V_TMRST(x) ((x) << S_TMRST) +#define F_TMRST V_TMRST(1U) + +#define S_TMMODE 0 +#define V_TMMODE(x) ((x) << S_TMMODE) +#define F_TMMODE V_TMMODE(1U) + +#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c + +#define A_MC5_DB_FILTER_TABLE 0x710 + +#define A_MC5_DB_SERVER_INDEX 0x714 + +#define A_MC5_DB_RSP_LATENCY 0x720 + +#define S_RDLAT 16 +#define M_RDLAT 0x1f +#define V_RDLAT(x) ((x) << S_RDLAT) + +#define S_LRNLAT 8 +#define M_LRNLAT 0x1f +#define V_LRNLAT(x) ((x) << S_LRNLAT) + +#define S_SRCHLAT 0 +#define M_SRCHLAT 0x1f +#define V_SRCHLAT(x) ((x) << S_SRCHLAT) + +#define A_MC5_DB_PART_ID_INDEX 0x72c + +#define A_MC5_DB_INT_ENABLE 0x740 + +#define S_DELACTEMPTY 18 +#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY) +#define F_DELACTEMPTY V_DELACTEMPTY(1U) + +#define S_DISPQPARERR 17 +#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR) +#define F_DISPQPARERR V_DISPQPARERR(1U) + +#define S_REQQPARERR 16 +#define V_REQQPARERR(x) ((x) << S_REQQPARERR) +#define F_REQQPARERR V_REQQPARERR(1U) + +#define S_UNKNOWNCMD 15 +#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD) +#define F_UNKNOWNCMD V_UNKNOWNCMD(1U) + +#define S_NFASRCHFAIL 8 +#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL) +#define F_NFASRCHFAIL V_NFASRCHFAIL(1U) + +#define S_ACTRGNFULL 7 +#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL) +#define F_ACTRGNFULL V_ACTRGNFULL(1U) + +#define S_PARITYERR 6 +#define V_PARITYERR(x) ((x) << S_PARITYERR) +#define F_PARITYERR V_PARITYERR(1U) + +#define A_MC5_DB_INT_CAUSE 0x744 + +#define A_MC5_DB_DBGI_CONFIG 0x774 + +#define A_MC5_DB_DBGI_REQ_CMD 0x778 + +#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c + +#define A_MC5_DB_DBGI_REQ_ADDR1 0x780 + +#define A_MC5_DB_DBGI_REQ_ADDR2 0x784 + +#define A_MC5_DB_DBGI_REQ_DATA0 0x788 + +#define A_MC5_DB_DBGI_REQ_DATA1 0x78c + +#define A_MC5_DB_DBGI_REQ_DATA2 0x790 + +#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0 + +#define S_DBGIRSPVALID 0 +#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID) +#define F_DBGIRSPVALID V_DBGIRSPVALID(1U) + +#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4 + +#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8 + +#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc + +#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc + +#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0 + +#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4 + +#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8 + +#define A_MC5_DB_SYN_SRCH_CMD 0x7dc + +#define A_MC5_DB_SYN_LRN_CMD 0x7e0 + +#define A_MC5_DB_ACK_SRCH_CMD 0x7e4 + +#define A_MC5_DB_ACK_LRN_CMD 0x7e8 + +#define A_MC5_DB_ILOOKUP_CMD 0x7ec + +#define A_MC5_DB_ELOOKUP_CMD 0x7f0 + +#define A_MC5_DB_DATA_WRITE_CMD 0x7f4 + +#define A_MC5_DB_DATA_READ_CMD 0x7f8 + +#define XGMAC0_0_BASE_ADDR 0x800 + +#define A_XGM_TX_CTRL 0x800 + +#define S_TXEN 0 +#define V_TXEN(x) ((x) << S_TXEN) +#define F_TXEN V_TXEN(1U) + +#define A_XGM_TX_CFG 0x804 + +#define S_TXPAUSEEN 0 +#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN) 
+#define F_TXPAUSEEN V_TXPAUSEEN(1U) + +#define A_XGM_TX_PAUSE_QUANTA 0x808 + +#define A_XGM_RX_CTRL 0x80c + +#define S_RXEN 0 +#define V_RXEN(x) ((x) << S_RXEN) +#define F_RXEN V_RXEN(1U) + +#define A_XGM_RX_CFG 0x810 + +#define S_DISPAUSEFRAMES 9 +#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES) +#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U) + +#define S_EN1536BFRAMES 8 +#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES) +#define F_EN1536BFRAMES V_EN1536BFRAMES(1U) + +#define S_ENJUMBO 7 +#define V_ENJUMBO(x) ((x) << S_ENJUMBO) +#define F_ENJUMBO V_ENJUMBO(1U) + +#define S_RMFCS 6 +#define V_RMFCS(x) ((x) << S_RMFCS) +#define F_RMFCS V_RMFCS(1U) + +#define S_ENHASHMCAST 2 +#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST) +#define F_ENHASHMCAST V_ENHASHMCAST(1U) + +#define S_COPYALLFRAMES 0 +#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES) +#define F_COPYALLFRAMES V_COPYALLFRAMES(1U) + +#define S_DISBCAST 1 +#define V_DISBCAST(x) ((x) << S_DISBCAST) +#define F_DISBCAST V_DISBCAST(1U) + +#define A_XGM_RX_HASH_LOW 0x814 + +#define A_XGM_RX_HASH_HIGH 0x818 + +#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c + +#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820 + +#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824 + +#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c + +#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834 + +#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c + +#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844 + +#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c + +#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854 + +#define A_XGM_INT_STATUS 0x86c + +#define S_LINKFAULTCHANGE 9 +#define V_LINKFAULTCHANGE(x) ((x) << S_LINKFAULTCHANGE) +#define F_LINKFAULTCHANGE V_LINKFAULTCHANGE(1U) + +#define A_XGM_XGM_INT_ENABLE 0x874 +#define A_XGM_XGM_INT_DISABLE 0x878 + +#define A_XGM_STAT_CTRL 0x880 + +#define S_CLRSTATS 2 +#define V_CLRSTATS(x) ((x) << S_CLRSTATS) +#define F_CLRSTATS V_CLRSTATS(1U) + +#define A_XGM_RXFIFO_CFG 0x884 + +#define S_RXFIFO_EMPTY 31 +#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY) +#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U) + +#define S_RXFIFOPAUSEHWM 17 +#define M_RXFIFOPAUSEHWM 0xfff + +#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM) + +#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM) + +#define S_RXFIFOPAUSELWM 5 +#define M_RXFIFOPAUSELWM 0xfff + +#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM) + +#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM) + +#define S_RXSTRFRWRD 1 +#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD) +#define F_RXSTRFRWRD V_RXSTRFRWRD(1U) + +#define S_DISERRFRAMES 0 +#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES) +#define F_DISERRFRAMES V_DISERRFRAMES(1U) + +#define A_XGM_TXFIFO_CFG 0x888 + +#define S_UNDERUNFIX 22 +#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX) +#define F_UNDERUNFIX V_UNDERUNFIX(1U) + +#define S_TXIPG 13 +#define M_TXIPG 0xff +#define V_TXIPG(x) ((x) << S_TXIPG) +#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG) + +#define S_TXFIFOTHRESH 4 +#define M_TXFIFOTHRESH 0x1ff + +#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH) + +#define S_ENDROPPKT 21 +#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT) +#define F_ENDROPPKT V_ENDROPPKT(1U) + +#define A_XGM_SERDES_CTRL 0x890 +#define A_XGM_SERDES_CTRL0 0x8e0 + +#define S_SERDESRESET_ 24 +#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_) +#define F_SERDESRESET_ V_SERDESRESET_(1U) + +#define S_RXENABLE 4 +#define V_RXENABLE(x) ((x) << S_RXENABLE) +#define F_RXENABLE V_RXENABLE(1U) + +#define S_TXENABLE 3 +#define V_TXENABLE(x) ((x) << S_TXENABLE) +#define F_TXENABLE V_TXENABLE(1U) + +#define 
A_XGM_PAUSE_TIMER 0x890 + +#define A_XGM_RGMII_IMP 0x89c + +#define S_XGM_IMPSETUPDATE 6 +#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE) +#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U) + +#define S_RGMIIIMPPD 3 +#define M_RGMIIIMPPD 0x7 +#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD) + +#define S_RGMIIIMPPU 0 +#define M_RGMIIIMPPU 0x7 +#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU) + +#define S_CALRESET 8 +#define V_CALRESET(x) ((x) << S_CALRESET) +#define F_CALRESET V_CALRESET(1U) + +#define S_CALUPDATE 7 +#define V_CALUPDATE(x) ((x) << S_CALUPDATE) +#define F_CALUPDATE V_CALUPDATE(1U) + +#define A_XGM_XAUI_IMP 0x8a0 + +#define S_CALBUSY 31 +#define V_CALBUSY(x) ((x) << S_CALBUSY) +#define F_CALBUSY V_CALBUSY(1U) + +#define S_XGM_CALFAULT 29 +#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT) +#define F_XGM_CALFAULT V_XGM_CALFAULT(1U) + +#define S_CALIMP 24 +#define M_CALIMP 0x1f +#define V_CALIMP(x) ((x) << S_CALIMP) +#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP) + +#define S_XAUIIMP 0 +#define M_XAUIIMP 0x7 +#define V_XAUIIMP(x) ((x) << S_XAUIIMP) + +#define A_XGM_RX_MAX_PKT_SIZE 0x8a8 + +#define S_RXMAXFRAMERSIZE 17 +#define M_RXMAXFRAMERSIZE 0x3fff +#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE) +#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE) + +#define S_RXENFRAMER 14 +#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER) +#define F_RXENFRAMER V_RXENFRAMER(1U) + +#define S_RXMAXPKTSIZE 0 +#define M_RXMAXPKTSIZE 0x3fff +#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE) +#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE) + +#define A_XGM_RESET_CTRL 0x8ac + +#define S_XGMAC_STOP_EN 4 +#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN) +#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U) + +#define S_XG2G_RESET_ 3 +#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_) +#define F_XG2G_RESET_ V_XG2G_RESET_(1U) + +#define S_RGMII_RESET_ 2 +#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_) +#define F_RGMII_RESET_ V_RGMII_RESET_(1U) + +#define S_PCS_RESET_ 1 +#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_) +#define F_PCS_RESET_ V_PCS_RESET_(1U) + +#define S_MAC_RESET_ 0 +#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_) +#define F_MAC_RESET_ V_MAC_RESET_(1U) + +#define A_XGM_PORT_CFG 0x8b8 + +#define S_CLKDIVRESET_ 3 +#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_) +#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U) + +#define S_PORTSPEED 1 +#define M_PORTSPEED 0x3 + +#define V_PORTSPEED(x) ((x) << S_PORTSPEED) + +#define S_ENRGMII 0 +#define V_ENRGMII(x) ((x) << S_ENRGMII) +#define F_ENRGMII V_ENRGMII(1U) + +#define A_XGM_INT_ENABLE 0x8d4 + +#define S_TXFIFO_PRTY_ERR 17 +#define M_TXFIFO_PRTY_ERR 0x7 + +#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR) + +#define S_RXFIFO_PRTY_ERR 14 +#define M_RXFIFO_PRTY_ERR 0x7 + +#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR) + +#define S_TXFIFO_UNDERRUN 13 +#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN) +#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U) + +#define S_RXFIFO_OVERFLOW 12 +#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW) +#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U) + +#define S_SERDES_LOS 4 +#define M_SERDES_LOS 0xf + +#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS) + +#define S_XAUIPCSCTCERR 3 +#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR) +#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U) + +#define S_XAUIPCSALIGNCHANGE 2 +#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE) +#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U) + +#define S_XGM_INT 0 +#define 
V_XGM_INT(x) ((x) << S_XGM_INT) +#define F_XGM_INT V_XGM_INT(1U) + +#define A_XGM_INT_CAUSE 0x8d8 + +#define A_XGM_XAUI_ACT_CTRL 0x8dc + +#define S_TXACTENABLE 1 +#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE) +#define F_TXACTENABLE V_TXACTENABLE(1U) + +#define S_RESET3 23 +#define V_RESET3(x) ((x) << S_RESET3) +#define F_RESET3 V_RESET3(1U) + +#define S_RESET2 22 +#define V_RESET2(x) ((x) << S_RESET2) +#define F_RESET2 V_RESET2(1U) + +#define S_RESET1 21 +#define V_RESET1(x) ((x) << S_RESET1) +#define F_RESET1 V_RESET1(1U) + +#define S_RESET0 20 +#define V_RESET0(x) ((x) << S_RESET0) +#define F_RESET0 V_RESET0(1U) + +#define S_PWRDN3 19 +#define V_PWRDN3(x) ((x) << S_PWRDN3) +#define F_PWRDN3 V_PWRDN3(1U) + +#define S_PWRDN2 18 +#define V_PWRDN2(x) ((x) << S_PWRDN2) +#define F_PWRDN2 V_PWRDN2(1U) + +#define S_PWRDN1 17 +#define V_PWRDN1(x) ((x) << S_PWRDN1) +#define F_PWRDN1 V_PWRDN1(1U) + +#define S_PWRDN0 16 +#define V_PWRDN0(x) ((x) << S_PWRDN0) +#define F_PWRDN0 V_PWRDN0(1U) + +#define S_RESETPLL23 15 +#define V_RESETPLL23(x) ((x) << S_RESETPLL23) +#define F_RESETPLL23 V_RESETPLL23(1U) + +#define S_RESETPLL01 14 +#define V_RESETPLL01(x) ((x) << S_RESETPLL01) +#define F_RESETPLL01 V_RESETPLL01(1U) + +#define A_XGM_SERDES_STAT0 0x8f0 +#define A_XGM_SERDES_STAT1 0x8f4 +#define A_XGM_SERDES_STAT2 0x8f8 + +#define S_LOWSIG0 0 +#define V_LOWSIG0(x) ((x) << S_LOWSIG0) +#define F_LOWSIG0 V_LOWSIG0(1U) + +#define A_XGM_SERDES_STAT3 0x8fc + +#define A_XGM_STAT_TX_BYTE_LOW 0x900 + +#define A_XGM_STAT_TX_BYTE_HIGH 0x904 + +#define A_XGM_STAT_TX_FRAME_LOW 0x908 + +#define A_XGM_STAT_TX_FRAME_HIGH 0x90c + +#define A_XGM_STAT_TX_BCAST 0x910 + +#define A_XGM_STAT_TX_MCAST 0x914 + +#define A_XGM_STAT_TX_PAUSE 0x918 + +#define A_XGM_STAT_TX_64B_FRAMES 0x91c + +#define A_XGM_STAT_TX_65_127B_FRAMES 0x920 + +#define A_XGM_STAT_TX_128_255B_FRAMES 0x924 + +#define A_XGM_STAT_TX_256_511B_FRAMES 0x928 + +#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c + +#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930 + +#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934 + +#define A_XGM_STAT_TX_ERR_FRAMES 0x938 + +#define A_XGM_STAT_RX_BYTES_LOW 0x93c + +#define A_XGM_STAT_RX_BYTES_HIGH 0x940 + +#define A_XGM_STAT_RX_FRAMES_LOW 0x944 + +#define A_XGM_STAT_RX_FRAMES_HIGH 0x948 + +#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c + +#define A_XGM_STAT_RX_MCAST_FRAMES 0x950 + +#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954 + +#define A_XGM_STAT_RX_64B_FRAMES 0x958 + +#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c + +#define A_XGM_STAT_RX_128_255B_FRAMES 0x960 + +#define A_XGM_STAT_RX_256_511B_FRAMES 0x964 + +#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968 + +#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c + +#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970 + +#define A_XGM_STAT_RX_SHORT_FRAMES 0x974 + +#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978 + +#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c + +#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980 + +#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984 + +#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988 + +#define A_XGM_SERDES_STATUS0 0x98c + +#define A_XGM_SERDES_STATUS1 0x990 + +#define S_CMULOCK 31 +#define V_CMULOCK(x) ((x) << S_CMULOCK) +#define F_CMULOCK V_CMULOCK(1U) + +#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4 + +#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8 + +#define S_TXSPI4SOPCNT 16 +#define M_TXSPI4SOPCNT 0xffff +#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT) +#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT) + +#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac + +#define XGMAC0_1_BASE_ADDR 0xa00 diff --git 
a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c new file mode 100644 index 000000000..e4b5b057f --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -0,0 +1,3305 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/prefetch.h> +#include <net/arp.h> +#include "common.h" +#include "regs.h" +#include "sge_defs.h" +#include "t3_cpl.h" +#include "firmware_exports.h" +#include "cxgb3_offload.h" + +#define USE_GTS 0 + +#define SGE_RX_SM_BUF_SIZE 1536 + +#define SGE_RX_COPY_THRES 256 +#define SGE_RX_PULL_LEN 128 + +#define SGE_PG_RSVD SMP_CACHE_BYTES +/* + * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. + * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs + * directly. + */ +#define FL0_PG_CHUNK_SIZE 2048 +#define FL0_PG_ORDER 0 +#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) +#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192) +#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1) +#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER) + +#define SGE_RX_DROP_THRES 16 +#define RX_RECLAIM_PERIOD (HZ/4) + +/* + * Max number of Rx buffers we replenish at a time. + */ +#define MAX_RX_REFILL 16U +/* + * Period of the Tx buffer reclaim timer. This timer does not need to run + * frequently as Tx buffers are usually reclaimed by new Tx packets. + */ +#define TX_RECLAIM_PERIOD (HZ / 4) +#define TX_RECLAIM_TIMER_CHUNK 64U +#define TX_RECLAIM_CHUNK 16U + +/* WR size in bytes */ +#define WR_LEN (WR_FLITS * 8) + +/* + * Types of Tx queues in each queue set. Order here matters, do not change.
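+ * + * TXQ_ETH is the Ethernet (tunnel) queue, TXQ_OFLD the offload queue and + * TXQ_CTRL the control queue. The values double as indices into + * sge_qset.txq[] and as bit numbers in sge_qset.txq_stopped (see + * t3_eth_xmit() and check_desc_avail() below).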
+ */ +enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL }; + +/* Values for sge_txq.flags */ +enum { + TXQ_RUNNING = 1 << 0, /* fetch engine is running */ + TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */ +}; + +struct tx_desc { + __be64 flit[TX_DESC_FLITS]; +}; + +struct rx_desc { + __be32 addr_lo; + __be32 len_gen; + __be32 gen2; + __be32 addr_hi; +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct sk_buff *skb; + u8 eop; /* set if last descriptor for packet */ + u8 addr_idx; /* buffer index of first SGL entry in descriptor */ + u8 fragidx; /* first page fragment associated with descriptor */ + s8 sflit; /* start flit of first SGL entry in descriptor */ +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + union { + struct sk_buff *skb; + struct fl_pg_chunk pg_chunk; + }; + DEFINE_DMA_UNMAP_ADDR(dma_addr); +}; + +struct rsp_desc { /* response queue descriptor */ + struct rss_header rss_hdr; + __be32 flags; + __be32 len_cq; + u8 imm_data[47]; + u8 intr_gen; +}; + +/* + * Holds unmapping information for Tx packets that need deferred unmapping. + * This structure lives at skb->head and must be allocated by callers. + */ +struct deferred_unmap_info { + struct pci_dev *pdev; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; +}; + +/* + * Maps a number of flits to the number of Tx descriptors that can hold them. + * The formula is + * + * desc = 1 + (flits - 2) / (WR_FLITS - 1). + * + * HW allows up to 4 descriptors to be combined into a WR. + */ +static u8 flit_desc_map[] = { + 0, +#if SGE_NUM_GENBITS == 1 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 +#elif SGE_NUM_GENBITS == 2 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, +#else +# error "SGE_NUM_GENBITS must be 1 or 2" +#endif +}; + +static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) +{ + return container_of(q, struct sge_qset, fl[qidx]); +} + +static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) +{ + return container_of(q, struct sge_qset, rspq); +} + +static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) +{ + return container_of(q, struct sge_qset, txq[qidx]); +} + +/** + * refill_rspq - replenish an SGE response queue + * @adapter: the adapter + * @q: the response queue to replenish + * @credits: how many new responses to make available + * + * Replenishes a response queue by making the supplied number of responses + * available to HW. + */ +static inline void refill_rspq(struct adapter *adapter, + const struct sge_rspq *q, unsigned int credits) +{ + rmb(); + t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN, + V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); +} + +/** + * need_skb_unmap - does the platform need unmapping of sk_buffs? + * + * Returns true if the platform needs sk_buff unmapping. The compiler + * optimizes away unnecessary code if this returns true. + */ +static inline int need_skb_unmap(void) +{ +#ifdef CONFIG_NEED_DMA_MAP_STATE + return 1; +#else + return 0; +#endif +} + +/** + * unmap_skb - unmap a packet main body and its page fragments + * @skb: the packet + * @q: the Tx queue containing Tx descriptors for the packet + * @cidx: index of Tx descriptor + * @pdev: the PCI device + * + * Unmap the main body of an sk_buff and its page fragments, if any. 
+ * Because of the fairly complicated structure of our SGLs and the desire + * to conserve space for metadata, the information necessary to unmap an + * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx + * descriptors (the physical addresses of the various data buffers), and + * the SW descriptor state (assorted indices). The send functions + * initialize the indices for the first packet descriptor so we can unmap + * the buffers held in the first Tx descriptor here, and we have enough + * information at this point to set the state for the next Tx descriptor. + * + * Note that it is possible to clean up the first descriptor of a packet + * before the send routines have written the next descriptors, but this + * race does not cause any problem. We just end up writing the unmapping + * info for the descriptor first. + */ +static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, + unsigned int cidx, struct pci_dev *pdev) +{ + const struct sg_ent *sgp; + struct tx_sw_desc *d = &q->sdesc[cidx]; + int nfrags, frag_idx, curflit, j = d->addr_idx; + + sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; + frag_idx = d->fragidx; + + if (frag_idx == 0 && skb_headlen(skb)) { + pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), + skb_headlen(skb), PCI_DMA_TODEVICE); + j = 1; + } + + curflit = d->sflit + 1 + j; + nfrags = skb_shinfo(skb)->nr_frags; + + while (frag_idx < nfrags && curflit < WR_FLITS) { + pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), + skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), + PCI_DMA_TODEVICE); + j ^= 1; + if (j == 0) { + sgp++; + curflit++; + } + curflit++; + frag_idx++; + } + + if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */ + d = cidx + 1 == q->size ? q->sdesc : d + 1; + d->fragidx = frag_idx; + d->addr_idx = j; + d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ + } +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @adapter: the adapter + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, + unsigned int n) +{ + struct tx_sw_desc *d; + struct pci_dev *pdev = adapter->pdev; + unsigned int cidx = q->cidx; + + const int need_unmap = need_skb_unmap() && + q->cntxt_id >= FW_TUNNEL_SGEEC_START; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->skb) { /* an SGL is present */ + if (need_unmap) + unmap_skb(d->skb, q, cidx, pdev); + if (d->eop) { + dev_consume_skb_any(d->skb); + d->skb = NULL; + } + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @adapter: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @chunk: maximum number of descriptors to reclaim + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue's lock held. 
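+ * + * At most @chunk descriptors are freed per call (callers pass a small + * bound such as TX_RECLAIM_CHUNK in t3_eth_xmit()), keeping the time the + * queue lock is held short. The return value is the number of completed + * descriptors that still remain to be reclaimed.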
+ */ +static inline unsigned int reclaim_completed_tx(struct adapter *adapter, + struct sge_txq *q, + unsigned int chunk) +{ + unsigned int reclaim = q->processed - q->cleaned; + + reclaim = min(chunk, reclaim); + if (reclaim) { + free_tx_desc(adapter, q, reclaim); + q->cleaned += reclaim; + q->in_use -= reclaim; + } + return q->processed - q->cleaned; +} + +/** + * should_restart_tx - are there enough resources to restart a Tx queue? + * @q: the Tx queue + * + * Checks if there are enough descriptors to restart a suspended Tx queue. + */ +static inline int should_restart_tx(const struct sge_txq *q) +{ + unsigned int r = q->processed - q->cleaned; + + return q->in_use - r < (q->size >> 1); +} + +static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, + struct rx_sw_desc *d) +{ + if (q->use_pages && d->pg_chunk.page) { + (*d->pg_chunk.p_cnt)--; + if (!*d->pg_chunk.p_cnt) + pci_unmap_page(pdev, + d->pg_chunk.mapping, + q->alloc_size, PCI_DMA_FROMDEVICE); + + put_page(d->pg_chunk.page); + d->pg_chunk.page = NULL; + } else { + pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), + q->buf_size, PCI_DMA_FROMDEVICE); + kfree_skb(d->skb); + d->skb = NULL; + } +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @pdev: the PCI device associated with the adapter + * @rxq: the SGE free list to clean up + * + * Release the buffers on an SGE free-buffer Rx queue. HW fetching from + * this queue should be stopped before calling this function. + */ +static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) +{ + unsigned int cidx = q->cidx; + + while (q->credits--) { + struct rx_sw_desc *d = &q->sdesc[cidx]; + + + clear_rx_desc(pdev, q, d); + if (++cidx == q->size) + cidx = 0; + } + + if (q->pg_chunk.page) { + __free_pages(q->pg_chunk.page, q->order); + q->pg_chunk.page = NULL; + } +} + +/** + * add_one_rx_buf - add a packet buffer to a free-buffer list + * @va: buffer start VA + * @len: the buffer length + * @d: the HW Rx descriptor to write + * @sd: the SW Rx descriptor to write + * @gen: the generation bit value + * @pdev: the PCI device associated with the adapter + * + * Add a buffer of the given length to the supplied HW and SW Rx + * descriptors. 
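+ * + * The bus address is split across addr_lo/addr_hi and separated from the + * len_gen/gen2 writes by a dma_wmb(), so the generation bits that make + * the descriptor visible to HW are never written before the address they + * guard. Returns 0 on success or -ENOMEM if the DMA mapping fails.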
+ */ +static inline int add_one_rx_buf(void *va, unsigned int len, + struct rx_desc *d, struct rx_sw_desc *sd, + unsigned int gen, struct pci_dev *pdev) +{ + dma_addr_t mapping; + + mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(pdev, mapping))) + return -ENOMEM; + + dma_unmap_addr_set(sd, dma_addr, mapping); + + d->addr_lo = cpu_to_be32(mapping); + d->addr_hi = cpu_to_be32((u64) mapping >> 32); + dma_wmb(); + d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); + d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); + return 0; +} + +static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, + unsigned int gen) +{ + d->addr_lo = cpu_to_be32(mapping); + d->addr_hi = cpu_to_be32((u64) mapping >> 32); + dma_wmb(); + d->len_gen = cpu_to_be32(V_FLD_GEN1(gen)); + d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); + return 0; +} + +static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, + struct rx_sw_desc *sd, gfp_t gfp, + unsigned int order) +{ + if (!q->pg_chunk.page) { + dma_addr_t mapping; + + q->pg_chunk.page = alloc_pages(gfp, order); + if (unlikely(!q->pg_chunk.page)) + return -ENOMEM; + q->pg_chunk.va = page_address(q->pg_chunk.page); + q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - + SGE_PG_RSVD; + q->pg_chunk.offset = 0; + mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, + 0, q->alloc_size, PCI_DMA_FROMDEVICE); + q->pg_chunk.mapping = mapping; + } + sd->pg_chunk = q->pg_chunk; + + prefetch(sd->pg_chunk.p_cnt); + + q->pg_chunk.offset += q->buf_size; + if (q->pg_chunk.offset == (PAGE_SIZE << order)) + q->pg_chunk.page = NULL; + else { + q->pg_chunk.va += q->buf_size; + get_page(q->pg_chunk.page); + } + + if (sd->pg_chunk.offset == 0) + *sd->pg_chunk.p_cnt = 1; + else + *sd->pg_chunk.p_cnt += 1; + + return 0; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= q->credits / 4) { + q->pend_cred = 0; + wmb(); + t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); + } +} + +/** + * refill_fl - refill an SGE free-buffer list + * @adapter: the adapter + * @q: the free-list to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for allocating new buffers + * + * (Re)populate an SGE free-buffer list with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. 
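+ * + * Returns the number of buffers actually added, which may be smaller + * than @n if an allocation or DMA mapping fails. New credits reach the + * HW lazily: ring_fl_db() rings the doorbell only once at least a + * quarter of the list's credits are pending.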
+ */ +static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) +{ + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + struct rx_desc *d = &q->desc[q->pidx]; + unsigned int count = 0; + + while (n--) { + dma_addr_t mapping; + int err; + + if (q->use_pages) { + if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, + q->order))) { +nomem: q->alloc_failed++; + break; + } + mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; + dma_unmap_addr_set(sd, dma_addr, mapping); + + add_one_rx_chunk(mapping, d, q->gen); + pci_dma_sync_single_for_device(adap->pdev, mapping, + q->buf_size - SGE_PG_RSVD, + PCI_DMA_FROMDEVICE); + } else { + void *buf_start; + + struct sk_buff *skb = alloc_skb(q->buf_size, gfp); + if (!skb) + goto nomem; + + sd->skb = skb; + buf_start = skb->data; + err = add_one_rx_buf(buf_start, q->buf_size, d, sd, + q->gen, adap->pdev); + if (unlikely(err)) { + clear_rx_desc(adap->pdev, q, sd); + break; + } + } + + d++; + sd++; + if (++q->pidx == q->size) { + q->pidx = 0; + q->gen ^= 1; + sd = q->sdesc; + d = q->desc; + } + count++; + } + + q->credits += count; + q->pend_cred += count; + ring_fl_db(adap, q); + + return count; +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), + GFP_ATOMIC | __GFP_COMP); +} + +/** + * recycle_rx_buf - recycle a receive buffer + * @adapter: the adapter + * @q: the SGE free list + * @idx: index of buffer to recycle + * + * Recycles the specified buffer on the given free list by adding it at + * the next available slot on the list. + */ +static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, + unsigned int idx) +{ + struct rx_desc *from = &q->desc[idx]; + struct rx_desc *to = &q->desc[q->pidx]; + + q->sdesc[q->pidx] = q->sdesc[idx]; + to->addr_lo = from->addr_lo; /* already big endian */ + to->addr_hi = from->addr_hi; /* likewise */ + dma_wmb(); + to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); + to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); + + if (++q->pidx == q->size) { + q->pidx = 0; + q->gen ^= 1; + } + + q->credits++; + q->pend_cred++; + ring_fl_db(adap, q); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @pdev: the PCI device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the physical address of the HW ring, and the address + * of the SW ring. 
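+ * + * For illustration only (the names below are an example, not the actual + * call sites), a free list with per-buffer SW state would be allocated + * roughly as + * + * fl->desc = alloc_ring(pdev, fl->size, sizeof(struct rx_desc), + * sizeof(struct rx_sw_desc), &fl->phys_addr, + * &fl->sdesc); + * + * and @metadata may be NULL (with @sw_size 0) when no SW state is needed.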
+ */ +static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t * phys, void *metadata) +{ + size_t len = nelem * elem_size; + void *s = NULL; + void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); + + if (!p) + return NULL; + if (sw_size && metadata) { + s = kcalloc(nelem, sw_size, GFP_KERNEL); + + if (!s) { + dma_free_coherent(&pdev->dev, len, p, *phys); + return NULL; + } + *(void **)metadata = s; + } + memset(p, 0, len); + return p; +} + +/** + * t3_reset_qset - reset a sge qset + * @q: the queue set + * + * Reset the qset structure. + * the NAPI structure is preserved in the event of + * the qset's reincarnation, for example during EEH recovery. + */ +static void t3_reset_qset(struct sge_qset *q) +{ + if (q->adap && + !(q->adap->flags & NAPI_INIT)) { + memset(q, 0, sizeof(*q)); + return; + } + + q->adap = NULL; + memset(&q->rspq, 0, sizeof(q->rspq)); + memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); + memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); + q->txq_stopped = 0; + q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ + q->rx_reclaim_timer.function = NULL; + q->nomem = 0; + napi_free_frags(&q->napi); +} + + +/** + * free_qset - free the resources of an SGE queue set + * @adapter: the adapter owning the queue set + * @q: the queue set + * + * Release the HW and SW resources associated with an SGE queue set, such + * as HW contexts, packet buffers, and descriptor rings. Traffic to the + * queue set must be quiesced prior to calling this. + */ +static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) +{ + int i; + struct pci_dev *pdev = adapter->pdev; + + for (i = 0; i < SGE_RXQ_PER_SET; ++i) + if (q->fl[i].desc) { + spin_lock_irq(&adapter->sge.reg_lock); + t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); + spin_unlock_irq(&adapter->sge.reg_lock); + free_rx_bufs(pdev, &q->fl[i]); + kfree(q->fl[i].sdesc); + dma_free_coherent(&pdev->dev, + q->fl[i].size * + sizeof(struct rx_desc), q->fl[i].desc, + q->fl[i].phys_addr); + } + + for (i = 0; i < SGE_TXQ_PER_SET; ++i) + if (q->txq[i].desc) { + spin_lock_irq(&adapter->sge.reg_lock); + t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); + spin_unlock_irq(&adapter->sge.reg_lock); + if (q->txq[i].sdesc) { + free_tx_desc(adapter, &q->txq[i], + q->txq[i].in_use); + kfree(q->txq[i].sdesc); + } + dma_free_coherent(&pdev->dev, + q->txq[i].size * + sizeof(struct tx_desc), + q->txq[i].desc, q->txq[i].phys_addr); + __skb_queue_purge(&q->txq[i].sendq); + } + + if (q->rspq.desc) { + spin_lock_irq(&adapter->sge.reg_lock); + t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); + spin_unlock_irq(&adapter->sge.reg_lock); + dma_free_coherent(&pdev->dev, + q->rspq.size * sizeof(struct rsp_desc), + q->rspq.desc, q->rspq.phys_addr); + } + + t3_reset_qset(q); +} + +/** + * init_qset_cntxt - initialize an SGE queue set context info + * @qs: the queue set + * @id: the queue set id + * + * Initializes the TIDs and context ids for the queues of a queue set. 
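+ * + * Each queue set owns two free-list contexts (2 * id and 2 * id + 1), one + * response queue context and one egress context per Tx queue type, the + * latter carved out of the per-type FW_*_SGEEC_START ranges.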
+ */ +static void init_qset_cntxt(struct sge_qset *qs, unsigned int id) +{ + qs->rspq.cntxt_id = id; + qs->fl[0].cntxt_id = 2 * id; + qs->fl[1].cntxt_id = 2 * id + 1; + qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; + qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; + qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; + qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; + qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ + return (3 * n) / 2 + (n & 1); +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Calculates the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + BUG_ON(n >= ARRAY_SIZE(flit_desc_map)); + return flit_desc_map[n]; +} + +/** + * get_packet - return the next ingress packet buffer from a free list + * @adap: the adapter that received the packet + * @fl: the SGE free list holding the packet + * @len: the packet length including any SGE padding + * @drop_thres: # of remaining buffers before we start dropping packets + * + * Get the next packet from a free list and complete setup of the + * sk_buff. If the packet is small we make a copy and recycle the + * original buffer, otherwise we use the original buffer itself. If a + * positive drop threshold is supplied packets are dropped and their + * buffers recycled if (a) the number of remaining buffers is under the + * threshold and the packet is too big to copy, or (b) the packet should + * be copied but there is no memory for the copy. + */ +static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, + unsigned int len, unsigned int drop_thres) +{ + struct sk_buff *skb = NULL; + struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + + prefetch(sd->skb->data); + fl->credits--; + + if (len <= SGE_RX_COPY_THRES) { + skb = alloc_skb(len, GFP_ATOMIC); + if (likely(skb != NULL)) { + __skb_put(skb, len); + pci_dma_sync_single_for_cpu(adap->pdev, + dma_unmap_addr(sd, dma_addr), len, + PCI_DMA_FROMDEVICE); + memcpy(skb->data, sd->skb->data, len); + pci_dma_sync_single_for_device(adap->pdev, + dma_unmap_addr(sd, dma_addr), len, + PCI_DMA_FROMDEVICE); + } else if (!drop_thres) + goto use_orig_buf; +recycle: + recycle_rx_buf(adap, fl, fl->cidx); + return skb; + } + + if (unlikely(fl->credits < drop_thres) && + refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), + GFP_ATOMIC | __GFP_COMP) == 0) + goto recycle; + +use_orig_buf: + pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), + fl->buf_size, PCI_DMA_FROMDEVICE); + skb = sd->skb; + skb_put(skb, len); + __refill_fl(adap, fl); + return skb; +} + +/** + * get_packet_pg - return the next ingress packet buffer from a free list + * @adap: the adapter that received the packet + * @fl: the SGE free list holding the packet + * @len: the packet length including any SGE padding + * @drop_thres: # of remaining buffers before we start dropping packets + * + * Get the next packet from a free list populated with page chunks. 
+ * If the packet is small we make a copy and recycle the original buffer, + * otherwise we attach the original buffer as a page fragment to a fresh + * sk_buff. If a positive drop threshold is supplied packets are dropped + * and their buffers recycled if (a) the number of remaining buffers is + * under the threshold and the packet is too big to copy, or (b) there's + * no system memory. + * + * Note: this function is similar to @get_packet but deals with Rx buffers + * that are page chunks rather than sk_buffs. + */ +static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, + struct sge_rspq *q, unsigned int len, + unsigned int drop_thres) +{ + struct sk_buff *newskb, *skb; + struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + + dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr); + + newskb = skb = q->pg_skb; + if (!skb && (len <= SGE_RX_COPY_THRES)) { + newskb = alloc_skb(len, GFP_ATOMIC); + if (likely(newskb != NULL)) { + __skb_put(newskb, len); + pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, + PCI_DMA_FROMDEVICE); + memcpy(newskb->data, sd->pg_chunk.va, len); + pci_dma_sync_single_for_device(adap->pdev, dma_addr, + len, + PCI_DMA_FROMDEVICE); + } else if (!drop_thres) + return NULL; +recycle: + fl->credits--; + recycle_rx_buf(adap, fl, fl->cidx); + q->rx_recycle_buf++; + return newskb; + } + + if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) + goto recycle; + + prefetch(sd->pg_chunk.p_cnt); + + if (!skb) + newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC); + + if (unlikely(!newskb)) { + if (!drop_thres) + return NULL; + goto recycle; + } + + pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, + PCI_DMA_FROMDEVICE); + (*sd->pg_chunk.p_cnt)--; + if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) + pci_unmap_page(adap->pdev, + sd->pg_chunk.mapping, + fl->alloc_size, + PCI_DMA_FROMDEVICE); + if (!skb) { + __skb_put(newskb, SGE_RX_PULL_LEN); + memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); + skb_fill_page_desc(newskb, 0, sd->pg_chunk.page, + sd->pg_chunk.offset + SGE_RX_PULL_LEN, + len - SGE_RX_PULL_LEN); + newskb->len = len; + newskb->data_len = len - SGE_RX_PULL_LEN; + newskb->truesize += newskb->data_len; + } else { + skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags, + sd->pg_chunk.page, + sd->pg_chunk.offset, len); + newskb->len += len; + newskb->data_len += len; + newskb->truesize += len; + } + + fl->credits--; + /* + * We do not refill FLs here, we let the caller do it to overlap a + * prefetch. + */ + return newskb; +} + +/** + * get_imm_packet - return the next ingress packet buffer from a response + * @resp: the response descriptor containing the packet data + * + * Return a packet containing the immediate data of the given response. + */ +static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp) +{ + struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC); + + if (skb) { + __skb_put(skb, IMMED_PKT_SIZE); + skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE); + } + return skb; +} + +/** + * calc_tx_descs - calculate the number of Tx descriptors for a packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given Ethernet + * packet. Ethernet packets require addition of WR and CPL headers. 
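+ * + * As a worked example, a GSO packet with three page fragments needs + * sgl_len(3 + 1) + 2 + 1 = 9 flits, which flits_to_desc() maps to a + * single descriptor, while a packet short enough to travel as immediate + * data (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt)) always takes one.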
+ */ +static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +{ + unsigned int flits; + + if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt)) + return 1; + + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; + if (skb_shinfo(skb)->gso_size) + flits++; + return flits_to_desc(flits); +} + +/** + * make_sgl - populate a scatter/gather list for a packet + * @skb: the packet + * @sgp: the SGL to populate + * @start: start address of skb main body data to include in the SGL + * @len: length of skb main body data to include in the SGL + * @pdev: the PCI device + * + * Generates a scatter/gather list for the buffers that make up a packet + * and returns the SGL size in 8-byte words. The caller must size the SGL + * appropriately. + */ +static inline unsigned int make_sgl(const struct sk_buff *skb, + struct sg_ent *sgp, unsigned char *start, + unsigned int len, struct pci_dev *pdev) +{ + dma_addr_t mapping; + unsigned int i, j = 0, nfrags; + + if (len) { + mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); + sgp->len[0] = cpu_to_be32(len); + sgp->addr[0] = cpu_to_be64(mapping); + j = 1; + } + + nfrags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < nfrags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); + sgp->addr[j] = cpu_to_be64(mapping); + j ^= 1; + if (j == 0) + ++sgp; + } + if (j) + sgp->len[j] = 0; + return ((nfrags + (len != 0)) * 3) / 2 + j; +} + +/** + * check_ring_tx_db - check and potentially ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * + * Ring the doorbell if a Tx queue is asleep. There is a natural race + * where the HW goes to sleep just after we checked; in that case the + * interrupt handler will detect the outstanding TX packet and ring + * the doorbell for us. + * + * When GTS is disabled we unconditionally ring the doorbell. + */ +static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) +{ +#if USE_GTS + clear_bit(TXQ_LAST_PKT_DB, &q->flags); + if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { + set_bit(TXQ_LAST_PKT_DB, &q->flags); + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + } +#else + wmb(); /* write descriptors before telling HW */ + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); +#endif +} + +static inline void wr_gen2(struct tx_desc *d, unsigned int gen) +{ +#if SGE_NUM_GENBITS == 2 + d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); +#endif +} + +/** + * write_wr_hdr_sgl - write a WR header and, optionally, SGL + * @ndesc: number of Tx descriptors spanned by the SGL + * @skb: the packet corresponding to the WR + * @d: first Tx descriptor to be written + * @pidx: index of above descriptors + * @q: the SGE Tx queue + * @sgl: the SGL + * @flits: number of flits to the start of the SGL in the first descriptor + * @sgl_flits: the SGL size in flits + * @gen: the Tx descriptor generation + * @wr_hi: top 32 bits of WR header based on WR type (big endian) + * @wr_lo: low 32 bits of WR header based on WR type (big endian) + * + * Write a work request header and an associated SGL. If the SGL is + * small enough to fit into one Tx descriptor it has already been written + * and we just need to write the WR header. Otherwise we distribute the + * SGL across the number of descriptors it spans.
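+ * + * In the multi-descriptor case the first descriptor's WR length and + * generation (kept in the local wp and ogen variables) are written only + * after all later descriptors have been filled in, so the HW never sees + * a partially written work request.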
+ */ +static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, + struct tx_desc *d, unsigned int pidx, + const struct sge_txq *q, + const struct sg_ent *sgl, + unsigned int flits, unsigned int sgl_flits, + unsigned int gen, __be32 wr_hi, + __be32 wr_lo) +{ + struct work_request_hdr *wrp = (struct work_request_hdr *)d; + struct tx_sw_desc *sd = &q->sdesc[pidx]; + + sd->skb = skb; + if (need_skb_unmap()) { + sd->fragidx = 0; + sd->addr_idx = 0; + sd->sflit = flits; + } + + if (likely(ndesc == 1)) { + sd->eop = 1; + wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | + V_WR_SGLSFLT(flits)) | wr_hi; + dma_wmb(); + wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | + V_WR_GEN(gen)) | wr_lo; + wr_gen2(d, gen); + } else { + unsigned int ogen = gen; + const u64 *fp = (const u64 *)sgl; + struct work_request_hdr *wp = wrp; + + wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | + V_WR_SGLSFLT(flits)) | wr_hi; + + while (sgl_flits) { + unsigned int avail = WR_FLITS - flits; + + if (avail > sgl_flits) + avail = sgl_flits; + memcpy(&d->flit[flits], fp, avail * sizeof(*fp)); + sgl_flits -= avail; + ndesc--; + if (!sgl_flits) + break; + + fp += avail; + d++; + sd->eop = 0; + sd++; + if (++pidx == q->size) { + pidx = 0; + gen ^= 1; + d = q->desc; + sd = q->sdesc; + } + + sd->skb = skb; + wrp = (struct work_request_hdr *)d; + wrp->wr_hi = htonl(V_WR_DATATYPE(1) | + V_WR_SGLSFLT(1)) | wr_hi; + wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, + sgl_flits + 1)) | + V_WR_GEN(gen)) | wr_lo; + wr_gen2(d, gen); + flits = 1; + } + sd->eop = 1; + wrp->wr_hi |= htonl(F_WR_EOP); + dma_wmb(); + wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; + wr_gen2((struct tx_desc *)wp, ogen); + WARN_ON(ndesc != 0); + } +} + +/** + * write_tx_pkt_wr - write a TX_PKT work request + * @adap: the adapter + * @skb: the packet to send + * @pi: the egress interface + * @pidx: index of the first Tx descriptor to write + * @gen: the generation value to use + * @q: the Tx queue + * @ndesc: number of descriptors the packet will occupy + * @compl: the value of the COMPL bit to use + * + * Generate a TX_PKT work request to send the supplied packet. + */ +static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, + const struct port_info *pi, + unsigned int pidx, unsigned int gen, + struct sge_txq *q, unsigned int ndesc, + unsigned int compl) +{ + unsigned int flits, sgl_flits, cntrl, tso_info; + struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; + struct tx_desc *d = &q->desc[pidx]; + struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d; + + cpl->len = htonl(skb->len); + cntrl = V_TXPKT_INTF(pi->port_id); + + if (skb_vlan_tag_present(skb)) + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb)); + + tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); + if (tso_info) { + int eth_type; + struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl; + + d->flit[2] = 0; + cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); + hdr->cntrl = htonl(cntrl); + eth_type = skb_network_offset(skb) == ETH_HLEN ? 
+ CPL_ETH_II : CPL_ETH_II_VLAN; + tso_info |= V_LSO_ETH_TYPE(eth_type) | + V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) | + V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff); + hdr->lso_info = htonl(tso_info); + flits = 3; + } else { + cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); + cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */ + cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL); + cpl->cntrl = htonl(cntrl); + + if (skb->len <= WR_LEN - sizeof(*cpl)) { + q->sdesc[pidx].skb = NULL; + if (!skb->data_len) + skb_copy_from_linear_data(skb, &d->flit[2], + skb->len); + else + skb_copy_bits(skb, 0, &d->flit[2], skb->len); + + flits = (skb->len + 7) / 8 + 2; + cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) | + V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) + | F_WR_SOP | F_WR_EOP | compl); + dma_wmb(); + cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) | + V_WR_TID(q->token)); + wr_gen2(d, gen); + dev_consume_skb_any(skb); + return; + } + + flits = 2; + } + + sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; + sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); + + write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, + htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), + htonl(V_WR_TID(q->token))); +} + +static inline void t3_stop_tx_queue(struct netdev_queue *txq, + struct sge_qset *qs, struct sge_txq *q) +{ + netif_tx_stop_queue(txq); + set_bit(TXQ_ETH, &qs->txq_stopped); + q->stops++; +} + +/** + * eth_xmit - add a packet to the Ethernet Tx queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Tx queue. Runs with softirqs disabled. + */ +netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int qidx; + unsigned int ndesc, pidx, credits, gen, compl; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct netdev_queue *txq; + struct sge_qset *qs; + struct sge_txq *q; + + /* + * The chip min packet length is 9 octets but play safe and reject + * anything shorter than an Ethernet header. + */ + if (unlikely(skb->len < ETH_HLEN)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + qidx = skb_get_queue_mapping(skb); + qs = &pi->qs[qidx]; + q = &qs->txq[TXQ_ETH]; + txq = netdev_get_tx_queue(dev, qidx); + + reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); + + credits = q->size - q->in_use; + ndesc = calc_tx_descs(skb); + + if (unlikely(credits < ndesc)) { + t3_stop_tx_queue(txq, qs, q); + dev_err(&adap->pdev->dev, + "%s: Tx ring %u full while queue awake!\n", + dev->name, q->cntxt_id & 7); + return NETDEV_TX_BUSY; + } + + q->in_use += ndesc; + if (unlikely(credits - ndesc < q->stop_thres)) { + t3_stop_tx_queue(txq, qs, q); + + if (should_restart_tx(q) && + test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { + q->restarts++; + netif_tx_start_queue(txq); + } + } + + gen = q->gen; + q->unacked += ndesc; + compl = (q->unacked & 8) << (S_WR_COMPL - 3); + q->unacked &= 7; + pidx = q->pidx; + q->pidx += ndesc; + if (q->pidx >= q->size) { + q->pidx -= q->size; + q->gen ^= 1; + } + + /* update port statistics */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + qs->port_stats[SGE_PSTAT_TX_CSUM]++; + if (skb_shinfo(skb)->gso_size) + qs->port_stats[SGE_PSTAT_TSO]++; + if (skb_vlan_tag_present(skb)) + qs->port_stats[SGE_PSTAT_VLANINS]++; + + /* + * We do not use Tx completion interrupts to free DMAd Tx packets. 
+ * This is good for performance but means that we rely on new Tx + * packets arriving to run the destructors of completed packets, + * which open up space in their sockets' send queues. Sometimes + * we do not get such new packets causing Tx to stall. A single + * UDP transmitter is a good example of this situation. We have + * a clean up timer that periodically reclaims completed packets + * but it doesn't run often enough (nor do we want it to) to prevent + * lengthy stalls. A solution to this problem is to run the + * destructor early, after the packet is queued but before it's DMAd. + * A cons is that we lie to socket memory accounting, but the amount + * of extra memory is reasonable (limited by the number of Tx + * descriptors), the packets do actually get freed quickly by new + * packets almost always, and for protocols like TCP that wait for + * acks to really free up the data the extra memory is even less. + * On the positive side we run the destructors on the sending CPU + * rather than on a potentially different completing CPU, usually a + * good thing. We also run them without holding our Tx queue lock, + * unlike what reclaim_completed_tx() would otherwise do. + * + * Run the destructor before telling the DMA engine about the packet + * to make sure it doesn't complete and get freed prematurely. + */ + if (likely(!skb_shared(skb))) + skb_orphan(skb); + + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); + check_ring_tx_db(adap, q); + return NETDEV_TX_OK; +} + +/** + * write_imm - write a packet into a Tx descriptor as immediate data + * @d: the Tx descriptor to write + * @skb: the packet + * @len: the length of packet data to write as immediate data + * @gen: the generation bit value to write + * + * Writes a packet as immediate data into a Tx descriptor. The packet + * contains a work request at its beginning. We must write the packet + * carefully so the SGE doesn't read it accidentally before it's written + * in its entirety. + */ +static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, + unsigned int len, unsigned int gen) +{ + struct work_request_hdr *from = (struct work_request_hdr *)skb->data; + struct work_request_hdr *to = (struct work_request_hdr *)d; + + if (likely(!skb->data_len)) + memcpy(&to[1], &from[1], len - sizeof(*from)); + else + skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); + + to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | + V_WR_BCNTLFLT(len & 7)); + dma_wmb(); + to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | + V_WR_LEN((len + 7) / 8)); + wr_gen2(d, gen); + kfree_skb(skb); +} + +/** + * check_desc_avail - check descriptor availability on a send queue + * @adap: the adapter + * @q: the send queue + * @skb: the packet needing the descriptors + * @ndesc: the number of Tx descriptors needed + * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL) + * + * Checks if the requested number of Tx descriptors is available on an + * SGE send queue. If the queue is already suspended or not enough + * descriptors are available the packet is queued for later transmission. + * Must be called with the Tx queue locked. + * + * Returns 0 if enough descriptors are available, 1 if there aren't + * enough descriptors and the packet has been queued, and 2 if the caller + * needs to retry because there weren't enough descriptors at the + * beginning of the call but some freed up in the mean time. 
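+ * + * The intended calling pattern is the one in ctrl_xmit() below: on a + * return of 1 the caller simply drops the queue lock (the packet is + * already on the sendq), on 2 it reclaims completed descriptors and + * retries the check.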
+ */ +static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, + struct sk_buff *skb, unsigned int ndesc, + unsigned int qid) +{ + if (unlikely(!skb_queue_empty(&q->sendq))) { + addq_exit:__skb_queue_tail(&q->sendq, skb); + return 1; + } + if (unlikely(q->size - q->in_use < ndesc)) { + struct sge_qset *qs = txq_to_qset(q, qid); + + set_bit(qid, &qs->txq_stopped); + smp_mb__after_atomic(); + + if (should_restart_tx(q) && + test_and_clear_bit(qid, &qs->txq_stopped)) + return 2; + + q->stops++; + goto addq_exit; + } + return 0; +} + +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any sk_buffs to release. + */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + unsigned int reclaim = q->processed - q->cleaned; + + q->in_use -= reclaim; + q->cleaned += reclaim; +} + +static inline int immediate(const struct sk_buff *skb) +{ + return skb->len <= WR_LEN; +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @adap: the adapter + * @q: the control queue + * @skb: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data in a single Tx + * descriptor and have no page fragments. + */ +static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, + struct sk_buff *skb) +{ + int ret; + struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data; + + if (unlikely(!immediate(skb))) { + WARN_ON(1); + dev_kfree_skb(skb); + return NET_XMIT_SUCCESS; + } + + wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP); + wrp->wr_lo = htonl(V_WR_TID(q->token)); + + spin_lock(&q->lock); + again:reclaim_completed_tx_imm(q); + + ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); + if (unlikely(ret)) { + if (ret == 1) { + spin_unlock(&q->lock); + return NET_XMIT_CN; + } + goto again; + } + + write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); + + q->in_use++; + if (++q->pidx >= q->size) { + q->pidx = 0; + q->gen ^= 1; + } + spin_unlock(&q->lock); + wmb(); + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ctrlq - restart a suspended control queue + * @qs: the queue set cotaining the control queue + * + * Resumes transmission on a suspended Tx control queue. 
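+ *
+ * Runs in tasklet context.  In outline, the hookup done elsewhere in
+ * this file is:
+ *
+ *	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
+ *		     (unsigned long)q);		/* t3_sge_alloc_qset() */
+ *	...
+ *	tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);	/* restart_tx() */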
+ */ +static void restart_ctrlq(unsigned long data) +{ + struct sk_buff *skb; + struct sge_qset *qs = (struct sge_qset *)data; + struct sge_txq *q = &qs->txq[TXQ_CTRL]; + + spin_lock(&q->lock); + again:reclaim_completed_tx_imm(q); + + while (q->in_use < q->size && + (skb = __skb_dequeue(&q->sendq)) != NULL) { + + write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); + + if (++q->pidx >= q->size) { + q->pidx = 0; + q->gen ^= 1; + } + q->in_use++; + } + + if (!skb_queue_empty(&q->sendq)) { + set_bit(TXQ_CTRL, &qs->txq_stopped); + smp_mb__after_atomic(); + + if (should_restart_tx(q) && + test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) + goto again; + q->stops++; + } + + spin_unlock(&q->lock); + wmb(); + t3_write_reg(qs->adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); +} + +/* + * Send a management message through control queue 0 + */ +int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + local_bh_disable(); + ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); + local_bh_enable(); + + return ret; +} + +/** + * deferred_unmap_destructor - unmap a packet when it is freed + * @skb: the packet + * + * This is the packet destructor used for Tx packets that need to remain + * mapped until they are freed rather than until their Tx descriptors are + * freed. + */ +static void deferred_unmap_destructor(struct sk_buff *skb) +{ + int i; + const dma_addr_t *p; + const struct skb_shared_info *si; + const struct deferred_unmap_info *dui; + + dui = (struct deferred_unmap_info *)skb->head; + p = dui->addr; + + if (skb_tail_pointer(skb) - skb_transport_header(skb)) + pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - + skb_transport_header(skb), PCI_DMA_TODEVICE); + + si = skb_shinfo(skb); + for (i = 0; i < si->nr_frags; i++) + pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), + PCI_DMA_TODEVICE); +} + +static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, + const struct sg_ent *sgl, int sgl_flits) +{ + dma_addr_t *p; + struct deferred_unmap_info *dui; + + dui = (struct deferred_unmap_info *)skb->head; + dui->pdev = pdev; + for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) { + *p++ = be64_to_cpu(sgl->addr[0]); + *p++ = be64_to_cpu(sgl->addr[1]); + } + if (sgl_flits) + *p = be64_to_cpu(sgl->addr[0]); +} + +/** + * write_ofld_wr - write an offload work request + * @adap: the adapter + * @skb: the packet to send + * @q: the Tx queue + * @pidx: index of the first Tx descriptor to write + * @gen: the generation value to use + * @ndesc: number of descriptors the packet will occupy + * + * Write an offload work request to send the supplied packet. The packet + * data already carry the work request with most fields populated. + */ +static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, + struct sge_txq *q, unsigned int pidx, + unsigned int gen, unsigned int ndesc) +{ + unsigned int sgl_flits, flits; + struct work_request_hdr *from; + struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; + struct tx_desc *d = &q->desc[pidx]; + + if (immediate(skb)) { + q->sdesc[pidx].skb = NULL; + write_imm(d, skb, skb->len, gen); + return; + } + + /* Only TX_DATA builds SGLs */ + + from = (struct work_request_hdr *)skb->data; + memcpy(&d->flit[1], &from[1], + skb_transport_offset(skb) - sizeof(*from)); + + flits = skb_transport_offset(skb) / 8; + sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; + sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), + skb_tail_pointer(skb) - + skb_transport_header(skb), + adap->pdev); + if (need_skb_unmap()) { + setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); + skb->destructor = deferred_unmap_destructor; + } + + write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, + gen, from->wr_hi, from->wr_lo); +} + +/** + * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given offload + * packet. These packets are already fully constructed. + */ +static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (skb->len <= WR_LEN) + return 1; /* packet fits as immediate data */ + + flits = skb_transport_offset(skb) / 8; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb_tail_pointer(skb) != skb_transport_header(skb)) + cnt++; + return flits_to_desc(flits + sgl_len(cnt)); +} + +/** + * ofld_xmit - send a packet through an offload queue + * @adap: the adapter + * @q: the Tx offload queue + * @skb: the packet + * + * Send an offload packet through an SGE offload queue. + */ +static int ofld_xmit(struct adapter *adap, struct sge_txq *q, + struct sk_buff *skb) +{ + int ret; + unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen; + + spin_lock(&q->lock); +again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); + + ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); + if (unlikely(ret)) { + if (ret == 1) { + skb->priority = ndesc; /* save for restart */ + spin_unlock(&q->lock); + return NET_XMIT_CN; + } + goto again; + } + + gen = q->gen; + q->in_use += ndesc; + pidx = q->pidx; + q->pidx += ndesc; + if (q->pidx >= q->size) { + q->pidx -= q->size; + q->gen ^= 1; + } + spin_unlock(&q->lock); + + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + check_ring_tx_db(adap, q); + return NET_XMIT_SUCCESS; +} + +/** + * restart_offloadq - restart a suspended offload queue + * @qs: the queue set cotaining the offload queue + * + * Resumes transmission on a suspended Tx offload queue. + */ +static void restart_offloadq(unsigned long data) +{ + struct sk_buff *skb; + struct sge_qset *qs = (struct sge_qset *)data; + struct sge_txq *q = &qs->txq[TXQ_OFLD]; + const struct port_info *pi = netdev_priv(qs->netdev); + struct adapter *adap = pi->adapter; + + spin_lock(&q->lock); +again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); + + while ((skb = skb_peek(&q->sendq)) != NULL) { + unsigned int gen, pidx; + unsigned int ndesc = skb->priority; + + if (unlikely(q->size - q->in_use < ndesc)) { + set_bit(TXQ_OFLD, &qs->txq_stopped); + smp_mb__after_atomic(); + + if (should_restart_tx(q) && + test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) + goto again; + q->stops++; + break; + } + + gen = q->gen; + q->in_use += ndesc; + pidx = q->pidx; + q->pidx += ndesc; + if (q->pidx >= q->size) { + q->pidx -= q->size; + q->gen ^= 1; + } + __skb_unlink(skb, &q->sendq); + spin_unlock(&q->lock); + + write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + spin_lock(&q->lock); + } + spin_unlock(&q->lock); + +#if USE_GTS + set_bit(TXQ_RUNNING, &q->flags); + set_bit(TXQ_LAST_PKT_DB, &q->flags); +#endif + wmb(); + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); +} + +/** + * queue_set - return the queue set a packet should use + * @skb: the packet + * + * Maps a packet to the SGE queue set it should use. 
The desired queue + * set is carried in bits 1-3 in the packet's priority. + */ +static inline int queue_set(const struct sk_buff *skb) +{ + return skb->priority >> 1; +} + +/** + * is_ctrl_pkt - return whether an offload packet is a control packet + * @skb: the packet + * + * Determines whether an offload packet should use an OFLD or a CTRL + * Tx queue. This is indicated by bit 0 in the packet's priority. + */ +static inline int is_ctrl_pkt(const struct sk_buff *skb) +{ + return skb->priority & 1; +} + +/** + * t3_offload_tx - send an offload packet + * @tdev: the offload device to send to + * @skb: the packet + * + * Sends an offload packet. We use the packet priority to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-3 select the queue set. + */ +int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb) +{ + struct adapter *adap = tdev2adap(tdev); + struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; + + if (unlikely(is_ctrl_pkt(skb))) + return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); + + return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); +} + +/** + * offload_enqueue - add an offload packet to an SGE offload receive queue + * @q: the SGE response queue + * @skb: the packet + * + * Add a new offload packet to an SGE response queue's offload packet + * queue. If the packet is the first on the queue it schedules the RX + * softirq to process the queue. + */ +static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) +{ + int was_empty = skb_queue_empty(&q->rx_queue); + + __skb_queue_tail(&q->rx_queue, skb); + + if (was_empty) { + struct sge_qset *qs = rspq_to_qset(q); + + napi_schedule(&qs->napi); + } +} + +/** + * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts + * @tdev: the offload device that will be receiving the packets + * @q: the SGE response queue that assembled the bundle + * @skbs: the partial bundle + * @n: the number of packets in the bundle + * + * Delivers a (partial) bundle of Rx offload packets to an offload device. + */ +static inline void deliver_partial_bundle(struct t3cdev *tdev, + struct sge_rspq *q, + struct sk_buff *skbs[], int n) +{ + if (n) { + q->offload_bundles++; + tdev->recv(tdev, skbs, n); + } +} + +/** + * ofld_poll - NAPI handler for offload packets in interrupt mode + * @dev: the network device doing the polling + * @budget: polling budget + * + * The NAPI handler for offload packets when a response queue is serviced + * by the hard interrupt handler, i.e., when it's operating in non-polling + * mode. Creates small packet batches and sends them through the offload + * receive handler. Batches need to be of modest size as we do prefetches + * on the packets in each. 
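+ *
+ * Whether a queue set polls with this handler or with napi_rx_handler()
+ * is decided when its parameters are applied, roughly:
+ *
+ *	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
+ *
+ * (see t3_update_qset_coalesce() later in this file).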
+ */ +static int ofld_poll(struct napi_struct *napi, int budget) +{ + struct sge_qset *qs = container_of(napi, struct sge_qset, napi); + struct sge_rspq *q = &qs->rspq; + struct adapter *adapter = qs->adap; + int work_done = 0; + + while (work_done < budget) { + struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE]; + struct sk_buff_head queue; + int ngathered; + + spin_lock_irq(&q->lock); + __skb_queue_head_init(&queue); + skb_queue_splice_init(&q->rx_queue, &queue); + if (skb_queue_empty(&queue)) { + napi_complete(napi); + spin_unlock_irq(&q->lock); + return work_done; + } + spin_unlock_irq(&q->lock); + + ngathered = 0; + skb_queue_walk_safe(&queue, skb, tmp) { + if (work_done >= budget) + break; + work_done++; + + __skb_unlink(skb, &queue); + prefetch(skb->data); + skbs[ngathered] = skb; + if (++ngathered == RX_BUNDLE_SIZE) { + q->offload_bundles++; + adapter->tdev.recv(&adapter->tdev, skbs, + ngathered); + ngathered = 0; + } + } + if (!skb_queue_empty(&queue)) { + /* splice remaining packets back onto Rx queue */ + spin_lock_irq(&q->lock); + skb_queue_splice(&queue, &q->rx_queue); + spin_unlock_irq(&q->lock); + } + deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); + } + + return work_done; +} + +/** + * rx_offload - process a received offload packet + * @tdev: the offload device receiving the packet + * @rq: the response queue that received the packet + * @skb: the packet + * @rx_gather: a gather list of packets if we are building a bundle + * @gather_idx: index of the next available slot in the bundle + * + * Process an ingress offload pakcet and add it to the offload ingress + * queue. Returns the index of the next available slot in the bundle. + */ +static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, + struct sk_buff *skb, struct sk_buff *rx_gather[], + unsigned int gather_idx) +{ + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + + if (rq->polling) { + rx_gather[gather_idx++] = skb; + if (gather_idx == RX_BUNDLE_SIZE) { + tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE); + gather_idx = 0; + rq->offload_bundles++; + } + } else + offload_enqueue(rq, skb); + + return gather_idx; +} + +/** + * restart_tx - check whether to restart suspended Tx queues + * @qs: the queue set to resume + * + * Restarts suspended Tx queues of an SGE queue set if they have enough + * free resources to resume operation. + */ +static void restart_tx(struct sge_qset *qs) +{ + if (test_bit(TXQ_ETH, &qs->txq_stopped) && + should_restart_tx(&qs->txq[TXQ_ETH]) && + test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { + qs->txq[TXQ_ETH].restarts++; + if (netif_running(qs->netdev)) + netif_tx_wake_queue(qs->tx_q); + } + + if (test_bit(TXQ_OFLD, &qs->txq_stopped) && + should_restart_tx(&qs->txq[TXQ_OFLD]) && + test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { + qs->txq[TXQ_OFLD].restarts++; + tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk); + } + if (test_bit(TXQ_CTRL, &qs->txq_stopped) && + should_restart_tx(&qs->txq[TXQ_CTRL]) && + test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { + qs->txq[TXQ_CTRL].restarts++; + tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk); + } +} + +/** + * cxgb3_arp_process - process an ARP request probing a private IP address + * @adapter: the adapter + * @skb: the skbuff containing the ARP request + * + * Check if the ARP request is probing the private IP address + * dedicated to iSCSI, generate an ARP reply if so. 
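+ *
+ * Reached from the Rx path only for ports with iSCSI provisioning
+ * enabled, roughly:
+ *
+ *	if (unlikely(pi->iscsic.flags))
+ *		cxgb3_process_iscsi_prov_pack(pi, skb);	/* -> is_arp() -> here */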
+ */ +static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + struct arphdr *arp; + unsigned char *arp_ptr; + unsigned char *sha; + __be32 sip, tip; + + if (!dev) + return; + + skb_reset_network_header(skb); + arp = arp_hdr(skb); + + if (arp->ar_op != htons(ARPOP_REQUEST)) + return; + + arp_ptr = (unsigned char *)(arp + 1); + sha = arp_ptr; + arp_ptr += dev->addr_len; + memcpy(&sip, arp_ptr, sizeof(sip)); + arp_ptr += sizeof(sip); + arp_ptr += dev->addr_len; + memcpy(&tip, arp_ptr, sizeof(tip)); + + if (tip != pi->iscsi_ipv4addr) + return; + + arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, + pi->iscsic.mac_addr, sha); + +} + +static inline int is_arp(struct sk_buff *skb) +{ + return skb->protocol == htons(ETH_P_ARP); +} + +static void cxgb3_process_iscsi_prov_pack(struct port_info *pi, + struct sk_buff *skb) +{ + if (is_arp(skb)) { + cxgb3_arp_process(pi, skb); + return; + } + + if (pi->iscsic.recv) + pi->iscsic.recv(pi, skb); + +} + +/** + * rx_eth - process an ingress ethernet packet + * @adap: the adapter + * @rq: the response queue that received the packet + * @skb: the packet + * @pad: amount of padding at the start of the buffer + * + * Process an ingress ethernet pakcet and deliver it to the stack. + * The padding is 2 if the packet was delivered in an Rx buffer and 0 + * if it was immediate data in a response. + */ +static void rx_eth(struct adapter *adap, struct sge_rspq *rq, + struct sk_buff *skb, int pad, int lro) +{ + struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad); + struct sge_qset *qs = rspq_to_qset(rq); + struct port_info *pi; + + skb_pull(skb, sizeof(*p) + pad); + skb->protocol = eth_type_trans(skb, adap->port[p->iff]); + pi = netdev_priv(skb->dev); + if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid && + p->csum == htons(0xffff) && !p->fragment) { + qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb_checksum_none_assert(skb); + skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); + + if (p->vlan_valid) { + qs->port_stats[SGE_PSTAT_VLANEX]++; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan)); + } + if (rq->polling) { + if (lro) + napi_gro_receive(&qs->napi, skb); + else { + if (unlikely(pi->iscsic.flags)) + cxgb3_process_iscsi_prov_pack(pi, skb); + netif_receive_skb(skb); + } + } else + netif_rx(skb); +} + +static inline int is_eth_tcp(u32 rss) +{ + return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE; +} + +/** + * lro_add_page - add a page chunk to an LRO session + * @adap: the adapter + * @qs: the associated queue set + * @fl: the free list containing the page chunk to add + * @len: packet length + * @complete: Indicates the last fragment of a frame + * + * Add a received packet contained in a page chunk to an existing LRO + * session. 
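+ *
+ * Invoked from process_responses() when the free list hands out page
+ * chunks and the packet is GRO-eligible Ethernet/TCP, roughly:
+ *
+ *	if (lro > 0) {
+ *		lro_add_page(adap, qs, fl, G_RSPD_LEN(len),
+ *			     flags & F_RSPD_EOP);	/* EOP completes the frame */
+ *		goto next_fl;
+ *	}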
+ */ +static void lro_add_page(struct adapter *adap, struct sge_qset *qs, + struct sge_fl *fl, int len, int complete) +{ + struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; + struct port_info *pi = netdev_priv(qs->netdev); + struct sk_buff *skb = NULL; + struct cpl_rx_pkt *cpl; + struct skb_frag_struct *rx_frag; + int nr_frags; + int offset = 0; + + if (!qs->nomem) { + skb = napi_get_frags(&qs->napi); + qs->nomem = !skb; + } + + fl->credits--; + + pci_dma_sync_single_for_cpu(adap->pdev, + dma_unmap_addr(sd, dma_addr), + fl->buf_size - SGE_PG_RSVD, + PCI_DMA_FROMDEVICE); + + (*sd->pg_chunk.p_cnt)--; + if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) + pci_unmap_page(adap->pdev, + sd->pg_chunk.mapping, + fl->alloc_size, + PCI_DMA_FROMDEVICE); + + if (!skb) { + put_page(sd->pg_chunk.page); + if (complete) + qs->nomem = 0; + return; + } + + rx_frag = skb_shinfo(skb)->frags; + nr_frags = skb_shinfo(skb)->nr_frags; + + if (!nr_frags) { + offset = 2 + sizeof(struct cpl_rx_pkt); + cpl = qs->lro_va = sd->pg_chunk.va + 2; + + if ((qs->netdev->features & NETIF_F_RXCSUM) && + cpl->csum_valid && cpl->csum == htons(0xffff)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; + } else + skb->ip_summed = CHECKSUM_NONE; + } else + cpl = qs->lro_va; + + len -= offset; + + rx_frag += nr_frags; + __skb_frag_set_page(rx_frag, sd->pg_chunk.page); + rx_frag->page_offset = sd->pg_chunk.offset + offset; + skb_frag_size_set(rx_frag, len); + + skb->len += len; + skb->data_len += len; + skb->truesize += len; + skb_shinfo(skb)->nr_frags++; + + if (!complete) + return; + + skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); + + if (cpl->vlan_valid) { + qs->port_stats[SGE_PSTAT_VLANEX]++; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); + } + napi_gro_frags(&qs->napi); +} + +/** + * handle_rsp_cntrl_info - handles control information in a response + * @qs: the queue set corresponding to the response + * @flags: the response control flags + * + * Handles the control information of an SGE response, such as GTS + * indications and completion credits for the queue set's Tx queues. + * HW coalesces credits, we don't do any extra SW coalescing. + */ +static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) +{ + unsigned int credits; + +#if USE_GTS + if (flags & F_RSPD_TXQ0_GTS) + clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); +#endif + + credits = G_RSPD_TXQ0_CR(flags); + if (credits) + qs->txq[TXQ_ETH].processed += credits; + + credits = G_RSPD_TXQ2_CR(flags); + if (credits) + qs->txq[TXQ_CTRL].processed += credits; + +# if USE_GTS + if (flags & F_RSPD_TXQ1_GTS) + clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); +# endif + credits = G_RSPD_TXQ1_CR(flags); + if (credits) + qs->txq[TXQ_OFLD].processed += credits; +} + +/** + * check_ring_db - check if we need to ring any doorbells + * @adapter: the adapter + * @qs: the queue set whose Tx queues are to be examined + * @sleeping: indicates which Tx queue sent GTS + * + * Checks if some of a queue set's Tx queues need to ring their doorbells + * to resume transmission after idling while they still have unprocessed + * descriptors. 
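+ *
+ * The @sleeping mask is accumulated while a batch of responses is
+ * processed and the check is made once at the end, roughly:
+ *
+ *	sleeping |= flags & RSPD_GTS_MASK;
+ *	...
+ *	if (sleeping)
+ *		check_ring_db(adap, qs, sleeping);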
+ */ +static void check_ring_db(struct adapter *adap, struct sge_qset *qs, + unsigned int sleeping) +{ + if (sleeping & F_RSPD_TXQ0_GTS) { + struct sge_txq *txq = &qs->txq[TXQ_ETH]; + + if (txq->cleaned + txq->in_use != txq->processed && + !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { + set_bit(TXQ_RUNNING, &txq->flags); + t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | + V_EGRCNTX(txq->cntxt_id)); + } + } + + if (sleeping & F_RSPD_TXQ1_GTS) { + struct sge_txq *txq = &qs->txq[TXQ_OFLD]; + + if (txq->cleaned + txq->in_use != txq->processed && + !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) { + set_bit(TXQ_RUNNING, &txq->flags); + t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | + V_EGRCNTX(txq->cntxt_id)); + } + } +} + +/** + * is_new_response - check if a response is newly written + * @r: the response descriptor + * @q: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline int is_new_response(const struct rsp_desc *r, + const struct sge_rspq *q) +{ + return (r->intr_gen & F_RSPD_GEN2) == q->gen; +} + +static inline void clear_rspq_bufstate(struct sge_rspq * const q) +{ + q->pg_skb = NULL; + q->rx_recycle_buf = 0; +} + +#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS) +#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \ + V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \ + V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \ + V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR)) + +/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ +#define NOMEM_INTR_DELAY 2500 + +/** + * process_responses - process responses from an SGE response queue + * @adap: the adapter + * @qs: the queue set to which the response queue belongs + * @budget: how many responses can be processed in this round + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as credits and other events + * for the queues that belong to the response queue's queue set. + * A negative budget is effectively unlimited. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. 
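+ *
+ * The two ways this is driven can be seen in the handlers further down:
+ *
+ *	work_done = process_responses(adap, qs, budget);	/* NAPI poll */
+ *	...
+ *	process_responses(adap, qs, -1);	/* hard IRQ: no budget limit */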
+ */ +static int process_responses(struct adapter *adap, struct sge_qset *qs, + int budget) +{ + struct sge_rspq *q = &qs->rspq; + struct rsp_desc *r = &q->desc[q->cidx]; + int budget_left = budget; + unsigned int sleeping = 0; + struct sk_buff *offload_skbs[RX_BUNDLE_SIZE]; + int ngathered = 0; + + q->next_holdoff = q->holdoff_tmr; + + while (likely(budget_left && is_new_response(r, q))) { + int packet_complete, eth, ethpad = 2; + int lro = !!(qs->netdev->features & NETIF_F_GRO); + struct sk_buff *skb = NULL; + u32 len, flags; + __be32 rss_hi, rss_lo; + + dma_rmb(); + eth = r->rss_hdr.opcode == CPL_RX_PKT; + rss_hi = *(const __be32 *)r; + rss_lo = r->rss_hdr.rss_hash_val; + flags = ntohl(r->flags); + + if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) { + skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC); + if (!skb) + goto no_mem; + + memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE); + skb->data[0] = CPL_ASYNC_NOTIF; + rss_hi = htonl(CPL_ASYNC_NOTIF << 24); + q->async_notif++; + } else if (flags & F_RSPD_IMM_DATA_VALID) { + skb = get_imm_packet(r); + if (unlikely(!skb)) { +no_mem: + q->next_holdoff = NOMEM_INTR_DELAY; + q->nomem++; + /* consume one credit since we tried */ + budget_left--; + break; + } + q->imm_data++; + ethpad = 0; + } else if ((len = ntohl(r->len_cq)) != 0) { + struct sge_fl *fl; + + lro &= eth && is_eth_tcp(rss_hi); + + fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; + if (fl->use_pages) { + void *addr = fl->sdesc[fl->cidx].pg_chunk.va; + + prefetch(addr); +#if L1_CACHE_BYTES < 128 + prefetch(addr + L1_CACHE_BYTES); +#endif + __refill_fl(adap, fl); + if (lro > 0) { + lro_add_page(adap, qs, fl, + G_RSPD_LEN(len), + flags & F_RSPD_EOP); + goto next_fl; + } + + skb = get_packet_pg(adap, fl, q, + G_RSPD_LEN(len), + eth ? + SGE_RX_DROP_THRES : 0); + q->pg_skb = skb; + } else + skb = get_packet(adap, fl, G_RSPD_LEN(len), + eth ? 
SGE_RX_DROP_THRES : 0); + if (unlikely(!skb)) { + if (!eth) + goto no_mem; + q->rx_drops++; + } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT)) + __skb_pull(skb, 2); +next_fl: + if (++fl->cidx == fl->size) + fl->cidx = 0; + } else + q->pure_rsps++; + + if (flags & RSPD_CTRL_MASK) { + sleeping |= flags & RSPD_GTS_MASK; + handle_rsp_cntrl_info(qs, flags); + } + + r++; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + r = q->desc; + } + prefetch(r); + + if (++q->credits >= (q->size / 4)) { + refill_rspq(adap, q, q->credits); + q->credits = 0; + } + + packet_complete = flags & + (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID | + F_RSPD_ASYNC_NOTIF); + + if (skb != NULL && packet_complete) { + if (eth) + rx_eth(adap, q, skb, ethpad, lro); + else { + q->offload_pkts++; + /* Preserve the RSS info in csum & priority */ + skb->csum = rss_hi; + skb->priority = rss_lo; + ngathered = rx_offload(&adap->tdev, q, skb, + offload_skbs, + ngathered); + } + + if (flags & F_RSPD_EOP) + clear_rspq_bufstate(q); + } + --budget_left; + } + + deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); + + if (sleeping) + check_ring_db(adap, qs, sleeping); + + smp_mb(); /* commit Tx queue .processed updates */ + if (unlikely(qs->txq_stopped != 0)) + restart_tx(qs); + + budget -= budget_left; + return budget; +} + +static inline int is_pure_response(const struct rsp_desc *r) +{ + __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); + + return (n | r->len_cq) == 0; +} + +/** + * napi_rx_handler - the NAPI handler for Rx processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + struct sge_qset *qs = container_of(napi, struct sge_qset, napi); + struct adapter *adap = qs->adap; + int work_done = process_responses(adap, qs, budget); + + if (likely(work_done < budget)) { + napi_complete(napi); + + /* + * Because we don't atomically flush the following + * write it is possible that in very rare cases it can + * reach the device in a way that races with a new + * response being written plus an error interrupt + * causing the NAPI interrupt handler below to return + * unhandled status to the OS. To protect against + * this would require flushing the write and doing + * both the write and the flush with interrupts off. + * Way too expensive and unjustifiable given the + * rarity of the race. + * + * The race cannot happen at all with MSI-X. + */ + t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | + V_NEWTIMER(qs->rspq.next_holdoff) | + V_NEWINDEX(qs->rspq.cidx)); + } + return work_done; +} + +/* + * Returns true if the device is already scheduled for polling. + */ +static inline int napi_is_scheduled(struct napi_struct *napi) +{ + return test_bit(NAPI_STATE_SCHED, &napi->state); +} + +/** + * process_pure_responses - process pure responses from a response queue + * @adap: the adapter + * @qs: the queue set owning the response queue + * @r: the first pure response to process + * + * A simpler version of process_responses() that handles only pure (i.e., + * non data-carrying) responses. Such respones are too light-weight to + * justify calling a softirq under NAPI, so we handle them specially in + * the interrupt handler. The function is called with a pointer to a + * response, which the caller must ensure is a valid pure response. + * + * Returns 1 if it encounters a valid data-carrying response, 0 otherwise. 
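+ *
+ * handle_responses() below uses the return value to decide whether the
+ * queue can simply be rearmed or NAPI must take over, roughly:
+ *
+ *	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
+ *		t3_write_reg(adap, A_SG_GTS, ...);	/* rearm, nothing left */
+ *		return 0;
+ *	}
+ *	napi_schedule(&qs->napi);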
+ */ +static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, + struct rsp_desc *r) +{ + struct sge_rspq *q = &qs->rspq; + unsigned int sleeping = 0; + + do { + u32 flags = ntohl(r->flags); + + r++; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + r = q->desc; + } + prefetch(r); + + if (flags & RSPD_CTRL_MASK) { + sleeping |= flags & RSPD_GTS_MASK; + handle_rsp_cntrl_info(qs, flags); + } + + q->pure_rsps++; + if (++q->credits >= (q->size / 4)) { + refill_rspq(adap, q, q->credits); + q->credits = 0; + } + if (!is_new_response(r, q)) + break; + dma_rmb(); + } while (is_pure_response(r)); + + if (sleeping) + check_ring_db(adap, qs, sleeping); + + smp_mb(); /* commit Tx queue .processed updates */ + if (unlikely(qs->txq_stopped != 0)) + restart_tx(qs); + + return is_new_response(r, q); +} + +/** + * handle_responses - decide what to do with new responses in NAPI mode + * @adap: the adapter + * @q: the response queue + * + * This is used by the NAPI interrupt handlers to decide what to do with + * new SGE responses. If there are no new responses it returns -1. If + * there are new responses and they are pure (i.e., non-data carrying) + * it handles them straight in hard interrupt context as they are very + * cheap and don't deliver any packets. Finally, if there are any data + * signaling responses it schedules the NAPI handler. Returns 1 if it + * schedules NAPI, 0 if all new responses were pure. + * + * The caller must ascertain NAPI is not already running. + */ +static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) +{ + struct sge_qset *qs = rspq_to_qset(q); + struct rsp_desc *r = &q->desc[q->cidx]; + + if (!is_new_response(r, q)) + return -1; + dma_rmb(); + if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | + V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); + return 0; + } + napi_schedule(&qs->napi); + return 1; +} + +/* + * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case + * (i.e., response queue serviced in hard interrupt). + */ +static irqreturn_t t3_sge_intr_msix(int irq, void *cookie) +{ + struct sge_qset *qs = cookie; + struct adapter *adap = qs->adap; + struct sge_rspq *q = &qs->rspq; + + spin_lock(&q->lock); + if (process_responses(adap, qs, -1) == 0) + q->unhandled_irqs++; + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | + V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +/* + * The MSI-X interrupt handler for an SGE response queue for the NAPI case + * (i.e., response queue serviced by NAPI polling). + */ +static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) +{ + struct sge_qset *qs = cookie; + struct sge_rspq *q = &qs->rspq; + + spin_lock(&q->lock); + + if (handle_responses(qs->adap, q) < 0) + q->unhandled_irqs++; + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +/* + * The non-NAPI MSI interrupt handler. This needs to handle data events from + * SGE response queues as well as error and other async events as they all use + * the same MSI vector. We use one SGE response queue per port in this mode + * and protect all response queues with queue 0's lock. 
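+ *
+ * Selected by t3_intr_handler() when the adapter uses plain MSI without
+ * NAPI:
+ *
+ *	if (adap->flags & USING_MSI)
+ *		return polling ? t3_intr_msi_napi : t3_intr_msi;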
+ */ +static irqreturn_t t3_intr_msi(int irq, void *cookie) +{ + int new_packets = 0; + struct adapter *adap = cookie; + struct sge_rspq *q = &adap->sge.qs[0].rspq; + + spin_lock(&q->lock); + + if (process_responses(adap, &adap->sge.qs[0], -1)) { + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | + V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); + new_packets = 1; + } + + if (adap->params.nports == 2 && + process_responses(adap, &adap->sge.qs[1], -1)) { + struct sge_rspq *q1 = &adap->sge.qs[1].rspq; + + t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | + V_NEWTIMER(q1->next_holdoff) | + V_NEWINDEX(q1->cidx)); + new_packets = 1; + } + + if (!new_packets && t3_slow_intr_handler(adap) == 0) + q->unhandled_irqs++; + + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +static int rspq_check_napi(struct sge_qset *qs) +{ + struct sge_rspq *q = &qs->rspq; + + if (!napi_is_scheduled(&qs->napi) && + is_new_response(&q->desc[q->cidx], q)) { + napi_schedule(&qs->napi); + return 1; + } + return 0; +} + +/* + * The MSI interrupt handler for the NAPI case (i.e., response queues serviced + * by NAPI polling). Handles data events from SGE response queues as well as + * error and other async events as they all use the same MSI vector. We use + * one SGE response queue per port in this mode and protect all response + * queues with queue 0's lock. + */ +static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) +{ + int new_packets; + struct adapter *adap = cookie; + struct sge_rspq *q = &adap->sge.qs[0].rspq; + + spin_lock(&q->lock); + + new_packets = rspq_check_napi(&adap->sge.qs[0]); + if (adap->params.nports == 2) + new_packets += rspq_check_napi(&adap->sge.qs[1]); + if (!new_packets && t3_slow_intr_handler(adap) == 0) + q->unhandled_irqs++; + + spin_unlock(&q->lock); + return IRQ_HANDLED; +} + +/* + * A helper function that processes responses and issues GTS. + */ +static inline int process_responses_gts(struct adapter *adap, + struct sge_rspq *rq) +{ + int work; + + work = process_responses(adap, rspq_to_qset(rq), -1); + t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | + V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx)); + return work; +} + +/* + * The legacy INTx interrupt handler. This needs to handle data events from + * SGE response queues as well as error and other async events as they all use + * the same interrupt pin. We use one SGE response queue per port in this mode + * and protect all response queues with queue 0's lock. + */ +static irqreturn_t t3_intr(int irq, void *cookie) +{ + int work_done, w0, w1; + struct adapter *adap = cookie; + struct sge_rspq *q0 = &adap->sge.qs[0].rspq; + struct sge_rspq *q1 = &adap->sge.qs[1].rspq; + + spin_lock(&q0->lock); + + w0 = is_new_response(&q0->desc[q0->cidx], q0); + w1 = adap->params.nports == 2 && + is_new_response(&q1->desc[q1->cidx], q1); + + if (likely(w0 | w1)) { + t3_write_reg(adap, A_PL_CLI, 0); + t3_read_reg(adap, A_PL_CLI); /* flush */ + + if (likely(w0)) + process_responses_gts(adap, q0); + + if (w1) + process_responses_gts(adap, q1); + + work_done = w0 | w1; + } else + work_done = t3_slow_intr_handler(adap); + + spin_unlock(&q0->lock); + return IRQ_RETVAL(work_done != 0); +} + +/* + * Interrupt handler for legacy INTx interrupts for T3B-based cards. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt pin. We use one SGE + * response queue per port in this mode and protect all response queues with + * queue 0's lock. 
+ */ +static irqreturn_t t3b_intr(int irq, void *cookie) +{ + u32 map; + struct adapter *adap = cookie; + struct sge_rspq *q0 = &adap->sge.qs[0].rspq; + + t3_write_reg(adap, A_PL_CLI, 0); + map = t3_read_reg(adap, A_SG_DATA_INTR); + + if (unlikely(!map)) /* shared interrupt, most likely */ + return IRQ_NONE; + + spin_lock(&q0->lock); + + if (unlikely(map & F_ERRINTR)) + t3_slow_intr_handler(adap); + + if (likely(map & 1)) + process_responses_gts(adap, q0); + + if (map & 2) + process_responses_gts(adap, &adap->sge.qs[1].rspq); + + spin_unlock(&q0->lock); + return IRQ_HANDLED; +} + +/* + * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt pin. We use one SGE + * response queue per port in this mode and protect all response queues with + * queue 0's lock. + */ +static irqreturn_t t3b_intr_napi(int irq, void *cookie) +{ + u32 map; + struct adapter *adap = cookie; + struct sge_qset *qs0 = &adap->sge.qs[0]; + struct sge_rspq *q0 = &qs0->rspq; + + t3_write_reg(adap, A_PL_CLI, 0); + map = t3_read_reg(adap, A_SG_DATA_INTR); + + if (unlikely(!map)) /* shared interrupt, most likely */ + return IRQ_NONE; + + spin_lock(&q0->lock); + + if (unlikely(map & F_ERRINTR)) + t3_slow_intr_handler(adap); + + if (likely(map & 1)) + napi_schedule(&qs0->napi); + + if (map & 2) + napi_schedule(&adap->sge.qs[1].napi); + + spin_unlock(&q0->lock); + return IRQ_HANDLED; +} + +/** + * t3_intr_handler - select the top-level interrupt handler + * @adap: the adapter + * @polling: whether using NAPI to service response queues + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the + * response queues. + */ +irq_handler_t t3_intr_handler(struct adapter *adap, int polling) +{ + if (adap->flags & USING_MSIX) + return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix; + if (adap->flags & USING_MSI) + return polling ? t3_intr_msi_napi : t3_intr_msi; + if (adap->params.rev > 0) + return polling ? t3b_intr_napi : t3b_intr; + return t3_intr; +} + +#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ + F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ + V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ + F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ + F_HIRCQPARITYERROR) +#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR) +#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \ + F_RSPQDISABLED) + +/** + * t3_sge_err_intr_handler - SGE async event interrupt handler + * @adapter: the adapter + * + * Interrupt handler for SGE asynchronous (non-data) events. 
+ */ +void t3_sge_err_intr_handler(struct adapter *adapter) +{ + unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) & + ~F_FLEMPTY; + + if (status & SGE_PARERR) + CH_ALERT(adapter, "SGE parity error (0x%x)\n", + status & SGE_PARERR); + if (status & SGE_FRAMINGERR) + CH_ALERT(adapter, "SGE framing error (0x%x)\n", + status & SGE_FRAMINGERR); + + if (status & F_RSPQCREDITOVERFOW) + CH_ALERT(adapter, "SGE response queue credit overflow\n"); + + if (status & F_RSPQDISABLED) { + v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS); + + CH_ALERT(adapter, + "packet delivered to disabled response queue " + "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); + } + + if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) + queue_work(cxgb3_wq, &adapter->db_drop_task); + + if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) + queue_work(cxgb3_wq, &adapter->db_full_task); + + if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) + queue_work(cxgb3_wq, &adapter->db_empty_task); + + t3_write_reg(adapter, A_SG_INT_CAUSE, status); + if (status & SGE_FATALERR) + t3_fatal_err(adapter); +} + +/** + * sge_timer_tx - perform periodic maintenance of an SGE qset + * @data: the SGE queue set to maintain + * + * Runs periodically from a timer to perform maintenance of an SGE queue + * set. It performs two tasks: + * + * Cleans up any completed Tx descriptors that may still be pending. + * Normal descriptor cleanup happens when new packets are added to a Tx + * queue so this timer is relatively infrequent and does any cleanup only + * if the Tx queue has not seen any new packets in a while. We make a + * best effort attempt to reclaim descriptors, in that we don't wait + * around if we cannot get a queue's lock (which most likely is because + * someone else is queueing new packets and so will also handle the clean + * up). Since control queues use immediate data exclusively we don't + * bother cleaning them up here. + * + */ +static void sge_timer_tx(unsigned long data) +{ + struct sge_qset *qs = (struct sge_qset *)data; + struct port_info *pi = netdev_priv(qs->netdev); + struct adapter *adap = pi->adapter; + unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; + unsigned long next_period; + + if (__netif_tx_trylock(qs->tx_q)) { + tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], + TX_RECLAIM_TIMER_CHUNK); + __netif_tx_unlock(qs->tx_q); + } + + if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { + tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], + TX_RECLAIM_TIMER_CHUNK); + spin_unlock(&qs->txq[TXQ_OFLD].lock); + } + + next_period = TX_RECLAIM_PERIOD >> + (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / + TX_RECLAIM_TIMER_CHUNK); + mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); +} + +/** + * sge_timer_rx - perform periodic maintenance of an SGE qset + * @data: the SGE queue set to maintain + * + * a) Replenishes Rx queues that have run out due to memory shortage. + * Normally new Rx buffers are added when existing ones are consumed but + * when out of memory a queue can become empty. We try to add only a few + * buffers here, the queue will be replenished fully as these new buffers + * are used up if memory shortage has subsided. + * + * b) Return coalesced response queue credits in case a response queue is + * starved. + * + */ +static void sge_timer_rx(unsigned long data) +{ + spinlock_t *lock; + struct sge_qset *qs = (struct sge_qset *)data; + struct port_info *pi = netdev_priv(qs->netdev); + struct adapter *adap = pi->adapter; + u32 status; + + lock = adap->params.rev > 0 ? 
+ &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; + + if (!spin_trylock_irq(lock)) + goto out; + + if (napi_is_scheduled(&qs->napi)) + goto unlock; + + if (adap->params.rev < 4) { + status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); + + if (status & (1 << qs->rspq.cntxt_id)) { + qs->rspq.starved++; + if (qs->rspq.credits) { + qs->rspq.credits--; + refill_rspq(adap, &qs->rspq, 1); + qs->rspq.restarted++; + t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, + 1 << qs->rspq.cntxt_id); + } + } + } + + if (qs->fl[0].credits < qs->fl[0].size) + __refill_fl(adap, &qs->fl[0]); + if (qs->fl[1].credits < qs->fl[1].size) + __refill_fl(adap, &qs->fl[1]); + +unlock: + spin_unlock_irq(lock); +out: + mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); +} + +/** + * t3_update_qset_coalesce - update coalescing settings for a queue set + * @qs: the SGE queue set + * @p: new queue set parameters + * + * Update the coalescing settings for an SGE queue set. Nothing is done + * if the queue set is not initialized yet. + */ +void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) +{ + qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ + qs->rspq.polling = p->polling; + qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; +} + +/** + * t3_sge_alloc_qset - initialize an SGE queue set + * @adapter: the adapter + * @id: the queue set id + * @nports: how many Ethernet ports will be using this queue set + * @irq_vec_idx: the IRQ vector index for response queue interrupts + * @p: configuration parameters for this queue set + * @ntxq: number of Tx queues for the queue set + * @netdev: net device associated with this queue set + * @netdevq: net device TX queue associated with this queue set + * + * Allocate resources and initialize an SGE queue set. A queue set + * comprises a response queue, two Rx free-buffer queues, and up to 3 + * Tx queues. The Tx queues are assigned roles in the order Ethernet + * queue, offload queue, and control queue. + */ +int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, + int irq_vec_idx, const struct qset_params *p, + int ntxq, struct net_device *dev, + struct netdev_queue *netdevq) +{ + int i, avail, ret = -ENOMEM; + struct sge_qset *q = &adapter->sge.qs[id]; + + init_qset_cntxt(q, id); + setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q); + setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q); + + q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, + sizeof(struct rx_desc), + sizeof(struct rx_sw_desc), + &q->fl[0].phys_addr, &q->fl[0].sdesc); + if (!q->fl[0].desc) + goto err; + + q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, + sizeof(struct rx_desc), + sizeof(struct rx_sw_desc), + &q->fl[1].phys_addr, &q->fl[1].sdesc); + if (!q->fl[1].desc) + goto err; + + q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, + sizeof(struct rsp_desc), 0, + &q->rspq.phys_addr, NULL); + if (!q->rspq.desc) + goto err; + + for (i = 0; i < ntxq; ++i) { + /* + * The control queue always uses immediate data so does not + * need to keep track of any sk_buffs. + */ + size_t sz = i == TXQ_CTRL ? 
0 : sizeof(struct tx_sw_desc); + + q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], + sizeof(struct tx_desc), sz, + &q->txq[i].phys_addr, + &q->txq[i].sdesc); + if (!q->txq[i].desc) + goto err; + + q->txq[i].gen = 1; + q->txq[i].size = p->txq_size[i]; + spin_lock_init(&q->txq[i].lock); + skb_queue_head_init(&q->txq[i].sendq); + } + + tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, + (unsigned long)q); + tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, + (unsigned long)q); + + q->fl[0].gen = q->fl[1].gen = 1; + q->fl[0].size = p->fl_size; + q->fl[1].size = p->jumbo_size; + + q->rspq.gen = 1; + q->rspq.size = p->rspq_size; + spin_lock_init(&q->rspq.lock); + skb_queue_head_init(&q->rspq.rx_queue); + + q->txq[TXQ_ETH].stop_thres = nports * + flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); + +#if FL0_PG_CHUNK_SIZE > 0 + q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; +#else + q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); +#endif +#if FL1_PG_CHUNK_SIZE > 0 + q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; +#else + q->fl[1].buf_size = is_offload(adapter) ? + (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : + MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt); +#endif + + q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; + q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; + q->fl[0].order = FL0_PG_ORDER; + q->fl[1].order = FL1_PG_ORDER; + q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; + q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; + + spin_lock_irq(&adapter->sge.reg_lock); + + /* FL threshold comparison uses < */ + ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, + q->rspq.phys_addr, q->rspq.size, + q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); + if (ret) + goto err_unlock; + + for (i = 0; i < SGE_RXQ_PER_SET; ++i) { + ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, + q->fl[i].phys_addr, q->fl[i].size, + q->fl[i].buf_size - SGE_PG_RSVD, + p->cong_thres, 1, 0); + if (ret) + goto err_unlock; + } + + ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, + SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, + q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, + 1, 0); + if (ret) + goto err_unlock; + + if (ntxq > 1) { + ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, + USE_GTS, SGE_CNTXT_OFLD, id, + q->txq[TXQ_OFLD].phys_addr, + q->txq[TXQ_OFLD].size, 0, 1, 0); + if (ret) + goto err_unlock; + } + + if (ntxq > 2) { + ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, + SGE_CNTXT_CTRL, id, + q->txq[TXQ_CTRL].phys_addr, + q->txq[TXQ_CTRL].size, + q->txq[TXQ_CTRL].token, 1, 0); + if (ret) + goto err_unlock; + } + + spin_unlock_irq(&adapter->sge.reg_lock); + + q->adap = adapter; + q->netdev = dev; + q->tx_q = netdevq; + t3_update_qset_coalesce(q, p); + + avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, + GFP_KERNEL | __GFP_COMP); + if (!avail) { + CH_ALERT(adapter, "free list queue 0 initialization failed\n"); + goto err; + } + if (avail < q->fl[0].size) + CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", + avail); + + avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, + GFP_KERNEL | __GFP_COMP); + if (avail < q->fl[1].size) + CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", + avail); + refill_rspq(adapter, &q->rspq, q->rspq.size - 1); + + t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | + V_NEWTIMER(q->rspq.holdoff_tmr)); + + return 0; + +err_unlock: + spin_unlock_irq(&adapter->sge.reg_lock); +err: + t3_free_qset(adapter, q); + return ret; +} + +/** + * t3_start_sge_timers - start SGE 
timer call backs + * @adap: the adapter + * + * Starts each SGE queue set's timer call back + */ +void t3_start_sge_timers(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; ++i) { + struct sge_qset *q = &adap->sge.qs[i]; + + if (q->tx_reclaim_timer.function) + mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); + + if (q->rx_reclaim_timer.function) + mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); + } +} + +/** + * t3_stop_sge_timers - stop SGE timer call backs + * @adap: the adapter + * + * Stops each SGE queue set's timer call back + */ +void t3_stop_sge_timers(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; ++i) { + struct sge_qset *q = &adap->sge.qs[i]; + + if (q->tx_reclaim_timer.function) + del_timer_sync(&q->tx_reclaim_timer); + if (q->rx_reclaim_timer.function) + del_timer_sync(&q->rx_reclaim_timer); + } +} + +/** + * t3_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. + */ +void t3_free_sge_resources(struct adapter *adap) +{ + int i; + + for (i = 0; i < SGE_QSETS; ++i) + t3_free_qset(adap, &adap->sge.qs[i]); +} + +/** + * t3_sge_start - enable SGE + * @adap: the adapter + * + * Enables the SGE for DMAs. This is the last step in starting packet + * transfers. + */ +void t3_sge_start(struct adapter *adap) +{ + t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE); +} + +/** + * t3_sge_stop - disable SGE operation + * @adap: the adapter + * + * Disables the DMA engine. This can be called in emeregencies (e.g., + * from error interrupts) or from normal process context. In the latter + * case it also disables any pending queue restart tasklets. Note that + * if it is called in interrupt context it cannot disable the restart + * tasklets as it cannot wait, however the tasklets will have no effect + * since the doorbells are disabled and the driver will call this again + * later from process context, at which time the tasklets will be stopped + * if they are still running. + */ +void t3_sge_stop(struct adapter *adap) +{ + t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0); + if (!in_interrupt()) { + int i; + + for (i = 0; i < SGE_QSETS; ++i) { + struct sge_qset *qs = &adap->sge.qs[i]; + + tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk); + tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk); + } + } +} + +/** + * t3_sge_init - initialize SGE + * @adap: the adapter + * @p: the SGE parameters + * + * Performs SGE initialization needed every time after a chip reset. + * We do not initialize any of the queue sets here, instead the driver + * top-level must request those individually. We also do not enable DMA + * here, that should be done after the queues have been set up. + */ +void t3_sge_init(struct adapter *adap, struct sge_params *p) +{ + unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); + + ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL | + F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN | + V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS | + V_USERSPACESIZE(ups ? 
ups - 1 : 0) | F_ISCSICOALESCING; +#if SGE_NUM_GENBITS == 1 + ctrl |= F_EGRGENCTRL; +#endif + if (adap->params.rev > 0) { + if (!(adap->flags & (USING_MSIX | USING_MSI))) + ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ; + } + t3_write_reg(adap, A_SG_CONTROL, ctrl); + t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | + V_LORCQDRBTHRSH(512)); + t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); + t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | + V_TIMEOUT(200 * core_ticks_per_usec(adap))); + t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, + adap->params.rev < T3_REV_C ? 1000 : 500); + t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); + t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); + t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); + t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); + t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); +} + +/** + * t3_sge_prep - one-time SGE initialization + * @adap: the associated adapter + * @p: SGE parameters + * + * Performs one-time initialization of SGE SW state. Includes determining + * defaults for the assorted SGE parameters, which admins can change until + * they are used to initialize the SGE. + */ +void t3_sge_prep(struct adapter *adap, struct sge_params *p) +{ + int i; + + p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + for (i = 0; i < SGE_QSETS; ++i) { + struct qset_params *q = p->qset + i; + + q->polling = adap->params.rev > 0; + q->coalesce_usecs = 5; + q->rspq_size = 1024; + q->fl_size = 1024; + q->jumbo_size = 512; + q->txq_size[TXQ_ETH] = 1024; + q->txq_size[TXQ_OFLD] = 1024; + q->txq_size[TXQ_CTRL] = 256; + q->cong_thres = 0; + } + + spin_lock_init(&adap->sge.reg_lock); +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h new file mode 100644 index 000000000..29b6c800b --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h @@ -0,0 +1,255 @@ +/* + * This file is automatically generated --- any changes will be lost. 
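+ *
+ * Naming convention: for a field FOO, S_FOO is its bit offset, M_FOO the
+ * unshifted mask, V_FOO(x) positions a value in the field, G_FOO(x)
+ * extracts it, and F_FOO is the single-bit flag form.  Typical uses in
+ * sge.c:
+ *
+ *	credits = G_RSPD_TXQ0_CR(flags);	/* extract TXQ0 credit return */
+ *	if (flags & F_RSPD_EOP)			/* single-bit flag test */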
+ */ + +#ifndef _SGE_DEFS_H +#define _SGE_DEFS_H + +#define S_EC_CREDITS 0 +#define M_EC_CREDITS 0x7FFF +#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS) +#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS) + +#define S_EC_GTS 15 +#define V_EC_GTS(x) ((x) << S_EC_GTS) +#define F_EC_GTS V_EC_GTS(1U) + +#define S_EC_INDEX 16 +#define M_EC_INDEX 0xFFFF +#define V_EC_INDEX(x) ((x) << S_EC_INDEX) +#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX) + +#define S_EC_SIZE 0 +#define M_EC_SIZE 0xFFFF +#define V_EC_SIZE(x) ((x) << S_EC_SIZE) +#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE) + +#define S_EC_BASE_LO 16 +#define M_EC_BASE_LO 0xFFFF +#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO) +#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO) + +#define S_EC_BASE_HI 0 +#define M_EC_BASE_HI 0xF +#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI) +#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI) + +#define S_EC_RESPQ 4 +#define M_EC_RESPQ 0x7 +#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ) +#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ) + +#define S_EC_TYPE 7 +#define M_EC_TYPE 0x7 +#define V_EC_TYPE(x) ((x) << S_EC_TYPE) +#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE) + +#define S_EC_GEN 10 +#define V_EC_GEN(x) ((x) << S_EC_GEN) +#define F_EC_GEN V_EC_GEN(1U) + +#define S_EC_UP_TOKEN 11 +#define M_EC_UP_TOKEN 0xFFFFF +#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN) +#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN) + +#define S_EC_VALID 31 +#define V_EC_VALID(x) ((x) << S_EC_VALID) +#define F_EC_VALID V_EC_VALID(1U) + +#define S_RQ_MSI_VEC 20 +#define M_RQ_MSI_VEC 0x3F +#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC) +#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC) + +#define S_RQ_INTR_EN 26 +#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN) +#define F_RQ_INTR_EN V_RQ_INTR_EN(1U) + +#define S_RQ_GEN 28 +#define V_RQ_GEN(x) ((x) << S_RQ_GEN) +#define F_RQ_GEN V_RQ_GEN(1U) + +#define S_CQ_INDEX 0 +#define M_CQ_INDEX 0xFFFF +#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX) +#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX) + +#define S_CQ_SIZE 16 +#define M_CQ_SIZE 0xFFFF +#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE) +#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE) + +#define S_CQ_BASE_HI 0 +#define M_CQ_BASE_HI 0xFFFFF +#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI) +#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI) + +#define S_CQ_RSPQ 20 +#define M_CQ_RSPQ 0x3F +#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ) +#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ) + +#define S_CQ_ASYNC_NOTIF 26 +#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF) +#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U) + +#define S_CQ_ARMED 27 +#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED) +#define F_CQ_ARMED V_CQ_ARMED(1U) + +#define S_CQ_ASYNC_NOTIF_SOL 28 +#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL) +#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U) + +#define S_CQ_GEN 29 +#define V_CQ_GEN(x) ((x) << S_CQ_GEN) +#define F_CQ_GEN V_CQ_GEN(1U) + +#define S_CQ_ERR 30 +#define V_CQ_ERR(x) ((x) << S_CQ_ERR) +#define F_CQ_ERR V_CQ_ERR(1U) + +#define S_CQ_OVERFLOW_MODE 31 +#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE) +#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U) + +#define S_CQ_CREDITS 0 +#define M_CQ_CREDITS 0xFFFF +#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS) +#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS) + +#define S_CQ_CREDIT_THRES 16 +#define M_CQ_CREDIT_THRES 0x1FFF +#define 
V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES) +#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES) + +#define S_FL_BASE_HI 0 +#define M_FL_BASE_HI 0xFFFFF +#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI) +#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI) + +#define S_FL_INDEX_LO 20 +#define M_FL_INDEX_LO 0xFFF +#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO) +#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO) + +#define S_FL_INDEX_HI 0 +#define M_FL_INDEX_HI 0xF +#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI) +#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI) + +#define S_FL_SIZE 4 +#define M_FL_SIZE 0xFFFF +#define V_FL_SIZE(x) ((x) << S_FL_SIZE) +#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE) + +#define S_FL_GEN 20 +#define V_FL_GEN(x) ((x) << S_FL_GEN) +#define F_FL_GEN V_FL_GEN(1U) + +#define S_FL_ENTRY_SIZE_LO 21 +#define M_FL_ENTRY_SIZE_LO 0x7FF +#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO) +#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO) + +#define S_FL_ENTRY_SIZE_HI 0 +#define M_FL_ENTRY_SIZE_HI 0x1FFFFF +#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI) +#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI) + +#define S_FL_CONG_THRES 21 +#define M_FL_CONG_THRES 0x3FF +#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES) +#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES) + +#define S_FL_GTS 31 +#define V_FL_GTS(x) ((x) << S_FL_GTS) +#define F_FL_GTS V_FL_GTS(1U) + +#define S_FLD_GEN1 31 +#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1) +#define F_FLD_GEN1 V_FLD_GEN1(1U) + +#define S_FLD_GEN2 0 +#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2) +#define F_FLD_GEN2 V_FLD_GEN2(1U) + +#define S_RSPD_TXQ1_CR 0 +#define M_RSPD_TXQ1_CR 0x7F +#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR) +#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR) + +#define S_RSPD_TXQ1_GTS 7 +#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS) +#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U) + +#define S_RSPD_TXQ2_CR 8 +#define M_RSPD_TXQ2_CR 0x7F +#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR) +#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR) + +#define S_RSPD_TXQ2_GTS 15 +#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS) +#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U) + +#define S_RSPD_TXQ0_CR 16 +#define M_RSPD_TXQ0_CR 0x7F +#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR) +#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR) + +#define S_RSPD_TXQ0_GTS 23 +#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS) +#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U) + +#define S_RSPD_EOP 24 +#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP) +#define F_RSPD_EOP V_RSPD_EOP(1U) + +#define S_RSPD_SOP 25 +#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP) +#define F_RSPD_SOP V_RSPD_SOP(1U) + +#define S_RSPD_ASYNC_NOTIF 26 +#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF) +#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U) + +#define S_RSPD_FL0_GTS 27 +#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS) +#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U) + +#define S_RSPD_FL1_GTS 28 +#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS) +#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U) + +#define S_RSPD_IMM_DATA_VALID 29 +#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID) +#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U) + +#define S_RSPD_OFFLOAD 30 +#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD) +#define F_RSPD_OFFLOAD 
V_RSPD_OFFLOAD(1U) + +#define S_RSPD_GEN1 31 +#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1) +#define F_RSPD_GEN1 V_RSPD_GEN1(1U) + +#define S_RSPD_LEN 0 +#define M_RSPD_LEN 0x7FFFFFFF +#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN) +#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN) + +#define S_RSPD_FLQ 31 +#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ) +#define F_RSPD_FLQ V_RSPD_FLQ(1U) + +#define S_RSPD_GEN2 0 +#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2) +#define F_RSPD_GEN2 V_RSPD_GEN2(1U) + +#define S_RSPD_INR_VEC 1 +#define M_RSPD_INR_VEC 0x7F +#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC) +#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC) + +#endif /* _SGE_DEFS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h new file mode 100644 index 000000000..852c399a8 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_cpl.h @@ -0,0 +1,1495 @@ +/* + * Copyright (c) 2004-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef T3_CPL_H +#define T3_CPL_H + +#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD) +# include <asm/byteorder.h> +#endif + +enum CPL_opcode { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_ACCEPT_RPL = 0x2, + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB = 0x4, + CPL_SET_TCB_FIELD = 0x5, + CPL_GET_TCB = 0x6, + CPL_PCMD = 0x7, + CPL_CLOSE_CON_REQ = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_TX_DATA = 0xC, + CPL_RX_DATA_ACK = 0xD, + CPL_TX_PKT = 0xE, + CPL_RTE_DELETE_REQ = 0xF, + CPL_RTE_WRITE_REQ = 0x10, + CPL_RTE_READ_REQ = 0x11, + CPL_L2T_WRITE_REQ = 0x12, + CPL_L2T_READ_REQ = 0x13, + CPL_SMT_WRITE_REQ = 0x14, + CPL_SMT_READ_REQ = 0x15, + CPL_TX_PKT_LSO = 0x16, + CPL_PCMD_READ = 0x17, + CPL_BARRIER = 0x18, + CPL_TID_RELEASE = 0x1A, + + CPL_CLOSE_LISTSRV_RPL = 0x20, + CPL_ERROR = 0x21, + CPL_GET_TCB_RPL = 0x22, + CPL_L2T_WRITE_RPL = 0x23, + CPL_PCMD_READ_RPL = 0x24, + CPL_PCMD_RPL = 0x25, + CPL_PEER_CLOSE = 0x26, + CPL_RTE_DELETE_RPL = 0x27, + CPL_RTE_WRITE_RPL = 0x28, + CPL_RX_DDP_COMPLETE = 0x29, + CPL_RX_PHYS_ADDR = 0x2A, + CPL_RX_PKT = 0x2B, + CPL_RX_URG_NOTIFY = 0x2C, + CPL_SET_TCB_RPL = 0x2D, + CPL_SMT_WRITE_RPL = 0x2E, + CPL_TX_DATA_ACK = 0x2F, + + CPL_ABORT_REQ_RSS = 0x30, + CPL_ABORT_RPL_RSS = 0x31, + CPL_CLOSE_CON_RPL = 0x32, + CPL_ISCSI_HDR = 0x33, + CPL_L2T_READ_RPL = 0x34, + CPL_RDMA_CQE = 0x35, + CPL_RDMA_CQE_READ_RSP = 0x36, + CPL_RDMA_CQE_ERR = 0x37, + CPL_RTE_READ_RPL = 0x38, + CPL_RX_DATA = 0x39, + + CPL_ACT_OPEN_RPL = 0x40, + CPL_PASS_OPEN_RPL = 0x41, + CPL_RX_DATA_DDP = 0x42, + CPL_SMT_READ_RPL = 0x43, + + CPL_ACT_ESTABLISH = 0x50, + CPL_PASS_ESTABLISH = 0x51, + + CPL_PASS_ACCEPT_REQ = 0x70, + + CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */ + + CPL_TX_DMA_ACK = 0xA0, + CPL_RDMA_READ_REQ = 0xA1, + CPL_RDMA_TERMINATE = 0xA2, + CPL_TRACE_PKT = 0xA3, + CPL_RDMA_EC_STATUS = 0xA5, + + NUM_CPL_CMDS /* must be last and previous entries must be sorted */ +}; + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_PARITY = 1, + CPL_ERR_TCAM_FULL = 3, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_RTX_NEG_ADVICE = 35, + CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_GENERAL = 99 +}; + +enum { + CPL_CONN_POLICY_AUTO = 0, + CPL_CONN_POLICY_ASK = 1, + CPL_CONN_POLICY_DENY = 3 +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_ISCSI = 2, + ULP_MODE_RDMA = 4, + ULP_MODE_TCPDDP = 5 +}; + +enum { + ULP_CRC_HEADER = 1 << 0, + ULP_CRC_DATA = 1 << 1 +}; + +enum { + CPL_PASS_OPEN_ACCEPT, + CPL_PASS_OPEN_REJECT +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, + CPL_ABORT_POST_CLOSE_REQ = 2 +}; + +enum { /* TX_PKT_LSO ethernet types */ + CPL_ETH_II, + CPL_ETH_II_VLAN, + CPL_ETH_802_3, + CPL_ETH_802_3_VLAN +}; + +enum { /* TCP congestion control algorithms */ + CONG_ALG_RENO, + CONG_ALG_TAHOE, + CONG_ALG_NEWRENO, + CONG_ALG_HIGHSPEED +}; + +enum { /* RSS hash type */ + RSS_HASH_NONE = 0, + RSS_HASH_2_TUPLE = 1, + RSS_HASH_4_TUPLE = 2, + RSS_HASH_TCPV6 = 3 +}; + +union opcode_tid { + __be32 opcode_tid; + __u8 opcode; +}; + +#define S_OPCODE 24 +#define V_OPCODE(x) ((x) << S_OPCODE) +#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF) +#define G_TID(x) ((x) & 0xFFFFFF) + +#define S_QNUM 0 +#define G_QNUM(x) (((x) >> S_QNUM) & 0xFFFF) + +#define S_HASHTYPE 22 +#define M_HASHTYPE 0x3 
+#define G_HASHTYPE(x) (((x) >> S_HASHTYPE) & M_HASHTYPE) + +/* tid is assumed to be 24-bits */ +#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid)) + +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) + +/* extract the TID from a CPL command */ +#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd)))) + +struct tcp_options { + __be16 mss; + __u8 wsf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:5; + __u8 ecn:1; + __u8 sack:1; + __u8 tstamp:1; +#else + __u8 tstamp:1; + __u8 sack:1; + __u8 ecn:1; + __u8:5; +#endif +}; + +struct rss_header { + __u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 cpu_idx:6; + __u8 hash_type:2; +#else + __u8 hash_type:2; + __u8 cpu_idx:6; +#endif + __be16 cq_idx; + __be32 rss_hash_val; +}; + +#ifndef CHELSIO_FW +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_lo; +}; + +/* wr_hi fields */ +#define S_WR_SGE_CREDITS 0 +#define M_WR_SGE_CREDITS 0xFF +#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS) +#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS) + +#define S_WR_SGLSFLT 8 +#define M_WR_SGLSFLT 0xFF +#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT) +#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT) + +#define S_WR_BCNTLFLT 16 +#define M_WR_BCNTLFLT 0xF +#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT) +#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT) + +#define S_WR_DATATYPE 20 +#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE) +#define F_WR_DATATYPE V_WR_DATATYPE(1U) + +#define S_WR_COMPL 21 +#define V_WR_COMPL(x) ((x) << S_WR_COMPL) +#define F_WR_COMPL V_WR_COMPL(1U) + +#define S_WR_EOP 22 +#define V_WR_EOP(x) ((x) << S_WR_EOP) +#define F_WR_EOP V_WR_EOP(1U) + +#define S_WR_SOP 23 +#define V_WR_SOP(x) ((x) << S_WR_SOP) +#define F_WR_SOP V_WR_SOP(1U) + +#define S_WR_OP 24 +#define M_WR_OP 0xFF +#define V_WR_OP(x) ((x) << S_WR_OP) +#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP) + +/* wr_lo fields */ +#define S_WR_LEN 0 +#define M_WR_LEN 0xFF +#define V_WR_LEN(x) ((x) << S_WR_LEN) +#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN) + +#define S_WR_TID 8 +#define M_WR_TID 0xFFFFF +#define V_WR_TID(x) ((x) << S_WR_TID) +#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID) + +#define S_WR_CR_FLUSH 30 +#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH) +#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U) + +#define S_WR_GEN 31 +#define V_WR_GEN(x) ((x) << S_WR_GEN) +#define F_WR_GEN V_WR_GEN(1U) + +# define WR_HDR struct work_request_hdr wr +# define RSS_HDR +#else +# define WR_HDR +# define RSS_HDR struct rss_header rss_hdr; +#endif + +/* option 0 lower-half fields */ +#define S_CPL_STATUS 0 +#define M_CPL_STATUS 0xFF +#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS) +#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS) + +#define S_INJECT_TIMER 6 +#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER) +#define F_INJECT_TIMER V_INJECT_TIMER(1U) + +#define S_NO_OFFLOAD 7 +#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD) +#define F_NO_OFFLOAD V_NO_OFFLOAD(1U) + +#define S_ULP_MODE 8 +#define M_ULP_MODE 0xF +#define V_ULP_MODE(x) ((x) << S_ULP_MODE) +#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE) + +#define S_RCV_BUFSIZ 12 +#define M_RCV_BUFSIZ 0x3FFF +#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ) +#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ) + +#define S_TOS 26 +#define M_TOS 0x3F +#define V_TOS(x) ((x) << S_TOS) +#define G_TOS(x) (((x) >> S_TOS) & M_TOS) + +/* option 0 upper-half fields */ +#define S_DELACK 0 +#define V_DELACK(x) ((x) << S_DELACK) +#define F_DELACK V_DELACK(1U) + 
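+/*
+ * Usage sketch (illustrative only; "req", "wscale", "mtu_idx" and "l2t_idx"
+ * are assumed locals, not names defined in this header): drivers typically
+ * compose the option 0 upper half by OR-ing the V_/F_ helpers defined below
+ * and convert it to big endian before sending the CPL, e.g.
+ *
+ *	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
+ *			   V_WND_SCALE(wscale) | V_MSS_IDX(mtu_idx) |
+ *			   V_L2T_IDX(l2t_idx));
+ *
+ * A field is recovered with the matching G_ macro, e.g.
+ * G_WND_SCALE(ntohl(req->opt0h)).
+ */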
+#define S_NO_CONG 1 +#define V_NO_CONG(x) ((x) << S_NO_CONG) +#define F_NO_CONG V_NO_CONG(1U) + +#define S_SRC_MAC_SEL 2 +#define M_SRC_MAC_SEL 0x3 +#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL) +#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL) + +#define S_L2T_IDX 4 +#define M_L2T_IDX 0x7FF +#define V_L2T_IDX(x) ((x) << S_L2T_IDX) +#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX) + +#define S_TX_CHANNEL 15 +#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL) +#define F_TX_CHANNEL V_TX_CHANNEL(1U) + +#define S_TCAM_BYPASS 16 +#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS) +#define F_TCAM_BYPASS V_TCAM_BYPASS(1U) + +#define S_NAGLE 17 +#define V_NAGLE(x) ((x) << S_NAGLE) +#define F_NAGLE V_NAGLE(1U) + +#define S_WND_SCALE 18 +#define M_WND_SCALE 0xF +#define V_WND_SCALE(x) ((x) << S_WND_SCALE) +#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE) + +#define S_KEEP_ALIVE 22 +#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE) +#define F_KEEP_ALIVE V_KEEP_ALIVE(1U) + +#define S_MAX_RETRANS 23 +#define M_MAX_RETRANS 0xF +#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS) +#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS) + +#define S_MAX_RETRANS_OVERRIDE 27 +#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE) +#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U) + +#define S_MSS_IDX 28 +#define M_MSS_IDX 0xF +#define V_MSS_IDX(x) ((x) << S_MSS_IDX) +#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) + +/* option 1 fields */ +#define S_RSS_ENABLE 0 +#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE) +#define F_RSS_ENABLE V_RSS_ENABLE(1U) + +#define S_RSS_MASK_LEN 1 +#define M_RSS_MASK_LEN 0x7 +#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN) +#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN) + +#define S_CPU_IDX 4 +#define M_CPU_IDX 0x3F +#define V_CPU_IDX(x) ((x) << S_CPU_IDX) +#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX) + +#define S_MAC_MATCH_VALID 18 +#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID) +#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U) + +#define S_CONN_POLICY 19 +#define M_CONN_POLICY 0x3 +#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY) +#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY) + +#define S_SYN_DEFENSE 21 +#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE) +#define F_SYN_DEFENSE V_SYN_DEFENSE(1U) + +#define S_VLAN_PRI 22 +#define M_VLAN_PRI 0x3 +#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI) +#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI) + +#define S_VLAN_PRI_VALID 24 +#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID) +#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U) + +#define S_PKT_TYPE 25 +#define M_PKT_TYPE 0x3 +#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE) +#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE) + +#define S_MAC_MATCH 27 +#define M_MAC_MATCH 0x1F +#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH) +#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH) + +/* option 2 fields */ +#define S_CPU_INDEX 0 +#define M_CPU_INDEX 0x7F +#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX) +#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX) + +#define S_CPU_INDEX_VALID 7 +#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID) +#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U) + +#define S_RX_COALESCE 8 +#define M_RX_COALESCE 0x3 +#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE) +#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE) + +#define S_RX_COALESCE_VALID 10 +#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID) +#define 
F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U) + +#define S_CONG_CONTROL_FLAVOR 11 +#define M_CONG_CONTROL_FLAVOR 0x3 +#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR) +#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR) + +#define S_PACING_FLAVOR 13 +#define M_PACING_FLAVOR 0x3 +#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR) +#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR) + +#define S_FLAVORS_VALID 15 +#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID) +#define F_FLAVORS_VALID V_FLAVORS_VALID(1U) + +#define S_RX_FC_DISABLE 16 +#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE) +#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U) + +#define S_RX_FC_VALID 17 +#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID) +#define F_RX_FC_VALID V_RX_FC_VALID(1U) + +struct cpl_pass_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 opt0h; + __be32 opt0l; + __be32 peer_netmask; + __be32 opt1; +}; + +struct cpl_pass_open_rpl { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __u8 resvd[7]; + __u8 status; +}; + +struct cpl_pass_establish { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 tos_tid; + __be16 l2t_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +/* cpl_pass_establish.tos_tid fields */ +#define S_PASS_OPEN_TID 0 +#define M_PASS_OPEN_TID 0xFFFFFF +#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID) +#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID) + +#define S_PASS_OPEN_TOS 24 +#define M_PASS_OPEN_TOS 0xFF +#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS) +#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS) + +/* cpl_pass_establish.l2t_idx fields */ +#define S_L2T_IDX16 5 +#define M_L2T_IDX16 0x7FF +#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16) +#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16) + +/* cpl_pass_establish.tcp_opt fields (also applies act_open_establish) */ +#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) +#define G_TCPOPT_SACK(x) (((x) >> 6) & 1) +#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) +#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) +#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf) + +struct cpl_pass_accept_req { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 tos_tid; + struct tcp_options tcp_options; + __u8 dst_mac[6]; + __be16 vlan_tag; + __u8 src_mac[6]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:3; + __u8 addr_idx:3; + __u8 port_idx:1; + __u8 exact_match:1; +#else + __u8 exact_match:1; + __u8 port_idx:1; + __u8 addr_idx:3; + __u8:3; +#endif + __u8 rsvd; + __be32 rcv_isn; + __be32 rsvd2; +}; + +struct cpl_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; + __be32 rsvd; + __be32 peer_ip; + __be32 opt0h; + __be32 opt0l_status; +}; + +struct cpl_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 opt0h; + __be32 opt0l; + __be32 params; + __be32 opt2; +}; + +/* cpl_act_open_req.params fields */ +#define S_AOPEN_VLAN_PRI 9 +#define M_AOPEN_VLAN_PRI 0x3 +#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI) +#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI) + +#define S_AOPEN_VLAN_PRI_VALID 11 +#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << 
S_AOPEN_VLAN_PRI_VALID) +#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U) + +#define S_AOPEN_PKT_TYPE 12 +#define M_AOPEN_PKT_TYPE 0x3 +#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE) +#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE) + +#define S_AOPEN_MAC_MATCH 14 +#define M_AOPEN_MAC_MATCH 0x1F +#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH) +#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH) + +#define S_AOPEN_MAC_MATCH_VALID 19 +#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID) +#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U) + +#define S_AOPEN_IFF_VLAN 20 +#define M_AOPEN_IFF_VLAN 0xFFF +#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN) +#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN) + +struct cpl_act_open_rpl { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 atid; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_act_establish { + RSS_HDR union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be32 tos_tid; + __be16 l2t_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_get_tcb { + WR_HDR; + union opcode_tid ot; + __be16 cpuno; + __be16 rsvd; +}; + +struct cpl_get_tcb_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd; + __u8 status; + __be16 len; +}; + +struct cpl_set_tcb { + WR_HDR; + union opcode_tid ot; + __u8 reply; + __u8 cpu_idx; + __be16 len; +}; + +/* cpl_set_tcb.reply fields */ +#define S_NO_REPLY 7 +#define V_NO_REPLY(x) ((x) << S_NO_REPLY) +#define F_NO_REPLY V_NO_REPLY(1U) + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __u8 reply; + __u8 cpu_idx; + __be16 word; + __be64 mask; + __be64 val; +}; + +struct cpl_set_tcb_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_pcmd { + WR_HDR; + union opcode_tid ot; + __u8 rsvd[3]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 src:1; + __u8 bundle:1; + __u8 channel:1; + __u8:5; +#else + __u8:5; + __u8 channel:1; + __u8 bundle:1; + __u8 src:1; +#endif + __be32 pcmd_parm[2]; +}; + +struct cpl_pcmd_reply { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd; + __be16 len; +}; + +struct cpl_close_con_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_close_con_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; + __be32 snd_nxt; + __be32 rcv_nxt; +}; + +struct cpl_close_listserv_req { + WR_HDR; + union opcode_tid ot; + __u8 rsvd0; + __u8 cpu_idx; + __be16 rsvd1; +}; + +struct cpl_close_listserv_rpl { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct cpl_abort_req_rss { + RSS_HDR union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 status; + __u8 rsvd2[6]; +}; + +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + RSS_HDR union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 status; + __u8 rsvd2[6]; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + __u8 rsvd1; + __u8 cmd; + __u8 rsvd2[6]; +}; + +struct cpl_peer_close { + RSS_HDR union opcode_tid ot; + __be32 rcv_nxt; +}; + +struct tx_data_wr { + __be32 wr_hi; + __be32 wr_lo; + __be32 len; + __be32 flags; + __be32 sndseq; + __be32 param; +}; + +/* tx_data_wr.flags fields */ +#define S_TX_ACK_PAGES 21 +#define M_TX_ACK_PAGES 0x7 +#define V_TX_ACK_PAGES(x) ((x) 
<< S_TX_ACK_PAGES) +#define G_TX_ACK_PAGES(x) (((x) >> S_TX_ACK_PAGES) & M_TX_ACK_PAGES) + +/* tx_data_wr.param fields */ +#define S_TX_PORT 0 +#define M_TX_PORT 0x7 +#define V_TX_PORT(x) ((x) << S_TX_PORT) +#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT) + +#define S_TX_MSS 4 +#define M_TX_MSS 0xF +#define V_TX_MSS(x) ((x) << S_TX_MSS) +#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS) + +#define S_TX_QOS 8 +#define M_TX_QOS 0xFF +#define V_TX_QOS(x) ((x) << S_TX_QOS) +#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS) + +#define S_TX_SNDBUF 16 +#define M_TX_SNDBUF 0xFFFF +#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF) +#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF) + +struct cpl_tx_data { + union opcode_tid ot; + __be32 len; + __be32 rsvd; + __be16 urg; + __be16 flags; +}; + +/* cpl_tx_data.flags fields */ +#define S_TX_ULP_SUBMODE 6 +#define M_TX_ULP_SUBMODE 0xF +#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE) +#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE) + +#define S_TX_ULP_MODE 10 +#define M_TX_ULP_MODE 0xF +#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE) +#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE) + +#define S_TX_SHOVE 14 +#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE) +#define F_TX_SHOVE V_TX_SHOVE(1U) + +#define S_TX_MORE 15 +#define V_TX_MORE(x) ((x) << S_TX_MORE) +#define F_TX_MORE V_TX_MORE(1U) + +/* additional tx_data_wr.flags fields */ +#define S_TX_CPU_IDX 0 +#define M_TX_CPU_IDX 0x3F +#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX) +#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX) + +#define S_TX_URG 16 +#define V_TX_URG(x) ((x) << S_TX_URG) +#define F_TX_URG V_TX_URG(1U) + +#define S_TX_CLOSE 17 +#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE) +#define F_TX_CLOSE V_TX_CLOSE(1U) + +#define S_TX_INIT 18 +#define V_TX_INIT(x) ((x) << S_TX_INIT) +#define F_TX_INIT V_TX_INIT(1U) + +#define S_TX_IMM_ACK 19 +#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK) +#define F_TX_IMM_ACK V_TX_IMM_ACK(1U) + +#define S_TX_IMM_DMA 20 +#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA) +#define F_TX_IMM_DMA V_TX_IMM_DMA(1U) + +struct cpl_tx_data_ack { + RSS_HDR union opcode_tid ot; + __be32 ack_seq; +}; + +struct cpl_wr_ack { + RSS_HDR union opcode_tid ot; + __be16 credits; + __be16 rsvd; + __be32 snd_nxt; + __be32 snd_una; +}; + +struct cpl_rdma_ec_status { + RSS_HDR union opcode_tid ot; + __u8 rsvd[3]; + __u8 status; +}; + +struct mngt_pktsched_wr { + __be32 wr_hi; + __be32 wr_lo; + __u8 mngt_opcode; + __u8 rsvd[7]; + __u8 sched; + __u8 idx; + __u8 min; + __u8 max; + __u8 binding; + __u8 rsvd1[3]; +}; + +struct cpl_iscsi_hdr { + RSS_HDR union opcode_tid ot; + __be16 pdu_len_ddp; + __be16 len; + __be32 seq; + __be16 urg; + __u8 rsvd; + __u8 status; +}; + +/* cpl_iscsi_hdr.pdu_len_ddp fields */ +#define S_ISCSI_PDU_LEN 0 +#define M_ISCSI_PDU_LEN 0x7FFF +#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN) +#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN) + +#define S_ISCSI_DDP 15 +#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP) +#define F_ISCSI_DDP V_ISCSI_DDP(1U) + +struct cpl_rx_data { + RSS_HDR union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 seq; + __be16 urg; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 dack_mode:2; + __u8 psh:1; + __u8 heartbeat:1; + __u8:4; +#else + __u8:4; + __u8 heartbeat:1; + __u8 psh:1; + __u8 dack_mode:2; +#endif + __u8 status; +}; + +struct cpl_rx_data_ack { + WR_HDR; + union opcode_tid ot; + __be32 credit_dack; +}; + +/* cpl_rx_data_ack.ack_seq fields */ +#define 
S_RX_CREDITS 0 +#define M_RX_CREDITS 0x7FFFFFF +#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS) +#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS) + +#define S_RX_MODULATE 27 +#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE) +#define F_RX_MODULATE V_RX_MODULATE(1U) + +#define S_RX_FORCE_ACK 28 +#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK) +#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U) + +#define S_RX_DACK_MODE 29 +#define M_RX_DACK_MODE 0x3 +#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE) +#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE) + +#define S_RX_DACK_CHANGE 31 +#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) +#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) + +struct cpl_rx_urg_notify { + RSS_HDR union opcode_tid ot; + __be32 seq; +}; + +struct cpl_rx_ddp_complete { + RSS_HDR union opcode_tid ot; + __be32 ddp_report; +}; + +struct cpl_rx_data_ddp { + RSS_HDR union opcode_tid ot; + __be16 urg; + __be16 len; + __be32 seq; + union { + __be32 nxt_seq; + __be32 ddp_report; + }; + __be32 ulp_crc; + __be32 ddpvld_status; +}; + +/* cpl_rx_data_ddp.ddpvld_status fields */ +#define S_DDP_STATUS 0 +#define M_DDP_STATUS 0xFF +#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS) +#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS) + +#define S_DDP_VALID 15 +#define M_DDP_VALID 0x1FFFF +#define V_DDP_VALID(x) ((x) << S_DDP_VALID) +#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID) + +#define S_DDP_PPOD_MISMATCH 15 +#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH) +#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U) + +#define S_DDP_PDU 16 +#define V_DDP_PDU(x) ((x) << S_DDP_PDU) +#define F_DDP_PDU V_DDP_PDU(1U) + +#define S_DDP_LLIMIT_ERR 17 +#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR) +#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U) + +#define S_DDP_PPOD_PARITY_ERR 18 +#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR) +#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U) + +#define S_DDP_PADDING_ERR 19 +#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR) +#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U) + +#define S_DDP_HDRCRC_ERR 20 +#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR) +#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U) + +#define S_DDP_DATACRC_ERR 21 +#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR) +#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U) + +#define S_DDP_INVALID_TAG 22 +#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG) +#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U) + +#define S_DDP_ULIMIT_ERR 23 +#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR) +#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U) + +#define S_DDP_OFFSET_ERR 24 +#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR) +#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U) + +#define S_DDP_COLOR_ERR 25 +#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR) +#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U) + +#define S_DDP_TID_MISMATCH 26 +#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH) +#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U) + +#define S_DDP_INVALID_PPOD 27 +#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD) +#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U) + +#define S_DDP_ULP_MODE 28 +#define M_DDP_ULP_MODE 0xF +#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE) +#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE) + +/* cpl_rx_data_ddp.ddp_report fields */ +#define S_DDP_OFFSET 0 +#define M_DDP_OFFSET 0x3FFFFF +#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET) +#define 
G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET) + +#define S_DDP_URG 24 +#define V_DDP_URG(x) ((x) << S_DDP_URG) +#define F_DDP_URG V_DDP_URG(1U) + +#define S_DDP_PSH 25 +#define V_DDP_PSH(x) ((x) << S_DDP_PSH) +#define F_DDP_PSH V_DDP_PSH(1U) + +#define S_DDP_BUF_COMPLETE 26 +#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE) +#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U) + +#define S_DDP_BUF_TIMED_OUT 27 +#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT) +#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U) + +#define S_DDP_BUF_IDX 28 +#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX) +#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U) + +struct cpl_tx_pkt { + WR_HDR; + __be32 cntrl; + __be32 len; +}; + +struct cpl_tx_pkt_lso { + WR_HDR; + __be32 cntrl; + __be32 len; + + __be32 rsvd; + __be32 lso_info; +}; + +/* cpl_tx_pkt*.cntrl fields */ +#define S_TXPKT_VLAN 0 +#define M_TXPKT_VLAN 0xFFFF +#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN) +#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN) + +#define S_TXPKT_INTF 16 +#define M_TXPKT_INTF 0xF +#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF) +#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF) + +#define S_TXPKT_IPCSUM_DIS 20 +#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS) +#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U) + +#define S_TXPKT_L4CSUM_DIS 21 +#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS) +#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U) + +#define S_TXPKT_VLAN_VLD 22 +#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD) +#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U) + +#define S_TXPKT_LOOPBACK 23 +#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK) +#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U) + +#define S_TXPKT_OPCODE 24 +#define M_TXPKT_OPCODE 0xFF +#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE) +#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE) + +/* cpl_tx_pkt_lso.lso_info fields */ +#define S_LSO_MSS 0 +#define M_LSO_MSS 0x3FFF +#define V_LSO_MSS(x) ((x) << S_LSO_MSS) +#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS) + +#define S_LSO_ETH_TYPE 14 +#define M_LSO_ETH_TYPE 0x3 +#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE) +#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE) + +#define S_LSO_TCPHDR_WORDS 16 +#define M_LSO_TCPHDR_WORDS 0xF +#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS) +#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS) + +#define S_LSO_IPHDR_WORDS 20 +#define M_LSO_IPHDR_WORDS 0xF +#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS) +#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS) + +#define S_LSO_IPV6 24 +#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6) +#define F_LSO_IPV6 V_LSO_IPV6(1U) + +struct cpl_trace_pkt { +#ifdef CHELSIO_FW + __u8 rss_opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 err:1; + __u8:7; +#else + __u8:7; + __u8 err:1; +#endif + __u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 qid:4; + __u8:4; +#else + __u8:4; + __u8 qid:4; +#endif + __be32 tstamp; +#endif /* CHELSIO_FW */ + + __u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 iff:4; + __u8:4; +#else + __u8:4; + __u8 iff:4; +#endif + __u8 rsvd[4]; + __be16 len; +}; + +struct cpl_rx_pkt { + RSS_HDR __u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 iff:4; + __u8 csum_valid:1; + __u8 ipmi_pkt:1; + __u8 vlan_valid:1; + __u8 fragment:1; +#else + __u8 fragment:1; + __u8 vlan_valid:1; + __u8 ipmi_pkt:1; + __u8 csum_valid:1; + __u8 iff:4; 
+#endif + __be16 csum; + __be16 vlan; + __be16 len; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be32 params; + __u8 rsvd[2]; + __u8 dst_mac[6]; +}; + +/* cpl_l2t_write_req.params fields */ +#define S_L2T_W_IDX 0 +#define M_L2T_W_IDX 0x7FF +#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX) +#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX) + +#define S_L2T_W_VLAN 11 +#define M_L2T_W_VLAN 0xFFF +#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN) +#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN) + +#define S_L2T_W_IFF 23 +#define M_L2T_W_IFF 0xF +#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF) +#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF) + +#define S_L2T_W_PRIO 27 +#define M_L2T_W_PRIO 0x7 +#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO) +#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO) + +struct cpl_l2t_write_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_l2t_read_req { + WR_HDR; + union opcode_tid ot; + __be16 rsvd; + __be16 l2t_idx; +}; + +struct cpl_l2t_read_rpl { + RSS_HDR union opcode_tid ot; + __be32 params; + __u8 rsvd[2]; + __u8 dst_mac[6]; +}; + +/* cpl_l2t_read_rpl.params fields */ +#define S_L2T_R_PRIO 0 +#define M_L2T_R_PRIO 0x7 +#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO) +#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO) + +#define S_L2T_R_VLAN 8 +#define M_L2T_R_VLAN 0xFFF +#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN) +#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN) + +#define S_L2T_R_IFF 20 +#define M_L2T_R_IFF 0xF +#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF) +#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF) + +#define S_L2T_STATUS 24 +#define M_L2T_STATUS 0xFF +#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS) +#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS) + +struct cpl_smt_write_req { + WR_HDR; + union opcode_tid ot; + __u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 mtu_idx:4; + __u8 iff:4; +#else + __u8 iff:4; + __u8 mtu_idx:4; +#endif + __be16 rsvd2; + __be16 rsvd3; + __u8 src_mac1[6]; + __be16 rsvd4; + __u8 src_mac0[6]; +}; + +struct cpl_smt_write_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_smt_read_req { + WR_HDR; + union opcode_tid ot; + __u8 rsvd0; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:4; + __u8 iff:4; +#else + __u8 iff:4; + __u8:4; +#endif + __be16 rsvd2; +}; + +struct cpl_smt_read_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 mtu_idx:4; + __u8:4; +#else + __u8:4; + __u8 mtu_idx:4; +#endif + __be16 rsvd2; + __be16 rsvd3; + __u8 src_mac1[6]; + __be16 rsvd4; + __u8 src_mac0[6]; +}; + +struct cpl_rte_delete_req { + WR_HDR; + union opcode_tid ot; + __be32 params; +}; + +/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */ +#define S_RTE_REQ_LUT_IX 8 +#define M_RTE_REQ_LUT_IX 0x7FF +#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX) +#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX) + +#define S_RTE_REQ_LUT_BASE 19 +#define M_RTE_REQ_LUT_BASE 0x7FF +#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE) +#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE) + +#define S_RTE_READ_REQ_SELECT 31 +#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT) +#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U) + +struct cpl_rte_delete_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_rte_write_req { + 
WR_HDR; + union opcode_tid ot; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:6; + __u8 write_tcam:1; + __u8 write_l2t_lut:1; +#else + __u8 write_l2t_lut:1; + __u8 write_tcam:1; + __u8:6; +#endif + __u8 rsvd[3]; + __be32 lut_params; + __be16 rsvd2; + __be16 l2t_idx; + __be32 netmask; + __be32 faddr; +}; + +/* cpl_rte_write_req.lut_params fields */ +#define S_RTE_WRITE_REQ_LUT_IX 10 +#define M_RTE_WRITE_REQ_LUT_IX 0x7FF +#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX) +#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX) + +#define S_RTE_WRITE_REQ_LUT_BASE 21 +#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF +#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE) +#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE) + +struct cpl_rte_write_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd[3]; +}; + +struct cpl_rte_read_req { + WR_HDR; + union opcode_tid ot; + __be32 params; +}; + +struct cpl_rte_read_rpl { + RSS_HDR union opcode_tid ot; + __u8 status; + __u8 rsvd0; + __be16 l2t_idx; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:7; + __u8 select:1; +#else + __u8 select:1; + __u8:7; +#endif + __u8 rsvd2[3]; + __be32 addr; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_barrier { + WR_HDR; + __u8 opcode; + __u8 rsvd[7]; +}; + +struct cpl_rdma_read_req { + __u8 opcode; + __u8 rsvd[15]; +}; + +struct cpl_rdma_terminate { +#ifdef CHELSIO_FW + __u8 opcode; + __u8 rsvd[2]; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 rspq:3; + __u8:5; +#else + __u8:5; + __u8 rspq:3; +#endif + __be32 tid_len; +#endif + __be32 msn; + __be32 mo; + __u8 data[0]; +}; + +/* cpl_rdma_terminate.tid_len fields */ +#define S_FLIT_CNT 0 +#define M_FLIT_CNT 0xFF +#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT) +#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT) + +#define S_TERM_TID 8 +#define M_TERM_TID 0xFFFFF +#define V_TERM_TID(x) ((x) << S_TERM_TID) +#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID) + +/* ULP_TX opcodes */ +enum { ULP_MEM_READ = 2, ULP_MEM_WRITE = 3, ULP_TXPKT = 4 }; + +#define S_ULPTX_CMD 28 +#define M_ULPTX_CMD 0xF +#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD) + +#define S_ULPTX_NFLITS 0 +#define M_ULPTX_NFLITS 0xFF +#define V_ULPTX_NFLITS(x) ((x) << S_ULPTX_NFLITS) + +struct ulp_mem_io { + WR_HDR; + __be32 cmd_lock_addr; + __be32 len; +}; + +/* ulp_mem_io.cmd_lock_addr fields */ +#define S_ULP_MEMIO_ADDR 0 +#define M_ULP_MEMIO_ADDR 0x7FFFFFF +#define V_ULP_MEMIO_ADDR(x) ((x) << S_ULP_MEMIO_ADDR) +#define S_ULP_MEMIO_LOCK 27 +#define V_ULP_MEMIO_LOCK(x) ((x) << S_ULP_MEMIO_LOCK) +#define F_ULP_MEMIO_LOCK V_ULP_MEMIO_LOCK(1U) + +/* ulp_mem_io.len fields */ +#define S_ULP_MEMIO_DATA_LEN 28 +#define M_ULP_MEMIO_DATA_LEN 0xF +#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN) + +#endif /* T3_CPL_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c new file mode 100644 index 000000000..a22768c94 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -0,0 +1,3777 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "common.h" +#include "regs.h" +#include "sge_defs.h" +#include "firmware_exports.h" + +static void t3_port_intr_clear(struct adapter *adapter, int idx); + +/** + * t3_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ + +int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t3_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +/** + * t3_write_regs - write a bunch of registers + * @adapter: the adapter to program + * @p: an array of register address/register value pairs + * @n: the number of address/value pairs + * @offset: register address offset + * + * Takes an array of register address/register value pairs and writes each + * value to the corresponding register. Register addresses are adjusted + * by the supplied offset. + */ +void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p, + int n, unsigned int offset) +{ + while (n--) { + t3_write_reg(adapter, p->reg_addr + offset, p->val); + p++; + } +} + +/** + * t3_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. 
+ */ +void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t3_read_reg(adapter, addr) & ~mask; + + t3_write_reg(adapter, addr, v | val); + t3_read_reg(adapter, addr); /* flush */ +} + +/** + * t3_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @start_idx: index of first indirect register to read + * @nregs: how many indirect registers to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t3_write_reg(adap, addr_reg, start_idx); + *vals++ = t3_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t3_mc7_bd_read - read from MC7 through backdoor accesses + * @mc7: identifies MC7 to read from + * @start: index of first 64-bit word to read + * @n: number of 64-bit words to read + * @buf: where to store the read result + * + * Read n 64-bit words from MC7 starting at word start, using backdoor + * accesses. + */ +int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, + u64 *buf) +{ + static const int shift[] = { 0, 0, 16, 24 }; + static const int step[] = { 0, 32, 16, 8 }; + + unsigned int size64 = mc7->size / 8; /* # of 64-bit words */ + struct adapter *adap = mc7->adapter; + + if (start >= size64 || start + n > size64) + return -EINVAL; + + start *= (8 << mc7->width); + while (n--) { + int i; + u64 val64 = 0; + + for (i = (1 << mc7->width) - 1; i >= 0; --i) { + int attempts = 10; + u32 val; + + t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start); + t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0); + val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP); + while ((val & F_BUSY) && attempts--) + val = t3_read_reg(adap, + mc7->offset + A_MC7_BD_OP); + if (val & F_BUSY) + return -EIO; + + val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1); + if (mc7->width == 0) { + val64 = t3_read_reg(adap, + mc7->offset + + A_MC7_BD_DATA0); + val64 |= (u64) val << 32; + } else { + if (mc7->width > 1) + val >>= shift[mc7->width]; + val64 |= (u64) val << (step[mc7->width] * i); + } + start += 8; + } + *buf++ = val64; + } + return 0; +} + +/* + * Initialize MI1. + */ +static void mi1_init(struct adapter *adap, const struct adapter_info *ai) +{ + u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1; + u32 val = F_PREEN | V_CLKDIV(clkdiv); + + t3_write_reg(adap, A_MI1_CFG, val); +} + +#define MDIO_ATTEMPTS 20 + +/* + * MI1 read/write operations for clause 22 PHYs. 
+ */ +static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); + + mutex_lock(&adapter->mdio_lock); + t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); + t3_write_reg(adapter, A_MI1_ADDR, addr); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); + if (!ret) + ret = t3_read_reg(adapter, A_MI1_DATA); + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr); + + mutex_lock(&adapter->mdio_lock); + t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1)); + t3_write_reg(adapter, A_MI1_ADDR, addr); + t3_write_reg(adapter, A_MI1_DATA, val); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10); + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static const struct mdio_ops mi1_mdio_ops = { + .read = t3_mi1_read, + .write = t3_mi1_write, + .mode_support = MDIO_SUPPORTS_C22 +}; + +/* + * Performs the address cycle for clause 45 PHYs. + * Must be called with the MDIO_LOCK held. + */ +static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr, + int reg_addr) +{ + u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr); + + t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0); + t3_write_reg(adapter, A_MI1_ADDR, addr); + t3_write_reg(adapter, A_MI1_DATA, reg_addr); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0)); + return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, + MDIO_ATTEMPTS, 10); +} + +/* + * MI1 read/write operations for indirect-addressed PHYs. 
+ */ +static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + + mutex_lock(&adapter->mdio_lock); + ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr); + if (!ret) { + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, + MDIO_ATTEMPTS, 10); + if (!ret) + ret = t3_read_reg(adapter, A_MI1_DATA); + } + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr, + u16 reg_addr, u16 val) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + int ret; + + mutex_lock(&adapter->mdio_lock); + ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr); + if (!ret) { + t3_write_reg(adapter, A_MI1_DATA, val); + t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1)); + ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, + MDIO_ATTEMPTS, 10); + } + mutex_unlock(&adapter->mdio_lock); + return ret; +} + +static const struct mdio_ops mi1_mdio_ext_ops = { + .read = mi1_ext_read, + .write = mi1_ext_write, + .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22 +}; + +/** + * t3_mdio_change_bits - modify the value of a PHY register + * @phy: the PHY to operate on + * @mmd: the device address + * @reg: the register address + * @clear: what part of the register value to mask off + * @set: what part of the register value to set + * + * Changes the value of a PHY register by applying a mask to its current + * value and ORing the result with a new value. + */ +int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear, + unsigned int set) +{ + int ret; + unsigned int val; + + ret = t3_mdio_read(phy, mmd, reg, &val); + if (!ret) { + val &= ~clear; + ret = t3_mdio_write(phy, mmd, reg, val | set); + } + return ret; +} + +/** + * t3_phy_reset - reset a PHY block + * @phy: the PHY to operate on + * @mmd: the device address of the PHY block to reset + * @wait: how long to wait for the reset to complete in 1ms increments + * + * Resets a PHY block and optionally waits for the reset to complete. + * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset + * for 10G PHYs. + */ +int t3_phy_reset(struct cphy *phy, int mmd, int wait) +{ + int err; + unsigned int ctl; + + err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER, + MDIO_CTRL1_RESET); + if (err || !wait) + return err; + + do { + err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl); + if (err) + return err; + ctl &= MDIO_CTRL1_RESET; + if (ctl) + msleep(1); + } while (ctl && --wait); + + return ctl ? -1 : 0; +} + +/** + * t3_phy_advertise - set the PHY advertisement registers for autoneg + * @phy: the PHY to operate on + * @advert: bitmap of capabilities the PHY should advertise + * + * Sets a 10/100/1000 PHY's advertisement registers to advertise the + * requested capabilities. 
+ */ +int t3_phy_advertise(struct cphy *phy, unsigned int advert) +{ + int err; + unsigned int val = 0; + + err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val); + if (err) + return err; + + val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + if (advert & ADVERTISED_1000baseT_Half) + val |= ADVERTISE_1000HALF; + if (advert & ADVERTISED_1000baseT_Full) + val |= ADVERTISE_1000FULL; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val); + if (err) + return err; + + val = 1; + if (advert & ADVERTISED_10baseT_Half) + val |= ADVERTISE_10HALF; + if (advert & ADVERTISED_10baseT_Full) + val |= ADVERTISE_10FULL; + if (advert & ADVERTISED_100baseT_Half) + val |= ADVERTISE_100HALF; + if (advert & ADVERTISED_100baseT_Full) + val |= ADVERTISE_100FULL; + if (advert & ADVERTISED_Pause) + val |= ADVERTISE_PAUSE_CAP; + if (advert & ADVERTISED_Asym_Pause) + val |= ADVERTISE_PAUSE_ASYM; + return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val); +} + +/** + * t3_phy_advertise_fiber - set fiber PHY advertisement register + * @phy: the PHY to operate on + * @advert: bitmap of capabilities the PHY should advertise + * + * Sets a fiber PHY's advertisement register to advertise the + * requested capabilities. + */ +int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert) +{ + unsigned int val = 0; + + if (advert & ADVERTISED_1000baseT_Half) + val |= ADVERTISE_1000XHALF; + if (advert & ADVERTISED_1000baseT_Full) + val |= ADVERTISE_1000XFULL; + if (advert & ADVERTISED_Pause) + val |= ADVERTISE_1000XPAUSE; + if (advert & ADVERTISED_Asym_Pause) + val |= ADVERTISE_1000XPSE_ASYM; + return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val); +} + +/** + * t3_set_phy_speed_duplex - force PHY speed and duplex + * @phy: the PHY to operate on + * @speed: requested PHY speed + * @duplex: requested PHY duplex + * + * Force a 10/100/1000 PHY's speed and duplex. This also disables + * auto-negotiation except for GigE, where auto-negotiation is mandatory. + */ +int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + int err; + unsigned int ctl; + + err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl); + if (err) + return err; + + if (speed >= 0) { + ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + if (speed == SPEED_100) + ctl |= BMCR_SPEED100; + else if (speed == SPEED_1000) + ctl |= BMCR_SPEED1000; + } + if (duplex >= 0) { + ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE); + if (duplex == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + } + if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */ + ctl |= BMCR_ANENABLE; + return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl); +} + +int t3_phy_lasi_intr_enable(struct cphy *phy) +{ + return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, + MDIO_PMA_LASI_LSALARM); +} + +int t3_phy_lasi_intr_disable(struct cphy *phy) +{ + return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0); +} + +int t3_phy_lasi_intr_clear(struct cphy *phy) +{ + u32 val; + + return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); +} + +int t3_phy_lasi_intr_handler(struct cphy *phy) +{ + unsigned int status; + int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, + &status); + + if (err) + return err; + return (status & MDIO_PMA_LASI_LSALARM) ? 
cphy_cause_link_change : 0; +} + +static const struct adapter_info t3_adap_info[] = { + {1, 1, 0, + F_GPIO2_OEN | F_GPIO4_OEN | + F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, + &mi1_mdio_ops, "Chelsio PE9000"}, + {1, 1, 0, + F_GPIO2_OEN | F_GPIO4_OEN | + F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0, + &mi1_mdio_ops, "Chelsio T302"}, + {1, 0, 0, + F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN | + F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio T310"}, + {1, 1, 0, + F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN | + F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL | + F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio T320"}, + {}, + {}, + {1, 0, 0, + F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN | + F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, + { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio T310" }, + {1, 0, 0, + F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | + F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL, + { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI, + &mi1_mdio_ext_ops, "Chelsio N320E-G2" }, +}; + +/* + * Return the adapter_info structure with a given index. Out-of-range indices + * return NULL. + */ +const struct adapter_info *t3_get_adapter_info(unsigned int id) +{ + return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL; +} + +struct port_type_info { + int (*phy_prep)(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *ops); +}; + +static const struct port_type_info port_types[] = { + { NULL }, + { t3_ael1002_phy_prep }, + { t3_vsc8211_phy_prep }, + { NULL}, + { t3_xaui_direct_phy_prep }, + { t3_ael2005_phy_prep }, + { t3_qt2045_phy_prep }, + { t3_ael1006_phy_prep }, + { NULL }, + { t3_aq100x_phy_prep }, + { t3_ael2020_phy_prep }, +}; + +#define VPD_ENTRY(name, len) \ + u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] + +/* + * Partial EEPROM Vital Product Data structure. Includes only the ID and + * VPD-R sections. + */ +struct t3_vpd { + u8 id_tag; + u8 id_len[2]; + u8 id_data[16]; + u8 vpdr_tag; + u8 vpdr_len[2]; + VPD_ENTRY(pn, 16); /* part number */ + VPD_ENTRY(ec, 16); /* EC level */ + VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ + VPD_ENTRY(na, 12); /* MAC address base */ + VPD_ENTRY(cclk, 6); /* core clock */ + VPD_ENTRY(mclk, 6); /* mem clock */ + VPD_ENTRY(uclk, 6); /* uP clk */ + VPD_ENTRY(mdc, 6); /* MDIO clk */ + VPD_ENTRY(mt, 2); /* mem timing */ + VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */ + VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */ + VPD_ENTRY(port0, 2); /* PHY0 complex */ + VPD_ENTRY(port1, 2); /* PHY1 complex */ + VPD_ENTRY(port2, 2); /* PHY2 complex */ + VPD_ENTRY(port3, 2); /* PHY3 complex */ + VPD_ENTRY(rv, 1); /* csum */ + u32 pad; /* for multiple-of-4 sizing and alignment */ +}; + +#define EEPROM_MAX_POLL 40 +#define EEPROM_STAT_ADDR 0x4000 +#define VPD_BASE 0xc00 + +/** + * t3_seeprom_read - read a VPD EEPROM location + * @adapter: adapter to read + * @addr: EEPROM address + * @data: where to store the read data + * + * Read a 32-bit word from a location in VPD EEPROM using the card's PCI + * VPD ROM capability. A zero is written to the flag bit when the + * address is written to the control register. 
The hardware device will + * set the flag to 1 when 4 bytes have been read into the data register. + */ +int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data) +{ + u16 val; + int attempts = EEPROM_MAX_POLL; + u32 v; + unsigned int base = adapter->params.pci.vpd_cap_addr; + + if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) + return -EINVAL; + + pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr); + do { + udelay(10); + pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); + } while (!(val & PCI_VPD_ADDR_F) && --attempts); + + if (!(val & PCI_VPD_ADDR_F)) { + CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr); + return -EIO; + } + pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v); + *data = cpu_to_le32(v); + return 0; +} + +/** + * t3_seeprom_write - write a VPD EEPROM location + * @adapter: adapter to write + * @addr: EEPROM address + * @data: value to write + * + * Write a 32-bit word to a location in VPD EEPROM using the card's PCI + * VPD ROM capability. + */ +int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data) +{ + u16 val; + int attempts = EEPROM_MAX_POLL; + unsigned int base = adapter->params.pci.vpd_cap_addr; + + if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) + return -EINVAL; + + pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA, + le32_to_cpu(data)); + pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR, + addr | PCI_VPD_ADDR_F); + do { + msleep(1); + pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); + } while ((val & PCI_VPD_ADDR_F) && --attempts); + + if (val & PCI_VPD_ADDR_F) { + CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr); + return -EIO; + } + return 0; +} + +/** + * t3_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: 1 to enable write protection, 0 to disable it + * + * Enables or disables write protection on the serial EEPROM. + */ +int t3_seeprom_wp(struct adapter *adapter, int enable) +{ + return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); +} + +/** + * get_vpd_params - read VPD parameters from VPD EEPROM + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. + */ +static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + int i, addr, ret; + struct t3_vpd vpd; + + /* + * Card information is normally at VPD_BASE but some early cards had + * it at 0. + */ + ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd); + if (ret) + return ret; + addr = vpd.id_tag == 0x82 ? VPD_BASE : 0; + + for (i = 0; i < sizeof(vpd); i += 4) { + ret = t3_seeprom_read(adapter, addr + i, + (__le32 *)((u8 *)&vpd + i)); + if (ret) + return ret; + } + + p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); + p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10); + p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10); + p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); + p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); + memcpy(p->sn, vpd.sn_data, SERNUM_LEN); + + /* Old eeproms didn't have port information */ + if (adapter->params.rev == 0 && !vpd.port0_data[0]) { + p->port_type[0] = uses_xaui(adapter) ? 1 : 2; + p->port_type[1] = uses_xaui(adapter) ? 
6 : 2; + } else { + p->port_type[0] = hex_to_bin(vpd.port0_data[0]); + p->port_type[1] = hex_to_bin(vpd.port1_data[0]); + p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16); + p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16); + } + + ret = hex2bin(p->eth_base, vpd.na_data, 6); + if (ret < 0) + return -EINVAL; + return 0; +} + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 5, /* max retries for SF1 operations */ + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ + SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ + + FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */ + FW_VERS_ADDR = 0x7fffc, /* flash address holding FW version */ + FW_MIN_SIZE = 8 /* at least version and csum */ +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); + ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); + if (!ret) + *valp = t3_read_reg(adapter, A_SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t3_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t3_write_reg(adapter, A_SF_DATA, val); + t3_write_reg(adapter, A_SF_OP, + V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); + return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10); +} + +/** + * flash_wait_op - wait for a flash operation to complete + * @adapter: the adapter + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. 
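+ *
+ * Callers in this file poll briefly after programming a page
+ * (flash_wait_op(adapter, 5, 1)) and allow much longer delays after a
+ * sector erase (flash_wait_op(adapter, 5, 500)).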
+ */ +static int flash_wait_op(struct adapter *adapter, int attempts, int delay) +{ + int ret; + u32 status; + + while (1) { + if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 || + (ret = sf1_read(adapter, 1, 0, &status)) != 0) + return ret; + if (!(status & 1)) + return 0; + if (--attempts == 0) + return -EAGAIN; + if (delay) + msleep(delay); + } +} + +/** + * t3_read_flash - read words from serial flash + * @adapter: the adapter + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianness. + */ +static int t3_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented) +{ + int ret; + + if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) + return -EINVAL; + + addr = swab32(addr) | SF_RD_DATA_FAST; + + if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 || + (ret = sf1_read(adapter, 1, 1, data)) != 0) + return ret; + + for (; nwords; nwords--, data++) { + ret = sf1_read(adapter, 4, nwords > 1, data); + if (ret) + return ret; + if (byte_oriented) + *data = htonl(*data); + } + return 0; +} + +/** + * t3_write_flash - write up to a page of data to the serial flash + * @adapter: the adapter + * @addr: the start address to write + * @n: length of data to write + * @data: the data to write + * + * Writes up to a page of data (256 bytes) to the serial flash starting + * at the given address. + */ +static int t3_write_flash(struct adapter *adapter, unsigned int addr, + unsigned int n, const u8 *data) +{ + int ret; + u32 buf[64]; + unsigned int i, c, left, val, offset = addr & 0xff; + + if (addr + n > SF_SIZE || offset + n > 256) + return -EINVAL; + + val = swab32(addr) | SF_PROG_PAGE; + + if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 1, val)) != 0) + return ret; + + for (left = n; left; left -= c) { + c = min(left, 4U); + for (val = 0, i = 0; i < c; ++i) + val = (val << 8) + *data++; + + ret = sf1_write(adapter, c, c != left, val); + if (ret) + return ret; + } + if ((ret = flash_wait_op(adapter, 5, 1)) != 0) + return ret; + + /* Read the page to verify the write succeeded */ + ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + if (ret) + return ret; + + if (memcmp(data - n, (u8 *) buf + offset, n)) + return -EIO; + return 0; +} + +/** + * t3_get_tp_version - read the tp sram version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the protocol sram version from sram. + */ +int t3_get_tp_version(struct adapter *adapter, u32 *vers) +{ + int ret; + + /* Get version loaded in SRAM */ + t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0); + ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0, + 1, 1, 5, 1); + if (ret) + return ret; + + *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1); + + return 0; +} + +/** + * t3_check_tpsram_version - read the tp sram version + * @adapter: the adapter + * + * Reads the protocol sram version from flash. 
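+ *
+ * The check is skipped on T3 rev A adapters; otherwise the version read
+ * back is compared against the TP_VERSION_MAJOR/TP_VERSION_MINOR values
+ * this driver was built with, and -EINVAL is returned on a mismatch.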
+ */ +int t3_check_tpsram_version(struct adapter *adapter) +{ + int ret; + u32 vers; + unsigned int major, minor; + + if (adapter->params.rev == T3_REV_A) + return 0; + + + ret = t3_get_tp_version(adapter, &vers); + if (ret) + return ret; + + major = G_TP_VERSION_MAJOR(vers); + minor = G_TP_VERSION_MINOR(vers); + + if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) + return 0; + else { + CH_ERR(adapter, "found wrong TP version (%u.%u), " + "driver compiled for version %d.%d\n", major, minor, + TP_VERSION_MAJOR, TP_VERSION_MINOR); + } + return -EINVAL; +} + +/** + * t3_check_tpsram - check if provided protocol SRAM + * is compatible with this driver + * @adapter: the adapter + * @tp_sram: the firmware image to write + * @size: image size + * + * Checks if an adapter's tp sram is compatible with the driver. + * Returns 0 if the versions are compatible, a negative error otherwise. + */ +int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram, + unsigned int size) +{ + u32 csum; + unsigned int i; + const __be32 *p = (const __be32 *)tp_sram; + + /* Verify checksum */ + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + if (csum != 0xffffffff) { + CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n", + csum); + return -EINVAL; + } + + return 0; +} + +enum fw_version_type { + FW_VERSION_N3, + FW_VERSION_T3 +}; + +/** + * t3_get_fw_version - read the firmware version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW version from flash. + */ +int t3_get_fw_version(struct adapter *adapter, u32 *vers) +{ + return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0); +} + +/** + * t3_check_fw_version - check if the FW is compatible with this driver + * @adapter: the adapter + * + * Checks if an adapter's FW is compatible with the driver. Returns 0 + * if the versions are compatible, a negative error otherwise. + */ +int t3_check_fw_version(struct adapter *adapter) +{ + int ret; + u32 vers; + unsigned int type, major, minor; + + ret = t3_get_fw_version(adapter, &vers); + if (ret) + return ret; + + type = G_FW_VERSION_TYPE(vers); + major = G_FW_VERSION_MAJOR(vers); + minor = G_FW_VERSION_MINOR(vers); + + if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR && + minor == FW_VERSION_MINOR) + return 0; + else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR) + CH_WARN(adapter, "found old FW minor version(%u.%u), " + "driver compiled for version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + else { + CH_WARN(adapter, "found newer FW version(%u.%u), " + "driver compiled for version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + return 0; + } + return -EINVAL; +} + +/** + * t3_flash_erase_sectors - erase a range of flash sectors + * @adapter: the adapter + * @start: the first sector to erase + * @end: the last sector to erase + * + * Erases the sectors in the given range. + */ +static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end) +{ + while (start <= end) { + int ret; + + if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 0, + SF_ERASE_SECTOR | (start << 8))) != 0 || + (ret = flash_wait_op(adapter, 5, 500)) != 0) + return ret; + start++; + } + return 0; +} + +/** + * t3_load_fw - download firmware + * @adapter: the adapter + * @fw_data: the firmware image to write + * @size: image size + * + * Write the supplied firmware image to the card's serial flash. 
+ * The FW image has the following sections: @size - 8 bytes of code and + * data, followed by 4 bytes of FW version, followed by the 32-bit + * 1's complement checksum of the whole image. + */ +int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size) +{ + u32 csum; + unsigned int i; + const __be32 *p = (const __be32 *)fw_data; + int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16; + + if ((size & 3) || size < FW_MIN_SIZE) + return -EINVAL; + if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR) + return -EFBIG; + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + if (csum != 0xffffffff) { + CH_ERR(adapter, "corrupted firmware image, checksum %u\n", + csum); + return -EINVAL; + } + + ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector); + if (ret) + goto out; + + size -= 8; /* trim off version and checksum */ + for (addr = FW_FLASH_BOOT_ADDR; size;) { + unsigned int chunk_size = min(size, 256U); + + ret = t3_write_flash(adapter, addr, chunk_size, fw_data); + if (ret) + goto out; + + addr += chunk_size; + fw_data += chunk_size; + size -= chunk_size; + } + + ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data); +out: + if (ret) + CH_ERR(adapter, "firmware download failed, error %d\n", ret); + return ret; +} + +#define CIM_CTL_BASE 0x2000 + +/** + * t3_cim_ctl_blk_read - read a block from CIM control region + * + * @adap: the adapter + * @addr: the start address within the CIM control region + * @n: number of words to read + * @valp: where to store the result + * + * Reads a block of 4-byte words from the CIM control region. + */ +int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr, + unsigned int n, unsigned int *valp) +{ + int ret = 0; + + if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY) + return -EBUSY; + + for ( ; !ret && n--; addr += 4) { + t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr); + ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY, + 0, 5, 2); + if (!ret) + *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA); + } + return ret; +} + +static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg, + u32 *rx_hash_high, u32 *rx_hash_low) +{ + /* stop Rx unicast traffic */ + t3_mac_disable_exact_filters(mac); + + /* stop broadcast, multicast, promiscuous mode traffic */ + *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG); + t3_set_reg_field(mac->adapter, A_XGM_RX_CFG, + F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, + F_DISBCAST); + + *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0); + + *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0); + + /* Leave time to drain max RX fifo */ + msleep(1); +} + +static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg, + u32 rx_hash_high, u32 rx_hash_low) +{ + t3_mac_enable_exact_filters(mac); + t3_set_reg_field(mac->adapter, A_XGM_RX_CFG, + F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES, + rx_cfg); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high); + t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low); +} + +/** + * t3_link_changed - handle interface link changes + * @adapter: the adapter + * @port_id: the port index that changed link state + * + * Called when a port's link settings change to propagate the new values + * to the associated PHY and MAC. After performing the common tasks it + * invokes an OS-specific handler. 
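+ *
+ * The OS-specific handler invoked at the end is t3_os_link_changed(),
+ * which receives the final link state, speed, duplex and flow-control
+ * settings determined here.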
+ */ +void t3_link_changed(struct adapter *adapter, int port_id) +{ + int link_ok, speed, duplex, fc; + struct port_info *pi = adap2pinfo(adapter, port_id); + struct cphy *phy = &pi->phy; + struct cmac *mac = &pi->mac; + struct link_config *lc = &pi->link_config; + + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); + + if (!lc->link_ok && link_ok) { + u32 rx_cfg, rx_hash_high, rx_hash_low; + u32 status; + + t3_xgm_intr_enable(adapter, port_id); + t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low); + t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); + t3_mac_enable(mac, MAC_DIRECTION_RX); + + status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset); + if (status & F_LINKFAULTCHANGE) { + mac->stats.link_faults++; + pi->link_fault = 1; + } + t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); + } + + if (lc->requested_fc & PAUSE_AUTONEG) + fc &= lc->requested_fc; + else + fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + if (link_ok == lc->link_ok && speed == lc->speed && + duplex == lc->duplex && fc == lc->fc) + return; /* nothing changed */ + + if (link_ok != lc->link_ok && adapter->params.rev > 0 && + uses_xaui(adapter)) { + if (link_ok) + t3b_pcs_reset(mac); + t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, + link_ok ? F_TXACTENABLE | F_RXEN : 0); + } + lc->link_ok = link_ok; + lc->speed = speed < 0 ? SPEED_INVALID : speed; + lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; + + if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) { + /* Set MAC speed, duplex, and flow control to match PHY. */ + t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc); + lc->fc = fc; + } + + t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault, + speed, duplex, fc); +} + +void t3_link_fault(struct adapter *adapter, int port_id) +{ + struct port_info *pi = adap2pinfo(adapter, port_id); + struct cmac *mac = &pi->mac; + struct cphy *phy = &pi->phy; + struct link_config *lc = &pi->link_config; + int link_ok, speed, duplex, fc, link_fault; + u32 rx_cfg, rx_hash_high, rx_hash_low; + + t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low); + + if (adapter->params.rev > 0 && uses_xaui(adapter)) + t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0); + + t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0); + t3_mac_enable(mac, MAC_DIRECTION_RX); + + t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low); + + link_fault = t3_read_reg(adapter, + A_XGM_INT_STATUS + mac->offset); + link_fault &= F_LINKFAULTCHANGE; + + link_ok = lc->link_ok; + speed = lc->speed; + duplex = lc->duplex; + fc = lc->fc; + + phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); + + if (link_fault) { + lc->link_ok = 0; + lc->speed = SPEED_INVALID; + lc->duplex = DUPLEX_INVALID; + + t3_os_link_fault(adapter, port_id, 0); + + /* Account link faults only when the phy reports a link up */ + if (link_ok) + mac->stats.link_faults++; + } else { + if (link_ok) + t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, + F_TXACTENABLE | F_RXEN); + + pi->link_fault = 0; + lc->link_ok = (unsigned char)link_ok; + lc->speed = speed < 0 ? SPEED_INVALID : speed; + lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex; + t3_os_link_fault(adapter, port_id, link_ok); + } +} + +/** + * t3_link_start - apply link configuration to MAC/PHY + * @phy: the PHY to setup + * @mac: the MAC to setup + * @lc: the requested link configuration + * + * Set up a port's MAC and PHY according to a desired link configuration. 
+ * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. + */ +int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc) +{ + unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + + lc->link_ok = 0; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); + if (fc) { + lc->advertising |= ADVERTISED_Asym_Pause; + if (fc & PAUSE_RX) + lc->advertising |= ADVERTISED_Pause; + } + phy->ops->advertise(phy, lc->advertising); + + if (lc->autoneg == AUTONEG_DISABLE) { + lc->speed = lc->requested_speed; + lc->duplex = lc->requested_duplex; + lc->fc = (unsigned char)fc; + t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex, + fc); + /* Also disables autoneg */ + phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex); + } else + phy->ops->autoneg_enable(phy); + } else { + t3_mac_set_speed_duplex_fc(mac, -1, -1, fc); + lc->fc = (unsigned char)fc; + phy->ops->reset(phy, 0); + } + return 0; +} + +/** + * t3_set_vlan_accel - control HW VLAN extraction + * @adapter: the adapter + * @ports: bitmap of adapter ports to operate on + * @on: enable (1) or disable (0) HW VLAN extraction + * + * Enables or disables HW extraction of VLAN tags for the given port. + */ +void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on) +{ + t3_set_reg_field(adapter, A_TP_OUT_CONFIG, + ports << S_VLANEXTRACTIONENABLE, + on ? (ports << S_VLANEXTRACTIONENABLE) : 0); +} + +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/** + * t3_handle_intr_status - table driven interrupt handler + * @adapter: the adapter that generated the interrupt + * @reg: the interrupt status register to process + * @mask: a mask to apply to the interrupt status + * @acts: table of interrupt actions + * @stats: statistics counters tracking interrupt occurrences + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occurred. The actions include + * optionally printing a warning or alert message, and optionally + * incrementing a stat counter. The table is terminated by an entry + * specifying mask 0. Returns the number of fatal interrupt conditions. 
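+ *
+ * As an illustrative sketch (the F_EXAMPLE_*, STAT_EXAMPLE and
+ * A_EXAMPLE_INT_CAUSE names are placeholders, not part of the original
+ * driver), a caller builds a zero-terminated table and passes it in:
+ *
+ *	static const struct intr_info example_intr_info[] = {
+ *		{ F_EXAMPLE_FATAL, "example fatal condition", -1, 1 },
+ *		{ F_EXAMPLE_COUNTED, NULL, STAT_EXAMPLE, 0 },
+ *		{ 0 }
+ *	};
+ *
+ *	if (t3_handle_intr_status(adapter, A_EXAMPLE_INT_CAUSE, 0xffffffff,
+ *				  example_intr_info, adapter->irq_stats))
+ *		t3_fatal_err(adapter);
+ *
+ * The concrete tables used below (e.g. pcix1_intr_info) follow exactly
+ * this pattern.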
+ */ +static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg, + unsigned int mask, + const struct intr_info *acts, + unsigned long *stats) +{ + int fatal = 0; + unsigned int status = t3_read_reg(adapter, reg) & mask; + + for (; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + CH_ALERT(adapter, "%s (0x%x)\n", + acts->msg, status & acts->mask); + status &= ~acts->mask; + } else if (acts->msg) + CH_WARN(adapter, "%s (0x%x)\n", + acts->msg, status & acts->mask); + if (acts->stat_idx >= 0) + stats[acts->stat_idx]++; + } + if (status) /* clear processed interrupts */ + t3_write_reg(adapter, reg, status); + return fatal; +} + +#define SGE_INTR_MASK (F_RSPQDISABLED | \ + F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \ + F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \ + F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ + V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ + F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ + F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \ + F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \ + F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \ + F_LOPIODRBDROPERR) +#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ + F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ + F_NFASRCHFAIL) +#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE)) +#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ + V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \ + F_TXFIFO_UNDERRUN) +#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \ + F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \ + F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \ + F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \ + V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \ + V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */) +#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\ + F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \ + /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \ + F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \ + F_TXPARERR | V_BISTERR(M_BISTERR)) +#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \ + F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \ + F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0) +#define ULPTX_INTR_MASK 0xfc +#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \ + F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \ + F_ZERO_SWITCH_ERROR) +#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \ + F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \ + F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \ + F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \ + F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \ + F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \ + F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \ + F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR) +#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \ + V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \ + V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR)) +#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \ + V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \ + V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR)) +#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \ + V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \ + V_RXTPPARERRENB(M_RXTPPARERRENB) | \ + V_MCAPARERRENB(M_MCAPARERRENB)) +#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE) +#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \ + F_PM1_RX | 
F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \ + F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \ + F_MPS0 | F_CPL_SWITCH) +/* + * Interrupt handler for the PCIX1 module. + */ +static void pci_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pcix1_intr_info[] = { + {F_MSTDETPARERR, "PCI master detected parity error", -1, 1}, + {F_SIGTARABT, "PCI signaled target abort", -1, 1}, + {F_RCVTARABT, "PCI received target abort", -1, 1}, + {F_RCVMSTABT, "PCI received master abort", -1, 1}, + {F_SIGSYSERR, "PCI signaled system error", -1, 1}, + {F_DETPARERR, "PCI detected parity error", -1, 1}, + {F_SPLCMPDIS, "PCI split completion discarded", -1, 1}, + {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1}, + {F_RCVSPLCMPERR, "PCI received split completion error", -1, + 1}, + {F_DETCORECCERR, "PCI correctable ECC error", + STAT_PCI_CORR_ECC, 0}, + {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1}, + {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1}, + {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, + 1}, + {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, + 1}, + {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1, + 1}, + {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity " + "error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK, + pcix1_intr_info, adapter->irq_stats)) + t3_fatal_err(adapter); +} + +/* + * Interrupt handler for the PCIE module. + */ +static void pcie_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pcie_intr_info[] = { + {F_PEXERR, "PCI PEX error", -1, 1}, + {F_UNXSPLCPLERRR, + "PCI unexpected split completion DMA read error", -1, 1}, + {F_UNXSPLCPLERRC, + "PCI unexpected split completion DMA command error", -1, 1}, + {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1}, + {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1}, + {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1}, + {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1}, + {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR), + "PCI MSI-X table/PBA parity error", -1, 1}, + {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1}, + {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1}, + {F_RXPARERR, "PCI Rx parity error", -1, 1}, + {F_TXPARERR, "PCI Tx parity error", -1, 1}, + {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1}, + {0} + }; + + if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR) + CH_ALERT(adapter, "PEX error code 0x%x\n", + t3_read_reg(adapter, A_PCIE_PEX_ERR)); + + if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK, + pcie_intr_info, adapter->irq_stats)) + t3_fatal_err(adapter); +} + +/* + * TP interrupt handler. + */ +static void tp_intr_handler(struct adapter *adapter) +{ + static const struct intr_info tp_intr_info[] = { + {0xffffff, "TP parity error", -1, 1}, + {0x1000000, "TP out of Rx pages", -1, 1}, + {0x2000000, "TP out of Tx pages", -1, 1}, + {0} + }; + + static const struct intr_info tp_intr_info_t3c[] = { + {0x1fffffff, "TP parity error", -1, 1}, + {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1}, + {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff, + adapter->params.rev < T3_REV_C ? + tp_intr_info : tp_intr_info_t3c, NULL)) + t3_fatal_err(adapter); +} + +/* + * CIM interrupt handler. 
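+ *
+ * Every condition in the table below is marked fatal, so any reported
+ * CIM interrupt brings the adapter down via t3_fatal_err().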
+ */ +static void cim_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cim_intr_info[] = { + {F_RSVDSPACEINT, "CIM reserved space write", -1, 1}, + {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1}, + {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1}, + {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1}, + {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1}, + {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1}, + {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1}, + {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1}, + {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1}, + {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1}, + {F_BLKRDPLINT, "CIM block read from PL space", -1, 1}, + {F_BLKWRPLINT, "CIM block write to PL space", -1, 1}, + {F_DRAMPARERR, "CIM DRAM parity error", -1, 1}, + {F_ICACHEPARERR, "CIM icache parity error", -1, 1}, + {F_DCACHEPARERR, "CIM dcache parity error", -1, 1}, + {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1}, + {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1}, + {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1}, + {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1}, + {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1}, + {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1}, + {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1}, + {F_ITAGPARERR, "CIM itag parity error", -1, 1}, + {F_DTAGPARERR, "CIM dtag parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff, + cim_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * ULP RX interrupt handler. + */ +static void ulprx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulprx_intr_info[] = { + {F_PARERRDATA, "ULP RX data parity error", -1, 1}, + {F_PARERRPCMD, "ULP RX command parity error", -1, 1}, + {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1}, + {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1}, + {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1}, + {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1}, + {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1}, + {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff, + ulprx_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * ULP TX interrupt handler. + */ +static void ulptx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulptx_intr_info[] = { + {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds", + STAT_ULP_CH0_PBL_OOB, 0}, + {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds", + STAT_ULP_CH1_PBL_OOB, 0}, + {0xfc, "ULP TX parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff, + ulptx_intr_info, adapter->irq_stats)) + t3_fatal_err(adapter); +} + +#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \ + F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \ + F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \ + F_ICSPI1_TX_FRAMING_ERROR) +#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \ + F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \ + F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \ + F_OESPI1_OFIFO2X_TX_FRAMING_ERROR) + +/* + * PM TX interrupt handler. 
+ */ +static void pmtx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmtx_intr_info[] = { + {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1}, + {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1}, + {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1}, + {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR), + "PMTX ispi parity error", -1, 1}, + {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR), + "PMTX ospi parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff, + pmtx_intr_info, NULL)) + t3_fatal_err(adapter); +} + +#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \ + F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \ + F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \ + F_IESPI1_TX_FRAMING_ERROR) +#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \ + F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \ + F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \ + F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR) + +/* + * PM RX interrupt handler. + */ +static void pmrx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmrx_intr_info[] = { + {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1}, + {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1}, + {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1}, + {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR), + "PMRX ispi parity error", -1, 1}, + {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR), + "PMRX ospi parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff, + pmrx_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * CPL switch interrupt handler. + */ +static void cplsw_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cplsw_intr_info[] = { + {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1}, + {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1}, + {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1}, + {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1}, + {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1}, + {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff, + cplsw_intr_info, NULL)) + t3_fatal_err(adapter); +} + +/* + * MPS interrupt handler. + */ +static void mps_intr_handler(struct adapter *adapter) +{ + static const struct intr_info mps_intr_info[] = { + {0x1ff, "MPS parity error", -1, 1}, + {0} + }; + + if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff, + mps_intr_info, NULL)) + t3_fatal_err(adapter); +} + +#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE) + +/* + * MC7 interrupt handler. 
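+ *
+ * Correctable errors are only counted and logged; uncorrectable, parity
+ * and address errors (the MC7_INTR_FATAL set) additionally bring the
+ * adapter down via t3_fatal_err().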
+ */ +static void mc7_intr_handler(struct mc7 *mc7) +{ + struct adapter *adapter = mc7->adapter; + u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE); + + if (cause & F_CE) { + mc7->stats.corr_err++; + CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, " + "data 0x%x 0x%x 0x%x\n", mc7->name, + t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR), + t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0), + t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1), + t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2)); + } + + if (cause & F_UE) { + mc7->stats.uncorr_err++; + CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, " + "data 0x%x 0x%x 0x%x\n", mc7->name, + t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR), + t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0), + t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1), + t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2)); + } + + if (G_PE(cause)) { + mc7->stats.parity_err++; + CH_ALERT(adapter, "%s MC7 parity error 0x%x\n", + mc7->name, G_PE(cause)); + } + + if (cause & F_AE) { + u32 addr = 0; + + if (adapter->params.rev > 0) + addr = t3_read_reg(adapter, + mc7->offset + A_MC7_ERR_ADDR); + mc7->stats.addr_err++; + CH_ALERT(adapter, "%s MC7 address error: 0x%x\n", + mc7->name, addr); + } + + if (cause & MC7_INTR_FATAL) + t3_fatal_err(adapter); + + t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause); +} + +#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \ + V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) +/* + * XGMAC interrupt handler. + */ +static int mac_intr_handler(struct adapter *adap, unsigned int idx) +{ + struct cmac *mac = &adap2pinfo(adap, idx)->mac; + /* + * We mask out interrupt causes for which we're not taking interrupts. + * This allows us to use polling logic to monitor some of the other + * conditions when taking interrupts would impose too much load on the + * system. + */ + u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) & + ~F_RXFIFO_OVERFLOW; + + if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) { + mac->stats.tx_fifo_parity_err++; + CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx); + } + if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) { + mac->stats.rx_fifo_parity_err++; + CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx); + } + if (cause & F_TXFIFO_UNDERRUN) + mac->stats.tx_fifo_urun++; + if (cause & F_RXFIFO_OVERFLOW) + mac->stats.rx_fifo_ovfl++; + if (cause & V_SERDES_LOS(M_SERDES_LOS)) + mac->stats.serdes_signal_loss++; + if (cause & F_XAUIPCSCTCERR) + mac->stats.xaui_pcs_ctc_err++; + if (cause & F_XAUIPCSALIGNCHANGE) + mac->stats.xaui_pcs_align_change++; + if (cause & F_XGM_INT) { + t3_set_reg_field(adap, + A_XGM_INT_ENABLE + mac->offset, + F_XGM_INT, 0); + mac->stats.link_faults++; + + t3_os_link_fault_handler(adap, idx); + } + + if (cause & XGM_INTR_FATAL) + t3_fatal_err(adap); + + t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause); + return cause != 0; +} + +/* + * Interrupt handler for PHY events. 
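+ *
+ * Only ports whose PHY reports SUPPORTED_IRQ are examined; the GPIO
+ * cause bit checked for each such port comes from the adapter_info
+ * gpio_intr[] table.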
+ */ +int t3_phy_intr_handler(struct adapter *adapter) +{ + u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE); + + for_each_port(adapter, i) { + struct port_info *p = adap2pinfo(adapter, i); + + if (!(p->phy.caps & SUPPORTED_IRQ)) + continue; + + if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) { + int phy_cause = p->phy.ops->intr_handler(&p->phy); + + if (phy_cause & cphy_cause_link_change) + t3_link_changed(adapter, i); + if (phy_cause & cphy_cause_fifo_error) + p->phy.fifo_errors++; + if (phy_cause & cphy_cause_module_change) + t3_os_phymod_changed(adapter, i); + } + } + + t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause); + return 0; +} + +/* + * T3 slow path (non-data) interrupt handler. + */ +int t3_slow_intr_handler(struct adapter *adapter) +{ + u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0); + + cause &= adapter->slow_intr_mask; + if (!cause) + return 0; + if (cause & F_PCIM0) { + if (is_pcie(adapter)) + pcie_intr_handler(adapter); + else + pci_intr_handler(adapter); + } + if (cause & F_SGE3) + t3_sge_err_intr_handler(adapter); + if (cause & F_MC7_PMRX) + mc7_intr_handler(&adapter->pmrx); + if (cause & F_MC7_PMTX) + mc7_intr_handler(&adapter->pmtx); + if (cause & F_MC7_CM) + mc7_intr_handler(&adapter->cm); + if (cause & F_CIM) + cim_intr_handler(adapter); + if (cause & F_TP1) + tp_intr_handler(adapter); + if (cause & F_ULP2_RX) + ulprx_intr_handler(adapter); + if (cause & F_ULP2_TX) + ulptx_intr_handler(adapter); + if (cause & F_PM1_RX) + pmrx_intr_handler(adapter); + if (cause & F_PM1_TX) + pmtx_intr_handler(adapter); + if (cause & F_CPL_SWITCH) + cplsw_intr_handler(adapter); + if (cause & F_MPS0) + mps_intr_handler(adapter); + if (cause & F_MC5A) + t3_mc5_intr_handler(&adapter->mc5); + if (cause & F_XGMAC0_0) + mac_intr_handler(adapter, 0); + if (cause & F_XGMAC0_1) + mac_intr_handler(adapter, 1); + if (cause & F_T3DBG) + t3_os_ext_intr_handler(adapter); + + /* Clear the interrupts just processed. */ + t3_write_reg(adapter, A_PL_INT_CAUSE0, cause); + t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ + return 1; +} + +static unsigned int calc_gpio_intr(struct adapter *adap) +{ + unsigned int i, gpi_intr = 0; + + for_each_port(adap, i) + if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) && + adapter_info(adap)->gpio_intr[i]) + gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i]; + return gpi_intr; +} + +/** + * t3_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable interrupts by setting the interrupt enable registers of the + * various HW modules and then enabling the top-level interrupt + * concentrator. + */ +void t3_intr_enable(struct adapter *adapter) +{ + static const struct addr_val_pair intr_en_avp[] = { + {A_SG_INT_ENABLE, SGE_INTR_MASK}, + {A_MC7_INT_ENABLE, MC7_INTR_MASK}, + {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, + MC7_INTR_MASK}, + {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, + MC7_INTR_MASK}, + {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK}, + {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK}, + {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK}, + {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK}, + {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK}, + {A_MPS_INT_ENABLE, MPS_INTR_MASK}, + }; + + adapter->slow_intr_mask = PL_INTR_MASK; + + t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0); + t3_write_reg(adapter, A_TP_INT_ENABLE, + adapter->params.rev >= T3_REV_C ? 
0x2bfffff : 0x3bfffff); + + if (adapter->params.rev > 0) { + t3_write_reg(adapter, A_CPL_INTR_ENABLE, + CPLSW_INTR_MASK | F_CIM_OVFL_ERROR); + t3_write_reg(adapter, A_ULPTX_INT_ENABLE, + ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 | + F_PBL_BOUND_ERR_CH1); + } else { + t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK); + t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK); + } + + t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter)); + + if (is_pcie(adapter)) + t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK); + else + t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK); + t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask); + t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ +} + +/** + * t3_intr_disable - disable a card's interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrator and the SGE data interrupts. + */ +void t3_intr_disable(struct adapter *adapter) +{ + t3_write_reg(adapter, A_PL_INT_ENABLE0, 0); + t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */ + adapter->slow_intr_mask = 0; +} + +/** + * t3_intr_clear - clear all interrupts + * @adapter: the adapter whose interrupts should be cleared + * + * Clears all interrupts. + */ +void t3_intr_clear(struct adapter *adapter) +{ + static const unsigned int cause_reg_addr[] = { + A_SG_INT_CAUSE, + A_SG_RSPQ_FL_STATUS, + A_PCIX_INT_CAUSE, + A_MC7_INT_CAUSE, + A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR, + A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR, + A_CIM_HOST_INT_CAUSE, + A_TP_INT_CAUSE, + A_MC5_DB_INT_CAUSE, + A_ULPRX_INT_CAUSE, + A_ULPTX_INT_CAUSE, + A_CPL_INTR_CAUSE, + A_PM1_TX_INT_CAUSE, + A_PM1_RX_INT_CAUSE, + A_MPS_INT_CAUSE, + A_T3DBG_INT_CAUSE, + }; + unsigned int i; + + /* Clear PHY and MAC interrupts for each port. */ + for_each_port(adapter, i) + t3_port_intr_clear(adapter, i); + + for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i) + t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff); + + if (is_pcie(adapter)) + t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff); + t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff); + t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ +} + +void t3_xgm_intr_enable(struct adapter *adapter, int idx) +{ + struct port_info *pi = adap2pinfo(adapter, idx); + + t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset, + XGM_EXTRA_INTR_MASK); +} + +void t3_xgm_intr_disable(struct adapter *adapter, int idx) +{ + struct port_info *pi = adap2pinfo(adapter, idx); + + t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset, + 0x7ff); +} + +/** + * t3_port_intr_enable - enable port-specific interrupts + * @adapter: associated adapter + * @idx: index of port whose interrupts should be enabled + * + * Enable port-specific (i.e., MAC and PHY) interrupts for the given + * adapter port. + */ +void t3_port_intr_enable(struct adapter *adapter, int idx) +{ + struct cphy *phy = &adap2pinfo(adapter, idx)->phy; + + t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK); + t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */ + phy->ops->intr_enable(phy); +} + +/** + * t3_port_intr_disable - disable port-specific interrupts + * @adapter: associated adapter + * @idx: index of port whose interrupts should be disabled + * + * Disable port-specific (i.e., MAC and PHY) interrupts for the given + * adapter port. 
+ */ +void t3_port_intr_disable(struct adapter *adapter, int idx) +{ + struct cphy *phy = &adap2pinfo(adapter, idx)->phy; + + t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0); + t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */ + phy->ops->intr_disable(phy); +} + +/** + * t3_port_intr_clear - clear port-specific interrupts + * @adapter: associated adapter + * @idx: index of port whose interrupts to clear + * + * Clear port-specific (i.e., MAC and PHY) interrupts for the given + * adapter port. + */ +static void t3_port_intr_clear(struct adapter *adapter, int idx) +{ + struct cphy *phy = &adap2pinfo(adapter, idx)->phy; + + t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff); + t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */ + phy->ops->intr_clear(phy); +} + +#define SG_CONTEXT_CMD_ATTEMPTS 100 + +/** + * t3_sge_write_context - write an SGE context + * @adapter: the adapter + * @id: the context id + * @type: the context type + * + * Program an SGE context with the values already loaded in the + * CONTEXT_DATA? registers. + */ +static int t3_sge_write_context(struct adapter *adapter, unsigned int id, + unsigned int type) +{ + if (type == F_RESPONSEQ) { + /* + * Can't write the Response Queue Context bits for + * Interrupt Armed or the Reserve bits after the chip + * has been initialized out of reset. Writing to these + * bits can confuse the hardware. + */ + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); + } else { + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff); + } + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * clear_sge_ctxt - completely clear an SGE context + * @adapter: the adapter + * @id: the context id + * @type: the context type + * + * Completely clear an SGE context. Used predominantly at post-reset + * initialization. Note in particular that we don't skip writing to any + * "sensitive bits" in the contexts the way that t3_sge_write_context() + * does ... 
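+ *
+ * All four CONTEXT_DATA registers are zeroed and every mask bit is set,
+ * so the entire context is overwritten.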
+ */ +static int clear_sge_ctxt(struct adapter *adap, unsigned int id, + unsigned int type) +{ + t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0); + t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0); + t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff); + t3_write_reg(adap, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); + return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_init_ecntxt - initialize an SGE egress context + * @adapter: the adapter to configure + * @id: the context id + * @gts_enable: whether to enable GTS for the context + * @type: the egress context type + * @respq: associated response queue + * @base_addr: base address of queue + * @size: number of queue entries + * @token: uP token + * @gen: initial generation value for the context + * @cidx: consumer pointer + * + * Initialize an SGE egress context and make it ready for use. If the + * platform allows concurrent context operations, the caller is + * responsible for appropriate locking. + */ +int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable, + enum sge_context_type type, int respq, u64 base_addr, + unsigned int size, unsigned int token, int gen, + unsigned int cidx) +{ + unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM; + + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) | + V_EC_CREDITS(credits) | V_EC_GTS(gts_enable)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) | + V_EC_BASE_LO(base_addr & 0xffff)); + base_addr >>= 16; + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr); + base_addr >>= 32; + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, + V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) | + V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) | + F_EC_VALID); + return t3_sge_write_context(adapter, id, F_EGRESS); +} + +/** + * t3_sge_init_flcntxt - initialize an SGE free-buffer list context + * @adapter: the adapter to configure + * @id: the context id + * @gts_enable: whether to enable GTS for the context + * @base_addr: base address of queue + * @size: number of queue entries + * @bsize: size of each buffer for this queue + * @cong_thres: threshold to signal congestion to upstream producers + * @gen: initial generation value for the context + * @cidx: consumer pointer + * + * Initialize an SGE free list context and make it ready for use. The + * caller is responsible for ensuring only one context operation occurs + * at a time. 
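+ *
+ * As with the other SGE context initializers in this file, @base_addr
+ * must be 4KB-aligned or -EINVAL is returned.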
+ */ +int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id, + int gts_enable, u64 base_addr, unsigned int size, + unsigned int bsize, unsigned int cong_thres, int gen, + unsigned int cidx) +{ + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr); + base_addr >>= 32; + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, + V_FL_BASE_HI((u32) base_addr) | + V_FL_INDEX_LO(cidx & M_FL_INDEX_LO)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) | + V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) | + V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, + V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) | + V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable)); + return t3_sge_write_context(adapter, id, F_FREELIST); +} + +/** + * t3_sge_init_rspcntxt - initialize an SGE response queue context + * @adapter: the adapter to configure + * @id: the context id + * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ + * @base_addr: base address of queue + * @size: number of queue entries + * @fl_thres: threshold for selecting the normal or jumbo free list + * @gen: initial generation value for the context + * @cidx: consumer pointer + * + * Initialize an SGE response queue context and make it ready for use. + * The caller is responsible for ensuring only one context operation + * occurs at a time. + */ +int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id, + int irq_vec_idx, u64 base_addr, unsigned int size, + unsigned int fl_thres, int gen, unsigned int cidx) +{ + unsigned int intr = 0; + + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) | + V_CQ_INDEX(cidx)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr); + base_addr >>= 32; + if (irq_vec_idx >= 0) + intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN; + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, + V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres); + return t3_sge_write_context(adapter, id, F_RESPONSEQ); +} + +/** + * t3_sge_init_cqcntxt - initialize an SGE completion queue context + * @adapter: the adapter to configure + * @id: the context id + * @base_addr: base address of queue + * @size: number of queue entries + * @rspq: response queue for async notifications + * @ovfl_mode: CQ overflow mode + * @credits: completion queue credits + * @credit_thres: the credit threshold + * + * Initialize an SGE completion queue context and make it ready for use. + * The caller is responsible for ensuring only one context operation + * occurs at a time. 
+ */ +int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, + unsigned int size, int rspq, int ovfl_mode, + unsigned int credits, unsigned int credit_thres) +{ + if (base_addr & 0xfff) /* must be 4K aligned */ + return -EINVAL; + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + base_addr >>= 12; + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr); + base_addr >>= 32; + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, + V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) | + V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) | + V_CQ_ERR(ovfl_mode)); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) | + V_CQ_CREDIT_THRES(credit_thres)); + return t3_sge_write_context(adapter, id, F_CQ); +} + +/** + * t3_sge_enable_ecntxt - enable/disable an SGE egress context + * @adapter: the adapter + * @id: the egress context id + * @enable: enable (1) or disable (0) the context + * + * Enable or disable an SGE egress context. The caller is responsible for + * ensuring only one context operation occurs at a time. + */ +int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID); + t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable)); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_disable_fl - disable an SGE free-buffer list + * @adapter: the adapter + * @id: the free list context id + * + * Disable an SGE free-buffer list. The caller is responsible for + * ensuring only one context operation occurs at a time. + */ +int t3_sge_disable_fl(struct adapter *adapter, unsigned int id) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE)); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); + t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_disable_rspcntxt - disable an SGE response queue + * @adapter: the adapter + * @id: the response queue context id + * + * Disable an SGE response queue. The caller is responsible for + * ensuring only one context operation occurs at a time. 
+ */ +int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_disable_cqcntxt - disable an SGE completion queue + * @adapter: the adapter + * @id: the completion queue context id + * + * Disable an SGE completion queue. The caller is responsible for + * ensuring only one context operation occurs at a time. + */ +int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id) +{ + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE)); + t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0); + t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0); + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id)); + return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); +} + +/** + * t3_sge_cqcntxt_op - perform an operation on a completion queue context + * @adapter: the adapter + * @id: the context id + * @op: the operation to perform + * + * Perform the selected operation on an SGE completion queue context. + * The caller is responsible for ensuring only one context operation + * occurs at a time. + */ +int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, + unsigned int credits) +{ + u32 val; + + if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16); + t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) | + V_CONTEXT(id) | F_CQ); + if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, + 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val)) + return -EIO; + + if (op >= 2 && op < 7) { + if (adapter->params.rev > 0) + return G_CQ_INDEX(val); + + t3_write_reg(adapter, A_SG_CONTEXT_CMD, + V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id)); + if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, + F_CONTEXT_CMD_BUSY, 0, + SG_CONTEXT_CMD_ATTEMPTS, 1)) + return -EIO; + return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0)); + } + return 0; +} + +/** + * t3_config_rss - configure Rx packet steering + * @adapter: the adapter + * @rss_config: RSS settings (written to TP_RSS_CONFIG) + * @cpus: values for the CPU lookup table (0xff terminated) + * @rspq: values for the response queue lookup table (0xffff terminated) + * + * Programs the receive packet steering logic. @cpus and @rspq provide + * the values for the CPU and response queue lookup tables. If they + * provide fewer values than the size of the tables the supplied values + * are used repeatedly until the tables are fully populated. 
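+ *
+ * A minimal illustrative call (the table values below are placeholders,
+ * not taken from the original driver) that steers all lookups to CPU 0
+ * and response queue 0:
+ *
+ *	static const u8 cpus[] = { 0, 0xff };
+ *	static const u16 rspq[] = { 0, 0xffff };
+ *
+ *	t3_config_rss(adapter, rss_config, cpus, rspq);
+ *
+ * where rss_config is the raw value to be written to TP_RSS_CONFIG.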
+ */ +void t3_config_rss(struct adapter *adapter, unsigned int rss_config, + const u8 * cpus, const u16 *rspq) +{ + int i, j, cpu_idx = 0, q_idx = 0; + + if (cpus) + for (i = 0; i < RSS_TABLE_SIZE; ++i) { + u32 val = i << 16; + + for (j = 0; j < 2; ++j) { + val |= (cpus[cpu_idx++] & 0x3f) << (8 * j); + if (cpus[cpu_idx] == 0xff) + cpu_idx = 0; + } + t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val); + } + + if (rspq) + for (i = 0; i < RSS_TABLE_SIZE; ++i) { + t3_write_reg(adapter, A_TP_RSS_MAP_TABLE, + (i << 16) | rspq[q_idx++]); + if (rspq[q_idx] == 0xffff) + q_idx = 0; + } + + t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config); +} + +/** + * t3_tp_set_offload_mode - put TP in NIC/offload mode + * @adap: the adapter + * @enable: 1 to select offload mode, 0 for regular NIC + * + * Switches TP to NIC/offload mode. + */ +void t3_tp_set_offload_mode(struct adapter *adap, int enable) +{ + if (is_offload(adap) || !enable) + t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE, + V_NICMODE(!enable)); +} + +/** + * pm_num_pages - calculate the number of pages of the payload memory + * @mem_size: the size of the payload memory + * @pg_size: the size of each payload memory page + * + * Calculate the number of pages, each of the given size, that fit in a + * memory of the specified size, respecting the HW requirement that the + * number of pages must be a multiple of 24. + */ +static inline unsigned int pm_num_pages(unsigned int mem_size, + unsigned int pg_size) +{ + unsigned int n = mem_size / pg_size; + + return n - n % 24; +} + +#define mem_region(adap, start, size, reg) \ + t3_write_reg((adap), A_ ## reg, (start)); \ + start += size + +/** + * partition_mem - partition memory and configure TP memory settings + * @adap: the adapter + * @p: the TP parameters + * + * Partitions context and payload memory and configures TP's memory + * registers. 
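+ *
+ * The regions carved out below are, in order: the TCBs, the SGE egress
+ * and CQ contexts, the TP timers, the pstructs and their free list, the
+ * Rx and Tx page free lists, and finally the CIM SDRAM region, whose
+ * base is rounded up to a 4KB boundary.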
+ */ +static void partition_mem(struct adapter *adap, const struct tp_params *p) +{ + unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5); + unsigned int timers = 0, timers_shift = 22; + + if (adap->params.rev > 0) { + if (tids <= 16 * 1024) { + timers = 1; + timers_shift = 16; + } else if (tids <= 64 * 1024) { + timers = 2; + timers_shift = 18; + } else if (tids <= 256 * 1024) { + timers = 3; + timers_shift = 20; + } + } + + t3_write_reg(adap, A_TP_PMM_SIZE, + p->chan_rx_size | (p->chan_tx_size >> 16)); + + t3_write_reg(adap, A_TP_PMM_TX_BASE, 0); + t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size); + t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs); + t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX), + V_TXDATAACKIDX(fls(p->tx_pg_size) - 12)); + + t3_write_reg(adap, A_TP_PMM_RX_BASE, 0); + t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size); + t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs); + + pstructs = p->rx_num_pgs + p->tx_num_pgs; + /* Add a bit of headroom and make multiple of 24 */ + pstructs += 48; + pstructs -= pstructs % 24; + t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs); + + m = tids * TCB_SIZE; + mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR); + mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR); + t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m); + m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22); + mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE); + mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE); + mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE); + mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE); + + m = (m + 4095) & ~0xfff; + t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m); + t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m); + + tids = (p->cm_size - m - (3 << 20)) / 3072 - 32; + m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - + adap->params.mc5.nfilters - adap->params.mc5.nroutes; + if (tids < m) + adap->params.mc5.nservers += m - tids; +} + +static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr, + u32 val) +{ + t3_write_reg(adap, A_TP_PIO_ADDR, addr); + t3_write_reg(adap, A_TP_PIO_DATA, val); +} + +static void tp_config(struct adapter *adap, const struct tp_params *p) +{ + t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU | + F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD | + F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); + t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | + F_MTUENABLE | V_WINDOWSCALEMODE(1) | + V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1)); + t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | + V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | + V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) | + F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1)); + t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO, + F_IPV6ENABLE | F_NICMODE); + t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814); + t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105); + t3_set_reg_field(adap, A_TP_PARA_REG6, 0, + adap->params.rev > 0 ? 
F_ENABLEESND : + F_T3A_ENABLEESND); + + t3_set_reg_field(adap, A_TP_PC_CONFIG, + F_ENABLEEPCMDAFULL, + F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK | + F_TXCONGESTIONMODE | F_RXCONGESTIONMODE); + t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, + F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN | + F_ENABLEARPMISS | F_DISBLEDAPARBIT0); + t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080); + t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000); + + if (adap->params.rev > 0) { + tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE); + t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO, + F_TXPACEAUTO); + t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID); + t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT); + } else + t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED); + + if (adap->params.rev == T3_REV_C) + t3_set_reg_field(adap, A_TP_PC_CONFIG, + V_TABLELATENCYDELTA(M_TABLELATENCYDELTA), + V_TABLELATENCYDELTA(4)); + + t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0); + t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0); + t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0); + t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000); +} + +/* Desired TP timer resolution in usec */ +#define TP_TMR_RES 50 + +/* TCP timer values in ms */ +#define TP_DACK_TIMER 50 +#define TP_RTO_MIN 250 + +/** + * tp_set_timers - set TP timing parameters + * @adap: the adapter to set + * @core_clk: the core clock frequency in Hz + * + * Set TP's timing parameters, such as the various timer resolutions and + * the TCP timer values. + */ +static void tp_set_timers(struct adapter *adap, unsigned int core_clk) +{ + unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1; + unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */ + unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */ + unsigned int tps = core_clk >> tre; + + t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) | + V_DELAYEDACKRESOLUTION(dack_re) | + V_TIMESTAMPRESOLUTION(tstamp_re)); + t3_write_reg(adap, A_TP_DACK_TIMER, + (core_clk >> dack_re) / (1000 / TP_DACK_TIMER)); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908); + t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c); + t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | + V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) | + V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | + V_KEEPALIVEMAX(9)); + +#define SECONDS * tps + + t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS); + t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN)); + t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS); + t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS); + t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS); + t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS); + t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS); + t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS); + t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS); + +#undef SECONDS +} + +/** + * t3_tp_set_coalescing_size - set receive coalescing size + * @adap: the adapter + * @size: the receive coalescing size + * @psh: whether a set PSH bit should deliver coalesced data + * + * Set the receive coalescing size and PSH bit handling. 
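+ *
+ * A @size of 0 disables receive coalescing; sizes larger than
+ * MAX_RX_COALESCING_LEN are rejected with -EINVAL.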
+ */ +static int t3_tp_set_coalescing_size(struct adapter *adap, + unsigned int size, int psh) +{ + u32 val; + + if (size > MAX_RX_COALESCING_LEN) + return -EINVAL; + + val = t3_read_reg(adap, A_TP_PARA_REG3); + val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN); + + if (size) { + val |= F_RXCOALESCEENABLE; + if (psh) + val |= F_RXCOALESCEPSHEN; + size = min(MAX_RX_COALESCING_LEN, size); + t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) | + V_MAXRXDATA(MAX_RX_COALESCING_LEN)); + } + t3_write_reg(adap, A_TP_PARA_REG3, val); + return 0; +} + +/** + * t3_tp_set_max_rxsize - set the max receive size + * @adap: the adapter + * @size: the max receive size + * + * Set TP's max receive size. This is the limit that applies when + * receive coalescing is disabled. + */ +static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size) +{ + t3_write_reg(adap, A_TP_PARA_REG7, + V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size)); +} + +static void init_mtus(unsigned short mtus[]) +{ + /* + * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so + * it can accommodate max size TCP/IP headers when SACK and timestamps + * are enabled and still have at least 8 bytes of payload. + */ + mtus[0] = 88; + mtus[1] = 88; + mtus[2] = 256; + mtus[3] = 512; + mtus[4] = 576; + mtus[5] = 1024; + mtus[6] = 1280; + mtus[7] = 1492; + mtus[8] = 1500; + mtus[9] = 2002; + mtus[10] = 2048; + mtus[11] = 4096; + mtus[12] = 4352; + mtus[13] = 8192; + mtus[14] = 9000; + mtus[15] = 9600; +} + +/* + * Initial congestion control parameters. + */ +static void init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; + b[9] = b[10] = 1; + b[11] = b[12] = 2; + b[13] = b[14] = b[15] = b[16] = 3; + b[17] = b[18] = b[19] = b[20] = b[21] = 4; + b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; + b[28] = b[29] = 6; + b[30] = b[31] = 7; +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t3_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the unrestricted values for the MTU table + * @alphs: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * @mtu_cap: the maximum permitted effective MTU + * + * Write the MTU table with the supplied MTUs capping each at &mtu_cap. + * Update the high-speed congestion control table with the supplied alpha, + * beta, and MTUs. 
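+ *
+ * The additive increment programmed for each MTU/window pair is
+ * ((mtu - 40) * alpha[w]) / avg_pkts[w], floored at CC_MIN_INCR.
+ * As an illustration, an MTU of 1500 with alpha 2 and an average of
+ * 20 packets per window yields (1460 * 2) / 20 = 146.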
+ */ +void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS], + unsigned short alpha[NCCTRL_WIN], + unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = min(mtus[i], mtu_cap); + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t3_write_reg(adap, A_TP_MTU_TABLE, + (i << 24) | (log2 << 16) | mtu); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t3_tp_get_mib_stats - read TP's MIB counters + * @adap: the adapter + * @tps: holds the returned counter values + * + * Returns the values of TP's MIB counters. + */ +void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps) +{ + t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps, + sizeof(*tps) / sizeof(u32), 0); +} + +#define ulp_region(adap, name, start, len) \ + t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \ + t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \ + (start) + (len) - 1); \ + start += len + +#define ulptx_region(adap, name, start, len) \ + t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \ + t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \ + (start) + (len) - 1) + +static void ulp_config(struct adapter *adap, const struct tp_params *p) +{ + unsigned int m = p->chan_rx_size; + + ulp_region(adap, ISCSI, m, p->chan_rx_size / 8); + ulp_region(adap, TDDP, m, p->chan_rx_size / 8); + ulptx_region(adap, TPT, m, p->chan_rx_size / 4); + ulp_region(adap, STAG, m, p->chan_rx_size / 4); + ulp_region(adap, RQ, m, p->chan_rx_size / 4); + ulptx_region(adap, PBL, m, p->chan_rx_size / 4); + ulp_region(adap, PBL, m, p->chan_rx_size / 4); + t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff); +} + +/** + * t3_set_proto_sram - set the contents of the protocol sram + * @adapter: the adapter + * @data: the protocol image + * + * Write the contents of the protocol SRAM. 
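+ *
+ * Each SRAM line is supplied as five consecutive big-endian 32-bit
+ * words, written through the EMBED_OP_FIELD5..FIELD1 registers and then
+ * latched by writing the line number (together with bit 31, which
+ * appears to act as the write strobe) to FIELD0.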
+ */ +int t3_set_proto_sram(struct adapter *adap, const u8 *data) +{ + int i; + const __be32 *buf = (const __be32 *)data; + + for (i = 0; i < PROTO_SRAM_LINES; i++) { + t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++)); + t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++)); + + t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31); + if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1)) + return -EIO; + } + t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0); + + return 0; +} + +void t3_config_trace_filter(struct adapter *adapter, + const struct trace_params *tp, int filter_index, + int invert, int enable) +{ + u32 addr, key[4], mask[4]; + + key[0] = tp->sport | (tp->sip << 16); + key[1] = (tp->sip >> 16) | (tp->dport << 16); + key[2] = tp->dip; + key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20); + + mask[0] = tp->sport_mask | (tp->sip_mask << 16); + mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16); + mask[2] = tp->dip_mask; + mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20); + + if (invert) + key[3] |= (1 << 29); + if (enable) + key[3] |= (1 << 28); + + addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0; + tp_wr_indirect(adapter, addr++, key[0]); + tp_wr_indirect(adapter, addr++, mask[0]); + tp_wr_indirect(adapter, addr++, key[1]); + tp_wr_indirect(adapter, addr++, mask[1]); + tp_wr_indirect(adapter, addr++, key[2]); + tp_wr_indirect(adapter, addr++, mask[2]); + tp_wr_indirect(adapter, addr++, key[3]); + tp_wr_indirect(adapter, addr, mask[3]); + t3_read_reg(adapter, A_TP_PIO_DATA); +} + +/** + * t3_config_sched - configure a HW traffic scheduler + * @adap: the adapter + * @kbps: target rate in Kbps + * @sched: the scheduler index + * + * Configure a HW scheduler for the target rate + */ +int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched) +{ + unsigned int v, tps, cpt, bpt, delta, mindelta = ~0; + unsigned int clk = adap->params.vpd.cclk * 1000; + unsigned int selected_cpt = 0, selected_bpt = 0; + + if (kbps > 0) { + kbps *= 125; /* -> bytes */ + for (cpt = 1; cpt <= 255; cpt++) { + tps = clk / cpt; + bpt = (kbps + tps / 2) / tps; + if (bpt > 0 && bpt <= 255) { + v = bpt * tps; + delta = v >= kbps ? 
v - kbps : kbps - v; + if (delta <= mindelta) { + mindelta = delta; + selected_cpt = cpt; + selected_bpt = bpt; + } + } else if (selected_cpt) + break; + } + if (!selected_cpt) + return -EINVAL; + } + t3_write_reg(adap, A_TP_TM_PIO_ADDR, + A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); + v = t3_read_reg(adap, A_TP_TM_PIO_DATA); + if (sched & 1) + v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24); + else + v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8); + t3_write_reg(adap, A_TP_TM_PIO_DATA, v); + return 0; +} + +static int tp_init(struct adapter *adap, const struct tp_params *p) +{ + int busy = 0; + + tp_config(adap, p); + t3_set_vlan_accel(adap, 3, 0); + + if (is_offload(adap)) { + tp_set_timers(adap, adap->params.vpd.cclk * 1000); + t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE); + busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE, + 0, 1000, 5); + if (busy) + CH_ERR(adap, "TP initialization timed out\n"); + } + + if (!busy) + t3_write_reg(adap, A_TP_RESET, F_TPRESET); + return busy; +} + +/* + * Perform the bits of HW initialization that are dependent on the Tx + * channels being used. + */ +static void chan_init_hw(struct adapter *adap, unsigned int chan_map) +{ + int i; + + if (chan_map != 3) { /* one channel */ + t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0); + t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); + t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT | + (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE : + F_TPTXPORT1EN | F_PORT1ACTIVE)); + t3_write_reg(adap, A_PM1_TX_CFG, + chan_map == 1 ? 0xffffffff : 0); + } else { /* two channels */ + t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); + t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); + t3_write_reg(adap, A_ULPTX_DMA_WEIGHT, + V_D1_WEIGHT(16) | V_D0_WEIGHT(16)); + t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN | + F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE | + F_ENFORCEPKT); + t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000); + t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE); + t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, + V_TX_MOD_QUEUE_REQ_MAP(0xaa)); + for (i = 0; i < 16; i++) + t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE, + (i << 16) | 0x1010); + } +} + +static int calibrate_xgm(struct adapter *adapter) +{ + if (uses_xaui(adapter)) { + unsigned int v, i; + + for (i = 0; i < 5; ++i) { + t3_write_reg(adapter, A_XGM_XAUI_IMP, 0); + t3_read_reg(adapter, A_XGM_XAUI_IMP); + msleep(1); + v = t3_read_reg(adapter, A_XGM_XAUI_IMP); + if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) { + t3_write_reg(adapter, A_XGM_XAUI_IMP, + V_XAUIIMP(G_CALIMP(v) >> 2)); + return 0; + } + } + CH_ERR(adapter, "MAC calibration failed\n"); + return -1; + } else { + t3_write_reg(adapter, A_XGM_RGMII_IMP, + V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, + F_XGM_IMPSETUPDATE); + } + return 0; +} + +static void calibrate_xgm_t3b(struct adapter *adapter) +{ + if (!uses_xaui(adapter)) { + t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET | + F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3)); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, + F_XGM_IMPSETUPDATE); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE, + 0); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0); + t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE); + } +} + +struct mc7_timing_params { + unsigned char ActToPreDly; + unsigned char ActToRdWrDly; + unsigned 
char PreCyc; + unsigned char RefCyc[5]; + unsigned char BkCyc; + unsigned char WrToRdDly; + unsigned char RdToWrDly; +}; + +/* + * Write a value to a register and check that the write completed. These + * writes normally complete in a cycle or two, so one read should suffice. + * The very first read exists to flush the posted write to the device. + */ +static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val) +{ + t3_write_reg(adapter, addr, val); + t3_read_reg(adapter, addr); /* flush */ + if (!(t3_read_reg(adapter, addr) & F_BUSY)) + return 0; + CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr); + return -EIO; +} + +static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type) +{ + static const unsigned int mc7_mode[] = { + 0x632, 0x642, 0x652, 0x432, 0x442 + }; + static const struct mc7_timing_params mc7_timings[] = { + {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4}, + {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4}, + {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4}, + {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4}, + {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4} + }; + + u32 val; + unsigned int width, density, slow, attempts; + struct adapter *adapter = mc7->adapter; + const struct mc7_timing_params *p = &mc7_timings[mem_type]; + + if (!mc7->size) + return 0; + + val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); + slow = val & F_SLOW; + width = G_WIDTH(val); + density = G_DEN(val); + + t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN); + val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ + msleep(1); + + if (!slow) { + t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN); + t3_read_reg(adapter, mc7->offset + A_MC7_CAL); + msleep(1); + if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) & + (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) { + CH_ERR(adapter, "%s MC7 calibration timed out\n", + mc7->name); + goto out_fail; + } + } + + t3_write_reg(adapter, mc7->offset + A_MC7_PARM, + V_ACTTOPREDLY(p->ActToPreDly) | + V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) | + V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) | + V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly)); + + t3_write_reg(adapter, mc7->offset + A_MC7_CFG, + val | F_CLKEN | F_TERM150); + t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */ + + if (!slow) + t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB, + F_DLLENB); + udelay(1); + + val = slow ? 
3 : 6; + if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) + goto out_fail; + + if (!slow) { + t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100); + t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0); + udelay(5); + } + + if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) || + wrreg_wait(adapter, mc7->offset + A_MC7_MODE, + mc7_mode[mem_type]) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) || + wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val)) + goto out_fail; + + /* clock value is in KHz */ + mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */ + mc7_clock /= 1000000; /* KHz->MHz, ns->us */ + + t3_write_reg(adapter, mc7->offset + A_MC7_REF, + F_PERREFEN | V_PREREFDIV(mc7_clock)); + t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */ + + t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END, + (mc7->size << width) - 1); + t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1)); + t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */ + + attempts = 50; + do { + msleep(250); + val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); + } while ((val & F_BUSY) && --attempts); + if (val & F_BUSY) { + CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name); + goto out_fail; + } + + /* Enable normal memory accesses. */ + t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY); + return 0; + +out_fail: + return -1; +} + +static void config_pcie(struct adapter *adap) +{ + static const u16 ack_lat[4][6] = { + {237, 416, 559, 1071, 2095, 4143}, + {128, 217, 289, 545, 1057, 2081}, + {73, 118, 154, 282, 538, 1050}, + {67, 107, 86, 150, 278, 534} + }; + static const u16 rpl_tmr[4][6] = { + {711, 1248, 1677, 3213, 6285, 12429}, + {384, 651, 867, 1635, 3171, 6243}, + {219, 354, 462, 846, 1614, 3150}, + {201, 321, 258, 450, 834, 1602} + }; + + u16 val, devid; + unsigned int log2_width, pldsize; + unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt; + + pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL, &val); + pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5; + + pci_read_config_word(adap->pdev, 0x2, &devid); + if (devid == 0x37) { + pcie_capability_write_word(adap->pdev, PCI_EXP_DEVCTL, + val & ~PCI_EXP_DEVCTL_READRQ & + ~PCI_EXP_DEVCTL_PAYLOAD); + pldsize = 0; + } + + pcie_capability_read_word(adap->pdev, PCI_EXP_LNKCTL, &val); + + fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0)); + fst_trn_rx = adap->params.rev == 0 ? 
fst_trn_tx : + G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE)); + log2_width = fls(adap->params.pci.width) - 1; + acklat = ack_lat[log2_width][pldsize]; + if (val & PCI_EXP_LNKCTL_ASPM_L0S) /* check LOsEnable */ + acklat += fst_trn_tx * 4; + rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4; + + if (adap->params.rev == 0) + t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, + V_T3A_ACKLAT(M_T3A_ACKLAT), + V_T3A_ACKLAT(acklat)); + else + t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT), + V_ACKLAT(acklat)); + + t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT), + V_REPLAYLMT(rpllmt)); + + t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff); + t3_set_reg_field(adap, A_PCIE_CFG, 0, + F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST | + F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN); +} + +/* + * Initialize and configure T3 HW modules. This performs the + * initialization steps that need to be done once after a card is reset. + * MAC and PHY initialization is handled separarely whenever a port is enabled. + * + * fw_params are passed to FW and their value is platform dependent. Only the + * top 8 bits are available for use, the rest must be 0. + */ +int t3_init_hw(struct adapter *adapter, u32 fw_params) +{ + int err = -EIO, attempts, i; + const struct vpd_params *vpd = &adapter->params.vpd; + + if (adapter->params.rev > 0) + calibrate_xgm_t3b(adapter); + else if (calibrate_xgm(adapter)) + goto out_err; + + if (vpd->mclk) { + partition_mem(adapter, &adapter->params.tp); + + if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) || + mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) || + mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) || + t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers, + adapter->params.mc5.nfilters, + adapter->params.mc5.nroutes)) + goto out_err; + + for (i = 0; i < 32; i++) + if (clear_sge_ctxt(adapter, i, F_CQ)) + goto out_err; + } + + if (tp_init(adapter, &adapter->params.tp)) + goto out_err; + + t3_tp_set_coalescing_size(adapter, + min(adapter->params.sge.max_pkt_size, + MAX_RX_COALESCING_LEN), 1); + t3_tp_set_max_rxsize(adapter, + min(adapter->params.sge.max_pkt_size, 16384U)); + ulp_config(adapter, &adapter->params.tp); + + if (is_pcie(adapter)) + config_pcie(adapter); + else + t3_set_reg_field(adapter, A_PCIX_CFG, 0, + F_DMASTOPEN | F_CLIDECEN); + + if (adapter->params.rev == T3_REV_C) + t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0, + F_CFG_CQE_SOP_MASK); + + t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); + t3_write_reg(adapter, A_PM1_RX_MODE, 0); + t3_write_reg(adapter, A_PM1_TX_MODE, 0); + chan_init_hw(adapter, adapter->params.chan_map); + t3_sge_init(adapter, &adapter->params.sge); + t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN); + + t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter)); + + t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params); + t3_write_reg(adapter, A_CIM_BOOT_CFG, + V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2)); + t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */ + + attempts = 100; + do { /* wait for uP to initialize */ + msleep(20); + } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts); + if (!attempts) { + CH_ERR(adapter, "uP initialization timed out\n"); + goto out_err; + } + + err = 0; +out_err: + return err; +} + +/** + * get_pci_mode - determine a card's PCI mode + * @adapter: the adapter + * @p: where to store the PCI settings + * + * Determines a card's PCI mode and associated parameters, such as speed + * and width. 
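+ *
+ * For PCIe adapters only the negotiated link width (bits 9:4 of the
+ * Link Status register) is recorded; for PCI/PCI-X the bus speed is
+ * taken from a fixed 33/66/100/133 MHz map indexed by PCLKRANGE and the
+ * width from the 64-bit mode bit.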
+ */ +static void get_pci_mode(struct adapter *adapter, struct pci_params *p) +{ + static unsigned short speed_map[] = { 33, 66, 100, 133 }; + u32 pci_mode; + + if (pci_is_pcie(adapter->pdev)) { + u16 val; + + p->variant = PCI_VARIANT_PCIE; + pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); + p->width = (val >> 4) & 0x3f; + return; + } + + pci_mode = t3_read_reg(adapter, A_PCIX_MODE); + p->speed = speed_map[G_PCLKRANGE(pci_mode)]; + p->width = (pci_mode & F_64BIT) ? 64 : 32; + pci_mode = G_PCIXINITPAT(pci_mode); + if (pci_mode == 0) + p->variant = PCI_VARIANT_PCI; + else if (pci_mode < 4) + p->variant = PCI_VARIANT_PCIX_MODE1_PARITY; + else if (pci_mode < 8) + p->variant = PCI_VARIANT_PCIX_MODE1_ECC; + else + p->variant = PCI_VARIANT_PCIX_266_MODE2; +} + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @ai: information about the current card + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/duplex/flow-control/autonegotiation + * settings. + */ +static void init_link_config(struct link_config *lc, unsigned int caps) +{ + lc->supported = caps; + lc->requested_speed = lc->speed = SPEED_INVALID; + lc->requested_duplex = lc->duplex = DUPLEX_INVALID; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & SUPPORTED_Autoneg) { + lc->advertising = lc->supported; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/** + * mc7_calc_size - calculate MC7 memory size + * @cfg: the MC7 configuration + * + * Calculates the size of an MC7 memory in bytes from the value of its + * configuration register. + */ +static unsigned int mc7_calc_size(u32 cfg) +{ + unsigned int width = G_WIDTH(cfg); + unsigned int banks = !!(cfg & F_BKS) + 1; + unsigned int org = !!(cfg & F_ORG) + 1; + unsigned int density = G_DEN(cfg); + unsigned int MBs = ((256 << density) * banks) / (org << width); + + return MBs << 20; +} + +static void mc7_prep(struct adapter *adapter, struct mc7 *mc7, + unsigned int base_addr, const char *name) +{ + u32 cfg; + + mc7->adapter = adapter; + mc7->name = name; + mc7->offset = base_addr - MC7_PMRX_BASE_ADDR; + cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); + mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg); + mc7->width = G_WIDTH(cfg); +} + +static void mac_prep(struct cmac *mac, struct adapter *adapter, int index) +{ + u16 devid; + + mac->adapter = adapter; + pci_read_config_word(adapter->pdev, 0x2, &devid); + + if (devid == 0x37 && !adapter->params.vpd.xauicfg[1]) + index = 0; + mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index; + mac->nucast = 1; + + if (adapter->params.rev == 0 && uses_xaui(adapter)) { + t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset, + is_10G(adapter) ? 0x2901c04 : 0x2301c04); + t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset, + F_ENRGMII, 0); + } +} + +static void early_hw_init(struct adapter *adapter, + const struct adapter_info *ai) +{ + u32 val = V_PORTSPEED(is_10G(adapter) ? 
3 : 2); + + mi1_init(adapter, ai); + t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */ + V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1)); + t3_write_reg(adapter, A_T3DBG_GPIO_EN, + ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL); + t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0); + t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff)); + + if (adapter->params.rev == 0 || !uses_xaui(adapter)) + val |= F_ENRGMII; + + /* Enable MAC clocks so we can access the registers */ + t3_write_reg(adapter, A_XGM_PORT_CFG, val); + t3_read_reg(adapter, A_XGM_PORT_CFG); + + val |= F_CLKDIVRESET_; + t3_write_reg(adapter, A_XGM_PORT_CFG, val); + t3_read_reg(adapter, A_XGM_PORT_CFG); + t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val); + t3_read_reg(adapter, A_XGM_PORT_CFG); +} + +/* + * Reset the adapter. + * Older PCIe cards lose their config space during reset, PCI-X + * ones don't. + */ +int t3_reset_adapter(struct adapter *adapter) +{ + int i, save_and_restore_pcie = + adapter->params.rev < T3_REV_B2 && is_pcie(adapter); + uint16_t devid = 0; + + if (save_and_restore_pcie) + pci_save_state(adapter->pdev); + t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE); + + /* + * Delay. Give Some time to device to reset fully. + * XXX The delay time should be modified. + */ + for (i = 0; i < 10; i++) { + msleep(50); + pci_read_config_word(adapter->pdev, 0x00, &devid); + if (devid == 0x1425) + break; + } + + if (devid != 0x1425) + return -1; + + if (save_and_restore_pcie) + pci_restore_state(adapter->pdev); + return 0; +} + +static int init_parity(struct adapter *adap) +{ + int i, err, addr; + + if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) + return -EBUSY; + + for (err = i = 0; !err && i < 16; i++) + err = clear_sge_ctxt(adap, i, F_EGRESS); + for (i = 0xfff0; !err && i <= 0xffff; i++) + err = clear_sge_ctxt(adap, i, F_EGRESS); + for (i = 0; !err && i < SGE_QSETS; i++) + err = clear_sge_ctxt(adap, i, F_RESPONSEQ); + if (err) + return err; + + t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0); + for (i = 0; i < 4; i++) + for (addr = 0; addr <= M_IBQDBGADDR; addr++) { + t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN | + F_IBQDBGWR | V_IBQDBGQID(i) | + V_IBQDBGADDR(addr)); + err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, + F_IBQDBGBUSY, 0, 2, 1); + if (err) + return err; + } + return 0; +} + +/* + * Initialize adapter SW state for the various HW modules, set initial values + * for some adapter tunables, take PHYs out of reset, and initialize the MDIO + * interface. + */ +int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, + int reset) +{ + int ret; + unsigned int i, j = -1; + + get_pci_mode(adapter, &adapter->params.pci); + + adapter->params.info = ai; + adapter->params.nports = ai->nports0 + ai->nports1; + adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1); + adapter->params.rev = t3_read_reg(adapter, A_PL_REV); + /* + * We used to only run the "adapter check task" once a second if + * we had PHYs which didn't support interrupts (we would check + * their link status once a second). Now we check other conditions + * in that routine which could potentially impose a very high + * interrupt load on the system. As such, we now always scan the + * adapter state once a second ... + */ + adapter->params.linkpoll_period = 10; + adapter->params.stats_update_period = is_10G(adapter) ? 
+ MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); + adapter->params.pci.vpd_cap_addr = + pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); + ret = get_vpd_params(adapter, &adapter->params.vpd); + if (ret < 0) + return ret; + + if (reset && t3_reset_adapter(adapter)) + return -1; + + t3_sge_prep(adapter, &adapter->params.sge); + + if (adapter->params.vpd.mclk) { + struct tp_params *p = &adapter->params.tp; + + mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX"); + mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX"); + mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM"); + + p->nchan = adapter->params.chan_map == 3 ? 2 : 1; + p->pmrx_size = t3_mc7_size(&adapter->pmrx); + p->pmtx_size = t3_mc7_size(&adapter->pmtx); + p->cm_size = t3_mc7_size(&adapter->cm); + p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */ + p->chan_tx_size = p->pmtx_size / p->nchan; + p->rx_pg_size = 64 * 1024; + p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024; + p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size); + p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size); + p->ntimer_qs = p->cm_size >= (128 << 20) || + adapter->params.rev > 0 ? 12 : 6; + } + + adapter->params.offload = t3_mc7_size(&adapter->pmrx) && + t3_mc7_size(&adapter->pmtx) && + t3_mc7_size(&adapter->cm); + + if (is_offload(adapter)) { + adapter->params.mc5.nservers = DEFAULT_NSERVERS; + adapter->params.mc5.nfilters = adapter->params.rev > 0 ? + DEFAULT_NFILTERS : 0; + adapter->params.mc5.nroutes = 0; + t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT); + + init_mtus(adapter->params.mtus); + init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); + } + + early_hw_init(adapter, ai); + ret = init_parity(adapter); + if (ret) + return ret; + + for_each_port(adapter, i) { + u8 hw_addr[6]; + const struct port_type_info *pti; + struct port_info *p = adap2pinfo(adapter, i); + + while (!adapter->params.vpd.port_type[++j]) + ; + + pti = &port_types[adapter->params.vpd.port_type[j]]; + if (!pti->phy_prep) { + CH_ALERT(adapter, "Invalid port type index %d\n", + adapter->params.vpd.port_type[j]); + return -EINVAL; + } + + p->phy.mdio.dev = adapter->port[i]; + ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, + ai->mdio_ops); + if (ret) + return ret; + mac_prep(&p->mac, adapter, j); + + /* + * The VPD EEPROM stores the base Ethernet address for the + * card. A port's address is derived from the base by adding + * the port's index to the base's low octet. + */ + memcpy(hw_addr, adapter->params.vpd.eth_base, 5); + hw_addr[5] = adapter->params.vpd.eth_base[5] + i; + + memcpy(adapter->port[i]->dev_addr, hw_addr, + ETH_ALEN); + init_link_config(&p->link_config, p->phy.caps); + p->phy.ops->power_down(&p->phy, 1); + + /* + * If the PHY doesn't support interrupts for link status + * changes, schedule a scan of the adapter links at least + * once a second. 
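+ * (linkpoll_period is expressed in tenths of a second, so capping it at
+ * 10 gives at least one scan per second.)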
+ */ + if (!(p->phy.caps & SUPPORTED_IRQ) && + adapter->params.linkpoll_period > 10) + adapter->params.linkpoll_period = 10; + } + + return 0; +} + +void t3_led_ready(struct adapter *adapter) +{ + t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, + F_GPIO0_OUT_VAL); +} + +int t3_replay_prep_adapter(struct adapter *adapter) +{ + const struct adapter_info *ai = adapter->params.info; + unsigned int i, j = -1; + int ret; + + early_hw_init(adapter, ai); + ret = init_parity(adapter); + if (ret) + return ret; + + for_each_port(adapter, i) { + const struct port_type_info *pti; + struct port_info *p = adap2pinfo(adapter, i); + + while (!adapter->params.vpd.port_type[++j]) + ; + + pti = &port_types[adapter->params.vpd.port_type[j]]; + ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL); + if (ret) + return ret; + p->phy.ops->power_down(&p->phy, 1); + } + +return 0; +} + diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h new file mode 100644 index 000000000..705713b56 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2006-2008 Chelsio Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _T3CDEV_H_ +#define _T3CDEV_H_ + +#include +#include +#include +#include +#include +#include + +#define T3CNAMSIZ 16 + +struct cxgb3_client; + +enum t3ctype { + T3A = 0, + T3B, + T3C, +}; + +struct t3cdev { + char name[T3CNAMSIZ]; /* T3C device name */ + enum t3ctype type; + struct list_head ofld_dev_list; /* for list linking */ + struct net_device *lldev; /* LL dev associated with T3C messages */ + struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */ + int (*send)(struct t3cdev *dev, struct sk_buff *skb); + int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n); + int (*ctl)(struct t3cdev *dev, unsigned int req, void *data); + void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh); + void *priv; /* driver private data */ + void *l2opt; /* optional layer 2 data */ + void *l3opt; /* optional layer 3 data */ + void *l4opt; /* optional layer 4 data */ + void *ulp; /* ulp stuff */ + void *ulp_iscsi; /* ulp iscsi */ +}; + +#endif /* _T3CDEV_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/version.h b/drivers/net/ethernet/chelsio/cxgb3/version.h new file mode 100644 index 000000000..165bfb914 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/version.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */ +#ifndef __CHELSIO_VERSION_H +#define __CHELSIO_VERSION_H +#define DRV_DESC "Chelsio T3 Network Driver" +#define DRV_NAME "cxgb3" +/* Driver version */ +#define DRV_VERSION "1.1.5-ko" + +/* Firmware version */ +#define FW_VERSION_MAJOR 7 +#define FW_VERSION_MINOR 12 +#define FW_VERSION_MICRO 0 +#endif /* __CHELSIO_VERSION_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c new file mode 100644 index 000000000..4f9a1c272 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/vsc8211.c @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "common.h" + +/* VSC8211 PHY specific registers. */ +enum { + VSC8211_SIGDET_CTRL = 19, + VSC8211_EXT_CTRL = 23, + VSC8211_INTR_ENABLE = 25, + VSC8211_INTR_STATUS = 26, + VSC8211_LED_CTRL = 27, + VSC8211_AUX_CTRL_STAT = 28, + VSC8211_EXT_PAGE_AXS = 31, +}; + +enum { + VSC_INTR_RX_ERR = 1 << 0, + VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */ + VSC_INTR_CABLE = 1 << 2, /* cable impairment */ + VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */ + VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */ + VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */ + VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */ + VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */ + VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */ + VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */ + VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */ + VSC_INTR_DPLX_CHG = 1 << 12, /* duplex change */ + VSC_INTR_LINK_CHG = 1 << 13, /* link change */ + VSC_INTR_SPD_CHG = 1 << 14, /* speed change */ + VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */ +}; + +enum { + VSC_CTRL_CLAUSE37_VIEW = 1 << 4, /* Switch to Clause 37 view */ + VSC_CTRL_MEDIA_MODE_HI = 0xf000 /* High part of media mode select */ +}; + +#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \ + VSC_INTR_DPLX_CHG | VSC_INTR_SPD_CHG | \ + VSC_INTR_NEG_DONE) +#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \ + VSC_INTR_ENABLE) + +/* PHY specific auxiliary control & status register fields */ +#define S_ACSR_ACTIPHY_TMR 0 +#define M_ACSR_ACTIPHY_TMR 0x3 +#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR) + +#define S_ACSR_SPEED 3 +#define M_ACSR_SPEED 0x3 +#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED) + +#define S_ACSR_DUPLEX 5 +#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX) + +#define S_ACSR_ACTIPHY 6 +#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY) + +/* + * Reset the PHY. This PHY completes reset immediately so we never wait. 
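+ * The wait argument is accepted for interface compatibility only and is
+ * ignored.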
+ */ +static int vsc8211_reset(struct cphy *cphy, int wait) +{ + return t3_phy_reset(cphy, MDIO_DEVAD_NONE, 0); +} + +static int vsc8211_intr_enable(struct cphy *cphy) +{ + return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, + INTR_MASK); +} + +static int vsc8211_intr_disable(struct cphy *cphy) +{ + return t3_mdio_write(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_ENABLE, 0); +} + +static int vsc8211_intr_clear(struct cphy *cphy) +{ + u32 val; + + /* Clear PHY interrupts by reading the register. */ + return t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &val); +} + +static int vsc8211_autoneg_enable(struct cphy *cphy) +{ + return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR, + BMCR_PDOWN | BMCR_ISOLATE, + BMCR_ANENABLE | BMCR_ANRESTART); +} + +static int vsc8211_autoneg_restart(struct cphy *cphy) +{ + return t3_mdio_change_bits(cphy, MDIO_DEVAD_NONE, MII_BMCR, + BMCR_PDOWN | BMCR_ISOLATE, + BMCR_ANRESTART); +} + +static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + unsigned int bmcr, status, lpa, adv; + int err, sp = -1, dplx = -1, pause = 0; + + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status); + if (err) + return err; + + if (link_ok) { + /* + * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it + * once more to get the current link state. + */ + if (!(status & BMSR_LSTATUS)) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, + &status); + if (err) + return err; + *link_ok = (status & BMSR_LSTATUS) != 0; + } + if (!(bmcr & BMCR_ANENABLE)) { + dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + sp = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + sp = SPEED_100; + else + sp = SPEED_10; + } else if (status & BMSR_ANEGCOMPLETE) { + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_AUX_CTRL_STAT, + &status); + if (err) + return err; + + dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; + sp = G_ACSR_SPEED(status); + if (sp == 0) + sp = SPEED_10; + else if (sp == 1) + sp = SPEED_100; + else + sp = SPEED_1000; + + if (fc && dplx == DUPLEX_FULL) { + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, + &lpa); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, + MII_ADVERTISE, &adv); + if (err) + return err; + + if (lpa & adv & ADVERTISE_PAUSE_CAP) + pause = PAUSE_RX | PAUSE_TX; + else if ((lpa & ADVERTISE_PAUSE_CAP) && + (lpa & ADVERTISE_PAUSE_ASYM) && + (adv & ADVERTISE_PAUSE_ASYM)) + pause = PAUSE_TX; + else if ((lpa & ADVERTISE_PAUSE_ASYM) && + (adv & ADVERTISE_PAUSE_CAP)) + pause = PAUSE_RX; + } + } + if (speed) + *speed = sp; + if (duplex) + *duplex = dplx; + if (fc) + *fc = pause; + return 0; +} + +static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok, + int *speed, int *duplex, int *fc) +{ + unsigned int bmcr, status, lpa, adv; + int err, sp = -1, dplx = -1, pause = 0; + + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMCR, &bmcr); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, &status); + if (err) + return err; + + if (link_ok) { + /* + * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it + * once more to get the current link state. + */ + if (!(status & BMSR_LSTATUS)) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_BMSR, + &status); + if (err) + return err; + *link_ok = (status & BMSR_LSTATUS) != 0; + } + if (!(bmcr & BMCR_ANENABLE)) { + dplx = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + sp = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + sp = SPEED_100; + else + sp = SPEED_10; + } else if (status & BMSR_ANEGCOMPLETE) { + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_LPA, &lpa); + if (!err) + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, MII_ADVERTISE, + &adv); + if (err) + return err; + + if (adv & lpa & ADVERTISE_1000XFULL) { + dplx = DUPLEX_FULL; + sp = SPEED_1000; + } else if (adv & lpa & ADVERTISE_1000XHALF) { + dplx = DUPLEX_HALF; + sp = SPEED_1000; + } + + if (fc && dplx == DUPLEX_FULL) { + if (lpa & adv & ADVERTISE_1000XPAUSE) + pause = PAUSE_RX | PAUSE_TX; + else if ((lpa & ADVERTISE_1000XPAUSE) && + (adv & lpa & ADVERTISE_1000XPSE_ASYM)) + pause = PAUSE_TX; + else if ((lpa & ADVERTISE_1000XPSE_ASYM) && + (adv & ADVERTISE_1000XPAUSE)) + pause = PAUSE_RX; + } + } + if (speed) + *speed = sp; + if (duplex) + *duplex = dplx; + if (fc) + *fc = pause; + return 0; +} + +#ifdef UNUSED +/* + * Enable/disable auto MDI/MDI-X in forced link speed mode. + */ +static int vsc8211_set_automdi(struct cphy *phy, int enable) +{ + int err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0x52b5); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 18, 0x12); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 17, enable ? 0x2803 : 0x3003); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, 16, 0x87fa); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0); + if (err) + return err; + + return 0; +} + +int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex) +{ + int err; + + err = t3_set_phy_speed_duplex(phy, speed, duplex); + if (!err) + err = vsc8211_set_automdi(phy, 1); + return err; +} +#endif /* UNUSED */ + +static int vsc8211_power_down(struct cphy *cphy, int enable) +{ + return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN, + enable ? 
BMCR_PDOWN : 0); +} + +static int vsc8211_intr_handler(struct cphy *cphy) +{ + unsigned int cause; + int err, cphy_cause = 0; + + err = t3_mdio_read(cphy, MDIO_DEVAD_NONE, VSC8211_INTR_STATUS, &cause); + if (err) + return err; + + cause &= INTR_MASK; + if (cause & CFG_CHG_INTR_MASK) + cphy_cause |= cphy_cause_link_change; + if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO)) + cphy_cause |= cphy_cause_fifo_error; + return cphy_cause; +} + +static struct cphy_ops vsc8211_ops = { + .reset = vsc8211_reset, + .intr_enable = vsc8211_intr_enable, + .intr_disable = vsc8211_intr_disable, + .intr_clear = vsc8211_intr_clear, + .intr_handler = vsc8211_intr_handler, + .autoneg_enable = vsc8211_autoneg_enable, + .autoneg_restart = vsc8211_autoneg_restart, + .advertise = t3_phy_advertise, + .set_speed_duplex = t3_set_phy_speed_duplex, + .get_link_status = vsc8211_get_link_status, + .power_down = vsc8211_power_down, +}; + +static struct cphy_ops vsc8211_fiber_ops = { + .reset = vsc8211_reset, + .intr_enable = vsc8211_intr_enable, + .intr_disable = vsc8211_intr_disable, + .intr_clear = vsc8211_intr_clear, + .intr_handler = vsc8211_intr_handler, + .autoneg_enable = vsc8211_autoneg_enable, + .autoneg_restart = vsc8211_autoneg_restart, + .advertise = t3_phy_advertise_fiber, + .set_speed_duplex = t3_set_phy_speed_duplex, + .get_link_status = vsc8211_get_link_status_fiber, + .power_down = vsc8211_power_down, +}; + +int t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter, + int phy_addr, const struct mdio_ops *mdio_ops) +{ + int err; + unsigned int val; + + cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops, + SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII | + SUPPORTED_TP | SUPPORTED_IRQ, "10/100/1000BASE-T"); + msleep(20); /* PHY needs ~10ms to start responding to MDIO */ + + err = t3_mdio_read(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, &val); + if (err) + return err; + if (val & VSC_CTRL_MEDIA_MODE_HI) { + /* copper interface, just need to configure the LEDs */ + return t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_LED_CTRL, + 0x100); + } + + phy->caps = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_MII | SUPPORTED_FIBRE | SUPPORTED_IRQ; + phy->desc = "1000BASE-X"; + phy->ops = &vsc8211_fiber_ops; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 1); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_SIGDET_CTRL, 1); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_PAGE_AXS, 0); + if (err) + return err; + + err = t3_mdio_write(phy, MDIO_DEVAD_NONE, VSC8211_EXT_CTRL, + val | VSC_CTRL_CLAUSE37_VIEW); + if (err) + return err; + + err = vsc8211_reset(phy, 0); + if (err) + return err; + + udelay(5); /* delay after reset before next SMI */ + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c new file mode 100644 index 000000000..3af19a550 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c @@ -0,0 +1,657 @@ +/* + * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "common.h" +#include "regs.h" + +/* + * # of exact address filters. The first one is used for the station address, + * the rest are available for multicast addresses. + */ +#define EXACT_ADDR_FILTERS 8 + +static inline int macidx(const struct cmac *mac) +{ + return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR); +} + +static void xaui_serdes_reset(struct cmac *mac) +{ + static const unsigned int clear[] = { + F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1, + F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3 + }; + + int i; + struct adapter *adap = mac->adapter; + u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset; + + t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] | + F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 | + F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 | + F_RESETPLL23 | F_RESETPLL01); + t3_read_reg(adap, ctrl); + udelay(15); + + for (i = 0; i < ARRAY_SIZE(clear); i++) { + t3_set_reg_field(adap, ctrl, clear[i], 0); + udelay(15); + } +} + +void t3b_pcs_reset(struct cmac *mac) +{ + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, + F_PCS_RESET_, 0); + udelay(20); + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0, + F_PCS_RESET_); +} + +int t3_mac_reset(struct cmac *mac) +{ + static const struct addr_val_pair mac_reset_avp[] = { + {A_XGM_TX_CTRL, 0}, + {A_XGM_RX_CTRL, 0}, + {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES | + F_RMFCS | F_ENJUMBO | F_ENHASHMCAST}, + {A_XGM_RX_HASH_LOW, 0}, + {A_XGM_RX_HASH_HIGH, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_1, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_2, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_3, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_4, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_5, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_6, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_7, 0}, + {A_XGM_RX_EXACT_MATCH_LOW_8, 0}, + {A_XGM_STAT_CTRL, F_CLRSTATS} + }; + u32 val; + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset; + + t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_); + t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ + + t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft); + t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft, + F_RXSTRFRWRD | F_DISERRFRAMES, + uses_xaui(adap) ? 
0 : F_RXSTRFRWRD); + t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX); + + if (uses_xaui(adap)) { + if (adap->params.rev == 0) { + t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0, + F_RXENABLE | F_TXENABLE); + if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft, + F_CMULOCK, 1, 5, 2)) { + CH_ERR(adap, + "MAC %d XAUI SERDES CMU lock failed\n", + macidx(mac)); + return -1; + } + t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0, + F_SERDESRESET_); + } else + xaui_serdes_reset(mac); + } + + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft, + V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE), + V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER); + val = F_MAC_RESET_ | F_XGMAC_STOP_EN; + + if (is_10G(adap)) + val |= F_PCS_RESET_; + else if (uses_xaui(adap)) + val |= F_PCS_RESET_ | F_XG2G_RESET_; + else + val |= F_RGMII_RESET_ | F_XG2G_RESET_; + t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val); + t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ + if ((val & F_PCS_RESET_) && adap->params.rev) { + msleep(1); + t3b_pcs_reset(mac); + } + + memset(&mac->stats, 0, sizeof(mac->stats)); + return 0; +} + +static int t3b2_mac_reset(struct cmac *mac) +{ + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset, store; + int idx = macidx(mac); + u32 val; + + if (!macidx(mac)) + t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0); + else + t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0); + + /* Stop NIC traffic to reduce the number of TXTOGGLES */ + t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0); + /* Ensure TX drains */ + t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0); + + t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_); + t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ + + /* Store A_TP_TX_DROP_CFG_CH0 */ + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); + store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx); + + msleep(10); + + /* Change DROP_CFG to 0xc0000011 */ + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); + t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011); + + /* Check for xgm Rx fifo empty */ + /* Increased loop count to 1000 from 5 cover 1G and 100Mbps case */ + if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft, + 0x80000000, 1, 1000, 2)) { + CH_ERR(adap, "MAC %d Rx fifo drain failed\n", + macidx(mac)); + return -1; + } + + t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0); + t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ + + val = F_MAC_RESET_; + if (is_10G(adap)) + val |= F_PCS_RESET_; + else if (uses_xaui(adap)) + val |= F_PCS_RESET_ | F_XG2G_RESET_; + else + val |= F_RGMII_RESET_ | F_XG2G_RESET_; + t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val); + t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */ + if ((val & F_PCS_RESET_) && adap->params.rev) { + msleep(1); + t3b_pcs_reset(mac); + } + t3_write_reg(adap, A_XGM_RX_CFG + oft, + F_DISPAUSEFRAMES | F_EN1536BFRAMES | + F_RMFCS | F_ENJUMBO | F_ENHASHMCAST); + + /* Restore the DROP_CFG */ + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); + t3_write_reg(adap, A_TP_PIO_DATA, store); + + if (!idx) + t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE); + else + t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE); + + /* re-enable nic traffic */ + t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1); + + /* Set: re-enable NIC traffic */ + t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1); + + return 0; +} + +/* + * Set the exact match register 'idx' to recognize the given Ethernet address. 
+ */ +static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr) +{ + u32 addr_lo, addr_hi; + unsigned int oft = mac->offset + idx * 8; + + addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + addr_hi = (addr[5] << 8) | addr[4]; + + t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo); + t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi); +} + +/* Set one of the station's unicast MAC addresses. */ +int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]) +{ + if (idx >= mac->nucast) + return -EINVAL; + set_addr_filter(mac, idx, addr); + return 0; +} + +/* + * Specify the number of exact address filters that should be reserved for + * unicast addresses. Caller should reload the unicast and multicast addresses + * after calling this. + */ +int t3_mac_set_num_ucast(struct cmac *mac, int n) +{ + if (n > EXACT_ADDR_FILTERS) + return -EINVAL; + mac->nucast = n; + return 0; +} + +void t3_mac_disable_exact_filters(struct cmac *mac) +{ + unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1; + + for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) { + u32 v = t3_read_reg(mac->adapter, reg); + t3_write_reg(mac->adapter, reg, v); + } + t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */ +} + +void t3_mac_enable_exact_filters(struct cmac *mac) +{ + unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1; + + for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) { + u32 v = t3_read_reg(mac->adapter, reg); + t3_write_reg(mac->adapter, reg, v); + } + t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */ +} + +/* Calculate the RX hash filter index of an Ethernet address */ +static int hash_hw_addr(const u8 * addr) +{ + int hash = 0, octet, bit, i = 0, c; + + for (octet = 0; octet < 6; ++octet) + for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) { + hash ^= (c & 1) << i; + if (++i == 6) + i = 0; + } + return hash; +} + +int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev) +{ + u32 val, hash_lo, hash_hi; + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset; + + val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES; + if (dev->flags & IFF_PROMISC) + val |= F_COPYALLFRAMES; + t3_write_reg(adap, A_XGM_RX_CFG + oft, val); + + if (dev->flags & IFF_ALLMULTI) + hash_lo = hash_hi = 0xffffffff; + else { + struct netdev_hw_addr *ha; + int exact_addr_idx = mac->nucast; + + hash_lo = hash_hi = 0; + netdev_for_each_mc_addr(ha, dev) + if (exact_addr_idx < EXACT_ADDR_FILTERS) + set_addr_filter(mac, exact_addr_idx++, + ha->addr); + else { + int hash = hash_hw_addr(ha->addr); + + if (hash < 32) + hash_lo |= (1 << hash); + else + hash_hi |= (1 << (hash - 32)); + } + } + + t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo); + t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi); + return 0; +} + +static int rx_fifo_hwm(int mtu) +{ + int hwm; + + hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100); + return min(hwm, MAC_RXFIFO_SIZE - 8192); +} + +int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) +{ + int hwm, lwm, divisor; + int ipg; + unsigned int thres, v, reg; + struct adapter *adap = mac->adapter; + + /* + * MAX_FRAME_SIZE inludes header + FCS, mtu doesn't. The HW max + * packet size register includes header, but not FCS. 
+ */ + mtu += 14; + if (mtu > 1536) + mtu += 4; + + if (mtu > MAX_FRAME_SIZE - 4) + return -EINVAL; + t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); + + if (adap->params.rev >= T3_REV_B2 && + (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) { + t3_mac_disable_exact_filters(mac); + v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset); + t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset, + F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST); + + reg = adap->params.rev == T3_REV_B2 ? + A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG; + + /* drain RX FIFO */ + if (t3_wait_op_done(adap, reg + mac->offset, + F_RXFIFO_EMPTY, 1, 20, 5)) { + t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); + t3_mac_enable_exact_filters(mac); + return -EIO; + } + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); + t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v); + t3_mac_enable_exact_filters(mac); + } else + t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, + V_RXMAXPKTSIZE(M_RXMAXPKTSIZE), + V_RXMAXPKTSIZE(mtu)); + + /* + * Adjust the PAUSE frame watermarks. We always set the LWM, and the + * HWM only if flow-control is enabled. + */ + hwm = rx_fifo_hwm(mtu); + lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4); + v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset); + v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM); + v |= V_RXFIFOPAUSELWM(lwm / 8); + if (G_RXFIFOPAUSEHWM(v)) + v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) | + V_RXFIFOPAUSEHWM(hwm / 8); + + t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v); + + /* Adjust the TX FIFO threshold based on the MTU */ + thres = (adap->params.vpd.cclk * 1000) / 15625; + thres = (thres * mtu) / 1000; + if (is_10G(adap)) + thres /= 10; + thres = mtu > thres ? (mtu - thres + 7) / 8 : 0; + thres = max(thres, 8U); /* need at least 8 */ + ipg = (adap->params.rev == T3_REV_C) ? 0 : 1; + t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset, + V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG), + V_TXFIFOTHRESH(thres) | V_TXIPG(ipg)); + + if (adap->params.rev > 0) { + divisor = (adap->params.rev == T3_REV_C) ? 64 : 8; + t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset, + (hwm - lwm) * 4 / divisor); + } + t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset, + MAC_RXFIFO_SIZE * 4 * 8 / 512); + return 0; +} + +int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc) +{ + u32 val; + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset; + + if (duplex >= 0 && duplex != DUPLEX_FULL) + return -EINVAL; + if (speed >= 0) { + if (speed == SPEED_10) + val = V_PORTSPEED(0); + else if (speed == SPEED_100) + val = V_PORTSPEED(1); + else if (speed == SPEED_1000) + val = V_PORTSPEED(2); + else if (speed == SPEED_10000) + val = V_PORTSPEED(3); + else + return -EINVAL; + + t3_set_reg_field(adap, A_XGM_PORT_CFG + oft, + V_PORTSPEED(M_PORTSPEED), val); + } + + val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft); + val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM); + if (fc & PAUSE_TX) { + u32 rx_max_pkt_size = + G_RXMAXPKTSIZE(t3_read_reg(adap, + A_XGM_RX_MAX_PKT_SIZE + oft)); + val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8); + } + t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val); + + t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, + (fc & PAUSE_RX) ? 
F_TXPAUSEEN : 0); + return 0; +} + +int t3_mac_enable(struct cmac *mac, int which) +{ + int idx = macidx(mac); + struct adapter *adap = mac->adapter; + unsigned int oft = mac->offset; + struct mac_stats *s = &mac->stats; + + if (which & MAC_DIRECTION_TX) { + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); + t3_write_reg(adap, A_TP_PIO_DATA, + adap->params.rev == T3_REV_C ? + 0xc4ffff01 : 0xc0ede401); + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); + t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, + adap->params.rev == T3_REV_C ? 0 : 1 << idx); + + t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); + + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx); + mac->tx_mcnt = s->tx_frames; + mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, + A_TP_PIO_DATA))); + mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, + A_XGM_TX_SPI4_SOP_EOP_CNT + + oft))); + mac->rx_mcnt = s->rx_frames; + mac->rx_pause = s->rx_pause; + mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, + A_XGM_RX_SPI4_SOP_EOP_CNT + + oft))); + mac->rx_ocnt = s->rx_fifo_ovfl; + mac->txen = F_TXEN; + mac->toggle_cnt = 0; + } + if (which & MAC_DIRECTION_RX) + t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN); + return 0; +} + +int t3_mac_disable(struct cmac *mac, int which) +{ + struct adapter *adap = mac->adapter; + + if (which & MAC_DIRECTION_TX) { + t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); + mac->txen = 0; + } + if (which & MAC_DIRECTION_RX) { + int val = F_MAC_RESET_; + + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, + F_PCS_RESET_, 0); + msleep(100); + t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0); + if (is_10G(adap)) + val |= F_PCS_RESET_; + else if (uses_xaui(adap)) + val |= F_PCS_RESET_ | F_XG2G_RESET_; + else + val |= F_RGMII_RESET_ | F_XG2G_RESET_; + t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val); + } + return 0; +} + +int t3b2_mac_watchdog_task(struct cmac *mac) +{ + struct adapter *adap = mac->adapter; + struct mac_stats *s = &mac->stats; + unsigned int tx_tcnt, tx_xcnt; + u64 tx_mcnt = s->tx_frames; + int status; + + status = 0; + tx_xcnt = 1; /* By default tx_xcnt is making progress */ + tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */ + if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) { + tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, + A_XGM_TX_SPI4_SOP_EOP_CNT + + mac->offset))); + if (tx_xcnt == 0) { + t3_write_reg(adap, A_TP_PIO_ADDR, + A_TP_TX_DROP_CNT_CH0 + macidx(mac)); + tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, + A_TP_PIO_DATA))); + } else { + goto out; + } + } else { + mac->toggle_cnt = 0; + goto out; + } + + if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) { + if (mac->toggle_cnt > 4) { + status = 2; + goto out; + } else { + status = 1; + goto out; + } + } else { + mac->toggle_cnt = 0; + goto out; + } + +out: + mac->tx_tcnt = tx_tcnt; + mac->tx_xcnt = tx_xcnt; + mac->tx_mcnt = s->tx_frames; + mac->rx_pause = s->rx_pause; + if (status == 1) { + t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); + t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ + t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen); + t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ + mac->toggle_cnt++; + } else if (status == 2) { + t3b2_mac_reset(mac); + mac->toggle_cnt = 0; + } + return status; +} + +/* + * This function is called periodically to accumulate the current values of the + * RMON counters into the port statistics. 
Since the packet counters are only + * 32 bits they can overflow in ~286 secs at 10G, so the function should be + * called more frequently than that. The byte counters are 45-bit wide, they + * would overflow in ~7.8 hours. + */ +const struct mac_stats *t3_mac_update_stats(struct cmac *mac) +{ +#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset) +#define RMON_UPDATE(mac, name, reg) \ + (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg) +#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \ + (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \ + ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32) + + u32 v, lo; + + RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH); + RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH); + RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES); + RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES); + RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES); + RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES); + RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES); + RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES); + RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES); + + RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES); + + v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT); + if (mac->adapter->params.rev == T3_REV_B2) + v &= 0x7fffffff; + mac->stats.rx_too_long += v; + + RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES); + RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES); + RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES); + RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES); + RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES); + RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES); + RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES); + + RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH); + RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH); + RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST); + RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST); + RMON_UPDATE(mac, tx_pause, TX_PAUSE); + /* This counts error frames in general (bad FCS, underrun, etc). */ + RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES); + + RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES); + RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES); + RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES); + RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES); + RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES); + RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES); + RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES); + + /* The next stat isn't clear-on-read. */ + t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 
51 : 50);
+	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
+	lo = (u32) mac->stats.rx_cong_drops;
+	mac->stats.rx_cong_drops += (u64) (v - lo);
+
+	return &mac->stats;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
new file mode 100644
index 000000000..ace0ab98d
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -0,0 +1,10 @@
+#
+# Chelsio T4 driver
+#
+
+obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
+
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
+cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
+cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
+cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
new file mode 100644
index 000000000..c308429dd
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -0,0 +1,318 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
+ *
+ * Written by Deepak (deepak.s@chelsio.com)
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/jhash.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+#include "cxgb4.h"
+#include "clip_tbl.h"
+
+static inline unsigned int ipv4_clip_hash(struct clip_tbl *c, const u32 *key)
+{
+	unsigned int clipt_size_half = c->clipt_size / 2;
+
+	return jhash_1word(*key, 0) % clipt_size_half;
+}
+
+static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
+{
+	unsigned int clipt_size_half = d->clipt_size / 2;
+	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+	return clipt_size_half +
+		(jhash_1word(xor, 0) % clipt_size_half);
+}
+
+static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
+				   u8 v6)
+{
+	return v6 ?
ipv6_clip_hash(ctbl, addr) : + ipv4_clip_hash(ctbl, addr); +} + +static int clip6_get_mbox(const struct net_device *dev, + const struct in6_addr *lip) +{ + struct adapter *adap = netdev2adap(dev); + struct fw_clip_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c)); + *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); + *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +static int clip6_release_mbox(const struct net_device *dev, + const struct in6_addr *lip) +{ + struct adapter *adap = netdev2adap(dev); + struct fw_clip_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c)); + *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); + *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) +{ + struct adapter *adap = netdev2adap(dev); + struct clip_tbl *ctbl = adap->clipt; + struct clip_entry *ce, *cte; + u32 *addr = (u32 *)lip; + int hash; + int ret = -1; + + if (!ctbl) + return 0; + + hash = clip_addr_hash(ctbl, addr, v6); + + read_lock_bh(&ctbl->lock); + list_for_each_entry(cte, &ctbl->hash_list[hash], list) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { + ce = cte; + read_unlock_bh(&ctbl->lock); + goto found; + } + } + read_unlock_bh(&ctbl->lock); + + write_lock_bh(&ctbl->lock); + if (!list_empty(&ctbl->ce_free_head)) { + ce = list_first_entry(&ctbl->ce_free_head, + struct clip_entry, list); + list_del(&ce->list); + INIT_LIST_HEAD(&ce->list); + spin_lock_init(&ce->lock); + atomic_set(&ce->refcnt, 0); + atomic_dec(&ctbl->nfree); + list_add_tail(&ce->list, &ctbl->hash_list[hash]); + if (v6) { + ce->addr6.sin6_family = AF_INET6; + memcpy(ce->addr6.sin6_addr.s6_addr, + lip, sizeof(struct in6_addr)); + ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); + if (ret) { + write_unlock_bh(&ctbl->lock); + return ret; + } + } else { + ce->addr.sin_family = AF_INET; + memcpy((char *)(&ce->addr.sin_addr), lip, + sizeof(struct in_addr)); + } + } else { + write_unlock_bh(&ctbl->lock); + return -ENOMEM; + } + write_unlock_bh(&ctbl->lock); +found: + atomic_inc(&ce->refcnt); + + return 0; +} +EXPORT_SYMBOL(cxgb4_clip_get); + +void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) +{ + struct adapter *adap = netdev2adap(dev); + struct clip_tbl *ctbl = adap->clipt; + struct clip_entry *ce, *cte; + u32 *addr = (u32 *)lip; + int hash; + int ret = -1; + + hash = clip_addr_hash(ctbl, addr, v6); + + read_lock_bh(&ctbl->lock); + list_for_each_entry(cte, &ctbl->hash_list[hash], list) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { + ce = cte; + read_unlock_bh(&ctbl->lock); + goto found; + } + } + read_unlock_bh(&ctbl->lock); + + return; +found: + write_lock_bh(&ctbl->lock); + 
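+	/* Last reference dropped: move the entry back to the free list and,
+	 * for an IPv6 address, release it from the hardware CLIP table.
+	 */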
spin_lock_bh(&ce->lock);
+	if (atomic_dec_and_test(&ce->refcnt)) {
+		list_del(&ce->list);
+		INIT_LIST_HEAD(&ce->list);
+		list_add_tail(&ce->list, &ctbl->ce_free_head);
+		atomic_inc(&ctbl->nfree);
+		if (v6)
+			clip6_release_mbox(dev, (const struct in6_addr *)lip);
+	}
+	spin_unlock_bh(&ce->lock);
+	write_unlock_bh(&ctbl->lock);
+}
+EXPORT_SYMBOL(cxgb4_clip_release);
+
+/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int cxgb4_update_dev_clip(struct net_device *root_dev,
+				 struct net_device *dev)
+{
+	struct inet6_dev *idev = NULL;
+	struct inet6_ifaddr *ifa;
+	int ret = 0;
+
+	idev = __in6_dev_get(root_dev);
+	if (!idev)
+		return ret;
+
+	read_lock_bh(&idev->lock);
+	list_for_each_entry(ifa, &idev->addr_list, if_list) {
+		ret = cxgb4_clip_get(dev, (const u32 *)ifa->addr.s6_addr, 1);
+		if (ret < 0)
+			break;
+	}
+	read_unlock_bh(&idev->lock);
+
+	return ret;
+}
+
+int cxgb4_update_root_dev_clip(struct net_device *dev)
+{
+	struct net_device *root_dev = NULL;
+	int i, ret = 0;
+
+	/* First populate the real net device's IPv6 addresses */
+	ret = cxgb4_update_dev_clip(dev, dev);
+	if (ret)
+		return ret;
+
+	/* Parse all bond and vlan devices layered on top of the physical dev */
+	root_dev = netdev_master_upper_dev_get_rcu(dev);
+	if (root_dev) {
+		ret = cxgb4_update_dev_clip(root_dev, dev);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < VLAN_N_VID; i++) {
+		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
+		if (!root_dev)
+			continue;
+
+		ret = cxgb4_update_dev_clip(root_dev, dev);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_update_root_dev_clip);
+
+int clip_tbl_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adapter = seq->private;
+	struct clip_tbl *ctbl = adapter->clipt;
+	struct clip_entry *ce;
+	char ip[60];
+	int i;
+
+	read_lock_bh(&ctbl->lock);
+
+	seq_puts(seq, "IP Address Users\n");
+	for (i = 0; i < ctbl->clipt_size; ++i) {
+		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
+			ip[0] = '\0';
+			sprintf(ip, "%pISc", &ce->addr);
+			seq_printf(seq, "%-25s %u\n", ip,
+				   atomic_read(&ce->refcnt));
+		}
+	}
+	seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
+
+	read_unlock_bh(&ctbl->lock);
+
+	return 0;
+}
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+				  unsigned int clipt_end)
+{
+	struct clip_entry *cl_list;
+	struct clip_tbl *ctbl;
+	unsigned int clipt_size;
+	int i;
+
+	if (clipt_start >= clipt_end)
+		return NULL;
+	clipt_size = clipt_end - clipt_start + 1;
+	if (clipt_size < CLIPT_MIN_HASH_BUCKETS)
+		return NULL;
+
+	ctbl = t4_alloc_mem(sizeof(*ctbl) +
+			    clipt_size*sizeof(struct list_head));
+	if (!ctbl)
+		return NULL;
+
+	ctbl->clipt_start = clipt_start;
+	ctbl->clipt_size = clipt_size;
+	INIT_LIST_HEAD(&ctbl->ce_free_head);
+
+	atomic_set(&ctbl->nfree, clipt_size);
+	rwlock_init(&ctbl->lock);
+
+	for (i = 0; i < ctbl->clipt_size; ++i)
+		INIT_LIST_HEAD(&ctbl->hash_list[i]);
+
+	cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
+	if (!cl_list) {
+		t4_free_mem(ctbl);
+		return NULL;
+	}
+	ctbl->cl_list = (void *)cl_list;
+
+	for (i = 0; i < clipt_size; i++) {
+		INIT_LIST_HEAD(&cl_list[i].list);
+		list_add_tail(&cl_list[i].list, &ctbl->ce_free_head);
+	}
+
+	return ctbl;
+}
+
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+	struct clip_tbl *ctbl = adap->clipt;
+
+	if (ctbl) {
+		if (ctbl->cl_list)
+			t4_free_mem(ctbl->cl_list);
+		t4_free_mem(ctbl);
+	}
+}
+EXPORT_SYMBOL(t4_cleanup_clip_tbl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
new file mode 100644
index 000000000..35eb43c6b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -0,0 +1,43 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ * Copyright (C) 2003-2014 Chelsio Communications. All rights reserved.
+ *
+ * Written by Deepak (deepak.s@chelsio.com)
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
+ * release for licensing terms and conditions.
+ */
+
+struct clip_entry {
+	spinlock_t lock;	/* Hold while modifying clip reference */
+	atomic_t refcnt;
+	struct list_head list;
+	union {
+		struct sockaddr_in addr;
+		struct sockaddr_in6 addr6;
+	};
+};
+
+struct clip_tbl {
+	unsigned int clipt_start;
+	unsigned int clipt_size;
+	rwlock_t lock;
+	atomic_t nfree;
+	struct list_head ce_free_head;
+	void *cl_list;
+	struct list_head hash_list[0];
+};
+
+enum {
+	CLIPT_MIN_HASH_BUCKETS = 2,
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+				  unsigned int clipt_end);
+int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6);
+void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6);
+int clip_tbl_show(struct seq_file *seq, void *v);
+int cxgb4_update_root_dev_clip(struct net_device *dev);
+void t4_cleanup_clip_tbl(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
new file mode 100644
index 000000000..524d11098
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -0,0 +1,1313 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_H__
+#define __CXGB4_H__
+
+#include "t4_hw.h"
+
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include "cxgb4_uld.h"
+
+#define CH_WARN(adap, fmt, ...)
dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) + +enum { + MAX_NPORTS = 4, /* max # of ports */ + SERNUM_LEN = 24, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ + PN_LEN = 16, /* Part Number length */ +}; + +enum { + T4_REGMAP_SIZE = (160 * 1024), + T5_REGMAP_SIZE = (332 * 1024), +}; + +enum { + MEM_EDC0, + MEM_EDC1, + MEM_MC, + MEM_MC0 = MEM_MC, + MEM_MC1 +}; + +enum { + MEMWIN0_APERTURE = 2048, + MEMWIN0_BASE = 0x1b800, + MEMWIN1_APERTURE = 32768, + MEMWIN1_BASE = 0x28000, + MEMWIN1_BASE_T5 = 0x52000, + MEMWIN2_APERTURE = 65536, + MEMWIN2_BASE = 0x30000, + MEMWIN2_APERTURE_T5 = 131072, + MEMWIN2_BASE_T5 = 0x60000, +}; + +enum dev_master { + MASTER_CANT, + MASTER_MAY, + MASTER_MUST +}; + +enum dev_state { + DEV_STATE_UNINIT, + DEV_STATE_INIT, + DEV_STATE_ERR +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of received PPP prio 5 frames */ + u64 rx_ppp6; /* # of received PPP prio 6 frames */ + u64 rx_ppp7; /* # of received PPP prio 7 frames */ + + u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ + u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ + u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ + u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ + u64 rx_trunc0; /* buffer-group 0 truncated packets */ + u64 rx_trunc1; /* buffer-group 1 truncated 
packets */ + u64 rx_trunc2; /* buffer-group 2 truncated packets */ + u64 rx_trunc3; /* buffer-group 3 truncated packets */ +}; + +struct lb_port_stats { + u64 octets; + u64 frames; + u64 bcast_frames; + u64 mcast_frames; + u64 ucast_frames; + u64 error_frames; + + u64 frames_64; + u64 frames_65_127; + u64 frames_128_255; + u64 frames_256_511; + u64 frames_512_1023; + u64 frames_1024_1518; + u64 frames_1519_max; + + u64 drop; + + u64 ovflow0; + u64 ovflow1; + u64 ovflow2; + u64 ovflow3; + u64 trunc0; + u64 trunc1; + u64 trunc2; + u64 trunc3; +}; + +struct tp_tcp_stats { + u32 tcpOutRsts; + u64 tcpInSegs; + u64 tcpOutSegs; + u64 tcpRetransSegs; +}; + +struct tp_err_stats { + u32 macInErrs[4]; + u32 hdrInErrs[4]; + u32 tcpInErrs[4]; + u32 tnlCongDrops[4]; + u32 ofldChanDrops[4]; + u32 tnlTxDrops[4]; + u32 ofldVlanDrops[4]; + u32 tcp6InErrs[4]; + u32 ofldNoNeigh; + u32 ofldCongDefer; +}; + +struct sge_params { + u32 hps; /* host page size for our PF/VF */ + u32 eq_qpp; /* egress queues/page for our PF/VF */ + u32 iq_qpp; /* egress queues/page for our PF/VF */ +}; + +struct tp_params { + unsigned int ntxchan; /* # of Tx channels */ + unsigned int tre; /* log2 of core clocks per TP tick */ + unsigned int la_mask; /* what events are recorded by TP LA */ + unsigned short tx_modq_map; /* TX modulation scheduler queue to */ + /* channel map */ + + uint32_t dack_re; /* DACK timer resolution */ + unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ + + u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */ + u32 ingress_config; /* cached TP_INGRESS_CONFIG */ + + /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a + * subset of the set of fields which may be present in the Compressed + * Filter Tuple portion of filters and TCP TCB connections. The + * fields which are present are controlled by the TP_VLAN_PRI_MAP. + * Since a variable number of fields may or may not be present, their + * shifted field positions within the Compressed Filter Tuple may + * vary, or not even be present if the field isn't selected in + * TP_VLAN_PRI_MAP. Since some of these fields are needed in various + * places we store their offsets here, or a -1 if the field isn't + * present. 
+ */ + int vlan_shift; + int vnic_shift; + int port_shift; + int protocol_shift; +}; + +struct vpd_params { + unsigned int cclk; + u8 ec[EC_LEN + 1]; + u8 sn[SERNUM_LEN + 1]; + u8 id[ID_LEN + 1]; + u8 pn[PN_LEN + 1]; +}; + +struct pci_params { + unsigned char speed; + unsigned char width; +}; + +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_FPGA 0x100 +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A2, + + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, +}; + +struct devlog_params { + u32 memtype; /* which memory (EDC0, EDC1, MC) */ + u32 start; /* start of log in firmware memory */ + u32 size; /* size of log */ +}; + +struct adapter_params { + struct sge_params sge; + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + struct devlog_params devlog; + enum pcie_memwin drv_memwin; + + unsigned int cim_la_size; + + unsigned int sf_size; /* serial flash size in bytes */ + unsigned int sf_nsec; /* # of flash sectors */ + unsigned int sf_fw_start; /* start of FW image in flash */ + + unsigned int fw_vers; + unsigned int tp_vers; + u8 api_vers[7]; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + enum chip_type chip; /* chip code */ + unsigned char offload; + + unsigned char bypass; + + unsigned int ofldq_wr_cred; + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ + + unsigned int max_ordird_qp; /* Max read depth per RDMA QP */ + unsigned int max_ird_adapter; /* Max read depth per adapter */ +}; + +#include "t4fw_api.h" + +#define FW_VERSION(chip) ( \ + FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ + FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \ + FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \ + FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) +#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) + +struct fw_info { + u8 chip; + char *fs_name; + char *fw_mod_name; + struct fw_hdr fw_hdr; +}; + + +struct trace_params { + u32 data[TRACE_LEN / 4]; + u32 mask[TRACE_LEN / 4]; + unsigned short snap_len; + unsigned short min_len; + unsigned char skip_ofst; + unsigned char skip_len; + unsigned char invert; + unsigned char port; +}; + +struct link_config { + unsigned short supported; /* link capabilities */ + unsigned short advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? 
*/ +}; + +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) + +enum { + MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ + MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ + MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ + MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ + MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */ + MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */ +}; + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_CTRL_TXQ_ENTRIES = 1024, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + MIN_TXQ_ENTRIES = 32, + MIN_CTRL_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16 +}; + +enum { + INGQ_EXTRAS = 2, /* firmware event queue and */ + /* forwarded interrupts */ + MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES + + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS, +}; + +struct adapter; +struct sge_rspq; + +#include "cxgb4_dcb.h" + +#ifdef CONFIG_CHELSIO_T4_FCOE +#include "cxgb4_fcoe.h" +#endif /* CONFIG_CHELSIO_T4_FCOE */ + +struct port_info { + struct adapter *adapter; + u16 viid; + s16 xact_addr_filt; /* index of exact MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + s8 mdio_addr; + enum fw_port_type port_type; + u8 mod_type; + u8 port_id; + u8 tx_chan; + u8 lport; /* associated offload logical port */ + u8 nqsets; /* # of qsets */ + u8 first_qset; /* index of first qset */ + u8 rss_mode; + struct link_config link_cfg; + u16 *rss; +#ifdef CONFIG_CHELSIO_T4_DCB + struct port_dcb_info dcb; /* Data Center Bridging support */ +#endif +#ifdef CONFIG_CHELSIO_T4_FCOE + struct cxgb_fcoe fcoe; +#endif /* CONFIG_CHELSIO_T4_FCOE */ +}; + +struct dentry; +struct work_struct; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + DEV_ENABLED = (1 << 1), + USING_MSI = (1 << 2), + USING_MSIX = (1 << 3), + FW_OK = (1 << 4), + RSS_TNLALLLOOKUP = (1 << 5), + USING_SOFT_PARAMS = (1 << 6), + MASTER_PF = (1 << 7), + FW_OFLD_CONN = (1 << 9), +}; + +struct rx_sw_desc; + +struct sge_fl { /* SGE free-buffer queue state */ + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long large_alloc_failed; + unsigned long starving; + /* RO fields */ + unsigned int cntxt_id; /* SGE context id for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + __be64 *desc; /* address of HW Rx descriptor ring */ + dma_addr_t addr; /* bus address of HW ring start */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ +}; + +/* A packet gather list */ +struct pkt_gl { + struct page_frag frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct napi_struct napi; + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 adaptive_rx; + u8 
pktcnt_idx; /* interrupt packet threshold */ + u8 uld; /* ULD handling this queue */ + u8 idx; /* queue index within its group */ + int offset; /* offset into current Rx buffer */ + u16 cntxt_id; /* SGE context id for the response q */ + u16 abs_id; /* absolute SGE id for the response q */ + __be64 *desc; /* address of HW response ring */ + dma_addr_t phys_addr; /* physical address of the ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + struct adapter *adap; + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; +#ifdef CONFIG_NET_RX_BUSY_POLL +#define CXGB_POLL_STATE_IDLE 0 +#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */ +#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */ +#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */ +#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */ +#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \ + CXGB_POLL_STATE_POLL_YIELD) +#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \ + CXGB_POLL_STATE_POLL) +#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \ + CXGB_POLL_STATE_POLL_YIELD) + unsigned int bpoll_state; + spinlock_t bpoll_lock; /* lock for busy poll */ +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +}; + +struct sge_eth_stats { /* Ethernet queue statistics */ + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ + unsigned long vlan_ex; /* # of Rx VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_stats stats; +} ____cacheline_aligned_in_smp; + +struct sge_ofld_stats { /* offload queue statistics */ + unsigned long pkts; /* # of packets */ + unsigned long imm; /* # of immediate-data packets */ + unsigned long an; /* # of asynchronous notifications */ + unsigned long nomem; /* # of responses deferred due to no mem */ +}; + +struct sge_ofld_rxq { /* SW offload Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_ofld_stats stats; +} ____cacheline_aligned_in_smp; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc; + +struct sge_txq { + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times q has been stopped */ + unsigned long restarts; /* # of queue restarts */ + unsigned int cntxt_id; /* SGE context id for the Tx q */ + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* physical address of the ring */ + spinlock_t db_lock; + int db_disabled; + unsigned short db_pidx; + unsigned short db_pidx_inc; + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct netdev_queue *txq; /* associated netdev TX queue */ +#ifdef CONFIG_CHELSIO_T4_DCB + u8 dcb_prio; /* 
DCB Priority bound to queue */ +#endif + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of Tx checksum offloads */ + unsigned long vlan_ins; /* # of Tx VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ofld_txq { /* state for an SGE offload Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ctrl_txq { /* state for an SGE control Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ +} ____cacheline_aligned_in_smp; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; + struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; + + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; + struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; + struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 ofldqsets; /* # of active offload queue sets */ + u16 rdmaqs; /* # of available RDMA Rx queues */ + u16 rdmaciqs; /* # of available RDMA concentrator IQs */ + u16 ofld_rxq[MAX_OFLD_QSETS]; + u16 rdma_rxq[MAX_RDMA_QUEUES]; + u16 rdma_ciq[MAX_RDMA_CIQS]; + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + u32 fl_pg_order; /* large page allocation size */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + u32 fl_align; /* response queue message alignment */ + u32 fl_starve_thres; /* Free List starvation threshold */ + + /* State variables for detecting an SGE Ingress DMA hang */ + unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */ + unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */ + unsigned int idma_state[2]; /* SGE IDMA Hang detect state */ + unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */ + + unsigned int egr_start; + unsigned int egr_sz; + unsigned int ingr_start; + unsigned int ingr_sz; + void **egr_map; /* qid->queue egress queue map */ + struct sge_rspq **ingr_map; /* qid->queue ingress queue map */ + unsigned long *starving_fl; + unsigned long *txq_maperr; + struct timer_list rx_timer; /* refills starving FLs */ + struct timer_list tx_timer; /* checks Tx queues */ +}; + +#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) +#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) +#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) +#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++) + +struct l2t_data; + +#ifdef CONFIG_PCI_IOV + +/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial + * Configuration initialization for T5 only has SR-IOV functionality enabled + * on PF0-3 in order to simplify everything. 
+ */ +#define NUM_OF_PF_WITH_SRIOV 4 + +#endif + +struct adapter { + void __iomem *regs; + void __iomem *bar2; + u32 t4_bar0; + struct pci_dev *pdev; + struct device *pdev_dev; + unsigned int mbox; + unsigned int fn; + unsigned int flags; + enum chip_type chip; + + int msg_enable; + + struct adapter_params params; + struct cxgb4_virt_res vres; + unsigned int swintr; + + unsigned int wol; + + struct { + unsigned short vec; + char desc[IFNAMSIZ + 10]; + } msix_info[MAX_INGQ + 1]; + + struct sge sge; + + struct net_device *port[MAX_NPORTS]; + u8 chan_map[NCHAN]; /* channel -> port map */ + + u32 filter_mode; + unsigned int l2t_start; + unsigned int l2t_end; + struct l2t_data *l2t; + unsigned int clipt_start; + unsigned int clipt_end; + struct clip_tbl *clipt; + void *uld_handle[CXGB4_ULD_MAX]; + struct list_head list_node; + struct list_head rcu_node; + + struct tid_info tids; + void **tid_release_head; + spinlock_t tid_release_lock; + struct workqueue_struct *workq; + struct work_struct tid_release_task; + struct work_struct db_full_task; + struct work_struct db_drop_task; + bool tid_release_task_busy; + + struct dentry *debugfs_root; + + spinlock_t stats_lock; + spinlock_t win0_lock ____cacheline_aligned_in_smp; +}; + +/* Defined bit width of user definable filter tuples + */ +#define ETHTYPE_BITWIDTH 16 +#define FRAG_BITWIDTH 1 +#define MACIDX_BITWIDTH 9 +#define FCOE_BITWIDTH 1 +#define IPORT_BITWIDTH 3 +#define MATCHTYPE_BITWIDTH 3 +#define PROTO_BITWIDTH 8 +#define TOS_BITWIDTH 8 +#define PF_BITWIDTH 8 +#define VF_BITWIDTH 8 +#define IVLAN_BITWIDTH 16 +#define OVLAN_BITWIDTH 16 + +/* Filter matching rules. These consist of a set of ingress packet field + * (value, mask) tuples. The associated ingress packet field matches the + * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field + * rule can be constructed by specifying a tuple of (0, 0).) A filter rule + * matches an ingress packet when all of the individual individual field + * matching rules are true. + * + * Partial field masks are always valid, however, while it may be easy to + * understand their meanings for some fields (e.g. IP address to match a + * subnet), for others making sensible partial masks is less intuitive (e.g. + * MPS match type) ... + * + * Most of the following data structures are modeled on T4 capabilities. + * Drivers for earlier chips use the subsets which make sense for those chips. + * We really need to come up with a hardware-independent mechanism to + * represent hardware filter capabilities ... + */ +struct ch_filter_tuple { + /* Compressed header matching field rules. The TP_VLAN_PRI_MAP + * register selects which of these fields will participate in the + * filter match rules -- up to a maximum of 36 bits. Because + * TP_VLAN_PRI_MAP is a global register, all filters must use the same + * set of fields. 
+ */ + uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */ + uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */ + uint32_t ivlan_vld:1; /* inner VLAN valid */ + uint32_t ovlan_vld:1; /* outer VLAN valid */ + uint32_t pfvf_vld:1; /* PF/VF valid */ + uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */ + uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */ + uint32_t iport:IPORT_BITWIDTH; /* ingress port */ + uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */ + uint32_t proto:PROTO_BITWIDTH; /* protocol type */ + uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */ + uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */ + uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */ + uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */ + uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */ + + /* Uncompressed header matching field rules. These are always + * available for field rules. + */ + uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */ + uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */ + uint16_t lport; /* local port */ + uint16_t fport; /* foreign port */ +}; + +/* A filter ioctl command. + */ +struct ch_filter_specification { + /* Administrative fields for filter. + */ + uint32_t hitcnts:1; /* count filter hits in TCB */ + uint32_t prio:1; /* filter has priority over active/server */ + + /* Fundamental filter typing. This is the one element of filter + * matching that doesn't exist as a (value, mask) tuple. + */ + uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */ + + /* Packet dispatch information. Ingress packets which match the + * filter rules will be dropped, passed to the host or switched back + * out as egress packets. + */ + uint32_t action:2; /* drop, pass, switch */ + + uint32_t rpttid:1; /* report TID in RSS hash field */ + + uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */ + uint32_t iq:10; /* ingress queue */ + + uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */ + uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */ + /* 1 => TCB contains IQ ID */ + + /* Switch proxy/rewrite fields. An ingress packet which matches a + * filter with "switch" set will be looped back out as an egress + * packet -- potentially with some Ethernet header rewriting. + */ + uint32_t eport:2; /* egress port to switch packet out */ + uint32_t newdmac:1; /* rewrite destination MAC address */ + uint32_t newsmac:1; /* rewrite source MAC address */ + uint32_t newvlan:2; /* rewrite VLAN Tag */ + uint8_t dmac[ETH_ALEN]; /* new destination MAC address */ + uint8_t smac[ETH_ALEN]; /* new source MAC address */ + uint16_t vlan; /* VLAN Tag to insert */ + + /* Filter rule value/mask pairs. 
+ */ + struct ch_filter_tuple val; + struct ch_filter_tuple mask; +}; + +enum { + FILTER_PASS = 0, /* default */ + FILTER_DROP, + FILTER_SWITCH +}; + +enum { + VLAN_NOCHANGE = 0, /* default */ + VLAN_REMOVE, + VLAN_INSERT, + VLAN_REWRITE +}; + +static inline int is_t5(enum chip_type chip) +{ + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; +} + +static inline int is_t4(enum chip_type chip) +{ + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; +} + +static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) +{ + return readl(adap->regs + reg_addr); +} + +static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val) +{ + writel(val, adap->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr) +{ + return readq(adap->regs + reg_addr); +} + +static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) +{ + writeq(val, adap->regs + reg_addr); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. + */ +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return netdev_priv(adap->port[idx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) +{ + spin_lock_init(&q->bpoll_lock); + q->bpoll_state = CXGB_POLL_STATE_IDLE; +} + +static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) +{ + bool rc = true; + + spin_lock(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_LOCKED) { + q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD; + rc = false; + } else { + q->bpoll_state = CXGB_POLL_STATE_NAPI; + } + spin_unlock(&q->bpoll_lock); + return rc; +} + +static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) +{ + bool rc = false; + + spin_lock(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) + rc = true; + q->bpoll_state = CXGB_POLL_STATE_IDLE; + spin_unlock(&q->bpoll_lock); + return rc; +} + +static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) +{ + bool rc = true; + + spin_lock_bh(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_LOCKED) { + q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD; + rc = false; + } else { + q->bpoll_state |= CXGB_POLL_STATE_POLL; + } + spin_unlock_bh(&q->bpoll_lock); + return rc; +} + +static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) +{ + bool rc = false; + + spin_lock_bh(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) + rc = true; + q->bpoll_state = CXGB_POLL_STATE_IDLE; + spin_unlock_bh(&q->bpoll_lock); + return rc; +} + +static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) +{ + return q->bpoll_state & 
CXGB_POLL_USER_PEND; +} +#else +static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) +{ +} + +static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) +{ + return true; +} + +static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) +{ + return false; +} + +static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) +{ + return false; +} + +static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) +{ + return false; +} + +static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) +{ + return false; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/* Return a version number to identify the type of adapter. The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + * - bits 16..23: register dump version + */ +static inline unsigned int mk_adap_vers(struct adapter *ap) +{ + return CHELSIO_CHIP_VERSION(ap->params.chip) | + (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16); +} + +/* Return a queue's interrupt hold-off time in us. 0 means no timer. */ +static inline unsigned int qtimer_val(const struct adapter *adap, + const struct sge_rspq *q) +{ + unsigned int idx = q->intr_params >> 1; + + return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; +} + +/* driver version & name used for ethtool_drvinfo */ +extern char cxgb4_driver_name[]; +extern const char cxgb4_driver_version[]; + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void *t4_alloc_mem(size_t size); + +void t4_free_sge_resources(struct adapter *adap); +void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); +irq_handler_t t4_intr_handler(struct adapter *adap); +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid); +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid); +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid); +irqreturn_t t4_sge_intr_msix(int irq, void *cookie); +int t4_sge_init(struct adapter *adap); +void t4_sge_start(struct adapter *adap); +void t4_sge_stop(struct adapter *adap); +int cxgb_busy_poll(struct napi_struct *napi); +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +void cxgb4_set_ethtool_ops(struct net_device *netdev); +int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); +extern int dbfifo_int_thresh; + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +static inline int is_bypass(struct adapter *adap) +{ + return adap->params.bypass; +} + +static inline int is_bypass_device(int device) +{ + /* this should be set based upon device capabilities */ + switch (device) { + case 0x440b: + case 0x440c: + return 1; + default: + return 0; + } +} + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} 
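mk_adap_vers() above packs three fields into a single 32-bit word: bits 0..9 chip version, bits 10..15 chip revision, bits 16..23 register dump version. The following is a minimal standalone sketch of the reverse decode, assuming only that bit layout; decode_adap_vers() and the sample value are illustrative and are not part of the driver or of this patch.

#include <stdint.h>
#include <stdio.h>

/* Unpack a version word laid out the way mk_adap_vers() builds it:
 * bits 0..9 chip version, bits 10..15 chip revision,
 * bits 16..23 register dump version.
 */
static void decode_adap_vers(uint32_t vers)
{
	printf("chip version    : %u\n", (unsigned int)(vers & 0x3ff));         /* bits 0..9   */
	printf("chip revision   : %u\n", (unsigned int)((vers >> 10) & 0x3f));  /* bits 10..15 */
	printf("regdump version : %u\n", (unsigned int)((vers >> 16) & 0xff));  /* bits 16..23 */
}

int main(void)
{
	/* e.g. chip version 5, revision 1, register dump version 1 */
	decode_adap_vers(5 | (1 << 10) | (1 << 16));
	return 0;
}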
+ +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + /* add Core Clock / 2 to round ticks to nearest uS */ + return ((ticks * 1000 + adapter->params.vpd.cclk/2) / + adapter->params.vpd.cclk); +} + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); + +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx); +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx); +void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val); + +struct fw_filter_wr; + +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +int t4_slow_intr_handler(struct adapter *adapter); + +int t4_wait_dev_ready(void __iomem *regs); +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); + +#define T4_MEMORY_WRITE 0 +#define T4_MEMORY_READ 1 +int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, + void *buf, int dir); +static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, + u32 len, __be32 *buf) +{ + return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0); +} + +unsigned int t4_get_regs_len(struct adapter *adapter); +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); + +int t4_seeprom_wp(struct adapter *adapter, bool enable); +int get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented); +int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op); +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force); +unsigned int t4_flash_cfg_addr(struct adapter *adapter); +int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_tp_version(struct adapter *adapter, u32 *vers); +int t4_get_exprom_version(struct adapter *adapter, u32 *vers); +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, int *reset); +int t4_prep_adapter(struct adapter *adapter); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +unsigned int qtimer_val(const struct adapter *adap, + const struct sge_rspq *q); + +int t4_init_devlog_params(struct adapter *adapter); +int t4_init_sge_params(struct adapter 
*adapter); +int t4_init_tp_params(struct adapter *adap); +int t4_filter_field_shift(const struct adapter *adap, int filter_sel); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +void t4_fatal_err(struct adapter *adapter); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags); +int t4_read_rss(struct adapter *adapter, u16 *entries); +void t4_read_rss_key(struct adapter *adapter, u32 *key); +void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx); +void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, + u32 *valp); +void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, + u32 *vfl, u32 *vfh); +u32 t4_read_rss_pf_map(struct adapter *adapter); +u32 t4_read_rss_pf_mask(struct adapter *adapter); + +int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, + u64 *parity); +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, + u64 *parity); +void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); +void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]); +int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, + size_t n); +int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, + size_t n); +int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, + unsigned int *valp); +int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, + const unsigned int *valp); +int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr); +void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres); +const char *t4_get_port_type_description(enum fw_port_type port_type); +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val); +void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); + +void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf); + +void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); + +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr); +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable); + +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_early_init(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size); +int t4_fw_initialize(struct adapter *adap, unsigned int mbox); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4_set_params(struct adapter 
*adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + const u32 *val); +int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, + unsigned int rxqi, unsigned int rxq, unsigned int tc, + unsigned int vi, unsigned int cmask, unsigned int pmask, + unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok); +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok); +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks); +int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp); +int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 val); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +void t4_db_full(struct adapter *adapter); +void t4_db_dropped(struct adapter *adapter); +int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, + u32 addr, u32 val); +void t4_sge_decode_idma_state(struct adapter *adapter, int state); +void t4_free_mem(void *addr); +#endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c new file mode 100644 index 000000000..6074680bc --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -0,0 +1,1243 @@ +/* + * Copyright (C) 2013-2014 Chelsio Communications. All rights reserved. + * + * Written by Anish Bhatt (anish@chelsio.com) + * Casey Leedom (leedom@chelsio.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "cxgb4.h" + +/* DCBx version control + */ +static const char * const dcb_ver_array[] = { + "Unknown", + "DCBx-CIN", + "DCBx-CEE 1.01", + "DCBx-IEEE", + "", "", "", + "Auto Negotiated" +}; + +/* Initialize a port's Data Center Bridging state. Typically used after a + * Link Down event. + */ +void cxgb4_dcb_state_init(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + int version_temp = dcb->dcb_version; + + memset(dcb, 0, sizeof(struct port_dcb_info)); + dcb->state = CXGB4_DCB_STATE_START; + if (version_temp) + dcb->dcb_version = version_temp; + + netdev_dbg(dev, "%s: Initializing DCB state for port[%d]\n", + __func__, pi->port_id); +} + +void cxgb4_dcb_version_init(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + + /* Any writes here are only done on kernels that explicitly need + * a specific version, say < 2.6.38, which only support CEE + */ + dcb->dcb_version = FW_PORT_DCB_VER_AUTO; +} + +static void cxgb4_dcb_cleanup_apps(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + struct port_dcb_info *dcb = &pi->dcb; + struct dcb_app app; + int i, err; + + /* zero priority implies remove */ + app.priority = 0; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + /* Check if app list is exhausted */ + if (!dcb->app_priority[i].protocolid) + break; + + app.protocol = dcb->app_priority[i].protocolid; + + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.priority = dcb->app_priority[i].user_prio_map; + app.selector = dcb->app_priority[i].sel_field + 1; + err = dcb_ieee_delapp(dev, &app); + } else { + app.selector = !!(dcb->app_priority[i].sel_field); + err = dcb_setapp(dev, &app); + } + + if (err) { + dev_err(adap->pdev_dev, + "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n", + dcb_ver_array[dcb->dcb_version], app.selector, + app.protocol, -err); + break; + } + } +} + +/* Finite State machine for Data Center Bridging.
+ */ +void cxgb4_dcb_state_fsm(struct net_device *dev, + enum cxgb4_dcb_state_input transition_to) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + struct adapter *adap = pi->adapter; + enum cxgb4_dcb_state current_state = dcb->state; + + netdev_dbg(dev, "%s: State change from %d to %d for %s\n", + __func__, dcb->state, transition_to, dev->name); + + switch (current_state) { + case CXGB4_DCB_STATE_START: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_DISABLED: { + /* we're going to use Host DCB */ + dcb->state = CXGB4_DCB_STATE_HOST; + dcb->supported = CXGB4_DCBX_HOST_SUPPORT; + break; + } + + case CXGB4_DCB_INPUT_FW_ENABLED: { + /* we're going to use Firmware DCB */ + dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; + dcb->supported = DCB_CAP_DCBX_LLD_MANAGED; + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) + dcb->supported |= DCB_CAP_DCBX_VER_IEEE; + else + dcb->supported |= DCB_CAP_DCBX_VER_CEE; + break; + } + + case CXGB4_DCB_INPUT_FW_INCOMPLETE: { + /* expected transition */ + break; + } + + case CXGB4_DCB_INPUT_FW_ALLSYNCED: { + dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED; + break; + } + + default: + goto bad_state_input; + } + break; + } + + case CXGB4_DCB_STATE_FW_INCOMPLETE: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_ENABLED: { + /* we're already in firmware DCB mode */ + break; + } + + case CXGB4_DCB_INPUT_FW_INCOMPLETE: { + /* we're already incomplete */ + break; + } + + case CXGB4_DCB_INPUT_FW_ALLSYNCED: { + dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED; + dcb->enabled = 1; + linkwatch_fire_event(dev); + break; + } + + default: + goto bad_state_input; + } + break; + } + + case CXGB4_DCB_STATE_FW_ALLSYNCED: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_ENABLED: { + /* we're already in firmware DCB mode */ + break; + } + + case CXGB4_DCB_INPUT_FW_INCOMPLETE: { + /* We were successfully running with firmware DCB but + * now it's telling us that it's in an "incomplete" + * state. We need to reset back to a ground state + * of incomplete. + */ + cxgb4_dcb_cleanup_apps(dev); + cxgb4_dcb_state_init(dev); + dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; + dcb->supported = CXGB4_DCBX_FW_SUPPORT; + linkwatch_fire_event(dev); + break; + } + + case CXGB4_DCB_INPUT_FW_ALLSYNCED: { + /* we're already all sync'ed; + * this is only applicable for IEEE or + * when another VI already completed negotiation + */ + dcb->enabled = 1; + linkwatch_fire_event(dev); + break; + } + + default: + goto bad_state_input; + } + break; + } + + case CXGB4_DCB_STATE_HOST: { + switch (transition_to) { + case CXGB4_DCB_INPUT_FW_DISABLED: { + /* we're already in Host DCB mode */ + break; + } + + default: + goto bad_state_input; + } + break; + } + + default: + goto bad_state_transition; + } + return; + +bad_state_input: + dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: illegal input symbol %d\n", + transition_to); + return; + +bad_state_transition: + dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: bad state transition, state = %d, input = %d\n", + current_state, transition_to); +} + +/* Handle a DCB/DCBX update message from the firmware.
+ */ +void cxgb4_dcb_handle_fw_update(struct adapter *adap, + const struct fw_port_cmd *pcmd) +{ + const union fw_port_dcb *fwdcb = &pcmd->u.dcb; + int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid)); + struct net_device *dev = adap->port[port]; + struct port_info *pi = netdev_priv(dev); + struct port_dcb_info *dcb = &pi->dcb; + int dcb_type = pcmd->u.dcb.pgid.type; + int dcb_running_version; + + /* Handle Firmware DCB Control messages separately since they drive + * our state machine. + */ + if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) { + enum cxgb4_dcb_state_input input = + ((pcmd->u.dcb.control.all_syncd_pkd & + FW_PORT_CMD_ALL_SYNCD_F) + ? CXGB4_DCB_STATE_FW_ALLSYNCED + : CXGB4_DCB_STATE_FW_INCOMPLETE); + + if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) { + dcb_running_version = FW_PORT_CMD_DCB_VERSION_G( + be16_to_cpu( + pcmd->u.dcb.control.dcb_version_to_app_state)); + if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 || + dcb_running_version == FW_PORT_DCB_VER_IEEE) { + dcb->dcb_version = dcb_running_version; + dev_warn(adap->pdev_dev, "Interface %s is running %s\n", + dev->name, + dcb_ver_array[dcb->dcb_version]); + } else { + dev_warn(adap->pdev_dev, + "Something screwed up, requested firmware for %s, but firmware returned %s instead\n", + dcb_ver_array[dcb->dcb_version], + dcb_ver_array[dcb_running_version]); + dcb->dcb_version = FW_PORT_DCB_VER_UNKNOWN; + } + } + + cxgb4_dcb_state_fsm(dev, input); + return; + } + + /* It's weird, and almost certainly an error, to get Firmware DCB + * messages when we either haven't been told whether we're going to be + * doing Host or Firmware DCB; and even worse when we've been told + * that we're doing Host DCB! + */ + if (dcb->state == CXGB4_DCB_STATE_START || + dcb->state == CXGB4_DCB_STATE_HOST) { + dev_err(adap->pdev_dev, "Receiving Firmware DCB messages in State %d\n", + dcb->state); + return; + } + + /* Now handle the general Firmware DCB update messages ... 
+ */ + switch (dcb_type) { + case FW_PORT_DCB_TYPE_PGID: + dcb->pgid = be32_to_cpu(fwdcb->pgid.pgid); + dcb->msgs |= CXGB4_DCB_FW_PGID; + break; + + case FW_PORT_DCB_TYPE_PGRATE: + dcb->pg_num_tcs_supported = fwdcb->pgrate.num_tcs_supported; + memcpy(dcb->pgrate, &fwdcb->pgrate.pgrate, + sizeof(dcb->pgrate)); + memcpy(dcb->tsa, &fwdcb->pgrate.tsa, + sizeof(dcb->tsa)); + dcb->msgs |= CXGB4_DCB_FW_PGRATE; + if (dcb->msgs & CXGB4_DCB_FW_PGID) + IEEE_FAUX_SYNC(dev, dcb); + break; + + case FW_PORT_DCB_TYPE_PRIORATE: + memcpy(dcb->priorate, &fwdcb->priorate.strict_priorate, + sizeof(dcb->priorate)); + dcb->msgs |= CXGB4_DCB_FW_PRIORATE; + break; + + case FW_PORT_DCB_TYPE_PFC: + dcb->pfcen = fwdcb->pfc.pfcen; + dcb->pfc_num_tcs_supported = fwdcb->pfc.max_pfc_tcs; + dcb->msgs |= CXGB4_DCB_FW_PFC; + IEEE_FAUX_SYNC(dev, dcb); + break; + + case FW_PORT_DCB_TYPE_APP_ID: { + const struct fw_port_app_priority *fwap = &fwdcb->app_priority; + int idx = fwap->idx; + struct app_priority *ap = &dcb->app_priority[idx]; + + struct dcb_app app = { + .protocol = be16_to_cpu(fwap->protocolid), + }; + int err; + + /* Convert from firmware format to relevant format + * when using app selector + */ + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + app.selector = (fwap->sel_field + 1); + app.priority = ffs(fwap->user_prio_map) - 1; + err = dcb_ieee_setapp(dev, &app); + IEEE_FAUX_SYNC(dev, dcb); + } else { + /* Default is CEE */ + app.selector = !!(fwap->sel_field); + app.priority = fwap->user_prio_map; + err = dcb_setapp(dev, &app); + } + + if (err) + dev_err(adap->pdev_dev, + "Failed DCB Set Application Priority: sel=%d, prot=%d, prio=%d, err=%d\n", + app.selector, app.protocol, app.priority, -err); + + ap->user_prio_map = fwap->user_prio_map; + ap->sel_field = fwap->sel_field; + ap->protocolid = be16_to_cpu(fwap->protocolid); + dcb->msgs |= CXGB4_DCB_FW_APP_ID; + break; + } + + default: + dev_err(adap->pdev_dev, "Unknown DCB update type received %x\n", + dcb_type); + break; + } +} + +/* Data Center Bridging netlink operations. + */ + + +/* Get current DCB enabled/disabled state. + */ +static u8 cxgb4_getstate(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + + return pi->dcb.enabled; +} + +/* Set DCB enabled/disabled. + */ +static u8 cxgb4_setstate(struct net_device *dev, u8 enabled) +{ + struct port_info *pi = netdev2pinfo(dev); + + /* If DCBx is host-managed, dcb is enabled by outside lldp agents */ + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) { + pi->dcb.enabled = enabled; + return 0; + } + + /* Firmware doesn't provide any mechanism to control the DCB state. 
+ */ + if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED)) + return 1; + + return 0; +} + +static void cxgb4_getpgtccfg(struct net_device *dev, int tc, + u8 *prio_type, u8 *pgid, u8 *bw_per, + u8 *up_tc_map, int local) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int err; + + *prio_type = *pgid = *bw_per = *up_tc_map = 0; + + if (local) + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + else + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + + pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err); + return; + } + *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf; + + if (local) + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + else + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n", + -err); + return; + } + + *bw_per = pcmd.u.dcb.pgrate.pgrate[*pgid]; + *up_tc_map = (1 << tc); + + /* prio_type is link strict */ + if (*pgid != 0xF) + *prio_type = 0x2; +} + +static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc, + u8 *prio_type, u8 *pgid, u8 *bw_per, + u8 *up_tc_map) +{ + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 1); +} + + +static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc, + u8 *prio_type, u8 *pgid, u8 *bw_per, + u8 *up_tc_map) +{ + /* tc 0 is written at MSB position */ + return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per, + up_tc_map, 0); +} + +static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, + u8 prio_type, u8 pgid, u8 bw_per, + u8 up_tc_map) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int fw_tc = 7 - tc; + u32 _pgid; + int err; + + if (pgid == DCB_ATTR_VALUE_UNDEFINED) + return; + if (bw_per == DCB_ATTR_VALUE_UNDEFINED) + return; + + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID; + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err); + return; + } + + _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); + _pgid &= ~(0xF << (fw_tc * 4)); + _pgid |= pgid << (fw_tc * 4); + pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid); + + INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB write PGID failed with %d\n", + -err); + return; + } + + memset(&pcmd, 0, sizeof(struct fw_port_cmd)); + + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n", + -err); + return; + } + + pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per; + + INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != 
FW_PORT_DCB_CFG_SUCCESS) + dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n", + -err); +} + +static void cxgb4_getpgbwgcfg(struct net_device *dev, int pgid, u8 *bw_per, + int local) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int err; + + if (local) + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + else + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + + pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n", + -err); + return; + } + + *bw_per = pcmd.u.dcb.pgrate.pgrate[pgid]; +} + +static void cxgb4_getpgbwgcfg_tx(struct net_device *dev, int pgid, u8 *bw_per) +{ + return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 1); +} + +static void cxgb4_getpgbwgcfg_rx(struct net_device *dev, int pgid, u8 *bw_per) +{ + return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 0); +} + +static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid, + u8 bw_per) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int err; + + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n", + -err); + return; + } + + pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per; + + INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + + if (err != FW_PORT_DCB_CFG_SUCCESS) + dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n", + -err); +} + +/* Return whether the specified Traffic Class Priority has Priority Pause + * Frames enabled. + */ +static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + + if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED || + priority >= CXGB4_MAX_PRIORITY) + *pfccfg = 0; + else + *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1; +} + +/* Enable/disable Priority Pause Frames for the specified Traffic Class + * Priority. + */ +static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int err; + + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED || + priority >= CXGB4_MAX_PRIORITY) + return; + + INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); + + pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC; + pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; + + if (pfccfg) + pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority)); + else + pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority))); + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB PFC write failed with %d\n", -err); + return; + } + + pi->dcb.pfcen = pcmd.u.dcb.pfc.pfcen; +} + +static u8 cxgb4_setall(struct net_device *dev) +{ + return 0; +} + +/* Return DCB capabilities. 
+ */ +static u8 cxgb4_getcap(struct net_device *dev, int cap_id, u8 *caps) +{ + struct port_info *pi = netdev2pinfo(dev); + + switch (cap_id) { + case DCB_CAP_ATTR_PG: + case DCB_CAP_ATTR_PFC: + *caps = true; + break; + + case DCB_CAP_ATTR_PG_TCS: + /* 8 priorities for PG represented by bitmap */ + *caps = 0x80; + break; + + case DCB_CAP_ATTR_PFC_TCS: + /* 8 priorities for PFC represented by bitmap */ + *caps = 0x80; + break; + + case DCB_CAP_ATTR_GSP: + *caps = true; + break; + + case DCB_CAP_ATTR_UP2TC: + case DCB_CAP_ATTR_BCN: + *caps = false; + break; + + case DCB_CAP_ATTR_DCBX: + *caps = pi->dcb.supported; + break; + + default: + *caps = false; + } + + return 0; +} + +/* Return the number of Traffic Classes for the indicated Traffic Class ID. + */ +static int cxgb4_getnumtcs(struct net_device *dev, int tcs_id, u8 *num) +{ + struct port_info *pi = netdev2pinfo(dev); + + switch (tcs_id) { + case DCB_NUMTCS_ATTR_PG: + if (pi->dcb.msgs & CXGB4_DCB_FW_PGRATE) + *num = pi->dcb.pg_num_tcs_supported; + else + *num = 0x8; + break; + + case DCB_NUMTCS_ATTR_PFC: + *num = 0x8; + break; + + default: + return -EINVAL; + } + + return 0; +} + +/* Set the number of Traffic Classes supported for the indicated Traffic Class + * ID. + */ +static int cxgb4_setnumtcs(struct net_device *dev, int tcs_id, u8 num) +{ + /* Setting the number of Traffic Classes isn't supported. + */ + return -ENOSYS; +} + +/* Return whether Priority Flow Control is enabled. */ +static u8 cxgb4_getpfcstate(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + return false; + + return pi->dcb.pfcen != 0; +} + +/* Enable/disable Priority Flow Control. */ +static void cxgb4_setpfcstate(struct net_device *dev, u8 state) +{ + /* We can't enable/disable Priority Flow Control but we also can't + * return an error ... + */ +} + +/* Return the Application User Priority Map associated with the specified + * Application ID. + */ +static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, + int peer) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int i; + + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + return 0; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + struct fw_port_cmd pcmd; + int err; + + if (peer) + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + else + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + + pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; + pcmd.u.dcb.app_priority.idx = i; + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB APP read failed with %d\n", + -err); + return err; + } + if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) + if (pcmd.u.dcb.app_priority.sel_field == app_idtype) + return pcmd.u.dcb.app_priority.user_prio_map; + + /* exhausted app list */ + if (!pcmd.u.dcb.app_priority.protocolid) + break; + } + + return -EEXIST; +} + +/* Return the Application User Priority Map associated with the specified + * Application ID. 
+ */ +static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id) +{ + return __cxgb4_getapp(dev, app_idtype, app_id, 0); +} + +/* Write a new Application User Priority Map for the specified Application ID + */ +static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, + u8 app_prio) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int i, err; + + + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + return -EINVAL; + + /* DCB info gets thrown away on link up */ + if (!netif_carrier_ok(dev)) + return -ENOLINK; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; + pcmd.u.dcb.app_priority.idx = i; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB app table read failed with %d\n", + -err); + return err; + } + if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) { + /* overwrite existing app table */ + pcmd.u.dcb.app_priority.protocolid = 0; + break; + } + /* find first empty slot */ + if (!pcmd.u.dcb.app_priority.protocolid) + break; + } + + if (i == CXGB4_MAX_DCBX_APP_SUPPORTED) { + /* no empty slots available */ + dev_err(adap->pdev_dev, "DCB app table full\n"); + return -EBUSY; + } + + /* write out new app table entry */ + INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); + if (pi->dcb.state == CXGB4_DCB_STATE_HOST) + pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F); + + pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; + pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id); + pcmd.u.dcb.app_priority.sel_field = app_idtype; + pcmd.u.dcb.app_priority.user_prio_map = app_prio; + pcmd.u.dcb.app_priority.idx = i; + + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB app table write failed with %d\n", + -err); + return err; + } + + return 0; +} + +/* Priority for CEE inside dcb_app is bitmask, with 0 being an invalid value */ +static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, + u8 app_prio) +{ + int ret; + struct dcb_app app = { + .selector = app_idtype, + .protocol = app_id, + .priority = app_prio, + }; + + if (app_idtype != DCB_APP_IDTYPE_ETHTYPE && + app_idtype != DCB_APP_IDTYPE_PORTNUM) + return -EINVAL; + + /* Convert app_idtype to a format that firmware understands */ + ret = __cxgb4_setapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ? + app_idtype : 3, app_id, app_prio); + if (ret) + return ret; + + return dcb_setapp(dev, &app); +} + +/* Return whether IEEE Data Center Bridging has been negotiated. 
+ */ +static inline int +cxgb4_ieee_negotiation_complete(struct net_device *dev, + enum cxgb4_dcb_fw_msgs dcb_subtype) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; + + return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && + (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); +} + +static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets, + int local) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + struct adapter *adap = pi->adapter; + uint32_t tc_info; + struct fw_port_cmd pcmd; + int i, bwg, err; + + if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE))) + return 0; + + ets->ets_cap = dcb->pg_num_tcs_supported; + + if (local) { + ets->willing = 1; + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + } else { + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + } + + pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err); + return err; + } + + tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid); + + if (local) + INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id); + else + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + + pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n", + -err); + return err; + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + bwg = (tc_info >> ((7 - i) * 4)) & 0xF; + ets->prio_tc[i] = bwg; + ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i]; + ets->tc_rx_bw[i] = ets->tc_tx_bw[i]; + ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i]; + } + + return 0; +} + +static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets) +{ + return cxgb4_ieee_read_ets(dev, ets, 1); +} + +/* We reuse this for peer PFC as well, as we can't have it enabled one way */ +static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct port_info *pi = netdev2pinfo(dev); + struct port_dcb_info *dcb = &pi->dcb; + + memset(pfc, 0, sizeof(struct ieee_pfc)); + + if (!(dcb->msgs & CXGB4_DCB_FW_PFC)) + return 0; + + pfc->pfc_cap = dcb->pfc_num_tcs_supported; + pfc->pfc_en = bitswap_1(dcb->pfcen); + + return 0; +} + +static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets) +{ + return cxgb4_ieee_read_ets(dev, ets, 0); +} + +/* Fill in the Application User Priority Map associated with the + * specified Application. + * Priority for IEEE dcb_app is an integer, with 0 being a valid value + */ +static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app) +{ + int prio; + + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) + return -EINVAL; + if (!(app->selector && app->protocol)) + return -EINVAL; + + /* Try querying firmware first, use firmware format */ + prio = __cxgb4_getapp(dev, app->selector - 1, app->protocol, 0); + + if (prio < 0) + prio = dcb_ieee_getapp_mask(dev, app); + + app->priority = ffs(prio) - 1; + return 0; +} + +/* Write a new Application User Priority Map for the specified Application ID. 
+ * Priority for IEEE dcb_app is an integer, with 0 being a valid value + */ +static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app) +{ + int ret; + + if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID)) + return -EINVAL; + if (!(app->selector && app->protocol)) + return -EINVAL; + + if (!(app->selector > IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->selector < IEEE_8021QAZ_APP_SEL_ANY)) + return -EINVAL; + + /* change selector to a format that firmware understands */ + ret = __cxgb4_setapp(dev, app->selector - 1, app->protocol, + (1 << app->priority)); + if (ret) + return ret; + + return dcb_ieee_setapp(dev, app); +} + +/* Return our DCBX parameters. + */ +static u8 cxgb4_getdcbx(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + + /* This is already set by cxgb4_set_dcb_caps, so just return it */ + return pi->dcb.supported; +} + +/* Set our DCBX parameters. + */ +static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request) +{ + struct port_info *pi = netdev2pinfo(dev); + + /* Filter out requests which exceed our capabilities. + */ + if ((dcb_request & (CXGB4_DCBX_FW_SUPPORT | CXGB4_DCBX_HOST_SUPPORT)) + != dcb_request) + return 1; + + /* Can't enable DCB if we haven't successfully negotiated it. + */ + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + return 1; + + /* There's currently no mechanism to allow for the firmware DCBX + * negotiation to be changed from the Host Driver. If the caller + * requests exactly the same parameters that we already have then + * we'll allow them to be successfully "set" ... + */ + if (dcb_request != pi->dcb.supported) + return 1; + + pi->dcb.supported = dcb_request; + return 0; +} + +static int cxgb4_getpeer_app(struct net_device *dev, + struct dcb_peer_app_info *info, u16 *app_count) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int i, err = 0; + + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + return 1; + + info->willing = 0; + info->error = 0; + + *app_count = 0; + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; + pcmd.u.dcb.app_priority.idx = *app_count; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB app table read failed with %d\n", + -err); + return err; + } + + /* find first empty slot */ + if (!pcmd.u.dcb.app_priority.protocolid) + break; + } + *app_count = i; + return err; +} + +static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + int i, err = 0; + + if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + return 1; + + for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID; + pcmd.u.dcb.app_priority.idx = i; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB app table read failed with %d\n", + -err); + return err; + } + + /* find first empty slot */ + if (!pcmd.u.dcb.app_priority.protocolid) + break; + + table[i].selector = pcmd.u.dcb.app_priority.sel_field; + table[i].protocol = + be16_to_cpu(pcmd.u.dcb.app_priority.protocolid); + table[i].priority = + ffs(pcmd.u.dcb.app_priority.user_prio_map) 
- 1; + } + return err; +} + +/* Return Priority Group information. + */ +static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) +{ + struct fw_port_cmd pcmd; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = pi->adapter; + u32 pgid; + int i, err; + + /* We're always "willing" -- the Switch Fabric always dictates the + * DCBX parameters to us. + */ + pg->willing = true; + + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err); + return err; + } + pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); + + for (i = 0; i < CXGB4_MAX_PRIORITY; i++) + pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF; + + INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); + pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; + err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); + if (err != FW_PORT_DCB_CFG_SUCCESS) { + dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n", + -err); + return err; + } + + for (i = 0; i < CXGB4_MAX_PRIORITY; i++) + pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i]; + + return 0; +} + +/* Return Priority Flow Control information. + */ +static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc) +{ + struct port_info *pi = netdev2pinfo(dev); + + cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported)); + + /* Firmware sends this to us in a format that is a bit-flipped version + * of the spec; correct it before we send it to the host. This is taken + * care of by bit shifting in other uses of pfcen + */ + pfc->pfc_en = bitswap_1(pi->dcb.pfcen); + + return 0; +} + +const struct dcbnl_rtnl_ops cxgb4_dcb_ops = { + .ieee_getets = cxgb4_ieee_get_ets, + .ieee_getpfc = cxgb4_ieee_get_pfc, + .ieee_getapp = cxgb4_ieee_getapp, + .ieee_setapp = cxgb4_ieee_setapp, + .ieee_peer_getets = cxgb4_ieee_peer_ets, + .ieee_peer_getpfc = cxgb4_ieee_get_pfc, + + /* CEE std */ + .getstate = cxgb4_getstate, + .setstate = cxgb4_setstate, + .getpgtccfgtx = cxgb4_getpgtccfg_tx, + .getpgbwgcfgtx = cxgb4_getpgbwgcfg_tx, + .getpgtccfgrx = cxgb4_getpgtccfg_rx, + .getpgbwgcfgrx = cxgb4_getpgbwgcfg_rx, + .setpgtccfgtx = cxgb4_setpgtccfg_tx, + .setpgbwgcfgtx = cxgb4_setpgbwgcfg_tx, + .setpfccfg = cxgb4_setpfccfg, + .getpfccfg = cxgb4_getpfccfg, + .setall = cxgb4_setall, + .getcap = cxgb4_getcap, + .getnumtcs = cxgb4_getnumtcs, + .setnumtcs = cxgb4_setnumtcs, + .getpfcstate = cxgb4_getpfcstate, + .setpfcstate = cxgb4_setpfcstate, + .getapp = cxgb4_getapp, + .setapp = cxgb4_setapp, + + /* DCBX configuration */ + .getdcbx = cxgb4_getdcbx, + .setdcbx = cxgb4_setdcbx, + + /* peer apps */ + .peer_getappinfo = cxgb4_getpeer_app, + .peer_getapptable = cxgb4_getpeerapp_tbl, + + /* CEE peer */ + .cee_peer_getpg = cxgb4_cee_peer_getpg, + .cee_peer_getpfc = cxgb4_cee_peer_getpfc, +}; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h new file mode 100644 index 000000000..ccf24d3dc --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2013-2014 Chelsio Communications. All rights reserved. + * + * Written by Anish Bhatt (anish@chelsio.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef __CXGB4_DCB_H +#define __CXGB4_DCB_H + +#include +#include +#include + +#ifdef CONFIG_CHELSIO_T4_DCB + +#define CXGB4_DCBX_FW_SUPPORT \ + (DCB_CAP_DCBX_VER_CEE | \ + DCB_CAP_DCBX_VER_IEEE | \ + DCB_CAP_DCBX_LLD_MANAGED) +#define CXGB4_DCBX_HOST_SUPPORT \ + (DCB_CAP_DCBX_VER_CEE | \ + DCB_CAP_DCBX_VER_IEEE | \ + DCB_CAP_DCBX_HOST) + +#define CXGB4_MAX_PRIORITY CXGB4_MAX_DCBX_APP_SUPPORTED +#define CXGB4_MAX_TCS CXGB4_MAX_DCBX_APP_SUPPORTED + +#define INIT_PORT_DCB_CMD(__pcmd, __port, __op, __action) \ + do { \ + memset(&(__pcmd), 0, sizeof(__pcmd)); \ + (__pcmd).op_to_portid = \ + cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | \ + FW_CMD_REQUEST_F | \ + FW_CMD_##__op##_F | \ + FW_PORT_CMD_PORTID_V(__port)); \ + (__pcmd).action_to_len16 = \ + cpu_to_be32(FW_PORT_CMD_ACTION_V(__action) | \ + FW_LEN16(pcmd)); \ + } while (0) + +#define INIT_PORT_DCB_READ_PEER_CMD(__pcmd, __port) \ + INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_RECV) + +#define INIT_PORT_DCB_READ_LOCAL_CMD(__pcmd, __port) \ + INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_TRANS) + +#define INIT_PORT_DCB_READ_SYNC_CMD(__pcmd, __port) \ + INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_DET) + +#define INIT_PORT_DCB_WRITE_CMD(__pcmd, __port) \ + INIT_PORT_DCB_CMD(__pcmd, __port, EXEC, FW_PORT_ACTION_L2_DCB_CFG) + +#define IEEE_FAUX_SYNC(__dev, __dcb) \ + do { \ + if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \ + cxgb4_dcb_state_fsm((__dev), \ + CXGB4_DCB_STATE_FW_ALLSYNCED); \ + } while (0) + +/* States we can be in for a port's Data Center Bridging. + */ +enum cxgb4_dcb_state { + CXGB4_DCB_STATE_START, /* initial unknown state */ + CXGB4_DCB_STATE_HOST, /* we're using Host DCB (if at all) */ + CXGB4_DCB_STATE_FW_INCOMPLETE, /* using firmware DCB, incomplete */ + CXGB4_DCB_STATE_FW_ALLSYNCED, /* using firmware DCB, all sync'ed */ +}; + +/* Data Center Bridging state input for the Finite State Machine. + */ +enum cxgb4_dcb_state_input { + /* Input from the firmware. + */ + CXGB4_DCB_INPUT_FW_DISABLED, /* firmware DCB disabled */ + CXGB4_DCB_INPUT_FW_ENABLED, /* firmware DCB enabled */ + CXGB4_DCB_INPUT_FW_INCOMPLETE, /* firmware reports incomplete DCB */ + CXGB4_DCB_INPUT_FW_ALLSYNCED, /* firmware reports all sync'ed */ + +}; + +/* Firmware DCB messages that we've received so far ... + */ +enum cxgb4_dcb_fw_msgs { + CXGB4_DCB_FW_PGID = 0x01, + CXGB4_DCB_FW_PGRATE = 0x02, + CXGB4_DCB_FW_PRIORATE = 0x04, + CXGB4_DCB_FW_PFC = 0x08, + CXGB4_DCB_FW_APP_ID = 0x10, +}; + +#define CXGB4_MAX_DCBX_APP_SUPPORTED 8 + +/* Data Center Bridging support; + */ +struct port_dcb_info { + enum cxgb4_dcb_state state; /* DCB State Machine */ + enum cxgb4_dcb_fw_msgs msgs; /* DCB Firmware messages received */ + unsigned int supported; /* OS DCB capabilities supported */ + bool enabled; /* OS Enabled state */ + + /* Cached copies of DCB information sent by the firmware (in Host + * Native Endian format). 
+ */ + u32 pgid; /* Priority Group[0..7] */ + u8 dcb_version; /* Running DCBx version */ + u8 pfcen; /* Priority Flow Control[0..7] */ + u8 pg_num_tcs_supported; /* max PG Traffic Classes */ + u8 pfc_num_tcs_supported; /* max PFC Traffic Classes */ + u8 pgrate[8]; /* Priority Group Rate[0..7] */ + u8 priorate[8]; /* Priority Rate[0..7] */ + u8 tsa[8]; /* TSA Algorithm[0..7] */ + struct app_priority { /* Application Information */ + u8 user_prio_map; /* Priority Map bitfield */ + u8 sel_field; /* Protocol ID interpretation */ + u16 protocolid; /* Protocol ID */ + } app_priority[CXGB4_MAX_DCBX_APP_SUPPORTED]; +}; + +void cxgb4_dcb_state_init(struct net_device *); +void cxgb4_dcb_version_init(struct net_device *); +void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input); +void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *); +void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *); +extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops; + +static inline __u8 bitswap_1(unsigned char val) +{ + return ((val & 0x80) >> 7) | + ((val & 0x40) >> 5) | + ((val & 0x20) >> 3) | + ((val & 0x10) >> 1) | + ((val & 0x08) << 1) | + ((val & 0x04) << 3) | + ((val & 0x02) << 5) | + ((val & 0x01) << 7); +} +#define CXGB4_DCB_ENABLED true + +#else /* !CONFIG_CHELSIO_T4_DCB */ + +static inline void cxgb4_dcb_state_init(struct net_device *dev) +{ +} + +#define CXGB4_DCB_ENABLED false + +#endif /* !CONFIG_CHELSIO_T4_DCB */ + +#endif /* __CXGB4_DCB_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c new file mode 100644 index 000000000..371f75e78 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -0,0 +1,2073 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_values.h" +#include "t4fw_api.h" +#include "cxgb4_debugfs.h" +#include "clip_tbl.h" +#include "l2t.h" + +/* generic seq_file support for showing a table of size rows x width. 
*/ +static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos) +{ + pos -= tb->skip_first; + return pos >= tb->rows ? NULL : &tb->data[pos * tb->width]; +} + +static void *seq_tab_start(struct seq_file *seq, loff_t *pos) +{ + struct seq_tab *tb = seq->private; + + if (tb->skip_first && *pos == 0) + return SEQ_START_TOKEN; + + return seq_tab_get_idx(tb, *pos); +} + +static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos) +{ + v = seq_tab_get_idx(seq->private, *pos + 1); + if (v) + ++*pos; + return v; +} + +static void seq_tab_stop(struct seq_file *seq, void *v) +{ +} + +static int seq_tab_show(struct seq_file *seq, void *v) +{ + const struct seq_tab *tb = seq->private; + + return tb->show(seq, v, ((char *)v - tb->data) / tb->width); +} + +static const struct seq_operations seq_tab_ops = { + .start = seq_tab_start, + .next = seq_tab_next, + .stop = seq_tab_stop, + .show = seq_tab_show +}; + +struct seq_tab *seq_open_tab(struct file *f, unsigned int rows, + unsigned int width, unsigned int have_header, + int (*show)(struct seq_file *seq, void *v, int i)) +{ + struct seq_tab *p; + + p = __seq_open_private(f, &seq_tab_ops, sizeof(*p) + rows * width); + if (p) { + p->show = show; + p->rows = rows; + p->width = width; + p->skip_first = have_header != 0; + } + return p; +} + +/* Trim the size of a seq_tab to the supplied number of rows. The operation is + * irreversible. + */ +static int seq_tab_trim(struct seq_tab *p, unsigned int new_rows) +{ + if (new_rows > p->rows) + return -EINVAL; + p->rows = new_rows; + return 0; +} + +static int cim_la_show(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) + seq_puts(seq, "Status Data PC LS0Stat LS0Addr " + " LS0Data\n"); + else { + const u32 *p = v; + + seq_printf(seq, + " %02x %x%07x %x%07x %08x %08x %08x%08x%08x%08x\n", + (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, + p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], + p[6], p[7]); + } + return 0; +} + +static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Status Data PC\n"); + } else { + const u32 *p = v; + + seq_printf(seq, " %02x %08x %08x\n", p[5] & 0xff, p[6], + p[7]); + seq_printf(seq, " %02x %02x%06x %02x%06x\n", + (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, + p[4] & 0xff, p[5] >> 8); + seq_printf(seq, " %02x %x%07x %x%07x\n", (p[0] >> 4) & 0xff, + p[0] & 0xf, p[1] >> 4, p[1] & 0xf, p[2] >> 4); + } + return 0; +} + +static int cim_la_open(struct inode *inode, struct file *file) +{ + int ret; + unsigned int cfg; + struct seq_tab *p; + struct adapter *adap = inode->i_private; + + ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg); + if (ret) + return ret; + + p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1, + cfg & UPDBGLACAPTPCONLY_F ? 
+ cim_la_show_3in1 : cim_la_show); + if (!p) + return -ENOMEM; + + ret = t4_cim_read_la(adap, (u32 *)p->data, NULL); + if (ret) + seq_release_private(inode, file); + return ret; +} + +static const struct file_operations cim_la_fops = { + .owner = THIS_MODULE, + .open = cim_la_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +static int cim_qcfg_show(struct seq_file *seq, void *v) +{ + static const char * const qname[] = { + "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", + "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", + "SGE0-RX", "SGE1-RX" + }; + + int i; + struct adapter *adap = seq->private; + u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; + u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; + u32 stat[(4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5))]; + u16 thres[CIM_NUM_IBQ]; + u32 obq_wr_t4[2 * CIM_NUM_OBQ], *wr; + u32 obq_wr_t5[2 * CIM_NUM_OBQ_T5]; + u32 *p = stat; + int cim_num_obq = is_t4(adap->params.chip) ? + CIM_NUM_OBQ : CIM_NUM_OBQ_T5; + + i = t4_cim_read(adap, is_t4(adap->params.chip) ? UP_IBQ_0_RDADDR_A : + UP_IBQ_0_SHADOW_RDADDR_A, + ARRAY_SIZE(stat), stat); + if (!i) { + if (is_t4(adap->params.chip)) { + i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A, + ARRAY_SIZE(obq_wr_t4), obq_wr_t4); + wr = obq_wr_t4; + } else { + i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A, + ARRAY_SIZE(obq_wr_t5), obq_wr_t5); + wr = obq_wr_t5; + } + } + if (i) + return i; + + t4_read_cimq_cfg(adap, base, size, thres); + + seq_printf(seq, + " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail\n"); + for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) + seq_printf(seq, "%7s %5x %5u %5u %6x %4x %4u %4u %5u\n", + qname[i], base[i], size[i], thres[i], + IBQRDADDR_G(p[0]), IBQWRADDR_G(p[1]), + QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]), + QUEREMFLITS_G(p[2]) * 16); + for ( ; i < CIM_NUM_IBQ + cim_num_obq; i++, p += 4, wr += 2) + seq_printf(seq, "%7s %5x %5u %12x %4x %4u %4u %5u\n", + qname[i], base[i], size[i], + QUERDADDR_G(p[0]) & 0x3fff, wr[0] - base[i], + QUESOPCNT_G(p[3]), QUEEOPCNT_G(p[3]), + QUEREMFLITS_G(p[2]) * 16); + return 0; +} + +static int cim_qcfg_open(struct inode *inode, struct file *file) +{ + return single_open(file, cim_qcfg_show, inode->i_private); +} + +static const struct file_operations cim_qcfg_fops = { + .owner = THIS_MODULE, + .open = cim_qcfg_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int cimq_show(struct seq_file *seq, void *v, int idx) +{ + const u32 *p = v; + + seq_printf(seq, "%#06x: %08x %08x %08x %08x\n", idx * 16, p[0], p[1], + p[2], p[3]); + return 0; +} + +static int cim_ibq_open(struct inode *inode, struct file *file) +{ + int ret; + struct seq_tab *p; + unsigned int qid = (uintptr_t)inode->i_private & 7; + struct adapter *adap = inode->i_private - qid; + + p = seq_open_tab(file, CIM_IBQ_SIZE, 4 * sizeof(u32), 0, cimq_show); + if (!p) + return -ENOMEM; + + ret = t4_read_cim_ibq(adap, qid, (u32 *)p->data, CIM_IBQ_SIZE * 4); + if (ret < 0) + seq_release_private(inode, file); + else + ret = 0; + return ret; +} + +static const struct file_operations cim_ibq_fops = { + .owner = THIS_MODULE, + .open = cim_ibq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +static int cim_obq_open(struct inode *inode, struct file *file) +{ + int ret; + struct seq_tab *p; + unsigned int qid = (uintptr_t)inode->i_private & 7; + struct adapter *adap = inode->i_private - qid; + + p = seq_open_tab(file, 6 * CIM_OBQ_SIZE, 4 * sizeof(u32), 0, cimq_show); + if (!p) + return -ENOMEM; + + ret = t4_read_cim_obq(adap, qid, 
(u32 *)p->data, 6 * CIM_OBQ_SIZE * 4); + if (ret < 0) { + seq_release_private(inode, file); + } else { + seq_tab_trim(p, ret / 4); + ret = 0; + } + return ret; +} + +static const struct file_operations cim_obq_fops = { + .owner = THIS_MODULE, + .open = cim_obq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +struct field_desc { + const char *name; + unsigned int start; + unsigned int width; +}; + +static void field_desc_show(struct seq_file *seq, u64 v, + const struct field_desc *p) +{ + char buf[32]; + int line_size = 0; + + while (p->name) { + u64 mask = (1ULL << p->width) - 1; + int len = scnprintf(buf, sizeof(buf), "%s: %llu", p->name, + ((unsigned long long)v >> p->start) & mask); + + if (line_size + len >= 79) { + line_size = 8; + seq_puts(seq, "\n "); + } + seq_printf(seq, "%s ", buf); + line_size += len + 1; + p++; + } + seq_putc(seq, '\n'); +} + +static struct field_desc tp_la0[] = { + { "RcfOpCodeOut", 60, 4 }, + { "State", 56, 4 }, + { "WcfState", 52, 4 }, + { "RcfOpcSrcOut", 50, 2 }, + { "CRxError", 49, 1 }, + { "ERxError", 48, 1 }, + { "SanityFailed", 47, 1 }, + { "SpuriousMsg", 46, 1 }, + { "FlushInputMsg", 45, 1 }, + { "FlushInputCpl", 44, 1 }, + { "RssUpBit", 43, 1 }, + { "RssFilterHit", 42, 1 }, + { "Tid", 32, 10 }, + { "InitTcb", 31, 1 }, + { "LineNumber", 24, 7 }, + { "Emsg", 23, 1 }, + { "EdataOut", 22, 1 }, + { "Cmsg", 21, 1 }, + { "CdataOut", 20, 1 }, + { "EreadPdu", 19, 1 }, + { "CreadPdu", 18, 1 }, + { "TunnelPkt", 17, 1 }, + { "RcfPeerFin", 16, 1 }, + { "RcfReasonOut", 12, 4 }, + { "TxCchannel", 10, 2 }, + { "RcfTxChannel", 8, 2 }, + { "RxEchannel", 6, 2 }, + { "RcfRxChannel", 5, 1 }, + { "RcfDataOutSrdy", 4, 1 }, + { "RxDvld", 3, 1 }, + { "RxOoDvld", 2, 1 }, + { "RxCongestion", 1, 1 }, + { "TxCongestion", 0, 1 }, + { NULL } +}; + +static int tp_la_show(struct seq_file *seq, void *v, int idx) +{ + const u64 *p = v; + + field_desc_show(seq, *p, tp_la0); + return 0; +} + +static int tp_la_show2(struct seq_file *seq, void *v, int idx) +{ + const u64 *p = v; + + if (idx) + seq_putc(seq, '\n'); + field_desc_show(seq, p[0], tp_la0); + if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) + field_desc_show(seq, p[1], tp_la0); + return 0; +} + +static int tp_la_show3(struct seq_file *seq, void *v, int idx) +{ + static struct field_desc tp_la1[] = { + { "CplCmdIn", 56, 8 }, + { "CplCmdOut", 48, 8 }, + { "ESynOut", 47, 1 }, + { "EAckOut", 46, 1 }, + { "EFinOut", 45, 1 }, + { "ERstOut", 44, 1 }, + { "SynIn", 43, 1 }, + { "AckIn", 42, 1 }, + { "FinIn", 41, 1 }, + { "RstIn", 40, 1 }, + { "DataIn", 39, 1 }, + { "DataInVld", 38, 1 }, + { "PadIn", 37, 1 }, + { "RxBufEmpty", 36, 1 }, + { "RxDdp", 35, 1 }, + { "RxFbCongestion", 34, 1 }, + { "TxFbCongestion", 33, 1 }, + { "TxPktSumSrdy", 32, 1 }, + { "RcfUlpType", 28, 4 }, + { "Eread", 27, 1 }, + { "Ebypass", 26, 1 }, + { "Esave", 25, 1 }, + { "Static0", 24, 1 }, + { "Cread", 23, 1 }, + { "Cbypass", 22, 1 }, + { "Csave", 21, 1 }, + { "CPktOut", 20, 1 }, + { "RxPagePoolFull", 18, 2 }, + { "RxLpbkPkt", 17, 1 }, + { "TxLpbkPkt", 16, 1 }, + { "RxVfValid", 15, 1 }, + { "SynLearned", 14, 1 }, + { "SetDelEntry", 13, 1 }, + { "SetInvEntry", 12, 1 }, + { "CpcmdDvld", 11, 1 }, + { "CpcmdSave", 10, 1 }, + { "RxPstructsFull", 8, 2 }, + { "EpcmdDvld", 7, 1 }, + { "EpcmdFlush", 6, 1 }, + { "EpcmdTrimPrefix", 5, 1 }, + { "EpcmdTrimPostfix", 4, 1 }, + { "ERssIp4Pkt", 3, 1 }, + { "ERssIp6Pkt", 2, 1 }, + { "ERssTcpUdpPkt", 1, 1 }, + { "ERssFceFipPkt", 0, 1 }, + { NULL } + }; + static struct field_desc tp_la2[] = { + { 
"CplCmdIn", 56, 8 }, + { "MpsVfVld", 55, 1 }, + { "MpsPf", 52, 3 }, + { "MpsVf", 44, 8 }, + { "SynIn", 43, 1 }, + { "AckIn", 42, 1 }, + { "FinIn", 41, 1 }, + { "RstIn", 40, 1 }, + { "DataIn", 39, 1 }, + { "DataInVld", 38, 1 }, + { "PadIn", 37, 1 }, + { "RxBufEmpty", 36, 1 }, + { "RxDdp", 35, 1 }, + { "RxFbCongestion", 34, 1 }, + { "TxFbCongestion", 33, 1 }, + { "TxPktSumSrdy", 32, 1 }, + { "RcfUlpType", 28, 4 }, + { "Eread", 27, 1 }, + { "Ebypass", 26, 1 }, + { "Esave", 25, 1 }, + { "Static0", 24, 1 }, + { "Cread", 23, 1 }, + { "Cbypass", 22, 1 }, + { "Csave", 21, 1 }, + { "CPktOut", 20, 1 }, + { "RxPagePoolFull", 18, 2 }, + { "RxLpbkPkt", 17, 1 }, + { "TxLpbkPkt", 16, 1 }, + { "RxVfValid", 15, 1 }, + { "SynLearned", 14, 1 }, + { "SetDelEntry", 13, 1 }, + { "SetInvEntry", 12, 1 }, + { "CpcmdDvld", 11, 1 }, + { "CpcmdSave", 10, 1 }, + { "RxPstructsFull", 8, 2 }, + { "EpcmdDvld", 7, 1 }, + { "EpcmdFlush", 6, 1 }, + { "EpcmdTrimPrefix", 5, 1 }, + { "EpcmdTrimPostfix", 4, 1 }, + { "ERssIp4Pkt", 3, 1 }, + { "ERssIp6Pkt", 2, 1 }, + { "ERssTcpUdpPkt", 1, 1 }, + { "ERssFceFipPkt", 0, 1 }, + { NULL } + }; + const u64 *p = v; + + if (idx) + seq_putc(seq, '\n'); + field_desc_show(seq, p[0], tp_la0); + if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) + field_desc_show(seq, p[1], (p[0] & BIT(17)) ? tp_la2 : tp_la1); + return 0; +} + +static int tp_la_open(struct inode *inode, struct file *file) +{ + struct seq_tab *p; + struct adapter *adap = inode->i_private; + + switch (DBGLAMODE_G(t4_read_reg(adap, TP_DBG_LA_CONFIG_A))) { + case 2: + p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0, + tp_la_show2); + break; + case 3: + p = seq_open_tab(file, TPLA_SIZE / 2, 2 * sizeof(u64), 0, + tp_la_show3); + break; + default: + p = seq_open_tab(file, TPLA_SIZE, sizeof(u64), 0, tp_la_show); + } + if (!p) + return -ENOMEM; + + t4_tp_read_la(adap, (u64 *)p->data, NULL); + return 0; +} + +static ssize_t tp_la_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + int err; + char s[32]; + unsigned long val; + size_t size = min(sizeof(s) - 1, count); + struct adapter *adap = file_inode(file)->i_private; + + if (copy_from_user(s, buf, size)) + return -EFAULT; + s[size] = '\0'; + err = kstrtoul(s, 0, &val); + if (err) + return err; + if (val > 0xffff) + return -EINVAL; + adap->params.tp.la_mask = val << 16; + t4_set_reg_field(adap, TP_DBG_LA_CONFIG_A, 0xffff0000U, + adap->params.tp.la_mask); + return count; +} + +static const struct file_operations tp_la_fops = { + .owner = THIS_MODULE, + .open = tp_la_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, + .write = tp_la_write +}; + +static int ulprx_la_show(struct seq_file *seq, void *v, int idx) +{ + const u32 *p = v; + + if (v == SEQ_START_TOKEN) + seq_puts(seq, " Pcmd Type Message" + " Data\n"); + else + seq_printf(seq, "%08x%08x %4x %08x %08x%08x%08x%08x\n", + p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); + return 0; +} + +static int ulprx_la_open(struct inode *inode, struct file *file) +{ + struct seq_tab *p; + struct adapter *adap = inode->i_private; + + p = seq_open_tab(file, ULPRX_LA_SIZE, 8 * sizeof(u32), 1, + ulprx_la_show); + if (!p) + return -ENOMEM; + + t4_ulprx_read_la(adap, (u32 *)p->data); + return 0; +} + +static const struct file_operations ulprx_la_fops = { + .owner = THIS_MODULE, + .open = ulprx_la_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +/* Show the PM memory stats. 
These stats include: + * + * TX: + * Read: memory read operation + * Write Bypass: cut-through + * Bypass + mem: cut-through and save copy + * + * RX: + * Read: memory read + * Write Bypass: cut-through + * Flush: payload trim or drop + */ +static int pm_stats_show(struct seq_file *seq, void *v) +{ + static const char * const tx_pm_stats[] = { + "Read:", "Write bypass:", "Write mem:", "Bypass + mem:" + }; + static const char * const rx_pm_stats[] = { + "Read:", "Write bypass:", "Write mem:", "Flush:" + }; + + int i; + u32 tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS]; + u64 tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS]; + struct adapter *adap = seq->private; + + t4_pmtx_get_stats(adap, tx_cnt, tx_cyc); + t4_pmrx_get_stats(adap, rx_cnt, rx_cyc); + + seq_printf(seq, "%13s %10s %20s\n", " ", "Tx pcmds", "Tx bytes"); + for (i = 0; i < PM_NSTATS - 1; i++) + seq_printf(seq, "%-13s %10u %20llu\n", + tx_pm_stats[i], tx_cnt[i], tx_cyc[i]); + + seq_printf(seq, "%13s %10s %20s\n", " ", "Rx pcmds", "Rx bytes"); + for (i = 0; i < PM_NSTATS - 1; i++) + seq_printf(seq, "%-13s %10u %20llu\n", + rx_pm_stats[i], rx_cnt[i], rx_cyc[i]); + return 0; +} + +static int pm_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, pm_stats_show, inode->i_private); +} + +static ssize_t pm_stats_clear(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct adapter *adap = file_inode(file)->i_private; + + t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0); + t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0); + return count; +} + +static const struct file_operations pm_stats_debugfs_fops = { + .owner = THIS_MODULE, + .open = pm_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = pm_stats_clear +}; + +static int cctrl_tbl_show(struct seq_file *seq, void *v) +{ + static const char * const dec_fac[] = { + "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", + "0.9375" }; + + int i; + u16 (*incr)[NCCTRL_WIN]; + struct adapter *adap = seq->private; + + incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL); + if (!incr) + return -ENOMEM; + + t4_read_cong_tbl(adap, incr); + + for (i = 0; i < NCCTRL_WIN; ++i) { + seq_printf(seq, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, + incr[0][i], incr[1][i], incr[2][i], incr[3][i], + incr[4][i], incr[5][i], incr[6][i], incr[7][i]); + seq_printf(seq, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", + incr[8][i], incr[9][i], incr[10][i], incr[11][i], + incr[12][i], incr[13][i], incr[14][i], incr[15][i], + adap->params.a_wnd[i], + dec_fac[adap->params.b_wnd[i]]); + } + + kfree(incr); + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl); + +/* Format a value in a unit that differs from the value's native unit by the + * given factor. 
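+ * Trailing zeroes in the fractional part are stripped, e.g.
+ * unit_conv(buf, sizeof(buf), 1250, 1000) produces "1.25".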
+ */ +static char *unit_conv(char *buf, size_t len, unsigned int val, + unsigned int factor) +{ + unsigned int rem = val % factor; + + if (rem == 0) { + snprintf(buf, len, "%u", val / factor); + } else { + while (rem % 10 == 0) + rem /= 10; + snprintf(buf, len, "%u.%u", val / factor, rem); + } + return buf; +} + +static int clk_show(struct seq_file *seq, void *v) +{ + char buf[32]; + struct adapter *adap = seq->private; + unsigned int cclk_ps = 1000000000 / adap->params.vpd.cclk; /* in ps */ + u32 res = t4_read_reg(adap, TP_TIMER_RESOLUTION_A); + unsigned int tre = TIMERRESOLUTION_G(res); + unsigned int dack_re = DELAYEDACKRESOLUTION_G(res); + unsigned long long tp_tick_us = (cclk_ps << tre) / 1000000; /* in us */ + + seq_printf(seq, "Core clock period: %s ns\n", + unit_conv(buf, sizeof(buf), cclk_ps, 1000)); + seq_printf(seq, "TP timer tick: %s us\n", + unit_conv(buf, sizeof(buf), (cclk_ps << tre), 1000000)); + seq_printf(seq, "TCP timestamp tick: %s us\n", + unit_conv(buf, sizeof(buf), + (cclk_ps << TIMESTAMPRESOLUTION_G(res)), 1000000)); + seq_printf(seq, "DACK tick: %s us\n", + unit_conv(buf, sizeof(buf), (cclk_ps << dack_re), 1000000)); + seq_printf(seq, "DACK timer: %u us\n", + ((cclk_ps << dack_re) / 1000000) * + t4_read_reg(adap, TP_DACK_TIMER_A)); + seq_printf(seq, "Retransmit min: %llu us\n", + tp_tick_us * t4_read_reg(adap, TP_RXT_MIN_A)); + seq_printf(seq, "Retransmit max: %llu us\n", + tp_tick_us * t4_read_reg(adap, TP_RXT_MAX_A)); + seq_printf(seq, "Persist timer min: %llu us\n", + tp_tick_us * t4_read_reg(adap, TP_PERS_MIN_A)); + seq_printf(seq, "Persist timer max: %llu us\n", + tp_tick_us * t4_read_reg(adap, TP_PERS_MAX_A)); + seq_printf(seq, "Keepalive idle timer: %llu us\n", + tp_tick_us * t4_read_reg(adap, TP_KEEP_IDLE_A)); + seq_printf(seq, "Keepalive interval: %llu us\n", + tp_tick_us * t4_read_reg(adap, TP_KEEP_INTVL_A)); + seq_printf(seq, "Initial SRTT: %llu us\n", + tp_tick_us * INITSRTT_G(t4_read_reg(adap, TP_INIT_SRTT_A))); + seq_printf(seq, "FINWAIT2 timer: %llu us\n", + tp_tick_us * t4_read_reg(adap, TP_FINWAIT2_TIMER_A)); + + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(clk); + +/* Firmware Device Log dump. */ +static const char * const devlog_level_strings[] = { + [FW_DEVLOG_LEVEL_EMERG] = "EMERG", + [FW_DEVLOG_LEVEL_CRIT] = "CRIT", + [FW_DEVLOG_LEVEL_ERR] = "ERR", + [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", + [FW_DEVLOG_LEVEL_INFO] = "INFO", + [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" +}; + +static const char * const devlog_facility_strings[] = { + [FW_DEVLOG_FACILITY_CORE] = "CORE", + [FW_DEVLOG_FACILITY_SCHED] = "SCHED", + [FW_DEVLOG_FACILITY_TIMER] = "TIMER", + [FW_DEVLOG_FACILITY_RES] = "RES", + [FW_DEVLOG_FACILITY_HW] = "HW", + [FW_DEVLOG_FACILITY_FLR] = "FLR", + [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", + [FW_DEVLOG_FACILITY_PHY] = "PHY", + [FW_DEVLOG_FACILITY_MAC] = "MAC", + [FW_DEVLOG_FACILITY_PORT] = "PORT", + [FW_DEVLOG_FACILITY_VI] = "VI", + [FW_DEVLOG_FACILITY_FILTER] = "FILTER", + [FW_DEVLOG_FACILITY_ACL] = "ACL", + [FW_DEVLOG_FACILITY_TM] = "TM", + [FW_DEVLOG_FACILITY_QFC] = "QFC", + [FW_DEVLOG_FACILITY_DCB] = "DCB", + [FW_DEVLOG_FACILITY_ETH] = "ETH", + [FW_DEVLOG_FACILITY_OFLD] = "OFLD", + [FW_DEVLOG_FACILITY_RI] = "RI", + [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", + [FW_DEVLOG_FACILITY_FCOE] = "FCOE", + [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", + [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" +}; + +/* Information gathered by Device Log Open routine for the display routine. 
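+ * The complete firmware log is copied into log[] at open time; "first" is the
+ * index of the oldest entry, i.e. the one with the lowest sequence number.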
+ */ +struct devlog_info { + unsigned int nentries; /* number of entries in log[] */ + unsigned int first; /* first [temporal] entry in log[] */ + struct fw_devlog_e log[0]; /* Firmware Device Log */ +}; + +/* Dump a Firmaware Device Log entry. + */ +static int devlog_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) + seq_printf(seq, "%10s %15s %8s %8s %s\n", + "Seq#", "Tstamp", "Level", "Facility", "Message"); + else { + struct devlog_info *dinfo = seq->private; + int fidx = (uintptr_t)v - 2; + unsigned long index; + struct fw_devlog_e *e; + + /* Get a pointer to the log entry to display. Skip unused log + * entries. + */ + index = dinfo->first + fidx; + if (index >= dinfo->nentries) + index -= dinfo->nentries; + e = &dinfo->log[index]; + if (e->timestamp == 0) + return 0; + + /* Print the message. This depends on the firmware using + * exactly the same formating strings as the kernel so we may + * eventually have to put a format interpreter in here ... + */ + seq_printf(seq, "%10d %15llu %8s %8s ", + e->seqno, e->timestamp, + (e->level < ARRAY_SIZE(devlog_level_strings) + ? devlog_level_strings[e->level] + : "UNKNOWN"), + (e->facility < ARRAY_SIZE(devlog_facility_strings) + ? devlog_facility_strings[e->facility] + : "UNKNOWN")); + seq_printf(seq, e->fmt, e->params[0], e->params[1], + e->params[2], e->params[3], e->params[4], + e->params[5], e->params[6], e->params[7]); + } + return 0; +} + +/* Sequential File Operations for Device Log. + */ +static inline void *devlog_get_idx(struct devlog_info *dinfo, loff_t pos) +{ + if (pos > dinfo->nentries) + return NULL; + + return (void *)(uintptr_t)(pos + 1); +} + +static void *devlog_start(struct seq_file *seq, loff_t *pos) +{ + struct devlog_info *dinfo = seq->private; + + return (*pos + ? devlog_get_idx(dinfo, *pos) + : SEQ_START_TOKEN); +} + +static void *devlog_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct devlog_info *dinfo = seq->private; + + (*pos)++; + return devlog_get_idx(dinfo, *pos); +} + +static void devlog_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations devlog_seq_ops = { + .start = devlog_start, + .next = devlog_next, + .stop = devlog_stop, + .show = devlog_show +}; + +/* Set up for reading the firmware's device log. We read the entire log here + * and then display it incrementally in devlog_show(). + */ +static int devlog_open(struct inode *inode, struct file *file) +{ + struct adapter *adap = inode->i_private; + struct devlog_params *dparams = &adap->params.devlog; + struct devlog_info *dinfo; + unsigned int index; + u32 fseqno; + int ret; + + /* If we don't know where the log is we can't do anything. + */ + if (dparams->start == 0) + return -ENXIO; + + /* Allocate the space to read in the firmware's device log and set up + * for the iterated call to our display function. + */ + dinfo = __seq_open_private(file, &devlog_seq_ops, + sizeof(*dinfo) + dparams->size); + if (!dinfo) + return -ENOMEM; + + /* Record the basic log buffer information and read in the raw log. + */ + dinfo->nentries = (dparams->size / sizeof(struct fw_devlog_e)); + dinfo->first = 0; + spin_lock(&adap->win0_lock); + ret = t4_memory_rw(adap, adap->params.drv_memwin, dparams->memtype, + dparams->start, dparams->size, (__be32 *)dinfo->log, + T4_MEMORY_READ); + spin_unlock(&adap->win0_lock); + if (ret) { + seq_release_private(inode, file); + return ret; + } + + /* Translate log multi-byte integral elements into host native format + * and determine where the first entry in the log is. 
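+ * Unused entries have a zero timestamp and are skipped; the first entry is the
+ * used one with the smallest sequence number.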
+ */ + for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) { + struct fw_devlog_e *e = &dinfo->log[index]; + int i; + __u32 seqno; + + if (e->timestamp == 0) + continue; + + e->timestamp = (__force __be64)be64_to_cpu(e->timestamp); + seqno = be32_to_cpu(e->seqno); + for (i = 0; i < 8; i++) + e->params[i] = + (__force __be32)be32_to_cpu(e->params[i]); + + if (seqno < fseqno) { + fseqno = seqno; + dinfo->first = index; + } + } + return 0; +} + +static const struct file_operations devlog_fops = { + .owner = THIS_MODULE, + .open = devlog_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +static int mbox_show(struct seq_file *seq, void *v) +{ + static const char * const owner[] = { "none", "FW", "driver", + "unknown" }; + + int i; + unsigned int mbox = (uintptr_t)seq->private & 7; + struct adapter *adap = seq->private - mbox; + void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); + unsigned int ctrl_reg = (is_t4(adap->params.chip) + ? CIM_PF_MAILBOX_CTRL_A + : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A); + void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg); + + i = MBOWNER_G(readl(ctrl)); + seq_printf(seq, "mailbox owned by %s\n\n", owner[i]); + + for (i = 0; i < MBOX_LEN; i += 8) + seq_printf(seq, "%016llx\n", + (unsigned long long)readq(addr + i)); + return 0; +} + +static int mbox_open(struct inode *inode, struct file *file) +{ + return single_open(file, mbox_show, inode->i_private); +} + +static ssize_t mbox_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + int i; + char c = '\n', s[256]; + unsigned long long data[8]; + const struct inode *ino; + unsigned int mbox; + struct adapter *adap; + void __iomem *addr; + void __iomem *ctrl; + + if (count > sizeof(s) - 1 || !count) + return -EINVAL; + if (copy_from_user(s, buf, count)) + return -EFAULT; + s[count] = '\0'; + + if (sscanf(s, "%llx %llx %llx %llx %llx %llx %llx %llx%c", &data[0], + &data[1], &data[2], &data[3], &data[4], &data[5], &data[6], + &data[7], &c) < 8 || c != '\n') + return -EINVAL; + + ino = file_inode(file); + mbox = (uintptr_t)ino->i_private & 7; + adap = ino->i_private - mbox; + addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); + ctrl = addr + MBOX_LEN; + + if (MBOWNER_G(readl(ctrl)) != X_MBOWNER_PL) + return -EBUSY; + + for (i = 0; i < 8; i++) + writeq(data[i], addr + 8 * i); + + writel(MBMSGVALID_F | MBOWNER_V(X_MBOWNER_FW), ctrl); + return count; +} + +static const struct file_operations mbox_debugfs_fops = { + .owner = THIS_MODULE, + .open = mbox_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = mbox_write +}; + +static ssize_t flash_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file_inode(file)->i_size; + struct adapter *adap = file->private_data; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + while (count) { + size_t len; + int ret, ofst; + u8 data[256]; + + ofst = pos & 3; + len = min(count + ofst, sizeof(data)); + ret = t4_read_flash(adap, pos - ofst, (len + 3) / 4, + (u32 *)data, 1); + if (ret) + return ret; + + len -= ofst; + if (copy_to_user(buf, data + ofst, len)) + return -EFAULT; + + buf += len; + pos += len; + count -= len; + } + count = pos - *ppos; + *ppos = pos; + return count; +} + +static const struct file_operations flash_debugfs_fops = { + .owner = THIS_MODULE, + .open = mem_open, + .read = flash_read, +}; + +static 
inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) +{ + *mask = x | y; + y = (__force u64)cpu_to_be64(y); + memcpy(addr, (char *)&y + 2, ETH_ALEN); +} + +static int mps_tcam_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) + seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF" + " VF Replication " + "P0 P1 P2 P3 ML\n"); + else { + u64 mask; + u8 addr[ETH_ALEN]; + struct adapter *adap = seq->private; + unsigned int idx = (uintptr_t)v - 2; + u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); + u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); + u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx)); + u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx)); + u32 rplc[4] = {0, 0, 0, 0}; + + if (tcamx & tcamy) { + seq_printf(seq, "%3u -\n", idx); + goto out; + } + + if (cls_lo & REPLICATE_F) { + struct fw_ldst_cmd ldst_cmd; + int ret; + + memset(&ldst_cmd, 0, sizeof(ldst_cmd)); + ldst_cmd.op_to_addrspace = + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V( + FW_LDST_ADDRSPC_MPS)); + ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); + ldst_cmd.u.mps.fid_ctl = + htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | + FW_LDST_CMD_CTL_V(idx)); + ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, + sizeof(ldst_cmd), &ldst_cmd); + if (ret) + dev_warn(adap->pdev_dev, "Can't read MPS " + "replication map for idx %d: %d\n", + idx, -ret); + else { + rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0); + rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32); + rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64); + rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96); + } + } + + tcamxy2valmask(tcamx, tcamy, addr, &mask); + seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx" + "%3c %#x%4u%4d", + idx, addr[0], addr[1], addr[2], addr[3], addr[4], + addr[5], (unsigned long long)mask, + (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi), + PF_G(cls_lo), + (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1); + if (cls_lo & REPLICATE_F) + seq_printf(seq, " %08x %08x %08x %08x", + rplc[3], rplc[2], rplc[1], rplc[0]); + else + seq_printf(seq, "%36c", ' '); + seq_printf(seq, "%4u%3u%3u%3u %#x\n", + SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo), + SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo), + (cls_lo >> MULTILISTEN0_S) & 0xf); + } +out: return 0; +} + +static inline void *mps_tcam_get_idx(struct seq_file *seq, loff_t pos) +{ + struct adapter *adap = seq->private; + int max_mac_addr = is_t4(adap->params.chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + return ((pos <= max_mac_addr) ? (void *)(uintptr_t)(pos + 1) : NULL); +} + +static void *mps_tcam_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
mps_tcam_get_idx(seq, *pos) : SEQ_START_TOKEN; +} + +static void *mps_tcam_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return mps_tcam_get_idx(seq, *pos); +} + +static void mps_tcam_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations mps_tcam_seq_ops = { + .start = mps_tcam_start, + .next = mps_tcam_next, + .stop = mps_tcam_stop, + .show = mps_tcam_show +}; + +static int mps_tcam_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &mps_tcam_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations mps_tcam_debugfs_fops = { + .owner = THIS_MODULE, + .open = mps_tcam_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* Display various sensor information. + */ +static int sensors_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + u32 param[7], val[7]; + int ret; + + /* Note that if the sensors haven't been initialized and turned on + * we'll get values of 0, so treat those as "" ... + */ + param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_TMP)); + param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | + FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD)); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + param, val); + + if (ret < 0 || val[0] == 0) + seq_puts(seq, "Temperature: \n"); + else + seq_printf(seq, "Temperature: %dC\n", val[0]); + + if (ret < 0 || val[1] == 0) + seq_puts(seq, "Core VDD: \n"); + else + seq_printf(seq, "Core VDD: %dmV\n", val[1]); + + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(sensors); + +#if IS_ENABLED(CONFIG_IPV6) +static int clip_tbl_open(struct inode *inode, struct file *file) +{ + return single_open(file, clip_tbl_show, inode->i_private); +} + +static const struct file_operations clip_tbl_debugfs_fops = { + .owner = THIS_MODULE, + .open = clip_tbl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release +}; +#endif + +/*RSS Table. + */ + +static int rss_show(struct seq_file *seq, void *v, int idx) +{ + u16 *entry = v; + + seq_printf(seq, "%4d: %4u %4u %4u %4u %4u %4u %4u %4u\n", + idx * 8, entry[0], entry[1], entry[2], entry[3], entry[4], + entry[5], entry[6], entry[7]); + return 0; +} + +static int rss_open(struct inode *inode, struct file *file) +{ + int ret; + struct seq_tab *p; + struct adapter *adap = inode->i_private; + + p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show); + if (!p) + return -ENOMEM; + + ret = t4_read_rss(adap, (u16 *)p->data); + if (ret) + seq_release_private(inode, file); + + return ret; +} + +static const struct file_operations rss_debugfs_fops = { + .owner = THIS_MODULE, + .open = rss_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +/* RSS Configuration. + */ + +/* Small utility function to return the strings "yes" or "no" if the supplied + * argument is non-zero. + */ +static const char *yesno(int x) +{ + static const char *yes = "yes"; + static const char *no = "no"; + + return x ? 
yes : no; +} + +static int rss_config_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + static const char * const keymode[] = { + "global", + "global and per-VF scramble", + "per-PF and per-VF scramble", + "per-VF and per-VF scramble", + }; + u32 rssconf; + + rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_A); + seq_printf(seq, "TP_RSS_CONFIG: %#x\n", rssconf); + seq_printf(seq, " Tnl4TupEnIpv6: %3s\n", yesno(rssconf & + TNL4TUPENIPV6_F)); + seq_printf(seq, " Tnl2TupEnIpv6: %3s\n", yesno(rssconf & + TNL2TUPENIPV6_F)); + seq_printf(seq, " Tnl4TupEnIpv4: %3s\n", yesno(rssconf & + TNL4TUPENIPV4_F)); + seq_printf(seq, " Tnl2TupEnIpv4: %3s\n", yesno(rssconf & + TNL2TUPENIPV4_F)); + seq_printf(seq, " TnlTcpSel: %3s\n", yesno(rssconf & TNLTCPSEL_F)); + seq_printf(seq, " TnlIp6Sel: %3s\n", yesno(rssconf & TNLIP6SEL_F)); + seq_printf(seq, " TnlVrtSel: %3s\n", yesno(rssconf & TNLVRTSEL_F)); + seq_printf(seq, " TnlMapEn: %3s\n", yesno(rssconf & TNLMAPEN_F)); + seq_printf(seq, " OfdHashSave: %3s\n", yesno(rssconf & + OFDHASHSAVE_F)); + seq_printf(seq, " OfdVrtSel: %3s\n", yesno(rssconf & OFDVRTSEL_F)); + seq_printf(seq, " OfdMapEn: %3s\n", yesno(rssconf & OFDMAPEN_F)); + seq_printf(seq, " OfdLkpEn: %3s\n", yesno(rssconf & OFDLKPEN_F)); + seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf & + SYN4TUPENIPV6_F)); + seq_printf(seq, " Syn2TupEnIpv6: %3s\n", yesno(rssconf & + SYN2TUPENIPV6_F)); + seq_printf(seq, " Syn4TupEnIpv4: %3s\n", yesno(rssconf & + SYN4TUPENIPV4_F)); + seq_printf(seq, " Syn2TupEnIpv4: %3s\n", yesno(rssconf & + SYN2TUPENIPV4_F)); + seq_printf(seq, " Syn4TupEnIpv6: %3s\n", yesno(rssconf & + SYN4TUPENIPV6_F)); + seq_printf(seq, " SynIp6Sel: %3s\n", yesno(rssconf & SYNIP6SEL_F)); + seq_printf(seq, " SynVrt6Sel: %3s\n", yesno(rssconf & SYNVRTSEL_F)); + seq_printf(seq, " SynMapEn: %3s\n", yesno(rssconf & SYNMAPEN_F)); + seq_printf(seq, " SynLkpEn: %3s\n", yesno(rssconf & SYNLKPEN_F)); + seq_printf(seq, " ChnEn: %3s\n", yesno(rssconf & + CHANNELENABLE_F)); + seq_printf(seq, " PrtEn: %3s\n", yesno(rssconf & + PORTENABLE_F)); + seq_printf(seq, " TnlAllLkp: %3s\n", yesno(rssconf & + TNLALLLOOKUP_F)); + seq_printf(seq, " VrtEn: %3s\n", yesno(rssconf & + VIRTENABLE_F)); + seq_printf(seq, " CngEn: %3s\n", yesno(rssconf & + CONGESTIONENABLE_F)); + seq_printf(seq, " HashToeplitz: %3s\n", yesno(rssconf & + HASHTOEPLITZ_F)); + seq_printf(seq, " Udp4En: %3s\n", yesno(rssconf & UDPENABLE_F)); + seq_printf(seq, " Disable: %3s\n", yesno(rssconf & DISABLE_F)); + + seq_puts(seq, "\n"); + + rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_TNL_A); + seq_printf(seq, "TP_RSS_CONFIG_TNL: %#x\n", rssconf); + seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf)); + seq_printf(seq, " MaskFilter: %3d\n", MASKFILTER_G(rssconf)); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) { + seq_printf(seq, " HashAll: %3s\n", + yesno(rssconf & HASHALL_F)); + seq_printf(seq, " HashEth: %3s\n", + yesno(rssconf & HASHETH_F)); + } + seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F)); + + seq_puts(seq, "\n"); + + rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_OFD_A); + seq_printf(seq, "TP_RSS_CONFIG_OFD: %#x\n", rssconf); + seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf)); + seq_printf(seq, " RRCplMapEn: %3s\n", yesno(rssconf & + RRCPLMAPEN_F)); + seq_printf(seq, " RRCplQueWidth: %3d\n", RRCPLQUEWIDTH_G(rssconf)); + + seq_puts(seq, "\n"); + + rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_SYN_A); + seq_printf(seq, "TP_RSS_CONFIG_SYN: %#x\n", rssconf); + 
seq_printf(seq, " MaskSize: %3d\n", MASKSIZE_G(rssconf)); + seq_printf(seq, " UseWireCh: %3s\n", yesno(rssconf & USEWIRECH_F)); + + seq_puts(seq, "\n"); + + rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A); + seq_printf(seq, "TP_RSS_CONFIG_VRT: %#x\n", rssconf); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) > CHELSIO_T5) { + seq_printf(seq, " KeyWrAddrX: %3d\n", + KEYWRADDRX_G(rssconf)); + seq_printf(seq, " KeyExtend: %3s\n", + yesno(rssconf & KEYEXTEND_F)); + } + seq_printf(seq, " VfRdRg: %3s\n", yesno(rssconf & VFRDRG_F)); + seq_printf(seq, " VfRdEn: %3s\n", yesno(rssconf & VFRDEN_F)); + seq_printf(seq, " VfPerrEn: %3s\n", yesno(rssconf & VFPERREN_F)); + seq_printf(seq, " KeyPerrEn: %3s\n", yesno(rssconf & KEYPERREN_F)); + seq_printf(seq, " DisVfVlan: %3s\n", yesno(rssconf & + DISABLEVLAN_F)); + seq_printf(seq, " EnUpSwt: %3s\n", yesno(rssconf & ENABLEUP0_F)); + seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf)); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf)); + seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]); + seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F)); + seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F)); + seq_printf(seq, " KeyWrAddr: %3d\n", KEYWRADDR_G(rssconf)); + + seq_puts(seq, "\n"); + + rssconf = t4_read_reg(adapter, TP_RSS_CONFIG_CNG_A); + seq_printf(seq, "TP_RSS_CONFIG_CNG: %#x\n", rssconf); + seq_printf(seq, " ChnCount3: %3s\n", yesno(rssconf & CHNCOUNT3_F)); + seq_printf(seq, " ChnCount2: %3s\n", yesno(rssconf & CHNCOUNT2_F)); + seq_printf(seq, " ChnCount1: %3s\n", yesno(rssconf & CHNCOUNT1_F)); + seq_printf(seq, " ChnCount0: %3s\n", yesno(rssconf & CHNCOUNT0_F)); + seq_printf(seq, " ChnUndFlow3: %3s\n", yesno(rssconf & + CHNUNDFLOW3_F)); + seq_printf(seq, " ChnUndFlow2: %3s\n", yesno(rssconf & + CHNUNDFLOW2_F)); + seq_printf(seq, " ChnUndFlow1: %3s\n", yesno(rssconf & + CHNUNDFLOW1_F)); + seq_printf(seq, " ChnUndFlow0: %3s\n", yesno(rssconf & + CHNUNDFLOW0_F)); + seq_printf(seq, " RstChn3: %3s\n", yesno(rssconf & RSTCHN3_F)); + seq_printf(seq, " RstChn2: %3s\n", yesno(rssconf & RSTCHN2_F)); + seq_printf(seq, " RstChn1: %3s\n", yesno(rssconf & RSTCHN1_F)); + seq_printf(seq, " RstChn0: %3s\n", yesno(rssconf & RSTCHN0_F)); + seq_printf(seq, " UpdVld: %3s\n", yesno(rssconf & UPDVLD_F)); + seq_printf(seq, " Xoff: %3s\n", yesno(rssconf & XOFF_F)); + seq_printf(seq, " UpdChn3: %3s\n", yesno(rssconf & UPDCHN3_F)); + seq_printf(seq, " UpdChn2: %3s\n", yesno(rssconf & UPDCHN2_F)); + seq_printf(seq, " UpdChn1: %3s\n", yesno(rssconf & UPDCHN1_F)); + seq_printf(seq, " UpdChn0: %3s\n", yesno(rssconf & UPDCHN0_F)); + seq_printf(seq, " Queue: %3d\n", QUEUE_G(rssconf)); + + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(rss_config); + +/* RSS Secret Key. 
+ */ + +static int rss_key_show(struct seq_file *seq, void *v) +{ + u32 key[10]; + + t4_read_rss_key(seq->private, key); + seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n", + key[9], key[8], key[7], key[6], key[5], key[4], key[3], + key[2], key[1], key[0]); + return 0; +} + +static int rss_key_open(struct inode *inode, struct file *file) +{ + return single_open(file, rss_key_show, inode->i_private); +} + +static ssize_t rss_key_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + int i, j; + u32 key[10]; + char s[100], *p; + struct adapter *adap = file_inode(file)->i_private; + + if (count > sizeof(s) - 1) + return -EINVAL; + if (copy_from_user(s, buf, count)) + return -EFAULT; + for (i = count; i > 0 && isspace(s[i - 1]); i--) + ; + s[i] = '\0'; + + for (p = s, i = 9; i >= 0; i--) { + key[i] = 0; + for (j = 0; j < 8; j++, p++) { + if (!isxdigit(*p)) + return -EINVAL; + key[i] = (key[i] << 4) | hex2val(*p); + } + } + + t4_write_rss_key(adap, key, -1); + return count; +} + +static const struct file_operations rss_key_debugfs_fops = { + .owner = THIS_MODULE, + .open = rss_key_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = rss_key_write +}; + +/* PF RSS Configuration. + */ + +struct rss_pf_conf { + u32 rss_pf_map; + u32 rss_pf_mask; + u32 rss_pf_config; +}; + +static int rss_pf_config_show(struct seq_file *seq, void *v, int idx) +{ + struct rss_pf_conf *pfconf; + + if (v == SEQ_START_TOKEN) { + /* use the 0th entry to dump the PF Map Index Size */ + pfconf = seq->private + offsetof(struct seq_tab, data); + seq_printf(seq, "PF Map Index Size = %d\n\n", + LKPIDXSIZE_G(pfconf->rss_pf_map)); + + seq_puts(seq, " RSS PF VF Hash Tuple Enable Default\n"); + seq_puts(seq, " Enable IPF Mask Mask IPv6 IPv4 UDP Queue\n"); + seq_puts(seq, " PF Map Chn Prt Map Size Size Four Two Four Two Four Ch1 Ch0\n"); + } else { + #define G_PFnLKPIDX(map, n) \ + (((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M) + #define G_PFnMSKSIZE(mask, n) \ + (((mask) >> PF1MSKSIZE_S*(n)) & PF1MSKSIZE_M) + + pfconf = v; + seq_printf(seq, "%3d %3s %3s %3s %3d %3d %3d %3s %3s %3s %3s %3s %3d %3d\n", + idx, + yesno(pfconf->rss_pf_config & MAPENABLE_F), + yesno(pfconf->rss_pf_config & CHNENABLE_F), + yesno(pfconf->rss_pf_config & PRTENABLE_F), + G_PFnLKPIDX(pfconf->rss_pf_map, idx), + G_PFnMSKSIZE(pfconf->rss_pf_mask, idx), + IVFWIDTH_G(pfconf->rss_pf_config), + yesno(pfconf->rss_pf_config & IP6FOURTUPEN_F), + yesno(pfconf->rss_pf_config & IP6TWOTUPEN_F), + yesno(pfconf->rss_pf_config & IP4FOURTUPEN_F), + yesno(pfconf->rss_pf_config & IP4TWOTUPEN_F), + yesno(pfconf->rss_pf_config & UDPFOURTUPEN_F), + CH1DEFAULTQUEUE_G(pfconf->rss_pf_config), + CH0DEFAULTQUEUE_G(pfconf->rss_pf_config)); + + #undef G_PFnLKPIDX + #undef G_PFnMSKSIZE + } + return 0; +} + +static int rss_pf_config_open(struct inode *inode, struct file *file) +{ + struct adapter *adapter = inode->i_private; + struct seq_tab *p; + u32 rss_pf_map, rss_pf_mask; + struct rss_pf_conf *pfconf; + int pf; + + p = seq_open_tab(file, 8, sizeof(*pfconf), 1, rss_pf_config_show); + if (!p) + return -ENOMEM; + + pfconf = (struct rss_pf_conf *)p->data; + rss_pf_map = t4_read_rss_pf_map(adapter); + rss_pf_mask = t4_read_rss_pf_mask(adapter); + for (pf = 0; pf < 8; pf++) { + pfconf[pf].rss_pf_map = rss_pf_map; + pfconf[pf].rss_pf_mask = rss_pf_mask; + t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config); + } + return 0; +} + +static const struct file_operations rss_pf_config_debugfs_fops = { + .owner = 
THIS_MODULE, + .open = rss_pf_config_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +/* VF RSS Configuration. + */ + +struct rss_vf_conf { + u32 rss_vf_vfl; + u32 rss_vf_vfh; +}; + +static int rss_vf_config_show(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, " RSS Hash Tuple Enable\n"); + seq_puts(seq, " Enable IVF Dis Enb IPv6 IPv4 UDP Def Secret Key\n"); + seq_puts(seq, " VF Chn Prt Map VLAN uP Four Two Four Two Four Que Idx Hash\n"); + } else { + struct rss_vf_conf *vfconf = v; + + seq_printf(seq, "%3d %3s %3s %3d %3s %3s %3s %3s %3s %3s %3s %4d %3d %#10x\n", + idx, + yesno(vfconf->rss_vf_vfh & VFCHNEN_F), + yesno(vfconf->rss_vf_vfh & VFPRTEN_F), + VFLKPIDX_G(vfconf->rss_vf_vfh), + yesno(vfconf->rss_vf_vfh & VFVLNEX_F), + yesno(vfconf->rss_vf_vfh & VFUPEN_F), + yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F), + yesno(vfconf->rss_vf_vfh & VFIP6TWOTUPEN_F), + yesno(vfconf->rss_vf_vfh & VFIP4FOURTUPEN_F), + yesno(vfconf->rss_vf_vfh & VFIP4TWOTUPEN_F), + yesno(vfconf->rss_vf_vfh & ENABLEUDPHASH_F), + DEFAULTQUEUE_G(vfconf->rss_vf_vfh), + KEYINDEX_G(vfconf->rss_vf_vfh), + vfconf->rss_vf_vfl); + } + return 0; +} + +static int rss_vf_config_open(struct inode *inode, struct file *file) +{ + struct adapter *adapter = inode->i_private; + struct seq_tab *p; + struct rss_vf_conf *vfconf; + int vf; + + p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show); + if (!p) + return -ENOMEM; + + vfconf = (struct rss_vf_conf *)p->data; + for (vf = 0; vf < 128; vf++) { + t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl, + &vfconf[vf].rss_vf_vfh); + } + return 0; +} + +static const struct file_operations rss_vf_config_debugfs_fops = { + .owner = THIS_MODULE, + .open = rss_vf_config_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +/** + * ethqset2pinfo - return port_info of an Ethernet Queue Set + * @adap: the adapter + * @qset: Ethernet Queue Set + */ +static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset) +{ + int pidx; + + for_each_port(adap, pidx) { + struct port_info *pi = adap2pinfo(adap, pidx); + + if (qset >= pi->first_qset && + qset < pi->first_qset + pi->nqsets) + return pi; + } + + /* should never happen! */ + BUG_ON(1); + return NULL; +} + +static int sge_qinfo_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); + int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); + int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4); + int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4); + int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); + int i, r = (uintptr_t)v - 1; + int toe_idx = r - eth_entries; + int rdma_idx = toe_idx - toe_entries; + int ciq_idx = rdma_idx - rdma_entries; + int ctrl_idx = ciq_idx - ciq_entries; + int fq_idx = ctrl_idx - ctrl_entries; + + if (r) + seq_putc(seq, '\n'); + +#define S3(fmt_spec, s, v) \ +do { \ + seq_printf(seq, "%-12s", s); \ + for (i = 0; i < n; ++i) \ + seq_printf(seq, " %16" fmt_spec, v); \ + seq_putc(seq, '\n'); \ +} while (0) +#define S(s, v) S3("s", s, v) +#define T(s, v) S3("u", s, tx[i].v) +#define R(s, v) S3("u", s, rx[i].v) + + if (r < eth_entries) { + int base_qset = r * 4; + const struct sge_eth_rxq *rx = &adap->sge.ethrxq[base_qset]; + const struct sge_eth_txq *tx = &adap->sge.ethtxq[base_qset]; + int n = min(4, adap->sge.ethqsets - 4 * r); + + S("QType:", "Ethernet"); + S("Interface:", + rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); +#ifdef CONFIG_CHELSIO_T4_DCB + T("DCB Prio:", dcb_prio); + S3("u", "DCB PGID:", + (ethqset2pinfo(adap, base_qset + i)->dcb.pgid >> + 4*(7-tx[i].dcb_prio)) & 0xf); + S3("u", "DCB PFC:", + (ethqset2pinfo(adap, base_qset + i)->dcb.pfcen >> + 1*(7-tx[i].dcb_prio)) & 0x1); +#endif + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", + adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + } else if (toe_idx < toe_entries) { + const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4]; + const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4]; + int n = min(4, adap->sge.ofldqsets - 4 * toe_idx); + + S("QType:", "TOE"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", + adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + } else if (rdma_idx < rdma_entries) { + const struct sge_ofld_rxq *rx = + &adap->sge.rdmarxq[rdma_idx * 4]; + int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx); + + S("QType:", "RDMA-CPL"); + S("Interface:", + rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", + adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + } else if (ciq_idx < ciq_entries) { + const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4]; + int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx); + + S("QType:", "RDMA-CIQ"); + S("Interface:", + rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", + adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + } else if (ctrl_idx < ctrl_entries) { + const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4]; + int n = min(4, adap->params.nports - 4 * ctrl_idx); + + S("QType:", "Control"); + T("TxQ ID:", q.cntxt_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ CIDX:", q.cidx); + T("TxQ PIDX:", q.pidx); + } else if (fq_idx == 0) { + const struct sge_rspq *evtq = &adap->sge.fw_evtq; + + seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); + seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); + seq_printf(seq, "%-12s %16u\n", "RspQ size:", evtq->size); + seq_printf(seq, "%-12s %16u\n", "RspQE size:", evtq->iqe_len); + seq_printf(seq, "%-12s %16u\n", "RspQ CIDX:", evtq->cidx); + seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen); + seq_printf(seq, "%-12s %16u\n", "Intr delay:", + qtimer_val(adap, evtq)); + seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", + adap->sge.counter_val[evtq->pktcnt_idx]); + } +#undef R +#undef T +#undef S +#undef S3 +return 0; +} + +static int sge_queue_entries(const struct adapter *adap) +{ + return DIV_ROUND_UP(adap->sge.ethqsets, 4) + + DIV_ROUND_UP(adap->sge.ofldqsets, 4) + + DIV_ROUND_UP(adap->sge.rdmaqs, 4) + + DIV_ROUND_UP(adap->sge.rdmaciqs, 4) + + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; +} + +static void *sge_queue_start(struct seq_file *seq, loff_t *pos) +{ + int entries = sge_queue_entries(seq->private); + + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static void sge_queue_stop(struct seq_file *seq, void *v) +{ +} + +static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos) +{ + int entries = sge_queue_entries(seq->private); + + ++*pos; + return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; +} + +static const struct seq_operations sge_qinfo_seq_ops = { + .start = sge_queue_start, + .next = sge_queue_next, + .stop = sge_queue_stop, + .show = sge_qinfo_show +}; + +static int sge_qinfo_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &sge_qinfo_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations sge_qinfo_debugfs_fops = { + .owner = THIS_MODULE, + .open = sge_qinfo_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +int mem_open(struct inode *inode, struct file *file) +{ + unsigned int mem; + struct adapter *adap; + + file->private_data = inode->i_private; + + mem = (uintptr_t)file->private_data & 0x3; + adap = file->private_data - mem; + + (void)t4_fwcache(adap, FW_PARAM_DEV_FWCACHE_FLUSH); + + return 0; +} + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file_inode(file)->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + __be32 *data; + int ret; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + data = t4_alloc_mem(count); + if (!data) + return -ENOMEM; + + spin_lock(&adap->win0_lock); + ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); + spin_unlock(&adap->win0_lock); + if (ret) { + t4_free_mem(data); + return ret; + } + ret = copy_to_user(buf, data, count); + + t4_free_mem(data); + if (ret) + return -EFAULT; + + *ppos = pos + count; + return count; +} +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mem_read, + .llseek = default_llseek, +}; + +static void add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + debugfs_create_file_size(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops, + size_mb << 20); +} + +/* Add an array of Debug FS files. 
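+ * Each file's i_private is the adapter pointer plus the entry's small data
+ * byte, letting the open routines recover both the adapter and a per-file index.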
+ */ +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles) +{ + int i; + + /* debugfs support is best effort */ + for (i = 0; i < nfiles; i++) + debugfs_create_file(files[i].name, files[i].mode, + adap->debugfs_root, + (void *)adap + files[i].data, + files[i].ops); +} + +int t4_setup_debugfs(struct adapter *adap) +{ + int i; + u32 size; + struct dentry *de; + + static struct t4_debugfs_entry t4_debugfs_files[] = { + { "cim_la", &cim_la_fops, S_IRUSR, 0 }, + { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 }, + { "clk", &clk_debugfs_fops, S_IRUSR, 0 }, + { "devlog", &devlog_fops, S_IRUSR, 0 }, + { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, + { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, + { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, + { "mbox3", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 3 }, + { "mbox4", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 4 }, + { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 }, + { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 }, + { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 }, + { "l2t", &t4_l2t_fops, S_IRUSR, 0}, + { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 }, + { "rss", &rss_debugfs_fops, S_IRUSR, 0 }, + { "rss_config", &rss_config_debugfs_fops, S_IRUSR, 0 }, + { "rss_key", &rss_key_debugfs_fops, S_IRUSR, 0 }, + { "rss_pf_config", &rss_pf_config_debugfs_fops, S_IRUSR, 0 }, + { "rss_vf_config", &rss_vf_config_debugfs_fops, S_IRUSR, 0 }, + { "sge_qinfo", &sge_qinfo_debugfs_fops, S_IRUSR, 0 }, + { "ibq_tp0", &cim_ibq_fops, S_IRUSR, 0 }, + { "ibq_tp1", &cim_ibq_fops, S_IRUSR, 1 }, + { "ibq_ulp", &cim_ibq_fops, S_IRUSR, 2 }, + { "ibq_sge0", &cim_ibq_fops, S_IRUSR, 3 }, + { "ibq_sge1", &cim_ibq_fops, S_IRUSR, 4 }, + { "ibq_ncsi", &cim_ibq_fops, S_IRUSR, 5 }, + { "obq_ulp0", &cim_obq_fops, S_IRUSR, 0 }, + { "obq_ulp1", &cim_obq_fops, S_IRUSR, 1 }, + { "obq_ulp2", &cim_obq_fops, S_IRUSR, 2 }, + { "obq_ulp3", &cim_obq_fops, S_IRUSR, 3 }, + { "obq_sge", &cim_obq_fops, S_IRUSR, 4 }, + { "obq_ncsi", &cim_obq_fops, S_IRUSR, 5 }, + { "tp_la", &tp_la_fops, S_IRUSR, 0 }, + { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 }, + { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 }, + { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 }, + { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 }, +#if IS_ENABLED(CONFIG_IPV6) + { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, +#endif + }; + + /* Debug FS nodes common to all T5 and later adapters. 
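+ * T5 adds two SGE-RX CIM OBQs, exposed here as obq_sge_rx_q0 and obq_sge_rx_q1.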
+ */ + static struct t4_debugfs_entry t5_debugfs_files[] = { + { "obq_sge_rx_q0", &cim_obq_fops, S_IRUSR, 6 }, + { "obq_sge_rx_q1", &cim_obq_fops, S_IRUSR, 7 }, + }; + + add_debugfs_files(adap, + t4_debugfs_files, + ARRAY_SIZE(t4_debugfs_files)); + if (!is_t4(adap->params.chip)) + add_debugfs_files(adap, + t5_debugfs_files, + ARRAY_SIZE(t5_debugfs_files)); + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (i & EDRAM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM0_SIZE_G(size)); + } + if (i & EDRAM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size)); + } + if (is_t4(adap->params.chip)) { + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); + if (i & EXT_MEM_ENABLE_F) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_G(size)); + } else { + if (i & EXT_MEM0_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + add_debugfs_mem(adap, "mc0", MEM_MC0, + EXT_MEM0_SIZE_G(size)); + } + if (i & EXT_MEM1_ENABLE_F) { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + add_debugfs_mem(adap, "mc1", MEM_MC1, + EXT_MEM1_SIZE_G(size)); + } + } + + de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, + &flash_debugfs_fops, adap->params.sf_size); + + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h new file mode 100644 index 000000000..23f43a0f8 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h @@ -0,0 +1,83 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_DEBUGFS_H +#define __CXGB4_DEBUGFS_H + +#include + +#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \ +static int name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, name##_show, inode->i_private); \ +} \ +static const struct file_operations name##_debugfs_fops = { \ + .owner = THIS_MODULE, \ + .open = name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release \ +} + +struct t4_debugfs_entry { + const char *name; + const struct file_operations *ops; + umode_t mode; + unsigned char data; +}; + +struct seq_tab { + int (*show)(struct seq_file *seq, void *v, int idx); + unsigned int rows; /* # of entries */ + unsigned char width; /* size in bytes of each entry */ + unsigned char skip_first; /* whether the first line is a header */ + char data[0]; /* the table data */ +}; + +static inline unsigned int hex2val(char c) +{ + return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10; +} + +struct seq_tab *seq_open_tab(struct file *f, unsigned int rows, + unsigned int width, unsigned int have_header, + int (*show)(struct seq_file *seq, void *v, int i)); + +int t4_setup_debugfs(struct adapter *adap); +void add_debugfs_files(struct adapter *adap, + struct t4_debugfs_entry *files, + unsigned int nfiles); +int mem_open(struct inode *inode, struct file *file); + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c new file mode 100644 index 000000000..10d82b51d --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -0,0 +1,915 @@ +/* + * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include +#include + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" + +#define EEPROM_MAGIC 0x38E2F10C + +static u32 get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + netdev2adap(dev)->msg_enable = val; +} + +static const char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxBroadcastFrames ", + "TxMulticastFrames ", + "TxUnicastFrames ", + "TxErrorFrames ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "TxFramesDropped ", + "TxPauseFrames ", + "TxPPP0Frames ", + "TxPPP1Frames ", + "TxPPP2Frames ", + "TxPPP3Frames ", + "TxPPP4Frames ", + "TxPPP5Frames ", + "TxPPP6Frames ", + "TxPPP7Frames ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxBroadcastFrames ", + "RxMulticastFrames ", + "RxUnicastFrames ", + + "RxFramesTooLong ", + "RxJabberErrors ", + "RxFCSErrors ", + "RxLengthErrors ", + "RxSymbolErrors ", + "RxRuntFrames ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "RxPauseFrames ", + "RxPPP0Frames ", + "RxPPP1Frames ", + "RxPPP2Frames ", + "RxPPP3Frames ", + "RxPPP4Frames ", + "RxPPP5Frames ", + "RxPPP6Frames ", + "RxPPP7Frames ", + + "RxBG0FramesDropped ", + "RxBG1FramesDropped ", + "RxBG2FramesDropped ", + "RxBG3FramesDropped ", + "RxBG0FramesTrunc ", + "RxBG1FramesTrunc ", + "RxBG2FramesTrunc ", + "RxBG3FramesTrunc ", + + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", + "GROpackets ", + "GROmerged ", + "WriteCoalSuccess ", + "WriteCoalFail ", +}; + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +static int get_regs_len(struct net_device *dev) +{ + struct adapter *adap = netdev2adap(dev); + + return t4_get_regs_len(adap); +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + u32 exprom_vers; + + strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver)); + strlcpy(info->version, cxgb4_driver_version, + sizeof(info->version)); + strlcpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); + + if (adapter->params.fw_vers) + snprintf(info->fw_version, sizeof(info->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); + + if (!t4_get_exprom_version(adapter, &exprom_vers)) + snprintf(info->erom_version, sizeof(info->erom_version), + "%u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_G(exprom_vers), + FW_HDR_FW_VER_MINOR_G(exprom_vers), + FW_HDR_FW_VER_MICRO_G(exprom_vers), + FW_HDR_FW_VER_BUILD_G(exprom_vers)); +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +/* port 
stats maintained per queue of the port. They should be in the same + * order as in stats_strings above. + */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; + u64 gro_pkts; + u64 gro_merged; +}; + +static void collect_sge_port_stats(const struct adapter *adap, + const struct port_info *p, + struct queue_port_stats *s) +{ + int i; + const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; + const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < p->nqsets; i++, rx++, tx++) { + s->tso += tx->tso; + s->tx_csum += tx->tx_cso; + s->rx_csum += rx->stats.rx_cso; + s->vlan_ex += rx->stats.vlan_ex; + s->vlan_ins += tx->vlan_ins; + s->gro_pkts += rx->stats.lro_pkts; + s->gro_merged += rx->stats.lro_merged; + } +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u32 val1, val2; + + t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + + data += sizeof(struct port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); + data += sizeof(struct queue_port_stats) / sizeof(u64); + if (!is_t4(adapter->params.chip)) { + t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7)); + val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A); + val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A); + *data = val1 - val2; + data++; + *data = val2; + data++; + } else { + memset(data, 0, 2 * sizeof(u64)); + *data += 2; + } +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct adapter *adap = netdev2adap(dev); + size_t buf_size; + + buf_size = t4_get_regs_len(adap); + regs->version = mk_adap_vers(adap); + t4_get_regs(adap, buf, buf_size); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_cfg.autoneg != AUTONEG_ENABLE) + return -EINVAL; + t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); + return 0; +} + +static int identify_port(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + unsigned int val; + struct adapter *adap = netdev2adap(dev); + + if (state == ETHTOOL_ID_ACTIVE) + val = 0xffff; + else if (state == ETHTOOL_ID_INACTIVE) + val = 0; + else + return -EINVAL; + + return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); +} + +static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) +{ + unsigned int v = 0; + + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || + type == FW_PORT_TYPE_BT_XAUI) { + v |= SUPPORTED_TP; + if (caps & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { + v |= SUPPORTED_Backplane; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseKX_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_KR) { + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; + } else if (type == FW_PORT_TYPE_BP_AP) { + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; + } else if (type == FW_PORT_TYPE_BP4_AP) { + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + 
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | + SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_FIBER_XFI || + type == FW_PORT_TYPE_FIBER_XAUI || + type == FW_PORT_TYPE_SFP || + type == FW_PORT_TYPE_QSFP_10G || + type == FW_PORT_TYPE_QSA) { + v |= SUPPORTED_FIBRE; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_BP40_BA || + type == FW_PORT_TYPE_QSFP) { + v |= SUPPORTED_40000baseSR4_Full; + v |= SUPPORTED_FIBRE; + } + + if (caps & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + return v; +} + +static unsigned int to_fw_linkcaps(unsigned int caps) +{ + unsigned int v = 0; + + if (caps & ADVERTISED_100baseT_Full) + v |= FW_PORT_CAP_SPEED_100M; + if (caps & ADVERTISED_1000baseT_Full) + v |= FW_PORT_CAP_SPEED_1G; + if (caps & ADVERTISED_10000baseT_Full) + v |= FW_PORT_CAP_SPEED_10G; + if (caps & ADVERTISED_40000baseSR4_Full) + v |= FW_PORT_CAP_SPEED_40G; + return v; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + const struct port_info *p = netdev_priv(dev); + + if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XFI || + p->port_type == FW_PORT_TYPE_BT_XAUI) { + cmd->port = PORT_TP; + } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || + p->port_type == FW_PORT_TYPE_FIBER_XAUI) { + cmd->port = PORT_FIBRE; + } else if (p->port_type == FW_PORT_TYPE_SFP || + p->port_type == FW_PORT_TYPE_QSFP_10G || + p->port_type == FW_PORT_TYPE_QSA || + p->port_type == FW_PORT_TYPE_QSFP) { + if (p->mod_type == FW_PORT_MOD_TYPE_LR || + p->mod_type == FW_PORT_MOD_TYPE_SR || + p->mod_type == FW_PORT_MOD_TYPE_ER || + p->mod_type == FW_PORT_MOD_TYPE_LRM) + cmd->port = PORT_FIBRE; + else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + cmd->port = PORT_DA; + else + cmd->port = PORT_OTHER; + } else { + cmd->port = PORT_OTHER; + } + + if (p->mdio_addr >= 0) { + cmd->phy_address = p->mdio_addr; + cmd->transceiver = XCVR_EXTERNAL; + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + } else { + cmd->phy_address = 0; /* not really, but no better option */ + cmd->transceiver = XCVR_INTERNAL; + cmd->mdio_support = 0; + } + + cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); + cmd->advertising = from_fw_linkcaps(p->port_type, + p->link_cfg.advertising); + ethtool_cmd_speed_set(cmd, + netif_carrier_ok(dev) ? p->link_cfg.speed : 0); + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = p->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static unsigned int speed_to_caps(int speed) +{ + if (speed == 100) + return FW_PORT_CAP_SPEED_100M; + if (speed == 1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == 10000) + return FW_PORT_CAP_SPEED_10G; + if (speed == 40000) + return FW_PORT_CAP_SPEED_40G; + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + unsigned int cap; + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + u32 speed = ethtool_cmd_speed(cmd); + + if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + return -EINVAL; + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + /* PHY offers a single speed. See if that's what's + * being requested. 
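+ * For example, requesting the port's one supported speed with
+ * autoneg disabled is accepted as a no-op, while any other speed
+ * (or an attempt to enable autoneg) fails with -EINVAL below.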
+ */ + if (cmd->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_caps(speed))) + return 0; + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + cap = speed_to_caps(speed); + + if (!(lc->supported & cap) || + (speed == 1000) || + (speed == 10000) || + (speed == 40000)) + return -EINVAL; + lc->requested_speed = cap; + lc->advertising = 0; + } else { + cap = to_fw_linkcaps(cmd->advertising); + if (!(lc->supported & cap)) + return -EINVAL; + lc->requested_speed = 0; + lc->advertising = cap | FW_PORT_CAP_ANEG; + } + lc->autoneg = cmd->autoneg; + + if (netif_running(dev)) + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & FW_PORT_CAP_ANEG) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (netif_running(dev)) + return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + lc); + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + e->rx_jumbo_max_pending = 0; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; + e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + e->rx_jumbo_pending = 0; + e->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + int i; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (i = 0; i < pi->nqsets; ++i) { + s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; + s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; + s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; + } + return 0; +} + +/** + * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete! + * @dev: the network device + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Set the RX interrupt hold-off parameters for a network device. 
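+ * Both values normally come from the ethtool coalescing path, e.g.
+ * "ethtool -C <iface> rx-usecs 50 rx-frames 8" reaches this function
+ * through set_coalesce() with us == 50 and cnt == 8.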
+ */ +static int set_rx_intr_params(struct net_device *dev, + unsigned int us, unsigned int cnt) +{ + int i, err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (i = 0; i < pi->nqsets; i++, q++) { + err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt); + if (err) + return err; + } + return 0; +} + +static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx) +{ + int i; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (i = 0; i < pi->nqsets; i++, q++) + q->rspq.adaptive_rx = adaptive_rx; + + return 0; +} + +static int get_adaptive_rx_setting(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + return q->rspq.adaptive_rx; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); + return set_rx_intr_params(dev, c->rx_coalesce_usecs, + c->rx_max_coalesced_frames); +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; + + c->rx_coalesce_usecs = qtimer_val(adap, rq); + c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? + adap->sge.counter_val[rq->pktcnt_idx] : 0; + c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); + return 0; +} + +/** + * eeprom_ptov - translate a physical EEPROM address to virtual + * @phys_addr: the physical EEPROM address + * @fn: the PCI function number + * @sz: size of function-specific area + * + * Translate a physical EEPROM address to virtual. The first 1K is + * accessed through virtual addresses starting at 31K, the rest is + * accessed through virtual addresses starting at 0. + * + * The mapping is as follows: + * [0..1K) -> [31K..32K) + * [1K..1K+A) -> [31K-A..31K) + * [1K+A..ES) -> [0..ES-A-1K) + * + * where A = @fn * @sz, and ES = EEPROM size. + */ +static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) +{ + fn *= sz; + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < 1024 + fn) + return 31744 - fn + phys_addr - 1024; + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024 - fn; + return -EINVAL; +} + +/* The next two routines implement eeprom read/write from physical addresses. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); + return vaddr < 0 ? vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); + return vaddr < 0 ? 
vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i, err = 0; + struct adapter *adapter = netdev2adap(dev); + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + struct adapter *adapter = netdev2adap(dev); + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (adapter->fn > 0) { + u32 start = 1024 + adapter->fn * EEPROMPFSIZE; + + if (aligned_offset < start || + aligned_offset + aligned_len > start + EEPROMPFSIZE) + return -EPERM; + } + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + /* RMW possibly needed for first or last words. + */ + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else { + buf = data; + } + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != data) + kfree(buf); + return err; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct adapter *adap = netdev2adap(netdev); + unsigned int mbox = PCIE_FW_MASTER_M + 1; + + ef->data[sizeof(ef->data) - 1] = '\0'; + ret = request_firmware(&fw, ef->data, adap->pdev_dev); + if (ret < 0) + return ret; + + /* If the adapter has been fully initialized then we'll go ahead and + * try to get the firmware's cooperation in upgrading to the new + * firmware image otherwise we'll try to do the entire job from the + * host ... and we always "force" the operation in this path. + */ + if (adap->flags & FULL_INIT_DONE) + mbox = adap->mbox; + + ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); + release_firmware(fw); + if (!ret) + dev_info(adap->pdev_dev, + "loaded firmware %s, reload cxgb4 driver\n", ef->data); + return ret; +} + +#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) +#define BCAST_CRC 0xa0ccc1a6 + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = netdev2adap(dev)->wol; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + int err = 0; + struct port_info *pi = netdev_priv(dev); + + if (wol->wolopts & ~WOL_SUPPORTED) + return -EINVAL; + t4_wol_magic_enable(pi->adapter, pi->tx_chan, + (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); + if (wol->wolopts & WAKE_BCAST) { + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, + ~0ULL, 0, false); + if (!err) + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, + ~6ULL, ~0ULL, BCAST_CRC, true); + } else { + t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); + } + return err; +} + +static u32 get_rss_table_size(struct net_device *dev) +{ + const struct port_info *pi = netdev_priv(dev); + + return pi->rss_size; +} + +static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc) +{ + const struct port_info *pi = netdev_priv(dev); + unsigned int n = pi->rss_size; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!p) + return 0; + while (n--) + p[n] = pi->rss[n]; + return 0; +} + +static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, + const u8 hfunc) +{ + unsigned int i; + struct port_info *pi = netdev_priv(dev); + + /* We require at least one supported parameter to be changed and no + * change in any of the unsupported parameters + */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!p) + return 0; + + for (i = 0; i < pi->rss_size; i++) + pi->rss[i] = p[i]; + if (pi->adapter->flags & FULL_INIT_DONE) + return cxgb4_write_rss(pi, pi->rss); + return 0; +} + +static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules) +{ + const struct port_info *pi = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + unsigned int v = pi->rss_mode; + + info->data = 0; + switch (info->flow_type) { + case TCP_V4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V4_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) && + (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F)) + info->data = RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) + info->data = RXH_IP_SRC | RXH_IP_DST; + break; + } + return 0; + } + case ETHTOOL_GRXRINGS: + info->data = pi->nqsets; + return 0; + } + return -EOPNOTSUPP; +} + +static const struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + 
.set_pauseparam = set_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .set_phys_id = identify_port, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, + .set_wol = set_wol, + .get_rxnfc = get_rxnfc, + .get_rxfh_indir_size = get_rss_table_size, + .get_rxfh = get_rss_table, + .set_rxfh = set_rss_table, + .flash_device = set_flash, +}; + +void cxgb4_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &cxgb_ethtool_ops; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c new file mode 100644 index 000000000..6c8a62eef --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c @@ -0,0 +1,122 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifdef CONFIG_CHELSIO_T4_FCOE + +#include +#include +#include "cxgb4.h" + +bool cxgb_fcoe_sof_eof_supported(struct adapter *adap, struct sk_buff *skb) +{ + struct fcoe_hdr *fcoeh = (struct fcoe_hdr *)skb_network_header(skb); + u8 sof = fcoeh->fcoe_sof; + u8 eof = 0; + + if ((sof != FC_SOF_I3) && (sof != FC_SOF_N3)) { + dev_err(adap->pdev_dev, "Unsupported SOF 0x%x\n", sof); + return false; + } + + skb_copy_bits(skb, skb->len - 4, &eof, 1); + + if ((eof != FC_EOF_N) && (eof != FC_EOF_T)) { + dev_err(adap->pdev_dev, "Unsupported EOF 0x%x\n", eof); + return false; + } + + return true; +} + +/** + * cxgb_fcoe_enable - enable FCoE offload features + * @netdev: net device + * + * Returns 0 on success or -EINVAL on failure. 
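+ * FCoE offload is refused on T4 parts (is_t4()) and on adapters that
+ * have not yet reached FULL_INIT_DONE; both checks below return
+ * -EINVAL.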
+ */ +int cxgb_fcoe_enable(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + struct cxgb_fcoe *fcoe = &pi->fcoe; + + if (is_t4(adap->params.chip)) + return -EINVAL; + + if (!(adap->flags & FULL_INIT_DONE)) + return -EINVAL; + + dev_info(adap->pdev_dev, "Enabling FCoE offload features\n"); + + netdev->features |= NETIF_F_FCOE_CRC; + netdev->vlan_features |= NETIF_F_FCOE_CRC; + netdev->features |= NETIF_F_FCOE_MTU; + netdev->vlan_features |= NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + fcoe->flags |= CXGB_FCOE_ENABLED; + + return 0; +} + +/** + * cxgb_fcoe_disable - disable FCoE offload + * @netdev: net device + * + * Returns 0 on success or -EINVAL on failure. + */ +int cxgb_fcoe_disable(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + struct cxgb_fcoe *fcoe = &pi->fcoe; + + if (!(fcoe->flags & CXGB_FCOE_ENABLED)) + return -EINVAL; + + dev_info(adap->pdev_dev, "Disabling FCoE offload features\n"); + + fcoe->flags &= ~CXGB_FCOE_ENABLED; + + netdev->features &= ~NETIF_F_FCOE_CRC; + netdev->vlan_features &= ~NETIF_F_FCOE_CRC; + netdev->features &= ~NETIF_F_FCOE_MTU; + netdev->vlan_features &= ~NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + return 0; +} +#endif /* CONFIG_CHELSIO_T4_FCOE */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h new file mode 100644 index 000000000..bf9258a56 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.h @@ -0,0 +1,57 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_FCOE_H__ +#define __CXGB4_FCOE_H__ + +#ifdef CONFIG_CHELSIO_T4_FCOE + +#define CXGB_FCOE_TXPKT_CSUM_START 28 +#define CXGB_FCOE_TXPKT_CSUM_END 8 + +/* fcoe flags */ +enum { + CXGB_FCOE_ENABLED = (1 << 0), +}; + +struct cxgb_fcoe { + u8 flags; +}; + +int cxgb_fcoe_enable(struct net_device *); +int cxgb_fcoe_disable(struct net_device *); +bool cxgb_fcoe_sof_eof_supported(struct adapter *, struct sk_buff *); + +#endif /* CONFIG_CHELSIO_T4_FCOE */ +#endif /* __CXGB4_FCOE_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c new file mode 100644 index 000000000..2b4328b33 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -0,0 +1,4826 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_values.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "t4fw_version.h" +#include "cxgb4_dcb.h" +#include "cxgb4_debugfs.h" +#include "clip_tbl.h" +#include "l2t.h" + +char cxgb4_driver_name[] = KBUILD_MODNAME; + +#ifdef DRV_VERSION +#undef DRV_VERSION +#endif +#define DRV_VERSION "2.0.0-ko" +const char cxgb4_driver_version[] = DRV_VERSION; +#define DRV_DESC "Chelsio T4/T5 Network Driver" + +/* Host shadow copy of ingress filter entry. This is in host native format + * and doesn't match the ordering or bit order, etc. of the hardware of the + * firmware command. The use of bit-field structure elements is purely to + * remind ourselves of the field size limitations and save memory in the case + * where the filter table is large. + */ +struct filter_entry { + /* Administrative fields for filter. 
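+ * These flags and the L2T pointer are driver-side bookkeeping only;
+ * the filter specification itself is kept in the fs member below.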
+ */ + u32 valid:1; /* filter allocated and valid */ + u32 locked:1; /* filter is administratively locked */ + + u32 pending:1; /* filter action is pending firmware reply */ + u32 smtidx:8; /* Source MAC Table index for smac */ + struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ + + /* The filter itself. Most of this is a straight copy of information + * provided by the extended ioctl(). Some fields are translated to + * internal forms -- for instance the Ingress Queue ID passed in from + * the ioctl() is translated into the Absolute Ingress Queue ID. + */ + struct ch_filter_specification fs; +}; + +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +/* Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct pci_device_id cxgb4_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x4 + +/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is + * called for both. + */ +#define CH_PCI_DEVICE_ID_FUNCTION2 0x0 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + {PCI_VDEVICE(CHELSIO, (devid)), 4} + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \ + { 0, } \ + } + +#include "t4_pci_id_tbl.h" + +#define FW4_FNAME "/*(DEBLOBBED)*/" +#define FW5_FNAME "/*(DEBLOBBED)*/" +#define FW4_CFNAME "cxgb4/t4-config.txt" +#define FW5_CFNAME "cxgb4/t5-config.txt" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); +/*(DEBLOBBED)*/ + +/* + * Normally we're willing to become the firmware's Master PF but will be happy + * if another PF has already become the Master and initialized the adapter. + * Setting "force_init" will cause this driver to forcibly establish itself as + * the Master PF and initialize the adapter. + */ +static uint force_init; + +module_param(force_init, uint, 0644); +MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter"); + +/* + * Normally if the firmware we connect to has Configuration File support, we + * use that and only fall back to the old Driver-based initialization if the + * Configuration File fails for some reason. If force_old_init is set, then + * we'll always use the old Driver-based initialization sequence. + */ +static uint force_old_init; + +module_param(force_old_init, uint, 0644); +MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated" + " parameter"); + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which + * of these schemes the driver may consider as follows: + * + * msi = 2: choose from among all three options + * msi = 1: only consider MSI and INTx interrupts + * msi = 0: force INTx interrupts + */ +static int msi = 2; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); + +/* + * Queue interrupt hold-off timer values. Queues default to the first of these + * upon creation. 
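+ * The values (in microseconds) can be overridden at load time, e.g.
+ * "modprobe cxgb4 intr_holdoff=2,10,20,50,100", although the
+ * parameter is marked deprecated in its description below.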
+ */ +static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; + +module_param_array(intr_holdoff, uint, NULL, 0644); +MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " + "0..4 in microseconds, deprecated parameter"); + +static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; + +module_param_array(intr_cnt, uint, NULL, 0644); +MODULE_PARM_DESC(intr_cnt, + "thresholds 1..3 for queue interrupt packet counters, " + "deprecated parameter"); + +/* + * Normally we tell the chip to deliver Ingress Packets into our DMA buffers + * offset by 2 bytes in order to have the IP headers line up on 4-byte + * boundaries. This is a requirement for many architectures which will throw + * a machine check fault if an attempt is made to access one of the 4-byte IP + * header fields on a non-4-byte boundary. And it's a major performance issue + * even on some architectures which allow it like some implementations of the + * x86 ISA. However, some architectures don't mind this and for some very + * edge-case performance sensitive applications (like forwarding large volumes + * of small packets), setting this DMA offset to 0 will decrease the number of + * PCI-E Bus transfers enough to measurably affect performance. + */ +static int rx_dma_offset = 2; + +static bool vf_acls; + +#ifdef CONFIG_PCI_IOV +module_param(vf_acls, bool, 0644); +MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, " + "deprecated parameter"); + +/* Configure the number of PCI-E Virtual Function which are to be instantiated + * on SR-IOV Capable Physical Functions. + */ +static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV]; + +module_param_array(num_vf, uint, NULL, 0644); +MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); +#endif + +/* TX Queue select used to determine what algorithm to use for selecting TX + * queue. Select between the kernel provided function (select_queue=0) or user + * cxgb_select_queue function (select_queue=1) + * + * Default: select_queue=0 + */ +static int select_queue; +module_param(select_queue, int, 0644); +MODULE_PARM_DESC(select_queue, + "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method."); + +static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC; + +module_param(tp_vlan_pri_map, uint, 0644); +MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, " + "deprecated parameter"); + +static struct dentry *cxgb4_debugfs_root; + +static LIST_HEAD(adapter_list); +static DEFINE_MUTEX(uld_mutex); +/* Adapter list to be accessed from atomic context */ +static LIST_HEAD(adap_rcu_list); +static DEFINE_SPINLOCK(adap_rcu_lock); +static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; +static const char *uld_str[] = { "RDMA", "iSCSI" }; + +static void link_report(struct net_device *dev) +{ + if (!netif_carrier_ok(dev)) + netdev_info(dev, "link down\n"); + else { + static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" }; + + const char *s = "10Mbps"; + const struct port_info *p = netdev_priv(dev); + + switch (p->link_cfg.speed) { + case 10000: + s = "10Gbps"; + break; + case 1000: + s = "1000Mbps"; + break; + case 100: + s = "100Mbps"; + break; + case 40000: + s = "40Gbps"; + break; + } + + netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, + fc[p->link_cfg.fc]); + } +} + +#ifdef CONFIG_CHELSIO_T4_DCB +/* Set up/tear down Data Center Bridging Priority mapping for a net device. 
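+ * Called with enable set when DCB becomes active on a port (see
+ * dcb_rpl() below) and with enable clear on link-down or when DCB is
+ * turned off again, in which case the priority is reset to 0xffffffff.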
*/ +static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; + int i; + + /* We use a simple mapping of Port TX Queue Index to DCB + * Priority when we're enabling DCB. + */ + for (i = 0; i < pi->nqsets; i++, txq++) { + u32 name, value; + int err; + + name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | + FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); + value = enable ? i : 0xffffffff; + + /* Since we can be called while atomic (from "interrupt + * level") we need to issue the Set Parameters Commannd + * without sleeping (timeout < 0). + */ + err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1, + &name, &value); + + if (err) + dev_err(adap->pdev_dev, + "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", + enable ? "set" : "unset", pi->port_id, i, -err); + else + txq->dcb_prio = value; + } +} +#endif /* CONFIG_CHELSIO_T4_DCB */ + +void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) +{ + struct net_device *dev = adapter->port[port_id]; + + /* Skip changes from disabled ports. */ + if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) { + if (link_stat) + netif_carrier_on(dev); + else { +#ifdef CONFIG_CHELSIO_T4_DCB + cxgb4_dcb_state_init(dev); + dcb_tx_queue_prio_enable(dev, false); +#endif /* CONFIG_CHELSIO_T4_DCB */ + netif_carrier_off(dev); + } + + link_report(dev); + } +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id) +{ + static const char *mod_str[] = { + NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" + }; + + const struct net_device *dev = adap->port[port_id]; + const struct port_info *pi = netdev_priv(dev); + + if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) + netdev_info(dev, "port module unplugged\n"); + else if (pi->mod_type < ARRAY_SIZE(mod_str)) + netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); +} + +/* + * Configure the exact and hash address filters to handle a port's multicast + * and secondary unicast MAC addresses. 
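+ * Addresses are handed to the firmware in batches of up to 7 (the
+ * size of the local addr[] array); those that cannot be given an
+ * exact filter are reported back through the uhash/mhash bitmaps and
+ * programmed via t4_set_addr_hash() at the end.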
+ */ +static int set_addr_filters(const struct net_device *dev, bool sleep) +{ + u64 mhash = 0; + u64 uhash = 0; + bool free = true; + u16 filt_idx[7]; + const u8 *addr[7]; + int ret, naddr = 0; + const struct netdev_hw_addr *ha; + int uc_cnt = netdev_uc_count(dev); + int mc_cnt = netdev_mc_count(dev); + const struct port_info *pi = netdev_priv(dev); + unsigned int mb = pi->adapter->fn; + + /* first do the secondary unicast addresses */ + netdev_for_each_uc_addr(ha, dev) { + addr[naddr++] = ha->addr; + if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { + ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, + naddr, addr, filt_idx, &uhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + /* next set up the multicast addresses */ + netdev_for_each_mc_addr(ha, dev) { + addr[naddr++] = ha->addr; + if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { + ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, + naddr, addr, filt_idx, &mhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0, + uhash | mhash, sleep); +} + +int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ +module_param(dbfifo_int_thresh, int, 0644); +MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); + +/* + * usecs to sleep while draining the dbfifo + */ +static int dbfifo_drain_delay = 1000; +module_param(dbfifo_drain_delay, int, 0644); +MODULE_PARM_DESC(dbfifo_drain_delay, + "usecs to sleep while draining the dbfifo"); + +/* + * Set Rx properties of a port, such as promiscruity, address filters, and MTU. + * If @mtu is -1 it is left unchanged. + */ +static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + ret = set_addr_filters(dev, sleep_ok); + if (ret == 0) + ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu, + (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, + sleep_ok); + return ret; +} + +/** + * link_start - enable a port + * @dev: the port to enable + * + * Performs the MAC and PHY actions needed to enable a port. + */ +static int link_start(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + unsigned int mb = pi->adapter->fn; + + /* + * We do not set address filters and promiscuity here, the stack does + * that step explicitly. + */ + ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, + !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); + if (ret == 0) { + ret = t4_change_mac(pi->adapter, mb, pi->viid, + pi->xact_addr_filt, dev->dev_addr, true, + true); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + if (ret == 0) + ret = t4_link_start(pi->adapter, mb, pi->tx_chan, + &pi->link_cfg); + if (ret == 0) { + local_bh_disable(); + ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true, + true, CXGB4_DCB_ENABLED); + local_bh_enable(); + } + + return ret; +} + +int cxgb4_dcb_enabled(const struct net_device *dev) +{ +#ifdef CONFIG_CHELSIO_T4_DCB + struct port_info *pi = netdev_priv(dev); + + if (!pi->dcb.enabled) + return 0; + + return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || + (pi->dcb.state == CXGB4_DCB_STATE_HOST)); +#else + return 0; +#endif +} +EXPORT_SYMBOL(cxgb4_dcb_enabled); + +#ifdef CONFIG_CHELSIO_T4_DCB +/* Handle a Data Center Bridging update message from the firmware. 
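+ * Reached from fwevtq_handler() when a FW_PORT_CMD with action
+ * FW_PORT_ACTION_L2_DCB_CFG arrives on the firmware event queue.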
*/ +static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) +{ + int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); + struct net_device *dev = adap->port[port]; + int old_dcb_enabled = cxgb4_dcb_enabled(dev); + int new_dcb_enabled; + + cxgb4_dcb_handle_fw_update(adap, pcmd); + new_dcb_enabled = cxgb4_dcb_enabled(dev); + + /* If the DCB has become enabled or disabled on the port then we're + * going to need to set up/tear down DCB Priority parameters for the + * TX Queues associated with the port. + */ + if (new_dcb_enabled != old_dcb_enabled) + dcb_tx_queue_prio_enable(dev, new_dcb_enabled); +} +#endif /* CONFIG_CHELSIO_T4_DCB */ + +/* Clear a filter and release any of its resources that we own. This also + * clears the filter's "pending" status. + */ +static void clear_filter(struct adapter *adap, struct filter_entry *f) +{ + /* If the new or old filter have loopback rewriteing rules then we'll + * need to free any existing Layer Two Table (L2T) entries of the old + * filter rule. The firmware will handle freeing up any Source MAC + * Table (SMT) entries used for rewriting Source MAC Addresses in + * loopback rules. + */ + if (f->l2t) + cxgb4_l2t_release(f->l2t); + + /* The zeroing of the filter rule below clears the filter valid, + * pending, locked flags, l2t pointer, etc. so it's all we need for + * this operation. + */ + memset(f, 0, sizeof(*f)); +} + +/* Handle a filter write/deletion reply. + */ +static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) +{ + unsigned int idx = GET_TID(rpl); + unsigned int nidx = idx - adap->tids.ftid_base; + unsigned int ret; + struct filter_entry *f; + + if (idx >= adap->tids.ftid_base && nidx < + (adap->tids.nftids + adap->tids.nsftids)) { + idx = nidx; + ret = TCB_COOKIE_G(rpl->cookie); + f = &adap->tids.ftid_tab[idx]; + + if (ret == FW_FILTER_WR_FLT_DELETED) { + /* Clear the filter when we get confirmation from the + * hardware that the filter has been deleted. + */ + clear_filter(adap, f); + } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) { + dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n", + idx); + clear_filter(adap, f); + } else if (ret == FW_FILTER_WR_FLT_ADDED) { + f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff; + f->pending = 0; /* asynchronous setup completed */ + f->valid = 1; + } else { + /* Something went wrong. Issue a warning about the + * problem and clear everything out. + */ + dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n", + idx, ret); + clear_filter(adap, f); + } + } +} + +/* Response queue handler for the FW event queue. + */ +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + rsp++; /* skip RSS header */ + + /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 
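+ * In that case the real CPL follows the encapsulating firmware
+ * message, so step past the FW_TYPE_RSSCPL wrapper before
+ * dispatching on the inner opcode.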
+ */ + if (unlikely(opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) { + rsp++; + opcode = ((const struct rss_header *)rsp)->opcode; + rsp++; + if (opcode != CPL_SGE_EGR_UPDATE) { + dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" + , opcode); + goto out; + } + } + + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { + const struct cpl_sge_egr_update *p = (void *)rsp; + unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid)); + struct sge_txq *txq; + + txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; + txq->restarts++; + if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { + struct sge_eth_txq *eq; + + eq = container_of(txq, struct sge_eth_txq, q); + netif_tx_wake_queue(eq->txq); + } else { + struct sge_ofld_txq *oq; + + oq = container_of(txq, struct sge_ofld_txq, q); + tasklet_schedule(&oq->qresume_tsk); + } + } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { + const struct cpl_fw6_msg *p = (void *)rsp; + +#ifdef CONFIG_CHELSIO_T4_DCB + const struct fw_port_cmd *pcmd = (const void *)p->data; + unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid)); + unsigned int action = + FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); + + if (cmd == FW_PORT_CMD && + action == FW_PORT_ACTION_GET_PORT_INFO) { + int port = FW_PORT_CMD_PORTID_G( + be32_to_cpu(pcmd->op_to_portid)); + struct net_device *dev = q->adap->port[port]; + int state_input = ((pcmd->u.info.dcbxdis_pkd & + FW_PORT_CMD_DCBXDIS_F) + ? CXGB4_DCB_INPUT_FW_DISABLED + : CXGB4_DCB_INPUT_FW_ENABLED); + + cxgb4_dcb_state_fsm(dev, state_input); + } + + if (cmd == FW_PORT_CMD && + action == FW_PORT_ACTION_L2_DCB_CFG) + dcb_rpl(q->adap, pcmd); + else +#endif + if (p->type == 0) + t4_handle_fw_rpl(q->adap, p->data); + } else if (opcode == CPL_L2T_WRITE_RPL) { + const struct cpl_l2t_write_rpl *p = (void *)rsp; + + do_l2t_write_rpl(q->adap, p); + } else if (opcode == CPL_SET_TCB_RPL) { + const struct cpl_set_tcb_rpl *p = (void *)rsp; + + filter_rpl(q->adap, p); + } else + dev_err(q->adap->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); +out: + return 0; +} + +/** + * uldrx_handler - response queue handler for ULD queues + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the offload message + * @gl: the gather list of packet fragments + * + * Deliver an ingress offload packet to a ULD. All processing is done by + * the ULD, we just maintain statistics. + */ +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + + /* FW can send CPLs encapsulated in a CPL_FW4_MSG. + */ + if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL) + rsp += 2; + + if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { + rxq->stats.nomem++; + return -1; + } + if (gl == NULL) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static void disable_msi(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } +} + +/* + * Interrupt handler for non-data events used with MSI-X. 
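+ * This is the handler behind MSI-X vector 0 (msix_info[0]), which
+ * name_msix_vecs() below labels with the bare interface name; data
+ * queues start at vector 2.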
+ */ +static irqreturn_t t4_nondata_intr(int irq, void *cookie) +{ + struct adapter *adap = cookie; + u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); + + if (v & PFSW_F) { + adap->swintr = 1; + t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); + } + if (adap->flags & MASTER_PF) + t4_slow_intr_handler(adap); + return IRQ_HANDLED; +} + +/* + * Name the MSI-X interrupts. + */ +static void name_msix_vecs(struct adapter *adap) +{ + int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); + + /* non-data interrupts */ + snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); + + /* FW events */ + snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", + adap->port[0]->name); + + /* Ethernet queues */ + for_each_port(adap, j) { + struct net_device *d = adap->port[j]; + const struct port_info *pi = netdev_priv(d); + + for (i = 0; i < pi->nqsets; i++, msi_idx++) + snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", + d->name, i); + } + + /* offload queues */ + for_each_ofldrxq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", + adap->port[0]->name, i); + + for_each_rdmarxq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", + adap->port[0]->name, i); + + for_each_rdmaciq(&adap->sge, i) + snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d", + adap->port[0]->name, i); +} + +static int request_msix_queue_irqs(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; + int msi_index = 2; + + err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, + adap->msix_info[1].desc, &s->fw_evtq); + if (err) + return err; + + for_each_ethrxq(s, ethqidx) { + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, + &s->ethrxq[ethqidx].rspq); + if (err) + goto unwind; + msi_index++; + } + for_each_ofldrxq(s, ofldqidx) { + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, + &s->ofldrxq[ofldqidx].rspq); + if (err) + goto unwind; + msi_index++; + } + for_each_rdmarxq(s, rdmaqidx) { + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, + &s->rdmarxq[rdmaqidx].rspq); + if (err) + goto unwind; + msi_index++; + } + for_each_rdmaciq(s, rdmaciqqidx) { + err = request_irq(adap->msix_info[msi_index].vec, + t4_sge_intr_msix, 0, + adap->msix_info[msi_index].desc, + &s->rdmaciq[rdmaciqqidx].rspq); + if (err) + goto unwind; + msi_index++; + } + return 0; + +unwind: + while (--rdmaciqqidx >= 0) + free_irq(adap->msix_info[--msi_index].vec, + &s->rdmaciq[rdmaciqqidx].rspq); + while (--rdmaqidx >= 0) + free_irq(adap->msix_info[--msi_index].vec, + &s->rdmarxq[rdmaqidx].rspq); + while (--ofldqidx >= 0) + free_irq(adap->msix_info[--msi_index].vec, + &s->ofldrxq[ofldqidx].rspq); + while (--ethqidx >= 0) + free_irq(adap->msix_info[--msi_index].vec, + &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + return err; +} + +static void free_msix_queue_irqs(struct adapter *adap) +{ + int i, msi_index = 2; + struct sge *s = &adap->sge; + + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + for_each_ethrxq(s, i) + free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); + for_each_ofldrxq(s, i) + free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); + for_each_rdmarxq(s, i) + free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); + for_each_rdmaciq(s, i) + 
free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq); +} + +/** + * cxgb4_write_rss - write the RSS table for a given port + * @pi: the port + * @queues: array of queue indices for RSS + * + * Sets up the portion of the HW RSS table for the port's VI to distribute + * packets to the Rx queues in @queues. + */ +int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) +{ + u16 *rss; + int i, err; + const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset]; + + rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL); + if (!rss) + return -ENOMEM; + + /* map the queue indices to queue ids */ + for (i = 0; i < pi->rss_size; i++, queues++) + rss[i] = q[*queues].rspq.abs_id; + + err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0, + pi->rss_size, rss, pi->rss_size); + kfree(rss); + return err; +} + +/** + * setup_rss - configure RSS + * @adap: the adapter + * + * Sets up RSS for each port. + */ +static int setup_rss(struct adapter *adap) +{ + int i, err; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + + err = cxgb4_write_rss(pi, pi->rss); + if (err) + return err; + } + return 0; +} + +/* + * Return the channel of the ingress queue with the given qid. + */ +static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid) +{ + qid -= p->ingr_start; + return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; +} + +/* + * Wait until all NAPI handlers are descheduled. + */ +static void quiesce_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < adap->sge.ingr_sz; i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (q && q->handler) { + napi_disable(&q->napi); + local_bh_disable(); + while (!cxgb_poll_lock_napi(q)) + mdelay(1); + local_bh_enable(); + } + + } +} + +/* Disable interrupt and napi handler */ +static void disable_interrupts(struct adapter *adap) +{ + if (adap->flags & FULL_INIT_DONE) { + t4_intr_disable(adap); + if (adap->flags & USING_MSIX) { + free_msix_queue_irqs(adap); + free_irq(adap->msix_info[0].vec, adap); + } else { + free_irq(adap->pdev->irq, adap); + } + quiesce_rx(adap); + } +} + +/* + * Enable NAPI scheduling and interrupt generation for all Rx queues. + */ +static void enable_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < adap->sge.ingr_sz; i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (!q) + continue; + if (q->handler) { + cxgb_busy_poll_init_lock(q); + napi_enable(&q->napi); + } + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), + SEINTARM_V(q->intr_params) | + INGRESSQID_V(q->cntxt_id)); + } +} + +static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, + unsigned int nq, unsigned int per_chan, int msi_idx, + u16 *ids) +{ + int i, err; + + for (i = 0; i < nq; i++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, + adap->port[i / per_chan], + msi_idx, q->fl.size ? &q->fl : NULL, + uldrx_handler); + if (err) + return err; + memset(&q->stats, 0, sizeof(q->stats)); + if (ids) + ids[i] = q->rspq.abs_id; + } + return 0; +} + +/** + * setup_sge_queues - configure SGE Tx/Rx/response queues + * @adap: the adapter + * + * Determines how many sets of SGE queues to use and initializes them. + * We support multiple queue sets per port if we have MSI-X, otherwise + * just one queue set per port. 
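+ * Without MSI-X a dedicated interrupt queue (s->intrq) is allocated
+ * instead, and its absolute id is encoded as a negative msi_idx so
+ * the remaining allocations know no per-queue vectors are available.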
+ */ +static int setup_sge_queues(struct adapter *adap) +{ + int err, msi_idx, i, j; + struct sge *s = &adap->sge; + + bitmap_zero(s->starving_fl, s->egr_sz); + bitmap_zero(s->txq_maperr, s->egr_sz); + + if (adap->flags & USING_MSIX) + msi_idx = 1; /* vector 0 is for non-queue interrupts */ + else { + err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, + NULL, NULL); + if (err) + return err; + msi_idx = -((int)s->intrq.abs_id + 1); + } + + /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, + * don't forget to update the following which need to be + * synchronized to and changes here. + * + * 1. The calculations of MAX_INGQ in cxgb4.h. + * + * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs + * to accommodate any new/deleted Ingress Queues + * which need MSI-X Vectors. + * + * 3. Update sge_qinfo_show() to include information on the + * new/deleted queues. + */ + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], + msi_idx, NULL, fwevtq_handler); + if (err) { +freeout: t4_free_sge_resources(adap); + return err; + } + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, + msi_idx, &q->fl, + t4_ethrx_handler); + if (err) + goto freeout; + q->rspq.idx = j; + memset(&q->stats, 0, sizeof(q->stats)); + } + for (j = 0; j < pi->nqsets; j++, t++) { + err = t4_sge_alloc_eth_txq(adap, t, dev, + netdev_get_tx_queue(dev, j), + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + } + + j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ + for_each_ofldrxq(s, i) { + err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], + adap->port[i / j], + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + +#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \ + err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \ + if (err) \ + goto freeout; \ + if (msi_idx > 0) \ + msi_idx += nq; \ +} while (0) + + ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq); + ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq); + j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ + ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq); + +#undef ALLOC_OFLD_RXQS + + for_each_port(adap, i) { + /* + * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't + * have RDMA queues, and that's the right value. + */ + err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], + s->fw_evtq.cntxt_id, + s->rdmarxq[i].rspq.cntxt_id); + if (err) + goto freeout; + } + + t4_write_reg(adap, is_t4(adap->params.chip) ? + MPS_TRC_RSS_CONTROL_A : + MPS_T5_TRC_RSS_CONTROL_A, + RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | + QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id)); + return 0; +} + +/* + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. + * The allocated memory is cleared. + */ +void *t4_alloc_mem(size_t size) +{ + void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + + if (!p) + p = vzalloc(size); + return p; +} + +/* + * Free memory allocated through alloc_mem(). + */ +void t4_free_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +/* Send a Work Request to write the filter at a specified index. 
We construct + * a Firmware Filter Work Request to have the work done and put the indicated + * filter into "pending" mode which will prevent any further actions against + * it till we get a reply from the firmware on the completion status of the + * request. + */ +static int set_filter_wr(struct adapter *adapter, int fidx) +{ + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct sk_buff *skb; + struct fw_filter_wr *fwr; + unsigned int ftid; + + skb = alloc_skb(sizeof(*fwr), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + /* If the new filter requires loopback Destination MAC and/or VLAN + * rewriting then we need to allocate a Layer 2 Table (L2T) entry for + * the filter. + */ + if (f->fs.newdmac || f->fs.newvlan) { + /* allocate L2T entry for new filter */ + f->l2t = t4_l2t_alloc_switching(adapter->l2t); + if (f->l2t == NULL) { + kfree_skb(skb); + return -EAGAIN; + } + if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, + f->fs.eport, f->fs.dmac)) { + cxgb4_l2t_release(f->l2t); + f->l2t = NULL; + kfree_skb(skb); + return -ENOMEM; + } + } + + ftid = adapter->tids.ftid_base + fidx; + + fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); + memset(fwr, 0, sizeof(*fwr)); + + /* It would be nice to put most of the following in t4_hw.c but most + * of the work is translating the cxgbtool ch_filter_specification + * into the Work Request and the definition of that structure is + * currently in cxgbtool.h which isn't appropriate to pull into the + * common code. We may eventually try to come up with a more neutral + * filter specification structure but for now it's easiest to simply + * put this fairly direct code in line ... + */ + fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16)); + fwr->tid_to_iq = + htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_RQTYPE_V(f->fs.type) | + FW_FILTER_WR_NOREPLY_V(0) | + FW_FILTER_WR_IQ_V(f->fs.iq)); + fwr->del_filter_to_l2tix = + htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) | + FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) | + FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) | + FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) | + FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | + FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | + FW_FILTER_WR_DMAC_V(f->fs.newdmac) | + FW_FILTER_WR_SMAC_V(f->fs.newsmac) | + FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || + f->fs.newvlan == VLAN_REWRITE) | + FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || + f->fs.newvlan == VLAN_REWRITE) | + FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) | + FW_FILTER_WR_TXCHAN_V(f->fs.eport) | + FW_FILTER_WR_PRIO_V(f->fs.prio) | + FW_FILTER_WR_L2TIX_V(f->l2t ? 
f->l2t->idx : 0)); + fwr->ethtype = htons(f->fs.val.ethtype); + fwr->ethtypem = htons(f->fs.mask.ethtype); + fwr->frag_to_ovlan_vldm = + (FW_FILTER_WR_FRAG_V(f->fs.val.frag) | + FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) | + FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) | + FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) | + FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld)); + fwr->smac_sel = 0; + fwr->rx_chan_rx_rpl_iq = + htons(FW_FILTER_WR_RX_CHAN_V(0) | + FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id)); + fwr->maci_to_matchtypem = + htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) | + FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) | + FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) | + FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) | + FW_FILTER_WR_PORT_V(f->fs.val.iport) | + FW_FILTER_WR_PORTM_V(f->fs.mask.iport) | + FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) | + FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype)); + fwr->ptcl = f->fs.val.proto; + fwr->ptclm = f->fs.mask.proto; + fwr->ttyp = f->fs.val.tos; + fwr->ttypm = f->fs.mask.tos; + fwr->ivlan = htons(f->fs.val.ivlan); + fwr->ivlanm = htons(f->fs.mask.ivlan); + fwr->ovlan = htons(f->fs.val.ovlan); + fwr->ovlanm = htons(f->fs.mask.ovlan); + memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); + memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); + memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); + memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); + fwr->lp = htons(f->fs.val.lport); + fwr->lpm = htons(f->fs.mask.lport); + fwr->fp = htons(f->fs.val.fport); + fwr->fpm = htons(f->fs.mask.fport); + if (f->fs.newsmac) + memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma)); + + /* Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. + */ + f->pending = 1; + set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); + t4_ofld_send(adapter, skb); + return 0; +} + +/* Delete the filter at a specified index. + */ +static int del_filter_wr(struct adapter *adapter, int fidx) +{ + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; + struct sk_buff *skb; + struct fw_filter_wr *fwr; + unsigned int len, ftid; + + len = sizeof(*fwr); + ftid = adapter->tids.ftid_base + fidx; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + fwr = (struct fw_filter_wr *)__skb_put(skb, len); + t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id); + + /* Mark the filter as "pending" and ship off the Filter Work Request. + * When we get the Work Request Reply we'll clear the pending status. + */ + f->pending = 1; + t4_mgmt_tx(adapter, skb); + return 0; +} + +static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + int txq; + +#ifdef CONFIG_CHELSIO_T4_DCB + /* If a Data Center Bridging has been successfully negotiated on this + * link then we'll use the skb's priority to map it to a TX Queue. + * The skb's priority is determined via the VLAN Tag Priority Code + * Point field. 
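+ *
+ * For example (illustrative only), a frame tagged with VLAN PCP 5 is
+ * placed on TX queue 5 below, since the queue index is taken straight
+ * from the upper three bits of the VLAN TCI.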
+ */ + if (cxgb4_dcb_enabled(dev)) { + u16 vlan_tci; + int err; + + err = vlan_get_tag(skb, &vlan_tci); + if (unlikely(err)) { + if (net_ratelimit()) + netdev_warn(dev, + "TX Packet without VLAN Tag on DCB Link\n"); + txq = 0; + } else { + txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +#ifdef CONFIG_CHELSIO_T4_FCOE + if (skb->protocol == htons(ETH_P_FCOE)) + txq = skb->priority & 0x7; +#endif /* CONFIG_CHELSIO_T4_FCOE */ + } + return txq; + } +#endif /* CONFIG_CHELSIO_T4_DCB */ + + if (select_queue) { + txq = (skb_rx_queue_recorded(skb) + ? skb_get_rx_queue(skb) + : smp_processor_id()); + + while (unlikely(txq >= dev->real_num_tx_queues)) + txq -= dev->real_num_tx_queues; + + return txq; + } + + return fallback(dev, skb) % dev->real_num_tx_queues; +} + +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +static int closest_timer(const struct sge *s, int time) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int closest_thres(const struct sge *s, int thres) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +/** + * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters + * @q: the Rx queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an Rx queue's interrupt hold-off time and packet count. At least + * one of the two needs to be enabled for the queue to generate interrupts. + */ +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, + unsigned int us, unsigned int cnt) +{ + struct adapter *adap = q->adap; + + if ((us | cnt) == 0) + cnt = 1; + + if (cnt) { + int err; + u32 v, new_idx; + + new_idx = closest_thres(&adap->sge, cnt); + if (q->desc && q->pktcnt_idx != new_idx) { + /* the queue has already been created, update it */ + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ_V(q->cntxt_id); + err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, + &new_idx); + if (err) + return err; + } + q->pktcnt_idx = new_idx; + } + + us = us == 0 ? 6 : closest_timer(&adap->sge, us); + q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); + return 0; +} + +static int cxgb_set_features(struct net_device *dev, netdev_features_t features) +{ + const struct port_info *pi = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + int err; + + if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) + return 0; + + err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, + -1, -1, -1, + !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); + if (unlikely(err)) + dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; + return err; +} + +static int setup_debugfs(struct adapter *adap) +{ + if (IS_ERR_OR_NULL(adap->debugfs_root)) + return -1; + +#ifdef CONFIG_DEBUG_FS + t4_setup_debugfs(adap); +#endif + return 0; +} + +/* + * upper-layer driver support + */ + +/* + * Allocate an active-open TID and set it to the supplied value. 
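+ * Free entries are kept on a singly linked list threaded through the
+ * atid table itself, so allocation and release below are O(1).
+ * Returns the allocated ATID, or -1 if none are available.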
+ */ +int cxgb4_alloc_atid(struct tid_info *t, void *data) +{ + int atid = -1; + + spin_lock_bh(&t->atid_lock); + if (t->afree) { + union aopen_entry *p = t->afree; + + atid = (p - t->atid_tab) + t->atid_base; + t->afree = p->next; + p->data = data; + t->atids_in_use++; + } + spin_unlock_bh(&t->atid_lock); + return atid; +} +EXPORT_SYMBOL(cxgb4_alloc_atid); + +/* + * Release an active-open TID. + */ +void cxgb4_free_atid(struct tid_info *t, unsigned int atid) +{ + union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; + + spin_lock_bh(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + spin_unlock_bh(&t->atid_lock); +} +EXPORT_SYMBOL(cxgb4_free_atid); + +/* + * Allocate a server TID and set it to the supplied value. + */ +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) +{ + int stid; + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) { + stid = find_first_zero_bit(t->stid_bmap, t->nstids); + if (stid < t->nstids) + __set_bit(stid, t->stid_bmap); + else + stid = -1; + } else { + stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); + if (stid < 0) + stid = -1; + } + if (stid >= 0) { + t->stid_tab[stid].data = data; + stid += t->stid_base; + /* IPv6 requires max of 520 bits or 16 cells in TCAM + * This is equivalent to 4 TIDs. With CLIP enabled it + * needs 2 TIDs. + */ + if (family == PF_INET) + t->stids_in_use++; + else + t->stids_in_use += 4; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} +EXPORT_SYMBOL(cxgb4_alloc_stid); + +/* Allocate a server filter TID and set it to the supplied value. + */ +int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) +{ + int stid; + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) { + stid = find_next_zero_bit(t->stid_bmap, + t->nstids + t->nsftids, t->nstids); + if (stid < (t->nstids + t->nsftids)) + __set_bit(stid, t->stid_bmap); + else + stid = -1; + } else { + stid = -1; + } + if (stid >= 0) { + t->stid_tab[stid].data = data; + stid -= t->nstids; + stid += t->sftid_base; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} +EXPORT_SYMBOL(cxgb4_alloc_sftid); + +/* Release a server TID. + */ +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) +{ + /* Is it a server filter TID? */ + if (t->nsftids && (stid >= t->sftid_base)) { + stid -= t->sftid_base; + stid += t->nstids; + } else { + stid -= t->stid_base; + } + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) + __clear_bit(stid, t->stid_bmap); + else + bitmap_release_region(t->stid_bmap, stid, 2); + t->stid_tab[stid].data = NULL; + if (family == PF_INET) + t->stids_in_use--; + else + t->stids_in_use -= 4; + spin_unlock_bh(&t->stid_lock); +} +EXPORT_SYMBOL(cxgb4_free_stid); + +/* + * Populate a TID_RELEASE WR. Caller must properly size the skb. + */ +static void mk_tid_release(struct sk_buff *skb, unsigned int chan, + unsigned int tid) +{ + struct cpl_tid_release *req; + + set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); + req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); +} + +/* + * Queue a TID release request and if necessary schedule a work queue to + * process it. 
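+ * Pending requests are chained through the tid_tab slots themselves;
+ * the low 2 bits of each link encode the Tx channel so that
+ * process_tid_release_list() can recover it without extra storage.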
+ */ +static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid) +{ + void **p = &t->tid_tab[tid]; + struct adapter *adap = container_of(t, struct adapter, tids); + + spin_lock_bh(&adap->tid_release_lock); + *p = adap->tid_release_head; + /* Low 2 bits encode the Tx channel number */ + adap->tid_release_head = (void **)((uintptr_t)p | chan); + if (!adap->tid_release_task_busy) { + adap->tid_release_task_busy = true; + queue_work(adap->workq, &adap->tid_release_task); + } + spin_unlock_bh(&adap->tid_release_lock); +} + +/* + * Process the list of pending TID release requests. + */ +static void process_tid_release_list(struct work_struct *work) +{ + struct sk_buff *skb; + struct adapter *adap; + + adap = container_of(work, struct adapter, tid_release_task); + + spin_lock_bh(&adap->tid_release_lock); + while (adap->tid_release_head) { + void **p = adap->tid_release_head; + unsigned int chan = (uintptr_t)p & 3; + p = (void *)p - chan; + + adap->tid_release_head = *p; + *p = NULL; + spin_unlock_bh(&adap->tid_release_lock); + + while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL))) + schedule_timeout_uninterruptible(1); + + mk_tid_release(skb, chan, p - adap->tids.tid_tab); + t4_ofld_send(adap, skb); + spin_lock_bh(&adap->tid_release_lock); + } + adap->tid_release_task_busy = false; + spin_unlock_bh(&adap->tid_release_lock); +} + +/* + * Release a TID and inform HW. If we are unable to allocate the release + * message we defer to a work queue. + */ +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) +{ + void *old; + struct sk_buff *skb; + struct adapter *adap = container_of(t, struct adapter, tids); + + old = t->tid_tab[tid]; + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); + if (likely(skb)) { + t->tid_tab[tid] = NULL; + mk_tid_release(skb, chan, tid); + t4_ofld_send(adap, skb); + } else + cxgb4_queue_tid_release(t, chan, tid); + if (old) + atomic_dec(&t->tids_in_use); +} +EXPORT_SYMBOL(cxgb4_remove_tid); + +/* + * Allocate and initialize the TID tables. Returns 0 on success. + */ +static int tid_init(struct tid_info *t) +{ + size_t size; + unsigned int stid_bmap_size; + unsigned int natids = t->natids; + struct adapter *adap = container_of(t, struct adapter, tids); + + stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); + size = t->ntids * sizeof(*t->tid_tab) + + natids * sizeof(*t->atid_tab) + + t->nstids * sizeof(*t->stid_tab) + + t->nsftids * sizeof(*t->stid_tab) + + stid_bmap_size * sizeof(long) + + t->nftids * sizeof(*t->ftid_tab) + + t->nsftids * sizeof(*t->ftid_tab); + + t->tid_tab = t4_alloc_mem(size); + if (!t->tid_tab) + return -ENOMEM; + + t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; + t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; + t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; + t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; + spin_lock_init(&t->stid_lock); + spin_lock_init(&t->atid_lock); + + t->stids_in_use = 0; + t->afree = NULL; + t->atids_in_use = 0; + atomic_set(&t->tids_in_use, 0); + + /* Setup the free list for atid_tab and clear the stid bitmap. 
*/ + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); + /* Reserve stid 0 for T4/T5 adapters */ + if (!t->stid_base && + (is_t4(adap->params.chip) || is_t5(adap->params.chip))) + __set_bit(0, t->stid_bmap); + + return 0; +} + +/** + * cxgb4_create_server - create an IP server + * @dev: the device + * @stid: the server TID + * @sip: local IP address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IP server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. + */ +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, __be16 vlan, + unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req *req; + int ret; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip = sip; + req->peer_ip = htonl(0); + chan = rxq_to_chan(&adap->sge, queue); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | + SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue)); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_create_server); + +/* cxgb4_create_server6 - create an IPv6 server + * @dev: the device + * @stid: the server TID + * @sip: local IPv6 address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IPv6 server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. 
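+ *
+ * This mirrors cxgb4_create_server() above but issues a
+ * CPL_PASS_OPEN_REQ6, with the 128-bit local address carried as two
+ * big-endian 64-bit halves of sip->s6_addr.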
+ */ +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req6 *req; + int ret; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip_hi = *(__be64 *)(sip->s6_addr); + req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); + req->peer_ip_hi = cpu_to_be64(0); + req->peer_ip_lo = cpu_to_be64(0); + chan = rxq_to_chan(&adap->sge, queue); + req->opt0 = cpu_to_be64(TX_CHAN_V(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) | + SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue)); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_create_server6); + +int cxgb4_remove_server(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6) +{ + struct sk_buff *skb; + struct adapter *adap; + struct cpl_close_listsvr_req *req; + int ret; + + adap = netdev2adap(dev); + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); + req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) : + LISTSVR_IPV6_V(0)) | QUEUENO_V(queue)); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_remove_server); + +/** + * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU + * @mtus: the HW MTU table + * @mtu: the target MTU + * @idx: index of selected entry in the MTU table + * + * Returns the index and the value in the HW MTU table that is closest to + * but does not exceed @mtu, unless @mtu is smaller than any value in the + * table, in which case that smallest available value is selected. + */ +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx) +{ + unsigned int i = 0; + + while (i < NMTUS - 1 && mtus[i + 1] <= mtu) + ++i; + if (idx) + *idx = i; + return mtus[i]; +} +EXPORT_SYMBOL(cxgb4_best_mtu); + +/** + * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned + * @mtus: the HW MTU table + * @header_size: Header Size + * @data_size_max: maximum Data Segment Size + * @data_size_align: desired Data Segment Size Alignment (2^N) + * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL) + * + * Similar to cxgb4_best_mtu() but instead of searching the Hardware + * MTU Table based solely on a Maximum MTU parameter, we break that + * parameter up into a Header Size and Maximum Data Segment Size, and + * provide a desired Data Segment Size Alignment. If we find an MTU in + * the Hardware MTU Table which will result in a Data Segment Size with + * the requested alignment _and_ that MTU isn't "too far" from the + * closest MTU, then we'll return that rather than the closest MTU. 
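+ *
+ * Purely illustrative example (these are not the real table values):
+ * with a 52-byte header, a 512-byte alignment requirement, a maximum
+ * total of 4300 bytes and hypothetical neighbouring table entries of
+ * 4148 and 4300, the routine returns 4148 rather than 4300, because
+ * 4148 - 52 = 4096 is a multiple of 512 and that entry is only one
+ * step below the closest fit.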
+ */ +unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus, + unsigned short header_size, + unsigned short data_size_max, + unsigned short data_size_align, + unsigned int *mtu_idxp) +{ + unsigned short max_mtu = header_size + data_size_max; + unsigned short data_size_align_mask = data_size_align - 1; + int mtu_idx, aligned_mtu_idx; + + /* Scan the MTU Table till we find an MTU which is larger than our + * Maximum MTU or we reach the end of the table. Along the way, + * record the last MTU found, if any, which will result in a Data + * Segment Length matching the requested alignment. + */ + for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { + unsigned short data_size = mtus[mtu_idx] - header_size; + + /* If this MTU minus the Header Size would result in a + * Data Segment Size of the desired alignment, remember it. + */ + if ((data_size & data_size_align_mask) == 0) + aligned_mtu_idx = mtu_idx; + + /* If we're not at the end of the Hardware MTU Table and the + * next element is larger than our Maximum MTU, drop out of + * the loop. + */ + if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu) + break; + } + + /* If we fell out of the loop because we ran to the end of the table, + * then we just have to use the last [largest] entry. + */ + if (mtu_idx == NMTUS) + mtu_idx--; + + /* If we found an MTU which resulted in the requested Data Segment + * Length alignment and that's "not far" from the largest MTU which is + * less than or equal to the maximum MTU, then use that. + */ + if (aligned_mtu_idx >= 0 && + mtu_idx - aligned_mtu_idx <= 1) + mtu_idx = aligned_mtu_idx; + + /* If the caller has passed in an MTU Index pointer, pass the + * MTU Index back. Return the MTU value. + */ + if (mtu_idxp) + *mtu_idxp = mtu_idx; + return mtus[mtu_idx]; +} +EXPORT_SYMBOL(cxgb4_best_aligned_mtu); + +/** + * cxgb4_port_chan - get the HW channel of a port + * @dev: the net device for the port + * + * Return the HW Tx channel of the given port. + */ +unsigned int cxgb4_port_chan(const struct net_device *dev) +{ + return netdev2pinfo(dev)->tx_chan; +} +EXPORT_SYMBOL(cxgb4_port_chan); + +unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) +{ + struct adapter *adap = netdev2adap(dev); + u32 v1, v2, lp_count, hp_count; + + v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); + v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); + if (is_t4(adap->params.chip)) { + lp_count = LP_COUNT_G(v1); + hp_count = HP_COUNT_G(v1); + } else { + lp_count = LP_COUNT_T5_G(v1); + hp_count = HP_COUNT_T5_G(v2); + } + return lpfifo ? lp_count : hp_count; +} +EXPORT_SYMBOL(cxgb4_dbfifo_count); + +/** + * cxgb4_port_viid - get the VI id of a port + * @dev: the net device for the port + * + * Return the VI id of the given port. + */ +unsigned int cxgb4_port_viid(const struct net_device *dev) +{ + return netdev2pinfo(dev)->viid; +} +EXPORT_SYMBOL(cxgb4_port_viid); + +/** + * cxgb4_port_idx - get the index of a port + * @dev: the net device for the port + * + * Return the index of the given port. 
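+ * (This is the zero-based index of the port on the adapter, as opposed
+ * to the HW Tx channel or VI id returned by cxgb4_port_chan() and
+ * cxgb4_port_viid() above.)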
+ */ +unsigned int cxgb4_port_idx(const struct net_device *dev) +{ + return netdev2pinfo(dev)->port_id; +} +EXPORT_SYMBOL(cxgb4_port_idx); + +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + struct adapter *adap = pci_get_drvdata(pdev); + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, v4, v6); + spin_unlock(&adap->stats_lock); +} +EXPORT_SYMBOL(cxgb4_get_tcp_stats); + +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order) +{ + struct adapter *adap = netdev2adap(dev); + + t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); + t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | + HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) | + HPZ3_V(pgsz_order[3])); +} +EXPORT_SYMBOL(cxgb4_iscsi_init); + +int cxgb4_flush_eq_cache(struct net_device *dev) +{ + struct adapter *adap = netdev2adap(dev); + int ret; + + ret = t4_fwaddrspace_write(adap, adap->mbox, + 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000); + return ret; +} +EXPORT_SYMBOL(cxgb4_flush_eq_cache); + +static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) +{ + u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; + __be64 indices; + int ret; + + spin_lock(&adap->win0_lock); + ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, + sizeof(indices), (__be32 *)&indices, + T4_MEMORY_READ); + spin_unlock(&adap->win0_lock); + if (!ret) { + *cidx = (be64_to_cpu(indices) >> 25) & 0xffff; + *pidx = (be64_to_cpu(indices) >> 9) & 0xffff; + } + return ret; +} + +int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, + u16 size) +{ + struct adapter *adap = netdev2adap(dev); + u16 hw_pidx, hw_cidx; + int ret; + + ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); + if (ret) + goto out; + + if (pidx != hw_pidx) { + u16 delta; + u32 val; + + if (pidx >= hw_pidx) + delta = pidx - hw_pidx; + else + delta = size - hw_pidx + pidx; + + if (is_t4(adap->params.chip)) + val = PIDX_V(delta); + else + val = PIDX_T5_V(delta); + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), + QID_V(qid) | val); + } +out: + return ret; +} +EXPORT_SYMBOL(cxgb4_sync_txq_pidx); + +void cxgb4_disable_db_coalescing(struct net_device *dev) +{ + struct adapter *adap; + + adap = netdev2adap(dev); + t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, + NOCOALESCE_F); +} +EXPORT_SYMBOL(cxgb4_disable_db_coalescing); + +void cxgb4_enable_db_coalescing(struct net_device *dev) +{ + struct adapter *adap; + + adap = netdev2adap(dev); + t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0); +} +EXPORT_SYMBOL(cxgb4_enable_db_coalescing); + +int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) +{ + struct adapter *adap; + u32 offset, memtype, memaddr; + u32 edc0_size, edc1_size, mc0_size, mc1_size, size; + u32 edc0_end, edc1_end, mc0_end, mc1_end; + int ret; + + adap = netdev2adap(dev); + + offset = ((stag >> 8) * 32) + adap->vres.stag.start; + + /* Figure out where the offset lands in the Memory Type/Address scheme. + * This code assumes that the memory is laid out starting at offset 0 + * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0 + * and EDC1. Some cards will have neither MC0 nor MC1, most cards have + * MC0, and some have both MC0 and MC1. 
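+ *
+ * For instance (sizes are illustrative), with 256MB in each of EDC0
+ * and EDC1, an offset of 300MB falls past edc0_end (256MB) but below
+ * edc1_end (512MB), so the read below goes to MEM_EDC1 at address
+ * 300MB - 256MB = 44MB within that memory.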
+ */ + size = t4_read_reg(adap, MA_EDRAM0_BAR_A); + edc0_size = EDRAM0_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EDRAM1_BAR_A); + edc1_size = EDRAM1_SIZE_G(size) << 20; + size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + mc0_size = EXT_MEM0_SIZE_G(size) << 20; + + edc0_end = edc0_size; + edc1_end = edc0_end + edc1_size; + mc0_end = edc1_end + mc0_size; + + if (offset < edc0_end) { + memtype = MEM_EDC0; + memaddr = offset; + } else if (offset < edc1_end) { + memtype = MEM_EDC1; + memaddr = offset - edc0_end; + } else { + if (offset < mc0_end) { + memtype = MEM_MC0; + memaddr = offset - edc1_end; + } else if (is_t4(adap->params.chip)) { + /* T4 only has a single memory channel */ + goto err; + } else { + size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + mc1_size = EXT_MEM1_SIZE_G(size) << 20; + mc1_end = mc0_end + mc1_size; + if (offset < mc1_end) { + memtype = MEM_MC1; + memaddr = offset - mc0_end; + } else { + /* offset beyond the end of any memory */ + goto err; + } + } + } + + spin_lock(&adap->win0_lock); + ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); + spin_unlock(&adap->win0_lock); + return ret; + +err: + dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", + stag, offset); + return -EINVAL; +} +EXPORT_SYMBOL(cxgb4_read_tpte); + +u64 cxgb4_read_sge_timestamp(struct net_device *dev) +{ + u32 hi, lo; + struct adapter *adap; + + adap = netdev2adap(dev); + lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); + hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); + + return ((u64)hi << 32) | (u64)lo; +} +EXPORT_SYMBOL(cxgb4_read_sge_timestamp); + +int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev), + qid, + (qtype == CXGB4_BAR2_QTYPE_EGRESS + ? 
T4_BAR2_QTYPE_EGRESS + : T4_BAR2_QTYPE_INGRESS), + pbar2_qoffset, + pbar2_qid); +} +EXPORT_SYMBOL(cxgb4_bar2_sge_qregs); + +static struct pci_driver cxgb4_driver; + +static void check_neigh_update(struct neighbour *neigh) +{ + const struct device *parent; + const struct net_device *netdev = neigh->dev; + + if (netdev->priv_flags & IFF_802_1Q_VLAN) + netdev = vlan_dev_real_dev(netdev); + parent = netdev->dev.parent; + if (parent && parent->driver == &cxgb4_driver.driver) + t4_l2t_update(dev_get_drvdata(parent), neigh); +} + +static int netevent_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + switch (event) { + case NETEVENT_NEIGH_UPDATE: + check_neigh_update(data); + break; + case NETEVENT_REDIRECT: + default: + break; + } + return 0; +} + +static bool netevent_registered; +static struct notifier_block cxgb4_netevent_nb = { + .notifier_call = netevent_cb +}; + +static void drain_db_fifo(struct adapter *adap, int usecs) +{ + u32 v1, v2, lp_count, hp_count; + + do { + v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); + v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); + if (is_t4(adap->params.chip)) { + lp_count = LP_COUNT_G(v1); + hp_count = HP_COUNT_G(v1); + } else { + lp_count = LP_COUNT_T5_G(v1); + hp_count = HP_COUNT_T5_G(v2); + } + + if (lp_count == 0 && hp_count == 0) + break; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(usecs)); + } while (1); +} + +static void disable_txq_db(struct sge_txq *q) +{ + unsigned long flags; + + spin_lock_irqsave(&q->db_lock, flags); + q->db_disabled = 1; + spin_unlock_irqrestore(&q->db_lock, flags); +} + +static void enable_txq_db(struct adapter *adap, struct sge_txq *q) +{ + spin_lock_irq(&q->db_lock); + if (q->db_pidx_inc) { + /* Make sure that all writes to the TX descriptors + * are committed before we tell HW about them. 
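+ * The wmb() below orders the descriptor updates ahead of the
+ * SGE_PF_KDOORBELL write that tells the chip how many new entries
+ * (db_pidx_inc) to pick up.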
+ */ + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), + QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc)); + q->db_pidx_inc = 0; + } + q->db_disabled = 0; + spin_unlock_irq(&q->db_lock); +} + +static void disable_dbs(struct adapter *adap) +{ + int i; + + for_each_ethrxq(&adap->sge, i) + disable_txq_db(&adap->sge.ethtxq[i].q); + for_each_ofldrxq(&adap->sge, i) + disable_txq_db(&adap->sge.ofldtxq[i].q); + for_each_port(adap, i) + disable_txq_db(&adap->sge.ctrlq[i].q); +} + +static void enable_dbs(struct adapter *adap) +{ + int i; + + for_each_ethrxq(&adap->sge, i) + enable_txq_db(adap, &adap->sge.ethtxq[i].q); + for_each_ofldrxq(&adap->sge, i) + enable_txq_db(adap, &adap->sge.ofldtxq[i].q); + for_each_port(adap, i) + enable_txq_db(adap, &adap->sge.ctrlq[i].q); +} + +static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) +{ + if (adap->uld_handle[CXGB4_ULD_RDMA]) + ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], + cmd); +} + +static void process_db_full(struct work_struct *work) +{ + struct adapter *adap; + + adap = container_of(work, struct adapter, db_full_task); + + drain_db_fifo(adap, dbfifo_drain_delay); + enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); + t4_set_reg_field(adap, SGE_INT_ENABLE3_A, + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F); +} + +static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) +{ + u16 hw_pidx, hw_cidx; + int ret; + + spin_lock_irq(&q->db_lock); + ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); + if (ret) + goto out; + if (q->db_pidx != hw_pidx) { + u16 delta; + u32 val; + + if (q->db_pidx >= hw_pidx) + delta = q->db_pidx - hw_pidx; + else + delta = q->size - hw_pidx + q->db_pidx; + + if (is_t4(adap->params.chip)) + val = PIDX_V(delta); + else + val = PIDX_T5_V(delta); + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), + QID_V(q->cntxt_id) | val); + } +out: + q->db_disabled = 0; + q->db_pidx_inc = 0; + spin_unlock_irq(&q->db_lock); + if (ret) + CH_WARN(adap, "DB drop recovery failed.\n"); +} +static void recover_all_queues(struct adapter *adap) +{ + int i; + + for_each_ethrxq(&adap->sge, i) + sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); + for_each_ofldrxq(&adap->sge, i) + sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); + for_each_port(adap, i) + sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); +} + +static void process_db_drop(struct work_struct *work) +{ + struct adapter *adap; + + adap = container_of(work, struct adapter, db_drop_task); + + if (is_t4(adap->params.chip)) { + drain_db_fifo(adap, dbfifo_drain_delay); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); + drain_db_fifo(adap, dbfifo_drain_delay); + recover_all_queues(adap); + drain_db_fifo(adap, dbfifo_drain_delay); + enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); + } else { + u32 dropped_db = t4_read_reg(adap, 0x010ac); + u16 qid = (dropped_db >> 15) & 0x1ffff; + u16 pidx_inc = dropped_db & 0x1fff; + u64 bar2_qoffset; + unsigned int bar2_qid; + int ret; + + ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, + &bar2_qoffset, &bar2_qid); + if (ret) + dev_err(adap->pdev_dev, "doorbell drop recovery: " + "qid=%d, pidx_inc=%d\n", qid, pidx_inc); + else + writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid), + adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); + + /* Re-enable BAR2 WC */ + t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); + } + + t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); +} + +void t4_db_full(struct adapter *adap) +{ + if 
(is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); + t4_set_reg_field(adap, SGE_INT_ENABLE3_A, + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0); + queue_work(adap->workq, &adap->db_full_task); + } +} + +void t4_db_dropped(struct adapter *adap) +{ + if (is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); + } + queue_work(adap->workq, &adap->db_drop_task); +} + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + unsigned short i; + + lli.pdev = adap->pdev; + lli.pf = adap->fn; + lli.l2t = adap->l2t; + lli.tids = &adap->tids; + lli.ports = adap->port; + lli.vr = &adap->vres; + lli.mtus = adap->params.mtus; + if (uld == CXGB4_ULD_RDMA) { + lli.rxq_ids = adap->sge.rdma_rxq; + lli.ciq_ids = adap->sge.rdma_ciq; + lli.nrxq = adap->sge.rdmaqs; + lli.nciq = adap->sge.rdmaciqs; + } else if (uld == CXGB4_ULD_ISCSI) { + lli.rxq_ids = adap->sge.ofld_rxq; + lli.nrxq = adap->sge.ofldqsets; + } + lli.ntxq = adap->sge.ofldqsets; + lli.nchan = adap->params.nports; + lli.nports = adap->params.nports; + lli.wr_cred = adap->params.ofldq_wr_cred; + lli.adapter_type = adap->params.chip; + lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); + lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; + lli.udb_density = 1 << adap->params.sge.eq_qpp; + lli.ucq_density = 1 << adap->params.sge.iq_qpp; + lli.filt_mode = adap->params.tp.vlan_pri_map; + /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ + for (i = 0; i < NCHAN; i++) + lli.tx_modq[i] = i; + lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); + lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); + lli.fw_vers = adap->params.fw_vers; + lli.dbfifo_int_thresh = dbfifo_int_thresh; + lli.sge_ingpadboundary = adap->sge.fl_align; + lli.sge_egrstatuspagesize = adap->sge.stat_len; + lli.sge_pktshift = adap->sge.pktshift; + lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; + lli.max_ordird_qp = adap->params.max_ordird_qp; + lli.max_ird_adapter = adap->params.max_ird_adapter; + lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; + + handle = ulds[uld].add(&lli); + if (IS_ERR(handle)) { + dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + uld_str[uld], PTR_ERR(handle)); + return; + } + + adap->uld_handle[uld] = handle; + + if (!netevent_registered) { + register_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = true; + } + + if (adap->flags & FULL_INIT_DONE) + ulds[uld].state_change(handle, CXGB4_STATE_UP); +} + +static void attach_ulds(struct adapter *adap) +{ + unsigned int i; + + spin_lock(&adap_rcu_lock); + list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list); + spin_unlock(&adap_rcu_lock); + + mutex_lock(&uld_mutex); + list_add_tail(&adap->list_node, &adapter_list); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (ulds[i].add) + uld_attach(adap, i); + mutex_unlock(&uld_mutex); +} + +static void detach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_del(&adap->list_node); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) { + ulds[i].state_change(adap->uld_handle[i], + CXGB4_STATE_DETACH); + adap->uld_handle[i] = NULL; + } + if (netevent_registered && list_empty(&adapter_list)) { + unregister_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = false; + } + mutex_unlock(&uld_mutex); + + spin_lock(&adap_rcu_lock); + list_del_rcu(&adap->rcu_node); + spin_unlock(&adap_rcu_lock); +} + +static void notify_ulds(struct 
adapter *adap, enum cxgb4_state new_state) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) + ulds[i].state_change(adap->uld_handle[i], new_state); + mutex_unlock(&uld_mutex); +} + +/** + * cxgb4_register_uld - register an upper-layer driver + * @type: the ULD type + * @p: the ULD methods + * + * Registers an upper-layer driver with this driver and notifies the ULD + * about any presently available devices that support its type. Returns + * %-EBUSY if a ULD of the same type is already registered. + */ +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + if (ulds[type].add) { + ret = -EBUSY; + goto out; + } + ulds[type] = *p; + list_for_each_entry(adap, &adapter_list, list_node) + uld_attach(adap, type); +out: mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_uld); + +/** + * cxgb4_unregister_uld - unregister an upper-layer driver + * @type: the ULD type + * + * Unregisters an existing upper-layer driver. + */ +int cxgb4_unregister_uld(enum cxgb4_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) + adap->uld_handle[type] = NULL; + ulds[type].add = NULL; + mutex_unlock(&uld_mutex); + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_uld); + +#if IS_ENABLED(CONFIG_IPV6) +static int cxgb4_inet6addr_handler(struct notifier_block *this, + unsigned long event, void *data) +{ + struct inet6_ifaddr *ifa = data; + struct net_device *event_dev = ifa->idev->dev; + const struct device *parent = NULL; +#if IS_ENABLED(CONFIG_BONDING) + struct adapter *adap; +#endif + if (event_dev->priv_flags & IFF_802_1Q_VLAN) + event_dev = vlan_dev_real_dev(event_dev); +#if IS_ENABLED(CONFIG_BONDING) + if (event_dev->flags & IFF_MASTER) { + list_for_each_entry(adap, &adapter_list, list_node) { + switch (event) { + case NETDEV_UP: + cxgb4_clip_get(adap->port[0], + (const u32 *)ifa, 1); + break; + case NETDEV_DOWN: + cxgb4_clip_release(adap->port[0], + (const u32 *)ifa, 1); + break; + default: + break; + } + } + return NOTIFY_OK; + } +#endif + + if (event_dev) + parent = event_dev->dev.parent; + + if (parent && parent->driver == &cxgb4_driver.driver) { + switch (event) { + case NETDEV_UP: + cxgb4_clip_get(event_dev, (const u32 *)ifa, 1); + break; + case NETDEV_DOWN: + cxgb4_clip_release(event_dev, (const u32 *)ifa, 1); + break; + default: + break; + } + } + return NOTIFY_OK; +} + +static bool inet6addr_registered; +static struct notifier_block cxgb4_inet6addr_notifier = { + .notifier_call = cxgb4_inet6addr_handler +}; + +static void update_clip(const struct adapter *adap) +{ + int i; + struct net_device *dev; + int ret; + + rcu_read_lock(); + + for (i = 0; i < MAX_NPORTS; i++) { + dev = adap->port[i]; + ret = 0; + + if (dev) + ret = cxgb4_update_root_dev_clip(dev); + + if (ret < 0) + break; + } + rcu_read_unlock(); +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +/** + * cxgb_up - enable the adapter + * @adap: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + * + * Must be called with the rtnl lock held. 
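+ *
+ * On failure the function unwinds what it has set up: the irq_err
+ * label reports the request_irq() failure and falls through to freeq,
+ * which releases the SGE queue resources; a failed
+ * request_msix_queue_irqs() additionally frees the non-data vector
+ * first.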
+ */ +static int cxgb_up(struct adapter *adap) +{ + int err; + + err = setup_sge_queues(adap); + if (err) + goto out; + err = setup_rss(adap); + if (err) + goto freeq; + + if (adap->flags & USING_MSIX) { + name_msix_vecs(adap); + err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, + adap->msix_info[0].desc, adap); + if (err) + goto irq_err; + + err = request_msix_queue_irqs(adap); + if (err) { + free_irq(adap->msix_info[0].vec, adap); + goto irq_err; + } + } else { + err = request_irq(adap->pdev->irq, t4_intr_handler(adap), + (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, + adap->port[0]->name, adap); + if (err) + goto irq_err; + } + enable_rx(adap); + t4_sge_start(adap); + t4_intr_enable(adap); + adap->flags |= FULL_INIT_DONE; + notify_ulds(adap, CXGB4_STATE_UP); +#if IS_ENABLED(CONFIG_IPV6) + update_clip(adap); +#endif + out: + return err; + irq_err: + dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); + freeq: + t4_free_sge_resources(adap); + goto out; +} + +static void cxgb_down(struct adapter *adapter) +{ + cancel_work_sync(&adapter->tid_release_task); + cancel_work_sync(&adapter->db_full_task); + cancel_work_sync(&adapter->db_drop_task); + adapter->tid_release_task_busy = false; + adapter->tid_release_head = NULL; + + t4_sge_stop(adapter); + t4_free_sge_resources(adapter); + adapter->flags &= ~FULL_INIT_DONE; +} + +/* + * net_device operations + */ +static int cxgb_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_carrier_off(dev); + + if (!(adapter->flags & FULL_INIT_DONE)) { + err = cxgb_up(adapter); + if (err < 0) + return err; + } + + err = link_start(dev); + if (!err) + netif_tx_start_all_queues(dev); + return err; +} + +static int cxgb_close(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); +} + +/* Return an error number if the indicated filter isn't writable ... + */ +static int writable_filter(struct filter_entry *f) +{ + if (f->locked) + return -EPERM; + if (f->pending) + return -EBUSY; + + return 0; +} + +/* Delete the filter at the specified index (if valid). The checks for all + * the common problems with doing this like the filter being locked, currently + * pending in another operation, etc. + */ +static int delete_filter(struct adapter *adapter, unsigned int fidx) +{ + struct filter_entry *f; + int ret; + + if (fidx >= adapter->tids.nftids + adapter->tids.nsftids) + return -EINVAL; + + f = &adapter->tids.ftid_tab[fidx]; + ret = writable_filter(f); + if (ret) + return ret; + if (f->valid) + return del_filter_wr(adapter, fidx); + + return 0; +} + +int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, __be16 vlan, + unsigned int queue, unsigned char port, unsigned char mask) +{ + int ret; + struct filter_entry *f; + struct adapter *adap; + int i; + u8 *val; + + adap = netdev2adap(dev); + + /* Adjust stid to correct filter index */ + stid -= adap->tids.sftid_base; + stid += adap->tids.nftids; + + /* Check to make sure the filter requested is writable ... + */ + f = &adap->tids.ftid_tab[stid]; + ret = writable_filter(f); + if (ret) + return ret; + + /* Clear out any old resources being used by the filter before + * we start constructing the new filter. 
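+ * The filter is built to steer (dirsteer) to the requested queue and
+ * is marked locked so that writable_filter() rejects normal filter
+ * operations against it until cxgb4_remove_server_filter() unlocks it.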
+ */ + if (f->valid) + clear_filter(adap, f); + + /* Clear out filter specifications */ + memset(&f->fs, 0, sizeof(struct ch_filter_specification)); + f->fs.val.lport = cpu_to_be16(sport); + f->fs.mask.lport = ~0; + val = (u8 *)&sip; + if ((val[0] | val[1] | val[2] | val[3]) != 0) { + for (i = 0; i < 4; i++) { + f->fs.val.lip[i] = val[i]; + f->fs.mask.lip[i] = ~0; + } + if (adap->params.tp.vlan_pri_map & PORT_F) { + f->fs.val.iport = port; + f->fs.mask.iport = mask; + } + } + + if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { + f->fs.val.proto = IPPROTO_TCP; + f->fs.mask.proto = ~0; + } + + f->fs.dirsteer = 1; + f->fs.iq = queue; + /* Mark filter as locked */ + f->locked = 1; + f->fs.rpttid = 1; + + ret = set_filter_wr(adap, stid); + if (ret) { + clear_filter(adap, f); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(cxgb4_create_server_filter); + +int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6) +{ + int ret; + struct filter_entry *f; + struct adapter *adap; + + adap = netdev2adap(dev); + + /* Adjust stid to correct filter index */ + stid -= adap->tids.sftid_base; + stid += adap->tids.nftids; + + f = &adap->tids.ftid_tab[stid]; + /* Unlock the filter */ + f->locked = 0; + + ret = delete_filter(adap, stid); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(cxgb4_remove_server_filter); + +static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *ns) +{ + struct port_stats stats; + struct port_info *p = netdev_priv(dev); + struct adapter *adapter = p->adapter; + + /* Block retrieving statistics during EEH error + * recovery. Otherwise, the recovery might fail + * and the PCI device will be removed permanently + */ + spin_lock(&adapter->stats_lock); + if (!netif_device_present(dev)) { + spin_unlock(&adapter->stats_lock); + return ns; + } + t4_get_port_stats(adapter, p->tx_chan, &stats); + spin_unlock(&adapter->stats_lock); + + ns->tx_bytes = stats.tx_octets; + ns->tx_packets = stats.tx_frames; + ns->rx_bytes = stats.rx_octets; + ns->rx_packets = stats.rx_frames; + ns->multicast = stats.rx_mcast_frames; + + /* detailed rx_errors */ + ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + + stats.rx_runt; + ns->rx_over_errors = 0; + ns->rx_crc_errors = stats.rx_fcs_err; + ns->rx_frame_errors = stats.rx_symbol_err; + ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + + stats.rx_ovflow2 + stats.rx_ovflow3 + + stats.rx_trunc0 + stats.rx_trunc1 + + stats.rx_trunc2 + stats.rx_trunc3; + ns->rx_missed_errors = 0; + + /* detailed tx_errors */ + ns->tx_aborted_errors = 0; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = 0; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = 0; + + ns->tx_errors = stats.tx_error_frames; + ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; + return ns; +} + +static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + unsigned int mbox; + int ret = 0, prtad, devad; + struct port_info *pi = netdev_priv(dev); + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; + + switch (cmd) { + case SIOCGMIIPHY: + if (pi->mdio_addr < 0) + return -EOPNOTSUPP; + data->phy_id = pi->mdio_addr; + break; + case SIOCGMIIREG: + case SIOCSMIIREG: + if (mdio_phy_id_is_c45(data->phy_id)) { + prtad = mdio_phy_id_prtad(data->phy_id); + devad = mdio_phy_id_devad(data->phy_id); + } else if (data->phy_id < 32) { + prtad = data->phy_id; + devad = 0; + data->reg_num 
&= 0x1f; + } else + return -EINVAL; + + mbox = pi->adapter->fn; + if (cmd == SIOCGMIIREG) + ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, + data->reg_num, &data->val_out); + else + ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, + data->reg_num, data->val_in); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static void cxgb_set_rxmode(struct net_device *dev) +{ + /* unfortunately we can't return errors to the stack */ + set_rxmode(dev, -1, false); +} + +static int cxgb_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ + return -EINVAL; + ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, + -1, -1, -1, true); + if (!ret) + dev->mtu = new_mtu; + return ret; +} + +static int cxgb_set_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + struct port_info *pi = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, + pi->xact_addr_filt, addr->sa_data, true, true); + if (ret < 0) + return ret; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + pi->xact_addr_filt = ret; + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cxgb_netpoll(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (adap->flags & USING_MSIX) { + int i; + struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; + + for (i = pi->nqsets; i; i--, rx++) + t4_sge_intr_msix(0, &rx->rspq); + } else + t4_intr_handler(adap)(0, adap); +} +#endif + +static const struct net_device_ops cxgb4_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t4_eth_xmit, + .ndo_select_queue = cxgb_select_queue, + .ndo_get_stats64 = cxgb_get_stats, + .ndo_set_rx_mode = cxgb_set_rxmode, + .ndo_set_mac_address = cxgb_set_mac_addr, + .ndo_set_features = cxgb_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb_ioctl, + .ndo_change_mtu = cxgb_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb_netpoll, +#endif +#ifdef CONFIG_CHELSIO_T4_FCOE + .ndo_fcoe_enable = cxgb_fcoe_enable, + .ndo_fcoe_disable = cxgb_fcoe_disable, +#endif /* CONFIG_CHELSIO_T4_FCOE */ +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = cxgb_busy_poll, +#endif + +}; + +void t4_fatal_err(struct adapter *adap) +{ + t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0); + t4_intr_disable(adap); + dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); +} + +/* Return the specified PCI-E Configuration Space register from our Physical + * Function. We try first via a Firmware LDST Command since we prefer to let + * the firmware own all of these registers, but if that fails we go for it + * directly ourselves. + */ +static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) +{ + struct fw_ldst_cmd ldst_cmd; + u32 val; + int ret; + + /* Construct and send the Firmware LDST Command to retrieve the + * specified PCI-E Configuration Space register. 
+ */ + memset(&ldst_cmd, 0, sizeof(ldst_cmd)); + ldst_cmd.op_to_addrspace = + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE)); + ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); + ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); + ldst_cmd.u.pcie.ctrl_to_fn = + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn)); + ldst_cmd.u.pcie.r = reg; + ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), + &ldst_cmd); + + /* If the LDST Command succeeded, extract the returned register + * value. Otherwise read it directly ourselves. + */ + if (ret == 0) + val = ntohl(ldst_cmd.u.pcie.data[0]); + else + t4_hw_pci_read_cfg4(adap, reg, &val); + + return val; +} + +static void setup_memwin(struct adapter *adap) +{ + u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture; + + if (is_t4(adap->params.chip)) { + u32 bar0; + + /* Truncation intentional: we only read the bottom 32-bits of + * the 64-bit BAR0/BAR1 ... We use the hardware backdoor + * mechanism to read BAR0 instead of using + * pci_resource_start() because we could be operating from + * within a Virtual Machine which is trapping our accesses to + * our Configuration Space and we need to set up the PCI-E + * Memory Window decoders with the actual addresses which will + * be coming across the PCI-E link. + */ + bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0); + bar0 &= PCI_BASE_ADDRESS_MEM_MASK; + adap->t4_bar0 = bar0; + + mem_win0_base = bar0 + MEMWIN0_BASE; + mem_win1_base = bar0 + MEMWIN1_BASE; + mem_win2_base = bar0 + MEMWIN2_BASE; + mem_win2_aperture = MEMWIN2_APERTURE; + } else { + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win0_base = MEMWIN0_BASE; + mem_win1_base = MEMWIN1_BASE; + mem_win2_base = MEMWIN2_BASE_T5; + mem_win2_aperture = MEMWIN2_APERTURE_T5; + } + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0), + mem_win0_base | BIR_V(0) | + WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10)); + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1), + mem_win1_base | BIR_V(0) | + WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10)); + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2), + mem_win2_base | BIR_V(0) | + WINDOW_V(ilog2(mem_win2_aperture) - 10)); + t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2)); +} + +static void setup_memwin_rdma(struct adapter *adap) +{ + if (adap->vres.ocq.size) { + u32 start; + unsigned int sz_kb; + + start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); + start &= PCI_BASE_ADDRESS_MEM_MASK; + start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); + sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3), + start | BIR_V(1) | WINDOW_V(ilog2(sz_kb))); + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3), + adap->vres.ocq.start); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3)); + } +} + +static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) +{ + u32 v; + int ret; + + /* get device capabilities */ + memset(c, 0, sizeof(*c)); + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); + ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); + if (ret < 0) + return ret; + + /* select capabilities we'll be using */ + if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + c->niccaps ^= 
htons(FW_CAPS_CONFIG_NIC_VM); + else + c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adap->pdev_dev, "virtualization ACLs not supported"); + return ret; + } + c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); + if (ret < 0) + return ret; + + ret = t4_config_glbl_rss(adap, adap->fn, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); + if (ret < 0) + return ret; + + ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, + MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, + FW_CMD_CAP_PF); + if (ret < 0) + return ret; + + t4_sge_init(adap); + + /* tweak some settings */ + t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); + t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); + t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); + v = t4_read_reg(adap, TP_PIO_DATA_A); + t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); + + /* first 4 Tx modulation queues point to consecutive Tx channels */ + adap->params.tp.tx_modq_map = 0xE4; + t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, + TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); + + /* associate each Tx modulation queue with consecutive Tx channels */ + v = 0x84218421; + t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &v, 1, TP_TX_SCHED_HDR_A); + t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &v, 1, TP_TX_SCHED_FIFO_A); + t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &v, 1, TP_TX_SCHED_PCMD_A); + +#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */ + if (is_offload(adap)) { + t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, + TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); + t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, + TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | + TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); + } + + /* get basic stuff going */ + return t4_early_init(adap, adap->fn); +} + +/* + * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. + */ +#define MAX_ATIDS 8192U + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init. + * + * If the firmware we're dealing with has Configuration File support, then + * we use that to perform all configuration + */ + +/* + * Tweak configuration based on module parameters, etc. Most of these have + * defaults assigned to them by Firmware Configuration Files (if we're using + * them) but need to be explicitly set if we're using hard-coded + * initialization. But even in the case of using Firmware Configuration + * Files, we'd like to expose the ability to change these via module + * parameters so these are essentially common tweaks/settings for + * Configuration Files and hard-coded initialization ... + */ +static int adap_init0_tweaks(struct adapter *adapter) +{ + /* + * Fix up various Host-Dependent Parameters like Page Size, Cache + * Line Size, etc. The firmware default is for a 4KB Page Size and + * 64B Cache Line Size ... + */ + t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES); + + /* + * Process module parameters which affect early initialization. 
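+ * Currently that is just rx_dma_offset: the code below accepts only
+ * 0 or 2 for the Rx packet shift and falls back to the default of 2
+ * for anything else.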
+ */ + if (rx_dma_offset != 2 && rx_dma_offset != 0) { + dev_err(&adapter->pdev->dev, + "Ignoring illegal rx_dma_offset=%d, using 2\n", + rx_dma_offset); + rx_dma_offset = 2; + } + t4_set_reg_field(adapter, SGE_CONTROL_A, + PKTSHIFT_V(PKTSHIFT_M), + PKTSHIFT_V(rx_dma_offset)); + + /* + * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux + * adds the pseudo header itself. + */ + t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A, + CSUM_HAS_PSEUDO_HDR_F, 0); + + return 0; +} + +/* + * Attempt to initialize the adapter via a Firmware Configuration File. + */ +static int adap_init0_config(struct adapter *adapter, int reset) +{ + struct fw_caps_config_cmd caps_cmd; + const struct firmware *cf; + unsigned long mtype = 0, maddr = 0; + u32 finiver, finicsum, cfcsum; + int ret; + int config_issued = 0; + char *fw_config_file, fw_config_file_path[256]; + char *config_name = NULL; + + /* + * Reset device if necessary. + */ + if (reset) { + ret = t4_fw_reset(adapter, adapter->mbox, + PIORSTMODE_F | PIORST_F); + if (ret < 0) + goto bye; + } + + /* + * If we have a T4 configuration file under /lib/firmware/cxgb4/, + * then use that. Otherwise, use the configuration file stored + * in the adapter flash ... + */ + switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) { + case CHELSIO_T4: + fw_config_file = FW4_CFNAME; + break; + case CHELSIO_T5: + fw_config_file = FW5_CFNAME; + break; + default: + dev_err(adapter->pdev_dev, "Device %d is not supported\n", + adapter->pdev->device); + ret = -EINVAL; + goto bye; + } + + ret = reject_firmware(&cf, fw_config_file, adapter->pdev_dev); + if (ret < 0) { + config_name = "On FLASH"; + mtype = FW_MEMTYPE_CF_FLASH; + maddr = t4_flash_cfg_addr(adapter); + } else { + u32 params[7], val[7]; + + sprintf(fw_config_file_path, + "/lib/firmware/%s", fw_config_file); + config_name = fw_config_file_path; + + if (cf->size >= FLASH_CFG_MAX_SIZE) + ret = -ENOMEM; + else { + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); + ret = t4_query_params(adapter, adapter->mbox, + adapter->fn, 0, 1, params, val); + if (ret == 0) { + /* + * For t4_memory_rw() below addresses and + * sizes have to be in terms of multiples of 4 + * bytes. So, if the Configuration File isn't + * a multiple of 4 bytes in length we'll have + * to write that out separately since we can't + * guarantee that the bytes following the + * residual byte in the buffer returned by + * reject_firmware() are zeroed out ... + */ + size_t resid = cf->size & 0x3; + size_t size = cf->size & ~0x3; + __be32 *data = (__be32 *)cf->data; + + mtype = FW_PARAMS_PARAM_Y_G(val[0]); + maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16; + + spin_lock(&adapter->win0_lock); + ret = t4_memory_rw(adapter, 0, mtype, maddr, + size, data, T4_MEMORY_WRITE); + if (ret == 0 && resid != 0) { + union { + __be32 word; + char buf[4]; + } last; + int i; + + last.word = data[size >> 2]; + for (i = resid; i < 4; i++) + last.buf[i] = 0; + ret = t4_memory_rw(adapter, 0, mtype, + maddr + size, + 4, &last.word, + T4_MEMORY_WRITE); + } + spin_unlock(&adapter->win0_lock); + } + } + + release_firmware(cf); + if (ret) + goto bye; + } + + /* + * Issue a Capability Configuration command to the firmware to get it + * to parse the Configuration File. We don't use t4_fw_config_file() + * because we want the ability to modify various features after we've + * processed the configuration file ... 
+ */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + caps_cmd.cfvalid_to_len16 = + htonl(FW_CAPS_CONFIG_CMD_CFVALID_F | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) | + FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + + /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware + * Configuration File in FLASH), our last gasp effort is to use the + * Firmware Configuration File which is embedded in the firmware. A + * very few early versions of the firmware didn't have one embedded + * but we can ignore those. + */ + if (ret == -ENOENT) { + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, + sizeof(caps_cmd), &caps_cmd); + config_name = "Firmware Default"; + } + + config_issued = 1; + if (ret < 0) + goto bye; + + finiver = ntohl(caps_cmd.finiver); + finicsum = ntohl(caps_cmd.finicsum); + cfcsum = ntohl(caps_cmd.cfcsum); + if (finicsum != cfcsum) + dev_warn(adapter->pdev_dev, "Configuration File checksum "\ + "mismatch: [fini] csum=%#x, computed csum=%#x\n", + finicsum, cfcsum); + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd.op_to_write = + htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + NULL); + if (ret < 0) + goto bye; + + /* + * Tweak configuration based on system architecture, module + * parameters, etc. + */ + ret = adap_init0_tweaks(adapter); + if (ret < 0) + goto bye; + + /* + * And finally tell the firmware to initialize itself using the + * parameters from the Configuration File. + */ + ret = t4_fw_initialize(adapter, adapter->mbox); + if (ret < 0) + goto bye; + + /* Emit Firmware Configuration File information and return + * successfully. + */ + dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ + "Configuration File \"%s\", version %#x, computed checksum %#x\n", + config_name, finiver, cfcsum); + return 0; + + /* + * Something bad happened. Return the error ... (If the "error" + * is that there's no Configuration File on the adapter we don't + * want to issue a warning since this is fairly common.) 
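+ * We also stay quiet if we never got as far as issuing the
+ * CAPS_CONFIG command, which is what the config_issued flag tracks.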
+ */ +bye: + if (config_issued && ret != -ENOENT) + dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", + config_name, -ret); + return ret; +} + +static struct fw_info fw_info_array[] = { + { + .chip = CHELSIO_T4, + .fs_name = FW4_CFNAME, + .fw_mod_name = FW4_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T4, + .fw_ver = __cpu_to_be32(FW_VERSION(T4)), + .intfver_nic = FW_INTFVER(T4, NIC), + .intfver_vnic = FW_INTFVER(T4, VNIC), + .intfver_ri = FW_INTFVER(T4, RI), + .intfver_iscsi = FW_INTFVER(T4, ISCSI), + .intfver_fcoe = FW_INTFVER(T4, FCOE), + }, + }, { + .chip = CHELSIO_T5, + .fs_name = FW5_CFNAME, + .fw_mod_name = FW5_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T5, + .fw_ver = __cpu_to_be32(FW_VERSION(T5)), + .intfver_nic = FW_INTFVER(T5, NIC), + .intfver_vnic = FW_INTFVER(T5, VNIC), + .intfver_ri = FW_INTFVER(T5, RI), + .intfver_iscsi = FW_INTFVER(T5, ISCSI), + .intfver_fcoe = FW_INTFVER(T5, FCOE), + }, + } +}; + +static struct fw_info *find_fw_info(int chip) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { + if (fw_info_array[i].chip == chip) + return &fw_info_array[i]; + } + return NULL; +} + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init. + */ +static int adap_init0(struct adapter *adap) +{ + int ret; + u32 v, port_vec; + enum dev_state state; + u32 params[7], val[7]; + struct fw_caps_config_cmd caps_cmd; + int reset = 1; + + /* Grab Firmware Device Log parameters as early as possible so we have + * access to it for debugging, etc. + */ + ret = t4_init_devlog_params(adap); + if (ret < 0) + return ret; + + /* Contact FW, advertising Master capability */ + ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); + if (ret < 0) { + dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", + ret); + return ret; + } + if (ret == adap->mbox) + adap->flags |= MASTER_PF; + + /* + * If we're the Master PF Driver and the device is uninitialized, + * then let's consider upgrading the firmware ... (We always want + * to check the firmware version number in order to A. get it for + * later reporting and B. to warn if the currently loaded firmware + * is excessively mismatched relative to the driver.) + */ + t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_tp_version(adap, &adap->params.tp_vers); + if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { + struct fw_info *fw_info; + struct fw_hdr *card_fw; + const struct firmware *fw; + const u8 *fw_data = NULL; + unsigned int fw_size = 0; + + /* This is the firmware whose headers the driver was compiled + * against + */ + fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); + if (fw_info == NULL) { + dev_err(adap->pdev_dev, + "unable to get firmware info for chip %d.\n", + CHELSIO_CHIP_VERSION(adap->params.chip)); + return -EINVAL; + } + + /* allocate memory to read the header of the firmware on the + * card + */ + card_fw = t4_alloc_mem(sizeof(*card_fw)); + + /* Get FW from /lib/firmware/ */ + ret = reject_firmware(&fw, fw_info->fw_mod_name, + adap->pdev_dev); + if (ret < 0) { + dev_err(adap->pdev_dev, + "unable to load firmware image %s, error %d\n", + fw_info->fw_mod_name, ret); + } else { + fw_data = fw->data; + fw_size = fw->size; + } + + /* upgrade FW logic */ + ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, + state, &reset); + + /* Cleaning up */ + release_firmware(fw); + t4_free_mem(card_fw); + + if (ret < 0) + goto bye; + } + + /* + * Grab VPD parameters.
This should be done after we establish a + * connection to the firmware since some of the VPD parameters + * (notably the Core Clock frequency) are retrieved via requests to + * the firmware. On the other hand, we need these fairly early on + * so we do this right after getting ahold of the firmware. + */ + ret = get_vpd_params(adap, &adap->params.vpd); + if (ret < 0) + goto bye; + + /* + * Find out what ports are available to us. Note that we need to do + * this before calling adap_init0_no_config() since it needs nports + * and portvec ... + */ + v = + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); + if (ret < 0) + goto bye; + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + + /* If the firmware is initialized already, emit a simple note to that + * effect. Otherwise, it's time to try initializing the adapter. + */ + if (state == DEV_STATE_INIT) { + dev_info(adap->pdev_dev, "Coming up as %s: "\ + "Adapter already initialized\n", + adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); + } else { + dev_info(adap->pdev_dev, "Coming up as MASTER: "\ + "Initializing adapter\n"); + + /* Find out whether we're dealing with a version of the + * firmware which has configuration file support. + */ + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, + params, val); + + /* If the firmware doesn't support Configuration Files, + * return an error. + */ + if (ret < 0) { + dev_err(adap->pdev_dev, "firmware doesn't support " + "Firmware Configuration Files\n"); + goto bye; + } + + /* The firmware provides us with a memory buffer where we can + * load a Configuration File from the host if we want to + * override the Configuration File in flash. + */ + ret = adap_init0_config(adap, reset); + if (ret == -ENOENT) { + dev_err(adap->pdev_dev, "no Configuration File " + "present on adapter.\n"); + goto bye; + } + if (ret < 0) { + dev_err(adap->pdev_dev, "could not initialize " + "adapter, error %d\n", -ret); + goto bye; + } + } + + /* Give the SGE code a chance to pull in anything that it needs ... + * Note that this must be called after we retrieve our VPD parameters + * in order to know how to convert core ticks to seconds, etc. + */ + ret = t4_sge_init(adap); + if (ret < 0) + goto bye; + + if (is_bypass_device(adap->pdev->device)) + adap->params.bypass = 1; + + /* + * Grab some of our basic fundamental operating parameters.
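+ * The FW_PARAM_DEV()/FW_PARAM_PFVF() helpers below just assemble the
+ * 32-bit parameter identifiers (mnemonic plus X/Y/Z fields) that
+ * t4_query_params() expects.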
+ */ +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0) + + params[0] = FW_PARAM_PFVF(EQ_START); + params[1] = FW_PARAM_PFVF(L2T_START); + params[2] = FW_PARAM_PFVF(L2T_END); + params[3] = FW_PARAM_PFVF(FILTER_START); + params[4] = FW_PARAM_PFVF(FILTER_END); + params[5] = FW_PARAM_PFVF(IQFLINT_START); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val); + if (ret < 0) + goto bye; + adap->sge.egr_start = val[0]; + adap->l2t_start = val[1]; + adap->l2t_end = val[2]; + adap->tids.ftid_base = val[3]; + adap->tids.nftids = val[4] - val[3] + 1; + adap->sge.ingr_start = val[5]; + + /* qids (ingress/egress) returned from firmware can be anywhere + * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. + * Hence the driver needs to allocate memory for this range to + * store the queue info. Get the highest IQFLINT/EQ index returned + * in FW_EQ_*_CMD.alloc command. + */ + params[0] = FW_PARAM_PFVF(EQ_END); + params[1] = FW_PARAM_PFVF(IQFLINT_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; + adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; + + adap->sge.egr_map = kcalloc(adap->sge.egr_sz, + sizeof(*adap->sge.egr_map), GFP_KERNEL); + if (!adap->sge.egr_map) { + ret = -ENOMEM; + goto bye; + } + + adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, + sizeof(*adap->sge.ingr_map), GFP_KERNEL); + if (!adap->sge.ingr_map) { + ret = -ENOMEM; + goto bye; + } + + /* Allocate the memory for the various egress queue bitmaps, + * i.e. starving_fl and txq_maperr. + */ + adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), + sizeof(long), GFP_KERNEL); + if (!adap->sge.starving_fl) { + ret = -ENOMEM; + goto bye; + } + + adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), + sizeof(long), GFP_KERNEL); + if (!adap->sge.txq_maperr) { + ret = -ENOMEM; + goto bye; + } + + params[0] = FW_PARAM_PFVF(CLIP_START); + params[1] = FW_PARAM_PFVF(CLIP_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->clipt_start = val[0]; + adap->clipt_end = val[1]; + + /* query params related to active filter region */ + params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); + params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + /* If Active filter size is set we enable establishing + * offload connection through firmware work request + */ + if ((val[0] != val[1]) && (ret >= 0)) { + adap->flags |= FW_OFLD_CONN; + adap->tids.aftid_base = val[0]; + adap->tids.aftid_end = val[1]; + } + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); + val[0] = 1; + (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); + + /* + * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL + * capability. Earlier versions of the firmware didn't have the + * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no + * permission to use ULPTX MEMWRITE DSGL.
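+ * (T4 never supports the capability, so the query below is only
+ * issued for T5 and later chips.)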
+ */ + if (is_t4(adap->params.chip)) { + adap->params.ulptx_memwrite_dsgl = false; + } else { + params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, + 1, params, val); + adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); + } + + /* + * Get device capabilities so we can determine what resources we need + * to manage. + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F); + caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + + if (caps_cmd.ofldcaps) { + /* query offload-related parameters */ + params[0] = FW_PARAM_DEV(NTID); + params[1] = FW_PARAM_PFVF(SERVER_START); + params[2] = FW_PARAM_PFVF(SERVER_END); + params[3] = FW_PARAM_PFVF(TDDP_START); + params[4] = FW_PARAM_PFVF(TDDP_END); + params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + params, val); + if (ret < 0) + goto bye; + adap->tids.ntids = val[0]; + adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); + adap->tids.stid_base = val[1]; + adap->tids.nstids = val[2] - val[1] + 1; + /* + * Setup server filter region. Divide the available filter + * region into two parts. Regular filters get 1/3rd and server + * filters get 2/3rd part. This is only enabled if the workaround + * path is enabled. + * 1. For regular filters. + * 2. Server filter: These are special filters which are used + * to redirect SYN packets to the offload queue. + */ + if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { + adap->tids.sftid_base = adap->tids.ftid_base + + DIV_ROUND_UP(adap->tids.nftids, 3); + adap->tids.nsftids = adap->tids.nftids - + DIV_ROUND_UP(adap->tids.nftids, 3); + adap->tids.nftids = adap->tids.sftid_base - + adap->tids.ftid_base; + } + adap->vres.ddp.start = val[3]; + adap->vres.ddp.size = val[4] - val[3] + 1; + adap->params.ofldq_wr_cred = val[5]; + + adap->params.offload = 1; + } + if (caps_cmd.rdmacaps) { + params[0] = FW_PARAM_PFVF(STAG_START); + params[1] = FW_PARAM_PFVF(STAG_END); + params[2] = FW_PARAM_PFVF(RQ_START); + params[3] = FW_PARAM_PFVF(RQ_END); + params[4] = FW_PARAM_PFVF(PBL_START); + params[5] = FW_PARAM_PFVF(PBL_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + params, val); + if (ret < 0) + goto bye; + adap->vres.stag.start = val[0]; + adap->vres.stag.size = val[1] - val[0] + 1; + adap->vres.rq.start = val[2]; + adap->vres.rq.size = val[3] - val[2] + 1; + adap->vres.pbl.start = val[4]; + adap->vres.pbl.size = val[5] - val[4] + 1; + + params[0] = FW_PARAM_PFVF(SQRQ_START); + params[1] = FW_PARAM_PFVF(SQRQ_END); + params[2] = FW_PARAM_PFVF(CQ_START); + params[3] = FW_PARAM_PFVF(CQ_END); + params[4] = FW_PARAM_PFVF(OCQ_START); + params[5] = FW_PARAM_PFVF(OCQ_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, + val); + if (ret < 0) + goto bye; + adap->vres.qp.start = val[0]; + adap->vres.qp.size = val[1] - val[0] + 1; + adap->vres.cq.start = val[2]; + adap->vres.cq.size = val[3] - val[2] + 1; + adap->vres.ocq.start = val[4]; + adap->vres.ocq.size = val[5] - val[4] + 1; + + params[0] = FW_PARAM_DEV(MAXORDIRD_QP); + params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, + val); + if (ret < 0) { + adap->params.max_ordird_qp = 8; + adap->params.max_ird_adapter = 32 * adap->tids.ntids; + ret = 0; + } else { +
adap->params.max_ordird_qp = val[0]; + adap->params.max_ird_adapter = val[1]; + } + dev_info(adap->pdev_dev, + "max_ordird_qp %d max_ird_adapter %d\n", + adap->params.max_ordird_qp, + adap->params.max_ird_adapter); + } + if (caps_cmd.iscsicaps) { + params[0] = FW_PARAM_PFVF(ISCSI_START); + params[1] = FW_PARAM_PFVF(ISCSI_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + params, val); + if (ret < 0) + goto bye; + adap->vres.iscsi.start = val[0]; + adap->vres.iscsi.size = val[1] - val[0] + 1; + } +#undef FW_PARAM_PFVF +#undef FW_PARAM_DEV + + /* The MTU/MSS Table is initialized by now, so load their values. If + * we're initializing the adapter, then we'll make any modifications + * we want to the MTU/MSS Table and also initialize the congestion + * parameters. + */ + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + if (state != DEV_STATE_INIT) { + int i; + + /* The default MTU Table contains values 1492 and 1500. + * However, for TCP, it's better to have two values which are + * a multiple of 8 +/- 4 bytes apart near this popular MTU. + * This allows us to have a TCP Data Payload which is a + * multiple of 8 regardless of what combination of TCP Options + * are in use (always a multiple of 4 bytes) which is + * important for performance reasons. For instance, if no + * options are in use, then we have a 20-byte IP header and a + * 20-byte TCP header. In this case, a 1500-byte MSS would + * result in a TCP Data Payload of 1500 - 40 == 1460 bytes + * which is not a multiple of 8. So using an MSS of 1488 in + * this case results in a TCP Data Payload of 1448 bytes which + * is a multiple of 8. On the other hand, if 12-byte TCP Time + * Stamps have been negotiated, then an MTU of 1500 bytes + * results in a TCP Data Payload of 1448 bytes which, as + * above, is a multiple of 8 bytes ... + */ + for (i = 0; i < NMTUS; i++) + if (adap->params.mtus[i] == 1492) { + adap->params.mtus[i] = 1488; + break; + } + + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + } + t4_init_sge_params(adap); + t4_init_tp_params(adap); + adap->flags |= FW_OK; + return 0; + + /* + * Something bad happened. If a command timed out or failed with EIO + * FW does not operate within its spec or something catastrophic + * happened to HW/FW, stop issuing commands. + */ +bye: + kfree(adap->sge.egr_map); + kfree(adap->sge.ingr_map); + kfree(adap->sge.starving_fl); + kfree(adap->sge.txq_maperr); + if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, adap->mbox); + return ret; +} + +/* EEH callbacks */ + +static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + int i; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) + goto out; + + rtnl_lock(); + adap->flags &= ~FW_OK; + notify_ulds(adap, CXGB4_STATE_START_RECOVERY); + spin_lock(&adap->stats_lock); + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + + netif_device_detach(dev); + netif_carrier_off(dev); + } + spin_unlock(&adap->stats_lock); + disable_interrupts(adap); + if (adap->flags & FULL_INIT_DONE) + cxgb_down(adap); + rtnl_unlock(); + if ((adap->flags & DEV_ENABLED)) { + pci_disable_device(pdev); + adap->flags &= ~DEV_ENABLED; + } +out: return state == pci_channel_io_perm_failure ? 
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) +{ + int i, ret; + struct fw_caps_config_cmd c; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) { + pci_restore_state(pdev); + pci_save_state(pdev); + return PCI_ERS_RESULT_RECOVERED; + } + + if (!(adap->flags & DEV_ENABLED)) { + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Cannot reenable PCI " + "device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + adap->flags |= DEV_ENABLED; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_cleanup_aer_uncorrect_error_status(pdev); + + if (t4_wait_dev_ready(adap->regs) < 0) + return PCI_ERS_RESULT_DISCONNECT; + if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) + return PCI_ERS_RESULT_DISCONNECT; + adap->flags |= FW_OK; + if (adap_init1(adap, &c)) + return PCI_ERS_RESULT_DISCONNECT; + + for_each_port(adap, i) { + struct port_info *p = adap2pinfo(adap, i); + + ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, + NULL, NULL); + if (ret < 0) + return PCI_ERS_RESULT_DISCONNECT; + p->viid = ret; + p->xact_addr_filt = -1; + } + + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + setup_memwin(adap); + if (cxgb_up(adap)) + return PCI_ERS_RESULT_DISCONNECT; + return PCI_ERS_RESULT_RECOVERED; +} + +static void eeh_resume(struct pci_dev *pdev) +{ + int i; + struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap) + return; + + rtnl_lock(); + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + + if (netif_running(dev)) { + link_start(dev); + cxgb_set_rxmode(dev); + } + netif_device_attach(dev); + } + rtnl_unlock(); +} + +static const struct pci_error_handlers cxgb4_eeh = { + .error_detected = eeh_err_detected, + .slot_reset = eeh_slot_reset, + .resume = eeh_resume, +}; + +static inline bool is_x_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || + (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; +} + +static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt, + unsigned int size, unsigned int iqe_size) +{ + q->adap = adap; + cxgb4_set_rspq_intr_params(q, us, cnt); + q->iqe_len = iqe_size; + q->size = size; +} + +/* + * Perform default configuration of DMA queues depending on the number and type + * of ports we found and the number of available CPUs. Most settings can be + * modified by the admin prior to actual use. + */ +static void cfg_queues(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int i, n10g = 0, qidx = 0; +#ifndef CONFIG_CHELSIO_T4_DCB + int q10g = 0; +#endif + int ciq_size; + + for_each_port(adap, i) + n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); +#ifdef CONFIG_CHELSIO_T4_DCB + /* For Data Center Bridging support we need to be able to support up + * to 8 Traffic Priorities; each of which will be assigned to its + * own TX Queue in order to prevent Head-Of-Line Blocking. + */ + if (adap->params.nports * 8 > MAX_ETH_QSETS) { + dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", + MAX_ETH_QSETS, adap->params.nports * 8); + BUG_ON(1); + } + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + pi->nqsets = 8; + qidx += pi->nqsets; + } +#else /* !CONFIG_CHELSIO_T4_DCB */ + /* + * We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. 
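+ * Concretely, each 10G-capable port gets
+ * (MAX_ETH_QSETS - #non-10G ports) / #10G ports queue sets, further
+ * capped by netif_get_num_default_rss_queues().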
+ */ + if (n10g) + q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + if (q10g > netif_get_num_default_rss_queues()) + q10g = netif_get_num_default_rss_queues(); + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; + qidx += pi->nqsets; + } +#endif /* !CONFIG_CHELSIO_T4_DCB */ + + s->ethqsets = qidx; + s->max_ethqsets = qidx; /* MSI-X may lower it later */ + + if (is_offload(adap)) { + /* + * For offload we use 1 queue/channel if all ports are up to 1G, + * otherwise we divide all available queues amongst the channels + * capped by the number of available cores. + */ + if (n10g) { + i = min_t(int, ARRAY_SIZE(s->ofldrxq), + num_online_cpus()); + s->ofldqsets = roundup(i, adap->params.nports); + } else + s->ofldqsets = adap->params.nports; + /* For RDMA one Rx queue per channel suffices */ + s->rdmaqs = adap->params.nports; + /* Try and allow at least 1 CIQ per cpu rounding down + * to the number of ports, with a minimum of 1 per port. + * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port. + * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port. + * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port. + */ + s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus()); + s->rdmaciqs = (s->rdmaciqs / adap->params.nports) * + adap->params.nports; + s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports); + } + + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { + struct sge_eth_rxq *r = &s->ethrxq[i]; + + init_rspq(adap, &r->rspq, 5, 10, 1024, 64); + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) + s->ethtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) + s->ctrlq[i].q.size = 512; + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) + s->ofldtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { + struct sge_ofld_rxq *r = &s->ofldrxq[i]; + + init_rspq(adap, &r->rspq, 5, 1, 1024, 64); + r->rspq.uld = CXGB4_ULD_ISCSI; + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { + struct sge_ofld_rxq *r = &s->rdmarxq[i]; + + init_rspq(adap, &r->rspq, 5, 1, 511, 64); + r->rspq.uld = CXGB4_ULD_RDMA; + r->fl.size = 72; + } + + ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; + if (ciq_size > SGE_MAX_IQ_SIZE) { + CH_WARN(adap, "CIQ size too small for available IQs\n"); + ciq_size = SGE_MAX_IQ_SIZE; + } + + for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) { + struct sge_ofld_rxq *r = &s->rdmaciq[i]; + + init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64); + r->rspq.uld = CXGB4_ULD_RDMA; + } + + init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); + init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64); +} + +/* + * Reduce the number of Ethernet queues across all ports to at most n. + * n provides at least one queue per port. 
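+ * Queue sets are trimmed one at a time, round-robin across the ports,
+ * and the per-port first_qset offsets are then recomputed so the
+ * ranges stay contiguous.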
+ */ +static void reduce_ethqs(struct adapter *adap, int n) +{ + int i; + struct port_info *pi; + + while (n < adap->sge.ethqsets) + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->nqsets > 1) { + pi->nqsets--; + adap->sge.ethqsets--; + if (adap->sge.ethqsets <= n) + break; + } + } + + n = 0; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + pi->first_qset = n; + n += pi->nqsets; + } +} + +/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ +#define EXTRA_VECS 2 + +static int enable_msix(struct adapter *adap) +{ + int ofld_need = 0; + int i, want, need, allocated; + struct sge *s = &adap->sge; + unsigned int nchan = adap->params.nports; + struct msix_entry *entries; + + entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1), + GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < MAX_INGQ + 1; ++i) + entries[i].entry = i; + + want = s->max_ethqsets + EXTRA_VECS; + if (is_offload(adap)) { + want += s->rdmaqs + s->rdmaciqs + s->ofldqsets; + /* need nchan for each possible ULD */ + ofld_need = 3 * nchan; + } +#ifdef CONFIG_CHELSIO_T4_DCB + /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for + * each port. + */ + need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; +#else + need = adap->params.nports + EXTRA_VECS + ofld_need; +#endif + allocated = pci_enable_msix_range(adap->pdev, entries, need, want); + if (allocated < 0) { + dev_info(adap->pdev_dev, "not enough MSI-X vectors left," + " not using MSI-X\n"); + kfree(entries); + return allocated; + } + + /* Distribute available vectors to the various queue groups. + * Every group gets its minimum requirement and NIC gets top + * priority for leftovers. + */ + i = allocated - EXTRA_VECS - ofld_need; + if (i < s->max_ethqsets) { + s->max_ethqsets = i; + if (i < s->ethqsets) + reduce_ethqs(adap, i); + } + if (is_offload(adap)) { + if (allocated < want) { + s->rdmaqs = nchan; + s->rdmaciqs = nchan; + } + + /* leftovers go to OFLD */ + i = allocated - EXTRA_VECS - s->max_ethqsets - + s->rdmaqs - s->rdmaciqs; + s->ofldqsets = (i / nchan) * nchan; /* round down */ + } + for (i = 0; i < allocated; ++i) + adap->msix_info[i].vec = entries[i].vector; + + kfree(entries); + return 0; +} + +#undef EXTRA_VECS + +static int init_rss(struct adapter *adap) +{ + unsigned int i, j; + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); + if (!pi->rss) + return -ENOMEM; + for (j = 0; j < pi->rss_size; j++) + pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets); + } + return 0; +} + +static void print_port_info(const struct net_device *dev) +{ + char buf[80]; + char *bufp = buf; + const char *spd = ""; + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + + if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) + spd = " 2.5 GT/s"; + else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) + spd = " 5 GT/s"; + else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) + spd = " 8 GT/s"; + + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + bufp += sprintf(bufp, "100/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + bufp += sprintf(bufp, "1000/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + bufp += sprintf(bufp, "10G/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) + bufp += sprintf(bufp, "40G/"); + if (bufp != buf) + --bufp; + sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); + + netdev_info(dev, "Chelsio %s 
rev %d %s %sNIC PCIe x%d%s%s\n", + adap->params.vpd.id, + CHELSIO_CHIP_RELEASE(adap->params.chip), buf, + is_offload(adap) ? "R" : "", adap->params.pci.width, spd, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? " MSI" : ""); + netdev_info(dev, "S/N: %s, P/N: %s\n", + adap->params.vpd.sn, adap->params.vpd.pn); +} + +static void enable_pcie_relaxed_ordering(struct pci_dev *dev) +{ + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); +} + +/* + * Free the following resources: + * - memory used for tables + * - MSI/MSI-X + * - net devices + * - resources FW is holding for us + */ +static void free_some_resources(struct adapter *adapter) +{ + unsigned int i; + + t4_free_mem(adapter->l2t); + t4_free_mem(adapter->tids.tid_tab); + kfree(adapter->sge.egr_map); + kfree(adapter->sge.ingr_map); + kfree(adapter->sge.starving_fl); + kfree(adapter->sge.txq_maperr); + disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) { + kfree(adap2pinfo(adapter, i)->rss); + free_netdev(adapter->port[i]); + } + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, adapter->fn); +} + +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ + NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) +#define SEGMENT_SIZE 128 + +static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int func, i, err, s_qpp, qpp, num_seg; + struct port_info *pi; + bool highdma = false; + struct adapter *adapter = NULL; + void __iomem *regs; + + printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + /* Just info, some other driver may have claimed the device. */ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + return err; + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out_release_regions; + } + + regs = pci_ioremap_bar(pdev, 0); + if (!regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_disable_device; + } + + err = t4_wait_dev_ready(regs); + if (err < 0) + goto out_unmap_bar0; + + /* We control everything through one PF */ + func = SOURCEPF_G(readl(regs + PL_WHOAMI_A)); + if (func != ent->driver_data) { + iounmap(regs); + pci_disable_device(pdev); + pci_save_state(pdev); /* to restore SR-IOV later */ + goto sriov; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + highdma = true; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " + "coherent allocations\n"); + goto out_unmap_bar0; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_unmap_bar0; + } + } + + pci_enable_pcie_error_reporting(pdev); + enable_pcie_relaxed_ordering(pdev); + pci_set_master(pdev); + pci_save_state(pdev); + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_unmap_bar0; + } + + adapter->workq = create_singlethread_workqueue("cxgb4"); + if (!adapter->workq) { + err = -ENOMEM; + goto out_free_adapter; + } + + /* PCI device has been enabled */ + adapter->flags |= DEV_ENABLED; + + adapter->regs = regs; + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->mbox = func; + adapter->fn = func; + adapter->msg_enable = dflt_msg_enable; + memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); + + 
spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->tid_release_lock); + spin_lock_init(&adapter->win0_lock); + + INIT_WORK(&adapter->tid_release_task, process_tid_release_list); + INIT_WORK(&adapter->db_full_task, process_db_full); + INIT_WORK(&adapter->db_drop_task, process_db_drop); + + err = t4_prep_adapter(adapter); + if (err) + goto out_free_adapter; + + + if (!is_t4(adapter->params.chip)) { + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * + adapter->fn); + qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter, + SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp); + num_seg = PAGE_SIZE / SEGMENT_SIZE; + + /* Each segment size is 128B. Write coalescing is enabled only + * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the + * queue is less than the number of segments that can be + * accommodated in a page. + */ + if (qpp > num_seg) { + dev_err(&pdev->dev, + "Incorrect number of egress queues per page\n"); + err = -EINVAL; + goto out_free_adapter; + } + adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!adapter->bar2) { + dev_err(&pdev->dev, "cannot map device bar2 region\n"); + err = -ENOMEM; + goto out_free_adapter; + } + } + + setup_memwin(adapter); + err = adap_init0(adapter); + setup_memwin_rdma(adapter); + if (err) + goto out_unmap_bar; + + for_each_port(adapter, i) { + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_ETH_QSETS); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->port_id = i; + netdev->irq = pdev->irq; + + netdev->hw_features = NETIF_F_SG | TSO_FLAGS | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_RXHASH | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + if (highdma) + netdev->hw_features |= NETIF_F_HIGHDMA; + netdev->features |= netdev->hw_features; + netdev->vlan_features = netdev->features & VLAN_FEAT; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->netdev_ops = &cxgb4_netdev_ops; +#ifdef CONFIG_CHELSIO_T4_DCB + netdev->dcbnl_ops = &cxgb4_dcb_ops; + cxgb4_dcb_state_init(netdev); +#endif + cxgb4_set_ethtool_ops(netdev); + } + + pci_set_drvdata(pdev, adapter); + + if (adapter->flags & FW_OK) { + err = t4_port_init(adapter, func, func, 0); + if (err) + goto out_free_dev; + } + + /* + * Configure queues and allocate tables now, they can be needed as + * soon as the first register_netdev completes.
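+ * (The L2T, CLIP and TID tables allocated below are best-effort: if
+ * any of them fails we warn, clear adapter->params.offload and keep
+ * going with plain NIC functionality.)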
+ */ + cfg_queues(adapter); + + adapter->l2t = t4_init_l2t(); + if (!adapter->l2t) { + /* We tolerate a lack of L2T, giving up some functionality */ + dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); + adapter->params.offload = 0; + } + +#if IS_ENABLED(CONFIG_IPV6) + adapter->clipt = t4_init_clip_tbl(adapter->clipt_start, + adapter->clipt_end); + if (!adapter->clipt) { + /* We tolerate a lack of clip_table, giving up + * some functionality + */ + dev_warn(&pdev->dev, + "could not allocate Clip table, continuing\n"); + adapter->params.offload = 0; + } +#endif + if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { + dev_warn(&pdev->dev, "could not allocate TID table, " + "continuing\n"); + adapter->params.offload = 0; + } + + /* See what interrupts we'll be using */ + if (msi > 1 && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + err = init_rss(adapter); + if (err) + goto out_free_dev; + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. + */ + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); + netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); + + err = register_netdev(adapter->port[i]); + if (err) + break; + adapter->chan_map[pi->tx_chan] = i; + print_port_info(adapter->port[i]); + } + if (i == 0) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto out_free_dev; + } + if (err) { + dev_warn(&pdev->dev, "only %d net devices registered\n", i); + err = 0; + } + + if (cxgb4_debugfs_root) { + adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), + cxgb4_debugfs_root); + setup_debugfs(adapter); + } + + /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ + pdev->needs_freset = 1; + + if (is_offload(adapter)) + attach_ulds(adapter); + +sriov: +#ifdef CONFIG_PCI_IOV + if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) + if (pci_enable_sriov(pdev, num_vf[func]) == 0) + dev_info(&pdev->dev, + "instantiated %u virtual functions\n", + num_vf[func]); +#endif + return 0; + + out_free_dev: + free_some_resources(adapter); + out_unmap_bar: + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); + out_free_adapter: + if (adapter->workq) + destroy_workqueue(adapter->workq); + + kfree(adapter); + out_unmap_bar0: + iounmap(regs); + out_disable_device: + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + out_release_regions: + pci_release_regions(pdev); + return err; +} + +static void remove_one(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + +#ifdef CONFIG_PCI_IOV + pci_disable_sriov(pdev); + +#endif + + if (adapter) { + int i; + + /* Tear down per-adapter Work Queue first since it can contain + * references to our adapter data structure. + */ + destroy_workqueue(adapter->workq); + + if (is_offload(adapter)) + detach_ulds(adapter); + + disable_interrupts(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + unregister_netdev(adapter->port[i]); + + debugfs_remove_recursive(adapter->debugfs_root); + + /* If we allocated filters, free up state associated with any + * valid filters ... 
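+ * (Both the regular and the server filter ranges are walked here,
+ * i.e. nftids + nsftids entries in total.)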
+ */ + if (adapter->tids.ftid_tab) { + struct filter_entry *f = &adapter->tids.ftid_tab[0]; + for (i = 0; i < (adapter->tids.nftids + + adapter->tids.nsftids); i++, f++) + if (f->valid) + clear_filter(adapter, f); + } + + if (adapter->flags & FULL_INIT_DONE) + cxgb_down(adapter); + + free_some_resources(adapter); +#if IS_ENABLED(CONFIG_IPV6) + t4_cleanup_clip_tbl(adapter); +#endif + iounmap(adapter->regs); + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); + pci_disable_pcie_error_reporting(pdev); + if ((adapter->flags & DEV_ENABLED)) { + pci_disable_device(pdev); + adapter->flags &= ~DEV_ENABLED; + } + pci_release_regions(pdev); + synchronize_rcu(); + kfree(adapter); + } else + pci_release_regions(pdev); +} + +static struct pci_driver cxgb4_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4_pci_tbl, + .probe = init_one, + .remove = remove_one, + .shutdown = remove_one, + .err_handler = &cxgb4_eeh, +}; + +static int __init cxgb4_init_module(void) +{ + int ret; + + /* Debugfs support is optional, just warn if this fails */ + cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cxgb4_debugfs_root) + pr_warn("could not create debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4_driver); + if (ret < 0) + debugfs_remove(cxgb4_debugfs_root); + +#if IS_ENABLED(CONFIG_IPV6) + if (!inet6addr_registered) { + register_inet6addr_notifier(&cxgb4_inet6addr_notifier); + inet6addr_registered = true; + } +#endif + + return ret; +} + +static void __exit cxgb4_cleanup_module(void) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (inet6addr_registered) { + unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); + inet6addr_registered = false; + } +#endif + pci_unregister_driver(&cxgb4_driver); + debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ +} + +module_init(cxgb4_init_module); +module_exit(cxgb4_cleanup_module); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h new file mode 100644 index 000000000..78ab4d406 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -0,0 +1,312 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_OFLD_H +#define __CXGB4_OFLD_H + +#include +#include +#include +#include +#include + +/* CPL message priority levels */ +enum { + CPL_PRIORITY_DATA = 0, /* data messages */ + CPL_PRIORITY_SETUP = 1, /* connection setup messages */ + CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ + CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ + CPL_PRIORITY_ACK = 1, /* RX ACK messages */ + CPL_PRIORITY_CONTROL = 1 /* control messages */ +}; + +#define INIT_TP_WR(w, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) | \ + FW_WR_IMMDLEN_V(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID_V(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +#define INIT_TP_WR_CPL(w, cpl, tid) do { \ + INIT_TP_WR(w, tid); \ + OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ +} while (0) + +#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) | \ + FW_WR_ATOMIC_V(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID_V(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +/* Special asynchronous notification message */ +#define CXGB4_MSG_AN ((void *)1) + +struct serv_entry { + void *data; +}; + +union aopen_entry { + void *data; + union aopen_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of the TID, server TID, + * and active-open TID tables. The tables themselves are allocated dynamically. + */ +struct tid_info { + void **tid_tab; + unsigned int ntids; + + struct serv_entry *stid_tab; + unsigned long *stid_bmap; + unsigned int nstids; + unsigned int stid_base; + + union aopen_entry *atid_tab; + unsigned int natids; + unsigned int atid_base; + + struct filter_entry *ftid_tab; + unsigned int nftids; + unsigned int ftid_base; + unsigned int aftid_base; + unsigned int aftid_end; + /* Server filter region */ + unsigned int sftid_base; + unsigned int nsftids; + + spinlock_t atid_lock ____cacheline_aligned_in_smp; + union aopen_entry *afree; + unsigned int atids_in_use; + + spinlock_t stid_lock; + unsigned int stids_in_use; + + atomic_t tids_in_use; +}; + +static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) +{ + return tid < t->ntids ? t->tid_tab[tid] : NULL; +} + +static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) +{ + return atid < t->natids ? t->atid_tab[atid].data : NULL; +} + +static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) +{ + /* Is it a server filter TID? */ + if (t->nsftids && (stid >= t->sftid_base)) { + stid -= t->sftid_base; + stid += t->nstids; + } else { + stid -= t->stid_base; + } + + return stid < (t->nstids + t->nsftids) ? 
t->stid_tab[stid].data : NULL; +} + +static inline void cxgb4_insert_tid(struct tid_info *t, void *data, + unsigned int tid) +{ + t->tid_tab[tid] = data; + atomic_inc(&t->tids_in_use); +} + +int cxgb4_alloc_atid(struct tid_info *t, void *data); +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); +int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data); +void cxgb4_free_atid(struct tid_info *t, unsigned int atid); +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); +void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); + +struct in6_addr; + +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, __be16 vlan, + unsigned int queue); +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue); +int cxgb4_remove_server(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6); +int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, __be16 vlan, + unsigned int queue, + unsigned char port, unsigned char mask); +int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6); + +static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) +{ + skb_set_queue_mapping(skb, (queue << 1) | prio); +} + +enum cxgb4_uld { + CXGB4_ULD_RDMA, + CXGB4_ULD_ISCSI, + CXGB4_ULD_MAX +}; + +enum cxgb4_state { + CXGB4_STATE_UP, + CXGB4_STATE_START_RECOVERY, + CXGB4_STATE_DOWN, + CXGB4_STATE_DETACH +}; + +enum cxgb4_control { + CXGB4_CONTROL_DB_FULL, + CXGB4_CONTROL_DB_EMPTY, + CXGB4_CONTROL_DB_DROP, +}; + +struct pci_dev; +struct l2t_data; +struct net_device; +struct pkt_gl; +struct tp_tcp_stats; + +struct cxgb4_range { + unsigned int start; + unsigned int size; +}; + +struct cxgb4_virt_res { /* virtualized HW resources */ + struct cxgb4_range ddp; + struct cxgb4_range iscsi; + struct cxgb4_range stag; + struct cxgb4_range rq; + struct cxgb4_range pbl; + struct cxgb4_range qp; + struct cxgb4_range cq; + struct cxgb4_range ocq; +}; + +#define OCQ_WIN_OFFSET(pdev, vres) \ + (pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size)) + +/* + * Block of information the LLD provides to ULDs attaching to a device. 
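+ * It is what gets passed to each ULD's add() callback (see struct
+ * cxgb4_uld_info further down); several of the table pointers in it
+ * are const and are meant to be treated as read-only by the ULD.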
+ */ +struct cxgb4_lld_info { + struct pci_dev *pdev; /* associated PCI device */ + struct l2t_data *l2t; /* L2 table */ + struct tid_info *tids; /* TID table */ + struct net_device **ports; /* device ports */ + const struct cxgb4_virt_res *vr; /* assorted HW resources */ + const unsigned short *mtus; /* MTU table */ + const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ + const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */ + unsigned short nrxq; /* # of Rx queues */ + unsigned short ntxq; /* # of Tx queues */ + unsigned short nciq; /* # of concentrator IQ */ + unsigned char nchan:4; /* # of channels */ + unsigned char nports:4; /* # of ports */ + unsigned char wr_cred; /* WR 16-byte credits */ + unsigned char adapter_type; /* type of adapter */ + unsigned char fw_api_ver; /* FW API version */ + unsigned int fw_vers; /* FW version */ + unsigned int iscsi_iolen; /* iSCSI max I/O length */ + unsigned int cclk_ps; /* Core clock period in psec */ + unsigned short udb_density; /* # of user DB/page */ + unsigned short ucq_density; /* # of user CQs/page */ + unsigned short filt_mode; /* filter optional components */ + unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ + /* scheduler queue */ + void __iomem *gts_reg; /* address of GTS register */ + void __iomem *db_reg; /* address of kernel doorbell */ + int dbfifo_int_thresh; /* doorbell fifo int threshold */ + unsigned int sge_ingpadboundary; /* SGE ingress padding boundary */ + unsigned int sge_egrstatuspagesize; /* SGE egress status page size */ + unsigned int sge_pktshift; /* Padding between CPL and */ + /* packet data */ + unsigned int pf; /* Physical Function we're using */ + bool enable_fw_ofld_conn; /* Enable connection through fw */ + /* WR */ + unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */ + unsigned int max_ird_adapter; /* Max IRD memory per adapter */ + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ +}; + +struct cxgb4_uld_info { + const char *name; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); + int (*control)(void *handle, enum cxgb4_control control, ...); +}; + +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); +int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); +unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); +unsigned int cxgb4_port_chan(const struct net_device *dev); +unsigned int cxgb4_port_viid(const struct net_device *dev); +unsigned int cxgb4_port_idx(const struct net_device *dev); +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx); +unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus, + unsigned short header_size, + unsigned short data_size_max, + unsigned short data_size_align, + unsigned int *mtu_idxp); +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order); +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len); +int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); +int cxgb4_flush_eq_cache(struct net_device *dev); +void cxgb4_disable_db_coalescing(struct net_device *dev); +void cxgb4_enable_db_coalescing(struct 
net_device *dev); +int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte); +u64 cxgb4_read_sge_timestamp(struct net_device *dev); + +enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS }; +int cxgb4_bar2_sge_qregs(struct net_device *dev, + unsigned int qid, + enum cxgb4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +#endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c new file mode 100644 index 000000000..252efc293 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -0,0 +1,666 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cxgb4.h" +#include "l2t.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "t4_regs.h" +#include "t4_values.h" + +#define VLAN_NONE 0xfff + +/* identifies sync vs async L2T_WRITE_REQs */ +#define F_SYNC_WR (1 << 12) + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +struct l2t_data { + rwlock_t lock; + atomic_t nfree; /* number of free entries */ + struct l2t_entry *rover; /* starting point for next allocation */ + struct l2t_entry l2tab[L2T_SIZE]; +}; + +static inline unsigned int vlan_prio(const struct l2t_entry *e) +{ + return e->vlan >> 13; +} + +static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) +{ + if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + atomic_dec(&d->nfree); +} + +/* + * To avoid having to check address families we do not allow v4 and v6 + * neighbors to be on the same hash chain. We keep v4 entries in the first + * half of available hash buckets and v6 in the second. 
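+ * Concretely, arp_hash() below masks with L2T_HASH_MASK = L2T_SIZE / 2 - 1,
+ * so IPv4 next hops land in buckets [0, L2T_SIZE / 2), while ipv6_hash()
+ * adds L2T_SZ_HALF so IPv6 next hops land in [L2T_SIZE / 2, L2T_SIZE).
+ * addr_hash() picks between the two based on the key length (4 vs 16 bytes).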
+ */ +enum { + L2T_SZ_HALF = L2T_SIZE / 2, + L2T_HASH_MASK = L2T_SZ_HALF - 1 +}; + +static inline unsigned int arp_hash(const u32 *key, int ifindex) +{ + return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; +} + +static inline unsigned int ipv6_hash(const u32 *key, int ifindex) +{ + u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; + + return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); +} + +static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) +{ + return addr_len == 4 ? arp_hash(addr, ifindex) : + ipv6_hash(addr, ifindex); +} + +/* + * Checks if an L2T entry is for the given IP/IPv6 address. It does not check + * whether the L2T entry and the address are of the same address family. + * Callers ensure an address is only checked against L2T entries of the same + * family, something made trivial by the separation of IP and IPv6 hash chains + * mentioned above. Returns 0 if there's a match, + */ +static int addreq(const struct l2t_entry *e, const u32 *addr) +{ + if (e->v6) + return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) | + (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]); + return e->addr[0] ^ addr[0]; +} + +static void neigh_replace(struct l2t_entry *e, struct neighbour *n) +{ + neigh_hold(n); + if (e->neigh) + neigh_release(e->neigh); + e->neigh = n; +} + +/* + * Write an L2T entry. Must be called with the entry locked. + * The write may be synchronous or asynchronous. + */ +static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) +{ + struct sk_buff *skb; + struct cpl_l2t_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, + e->idx | (sync ? F_SYNC_WR : 0) | + TID_QID_V(adap->sge.fw_evtq.abs_id))); + req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync)); + req->l2t_idx = htons(e->idx); + req->vlan = htons(e->vlan); + if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK)) + memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); + memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); + + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + t4_ofld_send(adap, skb); + + if (sync && e->state != L2T_STATE_SWITCHING) + e->state = L2T_STATE_SYNC_WRITE; + return 0; +} + +/* + * Send packets waiting in an L2T entry's ARP queue. Must be called with the + * entry locked. + */ +static void send_pending(struct adapter *adap, struct l2t_entry *e) +{ + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + skb->next = NULL; + t4_ofld_send(adap, skb); + } + e->arpq_tail = NULL; +} + +/* + * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a + * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T + * index it refers to. + */ +void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) +{ + unsigned int tid = GET_TID(rpl); + unsigned int idx = tid & (L2T_SIZE - 1); + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + dev_err(adap->pdev_dev, + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, idx); + return; + } + + if (tid & F_SYNC_WR) { + struct l2t_entry *e = &adap->l2t->l2tab[idx]; + + spin_lock(&e->lock); + if (e->state != L2T_STATE_SWITCHING) { + send_pending(adap, e); + e->state = (e->neigh->nud_state & NUD_STALE) ? 
+ L2T_STATE_STALE : L2T_STATE_VALID; + } + spin_unlock(&e->lock); + } +} + +/* + * Add a packet to an L2T entry's queue of packets awaiting resolution. + * Must be called with the entry's lock held. + */ +static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +{ + skb->next = NULL; + if (e->arpq_head) + e->arpq_tail->next = skb; + else + e->arpq_head = skb; + e->arpq_tail = skb; +} + +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct adapter *adap = netdev2adap(dev); + +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) + e->state = L2T_STATE_VALID; + spin_unlock_bh(&e->lock); + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return t4_ofld_send(adap, skb); + case L2T_STATE_RESOLVING: + case L2T_STATE_SYNC_WRITE: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_SYNC_WRITE && + e->state != L2T_STATE_RESOLVING) { + spin_unlock_bh(&e->lock); + goto again; + } + arpq_enqueue(e, skb); + spin_unlock_bh(&e->lock); + + if (e->state == L2T_STATE_RESOLVING && + !neigh_event_send(e->neigh, NULL)) { + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_RESOLVING && e->arpq_head) + write_l2e(adap, e, 1); + spin_unlock_bh(&e->lock); + } + } + return 0; +} +EXPORT_SYMBOL(cxgb4_l2t_send); + +/* + * Allocate a free L2T entry. Must be called with l2t_data.lock held. + */ +static struct l2t_entry *alloc_l2e(struct l2t_data *d) +{ + struct l2t_entry *end, *e, **p; + + if (!atomic_read(&d->nfree)) + return NULL; + + /* there's definitely a free entry */ + for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) + if (atomic_read(&e->refcnt) == 0) + goto found; + + for (e = d->l2tab; atomic_read(&e->refcnt); ++e) + ; +found: + d->rover = e + 1; + atomic_dec(&d->nfree); + + /* + * The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state < L2T_STATE_SWITCHING) + for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + e->next = NULL; + break; + } + + e->state = L2T_STATE_UNUSED; + return e; +} + +/* + * Called when an L2T entry has no more users. + */ +static void t4_l2e_free(struct l2t_entry *e) +{ + struct l2t_data *d; + + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + kfree_skb(skb); + } + e->arpq_tail = NULL; + } + spin_unlock_bh(&e->lock); + + d = container_of(e, struct l2t_data, l2tab[e->idx]); + atomic_inc(&d->nfree); +} + +void cxgb4_l2t_release(struct l2t_entry *e) +{ + if (atomic_dec_and_test(&e->refcnt)) + t4_l2e_free(e); +} +EXPORT_SYMBOL(cxgb4_l2t_release); + +/* + * Update an L2T entry that was previously used for the same next hop as neigh. + * Must be called with softirqs disabled. 
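+ *
+ * The entry's new state mirrors the neighbour's state (see the body below):
+ * a changed MAC or a !NUD_VALID neighbour forces L2T_STATE_RESOLVING,
+ * NUD_CONNECTED gives L2T_STATE_VALID, anything else L2T_STATE_STALE.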
+ */ +static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +{ + unsigned int nud_state; + + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + if (neigh != e->neigh) + neigh_replace(e, neigh); + nud_state = neigh->nud_state; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || + !(nud_state & NUD_VALID)) + e->state = L2T_STATE_RESOLVING; + else if (nud_state & NUD_CONNECTED) + e->state = L2T_STATE_VALID; + else + e->state = L2T_STATE_STALE; + spin_unlock(&e->lock); +} + +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority) +{ + u8 lport; + u16 vlan; + struct l2t_entry *e; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *)neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + if (neigh->dev->flags & IFF_LOOPBACK) + lport = netdev2pinfo(physdev)->tx_chan + 4; + else + lport = netdev2pinfo(physdev)->lport; + + if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + vlan = vlan_dev_vlan_id(neigh->dev); + else + vlan = VLAN_NONE; + + write_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx && + e->vlan == vlan && e->lport == lport) { + l2t_hold(d, e); + if (atomic_read(&e->refcnt) == 1) + reuse_entry(e, neigh); + goto done; + } + + /* Need to allocate a new entry */ + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_RESOLVING; + if (neigh->dev->flags & IFF_LOOPBACK) + memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac)); + memcpy(e->addr, addr, addr_len); + e->ifindex = ifidx; + e->hash = hash; + e->lport = lport; + e->v6 = addr_len == 16; + atomic_set(&e->refcnt, 1); + neigh_replace(e, neigh); + e->vlan = vlan; + e->next = d->l2tab[hash].first; + d->l2tab[hash].first = e; + spin_unlock(&e->lock); + } +done: + write_unlock_bh(&d->lock); + return e; +} +EXPORT_SYMBOL(cxgb4_l2t_get); + +u64 cxgb4_select_ntuple(struct net_device *dev, + const struct l2t_entry *l2t) +{ + struct adapter *adap = netdev2adap(dev); + struct tp_params *tp = &adap->params.tp; + u64 ntuple = 0; + + /* Initialize each of the fields which we care about which are present + * in the Compressed Filter Tuple. + */ + if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE) + ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift; + + if (tp->port_shift >= 0) + ntuple |= (u64)l2t->lport << tp->port_shift; + + if (tp->protocol_shift >= 0) + ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; + + if (tp->vnic_shift >= 0) { + u32 viid = cxgb4_port_viid(dev); + u32 vf = FW_VIID_VIN_G(viid); + u32 pf = FW_VIID_PFN_G(viid); + u32 vld = FW_VIID_VIVLD_G(viid); + + ntuple |= (u64)(FT_VNID_ID_VF_V(vf) | + FT_VNID_ID_PF_V(pf) | + FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift; + } + + return ntuple; +} +EXPORT_SYMBOL(cxgb4_select_ntuple); + +/* + * Called when address resolution fails for an L2T entry to handle packets + * on the arpq head. If a packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. 
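+ *
+ * A handler is installed per skb with t4_set_arp_err_handler() before the
+ * skb is handed to cxgb4_l2t_send(), e.g. (handler and context names below
+ * are hypothetical):
+ *
+ *	t4_set_arp_err_handler(skb, my_ctx, my_arp_failure);
+ *	cxgb4_l2t_send(dev, skb, e);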
+ */ +static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) +{ + while (arpq) { + struct sk_buff *skb = arpq; + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + arpq = skb->next; + skb->next = NULL; + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + } +} + +/* + * Called when the host's neighbor layer makes a change to some entry that is + * loaded into the HW L2 table. + */ +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) +{ + struct l2t_entry *e; + struct sk_buff *arpq = NULL; + struct l2t_data *d = adap->l2t; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + read_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx) { + spin_lock(&e->lock); + if (atomic_read(&e->refcnt)) + goto found; + spin_unlock(&e->lock); + break; + } + read_unlock_bh(&d->lock); + return; + + found: + read_unlock(&d->lock); + + if (neigh != e->neigh) + neigh_replace(e, neigh); + + if (e->state == L2T_STATE_RESOLVING) { + if (neigh->nud_state & NUD_FAILED) { + arpq = e->arpq_head; + e->arpq_head = e->arpq_tail = NULL; + } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && + e->arpq_head) { + write_l2e(adap, e, 1); + } + } else { + e->state = neigh->nud_state & NUD_CONNECTED ? + L2T_STATE_VALID : L2T_STATE_STALE; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) + write_l2e(adap, e, 0); + } + + spin_unlock_bh(&e->lock); + + if (arpq) + handle_failed_resolution(adap, arpq); +} + +/* Allocate an L2T entry for use by a switching rule. Such need to be + * explicitly freed and while busy they are not on any hash chain, so normal + * address resolution updates do not see them. + */ +struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) +{ + struct l2t_entry *e; + + write_lock_bh(&d->lock); + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_SWITCHING; + atomic_set(&e->refcnt, 1); + spin_unlock(&e->lock); + } + write_unlock_bh(&d->lock); + return e; +} + +/* Sets/updates the contents of a switching L2T entry that has been allocated + * with an earlier call to @t4_l2t_alloc_switching. + */ +int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, + u8 port, u8 *eth_addr) +{ + e->vlan = vlan; + e->lport = port; + memcpy(e->dmac, eth_addr, ETH_ALEN); + return write_l2e(adap, e, 0); +} + +struct l2t_data *t4_init_l2t(void) +{ + int i; + struct l2t_data *d; + + d = t4_alloc_mem(sizeof(*d)); + if (!d) + return NULL; + + d->rover = d->l2tab; + atomic_set(&d->nfree, L2T_SIZE); + rwlock_init(&d->lock); + + for (i = 0; i < L2T_SIZE; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + spin_lock_init(&d->l2tab[i].lock); + atomic_set(&d->l2tab[i].refcnt, 0); + } + return d; +} + +static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) +{ + struct l2t_entry *l2tab = seq->private; + + return pos >= L2T_SIZE ? NULL : &l2tab[pos]; +} + +static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; +} + +static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + v = l2t_get_idx(seq, *pos); + if (v) + ++*pos; + return v; +} + +static void l2t_seq_stop(struct seq_file *seq, void *v) +{ +} + +static char l2e_state(const struct l2t_entry *e) +{ + switch (e->state) { + case L2T_STATE_VALID: return 'V'; + case L2T_STATE_STALE: return 'S'; + case L2T_STATE_SYNC_WRITE: return 'W'; + case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; + case L2T_STATE_SWITCHING: return 'X'; + default: + return 'U'; + } +} + +static int l2t_seq_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) + seq_puts(seq, " Idx IP address " + "Ethernet address VLAN/P LP State Users Port\n"); + else { + char ip[60]; + struct l2t_entry *e = v; + + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_SWITCHING) + ip[0] = '\0'; + else + sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); + seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", + e->idx, ip, e->dmac, + e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, + l2e_state(e), atomic_read(&e->refcnt), + e->neigh ? e->neigh->dev->name : ""); + spin_unlock_bh(&e->lock); + } + return 0; +} + +static const struct seq_operations l2t_seq_ops = { + .start = l2t_seq_start, + .next = l2t_seq_next, + .stop = l2t_seq_stop, + .show = l2t_seq_show +}; + +static int l2t_seq_open(struct inode *inode, struct file *file) +{ + int rc = seq_open(file, &l2t_seq_ops); + + if (!rc) { + struct adapter *adap = inode->i_private; + struct seq_file *seq = file->private_data; + + seq->private = adap->l2t->l2tab; + } + return rc; +} + +const struct file_operations t4_l2t_fops = { + .owner = THIS_MODULE, + .open = l2t_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h new file mode 100644 index 000000000..a30126ce9 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -0,0 +1,111 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_L2T_H +#define __CXGB4_L2T_H + +#include +#include +#include + +struct adapter; +struct l2t_data; +struct neighbour; +struct net_device; +struct file_operations; +struct cpl_l2t_write_rpl; + +/* + * Each L2T entry plays multiple roles. First of all, it keeps state for the + * corresponding entry of the HW L2 table and maintains a queue of offload + * packets awaiting address resolution. Second, it is a node of a hash table + * chain, where the nodes of the chain are linked together through their next + * pointer. Finally, each node is a bucket of a hash table, pointing to the + * first element in its chain through its first pointer. + */ +struct l2t_entry { + u16 state; /* entry state */ + u16 idx; /* entry index */ + u32 addr[4]; /* next hop IP or IPv6 address */ + int ifindex; /* neighbor's net_device's ifindex */ + struct neighbour *neigh; /* associated neighbour */ + struct l2t_entry *first; /* start of hash chain */ + struct l2t_entry *next; /* next l2t_entry on chain */ + struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ + struct sk_buff *arpq_tail; + spinlock_t lock; + atomic_t refcnt; /* entry reference count */ + u16 hash; /* hash bucket the entry is on */ + u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */ + u8 v6; /* whether entry is for IPv6 */ + u8 lport; /* associated offload logical interface */ + u8 dmac[ETH_ALEN]; /* neighbour's MAC address */ +}; + +typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb); + +/* + * Callback stored in an skb to handle address resolution failure. + */ +struct l2t_skb_cb { + void *handle; + arp_err_handler_t arp_err_handler; +}; + +#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb) + +static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle, + arp_err_handler_t handler) +{ + L2T_SKB_CB(skb)->handle = handle; + L2T_SKB_CB(skb)->arp_err_handler = handler; +} + +void cxgb4_l2t_release(struct l2t_entry *e); +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e); +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority); +u64 cxgb4_select_ntuple(struct net_device *dev, + const struct l2t_entry *l2t); +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); +struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); +int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, + u8 port, u8 *eth_addr); +struct l2t_data *t4_init_l2t(void); +void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); + +extern const struct file_operations t4_l2t_fops; +#endif /* __CXGB4_L2T_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c new file mode 100644 index 000000000..0d2eddab0 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -0,0 +1,3078 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#ifdef CONFIG_CHELSIO_T4_FCOE +#include +#endif /* CONFIG_CHELSIO_T4_FCOE */ +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_values.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +/* + * Rx buffer size. We use largish buffers if possible but settle for single + * pages under memory shortage. + */ +#if PAGE_SHIFT >= 16 +# define FL_PG_ORDER 0 +#else +# define FL_PG_ORDER (16 - PAGE_SHIFT) +#endif + +/* RX_PULL_LEN should be <= RX_COPY_THRES */ +#define RX_COPY_THRES 256 +#define RX_PULL_LEN 128 + +/* + * Main body length for sk_buffs used for Rx Ethernet packets with fragments. + * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. + */ +#define RX_PKT_SKB_LEN 512 + +/* + * Max number of Tx descriptors we clean up at a time. Should be modest as + * freeing skbs isn't cheap and it happens while holding locks. We just need + * to free packets faster than they arrive, we eventually catch up and keep + * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. + */ +#define MAX_TX_RECLAIM 16 + +/* + * Max number of Rx buffers we replenish at a time. Again keep this modest, + * allocating buffers isn't cheap either. + */ +#define MAX_RX_REFILL 16U + +/* + * Period of the Rx queue check timer. This timer is infrequent as it has + * something to do only when the system experiences severe memory shortage. + */ +#define RX_QCHECK_PERIOD (HZ / 2) + +/* + * Period of the Tx queue check timer. + */ +#define TX_QCHECK_PERIOD (HZ / 2) + +/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate + * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA + * State Machines in the same state for this amount of time (in HZ) then we'll + * issue a warning about a potential hang. We'll repeat the warning as the + * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till + * the situation clears. If the situation clears, we'll note that as well. 
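+ * With RX_QCHECK_PERIOD = HZ / 2, the values below flag a channel that has
+ * been stuck in one state for about a second and repeat the warning roughly
+ * every ten seconds (20 * RX_QCHECK_PERIOD = 10 * HZ).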
+ */
+#define SGE_IDMA_WARN_THRESH (1 * HZ)
+#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
+
+/*
+ * Max number of Tx descriptors to be reclaimed by the Tx timer.
+ */
+#define MAX_TIMER_TX_RECLAIM 100
+
+/*
+ * Timer index used when backing off due to memory shortage.
+ */
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
+/*
+ * Suspension threshold for non-Ethernet Tx queues. We require enough room
+ * for a full sized WR.
+ */
+#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+struct tx_sw_desc {		/* SW state per Tx descriptor */
+	struct sk_buff *skb;
+	struct ulptx_sgl *sgl;
+};
+
+struct rx_sw_desc {		/* SW state per Rx descriptor */
+	struct page *page;
+	dma_addr_t dma_addr;
+};
+
+/*
+ * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
+ * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
+ * We could easily support more but there doesn't seem to be much need for
+ * that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+					  unsigned int mtu)
+{
+	struct sge *s = &adapter->sge;
+
+	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array. We also use bit 4 to flag that the buffer has been
+ * unmapped for DMA, but this is of course never sent to the hardware and is
+ * only used to prevent double unmappings. All of the above requires that the
+ * Free List Buffers which we allocate have the bottom 5 bits free (0) -- i.e.
+ * are aligned to 32 bytes or a larger power of 2. Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
+ */
+enum {
+	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
+	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
+	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
+
+	/*
+	 * XXX We shouldn't depend on being able to use these indices.
+	 * XXX Especially when some other Master PF has initialized the
+	 * XXX adapter or we use the Firmware Configuration File. We
+	 * XXX should really search through the Host Buffer Size register
+	 * XXX array for the appropriately sized buffer indices.
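+	 *
+	 * As an example of the encoding, a rx_sw_desc.dma_addr with
+	 * (dma_addr & RX_BUF_SIZE) == RX_LARGE_PG_BUF describes a large
+	 * page buffer; get_buf_addr() masks off RX_BUF_FLAGS to recover
+	 * the bus address and is_buf_mapped() tests RX_UNMAPPED_BUF.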
+ */ + RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */ + RX_LARGE_PG_BUF = 0x1, /* buffer large (FL_PG_ORDER) page buffer */ + + RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */ + RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ +}; + +static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5}; +#define MIN_NAPI_WORK 1 + +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) +{ + return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; +} + +static inline bool is_buf_mapped(const struct rx_sw_desc *d) +{ + return !(d->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. + */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +/** + * fl_cap - return the capacity of a free-buffer list + * @fl: the FL + * + * Returns the capacity of a free-buffer list. The capacity is less than + * the size because one descriptor needs to be left unpopulated, otherwise + * HW will think the FL is empty. + */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - 8; /* 1 descriptor = 8 buffers */ +} + +/** + * fl_starving - return whether a Free List is starving. + * @adapter: pointer to the adapter + * @fl: the Free List + * + * Tests specified Free List to see whether the number of buffers + * available to the hardware has falled below our "starvation" + * threshold. + */ +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) +{ + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; +} + +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); + + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); +out_err: + return -ENOMEM; +} + +#ifdef CONFIG_NEED_DMA_MAP_STATE +static void unmap_skb(struct device *dev, const struct sk_buff *skb, + const dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) + dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE); +} + +/** + * deferred_unmap_destructor - unmap a packet when it is freed + * @skb: the packet + * + * This is the packet destructor used for Tx packets that need to remain + * mapped until they are freed rather than until their Tx descriptors are + * freed. 
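+ *
+ * The DMA addresses to unmap are read back from skb->head, where the
+ * sender is expected to have stashed them when it set this destructor.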
+ */ +static void deferred_unmap_destructor(struct sk_buff *skb) +{ + unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); +} +#endif + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *q) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { +unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)q->stat) { + p = (const struct ulptx_sge_pair *)q->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)q->stat) { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)q->stat) + p = (const struct ulptx_sge_pair *)q->desc; + addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : + *(const __be64 *)q->desc; + dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @adapter: the adapter + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct adapter *adap, struct sge_txq *q, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + struct device *dev = adap->pdev_dev; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->skb) { /* an SGL is present */ + if (unmap) + unmap_sgl(dev, d->skb, d->sgl, q); + dev_consume_skb_any(d->skb); + d->skb = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a Tx queue. + */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + hw_cidx -= q->cidx; + return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @adap: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue locked. 
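+ *
+ * At most MAX_TX_RECLAIM descriptors are freed per call so the lock hold
+ * time stays bounded; the fast path simply calls this again on the next
+ * transmit (see t4_eth_xmit() below).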
+ */ +static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + bool unmap) +{ + int avail = reclaimable(q); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the Tx lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adap, q, avail, unmap); + q->in_use -= avail; + } +} + +static inline int get_buf_size(struct adapter *adapter, + const struct rx_sw_desc *d) +{ + struct sge *s = &adapter->sge; + unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; + int buf_size; + + switch (rx_buf_size_idx) { + case RX_SMALL_PG_BUF: + buf_size = PAGE_SIZE; + break; + + case RX_LARGE_PG_BUF: + buf_size = PAGE_SIZE << s->fl_pg_order; + break; + + case RX_SMALL_MTU_BUF: + buf_size = FL_MTU_SMALL_BUFSIZE(adapter); + break; + + case RX_LARGE_MTU_BUF: + buf_size = FL_MTU_LARGE_BUFSIZE(adapter); + break; + + default: + BUG_ON(1); + } + + return buf_size; +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @adap: the adapter + * @q: the SGE free list to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE free-buffer Rx queue. The + * buffers must be made inaccessible to HW before calling this function. + */ +static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) +{ + while (n--) { + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(adap, d), + PCI_DMA_FROMDEVICE); + put_page(d->page); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current Rx buffer on an SGE free list + * @adap: the adapter + * @q: the SGE free list + * + * Unmap the current buffer on an SGE free-buffer Rx queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. + */ +static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) +{ + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(adap, d), PCI_DMA_FROMDEVICE); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + u32 val; + if (q->pend_cred >= 8) { + if (is_t4(adap->params.chip)) + val = PIDX_V(q->pend_cred / 8); + else + val = PIDX_T5_V(q->pend_cred / 8) | + DBTYPE_F; + val |= DBPRIO_F; + wmb(); + + /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), + val | QID_V(q->cntxt_id)); + } else { + writel(val | QID_V(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. 
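+			 * As with ring_tx_db() below, this keeps doorbell
+			 * writes for the same Free List from reaching the
+			 * adapter out of order when they are issued from
+			 * different CPUs.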
+ */ + wmb(); + } + q->pend_cred &= 7; + } +} + +static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, + dma_addr_t mapping) +{ + sd->page = pg; + sd->dma_addr = mapping; /* includes size low bits */ +} + +/** + * refill_fl - refill an SGE Rx buffer ring + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. If afterwards the queue is + * found critically low mark it as starving in the bitmap of starving FLs. + * + * Returns the number of buffers allocated. + */ +static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, + gfp_t gfp) +{ + struct sge *s = &adap->sge; + struct page *pg; + dma_addr_t mapping; + unsigned int cred = q->avail; + __be64 *d = &q->desc[q->pidx]; + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + int node; + + gfp |= __GFP_NOWARN; + node = dev_to_node(adap->pdev_dev); + + if (s->fl_pg_order == 0) + goto alloc_small_pages; + + /* + * Prefer large buffers + */ + while (n) { + pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); + if (unlikely(!pg)) { + q->large_alloc_failed++; + break; /* fall back to single pages */ + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, + PAGE_SIZE << s->fl_pg_order, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + __free_pages(pg, s->fl_pg_order); + goto out; /* do not try small pages for this error */ + } + mapping |= RX_LARGE_PG_BUF; + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + n--; + } + +alloc_small_pages: + while (n--) { + pg = alloc_pages_node(node, gfp, 0); + if (unlikely(!pg)) { + q->alloc_failed++; + break; + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + put_page(pg); + goto out; + } + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + } + +out: cred = q->avail - cred; + q->pend_cred += cred; + ring_fl_db(adap, q); + + if (unlikely(fl_starving(adap, q))) { + smp_wmb(); + set_bit(q->cntxt_id - adap->sge.egr_start, + adap->sge.starving_fl); + } + + return cred; +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * @stat_size: extra space in HW ring for status information + * @node: preferred node for memory allocations + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). 
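+ * An Rx free list, for instance, pairs its __be64 hardware descriptors
+ * with one struct rx_sw_desc of metadata per entry (the q->sdesc arrays
+ * used throughout this file).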
The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. + */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size, int node) +{ + size_t len = nelem * elem_size + stat_size; + void *s = NULL; + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + + if (!p) + return NULL; + if (sw_size) { + s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node); + + if (!s) { + dma_free_coherent(dev, len, p, *phys); + return NULL; + } + } + if (metadata) + *(void **)metadata = s; + memset(p, 0, len); + return p; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... + */ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + BUG_ON(n > SGE_MAX_WR_LEN / 8); + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. Return value corresponds to headroom required. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + int hdrlen = skb_shinfo(skb)->gso_size ? + sizeof(struct cpl_tx_pkt_lso_core) : 0; + + hdrlen += sizeof(struct cpl_tx_pkt); + if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) + return hdrlen; + return 0; +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @skb: the packet + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + int hdrlen = is_eth_imm(skb); + + /* If the skb is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + + if (hdrlen) + return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); + + /* Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. 
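+	 * (sgl_len() above gives the DSGL portion; e.g. a packet with a
+	 * linear area plus three page fragments needs sgl_len(4) = 7 flits
+	 * for the DSGL alone.)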
We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * calc_tx_descs - calculate the number of Tx descriptors for a packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +{ + return flits_to_desc(calc_tx_flits(skb)); +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @skb: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of bus addresses for the SGL elements + * + * Generates a gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * right after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(skb_frag_size(&si->frags[0])); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | + ULPTX_NSGE_V(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? 
buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); + to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)q->stat; + memcpy(q->desc, (u8 *)buf + part0, part1); + end = (void *)q->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *end = 0; +} + +/* This function copies 64 byte coalesced work request to + * memory mapped BAR2 space. For coalesced WR SGE fetches + * data from the FIFO instead of from Host. + */ +static void cxgb_pio_copy(u64 __iomem *dst, u64 *src) +{ + int count = 8; + + while (count) { + writeq(*src, dst); + src++; + dst++; + count--; + } +} + +/** + * ring_tx_db - check and potentially ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) +{ + wmb(); /* write descriptors before telling HW */ + + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { + u32 val = PIDX_V(n); + unsigned long flags; + + /* For T4 we need to participate in the Doorbell Recovery + * mechanism. + */ + spin_lock_irqsave(&q->db_lock, flags); + if (!q->db_disabled) + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), + QID_V(q->cntxt_id) | val); + else + q->db_pidx_inc += n; + q->db_pidx = q->pidx; + spin_unlock_irqrestore(&q->db_lock, flags); + } else { + u32 val = PIDX_T5_V(n); + + /* T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & DBPRIO_F); + + /* If we're only writing a single TX Descriptor and we can use + * Inferred QID registers, we can use the Write Combining + * Gather Buffer; otherwise we use the simple doorbell. + */ + if (n == 1 && q->bar2_qid == 0) { + int index = (q->pidx + ? (q->pidx - 1) + : (q->size - 1)); + u64 *wr = (u64 *)&q->desc[index]; + + cxgb_pio_copy((u64 __iomem *) + (q->bar2_addr + SGE_UDB_WCDOORBELL), + wr); + } else { + writel(val | QID_V(q->bar2_qid), + q->bar2_addr + SGE_UDB_KDOORBELL); + } + + /* This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. 
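+		 * (The Write Combined path copies exactly one 64-byte
+		 * descriptor, eight 64-bit writes in cxgb_pio_copy() above,
+		 * which is why it is only taken for n == 1.)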
+ */ + wmb(); + } +} + +/** + * inline_tx_skb - inline a packet's data into Tx descriptors + * @skb: the packet + * @q: the Tx queue where the packet will be inlined + * @pos: starting position in the Tx queue where to inline the packet + * + * Inline a packet's contents directly into Tx descriptors, starting at + * the given position within the Tx DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, + void *pos) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, q->desc, skb->len - left); + pos = (void *)q->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +static void eth_txq_stop(struct sge_eth_txq *q) +{ + netif_tx_stop_queue(q->txq); + q->q.stops++; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +#ifdef CONFIG_CHELSIO_T4_FCOE +static inline int +cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, + const struct port_info *pi, u64 *cntrl) +{ + const struct cxgb_fcoe *fcoe = &pi->fcoe; + + if (!(fcoe->flags & CXGB_FCOE_ENABLED)) + return 0; + + if (skb->protocol != htons(ETH_P_FCOE)) + return 0; + + skb_reset_mac_header(skb); + skb->mac_len = sizeof(struct ethhdr); + + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); + + if (!cxgb_fcoe_sof_eof_supported(adap, skb)) + return -ENOTSUPP; + + /* FC CRC offload */ + *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) | + TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS | + TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) | + TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) | + TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END); + return 0; +} +#endif /* CONFIG_CHELSIO_T4_FCOE */ + +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. 
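+ *
+ * In outline: completed descriptors are reclaimed first, the number of
+ * flits/descriptors for the packet is computed, and the queue is stopped
+ * if fewer than ETHTXQ_STOP_THRES descriptors would remain. A
+ * fw_eth_tx_pkt_wr is then built (with an LSO CPL for GSO packets),
+ * small packets are inlined into the descriptor ring while larger ones
+ * get a DSGL, and finally the doorbell is rung via ring_tx_db().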
+ */ +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int len; + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adap; + struct sge_eth_txq *q; + const struct port_info *pi; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + bool immediate = false; +#ifdef CONFIG_CHELSIO_T4_FCOE + int err; +#endif /* CONFIG_CHELSIO_T4_FCOE */ + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. + */ + if (unlikely(skb->len < ETH_HLEN)) { +out_free: dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + pi = netdev_priv(dev); + adap = pi->adapter; + qidx = skb_get_queue_mapping(skb); + q = &adap->sge.ethtxq[qidx + pi->first_qset]; + + reclaim_completed_tx(adap, &q->q, true); + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + +#ifdef CONFIG_CHELSIO_T4_FCOE + err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); + if (unlikely(err == -ENOTSUPP)) + goto out_free; +#endif /* CONFIG_CHELSIO_T4_FCOE */ + + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + + if (unlikely(credits < 0)) { + eth_txq_stop(q); + dev_err(adap->pdev_dev, + "%s: Tx ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (is_eth_imm(skb)) + immediate = true; + + if (!immediate && + unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { + q->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + eth_txq_stop(q); + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + } + + wr = (void *)&q->q.desc[q->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + wr->r3 = cpu_to_be64(0); + end = (u64 *)wr + flits; + + len = immediate ? skb->len : 0; + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso *lso = (void *)wr; + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + len += sizeof(*lso); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); + lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len / 4) | + LSO_IPHDR_LEN(l3hdr_len / 4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->c.ipid_ofst = htons(0); + lso->c.mss = htons(ssi->gso_size); + lso->c.seqno_offset = htonl(0); + if (is_t4(adap->params.chip)) + lso->c.len = htonl(skb->len); + else + lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len)); + cpl = (void *)(lso + 1); + cntrl = TXPKT_CSUM_TYPE(v6 ? 
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len); + q->tso++; + q->tx_cso += ssi->gso_segs; + } else { + len += sizeof(*cpl); + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(len)); + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + q->tx_cso++; + } + } + + if (skb_vlan_tag_present(skb)) { + q->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb)); +#ifdef CONFIG_CHELSIO_T4_FCOE + if (skb->protocol == htons(ETH_P_FCOE)) + cntrl |= TXPKT_VLAN( + ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); +#endif /* CONFIG_CHELSIO_T4_FCOE */ + } + + cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn)); + cpl->pack = htons(0); + cpl->len = htons(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + if (immediate) { + inline_tx_skb(skb, &q->q, cpl + 1); + dev_consume_skb_any(skb); + } else { + int last_desc; + + write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + skb_orphan(skb); + + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + } + + txq_advance(&q->q, ndesc); + + ring_tx_db(adap, &q->q, ndesc); + return NETDEV_TX_OK; +} + +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any sk_buffs to release. + */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_CTRL_WR_LEN; +} + +/** + * ctrlq_check_stop - check if a control queue is full and should stop + * @q: the queue + * @wr: most recent WR written to the queue + * + * Check if a control queue has become full and should be stopped. + * We clean up control queue descriptors very lazily, only when we are out. + * If the queue is still full after reclaiming any completed descriptors + * we suspend it and have the last WR wake it up. + */ +static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) +{ + reclaim_completed_tx_imm(&q->q); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); + q->q.stops++; + q->full = 1; + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @skb: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. 
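+ * If the control queue is currently full the packet is placed on the
+ * queue's pending sendq and transmitted later when the queue is
+ * restarted.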
+ */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + + if (unlikely(!is_imm(skb))) { + WARN_ON(1); + dev_kfree_skb(skb); + return NET_XMIT_DROP; + } + + ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); + spin_lock(&q->sendq.lock); + + if (unlikely(q->full)) { + skb->priority = ndesc; /* save for restart */ + __skb_queue_tail(&q->sendq, skb); + spin_unlock(&q->sendq.lock); + return NET_XMIT_CN; + } + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) + ctrlq_check_stop(q, wr); + + ring_tx_db(q->adap, &q->q, ndesc); + spin_unlock(&q->sendq.lock); + + kfree_skb(skb); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ctrlq - restart a suspended control queue + * @data: the control queue to restart + * + * Resumes transmission on a suspended Tx control queue. + */ +static void restart_ctrlq(unsigned long data) +{ + struct sk_buff *skb; + unsigned int written = 0; + struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; + + spin_lock(&q->sendq.lock); + reclaim_completed_tx_imm(&q->q); + BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ + + while ((skb = __skb_dequeue(&q->sendq)) != NULL) { + struct fw_wr_hdr *wr; + unsigned int ndesc = skb->priority; /* previously saved */ + + /* + * Write descriptors and free skbs outside the lock to limit + * wait times. q->full is still set so new skbs will be queued. + */ + spin_unlock(&q->sendq.lock); + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + kfree_skb(skb); + + written += ndesc; + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + unsigned long old = q->q.stops; + + ctrlq_check_stop(q, wr); + if (q->q.stops != old) { /* suspended anew */ + spin_lock(&q->sendq.lock); + goto ringdb; + } + } + if (written > 16) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + spin_lock(&q->sendq.lock); + } + q->full = 0; +ringdb: if (written) + ring_tx_db(q->adap, &q->q, written); + spin_unlock(&q->sendq.lock); +} + +/** + * t4_mgmt_tx - send a management message + * @adap: the adapter + * @skb: the packet containing the management message + * + * Send a management message through control queue 0. + */ +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); + local_bh_enable(); + return ret; +} + +/** + * is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN; +} + +/** + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. 
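+ * Immediate packets need just skb->len bytes rounded up to flits;
+ * otherwise the header flits plus the gather list entries for the
+ * payload fragments are counted.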
+ */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + + flits = skb_transport_offset(skb) / 8U; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb_tail_pointer(skb) != skb_transport_header(skb)) + cnt++; + return flits + sgl_len(cnt); +} + +/** + * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion + * @adap: the adapter + * @q: the queue to stop + * + * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting + * inability to map packets. A periodic timer attempts to restart + * queues so marked. + */ +static void txq_stop_maperr(struct sge_ofld_txq *q) +{ + q->mapping_err++; + q->q.stops++; + set_bit(q->q.cntxt_id - q->adap->sge.egr_start, + q->adap->sge.txq_maperr); +} + +/** + * ofldtxq_stop - stop an offload Tx queue that has become full + * @q: the queue to stop + * @skb: the packet causing the queue to become full + * + * Stops an offload Tx queue that has become full and modifies the packet + * being written to request a wakeup. + */ +static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; + + wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); + q->q.stops++; + q->full = 1; +} + +/** + * service_ofldq - restart a suspended offload queue + * @q: the offload queue + * + * Services an offload Tx queue by moving packets from its packet queue + * to the HW Tx ring. The function starts and ends with the queue locked. + */ +static void service_ofldq(struct sge_ofld_txq *q) +{ + u64 *pos; + int credits; + struct sk_buff *skb; + unsigned int written = 0; + unsigned int flits, ndesc; + + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { + /* + * We drop the lock but leave skb on sendq, thus retaining + * exclusive access to the state of the queue. + */ + spin_unlock(&q->sendq.lock); + + reclaim_completed_tx(q->adap, &q->q, false); + + flits = skb->priority; /* previously saved */ + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + BUG_ON(credits < 0); + if (unlikely(credits < TXQ_STOP_THRES)) + ofldtxq_stop(q, skb); + + pos = (u64 *)&q->q.desc[q->q.pidx]; + if (is_ofld_imm(skb)) + inline_tx_skb(skb, &q->q, pos); + else if (map_skb(q->adap->pdev_dev, skb, + (dma_addr_t *)skb->head)) { + txq_stop_maperr(q); + spin_lock(&q->sendq.lock); + break; + } else { + int last_desc, hdr_len = skb_transport_offset(skb); + + memcpy(pos, skb->data, hdr_len); + write_sgl(skb, &q->q, (void *)pos + hdr_len, + pos + flits, hdr_len, + (dma_addr_t *)skb->head); +#ifdef CONFIG_NEED_DMA_MAP_STATE + skb->dev = q->adap->port[0]; + skb->destructor = deferred_unmap_destructor; +#endif + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + } + + txq_advance(&q->q, ndesc); + written += ndesc; + if (unlikely(written > 32)) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + + spin_lock(&q->sendq.lock); + __skb_unlink(skb, &q->sendq); + if (is_ofld_imm(skb)) + kfree_skb(skb); + } + if (likely(written)) + ring_tx_db(q->adap, &q->q, written); +} + +/** + * ofld_xmit - send a packet through an offload queue + * @q: the Tx offload queue + * @skb: the packet + * + * Send an offload packet through an SGE offload queue. 
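+ * The packet is appended to the queue's pending sendq; if it is the
+ * only pending packet the queue is serviced immediately.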
+ */ +static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ + spin_lock(&q->sendq.lock); + __skb_queue_tail(&q->sendq, skb); + if (q->sendq.qlen == 1) + service_ofldq(q); + spin_unlock(&q->sendq.lock); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ofldq - restart a suspended offload queue + * @data: the offload queue to restart + * + * Resumes transmission on a suspended Tx offload queue. + */ +static void restart_ofldq(unsigned long data) +{ + struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; + + spin_lock(&q->sendq.lock); + q->full = 0; /* the queue actually is completely empty now */ + service_ofldq(q); + spin_unlock(&q->sendq.lock); +} + +/** + * skb_txq - return the Tx queue an offload packet should use + * @skb: the packet + * + * Returns the Tx queue an offload packet should use as indicated by bits + * 1-15 in the packet's queue_mapping. + */ +static inline unsigned int skb_txq(const struct sk_buff *skb) +{ + return skb->queue_mapping >> 1; +} + +/** + * is_ctrl_pkt - return whether an offload packet is a control packet + * @skb: the packet + * + * Returns whether an offload packet should use an OFLD or a CTRL + * Tx queue as indicated by bit 0 in the packet's queue_mapping. + */ +static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) +{ + return skb->queue_mapping & 1; +} + +static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + unsigned int idx = skb_txq(skb); + + if (unlikely(is_ctrl_pkt(skb))) { + /* Single ctrl queue is a requirement for LE workaround path */ + if (adap->tids.nsftids) + idx = 0; + return ctrl_xmit(&adap->sge.ctrlq[idx], skb); + } + return ofld_xmit(&adap->sge.ofldtxq[idx], skb); +} + +/** + * t4_ofld_send - send an offload packet + * @adap: the adapter + * @skb: the packet + * + * Sends an offload packet. We use the packet queue_mapping to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-15 select the queue. + */ +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ofld_send(adap, skb); + local_bh_enable(); + return ret; +} + +/** + * cxgb4_ofld_send - send an offload packet + * @dev: the net device + * @skb: the packet + * + * Sends an offload packet. This is an exported version of @t4_ofld_send, + * intended for ULDs. + */ +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) +{ + return t4_ofld_send(netdev2adap(dev), skb); +} +EXPORT_SYMBOL(cxgb4_ofld_send); + +static inline void copy_frags(struct sk_buff *skb, + const struct pkt_gl *gl, unsigned int offset) +{ + int i; + + /* usually there's just one frag */ + __skb_fill_page_desc(skb, 0, gl->frags[0].page, + gl->frags[0].offset + offset, + gl->frags[0].size - offset); + skb_shinfo(skb)->nr_frags = gl->nfrags; + for (i = 1; i < gl->nfrags; i++) + __skb_fill_page_desc(skb, i, gl->frags[i].page, + gl->frags[i].offset, + gl->frags[i].size); + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); +} + +/** + * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list + * @gl: the gather list + * @skb_len: size of sk_buff main body if it carries fragments + * @pull_len: amount of data to move to the sk_buff's main body + * + * Builds an sk_buff from the given packet gather list. Returns the + * sk_buff or %NULL if sk_buff allocation failed. 
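+ * Packets up to RX_COPY_THRES bytes are copied entirely into the
+ * sk_buff's linear data; larger packets get @pull_len bytes copied
+ * and the remainder attached as page fragments.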
+ */ +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len) +{ + struct sk_buff *skb; + + /* + * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer + * size, which is expected since buffers are at least PAGE_SIZEd. + * In this case packets up to RX_COPY_THRES have only one fragment. + */ + if (gl->tot_len <= RX_COPY_THRES) { + skb = dev_alloc_skb(gl->tot_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, gl->tot_len); + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); + } else { + skb = dev_alloc_skb(skb_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, pull_len); + skb_copy_to_linear_data(skb, gl->va, pull_len); + + copy_frags(skb, gl, pull_len); + skb->len = gl->tot_len; + skb->data_len = skb->len - pull_len; + skb->truesize += skb->data_len; + } +out: return skb; +} +EXPORT_SYMBOL(cxgb4_pktgl_to_skb); + +/** + * t4_pktgl_free - free a packet gather list + * @gl: the gather list + * + * Releases the pages of a packet gather list. We do not own the last + * page on the list and do not free it. + */ +static void t4_pktgl_free(const struct pkt_gl *gl) +{ + int n; + const struct page_frag *p; + + for (p = gl->frags, n = gl->nfrags - 1; n--; p++) + put_page(p->page); +} + +/* + * Process an MPS trace packet. Give it an unused protocol number so it won't + * be delivered to anyone and send it to the stack for capture. + */ +static noinline int handle_trace_pkt(struct adapter *adap, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + + skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + return 0; + } + + if (is_t4(adap->params.chip)) + __skb_pull(skb, sizeof(struct cpl_trace_pkt)); + else + __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt)); + + skb_reset_mac_header(skb); + skb->protocol = htons(0xffff); + skb->dev = adap->port[0]; + netif_receive_skb(skb); + return 0; +} + +static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, + const struct cpl_rx_pkt *pkt) +{ + struct adapter *adapter = rxq->rspq.adap; + struct sge *s = &adapter->sge; + int ret; + struct sk_buff *skb; + + skb = napi_get_frags(&rxq->rspq.napi); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + rxq->stats.rx_drops++; + return; + } + + copy_frags(skb, gl, s->pktshift); + skb->len = gl->tot_len - s->pktshift; + skb->data_len = skb->len; + skb->truesize += skb->data_len; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxq->rspq.idx); + skb_mark_napi_id(skb, &rxq->rspq.napi); + if (rxq->rspq.netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, + PKT_HASH_TYPE_L3); + + if (unlikely(pkt->vlan_ex)) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); + rxq->stats.vlan_ex++; + } + ret = napi_gro_frags(&rxq->rspq.napi); + if (ret == GRO_HELD) + rxq->stats.lro_pkts++; + else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) + rxq->stats.lro_merged++; + rxq->stats.pkts++; + rxq->stats.rx_cso++; +} + +/** + * t4_ethrx_handler - process an ingress ethernet packet + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @si: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. 
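+ * MPS trace packets are diverted to handle_trace_pkt(), eligible TCP
+ * packets are handed to GRO, and Rx checksum and VLAN extraction
+ * results are propagated to the sk_buff.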
+ */ +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si) +{ + bool csum_ok; + struct sk_buff *skb; + const struct cpl_rx_pkt *pkt; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + struct sge *s = &q->adap->sge; + int cpl_trace_pkt = is_t4(q->adap->params.chip) ? + CPL_TRACE_PKT : CPL_TRACE_PKT_T5; +#ifdef CONFIG_CHELSIO_T4_FCOE + struct port_info *pi; +#endif + + if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) + return handle_trace_pkt(q->adap, si); + + pkt = (const struct cpl_rx_pkt *)rsp; + csum_ok = pkt->csum_calc && !pkt->err_vec && + (q->netdev->features & NETIF_F_RXCSUM); + if ((pkt->l2info & htonl(RXF_TCP_F)) && + !(cxgb_poll_busy_polling(q)) && + (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { + do_gro(rxq, si, pkt); + return 0; + } + + skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(si); + rxq->stats.rx_drops++; + return 0; + } + + __skb_pull(skb, s->pktshift); /* remove ethernet header padding */ + skb->protocol = eth_type_trans(skb, q->netdev); + skb_record_rx_queue(skb, q->idx); + if (skb->dev->features & NETIF_F_RXHASH) + skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, + PKT_HASH_TYPE_L3); + + rxq->stats.pkts++; + + if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { + if (!pkt->ip_frag) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + rxq->stats.rx_cso++; + } else if (pkt->l2info & htonl(RXF_IP_F)) { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + rxq->stats.rx_cso++; + } + } else { + skb_checksum_none_assert(skb); +#ifdef CONFIG_CHELSIO_T4_FCOE +#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \ + RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F) + + pi = netdev_priv(skb->dev); + if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { + if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && + (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { + if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } + +#undef CPL_RX_PKT_FLAGS +#endif /* CONFIG_CHELSIO_T4_FCOE */ + } + + if (unlikely(pkt->vlan_ex)) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); + rxq->stats.vlan_ex++; + } + skb_mark_napi_id(skb, &q->napi); + netif_receive_skb(skb); + return 0; +} + +/** + * restore_rx_bufs - put back a packet's Rx buffers + * @si: the packet gather list + * @q: the SGE free list + * @frags: number of FL buffers to restore + * + * Puts back on an FL the Rx buffers associated with @si. The buffers + * have already been unmapped and are left unmapped, we mark them so to + * prevent further unmapping attempts. + * + * This function undoes a series of @unmap_rx_buf calls when we find out + * that the current packet can't be processed right away afterall and we + * need to come back to it later. This is a very rare event and there's + * no effort to make this particularly efficient. + */ +static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, + int frags) +{ + struct rx_sw_desc *d; + + while (frags--) { + if (q->cidx == 0) + q->cidx = q->size - 1; + else + q->cidx--; + d = &q->sdesc[q->cidx]; + d->page = si->frags[frags].page; + d->dma_addr |= RX_UNMAPPED_BUF; + q->avail++; + } +} + +/** + * is_new_response - check if a response is newly written + * @r: the response descriptor + * @q: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. 
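+ * The descriptor's generation bit is compared against the queue's
+ * current generation, which rspq_next() flips each time the queue
+ * wraps.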
+ */ +static inline bool is_new_response(const struct rsp_ctrl *r, + const struct sge_rspq *q) +{ + return RSPD_GEN(r->type_gen) == q->gen; +} + +/** + * rspq_next - advance to the next entry in a response queue + * @q: the queue + * + * Updates the state of a response queue to advance it to the next entry. + */ +static inline void rspq_next(struct sge_rspq *q) +{ + q->cur_desc = (void *)q->cur_desc + q->iqe_len; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + q->cur_desc = q->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @q: the ingress queue to process + * @budget: how many responses can be processed in this round + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as control messages from FW + * or HW. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +static int process_responses(struct sge_rspq *q, int budget) +{ + int ret, rsp_type; + int budget_left = budget; + const struct rsp_ctrl *rc; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + struct adapter *adapter = q->adap; + struct sge *s = &adapter->sge; + + while (likely(budget_left)) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + dma_rmb(); + rsp_type = RSPD_TYPE(rc->type_gen); + if (likely(rsp_type == RSP_TYPE_FLBUF)) { + struct page_frag *fp; + struct pkt_gl si; + const struct rx_sw_desc *rsd; + u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; + + if (len & RSPD_NEWBUF) { + if (likely(q->offset > 0)) { + free_rx_bufs(q->adap, &rxq->fl, 1); + q->offset = 0; + } + len = RSPD_LEN(len); + } + si.tot_len = len; + + /* gather packet fragments */ + for (frags = 0, fp = si.frags; ; frags++, fp++) { + rsd = &rxq->fl.sdesc[rxq->fl.cidx]; + bufsz = get_buf_size(adapter, rsd); + fp->page = rsd->page; + fp->offset = q->offset; + fp->size = min(bufsz, len); + len -= fp->size; + if (!len) + break; + unmap_rx_buf(q->adap, &rxq->fl); + } + + /* + * Last buffer remains mapped so explicitly make it + * coherent for CPU access. 
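+ * (All earlier fragments were unmapped by unmap_rx_buf() in the
+ * loop above.)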
+ */ + dma_sync_single_for_cpu(q->adap->pdev_dev, + get_buf_addr(rsd), + fp->size, DMA_FROM_DEVICE); + + si.va = page_address(si.frags[0].page) + + si.frags[0].offset; + prefetch(si.va); + + si.nfrags = frags + 1; + ret = q->handler(q, q->cur_desc, &si); + if (likely(ret == 0)) + q->offset += ALIGN(fp->size, s->fl_align); + else + restore_rx_bufs(&si, &rxq->fl, frags); + } else if (likely(rsp_type == RSP_TYPE_CPL)) { + ret = q->handler(q, q->cur_desc, NULL); + } else { + ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); + } + + if (unlikely(ret)) { + /* couldn't process descriptor, back off for recovery */ + q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); + break; + } + + rspq_next(q); + budget_left--; + } + + if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) + __refill_fl(q->adap, &rxq->fl); + return budget - budget_left; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +int cxgb_busy_poll(struct napi_struct *napi) +{ + struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); + unsigned int params, work_done; + u32 val; + + if (!cxgb_poll_lock_poll(q)) + return LL_FLUSH_BUSY; + + work_done = process_responses(q, 4); + params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN; + q->next_intr_params = params; + val = CIDXINC_V(work_done) | SEINTARM_V(params); + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!q->bar2_addr)) + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), + val | INGRESSQID_V((u32)q->cntxt_id)); + else { + writel(val | INGRESSQID_V(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); + wmb(); + } + + cxgb_poll_unlock_poll(q); + return work_done; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/** + * napi_rx_handler - the NAPI handler for Rx processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. This does not need any + * locking or protection from interrupts as data interrupts are off at + * this point and other adapter interrupts do not interfere (the latter + * in not a concern at all with MSI-X as non-data interrupts then have + * a separate handler). + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + unsigned int params; + struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); + int work_done; + u32 val; + + if (!cxgb_poll_lock_napi(q)) + return budget; + + work_done = process_responses(q, budget); + if (likely(work_done < budget)) { + int timer_index; + + napi_complete(napi); + timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params); + + if (q->adaptive_rx) { + if (work_done > max(timer_pkt_quota[timer_index], + MIN_NAPI_WORK)) + timer_index = (timer_index + 1); + else + timer_index = timer_index - 1; + + timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); + q->next_intr_params = QINTR_TIMER_IDX(timer_index) | + V_QINTR_CNT_EN; + params = q->next_intr_params; + } else { + params = q->next_intr_params; + q->next_intr_params = q->intr_params; + } + } else + params = QINTR_TIMER_IDX(7); + + val = CIDXINC_V(work_done) | SEINTARM_V(params); + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. 
+ */ + if (unlikely(q->bar2_addr == NULL)) { + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), + val | INGRESSQID_V((u32)q->cntxt_id)); + } else { + writel(val | INGRESSQID_V(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); + wmb(); + } + cxgb_poll_unlock_napi(q); + return work_done; +} + +/* + * The MSI-X interrupt handler for an SGE response queue. + */ +irqreturn_t t4_sge_intr_msix(int irq, void *cookie) +{ + struct sge_rspq *q = cookie; + + napi_schedule(&q->napi); + return IRQ_HANDLED; +} + +/* + * Process the indirect interrupt entries in the interrupt queue and kick off + * NAPI for each queue that has generated an entry. + */ +static unsigned int process_intrq(struct adapter *adap) +{ + unsigned int credits; + const struct rsp_ctrl *rc; + struct sge_rspq *q = &adap->sge.intrq; + u32 val; + + spin_lock(&adap->sge.intrq_lock); + for (credits = 0; ; credits++) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + dma_rmb(); + if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { + unsigned int qid = ntohl(rc->pldbuflen_qid); + + qid -= adap->sge.ingr_start; + napi_schedule(&adap->sge.ingr_map[qid]->napi); + } + + rspq_next(q); + } + + val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(q->bar2_addr == NULL)) { + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), + val | INGRESSQID_V(q->cntxt_id)); + } else { + writel(val | INGRESSQID_V(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); + wmb(); + } + spin_unlock(&adap->sge.intrq_lock); + return credits; +} + +/* + * The MSI interrupt handler, which handles data events from SGE response queues + * as well as error and other async events as they all use the same MSI vector. + */ +static irqreturn_t t4_intr_msi(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + if (adap->flags & MASTER_PF) + t4_slow_intr_handler(adap); + process_intrq(adap); + return IRQ_HANDLED; +} + +/* + * Interrupt handler for legacy INTx interrupts. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt line. + */ +static irqreturn_t t4_intr_intx(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); + if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | + process_intrq(adap)) + return IRQ_HANDLED; + return IRQ_NONE; /* probably shared interrupt */ +} + +/** + * t4_intr_handler - select the top-level interrupt handler + * @adap: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X, MSI, or INTx). 
+ */ +irq_handler_t t4_intr_handler(struct adapter *adap) +{ + if (adap->flags & USING_MSIX) + return t4_sge_intr_msix; + if (adap->flags & USING_MSI) + return t4_intr_msi; + return t4_intr_intx; +} + +static void sge_rx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, idma_same_state_cnt[2]; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) + for (m = s->starving_fl[i]; m; m &= m - 1) { + struct sge_eth_rxq *rxq; + unsigned int id = __ffs(m) + i * BITS_PER_LONG; + struct sge_fl *fl = s->egr_map[id]; + + clear_bit(id, s->starving_fl); + smp_mb__after_atomic(); + + if (fl_starving(adap, fl)) { + rxq = container_of(fl, struct sge_eth_rxq, fl); + if (napi_reschedule(&rxq->rspq.napi)) + fl->starving++; + else + set_bit(id, s->starving_fl); + } + } + + t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13); + idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A); + idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A); + + for (i = 0; i < 2; i++) { + u32 debug0, debug11; + + /* If the Ingress DMA Same State Counter ("timer") is less + * than 1s, then we can reset our synthesized Stall Timer and + * continue. If we have previously emitted warnings about a + * potential stalled Ingress Queue, issue a note indicating + * that the Ingress Queue has resumed forward progress. + */ + if (idma_same_state_cnt[i] < s->idma_1s_thresh) { + if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH) + CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n", + i, s->idma_qid[i], + s->idma_stalled[i]/HZ); + s->idma_stalled[i] = 0; + continue; + } + + /* Synthesize an SGE Ingress DMA Same State Timer in the Hz + * domain. The first time we get here it'll be because we + * passed the 1s Threshold; each additional time it'll be + * because the RX Timer Callback is being fired on its regular + * schedule. + * + * If the stall is below our Potential Hung Ingress Queue + * Warning Threshold, continue. + */ + if (s->idma_stalled[i] == 0) + s->idma_stalled[i] = HZ; + else + s->idma_stalled[i] += RX_QCHECK_PERIOD; + + if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH) + continue; + + /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */ + if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0) + continue; + + /* Read and save the SGE IDMA State and Queue ID information. + * We do this every time in case it changes across time ... 
+ */ + t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0); + debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A); + s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f; + + t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11); + debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A); + s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff; + + CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n", + i, s->idma_qid[i], s->idma_state[i], + s->idma_stalled[i]/HZ, debug0, debug11); + t4_sge_decode_idma_state(adap, s->idma_state[i]); + } + + mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); +} + +static void sge_tx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, budget; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) + for (m = s->txq_maperr[i]; m; m &= m - 1) { + unsigned long id = __ffs(m) + i * BITS_PER_LONG; + struct sge_ofld_txq *txq = s->egr_map[id]; + + clear_bit(id, s->txq_maperr); + tasklet_schedule(&txq->qresume_tsk); + } + + budget = MAX_TIMER_TX_RECLAIM; + i = s->ethtxq_rover; + do { + struct sge_eth_txq *q = &s->ethtxq[i]; + + if (q->q.in_use && + time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && + __netif_tx_trylock(q->txq)) { + int avail = reclaimable(&q->q); + + if (avail) { + if (avail > budget) + avail = budget; + + free_tx_desc(adap, &q->q, avail, true); + q->q.in_use -= avail; + budget -= avail; + } + __netif_tx_unlock(q->txq); + } + + if (++i >= s->ethqsets) + i = 0; + } while (budget && i != s->ethtxq_rover); + s->ethtxq_rover = i; + mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); +} + +/** + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; + + ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + return adapter->bar2 + bar2_qoffset; +} + +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd) +{ + int ret, flsz = 0; + struct fw_iq_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = netdev_priv(dev); + + /* Size needs to be multiple of 16, including status entry. 
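+ * The final entry holds the queue's status and is subtracted from the
+ * usable size (iq->size--) once the queue has been created.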
*/ + iq->size = roundup(iq->size, 16); + + iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, + &iq->phys_addr, NULL, 0, NUMA_NO_NODE); + if (!iq->desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | + FW_LEN16(c)); + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx : + -intr_idx - 1)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); + c.iqsize = htons(iq->size); + c.iqaddr = cpu_to_be64(iq->phys_addr); + + if (fl) { + fl->size = roundup(fl->size, 8); + fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), + sizeof(struct rx_sw_desc), &fl->addr, + &fl->sdesc, s->stat_len, NUMA_NO_NODE); + if (!fl->desc) + goto fl_nomem; + + flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0FETCHRO_F | + FW_IQ_CMD_FL0DATARO_F | + FW_IQ_CMD_FL0PADEN_F); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) | + FW_IQ_CMD_FL0FBMAX_V(3)); + c.fl0size = htons(flsz); + c.fl0addr = cpu_to_be64(fl->addr); + } + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) + goto err; + + netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); + napi_hash_add(&iq->napi); + iq->cur_desc = iq->desc; + iq->cidx = 0; + iq->gen = 1; + iq->next_intr_params = iq->intr_params; + iq->cntxt_id = ntohs(c.iqid); + iq->abs_id = ntohs(c.physiqid); + iq->bar2_addr = bar2_address(adap, + iq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &iq->bar2_qid); + iq->size--; /* subtract status entry */ + iq->netdev = dev; + iq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + iq->offset = fl ? 0 : -1; + + adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; + + if (fl) { + fl->cntxt_id = ntohs(c.fl0id); + fl->avail = fl->pend_cred = 0; + fl->pidx = fl->cidx = 0; + fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; + adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; + + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! 
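+ * (refill_fl() below may ring the Free List doorbell, which needs
+ * fl->bar2_addr/fl->bar2_qid to be valid.)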
+ */ + fl->bar2_addr = bar2_address(adap, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); + refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); + } + return 0; + +fl_nomem: + ret = -ENOMEM; +err: + if (iq->desc) { + dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, + iq->desc, iq->phys_addr); + iq->desc = NULL; + } + if (fl && fl->desc) { + kfree(fl->sdesc); + fl->sdesc = NULL; + dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), + fl->desc, fl->addr); + fl->desc = NULL; + } + return ret; +} + +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +{ + q->cntxt_id = id; + q->bar2_addr = bar2_address(adap, + q->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &q->bar2_qid); + q->in_use = 0; + q->cidx = q->pidx = 0; + q->stops = q->restarts = 0; + q->stat = (void *)&q->desc[q->size]; + spin_lock_init(&q->db_lock); + adap->sge.egr_map[id - adap->sge.egr_start] = q; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, + netdev_queue_numa_node_read(netdevq)); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_ETH_CMD_PFN_V(adap->fn) | + FW_EQ_ETH_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | + FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) | + FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_ETH_CMD_FETCHRO_V(1) | + FW_EQ_ETH_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) | + FW_EQ_ETH_CMD_FBMAX_V(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) | + FW_EQ_ETH_CMD_EQSIZE_V(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); + txq->txq = netdevq; + txq->tso = txq->tx_cso = txq->vlan_ins = 0; + txq->mapping_err = 0; + return 0; +} + +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, nentries, + sizeof(struct tx_desc), 0, &txq->q.phys_addr, + NULL, 0, NUMA_NO_NODE); + if (!txq->q.desc) + return -ENOMEM; + + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_CTRL_CMD_PFN_V(adap->fn) | + FW_EQ_CTRL_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | + FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); + c.cmpliqid_eqid = 
htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) | + FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_CTRL_CMD_FETCHRO_F | + FW_EQ_CTRL_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) | + FW_EQ_CTRL_CMD_FBMAX_V(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) | + FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) { + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); + txq->full = 0; + return 0; +} + +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_ofld_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, + NUMA_NO_NODE); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_EQ_OFLD_CMD_PFN_V(adap->fn) | + FW_EQ_OFLD_CMD_VFN_V(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | + FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) | + FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | + FW_EQ_OFLD_CMD_FETCHRO_F | + FW_EQ_OFLD_CMD_IQID_V(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) | + FW_EQ_OFLD_CMD_FBMAX_V(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) | + FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); + txq->full = 0; + txq->mapping_err = 0; + return 0; +} + +static void free_txq(struct adapter *adap, struct sge_txq *q) +{ + struct sge *s = &adap->sge; + + dma_free_coherent(adap->pdev_dev, + q->size * sizeof(struct tx_desc) + s->stat_len, + q->desc, q->phys_addr); + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + +static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) +{ + struct sge *s = &adap->sge; + unsigned int fl_id = fl ? 
fl->cntxt_id : 0xffff; + + adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; + t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, + rq->cntxt_id, fl_id, 0xffff); + dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, + rq->desc, rq->phys_addr); + napi_hash_del(&rq->napi); + netif_napi_del(&rq->napi); + rq->netdev = NULL; + rq->cntxt_id = rq->abs_id = 0; + rq->desc = NULL; + + if (fl) { + free_rx_bufs(adap, fl, fl->avail); + dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, + fl->desc, fl->addr); + kfree(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/** + * t4_free_ofld_rxqs - free a block of consecutive Rx queues + * @adap: the adapter + * @n: number of queues + * @q: pointer to first queue + * + * Release the resources of a consecutive block of offload Rx queues. + */ +void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) +{ + for ( ; n; n--, q++) + if (q->rspq.desc) + free_rspq_fl(adap, &q->rspq, + q->fl.size ? &q->fl : NULL); +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. + */ +void t4_free_sge_resources(struct adapter *adap) +{ + int i; + struct sge_eth_rxq *eq = adap->sge.ethrxq; + struct sge_eth_txq *etq = adap->sge.ethtxq; + + /* clean up Ethernet Tx/Rx queues */ + for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { + if (eq->rspq.desc) + free_rspq_fl(adap, &eq->rspq, + eq->fl.size ? &eq->fl : NULL); + if (etq->q.desc) { + t4_eth_eq_free(adap, adap->fn, adap->fn, 0, + etq->q.cntxt_id); + free_tx_desc(adap, &etq->q, etq->q.in_use, true); + kfree(etq->q.sdesc); + free_txq(adap, &etq->q); + } + } + + /* clean up RDMA and iSCSI Rx queues */ + t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq); + t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); + t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); + + /* clean up offload Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { + struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; + + if (q->q.desc) { + tasklet_kill(&q->qresume_tsk); + t4_ofld_eq_free(adap, adap->fn, adap->fn, 0, + q->q.cntxt_id); + free_tx_desc(adap, &q->q, q->q.in_use, false); + kfree(q->q.sdesc); + __skb_queue_purge(&q->sendq); + free_txq(adap, &q->q); + } + } + + /* clean up control Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { + struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; + + if (cq->q.desc) { + tasklet_kill(&cq->qresume_tsk); + t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0, + cq->q.cntxt_id); + __skb_queue_purge(&cq->sendq); + free_txq(adap, &cq->q); + } + } + + if (adap->sge.fw_evtq.desc) + free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); + + if (adap->sge.intrq.desc) + free_rspq_fl(adap, &adap->sge.intrq, NULL); + + /* clear the reverse egress queue map */ + memset(adap->sge.egr_map, 0, + adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); +} + +void t4_sge_start(struct adapter *adap) +{ + adap->sge.ethtxq_rover = 0; + mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); + mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); +} + +/** + * t4_sge_stop - disable SGE operation + * @adap: the adapter + * + * Stop tasklets and timers associated with the DMA engine. Note that + * this is effective only if measures have been taken to disable any HW + * events that may restart them. 
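+ * Deletes the Rx and Tx timers and kills the restart tasklets of the
+ * offload and control Tx queues.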
+ */ +void t4_sge_stop(struct adapter *adap) +{ + int i; + struct sge *s = &adap->sge; + + if (in_interrupt()) /* actions below require waiting */ + return; + + if (s->rx_timer.function) + del_timer_sync(&s->rx_timer); + if (s->tx_timer.function) + del_timer_sync(&s->tx_timer); + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { + struct sge_ofld_txq *q = &s->ofldtxq[i]; + + if (q->q.desc) + tasklet_kill(&q->qresume_tsk); + } + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { + struct sge_ctrl_txq *cq = &s->ctrlq[i]; + + if (cq->q.desc) + tasklet_kill(&cq->qresume_tsk); + } +} + +/** + * t4_sge_init_soft - grab core SGE values needed by SGE code + * @adap: the adapter + * + * We need to grab the SGE operating parameters that we need to have + * in order to do our job and make sure we can live with them. + */ + +static int t4_sge_init_soft(struct adapter *adap) +{ + struct sge *s = &adap->sge; + u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; + u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; + u32 ingress_rx_threshold; + + /* + * Verify that CPL messages are going to the Ingress Queue for + * process_responses() and that only packet data is going to the + * Free Lists. + */ + if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != + RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { + dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + /* + * Validate the Host Buffer Register Array indices that we want to + * use ... + * + * XXX Note that we should really read through the Host Buffer Size + * XXX register array and find the indices of the Buffer Sizes which + * XXX meet our needs! + */ + #define READ_FL_BUF(x) \ + t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) + + fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); + fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); + fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); + fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + #undef READ_FL_BUF + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg & (fl_large_pg-1)) != 0) { + dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", + fl_small_pg, fl_large_pg); + return -EINVAL; + } + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + + if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || + fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { + dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", + fl_small_mtu, fl_large_mtu); + return -EINVAL; + } + + /* + * Retrieve our RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. 
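+ * The timer values are converted from core clock ticks to
+ * microseconds via core_ticks_to_us().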
+ */ + timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); + timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); + timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); + s->timer_val[0] = core_ticks_to_us(adap, + TIMERVALUE0_G(timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + TIMERVALUE1_G(timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + TIMERVALUE2_G(timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + TIMERVALUE3_G(timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + TIMERVALUE4_G(timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + TIMERVALUE5_G(timer_value_4_and_5)); + + ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); + s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); + s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); + s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); + s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); + + return 0; +} + +/** + * t4_sge_init - initialize SGE + * @adap: the adapter + * + * Perform low-level SGE code initialization needed every time after a + * chip reset. + */ +int t4_sge_init(struct adapter *adap) +{ + struct sge *s = &adap->sge; + u32 sge_control, sge_control2, sge_conm_ctrl; + unsigned int ingpadboundary, ingpackboundary; + int ret, egress_threshold; + + /* + * Ingress Padding Boundary and Egress Status Page Size are set up by + * t4_fixup_host_params(). + */ + sge_control = t4_read_reg(adap, SGE_CONTROL_A); + s->pktshift = PKTSHIFT_G(sge_control); + s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. + */ + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + + INGPADBOUNDARY_SHIFT_X); + if (is_t4(adap->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } + + ret = t4_sge_init_soft(adap); + if (ret < 0) + return ret; + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) For T4, + * there was only a single field to control this. For T5 there's the + * original field which now only applies to Unpacked Mode Free List + * buffers and a new field which only applies to Packed Mode Free List + * buffers. 
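+ * Hence fl_starve_thres is set to 2*egress_threshold + 1 below.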
+ */ + sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); + if (is_t4(adap->params.chip)) + egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl); + else + egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl); + s->fl_starve_thres = 2*egress_threshold + 1; + + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); + setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); + s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */ + s->idma_stalled[0] = 0; + s->idma_stalled[1] = 0; + spin_lock_init(&s->intrq_lock); + + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c new file mode 100644 index 000000000..e8578a742 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -0,0 +1,5719 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_values.h" +#include "t4fw_api.h" + +/** + * t4_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. 
+ */ +static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t4_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} + +/** + * t4_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. + */ +void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t4_read_reg(adapter, addr) & ~mask; + + t4_write_reg(adapter, addr, v | val); + (void) t4_read_reg(adapter, addr); /* flush */ +} + +/** + * t4_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @nregs: how many indirect registers to read + * @start_idx: index of first indirect register to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx); + *vals++ = t4_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + +/* + * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor + * mechanism. This guarantees that we get the real value even if we're + * operating within a Virtual Machine and the Hypervisor is trapping our + * Configuration Space accesses. + */ +void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) +{ + u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg); + + if (is_t4(adap->params.chip)) + req |= LOCALCFG_F; + + t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req); + *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A); + + /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a + * Configuration Space read. (None of the other fields matter when + * ENABLE is 0 so a simple register write is easier than a + * read-modify-write via t4_set_reg_field().) 
+ */ + t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0); +} + +/* + * t4_report_fw_error - report firmware error + * @adap: the adapter + * + * The adapter firmware can indicate error conditions to the host. + * If the firmware has indicated an error, print out the reason for + * the firmware error. + */ +static void t4_report_fw_error(struct adapter *adap) +{ + static const char *const reason[] = { + "Crash", /* PCIE_FW_EVAL_CRASH */ + "During Device Preparation", /* PCIE_FW_EVAL_PREP */ + "During Device Configuration", /* PCIE_FW_EVAL_CONF */ + "During Device Initialization", /* PCIE_FW_EVAL_INIT */ + "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ + "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ + "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ + "Reserved", /* reserved */ + }; + u32 pcie_fw; + + pcie_fw = t4_read_reg(adap, PCIE_FW_A); + if (pcie_fw & PCIE_FW_ERR_F) + dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", + reason[PCIE_FW_EVAL_G(pcie_fw)]); +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order. + */ +static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, + u32 mbox_addr) +{ + for ( ; nflit; nflit--, mbox_addr += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); +} + +/* + * Handle a FW assertion reported in a mailbox. + */ +static void fw_asrt(struct adapter *adap, u32 mbox_addr) +{ + struct fw_debug_cmd asrt; + + get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); + dev_alert(adap->pdev_dev, + "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), + ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); +} + +static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) +{ + dev_err(adap->pdev_dev, + "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); +} + +/** + * t4_wr_mbox_meat - send a command to FW through the given mailbox + * @adap: the adapter + * @mbox: index of the mailbox to use + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sends the given command to FW through the selected mailbox and waits + * for the FW to execute the command. If @rpl is not %NULL it is used to + * store the FW's reply to the command. The command and its optional + * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms + * to respond. @sleep_ok determines whether we may sleep while awaiting + * the response. If sleeping is allowed we use progressive backoff + * otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). 
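+ *
+ * In outline, the handshake implemented below is:
+ *
+ * 1. poll the CIM_PF_MAILBOX_CTRL register until the driver owns the
+ *    mailbox;
+ * 2. copy the command into the mailbox data registers;
+ * 3. set MBMSGVALID and pass ownership to the firmware;
+ * 4. poll until ownership returns to the driver, then read the reply
+ *    header and return the (negated) return value it carries.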
+ */ +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + static const int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 + }; + + u32 v; + u64 res; + int i, ms, delay_idx; + const __be64 *p = cmd; + u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); + u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A); + + if ((size & 15) || size > MBOX_LEN) + return -EINVAL; + + /* + * If the device is off-line, as in EEH, commands will time out. + * Fail them early so we don't waste time waiting. + */ + if (adap->pdev->error_state != pci_channel_io_normal) + return -EIO; + + v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); + for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) + v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); + + if (v != MBOX_OWNER_DRV) + return v ? -EBUSY : -ETIMEDOUT; + + for (i = 0; i < size; i += 8) + t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); + + t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW)); + t4_read_reg(adap, ctl_reg); /* flush write */ + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else + mdelay(ms); + + v = t4_read_reg(adap, ctl_reg); + if (MBOWNER_G(v) == MBOX_OWNER_DRV) { + if (!(v & MBMSGVALID_F)) { + t4_write_reg(adap, ctl_reg, 0); + continue; + } + + res = t4_read_reg64(adap, data_reg); + if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) { + fw_asrt(adap, data_reg); + res = FW_CMD_RETVAL_V(EIO); + } else if (rpl) { + get_mbox_rpl(adap, rpl, size / 8, data_reg); + } + + if (FW_CMD_RETVAL_G((int)res)) + dump_mbox(adap, mbox, data_reg); + t4_write_reg(adap, ctl_reg, 0); + return -FW_CMD_RETVAL_G((int)res); + } + } + + dump_mbox(adap, mbox, data_reg); + dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", + *(const u8 *)cmd, mbox); + t4_report_fw_error(adap); + return -ETIMEDOUT; +} + +/** + * t4_mc_read - read from MC through backdoor accesses + * @adap: the adapter + * @addr: address of first byte requested + * @idx: which MC to access + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from MC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. 
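+ * (@parity here refers to the @ecc argument.) @data must point to a
+ * buffer of at least 64 bytes, i.e. 16 32-bit words.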
+ */ +int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; + u32 mc_bist_status_rdata, mc_bist_data_pattern; + + if (is_t4(adap->params.chip)) { + mc_bist_cmd = MC_BIST_CMD_A; + mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A; + mc_bist_cmd_len = MC_BIST_CMD_LEN_A; + mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A; + mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A; + } else { + mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx); + mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx); + mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx); + mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx); + mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx); + } + + if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F) + return -EBUSY; + t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU); + t4_write_reg(adap, mc_bist_cmd_len, 64); + t4_write_reg(adap, mc_bist_data_pattern, 0xc); + t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F | + BIST_CMD_GAP_V(1)); + i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1); + if (i) + return i; + +#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, MC_DATA(16)); +#undef MC_DATA + return 0; +} + +/** + * t4_edc_read - read from EDC through backdoor accesses + * @adap: the adapter + * @idx: which EDC to access + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from EDC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. 
+ */ +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; + u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; + + if (is_t4(adap->params.chip)) { + edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx); + edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx); + edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx); + edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A, + idx); + edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A, + idx); + } else { + edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx); + edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx); + edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx); + edc_bist_cmd_data_pattern = + EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx); + edc_bist_status_rdata = + EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx); + } + + if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F) + return -EBUSY; + t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU); + t4_write_reg(adap, edc_bist_cmd_len, 64); + t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); + t4_write_reg(adap, edc_bist_cmd, + BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F); + i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1); + if (i) + return i; + +#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i)) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, EDC_DATA(16)); +#undef EDC_DATA + return 0; +} + +/** + * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window + * @adap: the adapter + * @win: PCI-E Memory Window to use + * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC + * @addr: address within indicated memory type + * @len: amount of memory to transfer + * @hbuf: host memory buffer + * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) + * + * Reads/writes an [almost] arbitrary memory region in the firmware: the + * firmware memory address and host buffer must be aligned on 32-bit + * boudaries; the length may be arbitrary. The memory is transferred as + * a raw byte sequence from/to the firmware's memory. If this memory + * contains data structures which contain multi-byte integers, it's the + * caller's responsibility to perform appropriate byte order conversions. + */ +int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, + u32 len, void *hbuf, int dir) +{ + u32 pos, offset, resid, memoffset; + u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; + u32 *buf; + + /* Argument sanity checks ... + */ + if (addr & 0x3 || (uintptr_t)hbuf & 0x3) + return -EINVAL; + buf = (u32 *)hbuf; + + /* It's convenient to be able to handle lengths which aren't a + * multiple of 32-bits because we often end up transferring files to + * the firmware. So we'll handle that by normalizing the length here + * and then handling any residual transfer at the end. 
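+ *
+ * For example, a 103-byte transfer is carried out as 100 bytes of whole
+ * 32-bit words followed by a 3-byte residual (103 & 0x3) handled at the
+ * end.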
+ */ + resid = len & 0x3; + len -= resid; + + /* Offset into the region of memory which is being accessed + * MEM_EDC0 = 0 + * MEM_EDC1 = 1 + * MEM_MC = 2 -- T4 + * MEM_MC0 = 2 -- For T5 + * MEM_MC1 = 3 -- For T5 + */ + edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); + if (mtype != MEM_MC1) + memoffset = (mtype * (edc_size * 1024 * 1024)); + else { + mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, + MA_EXT_MEMORY0_BAR_A)); + memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; + } + + /* Determine the PCIE_MEM_ACCESS_OFFSET */ + addr = addr + memoffset; + + /* Each PCI-E Memory Window is programmed with a window size -- or + * "aperture" -- which controls the granularity of its mapping onto + * adapter memory. We need to grab that aperture in order to know + * how to use the specified window. The window is also programmed + * with the base address of the Memory Window in BAR0's address + * space. For T4 this is an absolute PCI-E Bus Address. For T5 + * the address is relative to BAR0. + */ + mem_reg = t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, + win)); + mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X); + mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X; + if (is_t4(adap->params.chip)) + mem_base -= adap->t4_bar0; + win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn); + + /* Calculate our initial PCI-E Memory Window Position and Offset into + * that Window. + */ + pos = addr & ~(mem_aperture-1); + offset = addr - pos; + + /* Set up initial PCI-E Memory Window to cover the start of our + * transfer. (Read it back to ensure that changes propagate before we + * attempt to use the new value.) + */ + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win), + pos | win_pf); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win)); + + /* Transfer data to/from the adapter as long as there's an integral + * number of 32-bit transfers to complete. + * + * A note on Endianness issues: + * + * The "register" reads and writes below from/to the PCI-E Memory + * Window invoke the standard adapter Big-Endian to PCI-E Link + * Little-Endian "swizzel." As a result, if we have the following + * data in adapter memory: + * + * Memory: ... | b0 | b1 | b2 | b3 | ... + * Address: i+0 i+1 i+2 i+3 + * + * Then a read of the adapter memory via the PCI-E Memory Window + * will yield: + * + * x = readl(i) + * 31 0 + * [ b3 | b2 | b1 | b0 ] + * + * If this value is stored into local memory on a Little-Endian system + * it will show up correctly in local memory as: + * + * ( ..., b0, b1, b2, b3, ... ) + * + * But on a Big-Endian system, the store will show up in memory + * incorrectly swizzled as: + * + * ( ..., b3, b2, b1, b0, ... ) + * + * So we need to account for this in the reads and writes to the + * PCI-E Memory Window below by undoing the register read/write + * swizzels. + */ + while (len > 0) { + if (dir == T4_MEMORY_READ) + *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, + mem_base + offset)); + else + t4_write_reg(adap, mem_base + offset, + (__force u32)cpu_to_le32(*buf++)); + offset += sizeof(__be32); + len -= sizeof(__be32); + + /* If we've reached the end of our current window aperture, + * move the PCI-E Memory Window on to the next. Note that + * doing this here after "len" may be 0 allows us to set up + * the PCI-E Memory Window for a possible final residual + * transfer below ... 
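+ * (With a 64KB aperture, to pick an illustrative size, pos would step
+ * from 0x0 to 0x10000 to 0x20000 and so on, with offset wrapping back
+ * to 0 each time.)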
+ */ + if (offset == mem_aperture) { + pos += mem_aperture; + offset = 0; + t4_write_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, + win), pos | win_pf); + t4_read_reg(adap, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, + win)); + } + } + + /* If the original transfer had a length which wasn't a multiple of + * 32-bits, now's where we need to finish off the transfer of the + * residual amount. The PCI-E Memory Window has already been moved + * above (if necessary) to cover this final transfer. + */ + if (resid) { + union { + u32 word; + char byte[4]; + } last; + unsigned char *bp; + int i; + + if (dir == T4_MEMORY_READ) { + last.word = le32_to_cpu( + (__force __le32)t4_read_reg(adap, + mem_base + offset)); + for (bp = (unsigned char *)buf, i = resid; i < 4; i++) + bp[i] = last.byte[i]; + } else { + last.word = *buf; + for (i = resid; i < 4; i++) + last.byte[i] = 0; + t4_write_reg(adap, mem_base + offset, + (__force u32)cpu_to_le32(last.word)); + } + } + + return 0; +} + +/** + * t4_get_regs_len - return the size of the chips register set + * @adapter: the adapter + * + * Returns the size of the chip's BAR0 register space. + */ +unsigned int t4_get_regs_len(struct adapter *adapter) +{ + unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); + + switch (chip_version) { + case CHELSIO_T4: + return T4_REGMAP_SIZE; + + case CHELSIO_T5: + return T5_REGMAP_SIZE; + } + + dev_err(adapter->pdev_dev, + "Unsupported chip version %d\n", chip_version); + return 0; +} + +/** + * t4_get_regs - read chip registers into provided buffer + * @adap: the adapter + * @buf: register buffer + * @buf_size: size (in bytes) of register buffer + * + * If the provided register buffer isn't large enough for the chip's + * full register range, the register dump will be truncated to the + * register buffer's size. 
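+ * Callers normally size @buf using t4_get_regs_len() above.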
+ */ +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) +{ + static const unsigned int t4_reg_ranges[] = { + 0x1008, 0x1108, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1300, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x30d8, + 0x30e0, 0x5924, + 0x5960, 0x59d4, + 0x5a00, 0x5af8, + 0x6000, 0x6098, + 0x6100, 0x6150, + 0x6200, 0x6208, + 0x6240, 0x6248, + 0x6280, 0x6338, + 0x6370, 0x638c, + 0x6400, 0x643c, + 0x6500, 0x6524, + 0x6a00, 0x6a38, + 0x6a60, 0x6a78, + 0x6b00, 0x6b84, + 0x6bf0, 0x6c84, + 0x6cf0, 0x6d84, + 0x6df0, 0x6e84, + 0x6ef0, 0x6f84, + 0x6ff0, 0x7084, + 0x70f0, 0x7184, + 0x71f0, 0x7284, + 0x72f0, 0x7384, + 0x73f0, 0x7450, + 0x7500, 0x7530, + 0x7600, 0x761c, + 0x7680, 0x76cc, + 0x7700, 0x7798, + 0x77c0, 0x77fc, + 0x7900, 0x79fc, + 0x7b00, 0x7c38, + 0x7d00, 0x7efc, + 0x8dc0, 0x8e1c, + 0x8e30, 0x8e78, + 0x8ea0, 0x8f6c, + 0x8fc0, 0x9074, + 0x90fc, 0x90fc, + 0x9400, 0x9458, + 0x9600, 0x96bc, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0x9fec, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xea7c, + 0xf000, 0x11110, + 0x11118, 0x11190, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x1924c, + 0x193f8, 0x19474, + 0x19490, 0x194f8, + 0x19800, 0x19f30, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e28c, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e68c, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea8c, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee8c, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f28c, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f68c, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa8c, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe8c, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x20000, 0x2002c, + 0x20100, 0x2013c, + 0x20190, 0x201c8, + 0x20200, 0x20318, + 0x20400, 0x20528, + 0x20540, 0x20614, + 0x21000, 0x21040, + 0x2104c, 0x21060, + 0x210c0, 0x210ec, + 0x21200, 0x21268, + 0x21270, 0x21284, + 0x212fc, 0x21388, + 0x21400, 0x21404, + 0x21500, 0x21518, + 0x2152c, 0x2153c, + 0x21550, 0x21554, + 0x21600, 0x21600, + 0x21608, 0x21628, + 0x21630, 0x2163c, + 0x21700, 0x2171c, + 0x21780, 0x2178c, + 0x21800, 0x21c38, + 0x21c80, 0x21d7c, + 0x21e00, 0x21e04, + 0x22000, 0x2202c, + 0x22100, 0x2213c, + 0x22190, 0x221c8, + 0x22200, 0x22318, + 0x22400, 0x22528, + 0x22540, 0x22614, + 0x23000, 0x23040, + 0x2304c, 0x23060, + 0x230c0, 0x230ec, + 0x23200, 0x23268, + 0x23270, 0x23284, + 0x232fc, 0x23388, + 0x23400, 0x23404, + 0x23500, 0x23518, + 0x2352c, 0x2353c, + 0x23550, 0x23554, + 0x23600, 0x23600, + 0x23608, 0x23628, + 0x23630, 0x2363c, + 0x23700, 0x2371c, + 0x23780, 0x2378c, + 0x23800, 0x23c38, + 0x23c80, 0x23d7c, + 0x23e00, 0x23e04, + 0x24000, 0x2402c, + 0x24100, 0x2413c, + 0x24190, 0x241c8, + 0x24200, 0x24318, + 0x24400, 0x24528, + 0x24540, 0x24614, + 0x25000, 0x25040, + 
0x2504c, 0x25060, + 0x250c0, 0x250ec, + 0x25200, 0x25268, + 0x25270, 0x25284, + 0x252fc, 0x25388, + 0x25400, 0x25404, + 0x25500, 0x25518, + 0x2552c, 0x2553c, + 0x25550, 0x25554, + 0x25600, 0x25600, + 0x25608, 0x25628, + 0x25630, 0x2563c, + 0x25700, 0x2571c, + 0x25780, 0x2578c, + 0x25800, 0x25c38, + 0x25c80, 0x25d7c, + 0x25e00, 0x25e04, + 0x26000, 0x2602c, + 0x26100, 0x2613c, + 0x26190, 0x261c8, + 0x26200, 0x26318, + 0x26400, 0x26528, + 0x26540, 0x26614, + 0x27000, 0x27040, + 0x2704c, 0x27060, + 0x270c0, 0x270ec, + 0x27200, 0x27268, + 0x27270, 0x27284, + 0x272fc, 0x27388, + 0x27400, 0x27404, + 0x27500, 0x27518, + 0x2752c, 0x2753c, + 0x27550, 0x27554, + 0x27600, 0x27600, + 0x27608, 0x27628, + 0x27630, 0x2763c, + 0x27700, 0x2771c, + 0x27780, 0x2778c, + 0x27800, 0x27c38, + 0x27c80, 0x27d7c, + 0x27e00, 0x27e04 + }; + + static const unsigned int t5_reg_ranges[] = { + 0x1008, 0x1148, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1280, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x3028, + 0x3060, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x58bc, + 0x5940, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a9c, + 0x5b9c, 0x5bfc, + 0x6000, 0x6040, + 0x6058, 0x614c, + 0x7700, 0x7798, + 0x77c0, 0x78fc, + 0x7b00, 0x7c54, + 0x7d00, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e84, + 0x8ea0, 0x8f84, + 0x8fc0, 0x90f8, + 0x9400, 0x9470, + 0x9600, 0x96f4, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0x11088, + 0x1109c, 0x11110, + 0x11118, 0x1117c, + 0x11190, 0x11204, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x19290, + 0x193f8, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c60, + 0x19c94, 0x19e10, + 0x19e50, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fe4, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30030, + 0x30100, 0x30144, + 0x30190, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x30834, + 0x308c0, 0x30908, + 0x30910, 0x309ac, + 0x30a00, 0x30a04, + 0x30a0c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30c24, + 0x30d08, 0x30d14, + 0x30d1c, 0x30d20, + 0x30d3c, 
0x30d50, + 0x31200, 0x3120c, + 0x31220, 0x31220, + 0x31240, 0x31240, + 0x31600, 0x31600, + 0x31608, 0x3160c, + 0x31a00, 0x31a1c, + 0x31e04, 0x31e20, + 0x31e38, 0x31e3c, + 0x31e80, 0x31e80, + 0x31e88, 0x31ea8, + 0x31eb0, 0x31eb4, + 0x31ec8, 0x31ed4, + 0x31fb8, 0x32004, + 0x32208, 0x3223c, + 0x32600, 0x32630, + 0x32a00, 0x32abc, + 0x32b00, 0x32b70, + 0x33000, 0x33048, + 0x33060, 0x3309c, + 0x330f0, 0x33148, + 0x33160, 0x3319c, + 0x331f0, 0x332e4, + 0x332f8, 0x333e4, + 0x333f8, 0x33448, + 0x33460, 0x3349c, + 0x334f0, 0x33548, + 0x33560, 0x3359c, + 0x335f0, 0x336e4, + 0x336f8, 0x337e4, + 0x337f8, 0x337fc, + 0x33814, 0x33814, + 0x3382c, 0x3382c, + 0x33880, 0x3388c, + 0x338e8, 0x338ec, + 0x33900, 0x33948, + 0x33960, 0x3399c, + 0x339f0, 0x33ae4, + 0x33af8, 0x33b10, + 0x33b28, 0x33b28, + 0x33b3c, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c28, 0x33c28, + 0x33c3c, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34030, + 0x34100, 0x34144, + 0x34190, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x34834, + 0x348c0, 0x34908, + 0x34910, 0x349ac, + 0x34a00, 0x34a04, + 0x34a0c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34c24, + 0x34d08, 0x34d14, + 0x34d1c, 0x34d20, + 0x34d3c, 0x34d50, + 0x35200, 0x3520c, + 0x35220, 0x35220, + 0x35240, 0x35240, + 0x35600, 0x35600, + 0x35608, 0x3560c, + 0x35a00, 0x35a1c, + 0x35e04, 0x35e20, + 0x35e38, 0x35e3c, + 0x35e80, 0x35e80, + 0x35e88, 0x35ea8, + 0x35eb0, 0x35eb4, + 0x35ec8, 0x35ed4, + 0x35fb8, 0x36004, + 0x36208, 0x3623c, + 0x36600, 0x36630, + 0x36a00, 0x36abc, + 0x36b00, 0x36b70, + 0x37000, 0x37048, + 0x37060, 0x3709c, + 0x370f0, 0x37148, + 0x37160, 0x3719c, + 0x371f0, 0x372e4, + 0x372f8, 0x373e4, + 0x373f8, 0x37448, + 0x37460, 0x3749c, + 0x374f0, 0x37548, + 0x37560, 0x3759c, + 0x375f0, 0x376e4, + 0x376f8, 0x377e4, + 0x377f8, 0x377fc, + 0x37814, 0x37814, + 0x3782c, 0x3782c, + 0x37880, 0x3788c, + 0x378e8, 0x378ec, + 0x37900, 0x37948, + 0x37960, 0x3799c, + 0x379f0, 0x37ae4, + 0x37af8, 0x37b10, + 0x37b28, 0x37b28, + 0x37b3c, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c28, 0x37c28, + 0x37c3c, 0x37c50, + 0x37cf0, 0x37cfc, + 0x38000, 0x38030, + 0x38100, 0x38144, + 0x38190, 0x381d0, + 0x38200, 0x38318, + 0x38400, 0x3852c, + 0x38540, 0x3861c, + 0x38800, 0x38834, + 0x388c0, 0x38908, + 0x38910, 0x389ac, + 0x38a00, 0x38a04, + 0x38a0c, 0x38a2c, + 0x38a44, 0x38a50, + 0x38a74, 0x38c24, + 0x38d08, 0x38d14, + 0x38d1c, 0x38d20, + 0x38d3c, 0x38d50, + 0x39200, 0x3920c, + 0x39220, 0x39220, + 0x39240, 0x39240, + 0x39600, 0x39600, + 0x39608, 0x3960c, + 0x39a00, 0x39a1c, + 0x39e04, 0x39e20, + 0x39e38, 0x39e3c, + 0x39e80, 0x39e80, + 0x39e88, 0x39ea8, + 0x39eb0, 0x39eb4, + 0x39ec8, 0x39ed4, + 0x39fb8, 0x3a004, + 0x3a208, 0x3a23c, + 0x3a600, 0x3a630, + 0x3aa00, 0x3aabc, + 0x3ab00, 0x3ab70, + 0x3b000, 0x3b048, + 0x3b060, 0x3b09c, + 0x3b0f0, 0x3b148, + 0x3b160, 0x3b19c, + 0x3b1f0, 0x3b2e4, + 0x3b2f8, 0x3b3e4, + 0x3b3f8, 0x3b448, + 0x3b460, 0x3b49c, + 0x3b4f0, 0x3b548, + 0x3b560, 0x3b59c, + 0x3b5f0, 0x3b6e4, + 0x3b6f8, 0x3b7e4, + 0x3b7f8, 0x3b7fc, + 0x3b814, 0x3b814, + 0x3b82c, 0x3b82c, + 0x3b880, 0x3b88c, + 0x3b8e8, 0x3b8ec, + 0x3b900, 0x3b948, + 0x3b960, 0x3b99c, + 0x3b9f0, 0x3bae4, + 0x3baf8, 0x3bb10, + 0x3bb28, 0x3bb28, + 0x3bb3c, 0x3bb50, + 0x3bbf0, 0x3bc10, + 0x3bc28, 0x3bc28, + 0x3bc3c, 0x3bc50, + 0x3bcf0, 0x3bcfc, + 0x3c000, 0x3c030, + 0x3c100, 0x3c144, + 0x3c190, 0x3c1d0, + 0x3c200, 0x3c318, + 0x3c400, 0x3c52c, + 0x3c540, 0x3c61c, + 0x3c800, 0x3c834, + 0x3c8c0, 0x3c908, + 0x3c910, 0x3c9ac, + 0x3ca00, 0x3ca04, + 0x3ca0c, 0x3ca2c, + 0x3ca44, 0x3ca50, + 0x3ca74, 0x3cc24, + 
0x3cd08, 0x3cd14, + 0x3cd1c, 0x3cd20, + 0x3cd3c, 0x3cd50, + 0x3d200, 0x3d20c, + 0x3d220, 0x3d220, + 0x3d240, 0x3d240, + 0x3d600, 0x3d600, + 0x3d608, 0x3d60c, + 0x3da00, 0x3da1c, + 0x3de04, 0x3de20, + 0x3de38, 0x3de3c, + 0x3de80, 0x3de80, + 0x3de88, 0x3dea8, + 0x3deb0, 0x3deb4, + 0x3dec8, 0x3ded4, + 0x3dfb8, 0x3e004, + 0x3e208, 0x3e23c, + 0x3e600, 0x3e630, + 0x3ea00, 0x3eabc, + 0x3eb00, 0x3eb70, + 0x3f000, 0x3f048, + 0x3f060, 0x3f09c, + 0x3f0f0, 0x3f148, + 0x3f160, 0x3f19c, + 0x3f1f0, 0x3f2e4, + 0x3f2f8, 0x3f3e4, + 0x3f3f8, 0x3f448, + 0x3f460, 0x3f49c, + 0x3f4f0, 0x3f548, + 0x3f560, 0x3f59c, + 0x3f5f0, 0x3f6e4, + 0x3f6f8, 0x3f7e4, + 0x3f7f8, 0x3f7fc, + 0x3f814, 0x3f814, + 0x3f82c, 0x3f82c, + 0x3f880, 0x3f88c, + 0x3f8e8, 0x3f8ec, + 0x3f900, 0x3f948, + 0x3f960, 0x3f99c, + 0x3f9f0, 0x3fae4, + 0x3faf8, 0x3fb10, + 0x3fb28, 0x3fb28, + 0x3fb3c, 0x3fb50, + 0x3fbf0, 0x3fc10, + 0x3fc28, 0x3fc28, + 0x3fc3c, 0x3fc50, + 0x3fcf0, 0x3fcfc, + 0x40000, 0x4000c, + 0x40040, 0x40068, + 0x40080, 0x40144, + 0x40180, 0x4018c, + 0x40200, 0x40298, + 0x402ac, 0x4033c, + 0x403f8, 0x403fc, + 0x41304, 0x413c4, + 0x41400, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x44078, + 0x440c0, 0x44278, + 0x442c0, 0x44478, + 0x444c0, 0x44678, + 0x446c0, 0x44878, + 0x448c0, 0x449fc, + 0x45000, 0x45068, + 0x45080, 0x45084, + 0x450a0, 0x450b0, + 0x45200, 0x45268, + 0x45280, 0x45284, + 0x452a0, 0x452b0, + 0x460c0, 0x460e4, + 0x47000, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x47814, + 0x48000, 0x4800c, + 0x48040, 0x48068, + 0x48080, 0x48144, + 0x48180, 0x4818c, + 0x48200, 0x48298, + 0x482ac, 0x4833c, + 0x483f8, 0x483fc, + 0x49304, 0x493c4, + 0x49400, 0x4941c, + 0x49480, 0x494d0, + 0x4c000, 0x4c078, + 0x4c0c0, 0x4c278, + 0x4c2c0, 0x4c478, + 0x4c4c0, 0x4c678, + 0x4c6c0, 0x4c878, + 0x4c8c0, 0x4c9fc, + 0x4d000, 0x4d068, + 0x4d080, 0x4d084, + 0x4d0a0, 0x4d0b0, + 0x4d200, 0x4d268, + 0x4d280, 0x4d284, + 0x4d2a0, 0x4d2b0, + 0x4e0c0, 0x4e0e4, + 0x4f000, 0x4f08c, + 0x4f200, 0x4f250, + 0x4f400, 0x4f420, + 0x4f600, 0x4f618, + 0x4f800, 0x4f814, + 0x50000, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x508cc, + 0x50c00, 0x50c00, + 0x51000, 0x5101c, + 0x51300, 0x51308, + }; + + u32 *buf_end = (u32 *)((char *)buf + buf_size); + const unsigned int *reg_ranges; + int reg_ranges_size, range; + unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); + + /* Select the right set of register ranges to dump depending on the + * adapter chip type. + */ + switch (chip_version) { + case CHELSIO_T4: + reg_ranges = t4_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t4_reg_ranges); + break; + + case CHELSIO_T5: + reg_ranges = t5_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); + break; + + default: + dev_err(adap->pdev_dev, + "Unsupported chip version %d\n", chip_version); + return; + } + + /* Clear the register buffer and insert the appropriate register + * values selected by the above register ranges. + */ + memset(buf, 0, buf_size); + for (range = 0; range < reg_ranges_size; range += 2) { + unsigned int reg = reg_ranges[range]; + unsigned int last_reg = reg_ranges[range + 1]; + u32 *bufp = (u32 *)((char *)buf + reg); + + /* Iterate across the register range filling in the register + * buffer but don't write past the end of the register buffer. 
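+ * Registers outside every range keep the zeroes written by the
+ * memset() above.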
+ */ + while (reg <= last_reg && bufp < buf_end) { + *bufp++ = t4_read_reg(adap, reg); + reg += sizeof(u32); + } + } +} + +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0x400 +#define VPD_BASE_OLD 0 +#define VPD_LEN 1024 +#define CHELSIO_VPD_UNIQUE_ID 0x82 + +/** + * t4_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: whether to enable or disable write protection + * + * Enables or disables write protection on the serial EEPROM. + */ +int t4_seeprom_wp(struct adapter *adapter, bool enable) +{ + unsigned int v = enable ? 0xc : 0; + int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); + return ret < 0 ? ret : 0; +} + +/** + * get_vpd_params - read VPD parameters from VPD EEPROM + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. + */ +int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + u32 cclk_param, cclk_val; + int i, ret, addr; + int ec, sn, pn; + u8 *vpd, csum; + unsigned int vpdr_len, kw_offset, id_len; + + vpd = vmalloc(VPD_LEN); + if (!vpd) + return -ENOMEM; + + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); + if (ret < 0) + goto out; + + /* The VPD shall have a unique identifier specified by the PCI SIG. + * For chelsio adapters, the identifier is 0x82. The first byte of a VPD + * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software + * is expected to automatically put this entry at the + * beginning of the VPD. + */ + addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; + + ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); + if (ret < 0) + goto out; + + if (vpd[0] != PCI_VPD_LRDT_ID_STRING) { + dev_err(adapter->pdev_dev, "missing VPD ID string\n"); + ret = -EINVAL; + goto out; + } + + id_len = pci_vpd_lrdt_size(vpd); + if (id_len > ID_LEN) + id_len = ID_LEN; + + i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); + if (i < 0) { + dev_err(adapter->pdev_dev, "missing VPD-R section\n"); + ret = -EINVAL; + goto out; + } + + vpdr_len = pci_vpd_lrdt_size(&vpd[i]); + kw_offset = i + PCI_VPD_LRDT_TAG_SIZE; + if (vpdr_len + kw_offset > VPD_LEN) { + dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); + ret = -EINVAL; + goto out; + } + +#define FIND_VPD_KW(var, name) do { \ + var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \ + if (var < 0) { \ + dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ + ret = -EINVAL; \ + goto out; \ + } \ + var += PCI_VPD_INFO_FLD_HDR_SIZE; \ +} while (0) + + FIND_VPD_KW(i, "RV"); + for (csum = 0; i >= 0; i--) + csum += vpd[i]; + + if (csum) { + dev_err(adapter->pdev_dev, + "corrupted VPD EEPROM, actual csum %u\n", csum); + ret = -EINVAL; + goto out; + } + + FIND_VPD_KW(ec, "EC"); + FIND_VPD_KW(sn, "SN"); + FIND_VPD_KW(pn, "PN"); +#undef FIND_VPD_KW + + memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); + strim(p->id); + memcpy(p->ec, vpd + ec, EC_LEN); + strim(p->ec); + i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); + memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); + strim(p->sn); + i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); + memcpy(p->pn, vpd + pn, min(i, PN_LEN)); + strim(p->pn); + + /* + * Ask firmware for the Core Clock since it knows how to translate the + * Reference Clock ('V2') VPD field into a Core Clock value ... 
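+ * (i.e. a FW_PARAMS_CMD query of FW_PARAMS_PARAM_DEV_CCLK, issued just
+ * below via t4_query_params()).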
+ */ + cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); + ret = t4_query_params(adapter, adapter->mbox, 0, 0, + 1, &cclk_param, &cclk_val); + +out: + vfree(vpd); + if (ret) + return ret; + p->cclk = cclk_val; + + return 0; +} + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_RD_ID = 0x9f, /* read ID */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ + + FW_MAX_SIZE = 16 * SF_SEC_SIZE, +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F) + return -EBUSY; + t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) | + SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1)); + ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5); + if (!ret) + *valp = t4_read_reg(adapter, SF_DATA_A); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F) + return -EBUSY; + t4_write_reg(adapter, SF_DATA_A, val); + t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) | + SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1)); + return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5); +} + +/** + * flash_wait_op - wait for a flash operation to complete + * @adapter: the adapter + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. + */ +static int flash_wait_op(struct adapter *adapter, int attempts, int delay) +{ + int ret; + u32 status; + + while (1) { + if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || + (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) + return ret; + if (!(status & 1)) + return 0; + if (--attempts == 0) + return -EAGAIN; + if (delay) + msleep(delay); + } +} + +/** + * t4_read_flash - read words from serial flash + * @adapter: the adapter + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. 
+ * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianness. + */ +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented) +{ + int ret; + + if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) + return -EINVAL; + + addr = swab32(addr) | SF_RD_DATA_FAST; + + if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || + (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = (__force __u32) (htonl(*data)); + } + return 0; +} + +/** + * t4_write_flash - write up to a page of data to the serial flash + * @adapter: the adapter + * @addr: the start address to write + * @n: length of data to write in bytes + * @data: the data to write + * + * Writes up to a page of data (256 bytes) to the serial flash starting + * at the given address. All the data must be written to the same page. + */ +static int t4_write_flash(struct adapter *adapter, unsigned int addr, + unsigned int n, const u8 *data) +{ + int ret; + u32 buf[64]; + unsigned int i, c, left, val, offset = addr & 0xff; + + if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) + return -EINVAL; + + val = swab32(addr) | SF_PROG_PAGE; + + if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) + goto unlock; + + for (left = n; left; left -= c) { + c = min(left, 4U); + for (val = 0, i = 0; i < c; ++i) + val = (val << 8) + *data++; + + ret = sf1_write(adapter, c, c != left, 1, val); + if (ret) + goto unlock; + } + ret = flash_wait_op(adapter, 8, 1); + if (ret) + goto unlock; + + t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */ + + /* Read the page to verify the write succeeded */ + ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + if (ret) + return ret; + + if (memcmp(data - n, (u8 *)buf + offset, n)) { + dev_err(adapter->pdev_dev, + "failed to correctly write the flash page at %#x\n", + addr); + return -EIO; + } + return 0; + +unlock: + t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */ + return ret; +} + +/** + * t4_get_fw_version - read the firmware version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW version from flash. + */ +int t4_get_fw_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + +/** + * t4_get_tp_version - read the TP microcode version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the TP microcode version from flash. + */ +int t4_get_tp_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, tp_microcode_ver), + 1, vers, 0); +} + +/** + * t4_get_exprom_version - return the Expansion ROM version (if any) + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the Expansion ROM header from FLASH and returns the version + * number (if present) through the @vers return value pointer. We return + * this in the Firmware Version Format since it's convenient. Return + * 0 on success, -ENOENT if no Expansion ROM is present. 
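+ * For example, an Expansion ROM header version of 1.2.3.4 would be
+ * returned as 0x01020304, assuming the usual packing of 8-bit major,
+ * minor, micro and build fields.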
+ */ +int t4_get_exprom_version(struct adapter *adap, u32 *vers) +{ + struct exprom_header { + unsigned char hdr_arr[16]; /* must start with 0x55aa */ + unsigned char hdr_ver[4]; /* Expansion ROM version */ + } *hdr; + u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header), + sizeof(u32))]; + int ret; + + ret = t4_read_flash(adap, FLASH_EXP_ROM_START, + ARRAY_SIZE(exprom_header_buf), exprom_header_buf, + 0); + if (ret) + return ret; + + hdr = (struct exprom_header *)exprom_header_buf; + if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa) + return -ENOENT; + + *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) | + FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) | + FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) | + FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3])); + return 0; +} + +/* Is the given firmware API compatible with the one the driver was compiled + * with? + */ +static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) +{ + + /* short circuit if it's the exact same firmware version */ + if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) + return 1; + +#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) + if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && + SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) + return 1; +#undef SAME_INTF + + return 0; +} + +/* The firmware in the filesystem is usable, but should it be installed? + * This routine explains itself in detail if it indicates the filesystem + * firmware should be installed. + */ +static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, + int k, int c) +{ + const char *reason; + + if (!card_fw_usable) { + reason = "incompatible or unusable"; + goto install; + } + + if (k > c) { + reason = "older than the version supported with this driver"; + goto install; + } + + return 0; + +install: + dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " + "installing firmware %u.%u.%u.%u on card.\n", + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason, + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); + + return 1; +} + +int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, + const u8 *fw_data, unsigned int fw_size, + struct fw_hdr *card_fw, enum dev_state state, + int *reset) +{ + int ret, card_fw_usable, fs_fw_usable; + const struct fw_hdr *fs_fw; + const struct fw_hdr *drv_fw; + + drv_fw = &fw_info->fw_hdr; + + /* Read the header of the firmware on the card */ + ret = -t4_read_flash(adap, FLASH_FW_START, + sizeof(*card_fw) / sizeof(uint32_t), + (uint32_t *)card_fw, 1); + if (ret == 0) { + card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); + } else { + dev_err(adap->pdev_dev, + "Unable to read card's firmware header: %d\n", ret); + card_fw_usable = 0; + } + + if (fw_data != NULL) { + fs_fw = (const void *)fw_data; + fs_fw_usable = fw_compatible(drv_fw, fs_fw); + } else { + fs_fw = NULL; + fs_fw_usable = 0; + } + + if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && + (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { + /* Common case: the firmware on the card is an exact match and + * the filesystem one is an exact match too, or the filesystem + * one is absent/incompatible. 
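+ * Either way the firmware already on the card is the one we will use,
+ * so there is nothing to do here.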
+ */ + } else if (fs_fw_usable && state == DEV_STATE_UNINIT && + should_install_fs_fw(adap, card_fw_usable, + be32_to_cpu(fs_fw->fw_ver), + be32_to_cpu(card_fw->fw_ver))) { + ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); + if (ret != 0) { + dev_err(adap->pdev_dev, + "failed to install firmware: %d\n", ret); + goto bye; + } + + /* Installed successfully, update the cached header too. */ + *card_fw = *fs_fw; + card_fw_usable = 1; + *reset = 0; /* already reset as part of load_fw */ + } + + if (!card_fw_usable) { + uint32_t d, c, k; + + d = be32_to_cpu(drv_fw->fw_ver); + c = be32_to_cpu(card_fw->fw_ver); + k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; + + dev_err(adap->pdev_dev, "Cannot find a usable firmware: " + "chip state %d, " + "driver compiled with %d.%d.%d.%d, " + "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", + state, + FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d), + FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d), + FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c), + FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), + FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), + FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); + ret = EINVAL; + goto bye; + } + + /* We're using whatever's on the card and it's known to be good. */ + adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); + adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); + +bye: + return ret; +} + +/** + * t4_flash_erase_sectors - erase a range of flash sectors + * @adapter: the adapter + * @start: the first sector to erase + * @end: the last sector to erase + * + * Erases the sectors in the given inclusive range. + */ +static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) +{ + int ret = 0; + + if (end >= adapter->params.sf_nsec) + return -EINVAL; + + while (start <= end) { + if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 0, 1, + SF_ERASE_SECTOR | (start << 8))) != 0 || + (ret = flash_wait_op(adapter, 14, 500)) != 0) { + dev_err(adapter->pdev_dev, + "erase of flash sector %d failed, error %d\n", + start, ret); + break; + } + start++; + } + t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */ + return ret; +} + +/** + * t4_flash_cfg_addr - return the address of the flash configuration file + * @adapter: the adapter + * + * Return the address within the flash where the Firmware Configuration + * File is stored. + */ +unsigned int t4_flash_cfg_addr(struct adapter *adapter) +{ + if (adapter->params.sf_size == 0x100000) + return FLASH_FPGA_CFG_START; + else + return FLASH_CFG_START; +} + +/* Return TRUE if the specified firmware matches the adapter. I.e. T4 + * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead + * and emit an error message for mismatched firmware to save our caller the + * effort ... + */ +static bool t4_fw_matches_chip(const struct adapter *adap, + const struct fw_hdr *hdr) +{ + /* The expression below will return FALSE for any unsupported adapter + * which will keep us "honest" in the future ... 
+ */ + if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || + (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5)) + return true; + + dev_err(adap->pdev_dev, + "FW image (%d) is not suitable for this adapter (%d)\n", + hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip)); + return false; +} + +/** + * t4_load_fw - download firmware + * @adap: the adapter + * @fw_data: the firmware image to write + * @size: image size + * + * Write the supplied firmware image to the card's serial flash. + */ +int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) +{ + u32 csum; + int ret, addr; + unsigned int i; + u8 first_page[SF_PAGE_SIZE]; + const __be32 *p = (const __be32 *)fw_data; + const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + unsigned int fw_img_start = adap->params.sf_fw_start; + unsigned int fw_start_sec = fw_img_start / sf_sec_size; + + if (!size) { + dev_err(adap->pdev_dev, "FW image has no data\n"); + return -EINVAL; + } + if (size & 511) { + dev_err(adap->pdev_dev, + "FW image size not multiple of 512 bytes\n"); + return -EINVAL; + } + if (ntohs(hdr->len512) * 512 != size) { + dev_err(adap->pdev_dev, + "FW image size differs from size in FW header\n"); + return -EINVAL; + } + if (size > FW_MAX_SIZE) { + dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", + FW_MAX_SIZE); + return -EFBIG; + } + if (!t4_fw_matches_chip(adap, hdr)) + return -EINVAL; + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + dev_err(adap->pdev_dev, + "corrupted firmware image, checksum %#x\n", csum); + return -EINVAL; + } + + i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */ + ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1); + if (ret) + goto out; + + /* + * We write the correct version at the end so the driver can see a bad + * version if the FW write fails. Start by writing a copy of the + * first page with a bad version. 
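+ * The sequence from here is: write the first page with fw_ver forced
+ * to 0xffffffff, write the remaining pages, then rewrite the real
+ * fw_ver word taken from the image header.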
+ */ + memcpy(first_page, fw_data, SF_PAGE_SIZE); + ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); + ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); + if (ret) + goto out; + + addr = fw_img_start; + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { + addr += SF_PAGE_SIZE; + fw_data += SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); + if (ret) + goto out; + } + + ret = t4_write_flash(adap, + fw_img_start + offsetof(struct fw_hdr, fw_ver), + sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); +out: + if (ret) + dev_err(adap->pdev_dev, "firmware download failed, error %d\n", + ret); + else + ret = t4_get_fw_version(adap, &adap->params.fw_vers); + return ret; +} + +/** + * t4_fwcache - firmware cache operation + * @adap: the adapter + * @op : the operation (flush or flush and invalidate) + */ +int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) +{ + struct fw_params_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = + cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_PARAMS_CMD_PFN_V(adap->fn) | + FW_PARAMS_CMD_VFN_V(0)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + c.param[0].mnem = + cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE)); + c.param[0].val = (__force __be32)op; + + return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); +} + +void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) +{ + unsigned int i, j; + + for (i = 0; i < 8; i++) { + u32 *p = la_buf + i; + + t4_write_reg(adap, ULP_RX_LA_CTL_A, i); + j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A); + t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j); + for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8) + *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A); + } +} + +#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ + FW_PORT_CAP_ANEG) + +/** + * t4_link_start - apply link configuration to MAC/PHY + * @phy: the PHY to setup + * @mac: the MAC to setup + * @lc: the requested link configuration + * + * Set up a port's MAC and PHY according to a desired link configuration. + * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. 
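+ *
+ * The requested pause settings map directly onto the firmware port
+ * capabilities: PAUSE_RX selects FW_PORT_CAP_FC_RX and PAUSE_TX selects
+ * FW_PORT_CAP_FC_TX.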
+ */ +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc) +{ + struct fw_port_cmd c; + unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); + + lc->link_ok = 0; + if (lc->requested_fc & PAUSE_RX) + fc |= FW_PORT_CAP_FC_RX; + if (lc->requested_fc & PAUSE_TX) + fc |= FW_PORT_CAP_FC_TX; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else if (lc->autoneg == AUTONEG_DISABLE) { + c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else + c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_restart_aneg - restart autonegotiation + * @adap: the adapter + * @mbox: mbox to use for the FW command + * @port: the port id + * + * Restarts autonegotiation for the selected port. + */ +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) +{ + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +typedef void (*int_handler_t)(struct adapter *adap); + +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ + int_handler_t int_handler; /* platform-specific int handler */ +}; + +/** + * t4_handle_intr_status - table driven interrupt handler + * @adapter: the adapter that generated the interrupt + * @reg: the interrupt status register to process + * @acts: table of interrupt actions + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occurred. The actions include + * optionally emitting a warning or alert message. The table is terminated + * by an entry specifying mask 0. Returns the number of fatal interrupt + * conditions. + */ +static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, + const struct intr_info *acts) +{ + int fatal = 0; + unsigned int mask = 0; + unsigned int status = t4_read_reg(adapter, reg); + + for ( ; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + } else if (acts->msg && printk_ratelimit()) + dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + if (acts->int_handler) + acts->int_handler(adapter); + mask |= acts->mask; + } + status &= mask; + if (status) /* clear processed interrupts */ + t4_write_reg(adapter, reg, status); + return fatal; +} + +/* + * Interrupt handler for the PCIE module. 
+ */ +static void pcie_intr_handler(struct adapter *adapter) +{ + static const struct intr_info sysbus_intr_info[] = { + { RNPP_F, "RXNP array parity error", -1, 1 }, + { RPCP_F, "RXPC array parity error", -1, 1 }, + { RCIP_F, "RXCIF array parity error", -1, 1 }, + { RCCP_F, "Rx completions control array parity error", -1, 1 }, + { RFTP_F, "RXFT array parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info pcie_port_intr_info[] = { + { TPCP_F, "TXPC array parity error", -1, 1 }, + { TNPP_F, "TXNP array parity error", -1, 1 }, + { TFTP_F, "TXFT array parity error", -1, 1 }, + { TCAP_F, "TXCA array parity error", -1, 1 }, + { TCIP_F, "TXCIF array parity error", -1, 1 }, + { RCAP_F, "RXCA array parity error", -1, 1 }, + { OTDD_F, "outbound request TLP discarded", -1, 1 }, + { RDPE_F, "Rx data parity error", -1, 1 }, + { TDUE_F, "Tx uncorrectable data error", -1, 1 }, + { 0 } + }; + static const struct intr_info pcie_intr_info[] = { + { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 }, + { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 }, + { MSIDATAPERR_F, "MSI data parity error", -1, 1 }, + { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 }, + { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 }, + { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 }, + { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 }, + { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 }, + { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 }, + { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 }, + { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 }, + { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR_F, "PCI FID parity error", -1, 1 }, + { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 }, + { MATAGPERR_F, "PCI MA tag parity error", -1, 1 }, + { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 }, + { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 }, + { RXWRPERR_F, "PCI Rx write parity error", -1, 1 }, + { RPLPERR_F, "PCI replay buffer parity error", -1, 1 }, + { PCIESINT_F, "PCI core secondary fault", -1, 1 }, + { PCIEPINT_F, "PCI core primary fault", -1, 1 }, + { UNXSPLCPLERR_F, "PCI unexpected split completion error", + -1, 0 }, + { 0 } + }; + + static struct intr_info t5_pcie_intr_info[] = { + { MSTGRPPERR_F, "Master Response Read Queue parity error", + -1, 1 }, + { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 }, + { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 }, + { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 }, + { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error", + -1, 1 }, + { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error", + -1, 1 }, + { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 }, + { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 }, + { CREQPERR_F, "PCI CMD 
channel request parity error", -1, 1 }, + { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 }, + { DREQWRPERR_F, "PCI DMA channel write request parity error", + -1, 1 }, + { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 }, + { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR_F, "PCI FID parity error", -1, 1 }, + { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 }, + { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 }, + { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 }, + { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error", + -1, 1 }, + { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error", + -1, 1 }, + { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 }, + { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 }, + { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 }, + { READRSPERR_F, "Outbound read error", -1, 0 }, + { 0 } + }; + + int fat; + + if (is_t4(adapter->params.chip)) + fat = t4_handle_intr_status(adapter, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A, + sysbus_intr_info) + + t4_handle_intr_status(adapter, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A, + pcie_port_intr_info) + + t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A, + pcie_intr_info); + else + fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A, + t5_pcie_intr_info); + + if (fat) + t4_fatal_err(adapter); +} + +/* + * TP interrupt handler. + */ +static void tp_intr_handler(struct adapter *adapter) +{ + static const struct intr_info tp_intr_info[] = { + { 0x3fffffff, "TP parity error", -1, 1 }, + { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info)) + t4_fatal_err(adapter); +} + +/* + * SGE interrupt handler. 
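+ * In addition to the table-driven decode of SGE_INT_CAUSE3, the two SGE + * parity cause registers (SGE_INT_CAUSE1/2) are checked and any parity + * error reported there is treated as fatal.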
+ */ +static void sge_intr_handler(struct adapter *adapter) +{ + u64 v; + + static const struct intr_info sge_intr_info[] = { + { ERR_CPL_EXCEED_IQE_SIZE_F, + "SGE received CPL exceeding IQE size", -1, 1 }, + { ERR_INVALID_CIDX_INC_F, + "SGE GTS CIDX increment too large", -1, 0 }, + { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 }, + { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full }, + { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full }, + { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped }, + { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F, + "SGE IQID > 1023 received CPL for FL", -1, 0 }, + { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1, + 0 }, + { ERR_ING_CTXT_PRIO_F, + "SGE too many priority ingress contexts", -1, 0 }, + { ERR_EGR_CTXT_PRIO_F, + "SGE too many priority egress contexts", -1, 0 }, + { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 }, + { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 }, + { 0 } + }; + + v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) | + ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32); + if (v) { + dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", + (unsigned long long)v); + t4_write_reg(adapter, SGE_INT_CAUSE1_A, v); + t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32); + } + + if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) || + v != 0) + t4_fatal_err(adapter); +} + +#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\ + OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F) +#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\ + IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F) + +/* + * CIM interrupt handler. 
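+ * A firmware error latched in PCIE_FW is reported first; the CIM host and + * uP-access interrupt cause registers are then decoded and any fatal + * condition brings the adapter down.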
+ */ +static void cim_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cim_intr_info[] = { + { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 }, + { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 }, + { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 }, + { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 }, + { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 }, + { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 }, + { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info cim_upintr_info[] = { + { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 }, + { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 }, + { ILLWRINT_F, "CIM illegal write", -1, 1 }, + { ILLRDINT_F, "CIM illegal read", -1, 1 }, + { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 }, + { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 }, + { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 }, + { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 }, + { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 }, + { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 }, + { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 }, + { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 }, + { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 }, + { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 }, + { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 }, + { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 }, + { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 }, + { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 }, + { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 }, + { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 }, + { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 }, + { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 }, + { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 }, + { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 }, + { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 }, + { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 }, + { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 }, + { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 }, + { 0 } + }; + + int fat; + + if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F) + t4_report_fw_error(adapter); + + fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A, + cim_intr_info) + + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A, + cim_upintr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * ULP RX interrupt handler. + */ +static void ulprx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulprx_intr_info[] = { + { 0x1800000, "ULPRX context error", -1, 1 }, + { 0x7fffff, "ULPRX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * ULP TX interrupt handler. + */ +static void ulptx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info ulptx_intr_info[] = { + { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1, + 0 }, + { 0xfffffff, "ULPTX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM TX interrupt handler. 
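+ * Every PM TX condition decoded here is treated as fatal.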
+ */ +static void pmtx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmtx_intr_info[] = { + { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 }, + { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 }, + { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 }, + { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", + -1, 1 }, + { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 }, + { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM RX interrupt handler. + */ +static void pmrx_intr_handler(struct adapter *adapter) +{ + static const struct intr_info pmrx_intr_info[] = { + { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 }, + { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 }, + { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", + -1, 1 }, + { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 }, + { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * CPL switch interrupt handler. + */ +static void cplsw_intr_handler(struct adapter *adapter) +{ + static const struct intr_info cplsw_intr_info[] = { + { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 }, + { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 }, + { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 }, + { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 }, + { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 }, + { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info)) + t4_fatal_err(adapter); +} + +/* + * LE interrupt handler. + */ +static void le_intr_handler(struct adapter *adap) +{ + static const struct intr_info le_intr_info[] = { + { LIPMISS_F, "LE LIP miss", -1, 0 }, + { LIP0_F, "LE 0 LIP error", -1, 0 }, + { PARITYERR_F, "LE parity error", -1, 1 }, + { UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { REQQPARERR_F, "LE request queue parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info)) + t4_fatal_err(adap); +} + +/* + * MPS interrupt handler. 
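+ * Decodes the per-block MPS cause registers (Rx, Tx, TRC, statistics and + * classification) and then clears the top-level MPS_INT_CAUSE register, + * reading it back to flush the write.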
+ */ +static void mps_intr_handler(struct adapter *adapter) +{ + static const struct intr_info mps_rx_intr_info[] = { + { 0xffffff, "MPS Rx parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_tx_intr_info[] = { + { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error", + -1, 1 }, + { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error", + -1, 1 }, + { BUBBLE_F, "MPS Tx underflow", -1, 1 }, + { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR_F, "MPS Tx framing error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_trc_intr_info[] = { + { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 }, + { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error", + -1, 1 }, + { MISCPERR_F, "MPS TRC misc parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_stat_sram_intr_info[] = { + { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_stat_tx_intr_info[] = { + { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_stat_rx_intr_info[] = { + { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, + { 0 } + }; + static const struct intr_info mps_cls_intr_info[] = { + { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 }, + { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 }, + { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A, + mps_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A, + mps_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A, + mps_trc_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A, + mps_stat_sram_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A, + mps_stat_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A, + mps_stat_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A, + mps_cls_intr_info); + + t4_write_reg(adapter, MPS_INT_CAUSE_A, 0); + t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */ + if (fat) + t4_fatal_err(adapter); +} + +#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \ + ECC_UE_INT_CAUSE_F) + +/* + * EDC/MC interrupt handler. + */ +static void mem_intr_handler(struct adapter *adapter, int idx) +{ + static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" }; + + unsigned int addr, cnt_addr, v; + + if (idx <= MEM_EDC1) { + addr = EDC_REG(EDC_INT_CAUSE_A, idx); + cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx); + } else if (idx == MEM_MC) { + if (is_t4(adapter->params.chip)) { + addr = MC_INT_CAUSE_A; + cnt_addr = MC_ECC_STATUS_A; + } else { + addr = MC_P_INT_CAUSE_A; + cnt_addr = MC_P_ECC_STATUS_A; + } + } else { + addr = MC_REG(MC_P_INT_CAUSE_A, 1); + cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1); + } + + v = t4_read_reg(adapter, addr) & MEM_INT_MASK; + if (v & PERR_INT_CAUSE_F) + dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", + name[idx]); + if (v & ECC_CE_INT_CAUSE_F) { + u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr)); + + t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M)); + if (printk_ratelimit()) + dev_warn(adapter->pdev_dev, + "%u %s correctable ECC data error%s\n", + cnt, name[idx], cnt > 1 ? 
"s" : ""); + } + if (v & ECC_UE_INT_CAUSE_F) + dev_alert(adapter->pdev_dev, + "%s uncorrectable ECC data error\n", name[idx]); + + t4_write_reg(adapter, addr, v); + if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F)) + t4_fatal_err(adapter); +} + +/* + * MA interrupt handler. + */ +static void ma_intr_handler(struct adapter *adap) +{ + u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A); + + if (status & MEM_PERR_INT_CAUSE_F) { + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A)); + if (is_t5(adap->params.chip)) + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, + MA_PARITY_ERROR_STATUS2_A)); + } + if (status & MEM_WRAP_INT_CAUSE_F) { + v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A); + dev_alert(adap->pdev_dev, "MA address wrap-around error by " + "client %u to address %#x\n", + MEM_WRAP_CLIENT_NUM_G(v), + MEM_WRAP_ADDRESS_G(v) << 4); + } + t4_write_reg(adap, MA_INT_CAUSE_A, status); + t4_fatal_err(adap); +} + +/* + * SMB interrupt handler. + */ +static void smb_intr_handler(struct adapter *adap) +{ + static const struct intr_info smb_intr_info[] = { + { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 }, + { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 }, + { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info)) + t4_fatal_err(adap); +} + +/* + * NC-SI interrupt handler. + */ +static void ncsi_intr_handler(struct adapter *adap) +{ + static const struct intr_info ncsi_intr_info[] = { + { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 }, + { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 }, + { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 }, + { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info)) + t4_fatal_err(adap); +} + +/* + * XGMAC interrupt handler. + */ +static void xgmac_intr_handler(struct adapter *adap, int port) +{ + u32 v, int_cause_reg; + + if (is_t4(adap->params.chip)) + int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A); + else + int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A); + + v = t4_read_reg(adap, int_cause_reg); + + v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F; + if (!v) + return; + + if (v & TXFIFO_PRTY_ERR_F) + dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", + port); + if (v & RXFIFO_PRTY_ERR_F) + dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", + port); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v); + t4_fatal_err(adap); +} + +/* + * PL interrupt handler. + */ +static void pl_intr_handler(struct adapter *adap) +{ + static const struct intr_info pl_intr_info[] = { + { FATALPERR_F, "T4 fatal parity error", -1, 1 }, + { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info)) + t4_fatal_err(adap); +} + +#define PF_INTR_MASK (PFSW_F) +#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \ + EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \ + CPL_SWITCH_F | SGE_F | ULP_TX_F) + +/** + * t4_slow_intr_handler - control path interrupt handler + * @adapter: the adapter + * + * T4 interrupt handler for non-data global interrupt events, e.g., errors. + * The designation 'slow' is because it involves register reads, while + * data interrupts typically don't involve any MMIOs. 
+ */ +int t4_slow_intr_handler(struct adapter *adapter) +{ + u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A); + + if (!(cause & GLBL_INTR_MASK)) + return 0; + if (cause & CIM_F) + cim_intr_handler(adapter); + if (cause & MPS_F) + mps_intr_handler(adapter); + if (cause & NCSI_F) + ncsi_intr_handler(adapter); + if (cause & PL_F) + pl_intr_handler(adapter); + if (cause & SMB_F) + smb_intr_handler(adapter); + if (cause & XGMAC0_F) + xgmac_intr_handler(adapter, 0); + if (cause & XGMAC1_F) + xgmac_intr_handler(adapter, 1); + if (cause & XGMAC_KR0_F) + xgmac_intr_handler(adapter, 2); + if (cause & XGMAC_KR1_F) + xgmac_intr_handler(adapter, 3); + if (cause & PCIE_F) + pcie_intr_handler(adapter); + if (cause & MC_F) + mem_intr_handler(adapter, MEM_MC); + if (!is_t4(adapter->params.chip) && (cause & MC1_S)) + mem_intr_handler(adapter, MEM_MC1); + if (cause & EDC0_F) + mem_intr_handler(adapter, MEM_EDC0); + if (cause & EDC1_F) + mem_intr_handler(adapter, MEM_EDC1); + if (cause & LE_F) + le_intr_handler(adapter); + if (cause & TP_F) + tp_intr_handler(adapter); + if (cause & MA_F) + ma_intr_handler(adapter); + if (cause & PM_TX_F) + pmtx_intr_handler(adapter); + if (cause & PM_RX_F) + pmrx_intr_handler(adapter); + if (cause & ULP_RX_F) + ulprx_intr_handler(adapter); + if (cause & CPL_SWITCH_F) + cplsw_intr_handler(adapter); + if (cause & SGE_F) + sge_intr_handler(adapter); + if (cause & ULP_TX_F) + ulptx_intr_handler(adapter); + + /* Clear the interrupts just processed for which we are the master. */ + t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK); + (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */ + return 1; +} + +/** + * t4_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable PF-specific interrupts for the calling function and the top-level + * interrupt concentrator for global interrupts. Interrupts are already + * enabled at each module, here we just enable the roots of the interrupt + * hierarchies. + * + * Note: this function should be called only when the driver manages + * non PF-specific interrupts from the various HW modules. Only one PCI + * function at a time should be doing this. + */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A)); + + t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F | + ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F | + ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F | + ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | + ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | + ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | + ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F | + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F | + EGRESS_SIZE_ERR_F); + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK); + t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. 
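+ * + * Per-module interrupt enables are left untouched; only this PF's root + * enable and its bit in the PL interrupt map are cleared.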
+ */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A)); + + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); + t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0); +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. + */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +/** + * t4_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: virtual interface whose RSS subtable is to be written + * @start: start entry in the table to write + * @n: how many table entries to write + * @rspq: values for the response queue lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range allowed for + * @viid. + */ +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq) +{ + int ret; + const u16 *rsp = rspq; + const u16 *rsp_end = rspq + nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); + cmd.retval_len16 = htonl(FW_LEN16(cmd)); + + /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ + while (n > 0) { + int nq = min(n, 32); + __be32 *qp = &cmd.iq0_to_iq2; + + cmd.niqid = htons(nq); + cmd.startidx = htons(start); + + start += nq; + n -= nq; + + while (nq > 0) { + unsigned int v; + + v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + + *qp++ = htonl(v); + nq -= 3; + } + + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + return 0; +} + +/** + * t4_config_glbl_rss - configure the global RSS mode + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @mode: global RSS mode + * @flags: mode-specific flags + * + * Sets the global RSS mode. 
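+ * Only the MANUAL and BASICVIRTUAL modes are supported; any other mode + * fails with -EINVAL before a firmware command is issued.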
+ */ +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags) +{ + struct fw_rss_glb_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F); + c.retval_len16 = htonl(FW_LEN16(c)); + if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); + } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + c.u.basicvirtual.mode_pkd = + htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode)); + c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); + } else + return -EINVAL; + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); +} + +/* Read an RSS table row */ +static int rd_rss_row(struct adapter *adap, int row, u32 *val) +{ + t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row); + return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1, + 5, 0, val); +} + +/** + * t4_read_rss - read the contents of the RSS mapping table + * @adapter: the adapter + * @map: holds the contents of the RSS mapping table + * + * Reads the contents of the RSS hash->queue mapping table. + */ +int t4_read_rss(struct adapter *adapter, u16 *map) +{ + u32 val; + int i, ret; + + for (i = 0; i < RSS_NENTRIES / 2; ++i) { + ret = rd_rss_row(adapter, i, &val); + if (ret) + return ret; + *map++ = LKPTBLQUEUE0_G(val); + *map++ = LKPTBLQUEUE1_G(val); + } + return 0; +} + +/** + * t4_read_rss_key - read the global RSS key + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * + * Reads the global 320-bit RSS key. + */ +void t4_read_rss_key(struct adapter *adap, u32 *key) +{ + t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, + TP_RSS_SECRET_KEY0_A); +} + +/** + * t4_write_rss_key - program one of the RSS keys + * @adap: the adapter + * @key: 10-entry array holding the 320-bit RSS key + * @idx: which RSS key to write + * + * Writes one of the RSS keys with the given 320-bit value. If @idx is + * 0..15 the corresponding entry in the RSS key table is written, + * otherwise the global RSS key is written. + */ +void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) +{ + t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, + TP_RSS_SECRET_KEY0_A); + if (idx >= 0 && idx < 16) + t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, + KEYWRADDR_V(idx) | KEYWREN_F); +} + +/** + * t4_read_rss_pf_config - read PF RSS Configuration Table + * @adapter: the adapter + * @index: the entry in the PF RSS table to read + * @valp: where to store the returned value + * + * Reads the PF RSS Configuration Table at the specified index and returns + * the value found there. + */ +void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, + u32 *valp) +{ + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + valp, 1, TP_RSS_PF0_CONFIG_A + index); +} + +/** + * t4_read_rss_vf_config - read VF RSS Configuration Table + * @adapter: the adapter + * @index: the entry in the VF RSS table to read + * @vfl: where to store the returned VFL + * @vfh: where to store the returned VFH + * + * Reads the VF RSS Configuration Table at the specified index and returns + * the (VFL, VFH) values found there. + */ +void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, + u32 *vfl, u32 *vfh) +{ + u32 vrt, mask, data; + + mask = VFWRADDR_V(VFWRADDR_M); + data = VFWRADDR_V(index); + + /* Request that the index'th VF Table values be read into VFL/VFH. 
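+ * This is done by programming the VF index and the read-enable bit into + * TP_RSS_CONFIG_VRT while the write-enable bits are cleared.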
+ */ + vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A); + vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask); + vrt |= data | VFRDEN_F; + t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt); + + /* Grab the VFL/VFH values ... + */ + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + vfl, 1, TP_RSS_VFL_CONFIG_A); + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + vfh, 1, TP_RSS_VFH_CONFIG_A); +} + +/** + * t4_read_rss_pf_map - read PF RSS Map + * @adapter: the adapter + * + * Reads the PF RSS Map register and returns its value. + */ +u32 t4_read_rss_pf_map(struct adapter *adapter) +{ + u32 pfmap; + + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &pfmap, 1, TP_RSS_PF_MAP_A); + return pfmap; +} + +/** + * t4_read_rss_pf_mask - read PF RSS Mask + * @adapter: the adapter + * + * Reads the PF RSS Mask register and returns its value. + */ +u32 t4_read_rss_pf_mask(struct adapter *adapter) +{ + u32 pfmask; + + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &pfmask, 1, TP_RSS_PF_MSK_A); + return pfmask; +} + +/** + * t4_tp_get_tcp_stats - read TP's TCP MIB counters + * @adap: the adapter + * @v4: holds the TCP/IP counter values + * @v6: holds the TCP/IPv6 counter values + * + * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. + * Either @v4 or @v6 may be %NULL to skip the corresponding stats. + */ +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1]; + +#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A) +#define STAT(x) val[STAT_IDX(x)] +#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) + + if (v4) { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, + ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A); + v4->tcpOutRsts = STAT(OUT_RST); + v4->tcpInSegs = STAT64(IN_SEG); + v4->tcpOutSegs = STAT64(OUT_SEG); + v4->tcpRetransSegs = STAT64(RXT_SEG); + } + if (v6) { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, + ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A); + v6->tcpOutRsts = STAT(OUT_RST); + v6->tcpInSegs = STAT64(IN_SEG); + v6->tcpOutSegs = STAT64(OUT_SEG); + v6->tcpRetransSegs = STAT64(RXT_SEG); + } +#undef STAT64 +#undef STAT +#undef STAT_IDX +} + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, TP_MTU_TABLE_A, + MTUINDEX_V(0xff) | MTUVALUE_V(i)); + v = t4_read_reg(adap, TP_MTU_TABLE_A); + mtus[i] = MTUVALUE_G(v); + if (mtu_log) + mtu_log[i] = MTUWIDTH_G(v); + } +} + +/** + * t4_read_cong_tbl - reads the congestion control table + * @adap: the adapter + * @incr: where to store the alpha values + * + * Reads the additive increments programmed into the HW congestion + * control table. 
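+ * Each entry is selected by writing its row index to TP_CCTRL_TABLE and + * reading the increment back from the low 13 bits of the same register.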
+ */ +void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]) +{ + unsigned int mtu, w; + + for (mtu = 0; mtu < NMTUS; ++mtu) + for (w = 0; w < NCCTRL_WIN; ++w) { + t4_write_reg(adap, TP_CCTRL_TABLE_A, + ROWINDEX_V(0xffff) | (mtu << 5) | w); + incr[mtu][w] = (u16)t4_read_reg(adap, + TP_CCTRL_TABLE_A) & 0x1fff; + } +} + +/** + * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @adap: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val) +{ + t4_write_reg(adap, TP_PIO_ADDR_A, addr); + val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask; + t4_write_reg(adap, TP_PIO_DATA_A, val); +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters. + */ +static void init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; + b[9] = b[10] = 1; + b[11] = b[12] = 2; + b[13] = b[14] = b[15] = b[16] = 3; + b[17] = b[18] = b[19] = b[20] = b[21] = 4; + b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; + b[28] = b[29] = 6; + b[30] = b[31] = 7; +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. 
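+ * For each congestion-control window the increment is computed as + * (mtu - 40) * alpha[w] / avg_pkts[w], clamped to a minimum of CC_MIN_INCR.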
+ */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) | + MTUWIDTH_V(log2) | MTUVALUE_V(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t4_pmtx_get_stats - returns the HW stats from PMTX + * @adap: the adapter + * @cnt: where to store the count statistics + * @cycles: where to store the cycle statistics + * + * Returns performance statistics from PMTX. + */ +void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) +{ + int i; + u32 data[2]; + + for (i = 0; i < PM_NSTATS; i++) { + t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1); + cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A); + if (is_t4(adap->params.chip)) { + cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A); + } else { + t4_read_indirect(adap, PM_TX_DBG_CTRL_A, + PM_TX_DBG_DATA_A, data, 2, + PM_TX_DBG_STAT_MSB_A); + cycles[i] = (((u64)data[0] << 32) | data[1]); + } + } +} + +/** + * t4_pmrx_get_stats - returns the HW stats from PMRX + * @adap: the adapter + * @cnt: where to store the count statistics + * @cycles: where to store the cycle statistics + * + * Returns performance statistics from PMRX. + */ +void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) +{ + int i; + u32 data[2]; + + for (i = 0; i < PM_NSTATS; i++) { + t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1); + cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A); + if (is_t4(adap->params.chip)) { + cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A); + } else { + t4_read_indirect(adap, PM_RX_DBG_CTRL_A, + PM_RX_DBG_DATA_A, data, 2, + PM_RX_DBG_STAT_MSB_A); + cycles[i] = (((u64)data[0] << 32) | data[1]); + } + } +} + +/** + * get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @idx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +static unsigned int get_mps_bg_map(struct adapter *adap, int idx) +{ + u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A)); + + if (n == 0) + return idx == 0 ? 0xf : 0; + if (n == 1) + return idx < 2 ? 
(3 << (2 * idx)) : 0; + return 1 << idx; +} + +/** + * t4_get_port_type_description - return Port Type string description + * @port_type: firmware Port Type enumeration + */ +const char *t4_get_port_type_description(enum fw_port_type port_type) +{ + static const char *const port_type_description[] = { + "R XFI", + "R XAUI", + "T SGMII", + "T XFI", + "T XAUI", + "KX4", + "CX4", + "KX", + "KR", + "R SFP+", + "KR/KX", + "KR/KX/KX4", + "R QSFP_10G", + "R QSA", + "R QSFP", + "R BP40_BA", + }; + + if (port_type < ARRAY_SIZE(port_type_description)) + return port_type_description[port_type]; + return "UNKNOWN"; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. + */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, \ + (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \ + T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L))) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + + p->rx_ovflow0 = (bgmap & 1) ? 
GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_wol_magic_enable - enable/disable magic packet WoL + * @adap: the adapter + * @port: the physical port index + * @addr: MAC address expected in magic packets, %NULL to disable + * + * Enables/disables magic packet wake-on-LAN for the selected port. + */ +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr) +{ + u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; + + if (is_t4(adap->params.chip)) { + mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); + mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); + port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); + } else { + mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); + mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); + port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); + } + + if (addr) { + t4_write_reg(adap, mag_id_reg_l, + (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | addr[5]); + t4_write_reg(adap, mag_id_reg_h, + (addr[0] << 8) | addr[1]); + } + t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F, + addr ? MAGICEN_F : 0); +} + +/** + * t4_wol_pat_enable - enable/disable pattern-based WoL + * @adap: the adapter + * @port: the physical port index + * @map: bitmap of which HW pattern filters to set + * @mask0: byte mask for bytes 0-63 of a packet + * @mask1: byte mask for bytes 64-127 of a packet + * @crc: Ethernet CRC for selected bytes + * @enable: enable/disable switch + * + * Sets the pattern filters indicated in @map to mask out the bytes + * specified in @mask0/@mask1 in received packets and compare the CRC of + * the resulting packet against @crc. If @enable is %true pattern-based + * WoL is enabled, otherwise disabled. + */ +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable) +{ + int i; + u32 port_cfg_reg; + + if (is_t4(adap->params.chip)) + port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); + else + port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); + + if (!enable) { + t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0); + return 0; + } + if (map > 0xff) + return -EINVAL; + +#define EPIO_REG(name) \ + (is_t4(adap->params.chip) ? 
\ + PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \ + T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A)) + + t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); + t4_write_reg(adap, EPIO_REG(DATA2), mask1); + t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); + + for (i = 0; i < NWOL_PAT; i++, map >>= 1) { + if (!(map & 1)) + continue; + + /* write byte masks */ + t4_write_reg(adap, EPIO_REG(DATA0), mask0); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F) + return -ETIMEDOUT; + + /* write CRC */ + t4_write_reg(adap, EPIO_REG(DATA0), crc); + t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F); + t4_read_reg(adap, EPIO_REG(OP)); /* flush */ + if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F) + return -ETIMEDOUT; + } +#undef EPIO_REG + + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F); + return 0; +} + +/* t4_mk_filtdelwr - create a delete filter WR + * @ftid: the filter ID + * @wr: the filter work request to populate + * @qid: ingress queue to receive the delete notification + * + * Creates a filter work request to delete the supplied filter. If @qid is + * negative the delete notification is suppressed. + */ +void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) +{ + memset(wr, 0, sizeof(*wr)); + wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16)); + wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) | + FW_FILTER_WR_NOREPLY_V(qid < 0)); + wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F); + if (qid >= 0) + wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid)); +} + +#define INIT_CMD(var, cmd, rd_wr) do { \ + (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \ + FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \ + (var).retval_len16 = htonl(FW_LEN16(var)); \ +} while (0) + +int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, + u32 addr, u32 val) +{ + struct fw_ldst_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE)); + c.cycles_to_len16 = htonl(FW_LEN16(c)); + c.u.addrval.addr = htonl(addr); + c.u.addrval.val = htonl(val); + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_mdio_rd - read a PHY register through MDIO + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @phy_addr: the PHY address + * @mmd: the PHY MMD to access (0 for clause 22 PHYs) + * @reg: the register to read + * @valp: where to store the value + * + * Issues a FW command through the given mailbox to read a PHY register. 
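+ * Returns 0 on success, in which case the register value is stored in + * @valp, otherwise a negative error code from the mailbox exchange.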
+ */ +int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp) +{ + int ret; + struct fw_ldst_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); + c.cycles_to_len16 = htonl(FW_LEN16(c)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); + c.u.mdio.raddr = htons(reg); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + *valp = ntohs(c.u.mdio.rval); + return ret; +} + +/** + * t4_mdio_wr - write a PHY register through MDIO + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @phy_addr: the PHY address + * @mmd: the PHY MMD to access (0 for clause 22 PHYs) + * @reg: the register to write + * @valp: value to write + * + * Issues a FW command through the given mailbox to write a PHY register. + */ +int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 val) +{ + struct fw_ldst_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO)); + c.cycles_to_len16 = htonl(FW_LEN16(c)); + c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) | + FW_LDST_CMD_MMD_V(mmd)); + c.u.mdio.raddr = htons(reg); + c.u.mdio.rval = htons(val); + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_sge_decode_idma_state - decode the idma state + * @adap: the adapter + * @state: the state idma is stuck in + */ +void t4_sge_decode_idma_state(struct adapter *adapter, int state) +{ + static const char * const t4_decode[] = { + "IDMA_IDLE", + "IDMA_PUSH_MORE_CPL_FIFO", + "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", + "Not used", + "IDMA_PHYSADDR_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", + "IDMA_PHYSADDR_SEND_PAYLOAD", + "IDMA_SEND_FIFO_TO_IMSG", + "IDMA_FL_REQ_DATA_FL_PREP", + "IDMA_FL_REQ_DATA_FL", + "IDMA_FL_DROP", + "IDMA_FL_H_REQ_HEADER_FL", + "IDMA_FL_H_SEND_PCIEHDR", + "IDMA_FL_H_PUSH_CPL_FIFO", + "IDMA_FL_H_SEND_CPL", + "IDMA_FL_H_SEND_IP_HDR_FIRST", + "IDMA_FL_H_SEND_IP_HDR", + "IDMA_FL_H_REQ_NEXT_HEADER_FL", + "IDMA_FL_H_SEND_NEXT_PCIEHDR", + "IDMA_FL_H_SEND_IP_HDR_PADDING", + "IDMA_FL_D_SEND_PCIEHDR", + "IDMA_FL_D_SEND_CPL_AND_IP_HDR", + "IDMA_FL_D_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_PCIEHDR", + "IDMA_FL_PUSH_CPL_FIFO", + "IDMA_FL_SEND_CPL", + "IDMA_FL_SEND_PAYLOAD_FIRST", + "IDMA_FL_SEND_PAYLOAD", + "IDMA_FL_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_NEXT_PCIEHDR", + "IDMA_FL_SEND_PADDING", + "IDMA_FL_SEND_COMPLETION_TO_IMSG", + "IDMA_FL_SEND_FIFO_TO_IMSG", + "IDMA_FL_REQ_DATAFL_DONE", + "IDMA_FL_REQ_HEADERFL_DONE", + }; + static const char * const t5_decode[] = { + "IDMA_IDLE", + "IDMA_ALMOST_IDLE", + "IDMA_PUSH_MORE_CPL_FIFO", + "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO", + "IDMA_SGEFLRFLUSH_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PCIEHDR", + "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST", + "IDMA_PHYSADDR_SEND_PAYLOAD", + "IDMA_SEND_FIFO_TO_IMSG", + "IDMA_FL_REQ_DATA_FL", + "IDMA_FL_DROP", + "IDMA_FL_DROP_SEND_INC", + "IDMA_FL_H_REQ_HEADER_FL", + "IDMA_FL_H_SEND_PCIEHDR", + "IDMA_FL_H_PUSH_CPL_FIFO", + "IDMA_FL_H_SEND_CPL", + "IDMA_FL_H_SEND_IP_HDR_FIRST", + "IDMA_FL_H_SEND_IP_HDR", + "IDMA_FL_H_REQ_NEXT_HEADER_FL", + "IDMA_FL_H_SEND_NEXT_PCIEHDR", + "IDMA_FL_H_SEND_IP_HDR_PADDING", + "IDMA_FL_D_SEND_PCIEHDR", + "IDMA_FL_D_SEND_CPL_AND_IP_HDR", + 
"IDMA_FL_D_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_PCIEHDR", + "IDMA_FL_PUSH_CPL_FIFO", + "IDMA_FL_SEND_CPL", + "IDMA_FL_SEND_PAYLOAD_FIRST", + "IDMA_FL_SEND_PAYLOAD", + "IDMA_FL_REQ_NEXT_DATA_FL", + "IDMA_FL_SEND_NEXT_PCIEHDR", + "IDMA_FL_SEND_PADDING", + "IDMA_FL_SEND_COMPLETION_TO_IMSG", + }; + static const u32 sge_regs[] = { + SGE_DEBUG_DATA_LOW_INDEX_2_A, + SGE_DEBUG_DATA_LOW_INDEX_3_A, + SGE_DEBUG_DATA_HIGH_INDEX_10_A, + }; + const char **sge_idma_decode; + int sge_idma_decode_nstates; + int i; + + if (is_t4(adapter->params.chip)) { + sge_idma_decode = (const char **)t4_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t4_decode); + } else { + sge_idma_decode = (const char **)t5_decode; + sge_idma_decode_nstates = ARRAY_SIZE(t5_decode); + } + + if (state < sge_idma_decode_nstates) + CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]); + else + CH_WARN(adapter, "idma state %d unknown\n", state); + + for (i = 0; i < ARRAY_SIZE(sge_regs); i++) + CH_WARN(adapter, "SGE register %#x value %#x\n", + sge_regs[i], t4_read_reg(adapter, sge_regs[i])); +} + +/** + * t4_fw_hello - establish communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @evt_mbox: mailbox to receive async FW events + * @master: specifies the caller's willingness to be the device master + * @state: returns the current device state (if non-NULL) + * + * Issues a command to establish communication with FW. Returns either + * an error (negative integer) or the mailbox of the Master PF. + */ +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state) +{ + int ret; + struct fw_hello_cmd c; + u32 v; + unsigned int master_mbox; + int retries = FW_CMD_HELLO_RETRIES; + +retry: + memset(&c, 0, sizeof(c)); + INIT_CMD(c, HELLO, WRITE); + c.err_to_clearinit = htonl( + FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) | + FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) | + FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox : + FW_HELLO_CMD_MBMASTER_M) | + FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) | + FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) | + FW_HELLO_CMD_CLEARINIT_F); + + /* + * Issue the HELLO command to the firmware. If it's not successful + * but indicates that we got a "busy" or "timeout" condition, retry + * the HELLO until we exhaust our retry limit. If we do exceed our + * retry limit, check to see if the firmware left us any error + * information and report that if so. + */ + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret < 0) { + if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) + goto retry; + if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F) + t4_report_fw_error(adap); + return ret; + } + + v = ntohl(c.err_to_clearinit); + master_mbox = FW_HELLO_CMD_MBMASTER_G(v); + if (state) { + if (v & FW_HELLO_CMD_ERR_F) + *state = DEV_STATE_ERR; + else if (v & FW_HELLO_CMD_INIT_F) + *state = DEV_STATE_INIT; + else + *state = DEV_STATE_UNINIT; + } + + /* + * If we're not the Master PF then we need to wait around for the + * Master PF Driver to finish setting up the adapter. + * + * Note that we also do this wait if we're a non-Master-capable PF and + * there is no current Master PF; a Master PF may show up momentarily + * and we wouldn't want to fail pointlessly. (This can happen when an + * OS loads lots of different drivers rapidly at the same time). In + * this case, the Master PF returned by the firmware will be + * PCIE_FW_MASTER_M so the test below will work ... 
+ */ + if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 && + master_mbox != mbox) { + int waiting = FW_CMD_HELLO_TIMEOUT; + + /* + * Wait for the firmware to either indicate an error or + * initialized state. If we see either of these we bail out + * and report the issue to the caller. If we exhaust the + * "hello timeout" and we haven't exhausted our retries, try + * again. Otherwise bail with a timeout error. + */ + for (;;) { + u32 pcie_fw; + + msleep(50); + waiting -= 50; + + /* + * If neither Error nor Initialized is indicated + * by the firmware keep waiting till we exhaust our + * timeout ... and then retry if we haven't exhausted + * our retries ... + */ + pcie_fw = t4_read_reg(adap, PCIE_FW_A); + if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) { + if (waiting <= 0) { + if (retries-- > 0) + goto retry; + + return -ETIMEDOUT; + } + continue; + } + + /* + * We either have an Error or Initialized condition; + * report errors preferentially. + */ + if (state) { + if (pcie_fw & PCIE_FW_ERR_F) + *state = DEV_STATE_ERR; + else if (pcie_fw & PCIE_FW_INIT_F) + *state = DEV_STATE_INIT; + } + + /* + * If we arrived before a Master PF was selected and + * one has since become valid, grab its identity + * for our caller. + */ + if (master_mbox == PCIE_FW_MASTER_M && + (pcie_fw & PCIE_FW_MASTER_VLD_F)) + master_mbox = PCIE_FW_MASTER_G(pcie_fw); + break; + } + } + + return master_mbox; +} + +/** + * t4_fw_bye - end communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to terminate communication with FW. + */ +int t4_fw_bye(struct adapter *adap, unsigned int mbox) +{ + struct fw_bye_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, BYE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_early_init - ask FW to initialize the device + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to FW to partially initialize the device. This + * performs initialization that generally doesn't depend on user input. + */ +int t4_early_init(struct adapter *adap, unsigned int mbox) +{ + struct fw_initialize_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, INITIALIZE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_reset - issue a reset to FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @reset: specifies the type of reset to perform + * + * Issues a reset command of the specified type to FW. + */ +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) +{ + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = htonl(reset); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_halt - issue a reset/halt to FW and put uP into RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @force: force uP into RESET even if FW RESET command fails + * + * Issues a RESET command to firmware (if desired) with a HALT indication + * and then puts the microprocessor into RESET state. The RESET command + * will only be issued if a legitimate mailbox is provided (mbox <= + * PCIE_FW_MASTER_M). + * + * This is generally used in order for the host to safely manipulate the + * adapter without fear of conflicting with whatever the firmware might + * be doing. The only way out of this state is to RESTART the firmware + * ...
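+ * (see t4_fw_restart() below).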
+ */ +static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) +{ + int ret = 0; + + /* + * If a legitimate mailbox is provided, issue a RESET command + * with a HALT indication. + */ + if (mbox <= PCIE_FW_MASTER_M) { + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = htonl(PIORST_F | PIORSTMODE_F); + c.halt_pkd = htonl(FW_RESET_CMD_HALT_F); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } + + /* + * Normally we won't complete the operation if the firmware RESET + * command fails but if our caller insists we'll go ahead and put the + * uP into RESET. This can be useful if the firmware is hung or even + * missing ... We'll have to take the risk of putting the uP into + * RESET without the cooperation of firmware in that case. + * + * We also force the firmware's HALT flag to be on in case we bypassed + * the firmware RESET command above or we're dealing with old firmware + * which doesn't have the HALT capability. This will serve as a flag + * for the incoming firmware to know that it's coming out of a HALT + * rather than a RESET ... if it's new enough to understand that ... + */ + if (ret == 0 || force) { + t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F); + t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, + PCIE_FW_HALT_F); + } + + /* + * And we always return the result of the firmware RESET command + * even when we force the uP into RESET ... + */ + return ret; +} + +/** + * t4_fw_restart - restart the firmware by taking the uP out of RESET + * @adap: the adapter + * @reset: if we want to do a RESET to restart things + * + * Restart firmware previously halted by t4_fw_halt(). On successful + * return the previous PF Master remains as the new PF Master and there + * is no need to issue a new HELLO command, etc. + * + * We do this in two ways: + * + * 1. If we're dealing with newer firmware we'll simply want to take + * the chip's microprocessor out of RESET. This will cause the + * firmware to start up from its start vector. And then we'll loop + * until the firmware indicates it's started again (PCIE_FW.HALT + * reset to 0) or we timeout. + * + * 2. If we're dealing with older firmware then we'll need to RESET + * the chip since older firmware won't recognize the PCIE_FW.HALT + * flag and automatically RESET itself on startup. + */ +static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) +{ + if (reset) { + /* + * Since we're directing the RESET instead of the firmware + * doing it automatically, we need to clear the PCIE_FW.HALT + * bit. + */ + t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0); + + /* + * If we've been given a valid mailbox, first try to get the + * firmware to do the RESET. If that works, great and we can + * return success. Otherwise, if we haven't been given a + * valid mailbox or the RESET command failed, fall back to + * hitting the chip with a hammer. 
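+ * (a direct PIORST/PIORSTMODE write to the PL_RST register).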
+ */ + if (mbox <= PCIE_FW_MASTER_M) { + t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0); + msleep(100); + if (t4_fw_reset(adap, mbox, + PIORST_F | PIORSTMODE_F) == 0) + return 0; + } + + t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F); + msleep(2000); + } else { + int ms; + + t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0); + for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { + if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F)) + return 0; + msleep(100); + ms += 100; + } + return -ETIMEDOUT; + } + return 0; +} + +/** + * t4_fw_upgrade - perform all of the steps necessary to upgrade FW + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @fw_data: the firmware image to write + * @size: image size + * @force: force upgrade even if firmware doesn't cooperate + * + * Perform all of the steps necessary for upgrading an adapter's + * firmware image. Normally this requires the cooperation of the + * existing firmware in order to halt all existing activities + * but if an invalid mailbox token is passed in we skip that step + * (though we'll still put the adapter microprocessor into RESET in + * that case). + * + * On successful return the new firmware will have been loaded and + * the adapter will have been fully RESET losing all previous setup + * state. On unsuccessful return the adapter may be completely hosed ... + * positive errno indicates that the adapter is ~probably~ intact, a + * negative errno indicates that things are looking bad ... + */ +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force) +{ + const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; + int reset, ret; + + if (!t4_fw_matches_chip(adap, fw_hdr)) + return -EINVAL; + + ret = t4_fw_halt(adap, mbox, force); + if (ret < 0 && !force) + return ret; + + ret = t4_load_fw(adap, fw_data, size); + if (ret < 0) + return ret; + + /* + * Older versions of the firmware don't understand the new + * PCIE_FW.HALT flag and so won't know to perform a RESET when they + * restart. So for newly loaded older firmware we'll have to do the + * RESET for it so it starts up on a clean slate. We can tell if + * the newly loaded firmware will handle this right by checking + * its header flags to see if it advertises the capability. + */ + reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); + return t4_fw_restart(adap, mbox, reset); +} + +/** + * t4_fixup_host_params - fix up host-dependent parameters + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * + * Various registers in T4 contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + */ +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size) +{ + unsigned int page_shift = fls(page_size) - 1; + unsigned int sge_hps = page_shift - 10; + unsigned int stat_len = cache_line_size > 64 ? 128 : 64; + unsigned int fl_align = cache_line_size < 32 ? 
32 : cache_line_size; + unsigned int fl_align_log = fls(fl_align) - 1; + + t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A, + HOSTPAGESIZEPF0_V(sge_hps) | + HOSTPAGESIZEPF1_V(sge_hps) | + HOSTPAGESIZEPF2_V(sge_hps) | + HOSTPAGESIZEPF3_V(sge_hps) | + HOSTPAGESIZEPF4_V(sge_hps) | + HOSTPAGESIZEPF5_V(sge_hps) | + HOSTPAGESIZEPF6_V(sge_hps) | + HOSTPAGESIZEPF7_V(sge_hps)); + + if (is_t4(adap->params.chip)) { + t4_set_reg_field(adap, SGE_CONTROL_A, + INGPADBOUNDARY_V(INGPADBOUNDARY_M) | + EGRSTATUSPAGESIZE_F, + INGPADBOUNDARY_V(fl_align_log - + INGPADBOUNDARY_SHIFT_X) | + EGRSTATUSPAGESIZE_V(stat_len != 64)); + } else { + /* T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. For T5 the smallest + * Padding Boundary which we can select is 32 bytes which is + * larger than any known Memory Controller Line Size so we'll + * use that. + * + * T5 has a different interpretation of the "0" value for the + * Packing Boundary. This corresponds to 16 bytes instead of + * the expected 32 bytes. We never have a Packing Boundary + * less than 32 bytes so we can't use that special value but + * on the other hand, if we wanted 32 bytes, the best we can + * really do is 64 bytes. + */ + if (fl_align <= 32) { + fl_align = 64; + fl_align_log = 6; + } + t4_set_reg_field(adap, SGE_CONTROL_A, + INGPADBOUNDARY_V(INGPADBOUNDARY_M) | + EGRSTATUSPAGESIZE_F, + INGPADBOUNDARY_V(INGPCIEBOUNDARY_32B_X) | + EGRSTATUSPAGESIZE_V(stat_len != 64)); + t4_set_reg_field(adap, SGE_CONTROL2_A, + INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), + INGPACKBOUNDARY_V(fl_align_log - + INGPACKBOUNDARY_SHIFT_X)); + } + /* + * Adjust various SGE Free List Host Buffer Sizes. + * + * This is something of a crock since we're using fixed indices into + * the array which are also known by the sge.c code and the T4 + * Firmware Configuration File. We need to come up with a much better + * approach to managing this array. For now, the first four entries + * are: + * + * 0: Host Page Size + * 1: 64KB + * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) + * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) + * + * For the single-MTU buffers in unpacked mode we need to include + * space for the SGE Control Packet Shift, 14 byte Ethernet header, + * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet + * Padding boundary. All of these are accommodated in the Factory + * Default Firmware Configuration File but we need to adjust it for + * this host's cache line size. 
+ */ + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A, + (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1) + & ~(fl_align-1)); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A, + (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1) + & ~(fl_align-1)); + + t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12)); + + return 0; +} + +/** + * t4_fw_initialize - ask FW to initialize the device + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to FW to partially initialize the device. This + * performs initialization that generally doesn't depend on user input. + */ +int t4_fw_initialize(struct adapter *adap, unsigned int mbox) +{ + struct fw_initialize_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, INITIALIZE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_query_params - query FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Reads the value of FW or device parameters. Up to 7 parameters can be + * queried at once. + */ +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + int i, ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + for (i = 0; i < nparams; i++, p += 2) + *p = htonl(*params++); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = ntohl(*p); + return ret; +} + +/** + * t4_set_params_nosleep - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Does not ever sleep + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. + */ +int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + const u32 *val) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + + while (nparams--) { + *p++ = cpu_to_be32(*params++); + *p++ = cpu_to_be32(*val++); + } + + return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_set_params - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. 
+ */ +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) | + FW_PARAMS_CMD_VFN_V(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + while (nparams--) { + *p++ = htonl(*params++); + *p++ = htonl(*val++); + } + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_cfg_pfvf - configure PF/VF resource limits + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF being configured + * @vf: the VF being configured + * @txq: the max number of egress queues + * @txq_eth_ctrl: the max number of egress Ethernet or control queues + * @rxqi: the max number of interrupt-capable ingress queues + * @rxq: the max number of interruptless ingress queues + * @tc: the PCI traffic class + * @vi: the max number of virtual interfaces + * @cmask: the channel access rights mask for the PF/VF + * @pmask: the port access rights mask for the PF/VF + * @nexact: the maximum number of exact MPS filters + * @rcaps: read capabilities + * @wxcaps: write/execute capabilities + * + * Configures resource limits and capabilities for a physical or virtual + * function. + */ +int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, + unsigned int rxqi, unsigned int rxq, unsigned int tc, + unsigned int vi, unsigned int cmask, unsigned int pmask, + unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) +{ + struct fw_pfvf_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) | + FW_PFVF_CMD_VFN_V(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) | + FW_PFVF_CMD_NIQ_V(rxq)); + c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) | + FW_PFVF_CMD_PMASK_V(pmask) | + FW_PFVF_CMD_NEQ_V(txq)); + c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) | + FW_PFVF_CMD_NEXACTF_V(nexact)); + c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) | + FW_PFVF_CMD_WX_CAPS_V(wxcaps) | + FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_alloc_vi - allocate a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @port: physical port associated with the VI + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @nmac: number of MAC addresses needed (1 to 5) + * @mac: the MAC addresses of the VI + * @rss_size: size of RSS table slice associated with this VI + * + * Allocates a virtual interface for the given physical port. If @mac is + * not %NULL it contains the MAC addresses of the VI as assigned by FW. + * @mac should be large enough to hold @nmac Ethernet addresses, they are + * stored consecutively so the space needed is @nmac * 6 bytes. + * Returns a negative error number or the non-negative VI id. 
+ */ +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size) +{ + int ret; + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_CMD_EXEC_F | + FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c)); + c.portid_pkd = FW_VI_CMD_PORTID_V(port); + c.nmac = nmac - 1; + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + if (mac) { + memcpy(mac, c.mac, sizeof(c.mac)); + switch (nmac) { + case 5: + memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); + case 4: + memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); + case 3: + memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); + case 2: + memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); + } + } + if (rss_size) + *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd)); + return FW_VI_CMD_VIID_G(ntohs(c.type_viid)); +} + +/** + * t4_set_rxmode - set Rx properties of a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @mtu: the new MTU or -1 + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sets Rx properties of a virtual interface. + */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_RXMODE_MTU_NO_CHG; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; + if (vlanex < 0) + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid)); + c.retval_len16 = htonl(FW_LEN16(c)); + c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. 
+ * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) +{ + int i, ret; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p; + unsigned int max_naddr = is_t4(adap->params.chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + + if (naddr > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V((naddr + 2) / 2)); + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret) + return ret; + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); + + if (idx) + idx[i] = index >= max_naddr ? 0xffff : index; + if (index < max_naddr) + ret++; + else if (hash) + *hash |= (1ULL << hash_mac_addr(addr[i])); + } + return ret; +} + +/** + * t4_change_mac - modifies the exact-match filter for a MAC address + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an exact-match filter and sets it to the new MAC address. + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the one + * being used by the old address value and allocate a new filter for the + * new address value. @idx can be -1 if the address is a new addition. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. + */ +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt) +{ + int ret, mode; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p = c.u.exact; + unsigned int max_mac_addr = is_t4(adap->params.chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + + if (idx < 0) /* new allocation */ + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + mode = add_smt ? 
FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_SMAC_RESULT_V(mode) | + FW_VI_MAC_CMD_IDX_V(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx)); + if (ret >= max_mac_addr) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4_set_addr_hash - program the MAC inexact-match hash filter + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @ucast: whether the hash filter should also match unicast addresses + * @vec: the value to be written to the hash filter + * @sleep_ok: call is allowed to sleep + * + * Sets the 64-bit inexact-match hash filter for a virtual interface. + */ +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(1)); + c.u.hash.hashvec = cpu_to_be64(vec); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_enable_vi_params - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * @dcb_en: 1=enable delivery of Data Center Bridging messages. + * + * Enables/disables a virtual interface. Note that setting DCB Enable + * only makes sense when enabling a Virtual Interface ... + */ +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) | + FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en)); + return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_enable_vi - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. + */ +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en) +{ + return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); +} + +/** + * t4_identify_port - identify a VI's port by blinking its LED + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @nblinks: how many times to blink LED at 2.5 Hz + * + * Identifies a VI's port by blinking its LED. 
+ */ +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c)); + c.blinkdur = htons(nblinks); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_free - free an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated FLs, if any. + */ +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | + FW_IQ_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c)); + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype)); + c.iqid = htons(iqid); + c.fl0id = htons(fl0id); + c.fl1id = htons(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_eth_eq_free - free an Ethernet egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees an Ethernet egress queue. + */ +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_eth_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) | + FW_EQ_ETH_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_ctrl_eq_free - free a control egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees a control egress queue. + */ +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_ctrl_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) | + FW_EQ_CTRL_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_ofld_eq_free - free an offload egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees a control egress queue. 
+ */ +int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_ofld_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) | + FW_EQ_OFLD_CMD_VFN_V(vf)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c)); + c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_handle_fw_rpl - process a FW reply message + * @adap: the adapter + * @rpl: start of the FW message + * + * Processes a FW message, such as link state change messages. + */ +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) +{ + u8 opcode = *(const u8 *)rpl; + + if (opcode == FW_PORT_CMD) { /* link/module state change message */ + int speed = 0, fc = 0; + const struct fw_port_cmd *p = (void *)rpl; + int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid)); + int port = adap->chan_map[chan]; + struct port_info *pi = adap2pinfo(adap, port); + struct link_config *lc = &pi->link_cfg; + u32 stat = ntohl(p->u.info.lstatus_to_modtype); + int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; + u32 mod = FW_PORT_CMD_MODTYPE_G(stat); + + if (stat & FW_PORT_CMD_RXPAUSE_F) + fc |= PAUSE_RX; + if (stat & FW_PORT_CMD_TXPAUSE_F) + fc |= PAUSE_TX; + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + speed = 100; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + speed = 1000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + speed = 40000; + + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc) { /* something changed */ + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->supported = be16_to_cpu(p->u.info.pcap); + t4_os_link_changed(adap, port, link_ok); + } + if (mod != pi->mod_type) { + pi->mod_type = mod; + t4_os_portmod_changed(adap, port); + } + } + return 0; +} + +static void get_pci_mode(struct adapter *adapter, struct pci_params *p) +{ + u16 val; + + if (pci_is_pcie(adapter->pdev)) { + pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); + p->speed = val & PCI_EXP_LNKSTA_CLS; + p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; + } +} + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @caps: link capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. + */ +static void init_link_config(struct link_config *lc, unsigned int caps) +{ + lc->supported = caps; + lc->requested_speed = 0; + lc->speed = 0; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & FW_PORT_CAP_ANEG) { + lc->advertising = lc->supported & ADVERT_MASK; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +#define CIM_PF_NOACCESS 0xeeeeeeee + +int t4_wait_dev_ready(void __iomem *regs) +{ + u32 whoami; + + whoami = readl(regs + PL_WHOAMI_A); + if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS) + return 0; + + msleep(500); + whoami = readl(regs + PL_WHOAMI_A); + return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 
0 : -EIO); +} + +struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; +}; + +static int get_flash_params(struct adapter *adap) +{ + /* Table for non-Numonix supported flash parts. Numonix parts are left + * to the preexisting code. All flash parts have 64KB sectors. + */ + static struct flash_desc supported_flash[] = { + { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + + int ret; + u32 info; + + ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); + if (!ret) + ret = sf1_read(adap, 3, 0, 1, &info); + t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */ + if (ret) + return ret; + + for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) + if (supported_flash[ret].vendor_and_model_id == info) { + adap->params.sf_size = supported_flash[ret].size_mb; + adap->params.sf_nsec = + adap->params.sf_size / SF_SEC_SIZE; + return 0; + } + + if ((info & 0xff) != 0x20) /* not a Numonix flash */ + return -EINVAL; + info >>= 16; /* log2 of size */ + if (info >= 0x14 && info < 0x18) + adap->params.sf_nsec = 1 << (info - 16); + else if (info == 0x18) + adap->params.sf_nsec = 64; + else + return -EINVAL; + adap->params.sf_size = 1 << info; + adap->params.sf_fw_start = + t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M; + + if (adap->params.sf_size < FLASH_MIN_SIZE) + dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", + adap->params.sf_size, FLASH_MIN_SIZE); + return 0; +} + +/** + * t4_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + * @reset: if true perform a HW reset + * + * Initialize adapter SW state for the various HW modules, set initial + * values for some adapter tunables, take PHYs out of reset, and + * initialize the MDIO interface. + */ +int t4_prep_adapter(struct adapter *adapter) +{ + int ret, ver; + uint16_t device_id; + u32 pl_rev; + + get_pci_mode(adapter, &adapter->params.pci); + pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A)); + + ret = get_flash_params(adapter); + if (ret < 0) { + dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret); + return ret; + } + + /* Retrieve adapter's device ID + */ + pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id); + ver = device_id >> 12; + adapter->params.chip = 0; + switch (ver) { + case CHELSIO_T4: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); + break; + case CHELSIO_T5: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); + break; + default: + dev_err(adapter->pdev_dev, "Device %d is not supported\n", + device_id); + return -EINVAL; + } + + adapter->params.cim_la_size = CIMLA_SIZE; + init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); + + /* + * Default port for debugging in case we can't reach FW. + */ + adapter->params.nports = 1; + adapter->params.portvec = 1; + adapter->params.vpd.cclk = 50000; + return 0; +} + +/** + * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. 
If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /* T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /* Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.hps + 10;
+ page_size = 1 << page_shift;
+
+ /* Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
+ ? adapter->params.sge.eq_qpp
+ : adapter->params.sge.iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /* Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /* If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
+
+/**
+ * t4_init_devlog_params - initialize adapter->params.devlog
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's Firmware Device Log
+ * Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap)
+{
+ struct devlog_params *dparams = &adap->params.devlog;
+ u32 pf_dparams;
+ unsigned int devlog_meminfo;
+ struct fw_devlog_cmd devlog_cmd;
+ int ret;
+
+ /* If we're dealing with newer firmware, the Device Log Parameters
+ * are stored in a designated register which allows us to access the
+ * Device Log even if we can't talk to the firmware.
+ */
+ pf_dparams =
+ t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
+ if (pf_dparams) {
+ unsigned int nentries, nentries128;
+
+ dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
+ dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
+
+ nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
+ nentries = (nentries128 + 1) * 128;
+ dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+ return 0;
+ }
+
+ /* Otherwise, ask the firmware for its Device Log Parameters.
+ */
+ memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+ devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+ &devlog_cmd);
+ if (ret)
+ return ret;
+
+ devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
+ dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
+ dparams->size = ntohl(devlog_cmd.memsize_devlog);
+
+ return 0;
+}
+
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 hps, qpp;
+ unsigned int s_hps, s_qpp;
+
+ /* Extract the SGE Page Size for our PF.
+ */
+ hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
+ s_hps = (HOSTPAGESIZEPF0_S +
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+ sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
+
+ /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
+ */
+ s_qpp = (QUEUESPERPAGEPF0_S +
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+ qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+ qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
+
+ return 0;
+}
+
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan;
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
+ adap->params.tp.tre = TIMERRESOLUTION_G(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+ /* Cache the adapter's Compressed Filter Mode and global Ingress
+ * Configuration.
+ */
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A);
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A);
+
+ /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ PROTOCOL_F);
+
+ /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+ * represents the presence of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & VNIC_F) == 0)
+ adap->params.tp.vnic_shift = -1;
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case FCOE_F:
+ field_shift += FT_FCOE_W;
+ break;
+ case PORT_F:
+ field_shift += FT_PORT_W;
+ break;
+ case VNIC_ID_F:
+ field_shift += FT_VNIC_ID_W;
+ break;
+ case VLAN_F:
+ field_shift += FT_VLAN_W;
+ break;
+ case TOS_F:
+ field_shift += FT_TOS_W;
+ break;
+ case PROTOCOL_F:
+ field_shift += FT_PROTOCOL_W;
+ break;
+ case ETHERTYPE_F:
+ field_shift += FT_ETHERTYPE_W;
+ break;
+ case MACMATCH_F:
+ field_shift += FT_MACMATCH_W;
+ break;
+ case MPSHITTYPE_F:
+ field_shift += FT_MPSHITTYPE_W;
+ break;
+ case FRAGMENTATION_F:
+ field_shift += FT_FRAGMENTATION_W;
+ break;
+ }
+ }
+ return field_shift;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j = 0;
+ struct fw_port_cmd c;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&c, 0, sizeof(c));
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ unsigned int rss_size;
+ struct port_info *p = adap2pinfo(adap, i);
+
+ while ((adap->params.portvec & (1 << j)) == 0)
+ j++;
+
+ c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(j));
+ c.action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ p->viid = ret;
+ p->tx_chan = j;
+ p->lport = j;
+ p->rss_size = rss_size;
+ memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ adap->port[i]->dev_port = j;
+
+ ret = ntohl(c.u.info.lstatus_to_modtype);
+ p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_G(ret); + p->mod_type = FW_PORT_MOD_TYPE_NA; + + rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); + rvc.retval_len16 = htonl(FW_LEN16(rvc)); + ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); + if (ret) + return ret; + p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); + + init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); + j++; + } + return 0; +} + +/** + * t4_read_cimq_cfg - read CIM queue configuration + * @adap: the adapter + * @base: holds the queue base addresses in bytes + * @size: holds the queue sizes in bytes + * @thres: holds the queue full thresholds in bytes + * + * Returns the current configuration of the CIM queues, starting with + * the IBQs, then the OBQs. + */ +void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres) +{ + unsigned int i, v; + int cim_num_obq = is_t4(adap->params.chip) ? + CIM_NUM_OBQ : CIM_NUM_OBQ_T5; + + for (i = 0; i < CIM_NUM_IBQ; i++) { + t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F | + QUENUMSELECT_V(i)); + v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); + /* value is in 256-byte units */ + *base++ = CIMQBASE_G(v) * 256; + *size++ = CIMQSIZE_G(v) * 256; + *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */ + } + for (i = 0; i < cim_num_obq; i++) { + t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F | + QUENUMSELECT_V(i)); + v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A); + /* value is in 256-byte units */ + *base++ = CIMQBASE_G(v) * 256; + *size++ = CIMQSIZE_G(v) * 256; + } +} + +/** + * t4_read_cim_ibq - read the contents of a CIM inbound queue + * @adap: the adapter + * @qid: the queue index + * @data: where to store the queue contents + * @n: capacity of @data in 32-bit words + * + * Reads the contents of the selected CIM queue starting at address 0 up + * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on + * error and the number of 32-bit words actually read on success. + */ +int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) +{ + int i, err, attempts; + unsigned int addr; + const unsigned int nwords = CIM_IBQ_SIZE * 4; + + if (qid > 5 || (n & 3)) + return -EINVAL; + + addr = qid * nwords; + if (n > nwords) + n = nwords; + + /* It might take 3-10ms before the IBQ debug read access is allowed. + * Wait for 1 Sec with a delay of 1 usec. + */ + attempts = 1000000; + + for (i = 0; i < n; i++, addr++) { + t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) | + IBQDBGEN_F); + err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0, + attempts, 1); + if (err) + return err; + *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A); + } + t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0); + return i; +} + +/** + * t4_read_cim_obq - read the contents of a CIM outbound queue + * @adap: the adapter + * @qid: the queue index + * @data: where to store the queue contents + * @n: capacity of @data in 32-bit words + * + * Reads the contents of the selected CIM queue starting at address 0 up + * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on + * error and the number of 32-bit words actually read on success. + */ +int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n) +{ + int i, err; + unsigned int addr, v, nwords; + int cim_num_obq = is_t4(adap->params.chip) ? 
+ CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
+
+ if ((qid > (cim_num_obq - 1)) || (n & 3))
+ return -EINVAL;
+
+ t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+ QUENUMSELECT_V(qid));
+ v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
+
+ addr = CIMQBASE_G(v) * 64; /* multiple of 256 -> multiple of 4 */
+ nwords = CIMQSIZE_G(v) * 64; /* same */
+ if (n > nwords)
+ n = nwords;
+
+ for (i = 0; i < n; i++, addr++) {
+ t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
+ OBQDBGEN_F);
+ err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
+ 2, 1);
+ if (err)
+ return err;
+ *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
+ }
+ t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
+ return i;
+}
+
+/**
+ * t4_cim_read - read a block from CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to read
+ * @valp: where to store the result
+ *
+ * Reads a block of 4-byte words from the CIM internal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+ unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
+ ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+ 0, 5, 2);
+ if (!ret)
+ *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
+ }
+ return ret;
+}
+
+/**
+ * t4_cim_write - write a block into CIM internal address space
+ * @adap: the adapter
+ * @addr: the start address within the CIM address space
+ * @n: number of words to write
+ * @valp: set of values to write
+ *
+ * Writes a block of 4-byte words into the CIM internal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+ const unsigned int *valp)
+{
+ int ret = 0;
+
+ if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
+ return -EBUSY;
+
+ for ( ; !ret && n--; addr += 4) {
+ t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
+ t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
+ ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
+ 0, 5, 2);
+ }
+ return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr,
+ unsigned int val)
+{
+ return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ * t4_cim_read_la - read CIM LA capture buffer
+ * @adap: the adapter
+ * @la_buf: where to store the LA data
+ * @wrptr: the HW write pointer within the capture buffer
+ *
+ * Reads the contents of the CIM LA buffer with the most recent entry at
+ * the end of the returned data and with the entry at @wrptr first.
+ * We try to leave the LA in the running state we find it in.
+ */ +int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) +{ + int i, ret; + unsigned int cfg, val, idx; + + ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg); + if (ret) + return ret; + + if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */ + ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0); + if (ret) + return ret; + } + + ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val); + if (ret) + goto restart; + + idx = UPDBGLAWRPTR_G(val); + if (wrptr) + *wrptr = idx; + + for (i = 0; i < adap->params.cim_la_size; i++) { + ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, + UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F); + if (ret) + break; + ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val); + if (ret) + break; + if (val & UPDBGLARDEN_F) { + ret = -ETIMEDOUT; + break; + } + ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); + if (ret) + break; + idx = (idx + 1) & UPDBGLARDPTR_M; + } +restart: + if (cfg & UPDBGLAEN_F) { + int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, + cfg & ~UPDBGLARDEN_F); + if (!ret) + ret = r; + } + return ret; +} + +/** + * t4_tp_read_la - read TP LA capture buffer + * @adap: the adapter + * @la_buf: where to store the LA data + * @wrptr: the HW write pointer within the capture buffer + * + * Reads the contents of the TP LA buffer with the most recent entry at + * the end of the returned data and with the entry at @wrptr first. + * We leave the LA in the running state we find it in. + */ +void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr) +{ + bool last_incomplete; + unsigned int i, cfg, val, idx; + + cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff; + if (cfg & DBGLAENABLE_F) /* freeze LA */ + t4_write_reg(adap, TP_DBG_LA_CONFIG_A, + adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F)); + + val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A); + idx = DBGLAWPTR_G(val); + last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0; + if (last_incomplete) + idx = (idx + 1) & DBGLARPTR_M; + if (wrptr) + *wrptr = idx; + + val &= 0xffff; + val &= ~DBGLARPTR_V(DBGLARPTR_M); + val |= adap->params.tp.la_mask; + + for (i = 0; i < TPLA_SIZE; i++) { + t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val); + la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A); + idx = (idx + 1) & DBGLARPTR_M; + } + + /* Wipe out last entry if it isn't valid */ + if (last_incomplete) + la_buf[TPLA_SIZE - 1] = ~0ULL; + + if (cfg & DBGLAENABLE_F) /* restore running state */ + t4_write_reg(adap, TP_DBG_LA_CONFIG_A, + cfg | adap->params.tp.la_mask); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h new file mode 100644 index 000000000..380b15c04 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -0,0 +1,251 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_HW_H +#define __T4_HW_H + +#include + +enum { + NCHAN = 4, /* # of HW channels */ + MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ + EEPROMSIZE = 17408, /* Serial EEPROM physical size */ + EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ + EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */ + RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ + TCB_SIZE = 128, /* TCB size */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + L2T_SIZE = 4096, /* # of L2T entries */ + PM_NSTATS = 5, /* # of PM stats */ + MBOX_LEN = 64, /* mailbox size in bytes */ + TRACE_LEN = 112, /* length of trace data and mask */ + FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ + NWOL_PAT = 8, /* # of WoL patterns */ + WOL_PAT_LEN = 128, /* length of WoL patterns */ +}; + +enum { + CIM_NUM_IBQ = 6, /* # of CIM IBQs */ + CIM_NUM_OBQ = 6, /* # of CIM OBQs */ + CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */ + CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */ + CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */ + CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */ + TPLA_SIZE = 128, /* # of 64-bit words in TP LA */ + ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */ +}; + +enum { + SF_PAGE_SIZE = 256, /* serial flash page size */ + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ +}; + +enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ + +enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ + +enum { + SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ + SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ + SGE_MAX_IQ_SIZE = 65520, + + SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ + SGE_TIMER_UPD_CIDX = 7, /* update cidx only */ + + SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */ + + SGE_INTRDST_PCI = 0, /* interrupt destination is PCI-E */ + SGE_INTRDST_IQ = 1, /* destination is an ingress queue */ + + SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */ + SGE_UPDATEDEL_INTR = 1, /* interrupt */ + SGE_UPDATEDEL_STPG = 2, /* status page */ + SGE_UPDATEDEL_BOTH = 3, /* interrupt and status page */ + + SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */ + SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */ + SGE_HOSTFCMODE_STPG = 2, /* sent to status page */ + SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */ + + SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */ + SGE_FETCHBURSTMIN_32B = 1, + SGE_FETCHBURSTMIN_64B = 2, + SGE_FETCHBURSTMIN_128B = 3, + + SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */ + 
SGE_FETCHBURSTMAX_128B = 1, + SGE_FETCHBURSTMAX_256B = 2, + SGE_FETCHBURSTMAX_512B = 3, + + SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */ + SGE_CIDXFLUSHTHRESH_2 = 1, + SGE_CIDXFLUSHTHRESH_4 = 2, + SGE_CIDXFLUSHTHRESH_8 = 3, + SGE_CIDXFLUSHTHRESH_16 = 4, + SGE_CIDXFLUSHTHRESH_32 = 5, + SGE_CIDXFLUSHTHRESH_64 = 6, + SGE_CIDXFLUSHTHRESH_128 = 7, + + SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */ +}; + +/* PCI-e memory window access */ +enum pcie_memwin { + MEMWIN_NIC = 0, + MEMWIN_RSVD1 = 1, + MEMWIN_RSVD2 = 2, + MEMWIN_RDMA = 3, + MEMWIN_RSVD4 = 4, + MEMWIN_FOISCSI = 5, + MEMWIN_CSIOSTOR = 6, + MEMWIN_RSVD7 = 7, +}; + +struct sge_qstat { /* data written to SGE queue status entries */ + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + +/* + * Structure for last 128 bits of response descriptors + */ +struct rsp_ctrl { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + }; +}; + +#define RSPD_NEWBUF 0x80000000U +#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU) +#define RSPD_QID(x) RSPD_LEN(x) + +#define RSPD_GEN(x) ((x) >> 7) +#define RSPD_TYPE(x) (((x) >> 4) & 3) + +#define V_QINTR_CNT_EN 0x0 +#define QINTR_CNT_EN 0x1 +#define QINTR_TIMER_IDX(x) ((x) << 1) +#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) + +/* + * Flash layout. + */ +#define FLASH_START(start) ((start) * SF_SEC_SIZE) +#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE) + +enum { + /* + * Various Expansion-ROM boot images, etc. + */ + FLASH_EXP_ROM_START_SEC = 0, + FLASH_EXP_ROM_NSECS = 6, + FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC), + FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS), + + /* + * iSCSI Boot Firmware Table (iBFT) and other driver-related + * parameters ... + */ + FLASH_IBFT_START_SEC = 6, + FLASH_IBFT_NSECS = 1, + FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC), + FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS), + + /* + * Boot configuration data. + */ + FLASH_BOOTCFG_START_SEC = 7, + FLASH_BOOTCFG_NSECS = 1, + FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC), + FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS), + + /* + * Location of firmware image in FLASH. + */ + FLASH_FW_START_SEC = 8, + FLASH_FW_NSECS = 16, + FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), + FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + + /* + * iSCSI persistent/crash information. + */ + FLASH_ISCSI_CRASH_START_SEC = 29, + FLASH_ISCSI_CRASH_NSECS = 1, + FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC), + FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS), + + /* + * FCoE persistent/crash information. + */ + FLASH_FCOE_CRASH_START_SEC = 30, + FLASH_FCOE_CRASH_NSECS = 1, + FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC), + FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS), + + /* + * Location of Firmware Configuration File in FLASH. Since the FPGA + * "FLASH" is smaller we need to store the Configuration File in a + * different location -- which will overlap the end of the firmware + * image if firmware ever gets that large ... + */ + FLASH_CFG_START_SEC = 31, + FLASH_CFG_NSECS = 1, + FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), + FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + + /* We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal + * operations. 
+ */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, + + FLASH_FPGA_CFG_START_SEC = 15, + FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC), + + /* + * Sectors 32-63 are reserved for FLASH failover. + */ +}; + +#undef FLASH_START +#undef FLASH_MAX_SIZE + +#endif /* __T4_HW_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h new file mode 100644 index 000000000..30a2f56e9 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -0,0 +1,1097 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __T4_MSG_H +#define __T4_MSG_H + +#include <linux/types.h> + +enum { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_ACCEPT_RPL = 0x2, + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB_FIELD = 0x5, + CPL_GET_TCB = 0x6, + CPL_CLOSE_CON_REQ = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_RX_DATA_ACK = 0xD, + CPL_TX_PKT = 0xE, + CPL_L2T_WRITE_REQ = 0x12, + CPL_TID_RELEASE = 0x1A, + + CPL_CLOSE_LISTSRV_RPL = 0x20, + CPL_L2T_WRITE_RPL = 0x23, + CPL_PASS_OPEN_RPL = 0x24, + CPL_ACT_OPEN_RPL = 0x25, + CPL_PEER_CLOSE = 0x26, + CPL_ABORT_REQ_RSS = 0x2B, + CPL_ABORT_RPL_RSS = 0x2D, + + CPL_CLOSE_CON_RPL = 0x32, + CPL_ISCSI_HDR = 0x33, + CPL_RDMA_CQE = 0x35, + CPL_RDMA_CQE_READ_RSP = 0x36, + CPL_RDMA_CQE_ERR = 0x37, + CPL_RX_DATA = 0x39, + CPL_SET_TCB_RPL = 0x3A, + CPL_RX_PKT = 0x3B, + CPL_RX_DDP_COMPLETE = 0x3F, + + CPL_ACT_ESTABLISH = 0x40, + CPL_PASS_ESTABLISH = 0x41, + CPL_RX_DATA_DDP = 0x42, + CPL_PASS_ACCEPT_REQ = 0x44, + CPL_TRACE_PKT_T5 = 0x48, + CPL_RX_ISCSI_DDP = 0x49, + + CPL_RDMA_READ_REQ = 0x60, + + CPL_PASS_OPEN_REQ6 = 0x81, + CPL_ACT_OPEN_REQ6 = 0x83, + + CPL_RDMA_TERMINATE = 0xA2, + CPL_RDMA_WRITE = 0xA4, + CPL_SGE_EGR_UPDATE = 0xA5, + + CPL_TRACE_PKT = 0xB0, + CPL_ISCSI_DATA = 0xB2, + + CPL_FW4_MSG = 0xC0, + CPL_FW4_PLD = 0xC1, + CPL_FW4_ACK = 0xC3, + + CPL_FW6_MSG = 0xE0, + CPL_FW6_PLD = 0xE1, + CPL_TX_PKT_LSO = 0xED, + CPL_TX_PKT_XT = 0xEE, + + NUM_CPL_CMDS +}; + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_FULL = 3, + CPL_ERR_BAD_LENGTH = 15, + CPL_ERR_BAD_ROUTE = 18, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST_SYNRECV = 21, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_RTX_NEG_ADVICE = 35, + CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_KEEPALV_NEG_ADVICE = 37, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_IWARP_FLM = 50, +}; + +enum { + CPL_CONN_POLICY_AUTO = 0, + CPL_CONN_POLICY_ASK = 1, + CPL_CONN_POLICY_FILTER = 2, + CPL_CONN_POLICY_DENY = 3 +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_ISCSI = 2, + ULP_MODE_RDMA = 4, + ULP_MODE_TCPDDP = 5, + ULP_MODE_FCOE = 6, +}; + +enum { + ULP_CRC_HEADER = 1 << 0, + ULP_CRC_DATA = 1 << 1 +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCP = 0, + TX_CSUM_UDP = 1, + TX_CSUM_CRC16 = 4, + TX_CSUM_CRC32 = 5, + TX_CSUM_CRC32C = 6, + TX_CSUM_FCOE = 7, + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, + TX_CSUM_UDPIP6 = 11, + TX_CSUM_IP = 12, +}; + +union opcode_tid { + __be32 opcode_tid; + u8 opcode; +}; + +#define CPL_OPCODE_S 24 +#define CPL_OPCODE_V(x) ((x) << CPL_OPCODE_S) +#define CPL_OPCODE_G(x) (((x) >> CPL_OPCODE_S) & 0xFF) +#define TID_G(x) ((x) & 0xFFFFFF) + +/* tid is assumed to be 24-bits */ +#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE_V(opcode) | (tid)) + +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) + +/* extract the TID from a CPL command */ +#define GET_TID(cmd) (TID_G(be32_to_cpu(OPCODE_TID(cmd)))) + +/* partitioning of TID fields that also carry a queue id */ +#define TID_TID_S 0 +#define TID_TID_M 0x3fff +#define TID_TID_G(x) (((x) >> TID_TID_S) & TID_TID_M) + +#define TID_QID_S 14 +#define TID_QID_M 0x3ff +#define TID_QID_V(x) ((x) << TID_QID_S) +#define TID_QID_G(x) (((x) >> TID_QID_S) & TID_QID_M) + +struct rss_header { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 channel:2; + u8 filter_hit:1; + u8
filter_tid:1; + u8 hash_type:2; + u8 ipv6:1; + u8 send2fw:1; +#else + u8 send2fw:1; + u8 ipv6:1; + u8 hash_type:2; + u8 filter_tid:1; + u8 filter_hit:1; + u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +/* wr_hi fields */ +#define WR_OP_S 24 +#define WR_OP_V(x) ((__u64)(x) << WR_OP_S) + +#define WR_HDR struct work_request_hdr wr + +/* option 0 fields */ +#define TX_CHAN_S 2 +#define TX_CHAN_V(x) ((x) << TX_CHAN_S) + +#define ULP_MODE_S 8 +#define ULP_MODE_V(x) ((x) << ULP_MODE_S) + +#define RCV_BUFSIZ_S 12 +#define RCV_BUFSIZ_M 0x3FFU +#define RCV_BUFSIZ_V(x) ((x) << RCV_BUFSIZ_S) + +#define SMAC_SEL_S 28 +#define SMAC_SEL_V(x) ((__u64)(x) << SMAC_SEL_S) + +#define L2T_IDX_S 36 +#define L2T_IDX_V(x) ((__u64)(x) << L2T_IDX_S) + +#define WND_SCALE_S 50 +#define WND_SCALE_V(x) ((__u64)(x) << WND_SCALE_S) + +#define KEEP_ALIVE_S 54 +#define KEEP_ALIVE_V(x) ((__u64)(x) << KEEP_ALIVE_S) +#define KEEP_ALIVE_F KEEP_ALIVE_V(1ULL) + +#define MSS_IDX_S 60 +#define MSS_IDX_M 0xF +#define MSS_IDX_V(x) ((__u64)(x) << MSS_IDX_S) +#define MSS_IDX_G(x) (((x) >> MSS_IDX_S) & MSS_IDX_M) + +/* option 2 fields */ +#define RSS_QUEUE_S 0 +#define RSS_QUEUE_M 0x3FF +#define RSS_QUEUE_V(x) ((x) << RSS_QUEUE_S) +#define RSS_QUEUE_G(x) (((x) >> RSS_QUEUE_S) & RSS_QUEUE_M) + +#define RSS_QUEUE_VALID_S 10 +#define RSS_QUEUE_VALID_V(x) ((x) << RSS_QUEUE_VALID_S) +#define RSS_QUEUE_VALID_F RSS_QUEUE_VALID_V(1U) + +#define RX_FC_DISABLE_S 20 +#define RX_FC_DISABLE_V(x) ((x) << RX_FC_DISABLE_S) +#define RX_FC_DISABLE_F RX_FC_DISABLE_V(1U) + +#define RX_FC_VALID_S 22 +#define RX_FC_VALID_V(x) ((x) << RX_FC_VALID_S) +#define RX_FC_VALID_F RX_FC_VALID_V(1U) + +#define RX_CHANNEL_S 26 +#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S) + +#define WND_SCALE_EN_S 28 +#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S) +#define WND_SCALE_EN_F WND_SCALE_EN_V(1U) + +#define T5_OPT_2_VALID_S 31 +#define T5_OPT_2_VALID_V(x) ((x) << T5_OPT_2_VALID_S) +#define T5_OPT_2_VALID_F T5_OPT_2_VALID_V(1U) + +struct cpl_pass_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be64 opt1; +}; + +/* option 0 fields */ +#define NO_CONG_S 4 +#define NO_CONG_V(x) ((x) << NO_CONG_S) +#define NO_CONG_F NO_CONG_V(1U) + +#define DELACK_S 5 +#define DELACK_V(x) ((x) << DELACK_S) +#define DELACK_F DELACK_V(1U) + +#define DSCP_S 22 +#define DSCP_M 0x3F +#define DSCP_V(x) ((x) << DSCP_S) +#define DSCP_G(x) (((x) >> DSCP_S) & DSCP_M) + +#define TCAM_BYPASS_S 48 +#define TCAM_BYPASS_V(x) ((__u64)(x) << TCAM_BYPASS_S) +#define TCAM_BYPASS_F TCAM_BYPASS_V(1ULL) + +#define NAGLE_S 49 +#define NAGLE_V(x) ((__u64)(x) << NAGLE_S) +#define NAGLE_F NAGLE_V(1ULL) + +/* option 1 fields */ +#define SYN_RSS_ENABLE_S 0 +#define SYN_RSS_ENABLE_V(x) ((x) << SYN_RSS_ENABLE_S) +#define SYN_RSS_ENABLE_F SYN_RSS_ENABLE_V(1U) + +#define SYN_RSS_QUEUE_S 2 +#define SYN_RSS_QUEUE_V(x) ((x) << SYN_RSS_QUEUE_S) + +#define CONN_POLICY_S 22 +#define CONN_POLICY_V(x) ((x) << CONN_POLICY_S) + +struct cpl_pass_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be64 opt1; +}; + +struct cpl_pass_open_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; + __be64 opt0; +}; + +/* option 2 fields 
*/ +#define RX_COALESCE_VALID_S 11 +#define RX_COALESCE_VALID_V(x) ((x) << RX_COALESCE_VALID_S) +#define RX_COALESCE_VALID_F RX_COALESCE_VALID_V(1U) + +#define RX_COALESCE_S 12 +#define RX_COALESCE_V(x) ((x) << RX_COALESCE_S) + +#define PACE_S 16 +#define PACE_V(x) ((x) << PACE_S) + +#define TX_QUEUE_S 23 +#define TX_QUEUE_M 0x7 +#define TX_QUEUE_V(x) ((x) << TX_QUEUE_S) +#define TX_QUEUE_G(x) (((x) >> TX_QUEUE_S) & TX_QUEUE_M) + +#define CCTRL_ECN_S 27 +#define CCTRL_ECN_V(x) ((x) << CCTRL_ECN_S) +#define CCTRL_ECN_F CCTRL_ECN_V(1U) + +#define TSTAMPS_EN_S 29 +#define TSTAMPS_EN_V(x) ((x) << TSTAMPS_EN_S) +#define TSTAMPS_EN_F TSTAMPS_EN_V(1U) + +#define SACK_EN_S 30 +#define SACK_EN_V(x) ((x) << SACK_EN_S) +#define SACK_EN_F SACK_EN_V(1U) + +struct cpl_t5_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; + __be64 opt0; + __be32 iss; + __be32 rsvd; +}; + +struct cpl_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +#define FILTER_TUPLE_S 24 +#define FILTER_TUPLE_M 0xFFFFFFFFFF +#define FILTER_TUPLE_V(x) ((x) << FILTER_TUPLE_S) +#define FILTER_TUPLE_G(x) (((x) >> FILTER_TUPLE_S) & FILTER_TUPLE_M) +struct cpl_t5_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; +}; + +struct cpl_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_t5_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; +}; + +struct cpl_act_open_rpl { + union opcode_tid ot; + __be32 atid_status; +}; + +/* cpl_act_open_rpl.atid_status fields */ +#define AOPEN_STATUS_S 0 +#define AOPEN_STATUS_M 0xFF +#define AOPEN_STATUS_G(x) (((x) >> AOPEN_STATUS_S) & AOPEN_STATUS_M) + +#define AOPEN_ATID_S 8 +#define AOPEN_ATID_M 0xFFFFFF +#define AOPEN_ATID_G(x) (((x) >> AOPEN_ATID_S) & AOPEN_ATID_M) + +struct cpl_pass_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_stid; + __be16 mac_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +/* cpl_pass_establish.tos_stid fields */ +#define PASS_OPEN_TID_S 0 +#define PASS_OPEN_TID_M 0xFFFFFF +#define PASS_OPEN_TID_V(x) ((x) << PASS_OPEN_TID_S) +#define PASS_OPEN_TID_G(x) (((x) >> PASS_OPEN_TID_S) & PASS_OPEN_TID_M) + +#define PASS_OPEN_TOS_S 24 +#define PASS_OPEN_TOS_M 0xFF +#define PASS_OPEN_TOS_V(x) ((x) << PASS_OPEN_TOS_S) +#define PASS_OPEN_TOS_G(x) (((x) >> PASS_OPEN_TOS_S) & PASS_OPEN_TOS_M) + +/* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */ +#define TCPOPT_WSCALE_OK_S 5 +#define TCPOPT_WSCALE_OK_M 0x1 +#define TCPOPT_WSCALE_OK_G(x) \ + (((x) >> TCPOPT_WSCALE_OK_S) & TCPOPT_WSCALE_OK_M) + +#define TCPOPT_SACK_S 6 +#define TCPOPT_SACK_M 0x1 +#define TCPOPT_SACK_G(x) (((x) >> TCPOPT_SACK_S) & TCPOPT_SACK_M) + +#define TCPOPT_TSTAMP_S 7 +#define TCPOPT_TSTAMP_M 0x1 +#define TCPOPT_TSTAMP_G(x) (((x) >> TCPOPT_TSTAMP_S) & TCPOPT_TSTAMP_M) + +#define TCPOPT_SND_WSCALE_S 8 +#define TCPOPT_SND_WSCALE_M 0xF +#define TCPOPT_SND_WSCALE_G(x) \ + (((x) >> TCPOPT_SND_WSCALE_S) & TCPOPT_SND_WSCALE_M) + 
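/* Editor's illustrative sketch, not part of the original header: the tcp_opt
 * word reported in cpl_pass_establish/cpl_act_establish is big-endian on the
 * wire, so a consumer would normally convert it once and then apply the *_G()
 * accessors defined above.  The helper name below is hypothetical and only
 * demonstrates that usage.
 */
static inline unsigned int t4_example_peer_snd_wscale(__be16 tcp_opt)
{
	u16 opt = be16_to_cpu(tcp_opt);	/* assumes byteorder helpers are visible */

	/* the advertised window scale is only valid if the peer offered scaling */
	return TCPOPT_WSCALE_OK_G(opt) ? TCPOPT_SND_WSCALE_G(opt) : 0;
}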
+#define TCPOPT_MSS_S 12 +#define TCPOPT_MSS_M 0xF +#define TCPOPT_MSS_G(x) (((x) >> TCPOPT_MSS_S) & TCPOPT_MSS_M) + +struct cpl_act_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_atid; + __be16 mac_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_get_tcb { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 cookie; +}; + +/* cpl_get_tcb.reply_ctrl fields */ +#define QUEUENO_S 0 +#define QUEUENO_V(x) ((x) << QUEUENO_S) + +#define REPLY_CHAN_S 14 +#define REPLY_CHAN_V(x) ((x) << REPLY_CHAN_S) +#define REPLY_CHAN_F REPLY_CHAN_V(1U) + +#define NO_REPLY_S 15 +#define NO_REPLY_V(x) ((x) << NO_REPLY_S) +#define NO_REPLY_F NO_REPLY_V(1U) + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 word_cookie; + __be64 mask; + __be64 val; +}; + +/* cpl_set_tcb_field.word_cookie fields */ +#define TCB_WORD_S 0 +#define TCB_WORD(x) ((x) << TCB_WORD_S) + +#define TCB_COOKIE_S 5 +#define TCB_COOKIE_M 0x7 +#define TCB_COOKIE_V(x) ((x) << TCB_COOKIE_S) +#define TCB_COOKIE_G(x) (((x) >> TCB_COOKIE_S) & TCB_COOKIE_M) + +struct cpl_set_tcb_rpl { + union opcode_tid ot; + __be16 rsvd; + u8 cookie; + u8 status; + __be64 oldval; +}; + +struct cpl_close_con_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_close_con_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; + __be32 snd_nxt; + __be32 rcv_nxt; +}; + +struct cpl_close_listsvr_req { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 rsvd; +}; + +/* additional cpl_close_listsvr_req.reply_ctrl field */ +#define LISTSVR_IPV6_S 14 +#define LISTSVR_IPV6_V(x) ((x) << LISTSVR_IPV6_S) +#define LISTSVR_IPV6_F LISTSVR_IPV6_V(1U) + +struct cpl_close_listsvr_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_peer_close { + union opcode_tid ot; + __be32 rcv_nxt; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; +#define TXPKT_VF(x) ((x) << 0) +#define TXPKT_PF(x) ((x) << 8) +#define TXPKT_VF_VLD (1 << 11) +#define TXPKT_OVLAN_IDX(x) ((x) << 12) +#define TXPKT_INTF(x) ((x) << 16) +#define TXPKT_INS_OVLAN (1 << 21) +#define TXPKT_OPCODE(x) ((x) << 24) + __be16 pack; + __be16 len; + __be64 ctrl1; +#define TXPKT_CSUM_END(x) ((x) << 12) +#define TXPKT_CSUM_START(x) ((x) << 20) +#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) +#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) +#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) +#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) +#define TXPKT_VLAN(x) ((u64)(x) << 44) +#define TXPKT_VLAN_VLD (1ULL << 60) +#define TXPKT_IPCSUM_DIS (1ULL << 62) +#define TXPKT_L4CSUM_DIS (1ULL << 63) +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +#define cpl_tx_pkt_xt cpl_tx_pkt + +struct cpl_tx_pkt_lso_core { + __be32 lso_ctrl; +#define LSO_TCPHDR_LEN(x) ((x) << 0) +#define LSO_IPHDR_LEN(x) ((x) << 4) +#define LSO_ETHHDR_LEN(x) ((x) << 16) +#define LSO_IPV6(x) ((x) << 20) +#define LSO_LAST_SLICE (1 << 22) +#define LSO_FIRST_SLICE (1 << 23) +#define LSO_OPCODE(x) ((x) << 24) +#define LSO_T5_XFER_SIZE(x) ((x) << 0) + __be16 
ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +/* cpl_tx_pkt_lso_core.lso_ctrl fields */ +#define LSO_TCPHDR_LEN_S 0 +#define LSO_TCPHDR_LEN_V(x) ((x) << LSO_TCPHDR_LEN_S) + +#define LSO_IPHDR_LEN_S 4 +#define LSO_IPHDR_LEN_V(x) ((x) << LSO_IPHDR_LEN_S) + +#define LSO_ETHHDR_LEN_S 16 +#define LSO_ETHHDR_LEN_V(x) ((x) << LSO_ETHHDR_LEN_S) + +#define LSO_IPV6_S 20 +#define LSO_IPV6_V(x) ((x) << LSO_IPV6_S) +#define LSO_IPV6_F LSO_IPV6_V(1U) + +#define LSO_LAST_SLICE_S 22 +#define LSO_LAST_SLICE_V(x) ((x) << LSO_LAST_SLICE_S) +#define LSO_LAST_SLICE_F LSO_LAST_SLICE_V(1U) + +#define LSO_FIRST_SLICE_S 23 +#define LSO_FIRST_SLICE_V(x) ((x) << LSO_FIRST_SLICE_S) +#define LSO_FIRST_SLICE_F LSO_FIRST_SLICE_V(1U) + +#define LSO_OPCODE_S 24 +#define LSO_OPCODE_V(x) ((x) << LSO_OPCODE_S) + +#define LSO_T5_XFER_SIZE_S 0 +#define LSO_T5_XFER_SIZE_V(x) ((x) << LSO_T5_XFER_SIZE_S) + +struct cpl_tx_pkt_lso { + WR_HDR; + struct cpl_tx_pkt_lso_core c; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_iscsi_hdr { + union opcode_tid ot; + __be16 pdu_len_ddp; + __be16 len; + __be32 seq; + __be16 urg; + u8 rsvd; + u8 status; +}; + +/* cpl_iscsi_hdr.pdu_len_ddp fields */ +#define ISCSI_PDU_LEN_S 0 +#define ISCSI_PDU_LEN_M 0x7FFF +#define ISCSI_PDU_LEN_V(x) ((x) << ISCSI_PDU_LEN_S) +#define ISCSI_PDU_LEN_G(x) (((x) >> ISCSI_PDU_LEN_S) & ISCSI_PDU_LEN_M) + +#define ISCSI_DDP_S 15 +#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S) +#define ISCSI_DDP_F ISCSI_DDP_V(1U) + +struct cpl_rx_data { + union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 seq; + __be16 urg; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 dack_mode:2; + u8 psh:1; + u8 heartbeat:1; + u8 ddp_off:1; + u8 :3; +#else + u8 :3; + u8 ddp_off:1; + u8 heartbeat:1; + u8 psh:1; + u8 dack_mode:2; +#endif + u8 status; +}; + +struct cpl_rx_data_ack { + WR_HDR; + union opcode_tid ot; + __be32 credit_dack; +}; + +/* cpl_rx_data_ack.ack_seq fields */ +#define RX_CREDITS_S 0 +#define RX_CREDITS_V(x) ((x) << RX_CREDITS_S) + +#define RX_FORCE_ACK_S 28 +#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S) +#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U) + +struct cpl_rx_pkt { + struct rss_header rsshdr; + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 csum_calc:1; + u8 ipmi_pkt:1; + u8 vlan_ex:1; + u8 ip_frag:1; +#else + u8 ip_frag:1; + u8 vlan_ex:1; + u8 ipmi_pkt:1; + u8 csum_calc:1; + u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; + __be16 hdr_len; + __be16 err_vec; +}; + +#define RXF_PSH_S 20 +#define RXF_PSH_V(x) ((x) << RXF_PSH_S) +#define RXF_PSH_F RXF_PSH_V(1U) + +#define RXF_SYN_S 21 +#define RXF_SYN_V(x) ((x) << RXF_SYN_S) +#define RXF_SYN_F RXF_SYN_V(1U) + +#define RXF_UDP_S 22 +#define RXF_UDP_V(x) ((x) << RXF_UDP_S) +#define RXF_UDP_F RXF_UDP_V(1U) + +#define RXF_TCP_S 23 +#define RXF_TCP_V(x) ((x) << RXF_TCP_S) +#define RXF_TCP_F RXF_TCP_V(1U) + +#define RXF_IP_S 24 +#define RXF_IP_V(x) ((x) << RXF_IP_S) +#define RXF_IP_F RXF_IP_V(1U) + +#define RXF_IP6_S 25 +#define RXF_IP6_V(x) ((x) << RXF_IP6_S) +#define RXF_IP6_F RXF_IP6_V(1U) + +#define RXF_SYN_COOKIE_S 26 +#define RXF_SYN_COOKIE_V(x) ((x) << RXF_SYN_COOKIE_S) +#define RXF_SYN_COOKIE_F RXF_SYN_COOKIE_V(1U) + +#define RXF_FCOE_S 26 +#define RXF_FCOE_V(x) ((x) << RXF_FCOE_S) +#define RXF_FCOE_F RXF_FCOE_V(1U) + +#define RXF_LRO_S 27 +#define RXF_LRO_V(x) ((x) << RXF_LRO_S) +#define RXF_LRO_F RXF_LRO_V(1U) + +/* rx_pkt.l2info fields */ 
+#define RX_ETHHDR_LEN_S 0 +#define RX_ETHHDR_LEN_M 0x1F +#define RX_ETHHDR_LEN_V(x) ((x) << RX_ETHHDR_LEN_S) +#define RX_ETHHDR_LEN_G(x) (((x) >> RX_ETHHDR_LEN_S) & RX_ETHHDR_LEN_M) + +#define RX_T5_ETHHDR_LEN_S 0 +#define RX_T5_ETHHDR_LEN_M 0x3F +#define RX_T5_ETHHDR_LEN_V(x) ((x) << RX_T5_ETHHDR_LEN_S) +#define RX_T5_ETHHDR_LEN_G(x) (((x) >> RX_T5_ETHHDR_LEN_S) & RX_T5_ETHHDR_LEN_M) + +#define RX_MACIDX_S 8 +#define RX_MACIDX_M 0x1FF +#define RX_MACIDX_V(x) ((x) << RX_MACIDX_S) +#define RX_MACIDX_G(x) (((x) >> RX_MACIDX_S) & RX_MACIDX_M) + +#define RXF_SYN_S 21 +#define RXF_SYN_V(x) ((x) << RXF_SYN_S) +#define RXF_SYN_F RXF_SYN_V(1U) + +#define RX_CHAN_S 28 +#define RX_CHAN_M 0xF +#define RX_CHAN_V(x) ((x) << RX_CHAN_S) +#define RX_CHAN_G(x) (((x) >> RX_CHAN_S) & RX_CHAN_M) + +/* rx_pkt.hdr_len fields */ +#define RX_TCPHDR_LEN_S 0 +#define RX_TCPHDR_LEN_M 0x3F +#define RX_TCPHDR_LEN_V(x) ((x) << RX_TCPHDR_LEN_S) +#define RX_TCPHDR_LEN_G(x) (((x) >> RX_TCPHDR_LEN_S) & RX_TCPHDR_LEN_M) + +#define RX_IPHDR_LEN_S 6 +#define RX_IPHDR_LEN_M 0x3FF +#define RX_IPHDR_LEN_V(x) ((x) << RX_IPHDR_LEN_S) +#define RX_IPHDR_LEN_G(x) (((x) >> RX_IPHDR_LEN_S) & RX_IPHDR_LEN_M) + +/* rx_pkt.err_vec fields */ +#define RXERR_CSUM_S 13 +#define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S) +#define RXERR_CSUM_F RXERR_CSUM_V(1U) + +struct cpl_trace_pkt { + u8 opcode; + u8 intf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 runt:4; + u8 filter_hit:4; + u8 :6; + u8 err:1; + u8 trunc:1; +#else + u8 filter_hit:4; + u8 runt:4; + u8 trunc:1; + u8 err:1; + u8 :6; +#endif + __be16 rsvd; + __be16 len; + __be64 tstamp; +}; + +struct cpl_t5_trace_pkt { + __u8 opcode; + __u8 intf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 runt:4; + __u8 filter_hit:4; + __u8:6; + __u8 err:1; + __u8 trunc:1; +#else + __u8 filter_hit:4; + __u8 runt:4; + __u8 trunc:1; + __u8 err:1; + __u8:6; +#endif + __be16 rsvd; + __be16 len; + __be64 tstamp; + __be64 rsvd1; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be16 params; + __be16 l2t_idx; + __be16 vlan; + u8 dst_mac[6]; +}; + +/* cpl_l2t_write_req.params fields */ +#define L2T_W_INFO_S 2 +#define L2T_W_INFO_V(x) ((x) << L2T_W_INFO_S) + +#define L2T_W_PORT_S 8 +#define L2T_W_PORT_V(x) ((x) << L2T_W_PORT_S) + +#define L2T_W_NOREPLY_S 15 +#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S) +#define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U) + +struct cpl_l2t_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rdma_terminate { + union opcode_tid ot; + __be16 rsvd; + __be16 len; +}; + +struct cpl_sge_egr_update { + __be32 opcode_qid; + __be16 cidx; + __be16 pidx; +}; + +/* cpl_sge_egr_update.ot fields */ +#define EGR_QID_S 0 +#define EGR_QID_M 0x1FFFF +#define EGR_QID_G(x) (((x) >> EGR_QID_S) & EGR_QID_M) + +/* cpl_fw*.type values */ +enum { + FW_TYPE_CMD_RPL = 0, + FW_TYPE_WR_RPL = 1, + FW_TYPE_CQE = 2, + FW_TYPE_OFLD_CONNECTION_WR_RPL = 3, + FW_TYPE_RSSCPL = 4, +}; + +struct cpl_fw4_pld { + u8 opcode; + u8 rsvd0[3]; + u8 type; + u8 rsvd1; + __be16 len; + __be64 data; + __be64 rsvd2; +}; + +struct cpl_fw6_pld { + u8 opcode; + u8 rsvd[5]; + __be16 len; + __be64 data[4]; +}; + +struct cpl_fw4_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw4_ack { + union opcode_tid ot; + u8 credits; + u8 rsvd0[2]; + u8 seq_vld; + __be32 snd_nxt; + __be32 snd_una; + __be64 rsvd1; +}; + +struct cpl_fw6_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +/* cpl_fw6_msg.type values */ +enum { + 
FW6_TYPE_CMD_RPL = 0, + FW6_TYPE_WR_RPL = 1, + FW6_TYPE_CQE = 2, + FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3, + FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL, +}; + +struct cpl_fw6_msg_ofld_connection_wr_rpl { + __u64 cookie; + __be32 tid; /* or atid in case of active failure */ + __u8 t_state; + __u8 retval; + __u8 rsvd[2]; +}; + +enum { + ULP_TX_MEM_READ = 2, + ULP_TX_MEM_WRITE = 3, + ULP_TX_PKT = 4 +}; + +enum { + ULP_TX_SC_NOOP = 0x80, + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +#define ULPTX_CMD_S 24 +#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S) + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; + __be32 len0; + __be64 addr0; + struct ulptx_sge_pair sge[0]; +}; + +#define ULPTX_NSGE_S 0 +#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) + +#define ULPTX_MORE_S 23 +#define ULPTX_MORE_V(x) ((x) << ULPTX_MORE_S) +#define ULPTX_MORE_F ULPTX_MORE_V(1U) + +struct ulp_mem_io { + WR_HDR; + __be32 cmd; + __be32 len16; /* command length */ + __be32 dlen; /* data length in 32-byte units */ + __be32 lock_addr; +}; + +#define ULP_MEMIO_LOCK_S 31 +#define ULP_MEMIO_LOCK_V(x) ((x) << ULP_MEMIO_LOCK_S) +#define ULP_MEMIO_LOCK_F ULP_MEMIO_LOCK_V(1U) + +/* additional ulp_mem_io.cmd fields */ +#define ULP_MEMIO_ORDER_S 23 +#define ULP_MEMIO_ORDER_V(x) ((x) << ULP_MEMIO_ORDER_S) +#define ULP_MEMIO_ORDER_F ULP_MEMIO_ORDER_V(1U) + +#define T5_ULP_MEMIO_IMM_S 23 +#define T5_ULP_MEMIO_IMM_V(x) ((x) << T5_ULP_MEMIO_IMM_S) +#define T5_ULP_MEMIO_IMM_F T5_ULP_MEMIO_IMM_V(1U) + +#define T5_ULP_MEMIO_ORDER_S 22 +#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S) +#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U) + +/* ulp_mem_io.lock_addr fields */ +#define ULP_MEMIO_ADDR_S 0 +#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S) + +/* ulp_mem_io.dlen fields */ +#define ULP_MEMIO_DATA_LEN_S 0 +#define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S) + +#endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h new file mode 100644 index 000000000..1a9a6f334 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -0,0 +1,158 @@ +/* + * This file is part of the Chelsio T4/T5 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __T4_PCI_ID_TBL_H__ +#define __T4_PCI_ID_TBL_H__ + +/* The code can define cpp macros for creating a PCI Device ID Table. This is + * useful because it allows the PCI ID Table to be maintained in a single place. + * + * The macros are: + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + * -- Used to start the definition of the PCI ID Table. + * + * CH_PCI_DEVICE_ID_FUNCTION + * -- The PCI Function Number to use in the PCI Device ID Table. "0" + * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4, + * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc. + * + * CH_PCI_DEVICE_ID_FUNCTION2 [optional] + * -- If defined, create a PCI Device ID Table with both + * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated. + * + * CH_PCI_ID_TABLE_ENTRY(DeviceID) + * -- Used for the individual PCI Device ID entries. Note that we will + * -- be adding a trailing comma (",") after all of the entries (and + * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined). + * + * CH_PCI_DEVICE_ID_TABLE_DEFINE_END + * -- Used to finish the definition of the PCI ID Table. Note that we + * -- will be adding a trailing semi-colon (";") here. + */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION +#error CH_PCI_DEVICE_ID_FUNCTION not defined! +#endif +#ifndef CH_PCI_ID_TABLE_ENTRY +#error CH_PCI_ID_TABLE_ENTRY not defined! +#endif +#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END +#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined! +#endif + +/* T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We use this consistency in order to create the proper PCI Device IDs + * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION2 +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)) +#else +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION2) << 8)) +#endif + +CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + /* T4 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x4000), /* T440-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x4001), /* T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4002), /* T422-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4003), /* T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4004), /* T420-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4005), /* T440-bch */ + CH_PCI_ID_TABLE_FENTRY(0x4006), /* T440-ch */ + CH_PCI_ID_TABLE_FENTRY(0x4007), /* T420-so */ + CH_PCI_ID_TABLE_FENTRY(0x4008), /* T420-cx */ + CH_PCI_ID_TABLE_FENTRY(0x4009), /* T420-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400a), /* T404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400b), /* B420-sr */ + CH_PCI_ID_TABLE_FENTRY(0x400c), /* B404-bt */ + CH_PCI_ID_TABLE_FENTRY(0x400d), /* T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x400e), /* T440-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4080), /* Custom T480-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4081), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4082), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4083), /* Custom T420-xaui */ + CH_PCI_ID_TABLE_FENTRY(0x4084), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4085), /* Custom T420-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4086), /* Custom T440-bt */ + CH_PCI_ID_TABLE_FENTRY(0x4087), /* Custom T440-cr */ + CH_PCI_ID_TABLE_FENTRY(0x4088), /* Custom T440 2-xaui, 2-xfi */ + + /* T5 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */ + CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */ + CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */ + CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */ + CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ + CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ + CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ +CH_PCI_DEVICE_ID_TABLE_DEFINE_END; + +#endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h new file mode 100644 index 000000000..326674b19 --- 
/dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -0,0 +1,2717 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_REGS_H +#define __T4_REGS_H + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) +#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) + +#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) + +#define SGE_PF_KDOORBELL_A 0x0 + +#define QID_S 15 +#define QID_V(x) ((x) << QID_S) + +#define DBPRIO_S 14 +#define DBPRIO_V(x) ((x) << DBPRIO_S) +#define DBPRIO_F DBPRIO_V(1U) + +#define PIDX_S 0 +#define PIDX_V(x) ((x) << PIDX_S) + +#define SGE_VF_KDOORBELL_A 0x0 + +#define DBTYPE_S 13 +#define DBTYPE_V(x) ((x) << DBTYPE_S) +#define DBTYPE_F DBTYPE_V(1U) + +#define PIDX_T5_S 0 +#define PIDX_T5_M 0x1fffU +#define PIDX_T5_V(x) ((x) << PIDX_T5_S) +#define PIDX_T5_G(x) (((x) >> PIDX_T5_S) & PIDX_T5_M) + +#define SGE_PF_GTS_A 0x4 + +#define INGRESSQID_S 16 +#define INGRESSQID_V(x) ((x) << INGRESSQID_S) + +#define TIMERREG_S 13 +#define TIMERREG_V(x) ((x) << TIMERREG_S) + +#define SEINTARM_S 12 +#define SEINTARM_V(x) ((x) << SEINTARM_S) + +#define CIDXINC_S 0 +#define CIDXINC_M 0xfffU +#define CIDXINC_V(x) ((x) << CIDXINC_S) + +#define SGE_CONTROL_A 0x1008 +#define 
SGE_CONTROL2_A 0x1124 + +#define RXPKTCPLMODE_S 18 +#define RXPKTCPLMODE_V(x) ((x) << RXPKTCPLMODE_S) +#define RXPKTCPLMODE_F RXPKTCPLMODE_V(1U) + +#define EGRSTATUSPAGESIZE_S 17 +#define EGRSTATUSPAGESIZE_V(x) ((x) << EGRSTATUSPAGESIZE_S) +#define EGRSTATUSPAGESIZE_F EGRSTATUSPAGESIZE_V(1U) + +#define PKTSHIFT_S 10 +#define PKTSHIFT_M 0x7U +#define PKTSHIFT_V(x) ((x) << PKTSHIFT_S) +#define PKTSHIFT_G(x) (((x) >> PKTSHIFT_S) & PKTSHIFT_M) + +#define INGPCIEBOUNDARY_S 7 +#define INGPCIEBOUNDARY_V(x) ((x) << INGPCIEBOUNDARY_S) + +#define INGPADBOUNDARY_S 4 +#define INGPADBOUNDARY_M 0x7U +#define INGPADBOUNDARY_V(x) ((x) << INGPADBOUNDARY_S) +#define INGPADBOUNDARY_G(x) (((x) >> INGPADBOUNDARY_S) & INGPADBOUNDARY_M) + +#define EGRPCIEBOUNDARY_S 1 +#define EGRPCIEBOUNDARY_V(x) ((x) << EGRPCIEBOUNDARY_S) + +#define INGPACKBOUNDARY_S 16 +#define INGPACKBOUNDARY_M 0x7U +#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S) +#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \ + & INGPACKBOUNDARY_M) + +#define GLOBALENABLE_S 0 +#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S) +#define GLOBALENABLE_F GLOBALENABLE_V(1U) + +#define SGE_HOST_PAGE_SIZE_A 0x100c + +#define HOSTPAGESIZEPF7_S 28 +#define HOSTPAGESIZEPF7_M 0xfU +#define HOSTPAGESIZEPF7_V(x) ((x) << HOSTPAGESIZEPF7_S) +#define HOSTPAGESIZEPF7_G(x) (((x) >> HOSTPAGESIZEPF7_S) & HOSTPAGESIZEPF7_M) + +#define HOSTPAGESIZEPF6_S 24 +#define HOSTPAGESIZEPF6_M 0xfU +#define HOSTPAGESIZEPF6_V(x) ((x) << HOSTPAGESIZEPF6_S) +#define HOSTPAGESIZEPF6_G(x) (((x) >> HOSTPAGESIZEPF6_S) & HOSTPAGESIZEPF6_M) + +#define HOSTPAGESIZEPF5_S 20 +#define HOSTPAGESIZEPF5_M 0xfU +#define HOSTPAGESIZEPF5_V(x) ((x) << HOSTPAGESIZEPF5_S) +#define HOSTPAGESIZEPF5_G(x) (((x) >> HOSTPAGESIZEPF5_S) & HOSTPAGESIZEPF5_M) + +#define HOSTPAGESIZEPF4_S 16 +#define HOSTPAGESIZEPF4_M 0xfU +#define HOSTPAGESIZEPF4_V(x) ((x) << HOSTPAGESIZEPF4_S) +#define HOSTPAGESIZEPF4_G(x) (((x) >> HOSTPAGESIZEPF4_S) & HOSTPAGESIZEPF4_M) + +#define HOSTPAGESIZEPF3_S 12 +#define HOSTPAGESIZEPF3_M 0xfU +#define HOSTPAGESIZEPF3_V(x) ((x) << HOSTPAGESIZEPF3_S) +#define HOSTPAGESIZEPF3_G(x) (((x) >> HOSTPAGESIZEPF3_S) & HOSTPAGESIZEPF3_M) + +#define HOSTPAGESIZEPF2_S 8 +#define HOSTPAGESIZEPF2_M 0xfU +#define HOSTPAGESIZEPF2_V(x) ((x) << HOSTPAGESIZEPF2_S) +#define HOSTPAGESIZEPF2_G(x) (((x) >> HOSTPAGESIZEPF2_S) & HOSTPAGESIZEPF2_M) + +#define HOSTPAGESIZEPF1_S 4 +#define HOSTPAGESIZEPF1_M 0xfU +#define HOSTPAGESIZEPF1_V(x) ((x) << HOSTPAGESIZEPF1_S) +#define HOSTPAGESIZEPF1_G(x) (((x) >> HOSTPAGESIZEPF1_S) & HOSTPAGESIZEPF1_M) + +#define HOSTPAGESIZEPF0_S 0 +#define HOSTPAGESIZEPF0_M 0xfU +#define HOSTPAGESIZEPF0_V(x) ((x) << HOSTPAGESIZEPF0_S) +#define HOSTPAGESIZEPF0_G(x) (((x) >> HOSTPAGESIZEPF0_S) & HOSTPAGESIZEPF0_M) + +#define SGE_EGRESS_QUEUES_PER_PAGE_PF_A 0x1010 +#define SGE_EGRESS_QUEUES_PER_PAGE_VF_A 0x1014 + +#define QUEUESPERPAGEPF1_S 4 + +#define QUEUESPERPAGEPF0_S 0 +#define QUEUESPERPAGEPF0_M 0xfU +#define QUEUESPERPAGEPF0_V(x) ((x) << QUEUESPERPAGEPF0_S) +#define QUEUESPERPAGEPF0_G(x) (((x) >> QUEUESPERPAGEPF0_S) & QUEUESPERPAGEPF0_M) + +#define SGE_INT_CAUSE1_A 0x1024 +#define SGE_INT_CAUSE2_A 0x1030 +#define SGE_INT_CAUSE3_A 0x103c + +#define ERR_FLM_DBP_S 31 +#define ERR_FLM_DBP_V(x) ((x) << ERR_FLM_DBP_S) +#define ERR_FLM_DBP_F ERR_FLM_DBP_V(1U) + +#define ERR_FLM_IDMA1_S 30 +#define ERR_FLM_IDMA1_V(x) ((x) << ERR_FLM_IDMA1_S) +#define ERR_FLM_IDMA1_F ERR_FLM_IDMA1_V(1U) + +#define ERR_FLM_IDMA0_S 29 +#define ERR_FLM_IDMA0_V(x) ((x) << ERR_FLM_IDMA0_S) +#define 
ERR_FLM_IDMA0_F ERR_FLM_IDMA0_V(1U) + +#define ERR_FLM_HINT_S 28 +#define ERR_FLM_HINT_V(x) ((x) << ERR_FLM_HINT_S) +#define ERR_FLM_HINT_F ERR_FLM_HINT_V(1U) + +#define ERR_PCIE_ERROR3_S 27 +#define ERR_PCIE_ERROR3_V(x) ((x) << ERR_PCIE_ERROR3_S) +#define ERR_PCIE_ERROR3_F ERR_PCIE_ERROR3_V(1U) + +#define ERR_PCIE_ERROR2_S 26 +#define ERR_PCIE_ERROR2_V(x) ((x) << ERR_PCIE_ERROR2_S) +#define ERR_PCIE_ERROR2_F ERR_PCIE_ERROR2_V(1U) + +#define ERR_PCIE_ERROR1_S 25 +#define ERR_PCIE_ERROR1_V(x) ((x) << ERR_PCIE_ERROR1_S) +#define ERR_PCIE_ERROR1_F ERR_PCIE_ERROR1_V(1U) + +#define ERR_PCIE_ERROR0_S 24 +#define ERR_PCIE_ERROR0_V(x) ((x) << ERR_PCIE_ERROR0_S) +#define ERR_PCIE_ERROR0_F ERR_PCIE_ERROR0_V(1U) + +#define ERR_CPL_EXCEED_IQE_SIZE_S 22 +#define ERR_CPL_EXCEED_IQE_SIZE_V(x) ((x) << ERR_CPL_EXCEED_IQE_SIZE_S) +#define ERR_CPL_EXCEED_IQE_SIZE_F ERR_CPL_EXCEED_IQE_SIZE_V(1U) + +#define ERR_INVALID_CIDX_INC_S 21 +#define ERR_INVALID_CIDX_INC_V(x) ((x) << ERR_INVALID_CIDX_INC_S) +#define ERR_INVALID_CIDX_INC_F ERR_INVALID_CIDX_INC_V(1U) + +#define ERR_CPL_OPCODE_0_S 19 +#define ERR_CPL_OPCODE_0_V(x) ((x) << ERR_CPL_OPCODE_0_S) +#define ERR_CPL_OPCODE_0_F ERR_CPL_OPCODE_0_V(1U) + +#define ERR_DROPPED_DB_S 18 +#define ERR_DROPPED_DB_V(x) ((x) << ERR_DROPPED_DB_S) +#define ERR_DROPPED_DB_F ERR_DROPPED_DB_V(1U) + +#define ERR_DATA_CPL_ON_HIGH_QID1_S 17 +#define ERR_DATA_CPL_ON_HIGH_QID1_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID1_S) +#define ERR_DATA_CPL_ON_HIGH_QID1_F ERR_DATA_CPL_ON_HIGH_QID1_V(1U) + +#define ERR_DATA_CPL_ON_HIGH_QID0_S 16 +#define ERR_DATA_CPL_ON_HIGH_QID0_V(x) ((x) << ERR_DATA_CPL_ON_HIGH_QID0_S) +#define ERR_DATA_CPL_ON_HIGH_QID0_F ERR_DATA_CPL_ON_HIGH_QID0_V(1U) + +#define ERR_BAD_DB_PIDX3_S 15 +#define ERR_BAD_DB_PIDX3_V(x) ((x) << ERR_BAD_DB_PIDX3_S) +#define ERR_BAD_DB_PIDX3_F ERR_BAD_DB_PIDX3_V(1U) + +#define ERR_BAD_DB_PIDX2_S 14 +#define ERR_BAD_DB_PIDX2_V(x) ((x) << ERR_BAD_DB_PIDX2_S) +#define ERR_BAD_DB_PIDX2_F ERR_BAD_DB_PIDX2_V(1U) + +#define ERR_BAD_DB_PIDX1_S 13 +#define ERR_BAD_DB_PIDX1_V(x) ((x) << ERR_BAD_DB_PIDX1_S) +#define ERR_BAD_DB_PIDX1_F ERR_BAD_DB_PIDX1_V(1U) + +#define ERR_BAD_DB_PIDX0_S 12 +#define ERR_BAD_DB_PIDX0_V(x) ((x) << ERR_BAD_DB_PIDX0_S) +#define ERR_BAD_DB_PIDX0_F ERR_BAD_DB_PIDX0_V(1U) + +#define ERR_ING_CTXT_PRIO_S 10 +#define ERR_ING_CTXT_PRIO_V(x) ((x) << ERR_ING_CTXT_PRIO_S) +#define ERR_ING_CTXT_PRIO_F ERR_ING_CTXT_PRIO_V(1U) + +#define ERR_EGR_CTXT_PRIO_S 9 +#define ERR_EGR_CTXT_PRIO_V(x) ((x) << ERR_EGR_CTXT_PRIO_S) +#define ERR_EGR_CTXT_PRIO_F ERR_EGR_CTXT_PRIO_V(1U) + +#define DBFIFO_HP_INT_S 8 +#define DBFIFO_HP_INT_V(x) ((x) << DBFIFO_HP_INT_S) +#define DBFIFO_HP_INT_F DBFIFO_HP_INT_V(1U) + +#define DBFIFO_LP_INT_S 7 +#define DBFIFO_LP_INT_V(x) ((x) << DBFIFO_LP_INT_S) +#define DBFIFO_LP_INT_F DBFIFO_LP_INT_V(1U) + +#define INGRESS_SIZE_ERR_S 5 +#define INGRESS_SIZE_ERR_V(x) ((x) << INGRESS_SIZE_ERR_S) +#define INGRESS_SIZE_ERR_F INGRESS_SIZE_ERR_V(1U) + +#define EGRESS_SIZE_ERR_S 4 +#define EGRESS_SIZE_ERR_V(x) ((x) << EGRESS_SIZE_ERR_S) +#define EGRESS_SIZE_ERR_F EGRESS_SIZE_ERR_V(1U) + +#define SGE_INT_ENABLE3_A 0x1040 +#define SGE_FL_BUFFER_SIZE0_A 0x1044 +#define SGE_FL_BUFFER_SIZE1_A 0x1048 +#define SGE_FL_BUFFER_SIZE2_A 0x104c +#define SGE_FL_BUFFER_SIZE3_A 0x1050 +#define SGE_FL_BUFFER_SIZE4_A 0x1054 +#define SGE_FL_BUFFER_SIZE5_A 0x1058 +#define SGE_FL_BUFFER_SIZE6_A 0x105c +#define SGE_FL_BUFFER_SIZE7_A 0x1060 +#define SGE_FL_BUFFER_SIZE8_A 0x1064 + +#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0 + +#define THRESHOLD_0_S 24 
+#define THRESHOLD_0_M 0x3fU +#define THRESHOLD_0_V(x) ((x) << THRESHOLD_0_S) +#define THRESHOLD_0_G(x) (((x) >> THRESHOLD_0_S) & THRESHOLD_0_M) + +#define THRESHOLD_1_S 16 +#define THRESHOLD_1_M 0x3fU +#define THRESHOLD_1_V(x) ((x) << THRESHOLD_1_S) +#define THRESHOLD_1_G(x) (((x) >> THRESHOLD_1_S) & THRESHOLD_1_M) + +#define THRESHOLD_2_S 8 +#define THRESHOLD_2_M 0x3fU +#define THRESHOLD_2_V(x) ((x) << THRESHOLD_2_S) +#define THRESHOLD_2_G(x) (((x) >> THRESHOLD_2_S) & THRESHOLD_2_M) + +#define THRESHOLD_3_S 0 +#define THRESHOLD_3_M 0x3fU +#define THRESHOLD_3_V(x) ((x) << THRESHOLD_3_S) +#define THRESHOLD_3_G(x) (((x) >> THRESHOLD_3_S) & THRESHOLD_3_M) + +#define SGE_CONM_CTRL_A 0x1094 + +#define EGRTHRESHOLD_S 8 +#define EGRTHRESHOLD_M 0x3fU +#define EGRTHRESHOLD_V(x) ((x) << EGRTHRESHOLD_S) +#define EGRTHRESHOLD_G(x) (((x) >> EGRTHRESHOLD_S) & EGRTHRESHOLD_M) + +#define EGRTHRESHOLDPACKING_S 14 +#define EGRTHRESHOLDPACKING_M 0x3fU +#define EGRTHRESHOLDPACKING_V(x) ((x) << EGRTHRESHOLDPACKING_S) +#define EGRTHRESHOLDPACKING_G(x) \ + (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M) + +#define SGE_TIMESTAMP_LO_A 0x1098 +#define SGE_TIMESTAMP_HI_A 0x109c + +#define TSOP_S 28 +#define TSOP_M 0x3U +#define TSOP_V(x) ((x) << TSOP_S) +#define TSOP_G(x) (((x) >> TSOP_S) & TSOP_M) + +#define TSVAL_S 0 +#define TSVAL_M 0xfffffffU +#define TSVAL_V(x) ((x) << TSVAL_S) +#define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M) + +#define SGE_DBFIFO_STATUS_A 0x10a4 + +#define HP_INT_THRESH_S 28 +#define HP_INT_THRESH_M 0xfU +#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S) + +#define LP_INT_THRESH_S 12 +#define LP_INT_THRESH_M 0xfU +#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S) + +#define SGE_DOORBELL_CONTROL_A 0x10a8 + +#define NOCOALESCE_S 26 +#define NOCOALESCE_V(x) ((x) << NOCOALESCE_S) +#define NOCOALESCE_F NOCOALESCE_V(1U) + +#define ENABLE_DROP_S 13 +#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S) +#define ENABLE_DROP_F ENABLE_DROP_V(1U) + +#define SGE_TIMER_VALUE_0_AND_1_A 0x10b8 + +#define TIMERVALUE0_S 16 +#define TIMERVALUE0_M 0xffffU +#define TIMERVALUE0_V(x) ((x) << TIMERVALUE0_S) +#define TIMERVALUE0_G(x) (((x) >> TIMERVALUE0_S) & TIMERVALUE0_M) + +#define TIMERVALUE1_S 0 +#define TIMERVALUE1_M 0xffffU +#define TIMERVALUE1_V(x) ((x) << TIMERVALUE1_S) +#define TIMERVALUE1_G(x) (((x) >> TIMERVALUE1_S) & TIMERVALUE1_M) + +#define SGE_TIMER_VALUE_2_AND_3_A 0x10bc + +#define TIMERVALUE2_S 16 +#define TIMERVALUE2_M 0xffffU +#define TIMERVALUE2_V(x) ((x) << TIMERVALUE2_S) +#define TIMERVALUE2_G(x) (((x) >> TIMERVALUE2_S) & TIMERVALUE2_M) + +#define TIMERVALUE3_S 0 +#define TIMERVALUE3_M 0xffffU +#define TIMERVALUE3_V(x) ((x) << TIMERVALUE3_S) +#define TIMERVALUE3_G(x) (((x) >> TIMERVALUE3_S) & TIMERVALUE3_M) + +#define SGE_TIMER_VALUE_4_AND_5_A 0x10c0 + +#define TIMERVALUE4_S 16 +#define TIMERVALUE4_M 0xffffU +#define TIMERVALUE4_V(x) ((x) << TIMERVALUE4_S) +#define TIMERVALUE4_G(x) (((x) >> TIMERVALUE4_S) & TIMERVALUE4_M) + +#define TIMERVALUE5_S 0 +#define TIMERVALUE5_M 0xffffU +#define TIMERVALUE5_V(x) ((x) << TIMERVALUE5_S) +#define TIMERVALUE5_G(x) (((x) >> TIMERVALUE5_S) & TIMERVALUE5_M) + +#define SGE_DEBUG_INDEX_A 0x10cc +#define SGE_DEBUG_DATA_HIGH_A 0x10d0 +#define SGE_DEBUG_DATA_LOW_A 0x10d4 + +#define SGE_DEBUG_DATA_LOW_INDEX_2_A 0x12c8 +#define SGE_DEBUG_DATA_LOW_INDEX_3_A 0x12cc +#define SGE_DEBUG_DATA_HIGH_INDEX_10_A 0x12a8 + +#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4 +#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 + +#define HP_INT_THRESH_S 28 +#define 
HP_INT_THRESH_M 0xfU +#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S) + +#define HP_COUNT_S 16 +#define HP_COUNT_M 0x7ffU +#define HP_COUNT_G(x) (((x) >> HP_COUNT_S) & HP_COUNT_M) + +#define LP_INT_THRESH_S 12 +#define LP_INT_THRESH_M 0xfU +#define LP_INT_THRESH_V(x) ((x) << LP_INT_THRESH_S) + +#define LP_COUNT_S 0 +#define LP_COUNT_M 0x7ffU +#define LP_COUNT_G(x) (((x) >> LP_COUNT_S) & LP_COUNT_M) + +#define LP_INT_THRESH_T5_S 18 +#define LP_INT_THRESH_T5_M 0xfffU +#define LP_INT_THRESH_T5_V(x) ((x) << LP_INT_THRESH_T5_S) + +#define LP_COUNT_T5_S 0 +#define LP_COUNT_T5_M 0x3ffffU +#define LP_COUNT_T5_G(x) (((x) >> LP_COUNT_T5_S) & LP_COUNT_T5_M) + +#define SGE_DOORBELL_CONTROL_A 0x10a8 + +#define SGE_STAT_TOTAL_A 0x10e4 +#define SGE_STAT_MATCH_A 0x10e8 +#define SGE_STAT_CFG_A 0x10ec + +#define STATSOURCE_T5_S 9 +#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S) + +#define SGE_DBFIFO_STATUS2_A 0x1118 + +#define HP_INT_THRESH_T5_S 10 +#define HP_INT_THRESH_T5_M 0xfU +#define HP_INT_THRESH_T5_V(x) ((x) << HP_INT_THRESH_T5_S) + +#define HP_COUNT_T5_S 0 +#define HP_COUNT_T5_M 0x3ffU +#define HP_COUNT_T5_G(x) (((x) >> HP_COUNT_T5_S) & HP_COUNT_T5_M) + +#define ENABLE_DROP_S 13 +#define ENABLE_DROP_V(x) ((x) << ENABLE_DROP_S) +#define ENABLE_DROP_F ENABLE_DROP_V(1U) + +#define DROPPED_DB_S 0 +#define DROPPED_DB_V(x) ((x) << DROPPED_DB_S) +#define DROPPED_DB_F DROPPED_DB_V(1U) + +#define SGE_CTXT_CMD_A 0x11fc +#define SGE_DBQ_CTXT_BADDR_A 0x1084 + +/* registers for module PCIE */ +#define PCIE_PF_CFG_A 0x40 + +#define AIVEC_S 4 +#define AIVEC_M 0x3ffU +#define AIVEC_V(x) ((x) << AIVEC_S) + +#define PCIE_PF_CLI_A 0x44 +#define PCIE_INT_CAUSE_A 0x3004 + +#define UNXSPLCPLERR_S 29 +#define UNXSPLCPLERR_V(x) ((x) << UNXSPLCPLERR_S) +#define UNXSPLCPLERR_F UNXSPLCPLERR_V(1U) + +#define PCIEPINT_S 28 +#define PCIEPINT_V(x) ((x) << PCIEPINT_S) +#define PCIEPINT_F PCIEPINT_V(1U) + +#define PCIESINT_S 27 +#define PCIESINT_V(x) ((x) << PCIESINT_S) +#define PCIESINT_F PCIESINT_V(1U) + +#define RPLPERR_S 26 +#define RPLPERR_V(x) ((x) << RPLPERR_S) +#define RPLPERR_F RPLPERR_V(1U) + +#define RXWRPERR_S 25 +#define RXWRPERR_V(x) ((x) << RXWRPERR_S) +#define RXWRPERR_F RXWRPERR_V(1U) + +#define RXCPLPERR_S 24 +#define RXCPLPERR_V(x) ((x) << RXCPLPERR_S) +#define RXCPLPERR_F RXCPLPERR_V(1U) + +#define PIOTAGPERR_S 23 +#define PIOTAGPERR_V(x) ((x) << PIOTAGPERR_S) +#define PIOTAGPERR_F PIOTAGPERR_V(1U) + +#define MATAGPERR_S 22 +#define MATAGPERR_V(x) ((x) << MATAGPERR_S) +#define MATAGPERR_F MATAGPERR_V(1U) + +#define INTXCLRPERR_S 21 +#define INTXCLRPERR_V(x) ((x) << INTXCLRPERR_S) +#define INTXCLRPERR_F INTXCLRPERR_V(1U) + +#define FIDPERR_S 20 +#define FIDPERR_V(x) ((x) << FIDPERR_S) +#define FIDPERR_F FIDPERR_V(1U) + +#define CFGSNPPERR_S 19 +#define CFGSNPPERR_V(x) ((x) << CFGSNPPERR_S) +#define CFGSNPPERR_F CFGSNPPERR_V(1U) + +#define HRSPPERR_S 18 +#define HRSPPERR_V(x) ((x) << HRSPPERR_S) +#define HRSPPERR_F HRSPPERR_V(1U) + +#define HREQPERR_S 17 +#define HREQPERR_V(x) ((x) << HREQPERR_S) +#define HREQPERR_F HREQPERR_V(1U) + +#define HCNTPERR_S 16 +#define HCNTPERR_V(x) ((x) << HCNTPERR_S) +#define HCNTPERR_F HCNTPERR_V(1U) + +#define DRSPPERR_S 15 +#define DRSPPERR_V(x) ((x) << DRSPPERR_S) +#define DRSPPERR_F DRSPPERR_V(1U) + +#define DREQPERR_S 14 +#define DREQPERR_V(x) ((x) << DREQPERR_S) +#define DREQPERR_F DREQPERR_V(1U) + +#define DCNTPERR_S 13 +#define DCNTPERR_V(x) ((x) << DCNTPERR_S) +#define DCNTPERR_F DCNTPERR_V(1U) + +#define CRSPPERR_S 12 +#define CRSPPERR_V(x) ((x) << CRSPPERR_S) 
+#define CRSPPERR_F CRSPPERR_V(1U) + +#define CREQPERR_S 11 +#define CREQPERR_V(x) ((x) << CREQPERR_S) +#define CREQPERR_F CREQPERR_V(1U) + +#define CCNTPERR_S 10 +#define CCNTPERR_V(x) ((x) << CCNTPERR_S) +#define CCNTPERR_F CCNTPERR_V(1U) + +#define TARTAGPERR_S 9 +#define TARTAGPERR_V(x) ((x) << TARTAGPERR_S) +#define TARTAGPERR_F TARTAGPERR_V(1U) + +#define PIOREQPERR_S 8 +#define PIOREQPERR_V(x) ((x) << PIOREQPERR_S) +#define PIOREQPERR_F PIOREQPERR_V(1U) + +#define PIOCPLPERR_S 7 +#define PIOCPLPERR_V(x) ((x) << PIOCPLPERR_S) +#define PIOCPLPERR_F PIOCPLPERR_V(1U) + +#define MSIXDIPERR_S 6 +#define MSIXDIPERR_V(x) ((x) << MSIXDIPERR_S) +#define MSIXDIPERR_F MSIXDIPERR_V(1U) + +#define MSIXDATAPERR_S 5 +#define MSIXDATAPERR_V(x) ((x) << MSIXDATAPERR_S) +#define MSIXDATAPERR_F MSIXDATAPERR_V(1U) + +#define MSIXADDRHPERR_S 4 +#define MSIXADDRHPERR_V(x) ((x) << MSIXADDRHPERR_S) +#define MSIXADDRHPERR_F MSIXADDRHPERR_V(1U) + +#define MSIXADDRLPERR_S 3 +#define MSIXADDRLPERR_V(x) ((x) << MSIXADDRLPERR_S) +#define MSIXADDRLPERR_F MSIXADDRLPERR_V(1U) + +#define MSIDATAPERR_S 2 +#define MSIDATAPERR_V(x) ((x) << MSIDATAPERR_S) +#define MSIDATAPERR_F MSIDATAPERR_V(1U) + +#define MSIADDRHPERR_S 1 +#define MSIADDRHPERR_V(x) ((x) << MSIADDRHPERR_S) +#define MSIADDRHPERR_F MSIADDRHPERR_V(1U) + +#define MSIADDRLPERR_S 0 +#define MSIADDRLPERR_V(x) ((x) << MSIADDRLPERR_S) +#define MSIADDRLPERR_F MSIADDRLPERR_V(1U) + +#define READRSPERR_S 29 +#define READRSPERR_V(x) ((x) << READRSPERR_S) +#define READRSPERR_F READRSPERR_V(1U) + +#define TRGT1GRPPERR_S 28 +#define TRGT1GRPPERR_V(x) ((x) << TRGT1GRPPERR_S) +#define TRGT1GRPPERR_F TRGT1GRPPERR_V(1U) + +#define IPSOTPERR_S 27 +#define IPSOTPERR_V(x) ((x) << IPSOTPERR_S) +#define IPSOTPERR_F IPSOTPERR_V(1U) + +#define IPRETRYPERR_S 26 +#define IPRETRYPERR_V(x) ((x) << IPRETRYPERR_S) +#define IPRETRYPERR_F IPRETRYPERR_V(1U) + +#define IPRXDATAGRPPERR_S 25 +#define IPRXDATAGRPPERR_V(x) ((x) << IPRXDATAGRPPERR_S) +#define IPRXDATAGRPPERR_F IPRXDATAGRPPERR_V(1U) + +#define IPRXHDRGRPPERR_S 24 +#define IPRXHDRGRPPERR_V(x) ((x) << IPRXHDRGRPPERR_S) +#define IPRXHDRGRPPERR_F IPRXHDRGRPPERR_V(1U) + +#define MAGRPPERR_S 22 +#define MAGRPPERR_V(x) ((x) << MAGRPPERR_S) +#define MAGRPPERR_F MAGRPPERR_V(1U) + +#define VFIDPERR_S 21 +#define VFIDPERR_V(x) ((x) << VFIDPERR_S) +#define VFIDPERR_F VFIDPERR_V(1U) + +#define HREQWRPERR_S 16 +#define HREQWRPERR_V(x) ((x) << HREQWRPERR_S) +#define HREQWRPERR_F HREQWRPERR_V(1U) + +#define DREQWRPERR_S 13 +#define DREQWRPERR_V(x) ((x) << DREQWRPERR_S) +#define DREQWRPERR_F DREQWRPERR_V(1U) + +#define CREQRDPERR_S 11 +#define CREQRDPERR_V(x) ((x) << CREQRDPERR_S) +#define CREQRDPERR_F CREQRDPERR_V(1U) + +#define MSTTAGQPERR_S 10 +#define MSTTAGQPERR_V(x) ((x) << MSTTAGQPERR_S) +#define MSTTAGQPERR_F MSTTAGQPERR_V(1U) + +#define PIOREQGRPPERR_S 8 +#define PIOREQGRPPERR_V(x) ((x) << PIOREQGRPPERR_S) +#define PIOREQGRPPERR_F PIOREQGRPPERR_V(1U) + +#define PIOCPLGRPPERR_S 7 +#define PIOCPLGRPPERR_V(x) ((x) << PIOCPLGRPPERR_S) +#define PIOCPLGRPPERR_F PIOCPLGRPPERR_V(1U) + +#define MSIXSTIPERR_S 2 +#define MSIXSTIPERR_V(x) ((x) << MSIXSTIPERR_S) +#define MSIXSTIPERR_F MSIXSTIPERR_V(1U) + +#define MSTTIMEOUTPERR_S 1 +#define MSTTIMEOUTPERR_V(x) ((x) << MSTTIMEOUTPERR_S) +#define MSTTIMEOUTPERR_F MSTTIMEOUTPERR_V(1U) + +#define MSTGRPPERR_S 0 +#define MSTGRPPERR_V(x) ((x) << MSTGRPPERR_S) +#define MSTGRPPERR_F MSTGRPPERR_V(1U) + +#define PCIE_NONFAT_ERR_A 0x3010 +#define PCIE_CFG_SPACE_REQ_A 0x3060 +#define PCIE_CFG_SPACE_DATA_A 0x3064 
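/* Editor's illustrative sketch, not part of the original header: every register
 * field in this file follows one naming convention -- FOO_S is the bit offset,
 * FOO_M the unshifted mask, FOO_V(x) shifts a value into place, FOO_G(x)
 * extracts it, and FOO_F is the pre-shifted single-bit flag.  The hypothetical
 * helper below only shows how such fields are composed for a register write,
 * here an SGE doorbell ring for 'n' newly posted descriptors.
 */
static inline void t4_example_ring_tx_db(void __iomem *regs, unsigned int qid,
					 unsigned int n)
{
	/* QID_V()/PIDX_V() belong to SGE_PF_KDOORBELL_A, defined earlier above */
	writel(QID_V(qid) | PIDX_V(n), regs + MYPF_REG(SGE_PF_KDOORBELL_A));
}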
+#define PCIE_MEM_ACCESS_BASE_WIN_A 0x3068 + +#define PCIEOFST_S 10 +#define PCIEOFST_M 0x3fffffU +#define PCIEOFST_G(x) (((x) >> PCIEOFST_S) & PCIEOFST_M) + +#define BIR_S 8 +#define BIR_M 0x3U +#define BIR_V(x) ((x) << BIR_S) +#define BIR_G(x) (((x) >> BIR_S) & BIR_M) + +#define WINDOW_S 0 +#define WINDOW_M 0xffU +#define WINDOW_V(x) ((x) << WINDOW_S) +#define WINDOW_G(x) (((x) >> WINDOW_S) & WINDOW_M) + +#define PCIE_MEM_ACCESS_OFFSET_A 0x306c + +#define ENABLE_S 30 +#define ENABLE_V(x) ((x) << ENABLE_S) +#define ENABLE_F ENABLE_V(1U) + +#define LOCALCFG_S 28 +#define LOCALCFG_V(x) ((x) << LOCALCFG_S) +#define LOCALCFG_F LOCALCFG_V(1U) + +#define FUNCTION_S 12 +#define FUNCTION_V(x) ((x) << FUNCTION_S) + +#define REGISTER_S 0 +#define REGISTER_V(x) ((x) << REGISTER_S) + +#define PFNUM_S 0 +#define PFNUM_V(x) ((x) << PFNUM_S) + +#define PCIE_FW_A 0x30b8 +#define PCIE_FW_PF_A 0x30bc + +#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 + +#define RNPP_S 31 +#define RNPP_V(x) ((x) << RNPP_S) +#define RNPP_F RNPP_V(1U) + +#define RPCP_S 29 +#define RPCP_V(x) ((x) << RPCP_S) +#define RPCP_F RPCP_V(1U) + +#define RCIP_S 27 +#define RCIP_V(x) ((x) << RCIP_S) +#define RCIP_F RCIP_V(1U) + +#define RCCP_S 26 +#define RCCP_V(x) ((x) << RCCP_S) +#define RCCP_F RCCP_V(1U) + +#define RFTP_S 23 +#define RFTP_V(x) ((x) << RFTP_S) +#define RFTP_F RFTP_V(1U) + +#define PTRP_S 20 +#define PTRP_V(x) ((x) << PTRP_S) +#define PTRP_F PTRP_V(1U) + +#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A 0x59a4 + +#define TPCP_S 30 +#define TPCP_V(x) ((x) << TPCP_S) +#define TPCP_F TPCP_V(1U) + +#define TNPP_S 29 +#define TNPP_V(x) ((x) << TNPP_S) +#define TNPP_F TNPP_V(1U) + +#define TFTP_S 28 +#define TFTP_V(x) ((x) << TFTP_S) +#define TFTP_F TFTP_V(1U) + +#define TCAP_S 27 +#define TCAP_V(x) ((x) << TCAP_S) +#define TCAP_F TCAP_V(1U) + +#define TCIP_S 26 +#define TCIP_V(x) ((x) << TCIP_S) +#define TCIP_F TCIP_V(1U) + +#define RCAP_S 25 +#define RCAP_V(x) ((x) << RCAP_S) +#define RCAP_F RCAP_V(1U) + +#define PLUP_S 23 +#define PLUP_V(x) ((x) << PLUP_S) +#define PLUP_F PLUP_V(1U) + +#define PLDN_S 22 +#define PLDN_V(x) ((x) << PLDN_S) +#define PLDN_F PLDN_V(1U) + +#define OTDD_S 21 +#define OTDD_V(x) ((x) << OTDD_S) +#define OTDD_F OTDD_V(1U) + +#define GTRP_S 20 +#define GTRP_V(x) ((x) << GTRP_S) +#define GTRP_F GTRP_V(1U) + +#define RDPE_S 18 +#define RDPE_V(x) ((x) << RDPE_S) +#define RDPE_F RDPE_V(1U) + +#define TDCE_S 17 +#define TDCE_V(x) ((x) << TDCE_S) +#define TDCE_F TDCE_V(1U) + +#define TDUE_S 16 +#define TDUE_V(x) ((x) << TDUE_S) +#define TDUE_F TDUE_V(1U) + +/* registers for module MC */ +#define MC_INT_CAUSE_A 0x7518 +#define MC_P_INT_CAUSE_A 0x41318 + +#define ECC_UE_INT_CAUSE_S 2 +#define ECC_UE_INT_CAUSE_V(x) ((x) << ECC_UE_INT_CAUSE_S) +#define ECC_UE_INT_CAUSE_F ECC_UE_INT_CAUSE_V(1U) + +#define ECC_CE_INT_CAUSE_S 1 +#define ECC_CE_INT_CAUSE_V(x) ((x) << ECC_CE_INT_CAUSE_S) +#define ECC_CE_INT_CAUSE_F ECC_CE_INT_CAUSE_V(1U) + +#define PERR_INT_CAUSE_S 0 +#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S) +#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U) + +#define MC_ECC_STATUS_A 0x751c +#define MC_P_ECC_STATUS_A 0x4131c + +#define ECC_CECNT_S 16 +#define ECC_CECNT_M 0xffffU +#define ECC_CECNT_V(x) ((x) << ECC_CECNT_S) +#define ECC_CECNT_G(x) (((x) >> ECC_CECNT_S) & ECC_CECNT_M) + +#define ECC_UECNT_S 0 +#define ECC_UECNT_M 0xffffU +#define ECC_UECNT_V(x) ((x) << ECC_UECNT_S) +#define ECC_UECNT_G(x) (((x) >> ECC_UECNT_S) & ECC_UECNT_M) + +#define MC_BIST_CMD_A 0x7600 + +#define START_BIST_S 31 
+#define START_BIST_V(x) ((x) << START_BIST_S) +#define START_BIST_F START_BIST_V(1U) + +#define BIST_CMD_GAP_S 8 +#define BIST_CMD_GAP_V(x) ((x) << BIST_CMD_GAP_S) + +#define BIST_OPCODE_S 0 +#define BIST_OPCODE_V(x) ((x) << BIST_OPCODE_S) + +#define MC_BIST_CMD_ADDR_A 0x7604 +#define MC_BIST_CMD_LEN_A 0x7608 +#define MC_BIST_DATA_PATTERN_A 0x760c + +#define MC_BIST_STATUS_RDATA_A 0x7688 + +/* registers for module MA */ +#define MA_EDRAM0_BAR_A 0x77c0 + +#define EDRAM0_SIZE_S 0 +#define EDRAM0_SIZE_M 0xfffU +#define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S) +#define EDRAM0_SIZE_G(x) (((x) >> EDRAM0_SIZE_S) & EDRAM0_SIZE_M) + +#define MA_EDRAM1_BAR_A 0x77c4 + +#define EDRAM1_SIZE_S 0 +#define EDRAM1_SIZE_M 0xfffU +#define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S) +#define EDRAM1_SIZE_G(x) (((x) >> EDRAM1_SIZE_S) & EDRAM1_SIZE_M) + +#define MA_EXT_MEMORY_BAR_A 0x77c8 + +#define EXT_MEM_SIZE_S 0 +#define EXT_MEM_SIZE_M 0xfffU +#define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S) +#define EXT_MEM_SIZE_G(x) (((x) >> EXT_MEM_SIZE_S) & EXT_MEM_SIZE_M) + +#define MA_EXT_MEMORY1_BAR_A 0x7808 + +#define EXT_MEM1_SIZE_S 0 +#define EXT_MEM1_SIZE_M 0xfffU +#define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S) +#define EXT_MEM1_SIZE_G(x) (((x) >> EXT_MEM1_SIZE_S) & EXT_MEM1_SIZE_M) + +#define MA_EXT_MEMORY0_BAR_A 0x77c8 + +#define EXT_MEM0_SIZE_S 0 +#define EXT_MEM0_SIZE_M 0xfffU +#define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S) +#define EXT_MEM0_SIZE_G(x) (((x) >> EXT_MEM0_SIZE_S) & EXT_MEM0_SIZE_M) + +#define MA_TARGET_MEM_ENABLE_A 0x77d8 + +#define EXT_MEM_ENABLE_S 2 +#define EXT_MEM_ENABLE_V(x) ((x) << EXT_MEM_ENABLE_S) +#define EXT_MEM_ENABLE_F EXT_MEM_ENABLE_V(1U) + +#define EDRAM1_ENABLE_S 1 +#define EDRAM1_ENABLE_V(x) ((x) << EDRAM1_ENABLE_S) +#define EDRAM1_ENABLE_F EDRAM1_ENABLE_V(1U) + +#define EDRAM0_ENABLE_S 0 +#define EDRAM0_ENABLE_V(x) ((x) << EDRAM0_ENABLE_S) +#define EDRAM0_ENABLE_F EDRAM0_ENABLE_V(1U) + +#define EXT_MEM1_ENABLE_S 4 +#define EXT_MEM1_ENABLE_V(x) ((x) << EXT_MEM1_ENABLE_S) +#define EXT_MEM1_ENABLE_F EXT_MEM1_ENABLE_V(1U) + +#define EXT_MEM0_ENABLE_S 2 +#define EXT_MEM0_ENABLE_V(x) ((x) << EXT_MEM0_ENABLE_S) +#define EXT_MEM0_ENABLE_F EXT_MEM0_ENABLE_V(1U) + +#define MA_INT_CAUSE_A 0x77e0 + +#define MEM_PERR_INT_CAUSE_S 1 +#define MEM_PERR_INT_CAUSE_V(x) ((x) << MEM_PERR_INT_CAUSE_S) +#define MEM_PERR_INT_CAUSE_F MEM_PERR_INT_CAUSE_V(1U) + +#define MEM_WRAP_INT_CAUSE_S 0 +#define MEM_WRAP_INT_CAUSE_V(x) ((x) << MEM_WRAP_INT_CAUSE_S) +#define MEM_WRAP_INT_CAUSE_F MEM_WRAP_INT_CAUSE_V(1U) + +#define MA_INT_WRAP_STATUS_A 0x77e4 + +#define MEM_WRAP_ADDRESS_S 4 +#define MEM_WRAP_ADDRESS_M 0xfffffffU +#define MEM_WRAP_ADDRESS_G(x) (((x) >> MEM_WRAP_ADDRESS_S) & MEM_WRAP_ADDRESS_M) + +#define MEM_WRAP_CLIENT_NUM_S 0 +#define MEM_WRAP_CLIENT_NUM_M 0xfU +#define MEM_WRAP_CLIENT_NUM_G(x) \ + (((x) >> MEM_WRAP_CLIENT_NUM_S) & MEM_WRAP_CLIENT_NUM_M) + +#define MA_PARITY_ERROR_STATUS_A 0x77f4 +#define MA_PARITY_ERROR_STATUS1_A 0x77f4 +#define MA_PARITY_ERROR_STATUS2_A 0x7804 + +/* registers for module EDC_0 */ +#define EDC_0_BASE_ADDR 0x7900 + +#define EDC_BIST_CMD_A 0x7904 +#define EDC_BIST_CMD_ADDR_A 0x7908 +#define EDC_BIST_CMD_LEN_A 0x790c +#define EDC_BIST_DATA_PATTERN_A 0x7910 +#define EDC_BIST_STATUS_RDATA_A 0x7928 +#define EDC_INT_CAUSE_A 0x7978 + +#define ECC_UE_PAR_S 5 +#define ECC_UE_PAR_V(x) ((x) << ECC_UE_PAR_S) +#define ECC_UE_PAR_F ECC_UE_PAR_V(1U) + +#define ECC_CE_PAR_S 4 +#define ECC_CE_PAR_V(x) ((x) << ECC_CE_PAR_S) +#define ECC_CE_PAR_F ECC_CE_PAR_V(1U) + +#define 
PERR_PAR_CAUSE_S 3 +#define PERR_PAR_CAUSE_V(x) ((x) << PERR_PAR_CAUSE_S) +#define PERR_PAR_CAUSE_F PERR_PAR_CAUSE_V(1U) + +#define EDC_ECC_STATUS_A 0x797c + +/* registers for module EDC_1 */ +#define EDC_1_BASE_ADDR 0x7980 + +/* registers for module CIM */ +#define CIM_BOOT_CFG_A 0x7b00 +#define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290 + +#define BOOTADDR_M 0xffffff00U + +#define UPCRST_S 0 +#define UPCRST_V(x) ((x) << UPCRST_S) +#define UPCRST_F UPCRST_V(1U) + +#define CIM_PF_MAILBOX_DATA_A 0x240 +#define CIM_PF_MAILBOX_CTRL_A 0x280 + +#define MBMSGVALID_S 3 +#define MBMSGVALID_V(x) ((x) << MBMSGVALID_S) +#define MBMSGVALID_F MBMSGVALID_V(1U) + +#define MBINTREQ_S 2 +#define MBINTREQ_V(x) ((x) << MBINTREQ_S) +#define MBINTREQ_F MBINTREQ_V(1U) + +#define MBOWNER_S 0 +#define MBOWNER_M 0x3U +#define MBOWNER_V(x) ((x) << MBOWNER_S) +#define MBOWNER_G(x) (((x) >> MBOWNER_S) & MBOWNER_M) + +#define CIM_PF_HOST_INT_ENABLE_A 0x288 + +#define MBMSGRDYINTEN_S 19 +#define MBMSGRDYINTEN_V(x) ((x) << MBMSGRDYINTEN_S) +#define MBMSGRDYINTEN_F MBMSGRDYINTEN_V(1U) + +#define CIM_PF_HOST_INT_CAUSE_A 0x28c + +#define MBMSGRDYINT_S 19 +#define MBMSGRDYINT_V(x) ((x) << MBMSGRDYINT_S) +#define MBMSGRDYINT_F MBMSGRDYINT_V(1U) + +#define CIM_HOST_INT_CAUSE_A 0x7b2c + +#define TIEQOUTPARERRINT_S 20 +#define TIEQOUTPARERRINT_V(x) ((x) << TIEQOUTPARERRINT_S) +#define TIEQOUTPARERRINT_F TIEQOUTPARERRINT_V(1U) + +#define TIEQINPARERRINT_S 19 +#define TIEQINPARERRINT_V(x) ((x) << TIEQINPARERRINT_S) +#define TIEQINPARERRINT_F TIEQINPARERRINT_V(1U) + +#define PREFDROPINT_S 1 +#define PREFDROPINT_V(x) ((x) << PREFDROPINT_S) +#define PREFDROPINT_F PREFDROPINT_V(1U) + +#define UPACCNONZERO_S 0 +#define UPACCNONZERO_V(x) ((x) << UPACCNONZERO_S) +#define UPACCNONZERO_F UPACCNONZERO_V(1U) + +#define MBHOSTPARERR_S 18 +#define MBHOSTPARERR_V(x) ((x) << MBHOSTPARERR_S) +#define MBHOSTPARERR_F MBHOSTPARERR_V(1U) + +#define MBUPPARERR_S 17 +#define MBUPPARERR_V(x) ((x) << MBUPPARERR_S) +#define MBUPPARERR_F MBUPPARERR_V(1U) + +#define IBQTP0PARERR_S 16 +#define IBQTP0PARERR_V(x) ((x) << IBQTP0PARERR_S) +#define IBQTP0PARERR_F IBQTP0PARERR_V(1U) + +#define IBQTP1PARERR_S 15 +#define IBQTP1PARERR_V(x) ((x) << IBQTP1PARERR_S) +#define IBQTP1PARERR_F IBQTP1PARERR_V(1U) + +#define IBQULPPARERR_S 14 +#define IBQULPPARERR_V(x) ((x) << IBQULPPARERR_S) +#define IBQULPPARERR_F IBQULPPARERR_V(1U) + +#define IBQSGELOPARERR_S 13 +#define IBQSGELOPARERR_V(x) ((x) << IBQSGELOPARERR_S) +#define IBQSGELOPARERR_F IBQSGELOPARERR_V(1U) + +#define IBQSGEHIPARERR_S 12 +#define IBQSGEHIPARERR_V(x) ((x) << IBQSGEHIPARERR_S) +#define IBQSGEHIPARERR_F IBQSGEHIPARERR_V(1U) + +#define IBQNCSIPARERR_S 11 +#define IBQNCSIPARERR_V(x) ((x) << IBQNCSIPARERR_S) +#define IBQNCSIPARERR_F IBQNCSIPARERR_V(1U) + +#define OBQULP0PARERR_S 10 +#define OBQULP0PARERR_V(x) ((x) << OBQULP0PARERR_S) +#define OBQULP0PARERR_F OBQULP0PARERR_V(1U) + +#define OBQULP1PARERR_S 9 +#define OBQULP1PARERR_V(x) ((x) << OBQULP1PARERR_S) +#define OBQULP1PARERR_F OBQULP1PARERR_V(1U) + +#define OBQULP2PARERR_S 8 +#define OBQULP2PARERR_V(x) ((x) << OBQULP2PARERR_S) +#define OBQULP2PARERR_F OBQULP2PARERR_V(1U) + +#define OBQULP3PARERR_S 7 +#define OBQULP3PARERR_V(x) ((x) << OBQULP3PARERR_S) +#define OBQULP3PARERR_F OBQULP3PARERR_V(1U) + +#define OBQSGEPARERR_S 6 +#define OBQSGEPARERR_V(x) ((x) << OBQSGEPARERR_S) +#define OBQSGEPARERR_F OBQSGEPARERR_V(1U) + +#define OBQNCSIPARERR_S 5 +#define OBQNCSIPARERR_V(x) ((x) << OBQNCSIPARERR_S) +#define OBQNCSIPARERR_F OBQNCSIPARERR_V(1U) + +#define 
CIM_HOST_UPACC_INT_CAUSE_A 0x7b34 + +#define EEPROMWRINT_S 30 +#define EEPROMWRINT_V(x) ((x) << EEPROMWRINT_S) +#define EEPROMWRINT_F EEPROMWRINT_V(1U) + +#define TIMEOUTMAINT_S 29 +#define TIMEOUTMAINT_V(x) ((x) << TIMEOUTMAINT_S) +#define TIMEOUTMAINT_F TIMEOUTMAINT_V(1U) + +#define TIMEOUTINT_S 28 +#define TIMEOUTINT_V(x) ((x) << TIMEOUTINT_S) +#define TIMEOUTINT_F TIMEOUTINT_V(1U) + +#define RSPOVRLOOKUPINT_S 27 +#define RSPOVRLOOKUPINT_V(x) ((x) << RSPOVRLOOKUPINT_S) +#define RSPOVRLOOKUPINT_F RSPOVRLOOKUPINT_V(1U) + +#define REQOVRLOOKUPINT_S 26 +#define REQOVRLOOKUPINT_V(x) ((x) << REQOVRLOOKUPINT_S) +#define REQOVRLOOKUPINT_F REQOVRLOOKUPINT_V(1U) + +#define BLKWRPLINT_S 25 +#define BLKWRPLINT_V(x) ((x) << BLKWRPLINT_S) +#define BLKWRPLINT_F BLKWRPLINT_V(1U) + +#define BLKRDPLINT_S 24 +#define BLKRDPLINT_V(x) ((x) << BLKRDPLINT_S) +#define BLKRDPLINT_F BLKRDPLINT_V(1U) + +#define SGLWRPLINT_S 23 +#define SGLWRPLINT_V(x) ((x) << SGLWRPLINT_S) +#define SGLWRPLINT_F SGLWRPLINT_V(1U) + +#define SGLRDPLINT_S 22 +#define SGLRDPLINT_V(x) ((x) << SGLRDPLINT_S) +#define SGLRDPLINT_F SGLRDPLINT_V(1U) + +#define BLKWRCTLINT_S 21 +#define BLKWRCTLINT_V(x) ((x) << BLKWRCTLINT_S) +#define BLKWRCTLINT_F BLKWRCTLINT_V(1U) + +#define BLKRDCTLINT_S 20 +#define BLKRDCTLINT_V(x) ((x) << BLKRDCTLINT_S) +#define BLKRDCTLINT_F BLKRDCTLINT_V(1U) + +#define SGLWRCTLINT_S 19 +#define SGLWRCTLINT_V(x) ((x) << SGLWRCTLINT_S) +#define SGLWRCTLINT_F SGLWRCTLINT_V(1U) + +#define SGLRDCTLINT_S 18 +#define SGLRDCTLINT_V(x) ((x) << SGLRDCTLINT_S) +#define SGLRDCTLINT_F SGLRDCTLINT_V(1U) + +#define BLKWREEPROMINT_S 17 +#define BLKWREEPROMINT_V(x) ((x) << BLKWREEPROMINT_S) +#define BLKWREEPROMINT_F BLKWREEPROMINT_V(1U) + +#define BLKRDEEPROMINT_S 16 +#define BLKRDEEPROMINT_V(x) ((x) << BLKRDEEPROMINT_S) +#define BLKRDEEPROMINT_F BLKRDEEPROMINT_V(1U) + +#define SGLWREEPROMINT_S 15 +#define SGLWREEPROMINT_V(x) ((x) << SGLWREEPROMINT_S) +#define SGLWREEPROMINT_F SGLWREEPROMINT_V(1U) + +#define SGLRDEEPROMINT_S 14 +#define SGLRDEEPROMINT_V(x) ((x) << SGLRDEEPROMINT_S) +#define SGLRDEEPROMINT_F SGLRDEEPROMINT_V(1U) + +#define BLKWRFLASHINT_S 13 +#define BLKWRFLASHINT_V(x) ((x) << BLKWRFLASHINT_S) +#define BLKWRFLASHINT_F BLKWRFLASHINT_V(1U) + +#define BLKRDFLASHINT_S 12 +#define BLKRDFLASHINT_V(x) ((x) << BLKRDFLASHINT_S) +#define BLKRDFLASHINT_F BLKRDFLASHINT_V(1U) + +#define SGLWRFLASHINT_S 11 +#define SGLWRFLASHINT_V(x) ((x) << SGLWRFLASHINT_S) +#define SGLWRFLASHINT_F SGLWRFLASHINT_V(1U) + +#define SGLRDFLASHINT_S 10 +#define SGLRDFLASHINT_V(x) ((x) << SGLRDFLASHINT_S) +#define SGLRDFLASHINT_F SGLRDFLASHINT_V(1U) + +#define BLKWRBOOTINT_S 9 +#define BLKWRBOOTINT_V(x) ((x) << BLKWRBOOTINT_S) +#define BLKWRBOOTINT_F BLKWRBOOTINT_V(1U) + +#define BLKRDBOOTINT_S 8 +#define BLKRDBOOTINT_V(x) ((x) << BLKRDBOOTINT_S) +#define BLKRDBOOTINT_F BLKRDBOOTINT_V(1U) + +#define SGLWRBOOTINT_S 7 +#define SGLWRBOOTINT_V(x) ((x) << SGLWRBOOTINT_S) +#define SGLWRBOOTINT_F SGLWRBOOTINT_V(1U) + +#define SGLRDBOOTINT_S 6 +#define SGLRDBOOTINT_V(x) ((x) << SGLRDBOOTINT_S) +#define SGLRDBOOTINT_F SGLRDBOOTINT_V(1U) + +#define ILLWRBEINT_S 5 +#define ILLWRBEINT_V(x) ((x) << ILLWRBEINT_S) +#define ILLWRBEINT_F ILLWRBEINT_V(1U) + +#define ILLRDBEINT_S 4 +#define ILLRDBEINT_V(x) ((x) << ILLRDBEINT_S) +#define ILLRDBEINT_F ILLRDBEINT_V(1U) + +#define ILLRDINT_S 3 +#define ILLRDINT_V(x) ((x) << ILLRDINT_S) +#define ILLRDINT_F ILLRDINT_V(1U) + +#define ILLWRINT_S 2 +#define ILLWRINT_V(x) ((x) << ILLWRINT_S) +#define ILLWRINT_F ILLWRINT_V(1U) + 
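Cause registers such as this one are typically decoded against a table of { mask, message } pairs rather than a long chain of if statements. A hedged sketch of that pattern, using only a handful of the bits defined here and a plain printf in place of the driver's own logging; the masks are repeated locally (bits 28, 5, 3 and 2, as above) so the fragment stands alone:

#include <stdio.h>

/* Repeated from the CIM_HOST_UPACC_INT_CAUSE definitions above. */
#define TIMEOUTINT_F  (1U << 28)
#define ILLWRBEINT_F  (1U << 5)
#define ILLRDINT_F    (1U << 3)
#define ILLWRINT_F    (1U << 2)

struct intr_desc {
	unsigned int mask;
	const char *msg;
};

static const struct intr_desc cim_upacc_intr[] = {
	{ TIMEOUTINT_F, "host access timeout" },
	{ ILLWRBEINT_F, "illegal write byte enables" },
	{ ILLRDINT_F,   "illegal read" },
	{ ILLWRINT_F,   "illegal write" },
	{ 0, NULL }
};

/* Walk the table and report every bit that is set in the cause value. */
static void report_cim_upacc(unsigned int cause)
{
	const struct intr_desc *d;

	for (d = cim_upacc_intr; d->mask; d++)
		if (cause & d->mask)
			printf("CIM UP access: %s\n", d->msg);
}

int main(void)
{
	report_cim_upacc(ILLWRINT_F | TIMEOUTINT_F);	/* sample cause value */
	return 0;
}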
+#define ILLTRANSINT_S 1 +#define ILLTRANSINT_V(x) ((x) << ILLTRANSINT_S) +#define ILLTRANSINT_F ILLTRANSINT_V(1U) + +#define RSVDSPACEINT_S 0 +#define RSVDSPACEINT_V(x) ((x) << RSVDSPACEINT_S) +#define RSVDSPACEINT_F RSVDSPACEINT_V(1U) + +/* registers for module TP */ +#define DBGLAWHLF_S 23 +#define DBGLAWHLF_V(x) ((x) << DBGLAWHLF_S) +#define DBGLAWHLF_F DBGLAWHLF_V(1U) + +#define DBGLAWPTR_S 16 +#define DBGLAWPTR_M 0x7fU +#define DBGLAWPTR_G(x) (((x) >> DBGLAWPTR_S) & DBGLAWPTR_M) + +#define DBGLAENABLE_S 12 +#define DBGLAENABLE_V(x) ((x) << DBGLAENABLE_S) +#define DBGLAENABLE_F DBGLAENABLE_V(1U) + +#define DBGLARPTR_S 0 +#define DBGLARPTR_M 0x7fU +#define DBGLARPTR_V(x) ((x) << DBGLARPTR_S) + +#define TP_DBG_LA_DATAL_A 0x7ed8 +#define TP_DBG_LA_CONFIG_A 0x7ed4 +#define TP_OUT_CONFIG_A 0x7d04 +#define TP_GLOBAL_CONFIG_A 0x7d08 + +#define DBGLAMODE_S 14 +#define DBGLAMODE_M 0x3U +#define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M) + +#define FIVETUPLELOOKUP_S 17 +#define FIVETUPLELOOKUP_M 0x3U +#define FIVETUPLELOOKUP_V(x) ((x) << FIVETUPLELOOKUP_S) +#define FIVETUPLELOOKUP_G(x) (((x) >> FIVETUPLELOOKUP_S) & FIVETUPLELOOKUP_M) + +#define TP_PARA_REG2_A 0x7d68 + +#define MAXRXDATA_S 16 +#define MAXRXDATA_M 0xffffU +#define MAXRXDATA_G(x) (((x) >> MAXRXDATA_S) & MAXRXDATA_M) + +#define TP_TIMER_RESOLUTION_A 0x7d90 + +#define TIMERRESOLUTION_S 16 +#define TIMERRESOLUTION_M 0xffU +#define TIMERRESOLUTION_G(x) (((x) >> TIMERRESOLUTION_S) & TIMERRESOLUTION_M) + +#define TIMESTAMPRESOLUTION_S 8 +#define TIMESTAMPRESOLUTION_M 0xffU +#define TIMESTAMPRESOLUTION_G(x) \ + (((x) >> TIMESTAMPRESOLUTION_S) & TIMESTAMPRESOLUTION_M) + +#define DELAYEDACKRESOLUTION_S 0 +#define DELAYEDACKRESOLUTION_M 0xffU +#define DELAYEDACKRESOLUTION_G(x) \ + (((x) >> DELAYEDACKRESOLUTION_S) & DELAYEDACKRESOLUTION_M) + +#define TP_SHIFT_CNT_A 0x7dc0 +#define TP_RXT_MIN_A 0x7d98 +#define TP_RXT_MAX_A 0x7d9c +#define TP_PERS_MIN_A 0x7da0 +#define TP_PERS_MAX_A 0x7da4 +#define TP_KEEP_IDLE_A 0x7da8 +#define TP_KEEP_INTVL_A 0x7dac +#define TP_INIT_SRTT_A 0x7db0 +#define TP_DACK_TIMER_A 0x7db4 +#define TP_FINWAIT2_TIMER_A 0x7db8 + +#define INITSRTT_S 0 +#define INITSRTT_M 0xffffU +#define INITSRTT_G(x) (((x) >> INITSRTT_S) & INITSRTT_M) + +#define PERSMAX_S 0 +#define PERSMAX_M 0x3fffffffU +#define PERSMAX_V(x) ((x) << PERSMAX_S) +#define PERSMAX_G(x) (((x) >> PERSMAX_S) & PERSMAX_M) + +#define SYNSHIFTMAX_S 24 +#define SYNSHIFTMAX_M 0xffU +#define SYNSHIFTMAX_V(x) ((x) << SYNSHIFTMAX_S) +#define SYNSHIFTMAX_G(x) (((x) >> SYNSHIFTMAX_S) & SYNSHIFTMAX_M) + +#define RXTSHIFTMAXR1_S 20 +#define RXTSHIFTMAXR1_M 0xfU +#define RXTSHIFTMAXR1_V(x) ((x) << RXTSHIFTMAXR1_S) +#define RXTSHIFTMAXR1_G(x) (((x) >> RXTSHIFTMAXR1_S) & RXTSHIFTMAXR1_M) + +#define RXTSHIFTMAXR2_S 16 +#define RXTSHIFTMAXR2_M 0xfU +#define RXTSHIFTMAXR2_V(x) ((x) << RXTSHIFTMAXR2_S) +#define RXTSHIFTMAXR2_G(x) (((x) >> RXTSHIFTMAXR2_S) & RXTSHIFTMAXR2_M) + +#define PERSHIFTBACKOFFMAX_S 12 +#define PERSHIFTBACKOFFMAX_M 0xfU +#define PERSHIFTBACKOFFMAX_V(x) ((x) << PERSHIFTBACKOFFMAX_S) +#define PERSHIFTBACKOFFMAX_G(x) \ + (((x) >> PERSHIFTBACKOFFMAX_S) & PERSHIFTBACKOFFMAX_M) + +#define PERSHIFTMAX_S 8 +#define PERSHIFTMAX_M 0xfU +#define PERSHIFTMAX_V(x) ((x) << PERSHIFTMAX_S) +#define PERSHIFTMAX_G(x) (((x) >> PERSHIFTMAX_S) & PERSHIFTMAX_M) + +#define KEEPALIVEMAXR1_S 4 +#define KEEPALIVEMAXR1_M 0xfU +#define KEEPALIVEMAXR1_V(x) ((x) << KEEPALIVEMAXR1_S) +#define KEEPALIVEMAXR1_G(x) (((x) >> KEEPALIVEMAXR1_S) & KEEPALIVEMAXR1_M) + +#define 
KEEPALIVEMAXR2_S 0 +#define KEEPALIVEMAXR2_M 0xfU +#define KEEPALIVEMAXR2_V(x) ((x) << KEEPALIVEMAXR2_S) +#define KEEPALIVEMAXR2_G(x) (((x) >> KEEPALIVEMAXR2_S) & KEEPALIVEMAXR2_M) + +#define ROWINDEX_S 16 +#define ROWINDEX_V(x) ((x) << ROWINDEX_S) + +#define TP_CCTRL_TABLE_A 0x7ddc +#define TP_MTU_TABLE_A 0x7de4 + +#define MTUINDEX_S 24 +#define MTUINDEX_V(x) ((x) << MTUINDEX_S) + +#define MTUWIDTH_S 16 +#define MTUWIDTH_M 0xfU +#define MTUWIDTH_V(x) ((x) << MTUWIDTH_S) +#define MTUWIDTH_G(x) (((x) >> MTUWIDTH_S) & MTUWIDTH_M) + +#define MTUVALUE_S 0 +#define MTUVALUE_M 0x3fffU +#define MTUVALUE_V(x) ((x) << MTUVALUE_S) +#define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M) + +#define TP_RSS_LKP_TABLE_A 0x7dec + +#define LKPTBLROWVLD_S 31 +#define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S) +#define LKPTBLROWVLD_F LKPTBLROWVLD_V(1U) + +#define LKPTBLQUEUE1_S 10 +#define LKPTBLQUEUE1_M 0x3ffU +#define LKPTBLQUEUE1_G(x) (((x) >> LKPTBLQUEUE1_S) & LKPTBLQUEUE1_M) + +#define LKPTBLQUEUE0_S 0 +#define LKPTBLQUEUE0_M 0x3ffU +#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M) + +#define TP_PIO_ADDR_A 0x7e40 +#define TP_PIO_DATA_A 0x7e44 +#define TP_MIB_INDEX_A 0x7e50 +#define TP_MIB_DATA_A 0x7e54 +#define TP_INT_CAUSE_A 0x7e74 + +#define FLMTXFLSTEMPTY_S 30 +#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S) +#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U) + +#define TP_VLAN_PRI_MAP_A 0x140 + +#define FRAGMENTATION_S 9 +#define FRAGMENTATION_V(x) ((x) << FRAGMENTATION_S) +#define FRAGMENTATION_F FRAGMENTATION_V(1U) + +#define MPSHITTYPE_S 8 +#define MPSHITTYPE_V(x) ((x) << MPSHITTYPE_S) +#define MPSHITTYPE_F MPSHITTYPE_V(1U) + +#define MACMATCH_S 7 +#define MACMATCH_V(x) ((x) << MACMATCH_S) +#define MACMATCH_F MACMATCH_V(1U) + +#define ETHERTYPE_S 6 +#define ETHERTYPE_V(x) ((x) << ETHERTYPE_S) +#define ETHERTYPE_F ETHERTYPE_V(1U) + +#define PROTOCOL_S 5 +#define PROTOCOL_V(x) ((x) << PROTOCOL_S) +#define PROTOCOL_F PROTOCOL_V(1U) + +#define TOS_S 4 +#define TOS_V(x) ((x) << TOS_S) +#define TOS_F TOS_V(1U) + +#define VLAN_S 3 +#define VLAN_V(x) ((x) << VLAN_S) +#define VLAN_F VLAN_V(1U) + +#define VNIC_ID_S 2 +#define VNIC_ID_V(x) ((x) << VNIC_ID_S) +#define VNIC_ID_F VNIC_ID_V(1U) + +#define PORT_S 1 +#define PORT_V(x) ((x) << PORT_S) +#define PORT_F PORT_V(1U) + +#define FCOE_S 0 +#define FCOE_V(x) ((x) << FCOE_S) +#define FCOE_F FCOE_V(1U) + +#define FILTERMODE_S 15 +#define FILTERMODE_V(x) ((x) << FILTERMODE_S) +#define FILTERMODE_F FILTERMODE_V(1U) + +#define FCOEMASK_S 14 +#define FCOEMASK_V(x) ((x) << FCOEMASK_S) +#define FCOEMASK_F FCOEMASK_V(1U) + +#define TP_INGRESS_CONFIG_A 0x141 + +#define VNIC_S 11 +#define VNIC_V(x) ((x) << VNIC_S) +#define VNIC_F VNIC_V(1U) + +#define CSUM_HAS_PSEUDO_HDR_S 10 +#define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S) +#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U) + +#define TP_MIB_MAC_IN_ERR_0_A 0x0 +#define TP_MIB_TCP_OUT_RST_A 0xc +#define TP_MIB_TCP_IN_SEG_HI_A 0x10 +#define TP_MIB_TCP_IN_SEG_LO_A 0x11 +#define TP_MIB_TCP_OUT_SEG_HI_A 0x12 +#define TP_MIB_TCP_OUT_SEG_LO_A 0x13 +#define TP_MIB_TCP_RXT_SEG_HI_A 0x14 +#define TP_MIB_TCP_RXT_SEG_LO_A 0x15 +#define TP_MIB_TNL_CNG_DROP_0_A 0x18 +#define TP_MIB_TCP_V6IN_ERR_0_A 0x28 +#define TP_MIB_TCP_V6OUT_RST_A 0x2c +#define TP_MIB_OFD_ARP_DROP_A 0x36 +#define TP_MIB_TNL_DROP_0_A 0x44 +#define TP_MIB_OFD_VLN_DROP_0_A 0x58 + +#define ULP_TX_INT_CAUSE_A 0x8dcc + +#define PBL_BOUND_ERR_CH3_S 31 +#define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S) +#define 
PBL_BOUND_ERR_CH3_F PBL_BOUND_ERR_CH3_V(1U) + +#define PBL_BOUND_ERR_CH2_S 30 +#define PBL_BOUND_ERR_CH2_V(x) ((x) << PBL_BOUND_ERR_CH2_S) +#define PBL_BOUND_ERR_CH2_F PBL_BOUND_ERR_CH2_V(1U) + +#define PBL_BOUND_ERR_CH1_S 29 +#define PBL_BOUND_ERR_CH1_V(x) ((x) << PBL_BOUND_ERR_CH1_S) +#define PBL_BOUND_ERR_CH1_F PBL_BOUND_ERR_CH1_V(1U) + +#define PBL_BOUND_ERR_CH0_S 28 +#define PBL_BOUND_ERR_CH0_V(x) ((x) << PBL_BOUND_ERR_CH0_S) +#define PBL_BOUND_ERR_CH0_F PBL_BOUND_ERR_CH0_V(1U) + +#define PM_RX_INT_CAUSE_A 0x8fdc +#define PM_RX_STAT_CONFIG_A 0x8fc8 +#define PM_RX_STAT_COUNT_A 0x8fcc +#define PM_RX_STAT_LSB_A 0x8fd0 +#define PM_RX_DBG_CTRL_A 0x8fd0 +#define PM_RX_DBG_DATA_A 0x8fd4 +#define PM_RX_DBG_STAT_MSB_A 0x10013 + +#define PMRX_FRAMING_ERROR_F 0x003ffff0U + +#define ZERO_E_CMD_ERROR_S 22 +#define ZERO_E_CMD_ERROR_V(x) ((x) << ZERO_E_CMD_ERROR_S) +#define ZERO_E_CMD_ERROR_F ZERO_E_CMD_ERROR_V(1U) + +#define OCSPI_PAR_ERROR_S 3 +#define OCSPI_PAR_ERROR_V(x) ((x) << OCSPI_PAR_ERROR_S) +#define OCSPI_PAR_ERROR_F OCSPI_PAR_ERROR_V(1U) + +#define DB_OPTIONS_PAR_ERROR_S 2 +#define DB_OPTIONS_PAR_ERROR_V(x) ((x) << DB_OPTIONS_PAR_ERROR_S) +#define DB_OPTIONS_PAR_ERROR_F DB_OPTIONS_PAR_ERROR_V(1U) + +#define IESPI_PAR_ERROR_S 1 +#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S) +#define IESPI_PAR_ERROR_F IESPI_PAR_ERROR_V(1U) + +#define PMRX_E_PCMD_PAR_ERROR_S 0 +#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S) +#define PMRX_E_PCMD_PAR_ERROR_F PMRX_E_PCMD_PAR_ERROR_V(1U) + +#define PM_TX_INT_CAUSE_A 0x8ffc +#define PM_TX_STAT_CONFIG_A 0x8fe8 +#define PM_TX_STAT_COUNT_A 0x8fec +#define PM_TX_STAT_LSB_A 0x8ff0 +#define PM_TX_DBG_CTRL_A 0x8ff0 +#define PM_TX_DBG_DATA_A 0x8ff4 +#define PM_TX_DBG_STAT_MSB_A 0x1001a + +#define PCMD_LEN_OVFL0_S 31 +#define PCMD_LEN_OVFL0_V(x) ((x) << PCMD_LEN_OVFL0_S) +#define PCMD_LEN_OVFL0_F PCMD_LEN_OVFL0_V(1U) + +#define PCMD_LEN_OVFL1_S 30 +#define PCMD_LEN_OVFL1_V(x) ((x) << PCMD_LEN_OVFL1_S) +#define PCMD_LEN_OVFL1_F PCMD_LEN_OVFL1_V(1U) + +#define PCMD_LEN_OVFL2_S 29 +#define PCMD_LEN_OVFL2_V(x) ((x) << PCMD_LEN_OVFL2_S) +#define PCMD_LEN_OVFL2_F PCMD_LEN_OVFL2_V(1U) + +#define ZERO_C_CMD_ERROR_S 28 +#define ZERO_C_CMD_ERROR_V(x) ((x) << ZERO_C_CMD_ERROR_S) +#define ZERO_C_CMD_ERROR_F ZERO_C_CMD_ERROR_V(1U) + +#define PMTX_FRAMING_ERROR_F 0x0ffffff0U + +#define OESPI_PAR_ERROR_S 3 +#define OESPI_PAR_ERROR_V(x) ((x) << OESPI_PAR_ERROR_S) +#define OESPI_PAR_ERROR_F OESPI_PAR_ERROR_V(1U) + +#define ICSPI_PAR_ERROR_S 1 +#define ICSPI_PAR_ERROR_V(x) ((x) << ICSPI_PAR_ERROR_S) +#define ICSPI_PAR_ERROR_F ICSPI_PAR_ERROR_V(1U) + +#define PMTX_C_PCMD_PAR_ERROR_S 0 +#define PMTX_C_PCMD_PAR_ERROR_V(x) ((x) << PMTX_C_PCMD_PAR_ERROR_S) +#define PMTX_C_PCMD_PAR_ERROR_F PMTX_C_PCMD_PAR_ERROR_V(1U) + +#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define 
MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c +#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define 
MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define MAC_PORT_MAGIC_MACID_LO 0x824 +#define MAC_PORT_MAGIC_MACID_HI 0x828 + +#define MAC_PORT_EPIO_DATA0_A 0x8c0 +#define MAC_PORT_EPIO_DATA1_A 0x8c4 +#define MAC_PORT_EPIO_DATA2_A 0x8c8 +#define MAC_PORT_EPIO_DATA3_A 0x8cc +#define MAC_PORT_EPIO_OP_A 0x8d0 + +#define MAC_PORT_CFG2_A 0x818 + +#define MPS_CMN_CTL_A 0x9000 + +#define NUMPORTS_S 0 +#define NUMPORTS_M 0x3U +#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M) + +#define MPS_INT_CAUSE_A 0x9008 +#define MPS_TX_INT_CAUSE_A 0x9408 + +#define FRMERR_S 15 +#define FRMERR_V(x) ((x) << FRMERR_S) +#define FRMERR_F FRMERR_V(1U) + +#define SECNTERR_S 14 +#define SECNTERR_V(x) ((x) << SECNTERR_S) +#define SECNTERR_F SECNTERR_V(1U) + +#define BUBBLE_S 13 +#define BUBBLE_V(x) ((x) << BUBBLE_S) +#define BUBBLE_F BUBBLE_V(1U) + +#define TXDESCFIFO_S 9 +#define TXDESCFIFO_M 0xfU +#define TXDESCFIFO_V(x) ((x) << TXDESCFIFO_S) + +#define TXDATAFIFO_S 5 +#define TXDATAFIFO_M 0xfU +#define TXDATAFIFO_V(x) ((x) << TXDATAFIFO_S) + +#define NCSIFIFO_S 4 +#define NCSIFIFO_V(x) ((x) << NCSIFIFO_S) +#define NCSIFIFO_F NCSIFIFO_V(1U) + +#define TPFIFO_S 0 +#define TPFIFO_M 0xfU +#define TPFIFO_V(x) ((x) << TPFIFO_S) + +#define MPS_STAT_PERR_INT_CAUSE_SRAM_A 0x9614 +#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A 0x9620 +#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A 0x962c + +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define 
MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc + +#define MPS_TRC_CFG_A 0x9800 + +#define TRCFIFOEMPTY_S 4 +#define TRCFIFOEMPTY_V(x) ((x) << TRCFIFOEMPTY_S) +#define TRCFIFOEMPTY_F TRCFIFOEMPTY_V(1U) + +#define TRCIGNOREDROPINPUT_S 3 +#define TRCIGNOREDROPINPUT_V(x) ((x) << TRCIGNOREDROPINPUT_S) +#define TRCIGNOREDROPINPUT_F TRCIGNOREDROPINPUT_V(1U) + +#define TRCKEEPDUPLICATES_S 2 +#define TRCKEEPDUPLICATES_V(x) ((x) << TRCKEEPDUPLICATES_S) +#define TRCKEEPDUPLICATES_F TRCKEEPDUPLICATES_V(1U) + +#define TRCEN_S 1 +#define TRCEN_V(x) ((x) << TRCEN_S) +#define TRCEN_F TRCEN_V(1U) + +#define TRCMULTIFILTER_S 0 +#define TRCMULTIFILTER_V(x) ((x) << TRCMULTIFILTER_S) +#define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U) + +#define MPS_TRC_RSS_CONTROL_A 0x9808 +#define MPS_T5_TRC_RSS_CONTROL_A 0xa00c + +#define RSSCONTROL_S 16 +#define RSSCONTROL_V(x) ((x) << RSSCONTROL_S) + +#define QUEUENUMBER_S 0 +#define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S) + +#define TP_RSS_CONFIG_A 0x7df0 + +#define TNL4TUPENIPV6_S 31 +#define TNL4TUPENIPV6_V(x) ((x) << TNL4TUPENIPV6_S) +#define TNL4TUPENIPV6_F TNL4TUPENIPV6_V(1U) + +#define TNL2TUPENIPV6_S 30 +#define TNL2TUPENIPV6_V(x) ((x) << TNL2TUPENIPV6_S) +#define TNL2TUPENIPV6_F TNL2TUPENIPV6_V(1U) + +#define TNL4TUPENIPV4_S 29 +#define TNL4TUPENIPV4_V(x) ((x) << TNL4TUPENIPV4_S) +#define TNL4TUPENIPV4_F TNL4TUPENIPV4_V(1U) + +#define TNL2TUPENIPV4_S 28 +#define TNL2TUPENIPV4_V(x) ((x) << TNL2TUPENIPV4_S) +#define TNL2TUPENIPV4_F TNL2TUPENIPV4_V(1U) + +#define TNLTCPSEL_S 27 +#define TNLTCPSEL_V(x) ((x) << TNLTCPSEL_S) +#define TNLTCPSEL_F TNLTCPSEL_V(1U) + +#define TNLIP6SEL_S 26 +#define TNLIP6SEL_V(x) ((x) << TNLIP6SEL_S) +#define TNLIP6SEL_F TNLIP6SEL_V(1U) + +#define TNLVRTSEL_S 25 +#define TNLVRTSEL_V(x) ((x) << TNLVRTSEL_S) +#define TNLVRTSEL_F TNLVRTSEL_V(1U) + +#define TNLMAPEN_S 24 +#define TNLMAPEN_V(x) ((x) << TNLMAPEN_S) +#define TNLMAPEN_F TNLMAPEN_V(1U) + +#define OFDHASHSAVE_S 19 +#define OFDHASHSAVE_V(x) ((x) << OFDHASHSAVE_S) +#define OFDHASHSAVE_F OFDHASHSAVE_V(1U) + +#define OFDVRTSEL_S 18 +#define OFDVRTSEL_V(x) ((x) << OFDVRTSEL_S) +#define OFDVRTSEL_F OFDVRTSEL_V(1U) + +#define OFDMAPEN_S 17 +#define OFDMAPEN_V(x) ((x) << OFDMAPEN_S) +#define OFDMAPEN_F OFDMAPEN_V(1U) + +#define OFDLKPEN_S 16 +#define OFDLKPEN_V(x) ((x) << OFDLKPEN_S) +#define OFDLKPEN_F OFDLKPEN_V(1U) + +#define SYN4TUPENIPV6_S 15 +#define SYN4TUPENIPV6_V(x) ((x) << SYN4TUPENIPV6_S) +#define SYN4TUPENIPV6_F SYN4TUPENIPV6_V(1U) + +#define SYN2TUPENIPV6_S 14 +#define SYN2TUPENIPV6_V(x) ((x) << SYN2TUPENIPV6_S) +#define SYN2TUPENIPV6_F 
SYN2TUPENIPV6_V(1U) + +#define SYN4TUPENIPV4_S 13 +#define SYN4TUPENIPV4_V(x) ((x) << SYN4TUPENIPV4_S) +#define SYN4TUPENIPV4_F SYN4TUPENIPV4_V(1U) + +#define SYN2TUPENIPV4_S 12 +#define SYN2TUPENIPV4_V(x) ((x) << SYN2TUPENIPV4_S) +#define SYN2TUPENIPV4_F SYN2TUPENIPV4_V(1U) + +#define SYNIP6SEL_S 11 +#define SYNIP6SEL_V(x) ((x) << SYNIP6SEL_S) +#define SYNIP6SEL_F SYNIP6SEL_V(1U) + +#define SYNVRTSEL_S 10 +#define SYNVRTSEL_V(x) ((x) << SYNVRTSEL_S) +#define SYNVRTSEL_F SYNVRTSEL_V(1U) + +#define SYNMAPEN_S 9 +#define SYNMAPEN_V(x) ((x) << SYNMAPEN_S) +#define SYNMAPEN_F SYNMAPEN_V(1U) + +#define SYNLKPEN_S 8 +#define SYNLKPEN_V(x) ((x) << SYNLKPEN_S) +#define SYNLKPEN_F SYNLKPEN_V(1U) + +#define CHANNELENABLE_S 7 +#define CHANNELENABLE_V(x) ((x) << CHANNELENABLE_S) +#define CHANNELENABLE_F CHANNELENABLE_V(1U) + +#define PORTENABLE_S 6 +#define PORTENABLE_V(x) ((x) << PORTENABLE_S) +#define PORTENABLE_F PORTENABLE_V(1U) + +#define TNLALLLOOKUP_S 5 +#define TNLALLLOOKUP_V(x) ((x) << TNLALLLOOKUP_S) +#define TNLALLLOOKUP_F TNLALLLOOKUP_V(1U) + +#define VIRTENABLE_S 4 +#define VIRTENABLE_V(x) ((x) << VIRTENABLE_S) +#define VIRTENABLE_F VIRTENABLE_V(1U) + +#define CONGESTIONENABLE_S 3 +#define CONGESTIONENABLE_V(x) ((x) << CONGESTIONENABLE_S) +#define CONGESTIONENABLE_F CONGESTIONENABLE_V(1U) + +#define HASHTOEPLITZ_S 2 +#define HASHTOEPLITZ_V(x) ((x) << HASHTOEPLITZ_S) +#define HASHTOEPLITZ_F HASHTOEPLITZ_V(1U) + +#define UDPENABLE_S 1 +#define UDPENABLE_V(x) ((x) << UDPENABLE_S) +#define UDPENABLE_F UDPENABLE_V(1U) + +#define DISABLE_S 0 +#define DISABLE_V(x) ((x) << DISABLE_S) +#define DISABLE_F DISABLE_V(1U) + +#define TP_RSS_CONFIG_TNL_A 0x7df4 + +#define MASKSIZE_S 28 +#define MASKSIZE_M 0xfU +#define MASKSIZE_V(x) ((x) << MASKSIZE_S) +#define MASKSIZE_G(x) (((x) >> MASKSIZE_S) & MASKSIZE_M) + +#define MASKFILTER_S 16 +#define MASKFILTER_M 0x7ffU +#define MASKFILTER_V(x) ((x) << MASKFILTER_S) +#define MASKFILTER_G(x) (((x) >> MASKFILTER_S) & MASKFILTER_M) + +#define USEWIRECH_S 0 +#define USEWIRECH_V(x) ((x) << USEWIRECH_S) +#define USEWIRECH_F USEWIRECH_V(1U) + +#define HASHALL_S 2 +#define HASHALL_V(x) ((x) << HASHALL_S) +#define HASHALL_F HASHALL_V(1U) + +#define HASHETH_S 1 +#define HASHETH_V(x) ((x) << HASHETH_S) +#define HASHETH_F HASHETH_V(1U) + +#define TP_RSS_CONFIG_OFD_A 0x7df8 + +#define RRCPLMAPEN_S 20 +#define RRCPLMAPEN_V(x) ((x) << RRCPLMAPEN_S) +#define RRCPLMAPEN_F RRCPLMAPEN_V(1U) + +#define RRCPLQUEWIDTH_S 16 +#define RRCPLQUEWIDTH_M 0xfU +#define RRCPLQUEWIDTH_V(x) ((x) << RRCPLQUEWIDTH_S) +#define RRCPLQUEWIDTH_G(x) (((x) >> RRCPLQUEWIDTH_S) & RRCPLQUEWIDTH_M) + +#define TP_RSS_CONFIG_SYN_A 0x7dfc +#define TP_RSS_CONFIG_VRT_A 0x7e00 + +#define VFRDRG_S 25 +#define VFRDRG_V(x) ((x) << VFRDRG_S) +#define VFRDRG_F VFRDRG_V(1U) + +#define VFRDEN_S 24 +#define VFRDEN_V(x) ((x) << VFRDEN_S) +#define VFRDEN_F VFRDEN_V(1U) + +#define VFPERREN_S 23 +#define VFPERREN_V(x) ((x) << VFPERREN_S) +#define VFPERREN_F VFPERREN_V(1U) + +#define KEYPERREN_S 22 +#define KEYPERREN_V(x) ((x) << KEYPERREN_S) +#define KEYPERREN_F KEYPERREN_V(1U) + +#define DISABLEVLAN_S 21 +#define DISABLEVLAN_V(x) ((x) << DISABLEVLAN_S) +#define DISABLEVLAN_F DISABLEVLAN_V(1U) + +#define ENABLEUP0_S 20 +#define ENABLEUP0_V(x) ((x) << ENABLEUP0_S) +#define ENABLEUP0_F ENABLEUP0_V(1U) + +#define HASHDELAY_S 16 +#define HASHDELAY_M 0xfU +#define HASHDELAY_V(x) ((x) << HASHDELAY_S) +#define HASHDELAY_G(x) (((x) >> HASHDELAY_S) & HASHDELAY_M) + +#define VFWRADDR_S 8 +#define VFWRADDR_M 0x7fU +#define 
VFWRADDR_V(x) ((x) << VFWRADDR_S) +#define VFWRADDR_G(x) (((x) >> VFWRADDR_S) & VFWRADDR_M) + +#define KEYMODE_S 6 +#define KEYMODE_M 0x3U +#define KEYMODE_V(x) ((x) << KEYMODE_S) +#define KEYMODE_G(x) (((x) >> KEYMODE_S) & KEYMODE_M) + +#define VFWREN_S 5 +#define VFWREN_V(x) ((x) << VFWREN_S) +#define VFWREN_F VFWREN_V(1U) + +#define KEYWREN_S 4 +#define KEYWREN_V(x) ((x) << KEYWREN_S) +#define KEYWREN_F KEYWREN_V(1U) + +#define KEYWRADDR_S 0 +#define KEYWRADDR_M 0xfU +#define KEYWRADDR_V(x) ((x) << KEYWRADDR_S) +#define KEYWRADDR_G(x) (((x) >> KEYWRADDR_S) & KEYWRADDR_M) + +#define KEYWRADDRX_S 30 +#define KEYWRADDRX_M 0x3U +#define KEYWRADDRX_V(x) ((x) << KEYWRADDRX_S) +#define KEYWRADDRX_G(x) (((x) >> KEYWRADDRX_S) & KEYWRADDRX_M) + +#define KEYEXTEND_S 26 +#define KEYEXTEND_V(x) ((x) << KEYEXTEND_S) +#define KEYEXTEND_F KEYEXTEND_V(1U) + +#define LKPIDXSIZE_S 24 +#define LKPIDXSIZE_M 0x3U +#define LKPIDXSIZE_V(x) ((x) << LKPIDXSIZE_S) +#define LKPIDXSIZE_G(x) (((x) >> LKPIDXSIZE_S) & LKPIDXSIZE_M) + +#define TP_RSS_VFL_CONFIG_A 0x3a +#define TP_RSS_VFH_CONFIG_A 0x3b + +#define ENABLEUDPHASH_S 31 +#define ENABLEUDPHASH_V(x) ((x) << ENABLEUDPHASH_S) +#define ENABLEUDPHASH_F ENABLEUDPHASH_V(1U) + +#define VFUPEN_S 30 +#define VFUPEN_V(x) ((x) << VFUPEN_S) +#define VFUPEN_F VFUPEN_V(1U) + +#define VFVLNEX_S 28 +#define VFVLNEX_V(x) ((x) << VFVLNEX_S) +#define VFVLNEX_F VFVLNEX_V(1U) + +#define VFPRTEN_S 27 +#define VFPRTEN_V(x) ((x) << VFPRTEN_S) +#define VFPRTEN_F VFPRTEN_V(1U) + +#define VFCHNEN_S 26 +#define VFCHNEN_V(x) ((x) << VFCHNEN_S) +#define VFCHNEN_F VFCHNEN_V(1U) + +#define DEFAULTQUEUE_S 16 +#define DEFAULTQUEUE_M 0x3ffU +#define DEFAULTQUEUE_G(x) (((x) >> DEFAULTQUEUE_S) & DEFAULTQUEUE_M) + +#define VFIP6TWOTUPEN_S 6 +#define VFIP6TWOTUPEN_V(x) ((x) << VFIP6TWOTUPEN_S) +#define VFIP6TWOTUPEN_F VFIP6TWOTUPEN_V(1U) + +#define VFIP4FOURTUPEN_S 5 +#define VFIP4FOURTUPEN_V(x) ((x) << VFIP4FOURTUPEN_S) +#define VFIP4FOURTUPEN_F VFIP4FOURTUPEN_V(1U) + +#define VFIP4TWOTUPEN_S 4 +#define VFIP4TWOTUPEN_V(x) ((x) << VFIP4TWOTUPEN_S) +#define VFIP4TWOTUPEN_F VFIP4TWOTUPEN_V(1U) + +#define KEYINDEX_S 0 +#define KEYINDEX_M 0xfU +#define KEYINDEX_G(x) (((x) >> KEYINDEX_S) & KEYINDEX_M) + +#define MAPENABLE_S 31 +#define MAPENABLE_V(x) ((x) << MAPENABLE_S) +#define MAPENABLE_F MAPENABLE_V(1U) + +#define CHNENABLE_S 30 +#define CHNENABLE_V(x) ((x) << CHNENABLE_S) +#define CHNENABLE_F CHNENABLE_V(1U) + +#define PRTENABLE_S 29 +#define PRTENABLE_V(x) ((x) << PRTENABLE_S) +#define PRTENABLE_F PRTENABLE_V(1U) + +#define UDPFOURTUPEN_S 28 +#define UDPFOURTUPEN_V(x) ((x) << UDPFOURTUPEN_S) +#define UDPFOURTUPEN_F UDPFOURTUPEN_V(1U) + +#define IP6FOURTUPEN_S 27 +#define IP6FOURTUPEN_V(x) ((x) << IP6FOURTUPEN_S) +#define IP6FOURTUPEN_F IP6FOURTUPEN_V(1U) + +#define IP6TWOTUPEN_S 26 +#define IP6TWOTUPEN_V(x) ((x) << IP6TWOTUPEN_S) +#define IP6TWOTUPEN_F IP6TWOTUPEN_V(1U) + +#define IP4FOURTUPEN_S 25 +#define IP4FOURTUPEN_V(x) ((x) << IP4FOURTUPEN_S) +#define IP4FOURTUPEN_F IP4FOURTUPEN_V(1U) + +#define IP4TWOTUPEN_S 24 +#define IP4TWOTUPEN_V(x) ((x) << IP4TWOTUPEN_S) +#define IP4TWOTUPEN_F IP4TWOTUPEN_V(1U) + +#define IVFWIDTH_S 20 +#define IVFWIDTH_M 0xfU +#define IVFWIDTH_V(x) ((x) << IVFWIDTH_S) +#define IVFWIDTH_G(x) (((x) >> IVFWIDTH_S) & IVFWIDTH_M) + +#define CH1DEFAULTQUEUE_S 10 +#define CH1DEFAULTQUEUE_M 0x3ffU +#define CH1DEFAULTQUEUE_V(x) ((x) << CH1DEFAULTQUEUE_S) +#define CH1DEFAULTQUEUE_G(x) (((x) >> CH1DEFAULTQUEUE_S) & CH1DEFAULTQUEUE_M) + +#define CH0DEFAULTQUEUE_S 0 +#define 
CH0DEFAULTQUEUE_M 0x3ffU +#define CH0DEFAULTQUEUE_V(x) ((x) << CH0DEFAULTQUEUE_S) +#define CH0DEFAULTQUEUE_G(x) (((x) >> CH0DEFAULTQUEUE_S) & CH0DEFAULTQUEUE_M) + +#define VFLKPIDX_S 8 +#define VFLKPIDX_M 0xffU +#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M) + +#define TP_RSS_CONFIG_CNG_A 0x7e04 +#define TP_RSS_SECRET_KEY0_A 0x40 +#define TP_RSS_PF0_CONFIG_A 0x30 +#define TP_RSS_PF_MAP_A 0x38 +#define TP_RSS_PF_MSK_A 0x39 + +#define PF1LKPIDX_S 3 + +#define PF0LKPIDX_M 0x7U + +#define PF1MSKSIZE_S 4 +#define PF1MSKSIZE_M 0xfU + +#define CHNCOUNT3_S 31 +#define CHNCOUNT3_V(x) ((x) << CHNCOUNT3_S) +#define CHNCOUNT3_F CHNCOUNT3_V(1U) + +#define CHNCOUNT2_S 30 +#define CHNCOUNT2_V(x) ((x) << CHNCOUNT2_S) +#define CHNCOUNT2_F CHNCOUNT2_V(1U) + +#define CHNCOUNT1_S 29 +#define CHNCOUNT1_V(x) ((x) << CHNCOUNT1_S) +#define CHNCOUNT1_F CHNCOUNT1_V(1U) + +#define CHNCOUNT0_S 28 +#define CHNCOUNT0_V(x) ((x) << CHNCOUNT0_S) +#define CHNCOUNT0_F CHNCOUNT0_V(1U) + +#define CHNUNDFLOW3_S 27 +#define CHNUNDFLOW3_V(x) ((x) << CHNUNDFLOW3_S) +#define CHNUNDFLOW3_F CHNUNDFLOW3_V(1U) + +#define CHNUNDFLOW2_S 26 +#define CHNUNDFLOW2_V(x) ((x) << CHNUNDFLOW2_S) +#define CHNUNDFLOW2_F CHNUNDFLOW2_V(1U) + +#define CHNUNDFLOW1_S 25 +#define CHNUNDFLOW1_V(x) ((x) << CHNUNDFLOW1_S) +#define CHNUNDFLOW1_F CHNUNDFLOW1_V(1U) + +#define CHNUNDFLOW0_S 24 +#define CHNUNDFLOW0_V(x) ((x) << CHNUNDFLOW0_S) +#define CHNUNDFLOW0_F CHNUNDFLOW0_V(1U) + +#define RSTCHN3_S 19 +#define RSTCHN3_V(x) ((x) << RSTCHN3_S) +#define RSTCHN3_F RSTCHN3_V(1U) + +#define RSTCHN2_S 18 +#define RSTCHN2_V(x) ((x) << RSTCHN2_S) +#define RSTCHN2_F RSTCHN2_V(1U) + +#define RSTCHN1_S 17 +#define RSTCHN1_V(x) ((x) << RSTCHN1_S) +#define RSTCHN1_F RSTCHN1_V(1U) + +#define RSTCHN0_S 16 +#define RSTCHN0_V(x) ((x) << RSTCHN0_S) +#define RSTCHN0_F RSTCHN0_V(1U) + +#define UPDVLD_S 15 +#define UPDVLD_V(x) ((x) << UPDVLD_S) +#define UPDVLD_F UPDVLD_V(1U) + +#define XOFF_S 14 +#define XOFF_V(x) ((x) << XOFF_S) +#define XOFF_F XOFF_V(1U) + +#define UPDCHN3_S 13 +#define UPDCHN3_V(x) ((x) << UPDCHN3_S) +#define UPDCHN3_F UPDCHN3_V(1U) + +#define UPDCHN2_S 12 +#define UPDCHN2_V(x) ((x) << UPDCHN2_S) +#define UPDCHN2_F UPDCHN2_V(1U) + +#define UPDCHN1_S 11 +#define UPDCHN1_V(x) ((x) << UPDCHN1_S) +#define UPDCHN1_F UPDCHN1_V(1U) + +#define UPDCHN0_S 10 +#define UPDCHN0_V(x) ((x) << UPDCHN0_S) +#define UPDCHN0_F UPDCHN0_V(1U) + +#define QUEUE_S 0 +#define QUEUE_M 0x3ffU +#define QUEUE_V(x) ((x) << QUEUE_S) +#define QUEUE_G(x) (((x) >> QUEUE_S) & QUEUE_M) + +#define MPS_TRC_INT_CAUSE_A 0x985c + +#define MISCPERR_S 8 +#define MISCPERR_V(x) ((x) << MISCPERR_S) +#define MISCPERR_F MISCPERR_V(1U) + +#define PKTFIFO_S 4 +#define PKTFIFO_M 0xfU +#define PKTFIFO_V(x) ((x) << PKTFIFO_S) + +#define FILTMEM_S 0 +#define FILTMEM_M 0xfU +#define FILTMEM_V(x) ((x) << FILTMEM_S) + +#define MPS_CLS_INT_CAUSE_A 0xd028 + +#define HASHSRAM_S 2 +#define HASHSRAM_V(x) ((x) << HASHSRAM_S) +#define HASHSRAM_F HASHSRAM_V(1U) + +#define MATCHTCAM_S 1 +#define MATCHTCAM_V(x) ((x) << MATCHTCAM_S) +#define MATCHTCAM_F MATCHTCAM_V(1U) + +#define MATCHSRAM_S 0 +#define MATCHSRAM_V(x) ((x) << MATCHSRAM_S) +#define MATCHSRAM_F MATCHSRAM_V(1U) + +#define MPS_RX_PERR_INT_CAUSE_A 0x11074 + +#define MPS_CLS_TCAM_Y_L_A 0xf000 +#define MPS_CLS_TCAM_X_L_A 0xf008 + +#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16) +#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512 + +#define MPS_CLS_TCAM_X_L(idx) (MPS_CLS_TCAM_X_L_A + (idx) * 16) +#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512 + +#define 
MPS_CLS_SRAM_L_A 0xe000 +#define MPS_CLS_SRAM_H_A 0xe004 + +#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8) +#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 + +#define MPS_CLS_SRAM_H(idx) (MPS_CLS_SRAM_H_A + (idx) * 8) +#define NUM_MPS_CLS_SRAM_H_INSTANCES 336 + +#define MULTILISTEN0_S 25 + +#define REPLICATE_S 11 +#define REPLICATE_V(x) ((x) << REPLICATE_S) +#define REPLICATE_F REPLICATE_V(1U) + +#define PF_S 8 +#define PF_M 0x7U +#define PF_G(x) (((x) >> PF_S) & PF_M) + +#define VF_VALID_S 7 +#define VF_VALID_V(x) ((x) << VF_VALID_S) +#define VF_VALID_F VF_VALID_V(1U) + +#define VF_S 0 +#define VF_M 0x7fU +#define VF_G(x) (((x) >> VF_S) & VF_M) + +#define SRAM_PRIO3_S 22 +#define SRAM_PRIO3_M 0x7U +#define SRAM_PRIO3_G(x) (((x) >> SRAM_PRIO3_S) & SRAM_PRIO3_M) + +#define SRAM_PRIO2_S 19 +#define SRAM_PRIO2_M 0x7U +#define SRAM_PRIO2_G(x) (((x) >> SRAM_PRIO2_S) & SRAM_PRIO2_M) + +#define SRAM_PRIO1_S 16 +#define SRAM_PRIO1_M 0x7U +#define SRAM_PRIO1_G(x) (((x) >> SRAM_PRIO1_S) & SRAM_PRIO1_M) + +#define SRAM_PRIO0_S 13 +#define SRAM_PRIO0_M 0x7U +#define SRAM_PRIO0_G(x) (((x) >> SRAM_PRIO0_S) & SRAM_PRIO0_M) + +#define SRAM_VLD_S 12 +#define SRAM_VLD_V(x) ((x) << SRAM_VLD_S) +#define SRAM_VLD_F SRAM_VLD_V(1U) + +#define PORTMAP_S 0 +#define PORTMAP_M 0xfU +#define PORTMAP_G(x) (((x) >> PORTMAP_S) & PORTMAP_M) + +#define CPL_INTR_CAUSE_A 0x19054 + +#define CIM_OP_MAP_PERR_S 5 +#define CIM_OP_MAP_PERR_V(x) ((x) << CIM_OP_MAP_PERR_S) +#define CIM_OP_MAP_PERR_F CIM_OP_MAP_PERR_V(1U) + +#define CIM_OVFL_ERROR_S 4 +#define CIM_OVFL_ERROR_V(x) ((x) << CIM_OVFL_ERROR_S) +#define CIM_OVFL_ERROR_F CIM_OVFL_ERROR_V(1U) + +#define TP_FRAMING_ERROR_S 3 +#define TP_FRAMING_ERROR_V(x) ((x) << TP_FRAMING_ERROR_S) +#define TP_FRAMING_ERROR_F TP_FRAMING_ERROR_V(1U) + +#define SGE_FRAMING_ERROR_S 2 +#define SGE_FRAMING_ERROR_V(x) ((x) << SGE_FRAMING_ERROR_S) +#define SGE_FRAMING_ERROR_F SGE_FRAMING_ERROR_V(1U) + +#define CIM_FRAMING_ERROR_S 1 +#define CIM_FRAMING_ERROR_V(x) ((x) << CIM_FRAMING_ERROR_S) +#define CIM_FRAMING_ERROR_F CIM_FRAMING_ERROR_V(1U) + +#define ZERO_SWITCH_ERROR_S 0 +#define ZERO_SWITCH_ERROR_V(x) ((x) << ZERO_SWITCH_ERROR_S) +#define ZERO_SWITCH_ERROR_F ZERO_SWITCH_ERROR_V(1U) + +#define SMB_INT_CAUSE_A 0x19090 + +#define MSTTXFIFOPARINT_S 21 +#define MSTTXFIFOPARINT_V(x) ((x) << MSTTXFIFOPARINT_S) +#define MSTTXFIFOPARINT_F MSTTXFIFOPARINT_V(1U) + +#define MSTRXFIFOPARINT_S 20 +#define MSTRXFIFOPARINT_V(x) ((x) << MSTRXFIFOPARINT_S) +#define MSTRXFIFOPARINT_F MSTRXFIFOPARINT_V(1U) + +#define SLVFIFOPARINT_S 19 +#define SLVFIFOPARINT_V(x) ((x) << SLVFIFOPARINT_S) +#define SLVFIFOPARINT_F SLVFIFOPARINT_V(1U) + +#define ULP_RX_INT_CAUSE_A 0x19158 +#define ULP_RX_ISCSI_TAGMASK_A 0x19164 +#define ULP_RX_ISCSI_PSZ_A 0x19168 +#define ULP_RX_LA_CTL_A 0x1923c +#define ULP_RX_LA_RDPTR_A 0x19240 +#define ULP_RX_LA_RDDATA_A 0x19244 +#define ULP_RX_LA_WRPTR_A 0x19248 + +#define HPZ3_S 24 +#define HPZ3_V(x) ((x) << HPZ3_S) + +#define HPZ2_S 16 +#define HPZ2_V(x) ((x) << HPZ2_S) + +#define HPZ1_S 8 +#define HPZ1_V(x) ((x) << HPZ1_S) + +#define HPZ0_S 0 +#define HPZ0_V(x) ((x) << HPZ0_S) + +#define ULP_RX_TDDP_PSZ_A 0x19178 + +/* registers for module SF */ +#define SF_DATA_A 0x193f8 +#define SF_OP_A 0x193fc + +#define SF_BUSY_S 31 +#define SF_BUSY_V(x) ((x) << SF_BUSY_S) +#define SF_BUSY_F SF_BUSY_V(1U) + +#define SF_LOCK_S 4 +#define SF_LOCK_V(x) ((x) << SF_LOCK_S) +#define SF_LOCK_F SF_LOCK_V(1U) + +#define SF_CONT_S 3 +#define SF_CONT_V(x) ((x) << SF_CONT_S) +#define SF_CONT_F SF_CONT_V(1U) + 
+#define BYTECNT_S 1 +#define BYTECNT_V(x) ((x) << BYTECNT_S) + +#define OP_S 0 +#define OP_V(x) ((x) << OP_S) +#define OP_F OP_V(1U) + +#define PL_PF_INT_CAUSE_A 0x3c0 + +#define PFSW_S 3 +#define PFSW_V(x) ((x) << PFSW_S) +#define PFSW_F PFSW_V(1U) + +#define PFCIM_S 1 +#define PFCIM_V(x) ((x) << PFCIM_S) +#define PFCIM_F PFCIM_V(1U) + +#define PL_PF_INT_ENABLE_A 0x3c4 +#define PL_PF_CTL_A 0x3c8 + +#define PL_WHOAMI_A 0x19400 + +#define SOURCEPF_S 8 +#define SOURCEPF_M 0x7U +#define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M) + +#define PL_INT_CAUSE_A 0x1940c + +#define ULP_TX_S 27 +#define ULP_TX_V(x) ((x) << ULP_TX_S) +#define ULP_TX_F ULP_TX_V(1U) + +#define SGE_S 26 +#define SGE_V(x) ((x) << SGE_S) +#define SGE_F SGE_V(1U) + +#define CPL_SWITCH_S 24 +#define CPL_SWITCH_V(x) ((x) << CPL_SWITCH_S) +#define CPL_SWITCH_F CPL_SWITCH_V(1U) + +#define ULP_RX_S 23 +#define ULP_RX_V(x) ((x) << ULP_RX_S) +#define ULP_RX_F ULP_RX_V(1U) + +#define PM_RX_S 22 +#define PM_RX_V(x) ((x) << PM_RX_S) +#define PM_RX_F PM_RX_V(1U) + +#define PM_TX_S 21 +#define PM_TX_V(x) ((x) << PM_TX_S) +#define PM_TX_F PM_TX_V(1U) + +#define MA_S 20 +#define MA_V(x) ((x) << MA_S) +#define MA_F MA_V(1U) + +#define TP_S 19 +#define TP_V(x) ((x) << TP_S) +#define TP_F TP_V(1U) + +#define LE_S 18 +#define LE_V(x) ((x) << LE_S) +#define LE_F LE_V(1U) + +#define EDC1_S 17 +#define EDC1_V(x) ((x) << EDC1_S) +#define EDC1_F EDC1_V(1U) + +#define EDC0_S 16 +#define EDC0_V(x) ((x) << EDC0_S) +#define EDC0_F EDC0_V(1U) + +#define MC_S 15 +#define MC_V(x) ((x) << MC_S) +#define MC_F MC_V(1U) + +#define PCIE_S 14 +#define PCIE_V(x) ((x) << PCIE_S) +#define PCIE_F PCIE_V(1U) + +#define XGMAC_KR1_S 12 +#define XGMAC_KR1_V(x) ((x) << XGMAC_KR1_S) +#define XGMAC_KR1_F XGMAC_KR1_V(1U) + +#define XGMAC_KR0_S 11 +#define XGMAC_KR0_V(x) ((x) << XGMAC_KR0_S) +#define XGMAC_KR0_F XGMAC_KR0_V(1U) + +#define XGMAC1_S 10 +#define XGMAC1_V(x) ((x) << XGMAC1_S) +#define XGMAC1_F XGMAC1_V(1U) + +#define XGMAC0_S 9 +#define XGMAC0_V(x) ((x) << XGMAC0_S) +#define XGMAC0_F XGMAC0_V(1U) + +#define SMB_S 8 +#define SMB_V(x) ((x) << SMB_S) +#define SMB_F SMB_V(1U) + +#define SF_S 7 +#define SF_V(x) ((x) << SF_S) +#define SF_F SF_V(1U) + +#define PL_S 6 +#define PL_V(x) ((x) << PL_S) +#define PL_F PL_V(1U) + +#define NCSI_S 5 +#define NCSI_V(x) ((x) << NCSI_S) +#define NCSI_F NCSI_V(1U) + +#define MPS_S 4 +#define MPS_V(x) ((x) << MPS_S) +#define MPS_F MPS_V(1U) + +#define CIM_S 0 +#define CIM_V(x) ((x) << CIM_S) +#define CIM_F CIM_V(1U) + +#define MC1_S 31 + +#define PL_INT_ENABLE_A 0x19410 +#define PL_INT_MAP0_A 0x19414 +#define PL_RST_A 0x19428 + +#define PIORST_S 1 +#define PIORST_V(x) ((x) << PIORST_S) +#define PIORST_F PIORST_V(1U) + +#define PIORSTMODE_S 0 +#define PIORSTMODE_V(x) ((x) << PIORSTMODE_S) +#define PIORSTMODE_F PIORSTMODE_V(1U) + +#define PL_PL_INT_CAUSE_A 0x19430 + +#define FATALPERR_S 4 +#define FATALPERR_V(x) ((x) << FATALPERR_S) +#define FATALPERR_F FATALPERR_V(1U) + +#define PERRVFID_S 0 +#define PERRVFID_V(x) ((x) << PERRVFID_S) +#define PERRVFID_F PERRVFID_V(1U) + +#define PL_REV_A 0x1943c + +#define REV_S 0 +#define REV_M 0xfU +#define REV_V(x) ((x) << REV_S) +#define REV_G(x) (((x) >> REV_S) & REV_M) + +#define LE_DB_INT_CAUSE_A 0x19c3c + +#define REQQPARERR_S 16 +#define REQQPARERR_V(x) ((x) << REQQPARERR_S) +#define REQQPARERR_F REQQPARERR_V(1U) + +#define UNKNOWNCMD_S 15 +#define UNKNOWNCMD_V(x) ((x) << UNKNOWNCMD_S) +#define UNKNOWNCMD_F UNKNOWNCMD_V(1U) + +#define PARITYERR_S 6 +#define PARITYERR_V(x) ((x) << 
PARITYERR_S) +#define PARITYERR_F PARITYERR_V(1U) + +#define LIPMISS_S 5 +#define LIPMISS_V(x) ((x) << LIPMISS_S) +#define LIPMISS_F LIPMISS_V(1U) + +#define LIP0_S 4 +#define LIP0_V(x) ((x) << LIP0_S) +#define LIP0_F LIP0_V(1U) + +#define NCSI_INT_CAUSE_A 0x1a0d8 + +#define CIM_DM_PRTY_ERR_S 8 +#define CIM_DM_PRTY_ERR_V(x) ((x) << CIM_DM_PRTY_ERR_S) +#define CIM_DM_PRTY_ERR_F CIM_DM_PRTY_ERR_V(1U) + +#define MPS_DM_PRTY_ERR_S 7 +#define MPS_DM_PRTY_ERR_V(x) ((x) << MPS_DM_PRTY_ERR_S) +#define MPS_DM_PRTY_ERR_F MPS_DM_PRTY_ERR_V(1U) + +#define TXFIFO_PRTY_ERR_S 1 +#define TXFIFO_PRTY_ERR_V(x) ((x) << TXFIFO_PRTY_ERR_S) +#define TXFIFO_PRTY_ERR_F TXFIFO_PRTY_ERR_V(1U) + +#define RXFIFO_PRTY_ERR_S 0 +#define RXFIFO_PRTY_ERR_V(x) ((x) << RXFIFO_PRTY_ERR_S) +#define RXFIFO_PRTY_ERR_F RXFIFO_PRTY_ERR_V(1U) + +#define XGMAC_PORT_CFG2_A 0x1018 + +#define PATEN_S 18 +#define PATEN_V(x) ((x) << PATEN_S) +#define PATEN_F PATEN_V(1U) + +#define MAGICEN_S 17 +#define MAGICEN_V(x) ((x) << MAGICEN_S) +#define MAGICEN_F MAGICEN_V(1U) + +#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 +#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 + +#define XGMAC_PORT_EPIO_DATA0_A 0x10c0 +#define XGMAC_PORT_EPIO_DATA1_A 0x10c4 +#define XGMAC_PORT_EPIO_DATA2_A 0x10c8 +#define XGMAC_PORT_EPIO_DATA3_A 0x10cc +#define XGMAC_PORT_EPIO_OP_A 0x10d0 + +#define EPIOWR_S 8 +#define EPIOWR_V(x) ((x) << EPIOWR_S) +#define EPIOWR_F EPIOWR_V(1U) + +#define ADDRESS_S 0 +#define ADDRESS_V(x) ((x) << ADDRESS_S) + +#define MAC_PORT_INT_CAUSE_A 0x8dc +#define XGMAC_PORT_INT_CAUSE_A 0x10dc + +#define TP_TX_MOD_QUEUE_REQ_MAP_A 0x7e28 + +#define TP_TX_MOD_QUEUE_WEIGHT0_A 0x7e30 +#define TP_TX_MOD_CHANNEL_WEIGHT_A 0x7e34 + +#define TX_MOD_QUEUE_REQ_MAP_S 0 +#define TX_MOD_QUEUE_REQ_MAP_V(x) ((x) << TX_MOD_QUEUE_REQ_MAP_S) + +#define TX_MODQ_WEIGHT3_S 24 +#define TX_MODQ_WEIGHT3_V(x) ((x) << TX_MODQ_WEIGHT3_S) + +#define TX_MODQ_WEIGHT2_S 16 +#define TX_MODQ_WEIGHT2_V(x) ((x) << TX_MODQ_WEIGHT2_S) + +#define TX_MODQ_WEIGHT1_S 8 +#define TX_MODQ_WEIGHT1_V(x) ((x) << TX_MODQ_WEIGHT1_S) + +#define TX_MODQ_WEIGHT0_S 0 +#define TX_MODQ_WEIGHT0_V(x) ((x) << TX_MODQ_WEIGHT0_S) + +#define TP_TX_SCHED_HDR_A 0x23 +#define TP_TX_SCHED_FIFO_A 0x24 +#define TP_TX_SCHED_PCMD_A 0x25 + +#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 +#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 + +#define T5_PORT0_BASE 0x30000 +#define T5_PORT_STRIDE 0x4000 +#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE) +#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg)) + +#define MC_0_BASE_ADDR 0x40000 +#define MC_1_BASE_ADDR 0x48000 +#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR) +#define MC_REG(reg, idx) (reg + MC_STRIDE * idx) + +#define MC_P_BIST_CMD_A 0x41400 +#define MC_P_BIST_CMD_ADDR_A 0x41404 +#define MC_P_BIST_CMD_LEN_A 0x41408 +#define MC_P_BIST_DATA_PATTERN_A 0x4140c +#define MC_P_BIST_STATUS_RDATA_A 0x41488 + +#define EDC_T50_BASE_ADDR 0x50000 + +#define EDC_H_BIST_CMD_A 0x50004 +#define EDC_H_BIST_CMD_ADDR_A 0x50008 +#define EDC_H_BIST_CMD_LEN_A 0x5000c +#define EDC_H_BIST_DATA_PATTERN_A 0x50010 +#define EDC_H_BIST_STATUS_RDATA_A 0x50028 + +#define EDC_T51_BASE_ADDR 0x50800 + +#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) +#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx) + +#define PL_VF_REV_A 0x4 +#define PL_VF_WHOAMI_A 0x0 +#define PL_VF_REVISION_A 0x8 + +/* registers for module CIM */ +#define CIM_HOST_ACC_CTRL_A 0x7b50 +#define CIM_HOST_ACC_DATA_A 0x7b54 +#define UP_UP_DBG_LA_CFG_A 0x140 +#define UP_UP_DBG_LA_DATA_A 0x144 + +#define 
HOSTBUSY_S 17 +#define HOSTBUSY_V(x) ((x) << HOSTBUSY_S) +#define HOSTBUSY_F HOSTBUSY_V(1U) + +#define HOSTWRITE_S 16 +#define HOSTWRITE_V(x) ((x) << HOSTWRITE_S) +#define HOSTWRITE_F HOSTWRITE_V(1U) + +#define CIM_IBQ_DBG_CFG_A 0x7b60 + +#define IBQDBGADDR_S 16 +#define IBQDBGADDR_M 0xfffU +#define IBQDBGADDR_V(x) ((x) << IBQDBGADDR_S) +#define IBQDBGADDR_G(x) (((x) >> IBQDBGADDR_S) & IBQDBGADDR_M) + +#define IBQDBGBUSY_S 1 +#define IBQDBGBUSY_V(x) ((x) << IBQDBGBUSY_S) +#define IBQDBGBUSY_F IBQDBGBUSY_V(1U) + +#define IBQDBGEN_S 0 +#define IBQDBGEN_V(x) ((x) << IBQDBGEN_S) +#define IBQDBGEN_F IBQDBGEN_V(1U) + +#define CIM_OBQ_DBG_CFG_A 0x7b64 + +#define OBQDBGADDR_S 16 +#define OBQDBGADDR_M 0xfffU +#define OBQDBGADDR_V(x) ((x) << OBQDBGADDR_S) +#define OBQDBGADDR_G(x) (((x) >> OBQDBGADDR_S) & OBQDBGADDR_M) + +#define OBQDBGBUSY_S 1 +#define OBQDBGBUSY_V(x) ((x) << OBQDBGBUSY_S) +#define OBQDBGBUSY_F OBQDBGBUSY_V(1U) + +#define OBQDBGEN_S 0 +#define OBQDBGEN_V(x) ((x) << OBQDBGEN_S) +#define OBQDBGEN_F OBQDBGEN_V(1U) + +#define CIM_IBQ_DBG_DATA_A 0x7b68 +#define CIM_OBQ_DBG_DATA_A 0x7b6c + +#define UPDBGLARDEN_S 1 +#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S) +#define UPDBGLARDEN_F UPDBGLARDEN_V(1U) + +#define UPDBGLAEN_S 0 +#define UPDBGLAEN_V(x) ((x) << UPDBGLAEN_S) +#define UPDBGLAEN_F UPDBGLAEN_V(1U) + +#define UPDBGLARDPTR_S 2 +#define UPDBGLARDPTR_M 0xfffU +#define UPDBGLARDPTR_V(x) ((x) << UPDBGLARDPTR_S) + +#define UPDBGLAWRPTR_S 16 +#define UPDBGLAWRPTR_M 0xfffU +#define UPDBGLAWRPTR_G(x) (((x) >> UPDBGLAWRPTR_S) & UPDBGLAWRPTR_M) + +#define UPDBGLACAPTPCONLY_S 30 +#define UPDBGLACAPTPCONLY_V(x) ((x) << UPDBGLACAPTPCONLY_S) +#define UPDBGLACAPTPCONLY_F UPDBGLACAPTPCONLY_V(1U) + +#define CIM_QUEUE_CONFIG_REF_A 0x7b48 +#define CIM_QUEUE_CONFIG_CTRL_A 0x7b4c + +#define CIMQSIZE_S 24 +#define CIMQSIZE_M 0x3fU +#define CIMQSIZE_G(x) (((x) >> CIMQSIZE_S) & CIMQSIZE_M) + +#define CIMQBASE_S 16 +#define CIMQBASE_M 0x3fU +#define CIMQBASE_G(x) (((x) >> CIMQBASE_S) & CIMQBASE_M) + +#define QUEFULLTHRSH_S 0 +#define QUEFULLTHRSH_M 0x1ffU +#define QUEFULLTHRSH_G(x) (((x) >> QUEFULLTHRSH_S) & QUEFULLTHRSH_M) + +#define UP_IBQ_0_RDADDR_A 0x10 +#define UP_IBQ_0_SHADOW_RDADDR_A 0x280 +#define UP_OBQ_0_REALADDR_A 0x104 +#define UP_OBQ_0_SHADOW_REALADDR_A 0x394 + +#define IBQRDADDR_S 0 +#define IBQRDADDR_M 0x1fffU +#define IBQRDADDR_G(x) (((x) >> IBQRDADDR_S) & IBQRDADDR_M) + +#define IBQWRADDR_S 0 +#define IBQWRADDR_M 0x1fffU +#define IBQWRADDR_G(x) (((x) >> IBQWRADDR_S) & IBQWRADDR_M) + +#define QUERDADDR_S 0 +#define QUERDADDR_M 0x7fffU +#define QUERDADDR_G(x) (((x) >> QUERDADDR_S) & QUERDADDR_M) + +#define QUEREMFLITS_S 0 +#define QUEREMFLITS_M 0x7ffU +#define QUEREMFLITS_G(x) (((x) >> QUEREMFLITS_S) & QUEREMFLITS_M) + +#define QUEEOPCNT_S 16 +#define QUEEOPCNT_M 0xfffU +#define QUEEOPCNT_G(x) (((x) >> QUEEOPCNT_S) & QUEEOPCNT_M) + +#define QUESOPCNT_S 0 +#define QUESOPCNT_M 0xfffU +#define QUESOPCNT_G(x) (((x) >> QUESOPCNT_S) & QUESOPCNT_M) + +#define OBQSELECT_S 4 +#define OBQSELECT_V(x) ((x) << OBQSELECT_S) +#define OBQSELECT_F OBQSELECT_V(1U) + +#define IBQSELECT_S 3 +#define IBQSELECT_V(x) ((x) << IBQSELECT_S) +#define IBQSELECT_F IBQSELECT_V(1U) + +#define QUENUMSELECT_S 0 +#define QUENUMSELECT_V(x) ((x) << QUENUMSELECT_S) + +#endif /* __T4_REGS_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h new file mode 100644 index 000000000..19b2dcf6a --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -0,0 +1,124 
@@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_VALUES_H__ +#define __T4_VALUES_H__ + +/* This file contains definitions for various T4 register value hardware + * constants. The types of values encoded here are predominantly those for + * register fields which control "modal" behavior. For the most part, we do + * not include definitions for register fields which are simple numeric + * metrics, etc. + */ + +/* SGE register field values. + */ + +/* CONTROL1 register */ +#define RXPKTCPLMODE_SPLIT_X 1 + +#define INGPCIEBOUNDARY_SHIFT_X 5 +#define INGPCIEBOUNDARY_32B_X 0 + +#define INGPADBOUNDARY_SHIFT_X 5 + +/* CONTROL2 register */ +#define INGPACKBOUNDARY_SHIFT_X 5 +#define INGPACKBOUNDARY_16B_X 0 + +/* GTS register */ +#define SGE_TIMERREGS 6 +#define TIMERREG_COUNTER0_X 0 + +/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues. + * The User Doorbells are each 128 bytes in length with a Simple Doorbell at + * offsets 8x and a Write Combining single 64-byte Egress Queue Unit + * (IDXSIZE_UNIT_X) Gather Buffer interface at offset 64. For Ingress Queues, + * we have a Going To Sleep register at offsets 8x+4. + * + * As noted above, we have many instances of the Simple Doorbell and Going To + * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a + * non-64-byte aligned offset for the Simple Doorbell in order to attempt to + * avoid buffering of the writes to the Simple Doorbell and we want to use a + * non-contiguous offset for the Going To Sleep writes in order to avoid + * possible combining between them. + */ +#define SGE_UDB_SIZE 128 +#define SGE_UDB_KDOORBELL 8 +#define SGE_UDB_GTS 20 +#define SGE_UDB_WCDOORBELL 64 + +/* CIM register field values. + */ +#define X_MBOWNER_FW 1 +#define X_MBOWNER_PL 2 + +/* PCI-E definitions */ +#define WINDOW_SHIFT_X 10 +#define PCIEOFST_SHIFT_X 10 + +/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the + * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP + * selects for a particular field being present. 
These fields, when present + * in the Compressed Filter Tuple, have the following widths in bits. + */ +#define FT_FCOE_W 1 +#define FT_PORT_W 3 +#define FT_VNIC_ID_W 17 +#define FT_VLAN_W 17 +#define FT_TOS_W 8 +#define FT_PROTOCOL_W 8 +#define FT_ETHERTYPE_W 16 +#define FT_MACMATCH_W 9 +#define FT_MPSHITTYPE_W 3 +#define FT_FRAGMENTATION_W 1 + +/* Some of the Compressed Filter Tuple fields have internal structure. These + * bit shifts/masks describe those structures. All shifts are relative to the + * base position of the fields within the Compressed Filter Tuple + */ +#define FT_VLAN_VLD_S 16 +#define FT_VLAN_VLD_V(x) ((x) << FT_VLAN_VLD_S) +#define FT_VLAN_VLD_F FT_VLAN_VLD_V(1U) + +#define FT_VNID_ID_VF_S 0 +#define FT_VNID_ID_VF_V(x) ((x) << FT_VNID_ID_VF_S) + +#define FT_VNID_ID_PF_S 7 +#define FT_VNID_ID_PF_V(x) ((x) << FT_VNID_ID_PF_S) + +#define FT_VNID_ID_VLD_S 16 +#define FT_VNID_ID_VLD_V(x) ((x) << FT_VNID_ID_VLD_S) + +#endif /* __T4_VALUES_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h new file mode 100644 index 000000000..03fbfd1fb --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -0,0 +1,3177 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _T4FW_INTERFACE_H_ +#define _T4FW_INTERFACE_H_ + +enum fw_retval { + FW_SUCCESS = 0, /* completed successfully */ + FW_EPERM = 1, /* operation not permitted */ + FW_ENOENT = 2, /* no such file or directory */ + FW_EIO = 5, /* input/output error; hw bad */ + FW_ENOEXEC = 8, /* exec format error; inv microcode */ + FW_EAGAIN = 11, /* try again */ + FW_ENOMEM = 12, /* out of memory */ + FW_EFAULT = 14, /* bad address; fw bad */ + FW_EBUSY = 16, /* resource busy */ + FW_EEXIST = 17, /* file exists */ + FW_ENODEV = 19, /* no such device */ + FW_EINVAL = 22, /* invalid argument */ + FW_ENOSPC = 28, /* no space left on device */ + FW_ENOSYS = 38, /* functionality not implemented */ + FW_ENODATA = 61, /* no data available */ + FW_EPROTO = 71, /* protocol error */ + FW_EADDRINUSE = 98, /* address already in use */ + FW_EADDRNOTAVAIL = 99, /* cannot assigned requested address */ + FW_ENETDOWN = 100, /* network is down */ + FW_ENETUNREACH = 101, /* network is unreachable */ + FW_ENOBUFS = 105, /* no buffer space available */ + FW_ETIMEDOUT = 110, /* timeout */ + FW_EINPROGRESS = 115, /* fw internal */ + FW_SCSI_ABORT_REQUESTED = 128, /* */ + FW_SCSI_ABORT_TIMEDOUT = 129, /* */ + FW_SCSI_ABORTED = 130, /* */ + FW_SCSI_CLOSE_REQUESTED = 131, /* */ + FW_ERR_LINK_DOWN = 132, /* */ + FW_RDEV_NOT_READY = 133, /* */ + FW_ERR_RDEV_LOST = 134, /* */ + FW_ERR_RDEV_LOGO = 135, /* */ + FW_FCOE_NO_XCHG = 136, /* */ + FW_SCSI_RSP_ERR = 137, /* */ + FW_ERR_RDEV_IMPL_LOGO = 138, /* */ + FW_SCSI_UNDER_FLOW_ERR = 139, /* */ + FW_SCSI_OVER_FLOW_ERR = 140, /* */ + FW_SCSI_DDP_ERR = 141, /* DDP error*/ + FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */ +}; + +#define FW_T4VF_SGE_BASE_ADDR 0x0000 +#define FW_T4VF_MPS_BASE_ADDR 0x0100 +#define FW_T4VF_PL_BASE_ADDR 0x0200 +#define FW_T4VF_MBDATA_BASE_ADDR 0x0240 +#define FW_T4VF_CIM_BASE_ADDR 0x0300 + +enum fw_wr_opcodes { + FW_FILTER_WR = 0x02, + FW_ULPTX_WR = 0x04, + FW_TP_WR = 0x05, + FW_ETH_TX_PKT_WR = 0x08, + FW_OFLD_CONNECTION_WR = 0x2f, + FW_FLOWC_WR = 0x0a, + FW_OFLD_TX_DATA_WR = 0x0b, + FW_CMD_WR = 0x10, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_RI_RES_WR = 0x0c, + FW_RI_INIT_WR = 0x0d, + FW_RI_RDMA_WRITE_WR = 0x14, + FW_RI_SEND_WR = 0x15, + FW_RI_RDMA_READ_WR = 0x16, + FW_RI_RECV_WR = 0x17, + FW_RI_BIND_MW_WR = 0x18, + FW_RI_FR_NSMR_WR = 0x19, + FW_RI_INV_LSTAG_WR = 0x1a, + FW_LASTC2E_WR = 0x70 +}; + +struct fw_wr_hdr { + __be32 hi; + __be32 lo; +}; + +/* work request opcode (hi) */ +#define FW_WR_OP_S 24 +#define FW_WR_OP_M 0xff +#define FW_WR_OP_V(x) ((x) << FW_WR_OP_S) +#define FW_WR_OP_G(x) (((x) >> FW_WR_OP_S) & FW_WR_OP_M) + +/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER */ +#define FW_WR_ATOMIC_S 23 +#define FW_WR_ATOMIC_V(x) ((x) << FW_WR_ATOMIC_S) + +/* flush flag (hi) - firmware flushes flushable work request buffered + * in the flow context. 
+ */ +#define FW_WR_FLUSH_S 22 +#define FW_WR_FLUSH_V(x) ((x) << FW_WR_FLUSH_S) + +/* completion flag (hi) - firmware generates a cpl_fw6_ack */ +#define FW_WR_COMPL_S 21 +#define FW_WR_COMPL_V(x) ((x) << FW_WR_COMPL_S) +#define FW_WR_COMPL_F FW_WR_COMPL_V(1U) + +/* work request immediate data length (hi) */ +#define FW_WR_IMMDLEN_S 0 +#define FW_WR_IMMDLEN_M 0xff +#define FW_WR_IMMDLEN_V(x) ((x) << FW_WR_IMMDLEN_S) + +/* egress queue status update to associated ingress queue entry (lo) */ +#define FW_WR_EQUIQ_S 31 +#define FW_WR_EQUIQ_V(x) ((x) << FW_WR_EQUIQ_S) +#define FW_WR_EQUIQ_F FW_WR_EQUIQ_V(1U) + +/* egress queue status update to egress queue status entry (lo) */ +#define FW_WR_EQUEQ_S 30 +#define FW_WR_EQUEQ_V(x) ((x) << FW_WR_EQUEQ_S) +#define FW_WR_EQUEQ_F FW_WR_EQUEQ_V(1U) + +/* flow context identifier (lo) */ +#define FW_WR_FLOWID_S 8 +#define FW_WR_FLOWID_V(x) ((x) << FW_WR_FLOWID_S) + +/* length in units of 16-bytes (lo) */ +#define FW_WR_LEN16_S 0 +#define FW_WR_LEN16_V(x) ((x) << FW_WR_LEN16_S) + +#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B +#define HW_TPL_FR_MT_PR_OV_P_FC 0X327 + +/* filter wr reply code in cookie in CPL_SET_TCB_RPL */ +enum fw_filter_wr_cookie { + FW_FILTER_WR_SUCCESS, + FW_FILTER_WR_FLT_ADDED, + FW_FILTER_WR_FLT_DELETED, + FW_FILTER_WR_SMT_TBL_FULL, + FW_FILTER_WR_EINVAL, +}; + +struct fw_filter_wr { + __be32 op_pkd; + __be32 len16_pkd; + __be64 r3; + __be32 tid_to_iq; + __be32 del_filter_to_l2tix; + __be16 ethtype; + __be16 ethtypem; + __u8 frag_to_ovlan_vldm; + __u8 smac_sel; + __be16 rx_chan_rx_rpl_iq; + __be32 maci_to_matchtypem; + __u8 ptcl; + __u8 ptclm; + __u8 ttyp; + __u8 ttypm; + __be16 ivlan; + __be16 ivlanm; + __be16 ovlan; + __be16 ovlanm; + __u8 lip[16]; + __u8 lipm[16]; + __u8 fip[16]; + __u8 fipm[16]; + __be16 lp; + __be16 lpm; + __be16 fp; + __be16 fpm; + __be16 r7; + __u8 sma[6]; +}; + +#define FW_FILTER_WR_TID_S 12 +#define FW_FILTER_WR_TID_M 0xfffff +#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S) +#define FW_FILTER_WR_TID_G(x) \ + (((x) >> FW_FILTER_WR_TID_S) & FW_FILTER_WR_TID_M) + +#define FW_FILTER_WR_RQTYPE_S 11 +#define FW_FILTER_WR_RQTYPE_M 0x1 +#define FW_FILTER_WR_RQTYPE_V(x) ((x) << FW_FILTER_WR_RQTYPE_S) +#define FW_FILTER_WR_RQTYPE_G(x) \ + (((x) >> FW_FILTER_WR_RQTYPE_S) & FW_FILTER_WR_RQTYPE_M) +#define FW_FILTER_WR_RQTYPE_F FW_FILTER_WR_RQTYPE_V(1U) + +#define FW_FILTER_WR_NOREPLY_S 10 +#define FW_FILTER_WR_NOREPLY_M 0x1 +#define FW_FILTER_WR_NOREPLY_V(x) ((x) << FW_FILTER_WR_NOREPLY_S) +#define FW_FILTER_WR_NOREPLY_G(x) \ + (((x) >> FW_FILTER_WR_NOREPLY_S) & FW_FILTER_WR_NOREPLY_M) +#define FW_FILTER_WR_NOREPLY_F FW_FILTER_WR_NOREPLY_V(1U) + +#define FW_FILTER_WR_IQ_S 0 +#define FW_FILTER_WR_IQ_M 0x3ff +#define FW_FILTER_WR_IQ_V(x) ((x) << FW_FILTER_WR_IQ_S) +#define FW_FILTER_WR_IQ_G(x) \ + (((x) >> FW_FILTER_WR_IQ_S) & FW_FILTER_WR_IQ_M) + +#define FW_FILTER_WR_DEL_FILTER_S 31 +#define FW_FILTER_WR_DEL_FILTER_M 0x1 +#define FW_FILTER_WR_DEL_FILTER_V(x) ((x) << FW_FILTER_WR_DEL_FILTER_S) +#define FW_FILTER_WR_DEL_FILTER_G(x) \ + (((x) >> FW_FILTER_WR_DEL_FILTER_S) & FW_FILTER_WR_DEL_FILTER_M) +#define FW_FILTER_WR_DEL_FILTER_F FW_FILTER_WR_DEL_FILTER_V(1U) + +#define FW_FILTER_WR_RPTTID_S 25 +#define FW_FILTER_WR_RPTTID_M 0x1 +#define FW_FILTER_WR_RPTTID_V(x) ((x) << FW_FILTER_WR_RPTTID_S) +#define FW_FILTER_WR_RPTTID_G(x) \ + (((x) >> FW_FILTER_WR_RPTTID_S) & FW_FILTER_WR_RPTTID_M) +#define FW_FILTER_WR_RPTTID_F FW_FILTER_WR_RPTTID_V(1U) + +#define FW_FILTER_WR_DROP_S 24 +#define FW_FILTER_WR_DROP_M 0x1 +#define 
FW_FILTER_WR_DROP_V(x) ((x) << FW_FILTER_WR_DROP_S) +#define FW_FILTER_WR_DROP_G(x) \ + (((x) >> FW_FILTER_WR_DROP_S) & FW_FILTER_WR_DROP_M) +#define FW_FILTER_WR_DROP_F FW_FILTER_WR_DROP_V(1U) + +#define FW_FILTER_WR_DIRSTEER_S 23 +#define FW_FILTER_WR_DIRSTEER_M 0x1 +#define FW_FILTER_WR_DIRSTEER_V(x) ((x) << FW_FILTER_WR_DIRSTEER_S) +#define FW_FILTER_WR_DIRSTEER_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEER_S) & FW_FILTER_WR_DIRSTEER_M) +#define FW_FILTER_WR_DIRSTEER_F FW_FILTER_WR_DIRSTEER_V(1U) + +#define FW_FILTER_WR_MASKHASH_S 22 +#define FW_FILTER_WR_MASKHASH_M 0x1 +#define FW_FILTER_WR_MASKHASH_V(x) ((x) << FW_FILTER_WR_MASKHASH_S) +#define FW_FILTER_WR_MASKHASH_G(x) \ + (((x) >> FW_FILTER_WR_MASKHASH_S) & FW_FILTER_WR_MASKHASH_M) +#define FW_FILTER_WR_MASKHASH_F FW_FILTER_WR_MASKHASH_V(1U) + +#define FW_FILTER_WR_DIRSTEERHASH_S 21 +#define FW_FILTER_WR_DIRSTEERHASH_M 0x1 +#define FW_FILTER_WR_DIRSTEERHASH_V(x) ((x) << FW_FILTER_WR_DIRSTEERHASH_S) +#define FW_FILTER_WR_DIRSTEERHASH_G(x) \ + (((x) >> FW_FILTER_WR_DIRSTEERHASH_S) & FW_FILTER_WR_DIRSTEERHASH_M) +#define FW_FILTER_WR_DIRSTEERHASH_F FW_FILTER_WR_DIRSTEERHASH_V(1U) + +#define FW_FILTER_WR_LPBK_S 20 +#define FW_FILTER_WR_LPBK_M 0x1 +#define FW_FILTER_WR_LPBK_V(x) ((x) << FW_FILTER_WR_LPBK_S) +#define FW_FILTER_WR_LPBK_G(x) \ + (((x) >> FW_FILTER_WR_LPBK_S) & FW_FILTER_WR_LPBK_M) +#define FW_FILTER_WR_LPBK_F FW_FILTER_WR_LPBK_V(1U) + +#define FW_FILTER_WR_DMAC_S 19 +#define FW_FILTER_WR_DMAC_M 0x1 +#define FW_FILTER_WR_DMAC_V(x) ((x) << FW_FILTER_WR_DMAC_S) +#define FW_FILTER_WR_DMAC_G(x) \ + (((x) >> FW_FILTER_WR_DMAC_S) & FW_FILTER_WR_DMAC_M) +#define FW_FILTER_WR_DMAC_F FW_FILTER_WR_DMAC_V(1U) + +#define FW_FILTER_WR_SMAC_S 18 +#define FW_FILTER_WR_SMAC_M 0x1 +#define FW_FILTER_WR_SMAC_V(x) ((x) << FW_FILTER_WR_SMAC_S) +#define FW_FILTER_WR_SMAC_G(x) \ + (((x) >> FW_FILTER_WR_SMAC_S) & FW_FILTER_WR_SMAC_M) +#define FW_FILTER_WR_SMAC_F FW_FILTER_WR_SMAC_V(1U) + +#define FW_FILTER_WR_INSVLAN_S 17 +#define FW_FILTER_WR_INSVLAN_M 0x1 +#define FW_FILTER_WR_INSVLAN_V(x) ((x) << FW_FILTER_WR_INSVLAN_S) +#define FW_FILTER_WR_INSVLAN_G(x) \ + (((x) >> FW_FILTER_WR_INSVLAN_S) & FW_FILTER_WR_INSVLAN_M) +#define FW_FILTER_WR_INSVLAN_F FW_FILTER_WR_INSVLAN_V(1U) + +#define FW_FILTER_WR_RMVLAN_S 16 +#define FW_FILTER_WR_RMVLAN_M 0x1 +#define FW_FILTER_WR_RMVLAN_V(x) ((x) << FW_FILTER_WR_RMVLAN_S) +#define FW_FILTER_WR_RMVLAN_G(x) \ + (((x) >> FW_FILTER_WR_RMVLAN_S) & FW_FILTER_WR_RMVLAN_M) +#define FW_FILTER_WR_RMVLAN_F FW_FILTER_WR_RMVLAN_V(1U) + +#define FW_FILTER_WR_HITCNTS_S 15 +#define FW_FILTER_WR_HITCNTS_M 0x1 +#define FW_FILTER_WR_HITCNTS_V(x) ((x) << FW_FILTER_WR_HITCNTS_S) +#define FW_FILTER_WR_HITCNTS_G(x) \ + (((x) >> FW_FILTER_WR_HITCNTS_S) & FW_FILTER_WR_HITCNTS_M) +#define FW_FILTER_WR_HITCNTS_F FW_FILTER_WR_HITCNTS_V(1U) + +#define FW_FILTER_WR_TXCHAN_S 13 +#define FW_FILTER_WR_TXCHAN_M 0x3 +#define FW_FILTER_WR_TXCHAN_V(x) ((x) << FW_FILTER_WR_TXCHAN_S) +#define FW_FILTER_WR_TXCHAN_G(x) \ + (((x) >> FW_FILTER_WR_TXCHAN_S) & FW_FILTER_WR_TXCHAN_M) + +#define FW_FILTER_WR_PRIO_S 12 +#define FW_FILTER_WR_PRIO_M 0x1 +#define FW_FILTER_WR_PRIO_V(x) ((x) << FW_FILTER_WR_PRIO_S) +#define FW_FILTER_WR_PRIO_G(x) \ + (((x) >> FW_FILTER_WR_PRIO_S) & FW_FILTER_WR_PRIO_M) +#define FW_FILTER_WR_PRIO_F FW_FILTER_WR_PRIO_V(1U) + +#define FW_FILTER_WR_L2TIX_S 0 +#define FW_FILTER_WR_L2TIX_M 0xfff +#define FW_FILTER_WR_L2TIX_V(x) ((x) << FW_FILTER_WR_L2TIX_S) +#define FW_FILTER_WR_L2TIX_G(x) \ + (((x) >> FW_FILTER_WR_L2TIX_S) & 
FW_FILTER_WR_L2TIX_M) + +#define FW_FILTER_WR_FRAG_S 7 +#define FW_FILTER_WR_FRAG_M 0x1 +#define FW_FILTER_WR_FRAG_V(x) ((x) << FW_FILTER_WR_FRAG_S) +#define FW_FILTER_WR_FRAG_G(x) \ + (((x) >> FW_FILTER_WR_FRAG_S) & FW_FILTER_WR_FRAG_M) +#define FW_FILTER_WR_FRAG_F FW_FILTER_WR_FRAG_V(1U) + +#define FW_FILTER_WR_FRAGM_S 6 +#define FW_FILTER_WR_FRAGM_M 0x1 +#define FW_FILTER_WR_FRAGM_V(x) ((x) << FW_FILTER_WR_FRAGM_S) +#define FW_FILTER_WR_FRAGM_G(x) \ + (((x) >> FW_FILTER_WR_FRAGM_S) & FW_FILTER_WR_FRAGM_M) +#define FW_FILTER_WR_FRAGM_F FW_FILTER_WR_FRAGM_V(1U) + +#define FW_FILTER_WR_IVLAN_VLD_S 5 +#define FW_FILTER_WR_IVLAN_VLD_M 0x1 +#define FW_FILTER_WR_IVLAN_VLD_V(x) ((x) << FW_FILTER_WR_IVLAN_VLD_S) +#define FW_FILTER_WR_IVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLD_S) & FW_FILTER_WR_IVLAN_VLD_M) +#define FW_FILTER_WR_IVLAN_VLD_F FW_FILTER_WR_IVLAN_VLD_V(1U) + +#define FW_FILTER_WR_OVLAN_VLD_S 4 +#define FW_FILTER_WR_OVLAN_VLD_M 0x1 +#define FW_FILTER_WR_OVLAN_VLD_V(x) ((x) << FW_FILTER_WR_OVLAN_VLD_S) +#define FW_FILTER_WR_OVLAN_VLD_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLD_S) & FW_FILTER_WR_OVLAN_VLD_M) +#define FW_FILTER_WR_OVLAN_VLD_F FW_FILTER_WR_OVLAN_VLD_V(1U) + +#define FW_FILTER_WR_IVLAN_VLDM_S 3 +#define FW_FILTER_WR_IVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_IVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_IVLAN_VLDM_S) +#define FW_FILTER_WR_IVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_IVLAN_VLDM_S) & FW_FILTER_WR_IVLAN_VLDM_M) +#define FW_FILTER_WR_IVLAN_VLDM_F FW_FILTER_WR_IVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_OVLAN_VLDM_S 2 +#define FW_FILTER_WR_OVLAN_VLDM_M 0x1 +#define FW_FILTER_WR_OVLAN_VLDM_V(x) ((x) << FW_FILTER_WR_OVLAN_VLDM_S) +#define FW_FILTER_WR_OVLAN_VLDM_G(x) \ + (((x) >> FW_FILTER_WR_OVLAN_VLDM_S) & FW_FILTER_WR_OVLAN_VLDM_M) +#define FW_FILTER_WR_OVLAN_VLDM_F FW_FILTER_WR_OVLAN_VLDM_V(1U) + +#define FW_FILTER_WR_RX_CHAN_S 15 +#define FW_FILTER_WR_RX_CHAN_M 0x1 +#define FW_FILTER_WR_RX_CHAN_V(x) ((x) << FW_FILTER_WR_RX_CHAN_S) +#define FW_FILTER_WR_RX_CHAN_G(x) \ + (((x) >> FW_FILTER_WR_RX_CHAN_S) & FW_FILTER_WR_RX_CHAN_M) +#define FW_FILTER_WR_RX_CHAN_F FW_FILTER_WR_RX_CHAN_V(1U) + +#define FW_FILTER_WR_RX_RPL_IQ_S 0 +#define FW_FILTER_WR_RX_RPL_IQ_M 0x3ff +#define FW_FILTER_WR_RX_RPL_IQ_V(x) ((x) << FW_FILTER_WR_RX_RPL_IQ_S) +#define FW_FILTER_WR_RX_RPL_IQ_G(x) \ + (((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M) + +#define FW_FILTER_WR_MACI_S 23 +#define FW_FILTER_WR_MACI_M 0x1ff +#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S) +#define FW_FILTER_WR_MACI_G(x) \ + (((x) >> FW_FILTER_WR_MACI_S) & FW_FILTER_WR_MACI_M) + +#define FW_FILTER_WR_MACIM_S 14 +#define FW_FILTER_WR_MACIM_M 0x1ff +#define FW_FILTER_WR_MACIM_V(x) ((x) << FW_FILTER_WR_MACIM_S) +#define FW_FILTER_WR_MACIM_G(x) \ + (((x) >> FW_FILTER_WR_MACIM_S) & FW_FILTER_WR_MACIM_M) + +#define FW_FILTER_WR_FCOE_S 13 +#define FW_FILTER_WR_FCOE_M 0x1 +#define FW_FILTER_WR_FCOE_V(x) ((x) << FW_FILTER_WR_FCOE_S) +#define FW_FILTER_WR_FCOE_G(x) \ + (((x) >> FW_FILTER_WR_FCOE_S) & FW_FILTER_WR_FCOE_M) +#define FW_FILTER_WR_FCOE_F FW_FILTER_WR_FCOE_V(1U) + +#define FW_FILTER_WR_FCOEM_S 12 +#define FW_FILTER_WR_FCOEM_M 0x1 +#define FW_FILTER_WR_FCOEM_V(x) ((x) << FW_FILTER_WR_FCOEM_S) +#define FW_FILTER_WR_FCOEM_G(x) \ + (((x) >> FW_FILTER_WR_FCOEM_S) & FW_FILTER_WR_FCOEM_M) +#define FW_FILTER_WR_FCOEM_F FW_FILTER_WR_FCOEM_V(1U) + +#define FW_FILTER_WR_PORT_S 9 +#define FW_FILTER_WR_PORT_M 0x7 +#define FW_FILTER_WR_PORT_V(x) ((x) << FW_FILTER_WR_PORT_S) +#define FW_FILTER_WR_PORT_G(x) \ + 
(((x) >> FW_FILTER_WR_PORT_S) & FW_FILTER_WR_PORT_M) + +#define FW_FILTER_WR_PORTM_S 6 +#define FW_FILTER_WR_PORTM_M 0x7 +#define FW_FILTER_WR_PORTM_V(x) ((x) << FW_FILTER_WR_PORTM_S) +#define FW_FILTER_WR_PORTM_G(x) \ + (((x) >> FW_FILTER_WR_PORTM_S) & FW_FILTER_WR_PORTM_M) + +#define FW_FILTER_WR_MATCHTYPE_S 3 +#define FW_FILTER_WR_MATCHTYPE_M 0x7 +#define FW_FILTER_WR_MATCHTYPE_V(x) ((x) << FW_FILTER_WR_MATCHTYPE_S) +#define FW_FILTER_WR_MATCHTYPE_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPE_S) & FW_FILTER_WR_MATCHTYPE_M) + +#define FW_FILTER_WR_MATCHTYPEM_S 0 +#define FW_FILTER_WR_MATCHTYPEM_M 0x7 +#define FW_FILTER_WR_MATCHTYPEM_V(x) ((x) << FW_FILTER_WR_MATCHTYPEM_S) +#define FW_FILTER_WR_MATCHTYPEM_G(x) \ + (((x) >> FW_FILTER_WR_MATCHTYPEM_S) & FW_FILTER_WR_MATCHTYPEM_M) + +struct fw_ulptx_wr { + __be32 op_to_compl; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_tp_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_eth_tx_pkt_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; +}; + +struct fw_ofld_connection_wr { + __be32 op_compl; + __be32 len16_pkd; + __u64 cookie; + __be64 r2; + __be64 r3; + struct fw_ofld_connection_le { + __be32 version_cpl; + __be32 filter; + __be32 r1; + __be16 lport; + __be16 pport; + union fw_ofld_connection_leip { + struct fw_ofld_connection_le_ipv4 { + __be32 pip; + __be32 lip; + __be64 r0; + __be64 r1; + __be64 r2; + } ipv4; + struct fw_ofld_connection_le_ipv6 { + __be64 pip_hi; + __be64 pip_lo; + __be64 lip_hi; + __be64 lip_lo; + } ipv6; + } u; + } le; + struct fw_ofld_connection_tcb { + __be32 t_state_to_astid; + __be16 cplrxdataack_cplpassacceptrpl; + __be16 rcv_adv; + __be32 rcv_nxt; + __be32 tx_max; + __be64 opt0; + __be32 opt2; + __be32 r1; + __be64 r2; + __be64 r3; + } tcb; +}; + +#define FW_OFLD_CONNECTION_WR_VERSION_S 31 +#define FW_OFLD_CONNECTION_WR_VERSION_M 0x1 +#define FW_OFLD_CONNECTION_WR_VERSION_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_VERSION_S) +#define FW_OFLD_CONNECTION_WR_VERSION_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_VERSION_S) & \ + FW_OFLD_CONNECTION_WR_VERSION_M) +#define FW_OFLD_CONNECTION_WR_VERSION_F \ + FW_OFLD_CONNECTION_WR_VERSION_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPL_S 30 +#define FW_OFLD_CONNECTION_WR_CPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPL_V(x) ((x) << FW_OFLD_CONNECTION_WR_CPL_S) +#define FW_OFLD_CONNECTION_WR_CPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPL_S) & FW_OFLD_CONNECTION_WR_CPL_M) +#define FW_OFLD_CONNECTION_WR_CPL_F FW_OFLD_CONNECTION_WR_CPL_V(1U) + +#define FW_OFLD_CONNECTION_WR_T_STATE_S 28 +#define FW_OFLD_CONNECTION_WR_T_STATE_M 0xf +#define FW_OFLD_CONNECTION_WR_T_STATE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_T_STATE_S) +#define FW_OFLD_CONNECTION_WR_T_STATE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_T_STATE_S) & \ + FW_OFLD_CONNECTION_WR_T_STATE_M) + +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_S 24 +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_M 0xf +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_RCV_SCALE_S) +#define FW_OFLD_CONNECTION_WR_RCV_SCALE_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_RCV_SCALE_S) & \ + FW_OFLD_CONNECTION_WR_RCV_SCALE_M) + +#define FW_OFLD_CONNECTION_WR_ASTID_S 0 +#define FW_OFLD_CONNECTION_WR_ASTID_M 0xffffff +#define FW_OFLD_CONNECTION_WR_ASTID_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_ASTID_S) +#define FW_OFLD_CONNECTION_WR_ASTID_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_ASTID_S) & FW_OFLD_CONNECTION_WR_ASTID_M) + +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S 15 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M 
0x1 +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLRXDATAACK_S) & \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_M) +#define FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F \ + FW_OFLD_CONNECTION_WR_CPLRXDATAACK_V(1U) + +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S 14 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M 0x1 +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(x) \ + ((x) << FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_G(x) \ + (((x) >> FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_S) & \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_M) +#define FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_F \ + FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL_V(1U) + +enum fw_flowc_mnem { + FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ + FW_FLOWC_MNEM_CH, + FW_FLOWC_MNEM_PORT, + FW_FLOWC_MNEM_IQID, + FW_FLOWC_MNEM_SNDNXT, + FW_FLOWC_MNEM_RCVNXT, + FW_FLOWC_MNEM_SNDBUF, + FW_FLOWC_MNEM_MSS, + FW_FLOWC_MNEM_TXDATAPLEN_MAX, +}; + +struct fw_flowc_mnemval { + u8 mnemonic; + u8 r4[3]; + __be32 val; +}; + +struct fw_flowc_wr { + __be32 op_to_nparams; + __be32 flowid_len16; + struct fw_flowc_mnemval mnemval[0]; +}; + +#define FW_FLOWC_WR_NPARAMS_S 0 +#define FW_FLOWC_WR_NPARAMS_V(x) ((x) << FW_FLOWC_WR_NPARAMS_S) + +struct fw_ofld_tx_data_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + __be32 plen; + __be32 tunnel_to_proxy; +}; + +#define FW_OFLD_TX_DATA_WR_TUNNEL_S 19 +#define FW_OFLD_TX_DATA_WR_TUNNEL_V(x) ((x) << FW_OFLD_TX_DATA_WR_TUNNEL_S) + +#define FW_OFLD_TX_DATA_WR_SAVE_S 18 +#define FW_OFLD_TX_DATA_WR_SAVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SAVE_S) + +#define FW_OFLD_TX_DATA_WR_FLUSH_S 17 +#define FW_OFLD_TX_DATA_WR_FLUSH_V(x) ((x) << FW_OFLD_TX_DATA_WR_FLUSH_S) +#define FW_OFLD_TX_DATA_WR_FLUSH_F FW_OFLD_TX_DATA_WR_FLUSH_V(1U) + +#define FW_OFLD_TX_DATA_WR_URGENT_S 16 +#define FW_OFLD_TX_DATA_WR_URGENT_V(x) ((x) << FW_OFLD_TX_DATA_WR_URGENT_S) + +#define FW_OFLD_TX_DATA_WR_MORE_S 15 +#define FW_OFLD_TX_DATA_WR_MORE_V(x) ((x) << FW_OFLD_TX_DATA_WR_MORE_S) + +#define FW_OFLD_TX_DATA_WR_SHOVE_S 14 +#define FW_OFLD_TX_DATA_WR_SHOVE_V(x) ((x) << FW_OFLD_TX_DATA_WR_SHOVE_S) +#define FW_OFLD_TX_DATA_WR_SHOVE_F FW_OFLD_TX_DATA_WR_SHOVE_V(1U) + +#define FW_OFLD_TX_DATA_WR_ULPMODE_S 10 +#define FW_OFLD_TX_DATA_WR_ULPMODE_V(x) ((x) << FW_OFLD_TX_DATA_WR_ULPMODE_S) + +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_S 6 +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(x) \ + ((x) << FW_OFLD_TX_DATA_WR_ULPSUBMODE_S) + +struct fw_cmd_wr { + __be32 op_dma; + __be32 len16_pkd; + __be64 cookie_daddr; +}; + +#define FW_CMD_WR_DMA_S 17 +#define FW_CMD_WR_DMA_V(x) ((x) << FW_CMD_WR_DMA_S) + +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + u8 ethmacdst[6]; + u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +#define FW_CMD_MAX_TIMEOUT 10000 + +/* + * If a host driver does a HELLO and discovers that there's already a MASTER + * selected, we may have to wait for that MASTER to finish issuing RESET, + * configuration and INITIALIZE commands. Also, there's a possibility that + * our own HELLO may get lost if it happens right as the MASTER is issuing a + * RESET command, so we need to be willing to make a few retries of our HELLO.
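
(Editorial aside, not part of the imported sources.) To make the FW_WR_* header fields defined earlier in this file concrete, here is a minimal user-space sketch that packs the two flits of a work request header; the mirrored macros, the local struct, and htonl() standing in for the kernel's cpu_to_be32() are illustration-only assumptions, not the driver's own helpers.

#include <stdint.h>
#include <arpa/inet.h>		/* htonl() stands in for cpu_to_be32() */

/* Field macros mirrored from the definitions above. */
#define FW_WR_OP_V(x)		((x) << 24)	/* FW_WR_OP_S */
#define FW_WR_COMPL_F		(1U << 21)	/* FW_WR_COMPL_S */
#define FW_WR_FLOWID_V(x)	((x) << 8)	/* FW_WR_FLOWID_S */
#define FW_WR_LEN16_V(x)	((x) << 0)	/* FW_WR_LEN16_S */

/* Mirror of struct fw_wr_hdr above; both words are big-endian on the wire. */
struct fw_wr_hdr {
	uint32_t hi;
	uint32_t lo;
};

/* Pack the two header flits: opcode (and optionally the completion request)
 * in the high word, flow id and length in 16-byte units in the low word.
 */
static void fw_wr_hdr_init(struct fw_wr_hdr *hdr, unsigned int op,
			   unsigned int flowid, unsigned int len16,
			   int want_completion)
{
	uint32_t hi = FW_WR_OP_V(op);
	uint32_t lo = FW_WR_FLOWID_V(flowid) | FW_WR_LEN16_V(len16);

	if (want_completion)
		hi |= FW_WR_COMPL_F;	/* ask firmware for a cpl_fw6_ack */

	hdr->hi = htonl(hi);
	hdr->lo = htonl(lo);
}

For instance, fw_wr_hdr_init(&hdr, 0x0a /* FW_FLOWC_WR, from the opcode enum above */, tid, len16, 1) would label the buffer as a FLOWC work request on flow "tid" and request a completion; how the rest of such a request is filled in is driver-specific and not shown here.
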
+ */ +#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT) +#define FW_CMD_HELLO_RETRIES 3 + + +enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, + FW_RESET_CMD = 0x03, + FW_HELLO_CMD = 0x04, + FW_BYE_CMD = 0x05, + FW_INITIALIZE_CMD = 0x06, + FW_CAPS_CONFIG_CMD = 0x07, + FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, + FW_IQ_CMD = 0x10, + FW_EQ_MNGT_CMD = 0x11, + FW_EQ_ETH_CMD = 0x12, + FW_EQ_CTRL_CMD = 0x13, + FW_EQ_OFLD_CMD = 0x21, + FW_VI_CMD = 0x14, + FW_VI_MAC_CMD = 0x15, + FW_VI_RXMODE_CMD = 0x16, + FW_VI_ENABLE_CMD = 0x17, + FW_ACL_MAC_CMD = 0x18, + FW_ACL_VLAN_CMD = 0x19, + FW_VI_STATS_CMD = 0x1a, + FW_PORT_CMD = 0x1b, + FW_PORT_STATS_CMD = 0x1c, + FW_PORT_LB_STATS_CMD = 0x1d, + FW_PORT_TRACE_CMD = 0x1e, + FW_PORT_TRACE_MMAP_CMD = 0x1f, + FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, + FW_RSS_VI_CONFIG_CMD = 0x23, + FW_DEVLOG_CMD = 0x25, + FW_CLIP_CMD = 0x28, + FW_LASTC2E_CMD = 0x40, + FW_ERROR_CMD = 0x80, + FW_DEBUG_CMD = 0x81, +}; + +enum fw_cmd_cap { + FW_CMD_CAP_PF = 0x01, + FW_CMD_CAP_DMAQ = 0x02, + FW_CMD_CAP_PORT = 0x04, + FW_CMD_CAP_PORTPROMISC = 0x08, + FW_CMD_CAP_PORTSTATS = 0x10, + FW_CMD_CAP_VF = 0x80, +}; + +/* + * Generic command header flit0 + */ +struct fw_cmd_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_CMD_OP_S 24 +#define FW_CMD_OP_M 0xff +#define FW_CMD_OP_V(x) ((x) << FW_CMD_OP_S) +#define FW_CMD_OP_G(x) (((x) >> FW_CMD_OP_S) & FW_CMD_OP_M) + +#define FW_CMD_REQUEST_S 23 +#define FW_CMD_REQUEST_V(x) ((x) << FW_CMD_REQUEST_S) +#define FW_CMD_REQUEST_F FW_CMD_REQUEST_V(1U) + +#define FW_CMD_READ_S 22 +#define FW_CMD_READ_V(x) ((x) << FW_CMD_READ_S) +#define FW_CMD_READ_F FW_CMD_READ_V(1U) + +#define FW_CMD_WRITE_S 21 +#define FW_CMD_WRITE_V(x) ((x) << FW_CMD_WRITE_S) +#define FW_CMD_WRITE_F FW_CMD_WRITE_V(1U) + +#define FW_CMD_EXEC_S 20 +#define FW_CMD_EXEC_V(x) ((x) << FW_CMD_EXEC_S) +#define FW_CMD_EXEC_F FW_CMD_EXEC_V(1U) + +#define FW_CMD_RAMASK_S 20 +#define FW_CMD_RAMASK_V(x) ((x) << FW_CMD_RAMASK_S) + +#define FW_CMD_RETVAL_S 8 +#define FW_CMD_RETVAL_M 0xff +#define FW_CMD_RETVAL_V(x) ((x) << FW_CMD_RETVAL_S) +#define FW_CMD_RETVAL_G(x) (((x) >> FW_CMD_RETVAL_S) & FW_CMD_RETVAL_M) + +#define FW_CMD_LEN16_S 0 +#define FW_CMD_LEN16_V(x) ((x) << FW_CMD_LEN16_S) + +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) + +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_FIRMWARE = 0x0001, + FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, + FW_LDST_ADDRSPC_SGE_INGC = 0x0009, + FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, + FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, + FW_LDST_ADDRSPC_TP_PIO = 0x0010, + FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, + FW_LDST_ADDRSPC_TP_MIB = 0x0012, + FW_LDST_ADDRSPC_MDIO = 0x0018, + FW_LDST_ADDRSPC_MPS = 0x0020, + FW_LDST_ADDRSPC_FUNC = 0x0028, + FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029, +}; + +enum fw_ldst_mps_fid { + FW_LDST_MPS_ATRB, + FW_LDST_MPS_RPLC +}; + +enum fw_ldst_func_access_ctl { + FW_LDST_FUNC_ACC_CTL_VIID, + FW_LDST_FUNC_ACC_CTL_FID +}; + +enum fw_ldst_func_mod_index { + FW_LDST_FUNC_MPS +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; +#define FW_LDST_CMD_ADDRSPACE_S 0 +#define FW_LDST_CMD_ADDRSPACE_V(x) ((x) << FW_LDST_CMD_ADDRSPACE_S) + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_pkd; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 
raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + u8 access_ctl; + u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + struct fw_ldst_pcie { + u8 ctrl_to_fn; + u8 bnum; + u8 r; + u8 ext_r; + u8 select_naccess; + u8 pcie_fn; + __be16 nset_pkd; + __be32 data[12]; + } pcie; + } u; +}; + +#define FW_LDST_CMD_MSG_S 31 +#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S) + +#define FW_LDST_CMD_PADDR_S 8 +#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S) + +#define FW_LDST_CMD_MMD_S 0 +#define FW_LDST_CMD_MMD_V(x) ((x) << FW_LDST_CMD_MMD_S) + +#define FW_LDST_CMD_FID_S 15 +#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S) + +#define FW_LDST_CMD_CTL_S 0 +#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S) + +#define FW_LDST_CMD_RPLCPF_S 0 +#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S) + +#define FW_LDST_CMD_LC_S 4 +#define FW_LDST_CMD_LC_V(x) ((x) << FW_LDST_CMD_LC_S) +#define FW_LDST_CMD_LC_F FW_LDST_CMD_LC_V(1U) + +#define FW_LDST_CMD_FN_S 0 +#define FW_LDST_CMD_FN_V(x) ((x) << FW_LDST_CMD_FN_S) + +#define FW_LDST_CMD_NACCESS_S 0 +#define FW_LDST_CMD_NACCESS_V(x) ((x) << FW_LDST_CMD_NACCESS_S) + +struct fw_reset_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 val; + __be32 halt_pkd; +}; + +#define FW_RESET_CMD_HALT_S 31 +#define FW_RESET_CMD_HALT_M 0x1 +#define FW_RESET_CMD_HALT_V(x) ((x) << FW_RESET_CMD_HALT_S) +#define FW_RESET_CMD_HALT_G(x) \ + (((x) >> FW_RESET_CMD_HALT_S) & FW_RESET_CMD_HALT_M) +#define FW_RESET_CMD_HALT_F FW_RESET_CMD_HALT_V(1U) + +enum fw_hellow_cmd { + fw_hello_cmd_stage_os = 0x0 +}; + +struct fw_hello_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 err_to_clearinit; + __be32 fwrev; +}; + +#define FW_HELLO_CMD_ERR_S 31 +#define FW_HELLO_CMD_ERR_V(x) ((x) << FW_HELLO_CMD_ERR_S) +#define FW_HELLO_CMD_ERR_F FW_HELLO_CMD_ERR_V(1U) + +#define FW_HELLO_CMD_INIT_S 30 +#define FW_HELLO_CMD_INIT_V(x) ((x) << FW_HELLO_CMD_INIT_S) +#define FW_HELLO_CMD_INIT_F FW_HELLO_CMD_INIT_V(1U) + +#define FW_HELLO_CMD_MASTERDIS_S 29 +#define FW_HELLO_CMD_MASTERDIS_V(x) ((x) << FW_HELLO_CMD_MASTERDIS_S) + +#define FW_HELLO_CMD_MASTERFORCE_S 28 +#define FW_HELLO_CMD_MASTERFORCE_V(x) ((x) << FW_HELLO_CMD_MASTERFORCE_S) + +#define FW_HELLO_CMD_MBMASTER_S 24 +#define FW_HELLO_CMD_MBMASTER_M 0xfU +#define FW_HELLO_CMD_MBMASTER_V(x) ((x) << FW_HELLO_CMD_MBMASTER_S) +#define FW_HELLO_CMD_MBMASTER_G(x) \ + (((x) >> FW_HELLO_CMD_MBMASTER_S) & FW_HELLO_CMD_MBMASTER_M) + +#define FW_HELLO_CMD_MBASYNCNOTINT_S 23 +#define FW_HELLO_CMD_MBASYNCNOTINT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOTINT_S) + +#define FW_HELLO_CMD_MBASYNCNOT_S 20 +#define FW_HELLO_CMD_MBASYNCNOT_V(x) ((x) << FW_HELLO_CMD_MBASYNCNOT_S) + +#define FW_HELLO_CMD_STAGE_S 17 +#define FW_HELLO_CMD_STAGE_V(x) ((x) << FW_HELLO_CMD_STAGE_S) + +#define FW_HELLO_CMD_CLEARINIT_S 16 +#define FW_HELLO_CMD_CLEARINIT_V(x) ((x) << FW_HELLO_CMD_CLEARINIT_S) +#define FW_HELLO_CMD_CLEARINIT_F FW_HELLO_CMD_CLEARINIT_V(1U) + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_hm { + FW_CAPS_CONFIG_HM_PCIE = 0x00000001, + FW_CAPS_CONFIG_HM_PL = 0x00000002, + FW_CAPS_CONFIG_HM_SGE = 0x00000004, + 
FW_CAPS_CONFIG_HM_CIM = 0x00000008, + FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, + FW_CAPS_CONFIG_HM_TP = 0x00000020, + FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, + FW_CAPS_CONFIG_HM_PMRX = 0x00000080, + FW_CAPS_CONFIG_HM_PMTX = 0x00000100, + FW_CAPS_CONFIG_HM_MC = 0x00000200, + FW_CAPS_CONFIG_HM_LE = 0x00000400, + FW_CAPS_CONFIG_HM_MPS = 0x00000800, + FW_CAPS_CONFIG_HM_XGMAC = 0x00001000, + FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, + FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, + FW_CAPS_CONFIG_HM_MI = 0x00008000, + FW_CAPS_CONFIG_HM_I2CM = 0x00010000, + FW_CAPS_CONFIG_HM_NCSI = 0x00020000, + FW_CAPS_CONFIG_HM_SMB = 0x00040000, + FW_CAPS_CONFIG_HM_MA = 0x00080000, + FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, + FW_CAPS_CONFIG_HM_PMU = 0x00200000, + FW_CAPS_CONFIG_HM_UART = 0x00400000, + FW_CAPS_CONFIG_HM_SF = 0x00800000, +}; + +enum fw_caps_config_nbm { + FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, + FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, +}; + +enum fw_caps_config_link { + FW_CAPS_CONFIG_LINK_PPP = 0x00000001, + FW_CAPS_CONFIG_LINK_QFC = 0x00000002, + FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, +}; + +enum fw_caps_config_switch { + FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, + FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC = 0x00000001, + FW_CAPS_CONFIG_NIC_VM = 0x00000002, +}; + +enum fw_caps_config_ofld { + FW_CAPS_CONFIG_OFLD = 0x00000001, +}; + +enum fw_caps_config_rdma { + FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, + FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, +}; + +enum fw_caps_config_iscsi { + FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, + FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, + FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, + FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, +}; + +enum fw_caps_config_fcoe { + FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, + FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, + FW_CAPS_CONFIG_FCOE_CTRL_OFLD = 0x00000004, +}; + +enum fw_memtype_cf { + FW_MEMTYPE_CF_EDC0 = 0x0, + FW_MEMTYPE_CF_EDC1 = 0x1, + FW_MEMTYPE_CF_EXTMEM = 0x2, + FW_MEMTYPE_CF_FLASH = 0x4, + FW_MEMTYPE_CF_INTERNAL = 0x5, + FW_MEMTYPE_CF_EXTMEM1 = 0x6, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 cfvalid_to_len16; + __be32 r2; + __be32 hwmbitmap; + __be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 ofldcaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 cfcsum; + __be32 finiver; + __be32 finicsum; +}; + +#define FW_CAPS_CONFIG_CMD_CFVALID_S 27 +#define FW_CAPS_CONFIG_CMD_CFVALID_V(x) ((x) << FW_CAPS_CONFIG_CMD_CFVALID_S) +#define FW_CAPS_CONFIG_CMD_CFVALID_F FW_CAPS_CONFIG_CMD_CFVALID_V(1U) + +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S 24 +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMTYPE_CF_S) + +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S 16 +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(x) \ + ((x) << FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_S) + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ + FW_PARAMS_MNEM_CHNET = 5, /* chnet params */ + FW_PARAMS_MNEM_LAST +}; + +/* + * device parameters + */ +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs + * allocated by the 
device's + * Lookup Engine + */ + FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, + FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, + FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, + FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, + FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, + FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, + FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, + FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A, + FW_PARAMS_PARAM_DEV_FWREV = 0x0B, + FW_PARAMS_PARAM_DEV_TPREV = 0x0C, + FW_PARAMS_PARAM_DEV_CF = 0x0D, + FW_PARAMS_PARAM_DEV_DIAG = 0x11, + FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */ + FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */ + FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, + FW_PARAMS_PARAM_DEV_FWCACHE = 0x18, +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, + FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, + FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, + FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, + FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, + FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, + FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, + FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, + FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, + FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, + FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, + FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, + FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, + FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, + FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, + FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, + FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, + FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, + FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, + FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, + FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, + FW_PARAMS_PARAM_PFVF_SQRQ_START = 0x15, + FW_PARAMS_PARAM_PFVF_SQRQ_END = 0x16, + FW_PARAMS_PARAM_PFVF_CQ_START = 0x17, + FW_PARAMS_PARAM_PFVF_CQ_END = 0x18, + FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, + FW_PARAMS_PARAM_PFVF_VIID = 0x24, + FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, + FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, + FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, + FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28, + FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29, + FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, + FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, + FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, + FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D, + FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E, + FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30, + FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31 +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, + FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, + FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, +}; + +enum fw_params_param_dev_diag { + FW_PARAM_DEV_DIAG_TMP = 0x00, + FW_PARAM_DEV_DIAG_VDD = 0x01, +}; + +enum fw_params_param_dev_fwcache { + FW_PARAM_DEV_FWCACHE_FLUSH = 0x00, + FW_PARAM_DEV_FWCACHE_FLUSHINV = 0x01, +}; + +#define FW_PARAMS_MNEM_S 24 +#define FW_PARAMS_MNEM_V(x) ((x) << FW_PARAMS_MNEM_S) + +#define FW_PARAMS_PARAM_X_S 16 +#define FW_PARAMS_PARAM_X_V(x) ((x) << FW_PARAMS_PARAM_X_S) + +#define FW_PARAMS_PARAM_Y_S 8 +#define FW_PARAMS_PARAM_Y_M 0xffU +#define FW_PARAMS_PARAM_Y_V(x) ((x) << FW_PARAMS_PARAM_Y_S) +#define FW_PARAMS_PARAM_Y_G(x) (((x) >> FW_PARAMS_PARAM_Y_S) &\ + FW_PARAMS_PARAM_Y_M) + +#define FW_PARAMS_PARAM_Z_S 0 +#define FW_PARAMS_PARAM_Z_M 0xffu +#define FW_PARAMS_PARAM_Z_V(x) ((x) << 
FW_PARAMS_PARAM_Z_S) +#define FW_PARAMS_PARAM_Z_G(x) (((x) >> FW_PARAMS_PARAM_Z_S) &\ + FW_PARAMS_PARAM_Z_M) + +#define FW_PARAMS_PARAM_XYZ_S 0 +#define FW_PARAMS_PARAM_XYZ_V(x) ((x) << FW_PARAMS_PARAM_XYZ_S) + +#define FW_PARAMS_PARAM_YZ_S 0 +#define FW_PARAMS_PARAM_YZ_V(x) ((x) << FW_PARAMS_PARAM_YZ_S) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define FW_PARAMS_CMD_PFN_S 8 +#define FW_PARAMS_CMD_PFN_V(x) ((x) << FW_PARAMS_CMD_PFN_S) + +#define FW_PARAMS_CMD_VFN_S 0 +#define FW_PARAMS_CMD_VFN_V(x) ((x) << FW_PARAMS_CMD_VFN_S) + +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 type_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define FW_PFVF_CMD_PFN_S 8 +#define FW_PFVF_CMD_PFN_V(x) ((x) << FW_PFVF_CMD_PFN_S) + +#define FW_PFVF_CMD_VFN_S 0 +#define FW_PFVF_CMD_VFN_V(x) ((x) << FW_PFVF_CMD_VFN_S) + +#define FW_PFVF_CMD_NIQFLINT_S 20 +#define FW_PFVF_CMD_NIQFLINT_M 0xfff +#define FW_PFVF_CMD_NIQFLINT_V(x) ((x) << FW_PFVF_CMD_NIQFLINT_S) +#define FW_PFVF_CMD_NIQFLINT_G(x) \ + (((x) >> FW_PFVF_CMD_NIQFLINT_S) & FW_PFVF_CMD_NIQFLINT_M) + +#define FW_PFVF_CMD_NIQ_S 0 +#define FW_PFVF_CMD_NIQ_M 0xfffff +#define FW_PFVF_CMD_NIQ_V(x) ((x) << FW_PFVF_CMD_NIQ_S) +#define FW_PFVF_CMD_NIQ_G(x) \ + (((x) >> FW_PFVF_CMD_NIQ_S) & FW_PFVF_CMD_NIQ_M) + +#define FW_PFVF_CMD_TYPE_S 31 +#define FW_PFVF_CMD_TYPE_M 0x1 +#define FW_PFVF_CMD_TYPE_V(x) ((x) << FW_PFVF_CMD_TYPE_S) +#define FW_PFVF_CMD_TYPE_G(x) \ + (((x) >> FW_PFVF_CMD_TYPE_S) & FW_PFVF_CMD_TYPE_M) +#define FW_PFVF_CMD_TYPE_F FW_PFVF_CMD_TYPE_V(1U) + +#define FW_PFVF_CMD_CMASK_S 24 +#define FW_PFVF_CMD_CMASK_M 0xf +#define FW_PFVF_CMD_CMASK_V(x) ((x) << FW_PFVF_CMD_CMASK_S) +#define FW_PFVF_CMD_CMASK_G(x) \ + (((x) >> FW_PFVF_CMD_CMASK_S) & FW_PFVF_CMD_CMASK_M) + +#define FW_PFVF_CMD_PMASK_S 20 +#define FW_PFVF_CMD_PMASK_M 0xf +#define FW_PFVF_CMD_PMASK_V(x) ((x) << FW_PFVF_CMD_PMASK_S) +#define FW_PFVF_CMD_PMASK_G(x) \ + (((x) >> FW_PFVF_CMD_PMASK_S) & FW_PFVF_CMD_PMASK_M) + +#define FW_PFVF_CMD_NEQ_S 0 +#define FW_PFVF_CMD_NEQ_M 0xfffff +#define FW_PFVF_CMD_NEQ_V(x) ((x) << FW_PFVF_CMD_NEQ_S) +#define FW_PFVF_CMD_NEQ_G(x) \ + (((x) >> FW_PFVF_CMD_NEQ_S) & FW_PFVF_CMD_NEQ_M) + +#define FW_PFVF_CMD_TC_S 24 +#define FW_PFVF_CMD_TC_M 0xff +#define FW_PFVF_CMD_TC_V(x) ((x) << FW_PFVF_CMD_TC_S) +#define FW_PFVF_CMD_TC_G(x) (((x) >> FW_PFVF_CMD_TC_S) & FW_PFVF_CMD_TC_M) + +#define FW_PFVF_CMD_NVI_S 16 +#define FW_PFVF_CMD_NVI_M 0xff +#define FW_PFVF_CMD_NVI_V(x) ((x) << FW_PFVF_CMD_NVI_S) +#define FW_PFVF_CMD_NVI_G(x) (((x) >> FW_PFVF_CMD_NVI_S) & FW_PFVF_CMD_NVI_M) + +#define FW_PFVF_CMD_NEXACTF_S 0 +#define FW_PFVF_CMD_NEXACTF_M 0xffff +#define FW_PFVF_CMD_NEXACTF_V(x) ((x) << FW_PFVF_CMD_NEXACTF_S) +#define FW_PFVF_CMD_NEXACTF_G(x) \ + (((x) >> FW_PFVF_CMD_NEXACTF_S) & FW_PFVF_CMD_NEXACTF_M) + +#define FW_PFVF_CMD_R_CAPS_S 24 +#define FW_PFVF_CMD_R_CAPS_M 0xff +#define FW_PFVF_CMD_R_CAPS_V(x) ((x) << FW_PFVF_CMD_R_CAPS_S) +#define FW_PFVF_CMD_R_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_R_CAPS_S) & FW_PFVF_CMD_R_CAPS_M) + +#define FW_PFVF_CMD_WX_CAPS_S 16 +#define FW_PFVF_CMD_WX_CAPS_M 0xff +#define FW_PFVF_CMD_WX_CAPS_V(x) ((x) << FW_PFVF_CMD_WX_CAPS_S) +#define FW_PFVF_CMD_WX_CAPS_G(x) \ + (((x) >> FW_PFVF_CMD_WX_CAPS_S) & FW_PFVF_CMD_WX_CAPS_M) + +#define FW_PFVF_CMD_NETHCTRL_S 0 +#define FW_PFVF_CMD_NETHCTRL_M 0xffff +#define 
FW_PFVF_CMD_NETHCTRL_V(x) ((x) << FW_PFVF_CMD_NETHCTRL_S) +#define FW_PFVF_CMD_NETHCTRL_G(x) \ + (((x) >> FW_PFVF_CMD_NETHCTRL_S) & FW_PFVF_CMD_NETHCTRL_M) + +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, + FW_IQ_TYPE_NO_FL_INT_CAP +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define FW_IQ_CMD_PFN_S 8 +#define FW_IQ_CMD_PFN_V(x) ((x) << FW_IQ_CMD_PFN_S) + +#define FW_IQ_CMD_VFN_S 0 +#define FW_IQ_CMD_VFN_V(x) ((x) << FW_IQ_CMD_VFN_S) + +#define FW_IQ_CMD_ALLOC_S 31 +#define FW_IQ_CMD_ALLOC_V(x) ((x) << FW_IQ_CMD_ALLOC_S) +#define FW_IQ_CMD_ALLOC_F FW_IQ_CMD_ALLOC_V(1U) + +#define FW_IQ_CMD_FREE_S 30 +#define FW_IQ_CMD_FREE_V(x) ((x) << FW_IQ_CMD_FREE_S) +#define FW_IQ_CMD_FREE_F FW_IQ_CMD_FREE_V(1U) + +#define FW_IQ_CMD_MODIFY_S 29 +#define FW_IQ_CMD_MODIFY_V(x) ((x) << FW_IQ_CMD_MODIFY_S) +#define FW_IQ_CMD_MODIFY_F FW_IQ_CMD_MODIFY_V(1U) + +#define FW_IQ_CMD_IQSTART_S 28 +#define FW_IQ_CMD_IQSTART_V(x) ((x) << FW_IQ_CMD_IQSTART_S) +#define FW_IQ_CMD_IQSTART_F FW_IQ_CMD_IQSTART_V(1U) + +#define FW_IQ_CMD_IQSTOP_S 27 +#define FW_IQ_CMD_IQSTOP_V(x) ((x) << FW_IQ_CMD_IQSTOP_S) +#define FW_IQ_CMD_IQSTOP_F FW_IQ_CMD_IQSTOP_V(1U) + +#define FW_IQ_CMD_TYPE_S 29 +#define FW_IQ_CMD_TYPE_V(x) ((x) << FW_IQ_CMD_TYPE_S) + +#define FW_IQ_CMD_IQASYNCH_S 28 +#define FW_IQ_CMD_IQASYNCH_V(x) ((x) << FW_IQ_CMD_IQASYNCH_S) + +#define FW_IQ_CMD_VIID_S 16 +#define FW_IQ_CMD_VIID_V(x) ((x) << FW_IQ_CMD_VIID_S) + +#define FW_IQ_CMD_IQANDST_S 15 +#define FW_IQ_CMD_IQANDST_V(x) ((x) << FW_IQ_CMD_IQANDST_S) + +#define FW_IQ_CMD_IQANUS_S 14 +#define FW_IQ_CMD_IQANUS_V(x) ((x) << FW_IQ_CMD_IQANUS_S) + +#define FW_IQ_CMD_IQANUD_S 12 +#define FW_IQ_CMD_IQANUD_V(x) ((x) << FW_IQ_CMD_IQANUD_S) + +#define FW_IQ_CMD_IQANDSTINDEX_S 0 +#define FW_IQ_CMD_IQANDSTINDEX_V(x) ((x) << FW_IQ_CMD_IQANDSTINDEX_S) + +#define FW_IQ_CMD_IQDROPRSS_S 15 +#define FW_IQ_CMD_IQDROPRSS_V(x) ((x) << FW_IQ_CMD_IQDROPRSS_S) +#define FW_IQ_CMD_IQDROPRSS_F FW_IQ_CMD_IQDROPRSS_V(1U) + +#define FW_IQ_CMD_IQGTSMODE_S 14 +#define FW_IQ_CMD_IQGTSMODE_V(x) ((x) << FW_IQ_CMD_IQGTSMODE_S) +#define FW_IQ_CMD_IQGTSMODE_F FW_IQ_CMD_IQGTSMODE_V(1U) + +#define FW_IQ_CMD_IQPCIECH_S 12 +#define FW_IQ_CMD_IQPCIECH_V(x) ((x) << FW_IQ_CMD_IQPCIECH_S) + +#define FW_IQ_CMD_IQDCAEN_S 11 +#define FW_IQ_CMD_IQDCAEN_V(x) ((x) << FW_IQ_CMD_IQDCAEN_S) + +#define FW_IQ_CMD_IQDCACPU_S 6 +#define FW_IQ_CMD_IQDCACPU_V(x) ((x) << FW_IQ_CMD_IQDCACPU_S) + +#define FW_IQ_CMD_IQINTCNTTHRESH_S 4 +#define FW_IQ_CMD_IQINTCNTTHRESH_V(x) ((x) << FW_IQ_CMD_IQINTCNTTHRESH_S) + +#define FW_IQ_CMD_IQO_S 3 +#define FW_IQ_CMD_IQO_V(x) ((x) << FW_IQ_CMD_IQO_S) +#define FW_IQ_CMD_IQO_F FW_IQ_CMD_IQO_V(1U) + +#define FW_IQ_CMD_IQCPRIO_S 2 +#define FW_IQ_CMD_IQCPRIO_V(x) ((x) << FW_IQ_CMD_IQCPRIO_S) + +#define FW_IQ_CMD_IQESIZE_S 0 +#define FW_IQ_CMD_IQESIZE_V(x) ((x) << FW_IQ_CMD_IQESIZE_S) + +#define FW_IQ_CMD_IQNS_S 31 +#define FW_IQ_CMD_IQNS_V(x) ((x) << FW_IQ_CMD_IQNS_S) + +#define FW_IQ_CMD_IQRO_S 30 +#define FW_IQ_CMD_IQRO_V(x) ((x) << FW_IQ_CMD_IQRO_S) + +#define FW_IQ_CMD_IQFLINTIQHSEN_S 28 +#define FW_IQ_CMD_IQFLINTIQHSEN_V(x) ((x) << FW_IQ_CMD_IQFLINTIQHSEN_S) + +#define 
FW_IQ_CMD_IQFLINTCONGEN_S 27 +#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S) + +#define FW_IQ_CMD_IQFLINTISCSIC_S 26 +#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S) + +#define FW_IQ_CMD_FL0CNGCHMAP_S 20 +#define FW_IQ_CMD_FL0CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL0CNGCHMAP_S) + +#define FW_IQ_CMD_FL0CACHELOCK_S 15 +#define FW_IQ_CMD_FL0CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL0CACHELOCK_S) + +#define FW_IQ_CMD_FL0DBP_S 14 +#define FW_IQ_CMD_FL0DBP_V(x) ((x) << FW_IQ_CMD_FL0DBP_S) + +#define FW_IQ_CMD_FL0DATANS_S 13 +#define FW_IQ_CMD_FL0DATANS_V(x) ((x) << FW_IQ_CMD_FL0DATANS_S) + +#define FW_IQ_CMD_FL0DATARO_S 12 +#define FW_IQ_CMD_FL0DATARO_V(x) ((x) << FW_IQ_CMD_FL0DATARO_S) +#define FW_IQ_CMD_FL0DATARO_F FW_IQ_CMD_FL0DATARO_V(1U) + +#define FW_IQ_CMD_FL0CONGCIF_S 11 +#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S) + +#define FW_IQ_CMD_FL0ONCHIP_S 10 +#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S) + +#define FW_IQ_CMD_FL0STATUSPGNS_S 9 +#define FW_IQ_CMD_FL0STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGNS_S) + +#define FW_IQ_CMD_FL0STATUSPGRO_S 8 +#define FW_IQ_CMD_FL0STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL0STATUSPGRO_S) + +#define FW_IQ_CMD_FL0FETCHNS_S 7 +#define FW_IQ_CMD_FL0FETCHNS_V(x) ((x) << FW_IQ_CMD_FL0FETCHNS_S) + +#define FW_IQ_CMD_FL0FETCHRO_S 6 +#define FW_IQ_CMD_FL0FETCHRO_V(x) ((x) << FW_IQ_CMD_FL0FETCHRO_S) +#define FW_IQ_CMD_FL0FETCHRO_F FW_IQ_CMD_FL0FETCHRO_V(1U) + +#define FW_IQ_CMD_FL0HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL0HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL0HOSTFCMODE_S) + +#define FW_IQ_CMD_FL0CPRIO_S 3 +#define FW_IQ_CMD_FL0CPRIO_V(x) ((x) << FW_IQ_CMD_FL0CPRIO_S) + +#define FW_IQ_CMD_FL0PADEN_S 2 +#define FW_IQ_CMD_FL0PADEN_V(x) ((x) << FW_IQ_CMD_FL0PADEN_S) +#define FW_IQ_CMD_FL0PADEN_F FW_IQ_CMD_FL0PADEN_V(1U) + +#define FW_IQ_CMD_FL0PACKEN_S 1 +#define FW_IQ_CMD_FL0PACKEN_V(x) ((x) << FW_IQ_CMD_FL0PACKEN_S) +#define FW_IQ_CMD_FL0PACKEN_F FW_IQ_CMD_FL0PACKEN_V(1U) + +#define FW_IQ_CMD_FL0CONGEN_S 0 +#define FW_IQ_CMD_FL0CONGEN_V(x) ((x) << FW_IQ_CMD_FL0CONGEN_S) +#define FW_IQ_CMD_FL0CONGEN_F FW_IQ_CMD_FL0CONGEN_V(1U) + +#define FW_IQ_CMD_FL0DCAEN_S 15 +#define FW_IQ_CMD_FL0DCAEN_V(x) ((x) << FW_IQ_CMD_FL0DCAEN_S) + +#define FW_IQ_CMD_FL0DCACPU_S 10 +#define FW_IQ_CMD_FL0DCACPU_V(x) ((x) << FW_IQ_CMD_FL0DCACPU_S) + +#define FW_IQ_CMD_FL0FBMIN_S 7 +#define FW_IQ_CMD_FL0FBMIN_V(x) ((x) << FW_IQ_CMD_FL0FBMIN_S) + +#define FW_IQ_CMD_FL0FBMAX_S 4 +#define FW_IQ_CMD_FL0FBMAX_V(x) ((x) << FW_IQ_CMD_FL0FBMAX_S) + +#define FW_IQ_CMD_FL0CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL0CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL0CIDXFTHRESHO_F FW_IQ_CMD_FL0CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL0CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL0CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL0CIDXFTHRESH_S) + +#define FW_IQ_CMD_FL1CNGCHMAP_S 20 +#define FW_IQ_CMD_FL1CNGCHMAP_V(x) ((x) << FW_IQ_CMD_FL1CNGCHMAP_S) + +#define FW_IQ_CMD_FL1CACHELOCK_S 15 +#define FW_IQ_CMD_FL1CACHELOCK_V(x) ((x) << FW_IQ_CMD_FL1CACHELOCK_S) + +#define FW_IQ_CMD_FL1DBP_S 14 +#define FW_IQ_CMD_FL1DBP_V(x) ((x) << FW_IQ_CMD_FL1DBP_S) + +#define FW_IQ_CMD_FL1DATANS_S 13 +#define FW_IQ_CMD_FL1DATANS_V(x) ((x) << FW_IQ_CMD_FL1DATANS_S) + +#define FW_IQ_CMD_FL1DATARO_S 12 +#define FW_IQ_CMD_FL1DATARO_V(x) ((x) << FW_IQ_CMD_FL1DATARO_S) + +#define FW_IQ_CMD_FL1CONGCIF_S 11 +#define FW_IQ_CMD_FL1CONGCIF_V(x) ((x) << FW_IQ_CMD_FL1CONGCIF_S) + +#define FW_IQ_CMD_FL1ONCHIP_S 10 +#define FW_IQ_CMD_FL1ONCHIP_V(x) ((x) << 
FW_IQ_CMD_FL1ONCHIP_S) + +#define FW_IQ_CMD_FL1STATUSPGNS_S 9 +#define FW_IQ_CMD_FL1STATUSPGNS_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGNS_S) + +#define FW_IQ_CMD_FL1STATUSPGRO_S 8 +#define FW_IQ_CMD_FL1STATUSPGRO_V(x) ((x) << FW_IQ_CMD_FL1STATUSPGRO_S) + +#define FW_IQ_CMD_FL1FETCHNS_S 7 +#define FW_IQ_CMD_FL1FETCHNS_V(x) ((x) << FW_IQ_CMD_FL1FETCHNS_S) + +#define FW_IQ_CMD_FL1FETCHRO_S 6 +#define FW_IQ_CMD_FL1FETCHRO_V(x) ((x) << FW_IQ_CMD_FL1FETCHRO_S) + +#define FW_IQ_CMD_FL1HOSTFCMODE_S 4 +#define FW_IQ_CMD_FL1HOSTFCMODE_V(x) ((x) << FW_IQ_CMD_FL1HOSTFCMODE_S) + +#define FW_IQ_CMD_FL1CPRIO_S 3 +#define FW_IQ_CMD_FL1CPRIO_V(x) ((x) << FW_IQ_CMD_FL1CPRIO_S) + +#define FW_IQ_CMD_FL1PADEN_S 2 +#define FW_IQ_CMD_FL1PADEN_V(x) ((x) << FW_IQ_CMD_FL1PADEN_S) +#define FW_IQ_CMD_FL1PADEN_F FW_IQ_CMD_FL1PADEN_V(1U) + +#define FW_IQ_CMD_FL1PACKEN_S 1 +#define FW_IQ_CMD_FL1PACKEN_V(x) ((x) << FW_IQ_CMD_FL1PACKEN_S) +#define FW_IQ_CMD_FL1PACKEN_F FW_IQ_CMD_FL1PACKEN_V(1U) + +#define FW_IQ_CMD_FL1CONGEN_S 0 +#define FW_IQ_CMD_FL1CONGEN_V(x) ((x) << FW_IQ_CMD_FL1CONGEN_S) +#define FW_IQ_CMD_FL1CONGEN_F FW_IQ_CMD_FL1CONGEN_V(1U) + +#define FW_IQ_CMD_FL1DCAEN_S 15 +#define FW_IQ_CMD_FL1DCAEN_V(x) ((x) << FW_IQ_CMD_FL1DCAEN_S) + +#define FW_IQ_CMD_FL1DCACPU_S 10 +#define FW_IQ_CMD_FL1DCACPU_V(x) ((x) << FW_IQ_CMD_FL1DCACPU_S) + +#define FW_IQ_CMD_FL1FBMIN_S 7 +#define FW_IQ_CMD_FL1FBMIN_V(x) ((x) << FW_IQ_CMD_FL1FBMIN_S) + +#define FW_IQ_CMD_FL1FBMAX_S 4 +#define FW_IQ_CMD_FL1FBMAX_V(x) ((x) << FW_IQ_CMD_FL1FBMAX_S) + +#define FW_IQ_CMD_FL1CIDXFTHRESHO_S 3 +#define FW_IQ_CMD_FL1CIDXFTHRESHO_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESHO_S) +#define FW_IQ_CMD_FL1CIDXFTHRESHO_F FW_IQ_CMD_FL1CIDXFTHRESHO_V(1U) + +#define FW_IQ_CMD_FL1CIDXFTHRESH_S 0 +#define FW_IQ_CMD_FL1CIDXFTHRESH_V(x) ((x) << FW_IQ_CMD_FL1CIDXFTHRESH_S) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 viid_pkd; + __be32 r8_lo; + __be64 r9; +}; + +#define FW_EQ_ETH_CMD_PFN_S 8 +#define FW_EQ_ETH_CMD_PFN_V(x) ((x) << FW_EQ_ETH_CMD_PFN_S) + +#define FW_EQ_ETH_CMD_VFN_S 0 +#define FW_EQ_ETH_CMD_VFN_V(x) ((x) << FW_EQ_ETH_CMD_VFN_S) + +#define FW_EQ_ETH_CMD_ALLOC_S 31 +#define FW_EQ_ETH_CMD_ALLOC_V(x) ((x) << FW_EQ_ETH_CMD_ALLOC_S) +#define FW_EQ_ETH_CMD_ALLOC_F FW_EQ_ETH_CMD_ALLOC_V(1U) + +#define FW_EQ_ETH_CMD_FREE_S 30 +#define FW_EQ_ETH_CMD_FREE_V(x) ((x) << FW_EQ_ETH_CMD_FREE_S) +#define FW_EQ_ETH_CMD_FREE_F FW_EQ_ETH_CMD_FREE_V(1U) + +#define FW_EQ_ETH_CMD_MODIFY_S 29 +#define FW_EQ_ETH_CMD_MODIFY_V(x) ((x) << FW_EQ_ETH_CMD_MODIFY_S) +#define FW_EQ_ETH_CMD_MODIFY_F FW_EQ_ETH_CMD_MODIFY_V(1U) + +#define FW_EQ_ETH_CMD_EQSTART_S 28 +#define FW_EQ_ETH_CMD_EQSTART_V(x) ((x) << FW_EQ_ETH_CMD_EQSTART_S) +#define FW_EQ_ETH_CMD_EQSTART_F FW_EQ_ETH_CMD_EQSTART_V(1U) + +#define FW_EQ_ETH_CMD_EQSTOP_S 27 +#define FW_EQ_ETH_CMD_EQSTOP_V(x) ((x) << FW_EQ_ETH_CMD_EQSTOP_S) +#define FW_EQ_ETH_CMD_EQSTOP_F FW_EQ_ETH_CMD_EQSTOP_V(1U) + +#define FW_EQ_ETH_CMD_EQID_S 0 +#define FW_EQ_ETH_CMD_EQID_M 0xfffff +#define FW_EQ_ETH_CMD_EQID_V(x) ((x) << FW_EQ_ETH_CMD_EQID_S) +#define FW_EQ_ETH_CMD_EQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_EQID_S) & FW_EQ_ETH_CMD_EQID_M) + +#define FW_EQ_ETH_CMD_PHYSEQID_S 0 +#define FW_EQ_ETH_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_ETH_CMD_PHYSEQID_V(x) ((x) << FW_EQ_ETH_CMD_PHYSEQID_S) +#define FW_EQ_ETH_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_ETH_CMD_PHYSEQID_S) & FW_EQ_ETH_CMD_PHYSEQID_M) + +#define 
FW_EQ_ETH_CMD_FETCHSZM_S 26 +#define FW_EQ_ETH_CMD_FETCHSZM_V(x) ((x) << FW_EQ_ETH_CMD_FETCHSZM_S) +#define FW_EQ_ETH_CMD_FETCHSZM_F FW_EQ_ETH_CMD_FETCHSZM_V(1U) + +#define FW_EQ_ETH_CMD_STATUSPGNS_S 25 +#define FW_EQ_ETH_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGNS_S) + +#define FW_EQ_ETH_CMD_STATUSPGRO_S 24 +#define FW_EQ_ETH_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_ETH_CMD_STATUSPGRO_S) + +#define FW_EQ_ETH_CMD_FETCHNS_S 23 +#define FW_EQ_ETH_CMD_FETCHNS_V(x) ((x) << FW_EQ_ETH_CMD_FETCHNS_S) + +#define FW_EQ_ETH_CMD_FETCHRO_S 22 +#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S) + +#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20 +#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S) + +#define FW_EQ_ETH_CMD_CPRIO_S 19 +#define FW_EQ_ETH_CMD_CPRIO_V(x) ((x) << FW_EQ_ETH_CMD_CPRIO_S) + +#define FW_EQ_ETH_CMD_ONCHIP_S 18 +#define FW_EQ_ETH_CMD_ONCHIP_V(x) ((x) << FW_EQ_ETH_CMD_ONCHIP_S) + +#define FW_EQ_ETH_CMD_PCIECHN_S 16 +#define FW_EQ_ETH_CMD_PCIECHN_V(x) ((x) << FW_EQ_ETH_CMD_PCIECHN_S) + +#define FW_EQ_ETH_CMD_IQID_S 0 +#define FW_EQ_ETH_CMD_IQID_V(x) ((x) << FW_EQ_ETH_CMD_IQID_S) + +#define FW_EQ_ETH_CMD_DCAEN_S 31 +#define FW_EQ_ETH_CMD_DCAEN_V(x) ((x) << FW_EQ_ETH_CMD_DCAEN_S) + +#define FW_EQ_ETH_CMD_DCACPU_S 26 +#define FW_EQ_ETH_CMD_DCACPU_V(x) ((x) << FW_EQ_ETH_CMD_DCACPU_S) + +#define FW_EQ_ETH_CMD_FBMIN_S 23 +#define FW_EQ_ETH_CMD_FBMIN_V(x) ((x) << FW_EQ_ETH_CMD_FBMIN_S) + +#define FW_EQ_ETH_CMD_FBMAX_S 20 +#define FW_EQ_ETH_CMD_FBMAX_V(x) ((x) << FW_EQ_ETH_CMD_FBMAX_S) + +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_ETH_CMD_CIDXFTHRESHO_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_ETH_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_ETH_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_ETH_CMD_CIDXFTHRESH_S) + +#define FW_EQ_ETH_CMD_EQSIZE_S 0 +#define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S) + +#define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30 +#define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S) +#define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U) + +#define FW_EQ_ETH_CMD_VIID_S 16 +#define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S) + +struct fw_eq_ctrl_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 cmpliqid_eqid; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_CTRL_CMD_PFN_S 8 +#define FW_EQ_CTRL_CMD_PFN_V(x) ((x) << FW_EQ_CTRL_CMD_PFN_S) + +#define FW_EQ_CTRL_CMD_VFN_S 0 +#define FW_EQ_CTRL_CMD_VFN_V(x) ((x) << FW_EQ_CTRL_CMD_VFN_S) + +#define FW_EQ_CTRL_CMD_ALLOC_S 31 +#define FW_EQ_CTRL_CMD_ALLOC_V(x) ((x) << FW_EQ_CTRL_CMD_ALLOC_S) +#define FW_EQ_CTRL_CMD_ALLOC_F FW_EQ_CTRL_CMD_ALLOC_V(1U) + +#define FW_EQ_CTRL_CMD_FREE_S 30 +#define FW_EQ_CTRL_CMD_FREE_V(x) ((x) << FW_EQ_CTRL_CMD_FREE_S) +#define FW_EQ_CTRL_CMD_FREE_F FW_EQ_CTRL_CMD_FREE_V(1U) + +#define FW_EQ_CTRL_CMD_MODIFY_S 29 +#define FW_EQ_CTRL_CMD_MODIFY_V(x) ((x) << FW_EQ_CTRL_CMD_MODIFY_S) +#define FW_EQ_CTRL_CMD_MODIFY_F FW_EQ_CTRL_CMD_MODIFY_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTART_S 28 +#define FW_EQ_CTRL_CMD_EQSTART_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTART_S) +#define FW_EQ_CTRL_CMD_EQSTART_F FW_EQ_CTRL_CMD_EQSTART_V(1U) + +#define FW_EQ_CTRL_CMD_EQSTOP_S 27 +#define FW_EQ_CTRL_CMD_EQSTOP_V(x) ((x) << FW_EQ_CTRL_CMD_EQSTOP_S) +#define FW_EQ_CTRL_CMD_EQSTOP_F FW_EQ_CTRL_CMD_EQSTOP_V(1U) + +#define FW_EQ_CTRL_CMD_CMPLIQID_S 20 +#define FW_EQ_CTRL_CMD_CMPLIQID_V(x) ((x) << FW_EQ_CTRL_CMD_CMPLIQID_S) + +#define FW_EQ_CTRL_CMD_EQID_S 0 +#define 
FW_EQ_CTRL_CMD_EQID_M 0xfffff +#define FW_EQ_CTRL_CMD_EQID_V(x) ((x) << FW_EQ_CTRL_CMD_EQID_S) +#define FW_EQ_CTRL_CMD_EQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_EQID_S) & FW_EQ_CTRL_CMD_EQID_M) + +#define FW_EQ_CTRL_CMD_PHYSEQID_S 0 +#define FW_EQ_CTRL_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_CTRL_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_CTRL_CMD_PHYSEQID_S) & FW_EQ_CTRL_CMD_PHYSEQID_M) + +#define FW_EQ_CTRL_CMD_FETCHSZM_S 26 +#define FW_EQ_CTRL_CMD_FETCHSZM_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHSZM_S) +#define FW_EQ_CTRL_CMD_FETCHSZM_F FW_EQ_CTRL_CMD_FETCHSZM_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGNS_S 25 +#define FW_EQ_CTRL_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGNS_S) +#define FW_EQ_CTRL_CMD_STATUSPGNS_F FW_EQ_CTRL_CMD_STATUSPGNS_V(1U) + +#define FW_EQ_CTRL_CMD_STATUSPGRO_S 24 +#define FW_EQ_CTRL_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_CTRL_CMD_STATUSPGRO_S) +#define FW_EQ_CTRL_CMD_STATUSPGRO_F FW_EQ_CTRL_CMD_STATUSPGRO_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHNS_S 23 +#define FW_EQ_CTRL_CMD_FETCHNS_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHNS_S) +#define FW_EQ_CTRL_CMD_FETCHNS_F FW_EQ_CTRL_CMD_FETCHNS_V(1U) + +#define FW_EQ_CTRL_CMD_FETCHRO_S 22 +#define FW_EQ_CTRL_CMD_FETCHRO_V(x) ((x) << FW_EQ_CTRL_CMD_FETCHRO_S) +#define FW_EQ_CTRL_CMD_FETCHRO_F FW_EQ_CTRL_CMD_FETCHRO_V(1U) + +#define FW_EQ_CTRL_CMD_HOSTFCMODE_S 20 +#define FW_EQ_CTRL_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_CTRL_CMD_HOSTFCMODE_S) + +#define FW_EQ_CTRL_CMD_CPRIO_S 19 +#define FW_EQ_CTRL_CMD_CPRIO_V(x) ((x) << FW_EQ_CTRL_CMD_CPRIO_S) + +#define FW_EQ_CTRL_CMD_ONCHIP_S 18 +#define FW_EQ_CTRL_CMD_ONCHIP_V(x) ((x) << FW_EQ_CTRL_CMD_ONCHIP_S) + +#define FW_EQ_CTRL_CMD_PCIECHN_S 16 +#define FW_EQ_CTRL_CMD_PCIECHN_V(x) ((x) << FW_EQ_CTRL_CMD_PCIECHN_S) + +#define FW_EQ_CTRL_CMD_IQID_S 0 +#define FW_EQ_CTRL_CMD_IQID_V(x) ((x) << FW_EQ_CTRL_CMD_IQID_S) + +#define FW_EQ_CTRL_CMD_DCAEN_S 31 +#define FW_EQ_CTRL_CMD_DCAEN_V(x) ((x) << FW_EQ_CTRL_CMD_DCAEN_S) + +#define FW_EQ_CTRL_CMD_DCACPU_S 26 +#define FW_EQ_CTRL_CMD_DCACPU_V(x) ((x) << FW_EQ_CTRL_CMD_DCACPU_S) + +#define FW_EQ_CTRL_CMD_FBMIN_S 23 +#define FW_EQ_CTRL_CMD_FBMIN_V(x) ((x) << FW_EQ_CTRL_CMD_FBMIN_S) + +#define FW_EQ_CTRL_CMD_FBMAX_S 20 +#define FW_EQ_CTRL_CMD_FBMAX_V(x) ((x) << FW_EQ_CTRL_CMD_FBMAX_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_CTRL_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_CTRL_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_CTRL_CMD_CIDXFTHRESH_S) + +#define FW_EQ_CTRL_CMD_EQSIZE_S 0 +#define FW_EQ_CTRL_CMD_EQSIZE_V(x) ((x) << FW_EQ_CTRL_CMD_EQSIZE_S) + +struct fw_eq_ofld_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_OFLD_CMD_PFN_S 8 +#define FW_EQ_OFLD_CMD_PFN_V(x) ((x) << FW_EQ_OFLD_CMD_PFN_S) + +#define FW_EQ_OFLD_CMD_VFN_S 0 +#define FW_EQ_OFLD_CMD_VFN_V(x) ((x) << FW_EQ_OFLD_CMD_VFN_S) + +#define FW_EQ_OFLD_CMD_ALLOC_S 31 +#define FW_EQ_OFLD_CMD_ALLOC_V(x) ((x) << FW_EQ_OFLD_CMD_ALLOC_S) +#define FW_EQ_OFLD_CMD_ALLOC_F FW_EQ_OFLD_CMD_ALLOC_V(1U) + +#define FW_EQ_OFLD_CMD_FREE_S 30 +#define FW_EQ_OFLD_CMD_FREE_V(x) ((x) << FW_EQ_OFLD_CMD_FREE_S) +#define FW_EQ_OFLD_CMD_FREE_F FW_EQ_OFLD_CMD_FREE_V(1U) + +#define FW_EQ_OFLD_CMD_MODIFY_S 29 +#define FW_EQ_OFLD_CMD_MODIFY_V(x) ((x) << FW_EQ_OFLD_CMD_MODIFY_S) +#define FW_EQ_OFLD_CMD_MODIFY_F FW_EQ_OFLD_CMD_MODIFY_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTART_S 28 +#define FW_EQ_OFLD_CMD_EQSTART_V(x) ((x) << 
FW_EQ_OFLD_CMD_EQSTART_S) +#define FW_EQ_OFLD_CMD_EQSTART_F FW_EQ_OFLD_CMD_EQSTART_V(1U) + +#define FW_EQ_OFLD_CMD_EQSTOP_S 27 +#define FW_EQ_OFLD_CMD_EQSTOP_V(x) ((x) << FW_EQ_OFLD_CMD_EQSTOP_S) +#define FW_EQ_OFLD_CMD_EQSTOP_F FW_EQ_OFLD_CMD_EQSTOP_V(1U) + +#define FW_EQ_OFLD_CMD_EQID_S 0 +#define FW_EQ_OFLD_CMD_EQID_M 0xfffff +#define FW_EQ_OFLD_CMD_EQID_V(x) ((x) << FW_EQ_OFLD_CMD_EQID_S) +#define FW_EQ_OFLD_CMD_EQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_EQID_S) & FW_EQ_OFLD_CMD_EQID_M) + +#define FW_EQ_OFLD_CMD_PHYSEQID_S 0 +#define FW_EQ_OFLD_CMD_PHYSEQID_M 0xfffff +#define FW_EQ_OFLD_CMD_PHYSEQID_G(x) \ + (((x) >> FW_EQ_OFLD_CMD_PHYSEQID_S) & FW_EQ_OFLD_CMD_PHYSEQID_M) + +#define FW_EQ_OFLD_CMD_FETCHSZM_S 26 +#define FW_EQ_OFLD_CMD_FETCHSZM_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHSZM_S) + +#define FW_EQ_OFLD_CMD_STATUSPGNS_S 25 +#define FW_EQ_OFLD_CMD_STATUSPGNS_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGNS_S) + +#define FW_EQ_OFLD_CMD_STATUSPGRO_S 24 +#define FW_EQ_OFLD_CMD_STATUSPGRO_V(x) ((x) << FW_EQ_OFLD_CMD_STATUSPGRO_S) + +#define FW_EQ_OFLD_CMD_FETCHNS_S 23 +#define FW_EQ_OFLD_CMD_FETCHNS_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHNS_S) + +#define FW_EQ_OFLD_CMD_FETCHRO_S 22 +#define FW_EQ_OFLD_CMD_FETCHRO_V(x) ((x) << FW_EQ_OFLD_CMD_FETCHRO_S) +#define FW_EQ_OFLD_CMD_FETCHRO_F FW_EQ_OFLD_CMD_FETCHRO_V(1U) + +#define FW_EQ_OFLD_CMD_HOSTFCMODE_S 20 +#define FW_EQ_OFLD_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_OFLD_CMD_HOSTFCMODE_S) + +#define FW_EQ_OFLD_CMD_CPRIO_S 19 +#define FW_EQ_OFLD_CMD_CPRIO_V(x) ((x) << FW_EQ_OFLD_CMD_CPRIO_S) + +#define FW_EQ_OFLD_CMD_ONCHIP_S 18 +#define FW_EQ_OFLD_CMD_ONCHIP_V(x) ((x) << FW_EQ_OFLD_CMD_ONCHIP_S) + +#define FW_EQ_OFLD_CMD_PCIECHN_S 16 +#define FW_EQ_OFLD_CMD_PCIECHN_V(x) ((x) << FW_EQ_OFLD_CMD_PCIECHN_S) + +#define FW_EQ_OFLD_CMD_IQID_S 0 +#define FW_EQ_OFLD_CMD_IQID_V(x) ((x) << FW_EQ_OFLD_CMD_IQID_S) + +#define FW_EQ_OFLD_CMD_DCAEN_S 31 +#define FW_EQ_OFLD_CMD_DCAEN_V(x) ((x) << FW_EQ_OFLD_CMD_DCAEN_S) + +#define FW_EQ_OFLD_CMD_DCACPU_S 26 +#define FW_EQ_OFLD_CMD_DCACPU_V(x) ((x) << FW_EQ_OFLD_CMD_DCACPU_S) + +#define FW_EQ_OFLD_CMD_FBMIN_S 23 +#define FW_EQ_OFLD_CMD_FBMIN_V(x) ((x) << FW_EQ_OFLD_CMD_FBMIN_S) + +#define FW_EQ_OFLD_CMD_FBMAX_S 20 +#define FW_EQ_OFLD_CMD_FBMAX_V(x) ((x) << FW_EQ_OFLD_CMD_FBMAX_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_S 19 +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO_V(x) \ + ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESHO_S) + +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_S 16 +#define FW_EQ_OFLD_CMD_CIDXFTHRESH_V(x) ((x) << FW_EQ_OFLD_CMD_CIDXFTHRESH_S) + +#define FW_EQ_OFLD_CMD_EQSIZE_S 0 +#define FW_EQ_OFLD_CMD_EQSIZE_V(x) ((x) << FW_EQ_OFLD_CMD_EQSIZE_S) + +/* + * Macros for VIID parsing: + * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number + */ + +#define FW_VIID_PFN_S 8 +#define FW_VIID_PFN_M 0x7 +#define FW_VIID_PFN_G(x) (((x) >> FW_VIID_PFN_S) & FW_VIID_PFN_M) + +#define FW_VIID_VIVLD_S 7 +#define FW_VIID_VIVLD_M 0x1 +#define FW_VIID_VIVLD_G(x) (((x) >> FW_VIID_VIVLD_S) & FW_VIID_VIVLD_M) + +#define FW_VIID_VIN_S 0 +#define FW_VIID_VIN_M 0x7F +#define FW_VIID_VIN_G(x) (((x) >> FW_VIID_VIN_S) & FW_VIID_VIN_M) + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 type_viid; + u8 mac[6]; + u8 portid_pkd; + u8 nmac; + u8 nmac0[6]; + __be16 rsssize_pkd; + u8 nmac1[6]; + __be16 idsiiq_pkd; + u8 nmac2[6]; + __be16 idseiq_pkd; + u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define FW_VI_CMD_PFN_S 8 +#define FW_VI_CMD_PFN_V(x) ((x) << FW_VI_CMD_PFN_S) + +#define FW_VI_CMD_VFN_S 0 +#define FW_VI_CMD_VFN_V(x) ((x) << FW_VI_CMD_VFN_S) + +#define 
FW_VI_CMD_ALLOC_S 31 +#define FW_VI_CMD_ALLOC_V(x) ((x) << FW_VI_CMD_ALLOC_S) +#define FW_VI_CMD_ALLOC_F FW_VI_CMD_ALLOC_V(1U) + +#define FW_VI_CMD_FREE_S 30 +#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S) +#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U) + +#define FW_VI_CMD_VIID_S 0 +#define FW_VI_CMD_VIID_M 0xfff +#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S) +#define FW_VI_CMD_VIID_G(x) (((x) >> FW_VI_CMD_VIID_S) & FW_VI_CMD_VIID_M) + +#define FW_VI_CMD_PORTID_S 4 +#define FW_VI_CMD_PORTID_M 0xf +#define FW_VI_CMD_PORTID_V(x) ((x) << FW_VI_CMD_PORTID_S) +#define FW_VI_CMD_PORTID_G(x) \ + (((x) >> FW_VI_CMD_PORTID_S) & FW_VI_CMD_PORTID_M) + +#define FW_VI_CMD_RSSSIZE_S 0 +#define FW_VI_CMD_RSSSIZE_M 0x7ff +#define FW_VI_CMD_RSSSIZE_G(x) \ + (((x) >> FW_VI_CMD_RSSSIZE_S) & FW_VI_CMD_RSSSIZE_M) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE +#define FW_VI_MAC_MAC_BASED_FREE 0x3FD +#define FW_CLS_TCAM_NUM_ENTRIES 336 + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY, + FW_VI_MAC_MPS_TCAM_ONLY, + FW_VI_MAC_SMT_ONLY, + FW_VI_MAC_SMT_AND_MPSTCAM +}; + +enum fw_vi_mac_result { + FW_VI_MAC_R_SUCCESS, + FW_VI_MAC_R_F_NONEXISTENT_NOMEM, + FW_VI_MAC_R_SMAC_FAIL, + FW_VI_MAC_R_F_ACL_CHECK +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + } u; +}; + +#define FW_VI_MAC_CMD_VIID_S 0 +#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S) + +#define FW_VI_MAC_CMD_FREEMACS_S 31 +#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S) + +#define FW_VI_MAC_CMD_HASHVECEN_S 23 +#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S) +#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U) + +#define FW_VI_MAC_CMD_HASHUNIEN_S 22 +#define FW_VI_MAC_CMD_HASHUNIEN_V(x) ((x) << FW_VI_MAC_CMD_HASHUNIEN_S) + +#define FW_VI_MAC_CMD_VALID_S 15 +#define FW_VI_MAC_CMD_VALID_V(x) ((x) << FW_VI_MAC_CMD_VALID_S) +#define FW_VI_MAC_CMD_VALID_F FW_VI_MAC_CMD_VALID_V(1U) + +#define FW_VI_MAC_CMD_PRIO_S 12 +#define FW_VI_MAC_CMD_PRIO_V(x) ((x) << FW_VI_MAC_CMD_PRIO_S) + +#define FW_VI_MAC_CMD_SMAC_RESULT_S 10 +#define FW_VI_MAC_CMD_SMAC_RESULT_M 0x3 +#define FW_VI_MAC_CMD_SMAC_RESULT_V(x) ((x) << FW_VI_MAC_CMD_SMAC_RESULT_S) +#define FW_VI_MAC_CMD_SMAC_RESULT_G(x) \ + (((x) >> FW_VI_MAC_CMD_SMAC_RESULT_S) & FW_VI_MAC_CMD_SMAC_RESULT_M) + +#define FW_VI_MAC_CMD_IDX_S 0 +#define FW_VI_MAC_CMD_IDX_M 0x3ff +#define FW_VI_MAC_CMD_IDX_V(x) ((x) << FW_VI_MAC_CMD_IDX_S) +#define FW_VI_MAC_CMD_IDX_G(x) \ + (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M) + +#define FW_RXMODE_MTU_NO_CHG 65535 + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_vlanexen; + __be32 r4_lo; +}; + +#define FW_VI_RXMODE_CMD_VIID_S 0 +#define FW_VI_RXMODE_CMD_VIID_V(x) ((x) << FW_VI_RXMODE_CMD_VIID_S) + +#define FW_VI_RXMODE_CMD_MTU_S 16 +#define FW_VI_RXMODE_CMD_MTU_M 0xffff +#define FW_VI_RXMODE_CMD_MTU_V(x) ((x) << FW_VI_RXMODE_CMD_MTU_S) + +#define FW_VI_RXMODE_CMD_PROMISCEN_S 14 +#define FW_VI_RXMODE_CMD_PROMISCEN_M 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN_V(x) ((x) << FW_VI_RXMODE_CMD_PROMISCEN_S) + +#define FW_VI_RXMODE_CMD_ALLMULTIEN_S 12 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_M 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_ALLMULTIEN_S) + +#define 
FW_VI_RXMODE_CMD_BROADCASTEN_S 10 +#define FW_VI_RXMODE_CMD_BROADCASTEN_M 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN_V(x) \ + ((x) << FW_VI_RXMODE_CMD_BROADCASTEN_S) + +#define FW_VI_RXMODE_CMD_VLANEXEN_S 8 +#define FW_VI_RXMODE_CMD_VLANEXEN_M 0x3 +#define FW_VI_RXMODE_CMD_VLANEXEN_V(x) ((x) << FW_VI_RXMODE_CMD_VLANEXEN_S) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define FW_VI_ENABLE_CMD_VIID_S 0 +#define FW_VI_ENABLE_CMD_VIID_V(x) ((x) << FW_VI_ENABLE_CMD_VIID_S) + +#define FW_VI_ENABLE_CMD_IEN_S 31 +#define FW_VI_ENABLE_CMD_IEN_V(x) ((x) << FW_VI_ENABLE_CMD_IEN_S) + +#define FW_VI_ENABLE_CMD_EEN_S 30 +#define FW_VI_ENABLE_CMD_EEN_V(x) ((x) << FW_VI_ENABLE_CMD_EEN_S) + +#define FW_VI_ENABLE_CMD_LED_S 29 +#define FW_VI_ENABLE_CMD_LED_V(x) ((x) << FW_VI_ENABLE_CMD_LED_S) +#define FW_VI_ENABLE_CMD_LED_F FW_VI_ENABLE_CMD_LED_V(1U) + +#define FW_VI_ENABLE_CMD_DCB_INFO_S 28 +#define FW_VI_ENABLE_CMD_DCB_INFO_V(x) ((x) << FW_VI_ENABLE_CMD_DCB_INFO_S) + +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 +enum fw_vi_stats_vf_index { + FW_VI_VF_STAT_TX_BCAST_BYTES_IX, + FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_MCAST_BYTES_IX, + FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_UCAST_BYTES_IX, + FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_DROP_FRAMES_IX, + FW_VI_VF_STAT_TX_OFLD_BYTES_IX, + FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_VF_STAT_RX_BCAST_BYTES_IX, + FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_MCAST_BYTES_IX, + FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_UCAST_BYTES_IX, + FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_ERR_FRAMES_IX +}; + +/* VI PF stats offset definitions */ +#define VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 
rx_err_frames; + } vf; + } u; +}; + +#define FW_VI_STATS_CMD_VIID_S 0 +#define FW_VI_STATS_CMD_VIID_V(x) ((x) << FW_VI_STATS_CMD_VIID_S) + +#define FW_VI_STATS_CMD_NSTATS_S 12 +#define FW_VI_STATS_CMD_NSTATS_V(x) ((x) << FW_VI_STATS_CMD_NSTATS_S) + +#define FW_VI_STATS_CMD_IX_S 0 +#define FW_VI_STATS_CMD_IX_V(x) ((x) << FW_VI_STATS_CMD_IX_S) + +struct fw_acl_mac_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nmac; + u8 r3[7]; + __be16 r4; + u8 macaddr0[6]; + __be16 r5; + u8 macaddr1[6]; + __be16 r6; + u8 macaddr2[6]; + __be16 r7; + u8 macaddr3[6]; +}; + +#define FW_ACL_MAC_CMD_PFN_S 8 +#define FW_ACL_MAC_CMD_PFN_V(x) ((x) << FW_ACL_MAC_CMD_PFN_S) + +#define FW_ACL_MAC_CMD_VFN_S 0 +#define FW_ACL_MAC_CMD_VFN_V(x) ((x) << FW_ACL_MAC_CMD_VFN_S) + +#define FW_ACL_MAC_CMD_EN_S 31 +#define FW_ACL_MAC_CMD_EN_V(x) ((x) << FW_ACL_MAC_CMD_EN_S) + +struct fw_acl_vlan_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nvlan; + u8 dropnovlan_fm; + u8 r3_lo[6]; + __be16 vlanid[16]; +}; + +#define FW_ACL_VLAN_CMD_PFN_S 8 +#define FW_ACL_VLAN_CMD_PFN_V(x) ((x) << FW_ACL_VLAN_CMD_PFN_S) + +#define FW_ACL_VLAN_CMD_VFN_S 0 +#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S) + +#define FW_ACL_VLAN_CMD_EN_S 31 +#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S) + +#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 +#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) + +#define FW_ACL_VLAN_CMD_FM_S 6 +#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) + +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDI_0 = 0x0200, + FW_PORT_CAP_MDI_1 = 0x0400, + FW_PORT_CAP_BEAN = 0x0800, + FW_PORT_CAP_PMA_LPBK = 0x1000, + FW_PORT_CAP_PCS_LPBK = 0x2000, + FW_PORT_CAP_PHYXS_LPBK = 0x4000, + FW_PORT_CAP_FAR_END_LPBK = 0x8000, +}; + +enum fw_port_mdi { + FW_PORT_CAP_MDI_UNCHANGED, + FW_PORT_CAP_MDI_AUTO, + FW_PORT_CAP_MDI_F_STRAIGHT, + FW_PORT_CAP_MDI_F_CROSSOVER +}; + +#define FW_PORT_CAP_MDI_S 9 +#define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_L2_CFG = 0x0002, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L2_PPP_CFG = 0x0004, + FW_PORT_ACTION_L2_DCB_CFG = 0x0005, + FW_PORT_ACTION_DCB_READ_TRANS = 0x0006, + FW_PORT_ACTION_DCB_READ_RECV = 0x0007, + FW_PORT_ACTION_DCB_READ_DET = 0x0008, + FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, + FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, + FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, + FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, + FW_PORT_ACTION_L1_LPBK = 0x0021, + FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, + FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, + FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, + FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, + FW_PORT_ACTION_PHY_RESET = 0x0040, + FW_PORT_ACTION_PMA_RESET = 0x0041, + FW_PORT_ACTION_PCS_RESET = 0x0042, + FW_PORT_ACTION_PHYXS_RESET = 0x0043, + FW_PORT_ACTION_DTEXS_REEST = 0x0044, + FW_PORT_ACTION_AN_RESET = 0x0045 +}; + +enum fw_port_l2cfg_ctlbf { + FW_PORT_L2_CTLBF_OVLAN0 = 0x01, + FW_PORT_L2_CTLBF_OVLAN1 = 0x02, + FW_PORT_L2_CTLBF_OVLAN2 = 0x04, + FW_PORT_L2_CTLBF_OVLAN3 = 0x08, + FW_PORT_L2_CTLBF_IVLAN = 0x10, + FW_PORT_L2_CTLBF_TXIPG = 0x20 +}; + +enum fw_port_dcb_versions { + FW_PORT_DCB_VER_UNKNOWN, + FW_PORT_DCB_VER_CEE1D0, + 
FW_PORT_DCB_VER_CEE1D01, + FW_PORT_DCB_VER_IEEE, + FW_PORT_DCB_VER_AUTO = 7 +}; + +enum fw_port_dcb_cfg { + FW_PORT_DCB_CFG_PG = 0x01, + FW_PORT_DCB_CFG_PFC = 0x02, + FW_PORT_DCB_CFG_APPL = 0x04 +}; + +enum fw_port_dcb_cfg_rc { + FW_PORT_DCB_CFG_SUCCESS = 0x0, + FW_PORT_DCB_CFG_ERROR = 0x1 +}; + +enum fw_port_dcb_type { + FW_PORT_DCB_TYPE_PGID = 0x00, + FW_PORT_DCB_TYPE_PGRATE = 0x01, + FW_PORT_DCB_TYPE_PRIORATE = 0x02, + FW_PORT_DCB_TYPE_PFC = 0x03, + FW_PORT_DCB_TYPE_APP_ID = 0x04, + FW_PORT_DCB_TYPE_CONTROL = 0x05, +}; + +enum fw_port_dcb_feature_state { + FW_PORT_DCB_FEATURE_STATE_PENDING = 0x0, + FW_PORT_DCB_FEATURE_STATE_SUCCESS = 0x1, + FW_PORT_DCB_FEATURE_STATE_ERROR = 0x2, + FW_PORT_DCB_FEATURE_STATE_TIMEOUT = 0x3, +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __u8 ctlbf; + __u8 ovlan3_to_ivlan0; + __be16 ivlantype; + __be16 txipg_force_pinfo; + __be16 mtu; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + __be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + __be16 mtu; + __u8 cbllen; + __u8 auxlinfo; + __u8 dcbxdis_pkd; + __u8 r8_lo[3]; + __be64 r9; + } info; + struct fw_port_diags { + __u8 diagop; + __u8 r[3]; + __be32 diagval; + } diags; + union fw_port_dcb { + struct fw_port_dcb_pgid { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[2]; + __be32 pgid; + __be64 r11; + } pgid; + struct fw_port_dcb_pgrate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[5]; + __u8 num_tcs_supported; + __u8 pgrate[8]; + __u8 tsa[8]; + } pgrate; + struct fw_port_dcb_priorate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[6]; + __u8 strict_priorate[8]; + } priorate; + struct fw_port_dcb_pfc { + __u8 type; + __u8 pfcen; + __u8 r10[5]; + __u8 max_pfc_tcs; + __be64 r11; + } pfc; + struct fw_port_app_priority { + __u8 type; + __u8 r10[2]; + __u8 idx; + __u8 user_prio_map; + __u8 sel_field; + __be16 protocolid; + __be64 r12; + } app_priority; + struct fw_port_dcb_control { + __u8 type; + __u8 all_syncd_pkd; + __be16 dcb_version_to_app_state; + __be32 r11; + __be64 r12; + } control; + } dcb; + } u; +}; + +#define FW_PORT_CMD_READ_S 22 +#define FW_PORT_CMD_READ_V(x) ((x) << FW_PORT_CMD_READ_S) +#define FW_PORT_CMD_READ_F FW_PORT_CMD_READ_V(1U) + +#define FW_PORT_CMD_PORTID_S 0 +#define FW_PORT_CMD_PORTID_M 0xf +#define FW_PORT_CMD_PORTID_V(x) ((x) << FW_PORT_CMD_PORTID_S) +#define FW_PORT_CMD_PORTID_G(x) \ + (((x) >> FW_PORT_CMD_PORTID_S) & FW_PORT_CMD_PORTID_M) + +#define FW_PORT_CMD_ACTION_S 16 +#define FW_PORT_CMD_ACTION_M 0xffff +#define FW_PORT_CMD_ACTION_V(x) ((x) << FW_PORT_CMD_ACTION_S) +#define FW_PORT_CMD_ACTION_G(x) \ + (((x) >> FW_PORT_CMD_ACTION_S) & FW_PORT_CMD_ACTION_M) + +#define FW_PORT_CMD_OVLAN3_S 7 +#define FW_PORT_CMD_OVLAN3_V(x) ((x) << FW_PORT_CMD_OVLAN3_S) + +#define FW_PORT_CMD_OVLAN2_S 6 +#define FW_PORT_CMD_OVLAN2_V(x) ((x) << FW_PORT_CMD_OVLAN2_S) + +#define FW_PORT_CMD_OVLAN1_S 5 +#define FW_PORT_CMD_OVLAN1_V(x) ((x) << FW_PORT_CMD_OVLAN1_S) + +#define FW_PORT_CMD_OVLAN0_S 4 +#define FW_PORT_CMD_OVLAN0_V(x) ((x) << FW_PORT_CMD_OVLAN0_S) + +#define FW_PORT_CMD_IVLAN0_S 3 +#define FW_PORT_CMD_IVLAN0_V(x) ((x) << FW_PORT_CMD_IVLAN0_S) + +#define FW_PORT_CMD_TXIPG_S 3 +#define FW_PORT_CMD_TXIPG_V(x) ((x) << FW_PORT_CMD_TXIPG_S) + +#define FW_PORT_CMD_LSTATUS_S 31 +#define 
FW_PORT_CMD_LSTATUS_M 0x1 +#define FW_PORT_CMD_LSTATUS_V(x) ((x) << FW_PORT_CMD_LSTATUS_S) +#define FW_PORT_CMD_LSTATUS_G(x) \ + (((x) >> FW_PORT_CMD_LSTATUS_S) & FW_PORT_CMD_LSTATUS_M) +#define FW_PORT_CMD_LSTATUS_F FW_PORT_CMD_LSTATUS_V(1U) + +#define FW_PORT_CMD_LSPEED_S 24 +#define FW_PORT_CMD_LSPEED_M 0x3f +#define FW_PORT_CMD_LSPEED_V(x) ((x) << FW_PORT_CMD_LSPEED_S) +#define FW_PORT_CMD_LSPEED_G(x) \ + (((x) >> FW_PORT_CMD_LSPEED_S) & FW_PORT_CMD_LSPEED_M) + +#define FW_PORT_CMD_TXPAUSE_S 23 +#define FW_PORT_CMD_TXPAUSE_V(x) ((x) << FW_PORT_CMD_TXPAUSE_S) +#define FW_PORT_CMD_TXPAUSE_F FW_PORT_CMD_TXPAUSE_V(1U) + +#define FW_PORT_CMD_RXPAUSE_S 22 +#define FW_PORT_CMD_RXPAUSE_V(x) ((x) << FW_PORT_CMD_RXPAUSE_S) +#define FW_PORT_CMD_RXPAUSE_F FW_PORT_CMD_RXPAUSE_V(1U) + +#define FW_PORT_CMD_MDIOCAP_S 21 +#define FW_PORT_CMD_MDIOCAP_V(x) ((x) << FW_PORT_CMD_MDIOCAP_S) +#define FW_PORT_CMD_MDIOCAP_F FW_PORT_CMD_MDIOCAP_V(1U) + +#define FW_PORT_CMD_MDIOADDR_S 16 +#define FW_PORT_CMD_MDIOADDR_M 0x1f +#define FW_PORT_CMD_MDIOADDR_G(x) \ + (((x) >> FW_PORT_CMD_MDIOADDR_S) & FW_PORT_CMD_MDIOADDR_M) + +#define FW_PORT_CMD_LPTXPAUSE_S 15 +#define FW_PORT_CMD_LPTXPAUSE_V(x) ((x) << FW_PORT_CMD_LPTXPAUSE_S) +#define FW_PORT_CMD_LPTXPAUSE_F FW_PORT_CMD_LPTXPAUSE_V(1U) + +#define FW_PORT_CMD_LPRXPAUSE_S 14 +#define FW_PORT_CMD_LPRXPAUSE_V(x) ((x) << FW_PORT_CMD_LPRXPAUSE_S) +#define FW_PORT_CMD_LPRXPAUSE_F FW_PORT_CMD_LPRXPAUSE_V(1U) + +#define FW_PORT_CMD_PTYPE_S 8 +#define FW_PORT_CMD_PTYPE_M 0x1f +#define FW_PORT_CMD_PTYPE_G(x) \ + (((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M) + +#define FW_PORT_CMD_MODTYPE_S 0 +#define FW_PORT_CMD_MODTYPE_M 0x1f +#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S) +#define FW_PORT_CMD_MODTYPE_G(x) \ + (((x) >> FW_PORT_CMD_MODTYPE_S) & FW_PORT_CMD_MODTYPE_M) + +#define FW_PORT_CMD_DCBXDIS_S 7 +#define FW_PORT_CMD_DCBXDIS_V(x) ((x) << FW_PORT_CMD_DCBXDIS_S) +#define FW_PORT_CMD_DCBXDIS_F FW_PORT_CMD_DCBXDIS_V(1U) + +#define FW_PORT_CMD_APPLY_S 7 +#define FW_PORT_CMD_APPLY_V(x) ((x) << FW_PORT_CMD_APPLY_S) +#define FW_PORT_CMD_APPLY_F FW_PORT_CMD_APPLY_V(1U) + +#define FW_PORT_CMD_ALL_SYNCD_S 7 +#define FW_PORT_CMD_ALL_SYNCD_V(x) ((x) << FW_PORT_CMD_ALL_SYNCD_S) +#define FW_PORT_CMD_ALL_SYNCD_F FW_PORT_CMD_ALL_SYNCD_V(1U) + +#define FW_PORT_CMD_DCB_VERSION_S 12 +#define FW_PORT_CMD_DCB_VERSION_M 0x7 +#define FW_PORT_CMD_DCB_VERSION_G(x) \ + (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M) + +enum fw_port_type { + FW_PORT_TYPE_FIBER_XFI, + FW_PORT_TYPE_FIBER_XAUI, + FW_PORT_TYPE_BT_SGMII, + FW_PORT_TYPE_BT_XFI, + FW_PORT_TYPE_BT_XAUI, + FW_PORT_TYPE_KX4, + FW_PORT_TYPE_CX4, + FW_PORT_TYPE_KX, + FW_PORT_TYPE_KR, + FW_PORT_TYPE_SFP, + FW_PORT_TYPE_BP_AP, + FW_PORT_TYPE_BP4_AP, + FW_PORT_TYPE_QSFP_10G, + FW_PORT_TYPE_QSA, + FW_PORT_TYPE_QSFP, + FW_PORT_TYPE_BP40_BA, + + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M +}; + +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA, + FW_PORT_MOD_TYPE_LR, + FW_PORT_MOD_TYPE_SR, + FW_PORT_MOD_TYPE_ER, + FW_PORT_MOD_TYPE_TWINAX_PASSIVE, + FW_PORT_MOD_TYPE_TWINAX_ACTIVE, + FW_PORT_MOD_TYPE_LRM, + FW_PORT_MOD_TYPE_ERROR = FW_PORT_CMD_MODTYPE_M - 3, + FW_PORT_MOD_TYPE_UNKNOWN = FW_PORT_CMD_MODTYPE_M - 2, + FW_PORT_MOD_TYPE_NOTSUPPORTED = FW_PORT_CMD_MODTYPE_M - 1, + + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_M +}; + +enum fw_port_mod_sub_type { + FW_PORT_MOD_SUB_TYPE_NA, + FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1, + FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2, + FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3, + 
FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4, + FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5, + FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8, + + /* The following will never been in the VPD. They are TWINAX cable + * lengths decoded from SFP+ module i2c PROMs. These should + * almost certainly go somewhere else ... + */ + FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9, + FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA, + FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB, + FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC, +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 +#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + u8 nstats_bg_bm; + u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + __be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +/* port loopback stats */ +#define FW_NUM_LB_STATS 16 
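The register-style macros in this header all follow one naming convention: for a given field, _S is the bit offset, _M the unshifted mask, _V(x) shifts a caller-supplied value into position, and _G(x) extracts the field from a word that has already been converted to host byte order. As an illustrative sketch only (the helper name below is hypothetical and not part of the patch; it simply exercises the FW_PORT_CMD_* macros defined above), decoding the lstatus_to_modtype word of a port-info reply might look like this:

static inline void example_decode_port_info(const struct fw_port_cmd *cmd,
					    bool *link_up, unsigned int *speed,
					    unsigned int *mod_type)
{
	/* info.lstatus_to_modtype is big-endian on the wire */
	u32 word = be32_to_cpu(cmd->u.info.lstatus_to_modtype);

	*link_up  = (word & FW_PORT_CMD_LSTATUS_F) != 0;   /* bit 31 */
	*speed    = FW_PORT_CMD_LSPEED_G(word);    /* speed bits, cf. enum fw_port_cap */
	*mod_type = FW_PORT_CMD_MODTYPE_G(word);   /* enum fw_port_module_type */
}

The same _V/_G pairing is used when building commands: fields are OR'd together with the _V() helpers and the result converted with cpu_to_be32() before being handed to the firmware.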
+enum fw_port_lb_stats_index { + FW_STAT_LB_PORT_BYTES_IX, + FW_STAT_LB_PORT_FRAMES_IX, + FW_STAT_LB_PORT_BCAST_IX, + FW_STAT_LB_PORT_MCAST_IX, + FW_STAT_LB_PORT_UCAST_IX, + FW_STAT_LB_PORT_ERROR_IX, + FW_STAT_LB_PORT_64B_IX, + FW_STAT_LB_PORT_65B_127B_IX, + FW_STAT_LB_PORT_128B_255B_IX, + FW_STAT_LB_PORT_256B_511B_IX, + FW_STAT_LB_PORT_512B_1023B_IX, + FW_STAT_LB_PORT_1024B_1518B_IX, + FW_STAT_LB_PORT_1519B_MAX_IX, + FW_STAT_LB_PORT_DROP_FRAMES_IX +}; + +struct fw_port_lb_stats_cmd { + __be32 op_to_lbport; + __be32 retval_len16; + union fw_port_lb_stats { + struct fw_port_lb_stats_ctl { + u8 nstats_bg_bm; + u8 ix_pkd; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_lb_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 rx_lb_drop; + __be64 rx_lb_trunc; + } all; + } u; +}; + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +#define FW_RSS_IND_TBL_CMD_VIID_S 0 +#define FW_RSS_IND_TBL_CMD_VIID_V(x) ((x) << FW_RSS_IND_TBL_CMD_VIID_S) + +#define FW_RSS_IND_TBL_CMD_IQ0_S 20 +#define FW_RSS_IND_TBL_CMD_IQ0_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ0_S) + +#define FW_RSS_IND_TBL_CMD_IQ1_S 10 +#define FW_RSS_IND_TBL_CMD_IQ1_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ1_S) + +#define FW_RSS_IND_TBL_CMD_IQ2_S 0 +#define FW_RSS_IND_TBL_CMD_IQ2_V(x) ((x) << FW_RSS_IND_TBL_CMD_IQ2_S) + +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_pkd; + __be32 synmapen_to_hashtoeplitz; + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define FW_RSS_GLB_CONFIG_CMD_MODE_S 28 +#define FW_RSS_GLB_CONFIG_CMD_MODE_M 0xf +#define FW_RSS_GLB_CONFIG_CMD_MODE_V(x) ((x) << FW_RSS_GLB_CONFIG_CMD_MODE_S) +#define FW_RSS_GLB_CONFIG_CMD_MODE_G(x) \ + (((x) >> FW_RSS_GLB_CONFIG_CMD_MODE_S) & FW_RSS_GLB_CONFIG_CMD_MODE_M) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S 8 +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S 7 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S 6 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S 5 +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(x) \ + ((x) << 
FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S 4 +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_S) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F \ + FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S 3 +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S 2 +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F \ + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S 1 +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_S) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F \ + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_V(1U) + +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S 0 +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(x) \ + ((x) << FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_S) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F \ + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_V(1U) + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; +#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_udpen; + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +#define FW_RSS_VI_CONFIG_CMD_VIID_S 0 +#define FW_RSS_VI_CONFIG_CMD_VIID_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_VIID_S) + +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S 16 +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M 0x3ff +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(x) \ + (((x) >> FW_RSS_VI_CONFIG_CMD_DEFAULTQ_S) & \ + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_M) + +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S 4 +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S 3 +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S 2 +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S 1 +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(x) \ + ((x) << FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_S) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F \ + FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_V(1U) + +#define FW_RSS_VI_CONFIG_CMD_UDPEN_S 0 +#define FW_RSS_VI_CONFIG_CMD_UDPEN_V(x) ((x) << FW_RSS_VI_CONFIG_CMD_UDPEN_S) +#define FW_RSS_VI_CONFIG_CMD_UDPEN_F FW_RSS_VI_CONFIG_CMD_UDPEN_V(1U) + +struct fw_clip_cmd { + __be32 op_to_write; + __be32 alloc_to_len16; + __be64 ip_hi; + __be64 ip_lo; + __be32 r4[2]; +}; + +#define FW_CLIP_CMD_ALLOC_S 31 +#define FW_CLIP_CMD_ALLOC_V(x) ((x) << FW_CLIP_CMD_ALLOC_S) +#define FW_CLIP_CMD_ALLOC_F FW_CLIP_CMD_ALLOC_V(1U) + +#define FW_CLIP_CMD_FREE_S 30 +#define FW_CLIP_CMD_FREE_V(x) ((x) << 
FW_CLIP_CMD_FREE_S) +#define FW_CLIP_CMD_FREE_F FW_CLIP_CMD_FREE_V(1U) + +enum fw_error_type { + FW_ERROR_TYPE_EXCEPTION = 0x0, + FW_ERROR_TYPE_HWMODULE = 0x1, + FW_ERROR_TYPE_WR = 0x2, + FW_ERROR_TYPE_ACL = 0x3, +}; + +struct fw_error_cmd { + __be32 op_to_type; + __be32 len16_pkd; + union fw_error { + struct fw_error_exception { + __be32 info[6]; + } exception; + struct fw_error_hwmodule { + __be32 regaddr; + __be32 regval; + } hwmodule; + struct fw_error_wr { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + u8 wrhdr[16]; + } wr; + struct fw_error_acl { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + __be16 mv_pkd; + u8 val[6]; + __be64 r4; + } acl; + } u; +}; + +struct fw_debug_cmd { + __be32 op_type; + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + u8 filename_0_7[8]; + u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +#define FW_DEBUG_CMD_TYPE_S 0 +#define FW_DEBUG_CMD_TYPE_M 0xff +#define FW_DEBUG_CMD_TYPE_G(x) \ + (((x) >> FW_DEBUG_CMD_TYPE_S) & FW_DEBUG_CMD_TYPE_M) + +#define PCIE_FW_ERR_S 31 +#define PCIE_FW_ERR_V(x) ((x) << PCIE_FW_ERR_S) +#define PCIE_FW_ERR_F PCIE_FW_ERR_V(1U) + +#define PCIE_FW_INIT_S 30 +#define PCIE_FW_INIT_V(x) ((x) << PCIE_FW_INIT_S) +#define PCIE_FW_INIT_F PCIE_FW_INIT_V(1U) + +#define PCIE_FW_HALT_S 29 +#define PCIE_FW_HALT_V(x) ((x) << PCIE_FW_HALT_S) +#define PCIE_FW_HALT_F PCIE_FW_HALT_V(1U) + +#define PCIE_FW_EVAL_S 24 +#define PCIE_FW_EVAL_M 0x7 +#define PCIE_FW_EVAL_G(x) (((x) >> PCIE_FW_EVAL_S) & PCIE_FW_EVAL_M) + +#define PCIE_FW_MASTER_VLD_S 15 +#define PCIE_FW_MASTER_VLD_V(x) ((x) << PCIE_FW_MASTER_VLD_S) +#define PCIE_FW_MASTER_VLD_F PCIE_FW_MASTER_VLD_V(1U) + +#define PCIE_FW_MASTER_S 12 +#define PCIE_FW_MASTER_M 0x7 +#define PCIE_FW_MASTER_V(x) ((x) << PCIE_FW_MASTER_S) +#define PCIE_FW_MASTER_G(x) (((x) >> PCIE_FW_MASTER_S) & PCIE_FW_MASTER_M) + +struct fw_hdr { + u8 ver; + u8 chip; /* terminator chip type */ + __be16 len512; /* bin length in units of 512-bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; + u8 intfver_nic; + u8 intfver_vnic; + u8 intfver_ofld; + u8 intfver_ri; + u8 intfver_iscsipdu; + u8 intfver_iscsi; + u8 intfver_fcoepdu; + u8 intfver_fcoe; + __u32 reserved2; + __u32 reserved3; + __u32 reserved4; + __be32 flags; + __be32 reserved6[23]; +}; + +enum fw_hdr_chip { + FW_HDR_CHIP_T4, + FW_HDR_CHIP_T5 +}; + +#define FW_HDR_FW_VER_MAJOR_S 24 +#define FW_HDR_FW_VER_MAJOR_M 0xff +#define FW_HDR_FW_VER_MAJOR_V(x) \ + ((x) << FW_HDR_FW_VER_MAJOR_S) +#define FW_HDR_FW_VER_MAJOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MAJOR_S) & FW_HDR_FW_VER_MAJOR_M) + +#define FW_HDR_FW_VER_MINOR_S 16 +#define FW_HDR_FW_VER_MINOR_M 0xff +#define FW_HDR_FW_VER_MINOR_V(x) \ + ((x) << FW_HDR_FW_VER_MINOR_S) +#define FW_HDR_FW_VER_MINOR_G(x) \ + (((x) >> FW_HDR_FW_VER_MINOR_S) & FW_HDR_FW_VER_MINOR_M) + +#define FW_HDR_FW_VER_MICRO_S 8 +#define FW_HDR_FW_VER_MICRO_M 0xff +#define FW_HDR_FW_VER_MICRO_V(x) \ + ((x) << FW_HDR_FW_VER_MICRO_S) +#define FW_HDR_FW_VER_MICRO_G(x) \ + (((x) >> FW_HDR_FW_VER_MICRO_S) & FW_HDR_FW_VER_MICRO_M) + +#define FW_HDR_FW_VER_BUILD_S 0 +#define FW_HDR_FW_VER_BUILD_M 0xff +#define FW_HDR_FW_VER_BUILD_V(x) \ + ((x) << FW_HDR_FW_VER_BUILD_S) +#define FW_HDR_FW_VER_BUILD_G(x) \ + (((x) >> FW_HDR_FW_VER_BUILD_S) & FW_HDR_FW_VER_BUILD_M) + +enum fw_hdr_intfver { + 
FW_HDR_INTFVER_NIC = 0x00, + FW_HDR_INTFVER_VNIC = 0x00, + FW_HDR_INTFVER_OFLD = 0x00, + FW_HDR_INTFVER_RI = 0x00, + FW_HDR_INTFVER_ISCSIPDU = 0x00, + FW_HDR_INTFVER_ISCSI = 0x00, + FW_HDR_INTFVER_FCOEPDU = 0x00, + FW_HDR_INTFVER_FCOE = 0x00, +}; + +enum fw_hdr_flags { + FW_HDR_FLAGS_RESET_HALT = 0x00000001, +}; + +/* length of the formatting string */ +#define FW_DEVLOG_FMT_LEN 192 + +/* maximum number of the formatting string parameters */ +#define FW_DEVLOG_FMT_PARAMS_NUM 8 + +/* priority levels */ +enum fw_devlog_level { + FW_DEVLOG_LEVEL_EMERG = 0x0, + FW_DEVLOG_LEVEL_CRIT = 0x1, + FW_DEVLOG_LEVEL_ERR = 0x2, + FW_DEVLOG_LEVEL_NOTICE = 0x3, + FW_DEVLOG_LEVEL_INFO = 0x4, + FW_DEVLOG_LEVEL_DEBUG = 0x5, + FW_DEVLOG_LEVEL_MAX = 0x5, +}; + +/* facilities that may send a log message */ +enum fw_devlog_facility { + FW_DEVLOG_FACILITY_CORE = 0x00, + FW_DEVLOG_FACILITY_CF = 0x01, + FW_DEVLOG_FACILITY_SCHED = 0x02, + FW_DEVLOG_FACILITY_TIMER = 0x04, + FW_DEVLOG_FACILITY_RES = 0x06, + FW_DEVLOG_FACILITY_HW = 0x08, + FW_DEVLOG_FACILITY_FLR = 0x10, + FW_DEVLOG_FACILITY_DMAQ = 0x12, + FW_DEVLOG_FACILITY_PHY = 0x14, + FW_DEVLOG_FACILITY_MAC = 0x16, + FW_DEVLOG_FACILITY_PORT = 0x18, + FW_DEVLOG_FACILITY_VI = 0x1A, + FW_DEVLOG_FACILITY_FILTER = 0x1C, + FW_DEVLOG_FACILITY_ACL = 0x1E, + FW_DEVLOG_FACILITY_TM = 0x20, + FW_DEVLOG_FACILITY_QFC = 0x22, + FW_DEVLOG_FACILITY_DCB = 0x24, + FW_DEVLOG_FACILITY_ETH = 0x26, + FW_DEVLOG_FACILITY_OFLD = 0x28, + FW_DEVLOG_FACILITY_RI = 0x2A, + FW_DEVLOG_FACILITY_ISCSI = 0x2C, + FW_DEVLOG_FACILITY_FCOE = 0x2E, + FW_DEVLOG_FACILITY_FOISCSI = 0x30, + FW_DEVLOG_FACILITY_FOFCOE = 0x32, + FW_DEVLOG_FACILITY_CHNET = 0x34, + FW_DEVLOG_FACILITY_MAX = 0x34, +}; + +/* log message format */ +struct fw_devlog_e { + __be64 timestamp; + __be32 seqno; + __be16 reserved1; + __u8 level; + __u8 facility; + __u8 fmt[FW_DEVLOG_FMT_LEN]; + __be32 params[FW_DEVLOG_FMT_PARAMS_NUM]; + __be32 reserved3[4]; +}; + +struct fw_devlog_cmd { + __be32 op_to_write; + __be32 retval_len16; + __u8 level; + __u8 r2[7]; + __be32 memtype_devlog_memaddr16_devlog; + __be32 memsize_devlog; + __be32 r3[2]; +}; + +#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S 28 +#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M 0xf +#define FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(x) \ + (((x) >> FW_DEVLOG_CMD_MEMTYPE_DEVLOG_S) & \ + FW_DEVLOG_CMD_MEMTYPE_DEVLOG_M) + +#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S 0 +#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M 0xfffffff +#define FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(x) \ + (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ + FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) + +/* P C I E F W P F 7 R E G I S T E R */ + +/* PF7 stores the Firmware Device Log parameters which allows Host Drivers to + * access the "devlog" which needing to contact firmware. The encoding is + * mostly the same as that returned by the DEVLOG command except for the size + * which is encoded as the number of entries in multiples-1 of 128 here rather + * than the memory size as is done in the DEVLOG command. Thus, 0 means 128 + * and 15 means 2048. This of course in turn constrains the allowed values + * for the devlog size ... 
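+ * (Equivalently, a stored field value of N decodes to 128 * (N + 1)
+ * entries, which is where the 128 and 2048 endpoints above come from.)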
+ */ +#define PCIE_FW_PF_DEVLOG 7 + +#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28 +#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf +#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \ + ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S) +#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \ + (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \ + PCIE_FW_PF_DEVLOG_NENTRIES128_M) + +#define PCIE_FW_PF_DEVLOG_ADDR16_S 4 +#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff +#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S) +#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \ + (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M) + +#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0 +#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf +#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S) +#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ + (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) + +#endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h new file mode 100644 index 000000000..b9d1cbac0 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -0,0 +1,48 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __T4FW_VERSION_H__ +#define __T4FW_VERSION_H__ + +#define T4FW_VERSION_MAJOR 0x01 +#define T4FW_VERSION_MINOR 0x0D +#define T4FW_VERSION_MICRO 0x20 +#define T4FW_VERSION_BUILD 0x00 + +#define T5FW_VERSION_MAJOR 0x01 +#define T5FW_VERSION_MINOR 0x0D +#define T5FW_VERSION_MICRO 0x20 +#define T5FW_VERSION_BUILD 0x00 + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/Makefile b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile new file mode 100644 index 000000000..d72ee26cb --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/Makefile @@ -0,0 +1,7 @@ +# +# Chelsio T4 SR-IOV Virtual Function Driver +# + +obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf.o + +cxgb4vf-objs := cxgb4vf_main.o t4vf_hw.o sge.o diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h new file mode 100644 index 000000000..6049f70e1 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -0,0 +1,552 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * This file should not be included directly. Include t4vf_common.h instead. + */ + +#ifndef __CXGB4VF_ADAPTER_H__ +#define __CXGB4VF_ADAPTER_H__ + +#include +#include +#include +#include +#include +#include + +#include "../cxgb4/t4_hw.h" + +/* + * Constants of the implementation. + */ +enum { + MAX_NPORTS = 1, /* max # of "ports" */ + MAX_PORT_QSETS = 8, /* max # of Queue Sets / "port" */ + MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS, + + /* + * MSI-X interrupt index usage. + */ + MSIX_FW = 0, /* MSI-X index for firmware Q */ + MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */ + MSIX_EXTRAS = 1, + MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS, + + /* + * The maximum number of Ingress and Egress Queues is determined by + * the maximum number of "Queue Sets" which we support plus any + * ancillary queues. Each "Queue Set" requires one Ingress Queue + * for RX Packet Ingress Event notifications and two Egress Queues for + * a Free List and an Ethernet TX list. 
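+ * With the constants above (MAX_NPORTS = 1, MAX_PORT_QSETS = 8, and thus
+ * MAX_ETH_QSETS = 8) this works out to MSIX_ENTRIES = 9 MSI-X vectors,
+ * MAX_INGQ = 10 ingress queues and MAX_EGRQ = 16 egress queues.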
+ */ + INGQ_EXTRAS = 2, /* firmware event queue and */ + /* forwarded interrupts */ + MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS, + MAX_EGRQ = MAX_ETH_QSETS*2, +}; + +/* + * Forward structure definition references. + */ +struct adapter; +struct sge_eth_rxq; +struct sge_rspq; + +/* + * Per-"port" information. This is really per-Virtual Interface information + * but the use of the "port" nomanclature makes it easier to go back and forth + * between the PF and VF drivers ... + */ +struct port_info { + struct adapter *adapter; /* our adapter */ + u16 viid; /* virtual interface ID */ + s16 xact_addr_filt; /* index of our MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + u8 pidx; /* index into adapter port[] */ + s8 mdio_addr; + u8 port_type; /* firmware port type */ + u8 mod_type; /* firmware module type */ + u8 port_id; /* physical port ID */ + u8 nqsets; /* # of "Queue Sets" */ + u8 first_qset; /* index of first "Queue Set" */ + struct link_config link_cfg; /* physical port configuration */ +}; + +/* + * Scatter Gather Engine resources for the "adapter". Our ingress and egress + * queues are organized into "Queue Sets" with one ingress and one egress + * queue per Queue Set. These Queue Sets are aportionable between the "ports" + * (Virtual Interfaces). One extra ingress queue is used to receive + * asynchronous messages from the firmware. Note that the "Queue IDs" that we + * use here are really "Relative Queue IDs" which are returned as part of the + * firmware command to allocate queues. These queue IDs are relative to the + * absolute Queue ID base of the section of the Queue ID space allocated to + * the PF/VF. + */ + +/* + * SGE free-list queue state. + */ +struct rx_sw_desc; +struct sge_fl { + unsigned int avail; /* # of available RX buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of buffer allocation failures */ + unsigned long large_alloc_failed; + unsigned long starving; /* # of times FL was found starving */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + unsigned int cntxt_id; /* SGE relative QID for the free list */ + unsigned int abs_id; /* SGE absolute QID for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW RX descriptor ring */ + __be64 *desc; /* address of HW RX descriptor ring */ + dma_addr_t addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ +}; + +/* + * An ingress packet gather list. + */ +struct pkt_gl { + struct page_frag frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *, + const struct pkt_gl *); + +/* + * State for an SGE Response Queue. + */ +struct sge_rspq { + struct napi_struct napi; /* NAPI scheduling control */ + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 next_intr_params; /* holdoff params for next interrupt */ + int offset; /* offset into current FL buffer */ + + unsigned int unhandled_irqs; /* bogus interrupts */ + + /* + * Write-once/infrequently fields. 
+ * ------------------------------- + */ + + u8 intr_params; /* interrupt holdoff parameters */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 idx; /* queue index within its group */ + u16 cntxt_id; /* SGE rel QID for the response Q */ + u16 abs_id; /* SGE abs QID for the response Q */ + __be64 *desc; /* address of hardware response ring */ + dma_addr_t phys_addr; /* PCI bus address of ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capcity of response Q */ + struct adapter *adapter; /* our adapter */ + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; /* the handler for this response Q */ +}; + +/* + * Ethernet queue statistics + */ +struct sge_eth_stats { + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ + unsigned long vlan_ex; /* # of Rx VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +/* + * State for an Ethernet Receive Queue. + */ +struct sge_eth_rxq { + struct sge_rspq rspq; /* Response Queue */ + struct sge_fl fl; /* Free List */ + struct sge_eth_stats stats; /* receive statistics */ +}; + +/* + * SGE Transmit Queue state. This contains all of the resources associated + * with the hardware status of a TX Queue which is a circular ring of hardware + * TX Descriptors. For convenience, it also contains a pointer to a parallel + * "Software Descriptor" array but we don't know anything about it here other + * than its type name. + */ +struct tx_desc { + /* + * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the + * hardware: Sizes, Producer and Consumer indices, etc. + */ + __be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)]; +}; +struct tx_sw_desc; +struct sge_txq { + unsigned int in_use; /* # of in-use TX descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times queue has been stopped */ + unsigned long restarts; /* # of queue restarts */ + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + unsigned int cntxt_id; /* SGE relative QID for the TX Q */ + unsigned int abs_id; /* SGE absolute QID for the TX Q */ + struct tx_desc *desc; /* address of HW TX descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW TX descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* PCI bus address of hardware ring */ + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ +}; + +/* + * State for an Ethernet Transmit Queue. + */ +struct sge_eth_txq { + struct sge_txq q; /* SGE TX Queue */ + struct netdev_queue *txq; /* associated netdev TX queue */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of TX checksum offloads */ + unsigned long vlan_ins; /* # of TX VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +}; + +/* + * The complete set of Scatter/Gather Engine resources. + */ +struct sge { + /* + * Our "Queue Sets" ... 
+ */ + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + + /* + * Extra ingress queues for asynchronous firmware events and + * forwarded interrupts (when in MSI mode). + */ + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + /* + * State for managing "starving Free Lists" -- Free Lists which have + * fallen below a certain threshold of buffers available to the + * hardware and attempts to refill them up to that threshold have + * failed. We have a regular "slow tick" timer process which will + * make periodic attempts to refill these starving Free Lists ... + */ + DECLARE_BITMAP(starving_fl, MAX_EGRQ); + struct timer_list rx_timer; + + /* + * State for cleaning up completed TX descriptors. + */ + struct timer_list tx_timer; + + /* + * Write-once/infrequently fields. + * ------------------------------- + */ + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ + u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ + + /* Decoded Adapter Parameters. + */ + u32 fl_pg_order; /* large page allocation size */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + u32 fl_align; /* response queue message alignment */ + u32 fl_starve_thres; /* Free List starvation threshold */ + + /* + * Reverse maps from Absolute Queue IDs to associated queue pointers. + * The absolute Queue IDs are in a compact range which start at a + * [potentially large] Base Queue ID. We perform the reverse map by + * first converting the Absolute Queue ID into a Relative Queue ID by + * subtracting off the Base Queue ID and then use a Relative Queue ID + * indexed table to get the pointer to the corresponding software + * queue structure. + */ + unsigned int egr_base; + unsigned int ingr_base; + void *egr_map[MAX_EGRQ]; + struct sge_rspq *ingr_map[MAX_INGQ]; +}; + +/* + * Utility macros to convert Absolute- to Relative-Queue indices and Egress- + * and Ingress-Queues. The EQ_MAP() and IQ_MAP() macros which provide + * pointers to Ingress- and Egress-Queues can be used as both L- and R-values + */ +#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base)) +#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base)) + +#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)]) +#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)]) + +/* + * Macro to iterate across Queue Sets ("rxq" is a historic misnomer). + */ +#define for_each_ethrxq(sge, iter) \ + for (iter = 0; iter < (sge)->ethqsets; iter++) + +/* + * Per-"adapter" (Virtual Function) information. 
+ */ +struct adapter { + /* PCI resources */ + void __iomem *regs; + void __iomem *bar2; + struct pci_dev *pdev; + struct device *pdev_dev; + + /* "adapter" resources */ + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + struct adapter_params params; + + /* queue and interrupt resources */ + struct { + unsigned short vec; + char desc[22]; + } msix_info[MSIX_ENTRIES]; + struct sge sge; + + /* Linux network device resources */ + struct net_device *port[MAX_NPORTS]; + const char *name; + unsigned int msg_enable; + + /* debugfs resources */ + struct dentry *debugfs_root; + + /* various locks */ + spinlock_t stats_lock; +}; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1UL << 0), + USING_MSI = (1UL << 1), + USING_MSIX = (1UL << 2), + QUEUES_BOUND = (1UL << 3), +}; + +/* + * The following register read/write routine definitions are required by + * the common code. + */ + +/** + * t4_read_reg - read a HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 32-bit value of the given HW register. + */ +static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr) +{ + return readl(adapter->regs + reg_addr); +} + +/** + * t4_write_reg - write a HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given HW register. + */ +static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val) +{ + writel(val, adapter->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +/** + * t4_read_reg64 - read a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * + * Returns the 64-bit value of the given HW register. + */ +static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr) +{ + return readq(adapter->regs + reg_addr); +} + +/** + * t4_write_reg64 - write a 64-bit HW register + * @adapter: the adapter + * @reg_addr: the register address + * @val: the value to write + * + * Write a 64-bit value into the given HW register. + */ +static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, + u64 val) +{ + writeq(val, adapter->regs + reg_addr); +} + +/** + * port_name - return the string name of a port + * @adapter: the adapter + * @pidx: the port index + * + * Return the string name of the selected port. + */ +static inline const char *port_name(struct adapter *adapter, int pidx) +{ + return adapter->port[pidx]->name; +} + +/** + * t4_os_set_hw_addr - store a port's MAC address in SW + * @adapter: the adapter + * @pidx: the port index + * @hw_addr: the Ethernet address + * + * Store the Ethernet address of the given port in SW. Called by the common + * code when it retrieves a port's Ethernet address from EEPROM. 
+ */ +static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx, + u8 hw_addr[]) +{ + memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @pidx: the port index + * + * Return the port_info structure for the adapter. + */ +static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx) +{ + return netdev_priv(adapter->port[pidx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +/* + * OS "Callback" function declarations. These are functions that the OS code + * is "contracted" to provide for the common code. + */ +void t4vf_os_link_changed(struct adapter *, int, int); +void t4vf_os_portmod_changed(struct adapter *, int); + +/* + * SGE function prototype declarations. + */ +int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool, + struct net_device *, int, + struct sge_fl *, rspq_handler_t); +int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *, + struct net_device *, struct netdev_queue *, + unsigned int); +void t4vf_free_sge_resources(struct adapter *); + +int t4vf_eth_xmit(struct sk_buff *, struct net_device *); +int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *, + const struct pkt_gl *); + +irq_handler_t t4vf_intr_handler(struct adapter *); +irqreturn_t t4vf_sge_intr_msix(int, void *); + +int t4vf_sge_init(struct adapter *); +void t4vf_sge_start(struct adapter *); +void t4vf_sge_stop(struct adapter *); + +#endif /* __CXGB4VF_ADAPTER_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c new file mode 100644 index 000000000..1d893b0b7 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -0,0 +1,3098 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4_msg.h" + +/* + * Generic information about the driver. + */ +#define DRV_VERSION "2.0.0-ko" +#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver" + +/* + * Module Parameters. + * ================== + */ + +/* + * Default ethtool "message level" for adapters. + */ +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, + "default adapter ethtool message level bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X then MSI. This parameter determines which of these schemes the + * driver may consider as follows: + * + * msi = 2: choose from among MSI-X and MSI + * msi = 1: only consider MSI interrupts + * + * Note that unlike the Physical Function driver, this Virtual Function driver + * does _not_ support legacy INTx interrupts (this limitation is mandated by + * the PCI-E SR-IOV standard). + */ +#define MSI_MSIX 2 +#define MSI_MSI 1 +#define MSI_DEFAULT MSI_MSIX + +static int msi = MSI_DEFAULT; + +module_param(msi, int, 0644); +MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI"); + +/* + * Fundamental constants. + * ====================== + */ + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + + MIN_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16, + + /* + * For purposes of manipulating the Free List size we need to + * recognize that Free Lists are actually Egress Queues (the host + * produces free buffers which the hardware consumes), Egress Queues + * indices are all in units of Egress Context Units bytes, and free + * list entries are 64-bit PCI DMA addresses. And since the state of + * the Producer Index == the Consumer Index implies an EMPTY list, we + * always have at least one Egress Unit's worth of Free List entries + * unused. See sge.c for more details ... + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + MIN_FL_RESID = FL_PER_EQ_UNIT, +}; + +/* + * Global driver state. + * ==================== + */ + +static struct dentry *cxgb4vf_debugfs_root; + +/* + * OS "Callback" functions. + * ======================== + */ + +/* + * The link status has changed on the indicated "port" (Virtual Interface). + */ +void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) +{ + struct net_device *dev = adapter->port[pidx]; + + /* + * If the port is disabled or the current recorded "link up" + * status matches the new status, just return. + */ + if (!netif_running(dev) || link_ok == netif_carrier_ok(dev)) + return; + + /* + * Tell the OS that the link status has changed and print a short + * informative message on the console about the event. 
+ */
+	if (link_ok) {
+		const char *s;
+		const char *fc;
+		const struct port_info *pi = netdev_priv(dev);
+
+		netif_carrier_on(dev);
+
+		switch (pi->link_cfg.speed) {
+		case 40000:
+			s = "40Gbps";
+			break;
+
+		case 10000:
+			s = "10Gbps";
+			break;
+
+		case 1000:
+			s = "1000Mbps";
+			break;
+
+		case 100:
+			s = "100Mbps";
+			break;
+
+		default:
+			s = "unknown";
+			break;
+		}
+
+		switch (pi->link_cfg.fc) {
+		case PAUSE_RX:
+			fc = "RX";
+			break;
+
+		case PAUSE_TX:
+			fc = "TX";
+			break;
+
+		case PAUSE_RX|PAUSE_TX:
+			fc = "RX/TX";
+			break;
+
+		default:
+			fc = "no";
+			break;
+		}
+
+		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
+	} else {
+		netif_carrier_off(dev);
+		netdev_info(dev, "link down\n");
+	}
+}
+
+/*
+ * The port module type has changed on the indicated "port" (Virtual
+ * Interface).
+ */
+void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
+{
+	static const char * const mod_str[] = {
+		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+	};
+	const struct net_device *dev = adapter->port[pidx];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
+			 dev->name);
+	else if (pi->mod_type < ARRAY_SIZE(mod_str))
+		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
+			 dev->name, mod_str[pi->mod_type]);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
+			 "module inserted\n", dev->name);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
+			 "forcing TWINAX\n", dev->name);
+	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
+			 dev->name);
+	else
+		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
+			 "inserted\n", dev->name, pi->mod_type);
+}
+
+/*
+ * Net device operations.
+ * ======================
+ */
+
+
+
+
+/*
+ * Perform the MAC and PHY actions needed to enable a "port" (Virtual
+ * Interface).
+ */
+static int link_start(struct net_device *dev)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	/*
+	 * We do not set address filters and promiscuity here, the stack does
+	 * that step explicitly. Enable vlan accel.
+	 */
+	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
+			      true);
+	if (ret == 0) {
+		ret = t4vf_change_mac(pi->adapter, pi->viid,
+				      pi->xact_addr_filt, dev->dev_addr, true);
+		if (ret >= 0) {
+			pi->xact_addr_filt = ret;
+			ret = 0;
+		}
+	}
+
+	/*
+	 * We don't need to actually "start the link" itself since the
+	 * firmware will do that for us when the first Virtual Interface
+	 * is enabled on a port.
+	 */
+	if (ret == 0)
+		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
+	return ret;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adapter)
+{
+	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
+	int pidx;
+
+	/*
+	 * Firmware events.
+	 */
+	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
+		 "%s-FWeventq", adapter->name);
+	adapter->msix_info[MSIX_FW].desc[namelen] = 0;
+
+	/*
+	 * Ethernet queues.
+ */ + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + const struct port_info *pi = netdev_priv(dev); + int qs, msi; + + for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) { + snprintf(adapter->msix_info[msi].desc, namelen, + "%s-%d", dev->name, qs); + adapter->msix_info[msi].desc[namelen] = 0; + } + } +} + +/* + * Request all of our MSI-X resources. + */ +static int request_msix_queue_irqs(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq, msi, err; + + /* + * Firmware events. + */ + err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix, + 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq); + if (err) + return err; + + /* + * Ethernet queues. + */ + msi = MSIX_IQFLINT; + for_each_ethrxq(s, rxq) { + err = request_irq(adapter->msix_info[msi].vec, + t4vf_sge_intr_msix, 0, + adapter->msix_info[msi].desc, + &s->ethrxq[rxq].rspq); + if (err) + goto err_free_irqs; + msi++; + } + return 0; + +err_free_irqs: + while (--rxq >= 0) + free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq); + free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); + return err; +} + +/* + * Free our MSI-X resources. + */ +static void free_msix_queue_irqs(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq, msi; + + free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); + msi = MSIX_IQFLINT; + for_each_ethrxq(s, rxq) + free_irq(adapter->msix_info[msi++].vec, + &s->ethrxq[rxq].rspq); +} + +/* + * Turn on NAPI and start up interrupts on a response queue. + */ +static void qenable(struct sge_rspq *rspq) +{ + napi_enable(&rspq->napi); + + /* + * 0-increment the Going To Sleep register to start the timer and + * enable interrupts. + */ + t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC_V(0) | + SEINTARM_V(rspq->intr_params) | + INGRESSQID_V(rspq->cntxt_id)); +} + +/* + * Enable NAPI scheduling and interrupt generation for all Receive Queues. + */ +static void enable_rx(struct adapter *adapter) +{ + int rxq; + struct sge *s = &adapter->sge; + + for_each_ethrxq(s, rxq) + qenable(&s->ethrxq[rxq].rspq); + qenable(&s->fw_evtq); + + /* + * The interrupt queue doesn't use NAPI so we do the 0-increment of + * its Going To Sleep register here to get it started. + */ + if (adapter->flags & USING_MSI) + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + CIDXINC_V(0) | + SEINTARM_V(s->intrq.intr_params) | + INGRESSQID_V(s->intrq.cntxt_id)); + +} + +/* + * Wait until all NAPI handlers are descheduled. + */ +static void quiesce_rx(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int rxq; + + for_each_ethrxq(s, rxq) + napi_disable(&s->ethrxq[rxq].rspq.napi); + napi_disable(&s->fw_evtq.napi); +} + +/* + * Response queue handler for the firmware event queue. + */ +static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, + const struct pkt_gl *gl) +{ + /* + * Extract response opcode and get pointer to CPL message body. + */ + struct adapter *adapter = rspq->adapter; + u8 opcode = ((const struct rss_header *)rsp)->opcode; + void *cpl = (void *)(rsp + 1); + + switch (opcode) { + case CPL_FW6_MSG: { + /* + * We've received an asynchronous message from the firmware. + */ + const struct cpl_fw6_msg *fw_msg = cpl; + if (fw_msg->type == FW6_TYPE_CMD_RPL) + t4vf_handle_fw_rpl(adapter, fw_msg->data); + break; + } + + case CPL_FW4_MSG: { + /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 
+ */ + const struct cpl_sge_egr_update *p = (void *)(rsp + 3); + opcode = CPL_OPCODE_G(ntohl(p->opcode_qid)); + if (opcode != CPL_SGE_EGR_UPDATE) { + dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" + , opcode); + break; + } + cpl = (void *)p; + /*FALLTHROUGH*/ + } + + case CPL_SGE_EGR_UPDATE: { + /* + * We've received an Egress Queue Status Update message. We + * get these, if the SGE is configured to send these when the + * firmware passes certain points in processing our TX + * Ethernet Queue or if we make an explicit request for one. + * We use these updates to determine when we may need to + * restart a TX Ethernet Queue which was stopped for lack of + * free TX Queue Descriptors ... + */ + const struct cpl_sge_egr_update *p = cpl; + unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid)); + struct sge *s = &adapter->sge; + struct sge_txq *tq; + struct sge_eth_txq *txq; + unsigned int eq_idx; + + /* + * Perform sanity checking on the Queue ID to make sure it + * really refers to one of our TX Ethernet Egress Queues which + * is active and matches the queue's ID. None of these error + * conditions should ever happen so we may want to either make + * them fatal and/or conditionalized under DEBUG. + */ + eq_idx = EQ_IDX(s, qid); + if (unlikely(eq_idx >= MAX_EGRQ)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d out of range\n", qid); + break; + } + tq = s->egr_map[eq_idx]; + if (unlikely(tq == NULL)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d TXQ=NULL\n", qid); + break; + } + txq = container_of(tq, struct sge_eth_txq, q); + if (unlikely(tq->abs_id != qid)) { + dev_err(adapter->pdev_dev, + "Egress Update QID %d refers to TXQ %d\n", + qid, tq->abs_id); + break; + } + + /* + * Restart a stopped TX Queue which has less than half of its + * TX ring in use ... + */ + txq->q.restarts++; + netif_tx_wake_queue(txq->txq); + break; + } + + default: + dev_err(adapter->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); + } + + return 0; +} + +/* + * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues + * to use and initializes them. We support multiple "Queue Sets" per port if + * we have MSI-X, otherwise just one queue set per port. + */ +static int setup_sge_queues(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int err, pidx, msix; + + /* + * Clear "Queue Set" Free List Starving and TX Queue Mapping Error + * state. + */ + bitmap_zero(s->starving_fl, MAX_EGRQ); + + /* + * If we're using MSI interrupt mode we need to set up a "forwarded + * interrupt" queue which we'll set up with our MSI vector. The rest + * of the ingress queues will be set up to forward their interrupts to + * this queue ... This must be first since t4vf_sge_alloc_rxq() uses + * the intrq's queue ID as the interrupt forwarding queue for the + * subsequent calls ... + */ + if (adapter->flags & USING_MSI) { + err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false, + adapter->port[0], 0, NULL, NULL); + if (err) + goto err_free_queues; + } + + /* + * Allocate our ingress queue for asynchronous firmware messages. + */ + err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0], + MSIX_FW, NULL, fwevtq_handler); + if (err) + goto err_free_queues; + + /* + * Allocate each "port"'s initial Queue Sets. These can be changed + * later on ... up to the point where any interface on the adapter is + * brought up at which point lots of things get nailed down + * permanently ... 
+ */ + msix = MSIX_IQFLINT; + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; + int qs; + + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, + dev, msix++, + &rxq->fl, t4vf_ethrx_handler); + if (err) + goto err_free_queues; + + err = t4vf_sge_alloc_eth_txq(adapter, txq, dev, + netdev_get_tx_queue(dev, qs), + s->fw_evtq.cntxt_id); + if (err) + goto err_free_queues; + + rxq->rspq.idx = qs; + memset(&rxq->stats, 0, sizeof(rxq->stats)); + } + } + + /* + * Create the reverse mappings for the queues. + */ + s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id; + s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id; + IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq; + for_each_port(adapter, pidx) { + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset]; + int qs; + + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; + EQ_MAP(s, txq->q.abs_id) = &txq->q; + + /* + * The FW_IQ_CMD doesn't return the Absolute Queue IDs + * for Free Lists but since all of the Egress Queues + * (including Free Lists) have Relative Queue IDs + * which are computed as Absolute - Base Queue ID, we + * can synthesize the Absolute Queue IDs for the Free + * Lists. This is useful for debugging purposes when + * we want to dump Queue Contexts via the PF Driver. + */ + rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; + EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; + } + } + return 0; + +err_free_queues: + t4vf_free_sge_resources(adapter); + return err; +} + +/* + * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive + * queues. We configure the RSS CPU lookup table to distribute to the number + * of HW receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each "port" (Virtual + * Interface). We always configure the RSS mapping for all ports since the + * mapping table has plenty of entries. + */ +static int setup_rss(struct adapter *adapter) +{ + int pidx; + + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; + u16 rss[MAX_PORT_QSETS]; + int qs, err; + + for (qs = 0; qs < pi->nqsets; qs++) + rss[qs] = rxq[qs].rspq.abs_id; + + err = t4vf_config_rss_range(adapter, pi->viid, + 0, pi->rss_size, rss, pi->nqsets); + if (err) + return err; + + /* + * Perform Global RSS Mode-specific initialization. + */ + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: + /* + * If Tunnel All Lookup isn't specified in the global + * RSS Configuration, then we need to specify a + * default Ingress Queue for any ingress packets which + * aren't hashed. We'll use our first ingress queue + * ... + */ + if (!adapter->params.rss.u.basicvirtual.tnlalllookup) { + union rss_vi_config config; + err = t4vf_read_rss_vi_config(adapter, + pi->viid, + &config); + if (err) + return err; + config.basicvirtual.defaultq = + rxq[0].rspq.abs_id; + err = t4vf_write_rss_vi_config(adapter, + pi->viid, + &config); + if (err) + return err; + } + break; + } + } + + return 0; +} + +/* + * Bring the adapter up. 
Called whenever we go from no "ports" open to having + * one open. This function performs the actions necessary to make an adapter + * operational, such as completing the initialization of HW modules, and + * enabling interrupts. Must be called with the rtnl lock held. (Note that + * this is called "cxgb_up" in the PF Driver.) + */ +static int adapter_up(struct adapter *adapter) +{ + int err; + + /* + * If this is the first time we've been called, perform basic + * adapter setup. Once we've done this, many of our adapter + * parameters can no longer be changed ... + */ + if ((adapter->flags & FULL_INIT_DONE) == 0) { + err = setup_sge_queues(adapter); + if (err) + return err; + err = setup_rss(adapter); + if (err) { + t4vf_free_sge_resources(adapter); + return err; + } + + if (adapter->flags & USING_MSIX) + name_msix_vecs(adapter); + adapter->flags |= FULL_INIT_DONE; + } + + /* + * Acquire our interrupt resources. We only support MSI-X and MSI. + */ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + if (adapter->flags & USING_MSIX) + err = request_msix_queue_irqs(adapter); + else + err = request_irq(adapter->pdev->irq, + t4vf_intr_handler(adapter), 0, + adapter->name, adapter); + if (err) { + dev_err(adapter->pdev_dev, "request_irq failed, err %d\n", + err); + return err; + } + + /* + * Enable NAPI ingress processing and return success. + */ + enable_rx(adapter); + t4vf_sge_start(adapter); + return 0; +} + +/* + * Bring the adapter down. Called whenever the last "port" (Virtual + * Interface) closed. (Note that this routine is called "cxgb_down" in the PF + * Driver.) + */ +static void adapter_down(struct adapter *adapter) +{ + /* + * Free interrupt resources. + */ + if (adapter->flags & USING_MSIX) + free_msix_queue_irqs(adapter); + else + free_irq(adapter->pdev->irq, adapter); + + /* + * Wait for NAPI handlers to finish. + */ + quiesce_rx(adapter); +} + +/* + * Start up a net device. + */ +static int cxgb4vf_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + /* + * If this is the first interface that we're opening on the "adapter", + * bring the "adapter" up now. + */ + if (adapter->open_device_map == 0) { + err = adapter_up(adapter); + if (err) + return err; + } + + /* + * Note that this interface is up and start everything up ... + */ + netif_set_real_num_tx_queues(dev, pi->nqsets); + err = netif_set_real_num_rx_queues(dev, pi->nqsets); + if (err) + goto err_unwind; + err = link_start(dev); + if (err) + goto err_unwind; + + netif_tx_start_all_queues(dev); + set_bit(pi->port_id, &adapter->open_device_map); + return 0; + +err_unwind: + if (adapter->open_device_map == 0) + adapter_down(adapter); + return err; +} + +/* + * Shut down a net device. This routine is called "cxgb_close" in the PF + * Driver ... + */ +static int cxgb4vf_stop(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + t4vf_enable_vi(adapter, pi->viid, false, false); + pi->link_cfg.link_ok = 0; + + clear_bit(pi->port_id, &adapter->open_device_map); + if (adapter->open_device_map == 0) + adapter_down(adapter); + return 0; +} + +/* + * Translate our basic statistics into the standard "ifconfig" statistics. 
+ */
+static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
+{
+	struct t4vf_port_stats stats;
+	struct port_info *pi = netdev2pinfo(dev);
+	struct adapter *adapter = pi->adapter;
+	struct net_device_stats *ns = &dev->stats;
+	int err;
+
+	spin_lock(&adapter->stats_lock);
+	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
+	spin_unlock(&adapter->stats_lock);
+
+	memset(ns, 0, sizeof(*ns));
+	if (err)
+		return ns;
+
+	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
+			stats.tx_ucast_bytes + stats.tx_offload_bytes);
+	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
+			  stats.tx_ucast_frames + stats.tx_offload_frames);
+	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
+			stats.rx_ucast_bytes);
+	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
+			  stats.rx_ucast_frames);
+	ns->multicast = stats.rx_mcast_frames;
+	ns->tx_errors = stats.tx_drop_frames;
+	ns->rx_errors = stats.rx_err_frames;
+
+	return ns;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
+ */
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
+{
+	unsigned int index = 0;
+	unsigned int naddr = 0;
+	const struct netdev_hw_addr *ha;
+
+	for_each_dev_addr(dev, ha)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
+	return naddr;
+}
+
+/*
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
+ */
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
+{
+	unsigned int index = 0;
+	unsigned int naddr = 0;
+	const struct netdev_hw_addr *ha;
+
+	netdev_for_each_mc_addr(ha, dev)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
+	return naddr;
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+	u64 mhash = 0;
+	u64 uhash = 0;
+	bool free = true;
+	unsigned int offset, naddr;
+	const u8 *addr[7];
+	int ret;
+	const struct port_info *pi = netdev_priv(dev);
+
+	/* first do the secondary unicast addresses */
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, NULL, &uhash, sleep);
+		if (ret < 0)
+			return ret;
+
+		free = false;
+	}
+
+	/* next set up the multicast addresses */
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
+		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
+					  naddr, addr, NULL, &mhash, sleep);
+		if (ret < 0)
+			return ret;
+		free = false;
+	}
+
+	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
+				  uhash | mhash, sleep);
+}
+
+/*
+ * Set RX properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */ +static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + ret = set_addr_filters(dev, sleep_ok); + if (ret == 0) + ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1, + (dev->flags & IFF_PROMISC) != 0, + (dev->flags & IFF_ALLMULTI) != 0, + 1, -1, sleep_ok); + return ret; +} + +/* + * Set the current receive modes on the device. + */ +static void cxgb4vf_set_rxmode(struct net_device *dev) +{ + /* unfortunately we can't return errors to the stack */ + set_rxmode(dev, -1, false); +} + +/* + * Find the entry in the interrupt holdoff timer value array which comes + * closest to the specified interrupt holdoff value. + */ +static int closest_timer(const struct sge *s, int us) +{ + int i, timer_idx = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + int delta = us - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + timer_idx = i; + } + } + return timer_idx; +} + +static int closest_thres(const struct sge *s, int thres) +{ + int i, delta, pktcnt_idx = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + pktcnt_idx = i; + } + } + return pktcnt_idx; +} + +/* + * Return a queue's interrupt hold-off time in us. 0 means no timer. + */ +static unsigned int qtimer_val(const struct adapter *adapter, + const struct sge_rspq *rspq) +{ + unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params); + + return timer_idx < SGE_NTIMERS + ? adapter->sge.timer_val[timer_idx] + : 0; +} + +/** + * set_rxq_intr_params - set a queue's interrupt holdoff parameters + * @adapter: the adapter + * @rspq: the RX response queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an RX response queue's interrupt hold-off time and packet count. + * At least one of the two needs to be enabled for the queue to generate + * interrupts. + */ +static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, + unsigned int us, unsigned int cnt) +{ + unsigned int timer_idx; + + /* + * If both the interrupt holdoff timer and count are specified as + * zero, default to a holdoff count of 1 ... + */ + if ((us | cnt) == 0) + cnt = 1; + + /* + * If an interrupt holdoff count has been specified, then find the + * closest configured holdoff count and use that. If the response + * queue has already been created, then update its queue context + * parameters ... + */ + if (cnt) { + int err; + u32 v, pktcnt_idx; + + pktcnt_idx = closest_thres(&adapter->sge, cnt); + if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) { + v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V( + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id); + err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx); + if (err) + return err; + } + rspq->pktcnt_idx = pktcnt_idx; + } + + /* + * Compute the closest holdoff timer index from the supplied holdoff + * timer value. + */ + timer_idx = (us == 0 + ? SGE_TIMER_RSTRT_CNTR + : closest_timer(&adapter->sge, us)); + + /* + * Update the response queue's interrupt coalescing parameters and + * return success. + */ + rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) | + (cnt > 0 ? QINTR_CNT_EN : 0)); + return 0; +} + +/* + * Return a version number to identify the type of adapter. 
The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + */ +static inline unsigned int mk_adap_vers(const struct adapter *adapter) +{ + /* + * Chip version 4, revision 0x3f (cxgb4vf). + */ + return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10); +} + +/* + * Execute the specified ioctl command. + */ +static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int ret = 0; + + switch (cmd) { + /* + * The VF Driver doesn't have access to any of the other + * common Ethernet device ioctl()'s (like reading/writing + * PHY registers, etc. + */ + + default: + ret = -EOPNOTSUPP; + break; + } + return ret; +} + +/* + * Change the device's MTU. + */ +static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + /* accommodate SACK */ + if (new_mtu < 81) + return -EINVAL; + + ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu, + -1, -1, -1, -1, true); + if (!ret) + dev->mtu = new_mtu; + return ret; +} + +static netdev_features_t cxgb4vf_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int cxgb4vf_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct port_info *pi = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, + features & NETIF_F_HW_VLAN_CTAG_TX, 0); + + return 0; +} + +/* + * Change the devices MAC address. + */ +static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr) +{ + int ret; + struct sockaddr *addr = _addr; + struct port_info *pi = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt, + addr->sa_data, true); + if (ret < 0) + return ret; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + pi->xact_addr_filt = ret; + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Poll all of our receive queues. This is called outside of normal interrupt + * context. + */ +static void cxgb4vf_poll_controller(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (adapter->flags & USING_MSIX) { + struct sge_eth_rxq *rxq; + int nqsets; + + rxq = &adapter->sge.ethrxq[pi->first_qset]; + for (nqsets = pi->nqsets; nqsets; nqsets--) { + t4vf_sge_intr_msix(0, &rxq->rspq); + rxq++; + } + } else + t4vf_intr_handler(adapter)(0, adapter); +} +#endif + +/* + * Ethtool operations. + * =================== + * + * Note that we don't support any ethtool operations which change the physical + * state of the port to which we're linked. 
+ */ + +static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type, + unsigned int caps) +{ + unsigned int v = 0; + + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || + type == FW_PORT_TYPE_BT_XAUI) { + v |= SUPPORTED_TP; + if (caps & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { + v |= SUPPORTED_Backplane; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseKX_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_KR) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; + else if (type == FW_PORT_TYPE_BP_AP) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; + else if (type == FW_PORT_TYPE_BP4_AP) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | + SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | + SUPPORTED_10000baseKX4_Full; + else if (type == FW_PORT_TYPE_FIBER_XFI || + type == FW_PORT_TYPE_FIBER_XAUI || + type == FW_PORT_TYPE_SFP || + type == FW_PORT_TYPE_QSFP_10G || + type == FW_PORT_TYPE_QSA) { + v |= SUPPORTED_FIBRE; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_BP40_BA || + type == FW_PORT_TYPE_QSFP) { + v |= SUPPORTED_40000baseSR4_Full; + v |= SUPPORTED_FIBRE; + } + + if (caps & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + return v; +} + +static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + const struct port_info *p = netdev_priv(dev); + + if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XFI || + p->port_type == FW_PORT_TYPE_BT_XAUI) + cmd->port = PORT_TP; + else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || + p->port_type == FW_PORT_TYPE_FIBER_XAUI) + cmd->port = PORT_FIBRE; + else if (p->port_type == FW_PORT_TYPE_SFP || + p->port_type == FW_PORT_TYPE_QSFP_10G || + p->port_type == FW_PORT_TYPE_QSA || + p->port_type == FW_PORT_TYPE_QSFP) { + if (p->mod_type == FW_PORT_MOD_TYPE_LR || + p->mod_type == FW_PORT_MOD_TYPE_SR || + p->mod_type == FW_PORT_MOD_TYPE_ER || + p->mod_type == FW_PORT_MOD_TYPE_LRM) + cmd->port = PORT_FIBRE; + else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + cmd->port = PORT_DA; + else + cmd->port = PORT_OTHER; + } else + cmd->port = PORT_OTHER; + + if (p->mdio_addr >= 0) { + cmd->phy_address = p->mdio_addr; + cmd->transceiver = XCVR_EXTERNAL; + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + } else { + cmd->phy_address = 0; /* not really, but no better option */ + cmd->transceiver = XCVR_INTERNAL; + cmd->mdio_support = 0; + } + + cmd->supported = t4vf_from_fw_linkcaps(p->port_type, + p->link_cfg.supported); + cmd->advertising = t4vf_from_fw_linkcaps(p->port_type, + p->link_cfg.advertising); + ethtool_cmd_speed_set(cmd, + netif_carrier_ok(dev) ? p->link_cfg.speed : 0); + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = p->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +/* + * Return our driver information. 
+ */ +static void cxgb4vf_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct adapter *adapter = netdev2adap(dev); + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)), + sizeof(drvinfo->bus_info)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev), + FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev), + FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev)); +} + +/* + * Return current adapter message level. + */ +static u32 cxgb4vf_get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +/* + * Set current adapter message level. + */ +static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel) +{ + netdev2adap(dev)->msg_enable = msglevel; +} + +/* + * Return the device's current Queue Set ring size parameters along with the + * allowed maximum values. Since ethtool doesn't understand the concept of + * multi-queue devices, we just return the current values associated with the + * first Queue Set. + */ +static void cxgb4vf_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + rp->rx_max_pending = MAX_RX_BUFFERS; + rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + rp->rx_jumbo_max_pending = 0; + rp->tx_max_pending = MAX_TXQ_ENTRIES; + + rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID; + rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + rp->rx_jumbo_pending = 0; + rp->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +/* + * Set the Queue Set ring size parameters for the device. Again, since + * ethtool doesn't allow for the concept of multiple queues per device, we'll + * apply these new values across all of the Queue Sets associated with the + * device -- after vetting them of course! + */ +static int cxgb4vf_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *rp) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + int qs; + + if (rp->rx_pending > MAX_RX_BUFFERS || + rp->rx_jumbo_pending || + rp->tx_pending > MAX_TXQ_ENTRIES || + rp->rx_mini_pending > MAX_RSPQ_ENTRIES || + rp->rx_mini_pending < MIN_RSPQ_ENTRIES || + rp->rx_pending < MIN_FL_ENTRIES || + rp->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) { + s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID; + s->ethrxq[qs].rspq.size = rp->rx_mini_pending; + s->ethtxq[qs].q.size = rp->tx_pending; + } + return 0; +} + +/* + * Return the interrupt holdoff timer and count for the first Queue Set on the + * device. Our extension ioctl() (the cxgbtool interface) allows the + * interrupt holdoff timer to be read on all of the device's Queue Sets. 
+ */ +static int cxgb4vf_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adapter = pi->adapter; + const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq; + + coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq); + coalesce->rx_max_coalesced_frames = + ((rspq->intr_params & QINTR_CNT_EN) + ? adapter->sge.counter_val[rspq->pktcnt_idx] + : 0); + return 0; +} + +/* + * Set the RX interrupt holdoff timer and count for the first Queue Set on the + * interface. Our extension ioctl() (the cxgbtool interface) allows us to set + * the interrupt holdoff timer on any of the device's Queue Sets. + */ +static int cxgb4vf_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + return set_rxq_intr_params(adapter, + &adapter->sge.ethrxq[pi->first_qset].rspq, + coalesce->rx_coalesce_usecs, + coalesce->rx_max_coalesced_frames); +} + +/* + * Report current port link pause parameter settings. + */ +static void cxgb4vf_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pauseparam) +{ + struct port_info *pi = netdev_priv(dev); + + pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; + pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; +} + +/* + * Identify the port by blinking the port's LED. + */ +static int cxgb4vf_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + unsigned int val; + struct port_info *pi = netdev_priv(dev); + + if (state == ETHTOOL_ID_ACTIVE) + val = 0xffff; + else if (state == ETHTOOL_ID_INACTIVE) + val = 0; + else + return -EINVAL; + + return t4vf_identify_port(pi->adapter, pi->viid, val); +} + +/* + * Port stats maintained per queue of the port. + */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; + u64 lro_pkts; + u64 lro_merged; +}; + +/* + * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that + * these need to match the order of statistics returned by + * t4vf_get_port_stats(). + */ +static const char stats_strings[][ETH_GSTRING_LEN] = { + /* + * These must match the layout of the t4vf_port_stats structure. + */ + "TxBroadcastBytes ", + "TxBroadcastFrames ", + "TxMulticastBytes ", + "TxMulticastFrames ", + "TxUnicastBytes ", + "TxUnicastFrames ", + "TxDroppedFrames ", + "TxOffloadBytes ", + "TxOffloadFrames ", + "RxBroadcastBytes ", + "RxBroadcastFrames ", + "RxMulticastBytes ", + "RxMulticastFrames ", + "RxUnicastBytes ", + "RxUnicastFrames ", + "RxErrorFrames ", + + /* + * These are accumulated per-queue statistics and must match the + * order of the fields in the queue_port_stats structure. + */ + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", + "GROPackets ", + "GROMerged ", +}; + +/* + * Return the number of statistics in the specified statistics set. + */ +static int cxgb4vf_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } + /*NOTREACHED*/ +} + +/* + * Return the strings for the specified statistics set. 
+ */ +static void cxgb4vf_get_strings(struct net_device *dev, + u32 sset, + u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + memcpy(data, stats_strings, sizeof(stats_strings)); + break; + } +} + +/* + * Small utility routine to accumulate queue statistics across the queues of + * a "port". + */ +static void collect_sge_port_stats(const struct adapter *adapter, + const struct port_info *pi, + struct queue_port_stats *stats) +{ + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset]; + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; + int qs; + + memset(stats, 0, sizeof(*stats)); + for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { + stats->tso += txq->tso; + stats->tx_csum += txq->tx_cso; + stats->rx_csum += rxq->stats.rx_cso; + stats->vlan_ex += rxq->stats.vlan_ex; + stats->vlan_ins += txq->vlan_ins; + stats->lro_pkts += rxq->stats.lro_pkts; + stats->lro_merged += rxq->stats.lro_merged; + } +} + +/* + * Return the ETH_SS_STATS statistics set. + */ +static void cxgb4vf_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adapter = pi->adapter; + int err = t4vf_get_port_stats(adapter, pi->pidx, + (struct t4vf_port_stats *)data); + if (err) + memset(data, 0, sizeof(struct t4vf_port_stats)); + + data += sizeof(struct t4vf_port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); +} + +/* + * Return the size of our register map. + */ +static int cxgb4vf_get_regs_len(struct net_device *dev) +{ + return T4VF_REGMAP_SIZE; +} + +/* + * Dump a block of registers, start to end inclusive, into a buffer. + */ +static void reg_block_dump(struct adapter *adapter, void *regbuf, + unsigned int start, unsigned int end) +{ + u32 *bp = regbuf + start - T4VF_REGMAP_START; + + for ( ; start <= end; start += sizeof(u32)) { + /* + * Avoid reading the Mailbox Control register since that + * can trigger a Mailbox Ownership Arbitration cycle and + * interfere with communication with the firmware. + */ + if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL) + *bp++ = 0xffff; + else + *bp++ = t4_read_reg(adapter, start); + } +} + +/* + * Copy our entire register map into the provided buffer. + */ +static void cxgb4vf_get_regs(struct net_device *dev, + struct ethtool_regs *regs, + void *regbuf) +{ + struct adapter *adapter = netdev2adap(dev); + + regs->version = mk_adap_vers(adapter); + + /* + * Fill in register buffer with our register map. + */ + memset(regbuf, 0, T4VF_REGMAP_SIZE); + + reg_block_dump(adapter, regbuf, + T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST, + T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST); + reg_block_dump(adapter, regbuf, + T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST, + T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST); + + /* T5 adds new registers in the PL Register map. + */ + reg_block_dump(adapter, regbuf, + T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST, + T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip) + ? PL_VF_WHOAMI_A : PL_VF_REVISION_A)); + reg_block_dump(adapter, regbuf, + T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST, + T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST); + + reg_block_dump(adapter, regbuf, + T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST, + T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST); +} + +/* + * Report current Wake On LAN settings. 
+ */ +static void cxgb4vf_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +/* + * TCP Segmentation Offload flags which we support. + */ +#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) + +static const struct ethtool_ops cxgb4vf_ethtool_ops = { + .get_settings = cxgb4vf_get_settings, + .get_drvinfo = cxgb4vf_get_drvinfo, + .get_msglevel = cxgb4vf_get_msglevel, + .set_msglevel = cxgb4vf_set_msglevel, + .get_ringparam = cxgb4vf_get_ringparam, + .set_ringparam = cxgb4vf_set_ringparam, + .get_coalesce = cxgb4vf_get_coalesce, + .set_coalesce = cxgb4vf_set_coalesce, + .get_pauseparam = cxgb4vf_get_pauseparam, + .get_link = ethtool_op_get_link, + .get_strings = cxgb4vf_get_strings, + .set_phys_id = cxgb4vf_phys_id, + .get_sset_count = cxgb4vf_get_sset_count, + .get_ethtool_stats = cxgb4vf_get_ethtool_stats, + .get_regs_len = cxgb4vf_get_regs_len, + .get_regs = cxgb4vf_get_regs, + .get_wol = cxgb4vf_get_wol, +}; + +/* + * /sys/kernel/debug/cxgb4vf support code and data. + * ================================================ + */ + +/* + * Show SGE Queue Set information. We display QPL Queues Sets per line. + */ +#define QPL 4 + +static int sge_qinfo_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); + int qs, r = (uintptr_t)v - 1; + + if (r) + seq_putc(seq, '\n'); + + #define S3(fmt_spec, s, v) \ + do {\ + seq_printf(seq, "%-12s", s); \ + for (qs = 0; qs < n; ++qs) \ + seq_printf(seq, " %16" fmt_spec, v); \ + seq_putc(seq, '\n'); \ + } while (0) + #define S(s, v) S3("s", s, v) + #define T(s, v) S3("u", s, txq[qs].v) + #define R(s, v) S3("u", s, rxq[qs].v) + + if (r < eth_entries) { + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; + int n = min(QPL, adapter->sge.ethqsets - QPL * r); + + S("QType:", "Ethernet"); + S("Interface:", + (rxq[qs].rspq.netdev + ? rxq[qs].rspq.netdev->name + : "N/A")); + S3("d", "Port:", + (rxq[qs].rspq.netdev + ? 
((struct port_info *) + netdev_priv(rxq[qs].rspq.netdev))->port_id + : -1)); + T("TxQ ID:", q.abs_id); + T("TxQ size:", q.size); + T("TxQ inuse:", q.in_use); + T("TxQ PIdx:", q.pidx); + T("TxQ CIdx:", q.cidx); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq)); + S3("u", "Intr pktcnt:", + adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]); + R("RspQ CIdx:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + R("FL ID:", fl.abs_id); + R("FL size:", fl.size - MIN_FL_RESID); + R("FL avail:", fl.avail); + R("FL PIdx:", fl.pidx); + R("FL CIdx:", fl.cidx); + return 0; + } + + r -= eth_entries; + if (r == 0) { + const struct sge_rspq *evtq = &adapter->sge.fw_evtq; + + seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue"); + seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id); + seq_printf(seq, "%-12s %16u\n", "Intr delay:", + qtimer_val(adapter, evtq)); + seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", + adapter->sge.counter_val[evtq->pktcnt_idx]); + seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx); + seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen); + } else if (r == 1) { + const struct sge_rspq *intrq = &adapter->sge.intrq; + + seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue"); + seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id); + seq_printf(seq, "%-12s %16u\n", "Intr delay:", + qtimer_val(adapter, intrq)); + seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", + adapter->sge.counter_val[intrq->pktcnt_idx]); + seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx); + seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen); + } + + #undef R + #undef T + #undef S + #undef S3 + + return 0; +} + +/* + * Return the number of "entries" in our "file". We group the multi-Queue + * sections with QPL Queue Sets per "entry". The sections of the output are: + * + * Ethernet RX/TX Queue Sets + * Firmware Event Queue + * Forwarded Interrupt Queue (if in MSI mode) + */ +static int sge_queue_entries(const struct adapter *adapter) +{ + return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + + ((adapter->flags & USING_MSI) != 0); +} + +static void *sge_queue_start(struct seq_file *seq, loff_t *pos) +{ + int entries = sge_queue_entries(seq->private); + + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static void sge_queue_stop(struct seq_file *seq, void *v) +{ +} + +static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos) +{ + int entries = sge_queue_entries(seq->private); + + ++*pos; + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static const struct seq_operations sge_qinfo_seq_ops = { + .start = sge_queue_start, + .next = sge_queue_next, + .stop = sge_queue_stop, + .show = sge_qinfo_show +}; + +static int sge_qinfo_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &sge_qinfo_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations sge_qinfo_debugfs_fops = { + .owner = THIS_MODULE, + .open = sge_qinfo_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * Show SGE Queue Set statistics. We display QPL Queues Sets per line. 
+ */ +#define QPL 4 + +static int sge_qstats_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL); + int qs, r = (uintptr_t)v - 1; + + if (r) + seq_putc(seq, '\n'); + + #define S3(fmt, s, v) \ + do { \ + seq_printf(seq, "%-16s", s); \ + for (qs = 0; qs < n; ++qs) \ + seq_printf(seq, " %8" fmt, v); \ + seq_putc(seq, '\n'); \ + } while (0) + #define S(s, v) S3("s", s, v) + + #define T3(fmt, s, v) S3(fmt, s, txq[qs].v) + #define T(s, v) T3("lu", s, v) + + #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v) + #define R(s, v) R3("lu", s, v) + + if (r < eth_entries) { + const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; + const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL]; + int n = min(QPL, adapter->sge.ethqsets - QPL * r); + + S("QType:", "Ethernet"); + S("Interface:", + (rxq[qs].rspq.netdev + ? rxq[qs].rspq.netdev->name + : "N/A")); + R3("u", "RspQNullInts:", rspq.unhandled_irqs); + R("RxPackets:", stats.pkts); + R("RxCSO:", stats.rx_cso); + R("VLANxtract:", stats.vlan_ex); + R("LROmerged:", stats.lro_merged); + R("LROpackets:", stats.lro_pkts); + R("RxDrops:", stats.rx_drops); + T("TSO:", tso); + T("TxCSO:", tx_cso); + T("VLANins:", vlan_ins); + T("TxQFull:", q.stops); + T("TxQRestarts:", q.restarts); + T("TxMapErr:", mapping_err); + R("FLAllocErr:", fl.alloc_failed); + R("FLLrgAlcErr:", fl.large_alloc_failed); + R("FLStarving:", fl.starving); + return 0; + } + + r -= eth_entries; + if (r == 0) { + const struct sge_rspq *evtq = &adapter->sge.fw_evtq; + + seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue"); + seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", + evtq->unhandled_irqs); + seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx); + seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen); + } else if (r == 1) { + const struct sge_rspq *intrq = &adapter->sge.intrq; + + seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue"); + seq_printf(seq, "%-16s %8u\n", "RspQNullInts:", + intrq->unhandled_irqs); + seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx); + seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen); + } + + #undef R + #undef T + #undef S + #undef R3 + #undef T3 + #undef S3 + + return 0; +} + +/* + * Return the number of "entries" in our "file". We group the multi-Queue + * sections with QPL Queue Sets per "entry". The sections of the output are: + * + * Ethernet RX/TX Queue Sets + * Firmware Event Queue + * Forwarded Interrupt Queue (if in MSI mode) + */ +static int sge_qstats_entries(const struct adapter *adapter) +{ + return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + + ((adapter->flags & USING_MSI) != 0); +} + +static void *sge_qstats_start(struct seq_file *seq, loff_t *pos) +{ + int entries = sge_qstats_entries(seq->private); + + return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; +} + +static void sge_qstats_stop(struct seq_file *seq, void *v) +{ +} + +static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos) +{ + int entries = sge_qstats_entries(seq->private); + + (*pos)++; + return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; +} + +static const struct seq_operations sge_qstats_seq_ops = { + .start = sge_qstats_start, + .next = sge_qstats_next, + .stop = sge_qstats_stop, + .show = sge_qstats_show +}; + +static int sge_qstats_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &sge_qstats_seq_ops); + + if (res == 0) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations sge_qstats_proc_fops = { + .owner = THIS_MODULE, + .open = sge_qstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * Show PCI-E SR-IOV Virtual Function Resource Limits. + */ +static int resources_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct vf_resources *vfres = &adapter->params.vfres; + + #define S(desc, fmt, var) \ + seq_printf(seq, "%-60s " fmt "\n", \ + desc " (" #var "):", vfres->var) + + S("Virtual Interfaces", "%d", nvi); + S("Egress Queues", "%d", neq); + S("Ethernet Control", "%d", nethctrl); + S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint); + S("Ingress Queues", "%d", niq); + S("Traffic Class", "%d", tc); + S("Port Access Rights Mask", "%#x", pmask); + S("MAC Address Filters", "%d", nexactf); + S("Firmware Command Read Capabilities", "%#x", r_caps); + S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps); + + #undef S + + return 0; +} + +static int resources_open(struct inode *inode, struct file *file) +{ + return single_open(file, resources_show, inode->i_private); +} + +static const struct file_operations resources_proc_fops = { + .owner = THIS_MODULE, + .open = resources_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Show Virtual Interfaces. + */ +static int interfaces_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Interface Port VIID\n"); + } else { + struct adapter *adapter = seq->private; + int pidx = (uintptr_t)v - 2; + struct net_device *dev = adapter->port[pidx]; + struct port_info *pi = netdev_priv(dev); + + seq_printf(seq, "%9s %4d %#5x\n", + dev->name, pi->port_id, pi->viid); + } + return 0; +} + +static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos) +{ + return pos <= adapter->params.nports + ? (void *)(uintptr_t)(pos + 1) + : NULL; +} + +static void *interfaces_start(struct seq_file *seq, loff_t *pos) +{ + return *pos + ? interfaces_get_idx(seq->private, *pos) + : SEQ_START_TOKEN; +} + +static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + return interfaces_get_idx(seq->private, *pos); +} + +static void interfaces_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations interfaces_seq_ops = { + .start = interfaces_start, + .next = interfaces_next, + .stop = interfaces_stop, + .show = interfaces_show +}; + +static int interfaces_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &interfaces_seq_ops); + + if (res == 0) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations interfaces_proc_fops = { + .owner = THIS_MODULE, + .open = interfaces_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * /sys/kernel/debugfs/cxgb4vf/ files list. 
+ */ +struct cxgb4vf_debugfs_entry { + const char *name; /* name of debugfs node */ + umode_t mode; /* file system mode */ + const struct file_operations *fops; +}; + +static struct cxgb4vf_debugfs_entry debugfs_files[] = { + { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops }, + { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops }, + { "resources", S_IRUGO, &resources_proc_fops }, + { "interfaces", S_IRUGO, &interfaces_proc_fops }, +}; + +/* + * Module and device initialization and cleanup code. + * ================================================== + */ + +/* + * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the + * directory (debugfs_root) has already been set up. + */ +static int setup_debugfs(struct adapter *adapter) +{ + int i; + + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); + + /* + * Debugfs support is best effort. + */ + for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) + (void)debugfs_create_file(debugfs_files[i].name, + debugfs_files[i].mode, + adapter->debugfs_root, + (void *)adapter, + debugfs_files[i].fops); + + return 0; +} + +/* + * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave + * it to our caller to tear down the directory (debugfs_root). + */ +static void cleanup_debugfs(struct adapter *adapter) +{ + BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); + + /* + * Unlike our sister routine cleanup_proc(), we don't need to remove + * individual entries because a call will be made to + * debugfs_remove_recursive(). We just need to clean up any ancillary + * persistent state. + */ + /* nothing to do */ +} + +/* + * Perform early "adapter" initialization. This is where we discover what + * adapter parameters we're going to be using and initialize basic adapter + * hardware support. + */ +static int adap_init0(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + struct sge_params *sge_params = &adapter->params.sge; + struct sge *s = &adapter->sge; + unsigned int ethqsets; + int err; + u32 param, val = 0; + + /* + * Wait for the device to become ready before proceeding ... + */ + err = t4vf_wait_dev_ready(adapter); + if (err) { + dev_err(adapter->pdev_dev, "device didn't become ready:" + " err=%d\n", err); + return err; + } + + /* + * Some environments do not properly handle PCIE FLRs -- e.g. in Linux + * 2.6.31 and later we can't call pci_reset_function() in order to + * issue an FLR because of a self- deadlock on the device semaphore. + * Meanwhile, the OS infrastructure doesn't issue FLRs in all the + * cases where they're needed -- for instance, some versions of KVM + * fail to reset "Assigned Devices" when the VM reboots. Therefore we + * use the firmware based reset in order to reset any per function + * state. + */ + err = t4vf_fw_reset(adapter); + if (err < 0) { + dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); + return err; + } + + /* + * Grab basic operational parameters. These will predominantly have + * been set up by the Physical Function Driver or will be hard coded + * into the adapter. We just have to live with them ... Note that + * we _must_ get our VPD parameters before our SGE parameters because + * we need to know the adapter's core clock from the VPD in order to + * properly decode the SGE Timer Values. 
+ */ + err = t4vf_get_dev_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " device parameters: err=%d\n", err); + return err; + } + err = t4vf_get_vpd_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " VPD parameters: err=%d\n", err); + return err; + } + err = t4vf_get_sge_params(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " SGE parameters: err=%d\n", err); + return err; + } + err = t4vf_get_rss_glb_config(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to retrieve adapter" + " RSS parameters: err=%d\n", err); + return err; + } + if (adapter->params.rss.mode != + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + dev_err(adapter->pdev_dev, "unable to operate with global RSS" + " mode %d\n", adapter->params.rss.mode); + return -EINVAL; + } + err = t4vf_sge_init(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to use adapter parameters:" + " err=%d\n", err); + return err; + } + + /* If we're running on newer firmware, let it know that we're + * prepared to deal with encapsulated CPL messages. Older + * firmware won't understand this and we'll just get + * unencapsulated messages ... + */ + param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP); + val = 1; + (void) t4vf_set_params(adapter, 1, ¶m, &val); + + /* + * Retrieve our RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. + */ + s->timer_val[0] = core_ticks_to_us(adapter, + TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adapter, + TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adapter, + TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adapter, + TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adapter, + TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adapter, + TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5)); + + s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold); + s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold); + s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold); + s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold); + + /* + * Grab our Virtual Interface resource allocation, extract the + * features that we're interested in and do a bit of sanity testing on + * what we discover. + */ + err = t4vf_get_vfres(adapter); + if (err) { + dev_err(adapter->pdev_dev, "unable to get virtual interface" + " resources: err=%d\n", err); + return err; + } + + /* + * The number of "ports" which we support is equal to the number of + * Virtual Interfaces with which we've been provisioned. + */ + adapter->params.nports = vfres->nvi; + if (adapter->params.nports > MAX_NPORTS) { + dev_warn(adapter->pdev_dev, "only using %d of %d allowed" + " virtual interfaces\n", MAX_NPORTS, + adapter->params.nports); + adapter->params.nports = MAX_NPORTS; + } + + /* + * We need to reserve a number of the ingress queues with Free List + * and Interrupt capabilities for special interrupt purposes (like + * asynchronous firmware messages, or forwarded interrupts if we're + * using MSI). 
The rest of the FL/Intr-capable ingress queues will be + * matched up one-for-one with Ethernet/Control egress queues in order + * to form "Queue Sets" which will be aportioned between the "ports". + * For each Queue Set, we'll need the ability to allocate two Egress + * Contexts -- one for the Ingress Queue Free List and one for the TX + * Ethernet Queue. + */ + ethqsets = vfres->niqflint - INGQ_EXTRAS; + if (vfres->nethctrl != ethqsets) { + dev_warn(adapter->pdev_dev, "unequal number of [available]" + " ingress/egress queues (%d/%d); using minimum for" + " number of Queue Sets\n", ethqsets, vfres->nethctrl); + ethqsets = min(vfres->nethctrl, ethqsets); + } + if (vfres->neq < ethqsets*2) { + dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)" + " to support Queue Sets (%d); reducing allowed Queue" + " Sets\n", vfres->neq, ethqsets); + ethqsets = vfres->neq/2; + } + if (ethqsets > MAX_ETH_QSETS) { + dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue" + " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets); + ethqsets = MAX_ETH_QSETS; + } + if (vfres->niq != 0 || vfres->neq > ethqsets*2) { + dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)" + " ignored\n", vfres->niq, vfres->neq - ethqsets*2); + } + adapter->sge.max_ethqsets = ethqsets; + + /* + * Check for various parameter sanity issues. Most checks simply + * result in us using fewer resources than our provissioning but we + * do need at least one "port" with which to work ... + */ + if (adapter->sge.max_ethqsets < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d available" + " virtual interfaces (too few Queue Sets)\n", + adapter->sge.max_ethqsets, adapter->params.nports); + adapter->params.nports = adapter->sge.max_ethqsets; + } + if (adapter->params.nports == 0) { + dev_err(adapter->pdev_dev, "no virtual interfaces configured/" + "usable!\n"); + return -EINVAL; + } + return 0; +} + +static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx, + u8 pkt_cnt_idx, unsigned int size, + unsigned int iqe_size) +{ + rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) | + (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0)); + rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS + ? pkt_cnt_idx + : 0); + rspq->iqe_len = iqe_size; + rspq->size = size; +} + +/* + * Perform default configuration of DMA queues depending on the number and + * type of ports we found and the number of available CPUs. Most settings can + * be modified by the admin via ethtool and cxgbtool prior to the adapter + * being brought up for the first time. + */ +static void cfg_queues(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + int q10g, n10g, qidx, pidx, qs; + size_t iqe_size; + + /* + * We should not be called till we know how many Queue Sets we can + * support. In particular, this means that we need to know what kind + * of interrupts we'll be using ... + */ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + + /* + * Count the number of 10GbE Virtual Interfaces that we have. + */ + n10g = 0; + for_each_port(adapter, pidx) + n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); + + /* + * We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g == 0) + q10g = 0; + else { + int n1g = (adapter->params.nports - n10g); + q10g = (adapter->sge.max_ethqsets - n1g) / n10g; + if (q10g > num_online_cpus()) + q10g = num_online_cpus(); + } + + /* + * Allocate the "Queue Sets" to the various Virtual Interfaces. 
+ * The layout will be established in setup_sge_queues() when the + * adapter is brough up for the first time. + */ + qidx = 0; + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + + pi->first_qset = qidx; + pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; + qidx += pi->nqsets; + } + s->ethqsets = qidx; + + /* + * The Ingress Queue Entry Size for our various Response Queues needs + * to be big enough to accommodate the largest message we can receive + * from the chip/firmware; which is 64 bytes ... + */ + iqe_size = 64; + + /* + * Set up default Queue Set parameters ... Start off with the + * shortest interrupt holdoff timer. + */ + for (qs = 0; qs < s->max_ethqsets; qs++) { + struct sge_eth_rxq *rxq = &s->ethrxq[qs]; + struct sge_eth_txq *txq = &s->ethtxq[qs]; + + init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size); + rxq->fl.size = 72; + txq->q.size = 1024; + } + + /* + * The firmware event queue is used for link state changes and + * notifications of TX DMA completions. + */ + init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size); + + /* + * The forwarded interrupt queue is used when we're in MSI interrupt + * mode. In this mode all interrupts associated with RX queues will + * be forwarded to a single queue which we'll associate with our MSI + * interrupt vector. The messages dropped in the forwarded interrupt + * queue will indicate which ingress queue needs servicing ... This + * queue needs to be large enough to accommodate all of the ingress + * queues which are forwarding their interrupt (+1 to prevent the PIDX + * from equalling the CIDX if every ingress queue has an outstanding + * interrupt). The queue doesn't need to be any larger because no + * ingress queue will ever have more than one outstanding interrupt at + * any time ... + */ + init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1, + iqe_size); +} + +/* + * Reduce the number of Ethernet queues across all ports to at most n. + * n provides at least one queue per port. + */ +static void reduce_ethqs(struct adapter *adapter, int n) +{ + int i; + struct port_info *pi; + + /* + * While we have too many active Ether Queue Sets, interate across the + * "ports" and reduce their individual Queue Set allocations. + */ + BUG_ON(n < adapter->params.nports); + while (n < adapter->sge.ethqsets) + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + if (pi->nqsets > 1) { + pi->nqsets--; + adapter->sge.ethqsets--; + if (adapter->sge.ethqsets <= n) + break; + } + } + + /* + * Reassign the starting Queue Sets for each of the "ports" ... + */ + n = 0; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + pi->first_qset = n; + n += pi->nqsets; + } +} + +/* + * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally + * we get a separate MSI-X vector for every "Queue Set" plus any extras we + * need. Minimally we need one for every Virtual Interface plus those needed + * for our "extras". Note that this process may lower the maximum number of + * allowed Queue Sets ... + */ +static int enable_msix(struct adapter *adapter) +{ + int i, want, need, nqsets; + struct msix_entry entries[MSIX_ENTRIES]; + struct sge *s = &adapter->sge; + + for (i = 0; i < MSIX_ENTRIES; ++i) + entries[i].entry = i; + + /* + * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets" + * plus those needed for our "extras" (for example, the firmware + * message queue). We _need_ at least one "Queue Set" per Virtual + * Interface plus those needed for our "extras". 
So now we get to see + * if the song is right ... + */ + want = s->max_ethqsets + MSIX_EXTRAS; + need = adapter->params.nports + MSIX_EXTRAS; + + want = pci_enable_msix_range(adapter->pdev, entries, need, want); + if (want < 0) + return want; + + nqsets = want - MSIX_EXTRAS; + if (nqsets < s->max_ethqsets) { + dev_warn(adapter->pdev_dev, "only enough MSI-X vectors" + " for %d Queue Sets\n", nqsets); + s->max_ethqsets = nqsets; + if (nqsets < s->ethqsets) + reduce_ethqs(adapter, nqsets); + } + for (i = 0; i < want; ++i) + adapter->msix_info[i].vec = entries[i].vector; + + return 0; +} + +static const struct net_device_ops cxgb4vf_netdev_ops = { + .ndo_open = cxgb4vf_open, + .ndo_stop = cxgb4vf_stop, + .ndo_start_xmit = t4vf_eth_xmit, + .ndo_get_stats = cxgb4vf_get_stats, + .ndo_set_rx_mode = cxgb4vf_set_rxmode, + .ndo_set_mac_address = cxgb4vf_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb4vf_do_ioctl, + .ndo_change_mtu = cxgb4vf_change_mtu, + .ndo_fix_features = cxgb4vf_fix_features, + .ndo_set_features = cxgb4vf_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb4vf_poll_controller, +#endif +}; + +/* + * "Probe" a device: initialize a device and construct all kernel and driver + * state needed to manage the device. This routine is called "init_one" in + * the PF Driver ... + */ +static int cxgb4vf_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int pci_using_dac; + int err, pidx; + unsigned int pmask; + struct adapter *adapter; + struct port_info *pi; + struct net_device *netdev; + + /* + * Print our driver banner the first time we're called to initialize a + * device. + */ + pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION); + + /* + * Initialize generic PCI device state. + */ + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * Reserve PCI resources for the device. If we can't get them some + * other driver may have already claimed the device ... + */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_disable_device; + } + + /* + * Set up our DMA mask: try for 64-bit address masking first and + * fall back to 32-bit if we can't get 64 bits ... + */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err == 0) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" + " coherent allocations\n"); + goto err_release_regions; + } + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err != 0) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_release_regions; + } + pci_using_dac = 0; + } + + /* + * Enable bus mastering for the device ... + */ + pci_set_master(pdev); + + /* + * Allocate our adapter data structure and attach it to the device. + */ + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto err_release_regions; + } + pci_set_drvdata(pdev, adapter); + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + + /* + * Initialize SMP data synchronization resources. + */ + spin_lock_init(&adapter->stats_lock); + + /* + * Map our I/O registers in BAR0. 
+ */ + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto err_free_adapter; + } + + /* Wait for the device to become ready before proceeding ... + */ + err = t4vf_prep_adapter(adapter); + if (err) { + dev_err(adapter->pdev_dev, "device didn't become ready:" + " err=%d\n", err); + goto err_unmap_bar0; + } + + /* For T5 and later we want to use the new BAR-based User Doorbells, + * so we need to map BAR2 here ... + */ + if (!is_t4(adapter->params.chip)) { + adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!adapter->bar2) { + dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n"); + err = -ENOMEM; + goto err_unmap_bar0; + } + } + /* + * Initialize adapter level features. + */ + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + err = adap_init0(adapter); + if (err) + goto err_unmap_bar; + + /* + * Allocate our "adapter ports" and stitch everything together. + */ + pmask = adapter->params.vfres.pmask; + for_each_port(adapter, pidx) { + int port_id, viid; + + /* + * We simplistically allocate our virtual interfaces + * sequentially across the port numbers to which we have + * access rights. This should be configurable in some manner + * ... + */ + if (pmask == 0) + break; + port_id = ffs(pmask) - 1; + pmask &= ~(1 << port_id); + viid = t4vf_alloc_vi(adapter, port_id); + if (viid < 0) { + dev_err(&pdev->dev, "cannot allocate VI for port %d:" + " err=%d\n", port_id, viid); + err = viid; + goto err_free_dev; + } + + /* + * Allocate our network device and stitch things together. + */ + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_PORT_QSETS); + if (netdev == NULL) { + t4vf_free_vi(adapter, viid); + err = -ENOMEM; + goto err_free_dev; + } + adapter->port[pidx] = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->pidx = pidx; + pi->port_id = port_id; + pi->viid = viid; + + /* + * Initialize the starting state of our "port" and register + * it. + */ + pi->xact_addr_filt = -1; + netif_carrier_off(netdev); + netdev->irq = pdev->irq; + + netdev->hw_features = NETIF_F_SG | TSO_FLAGS | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; + netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA; + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->netdev_ops = &cxgb4vf_netdev_ops; + netdev->ethtool_ops = &cxgb4vf_ethtool_ops; + + /* + * Initialize the hardware/software state for the port. + */ + err = t4vf_port_init(adapter, pidx); + if (err) { + dev_err(&pdev->dev, "cannot initialize port %d\n", + pidx); + goto err_free_dev; + } + } + + /* + * The "card" is now ready to go. If any errors occur during device + * registration we do not fail the whole "card" but rather proceed + * only with the ports we manage to register successfully. However we + * must register at least one net device. 
+ */ + for_each_port(adapter, pidx) { + netdev = adapter->port[pidx]; + if (netdev == NULL) + continue; + + err = register_netdev(netdev); + if (err) { + dev_warn(&pdev->dev, "cannot register net device %s," + " skipping\n", netdev->name); + continue; + } + + set_bit(pidx, &adapter->registered_device_map); + } + if (adapter->registered_device_map == 0) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto err_free_dev; + } + + /* + * Set up our debugfs entries. + */ + if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { + adapter->debugfs_root = + debugfs_create_dir(pci_name(pdev), + cxgb4vf_debugfs_root); + if (IS_ERR_OR_NULL(adapter->debugfs_root)) + dev_warn(&pdev->dev, "could not create debugfs" + " directory"); + else + setup_debugfs(adapter); + } + + /* + * See what interrupts we'll be using. If we've been configured to + * use MSI-X interrupts, try to enable them but fall back to using + * MSI interrupts if we can't enable MSI-X interrupts. If we can't + * get MSI interrupts we bail with the error. + */ + if (msi == MSI_MSIX && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else { + err = pci_enable_msi(pdev); + if (err) { + dev_err(&pdev->dev, "Unable to allocate %s interrupts;" + " err=%d\n", + msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); + goto err_free_debugfs; + } + adapter->flags |= USING_MSI; + } + + /* + * Now that we know how many "ports" we have and what their types are, + * and how many Queue Sets we can support, we can configure our queue + * resources. + */ + cfg_queues(adapter); + + /* + * Print a short notice on the existence and configuration of the new + * VF network device ... + */ + for_each_port(adapter, pidx) { + dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n", + adapter->port[pidx]->name, + (adapter->flags & USING_MSIX) ? "MSI-X" : + (adapter->flags & USING_MSI) ? "MSI" : ""); + } + + /* + * Return success! + */ + return 0; + + /* + * Error recovery and exit code. Unwind state that's been created + * so far and return the error. + */ + +err_free_debugfs: + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { + cleanup_debugfs(adapter); + debugfs_remove_recursive(adapter->debugfs_root); + } + +err_free_dev: + for_each_port(adapter, pidx) { + netdev = adapter->port[pidx]; + if (netdev == NULL) + continue; + pi = netdev_priv(netdev); + t4vf_free_vi(adapter, pi->viid); + if (test_bit(pidx, &adapter->registered_device_map)) + unregister_netdev(netdev); + free_netdev(netdev); + } + +err_unmap_bar: + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); + +err_unmap_bar0: + iounmap(adapter->regs); + +err_free_adapter: + kfree(adapter); + +err_release_regions: + pci_release_regions(pdev); + pci_clear_master(pdev); + +err_disable_device: + pci_disable_device(pdev); + + return err; +} + +/* + * "Remove" a device: tear down all kernel and driver state created in the + * "probe" routine and quiesce the device (disable interrupts, etc.). (Note + * that this is called "remove_one" in the PF Driver.) + */ +static void cxgb4vf_pci_remove(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + /* + * Tear down driver state associated with device. + */ + if (adapter) { + int pidx; + + /* + * Stop all of our activity. Unregister network port, + * disable interrupts, etc. 
+ */ + for_each_port(adapter, pidx) + if (test_bit(pidx, &adapter->registered_device_map)) + unregister_netdev(adapter->port[pidx]); + t4vf_sge_stop(adapter); + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } + + /* + * Tear down our debugfs entries. + */ + if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { + cleanup_debugfs(adapter); + debugfs_remove_recursive(adapter->debugfs_root); + } + + /* + * Free all of the various resources which we've acquired ... + */ + t4vf_free_sge_resources(adapter); + for_each_port(adapter, pidx) { + struct net_device *netdev = adapter->port[pidx]; + struct port_info *pi; + + if (netdev == NULL) + continue; + + pi = netdev_priv(netdev); + t4vf_free_vi(adapter, pi->viid); + free_netdev(netdev); + } + iounmap(adapter->regs); + if (!is_t4(adapter->params.chip)) + iounmap(adapter->bar2); + kfree(adapter); + } + + /* + * Disable the device and release its PCI resources. + */ + pci_disable_device(pdev); + pci_clear_master(pdev); + pci_release_regions(pdev); +} + +/* + * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt + * delivery. + */ +static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) +{ + struct adapter *adapter; + int pidx; + + adapter = pci_get_drvdata(pdev); + if (!adapter) + return; + + /* Disable all Virtual Interfaces. This will shut down the + * delivery of all ingress packets into the chip for these + * Virtual Interfaces. + */ + for_each_port(adapter, pidx) + if (test_bit(pidx, &adapter->registered_device_map)) + unregister_netdev(adapter->port[pidx]); + + /* Free up all Queues which will prevent further DMA and + * Interrupts allowing various internal pathways to drain. + */ + t4vf_sge_stop(adapter); + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } + + /* + * Free up all Queues which will prevent further DMA and + * Interrupts allowing various internal pathways to drain. + */ + t4vf_free_sge_resources(adapter); + pci_set_drvdata(pdev, NULL); +} + +/* Macros needed to support the PCI Device ID Table ... + */ +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \ + static const struct pci_device_id cxgb4vf_pci_tbl[] = { +#define CH_PCI_DEVICE_ID_FUNCTION 0x8 + +#define CH_PCI_ID_TABLE_ENTRY(devid) \ + { PCI_VDEVICE(CHELSIO, (devid)), 0 } + +#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } } + +#include "../cxgb4/t4_pci_id_tbl.h" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl); + +static struct pci_driver cxgb4vf_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4vf_pci_tbl, + .probe = cxgb4vf_pci_probe, + .remove = cxgb4vf_pci_remove, + .shutdown = cxgb4vf_pci_shutdown, +}; + +/* + * Initialize global driver state. + */ +static int __init cxgb4vf_module_init(void) +{ + int ret; + + /* + * Vet our module parameters. 
+ */ + if (msi != MSI_MSIX && msi != MSI_MSI) { + pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n", + msi, MSI_MSIX, MSI_MSI); + return -EINVAL; + } + + /* Debugfs support is optional, just warn if this fails */ + cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) + pr_warn("could not create debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4vf_driver); + if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) + debugfs_remove(cxgb4vf_debugfs_root); + return ret; +} + +/* + * Tear down global driver state. + */ +static void __exit cxgb4vf_module_exit(void) +{ + pci_unregister_driver(&cxgb4vf_driver); + debugfs_remove(cxgb4vf_debugfs_root); +} + +module_init(cxgb4vf_module_init); +module_exit(cxgb4vf_module_exit); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c new file mode 100644 index 000000000..482f6de68 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -0,0 +1,2655 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include <linux/dma-mapping.h> +#include <linux/prefetch.h> + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4_values.h" +#include "../cxgb4/t4fw_api.h" +#include "../cxgb4/t4_msg.h" + +/* + * Constants ... + */ +enum { + /* + * Egress Queue sizes, producer and consumer indices are all in units + * of Egress Context Units bytes. Note that as far as the hardware is + * concerned, the free list is an Egress Queue (the host produces free + * buffers which the hardware consumes) and free list entries are + * 64-bit PCI DMA addresses. + */ + EQ_UNIT = SGE_EQ_IDXSIZE, + FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64), + + /* + * Max number of TX descriptors we clean up at a time. Should be + * modest as freeing skbs isn't cheap and it happens while holding + * locks. 
We just need to free packets faster than they arrive, we + * eventually catch up and keep the amortized cost reasonable. + */ + MAX_TX_RECLAIM = 16, + + /* + * Max number of Rx buffers we replenish at a time. Again keep this + * modest, allocating buffers isn't cheap either. + */ + MAX_RX_REFILL = 16, + + /* + * Period of the Rx queue check timer. This timer is infrequent as it + * has something to do only when the system experiences severe memory + * shortage. + */ + RX_QCHECK_PERIOD = (HZ / 2), + + /* + * Period of the TX queue check timer and the maximum number of TX + * descriptors to be reclaimed by the TX timer. + */ + TX_QCHECK_PERIOD = (HZ / 2), + MAX_TIMER_TX_RECLAIM = 100, + + /* + * Suspend an Ethernet TX queue with fewer available descriptors than + * this. We always want to have room for a maximum sized packet: + * inline immediate data + MAX_SKB_FRAGS. This is the same as + * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS + * (see that function and its helpers for a description of the + * calculation). + */ + ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1, + ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 + + ((ETHTXQ_MAX_FRAGS-1) & 1) + + 2), + ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64), + ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR, + + ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT), + + /* + * Max TX descriptor space we allow for an Ethernet packet to be + * inlined into a WR. This is limited by the maximum value which + * we can specify for immediate data in the firmware Ethernet TX + * Work Request. + */ + MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M, + + /* + * Max size of a WR sent through a control TX queue. + */ + MAX_CTRL_WR_LEN = 256, + + /* + * Maximum amount of data which we'll ever need to inline into a + * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN). + */ + MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN + ? MAX_IMM_TX_PKT_LEN + : MAX_CTRL_WR_LEN), + + /* + * For incoming packets less than RX_COPY_THRES, we copy the data into + * an skb rather than referencing the data. We allocate enough + * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes + * of the data (header). + */ + RX_COPY_THRES = 256, + RX_PULL_LEN = 128, + + /* + * Main body length for sk_buffs used for RX Ethernet packets with + * fragments. Should be >= RX_PULL_LEN but possibly bigger to give + * pskb_may_pull() some room. + */ + RX_SKB_LEN = 512, +}; + +/* + * Software state per TX descriptor. + */ +struct tx_sw_desc { + struct sk_buff *skb; /* socket buffer of TX data source */ + struct ulptx_sgl *sgl; /* scatter/gather list in TX Queue */ +}; + +/* + * Software state per RX Free List descriptor. We keep track of the allocated + * FL page, its size, and its PCI DMA address (if the page is mapped). The FL + * page size and its PCI DMA mapped state are stored in the low bits of the + * PCI DMA address as per below. + */ +struct rx_sw_desc { + struct page *page; /* Free List page buffer */ + dma_addr_t dma_addr; /* PCI DMA address (if mapped) */ + /* and flags (see below) */ +}; + +/* + * The low bits of rx_sw_desc.dma_addr have special meaning. Note that the + * SGE also uses the low 4 bits to determine the size of the buffer. It uses + * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array. 
+ * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4 + * bits can only contain a 0 or a 1 to indicate which size buffer we're giving + * to the SGE. Thus, our software state of "is the buffer mapped for DMA" is + * maintained in an inverse sense so the hardware never sees that bit high. + */ +enum { + RX_LARGE_BUF = 1 << 0, /* buffer is SGE_FL_BUFFER_SIZE[1] */ + RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ +}; + +/** + * get_buf_addr - return DMA buffer address of software descriptor + * @sdesc: pointer to the software buffer descriptor + * + * Return the DMA buffer address of a software descriptor (stripping out + * our low-order flag bits). + */ +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc) +{ + return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); +} + +/** + * is_buf_mapped - is buffer mapped for DMA? + * @sdesc: pointer to the software buffer descriptor + * + * Determine whether the buffer associated with a software descriptor in + * mapped for DMA or not. + */ +static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc) +{ + return !(sdesc->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * need_skb_unmap - does the platform need unmapping of sk_buffs? + * + * Returns true if the platform needs sk_buff unmapping. The compiler + * optimizes away unnecessary code if this returns true. + */ +static inline int need_skb_unmap(void) +{ +#ifdef CONFIG_NEED_DMA_MAP_STATE + return 1; +#else + return 0; +#endif +} + +/** + * txq_avail - return the number of available slots in a TX queue + * @tq: the TX queue + * + * Returns the number of available descriptors in a TX queue. + */ +static inline unsigned int txq_avail(const struct sge_txq *tq) +{ + return tq->size - 1 - tq->in_use; +} + +/** + * fl_cap - return the capacity of a Free List + * @fl: the Free List + * + * Returns the capacity of a Free List. The capacity is less than the + * size because an Egress Queue Index Unit worth of descriptors needs to + * be left unpopulated, otherwise the Producer and Consumer indices PIDX + * and CIDX will match and the hardware will think the FL is empty. + */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - FL_PER_EQ_UNIT; +} + +/** + * fl_starving - return whether a Free List is starving. + * @adapter: pointer to the adapter + * @fl: the Free List + * + * Tests specified Free List to see whether the number of buffers + * available to the hardware has falled below our "starvation" + * threshold. + */ +static inline bool fl_starving(const struct adapter *adapter, + const struct sge_fl *fl) +{ + const struct sge *s = &adapter->sge; + + return fl->avail - fl->pend_cred <= s->fl_starve_thres; +} + +/** + * map_skb - map an skb for DMA to the device + * @dev: the egress net device + * @skb: the packet to map + * @addr: a pointer to the base of the DMA mapping array + * + * Map an skb for DMA to the device and return an array of DMA addresses. 
+ */ +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) { + *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); + +out_err: + return -ENOMEM; +} + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *tq) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), + be32_to_cpu(sgl->len0), DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), + be32_to_cpu(sgl->len0), DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { +unmap: + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)tq->stat) { + p = (const struct ulptx_sge_pair *)tq->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)tq->stat) { + const __be64 *addr = (const __be64 *)tq->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)tq->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + be32_to_cpu(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + be32_to_cpu(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)tq->stat) + p = (const struct ulptx_sge_pair *)tq->desc; + addr = ((u8 *)p + 16 <= (u8 *)tq->stat + ? p->addr[0] + : *(const __be64 *)tq->desc); + dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims TX descriptors and their buffers + * @adapter: the adapter + * @tq: the TX queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims TX descriptors from an SGE TX queue and frees the associated + * TX buffers. Called with the TX queue lock held. + */ +static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *sdesc; + unsigned int cidx = tq->cidx; + struct device *dev = adapter->pdev_dev; + + const int need_unmap = need_skb_unmap() && unmap; + + sdesc = &tq->sdesc[cidx]; + while (n--) { + /* + * If we kept a reference to the original TX skb, we need to + * unmap it from PCI DMA space (if required) and free it. 
+ */ + if (sdesc->skb) { + if (need_unmap) + unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); + dev_consume_skb_any(sdesc->skb); + sdesc->skb = NULL; + } + + sdesc++; + if (++cidx == tq->size) { + cidx = 0; + sdesc = tq->sdesc; + } + } + tq->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a TX queue. + */ +static inline int reclaimable(const struct sge_txq *tq) +{ + int hw_cidx = be16_to_cpu(tq->stat->cidx); + int reclaimable = hw_cidx - tq->cidx; + if (reclaimable < 0) + reclaimable += tq->size; + return reclaimable; +} + +/** + * reclaim_completed_tx - reclaims completed TX descriptors + * @adapter: the adapter + * @tq: the TX queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims TX descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the TX + * queue locked. + */ +static inline void reclaim_completed_tx(struct adapter *adapter, + struct sge_txq *tq, + bool unmap) +{ + int avail = reclaimable(tq); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the TX lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adapter, tq, avail, unmap); + tq->in_use -= avail; + } +} + +/** + * get_buf_size - return the size of an RX Free List buffer. + * @adapter: pointer to the associated adapter + * @sdesc: pointer to the software buffer descriptor + */ +static inline int get_buf_size(const struct adapter *adapter, + const struct rx_sw_desc *sdesc) +{ + const struct sge *s = &adapter->sge; + + return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF) + ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE); +} + +/** + * free_rx_bufs - free RX buffers on an SGE Free List + * @adapter: the adapter + * @fl: the SGE Free List to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE Free List RX queue. The + * buffers must be made inaccessible to hardware before calling this + * function. + */ +static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) +{ + while (n--) { + struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; + + if (is_buf_mapped(sdesc)) + dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); + put_page(sdesc->page); + sdesc->page = NULL; + if (++fl->cidx == fl->size) + fl->cidx = 0; + fl->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current RX buffer on an SGE Free List + * @adapter: the adapter + * @fl: the SGE Free List + * + * Unmap the current buffer on an SGE Free List RX queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. + * This is used predominantly to "transfer ownership" of an FL buffer + * to another entity (typically an skb's fragment list). + */ +static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) +{ + struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; + + if (is_buf_mapped(sdesc)) + dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), + get_buf_size(adapter, sdesc), + PCI_DMA_FROMDEVICE); + sdesc->page = NULL; + if (++fl->cidx == fl->size) + fl->cidx = 0; + fl->avail--; +} + +/** + * ring_fl_db - righ doorbell on free list + * @adapter: the adapter + * @fl: the Free List whose doorbell should be rung ... 
+ * + * Tell the Scatter Gather Engine that there are new free list entries + * available. + */ +static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) +{ + u32 val; + + /* The SGE keeps track of its Producer and Consumer Indices in terms + * of Egress Queue Units so we can only tell it about integral numbers + * of multiples of Free List Entries per Egress Queue Units ... + */ + if (fl->pend_cred >= FL_PER_EQ_UNIT) { + if (is_t4(adapter->params.chip)) + val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); + else + val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) | + DBTYPE_F; + val |= DBPRIO_F; + + /* Make sure all memory writes to the Free List queue are + * committed before we tell the hardware about them. + */ + wmb(); + + /* If we don't have access to the new User Doorbell (T5+), use + * the old doorbell mechanism; otherwise use the new BAR2 + * mechanism. + */ + if (unlikely(fl->bar2_addr == NULL)) { + t4_write_reg(adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID_V(fl->cntxt_id) | val); + } else { + writel(val | QID_V(fl->bar2_qid), + fl->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write memory Barrier will force the write to + * the User Doorbell area to be flushed. + */ + wmb(); + } + fl->pend_cred %= FL_PER_EQ_UNIT; + } +} + +/** + * set_rx_sw_desc - initialize software RX buffer descriptor + * @sdesc: pointer to the softwore RX buffer descriptor + * @page: pointer to the page data structure backing the RX buffer + * @dma_addr: PCI DMA address (possibly with low-bit flags) + */ +static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page, + dma_addr_t dma_addr) +{ + sdesc->page = page; + sdesc->dma_addr = dma_addr; +} + +/* + * Support for poisoning RX buffers ... + */ +#define POISON_BUF_VAL -1 + +static inline void poison_buf(struct page *page, size_t sz) +{ +#if POISON_BUF_VAL >= 0 + memset(page_address(page), POISON_BUF_VAL, sz); +#endif +} + +/** + * refill_fl - refill an SGE RX buffer ring + * @adapter: the adapter + * @fl: the Free List ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN + * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number + * of buffers allocated. If afterwards the queue is found critically low, + * mark it as starving in the bitmap of starving FLs. + */ +static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, + int n, gfp_t gfp) +{ + struct sge *s = &adapter->sge; + struct page *page; + dma_addr_t dma_addr; + unsigned int cred = fl->avail; + __be64 *d = &fl->desc[fl->pidx]; + struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; + + /* + * Sanity: ensure that the result of adding n Free List buffers + * won't result in wrapping the SGE's Producer Index around to + * it's Consumer Index thereby indicating an empty Free List ... + */ + BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); + + gfp |= __GFP_NOWARN; + + /* + * If we support large pages, prefer large buffers and fail over to + * small pages if we can't allocate large pages to satisfy the refill. + * If we don't support large pages, drop directly into the small page + * allocation code. 
+ */ + if (s->fl_pg_order == 0) + goto alloc_small_pages; + + while (n) { + page = __dev_alloc_pages(gfp, s->fl_pg_order); + if (unlikely(!page)) { + /* + * We've failed inour attempt to allocate a "large + * page". Fail over to the "small page" allocation + * below. + */ + fl->large_alloc_failed++; + break; + } + poison_buf(page, PAGE_SIZE << s->fl_pg_order); + + dma_addr = dma_map_page(adapter->pdev_dev, page, 0, + PAGE_SIZE << s->fl_pg_order, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { + /* + * We've run out of DMA mapping space. Free up the + * buffer and return with what we've managed to put + * into the free list. We don't want to fail over to + * the small page allocation below in this case + * because DMA mapping resources are typically + * critical resources once they become scarse. + */ + __free_pages(page, s->fl_pg_order); + goto out; + } + dma_addr |= RX_LARGE_BUF; + *d++ = cpu_to_be64(dma_addr); + + set_rx_sw_desc(sdesc, page, dma_addr); + sdesc++; + + fl->avail++; + if (++fl->pidx == fl->size) { + fl->pidx = 0; + sdesc = fl->sdesc; + d = fl->desc; + } + n--; + } + +alloc_small_pages: + while (n--) { + page = __dev_alloc_page(gfp); + if (unlikely(!page)) { + fl->alloc_failed++; + break; + } + poison_buf(page, PAGE_SIZE); + + dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { + put_page(page); + break; + } + *d++ = cpu_to_be64(dma_addr); + + set_rx_sw_desc(sdesc, page, dma_addr); + sdesc++; + + fl->avail++; + if (++fl->pidx == fl->size) { + fl->pidx = 0; + sdesc = fl->sdesc; + d = fl->desc; + } + } + +out: + /* + * Update our accounting state to incorporate the new Free List + * buffers, tell the hardware about them and return the number of + * buffers which we were able to allocate. + */ + cred = fl->avail - cred; + fl->pend_cred += cred; + ring_fl_db(adapter, fl); + + if (unlikely(fl_starving(adapter, fl))) { + smp_wmb(); + set_bit(fl->cntxt_id, adapter->sge.starving_fl); + } + + return cred; +} + +/* + * Refill a Free List to its capacity or the Maximum Refill Increment, + * whichever is smaller ... + */ +static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) +{ + refill_fl(adapter, fl, + min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @hwsize: the size of each hardware descriptor + * @swsize: the size of each software descriptor + * @busaddrp: the physical PCI bus address of the allocated ring + * @swringp: return address pointer for software ring + * @stat_size: extra space in hardware ring for status information + * + * Allocates resources for an SGE descriptor ring, such as TX queues, + * free buffer lists, response queues, etc. Each SGE ring requires + * space for its hardware descriptors plus, optionally, space for software + * state associated with each hardware entry (the metadata). The function + * returns three values: the virtual address for the hardware ring (the + * return value of the function), the PCI bus address of the hardware + * ring (in *busaddrp), and the address of the software ring (in swringp). + * Both the hardware and software rings are returned zeroed out. 
+ */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, + size_t swsize, dma_addr_t *busaddrp, void *swringp, + size_t stat_size) +{ + /* + * Allocate the hardware ring and PCI DMA bus address space for said. + */ + size_t hwlen = nelem * hwsize + stat_size; + void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); + + if (!hwring) + return NULL; + + /* + * If the caller wants a software ring, allocate it and return a + * pointer to it in *swringp. + */ + BUG_ON((swsize != 0) != (swringp != NULL)); + if (swsize) { + void *swring = kcalloc(nelem, swsize, GFP_KERNEL); + + if (!swring) { + dma_free_coherent(dev, hwlen, hwring, *busaddrp); + return NULL; + } + *(void **)swringp = swring; + } + + /* + * Zero out the hardware ring and return its address as our function + * value. + */ + memset(hwring, 0, hwlen); + return hwring; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits (8-byte units) needed for a Direct + * Scatter/Gather List that can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* + * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... + */ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of TX descriptors for the given flits + * @flits: the number of flits + * + * Returns the number of TX descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int flits) +{ + BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64)); + return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit completely as + * immediate data. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + /* + * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request + * which does not accommodate immediate data. We could dike out all + * of the support code for immediate data but that would tie our hands + * too much if we ever want to enhace the firmware. It would also + * create more differences between the PF and VF Drivers. + */ + return false; +} + +/** + * calc_tx_flits - calculate the number of flits for a packet TX WR + * @skb: the packet + * + * Returns the number of flits needed for a TX Work Request for the + * given Ethernet packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + /* + * If the skb is small enough, we can pump it out as a work request + * with only immediate data. 
In that case we just have to have the + * TX Packet header plus the skb data in the Work Request. + */ + if (is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), + sizeof(__be64)); + + /* + * Otherwise, we're going to have to construct a Scatter gather list + * of the skb body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); + if (skb_shinfo(skb)->gso_size) + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * write_sgl - populate a Scatter/Gather List for a packet + * @skb: the packet + * @tq: the TX queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of DMA bus addresses for the SGL elements + * + * Generates a Scatter/Gather List for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @pos must be 16-byte + * aligned and within a TX descriptor with available space. @end points + * write after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @tq->stat. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(skb_frag_size(&si->frags[0])); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | + ULPTX_NSGE_V(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)tq->stat ? 
buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); + to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)tq->stat)) { + unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)tq->stat; + memcpy(tq->desc, (u8 *)buf + part0, part1); + end = (void *)tq->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *end = 0; +} + +/** + * check_ring_tx_db - check and potentially ring a TX queue's doorbell + * @adapter: the adapter + * @tq: the TX queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a TX queue. + */ +static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, + int n) +{ + /* Make sure that all writes to the TX Descriptors are committed + * before we tell the hardware about them. + */ + wmb(); + + /* If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(tq->bar2_addr == NULL)) { + u32 val = PIDX_V(n); + + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, + QID_V(tq->cntxt_id) | val); + } else { + u32 val = PIDX_T5_V(n); + + /* T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & DBPRIO_F); + + /* If we're only writing a single Egress Unit and the BAR2 + * Queue ID is 0, we can use the Write Combining Doorbell + * Gather Buffer; otherwise we use the simple doorbell. + */ + if (n == 1 && tq->bar2_qid == 0) { + unsigned int index = (tq->pidx + ? (tq->pidx - 1) + : (tq->size - 1)); + __be64 *src = (__be64 *)&tq->desc[index]; + __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr + + SGE_UDB_WCDOORBELL); + unsigned int count = EQ_UNIT / sizeof(__be64); + + /* Copy the TX Descriptor in a tight loop in order to + * try to get it to the adapter in a single Write + * Combined transfer on the PCI-E Bus. If the Write + * Combine fails (say because of an interrupt, etc.) + * the hardware will simply take the last write as a + * simple doorbell write with a PIDX Increment of 1 + * and will fetch the TX Descriptor from memory via + * DMA. + */ + while (count) { + /* the (__force u64) is because the compiler + * doesn't understand the endian swizzling + * going on + */ + writeq((__force u64)*src, dst); + src++; + dst++; + count--; + } + } else + writel(val | QID_V(tq->bar2_qid), + tq->bar2_addr + SGE_UDB_KDOORBELL); + + /* This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. 
This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + wmb(); + } +} + +/** + * inline_tx_skb - inline a packet's data into TX descriptors + * @skb: the packet + * @tq: the TX queue where the packet will be inlined + * @pos: starting position in the TX queue to inline the packet + * + * Inline a packet's contents directly into TX descriptors, starting at + * the given position within the TX DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, + void *pos) +{ + u64 *p; + int left = (void *)tq->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, tq->desc, skb->len - left); + pos = (void *)tq->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: + /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +/* + * Stop an Ethernet TX queue and record that state change. + */ +static void txq_stop(struct sge_eth_txq *txq) +{ + netif_tx_stop_queue(txq->txq); + txq->q.stops++; +} + +/* + * Advance our software state for a TX queue by adding n in use descriptors. + */ +static inline void txq_advance(struct sge_txq *tq, unsigned int n) +{ + tq->in_use += n; + tq->pidx += n; + if (tq->pidx >= tq->size) + tq->pidx -= tq->size; +} + +/** + * t4vf_eth_xmit - add a packet to an Ethernet TX queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. 
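+ *
+ * Returns %NETDEV_TX_OK if the packet was queued (or had to be dropped)
+ * and %NETDEV_TX_BUSY if the TX Queue is currently out of descriptors.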
+ */ +int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adapter; + struct sge_eth_txq *txq; + const struct port_info *pi; + struct fw_eth_tx_pkt_vm_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci)); + + /* + * The chip minimum packet length is 10 octets but the firmware + * command that we are using requires that we copy the Ethernet header + * (including the VLAN tag) into the header so we reject anything + * smaller than that ... + */ + if (unlikely(skb->len < fw_hdr_copy_len)) + goto out_free; + + /* + * Figure out which TX Queue we're going to use. + */ + pi = netdev_priv(dev); + adapter = pi->adapter; + qidx = skb_get_queue_mapping(skb); + BUG_ON(qidx >= pi->nqsets); + txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; + + /* + * Take this opportunity to reclaim any TX Descriptors whose DMA + * transfers have completed. + */ + reclaim_completed_tx(adapter, &txq->q, true); + + /* + * Calculate the number of flits and TX Descriptors we're going to + * need along with how many TX Descriptors will be left over after + * we inject our Work Request. + */ + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + /* + * Not enough room for this packet's Work Request. Stop the + * TX Queue and return a "busy" condition. The queue will get + * started later on when the firmware informs us that space + * has opened up. + */ + txq_stop(txq); + dev_err(adapter->pdev_dev, + "%s: TX ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!is_eth_imm(skb) && + unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { + /* + * We need to map the skb into PCI DMA space (because it can't + * be in-lined directly into the Work Request) and the mapping + * operation failed. Record the error and drop the packet. + */ + txq->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* + * After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ + txq_stop(txq); + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + } + + /* + * Start filling in our Work Request. Note that we do _not_ handle + * the WR Header wrapping around the TX Descriptor Ring. If our + * maximum header size ever exceeds one TX Descriptor, we'll need to + * do something else here. + */ + BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); + wr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = cpu_to_be32(wr_mid); + wr->r3[0] = cpu_to_be32(0); + wr->r3[1] = cpu_to_be32(0); + skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); + end = (u64 *)wr + flits; + + /* + * If this is a Large Send Offload packet we'll put in an LSO CPL + * message with an encapsulated TX Packet CPL message. Otherwise we + * just use a TX Packet CPL message. 
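+	 * (So the Work Request body is either an LSO CPL with an embedded TX
+	 * Packet CPL, or just a TX Packet CPL, followed by the packet data.)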
+ */ + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(sizeof(*lso) + + sizeof(*cpl))); + /* + * Fill in the LSO CPL message. + */ + lso->lso_ctrl = + cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | + LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len/4) | + LSO_IPHDR_LEN(l3hdr_len/4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->ipid_ofst = cpu_to_be16(0); + lso->mss = cpu_to_be16(ssi->gso_size); + lso->seqno_offset = cpu_to_be32(0); + if (is_t4(adapter->params.chip)) + lso->len = cpu_to_be32(skb->len); + else + lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len)); + + /* + * Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(lso + 1); + cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len)); + txq->tso++; + txq->tx_cso += ssi->gso_segs; + } else { + int len; + + len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); + wr->op_immdlen = + cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) | + FW_WR_IMMDLEN_V(len)); + + /* + * Set up TX Packet CPL pointer, control word and perform + * accounting. + */ + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + txq->tx_cso++; + } else + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } + + /* + * If there's a VLAN tag present, add that to the list of things to + * do in this Work Request. + */ + if (skb_vlan_tag_present(skb)) { + txq->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb)); + } + + /* + * Fill in the TX Packet CPL message header. + */ + cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->port_id) | + TXPKT_PF(0)); + cpl->pack = cpu_to_be16(0); + cpl->len = cpu_to_be16(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + +#ifdef T4_TRACE + T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], + "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u", + ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); +#endif + + /* + * Fill in the body of the TX Packet CPL message with either in-lined + * data or a Scatter/Gather List. + */ + if (is_eth_imm(skb)) { + /* + * In-line the packet's data and free the skb since we don't + * need it any longer. + */ + inline_tx_skb(skb, &txq->q, cpl + 1); + dev_consume_skb_any(skb); + } else { + /* + * Write the skb's Scatter/Gather list into the TX Packet CPL + * message and retain a pointer to the skb so we can free it + * later when its DMA completes. (We store the skb pointer + * in the Software Descriptor corresponding to the last TX + * Descriptor used by the Work Request.) + * + * The retained skb will be freed when the corresponding TX + * Descriptors are reclaimed after their DMAs complete. + * However, this could take quite a while since, in general, + * the hardware is set up to be lazy about sending DMA + * completion notifications to us and we mostly perform TX + * reclaims in the transmit routine. + * + * This is good for performamce but means that we rely on new + * TX packets arriving to run the destructors of completed + * packets, which open up space in their sockets' send queues. + * Sometimes we do not get such new packets causing TX to + * stall. 
A single UDP transmitter is a good example of this + * situation. We have a clean up timer that periodically + * reclaims completed packets but it doesn't run often enough + * (nor do we want it to) to prevent lengthy stalls. A + * solution to this problem is to run the destructor early, + * after the packet is queued but before it's DMAd. A con is + * that we lie to socket memory accounting, but the amount of + * extra memory is reasonable (limited by the number of TX + * descriptors), the packets do actually get freed quickly by + * new packets almost always, and for protocols like TCP that + * wait for acks to really free up the data the extra memory + * is even less. On the positive side we run the destructors + * on the sending CPU rather than on a potentially different + * completing CPU, usually a good thing. + * + * Run the destructor before telling the DMA engine about the + * packet to make sure it doesn't complete and get freed + * prematurely. + */ + struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1); + struct sge_txq *tq = &txq->q; + int last_desc; + + /* + * If the Work Request header was an exact multiple of our TX + * Descriptor length, then it's possible that the starting SGL + * pointer lines up exactly with the end of our TX Descriptor + * ring. If that's the case, wrap around to the beginning + * here ... + */ + if (unlikely((void *)sgl == (void *)tq->stat)) { + sgl = (void *)tq->desc; + end = ((void *)tq->desc + ((void *)end - (void *)tq->stat)); + } + + write_sgl(skb, tq, sgl, end, 0, addr); + skb_orphan(skb); + + last_desc = tq->pidx + ndesc - 1; + if (last_desc >= tq->size) + last_desc -= tq->size; + tq->sdesc[last_desc].skb = skb; + tq->sdesc[last_desc].sgl = sgl; + } + + /* + * Advance our internal TX Queue state, tell the hardware about + * the new TX descriptors and return success. + */ + txq_advance(&txq->q, ndesc); + dev->trans_start = jiffies; + ring_tx_db(adapter, &txq->q, ndesc); + return NETDEV_TX_OK; + +out_free: + /* + * An error of some sort happened. Free the TX skb and tell the + * OS that we've "dealt" with the packet ... + */ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * copy_frags - copy fragments from gather list into skb_shared_info + * @skb: destination skb + * @gl: source internal packet gather list + * @offset: packet start offset in first page + * + * Copy an internal packet gather list into a Linux skb_shared_info + * structure. + */ +static inline void copy_frags(struct sk_buff *skb, + const struct pkt_gl *gl, + unsigned int offset) +{ + int i; + + /* usually there's just one frag */ + __skb_fill_page_desc(skb, 0, gl->frags[0].page, + gl->frags[0].offset + offset, + gl->frags[0].size - offset); + skb_shinfo(skb)->nr_frags = gl->nfrags; + for (i = 1; i < gl->nfrags; i++) + __skb_fill_page_desc(skb, i, gl->frags[i].page, + gl->frags[i].offset, + gl->frags[i].size); + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); +} + +/** + * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list + * @gl: the gather list + * @skb_len: size of sk_buff main body if it carries fragments + * @pull_len: amount of data to move to the sk_buff's main body + * + * Builds an sk_buff from the given packet gather list. Returns the + * sk_buff or %NULL if sk_buff allocation failed. 
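+ * On allocation failure the gather list is left untouched and it remains
+ * the caller's responsibility to release it (e.g. via t4vf_pktgl_free()).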
+ */
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+					 unsigned int skb_len,
+					 unsigned int pull_len)
+{
+	struct sk_buff *skb;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline. Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		copy_frags(skb, gl, pull_len);
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+	}
+
+out:
+	return skb;
+}
+
+/**
+ * t4vf_pktgl_free - free a packet gather list
+ * @gl: the gather list
+ *
+ * Releases the pages of a packet gather list. We do not own the last
+ * page on the list and do not free it.
+ */
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
+{
+	int frag;
+
+	frag = gl->nfrags - 1;
+	while (frag--)
+		put_page(gl->frags[frag].page);
+}
+
+/**
+ * do_gro - perform Generic Receive Offload ingress packet processing
+ * @rxq: ingress RX Ethernet Queue
+ * @gl: gather list for ingress packet
+ * @pkt: CPL header for last packet fragment
+ *
+ * Perform Generic Receive Offload (GRO) ingress packet processing.
+ * We use the standard Linux GRO interfaces for this.
+ */
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+		   const struct cpl_rx_pkt *pkt)
+{
+	struct adapter *adapter = rxq->rspq.adapter;
+	struct sge *s = &adapter->sge;
+	int ret;
+	struct sk_buff *skb;
+
+	skb = napi_get_frags(&rxq->rspq.napi);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return;
+	}
+
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
+	skb->data_len = skb->len;
+	skb->truesize += skb->data_len;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_record_rx_queue(skb, rxq->rspq.idx);
+
+	if (pkt->vlan_ex) {
+		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
+				       be16_to_cpu(pkt->vlan));
+		rxq->stats.vlan_ex++;
+	}
+	ret = napi_gro_frags(&rxq->rspq.napi);
+
+	if (ret == GRO_HELD)
+		rxq->stats.lro_pkts++;
+	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+		rxq->stats.lro_merged++;
+	rxq->stats.pkts++;
+	rxq->stats.rx_cso++;
+}
+
+/**
+ * t4vf_ethrx_handler - process an ingress ethernet packet
+ * @rspq: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the RX_PKT message
+ * @gl: the gather list of packet fragments
+ *
+ * Process an ingress ethernet packet and deliver it to the stack.
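+ * Always returns 0; packets which cannot be handled are dropped and
+ * accounted for in the queue's rx_drops statistic.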
+ */ +int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + const struct cpl_rx_pkt *pkt = (void *)rsp; + bool csum_ok = pkt->csum_calc && !pkt->err_vec && + (rspq->netdev->features & NETIF_F_RXCSUM); + struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; + + /* + * If this is a good TCP packet and we have Generic Receive Offload + * enabled, handle the packet in the GRO path. + */ + if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) && + (rspq->netdev->features & NETIF_F_GRO) && csum_ok && + !pkt->ip_frag) { + do_gro(rxq, gl, pkt); + return 0; + } + + /* + * Convert the Packet Gather List into an skb. + */ + skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4vf_pktgl_free(gl); + rxq->stats.rx_drops++; + return 0; + } + __skb_pull(skb, s->pktshift); + skb->protocol = eth_type_trans(skb, rspq->netdev); + skb_record_rx_queue(skb, rspq->idx); + rxq->stats.pkts++; + + if (csum_ok && !pkt->err_vec && + (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) { + if (!pkt->ip_frag) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + } + rxq->stats.rx_cso++; + } else + skb_checksum_none_assert(skb); + + if (pkt->vlan_ex) { + rxq->stats.vlan_ex++; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan)); + } + + netif_receive_skb(skb); + + return 0; +} + +/** + * is_new_response - check if a response is newly written + * @rc: the response control descriptor + * @rspq: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline bool is_new_response(const struct rsp_ctrl *rc, + const struct sge_rspq *rspq) +{ + return RSPD_GEN(rc->type_gen) == rspq->gen; +} + +/** + * restore_rx_bufs - put back a packet's RX buffers + * @gl: the packet gather list + * @fl: the SGE Free List + * @nfrags: how many fragments in @si + * + * Called when we find out that the current packet, @si, can't be + * processed right away for some reason. This is a very rare event and + * there's no effort to make this suspension/resumption process + * particularly efficient. + * + * We implement the suspension by putting all of the RX buffers associated + * with the current packet back on the original Free List. The buffers + * have already been unmapped and are left unmapped, we mark them as + * unmapped in order to prevent further unmapping attempts. (Effectively + * this function undoes the series of @unmap_rx_buf calls which were done + * to create the current packet's gather list.) This leaves us ready to + * restart processing of the packet the next time we start processing the + * RX Queue ... + */ +static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl, + int frags) +{ + struct rx_sw_desc *sdesc; + + while (frags--) { + if (fl->cidx == 0) + fl->cidx = fl->size - 1; + else + fl->cidx--; + sdesc = &fl->sdesc[fl->cidx]; + sdesc->page = gl->frags[frags].page; + sdesc->dma_addr |= RX_UNMAPPED_BUF; + fl->avail++; + } +} + +/** + * rspq_next - advance to the next entry in a response queue + * @rspq: the queue + * + * Updates the state of a response queue to advance it to the next entry. 
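+ * The queue's generation bit is flipped whenever the index wraps so that
+ * is_new_response() can distinguish freshly written entries from stale ones.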
+ */ +static inline void rspq_next(struct sge_rspq *rspq) +{ + rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len; + if (unlikely(++rspq->cidx == rspq->size)) { + rspq->cidx = 0; + rspq->gen ^= 1; + rspq->cur_desc = rspq->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @rspq: the ingress response queue to process + * @budget: how many responses can be processed in this round + * + * Process responses from a Scatter Gather Engine response queue up to + * the supplied budget. Responses include received packets as well as + * control messages from firmware or hardware. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +static int process_responses(struct sge_rspq *rspq, int budget) +{ + struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); + struct adapter *adapter = rspq->adapter; + struct sge *s = &adapter->sge; + int budget_left = budget; + + while (likely(budget_left)) { + int ret, rsp_type; + const struct rsp_ctrl *rc; + + rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, rspq)) + break; + + /* + * Figure out what kind of response we've received from the + * SGE. + */ + dma_rmb(); + rsp_type = RSPD_TYPE(rc->type_gen); + if (likely(rsp_type == RSP_TYPE_FLBUF)) { + struct page_frag *fp; + struct pkt_gl gl; + const struct rx_sw_desc *sdesc; + u32 bufsz, frag; + u32 len = be32_to_cpu(rc->pldbuflen_qid); + + /* + * If we get a "new buffer" message from the SGE we + * need to move on to the next Free List buffer. + */ + if (len & RSPD_NEWBUF) { + /* + * We get one "new buffer" message when we + * first start up a queue so we need to ignore + * it when our offset into the buffer is 0. + */ + if (likely(rspq->offset > 0)) { + free_rx_bufs(rspq->adapter, &rxq->fl, + 1); + rspq->offset = 0; + } + len = RSPD_LEN(len); + } + gl.tot_len = len; + + /* + * Gather packet fragments. + */ + for (frag = 0, fp = gl.frags; /**/; frag++, fp++) { + BUG_ON(frag >= MAX_SKB_FRAGS); + BUG_ON(rxq->fl.avail == 0); + sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; + bufsz = get_buf_size(adapter, sdesc); + fp->page = sdesc->page; + fp->offset = rspq->offset; + fp->size = min(bufsz, len); + len -= fp->size; + if (!len) + break; + unmap_rx_buf(rspq->adapter, &rxq->fl); + } + gl.nfrags = frag+1; + + /* + * Last buffer remains mapped so explicitly make it + * coherent for CPU access and start preloading first + * cache line ... + */ + dma_sync_single_for_cpu(rspq->adapter->pdev_dev, + get_buf_addr(sdesc), + fp->size, DMA_FROM_DEVICE); + gl.va = (page_address(gl.frags[0].page) + + gl.frags[0].offset); + prefetch(gl.va); + + /* + * Hand the new ingress packet to the handler for + * this Response Queue. + */ + ret = rspq->handler(rspq, rspq->cur_desc, &gl); + if (likely(ret == 0)) + rspq->offset += ALIGN(fp->size, s->fl_align); + else + restore_rx_bufs(&gl, &rxq->fl, frag); + } else if (likely(rsp_type == RSP_TYPE_CPL)) { + ret = rspq->handler(rspq, rspq->cur_desc, NULL); + } else { + WARN_ON(rsp_type > RSP_TYPE_CPL); + ret = 0; + } + + if (unlikely(ret)) { + /* + * Couldn't process descriptor, back off for recovery. + * We use the SGE's last timer which has the longest + * interrupt coalescing value ... 
+ */ + const int NOMEM_TIMER_IDX = SGE_NTIMERS-1; + rspq->next_intr_params = + QINTR_TIMER_IDX(NOMEM_TIMER_IDX); + break; + } + + rspq_next(rspq); + budget_left--; + } + + /* + * If this is a Response Queue with an associated Free List and + * at least two Egress Queue units available in the Free List + * for new buffer pointers, refill the Free List. + */ + if (rspq->offset >= 0 && + rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) + __refill_fl(rspq->adapter, &rxq->fl); + return budget - budget_left; +} + +/** + * napi_rx_handler - the NAPI handler for RX processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. This does not need any + * locking or protection from interrupts as data interrupts are off at + * this point and other adapter interrupts do not interfere (the latter + * in not a concern at all with MSI-X as non-data interrupts then have + * a separate handler). + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + unsigned int intr_params; + struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); + int work_done = process_responses(rspq, budget); + u32 val; + + if (likely(work_done < budget)) { + napi_complete(napi); + intr_params = rspq->next_intr_params; + rspq->next_intr_params = rspq->intr_params; + } else + intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX); + + if (unlikely(work_done == 0)) + rspq->unhandled_irqs++; + + val = CIDXINC_V(work_done) | SEINTARM_V(intr_params); + if (is_t4(rspq->adapter->params.chip)) { + t4_write_reg(rspq->adapter, + T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID_V((u32)rspq->cntxt_id)); + } else { + writel(val | INGRESSQID_V(rspq->bar2_qid), + rspq->bar2_addr + SGE_UDB_GTS); + wmb(); + } + return work_done; +} + +/* + * The MSI-X interrupt handler for an SGE response queue for the NAPI case + * (i.e., response queue serviced by NAPI polling). + */ +irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie) +{ + struct sge_rspq *rspq = cookie; + + napi_schedule(&rspq->napi); + return IRQ_HANDLED; +} + +/* + * Process the indirect interrupt entries in the interrupt queue and kick off + * NAPI for each queue that has generated an entry. + */ +static unsigned int process_intrq(struct adapter *adapter) +{ + struct sge *s = &adapter->sge; + struct sge_rspq *intrq = &s->intrq; + unsigned int work_done; + u32 val; + + spin_lock(&adapter->sge.intrq_lock); + for (work_done = 0; ; work_done++) { + const struct rsp_ctrl *rc; + unsigned int qid, iq_idx; + struct sge_rspq *rspq; + + /* + * Grab the next response from the interrupt queue and bail + * out if it's not a new response. + */ + rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, intrq)) + break; + + /* + * If the response isn't a forwarded interrupt message issue a + * error and go on to the next response message. This should + * never happen ... + */ + dma_rmb(); + if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) { + dev_err(adapter->pdev_dev, + "Unexpected INTRQ response type %d\n", + RSPD_TYPE(rc->type_gen)); + continue; + } + + /* + * Extract the Queue ID from the interrupt message and perform + * sanity checking to make sure it really refers to one of our + * Ingress Queues which is active and matches the queue's ID. + * None of these error conditions should ever happen so we may + * want to either make them fatal and/or conditionalized under + * DEBUG. 
+ */ + qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid)); + iq_idx = IQ_IDX(s, qid); + if (unlikely(iq_idx >= MAX_INGQ)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d out of range\n", qid); + continue; + } + rspq = s->ingr_map[iq_idx]; + if (unlikely(rspq == NULL)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d RSPQ=NULL\n", qid); + continue; + } + if (unlikely(rspq->abs_id != qid)) { + dev_err(adapter->pdev_dev, + "Ingress QID %d refers to RSPQ %d\n", + qid, rspq->abs_id); + continue; + } + + /* + * Schedule NAPI processing on the indicated Response Queue + * and move on to the next entry in the Forwarded Interrupt + * Queue. + */ + napi_schedule(&rspq->napi); + rspq_next(intrq); + } + + val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params); + if (is_t4(adapter->params.chip)) + t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, + val | INGRESSQID_V(intrq->cntxt_id)); + else { + writel(val | INGRESSQID_V(intrq->bar2_qid), + intrq->bar2_addr + SGE_UDB_GTS); + wmb(); + } + + spin_unlock(&adapter->sge.intrq_lock); + + return work_done; +} + +/* + * The MSI interrupt handler handles data events from SGE response queues as + * well as error and other async events as they all use the same MSI vector. + */ +static irqreturn_t t4vf_intr_msi(int irq, void *cookie) +{ + struct adapter *adapter = cookie; + + process_intrq(adapter); + return IRQ_HANDLED; +} + +/** + * t4vf_intr_handler - select the top-level interrupt handler + * @adapter: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X or MSI). + */ +irq_handler_t t4vf_intr_handler(struct adapter *adapter) +{ + BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + if (adapter->flags & USING_MSIX) + return t4vf_sge_intr_msix; + else + return t4vf_intr_msi; +} + +/** + * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues + * @data: the adapter + * + * Runs periodically from a timer to perform maintenance of SGE RX queues. + * + * a) Replenishes RX queues that have run out due to memory shortage. + * Normally new RX buffers are added when existing ones are consumed but + * when out of memory a queue can become empty. We schedule NAPI to do + * the actual refill. + */ +static void sge_rx_timer_cb(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *s = &adapter->sge; + unsigned int i; + + /* + * Scan the "Starving Free Lists" flag array looking for any Free + * Lists in need of more free buffers. If we find one and it's not + * being actively polled, then bump its "starving" counter and attempt + * to refill it. If we're successful in adding enough buffers to push + * the Free List over the starving threshold, then we can clear its + * "starving" status. + */ + for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) { + unsigned long m; + + for (m = s->starving_fl[i]; m; m &= m - 1) { + unsigned int id = __ffs(m) + i * BITS_PER_LONG; + struct sge_fl *fl = s->egr_map[id]; + + clear_bit(id, s->starving_fl); + smp_mb__after_atomic(); + + /* + * Since we are accessing fl without a lock there's a + * small probability of a false positive where we + * schedule napi but the FL is no longer starving. + * No biggie. + */ + if (fl_starving(adapter, fl)) { + struct sge_eth_rxq *rxq; + + rxq = container_of(fl, struct sge_eth_rxq, fl); + if (napi_reschedule(&rxq->rspq.napi)) + fl->starving++; + else + set_bit(id, s->starving_fl); + } + } + } + + /* + * Reschedule the next scan for starving Free Lists ... 
+ */ + mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); +} + +/** + * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues + * @data: the adapter + * + * Runs periodically from a timer to perform maintenance of SGE TX queues. + * + * b) Reclaims completed Tx packets for the Ethernet queues. Normally + * packets are cleaned up by new Tx packets, this timer cleans up packets + * when no new packets are being submitted. This is essential for pktgen, + * at least. + */ +static void sge_tx_timer_cb(unsigned long data) +{ + struct adapter *adapter = (struct adapter *)data; + struct sge *s = &adapter->sge; + unsigned int i, budget; + + budget = MAX_TIMER_TX_RECLAIM; + i = s->ethtxq_rover; + do { + struct sge_eth_txq *txq = &s->ethtxq[i]; + + if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { + int avail = reclaimable(&txq->q); + + if (avail > budget) + avail = budget; + + free_tx_desc(adapter, &txq->q, avail, true); + txq->q.in_use -= avail; + __netif_tx_unlock(txq->txq); + + budget -= avail; + if (!budget) + break; + } + + i++; + if (i >= s->ethqsets) + i = 0; + } while (i != s->ethtxq_rover); + s->ethtxq_rover = i; + + /* + * If we found too many reclaimable packets schedule a timer in the + * near future to continue where we left off. Otherwise the next timer + * will be at its normal interval. + */ + mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); +} + +/** + * bar2_address - return the BAR2 address for an SGE Queue's Registers + * @adapter: the adapter + * @qid: the SGE Queue ID + * @qtype: the SGE Queue Type (Egress or Ingress) + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 address for the SGE Queue Registers associated with + * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also + * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE + * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID" + * Registers are supported (e.g. the Write Combining Doorbell Buffer). + */ +static void __iomem *bar2_address(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + unsigned int *pbar2_qid) +{ + u64 bar2_qoffset; + int ret; + + ret = t4_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + return adapter->bar2 + bar2_qoffset; +} + +/** + * t4vf_sge_alloc_rxq - allocate an SGE RX Queue + * @adapter: the adapter + * @rspq: pointer to to the new rxq's Response Queue to be filled in + * @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue + * @dev: the network device associated with the new rspq + * @intr_dest: MSI-X vector index (overriden in MSI mode) + * @fl: pointer to the new rxq's Free List to be filled in + * @hnd: the interrupt handler to invoke for the rspq + */ +int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, + bool iqasynch, struct net_device *dev, + int intr_dest, + struct sge_fl *fl, rspq_handler_t hnd) +{ + struct sge *s = &adapter->sge; + struct port_info *pi = netdev_priv(dev); + struct fw_iq_cmd cmd, rpl; + int ret, iqandst, flsz = 0; + + /* + * If we're using MSI interrupts and we're not initializing the + * Forwarded Interrupt Queue itself, then set up this queue for + * indirect interrupts to the Forwarded Interrupt Queue. Obviously + * the Forwarded Interrupt Queue must be set up before any other + * ingress queue ... 
+ */ + if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) { + iqandst = SGE_INTRDST_IQ; + intr_dest = adapter->sge.intrq.abs_id; + } else + iqandst = SGE_INTRDST_PCI; + + /* + * Allocate the hardware ring for the Response Queue. The size needs + * to be a multiple of 16 which includes the mandatory status entry + * (regardless of whether the Status Page capabilities are enabled or + * not). + */ + rspq->size = roundup(rspq->size, 16); + rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len, + 0, &rspq->phys_addr, NULL, 0); + if (!rspq->desc) + return -ENOMEM; + + /* + * Fill in the Ingress Queue Command. Note: Ideally this code would + * be in t4vf_hw.c but there are so many parameters and dependencies + * on our Linux SGE state that we would end up having to pass tons of + * parameters. We'll have to think about how this might be migrated + * into OS-independent common code ... + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F | + FW_IQ_CMD_IQSTART_F | + FW_LEN16(cmd)); + cmd.type_to_iqandstindex = + cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH_V(iqasynch) | + FW_IQ_CMD_VIID_V(pi->viid) | + FW_IQ_CMD_IQANDST_V(iqandst) | + FW_IQ_CMD_IQANUS_V(1) | + FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) | + FW_IQ_CMD_IQANDSTINDEX_V(intr_dest)); + cmd.iqdroprss_to_iqesize = + cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) | + FW_IQ_CMD_IQGTSMODE_F | + FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4)); + cmd.iqsize = cpu_to_be16(rspq->size); + cmd.iqaddr = cpu_to_be64(rspq->phys_addr); + + if (fl) { + /* + * Allocate the ring for the hardware free list (with space + * for its status page) along with the associated software + * descriptor ring. The free list size needs to be a multiple + * of the Egress Queue Unit. + */ + fl->size = roundup(fl->size, FL_PER_EQ_UNIT); + fl->desc = alloc_ring(adapter->pdev_dev, fl->size, + sizeof(__be64), sizeof(struct rx_sw_desc), + &fl->addr, &fl->sdesc, s->stat_len); + if (!fl->desc) { + ret = -ENOMEM; + goto err; + } + + /* + * Calculate the size of the hardware free list ring plus + * Status Page (which the SGE will place after the end of the + * free list ring) in Egress Queue Units. + */ + flsz = (fl->size / FL_PER_EQ_UNIT + + s->stat_len / EQ_UNIT); + + /* + * Fill in all the relevant firmware Ingress Queue Command + * fields for the free list. + */ + cmd.iqns_to_fl0congen = + cpu_to_be32( + FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | + FW_IQ_CMD_FL0PACKEN_F | + FW_IQ_CMD_FL0PADEN_F); + cmd.fl0dcaen_to_fl0cidxfthresh = + cpu_to_be16( + FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B)); + cmd.fl0size = cpu_to_be16(flsz); + cmd.fl0addr = cpu_to_be64(fl->addr); + } + + /* + * Issue the firmware Ingress Queue Command and extract the results if + * it completes successfully. 
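+	 * On failure the error path below releases any rings which have
+	 * already been allocated.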
+ */ + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret) + goto err; + + netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64); + rspq->cur_desc = rspq->desc; + rspq->cidx = 0; + rspq->gen = 1; + rspq->next_intr_params = rspq->intr_params; + rspq->cntxt_id = be16_to_cpu(rpl.iqid); + rspq->bar2_addr = bar2_address(adapter, + rspq->cntxt_id, + T4_BAR2_QTYPE_INGRESS, + &rspq->bar2_qid); + rspq->abs_id = be16_to_cpu(rpl.physiqid); + rspq->size--; /* subtract status entry */ + rspq->adapter = adapter; + rspq->netdev = dev; + rspq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + rspq->offset = fl ? 0 : -1; + + if (fl) { + fl->cntxt_id = be16_to_cpu(rpl.fl0id); + fl->avail = 0; + fl->pend_cred = 0; + fl->pidx = 0; + fl->cidx = 0; + fl->alloc_failed = 0; + fl->large_alloc_failed = 0; + fl->starving = 0; + + /* Note, we must initialize the BAR2 Free List User Doorbell + * information before refilling the Free List! + */ + fl->bar2_addr = bar2_address(adapter, + fl->cntxt_id, + T4_BAR2_QTYPE_EGRESS, + &fl->bar2_qid); + + refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); + } + + return 0; + +err: + /* + * An error occurred. Clean up our partial allocation state and + * return the error. + */ + if (rspq->desc) { + dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len, + rspq->desc, rspq->phys_addr); + rspq->desc = NULL; + } + if (fl && fl->desc) { + kfree(fl->sdesc); + fl->sdesc = NULL; + dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT, + fl->desc, fl->addr); + fl->desc = NULL; + } + return ret; +} + +/** + * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue + * @adapter: the adapter + * @txq: pointer to the new txq to be filled in + * @devq: the network TX queue associated with the new txq + * @iqid: the relative ingress queue ID to which events relating to + * the new txq should be directed + */ +int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *devq, + unsigned int iqid) +{ + struct sge *s = &adapter->sge; + int ret, nentries; + struct fw_eq_eth_cmd cmd, rpl; + struct port_info *pi = netdev_priv(dev); + + /* + * Calculate the size of the hardware TX Queue (including the Status + * Page on the end of the TX Queue) in units of TX Descriptors. + */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + /* + * Allocate the hardware ring for the TX ring (with space for its + * status page) along with the associated software descriptor ring. + */ + txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, + sizeof(struct tx_desc), + sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); + if (!txq->q.desc) + return -ENOMEM; + + /* + * Fill in the Egress Queue Command. Note: As with the direct use of + * the firmware Ingress Queue COmmand above in our RXQ allocation + * routine, ideally, this code would be in t4vf_hw.c. Again, we'll + * have to see if there's some reasonable way to parameterize it + * into the common code ... 
+ */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+				    FW_CMD_REQUEST_F |
+				    FW_CMD_WRITE_F |
+				    FW_CMD_EXEC_F);
+	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
+					 FW_EQ_ETH_CMD_EQSTART_F |
+					 FW_LEN16(cmd));
+	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+				   FW_EQ_ETH_CMD_VIID_V(pi->viid));
+	cmd.fetchszm_to_iqid =
+		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
+			    FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
+			    FW_EQ_ETH_CMD_IQID_V(iqid));
+	cmd.dcaen_to_eqsize =
+		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+			    FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
+			    FW_EQ_ETH_CMD_CIDXFTHRESH_V(
+				    SGE_CIDXFLUSHTHRESH_32) |
+			    FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	/*
+	 * Issue the firmware Egress Queue Command and extract the results if
+	 * it completes successfully.
+	 */
+	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (ret) {
+		/*
+		 * The firmware Egress Queue Command failed for some reason.
+		 * Free up our partial allocation state and return the error.
+		 */
+		kfree(txq->q.sdesc);
+		txq->q.sdesc = NULL;
+		dma_free_coherent(adapter->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	txq->q.in_use = 0;
+	txq->q.cidx = 0;
+	txq->q.pidx = 0;
+	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
+	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
+	txq->q.bar2_addr = bar2_address(adapter,
+					txq->q.cntxt_id,
+					T4_BAR2_QTYPE_EGRESS,
+					&txq->q.bar2_qid);
+	txq->q.abs_id =
+		FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
+	txq->txq = devq;
+	txq->tso = 0;
+	txq->tx_cso = 0;
+	txq->vlan_ins = 0;
+	txq->q.stops = 0;
+	txq->q.restarts = 0;
+	txq->mapping_err = 0;
+	return 0;
+}
+
+/*
+ * Free the DMA map resources associated with a TX queue.
+ */
+static void free_txq(struct adapter *adapter, struct sge_txq *tq)
+{
+	struct sge *s = &adapter->sge;
+
+	dma_free_coherent(adapter->pdev_dev,
+			  tq->size * sizeof(*tq->desc) + s->stat_len,
+			  tq->desc, tq->phys_addr);
+	tq->cntxt_id = 0;
+	tq->sdesc = NULL;
+	tq->desc = NULL;
+}
+
+/*
+ * Free the resources associated with a response queue (possibly including a
+ * free list).
+ */
+static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
+			 struct sge_fl *fl)
+{
+	struct sge *s = &adapter->sge;
+	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
+
+	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
+		     rspq->cntxt_id, flid, 0xffff);
+	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
+			  rspq->desc, rspq->phys_addr);
+	netif_napi_del(&rspq->napi);
+	rspq->netdev = NULL;
+	rspq->cntxt_id = 0;
+	rspq->abs_id = 0;
+	rspq->desc = NULL;
+
+	if (fl) {
+		free_rx_bufs(adapter, fl, fl->avail);
+		dma_free_coherent(adapter->pdev_dev,
+				  fl->size * sizeof(*fl->desc) + s->stat_len,
+				  fl->desc, fl->addr);
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		fl->cntxt_id = 0;
+		fl->desc = NULL;
+	}
+}
+
+/**
+ * t4vf_free_sge_resources - free SGE resources
+ * @adapter: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
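+ * This covers the Ethernet RX/TX queue sets as well as the firmware event
+ * queue and the forwarded interrupt queue.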
+ */
+void t4vf_free_sge_resources(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+	struct sge_eth_rxq *rxq = s->ethrxq;
+	struct sge_eth_txq *txq = s->ethtxq;
+	struct sge_rspq *evtq = &s->fw_evtq;
+	struct sge_rspq *intrq = &s->intrq;
+	int qs;
+
+	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
+		if (rxq->rspq.desc)
+			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
+		if (txq->q.desc) {
+			t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
+			free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
+			kfree(txq->q.sdesc);
+			free_txq(adapter, &txq->q);
+		}
+	}
+	if (evtq->desc)
+		free_rspq_fl(adapter, evtq, NULL);
+	if (intrq->desc)
+		free_rspq_fl(adapter, intrq, NULL);
+}
+
+/**
+ * t4vf_sge_start - enable SGE operation
+ * @adapter: the adapter
+ *
+ * Start tasklets and timers associated with the DMA engine.
+ */
+void t4vf_sge_start(struct adapter *adapter)
+{
+	adapter->sge.ethtxq_rover = 0;
+	mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+	mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ * t4vf_sge_stop - disable SGE operation
+ * @adapter: the adapter
+ *
+ * Stop tasklets and timers associated with the DMA engine. Note that
+ * this is effective only if measures have been taken to disable any HW
+ * events that may restart them.
+ */
+void t4vf_sge_stop(struct adapter *adapter)
+{
+	struct sge *s = &adapter->sge;
+
+	if (s->rx_timer.function)
+		del_timer_sync(&s->rx_timer);
+	if (s->tx_timer.function)
+		del_timer_sync(&s->tx_timer);
+}
+
+/**
+ * t4vf_sge_init - initialize SGE
+ * @adapter: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queue sets here, instead the driver
+ * top-level must request those individually. We also do not enable DMA
+ * here, that should be done after the queues have been set up.
+ */
+int t4vf_sge_init(struct adapter *adapter)
+{
+	struct sge_params *sge_params = &adapter->params.sge;
+	u32 fl0 = sge_params->sge_fl_buffer_size[0];
+	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	struct sge *s = &adapter->sge;
+	unsigned int ingpadboundary, ingpackboundary;
+
+	/*
+	 * Start by vetting the basic SGE parameters which have been set up by
+	 * the Physical Function Driver. Ideally we should be able to deal
+	 * with _any_ configuration. Practice is different ...
+	 */
+	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+			fl0, fl1);
+		return -EINVAL;
+	}
+	if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
+		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Now translate the adapter parameters into our internal forms.
+	 */
+	if (fl1)
+		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
+			? 128 : 64);
+	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary. T5 introduced the ability to specify these
+	 * separately. The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications. (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary but
+	 * you could set the chip up that way and, in fact, legacy T4 code
+	 * would end up doing this because it would initialize the Padding
+	 * Boundary and leave the Packing Boundary initialized to 0 (16
+	 * bytes).)
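+	 * For example, on T5 and later a 32-byte Padding Boundary combined
+	 * with a 64-byte Packing Boundary yields an fl_align of 64 bytes
+	 * below.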
+ */ + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) + + INGPADBOUNDARY_SHIFT_X); + if (is_t4(adapter->params.chip)) { + s->fl_align = ingpadboundary; + } else { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + s->fl_align = max(ingpadboundary, ingpackboundary); + } + + /* A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + s->fl_starve_thres + = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1; + + /* + * Set up tasklet timers. + */ + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter); + setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter); + + /* + * Initialize Forwarded Interrupt Queue lock. + */ + spin_lock_init(&s->intrq_lock); + + return 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h new file mode 100644 index 000000000..b9debb4f2 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -0,0 +1,326 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4VF_COMMON_H__ +#define __T4VF_COMMON_H__ + +#include "../cxgb4/t4fw_api.h" + +#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) +#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf) +#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf) + +/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. 
or + * = "a" for T4 FPGA; "b" for T4 FPGA, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + */ +#define CHELSIO_T4 0x4 +#define CHELSIO_T5 0x5 + +enum chip_type { + T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), + T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2), + T4_FIRST_REV = T4_A1, + T4_LAST_REV = T4_A2, + + T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0), + T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), + T5_FIRST_REV = T5_A0, + T5_LAST_REV = T5_A1, +}; + +/* + * The "len16" field of a Firmware Command Structure ... + */ +#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) + +/* + * Per-VF statistics. + */ +struct t4vf_port_stats { + /* + * TX statistics. + */ + u64 tx_bcast_bytes; /* broadcast */ + u64 tx_bcast_frames; + u64 tx_mcast_bytes; /* multicast */ + u64 tx_mcast_frames; + u64 tx_ucast_bytes; /* unicast */ + u64 tx_ucast_frames; + u64 tx_drop_frames; /* TX dropped frames */ + u64 tx_offload_bytes; /* offload */ + u64 tx_offload_frames; + + /* + * RX statistics. + */ + u64 rx_bcast_bytes; /* broadcast */ + u64 rx_bcast_frames; + u64 rx_mcast_bytes; /* multicast */ + u64 rx_mcast_frames; + u64 rx_ucast_bytes; + u64 rx_ucast_frames; /* unicast */ + + u64 rx_err_frames; /* RX error frames */ +}; + +/* + * Per-"port" (Virtual Interface) link configuration ... + */ +struct link_config { + unsigned int supported; /* link capabilities */ + unsigned int advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? */ +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +/* + * General device parameters ... + */ +struct dev_params { + u32 fwrev; /* firmware version */ + u32 tprev; /* TP Microcode Version */ +}; + +/* + * Scatter Gather Engine parameters. These are almost all determined by the + * Physical Function Driver. We just need to grab them to see within which + * environment we're playing ... + */ +struct sge_params { + u32 sge_control; /* padding, boundaries, lengths, etc. */ + u32 sge_control2; /* T5: more of the same */ + u32 sge_host_page_size; /* PF0-7 page sizes */ + u32 sge_egress_queues_per_page; /* PF0-7 egress queues/page */ + u32 sge_ingress_queues_per_page;/* PF0-7 ingress queues/page */ + u32 sge_vf_hps; /* host page size for our vf */ + u32 sge_vf_eq_qpp; /* egress queues/page for our VF */ + u32 sge_vf_iq_qpp; /* ingress queues/page for our VF */ + u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ + u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ + u32 sge_congestion_control; /* congestion thresholds, etc. */ + u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ + u32 sge_timer_value_2_and_3; + u32 sge_timer_value_4_and_5; +}; + +/* + * Vital Product Data parameters. + */ +struct vpd_params { + u32 cclk; /* Core Clock (KHz) */ +}; + +/* + * Global Receive Side Scaling (RSS) parameters in host-native format. 
+ */ +struct rss_params { + unsigned int mode; /* RSS mode */ + union { + struct { + unsigned int synmapen:1; /* SYN Map Enable */ + unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ + unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ + unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ + unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ + unsigned int ofdmapen:1; /* Offload Map Enable */ + unsigned int tnlmapen:1; /* Tunnel Map Enable */ + unsigned int tnlalllookup:1; /* Tunnel All Lookup */ + unsigned int hashtoeplitz:1; /* use Toeplitz hash */ + } basicvirtual; + } u; +}; + +/* + * Virtual Interface RSS Configuration in host-native format. + */ +union rss_vi_config { + struct { + u16 defaultq; /* Ingress Queue ID for !tnlalllookup */ + unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */ + unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */ + unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */ + unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */ + int udpen; /* hash 4-tuple UDP ingress packets */ + } basicvirtual; +}; + +/* + * Maximum resources provisioned for a PCI VF. + */ +struct vf_resources { + unsigned int nvi; /* N virtual interfaces */ + unsigned int neq; /* N egress Qs */ + unsigned int nethctrl; /* N egress ETH or CTRL Qs */ + unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */ + unsigned int niq; /* N ingress Qs */ + unsigned int tc; /* PCI-E traffic class */ + unsigned int pmask; /* port access rights mask */ + unsigned int nexactf; /* N exact MPS filters */ + unsigned int r_caps; /* read capabilities */ + unsigned int wx_caps; /* write/execute capabilities */ +}; + +/* + * Per-"adapter" (Virtual Function) parameters. 
+ */ +struct adapter_params { + struct dev_params dev; /* general device parameters */ + struct sge_params sge; /* Scatter Gather Engine */ + struct vpd_params vpd; /* Vital Product Data */ + struct rss_params rss; /* Receive Side Scaling */ + struct vf_resources vfres; /* Virtual Function Resource limits */ + enum chip_type chip; /* chip code */ + u8 nports; /* # of Ethernet "ports" */ +}; + +#include "adapter.h" + +#ifndef PCI_VENDOR_ID_CHELSIO +# define PCI_VENDOR_ID_CHELSIO 0x1425 +#endif + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; iter++) + +static inline bool is_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; +} + +static inline bool is_x_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || + (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; +} + +static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) +{ + return adapter->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adapter, + unsigned int us) +{ + return (us * adapter->params.vpd.cclk) / 1000; +} + +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + return (ticks * 1000) / adapter->params.vpd.cclk; +} + +int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool); + +static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true); +} + +static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, + int size, void *rpl) +{ + return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); +} + +#define CHELSIO_PCI_ID_VER(dev_id) ((dev_id) >> 12) + +static inline int is_t4(enum chip_type chip) +{ + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; +} + +int t4vf_wait_dev_ready(struct adapter *); +int t4vf_port_init(struct adapter *, int); + +int t4vf_fw_reset(struct adapter *); +int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +int t4vf_get_sge_params(struct adapter *); +int t4vf_get_vpd_params(struct adapter *); +int t4vf_get_dev_params(struct adapter *); +int t4vf_get_rss_glb_config(struct adapter *); +int t4vf_get_vfres(struct adapter *); + +int t4vf_read_rss_vi_config(struct adapter *, unsigned int, + union rss_vi_config *); +int t4vf_write_rss_vi_config(struct adapter *, unsigned int, + union rss_vi_config *); +int t4vf_config_rss_range(struct adapter *, unsigned int, int, int, + const u16 *, int); + +int t4vf_alloc_vi(struct adapter *, int); +int t4vf_free_vi(struct adapter *, int); +int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool); +int t4vf_identify_port(struct adapter *, unsigned int, unsigned int); + +int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, + bool); +int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int, + const u8 **, u16 *, u64 *, bool); +int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool); +int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool); +int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *); + +int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, 
+ unsigned int); +int t4vf_eth_eq_free(struct adapter *, unsigned int); + +int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); +int t4vf_prep_adapter(struct adapter *); + +#endif /* __T4VF_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h new file mode 100644 index 000000000..b516b12b1 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_defs.h @@ -0,0 +1,121 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4VF_DEFS_H__ +#define __T4VF_DEFS_H__ + +#include "../cxgb4/t4_regs.h" + +/* + * The VF Register Map. + * + * The Scatter Gather Engine (SGE), Multiport Support module (MPS), PIO Local + * bus module (PL) and CPU Interface Module (CIM) components are mapped via + * the Slice to Module Map Table (see below) in the Physical Function Register + * Map. The Mail Box Data (MBDATA) range is mapped via the PCI-E Mailbox Base + * and Offset registers in the PF Register Map. The MBDATA base address is + * quite constrained as it determines the Mailbox Data addresses for both PFs + * and VFs, and therefore must fit in both the VF and PF Register Maps without + * overlapping other registers. + */ +#define T4VF_SGE_BASE_ADDR 0x0000 +#define T4VF_MPS_BASE_ADDR 0x0100 +#define T4VF_PL_BASE_ADDR 0x0200 +#define T4VF_MBDATA_BASE_ADDR 0x0240 +#define T4VF_CIM_BASE_ADDR 0x0300 + +#define T4VF_REGMAP_START 0x0000 +#define T4VF_REGMAP_SIZE 0x0400 + +/* + * There's no hardware limitation which requires that the addresses of the + * Mailbox Data in the fixed CIM PF map and the programmable VF map must + * match. However, it's a useful convention ... + */ +#if T4VF_MBDATA_BASE_ADDR != CIM_PF_MAILBOX_DATA_A +#error T4VF_MBDATA_BASE_ADDR must match CIM_PF_MAILBOX_DATA_A! +#endif + +/* + * Virtual Function "Slice to Module Map Table" definitions. + * + * This table allows us to map subsets of the various module register sets + * into the T4VF Register Map. 
Each table entry identifies the index of the + * module whose registers are being mapped, the offset within the module's + * register set that the mapping should start at, the limit of the mapping, + * and the offset within the T4VF Register Map to which the module's registers + * are being mapped. All addresses and qualtities are in terms of 32-bit + * words. The "limit" value is also in terms of 32-bit words and is equal to + * the last address mapped in the T4VF Register Map 1 (i.e. it's a "<=" + * relation rather than a "<"). + */ +#define T4VF_MOD_MAP(module, index, first, last) \ + T4VF_MOD_MAP_##module##_INDEX = (index), \ + T4VF_MOD_MAP_##module##_FIRST = (first), \ + T4VF_MOD_MAP_##module##_LAST = (last), \ + T4VF_MOD_MAP_##module##_OFFSET = ((first)/4), \ + T4VF_MOD_MAP_##module##_BASE = \ + (T4VF_##module##_BASE_ADDR/4 + (first)/4), \ + T4VF_MOD_MAP_##module##_LIMIT = \ + (T4VF_##module##_BASE_ADDR/4 + (last)/4), + +#define SGE_VF_KDOORBELL 0x0 +#define SGE_VF_GTS 0x4 +#define MPS_VF_CTL 0x0 +#define MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc +#define PL_VF_WHOAMI 0x0 +#define CIM_VF_EXT_MAILBOX_CTRL 0x0 +#define CIM_VF_EXT_MAILBOX_STATUS 0x4 + +enum { + T4VF_MOD_MAP(SGE, 2, SGE_VF_KDOORBELL, SGE_VF_GTS) + T4VF_MOD_MAP(MPS, 0, MPS_VF_CTL, MPS_VF_STAT_RX_VF_ERR_FRAMES_H) + T4VF_MOD_MAP(PL, 3, PL_VF_WHOAMI, PL_VF_WHOAMI) + T4VF_MOD_MAP(CIM, 1, CIM_VF_EXT_MAILBOX_CTRL, CIM_VF_EXT_MAILBOX_STATUS) +}; + +/* + * There isn't a Slice to Module Map Table entry for the Mailbox Data + * registers, but it's convenient to use similar names as above. There are 8 + * little-endian 64-bit Mailbox Data registers. Note that the "instances" + * value below is in terms of 32-bit words which matches the "word" addressing + * space we use above for the Slice to Module Map Space. + */ +#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES 16 + +#define T4VF_MBDATA_FIRST 0 +#define T4VF_MBDATA_LAST ((NUM_CIM_VF_MAILBOX_DATA_INSTANCES-1)*4) + +#endif /* __T4T4VF_DEFS_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c new file mode 100644 index 000000000..966ee900e --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -0,0 +1,1602 @@ +/* + * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet + * driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#include "t4vf_common.h" +#include "t4vf_defs.h" + +#include "../cxgb4/t4_regs.h" +#include "../cxgb4/t4_values.h" +#include "../cxgb4/t4fw_api.h" + +/* + * Wait for the device to become ready (signified by our "who am I" register + * returning a value other than all 1's). Return an error if it doesn't + * become ready ... + */ +int t4vf_wait_dev_ready(struct adapter *adapter) +{ + const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI; + const u32 notready1 = 0xffffffff; + const u32 notready2 = 0xeeeeeeee; + u32 val; + + val = t4_read_reg(adapter, whoami); + if (val != notready1 && val != notready2) + return 0; + msleep(500); + val = t4_read_reg(adapter, whoami); + if (val != notready1 && val != notready2) + return 0; + else + return -EIO; +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order + * (since the firmware data structures are specified in a big-endian layout). + */ +static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, + u32 mbox_data) +{ + for ( ; size; size -= 8, mbox_data += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); +} + +/* + * Dump contents of mailbox with a leading tag. + */ +static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data) +{ + dev_err(adapter->pdev_dev, + "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag, + (unsigned long long)t4_read_reg64(adapter, mbox_data + 0), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 8), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 16), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 24), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 32), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 40), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 48), + (unsigned long long)t4_read_reg64(adapter, mbox_data + 56)); +} + +/** + * t4vf_wr_mbox_core - send a command to FW through the mailbox + * @adapter: the adapter + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sends the given command to FW through the mailbox and waits for the + * FW to execute the command. If @rpl is not %NULL it is used to store + * the FW's reply to the command. The command and its optional reply + * are of the same length. FW can take up to 500 ms to respond. + * @sleep_ok determines whether we may sleep while awaiting the response. + * If sleeping is allowed we use progressive backoff otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). 
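+ *
+ *	A minimal usage sketch, mirroring t4vf_fw_reset() below: build the
+ *	firmware command in big-endian form and hand it to the sleeping
+ *	wrapper:
+ *
+ *		struct fw_reset_cmd cmd;
+ *
+ *		memset(&cmd, 0, sizeof(cmd));
+ *		cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
+ *					      FW_CMD_WRITE_F);
+ *		cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ *		return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+ *
+ *	The command length must be a multiple of 16 bytes and no larger
+ *	than the Mailbox Data register array (see the size check below).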
+ */ +int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + static const int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100 + }; + + u32 v; + int i, ms, delay_idx; + const __be64 *p; + u32 mbox_data = T4VF_MBDATA_BASE_ADDR; + u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; + + /* + * Commands must be multiples of 16 bytes in length and may not be + * larger than the size of the Mailbox Data register array. + */ + if ((size % 16) != 0 || + size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) + return -EINVAL; + + /* + * Loop trying to get ownership of the mailbox. Return an error + * if we can't gain ownership. + */ + v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); + for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) + v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); + if (v != MBOX_OWNER_DRV) + return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; + + /* + * Write the command array into the Mailbox Data register array and + * transfer ownership of the mailbox to the firmware. + * + * For the VFs, the Mailbox Data "registers" are actually backed by + * T4's "MA" interface rather than PL Registers (as is the case for + * the PFs). Because these are in different coherency domains, the + * write to the VF's PL-register-backed Mailbox Control can race in + * front of the writes to the MA-backed VF Mailbox Data "registers". + * So we need to do a read-back on at least one byte of the VF Mailbox + * Data registers before doing the write to the VF Mailbox Control + * register. + */ + for (i = 0, p = cmd; i < size; i += 8) + t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + t4_read_reg(adapter, mbox_data); /* flush write */ + + t4_write_reg(adapter, mbox_ctl, + MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW)); + t4_read_reg(adapter, mbox_ctl); /* flush write */ + + /* + * Spin waiting for firmware to acknowledge processing our command. + */ + delay_idx = 0; + ms = delay[0]; + + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else + mdelay(ms); + + /* + * If we're the owner, see if this is the reply we wanted. + */ + v = t4_read_reg(adapter, mbox_ctl); + if (MBOWNER_G(v) == MBOX_OWNER_DRV) { + /* + * If the Message Valid bit isn't on, revoke ownership + * of the mailbox and continue waiting for our reply. + */ + if ((v & MBMSGVALID_F) == 0) { + t4_write_reg(adapter, mbox_ctl, + MBOWNER_V(MBOX_OWNER_NONE)); + continue; + } + + /* + * We now have our reply. Extract the command return + * value, copy the reply back to our caller's buffer + * (if specified) and revoke ownership of the mailbox. + * We return the (negated) firmware command return + * code (this depends on FW_SUCCESS == 0). + */ + + /* return value in low-order little-endian word */ + v = t4_read_reg(adapter, mbox_data); + if (FW_CMD_RETVAL_G(v)) + dump_mbox(adapter, "FW Error", mbox_data); + + if (rpl) { + /* request bit in high-order BE word */ + WARN_ON((be32_to_cpu(*(const __be32 *)cmd) + & FW_CMD_REQUEST_F) == 0); + get_mbox_rpl(adapter, rpl, size, mbox_data); + WARN_ON((be32_to_cpu(*(__be32 *)rpl) + & FW_CMD_REQUEST_F) != 0); + } + t4_write_reg(adapter, mbox_ctl, + MBOWNER_V(MBOX_OWNER_NONE)); + return -FW_CMD_RETVAL_G(v); + } + } + + /* + * We timed out. Return the error ... 
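+	 * (The total wait is bounded by FW_CMD_MAX_TIMEOUT; when @sleep_ok
+	 * is set the per-iteration delay backs off through the delay[]
+	 * table above, otherwise we busy-wait with mdelay().)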
+ */ + dump_mbox(adapter, "FW Timeout", mbox_data); + return -ETIMEDOUT; +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by hardware + * inexact (hash) address matching. + */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ + FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @caps: link capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. + */ +static void init_link_config(struct link_config *lc, unsigned int caps) +{ + lc->supported = caps; + lc->requested_speed = 0; + lc->speed = 0; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & FW_PORT_CAP_ANEG) { + lc->advertising = lc->supported & ADVERT_MASK; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +/** + * t4vf_port_init - initialize port hardware/software state + * @adapter: the adapter + * @pidx: the adapter port index + */ +int t4vf_port_init(struct adapter *adapter, int pidx) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + struct fw_vi_cmd vi_cmd, vi_rpl; + struct fw_port_cmd port_cmd, port_rpl; + int v; + + /* + * Execute a VI Read command to get our Virtual Interface information + * like MAC address, etc. + */ + memset(&vi_cmd, 0, sizeof(vi_cmd)); + vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); + vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid)); + v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); + if (v) + return v; + + BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd)); + pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd)); + t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac); + + /* + * If we don't have read access to our port information, we're done + * now. Otherwise, execute a PORT Read command to get it ... + */ + if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT)) + return 0; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); + port_cmd.action_to_len16 = + cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(port_cmd)); + v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); + if (v) + return v; + + v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); + pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? + FW_PORT_CMD_MDIOADDR_G(v) : -1; + pi->port_type = FW_PORT_CMD_PTYPE_G(v); + pi->mod_type = FW_PORT_MOD_TYPE_NA; + + init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap)); + + return 0; +} + +/** + * t4vf_fw_reset - issue a reset to FW + * @adapter: the adapter + * + * Issues a reset command to FW. For a Physical Function this would + * result in the Firmware resetting all of its state. 
For a Virtual + * Function this just resets the state associated with the VF. + */ +int t4vf_fw_reset(struct adapter *adapter) +{ + struct fw_reset_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) | + FW_CMD_WRITE_F); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_query_params - query FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Reads the values of firmware or device parameters. Up to 7 parameters + * can be queried at once. + */ +static int t4vf_query_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, u32 *vals) +{ + int i, ret; + struct fw_params_cmd cmd, rpl; + struct fw_params_param *p; + size_t len16; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams].mnem), 16); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) + p->mnem = htonl(*params++); + + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret == 0) + for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++) + *vals++ = be32_to_cpu(p->val); + return ret; +} + +/** + * t4vf_set_params - sets FW or device parameters + * @adapter: the adapter + * @nparams: the number of parameters + * @params: the parameter names + * @vals: the parameter values + * + * Sets the values of firmware or device parameters. Up to 7 parameters + * can be specified at once. + */ +int t4vf_set_params(struct adapter *adapter, unsigned int nparams, + const u32 *params, const u32 *vals) +{ + int i; + struct fw_params_cmd cmd; + struct fw_params_param *p; + size_t len16; + + if (nparams > 7) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F); + len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd, + param[nparams]), 16); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); + for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) { + p->mnem = cpu_to_be32(*params++); + p->val = cpu_to_be32(*vals++); + } + + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * @adapter: the adapter + * @qid: the Queue ID + * @qtype: the Ingress or Egress type for @qid + * @pbar2_qoffset: BAR2 Queue Offset + * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues + * + * Returns the BAR2 SGE Queue Registers information associated with the + * indicated Absolute Queue ID. These are passed back in return value + * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue + * and T4_BAR2_QTYPE_INGRESS for Ingress Queues. + * + * This may return an error which indicates that BAR2 SGE Queue + * registers aren't available. If an error is not returned, then the + * following values are returned: + * + * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers + * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid + * + * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which + * require the "Inferred Queue ID" ability may be used. E.g. the + * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, + * then these "Inferred Queue ID" register may not be used. 
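+ *
+ *	Worked example (assuming a 4KB SGE Page Size and 4 Egress Queues
+ *	per Page): Egress Queue ID 5 lands in BAR2 page 1, i.e. a Queue
+ *	Offset of one Page Size, with BAR2 Queue ID 5 & 3 = 1. Since that
+ *	Queue's offset within the page (1 * SGE_UDB_SIZE) is smaller than
+ *	the Page Size, the offset is folded into *@pbar2_qoffset and a
+ *	BAR2 Queue ID of 0 is returned -- the hardware infers the Absolute
+ *	Queue ID from the offset alone.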
+ */ +int t4_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) +{ + unsigned int page_shift, page_size, qpp_shift, qpp_mask; + u64 bar2_page_offset, bar2_qoffset; + unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; + + /* T4 doesn't support BAR2 SGE Queue registers. + */ + if (is_t4(adapter->params.chip)) + return -EINVAL; + + /* Get our SGE Page Size parameters. + */ + page_shift = adapter->params.sge.sge_vf_hps + 10; + page_size = 1 << page_shift; + + /* Get the right Queues per Page parameters for our Queue. + */ + qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS + ? adapter->params.sge.sge_vf_eq_qpp + : adapter->params.sge.sge_vf_iq_qpp); + qpp_mask = (1 << qpp_shift) - 1; + + /* Calculate the basics of the BAR2 SGE Queue register area: + * o The BAR2 page the Queue registers will be in. + * o The BAR2 Queue ID. + * o The BAR2 Queue ID Offset into the BAR2 page. + */ + bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); + bar2_qid = qid & qpp_mask; + bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; + + /* If the BAR2 Queue ID Offset is less than the Page Size, then the + * hardware will infer the Absolute Queue ID simply from the writes to + * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a + * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply + * write to the first BAR2 SGE Queue Area within the BAR2 Page with + * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID + * from the BAR2 Page and BAR2 Queue ID. + * + * One important censequence of this is that some BAR2 SGE registers + * have a "Queue ID" field and we can write the BAR2 SGE Queue ID + * there. But other registers synthesize the SGE Queue ID purely + * from the writes to the registers -- the Write Combined Doorbell + * Buffer is a good example. These BAR2 SGE Registers are only + * available for those BAR2 SGE Register areas where the SGE Absolute + * Queue ID can be inferred from simple writes. + */ + bar2_qoffset = bar2_page_offset; + bar2_qinferred = (bar2_qid_offset < page_size); + if (bar2_qinferred) { + bar2_qoffset += bar2_qid_offset; + bar2_qid = 0; + } + + *pbar2_qoffset = bar2_qoffset; + *pbar2_qid = bar2_qid; + return 0; +} + +/** + * t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters + * @adapter: the adapter + * + * Retrieves various core SGE parameters in the form of hardware SGE + * register values. The caller is responsible for decoding these as + * needed. The SGE parameters are stored in @adapter->params.sge. 
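+ *	The sge_vf_hps, sge_vf_eq_qpp and sge_vf_iq_qpp values derived at
+ *	the end of this routine are what t4_bar2_sge_qregs() above uses to
+ *	locate a queue's BAR2 register area.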
+ */ +int t4vf_get_sge_params(struct adapter *adapter) +{ + struct sge_params *sge_params = &adapter->params.sge; + u32 params[7], vals[7]; + int v; + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A)); + params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A)); + params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A)); + params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A)); + params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A)); + params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A)); + v = t4vf_query_params(adapter, 7, params, vals); + if (v) + return v; + sge_params->sge_control = vals[0]; + sge_params->sge_host_page_size = vals[1]; + sge_params->sge_fl_buffer_size[0] = vals[2]; + sge_params->sge_fl_buffer_size[1] = vals[3]; + sge_params->sge_timer_value_0_and_1 = vals[4]; + sge_params->sge_timer_value_2_and_3 = vals[5]; + sge_params->sge_timer_value_4_and_5 = vals[6]; + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately with the Padding Boundary in SGE_CONTROL and and Packing + * Boundary in SGE_CONTROL2. So for T5 and later we need to grab + * SGE_CONTROL in order to determine how ingress packet data will be + * laid out in Packed Buffer Mode. Unfortunately, older versions of + * the firmware won't let us retrieve SGE_CONTROL2 so if we get a + * failure grabbing it we throw an error since we can't figure out the + * right value. + */ + if (!is_t4(adapter->params.chip)) { + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v != FW_SUCCESS) { + dev_err(adapter->pdev_dev, + "Unable to get SGE Control2; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_control2 = vals[0]; + } + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v) + return v; + sge_params->sge_ingress_rx_threshold = vals[0]; + sge_params->sge_congestion_control = vals[1]; + + /* For T5 and later we want to use the new BAR2 Doorbells. + * Unfortunately, older firmware didn't allow the this register to be + * read. + */ + if (!is_t4(adapter->params.chip)) { + u32 whoami; + unsigned int pf, s_hps, s_qpp; + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_EGRESS_QUEUES_PER_PAGE_VF_A)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) | + FW_PARAMS_PARAM_XYZ_V( + SGE_INGRESS_QUEUES_PER_PAGE_VF_A)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v != FW_SUCCESS) { + dev_warn(adapter->pdev_dev, + "Unable to get VF SGE Queues/Page; " + "probably old firmware.\n"); + return v; + } + sge_params->sge_egress_queues_per_page = vals[0]; + sge_params->sge_ingress_queues_per_page = vals[1]; + + /* We need the Queues/Page for our VF. This is based on the + * PF from which we're instantiated and is indexed in the + * register we just read. 
Do it once here so other code in + * the driver can just use it. + */ + whoami = t4_read_reg(adapter, + T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A); + pf = SOURCEPF_G(whoami); + + s_hps = (HOSTPAGESIZEPF0_S + + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf); + sge_params->sge_vf_hps = + ((sge_params->sge_host_page_size >> s_hps) + & HOSTPAGESIZEPF0_M); + + s_qpp = (QUEUESPERPAGEPF0_S + + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf); + sge_params->sge_vf_eq_qpp = + ((sge_params->sge_egress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_M); + sge_params->sge_vf_iq_qpp = + ((sge_params->sge_ingress_queues_per_page >> s_qpp) + & QUEUESPERPAGEPF0_M); + } + + return 0; +} + +/** + * t4vf_get_vpd_params - retrieve device VPD paremeters + * @adapter: the adapter + * + * Retrives various device Vital Product Data parameters. The parameters + * are stored in @adapter->params.vpd. + */ +int t4vf_get_vpd_params(struct adapter *adapter) +{ + struct vpd_params *vpd_params = &adapter->params.vpd; + u32 params[7], vals[7]; + int v; + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); + v = t4vf_query_params(adapter, 1, params, vals); + if (v) + return v; + vpd_params->cclk = vals[0]; + + return 0; +} + +/** + * t4vf_get_dev_params - retrieve device paremeters + * @adapter: the adapter + * + * Retrives various device parameters. The parameters are stored in + * @adapter->params.dev. + */ +int t4vf_get_dev_params(struct adapter *adapter) +{ + struct dev_params *dev_params = &adapter->params.dev; + u32 params[7], vals[7]; + int v; + + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV)); + params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV)); + v = t4vf_query_params(adapter, 2, params, vals); + if (v) + return v; + dev_params->fwrev = vals[0]; + dev_params->tprev = vals[1]; + + return 0; +} + +/** + * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration + * @adapter: the adapter + * + * Retrieves global RSS mode and parameters with which we have to live + * and stores them in the @adapter's RSS parameters. + */ +int t4vf_get_rss_glb_config(struct adapter *adapter) +{ + struct rss_params *rss = &adapter->params.rss; + struct fw_rss_glb_config_cmd cmd, rpl; + int v; + + /* + * Execute an RSS Global Configuration read command to retrieve + * our RSS configuration. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + /* + * Transate the big-endian RSS Global Configuration into our + * cpu-endian format based on the RSS mode. We also do first level + * filtering at this point to weed out modes which don't support + * VF Drivers ... 
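+	 * (only the Basic Virtual mode is accepted here; any other mode
+	 * falls through to the -EINVAL default case below)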
+ */ + rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G( + be32_to_cpu(rpl.u.manual.mode_pkd)); + switch (rss->mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu( + rpl.u.basicvirtual.synmapen_to_hashtoeplitz); + + rss->u.basicvirtual.synmapen = + ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0); + rss->u.basicvirtual.syn4tupenipv6 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0); + rss->u.basicvirtual.syn2tupenipv6 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0); + rss->u.basicvirtual.syn4tupenipv4 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0); + rss->u.basicvirtual.syn2tupenipv4 = + ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0); + + rss->u.basicvirtual.ofdmapen = + ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0); + + rss->u.basicvirtual.tnlmapen = + ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0); + rss->u.basicvirtual.tnlalllookup = + ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0); + + rss->u.basicvirtual.hashtoeplitz = + ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0); + + /* we need at least Tunnel Map Enable to be set */ + if (!rss->u.basicvirtual.tnlmapen) + return -EINVAL; + break; + } + + default: + /* all unknown/unsupported RSS modes result in an error */ + return -EINVAL; + } + + return 0; +} + +/** + * t4vf_get_vfres - retrieve VF resource limits + * @adapter: the adapter + * + * Retrieves configured resource limits and capabilities for a virtual + * function. The results are stored in @adapter->vfres. + */ +int t4vf_get_vfres(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + struct fw_pfvf_cmd cmd, rpl; + int v; + u32 word; + + /* + * Execute PFVF Read command to get VF resource limits; bail out early + * with error on command failure. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + /* + * Extract VF resource limits and return success. + */ + word = be32_to_cpu(rpl.niqflint_niq); + vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word); + vfres->niq = FW_PFVF_CMD_NIQ_G(word); + + word = be32_to_cpu(rpl.type_to_neq); + vfres->neq = FW_PFVF_CMD_NEQ_G(word); + vfres->pmask = FW_PFVF_CMD_PMASK_G(word); + + word = be32_to_cpu(rpl.tc_to_nexactf); + vfres->tc = FW_PFVF_CMD_TC_G(word); + vfres->nvi = FW_PFVF_CMD_NVI_G(word); + vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word); + + word = be32_to_cpu(rpl.r_caps_to_nethctrl); + vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word); + vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word); + vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word); + + return 0; +} + +/** + * t4vf_read_rss_vi_config - read a VI's RSS configuration + * @adapter: the adapter + * @viid: Virtual Interface ID + * @config: pointer to host-native VI RSS Configuration buffer + * + * Reads the Virtual Interface's RSS configuration information and + * translates it into CPU-native format. 
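+ *	As with the global configuration, only the Basic Virtual RSS mode
+ *	is understood; any other mode results in -EINVAL.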
+ */ +int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid, + union rss_vi_config *config) +{ + struct fw_rss_vi_config_cmd cmd, rpl; + int v; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_RSS_VI_CONFIG_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen); + + config->basicvirtual.ip6fourtupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0); + config->basicvirtual.ip6twotupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0); + config->basicvirtual.ip4fourtupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0); + config->basicvirtual.ip4twotupen = + ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0); + config->basicvirtual.udpen = + ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0); + config->basicvirtual.defaultq = + FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word); + break; + } + + default: + return -EINVAL; + } + + return 0; +} + +/** + * t4vf_write_rss_vi_config - write a VI's RSS configuration + * @adapter: the adapter + * @viid: Virtual Interface ID + * @config: pointer to host-native VI RSS Configuration buffer + * + * Write the Virtual Interface's RSS configuration information + * (translating it into firmware-native format before writing). + */ +int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid, + union rss_vi_config *config) +{ + struct fw_rss_vi_config_cmd cmd, rpl; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_RSS_VI_CONFIG_CMD_VIID(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + switch (adapter->params.rss.mode) { + case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: { + u32 word = 0; + + if (config->basicvirtual.ip6fourtupen) + word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F; + if (config->basicvirtual.ip6twotupen) + word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F; + if (config->basicvirtual.ip4fourtupen) + word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F; + if (config->basicvirtual.ip4twotupen) + word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F; + if (config->basicvirtual.udpen) + word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F; + word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V( + config->basicvirtual.defaultq); + cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word); + break; + } + + default: + return -EINVAL; + } + + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); +} + +/** + * t4vf_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @viid: Virtual Interface of RSS Table Slice + * @start: starting entry in the table to write + * @n: how many table entries to write + * @rspq: values for the "Response Queue" (Ingress Queue) lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range 0..1023. 
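+ *
+ *	For example, with @rspq = { 10, 11 }, @nrspq = 2 and @n = 5, the
+ *	selected slice is written as 10, 11, 10, 11, 10: the supplied
+ *	Ingress Queue IDs are reused round-robin until @n entries have
+ *	been written, packed three to a 32-bit command word.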
+ */ +int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid, + int start, int n, const u16 *rspq, int nrspq) +{ + const u16 *rsp = rspq; + const u16 *rsp_end = rspq+nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + /* + * Initialize firmware command template to write the RSS table. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_RSS_IND_TBL_CMD_VIID_V(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + + /* + * Each firmware RSS command can accommodate up to 32 RSS Ingress + * Queue Identifiers. These Ingress Queue IDs are packed three to + * a 32-bit word as 10-bit values with the upper remaining 2 bits + * reserved. + */ + while (n > 0) { + __be32 *qp = &cmd.iq0_to_iq2; + int nq = min(n, 32); + int ret; + + /* + * Set up the firmware RSS command header to send the next + * "nq" Ingress Queue IDs to the firmware. + */ + cmd.niqid = cpu_to_be16(nq); + cmd.startidx = cpu_to_be16(start); + + /* + * "nq" more done for the start of the next loop. + */ + start += nq; + n -= nq; + + /* + * While there are still Ingress Queue IDs to stuff into the + * current firmware RSS command, retrieve them from the + * Ingress Queue ID array and insert them into the command. + */ + while (nq > 0) { + /* + * Grab up to the next 3 Ingress Queue IDs (wrapping + * around the Ingress Queue ID array if necessary) and + * insert them into the firmware RSS command at the + * current 3-tuple position within the commad. + */ + u16 qbuf[3]; + u16 *qbp = qbuf; + int nqbuf = min(3, nq); + + nq -= nqbuf; + qbuf[0] = qbuf[1] = qbuf[2] = 0; + while (nqbuf) { + nqbuf--; + *qbp++ = *rsp++; + if (rsp >= rsp_end) + rsp = rspq; + } + *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) | + FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) | + FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2])); + } + + /* + * Send this portion of the RRS table update to the firmware; + * bail out on any errors. + */ + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + return 0; +} + +/** + * t4vf_alloc_vi - allocate a virtual interface on a port + * @adapter: the adapter + * @port_id: physical port associated with the VI + * + * Allocate a new Virtual Interface and bind it to the indicated + * physical port. Return the new Virtual Interface Identifier on + * success, or a [negative] error number on failure. + */ +int t4vf_alloc_vi(struct adapter *adapter, int port_id) +{ + struct fw_vi_cmd cmd, rpl; + int v; + + /* + * Execute a VI command to allocate Virtual Interface and return its + * VIID. + */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | + FW_VI_CMD_ALLOC_F); + cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id); + v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (v) + return v; + + return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid)); +} + +/** + * t4vf_free_vi -- free a virtual interface + * @adapter: the adapter + * @viid: the virtual interface identifier + * + * Free a previously allocated Virtual Interface. Return an error on + * failure. + */ +int t4vf_free_vi(struct adapter *adapter, int viid) +{ + struct fw_vi_cmd cmd; + + /* + * Execute a VI command to free the Virtual Interface. 
+ */ + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) | + FW_VI_CMD_FREE_F); + cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_enable_vi - enable/disable a virtual interface + * @adapter: the adapter + * @viid: the Virtual Interface ID + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. + */ +int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, + bool rx_en, bool tx_en) +{ + struct fw_vi_enable_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) | + FW_VI_ENABLE_CMD_EEN_V(tx_en) | + FW_LEN16(cmd)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_identify_port - identify a VI's port by blinking its LED + * @adapter: the adapter + * @viid: the Virtual Interface ID + * @nblinks: how many times to blink LED at 2.5 Hz + * + * Identifies a VI's port by blinking its LED. + */ +int t4vf_identify_port(struct adapter *adapter, unsigned int viid, + unsigned int nblinks) +{ + struct fw_vi_enable_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | + FW_LEN16(cmd)); + cmd.blinkdur = cpu_to_be16(nblinks); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_set_rxmode - set Rx properties of a virtual interface + * @adapter: the adapter + * @viid: the VI id + * @mtu: the new MTU or -1 for no change + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, + * -1 no change + * + * Sets Rx properties of a virtual interface. 
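+ *	Passing -1 for a parameter leaves that setting untouched; the code
+ *	below maps -1 to the corresponding field's mask value (e.g.
+ *	FW_VI_RXMODE_CMD_MTU_M), which tells the firmware to leave that
+ *	setting as is.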
+ */ +int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd cmd; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_VI_RXMODE_CMD_MTU_M; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_M; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M; + if (vlanex < 0) + vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_RXMODE_CMD_VIID_V(viid)); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + cmd.mtu_to_vlanexen = + cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | + FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); + return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); +} + +/** + * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adapter: the adapter + * @viid: the Virtual Interface Identifier + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. + * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, + unsigned int naddr, const u8 **addr, u16 *idx, + u64 *hash, bool sleep_ok) +{ + int offset, ret = 0; + unsigned nfilters = 0; + unsigned int rem = naddr; + struct fw_vi_mac_cmd cmd, rpl; + unsigned int max_naddr = is_t4(adapter->params.chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + + if (naddr > max_naddr) + return -EINVAL; + + for (offset = 0; offset < naddr; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) + ? rem + : ARRAY_SIZE(cmd.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + (free ? 
FW_CMD_EXEC_F : 0) | + FW_VI_MAC_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + + ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, + sleep_ok); + if (ret && ret != -ENOMEM) + break; + + for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (idx) + idx[offset+i] = + (index >= max_naddr + ? 0xffff + : index); + if (index < max_naddr) + nfilters++; + else if (hash) + *hash |= (1ULL << hash_mac_addr(addr[offset+i])); + } + + free = false; + offset += fw_naddr; + rem -= fw_naddr; + } + + /* + * If there were no errors or we merely ran out of room in our MAC + * address arena, return the number of filters actually written. + */ + if (ret == 0 || ret == -ENOMEM) + ret = nfilters; + return ret; +} + +/** + * t4vf_change_mac - modifies the exact-match filter for a MAC address + * @adapter: the adapter + * @viid: the Virtual Interface ID + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: if idx < 0, the new MAC allocation should be persistent + * + * Modifies an exact-match filter and sets it to the new MAC address. + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the + * one being used by the old address value and allocate a new filter for + * the new address value. @idx can be -1 if the address is a new + * addition. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. + */ +int t4vf_change_mac(struct adapter *adapter, unsigned int viid, + int idx, const u8 *addr, bool persist) +{ + int ret; + struct fw_vi_mac_cmd cmd, rpl; + struct fw_vi_mac_exact *p = &cmd.u.exact[0]; + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[1]), 16); + unsigned int max_naddr = is_t4(adapter->params.chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + + /* + * If this is a new allocation, determine whether it should be + * persistent (across a "freemacs" operation) or not. + */ + if (idx < 0) + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl); + if (ret == 0) { + p = &rpl.u.exact[0]; + ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); + if (ret >= max_naddr) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4vf_set_addr_hash - program the MAC inexact-match hash filter + * @adapter: the adapter + * @viid: the Virtual Interface Identifier + * @ucast: whether the hash filter should also match unicast addresses + * @vec: the value to be written to the hash filter + * @sleep_ok: call is allowed to sleep + * + * Sets the 64-bit inexact-match hash filter for a virtual interface. 
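+ *	The @vec bitmap is typically built by setting bit
+ *	hash_mac_addr(addr) for each address to be matched inexactly,
+ *	which is what t4vf_alloc_mac_filt() above does for addresses that
+ *	overflow the exact-match table.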
+ */ +int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok) +{ + struct fw_vi_mac_cmd cmd; + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[0]), 16); + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_VI_ENABLE_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F | + FW_VI_MAC_CMD_HASHUNIEN_V(ucast) | + FW_CMD_LEN16_V(len16)); + cmd.u.hash.hashvec = cpu_to_be64(vec); + return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok); +} + +/** + * t4vf_get_port_stats - collect "port" statistics + * @adapter: the adapter + * @pidx: the port index + * @s: the stats structure to fill + * + * Collect statistics for the "port"'s Virtual Interface. + */ +int t4vf_get_port_stats(struct adapter *adapter, int pidx, + struct t4vf_port_stats *s) +{ + struct port_info *pi = adap2pinfo(adapter, pidx); + struct fw_vi_stats_vf fwstats; + unsigned int rem = VI_VF_NUM_STATS; + __be64 *fwsp = (__be64 *)&fwstats; + + /* + * Grab the Virtual Interface statistics a chunk at a time via mailbox + * commands. We could use a Work Request and get all of them at once + * but that's an asynchronous interface which is awkward to use. + */ + while (rem) { + unsigned int ix = VI_VF_NUM_STATS - rem; + unsigned int nstats = min(6U, rem); + struct fw_vi_stats_cmd cmd, rpl; + size_t len = (offsetof(struct fw_vi_stats_cmd, u) + + sizeof(struct fw_vi_stats_ctl)); + size_t len16 = DIV_ROUND_UP(len, 16); + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) | + FW_VI_STATS_CMD_VIID_V(pi->viid) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F); + cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16)); + cmd.u.ctl.nstats_ix = + cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) | + FW_VI_STATS_CMD_NSTATS_V(nstats)); + ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl); + if (ret) + return ret; + + memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats); + + rem -= nstats; + fwsp += nstats; + } + + /* + * Translate firmware statistics into host native statistics. + */ + s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); + s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); + s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); + s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); + s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); + s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); + s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); + s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); + s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); + + s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); + s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); + s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); + s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); + s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); + s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); + + s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); + + return 0; +} + +/** + * t4vf_iq_free - free an ingress queue and its free lists + * @adapter: the adapter + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) + * @iqid: ingress queue ID + * @fl0id: FL0 queue ID or 0xffff if no attached FL0 + * @fl1id: FL1 queue ID or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated free lists, if any. 
+ */ +int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype, + unsigned int iqid, unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | + FW_LEN16(cmd)); + cmd.type_to_iqandstindex = + cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); + + cmd.iqid = cpu_to_be16(iqid); + cmd.fl0id = cpu_to_be16(fl0id); + cmd.fl1id = cpu_to_be16(fl1id); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_eth_eq_free - free an Ethernet egress queue + * @adapter: the adapter + * @eqid: egress queue ID + * + * Frees an Ethernet egress queue. + */ +int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) +{ + struct fw_eq_eth_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F); + cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | + FW_LEN16(cmd)); + cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** + * t4vf_handle_fw_rpl - process a firmware reply message + * @adapter: the adapter + * @rpl: start of the firmware message + * + * Processes a firmware message, such as link state change messages. + */ +int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) +{ + const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl; + u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi)); + + switch (opcode) { + case FW_PORT_CMD: { + /* + * Link/module state change message. + */ + const struct fw_port_cmd *port_cmd = + (const struct fw_port_cmd *)rpl; + u32 stat, mod; + int action, port_id, link_ok, speed, fc, pidx; + + /* + * Extract various fields from port status change message. + */ + action = FW_PORT_CMD_ACTION_G( + be32_to_cpu(port_cmd->action_to_len16)); + if (action != FW_PORT_ACTION_GET_PORT_INFO) { + dev_err(adapter->pdev_dev, + "Unknown firmware PORT reply action %x\n", + action); + break; + } + + port_id = FW_PORT_CMD_PORTID_G( + be32_to_cpu(port_cmd->op_to_portid)); + + stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); + link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; + speed = 0; + fc = 0; + if (stat & FW_PORT_CMD_RXPAUSE_F) + fc |= PAUSE_RX; + if (stat & FW_PORT_CMD_TXPAUSE_F) + fc |= PAUSE_TX; + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + speed = 100; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + speed = 1000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + speed = 40000; + + /* + * Scan all of our "ports" (Virtual Interfaces) looking for + * those bound to the physical port which has changed. If + * our recorded state doesn't match the current state, + * signal that change to the OS code. 
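+		 * (the OS-dependent notification hooks are
+		 * t4vf_os_portmod_changed() and t4vf_os_link_changed(),
+		 * called from the loop below)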
+ */ + for_each_port(adapter, pidx) { + struct port_info *pi = adap2pinfo(adapter, pidx); + struct link_config *lc; + + if (pi->port_id != port_id) + continue; + + lc = &pi->link_cfg; + + mod = FW_PORT_CMD_MODTYPE_G(stat); + if (mod != pi->mod_type) { + pi->mod_type = mod; + t4vf_os_portmod_changed(adapter, pidx); + } + + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc) { + /* something changed */ + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->supported = + be16_to_cpu(port_cmd->u.info.pcap); + t4vf_os_link_changed(adapter, pidx, link_ok); + } + } + break; + } + + default: + dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n", + opcode); + } + return 0; +} + +/** + */ +int t4vf_prep_adapter(struct adapter *adapter) +{ + int err; + unsigned int chipid; + + /* Wait for the device to become ready before proceeding ... + */ + err = t4vf_wait_dev_ready(adapter); + if (err) + return err; + + /* Default port and clock for debugging in case we can't reach + * firmware. + */ + adapter->params.nports = 1; + adapter->params.vfres.pmask = 1; + adapter->params.vpd.cclk = 50000; + + adapter->params.chip = 0; + switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { + case CHELSIO_T4: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + break; + + case CHELSIO_T5: + chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); + break; + } + + return 0; +} diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig new file mode 100644 index 000000000..905ac5f5d --- /dev/null +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -0,0 +1,65 @@ +# +# Cirrus network device configuration +# + +config NET_VENDOR_CIRRUS + bool "Cirrus devices" + default y + depends on ISA || EISA || ARM || MAC + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Cirrus cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_CIRRUS + +config CS89x0 + tristate "CS89x0 support" + depends on ISA || EISA || ARM + ---help--- + Support for CS89x0 chipset based Ethernet cards. If you have a + network (Ethernet) card of this type, say Y and read the + Ethernet-HOWTO, available from + as well as + . + + To compile this driver as a module, choose M here. The module + will be called cs89x0. + +config CS89x0_PLATFORM + bool "CS89x0 platform driver support" if HAS_IOPORT_MAP + default !HAS_IOPORT_MAP + depends on CS89x0 + help + Say Y to compile the cs89x0 driver as a platform driver. This + makes this driver suitable for use on certain evaluation boards + such as the iMX21ADS. + + If you are unsure, say N. + +config EP93XX_ETH + tristate "EP93xx Ethernet support" + depends on ARM && ARCH_EP93XX + select MII + help + This is a driver for the ethernet hardware included in EP93xx CPUs. + Say Y if you are building a kernel for EP93xx based devices. + +config MAC89x0 + tristate "Macintosh CS89x0 based ethernet cards" + depends on MAC + ---help--- + Support for CS89x0 chipset based Ethernet cards. If you have a + Nubus or LC-PDS network (Ethernet) card of this type, say Y and + read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. This module will + be called mac89x0. 
+ +endif # NET_VENDOR_CIRRUS diff --git a/drivers/net/ethernet/cirrus/Makefile b/drivers/net/ethernet/cirrus/Makefile new file mode 100644 index 000000000..ca245e2b5 --- /dev/null +++ b/drivers/net/ethernet/cirrus/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Cirrus network device drivers. +# + +obj-$(CONFIG_CS89x0) += cs89x0.o +obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o +obj-$(CONFIG_MAC89x0) += mac89x0.o diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c new file mode 100644 index 000000000..60383040d --- /dev/null +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -0,0 +1,1907 @@ +/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 + * driver for linux. + * Written 1996 by Russell Nelson, with reference to skeleton.c + * written 1993-1994 by Donald Becker. + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. + * + * The author may be reached at nelson@crynwr.com, Crynwr + * Software, 521 Pleasant Valley Rd., Potsdam, NY 13676 + * + * Other contributors: + * Mike Cruse : mcruse@cti-ltd.com + * Russ Nelson + * Melody Lee : ethernet@crystal.cirrus.com + * Alan Cox + * Andrew Morton + * Oskar Schirmer : oskar@scara.com + * Deepak Saxena : dsaxena@plexity.net + * Dmitry Pervushin : dpervushin@ru.mvista.com + * Deepak Saxena : dsaxena@plexity.net + * Domenico Andreoli : cavokz@gmail.com + */ + + +/* + * Set this to zero to disable DMA code + * + * Note that even if DMA is turned off we still support the 'dma' and 'use_dma' + * module options so we don't break any startup scripts. + */ +#ifndef CONFIG_ISA_DMA_API +#define ALLOW_DMA 0 +#else +#define ALLOW_DMA 1 +#endif + +/* + * Set this to zero to remove all the debug statements via + * dead code elimination + */ +#define DEBUGGING 1 + +/* Sources: + * Crynwr packet driver epktisa. + * Crystal Semiconductor data sheets. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#if ALLOW_DMA +#include +#endif + +#include "cs89x0.h" + +#define cs89_dbg(val, level, fmt, ...) \ +do { \ + if (val <= net_debug) \ + pr_##level(fmt, ##__VA_ARGS__); \ +} while (0) + +static char version[] __initdata = + "v2.4.3-pre1 Russell Nelson , Andrew Morton"; + +#define DRV_NAME "cs89x0" + +/* First, a few definitions that the brave might change. + * A zero-terminated list of I/O addresses to be probed. Some special flags.. + * Addr & 1 = Read back the address port, look for signature and reset + * the page window before probing + * Addr & 3 = Reset the page window and probe + * The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space, + * but it is possible that a Cirrus board could be plugged into the ISA + * slots. + */ +/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps + * them to system IRQ numbers. This mapping is card specific and is set to + * the configuration of the Cirrus Eval board for this chip. 
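+ * write_irq() looks the requested IRQ up in this table and programs the
+ * matching table index (0-3) into PP_CS8900_ISAINT; an IRQ that is not in
+ * the table falls back to index 3.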
+ */ +#ifndef CONFIG_CS89x0_PLATFORM +static unsigned int netcard_portlist[] __used __initdata = { + 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, + 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0 +}; +static unsigned int cs8900_irq_map[] = { + 10, 11, 12, 5 +}; +#endif + +#if DEBUGGING +static unsigned int net_debug = DEBUGGING; +#else +#define net_debug 0 /* gcc will remove all the debug code for us */ +#endif + +/* The number of low I/O ports used by the ethercard. */ +#define NETCARD_IO_EXTENT 16 + +/* we allow the user to override various values normally set in the EEPROM */ +#define FORCE_RJ45 0x0001 /* pick one of these three */ +#define FORCE_AUI 0x0002 +#define FORCE_BNC 0x0004 + +#define FORCE_AUTO 0x0010 /* pick one of these three */ +#define FORCE_HALF 0x0020 +#define FORCE_FULL 0x0030 + +/* Information that need to be kept for each board. */ +struct net_local { + int chip_type; /* one of: CS8900, CS8920, CS8920M */ + char chip_revision; /* revision letter of the chip ('A'...) */ + int send_cmd; /* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */ + int auto_neg_cnf; /* auto-negotiation word from EEPROM */ + int adapter_cnf; /* adapter configuration from EEPROM */ + int isa_config; /* ISA configuration from EEPROM */ + int irq_map; /* IRQ map from EEPROM */ + int rx_mode; /* what mode are we in? 0, RX_MULTCAST_ACCEPT, or RX_ALL_ACCEPT */ + int curr_rx_cfg; /* a copy of PP_RxCFG */ + int linectl; /* either 0 or LOW_RX_SQUELCH, depending on configuration. */ + int send_underrun; /* keep track of how many underruns in a row we get */ + int force; /* force various values; see FORCE* above. */ + spinlock_t lock; + void __iomem *virt_addr;/* CS89x0 virtual address. */ +#if ALLOW_DMA + int use_dma; /* Flag: we're using dma */ + int dma; /* DMA channel */ + int dmasize; /* 16 or 64 */ + unsigned char *dma_buff; /* points to the beginning of the buffer */ + unsigned char *end_dma_buff; /* points to the end of the buffer */ + unsigned char *rx_dma_ptr; /* points to the next packet */ +#endif +}; + +/* Example routines you must write ;->. 
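+ * (tx_done() is simply stubbed to 1 below; actual transmit completion is
+ * reported through ISQ_TRANSMITTER_EVENT in net_interrupt().)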
*/ +#define tx_done(dev) 1 + +/* + * Permit 'cs89x0_dma=N' in the kernel boot environment + */ +#if !defined(MODULE) +#if ALLOW_DMA +static int g_cs89x0_dma; + +static int __init dma_fn(char *str) +{ + g_cs89x0_dma = simple_strtol(str, NULL, 0); + return 1; +} + +__setup("cs89x0_dma=", dma_fn); +#endif /* ALLOW_DMA */ + +static int g_cs89x0_media__force; + +static int __init media_fn(char *str) +{ + if (!strcmp(str, "rj45")) + g_cs89x0_media__force = FORCE_RJ45; + else if (!strcmp(str, "aui")) + g_cs89x0_media__force = FORCE_AUI; + else if (!strcmp(str, "bnc")) + g_cs89x0_media__force = FORCE_BNC; + + return 1; +} + +__setup("cs89x0_media=", media_fn); +#endif + +static void readwords(struct net_local *lp, int portno, void *buf, int length) +{ + u8 *buf8 = (u8 *)buf; + + do { + u16 tmp16; + + tmp16 = ioread16(lp->virt_addr + portno); + *buf8++ = (u8)tmp16; + *buf8++ = (u8)(tmp16 >> 8); + } while (--length); +} + +static void writewords(struct net_local *lp, int portno, void *buf, int length) +{ + u8 *buf8 = (u8 *)buf; + + do { + u16 tmp16; + + tmp16 = *buf8++; + tmp16 |= (*buf8++) << 8; + iowrite16(tmp16, lp->virt_addr + portno); + } while (--length); +} + +static u16 +readreg(struct net_device *dev, u16 regno) +{ + struct net_local *lp = netdev_priv(dev); + + iowrite16(regno, lp->virt_addr + ADD_PORT); + return ioread16(lp->virt_addr + DATA_PORT); +} + +static void +writereg(struct net_device *dev, u16 regno, u16 value) +{ + struct net_local *lp = netdev_priv(dev); + + iowrite16(regno, lp->virt_addr + ADD_PORT); + iowrite16(value, lp->virt_addr + DATA_PORT); +} + +static int __init +wait_eeprom_ready(struct net_device *dev) +{ + unsigned long timeout = jiffies; + /* check to see if the EEPROM is ready, + * a timeout is used just in case EEPROM is ready when + * SI_BUSY in the PP_SelfST is clear + */ + while (readreg(dev, PP_SelfST) & SI_BUSY) + if (time_after_eq(jiffies, timeout + 40)) + return -1; + return 0; +} + +static int __init +get_eeprom_data(struct net_device *dev, int off, int len, int *buffer) +{ + int i; + + cs89_dbg(3, info, "EEPROM data from %x for %x:", off, len); + for (i = 0; i < len; i++) { + if (wait_eeprom_ready(dev) < 0) + return -1; + /* Now send the EEPROM read command and EEPROM location to read */ + writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD); + if (wait_eeprom_ready(dev) < 0) + return -1; + buffer[i] = readreg(dev, PP_EEData); + cs89_dbg(3, cont, " %04x", buffer[i]); + } + cs89_dbg(3, cont, "\n"); + return 0; +} + +static int __init +get_eeprom_cksum(int off, int len, int *buffer) +{ + int i, cksum; + + cksum = 0; + for (i = 0; i < len; i++) + cksum += buffer[i]; + cksum &= 0xffff; + if (cksum == 0) + return 0; + return -1; +} + +static void +write_irq(struct net_device *dev, int chip_type, int irq) +{ + int i; + + if (chip_type == CS8900) { +#ifndef CONFIG_CS89x0_PLATFORM + /* Search the mapping table for the corresponding IRQ pin. */ + for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) + if (cs8900_irq_map[i] == irq) + break; + /* Not found */ + if (i == ARRAY_SIZE(cs8900_irq_map)) + i = 3; +#else + /* INTRQ0 pin is used for interrupt generation. 
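+ * The platform build therefore always writes index 0 below.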
*/ + i = 0; +#endif + writereg(dev, PP_CS8900_ISAINT, i); + } else { + writereg(dev, PP_CS8920_ISAINT, irq); + } +} + +static void +count_rx_errors(int status, struct net_device *dev) +{ + dev->stats.rx_errors++; + if (status & RX_RUNT) + dev->stats.rx_length_errors++; + if (status & RX_EXTRA_DATA) + dev->stats.rx_length_errors++; + if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA | RX_RUNT))) + /* per str 172 */ + dev->stats.rx_crc_errors++; + if (status & RX_DRIBBLE) + dev->stats.rx_frame_errors++; +} + +/********************************* + * This page contains DMA routines + *********************************/ + +#if ALLOW_DMA + +#define dma_page_eq(ptr1, ptr2) ((long)(ptr1) >> 17 == (long)(ptr2) >> 17) + +static void +get_dma_channel(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + + if (lp->dma) { + dev->dma = lp->dma; + lp->isa_config |= ISA_RxDMA; + } else { + if ((lp->isa_config & ANY_ISA_DMA) == 0) + return; + dev->dma = lp->isa_config & DMA_NO_MASK; + if (lp->chip_type == CS8900) + dev->dma += 5; + if (dev->dma < 5 || dev->dma > 7) { + lp->isa_config &= ~ANY_ISA_DMA; + return; + } + } +} + +static void +write_dma(struct net_device *dev, int chip_type, int dma) +{ + struct net_local *lp = netdev_priv(dev); + if ((lp->isa_config & ANY_ISA_DMA) == 0) + return; + if (chip_type == CS8900) + writereg(dev, PP_CS8900_ISADMA, dma - 5); + else + writereg(dev, PP_CS8920_ISADMA, dma); +} + +static void +set_dma_cfg(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + + if (lp->use_dma) { + if ((lp->isa_config & ANY_ISA_DMA) == 0) { + cs89_dbg(3, err, "set_dma_cfg(): no DMA\n"); + return; + } + if (lp->isa_config & ISA_RxDMA) { + lp->curr_rx_cfg |= RX_DMA_ONLY; + cs89_dbg(3, info, "set_dma_cfg(): RX_DMA_ONLY\n"); + } else { + lp->curr_rx_cfg |= AUTO_RX_DMA; /* not that we support it... */ + cs89_dbg(3, info, "set_dma_cfg(): AUTO_RX_DMA\n"); + } + } +} + +static int +dma_bufcfg(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + if (lp->use_dma) + return (lp->isa_config & ANY_ISA_DMA) ? RX_DMA_ENBL : 0; + else + return 0; +} + +static int +dma_busctl(struct net_device *dev) +{ + int retval = 0; + struct net_local *lp = netdev_priv(dev); + if (lp->use_dma) { + if (lp->isa_config & ANY_ISA_DMA) + retval |= RESET_RX_DMA; /* Reset the DMA pointer */ + if (lp->isa_config & DMA_BURST) + retval |= DMA_BURST_MODE; /* Does ISA config specify DMA burst ? */ + if (lp->dmasize == 64) + retval |= RX_DMA_SIZE_64K; /* did they ask for 64K? */ + retval |= MEMORY_ON; /* we need memory enabled to use DMA. */ + } + return retval; +} + +static void +dma_rx(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + struct sk_buff *skb; + int status, length; + unsigned char *bp = lp->rx_dma_ptr; + + status = bp[0] + (bp[1] << 8); + length = bp[2] + (bp[3] << 8); + bp += 4; + + cs89_dbg(5, debug, "%s: receiving DMA packet at %lx, status %x, length %x\n", + dev->name, (unsigned long)bp, status, length); + + if ((status & RX_OK) == 0) { + count_rx_errors(status, dev); + goto skip_this_frame; + } + + /* Malloc up new buffer. 
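+ * If the allocation fails the frame is dropped, but bp is still advanced
+ * to the next longword-aligned frame and wrapped back to the start of the
+ * ring once it passes end_dma_buff.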
*/ + skb = netdev_alloc_skb(dev, length + 2); + if (skb == NULL) { + dev->stats.rx_dropped++; + + /* AKPM: advance bp to the next frame */ +skip_this_frame: + bp += (length + 3) & ~3; + if (bp >= lp->end_dma_buff) + bp -= lp->dmasize * 1024; + lp->rx_dma_ptr = bp; + return; + } + skb_reserve(skb, 2); /* longword align L3 header */ + + if (bp + length > lp->end_dma_buff) { + int semi_cnt = lp->end_dma_buff - bp; + memcpy(skb_put(skb, semi_cnt), bp, semi_cnt); + memcpy(skb_put(skb, length - semi_cnt), lp->dma_buff, + length - semi_cnt); + } else { + memcpy(skb_put(skb, length), bp, length); + } + bp += (length + 3) & ~3; + if (bp >= lp->end_dma_buff) + bp -= lp->dmasize*1024; + lp->rx_dma_ptr = bp; + + cs89_dbg(3, info, "%s: received %d byte DMA packet of type %x\n", + dev->name, length, + ((skb->data[ETH_ALEN + ETH_ALEN] << 8) | + skb->data[ETH_ALEN + ETH_ALEN + 1])); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; +} + +static void release_dma_buff(struct net_local *lp) +{ + if (lp->dma_buff) { + free_pages((unsigned long)(lp->dma_buff), + get_order(lp->dmasize * 1024)); + lp->dma_buff = NULL; + } +} + +#endif /* ALLOW_DMA */ + +static void +control_dc_dc(struct net_device *dev, int on_not_off) +{ + struct net_local *lp = netdev_priv(dev); + unsigned int selfcontrol; + unsigned long timenow = jiffies; + /* control the DC to DC convertor in the SelfControl register. + * Note: This is hooked up to a general purpose pin, might not + * always be a DC to DC convertor. + */ + + selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */ + if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off) + selfcontrol |= HCB1; + else + selfcontrol &= ~HCB1; + writereg(dev, PP_SelfCTL, selfcontrol); + + /* Wait for the DC/DC converter to power up - 500ms */ + while (time_before(jiffies, timenow + HZ)) + ; +} + +/* send a test packet - return true if carrier bits are ok */ +static int +send_test_pkt(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + char test_packet[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 46, /* A 46 in network order */ + 0, 0, /* DSAP=0 & SSAP=0 fields */ + 0xf3, 0 /* Control (Test Req + P bit set) */ + }; + unsigned long timenow = jiffies; + + writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON); + + memcpy(test_packet, dev->dev_addr, ETH_ALEN); + memcpy(test_packet + ETH_ALEN, dev->dev_addr, ETH_ALEN); + + iowrite16(TX_AFTER_ALL, lp->virt_addr + TX_CMD_PORT); + iowrite16(ETH_ZLEN, lp->virt_addr + TX_LEN_PORT); + + /* Test to see if the chip has allocated memory for the packet */ + while (time_before(jiffies, timenow + 5)) + if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW) + break; + if (time_after_eq(jiffies, timenow + 5)) + return 0; /* this shouldn't happen */ + + /* Write the contents of the packet */ + writewords(lp, TX_FRAME_PORT, test_packet, (ETH_ZLEN + 1) >> 1); + + cs89_dbg(1, debug, "Sending test packet "); + /* wait a couple of jiffies for packet to be received */ + for (timenow = jiffies; time_before(jiffies, timenow + 3);) + ; + if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) { + cs89_dbg(1, cont, "succeeded\n"); + return 1; + } + cs89_dbg(1, cont, "failed\n"); + return 0; +} + +#define DETECTED_NONE 0 +#define DETECTED_RJ45H 1 +#define DETECTED_RJ45F 2 +#define DETECTED_AUI 3 +#define DETECTED_BNC 4 + +static int +detect_tp(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long timenow = jiffies; + int 
fdx; + + cs89_dbg(1, debug, "%s: Attempting TP\n", dev->name); + + /* If connected to another full duplex capable 10-Base-T card + * the link pulses seem to be lost when the auto detect bit in + * the LineCTL is set. To overcome this the auto detect bit will + * be cleared whilst testing the 10-Base-T interface. This would + * not be necessary for the sparrow chip but is simpler to do it + * anyway. + */ + writereg(dev, PP_LineCTL, lp->linectl & ~AUI_ONLY); + control_dc_dc(dev, 0); + + /* Delay for the hardware to work out if the TP cable is present + * - 150ms + */ + for (timenow = jiffies; time_before(jiffies, timenow + 15);) + ; + if ((readreg(dev, PP_LineST) & LINK_OK) == 0) + return DETECTED_NONE; + + if (lp->chip_type == CS8900) { + switch (lp->force & 0xf0) { +#if 0 + case FORCE_AUTO: + pr_info("%s: cs8900 doesn't autonegotiate\n", + dev->name); + return DETECTED_NONE; +#endif + /* CS8900 doesn't support AUTO, change to HALF*/ + case FORCE_AUTO: + lp->force &= ~FORCE_AUTO; + lp->force |= FORCE_HALF; + break; + case FORCE_HALF: + break; + case FORCE_FULL: + writereg(dev, PP_TestCTL, + readreg(dev, PP_TestCTL) | FDX_8900); + break; + } + fdx = readreg(dev, PP_TestCTL) & FDX_8900; + } else { + switch (lp->force & 0xf0) { + case FORCE_AUTO: + lp->auto_neg_cnf = AUTO_NEG_ENABLE; + break; + case FORCE_HALF: + lp->auto_neg_cnf = 0; + break; + case FORCE_FULL: + lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX; + break; + } + + writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK); + + if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) { + pr_info("%s: negotiating duplex...\n", dev->name); + while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) { + if (time_after(jiffies, timenow + 4000)) { + pr_err("**** Full / half duplex auto-negotiation timed out ****\n"); + break; + } + } + } + fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE; + } + if (fdx) + return DETECTED_RJ45F; + else + return DETECTED_RJ45H; +} + +static int +detect_bnc(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + + cs89_dbg(1, debug, "%s: Attempting BNC\n", dev->name); + control_dc_dc(dev, 1); + + writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY); + + if (send_test_pkt(dev)) + return DETECTED_BNC; + else + return DETECTED_NONE; +} + +static int +detect_aui(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + + cs89_dbg(1, debug, "%s: Attempting AUI\n", dev->name); + control_dc_dc(dev, 0); + + writereg(dev, PP_LineCTL, (lp->linectl & ~AUTO_AUI_10BASET) | AUI_ONLY); + + if (send_test_pkt(dev)) + return DETECTED_AUI; + else + return DETECTED_NONE; +} + +/* We have a good packet(s), get it/them out of the buffers. */ +static void +net_rx(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + struct sk_buff *skb; + int status, length; + + status = ioread16(lp->virt_addr + RX_FRAME_PORT); + length = ioread16(lp->virt_addr + RX_FRAME_PORT); + + if ((status & RX_OK) == 0) { + count_rx_errors(status, dev); + return; + } + + /* Malloc up new buffer. 
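+ * The frame is then read out of RX_FRAME_PORT one word at a time; an odd
+ * trailing byte is picked up separately below.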
*/ + skb = netdev_alloc_skb(dev, length + 2); + if (skb == NULL) { + dev->stats.rx_dropped++; + return; + } + skb_reserve(skb, 2); /* longword align L3 header */ + + readwords(lp, RX_FRAME_PORT, skb_put(skb, length), length >> 1); + if (length & 1) + skb->data[length-1] = ioread16(lp->virt_addr + RX_FRAME_PORT); + + cs89_dbg(3, debug, "%s: received %d byte packet of type %x\n", + dev->name, length, + (skb->data[ETH_ALEN + ETH_ALEN] << 8) | + skb->data[ETH_ALEN + ETH_ALEN + 1]); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; +} + +/* The typical workload of the driver: + * Handle the network interface interrupts. + */ + +static irqreturn_t net_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct net_local *lp; + int status; + int handled = 0; + + lp = netdev_priv(dev); + + /* we MUST read all the events out of the ISQ, otherwise we'll never + * get interrupted again. As a consequence, we can't have any limit + * on the number of times we loop in the interrupt handler. The + * hardware guarantees that eventually we'll run out of events. Of + * course, if you're on a slow machine, and packets are arriving + * faster than you can read them off, you're screwed. Hasta la + * vista, baby! + */ + while ((status = ioread16(lp->virt_addr + ISQ_PORT))) { + cs89_dbg(4, debug, "%s: event=%04x\n", dev->name, status); + handled = 1; + switch (status & ISQ_EVENT_MASK) { + case ISQ_RECEIVER_EVENT: + /* Got a packet(s). */ + net_rx(dev); + break; + case ISQ_TRANSMITTER_EVENT: + dev->stats.tx_packets++; + netif_wake_queue(dev); /* Inform upper layers. */ + if ((status & (TX_OK | + TX_LOST_CRS | + TX_SQE_ERROR | + TX_LATE_COL | + TX_16_COL)) != TX_OK) { + if ((status & TX_OK) == 0) + dev->stats.tx_errors++; + if (status & TX_LOST_CRS) + dev->stats.tx_carrier_errors++; + if (status & TX_SQE_ERROR) + dev->stats.tx_heartbeat_errors++; + if (status & TX_LATE_COL) + dev->stats.tx_window_errors++; + if (status & TX_16_COL) + dev->stats.tx_aborted_errors++; + } + break; + case ISQ_BUFFER_EVENT: + if (status & READY_FOR_TX) { + /* we tried to transmit a packet earlier, + * but inexplicably ran out of buffers. + * That shouldn't happen since we only ever + * load one packet. Shrug. Do the right + * thing anyway. + */ + netif_wake_queue(dev); /* Inform upper layers. */ + } + if (status & TX_UNDERRUN) { + cs89_dbg(0, err, "%s: transmit underrun\n", + dev->name); + lp->send_underrun++; + if (lp->send_underrun == 3) + lp->send_cmd = TX_AFTER_381; + else if (lp->send_underrun == 6) + lp->send_cmd = TX_AFTER_ALL; + /* transmit cycle is done, although + * frame wasn't transmitted - this + * avoids having to wait for the upper + * layers to timeout on us, in the + * event of a tx underrun + */ + netif_wake_queue(dev); /* Inform upper layers. 
*/ + } +#if ALLOW_DMA + if (lp->use_dma && (status & RX_DMA)) { + int count = readreg(dev, PP_DmaFrameCnt); + while (count) { + cs89_dbg(5, debug, + "%s: receiving %d DMA frames\n", + dev->name, count); + if (count > 1) + cs89_dbg(2, debug, + "%s: receiving %d DMA frames\n", + dev->name, count); + dma_rx(dev); + if (--count == 0) + count = readreg(dev, PP_DmaFrameCnt); + if (count > 0) + cs89_dbg(2, debug, + "%s: continuing with %d DMA frames\n", + dev->name, count); + } + } +#endif + break; + case ISQ_RX_MISS_EVENT: + dev->stats.rx_missed_errors += (status >> 6); + break; + case ISQ_TX_COL_EVENT: + dev->stats.collisions += (status >> 6); + break; + } + } + return IRQ_RETVAL(handled); +} + +/* Open/initialize the board. This is called (in the current kernel) + sometime after booting when the 'ifconfig' program is run. + + This routine should set everything up anew at each open, even + registers that "should" only need to be set once at boot, so that + there is non-reboot way to recover if something goes wrong. +*/ + +/* AKPM: do we need to do any locking here? */ + +static int +net_open(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int result = 0; + int i; + int ret; + + if (dev->irq < 2) { + /* Allow interrupts to be generated by the chip */ +/* Cirrus' release had this: */ +#if 0 + writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ); +#endif +/* And 2.3.47 had this: */ + writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON); + + for (i = 2; i < CS8920_NO_INTS; i++) { + if ((1 << i) & lp->irq_map) { + if (request_irq(i, net_interrupt, 0, dev->name, + dev) == 0) { + dev->irq = i; + write_irq(dev, lp->chip_type, i); + /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */ + break; + } + } + } + + if (i >= CS8920_NO_INTS) { + writereg(dev, PP_BusCTL, 0); /* disable interrupts. */ + pr_err("can't get an interrupt\n"); + ret = -EAGAIN; + goto bad_out; + } + } else { +#if !defined(CONFIG_CS89x0_PLATFORM) + if (((1 << dev->irq) & lp->irq_map) == 0) { + pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", + dev->name, dev->irq, lp->irq_map); + ret = -EAGAIN; + goto bad_out; + } +#endif +/* FIXME: Cirrus' release had this: */ + writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ); +/* And 2.3.47 had this: */ +#if 0 + writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON); +#endif + write_irq(dev, lp->chip_type, dev->irq); + ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev); + if (ret) { + pr_err("request_irq(%d) failed\n", dev->irq); + goto bad_out; + } + } + +#if ALLOW_DMA + if (lp->use_dma && (lp->isa_config & ANY_ISA_DMA)) { + unsigned long flags; + lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL, + get_order(lp->dmasize * 1024)); + if (!lp->dma_buff) { + pr_err("%s: cannot get %dK memory for DMA\n", + dev->name, lp->dmasize); + goto release_irq; + } + cs89_dbg(1, debug, "%s: dma %lx %lx\n", + dev->name, + (unsigned long)lp->dma_buff, + (unsigned long)isa_virt_to_bus(lp->dma_buff)); + if ((unsigned long)lp->dma_buff >= MAX_DMA_ADDRESS || + !dma_page_eq(lp->dma_buff, + lp->dma_buff + lp->dmasize * 1024 - 1)) { + pr_err("%s: not usable as DMA buffer\n", dev->name); + goto release_irq; + } + memset(lp->dma_buff, 0, lp->dmasize * 1024); /* Why? 
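+ * Presumably so that stale buffer contents can never be mistaken for a
+ * received frame.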
*/ + if (request_dma(dev->dma, dev->name)) { + pr_err("%s: cannot get dma channel %d\n", + dev->name, dev->dma); + goto release_irq; + } + write_dma(dev, lp->chip_type, dev->dma); + lp->rx_dma_ptr = lp->dma_buff; + lp->end_dma_buff = lp->dma_buff + lp->dmasize * 1024; + spin_lock_irqsave(&lp->lock, flags); + disable_dma(dev->dma); + clear_dma_ff(dev->dma); + set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */ + set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff)); + set_dma_count(dev->dma, lp->dmasize * 1024); + enable_dma(dev->dma); + spin_unlock_irqrestore(&lp->lock, flags); + } +#endif /* ALLOW_DMA */ + + /* set the Ethernet address */ + for (i = 0; i < ETH_ALEN / 2; i++) + writereg(dev, PP_IA + i * 2, + (dev->dev_addr[i * 2] | + (dev->dev_addr[i * 2 + 1] << 8))); + + /* while we're testing the interface, leave interrupts disabled */ + writereg(dev, PP_BusCTL, MEMORY_ON); + + /* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */ + if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && + (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH)) + lp->linectl = LOW_RX_SQUELCH; + else + lp->linectl = 0; + + /* check to make sure that they have the "right" hardware available */ + switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) { + case A_CNF_MEDIA_10B_T: + result = lp->adapter_cnf & A_CNF_10B_T; + break; + case A_CNF_MEDIA_AUI: + result = lp->adapter_cnf & A_CNF_AUI; + break; + case A_CNF_MEDIA_10B_2: + result = lp->adapter_cnf & A_CNF_10B_2; + break; + default: + result = lp->adapter_cnf & (A_CNF_10B_T | + A_CNF_AUI | + A_CNF_10B_2); + } + if (!result) { + pr_err("%s: EEPROM is configured for unavailable media\n", + dev->name); +release_dma: +#if ALLOW_DMA + free_dma(dev->dma); +release_irq: + release_dma_buff(lp); +#endif + writereg(dev, PP_LineCTL, + readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON)); + free_irq(dev->irq, dev); + ret = -EAGAIN; + goto bad_out; + } + + /* set the hardware to the configured choice */ + switch (lp->adapter_cnf & A_CNF_MEDIA_TYPE) { + case A_CNF_MEDIA_10B_T: + result = detect_tp(dev); + if (result == DETECTED_NONE) { + pr_warn("%s: 10Base-T (RJ-45) has no cable\n", + dev->name); + if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ + result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */ + } + break; + case A_CNF_MEDIA_AUI: + result = detect_aui(dev); + if (result == DETECTED_NONE) { + pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name); + if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ + result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */ + } + break; + case A_CNF_MEDIA_10B_2: + result = detect_bnc(dev); + if (result == DETECTED_NONE) { + pr_warn("%s: 10Base-2 (BNC) has no cable\n", dev->name); + if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */ + result = DETECTED_BNC; /* Yes! 
I don't care if I can xmit a packet */ + } + break; + case A_CNF_MEDIA_AUTO: + writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET); + if (lp->adapter_cnf & A_CNF_10B_T) { + result = detect_tp(dev); + if (result != DETECTED_NONE) + break; + } + if (lp->adapter_cnf & A_CNF_AUI) { + result = detect_aui(dev); + if (result != DETECTED_NONE) + break; + } + if (lp->adapter_cnf & A_CNF_10B_2) { + result = detect_bnc(dev); + if (result != DETECTED_NONE) + break; + } + pr_err("%s: no media detected\n", dev->name); + goto release_dma; + } + switch (result) { + case DETECTED_NONE: + pr_err("%s: no network cable attached to configured media\n", + dev->name); + goto release_dma; + case DETECTED_RJ45H: + pr_info("%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); + break; + case DETECTED_RJ45F: + pr_info("%s: using full-duplex 10Base-T (RJ-45)\n", dev->name); + break; + case DETECTED_AUI: + pr_info("%s: using 10Base-5 (AUI)\n", dev->name); + break; + case DETECTED_BNC: + pr_info("%s: using 10Base-2 (BNC)\n", dev->name); + break; + } + + /* Turn on both receive and transmit operations */ + writereg(dev, PP_LineCTL, + readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON); + + /* Receive only error free packets addressed to this card */ + lp->rx_mode = 0; + writereg(dev, PP_RxCTL, DEF_RX_ACCEPT); + + lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL; + + if (lp->isa_config & STREAM_TRANSFER) + lp->curr_rx_cfg |= RX_STREAM_ENBL; +#if ALLOW_DMA + set_dma_cfg(dev); +#endif + writereg(dev, PP_RxCFG, lp->curr_rx_cfg); + + writereg(dev, PP_TxCFG, (TX_LOST_CRS_ENBL | + TX_SQE_ERROR_ENBL | + TX_OK_ENBL | + TX_LATE_COL_ENBL | + TX_JBR_ENBL | + TX_ANY_COL_ENBL | + TX_16_COL_ENBL)); + + writereg(dev, PP_BufCFG, (READY_FOR_TX_ENBL | + RX_MISS_COUNT_OVRFLOW_ENBL | +#if ALLOW_DMA + dma_bufcfg(dev) | +#endif + TX_COL_COUNT_OVRFLOW_ENBL | + TX_UNDERRUN_ENBL)); + + /* now that we've got our act together, enable everything */ + writereg(dev, PP_BusCTL, (ENABLE_IRQ + | (dev->mem_start ? MEMORY_ON : 0) /* turn memory on */ +#if ALLOW_DMA + | dma_busctl(dev) +#endif + )); + netif_start_queue(dev); + cs89_dbg(1, debug, "net_open() succeeded\n"); + return 0; +bad_out: + return ret; +} + +/* The inverse routine to net_open(). */ +static int +net_close(struct net_device *dev) +{ +#if ALLOW_DMA + struct net_local *lp = netdev_priv(dev); +#endif + + netif_stop_queue(dev); + + writereg(dev, PP_RxCFG, 0); + writereg(dev, PP_TxCFG, 0); + writereg(dev, PP_BufCFG, 0); + writereg(dev, PP_BusCTL, 0); + + free_irq(dev->irq, dev); + +#if ALLOW_DMA + if (lp->use_dma && lp->dma) { + free_dma(dev->dma); + release_dma_buff(lp); + } +#endif + + /* Update the statistics here. */ + return 0; +} + +/* Get the current statistics. + * This may be called with the card open or closed. + */ +static struct net_device_stats * +net_get_stats(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&lp->lock, flags); + /* Update the statistics from the device registers. */ + dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); + dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6); + spin_unlock_irqrestore(&lp->lock, flags); + + return &dev->stats; +} + +static void net_timeout(struct net_device *dev) +{ + /* If we get here, some higher level has decided we are broken. + There should really be a "kick me" function call instead. */ + cs89_dbg(0, err, "%s: transmit timed out, %s?\n", + dev->name, + tx_done(dev) ? 
"IRQ conflict" : "network cable problem"); + /* Try to restart the adaptor. */ + netif_wake_queue(dev); +} + +static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long flags; + + cs89_dbg(3, debug, "%s: sent %d byte packet of type %x\n", + dev->name, skb->len, + ((skb->data[ETH_ALEN + ETH_ALEN] << 8) | + skb->data[ETH_ALEN + ETH_ALEN + 1])); + + /* keep the upload from being interrupted, since we + * ask the chip to start transmitting before the + * whole packet has been completely uploaded. + */ + + spin_lock_irqsave(&lp->lock, flags); + netif_stop_queue(dev); + + /* initiate a transmit sequence */ + iowrite16(lp->send_cmd, lp->virt_addr + TX_CMD_PORT); + iowrite16(skb->len, lp->virt_addr + TX_LEN_PORT); + + /* Test to see if the chip has allocated memory for the packet */ + if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) { + /* Gasp! It hasn't. But that shouldn't happen since + * we're waiting for TxOk, so return 1 and requeue this packet. + */ + + spin_unlock_irqrestore(&lp->lock, flags); + cs89_dbg(0, err, "Tx buffer not free!\n"); + return NETDEV_TX_BUSY; + } + /* Write the contents of the packet */ + writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1); + spin_unlock_irqrestore(&lp->lock, flags); + dev->stats.tx_bytes += skb->len; + dev_consume_skb_any(skb); + + /* We DO NOT call netif_wake_queue() here. + * We also DO NOT call netif_start_queue(). + * + * Either of these would cause another bottom half run through + * net_send_packet() before this packet has fully gone out. + * That causes us to hit the "Gasp!" above and the send is rescheduled. + * it runs like a dog. We just return and wait for the Tx completion + * interrupt handler to restart the netdevice layer + */ + + return NETDEV_TX_OK; +} + +static void set_multicast_list(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long flags; + u16 cfg; + + spin_lock_irqsave(&lp->lock, flags); + if (dev->flags & IFF_PROMISC) + lp->rx_mode = RX_ALL_ACCEPT; + else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) + /* The multicast-accept list is initialized to accept-all, + * and we rely on higher-level filtering for now. + */ + lp->rx_mode = RX_MULTCAST_ACCEPT; + else + lp->rx_mode = 0; + + writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode); + + /* in promiscuous mode, we accept errored packets, + * so we have to enable interrupts on them also + */ + cfg = lp->curr_rx_cfg; + if (lp->rx_mode == RX_ALL_ACCEPT) + cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL; + writereg(dev, PP_RxCFG, cfg); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static int set_mac_address(struct net_device *dev, void *p) +{ + int i; + struct sockaddr *addr = p; + + if (netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n", + dev->name, dev->dev_addr); + + /* set the Ethernet address */ + for (i = 0; i < ETH_ALEN / 2; i++) + writereg(dev, PP_IA + i * 2, + (dev->dev_addr[i * 2] | + (dev->dev_addr[i * 2 + 1] << 8))); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. 
+ */ +static void net_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + net_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static const struct net_device_ops net_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_tx_timeout = net_timeout, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_set_mac_address = set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = net_poll_controller, +#endif + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static void __init reset_chip(struct net_device *dev) +{ +#if !defined(CONFIG_MACH_MX31ADS) + struct net_local *lp = netdev_priv(dev); + unsigned long reset_start_time; + + writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); + + /* wait 30 ms */ + msleep(30); + + if (lp->chip_type != CS8900) { + /* Hardware problem requires PNP registers to be reconfigured after a reset */ + iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT); + iowrite8(dev->irq, lp->virt_addr + DATA_PORT); + iowrite8(0, lp->virt_addr + DATA_PORT + 1); + + iowrite16(PP_CS8920_ISAMemB, lp->virt_addr + ADD_PORT); + iowrite8((dev->mem_start >> 16) & 0xff, + lp->virt_addr + DATA_PORT); + iowrite8((dev->mem_start >> 8) & 0xff, + lp->virt_addr + DATA_PORT + 1); + } + + /* Wait until the chip is reset */ + reset_start_time = jiffies; + while ((readreg(dev, PP_SelfST) & INIT_DONE) == 0 && + time_before(jiffies, reset_start_time + 2)) + ; +#endif /* !CONFIG_MACH_MX31ADS */ +} + +/* This is the real probe routine. + * Linux has a history of friendly device probes on the ISA bus. + * A good device probes avoids doing writes, and + * verifies that the correct device exists and functions. + * Return 0 on success. + */ +static int __init +cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) +{ + struct net_local *lp = netdev_priv(dev); + int i; + int tmp; + unsigned rev_type = 0; + int eeprom_buff[CHKSUM_LEN]; + int retval; + + /* Initialize the device structure. */ + if (!modular) { + memset(lp, 0, sizeof(*lp)); + spin_lock_init(&lp->lock); +#ifndef MODULE +#if ALLOW_DMA + if (g_cs89x0_dma) { + lp->use_dma = 1; + lp->dma = g_cs89x0_dma; + lp->dmasize = 16; /* Could make this an option... */ + } +#endif + lp->force = g_cs89x0_media__force; +#endif + } + + pr_debug("PP_addr at %p[%x]: 0x%x\n", + ioaddr, ADD_PORT, ioread16(ioaddr + ADD_PORT)); + iowrite16(PP_ChipID, ioaddr + ADD_PORT); + + tmp = ioread16(ioaddr + DATA_PORT); + if (tmp != CHIP_EISA_ID_SIG) { + pr_debug("%s: incorrect signature at %p[%x]: 0x%x!=" + CHIP_EISA_ID_SIG_STR "\n", + dev->name, ioaddr, DATA_PORT, tmp); + retval = -ENODEV; + goto out1; + } + + lp->virt_addr = ioaddr; + + /* get the chip type */ + rev_type = readreg(dev, PRODUCT_ID_ADD); + lp->chip_type = rev_type & ~REVISON_BITS; + lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A'; + + /* Check the chip type and revision in order to set the correct + * send command. CS8920 revision C and CS8900 revision F can use + * the faster send. + */ + lp->send_cmd = TX_AFTER_381; + if (lp->chip_type == CS8900 && lp->chip_revision >= 'F') + lp->send_cmd = TX_NOW; + if (lp->chip_type != CS8900 && lp->chip_revision >= 'C') + lp->send_cmd = TX_NOW; + + pr_info_once("%s\n", version); + + pr_info("%s: cs89%c0%s rev %c found at %p ", + dev->name, + lp->chip_type == CS8900 ? '0' : '2', + lp->chip_type == CS8920M ? 
"M" : "", + lp->chip_revision, + lp->virt_addr); + + reset_chip(dev); + + /* Here we read the current configuration of the chip. + * If there is no Extended EEPROM then the idea is to not disturb + * the chip configuration, it should have been correctly setup by + * automatic EEPROM read on reset. So, if the chip says it read + * the EEPROM the driver will always do *something* instead of + * complain that adapter_cnf is 0. + */ + + if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) == + (EEPROM_OK | EEPROM_PRESENT)) { + /* Load the MAC. */ + for (i = 0; i < ETH_ALEN / 2; i++) { + unsigned int Addr; + Addr = readreg(dev, PP_IA + i * 2); + dev->dev_addr[i * 2] = Addr & 0xFF; + dev->dev_addr[i * 2 + 1] = Addr >> 8; + } + + /* Load the Adapter Configuration. + * Note: Barring any more specific information from some + * other source (ie EEPROM+Schematics), we would not know + * how to operate a 10Base2 interface on the AUI port. + * However, since we do read the status of HCB1 and use + * settings that always result in calls to control_dc_dc(dev,0) + * a BNC interface should work if the enable pin + * (dc/dc converter) is on HCB1. + * It will be called AUI however. + */ + + lp->adapter_cnf = 0; + i = readreg(dev, PP_LineCTL); + /* Preserve the setting of the HCB1 pin. */ + if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL)) + lp->adapter_cnf |= A_CNF_DC_DC_POLARITY; + /* Save the sqelch bit */ + if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH) + lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH; + /* Check if the card is in 10Base-t only mode */ + if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0) + lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T; + /* Check if the card is in AUI only mode */ + if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY) + lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI; + /* Check if the card is in Auto mode. */ + if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET) + lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T | + A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO; + + cs89_dbg(1, info, "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n", + dev->name, i, lp->adapter_cnf); + + /* IRQ. Other chips already probe, see below. */ + if (lp->chip_type == CS8900) + lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK; + + pr_cont("[Cirrus EEPROM] "); + } + + pr_cont("\n"); + + /* First check to see if an EEPROM is attached. */ + + if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0) + pr_warn("No EEPROM, relying on command line....\n"); + else if (get_eeprom_data(dev, START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) { + pr_warn("EEPROM read failed, relying on command line\n"); + } else if (get_eeprom_cksum(START_EEPROM_DATA, CHKSUM_LEN, eeprom_buff) < 0) { + /* Check if the chip was able to read its own configuration starting + at 0 in the EEPROM*/ + if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) != + (EEPROM_OK | EEPROM_PRESENT)) + pr_warn("Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n"); + + } else { + /* This reads an extended EEPROM that is not documented + * in the CS8900 datasheet. 
+ */ + + /* get transmission control word but keep the autonegotiation bits */ + if (!lp->auto_neg_cnf) + lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET / 2]; + /* Store adapter configuration */ + if (!lp->adapter_cnf) + lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET / 2]; + /* Store ISA configuration */ + lp->isa_config = eeprom_buff[ISA_CNF_OFFSET / 2]; + dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET / 2] << 8; + + /* eeprom_buff has 32-bit ints, so we can't just memcpy it */ + /* store the initial memory base address */ + for (i = 0; i < ETH_ALEN / 2; i++) { + dev->dev_addr[i * 2] = eeprom_buff[i]; + dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8; + } + cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n", + dev->name, lp->adapter_cnf); + } + + /* allow them to force multiple transceivers. If they force multiple, autosense */ + { + int count = 0; + if (lp->force & FORCE_RJ45) { + lp->adapter_cnf |= A_CNF_10B_T; + count++; + } + if (lp->force & FORCE_AUI) { + lp->adapter_cnf |= A_CNF_AUI; + count++; + } + if (lp->force & FORCE_BNC) { + lp->adapter_cnf |= A_CNF_10B_2; + count++; + } + if (count > 1) + lp->adapter_cnf |= A_CNF_MEDIA_AUTO; + else if (lp->force & FORCE_RJ45) + lp->adapter_cnf |= A_CNF_MEDIA_10B_T; + else if (lp->force & FORCE_AUI) + lp->adapter_cnf |= A_CNF_MEDIA_AUI; + else if (lp->force & FORCE_BNC) + lp->adapter_cnf |= A_CNF_MEDIA_10B_2; + } + + cs89_dbg(1, debug, "%s: after force 0x%x, adapter_cnf=0x%x\n", + dev->name, lp->force, lp->adapter_cnf); + + /* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */ + + /* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */ + + /* FIXME: we don't set the Ethernet address on the command line. Use + * ifconfig IFACE hw ether AABBCCDDEEFF + */ + + pr_info("media %s%s%s", + (lp->adapter_cnf & A_CNF_10B_T) ? "RJ-45," : "", + (lp->adapter_cnf & A_CNF_AUI) ? "AUI," : "", + (lp->adapter_cnf & A_CNF_10B_2) ? "BNC," : ""); + + lp->irq_map = 0xffff; + + /* If this is a CS8900 then no pnp soft */ + if (lp->chip_type != CS8900 && + /* Check if the ISA IRQ has been set */ + (i = readreg(dev, PP_CS8920_ISAINT) & 0xff, + (i != 0 && i < CS8920_NO_INTS))) { + if (!dev->irq) + dev->irq = i; + } else { + i = lp->isa_config & INT_NO_MASK; +#ifndef CONFIG_CS89x0_PLATFORM + if (lp->chip_type == CS8900) { + /* Translate the IRQ using the IRQ mapping table. */ + if (i >= ARRAY_SIZE(cs8900_irq_map)) + pr_err("invalid ISA interrupt number %d\n", i); + else + i = cs8900_irq_map[i]; + + lp->irq_map = CS8900_IRQ_MAP; /* fixed IRQ map for CS8900 */ + } else { + int irq_map_buff[IRQ_MAP_LEN/2]; + + if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA, + IRQ_MAP_LEN / 2, + irq_map_buff) >= 0) { + if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT) + lp->irq_map = ((irq_map_buff[0] >> 8) | + (irq_map_buff[1] << 8)); + } + } +#endif + if (!dev->irq) + dev->irq = i; + } + + pr_cont(" IRQ %d", dev->irq); + +#if ALLOW_DMA + if (lp->use_dma) { + get_dma_channel(dev); + pr_cont(", DMA %d", dev->dma); + } else +#endif + pr_cont(", programmed I/O"); + + /* print the ethernet address. 
*/ + pr_cont(", MAC %pM\n", dev->dev_addr); + + dev->netdev_ops = &net_ops; + dev->watchdog_timeo = HZ; + + cs89_dbg(0, info, "cs89x0_probe1() successful\n"); + + retval = register_netdev(dev); + if (retval) + goto out2; + return 0; +out2: + iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT); +out1: + return retval; +} + +#ifndef CONFIG_CS89x0_PLATFORM +/* + * This function converts the I/O port address used by the cs89x0_probe() and + * init_module() functions to the I/O memory address used by the + * cs89x0_probe1() function. + */ +static int __init +cs89x0_ioport_probe(struct net_device *dev, unsigned long ioport, int modular) +{ + struct net_local *lp = netdev_priv(dev); + int ret; + void __iomem *io_mem; + + if (!lp) + return -ENOMEM; + + dev->base_addr = ioport; + + if (!request_region(ioport, NETCARD_IO_EXTENT, DRV_NAME)) { + ret = -EBUSY; + goto out; + } + + io_mem = ioport_map(ioport & ~3, NETCARD_IO_EXTENT); + if (!io_mem) { + ret = -ENOMEM; + goto release; + } + + /* if they give us an odd I/O address, then do ONE write to + * the address port, to get it back to address zero, where we + * expect to find the EISA signature word. An IO with a base of 0x3 + * will skip the test for the ADD_PORT. + */ + if (ioport & 1) { + cs89_dbg(1, info, "%s: odd ioaddr 0x%lx\n", dev->name, ioport); + if ((ioport & 2) != 2) { + if ((ioread16(io_mem + ADD_PORT) & ADD_MASK) != + ADD_SIG) { + pr_err("%s: bad signature 0x%x\n", + dev->name, ioread16(io_mem + ADD_PORT)); + ret = -ENODEV; + goto unmap; + } + } + } + + ret = cs89x0_probe1(dev, io_mem, modular); + if (!ret) + goto out; +unmap: + ioport_unmap(io_mem); +release: + release_region(ioport, NETCARD_IO_EXTENT); +out: + return ret; +} + +#ifndef MODULE +/* Check for a network adaptor of this type, and return '0' iff one exists. + * If dev->base_addr == 0, probe all likely locations. + * If dev->base_addr == 1, always return failure. + * If dev->base_addr == 2, allocate space for the device and return success + * (detachable devices only). + * Return 0 on success. + */ + +struct net_device * __init cs89x0_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + unsigned *port; + int err = 0; + int irq; + int io; + + if (!dev) + return ERR_PTR(-ENODEV); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + io = dev->base_addr; + irq = dev->irq; + + cs89_dbg(0, info, "cs89x0_probe(0x%x)\n", io); + + if (io > 0x1ff) { /* Check a single specified location. */ + err = cs89x0_ioport_probe(dev, io, 0); + } else if (io != 0) { /* Don't probe at all. */ + err = -ENXIO; + } else { + for (port = netcard_portlist; *port; port++) { + if (cs89x0_ioport_probe(dev, *port, 0) == 0) + break; + dev->irq = irq; + } + if (!*port) + err = -ENODEV; + } + if (err) + goto out; + return dev; +out: + free_netdev(dev); + pr_warn("no cs8900 or cs8920 detected. 
Be sure to disable PnP with SETUP\n"); + return ERR_PTR(err); +} +#endif +#endif + +#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM) + +static struct net_device *dev_cs89x0; + +/* Support the 'debug' module parm even if we're compiled for non-debug to + * avoid breaking someone's startup scripts + */ + +static int io; +static int irq; +static int debug; +static char media[8]; +static int duplex = -1; + +static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */ +static int dma; +static int dmasize = 16; /* or 64 */ + +module_param(io, int, 0); +module_param(irq, int, 0); +module_param(debug, int, 0); +module_param_string(media, media, sizeof(media), 0); +module_param(duplex, int, 0); +module_param(dma , int, 0); +module_param(dmasize , int, 0); +module_param(use_dma , int, 0); +MODULE_PARM_DESC(io, "cs89x0 I/O base address"); +MODULE_PARM_DESC(irq, "cs89x0 IRQ number"); +#if DEBUGGING +MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)"); +#else +MODULE_PARM_DESC(debug, "(ignored)"); +#endif +MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)"); +/* No other value than -1 for duplex seems to be currently interpreted */ +MODULE_PARM_DESC(duplex, "(ignored)"); +#if ALLOW_DMA +MODULE_PARM_DESC(dma , "cs89x0 ISA DMA channel; ignored if use_dma=0"); +MODULE_PARM_DESC(dmasize , "cs89x0 DMA size in kB (16,64); ignored if use_dma=0"); +MODULE_PARM_DESC(use_dma , "cs89x0 using DMA (0-1)"); +#else +MODULE_PARM_DESC(dma , "(ignored)"); +MODULE_PARM_DESC(dmasize , "(ignored)"); +MODULE_PARM_DESC(use_dma , "(ignored)"); +#endif + +MODULE_AUTHOR("Mike Cruse, Russwll Nelson , Andrew Morton"); +MODULE_LICENSE("GPL"); + +/* + * media=t - specify media type + * or media=2 + * or media=aui + * or medai=auto + * duplex=0 - specify forced half/full/autonegotiate duplex + * debug=# - debug level + * + * Default Chip Configuration: + * DMA Burst = enabled + * IOCHRDY Enabled = enabled + * UseSA = enabled + * CS8900 defaults to half-duplex if not specified on command-line + * CS8920 defaults to autoneg if not specified on command-line + * Use reset defaults for other config parameters + * + * Assumptions: + * media type specified is supported (circuitry is present) + * if memory address is > 1MB, then required mem decode hw is present + * if 10B-2, then agent other than driver will enable DC/DC converter + * (hw or software util) + */ + +int __init init_module(void) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + struct net_local *lp; + int ret = 0; + +#if DEBUGGING + net_debug = debug; +#else + debug = 0; +#endif + if (!dev) + return -ENOMEM; + + dev->irq = irq; + dev->base_addr = io; + lp = netdev_priv(dev); + +#if ALLOW_DMA + if (use_dma) { + lp->use_dma = use_dma; + lp->dma = dma; + lp->dmasize = dmasize; + } +#endif + + spin_lock_init(&lp->lock); + + /* boy, they'd better get these right */ + if (!strcmp(media, "rj45")) + lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T; + else if (!strcmp(media, "aui")) + lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI; + else if (!strcmp(media, "bnc")) + lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2; + else + lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T; + + if (duplex == -1) + lp->auto_neg_cnf = AUTO_NEG_ENABLE; + + if (io == 0) { + pr_err("Module autoprobing not allowed\n"); + pr_err("Append io=0xNNN\n"); + ret = -EPERM; + goto out; + } else if (io <= 0x1ff) { + ret = -ENXIO; + goto out; + } + +#if ALLOW_DMA + if (use_dma && dmasize != 16 && dmasize != 64) { + pr_err("dma size must be 
either 16K or 64K, not %dK\n", + dmasize); + ret = -EPERM; + goto out; + } +#endif + ret = cs89x0_ioport_probe(dev, io, 1); + if (ret) + goto out; + + dev_cs89x0 = dev; + return 0; +out: + free_netdev(dev); + return ret; +} + +void __exit +cleanup_module(void) +{ + struct net_local *lp = netdev_priv(dev_cs89x0); + + unregister_netdev(dev_cs89x0); + iowrite16(PP_ChipID, lp->virt_addr + ADD_PORT); + ioport_unmap(lp->virt_addr); + release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT); + free_netdev(dev_cs89x0); +} +#endif /* MODULE && !CONFIG_CS89x0_PLATFORM */ + +#ifdef CONFIG_CS89x0_PLATFORM +static int __init cs89x0_platform_probe(struct platform_device *pdev) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + struct net_local *lp; + struct resource *mem_res; + void __iomem *virt_addr; + int err; + + if (!dev) + return -ENOMEM; + + lp = netdev_priv(dev); + + dev->irq = platform_get_irq(pdev, 0); + if (dev->irq <= 0) { + dev_warn(&dev->dev, "interrupt resource missing\n"); + err = -ENXIO; + goto free; + } + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + virt_addr = devm_ioremap_resource(&pdev->dev, mem_res); + if (IS_ERR(virt_addr)) { + err = PTR_ERR(virt_addr); + goto free; + } + + err = cs89x0_probe1(dev, virt_addr, 0); + if (err) { + dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n"); + goto free; + } + + platform_set_drvdata(pdev, dev); + return 0; + +free: + free_netdev(dev); + return err; +} + +static int cs89x0_platform_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + /* This platform_get_resource() call will not return NULL, because + * the same call in cs89x0_platform_probe() has returned a non NULL + * value. + */ + unregister_netdev(dev); + free_netdev(dev); + return 0; +} + +static struct platform_driver cs89x0_driver = { + .driver = { + .name = DRV_NAME, + }, + .remove = cs89x0_platform_remove, +}; + +module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); + +#endif /* CONFIG_CS89x0_PLATFORM */ diff --git a/drivers/net/ethernet/cirrus/cs89x0.h b/drivers/net/ethernet/cirrus/cs89x0.h new file mode 100644 index 000000000..91423b70b --- /dev/null +++ b/drivers/net/ethernet/cirrus/cs89x0.h @@ -0,0 +1,465 @@ +/* Copyright, 1988-1992, Russell Nelson, Crynwr Software + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, version 1. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + + +#define PP_ChipID 0x0000 /* offset 0h -> Corp -ID */ + /* offset 2h -> Model/Product Number */ + /* offset 3h -> Chip Revision Number */ + +#define PP_ISAIOB 0x0020 /* IO base address */ +#define PP_CS8900_ISAINT 0x0022 /* ISA interrupt select */ +#define PP_CS8920_ISAINT 0x0370 /* ISA interrupt select */ +#define PP_CS8900_ISADMA 0x0024 /* ISA Rec DMA channel */ +#define PP_CS8920_ISADMA 0x0374 /* ISA Rec DMA channel */ +#define PP_ISASOF 0x0026 /* ISA DMA offset */ +#define PP_DmaFrameCnt 0x0028 /* ISA DMA Frame count */ +#define PP_DmaByteCnt 0x002A /* ISA DMA Byte count */ +#define PP_CS8900_ISAMemB 0x002C /* Memory base */ +#define PP_CS8920_ISAMemB 0x0348 /* */ + +#define PP_ISABootBase 0x0030 /* Boot Prom base */ +#define PP_ISABootMask 0x0034 /* Boot Prom Mask */ + +/* EEPROM data and command registers */ +#define PP_EECMD 0x0040 /* NVR Interface Command register */ +#define PP_EEData 0x0042 /* NVR Interface Data Register */ +#define PP_DebugReg 0x0044 /* Debug Register */ + +#define PP_RxCFG 0x0102 /* Rx Bus config */ +#define PP_RxCTL 0x0104 /* Receive Control Register */ +#define PP_TxCFG 0x0106 /* Transmit Config Register */ +#define PP_TxCMD 0x0108 /* Transmit Command Register */ +#define PP_BufCFG 0x010A /* Bus configuration Register */ +#define PP_LineCTL 0x0112 /* Line Config Register */ +#define PP_SelfCTL 0x0114 /* Self Command Register */ +#define PP_BusCTL 0x0116 /* ISA bus control Register */ +#define PP_TestCTL 0x0118 /* Test Register */ +#define PP_AutoNegCTL 0x011C /* Auto Negotiation Ctrl */ + +#define PP_ISQ 0x0120 /* Interrupt Status */ +#define PP_RxEvent 0x0124 /* Rx Event Register */ +#define PP_TxEvent 0x0128 /* Tx Event Register */ +#define PP_BufEvent 0x012C /* Bus Event Register */ +#define PP_RxMiss 0x0130 /* Receive Miss Count */ +#define PP_TxCol 0x0132 /* Transmit Collision Count */ +#define PP_LineST 0x0134 /* Line State Register */ +#define PP_SelfST 0x0136 /* Self State register */ +#define PP_BusST 0x0138 /* Bus Status */ +#define PP_TDR 0x013C /* Time Domain Reflectometry */ +#define PP_AutoNegST 0x013E /* Auto Neg Status */ +#define PP_TxCommand 0x0144 /* Tx Command */ +#define PP_TxLength 0x0146 /* Tx Length */ +#define PP_LAF 0x0150 /* Hash Table */ +#define PP_IA 0x0158 /* Physical Address Register */ + +#define PP_RxStatus 0x0400 /* Receive start of frame */ +#define PP_RxLength 0x0402 /* Receive Length of frame */ +#define PP_RxFrame 0x0404 /* Receive frame pointer */ +#define PP_TxFrame 0x0A00 /* Transmit frame pointer */ + +/* Primary I/O Base Address. If no I/O base is supplied by the user, then this */ +/* can be used as the default I/O base to access the PacketPage Area. 
*/ +#define DEFAULTIOBASE 0x0300 +#define FIRST_IO 0x020C /* First I/O port to check */ +#define LAST_IO 0x037C /* Last I/O port to check (+10h) */ +#define ADD_MASK 0x3000 /* Mask it use of the ADD_PORT register */ +#define ADD_SIG 0x3000 /* Expected ID signature */ + +/* On Macs, we only need use the ISA I/O stuff until we do MEMORY_ON */ +#ifdef CONFIG_MAC +#define LCSLOTBASE 0xfee00000 +#define MMIOBASE 0x40000 +#endif + +#define CHIP_EISA_ID_SIG 0x630E /* Product ID Code for Crystal Chip (CS8900 spec 4.3) */ +#define CHIP_EISA_ID_SIG_STR "0x630E" + +#ifdef IBMEIPKT +#define EISA_ID_SIG 0x4D24 /* IBM */ +#define PART_NO_SIG 0x1010 /* IBM */ +#define MONGOOSE_BIT 0x0000 /* IBM */ +#else +#define EISA_ID_SIG 0x630E /* PnP Vendor ID (same as chip id for Crystal board) */ +#define PART_NO_SIG 0x4000 /* ID code CS8920 board (PnP Vendor Product code) */ +#define MONGOOSE_BIT 0x2000 /* PART_NO_SIG + MONGOOSE_BUT => ID of mongoose */ +#endif + +#define PRODUCT_ID_ADD 0x0002 /* Address of product ID */ + +/* Mask to find out the types of registers */ +#define REG_TYPE_MASK 0x001F + +/* Eeprom Commands */ +#define ERSE_WR_ENBL 0x00F0 +#define ERSE_WR_DISABLE 0x0000 + +/* Defines Control/Config register quintuplet numbers */ +#define RX_BUF_CFG 0x0003 +#define RX_CONTROL 0x0005 +#define TX_CFG 0x0007 +#define TX_COMMAND 0x0009 +#define BUF_CFG 0x000B +#define LINE_CONTROL 0x0013 +#define SELF_CONTROL 0x0015 +#define BUS_CONTROL 0x0017 +#define TEST_CONTROL 0x0019 + +/* Defines Status/Count registers quintuplet numbers */ +#define RX_EVENT 0x0004 +#define TX_EVENT 0x0008 +#define BUF_EVENT 0x000C +#define RX_MISS_COUNT 0x0010 +#define TX_COL_COUNT 0x0012 +#define LINE_STATUS 0x0014 +#define SELF_STATUS 0x0016 +#define BUS_STATUS 0x0018 +#define TDR 0x001C + +/* PP_RxCFG - Receive Configuration and Interrupt Mask bit definition - Read/write */ +#define SKIP_1 0x0040 +#define RX_STREAM_ENBL 0x0080 +#define RX_OK_ENBL 0x0100 +#define RX_DMA_ONLY 0x0200 +#define AUTO_RX_DMA 0x0400 +#define BUFFER_CRC 0x0800 +#define RX_CRC_ERROR_ENBL 0x1000 +#define RX_RUNT_ENBL 0x2000 +#define RX_EXTRA_DATA_ENBL 0x4000 + +/* PP_RxCTL - Receive Control bit definition - Read/write */ +#define RX_IA_HASH_ACCEPT 0x0040 +#define RX_PROM_ACCEPT 0x0080 +#define RX_OK_ACCEPT 0x0100 +#define RX_MULTCAST_ACCEPT 0x0200 +#define RX_IA_ACCEPT 0x0400 +#define RX_BROADCAST_ACCEPT 0x0800 +#define RX_BAD_CRC_ACCEPT 0x1000 +#define RX_RUNT_ACCEPT 0x2000 +#define RX_EXTRA_DATA_ACCEPT 0x4000 +#define RX_ALL_ACCEPT (RX_PROM_ACCEPT|RX_BAD_CRC_ACCEPT|RX_RUNT_ACCEPT|RX_EXTRA_DATA_ACCEPT) +/* Default receive mode - individually addressed, broadcast, and error free */ +#define DEF_RX_ACCEPT (RX_IA_ACCEPT | RX_BROADCAST_ACCEPT | RX_OK_ACCEPT) + +/* PP_TxCFG - Transmit Configuration Interrupt Mask bit definition - Read/write */ +#define TX_LOST_CRS_ENBL 0x0040 +#define TX_SQE_ERROR_ENBL 0x0080 +#define TX_OK_ENBL 0x0100 +#define TX_LATE_COL_ENBL 0x0200 +#define TX_JBR_ENBL 0x0400 +#define TX_ANY_COL_ENBL 0x0800 +#define TX_16_COL_ENBL 0x8000 + +/* PP_TxCMD - Transmit Command bit definition - Read-only */ +#define TX_START_4_BYTES 0x0000 +#define TX_START_64_BYTES 0x0040 +#define TX_START_128_BYTES 0x0080 +#define TX_START_ALL_BYTES 0x00C0 +#define TX_FORCE 0x0100 +#define TX_ONE_COL 0x0200 +#define TX_TWO_PART_DEFF_DISABLE 0x0400 +#define TX_NO_CRC 0x1000 +#define TX_RUNT 0x2000 + +/* PP_BufCFG - Buffer Configuration Interrupt Mask bit definition - Read/write */ +#define GENERATE_SW_INTERRUPT 0x0040 +#define RX_DMA_ENBL 0x0080 +#define 
READY_FOR_TX_ENBL 0x0100 +#define TX_UNDERRUN_ENBL 0x0200 +#define RX_MISS_ENBL 0x0400 +#define RX_128_BYTE_ENBL 0x0800 +#define TX_COL_COUNT_OVRFLOW_ENBL 0x1000 +#define RX_MISS_COUNT_OVRFLOW_ENBL 0x2000 +#define RX_DEST_MATCH_ENBL 0x8000 + +/* PP_LineCTL - Line Control bit definition - Read/write */ +#define SERIAL_RX_ON 0x0040 +#define SERIAL_TX_ON 0x0080 +#define AUI_ONLY 0x0100 +#define AUTO_AUI_10BASET 0x0200 +#define MODIFIED_BACKOFF 0x0800 +#define NO_AUTO_POLARITY 0x1000 +#define TWO_PART_DEFDIS 0x2000 +#define LOW_RX_SQUELCH 0x4000 + +/* PP_SelfCTL - Software Self Control bit definition - Read/write */ +#define POWER_ON_RESET 0x0040 +#define SW_STOP 0x0100 +#define SLEEP_ON 0x0200 +#define AUTO_WAKEUP 0x0400 +#define HCB0_ENBL 0x1000 +#define HCB1_ENBL 0x2000 +#define HCB0 0x4000 +#define HCB1 0x8000 + +/* PP_BusCTL - ISA Bus Control bit definition - Read/write */ +#define RESET_RX_DMA 0x0040 +#define MEMORY_ON 0x0400 +#define DMA_BURST_MODE 0x0800 +#define IO_CHANNEL_READY_ON 0x1000 +#define RX_DMA_SIZE_64K 0x2000 +#define ENABLE_IRQ 0x8000 + +/* PP_TestCTL - Test Control bit definition - Read/write */ +#define LINK_OFF 0x0080 +#define ENDEC_LOOPBACK 0x0200 +#define AUI_LOOPBACK 0x0400 +#define BACKOFF_OFF 0x0800 +#define FDX_8900 0x4000 +#define FAST_TEST 0x8000 + +/* PP_RxEvent - Receive Event Bit definition - Read-only */ +#define RX_IA_HASHED 0x0040 +#define RX_DRIBBLE 0x0080 +#define RX_OK 0x0100 +#define RX_HASHED 0x0200 +#define RX_IA 0x0400 +#define RX_BROADCAST 0x0800 +#define RX_CRC_ERROR 0x1000 +#define RX_RUNT 0x2000 +#define RX_EXTRA_DATA 0x4000 + +#define HASH_INDEX_MASK 0x0FC00 + +/* PP_TxEvent - Transmit Event Bit definition - Read-only */ +#define TX_LOST_CRS 0x0040 +#define TX_SQE_ERROR 0x0080 +#define TX_OK 0x0100 +#define TX_LATE_COL 0x0200 +#define TX_JBR 0x0400 +#define TX_16_COL 0x8000 +#define TX_SEND_OK_BITS (TX_OK|TX_LOST_CRS) +#define TX_COL_COUNT_MASK 0x7800 + +/* PP_BufEvent - Buffer Event Bit definition - Read-only */ +#define SW_INTERRUPT 0x0040 +#define RX_DMA 0x0080 +#define READY_FOR_TX 0x0100 +#define TX_UNDERRUN 0x0200 +#define RX_MISS 0x0400 +#define RX_128_BYTE 0x0800 +#define TX_COL_OVRFLW 0x1000 +#define RX_MISS_OVRFLW 0x2000 +#define RX_DEST_MATCH 0x8000 + +/* PP_LineST - Ethernet Line Status bit definition - Read-only */ +#define LINK_OK 0x0080 +#define AUI_ON 0x0100 +#define TENBASET_ON 0x0200 +#define POLARITY_OK 0x1000 +#define CRS_OK 0x4000 + +/* PP_SelfST - Chip Software Status bit definition */ +#define ACTIVE_33V 0x0040 +#define INIT_DONE 0x0080 +#define SI_BUSY 0x0100 +#define EEPROM_PRESENT 0x0200 +#define EEPROM_OK 0x0400 +#define EL_PRESENT 0x0800 +#define EE_SIZE_64 0x1000 + +/* PP_BusST - ISA Bus Status bit definition */ +#define TX_BID_ERROR 0x0080 +#define READY_FOR_TX_NOW 0x0100 + +/* PP_AutoNegCTL - Auto Negotiation Control bit definition */ +#define RE_NEG_NOW 0x0040 +#define ALLOW_FDX 0x0080 +#define AUTO_NEG_ENABLE 0x0100 +#define NLP_ENABLE 0x0200 +#define FORCE_FDX 0x8000 +#define AUTO_NEG_BITS (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE) +#define AUTO_NEG_MASK (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE|ALLOW_FDX|RE_NEG_NOW) + +/* PP_AutoNegST - Auto Negotiation Status bit definition */ +#define AUTO_NEG_BUSY 0x0080 +#define FLP_LINK 0x0100 +#define FLP_LINK_GOOD 0x0800 +#define LINK_FAULT 0x1000 +#define HDX_ACTIVE 0x4000 +#define FDX_ACTIVE 0x8000 + +/* The following block defines the ISQ event types */ +#define ISQ_RECEIVER_EVENT 0x04 +#define ISQ_TRANSMITTER_EVENT 0x08 +#define ISQ_BUFFER_EVENT 0x0c +#define 
ISQ_RX_MISS_EVENT 0x10 +#define ISQ_TX_COL_EVENT 0x12 + +#define ISQ_EVENT_MASK 0x003F /* ISQ mask to find out type of event */ +#define ISQ_HIST 16 /* small history buffer */ +#define AUTOINCREMENT 0x8000 /* Bit mask to set bit-15 for autoincrement */ + +#define TXRXBUFSIZE 0x0600 +#define RXDMABUFSIZE 0x8000 +#define RXDMASIZE 0x4000 +#define TXRX_LENGTH_MASK 0x07FF + +/* rx options bits */ +#define RCV_WITH_RXON 1 /* Set SerRx ON */ +#define RCV_COUNTS 2 /* Use Framecnt1 */ +#define RCV_PONG 4 /* Pong respondent */ +#define RCV_DONG 8 /* Dong operation */ +#define RCV_POLLING 0x10 /* Poll RxEvent */ +#define RCV_ISQ 0x20 /* Use ISQ, int */ +#define RCV_AUTO_DMA 0x100 /* Set AutoRxDMAE */ +#define RCV_DMA 0x200 /* Set RxDMA only */ +#define RCV_DMA_ALL 0x400 /* Copy all DMA'ed */ +#define RCV_FIXED_DATA 0x800 /* Every frame same */ +#define RCV_IO 0x1000 /* Use ISA IO only */ +#define RCV_MEMORY 0x2000 /* Use ISA Memory */ + +#define RAM_SIZE 0x1000 /* The card has 4k bytes or RAM */ +#define PKT_START PP_TxFrame /* Start of packet RAM */ + +#define RX_FRAME_PORT 0x0000 +#define TX_FRAME_PORT RX_FRAME_PORT +#define TX_CMD_PORT 0x0004 +#define TX_NOW 0x0000 /* Tx packet after 5 bytes copied */ +#define TX_AFTER_381 0x0040 /* Tx packet after 381 bytes copied */ +#define TX_AFTER_ALL 0x00c0 /* Tx packet after all bytes copied */ +#define TX_LEN_PORT 0x0006 +#define ISQ_PORT 0x0008 +#define ADD_PORT 0x000A +#define DATA_PORT 0x000C + +#define EEPROM_WRITE_EN 0x00F0 +#define EEPROM_WRITE_DIS 0x0000 +#define EEPROM_WRITE_CMD 0x0100 +#define EEPROM_READ_CMD 0x0200 + +/* Receive Header */ +/* Description of header of each packet in receive area of memory */ +#define RBUF_EVENT_LOW 0 /* Low byte of RxEvent - status of received frame */ +#define RBUF_EVENT_HIGH 1 /* High byte of RxEvent - status of received frame */ +#define RBUF_LEN_LOW 2 /* Length of received data - low byte */ +#define RBUF_LEN_HI 3 /* Length of received data - high byte */ +#define RBUF_HEAD_LEN 4 /* Length of this header */ + +#define CHIP_READ 0x1 /* Used to mark state of the repins code (chip or dma) */ +#define DMA_READ 0x2 /* Used to mark state of the repins code (chip or dma) */ + +/* for bios scan */ +/* */ +#ifdef CSDEBUG +/* use these values for debugging bios scan */ +#define BIOS_START_SEG 0x00000 +#define BIOS_OFFSET_INC 0x0010 +#else +#define BIOS_START_SEG 0x0c000 +#define BIOS_OFFSET_INC 0x0200 +#endif + +#define BIOS_LAST_OFFSET 0x0fc00 + +/* Byte offsets into the EEPROM configuration buffer */ +#define ISA_CNF_OFFSET 0x6 +#define TX_CTL_OFFSET (ISA_CNF_OFFSET + 8) /* 8900 eeprom */ +#define AUTO_NEG_CNF_OFFSET (ISA_CNF_OFFSET + 8) /* 8920 eeprom */ + + /* the assumption here is that the bits in the eeprom are generally */ + /* in the same position as those in the autonegctl register. 
*/ + /* Of course the IMM bit is not in that register so it must be */ + /* masked out */ +#define EE_FORCE_FDX 0x8000 +#define EE_NLP_ENABLE 0x0200 +#define EE_AUTO_NEG_ENABLE 0x0100 +#define EE_ALLOW_FDX 0x0080 +#define EE_AUTO_NEG_CNF_MASK (EE_FORCE_FDX|EE_NLP_ENABLE|EE_AUTO_NEG_ENABLE|EE_ALLOW_FDX) + +#define IMM_BIT 0x0040 /* ignore missing media */ + +#define ADAPTER_CNF_OFFSET (AUTO_NEG_CNF_OFFSET + 2) +#define A_CNF_10B_T 0x0001 +#define A_CNF_AUI 0x0002 +#define A_CNF_10B_2 0x0004 +#define A_CNF_MEDIA_TYPE 0x0070 +#define A_CNF_MEDIA_AUTO 0x0070 +#define A_CNF_MEDIA_10B_T 0x0020 +#define A_CNF_MEDIA_AUI 0x0040 +#define A_CNF_MEDIA_10B_2 0x0010 +#define A_CNF_DC_DC_POLARITY 0x0080 +#define A_CNF_NO_AUTO_POLARITY 0x2000 +#define A_CNF_LOW_RX_SQUELCH 0x4000 +#define A_CNF_EXTND_10B_2 0x8000 + +#define PACKET_PAGE_OFFSET 0x8 + +/* Bit definitions for the ISA configuration word from the EEPROM */ +#define INT_NO_MASK 0x000F +#define DMA_NO_MASK 0x0070 +#define ISA_DMA_SIZE 0x0200 +#define ISA_AUTO_RxDMA 0x0400 +#define ISA_RxDMA 0x0800 +#define DMA_BURST 0x1000 +#define STREAM_TRANSFER 0x2000 +#define ANY_ISA_DMA (ISA_AUTO_RxDMA | ISA_RxDMA) + +/* DMA controller registers */ +#define DMA_BASE 0x00 /* DMA controller base */ +#define DMA_BASE_2 0x0C0 /* DMA controller base */ + +#define DMA_STAT 0x0D0 /* DMA controller status register */ +#define DMA_MASK 0x0D4 /* DMA controller mask register */ +#define DMA_MODE 0x0D6 /* DMA controller mode register */ +#define DMA_RESETFF 0x0D8 /* DMA controller first/last flip flop */ + +/* DMA data */ +#define DMA_DISABLE 0x04 /* Disable channel n */ +#define DMA_ENABLE 0x00 /* Enable channel n */ +/* Demand transfers, incr. address, auto init, writes, ch. n */ +#define DMA_RX_MODE 0x14 +/* Demand transfers, incr. address, auto init, reads, ch. n */ +#define DMA_TX_MODE 0x18 + +#define DMA_SIZE (16*1024) /* Size of dma buffer - 16k */ + +#define CS8900 0x0000 +#define CS8920 0x4000 +#define CS8920M 0x6000 +#define REVISON_BITS 0x1F00 +#define EEVER_NUMBER 0x12 +#define CHKSUM_LEN 0x14 +#define CHKSUM_VAL 0x0000 +#define START_EEPROM_DATA 0x001c /* Offset into eeprom for start of data */ +#define IRQ_MAP_EEPROM_DATA 0x0046 /* Offset into eeprom for the IRQ map */ +#define IRQ_MAP_LEN 0x0004 /* No of bytes to read for the IRQ map */ +#define PNP_IRQ_FRMT 0x0022 /* PNP small item IRQ format */ +#define CS8900_IRQ_MAP 0x1c20 /* This IRQ map is fixed */ + +#define CS8920_NO_INTS 0x0F /* Max CS8920 interrupt select # */ + +#define PNP_ADD_PORT 0x0279 +#define PNP_WRITE_PORT 0x0A79 + +#define GET_PNP_ISA_STRUCT 0x40 +#define PNP_ISA_STRUCT_LEN 0x06 +#define PNP_CSN_CNT_OFF 0x01 +#define PNP_RD_PORT_OFF 0x02 +#define PNP_FUNCTION_OK 0x00 +#define PNP_WAKE 0x03 +#define PNP_RSRC_DATA 0x04 +#define PNP_RSRC_READY 0x01 +#define PNP_STATUS 0x05 +#define PNP_ACTIVATE 0x30 +#define PNP_CNF_IO_H 0x60 +#define PNP_CNF_IO_L 0x61 +#define PNP_CNF_INT 0x70 +#define PNP_CNF_DMA 0x74 +#define PNP_CNF_MEM 0x48 + +#define BIT0 1 +#define BIT15 0x8000 + diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c new file mode 100644 index 000000000..de9f7c97d --- /dev/null +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -0,0 +1,888 @@ +/* + * EP93xx ethernet network device driver + * Copyright (C) 2006 Lennert Buytenhek + * Dedicated to Marija Kulikova. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRV_MODULE_NAME "ep93xx-eth" +#define DRV_MODULE_VERSION "0.1" + +#define RX_QUEUE_ENTRIES 64 +#define TX_QUEUE_ENTRIES 8 + +#define MAX_PKT_SIZE 2044 +#define PKT_BUF_SIZE 2048 + +#define REG_RXCTL 0x0000 +#define REG_RXCTL_DEFAULT 0x00073800 +#define REG_TXCTL 0x0004 +#define REG_TXCTL_ENABLE 0x00000001 +#define REG_MIICMD 0x0010 +#define REG_MIICMD_READ 0x00008000 +#define REG_MIICMD_WRITE 0x00004000 +#define REG_MIIDATA 0x0014 +#define REG_MIISTS 0x0018 +#define REG_MIISTS_BUSY 0x00000001 +#define REG_SELFCTL 0x0020 +#define REG_SELFCTL_RESET 0x00000001 +#define REG_INTEN 0x0024 +#define REG_INTEN_TX 0x00000008 +#define REG_INTEN_RX 0x00000007 +#define REG_INTSTSP 0x0028 +#define REG_INTSTS_TX 0x00000008 +#define REG_INTSTS_RX 0x00000004 +#define REG_INTSTSC 0x002c +#define REG_AFP 0x004c +#define REG_INDAD0 0x0050 +#define REG_INDAD1 0x0051 +#define REG_INDAD2 0x0052 +#define REG_INDAD3 0x0053 +#define REG_INDAD4 0x0054 +#define REG_INDAD5 0x0055 +#define REG_GIINTMSK 0x0064 +#define REG_GIINTMSK_ENABLE 0x00008000 +#define REG_BMCTL 0x0080 +#define REG_BMCTL_ENABLE_TX 0x00000100 +#define REG_BMCTL_ENABLE_RX 0x00000001 +#define REG_BMSTS 0x0084 +#define REG_BMSTS_RX_ACTIVE 0x00000008 +#define REG_RXDQBADD 0x0090 +#define REG_RXDQBLEN 0x0094 +#define REG_RXDCURADD 0x0098 +#define REG_RXDENQ 0x009c +#define REG_RXSTSQBADD 0x00a0 +#define REG_RXSTSQBLEN 0x00a4 +#define REG_RXSTSQCURADD 0x00a8 +#define REG_RXSTSENQ 0x00ac +#define REG_TXDQBADD 0x00b0 +#define REG_TXDQBLEN 0x00b4 +#define REG_TXDQCURADD 0x00b8 +#define REG_TXDENQ 0x00bc +#define REG_TXSTSQBADD 0x00c0 +#define REG_TXSTSQBLEN 0x00c4 +#define REG_TXSTSQCURADD 0x00c8 +#define REG_MAXFRMLEN 0x00e8 + +struct ep93xx_rdesc +{ + u32 buf_addr; + u32 rdesc1; +}; + +#define RDESC1_NSOF 0x80000000 +#define RDESC1_BUFFER_INDEX 0x7fff0000 +#define RDESC1_BUFFER_LENGTH 0x0000ffff + +struct ep93xx_rstat +{ + u32 rstat0; + u32 rstat1; +}; + +#define RSTAT0_RFP 0x80000000 +#define RSTAT0_RWE 0x40000000 +#define RSTAT0_EOF 0x20000000 +#define RSTAT0_EOB 0x10000000 +#define RSTAT0_AM 0x00c00000 +#define RSTAT0_RX_ERR 0x00200000 +#define RSTAT0_OE 0x00100000 +#define RSTAT0_FE 0x00080000 +#define RSTAT0_RUNT 0x00040000 +#define RSTAT0_EDATA 0x00020000 +#define RSTAT0_CRCE 0x00010000 +#define RSTAT0_CRCI 0x00008000 +#define RSTAT0_HTI 0x00003f00 +#define RSTAT1_RFP 0x80000000 +#define RSTAT1_BUFFER_INDEX 0x7fff0000 +#define RSTAT1_FRAME_LENGTH 0x0000ffff + +struct ep93xx_tdesc +{ + u32 buf_addr; + u32 tdesc1; +}; + +#define TDESC1_EOF 0x80000000 +#define TDESC1_BUFFER_INDEX 0x7fff0000 +#define TDESC1_BUFFER_ABORT 0x00008000 +#define TDESC1_BUFFER_LENGTH 0x00000fff + +struct ep93xx_tstat +{ + u32 tstat0; +}; + +#define TSTAT0_TXFP 0x80000000 +#define TSTAT0_TXWE 0x40000000 +#define TSTAT0_FA 0x20000000 +#define TSTAT0_LCRS 0x10000000 +#define TSTAT0_OW 0x04000000 +#define TSTAT0_TXU 0x02000000 +#define TSTAT0_ECOLL 0x01000000 +#define TSTAT0_NCOLL 0x001f0000 +#define TSTAT0_BUFFER_INDEX 0x00007fff + +struct ep93xx_descs +{ + struct ep93xx_rdesc rdesc[RX_QUEUE_ENTRIES]; + 
struct ep93xx_tdesc tdesc[TX_QUEUE_ENTRIES]; + struct ep93xx_rstat rstat[RX_QUEUE_ENTRIES]; + struct ep93xx_tstat tstat[TX_QUEUE_ENTRIES]; +}; + +struct ep93xx_priv +{ + struct resource *res; + void __iomem *base_addr; + int irq; + + struct ep93xx_descs *descs; + dma_addr_t descs_dma_addr; + + void *rx_buf[RX_QUEUE_ENTRIES]; + void *tx_buf[TX_QUEUE_ENTRIES]; + + spinlock_t rx_lock; + unsigned int rx_pointer; + unsigned int tx_clean_pointer; + unsigned int tx_pointer; + spinlock_t tx_pending_lock; + unsigned int tx_pending; + + struct net_device *dev; + struct napi_struct napi; + + struct mii_if_info mii; + u8 mdc_divisor; +}; + +#define rdb(ep, off) __raw_readb((ep)->base_addr + (off)) +#define rdw(ep, off) __raw_readw((ep)->base_addr + (off)) +#define rdl(ep, off) __raw_readl((ep)->base_addr + (off)) +#define wrb(ep, off, val) __raw_writeb((val), (ep)->base_addr + (off)) +#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off)) +#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off)) + +static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + int data; + int i; + + wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg); + + for (i = 0; i < 10; i++) { + if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) + break; + msleep(1); + } + + if (i == 10) { + pr_info("mdio read timed out\n"); + data = 0xffff; + } else { + data = rdl(ep, REG_MIIDATA); + } + + return data; +} + +static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + int i; + + wrl(ep, REG_MIIDATA, data); + wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg); + + for (i = 0; i < 10; i++) { + if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0) + break; + msleep(1); + } + + if (i == 10) + pr_info("mdio write timed out\n"); +} + +static int ep93xx_rx(struct net_device *dev, int processed, int budget) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + + while (processed < budget) { + int entry; + struct ep93xx_rstat *rstat; + u32 rstat0; + u32 rstat1; + int length; + struct sk_buff *skb; + + entry = ep->rx_pointer; + rstat = ep->descs->rstat + entry; + + rstat0 = rstat->rstat0; + rstat1 = rstat->rstat1; + if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) + break; + + rstat->rstat0 = 0; + rstat->rstat1 = 0; + + if (!(rstat0 & RSTAT0_EOF)) + pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1); + if (!(rstat0 & RSTAT0_EOB)) + pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1); + if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry) + pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1); + + if (!(rstat0 & RSTAT0_RWE)) { + dev->stats.rx_errors++; + if (rstat0 & RSTAT0_OE) + dev->stats.rx_fifo_errors++; + if (rstat0 & RSTAT0_FE) + dev->stats.rx_frame_errors++; + if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA)) + dev->stats.rx_length_errors++; + if (rstat0 & RSTAT0_CRCE) + dev->stats.rx_crc_errors++; + goto err; + } + + length = rstat1 & RSTAT1_FRAME_LENGTH; + if (length > MAX_PKT_SIZE) { + pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1); + goto err; + } + + /* Strip FCS. 
*/ + if (rstat0 & RSTAT0_CRCI) + length -= 4; + + skb = netdev_alloc_skb(dev, length + 2); + if (likely(skb != NULL)) { + struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry]; + skb_reserve(skb, 2); + dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr, + length, DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); + dma_sync_single_for_device(dev->dev.parent, + rxd->buf_addr, length, + DMA_FROM_DEVICE); + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, dev); + + netif_receive_skb(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; + } else { + dev->stats.rx_dropped++; + } + +err: + ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); + processed++; + } + + return processed; +} + +static int ep93xx_have_more_rx(struct ep93xx_priv *ep) +{ + struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer; + return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); +} + +static int ep93xx_poll(struct napi_struct *napi, int budget) +{ + struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); + struct net_device *dev = ep->dev; + int rx = 0; + +poll_some_more: + rx = ep93xx_rx(dev, rx, budget); + if (rx < budget) { + int more = 0; + + spin_lock_irq(&ep->rx_lock); + __napi_complete(napi); + wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); + if (ep93xx_have_more_rx(ep)) { + wrl(ep, REG_INTEN, REG_INTEN_TX); + wrl(ep, REG_INTSTSP, REG_INTSTS_RX); + more = 1; + } + spin_unlock_irq(&ep->rx_lock); + + if (more && napi_reschedule(napi)) + goto poll_some_more; + } + + if (rx) { + wrw(ep, REG_RXDENQ, rx); + wrw(ep, REG_RXSTSENQ, rx); + } + + return rx; +} + +static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + struct ep93xx_tdesc *txd; + int entry; + + if (unlikely(skb->len > MAX_PKT_SIZE)) { + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + entry = ep->tx_pointer; + ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); + + txd = &ep->descs->tdesc[entry]; + + txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff); + dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len, + DMA_TO_DEVICE); + skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); + dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len, + DMA_TO_DEVICE); + dev_kfree_skb(skb); + + spin_lock_irq(&ep->tx_pending_lock); + ep->tx_pending++; + if (ep->tx_pending == TX_QUEUE_ENTRIES) + netif_stop_queue(dev); + spin_unlock_irq(&ep->tx_pending_lock); + + wrl(ep, REG_TXDENQ, 1); + + return NETDEV_TX_OK; +} + +static void ep93xx_tx_complete(struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + int wake; + + wake = 0; + + spin_lock(&ep->tx_pending_lock); + while (1) { + int entry; + struct ep93xx_tstat *tstat; + u32 tstat0; + + entry = ep->tx_clean_pointer; + tstat = ep->descs->tstat + entry; + + tstat0 = tstat->tstat0; + if (!(tstat0 & TSTAT0_TXFP)) + break; + + tstat->tstat0 = 0; + + if (tstat0 & TSTAT0_FA) + pr_crit("frame aborted %.8x\n", tstat0); + if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry) + pr_crit("entry mismatch %.8x\n", tstat0); + + if (tstat0 & TSTAT0_TXWE) { + int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; + } else { + dev->stats.tx_errors++; + } + + if (tstat0 & TSTAT0_OW) + dev->stats.tx_window_errors++; + if (tstat0 & TSTAT0_TXU) + dev->stats.tx_fifo_errors++; + dev->stats.collisions += (tstat0 >> 16) & 0x1f; + + ep->tx_clean_pointer = 
(entry + 1) & (TX_QUEUE_ENTRIES - 1); + if (ep->tx_pending == TX_QUEUE_ENTRIES) + wake = 1; + ep->tx_pending--; + } + spin_unlock(&ep->tx_pending_lock); + + if (wake) + netif_wake_queue(dev); +} + +static irqreturn_t ep93xx_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct ep93xx_priv *ep = netdev_priv(dev); + u32 status; + + status = rdl(ep, REG_INTSTSC); + if (status == 0) + return IRQ_NONE; + + if (status & REG_INTSTS_RX) { + spin_lock(&ep->rx_lock); + if (likely(napi_schedule_prep(&ep->napi))) { + wrl(ep, REG_INTEN, REG_INTEN_TX); + __napi_schedule(&ep->napi); + } + spin_unlock(&ep->rx_lock); + } + + if (status & REG_INTSTS_TX) + ep93xx_tx_complete(dev); + + return IRQ_HANDLED; +} + +static void ep93xx_free_buffers(struct ep93xx_priv *ep) +{ + struct device *dev = ep->dev->dev.parent; + int i; + + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { + dma_addr_t d; + + d = ep->descs->rdesc[i].buf_addr; + if (d) + dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE); + + kfree(ep->rx_buf[i]); + } + + for (i = 0; i < TX_QUEUE_ENTRIES; i++) { + dma_addr_t d; + + d = ep->descs->tdesc[i].buf_addr; + if (d) + dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE); + + kfree(ep->tx_buf[i]); + } + + dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs, + ep->descs_dma_addr); +} + +static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) +{ + struct device *dev = ep->dev->dev.parent; + int i; + + ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs), + &ep->descs_dma_addr, GFP_KERNEL); + if (ep->descs == NULL) + return 1; + + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { + void *buf; + dma_addr_t d; + + buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL); + if (buf == NULL) + goto err; + + d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, d)) { + kfree(buf); + goto err; + } + + ep->rx_buf[i] = buf; + ep->descs->rdesc[i].buf_addr = d; + ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; + } + + for (i = 0; i < TX_QUEUE_ENTRIES; i++) { + void *buf; + dma_addr_t d; + + buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL); + if (buf == NULL) + goto err; + + d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, d)) { + kfree(buf); + goto err; + } + + ep->tx_buf[i] = buf; + ep->descs->tdesc[i].buf_addr = d; + } + + return 0; + +err: + ep93xx_free_buffers(ep); + return 1; +} + +static int ep93xx_start_hw(struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + unsigned long addr; + int i; + + wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); + for (i = 0; i < 10; i++) { + if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) + break; + msleep(1); + } + + if (i == 10) { + pr_crit("hw failed to reset\n"); + return 1; + } + + wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9)); + + /* Does the PHY support preamble suppress? */ + if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0) + wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8)); + + /* Receive descriptor ring. */ + addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc); + wrl(ep, REG_RXDQBADD, addr); + wrl(ep, REG_RXDCURADD, addr); + wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc)); + + /* Receive status ring. */ + addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat); + wrl(ep, REG_RXSTSQBADD, addr); + wrl(ep, REG_RXSTSQCURADD, addr); + wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat)); + + /* Transmit descriptor ring. 
*/ + addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc); + wrl(ep, REG_TXDQBADD, addr); + wrl(ep, REG_TXDQCURADD, addr); + wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc)); + + /* Transmit status ring. */ + addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat); + wrl(ep, REG_TXSTSQBADD, addr); + wrl(ep, REG_TXSTSQCURADD, addr); + wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat)); + + wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX); + wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); + wrl(ep, REG_GIINTMSK, 0); + + for (i = 0; i < 10; i++) { + if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0) + break; + msleep(1); + } + + if (i == 10) { + pr_crit("hw failed to start\n"); + return 1; + } + + wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES); + wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES); + + wrb(ep, REG_INDAD0, dev->dev_addr[0]); + wrb(ep, REG_INDAD1, dev->dev_addr[1]); + wrb(ep, REG_INDAD2, dev->dev_addr[2]); + wrb(ep, REG_INDAD3, dev->dev_addr[3]); + wrb(ep, REG_INDAD4, dev->dev_addr[4]); + wrb(ep, REG_INDAD5, dev->dev_addr[5]); + wrl(ep, REG_AFP, 0); + + wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE); + + wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT); + wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE); + + return 0; +} + +static void ep93xx_stop_hw(struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + int i; + + wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET); + for (i = 0; i < 10; i++) { + if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0) + break; + msleep(1); + } + + if (i == 10) + pr_crit("hw failed to reset\n"); +} + +static int ep93xx_open(struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + int err; + + if (ep93xx_alloc_buffers(ep)) + return -ENOMEM; + + napi_enable(&ep->napi); + + if (ep93xx_start_hw(dev)) { + napi_disable(&ep->napi); + ep93xx_free_buffers(ep); + return -EIO; + } + + spin_lock_init(&ep->rx_lock); + ep->rx_pointer = 0; + ep->tx_clean_pointer = 0; + ep->tx_pointer = 0; + spin_lock_init(&ep->tx_pending_lock); + ep->tx_pending = 0; + + err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); + if (err) { + napi_disable(&ep->napi); + ep93xx_stop_hw(dev); + ep93xx_free_buffers(ep); + return err; + } + + wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE); + + netif_start_queue(dev); + + return 0; +} + +static int ep93xx_close(struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + + napi_disable(&ep->napi); + netif_stop_queue(dev); + + wrl(ep, REG_GIINTMSK, 0); + free_irq(ep->irq, dev); + ep93xx_stop_hw(dev); + ep93xx_free_buffers(ep); + + return 0; +} + +static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return generic_mii_ioctl(&ep->mii, data, cmd, NULL); +} + +static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + return mii_ethtool_gset(&ep->mii, cmd); +} + +static int ep93xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + return mii_ethtool_sset(&ep->mii, cmd); +} + +static int ep93xx_nway_reset(struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + 
return mii_nway_restart(&ep->mii); +} + +static u32 ep93xx_get_link(struct net_device *dev) +{ + struct ep93xx_priv *ep = netdev_priv(dev); + return mii_link_ok(&ep->mii); +} + +static const struct ethtool_ops ep93xx_ethtool_ops = { + .get_drvinfo = ep93xx_get_drvinfo, + .get_settings = ep93xx_get_settings, + .set_settings = ep93xx_set_settings, + .nway_reset = ep93xx_nway_reset, + .get_link = ep93xx_get_link, +}; + +static const struct net_device_ops ep93xx_netdev_ops = { + .ndo_open = ep93xx_open, + .ndo_stop = ep93xx_close, + .ndo_start_xmit = ep93xx_xmit, + .ndo_do_ioctl = ep93xx_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) +{ + struct net_device *dev; + + dev = alloc_etherdev(sizeof(struct ep93xx_priv)); + if (dev == NULL) + return NULL; + + memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); + + dev->ethtool_ops = &ep93xx_ethtool_ops; + dev->netdev_ops = &ep93xx_netdev_ops; + + dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; + + return dev; +} + + +static int ep93xx_eth_remove(struct platform_device *pdev) +{ + struct net_device *dev; + struct ep93xx_priv *ep; + + dev = platform_get_drvdata(pdev); + if (dev == NULL) + return 0; + + ep = netdev_priv(dev); + + /* @@@ Force down. */ + unregister_netdev(dev); + ep93xx_free_buffers(ep); + + if (ep->base_addr != NULL) + iounmap(ep->base_addr); + + if (ep->res != NULL) { + release_resource(ep->res); + kfree(ep->res); + } + + free_netdev(dev); + + return 0; +} + +static int ep93xx_eth_probe(struct platform_device *pdev) +{ + struct ep93xx_eth_data *data; + struct net_device *dev; + struct ep93xx_priv *ep; + struct resource *mem; + int irq; + int err; + + if (pdev == NULL) + return -ENODEV; + data = dev_get_platdata(&pdev->dev); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (!mem || irq < 0) + return -ENXIO; + + dev = ep93xx_dev_alloc(data); + if (dev == NULL) { + err = -ENOMEM; + goto err_out; + } + ep = netdev_priv(dev); + ep->dev = dev; + SET_NETDEV_DEV(dev, &pdev->dev); + netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); + + platform_set_drvdata(pdev, dev); + + ep->res = request_mem_region(mem->start, resource_size(mem), + dev_name(&pdev->dev)); + if (ep->res == NULL) { + dev_err(&pdev->dev, "Could not reserve memory region\n"); + err = -ENOMEM; + goto err_out; + } + + ep->base_addr = ioremap(mem->start, resource_size(mem)); + if (ep->base_addr == NULL) { + dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); + err = -EIO; + goto err_out; + } + ep->irq = irq; + + ep->mii.phy_id = data->phy_id; + ep->mii.phy_id_mask = 0x1f; + ep->mii.reg_num_mask = 0x1f; + ep->mii.dev = dev; + ep->mii.mdio_read = ep93xx_mdio_read; + ep->mii.mdio_write = ep93xx_mdio_write; + ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. 
*/ + + if (is_zero_ether_addr(dev->dev_addr)) + eth_hw_addr_random(dev); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto err_out; + } + + printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n", + dev->name, ep->irq, dev->dev_addr); + + return 0; + +err_out: + ep93xx_eth_remove(pdev); + return err; +} + + +static struct platform_driver ep93xx_eth_driver = { + .probe = ep93xx_eth_probe, + .remove = ep93xx_eth_remove, + .driver = { + .name = "ep93xx-eth", + }, +}; + +module_platform_driver(ep93xx_eth_driver); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ep93xx-eth"); diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c new file mode 100644 index 000000000..07719676c --- /dev/null +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -0,0 +1,633 @@ +/* mac89x0.c: A Crystal Semiconductor CS89[02]0 driver for linux. */ +/* + Written 1996 by Russell Nelson, with reference to skeleton.c + written 1993-1994 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached at nelson@crynwr.com, Crynwr + Software, 11 Grant St., Potsdam, NY 13676 + + Changelog: + + Mike Cruse : mcruse@cti-ltd.com + : Changes for Linux 2.0 compatibility. + : Added dev_id parameter in net_interrupt(), + : request_irq() and free_irq(). Just NULL for now. + + Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros + : in net_open() and net_close() so kerneld would know + : that the module is in use and wouldn't eject the + : driver prematurely. + + Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c + : as an example. Disabled autoprobing in init_module(), + : not a good thing to do to other devices while Linux + : is running from all accounts. + + Alan Cox : Removed 1.2 support, added 2.1 extra counters. + + David Huggins-Daines + + Split this off into mac89x0.c, and gutted it of all parts which are + not relevant to the existing CS8900 cards on the Macintosh + (i.e. basically the Daynaport CS and LC cards). To be precise: + + * Removed all the media-detection stuff, because these cards are + TP-only. + + * Lobotomized the ISA interrupt bogosity, because these cards use + a hardwired NuBus interrupt and a magic ISAIRQ value in the card. + + * Basically eliminated everything not relevant to getting the + cards minimally functioning on the Macintosh. + + I might add that these cards are badly designed even from the Mac + standpoint, in that Dayna, in their infinite wisdom, used NuBus slot + I/O space and NuBus interrupts for these cards, but neglected to + provide anything even remotely resembling a NuBus ROM. Therefore we + have to probe for them in a brain-damaged ISA-like fashion. + + Arnaldo Carvalho de Melo - 11/01/2001 + check kmalloc and release the allocated memory on failure in + mac89x0_probe and in init_module + use local_irq_{save,restore}(flags) in net_get_stat, not just + local_irq_{dis,en}able() +*/ + +static char *version = +"cs89x0.c:v1.02 11/26/96 Russell Nelson \n"; + +/* ======================= configure the driver here ======================= */ + +/* use 0 for production, 1 for verification, >2 for debug */ +#ifndef NET_DEBUG +#define NET_DEBUG 0 +#endif + +/* ======================= end of configuration ======================= */ + + +/* Always include 'config.h' first in case the user wants to turn on + or override something. 
*/ +#include + +/* + Sources: + + Crynwr packet driver epktisa. + + Crystal Semiconductor data sheets. + +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "cs89x0.h" + +static unsigned int net_debug = NET_DEBUG; + +/* Information that need to be kept for each board. */ +struct net_local { + int chip_type; /* one of: CS8900, CS8920, CS8920M */ + char chip_revision; /* revision letter of the chip ('A'...) */ + int send_cmd; /* the propercommand used to send a packet. */ + int rx_mode; + int curr_rx_cfg; + int send_underrun; /* keep track of how many underruns in a row we get */ + struct sk_buff *skb; +}; + +/* Index to functions, as function prototypes. */ + +#if 0 +extern void reset_chip(struct net_device *dev); +#endif +static int net_open(struct net_device *dev); +static int net_send_packet(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t net_interrupt(int irq, void *dev_id); +static void set_multicast_list(struct net_device *dev); +static void net_rx(struct net_device *dev); +static int net_close(struct net_device *dev); +static struct net_device_stats *net_get_stats(struct net_device *dev); +static int set_mac_address(struct net_device *dev, void *addr); + + +/* Example routines you must write ;->. */ +#define tx_done(dev) 1 + +/* For reading/writing registers ISA-style */ +static inline int +readreg_io(struct net_device *dev, int portno) +{ + nubus_writew(swab16(portno), dev->base_addr + ADD_PORT); + return swab16(nubus_readw(dev->base_addr + DATA_PORT)); +} + +static inline void +writereg_io(struct net_device *dev, int portno, int value) +{ + nubus_writew(swab16(portno), dev->base_addr + ADD_PORT); + nubus_writew(swab16(value), dev->base_addr + DATA_PORT); +} + +/* These are for reading/writing registers in shared memory */ +static inline int +readreg(struct net_device *dev, int portno) +{ + return swab16(nubus_readw(dev->mem_start + portno)); +} + +static inline void +writereg(struct net_device *dev, int portno, int value) +{ + nubus_writew(swab16(value), dev->mem_start + portno); +} + +static const struct net_device_ops mac89x0_netdev_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +/* Probe for the CS8900 card in slot E. We won't bother looking + anywhere else until we have a really good reason to do so. */ +struct net_device * __init mac89x0_probe(int unit) +{ + struct net_device *dev; + static int once_is_enough; + struct net_local *lp; + static unsigned version_printed; + int i, slot; + unsigned rev_type = 0; + unsigned long ioaddr; + unsigned short sig; + int err = -ENODEV; + + if (!MACH_IS_MAC) + return ERR_PTR(-ENODEV); + + dev = alloc_etherdev(sizeof(struct net_local)); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + if (once_is_enough) + goto out; + once_is_enough = 1; + + /* We might have to parameterize this later */ + slot = 0xE; + /* Get out now if there's a real NuBus card in slot E */ + if (nubus_find_slot(slot, NULL) != NULL) + goto out; + + /* The pseudo-ISA bits always live at offset 0x300 (gee, + wonder why...) 
*/ + ioaddr = (unsigned long) + nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE); + { + int card_present; + + card_present = (hwreg_present((void *)ioaddr + 4) && + hwreg_present((void *)ioaddr + DATA_PORT)); + if (!card_present) + goto out; + } + + nubus_writew(0, ioaddr + ADD_PORT); + sig = nubus_readw(ioaddr + DATA_PORT); + if (sig != swab16(CHIP_EISA_ID_SIG)) + goto out; + + /* Initialize the net_device structure. */ + lp = netdev_priv(dev); + + /* Fill in the 'dev' fields. */ + dev->base_addr = ioaddr; + dev->mem_start = (unsigned long) + nubus_slot_addr(slot) | (((slot&0xf) << 20) + MMIOBASE); + dev->mem_end = dev->mem_start + 0x1000; + + /* Turn on shared memory */ + writereg_io(dev, PP_BusCTL, MEMORY_ON); + + /* get the chip type */ + rev_type = readreg(dev, PRODUCT_ID_ADD); + lp->chip_type = rev_type &~ REVISON_BITS; + lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A'; + + /* Check the chip type and revision in order to set the correct send command + CS8920 revision C and CS8900 revision F can use the faster send. */ + lp->send_cmd = TX_AFTER_381; + if (lp->chip_type == CS8900 && lp->chip_revision >= 'F') + lp->send_cmd = TX_NOW; + if (lp->chip_type != CS8900 && lp->chip_revision >= 'C') + lp->send_cmd = TX_NOW; + + if (net_debug && version_printed++ == 0) + printk(version); + + printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#8lx", + dev->name, + lp->chip_type==CS8900?'0':'2', + lp->chip_type==CS8920M?"M":"", + lp->chip_revision, + dev->base_addr); + + /* Try to read the MAC address */ + if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) { + printk("\nmac89x0: No EEPROM, giving up now.\n"); + goto out1; + } else { + for (i = 0; i < ETH_ALEN; i += 2) { + /* Big-endian (why??!) */ + unsigned short s = readreg(dev, PP_IA + i); + dev->dev_addr[i] = s >> 8; + dev->dev_addr[i+1] = s & 0xff; + } + } + + dev->irq = SLOT2IRQ(slot); + + /* print the IRQ and ethernet address. */ + + printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr); + + dev->netdev_ops = &mac89x0_netdev_ops; + + err = register_netdev(dev); + if (err) + goto out1; + return NULL; +out1: + nubus_writew(0, dev->base_addr + ADD_PORT); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +#if 0 +/* This is useful for something, but I don't know what yet. */ +void __init reset_chip(struct net_device *dev) +{ + int reset_start_time; + + writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); + + /* wait 30 ms */ + msleep_interruptible(30); + + /* Wait until the chip is reset */ + reset_start_time = jiffies; + while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2) + ; +} +#endif + +/* Open/initialize the board. This is called (in the current kernel) + sometime after booting when the 'ifconfig' program is run. + + This routine should set everything up anew at each open, even + registers that "should" only need to be set once at boot, so that + there is non-reboot way to recover if something goes wrong. 
+ */ +static int +net_open(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int i; + + /* Disable the interrupt for now */ + writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ); + + /* Grab the interrupt */ + if (request_irq(dev->irq, net_interrupt, 0, "cs89x0", dev)) + return -EAGAIN; + + /* Set up the IRQ - Apparently magic */ + if (lp->chip_type == CS8900) + writereg(dev, PP_CS8900_ISAINT, 0); + else + writereg(dev, PP_CS8920_ISAINT, 0); + + /* set the Ethernet address */ + for (i=0; i < ETH_ALEN/2; i++) + writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8)); + + /* Turn on both receive and transmit operations */ + writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON); + + /* Receive only error free packets addressed to this card */ + lp->rx_mode = 0; + writereg(dev, PP_RxCTL, DEF_RX_ACCEPT); + + lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL; + + writereg(dev, PP_RxCFG, lp->curr_rx_cfg); + + writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL | + TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL); + + writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL | + TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL); + + /* now that we've got our act together, enable everything */ + writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ); + netif_start_queue(dev); + return 0; +} + +static int +net_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long flags; + + if (net_debug > 3) + printk("%s: sent %d byte packet of type %x\n", + dev->name, skb->len, + (skb->data[ETH_ALEN+ETH_ALEN] << 8) + | skb->data[ETH_ALEN+ETH_ALEN+1]); + + /* keep the upload from being interrupted, since we + ask the chip to start transmitting before the + whole packet has been completely uploaded. */ + local_irq_save(flags); + netif_stop_queue(dev); + + /* initiate a transmit sequence */ + writereg(dev, PP_TxCMD, lp->send_cmd); + writereg(dev, PP_TxLength, skb->len); + + /* Test to see if the chip has allocated memory for the packet */ + if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) { + /* Gasp! It hasn't. But that shouldn't happen since + we're waiting for TxOk, so return 1 and requeue this packet. */ + local_irq_restore(flags); + return NETDEV_TX_BUSY; + } + + /* Write the contents of the packet */ + skb_copy_from_linear_data(skb, (void *)(dev->mem_start + PP_TxFrame), + skb->len+1); + + local_irq_restore(flags); + dev_kfree_skb (skb); + + return NETDEV_TX_OK; +} + +/* The typical workload of the driver: + Handle the network interface interrupts. */ +static irqreturn_t net_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct net_local *lp; + int ioaddr, status; + + if (dev == NULL) { + printk ("net_interrupt(): irq %d for unknown device.\n", irq); + return IRQ_NONE; + } + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + + /* we MUST read all the events out of the ISQ, otherwise we'll never + get interrupted again. As a consequence, we can't have any limit + on the number of times we loop in the interrupt handler. The + hardware guarantees that eventually we'll run out of events. Of + course, if you're on a slow machine, and packets are arriving + faster than you can read them off, you're screwed. Hasta la + vista, baby! 
*/ + while ((status = swab16(nubus_readw(dev->base_addr + ISQ_PORT)))) { + if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status); + switch(status & ISQ_EVENT_MASK) { + case ISQ_RECEIVER_EVENT: + /* Got a packet(s). */ + net_rx(dev); + break; + case ISQ_TRANSMITTER_EVENT: + dev->stats.tx_packets++; + netif_wake_queue(dev); + if ((status & TX_OK) == 0) + dev->stats.tx_errors++; + if (status & TX_LOST_CRS) + dev->stats.tx_carrier_errors++; + if (status & TX_SQE_ERROR) + dev->stats.tx_heartbeat_errors++; + if (status & TX_LATE_COL) + dev->stats.tx_window_errors++; + if (status & TX_16_COL) + dev->stats.tx_aborted_errors++; + break; + case ISQ_BUFFER_EVENT: + if (status & READY_FOR_TX) { + /* we tried to transmit a packet earlier, + but inexplicably ran out of buffers. + That shouldn't happen since we only ever + load one packet. Shrug. Do the right + thing anyway. */ + netif_wake_queue(dev); + } + if (status & TX_UNDERRUN) { + if (net_debug > 0) printk("%s: transmit underrun\n", dev->name); + lp->send_underrun++; + if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381; + else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL; + } + break; + case ISQ_RX_MISS_EVENT: + dev->stats.rx_missed_errors += (status >> 6); + break; + case ISQ_TX_COL_EVENT: + dev->stats.collisions += (status >> 6); + break; + } + } + return IRQ_HANDLED; +} + +/* We have a good packet(s), get it/them out of the buffers. */ +static void +net_rx(struct net_device *dev) +{ + struct sk_buff *skb; + int status, length; + + status = readreg(dev, PP_RxStatus); + if ((status & RX_OK) == 0) { + dev->stats.rx_errors++; + if (status & RX_RUNT) + dev->stats.rx_length_errors++; + if (status & RX_EXTRA_DATA) + dev->stats.rx_length_errors++; + if ((status & RX_CRC_ERROR) && + !(status & (RX_EXTRA_DATA|RX_RUNT))) + /* per str 172 */ + dev->stats.rx_crc_errors++; + if (status & RX_DRIBBLE) + dev->stats.rx_frame_errors++; + return; + } + + length = readreg(dev, PP_RxLength); + /* Malloc up new buffer. */ + skb = alloc_skb(length, GFP_ATOMIC); + if (skb == NULL) { + printk("%s: Memory squeeze, dropping packet.\n", dev->name); + dev->stats.rx_dropped++; + return; + } + skb_put(skb, length); + + skb_copy_to_linear_data(skb, (void *)(dev->mem_start + PP_RxFrame), + length); + + if (net_debug > 3)printk("%s: received %d byte packet of type %x\n", + dev->name, length, + (skb->data[ETH_ALEN+ETH_ALEN] << 8) + | skb->data[ETH_ALEN+ETH_ALEN+1]); + + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; +} + +/* The inverse routine to net_open(). */ +static int +net_close(struct net_device *dev) +{ + + writereg(dev, PP_RxCFG, 0); + writereg(dev, PP_TxCFG, 0); + writereg(dev, PP_BufCFG, 0); + writereg(dev, PP_BusCTL, 0); + + netif_stop_queue(dev); + + free_irq(dev->irq, dev); + + /* Update the statistics here. */ + + return 0; + +} + +/* Get the current statistics. This may be called with the card open or + closed. */ +static struct net_device_stats * +net_get_stats(struct net_device *dev) +{ + unsigned long flags; + + local_irq_save(flags); + /* Update the statistics from the device registers. 
*/ + dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6); + dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6); + local_irq_restore(flags); + + return &dev->stats; +} + +static void set_multicast_list(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + + if(dev->flags&IFF_PROMISC) + { + lp->rx_mode = RX_ALL_ACCEPT; + } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) { + /* The multicast-accept list is initialized to accept-all, and we + rely on higher-level filtering for now. */ + lp->rx_mode = RX_MULTCAST_ACCEPT; + } + else + lp->rx_mode = 0; + + writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode); + + /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */ + writereg(dev, PP_RxCFG, lp->curr_rx_cfg | + (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0)); +} + + +static int set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *saddr = addr; + int i; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + printk("%s: Setting MAC address to %pM\n", dev->name, dev->dev_addr); + + /* set the Ethernet address */ + for (i=0; i < ETH_ALEN/2; i++) + writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8)); + + return 0; +} + +#ifdef MODULE + +static struct net_device *dev_cs89x0; +static int debug; + +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)"); +MODULE_LICENSE("GPL"); + +int __init +init_module(void) +{ + net_debug = debug; + dev_cs89x0 = mac89x0_probe(-1); + if (IS_ERR(dev_cs89x0)) { + printk(KERN_WARNING "mac89x0.c: No card found\n"); + return PTR_ERR(dev_cs89x0); + } + return 0; +} + +void +cleanup_module(void) +{ + unregister_netdev(dev_cs89x0); + nubus_writew(0, dev_cs89x0->base_addr + ADD_PORT); + free_netdev(dev_cs89x0); +} +#endif /* MODULE */ diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig new file mode 100644 index 000000000..1c7b884e3 --- /dev/null +++ b/drivers/net/ethernet/cisco/Kconfig @@ -0,0 +1,23 @@ +# +# Cisco device configuration +# + +config NET_VENDOR_CISCO + bool "Cisco devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Cisco cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_CISCO + +source "drivers/net/ethernet/cisco/enic/Kconfig" + +endif # NET_VENDOR_CISCO diff --git a/drivers/net/ethernet/cisco/Makefile b/drivers/net/ethernet/cisco/Makefile new file mode 100644 index 000000000..6c7437bc4 --- /dev/null +++ b/drivers/net/ethernet/cisco/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Cisco device drivers. +# + +obj-$(CONFIG_ENIC) += enic/ diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig new file mode 100644 index 000000000..b63f8d8a4 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/Kconfig @@ -0,0 +1,9 @@ +# +# Cisco device configuration +# + +config ENIC + tristate "Cisco VIC Ethernet NIC Support" + depends on PCI + ---help--- + This enables the support for the Cisco VIC Ethernet card. 
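A minimal, self-contained sketch of how the mac89x0 interrupt handler earlier in this patch dispatches on the CS89x0 ISQ register. It is not part of the patch: the decode_isq() helper, the sample ISQ words and the hosted-C printf output are invented for illustration, and only the ISQ_* constants are taken from cs89x0.h above. The low six bits (ISQ_EVENT_MASK) select the event type; for the RxMiss and TxCol events the remaining high bits carry a counter, which is why net_interrupt() adds (status >> 6) to the statistics.

#include <stdio.h>
#include <stdint.h>

#define ISQ_EVENT_MASK        0x003F  /* low 6 bits select the event type */
#define ISQ_RECEIVER_EVENT    0x04
#define ISQ_TRANSMITTER_EVENT 0x08
#define ISQ_BUFFER_EVENT      0x0c
#define ISQ_RX_MISS_EVENT     0x10
#define ISQ_TX_COL_EVENT      0x12

/* Decode one ISQ word the same way net_interrupt() dispatches on it. */
static void decode_isq(uint16_t status)
{
	switch (status & ISQ_EVENT_MASK) {
	case ISQ_RECEIVER_EVENT:
		printf("RxEvent, flag bits 0x%04x\n",
		       (unsigned int)(status & ~ISQ_EVENT_MASK));
		break;
	case ISQ_TRANSMITTER_EVENT:
		printf("TxEvent, flag bits 0x%04x\n",
		       (unsigned int)(status & ~ISQ_EVENT_MASK));
		break;
	case ISQ_BUFFER_EVENT:
		printf("BufEvent, flag bits 0x%04x\n",
		       (unsigned int)(status & ~ISQ_EVENT_MASK));
		break;
	case ISQ_RX_MISS_EVENT:
		/* counter lives in bits 15..6, same shift the driver uses */
		printf("RxMiss count %u\n", (unsigned int)(status >> 6));
		break;
	case ISQ_TX_COL_EVENT:
		printf("TxCol count %u\n", (unsigned int)(status >> 6));
		break;
	default:
		printf("empty/unknown ISQ word 0x%04x\n", (unsigned int)status);
	}
}

int main(void)
{
	/* sample values, chosen for illustration only */
	decode_isq(0x0104);                        /* RxEvent with RX_OK set */
	decode_isq((3u << 6) | ISQ_TX_COL_EVENT);  /* three collisions reported */
	return 0;
}

Built as ordinary hosted C (e.g. cc isq_demo.c), this prints the decoded event for each sample word; the real driver performs the same masking on values read from ISQ_PORT inside its interrupt loop.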
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile new file mode 100644 index 000000000..aadcaf787 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_ENIC) := enic.o + +enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ + enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ + enic_ethtool.o enic_api.o enic_clsf.o + diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h new file mode 100644 index 000000000..d6dd1b4ed --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/cq_desc.h @@ -0,0 +1,80 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _CQ_DESC_H_ +#define _CQ_DESC_H_ + +/* + * Completion queue descriptor types + */ +enum cq_desc_types { + CQ_DESC_TYPE_WQ_ENET = 0, + CQ_DESC_TYPE_DESC_COPY = 1, + CQ_DESC_TYPE_WQ_EXCH = 2, + CQ_DESC_TYPE_RQ_ENET = 3, + CQ_DESC_TYPE_RQ_FCP = 4, +}; + +/* Completion queue descriptor: 16B + * + * All completion queues have this basic layout. The + * type_specfic area is unique for each completion + * queue type. + */ +struct cq_desc { + __le16 completed_index; + __le16 q_number; + u8 type_specfic[11]; + u8 type_color; +}; + +#define CQ_DESC_TYPE_BITS 4 +#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) +#define CQ_DESC_COLOR_MASK 1 +#define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_Q_NUM_BITS 10 +#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) +#define CQ_DESC_COMP_NDX_BITS 12 +#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) + +static inline void cq_desc_dec(const struct cq_desc *desc_arg, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +#endif /* _CQ_DESC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h new file mode 100644 index 000000000..ac37cacc6 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h @@ -0,0 +1,185 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. 
All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _CQ_ENET_DESC_H_ +#define _CQ_ENET_DESC_H_ + +#include "cq_desc.h" + +/* Ethernet completion queue descriptor: 16B */ +struct cq_enet_wq_desc { + __le16 completed_index; + __le16 q_number; + u8 reserved[11]; + u8 type_color; +}; + +static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +/* Completion queue descriptor: Ethernet receive queue, 16B */ +struct cq_enet_rq_desc { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 type_color; +}; + +#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) +#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) +#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15) + +#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \ + ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1) +#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4 +#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5 +#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6 + +#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14) + +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14 +#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \ + ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1) +#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14) +#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15) + +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS 12 +#define CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_CFI_MASK (0x1 << 12) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS 3 +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_MASK \ + ((1 << CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_BITS) - 1) +#define CQ_ENET_RQ_DESC_VLAN_TCI_USER_PRIO_SHIFT 13 + +#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8 +#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \ + ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1) +#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8 + +#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0) +#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1) +#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1) +#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3) +#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4) +#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5) +#define 
CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) +#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) + +static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index, + u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, + u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, + u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof, + u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, + u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, + u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) +{ + u16 completed_index_flags; + u16 q_number_rss_type_flags; + u16 bytes_written_flags; + + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); + + completed_index_flags = le16_to_cpu(desc->completed_index_flags); + q_number_rss_type_flags = + le16_to_cpu(desc->q_number_rss_type_flags); + bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + + *ingress_port = (completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; + *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? + 1 : 0; + *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? + 1 : 0; + *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? + 1 : 0; + + *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & + CQ_ENET_RQ_DESC_RSS_TYPE_MASK); + *csum_not_calc = (q_number_rss_type_flags & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; + + *rss_hash = le32_to_cpu(desc->rss_hash); + + *bytes_written = bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_error = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; + *vlan_stripped = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; + + /* + * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) + */ + *vlan_tci = le16_to_cpu(desc->vlan); + + if (*fcoe) { + *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & + CQ_ENET_RQ_DESC_FCOE_SOF_MASK); + *fcoe_fc_crc_ok = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; + *fcoe_enc_error = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; + *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & + CQ_ENET_RQ_DESC_FCOE_EOF_MASK); + *checksum = 0; + } else { + *fcoe_sof = 0; + *fcoe_fc_crc_ok = 0; + *fcoe_enc_error = 0; + *fcoe_eof = 0; + *checksum = le16_to_cpu(desc->checksum_fcoe); + } + + *tcp_udp_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; + *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; + *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; + *ipv4_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; + *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; + *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; + *ipv4_fragment = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; + *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; +} + +#endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h new file mode 100644 index 000000000..84b6a2b46 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -0,0 +1,266 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _ENIC_H_ +#define _ENIC_H_ + +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "vnic_rss.h" +#include + +#define DRV_NAME "enic" +#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" +#define DRV_VERSION "2.1.1.83" +#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" + +#define ENIC_BARS_MAX 6 + +#define ENIC_WQ_MAX 8 +#define ENIC_RQ_MAX 8 +#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) +#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) + +#define ENIC_AIC_LARGE_PKT_DIFF 3 + +struct enic_msix_entry { + int requested; + char devname[IFNAMSIZ]; + irqreturn_t (*isr)(int, void *); + void *devid; +}; + +/* Store only the lower range. Higher range is given by fw. */ +struct enic_intr_mod_range { + u32 small_pkt_range_start; + u32 large_pkt_range_start; +}; + +struct enic_intr_mod_table { + u32 rx_rate; + u32 range_percent; +}; + +#define ENIC_MAX_LINK_SPEEDS 3 +#define ENIC_LINK_SPEED_10G 10000 +#define ENIC_LINK_SPEED_4G 4000 +#define ENIC_LINK_40G_INDEX 2 +#define ENIC_LINK_10G_INDEX 1 +#define ENIC_LINK_4G_INDEX 0 +#define ENIC_RX_COALESCE_RANGE_END 125 +#define ENIC_AIC_TS_BREAK 100 + +struct enic_rx_coal { + u32 small_pkt_range_start; + u32 large_pkt_range_start; + u32 range_end; + u32 use_adaptive_rx_coalesce; +}; + +/* priv_flags */ +#define ENIC_SRIOV_ENABLED (1 << 0) + +/* enic port profile set flags */ +#define ENIC_PORT_REQUEST_APPLIED (1 << 0) +#define ENIC_SET_REQUEST (1 << 1) +#define ENIC_SET_NAME (1 << 2) +#define ENIC_SET_INSTANCE (1 << 3) +#define ENIC_SET_HOST (1 << 4) + +struct enic_port_profile { + u32 set; + u8 request; + char name[PORT_PROFILE_MAX]; + u8 instance_uuid[PORT_UUID_MAX]; + u8 host_uuid[PORT_UUID_MAX]; + u8 vf_mac[ETH_ALEN]; + u8 mac_addr[ETH_ALEN]; +}; + +/* enic_rfs_fltr_node - rfs filter node in hash table + * @@keys: IPv4 5 tuple + * @flow_id: flow_id of clsf filter provided by kernel + * @fltr_id: filter id of clsf filter returned by adaptor + * @rq_id: desired rq index + * @node: hlist_node + */ +struct enic_rfs_fltr_node { + struct flow_keys keys; + u32 flow_id; + u16 fltr_id; + u16 rq_id; + struct hlist_node node; +}; + +/* enic_rfs_flw_tbl - rfs flow table + * @max: Maximum number of filters vNIC supports + * @free: Number of free filters available + * @toclean: hash table index to clean next + * @ht_head: hash table list head + * @lock: spin lock + * @rfs_may_expire: timer function for enic_rps_may_expire_flow + */ +struct enic_rfs_flw_tbl { + u16 max; + int free; + +#define ENIC_RFS_FLW_BITSHIFT (10) +#define ENIC_RFS_FLW_MASK ((1 << ENIC_RFS_FLW_BITSHIFT) - 1) + u16 toclean:ENIC_RFS_FLW_BITSHIFT; + struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT]; + spinlock_t lock; + struct 
timer_list rfs_may_expire; +}; + +/* Per-instance private data structure */ +struct enic { + struct net_device *netdev; + struct pci_dev *pdev; + struct vnic_enet_config config; + struct vnic_dev_bar bar[ENIC_BARS_MAX]; + struct vnic_dev *vdev; + struct timer_list notify_timer; + struct work_struct reset; + struct work_struct change_mtu_work; + struct msix_entry msix_entry[ENIC_INTR_MAX]; + struct enic_msix_entry msix[ENIC_INTR_MAX]; + u32 msg_enable; + spinlock_t devcmd_lock; + u8 mac_addr[ETH_ALEN]; + unsigned int flags; + unsigned int priv_flags; + unsigned int mc_count; + unsigned int uc_count; + u32 port_mtu; + struct enic_rx_coal rx_coalesce_setting; + u32 rx_coalesce_usecs; + u32 tx_coalesce_usecs; +#ifdef CONFIG_PCI_IOV + u16 num_vfs; +#endif + spinlock_t enic_api_lock; + struct enic_port_profile *pp; + + /* work queue cache line section */ + ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; + spinlock_t wq_lock[ENIC_WQ_MAX]; + unsigned int wq_count; + u16 loop_enable; + u16 loop_tag; + + /* receive queue cache line section */ + ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; + unsigned int rq_count; + u64 rq_truncated_pkts; + u64 rq_bad_fcs; + struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX]; + + /* interrupt resource cache line section */ + ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; + unsigned int intr_count; + u32 __iomem *legacy_pba; /* memory-mapped */ + + /* completion queue cache line section */ + ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; + unsigned int cq_count; + struct enic_rfs_flw_tbl rfs_h; + u32 rx_copybreak; + u8 rss_key[ENIC_RSS_LEN]; + struct vnic_gen_stats gen_stats; +}; + +static inline struct device *enic_get_dev(struct enic *enic) +{ + return &(enic->pdev->dev); +} + +static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) +{ + return rq; +} + +static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) +{ + return enic->rq_count + wq; +} + +static inline unsigned int enic_legacy_io_intr(void) +{ + return 0; +} + +static inline unsigned int enic_legacy_err_intr(void) +{ + return 1; +} + +static inline unsigned int enic_legacy_notify_intr(void) +{ + return 2; +} + +static inline unsigned int enic_msix_rq_intr(struct enic *enic, + unsigned int rq) +{ + return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; +} + +static inline unsigned int enic_msix_wq_intr(struct enic *enic, + unsigned int wq) +{ + return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; +} + +static inline unsigned int enic_msix_err_intr(struct enic *enic) +{ + return enic->rq_count + enic->wq_count; +} + +static inline unsigned int enic_msix_notify_intr(struct enic *enic) +{ + return enic->rq_count + enic->wq_count + 1; +} + +static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr) +{ + if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) { + net_warn_ratelimited("%s: PCI dma mapping failed!\n", + enic->netdev->name); + enic->gen_stats.dma_map_error++; + + return -ENOMEM; + } + + return 0; +} + +void enic_reset_addr_lists(struct enic *enic); +int enic_sriov_enabled(struct enic *enic); +int enic_is_valid_vf(struct enic *enic, int vf); +int enic_is_dynamic(struct enic *enic); +void enic_set_ethtool_ops(struct net_device *netdev); +int __enic_set_rsskey(struct enic *enic); + +#endif /* _ENIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c new file mode 100644 index 000000000..b161f2452 --- /dev/null +++ 
b/drivers/net/ethernet/cisco/enic/enic_api.c @@ -0,0 +1,48 @@ +/** + * Copyright 2013 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include + +#include "vnic_dev.h" +#include "vnic_devcmd.h" + +#include "enic_res.h" +#include "enic.h" +#include "enic_api.h" + +int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + int err; + struct enic *enic = netdev_priv(netdev); + struct vnic_dev *vdev = enic->vdev; + + spin_lock(&enic->enic_api_lock); + spin_lock_bh(&enic->devcmd_lock); + + vnic_dev_cmd_proxy_by_index_start(vdev, vf); + err = vnic_dev_cmd(vdev, cmd, a0, a1, wait); + vnic_dev_cmd_proxy_end(vdev); + + spin_unlock_bh(&enic->devcmd_lock); + spin_unlock(&enic->enic_api_lock); + + return err; +} +EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index); diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h new file mode 100644 index 000000000..6b9f9255a --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_api.h @@ -0,0 +1,30 @@ +/** + * Copyright 2013 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __ENIC_API_H__ +#define __ENIC_API_H__ + +#include + +#include "vnic_dev.h" +#include "vnic_devcmd.h" + +int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait); + +#endif diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c new file mode 100644 index 000000000..0be6850be --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -0,0 +1,284 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "enic_res.h" +#include "enic_clsf.h" + +/* enic_addfltr_5t - Add ipv4 5tuple filter + * @enic: enic struct of vnic + * @keys: flow_keys of ipv4 5tuple + * @rq: rq number to steer to + * + * This function returns filter_id(hardware_id) of the filter + * added. In case of error it returns an negative number. 
+ */ +int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) +{ + int res; + struct filter data; + + switch (keys->ip_proto) { + case IPPROTO_TCP: + data.u.ipv4.protocol = PROTO_TCP; + break; + case IPPROTO_UDP: + data.u.ipv4.protocol = PROTO_UDP; + break; + default: + return -EPROTONOSUPPORT; + }; + data.type = FILTER_IPV4_5TUPLE; + data.u.ipv4.src_addr = ntohl(keys->src); + data.u.ipv4.dst_addr = ntohl(keys->dst); + data.u.ipv4.src_port = ntohs(keys->port16[0]); + data.u.ipv4.dst_port = ntohs(keys->port16[1]); + data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; + + spin_lock_bh(&enic->devcmd_lock); + res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data); + spin_unlock_bh(&enic->devcmd_lock); + res = (res == 0) ? rq : res; + + return res; +} + +/* enic_delfltr - Delete clsf filter + * @enic: enic struct of vnic + * @filter_id: filter_id(hardware_id) of the filter to be deleted + * + * This function returns zero in case of success, a negative number in case of + * error. + */ +int enic_delfltr(struct enic *enic, u16 filter_id) +{ + int ret; + + spin_lock_bh(&enic->devcmd_lock); + ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL); + spin_unlock_bh(&enic->devcmd_lock); + + return ret; +} + +/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members + * @enic: enic data + */ +void enic_rfs_flw_tbl_init(struct enic *enic) +{ + int i; + + spin_lock_init(&enic->rfs_h.lock); + for (i = 0; i <= ENIC_RFS_FLW_MASK; i++) + INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]); + enic->rfs_h.max = enic->config.num_arfs; + enic->rfs_h.free = enic->rfs_h.max; + enic->rfs_h.toclean = 0; + enic_rfs_timer_start(enic); +} + +void enic_rfs_flw_tbl_free(struct enic *enic) +{ + int i; + + enic_rfs_timer_stop(enic); + spin_lock_bh(&enic->rfs_h.lock); + enic->rfs_h.free = 0; + for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct enic_rfs_fltr_node *n; + + hhead = &enic->rfs_h.ht_head[i]; + hlist_for_each_entry_safe(n, tmp, hhead, node) { + enic_delfltr(enic, n->fltr_id); + hlist_del(&n->node); + kfree(n); + } + } + spin_unlock_bh(&enic->rfs_h.lock); +} + +struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id) +{ + int i; + + for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct enic_rfs_fltr_node *n; + + hhead = &enic->rfs_h.ht_head[i]; + hlist_for_each_entry_safe(n, tmp, hhead, node) + if (n->fltr_id == fltr_id) + return n; + } + + return NULL; +} + +#ifdef CONFIG_RFS_ACCEL +void enic_flow_may_expire(unsigned long data) +{ + struct enic *enic = (struct enic *)data; + bool res; + int j; + + spin_lock_bh(&enic->rfs_h.lock); + for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct enic_rfs_fltr_node *n; + + hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++]; + hlist_for_each_entry_safe(n, tmp, hhead, node) { + res = rps_may_expire_flow(enic->netdev, n->rq_id, + n->flow_id, n->fltr_id); + if (res) { + res = enic_delfltr(enic, n->fltr_id); + if (unlikely(res)) + continue; + hlist_del(&n->node); + kfree(n); + enic->rfs_h.free++; + } + } + } + spin_unlock_bh(&enic->rfs_h.lock); + mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); +} + +static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h, + struct flow_keys *k) +{ + struct enic_rfs_fltr_node *tpos; + + hlist_for_each_entry(tpos, h, node) + if (tpos->keys.src == k->src && + tpos->keys.dst == k->dst && + tpos->keys.ports == k->ports && 
+ tpos->keys.ip_proto == k->ip_proto && + tpos->keys.n_proto == k->n_proto) + return tpos; + return NULL; +} + +int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct flow_keys keys; + struct enic_rfs_fltr_node *n; + struct enic *enic; + u16 tbl_idx; + int res, i; + + enic = netdev_priv(dev); + res = skb_flow_dissect(skb, &keys); + if (!res || keys.n_proto != htons(ETH_P_IP) || + (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP)) + return -EPROTONOSUPPORT; + + tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; + spin_lock_bh(&enic->rfs_h.lock); + n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); + + if (n) { /* entry already present */ + if (rxq_index == n->rq_id) { + res = -EEXIST; + goto ret_unlock; + } + + /* desired rq changed for the flow, we need to delete + * the old fltr and add a new one + * + * The moment we delete the fltr, the upcoming pkts + * are put in the default rq based on rss. When we add + * the new filter, upcoming pkts are put in the desired queue. + * This could cause ooo pkts. + * + * Let's first try adding the new fltr and then del the old one. + */ + i = --enic->rfs_h.free; + /* clsf tbl is full, we have to del the old fltr first */ + if (unlikely(i < 0)) { + enic->rfs_h.free++; + res = enic_delfltr(enic, n->fltr_id); + if (unlikely(res < 0)) + goto ret_unlock; + res = enic_addfltr_5t(enic, &keys, rxq_index); + if (res < 0) { + hlist_del(&n->node); + enic->rfs_h.free++; + goto ret_unlock; + } + /* add new fltr 1st then del old fltr */ + } else { + int ret; + + res = enic_addfltr_5t(enic, &keys, rxq_index); + if (res < 0) { + enic->rfs_h.free++; + goto ret_unlock; + } + ret = enic_delfltr(enic, n->fltr_id); + /* deleting the old fltr failed. Add the old fltr to the list. + * enic_flow_may_expire() will try to delete it later. 
+ */ + if (unlikely(ret < 0)) { + struct enic_rfs_fltr_node *d; + struct hlist_head *head; + + head = &enic->rfs_h.ht_head[tbl_idx]; + d = kmalloc(sizeof(*d), GFP_ATOMIC); + if (d) { + d->fltr_id = n->fltr_id; + INIT_HLIST_NODE(&d->node); + hlist_add_head(&d->node, head); + } + } else { + enic->rfs_h.free++; + } + } + n->rq_id = rxq_index; + n->fltr_id = res; + n->flow_id = flow_id; + /* entry not present */ + } else { + i = --enic->rfs_h.free; + if (i <= 0) { + enic->rfs_h.free++; + res = -EBUSY; + goto ret_unlock; + } + + n = kmalloc(sizeof(*n), GFP_ATOMIC); + if (!n) { + res = -ENOMEM; + enic->rfs_h.free++; + goto ret_unlock; + } + + res = enic_addfltr_5t(enic, &keys, rxq_index); + if (res < 0) { + kfree(n); + enic->rfs_h.free++; + goto ret_unlock; + } + n->rq_id = rxq_index; + n->fltr_id = res; + n->flow_id = flow_id; + n->keys = keys; + INIT_HLIST_NODE(&n->node); + hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]); + } + +ret_unlock: + spin_unlock_bh(&enic->rfs_h.lock); + return res; +} + +#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h new file mode 100644 index 000000000..6aa9f89d0 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h @@ -0,0 +1,37 @@ +#ifndef _ENIC_CLSF_H_ +#define _ENIC_CLSF_H_ + +#include "vnic_dev.h" +#include "enic.h" + +#define ENIC_CLSF_EXPIRE_COUNT 128 + +int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq); +int enic_delfltr(struct enic *enic, u16 filter_id); +void enic_rfs_flw_tbl_init(struct enic *enic); +void enic_rfs_flw_tbl_free(struct enic *enic); +struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id); + +#ifdef CONFIG_RFS_ACCEL +int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +void enic_flow_may_expire(unsigned long data); + +static inline void enic_rfs_timer_start(struct enic *enic) +{ + init_timer(&enic->rfs_h.rfs_may_expire); + enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire; + enic->rfs_h.rfs_may_expire.data = (unsigned long)enic; + mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); +} + +static inline void enic_rfs_timer_stop(struct enic *enic) +{ + del_timer_sync(&enic->rfs_h.rfs_may_expire); +} +#else +static inline void enic_rfs_timer_start(struct enic *enic) {} +static inline void enic_rfs_timer_stop(struct enic *enic) {} +#endif /* CONFIG_RFS_ACCEL */ + +#endif /* _ENIC_CLSF_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c new file mode 100644 index 000000000..f8d2a6a34 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_dev.c @@ -0,0 +1,236 @@ +/* + * Copyright 2011 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include + +#include "vnic_dev.h" +#include "vnic_vic.h" +#include "enic_res.h" +#include "enic.h" +#include "enic_dev.h" + +int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_fw_info(enic->vdev, fw_info); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_stats_dump(enic->vdev, vstats); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_add_station_addr(struct enic *enic) +{ + int err; + + if (!is_valid_ether_addr(enic->netdev->dev_addr)) + return -EADDRNOTAVAIL; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_del_station_addr(struct enic *enic) +{ + int err; + + if (!is_valid_ether_addr(enic->netdev->dev_addr)) + return -EADDRNOTAVAIL; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, + int broadcast, int promisc, int allmulti) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_packet_filter(enic->vdev, directed, + multicast, broadcast, promisc, allmulti); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_add_addr(struct enic *enic, const u8 *addr) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_add_addr(enic->vdev, addr); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_del_addr(struct enic *enic, const u8 *addr) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_del_addr(enic->vdev, addr); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_notify_unset(struct enic *enic) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_notify_unset(enic->vdev); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_hang_notify(struct enic *enic) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_hang_notify(enic->vdev); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, + IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_enable(struct enic *enic) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_enable_wait(enic->vdev); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_disable(struct enic *enic) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_disable(enic->vdev); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_intr_coal_timer_info(struct enic *enic) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = vnic_dev_intr_coal_timer_info(enic->vdev); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +/* rtnl lock is held */ +int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct enic *enic = netdev_priv(netdev); + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = enic_add_vlan(enic, vid); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +/* rtnl lock is 
held */ +int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct enic *enic = netdev_priv(netdev); + int err; + + spin_lock_bh(&enic->devcmd_lock); + err = enic_del_vlan(enic, vid); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +int enic_dev_status_to_errno(int devcmd_status) +{ + switch (devcmd_status) { + case ERR_SUCCESS: + return 0; + case ERR_EINVAL: + return -EINVAL; + case ERR_EFAULT: + return -EFAULT; + case ERR_EPERM: + return -EPERM; + case ERR_EBUSY: + return -EBUSY; + case ERR_ECMDUNKNOWN: + case ERR_ENOTSUPPORTED: + return -EOPNOTSUPP; + case ERR_EBADSTATE: + return -EINVAL; + case ERR_ENOMEM: + return -ENOMEM; + case ERR_ETIMEDOUT: + return -ETIMEDOUT; + case ERR_ELINKDOWN: + return -ENETDOWN; + case ERR_EINPROGRESS: + return -EINPROGRESS; + case ERR_EMAXRES: + default: + return (devcmd_status < 0) ? devcmd_status : -1; + } +} diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h new file mode 100644 index 000000000..f5bb058b3 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_dev.h @@ -0,0 +1,60 @@ +/* + * Copyright 2011 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _ENIC_DEV_H_ +#define _ENIC_DEV_H_ + +#include "vnic_dev.h" +#include "vnic_vic.h" + +/* + * Calls the devcmd function given by argument vnicdevcmdfn. + * If vf argument is valid, it proxies the devcmd + */ +#define ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnicdevcmdfn, ...) 
\ + do { \ + spin_lock_bh(&enic->devcmd_lock); \ + if (enic_is_valid_vf(enic, vf)) { \ + vnic_dev_cmd_proxy_by_index_start(enic->vdev, vf); \ + err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \ + vnic_dev_cmd_proxy_end(enic->vdev); \ + } else { \ + err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \ + } \ + spin_unlock_bh(&enic->devcmd_lock); \ + } while (0) + +int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info); +int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats); +int enic_dev_add_station_addr(struct enic *enic); +int enic_dev_del_station_addr(struct enic *enic); +int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, + int broadcast, int promisc, int allmulti); +int enic_dev_add_addr(struct enic *enic, const u8 *addr); +int enic_dev_del_addr(struct enic *enic, const u8 *addr); +int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); +int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +int enic_dev_notify_unset(struct enic *enic); +int enic_dev_hang_notify(struct enic *enic); +int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic); +int enic_dev_enable(struct enic *enic); +int enic_dev_disable(struct enic *enic); +int enic_dev_intr_coal_timer_info(struct enic *enic); +int enic_dev_status_to_errno(int devcmd_status); + +#endif /* _ENIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c new file mode 100644 index 000000000..68d47b196 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -0,0 +1,507 @@ +/** + * Copyright 2013 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include + +#include "enic_res.h" +#include "enic.h" +#include "enic_dev.h" +#include "enic_clsf.h" +#include "vnic_rss.h" +#include "vnic_stats.h" + +struct enic_stat { + char name[ETH_GSTRING_LEN]; + unsigned int index; +}; + +#define ENIC_TX_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \ +} + +#define ENIC_RX_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \ +} + +#define ENIC_GEN_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\ +} + +static const struct enic_stat enic_tx_stats[] = { + ENIC_TX_STAT(tx_frames_ok), + ENIC_TX_STAT(tx_unicast_frames_ok), + ENIC_TX_STAT(tx_multicast_frames_ok), + ENIC_TX_STAT(tx_broadcast_frames_ok), + ENIC_TX_STAT(tx_bytes_ok), + ENIC_TX_STAT(tx_unicast_bytes_ok), + ENIC_TX_STAT(tx_multicast_bytes_ok), + ENIC_TX_STAT(tx_broadcast_bytes_ok), + ENIC_TX_STAT(tx_drops), + ENIC_TX_STAT(tx_errors), + ENIC_TX_STAT(tx_tso), +}; + +static const struct enic_stat enic_rx_stats[] = { + ENIC_RX_STAT(rx_frames_ok), + ENIC_RX_STAT(rx_frames_total), + ENIC_RX_STAT(rx_unicast_frames_ok), + ENIC_RX_STAT(rx_multicast_frames_ok), + ENIC_RX_STAT(rx_broadcast_frames_ok), + ENIC_RX_STAT(rx_bytes_ok), + ENIC_RX_STAT(rx_unicast_bytes_ok), + ENIC_RX_STAT(rx_multicast_bytes_ok), + ENIC_RX_STAT(rx_broadcast_bytes_ok), + ENIC_RX_STAT(rx_drop), + ENIC_RX_STAT(rx_no_bufs), + ENIC_RX_STAT(rx_errors), + ENIC_RX_STAT(rx_rss), + ENIC_RX_STAT(rx_crc_errors), + ENIC_RX_STAT(rx_frames_64), + ENIC_RX_STAT(rx_frames_127), + ENIC_RX_STAT(rx_frames_255), + ENIC_RX_STAT(rx_frames_511), + ENIC_RX_STAT(rx_frames_1023), + ENIC_RX_STAT(rx_frames_1518), + ENIC_RX_STAT(rx_frames_to_max), +}; + +static const struct enic_stat enic_gen_stats[] = { + ENIC_GEN_STAT(dma_map_error), +}; + +static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); +static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); +static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats); + +static void enic_intr_coal_set_rx(struct enic *enic, u32 timer) +{ + int i; + int intr; + + for (i = 0; i < enic->rq_count; i++) { + intr = enic_msix_rq_intr(enic, i); + vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); + } +} + +static int enic_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(netdev)) { + ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static void enic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_devcmd_fw_info *fw_info; + int err; + + err = enic_dev_fw_info(enic, &fw_info); + /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info + * For other failures, like devcmd failure, we return previously + * recorded info. 
+ */ + if (err == -ENOMEM) + return; + + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, fw_info->fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(enic->pdev), + sizeof(drvinfo->bus_info)); +} + +static void enic_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < enic_n_tx_stats; i++) { + memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < enic_n_rx_stats; i++) { + memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < enic_n_gen_stats; i++) { + memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int enic_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats; + default: + return -EOPNOTSUPP; + } +} + +static void enic_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_stats *vstats; + unsigned int i; + int err; + + err = enic_dev_stats_dump(enic, &vstats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. + */ + if (err == -ENOMEM) + return; + + for (i = 0; i < enic_n_tx_stats; i++) + *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; + for (i = 0; i < enic_n_rx_stats; i++) + *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index]; + for (i = 0; i < enic_n_gen_stats; i++) + *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index]; +} + +static u32 enic_get_msglevel(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + return enic->msg_enable; +} + +static void enic_set_msglevel(struct net_device *netdev, u32 value) +{ + struct enic *enic = netdev_priv(netdev); + enic->msg_enable = value; +} + +static int enic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; + + ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; + ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; + if (rxcoal->use_adaptive_rx_coalesce) + ecmd->use_adaptive_rx_coalesce = 1; + ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start; + ecmd->rx_coalesce_usecs_high = rxcoal->range_end; + + return 0; +} + +static int enic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd) +{ + struct enic *enic = netdev_priv(netdev); + u32 tx_coalesce_usecs; + u32 rx_coalesce_usecs; + u32 rx_coalesce_usecs_low; + u32 rx_coalesce_usecs_high; + u32 coalesce_usecs_max; + unsigned int i, intr; + struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; + + coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); + tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, + coalesce_usecs_max); + rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, + coalesce_usecs_max); + + rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low, + coalesce_usecs_max); + rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high, + coalesce_usecs_max); + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + if (tx_coalesce_usecs 
!= rx_coalesce_usecs) + return -EINVAL; + if (ecmd->use_adaptive_rx_coalesce || + ecmd->rx_coalesce_usecs_low || + ecmd->rx_coalesce_usecs_high) + return -EINVAL; + + intr = enic_legacy_io_intr(); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + tx_coalesce_usecs); + break; + case VNIC_DEV_INTR_MODE_MSI: + if (tx_coalesce_usecs != rx_coalesce_usecs) + return -EINVAL; + if (ecmd->use_adaptive_rx_coalesce || + ecmd->rx_coalesce_usecs_low || + ecmd->rx_coalesce_usecs_high) + return -EINVAL; + + vnic_intr_coalescing_timer_set(&enic->intr[0], + tx_coalesce_usecs); + break; + case VNIC_DEV_INTR_MODE_MSIX: + if (ecmd->rx_coalesce_usecs_high && + (rx_coalesce_usecs_high < + rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) + return -EINVAL; + + for (i = 0; i < enic->wq_count; i++) { + intr = enic_msix_wq_intr(enic, i); + vnic_intr_coalescing_timer_set(&enic->intr[intr], + tx_coalesce_usecs); + } + + rxcoal->use_adaptive_rx_coalesce = + !!ecmd->use_adaptive_rx_coalesce; + if (!rxcoal->use_adaptive_rx_coalesce) + enic_intr_coal_set_rx(enic, rx_coalesce_usecs); + + if (ecmd->rx_coalesce_usecs_high) { + rxcoal->range_end = rx_coalesce_usecs_high; + rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; + rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + + ENIC_AIC_LARGE_PKT_DIFF; + } + break; + default: + break; + } + + enic->tx_coalesce_usecs = tx_coalesce_usecs; + enic->rx_coalesce_usecs = rx_coalesce_usecs; + + return 0; +} + +static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + int j, ret = 0, cnt = 0; + + cmd->data = enic->rfs_h.max - enic->rfs_h.free; + for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct enic_rfs_fltr_node *n; + + hhead = &enic->rfs_h.ht_head[j]; + hlist_for_each_entry_safe(n, tmp, hhead, node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = n->fltr_id; + cnt++; + } + } + cmd->rule_cnt = cnt; + + return ret; +} + +static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct enic_rfs_fltr_node *n; + + n = htbl_fltr_search(enic, (u16)fsp->location); + if (!n) + return -EINVAL; + switch (n->keys.ip_proto) { + case IPPROTO_TCP: + fsp->flow_type = TCP_V4_FLOW; + break; + case IPPROTO_UDP: + fsp->flow_type = UDP_V4_FLOW; + break; + default: + return -EINVAL; + break; + } + + fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src; + fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0; + + fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst; + fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0; + + fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0]; + fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0; + + fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1]; + fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0; + + fsp->ring_cookie = n->rq_id; + + return 0; +} + +static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct enic *enic = netdev_priv(dev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = enic->rq_count; + break; + case ETHTOOL_GRXCLSRLCNT: + spin_lock_bh(&enic->rfs_h.lock); + cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free; + cmd->data = enic->rfs_h.max; + spin_unlock_bh(&enic->rfs_h.lock); + break; + case ETHTOOL_GRXCLSRLALL: + spin_lock_bh(&enic->rfs_h.lock); + ret = enic_grxclsrlall(enic, cmd, rule_locs); + spin_unlock_bh(&enic->rfs_h.lock); + break; + case ETHTOOL_GRXCLSRULE: + spin_lock_bh(&enic->rfs_h.lock); + ret = 
enic_grxclsrule(enic, cmd); + spin_unlock_bh(&enic->rfs_h.lock); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int enic_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct enic *enic = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = enic->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int enic_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct enic *enic = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + enic->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static u32 enic_get_rxfh_key_size(struct net_device *netdev) +{ + return ENIC_RSS_LEN; +} + +static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey, + u8 *hfunc) +{ + struct enic *enic = netdev_priv(netdev); + + if (hkey) + memcpy(hkey, enic->rss_key, ENIC_RSS_LEN); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int enic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *hkey, const u8 hfunc) +{ + struct enic *enic = netdev_priv(netdev); + + if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) || + indir) + return -EINVAL; + + if (hkey) + memcpy(enic->rss_key, hkey, ENIC_RSS_LEN); + + return __enic_set_rsskey(enic); +} + +static const struct ethtool_ops enic_ethtool_ops = { + .get_settings = enic_get_settings, + .get_drvinfo = enic_get_drvinfo, + .get_msglevel = enic_get_msglevel, + .set_msglevel = enic_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = enic_get_strings, + .get_sset_count = enic_get_sset_count, + .get_ethtool_stats = enic_get_ethtool_stats, + .get_coalesce = enic_get_coalesce, + .set_coalesce = enic_set_coalesce, + .get_rxnfc = enic_get_rxnfc, + .get_tunable = enic_get_tunable, + .set_tunable = enic_set_tunable, + .get_rxfh_key_size = enic_get_rxfh_key_size, + .get_rxfh = enic_get_rxfh, + .set_rxfh = enic_set_rxfh, +}; + +void enic_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &enic_ethtool_ops; +} diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c new file mode 100644 index 000000000..eadae1b41 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -0,0 +1,2724 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_RFS_ACCEL +#include +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +#endif +#include + +#include "cq_enet_desc.h" +#include "vnic_dev.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_vic.h" +#include "enic_res.h" +#include "enic.h" +#include "enic_dev.h" +#include "enic_pp.h" +#include "enic_clsf.h" + +#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) +#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) +#define MAX_TSO (1 << 16) +#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) + +#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ +#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ +#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ + +#define RX_COPYBREAK_DEFAULT 256 + +/* Supported devices */ +static const struct pci_device_id enic_id_table[] = { + { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, + { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) }, + { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, + { 0, } /* end of table */ +}; + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("Scott Feldman "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, enic_id_table); + +#define ENIC_LARGE_PKT_THRESHOLD 1000 +#define ENIC_MAX_COALESCE_TIMERS 10 +/* Interrupt moderation table, which will be used to decide the + * coalescing timer values + * {rx_rate in Mbps, mapping percentage of the range} + */ +static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = { + {4000, 0}, + {4400, 10}, + {5060, 20}, + {5230, 30}, + {5540, 40}, + {5820, 50}, + {6120, 60}, + {6435, 70}, + {6745, 80}, + {7000, 90}, + {0xFFFFFFFF, 100} +}; + +/* This table helps the driver to pick different ranges for rx coalescing + * timer depending on the link speed. + */ +static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = { + {0, 0}, /* 0 - 4 Gbps */ + {0, 3}, /* 4 - 10 Gbps */ + {3, 6}, /* 10 - 40 Gbps */ +}; + +int enic_is_dynamic(struct enic *enic) +{ + return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; +} + +int enic_sriov_enabled(struct enic *enic) +{ + return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 
1 : 0; +} + +static int enic_is_sriov_vf(struct enic *enic) +{ + return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; +} + +int enic_is_valid_vf(struct enic *enic, int vf) +{ +#ifdef CONFIG_PCI_IOV + return vf >= 0 && vf < enic->num_vfs; +#else + return 0; +#endif +} + +static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + struct enic *enic = vnic_dev_priv(wq->vdev); + + if (buf->sop) + pci_unmap_single(enic->pdev, buf->dma_addr, + buf->len, PCI_DMA_TODEVICE); + else + pci_unmap_page(enic->pdev, buf->dma_addr, + buf->len, PCI_DMA_TODEVICE); + + if (buf->os_buf) + dev_kfree_skb_any(buf->os_buf); +} + +static void enic_wq_free_buf(struct vnic_wq *wq, + struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) +{ + enic_free_wq_buf(wq, buf); +} + +static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque) +{ + struct enic *enic = vnic_dev_priv(vdev); + + spin_lock(&enic->wq_lock[q_number]); + + vnic_wq_service(&enic->wq[q_number], cq_desc, + completed_index, enic_wq_free_buf, + opaque); + + if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && + vnic_wq_desc_avail(&enic->wq[q_number]) >= + (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) + netif_wake_subqueue(enic->netdev, q_number); + + spin_unlock(&enic->wq_lock[q_number]); + + return 0; +} + +static void enic_log_q_error(struct enic *enic) +{ + unsigned int i; + u32 error_status; + + for (i = 0; i < enic->wq_count; i++) { + error_status = vnic_wq_error_status(&enic->wq[i]); + if (error_status) + netdev_err(enic->netdev, "WQ[%d] error_status %d\n", + i, error_status); + } + + for (i = 0; i < enic->rq_count; i++) { + error_status = vnic_rq_error_status(&enic->rq[i]); + if (error_status) + netdev_err(enic->netdev, "RQ[%d] error_status %d\n", + i, error_status); + } +} + +static void enic_msglvl_check(struct enic *enic) +{ + u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); + + if (msg_enable != enic->msg_enable) { + netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", + enic->msg_enable, msg_enable); + enic->msg_enable = msg_enable; + } +} + +static void enic_mtu_check(struct enic *enic) +{ + u32 mtu = vnic_dev_mtu(enic->vdev); + struct net_device *netdev = enic->netdev; + + if (mtu && mtu != enic->port_mtu) { + enic->port_mtu = mtu; + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { + mtu = max_t(int, ENIC_MIN_MTU, + min_t(int, ENIC_MAX_MTU, mtu)); + if (mtu != netdev->mtu) + schedule_work(&enic->change_mtu_work); + } else { + if (mtu < netdev->mtu) + netdev_warn(netdev, + "interface MTU (%d) set higher " + "than switch port MTU (%d)\n", + netdev->mtu, mtu); + } + } +} + +static void enic_link_check(struct enic *enic) +{ + int link_status = vnic_dev_link_status(enic->vdev); + int carrier_ok = netif_carrier_ok(enic->netdev); + + if (link_status && !carrier_ok) { + netdev_info(enic->netdev, "Link UP\n"); + netif_carrier_on(enic->netdev); + } else if (!link_status && carrier_ok) { + netdev_info(enic->netdev, "Link DOWN\n"); + netif_carrier_off(enic->netdev); + } +} + +static void enic_notify_check(struct enic *enic) +{ + enic_msglvl_check(enic); + enic_mtu_check(enic); + enic_link_check(enic); +} + +#define ENIC_TEST_INTR(pba, i) (pba & (1 << i)) + +static irqreturn_t enic_isr_legacy(int irq, void *data) +{ + struct net_device *netdev = data; + struct enic *enic = netdev_priv(netdev); + unsigned int io_intr = enic_legacy_io_intr(); + unsigned int err_intr = enic_legacy_err_intr(); + unsigned int 
notify_intr = enic_legacy_notify_intr(); + u32 pba; + + vnic_intr_mask(&enic->intr[io_intr]); + + pba = vnic_intr_legacy_pba(enic->legacy_pba); + if (!pba) { + vnic_intr_unmask(&enic->intr[io_intr]); + return IRQ_NONE; /* not our interrupt */ + } + + if (ENIC_TEST_INTR(pba, notify_intr)) { + enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[notify_intr]); + } + + if (ENIC_TEST_INTR(pba, err_intr)) { + vnic_intr_return_all_credits(&enic->intr[err_intr]); + enic_log_q_error(enic); + /* schedule recovery from WQ/RQ error */ + schedule_work(&enic->reset); + return IRQ_HANDLED; + } + + if (ENIC_TEST_INTR(pba, io_intr)) + napi_schedule_irqoff(&enic->napi[0]); + else + vnic_intr_unmask(&enic->intr[io_intr]); + + return IRQ_HANDLED; +} + +static irqreturn_t enic_isr_msi(int irq, void *data) +{ + struct enic *enic = data; + + /* With MSI, there is no sharing of interrupts, so this is + * our interrupt and there is no need to ack it. The device + * is not providing per-vector masking, so the OS will not + * write to PCI config space to mask/unmask the interrupt. + * We're using mask_on_assertion for MSI, so the device + * automatically masks the interrupt when the interrupt is + * generated. Later, when exiting polling, the interrupt + * will be unmasked (see enic_poll). + * + * Also, the device uses the same PCIe Traffic Class (TC) + * for Memory Write data and MSI, so there are no ordering + * issues; the MSI will always arrive at the Root Complex + * _after_ corresponding Memory Writes (i.e. descriptor + * writes). + */ + + napi_schedule_irqoff(&enic->napi[0]); + + return IRQ_HANDLED; +} + +static irqreturn_t enic_isr_msix(int irq, void *data) +{ + struct napi_struct *napi = data; + + napi_schedule_irqoff(napi); + + return IRQ_HANDLED; +} + +static irqreturn_t enic_isr_msix_err(int irq, void *data) +{ + struct enic *enic = data; + unsigned int intr = enic_msix_err_intr(enic); + + vnic_intr_return_all_credits(&enic->intr[intr]); + + enic_log_q_error(enic); + + /* schedule recovery from WQ/RQ error */ + schedule_work(&enic->reset); + + return IRQ_HANDLED; +} + +static irqreturn_t enic_isr_msix_notify(int irq, void *data) +{ + struct enic *enic = data; + unsigned int intr = enic_msix_notify_intr(enic); + + enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[intr]); + + return IRQ_HANDLED; +} + +static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, unsigned int len_left, + int loopback) +{ + const skb_frag_t *frag; + dma_addr_t dma_addr; + + /* Queue additional data fragments */ + for (frag = skb_shinfo(skb)->frags; len_left; frag++) { + len_left -= skb_frag_size(frag); + dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) + return -ENOMEM; + enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag), + (len_left == 0), /* EOP? */ + loopback); + } + + return 0; +} + +static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, int vlan_tag_insert, + unsigned int vlan_tag, int loopback) +{ + unsigned int head_len = skb_headlen(skb); + unsigned int len_left = skb->len - head_len; + int eop = (len_left == 0); + dma_addr_t dma_addr; + int err = 0; + + dma_addr = pci_map_single(enic->pdev, skb->data, head_len, + PCI_DMA_TODEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) + return -ENOMEM; + + /* Queue the main skb fragment. 
The fragments are no larger + * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less + * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor + * per fragment is queued. + */ + enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert, + vlan_tag, eop, loopback); + + if (!eop) + err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + + return err; +} + +static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, int vlan_tag_insert, + unsigned int vlan_tag, int loopback) +{ + unsigned int head_len = skb_headlen(skb); + unsigned int len_left = skb->len - head_len; + unsigned int hdr_len = skb_checksum_start_offset(skb); + unsigned int csum_offset = hdr_len + skb->csum_offset; + int eop = (len_left == 0); + dma_addr_t dma_addr; + int err = 0; + + dma_addr = pci_map_single(enic->pdev, skb->data, head_len, + PCI_DMA_TODEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) + return -ENOMEM; + + /* Queue the main skb fragment. The fragments are no larger + * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less + * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor + * per fragment is queued. + */ + enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset, + hdr_len, vlan_tag_insert, vlan_tag, eop, + loopback); + + if (!eop) + err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); + + return err; +} + +static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, + struct sk_buff *skb, unsigned int mss, + int vlan_tag_insert, unsigned int vlan_tag, + int loopback) +{ + unsigned int frag_len_left = skb_headlen(skb); + unsigned int len_left = skb->len - frag_len_left; + unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int eop = (len_left == 0); + unsigned int len; + dma_addr_t dma_addr; + unsigned int offset = 0; + skb_frag_t *frag; + + /* Preload TCP csum field with IP pseudo hdr calculated + * with IP length set to zero. HW will later add in length + * to each TCP segment resulting from the TSO. 
+ */ + + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } + + /* Queue WQ_ENET_MAX_DESC_LEN length descriptors + * for the main skb fragment + */ + while (frag_len_left) { + len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); + dma_addr = pci_map_single(enic->pdev, skb->data + offset, len, + PCI_DMA_TODEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) + return -ENOMEM; + enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len, + vlan_tag_insert, vlan_tag, + eop && (len == frag_len_left), loopback); + frag_len_left -= len; + offset += len; + } + + if (eop) + return 0; + + /* Queue WQ_ENET_MAX_DESC_LEN length descriptors + * for additional data fragments + */ + for (frag = skb_shinfo(skb)->frags; len_left; frag++) { + len_left -= skb_frag_size(frag); + frag_len_left = skb_frag_size(frag); + offset = 0; + + while (frag_len_left) { + len = min(frag_len_left, + (unsigned int)WQ_ENET_MAX_DESC_LEN); + dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, + offset, len, + DMA_TO_DEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) + return -ENOMEM; + enic_queue_wq_desc_cont(wq, skb, dma_addr, len, + (len_left == 0) && + (len == frag_len_left),/*EOP*/ + loopback); + frag_len_left -= len; + offset += len; + } + } + + return 0; +} + +static inline void enic_queue_wq_skb(struct enic *enic, + struct vnic_wq *wq, struct sk_buff *skb) +{ + unsigned int mss = skb_shinfo(skb)->gso_size; + unsigned int vlan_tag = 0; + int vlan_tag_insert = 0; + int loopback = 0; + int err; + + if (skb_vlan_tag_present(skb)) { + /* VLAN tag from trunking driver */ + vlan_tag_insert = 1; + vlan_tag = skb_vlan_tag_get(skb); + } else if (enic->loop_enable) { + vlan_tag = enic->loop_tag; + loopback = 1; + } + + if (mss) + err = enic_queue_wq_skb_tso(enic, wq, skb, mss, + vlan_tag_insert, vlan_tag, + loopback); + else if (skb->ip_summed == CHECKSUM_PARTIAL) + err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, + vlan_tag, loopback); + else + err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, + vlan_tag, loopback); + if (unlikely(err)) { + struct vnic_wq_buf *buf; + + buf = wq->to_use->prev; + /* while not EOP of previous pkt && queue not empty. + * For all non EOP bufs, os_buf is NULL. + */ + while (!buf->os_buf && (buf->next != wq->to_clean)) { + enic_free_wq_buf(wq, buf); + wq->ring.desc_avail++; + buf = buf->prev; + } + wq->to_use = buf->next; + dev_kfree_skb(skb); + } +} + +/* netif_tx_lock held, process context with BHs disabled, or BH */ +static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_wq *wq; + unsigned int txq_map; + struct netdev_queue *txq; + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + txq_map = skb_get_queue_mapping(skb) % enic->wq_count; + wq = &enic->wq[txq_map]; + txq = netdev_get_tx_queue(netdev, txq_map); + + /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, + * which is very likely. In the off chance it's going to take + * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb. 
+ */ + + if (skb_shinfo(skb)->gso_size == 0 && + skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && + skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + spin_lock(&enic->wq_lock[txq_map]); + + if (vnic_wq_desc_avail(wq) < + skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { + netif_tx_stop_queue(txq); + /* This is a hard error, log it */ + netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); + spin_unlock(&enic->wq_lock[txq_map]); + return NETDEV_TX_BUSY; + } + + enic_queue_wq_skb(enic, wq, skb); + + if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) + netif_tx_stop_queue(txq); + if (!skb->xmit_more || netif_xmit_stopped(txq)) + vnic_wq_doorbell(wq); + + spin_unlock(&enic->wq_lock[txq_map]); + + return NETDEV_TX_OK; +} + +/* dev_base_lock rwlock held, nominally process context */ +static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_stats *stats; + int err; + + err = enic_dev_stats_dump(enic, &stats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. + */ + if (err == -ENOMEM) + return net_stats; + + net_stats->tx_packets = stats->tx.tx_frames_ok; + net_stats->tx_bytes = stats->tx.tx_bytes_ok; + net_stats->tx_errors = stats->tx.tx_errors; + net_stats->tx_dropped = stats->tx.tx_drops; + + net_stats->rx_packets = stats->rx.rx_frames_ok; + net_stats->rx_bytes = stats->rx.rx_bytes_ok; + net_stats->rx_errors = stats->rx.rx_errors; + net_stats->multicast = stats->rx.rx_multicast_frames_ok; + net_stats->rx_over_errors = enic->rq_truncated_pkts; + net_stats->rx_crc_errors = enic->rq_bad_fcs; + net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; + + return net_stats; +} + +static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr) +{ + struct enic *enic = netdev_priv(netdev); + + if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) { + unsigned int mc_count = netdev_mc_count(netdev); + + netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n", + ENIC_MULTICAST_PERFECT_FILTERS, mc_count); + + return -ENOSPC; + } + + enic_dev_add_addr(enic, mc_addr); + enic->mc_count++; + + return 0; +} + +static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr) +{ + struct enic *enic = netdev_priv(netdev); + + enic_dev_del_addr(enic, mc_addr); + enic->mc_count--; + + return 0; +} + +static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr) +{ + struct enic *enic = netdev_priv(netdev); + + if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) { + unsigned int uc_count = netdev_uc_count(netdev); + + netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n", + ENIC_UNICAST_PERFECT_FILTERS, uc_count); + + return -ENOSPC; + } + + enic_dev_add_addr(enic, uc_addr); + enic->uc_count++; + + return 0; +} + +static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr) +{ + struct enic *enic = netdev_priv(netdev); + + enic_dev_del_addr(enic, uc_addr); + enic->uc_count--; + + return 0; +} + +void enic_reset_addr_lists(struct enic *enic) +{ + struct net_device *netdev = enic->netdev; + + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); + + enic->mc_count = 0; + enic->uc_count = 0; + enic->flags = 0; +} + +static int enic_set_mac_addr(struct net_device *netdev, char *addr) +{ + struct enic *enic = netdev_priv(netdev); + + if 
(enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { + if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr)) + return -EADDRNOTAVAIL; + } else { + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + } + + memcpy(netdev->dev_addr, addr, netdev->addr_len); + + return 0; +} + +static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) +{ + struct enic *enic = netdev_priv(netdev); + struct sockaddr *saddr = p; + char *addr = saddr->sa_data; + int err; + + if (netif_running(enic->netdev)) { + err = enic_dev_del_station_addr(enic); + if (err) + return err; + } + + err = enic_set_mac_addr(netdev, addr); + if (err) + return err; + + if (netif_running(enic->netdev)) { + err = enic_dev_add_station_addr(enic); + if (err) + return err; + } + + return err; +} + +static int enic_set_mac_address(struct net_device *netdev, void *p) +{ + struct sockaddr *saddr = p; + char *addr = saddr->sa_data; + struct enic *enic = netdev_priv(netdev); + int err; + + err = enic_dev_del_station_addr(enic); + if (err) + return err; + + err = enic_set_mac_addr(netdev, addr); + if (err) + return err; + + return enic_dev_add_station_addr(enic); +} + +/* netif_tx_lock held, BHs disabled */ +static void enic_set_rx_mode(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + int directed = 1; + int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; + int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; + int promisc = (netdev->flags & IFF_PROMISC) || + netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS; + int allmulti = (netdev->flags & IFF_ALLMULTI) || + netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS; + unsigned int flags = netdev->flags | + (allmulti ? IFF_ALLMULTI : 0) | + (promisc ? IFF_PROMISC : 0); + + if (enic->flags != flags) { + enic->flags = flags; + enic_dev_packet_filter(enic, directed, + multicast, broadcast, promisc, allmulti); + } + + if (!promisc) { + __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync); + if (!allmulti) + __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync); + } +} + +/* netif_tx_lock held, BHs disabled */ +static void enic_tx_timeout(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + schedule_work(&enic->reset); +} + +static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct enic *enic = netdev_priv(netdev); + struct enic_port_profile *pp; + int err; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) { + if (vf == PORT_SELF_VF) { + memcpy(pp->vf_mac, mac, ETH_ALEN); + return 0; + } else { + /* + * For sriov vf's set the mac in hw + */ + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, + vnic_dev_set_mac_addr, mac); + return enic_dev_status_to_errno(err); + } + } else + return -EINVAL; +} + +static int enic_set_vf_port(struct net_device *netdev, int vf, + struct nlattr *port[]) +{ + struct enic *enic = netdev_priv(netdev); + struct enic_port_profile prev_pp; + struct enic_port_profile *pp; + int err = 0, restore_pp = 1; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + if (!port[IFLA_PORT_REQUEST]) + return -EOPNOTSUPP; + + memcpy(&prev_pp, pp, sizeof(*enic->pp)); + memset(pp, 0, sizeof(*enic->pp)); + + pp->set |= ENIC_SET_REQUEST; + pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); + + if (port[IFLA_PORT_PROFILE]) { + pp->set |= ENIC_SET_NAME; + memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), + PORT_PROFILE_MAX); + } + + if (port[IFLA_PORT_INSTANCE_UUID]) { + pp->set |= 
ENIC_SET_INSTANCE; + memcpy(pp->instance_uuid, + nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); + } + + if (port[IFLA_PORT_HOST_UUID]) { + pp->set |= ENIC_SET_HOST; + memcpy(pp->host_uuid, + nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); + } + + if (vf == PORT_SELF_VF) { + /* Special case handling: mac came from IFLA_VF_MAC */ + if (!is_zero_ether_addr(prev_pp.vf_mac)) + memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN); + + if (is_zero_ether_addr(netdev->dev_addr)) + eth_hw_addr_random(netdev); + } else { + /* SR-IOV VF: get mac from adapter */ + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, + vnic_dev_get_mac_addr, pp->mac_addr); + if (err) { + netdev_err(netdev, "Error getting mac for vf %d\n", vf); + memcpy(pp, &prev_pp, sizeof(*pp)); + return enic_dev_status_to_errno(err); + } + } + + err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); + if (err) { + if (restore_pp) { + /* Things are still the way they were: Implicit + * DISASSOCIATE failed + */ + memcpy(pp, &prev_pp, sizeof(*pp)); + } else { + memset(pp, 0, sizeof(*pp)); + if (vf == PORT_SELF_VF) + eth_zero_addr(netdev->dev_addr); + } + } else { + /* Set flag to indicate that the port assoc/disassoc + * request has been sent out to fw + */ + pp->set |= ENIC_PORT_REQUEST_APPLIED; + + /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ + if (pp->request == PORT_REQUEST_DISASSOCIATE) { + eth_zero_addr(pp->mac_addr); + if (vf == PORT_SELF_VF) + eth_zero_addr(netdev->dev_addr); + } + } + + if (vf == PORT_SELF_VF) + eth_zero_addr(pp->vf_mac); + + return err; +} + +static int enic_get_vf_port(struct net_device *netdev, int vf, + struct sk_buff *skb) +{ + struct enic *enic = netdev_priv(netdev); + u16 response = PORT_PROFILE_RESPONSE_SUCCESS; + struct enic_port_profile *pp; + int err; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + if (!(pp->set & ENIC_PORT_REQUEST_APPLIED)) + return -ENODATA; + + err = enic_process_get_pp_request(enic, vf, pp->request, &response); + if (err) + return err; + + if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) || + nla_put_u16(skb, IFLA_PORT_RESPONSE, response) || + ((pp->set & ENIC_SET_NAME) && + nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) || + ((pp->set & ENIC_SET_INSTANCE) && + nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, + pp->instance_uuid)) || + ((pp->set & ENIC_SET_HOST) && + nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid))) + goto nla_put_failure; + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) +{ + struct enic *enic = vnic_dev_priv(rq->vdev); + + if (!buf->os_buf) + return; + + pci_unmap_single(enic->pdev, buf->dma_addr, + buf->len, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(buf->os_buf); + buf->os_buf = NULL; +} + +static int enic_rq_alloc_buf(struct vnic_rq *rq) +{ + struct enic *enic = vnic_dev_priv(rq->vdev); + struct net_device *netdev = enic->netdev; + struct sk_buff *skb; + unsigned int len = netdev->mtu + VLAN_ETH_HLEN; + unsigned int os_buf_index = 0; + dma_addr_t dma_addr; + struct vnic_rq_buf *buf = rq->to_use; + + if (buf->os_buf) { + enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, + buf->len); + + return 0; + } + skb = netdev_alloc_skb_ip_align(netdev, len); + if (!skb) + return -ENOMEM; + + dma_addr = pci_map_single(enic->pdev, skb->data, len, + PCI_DMA_FROMDEVICE); + if (unlikely(enic_dma_map_check(enic, dma_addr))) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + 
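+ /* Post the freshly mapped buffer to the RQ descriptor ring so the NIC can DMA the next frame into it */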
enic_queue_rq_desc(rq, skb, os_buf_index, + dma_addr, len); + + return 0; +} + +static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, + u32 pkt_len) +{ + if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len) + pkt_size->large_pkt_bytes_cnt += pkt_len; + else + pkt_size->small_pkt_bytes_cnt += pkt_len; +} + +static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb, + struct vnic_rq_buf *buf, u16 len) +{ + struct enic *enic = netdev_priv(netdev); + struct sk_buff *new_skb; + + if (len > enic->rx_copybreak) + return false; + new_skb = netdev_alloc_skb_ip_align(netdev, len); + if (!new_skb) + return false; + pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len, + DMA_FROM_DEVICE); + memcpy(new_skb->data, (*skb)->data, len); + *skb = new_skb; + + return true; +} + +static void enic_rq_indicate_buf(struct vnic_rq *rq, + struct cq_desc *cq_desc, struct vnic_rq_buf *buf, + int skipped, void *opaque) +{ + struct enic *enic = vnic_dev_priv(rq->vdev); + struct net_device *netdev = enic->netdev; + struct sk_buff *skb; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + + u8 type, color, eop, sop, ingress_port, vlan_stripped; + u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; + u8 packet_error; + u16 q_number, completed_index, bytes_written, vlan_tci, checksum; + u32 rss_hash; + + if (skipped) + return; + + skb = buf->os_buf; + + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, + &type, &color, &q_number, &completed_index, + &ingress_port, &fcoe, &eop, &sop, &rss_type, + &csum_not_calc, &rss_hash, &bytes_written, + &packet_error, &vlan_stripped, &vlan_tci, &checksum, + &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, + &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, + &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, + &fcs_ok); + + if (packet_error) { + + if (!fcs_ok) { + if (bytes_written > 0) + enic->rq_bad_fcs++; + else if (bytes_written == 0) + enic->rq_truncated_pkts++; + } + + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + buf->os_buf = NULL; + + return; + } + + if (eop && bytes_written > 0) { + + /* Good receive + */ + + if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) { + buf->os_buf = NULL; + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); + } + prefetch(skb->data - NET_IP_ALIGN); + + skb_put(skb, bytes_written); + skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, q_number); + if (netdev->features & NETIF_F_RXHASH) { + skb_set_hash(skb, rss_hash, + (rss_type & + (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); + } + + /* Hardware does not provide whole packet checksum. It only + * provides pseudo checksum. Since hw validates the packet + * checksum but not provide us the checksum value. use + * CHECSUM_UNNECESSARY. 
+ */ + if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok && + ipv4_csum_ok) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (vlan_stripped) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + + skb_mark_napi_id(skb, &enic->napi[rq->index]); + if (enic_poll_busy_polling(rq) || + !(netdev->features & NETIF_F_GRO)) + netif_receive_skb(skb); + else + napi_gro_receive(&enic->napi[q_number], skb); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_intr_update_pkt_size(&cq->pkt_size_counter, + bytes_written); + } else { + + /* Buffer overflow + */ + + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + buf->os_buf = NULL; + } +} + +static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque) +{ + struct enic *enic = vnic_dev_priv(vdev); + + vnic_rq_service(&enic->rq[q_number], cq_desc, + completed_index, VNIC_RQ_RETURN_DESC, + enic_rq_indicate_buf, opaque); + + return 0; +} + +static int enic_poll(struct napi_struct *napi, int budget) +{ + struct net_device *netdev = napi->dev; + struct enic *enic = netdev_priv(netdev); + unsigned int cq_rq = enic_cq_rq(enic, 0); + unsigned int cq_wq = enic_cq_wq(enic, 0); + unsigned int intr = enic_legacy_io_intr(); + unsigned int rq_work_to_do = budget; + unsigned int wq_work_to_do = -1; /* no limit */ + unsigned int work_done, rq_work_done = 0, wq_work_done; + int err; + + wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, + enic_wq_service, NULL); + + if (!enic_poll_lock_napi(&enic->rq[cq_rq])) { + if (wq_work_done > 0) + vnic_intr_return_credits(&enic->intr[intr], + wq_work_done, + 0 /* dont unmask intr */, + 0 /* dont reset intr timer */); + return rq_work_done; + } + + if (budget > 0) + rq_work_done = vnic_cq_service(&enic->cq[cq_rq], + rq_work_to_do, enic_rq_service, NULL); + + /* Accumulate intr event credits for this polling + * cycle. An intr event is the completion of a + * a WQ or RQ packet. + */ + + work_done = rq_work_done + wq_work_done; + + if (work_done > 0) + vnic_intr_return_credits(&enic->intr[intr], + work_done, + 0 /* don't unmask intr */, + 0 /* don't reset intr timer */); + + err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); + + /* Buffer allocation failed. Stay in polling + * mode so we can try to fill the ring again. 
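+ * Claiming the full budget below (rq_work_done = rq_work_to_do) skips napi_complete() so this poll runs again.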
+ */ + + if (err) + rq_work_done = rq_work_to_do; + + if (rq_work_done < rq_work_to_do) { + + /* Some work done, but not enough to stay in polling, + * exit polling + */ + + napi_complete(napi); + vnic_intr_unmask(&enic->intr[intr]); + } + enic_poll_unlock_napi(&enic->rq[cq_rq]); + + return rq_work_done; +} + +static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + unsigned int intr = enic_msix_rq_intr(enic, rq->index); + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + u32 timer = cq->tobe_rx_coal_timeval; + + if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { + vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); + cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; + } +} + +static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; + int index; + u32 timer; + u32 range_start; + u32 traffic; + u64 delta; + ktime_t now = ktime_get(); + + delta = ktime_us_delta(now, cq->prev_ts); + if (delta < ENIC_AIC_TS_BREAK) + return; + cq->prev_ts = now; + + traffic = pkt_size_counter->large_pkt_bytes_cnt + + pkt_size_counter->small_pkt_bytes_cnt; + /* The table takes Mbps + * traffic *= 8 => bits + * traffic *= (10^6 / delta) => bps + * traffic /= 10^6 => Mbps + * + * Combining, traffic *= (8 / delta) + */ + + traffic <<= 3; + traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta; + + for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) + if (traffic < mod_table[index].rx_rate) + break; + range_start = (pkt_size_counter->small_pkt_bytes_cnt > + pkt_size_counter->large_pkt_bytes_cnt << 1) ? 
+ rx_coal->small_pkt_range_start : + rx_coal->large_pkt_range_start; + timer = range_start + ((rx_coal->range_end - range_start) * + mod_table[index].range_percent / 100); + /* Damping */ + cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; + + pkt_size_counter->large_pkt_bytes_cnt = 0; + pkt_size_counter->small_pkt_bytes_cnt = 0; +} + +#ifdef CONFIG_RFS_ACCEL +static void enic_free_rx_cpu_rmap(struct enic *enic) +{ + free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap); + enic->netdev->rx_cpu_rmap = NULL; +} + +static void enic_set_rx_cpu_rmap(struct enic *enic) +{ + int i, res; + + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { + enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count); + if (unlikely(!enic->netdev->rx_cpu_rmap)) + return; + for (i = 0; i < enic->rq_count; i++) { + res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap, + enic->msix_entry[i].vector); + if (unlikely(res)) { + enic_free_rx_cpu_rmap(enic); + return; + } + } + } +} + +#else + +static void enic_free_rx_cpu_rmap(struct enic *enic) +{ +} + +static void enic_set_rx_cpu_rmap(struct enic *enic) +{ +} + +#endif /* CONFIG_RFS_ACCEL */ + +#ifdef CONFIG_NET_RX_BUSY_POLL +static int enic_busy_poll(struct napi_struct *napi) +{ + struct net_device *netdev = napi->dev; + struct enic *enic = netdev_priv(netdev); + unsigned int rq = (napi - &enic->napi[0]); + unsigned int cq = enic_cq_rq(enic, rq); + unsigned int intr = enic_msix_rq_intr(enic, rq); + unsigned int work_to_do = -1; /* clean all pkts possible */ + unsigned int work_done; + + if (!enic_poll_lock_poll(&enic->rq[rq])) + return LL_FLUSH_BUSY; + work_done = vnic_cq_service(&enic->cq[cq], work_to_do, + enic_rq_service, NULL); + + if (work_done > 0) + vnic_intr_return_credits(&enic->intr[intr], + work_done, 0, 0); + vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_calc_int_moderation(enic, &enic->rq[rq]); + enic_poll_unlock_poll(&enic->rq[rq]); + + return work_done; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +static int enic_poll_msix_wq(struct napi_struct *napi, int budget) +{ + struct net_device *netdev = napi->dev; + struct enic *enic = netdev_priv(netdev); + unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; + struct vnic_wq *wq = &enic->wq[wq_index]; + unsigned int cq; + unsigned int intr; + unsigned int wq_work_to_do = -1; /* clean all desc possible */ + unsigned int wq_work_done; + unsigned int wq_irq; + + wq_irq = wq->index; + cq = enic_cq_wq(enic, wq_irq); + intr = enic_msix_wq_intr(enic, wq_irq); + wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, + enic_wq_service, NULL); + + vnic_intr_return_credits(&enic->intr[intr], wq_work_done, + 0 /* don't unmask intr */, + 1 /* reset intr timer */); + if (!wq_work_done) { + napi_complete(napi); + vnic_intr_unmask(&enic->intr[intr]); + return 0; + } + + return budget; +} + +static int enic_poll_msix_rq(struct napi_struct *napi, int budget) +{ + struct net_device *netdev = napi->dev; + struct enic *enic = netdev_priv(netdev); + unsigned int rq = (napi - &enic->napi[0]); + unsigned int cq = enic_cq_rq(enic, rq); + unsigned int intr = enic_msix_rq_intr(enic, rq); + unsigned int work_to_do = budget; + unsigned int work_done = 0; + int err; + + if (!enic_poll_lock_napi(&enic->rq[rq])) + return budget; + /* Service RQ + */ + + if (budget > 0) + work_done = vnic_cq_service(&enic->cq[cq], + work_to_do, enic_rq_service, NULL); + + /* Return intr event credits for this polling + * cycle. 
An intr event is the completion of a + * RQ packet. + */ + + if (work_done > 0) + vnic_intr_return_credits(&enic->intr[intr], + work_done, + 0 /* don't unmask intr */, + 0 /* don't reset intr timer */); + + err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); + + /* Buffer allocation failed. Stay in polling mode + * so we can try to fill the ring again. + */ + + if (err) + work_done = work_to_do; + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + /* Call the function which refreshes + * the intr coalescing timer value based on + * the traffic. This is supported only in + * the case of MSI-x mode + */ + enic_calc_int_moderation(enic, &enic->rq[rq]); + + enic_poll_unlock_napi(&enic->rq[rq]); + if (work_done < work_to_do) { + + /* Some work done, but not enough to stay in polling, + * exit polling + */ + + napi_complete(napi); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_set_int_moderation(enic, &enic->rq[rq]); + vnic_intr_unmask(&enic->intr[intr]); + } + + return work_done; +} + +static void enic_notify_timer(unsigned long data) +{ + struct enic *enic = (struct enic *)data; + + enic_notify_check(enic); + + mod_timer(&enic->notify_timer, + round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD)); +} + +static void enic_free_intr(struct enic *enic) +{ + struct net_device *netdev = enic->netdev; + unsigned int i; + + enic_free_rx_cpu_rmap(enic); + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + free_irq(enic->pdev->irq, netdev); + break; + case VNIC_DEV_INTR_MODE_MSI: + free_irq(enic->pdev->irq, enic); + break; + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < ARRAY_SIZE(enic->msix); i++) + if (enic->msix[i].requested) + free_irq(enic->msix_entry[i].vector, + enic->msix[i].devid); + break; + default: + break; + } +} + +static int enic_request_intr(struct enic *enic) +{ + struct net_device *netdev = enic->netdev; + unsigned int i, intr; + int err = 0; + + enic_set_rx_cpu_rmap(enic); + switch (vnic_dev_get_intr_mode(enic->vdev)) { + + case VNIC_DEV_INTR_MODE_INTX: + + err = request_irq(enic->pdev->irq, enic_isr_legacy, + IRQF_SHARED, netdev->name, netdev); + break; + + case VNIC_DEV_INTR_MODE_MSI: + + err = request_irq(enic->pdev->irq, enic_isr_msi, + 0, netdev->name, enic); + break; + + case VNIC_DEV_INTR_MODE_MSIX: + + for (i = 0; i < enic->rq_count; i++) { + intr = enic_msix_rq_intr(enic, i); + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), + "%.11s-rx-%d", netdev->name, i); + enic->msix[intr].isr = enic_isr_msix; + enic->msix[intr].devid = &enic->napi[i]; + } + + for (i = 0; i < enic->wq_count; i++) { + int wq = enic_cq_wq(enic, i); + + intr = enic_msix_wq_intr(enic, i); + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), + "%.11s-tx-%d", netdev->name, i); + enic->msix[intr].isr = enic_isr_msix; + enic->msix[intr].devid = &enic->napi[wq]; + } + + intr = enic_msix_err_intr(enic); + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), + "%.11s-err", netdev->name); + enic->msix[intr].isr = enic_isr_msix_err; + enic->msix[intr].devid = enic; + + intr = enic_msix_notify_intr(enic); + snprintf(enic->msix[intr].devname, + sizeof(enic->msix[intr].devname), + "%.11s-notify", netdev->name); + enic->msix[intr].isr = enic_isr_msix_notify; + enic->msix[intr].devid = enic; + + for (i = 0; i < ARRAY_SIZE(enic->msix); i++) + enic->msix[i].requested = 0; + + for (i = 0; i < enic->intr_count; i++) { + err = request_irq(enic->msix_entry[i].vector, + enic->msix[i].isr, 0, + 
enic->msix[i].devname, + enic->msix[i].devid); + if (err) { + enic_free_intr(enic); + break; + } + enic->msix[i].requested = 1; + } + + break; + + default: + break; + } + + return err; +} + +static void enic_synchronize_irqs(struct enic *enic) +{ + unsigned int i; + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSI: + synchronize_irq(enic->pdev->irq); + break; + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < enic->intr_count; i++) + synchronize_irq(enic->msix_entry[i].vector); + break; + default: + break; + } +} + +static void enic_set_rx_coal_setting(struct enic *enic) +{ + unsigned int speed; + int index = -1; + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + + /* If intr mode is not MSIX, do not do adaptive coalescing */ + if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) { + netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing"); + return; + } + + /* 1. Read the link speed from fw + * 2. Pick the default range for the speed + * 3. Update it in enic->rx_coalesce_setting + */ + speed = vnic_dev_port_speed(enic->vdev); + if (ENIC_LINK_SPEED_10G < speed) + index = ENIC_LINK_40G_INDEX; + else if (ENIC_LINK_SPEED_4G < speed) + index = ENIC_LINK_10G_INDEX; + else + index = ENIC_LINK_4G_INDEX; + + rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start; + rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start; + rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END; + + /* Start with the value provided by UCSM */ + for (index = 0; index < enic->rq_count; index++) + enic->cq[index].cur_rx_coal_timeval = + enic->config.intr_timer_usec; + + rx_coal->use_adaptive_rx_coalesce = 1; +} + +static int enic_dev_notify_set(struct enic *enic) +{ + int err; + + spin_lock_bh(&enic->devcmd_lock); + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_INTX: + err = vnic_dev_notify_set(enic->vdev, + enic_legacy_notify_intr()); + break; + case VNIC_DEV_INTR_MODE_MSIX: + err = vnic_dev_notify_set(enic->vdev, + enic_msix_notify_intr(enic)); + break; + default: + err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); + break; + } + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +static void enic_notify_timer_start(struct enic *enic) +{ + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_MSI: + mod_timer(&enic->notify_timer, jiffies); + break; + default: + /* Using intr for notification for INTx/MSI-X */ + break; + } +} + +/* rtnl lock is held, process context */ +static int enic_open(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + unsigned int i; + int err; + + err = enic_request_intr(enic); + if (err) { + netdev_err(netdev, "Unable to request irq.\n"); + return err; + } + + err = enic_dev_notify_set(enic); + if (err) { + netdev_err(netdev, + "Failed to alloc notify buffer, aborting.\n"); + goto err_out_free_intr; + } + + for (i = 0; i < enic->rq_count; i++) { + vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); + /* Need at least one buffer on ring to get going */ + if (vnic_rq_desc_used(&enic->rq[i]) == 0) { + netdev_err(netdev, "Unable to alloc receive buffers\n"); + err = -ENOMEM; + goto err_out_free_rq; + } + } + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_enable(&enic->wq[i]); + for (i = 0; i < enic->rq_count; i++) + vnic_rq_enable(&enic->rq[i]); + + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) + enic_dev_add_station_addr(enic); + + enic_set_rx_mode(netdev); + + 
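+ /* Rings are enabled and the RX filter is programmed: wake the TX queues, turn on NAPI, then unmask interrupts */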
netif_tx_wake_all_queues(netdev); + + for (i = 0; i < enic->rq_count; i++) { + enic_busy_poll_init_lock(&enic->rq[i]); + napi_enable(&enic->napi[i]); + } + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) + for (i = 0; i < enic->wq_count; i++) + napi_enable(&enic->napi[enic_cq_wq(enic, i)]); + enic_dev_enable(enic); + + for (i = 0; i < enic->intr_count; i++) + vnic_intr_unmask(&enic->intr[i]); + + enic_notify_timer_start(enic); + enic_rfs_flw_tbl_init(enic); + + return 0; + +err_out_free_rq: + for (i = 0; i < enic->rq_count; i++) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + enic_dev_notify_unset(enic); +err_out_free_intr: + enic_free_intr(enic); + + return err; +} + +/* rtnl lock is held, process context */ +static int enic_stop(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + unsigned int i; + int err; + + for (i = 0; i < enic->intr_count; i++) { + vnic_intr_mask(&enic->intr[i]); + (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ + } + + enic_synchronize_irqs(enic); + + del_timer_sync(&enic->notify_timer); + enic_rfs_flw_tbl_free(enic); + + enic_dev_disable(enic); + + for (i = 0; i < enic->rq_count; i++) { + napi_disable(&enic->napi[i]); + local_bh_disable(); + while (!enic_poll_lock_napi(&enic->rq[i])) + mdelay(1); + local_bh_enable(); + } + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) + for (i = 0; i < enic->wq_count; i++) + napi_disable(&enic->napi[enic_cq_wq(enic, i)]); + + if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) + enic_dev_del_station_addr(enic); + + for (i = 0; i < enic->wq_count; i++) { + err = vnic_wq_disable(&enic->wq[i]); + if (err) + return err; + } + for (i = 0; i < enic->rq_count; i++) { + err = vnic_rq_disable(&enic->rq[i]); + if (err) + return err; + } + + enic_dev_notify_unset(enic); + enic_free_intr(enic); + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); + for (i = 0; i < enic->rq_count; i++) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + for (i = 0; i < enic->cq_count; i++) + vnic_cq_clean(&enic->cq[i]); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_clean(&enic->intr[i]); + + return 0; +} + +static int enic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct enic *enic = netdev_priv(netdev); + int running = netif_running(netdev); + + if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) + return -EINVAL; + + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) + return -EOPNOTSUPP; + + if (running) + enic_stop(netdev); + + netdev->mtu = new_mtu; + + if (netdev->mtu > enic->port_mtu) + netdev_warn(netdev, + "interface MTU (%d) set higher than port MTU (%d)\n", + netdev->mtu, enic->port_mtu); + + if (running) + enic_open(netdev); + + return 0; +} + +static void enic_change_mtu_work(struct work_struct *work) +{ + struct enic *enic = container_of(work, struct enic, change_mtu_work); + struct net_device *netdev = enic->netdev; + int new_mtu = vnic_dev_mtu(enic->vdev); + int err; + unsigned int i; + + new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); + + rtnl_lock(); + + /* Stop RQ */ + del_timer_sync(&enic->notify_timer); + + for (i = 0; i < enic->rq_count; i++) + napi_disable(&enic->napi[i]); + + vnic_intr_mask(&enic->intr[0]); + enic_synchronize_irqs(enic); + err = vnic_rq_disable(&enic->rq[0]); + if (err) { + rtnl_unlock(); + netdev_err(netdev, "Unable to disable RQ.\n"); + return; + } + vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); + 
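+ /* RQ 0 is now drained; reset its completion queue and interrupt state before refilling at the new MTU */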
vnic_cq_clean(&enic->cq[0]); + vnic_intr_clean(&enic->intr[0]); + + /* Fill RQ with new_mtu-sized buffers */ + netdev->mtu = new_mtu; + vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); + /* Need at least one buffer on ring to get going */ + if (vnic_rq_desc_used(&enic->rq[0]) == 0) { + rtnl_unlock(); + netdev_err(netdev, "Unable to alloc receive buffers.\n"); + return; + } + + /* Start RQ */ + vnic_rq_enable(&enic->rq[0]); + napi_enable(&enic->napi[0]); + vnic_intr_unmask(&enic->intr[0]); + enic_notify_timer_start(enic); + + rtnl_unlock(); + + netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void enic_poll_controller(struct net_device *netdev) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_dev *vdev = enic->vdev; + unsigned int i, intr; + + switch (vnic_dev_get_intr_mode(vdev)) { + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < enic->rq_count; i++) { + intr = enic_msix_rq_intr(enic, i); + enic_isr_msix(enic->msix_entry[intr].vector, + &enic->napi[i]); + } + + for (i = 0; i < enic->wq_count; i++) { + intr = enic_msix_wq_intr(enic, i); + enic_isr_msix(enic->msix_entry[intr].vector, + &enic->napi[enic_cq_wq(enic, i)]); + } + + break; + case VNIC_DEV_INTR_MODE_MSI: + enic_isr_msi(enic->pdev->irq, enic); + break; + case VNIC_DEV_INTR_MODE_INTX: + enic_isr_legacy(enic->pdev->irq, netdev); + break; + default: + break; + } +} +#endif + +static int enic_dev_wait(struct vnic_dev *vdev, + int (*start)(struct vnic_dev *, int), + int (*finished)(struct vnic_dev *, int *), + int arg) +{ + unsigned long time; + int done; + int err; + + BUG_ON(in_interrupt()); + + err = start(vdev, arg); + if (err) + return err; + + /* Wait for func to complete...2 seconds max + */ + + time = jiffies + (HZ * 2); + do { + + err = finished(vdev, &done); + if (err) + return err; + + if (done) + return 0; + + schedule_timeout_uninterruptible(HZ / 10); + + } while (time_after(time, jiffies)); + + return -ETIMEDOUT; +} + +static int enic_dev_open(struct enic *enic) +{ + int err; + + err = enic_dev_wait(enic->vdev, vnic_dev_open, + vnic_dev_open_done, 0); + if (err) + dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", + err); + + return err; +} + +static int enic_dev_hang_reset(struct enic *enic) +{ + int err; + + err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, + vnic_dev_hang_reset_done, 0); + if (err) + netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", + err); + + return err; +} + +int __enic_set_rsskey(struct enic *enic) +{ + union vnic_rss_key *rss_key_buf_va; + dma_addr_t rss_key_buf_pa; + int i, kidx, bidx, err; + + rss_key_buf_va = pci_zalloc_consistent(enic->pdev, + sizeof(union vnic_rss_key), + &rss_key_buf_pa); + if (!rss_key_buf_va) + return -ENOMEM; + + for (i = 0; i < ENIC_RSS_LEN; i++) { + kidx = i / ENIC_RSS_BYTES_PER_KEY; + bidx = i % ENIC_RSS_BYTES_PER_KEY; + rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i]; + } + spin_lock_bh(&enic->devcmd_lock); + err = enic_set_rss_key(enic, + rss_key_buf_pa, + sizeof(union vnic_rss_key)); + spin_unlock_bh(&enic->devcmd_lock); + + pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), + rss_key_buf_va, rss_key_buf_pa); + + return err; +} + +static int enic_set_rsskey(struct enic *enic) +{ + netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN); + + return __enic_set_rsskey(enic); +} + +static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) +{ + dma_addr_t rss_cpu_buf_pa; + union vnic_rss_cpu *rss_cpu_buf_va = NULL; + unsigned int i; + int err; + + 
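+ /* Build the RSS indirection table in DMA memory; each of the 1 << rss_hash_bits entries maps a hash bucket to an RQ index */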
rss_cpu_buf_va = pci_alloc_consistent(enic->pdev, + sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa); + if (!rss_cpu_buf_va) + return -ENOMEM; + + for (i = 0; i < (1 << rss_hash_bits); i++) + (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; + + spin_lock_bh(&enic->devcmd_lock); + err = enic_set_rss_cpu(enic, + rss_cpu_buf_pa, + sizeof(union vnic_rss_cpu)); + spin_unlock_bh(&enic->devcmd_lock); + + pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), + rss_cpu_buf_va, rss_cpu_buf_pa); + + return err; +} + +static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, + u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable) +{ + const u8 tso_ipid_split_en = 0; + const u8 ig_vlan_strip_en = 1; + int err; + + /* Enable VLAN tag stripping. + */ + + spin_lock_bh(&enic->devcmd_lock); + err = enic_set_nic_cfg(enic, + rss_default_cpu, rss_hash_type, + rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, + ig_vlan_strip_en); + spin_unlock_bh(&enic->devcmd_lock); + + return err; +} + +static int enic_set_rss_nic_cfg(struct enic *enic) +{ + struct device *dev = enic_get_dev(enic); + const u8 rss_default_cpu = 0; + const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | + NIC_CFG_RSS_HASH_TYPE_IPV6 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + const u8 rss_hash_bits = 7; + const u8 rss_base_cpu = 0; + u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); + + if (rss_enable) { + if (!enic_set_rsskey(enic)) { + if (enic_set_rsscpu(enic, rss_hash_bits)) { + rss_enable = 0; + dev_warn(dev, "RSS disabled, " + "Failed to set RSS cpu indirection table."); + } + } else { + rss_enable = 0; + dev_warn(dev, "RSS disabled, Failed to set RSS key.\n"); + } + } + + return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, + rss_hash_bits, rss_base_cpu, rss_enable); +} + +static void enic_reset(struct work_struct *work) +{ + struct enic *enic = container_of(work, struct enic, reset); + + if (!netif_running(enic->netdev)) + return; + + rtnl_lock(); + + spin_lock(&enic->enic_api_lock); + enic_dev_hang_notify(enic); + enic_stop(enic->netdev); + enic_dev_hang_reset(enic); + enic_reset_addr_lists(enic); + enic_init_vnic_resources(enic); + enic_set_rss_nic_cfg(enic); + enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_open(enic->netdev); + spin_unlock(&enic->enic_api_lock); + call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); + + rtnl_unlock(); +} + +static int enic_set_intr_mode(struct enic *enic) +{ + unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); + unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); + unsigned int i; + + /* Set interrupt mode (INTx, MSI, MSI-X) depending + * on system capabilities. 
+ * + * Try MSI-X first + * + * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs + * (the second to last INTR is used for WQ/RQ errors) + * (the last INTR is used for notifications) + */ + + BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); + for (i = 0; i < n + m + 2; i++) + enic->msix_entry[i].entry = i; + + /* Use multiple RQs if RSS is enabled + */ + + if (ENIC_SETTING(enic, RSS) && + enic->config.intr_mode < 1 && + enic->rq_count >= n && + enic->wq_count >= m && + enic->cq_count >= n + m && + enic->intr_count >= n + m + 2) { + + if (pci_enable_msix_range(enic->pdev, enic->msix_entry, + n + m + 2, n + m + 2) > 0) { + + enic->rq_count = n; + enic->wq_count = m; + enic->cq_count = n + m; + enic->intr_count = n + m + 2; + + vnic_dev_set_intr_mode(enic->vdev, + VNIC_DEV_INTR_MODE_MSIX); + + return 0; + } + } + + if (enic->config.intr_mode < 1 && + enic->rq_count >= 1 && + enic->wq_count >= m && + enic->cq_count >= 1 + m && + enic->intr_count >= 1 + m + 2) { + if (pci_enable_msix_range(enic->pdev, enic->msix_entry, + 1 + m + 2, 1 + m + 2) > 0) { + + enic->rq_count = 1; + enic->wq_count = m; + enic->cq_count = 1 + m; + enic->intr_count = 1 + m + 2; + + vnic_dev_set_intr_mode(enic->vdev, + VNIC_DEV_INTR_MODE_MSIX); + + return 0; + } + } + + /* Next try MSI + * + * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR + */ + + if (enic->config.intr_mode < 2 && + enic->rq_count >= 1 && + enic->wq_count >= 1 && + enic->cq_count >= 2 && + enic->intr_count >= 1 && + !pci_enable_msi(enic->pdev)) { + + enic->rq_count = 1; + enic->wq_count = 1; + enic->cq_count = 2; + enic->intr_count = 1; + + vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); + + return 0; + } + + /* Next try INTx + * + * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs + * (the first INTR is used for WQ/RQ) + * (the second INTR is used for WQ/RQ errors) + * (the last INTR is used for notifications) + */ + + if (enic->config.intr_mode < 3 && + enic->rq_count >= 1 && + enic->wq_count >= 1 && + enic->cq_count >= 2 && + enic->intr_count >= 3) { + + enic->rq_count = 1; + enic->wq_count = 1; + enic->cq_count = 2; + enic->intr_count = 3; + + vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); + + return 0; + } + + vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); + + return -EINVAL; +} + +static void enic_clear_intr_mode(struct enic *enic) +{ + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_MSIX: + pci_disable_msix(enic->pdev); + break; + case VNIC_DEV_INTR_MODE_MSI: + pci_disable_msi(enic->pdev); + break; + default: + break; + } + + vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); +} + +static const struct net_device_ops enic_netdev_dynamic_ops = { + .ndo_open = enic_open, + .ndo_stop = enic_stop, + .ndo_start_xmit = enic_hard_start_xmit, + .ndo_get_stats64 = enic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = enic_set_rx_mode, + .ndo_set_mac_address = enic_set_mac_address_dynamic, + .ndo_change_mtu = enic_change_mtu, + .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, + .ndo_tx_timeout = enic_tx_timeout, + .ndo_set_vf_port = enic_set_vf_port, + .ndo_get_vf_port = enic_get_vf_port, + .ndo_set_vf_mac = enic_set_vf_mac, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = enic_poll_controller, +#endif +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = enic_rx_flow_steer, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = enic_busy_poll, +#endif +}; + +static const struct net_device_ops enic_netdev_ops = { + 
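+ /* Identical to enic_netdev_dynamic_ops above except .ndo_set_mac_address uses enic_set_mac_address */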
.ndo_open = enic_open, + .ndo_stop = enic_stop, + .ndo_start_xmit = enic_hard_start_xmit, + .ndo_get_stats64 = enic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = enic_set_mac_address, + .ndo_set_rx_mode = enic_set_rx_mode, + .ndo_change_mtu = enic_change_mtu, + .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, + .ndo_tx_timeout = enic_tx_timeout, + .ndo_set_vf_port = enic_set_vf_port, + .ndo_get_vf_port = enic_get_vf_port, + .ndo_set_vf_mac = enic_set_vf_mac, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = enic_poll_controller, +#endif +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = enic_rx_flow_steer, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = enic_busy_poll, +#endif +}; + +static void enic_dev_deinit(struct enic *enic) +{ + unsigned int i; + + for (i = 0; i < enic->rq_count; i++) { + napi_hash_del(&enic->napi[i]); + netif_napi_del(&enic->napi[i]); + } + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) + for (i = 0; i < enic->wq_count; i++) + netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]); + + enic_free_vnic_resources(enic); + enic_clear_intr_mode(enic); +} + +static void enic_kdump_kernel_config(struct enic *enic) +{ + if (is_kdump_kernel()) { + dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); + enic->rq_count = 1; + enic->wq_count = 1; + enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; + enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; + enic->config.mtu = min_t(u16, 1500, enic->config.mtu); + } +} + +static int enic_dev_init(struct enic *enic) +{ + struct device *dev = enic_get_dev(enic); + struct net_device *netdev = enic->netdev; + unsigned int i; + int err; + + /* Get interrupt coalesce timer info */ + err = enic_dev_intr_coal_timer_info(enic); + if (err) { + dev_warn(dev, "Using default conversion factor for " + "interrupt coalesce timer\n"); + vnic_dev_intr_coal_timer_info_default(enic->vdev); + } + + /* Get vNIC configuration + */ + + err = enic_get_vnic_config(enic); + if (err) { + dev_err(dev, "Get vNIC configuration failed, aborting\n"); + return err; + } + + /* Get available resource counts + */ + + enic_get_res_counts(enic); + + /* modify resource count if we are in kdump_kernel + */ + enic_kdump_kernel_config(enic); + + /* Set interrupt mode based on resource counts and system + * capabilities + */ + + err = enic_set_intr_mode(enic); + if (err) { + dev_err(dev, "Failed to set intr mode based on resource " + "counts and system capabilities, aborting\n"); + return err; + } + + /* Allocate and configure vNIC resources + */ + + err = enic_alloc_vnic_resources(enic); + if (err) { + dev_err(dev, "Failed to alloc vNIC resources, aborting\n"); + goto err_out_free_vnic_resources; + } + + enic_init_vnic_resources(enic); + + err = enic_set_rss_nic_cfg(enic); + if (err) { + dev_err(dev, "Failed to config nic, aborting\n"); + goto err_out_free_vnic_resources; + } + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + default: + netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); + napi_hash_add(&enic->napi[0]); + break; + case VNIC_DEV_INTR_MODE_MSIX: + for (i = 0; i < enic->rq_count; i++) { + netif_napi_add(netdev, &enic->napi[i], + enic_poll_msix_rq, NAPI_POLL_WEIGHT); + napi_hash_add(&enic->napi[i]); + } + for (i = 0; i < enic->wq_count; i++) + netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], + enic_poll_msix_wq, NAPI_POLL_WEIGHT); + break; + } + + return 0; + +err_out_free_vnic_resources: + 
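+ /* Error unwind: release the chosen interrupt mode and any vNIC resources allocated so far */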
enic_clear_intr_mode(enic); + enic_free_vnic_resources(enic); + + return err; +} + +static void enic_iounmap(struct enic *enic) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(enic->bar); i++) + if (enic->bar[i].vaddr) + iounmap(enic->bar[i].vaddr); +} + +static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct enic *enic; + int using_dac = 0; + unsigned int i; + int err; +#ifdef CONFIG_PCI_IOV + int pos = 0; +#endif + int num_pps = 1; + + /* Allocate net device structure and initialize. Private + * instance data is initialized to zero. + */ + + netdev = alloc_etherdev_mqs(sizeof(struct enic), + ENIC_RQ_MAX, ENIC_WQ_MAX); + if (!netdev) + return -ENOMEM; + + pci_set_drvdata(pdev, netdev); + + SET_NETDEV_DEV(netdev, &pdev->dev); + + enic = netdev_priv(netdev); + enic->netdev = netdev; + enic->pdev = pdev; + + /* Setup PCI resources + */ + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_netdev; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "Cannot request PCI regions, aborting\n"); + goto err_out_disable_device; + } + + pci_set_master(pdev); + + /* Query PCI controller on system for DMA addressing + * limitation for the device. Try 64-bit first, and + * fail to 32-bit. + */ + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(dev, "No usable DMA configuration, aborting\n"); + goto err_out_release_regions; + } + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(dev, "Unable to obtain %u-bit DMA " + "for consistent allocations, aborting\n", 32); + goto err_out_release_regions; + } + } else { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(dev, "Unable to obtain %u-bit DMA " + "for consistent allocations, aborting\n", 64); + goto err_out_release_regions; + } + using_dac = 1; + } + + /* Map vNIC resources from BAR0-5 + */ + + for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { + if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) + continue; + enic->bar[i].len = pci_resource_len(pdev, i); + enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); + if (!enic->bar[i].vaddr) { + dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i); + err = -ENODEV; + goto err_out_iounmap; + } + enic->bar[i].bus_addr = pci_resource_start(pdev, i); + } + + /* Register vNIC device + */ + + enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, + ARRAY_SIZE(enic->bar)); + if (!enic->vdev) { + dev_err(dev, "vNIC registration failed, aborting\n"); + err = -ENODEV; + goto err_out_iounmap; + } + +#ifdef CONFIG_PCI_IOV + /* Get number of subvnics */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, + &enic->num_vfs); + if (enic->num_vfs) { + err = pci_enable_sriov(pdev, enic->num_vfs); + if (err) { + dev_err(dev, "SRIOV enable failed, aborting." 
+ " pci_enable_sriov() returned %d\n", + err); + goto err_out_vnic_unregister; + } + enic->priv_flags |= ENIC_SRIOV_ENABLED; + num_pps = enic->num_vfs; + } + } +#endif + + /* Allocate structure for port profiles */ + enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); + if (!enic->pp) { + err = -ENOMEM; + goto err_out_disable_sriov_pp; + } + + /* Issue device open to get device in known state + */ + + err = enic_dev_open(enic); + if (err) { + dev_err(dev, "vNIC dev open failed, aborting\n"); + goto err_out_disable_sriov; + } + + /* Setup devcmd lock + */ + + spin_lock_init(&enic->devcmd_lock); + spin_lock_init(&enic->enic_api_lock); + + /* + * Set ingress vlan rewrite mode before vnic initialization + */ + + err = enic_dev_set_ig_vlan_rewrite_mode(enic); + if (err) { + dev_err(dev, + "Failed to set ingress vlan rewrite mode, aborting.\n"); + goto err_out_dev_close; + } + + /* Issue device init to initialize the vnic-to-switch link. + * We'll start with carrier off and wait for link UP + * notification later to turn on carrier. We don't need + * to wait here for the vnic-to-switch link initialization + * to complete; link UP notification is the indication that + * the process is complete. + */ + + netif_carrier_off(netdev); + + /* Do not call dev_init for a dynamic vnic. + * For a dynamic vnic, init_prov_info will be + * called later by an upper layer. + */ + + if (!enic_is_dynamic(enic)) { + err = vnic_dev_init(enic->vdev, 0); + if (err) { + dev_err(dev, "vNIC dev init failed, aborting\n"); + goto err_out_dev_close; + } + } + + err = enic_dev_init(enic); + if (err) { + dev_err(dev, "Device initialization failed, aborting\n"); + goto err_out_dev_close; + } + + netif_set_real_num_tx_queues(netdev, enic->wq_count); + netif_set_real_num_rx_queues(netdev, enic->rq_count); + + /* Setup notification timer, HW reset task, and wq locks + */ + + init_timer(&enic->notify_timer); + enic->notify_timer.function = enic_notify_timer; + enic->notify_timer.data = (unsigned long)enic; + + enic_set_rx_coal_setting(enic); + INIT_WORK(&enic->reset, enic_reset); + INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); + + for (i = 0; i < enic->wq_count; i++) + spin_lock_init(&enic->wq_lock[i]); + + /* Register net device + */ + + enic->port_mtu = enic->config.mtu; + (void)enic_change_mtu(netdev, enic->port_mtu); + + err = enic_set_mac_addr(netdev, enic->mac_addr); + if (err) { + dev_err(dev, "Invalid MAC address, aborting\n"); + goto err_out_dev_deinit; + } + + enic->tx_coalesce_usecs = enic->config.intr_timer_usec; + /* rx coalesce time already got initialized. 
This gets used + * if adaptive coal is turned off + */ + enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; + + if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) + netdev->netdev_ops = &enic_netdev_dynamic_ops; + else + netdev->netdev_ops = &enic_netdev_ops; + + netdev->watchdog_timeo = 2 * HZ; + enic_set_ethtool_ops(netdev); + + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + if (ENIC_SETTING(enic, LOOP)) { + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX; + enic->loop_enable = 1; + enic->loop_tag = enic->config.loop_tag; + dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); + } + if (ENIC_SETTING(enic, TXCSUM)) + netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; + if (ENIC_SETTING(enic, TSO)) + netdev->hw_features |= NETIF_F_TSO | + NETIF_F_TSO6 | NETIF_F_TSO_ECN; + if (ENIC_SETTING(enic, RSS)) + netdev->hw_features |= NETIF_F_RXHASH; + if (ENIC_SETTING(enic, RXCSUM)) + netdev->hw_features |= NETIF_F_RXCSUM; + + netdev->features |= netdev->hw_features; + +#ifdef CONFIG_RFS_ACCEL + netdev->hw_features |= NETIF_F_NTUPLE; +#endif + + if (using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + err = register_netdev(netdev); + if (err) { + dev_err(dev, "Cannot register net device, aborting\n"); + goto err_out_dev_deinit; + } + enic->rx_copybreak = RX_COPYBREAK_DEFAULT; + + return 0; + +err_out_dev_deinit: + enic_dev_deinit(enic); +err_out_dev_close: + vnic_dev_close(enic->vdev); +err_out_disable_sriov: + kfree(enic->pp); +err_out_disable_sriov_pp: +#ifdef CONFIG_PCI_IOV + if (enic_sriov_enabled(enic)) { + pci_disable_sriov(pdev); + enic->priv_flags &= ~ENIC_SRIOV_ENABLED; + } +err_out_vnic_unregister: +#endif + vnic_dev_unregister(enic->vdev); +err_out_iounmap: + enic_iounmap(enic); +err_out_release_regions: + pci_release_regions(pdev); +err_out_disable_device: + pci_disable_device(pdev); +err_out_free_netdev: + free_netdev(netdev); + + return err; +} + +static void enic_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct enic *enic = netdev_priv(netdev); + + cancel_work_sync(&enic->reset); + cancel_work_sync(&enic->change_mtu_work); + unregister_netdev(netdev); + enic_dev_deinit(enic); + vnic_dev_close(enic->vdev); +#ifdef CONFIG_PCI_IOV + if (enic_sriov_enabled(enic)) { + pci_disable_sriov(pdev); + enic->priv_flags &= ~ENIC_SRIOV_ENABLED; + } +#endif + kfree(enic->pp); + vnic_dev_unregister(enic->vdev); + enic_iounmap(enic); + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(netdev); + } +} + +static struct pci_driver enic_driver = { + .name = DRV_NAME, + .id_table = enic_id_table, + .probe = enic_probe, + .remove = enic_remove, +}; + +static int __init enic_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); + + return pci_register_driver(&enic_driver); +} + +static void __exit enic_cleanup_module(void) +{ + pci_unregister_driver(&enic_driver); +} + +module_init(enic_init_module); +module_exit(enic_cleanup_module); diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c new file mode 100644 index 000000000..e6a83198c --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_pp.c @@ -0,0 +1,368 @@ +/* + * Copyright 2011 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vnic_vic.h" +#include "enic_res.h" +#include "enic.h" +#include "enic_dev.h" +#include "enic_pp.h" + +/* + * Checks validity of vf index that came in + * port profile request + */ +int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err) +{ + if (vf != PORT_SELF_VF) { +#ifdef CONFIG_PCI_IOV + if (enic_sriov_enabled(enic)) { + if (vf < 0 || vf >= enic->num_vfs) { + *err = -EINVAL; + goto err_out; + } + } else { + *err = -EOPNOTSUPP; + goto err_out; + } +#else + *err = -EOPNOTSUPP; + goto err_out; +#endif + } + + if (vf == PORT_SELF_VF && !enic_is_dynamic(enic)) { + *err = -EOPNOTSUPP; + goto err_out; + } + + *err = 0; + return 1; + +err_out: + return 0; +} + +static int enic_set_port_profile(struct enic *enic, int vf) +{ + struct net_device *netdev = enic->netdev; + struct enic_port_profile *pp; + struct vic_provinfo *vp; + const u8 oui[3] = VIC_PROVINFO_CISCO_OUI; + const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX); + char uuid_str[38]; + char client_mac_str[18]; + u8 *client_mac; + int err; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + if (!(pp->set & ENIC_SET_NAME) || !strlen(pp->name)) + return -EINVAL; + + vp = vic_provinfo_alloc(GFP_KERNEL, oui, + VIC_PROVINFO_GENERIC_TYPE); + if (!vp) + return -ENOMEM; + + VIC_PROVINFO_ADD_TLV(vp, + VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR, + strlen(pp->name) + 1, pp->name); + + if (!is_zero_ether_addr(pp->mac_addr)) { + client_mac = pp->mac_addr; + } else if (vf == PORT_SELF_VF) { + client_mac = netdev->dev_addr; + } else { + netdev_err(netdev, "Cannot find pp mac address " + "for VF %d\n", vf); + err = -EINVAL; + goto add_tlv_failure; + } + + VIC_PROVINFO_ADD_TLV(vp, + VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR, + ETH_ALEN, client_mac); + + snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac); + VIC_PROVINFO_ADD_TLV(vp, + VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR, + sizeof(client_mac_str), client_mac_str); + + if (pp->set & ENIC_SET_INSTANCE) { + sprintf(uuid_str, "%pUB", pp->instance_uuid); + VIC_PROVINFO_ADD_TLV(vp, + VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR, + sizeof(uuid_str), uuid_str); + } + + if (pp->set & ENIC_SET_HOST) { + sprintf(uuid_str, "%pUB", pp->host_uuid); + VIC_PROVINFO_ADD_TLV(vp, + VIC_GENERIC_PROV_TLV_HOST_UUID_STR, + sizeof(uuid_str), uuid_str); + } + + VIC_PROVINFO_ADD_TLV(vp, + VIC_GENERIC_PROV_TLV_OS_TYPE, + sizeof(os_type), &os_type); + + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_init_prov2, (u8 *)vp, + vic_provinfo_size(vp)); + err = enic_dev_status_to_errno(err); + +add_tlv_failure: + vic_provinfo_free(vp); + + return err; +} + +static int enic_unset_port_profile(struct enic *enic, int vf) +{ + int err; + + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_deinit); + if (err) + return enic_dev_status_to_errno(err); + + if (vf == PORT_SELF_VF) + enic_reset_addr_lists(enic); + + return 0; +} + +static int enic_are_pp_different(struct enic_port_profile *pp1, + struct 
enic_port_profile *pp2) +{ + return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid, + pp2->instance_uuid, PORT_UUID_MAX) | + !!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) | + !ether_addr_equal(pp1->mac_addr, pp2->mac_addr); +} + +static int enic_pp_preassociate(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp); +static int enic_pp_disassociate(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp); +static int enic_pp_preassociate_rr(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp); +static int enic_pp_associate(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp); + +static int (*enic_pp_handlers[])(struct enic *enic, int vf, + struct enic_port_profile *prev_state, + int *restore_pp) = { + [PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate, + [PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr, + [PORT_REQUEST_ASSOCIATE] = enic_pp_associate, + [PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate, +}; + +static const int enic_pp_handlers_count = + ARRAY_SIZE(enic_pp_handlers); + +static int enic_pp_preassociate(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp) +{ + return -EOPNOTSUPP; +} + +static int enic_pp_disassociate(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp) +{ + struct net_device *netdev = enic->netdev; + struct enic_port_profile *pp; + int err; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + /* Deregister mac addresses */ + if (!is_zero_ether_addr(pp->mac_addr)) + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr, + pp->mac_addr); + else if (vf == PORT_SELF_VF && !is_zero_ether_addr(netdev->dev_addr)) + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_del_addr, + netdev->dev_addr); + + return enic_unset_port_profile(enic, vf); +} + +static int enic_pp_preassociate_rr(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp) +{ + struct enic_port_profile *pp; + int err; + int active = 0; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + if (pp->request != PORT_REQUEST_ASSOCIATE) { + /* If pre-associate is not part of an associate. + We always disassociate first */ + err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic, vf, + prev_pp, restore_pp); + if (err) + return err; + + *restore_pp = 0; + } + + *restore_pp = 0; + + err = enic_set_port_profile(enic, vf); + if (err) + return err; + + /* If pre-associate is not part of an associate. 
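+	 * A standalone pre-associate enables the device as standby below;
+	 * the associate handler later re-enables it as active.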
*/ + if (pp->request != PORT_REQUEST_ASSOCIATE) { + /* Enable device as standby */ + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_enable2, + active); + err = enic_dev_status_to_errno(err); + } + + return err; +} + +static int enic_pp_associate(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp) +{ + struct net_device *netdev = enic->netdev; + struct enic_port_profile *pp; + int err; + int active = 1; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + /* Check if a pre-associate was called before */ + if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR || + (prev_pp->request == PORT_REQUEST_PREASSOCIATE_RR && + enic_are_pp_different(prev_pp, pp))) { + err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE]( + enic, vf, prev_pp, restore_pp); + if (err) + return err; + + *restore_pp = 0; + } + + err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR]( + enic, vf, prev_pp, restore_pp); + if (err) + return err; + + *restore_pp = 0; + + /* Enable device as active */ + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_enable2, active); + err = enic_dev_status_to_errno(err); + if (err) + return err; + + /* Register mac address */ + if (!is_zero_ether_addr(pp->mac_addr)) + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr, + pp->mac_addr); + else if (vf == PORT_SELF_VF && !is_zero_ether_addr(netdev->dev_addr)) + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_add_addr, + netdev->dev_addr); + + return 0; +} + +int enic_process_set_pp_request(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp) +{ + struct enic_port_profile *pp; + int err; + + ENIC_PP_BY_INDEX(enic, vf, pp, &err); + if (err) + return err; + + if (pp->request >= enic_pp_handlers_count + || !enic_pp_handlers[pp->request]) + return -EOPNOTSUPP; + + return enic_pp_handlers[pp->request](enic, vf, prev_pp, restore_pp); +} + +int enic_process_get_pp_request(struct enic *enic, int vf, + int request, u16 *response) +{ + int err, status = ERR_SUCCESS; + + switch (request) { + + case PORT_REQUEST_PREASSOCIATE_RR: + case PORT_REQUEST_ASSOCIATE: + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, + vnic_dev_enable2_done, &status); + break; + + case PORT_REQUEST_DISASSOCIATE: + ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, + vnic_dev_deinit_done, &status); + break; + + default: + return -EINVAL; + } + + if (err) + status = err; + + switch (status) { + case ERR_SUCCESS: + *response = PORT_PROFILE_RESPONSE_SUCCESS; + break; + case ERR_EINVAL: + *response = PORT_PROFILE_RESPONSE_INVALID; + break; + case ERR_EBADSTATE: + *response = PORT_PROFILE_RESPONSE_BADSTATE; + break; + case ERR_ENOMEM: + *response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES; + break; + case ERR_EINPROGRESS: + *response = PORT_PROFILE_RESPONSE_INPROGRESS; + break; + default: + *response = PORT_PROFILE_RESPONSE_ERROR; + break; + } + + return 0; +} diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.h b/drivers/net/ethernet/cisco/enic/enic_pp.h new file mode 100644 index 000000000..a09ff392c --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_pp.h @@ -0,0 +1,36 @@ +/* + * Copyright 2011 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _ENIC_PP_H_ +#define _ENIC_PP_H_ + +#define ENIC_PP_BY_INDEX(enic, vf, pp, err) \ + do { \ + if (enic_is_valid_pp_vf(enic, vf, err)) \ + pp = (vf == PORT_SELF_VF) ? enic->pp : enic->pp + vf; \ + else \ + pp = NULL; \ + } while (0) + +int enic_process_set_pp_request(struct enic *enic, int vf, + struct enic_port_profile *prev_pp, int *restore_pp); +int enic_process_get_pp_request(struct enic *enic, int vf, + int request, u16 *response); +int enic_is_valid_pp_vf(struct enic *enic, int vf, int *err); + +#endif /* _ENIC_PP_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c new file mode 100644 index 000000000..9c96911fb --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_res.c @@ -0,0 +1,385 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include + +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_resource.h" +#include "vnic_enet.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_rq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_nic.h" +#include "vnic_rss.h" +#include "enic_res.h" +#include "enic.h" + +int enic_get_vnic_config(struct enic *enic) +{ + struct vnic_enet_config *c = &enic->config; + int err; + + err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr); + if (err) { + dev_err(enic_get_dev(enic), + "Error getting MAC addr, %d\n", err); + return err; + } + +#define GET_CONFIG(m) \ + do { \ + err = vnic_dev_spec(enic->vdev, \ + offsetof(struct vnic_enet_config, m), \ + sizeof(c->m), &c->m); \ + if (err) { \ + dev_err(enic_get_dev(enic), \ + "Error getting %s, %d\n", #m, err); \ + return err; \ + } \ + } while (0) + + GET_CONFIG(flags); + GET_CONFIG(wq_desc_count); + GET_CONFIG(rq_desc_count); + GET_CONFIG(mtu); + GET_CONFIG(intr_timer_type); + GET_CONFIG(intr_mode); + GET_CONFIG(intr_timer_usec); + GET_CONFIG(loop_tag); + GET_CONFIG(num_arfs); + + c->wq_desc_count = + min_t(u32, ENIC_MAX_WQ_DESCS, + max_t(u32, ENIC_MIN_WQ_DESCS, + c->wq_desc_count)); + c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ + + c->rq_desc_count = + min_t(u32, ENIC_MAX_RQ_DESCS, + max_t(u32, ENIC_MIN_RQ_DESCS, + c->rq_desc_count)); + c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ + + if (c->mtu == 0) + c->mtu = 1500; + c->mtu = min_t(u16, ENIC_MAX_MTU, + max_t(u16, ENIC_MIN_MTU, + c->mtu)); + + c->intr_timer_usec = min_t(u32, c->intr_timer_usec, + vnic_dev_get_intr_coal_timer_max(enic->vdev)); + + dev_info(enic_get_dev(enic), + "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n", + enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu); + + dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " + "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec " + "loopback tag 0x%04x\n", + ENIC_SETTING(enic, TXCSUM) ? "yes" : "no", + ENIC_SETTING(enic, RXCSUM) ? "yes" : "no", + ENIC_SETTING(enic, TSO) ? "yes" : "no", + ENIC_SETTING(enic, LRO) ? "yes" : "no", + ENIC_SETTING(enic, RSS) ? "yes" : "no", + c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" : + c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" : + c->intr_mode == VENET_INTR_MODE_ANY ? "any" : + "unknown", + c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" : + c->intr_timer_type == VENET_INTR_TYPE_IDLE ? 
"idle" : + "unknown", + c->intr_timer_usec, + c->loop_tag); + + return 0; +} + +int enic_add_vlan(struct enic *enic, u16 vlanid) +{ + u64 a0 = vlanid, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait); + if (err) + dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err); + + return err; +} + +int enic_del_vlan(struct enic *enic, u16 vlanid) +{ + u64 a0 = vlanid, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait); + if (err) + dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err); + + return err; +} + +int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en) +{ + u64 a0, a1; + u32 nic_cfg; + int wait = 1000; + + vnic_set_nic_cfg(&nic_cfg, rss_default_cpu, + rss_hash_type, rss_hash_bits, rss_base_cpu, + rss_enable, tso_ipid_split_en, ig_vlan_strip_en); + + a0 = nic_cfg; + a1 = 0; + + return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait); +} + +int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len) +{ + u64 a0 = (u64)key_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait); +} + +int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len) +{ + u64 a0 = (u64)cpu_pa, a1 = len; + int wait = 1000; + + return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait); +} + +void enic_free_vnic_resources(struct enic *enic) +{ + unsigned int i; + + for (i = 0; i < enic->wq_count; i++) + vnic_wq_free(&enic->wq[i]); + for (i = 0; i < enic->rq_count; i++) + vnic_rq_free(&enic->rq[i]); + for (i = 0; i < enic->cq_count; i++) + vnic_cq_free(&enic->cq[i]); + for (i = 0; i < enic->intr_count; i++) + vnic_intr_free(&enic->intr[i]); +} + +void enic_get_res_counts(struct enic *enic) +{ + enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); + enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); + enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); + enic->intr_count = vnic_dev_get_res_count(enic->vdev, + RES_TYPE_INTR_CTRL); + + dev_info(enic_get_dev(enic), + "vNIC resources avail: wq %d rq %d cq %d intr %d\n", + enic->wq_count, enic->rq_count, + enic->cq_count, enic->intr_count); +} + +void enic_init_vnic_resources(struct enic *enic) +{ + enum vnic_dev_intr_mode intr_mode; + unsigned int mask_on_assertion; + unsigned int interrupt_offset; + unsigned int error_interrupt_enable; + unsigned int error_interrupt_offset; + unsigned int cq_index; + unsigned int i; + + intr_mode = vnic_dev_get_intr_mode(enic->vdev); + + /* Init RQ/WQ resources. + * + * RQ[0 - n-1] point to CQ[0 - n-1] + * WQ[0 - m-1] point to CQ[n - n+m-1] + * + * Error interrupt is not enabled for MSI. 
+ */ + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_INTX: + case VNIC_DEV_INTR_MODE_MSIX: + error_interrupt_enable = 1; + error_interrupt_offset = enic->intr_count - 2; + break; + default: + error_interrupt_enable = 0; + error_interrupt_offset = 0; + break; + } + + for (i = 0; i < enic->rq_count; i++) { + cq_index = i; + vnic_rq_init(&enic->rq[i], + cq_index, + error_interrupt_enable, + error_interrupt_offset); + } + + for (i = 0; i < enic->wq_count; i++) { + cq_index = enic->rq_count + i; + vnic_wq_init(&enic->wq[i], + cq_index, + error_interrupt_enable, + error_interrupt_offset); + } + + /* Init CQ resources + * + * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI + * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X + */ + + for (i = 0; i < enic->cq_count; i++) { + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_MSIX: + interrupt_offset = i; + break; + default: + interrupt_offset = 0; + break; + } + + vnic_cq_init(&enic->cq[i], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 1 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + interrupt_offset, + 0 /* cq_message_addr */); + } + + /* Init INTR resources + * + * mask_on_assertion is not used for INTx due to the level- + * triggered nature of INTx + */ + + switch (intr_mode) { + case VNIC_DEV_INTR_MODE_MSI: + case VNIC_DEV_INTR_MODE_MSIX: + mask_on_assertion = 1; + break; + default: + mask_on_assertion = 0; + break; + } + + for (i = 0; i < enic->intr_count; i++) { + vnic_intr_init(&enic->intr[i], + enic->config.intr_timer_usec, + enic->config.intr_timer_type, + mask_on_assertion); + } +} + +int enic_alloc_vnic_resources(struct enic *enic) +{ + enum vnic_dev_intr_mode intr_mode; + unsigned int i; + int err; + + intr_mode = vnic_dev_get_intr_mode(enic->vdev); + + dev_info(enic_get_dev(enic), "vNIC resources used: " + "wq %d rq %d cq %d intr %d intr mode %s\n", + enic->wq_count, enic->rq_count, + enic->cq_count, enic->intr_count, + intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : + intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : + intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
"MSI-X" : + "unknown"); + + /* Allocate queue resources + */ + + for (i = 0; i < enic->wq_count; i++) { + err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, + enic->config.wq_desc_count, + sizeof(struct wq_enet_desc)); + if (err) + goto err_out_cleanup; + } + + for (i = 0; i < enic->rq_count; i++) { + err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i, + enic->config.rq_desc_count, + sizeof(struct rq_enet_desc)); + if (err) + goto err_out_cleanup; + } + + for (i = 0; i < enic->cq_count; i++) { + if (i < enic->rq_count) + err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, + enic->config.rq_desc_count, + sizeof(struct cq_enet_rq_desc)); + else + err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, + enic->config.wq_desc_count, + sizeof(struct cq_enet_wq_desc)); + if (err) + goto err_out_cleanup; + } + + for (i = 0; i < enic->intr_count; i++) { + err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i); + if (err) + goto err_out_cleanup; + } + + /* Hook remaining resource + */ + + enic->legacy_pba = vnic_dev_get_res(enic->vdev, + RES_TYPE_INTR_PBA_LEGACY, 0); + if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { + dev_err(enic_get_dev(enic), + "Failed to hook legacy pba resource\n"); + err = -ENODEV; + goto err_out_cleanup; + } + + return 0; + +err_out_cleanup: + enic_free_vnic_resources(enic); + + return err; +} diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h new file mode 100644 index 000000000..69f60afd6 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_res.h @@ -0,0 +1,153 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _ENIC_RES_H_ +#define _ENIC_RES_H_ + +#include "wq_enet_desc.h" +#include "rq_enet_desc.h" +#include "vnic_wq.h" +#include "vnic_rq.h" + +#define ENIC_MIN_WQ_DESCS 64 +#define ENIC_MAX_WQ_DESCS 4096 +#define ENIC_MIN_RQ_DESCS 64 +#define ENIC_MAX_RQ_DESCS 4096 + +#define ENIC_MIN_MTU 68 +#define ENIC_MAX_MTU 9000 + +#define ENIC_MULTICAST_PERFECT_FILTERS 32 +#define ENIC_UNICAST_PERFECT_FILTERS 32 + +#define ENIC_NON_TSO_MAX_DESC 16 + +#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 
1 : 0) + +static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + unsigned int mss_or_csum_offset, unsigned int hdr_len, + int vlan_tag_insert, unsigned int vlan_tag, + int offload_mode, int cq_entry, int sop, int eop, int loopback) +{ + struct wq_enet_desc *desc = vnic_wq_next_desc(wq); + u8 desc_skip_cnt = 1; + u8 compressed_send = 0; + u64 wrid = 0; + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + (u16)mss_or_csum_offset, + (u16)hdr_len, (u8)offload_mode, + (u8)eop, (u8)cq_entry, + 0, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + (u8)loopback); + + vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, + (u8)cq_entry, compressed_send, wrid); +} + +static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + 0, 0, 0, 0, 0, + eop, 0 /* !SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, + dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, + unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + 0, 0, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM, + eop, 1 /* SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + int ip_csum, int tcpudp_csum, int vlan_tag_insert, + unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0), + 0, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM, + eop, 1 /* SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + unsigned int csum_offset, unsigned int hdr_len, + int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + csum_offset, hdr_len, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_CSUM_L4, + eop, 1 /* SOP */, eop, loopback); +} + +static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, unsigned int len, + unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, + unsigned int vlan_tag, int eop, int loopback) +{ + enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, + mss, hdr_len, vlan_tag_insert, vlan_tag, + WQ_ENET_OFFLOAD_MODE_TSO, + eop, 1 /* SOP */, eop, loopback); +} + +static inline void enic_queue_rq_desc(struct vnic_rq *rq, + void *os_buf, unsigned int os_buf_index, + dma_addr_t dma_addr, unsigned int len) +{ + struct rq_enet_desc *desc = vnic_rq_next_desc(rq); + u64 wrid = 0; + u8 type = os_buf_index ? 
+ RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP; + + rq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + type, (u16)len); + + vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid); +} + +struct enic; + +int enic_get_vnic_config(struct enic *); +int enic_add_vlan(struct enic *enic, u16 vlanid); +int enic_del_vlan(struct enic *enic, u16 vlanid); +int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en); +int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len); +int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len); +void enic_get_res_counts(struct enic *enic); +void enic_init_vnic_resources(struct enic *enic); +int enic_alloc_vnic_resources(struct enic *); +void enic_free_vnic_resources(struct enic *); + +#endif /* _ENIC_RES_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h new file mode 100644 index 000000000..e6dd30988 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h @@ -0,0 +1,60 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _RQ_ENET_DESC_H_ +#define _RQ_ENET_DESC_H_ + +/* Ethernet receive queue descriptor: 16B */ +struct rq_enet_desc { + __le64 address; + __le16 length_type; + u8 reserved[6]; +}; + +enum rq_enet_type_types { + RQ_ENET_TYPE_ONLY_SOP = 0, + RQ_ENET_TYPE_NOT_SOP = 1, + RQ_ENET_TYPE_RESV2 = 2, + RQ_ENET_TYPE_RESV3 = 3, +}; + +#define RQ_ENET_ADDR_BITS 64 +#define RQ_ENET_LEN_BITS 14 +#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1) +#define RQ_ENET_TYPE_BITS 2 +#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) + +static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, + u64 address, u8 type, u16 length) +{ + desc->address = cpu_to_le64(address); + desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | + ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS)); +} + +static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, + u64 *address, u8 *type, u16 *length) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; + *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & + RQ_ENET_TYPE_MASK); +} + +#endif /* _RQ_ENET_DESC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c new file mode 100644 index 000000000..0daa1c707 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -0,0 +1,91 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_cq.h" + +void vnic_cq_free(struct vnic_cq *cq) +{ + vnic_dev_free_desc_ring(cq->vdev, &cq->ring); + + cq->ctrl = NULL; +} + +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + cq->index = index; + cq->vdev = vdev; + + cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); + if (!cq->ctrl) { + pr_err("Failed to hook CQ[%d] resource\n", index); + return -EINVAL; + } + + err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); + if (err) + return err; + + return 0; +} + +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int cq_message_enable, + unsigned int interrupt_offset, u64 cq_message_addr) +{ + u64 paddr; + + paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &cq->ctrl->ring_base); + iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); + iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); + iowrite32(color_enable, &cq->ctrl->color_enable); + iowrite32(cq_head, &cq->ctrl->cq_head); + iowrite32(cq_tail, &cq->ctrl->cq_tail); + iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); + iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); + iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); + iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); + iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); + writeq(cq_message_addr, &cq->ctrl->cq_message_addr); + + cq->interrupt_offset = interrupt_offset; +} + +void vnic_cq_clean(struct vnic_cq *cq) +{ + cq->to_clean = 0; + cq->last_color = 0; + + iowrite32(0, &cq->ctrl->cq_head); + iowrite32(0, &cq->ctrl->cq_tail); + iowrite32(1, &cq->ctrl->cq_tail_color); + + vnic_dev_clear_desc_ring(&cq->ring); +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h new file mode 100644 index 000000000..4e6aa6585 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h @@ -0,0 +1,123 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_CQ_H_ +#define _VNIC_CQ_H_ + +#include "cq_desc.h" +#include "vnic_dev.h" + +/* Completion queue control */ +struct vnic_cq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 flow_control_enable; /* 0x10 */ + u32 pad1; + u32 color_enable; /* 0x18 */ + u32 pad2; + u32 cq_head; /* 0x20 */ + u32 pad3; + u32 cq_tail; /* 0x28 */ + u32 pad4; + u32 cq_tail_color; /* 0x30 */ + u32 pad5; + u32 interrupt_enable; /* 0x38 */ + u32 pad6; + u32 cq_entry_enable; /* 0x40 */ + u32 pad7; + u32 cq_message_enable; /* 0x48 */ + u32 pad8; + u32 interrupt_offset; /* 0x50 */ + u32 pad9; + u64 cq_message_addr; /* 0x58 */ + u32 pad10; +}; + +struct vnic_rx_bytes_counter { + unsigned int small_pkt_bytes_cnt; + unsigned int large_pkt_bytes_cnt; +}; + +struct vnic_cq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned int to_clean; + unsigned int last_color; + unsigned int interrupt_offset; + struct vnic_rx_bytes_counter pkt_size_counter; + unsigned int cur_rx_coal_timeval; + unsigned int tobe_rx_coal_timeval; + ktime_t prev_ts; +}; + +static inline unsigned int vnic_cq_service(struct vnic_cq *cq, + unsigned int work_to_do, + int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque), + void *opaque) +{ + struct cq_desc *cq_desc; + unsigned int work_done = 0; + u16 q_number, completed_index; + u8 type, color; + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq_desc, type, + q_number, completed_index, opaque)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +void vnic_cq_free(struct vnic_cq *cq); +int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int message_enable, + unsigned int interrupt_offset, u64 message_addr); +void vnic_cq_clean(struct vnic_cq *cq); + +#endif /* _VNIC_CQ_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c new file mode 100644 index 000000000..62f7b7baf --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -0,0 +1,1108 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "vnic_resource.h" +#include "vnic_devcmd.h" +#include "vnic_dev.h" +#include "vnic_stats.h" + +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; + +struct vnic_res { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned int count; +}; + +struct vnic_intr_coal_timer_info { + u32 mul; + u32 div; + u32 max_usec; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 notify_sz; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; + struct vnic_intr_coal_timer_info intr_coal_timer_info; +}; + +#define VNIC_MAX_RES_HDR_SIZE \ + (sizeof(struct vnic_resource_header) + \ + sizeof(struct vnic_resource) * RES_TYPE_MAX) +#define VNIC_RES_STRIDE 128 + +void *vnic_dev_priv(struct vnic_dev *vdev) +{ + return vdev->priv; +} + +static int vnic_dev_discover_res(struct vnic_dev *vdev, + struct vnic_dev_bar *bar, unsigned int num_bars) +{ + struct vnic_resource_header __iomem *rh; + struct mgmt_barmap_hdr __iomem *mrh; + struct vnic_resource __iomem *r; + u8 type; + + if (num_bars == 0) + return -EINVAL; + + if (bar->len < VNIC_MAX_RES_HDR_SIZE) { + pr_err("vNIC BAR0 res hdr length error\n"); + return -EINVAL; + } + + rh = bar->vaddr; + mrh = bar->vaddr; + if (!rh) { + pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + return -EINVAL; + } + + /* Check for mgmt vnic in addition to normal vnic */ + if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) || + (ioread32(&rh->version) != VNIC_RES_VERSION)) { + if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || + (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { + pr_err("vNIC BAR0 res magic/version error " + "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + MGMTVNIC_MAGIC, MGMTVNIC_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); + return -EINVAL; + } + } + + if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC) + r = (struct vnic_resource __iomem *)(mrh + 1); + else + r = (struct vnic_resource __iomem *)(rh + 1); + + + while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { + + u8 bar_num = ioread8(&r->bar); + u32 bar_offset = ioread32(&r->bar_offset); + u32 count = ioread32(&r->count); + u32 len; + + r++; + + if (bar_num >= num_bars) + continue; + + if (!bar[bar_num].len || !bar[bar_num].vaddr) + continue; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + /* each count is stride bytes long */ + len 
= count * VNIC_RES_STRIDE; + if (len + bar_offset > bar[bar_num].len) { + pr_err("vNIC BAR0 resource %d " + "out-of-bounds, offset 0x%x + " + "size 0x%x > bar len 0x%lx\n", + type, bar_offset, + len, + bar[bar_num].len); + return -EINVAL; + } + break; + case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD: + len = count; + break; + default: + continue; + } + + vdev->res[type].count = count; + vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr + + bar_offset; + vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset; + } + + return 0; +} + +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type) +{ + return vdev->res[type].count; +} +EXPORT_SYMBOL(vnic_dev_get_res_count); + +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index) +{ + if (!vdev->res[type].vaddr) + return NULL; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + return (char __iomem *)vdev->res[type].vaddr + + index * VNIC_RES_STRIDE; + default: + return (char __iomem *)vdev->res[type].vaddr; + } +} +EXPORT_SYMBOL(vnic_dev_get_res); + +static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + /* The base address of the desc rings must be 512 byte aligned. + * Descriptor count is aligned to groups of 32 descriptors. A + * count of 0 means the maximum 4096 descriptors. Descriptor + * size is aligned to 16 bytes. + */ + + unsigned int count_align = 32; + unsigned int desc_align = 16; + + ring->base_align = 512; + + if (desc_count == 0) + desc_count = 4096; + + ring->desc_count = ALIGN(desc_count, count_align); + + ring->desc_size = ALIGN(desc_size, desc_align); + + ring->size = ring->desc_count * ring->desc_size; + ring->size_unaligned = ring->size + ring->base_align; + + return ring->size_unaligned; +} + +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) +{ + memset(ring->descs, 0, ring->size); +} + +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + vnic_dev_desc_ring_size(ring, desc_count, desc_size); + + ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, + ring->size_unaligned, + &ring->base_addr_unaligned); + + if (!ring->descs_unaligned) { + pr_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); + return -ENOMEM; + } + + ring->base_addr = ALIGN(ring->base_addr_unaligned, + ring->base_align); + ring->descs = (u8 *)ring->descs_unaligned + + (ring->base_addr - ring->base_addr_unaligned); + + vnic_dev_clear_desc_ring(ring); + + ring->desc_avail = ring->desc_count - 1; + + return 0; +} + +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) +{ + if (ring->descs) { + pci_free_consistent(vdev->pdev, + ring->size_unaligned, + ring->descs_unaligned, + ring->base_addr_unaligned); + ring->descs = NULL; + } +} + +static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct vnic_devcmd __iomem *devcmd = vdev->devcmd; + unsigned int i; + int delay; + u32 status; + int err; + + status = ioread32(&devcmd->status); + if (status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } + if (status & STAT_BUSY) { + pr_err("Busy devcmd %d\n", _CMD_N(cmd)); + return -EBUSY; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + writeq(vdev->args[i], &devcmd->args[i]); + wmb(); + } + + 
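+	/* Post the command; unless the command is flagged no-wait, the
+	 * loop below polls the status register until STAT_BUSY clears.
+	 */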
iowrite32(cmd, &devcmd->cmd); + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + return 0; + + for (delay = 0; delay < wait; delay++) { + + udelay(100); + + status = ioread32(&devcmd->status); + if (status == 0xFFFFFFFF) { + /* PCI-e target device is gone */ + return -ENODEV; + } + + if (!(status & STAT_BUSY)) { + + if (status & STAT_ERROR) { + err = (int)readq(&devcmd->args[0]); + if (err == ERR_EINVAL && + cmd == CMD_CAPABILITY) + return -err; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("Error %d devcmd %d\n", + err, _CMD_N(cmd)); + return -err; + } + + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + rmb(); + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = readq(&devcmd->args[i]); + } + + return 0; + } + } + + pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); + return -ETIMEDOUT; +} + +static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + u32 status; + int err; + + memset(vdev->args, 0, sizeof(vdev->args)); + + vdev->args[0] = vdev->proxy_index; + vdev->args[1] = cmd; + vdev->args[2] = *a0; + vdev->args[3] = *a1; + + err = _vnic_dev_cmd(vdev, proxy_cmd, wait); + if (err) + return err; + + status = (u32)vdev->args[0]; + if (status & STAT_ERROR) { + err = (int)vdev->args[1]; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); + return err; + } + + *a0 = vdev->args[1]; + *a1 = vdev->args[2]; + + return 0; +} + +static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + int err; + + vdev->args[0] = *a0; + vdev->args[1] = *a1; + + err = _vnic_dev_cmd(vdev, cmd, wait); + + *a0 = vdev->args[0]; + *a1 = vdev->args[1]; + + return err; +} + +void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index) +{ + vdev->proxy = PROXY_BY_INDEX; + vdev->proxy_index = index; +} + +void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev) +{ + vdev->proxy = PROXY_NONE; + vdev->proxy_index = 0; +} + +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + memset(vdev->args, 0, sizeof(vdev->args)); + + switch (vdev->proxy) { + case PROXY_BY_INDEX: + return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd, + a0, a1, wait); + case PROXY_BY_BDF: + return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd, + a0, a1, wait); + case PROXY_NONE: + default: + return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait); + } +} + +static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd) +{ + u64 a0 = (u32)cmd, a1 = 0; + int wait = 1000; + int err; + + err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); + + return !(err || a0); +} + +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err = 0; + + if (!vdev->fw_info) { + vdev->fw_info = pci_zalloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + &vdev->fw_info_pa); + if (!vdev->fw_info) + return -ENOMEM; + + a0 = vdev->fw_info_pa; + a1 = sizeof(struct vnic_devcmd_fw_info); + + /* only get fw_info once and cache it */ + if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO)) + err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, + &a0, &a1, wait); + else + err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD, + &a0, &a1, wait); + } + + *fw_info = vdev->fw_info; + + return err; +} + +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, + void *value) +{ + u64 a0, a1; + int wait = 1000; + int 
err; + + a0 = offset; + a1 = size; + + err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); + + switch (size) { + case 1: *(u8 *)value = (u8)a0; break; + case 2: *(u16 *)value = (u16)a0; break; + case 4: *(u32 *)value = (u32)a0; break; + case 8: *(u64 *)value = a0; break; + default: BUG(); break; + } + + return err; +} + +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) +{ + u64 a0, a1; + int wait = 1000; + + if (!vdev->stats) { + vdev->stats = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_stats), &vdev->stats_pa); + if (!vdev->stats) + return -ENOMEM; + } + + *stats = vdev->stats; + a0 = vdev->stats_pa; + a1 = sizeof(struct vnic_stats); + + return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); +} + +int vnic_dev_close(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); +} + +int vnic_dev_enable_wait(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT)) + return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); + else + return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); +} + +int vnic_dev_disable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); +} + +int vnic_dev_open(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); +} + +int vnic_dev_open_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); +} + +static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + int err; + + if (vnic_dev_capable(vdev, CMD_HANG_RESET)) { + return vnic_dev_cmd(vdev, CMD_HANG_RESET, + &a0, &a1, wait); + } else { + err = vnic_dev_soft_reset(vdev, arg); + if (err) + return err; + return vnic_dev_init(vdev, 0); + } +} + +int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + + *done = 0; + + if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) { + err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS, + &a0, &a1, wait); + if (err) + return err; + } else { + return vnic_dev_soft_reset_done(vdev, done); + } + + *done = (a0 == 0); + + return 0; +} + +int vnic_dev_hang_notify(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = 1000; + return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); +} + +int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) +{ + u64 a0, a1; + int wait = 1000; + int err, i; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = 0; + + err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); + if (err) + return err; + + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = ((u8 *)&a0)[i]; + + return 0; +} + +int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int 
allmulti) +{ + u64 a0, a1 = 0; + int wait = 1000; + int err; + + a0 = (directed ? CMD_PFILTER_DIRECTED : 0) | + (multicast ? CMD_PFILTER_MULTICAST : 0) | + (broadcast ? CMD_PFILTER_BROADCAST : 0) | + (promisc ? CMD_PFILTER_PROMISCUOUS : 0) | + (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0); + + err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); + if (err) + pr_err("Can't set packet filter\n"); + + return err; +} + +int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + if (err) + pr_err("Can't add addr [%pM], %d\n", addr, err); + + return err; +} + +int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + int err; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = addr[i]; + + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + if (err) + pr_err("Can't del addr [%pM], %d\n", addr, err); + + return err; +} + +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + u8 ig_vlan_rewrite_mode) +{ + u64 a0 = ig_vlan_rewrite_mode, a1 = 0; + int wait = 1000; + + if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE)) + return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE, + &a0, &a1, wait); + else + return 0; +} + +static int vnic_dev_notify_setcmd(struct vnic_dev *vdev, + void *notify_addr, dma_addr_t notify_pa, u16 intr) +{ + u64 a0, a1; + int wait = 1000; + int r; + + memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify)); + vdev->notify = notify_addr; + vdev->notify_pa = notify_pa; + + a0 = (u64)notify_pa; + a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; + a1 += sizeof(struct vnic_devcmd_notify); + + r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + vdev->notify_sz = (r == 0) ? 
(u32)a1 : 0; + return r; +} + +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +{ + void *notify_addr; + dma_addr_t notify_pa; + + if (vdev->notify || vdev->notify_pa) { + pr_err("notify block %p still allocated", vdev->notify); + return -EINVAL; + } + + notify_addr = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + ¬ify_pa); + if (!notify_addr) + return -ENOMEM; + + return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); +} + +static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = 1000; + int err; + + a0 = 0; /* paddr = 0 to unset notify buffer */ + a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */ + a1 += sizeof(struct vnic_devcmd_notify); + + err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); + vdev->notify = NULL; + vdev->notify_pa = 0; + vdev->notify_sz = 0; + + return err; +} + +int vnic_dev_notify_unset(struct vnic_dev *vdev) +{ + if (vdev->notify) { + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + } + + return vnic_dev_notify_unsetcmd(vdev); +} + +static int vnic_dev_notify_ready(struct vnic_dev *vdev) +{ + u32 *words; + unsigned int nwords = vdev->notify_sz / 4; + unsigned int i; + u32 csum; + + if (!vdev->notify || !vdev->notify_sz) + return 0; + + do { + csum = 0; + memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz); + words = (u32 *)&vdev->notify_copy; + for (i = 1; i < nwords; i++) + csum += words[i]; + } while (csum != words[0]); + + return 1; +} + +int vnic_dev_init(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = 1000; + int r = 0; + + if (vnic_dev_capable(vdev, CMD_INIT)) + r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); + else { + vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait); + if (a0 & CMD_INITF_DEFAULT_MAC) { + /* Emulate these for old CMD_INIT_v1 which + * didn't pass a0 so no CMD_INITF_*. 
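+			 * The two devcmds below fetch the default MAC
+			 * address and register it explicitly.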
+ */ + vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait); + vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + } + } + return r; +} + +int vnic_dev_deinit(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = 1000; + + return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait); +} + +void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev) +{ + /* Default: hardware intr coal timer is in units of 1.5 usecs */ + vdev->intr_coal_timer_info.mul = 2; + vdev->intr_coal_timer_info.div = 3; + vdev->intr_coal_timer_info.max_usec = + vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff); +} + +int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) +{ + int wait = 1000; + int err; + + memset(vdev->args, 0, sizeof(vdev->args)); + + if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT)) + err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait); + else + err = ERR_ECMDUNKNOWN; + + /* Use defaults when firmware doesn't support the devcmd at all or + * supports it for only specific hardware + */ + if ((err == ERR_ECMDUNKNOWN) || + (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { + pr_warn("Using default conversion factor for interrupt coalesce timer\n"); + vnic_dev_intr_coal_timer_info_default(vdev); + return 0; + } + + if (!err) { + vdev->intr_coal_timer_info.mul = (u32) vdev->args[0]; + vdev->intr_coal_timer_info.div = (u32) vdev->args[1]; + vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2]; + } + + return err; +} + +int vnic_dev_link_status(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_state; +} + +u32 vnic_dev_port_speed(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.port_speed; +} + +u32 vnic_dev_msg_lvl(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.msglvl; +} + +u32 vnic_dev_mtu(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.mtu; +} + +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode) +{ + vdev->intr_mode = intr_mode; +} + +enum vnic_dev_intr_mode vnic_dev_get_intr_mode( + struct vnic_dev *vdev) +{ + return vdev->intr_mode; +} + +u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec) +{ + return (usec * vdev->intr_coal_timer_info.mul) / + vdev->intr_coal_timer_info.div; +} + +u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles) +{ + return (hw_cycles * vdev->intr_coal_timer_info.div) / + vdev->intr_coal_timer_info.mul; +} + +u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) +{ + return vdev->intr_coal_timer_info.max_usec; +} + +void vnic_dev_unregister(struct vnic_dev *vdev) +{ + if (vdev) { + if (vdev->notify) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + if (vdev->stats) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_stats), + vdev->stats, vdev->stats_pa); + if (vdev->fw_info) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + vdev->fw_info, vdev->fw_info_pa); + kfree(vdev); + } +} +EXPORT_SYMBOL(vnic_dev_unregister); + +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, + unsigned int num_bars) +{ + if (!vdev) { + vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC); + if (!vdev) + return NULL; + } + + vdev->priv = priv; + vdev->pdev = pdev; + + if (vnic_dev_discover_res(vdev, bar, 
num_bars)) + goto err_out; + + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + goto err_out; + + return vdev; + +err_out: + vnic_dev_unregister(vdev); + return NULL; +} +EXPORT_SYMBOL(vnic_dev_register); + +struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev) +{ + return vdev->pdev; +} +EXPORT_SYMBOL(vnic_dev_get_pdev); + +int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) +{ + u64 a0, a1 = len; + int wait = 1000; + dma_addr_t prov_pa; + void *prov_buf; + int ret; + + prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa); + if (!prov_buf) + return -ENOMEM; + + memcpy(prov_buf, buf, len); + + a0 = prov_pa; + + ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait); + + pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa); + + return ret; +} + +int vnic_dev_enable2(struct vnic_dev *vdev, int active) +{ + u64 a0, a1 = 0; + int wait = 1000; + + a0 = (active ? CMD_ENABLE2_ACTIVE : 0); + + return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait); +} + +static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int *status) +{ + u64 a0 = cmd, a1 = 0; + int wait = 1000; + int ret; + + ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait); + if (!ret) + *status = (int)a0; + + return ret; +} + +int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status) +{ + return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status); +} + +int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status) +{ + return vnic_dev_cmd_status(vdev, CMD_DEINIT, status); +} + +int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) +{ + u64 a0, a1; + int wait = 1000; + int i; + + for (i = 0; i < ETH_ALEN; i++) + ((u8 *)&a0)[i] = mac_addr[i]; + + return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait); +} + +/* vnic_dev_classifier: Add/Delete classifier entries + * @vdev: vdev of the device + * @cmd: CLSF_ADD for Add filter + * CLSF_DEL for Delete filter + * @entry: In case of ADD filter, the caller passes the RQ number in this + * variable. + * + * This function stores the filter_id returned by the firmware in the + * same variable before return; + * + * In case of DEL filter, the caller passes the RQ number. Return + * value is irrelevant. 
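+ *
+ *	   For example (illustrative), to steer a flow to RQ index n:
+ *	   fill a struct filter, set *entry = n and call with CLSF_ADD;
+ *	   on return *entry holds the firmware-assigned filter id.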
+ * @data: filter data + */ +int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, + struct filter *data) +{ + u64 a0, a1; + int wait = 1000; + dma_addr_t tlv_pa; + int ret = -EINVAL; + struct filter_tlv *tlv, *tlv_va; + struct filter_action *action; + u64 tlv_size; + + if (cmd == CLSF_ADD) { + tlv_size = sizeof(struct filter) + + sizeof(struct filter_action) + + 2 * sizeof(struct filter_tlv); + tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa); + if (!tlv_va) + return -ENOMEM; + tlv = tlv_va; + a0 = tlv_pa; + a1 = tlv_size; + memset(tlv, 0, tlv_size); + tlv->type = CLSF_TLV_FILTER; + tlv->length = sizeof(struct filter); + *(struct filter *)&tlv->val = *data; + + tlv = (struct filter_tlv *)((char *)tlv + + sizeof(struct filter_tlv) + + sizeof(struct filter)); + + tlv->type = CLSF_TLV_ACTION; + tlv->length = sizeof(struct filter_action); + action = (struct filter_action *)&tlv->val; + action->type = FILTER_ACTION_RQ_STEERING; + action->u.rq_idx = *entry; + + ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait); + *entry = (u16)a0; + pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa); + } else if (cmd == CLSF_DEL) { + a0 = *entry; + ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait); + } + + return ret; +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h new file mode 100644 index 000000000..1fb214efc --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -0,0 +1,139 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef _VNIC_DEV_H_ +#define _VNIC_DEV_H_ + +#include "vnic_resource.h" +#include "vnic_devcmd.h" + +#ifndef VNIC_PADDR_TARGET +#define VNIC_PADDR_TARGET 0x0000000000000000ULL +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return (((u64)readl(reg + 0x4UL) << 32) | + (u64)readl(reg)); +} + +static inline void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel(val >> 32, reg + 0x4UL); +} +#endif + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +enum vnic_dev_intr_mode { + VNIC_DEV_INTR_MODE_UNKNOWN, + VNIC_DEV_INTR_MODE_INTX, + VNIC_DEV_INTR_MODE_MSI, + VNIC_DEV_INTR_MODE_MSIX, +}; + +struct vnic_dev_bar { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned long len; +}; + +struct vnic_dev_ring { + void *descs; + size_t size; + dma_addr_t base_addr; + size_t base_align; + void *descs_unaligned; + size_t size_unaligned; + dma_addr_t base_addr_unaligned; + unsigned int desc_size; + unsigned int desc_count; + unsigned int desc_avail; +}; + +struct vnic_dev; +struct vnic_stats; + +void *vnic_dev_priv(struct vnic_dev *vdev); +unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type); +void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index); +void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void vnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait); +void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index); +void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev); +int vnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info); +int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, + void *value); +int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); +int vnic_dev_hang_notify(struct vnic_dev *vdev); +int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, + int broadcast, int promisc, int allmulti); +int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr); +int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr); +int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); +int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); +int vnic_dev_notify_unset(struct vnic_dev *vdev); +int vnic_dev_link_status(struct vnic_dev *vdev); +u32 vnic_dev_port_speed(struct vnic_dev *vdev); +u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); +u32 vnic_dev_mtu(struct vnic_dev *vdev); +int vnic_dev_close(struct vnic_dev *vdev); +int vnic_dev_enable_wait(struct vnic_dev *vdev); +int vnic_dev_disable(struct vnic_dev *vdev); +int vnic_dev_open(struct vnic_dev *vdev, int arg); +int vnic_dev_open_done(struct vnic_dev *vdev, int *done); +int vnic_dev_init(struct vnic_dev *vdev, int arg); +int vnic_dev_deinit(struct vnic_dev *vdev); +void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev); +int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev); +int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); +int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); +void vnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode); +enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev); +u32 vnic_dev_intr_coal_timer_usec_to_hw(struct 
vnic_dev *vdev, u32 usec); +u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles); +u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev); +void vnic_dev_unregister(struct vnic_dev *vdev); +int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, + u8 ig_vlan_rewrite_mode); +struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, + unsigned int num_bars); +struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev); +int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len); +int vnic_dev_enable2(struct vnic_dev *vdev, int active); +int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); +int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); +int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); +int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, + struct filter *data); + +#endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h new file mode 100644 index 000000000..435d0cd96 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -0,0 +1,632 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_DEVCMD_H_ +#define _VNIC_DEVCMD_H_ + +#define _CMD_NBITS 14 +#define _CMD_VTYPEBITS 10 +#define _CMD_FLAGSBITS 6 +#define _CMD_DIRBITS 2 + +#define _CMD_NMASK ((1 << _CMD_NBITS)-1) +#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) +#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) +#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) + +#define _CMD_NSHIFT 0 +#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) +#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) +#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) + +/* + * Direction bits (from host perspective). + */ +#define _CMD_DIR_NONE 0U +#define _CMD_DIR_WRITE 1U +#define _CMD_DIR_READ 2U +#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) + +/* + * Flag bits. + */ +#define _CMD_FLAGS_NONE 0U +#define _CMD_FLAGS_NOWAIT 1U + +/* + * vNIC type bits. + */ +#define _CMD_VTYPE_NONE 0U +#define _CMD_VTYPE_ENET 1U +#define _CMD_VTYPE_FC 2U +#define _CMD_VTYPE_SCSI 4U +#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) + +/* + * Used to create cmds.. +*/ +#define _CMDCF(dir, flags, vtype, nr) \ + (((dir) << _CMD_DIRSHIFT) | \ + ((flags) << _CMD_FLAGSSHIFT) | \ + ((vtype) << _CMD_VTYPESHIFT) | \ + ((nr) << _CMD_NSHIFT)) +#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) +#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) + +/* + * Used to decode cmds.. 
+*/ +#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) +#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) +#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) +#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) + +enum vnic_devcmd_cmd { + CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), + + /* + * mcpu fw info in mem: + * in: + * (u64)a0=paddr to struct vnic_devcmd_fw_info + * action: + * Fills in struct vnic_devcmd_fw_info (128 bytes) + * note: + * An old definition of CMD_MCPU_FW_INFO + */ + CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* + * mcpu fw info in mem: + * in: + * (u64)a0=paddr to struct vnic_devcmd_fw_info + * (u16)a1=size of the structure + * out: + * (u16)a1=0 for in:a1 = 0, + * data size actually written for other values. + * action: + * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0, + * first in:a1 bytes for 0 < in:a1 <= 132, + * 132 bytes for other values of in:a1. + * note: + * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1 + * for source compatibility. + */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1), + + /* dev-specific block member: + * in: (u16)a0=offset,(u8)a1=size + * out: a0=value */ + CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), + + /* stats clear */ + CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), + + /* stats dump in mem: (u64)a0=paddr to stats area, + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), + + /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7), + + /* set Rx packet filter for all: (u32)a0=filters (see CMD_PFILTER_*) */ + CMD_PACKET_FILTER_ALL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7), + + /* hang detection notification */ + CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8), + + /* MAC address in (u48)a0 */ + CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9), + + /* add addr from (u48)a0 */ + CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12), + + /* del addr from (u48)a0 */ + CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE, + _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13), + + /* add VLAN id in (u16)a0 */ + CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14), + + /* del VLAN id in (u16)a0 */ + CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), + + /* nic_cfg in (u32)a0 */ + CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), + + /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */ + CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18), + + /* initiate softreset */ + CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19), + + /* softreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20), + + /* set struct vnic_devcmd_notify buffer in mem: + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), + + /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct, + * (u8)a1=PXENV_UNDI_xxx */ + CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22), + + /* initiate open sequence 
(u32)a0=flags (see CMD_OPENF_*) */ + CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), + + /* open status: + * out: a0=0 open complete, a0=1 open in progress */ + CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), + + /* close vnic */ + CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ +/***** Replaced by CMD_INIT *****/ + CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), + + /* variant of CMD_INIT, with provisioning info + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info */ + CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), + + /* enable virtual link */ + CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* enable virtual link, waiting variant. */ + CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* disable virtual link */ + CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), + + /* stats dump sum of all vnic stats on same uplink in mem: + * (u64)a0=paddr + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), + + /* init status: + * out: a0=0 init complete, a0=1 init in progress + * if a0=0, a1=errno */ + CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), + + /* INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx */ + CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32), + + /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */ + CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33), + + /* undo initialize of virtual link */ + CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ + CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (u64)a0=paddr of arg + * (u32)a1=CMD_PERBI_XXX */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (u16)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + * a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=size of buffer specified in a0. + * out: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. 
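Each opcode in this enum (including CMD_CONFIG_INFO_GET, whose definition continues just below) is a single packed 32-bit word built by the _CMDCF()/_CMDC() helpers defined near the top of this header: command number in bits 0-13, vNIC type in bits 14-23, flags in bits 24-29 and direction in bits 30-31. A self-contained sketch of that encode/decode round trip, with the constants copied from the definitions above:

#include <stdio.h>
#include <stdint.h>

#define _CMD_NBITS      14
#define _CMD_VTYPEBITS  10
#define _CMD_FLAGSBITS  6
#define _CMD_DIRBITS    2

#define _CMD_NMASK      ((1u << _CMD_NBITS) - 1)
#define _CMD_VTYPEMASK  ((1u << _CMD_VTYPEBITS) - 1)
#define _CMD_FLAGSMASK  ((1u << _CMD_FLAGSBITS) - 1)
#define _CMD_DIRMASK    ((1u << _CMD_DIRBITS) - 1)

#define _CMD_NSHIFT     0
#define _CMD_VTYPESHIFT (_CMD_NSHIFT + _CMD_NBITS)
#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT + _CMD_VTYPEBITS)
#define _CMD_DIRSHIFT   (_CMD_FLAGSSHIFT + _CMD_FLAGSBITS)

#define _CMDCF(dir, flags, vtype, nr) \
	(((dir) << _CMD_DIRSHIFT) | ((flags) << _CMD_FLAGSSHIFT) | \
	 ((vtype) << _CMD_VTYPESHIFT) | ((nr) << _CMD_NSHIFT))

int main(void)
{
	/* CMD_MCPU_FW_INFO: direction RW (3), vtype ALL (ENET|FC|SCSI = 7), nr 1 */
	uint32_t cmd = _CMDCF(3u, 0u, 7u, 1u);

	printf("cmd=0x%08x dir=%u flags=%u vtype=%u nr=%u\n", cmd,
	       (cmd >> _CMD_DIRSHIFT) & _CMD_DIRMASK,
	       (cmd >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK,
	       (cmd >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK,
	       (cmd >> _CMD_NSHIFT) & _CMD_NMASK);
	return 0;
}

This prints 0xc001c001 for CMD_MCPU_FW_INFO, and the decode expressions are the same ones the _CMD_DIR()/_CMD_FLAGS()/_CMD_VTYPE()/_CMD_N() macros use.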
*/ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46), + + /* init_prov_info2: + * Variant of CMD_INIT_PROV_INFO, where it will not try to enable + * the vnic until CMD_ENABLE2 is issued. + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47), + + /* enable2: + * (u32)a0=0 ==> standby + * =CMD_ENABLE2_ACTIVE ==> active + */ + CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48), + + /* + * cmd_status: + * Returns the status of the specified command + * Input: + * a0 = command for which status is being queried. + * Possible values are: + * CMD_SOFT_RESET + * CMD_HANG_RESET + * CMD_OPEN + * CMD_INIT + * CMD_INIT_PROV_INFO + * CMD_DEINIT + * CMD_INIT_PROV_INFO2 + * CMD_ENABLE2 + * Output: + * if status == STAT_ERROR + * a0 = ERR_ENOTSUPPORTED - status for command in a0 is + * not supported + * if status == STAT_NONE + * a0 = status of the devcmd specified in a0 as follows. + * ERR_SUCCESS - command in a0 completed successfully + * ERR_EINPROGRESS - command in a0 is still in progress + */ + CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49), + + /* + * Returns interrupt coalescing timer conversion factors. + * After calling this devcmd, ENIC driver can convert + * interrupt coalescing timer in usec into CPU cycles as follows: + * + * intr_timer_cycles = intr_timer_usec * multiplier / divisor + * + * Interrupt coalescing timer in usecs can be obtained from + * CPU cycles as follows: + * + * intr_timer_usec = intr_timer_cycles * divisor / multiplier + * + * in: none + * out: (u32)a0 = multiplier + * (u32)a1 = divisor + * (u32)a2 = maximum timer value in usec + */ + CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50), + + /* + * Set the predefined mac address as default + * in: + * (u48)a0 = mac addr + */ + CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55), + + /* Update the provisioning info of the given VIF + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + + /* Add a filter. + * in: (u64) a0= filter address + * (u32) a1= size of filter + * out: (u32) a0=filter identifier + */ + CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58), + + /* Delete a filter. 
+ * in: (u32) a0=filter identifier + */ + CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59), + + /* Enable a Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u32) a1= command + */ + CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60), + + /* Disable a Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u32) a1= command + */ + CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61), + + /* Stats dump Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + * (u64) a1=host buffer addr for status dump + * (u32) a2=length of the buffer + */ + CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62), + + /* Clear stats for Queue Pair in User space NIC + * in: (u32) a0=Queue Pair number + */ + CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63), +}; + +/* CMD_ENABLE2 flags */ +#define CMD_ENABLE2_STANDBY 0x0 +#define CMD_ENABLE2_ACTIVE 0x1 + +/* flags for CMD_OPEN */ +#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ + +/* flags for CMD_INIT */ +#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ + +/* flags for CMD_PACKET_FILTER */ +#define CMD_PFILTER_DIRECTED 0x01 +#define CMD_PFILTER_MULTICAST 0x02 +#define CMD_PFILTER_BROADCAST 0x04 +#define CMD_PFILTER_PROMISCUOUS 0x08 +#define CMD_PFILTER_ALL_MULTICAST 0x10 + +/* Commands for CMD_QP_ENABLE/CM_QP_DISABLE */ +#define CMD_QP_RQWQ 0x0 + +/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */ +#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0 +#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1 +#define IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN 2 +#define IG_VLAN_REWRITE_MODE_PASS_THRU 3 + +enum vnic_devcmd_status { + STAT_NONE = 0, + STAT_BUSY = 1 << 0, /* cmd in progress */ + STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ +}; + +enum vnic_devcmd_error { + ERR_SUCCESS = 0, + ERR_EINVAL = 1, + ERR_EFAULT = 2, + ERR_EPERM = 3, + ERR_EBUSY = 4, + ERR_ECMDUNKNOWN = 5, + ERR_EBADSTATE = 6, + ERR_ENOMEM = 7, + ERR_ETIMEDOUT = 8, + ERR_ELINKDOWN = 9, + ERR_EMAXRES = 10, + ERR_ENOTSUPPORTED = 11, + ERR_EINPROGRESS = 12, + ERR_MAX +}; + +/* + * note: hw_version and asic_rev refer to the same thing, + * but have different formats. hw_version is + * a 32-byte string (e.g. "A2") and asic_rev is + * a 16-bit integer (e.g. 0xA2). + */ +struct vnic_devcmd_fw_info { + char fw_version[32]; + char fw_build[32]; + char hw_version[32]; + char hw_serial_number[32]; + u16 asic_type; + u16 asic_rev; +}; + +struct vnic_devcmd_notify { + u32 csum; /* checksum over following words */ + + u32 link_state; /* link up == 1 */ + u32 port_speed; /* effective port speed (rate limit) */ + u32 mtu; /* MTU */ + u32 msglvl; /* requested driver msg lvl */ + u32 uif; /* uplink interface */ + u32 status; /* status bits (see VNIC_STF_*) */ + u32 error; /* error code (see ERR_*) for first ERR */ + u32 link_down_cnt; /* running count of link down transitions */ + u32 perbi_rebuild_cnt; /* running count of perbi rebuilds */ +}; +#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ +#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */ +#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */ +/* all supported status flags */ +#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\ + VNIC_STF_STD_PAUSE |\ + VNIC_STF_PFC_PAUSE |\ + 0) + +struct vnic_devcmd_provinfo { + u8 oui[3]; + u8 type; + u8 data[0]; +}; + +/* These are used in flags field of different filters to denote + * valid fields used. 
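The flags word mentioned here follows the FILTER_FIELD_VALID() helper defined immediately below: field n of a filter is marked valid by bit (n - 1). A minimal sketch of that convention for the IPv4 5-tuple case:

#include <stdio.h>
#include <stdint.h>

/* Same convention as FILTER_FIELD_VALID() below: bit (n - 1) marks field n. */
#define FIELD_VALID(fld) (1u << ((fld) - 1))

int main(void)
{
	/* IPv4 5-tuple: protocol, src/dst address, src/dst port all valid. */
	uint32_t flags = FIELD_VALID(1) | FIELD_VALID(2) | FIELD_VALID(3) |
			 FIELD_VALID(4) | FIELD_VALID(5);

	printf("flags=0x%02x (expect 0x1f)\n", flags);
	return 0;
}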
+ */ +#define FILTER_FIELD_VALID(fld) (1 << (fld - 1)) + +#define FILTER_FIELDS_USNIC ( \ + FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2) | \ + FILTER_FIELD_VALID(3) | \ + FILTER_FIELD_VALID(4)) + +#define FILTER_FIELDS_IPV4_5TUPLE ( \ + FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2) | \ + FILTER_FIELD_VALID(3) | \ + FILTER_FIELD_VALID(4) | \ + FILTER_FIELD_VALID(5)) + +#define FILTER_FIELDS_MAC_VLAN ( \ + FILTER_FIELD_VALID(1) | \ + FILTER_FIELD_VALID(2)) + +#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2) +#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3) +#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4) + +struct filter_usnic_id { + u32 flags; + u16 vlan; + u16 ethtype; + u8 proto_version; + u32 usnic_id; +} __packed; + +#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1) +#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2) +#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3) +#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4) +#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5) + +/* Enums for the protocol field. */ +enum protocol_e { + PROTO_UDP = 0, + PROTO_TCP = 1, +}; + +struct filter_ipv4_5tuple { + u32 flags; + u32 protocol; + u32 src_addr; + u32 dst_addr; + u16 src_port; + u16 dst_port; +} __packed; + +#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1) +#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2) + +struct filter_mac_vlan { + u32 flags; + u16 vlan; + u8 mac_addr[6]; +} __packed; + +/* Specifies the filter_action type. */ +enum { + FILTER_ACTION_RQ_STEERING = 0, + FILTER_ACTION_MAX +}; + +struct filter_action { + u32 type; + union { + u32 rq_idx; + } u; +} __packed; + +/* Specifies the filter type. */ +enum filter_type { + FILTER_USNIC_ID = 0, + FILTER_IPV4_5TUPLE = 1, + FILTER_MAC_VLAN = 2, + FILTER_MAX +}; + +struct filter { + u32 type; + union { + struct filter_usnic_id usnic; + struct filter_ipv4_5tuple ipv4; + struct filter_mac_vlan mac_vlan; + } u; +} __packed; + +enum { + CLSF_TLV_FILTER = 0, + CLSF_TLV_ACTION = 1, +}; + +/* Maximum size of buffer to CMD_ADD_FILTER */ +#define FILTER_MAX_BUF_SIZE 100 + +struct filter_tlv { + u_int32_t type; + u_int32_t length; + u_int32_t val[0]; +}; + +enum { + CLSF_ADD = 0, + CLSF_DEL = 1, +}; + +/* + * Writing cmd register causes STAT_BUSY to get set in status register. + * When cmd completes, STAT_BUSY will be cleared. + * + * If cmd completed successfully STAT_ERROR will be clear + * and args registers contain cmd-specific results. + * + * If cmd error, STAT_ERROR will be set and args[0] contains error code. + * + * status register is read-only. While STAT_BUSY is set, + * all other register contents are read-only. + */ + +/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ +#define VNIC_DEVCMD_NARGS 15 +struct vnic_devcmd { + u32 status; /* RO */ + u32 cmd; /* RW */ + u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ +}; + +#endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h new file mode 100644 index 000000000..75aced2de --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h @@ -0,0 +1,59 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_ENIC_H_ +#define _VNIC_ENIC_H_ + +/* Device-specific region: enet configuration */ +struct vnic_enet_config { + u32 flags; + u32 wq_desc_count; + u32 rq_desc_count; + u16 mtu; + u16 intr_timer_deprecated; + u8 intr_timer_type; + u8 intr_mode; + char devname[16]; + u32 intr_timer_usec; + u16 loop_tag; + u16 vf_rq_count; + u16 num_arfs; +}; + +#define VENETF_TSO 0x1 /* TSO enabled */ +#define VENETF_LRO 0x2 /* LRO enabled */ +#define VENETF_RXCSUM 0x4 /* RX csum enabled */ +#define VENETF_TXCSUM 0x8 /* TX csum enabled */ +#define VENETF_RSS 0x10 /* RSS enabled */ +#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */ +#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */ +#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */ +#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */ +#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ +#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */ +#define VENETF_LOOP 0x800 /* Loopback enabled */ + +#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ +#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ + +#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */ +#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */ +#define VENET_INTR_MODE_INTX 2 /* Try INTx only */ + +#endif /* _VNIC_ENIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c new file mode 100644 index 000000000..0ca107f7b --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c @@ -0,0 +1,68 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
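vnic_intr_coalescing_timer_set() below converts a microsecond value into device timer units through vnic_dev_intr_coal_timer_usec_to_hw(); the arithmetic behind that is the multiplier/divisor pair reported by CMD_INTR_COAL_CONVERT, described earlier in vnic_devcmd.h. A minimal sketch of that conversion is shown here; the conversion factors and the clamp to the reported maximum are illustrative placeholders, not values taken from hardware.

#include <stdio.h>
#include <stdint.h>

/* Factors as reported by CMD_INTR_COAL_CONVERT; placeholder values here. */
struct coal_info {
	uint32_t mul;
	uint32_t div;
	uint32_t max_usec;
};

static uint32_t coal_usec_to_hw(const struct coal_info *ci, uint32_t usec)
{
	if (usec > ci->max_usec)
		usec = ci->max_usec;	/* clamp to the reported maximum */
	return (uint32_t)((uint64_t)usec * ci->mul / ci->div);
}

static uint32_t coal_hw_to_usec(const struct coal_info *ci, uint32_t hw)
{
	return (uint32_t)((uint64_t)hw * ci->div / ci->mul);
}

int main(void)
{
	struct coal_info ci = { .mul = 2, .div = 3, .max_usec = 355 };
	uint32_t usec = 125;
	uint32_t hw = coal_usec_to_hw(&ci, usec);

	printf("%u usec -> %u hw units -> %u usec\n",
	       usec, hw, coal_hw_to_usec(&ci, hw));
	return 0;
}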
+ * + */ + +#include +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_intr.h" + +void vnic_intr_free(struct vnic_intr *intr) +{ + intr->ctrl = NULL; +} + +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index) +{ + intr->index = index; + intr->vdev = vdev; + + intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); + if (!intr->ctrl) { + pr_err("Failed to hook INTR[%d].ctrl resource\n", index); + return -EINVAL; + } + + return 0; +} + +void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion) +{ + vnic_intr_coalescing_timer_set(intr, coalescing_timer); + iowrite32(coalescing_type, &intr->ctrl->coalescing_type); + iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); + iowrite32(0, &intr->ctrl->int_credits); +} + +void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, + u32 coalescing_timer) +{ + iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev, + coalescing_timer), &intr->ctrl->coalescing_timer); +} + +void vnic_intr_clean(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->int_credits); +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.h b/drivers/net/ethernet/cisco/enic/vnic_intr.h new file mode 100644 index 000000000..2b1636392 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_intr.h @@ -0,0 +1,111 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_INTR_H_ +#define _VNIC_INTR_H_ + +#include + +#include "vnic_dev.h" + +#define VNIC_INTR_TIMER_TYPE_ABS 0 +#define VNIC_INTR_TIMER_TYPE_QUIET 1 + +/* Interrupt control */ +struct vnic_intr_ctrl { + u32 coalescing_timer; /* 0x00 */ + u32 pad0; + u32 coalescing_value; /* 0x08 */ + u32 pad1; + u32 coalescing_type; /* 0x10 */ + u32 pad2; + u32 mask_on_assertion; /* 0x18 */ + u32 pad3; + u32 mask; /* 0x20 */ + u32 pad4; + u32 int_credits; /* 0x28 */ + u32 pad5; + u32 int_credit_return; /* 0x30 */ + u32 pad6; +}; + +struct vnic_intr { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ +}; + +static inline void vnic_intr_unmask(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->mask); +} + +static inline void vnic_intr_mask(struct vnic_intr *intr) +{ + iowrite32(1, &intr->ctrl->mask); +} + +static inline int vnic_intr_masked(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->mask); +} + +static inline void vnic_intr_return_credits(struct vnic_intr *intr, + unsigned int credits, int unmask, int reset_timer) +{ +#define VNIC_INTR_UNMASK_SHIFT 16 +#define VNIC_INTR_RESET_TIMER_SHIFT 17 + + u32 int_credit_return = (credits & 0xffff) | + (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | + (reset_timer ? 
(1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); + + iowrite32(int_credit_return, &intr->ctrl->int_credit_return); +} + +static inline unsigned int vnic_intr_credits(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->int_credits); +} + +static inline void vnic_intr_return_all_credits(struct vnic_intr *intr) +{ + unsigned int credits = vnic_intr_credits(intr); + int unmask = 1; + int reset_timer = 1; + + vnic_intr_return_credits(intr, credits, unmask, reset_timer); +} + +static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) +{ + /* read PBA without clearing */ + return ioread32(legacy_pba); +} + +void vnic_intr_free(struct vnic_intr *intr); +int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index); +void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion); +void vnic_intr_coalescing_timer_set(struct vnic_intr *intr, + u32 coalescing_timer); +void vnic_intr_clean(struct vnic_intr *intr); + +#endif /* _VNIC_INTR_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h new file mode 100644 index 000000000..995a50dd4 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h @@ -0,0 +1,72 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
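The vnic_intr_return_credits() helper above folds three pieces of information into one write of the int_credit_return register: the credit count in bits 0-15, an unmask request in bit 16 and a timer reset in bit 17. A stand-alone sketch of that packing:

#include <stdio.h>
#include <stdint.h>

#define VNIC_INTR_UNMASK_SHIFT      16
#define VNIC_INTR_RESET_TIMER_SHIFT 17

static uint32_t pack_credit_return(unsigned int credits, int unmask,
				   int reset_timer)
{
	return (credits & 0xffff) |
	       (unmask ? (1u << VNIC_INTR_UNMASK_SHIFT) : 0) |
	       (reset_timer ? (1u << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
}

int main(void)
{
	/* Return 8 credits, unmask the interrupt and restart its timer. */
	printf("0x%08x\n", pack_credit_return(8, 1, 1));	/* 0x00030008 */
	return 0;
}

vnic_intr_return_all_credits() is simply this call with the current int_credits value and both control bits set.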
+ * + */ + +#ifndef _VNIC_NIC_H_ +#define _VNIC_NIC_H_ + +#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0 +#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8) +#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL +#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8 +#define NIC_CFG_RSS_HASH_BITS (7UL << 16) +#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL +#define NIC_CFG_RSS_HASH_BITS_SHIFT 16 +#define NIC_CFG_RSS_BASE_CPU (7UL << 19) +#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL +#define NIC_CFG_RSS_BASE_CPU_SHIFT 19 +#define NIC_CFG_RSS_ENABLE (1UL << 22) +#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL +#define NIC_CFG_RSS_ENABLE_SHIFT 22 +#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23) +#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL +#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23 +#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24) +#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL +#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 + +#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2) +#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4) +#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5) +#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6) + +static inline void vnic_set_nic_cfg(u32 *nic_cfg, + u8 rss_default_cpu, u8 rss_hash_type, + u8 rss_hash_bits, u8 rss_base_cpu, + u8 rss_enable, u8 tso_ipid_split_en, + u8 ig_vlan_strip_en) +{ + *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) | + ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD) + << NIC_CFG_RSS_HASH_TYPE_SHIFT) | + ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD) + << NIC_CFG_RSS_HASH_BITS_SHIFT) | + ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD) + << NIC_CFG_RSS_BASE_CPU_SHIFT) | + ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD) + << NIC_CFG_RSS_ENABLE_SHIFT) | + ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD) + << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) | + ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD) + << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT); +} + +#endif /* _VNIC_NIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h new file mode 100644 index 000000000..e0a73f1ca --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h @@ -0,0 +1,76 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_RESOURCE_H_ +#define _VNIC_RESOURCE_H_ + +#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ +#define VNIC_RES_VERSION 0x00000000L +#define MGMTVNIC_MAGIC 0x544d474dL /* 'MGMT' */ +#define MGMTVNIC_VERSION 0x00000000L + +/* The MAC address assigned to the CFG vNIC is fixed. 
*/ +#define MGMTVNIC_MAC { 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d } + +/* vNIC resource types */ +enum vnic_res_type { + RES_TYPE_EOL, /* End-of-list */ + RES_TYPE_WQ, /* Work queues */ + RES_TYPE_RQ, /* Receive queues */ + RES_TYPE_CQ, /* Completion queues */ + RES_TYPE_RSVD1, + RES_TYPE_NIC_CFG, /* Enet NIC config registers */ + RES_TYPE_RSVD2, + RES_TYPE_RSVD3, + RES_TYPE_RSVD4, + RES_TYPE_RSVD5, + RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ + RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ + RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ + RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ + RES_TYPE_RSVD6, + RES_TYPE_RSVD7, + RES_TYPE_DEVCMD, /* Device command region */ + RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + + RES_TYPE_MAX, /* Count of resource types */ +}; + +struct vnic_resource_header { + u32 magic; + u32 version; +}; + +struct mgmt_barmap_hdr { + u32 magic; /* magic number */ + u32 version; /* header format version */ + u16 lif; /* loopback lif for mgmt frames */ + u16 pci_slot; /* installed pci slot */ + char serial[16]; /* card serial number */ +}; + +struct vnic_resource { + u8 type; + u8 bar; + u8 pad[2]; + u32 bar_offset; + u32 count; +}; + +#endif /* _VNIC_RESOURCE_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c new file mode 100644 index 000000000..c4b2183bf --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -0,0 +1,215 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
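vnic_rq_alloc_bufs() below does not allocate one vnic_rq_buf array for the whole ring; it carves the ring into blocks of 32 or 64 entries (the VNIC_RQ_BUF_BLK_* macros in vnic_rq.h, later in this patch) and then links every entry into a circular list. A sketch of just the sizing arithmetic those macros perform:

#include <stdio.h>

/* Mirrors VNIC_RQ_BUF_BLK_ENTRIES()/VNIC_RQ_BUF_BLKS_NEEDED() from vnic_rq.h:
 * rings smaller than 64 descriptors use 32-entry blocks, larger rings use 64. */
static unsigned int blk_entries(unsigned int entries)
{
	return entries < 64 ? 32 : 64;
}

static unsigned int blks_needed(unsigned int entries)
{
	return (entries + blk_entries(entries) - 1) / blk_entries(entries);
}

int main(void)
{
	unsigned int counts[] = { 32, 60, 64, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("%4u descriptors -> %u block(s) of %u entries\n",
		       counts[i], blks_needed(counts[i]), blk_entries(counts[i]));
	return 0;
}

The 4096-descriptor case is the VNIC_RQ_BUF_BLKS_MAX bound used to size the bufs[] pointer array in struct vnic_rq.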
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_rq.h" + +static int vnic_rq_alloc_bufs(struct vnic_rq *rq) +{ + struct vnic_rq_buf *buf; + unsigned int i, j, count = rq->ring.desc_count; + unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); + + for (i = 0; i < blks; i++) { + rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); + if (!rq->bufs[i]) + return -ENOMEM; + } + + for (i = 0; i < blks; i++) { + buf = rq->bufs[i]; + for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) { + buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j; + buf->desc = (u8 *)rq->ring.descs + + rq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = rq->bufs[0]; + break; + } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) { + buf->next = rq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + rq->to_use = rq->to_clean = rq->bufs[0]; + + return 0; +} + +void vnic_rq_free(struct vnic_rq *rq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = rq->vdev; + + vnic_dev_free_desc_ring(vdev, &rq->ring); + + for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) { + if (rq->bufs[i]) { + kfree(rq->bufs[i]); + rq->bufs[i] = NULL; + } + } + + rq->ctrl = NULL; +} + +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + rq->index = index; + rq->vdev = vdev; + + rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); + if (!rq->ctrl) { + pr_err("Failed to hook RQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_rq_disable(rq); + + err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); + if (err) + return err; + + err = vnic_rq_alloc_bufs(rq); + if (err) { + vnic_rq_free(rq); + return err; + } + + return 0; +} + +static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = rq->ring.desc_count; + + paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &rq->ctrl->ring_base); + iowrite32(count, &rq->ctrl->ring_size); + iowrite32(cq_index, &rq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); + iowrite32(0, &rq->ctrl->dropped_packet_count); + iowrite32(0, &rq->ctrl->error_status); + iowrite32(fetch_index, &rq->ctrl->fetch_index); + iowrite32(posted_index, &rq->ctrl->posted_index); + + rq->to_use = rq->to_clean = + &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; +} + +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u32 fetch_index = 0; + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } + + vnic_rq_init_start(rq, cq_index, + fetch_index, fetch_index, + error_interrupt_enable, + error_interrupt_offset); +} + +unsigned int vnic_rq_error_status(struct vnic_rq *rq) +{ + return ioread32(&rq->ctrl->error_status); +} + +void vnic_rq_enable(struct vnic_rq *rq) +{ + iowrite32(1, &rq->ctrl->enable); +} + +int vnic_rq_disable(struct vnic_rq *rq) +{ + unsigned int 
wait; + + iowrite32(0, &rq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 1000; wait++) { + if (!(ioread32(&rq->ctrl->running))) + return 0; + udelay(10); + } + + pr_err("Failed to disable RQ[%d]\n", rq->index); + + return -ETIMEDOUT; +} + +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) +{ + struct vnic_rq_buf *buf; + u32 fetch_index; + unsigned int count = rq->ring.desc_count; + int i; + + buf = rq->to_clean; + + for (i = 0; i < rq->ring.desc_count; i++) { + (*buf_clean)(rq, buf); + buf = buf->next; + } + rq->ring.desc_avail = rq->ring.desc_count - 1; + + /* Use current fetch_index as the ring starting point */ + fetch_index = ioread32(&rq->ctrl->fetch_index); + + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_index = 0; + } + rq->to_use = rq->to_clean = + &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)]; + iowrite32(fetch_index, &rq->ctrl->posted_index); + + vnic_dev_clear_desc_ring(&rq->ring); +} + diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h new file mode 100644 index 000000000..8111d5202 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -0,0 +1,334 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_RQ_H_ +#define _VNIC_RQ_H_ + +#include + +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* Receive queue control */ +struct vnic_rq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 error_interrupt_enable; /* 0x38 */ + u32 pad6; + u32 error_interrupt_offset; /* 0x40 */ + u32 pad7; + u32 error_status; /* 0x48 */ + u32 pad8; + u32 dropped_packet_count; /* 0x50 */ + u32 pad9; + u32 dropped_packet_count_rc; /* 0x58 */ + u32 pad10; +}; + +/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */ +#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32 +#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64 +#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \ + ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? 
\ + VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES)) +#define VNIC_RQ_BUF_BLK_SZ(entries) \ + (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf)) +#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries)) +#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) + +struct vnic_rq_buf { + struct vnic_rq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int os_buf_index; + unsigned int len; + unsigned int index; + void *desc; + uint64_t wr_id; +}; + +struct vnic_rq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX]; + struct vnic_rq_buf *to_use; + struct vnic_rq_buf *to_clean; + void *os_buf_head; + unsigned int pkts_outstanding; +#ifdef CONFIG_NET_RX_BUSY_POLL +#define ENIC_POLL_STATE_IDLE 0 +#define ENIC_POLL_STATE_NAPI (1 << 0) /* NAPI owns this poll */ +#define ENIC_POLL_STATE_POLL (1 << 1) /* poll owns this poll */ +#define ENIC_POLL_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this poll */ +#define ENIC_POLL_STATE_POLL_YIELD (1 << 3) /* poll yielded this poll */ +#define ENIC_POLL_YIELD (ENIC_POLL_STATE_NAPI_YIELD | \ + ENIC_POLL_STATE_POLL_YIELD) +#define ENIC_POLL_LOCKED (ENIC_POLL_STATE_NAPI | \ + ENIC_POLL_STATE_POLL) +#define ENIC_POLL_USER_PEND (ENIC_POLL_STATE_POLL | \ + ENIC_POLL_STATE_POLL_YIELD) + unsigned int bpoll_state; + spinlock_t bpoll_lock; +#endif /* CONFIG_NET_RX_BUSY_POLL */ +}; + +static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) +{ + /* how many does SW own? */ + return rq->ring.desc_avail; +} + +static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) +{ + /* how many does HW own? */ + return rq->ring.desc_count - rq->ring.desc_avail - 1; +} + +static inline void *vnic_rq_next_desc(struct vnic_rq *rq) +{ + return rq->to_use->desc; +} + +static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) +{ + return rq->to_use->index; +} + +static inline void vnic_rq_post(struct vnic_rq *rq, + void *os_buf, unsigned int os_buf_index, + dma_addr_t dma_addr, unsigned int len, + uint64_t wrid) +{ + struct vnic_rq_buf *buf = rq->to_use; + + buf->os_buf = os_buf; + buf->os_buf_index = os_buf_index; + buf->dma_addr = dma_addr; + buf->len = len; + buf->wr_id = wrid; + + buf = buf->next; + rq->to_use = buf; + rq->ring.desc_avail--; + + /* Move the posted_index every nth descriptor + */ + +#ifndef VNIC_RQ_RETURN_RATE +#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */ +#endif + + if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. 
+ */ + wmb(); + iowrite32(buf->index, &rq->ctrl->posted_index); + } +} + +static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) +{ + rq->ring.desc_avail += count; +} + +enum desc_return_options { + VNIC_RQ_RETURN_DESC, + VNIC_RQ_DEFER_RETURN_DESC, +}; + +static inline void vnic_rq_service(struct vnic_rq *rq, + struct cq_desc *cq_desc, u16 completed_index, + int desc_return, void (*buf_service)(struct vnic_rq *rq, + struct cq_desc *cq_desc, struct vnic_rq_buf *buf, + int skipped, void *opaque), void *opaque) +{ + struct vnic_rq_buf *buf; + int skipped; + + buf = rq->to_clean; + while (1) { + + skipped = (buf->index != completed_index); + + (*buf_service)(rq, cq_desc, buf, skipped, opaque); + + if (desc_return == VNIC_RQ_RETURN_DESC) + rq->ring.desc_avail++; + + rq->to_clean = buf->next; + + if (!skipped) + break; + + buf = rq->to_clean; + } +} + +static inline int vnic_rq_fill(struct vnic_rq *rq, + int (*buf_fill)(struct vnic_rq *rq)) +{ + int err; + + while (vnic_rq_desc_avail(rq) > 0) { + + err = (*buf_fill)(rq); + if (err) + return err; + } + + return 0; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) +{ + spin_lock_init(&rq->bpoll_lock); + rq->bpoll_state = ENIC_POLL_STATE_IDLE; +} + +static inline bool enic_poll_lock_napi(struct vnic_rq *rq) +{ + bool rc = true; + + spin_lock(&rq->bpoll_lock); + if (rq->bpoll_state & ENIC_POLL_LOCKED) { + WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); + rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD; + rc = false; + } else { + rq->bpoll_state = ENIC_POLL_STATE_NAPI; + } + spin_unlock(&rq->bpoll_lock); + + return rc; +} + +static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) +{ + bool rc = false; + + spin_lock(&rq->bpoll_lock); + WARN_ON(rq->bpoll_state & + (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD)); + if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) + rc = true; + rq->bpoll_state = ENIC_POLL_STATE_IDLE; + spin_unlock(&rq->bpoll_lock); + + return rc; +} + +static inline bool enic_poll_lock_poll(struct vnic_rq *rq) +{ + bool rc = true; + + spin_lock_bh(&rq->bpoll_lock); + if (rq->bpoll_state & ENIC_POLL_LOCKED) { + rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD; + rc = false; + } else { + rq->bpoll_state |= ENIC_POLL_STATE_POLL; + } + spin_unlock_bh(&rq->bpoll_lock); + + return rc; +} + +static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) +{ + bool rc = false; + + spin_lock_bh(&rq->bpoll_lock); + WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); + if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) + rc = true; + rq->bpoll_state = ENIC_POLL_STATE_IDLE; + spin_unlock_bh(&rq->bpoll_lock); + + return rc; +} + +static inline bool enic_poll_busy_polling(struct vnic_rq *rq) +{ + WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED)); + return rq->bpoll_state & ENIC_POLL_USER_PEND; +} + +#else + +static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) +{ +} + +static inline bool enic_poll_lock_napi(struct vnic_rq *rq) +{ + return true; +} + +static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) +{ + return false; +} + +static inline bool enic_poll_lock_poll(struct vnic_rq *rq) +{ + return false; +} + +static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) +{ + return false; +} + +static inline bool enic_poll_ll_polling(struct vnic_rq *rq) +{ + return false; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +void vnic_rq_free(struct vnic_rq *rq); +int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, + unsigned int desc_count, 
unsigned int desc_size); +void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +unsigned int vnic_rq_error_status(struct vnic_rq *rq); +void vnic_rq_enable(struct vnic_rq *rq); +int vnic_rq_disable(struct vnic_rq *rq); +void vnic_rq_clean(struct vnic_rq *rq, + void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); + +#endif /* _VNIC_RQ_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h new file mode 100644 index 000000000..881fa1854 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h @@ -0,0 +1,45 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _VNIC_RSS_H_ +#define _VNIC_RSS_H_ + +/* RSS key array */ + +#define ENIC_RSS_BYTES_PER_KEY 10 +#define ENIC_RSS_KEYS 4 +#define ENIC_RSS_LEN (ENIC_RSS_BYTES_PER_KEY * ENIC_RSS_KEYS) + +union vnic_rss_key { + struct { + u8 b[ENIC_RSS_BYTES_PER_KEY]; + u8 b_pad[6]; + } key[ENIC_RSS_KEYS]; + u64 raw[8]; +}; + +/* RSS cpu array */ +union vnic_rss_cpu { + struct { + u8 b[4] ; + u8 b_pad[4]; + } cpu[32]; + u64 raw[32]; +}; + +#endif /* _VNIC_RSS_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h new file mode 100644 index 000000000..74c81ed6f --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h @@ -0,0 +1,75 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
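The union vnic_rss_key in vnic_rss.h above stores the 40-byte RSS key as four 10-byte segments, each padded out to 16 bytes, so the whole union is exactly 64 bytes (eight u64 words). The quick compile-time check below uses the natural per-byte mapping (logical byte i into segment i/10, offset i%10); that mapping is assumed here for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ENIC_RSS_BYTES_PER_KEY 10
#define ENIC_RSS_KEYS 4

union rss_key {
	struct {
		uint8_t b[ENIC_RSS_BYTES_PER_KEY];
		uint8_t b_pad[6];
	} key[ENIC_RSS_KEYS];
	uint64_t raw[8];
};

int main(void)
{
	/* 4 segments x (10 key bytes + 6 pad bytes) == 64 bytes == 8 u64 words */
	static_assert(sizeof(union rss_key) == 64, "unexpected key layout");

	union rss_key k = { 0 };
	int i;

	for (i = 0; i < ENIC_RSS_BYTES_PER_KEY * ENIC_RSS_KEYS; i++)
		k.key[i / ENIC_RSS_BYTES_PER_KEY].b[i % ENIC_RSS_BYTES_PER_KEY] =
			(uint8_t)i;

	printf("key byte 39 = %u, stored at segment %d offset %d\n",
	       k.key[3].b[9], 39 / ENIC_RSS_BYTES_PER_KEY,
	       39 % ENIC_RSS_BYTES_PER_KEY);
	return 0;
}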
+ * + */ + +#ifndef _VNIC_STATS_H_ +#define _VNIC_STATS_H_ + +/* Tx statistics */ +struct vnic_tx_stats { + u64 tx_frames_ok; + u64 tx_unicast_frames_ok; + u64 tx_multicast_frames_ok; + u64 tx_broadcast_frames_ok; + u64 tx_bytes_ok; + u64 tx_unicast_bytes_ok; + u64 tx_multicast_bytes_ok; + u64 tx_broadcast_bytes_ok; + u64 tx_drops; + u64 tx_errors; + u64 tx_tso; + u64 rsvd[16]; +}; + +/* Rx statistics */ +struct vnic_rx_stats { + u64 rx_frames_ok; + u64 rx_frames_total; + u64 rx_unicast_frames_ok; + u64 rx_multicast_frames_ok; + u64 rx_broadcast_frames_ok; + u64 rx_bytes_ok; + u64 rx_unicast_bytes_ok; + u64 rx_multicast_bytes_ok; + u64 rx_broadcast_bytes_ok; + u64 rx_drop; + u64 rx_no_bufs; + u64 rx_errors; + u64 rx_rss; + u64 rx_crc_errors; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_to_max; + u64 rsvd[16]; +}; + +/* Generic statistics */ +struct vnic_gen_stats { + u64 dma_map_error; +}; + +struct vnic_stats { + struct vnic_tx_stats tx; + struct vnic_rx_stats rx; +}; + +#endif /* _VNIC_STATS_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c new file mode 100644 index 000000000..24ef8cd40 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c @@ -0,0 +1,79 @@ +/* + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include + +#include "vnic_vic.h" + +struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui, + const u8 type) +{ + struct vic_provinfo *vp; + + if (!oui) + return NULL; + + vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags); + if (!vp) + return NULL; + + memcpy(vp->oui, oui, sizeof(vp->oui)); + vp->type = type; + vp->length = htonl(sizeof(vp->num_tlvs)); + + return vp; +} + +void vic_provinfo_free(struct vic_provinfo *vp) +{ + kfree(vp); +} + +int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, + const void *value) +{ + struct vic_provinfo_tlv *tlv; + + if (!vp || !value) + return -EINVAL; + + if (ntohl(vp->length) + offsetof(struct vic_provinfo_tlv, value) + + length > VIC_PROVINFO_MAX_TLV_DATA) + return -ENOMEM; + + tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv + + ntohl(vp->length) - sizeof(vp->num_tlvs)); + + tlv->type = htons(type); + tlv->length = htons(length); + memcpy(tlv->value, value, length); + + vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1); + vp->length = htonl(ntohl(vp->length) + + offsetof(struct vic_provinfo_tlv, value) + length); + + return 0; +} + +size_t vic_provinfo_size(struct vic_provinfo *vp) +{ + return vp ? 
ntohl(vp->length) + sizeof(*vp) - sizeof(vp->num_tlvs) : 0; +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h new file mode 100644 index 000000000..9ef81f148 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h @@ -0,0 +1,83 @@ +/* + * Copyright 2010 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_VIC_H_ +#define _VNIC_VIC_H_ + +/* Note: All integer fields in NETWORK byte order */ + +/* Note: String field lengths include null char */ + +#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c } +#define VIC_PROVINFO_GENERIC_TYPE 0x4 + +enum vic_generic_prov_tlv_type { + VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR = 0, + VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR = 1, + VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR = 2, + VIC_GENERIC_PROV_TLV_CLUSTER_PORT_NAME_STR = 3, + VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR = 4, + VIC_GENERIC_PROV_TLV_CLUSTER_UUID_STR = 5, + VIC_GENERIC_PROV_TLV_CLUSTER_NAME_STR = 7, + VIC_GENERIC_PROV_TLV_HOST_UUID_STR = 8, + VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR = 9, + VIC_GENERIC_PROV_TLV_INCARNATION_NUMBER = 10, + VIC_GENERIC_PROV_TLV_OS_TYPE = 11, + VIC_GENERIC_PROV_TLV_OS_VENDOR = 12, + VIC_GENERIC_PROV_TLV_CLIENT_TYPE = 15, +}; + +enum vic_generic_prov_os_type { + VIC_GENERIC_PROV_OS_TYPE_UNKNOWN = 0, + VIC_GENERIC_PROV_OS_TYPE_ESX = 1, + VIC_GENERIC_PROV_OS_TYPE_LINUX = 2, + VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3, + VIC_GENERIC_PROV_OS_TYPE_SOLARIS = 4, +}; + +struct vic_provinfo { + u8 oui[3]; /* OUI of data provider */ + u8 type; /* provider-specific type */ + u32 length; /* length of data below */ + u32 num_tlvs; /* number of tlvs */ + struct vic_provinfo_tlv { + u16 type; + u16 length; + u8 value[0]; + } tlv[0]; +} __packed; + +#define VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data) \ + do { \ + err = vic_provinfo_add_tlv(vp, tlvtype, tlvlen, data); \ + if (err) \ + goto add_tlv_failure; \ + } while (0) + +#define VIC_PROVINFO_MAX_DATA 1385 +#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \ + sizeof(struct vic_provinfo)) + +struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui, + const u8 type); +void vic_provinfo_free(struct vic_provinfo *vp); +int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, + const void *value); +size_t vic_provinfo_size(struct vic_provinfo *vp); + +#endif /* _VNIC_VIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c new file mode 100644 index 000000000..b5a1c937f --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -0,0 +1,198 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "vnic_dev.h" +#include "vnic_wq.h" + +static int vnic_wq_alloc_bufs(struct vnic_wq *wq) +{ + struct vnic_wq_buf *buf; + unsigned int i, j, count = wq->ring.desc_count; + unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); + + for (i = 0; i < blks; i++) { + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); + if (!wq->bufs[i]) + return -ENOMEM; + } + + for (i = 0; i < blks; i++) { + buf = wq->bufs[i]; + for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { + buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; + buf->desc = (u8 *)wq->ring.descs + + wq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = wq->bufs[0]; + buf->next->prev = buf; + break; + } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { + buf->next = wq->bufs[i + 1]; + buf->next->prev = buf; + } else { + buf->next = buf + 1; + buf->next->prev = buf; + buf++; + } + } + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + return 0; +} + +void vnic_wq_free(struct vnic_wq *wq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = wq->vdev; + + vnic_dev_free_desc_ring(vdev, &wq->ring); + + for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { + if (wq->bufs[i]) { + kfree(wq->bufs[i]); + wq->bufs[i] = NULL; + } + } + + wq->ctrl = NULL; +} + +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); + if (!wq->ctrl) { + pr_err("Failed to hook WQ[%d] resource\n", index); + return -EINVAL; + } + + vnic_wq_disable(wq); + + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + if (err) + return err; + + err = vnic_wq_alloc_bufs(wq); + if (err) { + vnic_wq_free(wq); + return err; + } + + return 0; +} + +static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = wq->ring.desc_count; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(count, &wq->ctrl->ring_size); + iowrite32(fetch_index, &wq->ctrl->fetch_index); + iowrite32(posted_index, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); + + wq->to_use = wq->to_clean = + &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; +} + +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, 
+ unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + vnic_wq_init_start(wq, cq_index, 0, 0, + error_interrupt_enable, + error_interrupt_offset); +} + +unsigned int vnic_wq_error_status(struct vnic_wq *wq) +{ + return ioread32(&wq->ctrl->error_status); +} + +void vnic_wq_enable(struct vnic_wq *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int vnic_wq_disable(struct vnic_wq *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 1000; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(10); + } + + pr_err("Failed to disable WQ[%d]\n", wq->index); + + return -ETIMEDOUT; +} + +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) +{ + struct vnic_wq_buf *buf; + + buf = wq->to_clean; + + while (vnic_wq_desc_used(wq) > 0) { + + (*buf_clean)(wq, buf); + + buf = wq->to_clean = buf->next; + wq->ring.desc_avail++; + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + vnic_dev_clear_desc_ring(&wq->ring); +} diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h new file mode 100644 index 000000000..296154351 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -0,0 +1,178 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _VNIC_WQ_H_ +#define _VNIC_WQ_H_ + +#include + +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* Work queue control */ +struct vnic_wq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 dca_value; /* 0x38 */ + u32 pad6; + u32 error_interrupt_enable; /* 0x40 */ + u32 pad7; + u32 error_interrupt_offset; /* 0x48 */ + u32 pad8; + u32 error_status; /* 0x50 */ + u32 pad9; +}; + +struct vnic_wq_buf { + struct vnic_wq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int len; + unsigned int index; + int sop; + void *desc; + uint64_t wr_id; /* Cookie */ + uint8_t cq_entry; /* Gets completion event from hw */ + uint8_t desc_skip_cnt; /* Num descs to occupy */ + uint8_t compressed_send; /* Both hdr and payload in one desc */ + struct vnic_wq_buf *prev; +}; + +/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ +#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 +#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 +#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ + ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? 
\ + VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)) +#define VNIC_WQ_BUF_BLK_SZ(entries) \ + (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) +#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ + DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) +#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) + +struct vnic_wq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; + struct vnic_wq_buf *to_use; + struct vnic_wq_buf *to_clean; + unsigned int pkts_outstanding; +}; + +static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) +{ + /* how many does SW own? */ + return wq->ring.desc_avail; +} + +static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) +{ + /* how many does HW own? */ + return wq->ring.desc_count - wq->ring.desc_avail - 1; +} + +static inline void *vnic_wq_next_desc(struct vnic_wq *wq) +{ + return wq->to_use->desc; +} + +static inline void vnic_wq_doorbell(struct vnic_wq *wq) +{ + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(wq->to_use->index, &wq->ctrl->posted_index); +} + +static inline void vnic_wq_post(struct vnic_wq *wq, + void *os_buf, dma_addr_t dma_addr, + unsigned int len, int sop, int eop, + uint8_t desc_skip_cnt, uint8_t cq_entry, + uint8_t compressed_send, uint64_t wrid) +{ + struct vnic_wq_buf *buf = wq->to_use; + + buf->sop = sop; + buf->cq_entry = cq_entry; + buf->compressed_send = compressed_send; + buf->desc_skip_cnt = desc_skip_cnt; + buf->os_buf = eop ? os_buf : NULL; + buf->dma_addr = dma_addr; + buf->len = len; + buf->wr_id = wrid; + + buf = buf->next; + wq->to_use = buf; + + wq->ring.desc_avail -= desc_skip_cnt; +} + +static inline void vnic_wq_service(struct vnic_wq *wq, + struct cq_desc *cq_desc, u16 completed_index, + void (*buf_service)(struct vnic_wq *wq, + struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), + void *opaque) +{ + struct vnic_wq_buf *buf; + + buf = wq->to_clean; + while (1) { + + (*buf_service)(wq, cq_desc, buf, opaque); + + wq->ring.desc_avail++; + + wq->to_clean = buf->next; + + if (buf->index == completed_index) + break; + + buf = wq->to_clean; + } +} + +void vnic_wq_free(struct vnic_wq *wq); +int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, + unsigned int desc_count, unsigned int desc_size); +void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); +unsigned int vnic_wq_error_status(struct vnic_wq *wq); +void vnic_wq_enable(struct vnic_wq *wq); +int vnic_wq_disable(struct vnic_wq *wq); +void vnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); + +#endif /* _VNIC_WQ_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h new file mode 100644 index 000000000..c7021e3a6 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h @@ -0,0 +1,98 @@ +/* + * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _WQ_ENET_DESC_H_ +#define _WQ_ENET_DESC_H_ + +/* Ethernet work queue descriptor: 16B */ +struct wq_enet_desc { + __le64 address; + __le16 length; + __le16 mss_loopback; + __le16 header_length_flags; + __le16 vlan_tag; +}; + +#define WQ_ENET_ADDR_BITS 64 +#define WQ_ENET_LEN_BITS 14 +#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) +#define WQ_ENET_MSS_BITS 14 +#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) +#define WQ_ENET_MSS_SHIFT 2 +#define WQ_ENET_LOOPBACK_SHIFT 1 +#define WQ_ENET_HDRLEN_BITS 10 +#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) +#define WQ_ENET_FLAGS_OM_BITS 2 +#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) +#define WQ_ENET_FLAGS_EOP_SHIFT 12 +#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 +#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 +#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 + +#define WQ_ENET_OFFLOAD_MODE_CSUM 0 +#define WQ_ENET_OFFLOAD_MODE_RESERVED 1 +#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 +#define WQ_ENET_OFFLOAD_MODE_TSO 3 + +static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, + u64 address, u16 length, u16 mss, u16 header_length, + u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, + u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) +{ + desc->address = cpu_to_le64(address); + desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); + desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << + WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); + desc->header_length_flags = cpu_to_le16( + (header_length & WQ_ENET_HDRLEN_MASK) | + (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | + (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | + (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | + (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | + (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); + desc->vlan_tag = cpu_to_le16(vlan_tag); +} + +static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, + u64 *address, u16 *length, u16 *mss, u16 *header_length, + u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, + u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; + *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & + WQ_ENET_MSS_MASK; + *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> + WQ_ENET_LOOPBACK_SHIFT) & 1); + *header_length = le16_to_cpu(desc->header_length_flags) & + WQ_ENET_HDRLEN_MASK; + *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); + *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_EOP_SHIFT) & 1); + *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); + *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> 
+ WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); + *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); + *vlan_tag = le16_to_cpu(desc->vlan_tag); +} + +#endif /* _WQ_ENET_DESC_H_ */ diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig new file mode 100644 index 000000000..7ec2d74f9 --- /dev/null +++ b/drivers/net/ethernet/davicom/Kconfig @@ -0,0 +1,23 @@ +# +# Davicom device configuration +# + +config DM9000 + tristate "DM9000 support" + depends on ARM || BLACKFIN || MIPS || COLDFIRE || NIOS2 + select CRC32 + select MII + ---help--- + Support for DM9000 chipset. + + To compile this driver as a module, choose M here. The module + will be called dm9000. + +config DM9000_FORCE_SIMPLE_PHY_POLL + bool "Force simple NSR based PHY polling" + depends on DM9000 + ---help--- + This configuration forces the DM9000 to use the NSR's LinkStatus + bit to determine if the link is up or down instead of the more + costly MII PHY reads. Note, this will not work if the chip is + operating with an external PHY. diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile new file mode 100644 index 000000000..74b31f0eb --- /dev/null +++ b/drivers/net/ethernet/davicom/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Davicom device drivers. +# + +obj-$(CONFIG_DM9000) += dm9000.o diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c new file mode 100644 index 000000000..c0a781360 --- /dev/null +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -0,0 +1,1804 @@ +/* + * Davicom DM9000 Fast Ethernet driver for Linux. + * Copyright (C) 1997 Sten Wang + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. + * + * Additional updates, Copyright: + * Ben Dooks + * Sascha Hauer + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "dm9000.h" + +/* Board/System/Debug information/definition ---------------- */ + +#define DM9000_PHY 0x40 /* PHY address 0x01 */ + +#define CARDNAME "dm9000" +#define DRV_VERSION "1.31" + +/* + * Transmit timeout, default 5 seconds. + */ +static int watchdog = 5000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +/* + * Debug messages level + */ +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "dm9000 debug level (0-4)"); + +/* DM9000 register address locking. + * + * The DM9000 uses an address register to control where data written + * to the data register goes. This means that the address register + * must be preserved over interrupts or similar calls. 
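+ * A caller therefore snapshots the index register with readb(db->io_addr), + * performs its indexed accesses through ior()/iow(), and writes the saved + * value back before dropping the lock (see dm9000_phy_read() below).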
+ * + * During interrupt and other critical calls, a spinlock is used to + * protect the system, but the calls themselves save the address + * in the address register in case they are interrupting another + * access to the device. + * + * For general accesses a lock is provided so that calls which are + * allowed to sleep are serialised so that the address register does + * not need to be saved. This lock also serves to serialise access + * to the EEPROM and PHY access registers which are shared between + * these two devices. + */ + +/* The driver supports the original DM9000E, and now the two newer + * devices, DM9000A and DM9000B. + */ + +enum dm9000_type { + TYPE_DM9000E, /* original DM9000 */ + TYPE_DM9000A, + TYPE_DM9000B +}; + +/* Structure/enum declaration ------------------------------- */ +struct board_info { + + void __iomem *io_addr; /* Register I/O base address */ + void __iomem *io_data; /* Data I/O address */ + u16 irq; /* IRQ */ + + u16 tx_pkt_cnt; + u16 queue_pkt_len; + u16 queue_start_addr; + u16 queue_ip_summed; + u16 dbug_cnt; + u8 io_mode; /* 0:word, 2:byte */ + u8 phy_addr; + u8 imr_all; + + unsigned int flags; + unsigned int in_timeout:1; + unsigned int in_suspend:1; + unsigned int wake_supported:1; + + enum dm9000_type type; + + void (*inblk)(void __iomem *port, void *data, int length); + void (*outblk)(void __iomem *port, void *data, int length); + void (*dumpblk)(void __iomem *port, int length); + + struct device *dev; /* parent device */ + + struct resource *addr_res; /* resources found */ + struct resource *data_res; + struct resource *addr_req; /* resources requested */ + struct resource *data_req; + struct resource *irq_res; + + int irq_wake; + + struct mutex addr_lock; /* phy and eeprom access lock */ + + struct delayed_work phy_poll; + struct net_device *ndev; + + spinlock_t lock; + + struct mii_if_info mii; + u32 msg_enable; + u32 wake_state; + + int ip_summed; +}; + +/* debug code */ + +#define dm9000_dbg(db, lev, msg...) do { \ + if ((lev) < debug) { \ + dev_dbg(db->dev, msg); \ + } \ +} while (0) + +static inline struct board_info *to_dm9000_board(struct net_device *dev) +{ + return netdev_priv(dev); +} + +/* DM9000 network board routine ---------------------------- */ + +/* + * Read a byte from I/O port + */ +static u8 +ior(struct board_info *db, int reg) +{ + writeb(reg, db->io_addr); + return readb(db->io_data); +} + +/* + * Write a byte to I/O port + */ + +static void +iow(struct board_info *db, int reg, int value) +{ + writeb(reg, db->io_addr); + writeb(value, db->io_data); +} + +static void +dm9000_reset(struct board_info *db) +{ + dev_dbg(db->dev, "resetting device\n"); + + /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29 + * The essential point is that we have to do a double reset, and the + * instruction is to set LBK into MAC internal loopback mode. 
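+ * NCR_RST self-clears once the reset has taken effect, which is why the + * code below re-reads NCR after the 100us delay and complains if the bit + * is still set.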
+ */ + iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK); + udelay(100); /* Application note says at least 20 us */ + if (ior(db, DM9000_NCR) & 1) + dev_err(db->dev, "dm9000 did not respond to first reset\n"); + + iow(db, DM9000_NCR, 0); + iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK); + udelay(100); + if (ior(db, DM9000_NCR) & 1) + dev_err(db->dev, "dm9000 did not respond to second reset\n"); +} + +/* routines for sending block to chip */ + +static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) +{ + iowrite8_rep(reg, data, count); +} + +static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count) +{ + iowrite16_rep(reg, data, (count+1) >> 1); +} + +static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count) +{ + iowrite32_rep(reg, data, (count+3) >> 2); +} + +/* input block from chip to memory */ + +static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count) +{ + ioread8_rep(reg, data, count); +} + + +static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count) +{ + ioread16_rep(reg, data, (count+1) >> 1); +} + +static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count) +{ + ioread32_rep(reg, data, (count+3) >> 2); +} + +/* dump block from chip to null */ + +static void dm9000_dumpblk_8bit(void __iomem *reg, int count) +{ + int i; + int tmp; + + for (i = 0; i < count; i++) + tmp = readb(reg); +} + +static void dm9000_dumpblk_16bit(void __iomem *reg, int count) +{ + int i; + int tmp; + + count = (count + 1) >> 1; + + for (i = 0; i < count; i++) + tmp = readw(reg); +} + +static void dm9000_dumpblk_32bit(void __iomem *reg, int count) +{ + int i; + int tmp; + + count = (count + 3) >> 2; + + for (i = 0; i < count; i++) + tmp = readl(reg); +} + +/* + * Sleep, either by using msleep() or if we are suspending, then + * use mdelay() to sleep. 
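+ * msleep() may schedule, which must not happen while the tx-timeout handler + * holds db->lock with interrupts disabled, and is avoided while suspending, + * so those two paths busy-wait with mdelay() instead.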
+ */ +static void dm9000_msleep(struct board_info *db, unsigned int ms) +{ + if (db->in_suspend || db->in_timeout) + mdelay(ms); + else + msleep(ms); +} + +/* Read a word from phyxcer */ +static int +dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) +{ + struct board_info *db = netdev_priv(dev); + unsigned long flags; + unsigned int reg_save; + int ret; + + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + /* Issue phyxcer read command */ + iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_msleep(db, 1); /* Wait read complete */ + + spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ + + /* The read data keeps on REG_0D & REG_0E */ + ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + mutex_unlock(&db->addr_lock); + + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); + return ret; +} + +/* Write a word to phyxcer */ +static void +dm9000_phy_write(struct net_device *dev, + int phyaddr_unused, int reg, int value) +{ + struct board_info *db = netdev_priv(dev); + unsigned long flags; + unsigned long reg_save; + + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); + if (!db->in_timeout) + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + /* Fill the phyxcer register into REG_0C */ + iow(db, DM9000_EPAR, DM9000_PHY | reg); + + /* Fill the written data into REG_0D & REG_0E */ + iow(db, DM9000_EPDRL, value); + iow(db, DM9000_EPDRH, value >> 8); + + /* Issue phyxcer write command */ + iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); + + writeb(reg_save, db->io_addr); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_msleep(db, 1); /* Wait write complete */ + + spin_lock_irqsave(&db->lock, flags); + reg_save = readb(db->io_addr); + + iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ + + /* restore the previous address */ + writeb(reg_save, db->io_addr); + + spin_unlock_irqrestore(&db->lock, flags); + if (!db->in_timeout) + mutex_unlock(&db->addr_lock); +} + +/* dm9000_set_io + * + * select the specified set of io routines to use with the + * device + */ + +static void dm9000_set_io(struct board_info *db, int byte_width) +{ + /* use the size of the data resource to work out what IO + * routines we want to use + */ + + switch (byte_width) { + case 1: + db->dumpblk = dm9000_dumpblk_8bit; + db->outblk = dm9000_outblk_8bit; + db->inblk = dm9000_inblk_8bit; + break; + + + case 3: + dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n"); + case 2: + db->dumpblk = dm9000_dumpblk_16bit; + db->outblk = dm9000_outblk_16bit; + db->inblk = dm9000_inblk_16bit; + break; + + case 4: + default: + db->dumpblk = dm9000_dumpblk_32bit; + db->outblk = dm9000_outblk_32bit; + db->inblk = dm9000_inblk_32bit; + break; + } +} + +static void dm9000_schedule_poll(struct board_info *db) +{ + if (db->type == TYPE_DM9000E) + schedule_delayed_work(&db->phy_poll, HZ * 2); +} + +static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct board_info *dm = to_dm9000_board(dev); + + if 
(!netif_running(dev)) + return -EINVAL; + + return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL); +} + +static unsigned int +dm9000_read_locked(struct board_info *db, int reg) +{ + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(&db->lock, flags); + ret = ior(db, reg); + spin_unlock_irqrestore(&db->lock, flags); + + return ret; +} + +static int dm9000_wait_eeprom(struct board_info *db) +{ + unsigned int status; + int timeout = 8; /* wait max 8msec */ + + /* The DM9000 data sheets say we should be able to + * poll the ERRE bit in EPCR to wait for the EEPROM + * operation. From testing several chips, this bit + * does not seem to work. + * + * We attempt to use the bit, but fall back to the + * timeout (which is why we do not return an error + * on expiry) to say that the EEPROM operation has + * completed. + */ + + while (1) { + status = dm9000_read_locked(db, DM9000_EPCR); + + if ((status & EPCR_ERRE) == 0) + break; + + msleep(1); + + if (timeout-- < 0) { + dev_dbg(db->dev, "timeout waiting EEPROM\n"); + break; + } + } + + return 0; +} + +/* + * Read a word data from EEPROM + */ +static void +dm9000_read_eeprom(struct board_info *db, int offset, u8 *to) +{ + unsigned long flags; + + if (db->flags & DM9000_PLATF_NO_EEPROM) { + to[0] = 0xff; + to[1] = 0xff; + return; + } + + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + + iow(db, DM9000_EPAR, offset); + iow(db, DM9000_EPCR, EPCR_ERPRR); + + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_wait_eeprom(db); + + /* delay for at-least 150uS */ + msleep(1); + + spin_lock_irqsave(&db->lock, flags); + + iow(db, DM9000_EPCR, 0x0); + + to[0] = ior(db, DM9000_EPDRL); + to[1] = ior(db, DM9000_EPDRH); + + spin_unlock_irqrestore(&db->lock, flags); + + mutex_unlock(&db->addr_lock); +} + +/* + * Write a word data to SROM + */ +static void +dm9000_write_eeprom(struct board_info *db, int offset, u8 *data) +{ + unsigned long flags; + + if (db->flags & DM9000_PLATF_NO_EEPROM) + return; + + mutex_lock(&db->addr_lock); + + spin_lock_irqsave(&db->lock, flags); + iow(db, DM9000_EPAR, offset); + iow(db, DM9000_EPDRH, data[1]); + iow(db, DM9000_EPDRL, data[0]); + iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW); + spin_unlock_irqrestore(&db->lock, flags); + + dm9000_wait_eeprom(db); + + mdelay(1); /* wait at least 150uS to clear */ + + spin_lock_irqsave(&db->lock, flags); + iow(db, DM9000_EPCR, 0); + spin_unlock_irqrestore(&db->lock, flags); + + mutex_unlock(&db->addr_lock); +} + +/* ethtool ops */ + +static void dm9000_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct board_info *dm = to_dm9000_board(dev); + + strlcpy(info->driver, CARDNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, to_platform_device(dm->dev)->name, + sizeof(info->bus_info)); +} + +static u32 dm9000_get_msglevel(struct net_device *dev) +{ + struct board_info *dm = to_dm9000_board(dev); + + return dm->msg_enable; +} + +static void dm9000_set_msglevel(struct net_device *dev, u32 value) +{ + struct board_info *dm = to_dm9000_board(dev); + + dm->msg_enable = value; +} + +static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct board_info *dm = to_dm9000_board(dev); + + mii_ethtool_gset(&dm->mii, cmd); + return 0; +} + +static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct board_info *dm = to_dm9000_board(dev); + + return mii_ethtool_sset(&dm->mii, cmd); +} + +static int 
dm9000_nway_reset(struct net_device *dev) +{ + struct board_info *dm = to_dm9000_board(dev); + return mii_nway_restart(&dm->mii); +} + +static int dm9000_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct board_info *dm = to_dm9000_board(dev); + netdev_features_t changed = dev->features ^ features; + unsigned long flags; + + if (!(changed & NETIF_F_RXCSUM)) + return 0; + + spin_lock_irqsave(&dm->lock, flags); + iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); + spin_unlock_irqrestore(&dm->lock, flags); + + return 0; +} + +static u32 dm9000_get_link(struct net_device *dev) +{ + struct board_info *dm = to_dm9000_board(dev); + u32 ret; + + if (dm->flags & DM9000_PLATF_EXT_PHY) + ret = mii_link_ok(&dm->mii); + else + ret = dm9000_read_locked(dm, DM9000_NSR) & NSR_LINKST ? 1 : 0; + + return ret; +} + +#define DM_EEPROM_MAGIC (0x444D394B) + +static int dm9000_get_eeprom_len(struct net_device *dev) +{ + return 128; +} + +static int dm9000_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct board_info *dm = to_dm9000_board(dev); + int offset = ee->offset; + int len = ee->len; + int i; + + /* EEPROM access is aligned to two bytes */ + + if ((len & 1) != 0 || (offset & 1) != 0) + return -EINVAL; + + if (dm->flags & DM9000_PLATF_NO_EEPROM) + return -ENOENT; + + ee->magic = DM_EEPROM_MAGIC; + + for (i = 0; i < len; i += 2) + dm9000_read_eeprom(dm, (offset + i) / 2, data + i); + + return 0; +} + +static int dm9000_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct board_info *dm = to_dm9000_board(dev); + int offset = ee->offset; + int len = ee->len; + int done; + + /* EEPROM access is aligned to two bytes */ + + if (dm->flags & DM9000_PLATF_NO_EEPROM) + return -ENOENT; + + if (ee->magic != DM_EEPROM_MAGIC) + return -EINVAL; + + while (len > 0) { + if (len & 1 || offset & 1) { + int which = offset & 1; + u8 tmp[2]; + + dm9000_read_eeprom(dm, offset / 2, tmp); + tmp[which] = *data; + dm9000_write_eeprom(dm, offset / 2, tmp); + + done = 1; + } else { + dm9000_write_eeprom(dm, offset / 2, data); + done = 2; + } + + data += done; + offset += done; + len -= done; + } + + return 0; +} + +static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct board_info *dm = to_dm9000_board(dev); + + memset(w, 0, sizeof(struct ethtool_wolinfo)); + + /* note, we could probably support wake-phy too */ + w->supported = dm->wake_supported ? 
WAKE_MAGIC : 0; + w->wolopts = dm->wake_state; +} + +static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct board_info *dm = to_dm9000_board(dev); + unsigned long flags; + u32 opts = w->wolopts; + u32 wcr = 0; + + if (!dm->wake_supported) + return -EOPNOTSUPP; + + if (opts & ~WAKE_MAGIC) + return -EINVAL; + + if (opts & WAKE_MAGIC) + wcr |= WCR_MAGICEN; + + mutex_lock(&dm->addr_lock); + + spin_lock_irqsave(&dm->lock, flags); + iow(dm, DM9000_WCR, wcr); + spin_unlock_irqrestore(&dm->lock, flags); + + mutex_unlock(&dm->addr_lock); + + if (dm->wake_state != opts) { + /* change in wol state, update IRQ state */ + + if (!dm->wake_state) + irq_set_irq_wake(dm->irq_wake, 1); + else if (dm->wake_state && !opts) + irq_set_irq_wake(dm->irq_wake, 0); + } + + dm->wake_state = opts; + return 0; +} + +static const struct ethtool_ops dm9000_ethtool_ops = { + .get_drvinfo = dm9000_get_drvinfo, + .get_settings = dm9000_get_settings, + .set_settings = dm9000_set_settings, + .get_msglevel = dm9000_get_msglevel, + .set_msglevel = dm9000_set_msglevel, + .nway_reset = dm9000_nway_reset, + .get_link = dm9000_get_link, + .get_wol = dm9000_get_wol, + .set_wol = dm9000_set_wol, + .get_eeprom_len = dm9000_get_eeprom_len, + .get_eeprom = dm9000_get_eeprom, + .set_eeprom = dm9000_set_eeprom, +}; + +static void dm9000_show_carrier(struct board_info *db, + unsigned carrier, unsigned nsr) +{ + int lpa; + struct net_device *ndev = db->ndev; + struct mii_if_info *mii = &db->mii; + unsigned ncr = dm9000_read_locked(db, DM9000_NCR); + + if (carrier) { + lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); + dev_info(db->dev, + "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n", + ndev->name, (nsr & NSR_SPEED) ? 10 : 100, + (ncr & NCR_FDX) ? "full" : "half", lpa); + } else { + dev_info(db->dev, "%s: link down\n", ndev->name); + } +} + +static void +dm9000_poll_work(struct work_struct *w) +{ + struct delayed_work *dw = to_delayed_work(w); + struct board_info *db = container_of(dw, struct board_info, phy_poll); + struct net_device *ndev = db->ndev; + + if (db->flags & DM9000_PLATF_SIMPLE_PHY && + !(db->flags & DM9000_PLATF_EXT_PHY)) { + unsigned nsr = dm9000_read_locked(db, DM9000_NSR); + unsigned old_carrier = netif_carrier_ok(ndev) ? 1 : 0; + unsigned new_carrier; + + new_carrier = (nsr & NSR_LINKST) ? 
1 : 0; + + if (old_carrier != new_carrier) { + if (netif_msg_link(db)) + dm9000_show_carrier(db, new_carrier, nsr); + + if (!new_carrier) + netif_carrier_off(ndev); + else + netif_carrier_on(ndev); + } + } else + mii_check_media(&db->mii, netif_msg_link(db), 0); + + if (netif_running(ndev)) + dm9000_schedule_poll(db); +} + +/* dm9000_release_board + * + * release a board, and any mapped resources + */ + +static void +dm9000_release_board(struct platform_device *pdev, struct board_info *db) +{ + /* unmap our resources */ + + iounmap(db->io_addr); + iounmap(db->io_data); + + /* release the resources */ + + if (db->data_req) + release_resource(db->data_req); + kfree(db->data_req); + + if (db->addr_req) + release_resource(db->addr_req); + kfree(db->addr_req); +} + +static unsigned char dm9000_type_to_char(enum dm9000_type type) +{ + switch (type) { + case TYPE_DM9000E: return 'e'; + case TYPE_DM9000A: return 'a'; + case TYPE_DM9000B: return 'b'; + } + + return '?'; +} + +/* + * Set DM9000 multicast address + */ +static void +dm9000_hash_table_unlocked(struct net_device *dev) +{ + struct board_info *db = netdev_priv(dev); + struct netdev_hw_addr *ha; + int i, oft; + u32 hash_val; + u16 hash_table[4] = { 0, 0, 0, 0x8000 }; /* broadcast address */ + u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; + + dm9000_dbg(db, 1, "entering %s\n", __func__); + + for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) + iow(db, oft, dev->dev_addr[i]); + + if (dev->flags & IFF_PROMISC) + rcr |= RCR_PRMSC; + + if (dev->flags & IFF_ALLMULTI) + rcr |= RCR_ALL; + + /* the multicast address in Hash Table : 64 bits */ + netdev_for_each_mc_addr(ha, dev) { + hash_val = ether_crc_le(6, ha->addr) & 0x3f; + hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); + } + + /* Write the hash table to MAC MD table */ + for (i = 0, oft = DM9000_MAR; i < 4; i++) { + iow(db, oft++, hash_table[i]); + iow(db, oft++, hash_table[i] >> 8); + } + + iow(db, DM9000_RCR, rcr); +} + +static void +dm9000_hash_table(struct net_device *dev) +{ + struct board_info *db = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&db->lock, flags); + dm9000_hash_table_unlocked(dev); + spin_unlock_irqrestore(&db->lock, flags); +} + +static void +dm9000_mask_interrupts(struct board_info *db) +{ + iow(db, DM9000_IMR, IMR_PAR); +} + +static void +dm9000_unmask_interrupts(struct board_info *db) +{ + iow(db, DM9000_IMR, db->imr_all); +} + +/* + * Initialize dm9000 board + */ +static void +dm9000_init_dm9000(struct net_device *dev) +{ + struct board_info *db = netdev_priv(dev); + unsigned int imr; + unsigned int ncr; + + dm9000_dbg(db, 1, "entering %s\n", __func__); + + dm9000_reset(db); + dm9000_mask_interrupts(db); + + /* I/O mode */ + db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ + + /* Checksum mode */ + if (dev->hw_features & NETIF_F_RXCSUM) + iow(db, DM9000_RCSR, + (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0); + + iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ + iow(db, DM9000_GPR, 0); + + /* If we are dealing with DM9000B, some extra steps are required: a + * manual phy reset, and setting init params. + */ + if (db->type == TYPE_DM9000B) { + dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); + dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); + } + + ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; + + /* if wol is needed, then always set NCR_WAKEEN otherwise we end + * up dumping the wake events if we disable this. 
There is already + * a wake-mask in DM9000_WCR */ + if (db->wake_supported) + ncr |= NCR_WAKEEN; + + iow(db, DM9000_NCR, ncr); + + /* Program operating register */ + iow(db, DM9000_TCR, 0); /* TX Polling clear */ + iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */ + iow(db, DM9000_FCR, 0xff); /* Flow Control */ + iow(db, DM9000_SMCR, 0); /* Special Mode */ + /* clear TX status */ + iow(db, DM9000_NSR, NSR_WAKEST | NSR_TX2END | NSR_TX1END); + iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */ + + /* Set address filter table */ + dm9000_hash_table_unlocked(dev); + + imr = IMR_PAR | IMR_PTM | IMR_PRM; + if (db->type != TYPE_DM9000E) + imr |= IMR_LNKCHNG; + + db->imr_all = imr; + + /* Init Driver variable */ + db->tx_pkt_cnt = 0; + db->queue_pkt_len = 0; + dev->trans_start = jiffies; +} + +/* Our watchdog timed out. Called by the networking layer */ +static void dm9000_timeout(struct net_device *dev) +{ + struct board_info *db = netdev_priv(dev); + u8 reg_save; + unsigned long flags; + + /* Save previous register address */ + spin_lock_irqsave(&db->lock, flags); + db->in_timeout = 1; + reg_save = readb(db->io_addr); + + netif_stop_queue(dev); + dm9000_init_dm9000(dev); + dm9000_unmask_interrupts(db); + /* We can accept TX packets again */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + + /* Restore previous register address */ + writeb(reg_save, db->io_addr); + db->in_timeout = 0; + spin_unlock_irqrestore(&db->lock, flags); +} + +static void dm9000_send_packet(struct net_device *dev, + int ip_summed, + u16 pkt_len) +{ + struct board_info *dm = to_dm9000_board(dev); + + /* The DM9000 is not smart enough to leave fragmented packets alone. */ + if (dm->ip_summed != ip_summed) { + if (ip_summed == CHECKSUM_NONE) + iow(dm, DM9000_TCCR, 0); + else + iow(dm, DM9000_TCCR, TCCR_IP | TCCR_UDP | TCCR_TCP); + dm->ip_summed = ip_summed; + } + + /* Set TX length to DM9000 */ + iow(dm, DM9000_TXPLL, pkt_len); + iow(dm, DM9000_TXPLH, pkt_len >> 8); + + /* Issue TX polling command */ + iow(dm, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */ +} + +/* + * Hardware start transmission. + * Send a packet to media from the upper layer. 
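+ * + * The driver keeps at most two packets in flight: the first is started + * immediately via dm9000_send_packet(), a second is only copied into the + * chip's TX RAM and is kicked off from dm9000_tx_done() once the first + * completes; the queue is stopped while both slots are in use.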
+ */ +static int +dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned long flags; + struct board_info *db = netdev_priv(dev); + + dm9000_dbg(db, 3, "%s:\n", __func__); + + if (db->tx_pkt_cnt > 1) + return NETDEV_TX_BUSY; + + spin_lock_irqsave(&db->lock, flags); + + /* Move data to DM9000 TX RAM */ + writeb(DM9000_MWCMD, db->io_addr); + + (db->outblk)(db->io_data, skb->data, skb->len); + dev->stats.tx_bytes += skb->len; + + db->tx_pkt_cnt++; + /* TX control: First packet immediately send, second packet queue */ + if (db->tx_pkt_cnt == 1) { + dm9000_send_packet(dev, skb->ip_summed, skb->len); + } else { + /* Second packet */ + db->queue_pkt_len = skb->len; + db->queue_ip_summed = skb->ip_summed; + netif_stop_queue(dev); + } + + spin_unlock_irqrestore(&db->lock, flags); + + /* free this SKB */ + dev_consume_skb_any(skb); + + return NETDEV_TX_OK; +} + +/* + * DM9000 interrupt handler + * receive the packet to upper layer, free the transmitted packet + */ + +static void dm9000_tx_done(struct net_device *dev, struct board_info *db) +{ + int tx_status = ior(db, DM9000_NSR); /* Got TX status */ + + if (tx_status & (NSR_TX2END | NSR_TX1END)) { + /* One packet sent complete */ + db->tx_pkt_cnt--; + dev->stats.tx_packets++; + + if (netif_msg_tx_done(db)) + dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status); + + /* Queue packet check & send */ + if (db->tx_pkt_cnt > 0) + dm9000_send_packet(dev, db->queue_ip_summed, + db->queue_pkt_len); + netif_wake_queue(dev); + } +} + +struct dm9000_rxhdr { + u8 RxPktReady; + u8 RxStatus; + __le16 RxLen; +} __packed; + +/* + * Received a packet and pass to upper layer + */ +static void +dm9000_rx(struct net_device *dev) +{ + struct board_info *db = netdev_priv(dev); + struct dm9000_rxhdr rxhdr; + struct sk_buff *skb; + u8 rxbyte, *rdptr; + bool GoodPacket; + int RxLen; + + /* Check packet ready or not */ + do { + ior(db, DM9000_MRCMDX); /* Dummy read */ + + /* Get most updated data */ + rxbyte = readb(db->io_data); + + /* Status check: this byte must be 0 or 1 */ + if (rxbyte & DM9000_PKT_ERR) { + dev_warn(db->dev, "status check fail: %d\n", rxbyte); + iow(db, DM9000_RCR, 0x00); /* Stop Device */ + return; + } + + if (!(rxbyte & DM9000_PKT_RDY)) + return; + + /* A packet ready now & Get status/length */ + GoodPacket = true; + writeb(DM9000_MRCMD, db->io_addr); + + (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr)); + + RxLen = le16_to_cpu(rxhdr.RxLen); + + if (netif_msg_rx_status(db)) + dev_dbg(db->dev, "RX: status %02x, length %04x\n", + rxhdr.RxStatus, RxLen); + + /* Packet Status check */ + if (RxLen < 0x40) { + GoodPacket = false; + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "RX: Bad Packet (runt)\n"); + } + + if (RxLen > DM9000_PKT_MAX) { + dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen); + } + + /* rxhdr.RxStatus is identical to RSR register. 
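+ * Any of the error bits tested below marks the packet bad; FIFO-overflow, + * CRC and length errors are additionally counted in the interface statistics.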
*/ + if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE | + RSR_PLE | RSR_RWTO | + RSR_LCS | RSR_RF)) { + GoodPacket = false; + if (rxhdr.RxStatus & RSR_FOE) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "fifo error\n"); + dev->stats.rx_fifo_errors++; + } + if (rxhdr.RxStatus & RSR_CE) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "crc error\n"); + dev->stats.rx_crc_errors++; + } + if (rxhdr.RxStatus & RSR_RF) { + if (netif_msg_rx_err(db)) + dev_dbg(db->dev, "length error\n"); + dev->stats.rx_length_errors++; + } + } + + /* Move data from DM9000 */ + if (GoodPacket && + ((skb = netdev_alloc_skb(dev, RxLen + 4)) != NULL)) { + skb_reserve(skb, 2); + rdptr = (u8 *) skb_put(skb, RxLen - 4); + + /* Read received packet from RX SRAM */ + + (db->inblk)(db->io_data, rdptr, RxLen); + dev->stats.rx_bytes += RxLen; + + /* Pass to upper layer */ + skb->protocol = eth_type_trans(skb, dev); + if (dev->features & NETIF_F_RXCSUM) { + if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + } + netif_rx(skb); + dev->stats.rx_packets++; + + } else { + /* need to dump the packet's data */ + + (db->dumpblk)(db->io_data, RxLen); + } + } while (rxbyte & DM9000_PKT_RDY); +} + +static irqreturn_t dm9000_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct board_info *db = netdev_priv(dev); + int int_status; + unsigned long flags; + u8 reg_save; + + dm9000_dbg(db, 3, "entering %s\n", __func__); + + /* A real interrupt coming */ + + /* holders of db->lock must always block IRQs */ + spin_lock_irqsave(&db->lock, flags); + + /* Save previous register address */ + reg_save = readb(db->io_addr); + + dm9000_mask_interrupts(db); + /* Got DM9000 interrupt status */ + int_status = ior(db, DM9000_ISR); /* Got ISR */ + iow(db, DM9000_ISR, int_status); /* Clear ISR status */ + + if (netif_msg_intr(db)) + dev_dbg(db->dev, "interrupt status %02x\n", int_status); + + /* Received the coming packet */ + if (int_status & ISR_PRS) + dm9000_rx(dev); + + /* Trnasmit Interrupt check */ + if (int_status & ISR_PTS) + dm9000_tx_done(dev, db); + + if (db->type != TYPE_DM9000E) { + if (int_status & ISR_LNKCHNG) { + /* fire a link-change request */ + schedule_delayed_work(&db->phy_poll, 1); + } + } + + dm9000_unmask_interrupts(db); + /* Restore previous register address */ + writeb(reg_save, db->io_addr); + + spin_unlock_irqrestore(&db->lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct board_info *db = netdev_priv(dev); + unsigned long flags; + unsigned nsr, wcr; + + spin_lock_irqsave(&db->lock, flags); + + nsr = ior(db, DM9000_NSR); + wcr = ior(db, DM9000_WCR); + + dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr); + + if (nsr & NSR_WAKEST) { + /* clear, so we can avoid */ + iow(db, DM9000_NSR, NSR_WAKEST); + + if (wcr & WCR_LINKST) + dev_info(db->dev, "wake by link status change\n"); + if (wcr & WCR_SAMPLEST) + dev_info(db->dev, "wake by sample packet\n"); + if (wcr & WCR_MAGICST) + dev_info(db->dev, "wake by magic packet\n"); + if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST))) + dev_err(db->dev, "wake signalled with no reason? " + "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr); + } + + spin_unlock_irqrestore(&db->lock, flags); + + return (nsr & NSR_WAKEST) ? 
IRQ_HANDLED : IRQ_NONE; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + *Used by netconsole + */ +static void dm9000_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + dm9000_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* + * Open the interface. + * The interface is opened whenever "ifconfig" actives it. + */ +static int +dm9000_open(struct net_device *dev) +{ + struct board_info *db = netdev_priv(dev); + unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK; + + if (netif_msg_ifup(db)) + dev_dbg(db->dev, "enabling %s\n", dev->name); + + /* If there is no IRQ type specified, default to something that + * may work, and tell the user that this is a problem */ + + if (irqflags == IRQF_TRIGGER_NONE) + irqflags = irq_get_trigger_type(dev->irq); + + if (irqflags == IRQF_TRIGGER_NONE) + dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); + + irqflags |= IRQF_SHARED; + + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ + iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ + mdelay(1); /* delay needs by DM9000B */ + + /* Initialize DM9000 board */ + dm9000_init_dm9000(dev); + + if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) + return -EAGAIN; + /* Now that we have an interrupt handler hooked up we can unmask + * our interrupts + */ + dm9000_unmask_interrupts(db); + + /* Init driver variable */ + db->dbug_cnt = 0; + + mii_check_media(&db->mii, netif_msg_link(db), 1); + netif_start_queue(dev); + + /* Poll initial link status */ + schedule_delayed_work(&db->phy_poll, 1); + + return 0; +} + +static void +dm9000_shutdown(struct net_device *dev) +{ + struct board_info *db = netdev_priv(dev); + + /* RESET device */ + dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ + iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */ + dm9000_mask_interrupts(db); + iow(db, DM9000_RCR, 0x00); /* Disable RX */ +} + +/* + * Stop the interface. + * The interface is stopped when it is brought. 
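+ * It undoes dm9000_open(): the PHY poll work is cancelled, the queue and + * carrier are turned off, the interrupt is freed and the chip is shut down.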
+ */ +static int +dm9000_stop(struct net_device *ndev) +{ + struct board_info *db = netdev_priv(ndev); + + if (netif_msg_ifdown(db)) + dev_dbg(db->dev, "shutting down %s\n", ndev->name); + + cancel_delayed_work_sync(&db->phy_poll); + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + /* free interrupt */ + free_irq(ndev->irq, ndev); + + dm9000_shutdown(ndev); + + return 0; +} + +static const struct net_device_ops dm9000_netdev_ops = { + .ndo_open = dm9000_open, + .ndo_stop = dm9000_stop, + .ndo_start_xmit = dm9000_start_xmit, + .ndo_tx_timeout = dm9000_timeout, + .ndo_set_rx_mode = dm9000_hash_table, + .ndo_do_ioctl = dm9000_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_features = dm9000_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = dm9000_poll_controller, +#endif +}; + +static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) +{ + struct dm9000_plat_data *pdata; + struct device_node *np = dev->of_node; + const void *mac_addr; + + if (!IS_ENABLED(CONFIG_OF) || !np) + return ERR_PTR(-ENXIO); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + if (of_find_property(np, "davicom,ext-phy", NULL)) + pdata->flags |= DM9000_PLATF_EXT_PHY; + if (of_find_property(np, "davicom,no-eeprom", NULL)) + pdata->flags |= DM9000_PLATF_NO_EEPROM; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr)); + + return pdata; +} + +/* + * Search DM9000 board, allocate space and register it + */ +static int +dm9000_probe(struct platform_device *pdev) +{ + struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev); + struct board_info *db; /* Point a board information structure */ + struct net_device *ndev; + struct device *dev = &pdev->dev; + const unsigned char *mac_src; + int ret = 0; + int iosize; + int i; + u32 id_val; + int reset_gpios; + enum of_gpio_flags flags; + struct regulator *power; + + power = devm_regulator_get(dev, "vcc"); + if (IS_ERR(power)) { + if (PTR_ERR(power) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_dbg(dev, "no regulator provided\n"); + } else { + ret = regulator_enable(power); + if (ret != 0) { + dev_err(dev, + "Failed to enable power regulator: %d\n", ret); + return ret; + } + dev_dbg(dev, "regulator enabled\n"); + } + + reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, + &flags); + if (gpio_is_valid(reset_gpios)) { + ret = devm_gpio_request_one(dev, reset_gpios, flags, + "dm9000_reset"); + if (ret) { + dev_err(dev, "failed to request reset gpio %d: %d\n", + reset_gpios, ret); + return -ENODEV; + } + + /* According to manual PWRST# Low Period Min 1ms */ + msleep(2); + gpio_set_value(reset_gpios, 1); + /* Needs 3ms to read eeprom when PWRST is deasserted */ + msleep(4); + } + + if (!pdata) { + pdata = dm9000_parse_dt(&pdev->dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + + /* Init network device */ + ndev = alloc_etherdev(sizeof(struct board_info)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + dev_dbg(&pdev->dev, "dm9000_probe()\n"); + + /* setup board info structure */ + db = netdev_priv(ndev); + + db->dev = &pdev->dev; + db->ndev = ndev; + + spin_lock_init(&db->lock); + mutex_init(&db->addr_lock); + + INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work); + + db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 
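+ /* the address window, the data window and the interrupt are all mandatory; + * an optional second IRQ for wake-on-LAN is looked up further below */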
+ db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + + if (db->addr_res == NULL || db->data_res == NULL || + db->irq_res == NULL) { + dev_err(db->dev, "insufficient resources\n"); + ret = -ENOENT; + goto out; + } + + db->irq_wake = platform_get_irq(pdev, 1); + if (db->irq_wake >= 0) { + dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); + + ret = request_irq(db->irq_wake, dm9000_wol_interrupt, + IRQF_SHARED, dev_name(db->dev), ndev); + if (ret) { + dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret); + } else { + + /* test to see if irq is really wakeup capable */ + ret = irq_set_irq_wake(db->irq_wake, 1); + if (ret) { + dev_err(db->dev, "irq %d cannot set wakeup (%d)\n", + db->irq_wake, ret); + ret = 0; + } else { + irq_set_irq_wake(db->irq_wake, 0); + db->wake_supported = 1; + } + } + } + + iosize = resource_size(db->addr_res); + db->addr_req = request_mem_region(db->addr_res->start, iosize, + pdev->name); + + if (db->addr_req == NULL) { + dev_err(db->dev, "cannot claim address reg area\n"); + ret = -EIO; + goto out; + } + + db->io_addr = ioremap(db->addr_res->start, iosize); + + if (db->io_addr == NULL) { + dev_err(db->dev, "failed to ioremap address reg\n"); + ret = -EINVAL; + goto out; + } + + iosize = resource_size(db->data_res); + db->data_req = request_mem_region(db->data_res->start, iosize, + pdev->name); + + if (db->data_req == NULL) { + dev_err(db->dev, "cannot claim data reg area\n"); + ret = -EIO; + goto out; + } + + db->io_data = ioremap(db->data_res->start, iosize); + + if (db->io_data == NULL) { + dev_err(db->dev, "failed to ioremap data reg\n"); + ret = -EINVAL; + goto out; + } + + /* fill in parameters for net-dev structure */ + ndev->base_addr = (unsigned long)db->io_addr; + ndev->irq = db->irq_res->start; + + /* ensure at least we have a default set of IO routines */ + dm9000_set_io(db, iosize); + + /* check to see if anything is being over-ridden */ + if (pdata != NULL) { + /* check to see if the driver wants to over-ride the + * default IO width */ + + if (pdata->flags & DM9000_PLATF_8BITONLY) + dm9000_set_io(db, 1); + + if (pdata->flags & DM9000_PLATF_16BITONLY) + dm9000_set_io(db, 2); + + if (pdata->flags & DM9000_PLATF_32BITONLY) + dm9000_set_io(db, 4); + + /* check to see if there are any IO routine + * over-rides */ + + if (pdata->inblk != NULL) + db->inblk = pdata->inblk; + + if (pdata->outblk != NULL) + db->outblk = pdata->outblk; + + if (pdata->dumpblk != NULL) + db->dumpblk = pdata->dumpblk; + + db->flags = pdata->flags; + } + +#ifdef CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL + db->flags |= DM9000_PLATF_SIMPLE_PHY; +#endif + + dm9000_reset(db); + + /* try multiple times, DM9000 sometimes gets the read wrong */ + for (i = 0; i < 8; i++) { + id_val = ior(db, DM9000_VIDL); + id_val |= (u32)ior(db, DM9000_VIDH) << 8; + id_val |= (u32)ior(db, DM9000_PIDL) << 16; + id_val |= (u32)ior(db, DM9000_PIDH) << 24; + + if (id_val == DM9000_ID) + break; + dev_err(db->dev, "read wrong id 0x%08x\n", id_val); + } + + if (id_val != DM9000_ID) { + dev_err(db->dev, "wrong id: 0x%08x\n", id_val); + ret = -ENODEV; + goto out; + } + + /* Identify what type of DM9000 we are working on */ + + id_val = ior(db, DM9000_CHIPR); + dev_dbg(db->dev, "dm9000 revision 0x%02x\n", id_val); + + switch (id_val) { + case CHIPR_DM9000A: + db->type = TYPE_DM9000A; + break; + case CHIPR_DM9000B: + db->type = TYPE_DM9000B; + break; + default: + dev_dbg(db->dev, "ID %02x => defaulting to DM9000E\n", id_val); + db->type = TYPE_DM9000E; + } + + /* dm9000a/b are capable of hardware checksum 
offload */ + if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) { + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + ndev->features |= ndev->hw_features; + } + + /* from this point we assume that we have found a DM9000 */ + + ndev->netdev_ops = &dm9000_netdev_ops; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); + ndev->ethtool_ops = &dm9000_ethtool_ops; + + db->msg_enable = NETIF_MSG_LINK; + db->mii.phy_id_mask = 0x1f; + db->mii.reg_num_mask = 0x1f; + db->mii.force_media = 0; + db->mii.full_duplex = 0; + db->mii.dev = ndev; + db->mii.mdio_read = dm9000_phy_read; + db->mii.mdio_write = dm9000_phy_write; + + mac_src = "eeprom"; + + /* try reading the node address from the attached EEPROM */ + for (i = 0; i < 6; i += 2) + dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); + + if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { + mac_src = "platform data"; + memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN); + } + + if (!is_valid_ether_addr(ndev->dev_addr)) { + /* try reading from mac */ + + mac_src = "chip"; + for (i = 0; i < 6; i++) + ndev->dev_addr[i] = ior(db, i+DM9000_PAR); + } + + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " + "set using ifconfig\n", ndev->name); + + eth_hw_addr_random(ndev); + mac_src = "random"; + } + + + platform_set_drvdata(pdev, ndev); + ret = register_netdev(ndev); + + if (ret == 0) + printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n", + ndev->name, dm9000_type_to_char(db->type), + db->io_addr, db->io_data, ndev->irq, + ndev->dev_addr, mac_src); + return 0; + +out: + dev_err(db->dev, "not found (%d).\n", ret); + + dm9000_release_board(pdev, db); + free_netdev(ndev); + + return ret; +} + +static int +dm9000_drv_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct board_info *db; + + if (ndev) { + db = netdev_priv(ndev); + db->in_suspend = 1; + + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + /* only shutdown if not using WoL */ + if (!db->wake_state) + dm9000_shutdown(ndev); + } + return 0; +} + +static int +dm9000_drv_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct board_info *db = netdev_priv(ndev); + + if (ndev) { + if (netif_running(ndev)) { + /* reset if we were not in wake mode to ensure if + * the device was powered off it is in a known state */ + if (!db->wake_state) { + dm9000_init_dm9000(ndev); + dm9000_unmask_interrupts(db); + } + + netif_device_attach(ndev); + } + + db->in_suspend = 0; + } + return 0; +} + +static const struct dev_pm_ops dm9000_drv_pm_ops = { + .suspend = dm9000_drv_suspend, + .resume = dm9000_drv_resume, +}; + +static int +dm9000_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + unregister_netdev(ndev); + dm9000_release_board(pdev, netdev_priv(ndev)); + free_netdev(ndev); /* free device structure */ + + dev_dbg(&pdev->dev, "released and freed device\n"); + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id dm9000_of_matches[] = { + { .compatible = "davicom,dm9000", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, dm9000_of_matches); +#endif + +static struct platform_driver dm9000_driver = { + .driver = { + .name = "dm9000", + .pm = &dm9000_drv_pm_ops, + .of_match_table = of_match_ptr(dm9000_of_matches), + }, + .probe = dm9000_probe, + 
.remove = dm9000_drv_remove, +}; + +module_platform_driver(dm9000_driver); + +MODULE_AUTHOR("Sascha Hauer, Ben Dooks"); +MODULE_DESCRIPTION("Davicom DM9000 network driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:dm9000"); diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h new file mode 100644 index 000000000..9ce058ada --- /dev/null +++ b/drivers/net/ethernet/davicom/dm9000.h @@ -0,0 +1,180 @@ +/* + * dm9000 Ethernet + */ + +#ifndef _DM9000X_H_ +#define _DM9000X_H_ + +#define DM9000_ID 0x90000A46 + +/* although the registers are 16 bit, they are 32-bit aligned. + */ + +#define DM9000_NCR 0x00 +#define DM9000_NSR 0x01 +#define DM9000_TCR 0x02 +#define DM9000_TSR1 0x03 +#define DM9000_TSR2 0x04 +#define DM9000_RCR 0x05 +#define DM9000_RSR 0x06 +#define DM9000_ROCR 0x07 +#define DM9000_BPTR 0x08 +#define DM9000_FCTR 0x09 +#define DM9000_FCR 0x0A +#define DM9000_EPCR 0x0B +#define DM9000_EPAR 0x0C +#define DM9000_EPDRL 0x0D +#define DM9000_EPDRH 0x0E +#define DM9000_WCR 0x0F + +#define DM9000_PAR 0x10 +#define DM9000_MAR 0x16 + +#define DM9000_GPCR 0x1e +#define DM9000_GPR 0x1f +#define DM9000_TRPAL 0x22 +#define DM9000_TRPAH 0x23 +#define DM9000_RWPAL 0x24 +#define DM9000_RWPAH 0x25 + +#define DM9000_VIDL 0x28 +#define DM9000_VIDH 0x29 +#define DM9000_PIDL 0x2A +#define DM9000_PIDH 0x2B + +#define DM9000_CHIPR 0x2C +#define DM9000_SMCR 0x2F + +#define DM9000_ETXCSR 0x30 +#define DM9000_TCCR 0x31 +#define DM9000_RCSR 0x32 + +#define CHIPR_DM9000A 0x19 +#define CHIPR_DM9000B 0x1A + +#define DM9000_MRCMDX 0xF0 +#define DM9000_MRCMD 0xF2 +#define DM9000_MRRL 0xF4 +#define DM9000_MRRH 0xF5 +#define DM9000_MWCMDX 0xF6 +#define DM9000_MWCMD 0xF8 +#define DM9000_MWRL 0xFA +#define DM9000_MWRH 0xFB +#define DM9000_TXPLL 0xFC +#define DM9000_TXPLH 0xFD +#define DM9000_ISR 0xFE +#define DM9000_IMR 0xFF + +#define NCR_EXT_PHY (1<<7) +#define NCR_WAKEEN (1<<6) +#define NCR_FCOL (1<<4) +#define NCR_FDX (1<<3) + +#define NCR_RESERVED (3<<1) +#define NCR_MAC_LBK (1<<1) +#define NCR_RST (1<<0) + +#define NSR_SPEED (1<<7) +#define NSR_LINKST (1<<6) +#define NSR_WAKEST (1<<5) +#define NSR_TX2END (1<<3) +#define NSR_TX1END (1<<2) +#define NSR_RXOV (1<<1) + +#define TCR_TJDIS (1<<6) +#define TCR_EXCECM (1<<5) +#define TCR_PAD_DIS2 (1<<4) +#define TCR_CRC_DIS2 (1<<3) +#define TCR_PAD_DIS1 (1<<2) +#define TCR_CRC_DIS1 (1<<1) +#define TCR_TXREQ (1<<0) + +#define TSR_TJTO (1<<7) +#define TSR_LC (1<<6) +#define TSR_NC (1<<5) +#define TSR_LCOL (1<<4) +#define TSR_COL (1<<3) +#define TSR_EC (1<<2) + +#define RCR_WTDIS (1<<6) +#define RCR_DIS_LONG (1<<5) +#define RCR_DIS_CRC (1<<4) +#define RCR_ALL (1<<3) +#define RCR_RUNT (1<<2) +#define RCR_PRMSC (1<<1) +#define RCR_RXEN (1<<0) + +#define RSR_RF (1<<7) +#define RSR_MF (1<<6) +#define RSR_LCS (1<<5) +#define RSR_RWTO (1<<4) +#define RSR_PLE (1<<3) +#define RSR_AE (1<<2) +#define RSR_CE (1<<1) +#define RSR_FOE (1<<0) + +#define WCR_LINKEN (1 << 5) +#define WCR_SAMPLEEN (1 << 4) +#define WCR_MAGICEN (1 << 3) +#define WCR_LINKST (1 << 2) +#define WCR_SAMPLEST (1 << 1) +#define WCR_MAGICST (1 << 0) + +#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 ) +#define FCTR_LWOT(ot) ( ot & 0xf ) + +#define IMR_PAR (1<<7) +#define IMR_ROOM (1<<3) +#define IMR_ROM (1<<2) +#define IMR_PTM (1<<1) +#define IMR_PRM (1<<0) + +#define ISR_ROOS (1<<3) +#define ISR_ROS (1<<2) +#define ISR_PTS (1<<1) +#define ISR_PRS (1<<0) +#define ISR_CLR_STATUS (ISR_ROOS | ISR_ROS | ISR_PTS | ISR_PRS) + +#define EPCR_REEP (1<<5) +#define EPCR_WEP (1<<4) +#define 
EPCR_EPOS (1<<3) +#define EPCR_ERPRR (1<<2) +#define EPCR_ERPRW (1<<1) +#define EPCR_ERRE (1<<0) + +#define GPCR_GEP_CNTL (1<<0) + +#define TCCR_IP (1<<0) +#define TCCR_TCP (1<<1) +#define TCCR_UDP (1<<2) + +#define RCSR_UDP_BAD (1<<7) +#define RCSR_TCP_BAD (1<<6) +#define RCSR_IP_BAD (1<<5) +#define RCSR_UDP (1<<4) +#define RCSR_TCP (1<<3) +#define RCSR_IP (1<<2) +#define RCSR_CSUM (1<<1) +#define RCSR_DISCARD (1<<0) + +#define DM9000_PKT_RDY 0x01 /* Packet ready to receive */ +#define DM9000_PKT_ERR 0x02 +#define DM9000_PKT_MAX 1536 /* Received packet max size */ + +/* DM9000A / DM9000B definitions */ + +#define IMR_LNKCHNG (1<<5) +#define IMR_UNDERRUN (1<<4) + +#define ISR_LNKCHNG (1<<5) +#define ISR_UNDERRUN (1<<4) + +/* Davicom MII registers. + */ + +#define MII_DM_DSPCR 0x1b /* DSP Control Register */ + +#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ + +#endif /* _DM9000X_H_ */ + diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig new file mode 100644 index 000000000..68262aa57 --- /dev/null +++ b/drivers/net/ethernet/dec/Kconfig @@ -0,0 +1,21 @@ +# +# Digital Equipment Inc network device configuration +# + +config NET_VENDOR_DEC + bool "Digital Equipment devices" + default y + depends on PCI || EISA || CARDBUS + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about DEC cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_DEC +source "drivers/net/ethernet/dec/tulip/Kconfig" +endif # NET_VENDOR_DEC diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile new file mode 100644 index 000000000..32993fccb --- /dev/null +++ b/drivers/net/ethernet/dec/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Digital Equipment Inc. network device drivers. +# + +obj-$(CONFIG_NET_TULIP) += tulip/ diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c new file mode 100644 index 000000000..369858272 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/21142.c @@ -0,0 +1,257 @@ +/* + drivers/net/ethernet/dec/tulip/21142.c + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller + Hardware Reference Manual" is currently available at : + http://developer.intel.com/design/network/manuals/278074.htm + + Please submit bugs to http://bugzilla.kernel.org/ . +*/ + +#include +#include "tulip.h" + + +static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, }; +u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, }; +static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; + + +/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list + of available transceivers. 
*/ +void t21142_media_task(struct work_struct *work) +{ + struct tulip_private *tp = + container_of(work, struct tulip_private, media_work); + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->base_addr; + int csr12 = ioread32(ioaddr + CSR12); + int next_tick = 60*HZ; + int new_csr6 = 0; + int csr14 = ioread32(ioaddr + CSR14); + + /* CSR12[LS10,LS100] are not reliable during autonegotiation */ + if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) + csr12 |= 6; + if (tulip_debug > 2) + dev_info(&dev->dev, "21143 negotiation status %08x, %s\n", + csr12, medianame[dev->if_port]); + if (tulip_media_cap[dev->if_port] & MediaIsMII) { + if (tulip_check_duplex(dev) < 0) { + netif_carrier_off(dev); + next_tick = 3*HZ; + } else { + netif_carrier_on(dev); + next_tick = 60*HZ; + } + } else if (tp->nwayset) { + /* Don't screw up a negotiated session! */ + if (tulip_debug > 1) + dev_info(&dev->dev, + "Using NWay-set %s media, csr12 %08x\n", + medianame[dev->if_port], csr12); + } else if (tp->medialock) { + ; + } else if (dev->if_port == 3) { + if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */ + if (tulip_debug > 1) + dev_info(&dev->dev, + "No 21143 100baseTx link beat, %08x, trying NWay\n", + csr12); + t21142_start_nway(dev); + next_tick = 3*HZ; + } + } else if ((csr12 & 0x7000) != 0x5000) { + /* Negotiation failed. Search media types. */ + if (tulip_debug > 1) + dev_info(&dev->dev, + "21143 negotiation failed, status %08x\n", + csr12); + if (!(csr12 & 4)) { /* 10mbps link beat good. */ + new_csr6 = 0x82420000; + dev->if_port = 0; + iowrite32(0, ioaddr + CSR13); + iowrite32(0x0003FFFF, ioaddr + CSR14); + iowrite16(t21142_csr15[dev->if_port], ioaddr + CSR15); + iowrite32(t21142_csr13[dev->if_port], ioaddr + CSR13); + } else { + /* Select 100mbps port to check for link beat. */ + new_csr6 = 0x83860000; + dev->if_port = 3; + iowrite32(0, ioaddr + CSR13); + iowrite32(0x0003FFFF, ioaddr + CSR14); + iowrite16(8, ioaddr + CSR15); + iowrite32(1, ioaddr + CSR13); + } + if (tulip_debug > 1) + dev_info(&dev->dev, "Testing new 21143 media %s\n", + medianame[dev->if_port]); + if (new_csr6 != (tp->csr6 & ~0x00D5)) { + tp->csr6 &= 0x00D5; + tp->csr6 |= new_csr6; + iowrite32(0x0301, ioaddr + CSR12); + tulip_restart_rxtx(tp); + } + next_tick = 3*HZ; + } + + /* mod_timer synchronizes us with potential add_timer calls + * from interrupts. + */ + mod_timer(&tp->timer, RUN_AT(next_tick)); +} + + +void t21142_start_nway(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int csr14 = ((tp->sym_advertise & 0x0780) << 9) | + ((tp->sym_advertise & 0x0020) << 1) | 0xffbf; + + dev->if_port = 0; + tp->nway = tp->mediasense = 1; + tp->nwayset = tp->lpar = 0; + if (tulip_debug > 1) + netdev_dbg(dev, "Restarting 21143 autonegotiation, csr14=%08x\n", + csr14); + iowrite32(0x0001, ioaddr + CSR13); + udelay(100); + iowrite32(csr14, ioaddr + CSR14); + tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0); + iowrite32(tp->csr6, ioaddr + CSR6); + if (tp->mtable && tp->mtable->csr15dir) { + iowrite32(tp->mtable->csr15dir, ioaddr + CSR15); + iowrite32(tp->mtable->csr15val, ioaddr + CSR15); + } else + iowrite16(0x0008, ioaddr + CSR15); + iowrite32(0x1301, ioaddr + CSR12); /* Trigger NWAY. 
*/ +} + + + +void t21142_lnk_change(struct net_device *dev, int csr5) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int csr12 = ioread32(ioaddr + CSR12); + int csr14 = ioread32(ioaddr + CSR14); + + /* CSR12[LS10,LS100] are not reliable during autonegotiation */ + if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) + csr12 |= 6; + if (tulip_debug > 1) + dev_info(&dev->dev, + "21143 link status interrupt %08x, CSR5 %x, %08x\n", + csr12, csr5, csr14); + + /* If NWay finished and we have a negotiated partner capability. */ + if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) { + int setup_done = 0; + int negotiated = tp->sym_advertise & (csr12 >> 16); + tp->lpar = csr12 >> 16; + tp->nwayset = 1; + /* If partner cannot negotiate, it is 10Mbps Half Duplex */ + if (!(csr12 & 0x8000)) dev->if_port = 0; + else if (negotiated & 0x0100) dev->if_port = 5; + else if (negotiated & 0x0080) dev->if_port = 3; + else if (negotiated & 0x0040) dev->if_port = 4; + else if (negotiated & 0x0020) dev->if_port = 0; + else { + tp->nwayset = 0; + if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180)) + dev->if_port = 3; + } + tp->full_duplex = (tulip_media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0; + + if (tulip_debug > 1) { + if (tp->nwayset) + dev_info(&dev->dev, + "Switching to %s based on link negotiation %04x & %04x = %04x\n", + medianame[dev->if_port], + tp->sym_advertise, tp->lpar, + negotiated); + else + dev_info(&dev->dev, + "Autonegotiation failed, using %s, link beat status %04x\n", + medianame[dev->if_port], csr12); + } + + if (tp->mtable) { + int i; + for (i = 0; i < tp->mtable->leafcount; i++) + if (tp->mtable->mleaf[i].media == dev->if_port) { + int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65))); + tp->cur_index = i; + tulip_select_media(dev, startup); + setup_done = 1; + break; + } + } + if ( ! setup_done) { + tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000) | (tp->csr6 & 0x20ff); + if (tp->full_duplex) + tp->csr6 |= 0x0200; + iowrite32(1, ioaddr + CSR13); + } +#if 0 /* Restart shouldn't be needed. */ + iowrite32(tp->csr6 | RxOn, ioaddr + CSR6); + if (tulip_debug > 2) + netdev_dbg(dev, " Restarting Tx and Rx, CSR5 is %08x\n", + ioread32(ioaddr + CSR5)); +#endif + tulip_start_rxtx(tp); + if (tulip_debug > 2) + netdev_dbg(dev, " Setting CSR6 %08x/%x CSR12 %08x\n", + tp->csr6, ioread32(ioaddr + CSR6), + ioread32(ioaddr + CSR12)); + } else if ((tp->nwayset && (csr5 & 0x08000000) && + (dev->if_port == 3 || dev->if_port == 5) && + (csr12 & 2) == 2) || + (tp->nway && (csr5 & (TPLnkFail)))) { + /* Link blew? Maybe restart NWay. */ + del_timer_sync(&tp->timer); + t21142_start_nway(dev); + tp->timer.expires = RUN_AT(3*HZ); + add_timer(&tp->timer); + } else if (dev->if_port == 3 || dev->if_port == 5) { + if (tulip_debug > 1) + dev_info(&dev->dev, "21143 %s link beat %s\n", + medianame[dev->if_port], + (csr12 & 2) ? "failed" : "good"); + if ((csr12 & 2) && ! tp->medialock) { + del_timer_sync(&tp->timer); + t21142_start_nway(dev); + tp->timer.expires = RUN_AT(3*HZ); + add_timer(&tp->timer); + } else if (dev->if_port == 5) + iowrite32(csr14 & ~0x080, ioaddr + CSR14); + } else if (dev->if_port == 0 || dev->if_port == 4) { + if ((csr12 & 4) == 0) + dev_info(&dev->dev, "21143 10baseT link beat good\n"); + } else if (!(csr12 & 4)) { /* 10mbps link beat good. 
*/ + if (tulip_debug) + dev_info(&dev->dev, "21143 10mbps sensed media\n"); + dev->if_port = 0; + } else if (tp->nwayset) { + if (tulip_debug) + dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n", + medianame[dev->if_port], tp->csr6); + } else { /* 100mbps link beat good. */ + if (tulip_debug) + dev_info(&dev->dev, "21143 100baseTx sensed media\n"); + dev->if_port = 3; + tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff); + iowrite32(0x0003FF7F, ioaddr + CSR14); + iowrite32(0x0301, ioaddr + CSR12); + tulip_restart_rxtx(tp); + } +} + + diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig new file mode 100644 index 000000000..eb9ba6e97 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -0,0 +1,172 @@ +# +# Tulip family network device configuration +# + +config NET_TULIP + bool "DEC - Tulip devices" + depends on (PCI || EISA || CARDBUS) + ---help--- + This selects the "Tulip" family of EISA/PCI network cards. + +if NET_TULIP + +config DE2104X + tristate "Early DECchip Tulip (dc2104x) PCI support" + depends on PCI + select CRC32 + ---help--- + This driver is developed for the SMC EtherPower series Ethernet + cards and also works with cards based on the DECchip + 21040 (Tulip series) chips. Some LinkSys PCI cards are + of this type. (If your card is NOT SMC EtherPower 10/100 PCI + (smc9332dst), you can also try the driver for "Generic DECchip" + cards, below. However, most people with a network card of this type + will say Y here.) Do read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module will + be called de2104x. + +config DE2104X_DSL + int "Descriptor Skip Length in 32 bit longwords" + depends on DE2104X + range 0 31 + default 0 + ---help--- + Setting this value allows to align ring buffer descriptors into their + own cache lines. Value of 4 corresponds to the typical 32 byte line + (the descriptor is 16 bytes). This is necessary on systems that lack + cache coherence, an example is PowerMac 5500. Otherwise 0 is safe. + Default is 0, and range is 0 to 31. + +config TULIP + tristate "DECchip Tulip (dc2114x) PCI support" + depends on PCI + select CRC32 + ---help--- + This driver is developed for the SMC EtherPower series Ethernet + cards and also works with cards based on the DECchip + 21140 (Tulip series) chips. Some LinkSys PCI cards are + of this type. (If your card is NOT SMC EtherPower 10/100 PCI + (smc9332dst), you can also try the driver for "Generic DECchip" + cards, above. However, most people with a network card of this type + will say Y here.) Do read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module will + be called tulip. + +config TULIP_MWI + bool "New bus configuration" + depends on TULIP + ---help--- + This configures your Tulip card specifically for the card and + system cache line size type you are using. + + This is experimental code, not yet tested on many boards. + + If unsure, say N. + +config TULIP_MMIO + bool "Use PCI shared mem for NIC registers" + depends on TULIP + ---help--- + Use PCI shared memory for the NIC registers, rather than going through + the Tulip's PIO (programmed I/O ports). Faster, but could produce + obscure bugs if your mainboard has memory controller timing issues. + If in doubt, say N. 
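(Editor's aside, not part of the original patch.) The arithmetic behind the DE2104X_DSL help text above: a Tulip descriptor is four 32-bit longwords (16 bytes), and each skip longword adds 4 bytes of padding between descriptors, so a value of 4 gives a 32-byte stride, i.e. one descriptor per typical 32-byte cache line. A small stand-alone sketch, mirroring the de_desc layout defined later in this patch (the userspace types and printf are only for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define DSL 4			/* example value for CONFIG_DE2104X_DSL */

	struct de_desc {
		uint32_t opts1;		/* status / ownership */
		uint32_t opts2;		/* control / buffer length */
		uint32_t addr1;		/* buffer address */
		uint32_t addr2;		/* second buffer / unused */
	#if DSL
		uint32_t skip[DSL];	/* padding added by the descriptor skip length */
	#endif
	};

	int main(void)
	{
		/* 16 bytes of descriptor + 4 * DSL bytes of padding = 32 with DSL = 4,
		 * so each descriptor starts on its own 32-byte cache line. */
		printf("descriptor stride with DSL=%d: %zu bytes\n",
		       DSL, sizeof(struct de_desc));
		return 0;
	}

In the driver itself the same value is also programmed into the chip's bus-mode register (DescSkipLen = DSL << 2, further below), so the hardware steps over the padding when it walks the descriptor ring.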
+ +config TULIP_NAPI + bool "Use RX polling (NAPI)" + depends on TULIP + ---help--- + NAPI is a new driver API designed to reduce CPU and interrupt load + when the driver is receiving lots of packets from the card. It is + still somewhat experimental and thus not yet enabled by default. + + If your estimated Rx load is 10kpps or more, or if the card will be + deployed on potentially unfriendly networks (e.g. in a firewall), + then say Y here. + + If in doubt, say N. + +config TULIP_NAPI_HW_MITIGATION + bool "Use Interrupt Mitigation" + depends on TULIP_NAPI + ---help--- + Use HW to reduce RX interrupts. Not strictly necessary since NAPI + reduces RX interrupts by itself. Interrupt mitigation reduces RX + interrupts even at low levels of traffic at the cost of a small + latency. + + If in doubt, say Y. + +config TULIP_DM910X + def_bool y + depends on TULIP && SPARC + +config DE4X5 + tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" + depends on (PCI || EISA) + depends on VIRT_TO_BUS || ALPHA || PPC || SPARC + select CRC32 + ---help--- + This is support for the DIGITAL series of PCI/EISA Ethernet cards. + These include the DE425, DE434, DE435, DE450 and DE500 models. If + you have a network card of this type, say Y and read the + Ethernet-HOWTO, available from + . More specific + information is contained in + . + + To compile this driver as a module, choose M here. The module will + be called de4x5. + +config WINBOND_840 + tristate "Winbond W89c840 Ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + This driver is for the Winbond W89c840 chip. It also works with + the TX9882 chip on the Compex RL100-ATX board. + More specific information and updates are available from + . + +config DM9102 + tristate "Davicom DM910x/DM980x support" + depends on PCI + select CRC32 + ---help--- + This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from + Davicom (). If you have such a network + (Ethernet) card, say Y. Some information is contained in the file + . + + To compile this driver as a module, choose M here. The module will + be called dmfe. + +config ULI526X + tristate "ULi M526x controller support" + depends on PCI + select CRC32 + ---help--- + This driver is for ULi M5261/M5263 10/100M Ethernet Controller + (). + + To compile this driver as a module, choose M here. The module will + be called uli526x. + +config PCMCIA_XIRCOM + tristate "Xircom CardBus support" + depends on CARDBUS + ---help--- + This driver is for the Digital "Tulip" Ethernet CardBus adapters. + It should work with most DEC 21*4*-based chips/ethercards, as well + as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and + ASIX. + + To compile this driver as a module, choose M here. The module will + be called xircom_cb. If unsure, say N. + +endif # NET_TULIP diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile new file mode 100644 index 000000000..5e8be38b4 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Linux "Tulip" family network device drivers. +# + +ccflags-$(CONFIG_NET_TULIP) := -DDEBUG + +obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o +obj-$(CONFIG_DM9102) += dmfe.o +obj-$(CONFIG_WINBOND_840) += winbond-840.o +obj-$(CONFIG_DE2104X) += de2104x.o +obj-$(CONFIG_TULIP) += tulip.o +obj-$(CONFIG_DE4X5) += de4x5.o +obj-$(CONFIG_ULI526X) += uli526x.o + +# Declare multi-part drivers. 
+ +tulip-objs := eeprom.o interrupt.o media.o \ + timer.o tulip_core.o \ + 21142.o pnic.o pnic2.o diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c new file mode 100644 index 000000000..a02ecc4f9 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -0,0 +1,2207 @@ +/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */ +/* + Copyright 2001,2003 Jeff Garzik + + Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c] + Written/copyright 1994-2001 by Donald Becker. [tulip.c] + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + See the file COPYING in this distribution for more information. + + TODO, in rough priority order: + * Support forcing media type with a module parameter, + like dl2k.c/sundance.c + * Constants (module parms?) for Rx work limit + * Complete reset on PciErr + * Jumbo frames / dev->change_mtu + * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error + * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error + * Implement Tx software interrupt mitigation via + Tx descriptor bit + + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "de2104x" +#define DRV_VERSION "0.7" +#define DRV_RELDATE "Mar 17, 2004" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* These identify the driver base version and may not be removed. */ +static char version[] = +"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")"; + +MODULE_AUTHOR("Jeff Garzik "); +MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static int debug = -1; +module_param (debug, int, 0); +MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number"); + +/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ +#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ + defined(CONFIG_SPARC) || defined(__ia64__) || \ + defined(__sh__) || defined(__mips__) +static int rx_copybreak = 1518; +#else +static int rx_copybreak = 100; +#endif +module_param (rx_copybreak, int, 0); +MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied"); + +#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* Descriptor skip length in 32 bit longwords. */ +#ifndef CONFIG_DE2104X_DSL +#define DSL 0 +#else +#define DSL CONFIG_DE2104X_DSL +#endif + +#define DE_RX_RING_SIZE 64 +#define DE_TX_RING_SIZE 64 +#define DE_RING_BYTES \ + ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ + (sizeof(struct de_desc) * DE_TX_RING_SIZE)) +#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1)) +#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1)) +#define TX_BUFFS_AVAIL(CP) \ + (((CP)->tx_tail <= (CP)->tx_head) ? 
\ + (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \ + (CP)->tx_tail - (CP)->tx_head - 1) + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ +#define RX_OFFSET 2 + +#define DE_SETUP_SKB ((struct sk_buff *) 1) +#define DE_DUMMY_SKB ((struct sk_buff *) 2) +#define DE_SETUP_FRAME_WORDS 96 +#define DE_EEPROM_WORDS 256 +#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16)) +#define DE_MAX_MEDIA 5 + +#define DE_MEDIA_TP_AUTO 0 +#define DE_MEDIA_BNC 1 +#define DE_MEDIA_AUI 2 +#define DE_MEDIA_TP 3 +#define DE_MEDIA_TP_FD 4 +#define DE_MEDIA_INVALID DE_MAX_MEDIA +#define DE_MEDIA_FIRST 0 +#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1) +#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC) + +#define DE_TIMER_LINK (60 * HZ) +#define DE_TIMER_NO_LINK (5 * HZ) + +#define DE_NUM_REGS 16 +#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32)) +#define DE_REGS_VER 1 + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (6*HZ) + +/* This is a mysterious value that can be written to CSR11 in the 21040 (only) + to support a pre-NWay full-duplex signaling mechanism using short frames. + No one knows what it should be, but if left at its default value some + 10base2(!) packets trigger a full-duplex-request interrupt. */ +#define FULL_DUPLEX_MAGIC 0x6969 + +enum { + /* NIC registers */ + BusMode = 0x00, + TxPoll = 0x08, + RxPoll = 0x10, + RxRingAddr = 0x18, + TxRingAddr = 0x20, + MacStatus = 0x28, + MacMode = 0x30, + IntrMask = 0x38, + RxMissed = 0x40, + ROMCmd = 0x48, + CSR11 = 0x58, + SIAStatus = 0x60, + CSR13 = 0x68, + CSR14 = 0x70, + CSR15 = 0x78, + PCIPM = 0x40, + + /* BusMode bits */ + CmdReset = (1 << 0), + CacheAlign16 = 0x00008000, + BurstLen4 = 0x00000400, + DescSkipLen = (DSL << 2), + + /* Rx/TxPoll bits */ + NormalTxPoll = (1 << 0), + NormalRxPoll = (1 << 0), + + /* Tx/Rx descriptor status bits */ + DescOwn = (1 << 31), + RxError = (1 << 15), + RxErrLong = (1 << 7), + RxErrCRC = (1 << 1), + RxErrFIFO = (1 << 0), + RxErrRunt = (1 << 11), + RxErrFrame = (1 << 14), + RingEnd = (1 << 25), + FirstFrag = (1 << 29), + LastFrag = (1 << 30), + TxError = (1 << 15), + TxFIFOUnder = (1 << 1), + TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11), + TxMaxCol = (1 << 8), + TxOWC = (1 << 9), + TxJabber = (1 << 14), + SetupFrame = (1 << 27), + TxSwInt = (1 << 31), + + /* MacStatus bits */ + IntrOK = (1 << 16), + IntrErr = (1 << 15), + RxIntr = (1 << 6), + RxEmpty = (1 << 7), + TxIntr = (1 << 0), + TxEmpty = (1 << 2), + PciErr = (1 << 13), + TxState = (1 << 22) | (1 << 21) | (1 << 20), + RxState = (1 << 19) | (1 << 18) | (1 << 17), + LinkFail = (1 << 12), + LinkPass = (1 << 4), + RxStopped = (1 << 8), + TxStopped = (1 << 1), + + /* MacMode bits */ + TxEnable = (1 << 13), + RxEnable = (1 << 1), + RxTx = TxEnable | RxEnable, + FullDuplex = (1 << 9), + AcceptAllMulticast = (1 << 7), + AcceptAllPhys = (1 << 6), + BOCnt = (1 << 5), + MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) | + RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast, + + /* ROMCmd bits */ + EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */ + EE_CS = 0x01, /* EEPROM chip select. */ + EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */ + EE_WRITE_0 = 0x01, + EE_WRITE_1 = 0x05, + EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */ + EE_ENB = (0x4800 | EE_CS), + + /* The EEPROM commands include the alway-set leading bit. 
*/ + EE_READ_CMD = 6, + + /* RxMissed bits */ + RxMissedOver = (1 << 16), + RxMissedMask = 0xffff, + + /* SROM-related bits */ + SROMC0InfoLeaf = 27, + MediaBlockMask = 0x3f, + MediaCustomCSRs = (1 << 6), + + /* PCIPM bits */ + PM_Sleep = (1 << 31), + PM_Snooze = (1 << 30), + PM_Mask = PM_Sleep | PM_Snooze, + + /* SIAStatus bits */ + NWayState = (1 << 14) | (1 << 13) | (1 << 12), + NWayRestart = (1 << 12), + NonselPortActive = (1 << 9), + SelPortActive = (1 << 8), + LinkFailStatus = (1 << 2), + NetCxnErr = (1 << 1), +}; + +static const u32 de_intr_mask = + IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty | + LinkPass | LinkFail | PciErr; + +/* + * Set the programmable burst length to 4 longwords for all: + * DMA errors result without these values. Cache align 16 long. + */ +static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen; + +struct de_srom_media_block { + u8 opts; + u16 csr13; + u16 csr14; + u16 csr15; +} __packed; + +struct de_srom_info_leaf { + u16 default_media; + u8 n_blocks; + u8 unused; +} __packed; + +struct de_desc { + __le32 opts1; + __le32 opts2; + __le32 addr1; + __le32 addr2; +#if DSL + __le32 skip[DSL]; +#endif +}; + +struct media_info { + u16 type; /* DE_MEDIA_xxx */ + u16 csr13; + u16 csr14; + u16 csr15; +}; + +struct ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +struct de_private { + unsigned tx_head; + unsigned tx_tail; + unsigned rx_tail; + + void __iomem *regs; + struct net_device *dev; + spinlock_t lock; + + struct de_desc *rx_ring; + struct de_desc *tx_ring; + struct ring_info tx_skb[DE_TX_RING_SIZE]; + struct ring_info rx_skb[DE_RX_RING_SIZE]; + unsigned rx_buf_sz; + dma_addr_t ring_dma; + + u32 msg_enable; + + struct net_device_stats net_stats; + + struct pci_dev *pdev; + + u16 setup_frame[DE_SETUP_FRAME_WORDS]; + + u32 media_type; + u32 media_supported; + u32 media_advertise; + struct media_info media[DE_MAX_MEDIA]; + struct timer_list media_timer; + + u8 *ee_data; + unsigned board_idx; + unsigned de21040 : 1; + unsigned media_lock : 1; +}; + + +static void de_set_rx_mode (struct net_device *dev); +static void de_tx (struct de_private *de); +static void de_clean_rings (struct de_private *de); +static void de_media_interrupt (struct de_private *de, u32 status); +static void de21040_media_timer (unsigned long data); +static void de21041_media_timer (unsigned long data); +static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); + + +static const struct pci_device_id de_pci_tbl[] = { + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + { }, +}; +MODULE_DEVICE_TABLE(pci, de_pci_tbl); + +static const char * const media_name[DE_MAX_MEDIA] = { + "10baseT auto", + "BNC", + "AUI", + "10baseT-HD", + "10baseT-FD" +}; + +/* 21040 transceiver register settings: + * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/ +static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, }; +static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, }; +static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, }; + +/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ +static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; +static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; +/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */ +static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; +static u16 
t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; + + +#define dr32(reg) ioread32(de->regs + (reg)) +#define dw32(reg, val) iowrite32((val), de->regs + (reg)) + + +static void de_rx_err_acct (struct de_private *de, unsigned rx_tail, + u32 status, u32 len) +{ + netif_dbg(de, rx_err, de->dev, + "rx err, slot %d status 0x%x len %d\n", + rx_tail, status, len); + + if ((status & 0x38000300) != 0x0300) { + /* Ingore earlier buffers. */ + if ((status & 0xffff) != 0x7fff) { + netif_warn(de, rx_err, de->dev, + "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", + status); + de->net_stats.rx_length_errors++; + } + } else if (status & RxError) { + /* There was a fatal error. */ + de->net_stats.rx_errors++; /* end of a packet.*/ + if (status & 0x0890) de->net_stats.rx_length_errors++; + if (status & RxErrCRC) de->net_stats.rx_crc_errors++; + if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++; + } +} + +static void de_rx (struct de_private *de) +{ + unsigned rx_tail = de->rx_tail; + unsigned rx_work = DE_RX_RING_SIZE; + unsigned drop = 0; + int rc; + + while (--rx_work) { + u32 status, len; + dma_addr_t mapping; + struct sk_buff *skb, *copy_skb; + unsigned copying_skb, buflen; + + skb = de->rx_skb[rx_tail].skb; + BUG_ON(!skb); + rmb(); + status = le32_to_cpu(de->rx_ring[rx_tail].opts1); + if (status & DescOwn) + break; + + len = ((status >> 16) & 0x7ff) - 4; + mapping = de->rx_skb[rx_tail].mapping; + + if (unlikely(drop)) { + de->net_stats.rx_dropped++; + goto rx_next; + } + + if (unlikely((status & 0x38008300) != 0x0300)) { + de_rx_err_acct(de, rx_tail, status, len); + goto rx_next; + } + + copying_skb = (len <= rx_copybreak); + + netif_dbg(de, rx_status, de->dev, + "rx slot %d status 0x%x len %d copying? %d\n", + rx_tail, status, len, copying_skb); + + buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz; + copy_skb = netdev_alloc_skb(de->dev, buflen); + if (unlikely(!copy_skb)) { + de->net_stats.rx_dropped++; + drop = 1; + rx_work = 100; + goto rx_next; + } + + if (!copying_skb) { + pci_unmap_single(de->pdev, mapping, + buflen, PCI_DMA_FROMDEVICE); + skb_put(skb, len); + + mapping = + de->rx_skb[rx_tail].mapping = + pci_map_single(de->pdev, copy_skb->data, + buflen, PCI_DMA_FROMDEVICE); + de->rx_skb[rx_tail].skb = copy_skb; + } else { + pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); + skb_reserve(copy_skb, RX_OFFSET); + skb_copy_from_linear_data(skb, skb_put(copy_skb, len), + len); + pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); + + /* We'll reuse the original ring buffer. 
*/ + skb = copy_skb; + } + + skb->protocol = eth_type_trans (skb, de->dev); + + de->net_stats.rx_packets++; + de->net_stats.rx_bytes += skb->len; + rc = netif_rx (skb); + if (rc == NET_RX_DROP) + drop = 1; + +rx_next: + if (rx_tail == (DE_RX_RING_SIZE - 1)) + de->rx_ring[rx_tail].opts2 = + cpu_to_le32(RingEnd | de->rx_buf_sz); + else + de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz); + de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping); + wmb(); + de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn); + rx_tail = NEXT_RX(rx_tail); + } + + if (!rx_work) + netdev_warn(de->dev, "rx work limit reached\n"); + + de->rx_tail = rx_tail; +} + +static irqreturn_t de_interrupt (int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct de_private *de = netdev_priv(dev); + u32 status; + + status = dr32(MacStatus); + if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF)) + return IRQ_NONE; + + netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n", + status, dr32(MacMode), + de->rx_tail, de->tx_head, de->tx_tail); + + dw32(MacStatus, status); + + if (status & (RxIntr | RxEmpty)) { + de_rx(de); + if (status & RxEmpty) + dw32(RxPoll, NormalRxPoll); + } + + spin_lock(&de->lock); + + if (status & (TxIntr | TxEmpty)) + de_tx(de); + + if (status & (LinkPass | LinkFail)) + de_media_interrupt(de, status); + + spin_unlock(&de->lock); + + if (status & PciErr) { + u16 pci_status; + + pci_read_config_word(de->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(de->pdev, PCI_STATUS, pci_status); + netdev_err(de->dev, + "PCI bus error, status=%08x, PCI status=%04x\n", + status, pci_status); + } + + return IRQ_HANDLED; +} + +static void de_tx (struct de_private *de) +{ + unsigned tx_head = de->tx_head; + unsigned tx_tail = de->tx_tail; + + while (tx_tail != tx_head) { + struct sk_buff *skb; + u32 status; + + rmb(); + status = le32_to_cpu(de->tx_ring[tx_tail].opts1); + if (status & DescOwn) + break; + + skb = de->tx_skb[tx_tail].skb; + BUG_ON(!skb); + if (unlikely(skb == DE_DUMMY_SKB)) + goto next; + + if (unlikely(skb == DE_SETUP_SKB)) { + pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, + sizeof(de->setup_frame), PCI_DMA_TODEVICE); + goto next; + } + + pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, + skb->len, PCI_DMA_TODEVICE); + + if (status & LastFrag) { + if (status & TxError) { + netif_dbg(de, tx_err, de->dev, + "tx err, status 0x%x\n", + status); + de->net_stats.tx_errors++; + if (status & TxOWC) + de->net_stats.tx_window_errors++; + if (status & TxMaxCol) + de->net_stats.tx_aborted_errors++; + if (status & TxLinkFail) + de->net_stats.tx_carrier_errors++; + if (status & TxFIFOUnder) + de->net_stats.tx_fifo_errors++; + } else { + de->net_stats.tx_packets++; + de->net_stats.tx_bytes += skb->len; + netif_dbg(de, tx_done, de->dev, + "tx done, slot %d\n", tx_tail); + } + dev_kfree_skb_irq(skb); + } + +next: + de->tx_skb[tx_tail].skb = NULL; + + tx_tail = NEXT_TX(tx_tail); + } + + de->tx_tail = tx_tail; + + if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4))) + netif_wake_queue(de->dev); +} + +static netdev_tx_t de_start_xmit (struct sk_buff *skb, + struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + unsigned int entry, tx_free; + u32 mapping, len, flags = FirstFrag | LastFrag; + struct de_desc *txd; + + spin_lock_irq(&de->lock); + + tx_free = TX_BUFFS_AVAIL(de); + if (tx_free == 0) { + netif_stop_queue(dev); + spin_unlock_irq(&de->lock); + return NETDEV_TX_BUSY; + } + tx_free--; + + entry = 
de->tx_head; + + txd = &de->tx_ring[entry]; + + len = skb->len; + mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (entry == (DE_TX_RING_SIZE - 1)) + flags |= RingEnd; + if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2))) + flags |= TxSwInt; + flags |= len; + txd->opts2 = cpu_to_le32(flags); + txd->addr1 = cpu_to_le32(mapping); + + de->tx_skb[entry].skb = skb; + de->tx_skb[entry].mapping = mapping; + wmb(); + + txd->opts1 = cpu_to_le32(DescOwn); + wmb(); + + de->tx_head = NEXT_TX(entry); + netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n", + entry, skb->len); + + if (tx_free == 0) + netif_stop_queue(dev); + + spin_unlock_irq(&de->lock); + + /* Trigger an immediate transmit demand. */ + dw32(TxPoll, NormalTxPoll); + + return NETDEV_TX_OK; +} + +/* Set or clear the multicast filter for this adaptor. + Note that we only use exclusion around actually queueing the + new frame, not around filling de->setup_frame. This is non-deterministic + when re-entered but still correct. */ + +static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + u16 hash_table[32]; + struct netdev_hw_addr *ha; + int i; + u16 *eaddrs; + + memset(hash_table, 0, sizeof(hash_table)); + __set_bit_le(255, hash_table); /* Broadcast entry */ + /* This should work on big-endian machines as well. */ + netdev_for_each_mc_addr(ha, dev) { + int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; + + __set_bit_le(index, hash_table); + } + + for (i = 0; i < 32; i++) { + *setup_frm++ = hash_table[i]; + *setup_frm++ = hash_table[i]; + } + setup_frm = &de->setup_frame[13*6]; + + /* Fill the final entry with our physical address. */ + eaddrs = (u16 *)dev->dev_addr; + *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; + *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; + *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; +} + +static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + struct netdev_hw_addr *ha; + u16 *eaddrs; + + /* We have <= 14 addresses so we can use the wonderful + 16 address perfect filtering of the Tulip. */ + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (u16 *) ha->addr; + *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; + *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; + *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; + } + /* Fill the unused entries with the broadcast address. */ + memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12); + setup_frm = &de->setup_frame[15*6]; + + /* Fill the final entry with our physical address. */ + eaddrs = (u16 *)dev->dev_addr; + *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; + *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; + *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; +} + + +static void __de_set_rx_mode (struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + u32 macmode; + unsigned int entry; + u32 mapping; + struct de_desc *txd; + struct de_desc *dummy_txd = NULL; + + macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys); + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + macmode |= AcceptAllMulticast | AcceptAllPhys; + goto out; + } + + if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter well -- accept all multicasts. */ + macmode |= AcceptAllMulticast; + goto out; + } + + /* Note that only the low-address shortword of setup_frame is valid! + The values are doubled for big-endian architectures. 
*/ + if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */ + build_setup_frame_hash (de->setup_frame, dev); + else + build_setup_frame_perfect (de->setup_frame, dev); + + /* + * Now add this frame to the Tx list. + */ + + entry = de->tx_head; + + /* Avoid a chip errata by prefixing a dummy entry. */ + if (entry != 0) { + de->tx_skb[entry].skb = DE_DUMMY_SKB; + + dummy_txd = &de->tx_ring[entry]; + dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ? + cpu_to_le32(RingEnd) : 0; + dummy_txd->addr1 = 0; + + /* Must set DescOwned later to avoid race with chip */ + + entry = NEXT_TX(entry); + } + + de->tx_skb[entry].skb = DE_SETUP_SKB; + de->tx_skb[entry].mapping = mapping = + pci_map_single (de->pdev, de->setup_frame, + sizeof (de->setup_frame), PCI_DMA_TODEVICE); + + /* Put the setup frame on the Tx list. */ + txd = &de->tx_ring[entry]; + if (entry == (DE_TX_RING_SIZE - 1)) + txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame)); + else + txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame)); + txd->addr1 = cpu_to_le32(mapping); + wmb(); + + txd->opts1 = cpu_to_le32(DescOwn); + wmb(); + + if (dummy_txd) { + dummy_txd->opts1 = cpu_to_le32(DescOwn); + wmb(); + } + + de->tx_head = NEXT_TX(entry); + + if (TX_BUFFS_AVAIL(de) == 0) + netif_stop_queue(dev); + + /* Trigger an immediate transmit demand. */ + dw32(TxPoll, NormalTxPoll); + +out: + if (macmode != dr32(MacMode)) + dw32(MacMode, macmode); +} + +static void de_set_rx_mode (struct net_device *dev) +{ + unsigned long flags; + struct de_private *de = netdev_priv(dev); + + spin_lock_irqsave (&de->lock, flags); + __de_set_rx_mode(dev); + spin_unlock_irqrestore (&de->lock, flags); +} + +static inline void de_rx_missed(struct de_private *de, u32 rx_missed) +{ + if (unlikely(rx_missed & RxMissedOver)) + de->net_stats.rx_missed_errors += RxMissedMask; + else + de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask); +} + +static void __de_get_stats(struct de_private *de) +{ + u32 tmp = dr32(RxMissed); /* self-clearing */ + + de_rx_missed(de, tmp); +} + +static struct net_device_stats *de_get_stats(struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + + /* The chip only need report frame silently dropped. */ + spin_lock_irq(&de->lock); + if (netif_running(dev) && netif_device_present(dev)) + __de_get_stats(de); + spin_unlock_irq(&de->lock); + + return &de->net_stats; +} + +static inline int de_is_running (struct de_private *de) +{ + return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0; +} + +static void de_stop_rxtx (struct de_private *de) +{ + u32 macmode; + unsigned int i = 1300/100; + + macmode = dr32(MacMode); + if (macmode & RxTx) { + dw32(MacMode, macmode & ~RxTx); + dr32(MacMode); + } + + /* wait until in-flight frame completes. + * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin) + * Typically expect this loop to end in < 50 us on 100BT. 
+ */ + while (--i) { + if (!de_is_running(de)) + return; + udelay(100); + } + + netdev_warn(de->dev, "timeout expired, stopping DMA\n"); +} + +static inline void de_start_rxtx (struct de_private *de) +{ + u32 macmode; + + macmode = dr32(MacMode); + if ((macmode & RxTx) != RxTx) { + dw32(MacMode, macmode | RxTx); + dr32(MacMode); + } +} + +static void de_stop_hw (struct de_private *de) +{ + + udelay(5); + dw32(IntrMask, 0); + + de_stop_rxtx(de); + + dw32(MacStatus, dr32(MacStatus)); + + udelay(10); + + de->rx_tail = 0; + de->tx_head = de->tx_tail = 0; +} + +static void de_link_up(struct de_private *de) +{ + if (!netif_carrier_ok(de->dev)) { + netif_carrier_on(de->dev); + netif_info(de, link, de->dev, "link up, media %s\n", + media_name[de->media_type]); + } +} + +static void de_link_down(struct de_private *de) +{ + if (netif_carrier_ok(de->dev)) { + netif_carrier_off(de->dev); + netif_info(de, link, de->dev, "link down\n"); + } +} + +static void de_set_media (struct de_private *de) +{ + unsigned media = de->media_type; + u32 macmode = dr32(MacMode); + + if (de_is_running(de)) + netdev_warn(de->dev, "chip is running while changing media!\n"); + + if (de->de21040) + dw32(CSR11, FULL_DUPLEX_MAGIC); + dw32(CSR13, 0); /* Reset phy */ + dw32(CSR14, de->media[media].csr14); + dw32(CSR15, de->media[media].csr15); + dw32(CSR13, de->media[media].csr13); + + /* must delay 10ms before writing to other registers, + * especially CSR6 + */ + mdelay(10); + + if (media == DE_MEDIA_TP_FD) + macmode |= FullDuplex; + else + macmode &= ~FullDuplex; + + netif_info(de, link, de->dev, "set link %s\n", media_name[media]); + netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n", + dr32(MacMode), dr32(SIAStatus), + dr32(CSR13), dr32(CSR14), dr32(CSR15)); + netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", + macmode, de->media[media].csr13, + de->media[media].csr14, de->media[media].csr15); + if (macmode != dr32(MacMode)) + dw32(MacMode, macmode); +} + +static void de_next_media (struct de_private *de, const u32 *media, + unsigned int n_media) +{ + unsigned int i; + + for (i = 0; i < n_media; i++) { + if (de_ok_to_advertise(de, media[i])) { + de->media_type = media[i]; + return; + } + } +} + +static void de21040_media_timer (unsigned long data) +{ + struct de_private *de = (struct de_private *) data; + struct net_device *dev = de->dev; + u32 status = dr32(SIAStatus); + unsigned int carrier; + unsigned long flags; + + carrier = (status & NetCxnErr) ? 
0 : 1; + + if (carrier) { + if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus)) + goto no_link_yet; + + de->media_timer.expires = jiffies + DE_TIMER_LINK; + add_timer(&de->media_timer); + if (!netif_carrier_ok(dev)) + de_link_up(de); + else + netif_info(de, timer, dev, "%s link ok, status %x\n", + media_name[de->media_type], status); + return; + } + + de_link_down(de); + + if (de->media_lock) + return; + + if (de->media_type == DE_MEDIA_AUI) { + static const u32 next_state = DE_MEDIA_TP; + de_next_media(de, &next_state, 1); + } else { + static const u32 next_state = DE_MEDIA_AUI; + de_next_media(de, &next_state, 1); + } + + spin_lock_irqsave(&de->lock, flags); + de_stop_rxtx(de); + spin_unlock_irqrestore(&de->lock, flags); + de_set_media(de); + de_start_rxtx(de); + +no_link_yet: + de->media_timer.expires = jiffies + DE_TIMER_NO_LINK; + add_timer(&de->media_timer); + + netif_info(de, timer, dev, "no link, trying media %s, status %x\n", + media_name[de->media_type], status); +} + +static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) +{ + switch (new_media) { + case DE_MEDIA_TP_AUTO: + if (!(de->media_advertise & ADVERTISED_Autoneg)) + return 0; + if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full))) + return 0; + break; + case DE_MEDIA_BNC: + if (!(de->media_advertise & ADVERTISED_BNC)) + return 0; + break; + case DE_MEDIA_AUI: + if (!(de->media_advertise & ADVERTISED_AUI)) + return 0; + break; + case DE_MEDIA_TP: + if (!(de->media_advertise & ADVERTISED_10baseT_Half)) + return 0; + break; + case DE_MEDIA_TP_FD: + if (!(de->media_advertise & ADVERTISED_10baseT_Full)) + return 0; + break; + } + + return 1; +} + +static void de21041_media_timer (unsigned long data) +{ + struct de_private *de = (struct de_private *) data; + struct net_device *dev = de->dev; + u32 status = dr32(SIAStatus); + unsigned int carrier; + unsigned long flags; + + /* clear port active bits */ + dw32(SIAStatus, NonselPortActive | SelPortActive); + + carrier = (status & NetCxnErr) ? 0 : 1; + + if (carrier) { + if ((de->media_type == DE_MEDIA_TP_AUTO || + de->media_type == DE_MEDIA_TP || + de->media_type == DE_MEDIA_TP_FD) && + (status & LinkFailStatus)) + goto no_link_yet; + + de->media_timer.expires = jiffies + DE_TIMER_LINK; + add_timer(&de->media_timer); + if (!netif_carrier_ok(dev)) + de_link_up(de); + else + netif_info(de, timer, dev, + "%s link ok, mode %x status %x\n", + media_name[de->media_type], + dr32(MacMode), status); + return; + } + + de_link_down(de); + + /* if media type locked, don't switch media */ + if (de->media_lock) + goto set_media; + + /* if activity detected, use that as hint for new media type */ + if (status & NonselPortActive) { + unsigned int have_media = 1; + + /* if AUI/BNC selected, then activity is on TP port */ + if (de->media_type == DE_MEDIA_AUI || + de->media_type == DE_MEDIA_BNC) { + if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)) + de->media_type = DE_MEDIA_TP_AUTO; + else + have_media = 0; + } + + /* TP selected. If there is only TP and BNC, then it's BNC */ + else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) && + de_ok_to_advertise(de, DE_MEDIA_BNC)) + de->media_type = DE_MEDIA_BNC; + + /* TP selected. 
If there is only TP and AUI, then it's AUI */ + else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) && + de_ok_to_advertise(de, DE_MEDIA_AUI)) + de->media_type = DE_MEDIA_AUI; + + /* otherwise, ignore the hint */ + else + have_media = 0; + + if (have_media) + goto set_media; + } + + /* + * Absent or ambiguous activity hint, move to next advertised + * media state. If de->media_type is left unchanged, this + * simply resets the PHY and reloads the current media settings. + */ + if (de->media_type == DE_MEDIA_AUI) { + static const u32 next_states[] = { + DE_MEDIA_BNC, DE_MEDIA_TP_AUTO + }; + de_next_media(de, next_states, ARRAY_SIZE(next_states)); + } else if (de->media_type == DE_MEDIA_BNC) { + static const u32 next_states[] = { + DE_MEDIA_TP_AUTO, DE_MEDIA_AUI + }; + de_next_media(de, next_states, ARRAY_SIZE(next_states)); + } else { + static const u32 next_states[] = { + DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO + }; + de_next_media(de, next_states, ARRAY_SIZE(next_states)); + } + +set_media: + spin_lock_irqsave(&de->lock, flags); + de_stop_rxtx(de); + spin_unlock_irqrestore(&de->lock, flags); + de_set_media(de); + de_start_rxtx(de); + +no_link_yet: + de->media_timer.expires = jiffies + DE_TIMER_NO_LINK; + add_timer(&de->media_timer); + + netif_info(de, timer, dev, "no link, trying media %s, status %x\n", + media_name[de->media_type], status); +} + +static void de_media_interrupt (struct de_private *de, u32 status) +{ + if (status & LinkPass) { + /* Ignore if current media is AUI or BNC and we can't use TP */ + if ((de->media_type == DE_MEDIA_AUI || + de->media_type == DE_MEDIA_BNC) && + (de->media_lock || + !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))) + return; + /* If current media is not TP, change it to TP */ + if ((de->media_type == DE_MEDIA_AUI || + de->media_type == DE_MEDIA_BNC)) { + de->media_type = DE_MEDIA_TP_AUTO; + de_stop_rxtx(de); + de_set_media(de); + de_start_rxtx(de); + } + de_link_up(de); + mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); + return; + } + + BUG_ON(!(status & LinkFail)); + /* Mark the link as down only if current media is TP */ + if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI && + de->media_type != DE_MEDIA_BNC) { + de_link_down(de); + mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); + } +} + +static int de_reset_mac (struct de_private *de) +{ + u32 status, tmp; + + /* + * Reset MAC. de4x5.c and tulip.c examined for "advice" + * in this area. + */ + + if (dr32(BusMode) == 0xffffffff) + return -EBUSY; + + /* Reset the chip, holding bit 0 set at least 50 PCI cycles. 
*/ + dw32 (BusMode, CmdReset); + mdelay (1); + + dw32 (BusMode, de_bus_mode); + mdelay (1); + + for (tmp = 0; tmp < 5; tmp++) { + dr32 (BusMode); + mdelay (1); + } + + mdelay (1); + + status = dr32(MacStatus); + if (status & (RxState | TxState)) + return -EBUSY; + if (status == 0xffffffff) + return -ENODEV; + return 0; +} + +static void de_adapter_wake (struct de_private *de) +{ + u32 pmctl; + + if (de->de21040) + return; + + pci_read_config_dword(de->pdev, PCIPM, &pmctl); + if (pmctl & PM_Mask) { + pmctl &= ~PM_Mask; + pci_write_config_dword(de->pdev, PCIPM, pmctl); + + /* de4x5.c delays, so we do too */ + msleep(10); + } +} + +static void de_adapter_sleep (struct de_private *de) +{ + u32 pmctl; + + if (de->de21040) + return; + + dw32(CSR13, 0); /* Reset phy */ + pci_read_config_dword(de->pdev, PCIPM, &pmctl); + pmctl |= PM_Sleep; + pci_write_config_dword(de->pdev, PCIPM, pmctl); +} + +static int de_init_hw (struct de_private *de) +{ + struct net_device *dev = de->dev; + u32 macmode; + int rc; + + de_adapter_wake(de); + + macmode = dr32(MacMode) & ~MacModeClear; + + rc = de_reset_mac(de); + if (rc) + return rc; + + de_set_media(de); /* reset phy */ + + dw32(RxRingAddr, de->ring_dma); + dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE)); + + dw32(MacMode, RxTx | macmode); + + dr32(RxMissed); /* self-clearing */ + + dw32(IntrMask, de_intr_mask); + + de_set_rx_mode(dev); + + return 0; +} + +static int de_refill_rx (struct de_private *de) +{ + unsigned i; + + for (i = 0; i < DE_RX_RING_SIZE; i++) { + struct sk_buff *skb; + + skb = netdev_alloc_skb(de->dev, de->rx_buf_sz); + if (!skb) + goto err_out; + + de->rx_skb[i].mapping = pci_map_single(de->pdev, + skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE); + de->rx_skb[i].skb = skb; + + de->rx_ring[i].opts1 = cpu_to_le32(DescOwn); + if (i == (DE_RX_RING_SIZE - 1)) + de->rx_ring[i].opts2 = + cpu_to_le32(RingEnd | de->rx_buf_sz); + else + de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz); + de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping); + de->rx_ring[i].addr2 = 0; + } + + return 0; + +err_out: + de_clean_rings(de); + return -ENOMEM; +} + +static int de_init_rings (struct de_private *de) +{ + memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE); + de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); + + de->rx_tail = 0; + de->tx_head = de->tx_tail = 0; + + return de_refill_rx (de); +} + +static int de_alloc_rings (struct de_private *de) +{ + de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma); + if (!de->rx_ring) + return -ENOMEM; + de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE]; + return de_init_rings(de); +} + +static void de_clean_rings (struct de_private *de) +{ + unsigned i; + + memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE); + de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); + wmb(); + memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE); + de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); + wmb(); + + for (i = 0; i < DE_RX_RING_SIZE; i++) { + if (de->rx_skb[i].skb) { + pci_unmap_single(de->pdev, de->rx_skb[i].mapping, + de->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(de->rx_skb[i].skb); + } + } + + for (i = 0; i < DE_TX_RING_SIZE; i++) { + struct sk_buff *skb = de->tx_skb[i].skb; + if ((skb) && (skb != DE_DUMMY_SKB)) { + if (skb != DE_SETUP_SKB) { + de->net_stats.tx_dropped++; + pci_unmap_single(de->pdev, + de->tx_skb[i].mapping, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + } else { + 
pci_unmap_single(de->pdev, + de->tx_skb[i].mapping, + sizeof(de->setup_frame), + PCI_DMA_TODEVICE); + } + } + } + + memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE); + memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE); +} + +static void de_free_rings (struct de_private *de) +{ + de_clean_rings(de); + pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma); + de->rx_ring = NULL; + de->tx_ring = NULL; +} + +static int de_open (struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + const int irq = de->pdev->irq; + int rc; + + netif_dbg(de, ifup, dev, "enabling interface\n"); + + de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + + rc = de_alloc_rings(de); + if (rc) { + netdev_err(dev, "ring allocation failure, err=%d\n", rc); + return rc; + } + + dw32(IntrMask, 0); + + rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) { + netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc); + goto err_out_free; + } + + rc = de_init_hw(de); + if (rc) { + netdev_err(dev, "h/w init failure, err=%d\n", rc); + goto err_out_free_irq; + } + + netif_start_queue(dev); + mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); + + return 0; + +err_out_free_irq: + free_irq(irq, dev); +err_out_free: + de_free_rings(de); + return rc; +} + +static int de_close (struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + unsigned long flags; + + netif_dbg(de, ifdown, dev, "disabling interface\n"); + + del_timer_sync(&de->media_timer); + + spin_lock_irqsave(&de->lock, flags); + de_stop_hw(de); + netif_stop_queue(dev); + netif_carrier_off(dev); + spin_unlock_irqrestore(&de->lock, flags); + + free_irq(de->pdev->irq, dev); + + de_free_rings(de); + de_adapter_sleep(de); + return 0; +} + +static void de_tx_timeout (struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + const int irq = de->pdev->irq; + + netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", + dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), + de->rx_tail, de->tx_head, de->tx_tail); + + del_timer_sync(&de->media_timer); + + disable_irq(irq); + spin_lock_irq(&de->lock); + + de_stop_hw(de); + netif_stop_queue(dev); + netif_carrier_off(dev); + + spin_unlock_irq(&de->lock); + enable_irq(irq); + + /* Update the error counts. 
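+	   The RX missed counter (CSR8) is self-clearing, so fold it into
+	   the accumulated stats before the rings are cleaned below.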
*/ + __de_get_stats(de); + + synchronize_irq(irq); + de_clean_rings(de); + + de_init_rings(de); + + de_init_hw(de); + + netif_wake_queue(dev); +} + +static void __de_get_regs(struct de_private *de, u8 *buf) +{ + int i; + u32 *rbuf = (u32 *)buf; + + /* read all CSRs */ + for (i = 0; i < DE_NUM_REGS; i++) + rbuf[i] = dr32(i * 8); + + /* handle self-clearing RxMissed counter, CSR8 */ + de_rx_missed(de, rbuf[8]); +} + +static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) +{ + ecmd->supported = de->media_supported; + ecmd->transceiver = XCVR_INTERNAL; + ecmd->phy_address = 0; + ecmd->advertising = de->media_advertise; + + switch (de->media_type) { + case DE_MEDIA_AUI: + ecmd->port = PORT_AUI; + break; + case DE_MEDIA_BNC: + ecmd->port = PORT_BNC; + break; + default: + ecmd->port = PORT_TP; + break; + } + + ethtool_cmd_speed_set(ecmd, 10); + + if (dr32(MacMode) & FullDuplex) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + + if (de->media_lock) + ecmd->autoneg = AUTONEG_DISABLE; + else + ecmd->autoneg = AUTONEG_ENABLE; + + /* ignore maxtxpkt, maxrxpkt for now */ + + return 0; +} + +static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) +{ + u32 new_media; + unsigned int media_lock; + + if (ethtool_cmd_speed(ecmd) != 10) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC) + return -EINVAL; + if (de->de21040 && ecmd->port == PORT_BNC) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + if (ecmd->advertising & ~de->media_supported) + return -EINVAL; + if (ecmd->autoneg == AUTONEG_ENABLE && + (!(ecmd->advertising & ADVERTISED_Autoneg))) + return -EINVAL; + + switch (ecmd->port) { + case PORT_AUI: + new_media = DE_MEDIA_AUI; + if (!(ecmd->advertising & ADVERTISED_AUI)) + return -EINVAL; + break; + case PORT_BNC: + new_media = DE_MEDIA_BNC; + if (!(ecmd->advertising & ADVERTISED_BNC)) + return -EINVAL; + break; + default: + if (ecmd->autoneg == AUTONEG_ENABLE) + new_media = DE_MEDIA_TP_AUTO; + else if (ecmd->duplex == DUPLEX_FULL) + new_media = DE_MEDIA_TP_FD; + else + new_media = DE_MEDIA_TP; + if (!(ecmd->advertising & ADVERTISED_TP)) + return -EINVAL; + if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half))) + return -EINVAL; + break; + } + + media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 
0 : 1; + + if ((new_media == de->media_type) && + (media_lock == de->media_lock) && + (ecmd->advertising == de->media_advertise)) + return 0; /* nothing to change */ + + de_link_down(de); + mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); + de_stop_rxtx(de); + + de->media_type = new_media; + de->media_lock = media_lock; + de->media_advertise = ecmd->advertising; + de_set_media(de); + if (netif_running(de->dev)) + de_start_rxtx(de); + + return 0; +} + +static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info) +{ + struct de_private *de = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info)); + info->eedump_len = DE_EEPROM_SIZE; +} + +static int de_get_regs_len(struct net_device *dev) +{ + return DE_REGS_SIZE; +} + +static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct de_private *de = netdev_priv(dev); + int rc; + + spin_lock_irq(&de->lock); + rc = __de_get_settings(de, ecmd); + spin_unlock_irq(&de->lock); + + return rc; +} + +static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct de_private *de = netdev_priv(dev); + int rc; + + spin_lock_irq(&de->lock); + rc = __de_set_settings(de, ecmd); + spin_unlock_irq(&de->lock); + + return rc; +} + +static u32 de_get_msglevel(struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + + return de->msg_enable; +} + +static void de_set_msglevel(struct net_device *dev, u32 msglvl) +{ + struct de_private *de = netdev_priv(dev); + + de->msg_enable = msglvl; +} + +static int de_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct de_private *de = netdev_priv(dev); + + if (!de->ee_data) + return -EOPNOTSUPP; + if ((eeprom->offset != 0) || (eeprom->magic != 0) || + (eeprom->len != DE_EEPROM_SIZE)) + return -EINVAL; + memcpy(data, de->ee_data, eeprom->len); + + return 0; +} + +static int de_nway_reset(struct net_device *dev) +{ + struct de_private *de = netdev_priv(dev); + u32 status; + + if (de->media_type != DE_MEDIA_TP_AUTO) + return -EINVAL; + if (netif_carrier_ok(de->dev)) + de_link_down(de); + + status = dr32(SIAStatus); + dw32(SIAStatus, (status & ~NWayState) | NWayRestart); + netif_info(de, link, dev, "link nway restart, status %x,%x\n", + status, dr32(SIAStatus)); + return 0; +} + +static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *data) +{ + struct de_private *de = netdev_priv(dev); + + regs->version = (DE_REGS_VER << 2) | de->de21040; + + spin_lock_irq(&de->lock); + __de_get_regs(de, data); + spin_unlock_irq(&de->lock); +} + +static const struct ethtool_ops de_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_drvinfo = de_get_drvinfo, + .get_regs_len = de_get_regs_len, + .get_settings = de_get_settings, + .set_settings = de_set_settings, + .get_msglevel = de_get_msglevel, + .set_msglevel = de_set_msglevel, + .get_eeprom = de_get_eeprom, + .nway_reset = de_nway_reset, + .get_regs = de_get_regs, +}; + +static void de21040_get_mac_address(struct de_private *de) +{ + unsigned i; + + dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. 
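+			   The serial ROM then hands back one address byte per
+			   read; while a byte is not yet valid the register
+			   reads with its top bit set, which is what the
+			   value < 0 poll below waits out.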
*/ + udelay(5); + + for (i = 0; i < 6; i++) { + int value, boguscnt = 100000; + do { + value = dr32(ROMCmd); + rmb(); + } while (value < 0 && --boguscnt > 0); + de->dev->dev_addr[i] = value; + udelay(1); + if (boguscnt <= 0) + pr_warn("timeout reading 21040 MAC address byte %u\n", + i); + } +} + +static void de21040_get_media_info(struct de_private *de) +{ + unsigned int i; + + de->media_type = DE_MEDIA_TP; + de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full | + SUPPORTED_10baseT_Half | SUPPORTED_AUI; + de->media_advertise = de->media_supported; + + for (i = 0; i < DE_MAX_MEDIA; i++) { + switch (i) { + case DE_MEDIA_AUI: + case DE_MEDIA_TP: + case DE_MEDIA_TP_FD: + de->media[i].type = i; + de->media[i].csr13 = t21040_csr13[i]; + de->media[i].csr14 = t21040_csr14[i]; + de->media[i].csr15 = t21040_csr15[i]; + break; + default: + de->media[i].type = DE_MEDIA_INVALID; + break; + } + } +} + +/* Note: this routine returns extra data bits for size detection. */ +static unsigned tulip_read_eeprom(void __iomem *regs, int location, + int addr_len) +{ + int i; + unsigned retval = 0; + void __iomem *ee_addr = regs + ROMCmd; + int read_cmd = location | (EE_READ_CMD << addr_len); + + writel(EE_ENB & ~EE_CS, ee_addr); + writel(EE_ENB, ee_addr); + + /* Shift the read command bits out. */ + for (i = 4 + addr_len; i >= 0; i--) { + short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; + writel(EE_ENB | dataval, ee_addr); + readl(ee_addr); + writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); + readl(ee_addr); + retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0); + } + writel(EE_ENB, ee_addr); + readl(ee_addr); + + for (i = 16; i > 0; i--) { + writel(EE_ENB | EE_SHIFT_CLK, ee_addr); + readl(ee_addr); + retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0); + writel(EE_ENB, ee_addr); + readl(ee_addr); + } + + /* Terminate the EEPROM access. */ + writel(EE_ENB & ~EE_CS, ee_addr); + return retval; +} + +static void de21041_get_srom_info(struct de_private *de) +{ + unsigned i, sa_offset = 0, ofs; + u8 ee_data[DE_EEPROM_SIZE + 6] = {}; + unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6; + struct de_srom_info_leaf *il; + void *bufp; + + /* download entire eeprom */ + for (i = 0; i < DE_EEPROM_WORDS; i++) + ((__le16 *)ee_data)[i] = + cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size)); + + /* DEC now has a specification but early board makers + just put the address in the first EEPROM locations. */ + /* This does memcmp(eedata, eedata+16, 8) */ + +#ifndef CONFIG_MIPS_COBALT + + for (i = 0; i < 8; i ++) + if (ee_data[i] != ee_data[16+i]) + sa_offset = 20; + +#endif + + /* store MAC address */ + for (i = 0; i < 6; i ++) + de->dev->dev_addr[i] = ee_data[i + sa_offset]; + + /* get offset of controller 0 info leaf. ignore 2nd byte. 
*/ + ofs = ee_data[SROMC0InfoLeaf]; + if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block))) + goto bad_srom; + + /* get pointer to info leaf */ + il = (struct de_srom_info_leaf *) &ee_data[ofs]; + + /* paranoia checks */ + if (il->n_blocks == 0) + goto bad_srom; + if ((sizeof(ee_data) - ofs) < + (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks))) + goto bad_srom; + + /* get default media type */ + switch (get_unaligned(&il->default_media)) { + case 0x0001: de->media_type = DE_MEDIA_BNC; break; + case 0x0002: de->media_type = DE_MEDIA_AUI; break; + case 0x0204: de->media_type = DE_MEDIA_TP_FD; break; + default: de->media_type = DE_MEDIA_TP_AUTO; break; + } + + if (netif_msg_probe(de)) + pr_info("de%d: SROM leaf offset %u, default media %s\n", + de->board_idx, ofs, media_name[de->media_type]); + + /* init SIA register values to defaults */ + for (i = 0; i < DE_MAX_MEDIA; i++) { + de->media[i].type = DE_MEDIA_INVALID; + de->media[i].csr13 = 0xffff; + de->media[i].csr14 = 0xffff; + de->media[i].csr15 = 0xffff; + } + + /* parse media blocks to see what medias are supported, + * and if any custom CSR values are provided + */ + bufp = ((void *)il) + sizeof(*il); + for (i = 0; i < il->n_blocks; i++) { + struct de_srom_media_block *ib = bufp; + unsigned idx; + + /* index based on media type in media block */ + switch(ib->opts & MediaBlockMask) { + case 0: /* 10baseT */ + de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half + | SUPPORTED_Autoneg; + idx = DE_MEDIA_TP; + de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO; + break; + case 1: /* BNC */ + de->media_supported |= SUPPORTED_BNC; + idx = DE_MEDIA_BNC; + break; + case 2: /* AUI */ + de->media_supported |= SUPPORTED_AUI; + idx = DE_MEDIA_AUI; + break; + case 4: /* 10baseT-FD */ + de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full + | SUPPORTED_Autoneg; + idx = DE_MEDIA_TP_FD; + de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO; + break; + default: + goto bad_srom; + } + + de->media[idx].type = idx; + + if (netif_msg_probe(de)) + pr_info("de%d: media block #%u: %s", + de->board_idx, i, + media_name[de->media[idx].type]); + + bufp += sizeof (ib->opts); + + if (ib->opts & MediaCustomCSRs) { + de->media[idx].csr13 = get_unaligned(&ib->csr13); + de->media[idx].csr14 = get_unaligned(&ib->csr14); + de->media[idx].csr15 = get_unaligned(&ib->csr15); + bufp += sizeof(ib->csr13) + sizeof(ib->csr14) + + sizeof(ib->csr15); + + if (netif_msg_probe(de)) + pr_cont(" (%x,%x,%x)\n", + de->media[idx].csr13, + de->media[idx].csr14, + de->media[idx].csr15); + + } else { + if (netif_msg_probe(de)) + pr_cont("\n"); + } + + if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3])) + break; + } + + de->media_advertise = de->media_supported; + +fill_defaults: + /* fill in defaults, for cases where custom CSRs not used */ + for (i = 0; i < DE_MAX_MEDIA; i++) { + if (de->media[i].csr13 == 0xffff) + de->media[i].csr13 = t21041_csr13[i]; + if (de->media[i].csr14 == 0xffff) { + /* autonegotiation is broken at least on some chip + revisions - rev. 
0x21 works, 0x11 does not */ + if (de->pdev->revision < 0x20) + de->media[i].csr14 = t21041_csr14_brk[i]; + else + de->media[i].csr14 = t21041_csr14[i]; + } + if (de->media[i].csr15 == 0xffff) + de->media[i].csr15 = t21041_csr15[i]; + } + + de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL); + + return; + +bad_srom: + /* for error cases, it's ok to assume we support all these */ + for (i = 0; i < DE_MAX_MEDIA; i++) + de->media[i].type = i; + de->media_supported = + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_AUI | + SUPPORTED_BNC; + goto fill_defaults; +} + +static const struct net_device_ops de_netdev_ops = { + .ndo_open = de_open, + .ndo_stop = de_close, + .ndo_set_rx_mode = de_set_rx_mode, + .ndo_start_xmit = de_start_xmit, + .ndo_get_stats = de_get_stats, + .ndo_tx_timeout = de_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct de_private *de; + int rc; + void __iomem *regs; + unsigned long pciaddr; + static int board_idx = -1; + + board_idx++; + +#ifndef MODULE + if (board_idx == 0) + pr_info("%s\n", version); +#endif + + /* allocate a new ethernet device structure, and fill in defaults */ + dev = alloc_etherdev(sizeof(struct de_private)); + if (!dev) + return -ENOMEM; + + dev->netdev_ops = &de_netdev_ops; + SET_NETDEV_DEV(dev, &pdev->dev); + dev->ethtool_ops = &de_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + de = netdev_priv(dev); + de->de21040 = ent->driver_data == 0 ? 1 : 0; + de->pdev = pdev; + de->dev = dev; + de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug); + de->board_idx = board_idx; + spin_lock_init (&de->lock); + init_timer(&de->media_timer); + if (de->de21040) + de->media_timer.function = de21040_media_timer; + else + de->media_timer.function = de21041_media_timer; + de->media_timer.data = (unsigned long) de; + + netif_carrier_off(dev); + + /* wake up device, assign resources */ + rc = pci_enable_device(pdev); + if (rc) + goto err_out_free; + + /* reserve PCI resources to ensure driver atomicity */ + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable; + + /* check for invalid IRQ value */ + if (pdev->irq < 2) { + rc = -EIO; + pr_err("invalid irq (%d) for pci dev %s\n", + pdev->irq, pci_name(pdev)); + goto err_out_res; + } + + /* obtain and check validity of PCI I/O address */ + pciaddr = pci_resource_start(pdev, 1); + if (!pciaddr) { + rc = -EIO; + pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev)); + goto err_out_res; + } + if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) { + rc = -EIO; + pr_err("MMIO resource (%llx) too small on pci dev %s\n", + (unsigned long long)pci_resource_len(pdev, 1), + pci_name(pdev)); + goto err_out_res; + } + + /* remap CSR registers */ + regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); + if (!regs) { + rc = -EIO; + pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", + (unsigned long long)pci_resource_len(pdev, 1), + pciaddr, pci_name(pdev)); + goto err_out_res; + } + de->regs = regs; + + de_adapter_wake(de); + + /* make sure hardware is not running */ + rc = de_reset_mac(de); + if (rc) { + pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev)); + goto err_out_iomap; + } + + /* get MAC address, initialize default media type and + * get list of supported media + */ + if (de->de21040) { + de21040_get_mac_address(de); + 
de21040_get_media_info(de); + } else { + de21041_get_srom_info(de); + } + + /* register new network interface with kernel */ + rc = register_netdev(dev); + if (rc) + goto err_out_iomap; + + /* print info about board and interface just registered */ + netdev_info(dev, "%s at %p, %pM, IRQ %d\n", + de->de21040 ? "21040" : "21041", + regs, dev->dev_addr, pdev->irq); + + pci_set_drvdata(pdev, dev); + + /* enable busmastering */ + pci_set_master(pdev); + + /* put adapter to sleep */ + de_adapter_sleep(de); + + return 0; + +err_out_iomap: + kfree(de->ee_data); + iounmap(regs); +err_out_res: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_free: + free_netdev(dev); + return rc; +} + +static void de_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct de_private *de = netdev_priv(dev); + + BUG_ON(!dev); + unregister_netdev(dev); + kfree(de->ee_data); + iounmap(de->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +#ifdef CONFIG_PM + +static int de_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct de_private *de = netdev_priv(dev); + + rtnl_lock(); + if (netif_running (dev)) { + const int irq = pdev->irq; + + del_timer_sync(&de->media_timer); + + disable_irq(irq); + spin_lock_irq(&de->lock); + + de_stop_hw(de); + netif_stop_queue(dev); + netif_device_detach(dev); + netif_carrier_off(dev); + + spin_unlock_irq(&de->lock); + enable_irq(irq); + + /* Update the error counts. */ + __de_get_stats(de); + + synchronize_irq(irq); + de_clean_rings(de); + + de_adapter_sleep(de); + pci_disable_device(pdev); + } else { + netif_device_detach(dev); + } + rtnl_unlock(); + return 0; +} + +static int de_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct de_private *de = netdev_priv(dev); + int retval = 0; + + rtnl_lock(); + if (netif_device_present(dev)) + goto out; + if (!netif_running(dev)) + goto out_attach; + if ((retval = pci_enable_device(pdev))) { + netdev_err(dev, "pci_enable_device failed in resume\n"); + goto out; + } + pci_set_master(pdev); + de_init_rings(de); + de_init_hw(de); +out_attach: + netif_device_attach(dev); +out: + rtnl_unlock(); + return 0; +} + +#endif /* CONFIG_PM */ + +static struct pci_driver de_driver = { + .name = DRV_NAME, + .id_table = de_pci_tbl, + .probe = de_init_one, + .remove = de_remove_one, +#ifdef CONFIG_PM + .suspend = de_suspend, + .resume = de_resume, +#endif +}; + +static int __init de_init (void) +{ +#ifdef MODULE + pr_info("%s\n", version); +#endif + return pci_register_driver(&de_driver); +} + +static void __exit de_exit (void) +{ + pci_unregister_driver (&de_driver); +} + +module_init(de_init); +module_exit(de_exit); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c new file mode 100644 index 000000000..badff181e --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -0,0 +1,5575 @@ +/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500 + ethernet driver for Linux. + + Copyright 1994, 1995 Digital Equipment Corporation. + + Testing resources for this driver have been made available + in part by NASA Ames Research Center (mjacob@nas.nasa.gov). + + The author may be reached at davies@maniac.ultranet.com. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 2 of the License, or (at your + option) any later version. + + THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 675 Mass Ave, Cambridge, MA 02139, USA. + + Originally, this driver was written for the Digital Equipment + Corporation series of EtherWORKS ethernet cards: + + DE425 TP/COAX EISA + DE434 TP PCI + DE435 TP/COAX/AUI PCI + DE450 TP/COAX/AUI PCI + DE500 10/100 PCI Fasternet + + but it will now attempt to support all cards which conform to the + Digital Semiconductor SROM Specification. The driver currently + recognises the following chips: + + DC21040 (no SROM) + DC21041[A] + DC21140[A] + DC21142 + DC21143 + + So far the driver is known to work with the following cards: + + KINGSTON + Linksys + ZNYX342 + SMC8432 + SMC9332 (w/new SROM) + ZNYX31[45] + ZNYX346 10/100 4 port (can act as a 10/100 bridge!) + + The driver has been tested on a relatively busy network using the DE425, + DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred + 16M of data to a DECstation 5000/200 as follows: + + TCP UDP + TX RX TX RX + DE425 1030k 997k 1170k 1128k + DE434 1063k 995k 1170k 1125k + DE435 1063k 995k 1170k 1125k + DE500 1063k 998k 1170k 1125k in 10Mb/s mode + + All values are typical (in kBytes/sec) from a sample of 4 for each + measurement. Their error is +/-20k on a quiet (private) network and also + depend on what load the CPU has. + + ========================================================================= + This driver has been written substantially from scratch, although its + inheritance of style and stack interface from 'ewrk3.c' and in turn from + Donald Becker's 'lance.c' should be obvious. With the module autoload of + every usable DECchip board, I pinched Donald's 'next_module' field to + link my modules together. + + Up to 15 EISA cards can be supported under this driver, limited primarily + by the available IRQ lines. I have checked different configurations of + multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a + problem yet (provided you have at least depca.c v0.38) ... + + PCI support has been added to allow the driver to work with the DE434, + DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due + to the differences in the EISA and PCI CSR address offsets from the base + address. + + The ability to load this driver as a loadable module has been included + and used extensively during the driver development (to save those long + reboot sequences). 
Loadable module support under PCI and EISA has been + achieved by letting the driver autoprobe as if it were compiled into the + kernel. Do make sure you're not sharing interrupts with anything that + cannot accommodate interrupt sharing! + + To utilise this ability, you have to do 8 things: + + 0) have a copy of the loadable modules code installed on your system. + 1) copy de4x5.c from the /linux/drivers/net directory to your favourite + temporary directory. + 2) for fixed autoprobes (not recommended), edit the source code near + line 5594 to reflect the I/O address you're using, or assign these when + loading by: + + insmod de4x5 io=0xghh where g = bus number + hh = device number + + NB: autoprobing for modules is now supported by default. You may just + use: + + insmod de4x5 + + to load all available boards. For a specific board, still use + the 'io=?' above. + 3) compile de4x5.c, but include -DMODULE in the command line to ensure + that the correct bits are compiled (see end of source code). + 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a + kernel with the de4x5 configuration turned off and reboot. + 5) insmod de4x5 [io=0xghh] + 6) run the net startup bits for your new eth?? interface(s) manually + (usually /etc/rc.inet[12] at boot time). + 7) enjoy! + + To unload a module, turn off the associated interface(s) + 'ifconfig eth?? down' then 'rmmod de4x5'. + + Automedia detection is included so that in principal you can disconnect + from, e.g. TP, reconnect to BNC and things will still work (after a + pause whilst the driver figures out where its media went). My tests + using ping showed that it appears to work.... + + By default, the driver will now autodetect any DECchip based card. + Should you have a need to restrict the driver to DIGITAL only cards, you + can compile with a DEC_ONLY define, or if loading as a module, use the + 'dec_only=1' parameter. + + I've changed the timing routines to use the kernel timer and scheduling + functions so that the hangs and other assorted problems that occurred + while autosensing the media should be gone. A bonus for the DC21040 + auto media sense algorithm is that it can now use one that is more in + line with the rest (the DC21040 chip doesn't have a hardware timer). + The downside is the 1 'jiffies' (10ms) resolution. + + IEEE 802.3u MII interface code has been added in anticipation that some + products may use it in the future. + + The SMC9332 card has a non-compliant SROM which needs fixing - I have + patched this driver to detect it because the SROM format used complies + to a previous DEC-STD format. + + I have removed the buffer copies needed for receive on Intels. I cannot + remove them for Alphas since the Tulip hardware only does longword + aligned DMA transfers and the Alphas get alignment traps with non + longword aligned data copies (which makes them really slow). No comment. + + I have added SROM decoding routines to make this driver work with any + card that supports the Digital Semiconductor SROM spec. This will help + all cards running the dc2114x series chips in particular. Cards using + the dc2104x chips should run correctly with the basic driver. I'm in + debt to for the testing and feedback that helped get + this feature working. So far we have tested KINGSTON, SMC8432, SMC9332 + (with the latest SROM complying with the SROM spec V3: their first was + broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315 + (quad 21041 MAC) cards also appear to work despite their incorrectly + wired IRQs. 
+ + I have added a temporary fix for interrupt problems when some SCSI cards + share the same interrupt as the DECchip based cards. The problem occurs + because the SCSI card wants to grab the interrupt as a fast interrupt + (runs the service routine with interrupts turned off) vs. this card + which really needs to run the service routine with interrupts turned on. + This driver will now add the interrupt service routine as a fast + interrupt if it is bounced from the slow interrupt. THIS IS NOT A + RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time + until people sort out their compatibility issues and the kernel + interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST + INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not + run on the same interrupt. PCMCIA/CardBus is another can of worms... + + Finally, I think I have really fixed the module loading problem with + more than one DECchip based card. As a side effect, I don't mess with + the device structure any more which means that if more than 1 card in + 2.0.x is installed (4 in 2.1.x), the user will have to edit + linux/drivers/net/Space.c to make room for them. Hence, module loading + is the preferred way to use this driver, since it doesn't have this + limitation. + + Where SROM media detection is used and full duplex is specified in the + SROM, the feature is ignored unless lp->params.fdx is set at compile + time OR during a module load (insmod de4x5 args='eth??:fdx' [see + below]). This is because there is no way to automatically detect full + duplex links except through autonegotiation. When I include the + autonegotiation feature in the SROM autoconf code, this detection will + occur automatically for that case. + + Command line arguments are now allowed, similar to passing arguments + through LILO. This will allow a per adapter board set up of full duplex + and media. The only lexical constraints are: the board name (dev->name) + appears in the list before its parameters. The list of parameters ends + either at the end of the parameter list or with another board name. The + following parameters are allowed: + + fdx for full duplex + autosense to set the media/speed; with the following + sub-parameters: + TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO + + Case sensitivity is important for the sub-parameters. They *must* be + upper case. Examples: + + insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. + + For a compiled in driver, at or above line 548, place e.g. + #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP" + + Yes, I know full duplex isn't permissible on BNC or AUI; they're just + examples. By default, full duplex is turned off and AUTO is the default + autosense setting. In reality, I expect only the full duplex option to + be used. Note the use of single quotes in the two examples above and the + lack of commas to separate items. ALSO, you must get the requested media + correct in relation to what the adapter SROM says it has. There's no way + to determine this in advance other than by trial and error and common + sense, e.g. call a BNC connectored port 'BNC', not '10Mb'. + + Changed the bus probing. EISA used to be done first, followed by PCI. 
+ Most people probably don't even know what a de425 is today and the EISA + probe has messed up some SCSI cards in the past, so now PCI is always + probed first followed by EISA if a) the architecture allows EISA and + either b) there have been no PCI cards detected or c) an EISA probe is + forced by the user. To force a probe include "force_eisa" in your + insmod "args" line; for built-in kernels either change the driver to do + this automatically or include #define DE4X5_FORCE_EISA on or before + line 1040 in the driver. + + TO DO: + ------ + + Revision History + ---------------- + + Version Date Description + + 0.1 17-Nov-94 Initial writing. ALPHA code release. + 0.2 13-Jan-95 Added PCI support for DE435's. + 0.21 19-Jan-95 Added auto media detection. + 0.22 10-Feb-95 Fix interrupt handler call . + Fix recognition bug reported by . + Add request/release_region code. + Add loadable modules support for PCI. + Clean up loadable modules support. + 0.23 28-Feb-95 Added DC21041 and DC21140 support. + Fix missed frame counter value and initialisation. + Fixed EISA probe. + 0.24 11-Apr-95 Change delay routine to use . + Change TX_BUFFS_AVAIL macro. + Change media autodetection to allow manual setting. + Completed DE500 (DC21140) support. + 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm. + 0.242 10-May-95 Minor changes. + 0.30 12-Jun-95 Timer fix for DC21140. + Portability changes. + Add ALPHA changes from . + Add DE500 semi automatic autosense. + Add Link Fail interrupt TP failure detection. + Add timer based link change detection. + Plugged a memory leak in de4x5_queue_pkt(). + 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1. + 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a + suggestion by . + 0.33 8-Aug-95 Add shared interrupt support (not released yet). + 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs. + Fix de4x5_interrupt(). + Fix dc21140_autoconf() mess. + No shared interrupt support. + 0.332 11-Sep-95 Added MII management interface routines. + 0.40 5-Mar-96 Fix setup frame timeout . + Add kernel timer code (h/w is too flaky). + Add MII based PHY autosense. + Add new multicasting code. + Add new autosense algorithms for media/mode + selection using kernel scheduling/timing. + Re-formatted. + Made changes suggested by : + Change driver to detect all DECchip based cards + with DEC_ONLY restriction a special case. + Changed driver to autoprobe as a module. No irq + checking is done now - assume BIOS is good! + Added SMC9332 detection + 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card + only + Fix for multiple PCI cards reported by + Duh, put the IRQF_SHARED flag into request_interrupt(). + Fix SMC ethernet address in enet_det[]. + Print chip name instead of "UNKNOWN" during boot. + 0.42 26-Apr-96 Fix MII write TA bit error. + Fix bug in dc21040 and dc21041 autosense code. + Remove buffer copies on receive for Intels. + Change sk_buff handling during media disconnects to + eliminate DUP packets. + Add dynamic TX thresholding. + Change all chips to use perfect multicast filtering. + Fix alloc_device() bug + 0.43 21-Jun-96 Fix unconnected media TX retry bug. + Add Accton to the list of broken cards. + Fix TX under-run bug for non DC21140 chips. + Fix boot command probe bug in alloc_device() as + reported by and + . + Add cache locks to prevent a race condition as + reported by and + . + Upgraded alloc_device() code. + 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion + with + 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips. 
+ Fix EISA probe bugs reported by + and . + 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media + with a loopback packet. + 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported + by + 0.45 8-Dec-96 Include endian functions for PPC use, from work + by and . + 0.451 28-Dec-96 Added fix to allow autoprobe for modules after + suggestion from . + 0.5 30-Jan-97 Added SROM decoding functions. + Updated debug flags. + Fix sleep/wakeup calls for PCI cards, bug reported + by . + Added multi-MAC, one SROM feature from discussion + with . + Added full module autoprobe capability. + Added attempt to use an SMC9332 with broken SROM. + Added fix for ZYNX multi-mac cards that didn't + get their IRQs wired correctly. + 0.51 13-Feb-97 Added endian fixes for the SROM accesses from + + Fix init_connection() to remove extra device reset. + Fix MAC/PHY reset ordering in dc21140m_autoconf(). + Fix initialisation problem with lp->timeout in + typeX_infoblock() from . + Fix MII PHY reset problem from work done by + . + 0.52 26-Apr-97 Some changes may not credit the right people - + a disk crash meant I lost some mail. + Change RX interrupt routine to drop rather than + defer packets to avoid hang reported by + . + Fix srom_exec() to return for COMPACT and type 1 + infoblocks. + Added DC21142 and DC21143 functions. + Added byte counters from + Added IRQF_DISABLED temporary fix from + . + 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during + module load: bug reported by + + Fix multi-MAC, one SROM, to work with 2114x chips: + bug reported by . + Make above search independent of BIOS device scan + direction. + Completed DC2114[23] autosense functions. + 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by + and + . + Added argument list to set up each board from either + a module's command line or a compiled in #define. + Added generic MII PHY functionality to deal with + newer PHY chips. + Fix the mess in 2.1.67. + 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by + . + Fix bug in pci_probe() for 64 bit systems reported + by . + 0.533 9-Jan-98 Fix more 64 bit bugs reported by . + 0.534 24-Jan-98 Fix last (?) endian bug from + 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040. + 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure. + **Incompatible with 2.0.x from here.** + 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP + from + Add TP, AUI and BNC cases to 21140m_autoconf() for + case where a 21140 under SROM control uses, e.g. AUI + from problem report by + Add MII parallel detection to 2114x_autoconf() for + case where no autonegotiation partner exists from + problem report by . + Add ability to force connection type directly even + when using SROM control from problem report by + . + Updated the PCI interface to conform with the latest + version. I hope nothing is broken... + Add TX done interrupt modification from suggestion + by . + Fix is_anc_capable() bug reported by + . + Fix type[13]_infoblock() bug: during MII search, PHY + lp->rst not run because lp->ibn not initialised - + from report & fix by . + Fix probe bug with EISA & PCI cards present from + report by . + 0.541 24-Aug-98 Fix compiler problems associated with i386-string + ops from multiple bug reports and temporary fix + from . + Fix pci_probe() to correctly emulate the old + pcibios_find_class() function. + Add an_exception() for old ZYNX346 and fix compile + warning on PPC & SPARC, from . 
+ Fix lastPCI to correctly work with compiled in + kernels and modules from bug report by + et al. + 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages + when media is unconnected. + Change dev->interrupt to lp->interrupt to ensure + alignment for Alpha's and avoid their unaligned + access traps. This flag is merely for log messages: + should do something more definitive though... + 0.543 30-Dec-98 Add SMP spin locking. + 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using + a 21143 by . + Change PCI/EISA bus probing order. + 0.545 28-Nov-99 Further Moto SROM bug fix from + + Remove double checking for DEBUG_RX in de4x5_dbg_rx() + from report by + 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function + was causing a page fault when initializing the + variable 'pb', on a non de4x5 PCI device, in this + case a PCI bridge (DEC chip 21152). The value of + 'pb' is now only initialized if a de4x5 chip is + present. + + 0.547 08-Nov-01 Use library crc32 functions by + 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and + generic DMA APIs. Fixed DE425 support on Alpha. + + ========================================================================= +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#ifdef CONFIG_PPC_PMAC +#include +#endif /* CONFIG_PPC_PMAC */ + +#include "de4x5.h" + +static const char version[] = + KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; + +#define c_char const char + +/* +** MII Information +*/ +struct phy_table { + int reset; /* Hard reset required? */ + int id; /* IEEE OUI */ + int ta; /* One cycle TA time - 802.3u is confusing here */ + struct { /* Non autonegotiation (parallel) speed det. */ + int reg; + int mask; + int value; + } spd; +}; + +struct mii_phy { + int reset; /* Hard reset required? */ + int id; /* IEEE OUI */ + int ta; /* One cycle TA time */ + struct { /* Non autonegotiation (parallel) speed det. */ + int reg; + int mask; + int value; + } spd; + int addr; /* MII address for the PHY */ + u_char *gep; /* Start of GEP sequence block in SROM */ + u_char *rst; /* Start of reset sequence in SROM */ + u_int mc; /* Media Capabilities */ + u_int ana; /* NWay Advertisement */ + u_int fdx; /* Full DupleX capabilities for each media */ + u_int ttm; /* Transmit Threshold Mode for each media */ + u_int mci; /* 21142 MII Connector Interrupt info */ +}; + +#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */ + +struct sia_phy { + u_char mc; /* Media Code */ + u_char ext; /* csr13-15 valid when set */ + int csr13; /* SIA Connectivity Register */ + int csr14; /* SIA TX/RX Register */ + int csr15; /* SIA General Register */ + int gepc; /* SIA GEP Control Information */ + int gep; /* SIA GEP Data */ +}; + +/* +** Define the know universe of PHY devices that can be +** recognised by this driver. 
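+**
+** Roughly, each entry's spd {reg, mask, value} triplet is a parallel
+** detection rule: the PHY is taken to have linked at 100Mb/s when
+** register spd.reg, masked with spd.mask, reads back as spd.value
+** (the GENERIC_* defines below encode this for 802.3u devices).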
+*/ +static struct phy_table phy_info[] = { + {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */ + {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */ + {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */ + {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */ + {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */ +}; + +/* +** These GENERIC values assumes that the PHY devices follow 802.3u and +** allow parallel detection to set the link partner ability register. +** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported. +*/ +#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */ +#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */ +#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */ + +/* +** Define special SROM detection cases +*/ +static c_char enet_det[][ETH_ALEN] = { + {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00} +}; + +#define SMC 1 +#define ACCTON 2 + +/* +** SROM Repair definitions. If a broken SROM is detected a card may +** use this information to help figure out what to do. This is a +** "stab in the dark" and so far for SMC9332's only. +*/ +static c_char srom_repair_info[][100] = { + {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */ + 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02, + 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50, + 0x00,0x18,} +}; + + +#ifdef DE4X5_DEBUG +static int de4x5_debug = DE4X5_DEBUG; +#else +/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/ +static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION); +#endif + +/* +** Allow per adapter set up. For modules this is simply a command line +** parameter, e.g.: +** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. +** +** For a compiled in driver, place e.g. +** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP" +** here +*/ +#ifdef DE4X5_PARM +static char *args = DE4X5_PARM; +#else +static char *args; +#endif + +struct parameters { + bool fdx; + int autosense; +}; + +#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */ + +#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */ + +/* +** Ethernet PROM defines +*/ +#define PROBE_LENGTH 32 +#define ETH_PROM_SIG 0xAA5500FFUL + +/* +** Ethernet Info +*/ +#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */ +#define IEEE802_3_SZ 1518 /* Packet + CRC */ +#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */ +#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */ +#define MIN_DAT_SZ 1 /* Minimum ethernet data length */ +#define PKT_HDR_LEN 14 /* Addresses and data length info */ +#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1) +#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */ + + +/* +** EISA bus defines +*/ +#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ +#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */ + +#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11} + +#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"} +#define DE4X5_NAME_LENGTH 8 + +static c_char *de4x5_signatures[] = DE4X5_SIGNATURE; + +/* +** Ethernet PROM defines for DC21040 +*/ +#define PROBE_LENGTH 32 +#define ETH_PROM_SIG 0xAA5500FFUL + +/* +** PCI Bus defines +*/ +#define PCI_MAX_BUS_NUM 8 +#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */ +#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */ + +/* +** Memory Alignment. Each descriptor is 4 longwords long. 
To force a +** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and +** DESC_ALIGN. ALIGN aligns the start address of the private memory area +** and hence the RX descriptor ring's first entry. +*/ +#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ +#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */ +#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */ +#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */ +#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */ +#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */ + +#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */ +#define DE4X5_CACHE_ALIGN CAL_16LONG +#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */ +/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */ +#define DESC_ALIGN + +#ifndef DEC_ONLY /* See README.de4x5 for using this */ +static int dec_only; +#else +static int dec_only = 1; +#endif + +/* +** DE4X5 IRQ ENABLE/DISABLE +*/ +#define ENABLE_IRQs { \ + imr |= lp->irq_en;\ + outl(imr, DE4X5_IMR); /* Enable the IRQs */\ +} + +#define DISABLE_IRQs {\ + imr = inl(DE4X5_IMR);\ + imr &= ~lp->irq_en;\ + outl(imr, DE4X5_IMR); /* Disable the IRQs */\ +} + +#define UNMASK_IRQs {\ + imr |= lp->irq_mask;\ + outl(imr, DE4X5_IMR); /* Unmask the IRQs */\ +} + +#define MASK_IRQs {\ + imr = inl(DE4X5_IMR);\ + imr &= ~lp->irq_mask;\ + outl(imr, DE4X5_IMR); /* Mask the IRQs */\ +} + +/* +** DE4X5 START/STOP +*/ +#define START_DE4X5 {\ + omr = inl(DE4X5_OMR);\ + omr |= OMR_ST | OMR_SR;\ + outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\ +} + +#define STOP_DE4X5 {\ + omr = inl(DE4X5_OMR);\ + omr &= ~(OMR_ST|OMR_SR);\ + outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \ +} + +/* +** DE4X5 SIA RESET +*/ +#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */ + +/* +** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS) +*/ +#define DE4X5_AUTOSENSE_MS 250 + +/* +** SROM Structure +*/ +struct de4x5_srom { + char sub_vendor_id[2]; + char sub_system_id[2]; + char reserved[12]; + char id_block_crc; + char reserved2; + char version; + char num_controllers; + char ieee_addr[6]; + char info[100]; + short chksum; +}; +#define SUB_VENDOR_ID 0x500a + +/* +** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous +** and have sizes of both a power of 2 and a multiple of 4. +** A size of 256 bytes for each buffer could be chosen because over 90% of +** all packets in our network are <256 bytes long and 64 longword alignment +** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX +** descriptors are needed for machines with an ALPHA CPU. 
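+**
+** On the Alpha/PPC/SPARC (or DE4X5_DO_MEMCPY) builds the receive
+** buffers live in the private DMA area, which is bumped to the next
+** boundary with the usual idiom: (addr + DE4X5_ALIGN) & ~DE4X5_ALIGN,
+** where DE4X5_ALIGN (= DE4X5_ALIGN32 = 31) rounds addr up to a
+** 32-byte boundary.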
+*/ +#define NUM_RX_DESC 8 /* Number of RX descriptors */ +#define NUM_TX_DESC 32 /* Number of TX descriptors */ +#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */ + /* Multiple of 4 for DC21040 */ + /* Allows 512 byte alignment */ +struct de4x5_desc { + volatile __le32 status; + __le32 des1; + __le32 buf; + __le32 next; + DESC_ALIGN +}; + +/* +** The DE4X5 private structure +*/ +#define DE4X5_PKT_STAT_SZ 16 +#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you + increase DE4X5_PKT_STAT_SZ */ + +struct pkt_stats { + u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */ + u_int unicast; + u_int multicast; + u_int broadcast; + u_int excessive_collisions; + u_int tx_underruns; + u_int excessive_underruns; + u_int rx_runt_frames; + u_int rx_collision; + u_int rx_dribble; + u_int rx_overflow; +}; + +struct de4x5_private { + char adapter_name[80]; /* Adapter name */ + u_long interrupt; /* Aligned ISR flag */ + struct de4x5_desc *rx_ring; /* RX descriptor ring */ + struct de4x5_desc *tx_ring; /* TX descriptor ring */ + struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */ + struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */ + int rx_new, rx_old; /* RX descriptor ring pointers */ + int tx_new, tx_old; /* TX descriptor ring pointers */ + char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */ + char frame[64]; /* Min sized packet for loopback*/ + spinlock_t lock; /* Adapter specific spinlock */ + struct net_device_stats stats; /* Public stats */ + struct pkt_stats pktStats; /* Private stats counters */ + char rxRingSize; + char txRingSize; + int bus; /* EISA or PCI */ + int bus_num; /* PCI Bus number */ + int device; /* Device number on PCI bus */ + int state; /* Adapter OPENED or CLOSED */ + int chipset; /* DC21040, DC21041 or DC21140 */ + s32 irq_mask; /* Interrupt Mask (Enable) bits */ + s32 irq_en; /* Summary interrupt bits */ + int media; /* Media (eg TP), mode (eg 100B)*/ + int c_media; /* Remember the last media conn */ + bool fdx; /* media full duplex flag */ + int linkOK; /* Link is OK */ + int autosense; /* Allow/disallow autosensing */ + bool tx_enable; /* Enable descriptor polling */ + int setup_f; /* Setup frame filtering type */ + int local_state; /* State within a 'media' state */ + struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */ + struct sia_phy sia; /* SIA PHY Information */ + int active; /* Index to active PHY device */ + int mii_cnt; /* Number of attached PHY's */ + int timeout; /* Scheduling counter */ + struct timer_list timer; /* Timer info for kernel */ + int tmp; /* Temporary global per card */ + struct { + u_long lock; /* Lock the cache accesses */ + s32 csr0; /* Saved Bus Mode Register */ + s32 csr6; /* Saved Operating Mode Reg. */ + s32 csr7; /* Saved IRQ Mask Register */ + s32 gep; /* Saved General Purpose Reg. */ + s32 gepc; /* Control info for GEP */ + s32 csr13; /* Saved SIA Connectivity Reg. */ + s32 csr14; /* Saved SIA TX/RX Register */ + s32 csr15; /* Saved SIA General Register */ + int save_cnt; /* Flag if state already saved */ + struct sk_buff_head queue; /* Save the (re-ordered) skb's */ + } cache; + struct de4x5_srom srom; /* A copy of the SROM */ + int cfrv; /* Card CFRV copy */ + int rx_ovf; /* Check for 'RX overflow' tag */ + bool useSROM; /* For non-DEC card use SROM */ + bool useMII; /* Infoblock using the MII */ + int asBitValid; /* Autosense bits in GEP? 
*/ + int asPolarity; /* 0 => asserted high */ + int asBit; /* Autosense bit number in GEP */ + int defMedium; /* SROM default medium */ + int tcount; /* Last infoblock number */ + int infoblock_init; /* Initialised this infoblock? */ + int infoleaf_offset; /* SROM infoleaf for controller */ + s32 infoblock_csr6; /* csr6 value in SROM infoblock */ + int infoblock_media; /* infoblock media */ + int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */ + u_char *rst; /* Pointer to Type 5 reset info */ + u_char ibn; /* Infoblock number */ + struct parameters params; /* Command line/ #defined params */ + struct device *gendev; /* Generic device */ + dma_addr_t dma_rings; /* DMA handle for rings */ + int dma_size; /* Size of the DMA area */ + char *rx_bufs; /* rx bufs on alpha, sparc, ... */ +}; + +/* +** To get around certain poxy cards that don't provide an SROM +** for the second and more DECchip, I have to key off the first +** chip's address. I'll assume there's not a bad SROM iff: +** +** o the chipset is the same +** o the bus number is the same and > 0 +** o the sum of all the returned hw address bytes is 0 or 0x5fa +** +** Also have to save the irq for those cards whose hardware designers +** can't follow the PCI to PCI Bridge Architecture spec. +*/ +static struct { + int chipset; + int bus; + int irq; + u_char addr[ETH_ALEN]; +} last = {0,}; + +/* +** The transmit ring full condition is described by the tx_old and tx_new +** pointers by: +** tx_old = tx_new Empty ring +** tx_old = tx_new+1 Full ring +** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition) +*/ +#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ + lp->tx_old+lp->txRingSize-lp->tx_new-1:\ + lp->tx_old -lp->tx_new-1) + +#define TX_PKT_PENDING (lp->tx_old != lp->tx_new) + +/* +** Public Functions +*/ +static int de4x5_open(struct net_device *dev); +static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t de4x5_interrupt(int irq, void *dev_id); +static int de4x5_close(struct net_device *dev); +static struct net_device_stats *de4x5_get_stats(struct net_device *dev); +static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len); +static void set_multicast_list(struct net_device *dev); +static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); + +/* +** Private functions +*/ +static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev); +static int de4x5_init(struct net_device *dev); +static int de4x5_sw_reset(struct net_device *dev); +static int de4x5_rx(struct net_device *dev); +static int de4x5_tx(struct net_device *dev); +static void de4x5_ast(struct net_device *dev); +static int de4x5_txur(struct net_device *dev); +static int de4x5_rx_ovfc(struct net_device *dev); + +static int autoconf_media(struct net_device *dev); +static void create_packet(struct net_device *dev, char *frame, int len); +static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb); +static int dc21040_autoconf(struct net_device *dev); +static int dc21041_autoconf(struct net_device *dev); +static int dc21140m_autoconf(struct net_device *dev); +static int dc2114x_autoconf(struct net_device *dev); +static int srom_autoconf(struct net_device *dev); +static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *)); +static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int 
next_state, int suspect_state, int (*fn)(struct net_device *, int)); +static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec); +static int test_for_100Mb(struct net_device *dev, int msec); +static int wait_for_link(struct net_device *dev); +static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec); +static int is_spd_100(struct net_device *dev); +static int is_100_up(struct net_device *dev); +static int is_10_up(struct net_device *dev); +static int is_anc_capable(struct net_device *dev); +static int ping_media(struct net_device *dev, int msec); +static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len); +static void de4x5_free_rx_buffs(struct net_device *dev); +static void de4x5_free_tx_buffs(struct net_device *dev); +static void de4x5_save_skbs(struct net_device *dev); +static void de4x5_rst_desc_ring(struct net_device *dev); +static void de4x5_cache_state(struct net_device *dev, int flag); +static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb); +static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb); +static struct sk_buff *de4x5_get_cache(struct net_device *dev); +static void de4x5_setup_intr(struct net_device *dev); +static void de4x5_init_connection(struct net_device *dev); +static int de4x5_reset_phy(struct net_device *dev); +static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr); +static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec); +static int test_tp(struct net_device *dev, s32 msec); +static int EISA_signature(char *name, struct device *device); +static int PCI_signature(char *name, struct de4x5_private *lp); +static void DevicePresent(struct net_device *dev, u_long iobase); +static void enet_addr_rst(u_long aprom_addr); +static int de4x5_bad_srom(struct de4x5_private *lp); +static short srom_rd(u_long address, u_char offset); +static void srom_latch(u_int command, u_long address); +static void srom_command(u_int command, u_long address); +static void srom_address(u_int command, u_long address, u_char offset); +static short srom_data(u_int command, u_long address); +/*static void srom_busy(u_int command, u_long address);*/ +static void sendto_srom(u_int command, u_long addr); +static int getfrom_srom(u_long addr); +static int srom_map_media(struct net_device *dev); +static int srom_infoleaf_info(struct net_device *dev); +static void srom_init(struct net_device *dev); +static void srom_exec(struct net_device *dev, u_char *p); +static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr); +static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr); +static int mii_rdata(u_long ioaddr); +static void mii_wdata(int data, int len, u_long ioaddr); +static void mii_ta(u_long rw, u_long ioaddr); +static int mii_swap(int data, int len); +static void mii_address(u_char addr, u_long ioaddr); +static void sendto_mii(u32 command, int data, u_long ioaddr); +static int getfrom_mii(u32 command, u_long ioaddr); +static int mii_get_oui(u_char phyaddr, u_long ioaddr); +static int mii_get_phy(struct net_device *dev); +static void SetMulticastFilter(struct net_device *dev); +static int get_hw_addr(struct net_device *dev); +static void srom_repair(struct net_device *dev, int card); +static int test_bad_enet(struct net_device *dev, int status); +static int an_exception(struct de4x5_private *lp); +static char *build_setup_frame(struct net_device *dev, int mode); +static void 
disable_ast(struct net_device *dev); +static long de4x5_switch_mac_port(struct net_device *dev); +static int gep_rd(struct net_device *dev); +static void gep_wr(s32 data, struct net_device *dev); +static void yawn(struct net_device *dev, int state); +static void de4x5_parse_params(struct net_device *dev); +static void de4x5_dbg_open(struct net_device *dev); +static void de4x5_dbg_mii(struct net_device *dev, int k); +static void de4x5_dbg_media(struct net_device *dev); +static void de4x5_dbg_srom(struct de4x5_srom *p); +static void de4x5_dbg_rx(struct sk_buff *skb, int len); +static int dc21041_infoleaf(struct net_device *dev); +static int dc21140_infoleaf(struct net_device *dev); +static int dc21142_infoleaf(struct net_device *dev); +static int dc21143_infoleaf(struct net_device *dev); +static int type0_infoblock(struct net_device *dev, u_char count, u_char *p); +static int type1_infoblock(struct net_device *dev, u_char count, u_char *p); +static int type2_infoblock(struct net_device *dev, u_char count, u_char *p); +static int type3_infoblock(struct net_device *dev, u_char count, u_char *p); +static int type4_infoblock(struct net_device *dev, u_char count, u_char *p); +static int type5_infoblock(struct net_device *dev, u_char count, u_char *p); +static int compact_infoblock(struct net_device *dev, u_char count, u_char *p); + +/* +** Note now that module autoprobing is allowed under EISA and PCI. The +** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes +** to "do the right thing". +*/ + +static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */ + +module_param(io, int, 0); +module_param(de4x5_debug, int, 0); +module_param(dec_only, int, 0); +module_param(args, charp, 0); + +MODULE_PARM_DESC(io, "de4x5 I/O base address"); +MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask"); +MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)"); +MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details"); +MODULE_LICENSE("GPL"); + +/* +** List the SROM infoleaf functions and chipsets +*/ +struct InfoLeaf { + int chipset; + int (*fn)(struct net_device *); +}; +static struct InfoLeaf infoleaf_array[] = { + {DC21041, dc21041_infoleaf}, + {DC21140, dc21140_infoleaf}, + {DC21142, dc21142_infoleaf}, + {DC21143, dc21143_infoleaf} +}; +#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array) + +/* +** List the SROM info block functions +*/ +static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = { + type0_infoblock, + type1_infoblock, + type2_infoblock, + type3_infoblock, + type4_infoblock, + type5_infoblock, + compact_infoblock +}; + +#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1) + +/* +** Miscellaneous defines... +*/ +#define RESET_DE4X5 {\ + int i;\ + i=inl(DE4X5_BMR);\ + mdelay(1);\ + outl(i | BMR_SWR, DE4X5_BMR);\ + mdelay(1);\ + outl(i, DE4X5_BMR);\ + mdelay(1);\ + for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\ + mdelay(1);\ +} + +#define PHY_HARD_RESET {\ + outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. 
*/\ + mdelay(1); /* Assert for 1ms */\ + outl(0x00, DE4X5_GEP);\ + mdelay(2); /* Wait for 2ms */\ +} + +static const struct net_device_ops de4x5_netdev_ops = { + .ndo_open = de4x5_open, + .ndo_stop = de4x5_close, + .ndo_start_xmit = de4x5_queue_pkt, + .ndo_get_stats = de4x5_get_stats, + .ndo_set_rx_mode = set_multicast_list, + .ndo_do_ioctl = de4x5_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address= eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + + +static int +de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) +{ + char name[DE4X5_NAME_LENGTH + 1]; + struct de4x5_private *lp = netdev_priv(dev); + struct pci_dev *pdev = NULL; + int i, status=0; + + dev_set_drvdata(gendev, dev); + + /* Ensure we're not sleeping */ + if (lp->bus == EISA) { + outb(WAKEUP, PCI_CFPM); + } else { + pdev = to_pci_dev (gendev); + pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP); + } + mdelay(10); + + RESET_DE4X5; + + if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) { + return -ENXIO; /* Hardware could not reset */ + } + + /* + ** Now find out what kind of DC21040/DC21041/DC21140 board we have. + */ + lp->useSROM = false; + if (lp->bus == PCI) { + PCI_signature(name, lp); + } else { + EISA_signature(name, gendev); + } + + if (*name == '\0') { /* Not found a board signature */ + return -ENXIO; + } + + dev->base_addr = iobase; + printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase); + + status = get_hw_addr(dev); + printk(", h/w address %pM\n", dev->dev_addr); + + if (status != 0) { + printk(" which has an Ethernet PROM CRC error.\n"); + return -ENXIO; + } else { + skb_queue_head_init(&lp->cache.queue); + lp->cache.gepc = GEP_INIT; + lp->asBit = GEP_SLNK; + lp->asPolarity = GEP_SLNK; + lp->asBitValid = ~0; + lp->timeout = -1; + lp->gendev = gendev; + spin_lock_init(&lp->lock); + init_timer(&lp->timer); + lp->timer.function = (void (*)(unsigned long))de4x5_ast; + lp->timer.data = (unsigned long)dev; + de4x5_parse_params(dev); + + /* + ** Choose correct autosensing in case someone messed up + */ + lp->autosense = lp->params.autosense; + if (lp->chipset != DC21140) { + if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) { + lp->params.autosense = TP; + } + if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) { + lp->params.autosense = BNC; + } + } + lp->fdx = lp->params.fdx; + sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev)); + + lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); +#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY) + lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; +#endif + lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, + &lp->dma_rings, GFP_ATOMIC); + if (lp->rx_ring == NULL) { + return -ENOMEM; + } + + lp->tx_ring = lp->rx_ring + NUM_RX_DESC; + + /* + ** Set up the RX descriptor ring (Intels) + ** Allocate contiguous receive buffers, long word aligned (Alphas) + */ +#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) + for (i=0; irx_ring[i].status = 0; + lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); + lp->rx_ring[i].buf = 0; + lp->rx_ring[i].next = 0; + lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */ + } + +#else + { + dma_addr_t dma_rx_bufs; + + dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC) + * sizeof(struct de4x5_desc); + dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN; + lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC + + NUM_TX_DESC) + 
DE4X5_ALIGN) & ~DE4X5_ALIGN); + for (i=0; irx_ring[i].status = 0; + lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); + lp->rx_ring[i].buf = + cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ); + lp->rx_ring[i].next = 0; + lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */ + } + + } +#endif + + barrier(); + + lp->rxRingSize = NUM_RX_DESC; + lp->txRingSize = NUM_TX_DESC; + + /* Write the end of list marker to the descriptor lists */ + lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER); + lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER); + + /* Tell the adapter where the TX/RX rings are located. */ + outl(lp->dma_rings, DE4X5_RRBA); + outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), + DE4X5_TRBA); + + /* Initialise the IRQ mask and Enable/Disable */ + lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM; + lp->irq_en = IMR_NIM | IMR_AIM; + + /* Create a loopback packet frame for later media probing */ + create_packet(dev, lp->frame, sizeof(lp->frame)); + + /* Check if the RX overflow bug needs testing for */ + i = lp->cfrv & 0x000000fe; + if ((lp->chipset == DC21140) && (i == 0x20)) { + lp->rx_ovf = 1; + } + + /* Initialise the SROM pointers if possible */ + if (lp->useSROM) { + lp->state = INITIALISED; + if (srom_infoleaf_info(dev)) { + dma_free_coherent (gendev, lp->dma_size, + lp->rx_ring, lp->dma_rings); + return -ENXIO; + } + srom_init(dev); + } + + lp->state = CLOSED; + + /* + ** Check for an MII interface + */ + if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) { + mii_get_phy(dev); + } + + printk(" and requires IRQ%d (provided by %s).\n", dev->irq, + ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); + } + + if (de4x5_debug & DEBUG_VERSION) { + printk(version); + } + + /* The DE4X5-specific entries in the device structure. */ + SET_NETDEV_DEV(dev, gendev); + dev->netdev_ops = &de4x5_netdev_ops; + dev->mem_start = 0; + + /* Fill in the generic fields of the device structure. */ + if ((status = register_netdev (dev))) { + dma_free_coherent (gendev, lp->dma_size, + lp->rx_ring, lp->dma_rings); + return status; + } + + /* Let the adapter sleep to save power */ + yawn(dev, SLEEP); + + return status; +} + + +static int +de4x5_open(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int i, status = 0; + s32 omr; + + /* Allocate the RX buffers */ + for (i=0; irxRingSize; i++) { + if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) { + de4x5_free_rx_buffs(dev); + return -EAGAIN; + } + } + + /* + ** Wake up the adapter + */ + yawn(dev, WAKEUP); + + /* + ** Re-initialize the DE4X5... 
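The ring setup above ends by tagging the final descriptor with an end-of-ring bit (RD_RER/TD_TER) and programming the ring base addresses into the chip, so the DMA engine wraps back to the start on its own. As a minimal, self-contained illustration of that layout — every name here (demo_desc, DEMO_*) is invented for the sketch and is not part of the driver:

/* Illustrative sketch only: a Tulip-style descriptor ring in which the
 * last entry carries an end-of-ring flag so the DMA engine wraps back
 * to the ring base.  Names are invented for this example.
 */
#include <stdint.h>
#include <stddef.h>

#define DEMO_RING_SIZE   8
#define DEMO_OWN_BIT     0x80000000u   /* descriptor owned by the NIC   */
#define DEMO_RING_END    0x02000000u   /* wrap to base after this entry */

struct demo_desc {
    uint32_t status;    /* ownership + completion status */
    uint32_t control;   /* buffer size, ring-end flag    */
    uint32_t buf;       /* DMA address of the data buffer */
    uint32_t next;      /* unused in ring (wrap) mode    */
};

static void demo_init_ring(struct demo_desc *ring, const uint32_t *buf_dma,
                           uint32_t buf_len)
{
    size_t i;

    for (i = 0; i < DEMO_RING_SIZE; i++) {
        ring[i].control = buf_len;
        ring[i].buf     = buf_dma[i];
        ring[i].next    = 0;
        ring[i].status  = DEMO_OWN_BIT;    /* hand the slot to the NIC */
    }
    /* Mark the final descriptor so the engine wraps to ring[0]. */
    ring[DEMO_RING_SIZE - 1].control |= DEMO_RING_END;
}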
+ */ + status = de4x5_init(dev); + spin_lock_init(&lp->lock); + lp->state = OPEN; + de4x5_dbg_open(dev); + + if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, + lp->adapter_name, dev)) { + printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); + if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, + lp->adapter_name, dev)) { + printk("\n Cannot get IRQ- reconfigure your hardware.\n"); + disable_ast(dev); + de4x5_free_rx_buffs(dev); + de4x5_free_tx_buffs(dev); + yawn(dev, SLEEP); + lp->state = CLOSED; + return -EAGAIN; + } else { + printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n"); + printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n"); + } + } + + lp->interrupt = UNMASK_INTERRUPTS; + dev->trans_start = jiffies; /* prevent tx timeout */ + + START_DE4X5; + + de4x5_setup_intr(dev); + + if (de4x5_debug & DEBUG_OPEN) { + printk("\tsts: 0x%08x\n", inl(DE4X5_STS)); + printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR)); + printk("\timr: 0x%08x\n", inl(DE4X5_IMR)); + printk("\tomr: 0x%08x\n", inl(DE4X5_OMR)); + printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR)); + printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR)); + printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR)); + printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR)); + } + + return status; +} + +/* +** Initialize the DE4X5 operating conditions. NB: a chip problem with the +** DC21140 requires using perfect filtering mode for that chip. Since I can't +** see why I'd want > 14 multicast addresses, I have changed all chips to use +** the perfect filtering mode. Keep the DMA burst length at 8: there seems +** to be data corruption problems if it is larger (UDP errors seen from a +** ttcp source). +*/ +static int +de4x5_init(struct net_device *dev) +{ + /* Lock out other processes whilst setting up the hardware */ + netif_stop_queue(dev); + + de4x5_sw_reset(dev); + + /* Autoconfigure the connected port */ + autoconf_media(dev); + + return 0; +} + +static int +de4x5_sw_reset(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int i, j, status = 0; + s32 bmr, omr; + + /* Select the MII or SRL port now and RESET the MAC */ + if (!lp->useSROM) { + if (lp->phy[lp->active].id != 0) { + lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD; + } else { + lp->infoblock_csr6 = OMR_SDP | OMR_TTM; + } + de4x5_switch_mac_port(dev); + } + + /* + ** Set the programmable burst length to 8 longwords for all the DC21140 + ** Fasternet chips and 4 longwords for all others: DMA errors result + ** without these values. Cache align 16 long. + */ + bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN; + bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? 
BMR_RML : 0); + outl(bmr, DE4X5_BMR); + + omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */ + if (lp->chipset == DC21140) { + omr |= (OMR_SDP | OMR_SB); + } + lp->setup_f = PERFECT; + outl(lp->dma_rings, DE4X5_RRBA); + outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), + DE4X5_TRBA); + + lp->rx_new = lp->rx_old = 0; + lp->tx_new = lp->tx_old = 0; + + for (i = 0; i < lp->rxRingSize; i++) { + lp->rx_ring[i].status = cpu_to_le32(R_OWN); + } + + for (i = 0; i < lp->txRingSize; i++) { + lp->tx_ring[i].status = cpu_to_le32(0); + } + + barrier(); + + /* Build the setup frame depending on filtering mode */ + SetMulticastFilter(dev); + + load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1); + outl(omr|OMR_ST, DE4X5_OMR); + + /* Poll for setup frame completion (adapter interrupts are disabled now) */ + + for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */ + mdelay(1); + if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1; + } + outl(omr, DE4X5_OMR); /* Stop everything! */ + + if (j == 0) { + printk("%s: Setup frame timed out, status %08x\n", dev->name, + inl(DE4X5_STS)); + status = -EIO; + } + + lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; + lp->tx_old = lp->tx_new; + + return status; +} + +/* +** Writes a socket buffer address to the next available transmit descriptor. +*/ +static netdev_tx_t +de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + u_long flags = 0; + + netif_stop_queue(dev); + if (!lp->tx_enable) /* Cannot send for now */ + return NETDEV_TX_LOCKED; + + /* + ** Clean out the TX ring asynchronously to interrupts - sometimes the + ** interrupts are lost by delayed descriptor status updates relative to + ** the irq assertion, especially with a busy PCI bus. + */ + spin_lock_irqsave(&lp->lock, flags); + de4x5_tx(dev); + spin_unlock_irqrestore(&lp->lock, flags); + + /* Test if cache is already locked - requeue skb if so */ + if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) + return NETDEV_TX_LOCKED; + + /* Transmit descriptor ring full or stale skb */ + if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) { + if (lp->interrupt) { + de4x5_putb_cache(dev, skb); /* Requeue the buffer */ + } else { + de4x5_put_cache(dev, skb); + } + if (de4x5_debug & DEBUG_TX) { + printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? 
"YES" : "NO"); + } + } else if (skb->len > 0) { + /* If we already have stuff queued locally, use that first */ + if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) { + de4x5_put_cache(dev, skb); + skb = de4x5_get_cache(dev); + } + + while (skb && !netif_queue_stopped(dev) && + (u_long) lp->tx_skb[lp->tx_new] <= 1) { + spin_lock_irqsave(&lp->lock, flags); + netif_stop_queue(dev); + load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); + lp->stats.tx_bytes += skb->len; + outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ + + lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; + + if (TX_BUFFS_AVAIL) { + netif_start_queue(dev); /* Another pkt may be queued */ + } + skb = de4x5_get_cache(dev); + spin_unlock_irqrestore(&lp->lock, flags); + } + if (skb) de4x5_putb_cache(dev, skb); + } + + lp->cache.lock = 0; + + return NETDEV_TX_OK; +} + +/* +** The DE4X5 interrupt handler. +** +** I/O Read/Writes through intermediate PCI bridges are never 'posted', +** so that the asserted interrupt always has some real data to work with - +** if these I/O accesses are ever changed to memory accesses, ensure the +** STS write is read immediately to complete the transaction if the adapter +** is not on bus 0. Lost interrupts can still occur when the PCI bus load +** is high and descriptor status bits cannot be set before the associated +** interrupt is asserted and this routine entered. +*/ +static irqreturn_t +de4x5_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct de4x5_private *lp; + s32 imr, omr, sts, limit; + u_long iobase; + unsigned int handled = 0; + + lp = netdev_priv(dev); + spin_lock(&lp->lock); + iobase = dev->base_addr; + + DISABLE_IRQs; /* Ensure non re-entrancy */ + + if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt)) + printk("%s: Re-entering the interrupt handler.\n", dev->name); + + synchronize_irq(dev->irq); + + for (limit=0; limit<8; limit++) { + sts = inl(DE4X5_STS); /* Read IRQ status */ + outl(sts, DE4X5_STS); /* Reset the board interrupts */ + + if (!(sts & lp->irq_mask)) break;/* All done */ + handled = 1; + + if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */ + de4x5_rx(dev); + + if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */ + de4x5_tx(dev); + + if (sts & STS_LNF) { /* TP Link has failed */ + lp->irq_mask &= ~IMR_LFM; + } + + if (sts & STS_UNF) { /* Transmit underrun */ + de4x5_txur(dev); + } + + if (sts & STS_SE) { /* Bus Error */ + STOP_DE4X5; + printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n", + dev->name, sts); + spin_unlock(&lp->lock); + return IRQ_HANDLED; + } + } + + /* Load the TX ring with any locally stored packets */ + if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { + while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) { + de4x5_queue_pkt(de4x5_get_cache(dev), dev); + } + lp->cache.lock = 0; + } + + lp->interrupt = UNMASK_INTERRUPTS; + ENABLE_IRQs; + spin_unlock(&lp->lock); + + return IRQ_RETVAL(handled); +} + +static int +de4x5_rx(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int entry; + s32 status; + + for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0; + entry=lp->rx_new) { + status = (s32)le32_to_cpu(lp->rx_ring[entry].status); + + if (lp->rx_ovf) { + if (inl(DE4X5_MFC) & MFC_FOCM) { + de4x5_rx_ovfc(dev); + break; + } + } + + if (status & RD_FS) { /* Remember the start of frame */ + lp->rx_old = entry; + } + + if (status & RD_LS) { /* Valid 
frame status */ + if (lp->tx_enable) lp->linkOK++; + if (status & RD_ES) { /* There was an error. */ + lp->stats.rx_errors++; /* Update the error stats. */ + if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++; + if (status & RD_CE) lp->stats.rx_crc_errors++; + if (status & RD_OF) lp->stats.rx_fifo_errors++; + if (status & RD_TL) lp->stats.rx_length_errors++; + if (status & RD_RF) lp->pktStats.rx_runt_frames++; + if (status & RD_CS) lp->pktStats.rx_collision++; + if (status & RD_DB) lp->pktStats.rx_dribble++; + if (status & RD_OF) lp->pktStats.rx_overflow++; + } else { /* A valid frame received */ + struct sk_buff *skb; + short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status) + >> 16) - 4; + + if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) { + printk("%s: Insufficient memory; nuking packet.\n", + dev->name); + lp->stats.rx_dropped++; + } else { + de4x5_dbg_rx(skb, pkt_len); + + /* Push up the protocol stack */ + skb->protocol=eth_type_trans(skb,dev); + de4x5_local_stats(dev, skb->data, pkt_len); + netif_rx(skb); + + /* Update stats */ + lp->stats.rx_packets++; + lp->stats.rx_bytes += pkt_len; + } + } + + /* Change buffer ownership for this frame, back to the adapter */ + for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) { + lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN); + barrier(); + } + lp->rx_ring[entry].status = cpu_to_le32(R_OWN); + barrier(); + } + + /* + ** Update entry information + */ + lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize; + } + + return 0; +} + +static inline void +de4x5_free_tx_buff(struct de4x5_private *lp, int entry) +{ + dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf), + le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1, + DMA_TO_DEVICE); + if ((u_long) lp->tx_skb[entry] > 1) + dev_kfree_skb_irq(lp->tx_skb[entry]); + lp->tx_skb[entry] = NULL; +} + +/* +** Buffer sent - check for TX buffer errors. +*/ +static int +de4x5_tx(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int entry; + s32 status; + + for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { + status = (s32)le32_to_cpu(lp->tx_ring[entry].status); + if (status < 0) { /* Buffer not sent yet */ + break; + } else if (status != 0x7fffffff) { /* Not setup frame */ + if (status & TD_ES) { /* An error happened */ + lp->stats.tx_errors++; + if (status & TD_NC) lp->stats.tx_carrier_errors++; + if (status & TD_LC) lp->stats.tx_window_errors++; + if (status & TD_UF) lp->stats.tx_fifo_errors++; + if (status & TD_EC) lp->pktStats.excessive_collisions++; + if (status & TD_DE) lp->stats.tx_aborted_errors++; + + if (TX_PKT_PENDING) { + outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */ + } + } else { /* Packet sent */ + lp->stats.tx_packets++; + if (lp->tx_enable) lp->linkOK++; + } + /* Update the collision counter */ + lp->stats.collisions += ((status & TD_EC) ? 16 : + ((status & TD_CC) >> 3)); + + /* Free the buffer. */ + if (lp->tx_skb[entry] != NULL) + de4x5_free_tx_buff(lp, entry); + } + + /* Update all the pointers */ + lp->tx_old = (lp->tx_old + 1) % lp->txRingSize; + } + + /* Any resources available? 
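The "Any resources available?" check that follows is the TX_BUFFS_AVAIL macro defined earlier. A tiny stand-alone sketch of the same wrap-around arithmetic (ring size and names invented here) shows why one slot is always kept unused, so that tx_old == tx_new can unambiguously mean "empty":

/* Sketch of the tx_old/tx_new accounting behind TX_BUFFS_AVAIL: one
 * descriptor is sacrificed so the empty and full cases are distinct.
 * Purely illustrative; TX_RING is an invented size.
 */
#include <assert.h>

#define TX_RING 8

/* Number of free transmit descriptors, allowing for wrap-around. */
static int tx_slots_free(int tx_old, int tx_new)
{
    return (tx_old <= tx_new) ? tx_old + TX_RING - tx_new - 1
                              : tx_old - tx_new - 1;
}

int main(void)
{
    assert(tx_slots_free(0, 0) == TX_RING - 1);   /* empty ring              */
    assert(tx_slots_free(3, 2) == 0);             /* full: tx_old == tx_new+1 */
    assert(tx_slots_free(0, TX_RING - 1) == 0);   /* full, wrapped condition  */
    return 0;
}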
*/ + if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) { + if (lp->interrupt) + netif_wake_queue(dev); + else + netif_start_queue(dev); + } + + return 0; +} + +static void +de4x5_ast(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + int next_tick = DE4X5_AUTOSENSE_MS; + int dt; + + if (lp->useSROM) + next_tick = srom_autoconf(dev); + else if (lp->chipset == DC21140) + next_tick = dc21140m_autoconf(dev); + else if (lp->chipset == DC21041) + next_tick = dc21041_autoconf(dev); + else if (lp->chipset == DC21040) + next_tick = dc21040_autoconf(dev); + lp->linkOK = 0; + + dt = (next_tick * HZ) / 1000; + + if (!dt) + dt = 1; + + mod_timer(&lp->timer, jiffies + dt); +} + +static int +de4x5_txur(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int omr; + + omr = inl(DE4X5_OMR); + if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) { + omr &= ~(OMR_ST|OMR_SR); + outl(omr, DE4X5_OMR); + while (inl(DE4X5_STS) & STS_TS); + if ((omr & OMR_TR) < OMR_TR) { + omr += 0x4000; + } else { + omr |= OMR_SF; + } + outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); + } + + return 0; +} + +static int +de4x5_rx_ovfc(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int omr; + + omr = inl(DE4X5_OMR); + outl(omr & ~OMR_SR, DE4X5_OMR); + while (inl(DE4X5_STS) & STS_RS); + + for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) { + lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN); + lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize; + } + + outl(omr, DE4X5_OMR); + + return 0; +} + +static int +de4x5_close(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 imr, omr; + + disable_ast(dev); + + netif_stop_queue(dev); + + if (de4x5_debug & DEBUG_CLOSE) { + printk("%s: Shutting down ethercard, status was %8.8x.\n", + dev->name, inl(DE4X5_STS)); + } + + /* + ** We stop the DE4X5 here... mask interrupts and stop TX & RX + */ + DISABLE_IRQs; + STOP_DE4X5; + + /* Free the associated irq */ + free_irq(dev->irq, dev); + lp->state = CLOSED; + + /* Free any socket buffers */ + de4x5_free_rx_buffs(dev); + de4x5_free_tx_buffs(dev); + + /* Put the adapter to sleep to save power */ + yawn(dev, SLEEP); + + return 0; +} + +static struct net_device_stats * +de4x5_get_stats(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR)); + + return &lp->stats; +} + +static void +de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len) +{ + struct de4x5_private *lp = netdev_priv(dev); + int i; + + for (i=1; ipktStats.bins[i]++; + i = DE4X5_PKT_STAT_SZ; + } + } + if (is_multicast_ether_addr(buf)) { + if (is_broadcast_ether_addr(buf)) { + lp->pktStats.broadcast++; + } else { + lp->pktStats.multicast++; + } + } else if (ether_addr_equal(buf, dev->dev_addr)) { + lp->pktStats.unicast++; + } + + lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ + if (lp->pktStats.bins[0] == 0) { /* Reset counters */ + memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats)); + } +} + +/* +** Removes the TD_IC flag from previous descriptor to improve TX performance. +** If the flag is changed on a descriptor that is being read by the hardware, +** I assume PCI transaction ordering will mean you are either successful or +** just miss asserting the change to the hardware. 
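The autosense timer above (de4x5_ast) converts the millisecond interval returned by the media routines into scheduler ticks and clamps the result so a short interval can never arm a zero-length timer. A minimal stand-alone sketch of that conversion, with DEMO_HZ standing in for the kernel's HZ:

/* Millisecond-to-tick conversion as used by the autosense timer:
 * integer division can round a short interval down to zero ticks,
 * so the result is clamped to at least one tick.  DEMO_HZ is an
 * assumed stand-in for the kernel's HZ.
 */
#include <stdio.h>

#define DEMO_HZ 100                    /* assume 100 ticks per second */

static unsigned long ms_to_ticks(unsigned int msec)
{
    unsigned long ticks = ((unsigned long)msec * DEMO_HZ) / 1000;

    return ticks ? ticks : 1;          /* never arm a zero-length timer */
}

int main(void)
{
    printf("3000 ms -> %lu ticks\n", ms_to_ticks(3000));  /* 300 */
    printf("   5 ms -> %lu ticks\n", ms_to_ticks(5));     /* clamped to 1 */
    return 0;
}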
Anyway you're messing with +** a descriptor you don't own, but this shouldn't kill the chip provided +** the descriptor register is read only to the hardware. +*/ +static void +load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb) +{ + struct de4x5_private *lp = netdev_priv(dev); + int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1); + dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE); + + lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma); + lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER); + lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags); + lp->tx_skb[lp->tx_new] = skb; + lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC); + barrier(); + + lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN); + barrier(); +} + +/* +** Set or clear the multicast filter for this adaptor. +*/ +static void +set_multicast_list(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + /* First, double check that the adapter is open */ + if (lp->state == OPEN) { + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ + u32 omr; + omr = inl(DE4X5_OMR); + omr |= OMR_PR; + outl(omr, DE4X5_OMR); + } else { + SetMulticastFilter(dev); + load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | + SETUP_FRAME_LEN, (struct sk_buff *)1); + + lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; + outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ + dev->trans_start = jiffies; /* prevent tx timeout */ + } + } +} + +/* +** Calculate the hash code and update the logical address filter +** from a list of ethernet multicast addresses. +** Little endian crc one liner from Matt Thomas, DEC. +*/ +static void +SetMulticastFilter(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + struct netdev_hw_addr *ha; + u_long iobase = dev->base_addr; + int i, bit, byte; + u16 hashcode; + u32 omr, crc; + char *pa; + unsigned char *addrs; + + omr = inl(DE4X5_OMR); + omr &= ~(OMR_PR | OMR_PM); + pa = build_setup_frame(dev, ALL); /* Build the basic frame */ + + if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) { + omr |= OMR_PM; /* Pass all multicasts */ + } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ + + byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ + bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ + + byte <<= 1; /* calc offset into setup frame */ + if (byte & 0x02) { + byte -= 1; + } + lp->setup_frame[byte] |= bit; + } + } else { /* Perfect filtering */ + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; + for (i=0; ibase_addr; + + if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5")) + return -EBUSY; + + if (!request_region (iobase + DE4X5_EISA_IO_PORTS, + DE4X5_EISA_TOTAL_SIZE, "de4x5")) { + status = -EBUSY; + goto release_reg_1; + } + + if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { + status = -ENOMEM; + goto release_reg_2; + } + lp = netdev_priv(dev); + + cfid = (u32) inl(PCI_CFID); + lp->cfrv = (u_short) inl(PCI_CFRV); + device = (cfid >> 8) & 0x00ffff00; + vendor = (u_short) cfid; + + /* Read the EISA Configuration Registers */ + regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT); +#ifdef CONFIG_ALPHA + /* Looks like the Jensen firmware (rev 2.2) doesn't really + * care about the EISA configuration, and thus doesn't + * configure the PLX bridge properly. Oh well... 
Simply mimic + * the EISA config file to sort it out. */ + + /* EISA REG1: Assert DecChip 21040 HW Reset */ + outb (ER1_IAM | 1, EISA_REG1); + mdelay (1); + + /* EISA REG1: Deassert DecChip 21040 HW Reset */ + outb (ER1_IAM, EISA_REG1); + mdelay (1); + + /* EISA REG3: R/W Burst Transfer Enable */ + outb (ER3_BWE | ER3_BRE, EISA_REG3); + + /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */ + outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0); +#endif + irq = de4x5_irq[(regval >> 1) & 0x03]; + + if (is_DC2114x) { + device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); + } + lp->chipset = device; + lp->bus = EISA; + + /* Write the PCI Configuration Registers */ + outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS); + outl(0x00006000, PCI_CFLT); + outl(iobase, PCI_CBIO); + + DevicePresent(dev, EISA_APROM); + + dev->irq = irq; + + if (!(status = de4x5_hw_init (dev, iobase, gendev))) { + return 0; + } + + free_netdev (dev); + release_reg_2: + release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); + release_reg_1: + release_region (iobase, DE4X5_EISA_TOTAL_SIZE); + + return status; +} + +static int de4x5_eisa_remove(struct device *device) +{ + struct net_device *dev; + u_long iobase; + + dev = dev_get_drvdata(device); + iobase = dev->base_addr; + + unregister_netdev (dev); + free_netdev (dev); + release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); + release_region (iobase, DE4X5_EISA_TOTAL_SIZE); + + return 0; +} + +static struct eisa_device_id de4x5_eisa_ids[] = { + { "DEC4250", 0 }, /* 0 is the board name index... */ + { "" } +}; +MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); + +static struct eisa_driver de4x5_eisa_driver = { + .id_table = de4x5_eisa_ids, + .driver = { + .name = "de4x5", + .probe = de4x5_eisa_probe, + .remove = de4x5_eisa_remove, + } +}; +MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); +#endif + +#ifdef CONFIG_PCI + +/* +** This function searches the current bus (which is >0) for a DECchip with an +** SROM, so that in multiport cards that have one SROM shared between multiple +** DECchips, we can find the base SROM irrespective of the BIOS scan direction. +** For single port cards this is a time waster... +*/ +static void +srom_search(struct net_device *dev, struct pci_dev *pdev) +{ + u_char pb; + u_short vendor, status; + u_int irq = 0, device; + u_long iobase = 0; /* Clear upper 32 bits in Alphas */ + int i, j; + struct de4x5_private *lp = netdev_priv(dev); + struct pci_dev *this_dev; + + list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) { + vendor = this_dev->vendor; + device = this_dev->device << 8; + if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue; + + /* Get the chip configuration revision register */ + pb = this_dev->bus->number; + + /* Set the device number information */ + lp->device = PCI_SLOT(this_dev->devfn); + lp->bus_num = pb; + + /* Set the chipset information */ + if (is_DC2114x) { + device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK + ? 
DC21142 : DC21143); + } + lp->chipset = device; + + /* Get the board I/O address (64 bits on sparc64) */ + iobase = pci_resource_start(this_dev, 0); + + /* Fetch the IRQ to be used */ + irq = this_dev->irq; + if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue; + + /* Check if I/O accesses are enabled */ + pci_read_config_word(this_dev, PCI_COMMAND, &status); + if (!(status & PCI_COMMAND_IO)) continue; + + /* Search for a valid SROM attached to this DECchip */ + DevicePresent(dev, DE4X5_APROM); + for (j=0, i=0; isrom + SROM_HWADD + i); + } + if (j != 0 && j != 6 * 0xff) { + last.chipset = device; + last.bus = pb; + last.irq = irq; + for (i=0; isrom + SROM_HWADD + i); + } + return; + } + } +} + +/* +** PCI bus I/O device probe +** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not +** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be +** enabled by the user first in the set up utility. Hence we just check for +** enabled features and silently ignore the card if they're not. +** +** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering +** bit. Here, check for I/O accesses and then set BM. If you put the card in +** a non BM slot, you're on your own (and complain to the PC vendor that your +** PC doesn't conform to the PCI standard)! +** +** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x +** kernels use the V0.535[n] drivers. +*/ + +static int de4x5_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + u_char pb, pbus = 0, dev_num, dnum = 0, timer; + u_short vendor, status; + u_int irq = 0, device; + u_long iobase = 0; /* Clear upper 32 bits in Alphas */ + int error; + struct net_device *dev; + struct de4x5_private *lp; + + dev_num = PCI_SLOT(pdev->devfn); + pb = pdev->bus->number; + + if (io) { /* probe a single PCI device */ + pbus = (u_short)(io >> 8); + dnum = (u_short)(io & 0xff); + if ((pbus != pb) || (dnum != dev_num)) + return -ENODEV; + } + + vendor = pdev->vendor; + device = pdev->device << 8; + if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) + return -ENODEV; + + /* Ok, the device seems to be for us. */ + if ((error = pci_enable_device (pdev))) + return error; + + if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { + error = -ENOMEM; + goto disable_dev; + } + + lp = netdev_priv(dev); + lp->bus = PCI; + lp->bus_num = 0; + + /* Search for an SROM on this bus */ + if (lp->bus_num != pb) { + lp->bus_num = pb; + srom_search(dev, pdev); + } + + /* Get the chip configuration revision register */ + lp->cfrv = pdev->revision; + + /* Set the device number information */ + lp->device = dev_num; + lp->bus_num = pb; + + /* Set the chipset information */ + if (is_DC2114x) { + device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? 
DC21142 : DC21143); + } + lp->chipset = device; + + /* Get the board I/O address (64 bits on sparc64) */ + iobase = pci_resource_start(pdev, 0); + + /* Fetch the IRQ to be used */ + irq = pdev->irq; + if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) { + error = -ENODEV; + goto free_dev; + } + + /* Check if I/O accesses and Bus Mastering are enabled */ + pci_read_config_word(pdev, PCI_COMMAND, &status); +#ifdef __powerpc__ + if (!(status & PCI_COMMAND_IO)) { + status |= PCI_COMMAND_IO; + pci_write_config_word(pdev, PCI_COMMAND, status); + pci_read_config_word(pdev, PCI_COMMAND, &status); + } +#endif /* __powerpc__ */ + if (!(status & PCI_COMMAND_IO)) { + error = -ENODEV; + goto free_dev; + } + + if (!(status & PCI_COMMAND_MASTER)) { + status |= PCI_COMMAND_MASTER; + pci_write_config_word(pdev, PCI_COMMAND, status); + pci_read_config_word(pdev, PCI_COMMAND, &status); + } + if (!(status & PCI_COMMAND_MASTER)) { + error = -ENODEV; + goto free_dev; + } + + /* Check the latency timer for values >= 0x60 */ + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer); + if (timer < 0x60) { + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60); + } + + DevicePresent(dev, DE4X5_APROM); + + if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) { + error = -EBUSY; + goto free_dev; + } + + dev->irq = irq; + + if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) { + goto release; + } + + return 0; + + release: + release_region (iobase, DE4X5_PCI_TOTAL_SIZE); + free_dev: + free_netdev (dev); + disable_dev: + pci_disable_device (pdev); + return error; +} + +static void de4x5_pci_remove(struct pci_dev *pdev) +{ + struct net_device *dev; + u_long iobase; + + dev = pci_get_drvdata(pdev); + iobase = dev->base_addr; + + unregister_netdev (dev); + free_netdev (dev); + release_region (iobase, DE4X5_PCI_TOTAL_SIZE); + pci_disable_device (pdev); +} + +static const struct pci_device_id de4x5_pci_tbl[] = { + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, + { }, +}; + +static struct pci_driver de4x5_pci_driver = { + .name = "de4x5", + .id_table = de4x5_pci_tbl, + .probe = de4x5_pci_probe, + .remove = de4x5_pci_remove, +}; + +#endif + +/* +** Auto configure the media here rather than setting the port at compile +** time. This routine is called by de4x5_init() and when a loss of media is +** detected (excessive collisions, loss of carrier, no carrier or link fail +** [TP] or no recent receive activity) to check whether the user has been +** sneaky and changed the port on us. +*/ +static int +autoconf_media(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + disable_ast(dev); + + lp->c_media = AUTO; /* Bogus last media */ + inl(DE4X5_MFC); /* Zero the lost frames counter */ + lp->media = INIT; + lp->tcount = 0; + + de4x5_ast(dev); + + return lp->media; +} + +/* +** Autoconfigure the media when using the DC21040. AUI cannot be distinguished +** from BNC as the port has a jumper to set thick or thin wire. When set for +** BNC, the BNC port will indicate activity if it's not terminated correctly. +** The only way to test for that is to place a loopback packet onto the +** network and watch for errors. 
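The comment above describes the approach taken by the autoconf routines that follow: media probing runs as a timer-driven state machine that tests one medium per tick and falls back to the next candidate on failure, returning the delay until the next poll. A skeleton of that pattern — the states, stubbed probe, and intervals here are invented placeholders, not the driver's real tests:

/* Skeleton of a polled media-autosense state machine: each call
 * handles one state, decides the next medium to probe, and returns
 * how many milliseconds to wait before the timer calls it again.
 * Everything here is an illustrative stand-in.
 */
#include <stdbool.h>

enum demo_media { DEMO_INIT, DEMO_TP, DEMO_BNC_AUI, DEMO_EXT_SIA, DEMO_NC };

struct demo_autosense {
    enum demo_media media;      /* medium currently under test */
    bool tx_enable;             /* set once a working link is found */
};

static bool demo_probe(enum demo_media m) { (void)m; return false; }  /* stub */

static int demo_autoconf(struct demo_autosense *s)
{
    int next_tick_ms = 3000;                 /* default poll interval */

    switch (s->media) {
    case DEMO_INIT:                          /* start with twisted pair */
        s->tx_enable = false;
        s->media = DEMO_TP;
        next_tick_ms = 100;
        break;
    case DEMO_TP:                            /* TP failed? try BNC/AUI  */
        if (demo_probe(DEMO_TP))
            s->tx_enable = true;
        else
            s->media = DEMO_BNC_AUI;
        break;
    case DEMO_BNC_AUI:                       /* then an external SIA    */
        if (demo_probe(DEMO_BNC_AUI))
            s->tx_enable = true;
        else
            s->media = DEMO_EXT_SIA;
        break;
    case DEMO_EXT_SIA:
        if (demo_probe(DEMO_EXT_SIA))
            s->tx_enable = true;
        else
            s->media = DEMO_NC;              /* nothing connected       */
        break;
    case DEMO_NC:
        s->media = DEMO_INIT;                /* start the scan again    */
        break;
    }
    return next_tick_ms;
}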
Since we're messing with the interrupt mask +** register, disable the board interrupts and do not allow any more packets to +** be queued to the hardware. Re-enable everything only when the media is +** found. +** I may have to "age out" locally queued packets so that the higher layer +** timeouts don't effectively duplicate packets on the network. +*/ +static int +dc21040_autoconf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int next_tick = DE4X5_AUTOSENSE_MS; + s32 imr; + + switch (lp->media) { + case INIT: + DISABLE_IRQs; + lp->tx_enable = false; + lp->timeout = -1; + de4x5_save_skbs(dev); + if ((lp->autosense == AUTO) || (lp->autosense == TP)) { + lp->media = TP; + } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) { + lp->media = BNC_AUI; + } else if (lp->autosense == EXT_SIA) { + lp->media = EXT_SIA; + } else { + lp->media = NC; + } + lp->local_state = 0; + next_tick = dc21040_autoconf(dev); + break; + + case TP: + next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI, + TP_SUSPECT, test_tp); + break; + + case TP_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf); + break; + + case BNC: + case AUI: + case BNC_AUI: + next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA, + BNC_AUI_SUSPECT, ping_media); + break; + + case BNC_AUI_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf); + break; + + case EXT_SIA: + next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000, + NC, EXT_SIA_SUSPECT, ping_media); + break; + + case EXT_SIA_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf); + break; + + case NC: + /* default to TP for all */ + reset_init_sia(dev, 0x8f01, 0xffff, 0x0000); + if (lp->media != lp->c_media) { + de4x5_dbg_media(dev); + lp->c_media = lp->media; + } + lp->media = INIT; + lp->tx_enable = false; + break; + } + + return next_tick; +} + +static int +dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, + int next_state, int suspect_state, + int (*fn)(struct net_device *, int)) +{ + struct de4x5_private *lp = netdev_priv(dev); + int next_tick = DE4X5_AUTOSENSE_MS; + int linkBad; + + switch (lp->local_state) { + case 0: + reset_init_sia(dev, csr13, csr14, csr15); + lp->local_state++; + next_tick = 500; + break; + + case 1: + if (!lp->tx_enable) { + linkBad = fn(dev, timeout); + if (linkBad < 0) { + next_tick = linkBad & ~TIMER_CB; + } else { + if (linkBad && (lp->autosense == AUTO)) { + lp->local_state = 0; + lp->media = next_state; + } else { + de4x5_init_connection(dev); + } + } + } else if (!lp->linkOK && (lp->autosense == AUTO)) { + lp->media = suspect_state; + next_tick = 3000; + } + break; + } + + return next_tick; +} + +static int +de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, + int (*fn)(struct net_device *, int), + int (*asfn)(struct net_device *)) +{ + struct de4x5_private *lp = netdev_priv(dev); + int next_tick = DE4X5_AUTOSENSE_MS; + int linkBad; + + switch (lp->local_state) { + case 1: + if (lp->linkOK) { + lp->media = prev_state; + } else { + lp->local_state++; + next_tick = asfn(dev); + } + break; + + case 2: + linkBad = fn(dev, timeout); + if (linkBad < 0) { + next_tick = linkBad & ~TIMER_CB; + } else if (!linkBad) { + lp->local_state--; + lp->media = prev_state; + } else { + lp->media = INIT; + lp->tcount++; + } + } + + return next_tick; +} + +/* +** 
Autoconfigure the media when using the DC21041. AUI needs to be tested +** before BNC, because the BNC port will indicate activity if it's not +** terminated correctly. The only way to test for that is to place a loopback +** packet onto the network and watch for errors. Since we're messing with +** the interrupt mask register, disable the board interrupts and do not allow +** any more packets to be queued to the hardware. Re-enable everything only +** when the media is found. +*/ +static int +dc21041_autoconf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 sts, irqs, irq_mask, imr, omr; + int next_tick = DE4X5_AUTOSENSE_MS; + + switch (lp->media) { + case INIT: + DISABLE_IRQs; + lp->tx_enable = false; + lp->timeout = -1; + de4x5_save_skbs(dev); /* Save non transmitted skb's */ + if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) { + lp->media = TP; /* On chip auto negotiation is broken */ + } else if (lp->autosense == TP) { + lp->media = TP; + } else if (lp->autosense == BNC) { + lp->media = BNC; + } else if (lp->autosense == AUI) { + lp->media = AUI; + } else { + lp->media = NC; + } + lp->local_state = 0; + next_tick = dc21041_autoconf(dev); + break; + + case TP_NW: + if (lp->timeout < 0) { + omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */ + outl(omr | OMR_FDX, DE4X5_OMR); + } + irqs = STS_LNF | STS_LNP; + irq_mask = IMR_LFM | IMR_LPM; + sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400); + if (sts < 0) { + next_tick = sts & ~TIMER_CB; + } else { + if (sts & STS_LNP) { + lp->media = ANS; + } else { + lp->media = AUI; + } + next_tick = dc21041_autoconf(dev); + } + break; + + case ANS: + if (!lp->tx_enable) { + irqs = STS_LNP; + irq_mask = IMR_LPM; + sts = test_ans(dev, irqs, irq_mask, 3000); + if (sts < 0) { + next_tick = sts & ~TIMER_CB; + } else { + if (!(sts & STS_LNP) && (lp->autosense == AUTO)) { + lp->media = TP; + next_tick = dc21041_autoconf(dev); + } else { + lp->local_state = 1; + de4x5_init_connection(dev); + } + } + } else if (!lp->linkOK && (lp->autosense == AUTO)) { + lp->media = ANS_SUSPECT; + next_tick = 3000; + } + break; + + case ANS_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf); + break; + + case TP: + if (!lp->tx_enable) { + if (lp->timeout < 0) { + omr = inl(DE4X5_OMR); /* Set up half duplex for TP */ + outl(omr & ~OMR_FDX, DE4X5_OMR); + } + irqs = STS_LNF | STS_LNP; + irq_mask = IMR_LFM | IMR_LPM; + sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400); + if (sts < 0) { + next_tick = sts & ~TIMER_CB; + } else { + if (!(sts & STS_LNP) && (lp->autosense == AUTO)) { + if (inl(DE4X5_SISR) & SISR_NRA) { + lp->media = AUI; /* Non selected port activity */ + } else { + lp->media = BNC; + } + next_tick = dc21041_autoconf(dev); + } else { + lp->local_state = 1; + de4x5_init_connection(dev); + } + } + } else if (!lp->linkOK && (lp->autosense == AUTO)) { + lp->media = TP_SUSPECT; + next_tick = 3000; + } + break; + + case TP_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf); + break; + + case AUI: + if (!lp->tx_enable) { + if (lp->timeout < 0) { + omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */ + outl(omr & ~OMR_FDX, DE4X5_OMR); + } + irqs = 0; + irq_mask = 0; + sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000); + if (sts < 0) { + next_tick = sts & ~TIMER_CB; + } else { + if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) { + lp->media = BNC; + 
next_tick = dc21041_autoconf(dev); + } else { + lp->local_state = 1; + de4x5_init_connection(dev); + } + } + } else if (!lp->linkOK && (lp->autosense == AUTO)) { + lp->media = AUI_SUSPECT; + next_tick = 3000; + } + break; + + case AUI_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf); + break; + + case BNC: + switch (lp->local_state) { + case 0: + if (lp->timeout < 0) { + omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */ + outl(omr & ~OMR_FDX, DE4X5_OMR); + } + irqs = 0; + irq_mask = 0; + sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000); + if (sts < 0) { + next_tick = sts & ~TIMER_CB; + } else { + lp->local_state++; /* Ensure media connected */ + next_tick = dc21041_autoconf(dev); + } + break; + + case 1: + if (!lp->tx_enable) { + if ((sts = ping_media(dev, 3000)) < 0) { + next_tick = sts & ~TIMER_CB; + } else { + if (sts) { + lp->local_state = 0; + lp->media = NC; + } else { + de4x5_init_connection(dev); + } + } + } else if (!lp->linkOK && (lp->autosense == AUTO)) { + lp->media = BNC_SUSPECT; + next_tick = 3000; + } + break; + } + break; + + case BNC_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf); + break; + + case NC: + omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */ + outl(omr | OMR_FDX, DE4X5_OMR); + reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */ + if (lp->media != lp->c_media) { + de4x5_dbg_media(dev); + lp->c_media = lp->media; + } + lp->media = INIT; + lp->tx_enable = false; + break; + } + + return next_tick; +} + +/* +** Some autonegotiation chips are broken in that they do not return the +** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement +** register, except at the first power up negotiation. +*/ +static int +dc21140m_autoconf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + int ana, anlpa, cap, cr, slnk, sr; + int next_tick = DE4X5_AUTOSENSE_MS; + u_long imr, omr, iobase = dev->base_addr; + + switch(lp->media) { + case INIT: + if (lp->timeout < 0) { + DISABLE_IRQs; + lp->tx_enable = false; + lp->linkOK = 0; + de4x5_save_skbs(dev); /* Save non transmitted skb's */ + } + if ((next_tick = de4x5_reset_phy(dev)) < 0) { + next_tick &= ~TIMER_CB; + } else { + if (lp->useSROM) { + if (srom_map_media(dev) < 0) { + lp->tcount++; + return next_tick; + } + srom_exec(dev, lp->phy[lp->active].gep); + if (lp->infoblock_media == ANS) { + ana = lp->phy[lp->active].ana | MII_ANA_CSMA; + mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); + } + } else { + lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */ + SET_10Mb; + if (lp->autosense == _100Mb) { + lp->media = _100Mb; + } else if (lp->autosense == _10Mb) { + lp->media = _10Mb; + } else if ((lp->autosense == AUTO) && + ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { + ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); + ana &= (lp->fdx ? 
~0 : ~MII_ANA_FDAM); + mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); + lp->media = ANS; + } else if (lp->autosense == AUTO) { + lp->media = SPD_DET; + } else if (is_spd_100(dev) && is_100_up(dev)) { + lp->media = _100Mb; + } else { + lp->media = NC; + } + } + lp->local_state = 0; + next_tick = dc21140m_autoconf(dev); + } + break; + + case ANS: + switch (lp->local_state) { + case 0: + if (lp->timeout < 0) { + mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); + } + cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500); + if (cr < 0) { + next_tick = cr & ~TIMER_CB; + } else { + if (cr) { + lp->local_state = 0; + lp->media = SPD_DET; + } else { + lp->local_state++; + } + next_tick = dc21140m_autoconf(dev); + } + break; + + case 1: + if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) { + next_tick = sr & ~TIMER_CB; + } else { + lp->media = SPD_DET; + lp->local_state = 0; + if (sr) { /* Success! */ + lp->tmp = MII_SR_ASSC; + anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); + ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); + if (!(anlpa & MII_ANLPA_RF) && + (cap = anlpa & MII_ANLPA_TAF & ana)) { + if (cap & MII_ANA_100M) { + lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0; + lp->media = _100Mb; + } else if (cap & MII_ANA_10M) { + lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0; + + lp->media = _10Mb; + } + } + } /* Auto Negotiation failed to finish */ + next_tick = dc21140m_autoconf(dev); + } /* Auto Negotiation failed to start */ + break; + } + break; + + case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ + if (lp->timeout < 0) { + lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS : + (~gep_rd(dev) & GEP_LNP)); + SET_100Mb_PDET; + } + if ((slnk = test_for_100Mb(dev, 6500)) < 0) { + next_tick = slnk & ~TIMER_CB; + } else { + if (is_spd_100(dev) && is_100_up(dev)) { + lp->media = _100Mb; + } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) { + lp->media = _10Mb; + } else { + lp->media = NC; + } + next_tick = dc21140m_autoconf(dev); + } + break; + + case _100Mb: /* Set 100Mb/s */ + next_tick = 3000; + if (!lp->tx_enable) { + SET_100Mb; + de4x5_init_connection(dev); + } else { + if (!lp->linkOK && (lp->autosense == AUTO)) { + if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) { + lp->media = INIT; + lp->tcount++; + next_tick = DE4X5_AUTOSENSE_MS; + } + } + } + break; + + case BNC: + case AUI: + case _10Mb: /* Set 10Mb/s */ + next_tick = 3000; + if (!lp->tx_enable) { + SET_10Mb; + de4x5_init_connection(dev); + } else { + if (!lp->linkOK && (lp->autosense == AUTO)) { + if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) { + lp->media = INIT; + lp->tcount++; + next_tick = DE4X5_AUTOSENSE_MS; + } + } + } + break; + + case NC: + if (lp->media != lp->c_media) { + de4x5_dbg_media(dev); + lp->c_media = lp->media; + } + lp->media = INIT; + lp->tx_enable = false; + break; + } + + return next_tick; +} + +/* +** This routine may be merged into dc21140m_autoconf() sometime as I'm +** changing how I figure out the media - but trying to keep it backwards +** compatible with the de500-xa and de500-aa. +** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock +** functions and set during de4x5_mac_port() and/or de4x5_reset_phy(). +** This routine just has to figure out whether 10Mb/s or 100Mb/s is +** active. +** When autonegotiation is working, the ANS part searches the SROM for +** the highest common speed (TP) link that both can run and if that can +** be full duplex. 
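The autonegotiation handling above, and the comment that continues below, come down to intersecting the local advertisement with the link partner ability word and keeping the best mode both ends share, preferring speed over duplex. A stand-alone sketch using the standard MII technology-ability bits; the helper and its names are illustrative, not the driver's code:

/* Resolve the highest common link mode from the MII advertisement
 * (register 4) and link partner ability (register 5) words.  Bit
 * values follow the standard technology-ability field; speed wins
 * over duplex, as in the driver's selection order.
 */
#include <stdio.h>
#include <stdint.h>

#define ADV_100_FD  0x0100   /* 100BASE-TX full duplex */
#define ADV_100_HD  0x0080   /* 100BASE-TX half duplex */
#define ADV_10_FD   0x0040   /* 10BASE-T full duplex   */
#define ADV_10_HD   0x0020   /* 10BASE-T half duplex   */

struct link_mode { int speed_mbps; int full_duplex; };

static struct link_mode resolve_link(uint16_t ana, uint16_t anlpa)
{
    uint16_t common = ana & anlpa;           /* abilities both ends share */
    struct link_mode mode = { 0, 0 };

    if (common & ADV_100_FD)      { mode.speed_mbps = 100; mode.full_duplex = 1; }
    else if (common & ADV_100_HD) { mode.speed_mbps = 100; }
    else if (common & ADV_10_FD)  { mode.speed_mbps = 10;  mode.full_duplex = 1; }
    else if (common & ADV_10_HD)  { mode.speed_mbps = 10;  }

    return mode;
}

int main(void)
{
    /* Partner offers 100/half and 10/full+half; we advertise everything. */
    struct link_mode m = resolve_link(ADV_100_FD | ADV_100_HD | ADV_10_FD | ADV_10_HD,
                                      ADV_100_HD | ADV_10_FD | ADV_10_HD);
    printf("%d Mb/s, %s duplex\n", m.speed_mbps, m.full_duplex ? "full" : "half");
    return 0;
}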
That infoblock is executed and then the link speed set. +** +** Only _10Mb and _100Mb are tested here. +*/ +static int +dc2114x_autoconf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts; + int next_tick = DE4X5_AUTOSENSE_MS; + + switch (lp->media) { + case INIT: + if (lp->timeout < 0) { + DISABLE_IRQs; + lp->tx_enable = false; + lp->linkOK = 0; + lp->timeout = -1; + de4x5_save_skbs(dev); /* Save non transmitted skb's */ + if (lp->params.autosense & ~AUTO) { + srom_map_media(dev); /* Fixed media requested */ + if (lp->media != lp->params.autosense) { + lp->tcount++; + lp->media = INIT; + return next_tick; + } + lp->media = INIT; + } + } + if ((next_tick = de4x5_reset_phy(dev)) < 0) { + next_tick &= ~TIMER_CB; + } else { + if (lp->autosense == _100Mb) { + lp->media = _100Mb; + } else if (lp->autosense == _10Mb) { + lp->media = _10Mb; + } else if (lp->autosense == TP) { + lp->media = TP; + } else if (lp->autosense == BNC) { + lp->media = BNC; + } else if (lp->autosense == AUI) { + lp->media = AUI; + } else { + lp->media = SPD_DET; + if ((lp->infoblock_media == ANS) && + ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { + ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); + ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); + mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); + lp->media = ANS; + } + } + lp->local_state = 0; + next_tick = dc2114x_autoconf(dev); + } + break; + + case ANS: + switch (lp->local_state) { + case 0: + if (lp->timeout < 0) { + mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); + } + cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500); + if (cr < 0) { + next_tick = cr & ~TIMER_CB; + } else { + if (cr) { + lp->local_state = 0; + lp->media = SPD_DET; + } else { + lp->local_state++; + } + next_tick = dc2114x_autoconf(dev); + } + break; + + case 1: + sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000); + if (sr < 0) { + next_tick = sr & ~TIMER_CB; + } else { + lp->media = SPD_DET; + lp->local_state = 0; + if (sr) { /* Success! 
*/ + lp->tmp = MII_SR_ASSC; + anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); + ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); + if (!(anlpa & MII_ANLPA_RF) && + (cap = anlpa & MII_ANLPA_TAF & ana)) { + if (cap & MII_ANA_100M) { + lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0; + lp->media = _100Mb; + } else if (cap & MII_ANA_10M) { + lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0; + lp->media = _10Mb; + } + } + } /* Auto Negotiation failed to finish */ + next_tick = dc2114x_autoconf(dev); + } /* Auto Negotiation failed to start */ + break; + } + break; + + case AUI: + if (!lp->tx_enable) { + if (lp->timeout < 0) { + omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */ + outl(omr & ~OMR_FDX, DE4X5_OMR); + } + irqs = 0; + irq_mask = 0; + sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000); + if (sts < 0) { + next_tick = sts & ~TIMER_CB; + } else { + if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) { + lp->media = BNC; + next_tick = dc2114x_autoconf(dev); + } else { + lp->local_state = 1; + de4x5_init_connection(dev); + } + } + } else if (!lp->linkOK && (lp->autosense == AUTO)) { + lp->media = AUI_SUSPECT; + next_tick = 3000; + } + break; + + case AUI_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf); + break; + + case BNC: + switch (lp->local_state) { + case 0: + if (lp->timeout < 0) { + omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */ + outl(omr & ~OMR_FDX, DE4X5_OMR); + } + irqs = 0; + irq_mask = 0; + sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000); + if (sts < 0) { + next_tick = sts & ~TIMER_CB; + } else { + lp->local_state++; /* Ensure media connected */ + next_tick = dc2114x_autoconf(dev); + } + break; + + case 1: + if (!lp->tx_enable) { + if ((sts = ping_media(dev, 3000)) < 0) { + next_tick = sts & ~TIMER_CB; + } else { + if (sts) { + lp->local_state = 0; + lp->tcount++; + lp->media = INIT; + } else { + de4x5_init_connection(dev); + } + } + } else if (!lp->linkOK && (lp->autosense == AUTO)) { + lp->media = BNC_SUSPECT; + next_tick = 3000; + } + break; + } + break; + + case BNC_SUSPECT: + next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf); + break; + + case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ + if (srom_map_media(dev) < 0) { + lp->tcount++; + lp->media = INIT; + return next_tick; + } + if (lp->media == _100Mb) { + if ((slnk = test_for_100Mb(dev, 6500)) < 0) { + lp->media = SPD_DET; + return slnk & ~TIMER_CB; + } + } else { + if (wait_for_link(dev) < 0) { + lp->media = SPD_DET; + return PDET_LINK_WAIT; + } + } + if (lp->media == ANS) { /* Do MII parallel detection */ + if (is_spd_100(dev)) { + lp->media = _100Mb; + } else { + lp->media = _10Mb; + } + next_tick = dc2114x_autoconf(dev); + } else if (((lp->media == _100Mb) && is_100_up(dev)) || + (((lp->media == _10Mb) || (lp->media == TP) || + (lp->media == BNC) || (lp->media == AUI)) && + is_10_up(dev))) { + next_tick = dc2114x_autoconf(dev); + } else { + lp->tcount++; + lp->media = INIT; + } + break; + + case _10Mb: + next_tick = 3000; + if (!lp->tx_enable) { + SET_10Mb; + de4x5_init_connection(dev); + } else { + if (!lp->linkOK && (lp->autosense == AUTO)) { + if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) { + lp->media = INIT; + lp->tcount++; + next_tick = DE4X5_AUTOSENSE_MS; + } + } + } + break; + + case _100Mb: + next_tick = 3000; + if (!lp->tx_enable) { + SET_100Mb; + de4x5_init_connection(dev); + } else { + if (!lp->linkOK && (lp->autosense == AUTO)) { + if (!is_100_up(dev) || 
(!lp->useSROM && !is_spd_100(dev))) { + lp->media = INIT; + lp->tcount++; + next_tick = DE4X5_AUTOSENSE_MS; + } + } + } + break; + + default: + lp->tcount++; +printk("Huh?: media:%02x\n", lp->media); + lp->media = INIT; + break; + } + + return next_tick; +} + +static int +srom_autoconf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + + return lp->infoleaf_fn(dev); +} + +/* +** This mapping keeps the original media codes and FDX flag unchanged. +** While it isn't strictly necessary, it helps me for the moment... +** The early return avoids a media state / SROM media space clash. +*/ +static int +srom_map_media(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + + lp->fdx = false; + if (lp->infoblock_media == lp->media) + return 0; + + switch(lp->infoblock_media) { + case SROM_10BASETF: + if (!lp->params.fdx) return -1; + lp->fdx = true; + case SROM_10BASET: + if (lp->params.fdx && !lp->fdx) return -1; + if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) { + lp->media = _10Mb; + } else { + lp->media = TP; + } + break; + + case SROM_10BASE2: + lp->media = BNC; + break; + + case SROM_10BASE5: + lp->media = AUI; + break; + + case SROM_100BASETF: + if (!lp->params.fdx) return -1; + lp->fdx = true; + case SROM_100BASET: + if (lp->params.fdx && !lp->fdx) return -1; + lp->media = _100Mb; + break; + + case SROM_100BASET4: + lp->media = _100Mb; + break; + + case SROM_100BASEFF: + if (!lp->params.fdx) return -1; + lp->fdx = true; + case SROM_100BASEF: + if (lp->params.fdx && !lp->fdx) return -1; + lp->media = _100Mb; + break; + + case ANS: + lp->media = ANS; + lp->fdx = lp->params.fdx; + break; + + default: + printk("%s: Bad media code [%d] detected in SROM!\n", dev->name, + lp->infoblock_media); + return -1; + } + + return 0; +} + +static void +de4x5_init_connection(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + u_long flags = 0; + + if (lp->media != lp->c_media) { + de4x5_dbg_media(dev); + lp->c_media = lp->media; /* Stop scrolling media messages */ + } + + spin_lock_irqsave(&lp->lock, flags); + de4x5_rst_desc_ring(dev); + de4x5_setup_intr(dev); + lp->tx_enable = true; + spin_unlock_irqrestore(&lp->lock, flags); + outl(POLL_DEMAND, DE4X5_TPD); + + netif_wake_queue(dev); +} + +/* +** General PHY reset function. Some MII devices don't reset correctly +** since their MII address pins can float at voltages that are dependent +** on the signal pin use. Do a double reset to ensure a reset. 
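/*
** A minimal standalone sketch of the "reset twice, then poll" idea used by
** de4x5_reset_phy()/test_mii_reg() below, assuming hypothetical mdio read
** and write callbacks and the standard clause-22 control register layout
** (register 0, self-clearing reset in bit 15).  Illustrative only, not the
** driver's own code path.
*/
#include <stdio.h>
#include <stdbool.h>

#define MII_REG_CR   0x00               /* clause-22 PHY control register  */
#define MII_CR_RESET 0x8000             /* self-clearing soft-reset bit    */

typedef unsigned (*mdio_rd_t)(int phy, int reg);
typedef void     (*mdio_wr_t)(int phy, int reg, unsigned val);

static bool phy_double_reset(int phy, mdio_rd_t rd, mdio_wr_t wr, int polls)
{
    wr(phy, MII_REG_CR, MII_CR_RESET);  /* first write may not be latched...   */
    wr(phy, MII_REG_CR, MII_CR_RESET);  /* ...so issue the reset a second time */

    while (polls-- > 0)                 /* wait for the bit to self-clear      */
        if (!(rd(phy, MII_REG_CR) & MII_CR_RESET))
            return true;
    return false;                       /* PHY never came out of reset         */
}

/* Trivial simulated PHY so the sketch runs: reset clears on the 2nd read. */
static unsigned sim_cr, sim_reads;
static unsigned sim_rd(int phy, int reg) { (void)phy; (void)reg; return ++sim_reads < 2 ? sim_cr : 0; }
static void sim_wr(int phy, int reg, unsigned v) { (void)phy; (void)reg; sim_cr = v; }

int main(void)
{
    printf("PHY reset %s\n", phy_double_reset(1, sim_rd, sim_wr, 10) ? "done" : "timed out");
    return 0;
}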
+*/ +static int +de4x5_reset_phy(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int next_tick = 0; + + if ((lp->useSROM) || (lp->phy[lp->active].id)) { + if (lp->timeout < 0) { + if (lp->useSROM) { + if (lp->phy[lp->active].rst) { + srom_exec(dev, lp->phy[lp->active].rst); + srom_exec(dev, lp->phy[lp->active].rst); + } else if (lp->rst) { /* Type 5 infoblock reset */ + srom_exec(dev, lp->rst); + srom_exec(dev, lp->rst); + } + } else { + PHY_HARD_RESET; + } + if (lp->useMII) { + mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); + } + } + if (lp->useMII) { + next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500); + } + } else if (lp->chipset == DC21140) { + PHY_HARD_RESET; + } + + return next_tick; +} + +static int +test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 sts, csr12; + + if (lp->timeout < 0) { + lp->timeout = msec/100; + if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */ + reset_init_sia(dev, csr13, csr14, csr15); + } + + /* set up the interrupt mask */ + outl(irq_mask, DE4X5_IMR); + + /* clear all pending interrupts */ + sts = inl(DE4X5_STS); + outl(sts, DE4X5_STS); + + /* clear csr12 NRA and SRA bits */ + if ((lp->chipset == DC21041) || lp->useSROM) { + csr12 = inl(DE4X5_SISR); + outl(csr12, DE4X5_SISR); + } + } + + sts = inl(DE4X5_STS) & ~TIMER_CB; + + if (!(sts & irqs) && --lp->timeout) { + sts = 100 | TIMER_CB; + } else { + lp->timeout = -1; + } + + return sts; +} + +static int +test_tp(struct net_device *dev, s32 msec) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int sisr; + + if (lp->timeout < 0) { + lp->timeout = msec/100; + } + + sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR); + + if (sisr && --lp->timeout) { + sisr = 100 | TIMER_CB; + } else { + lp->timeout = -1; + } + + return sisr; +} + +/* +** Samples the 100Mb Link State Signal. The sample interval is important +** because too fast a rate can give erroneous results and confuse the +** speed sense algorithm. +*/ +#define SAMPLE_INTERVAL 500 /* ms */ +#define SAMPLE_DELAY 2000 /* ms */ +static int +test_for_100Mb(struct net_device *dev, int msec) +{ + struct de4x5_private *lp = netdev_priv(dev); + int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? 
-1 :GEP_SLNK); + + if (lp->timeout < 0) { + if ((msec/SAMPLE_INTERVAL) <= 0) return 0; + if (msec > SAMPLE_DELAY) { + lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL; + gep = SAMPLE_DELAY | TIMER_CB; + return gep; + } else { + lp->timeout = msec/SAMPLE_INTERVAL; + } + } + + if (lp->phy[lp->active].id || lp->useSROM) { + gep = is_100_up(dev) | is_spd_100(dev); + } else { + gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP)); + } + if (!(gep & ret) && --lp->timeout) { + gep = SAMPLE_INTERVAL | TIMER_CB; + } else { + lp->timeout = -1; + } + + return gep; +} + +static int +wait_for_link(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + + if (lp->timeout < 0) { + lp->timeout = 1; + } + + if (lp->timeout--) { + return TIMER_CB; + } else { + lp->timeout = -1; + } + + return 0; +} + +/* +** +** +*/ +static int +test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec) +{ + struct de4x5_private *lp = netdev_priv(dev); + int test; + u_long iobase = dev->base_addr; + + if (lp->timeout < 0) { + lp->timeout = msec/100; + } + + reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask; + test = (reg ^ (pol ? ~0 : 0)) & mask; + + if (test && --lp->timeout) { + reg = 100 | TIMER_CB; + } else { + lp->timeout = -1; + } + + return reg; +} + +static int +is_spd_100(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int spd; + + if (lp->useMII) { + spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII); + spd = ~(spd ^ lp->phy[lp->active].spd.value); + spd &= lp->phy[lp->active].spd.mask; + } else if (!lp->useSROM) { /* de500-xa */ + spd = ((~gep_rd(dev)) & GEP_SLNK); + } else { + if ((lp->ibn == 2) || !lp->asBitValid) + return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0; + + spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) | + (lp->linkOK & ~lp->asBitValid); + } + + return spd; +} + +static int +is_100_up(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + if (lp->useMII) { + /* Double read for sticky bits & temporary drops */ + mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); + return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS; + } else if (!lp->useSROM) { /* de500-xa */ + return (~gep_rd(dev)) & GEP_SLNK; + } else { + if ((lp->ibn == 2) || !lp->asBitValid) + return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0; + + return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | + (lp->linkOK & ~lp->asBitValid); + } +} + +static int +is_10_up(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + if (lp->useMII) { + /* Double read for sticky bits & temporary drops */ + mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); + return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS; + } else if (!lp->useSROM) { /* de500-xa */ + return (~gep_rd(dev)) & GEP_LNP; + } else { + if ((lp->ibn == 2) || !lp->asBitValid) + return ((lp->chipset & ~0x00ff) == DC2114x) ? 
+ (~inl(DE4X5_SISR)&SISR_LS10): + 0; + + return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | + (lp->linkOK & ~lp->asBitValid); + } +} + +static int +is_anc_capable(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { + return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); + } else if ((lp->chipset & ~0x00ff) == DC2114x) { + return (inl(DE4X5_SISR) & SISR_LPN) >> 12; + } else { + return 0; + } +} + +/* +** Send a packet onto the media and watch for send errors that indicate the +** media is bad or unconnected. +*/ +static int +ping_media(struct net_device *dev, int msec) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int sisr; + + if (lp->timeout < 0) { + lp->timeout = msec/100; + + lp->tmp = lp->tx_new; /* Remember the ring position */ + load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1); + lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; + outl(POLL_DEMAND, DE4X5_TPD); + } + + sisr = inl(DE4X5_SISR); + + if ((!(sisr & SISR_NCR)) && + ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) && + (--lp->timeout)) { + sisr = 100 | TIMER_CB; + } else { + if ((!(sisr & SISR_NCR)) && + !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) && + lp->timeout) { + sisr = 0; + } else { + sisr = 1; + } + lp->timeout = -1; + } + + return sisr; +} + +/* +** This function does 2 things: on Intels it kmalloc's another buffer to +** replace the one about to be passed up. On Alpha's it kmallocs a buffer +** into which the packet is copied. +*/ +static struct sk_buff * +de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) +{ + struct de4x5_private *lp = netdev_priv(dev); + struct sk_buff *p; + +#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) + struct sk_buff *ret; + u_long i=0, tmp; + + p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2); + if (!p) return NULL; + + tmp = virt_to_bus(p->data); + i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; + skb_reserve(p, i); + lp->rx_ring[index].buf = cpu_to_le32(tmp + i); + + ret = lp->rx_skb[index]; + lp->rx_skb[index] = p; + + if ((u_long) ret > 1) { + skb_put(ret, len); + } + + return ret; + +#else + if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */ + + p = netdev_alloc_skb(dev, len + 2); + if (!p) return NULL; + + skb_reserve(p, 2); /* Align */ + if (index < lp->rx_old) { /* Wrapped buffer */ + short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; + memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen); + memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen); + } else { /* Linear buffer */ + memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len); + } + + return p; +#endif +} + +static void +de4x5_free_rx_buffs(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + int i; + + for (i=0; irxRingSize; i++) { + if ((u_long) lp->rx_skb[i] > 1) { + dev_kfree_skb(lp->rx_skb[i]); + } + lp->rx_ring[i].status = 0; + lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */ + } +} + +static void +de4x5_free_tx_buffs(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + int i; + + for (i=0; itxRingSize; i++) { + if (lp->tx_skb[i]) + de4x5_free_tx_buff(lp, i); + lp->tx_ring[i].status = 0; + } + + /* Unload the locally queued packets */ + __skb_queue_purge(&lp->cache.queue); +} + +/* +** When a user pulls a 
connection, the DECchip can end up in a +** 'running - waiting for end of transmission' state. This means that we +** have to perform a chip soft reset to ensure that we can synchronize +** the hardware and software and make any media probes using a loopback +** packet meaningful. +*/ +static void +de4x5_save_skbs(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 omr; + + if (!lp->cache.save_cnt) { + STOP_DE4X5; + de4x5_tx(dev); /* Flush any sent skb's */ + de4x5_free_tx_buffs(dev); + de4x5_cache_state(dev, DE4X5_SAVE_STATE); + de4x5_sw_reset(dev); + de4x5_cache_state(dev, DE4X5_RESTORE_STATE); + lp->cache.save_cnt++; + START_DE4X5; + } +} + +static void +de4x5_rst_desc_ring(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int i; + s32 omr; + + if (lp->cache.save_cnt) { + STOP_DE4X5; + outl(lp->dma_rings, DE4X5_RRBA); + outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), + DE4X5_TRBA); + + lp->rx_new = lp->rx_old = 0; + lp->tx_new = lp->tx_old = 0; + + for (i = 0; i < lp->rxRingSize; i++) { + lp->rx_ring[i].status = cpu_to_le32(R_OWN); + } + + for (i = 0; i < lp->txRingSize; i++) { + lp->tx_ring[i].status = cpu_to_le32(0); + } + + barrier(); + lp->cache.save_cnt--; + START_DE4X5; + } +} + +static void +de4x5_cache_state(struct net_device *dev, int flag) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + switch(flag) { + case DE4X5_SAVE_STATE: + lp->cache.csr0 = inl(DE4X5_BMR); + lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR)); + lp->cache.csr7 = inl(DE4X5_IMR); + break; + + case DE4X5_RESTORE_STATE: + outl(lp->cache.csr0, DE4X5_BMR); + outl(lp->cache.csr6, DE4X5_OMR); + outl(lp->cache.csr7, DE4X5_IMR); + if (lp->chipset == DC21140) { + gep_wr(lp->cache.gepc, dev); + gep_wr(lp->cache.gep, dev); + } else { + reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, + lp->cache.csr15); + } + break; + } +} + +static void +de4x5_put_cache(struct net_device *dev, struct sk_buff *skb) +{ + struct de4x5_private *lp = netdev_priv(dev); + + __skb_queue_tail(&lp->cache.queue, skb); +} + +static void +de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb) +{ + struct de4x5_private *lp = netdev_priv(dev); + + __skb_queue_head(&lp->cache.queue, skb); +} + +static struct sk_buff * +de4x5_get_cache(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + + return __skb_dequeue(&lp->cache.queue); +} + +/* +** Check the Auto Negotiation State. Return OK when a link pass interrupt +** is received and the auto-negotiation status is NWAY OK. 
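/*
** A minimal standalone sketch of the restartable polling pattern shared by
** the test_media(), test_tp() and test_mii_reg() helpers above and
** test_ans() below: a negative timeout means "not armed yet", the
** millisecond budget is converted into 100ms polls, and the caller's timer
** is re-armed until the probed condition holds or the budget runs out.
** TIMER_CB here is a placeholder flag bit, not the driver's own value, and
** done() stands in for whatever condition (e.g. "NWAY OK seen") is tested.
*/
#include <stdio.h>
#include <stdbool.h>

#define TIMER_CB (1u << 31)             /* placeholder "re-arm timer" flag   */

struct poll_state { int timeout; };     /* remaining polls, -1 when idle     */

static unsigned poll_step(struct poll_state *ps, int msec, bool (*done)(void))
{
    if (ps->timeout < 0)                /* first call: arm the poll budget   */
        ps->timeout = msec / 100;

    if (!done() && --ps->timeout)
        return 100 | TIMER_CB;          /* ask to be re-run in ~100ms        */

    ps->timeout = -1;                   /* condition met or budget exhausted */
    return 0;
}

static int sim_calls;
static bool sim_done(void) { return ++sim_calls >= 3; }

int main(void)
{
    struct poll_state ps = { .timeout = -1 };
    unsigned r;

    while ((r = poll_step(&ps, 1000, sim_done)) & TIMER_CB)
        printf("re-arm in %u ms\n", r & ~TIMER_CB);
    return 0;
}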
+*/ +static int +test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 sts, ans; + + if (lp->timeout < 0) { + lp->timeout = msec/100; + outl(irq_mask, DE4X5_IMR); + + /* clear all pending interrupts */ + sts = inl(DE4X5_STS); + outl(sts, DE4X5_STS); + } + + ans = inl(DE4X5_SISR) & SISR_ANS; + sts = inl(DE4X5_STS) & ~TIMER_CB; + + if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) { + sts = 100 | TIMER_CB; + } else { + lp->timeout = -1; + } + + return sts; +} + +static void +de4x5_setup_intr(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 imr, sts; + + if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */ + imr = 0; + UNMASK_IRQs; + sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */ + outl(sts, DE4X5_STS); + ENABLE_IRQs; + } +} + +/* +** +*/ +static void +reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + RESET_SIA; + if (lp->useSROM) { + if (lp->ibn == 3) { + srom_exec(dev, lp->phy[lp->active].rst); + srom_exec(dev, lp->phy[lp->active].gep); + outl(1, DE4X5_SICR); + return; + } else { + csr15 = lp->cache.csr15; + csr14 = lp->cache.csr14; + csr13 = lp->cache.csr13; + outl(csr15 | lp->cache.gepc, DE4X5_SIGR); + outl(csr15 | lp->cache.gep, DE4X5_SIGR); + } + } else { + outl(csr15, DE4X5_SIGR); + } + outl(csr14, DE4X5_STRR); + outl(csr13, DE4X5_SICR); + + mdelay(10); +} + +/* +** Create a loopback ethernet packet +*/ +static void +create_packet(struct net_device *dev, char *frame, int len) +{ + int i; + char *buf = frame; + + for (i=0; idev_addr[i]; + } + for (i=0; idev_addr[i]; + } + + *buf++ = 0; /* Packet length (2 bytes) */ + *buf++ = 1; +} + +/* +** Look for a particular board name in the EISA configuration space +*/ +static int +EISA_signature(char *name, struct device *device) +{ + int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); + struct eisa_device *edev; + + *name = '\0'; + edev = to_eisa_device (device); + i = edev->id.driver_data; + + if (i >= 0 && i < siglen) { + strcpy (name, de4x5_signatures[i]); + status = 1; + } + + return status; /* return the device name string */ +} + +/* +** Look for a particular board name in the PCI configuration space +*/ +static int +PCI_signature(char *name, struct de4x5_private *lp) +{ + int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); + + if (lp->chipset == DC21040) { + strcpy(name, "DE434/5"); + return status; + } else { /* Search for a DEC name in the SROM */ + int tmp = *((char *)&lp->srom + 19) * 3; + strncpy(name, (char *)&lp->srom + 26 + tmp, 8); + } + name[8] = '\0'; + for (i=0; ichipset == DC21040) ? "DC21040" : + ((lp->chipset == DC21041) ? "DC21041" : + ((lp->chipset == DC21140) ? "DC21140" : + ((lp->chipset == DC21142) ? "DC21142" : + ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN" + ))))))); + } + if (lp->chipset != DC21041) { + lp->useSROM = true; /* card is not recognisably DEC */ + } + } else if ((lp->chipset & ~0x00ff) == DC2114x) { + lp->useSROM = true; + } + + return status; +} + +/* +** Set up the Ethernet PROM counter to the start of the Ethernet address on +** the DC21040, else read the SROM for the other chips. +** The SROM may not be present in a multi-MAC card, so first read the +** MAC address and check for a bad address. 
If there is a bad one then exit +** immediately with the prior srom contents intact (the h/w address will +** be fixed up later). +*/ +static void +DevicePresent(struct net_device *dev, u_long aprom_addr) +{ + int i, j=0; + struct de4x5_private *lp = netdev_priv(dev); + + if (lp->chipset == DC21040) { + if (lp->bus == EISA) { + enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */ + } else { + outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */ + } + } else { /* Read new srom */ + u_short tmp; + __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD); + for (i=0; i<(ETH_ALEN>>1); i++) { + tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i); + j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */ + *p = cpu_to_le16(tmp); + } + if (j == 0 || j == 3 * 0xffff) { + /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */ + return; + } + + p = (__le16 *)&lp->srom; + for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) { + tmp = srom_rd(aprom_addr, i); + *p++ = cpu_to_le16(tmp); + } + de4x5_dbg_srom(&lp->srom); + } +} + +/* +** Since the write on the Enet PROM register doesn't seem to reset the PROM +** pointer correctly (at least on my DE425 EISA card), this routine should do +** it...from depca.c. +*/ +static void +enet_addr_rst(u_long aprom_addr) +{ + union { + struct { + u32 a; + u32 b; + } llsig; + char Sig[sizeof(u32) << 1]; + } dev; + short sigLength=0; + s8 data; + int i, j; + + dev.llsig.a = ETH_PROM_SIG; + dev.llsig.b = ETH_PROM_SIG; + sigLength = sizeof(u32) << 1; + + for (i=0,j=0;jbase_addr; + int broken, i, k, tmp, status = 0; + u_short j,chksum; + struct de4x5_private *lp = netdev_priv(dev); + + broken = de4x5_bad_srom(lp); + + for (i=0,k=0,j=0;j<3;j++) { + k <<= 1; + if (k > 0xffff) k-=0xffff; + + if (lp->bus == PCI) { + if (lp->chipset == DC21040) { + while ((tmp = inl(DE4X5_APROM)) < 0); + k += (u_char) tmp; + dev->dev_addr[i++] = (u_char) tmp; + while ((tmp = inl(DE4X5_APROM)) < 0); + k += (u_short) (tmp << 8); + dev->dev_addr[i++] = (u_char) tmp; + } else if (!broken) { + dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; + dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; + } else if ((broken == SMC) || (broken == ACCTON)) { + dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; + dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; + } + } else { + k += (u_char) (tmp = inb(EISA_APROM)); + dev->dev_addr[i++] = (u_char) tmp; + k += (u_short) ((tmp = inb(EISA_APROM)) << 8); + dev->dev_addr[i++] = (u_char) tmp; + } + + if (k > 0xffff) k-=0xffff; + } + if (k == 0xffff) k=0; + + if (lp->bus == PCI) { + if (lp->chipset == DC21040) { + while ((tmp = inl(DE4X5_APROM)) < 0); + chksum = (u_char) tmp; + while ((tmp = inl(DE4X5_APROM)) < 0); + chksum |= (u_short) (tmp << 8); + if ((k != chksum) && (dec_only)) status = -1; + } + } else { + chksum = (u_char) inb(EISA_APROM); + chksum |= (u_short) (inb(EISA_APROM) << 8); + if ((k != chksum) && (dec_only)) status = -1; + } + + /* If possible, try to fix a broken card - SMC only so far */ + srom_repair(dev, broken); + +#ifdef CONFIG_PPC_PMAC + /* + ** If the address starts with 00 a0, we have to bit-reverse + ** each byte of the address. 
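/*
** A standalone sketch of the per-byte bit reversal performed just below
** for the PowerMac case: swap nibbles, then bit pairs, then adjacent
** bits, which reverses the bit order within each address byte.
*/
#include <stdio.h>

static unsigned char bit_reverse8(unsigned char x)
{
    x = (unsigned char)(((x & 0x0f) << 4) | ((x & 0xf0) >> 4)); /* nibbles   */
    x = (unsigned char)(((x & 0x33) << 2) | ((x & 0xcc) >> 2)); /* bit pairs */
    x = (unsigned char)(((x & 0x55) << 1) | ((x & 0xaa) >> 1)); /* bits      */
    return x;
}

int main(void)
{
    printf("0xa0 -> 0x%02x\n", bit_reverse8(0xa0));  /* prints 0xa0 -> 0x05 */
    return 0;
}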
+ */ + if ( machine_is(powermac) && + (dev->dev_addr[0] == 0) && + (dev->dev_addr[1] == 0xa0) ) + { + for (i = 0; i < ETH_ALEN; ++i) + { + int x = dev->dev_addr[i]; + x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4); + x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2); + dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); + } + } +#endif /* CONFIG_PPC_PMAC */ + + /* Test for a bad enet address */ + status = test_bad_enet(dev, status); + + return status; +} + +/* +** Test for enet addresses in the first 32 bytes. +*/ +static int +de4x5_bad_srom(struct de4x5_private *lp) +{ + int i, status = 0; + + for (i = 0; i < ARRAY_SIZE(enet_det); i++) { + if (!memcmp(&lp->srom, &enet_det[i], 3) && + !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) { + if (i == 0) { + status = SMC; + } else if (i == 1) { + status = ACCTON; + } + break; + } + } + + return status; +} + +static void +srom_repair(struct net_device *dev, int card) +{ + struct de4x5_private *lp = netdev_priv(dev); + + switch(card) { + case SMC: + memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom)); + memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN); + memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100); + lp->useSROM = true; + break; + } +} + +/* +** Assume that the irq's do not follow the PCI spec - this is seems +** to be true so far (2 for 2). +*/ +static int +test_bad_enet(struct net_device *dev, int status) +{ + struct de4x5_private *lp = netdev_priv(dev); + int i, tmp; + + for (tmp=0,i=0; idev_addr[i]; + if ((tmp == 0) || (tmp == 0x5fa)) { + if ((lp->chipset == last.chipset) && + (lp->bus_num == last.bus) && (lp->bus_num > 0)) { + for (i=0; idev_addr[i] = last.addr[i]; + for (i=ETH_ALEN-1; i>2; --i) { + dev->dev_addr[i] += 1; + if (dev->dev_addr[i] != 0) break; + } + for (i=0; idev_addr[i]; + if (!an_exception(lp)) { + dev->irq = last.irq; + } + + status = 0; + } + } else if (!status) { + last.chipset = lp->chipset; + last.bus = lp->bus_num; + last.irq = dev->irq; + for (i=0; idev_addr[i]; + } + + return status; +} + +/* +** List of board exceptions with correctly wired IRQs +*/ +static int +an_exception(struct de4x5_private *lp) +{ + if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) && + (*(u_short *)lp->srom.sub_system_id == 0x95e0)) { + return -1; + } + + return 0; +} + +/* +** SROM Read +*/ +static short +srom_rd(u_long addr, u_char offset) +{ + sendto_srom(SROM_RD | SROM_SR, addr); + + srom_latch(SROM_RD | SROM_SR | DT_CS, addr); + srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr); + srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset); + + return srom_data(SROM_RD | SROM_SR | DT_CS, addr); +} + +static void +srom_latch(u_int command, u_long addr) +{ + sendto_srom(command, addr); + sendto_srom(command | DT_CLK, addr); + sendto_srom(command, addr); +} + +static void +srom_command(u_int command, u_long addr) +{ + srom_latch(command, addr); + srom_latch(command, addr); + srom_latch((command & 0x0000ff00) | DT_CS, addr); +} + +static void +srom_address(u_int command, u_long addr, u_char offset) +{ + int i, a; + + a = offset << 2; + for (i=0; i<6; i++, a <<= 1) { + srom_latch(command | ((a & 0x80) ? 
DT_IN : 0), addr); + } + udelay(1); + + i = (getfrom_srom(addr) >> 3) & 0x01; +} + +static short +srom_data(u_int command, u_long addr) +{ + int i; + short word = 0; + s32 tmp; + + for (i=0; i<16; i++) { + sendto_srom(command | DT_CLK, addr); + tmp = getfrom_srom(addr); + sendto_srom(command, addr); + + word = (word << 1) | ((tmp >> 3) & 0x01); + } + + sendto_srom(command & 0x0000ff00, addr); + + return word; +} + +/* +static void +srom_busy(u_int command, u_long addr) +{ + sendto_srom((command & 0x0000ff00) | DT_CS, addr); + + while (!((getfrom_srom(addr) >> 3) & 0x01)) { + mdelay(1); + } + + sendto_srom(command & 0x0000ff00, addr); +} +*/ + +static void +sendto_srom(u_int command, u_long addr) +{ + outl(command, addr); + udelay(1); +} + +static int +getfrom_srom(u_long addr) +{ + s32 tmp; + + tmp = inl(addr); + udelay(1); + + return tmp; +} + +static int +srom_infoleaf_info(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + int i, count; + u_char *p; + + /* Find the infoleaf decoder function that matches this chipset */ + for (i=0; ichipset == infoleaf_array[i].chipset) break; + } + if (i == INFOLEAF_SIZE) { + lp->useSROM = false; + printk("%s: Cannot find correct chipset for SROM decoding!\n", + dev->name); + return -ENXIO; + } + + lp->infoleaf_fn = infoleaf_array[i].fn; + + /* Find the information offset that this function should use */ + count = *((u_char *)&lp->srom + 19); + p = (u_char *)&lp->srom + 26; + + if (count > 1) { + for (i=count; i; --i, p+=3) { + if (lp->device == *p) break; + } + if (i == 0) { + lp->useSROM = false; + printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n", + dev->name, lp->device); + return -ENXIO; + } + } + + lp->infoleaf_offset = get_unaligned_le16(p + 1); + + return 0; +} + +/* +** This routine loads any type 1 or 3 MII info into the mii device +** struct and executes any type 5 code to reset PHY devices for this +** controller. +** The info for the MII devices will be valid since the index used +** will follow the discovery process from MII address 1-31 then 0. +*/ +static void +srom_init(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; + u_char count; + + p+=2; + if (lp->chipset == DC21140) { + lp->cache.gepc = (*p++ | GEP_CTRL); + gep_wr(lp->cache.gepc, dev); + } + + /* Block count */ + count = *p++; + + /* Jump the infoblocks to find types */ + for (;count; --count) { + if (*p < 128) { + p += COMPACT_LEN; + } else if (*(p+1) == 5) { + type5_infoblock(dev, 1, p); + p += ((*p & BLOCK_LEN) + 1); + } else if (*(p+1) == 4) { + p += ((*p & BLOCK_LEN) + 1); + } else if (*(p+1) == 3) { + type3_infoblock(dev, 1, p); + p += ((*p & BLOCK_LEN) + 1); + } else if (*(p+1) == 2) { + p += ((*p & BLOCK_LEN) + 1); + } else if (*(p+1) == 1) { + type1_infoblock(dev, 1, p); + p += ((*p & BLOCK_LEN) + 1); + } else { + p += ((*p & BLOCK_LEN) + 1); + } + } +} + +/* +** A generic routine that writes GEP control, data and reset information +** to the GEP register (21140) or csr15 GEP portion (2114[23]). +*/ +static void +srom_exec(struct net_device *dev, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + u_char count = (p ? *p++ : 0); + u_short *w = (u_short *)p; + + if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return; + + if (lp->chipset != DC21140) RESET_SIA; + + while (count--) { + gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? 
+ *p++ : get_unaligned_le16(w++)), dev); + mdelay(2); /* 2ms per action */ + } + + if (lp->chipset != DC21140) { + outl(lp->cache.csr14, DE4X5_STRR); + outl(lp->cache.csr13, DE4X5_SICR); + } +} + +/* +** Basically this function is a NOP since it will never be called, +** unless I implement the DC21041 SROM functions. There's no need +** since the existing code will be satisfactory for all boards. +*/ +static int +dc21041_infoleaf(struct net_device *dev) +{ + return DE4X5_AUTOSENSE_MS; +} + +static int +dc21140_infoleaf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char count = 0; + u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; + int next_tick = DE4X5_AUTOSENSE_MS; + + /* Read the connection type */ + p+=2; + + /* GEP control */ + lp->cache.gepc = (*p++ | GEP_CTRL); + + /* Block count */ + count = *p++; + + /* Recursively figure out the info blocks */ + if (*p < 128) { + next_tick = dc_infoblock[COMPACT](dev, count, p); + } else { + next_tick = dc_infoblock[*(p+1)](dev, count, p); + } + + if (lp->tcount == count) { + lp->media = NC; + if (lp->media != lp->c_media) { + de4x5_dbg_media(dev); + lp->c_media = lp->media; + } + lp->media = INIT; + lp->tcount = 0; + lp->tx_enable = false; + } + + return next_tick & ~TIMER_CB; +} + +static int +dc21142_infoleaf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char count = 0; + u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; + int next_tick = DE4X5_AUTOSENSE_MS; + + /* Read the connection type */ + p+=2; + + /* Block count */ + count = *p++; + + /* Recursively figure out the info blocks */ + if (*p < 128) { + next_tick = dc_infoblock[COMPACT](dev, count, p); + } else { + next_tick = dc_infoblock[*(p+1)](dev, count, p); + } + + if (lp->tcount == count) { + lp->media = NC; + if (lp->media != lp->c_media) { + de4x5_dbg_media(dev); + lp->c_media = lp->media; + } + lp->media = INIT; + lp->tcount = 0; + lp->tx_enable = false; + } + + return next_tick & ~TIMER_CB; +} + +static int +dc21143_infoleaf(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char count = 0; + u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; + int next_tick = DE4X5_AUTOSENSE_MS; + + /* Read the connection type */ + p+=2; + + /* Block count */ + count = *p++; + + /* Recursively figure out the info blocks */ + if (*p < 128) { + next_tick = dc_infoblock[COMPACT](dev, count, p); + } else { + next_tick = dc_infoblock[*(p+1)](dev, count, p); + } + if (lp->tcount == count) { + lp->media = NC; + if (lp->media != lp->c_media) { + de4x5_dbg_media(dev); + lp->c_media = lp->media; + } + lp->media = INIT; + lp->tcount = 0; + lp->tx_enable = false; + } + + return next_tick & ~TIMER_CB; +} + +/* +** The compact infoblock is only designed for DC21140[A] chips, so +** we'll reuse the dc21140m_autoconf function. Non MII media only. +*/ +static int +compact_infoblock(struct net_device *dev, u_char count, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char flags, csr6; + + /* Recursively figure out the info blocks */ + if (--count > lp->tcount) { + if (*(p+COMPACT_LEN) < 128) { + return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN); + } else { + return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN); + } + } + + if ((lp->media == INIT) && (lp->timeout < 0)) { + lp->ibn = COMPACT; + lp->active = 0; + gep_wr(lp->cache.gepc, dev); + lp->infoblock_media = (*p++) & COMPACT_MC; + lp->cache.gep = *p++; + csr6 = *p++; + flags = *p++; + + lp->asBitValid = (flags & 0x80) ? 
0 : -1; + lp->defMedium = (flags & 0x40) ? -1 : 0; + lp->asBit = 1 << ((csr6 >> 1) & 0x07); + lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; + lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); + lp->useMII = false; + + de4x5_switch_mac_port(dev); + } + + return dc21140m_autoconf(dev); +} + +/* +** This block describes non MII media for the DC21140[A] only. +*/ +static int +type0_infoblock(struct net_device *dev, u_char count, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char flags, csr6, len = (*p & BLOCK_LEN)+1; + + /* Recursively figure out the info blocks */ + if (--count > lp->tcount) { + if (*(p+len) < 128) { + return dc_infoblock[COMPACT](dev, count, p+len); + } else { + return dc_infoblock[*(p+len+1)](dev, count, p+len); + } + } + + if ((lp->media == INIT) && (lp->timeout < 0)) { + lp->ibn = 0; + lp->active = 0; + gep_wr(lp->cache.gepc, dev); + p+=2; + lp->infoblock_media = (*p++) & BLOCK0_MC; + lp->cache.gep = *p++; + csr6 = *p++; + flags = *p++; + + lp->asBitValid = (flags & 0x80) ? 0 : -1; + lp->defMedium = (flags & 0x40) ? -1 : 0; + lp->asBit = 1 << ((csr6 >> 1) & 0x07); + lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; + lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); + lp->useMII = false; + + de4x5_switch_mac_port(dev); + } + + return dc21140m_autoconf(dev); +} + +/* These functions are under construction! */ + +static int +type1_infoblock(struct net_device *dev, u_char count, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char len = (*p & BLOCK_LEN)+1; + + /* Recursively figure out the info blocks */ + if (--count > lp->tcount) { + if (*(p+len) < 128) { + return dc_infoblock[COMPACT](dev, count, p+len); + } else { + return dc_infoblock[*(p+len+1)](dev, count, p+len); + } + } + + p += 2; + if (lp->state == INITIALISED) { + lp->ibn = 1; + lp->active = *p++; + lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); + lp->phy[lp->active].rst = (*p ? 
p : NULL); p += (*p + 1); + lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; + lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; + lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; + lp->phy[lp->active].ttm = get_unaligned_le16(p); + return 0; + } else if ((lp->media == INIT) && (lp->timeout < 0)) { + lp->ibn = 1; + lp->active = *p; + lp->infoblock_csr6 = OMR_MII_100; + lp->useMII = true; + lp->infoblock_media = ANS; + + de4x5_switch_mac_port(dev); + } + + return dc21140m_autoconf(dev); +} + +static int +type2_infoblock(struct net_device *dev, u_char count, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char len = (*p & BLOCK_LEN)+1; + + /* Recursively figure out the info blocks */ + if (--count > lp->tcount) { + if (*(p+len) < 128) { + return dc_infoblock[COMPACT](dev, count, p+len); + } else { + return dc_infoblock[*(p+len+1)](dev, count, p+len); + } + } + + if ((lp->media == INIT) && (lp->timeout < 0)) { + lp->ibn = 2; + lp->active = 0; + p += 2; + lp->infoblock_media = (*p) & MEDIA_CODE; + + if ((*p++) & EXT_FIELD) { + lp->cache.csr13 = get_unaligned_le16(p); p += 2; + lp->cache.csr14 = get_unaligned_le16(p); p += 2; + lp->cache.csr15 = get_unaligned_le16(p); p += 2; + } else { + lp->cache.csr13 = CSR13; + lp->cache.csr14 = CSR14; + lp->cache.csr15 = CSR15; + } + lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; + lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); + lp->infoblock_csr6 = OMR_SIA; + lp->useMII = false; + + de4x5_switch_mac_port(dev); + } + + return dc2114x_autoconf(dev); +} + +static int +type3_infoblock(struct net_device *dev, u_char count, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char len = (*p & BLOCK_LEN)+1; + + /* Recursively figure out the info blocks */ + if (--count > lp->tcount) { + if (*(p+len) < 128) { + return dc_infoblock[COMPACT](dev, count, p+len); + } else { + return dc_infoblock[*(p+len+1)](dev, count, p+len); + } + } + + p += 2; + if (lp->state == INITIALISED) { + lp->ibn = 3; + lp->active = *p++; + if (MOTO_SROM_BUG) lp->active = 0; + lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); + lp->phy[lp->active].rst = (*p ? 
p : NULL); p += (2 * (*p) + 1); + lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; + lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; + lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; + lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2; + lp->phy[lp->active].mci = *p; + return 0; + } else if ((lp->media == INIT) && (lp->timeout < 0)) { + lp->ibn = 3; + lp->active = *p; + if (MOTO_SROM_BUG) lp->active = 0; + lp->infoblock_csr6 = OMR_MII_100; + lp->useMII = true; + lp->infoblock_media = ANS; + + de4x5_switch_mac_port(dev); + } + + return dc2114x_autoconf(dev); +} + +static int +type4_infoblock(struct net_device *dev, u_char count, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char flags, csr6, len = (*p & BLOCK_LEN)+1; + + /* Recursively figure out the info blocks */ + if (--count > lp->tcount) { + if (*(p+len) < 128) { + return dc_infoblock[COMPACT](dev, count, p+len); + } else { + return dc_infoblock[*(p+len+1)](dev, count, p+len); + } + } + + if ((lp->media == INIT) && (lp->timeout < 0)) { + lp->ibn = 4; + lp->active = 0; + p+=2; + lp->infoblock_media = (*p++) & MEDIA_CODE; + lp->cache.csr13 = CSR13; /* Hard coded defaults */ + lp->cache.csr14 = CSR14; + lp->cache.csr15 = CSR15; + lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; + lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2; + csr6 = *p++; + flags = *p++; + + lp->asBitValid = (flags & 0x80) ? 0 : -1; + lp->defMedium = (flags & 0x40) ? -1 : 0; + lp->asBit = 1 << ((csr6 >> 1) & 0x07); + lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; + lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); + lp->useMII = false; + + de4x5_switch_mac_port(dev); + } + + return dc2114x_autoconf(dev); +} + +/* +** This block type provides information for resetting external devices +** (chips) through the General Purpose Register. +*/ +static int +type5_infoblock(struct net_device *dev, u_char count, u_char *p) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_char len = (*p & BLOCK_LEN)+1; + + /* Recursively figure out the info blocks */ + if (--count > lp->tcount) { + if (*(p+len) < 128) { + return dc_infoblock[COMPACT](dev, count, p+len); + } else { + return dc_infoblock[*(p+len+1)](dev, count, p+len); + } + } + + /* Must be initializing to run this code */ + if ((lp->state == INITIALISED) || (lp->media == INIT)) { + p+=2; + lp->rst = p; + srom_exec(dev, lp->rst); + } + + return DE4X5_AUTOSENSE_MS; +} + +/* +** MII Read/Write +*/ + +static int +mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr) +{ + mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */ + mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */ + mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */ + mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ + mii_address(phyreg, ioaddr); /* PHY Register to read */ + mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */ + + return mii_rdata(ioaddr); /* Read data */ +} + +static void +mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr) +{ + mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... 
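/*
** A standalone sketch of the management frame that the mii_rd()/mii_wr()
** bit-banging here shifts out, using the standard clause-22 layout
** (preamble, start, opcode, PHY address, register address, turnaround,
** data).  The packing below is illustrative; the driver's own MII_*
** constants define what actually goes on the wire.
*/
#include <stdio.h>

/* Build the 32 bits (shifted out MSB first) that follow the all-ones
** preamble of a write cycle. */
static unsigned long mdio_write_frame(unsigned phy, unsigned reg, unsigned data)
{
    return (0x1ul << 30)                      /* start of frame: 01 */
         | (0x1ul << 28)                      /* opcode: 01 = write */
         | ((unsigned long)(phy & 0x1f) << 23)
         | ((unsigned long)(reg & 0x1f) << 18)
         | (0x2ul << 16)                      /* turnaround: 10     */
         | (data & 0xffff);                   /* 16 data bits       */
}

int main(void)
{
    /* Hypothetical example: write 0x8000 to register 0 of PHY 1. */
    printf("frame = 0x%08lx\n", mdio_write_frame(1, 0, 0x8000));
    return 0;
}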
*/ + mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */ + mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */ + mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ + mii_address(phyreg, ioaddr); /* PHY Register to write */ + mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */ + data = mii_swap(data, 16); /* Swap data bit ordering */ + mii_wdata(data, 16, ioaddr); /* Write data */ +} + +static int +mii_rdata(u_long ioaddr) +{ + int i; + s32 tmp = 0; + + for (i=0; i<16; i++) { + tmp <<= 1; + tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr); + } + + return tmp; +} + +static void +mii_wdata(int data, int len, u_long ioaddr) +{ + int i; + + for (i=0; i>= 1; + } +} + +static void +mii_address(u_char addr, u_long ioaddr) +{ + int i; + + addr = mii_swap(addr, 5); + for (i=0; i<5; i++) { + sendto_mii(MII_MWR | MII_WR, addr, ioaddr); + addr >>= 1; + } +} + +static void +mii_ta(u_long rw, u_long ioaddr) +{ + if (rw == MII_STWR) { + sendto_mii(MII_MWR | MII_WR, 1, ioaddr); + sendto_mii(MII_MWR | MII_WR, 0, ioaddr); + } else { + getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */ + } +} + +static int +mii_swap(int data, int len) +{ + int i, tmp = 0; + + for (i=0; i>= 1; + } + + return tmp; +} + +static void +sendto_mii(u32 command, int data, u_long ioaddr) +{ + u32 j; + + j = (data & 1) << 17; + outl(command | j, ioaddr); + udelay(1); + outl(command | MII_MDC | j, ioaddr); + udelay(1); +} + +static int +getfrom_mii(u32 command, u_long ioaddr) +{ + outl(command, ioaddr); + udelay(1); + outl(command | MII_MDC, ioaddr); + udelay(1); + + return (inl(ioaddr) >> 19) & 1; +} + +/* +** Here's 3 ways to calculate the OUI from the ID registers. +*/ +static int +mii_get_oui(u_char phyaddr, u_long ioaddr) +{ +/* + union { + u_short reg; + u_char breg[2]; + } a; + int i, r2, r3, ret=0;*/ + int r2, r3; + + /* Read r2 and r3 */ + r2 = mii_rd(MII_ID0, phyaddr, ioaddr); + r3 = mii_rd(MII_ID1, phyaddr, ioaddr); + /* SEEQ and Cypress way * / + / * Shuffle r2 and r3 * / + a.reg=0; + r3 = ((r3>>10)|(r2<<6))&0x0ff; + r2 = ((r2>>2)&0x3fff); + + / * Bit reverse r3 * / + for (i=0;i<8;i++) { + ret<<=1; + ret |= (r3&1); + r3>>=1; + } + + / * Bit reverse r2 * / + for (i=0;i<16;i++) { + a.reg<<=1; + a.reg |= (r2&1); + r2>>=1; + } + + / * Swap r2 bytes * / + i=a.breg[0]; + a.breg[0]=a.breg[1]; + a.breg[1]=i; + + return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */ +/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */ + return r2; /* (I did it) My way */ +} + +/* +** The SROM spec forces us to search addresses [1-31 0]. Bummer. +*/ +static int +mii_get_phy(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + int i, j, k, n, limit=ARRAY_SIZE(phy_info); + int id; + + lp->active = 0; + lp->useMII = true; + + /* Search the MII address space for possible PHY devices */ + for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) { + lp->phy[lp->active].addr = i; + if (i==0) n++; /* Count cycles */ + while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */ + id = mii_get_oui(i, DE4X5_MII); + if ((id == 0) || (id == 65535)) continue; /* Valid ID? 
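/*
** A standalone sketch of one of the OUI calculations mentioned above, the
** "(r2<<6)|(r3>>10)" form from the commented-out code: commonly the first
** ID register carries the middle OUI bits and the top six bits of the
** second ID register carry the rest.  The register values in main() are
** hypothetical.
*/
#include <stdio.h>

static unsigned oui_bits(unsigned id0, unsigned id1)
{
    return ((id0 & 0xffff) << 6) | ((id1 & 0xffff) >> 10);
}

int main(void)
{
    unsigned id0 = 0x0181, id1 = 0x4410;     /* hypothetical MII_ID0/MII_ID1 */

    printf("OUI bits: 0x%06x\n", oui_bits(id0, id1));
    return 0;
}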
*/ + for (j=0; jphy[k].id; k++); + if (k < DE4X5_MAX_PHY) { + memcpy((char *)&lp->phy[k], + (char *)&phy_info[j], sizeof(struct phy_table)); + lp->phy[k].addr = i; + lp->mii_cnt++; + lp->active++; + } else { + goto purgatory; /* Stop the search */ + } + break; + } + if ((j == limit) && (i < DE4X5_MAX_MII)) { + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); + lp->phy[k].addr = i; + lp->phy[k].id = id; + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ + lp->mii_cnt++; + lp->active++; + printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); + j = de4x5_debug; + de4x5_debug |= DEBUG_MII; + de4x5_dbg_mii(dev, k); + de4x5_debug = j; + printk("\n"); + } + } + purgatory: + lp->active = 0; + if (lp->phy[0].id) { /* Reset the PHY devices */ + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/ + mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); + while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); + + de4x5_dbg_mii(dev, k); + } + } + if (!lp->mii_cnt) lp->useMII = false; + + return lp->mii_cnt; +} + +static char * +build_setup_frame(struct net_device *dev, int mode) +{ + struct de4x5_private *lp = netdev_priv(dev); + int i; + char *pa = lp->setup_frame; + + /* Initialise the setup frame */ + if (mode == ALL) { + memset(lp->setup_frame, 0, SETUP_FRAME_LEN); + } + + if (lp->setup_f == HASH_PERF) { + for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; idev_addr[i]; /* Host address */ + if (i & 0x01) pa += 2; + } + *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80; + } else { + for (i=0; idev_addr[i]; + if (i & 0x01) pa += 4; + } + for (i=0; itimer); +} + +static long +de4x5_switch_mac_port(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + s32 omr; + + STOP_DE4X5; + + /* Assert the OMR_PS bit in CSR6 */ + omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | + OMR_FDX)); + omr |= lp->infoblock_csr6; + if (omr & OMR_PS) omr |= OMR_HBD; + outl(omr, DE4X5_OMR); + + /* Soft Reset */ + RESET_DE4X5; + + /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */ + if (lp->chipset == DC21140) { + gep_wr(lp->cache.gepc, dev); + gep_wr(lp->cache.gep, dev); + } else if ((lp->chipset & ~0x0ff) == DC2114x) { + reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15); + } + + /* Restore CSR6 */ + outl(omr, DE4X5_OMR); + + /* Reset CSR8 */ + inl(DE4X5_MFC); + + return omr; +} + +static void +gep_wr(s32 data, struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + if (lp->chipset == DC21140) { + outl(data, DE4X5_GEP); + } else if ((lp->chipset & ~0x00ff) == DC2114x) { + outl((data<<16) | lp->cache.csr15, DE4X5_SIGR); + } +} + +static int +gep_rd(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + if (lp->chipset == DC21140) { + return inl(DE4X5_GEP); + } else if ((lp->chipset & ~0x00ff) == DC2114x) { + return inl(DE4X5_SIGR) & 0x000fffff; + } + + return 0; +} + +static void +yawn(struct net_device *dev, int state) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return; + + if(lp->bus == EISA) { + switch(state) { + case WAKEUP: + outb(WAKEUP, 
PCI_CFPM); + mdelay(10); + break; + + case SNOOZE: + outb(SNOOZE, PCI_CFPM); + break; + + case SLEEP: + outl(0, DE4X5_SICR); + outb(SLEEP, PCI_CFPM); + break; + } + } else { + struct pci_dev *pdev = to_pci_dev (lp->gendev); + switch(state) { + case WAKEUP: + pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP); + mdelay(10); + break; + + case SNOOZE: + pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE); + break; + + case SLEEP: + outl(0, DE4X5_SICR); + pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP); + break; + } + } +} + +static void +de4x5_parse_params(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + char *p, *q, t; + + lp->params.fdx = false; + lp->params.autosense = AUTO; + + if (args == NULL) return; + + if ((p = strstr(args, dev->name))) { + if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p); + t = *q; + *q = '\0'; + + if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true; + + if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) { + if (strstr(p, "TP")) { + lp->params.autosense = TP; + } else if (strstr(p, "TP_NW")) { + lp->params.autosense = TP_NW; + } else if (strstr(p, "BNC")) { + lp->params.autosense = BNC; + } else if (strstr(p, "AUI")) { + lp->params.autosense = AUI; + } else if (strstr(p, "BNC_AUI")) { + lp->params.autosense = BNC; + } else if (strstr(p, "10Mb")) { + lp->params.autosense = _10Mb; + } else if (strstr(p, "100Mb")) { + lp->params.autosense = _100Mb; + } else if (strstr(p, "AUTO")) { + lp->params.autosense = AUTO; + } + } + *q = t; + } +} + +static void +de4x5_dbg_open(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + int i; + + if (de4x5_debug & DEBUG_OPEN) { + printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq); + printk("\tphysical address: %pM\n", dev->dev_addr); + printk("Descriptor head addresses:\n"); + printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring); + printk("Descriptor addresses:\nRX: "); + for (i=0;irxRingSize-1;i++){ + if (i < 3) { + printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status); + } + } + printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status); + printk("TX: "); + for (i=0;itxRingSize-1;i++){ + if (i < 3) { + printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status); + } + } + printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status); + printk("Descriptor buffers:\nRX: "); + for (i=0;irxRingSize-1;i++){ + if (i < 3) { + printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf)); + } + } + printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf)); + printk("TX: "); + for (i=0;itxRingSize-1;i++){ + if (i < 3) { + printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf)); + } + } + printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); + printk("Ring size:\nRX: %d\nTX: %d\n", + (short)lp->rxRingSize, + (short)lp->txRingSize); + } +} + +static void +de4x5_dbg_mii(struct net_device *dev, int k) +{ + struct de4x5_private *lp = netdev_priv(dev); + u_long iobase = dev->base_addr; + + if (de4x5_debug & DEBUG_MII) { + printk("\nMII device address: %d\n", lp->phy[k].addr); + printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII)); + printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII)); + printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII)); + printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII)); + if (lp->phy[k].id != BROADCOM_T4) { + printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII)); + printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII)); + } + printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII)); 
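/*
** A minimal standalone sketch of the option-string parsing done in
** de4x5_parse_params() above, assuming the same "ethX:fdx autosense=..."
** convention.  Longer keywords are tested first here, so "TP_NW" is not
** swallowed by "TP" the way a literal strstr() ordering of "TP" before
** "TP_NW" would do; the per-device segment limiting is simplified away.
*/
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct params { bool fdx; const char *autosense; };

static void parse_params(const char *args, const char *ifname, struct params *p)
{
    static const char *modes[] = { "TP_NW", "BNC_AUI", "100Mb", "10Mb",
                                   "BNC", "AUI", "TP", "AUTO" };
    const char *s = strstr(args, ifname);
    size_t i;

    p->fdx = false;                      /* defaults, as in the driver */
    p->autosense = "AUTO";
    if (!s)
        return;

    if (strstr(s, "fdx") || strstr(s, "FDX"))
        p->fdx = true;

    for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
        if (strstr(s, modes[i])) {
            p->autosense = modes[i];
            break;
        }
    }
}

int main(void)
{
    struct params p;

    parse_params("eth0:fdx autosense=TP_NW", "eth0", &p);
    printf("fdx=%d autosense=%s\n", p.fdx, p.autosense);
    return 0;
}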
+ if (lp->phy[k].id != BROADCOM_T4) { + printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII)); + printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII)); + } else { + printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII)); + } + } +} + +static void +de4x5_dbg_media(struct net_device *dev) +{ + struct de4x5_private *lp = netdev_priv(dev); + + if (lp->media != lp->c_media) { + if (de4x5_debug & DEBUG_MEDIA) { + printk("%s: media is %s%s\n", dev->name, + (lp->media == NC ? "unconnected, link down or incompatible connection" : + (lp->media == TP ? "TP" : + (lp->media == ANS ? "TP/Nway" : + (lp->media == BNC ? "BNC" : + (lp->media == AUI ? "AUI" : + (lp->media == BNC_AUI ? "BNC/AUI" : + (lp->media == EXT_SIA ? "EXT SIA" : + (lp->media == _100Mb ? "100Mb/s" : + (lp->media == _10Mb ? "10Mb/s" : + "???" + ))))))))), (lp->fdx?" full duplex.":".")); + } + lp->c_media = lp->media; + } +} + +static void +de4x5_dbg_srom(struct de4x5_srom *p) +{ + int i; + + if (de4x5_debug & DEBUG_SROM) { + printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id)); + printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id)); + printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc)); + printk("SROM version: %02x\n", (u_char)(p->version)); + printk("# controllers: %02x\n", (u_char)(p->num_controllers)); + + printk("Hardware Address: %pM\n", p->ieee_addr); + printk("CRC checksum: %04x\n", (u_short)(p->chksum)); + for (i=0; i<64; i++) { + printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i)); + } + } +} + +static void +de4x5_dbg_rx(struct sk_buff *skb, int len) +{ + int i, j; + + if (de4x5_debug & DEBUG_RX) { + printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n", + skb->data, &skb->data[6], + (u_char)skb->data[12], + (u_char)skb->data[13], + len); + for (j=0; len>0;j+=16, len-=16) { + printk(" %03x: ",j); + for (i=0; i<16 && idata[i+j]); + } + printk("\n"); + } + } +} + +/* +** Perform IOCTL call functions here. Some are privileged operations and the +** effective uid is checked in those cases. In the normal course of events +** this function is only used for my testing. +*/ +static int +de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct de4x5_private *lp = netdev_priv(dev); + struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru; + u_long iobase = dev->base_addr; + int i, j, status = 0; + s32 omr; + union { + u8 addr[144]; + u16 sval[72]; + u32 lval[36]; + } tmp; + u_long flags = 0; + + switch(ioc->cmd) { + case DE4X5_GET_HWADDR: /* Get the hardware address */ + ioc->len = ETH_ALEN; + for (i=0; idev_addr[i]; + } + if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + break; + + case DE4X5_SET_HWADDR: /* Set the hardware address */ + if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT; + if (netif_queue_stopped(dev)) + return -EBUSY; + netif_stop_queue(dev); + for (i=0; idev_addr[i] = tmp.addr[i]; + } + build_setup_frame(dev, PHYS_ADDR_ONLY); + /* Set up the descriptor and give ownership to the card */ + load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | + SETUP_FRAME_LEN, (struct sk_buff *)1); + lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; + outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ + netif_wake_queue(dev); /* Unlock the TX ring */ + break; + + case DE4X5_SAY_BOO: /* Say "Boo!" 
to the kernel log file */ + if (!capable(CAP_NET_ADMIN)) return -EPERM; + printk("%s: Boo!\n", dev->name); + break; + + case DE4X5_MCA_EN: /* Enable pass all multicast addressing */ + if (!capable(CAP_NET_ADMIN)) return -EPERM; + omr = inl(DE4X5_OMR); + omr |= OMR_PM; + outl(omr, DE4X5_OMR); + break; + + case DE4X5_GET_STATS: /* Get the driver statistics */ + { + struct pkt_stats statbuf; + ioc->len = sizeof(statbuf); + spin_lock_irqsave(&lp->lock, flags); + memcpy(&statbuf, &lp->pktStats, ioc->len); + spin_unlock_irqrestore(&lp->lock, flags); + if (copy_to_user(ioc->data, &statbuf, ioc->len)) + return -EFAULT; + break; + } + case DE4X5_CLR_STATS: /* Zero out the driver statistics */ + if (!capable(CAP_NET_ADMIN)) return -EPERM; + spin_lock_irqsave(&lp->lock, flags); + memset(&lp->pktStats, 0, sizeof(lp->pktStats)); + spin_unlock_irqrestore(&lp->lock, flags); + break; + + case DE4X5_GET_OMR: /* Get the OMR Register contents */ + tmp.addr[0] = inl(DE4X5_OMR); + if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT; + break; + + case DE4X5_SET_OMR: /* Set the OMR Register contents */ + if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT; + outl(tmp.addr[0], DE4X5_OMR); + break; + + case DE4X5_GET_REG: /* Get the DE4X5 Registers */ + j = 0; + tmp.lval[0] = inl(DE4X5_STS); j+=4; + tmp.lval[1] = inl(DE4X5_BMR); j+=4; + tmp.lval[2] = inl(DE4X5_IMR); j+=4; + tmp.lval[3] = inl(DE4X5_OMR); j+=4; + tmp.lval[4] = inl(DE4X5_SISR); j+=4; + tmp.lval[5] = inl(DE4X5_SICR); j+=4; + tmp.lval[6] = inl(DE4X5_STRR); j+=4; + tmp.lval[7] = inl(DE4X5_SIGR); j+=4; + ioc->len = j; + if (copy_to_user(ioc->data, tmp.lval, ioc->len)) + return -EFAULT; + break; + +#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ +/* + case DE4X5_DUMP: + j = 0; + tmp.addr[j++] = dev->irq; + for (i=0; idev_addr[i]; + } + tmp.addr[j++] = lp->rxRingSize; + tmp.lval[j>>2] = (long)lp->rx_ring; j+=4; + tmp.lval[j>>2] = (long)lp->tx_ring; j+=4; + + for (i=0;irxRingSize-1;i++){ + if (i < 3) { + tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; + } + } + tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; + for (i=0;itxRingSize-1;i++){ + if (i < 3) { + tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; + } + } + tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; + + for (i=0;irxRingSize-1;i++){ + if (i < 3) { + tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; + } + } + tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; + for (i=0;itxRingSize-1;i++){ + if (i < 3) { + tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; + } + } + tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; + + for (i=0;irxRingSize;i++){ + tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4; + } + for (i=0;itxRingSize;i++){ + tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4; + } + + tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4; + tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4; + tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4; + tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4; + tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4; + tmp.lval[j>>2] = inl(DE4X5_STS); j+=4; + tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4; + tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4; + tmp.lval[j>>2] = lp->chipset; j+=4; + if (lp->chipset == DC21140) { + tmp.lval[j>>2] = gep_rd(dev); j+=4; + } else { + tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4; + tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4; + tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4; + tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4; + } + tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4; + if 
(lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { + tmp.lval[j>>2] = lp->active; j+=4; + tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + if (lp->phy[lp->active].id != BROADCOM_T4) { + tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + } + tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + if (lp->phy[lp->active].id != BROADCOM_T4) { + tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + } else { + tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4; + } + } + + tmp.addr[j++] = lp->txRingSize; + tmp.addr[j++] = netif_queue_stopped(dev); + + ioc->len = j; + if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + break; + +*/ + default: + return -EOPNOTSUPP; + } + + return status; +} + +static int __init de4x5_module_init (void) +{ + int err = 0; + +#ifdef CONFIG_PCI + err = pci_register_driver(&de4x5_pci_driver); +#endif +#ifdef CONFIG_EISA + err |= eisa_driver_register (&de4x5_eisa_driver); +#endif + + return err; +} + +static void __exit de4x5_module_exit (void) +{ +#ifdef CONFIG_PCI + pci_unregister_driver (&de4x5_pci_driver); +#endif +#ifdef CONFIG_EISA + eisa_driver_unregister (&de4x5_eisa_driver); +#endif +} + +module_init (de4x5_module_init); +module_exit (de4x5_module_exit); diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h new file mode 100644 index 000000000..ec756eba3 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/de4x5.h @@ -0,0 +1,1017 @@ +/* + Copyright 1994 Digital Equipment Corporation. + + This software may be used and distributed according to the terms of the + GNU General Public License, incorporated herein by reference. + + The author may be reached as davies@wanton.lkg.dec.com or Digital + Equipment Corporation, 550 King Street, Littleton MA 01460. 
+ + ========================================================================= +*/ + +/* +** DC21040 CSR<1..15> Register Address Map +*/ +#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */ +#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */ +#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */ +#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */ +#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */ +#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */ +#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */ +#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */ +#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */ +#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */ +#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */ +#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */ +#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */ +#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */ +#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */ +#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/ +#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */ +#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */ +#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */ +#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */ +#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */ + +/* +** EISA Register Address Map +*/ +#define EISA_ID iobase+0x0c80 /* EISA ID Registers */ +#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */ +#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */ +#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */ +#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */ +#define EISA_CR iobase+0x0c84 /* EISA Control Register */ +#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */ +#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */ +#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */ +#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */ +#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */ + +/* +** PCI/EISA Configuration Registers Address Map +*/ +#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */ +#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */ +#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */ +#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */ +#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */ +#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */ +#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. 
*/ +#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */ +#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */ +#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */ +#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */ + +/* +** EISA Configuration Register 0 bit definitions +*/ +#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */ +#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */ +#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */ +#define ER0_ISTS 0x10 /* Interrupt Status (X) */ +#define ER0_LI 0x08 /* Latch Interrupts */ +#define ER0_INTL 0x06 /* INTerrupt Level */ +#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */ + +/* +** EISA Configuration Register 1 bit definitions +*/ +#define ER1_IAM 0xe0 /* ISA Address Mode */ +#define ER1_IAE 0x10 /* ISA Addressing Enable */ +#define ER1_UPIN 0x0f /* User Pins */ + +/* +** EISA Configuration Register 2 bit definitions +*/ +#define ER2_BRS 0xc0 /* Boot ROM Size */ +#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */ + +/* +** EISA Configuration Register 3 bit definitions +*/ +#define ER3_BWE 0x40 /* Burst Write Enable */ +#define ER3_BRE 0x04 /* Burst Read Enable */ +#define ER3_LSR 0x02 /* Local Software Reset */ + +/* +** PCI Configuration ID Register (PCI_CFID). The Device IDs are left +** shifted 8 bits to allow detection of DC21142 and DC21143 variants with +** the configuration revision register step number. +*/ +#define CFID_DID 0xff00 /* Device ID */ +#define CFID_VID 0x00ff /* Vendor ID */ +#define DC21040_DID 0x0200 /* Unique Device ID # */ +#define DC21040_VID 0x1011 /* DC21040 Manufacturer */ +#define DC21041_DID 0x1400 /* Unique Device ID # */ +#define DC21041_VID 0x1011 /* DC21041 Manufacturer */ +#define DC21140_DID 0x0900 /* Unique Device ID # */ +#define DC21140_VID 0x1011 /* DC21140 Manufacturer */ +#define DC2114x_DID 0x1900 /* Unique Device ID # */ +#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */ + +/* +** Chipset defines +*/ +#define DC21040 DC21040_DID +#define DC21041 DC21041_DID +#define DC21140 DC21140_DID +#define DC2114x DC2114x_DID +#define DC21142 (DC2114x_DID | 0x0010) +#define DC21143 (DC2114x_DID | 0x0030) +#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */ + +#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID)) +#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID)) +#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID)) +#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID)) +#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142)) +#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143)) + +/* +** PCI Configuration Command/Status Register (PCI_CFCS) +*/ +#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */ +#define CFCS_SSE 0x40000000 /* Signal System Error (S) */ +#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */ +#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */ +#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */ +#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */ +#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */ +#define CFCS_SEE 0x00000100 /* System Error Enable (C) */ +#define CFCS_PER 0x00000040 /* Parity Error Response (C) */ +#define CFCS_MO 0x00000004 /* Master Operation (C) */ +#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */ +#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */ + +/* +** PCI Configuration Revision Register 
(PCI_CFRV) +*/ +#define CFRV_BC 0xff000000 /* Base Class */ +#define CFRV_SC 0x00ff0000 /* Subclass */ +#define CFRV_RN 0x000000f0 /* Revision Number */ +#define CFRV_SN 0x0000000f /* Step Number */ +#define BASE_CLASS 0x02000000 /* Indicates Network Controller */ +#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */ +#define STEP_NUMBER 0x00000020 /* Increments for future chips */ +#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */ +#define CFRV_MASK 0xffff0000 /* Register mask */ + +/* +** PCI Configuration Latency Timer Register (PCI_CFLT) +*/ +#define CFLT_BC 0x0000ff00 /* Latency Timer bits */ + +/* +** PCI Configuration Base I/O Address Register (PCI_CBIO) +*/ +#define CBIO_MASK -128 /* Base I/O Address Mask */ +#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */ + +/* +** PCI Configuration Card Information Structure Register (PCI_CCIS) +*/ +#define CCIS_ROMI 0xf0000000 /* ROM Image */ +#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */ +#define CCIS_ASI 0x00000007 /* Address Space Indicator */ + +/* +** PCI Configuration Subsystem ID Register (PCI_SSID) +*/ +#define SSID_SSID 0xffff0000 /* Subsystem ID */ +#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */ + +/* +** PCI Configuration Expansion ROM Base Address Register (PCI_CBER) +*/ +#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */ +#define CBER_ROME 0x00000001 /* ROM Enable */ + +/* +** PCI Configuration Interrupt Register (PCI_CFIT) +*/ +#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */ +#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */ +#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */ +#define CFIT_IRQL 0x000000ff /* Interrupt Line */ + +/* +** PCI Configuration Power Management Area Register (PCI_CFPM) +*/ +#define SLEEP 0x80 /* Power Saving Sleep Mode */ +#define SNOOZE 0x40 /* Power Saving Snooze Mode */ +#define WAKEUP 0x00 /* Power Saving Wakeup */ + +#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */ +#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */ + +/* +** DC21040 Bus Mode Register (DE4X5_BMR) +*/ +#define BMR_RML 0x00200000 /* [Memory] Read Multiple */ +#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */ +#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */ +#define BMR_DAS 0x00010000 /* Diagnostic Address Space */ +#define BMR_CAL 0x0000c000 /* Cache Alignment */ +#define BMR_PBL 0x00003f00 /* Programmable Burst Length */ +#define BMR_BLE 0x00000080 /* Big/Little Endian */ +#define BMR_DSL 0x0000007c /* Descriptor Skip Length */ +#define BMR_BAR 0x00000002 /* Bus ARbitration */ +#define BMR_SWR 0x00000001 /* Software Reset */ + + /* Timings here are for 10BASE-T/AUI only*/ +#define TAP_NOPOLL 0x00000000 /* No automatic polling */ +#define TAP_200US 0x00020000 /* TX automatic polling every 200us */ +#define TAP_800US 0x00040000 /* TX automatic polling every 800us */ +#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */ +#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */ +#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */ +#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */ +#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */ + +#define CAL_NOUSE 0x00000000 /* Not used */ +#define CAL_8LONG 0x00004000 /* 8-longword alignment */ +#define CAL_16LONG 0x00008000 /* 16-longword alignment */ +#define CAL_32LONG 0x0000c000 /* 32-longword alignment */ + +#define PBL_0 
0x00000000 /* DMA burst length = amount in RX FIFO */ +#define PBL_1 0x00000100 /* 1 longword DMA burst length */ +#define PBL_2 0x00000200 /* 2 longwords DMA burst length */ +#define PBL_4 0x00000400 /* 4 longwords DMA burst length */ +#define PBL_8 0x00000800 /* 8 longwords DMA burst length */ +#define PBL_16 0x00001000 /* 16 longwords DMA burst length */ +#define PBL_32 0x00002000 /* 32 longwords DMA burst length */ + +#define DSL_0 0x00000000 /* 0 longword / descriptor */ +#define DSL_1 0x00000004 /* 1 longword / descriptor */ +#define DSL_2 0x00000008 /* 2 longwords / descriptor */ +#define DSL_4 0x00000010 /* 4 longwords / descriptor */ +#define DSL_8 0x00000020 /* 8 longwords / descriptor */ +#define DSL_16 0x00000040 /* 16 longwords / descriptor */ +#define DSL_32 0x00000080 /* 32 longwords / descriptor */ + +/* +** DC21040 Transmit Poll Demand Register (DE4X5_TPD) +*/ +#define TPD 0x00000001 /* Transmit Poll Demand */ + +/* +** DC21040 Receive Poll Demand Register (DE4X5_RPD) +*/ +#define RPD 0x00000001 /* Receive Poll Demand */ + +/* +** DC21040 Receive Ring Base Address Register (DE4X5_RRBA) +*/ +#define RRBA 0xfffffffc /* RX Descriptor List Start Address */ + +/* +** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA) +*/ +#define TRBA 0xfffffffc /* TX Descriptor List Start Address */ + +/* +** Status Register (DE4X5_STS) +*/ +#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */ +#define STS_BE 0x03800000 /* Bus Error Bits */ +#define STS_TS 0x00700000 /* Transmit Process State */ +#define STS_RS 0x000e0000 /* Receive Process State */ +#define STS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define STS_ER 0x00004000 /* Early Receive */ +#define STS_FBE 0x00002000 /* Fatal Bus Error */ +#define STS_SE 0x00002000 /* System Error */ +#define STS_LNF 0x00001000 /* Link Fail */ +#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */ +#define STS_TM 0x00000800 /* Timer Expired (DC21041) */ +#define STS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define STS_AT 0x00000400 /* AUI/TP Pin */ +#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */ +#define STS_RPS 0x00000100 /* Receive Process Stopped */ +#define STS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define STS_RI 0x00000040 /* Receive Interrupt */ +#define STS_UNF 0x00000020 /* Transmit Underflow */ +#define STS_LNP 0x00000010 /* Link Pass */ +#define STS_ANC 0x00000010 /* Autonegotiation Complete */ +#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */ +#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */ +#define STS_TPS 0x00000002 /* Transmit Process Stopped */ +#define STS_TI 0x00000001 /* Transmit Interrupt */ + +#define EB_PAR 0x00000000 /* Parity Error */ +#define EB_MA 0x00800000 /* Master Abort */ +#define EB_TA 0x01000000 /* Target Abort */ +#define EB_RES0 0x01800000 /* Reserved */ +#define EB_RES1 0x02000000 /* Reserved */ + +#define TS_STOP 0x00000000 /* Stopped */ +#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */ +#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */ +#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */ +#define TS_RES 0x00400000 /* Reserved */ +#define TS_SPKT 0x00500000 /* Setup Packet */ +#define TS_SUSP 0x00600000 /* Suspended */ +#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */ + +#define RS_STOP 0x00000000 /* Stopped */ +#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */ +#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */ 
+#define RS_WFRP 0x00060000 /* Wait for Receive Packet */ +#define RS_SUSP 0x00080000 /* Suspended */ +#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */ +#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */ +#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */ + +#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */ + +/* +** Operation Mode Register (DE4X5_OMR) +*/ +#define OMR_SC 0x80000000 /* Special Capture Effect Enable */ +#define OMR_RA 0x40000000 /* Receive All */ +#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */ +#define OMR_SCR 0x01000000 /* Scrambler Mode */ +#define OMR_PCS 0x00800000 /* PCS Function */ +#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */ +#define OMR_SF 0x00200000 /* Store and Forward */ +#define OMR_HBD 0x00080000 /* HeartBeat Disable */ +#define OMR_PS 0x00040000 /* Port Select */ +#define OMR_CA 0x00020000 /* Capture Effect Enable */ +#define OMR_BP 0x00010000 /* Back Pressure */ +#define OMR_TR 0x0000c000 /* Threshold Control Bits */ +#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */ +#define OMR_FC 0x00001000 /* Force Collision Mode */ +#define OMR_OM 0x00000c00 /* Operating Mode */ +#define OMR_FDX 0x00000200 /* Full Duplex Mode */ +#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */ +#define OMR_PM 0x00000080 /* Pass All Multicast */ +#define OMR_PR 0x00000040 /* Promiscuous Mode */ +#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */ +#define OMR_IF 0x00000010 /* Inverse Filtering */ +#define OMR_PB 0x00000008 /* Pass Bad Frames */ +#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */ +#define OMR_SR 0x00000002 /* Start/Stop Receive */ +#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */ + +#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */ +#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */ +#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */ +#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */ + +#define OMR_DEF (OMR_SDP) +#define OMR_SIA (OMR_SDP | OMR_TTM) +#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS) +#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS) +#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS) + +/* +** DC21040 Interrupt Mask Register (DE4X5_IMR) +*/ +#define IMR_GPM 0x04000000 /* General Purpose Port Mask */ +#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */ +#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */ +#define IMR_ERM 0x00004000 /* Early Receive Mask */ +#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */ +#define IMR_SEM 0x00002000 /* System Error Mask */ +#define IMR_LFM 0x00001000 /* Link Fail Mask */ +#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */ +#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */ +#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */ +#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */ +#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */ +#define IMR_RSM 0x00000100 /* Receive Stopped Mask */ +#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */ +#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */ +#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */ +#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */ +#define IMR_LPM 0x00000010 /* Link Pass */ +#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */ +#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */ +#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */ +#define 
IMR_TIM 0x00000001 /* Transmit Interrupt Mask */ + +/* +** Missed Frames and FIFO Overflow Counters (DE4X5_MFC) +*/ +#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */ +#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */ +#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */ +#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */ +#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */ + +/* +** DC21040 Ethernet Address PROM (DE4X5_APROM) +*/ +#define APROM_DN 0x80000000 /* Data Not Valid */ +#define APROM_DT 0x000000ff /* Address Byte */ + +/* +** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM) +*/ +#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */ +#define BROM_RD 0x00004000 /* Read from Boot ROM */ +#define BROM_WR 0x00002000 /* Write to Boot ROM */ +#define BROM_BR 0x00001000 /* Select Boot ROM when set */ +#define BROM_SR 0x00000800 /* Select Serial ROM when set */ +#define BROM_REG 0x00000400 /* External Register Select */ +#define BROM_DT 0x000000ff /* Data Byte */ + +/* +** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII) +*/ +#define MII_MDI 0x00080000 /* MII Management Data In */ +#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */ +#define MII_MRD 0x00040000 /* MII Management Define Read Mode */ +#define MII_MWR 0x00000000 /* MII Management Define Write Mode */ +#define MII_MDT 0x00020000 /* MII Management Data Out */ +#define MII_MDC 0x00010000 /* MII Management Clock */ +#define MII_RD 0x00004000 /* Read from MII */ +#define MII_WR 0x00002000 /* Write to MII */ +#define MII_SEL 0x00000800 /* Select MII when RESET */ + +#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */ +#define SROM_RD 0x00004000 /* Read from Boot ROM */ +#define SROM_WR 0x00002000 /* Write to Boot ROM */ +#define SROM_BR 0x00001000 /* Select Boot ROM when set */ +#define SROM_SR 0x00000800 /* Select Serial ROM when set */ +#define SROM_REG 0x00000400 /* External Register Select */ +#define SROM_DT 0x000000ff /* Data Byte */ + +#define DT_OUT 0x00000008 /* Serial Data Out */ +#define DT_IN 0x00000004 /* Serial Data In */ +#define DT_CLK 0x00000002 /* Serial ROM Clock */ +#define DT_CS 0x00000001 /* Serial ROM Chip Select */ + +#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */ +#define MII_TEST 0xaaaaaaaa /* MII Test Signal */ +#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */ +#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */ + +#define MII_CR 0x00 /* MII Management Control Register */ +#define MII_SR 0x01 /* MII Management Status Register */ +#define MII_ID0 0x02 /* PHY Identifier Register 0 */ +#define MII_ID1 0x03 /* PHY Identifier Register 1 */ +#define MII_ANA 0x04 /* Auto Negotiation Advertisement */ +#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */ +#define MII_ANE 0x06 /* Auto Negotiation Expansion */ +#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */ + +#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */ + +/* +** MII Management Control Register +*/ +#define MII_CR_RST 0x8000 /* RESET the PHY chip */ +#define MII_CR_LPBK 0x4000 /* Loopback enable */ +#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */ +#define MII_CR_10 0x0000 /* Set 10Mb/s */ +#define MII_CR_100 0x2000 /* Set 100Mb/s */ +#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */ +#define MII_CR_PD 0x0800 /* Power Down */ +#define MII_CR_ISOL 0x0400 /* Isolate Mode */ +#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */ +#define MII_CR_FDM 
0x0100 /* Full Duplex Mode */ +#define MII_CR_CTE 0x0080 /* Collision Test Enable */ + +/* +** MII Management Status Register +*/ +#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */ +#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */ +#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */ +#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */ +#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */ +#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/ +#define MII_SR_RFD 0x0010 /* Remote Fault Detected */ +#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */ +#define MII_SR_LKS 0x0004 /* Link Status */ +#define MII_SR_JABD 0x0002 /* Jabber Detect */ +#define MII_SR_XC 0x0001 /* Extended Capabilities */ + +/* +** MII Management Auto Negotiation Advertisement Register +*/ +#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */ +#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */ +#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */ +#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */ +#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */ +#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */ +#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */ +#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */ + +/* +** MII Management Auto Negotiation Remote End Register +*/ +#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */ +#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */ +#define MII_ANLPA_RF 0x2000 /* Remote Fault */ +#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */ +#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */ +#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */ +#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */ +#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */ +#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */ +#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */ +#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */ + +/* +** SROM Media Definitions (ABG SROM Section) +*/ +#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */ +#define MEDIA_MII 0x0040 /* MII Present on the adapter */ +#define MEDIA_FIBRE 0x0008 /* Fibre Media present */ +#define MEDIA_AUI 0x0004 /* AUI Media present */ +#define MEDIA_TP 0x0002 /* TP Media present */ +#define MEDIA_BNC 0x0001 /* BNC Media present */ + +/* +** SROM Definitions (Digital Semiconductor Format) +*/ +#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */ +#define SROM_SSID 0x0002 /* Sub-system ID offset */ +#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */ +#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */ +#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/ +#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */ +#define SROM_SFV 0x0012 /* SROM Format Version offset */ +#define SROM_CCNT 0x0013 /* Controller Count offset */ +#define SROM_HWADD 0x0014 /* Hardware Address offset */ +#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/ +#define SROM_CRC 0x007e /* SROM CRC offset */ + +/* +** SROM Media Connection Definitions +*/ +#define SROM_10BT 0x0000 /* 10BASE-T half duplex */ +#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */ +#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */ +#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */ +#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */ +#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */ +#define SROM_100BTH 0x0003 /* 100BASE-T 
half duplex */ +#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */ +#define SROM_100BT4 0x0006 /* 100BASE-T4 */ +#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */ +#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */ +#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */ +#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */ +#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */ +#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */ +#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */ +#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */ +#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */ +#define SROM_PAO 0x8800 /* Powerup Autosense Only */ +#define SROM_NSMI 0xffff /* No Selected Media Information */ + +/* +** SROM Media Definitions +*/ +#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */ +#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */ +#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */ +#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */ +#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */ +#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */ +#define SROM_100BASET4 0x0006 /* 100BASE-T4 */ +#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */ +#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */ + +#define BLOCK_LEN 0x7f /* Extended blocks length mask */ +#define EXT_FIELD 0x40 /* Extended blocks extension field bit */ +#define MEDIA_CODE 0x3f /* Extended blocks media code mask */ + +/* +** SROM Compact Format Block Masks +*/ +#define COMPACT_FI 0x80 /* Format Indicator */ +#define COMPACT_LEN 0x04 /* Length */ +#define COMPACT_MC 0x3f /* Media Code */ + +/* +** SROM Extended Format Block Type 0 Masks +*/ +#define BLOCK0_FI 0x80 /* Format Indicator */ +#define BLOCK0_MCS 0x80 /* Media Code byte Sign */ +#define BLOCK0_MC 0x3f /* Media Code */ + +/* +** DC21040 Full Duplex Register (DE4X5_FDR) +*/ +#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */ + +/* +** DC21041 General Purpose Timer Register (DE4X5_GPT) +*/ +#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */ +#define GPT_VAL 0x0000ffff /* Timer Value */ + +/* +** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits) +*/ +/* Valid ONLY for DE500 hardware */ +#define GEP_LNP 0x00000080 /* Link Pass (input) */ +#define GEP_SLNK 0x00000040 /* SYM LINK (input) */ +#define GEP_SDET 0x00000020 /* Signal Detect (input) */ +#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */ +#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */ +#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */ +#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */ +#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */ +#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */ +#define GEP_CTRL 0x00000100 /* GEP control bit */ + +/* +** SIA Register Defaults +*/ +#define CSR13 0x00000001 +#define CSR14 0x0003ff7f /* Autonegotiation disabled */ +#define CSR15 0x00000008 + +/* +** SIA Status Register (DE4X5_SISR) +*/ +#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */ +#define SISR_LPN 0x00008000 /* Link Partner Negotiable */ +#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */ +#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */ +#define SISR_TRF 0x00000800 /* Transmit Remote Fault */ +#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */ +#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/ +#define SISR_TRA 0x00000200 
/* 10BASE-T Receive Port Activity */ +#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */ +#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */ +#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */ +#define SISR_DAO 0x00000080 /* PLL All One */ +#define SISR_DAZ 0x00000040 /* PLL All Zero */ +#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */ +#define SISR_DSD 0x00000010 /* PLL Self-Test Done */ +#define SISR_APS 0x00000008 /* Auto Polarity State */ +#define SISR_LKF 0x00000004 /* Link Fail Status */ +#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */ +#define SISR_NCR 0x00000002 /* Network Connection Error */ +#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */ +#define SISR_PAUI 0x00000001 /* AUI_TP Indication */ +#define SISR_MRA 0x00000001 /* MII Receive Port Activity */ + +#define ANS_NDIS 0x00000000 /* Nway disable */ +#define ANS_TDIS 0x00001000 /* Transmit Disable */ +#define ANS_ADET 0x00002000 /* Ability Detect */ +#define ANS_ACK 0x00003000 /* Acknowledge */ +#define ANS_CACK 0x00004000 /* Complete Acknowledge */ +#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */ +#define ANS_LCHK 0x00006000 /* Link Check */ + +#define SISR_RST 0x00000301 /* CSR12 reset */ +#define SISR_ANR 0x00001301 /* Autonegotiation restart */ + +/* +** SIA Connectivity Register (DE4X5_SICR) +*/ +#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */ +#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */ +#define SICR_OE24 0x00004000 /* Output Enable 2 4 */ +#define SICR_OE13 0x00002000 /* Output Enable 1 3 */ +#define SICR_IE 0x00001000 /* Input Enable */ +#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */ +#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */ +#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/ +#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/ +#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */ +#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */ +#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/ +#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */ +#define SICR_ASE 0x00000080 /* APLL Start Enable*/ +#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */ +#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */ +#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */ +#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */ +#define SICR_CAC 0x00000004 /* CSR Auto Configuration */ +#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */ +#define SICR_SRL 0x00000001 /* SIA Reset */ +#define SIA_RESET 0x00000000 /* SIA Reset Value */ + +/* +** SIA Transmit and Receive Register (DE4X5_STRR) +*/ +#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */ +#define STRR_SPP 0x00004000 /* Set Polarity Plus */ +#define STRR_APE 0x00002000 /* Auto Polarity Enable */ +#define STRR_LTE 0x00001000 /* Link Test Enable */ +#define STRR_SQE 0x00000800 /* Signal Quality Enable */ +#define STRR_CLD 0x00000400 /* Collision Detect Enable */ +#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */ +#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */ +#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */ +#define STRR_HDE 0x00000040 /* Half Duplex Enable */ +#define STRR_CPEN 0x00000030 /* Compensation Enable */ +#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */ +#define STRR_DREN 0x00000004 /* Driver Enable */ +#define STRR_LBK 
0x00000002 /* Loopback Enable */ +#define STRR_ECEN 0x00000001 /* Encoder Enable */ +#define STRR_RESET 0xffffffff /* Reset value for STRR */ + +/* +** SIA General Register (DE4X5_SIGR) +*/ +#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */ +#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */ +#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */ +#define SIGR_CWE 0x08000000 /* Control Write Enable */ +#define SIGR_RME 0x04000000 /* Receive Match Enable */ +#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */ +#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */ +#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */ +#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */ +#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */ +#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */ +#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */ +#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */ +#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */ +#define SIGR_FRL 0x00002000 /* Force Receiver Low */ +#define SIGR_DPST 0x00001000 /* PLL Self Test Start */ +#define SIGR_LSD 0x00000800 /* LED Stretch Disable */ +#define SIGR_FLF 0x00000400 /* Force Link Fail */ +#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */ +#define SIGR_TSCK 0x00000100 /* Test Clock */ +#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */ +#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */ +#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */ +#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */ +#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */ +#define SIGR_JCK 0x00000004 /* Jabber Clock */ +#define SIGR_HUJ 0x00000002 /* Host Unjab */ +#define SIGR_JBD 0x00000001 /* Jabber Disable */ +#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */ + +/* +** Receive Descriptor Bit Summary +*/ +#define R_OWN 0x80000000 /* Own Bit */ +#define RD_FF 0x40000000 /* Filtering Fail */ +#define RD_FL 0x3fff0000 /* Frame Length */ +#define RD_ES 0x00008000 /* Error Summary */ +#define RD_LE 0x00004000 /* Length Error */ +#define RD_DT 0x00003000 /* Data Type */ +#define RD_RF 0x00000800 /* Runt Frame */ +#define RD_MF 0x00000400 /* Multicast Frame */ +#define RD_FS 0x00000200 /* First Descriptor */ +#define RD_LS 0x00000100 /* Last Descriptor */ +#define RD_TL 0x00000080 /* Frame Too Long */ +#define RD_CS 0x00000040 /* Collision Seen */ +#define RD_FT 0x00000020 /* Frame Type */ +#define RD_RJ 0x00000010 /* Receive Watchdog */ +#define RD_RE 0x00000008 /* Report on MII Error */ +#define RD_DB 0x00000004 /* Dribbling Bit */ +#define RD_CE 0x00000002 /* CRC Error */ +#define RD_OF 0x00000001 /* Overflow */ + +#define RD_RER 0x02000000 /* Receive End Of Ring */ +#define RD_RCH 0x01000000 /* Second Address Chained */ +#define RD_RBS2 0x003ff800 /* Buffer 2 Size */ +#define RD_RBS1 0x000007ff /* Buffer 1 Size */ + +/* +** Transmit Descriptor Bit Summary +*/ +#define T_OWN 0x80000000 /* Own Bit */ +#define TD_ES 0x00008000 /* Error Summary */ +#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */ +#define TD_LO 0x00000800 /* Loss Of Carrier */ +#define TD_NC 0x00000400 /* No Carrier */ +#define TD_LC 0x00000200 /* Late Collision */ +#define TD_EC 0x00000100 /* Excessive Collisions */ +#define TD_HF 0x00000080 /* Heartbeat Fail */ +#define TD_CC 0x00000078 /* Collision Counter */ +#define TD_LF 0x00000004 /* Link Fail */ +#define TD_UF 0x00000002 /* Underflow Error */ +#define TD_DE 0x00000001 /* Deferred */ + +#define TD_IC 0x80000000 /* Interrupt On 
Completion */ +#define TD_LS 0x40000000 /* Last Segment */ +#define TD_FS 0x20000000 /* First Segment */ +#define TD_FT1 0x10000000 /* Filtering Type */ +#define TD_SET 0x08000000 /* Setup Packet */ +#define TD_AC 0x04000000 /* Add CRC Disable */ +#define TD_TER 0x02000000 /* Transmit End Of Ring */ +#define TD_TCH 0x01000000 /* Second Address Chained */ +#define TD_DPD 0x00800000 /* Disabled Padding */ +#define TD_FT0 0x00400000 /* Filtering Type */ +#define TD_TBS2 0x003ff800 /* Buffer 2 Size */ +#define TD_TBS1 0x000007ff /* Buffer 1 Size */ + +#define PERFECT_F 0x00000000 +#define HASH_F TD_FT0 +#define INVERSE_F TD_FT1 +#define HASH_O_F (TD_FT1 | TD_F0) + +/* +** Media / mode state machine definitions +** User selectable: +*/ +#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */ +#define TP_NW 0x0002 /* 10Base-T with Nway */ +#define BNC 0x0004 /* Thinwire */ +#define AUI 0x0008 /* Thickwire */ +#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */ +#define _10Mb 0x0040 /* 10Mb/s Ethernet */ +#define _100Mb 0x0080 /* 100Mb/s Ethernet */ +#define AUTO 0x4000 /* Auto sense the media or speed */ + +/* +** Internal states +*/ +#define NC 0x0000 /* No Connection */ +#define ANS 0x0020 /* Intermediate AutoNegotiation State */ +#define SPD_DET 0x0100 /* Parallel speed detection */ +#define INIT 0x0200 /* Initial state */ +#define EXT_SIA 0x0400 /* External SIA for motherboard chip */ +#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */ +#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */ +#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */ +#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */ +#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */ +#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */ +#define MII 0x1000 /* MII on the 21143 */ + +#define TIMER_CB 0x80000000 /* Timer callback detection */ + +/* +** DE4X5 DEBUG Options +*/ +#define DEBUG_NONE 0x0000 /* No DEBUG messages */ +#define DEBUG_VERSION 0x0001 /* Print version message */ +#define DEBUG_MEDIA 0x0002 /* Print media messages */ +#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */ +#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */ +#define DEBUG_SROM 0x0010 /* Print SROM messages */ +#define DEBUG_MII 0x0020 /* Print MII messages */ +#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */ +#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */ +#define DEBUG_PCICFG 0x0100 +#define DEBUG_ALL 0x01ff + +/* +** Miscellaneous +*/ +#define PCI 0 +#define EISA 1 + +#define HASH_TABLE_LEN 512 /* Bits */ +#define HASH_BITS 0x01ff /* 9 LS bits */ + +#define SETUP_FRAME_LEN 192 /* Bytes */ +#define IMPERF_PA_OFFSET 156 /* Bytes */ + +#define POLL_DEMAND 1 + +#define LOST_MEDIA_THRESHOLD 3 + +#define MASK_INTERRUPTS 1 +#define UNMASK_INTERRUPTS 0 + +#define DE4X5_STRLEN 8 + +#define DE4X5_INIT 0 /* Initialisation time */ +#define DE4X5_RUN 1 /* Run time */ + +#define DE4X5_SAVE_STATE 0 +#define DE4X5_RESTORE_STATE 1 + +/* +** Address Filtering Modes +*/ +#define PERFECT 0 /* 16 perfect physical addresses */ +#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */ +#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */ +#define ALL_HASH 3 /* Hashes all physical & multicast addrs */ + +#define ALL 0 /* Clear out all the setup frame */ +#define PHYS_ADDR_ONLY 1 /* Update the physical address only */ + +/* +** Adapter state +*/ +#define INITIALISED 0 /* After h/w initialised and mem alloc'd */ +#define 
CLOSED 1 /* Ready for opening */ +#define OPEN 2 /* Running */ + +/* +** Various wait times +*/ +#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */ +#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */ + +/* +** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since +** the vendors seem split 50-50 on how to calculate the OUI register values +** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()]. +*/ +#define NATIONAL_TX 0x2000 +#define BROADCOM_T4 0x03e0 +#define SEEQ_T4 0x0016 +#define CYPRESS_T4 0x0014 + +/* +** Speed Selection stuff +*/ +#define SET_10Mb {\ + if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ + omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\ + if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\ + mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ + }\ + omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\ + outl(omr, DE4X5_OMR);\ + if (!lp->useSROM) lp->cache.gep = 0;\ + } else if (lp->useSROM && !lp->useMII) {\ + omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ + omr |= (lp->fdx ? OMR_FDX : 0);\ + outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\ + } else {\ + omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ + omr |= (lp->fdx ? OMR_FDX : 0);\ + outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\ + lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\ + gep_wr(lp->cache.gep, dev);\ + }\ +} + +#define SET_100Mb {\ + if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ + int fdx=0;\ + if (lp->phy[lp->active].id == NATIONAL_TX) {\ + mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\ + 0x18, lp->phy[lp->active].addr, DE4X5_MII);\ + }\ + omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\ + sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\ + if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\ + if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\ + mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ + }\ + if (fdx) omr |= OMR_FDX;\ + outl(omr, DE4X5_OMR);\ + if (!lp->useSROM) lp->cache.gep = 0;\ + } else if (lp->useSROM && !lp->useMII) {\ + omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ + omr |= (lp->fdx ? OMR_FDX : 0);\ + outl(omr | lp->infoblock_csr6, DE4X5_OMR);\ + } else {\ + omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ + omr |= (lp->fdx ? OMR_FDX : 0);\ + outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\ + lp->cache.gep = (lp->fdx ? 
0 : GEP_FDXD) | GEP_MODE;\ + gep_wr(lp->cache.gep, dev);\ + }\ +} + +/* FIX ME so I don't jam 10Mb networks */ +#define SET_100Mb_PDET {\ + if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ + mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ + omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ + outl(omr, DE4X5_OMR);\ + } else if (lp->useSROM && !lp->useMII) {\ + omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ + outl(omr, DE4X5_OMR);\ + } else {\ + omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ + outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\ + lp->cache.gep = (GEP_FDXD | GEP_MODE);\ + gep_wr(lp->cache.gep, dev);\ + }\ +} + +/* +** Include the IOCTL stuff +*/ +#include + +struct de4x5_ioctl { + unsigned short cmd; /* Command to run */ + unsigned short len; /* Length of the data buffer */ + unsigned char __user *data; /* Pointer to the data buffer */ +}; + +/* +** Recognised commands for the driver +*/ +#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ +#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ +/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */ +#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */ +#define DE4X5_GET_MCA 0x06 /* Get a multicast address */ +#define DE4X5_SET_MCA 0x07 /* Set a multicast address */ +#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */ +#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */ +#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */ +#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */ +#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */ +#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ +#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ + +#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008) diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c new file mode 100644 index 000000000..afd8e78e0 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -0,0 +1,2273 @@ +/* + A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast + ethernet driver for Linux. + Copyright (C) 1997 Sten Wang + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + DAVICOM Web-Site: www.davicom.com.tw + + Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw + Maintainer: Tobias Ringstrom + + (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. + + Marcelo Tosatti : + Made it compile in 2.3 (device to net_device) + + Alan Cox : + Cleaned up for kernel merge. + Removed the back compatibility support + Reformatted, fixing spelling etc as I went + Removed IRQ 0-15 assumption + + Jeff Garzik : + Updated to use new PCI driver API. + Resource usage cleanups. + Report driver version to user. + + Tobias Ringstrom : + Cleaned up and added SMP safety. Thanks go to Jeff Garzik, + Andrew Morton and Frank Davis for the SMP safety fixes. 
+ + Vojtech Pavlik : + Cleaned up pointer arithmetics. + Fixed a lot of 64bit issues. + Cleaned up printk()s a bit. + Fixed some obvious big endian problems. + + Tobias Ringstrom : + Use time_after for jiffies calculation. Added ethtool + support. Updated PCI resource allocation. Do not + forget to unmap PCI mapped skbs. + + Alan Cox + Added new PCI identifiers provided by Clear Zhang at ALi + for their 1563 ethernet device. + + TODO + + Check on 64 bit boxes. + Check and fix on big endian boxes. + + Test and make sure PCI latency is now correct for all cases. +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "dmfe" +#define DRV_VERSION "1.36.4" +#define DRV_RELDATE "2002-01-17" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_TULIP_DM910X +#include +#endif + + +/* Board/System/Debug information/definition ---------------- */ +#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */ +#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */ +#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */ +#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */ + +#define DM9102_IO_SIZE 0x80 +#define DM9102A_IO_SIZE 0x100 +#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */ +#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */ +#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */ +#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */ +#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */ +#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT) +#define TX_BUF_ALLOC 0x600 +#define RX_ALLOC_SIZE 0x620 +#define DM910X_RESET 1 +#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */ +#define CR6_DEFAULT 0x00080000 /* HD */ +#define CR7_DEFAULT 0x180c1 +#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */ +#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */ +#define MAX_PACKET_SIZE 1514 +#define DMFE_MAX_MULTICAST 14 +#define RX_COPY_SIZE 100 +#define MAX_CHECK_PACKET 0x8000 +#define DM9801_NOISE_FLOOR 8 +#define DM9802_NOISE_FLOOR 5 + +#define DMFE_WOL_LINKCHANGE 0x20000000 +#define DMFE_WOL_SAMPLEPACKET 0x10000000 +#define DMFE_WOL_MAGICPACKET 0x08000000 + + +#define DMFE_10MHF 0 +#define DMFE_100MHF 1 +#define DMFE_10MFD 4 +#define DMFE_100MFD 5 +#define DMFE_AUTO 8 +#define DMFE_1M_HPNA 0x10 + +#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */ +#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */ +#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */ +#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */ +#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */ +#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */ + +#define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */ +#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ +#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ + +#define dw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define dw16(reg, val) iowrite16(val, ioaddr + (reg)) +#define dr32(reg) ioread32(ioaddr + (reg)) +#define dr16(reg) ioread16(ioaddr + (reg)) +#define dr8(reg) ioread8(ioaddr + (reg)) + +#define DMFE_DBUG(dbug_now, msg, value) \ + do { \ + if (dmfe_debug || (dbug_now)) \ + pr_err("%s %lx\n", \ + (msg), (long) (value)); \ + } while (0) + +#define SHOW_MEDIA_TYPE(mode) \ + pr_info("Change Speed to %sMhz %s duplex\n" , \ + (mode & 1) ? "100":"10", \ + (mode & 4) ? 
"full":"half"); + + +/* CR9 definition: SROM/MII */ +#define CR9_SROM_READ 0x4800 +#define CR9_SRCS 0x1 +#define CR9_SRCLK 0x2 +#define CR9_CRDOUT 0x8 +#define SROM_DATA_0 0x0 +#define SROM_DATA_1 0x4 +#define PHY_DATA_1 0x20000 +#define PHY_DATA_0 0x00000 +#define MDCLKH 0x10000 + +#define PHY_POWER_DOWN 0x800 + +#define SROM_V41_CODE 0x14 + +#define __CHK_IO_SIZE(pci_id, dev_rev) \ + (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \ + DM9102A_IO_SIZE: DM9102_IO_SIZE) + +#define CHK_IO_SIZE(pci_dev) \ + (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \ + (pci_dev)->revision)) + +/* Sten Check */ +#define DEVICE net_device + +/* Structure/enum declaration ------------------------------- */ +struct tx_desc { + __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ + char *tx_buf_ptr; /* Data for us */ + struct tx_desc *next_tx_desc; +} __attribute__(( aligned(32) )); + +struct rx_desc { + __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ + struct sk_buff *rx_skb_ptr; /* Data for us */ + struct rx_desc *next_rx_desc; +} __attribute__(( aligned(32) )); + +struct dmfe_board_info { + u32 chip_id; /* Chip vendor/Device ID */ + u8 chip_revision; /* Chip revision */ + struct net_device *next_dev; /* next device */ + struct pci_dev *pdev; /* PCI device */ + spinlock_t lock; + + void __iomem *ioaddr; /* I/O base address */ + u32 cr0_data; + u32 cr5_data; + u32 cr6_data; + u32 cr7_data; + u32 cr15_data; + + /* pointer for memory physical address */ + dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */ + dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */ + dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */ + dma_addr_t first_tx_desc_dma; + dma_addr_t first_rx_desc_dma; + + /* descriptor pointer */ + unsigned char *buf_pool_ptr; /* Tx buffer pool memory */ + unsigned char *buf_pool_start; /* Tx buffer pool align dword */ + unsigned char *desc_pool_ptr; /* descriptor pool memory */ + struct tx_desc *first_tx_desc; + struct tx_desc *tx_insert_ptr; + struct tx_desc *tx_remove_ptr; + struct rx_desc *first_rx_desc; + struct rx_desc *rx_insert_ptr; + struct rx_desc *rx_ready_ptr; /* packet come pointer */ + unsigned long tx_packet_cnt; /* transmitted packet count */ + unsigned long tx_queue_cnt; /* wait to send packet count */ + unsigned long rx_avail_cnt; /* available rx descriptor count */ + unsigned long interval_rx_cnt; /* rx packet count a callback time */ + + u16 HPNA_command; /* For HPNA register 16 */ + u16 HPNA_timer; /* For HPNA remote device check */ + u16 dbug_cnt; + u16 NIC_capability; /* NIC media capability */ + u16 PHY_reg4; /* Saved Phyxcer register 4 value */ + + u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */ + u8 chip_type; /* Keep DM9102A chip type */ + u8 media_mode; /* user specify media mode */ + u8 op_mode; /* real work media mode */ + u8 phy_addr; + u8 wait_reset; /* Hardware failed, need to reset */ + u8 dm910x_chk_mode; /* Operating mode check */ + u8 first_in_callback; /* Flag to record state */ + u8 wol_mode; /* user WOL settings */ + struct timer_list timer; + + /* Driver defined statistic counter */ + unsigned long tx_fifo_underrun; + unsigned long tx_loss_carrier; + unsigned long tx_no_carrier; + unsigned long tx_late_collision; + unsigned long tx_excessive_collision; + unsigned long tx_jabber_timeout; + unsigned long reset_count; + unsigned long reset_cr8; + unsigned long reset_fatal; + unsigned long reset_TXtimeout; + + /* NIC SROM data */ + unsigned char srom[128]; +}; + +enum dmfe_offsets { + DCR0 = 0x00, DCR1 
= 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20, + DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48, + DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70, + DCR15 = 0x78 +}; + +enum dmfe_CR6_bits { + CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80, + CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000, + CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000 +}; + +/* Global variable declaration ----------------------------- */ +static int printed_version; +static const char version[] = + "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")"; + +static int dmfe_debug; +static unsigned char dmfe_media_mode = DMFE_AUTO; +static u32 dmfe_cr6_user_set; + +/* For module input parameter */ +static int debug; +static u32 cr6set; +static unsigned char mode = 8; +static u8 chkmode = 1; +static u8 HPNA_mode; /* Default: Low Power/High Speed */ +static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */ +static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */ +static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */ +static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control + 4: TX pause packet */ + + +/* function declaration ------------------------------------- */ +static int dmfe_open(struct DEVICE *); +static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *); +static int dmfe_stop(struct DEVICE *); +static void dmfe_set_filter_mode(struct DEVICE *); +static const struct ethtool_ops netdev_ethtool_ops; +static u16 read_srom_word(void __iomem *, int); +static irqreturn_t dmfe_interrupt(int , void *); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void poll_dmfe (struct net_device *dev); +#endif +static void dmfe_descriptor_init(struct net_device *); +static void allocate_rx_buffer(struct net_device *); +static void update_cr6(u32, void __iomem *); +static void send_filter_frame(struct DEVICE *); +static void dm9132_id_table(struct DEVICE *); +static u16 dmfe_phy_read(void __iomem *, u8, u8, u32); +static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32); +static void dmfe_phy_write_1bit(void __iomem *, u32); +static u16 dmfe_phy_read_1bit(void __iomem *); +static u8 dmfe_sense_speed(struct dmfe_board_info *); +static void dmfe_process_mode(struct dmfe_board_info *); +static void dmfe_timer(unsigned long); +static inline u32 cal_CRC(unsigned char *, unsigned int, u8); +static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *); +static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *); +static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *); +static void dmfe_dynamic_reset(struct DEVICE *); +static void dmfe_free_rxbuffer(struct dmfe_board_info *); +static void dmfe_init_dm910x(struct DEVICE *); +static void dmfe_parse_srom(struct dmfe_board_info *); +static void dmfe_program_DM9801(struct dmfe_board_info *, int); +static void dmfe_program_DM9802(struct dmfe_board_info *); +static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * ); +static void dmfe_set_phyxcer(struct dmfe_board_info *); + +/* DM910X network board routine ---------------------------- */ + +static const struct net_device_ops netdev_ops = { + .ndo_open = dmfe_open, + .ndo_stop = dmfe_stop, + .ndo_start_xmit = dmfe_start_xmit, + .ndo_set_rx_mode = dmfe_set_filter_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_dmfe, +#endif +}; + +/* + * 
Search DM910X board ,allocate space and register it + */ + +static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct dmfe_board_info *db; /* board information structure */ + struct net_device *dev; + u32 pci_pmr; + int i, err; + + DMFE_DBUG(0, "dmfe_init_one()", 0); + + if (!printed_version++) + pr_info("%s\n", version); + + /* + * SPARC on-board DM910x chips should be handled by the main + * tulip driver, except for early DM9100s. + */ +#ifdef CONFIG_TULIP_DM910X + if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) || + ent->driver_data == PCI_DM9102_ID) { + struct device_node *dp = pci_device_to_OF_node(pdev); + + if (dp && of_get_property(dp, "local-mac-address", NULL)) { + pr_info("skipping on-board DM910x (use tulip)\n"); + return -ENODEV; + } + } +#endif + + /* Init network device */ + dev = alloc_etherdev(sizeof(*db)); + if (dev == NULL) + return -ENOMEM; + SET_NETDEV_DEV(dev, &pdev->dev); + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + pr_warn("32-bit PCI DMA not available\n"); + err = -ENODEV; + goto err_out_free; + } + + /* Enable Master/IO access, Disable memory access */ + err = pci_enable_device(pdev); + if (err) + goto err_out_free; + + if (!pci_resource_start(pdev, 0)) { + pr_err("I/O base is zero\n"); + err = -ENODEV; + goto err_out_disable; + } + + if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) { + pr_err("Allocated I/O size too small\n"); + err = -ENODEV; + goto err_out_disable; + } + +#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */ + + /* Set Latency Timer 80h */ + /* FIXME: setting values > 32 breaks some SiS 559x stuff. + Need a PCI quirk.. */ + + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80); +#endif + + if (pci_request_regions(pdev, DRV_NAME)) { + pr_err("Failed to request PCI regions\n"); + err = -ENODEV; + goto err_out_disable; + } + + /* Init system & device */ + db = netdev_priv(dev); + + /* Allocate Tx/Rx descriptor memory */ + db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * + DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); + if (!db->desc_pool_ptr) { + err = -ENOMEM; + goto err_out_res; + } + + db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * + TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); + if (!db->buf_pool_ptr) { + err = -ENOMEM; + goto err_out_free_desc; + } + + db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; + db->first_tx_desc_dma = db->desc_pool_dma_ptr; + db->buf_pool_start = db->buf_pool_ptr; + db->buf_pool_dma_start = db->buf_pool_dma_ptr; + + db->chip_id = ent->driver_data; + /* IO type range. 
*/ + db->ioaddr = pci_iomap(pdev, 0, 0); + if (!db->ioaddr) { + err = -ENOMEM; + goto err_out_free_buf; + } + + db->chip_revision = pdev->revision; + db->wol_mode = 0; + + db->pdev = pdev; + + pci_set_drvdata(pdev, dev); + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + netif_carrier_off(dev); + spin_lock_init(&db->lock); + + pci_read_config_dword(pdev, 0x50, &pci_pmr); + pci_pmr &= 0x70000; + if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) ) + db->chip_type = 1; /* DM9102A E3 */ + else + db->chip_type = 0; + + /* read 64 word srom data */ + for (i = 0; i < 64; i++) { + ((__le16 *) db->srom)[i] = + cpu_to_le16(read_srom_word(db->ioaddr, i)); + } + + /* Set Node address */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = db->srom[20 + i]; + + err = register_netdev (dev); + if (err) + goto err_out_unmap; + + dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n", + ent->driver_data >> 16, + pci_name(pdev), dev->dev_addr, pdev->irq); + + pci_set_master(pdev); + + return 0; + +err_out_unmap: + pci_iounmap(pdev, db->ioaddr); +err_out_free_buf: + pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, + db->buf_pool_ptr, db->buf_pool_dma_ptr); +err_out_free_desc: + pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, + db->desc_pool_ptr, db->desc_pool_dma_ptr); +err_out_res: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_free: + free_netdev(dev); + + return err; +} + + +static void dmfe_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct dmfe_board_info *db = netdev_priv(dev); + + DMFE_DBUG(0, "dmfe_remove_one()", 0); + + if (dev) { + + unregister_netdev(dev); + pci_iounmap(db->pdev, db->ioaddr); + pci_free_consistent(db->pdev, sizeof(struct tx_desc) * + DESC_ALL_CNT + 0x20, db->desc_pool_ptr, + db->desc_pool_dma_ptr); + pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, + db->buf_pool_ptr, db->buf_pool_dma_ptr); + pci_release_regions(pdev); + free_netdev(dev); /* free board information */ + } + + DMFE_DBUG(0, "dmfe_remove_one() exit", 0); +} + + +/* + * Open the interface. + * The interface is opened whenever "ifconfig" actives it. 
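+ * Opening the device requests the shared PCI interrupt, re-programs the
+ * chip through dmfe_init_dm910x() and arms the periodic media-sense
+ * timer (its first run is delayed by an extra two seconds).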
+ */ + +static int dmfe_open(struct DEVICE *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + const int irq = db->pdev->irq; + int ret; + + DMFE_DBUG(0, "dmfe_open", 0); + + ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev); + if (ret) + return ret; + + /* system variable init */ + db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set; + db->tx_packet_cnt = 0; + db->tx_queue_cnt = 0; + db->rx_avail_cnt = 0; + db->wait_reset = 0; + + db->first_in_callback = 0; + db->NIC_capability = 0xf; /* All capability*/ + db->PHY_reg4 = 0x1e0; + + /* CR6 operation mode decision */ + if ( !chkmode || (db->chip_id == PCI_DM9132_ID) || + (db->chip_revision >= 0x30) ) { + db->cr6_data |= DMFE_TXTH_256; + db->cr0_data = CR0_DEFAULT; + db->dm910x_chk_mode=4; /* Enter the normal mode */ + } else { + db->cr6_data |= CR6_SFT; /* Store & Forward mode */ + db->cr0_data = 0; + db->dm910x_chk_mode = 1; /* Enter the check mode */ + } + + /* Initialize DM910X board */ + dmfe_init_dm910x(dev); + + /* Active System Interface */ + netif_wake_queue(dev); + + /* set and active a timer process */ + init_timer(&db->timer); + db->timer.expires = DMFE_TIMER_WUT + HZ * 2; + db->timer.data = (unsigned long)dev; + db->timer.function = dmfe_timer; + add_timer(&db->timer); + + return 0; +} + + +/* Initialize DM910X board + * Reset DM910X board + * Initialize TX/Rx descriptor chain structure + * Send the set-up frame + * Enable Tx/Rx machine + */ + +static void dmfe_init_dm910x(struct DEVICE *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + + DMFE_DBUG(0, "dmfe_init_dm910x()", 0); + + /* Reset DM910x MAC controller */ + dw32(DCR0, DM910X_RESET); /* RESET MAC */ + udelay(100); + dw32(DCR0, db->cr0_data); + udelay(5); + + /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */ + db->phy_addr = 1; + + /* Parser SROM and media mode */ + dmfe_parse_srom(db); + db->media_mode = dmfe_media_mode; + + /* RESET Phyxcer Chip by GPR port bit 7 */ + dw32(DCR12, 0x180); /* Let bit 7 output port */ + if (db->chip_id == PCI_DM9009_ID) { + dw32(DCR12, 0x80); /* Issue RESET signal */ + mdelay(300); /* Delay 300 ms */ + } + dw32(DCR12, 0x0); /* Clear RESET signal */ + + /* Process Phyxcer Media Mode */ + if ( !(db->media_mode & 0x10) ) /* Force 1M mode */ + dmfe_set_phyxcer(db); + + /* Media Mode Process */ + if ( !(db->media_mode & DMFE_AUTO) ) + db->op_mode = db->media_mode; /* Force Mode */ + + /* Initialize Transmit/Receive descriptor and CR3/4 */ + dmfe_descriptor_init(dev); + + /* Init CR6 to program DM910x operation */ + update_cr6(db->cr6_data, ioaddr); + + /* Send setup frame */ + if (db->chip_id == PCI_DM9132_ID) + dm9132_id_table(dev); /* DM9132 */ + else + send_filter_frame(dev); /* DM9102/DM9102A */ + + /* Init CR7, interrupt active bit */ + db->cr7_data = CR7_DEFAULT; + dw32(DCR7, db->cr7_data); + + /* Init CR15, Tx jabber and Rx watchdog timer */ + dw32(DCR15, db->cr15_data); + + /* Enable DM910X Tx/Rx function */ + db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000; + update_cr6(db->cr6_data, ioaddr); +} + + +/* + * Hardware start transmission. + * Send a packet to media from the upper layer. 
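+ * Transmission here is copy-based: the payload is copied into the
+ * descriptor's pre-allocated buffer, the owner bit (bit 31 of tdes0)
+ * hands the descriptor to the chip, and a write to CR1 issues a Tx poll
+ * demand, so the skb itself can be consumed immediately.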
+ */ + +static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, + struct DEVICE *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + struct tx_desc *txptr; + unsigned long flags; + + DMFE_DBUG(0, "dmfe_start_xmit", 0); + + /* Too large packet check */ + if (skb->len > MAX_PACKET_SIZE) { + pr_err("big packet = %d\n", (u16)skb->len); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Resource flag check */ + netif_stop_queue(dev); + + spin_lock_irqsave(&db->lock, flags); + + /* No Tx resource check, it never happen nromally */ + if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { + spin_unlock_irqrestore(&db->lock, flags); + pr_err("No Tx resource %ld\n", db->tx_queue_cnt); + return NETDEV_TX_BUSY; + } + + /* Disable NIC interrupt */ + dw32(DCR7, 0); + + /* transmit this packet */ + txptr = db->tx_insert_ptr; + skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); + txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); + + /* Point to next transmit free descriptor */ + db->tx_insert_ptr = txptr->next_tx_desc; + + /* Transmit Packet Process */ + if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) { + txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ + db->tx_packet_cnt++; /* Ready to send */ + dw32(DCR1, 0x1); /* Issue Tx polling */ + dev->trans_start = jiffies; /* saved time stamp */ + } else { + db->tx_queue_cnt++; /* queue TX packet */ + dw32(DCR1, 0x1); /* Issue Tx polling */ + } + + /* Tx resource check */ + if ( db->tx_queue_cnt < TX_FREE_DESC_CNT ) + netif_wake_queue(dev); + + /* Restore CR7 to enable interrupt */ + spin_unlock_irqrestore(&db->lock, flags); + dw32(DCR7, db->cr7_data); + + /* free this SKB */ + dev_consume_skb_any(skb); + + return NETDEV_TX_OK; +} + + +/* + * Stop the interface. + * The interface is stopped when it is brought. 
+ */ + +static int dmfe_stop(struct DEVICE *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + + DMFE_DBUG(0, "dmfe_stop", 0); + + /* disable system */ + netif_stop_queue(dev); + + /* delete timer */ + del_timer_sync(&db->timer); + + /* Reset & stop DM910X board */ + dw32(DCR0, DM910X_RESET); + udelay(100); + dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); + + /* free interrupt */ + free_irq(db->pdev->irq, dev); + + /* free allocated rx buffer */ + dmfe_free_rxbuffer(db); + +#if 0 + /* show statistic counter */ + printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", + db->tx_fifo_underrun, db->tx_excessive_collision, + db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, + db->tx_jabber_timeout, db->reset_count, db->reset_cr8, + db->reset_fatal, db->reset_TXtimeout); +#endif + + return 0; +} + + +/* + * DM9102 interrupt handler + * Pass received packets to the upper layer, free transmitted packets + */ + +static irqreturn_t dmfe_interrupt(int irq, void *dev_id) +{ + struct DEVICE *dev = dev_id; + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + unsigned long flags; + + DMFE_DBUG(0, "dmfe_interrupt()", 0); + + spin_lock_irqsave(&db->lock, flags); + + /* Got DM910X status */ + db->cr5_data = dr32(DCR5); + dw32(DCR5, db->cr5_data); + if ( !(db->cr5_data & 0xc1) ) { + spin_unlock_irqrestore(&db->lock, flags); + return IRQ_HANDLED; + } + + /* Disable all interrupts in CR7 to solve the interrupt edge problem */ + dw32(DCR7, 0); + + /* Check system status */ + if (db->cr5_data & 0x2000) { + /* system bus error happened */ + DMFE_DBUG(1, "System bus error happened. CR5=", db->cr5_data); + db->reset_fatal++; + db->wait_reset = 1; /* Need to RESET */ + spin_unlock_irqrestore(&db->lock, flags); + return IRQ_HANDLED; + } + + /* Receive the incoming packets */ + if ( (db->cr5_data & 0x40) && db->rx_avail_cnt ) + dmfe_rx_packet(dev, db); + + /* reallocate rx descriptor buffer */ + if (db->rx_avail_cnt < RX_DESC_CNT) + allocate_rx_buffer(dev); + + /* Free the transmitted descriptors */ + if (db->cr5_data & 0x01) + dmfe_free_tx_pkt(dev, db); + + /* Mode Check */ + if (db->dm910x_chk_mode & 0x2) { + db->dm910x_chk_mode = 0x4; + db->cr6_data |= 0x100; + update_cr6(db->cr6_data, ioaddr); + } + + /* Restore CR7 to enable interrupt mask */ + dw32(DCR7, db->cr7_data); + + spin_unlock_irqrestore(&db->lock, flags); + return IRQ_HANDLED; +} + + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ + +static void poll_dmfe (struct net_device *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + const int irq = db->pdev->irq; + + /* disable_irq here is not very nice, but with the lockless + interrupt handler we have no other choice. 
*/ + disable_irq(irq); + dmfe_interrupt (irq, dev); + enable_irq(irq); +} +#endif + +/* + * Free TX resource after TX complete + */ + +static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) +{ + struct tx_desc *txptr; + void __iomem *ioaddr = db->ioaddr; + u32 tdes0; + + txptr = db->tx_remove_ptr; + while(db->tx_packet_cnt) { + tdes0 = le32_to_cpu(txptr->tdes0); + if (tdes0 & 0x80000000) + break; + + /* A packet sent completed */ + db->tx_packet_cnt--; + dev->stats.tx_packets++; + + /* Transmit statistic counter */ + if ( tdes0 != 0x7fffffff ) { + dev->stats.collisions += (tdes0 >> 3) & 0xf; + dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; + if (tdes0 & TDES0_ERR_MASK) { + dev->stats.tx_errors++; + + if (tdes0 & 0x0002) { /* UnderRun */ + db->tx_fifo_underrun++; + if ( !(db->cr6_data & CR6_SFT) ) { + db->cr6_data = db->cr6_data | CR6_SFT; + update_cr6(db->cr6_data, ioaddr); + } + } + if (tdes0 & 0x0100) + db->tx_excessive_collision++; + if (tdes0 & 0x0200) + db->tx_late_collision++; + if (tdes0 & 0x0400) + db->tx_no_carrier++; + if (tdes0 & 0x0800) + db->tx_loss_carrier++; + if (tdes0 & 0x4000) + db->tx_jabber_timeout++; + } + } + + txptr = txptr->next_tx_desc; + }/* End of while */ + + /* Update TX remove pointer to next */ + db->tx_remove_ptr = txptr; + + /* Send the Tx packet in queue */ + if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) { + txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ + db->tx_packet_cnt++; /* Ready to send */ + db->tx_queue_cnt--; + dw32(DCR1, 0x1); /* Issue Tx polling */ + dev->trans_start = jiffies; /* saved time stamp */ + } + + /* Resource available check */ + if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT ) + netif_wake_queue(dev); /* Active upper layer, send again */ +} + + +/* + * Calculate the CRC valude of the Rx packet + * flag = 1 : return the reverse CRC (for the received packet CRC) + * 0 : return the normal CRC (for Hash Table index) + */ + +static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag) +{ + u32 crc = crc32(~0, Data, Len); + if (flag) crc = ~crc; + return crc; +} + + +/* + * Receive the come packet and pass to upper layer + */ + +static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) +{ + struct rx_desc *rxptr; + struct sk_buff *skb, *newskb; + int rxlen; + u32 rdes0; + + rxptr = db->rx_ready_ptr; + + while(db->rx_avail_cnt) { + rdes0 = le32_to_cpu(rxptr->rdes0); + if (rdes0 & 0x80000000) /* packet owner check */ + break; + + db->rx_avail_cnt--; + db->interval_rx_cnt++; + + pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), + RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); + + if ( (rdes0 & 0x300) != 0x300) { + /* A packet without First/Last flag */ + /* reuse this SKB */ + DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0); + dmfe_reuse_skb(db, rxptr->rx_skb_ptr); + } else { + /* A packet with First/Last flag */ + rxlen = ( (rdes0 >> 16) & 0x3fff) - 4; + + /* error summary bit check */ + if (rdes0 & 0x8000) { + /* This is a error packet */ + dev->stats.rx_errors++; + if (rdes0 & 1) + dev->stats.rx_fifo_errors++; + if (rdes0 & 2) + dev->stats.rx_crc_errors++; + if (rdes0 & 0x80) + dev->stats.rx_length_errors++; + } + + if ( !(rdes0 & 0x8000) || + ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { + skb = rxptr->rx_skb_ptr; + + /* Received Packet CRC check need or not */ + if ( (db->dm910x_chk_mode & 1) && + (cal_CRC(skb->data, rxlen, 1) != + (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) 
*/ + /* Found a error received packet */ + dmfe_reuse_skb(db, rxptr->rx_skb_ptr); + db->dm910x_chk_mode = 3; + } else { + /* Good packet, send to upper layer */ + /* Shorst packet used new SKB */ + if ((rxlen < RX_COPY_SIZE) && + ((newskb = netdev_alloc_skb(dev, rxlen + 2)) + != NULL)) { + + skb = newskb; + /* size less than COPY_SIZE, allocate a rxlen SKB */ + skb_reserve(skb, 2); /* 16byte align */ + skb_copy_from_linear_data(rxptr->rx_skb_ptr, + skb_put(skb, rxlen), + rxlen); + dmfe_reuse_skb(db, rxptr->rx_skb_ptr); + } else + skb_put(skb, rxlen); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += rxlen; + } + } else { + /* Reuse SKB buffer when the packet is error */ + DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0); + dmfe_reuse_skb(db, rxptr->rx_skb_ptr); + } + } + + rxptr = rxptr->next_rx_desc; + } + + db->rx_ready_ptr = rxptr; +} + +/* + * Set DM910X multicast address + */ + +static void dmfe_set_filter_mode(struct DEVICE * dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + unsigned long flags; + int mc_count = netdev_mc_count(dev); + + DMFE_DBUG(0, "dmfe_set_filter_mode()", 0); + spin_lock_irqsave(&db->lock, flags); + + if (dev->flags & IFF_PROMISC) { + DMFE_DBUG(0, "Enable PROM Mode", 0); + db->cr6_data |= CR6_PM | CR6_PBF; + update_cr6(db->cr6_data, db->ioaddr); + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) { + DMFE_DBUG(0, "Pass all multicast address", mc_count); + db->cr6_data &= ~(CR6_PM | CR6_PBF); + db->cr6_data |= CR6_PAM; + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + DMFE_DBUG(0, "Set multicast address", mc_count); + if (db->chip_id == PCI_DM9132_ID) + dm9132_id_table(dev); /* DM9132 */ + else + send_filter_frame(dev); /* DM9102/DM9102A */ + spin_unlock_irqrestore(&db->lock, flags); +} + +/* + * Ethtool interace + */ + +static void dmfe_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct dmfe_board_info *np = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); +} + +static int dmfe_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct dmfe_board_info *db = netdev_priv(dev); + + if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + db->wol_mode = wolinfo->wolopts; + return 0; +} + +static void dmfe_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct dmfe_board_info *db = netdev_priv(dev); + + wolinfo->supported = WAKE_PHY | WAKE_MAGIC; + wolinfo->wolopts = db->wol_mode; +} + + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = dmfe_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .set_wol = dmfe_ethtool_set_wol, + .get_wol = dmfe_ethtool_get_wol, +}; + +/* + * A periodic timer routine + * Dynamic media sense, allocate Rx buffer... 
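+ * Each tick re-checks the link (CR12 on the chip and, as a cross-check,
+ * the PHY status register), kicks a stalled transmitter, schedules a
+ * dynamic reset after fatal errors and then re-arms itself.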
+ */ + +static void dmfe_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + u32 tmp_cr8; + unsigned char tmp_cr12; + unsigned long flags; + + int link_ok, link_ok_phy; + + DMFE_DBUG(0, "dmfe_timer()", 0); + spin_lock_irqsave(&db->lock, flags); + + /* Media mode process when Link OK before enter this route */ + if (db->first_in_callback == 0) { + db->first_in_callback = 1; + if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { + db->cr6_data &= ~0x40000; + update_cr6(db->cr6_data, ioaddr); + dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); + db->cr6_data |= 0x40000; + update_cr6(db->cr6_data, ioaddr); + db->timer.expires = DMFE_TIMER_WUT + HZ * 2; + add_timer(&db->timer); + spin_unlock_irqrestore(&db->lock, flags); + return; + } + } + + + /* Operating Mode Check */ + if ( (db->dm910x_chk_mode & 0x1) && + (dev->stats.rx_packets > MAX_CHECK_PACKET) ) + db->dm910x_chk_mode = 0x4; + + /* Dynamic reset DM910X : system error or transmit time-out */ + tmp_cr8 = dr32(DCR8); + if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { + db->reset_cr8++; + db->wait_reset = 1; + } + db->interval_rx_cnt = 0; + + /* TX polling kick monitor */ + if ( db->tx_packet_cnt && + time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) { + dw32(DCR1, 0x1); /* Tx polling again */ + + /* TX Timeout */ + if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) { + db->reset_TXtimeout++; + db->wait_reset = 1; + dev_warn(&dev->dev, "Tx timeout - resetting\n"); + } + } + + if (db->wait_reset) { + DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt); + db->reset_count++; + dmfe_dynamic_reset(dev); + db->first_in_callback = 0; + db->timer.expires = DMFE_TIMER_WUT; + add_timer(&db->timer); + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + /* Link status check, Dynamic media type change */ + if (db->chip_id == PCI_DM9132_ID) + tmp_cr12 = dr8(DCR9 + 3); /* DM9132 */ + else + tmp_cr12 = dr8(DCR12); /* DM9102/DM9102A */ + + if ( ((db->chip_id == PCI_DM9102_ID) && + (db->chip_revision == 0x30)) || + ((db->chip_id == PCI_DM9132_ID) && + (db->chip_revision == 0x10)) ) { + /* DM9102A Chip */ + if (tmp_cr12 & 2) + link_ok = 0; + else + link_ok = 1; + } + else + /*0x43 is used instead of 0x3 because bit 6 should represent + link status of external PHY */ + link_ok = (tmp_cr12 & 0x43) ? 1 : 0; + + + /* If chip reports that link is failed it could be because external + PHY link status pin is not connected correctly to chip + To be sure ask PHY too. + */ + + /* need a dummy read because of PHY's register latch*/ + dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); + link_ok_phy = (dmfe_phy_read (db->ioaddr, + db->phy_addr, 1, db->chip_id) & 0x4) ? 
1 : 0; + + if (link_ok_phy != link_ok) { + DMFE_DBUG (0, "PHY and chip report different link status", 0); + link_ok = link_ok | link_ok_phy; + } + + if ( !link_ok && netif_carrier_ok(dev)) { + /* Link Failed */ + DMFE_DBUG(0, "Link Failed", tmp_cr12); + netif_carrier_off(dev); + + /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ + /* AUTO or force 1M Homerun/Longrun don't need */ + if ( !(db->media_mode & 0x38) ) + dmfe_phy_write(db->ioaddr, db->phy_addr, + 0, 0x1000, db->chip_id); + + /* AUTO mode, if INT phyxcer link failed, select EXT device */ + if (db->media_mode & DMFE_AUTO) { + /* 10/100M link failed, used 1M Home-Net */ + db->cr6_data|=0x00040000; /* bit18=1, MII */ + db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ + update_cr6(db->cr6_data, ioaddr); + } + } else if (!netif_carrier_ok(dev)) { + + DMFE_DBUG(0, "Link link OK", tmp_cr12); + + /* Auto Sense Speed */ + if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) { + netif_carrier_on(dev); + SHOW_MEDIA_TYPE(db->op_mode); + } + + dmfe_process_mode(db); + } + + /* HPNA remote command check */ + if (db->HPNA_command & 0xf00) { + db->HPNA_timer--; + if (!db->HPNA_timer) + dmfe_HPNA_remote_cmd_chk(db); + } + + /* Timer active again */ + db->timer.expires = DMFE_TIMER_WUT; + add_timer(&db->timer); + spin_unlock_irqrestore(&db->lock, flags); +} + + +/* + * Dynamic reset the DM910X board + * Stop DM910X board + * Free Tx/Rx allocated memory + * Reset DM910X board + * Re-initialize DM910X board + */ + +static void dmfe_dynamic_reset(struct net_device *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + + DMFE_DBUG(0, "dmfe_dynamic_reset()", 0); + + /* Sopt MAC controller */ + db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ + update_cr6(db->cr6_data, ioaddr); + dw32(DCR7, 0); /* Disable Interrupt */ + dw32(DCR5, dr32(DCR5)); + + /* Disable upper layer interface */ + netif_stop_queue(dev); + + /* Free Rx Allocate buffer */ + dmfe_free_rxbuffer(db); + + /* system variable init */ + db->tx_packet_cnt = 0; + db->tx_queue_cnt = 0; + db->rx_avail_cnt = 0; + netif_carrier_off(dev); + db->wait_reset = 0; + + /* Re-initialize DM910X board */ + dmfe_init_dm910x(dev); + + /* Restart upper layer interface */ + netif_wake_queue(dev); +} + + +/* + * free all allocated rx buffer + */ + +static void dmfe_free_rxbuffer(struct dmfe_board_info * db) +{ + DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0); + + /* free allocated rx buffer */ + while (db->rx_avail_cnt) { + dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr); + db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc; + db->rx_avail_cnt--; + } +} + + +/* + * Reuse the SK buffer + */ + +static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb) +{ + struct rx_desc *rxptr = db->rx_insert_ptr; + + if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { + rxptr->rx_skb_ptr = skb; + rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, + skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); + wmb(); + rxptr->rdes0 = cpu_to_le32(0x80000000); + db->rx_avail_cnt++; + db->rx_insert_ptr = rxptr->next_rx_desc; + } else + DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt); +} + + +/* + * Initialize transmit/Receive descriptor + * Using Chain structure, and allocate Tx/Rx buffer + */ + +static void dmfe_descriptor_init(struct net_device *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + struct tx_desc *tmp_tx; + struct rx_desc *tmp_rx; + unsigned char *tmp_buf; + dma_addr_t tmp_tx_dma, 
tmp_rx_dma; + dma_addr_t tmp_buf_dma; + int i; + + DMFE_DBUG(0, "dmfe_descriptor_init()", 0); + + /* tx descriptor start pointer */ + db->tx_insert_ptr = db->first_tx_desc; + db->tx_remove_ptr = db->first_tx_desc; + dw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */ + + /* rx descriptor start pointer */ + db->first_rx_desc = (void *)db->first_tx_desc + + sizeof(struct tx_desc) * TX_DESC_CNT; + + db->first_rx_desc_dma = db->first_tx_desc_dma + + sizeof(struct tx_desc) * TX_DESC_CNT; + db->rx_insert_ptr = db->first_rx_desc; + db->rx_ready_ptr = db->first_rx_desc; + dw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */ + + /* Init Transmit chain */ + tmp_buf = db->buf_pool_start; + tmp_buf_dma = db->buf_pool_dma_start; + tmp_tx_dma = db->first_tx_desc_dma; + for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) { + tmp_tx->tx_buf_ptr = tmp_buf; + tmp_tx->tdes0 = cpu_to_le32(0); + tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */ + tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma); + tmp_tx_dma += sizeof(struct tx_desc); + tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma); + tmp_tx->next_tx_desc = tmp_tx + 1; + tmp_buf = tmp_buf + TX_BUF_ALLOC; + tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC; + } + (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma); + tmp_tx->next_tx_desc = db->first_tx_desc; + + /* Init Receive descriptor chain */ + tmp_rx_dma=db->first_rx_desc_dma; + for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) { + tmp_rx->rdes0 = cpu_to_le32(0); + tmp_rx->rdes1 = cpu_to_le32(0x01000600); + tmp_rx_dma += sizeof(struct rx_desc); + tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma); + tmp_rx->next_rx_desc = tmp_rx + 1; + } + (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma); + tmp_rx->next_rx_desc = db->first_rx_desc; + + /* pre-allocate Rx buffer */ + allocate_rx_buffer(dev); +} + + +/* + * Update CR6 value + * Firstly stop DM910X , then written value and start + */ + +static void update_cr6(u32 cr6_data, void __iomem *ioaddr) +{ + u32 cr6_tmp; + + cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */ + dw32(DCR6, cr6_tmp); + udelay(5); + dw32(DCR6, cr6_data); + udelay(5); +} + + +/* + * Send a setup frame for DM9132 + * This setup frame initialize DM910X address filter mode +*/ + +static void dm9132_id_table(struct net_device *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr + 0xc0; + u16 *addrptr = (u16 *)dev->dev_addr; + struct netdev_hw_addr *ha; + u16 i, hash_table[4]; + + /* Node address */ + for (i = 0; i < 3; i++) { + dw16(0, addrptr[i]); + ioaddr += 4; + } + + /* Clear Hash Table */ + memset(hash_table, 0, sizeof(hash_table)); + + /* broadcast address */ + hash_table[3] = 0x8000; + + /* the multicast address in Hash Table : 64 bits */ + netdev_for_each_mc_addr(ha, dev) { + u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f; + + hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); + } + + /* Write the hash table to MAC MD table */ + for (i = 0; i < 4; i++, ioaddr += 4) + dw16(0, hash_table[i]); +} + + +/* + * Send a setup frame for DM9102/DM9102A + * This setup frame initialize DM910X address filter mode + */ + +static void send_filter_frame(struct net_device *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + struct netdev_hw_addr *ha; + struct tx_desc *txptr; + u16 * addrptr; + u32 * suptr; + int i; + + DMFE_DBUG(0, "send_filter_frame()", 0); + + txptr = db->tx_insert_ptr; + suptr = (u32 *) txptr->tx_buf_ptr; + + /* Node address */ + addrptr = (u16 *) dev->dev_addr; + *suptr++ = addrptr[0]; + 
*suptr++ = addrptr[1]; + *suptr++ = addrptr[2]; + + /* broadcast address */ + *suptr++ = 0xffff; + *suptr++ = 0xffff; + *suptr++ = 0xffff; + + /* fit the multicast address */ + netdev_for_each_mc_addr(ha, dev) { + addrptr = (u16 *) ha->addr; + *suptr++ = addrptr[0]; + *suptr++ = addrptr[1]; + *suptr++ = addrptr[2]; + } + + for (i = netdev_mc_count(dev); i < 14; i++) { + *suptr++ = 0xffff; + *suptr++ = 0xffff; + *suptr++ = 0xffff; + } + + /* prepare the setup frame */ + db->tx_insert_ptr = txptr->next_tx_desc; + txptr->tdes1 = cpu_to_le32(0x890000c0); + + /* Resource Check and Send the setup packet */ + if (!db->tx_packet_cnt) { + void __iomem *ioaddr = db->ioaddr; + + /* Resource Empty */ + db->tx_packet_cnt++; + txptr->tdes0 = cpu_to_le32(0x80000000); + update_cr6(db->cr6_data | 0x2000, ioaddr); + dw32(DCR1, 0x1); /* Issue Tx polling */ + update_cr6(db->cr6_data, ioaddr); + dev->trans_start = jiffies; + } else + db->tx_queue_cnt++; /* Put in TX queue */ +} + + +/* + * Allocate rx buffer, + * As possible as allocate maxiumn Rx buffer + */ + +static void allocate_rx_buffer(struct net_device *dev) +{ + struct dmfe_board_info *db = netdev_priv(dev); + struct rx_desc *rxptr; + struct sk_buff *skb; + + rxptr = db->rx_insert_ptr; + + while(db->rx_avail_cnt < RX_DESC_CNT) { + if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL ) + break; + rxptr->rx_skb_ptr = skb; /* FIXME (?) */ + rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, + RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); + wmb(); + rxptr->rdes0 = cpu_to_le32(0x80000000); + rxptr = rxptr->next_rx_desc; + db->rx_avail_cnt++; + } + + db->rx_insert_ptr = rxptr; +} + +static void srom_clk_write(void __iomem *ioaddr, u32 data) +{ + static const u32 cmd[] = { + CR9_SROM_READ | CR9_SRCS, + CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, + CR9_SROM_READ | CR9_SRCS + }; + int i; + + for (i = 0; i < ARRAY_SIZE(cmd); i++) { + dw32(DCR9, data | cmd[i]); + udelay(5); + } +} + +/* + * Read one word data from the serial ROM + */ +static u16 read_srom_word(void __iomem *ioaddr, int offset) +{ + u16 srom_data; + int i; + + dw32(DCR9, CR9_SROM_READ); + udelay(5); + dw32(DCR9, CR9_SROM_READ | CR9_SRCS); + udelay(5); + + /* Send the Read Command 110b */ + srom_clk_write(ioaddr, SROM_DATA_1); + srom_clk_write(ioaddr, SROM_DATA_1); + srom_clk_write(ioaddr, SROM_DATA_0); + + /* Send the offset */ + for (i = 5; i >= 0; i--) { + srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; + srom_clk_write(ioaddr, srom_data); + } + + dw32(DCR9, CR9_SROM_READ | CR9_SRCS); + udelay(5); + + for (i = 16; i > 0; i--) { + dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK); + udelay(5); + srom_data = (srom_data << 1) | + ((dr32(DCR9) & CR9_CRDOUT) ? 
1 : 0); + dw32(DCR9, CR9_SROM_READ | CR9_SRCS); + udelay(5); + } + + dw32(DCR9, CR9_SROM_READ); + udelay(5); + return srom_data; +} + + +/* + * Auto sense the media mode + */ + +static u8 dmfe_sense_speed(struct dmfe_board_info *db) +{ + void __iomem *ioaddr = db->ioaddr; + u8 ErrFlag = 0; + u16 phy_mode; + + /* CR6 bit18=0, select 10/100M */ + update_cr6(db->cr6_data & ~0x40000, ioaddr); + + phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); + phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); + + if ( (phy_mode & 0x24) == 0x24 ) { + if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ + phy_mode = dmfe_phy_read(db->ioaddr, + db->phy_addr, 7, db->chip_id) & 0xf000; + else /* DM9102/DM9102A */ + phy_mode = dmfe_phy_read(db->ioaddr, + db->phy_addr, 17, db->chip_id) & 0xf000; + switch (phy_mode) { + case 0x1000: db->op_mode = DMFE_10MHF; break; + case 0x2000: db->op_mode = DMFE_10MFD; break; + case 0x4000: db->op_mode = DMFE_100MHF; break; + case 0x8000: db->op_mode = DMFE_100MFD; break; + default: db->op_mode = DMFE_10MHF; + ErrFlag = 1; + break; + } + } else { + db->op_mode = DMFE_10MHF; + DMFE_DBUG(0, "Link Failed :", phy_mode); + ErrFlag = 1; + } + + return ErrFlag; +} + + +/* + * Set 10/100 phyxcer capability + * AUTO mode : phyxcer register4 is NIC capability + * Force mode: phyxcer register4 is the force media + */ + +static void dmfe_set_phyxcer(struct dmfe_board_info *db) +{ + void __iomem *ioaddr = db->ioaddr; + u16 phy_reg; + + /* Select 10/100M phyxcer */ + db->cr6_data &= ~0x40000; + update_cr6(db->cr6_data, ioaddr); + + /* DM9009 Chip: Phyxcer reg18 bit12=0 */ + if (db->chip_id == PCI_DM9009_ID) { + phy_reg = dmfe_phy_read(db->ioaddr, + db->phy_addr, 18, db->chip_id) & ~0x1000; + + dmfe_phy_write(db->ioaddr, + db->phy_addr, 18, phy_reg, db->chip_id); + } + + /* Phyxcer capability setting */ + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; + + if (db->media_mode & DMFE_AUTO) { + /* AUTO Mode */ + phy_reg |= db->PHY_reg4; + } else { + /* Force Mode */ + switch(db->media_mode) { + case DMFE_10MHF: phy_reg |= 0x20; break; + case DMFE_10MFD: phy_reg |= 0x40; break; + case DMFE_100MHF: phy_reg |= 0x80; break; + case DMFE_100MFD: phy_reg |= 0x100; break; + } + if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61; + } + + /* Write new capability to Phyxcer Reg4 */ + if ( !(phy_reg & 0x01e0)) { + phy_reg|=db->PHY_reg4; + db->media_mode|=DMFE_AUTO; + } + dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); + + /* Restart Auto-Negotiation */ + if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) + dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); + if ( !db->chip_type ) + dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); +} + + +/* + * Process op-mode + * AUTO mode : PHY controller in Auto-negotiation Mode + * Force mode: PHY controller in force mode with HUB + * N-way force capability with SWITCH + */ + +static void dmfe_process_mode(struct dmfe_board_info *db) +{ + u16 phy_reg; + + /* Full Duplex Mode Check */ + if (db->op_mode & 0x4) + db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */ + else + db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */ + + /* Transciver Selection */ + if (db->op_mode & 0x10) /* 1M HomePNA */ + db->cr6_data |= 0x40000;/* External MII select */ + else + db->cr6_data &= ~0x40000;/* Internal 10/100 transciver */ + + update_cr6(db->cr6_data, db->ioaddr); + + /* 10/100M phyxcer force mode need */ + if ( !(db->media_mode & 0x18)) { + /* Forece Mode */ + phy_reg = 
dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); + if ( !(phy_reg & 0x1) ) { + /* parter without N-Way capability */ + phy_reg = 0x0; + switch(db->op_mode) { + case DMFE_10MHF: phy_reg = 0x0; break; + case DMFE_10MFD: phy_reg = 0x100; break; + case DMFE_100MHF: phy_reg = 0x2000; break; + case DMFE_100MFD: phy_reg = 0x2100; break; + } + dmfe_phy_write(db->ioaddr, + db->phy_addr, 0, phy_reg, db->chip_id); + if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) + mdelay(20); + dmfe_phy_write(db->ioaddr, + db->phy_addr, 0, phy_reg, db->chip_id); + } + } +} + + +/* + * Write a word to Phy register + */ + +static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, + u16 phy_data, u32 chip_id) +{ + u16 i; + + if (chip_id == PCI_DM9132_ID) { + dw16(0x80 + offset * 4, phy_data); + } else { + /* DM9102/DM9102A Chip */ + + /* Send 33 synchronization clock to Phy controller */ + for (i = 0; i < 35; i++) + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + + /* Send start command(01) to Phy */ + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + + /* Send write command(01) to Phy */ + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + + /* Send Phy address */ + for (i = 0x10; i > 0; i = i >> 1) + dmfe_phy_write_1bit(ioaddr, + phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + + /* Send register address */ + for (i = 0x10; i > 0; i = i >> 1) + dmfe_phy_write_1bit(ioaddr, + offset & i ? PHY_DATA_1 : PHY_DATA_0); + + /* written trasnition */ + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + + /* Write a word data to PHY controller */ + for ( i = 0x8000; i > 0; i >>= 1) + dmfe_phy_write_1bit(ioaddr, + phy_data & i ? PHY_DATA_1 : PHY_DATA_0); + } +} + + +/* + * Read a word data from phy register + */ + +static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) +{ + int i; + u16 phy_data; + + if (chip_id == PCI_DM9132_ID) { + /* DM9132 Chip */ + phy_data = dr16(0x80 + offset * 4); + } else { + /* DM9102/DM9102A Chip */ + + /* Send 33 synchronization clock to Phy controller */ + for (i = 0; i < 35; i++) + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + + /* Send start command(01) to Phy */ + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + + /* Send read command(10) to Phy */ + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + + /* Send Phy address */ + for (i = 0x10; i > 0; i = i >> 1) + dmfe_phy_write_1bit(ioaddr, + phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + + /* Send register address */ + for (i = 0x10; i > 0; i = i >> 1) + dmfe_phy_write_1bit(ioaddr, + offset & i ? 
PHY_DATA_1 : PHY_DATA_0); + + /* Skip transition state */ + dmfe_phy_read_1bit(ioaddr); + + /* read 16bit data */ + for (phy_data = 0, i = 0; i < 16; i++) { + phy_data <<= 1; + phy_data |= dmfe_phy_read_1bit(ioaddr); + } + } + + return phy_data; +} + + +/* + * Write one bit data to Phy Controller + */ + +static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data) +{ + dw32(DCR9, phy_data); /* MII Clock Low */ + udelay(1); + dw32(DCR9, phy_data | MDCLKH); /* MII Clock High */ + udelay(1); + dw32(DCR9, phy_data); /* MII Clock Low */ + udelay(1); +} + + +/* + * Read one bit phy data from PHY controller + */ + +static u16 dmfe_phy_read_1bit(void __iomem *ioaddr) +{ + u16 phy_data; + + dw32(DCR9, 0x50000); + udelay(1); + phy_data = (dr32(DCR9) >> 19) & 0x1; + dw32(DCR9, 0x40000); + udelay(1); + + return phy_data; +} + + +/* + * Parser SROM and media mode + */ + +static void dmfe_parse_srom(struct dmfe_board_info * db) +{ + char * srom = db->srom; + int dmfe_mode, tmp_reg; + + DMFE_DBUG(0, "dmfe_parse_srom() ", 0); + + /* Init CR15 */ + db->cr15_data = CR15_DEFAULT; + + /* Check SROM Version */ + if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) { + /* SROM V4.01 */ + /* Get NIC support media mode */ + db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34)); + db->PHY_reg4 = 0; + for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) { + switch( db->NIC_capability & tmp_reg ) { + case 0x1: db->PHY_reg4 |= 0x0020; break; + case 0x2: db->PHY_reg4 |= 0x0040; break; + case 0x4: db->PHY_reg4 |= 0x0080; break; + case 0x8: db->PHY_reg4 |= 0x0100; break; + } + } + + /* Media Mode Force or not check */ + dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) & + le32_to_cpup((__le32 *) (srom + 36))); + switch(dmfe_mode) { + case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */ + case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */ + case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */ + case 0x100: + case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */ + } + + /* Special Function setting */ + /* VLAN function */ + if ( (SF_mode & 0x1) || (srom[43] & 0x80) ) + db->cr15_data |= 0x40; + + /* Flow Control */ + if ( (SF_mode & 0x2) || (srom[40] & 0x1) ) + db->cr15_data |= 0x400; + + /* TX pause packet */ + if ( (SF_mode & 0x4) || (srom[40] & 0xe) ) + db->cr15_data |= 0x9800; + } + + /* Parse HPNA parameter */ + db->HPNA_command = 1; + + /* Accept remote command or not */ + if (HPNA_rx_cmd == 0) + db->HPNA_command |= 0x8000; + + /* Issue remote command & operation mode */ + if (HPNA_tx_cmd == 1) + switch(HPNA_mode) { /* Issue Remote Command */ + case 0: db->HPNA_command |= 0x0904; break; + case 1: db->HPNA_command |= 0x0a00; break; + case 2: db->HPNA_command |= 0x0506; break; + case 3: db->HPNA_command |= 0x0602; break; + } + else + switch(HPNA_mode) { /* Don't Issue */ + case 0: db->HPNA_command |= 0x0004; break; + case 1: db->HPNA_command |= 0x0000; break; + case 2: db->HPNA_command |= 0x0006; break; + case 3: db->HPNA_command |= 0x0002; break; + } + + /* Check DM9801 or DM9802 present or not */ + db->HPNA_present = 0; + update_cr6(db->cr6_data | 0x40000, db->ioaddr); + tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); + if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { + /* DM9801 or DM9802 present */ + db->HPNA_timer = 8; + if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { + /* DM9801 HomeRun */ + db->HPNA_present = 1; + dmfe_program_DM9801(db, tmp_reg); + } else { + /* DM9802 LongRun */ + db->HPNA_present = 2; + dmfe_program_DM9802(db); + } + } 
+ +} + + +/* + * Init HomeRun DM9801 + */ + +static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev) +{ + uint reg17, reg25; + + if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR; + switch(HPNA_rev) { + case 0xb900: /* DM9801 E3 */ + db->HPNA_command |= 0x1000; + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); + reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000; + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + break; + case 0xb901: /* DM9801 E4 */ + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); + reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor; + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3; + break; + case 0xb902: /* DM9801 E5 */ + case 0xb903: /* DM9801 E6 */ + default: + db->HPNA_command |= 0x1000; + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); + reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5; + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor; + break; + } + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); +} + + +/* + * Init HomeRun DM9802 + */ + +static void dmfe_program_DM9802(struct dmfe_board_info * db) +{ + uint phy_reg; + + if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR; + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); + phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor; + dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); +} + + +/* + * Check remote HPNA power and speed status. If not correct, + * issue command again. 
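+ * The periodic timer calls this check after 8 ticks; on a mismatch the
+ * command is issued again, and once the remote node matches, the check
+ * interval is relaxed to roughly ten minutes (600 ticks).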
+*/ + +static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) +{ + uint phy_reg; + + /* Got remote device status */ + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; + switch(phy_reg) { + case 0x00: phy_reg = 0x0a00;break; /* LP/LS */ + case 0x20: phy_reg = 0x0900;break; /* LP/HS */ + case 0x40: phy_reg = 0x0600;break; /* HP/LS */ + case 0x60: phy_reg = 0x0500;break; /* HP/HS */ + } + + /* Check remote device status match our setting ot not */ + if ( phy_reg != (db->HPNA_command & 0x0f00) ) { + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, + db->chip_id); + db->HPNA_timer=8; + } else + db->HPNA_timer=600; /* Match, every 10 minutes, check */ +} + + + +static const struct pci_device_id dmfe_pci_tbl[] = { + { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID }, + { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID }, + { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID }, + { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl); + + +#ifdef CONFIG_PM +static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct dmfe_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + u32 tmp; + + /* Disable upper layer interface */ + netif_device_detach(dev); + + /* Disable Tx/Rx */ + db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); + update_cr6(db->cr6_data, ioaddr); + + /* Disable Interrupt */ + dw32(DCR7, 0); + dw32(DCR5, dr32(DCR5)); + + /* Fre RX buffers */ + dmfe_free_rxbuffer(db); + + /* Enable WOL */ + pci_read_config_dword(pci_dev, 0x40, &tmp); + tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET); + + if (db->wol_mode & WAKE_PHY) + tmp |= DMFE_WOL_LINKCHANGE; + if (db->wol_mode & WAKE_MAGIC) + tmp |= DMFE_WOL_MAGICPACKET; + + pci_write_config_dword(pci_dev, 0x40, tmp); + + pci_enable_wake(pci_dev, PCI_D3hot, 1); + pci_enable_wake(pci_dev, PCI_D3cold, 1); + + /* Power down device*/ + pci_save_state(pci_dev); + pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state)); + + return 0; +} + +static int dmfe_resume(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + u32 tmp; + + pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); + + /* Re-initialize DM910X board */ + dmfe_init_dm910x(dev); + + /* Disable WOL */ + pci_read_config_dword(pci_dev, 0x40, &tmp); + + tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET); + pci_write_config_dword(pci_dev, 0x40, tmp); + + pci_enable_wake(pci_dev, PCI_D3hot, 0); + pci_enable_wake(pci_dev, PCI_D3cold, 0); + + /* Restart upper layer interface */ + netif_device_attach(dev); + + return 0; +} +#else +#define dmfe_suspend NULL +#define dmfe_resume NULL +#endif + +static struct pci_driver dmfe_driver = { + .name = "dmfe", + .id_table = dmfe_pci_tbl, + .probe = dmfe_init_one, + .remove = dmfe_remove_one, + .suspend = dmfe_suspend, + .resume = dmfe_resume +}; + +MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw"); +MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_param(debug, int, 0); +module_param(mode, byte, 0); +module_param(cr6set, int, 0); +module_param(chkmode, byte, 0); +module_param(HPNA_mode, byte, 0); +module_param(HPNA_rx_cmd, byte, 0); +module_param(HPNA_tx_cmd, byte, 0); +module_param(HPNA_NoiseFloor, byte, 0); +module_param(SF_mode, byte, 0); +MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging 
(0-1)"); +MODULE_PARM_DESC(mode, "Davicom DM9xxx: " + "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); + +MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function " + "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); + +/* Description: + * when user used insmod to add module, system invoked init_module() + * to initialize and register. + */ + +static int __init dmfe_init_module(void) +{ + int rc; + + pr_info("%s\n", version); + printed_version = 1; + + DMFE_DBUG(0, "init_module() ", debug); + + if (debug) + dmfe_debug = debug; /* set debug flag */ + if (cr6set) + dmfe_cr6_user_set = cr6set; + + switch(mode) { + case DMFE_10MHF: + case DMFE_100MHF: + case DMFE_10MFD: + case DMFE_100MFD: + case DMFE_1M_HPNA: + dmfe_media_mode = mode; + break; + default:dmfe_media_mode = DMFE_AUTO; + break; + } + + if (HPNA_mode > 4) + HPNA_mode = 0; /* Default: LP/HS */ + if (HPNA_rx_cmd > 1) + HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */ + if (HPNA_tx_cmd > 1) + HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */ + if (HPNA_NoiseFloor > 15) + HPNA_NoiseFloor = 0; + + rc = pci_register_driver(&dmfe_driver); + if (rc < 0) + return rc; + + return 0; +} + + +/* + * Description: + * when user used rmmod to delete module, system invoked clean_module() + * to un-register all registered services. + */ + +static void __exit dmfe_cleanup_module(void) +{ + DMFE_DBUG(0, "dmfe_cleanup_module() ", debug); + pci_unregister_driver(&dmfe_driver); +} + +module_init(dmfe_init_module); +module_exit(dmfe_cleanup_module); diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c new file mode 100644 index 000000000..1812f4916 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/eeprom.c @@ -0,0 +1,382 @@ +/* + drivers/net/ethernet/dec/tulip/eeprom.c + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Please submit bug reports to http://bugzilla.kernel.org/. +*/ + +#include +#include +#include "tulip.h" +#include + + + +/* Serial EEPROM section. */ +/* The main routine to parse the very complicated SROM structure. + Search www.digital.com for "21X4 SROM" to get details. + This code is very complex, and will require changes to support + additional cards, so I'll be verbose about what is going on. + */ + +/* Known cards that have old-style EEPROMs. 
*/ +static struct eeprom_fixup eeprom_fixups[] = { + {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c, + 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }}, + {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f, + 0x0000, 0x009E, /* 10baseT */ + 0x0004, 0x009E, /* 10baseT-FD */ + 0x0903, 0x006D, /* 100baseTx */ + 0x0905, 0x006D, /* 100baseTx-FD */ }}, + {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f, + 0x0107, 0x8021, /* 100baseFx */ + 0x0108, 0x8021, /* 100baseFx-FD */ + 0x0100, 0x009E, /* 10baseT */ + 0x0104, 0x009E, /* 10baseT-FD */ + 0x0103, 0x006D, /* 100baseTx */ + 0x0105, 0x006D, /* 100baseTx-FD */ }}, + {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513, + 0x1001, 0x009E, /* 10base2, CSR12 0x10*/ + 0x0000, 0x009E, /* 10baseT */ + 0x0004, 0x009E, /* 10baseT-FD */ + 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */ + 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}}, + {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F, + 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */ + 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */ + 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */ + 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */ + 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */ + }}, + {"NetWinder", 0x00, 0x10, 0x57, + /* Default media = MII + * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1 + */ + { 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 } + }, + {"Cobalt Microserver", 0, 0x10, 0xE0, {0x1e00, /* 0 == controller #, 1e == offset */ + 0x0000, /* 0 == high offset, 0 == gap */ + 0x0800, /* Default Autoselect */ + 0x8001, /* 1 leaf, extended type, bogus len */ + 0x0003, /* Type 3 (MII), PHY #0 */ + 0x0400, /* 0 init instr, 4 reset instr */ + 0x0801, /* Set control mode, GP0 output */ + 0x0000, /* Drive GP0 Low (RST is active low) */ + 0x0800, /* control mode, GP0 input (undriven) */ + 0x0000, /* clear control mode */ + 0x7800, /* 100TX FDX + HDX, 10bT FDX + HDX */ + 0x01e0, /* Advertise all above */ + 0x5000, /* FDX all above */ + 0x1800, /* Set fast TTM in 100bt modes */ + 0x0000, /* PHY cannot be unplugged */ + }}, + {NULL}}; + + +static const char *const block_name[] = { + "21140 non-MII", + "21140 MII PHY", + "21142 Serial PHY", + "21142 MII PHY", + "21143 SYM PHY", + "21143 reset method" +}; + + +/** + * tulip_build_fake_mediatable - Build a fake mediatable entry. + * @tp: Ptr to the tulip private data. + * + * Some cards like the 3x5 HSC cards (J3514A) do not have a standard + * srom and can not be handled under the fixup routine. These cards + * still need a valid mediatable entry for correct csr12 setup and + * mii handling. + * + * Since this is currently a parisc-linux specific function, the + * #ifdef __hppa__ should completely optimize this function away for + * non-parisc hardware. + */ +static void tulip_build_fake_mediatable(struct tulip_private *tp) +{ +#ifdef CONFIG_GSC + if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) { + static unsigned char leafdata[] = + { 0x01, /* phy number */ + 0x02, /* gpr setup sequence length */ + 0x02, 0x00, /* gpr setup sequence */ + 0x02, /* phy reset sequence length */ + 0x01, 0x00, /* phy reset sequence */ + 0x00, 0x78, /* media capabilities */ + 0x00, 0xe0, /* nway advertisement */ + 0x00, 0x05, /* fdx bit map */ + 0x00, 0x06 /* ttm bit map */ + }; + + tp->mtable = kmalloc(sizeof(struct mediatable) + + sizeof(struct medialeaf), GFP_KERNEL); + + if (tp->mtable == NULL) + return; /* Horrible, impossible failure. 
*/ + + tp->mtable->defaultmedia = 0x800; + tp->mtable->leafcount = 1; + tp->mtable->csr12dir = 0x3f; /* inputs on bit7 for hsc-pci, bit6 for pci-fx */ + tp->mtable->has_nonmii = 0; + tp->mtable->has_reset = 0; + tp->mtable->has_mii = 1; + tp->mtable->csr15dir = tp->mtable->csr15val = 0; + tp->mtable->mleaf[0].type = 1; + tp->mtable->mleaf[0].media = 11; + tp->mtable->mleaf[0].leafdata = &leafdata[0]; + tp->flags |= HAS_PHY_IRQ; + tp->csr12_shadow = -1; + } +#endif +} + +void tulip_parse_eeprom(struct net_device *dev) +{ + /* + dev is not registered at this point, so logging messages can't + use dev_ or netdev_ but dev->name is good via a + hack in the caller + */ + + /* The last media info list parsed, for multiport boards. */ + static struct mediatable *last_mediatable; + static unsigned char *last_ee_data; + static int controller_index; + struct tulip_private *tp = netdev_priv(dev); + unsigned char *ee_data = tp->eeprom; + int i; + + tp->mtable = NULL; + /* Detect an old-style (SA only) EEPROM layout: + memcmp(eedata, eedata+16, 8). */ + for (i = 0; i < 8; i ++) + if (ee_data[i] != ee_data[16+i]) + break; + if (i >= 8) { + if (ee_data[0] == 0xff) { + if (last_mediatable) { + controller_index++; + pr_info("%s: Controller %d of multiport board\n", + dev->name, controller_index); + tp->mtable = last_mediatable; + ee_data = last_ee_data; + goto subsequent_board; + } else + pr_info("%s: Missing EEPROM, this interface may not work correctly!\n", + dev->name); + return; + } + /* Do a fix-up based on the vendor half of the station address prefix. */ + for (i = 0; eeprom_fixups[i].name; i++) { + if (dev->dev_addr[0] == eeprom_fixups[i].addr0 && + dev->dev_addr[1] == eeprom_fixups[i].addr1 && + dev->dev_addr[2] == eeprom_fixups[i].addr2) { + if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55) + i++; /* An Accton EN1207, not an outlaw Maxtech. */ + memcpy(ee_data + 26, eeprom_fixups[i].newtable, + sizeof(eeprom_fixups[i].newtable)); + pr_info("%s: Old format EEPROM on '%s' board. Using substitute media control info\n", + dev->name, eeprom_fixups[i].name); + break; + } + } + if (eeprom_fixups[i].name == NULL) { /* No fixup found. */ + pr_info("%s: Old style EEPROM with no media selection information\n", + dev->name); + return; + } + } + + controller_index = 0; + if (ee_data[19] > 1) { /* Multiport board. */ + last_ee_data = ee_data; + } +subsequent_board: + + if (ee_data[27] == 0) { /* No valid media table. */ + tulip_build_fake_mediatable(tp); + } else { + unsigned char *p = (void *)ee_data + ee_data[27]; + unsigned char csr12dir = 0; + int count, new_advertise = 0; + struct mediatable *mtable; + u16 media = get_u16(p); + + p += 2; + if (tp->flags & CSR12_IN_SROM) + csr12dir = *p++; + count = *p++; + + /* there is no phy information, don't even try to build mtable */ + if (count == 0) { + if (tulip_debug > 0) + pr_warn("%s: no phy info, aborting mtable build\n", + dev->name); + return; + } + + mtable = kmalloc(sizeof(struct mediatable) + + count * sizeof(struct medialeaf), + GFP_KERNEL); + if (mtable == NULL) + return; /* Horrible, impossible failure. */ + last_mediatable = tp->mtable = mtable; + mtable->defaultmedia = media; + mtable->leafcount = count; + mtable->csr12dir = csr12dir; + mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0; + mtable->csr15dir = mtable->csr15val = 0; + + pr_info("%s: EEPROM default media type %s\n", + dev->name, + media & 0x0800 ? 
"Autosense" + : medianame[media & MEDIA_MASK]); + for (i = 0; i < count; i++) { + struct medialeaf *leaf = &mtable->mleaf[i]; + + if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */ + leaf->type = 0; + leaf->media = p[0] & 0x3f; + leaf->leafdata = p; + if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */ + mtable->has_mii = 1; + p += 4; + } else { + leaf->type = p[1]; + if (p[1] == 0x05) { + mtable->has_reset = i; + leaf->media = p[2] & 0x0f; + } else if (tp->chip_id == DM910X && p[1] == 0x80) { + /* Hack to ignore Davicom delay period block */ + mtable->leafcount--; + count--; + i--; + leaf->leafdata = p + 2; + p += (p[0] & 0x3f) + 1; + continue; + } else if (p[1] & 1) { + int gpr_len, reset_len; + + mtable->has_mii = 1; + leaf->media = 11; + gpr_len=p[3]*2; + reset_len=p[4+gpr_len]*2; + new_advertise |= get_u16(&p[7+gpr_len+reset_len]); + } else { + mtable->has_nonmii = 1; + leaf->media = p[2] & MEDIA_MASK; + /* Davicom's media number for 100BaseTX is strange */ + if (tp->chip_id == DM910X && leaf->media == 1) + leaf->media = 3; + switch (leaf->media) { + case 0: new_advertise |= 0x0020; break; + case 4: new_advertise |= 0x0040; break; + case 3: new_advertise |= 0x0080; break; + case 5: new_advertise |= 0x0100; break; + case 6: new_advertise |= 0x0200; break; + } + if (p[1] == 2 && leaf->media == 0) { + if (p[2] & 0x40) { + u32 base15 = get_unaligned((u16*)&p[7]); + mtable->csr15dir = + (get_unaligned((u16*)&p[9])<<16) + base15; + mtable->csr15val = + (get_unaligned((u16*)&p[11])<<16) + base15; + } else { + mtable->csr15dir = get_unaligned((u16*)&p[3])<<16; + mtable->csr15val = get_unaligned((u16*)&p[5])<<16; + } + } + } + leaf->leafdata = p + 2; + p += (p[0] & 0x3f) + 1; + } + if (tulip_debug > 1 && leaf->media == 11) { + unsigned char *bp = leaf->leafdata; + pr_info("%s: MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n", + dev->name, + bp[0], bp[1], bp[2 + bp[1]*2], + bp[5 + bp[2 + bp[1]*2]*2], + bp[4 + bp[2 + bp[1]*2]*2]); + } + pr_info("%s: Index #%d - Media %s (#%d) described by a %s (%d) block\n", + dev->name, + i, medianame[leaf->media & 15], leaf->media, + leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "", + leaf->type); + } + if (new_advertise) + tp->sym_advertise = new_advertise; + } +} +/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/ + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */ +#define EE_CS 0x01 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */ +#define EE_WRITE_0 0x01 +#define EE_WRITE_1 0x05 +#define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */ +#define EE_ENB (0x4800 | EE_CS) + +/* Delay between EEPROM clock transitions. + Even at 33Mhz current PCI implementations don't overrun the EEPROM clock. + We add a bus turn-around to insure that this remains true. */ +#define eeprom_delay() ioread32(ee_addr) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_READ_CMD (6) + +/* Note: this routine returns extra data bits for size detection. */ +int tulip_read_eeprom(struct net_device *dev, int location, int addr_len) +{ + int i; + unsigned retval = 0; + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ee_addr = tp->base_addr + CSR9; + int read_cmd = location | (EE_READ_CMD << addr_len); + + /* If location is past the end of what we can address, don't + * read some other location (ie truncate). Just return zero. 
+ */
+	if (location > (1 << addr_len) - 1)
+		return 0;
+
+	iowrite32(EE_ENB & ~EE_CS, ee_addr);
+	iowrite32(EE_ENB, ee_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 4 + addr_len; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+		iowrite32(EE_ENB | dataval, ee_addr);
+		eeprom_delay();
+		iowrite32(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
+	}
+	iowrite32(EE_ENB, ee_addr);
+	eeprom_delay();
+
+	for (i = 16; i > 0; i--) {
+		iowrite32(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
+		iowrite32(EE_ENB, ee_addr);
+		eeprom_delay();
+	}
+
+	/* Terminate the EEPROM access. */
+	iowrite32(EE_ENB & ~EE_CS, ee_addr);
+	return (tp->flags & HAS_SWAPPED_SEEPROM) ? swab16(retval) : retval;
+}
+
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
new file mode 100644
index 000000000..92306b320
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -0,0 +1,814 @@
+/*
+	drivers/net/ethernet/dec/tulip/interrupt.c
+
+	Copyright 2000,2001 The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please submit bugs to http://bugzilla.kernel.org/ .
+*/
+
+#include <linux/pci.h>
+#include "tulip.h"
+#include <linux/etherdevice.h>
+
+int tulip_rx_copybreak;
+unsigned int tulip_max_interrupt_work;
+
+#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
+#define MIT_SIZE 15
+#define MIT_TABLE 15 /* We use 0 or max */
+
+static unsigned int mit_table[MIT_SIZE+1] =
+{
+	/* CSR11 21143 hardware Mitigation Control Interrupt
+	   We use only RX mitigation; other techniques are used for
+	   TX interrupt mitigation.
+
+	   31    Cycle Size (timer control)
+	   30:27 TX timer in 16 * Cycle size
+	   26:24 TX No pkts before Int.
+	   23:20 RX timer in Cycle size
+	   19:17 RX No pkts before Int.
+	   16    Continuous Mode (CM)
+	*/
+
+	0x0,            /* IM disabled */
+	0x80150000,     /* RX time = 1, RX pkts = 2, CM = 1 */
+	0x80150000,
+	0x80270000,
+	0x80370000,
+	0x80490000,
+	0x80590000,
+	0x80690000,
+	0x807B0000,
+	0x808B0000,
+	0x809D0000,
+	0x80AD0000,
+	0x80BD0000,
+	0x80CF0000,
+	0x80DF0000,
+//	0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
+	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
+};
+#endif
+
+
+int tulip_refill_rx(struct net_device *dev)
+{
+	struct tulip_private *tp = netdev_priv(dev);
+	int entry;
+	int refilled = 0;
+
+	/* Refill the Rx ring buffers.
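+ * Walk the ring from dirty_rx up to cur_rx, attach a freshly allocated,
+ * DMA-mapped skb to any slot whose buffer was handed up the stack, and
+ * give each descriptor back to the chip by setting DescOwned.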
*/ + for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) { + entry = tp->dirty_rx % RX_RING_SIZE; + if (tp->rx_buffers[entry].skb == NULL) { + struct sk_buff *skb; + dma_addr_t mapping; + + skb = tp->rx_buffers[entry].skb = + netdev_alloc_skb(dev, PKT_BUF_SZ); + if (skb == NULL) + break; + + mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&tp->pdev->dev, mapping)) { + dev_kfree_skb(skb); + tp->rx_buffers[entry].skb = NULL; + break; + } + + tp->rx_buffers[entry].mapping = mapping; + + tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); + refilled++; + } + tp->rx_ring[entry].status = cpu_to_le32(DescOwned); + } + if(tp->chip_id == LC82C168) { + if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) { + /* Rx stopped due to out of buffers, + * restart it + */ + iowrite32(0x01, tp->base_addr + CSR2); + } + } + return refilled; +} + +#ifdef CONFIG_TULIP_NAPI + +void oom_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct tulip_private *tp = netdev_priv(dev); + napi_schedule(&tp->napi); +} + +int tulip_poll(struct napi_struct *napi, int budget) +{ + struct tulip_private *tp = container_of(napi, struct tulip_private, napi); + struct net_device *dev = tp->dev; + int entry = tp->cur_rx % RX_RING_SIZE; + int work_done = 0; +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION + int received = 0; +#endif + +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION + +/* that one buffer is needed for mit activation; or might be a + bug in the ring buffer code; check later -- JHS*/ + + if (budget >=RX_RING_SIZE) budget--; +#endif + + if (tulip_debug > 4) + netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n", + entry, tp->rx_ring[entry].status); + + do { + if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { + netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n"); + break; + } + /* Acknowledge current RX interrupt sources. */ + iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); + + + /* If we own the next entry, it is a new packet. Send it up. */ + while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { + s32 status = le32_to_cpu(tp->rx_ring[entry].status); + short pkt_len; + + if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) + break; + + if (tulip_debug > 5) + netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", + entry, status); + + if (++work_done >= budget) + goto not_done; + + /* + * Omit the four octet CRC from the length. + * (May not be considered valid until we have + * checked status for RxLengthOver2047 bits) + */ + pkt_len = ((status >> 16) & 0x7ff) - 4; + + /* + * Maximum pkt_len is 1518 (1514 + vlan header) + * Anything higher than this is always invalid + * regardless of RxLengthOver2047 bits + */ + + if ((status & (RxLengthOver2047 | + RxDescCRCError | + RxDescCollisionSeen | + RxDescRunt | + RxDescDescErr | + RxWholePkt)) != RxWholePkt || + pkt_len > 1518) { + if ((status & (RxLengthOver2047 | + RxWholePkt)) != RxWholePkt) { + /* Ingore earlier buffers. */ + if ((status & 0xffff) != 0x7fff) { + if (tulip_debug > 1) + dev_warn(&dev->dev, + "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", + status); + dev->stats.rx_length_errors++; + } + } else { + /* There was a fatal error. 
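+ * Charge it to the right counter: runts and over-length frames go to
+ * rx_length_errors, while status bits 2, 1 and 0 bump rx_frame_errors,
+ * rx_crc_errors and rx_fifo_errors respectively.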
*/ + if (tulip_debug > 2) + netdev_dbg(dev, "Receive error, Rx status %08x\n", + status); + dev->stats.rx_errors++; /* end of a packet.*/ + if (pkt_len > 1518 || + (status & RxDescRunt)) + dev->stats.rx_length_errors++; + + if (status & 0x0004) + dev->stats.rx_frame_errors++; + if (status & 0x0002) + dev->stats.rx_crc_errors++; + if (status & 0x0001) + dev->stats.rx_fifo_errors++; + } + } else { + struct sk_buff *skb; + + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < tulip_rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(tp->pdev, + tp->rx_buffers[entry].mapping, + pkt_len, PCI_DMA_FROMDEVICE); +#if ! defined(__alpha__) + skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, + pkt_len); + skb_put(skb, pkt_len); +#else + memcpy(skb_put(skb, pkt_len), + tp->rx_buffers[entry].skb->data, + pkt_len); +#endif + pci_dma_sync_single_for_device(tp->pdev, + tp->rx_buffers[entry].mapping, + pkt_len, PCI_DMA_FROMDEVICE); + } else { /* Pass up the skb already on the Rx ring. */ + char *temp = skb_put(skb = tp->rx_buffers[entry].skb, + pkt_len); + +#ifndef final_version + if (tp->rx_buffers[entry].mapping != + le32_to_cpu(tp->rx_ring[entry].buffer1)) { + dev_err(&dev->dev, + "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n", + le32_to_cpu(tp->rx_ring[entry].buffer1), + (unsigned long long)tp->rx_buffers[entry].mapping, + skb->head, temp); + } +#endif + + pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + + tp->rx_buffers[entry].skb = NULL; + tp->rx_buffers[entry].mapping = 0; + } + skb->protocol = eth_type_trans(skb, dev); + + netif_receive_skb(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION + received++; +#endif + + entry = (++tp->cur_rx) % RX_RING_SIZE; + if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) + tulip_refill_rx(dev); + + } + + /* New ack strategy... irq does not ack Rx any longer + hopefully this helps */ + + /* Really bad things can happen here... If new packet arrives + * and an irq arrives (tx or just due to occasionally unset + * mask), it will be acked by irq handler, but new thread + * is not scheduled. It is major hole in design. + * No idea how to fix this if "playing with fire" will fail + * tomorrow (night 011029). If it will not fail, we won + * finally: amount of IO did not increase at all. */ + } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); + + #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION + + /* We use this simplistic scheme for IM. It's proven by + real life installations. We can have IM enabled + continuesly but this would cause unnecessary latency. + Unfortunely we can't use all the NET_RX_* feedback here. + This would turn on IM for devices that is not contributing + to backlog congestion with unnecessary latency. + + We monitor the device RX-ring and have: + + HW Interrupt Mitigation either ON or OFF. + + ON: More then 1 pkt received (per intr.) OR we are dropping + OFF: Only 1 pkt received + + Note. We only use min and max (0, 15) settings from mit_table */ + + + if( tp->flags & HAS_INTR_MITIGATION) { + if( received > 1 ) { + if( ! 
tp->mit_on ) { + tp->mit_on = 1; + iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11); + } + } + else { + if( tp->mit_on ) { + tp->mit_on = 0; + iowrite32(0, tp->base_addr + CSR11); + } + } + } + +#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ + + tulip_refill_rx(dev); + + /* If RX ring is not full we are out of memory. */ + if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) + goto oom; + + /* Remove us from polling list and enable RX intr. */ + + napi_complete(napi); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); + + /* The last op happens after poll completion. Which means the following: + * 1. it can race with disabling irqs in irq handler + * 2. it can race with dise/enabling irqs in other poll threads + * 3. if an irq raised after beginning loop, it will be immediately + * triggered here. + * + * Summarizing: the logic results in some redundant irqs both + * due to races in masking and due to too late acking of already + * processed irqs. But it must not result in losing events. + */ + + return work_done; + + not_done: + if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || + tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) + tulip_refill_rx(dev); + + if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) + goto oom; + + return work_done; + + oom: /* Executed with RX ints disabled */ + + /* Start timer, stop polling, but do not enable rx interrupts. */ + mod_timer(&tp->oom_timer, jiffies+1); + + /* Think: timer_pending() was an explicit signature of bug. + * Timer can be pending now but fired and completed + * before we did napi_complete(). See? We would lose it. */ + + /* remove ourselves from the polling list */ + napi_complete(napi); + + return work_done; +} + +#else /* CONFIG_TULIP_NAPI */ + +static int tulip_rx(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + int entry = tp->cur_rx % RX_RING_SIZE; + int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; + int received = 0; + + if (tulip_debug > 4) + netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", + entry, tp->rx_ring[entry].status); + /* If we own the next entry, it is a new packet. Send it up. */ + while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { + s32 status = le32_to_cpu(tp->rx_ring[entry].status); + short pkt_len; + + if (tulip_debug > 5) + netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", + entry, status); + if (--rx_work_limit < 0) + break; + + /* + Omit the four octet CRC from the length. + (May not be considered valid until we have + checked status for RxLengthOver2047 bits) + */ + pkt_len = ((status >> 16) & 0x7ff) - 4; + /* + Maximum pkt_len is 1518 (1514 + vlan header) + Anything higher than this is always invalid + regardless of RxLengthOver2047 bits + */ + + if ((status & (RxLengthOver2047 | + RxDescCRCError | + RxDescCollisionSeen | + RxDescRunt | + RxDescDescErr | + RxWholePkt)) != RxWholePkt || + pkt_len > 1518) { + if ((status & (RxLengthOver2047 | + RxWholePkt)) != RxWholePkt) { + /* Ingore earlier buffers. */ + if ((status & 0xffff) != 0x7fff) { + if (tulip_debug > 1) + netdev_warn(dev, + "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", + status); + dev->stats.rx_length_errors++; + } + } else { + /* There was a fatal error. 
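+ * Same accounting as the NAPI path above: length, frame, CRC and
+ * FIFO error counters are bumped from the descriptor status bits.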
*/ + if (tulip_debug > 2) + netdev_dbg(dev, "Receive error, Rx status %08x\n", + status); + dev->stats.rx_errors++; /* end of a packet.*/ + if (pkt_len > 1518 || + (status & RxDescRunt)) + dev->stats.rx_length_errors++; + if (status & 0x0004) + dev->stats.rx_frame_errors++; + if (status & 0x0002) + dev->stats.rx_crc_errors++; + if (status & 0x0001) + dev->stats.rx_fifo_errors++; + } + } else { + struct sk_buff *skb; + + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < tulip_rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(tp->pdev, + tp->rx_buffers[entry].mapping, + pkt_len, PCI_DMA_FROMDEVICE); +#if ! defined(__alpha__) + skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, + pkt_len); + skb_put(skb, pkt_len); +#else + memcpy(skb_put(skb, pkt_len), + tp->rx_buffers[entry].skb->data, + pkt_len); +#endif + pci_dma_sync_single_for_device(tp->pdev, + tp->rx_buffers[entry].mapping, + pkt_len, PCI_DMA_FROMDEVICE); + } else { /* Pass up the skb already on the Rx ring. */ + char *temp = skb_put(skb = tp->rx_buffers[entry].skb, + pkt_len); + +#ifndef final_version + if (tp->rx_buffers[entry].mapping != + le32_to_cpu(tp->rx_ring[entry].buffer1)) { + dev_err(&dev->dev, + "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n", + le32_to_cpu(tp->rx_ring[entry].buffer1), + (long long)tp->rx_buffers[entry].mapping, + skb->head, temp); + } +#endif + + pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + + tp->rx_buffers[entry].skb = NULL; + tp->rx_buffers[entry].mapping = 0; + } + skb->protocol = eth_type_trans(skb, dev); + + netif_rx(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + received++; + entry = (++tp->cur_rx) % RX_RING_SIZE; + } + return received; +} +#endif /* CONFIG_TULIP_NAPI */ + +static inline unsigned int phy_interrupt (struct net_device *dev) +{ +#ifdef __hppa__ + struct tulip_private *tp = netdev_priv(dev); + int csr12 = ioread32(tp->base_addr + CSR12) & 0xff; + + if (csr12 != tp->csr12_shadow) { + /* ack interrupt */ + iowrite32(csr12 | 0x02, tp->base_addr + CSR12); + tp->csr12_shadow = csr12; + /* do link change stuff */ + spin_lock(&tp->lock); + tulip_check_duplex(dev); + spin_unlock(&tp->lock); + /* clear irq ack bit */ + iowrite32(csr12 & ~0x02, tp->base_addr + CSR12); + + return 1; + } +#endif + + return 0; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +irqreturn_t tulip_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *)dev_instance; + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int csr5; + int missed; + int rx = 0; + int tx = 0; + int oi = 0; + int maxrx = RX_RING_SIZE; + int maxtx = TX_RING_SIZE; + int maxoi = TX_RING_SIZE; +#ifdef CONFIG_TULIP_NAPI + int rxd = 0; +#else + int entry; +#endif + unsigned int work_count = tulip_max_interrupt_work; + unsigned int handled = 0; + + /* Let's see whether the interrupt really is for us */ + csr5 = ioread32(ioaddr + CSR5); + + if (tp->flags & HAS_PHY_IRQ) + handled = phy_interrupt (dev); + + if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) + return IRQ_RETVAL(handled); + + tp->nir++; + + do { + +#ifdef CONFIG_TULIP_NAPI + + if (!rxd && (csr5 & (RxIntr | RxNoBuf))) { + rxd++; + /* Mask RX intrs and add the device to poll list. 
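+ * CSR7 is loaded with valid_intrs minus RxPollInt, so further receive
+ * events are left to the NAPI poll loop rather than the hard irq handler.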
*/ + iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); + napi_schedule(&tp->napi); + + if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) + break; + } + + /* Acknowledge the interrupt sources we handle here ASAP + the poll function does Rx and RxNoBuf acking */ + + iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5); + +#else + /* Acknowledge all of the current interrupt sources ASAP. */ + iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5); + + + if (csr5 & (RxIntr | RxNoBuf)) { + rx += tulip_rx(dev); + tulip_refill_rx(dev); + } + +#endif /* CONFIG_TULIP_NAPI */ + + if (tulip_debug > 4) + netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n", + csr5, ioread32(ioaddr + CSR5)); + + + if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { + unsigned int dirty_tx; + + spin_lock(&tp->lock); + + for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; + dirty_tx++) { + int entry = dirty_tx % TX_RING_SIZE; + int status = le32_to_cpu(tp->tx_ring[entry].status); + + if (status < 0) + break; /* It still has not been Txed */ + + /* Check for Rx filter setup frames. */ + if (tp->tx_buffers[entry].skb == NULL) { + /* test because dummy frames not mapped */ + if (tp->tx_buffers[entry].mapping) + pci_unmap_single(tp->pdev, + tp->tx_buffers[entry].mapping, + sizeof(tp->setup_frame), + PCI_DMA_TODEVICE); + continue; + } + + if (status & 0x8000) { + /* There was an major error, log it. */ +#ifndef final_version + if (tulip_debug > 1) + netdev_dbg(dev, "Transmit error, Tx status %08x\n", + status); +#endif + dev->stats.tx_errors++; + if (status & 0x4104) + dev->stats.tx_aborted_errors++; + if (status & 0x0C00) + dev->stats.tx_carrier_errors++; + if (status & 0x0200) + dev->stats.tx_window_errors++; + if (status & 0x0002) + dev->stats.tx_fifo_errors++; + if ((status & 0x0080) && tp->full_duplex == 0) + dev->stats.tx_heartbeat_errors++; + } else { + dev->stats.tx_bytes += + tp->tx_buffers[entry].skb->len; + dev->stats.collisions += (status >> 3) & 15; + dev->stats.tx_packets++; + } + + pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, + tp->tx_buffers[entry].skb->len, + PCI_DMA_TODEVICE); + + /* Free the original skb. */ + dev_kfree_skb_irq(tp->tx_buffers[entry].skb); + tp->tx_buffers[entry].skb = NULL; + tp->tx_buffers[entry].mapping = 0; + tx++; + } + +#ifndef final_version + if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { + dev_err(&dev->dev, + "Out-of-sync dirty pointer, %d vs. %d\n", + dirty_tx, tp->cur_tx); + dirty_tx += TX_RING_SIZE; + } +#endif + + if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) + netif_wake_queue(dev); + + tp->dirty_tx = dirty_tx; + if (csr5 & TxDied) { + if (tulip_debug > 2) + dev_warn(&dev->dev, + "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n", + csr5, ioread32(ioaddr + CSR6), + tp->csr6); + tulip_restart_rxtx(tp); + } + spin_unlock(&tp->lock); + } + + /* Log errors. */ + if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ + if (csr5 == 0xffffffff) + break; + if (csr5 & TxJabber) + dev->stats.tx_errors++; + if (csr5 & TxFIFOUnderflow) { + if ((tp->csr6 & 0xC000) != 0xC000) + tp->csr6 += 0x4000; /* Bump up the Tx threshold */ + else + tp->csr6 |= 0x00200000; /* Store-n-forward. */ + /* Restart the transmit process. */ + tulip_restart_rxtx(tp); + iowrite32(0, ioaddr + CSR1); + } + if (csr5 & (RxDied | RxNoBuf)) { + if (tp->flags & COMET_MAC_ADDR) { + iowrite32(tp->mc_filter[0], ioaddr + 0xAC); + iowrite32(tp->mc_filter[1], ioaddr + 0xB0); + } + } + if (csr5 & RxDied) { /* Missed a Rx frame. 
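+ * Fold the CSR8 missed-frame counter into the statistics and restart
+ * the receive/transmit engines.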
*/ + dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; + dev->stats.rx_errors++; + tulip_start_rxtx(tp); + } + /* + * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this + * call is ever done under the spinlock + */ + if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { + if (tp->link_change) + (tp->link_change)(dev, csr5); + } + if (csr5 & SystemError) { + int error = (csr5 >> 23) & 7; + /* oops, we hit a PCI error. The code produced corresponds + * to the reason: + * 0 - parity error + * 1 - master abort + * 2 - target abort + * Note that on parity error, we should do a software reset + * of the chip to get it back into a sane state (according + * to the 21142/3 docs that is). + * -- rmk + */ + dev_err(&dev->dev, + "(%lu) System Error occurred (%d)\n", + tp->nir, error); + } + /* Clear all error sources, included undocumented ones! */ + iowrite32(0x0800f7ba, ioaddr + CSR5); + oi++; + } + if (csr5 & TimerInt) { + + if (tulip_debug > 2) + dev_err(&dev->dev, + "Re-enabling interrupts, %08x\n", + csr5); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); + tp->ttimer = 0; + oi++; + } + if (tx > maxtx || rx > maxrx || oi > maxoi) { + if (tulip_debug > 1) + dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n", + csr5, tp->nir, tx, rx, oi); + + /* Acknowledge all interrupt sources. */ + iowrite32(0x8001ffff, ioaddr + CSR5); + if (tp->flags & HAS_INTR_MITIGATION) { + /* Josip Loncaric at ICASE did extensive experimentation + to develop a good interrupt mitigation setting.*/ + iowrite32(0x8b240000, ioaddr + CSR11); + } else if (tp->chip_id == LC82C168) { + /* the LC82C168 doesn't have a hw timer.*/ + iowrite32(0x00, ioaddr + CSR7); + mod_timer(&tp->timer, RUN_AT(HZ/50)); + } else { + /* Mask all interrupting sources, set timer to + re-enable. */ + iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7); + iowrite32(0x0012, ioaddr + CSR11); + } + break; + } + + work_count--; + if (work_count == 0) + break; + + csr5 = ioread32(ioaddr + CSR5); + +#ifdef CONFIG_TULIP_NAPI + if (rxd) + csr5 &= ~RxPollInt; + } while ((csr5 & (TxNoBuf | + TxDied | + TxIntr | + TimerInt | + /* Abnormal intr. */ + RxDied | + TxFIFOUnderflow | + TxJabber | + TPLnkFail | + SystemError )) != 0); +#else + } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); + + tulip_refill_rx(dev); + + /* check if the card is in suspend mode */ + entry = tp->dirty_rx % RX_RING_SIZE; + if (tp->rx_buffers[entry].skb == NULL) { + if (tulip_debug > 1) + dev_warn(&dev->dev, + "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", + tp->nir, tp->cur_rx, tp->ttimer, rx); + if (tp->chip_id == LC82C168) { + iowrite32(0x00, ioaddr + CSR7); + mod_timer(&tp->timer, RUN_AT(HZ/50)); + } else { + if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) { + if (tulip_debug > 1) + dev_warn(&dev->dev, + "in rx suspend mode: (%lu) set timer\n", + tp->nir); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt, + ioaddr + CSR7); + iowrite32(TimerInt, ioaddr + CSR5); + iowrite32(12, ioaddr + CSR11); + tp->ttimer = 1; + } + } + } +#endif /* CONFIG_TULIP_NAPI */ + + if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) { + dev->stats.rx_dropped += missed & 0x10000 ? 
0x10000 : missed; + } + + if (tulip_debug > 4) + netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n", + ioread32(ioaddr + CSR5)); + + return IRQ_HANDLED; +} diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c new file mode 100644 index 000000000..dcf21a36a --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/media.c @@ -0,0 +1,552 @@ +/* + drivers/net/ethernet/dec/tulip/media.c + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Please submit bugs to http://bugzilla.kernel.org/ . +*/ + +#include +#include +#include +#include +#include "tulip.h" + + +/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back PCI I/O cycles, but we insert a delay to avoid + "overclocking" issues or future 66Mhz PCI. */ +#define mdio_delay() ioread32(mdio_addr) + +/* Read and write the MII registers using software-generated serial + MDIO protocol. It is just different enough from the EEPROM protocol + to not share code. The maxium data clock rate is 2.5 Mhz. */ +#define MDIO_SHIFT_CLK 0x10000 +#define MDIO_DATA_WRITE0 0x00000 +#define MDIO_DATA_WRITE1 0x20000 +#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */ +#define MDIO_ENB_IN 0x40000 +#define MDIO_DATA_READ 0x80000 + +static const unsigned char comet_miireg2offset[32] = { + 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0, + 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, }; + + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. + See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions") + or DP83840A data sheet for more details. + */ + +int tulip_mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct tulip_private *tp = netdev_priv(dev); + int i; + int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location; + int retval = 0; + void __iomem *ioaddr = tp->base_addr; + void __iomem *mdio_addr = ioaddr + CSR9; + unsigned long flags; + + if (location & ~0x1f) + return 0xffff; + + if (tp->chip_id == COMET && phy_id == 30) { + if (comet_miireg2offset[location]) + return ioread32(ioaddr + comet_miireg2offset[location]); + return 0xffff; + } + + spin_lock_irqsave(&tp->mii_lock, flags); + if (tp->chip_id == LC82C168) { + iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0); + ioread32(ioaddr + 0xA0); + ioread32(ioaddr + 0xA0); + for (i = 1000; i >= 0; --i) { + barrier(); + if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000)) + break; + } + spin_unlock_irqrestore(&tp->mii_lock, flags); + return retval & 0xffff; + } + + /* Establish sync by sending at least 32 logic ones. */ + for (i = 32; i >= 0; i--) { + iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); + mdio_delay(); + iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + /* Shift the read command bits out. */ + for (i = 15; i >= 0; i--) { + int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; + + iowrite32(MDIO_ENB | dataval, mdio_addr); + mdio_delay(); + iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 19; i > 0; i--) { + iowrite32(MDIO_ENB_IN, mdio_addr); + mdio_delay(); + retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 
1 : 0); + iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + + spin_unlock_irqrestore(&tp->mii_lock, flags); + return (retval>>1) & 0xffff; +} + +void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) +{ + struct tulip_private *tp = netdev_priv(dev); + int i; + int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); + void __iomem *ioaddr = tp->base_addr; + void __iomem *mdio_addr = ioaddr + CSR9; + unsigned long flags; + + if (location & ~0x1f) + return; + + if (tp->chip_id == COMET && phy_id == 30) { + if (comet_miireg2offset[location]) + iowrite32(val, ioaddr + comet_miireg2offset[location]); + return; + } + + spin_lock_irqsave(&tp->mii_lock, flags); + if (tp->chip_id == LC82C168) { + iowrite32(cmd, ioaddr + 0xA0); + for (i = 1000; i >= 0; --i) { + barrier(); + if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) + break; + } + spin_unlock_irqrestore(&tp->mii_lock, flags); + return; + } + + /* Establish sync by sending 32 logic ones. */ + for (i = 32; i >= 0; i--) { + iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); + mdio_delay(); + iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; + iowrite32(MDIO_ENB | dataval, mdio_addr); + mdio_delay(); + iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + /* Clear out extra bits. */ + for (i = 2; i > 0; i--) { + iowrite32(MDIO_ENB_IN, mdio_addr); + mdio_delay(); + iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); + mdio_delay(); + } + + spin_unlock_irqrestore(&tp->mii_lock, flags); +} + + +/* Set up the transceiver control registers for the selected media type. */ +void tulip_select_media(struct net_device *dev, int startup) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + struct mediatable *mtable = tp->mtable; + u32 new_csr6; + int i; + + if (mtable) { + struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index]; + unsigned char *p = mleaf->leafdata; + switch (mleaf->type) { + case 0: /* 21140 non-MII xcvr. */ + if (tulip_debug > 1) + netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n", + p[1]); + dev->if_port = p[0]; + if (startup) + iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); + iowrite32(p[1], ioaddr + CSR12); + new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18); + break; + case 2: case 4: { + u16 setup[5]; + u32 csr13val, csr14val, csr15dir, csr15val; + for (i = 0; i < 5; i++) + setup[i] = get_u16(&p[i*2 + 1]); + + dev->if_port = p[0] & MEDIA_MASK; + if (tulip_media_cap[dev->if_port] & MediaAlwaysFD) + tp->full_duplex = 1; + + if (startup && mtable->has_reset) { + struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; + unsigned char *rst = rleaf->leafdata; + if (tulip_debug > 1) + netdev_dbg(dev, "Resetting the transceiver\n"); + for (i = 0; i < rst[0]; i++) + iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); + } + if (tulip_debug > 1) + netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n", + medianame[dev->if_port], + setup[0], setup[1]); + if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. 
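+ * setup[0] and setup[1] are written to CSR13/CSR14, while setup[2..4]
+ * supply the CSR15 direction and data words.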
*/ + csr13val = setup[0]; + csr14val = setup[1]; + csr15dir = (setup[3]<<16) | setup[2]; + csr15val = (setup[4]<<16) | setup[2]; + iowrite32(0, ioaddr + CSR13); + iowrite32(csr14val, ioaddr + CSR14); + iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ + iowrite32(csr15val, ioaddr + CSR15); /* Data */ + iowrite32(csr13val, ioaddr + CSR13); + } else { + csr13val = 1; + csr14val = 0; + csr15dir = (setup[0]<<16) | 0x0008; + csr15val = (setup[1]<<16) | 0x0008; + if (dev->if_port <= 4) + csr14val = t21142_csr14[dev->if_port]; + if (startup) { + iowrite32(0, ioaddr + CSR13); + iowrite32(csr14val, ioaddr + CSR14); + } + iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ + iowrite32(csr15val, ioaddr + CSR15); /* Data */ + if (startup) iowrite32(csr13val, ioaddr + CSR13); + } + if (tulip_debug > 1) + netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n", + csr15dir, csr15val); + if (mleaf->type == 4) + new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); + else + new_csr6 = 0x82420000; + break; + } + case 1: case 3: { + int phy_num = p[0]; + int init_length = p[1]; + u16 *misc_info, tmp_info; + + dev->if_port = 11; + new_csr6 = 0x020E0000; + if (mleaf->type == 3) { /* 21142 */ + u16 *init_sequence = (u16*)(p+2); + u16 *reset_sequence = &((u16*)(p+3))[init_length]; + int reset_length = p[2 + init_length*2]; + misc_info = reset_sequence + reset_length; + if (startup) { + int timeout = 10; /* max 1 ms */ + for (i = 0; i < reset_length; i++) + iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15); + + /* flush posted writes */ + ioread32(ioaddr + CSR15); + + /* Sect 3.10.3 in DP83840A.pdf (p39) */ + udelay(500); + + /* Section 4.2 in DP83840A.pdf (p43) */ + /* and IEEE 802.3 "22.2.4.1.1 Reset" */ + while (timeout-- && + (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) + udelay(100); + } + for (i = 0; i < init_length; i++) + iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15); + + ioread32(ioaddr + CSR15); /* flush posted writes */ + } else { + u8 *init_sequence = p + 2; + u8 *reset_sequence = p + 3 + init_length; + int reset_length = p[2 + init_length]; + misc_info = (u16*)(reset_sequence + reset_length); + if (startup) { + int timeout = 10; /* max 1 ms */ + iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); + for (i = 0; i < reset_length; i++) + iowrite32(reset_sequence[i], ioaddr + CSR12); + + /* flush posted writes */ + ioread32(ioaddr + CSR12); + + /* Sect 3.10.3 in DP83840A.pdf (p39) */ + udelay(500); + + /* Section 4.2 in DP83840A.pdf (p43) */ + /* and IEEE 802.3 "22.2.4.1.1 Reset" */ + while (timeout-- && + (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) + udelay(100); + } + for (i = 0; i < init_length; i++) + iowrite32(init_sequence[i], ioaddr + CSR12); + + ioread32(ioaddr + CSR12); /* flush posted writes */ + } + + tmp_info = get_u16(&misc_info[1]); + if (tmp_info) + tp->advertising[phy_num] = tmp_info | 1; + if (tmp_info && startup < 2) { + if (tp->mii_advertise == 0) + tp->mii_advertise = tp->advertising[phy_num]; + if (tulip_debug > 1) + netdev_dbg(dev, " Advertising %04x on MII %d\n", + tp->mii_advertise, + tp->phys[phy_num]); + tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); + } + break; + } + case 5: case 6: { + u16 setup[5]; + + new_csr6 = 0; /* FIXME */ + + for (i = 0; i < 5; i++) + setup[i] = get_u16(&p[i*2 + 1]); + + if (startup && mtable->has_reset) { + struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; + unsigned char *rst = rleaf->leafdata; + if (tulip_debug > 1) + netdev_dbg(dev, "Resetting the transceiver\n"); + for (i = 0; 
i < rst[0]; i++) + iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); + } + + break; + } + default: + netdev_dbg(dev, " Invalid media table selection %d\n", + mleaf->type); + new_csr6 = 0x020E0000; + } + if (tulip_debug > 1) + netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n", + medianame[dev->if_port], + ioread32(ioaddr + CSR12) & 0xff); + } else if (tp->chip_id == LC82C168) { + if (startup && ! tp->medialock) + dev->if_port = tp->mii_cnt ? 11 : 0; + if (tulip_debug > 1) + netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n", + ioread32(ioaddr + 0xB8), + medianame[dev->if_port]); + if (tp->mii_cnt) { + new_csr6 = 0x810C0000; + iowrite32(0x0001, ioaddr + CSR15); + iowrite32(0x0201B07A, ioaddr + 0xB8); + } else if (startup) { + /* Start with 10mbps to do autonegotiation. */ + iowrite32(0x32, ioaddr + CSR12); + new_csr6 = 0x00420000; + iowrite32(0x0001B078, ioaddr + 0xB8); + iowrite32(0x0201B078, ioaddr + 0xB8); + } else if (dev->if_port == 3 || dev->if_port == 5) { + iowrite32(0x33, ioaddr + CSR12); + new_csr6 = 0x01860000; + /* Trigger autonegotiation. */ + iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8); + } else { + iowrite32(0x32, ioaddr + CSR12); + new_csr6 = 0x00420000; + iowrite32(0x1F078, ioaddr + 0xB8); + } + } else { /* Unknown chip type with no media table. */ + if (tp->default_port == 0) + dev->if_port = tp->mii_cnt ? 11 : 3; + if (tulip_media_cap[dev->if_port] & MediaIsMII) { + new_csr6 = 0x020E0000; + } else if (tulip_media_cap[dev->if_port] & MediaIsFx) { + new_csr6 = 0x02860000; + } else + new_csr6 = 0x03860000; + if (tulip_debug > 1) + netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n", + medianame[dev->if_port], + ioread32(ioaddr + CSR12)); + } + + tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); + + mdelay(1); +} + +/* + Check the MII negotiated duplex and change the CSR6 setting if + required. + Return 0 if everything is OK. + Return < 0 if the transceiver is missing or has no link beat. + */ +int tulip_check_duplex(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + unsigned int bmsr, lpa, negotiated, new_csr6; + + bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); + lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA); + if (tulip_debug > 1) + dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n", + bmsr, lpa); + if (bmsr == 0xffff) + return -2; + if ((bmsr & BMSR_LSTATUS) == 0) { + int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); + if ((new_bmsr & BMSR_LSTATUS) == 0) { + if (tulip_debug > 1) + dev_info(&dev->dev, + "No link beat on the MII interface, status %04x\n", + new_bmsr); + return -1; + } + } + negotiated = lpa & tp->advertising[0]; + tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated); + + new_csr6 = tp->csr6; + + if (negotiated & LPA_100) new_csr6 &= ~TxThreshold; + else new_csr6 |= TxThreshold; + if (tp->full_duplex) new_csr6 |= FullDuplex; + else new_csr6 &= ~FullDuplex; + + if (new_csr6 != tp->csr6) { + tp->csr6 = new_csr6; + tulip_restart_rxtx(tp); + + if (tulip_debug > 0) + dev_info(&dev->dev, + "Setting %s-duplex based on MII#%d link partner capability of %04x\n", + tp->full_duplex ? "full" : "half", + tp->phys[0], lpa); + return 1; + } + + return 0; +} + +void tulip_find_mii(struct net_device *dev, int board_idx) +{ + struct tulip_private *tp = netdev_priv(dev); + int phyn, phy_idx = 0; + int mii_reg0; + int mii_advert; + unsigned int to_advert, new_bmcr, ane_switch; + + /* Find the connected MII xcvrs. 
+ Doing this in open() would allow detecting external xcvrs later, + but takes much time. */ + for (phyn = 1; phyn <= 32 && phy_idx < ARRAY_SIZE(tp->phys); phyn++) { + int phy = phyn & 0x1f; + int mii_status = tulip_mdio_read (dev, phy, MII_BMSR); + if ((mii_status & 0x8301) == 0x8001 || + ((mii_status & BMSR_100BASE4) == 0 && + (mii_status & 0x7800) != 0)) { + /* preserve Becker logic, gain indentation level */ + } else { + continue; + } + + mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR); + mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE); + ane_switch = 0; + + /* if not advertising at all, gen an + * advertising value from the capability + * bits in BMSR + */ + if ((mii_advert & ADVERTISE_ALL) == 0) { + unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR); + mii_advert = ((tmpadv >> 6) & 0x3e0) | 1; + } + + if (tp->mii_advertise) { + tp->advertising[phy_idx] = + to_advert = tp->mii_advertise; + } else if (tp->advertising[phy_idx]) { + to_advert = tp->advertising[phy_idx]; + } else { + tp->advertising[phy_idx] = + tp->mii_advertise = + to_advert = mii_advert; + } + + tp->phys[phy_idx++] = phy; + + pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n", + board_idx, phy, mii_reg0, mii_status, mii_advert); + + /* Fixup for DLink with miswired PHY. */ + if (mii_advert != to_advert) { + pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n", + board_idx, to_advert, phy, mii_advert); + tulip_mdio_write (dev, phy, 4, to_advert); + } + + /* Enable autonegotiation: some boards default to off. */ + if (tp->default_port == 0) { + new_bmcr = mii_reg0 | BMCR_ANENABLE; + if (new_bmcr != mii_reg0) { + new_bmcr |= BMCR_ANRESTART; + ane_switch = 1; + } + } + /* ...or disable nway, if forcing media */ + else { + new_bmcr = mii_reg0 & ~BMCR_ANENABLE; + if (new_bmcr != mii_reg0) + ane_switch = 1; + } + + /* clear out bits we never want at this point */ + new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE | + BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK | + BMCR_RESET); + + if (tp->full_duplex) + new_bmcr |= BMCR_FULLDPLX; + if (tulip_media_cap[tp->default_port] & MediaIs100) + new_bmcr |= BMCR_SPEED100; + + if (new_bmcr != mii_reg0) { + /* some phys need the ANE switch to + * happen before forced media settings + * will "take." However, we write the + * same value twice in order not to + * confuse the sane phys. + */ + if (ane_switch) { + tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); + udelay (10); + } + tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); + } + } + tp->mii_cnt = phy_idx; + if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { + pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n", + board_idx); + tp->phys[0] = 1; + } +} diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c new file mode 100644 index 000000000..5364563c4 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/pnic.c @@ -0,0 +1,170 @@ +/* + drivers/net/ethernet/dec/tulip/pnic.c + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Please submit bugs to http://bugzilla.kernel.org/ . 
+*/ + +#include +#include +#include +#include "tulip.h" + + +void pnic_do_nway(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + u32 phy_reg = ioread32(ioaddr + 0xB8); + u32 new_csr6 = tp->csr6 & ~0x40C40200; + + if (phy_reg & 0x78000000) { /* Ignore baseT4 */ + if (phy_reg & 0x20000000) dev->if_port = 5; + else if (phy_reg & 0x40000000) dev->if_port = 3; + else if (phy_reg & 0x10000000) dev->if_port = 4; + else if (phy_reg & 0x08000000) dev->if_port = 0; + tp->nwayset = 1; + new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000; + iowrite32(0x32 | (dev->if_port & 1), ioaddr + CSR12); + if (dev->if_port & 1) + iowrite32(0x1F868, ioaddr + 0xB8); + if (phy_reg & 0x30000000) { + tp->full_duplex = 1; + new_csr6 |= 0x00000200; + } + if (tulip_debug > 1) + netdev_dbg(dev, "PNIC autonegotiated status %08x, %s\n", + phy_reg, medianame[dev->if_port]); + if (tp->csr6 != new_csr6) { + tp->csr6 = new_csr6; + /* Restart Tx */ + tulip_restart_rxtx(tp); + dev->trans_start = jiffies; + } + } +} + +void pnic_lnk_change(struct net_device *dev, int csr5) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int phy_reg = ioread32(ioaddr + 0xB8); + + if (tulip_debug > 1) + netdev_dbg(dev, "PNIC link changed state %08x, CSR5 %08x\n", + phy_reg, csr5); + if (ioread32(ioaddr + CSR5) & TPLnkFail) { + iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7); + /* If we use an external MII, then we mustn't use the + * internal negotiation. + */ + if (tulip_media_cap[dev->if_port] & MediaIsMII) + return; + if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) { + tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); + iowrite32(tp->csr6, ioaddr + CSR6); + iowrite32(0x30, ioaddr + CSR12); + iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */ + dev->trans_start = jiffies; + } + } else if (ioread32(ioaddr + CSR5) & TPLnkPass) { + if (tulip_media_cap[dev->if_port] & MediaIsMII) { + spin_lock(&tp->lock); + tulip_check_duplex(dev); + spin_unlock(&tp->lock); + } else { + pnic_do_nway(dev); + } + iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7); + } +} + +void pnic_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int next_tick = 60*HZ; + + if(!ioread32(ioaddr + CSR7)) { + /* the timer was called due to a work overflow + * in the interrupt handler. Skip the connection + * checks, the nic is definitively speaking with + * his link partner. 
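+ * (CSR7 reads back as zero here only because the interrupt handler
+ * masked every interrupt source after hitting its work limit.)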
+ */ + goto too_good_connection; + } + + if (tulip_media_cap[dev->if_port] & MediaIsMII) { + spin_lock_irq(&tp->lock); + if (tulip_check_duplex(dev) > 0) + next_tick = 3*HZ; + spin_unlock_irq(&tp->lock); + } else { + int csr12 = ioread32(ioaddr + CSR12); + int new_csr6 = tp->csr6 & ~0x40C40200; + int phy_reg = ioread32(ioaddr + 0xB8); + int csr5 = ioread32(ioaddr + CSR5); + + if (tulip_debug > 1) + netdev_dbg(dev, "PNIC timer PHY status %08x, %s CSR5 %08x\n", + phy_reg, medianame[dev->if_port], csr5); + if (phy_reg & 0x04000000) { /* Remote link fault */ + iowrite32(0x0201F078, ioaddr + 0xB8); + next_tick = 1*HZ; + tp->nwayset = 0; + } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */ + pnic_do_nway(dev); + next_tick = 60*HZ; + } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */ + if (tulip_debug > 1) + netdev_dbg(dev, "%s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n", + medianame[dev->if_port], + csr12, + ioread32(ioaddr + CSR5), + ioread32(ioaddr + 0xB8)); + next_tick = 3*HZ; + if (tp->medialock) { + } else if (tp->nwayset && (dev->if_port & 1)) { + next_tick = 1*HZ; + } else if (dev->if_port == 0) { + dev->if_port = 3; + iowrite32(0x33, ioaddr + CSR12); + new_csr6 = 0x01860000; + iowrite32(0x1F868, ioaddr + 0xB8); + } else { + dev->if_port = 0; + iowrite32(0x32, ioaddr + CSR12); + new_csr6 = 0x00420000; + iowrite32(0x1F078, ioaddr + 0xB8); + } + if (tp->csr6 != new_csr6) { + tp->csr6 = new_csr6; + /* Restart Tx */ + tulip_restart_rxtx(tp); + dev->trans_start = jiffies; + if (tulip_debug > 1) + dev_info(&dev->dev, + "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n", + medianame[dev->if_port], + tp->full_duplex ? "full" : "half", + new_csr6); + } + } + } +too_good_connection: + mod_timer(&tp->timer, RUN_AT(next_tick)); + if(!ioread32(ioaddr + CSR7)) { + if (tulip_debug > 1) + dev_info(&dev->dev, "sw timer wakeup\n"); + disable_irq(dev->irq); + tulip_refill_rx(dev); + enable_irq(dev->irq); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); + } +} diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c new file mode 100644 index 000000000..5895fc43f --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/pnic2.c @@ -0,0 +1,403 @@ +/* + drivers/net/ethernet/dec/tulip/pnic2.c + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + Modified to hep support PNIC_II by Kevin B. Hendricks + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Please submit bugs to http://bugzilla.kernel.org/ . +*/ + + +/* Understanding the PNIC_II - everything is this file is based + * on the PNIC_II_PDF datasheet which is sorely lacking in detail + * + * As I understand things, here are the registers and bits that + * explain the masks and constants used in this file that are + * either different from the 21142/3 or important for basic operation. 
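+ * The CSR6 mask 0xfe3bd1fd and CSR14 mask 0xfff0ee39 quoted below are the
+ * values pnic2_start_nway() applies before rewriting those registers.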
+ * + * + * CSR 6 (mask = 0xfe3bd1fd of bits not to change) + * ----- + * Bit 24 - SCR + * Bit 23 - PCS + * Bit 22 - TTM (Trasmit Threshold Mode) + * Bit 18 - Port Select + * Bit 13 - Start - 1, Stop - 0 Transmissions + * Bit 11:10 - Loop Back Operation Mode + * Bit 9 - Full Duplex mode (Advertise 10BaseT-FD is CSR14<7> is set) + * Bit 1 - Start - 1, Stop - 0 Receive + * + * + * CSR 14 (mask = 0xfff0ee39 of bits not to change) + * ------ + * Bit 19 - PAUSE-Pause + * Bit 18 - Advertise T4 + * Bit 17 - Advertise 100baseTx-FD + * Bit 16 - Advertise 100baseTx-HD + * Bit 12 - LTE - Link Test Enable + * Bit 7 - ANE - Auto Negotiate Enable + * Bit 6 - HDE - Advertise 10baseT-HD + * Bit 2 - Reset to Power down - kept as 1 for normal operation + * Bit 1 - Loop Back enable for 10baseT MCC + * + * + * CSR 12 + * ------ + * Bit 25 - Partner can do T4 + * Bit 24 - Partner can do 100baseTx-FD + * Bit 23 - Partner can do 100baseTx-HD + * Bit 22 - Partner can do 10baseT-FD + * Bit 21 - Partner can do 10baseT-HD + * Bit 15 - LPN is 1 if all above bits are valid other wise 0 + * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate) + * Bit 3 - Autopolarity state + * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed + * Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed + * + * + * Data Port Selection Info + *------------------------- + * + * CSR14<7> CSR6<18> CSR6<22> CSR6<23> CSR6<24> MODE/PORT + * 1 0 0 (X) 0 (X) 1 NWAY + * 0 0 1 0 (X) 0 10baseT + * 0 1 0 1 1 (X) 100baseT + * + * + */ + + + +#include "tulip.h" +#include + + +void pnic2_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int next_tick = 60*HZ; + + if (tulip_debug > 3) + dev_info(&dev->dev, "PNIC2 negotiation status %08x\n", + ioread32(ioaddr + CSR12)); + + if (next_tick) { + mod_timer(&tp->timer, RUN_AT(next_tick)); + } +} + + +void pnic2_start_nway(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int csr14; + int csr12; + + /* set up what to advertise during the negotiation */ + + /* load in csr14 and mask off bits not to touch + * comment at top of file explains mask value + */ + csr14 = (ioread32(ioaddr + CSR14) & 0xfff0ee39); + + /* bit 17 - advetise 100baseTx-FD */ + if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000; + + /* bit 16 - advertise 100baseTx-HD */ + if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000; + + /* bit 6 - advertise 10baseT-HD */ + if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040; + + /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable + * and bit 0 Don't PowerDown 10baseT + */ + csr14 |= 0x00001184; + + if (tulip_debug > 1) + netdev_dbg(dev, "Restarting PNIC2 autonegotiation, csr14=%08x\n", + csr14); + + /* tell pnic2_lnk_change we are doing an nway negotiation */ + dev->if_port = 0; + tp->nway = tp->mediasense = 1; + tp->nwayset = tp->lpar = 0; + + /* now we have to set up csr6 for NWAY state */ + + tp->csr6 = ioread32(ioaddr + CSR6); + if (tulip_debug > 1) + netdev_dbg(dev, "On Entry to Nway, csr6=%08x\n", tp->csr6); + + /* mask off any bits not to touch + * comment at top of file explains mask value + */ + tp->csr6 = tp->csr6 & 0xfe3bd1fd; + + /* don't forget that bit 9 is also used for advertising */ + /* advertise 10baseT-FD for the negotiation (bit 9) */ + if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200; + + /* set bit 24 for nway negotiation mode ... 
+ * see Data Port Selection comment at top of file + * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1) + */ + tp->csr6 |= 0x01000000; + iowrite32(csr14, ioaddr + CSR14); + iowrite32(tp->csr6, ioaddr + CSR6); + udelay(100); + + /* all set up so now force the negotiation to begin */ + + /* read in current values and mask off all but the + * Autonegotiation bits 14:12. Writing a 001 to those bits + * should start the autonegotiation + */ + csr12 = (ioread32(ioaddr + CSR12) & 0xffff8fff); + csr12 |= 0x1000; + iowrite32(csr12, ioaddr + CSR12); +} + + + +void pnic2_lnk_change(struct net_device *dev, int csr5) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int csr14; + + /* read the staus register to find out what is up */ + int csr12 = ioread32(ioaddr + CSR12); + + if (tulip_debug > 1) + dev_info(&dev->dev, + "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n", + csr12, csr5, ioread32(ioaddr + CSR14)); + + /* If NWay finished and we have a negotiated partner capability. + * check bits 14:12 for bit pattern 101 - all is good + */ + if (tp->nway && !tp->nwayset) { + + /* we did an auto negotiation */ + + if ((csr12 & 0x7000) == 0x5000) { + + /* negotiation ended successfully */ + + /* get the link partners reply and mask out all but + * bits 24-21 which show the partners capabilities + * and match those to what we advertised + * + * then begin to interpret the results of the negotiation. + * Always go in this order : (we are ignoring T4 for now) + * 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD + */ + + int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise; + tp->lpar = (csr12 >> 16); + tp->nwayset = 1; + + if (negotiated & 0x0100) dev->if_port = 5; + else if (negotiated & 0x0080) dev->if_port = 3; + else if (negotiated & 0x0040) dev->if_port = 4; + else if (negotiated & 0x0020) dev->if_port = 0; + else { + if (tulip_debug > 1) + dev_info(&dev->dev, + "funny autonegotiate result csr12 %08x advertising %04x\n", + csr12, tp->sym_advertise); + tp->nwayset = 0; + /* so check if 100baseTx link state is okay */ + if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180)) + dev->if_port = 3; + } + + /* now record the duplex that was negotiated */ + tp->full_duplex = 0; + if ((dev->if_port == 4) || (dev->if_port == 5)) + tp->full_duplex = 1; + + if (tulip_debug > 1) { + if (tp->nwayset) + dev_info(&dev->dev, + "Switching to %s based on link negotiation %04x & %04x = %04x\n", + medianame[dev->if_port], + tp->sym_advertise, tp->lpar, + negotiated); + } + + /* remember to turn off bit 7 - autonegotiate + * enable so we can properly end nway mode and + * set duplex (ie. use csr6<9> again) + */ + csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f); + iowrite32(csr14,ioaddr + CSR14); + + + /* now set the data port and operating mode + * (see the Data Port Selection comments at + * the top of the file + */ + + /* get current csr6 and mask off bits not to touch */ + /* see comment at top of file */ + + tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd); + + /* so if using if_port 3 or 5 then select the 100baseT + * port else select the 10baseT port. 
+ * See the Data Port Selection table at the top + * of the file which was taken from the PNIC_II.PDF + * datasheet + */ + if (dev->if_port & 1) tp->csr6 |= 0x01840000; + else tp->csr6 |= 0x00400000; + + /* now set the full duplex bit appropriately */ + if (tp->full_duplex) tp->csr6 |= 0x00000200; + + iowrite32(1, ioaddr + CSR13); + + if (tulip_debug > 2) + netdev_dbg(dev, "Setting CSR6 %08x/%x CSR12 %08x\n", + tp->csr6, + ioread32(ioaddr + CSR6), + ioread32(ioaddr + CSR12)); + + /* now the following actually writes out the + * new csr6 values + */ + tulip_start_rxtx(tp); + + return; + + } else { + dev_info(&dev->dev, + "Autonegotiation failed, using %s, link beat status %04x\n", + medianame[dev->if_port], csr12); + + /* remember to turn off bit 7 - autonegotiate + * enable so we don't forget + */ + csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f); + iowrite32(csr14,ioaddr + CSR14); + + /* what should we do when autonegotiate fails? + * should we try again or default to baseline + * case. I just don't know. + * + * for now default to some baseline case + */ + + dev->if_port = 0; + tp->nway = 0; + tp->nwayset = 1; + + /* set to 10baseTx-HD - see Data Port Selection + * comment given at the top of the file + */ + tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd); + tp->csr6 |= 0x00400000; + + tulip_restart_rxtx(tp); + + return; + + } + } + + if ((tp->nwayset && (csr5 & 0x08000000) && + (dev->if_port == 3 || dev->if_port == 5) && + (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) { + + /* Link blew? Maybe restart NWay. */ + + if (tulip_debug > 2) + netdev_dbg(dev, "Ugh! Link blew?\n"); + + del_timer_sync(&tp->timer); + pnic2_start_nway(dev); + tp->timer.expires = RUN_AT(3*HZ); + add_timer(&tp->timer); + + return; + } + + + if (dev->if_port == 3 || dev->if_port == 5) { + + /* we are at 100mb and a potential link change occurred */ + + if (tulip_debug > 1) + dev_info(&dev->dev, "PNIC2 %s link beat %s\n", + medianame[dev->if_port], + (csr12 & 2) ? "failed" : "good"); + + /* check 100 link beat */ + + tp->nway = 0; + tp->nwayset = 1; + + /* if failed then try doing an nway to get in sync */ + if ((csr12 & 2) && ! tp->medialock) { + del_timer_sync(&tp->timer); + pnic2_start_nway(dev); + tp->timer.expires = RUN_AT(3*HZ); + add_timer(&tp->timer); + } + + return; + } + + if (dev->if_port == 0 || dev->if_port == 4) { + + /* we are at 10mb and a potential link change occurred */ + + if (tulip_debug > 1) + dev_info(&dev->dev, "PNIC2 %s link beat %s\n", + medianame[dev->if_port], + (csr12 & 4) ? "failed" : "good"); + + + tp->nway = 0; + tp->nwayset = 1; + + /* if failed, try doing an nway to get in sync */ + if ((csr12 & 4) && ! 
tp->medialock) { + del_timer_sync(&tp->timer); + pnic2_start_nway(dev); + tp->timer.expires = RUN_AT(3*HZ); + add_timer(&tp->timer); + } + + return; + } + + + if (tulip_debug > 1) + dev_info(&dev->dev, "PNIC2 Link Change Default?\n"); + + /* if all else fails default to trying 10baseT-HD */ + dev->if_port = 0; + + /* make sure autonegotiate enable is off */ + csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f); + iowrite32(csr14,ioaddr + CSR14); + + /* set to 10baseTx-HD - see Data Port Selection + * comment given at the top of the file + */ + tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd); + tp->csr6 |= 0x00400000; + + tulip_restart_rxtx(tp); +} + diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c new file mode 100644 index 000000000..523d9dde5 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/timer.c @@ -0,0 +1,176 @@ +/* + drivers/net/ethernet/dec/tulip/timer.c + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Please submit bugs to http://bugzilla.kernel.org/ . +*/ + + +#include "tulip.h" + + +void tulip_media_task(struct work_struct *work) +{ + struct tulip_private *tp = + container_of(work, struct tulip_private, media_work); + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->base_addr; + u32 csr12 = ioread32(ioaddr + CSR12); + int next_tick = 2*HZ; + unsigned long flags; + + if (tulip_debug > 2) { + netdev_dbg(dev, "Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n", + medianame[dev->if_port], + ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6), + csr12, ioread32(ioaddr + CSR13), + ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); + } + switch (tp->chip_id) { + case DC21140: + case DC21142: + case MX98713: + case COMPEX9881: + case DM910X: + default: { + struct medialeaf *mleaf; + unsigned char *p; + if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */ + /* Not much that can be done. + Assume this a generic MII or SYM transceiver. */ + next_tick = 60*HZ; + if (tulip_debug > 2) + netdev_dbg(dev, "network media monitor CSR6 %08x CSR12 0x%02x\n", + ioread32(ioaddr + CSR6), + csr12 & 0xff); + break; + } + mleaf = &tp->mtable->mleaf[tp->cur_index]; + p = mleaf->leafdata; + switch (mleaf->type) { + case 0: case 4: { + /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */ + int offset = mleaf->type == 4 ? 5 : 2; + s8 bitnum = p[offset]; + if (p[offset+1] & 0x80) { + if (tulip_debug > 1) + netdev_dbg(dev, "Transceiver monitor tick CSR12=%#02x, no media sense\n", + csr12); + if (mleaf->type == 4) { + if (mleaf->media == 3 && (csr12 & 0x02)) + goto select_next_media; + } + break; + } + if (tulip_debug > 2) + netdev_dbg(dev, "Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n", + csr12, (bitnum >> 1) & 7, + (csr12 & (1 << ((bitnum >> 1) & 7))) != 0, + (bitnum >= 0)); + /* Check that the specified bit has the proper value. */ + if ((bitnum < 0) != + ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) { + if (tulip_debug > 2) + netdev_dbg(dev, "Link beat detected for %s\n", + medianame[mleaf->media & MEDIA_MASK]); + if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */ + goto actually_mii; + netif_carrier_on(dev); + break; + } + netif_carrier_off(dev); + if (tp->medialock) + break; + select_next_media: + if (--tp->cur_index < 0) { + /* We start again, but should instead look for default. 
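+ * (wrap cur_index around to the last leaf when it goes negative)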
*/ + tp->cur_index = tp->mtable->leafcount - 1; + } + dev->if_port = tp->mtable->mleaf[tp->cur_index].media; + if (tulip_media_cap[dev->if_port] & MediaIsFD) + goto select_next_media; /* Skip FD entries. */ + if (tulip_debug > 1) + netdev_dbg(dev, "No link beat on media %s, trying transceiver type %s\n", + medianame[mleaf->media & MEDIA_MASK], + medianame[tp->mtable->mleaf[tp->cur_index].media]); + tulip_select_media(dev, 0); + /* Restart the transmit process. */ + tulip_restart_rxtx(tp); + next_tick = (24*HZ)/10; + break; + } + case 1: case 3: /* 21140, 21142 MII */ + actually_mii: + if (tulip_check_duplex(dev) < 0) { + netif_carrier_off(dev); + next_tick = 3*HZ; + } else { + netif_carrier_on(dev); + next_tick = 60*HZ; + } + break; + case 2: /* 21142 serial block has no link beat. */ + default: + break; + } + } + break; + } + + + spin_lock_irqsave(&tp->lock, flags); + if (tp->timeout_recovery) { + tulip_tx_timeout_complete(tp, ioaddr); + tp->timeout_recovery = 0; + } + spin_unlock_irqrestore(&tp->lock, flags); + + /* mod_timer synchronizes us with potential add_timer calls + * from interrupts. + */ + mod_timer(&tp->timer, RUN_AT(next_tick)); +} + + +void mxic_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int next_tick = 60*HZ; + + if (tulip_debug > 3) { + dev_info(&dev->dev, "MXIC negotiation status %08x\n", + ioread32(ioaddr + CSR12)); + } + if (next_tick) { + mod_timer(&tp->timer, RUN_AT(next_tick)); + } +} + + +void comet_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct tulip_private *tp = netdev_priv(dev); + int next_tick = 2*HZ; + + if (tulip_debug > 1) + netdev_dbg(dev, "Comet link status %04x partner capability %04x\n", + tulip_mdio_read(dev, tp->phys[0], 1), + tulip_mdio_read(dev, tp->phys[0], 5)); + /* mod_timer synchronizes us with potential add_timer calls + * from interrupts. + */ + if (tulip_check_duplex(dev) < 0) + { netif_carrier_off(dev); } + else + { netif_carrier_on(dev); } + mod_timer(&tp->timer, RUN_AT(next_tick)); +} + diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h new file mode 100644 index 000000000..38431a155 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/tulip.h @@ -0,0 +1,570 @@ +/* + drivers/net/ethernet/dec/tulip/tulip.h + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Please submit bugs to http://bugzilla.kernel.org/ . +*/ + +#ifndef __NET_TULIP_H__ +#define __NET_TULIP_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +/* undefine, or define to various debugging levels (>4 == obscene levels) */ +#define TULIP_DEBUG 1 + +#ifdef CONFIG_TULIP_MMIO +#define TULIP_BAR 1 /* CBMA */ +#else +#define TULIP_BAR 0 /* CBIO */ +#endif + + + +struct tulip_chip_table { + char *chip_name; + int io_size; + int valid_intrs; /* CSR7 interrupt enable settings */ + int flags; + void (*media_timer) (unsigned long); + work_func_t media_task; +}; + + +enum tbl_flag { + HAS_MII = 0x00001, + HAS_MEDIA_TABLE = 0x00002, + CSR12_IN_SROM = 0x00004, + ALWAYS_CHECK_MII = 0x00008, + HAS_ACPI = 0x00010, + MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. 
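Chips flagged this way have no 16-entry perfect filter; set_rx_mode() in tulip_core.c programs their 64-bit multicast hash registers instead.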
*/ + HAS_PNICNWAY = 0x00080, + HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */ + HAS_INTR_MITIGATION = 0x00100, + IS_ASIX = 0x00200, + HAS_8023X = 0x00400, + COMET_MAC_ADDR = 0x00800, + HAS_PCI_MWI = 0x01000, + HAS_PHY_IRQ = 0x02000, + HAS_SWAPPED_SEEPROM = 0x04000, + NEEDS_FAKE_MEDIA_TABLE = 0x08000, + COMET_PM = 0x10000, +}; + + +/* chip types. careful! order is VERY IMPORTANT here, as these + * are used throughout the driver as indices into arrays */ +/* Note 21142 == 21143. */ +enum chips { + DC21040 = 0, + DC21041 = 1, + DC21140 = 2, + DC21142 = 3, DC21143 = 3, + LC82C168, + MX98713, + MX98715, + MX98725, + AX88140, + PNIC2, + COMET, + COMPEX9881, + I21145, + DM910X, + CONEXANT, +}; + + +enum MediaIs { + MediaIsFD = 1, + MediaAlwaysFD = 2, + MediaIsMII = 4, + MediaIsFx = 8, + MediaIs100 = 16 +}; + + +/* Offsets to the Command and Status Registers, "CSRs". All accesses + must be longword instructions and quadword aligned. */ +enum tulip_offsets { + CSR0 = 0, + CSR1 = 0x08, + CSR2 = 0x10, + CSR3 = 0x18, + CSR4 = 0x20, + CSR5 = 0x28, + CSR6 = 0x30, + CSR7 = 0x38, + CSR8 = 0x40, + CSR9 = 0x48, + CSR10 = 0x50, + CSR11 = 0x58, + CSR12 = 0x60, + CSR13 = 0x68, + CSR14 = 0x70, + CSR15 = 0x78, + CSR18 = 0x88, + CSR19 = 0x8c, + CSR20 = 0x90, + CSR27 = 0xAC, + CSR28 = 0xB0, +}; + +/* register offset and bits for CFDD PCI config reg */ +enum pci_cfg_driver_reg { + CFDD = 0x40, + CFDD_Sleep = (1 << 31), + CFDD_Snooze = (1 << 30), +}; + +#define RxPollInt (RxIntr|RxNoBuf|RxDied|RxJabber) + +/* The bits in the CSR5 status registers, mostly interrupt sources. */ +enum status_bits { + TimerInt = 0x800, + SystemError = 0x2000, + TPLnkFail = 0x1000, + TPLnkPass = 0x10, + NormalIntr = 0x10000, + AbnormalIntr = 0x8000, + RxJabber = 0x200, + RxDied = 0x100, + RxNoBuf = 0x80, + RxIntr = 0x40, + TxFIFOUnderflow = 0x20, + RxErrIntr = 0x10, + TxJabber = 0x08, + TxNoBuf = 0x04, + TxDied = 0x02, + TxIntr = 0x01, +}; + +/* bit mask for CSR5 TX/RX process state */ +#define CSR5_TS 0x00700000 +#define CSR5_RS 0x000e0000 + +enum tulip_mode_bits { + TxThreshold = (1 << 22), + FullDuplex = (1 << 9), + TxOn = 0x2000, + AcceptBroadcast = 0x0100, + AcceptAllMulticast = 0x0080, + AcceptAllPhys = 0x0040, + AcceptRunt = 0x0008, + RxOn = 0x0002, + RxTx = (TxOn | RxOn), +}; + + +enum tulip_busconfig_bits { + MWI = (1 << 24), + MRL = (1 << 23), + MRM = (1 << 21), + CALShift = 14, + BurstLenShift = 8, +}; + + +/* The Tulip Rx and Tx buffer descriptors. */ +struct tulip_rx_desc { + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; +}; + + +struct tulip_tx_desc { + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; /* We use only buffer 1. */ +}; + + +enum desc_status_bits { + DescOwned = 0x80000000, + DescWholePkt = 0x60000000, + DescEndPkt = 0x40000000, + DescStartPkt = 0x20000000, + DescEndRing = 0x02000000, + DescUseLink = 0x01000000, + + /* + * Error summary flag is logical or of 'CRC Error', 'Collision Seen', + * 'Frame Too Long', 'Runt' and 'Descriptor Error' flags generated + * within tulip chip. + */ + RxDescErrorSummary = 0x8000, + RxDescCRCError = 0x0002, + RxDescCollisionSeen = 0x0040, + + /* + * 'Frame Too Long' flag is set if packet length including CRC exceeds + * 1518. However, a full sized VLAN tagged frame is 1522 bytes + * including CRC. + * + * The tulip chip does not block oversized frames, and if this flag is + * set on a receive descriptor it does not indicate the frame has been + * truncated. The receive descriptor also includes the actual length. 
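The 14-bit frame length sits in status bits 29:16, so the receive path can read it directly.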
+ * Therefore we can safety ignore this flag and check the length + * ourselves. + */ + RxDescFrameTooLong = 0x0080, + RxDescRunt = 0x0800, + RxDescDescErr = 0x4000, + RxWholePkt = 0x00000300, + /* + * Top three bits of 14 bit frame length (status bits 27-29) should + * never be set as that would make frame over 2047 bytes. The Receive + * Watchdog flag (bit 4) may indicate the length is over 2048 and the + * length field is invalid. + */ + RxLengthOver2047 = 0x38000010 +}; + + +enum t21143_csr6_bits { + csr6_sc = (1<<31), + csr6_ra = (1<<30), + csr6_ign_dest_msb = (1<<26), + csr6_mbo = (1<<25), + csr6_scr = (1<<24), /* scramble mode flag: can't be set */ + csr6_pcs = (1<<23), /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */ + csr6_ttm = (1<<22), /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */ + csr6_sf = (1<<21), /* Store and forward. If set ignores TR bits */ + csr6_hbd = (1<<19), /* Heart beat disable. Disables SQE function in 10baseT */ + csr6_ps = (1<<18), /* Port Select. 0 (defualt) = 10baseT, 1 = 100baseTX: can't be set */ + csr6_ca = (1<<17), /* Collision Offset Enable. If set uses special algorithm in low collision situations */ + csr6_trh = (1<<15), /* Transmit Threshold high bit */ + csr6_trl = (1<<14), /* Transmit Threshold low bit */ + + /*************************************************************** + * This table shows transmit threshold values based on media * + * and these two registers (from PNIC1 & 2 docs) Note: this is * + * all meaningless if sf is set. * + ***************************************************************/ + + /*********************************** + * (trh,trl) * 100BaseTX * 10BaseT * + *********************************** + * (0,0) * 128 * 72 * + * (0,1) * 256 * 96 * + * (1,0) * 512 * 128 * + * (1,1) * 1024 * 160 * + ***********************************/ + + csr6_fc = (1<<12), /* Forces a collision in next transmission (for testing in loopback mode) */ + csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */ + csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */ + /* set both and you get (PHY) loopback */ + csr6_fd = (1<<9), /* Full duplex mode, disables hearbeat, no loopback */ + csr6_pm = (1<<7), /* Pass All Multicast */ + csr6_pr = (1<<6), /* Promiscuous mode */ + csr6_sb = (1<<5), /* Start(1)/Stop(0) backoff counter */ + csr6_if = (1<<4), /* Inverse Filtering, rejects only addresses in address table: can't be set */ + csr6_pb = (1<<3), /* Pass Bad Frames, (1) causes even bad frames to be passed on */ + csr6_ho = (1<<2), /* Hash-only filtering mode: can't be set */ + csr6_hp = (1<<0), /* Hash/Perfect Receive Filtering Mode: can't be set */ + + csr6_mask_capture = (csr6_sc | csr6_ca), + csr6_mask_defstate = (csr6_mask_capture | csr6_mbo), + csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps), + csr6_mask_hdcaptt = (csr6_mask_hdcap | csr6_trh | csr6_trl), + csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd), + csr6_mask_fullpromisc = (csr6_pr | csr6_pm), + csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if), + csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd), +}; + +enum tulip_comet_csr13_bits { +/* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. 
they + * determine which link status transition wakes up if LSCE is + * enabled */ + comet_csr13_linkoffe = (1 << 17), + comet_csr13_linkone = (1 << 16), + comet_csr13_wfre = (1 << 10), + comet_csr13_mpre = (1 << 9), + comet_csr13_lsce = (1 << 8), + comet_csr13_wfr = (1 << 2), + comet_csr13_mpr = (1 << 1), + comet_csr13_lsc = (1 << 0), +}; + +enum tulip_comet_csr18_bits { + comet_csr18_pmes_sticky = (1 << 24), + comet_csr18_pm_mode = (1 << 19), + comet_csr18_apm_mode = (1 << 18), + comet_csr18_d3a = (1 << 7) +}; + +enum tulip_comet_csr20_bits { + comet_csr20_pmes = (1 << 15), +}; + +/* Keep the ring sizes a power of two for efficiency. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings. */ + +#define TX_RING_SIZE 32 +#define RX_RING_SIZE 128 +#define MEDIA_MASK 31 + +/* The receiver on the DC21143 rev 65 can fail to close the last + * receive descriptor in certain circumstances (see errata) when + * using MWI. This can only occur if the receive buffer ends on + * a cache line boundary, so the "+ 4" below ensures it doesn't. + */ +#define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */ + +#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */ + +#if defined(__sparc__) || defined(__hppa__) +/* The UltraSparc PCI controllers will disconnect at every 64-byte + * crossing anyways so it makes no sense to tell Tulip to burst + * any more than that. + */ +#define TULIP_MAX_CACHE_LINE 16 /* in units of 32-bit words */ +#else +#define TULIP_MAX_CACHE_LINE 32 /* in units of 32-bit words */ +#endif + + +/* Ring-wrap flag in length field, use for last ring entry. + 0x01000000 means chain on buffer2 address, + 0x02000000 means use the ring start address in CSR2/3. + Note: Some work-alike chips do not function correctly in chained mode. + The ASIX chip works only in chained mode. + Thus we indicates ring mode, but always write the 'next' field for + chained mode as well. +*/ +#define DESC_RING_WRAP 0x02000000 + + +#define EEPROM_SIZE 512 /* 2 << EEPROM_ADDRLEN */ + + +#define RUN_AT(x) (jiffies + (x)) + +#define get_u16(ptr) get_unaligned_le16((ptr)) + +struct medialeaf { + u8 type; + u8 media; + unsigned char *leafdata; +}; + + +struct mediatable { + u16 defaultmedia; + u8 leafcount; + u8 csr12dir; /* General purpose pin directions. */ + unsigned has_mii:1; + unsigned has_nonmii:1; + unsigned has_reset:6; + u32 csr15dir; + u32 csr15val; /* 21143 NWay setting. */ + struct medialeaf mleaf[0]; +}; + + +struct mediainfo { + struct mediainfo *next; + int info_type; + int index; + unsigned char *info; +}; + +struct ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + + +struct tulip_private { + const char *product_name; + struct net_device *next_module; + struct tulip_rx_desc *rx_ring; + struct tulip_tx_desc *tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + struct ring_info tx_buffers[TX_RING_SIZE]; + /* The addresses of receive-in-place skbuffs. */ + struct ring_info rx_buffers[RX_RING_SIZE]; + u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */ + int chip_id; + int revision; + int flags; + struct napi_struct napi; + struct timer_list timer; /* Media selection timer. */ + struct timer_list oom_timer; /* Out of memory timer. 
*/ + u32 mc_filter[2]; + spinlock_t lock; + spinlock_t mii_lock; + unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ + +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION + int mit_on; +#endif + unsigned int full_duplex:1; /* Full-duplex operation requested. */ + unsigned int full_duplex_lock:1; + unsigned int fake_addr:1; /* Multiport board faked address. */ + unsigned int default_port:4; /* Last dev->if_port value. */ + unsigned int media2:4; /* Secondary monitored media port. */ + unsigned int medialock:1; /* Don't sense media type. */ + unsigned int mediasense:1; /* Media sensing in progress. */ + unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */ + unsigned int timeout_recovery:1; + unsigned int csr0; /* CSR0 setting. */ + unsigned int csr6; /* Current CSR6 control settings. */ + unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ + void (*link_change) (struct net_device * dev, int csr5); + struct ethtool_wolinfo wolinfo; /* WOL settings */ + u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */ + u16 lpar; /* 21143 Link partner ability. */ + u16 advertising[4]; + signed char phys[4], mii_cnt; /* MII device addresses. */ + struct mediatable *mtable; + int cur_index; /* Current media index. */ + int saved_if_port; + struct pci_dev *pdev; + int ttimer; + int susp_rx; + unsigned long nir; + void __iomem *base_addr; + int csr12_shadow; + int pad0; /* Used for 8-byte alignment */ + struct work_struct media_work; + struct net_device *dev; +}; + + +struct eeprom_fixup { + char *name; + unsigned char addr0; + unsigned char addr1; + unsigned char addr2; + u16 newtable[32]; /* Max length below. */ +}; + + +/* 21142.c */ +extern u16 t21142_csr14[]; +void t21142_media_task(struct work_struct *work); +void t21142_start_nway(struct net_device *dev); +void t21142_lnk_change(struct net_device *dev, int csr5); + + +/* PNIC2.c */ +void pnic2_lnk_change(struct net_device *dev, int csr5); +void pnic2_timer(unsigned long data); +void pnic2_start_nway(struct net_device *dev); +void pnic2_lnk_change(struct net_device *dev, int csr5); + +/* eeprom.c */ +void tulip_parse_eeprom(struct net_device *dev); +int tulip_read_eeprom(struct net_device *dev, int location, int addr_len); + +/* interrupt.c */ +extern unsigned int tulip_max_interrupt_work; +extern int tulip_rx_copybreak; +irqreturn_t tulip_interrupt(int irq, void *dev_instance); +int tulip_refill_rx(struct net_device *dev); +#ifdef CONFIG_TULIP_NAPI +int tulip_poll(struct napi_struct *napi, int budget); +#endif + + +/* media.c */ +int tulip_mdio_read(struct net_device *dev, int phy_id, int location); +void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int value); +void tulip_select_media(struct net_device *dev, int startup); +int tulip_check_duplex(struct net_device *dev); +void tulip_find_mii (struct net_device *dev, int board_idx); + +/* pnic.c */ +void pnic_do_nway(struct net_device *dev); +void pnic_lnk_change(struct net_device *dev, int csr5); +void pnic_timer(unsigned long data); + +/* timer.c */ +void tulip_media_task(struct work_struct *work); +void mxic_timer(unsigned long data); +void comet_timer(unsigned long data); + +/* tulip_core.c */ +extern int tulip_debug; +extern const char * const medianame[]; +extern const char tulip_media_cap[]; +extern struct tulip_chip_table tulip_tbl[]; +void oom_timer(unsigned long data); +extern u8 t21040_csr13[]; + +static inline void tulip_start_rxtx(struct tulip_private *tp) +{ + 
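	/* Set the Rx/Tx start bits (RxTx == TxOn | RxOn) in the shadowed CSR6
	 * value and write it out; the CSR6 read-back flushes the posted MMIO
	 * write so the chip has started before the caller continues.
	 */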
void __iomem *ioaddr = tp->base_addr; + iowrite32(tp->csr6 | RxTx, ioaddr + CSR6); + barrier(); + (void) ioread32(ioaddr + CSR6); /* mmio sync */ +} + +static inline void tulip_stop_rxtx(struct tulip_private *tp) +{ + void __iomem *ioaddr = tp->base_addr; + u32 csr6 = ioread32(ioaddr + CSR6); + + if (csr6 & RxTx) { + unsigned i=1300/10; + iowrite32(csr6 & ~RxTx, ioaddr + CSR6); + barrier(); + /* wait until in-flight frame completes. + * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin) + * Typically expect this loop to end in < 50 us on 100BT. + */ + while (--i && (ioread32(ioaddr + CSR5) & (CSR5_TS|CSR5_RS))) + udelay(10); + + if (!i) + netdev_dbg(tp->dev, "tulip_stop_rxtx() failed (CSR5 0x%x CSR6 0x%x)\n", + ioread32(ioaddr + CSR5), + ioread32(ioaddr + CSR6)); + } +} + +static inline void tulip_restart_rxtx(struct tulip_private *tp) +{ + tulip_stop_rxtx(tp); + udelay(5); + tulip_start_rxtx(tp); +} + +static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr) +{ + /* Stop and restart the chip's Tx processes. */ + tulip_restart_rxtx(tp); + /* Trigger an immediate transmit demand. */ + iowrite32(0, ioaddr + CSR1); + + tp->dev->stats.tx_errors++; +} + +#endif /* __NET_TULIP_H__ */ diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c new file mode 100644 index 000000000..ed41559ba --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -0,0 +1,2001 @@ +/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. + + Copyright 2000,2001 The Linux Kernel Team + Written/copyright 1994-2001 by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Please submit bugs to http://bugzilla.kernel.org/ . +*/ + +#define pr_fmt(fmt) "tulip: " fmt + +#define DRV_NAME "tulip" +#ifdef CONFIG_TULIP_NAPI +#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */ +#else +#define DRV_VERSION "1.1.15" +#endif +#define DRV_RELDATE "Feb 27, 2007" + + +#include +#include +#include +#include "tulip.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SPARC +#include +#endif + +static char version[] = + "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n"; + +/* A few user-configurable values. */ + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static unsigned int max_interrupt_work = 25; + +#define MAX_UNITS 8 +/* Used to pass the full-duplex flag, etc. */ +static int full_duplex[MAX_UNITS]; +static int options[MAX_UNITS]; +static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */ + +/* The possible media types that can be set in options[] are: */ +const char * const medianame[32] = { + "10baseT", "10base2", "AUI", "100baseTx", + "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx", + "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII", + "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4", + "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19", + "","","","", "","","","", "","","","Transceiver reset", +}; + +/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ +#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ + defined(CONFIG_SPARC) || defined(__ia64__) || \ + defined(__sh__) || defined(__mips__) +static int rx_copybreak = 1518; +#else +static int rx_copybreak = 100; +#endif + +/* + Set the bus performance register. 
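   For illustration, the x86/PowerPC default chosen below, 0x01A00000 | 0x8000,
   sets the MWI/MRL/MRM bus options (bits 24, 23 and 21), cache-alignment bits
   15:14 = 10b (16 longwords) and burst-length bits 13:8 = 0 (unlimited),
   i.e. exactly the typical setting described here.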
+ Typical: Set 16 longword cache alignment, no burst limit. + Cache alignment bits 15:14 Burst length 13:8 + 0000 No alignment 0x00000000 unlimited 0800 8 longwords + 4000 8 longwords 0100 1 longword 1000 16 longwords + 8000 16 longwords 0200 2 longwords 2000 32 longwords + C000 32 longwords 0400 4 longwords + Warning: many older 486 systems are broken and require setting 0x00A04800 + 8 longword cache alignment, 8 longword burst. + ToDo: Non-Intel setting could be better. +*/ + +#if defined(__alpha__) || defined(__ia64__) +static int csr0 = 0x01A00000 | 0xE000; +#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) +static int csr0 = 0x01A00000 | 0x8000; +#elif defined(CONFIG_SPARC) || defined(__hppa__) +/* The UltraSparc PCI controllers will disconnect at every 64-byte + * crossing anyways so it makes no sense to tell Tulip to burst + * any more than that. + */ +static int csr0 = 0x01A00000 | 0x9000; +#elif defined(__arm__) || defined(__sh__) +static int csr0 = 0x01A00000 | 0x4800; +#elif defined(__mips__) +static int csr0 = 0x00200000 | 0x4000; +#else +#warning Processor architecture undefined! +static int csr0 = 0x00A00000 | 0x4800; +#endif + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (4*HZ) + + +MODULE_AUTHOR("The Linux Kernel Team"); +MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +module_param(tulip_debug, int, 0); +module_param(max_interrupt_work, int, 0); +module_param(rx_copybreak, int, 0); +module_param(csr0, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); + +#ifdef TULIP_DEBUG +int tulip_debug = TULIP_DEBUG; +#else +int tulip_debug = 1; +#endif + +static void tulip_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct tulip_private *tp = netdev_priv(dev); + + if (netif_running(dev)) + schedule_work(&tp->media_work); +} + +/* + * This table use during operation for capabilities and media timer. 
+ * + * It is indexed via the values in 'enum chips' + */ + +struct tulip_chip_table tulip_tbl[] = { + { }, /* placeholder for array, slot unused currently */ + { }, /* placeholder for array, slot unused currently */ + + /* DC21140 */ + { "Digital DS21140 Tulip", 128, 0x0001ebef, + HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer, + tulip_media_task }, + + /* DC21142, DC21143 */ + { "Digital DS21142/43 Tulip", 128, 0x0801fbff, + HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY + | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task }, + + /* LC82C168 */ + { "Lite-On 82c168 PNIC", 256, 0x0001fbef, + HAS_MII | HAS_PNICNWAY, pnic_timer, }, + + /* MX98713 */ + { "Macronix 98713 PMAC", 128, 0x0001ebef, + HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, }, + + /* MX98715 */ + { "Macronix 98715 PMAC", 256, 0x0001ebef, + HAS_MEDIA_TABLE, mxic_timer, }, + + /* MX98725 */ + { "Macronix 98725 PMAC", 256, 0x0001ebef, + HAS_MEDIA_TABLE, mxic_timer, }, + + /* AX88140 */ + { "ASIX AX88140", 128, 0x0001fbff, + HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY + | IS_ASIX, tulip_timer, tulip_media_task }, + + /* PNIC2 */ + { "Lite-On PNIC-II", 256, 0x0801fbff, + HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, }, + + /* COMET */ + { "ADMtek Comet", 256, 0x0001abef, + HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, }, + + /* COMPEX9881 */ + { "Compex 9881 PMAC", 128, 0x0001ebef, + HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, }, + + /* I21145 */ + { "Intel DS21145 Tulip", 128, 0x0801fbff, + HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI + | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task }, + + /* DM910X */ +#ifdef CONFIG_TULIP_DM910X + { "Davicom DM9102/DM9102A", 128, 0x0001ebef, + HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, + tulip_timer, tulip_media_task }, +#else + { NULL }, +#endif + + /* RS7112 */ + { "Conexant LANfinity", 256, 0x0001ebef, + HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task }, + +}; + + +static const struct pci_device_id tulip_pci_tbl[] = { + { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 }, + { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 }, + { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 }, + { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 }, + { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 }, +/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/ + { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 }, + { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 }, + { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 }, + { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 }, +#ifdef CONFIG_TULIP_DM910X + { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, + { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, +#endif + { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1113, 0x1217, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, MX98715 }, + { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT }, + { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ + { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ + { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */ + { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, tulip_pci_tbl); + + +/* A full-duplex map for media types. */ +const char tulip_media_cap[32] = +{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, }; + +static void tulip_tx_timeout(struct net_device *dev); +static void tulip_init_ring(struct net_device *dev); +static void tulip_free_ring(struct net_device *dev); +static netdev_tx_t tulip_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int tulip_open(struct net_device *dev); +static int tulip_close(struct net_device *dev); +static void tulip_up(struct net_device *dev); +static void tulip_down(struct net_device *dev); +static struct net_device_stats *tulip_get_stats(struct net_device *dev); +static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void set_rx_mode(struct net_device *dev); +static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void poll_tulip(struct net_device *dev); +#endif + +static void tulip_set_power_state (struct tulip_private *tp, + int sleep, int snooze) +{ + if (tp->flags & HAS_ACPI) { + u32 tmp, newtmp; + pci_read_config_dword (tp->pdev, CFDD, &tmp); + newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze); + if (sleep) + newtmp |= CFDD_Sleep; + else if (snooze) + newtmp |= CFDD_Snooze; + if (tmp != newtmp) + pci_write_config_dword (tp->pdev, CFDD, newtmp); + } + +} + + +static void tulip_up(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int next_tick = 3*HZ; + u32 reg; + int i; + +#ifdef CONFIG_TULIP_NAPI + napi_enable(&tp->napi); +#endif + + /* Wake the chip from sleep/snooze mode. */ + tulip_set_power_state (tp, 0, 0); + + /* Disable all WOL events */ + pci_enable_wake(tp->pdev, PCI_D3hot, 0); + pci_enable_wake(tp->pdev, PCI_D3cold, 0); + tulip_set_wolopts(tp->pdev, 0); + + /* On some chip revs we must set the MII/SYM port before the reset!? */ + if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) + iowrite32(0x00040000, ioaddr + CSR6); + + /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */ + iowrite32(0x00000001, ioaddr + CSR0); + pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */ + udelay(100); + + /* Deassert reset. + Wait the specified 50 PCI cycles after a reset by initializing + Tx and Rx queues and the address filter list. 
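   The PCI config read after each CSR0 write flushes the posted write, so the
   100 us delay only begins once the chip has actually seen the reset (and,
   afterwards, the deassert).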
*/ + iowrite32(tp->csr0, ioaddr + CSR0); + pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */ + udelay(100); + + if (tulip_debug > 1) + netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq); + + iowrite32(tp->rx_ring_dma, ioaddr + CSR3); + iowrite32(tp->tx_ring_dma, ioaddr + CSR4); + tp->cur_rx = tp->cur_tx = 0; + tp->dirty_rx = tp->dirty_tx = 0; + + if (tp->flags & MC_HASH_ONLY) { + u32 addr_low = get_unaligned_le32(dev->dev_addr); + u32 addr_high = get_unaligned_le16(dev->dev_addr + 4); + if (tp->chip_id == AX88140) { + iowrite32(0, ioaddr + CSR13); + iowrite32(addr_low, ioaddr + CSR14); + iowrite32(1, ioaddr + CSR13); + iowrite32(addr_high, ioaddr + CSR14); + } else if (tp->flags & COMET_MAC_ADDR) { + iowrite32(addr_low, ioaddr + 0xA4); + iowrite32(addr_high, ioaddr + 0xA8); + iowrite32(0, ioaddr + CSR27); + iowrite32(0, ioaddr + CSR28); + } + } else { + /* This is set_rx_mode(), but without starting the transmitter. */ + u16 *eaddrs = (u16 *)dev->dev_addr; + u16 *setup_frm = &tp->setup_frame[15*6]; + dma_addr_t mapping; + + /* 21140 bug: you must add the broadcast address. */ + memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame)); + /* Fill the final entry of the table with our physical address. */ + *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; + *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; + *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; + + mapping = pci_map_single(tp->pdev, tp->setup_frame, + sizeof(tp->setup_frame), + PCI_DMA_TODEVICE); + tp->tx_buffers[tp->cur_tx].skb = NULL; + tp->tx_buffers[tp->cur_tx].mapping = mapping; + + /* Put the setup frame on the Tx list. */ + tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192); + tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping); + tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned); + + tp->cur_tx++; + } + + tp->saved_if_port = dev->if_port; + if (dev->if_port == 0) + dev->if_port = tp->default_port; + + /* Allow selecting a default media. */ + i = 0; + if (tp->mtable == NULL) + goto media_picked; + if (dev->if_port) { + int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 : + (dev->if_port == 12 ? 0 : dev->if_port); + for (i = 0; i < tp->mtable->leafcount; i++) + if (tp->mtable->mleaf[i].media == looking_for) { + dev_info(&dev->dev, + "Using user-specified media %s\n", + medianame[dev->if_port]); + goto media_picked; + } + } + if ((tp->mtable->defaultmedia & 0x0800) == 0) { + int looking_for = tp->mtable->defaultmedia & MEDIA_MASK; + for (i = 0; i < tp->mtable->leafcount; i++) + if (tp->mtable->mleaf[i].media == looking_for) { + dev_info(&dev->dev, + "Using EEPROM-set media %s\n", + medianame[looking_for]); + goto media_picked; + } + } + /* Start sensing first non-full-duplex media. */ + for (i = tp->mtable->leafcount - 1; + (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--) + ; +media_picked: + + tp->csr6 = 0; + tp->cur_index = i; + tp->nwayset = 0; + + if (dev->if_port) { + if (tp->chip_id == DC21143 && + (tulip_media_cap[dev->if_port] & MediaIsMII)) { + /* We must reset the media CSRs when we force-select MII mode. 
*/ + iowrite32(0x0000, ioaddr + CSR13); + iowrite32(0x0000, ioaddr + CSR14); + iowrite32(0x0008, ioaddr + CSR15); + } + tulip_select_media(dev, 1); + } else if (tp->chip_id == DC21142) { + if (tp->mii_cnt) { + tulip_select_media(dev, 1); + if (tulip_debug > 1) + dev_info(&dev->dev, + "Using MII transceiver %d, status %04x\n", + tp->phys[0], + tulip_mdio_read(dev, tp->phys[0], 1)); + iowrite32(csr6_mask_defstate, ioaddr + CSR6); + tp->csr6 = csr6_mask_hdcap; + dev->if_port = 11; + iowrite32(0x0000, ioaddr + CSR13); + iowrite32(0x0000, ioaddr + CSR14); + } else + t21142_start_nway(dev); + } else if (tp->chip_id == PNIC2) { + /* for initial startup advertise 10/100 Full and Half */ + tp->sym_advertise = 0x01E0; + /* enable autonegotiate end interrupt */ + iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5); + iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7); + pnic2_start_nway(dev); + } else if (tp->chip_id == LC82C168 && ! tp->medialock) { + if (tp->mii_cnt) { + dev->if_port = 11; + tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0); + iowrite32(0x0001, ioaddr + CSR15); + } else if (ioread32(ioaddr + CSR5) & TPLnkPass) + pnic_do_nway(dev); + else { + /* Start with 10mbps to do autonegotiation. */ + iowrite32(0x32, ioaddr + CSR12); + tp->csr6 = 0x00420000; + iowrite32(0x0001B078, ioaddr + 0xB8); + iowrite32(0x0201B078, ioaddr + 0xB8); + next_tick = 1*HZ; + } + } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) && + ! tp->medialock) { + dev->if_port = 0; + tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0); + iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80); + } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) { + /* Provided by BOLO, Macronix - 12/10/1998. */ + dev->if_port = 0; + tp->csr6 = 0x01a80200; + iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80); + iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0); + } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) { + /* Enable automatic Tx underrun recovery. */ + iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88); + dev->if_port = tp->mii_cnt ? 11 : 0; + tp->csr6 = 0x00040000; + } else if (tp->chip_id == AX88140) { + tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100; + } else + tulip_select_media(dev, 1); + + /* Start the chip's Tx to process setup frame. */ + tulip_stop_rxtx(tp); + barrier(); + udelay(5); + iowrite32(tp->csr6 | TxOn, ioaddr + CSR6); + + /* Enable interrupts by setting the interrupt mask. */ + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5); + iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); + tulip_start_rxtx(tp); + iowrite32(0, ioaddr + CSR2); /* Rx poll demand */ + + if (tulip_debug > 2) { + netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n", + ioread32(ioaddr + CSR0), + ioread32(ioaddr + CSR5), + ioread32(ioaddr + CSR6)); + } + + /* Set the timer to switch to check for link beat and perhaps switch + to an alternate media type. 
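       next_tick is normally 3*HZ; the LC82C168 autonegotiation path above
       shortens it to 1*HZ so the first link check happens sooner.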
*/ + tp->timer.expires = RUN_AT(next_tick); + add_timer(&tp->timer); +#ifdef CONFIG_TULIP_NAPI + init_timer(&tp->oom_timer); + tp->oom_timer.data = (unsigned long)dev; + tp->oom_timer.function = oom_timer; +#endif +} + +static int +tulip_open(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + int retval; + + tulip_init_ring (dev); + + retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED, + dev->name, dev); + if (retval) + goto free_ring; + + tulip_up (dev); + + netif_start_queue (dev); + + return 0; + +free_ring: + tulip_free_ring (dev); + return retval; +} + + +static void tulip_tx_timeout(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + unsigned long flags; + + spin_lock_irqsave (&tp->lock, flags); + + if (tulip_media_cap[dev->if_port] & MediaIsMII) { + /* Do nothing -- the media monitor should handle this. */ + if (tulip_debug > 1) + dev_warn(&dev->dev, + "Transmit timeout using MII device\n"); + } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 || + tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 || + tp->chip_id == DM910X) { + dev_warn(&dev->dev, + "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n", + ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), + ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), + ioread32(ioaddr + CSR15)); + tp->timeout_recovery = 1; + schedule_work(&tp->media_work); + goto out_unlock; + } else if (tp->chip_id == PNIC2) { + dev_warn(&dev->dev, + "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n", + (int)ioread32(ioaddr + CSR5), + (int)ioread32(ioaddr + CSR6), + (int)ioread32(ioaddr + CSR7), + (int)ioread32(ioaddr + CSR12)); + } else { + dev_warn(&dev->dev, + "Transmit timed out, status %08x, CSR12 %08x, resetting...\n", + ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12)); + dev->if_port = 0; + } + +#if defined(way_too_many_messages) + if (tulip_debug > 3) { + int i; + for (i = 0; i < RX_RING_SIZE; i++) { + u8 *buf = (u8 *)(tp->rx_ring[i].buffer1); + int j; + printk(KERN_DEBUG + "%2d: %08x %08x %08x %08x %02x %02x %02x\n", + i, + (unsigned int)tp->rx_ring[i].status, + (unsigned int)tp->rx_ring[i].length, + (unsigned int)tp->rx_ring[i].buffer1, + (unsigned int)tp->rx_ring[i].buffer2, + buf[0], buf[1], buf[2]); + for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) + if (j < 100) + pr_cont(" %02x", buf[j]); + pr_cont(" j=%d\n", j); + } + printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) + pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status); + printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) + pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status); + pr_cont("\n"); + } +#endif + + tulip_tx_timeout_complete(tp, ioaddr); + +out_unlock: + spin_unlock_irqrestore (&tp->lock, flags); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. 
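   Each descriptor's buffer2 field chains to the next descriptor's DMA address
   and the last entry points back to the ring base, so the rings work in either
   chained or ring mode (see the DESC_RING_WRAP note in tulip.h).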
*/ +static void tulip_init_ring(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + int i; + + tp->susp_rx = 0; + tp->ttimer = 0; + tp->nir = 0; + + for (i = 0; i < RX_RING_SIZE; i++) { + tp->rx_ring[i].status = 0x00000000; + tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ); + tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1)); + tp->rx_buffers[i].skb = NULL; + tp->rx_buffers[i].mapping = 0; + } + /* Mark the last entry as wrapping the ring. */ + tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP); + tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma); + + for (i = 0; i < RX_RING_SIZE; i++) { + dma_addr_t mapping; + + /* Note the receive buffer must be longword aligned. + netdev_alloc_skb() provides 16 byte alignment. But do *not* + use skb_reserve() to align the IP header! */ + struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ); + tp->rx_buffers[i].skb = skb; + if (skb == NULL) + break; + mapping = pci_map_single(tp->pdev, skb->data, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + tp->rx_buffers[i].mapping = mapping; + tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */ + tp->rx_ring[i].buffer1 = cpu_to_le32(mapping); + } + tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + + /* The Tx buffer descriptor is filled in as needed, but we + do need to clear the ownership bit. */ + for (i = 0; i < TX_RING_SIZE; i++) { + tp->tx_buffers[i].skb = NULL; + tp->tx_buffers[i].mapping = 0; + tp->tx_ring[i].status = 0x00000000; + tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1)); + } + tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma); +} + +static netdev_tx_t +tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + int entry; + u32 flag; + dma_addr_t mapping; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + + /* Calculate the next Tx descriptor entry. */ + entry = tp->cur_tx % TX_RING_SIZE; + + tp->tx_buffers[entry].skb = skb; + mapping = pci_map_single(tp->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE); + tp->tx_buffers[entry].mapping = mapping; + tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping); + + if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ + flag = 0x60000000; /* No interrupt */ + } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) { + flag = 0xe0000000; /* Tx-done intr. */ + } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) { + flag = 0x60000000; /* No Tx-done intr. */ + } else { /* Leave room for set_rx_mode() to fill entries. */ + flag = 0xe0000000; /* Tx-done intr. */ + netif_stop_queue(dev); + } + if (entry == TX_RING_SIZE-1) + flag = 0xe0000000 | DESC_RING_WRAP; + + tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag); + /* if we were using Transmit Automatic Polling, we would need a + * wmb() here. */ + tp->tx_ring[entry].status = cpu_to_le32(DescOwned); + wmb(); + + tp->cur_tx++; + + /* Trigger an immediate transmit demand. 
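	   Any write to CSR1, the transmit poll demand register, makes the chip
	   re-read the Tx descriptor list immediately instead of waiting for its
	   next poll.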
*/ + iowrite32(0, tp->base_addr + CSR1); + + spin_unlock_irqrestore(&tp->lock, flags); + + return NETDEV_TX_OK; +} + +static void tulip_clean_tx_ring(struct tulip_private *tp) +{ + unsigned int dirty_tx; + + for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0; + dirty_tx++) { + int entry = dirty_tx % TX_RING_SIZE; + int status = le32_to_cpu(tp->tx_ring[entry].status); + + if (status < 0) { + tp->dev->stats.tx_errors++; /* It wasn't Txed */ + tp->tx_ring[entry].status = 0; + } + + /* Check for Tx filter setup frames. */ + if (tp->tx_buffers[entry].skb == NULL) { + /* test because dummy frames not mapped */ + if (tp->tx_buffers[entry].mapping) + pci_unmap_single(tp->pdev, + tp->tx_buffers[entry].mapping, + sizeof(tp->setup_frame), + PCI_DMA_TODEVICE); + continue; + } + + pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, + tp->tx_buffers[entry].skb->len, + PCI_DMA_TODEVICE); + + /* Free the original skb. */ + dev_kfree_skb_irq(tp->tx_buffers[entry].skb); + tp->tx_buffers[entry].skb = NULL; + tp->tx_buffers[entry].mapping = 0; + } +} + +static void tulip_down (struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + unsigned long flags; + + cancel_work_sync(&tp->media_work); + +#ifdef CONFIG_TULIP_NAPI + napi_disable(&tp->napi); +#endif + + del_timer_sync (&tp->timer); +#ifdef CONFIG_TULIP_NAPI + del_timer_sync (&tp->oom_timer); +#endif + spin_lock_irqsave (&tp->lock, flags); + + /* Disable interrupts by clearing the interrupt mask. */ + iowrite32 (0x00000000, ioaddr + CSR7); + + /* Stop the Tx and Rx processes. */ + tulip_stop_rxtx(tp); + + /* prepare receive buffers */ + tulip_refill_rx(dev); + + /* release any unconsumed transmit buffers */ + tulip_clean_tx_ring(tp); + + if (ioread32(ioaddr + CSR6) != 0xffffffff) + dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; + + spin_unlock_irqrestore (&tp->lock, flags); + + init_timer(&tp->timer); + tp->timer.data = (unsigned long)dev; + tp->timer.function = tulip_tbl[tp->chip_id].media_timer; + + dev->if_port = tp->saved_if_port; + + /* Leave the driver in snooze, not sleep, mode. */ + tulip_set_power_state (tp, 0, 1); +} + +static void tulip_free_ring (struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + int i; + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = tp->rx_buffers[i].skb; + dma_addr_t mapping = tp->rx_buffers[i].mapping; + + tp->rx_buffers[i].skb = NULL; + tp->rx_buffers[i].mapping = 0; + + tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */ + tp->rx_ring[i].length = 0; + /* An invalid address. 
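		   0xBADF00D0 is just a poison value; the descriptor's status has
		   already been cleared above, so the chip no longer owns it and
		   never dereferences this address.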
*/ + tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0); + if (skb) { + pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ, + PCI_DMA_FROMDEVICE); + dev_kfree_skb (skb); + } + } + + for (i = 0; i < TX_RING_SIZE; i++) { + struct sk_buff *skb = tp->tx_buffers[i].skb; + + if (skb != NULL) { + pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb (skb); + } + tp->tx_buffers[i].skb = NULL; + tp->tx_buffers[i].mapping = 0; + } +} + +static int tulip_close (struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + + netif_stop_queue (dev); + + tulip_down (dev); + + if (tulip_debug > 1) + netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", + ioread32 (ioaddr + CSR5)); + + free_irq (tp->pdev->irq, dev); + + tulip_free_ring (dev); + + return 0; +} + +static struct net_device_stats *tulip_get_stats(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + + if (netif_running(dev)) { + unsigned long flags; + + spin_lock_irqsave (&tp->lock, flags); + + dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; + + spin_unlock_irqrestore(&tp->lock, flags); + } + + return &dev->stats; +} + + +static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct tulip_private *np = netdev_priv(dev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); +} + + +static int tulip_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct tulip_private *tp = netdev_priv(dev); + + if (wolinfo->wolopts & (~tp->wolinfo.supported)) + return -EOPNOTSUPP; + + tp->wolinfo.wolopts = wolinfo->wolopts; + device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts); + return 0; +} + +static void tulip_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct tulip_private *tp = netdev_priv(dev); + + wolinfo->supported = tp->wolinfo.supported; + wolinfo->wolopts = tp->wolinfo.wolopts; + return; +} + + +static const struct ethtool_ops ops = { + .get_drvinfo = tulip_get_drvinfo, + .set_wol = tulip_ethtool_set_wol, + .get_wol = tulip_ethtool_get_wol, +}; + +/* Provide ioctl() calls to examine the MII xcvr state. */ +static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + struct mii_ioctl_data *data = if_mii(rq); + const unsigned int phy_idx = 0; + int phy = tp->phys[phy_idx] & 0x1f; + unsigned int regnum = data->reg_num; + + switch (cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + if (tp->mii_cnt) + data->phy_id = phy; + else if (tp->flags & HAS_NWAY) + data->phy_id = 32; + else if (tp->chip_id == COMET) + data->phy_id = 1; + else + return -ENODEV; + + case SIOCGMIIREG: /* Read MII PHY register. */ + if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { + int csr12 = ioread32 (ioaddr + CSR12); + int csr14 = ioread32 (ioaddr + CSR14); + switch (regnum) { + case 0: + if (((csr14<<5) & 0x1000) || + (dev->if_port == 5 && tp->nwayset)) + data->val_out = 0x1000; + else + data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0) + | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0); + break; + case 1: + data->val_out = + 0x1848 + + ((csr12&0x7000) == 0x5000 ? 0x20 : 0) + + ((csr12&0x06) == 6 ? 
0 : 4); + data->val_out |= 0x6048; + break; + case 4: + /* Advertised value, bogus 10baseTx-FD value from CSR6. */ + data->val_out = + ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) + + ((csr14 >> 1) & 0x20) + 1; + data->val_out |= ((csr14 >> 9) & 0x03C0); + break; + case 5: data->val_out = tp->lpar; break; + default: data->val_out = 0; break; + } + } else { + data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum); + } + return 0; + + case SIOCSMIIREG: /* Write MII PHY register. */ + if (regnum & ~0x1f) + return -EINVAL; + if (data->phy_id == phy) { + u16 value = data->val_in; + switch (regnum) { + case 0: /* Check for autonegotiation on or reset. */ + tp->full_duplex_lock = (value & 0x9000) ? 0 : 1; + if (tp->full_duplex_lock) + tp->full_duplex = (value & 0x0100) ? 1 : 0; + break; + case 4: + tp->advertising[phy_idx] = + tp->mii_advertise = data->val_in; + break; + } + } + if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { + u16 value = data->val_in; + if (regnum == 0) { + if ((value & 0x1200) == 0x1200) { + if (tp->chip_id == PNIC2) { + pnic2_start_nway (dev); + } else { + t21142_start_nway (dev); + } + } + } else if (regnum == 4) + tp->sym_advertise = value; + } else { + tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in); + } + return 0; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + + +/* Set or clear the multicast filter for this adaptor. + Note that we only use exclusion around actually queueing the + new frame, not around filling tp->setup_frame. This is non-deterministic + when re-entered but still correct. */ + +static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + u16 hash_table[32]; + struct netdev_hw_addr *ha; + int i; + u16 *eaddrs; + + memset(hash_table, 0, sizeof(hash_table)); + __set_bit_le(255, hash_table); /* Broadcast entry */ + /* This should work on big-endian machines as well. */ + netdev_for_each_mc_addr(ha, dev) { + int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; + + __set_bit_le(index, hash_table); + } + for (i = 0; i < 32; i++) { + *setup_frm++ = hash_table[i]; + *setup_frm++ = hash_table[i]; + } + setup_frm = &tp->setup_frame[13*6]; + + /* Fill the final entry with our physical address. */ + eaddrs = (u16 *)dev->dev_addr; + *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; + *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; + *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; +} + +static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + struct netdev_hw_addr *ha; + u16 *eaddrs; + + /* We have <= 14 addresses so we can use the wonderful + 16 address perfect filtering of the Tulip. */ + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (u16 *) ha->addr; + *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; + *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; + *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; + } + /* Fill the unused entries with the broadcast address. */ + memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12); + setup_frm = &tp->setup_frame[15*6]; + + /* Fill the final entry with our physical address. 
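	   Each 16-bit word of the address is stored twice because only the low
	   shortword of each setup-frame longword is significant; doubling it keeps
	   the frame correct on big-endian machines (see the note in set_rx_mode()).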
*/ + eaddrs = (u16 *)dev->dev_addr; + *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; + *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; + *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; +} + + +static void set_rx_mode(struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int csr6; + + csr6 = ioread32(ioaddr + CSR6) & ~0x00D5; + + tp->csr6 &= ~0x00D5; + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + tp->csr6 |= AcceptAllMulticast | AcceptAllPhys; + csr6 |= AcceptAllMulticast | AcceptAllPhys; + } else if ((netdev_mc_count(dev) > 1000) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter well -- accept all multicasts. */ + tp->csr6 |= AcceptAllMulticast; + csr6 |= AcceptAllMulticast; + } else if (tp->flags & MC_HASH_ONLY) { + /* Some work-alikes have only a 64-entry hash filter table. */ + /* Should verify correctness on big-endian/__powerpc__ */ + struct netdev_hw_addr *ha; + if (netdev_mc_count(dev) > 64) { + /* Arbitrary non-effective limit. */ + tp->csr6 |= AcceptAllMulticast; + csr6 |= AcceptAllMulticast; + } else { + u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ + int filterbit; + netdev_for_each_mc_addr(ha, dev) { + if (tp->flags & COMET_MAC_ADDR) + filterbit = ether_crc_le(ETH_ALEN, + ha->addr); + else + filterbit = ether_crc(ETH_ALEN, + ha->addr) >> 26; + filterbit &= 0x3f; + mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); + if (tulip_debug > 2) + dev_info(&dev->dev, + "Added filter for %pM %08x bit %d\n", + ha->addr, + ether_crc(ETH_ALEN, ha->addr), + filterbit); + } + if (mc_filter[0] == tp->mc_filter[0] && + mc_filter[1] == tp->mc_filter[1]) + ; /* No change. */ + else if (tp->flags & IS_ASIX) { + iowrite32(2, ioaddr + CSR13); + iowrite32(mc_filter[0], ioaddr + CSR14); + iowrite32(3, ioaddr + CSR13); + iowrite32(mc_filter[1], ioaddr + CSR14); + } else if (tp->flags & COMET_MAC_ADDR) { + iowrite32(mc_filter[0], ioaddr + CSR27); + iowrite32(mc_filter[1], ioaddr + CSR28); + } + tp->mc_filter[0] = mc_filter[0]; + tp->mc_filter[1] = mc_filter[1]; + } + } else { + unsigned long flags; + u32 tx_flags = 0x08000000 | 192; + + /* Note that only the low-address shortword of setup_frame is valid! + The values are doubled for big-endian architectures. */ + if (netdev_mc_count(dev) > 14) { + /* Must use a multicast hash table. */ + build_setup_frame_hash(tp->setup_frame, dev); + tx_flags = 0x08400000 | 192; + } else { + build_setup_frame_perfect(tp->setup_frame, dev); + } + + spin_lock_irqsave(&tp->lock, flags); + + if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) { + /* Same setup recently queued, we need not add it. */ + } else { + unsigned int entry; + int dummy = -1; + + /* Now add this frame to the Tx list. */ + + entry = tp->cur_tx++ % TX_RING_SIZE; + + if (entry != 0) { + /* Avoid a chip errata by prefixing a dummy entry. */ + tp->tx_buffers[entry].skb = NULL; + tp->tx_buffers[entry].mapping = 0; + tp->tx_ring[entry].length = + (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; + tp->tx_ring[entry].buffer1 = 0; + /* Must set DescOwned later to avoid race with chip */ + dummy = entry; + entry = tp->cur_tx++ % TX_RING_SIZE; + + } + + tp->tx_buffers[entry].skb = NULL; + tp->tx_buffers[entry].mapping = + pci_map_single(tp->pdev, tp->setup_frame, + sizeof(tp->setup_frame), + PCI_DMA_TODEVICE); + /* Put the setup frame on the Tx list. */ + if (entry == TX_RING_SIZE-1) + tx_flags |= DESC_RING_WRAP; /* Wrap ring. 
*/ + tp->tx_ring[entry].length = cpu_to_le32(tx_flags); + tp->tx_ring[entry].buffer1 = + cpu_to_le32(tp->tx_buffers[entry].mapping); + tp->tx_ring[entry].status = cpu_to_le32(DescOwned); + if (dummy >= 0) + tp->tx_ring[dummy].status = cpu_to_le32(DescOwned); + if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) + netif_stop_queue(dev); + + /* Trigger an immediate transmit demand. */ + iowrite32(0, ioaddr + CSR1); + } + + spin_unlock_irqrestore(&tp->lock, flags); + } + + iowrite32(csr6, ioaddr + CSR6); +} + +#ifdef CONFIG_TULIP_MWI +static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + u8 cache; + u16 pci_command; + u32 csr0; + + if (tulip_debug > 3) + netdev_dbg(dev, "tulip_mwi_config()\n"); + + tp->csr0 = csr0 = 0; + + /* if we have any cache line size at all, we can do MRM and MWI */ + csr0 |= MRM | MWI; + + /* Enable MWI in the standard PCI command bit. + * Check for the case where MWI is desired but not available + */ + pci_try_set_mwi(pdev); + + /* read result from hardware (in case bit refused to enable) */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_command); + if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE))) + csr0 &= ~MWI; + + /* if cache line size hardwired to zero, no MWI */ + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache); + if ((csr0 & MWI) && (cache == 0)) { + csr0 &= ~MWI; + pci_clear_mwi(pdev); + } + + /* assign per-cacheline-size cache alignment and + * burst length values + */ + switch (cache) { + case 8: + csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift); + break; + case 16: + csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift); + break; + case 32: + csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift); + break; + default: + cache = 0; + break; + } + + /* if we have a good cache line size, we by now have a good + * csr0, so save it and exit + */ + if (cache) + goto out; + + /* we don't have a good csr0 or cache line size, disable MWI */ + if (csr0 & MWI) { + pci_clear_mwi(pdev); + csr0 &= ~MWI; + } + + /* sane defaults for burst length and cache alignment + * originally from de4x5 driver + */ + csr0 |= (8 << BurstLenShift) | (1 << CALShift); + +out: + tp->csr0 = csr0; + if (tulip_debug > 2) + netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n", + cache, csr0); +} +#endif + +/* + * Chips that have the MRM/reserved bit quirk and the burst quirk. That + * is the DM910X and the on chip ULi devices + */ + +static int tulip_uli_dm_quirk(struct pci_dev *pdev) +{ + if (pdev->vendor == 0x1282 && pdev->device == 0x9102) + return 1; + return 0; +} + +static const struct net_device_ops tulip_netdev_ops = { + .ndo_open = tulip_open, + .ndo_start_xmit = tulip_start_xmit, + .ndo_tx_timeout = tulip_tx_timeout, + .ndo_stop = tulip_close, + .ndo_get_stats = tulip_get_stats, + .ndo_do_ioctl = private_ioctl, + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = poll_tulip, +#endif +}; + +const struct pci_device_id early_486_chipsets[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) }, + { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) }, + { }, +}; + +static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct tulip_private *tp; + /* See note below on the multiport cards. 
*/ + static unsigned char last_phys_addr[ETH_ALEN] = { + 0x00, 'L', 'i', 'n', 'u', 'x' + }; + static int last_irq; + static int multiport_cnt; /* For four-port boards w/one EEPROM */ + int i, irq; + unsigned short sum; + unsigned char *ee_data; + struct net_device *dev; + void __iomem *ioaddr; + static int board_idx = -1; + int chip_idx = ent->driver_data; + const char *chip_name = tulip_tbl[chip_idx].chip_name; + unsigned int eeprom_missing = 0; + unsigned int force_csr0 = 0; + +#ifndef MODULE + if (tulip_debug > 0) + printk_once(KERN_INFO "%s", version); +#endif + + board_idx++; + + /* + * Lan media wire a tulip chip to a wan interface. Needs a very + * different driver (lmc driver) + */ + + if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { + pr_err("skipping LMC card\n"); + return -ENODEV; + } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE && + (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 || + pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 || + pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) { + pr_err("skipping SBE T3E3 port\n"); + return -ENODEV; + } + + /* + * DM910x chips should be handled by the dmfe driver, except + * on-board chips on SPARC systems. Also, early DM9100s need + * software CRC which only the dmfe driver supports. + */ + +#ifdef CONFIG_TULIP_DM910X + if (chip_idx == DM910X) { + struct device_node *dp; + + if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && + pdev->revision < 0x30) { + pr_info("skipping early DM9100 with Crc bug (use dmfe)\n"); + return -ENODEV; + } + + dp = pci_device_to_OF_node(pdev); + if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { + pr_info("skipping DM910x expansion card (use dmfe)\n"); + return -ENODEV; + } + } +#endif + + /* + * Looks for early PCI chipsets where people report hangs + * without the workarounds being on. + */ + + /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache + aligned. Aries might need this too. The Saturn errata are not + pretty reading but thankfully it's an old 486 chipset. + + 2. The dreaded SiS496 486 chipset. Same workaround as Intel + Saturn. + */ + + if (pci_dev_present(early_486_chipsets)) { + csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift); + force_csr0 = 1; + } + + /* bugfix: the ASIX must have a burst limit or horrible things happen. */ + if (chip_idx == AX88140) { + if ((csr0 & 0x3f00) == 0) + csr0 |= 0x2000; + } + + /* PNIC doesn't have MWI/MRL/MRM... 
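so those CSR0 bits are simply masked off for the LC82C168 below.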
*/ + if (chip_idx == LC82C168) + csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */ + + /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ + if (tulip_uli_dm_quirk(pdev)) { + csr0 &= ~0x01f100ff; +#if defined(CONFIG_SPARC) + csr0 = (csr0 & ~0xff00) | 0xe000; +#endif + } + /* + * And back to business + */ + + i = pci_enable_device(pdev); + if (i) { + pr_err("Cannot enable tulip board #%d, aborting\n", board_idx); + return i; + } + + irq = pdev->irq; + + /* alloc_etherdev ensures aligned and zeroed private structures */ + dev = alloc_etherdev (sizeof (*tp)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { + pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n", + pci_name(pdev), + (unsigned long long)pci_resource_len (pdev, 0), + (unsigned long long)pci_resource_start (pdev, 0)); + goto err_out_free_netdev; + } + + /* grab all resources from both PIO and MMIO regions, as we + * don't want anyone else messing around with our hardware */ + if (pci_request_regions (pdev, DRV_NAME)) + goto err_out_free_netdev; + + ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); + + if (!ioaddr) + goto err_out_free_res; + + /* + * initialize private data structure 'tp' + * it is zeroed and aligned in alloc_etherdev + */ + tp = netdev_priv(dev); + tp->dev = dev; + + tp->rx_ring = pci_alloc_consistent(pdev, + sizeof(struct tulip_rx_desc) * RX_RING_SIZE + + sizeof(struct tulip_tx_desc) * TX_RING_SIZE, + &tp->rx_ring_dma); + if (!tp->rx_ring) + goto err_out_mtable; + tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE); + tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE; + + tp->chip_id = chip_idx; + tp->flags = tulip_tbl[chip_idx].flags; + + tp->wolinfo.supported = 0; + tp->wolinfo.wolopts = 0; + /* COMET: Enable power management only for AN983B */ + if (chip_idx == COMET ) { + u32 sig; + pci_read_config_dword (pdev, 0x80, &sig); + if (sig == 0x09811317) { + tp->flags |= COMET_PM; + tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC; + pr_info("%s: Enabled WOL support for AN983B\n", + __func__); + } + } + tp->pdev = pdev; + tp->base_addr = ioaddr; + tp->revision = pdev->revision; + tp->csr0 = csr0; + spin_lock_init(&tp->lock); + spin_lock_init(&tp->mii_lock); + init_timer(&tp->timer); + tp->timer.data = (unsigned long)dev; + tp->timer.function = tulip_tbl[tp->chip_id].media_timer; + + INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); + +#ifdef CONFIG_TULIP_MWI + if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) + tulip_mwi_config (pdev, dev); +#endif + + /* Stop the chip's Tx and Rx processes. */ + tulip_stop_rxtx(tp); + + pci_set_master(pdev); + +#ifdef CONFIG_GSC + if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) { + switch (pdev->subsystem_device) { + default: + break; + case 0x1061: + case 0x1062: + case 0x1063: + case 0x1098: + case 0x1099: + case 0x10EE: + tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE; + chip_name = "GSC DS21140 Tulip"; + } + } +#endif + + /* Clear the missed-packet counter. */ + ioread32(ioaddr + CSR8); + + /* The station address ROM is read byte serially. The register must + be polled, waiting for the value to be read bit serially from the + EEPROM. 
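+ The LC82C168 (PNIC) and Comet chips are exceptions handled first below: the PNIC returns the address through CSR9, while the Comet provides it directly in its own MAC address registers.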
+ */ + ee_data = tp->eeprom; + memset(ee_data, 0, sizeof(tp->eeprom)); + sum = 0; + if (chip_idx == LC82C168) { + for (i = 0; i < 3; i++) { + int value, boguscnt = 100000; + iowrite32(0x600 | i, ioaddr + 0x98); + do { + value = ioread32(ioaddr + CSR9); + } while (value < 0 && --boguscnt > 0); + put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); + sum += value & 0xffff; + } + } else if (chip_idx == COMET) { + /* No need to read the EEPROM. */ + put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr); + put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4); + for (i = 0; i < 6; i ++) + sum += dev->dev_addr[i]; + } else { + /* A serial EEPROM interface, we read now and sort it out later. */ + int sa_offset = 0; + int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6; + int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16); + + if (ee_max_addr > sizeof(tp->eeprom)) + ee_max_addr = sizeof(tp->eeprom); + + for (i = 0; i < ee_max_addr ; i += sizeof(u16)) { + u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size); + ee_data[i] = data & 0xff; + ee_data[i + 1] = data >> 8; + } + + /* DEC now has a specification (see Notes) but early board makers + just put the address in the first EEPROM locations. */ + /* This does memcmp(ee_data, ee_data+16, 8) */ + for (i = 0; i < 8; i ++) + if (ee_data[i] != ee_data[16+i]) + sa_offset = 20; + if (chip_idx == CONEXANT) { + /* Check that the tuple type and length is correct. */ + if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6) + sa_offset = 0x19A; + } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && + ee_data[2] == 0) { + sa_offset = 2; /* Grrr, damn Matrox boards. */ + multiport_cnt = 4; + } +#ifdef CONFIG_MIPS_COBALT + if ((pdev->bus->number == 0) && + ((PCI_SLOT(pdev->devfn) == 7) || + (PCI_SLOT(pdev->devfn) == 12))) { + /* Cobalt MAC address in first EEPROM locations. */ + sa_offset = 0; + /* Ensure our media table fixup get's applied */ + memcpy(ee_data + 16, ee_data, 8); + } +#endif +#ifdef CONFIG_GSC + /* Check to see if we have a broken srom */ + if (ee_data[0] == 0x61 && ee_data[1] == 0x10) { + /* pci_vendor_id and subsystem_id are swapped */ + ee_data[0] = ee_data[2]; + ee_data[1] = ee_data[3]; + ee_data[2] = 0x61; + ee_data[3] = 0x10; + + /* HSC-PCI boards need to be byte-swaped and shifted + * up 1 word. This shift needs to happen at the end + * of the MAC first because of the 2 byte overlap. + */ + for (i = 4; i >= 0; i -= 2) { + ee_data[17 + i + 3] = ee_data[17 + i]; + ee_data[16 + i + 5] = ee_data[16 + i]; + } + } +#endif + + for (i = 0; i < 6; i ++) { + dev->dev_addr[i] = ee_data[i + sa_offset]; + sum += ee_data[i + sa_offset]; + } + } + /* Lite-On boards have the address byte-swapped. */ + if ((dev->dev_addr[0] == 0xA0 || + dev->dev_addr[0] == 0xC0 || + dev->dev_addr[0] == 0x02) && + dev->dev_addr[1] == 0x00) + for (i = 0; i < 6; i+=2) { + char tmp = dev->dev_addr[i]; + dev->dev_addr[i] = dev->dev_addr[i+1]; + dev->dev_addr[i+1] = tmp; + } + /* On the Zynx 315 Etherarray and other multiport boards only the + first Tulip has an EEPROM. + On Sparc systems the mac address is held in the OBP property + "local-mac-address". + The addresses of the subsequent ports are derived from the first. + Many PCI BIOSes also incorrectly report the IRQ line, so we correct + that here as well. 
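A checksum of zero or of six 0xff bytes is treated as a missing or blank EEPROM by the test that follows.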
*/ + if (sum == 0 || sum == 6*0xff) { +#if defined(CONFIG_SPARC) + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + int len; +#endif + eeprom_missing = 1; + for (i = 0; i < 5; i++) + dev->dev_addr[i] = last_phys_addr[i]; + dev->dev_addr[i] = last_phys_addr[i] + 1; +#if defined(CONFIG_SPARC) + addr = of_get_property(dp, "local-mac-address", &len); + if (addr && len == ETH_ALEN) + memcpy(dev->dev_addr, addr, ETH_ALEN); +#endif +#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ + if (last_irq) + irq = last_irq; +#endif + } + + for (i = 0; i < 6; i++) + last_phys_addr[i] = dev->dev_addr[i]; + last_irq = irq; + + /* The lower four bits are the media type. */ + if (board_idx >= 0 && board_idx < MAX_UNITS) { + if (options[board_idx] & MEDIA_MASK) + tp->default_port = options[board_idx] & MEDIA_MASK; + if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0) + tp->full_duplex = 1; + if (mtu[board_idx] > 0) + dev->mtu = mtu[board_idx]; + } + if (dev->mem_start & MEDIA_MASK) + tp->default_port = dev->mem_start & MEDIA_MASK; + if (tp->default_port) { + pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n", + board_idx, medianame[tp->default_port & MEDIA_MASK]); + tp->medialock = 1; + if (tulip_media_cap[tp->default_port] & MediaAlwaysFD) + tp->full_duplex = 1; + } + if (tp->full_duplex) + tp->full_duplex_lock = 1; + + if (tulip_media_cap[tp->default_port] & MediaIsMII) { + static const u16 media2advert[] = { + 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 + }; + tp->mii_advertise = media2advert[tp->default_port - 9]; + tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */ + } + + if (tp->flags & HAS_MEDIA_TABLE) { + sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */ + tulip_parse_eeprom(dev); + strcpy(dev->name, "eth%d"); /* un-hack */ + } + + if ((tp->flags & ALWAYS_CHECK_MII) || + (tp->mtable && tp->mtable->has_mii) || + ( ! tp->mtable && (tp->flags & HAS_MII))) { + if (tp->mtable && tp->mtable->has_mii) { + for (i = 0; i < tp->mtable->leafcount; i++) + if (tp->mtable->mleaf[i].media == 11) { + tp->cur_index = i; + tp->saved_if_port = dev->if_port; + tulip_select_media(dev, 2); + dev->if_port = tp->saved_if_port; + break; + } + } + + /* Find the connected MII xcvrs. + Doing this in open() would allow detecting external xcvrs + later, but takes much time. */ + tulip_find_mii (dev, board_idx); + } + + /* The Tulip-specific entries in the device structure. */ + dev->netdev_ops = &tulip_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; +#ifdef CONFIG_TULIP_NAPI + netif_napi_add(dev, &tp->napi, tulip_poll, 16); +#endif + dev->ethtool_ops = &ops; + + if (register_netdev(dev)) + goto err_out_free_ring; + + pci_set_drvdata(pdev, dev); + + dev_info(&dev->dev, +#ifdef CONFIG_TULIP_MMIO + "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n", +#else + "%s rev %d at Port %#llx,%s %pM, IRQ %d\n", +#endif + chip_name, pdev->revision, + (unsigned long long)pci_resource_start(pdev, TULIP_BAR), + eeprom_missing ? " EEPROM not present," : "", + dev->dev_addr, irq); + + if (tp->chip_id == PNIC2) + tp->link_change = pnic2_lnk_change; + else if (tp->flags & HAS_NWAY) + tp->link_change = t21142_lnk_change; + else if (tp->flags & HAS_PNICNWAY) + tp->link_change = pnic_lnk_change; + + /* Reset the xcvr interface and turn on heartbeat. 
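The required sequence differs per chip family, hence the switch on chip_idx below.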
*/ + switch (chip_idx) { + case DC21140: + case DM910X: + default: + if (tp->mtable) + iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); + break; + case DC21142: + if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) { + iowrite32(csr6_mask_defstate, ioaddr + CSR6); + iowrite32(0x0000, ioaddr + CSR13); + iowrite32(0x0000, ioaddr + CSR14); + iowrite32(csr6_mask_hdcap, ioaddr + CSR6); + } else + t21142_start_nway(dev); + break; + case PNIC2: + /* just do a reset for sanity sake */ + iowrite32(0x0000, ioaddr + CSR13); + iowrite32(0x0000, ioaddr + CSR14); + break; + case LC82C168: + if ( ! tp->mii_cnt) { + tp->nway = 1; + tp->nwayset = 0; + iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6); + iowrite32(0x30, ioaddr + CSR12); + iowrite32(0x0001F078, ioaddr + CSR6); + iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */ + } + break; + case MX98713: + case COMPEX9881: + iowrite32(0x00000000, ioaddr + CSR6); + iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */ + iowrite32(0x00000001, ioaddr + CSR13); + break; + case MX98715: + case MX98725: + iowrite32(0x01a80000, ioaddr + CSR6); + iowrite32(0xFFFFFFFF, ioaddr + CSR14); + iowrite32(0x00001000, ioaddr + CSR12); + break; + case COMET: + /* No initialization necessary. */ + break; + } + + /* put the chip in snooze mode until opened */ + tulip_set_power_state (tp, 0, 1); + + return 0; + +err_out_free_ring: + pci_free_consistent (pdev, + sizeof (struct tulip_rx_desc) * RX_RING_SIZE + + sizeof (struct tulip_tx_desc) * TX_RING_SIZE, + tp->rx_ring, tp->rx_ring_dma); + +err_out_mtable: + kfree (tp->mtable); + pci_iounmap(pdev, ioaddr); + +err_out_free_res: + pci_release_regions (pdev); + +err_out_free_netdev: + free_netdev (dev); + return -ENODEV; +} + + +/* set the registers according to the given wolopts */ +static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + + if (tp->flags & COMET_PM) { + + unsigned int tmp; + + tmp = ioread32(ioaddr + CSR18); + tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a); + tmp |= comet_csr18_pm_mode; + iowrite32(tmp, ioaddr + CSR18); + + /* Set the Wake-up Control/Status Register to the given WOL options*/ + tmp = ioread32(ioaddr + CSR13); + tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre); + if (wolopts & WAKE_MAGIC) + tmp |= comet_csr13_mpre; + if (wolopts & WAKE_PHY) + tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce; + /* Clear the event flags */ + tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc; + iowrite32(tmp, ioaddr + CSR13); + } +} + +#ifdef CONFIG_PM + + +static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) +{ + pci_power_t pstate; + struct net_device *dev = pci_get_drvdata(pdev); + struct tulip_private *tp = netdev_priv(dev); + + if (!dev) + return -EINVAL; + + if (!netif_running(dev)) + goto save_state; + + tulip_down(dev); + + netif_device_detach(dev); + /* FIXME: it needlessly adds an error path. 
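The IRQ has to be re-requested in tulip_resume(), and that request can fail.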
*/ + free_irq(tp->pdev->irq, dev); + +save_state: + pci_save_state(pdev); + pci_disable_device(pdev); + pstate = pci_choose_state(pdev, state); + if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) { + int rc; + + tulip_set_wolopts(pdev, tp->wolinfo.wolopts); + rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts); + if (rc) + pr_err("pci_enable_wake failed (%d)\n", rc); + } + pci_set_power_state(pdev, pstate); + + return 0; +} + + +static int tulip_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tulip_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->base_addr; + int retval; + unsigned int tmp; + + if (!dev) + return -EINVAL; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (!netif_running(dev)) + return 0; + + if ((retval = pci_enable_device(pdev))) { + pr_err("pci_enable_device failed in resume\n"); + return retval; + } + + retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED, + dev->name, dev); + if (retval) { + pr_err("request_irq failed in resume\n"); + return retval; + } + + if (tp->flags & COMET_PM) { + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + /* Clear the PMES flag */ + tmp = ioread32(ioaddr + CSR20); + tmp |= comet_csr20_pmes; + iowrite32(tmp, ioaddr + CSR20); + + /* Disable all wake-up events */ + tulip_set_wolopts(pdev, 0); + } + netif_device_attach(dev); + + if (netif_running(dev)) + tulip_up(dev); + + return 0; +} + +#endif /* CONFIG_PM */ + + +static void tulip_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct tulip_private *tp; + + if (!dev) + return; + + tp = netdev_priv(dev); + unregister_netdev(dev); + pci_free_consistent (pdev, + sizeof (struct tulip_rx_desc) * RX_RING_SIZE + + sizeof (struct tulip_tx_desc) * TX_RING_SIZE, + tp->rx_ring, tp->rx_ring_dma); + kfree (tp->mtable); + pci_iounmap(pdev, tp->base_addr); + free_netdev (dev); + pci_release_regions (pdev); + pci_disable_device(pdev); + + /* pci_power_off (pdev, -1); */ +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ + +static void poll_tulip (struct net_device *dev) +{ + struct tulip_private *tp = netdev_priv(dev); + const int irq = tp->pdev->irq; + + /* disable_irq here is not very nice, but with the lockless + interrupt handler we have no other choice. 
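Masking the IRQ line at least guarantees the handler cannot run concurrently with this direct call.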
*/ + disable_irq(irq); + tulip_interrupt (irq, dev); + enable_irq(irq); +} +#endif + +static struct pci_driver tulip_driver = { + .name = DRV_NAME, + .id_table = tulip_pci_tbl, + .probe = tulip_init_one, + .remove = tulip_remove_one, +#ifdef CONFIG_PM + .suspend = tulip_suspend, + .resume = tulip_resume, +#endif /* CONFIG_PM */ +}; + + +static int __init tulip_init (void) +{ +#ifdef MODULE + pr_info("%s", version); +#endif + + /* copy module parms into globals */ + tulip_rx_copybreak = rx_copybreak; + tulip_max_interrupt_work = max_interrupt_work; + + /* probe for and init boards */ + return pci_register_driver(&tulip_driver); +} + + +static void __exit tulip_cleanup (void) +{ + pci_unregister_driver (&tulip_driver); +} + + +module_init(tulip_init); +module_exit(tulip_cleanup); diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c new file mode 100644 index 000000000..2c30c0c83 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -0,0 +1,1845 @@ +/* + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "uli526x" +#define DRV_VERSION "0.9.3" +#define DRV_RELDATE "2005-7-29" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define uw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define ur32(reg) ioread32(ioaddr + (reg)) + +/* Board/System/Debug information/definition ---------------- */ +#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/ +#define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/ + +#define ULI526X_IO_SIZE 0x100 +#define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */ +#define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */ +#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */ +#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */ +#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT) +#define TX_BUF_ALLOC 0x600 +#define RX_ALLOC_SIZE 0x620 +#define ULI526X_RESET 1 +#define CR0_DEFAULT 0 +#define CR6_DEFAULT 0x22200000 +#define CR7_DEFAULT 0x180c1 +#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */ +#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */ +#define MAX_PACKET_SIZE 1514 +#define ULI5261_MAX_MULTICAST 14 +#define RX_COPY_SIZE 100 +#define MAX_CHECK_PACKET 0x8000 + +#define ULI526X_10MHF 0 +#define ULI526X_100MHF 1 +#define ULI526X_10MFD 4 +#define ULI526X_100MFD 5 +#define ULI526X_AUTO 8 + +#define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */ +#define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */ +#define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */ +#define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */ +#define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */ +#define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */ + +#define ULI526X_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */ +#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s" */ +#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s" */ 
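+/* ULI526X_TX_KICK and ULI526X_TX_TIMEOUT are checked from uli526x_timer(): a stalled Tx queue is first re-polled, and if it is still stuck after the time-out the chip is reset */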
+ +#define ULI526X_DBUG(dbug_now, msg, value) \ +do { \ + if (uli526x_debug || (dbug_now)) \ + pr_err("%s %lx\n", (msg), (long) (value)); \ +} while (0) + +#define SHOW_MEDIA_TYPE(mode) \ + pr_err("Change Speed to %sMhz %s duplex\n", \ + mode & 1 ? "100" : "10", \ + mode & 4 ? "full" : "half"); + + +/* CR9 definition: SROM/MII */ +#define CR9_SROM_READ 0x4800 +#define CR9_SRCS 0x1 +#define CR9_SRCLK 0x2 +#define CR9_CRDOUT 0x8 +#define SROM_DATA_0 0x0 +#define SROM_DATA_1 0x4 +#define PHY_DATA_1 0x20000 +#define PHY_DATA_0 0x00000 +#define MDCLKH 0x10000 + +#define PHY_POWER_DOWN 0x800 + +#define SROM_V41_CODE 0x14 + +/* Structure/enum declaration ------------------------------- */ +struct tx_desc { + __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ + char *tx_buf_ptr; /* Data for us */ + struct tx_desc *next_tx_desc; +} __attribute__(( aligned(32) )); + +struct rx_desc { + __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ + struct sk_buff *rx_skb_ptr; /* Data for us */ + struct rx_desc *next_rx_desc; +} __attribute__(( aligned(32) )); + +struct uli526x_board_info { + struct uli_phy_ops { + void (*write)(struct uli526x_board_info *, u8, u8, u16); + u16 (*read)(struct uli526x_board_info *, u8, u8); + } phy; + struct net_device *next_dev; /* next device */ + struct pci_dev *pdev; /* PCI device */ + spinlock_t lock; + + void __iomem *ioaddr; /* I/O base address */ + u32 cr0_data; + u32 cr5_data; + u32 cr6_data; + u32 cr7_data; + u32 cr15_data; + + /* pointer for memory physical address */ + dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */ + dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */ + dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */ + dma_addr_t first_tx_desc_dma; + dma_addr_t first_rx_desc_dma; + + /* descriptor pointer */ + unsigned char *buf_pool_ptr; /* Tx buffer pool memory */ + unsigned char *buf_pool_start; /* Tx buffer pool align dword */ + unsigned char *desc_pool_ptr; /* descriptor pool memory */ + struct tx_desc *first_tx_desc; + struct tx_desc *tx_insert_ptr; + struct tx_desc *tx_remove_ptr; + struct rx_desc *first_rx_desc; + struct rx_desc *rx_insert_ptr; + struct rx_desc *rx_ready_ptr; /* packet come pointer */ + unsigned long tx_packet_cnt; /* transmitted packet count */ + unsigned long rx_avail_cnt; /* available rx descriptor count */ + unsigned long interval_rx_cnt; /* rx packet count a callback time */ + + u16 dbug_cnt; + u16 NIC_capability; /* NIC media capability */ + u16 PHY_reg4; /* Saved Phyxcer register 4 value */ + + u8 media_mode; /* user specify media mode */ + u8 op_mode; /* real work media mode */ + u8 phy_addr; + u8 link_failed; /* Ever link failed */ + u8 wait_reset; /* Hardware failed, need to reset */ + struct timer_list timer; + + /* Driver defined statistic counter */ + unsigned long tx_fifo_underrun; + unsigned long tx_loss_carrier; + unsigned long tx_no_carrier; + unsigned long tx_late_collision; + unsigned long tx_excessive_collision; + unsigned long tx_jabber_timeout; + unsigned long reset_count; + unsigned long reset_cr8; + unsigned long reset_fatal; + unsigned long reset_TXtimeout; + + /* NIC SROM data */ + unsigned char srom[128]; + u8 init; +}; + +enum uli526x_offsets { + DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20, + DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48, + DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70, + DCR15 = 0x78 +}; + +enum uli526x_CR6_bits { + CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80, + CR6_FDM = 
0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000, + CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000 +}; + +/* Global variable declaration ----------------------------- */ +static int printed_version; +static const char version[] = + "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")"; + +static int uli526x_debug; +static unsigned char uli526x_media_mode = ULI526X_AUTO; +static u32 uli526x_cr6_user_set; + +/* For module input parameter */ +static int debug; +static u32 cr6set; +static int mode = 8; + +/* function declaration ------------------------------------- */ +static int uli526x_open(struct net_device *); +static netdev_tx_t uli526x_start_xmit(struct sk_buff *, + struct net_device *); +static int uli526x_stop(struct net_device *); +static void uli526x_set_filter_mode(struct net_device *); +static const struct ethtool_ops netdev_ethtool_ops; +static u16 read_srom_word(struct uli526x_board_info *, int); +static irqreturn_t uli526x_interrupt(int, void *); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void uli526x_poll(struct net_device *dev); +#endif +static void uli526x_descriptor_init(struct net_device *, void __iomem *); +static void allocate_rx_buffer(struct net_device *); +static void update_cr6(u32, void __iomem *); +static void send_filter_frame(struct net_device *, int); +static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8); +static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8); +static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16); +static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16); +static void phy_write_1bit(struct uli526x_board_info *db, u32); +static u16 phy_read_1bit(struct uli526x_board_info *db); +static u8 uli526x_sense_speed(struct uli526x_board_info *); +static void uli526x_process_mode(struct uli526x_board_info *); +static void uli526x_timer(unsigned long); +static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *); +static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *); +static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *); +static void uli526x_dynamic_reset(struct net_device *); +static void uli526x_free_rxbuffer(struct uli526x_board_info *); +static void uli526x_init(struct net_device *); +static void uli526x_set_phyxcer(struct uli526x_board_info *); + +static void srom_clk_write(struct uli526x_board_info *db, u32 data) +{ + void __iomem *ioaddr = db->ioaddr; + + uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS); + udelay(5); + uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK); + udelay(5); + uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS); + udelay(5); +} + +/* ULI526X network board routine ---------------------------- */ + +static const struct net_device_ops netdev_ops = { + .ndo_open = uli526x_open, + .ndo_stop = uli526x_stop, + .ndo_start_xmit = uli526x_start_xmit, + .ndo_set_rx_mode = uli526x_set_filter_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = uli526x_poll, +#endif +}; + +/* + * Search ULI526X board, allocate space and register it + */ + +static int uli526x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct uli526x_board_info *db; /* board information structure */ + struct net_device *dev; + void __iomem *ioaddr; + int i, err; + + ULI526X_DBUG(0, "uli526x_init_one()", 0); + + if (!printed_version++) + pr_info("%s\n", version); + + /* 
Init network device */ + dev = alloc_etherdev(sizeof(*db)); + if (dev == NULL) + return -ENOMEM; + SET_NETDEV_DEV(dev, &pdev->dev); + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + pr_warn("32-bit PCI DMA not available\n"); + err = -ENODEV; + goto err_out_free; + } + + /* Enable Master/IO access, Disable memory access */ + err = pci_enable_device(pdev); + if (err) + goto err_out_free; + + if (!pci_resource_start(pdev, 0)) { + pr_err("I/O base is zero\n"); + err = -ENODEV; + goto err_out_disable; + } + + if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) { + pr_err("Allocated I/O size too small\n"); + err = -ENODEV; + goto err_out_disable; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err < 0) { + pr_err("Failed to request PCI regions\n"); + goto err_out_disable; + } + + /* Init system & device */ + db = netdev_priv(dev); + + /* Allocate Tx/Rx descriptor memory */ + err = -ENOMEM; + + db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); + if (!db->desc_pool_ptr) + goto err_out_release; + + db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); + if (!db->buf_pool_ptr) + goto err_out_free_tx_desc; + + db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; + db->first_tx_desc_dma = db->desc_pool_dma_ptr; + db->buf_pool_start = db->buf_pool_ptr; + db->buf_pool_dma_start = db->buf_pool_dma_ptr; + + switch (ent->driver_data) { + case PCI_ULI5263_ID: + db->phy.write = phy_writeby_cr10; + db->phy.read = phy_readby_cr10; + break; + default: + db->phy.write = phy_writeby_cr9; + db->phy.read = phy_readby_cr9; + break; + } + + /* IO region. */ + ioaddr = pci_iomap(pdev, 0, 0); + if (!ioaddr) + goto err_out_free_tx_buf; + + db->ioaddr = ioaddr; + db->pdev = pdev; + db->init = 1; + + pci_set_drvdata(pdev, dev); + + /* Register some necessary functions */ + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + + spin_lock_init(&db->lock); + + + /* read 64 word srom data */ + for (i = 0; i < 64; i++) + ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i)); + + /* Set Node address */ + if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ + { + uw32(DCR0, 0x10000); //Diagnosis mode + uw32(DCR13, 0x1c0); //Reset dianostic pointer port + uw32(DCR14, 0); //Clear reset port + uw32(DCR14, 0x10); //Reset ID Table pointer + uw32(DCR14, 0); //Clear reset port + uw32(DCR13, 0); //Clear CR13 + uw32(DCR13, 0x1b0); //Select ID Table access port + //Read MAC address from CR14 + for (i = 0; i < 6; i++) + dev->dev_addr[i] = ur32(DCR14); + //Read end + uw32(DCR13, 0); //Clear CR13 + uw32(DCR0, 0); //Clear CR0 + udelay(10); + } + else /*Exist SROM*/ + { + for (i = 0; i < 6; i++) + dev->dev_addr[i] = db->srom[20 + i]; + } + err = register_netdev (dev); + if (err) + goto err_out_unmap; + + netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n", + ent->driver_data >> 16, pci_name(pdev), + dev->dev_addr, pdev->irq); + + pci_set_master(pdev); + + return 0; + +err_out_unmap: + pci_iounmap(pdev, db->ioaddr); +err_out_free_tx_buf: + pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, + db->buf_pool_ptr, db->buf_pool_dma_ptr); +err_out_free_tx_desc: + pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, + db->desc_pool_ptr, db->desc_pool_dma_ptr); +err_out_release: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_free: + free_netdev(dev); + + return 
err; +} + + +static void uli526x_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct uli526x_board_info *db = netdev_priv(dev); + + unregister_netdev(dev); + pci_iounmap(pdev, db->ioaddr); + pci_free_consistent(db->pdev, sizeof(struct tx_desc) * + DESC_ALL_CNT + 0x20, db->desc_pool_ptr, + db->desc_pool_dma_ptr); + pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, + db->buf_pool_ptr, db->buf_pool_dma_ptr); + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + + +/* + * Open the interface. + * The interface is opened whenever "ifconfig" activates it. + */ + +static int uli526x_open(struct net_device *dev) +{ + int ret; + struct uli526x_board_info *db = netdev_priv(dev); + + ULI526X_DBUG(0, "uli526x_open", 0); + + /* system variable init */ + db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set; + db->tx_packet_cnt = 0; + db->rx_avail_cnt = 0; + db->link_failed = 1; + netif_carrier_off(dev); + db->wait_reset = 0; + + db->NIC_capability = 0xf; /* All capability*/ + db->PHY_reg4 = 0x1e0; + + /* CR6 operation mode decision */ + db->cr6_data |= ULI526X_TXTH_256; + db->cr0_data = CR0_DEFAULT; + + /* Initialize ULI526X board */ + uli526x_init(dev); + + ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) + return ret; + + /* Active System Interface */ + netif_wake_queue(dev); + + /* set and active a timer process */ + init_timer(&db->timer); + db->timer.expires = ULI526X_TIMER_WUT + HZ * 2; + db->timer.data = (unsigned long)dev; + db->timer.function = uli526x_timer; + add_timer(&db->timer); + + return 0; +} + + +/* Initialize ULI526X board + * Reset ULI526X board + * Initialize TX/Rx descriptor chain structure + * Send the set-up frame + * Enable Tx/Rx machine + */ + +static void uli526x_init(struct net_device *dev) +{ + struct uli526x_board_info *db = netdev_priv(dev); + struct uli_phy_ops *phy = &db->phy; + void __iomem *ioaddr = db->ioaddr; + u8 phy_tmp; + u8 timeout; + u16 phy_reg_reset; + + + ULI526X_DBUG(0, "uli526x_init()", 0); + + /* Reset M526x MAC controller */ + uw32(DCR0, ULI526X_RESET); /* RESET MAC */ + udelay(100); + uw32(DCR0, db->cr0_data); + udelay(5); + + /* Phy addr : In some boards,M5261/M5263 phy address != 1 */ + db->phy_addr = 1; + for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) { + u16 phy_value; + + phy_value = phy->read(db, phy_tmp, 3); //peer add + if (phy_value != 0xffff && phy_value != 0) { + db->phy_addr = phy_tmp; + break; + } + } + + if (phy_tmp == 32) + pr_warn("Can not find the phy address!!!\n"); + /* Parser SROM and media mode */ + db->media_mode = uli526x_media_mode; + + /* phyxcer capability setting */ + phy_reg_reset = phy->read(db, db->phy_addr, 0); + phy_reg_reset = (phy_reg_reset | 0x8000); + phy->write(db, db->phy_addr, 0, phy_reg_reset); + + /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management + * functions") or phy data sheet for details on phy reset + */ + udelay(500); + timeout = 10; + while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000) + udelay(100); + + /* Process Phyxcer Media Mode */ + uli526x_set_phyxcer(db); + + /* Media Mode Process */ + if ( !(db->media_mode & ULI526X_AUTO) ) + db->op_mode = db->media_mode; /* Force Mode */ + + /* Initialize Transmit/Receive descriptor and CR3/4 */ + uli526x_descriptor_init(dev, ioaddr); + + /* Init CR6 to program M526X operation */ + update_cr6(db->cr6_data, ioaddr); + + /* Send setup frame */ + send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */ + + /* Init 
CR7, interrupt active bit */ + db->cr7_data = CR7_DEFAULT; + uw32(DCR7, db->cr7_data); + + /* Init CR15, Tx jabber and Rx watchdog timer */ + uw32(DCR15, db->cr15_data); + + /* Enable ULI526X Tx/Rx function */ + db->cr6_data |= CR6_RXSC | CR6_TXSC; + update_cr6(db->cr6_data, ioaddr); +} + + +/* + * Hardware start transmission. + * Send a packet to media from the upper layer. + */ + +static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + struct tx_desc *txptr; + unsigned long flags; + + ULI526X_DBUG(0, "uli526x_start_xmit", 0); + + /* Resource flag check */ + netif_stop_queue(dev); + + /* Too large packet check */ + if (skb->len > MAX_PACKET_SIZE) { + netdev_err(dev, "big packet = %d\n", (u16)skb->len); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + spin_lock_irqsave(&db->lock, flags); + + /* No Tx resource check, it never happen nromally */ + if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) { + spin_unlock_irqrestore(&db->lock, flags); + netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt); + return NETDEV_TX_BUSY; + } + + /* Disable NIC interrupt */ + uw32(DCR7, 0); + + /* transmit this packet */ + txptr = db->tx_insert_ptr; + skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); + txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); + + /* Point to next transmit free descriptor */ + db->tx_insert_ptr = txptr->next_tx_desc; + + /* Transmit Packet Process */ + if (db->tx_packet_cnt < TX_DESC_CNT) { + txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ + db->tx_packet_cnt++; /* Ready to send */ + uw32(DCR1, 0x1); /* Issue Tx polling */ + dev->trans_start = jiffies; /* saved time stamp */ + } + + /* Tx resource check */ + if ( db->tx_packet_cnt < TX_FREE_DESC_CNT ) + netif_wake_queue(dev); + + /* Restore CR7 to enable interrupt */ + spin_unlock_irqrestore(&db->lock, flags); + uw32(DCR7, db->cr7_data); + + /* free this SKB */ + dev_consume_skb_any(skb); + + return NETDEV_TX_OK; +} + + +/* + * Stop the interface. + * The interface is stopped when it is brought. + */ + +static int uli526x_stop(struct net_device *dev) +{ + struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + + /* disable system */ + netif_stop_queue(dev); + + /* deleted timer */ + del_timer_sync(&db->timer); + + /* Reset & stop ULI526X board */ + uw32(DCR0, ULI526X_RESET); + udelay(5); + db->phy.write(db, db->phy_addr, 0, 0x8000); + + /* free interrupt */ + free_irq(db->pdev->irq, dev); + + /* free allocated rx buffer */ + uli526x_free_rxbuffer(db); + + return 0; +} + + +/* + * M5261/M5263 insterrupt handler + * receive the packet to upper layer, free the transmitted packet + */ + +static irqreturn_t uli526x_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + unsigned long flags; + + spin_lock_irqsave(&db->lock, flags); + uw32(DCR7, 0); + + /* Got ULI526X status */ + db->cr5_data = ur32(DCR5); + uw32(DCR5, db->cr5_data); + if ( !(db->cr5_data & 0x180c1) ) { + /* Restore CR7 to enable interrupt mask */ + uw32(DCR7, db->cr7_data); + spin_unlock_irqrestore(&db->lock, flags); + return IRQ_HANDLED; + } + + /* Check system status */ + if (db->cr5_data & 0x2000) { + /* system bus error happen */ + ULI526X_DBUG(1, "System bus error happen. 
CR5=", db->cr5_data); + db->reset_fatal++; + db->wait_reset = 1; /* Need to RESET */ + spin_unlock_irqrestore(&db->lock, flags); + return IRQ_HANDLED; + } + + /* Received the coming packet */ + if ( (db->cr5_data & 0x40) && db->rx_avail_cnt ) + uli526x_rx_packet(dev, db); + + /* reallocate rx descriptor buffer */ + if (db->rx_avail_cntcr5_data & 0x01) + uli526x_free_tx_pkt(dev, db); + + /* Restore CR7 to enable interrupt mask */ + uw32(DCR7, db->cr7_data); + + spin_unlock_irqrestore(&db->lock, flags); + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void uli526x_poll(struct net_device *dev) +{ + struct uli526x_board_info *db = netdev_priv(dev); + + /* ISR grabs the irqsave lock, so this should be safe */ + uli526x_interrupt(db->pdev->irq, dev); +} +#endif + +/* + * Free TX resource after TX complete + */ + +static void uli526x_free_tx_pkt(struct net_device *dev, + struct uli526x_board_info * db) +{ + struct tx_desc *txptr; + u32 tdes0; + + txptr = db->tx_remove_ptr; + while(db->tx_packet_cnt) { + tdes0 = le32_to_cpu(txptr->tdes0); + if (tdes0 & 0x80000000) + break; + + /* A packet sent completed */ + db->tx_packet_cnt--; + dev->stats.tx_packets++; + + /* Transmit statistic counter */ + if ( tdes0 != 0x7fffffff ) { + dev->stats.collisions += (tdes0 >> 3) & 0xf; + dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; + if (tdes0 & TDES0_ERR_MASK) { + dev->stats.tx_errors++; + if (tdes0 & 0x0002) { /* UnderRun */ + db->tx_fifo_underrun++; + if ( !(db->cr6_data & CR6_SFT) ) { + db->cr6_data = db->cr6_data | CR6_SFT; + update_cr6(db->cr6_data, db->ioaddr); + } + } + if (tdes0 & 0x0100) + db->tx_excessive_collision++; + if (tdes0 & 0x0200) + db->tx_late_collision++; + if (tdes0 & 0x0400) + db->tx_no_carrier++; + if (tdes0 & 0x0800) + db->tx_loss_carrier++; + if (tdes0 & 0x4000) + db->tx_jabber_timeout++; + } + } + + txptr = txptr->next_tx_desc; + }/* End of while */ + + /* Update TX remove pointer to next */ + db->tx_remove_ptr = txptr; + + /* Resource available check */ + if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT ) + netif_wake_queue(dev); /* Active upper layer, send again */ +} + + +/* + * Receive the come packet and pass to upper layer + */ + +static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db) +{ + struct rx_desc *rxptr; + struct sk_buff *skb; + int rxlen; + u32 rdes0; + + rxptr = db->rx_ready_ptr; + + while(db->rx_avail_cnt) { + rdes0 = le32_to_cpu(rxptr->rdes0); + if (rdes0 & 0x80000000) /* packet owner check */ + { + break; + } + + db->rx_avail_cnt--; + db->interval_rx_cnt++; + + pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); + if ( (rdes0 & 0x300) != 0x300) { + /* A packet without First/Last flag */ + /* reuse this SKB */ + ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); + uli526x_reuse_skb(db, rxptr->rx_skb_ptr); + } else { + /* A packet with First/Last flag */ + rxlen = ( (rdes0 >> 16) & 0x3fff) - 4; + + /* error summary bit check */ + if (rdes0 & 0x8000) { + /* This is a error packet */ + dev->stats.rx_errors++; + if (rdes0 & 1) + dev->stats.rx_fifo_errors++; + if (rdes0 & 2) + dev->stats.rx_crc_errors++; + if (rdes0 & 0x80) + dev->stats.rx_length_errors++; + } + + if ( !(rdes0 & 0x8000) || + ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { + struct sk_buff *new_skb = NULL; + + skb = rxptr->rx_skb_ptr; + + /* Good packet, send to upper layer */ + /* Shorst packet used new SKB */ + if ((rxlen < RX_COPY_SIZE) && + (((new_skb = netdev_alloc_skb(dev, rxlen + 2)) != NULL))) { + skb = new_skb; 
+ /* size less than COPY_SIZE, allocate a rxlen SKB */ + skb_reserve(skb, 2); /* 16byte align */ + memcpy(skb_put(skb, rxlen), + skb_tail_pointer(rxptr->rx_skb_ptr), + rxlen); + uli526x_reuse_skb(db, rxptr->rx_skb_ptr); + } else + skb_put(skb, rxlen); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += rxlen; + + } else { + /* Reuse SKB buffer when the packet is error */ + ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); + uli526x_reuse_skb(db, rxptr->rx_skb_ptr); + } + } + + rxptr = rxptr->next_rx_desc; + } + + db->rx_ready_ptr = rxptr; +} + + +/* + * Set ULI526X multicast address + */ + +static void uli526x_set_filter_mode(struct net_device * dev) +{ + struct uli526x_board_info *db = netdev_priv(dev); + unsigned long flags; + + ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0); + spin_lock_irqsave(&db->lock, flags); + + if (dev->flags & IFF_PROMISC) { + ULI526X_DBUG(0, "Enable PROM Mode", 0); + db->cr6_data |= CR6_PM | CR6_PBF; + update_cr6(db->cr6_data, db->ioaddr); + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + if (dev->flags & IFF_ALLMULTI || + netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) { + ULI526X_DBUG(0, "Pass all multicast address", + netdev_mc_count(dev)); + db->cr6_data &= ~(CR6_PM | CR6_PBF); + db->cr6_data |= CR6_PAM; + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev)); + send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */ + spin_unlock_irqrestore(&db->lock, flags); +} + +static void +ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) +{ + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII); + + ecmd->advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg | + ADVERTISED_MII); + + + ecmd->port = PORT_MII; + ecmd->phy_address = db->phy_addr; + + ecmd->transceiver = XCVR_EXTERNAL; + + ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->duplex = DUPLEX_HALF; + + if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) + { + ethtool_cmd_speed_set(ecmd, SPEED_100); + } + if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) + { + ecmd->duplex = DUPLEX_FULL; + } + if(db->link_failed) + { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + if (db->media_mode & ULI526X_AUTO) + { + ecmd->autoneg = AUTONEG_ENABLE; + } +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct uli526x_board_info *np = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { + struct uli526x_board_info *np = netdev_priv(dev); + + ULi_ethtool_gset(np, cmd); + + return 0; +} + +static u32 netdev_get_link(struct net_device *dev) { + struct uli526x_board_info *np = netdev_priv(dev); + + if(np->link_failed) + return 0; + else + return 1; +} + +static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = WAKE_PHY | WAKE_MAGIC; + wol->wolopts = 0; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = 
netdev_get_settings, + .get_link = netdev_get_link, + .get_wol = uli526x_get_wol, +}; + +/* + * A periodic timer routine + * Dynamic media sense, allocate Rx buffer... + */ + +static void uli526x_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct uli526x_board_info *db = netdev_priv(dev); + struct uli_phy_ops *phy = &db->phy; + void __iomem *ioaddr = db->ioaddr; + unsigned long flags; + u8 tmp_cr12 = 0; + u32 tmp_cr8; + + //ULI526X_DBUG(0, "uli526x_timer()", 0); + spin_lock_irqsave(&db->lock, flags); + + + /* Dynamic reset ULI526X : system error or transmit time-out */ + tmp_cr8 = ur32(DCR8); + if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { + db->reset_cr8++; + db->wait_reset = 1; + } + db->interval_rx_cnt = 0; + + /* TX polling kick monitor */ + if ( db->tx_packet_cnt && + time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) { + uw32(DCR1, 0x1); // Tx polling again + + // TX Timeout + if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) { + db->reset_TXtimeout++; + db->wait_reset = 1; + netdev_err(dev, " Tx timeout - resetting\n"); + } + } + + if (db->wait_reset) { + ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt); + db->reset_count++; + uli526x_dynamic_reset(dev); + db->timer.expires = ULI526X_TIMER_WUT; + add_timer(&db->timer); + spin_unlock_irqrestore(&db->lock, flags); + return; + } + + /* Link status check, Dynamic media type change */ + if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0) + tmp_cr12 = 3; + + if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { + /* Link Failed */ + ULI526X_DBUG(0, "Link Failed", tmp_cr12); + netif_carrier_off(dev); + netdev_info(dev, "NIC Link is Down\n"); + db->link_failed = 1; + + /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ + /* AUTO don't need */ + if ( !(db->media_mode & 0x8) ) + phy->write(db, db->phy_addr, 0, 0x1000); + + /* AUTO mode, if INT phyxcer link failed, select EXT device */ + if (db->media_mode & ULI526X_AUTO) { + db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ + update_cr6(db->cr6_data, db->ioaddr); + } + } else + if ((tmp_cr12 & 0x3) && db->link_failed) { + ULI526X_DBUG(0, "Link link OK", tmp_cr12); + db->link_failed = 0; + + /* Auto Sense Speed */ + if ( (db->media_mode & ULI526X_AUTO) && + uli526x_sense_speed(db) ) + db->link_failed = 1; + uli526x_process_mode(db); + + if(db->link_failed==0) + { + netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n", + (db->op_mode == ULI526X_100MHF || + db->op_mode == ULI526X_100MFD) + ? 100 : 10, + (db->op_mode == ULI526X_10MFD || + db->op_mode == ULI526X_100MFD) + ? 
"Full" : "Half"); + netif_carrier_on(dev); + } + /* SHOW_MEDIA_TYPE(db->op_mode); */ + } + else if(!(tmp_cr12 & 0x3) && db->link_failed) + { + if(db->init==1) + { + netdev_info(dev, "NIC Link is Down\n"); + netif_carrier_off(dev); + } + } + db->init=0; + + /* Timer active again */ + db->timer.expires = ULI526X_TIMER_WUT; + add_timer(&db->timer); + spin_unlock_irqrestore(&db->lock, flags); +} + + +/* + * Stop ULI526X board + * Free Tx/Rx allocated memory + * Init system variable + */ + +static void uli526x_reset_prepare(struct net_device *dev) +{ + struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + + /* Sopt MAC controller */ + db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ + update_cr6(db->cr6_data, ioaddr); + uw32(DCR7, 0); /* Disable Interrupt */ + uw32(DCR5, ur32(DCR5)); + + /* Disable upper layer interface */ + netif_stop_queue(dev); + + /* Free Rx Allocate buffer */ + uli526x_free_rxbuffer(db); + + /* system variable init */ + db->tx_packet_cnt = 0; + db->rx_avail_cnt = 0; + db->link_failed = 1; + db->init=1; + db->wait_reset = 0; +} + + +/* + * Dynamic reset the ULI526X board + * Stop ULI526X board + * Free Tx/Rx allocated memory + * Reset ULI526X board + * Re-initialize ULI526X board + */ + +static void uli526x_dynamic_reset(struct net_device *dev) +{ + ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0); + + uli526x_reset_prepare(dev); + + /* Re-initialize ULI526X board */ + uli526x_init(dev); + + /* Restart upper layer interface */ + netif_wake_queue(dev); +} + + +#ifdef CONFIG_PM + +/* + * Suspend the interface. + */ + +static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + pci_power_t power_state; + int err; + + ULI526X_DBUG(0, "uli526x_suspend", 0); + + pci_save_state(pdev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + uli526x_reset_prepare(dev); + + power_state = pci_choose_state(pdev, state); + pci_enable_wake(pdev, power_state, 0); + err = pci_set_power_state(pdev, power_state); + if (err) { + netif_device_attach(dev); + /* Re-initialize ULI526X board */ + uli526x_init(dev); + /* Restart upper layer interface */ + netif_wake_queue(dev); + } + + return err; +} + +/* + * Resume the interface. 
+ */ + +static int uli526x_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + int err; + + ULI526X_DBUG(0, "uli526x_resume", 0); + + pci_restore_state(pdev); + + if (!netif_running(dev)) + return 0; + + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + netdev_warn(dev, "Could not put device into D0\n"); + return err; + } + + netif_device_attach(dev); + /* Re-initialize ULI526X board */ + uli526x_init(dev); + /* Restart upper layer interface */ + netif_wake_queue(dev); + + return 0; +} + +#else /* !CONFIG_PM */ + +#define uli526x_suspend NULL +#define uli526x_resume NULL + +#endif /* !CONFIG_PM */ + + +/* + * free all allocated rx buffer + */ + +static void uli526x_free_rxbuffer(struct uli526x_board_info * db) +{ + ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0); + + /* free allocated rx buffer */ + while (db->rx_avail_cnt) { + dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr); + db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc; + db->rx_avail_cnt--; + } +} + + +/* + * Reuse the SK buffer + */ + +static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb) +{ + struct rx_desc *rxptr = db->rx_insert_ptr; + + if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { + rxptr->rx_skb_ptr = skb; + rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, + skb_tail_pointer(skb), + RX_ALLOC_SIZE, + PCI_DMA_FROMDEVICE)); + wmb(); + rxptr->rdes0 = cpu_to_le32(0x80000000); + db->rx_avail_cnt++; + db->rx_insert_ptr = rxptr->next_rx_desc; + } else + ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt); +} + + +/* + * Initialize transmit/Receive descriptor + * Using Chain structure, and allocate Tx/Rx buffer + */ + +static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr) +{ + struct uli526x_board_info *db = netdev_priv(dev); + struct tx_desc *tmp_tx; + struct rx_desc *tmp_rx; + unsigned char *tmp_buf; + dma_addr_t tmp_tx_dma, tmp_rx_dma; + dma_addr_t tmp_buf_dma; + int i; + + ULI526X_DBUG(0, "uli526x_descriptor_init()", 0); + + /* tx descriptor start pointer */ + db->tx_insert_ptr = db->first_tx_desc; + db->tx_remove_ptr = db->first_tx_desc; + uw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */ + + /* rx descriptor start pointer */ + db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; + db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; + db->rx_insert_ptr = db->first_rx_desc; + db->rx_ready_ptr = db->first_rx_desc; + uw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */ + + /* Init Transmit chain */ + tmp_buf = db->buf_pool_start; + tmp_buf_dma = db->buf_pool_dma_start; + tmp_tx_dma = db->first_tx_desc_dma; + for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) { + tmp_tx->tx_buf_ptr = tmp_buf; + tmp_tx->tdes0 = cpu_to_le32(0); + tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */ + tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma); + tmp_tx_dma += sizeof(struct tx_desc); + tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma); + tmp_tx->next_tx_desc = tmp_tx + 1; + tmp_buf = tmp_buf + TX_BUF_ALLOC; + tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC; + } + (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma); + tmp_tx->next_tx_desc = db->first_tx_desc; + + /* Init Receive descriptor chain */ + tmp_rx_dma=db->first_rx_desc_dma; + for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) { + tmp_rx->rdes0 = cpu_to_le32(0); + tmp_rx->rdes1 = cpu_to_le32(0x01000600); + tmp_rx_dma += sizeof(struct rx_desc); + tmp_rx->rdes3 = 
cpu_to_le32(tmp_rx_dma); + tmp_rx->next_rx_desc = tmp_rx + 1; + } + (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma); + tmp_rx->next_rx_desc = db->first_rx_desc; + + /* pre-allocate Rx buffer */ + allocate_rx_buffer(dev); +} + + +/* + * Update CR6 value + * Firstly stop ULI526X, then written value and start + */ +static void update_cr6(u32 cr6_data, void __iomem *ioaddr) +{ + uw32(DCR6, cr6_data); + udelay(5); +} + + +/* + * Send a setup frame for M5261/M5263 + * This setup frame initialize ULI526X address filter mode + */ + +#ifdef __BIG_ENDIAN +#define FLT_SHIFT 16 +#else +#define FLT_SHIFT 0 +#endif + +static void send_filter_frame(struct net_device *dev, int mc_cnt) +{ + struct uli526x_board_info *db = netdev_priv(dev); + void __iomem *ioaddr = db->ioaddr; + struct netdev_hw_addr *ha; + struct tx_desc *txptr; + u16 * addrptr; + u32 * suptr; + int i; + + ULI526X_DBUG(0, "send_filter_frame()", 0); + + txptr = db->tx_insert_ptr; + suptr = (u32 *) txptr->tx_buf_ptr; + + /* Node address */ + addrptr = (u16 *) dev->dev_addr; + *suptr++ = addrptr[0] << FLT_SHIFT; + *suptr++ = addrptr[1] << FLT_SHIFT; + *suptr++ = addrptr[2] << FLT_SHIFT; + + /* broadcast address */ + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + + /* fit the multicast address */ + netdev_for_each_mc_addr(ha, dev) { + addrptr = (u16 *) ha->addr; + *suptr++ = addrptr[0] << FLT_SHIFT; + *suptr++ = addrptr[1] << FLT_SHIFT; + *suptr++ = addrptr[2] << FLT_SHIFT; + } + + for (i = netdev_mc_count(dev); i < 14; i++) { + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + *suptr++ = 0xffff << FLT_SHIFT; + } + + /* prepare the setup frame */ + db->tx_insert_ptr = txptr->next_tx_desc; + txptr->tdes1 = cpu_to_le32(0x890000c0); + + /* Resource Check and Send the setup packet */ + if (db->tx_packet_cnt < TX_DESC_CNT) { + /* Resource Empty */ + db->tx_packet_cnt++; + txptr->tdes0 = cpu_to_le32(0x80000000); + update_cr6(db->cr6_data | 0x2000, ioaddr); + uw32(DCR1, 0x1); /* Issue Tx polling */ + update_cr6(db->cr6_data, ioaddr); + dev->trans_start = jiffies; + } else + netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); +} + + +/* + * Allocate rx buffer, + * As possible as allocate maxiumn Rx buffer + */ + +static void allocate_rx_buffer(struct net_device *dev) +{ + struct uli526x_board_info *db = netdev_priv(dev); + struct rx_desc *rxptr; + struct sk_buff *skb; + + rxptr = db->rx_insert_ptr; + + while(db->rx_avail_cnt < RX_DESC_CNT) { + skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE); + if (skb == NULL) + break; + rxptr->rx_skb_ptr = skb; /* FIXME (?) */ + rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, + skb_tail_pointer(skb), + RX_ALLOC_SIZE, + PCI_DMA_FROMDEVICE)); + wmb(); + rxptr->rdes0 = cpu_to_le32(0x80000000); + rxptr = rxptr->next_rx_desc; + db->rx_avail_cnt++; + } + + db->rx_insert_ptr = rxptr; +} + + +/* + * Read one word data from the serial ROM + */ + +static u16 read_srom_word(struct uli526x_board_info *db, int offset) +{ + void __iomem *ioaddr = db->ioaddr; + u16 srom_data = 0; + int i; + + uw32(DCR9, CR9_SROM_READ); + uw32(DCR9, CR9_SROM_READ | CR9_SRCS); + + /* Send the Read Command 110b */ + srom_clk_write(db, SROM_DATA_1); + srom_clk_write(db, SROM_DATA_1); + srom_clk_write(db, SROM_DATA_0); + + /* Send the offset */ + for (i = 5; i >= 0; i--) { + srom_data = (offset & (1 << i)) ? 
SROM_DATA_1 : SROM_DATA_0; + srom_clk_write(db, srom_data); + } + + uw32(DCR9, CR9_SROM_READ | CR9_SRCS); + + for (i = 16; i > 0; i--) { + uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK); + udelay(5); + srom_data = (srom_data << 1) | + ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0); + uw32(DCR9, CR9_SROM_READ | CR9_SRCS); + udelay(5); + } + + uw32(DCR9, CR9_SROM_READ); + return srom_data; +} + + +/* + * Auto sense the media mode + */ + +static u8 uli526x_sense_speed(struct uli526x_board_info * db) +{ + struct uli_phy_ops *phy = &db->phy; + u8 ErrFlag = 0; + u16 phy_mode; + + phy_mode = phy->read(db, db->phy_addr, 1); + phy_mode = phy->read(db, db->phy_addr, 1); + + if ( (phy_mode & 0x24) == 0x24 ) { + + phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7); + if(phy_mode&0x8000) + phy_mode = 0x8000; + else if(phy_mode&0x4000) + phy_mode = 0x4000; + else if(phy_mode&0x2000) + phy_mode = 0x2000; + else + phy_mode = 0x1000; + + switch (phy_mode) { + case 0x1000: db->op_mode = ULI526X_10MHF; break; + case 0x2000: db->op_mode = ULI526X_10MFD; break; + case 0x4000: db->op_mode = ULI526X_100MHF; break; + case 0x8000: db->op_mode = ULI526X_100MFD; break; + default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break; + } + } else { + db->op_mode = ULI526X_10MHF; + ULI526X_DBUG(0, "Link Failed :", phy_mode); + ErrFlag = 1; + } + + return ErrFlag; +} + + +/* + * Set 10/100 phyxcer capability + * AUTO mode : phyxcer register4 is NIC capability + * Force mode: phyxcer register4 is the force media + */ + +static void uli526x_set_phyxcer(struct uli526x_board_info *db) +{ + struct uli_phy_ops *phy = &db->phy; + u16 phy_reg; + + /* Phyxcer capability setting */ + phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0; + + if (db->media_mode & ULI526X_AUTO) { + /* AUTO Mode */ + phy_reg |= db->PHY_reg4; + } else { + /* Force Mode */ + switch(db->media_mode) { + case ULI526X_10MHF: phy_reg |= 0x20; break; + case ULI526X_10MFD: phy_reg |= 0x40; break; + case ULI526X_100MHF: phy_reg |= 0x80; break; + case ULI526X_100MFD: phy_reg |= 0x100; break; + } + + } + + /* Write new capability to Phyxcer Reg4 */ + if ( !(phy_reg & 0x01e0)) { + phy_reg|=db->PHY_reg4; + db->media_mode|=ULI526X_AUTO; + } + phy->write(db, db->phy_addr, 4, phy_reg); + + /* Restart Auto-Negotiation */ + phy->write(db, db->phy_addr, 0, 0x1200); + udelay(50); +} + + +/* + * Process op-mode + AUTO mode : PHY controller in Auto-negotiation Mode + * Force mode: PHY controller in force mode with HUB + * N-way force capability with SWITCH + */ + +static void uli526x_process_mode(struct uli526x_board_info *db) +{ + struct uli_phy_ops *phy = &db->phy; + u16 phy_reg; + + /* Full Duplex Mode Check */ + if (db->op_mode & 0x4) + db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */ + else + db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */ + + update_cr6(db->cr6_data, db->ioaddr); + + /* 10/100M phyxcer force mode need */ + if (!(db->media_mode & 0x8)) { + /* Forece Mode */ + phy_reg = phy->read(db, db->phy_addr, 6); + if (!(phy_reg & 0x1)) { + /* parter without N-Way capability */ + phy_reg = 0x0; + switch(db->op_mode) { + case ULI526X_10MHF: phy_reg = 0x0; break; + case ULI526X_10MFD: phy_reg = 0x100; break; + case ULI526X_100MHF: phy_reg = 0x2000; break; + case ULI526X_100MFD: phy_reg = 0x2100; break; + } + phy->write(db, db->phy_addr, 0, phy_reg); + } + } +} + + +/* M5261/M5263 Chip */ +static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr, + u8 offset, u16 phy_data) +{ + u16 i; + + /* Send 33 synchronization clock to Phy controller */ + for (i = 
0; i < 35; i++) + phy_write_1bit(db, PHY_DATA_1); + + /* Send start command(01) to Phy */ + phy_write_1bit(db, PHY_DATA_0); + phy_write_1bit(db, PHY_DATA_1); + + /* Send write command(01) to Phy */ + phy_write_1bit(db, PHY_DATA_0); + phy_write_1bit(db, PHY_DATA_1); + + /* Send Phy address */ + for (i = 0x10; i > 0; i = i >> 1) + phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + + /* Send register address */ + for (i = 0x10; i > 0; i = i >> 1) + phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0); + + /* written trasnition */ + phy_write_1bit(db, PHY_DATA_1); + phy_write_1bit(db, PHY_DATA_0); + + /* Write a word data to PHY controller */ + for (i = 0x8000; i > 0; i >>= 1) + phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0); +} + +static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset) +{ + u16 phy_data; + int i; + + /* Send 33 synchronization clock to Phy controller */ + for (i = 0; i < 35; i++) + phy_write_1bit(db, PHY_DATA_1); + + /* Send start command(01) to Phy */ + phy_write_1bit(db, PHY_DATA_0); + phy_write_1bit(db, PHY_DATA_1); + + /* Send read command(10) to Phy */ + phy_write_1bit(db, PHY_DATA_1); + phy_write_1bit(db, PHY_DATA_0); + + /* Send Phy address */ + for (i = 0x10; i > 0; i = i >> 1) + phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + + /* Send register address */ + for (i = 0x10; i > 0; i = i >> 1) + phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0); + + /* Skip transition state */ + phy_read_1bit(db); + + /* read 16bit data */ + for (phy_data = 0, i = 0; i < 16; i++) { + phy_data <<= 1; + phy_data |= phy_read_1bit(db); + } + + return phy_data; +} + +static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr, + u8 offset) +{ + void __iomem *ioaddr = db->ioaddr; + u32 cr10_value = phy_addr; + + cr10_value = (cr10_value << 5) + offset; + cr10_value = (cr10_value << 16) + 0x08000000; + uw32(DCR10, cr10_value); + udelay(1); + while (1) { + cr10_value = ur32(DCR10); + if (cr10_value & 0x10000000) + break; + } + return cr10_value & 0x0ffff; +} + +static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr, + u8 offset, u16 phy_data) +{ + void __iomem *ioaddr = db->ioaddr; + u32 cr10_value = phy_addr; + + cr10_value = (cr10_value << 5) + offset; + cr10_value = (cr10_value << 16) + 0x04000000 + phy_data; + uw32(DCR10, cr10_value); + udelay(1); +} +/* + * Write one bit data to Phy Controller + */ + +static void phy_write_1bit(struct uli526x_board_info *db, u32 data) +{ + void __iomem *ioaddr = db->ioaddr; + + uw32(DCR9, data); /* MII Clock Low */ + udelay(1); + uw32(DCR9, data | MDCLKH); /* MII Clock High */ + udelay(1); + uw32(DCR9, data); /* MII Clock Low */ + udelay(1); +} + + +/* + * Read one bit phy data from PHY controller + */ + +static u16 phy_read_1bit(struct uli526x_board_info *db) +{ + void __iomem *ioaddr = db->ioaddr; + u16 phy_data; + + uw32(DCR9, 0x50000); + udelay(1); + phy_data = (ur32(DCR9) >> 19) & 0x1; + uw32(DCR9, 0x40000); + udelay(1); + + return phy_data; +} + + +static const struct pci_device_id uli526x_pci_tbl[] = { + { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID }, + { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl); + + +static struct pci_driver uli526x_driver = { + .name = "uli526x", + .id_table = uli526x_pci_tbl, + .probe = uli526x_init_one, + .remove = uli526x_remove_one, + .suspend = uli526x_suspend, + .resume = uli526x_resume, +}; + +MODULE_AUTHOR("Peer Chen, 
peer.chen@uli.com.tw"); +MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(debug, int, 0644); +module_param(mode, int, 0); +module_param(cr6set, int, 0); +MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)"); +MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); + +/* Description: + * when user used insmod to add module, system invoked init_module() + * to register the services. + */ + +static int __init uli526x_init_module(void) +{ + + pr_info("%s\n", version); + printed_version = 1; + + ULI526X_DBUG(0, "init_module() ", debug); + + if (debug) + uli526x_debug = debug; /* set debug flag */ + if (cr6set) + uli526x_cr6_user_set = cr6set; + + switch (mode) { + case ULI526X_10MHF: + case ULI526X_100MHF: + case ULI526X_10MFD: + case ULI526X_100MFD: + uli526x_media_mode = mode; + break; + default: + uli526x_media_mode = ULI526X_AUTO; + break; + } + + return pci_register_driver(&uli526x_driver); +} + + +/* + * Description: + * when user used rmmod to delete module, system invoked clean_module() + * to un-register all registered services. + */ + +static void __exit uli526x_cleanup_module(void) +{ + ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug); + pci_unregister_driver(&uli526x_driver); +} + +module_init(uli526x_init_module); +module_exit(uli526x_cleanup_module); diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c new file mode 100644 index 000000000..9beb3d34d --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -0,0 +1,1665 @@ +/* winbond-840.c: A Linux PCI network adapter device driver. */ +/* + Written 1998-2001 by Donald Becker. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support and updates available at + http://www.scyld.com/network/drivers.html + + Do not remove the copyright information. + Do not change the version information unless an improvement has been made. + Merely removing my name, as Compex has done in the past, does not count + as an improvement. + + Changelog: + * ported to 2.4 + ??? + * spin lock update, memory barriers, new style dma mappings + limit each tx buffer to < 1024 bytes + remove DescIntr from Rx descriptors (that's an Tx flag) + remove next pointer from Tx descriptors + synchronize tx_q_bytes + software reset in tx_timeout + Copyright (C) 2000 Manfred Spraul + * further cleanups + power management. 
+ support for big endian descriptors + Copyright (C) 2001 Manfred Spraul + * ethtool support (jgarzik) + * Replace some MII-related magic numbers with constants (jgarzik) + + TODO: + * enable pci_power_off + * Wake-On-LAN +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "winbond-840" +#define DRV_VERSION "1.01-e" +#define DRV_RELDATE "Sep-11-2006" + + +/* Automatically extracted configuration info: +probe-func: winbond840_probe +config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840 + +c-help-name: Winbond W89c840 PCI Ethernet support +c-help-symbol: CONFIG_WINBOND_840 +c-help: This driver is for the Winbond W89c840 chip. It also works with +c-help: the TX9882 chip on the Compex RL100-ATX board. +c-help: More specific information and updates are available from +c-help: http://www.scyld.com/network/drivers.html +*/ + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ +static int max_interrupt_work = 20; +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The '840 uses a 64 element hash table based on the Ethernet CRC. */ +static int multicast_filter_limit = 32; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1518 effectively disables this feature. */ +static int rx_copybreak; + +/* Used to pass the media type, etc. + Both 'options[]' and 'full_duplex[]' should exist for driver + interoperability. + The media type is usually passed in 'options[]'. +*/ +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + The compiler will convert '%'<2^N> into a bit mask. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings. */ +#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ +#define TX_QUEUE_LEN_RESTART 5 + +#define TX_BUFLIMIT (1024-128) + +/* The presumed FIFO size for working around the Tx-FIFO-overflow bug. + To avoid overflowing we don't queue again until we have room for a + full-size packet. + */ +#define TX_FIFO_SIZE (2048) +#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16) + + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +/* Include files, designed to support most kernel versions 2.0.0 and later. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Processor type for cache alignment. */ +#include +#include + +#include "tulip.h" + +#undef PKT_BUF_SZ /* tulip.h also defines this */ +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +/* These identify the driver base version and may not be removed. 
*/ +static const char version[] __initconst = + "v" DRV_VERSION " (2.4 port) " + DRV_RELDATE " Donald Becker \n" + " http://www.scyld.com/network/drivers.html\n"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_param(max_interrupt_work, int, 0); +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param(multicast_filter_limit, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt"); +MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)"); +MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses"); +MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex"); +MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)"); + +/* + Theory of Operation + +I. Board Compatibility + +This driver is for the Winbond w89c840 chip. + +II. Board-specific settings + +None. + +III. Driver operation + +This chip is very similar to the Digital 21*4* "Tulip" family. The first +twelve registers and the descriptor format are nearly identical. Read a +Tulip manual for operational details. + +A significant difference is that the multicast filter and station address are +stored in registers rather than loaded through a pseudo-transmit packet. + +Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a +full-sized packet we must use both data buffers in a descriptor. Thus the +driver uses ring mode where descriptors are implicitly sequential in memory, +rather than using the second descriptor address as a chain pointer to +subsequent descriptors. + +IV. Notes + +If you are going to almost clone a Tulip, why not go all the way and avoid +the need for a new driver? + +IVb. References + +http://www.scyld.com/expert/100mbps.html +http://www.scyld.com/expert/NWay.html +http://www.winbond.com.tw/ + +IVc. Errata + +A horrible bug exists in the transmit FIFO. Apparently the chip doesn't +correctly detect a full FIFO, and queuing more than 2048 bytes may result in +silent data corruption. + +Test with 'ping -s 10000' on a fast computer. + +*/ + + + +/* + PCI probe table. +*/ +enum chip_capability_flags { + CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8, +}; + +static const struct pci_device_id w840_pci_tbl[] = { + { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, + { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, + { } +}; +MODULE_DEVICE_TABLE(pci, w840_pci_tbl); + +enum { + netdev_res_size = 128, /* size of PCI BAR resource */ +}; + +struct pci_id_info { + const char *name; + int drv_flags; /* Driver use, intended as capability flags. */ +}; + +static const struct pci_id_info pci_id_tbl[] = { + { /* Sometime a Level-One switch card. */ + "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, + { "Winbond W89c840", CanHaveMII | HasBrokenTx}, + { "Compex RL100-ATX", CanHaveMII | HasBrokenTx}, + { } /* terminate list. */ +}; + +/* This driver was written to use PCI memory space, however some x86 systems + work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config +*/ + +/* Offsets to the Command and Status Registers, "CSRs". + While similar to the Tulip, these registers are longword aligned. 
+ Note: It's not useful to define symbolic names for every register bit in + the device. The name can only partially document the semantics and make + the driver longer and more difficult to read. +*/ +enum w840_offsets { + PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08, + RxRingPtr=0x0C, TxRingPtr=0x10, + IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C, + RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C, + CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */ + MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40, + CurTxDescAddr=0x4C, CurTxBufAddr=0x50, +}; + +/* Bits in the NetworkConfig register. */ +enum rx_mode_bits { + AcceptErr=0x80, + RxAcceptBroadcast=0x20, AcceptMulticast=0x10, + RxAcceptAllPhys=0x08, AcceptMyPhys=0x02, +}; + +enum mii_reg_bits { + MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000, + MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000, +}; + +/* The Tulip Rx and Tx buffer descriptors. */ +struct w840_rx_desc { + s32 status; + s32 length; + u32 buffer1; + u32 buffer2; +}; + +struct w840_tx_desc { + s32 status; + s32 length; + u32 buffer1, buffer2; +}; + +#define MII_CNT 1 /* winbond only supports one MII */ +struct netdev_private { + struct w840_rx_desc *rx_ring; + dma_addr_t rx_addr[RX_RING_SIZE]; + struct w840_tx_desc *tx_ring; + dma_addr_t tx_addr[TX_RING_SIZE]; + dma_addr_t ring_dma_addr; + /* The addresses of receive-in-place skbuffs. */ + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + /* The saved address of a sent-in-place packet/buffer, for later free(). */ + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + struct net_device_stats stats; + struct timer_list timer; /* Media monitoring timer. */ + /* Frequently used values: keep some adjacent for cache effect. */ + spinlock_t lock; + int chip_id, drv_flags; + struct pci_dev *pci_dev; + int csr6; + struct w840_rx_desc *rx_head_desc; + unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + unsigned int cur_tx, dirty_tx; + unsigned int tx_q_bytes; + unsigned int tx_full; /* The Tx queue is full. */ + /* MII transceiver section. */ + int mii_cnt; /* MII device addresses. 
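In practice this holds the number of MII PHYs found during probing.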
*/ + unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */ + u32 mii; + struct mii_if_info mii_if; + void __iomem *base_addr; +}; + +static int eeprom_read(void __iomem *ioaddr, int location); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int location, int value); +static int netdev_open(struct net_device *dev); +static int update_link(struct net_device *dev); +static void netdev_timer(unsigned long data); +static void init_rxtx_rings(struct net_device *dev); +static void free_rxtx_rings(struct netdev_private *np); +static void init_registers(struct net_device *dev); +static void tx_timeout(struct net_device *dev); +static int alloc_ringdesc(struct net_device *dev); +static void free_ringdesc(struct netdev_private *np); +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t intr_handler(int irq, void *dev_instance); +static void netdev_error(struct net_device *dev, int intr_status); +static int netdev_rx(struct net_device *dev); +static u32 __set_rx_mode(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static struct net_device_stats *get_stats(struct net_device *dev); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static const struct ethtool_ops netdev_ethtool_ops; +static int netdev_close(struct net_device *dev); + +static const struct net_device_ops netdev_ops = { + .ndo_open = netdev_open, + .ndo_stop = netdev_close, + .ndo_start_xmit = start_tx, + .ndo_get_stats = get_stats, + .ndo_set_rx_mode = set_rx_mode, + .ndo_do_ioctl = netdev_ioctl, + .ndo_tx_timeout = tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct netdev_private *np; + static int find_cnt; + int chip_idx = ent->driver_data; + int irq; + int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; + void __iomem *ioaddr; + + i = pci_enable_device(pdev); + if (i) return i; + + pci_set_master(pdev); + + irq = pdev->irq; + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + pr_warn("Device %s disabled due to DMA limitations\n", + pci_name(pdev)); + return -EIO; + } + dev = alloc_etherdev(sizeof(*np)); + if (!dev) + return -ENOMEM; + SET_NETDEV_DEV(dev, &pdev->dev); + + if (pci_request_regions(pdev, DRV_NAME)) + goto err_out_netdev; + + ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size); + if (!ioaddr) + goto err_out_free_res; + + for (i = 0; i < 3; i++) + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); + + /* Reset the chip to erase previous misconfiguration. + No hold time required! */ + iowrite32(0x00000001, ioaddr + PCIBusCfg); + + np = netdev_priv(dev); + np->pci_dev = pdev; + np->chip_id = chip_idx; + np->drv_flags = pci_id_tbl[chip_idx].drv_flags; + spin_lock_init(&np->lock); + np->mii_if.dev = dev; + np->mii_if.mdio_read = mdio_read; + np->mii_if.mdio_write = mdio_write; + np->base_addr = ioaddr; + + pci_set_drvdata(pdev, dev); + + if (dev->mem_start) + option = dev->mem_start; + + /* The lower four bits are the media type. 
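Bit 9 (0x200) selects full duplex; a non-zero media type is only reported as ignored.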
*/ + if (option > 0) { + if (option & 0x200) + np->mii_if.full_duplex = 1; + if (option & 15) + dev_info(&dev->dev, + "ignoring user supplied media type %d", + option & 15); + } + if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0) + np->mii_if.full_duplex = 1; + + if (np->mii_if.full_duplex) + np->mii_if.force_media = 1; + + /* The chip-specific entries in the device structure. */ + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + i = register_netdev(dev); + if (i) + goto err_out_cleardev; + + dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n", + pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq); + + if (np->drv_flags & CanHaveMII) { + int phy, phy_idx = 0; + for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) { + int mii_status = mdio_read(dev, phy, MII_BMSR); + if (mii_status != 0xffff && mii_status != 0x0000) { + np->phys[phy_idx++] = phy; + np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); + np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+ + mdio_read(dev, phy, MII_PHYSID2); + dev_info(&dev->dev, + "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n", + np->mii, phy, mii_status, + np->mii_if.advertising); + } + } + np->mii_cnt = phy_idx; + np->mii_if.phy_id = np->phys[0]; + if (phy_idx == 0) { + dev_warn(&dev->dev, + "MII PHY not found -- this device may not operate correctly\n"); + } + } + + find_cnt++; + return 0; + +err_out_cleardev: + pci_iounmap(pdev, ioaddr); +err_out_free_res: + pci_release_regions(pdev); +err_out_netdev: + free_netdev (dev); + return -ENODEV; +} + + +/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are + often serial bit streams generated by the host processor. + The example below is for the common 93c46 EEPROM, 64 16 bit words. */ + +/* Delay between EEPROM clock transitions. + No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need + a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that + made udelay() unreliable. + The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is + deprecated. +*/ +#define eeprom_delay(ee_addr) ioread32(ee_addr) + +enum EEPROM_Ctrl_Bits { + EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805, + EE_ChipSelect=0x801, EE_DataIn=0x08, +}; + +/* The EEPROM commands include the alway-set leading bit. */ +enum EEPROM_Cmds { + EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6), +}; + +static int eeprom_read(void __iomem *addr, int location) +{ + int i; + int retval = 0; + void __iomem *ee_addr = addr + EECtrl; + int read_cmd = location | EE_ReadCmd; + iowrite32(EE_ChipSelect, ee_addr); + + /* Shift the read command bits out. */ + for (i = 10; i >= 0; i--) { + short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0; + iowrite32(dataval, ee_addr); + eeprom_delay(ee_addr); + iowrite32(dataval | EE_ShiftClk, ee_addr); + eeprom_delay(ee_addr); + } + iowrite32(EE_ChipSelect, ee_addr); + eeprom_delay(ee_addr); + + for (i = 16; i > 0; i--) { + iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr); + eeprom_delay(ee_addr); + retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0); + iowrite32(EE_ChipSelect, ee_addr); + eeprom_delay(ee_addr); + } + + /* Terminate the EEPROM access. */ + iowrite32(0, ee_addr); + return retval; +} + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. See the MII specifications or DP83840A data sheet + for details. + + The maximum data clock rate is 2.5 Mhz. 
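Each clock transition below is followed by a dummy ioread32() (mdio_delay) that serves as the required delay.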
The minimum timing is usually + met by back-to-back 33Mhz PCI cycles. */ +#define mdio_delay(mdio_addr) ioread32(mdio_addr) + +/* Set iff a MII transceiver on any interface requires mdio preamble. + This only set with older transceivers, so the extra + code size of a per-interface flag is not worthwhile. */ +static char mii_preamble_required = 1; + +#define MDIO_WRITE0 (MDIO_EnbOutput) +#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput) + +/* Generate the preamble required for initial synchronization and + a few older transceivers. */ +static void mdio_sync(void __iomem *mdio_addr) +{ + int bits = 32; + + /* Establish sync by sending at least 32 logic ones. */ + while (--bits >= 0) { + iowrite32(MDIO_WRITE1, mdio_addr); + mdio_delay(mdio_addr); + iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } +} + +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *mdio_addr = np->base_addr + MIICtrl; + int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; + int i, retval = 0; + + if (mii_preamble_required) + mdio_sync(mdio_addr); + + /* Shift the read command bits out. */ + for (i = 15; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + + iowrite32(dataval, mdio_addr); + mdio_delay(mdio_addr); + iowrite32(dataval | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 20; i > 0; i--) { + iowrite32(MDIO_EnbIn, mdio_addr); + mdio_delay(mdio_addr); + retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0); + iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } + return (retval>>1) & 0xffff; +} + +static void mdio_write(struct net_device *dev, int phy_id, int location, int value) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *mdio_addr = np->base_addr + MIICtrl; + int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; + int i; + + if (location == 4 && phy_id == np->phys[0]) + np->mii_if.advertising = value; + + if (mii_preamble_required) + mdio_sync(mdio_addr); + + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + + iowrite32(dataval, mdio_addr); + mdio_delay(mdio_addr); + iowrite32(dataval | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } + /* Clear out extra bits. */ + for (i = 2; i > 0; i--) { + iowrite32(MDIO_EnbIn, mdio_addr); + mdio_delay(mdio_addr); + iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } +} + + +static int netdev_open(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + const int irq = np->pci_dev->irq; + int i; + + iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ + + netif_device_detach(dev); + i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); + if (i) + goto out_err; + + if (debug > 1) + netdev_dbg(dev, "w89c840_open() irq %d\n", irq); + + if((i=alloc_ringdesc(dev))) + goto out_err; + + spin_lock_irq(&np->lock); + netif_device_attach(dev); + init_registers(dev); + spin_unlock_irq(&np->lock); + + netif_start_queue(dev); + if (debug > 2) + netdev_dbg(dev, "Done netdev_open()\n"); + + /* Set the timer to check for link beat. 
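It first fires after one second; netdev_timer() then re-arms it every ten seconds.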
*/ + init_timer(&np->timer); + np->timer.expires = jiffies + 1*HZ; + np->timer.data = (unsigned long)dev; + np->timer.function = netdev_timer; /* timer handler */ + add_timer(&np->timer); + return 0; +out_err: + netif_device_attach(dev); + return i; +} + +#define MII_DAVICOM_DM9101 0x0181b800 + +static int update_link(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int duplex, fasteth, result, mii_reg; + + /* BSMR */ + mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); + + if (mii_reg == 0xffff) + return np->csr6; + /* reread: the link status bit is sticky */ + mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); + if (!(mii_reg & 0x4)) { + if (netif_carrier_ok(dev)) { + if (debug) + dev_info(&dev->dev, + "MII #%d reports no link. Disabling watchdog\n", + np->phys[0]); + netif_carrier_off(dev); + } + return np->csr6; + } + if (!netif_carrier_ok(dev)) { + if (debug) + dev_info(&dev->dev, + "MII #%d link is back. Enabling watchdog\n", + np->phys[0]); + netif_carrier_on(dev); + } + + if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) { + /* If the link partner doesn't support autonegotiation + * the MII detects it's abilities with the "parallel detection". + * Some MIIs update the LPA register to the result of the parallel + * detection, some don't. + * The Davicom PHY [at least 0181b800] doesn't. + * Instead bit 9 and 13 of the BMCR are updated to the result + * of the negotiation.. + */ + mii_reg = mdio_read(dev, np->phys[0], MII_BMCR); + duplex = mii_reg & BMCR_FULLDPLX; + fasteth = mii_reg & BMCR_SPEED100; + } else { + int negotiated; + mii_reg = mdio_read(dev, np->phys[0], MII_LPA); + negotiated = mii_reg & np->mii_if.advertising; + + duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL); + fasteth = negotiated & 0x380; + } + duplex |= np->mii_if.force_media; + /* remove fastether and fullduplex */ + result = np->csr6 & ~0x20000200; + if (duplex) + result |= 0x200; + if (fasteth) + result |= 0x20000000; + if (result != np->csr6 && debug) + dev_info(&dev->dev, + "Setting %dMBit-%s-duplex based on MII#%d\n", + fasteth ? 100 : 10, duplex ? 
"full" : "half", + np->phys[0]); + return result; +} + +#define RXTX_TIMEOUT 2000 +static inline void update_csr6(struct net_device *dev, int new) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + int limit = RXTX_TIMEOUT; + + if (!netif_device_present(dev)) + new = 0; + if (new==np->csr6) + return; + /* stop both Tx and Rx processes */ + iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig); + /* wait until they have really stopped */ + for (;;) { + int csr5 = ioread32(ioaddr + IntrStatus); + int t; + + t = (csr5 >> 17) & 0x07; + if (t==0||t==1) { + /* rx stopped */ + t = (csr5 >> 20) & 0x07; + if (t==0||t==1) + break; + } + + limit--; + if(!limit) { + dev_info(&dev->dev, + "couldn't stop rxtx, IntrStatus %xh\n", csr5); + break; + } + udelay(1); + } + np->csr6 = new; + /* and restart them with the new configuration */ + iowrite32(np->csr6, ioaddr + NetworkConfig); + if (new & 0x200) + np->mii_if.full_duplex = 1; +} + +static void netdev_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + + if (debug > 2) + netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n", + ioread32(ioaddr + IntrStatus), + ioread32(ioaddr + NetworkConfig)); + spin_lock_irq(&np->lock); + update_csr6(dev, update_link(dev)); + spin_unlock_irq(&np->lock); + np->timer.expires = jiffies + 10*HZ; + add_timer(&np->timer); +} + +static void init_rxtx_rings(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + np->rx_head_desc = &np->rx_ring[0]; + np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE]; + + /* Initial all Rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].length = np->rx_buf_sz; + np->rx_ring[i].status = 0; + np->rx_skbuff[i] = NULL; + } + /* Mark the last entry as wrapping the ring. */ + np->rx_ring[i-1].length |= DescEndRing; + + /* Fill in the Rx buffers. Handle allocation failure gracefully. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz); + np->rx_skbuff[i] = skb; + if (skb == NULL) + break; + np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, + np->rx_buf_sz,PCI_DMA_FROMDEVICE); + + np->rx_ring[i].buffer1 = np->rx_addr[i]; + np->rx_ring[i].status = DescOwned; + } + + np->cur_rx = 0; + np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + + /* Initialize the Tx descriptors */ + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = NULL; + np->tx_ring[i].status = 0; + } + np->tx_full = 0; + np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0; + + iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr); + iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE, + np->base_addr + TxRingPtr); + +} + +static void free_rxtx_rings(struct netdev_private* np) +{ + int i; + /* Free all the skbuffs in the Rx queue. 
*/ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].status = 0; + if (np->rx_skbuff[i]) { + pci_unmap_single(np->pci_dev, + np->rx_addr[i], + np->rx_skbuff[i]->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(np->rx_skbuff[i]); + } + np->rx_skbuff[i] = NULL; + } + for (i = 0; i < TX_RING_SIZE; i++) { + if (np->tx_skbuff[i]) { + pci_unmap_single(np->pci_dev, + np->tx_addr[i], + np->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb(np->tx_skbuff[i]); + } + np->tx_skbuff[i] = NULL; + } +} + +static void init_registers(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + int i; + + for (i = 0; i < 6; i++) + iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); + + /* Initialize other registers. */ +#ifdef __BIG_ENDIAN + i = (1<<20); /* Big-endian descriptors */ +#else + i = 0; +#endif + i |= (0x04<<2); /* skip length 4 u32 */ + i |= 0x02; /* give Rx priority */ + + /* Configure the PCI bus bursts and FIFO thresholds. + 486: Set 8 longword cache alignment, 8 longword burst. + 586: Set 16 longword cache alignment, no burst limit. + Cache alignment bits 15:14 Burst length 13:8 + 0000 0000 align to cache 0800 8 longwords + 4000 8 longwords 0100 1 longword 1000 16 longwords + 8000 16 longwords 0200 2 longwords 2000 32 longwords + C000 32 longwords 0400 4 longwords */ + +#if defined (__i386__) && !defined(MODULE) + /* When not a module we can work around broken '486 PCI boards. */ + if (boot_cpu_data.x86 <= 4) { + i |= 0x4800; + dev_info(&dev->dev, + "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n"); + } else { + i |= 0xE000; + } +#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) + i |= 0xE000; +#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) + i |= 0x4800; +#else +#warning Processor architecture undefined + i |= 0x4800; +#endif + iowrite32(i, ioaddr + PCIBusCfg); + + np->csr6 = 0; + /* 128 byte Tx threshold; + Transmit on; Receive on; */ + update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev)); + + /* Clear and Enable interrupts by setting the interrupt mask. */ + iowrite32(0x1A0F5, ioaddr + IntrStatus); + iowrite32(0x1A0F5, ioaddr + IntrEnable); + + iowrite32(0, ioaddr + RxStartDemand); +} + +static void tx_timeout(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + const int irq = np->pci_dev->irq; + + dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n", + ioread32(ioaddr + IntrStatus)); + + { + int i; + printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) + printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_CONT " %08x", np->tx_ring[i].status); + printk(KERN_CONT "\n"); + } + printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n", + np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); + printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C)); + + disable_irq(irq); + spin_lock_irq(&np->lock); + /* + * Under high load dirty_tx and the internal tx descriptor pointer + * come out of sync, thus perform a software reset and reinitialize + * everything. 
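+ * Writing 1 to PCIBusCfg below performs that soft reset; the rings and + * registers are then rebuilt while the interrupt is disabled.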
+ */ + + iowrite32(1, np->base_addr+PCIBusCfg); + udelay(1); + + free_rxtx_rings(np); + init_rxtx_rings(dev); + init_registers(dev); + spin_unlock_irq(&np->lock); + enable_irq(irq); + + netif_wake_queue(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + np->stats.tx_errors++; +} + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static int alloc_ringdesc(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + + np->rx_ring = pci_alloc_consistent(np->pci_dev, + sizeof(struct w840_rx_desc)*RX_RING_SIZE + + sizeof(struct w840_tx_desc)*TX_RING_SIZE, + &np->ring_dma_addr); + if(!np->rx_ring) + return -ENOMEM; + init_rxtx_rings(dev); + return 0; +} + +static void free_ringdesc(struct netdev_private *np) +{ + pci_free_consistent(np->pci_dev, + sizeof(struct w840_rx_desc)*RX_RING_SIZE + + sizeof(struct w840_tx_desc)*TX_RING_SIZE, + np->rx_ring, np->ring_dma_addr); + +} + +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + unsigned entry; + + /* Caution: the write order is important here, set the field + with the "ownership" bits last. */ + + /* Calculate the next Tx descriptor entry. */ + entry = np->cur_tx % TX_RING_SIZE; + + np->tx_addr[entry] = pci_map_single(np->pci_dev, + skb->data,skb->len, PCI_DMA_TODEVICE); + np->tx_skbuff[entry] = skb; + + np->tx_ring[entry].buffer1 = np->tx_addr[entry]; + if (skb->len < TX_BUFLIMIT) { + np->tx_ring[entry].length = DescWholePkt | skb->len; + } else { + int len = skb->len - TX_BUFLIMIT; + + np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT; + np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT; + } + if(entry == TX_RING_SIZE-1) + np->tx_ring[entry].length |= DescEndRing; + + /* Now acquire the irq spinlock. + * The difficult race is the ordering between + * increasing np->cur_tx and setting DescOwned: + * - if np->cur_tx is increased first the interrupt + * handler could consider the packet as transmitted + * since DescOwned is cleared. + * - If DescOwned is set first the NIC could report the + * packet as sent, but the interrupt handler would ignore it + * since the np->cur_tx was not yet increased. + */ + spin_lock_irq(&np->lock); + np->cur_tx++; + + wmb(); /* flush length, buffer1, buffer2 */ + np->tx_ring[entry].status = DescOwned; + wmb(); /* flush status and kick the hardware */ + iowrite32(0, np->base_addr + TxStartDemand); + np->tx_q_bytes += skb->len; + /* Work around horrible bug in the chip by marking the queue as full + when we do not have FIFO room for a maximum sized packet. */ + if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN || + ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) { + netif_stop_queue(dev); + wmb(); + np->tx_full = 1; + } + spin_unlock_irq(&np->lock); + + if (debug > 4) { + netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n", + np->cur_tx, entry); + } + return NETDEV_TX_OK; +} + +static void netdev_tx_done(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { + int entry = np->dirty_tx % TX_RING_SIZE; + int tx_status = np->tx_ring[entry].status; + + if (tx_status < 0) + break; + if (tx_status & 0x8000) { /* There was an error, log it. 
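Bit 15 is the error summary; the individual cause bits are counted into the stats below.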
*/ +#ifndef final_version + if (debug > 1) + netdev_dbg(dev, "Transmit error, Tx status %08x\n", + tx_status); +#endif + np->stats.tx_errors++; + if (tx_status & 0x0104) np->stats.tx_aborted_errors++; + if (tx_status & 0x0C80) np->stats.tx_carrier_errors++; + if (tx_status & 0x0200) np->stats.tx_window_errors++; + if (tx_status & 0x0002) np->stats.tx_fifo_errors++; + if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0) + np->stats.tx_heartbeat_errors++; + } else { +#ifndef final_version + if (debug > 3) + netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n", + entry, tx_status); +#endif + np->stats.tx_bytes += np->tx_skbuff[entry]->len; + np->stats.collisions += (tx_status >> 3) & 15; + np->stats.tx_packets++; + } + /* Free the original skb. */ + pci_unmap_single(np->pci_dev,np->tx_addr[entry], + np->tx_skbuff[entry]->len, + PCI_DMA_TODEVICE); + np->tx_q_bytes -= np->tx_skbuff[entry]->len; + dev_kfree_skb_irq(np->tx_skbuff[entry]); + np->tx_skbuff[entry] = NULL; + } + if (np->tx_full && + np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART && + np->tx_q_bytes < TX_BUG_FIFO_LIMIT) { + /* The ring is no longer full, clear tbusy. */ + np->tx_full = 0; + wmb(); + netif_wake_queue(dev); + } +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t intr_handler(int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *)dev_instance; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + int work_limit = max_interrupt_work; + int handled = 0; + + if (!netif_device_present(dev)) + return IRQ_NONE; + do { + u32 intr_status = ioread32(ioaddr + IntrStatus); + + /* Acknowledge all of the current interrupt sources ASAP. */ + iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus); + + if (debug > 4) + netdev_dbg(dev, "Interrupt, status %04x\n", intr_status); + + if ((intr_status & (NormalIntr|AbnormalIntr)) == 0) + break; + + handled = 1; + + if (intr_status & (RxIntr | RxNoBuf)) + netdev_rx(dev); + if (intr_status & RxNoBuf) + iowrite32(0, ioaddr + RxStartDemand); + + if (intr_status & (TxNoBuf | TxIntr) && + np->cur_tx != np->dirty_tx) { + spin_lock(&np->lock); + netdev_tx_done(dev); + spin_unlock(&np->lock); + } + + /* Abnormal error summary/uncommon events handlers. */ + if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError | + TimerInt | TxDied)) + netdev_error(dev, intr_status); + + if (--work_limit < 0) { + dev_warn(&dev->dev, + "Too much work at interrupt, status=0x%04x\n", + intr_status); + /* Set the timer to re-enable the other interrupts after + 10*82usec ticks. */ + spin_lock(&np->lock); + if (netif_device_present(dev)) { + iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable); + iowrite32(10, ioaddr + GPTimer); + } + spin_unlock(&np->lock); + break; + } + } while (1); + + if (debug > 3) + netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n", + ioread32(ioaddr + IntrStatus)); + return IRQ_RETVAL(handled); +} + +/* This routine is logically part of the interrupt handler, but separated + for clarity and better register allocation. */ +static int netdev_rx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int entry = np->cur_rx % RX_RING_SIZE; + int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; + + if (debug > 4) { + netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n", + entry, np->rx_ring[entry].status); + } + + /* If EOP is set on the next entry, it's a new packet. Send it up. 
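A negative status below means the descriptor is still owned by the chip, so the loop stops there.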
*/ + while (--work_limit >= 0) { + struct w840_rx_desc *desc = np->rx_head_desc; + s32 status = desc->status; + + if (debug > 4) + netdev_dbg(dev, " netdev_rx() status was %08x\n", + status); + if (status < 0) + break; + if ((status & 0x38008300) != 0x0300) { + if ((status & 0x38000300) != 0x0300) { + /* Ingore earlier buffers. */ + if ((status & 0xffff) != 0x7fff) { + dev_warn(&dev->dev, + "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n", + np->cur_rx, status); + np->stats.rx_length_errors++; + } + } else if (status & 0x8000) { + /* There was a fatal error. */ + if (debug > 2) + netdev_dbg(dev, "Receive error, Rx status %08x\n", + status); + np->stats.rx_errors++; /* end of a packet.*/ + if (status & 0x0890) np->stats.rx_length_errors++; + if (status & 0x004C) np->stats.rx_frame_errors++; + if (status & 0x0002) np->stats.rx_crc_errors++; + } + } else { + struct sk_buff *skb; + /* Omit the four octet CRC from the length. */ + int pkt_len = ((status >> 16) & 0x7ff) - 4; + +#ifndef final_version + if (debug > 4) + netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n", + pkt_len, status); +#endif + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], + np->rx_skbuff[entry]->len, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); + skb_put(skb, pkt_len); + pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], + np->rx_skbuff[entry]->len, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(np->pci_dev,np->rx_addr[entry], + np->rx_skbuff[entry]->len, + PCI_DMA_FROMDEVICE); + skb_put(skb = np->rx_skbuff[entry], pkt_len); + np->rx_skbuff[entry] = NULL; + } +#ifndef final_version /* Remove after testing. */ + /* You will want this info for the initial debug. */ + if (debug > 5) + netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n", + &skb->data[0], &skb->data[6], + skb->data[12], skb->data[13], + &skb->data[14]); +#endif + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + np->stats.rx_packets++; + np->stats.rx_bytes += pkt_len; + } + entry = (++np->cur_rx) % RX_RING_SIZE; + np->rx_head_desc = &np->rx_ring[entry]; + } + + /* Refill the Rx ring buffers. */ + for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { + struct sk_buff *skb; + entry = np->dirty_rx % RX_RING_SIZE; + if (np->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(dev, np->rx_buf_sz); + np->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. 
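The empty slot is retried on a later netdev_rx() pass.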
*/ + np->rx_addr[entry] = pci_map_single(np->pci_dev, + skb->data, + np->rx_buf_sz, PCI_DMA_FROMDEVICE); + np->rx_ring[entry].buffer1 = np->rx_addr[entry]; + } + wmb(); + np->rx_ring[entry].status = DescOwned; + } + + return 0; +} + +static void netdev_error(struct net_device *dev, int intr_status) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + + if (debug > 2) + netdev_dbg(dev, "Abnormal event, %08x\n", intr_status); + if (intr_status == 0xffffffff) + return; + spin_lock(&np->lock); + if (intr_status & TxFIFOUnderflow) { + int new; + /* Bump up the Tx threshold */ +#if 0 + /* This causes lots of dropped packets, + * and under high load even tx_timeouts + */ + new = np->csr6 + 0x4000; +#else + new = (np->csr6 >> 14)&0x7f; + if (new < 64) + new *= 2; + else + new = 127; /* load full packet before starting */ + new = (np->csr6 & ~(0x7F << 14)) | (new<<14); +#endif + netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new); + update_csr6(dev, new); + } + if (intr_status & RxDied) { /* Missed a Rx frame. */ + np->stats.rx_errors++; + } + if (intr_status & TimerInt) { + /* Re-enable other interrupts. */ + if (netif_device_present(dev)) + iowrite32(0x1A0F5, ioaddr + IntrEnable); + } + np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; + iowrite32(0, ioaddr + RxStartDemand); + spin_unlock(&np->lock); +} + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + + /* The chip only need report frame silently dropped. */ + spin_lock_irq(&np->lock); + if (netif_running(dev) && netif_device_present(dev)) + np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; + spin_unlock_irq(&np->lock); + + return &np->stats; +} + + +static u32 __set_rx_mode(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + u32 rx_mode; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys + | AcceptMyPhys; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. 
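In either case both 32-bit hash filter words are set to all ones so every multicast frame passes.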
*/ + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; + } else { + struct netdev_hw_addr *ha; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + int filbit; + + filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; + filbit &= 0x3f; + mc_filter[filbit >> 5] |= 1 << (filbit & 31); + } + rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; + } + iowrite32(mc_filter[0], ioaddr + MulticastFilter0); + iowrite32(mc_filter[1], ioaddr + MulticastFilter1); + return rx_mode; +} + +static void set_rx_mode(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + u32 rx_mode = __set_rx_mode(dev); + spin_lock_irq(&np->lock); + update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode); + spin_unlock_irq(&np->lock); +} + +static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct netdev_private *np = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct netdev_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_gset(&np->mii_if, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct netdev_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_sset(&np->mii_if, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_nway_reset(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_nway_restart(&np->mii_if); +} + +static u32 netdev_get_link(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_link_ok(&np->mii_if); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 value) +{ + debug = value; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .nway_reset = netdev_nway_reset, + .get_link = netdev_get_link, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct mii_ioctl_data *data = if_mii(rq); + struct netdev_private *np = netdev_priv(dev); + + switch(cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f; + /* Fall Through */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + spin_lock_irq(&np->lock); + data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f); + spin_unlock_irq(&np->lock); + return 0; + + case SIOCSMIIREG: /* Write MII PHY register. 
*/ + spin_lock_irq(&np->lock); + mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); + spin_unlock_irq(&np->lock); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int netdev_close(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + + netif_stop_queue(dev); + + if (debug > 1) { + netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n", + ioread32(ioaddr + IntrStatus), + ioread32(ioaddr + NetworkConfig)); + netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n", + np->cur_tx, np->dirty_tx, + np->cur_rx, np->dirty_rx); + } + + /* Stop the chip's Tx and Rx processes. */ + spin_lock_irq(&np->lock); + netif_device_detach(dev); + update_csr6(dev, 0); + iowrite32(0x0000, ioaddr + IntrEnable); + spin_unlock_irq(&np->lock); + + free_irq(np->pci_dev->irq, dev); + wmb(); + netif_device_attach(dev); + + if (ioread32(ioaddr + NetworkConfig) != 0xffffffff) + np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; + +#ifdef __i386__ + if (debug > 2) { + int i; + + printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", + i, np->tx_ring[i].length, + np->tx_ring[i].status, np->tx_ring[i].buffer1); + printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) { + printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", + i, np->rx_ring[i].length, + np->rx_ring[i].status, np->rx_ring[i].buffer1); + } + } +#endif /* __i386__ debugging only */ + + del_timer_sync(&np->timer); + + free_rxtx_rings(np); + free_ringdesc(np); + + return 0; +} + +static void w840_remove1(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct netdev_private *np = netdev_priv(dev); + unregister_netdev(dev); + pci_release_regions(pdev); + pci_iounmap(pdev, np->base_addr); + free_netdev(dev); + } +} + +#ifdef CONFIG_PM + +/* + * suspend/resume synchronization: + * - open, close, do_ioctl: + * rtnl_lock, & netif_device_detach after the rtnl_unlock. + * - get_stats: + * spin_lock_irq(np->lock), doesn't touch hw if not present + * - start_xmit: + * synchronize_irq + netif_tx_disable; + * - tx_timeout: + * netif_device_detach + netif_tx_disable; + * - set_multicast_list + * netif_device_detach + netif_tx_disable; + * - interrupt handler + * doesn't touch hw if not present, synchronize_irq waits for + * running instances of the interrupt handler. + * + * Disabling hw requires clearing csr6 & IntrEnable. + * update_csr6 & all function that write IntrEnable check netif_device_present + * before settings any bits. + * + * Detach must occur under spin_unlock_irq(), interrupts from a detached + * device would cause an irq storm. + */ +static int w840_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base_addr; + + rtnl_lock(); + if (netif_running (dev)) { + del_timer_sync(&np->timer); + + spin_lock_irq(&np->lock); + netif_device_detach(dev); + update_csr6(dev, 0); + iowrite32(0, ioaddr + IntrEnable); + spin_unlock_irq(&np->lock); + + synchronize_irq(np->pci_dev->irq); + netif_tx_disable(dev); + + np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; + + /* no more hardware accesses behind this line. 
*/ + + BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable)); + + /* pci_power_off(pdev, -1); */ + + free_rxtx_rings(np); + } else { + netif_device_detach(dev); + } + rtnl_unlock(); + return 0; +} + +static int w840_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct netdev_private *np = netdev_priv(dev); + int retval = 0; + + rtnl_lock(); + if (netif_device_present(dev)) + goto out; /* device not suspended */ + if (netif_running(dev)) { + if ((retval = pci_enable_device(pdev))) { + dev_err(&dev->dev, + "pci_enable_device failed in resume\n"); + goto out; + } + spin_lock_irq(&np->lock); + iowrite32(1, np->base_addr+PCIBusCfg); + ioread32(np->base_addr+PCIBusCfg); + udelay(1); + netif_device_attach(dev); + init_rxtx_rings(dev); + init_registers(dev); + spin_unlock_irq(&np->lock); + + netif_wake_queue(dev); + + mod_timer(&np->timer, jiffies + 1*HZ); + } else { + netif_device_attach(dev); + } +out: + rtnl_unlock(); + return retval; +} +#endif + +static struct pci_driver w840_driver = { + .name = DRV_NAME, + .id_table = w840_pci_tbl, + .probe = w840_probe1, + .remove = w840_remove1, +#ifdef CONFIG_PM + .suspend = w840_suspend, + .resume = w840_resume, +#endif +}; + +static int __init w840_init(void) +{ + printk(version); + return pci_register_driver(&w840_driver); +} + +static void __exit w840_exit(void) +{ + pci_unregister_driver(&w840_driver); +} + +module_init(w840_init); +module_exit(w840_exit); diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c new file mode 100644 index 000000000..0e721cedf --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c @@ -0,0 +1,1171 @@ +/* + * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards + * + * This software is (C) by the respective authors, and licensed under the GPL + * License. + * + * Written by Arjan van de Ven for Red Hat, Inc. + * Based on work by Jeff Garzik, Doug Ledford and Donald Becker + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + * + * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $ + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#ifdef CONFIG_NET_POLL_CONTROLLER +#include +#endif + +MODULE_DESCRIPTION("Xircom Cardbus ethernet driver"); +MODULE_AUTHOR("Arjan van de Ven "); +MODULE_LICENSE("GPL"); + +#define xw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define xr32(reg) ioread32(ioaddr + (reg)) +#define xr8(reg) ioread8(ioaddr + (reg)) + +/* IO registers on the card, offsets */ +#define CSR0 0x00 +#define CSR1 0x08 +#define CSR2 0x10 +#define CSR3 0x18 +#define CSR4 0x20 +#define CSR5 0x28 +#define CSR6 0x30 +#define CSR7 0x38 +#define CSR8 0x40 +#define CSR9 0x48 +#define CSR10 0x50 +#define CSR11 0x58 +#define CSR12 0x60 +#define CSR13 0x68 +#define CSR14 0x70 +#define CSR15 0x78 +#define CSR16 0x80 + +/* PCI registers */ +#define PCI_POWERMGMT 0x40 + +/* Offsets of the buffers within the descriptor pages, in bytes */ + +#define NUMDESCRIPTORS 4 + +static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144}; + + +struct xircom_private { + /* Send and receive buffers, kernel-addressable and dma addressable forms */ + + __le32 *rx_buffer; + __le32 *tx_buffer; + + dma_addr_t rx_dma_handle; + dma_addr_t tx_dma_handle; + + struct sk_buff *tx_skb[4]; + + void __iomem *ioaddr; + int open; + + /* transmit_used is the rotating counter that indicates which transmit + descriptor has to be used next */ + int transmit_used; + + /* Spinlock to serialize register operations. + It must be helt while manipulating the following registers: + CSR0, CSR6, CSR7, CSR9, CSR10, CSR15 + */ + spinlock_t lock; + + struct pci_dev *pdev; + struct net_device *dev; +}; + + +/* Function prototypes */ +static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id); +static void xircom_remove(struct pci_dev *pdev); +static irqreturn_t xircom_interrupt(int irq, void *dev_instance); +static netdev_tx_t xircom_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int xircom_open(struct net_device *dev); +static int xircom_close(struct net_device *dev); +static void xircom_up(struct xircom_private *card); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void xircom_poll_controller(struct net_device *dev); +#endif + +static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset); +static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset); +static void read_mac_address(struct xircom_private *card); +static void transceiver_voodoo(struct xircom_private *card); +static void initialize_card(struct xircom_private *card); +static void trigger_transmit(struct xircom_private *card); +static void trigger_receive(struct xircom_private *card); +static void setup_descriptors(struct xircom_private *card); +static void remove_descriptors(struct xircom_private *card); +static int link_status_changed(struct xircom_private *card); +static void activate_receiver(struct xircom_private *card); +static void deactivate_receiver(struct xircom_private *card); +static void activate_transmitter(struct xircom_private *card); +static void deactivate_transmitter(struct xircom_private *card); +static void enable_transmit_interrupt(struct xircom_private *card); +static void enable_receive_interrupt(struct xircom_private *card); +static void 
enable_link_interrupt(struct xircom_private *card); +static void disable_all_interrupts(struct xircom_private *card); +static int link_status(struct xircom_private *card); + + + +static const struct pci_device_id xircom_pci_table[] = { + { PCI_VDEVICE(XIRCOM, 0x0003), }, + {0,}, +}; +MODULE_DEVICE_TABLE(pci, xircom_pci_table); + +static struct pci_driver xircom_ops = { + .name = "xircom_cb", + .id_table = xircom_pci_table, + .probe = xircom_probe, + .remove = xircom_remove, +}; + + +#if defined DEBUG && DEBUG > 1 +static void print_binary(unsigned int number) +{ + int i,i2; + char buffer[64]; + memset(buffer,0,64); + i2=0; + for (i=31;i>=0;i--) { + if (number & (1<dev; + struct net_device *dev = NULL; + struct xircom_private *private; + unsigned long flags; + unsigned short tmp16; + int rc; + + /* First do the PCI initialisation */ + + rc = pci_enable_device(pdev); + if (rc < 0) + goto out; + + /* disable all powermanagement */ + pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); + + pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/ + + /* clear PCI status, if any */ + pci_read_config_word (pdev,PCI_STATUS, &tmp16); + pci_write_config_word (pdev, PCI_STATUS,tmp16); + + rc = pci_request_regions(pdev, "xircom_cb"); + if (rc < 0) { + pr_err("%s: failed to allocate io-region\n", __func__); + goto err_disable; + } + + rc = -ENOMEM; + /* + Before changing the hardware, allocate the memory. + This way, we can fail gracefully if not enough memory + is available. + */ + dev = alloc_etherdev(sizeof(struct xircom_private)); + if (!dev) + goto err_release; + + private = netdev_priv(dev); + + /* Allocate the send/receive buffers */ + private->rx_buffer = dma_alloc_coherent(d, 8192, + &private->rx_dma_handle, + GFP_KERNEL); + if (private->rx_buffer == NULL) + goto rx_buf_fail; + + private->tx_buffer = dma_alloc_coherent(d, 8192, + &private->tx_dma_handle, + GFP_KERNEL); + if (private->tx_buffer == NULL) + goto tx_buf_fail; + + SET_NETDEV_DEV(dev, &pdev->dev); + + + private->dev = dev; + private->pdev = pdev; + + /* IO range. */ + private->ioaddr = pci_iomap(pdev, 0, 0); + if (!private->ioaddr) + goto reg_fail; + + spin_lock_init(&private->lock); + + initialize_card(private); + read_mac_address(private); + setup_descriptors(private); + + dev->netdev_ops = &netdev_ops; + pci_set_drvdata(pdev, dev); + + rc = register_netdev(dev); + if (rc < 0) { + pr_err("%s: netdevice registration failed\n", __func__); + goto err_unmap; + } + + netdev_info(dev, "Xircom cardbus revision %i at irq %i\n", + pdev->revision, pdev->irq); + /* start the transmitter to get a heartbeat */ + /* TODO: send 2 dummy packets here */ + transceiver_voodoo(private); + + spin_lock_irqsave(&private->lock,flags); + activate_transmitter(private); + activate_receiver(private); + spin_unlock_irqrestore(&private->lock,flags); + + trigger_receive(private); +out: + return rc; + +err_unmap: + pci_iounmap(pdev, private->ioaddr); +reg_fail: + dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle); +tx_buf_fail: + dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle); +rx_buf_fail: + free_netdev(dev); +err_release: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); + goto out; +} + + +/* + xircom_remove is called on module-unload or on device-eject. + it unregisters the irq, io-region and network device. + Interrupts and such are already stopped in the "ifconfig ethX down" + code. 
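The error handling in xircom_probe above follows the usual PCI probe idiom: acquire resources in order, then unwind them in reverse through goto labels on failure. A minimal sketch of that idiom, assuming a hypothetical driver with a single coherent DMA buffer; the names (my_probe, my_priv, MY_BUF_SZ) are illustrative only and are not part of this driver:

/* Illustrative sketch of the probe-time unwind pattern, not driver code. */
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>

#define MY_BUF_SZ 8192        /* assumption: one 8 KiB coherent buffer */

struct my_priv {
        void __iomem *ioaddr;
        void *buf;
        dma_addr_t buf_dma;
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct net_device *dev;
        struct my_priv *priv;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        rc = pci_request_regions(pdev, "my_driver");
        if (rc)
                goto err_disable;

        rc = -ENOMEM;
        dev = alloc_etherdev(sizeof(*priv));
        if (!dev)
                goto err_release;
        priv = netdev_priv(dev);

        priv->buf = dma_alloc_coherent(&pdev->dev, MY_BUF_SZ,
                                       &priv->buf_dma, GFP_KERNEL);
        if (!priv->buf)
                goto err_free_dev;

        priv->ioaddr = pci_iomap(pdev, 0, 0);   /* map BAR 0, whole length */
        if (!priv->ioaddr)
                goto err_free_buf;

        rc = register_netdev(dev);
        if (rc)
                goto err_unmap;
        pci_set_drvdata(pdev, dev);
        return 0;

err_unmap:                      /* release in the reverse order of acquisition */
        pci_iounmap(pdev, priv->ioaddr);
err_free_buf:
        dma_free_coherent(&pdev->dev, MY_BUF_SZ, priv->buf, priv->buf_dma);
err_free_dev:
        free_netdev(dev);
err_release:
        pci_release_regions(pdev);
err_disable:
        pci_disable_device(pdev);
        return rc;
}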
+ */ +static void xircom_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct xircom_private *card = netdev_priv(dev); + struct device *d = &pdev->dev; + + unregister_netdev(dev); + pci_iounmap(pdev, card->ioaddr); + dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle); + dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static irqreturn_t xircom_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct xircom_private *card = netdev_priv(dev); + void __iomem *ioaddr = card->ioaddr; + unsigned int status; + int i; + + spin_lock(&card->lock); + status = xr32(CSR5); + +#if defined DEBUG && DEBUG > 1 + print_binary(status); + pr_debug("tx status 0x%08x 0x%08x\n", + card->tx_buffer[0], card->tx_buffer[4]); + pr_debug("rx status 0x%08x 0x%08x\n", + card->rx_buffer[0], card->rx_buffer[4]); +#endif + /* Handle shared irq and hotplug */ + if (status == 0 || status == 0xffffffff) { + spin_unlock(&card->lock); + return IRQ_NONE; + } + + if (link_status_changed(card)) { + int newlink; + netdev_dbg(dev, "Link status has changed\n"); + newlink = link_status(card); + netdev_info(dev, "Link is %d mbit\n", newlink); + if (newlink) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + } + + /* Clear all remaining interrupts */ + status |= 0xffffffff; /* FIXME: make this clear only the + real existing bits */ + xw32(CSR5, status); + + + for (i=0;ilock); + return IRQ_HANDLED; +} + +static netdev_tx_t xircom_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct xircom_private *card; + unsigned long flags; + int nextdescriptor; + int desc; + + card = netdev_priv(dev); + spin_lock_irqsave(&card->lock,flags); + + /* First see if we can free some descriptors */ + for (desc=0;desctransmit_used +1) % (NUMDESCRIPTORS); + desc = card->transmit_used; + + /* only send the packet if the descriptor is free */ + if (card->tx_buffer[4*desc]==0) { + /* Copy the packet data; zero the memory first as the card + sometimes sends more than you ask it to. */ + + memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); + skb_copy_from_linear_data(skb, + &(card->tx_buffer[bufferoffsets[desc] / 4]), + skb->len); + /* FIXME: The specification tells us that the length we send HAS to be a multiple of + 4 bytes. */ + + card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len); + if (desc == NUMDESCRIPTORS - 1) /* bit 25: last descriptor of the ring */ + card->tx_buffer[4*desc+1] |= cpu_to_le32(1<<25); + + card->tx_buffer[4*desc+1] |= cpu_to_le32(0xF0000000); + /* 0xF0... means want interrupts*/ + card->tx_skb[desc] = skb; + + wmb(); + /* This gives the descriptor to the card */ + card->tx_buffer[4*desc] = cpu_to_le32(0x80000000); + trigger_transmit(card); + if (card->tx_buffer[nextdescriptor*4] & cpu_to_le32(0x8000000)) { + /* next descriptor is occupied... */ + netif_stop_queue(dev); + } + card->transmit_used = nextdescriptor; + spin_unlock_irqrestore(&card->lock,flags); + return NETDEV_TX_OK; + } + + /* Uh oh... no free descriptor... 
drop the packet */ + netif_stop_queue(dev); + spin_unlock_irqrestore(&card->lock,flags); + trigger_transmit(card); + + return NETDEV_TX_BUSY; +} + + + + +static int xircom_open(struct net_device *dev) +{ + struct xircom_private *xp = netdev_priv(dev); + const int irq = xp->pdev->irq; + int retval; + + netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq); + retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); + if (retval) + return retval; + + xircom_up(xp); + xp->open = 1; + + return 0; +} + +static int xircom_close(struct net_device *dev) +{ + struct xircom_private *card; + unsigned long flags; + + card = netdev_priv(dev); + netif_stop_queue(dev); /* we don't want new packets */ + + + spin_lock_irqsave(&card->lock,flags); + + disable_all_interrupts(card); +#if 0 + /* We can enable this again once we send dummy packets on ifconfig ethX up */ + deactivate_receiver(card); + deactivate_transmitter(card); +#endif + remove_descriptors(card); + + spin_unlock_irqrestore(&card->lock,flags); + + card->open = 0; + free_irq(card->pdev->irq, dev); + + return 0; + +} + + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void xircom_poll_controller(struct net_device *dev) +{ + struct xircom_private *xp = netdev_priv(dev); + const int irq = xp->pdev->irq; + + disable_irq(irq); + xircom_interrupt(irq, dev); + enable_irq(irq); +} +#endif + + +static void initialize_card(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&card->lock, flags); + + /* First: reset the card */ + val = xr32(CSR0); + val |= 0x01; /* Software reset */ + xw32(CSR0, val); + + udelay(100); /* give the card some time to reset */ + + val = xr32(CSR0); + val &= ~0x01; /* disable Software reset */ + xw32(CSR0, val); + + + val = 0; /* Value 0x00 is a safe and conservative value + for the PCI configuration settings */ + xw32(CSR0, val); + + + disable_all_interrupts(card); + deactivate_receiver(card); + deactivate_transmitter(card); + + spin_unlock_irqrestore(&card->lock, flags); +} + +/* +trigger_transmit causes the card to check for frames to be transmitted. +This is accomplished by writing to the CSR1 port. The documentation +claims that the act of writing is sufficient and that the value is +ignored; I chose zero. +*/ +static void trigger_transmit(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + + xw32(CSR1, 0); +} + +/* +trigger_receive causes the card to check for empty frames in the +descriptor list in which packets can be received. +This is accomplished by writing to the CSR2 port. The documentation +claims that the act of writing is sufficient and that the value is +ignored; I chose zero. +*/ +static void trigger_receive(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + + xw32(CSR2, 0); +} + +/* +setup_descriptors initializes the send and receive buffers to be valid +descriptors and programs the addresses into the card. 
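The descriptors are plain little-endian 32-bit words in DMA-coherent memory, four per descriptor, and the card takes ownership of a descriptor once bit 31 of the first word is set. A minimal sketch of that layout as setup_descriptors programs it below; the struct and helper names are illustrative, not the driver's own:

/* Illustrative sketch: one 16-byte, tulip-style descriptor.
 * Bit 31 of the status word is the ownership bit (set = card owns it);
 * bit 25 of the control word marks the last descriptor of the ring. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct tulip_like_desc {
        __le32 status;          /* desc0: ownership and completion status */
        __le32 control;         /* desc1: buffer length, ring-end flag    */
        __le32 buffer1;         /* desc2: DMA address of the data buffer  */
        __le32 buffer2;         /* desc3: unused second buffer (zero)     */
};

static void init_rx_desc(struct tulip_like_desc *d, dma_addr_t buf,
                         unsigned int len, bool last)
{
        d->control = cpu_to_le32(len | (last ? 1 << 25 : 0));
        d->buffer1 = cpu_to_le32(buf);
        d->buffer2 = 0;
        /* hand the descriptor to the card only after the other fields
         * are filled in, as the wmb() in the code below also ensures */
        d->status  = cpu_to_le32(0x80000000);
}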
+*/ +static void setup_descriptors(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + u32 address; + int i; + + BUG_ON(card->rx_buffer == NULL); + BUG_ON(card->tx_buffer == NULL); + + /* Receive descriptors */ + memset(card->rx_buffer, 0, 128); /* clear the descriptors */ + for (i=0;i 0x80000000 */ + card->rx_buffer[i*4 + 0] = cpu_to_le32(0x80000000); + /* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ + card->rx_buffer[i*4 + 1] = cpu_to_le32(1536); + if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ + card->rx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); + + /* Rx Descr2: address of the buffer + we store the buffer at the 2nd half of the page */ + + address = card->rx_dma_handle; + card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); + /* Rx Desc3: address of 2nd buffer -> 0 */ + card->rx_buffer[i*4 + 3] = 0; + } + + wmb(); + /* Write the receive descriptor ring address to the card */ + address = card->rx_dma_handle; + xw32(CSR3, address); /* Receive descr list address */ + + + /* transmit descriptors */ + memset(card->tx_buffer, 0, 128); /* clear the descriptors */ + + for (i=0;i 0x00000000 */ + card->tx_buffer[i*4 + 0] = 0x00000000; + /* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ + card->tx_buffer[i*4 + 1] = cpu_to_le32(1536); + if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ + card->tx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); + + /* Tx Descr2: address of the buffer + we store the buffer at the 2nd half of the page */ + address = card->tx_dma_handle; + card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); + /* Tx Desc3: address of 2nd buffer -> 0 */ + card->tx_buffer[i*4 + 3] = 0; + } + + wmb(); + /* wite the transmit descriptor ring to the card */ + address = card->tx_dma_handle; + xw32(CSR4, address); /* xmit descr list address */ +} + +/* +remove_descriptors informs the card the descriptors are no longer +valid by setting the address in the card to 0x00. +*/ +static void remove_descriptors(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + + val = 0; + xw32(CSR3, val); /* Receive descriptor address */ + xw32(CSR4, val); /* Send descriptor address */ +} + +/* +link_status_changed returns 1 if the card has indicated that +the link status has changed. The new link status has to be read from CSR12. + +This function also clears the status-bit. +*/ +static int link_status_changed(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + + val = xr32(CSR5); /* Status register */ + if (!(val & (1 << 27))) /* no change */ + return 0; + + /* clear the event by writing a 1 to the bit in the + status register. */ + val = (1 << 27); + xw32(CSR5, val); + + return 1; +} + + +/* +transmit_active returns 1 if the transmitter on the card is +in a non-stopped state. +*/ +static int transmit_active(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + + if (!(xr32(CSR5) & (7 << 20))) /* transmitter disabled */ + return 0; + + return 1; +} + +/* +receive_active returns 1 if the receiver on the card is +in a non-stopped state. +*/ +static int receive_active(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + + if (!(xr32(CSR5) & (7 << 17))) /* receiver disabled */ + return 0; + + return 1; +} + +/* +activate_receiver enables the receiver on the card. +Before being allowed to active the receiver, the receiver +must be completely de-activated. 
To achieve this, +this code actually disables the receiver first; then it waits for the +receiver to become inactive, then it activates the receiver and then +it waits for the receiver to be active. + +must be called with the lock held and interrupts disabled. +*/ +static void activate_receiver(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + int counter; + + val = xr32(CSR6); /* Operation mode */ + + /* If the "active" bit is set and the receiver is already + active, no need to do the expensive thing */ + if ((val&2) && (receive_active(card))) + return; + + + val = val & ~2; /* disable the receiver */ + xw32(CSR6, val); + + counter = 10; + while (counter > 0) { + if (!receive_active(card)) + break; + /* wait a while */ + udelay(50); + counter--; + if (counter <= 0) + netdev_err(card->dev, "Receiver failed to deactivate\n"); + } + + /* enable the receiver */ + val = xr32(CSR6); /* Operation mode */ + val = val | 2; /* enable the receiver */ + xw32(CSR6, val); + + /* now wait for the card to activate again */ + counter = 10; + while (counter > 0) { + if (receive_active(card)) + break; + /* wait a while */ + udelay(50); + counter--; + if (counter <= 0) + netdev_err(card->dev, + "Receiver failed to re-activate\n"); + } +} + +/* +deactivate_receiver disables the receiver on the card. +To achieve this this code disables the receiver first; +then it waits for the receiver to become inactive. + +must be called with the lock held and interrupts disabled. +*/ +static void deactivate_receiver(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + int counter; + + val = xr32(CSR6); /* Operation mode */ + val = val & ~2; /* disable the receiver */ + xw32(CSR6, val); + + counter = 10; + while (counter > 0) { + if (!receive_active(card)) + break; + /* wait a while */ + udelay(50); + counter--; + if (counter <= 0) + netdev_err(card->dev, "Receiver failed to deactivate\n"); + } +} + + +/* +activate_transmitter enables the transmitter on the card. +Before being allowed to active the transmitter, the transmitter +must be completely de-activated. To achieve this, +this code actually disables the transmitter first; then it waits for the +transmitter to become inactive, then it activates the transmitter and then +it waits for the transmitter to be active again. + +must be called with the lock held and interrupts disabled. 
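All of the activate/deactivate helpers above share one pattern: flip the relevant CSR6 bit, then poll the corresponding CSR5 state field with a bounded udelay() loop rather than waiting forever. A condensed sketch of that pattern under the same locking assumption; the function and parameter names are invented for illustration:

/* Illustrative sketch: toggle a state and wait, bounded, for the status
 * register to reflect it. Caller holds the register lock with interrupts
 * disabled, as the comments above require. */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_for_state(void __iomem *ioaddr, u32 status_reg,
                          u32 mask, bool want_active)
{
        int tries = 10;

        while (tries--) {
                bool active = !!(ioread32(ioaddr + status_reg) & mask);

                if (active == want_active)
                        return 0;
                udelay(50);             /* same 50 us step as the loops above */
        }
        return -ETIMEDOUT;              /* caller logs the failure, as above */
}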
+*/ +static void activate_transmitter(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + int counter; + + val = xr32(CSR6); /* Operation mode */ + + /* If the "active" bit is set and the receiver is already + active, no need to do the expensive thing */ + if ((val&(1<<13)) && (transmit_active(card))) + return; + + val = val & ~(1 << 13); /* disable the transmitter */ + xw32(CSR6, val); + + counter = 10; + while (counter > 0) { + if (!transmit_active(card)) + break; + /* wait a while */ + udelay(50); + counter--; + if (counter <= 0) + netdev_err(card->dev, + "Transmitter failed to deactivate\n"); + } + + /* enable the transmitter */ + val = xr32(CSR6); /* Operation mode */ + val = val | (1 << 13); /* enable the transmitter */ + xw32(CSR6, val); + + /* now wait for the card to activate again */ + counter = 10; + while (counter > 0) { + if (transmit_active(card)) + break; + /* wait a while */ + udelay(50); + counter--; + if (counter <= 0) + netdev_err(card->dev, + "Transmitter failed to re-activate\n"); + } +} + +/* +deactivate_transmitter disables the transmitter on the card. +To achieve this this code disables the transmitter first; +then it waits for the transmitter to become inactive. + +must be called with the lock held and interrupts disabled. +*/ +static void deactivate_transmitter(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + int counter; + + val = xr32(CSR6); /* Operation mode */ + val = val & ~2; /* disable the transmitter */ + xw32(CSR6, val); + + counter = 20; + while (counter > 0) { + if (!transmit_active(card)) + break; + /* wait a while */ + udelay(50); + counter--; + if (counter <= 0) + netdev_err(card->dev, + "Transmitter failed to deactivate\n"); + } +} + + +/* +enable_transmit_interrupt enables the transmit interrupt + +must be called with the lock held and interrupts disabled. +*/ +static void enable_transmit_interrupt(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + + val = xr32(CSR7); /* Interrupt enable register */ + val |= 1; /* enable the transmit interrupt */ + xw32(CSR7, val); +} + + +/* +enable_receive_interrupt enables the receive interrupt + +must be called with the lock held and interrupts disabled. +*/ +static void enable_receive_interrupt(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + + val = xr32(CSR7); /* Interrupt enable register */ + val = val | (1 << 6); /* enable the receive interrupt */ + xw32(CSR7, val); +} + +/* +enable_link_interrupt enables the link status change interrupt + +must be called with the lock held and interrupts disabled. +*/ +static void enable_link_interrupt(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + + val = xr32(CSR7); /* Interrupt enable register */ + val = val | (1 << 27); /* enable the link status chage interrupt */ + xw32(CSR7, val); +} + + + +/* +disable_all_interrupts disables all interrupts + +must be called with the lock held and interrupts disabled. +*/ +static void disable_all_interrupts(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + + xw32(CSR7, 0); +} + +/* +enable_common_interrupts enables several weird interrupts + +must be called with the lock held and interrupts disabled. 
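The enable_*_interrupt helpers above, like the transmitter/receiver helpers, are read-modify-write cycles on a shared register, which is why the comments insist on the lock being held: two unlocked writers could silently drop each other's bits. A standalone sketch of the same operation that takes the lock itself rather than assuming the caller already holds it; the names are illustrative:

/* Illustrative sketch: set bits in an interrupt-enable register under the
 * register lock so concurrent read-modify-write cycles cannot race. */
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static void enable_irq_bits(void __iomem *ioaddr, spinlock_t *lock,
                            u32 reg, u32 bits)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(lock, flags);
        val = ioread32(ioaddr + reg);
        iowrite32(val | bits, ioaddr + reg);
        spin_unlock_irqrestore(lock, flags);
}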
+*/ +static void enable_common_interrupts(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + + val = xr32(CSR7); /* Interrupt enable register */ + val |= (1<<16); /* Normal Interrupt Summary */ + val |= (1<<15); /* Abnormal Interrupt Summary */ + val |= (1<<13); /* Fatal bus error */ + val |= (1<<8); /* Receive Process Stopped */ + val |= (1<<7); /* Receive Buffer Unavailable */ + val |= (1<<5); /* Transmit Underflow */ + val |= (1<<2); /* Transmit Buffer Unavailable */ + val |= (1<<1); /* Transmit Process Stopped */ + xw32(CSR7, val); +} + +/* +enable_promisc starts promisc mode + +must be called with the lock held and interrupts disabled. +*/ +static int enable_promisc(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned int val; + + val = xr32(CSR6); + val = val | (1 << 6); + xw32(CSR6, val); + + return 1; +} + + + + +/* +link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. + +Must be called in locked state with interrupts disabled +*/ +static int link_status(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + u8 val; + + val = xr8(CSR12); + + /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ + if (!(val & (1 << 2))) + return 10; + /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ + if (!(val & (1 << 1))) + return 100; + + /* If we get here -> no link at all */ + + return 0; +} + + + + + +/* + read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure. + + This function will take the spinlock itself and can, as a result, not be called with the lock helt. + */ +static void read_mac_address(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned long flags; + u8 link; + int i; + + spin_lock_irqsave(&card->lock, flags); + + xw32(CSR9, 1 << 12); /* enable boot rom access */ + for (i = 0x100; i < 0x1f7; i += link + 2) { + u8 tuple, data_id, data_count; + + xw32(CSR10, i); + tuple = xr32(CSR9); + xw32(CSR10, i + 1); + link = xr32(CSR9); + xw32(CSR10, i + 2); + data_id = xr32(CSR9); + xw32(CSR10, i + 3); + data_count = xr32(CSR9); + if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { + int j; + + for (j = 0; j < 6; j++) { + xw32(CSR10, i + j + 4); + card->dev->dev_addr[j] = xr32(CSR9) & 0xff; + } + break; + } else if (link == 0) { + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + pr_debug(" %pM\n", card->dev->dev_addr); +} + + +/* + transceiver_voodoo() enables the external UTP plug thingy. + it's called voodoo as I stole this code and cannot cross-reference + it with the specification. 
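read_mac_address above walks the tuple chain in the card's boot ROM: each tuple is a code byte, a link byte giving the data length, and then the data itself, with the next tuple starting after the data; the MAC address is taken from the tuple whose code and first two data bytes match 0x22/0x04/0x06. A simplified sketch of that walk over an in-memory buffer, with invented names and the same constants as the checks above:

/* Illustrative sketch: scan a [code][link][data...] tuple chain for a
 * 6-byte address payload. */
#include <linux/types.h>
#include <linux/string.h>

static bool find_mac_tuple(const u8 *cis, size_t len, u8 mac[6])
{
        size_t i = 0;

        while (i + 2 <= len) {
                u8 code = cis[i];
                u8 link = cis[i + 1];

                if (link == 0 || code == 0xff)
                        break;                  /* end of the chain */
                /* same match as above: code 0x22, data id 0x04, count 0x06 */
                if (code == 0x22 && link >= 8 && i + 10 <= len &&
                    cis[i + 2] == 0x04 && cis[i + 3] == 0x06) {
                        memcpy(mac, &cis[i + 4], 6);
                        return true;
                }
                i += link + 2;                  /* next tuple follows the data */
        }
        return false;
}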
+ */ +static void transceiver_voodoo(struct xircom_private *card) +{ + void __iomem *ioaddr = card->ioaddr; + unsigned long flags; + + /* disable all powermanagement */ + pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); + + setup_descriptors(card); + + spin_lock_irqsave(&card->lock, flags); + + xw32(CSR15, 0x0008); + udelay(25); + xw32(CSR15, 0xa8050000); + udelay(25); + xw32(CSR15, 0xa00f0000); + udelay(25); + + spin_unlock_irqrestore(&card->lock, flags); + + netif_start_queue(card->dev); +} + + +static void xircom_up(struct xircom_private *card) +{ + unsigned long flags; + int i; + + /* disable all powermanagement */ + pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); + + setup_descriptors(card); + + spin_lock_irqsave(&card->lock, flags); + + + enable_link_interrupt(card); + enable_transmit_interrupt(card); + enable_receive_interrupt(card); + enable_common_interrupts(card); + enable_promisc(card); + + /* The card can have received packets already, read them away now */ + for (i=0;idev,card,i,bufferoffsets[i]); + + + spin_unlock_irqrestore(&card->lock, flags); + trigger_receive(card); + trigger_transmit(card); + netif_start_queue(card->dev); +} + +/* Bufferoffset is in BYTES */ +static void +investigate_read_descriptor(struct net_device *dev, struct xircom_private *card, + int descnr, unsigned int bufferoffset) +{ + int status; + + status = le32_to_cpu(card->rx_buffer[4*descnr]); + + if (status > 0) { /* packet received */ + + /* TODO: discard error packets */ + + short pkt_len = ((status >> 16) & 0x7ff) - 4; + /* minus 4, we don't want the CRC */ + struct sk_buff *skb; + + if (pkt_len > 1518) { + netdev_err(dev, "Packet length %i is bogus\n", pkt_len); + pkt_len = 1518; + } + + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { + dev->stats.rx_dropped++; + goto out; + } + skb_reserve(skb, 2); + skb_copy_to_linear_data(skb, + &card->rx_buffer[bufferoffset / 4], + pkt_len); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + +out: + /* give the buffer back to the card */ + card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000); + trigger_receive(card); + } +} + + +/* Bufferoffset is in BYTES */ +static void +investigate_write_descriptor(struct net_device *dev, + struct xircom_private *card, + int descnr, unsigned int bufferoffset) +{ + int status; + + status = le32_to_cpu(card->tx_buffer[4*descnr]); +#if 0 + if (status & 0x8000) { /* Major error */ + pr_err("Major transmit error status %x\n", status); + card->tx_buffer[4*descnr] = 0; + netif_wake_queue (dev); + } +#endif + if (status > 0) { /* bit 31 is 0 when done */ + if (card->tx_skb[descnr]!=NULL) { + dev->stats.tx_bytes += card->tx_skb[descnr]->len; + dev_kfree_skb_irq(card->tx_skb[descnr]); + } + card->tx_skb[descnr] = NULL; + /* Bit 8 in the status field is 1 if there was a collision */ + if (status & (1 << 8)) + dev->stats.collisions++; + card->tx_buffer[4*descnr] = 0; /* descriptor is free again */ + netif_wake_queue (dev); + dev->stats.tx_packets++; + } +} + +module_pci_driver(xircom_ops); diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig new file mode 100644 index 000000000..c543ac11c --- /dev/null +++ b/drivers/net/ethernet/dlink/Kconfig @@ -0,0 +1,55 @@ +# +# D-Link device configuration +# + +config NET_VENDOR_DLINK + bool "D-Link devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the 
Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about D-Link devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_DLINK + +config DL2K + tristate "DL2000/TC902x-based Gigabit Ethernet support" + depends on PCI + select CRC32 + ---help--- + This driver supports DL2000/TC902x-based Gigabit ethernet cards, + which includes + D-Link DGE-550T Gigabit Ethernet Adapter. + D-Link DL2000-based Gigabit Ethernet Adapter. + Sundance/Tamarack TC902x Gigabit Ethernet Adapter. + + To compile this driver as a module, choose M here: the + module will be called dl2k. + +config SUNDANCE + tristate "Sundance Alta support" + depends on PCI + select CRC32 + select MII + ---help--- + This driver is for the Sundance "Alta" chip. + More specific information and updates are available from + . + +config SUNDANCE_MMIO + bool "Use MMIO instead of PIO" + depends on SUNDANCE + ---help--- + Enable memory-mapped I/O for interaction with Sundance NIC registers. + Do NOT enable this by default, PIO (enabled when MMIO is disabled) + is known to solve bugs on certain chips. + + If unsure, say N. + +endif # NET_VENDOR_DLINK diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile new file mode 100644 index 000000000..40085f671 --- /dev/null +++ b/drivers/net/ethernet/dlink/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the D-Link network device drivers. +# + +obj-$(CONFIG_DL2K) += dl2k.o +obj-$(CONFIG_SUNDANCE) += sundance.o diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c new file mode 100644 index 000000000..1274b6fda --- /dev/null +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -0,0 +1,1768 @@ +/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */ +/* + Copyright (c) 2001, 2002 by D-Link Corporation + Written by Edward Peng. + Created 03-May-2001, base on Linux' sundance.c. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
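The SUNDANCE_MMIO choice above only decides which PCI BAR the driver maps for its registers; pci_iomap() returns a void __iomem cookie for either an I/O-port or a memory BAR, so the rest of the driver is unchanged. A minimal sketch of such a compile-time switch; USE_MMIO and the BAR numbers here are illustrative (dl2k's own variant is the MEM_MAPPING #ifdef further down):

/* Illustrative sketch: map the register window from whichever BAR the
 * build configuration selects; ioread/iowrite work on the result either way. */
#include <linux/pci.h>

#ifdef USE_MMIO
#define REG_BAR 1       /* memory-mapped register window (assumed layout) */
#else
#define REG_BAR 0       /* I/O-port register window (assumed layout) */
#endif

static void __iomem *map_registers(struct pci_dev *pdev)
{
        return pci_iomap(pdev, REG_BAR, 0);     /* 0 = map the whole BAR */
}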
+*/ + +#define DRV_NAME "DL2000/TC902x-based linux driver" +#define DRV_VERSION "v1.19" +#define DRV_RELDATE "2007/08/12" +#include "dl2k.h" +#include + +#define dw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define dw16(reg, val) iowrite16(val, ioaddr + (reg)) +#define dw8(reg, val) iowrite8(val, ioaddr + (reg)) +#define dr32(reg) ioread32(ioaddr + (reg)) +#define dr16(reg) ioread16(ioaddr + (reg)) +#define dr8(reg) ioread8(ioaddr + (reg)) + +static char version[] = + KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; +#define MAX_UNITS 8 +static int mtu[MAX_UNITS]; +static int vlan[MAX_UNITS]; +static int jumbo[MAX_UNITS]; +static char *media[MAX_UNITS]; +static int tx_flow=-1; +static int rx_flow=-1; +static int copy_thresh; +static int rx_coalesce=10; /* Rx frame count each interrupt */ +static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */ +static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */ + + +MODULE_AUTHOR ("Edward Peng"); +MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter"); +MODULE_LICENSE("GPL"); +module_param_array(mtu, int, NULL, 0); +module_param_array(media, charp, NULL, 0); +module_param_array(vlan, int, NULL, 0); +module_param_array(jumbo, int, NULL, 0); +module_param(tx_flow, int, 0); +module_param(rx_flow, int, 0); +module_param(copy_thresh, int, 0); +module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */ +module_param(rx_timeout, int, 0); /* Rx DMA wait time in 64ns increments */ +module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */ + + +/* Enable the default interrupts */ +#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ + UpdateStats | LinkEvent) + +static void dl2k_enable_int(struct netdev_private *np) +{ + void __iomem *ioaddr = np->ioaddr; + + dw16(IntEnable, DEFAULT_INTR); +} + +static const int max_intrloop = 50; +static const int multicast_filter_limit = 0x40; + +static int rio_open (struct net_device *dev); +static void rio_timer (unsigned long data); +static void rio_tx_timeout (struct net_device *dev); +static void alloc_list (struct net_device *dev); +static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); +static irqreturn_t rio_interrupt (int irq, void *dev_instance); +static void rio_free_tx (struct net_device *dev, int irq); +static void tx_error (struct net_device *dev, int tx_status); +static int receive_packet (struct net_device *dev); +static void rio_error (struct net_device *dev, int int_status); +static int change_mtu (struct net_device *dev, int new_mtu); +static void set_multicast (struct net_device *dev); +static struct net_device_stats *get_stats (struct net_device *dev); +static int clear_stats (struct net_device *dev); +static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); +static int rio_close (struct net_device *dev); +static int find_miiphy (struct net_device *dev); +static int parse_eeprom (struct net_device *dev); +static int read_eeprom (struct netdev_private *, int eep_addr); +static int mii_wait_link (struct net_device *dev, int wait); +static int mii_set_media (struct net_device *dev); +static int mii_get_media (struct net_device *dev); +static int mii_set_media_pcs (struct net_device *dev); +static int mii_get_media_pcs (struct net_device *dev); +static int mii_read (struct net_device *dev, int phy_addr, int reg_num); +static int mii_write (struct net_device *dev, int phy_addr, int reg_num, + u16 data); + +static const struct ethtool_ops ethtool_ops; + +static const 
struct net_device_ops netdev_ops = { + .ndo_open = rio_open, + .ndo_start_xmit = start_xmit, + .ndo_stop = rio_close, + .ndo_get_stats = get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = set_multicast, + .ndo_do_ioctl = rio_ioctl, + .ndo_tx_timeout = rio_tx_timeout, + .ndo_change_mtu = change_mtu, +}; + +static int +rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct netdev_private *np; + static int card_idx; + int chip_idx = ent->driver_data; + int err, irq; + void __iomem *ioaddr; + static int version_printed; + void *ring_space; + dma_addr_t ring_dma; + + if (!version_printed++) + printk ("%s", version); + + err = pci_enable_device (pdev); + if (err) + return err; + + irq = pdev->irq; + err = pci_request_regions (pdev, "dl2k"); + if (err) + goto err_out_disable; + + pci_set_master (pdev); + + err = -ENOMEM; + + dev = alloc_etherdev (sizeof (*np)); + if (!dev) + goto err_out_res; + SET_NETDEV_DEV(dev, &pdev->dev); + + np = netdev_priv(dev); + + /* IO registers range. */ + ioaddr = pci_iomap(pdev, 0, 0); + if (!ioaddr) + goto err_out_dev; + np->eeprom_addr = ioaddr; + +#ifdef MEM_MAPPING + /* MM registers range. */ + ioaddr = pci_iomap(pdev, 1, 0); + if (!ioaddr) + goto err_out_iounmap; +#endif + np->ioaddr = ioaddr; + np->chip_id = chip_idx; + np->pdev = pdev; + spin_lock_init (&np->tx_lock); + spin_lock_init (&np->rx_lock); + + /* Parse manual configuration */ + np->an_enable = 1; + np->tx_coalesce = 1; + if (card_idx < MAX_UNITS) { + if (media[card_idx] != NULL) { + np->an_enable = 0; + if (strcmp (media[card_idx], "auto") == 0 || + strcmp (media[card_idx], "autosense") == 0 || + strcmp (media[card_idx], "0") == 0 ) { + np->an_enable = 2; + } else if (strcmp (media[card_idx], "100mbps_fd") == 0 || + strcmp (media[card_idx], "4") == 0) { + np->speed = 100; + np->full_duplex = 1; + } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || + strcmp (media[card_idx], "3") == 0) { + np->speed = 100; + np->full_duplex = 0; + } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || + strcmp (media[card_idx], "2") == 0) { + np->speed = 10; + np->full_duplex = 1; + } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || + strcmp (media[card_idx], "1") == 0) { + np->speed = 10; + np->full_duplex = 0; + } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 || + strcmp (media[card_idx], "6") == 0) { + np->speed=1000; + np->full_duplex=1; + } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 || + strcmp (media[card_idx], "5") == 0) { + np->speed = 1000; + np->full_duplex = 0; + } else { + np->an_enable = 1; + } + } + if (jumbo[card_idx] != 0) { + np->jumbo = 1; + dev->mtu = MAX_JUMBO; + } else { + np->jumbo = 0; + if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE) + dev->mtu = mtu[card_idx]; + } + np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ? + vlan[card_idx] : 0; + if (rx_coalesce > 0 && rx_timeout > 0) { + np->rx_coalesce = rx_coalesce; + np->rx_timeout = rx_timeout; + np->coalesce = 1; + } + np->tx_flow = (tx_flow == 0) ? 0 : 1; + np->rx_flow = (rx_flow == 0) ? 
0 : 1; + + if (tx_coalesce < 1) + tx_coalesce = 1; + else if (tx_coalesce > TX_RING_SIZE-1) + tx_coalesce = TX_RING_SIZE - 1; + } + dev->netdev_ops = &netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = ðtool_ops; +#if 0 + dev->features = NETIF_F_IP_CSUM; +#endif + pci_set_drvdata (pdev, dev); + + ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_iounmap; + np->tx_ring = ring_space; + np->tx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_unmap_tx; + np->rx_ring = ring_space; + np->rx_ring_dma = ring_dma; + + /* Parse eeprom data */ + parse_eeprom (dev); + + /* Find PHY address */ + err = find_miiphy (dev); + if (err) + goto err_out_unmap_rx; + + /* Fiber device? */ + np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0; + np->link_status = 0; + /* Set media and reset PHY */ + if (np->phy_media) { + /* default Auto-Negotiation for fiber deivices */ + if (np->an_enable == 2) { + np->an_enable = 1; + } + mii_set_media_pcs (dev); + } else { + /* Auto-Negotiation is mandatory for 1000BASE-T, + IEEE 802.3ab Annex 28D page 14 */ + if (np->speed == 1000) + np->an_enable = 1; + mii_set_media (dev); + } + + err = register_netdev (dev); + if (err) + goto err_out_unmap_rx; + + card_idx++; + + printk (KERN_INFO "%s: %s, %pM, IRQ %d\n", + dev->name, np->name, dev->dev_addr, irq); + if (tx_coalesce > 1) + printk(KERN_INFO "tx_coalesce:\t%d packets\n", + tx_coalesce); + if (np->coalesce) + printk(KERN_INFO + "rx_coalesce:\t%d packets\n" + "rx_timeout: \t%d ns\n", + np->rx_coalesce, np->rx_timeout*640); + if (np->vlan) + printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); + return 0; + +err_out_unmap_rx: + pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); +err_out_unmap_tx: + pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); +err_out_iounmap: +#ifdef MEM_MAPPING + pci_iounmap(pdev, np->ioaddr); +#endif + pci_iounmap(pdev, np->eeprom_addr); +err_out_dev: + free_netdev (dev); +err_out_res: + pci_release_regions (pdev); +err_out_disable: + pci_disable_device (pdev); + return err; +} + +static int +find_miiphy (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i, phy_found = 0; + np = netdev_priv(dev); + np->phy_addr = 1; + + for (i = 31; i >= 0; i--) { + int mii_status = mii_read (dev, i, 1); + if (mii_status != 0xffff && mii_status != 0x0000) { + np->phy_addr = i; + phy_found++; + } + } + if (!phy_found) { + printk (KERN_ERR "%s: No MII PHY found!\n", dev->name); + return -ENODEV; + } + return 0; +} + +static int +parse_eeprom (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + int i, j; + u8 sromdata[256]; + u8 *psib; + u32 crc; + PSROM_t psrom = (PSROM_t) sromdata; + + int cid, next; + + for (i = 0; i < 128; i++) + ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i)); + + if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ + /* Check CRC */ + crc = ~ether_crc_le (256 - 4, sromdata); + if (psrom->crc != cpu_to_le32(crc)) { + printk (KERN_ERR "%s: EEPROM data CRC error.\n", + dev->name); + return -1; + } + } + + /* Set MAC address */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = psrom->mac_addr[i]; + + if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { + return 0; + } + + /* Parse Software Information Block */ + i = 0x30; + psib = (u8 *) sromdata; + do { + cid = psib[i++]; + next = psib[i++]; + if ((cid == 0 && 
next == 0) || (cid == 0xff && next == 0xff)) { + printk (KERN_ERR "Cell data error\n"); + return -1; + } + switch (cid) { + case 0: /* Format version */ + break; + case 1: /* End of cell */ + return 0; + case 2: /* Duplex Polarity */ + np->duplex_polarity = psib[i]; + dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]); + break; + case 3: /* Wake Polarity */ + np->wake_polarity = psib[i]; + break; + case 9: /* Adapter description */ + j = (next - i > 255) ? 255 : next - i; + memcpy (np->name, &(psib[i]), j); + break; + case 4: + case 5: + case 6: + case 7: + case 8: /* Reversed */ + break; + default: /* Unknown cell */ + return -1; + } + i = next; + } while (1); + + return 0; +} + +static int +rio_open (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + const int irq = np->pdev->irq; + int i; + u16 macctrl; + + i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev); + if (i) + return i; + + /* Reset all logic functions */ + dw16(ASICCtrl + 2, + GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset); + mdelay(10); + + /* DebugCtrl bit 4, 5, 9 must set */ + dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230); + + /* Jumbo frame */ + if (np->jumbo != 0) + dw16(MaxFrameSize, MAX_JUMBO+14); + + alloc_list (dev); + + /* Get station address */ + for (i = 0; i < 6; i++) + dw8(StationAddr0 + i, dev->dev_addr[i]); + + set_multicast (dev); + if (np->coalesce) { + dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16); + } + /* Set RIO to poll every N*320nsec. */ + dw8(RxDMAPollPeriod, 0x20); + dw8(TxDMAPollPeriod, 0xff); + dw8(RxDMABurstThresh, 0x30); + dw8(RxDMAUrgentThresh, 0x30); + dw32(RmonStatMask, 0x0007ffff); + /* clear statistics */ + clear_stats (dev); + + /* VLAN supported */ + if (np->vlan) { + /* priority field in RxDMAIntCtrl */ + dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10); + /* VLANId */ + dw16(VLANId, np->vlan); + /* Length/Type should be 0x8100 */ + dw32(VLANTag, 0x8100 << 16 | np->vlan); + /* Enable AutoVLANuntagging, but disable AutoVLANtagging. + VLAN information tagged by TFC' VID, CFI fields. */ + dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); + } + + init_timer (&np->timer); + np->timer.expires = jiffies + 1*HZ; + np->timer.data = (unsigned long) dev; + np->timer.function = rio_timer; + add_timer (&np->timer); + + /* Start Tx/Rx */ + dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable); + + macctrl = 0; + macctrl |= (np->vlan) ? AutoVLANuntagging : 0; + macctrl |= (np->full_duplex) ? DuplexSelect : 0; + macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; + macctrl |= (np->rx_flow) ? 
RxFlowControlEnable : 0; + dw16(MACCtrl, macctrl); + + netif_start_queue (dev); + + dl2k_enable_int(np); + return 0; +} + +static void +rio_timer (unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct netdev_private *np = netdev_priv(dev); + unsigned int entry; + int next_tick = 1*HZ; + unsigned long flags; + + spin_lock_irqsave(&np->rx_lock, flags); + /* Recover rx ring exhausted error */ + if (np->cur_rx - np->old_rx >= RX_RING_SIZE) { + printk(KERN_INFO "Try to recover rx ring exhausted...\n"); + /* Re-allocate skbuffs to fill the descriptor ring */ + for (; np->cur_rx - np->old_rx > 0; np->old_rx++) { + struct sk_buff *skb; + entry = np->old_rx % RX_RING_SIZE; + /* Dropped packets don't need to re-allocate */ + if (np->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb_ip_align(dev, + np->rx_buf_sz); + if (skb == NULL) { + np->rx_ring[entry].fraginfo = 0; + printk (KERN_INFO + "%s: Still unable to re-allocate Rx skbuff.#%d\n", + dev->name, entry); + break; + } + np->rx_skbuff[entry] = skb; + np->rx_ring[entry].fraginfo = + cpu_to_le64 (pci_map_single + (np->pdev, skb->data, np->rx_buf_sz, + PCI_DMA_FROMDEVICE)); + } + np->rx_ring[entry].fraginfo |= + cpu_to_le64((u64)np->rx_buf_sz << 48); + np->rx_ring[entry].status = 0; + } /* end for */ + } /* end if */ + spin_unlock_irqrestore (&np->rx_lock, flags); + np->timer.expires = jiffies + next_tick; + add_timer(&np->timer); +} + +static void +rio_tx_timeout (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + + printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", + dev->name, dr32(TxStatus)); + rio_free_tx(dev, 0); + dev->if_port = 0; + dev->trans_start = jiffies; /* prevent tx timeout */ +} + + /* allocate and initialize Tx and Rx descriptors */ +static void +alloc_list (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + int i; + + np->cur_rx = np->cur_tx = 0; + np->old_rx = np->old_tx = 0; + np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32); + + /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */ + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = NULL; + np->tx_ring[i].status = cpu_to_le64 (TFDDone); + np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma + + ((i+1)%TX_RING_SIZE) * + sizeof (struct netdev_desc)); + } + + /* Initialize Rx descriptors */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma + + ((i + 1) % RX_RING_SIZE) * + sizeof (struct netdev_desc)); + np->rx_ring[i].status = 0; + np->rx_ring[i].fraginfo = 0; + np->rx_skbuff[i] = NULL; + } + + /* Allocate the rx buffers */ + for (i = 0; i < RX_RING_SIZE; i++) { + /* Allocated fixed size of skbuff */ + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); + np->rx_skbuff[i] = skb; + if (skb == NULL) + break; + + /* Rubicon now supports 40 bits of addressing space. 
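alloc_list and rio_timer above pack two things into the 64-bit fraginfo word of each descriptor: the buffer's DMA address in the low 48 bits and the fragment length in the top 16, which is why desc_to_dma() later masks with DMA_BIT_MASK(48). A small sketch of that packing, with illustrative helper names:

/* Illustrative sketch: combine a (<= 48-bit) DMA address and a 16-bit
 * length into one little-endian descriptor word, and recover the address. */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>

static __le64 pack_fraginfo(dma_addr_t addr, u16 len)
{
        return cpu_to_le64((addr & DMA_BIT_MASK(48)) | ((u64)len << 48));
}

static dma_addr_t unpack_fraginfo(__le64 fraginfo)
{
        return le64_to_cpu(fraginfo) & DMA_BIT_MASK(48);
}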
*/ + np->rx_ring[i].fraginfo = + cpu_to_le64 ( pci_map_single ( + np->pdev, skb->data, np->rx_buf_sz, + PCI_DMA_FROMDEVICE)); + np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48); + } + + /* Set RFDListPtr */ + dw32(RFDListPtr0, np->rx_ring_dma); + dw32(RFDListPtr1, 0); +} + +static netdev_tx_t +start_xmit (struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + struct netdev_desc *txdesc; + unsigned entry; + u64 tfc_vlan_tag = 0; + + if (np->link_status == 0) { /* Link Down */ + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + entry = np->cur_tx % TX_RING_SIZE; + np->tx_skbuff[entry] = skb; + txdesc = &np->tx_ring[entry]; + +#if 0 + if (skb->ip_summed == CHECKSUM_PARTIAL) { + txdesc->status |= + cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | + IPChecksumEnable); + } +#endif + if (np->vlan) { + tfc_vlan_tag = VLANTagInsert | + ((u64)np->vlan << 32) | + ((u64)skb->priority << 45); + } + txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data, + skb->len, + PCI_DMA_TODEVICE)); + txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48); + + /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode + * Work around: Always use 1 descriptor in 10Mbps mode */ + if (entry % np->tx_coalesce == 0 || np->speed == 10) + txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | + WordAlignDisable | + TxDMAIndicate | + (1 << FragCountShift)); + else + txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag | + WordAlignDisable | + (1 << FragCountShift)); + + /* TxDMAPollNow */ + dw32(DMACtrl, dr32(DMACtrl) | 0x00001000); + /* Schedule ISR */ + dw32(CountDown, 10000); + np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; + if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE + < TX_QUEUE_LEN - 1 && np->speed != 10) { + /* do nothing */ + } else if (!netif_queue_stopped(dev)) { + netif_stop_queue (dev); + } + + /* The first TFDListPtr */ + if (!dr32(TFDListPtr0)) { + dw32(TFDListPtr0, np->tx_ring_dma + + entry * sizeof (struct netdev_desc)); + dw32(TFDListPtr1, 0); + } + + return NETDEV_TX_OK; +} + +static irqreturn_t +rio_interrupt (int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + unsigned int_status; + int cnt = max_intrloop; + int handled = 0; + + while (1) { + int_status = dr16(IntStatus); + dw16(IntStatus, int_status); + int_status &= DEFAULT_INTR; + if (int_status == 0 || --cnt < 0) + break; + handled = 1; + /* Processing received packets */ + if (int_status & RxDMAComplete) + receive_packet (dev); + /* TxDMAComplete interrupt */ + if ((int_status & (TxDMAComplete|IntRequested))) { + int tx_status; + tx_status = dr32(TxStatus); + if (tx_status & 0x01) + tx_error (dev, tx_status); + /* Free used tx skbuffs */ + rio_free_tx (dev, 1); + } + + /* Handle uncommon events */ + if (int_status & + (HostError | LinkEvent | UpdateStats)) + rio_error (dev, int_status); + } + if (np->cur_tx != np->old_tx) + dw32(CountDown, 100); + return IRQ_RETVAL(handled); +} + +static inline dma_addr_t desc_to_dma(struct netdev_desc *desc) +{ + return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48); +} + +static void +rio_free_tx (struct net_device *dev, int irq) +{ + struct netdev_private *np = netdev_priv(dev); + int entry = np->old_tx % TX_RING_SIZE; + int tx_use = 0; + unsigned long flag = 0; + + if (irq) + spin_lock(&np->tx_lock); + else + spin_lock_irqsave(&np->tx_lock, flag); + + /* Free used tx skbuffs */ + 
while (entry != np->cur_tx) { + struct sk_buff *skb; + + if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone))) + break; + skb = np->tx_skbuff[entry]; + pci_unmap_single (np->pdev, + desc_to_dma(&np->tx_ring[entry]), + skb->len, PCI_DMA_TODEVICE); + if (irq) + dev_kfree_skb_irq (skb); + else + dev_kfree_skb (skb); + + np->tx_skbuff[entry] = NULL; + entry = (entry + 1) % TX_RING_SIZE; + tx_use++; + } + if (irq) + spin_unlock(&np->tx_lock); + else + spin_unlock_irqrestore(&np->tx_lock, flag); + np->old_tx = entry; + + /* If the ring is no longer full, clear tx_full and + call netif_wake_queue() */ + + if (netif_queue_stopped(dev) && + ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE + < TX_QUEUE_LEN - 1 || np->speed == 10)) { + netif_wake_queue (dev); + } +} + +static void +tx_error (struct net_device *dev, int tx_status) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + int frame_id; + int i; + + frame_id = (tx_status & 0xffff0000); + printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", + dev->name, tx_status, frame_id); + np->stats.tx_errors++; + /* Ttransmit Underrun */ + if (tx_status & 0x10) { + np->stats.tx_fifo_errors++; + dw16(TxStartThresh, dr16(TxStartThresh) + 0x10); + /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ + dw16(ASICCtrl + 2, + TxReset | DMAReset | FIFOReset | NetworkReset); + /* Wait for ResetBusy bit clear */ + for (i = 50; i > 0; i--) { + if (!(dr16(ASICCtrl + 2) & ResetBusy)) + break; + mdelay (1); + } + rio_free_tx (dev, 1); + /* Reset TFDListPtr */ + dw32(TFDListPtr0, np->tx_ring_dma + + np->old_tx * sizeof (struct netdev_desc)); + dw32(TFDListPtr1, 0); + + /* Let TxStartThresh stay default value */ + } + /* Late Collision */ + if (tx_status & 0x04) { + np->stats.tx_fifo_errors++; + /* TxReset and clear FIFO */ + dw16(ASICCtrl + 2, TxReset | FIFOReset); + /* Wait reset done */ + for (i = 50; i > 0; i--) { + if (!(dr16(ASICCtrl + 2) & ResetBusy)) + break; + mdelay (1); + } + /* Let TxStartThresh stay default value */ + } + /* Maximum Collisions */ +#ifdef ETHER_STATS + if (tx_status & 0x08) + np->stats.collisions16++; +#else + if (tx_status & 0x08) + np->stats.collisions++; +#endif + /* Restart the Tx */ + dw32(MACCtrl, dr16(MACCtrl) | TxEnable); +} + +static int +receive_packet (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int entry = np->cur_rx % RX_RING_SIZE; + int cnt = 30; + + /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */ + while (1) { + struct netdev_desc *desc = &np->rx_ring[entry]; + int pkt_len; + u64 frame_status; + + if (!(desc->status & cpu_to_le64(RFDDone)) || + !(desc->status & cpu_to_le64(FrameStart)) || + !(desc->status & cpu_to_le64(FrameEnd))) + break; + + /* Chip omits the CRC. */ + frame_status = le64_to_cpu(desc->status); + pkt_len = frame_status & 0xffff; + if (--cnt < 0) + break; + /* Update rx error statistics, drop packet. 
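rio_free_tx above is written to be called from two contexts: from the interrupt handler (irq != 0, interrupts already disabled, so a plain spin_lock is enough) and from process context (irq == 0, where spin_lock_irqsave is needed so the interrupt handler cannot deadlock on the same lock). A condensed sketch of that calling convention, with illustrative names:

/* Illustrative sketch: one reclaim routine shared by IRQ and process
 * context; the caller states its context so the cheaper lock variant can
 * be used from the interrupt handler. */
#include <linux/spinlock.h>

static void reclaim_ring(spinlock_t *lock, int in_irq_ctx)
{
        unsigned long flags = 0;

        if (in_irq_ctx)
                spin_lock(lock);                /* IRQs already disabled */
        else
                spin_lock_irqsave(lock, flags);

        /* ... walk the ring and free completed entries here ... */

        if (in_irq_ctx)
                spin_unlock(lock);
        else
                spin_unlock_irqrestore(lock, flags);
}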
*/ + if (frame_status & RFS_Errors) { + np->stats.rx_errors++; + if (frame_status & (RxRuntFrame | RxLengthError)) + np->stats.rx_length_errors++; + if (frame_status & RxFCSError) + np->stats.rx_crc_errors++; + if (frame_status & RxAlignmentError && np->speed != 1000) + np->stats.rx_frame_errors++; + if (frame_status & RxFIFOOverrun) + np->stats.rx_fifo_errors++; + } else { + struct sk_buff *skb; + + /* Small skbuffs for short packets */ + if (pkt_len > copy_thresh) { + pci_unmap_single (np->pdev, + desc_to_dma(desc), + np->rx_buf_sz, + PCI_DMA_FROMDEVICE); + skb_put (skb = np->rx_skbuff[entry], pkt_len); + np->rx_skbuff[entry] = NULL; + } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) { + pci_dma_sync_single_for_cpu(np->pdev, + desc_to_dma(desc), + np->rx_buf_sz, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data (skb, + np->rx_skbuff[entry]->data, + pkt_len); + skb_put (skb, pkt_len); + pci_dma_sync_single_for_device(np->pdev, + desc_to_dma(desc), + np->rx_buf_sz, + PCI_DMA_FROMDEVICE); + } + skb->protocol = eth_type_trans (skb, dev); +#if 0 + /* Checksum done by hw, but csum value unavailable. */ + if (np->pdev->pci_rev_id >= 0x0c && + !(frame_status & (TCPError | UDPError | IPError))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +#endif + netif_rx (skb); + } + entry = (entry + 1) % RX_RING_SIZE; + } + spin_lock(&np->rx_lock); + np->cur_rx = entry; + /* Re-allocate skbuffs to fill the descriptor ring */ + entry = np->old_rx; + while (entry != np->cur_rx) { + struct sk_buff *skb; + /* Dropped packets don't need to re-allocate */ + if (np->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); + if (skb == NULL) { + np->rx_ring[entry].fraginfo = 0; + printk (KERN_INFO + "%s: receive_packet: " + "Unable to re-allocate Rx skbuff.#%d\n", + dev->name, entry); + break; + } + np->rx_skbuff[entry] = skb; + np->rx_ring[entry].fraginfo = + cpu_to_le64 (pci_map_single + (np->pdev, skb->data, np->rx_buf_sz, + PCI_DMA_FROMDEVICE)); + } + np->rx_ring[entry].fraginfo |= + cpu_to_le64((u64)np->rx_buf_sz << 48); + np->rx_ring[entry].status = 0; + entry = (entry + 1) % RX_RING_SIZE; + } + np->old_rx = entry; + spin_unlock(&np->rx_lock); + return 0; +} + +static void +rio_error (struct net_device *dev, int int_status) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + u16 macctrl; + + /* Link change event */ + if (int_status & LinkEvent) { + if (mii_wait_link (dev, 10) == 0) { + printk (KERN_INFO "%s: Link up\n", dev->name); + if (np->phy_media) + mii_get_media_pcs (dev); + else + mii_get_media (dev); + if (np->speed == 1000) + np->tx_coalesce = tx_coalesce; + else + np->tx_coalesce = 1; + macctrl = 0; + macctrl |= (np->vlan) ? AutoVLANuntagging : 0; + macctrl |= (np->full_duplex) ? DuplexSelect : 0; + macctrl |= (np->tx_flow) ? + TxFlowControlEnable : 0; + macctrl |= (np->rx_flow) ? + RxFlowControlEnable : 0; + dw16(MACCtrl, macctrl); + np->link_status = 1; + netif_carrier_on(dev); + } else { + printk (KERN_INFO "%s: Link off\n", dev->name); + np->link_status = 0; + netif_carrier_off(dev); + } + } + + /* UpdateStats statistics registers */ + if (int_status & UpdateStats) { + get_stats (dev); + } + + /* PCI Error, a catastronphic error related to the bus interface + occurs, set GlobalReset and HostReset to reset. */ + if (int_status & HostError) { + printk (KERN_ERR "%s: HostError! 
IntStatus %4.4x.\n", + dev->name, int_status); + dw16(ASICCtrl + 2, GlobalReset | HostReset); + mdelay (500); + } +} + +static struct net_device_stats * +get_stats (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; +#ifdef MEM_MAPPING + int i; +#endif + unsigned int stat_reg; + + /* All statistics registers need to be acknowledged, + else statistic overflow could cause problems */ + + np->stats.rx_packets += dr32(FramesRcvOk); + np->stats.tx_packets += dr32(FramesXmtOk); + np->stats.rx_bytes += dr32(OctetRcvOk); + np->stats.tx_bytes += dr32(OctetXmtOk); + + np->stats.multicast = dr32(McstFramesRcvdOk); + np->stats.collisions += dr32(SingleColFrames) + + dr32(MultiColFrames); + + /* detailed tx errors */ + stat_reg = dr16(FramesAbortXSColls); + np->stats.tx_aborted_errors += stat_reg; + np->stats.tx_errors += stat_reg; + + stat_reg = dr16(CarrierSenseErrors); + np->stats.tx_carrier_errors += stat_reg; + np->stats.tx_errors += stat_reg; + + /* Clear all other statistic register. */ + dr32(McstOctetXmtOk); + dr16(BcstFramesXmtdOk); + dr32(McstFramesXmtdOk); + dr16(BcstFramesRcvdOk); + dr16(MacControlFramesRcvd); + dr16(FrameTooLongErrors); + dr16(InRangeLengthErrors); + dr16(FramesCheckSeqErrors); + dr16(FramesLostRxErrors); + dr32(McstOctetXmtOk); + dr32(BcstOctetXmtOk); + dr32(McstFramesXmtdOk); + dr32(FramesWDeferredXmt); + dr32(LateCollisions); + dr16(BcstFramesXmtdOk); + dr16(MacControlFramesXmtd); + dr16(FramesWEXDeferal); + +#ifdef MEM_MAPPING + for (i = 0x100; i <= 0x150; i += 4) + dr32(i); +#endif + dr16(TxJumboFrames); + dr16(RxJumboFrames); + dr16(TCPCheckSumErrors); + dr16(UDPCheckSumErrors); + dr16(IPCheckSumErrors); + return &np->stats; +} + +static int +clear_stats (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; +#ifdef MEM_MAPPING + int i; +#endif + + /* All statistics registers need to be acknowledged, + else statistic overflow could cause problems */ + dr32(FramesRcvOk); + dr32(FramesXmtOk); + dr32(OctetRcvOk); + dr32(OctetXmtOk); + + dr32(McstFramesRcvdOk); + dr32(SingleColFrames); + dr32(MultiColFrames); + dr32(LateCollisions); + /* detailed rx errors */ + dr16(FrameTooLongErrors); + dr16(InRangeLengthErrors); + dr16(FramesCheckSeqErrors); + dr16(FramesLostRxErrors); + + /* detailed tx errors */ + dr16(FramesAbortXSColls); + dr16(CarrierSenseErrors); + + /* Clear all other statistic register. */ + dr32(McstOctetXmtOk); + dr16(BcstFramesXmtdOk); + dr32(McstFramesXmtdOk); + dr16(BcstFramesRcvdOk); + dr16(MacControlFramesRcvd); + dr32(McstOctetXmtOk); + dr32(BcstOctetXmtOk); + dr32(McstFramesXmtdOk); + dr32(FramesWDeferredXmt); + dr16(BcstFramesXmtdOk); + dr16(MacControlFramesXmtd); + dr16(FramesWEXDeferal); +#ifdef MEM_MAPPING + for (i = 0x100; i <= 0x150; i += 4) + dr32(i); +#endif + dr16(TxJumboFrames); + dr16(RxJumboFrames); + dr16(TCPCheckSumErrors); + dr16(UDPCheckSumErrors); + dr16(IPCheckSumErrors); + return 0; +} + + +static int +change_mtu (struct net_device *dev, int new_mtu) +{ + struct netdev_private *np = netdev_priv(dev); + int max = (np->jumbo) ? MAX_JUMBO : 1536; + + if ((new_mtu < 68) || (new_mtu > max)) { + return -EINVAL; + } + + dev->mtu = new_mtu; + + return 0; +} + +static void +set_multicast (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + u32 hash_table[2]; + u16 rx_mode = 0; + + hash_table[0] = hash_table[1] = 0; + /* RxFlowcontrol DA: 01-80-C2-00-00-01. 
Hash index=0x39 */ + hash_table[1] |= 0x02000000; + if (dev->flags & IFF_PROMISC) { + /* Receive all frames promiscuously. */ + rx_mode = ReceiveAllFrames; + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > multicast_filter_limit)) { + /* Receive broadcast and multicast frames */ + rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + /* Receive broadcast frames and multicast frames filtering + by Hashtable */ + rx_mode = + ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; + netdev_for_each_mc_addr(ha, dev) { + int bit, index = 0; + int crc = ether_crc_le(ETH_ALEN, ha->addr); + /* The inverted high significant 6 bits of CRC are + used as an index to hashtable */ + for (bit = 0; bit < 6; bit++) + if (crc & (1 << (31 - bit))) + index |= (1 << bit); + hash_table[index / 32] |= (1 << (index % 32)); + } + } else { + rx_mode = ReceiveBroadcast | ReceiveUnicast; + } + if (np->vlan) { + /* ReceiveVLANMatch field in ReceiveMode */ + rx_mode |= ReceiveVLANMatch; + } + + dw32(HashTable0, hash_table[0]); + dw32(HashTable1, hash_table[1]); + dw16(ReceiveMode, rx_mode); +} + +static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct netdev_private *np = netdev_priv(dev); + + strlcpy(info->driver, "dl2k", sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); +} + +static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct netdev_private *np = netdev_priv(dev); + if (np->phy_media) { + /* fiber device */ + cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + } else { + /* copper device */ + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_MII; + cmd->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full| + ADVERTISED_Autoneg | ADVERTISED_MII; + cmd->port = PORT_MII; + cmd->transceiver = XCVR_INTERNAL; + } + if ( np->link_status ) { + ethtool_cmd_speed_set(cmd, np->speed); + cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } + if ( np->an_enable) + cmd->autoneg = AUTONEG_ENABLE; + else + cmd->autoneg = AUTONEG_DISABLE; + + cmd->phy_address = np->phy_addr; + return 0; +} + +static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct netdev_private *np = netdev_priv(dev); + netif_carrier_off(dev); + if (cmd->autoneg == AUTONEG_ENABLE) { + if (np->an_enable) + return 0; + else { + np->an_enable = 1; + mii_set_media(dev); + return 0; + } + } else { + np->an_enable = 0; + if (np->speed == 1000) { + ethtool_cmd_speed_set(cmd, SPEED_100); + cmd->duplex = DUPLEX_FULL; + printk("Warning!! 
Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n"); + } + switch (ethtool_cmd_speed(cmd)) { + case SPEED_10: + np->speed = 10; + np->full_duplex = (cmd->duplex == DUPLEX_FULL); + break; + case SPEED_100: + np->speed = 100; + np->full_duplex = (cmd->duplex == DUPLEX_FULL); + break; + case SPEED_1000: /* not supported */ + default: + return -EINVAL; + } + mii_set_media(dev); + } + return 0; +} + +static u32 rio_get_link(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return np->link_status; +} + +static const struct ethtool_ops ethtool_ops = { + .get_drvinfo = rio_get_drvinfo, + .get_settings = rio_get_settings, + .set_settings = rio_set_settings, + .get_link = rio_get_link, +}; + +static int +rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) +{ + int phy_addr; + struct netdev_private *np = netdev_priv(dev); + struct mii_ioctl_data *miidata = if_mii(rq); + + phy_addr = np->phy_addr; + switch (cmd) { + case SIOCGMIIPHY: + miidata->phy_id = phy_addr; + break; + case SIOCGMIIREG: + miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num); + break; + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +#define EEP_READ 0x0200 +#define EEP_BUSY 0x8000 +/* Read the EEPROM word */ +/* We use I/O instruction to read/write eeprom to avoid fail on some machines */ +static int read_eeprom(struct netdev_private *np, int eep_addr) +{ + void __iomem *ioaddr = np->eeprom_addr; + int i = 1000; + + dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff)); + while (i-- > 0) { + if (!(dr16(EepromCtrl) & EEP_BUSY)) + return dr16(EepromData); + } + return 0; +} + +enum phy_ctrl_bits { + MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04, + MII_DUPLEX = 0x08, +}; + +#define mii_delay() dr8(PhyCtrl) +static void +mii_sendbit (struct net_device *dev, u32 data) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + + data = ((data) ? 
MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE; + dw8(PhyCtrl, data); + mii_delay (); + dw8(PhyCtrl, data | MII_CLK); + mii_delay (); +} + +static int +mii_getbit (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + u8 data; + + data = (dr8(PhyCtrl) & 0xf8) | MII_READ; + dw8(PhyCtrl, data); + mii_delay (); + dw8(PhyCtrl, data | MII_CLK); + mii_delay (); + return (dr8(PhyCtrl) >> 1) & 1; +} + +static void +mii_send_bits (struct net_device *dev, u32 data, int len) +{ + int i; + + for (i = len - 1; i >= 0; i--) { + mii_sendbit (dev, data & (1 << i)); + } +} + +static int +mii_read (struct net_device *dev, int phy_addr, int reg_num) +{ + u32 cmd; + int i; + u32 retval = 0; + + /* Preamble */ + mii_send_bits (dev, 0xffffffff, 32); + /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ + /* ST,OP = 0110'b for read operation */ + cmd = (0x06 << 10 | phy_addr << 5 | reg_num); + mii_send_bits (dev, cmd, 14); + /* Turnaround */ + if (mii_getbit (dev)) + goto err_out; + /* Read data */ + for (i = 0; i < 16; i++) { + retval |= mii_getbit (dev); + retval <<= 1; + } + /* End cycle */ + mii_getbit (dev); + return (retval >> 1) & 0xffff; + + err_out: + return 0; +} +static int +mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data) +{ + u32 cmd; + + /* Preamble */ + mii_send_bits (dev, 0xffffffff, 32); + /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ + /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */ + cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data; + mii_send_bits (dev, cmd, 32); + /* End cycle */ + mii_getbit (dev); + return 0; +} +static int +mii_wait_link (struct net_device *dev, int wait) +{ + __u16 bmsr; + int phy_addr; + struct netdev_private *np; + + np = netdev_priv(dev); + phy_addr = np->phy_addr; + + do { + bmsr = mii_read (dev, phy_addr, MII_BMSR); + if (bmsr & BMSR_LSTATUS) + return 0; + mdelay (1); + } while (--wait > 0); + return -1; +} +static int +mii_get_media (struct net_device *dev) +{ + __u16 negotiate; + __u16 bmsr; + __u16 mscr; + __u16 mssr; + int phy_addr; + struct netdev_private *np; + + np = netdev_priv(dev); + phy_addr = np->phy_addr; + + bmsr = mii_read (dev, phy_addr, MII_BMSR); + if (np->an_enable) { + if (!(bmsr & BMSR_ANEGCOMPLETE)) { + /* Auto-Negotiation not completed */ + return -1; + } + negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) & + mii_read (dev, phy_addr, MII_LPA); + mscr = mii_read (dev, phy_addr, MII_CTRL1000); + mssr = mii_read (dev, phy_addr, MII_STAT1000); + if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) { + np->speed = 1000; + np->full_duplex = 1; + printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); + } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) { + np->speed = 1000; + np->full_duplex = 0; + printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n"); + } else if (negotiate & ADVERTISE_100FULL) { + np->speed = 100; + np->full_duplex = 1; + printk (KERN_INFO "Auto 100 Mbps, Full duplex\n"); + } else if (negotiate & ADVERTISE_100HALF) { + np->speed = 100; + np->full_duplex = 0; + printk (KERN_INFO "Auto 100 Mbps, Half duplex\n"); + } else if (negotiate & ADVERTISE_10FULL) { + np->speed = 10; + np->full_duplex = 1; + printk (KERN_INFO "Auto 10 Mbps, Full duplex\n"); + } else if (negotiate & ADVERTISE_10HALF) { + np->speed = 10; + np->full_duplex = 0; + printk (KERN_INFO "Auto 10 Mbps, Half duplex\n"); + } + if (negotiate & ADVERTISE_PAUSE_CAP) { + np->tx_flow &= 1; + np->rx_flow &= 1; + 
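/* Symmetric PAUSE advertised by the link partner: keep the user-selected tx_flow and rx_flow settings (the "&= 1" merely clamps the one-bit fields). */ +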
} else if (negotiate & ADVERTISE_PAUSE_ASYM) { + np->tx_flow = 0; + np->rx_flow &= 1; + } + /* else tx_flow, rx_flow = user select */ + } else { + __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR); + switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) { + case BMCR_SPEED1000: + printk (KERN_INFO "Operating at 1000 Mbps, "); + break; + case BMCR_SPEED100: + printk (KERN_INFO "Operating at 100 Mbps, "); + break; + case 0: + printk (KERN_INFO "Operating at 10 Mbps, "); + } + if (bmcr & BMCR_FULLDPLX) { + printk (KERN_CONT "Full duplex\n"); + } else { + printk (KERN_CONT "Half duplex\n"); + } + } + if (np->tx_flow) + printk(KERN_INFO "Enable Tx Flow Control\n"); + else + printk(KERN_INFO "Disable Tx Flow Control\n"); + if (np->rx_flow) + printk(KERN_INFO "Enable Rx Flow Control\n"); + else + printk(KERN_INFO "Disable Rx Flow Control\n"); + + return 0; +} + +static int +mii_set_media (struct net_device *dev) +{ + __u16 pscr; + __u16 bmcr; + __u16 bmsr; + __u16 anar; + int phy_addr; + struct netdev_private *np; + np = netdev_priv(dev); + phy_addr = np->phy_addr; + + /* Does user set speed? */ + if (np->an_enable) { + /* Advertise capabilities */ + bmsr = mii_read (dev, phy_addr, MII_BMSR); + anar = mii_read (dev, phy_addr, MII_ADVERTISE) & + ~(ADVERTISE_100FULL | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_10HALF | + ADVERTISE_100BASE4); + if (bmsr & BMSR_100FULL) + anar |= ADVERTISE_100FULL; + if (bmsr & BMSR_100HALF) + anar |= ADVERTISE_100HALF; + if (bmsr & BMSR_100BASE4) + anar |= ADVERTISE_100BASE4; + if (bmsr & BMSR_10FULL) + anar |= ADVERTISE_10FULL; + if (bmsr & BMSR_10HALF) + anar |= ADVERTISE_10HALF; + anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mii_write (dev, phy_addr, MII_ADVERTISE, anar); + + /* Enable Auto crossover */ + pscr = mii_read (dev, phy_addr, MII_PHY_SCR); + pscr |= 3 << 5; /* 11'b */ + mii_write (dev, phy_addr, MII_PHY_SCR, pscr); + + /* Soft reset PHY */ + mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); + bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; + mii_write (dev, phy_addr, MII_BMCR, bmcr); + mdelay(1); + } else { + /* Force speed setting */ + /* 1) Disable Auto crossover */ + pscr = mii_read (dev, phy_addr, MII_PHY_SCR); + pscr &= ~(3 << 5); + mii_write (dev, phy_addr, MII_PHY_SCR, pscr); + + /* 2) PHY Reset */ + bmcr = mii_read (dev, phy_addr, MII_BMCR); + bmcr |= BMCR_RESET; + mii_write (dev, phy_addr, MII_BMCR, bmcr); + + /* 3) Power Down */ + bmcr = 0x1940; /* must be 0x1940 */ + mii_write (dev, phy_addr, MII_BMCR, bmcr); + mdelay (100); /* wait a certain time */ + + /* 4) Advertise nothing */ + mii_write (dev, phy_addr, MII_ADVERTISE, 0); + + /* 5) Set media and Power Up */ + bmcr = BMCR_PDOWN; + if (np->speed == 100) { + bmcr |= BMCR_SPEED100; + printk (KERN_INFO "Manual 100 Mbps, "); + } else if (np->speed == 10) { + printk (KERN_INFO "Manual 10 Mbps, "); + } + if (np->full_duplex) { + bmcr |= BMCR_FULLDPLX; + printk (KERN_CONT "Full duplex\n"); + } else { + printk (KERN_CONT "Half duplex\n"); + } +#if 0 + /* Set 1000BaseT Master/Slave setting */ + mscr = mii_read (dev, phy_addr, MII_CTRL1000); + mscr |= MII_MSCR_CFG_ENABLE; + mscr &= ~MII_MSCR_CFG_VALUE = 0; +#endif + mii_write (dev, phy_addr, MII_BMCR, bmcr); + mdelay(10); + } + return 0; +} + +static int +mii_get_media_pcs (struct net_device *dev) +{ + __u16 negotiate; + __u16 bmsr; + int phy_addr; + struct netdev_private *np; + + np = netdev_priv(dev); + phy_addr = np->phy_addr; + + bmsr = mii_read (dev, phy_addr, PCS_BMSR); + if (np->an_enable) { + if (!(bmsr & BMSR_ANEGCOMPLETE)) { 
+ /* Auto-Negotiation not completed */ + return -1; + } + negotiate = mii_read (dev, phy_addr, PCS_ANAR) & + mii_read (dev, phy_addr, PCS_ANLPAR); + np->speed = 1000; + if (negotiate & PCS_ANAR_FULL_DUPLEX) { + printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n"); + np->full_duplex = 1; + } else { + printk (KERN_INFO "Auto 1000 Mbps, half duplex\n"); + np->full_duplex = 0; + } + if (negotiate & PCS_ANAR_PAUSE) { + np->tx_flow &= 1; + np->rx_flow &= 1; + } else if (negotiate & PCS_ANAR_ASYMMETRIC) { + np->tx_flow = 0; + np->rx_flow &= 1; + } + /* else tx_flow, rx_flow = user select */ + } else { + __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR); + printk (KERN_INFO "Operating at 1000 Mbps, "); + if (bmcr & BMCR_FULLDPLX) { + printk (KERN_CONT "Full duplex\n"); + } else { + printk (KERN_CONT "Half duplex\n"); + } + } + if (np->tx_flow) + printk(KERN_INFO "Enable Tx Flow Control\n"); + else + printk(KERN_INFO "Disable Tx Flow Control\n"); + if (np->rx_flow) + printk(KERN_INFO "Enable Rx Flow Control\n"); + else + printk(KERN_INFO "Disable Rx Flow Control\n"); + + return 0; +} + +static int +mii_set_media_pcs (struct net_device *dev) +{ + __u16 bmcr; + __u16 esr; + __u16 anar; + int phy_addr; + struct netdev_private *np; + np = netdev_priv(dev); + phy_addr = np->phy_addr; + + /* Auto-Negotiation? */ + if (np->an_enable) { + /* Advertise capabilities */ + esr = mii_read (dev, phy_addr, PCS_ESR); + anar = mii_read (dev, phy_addr, MII_ADVERTISE) & + ~PCS_ANAR_HALF_DUPLEX & + ~PCS_ANAR_FULL_DUPLEX; + if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD)) + anar |= PCS_ANAR_HALF_DUPLEX; + if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD)) + anar |= PCS_ANAR_FULL_DUPLEX; + anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC; + mii_write (dev, phy_addr, MII_ADVERTISE, anar); + + /* Soft reset PHY */ + mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET); + bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET; + mii_write (dev, phy_addr, MII_BMCR, bmcr); + mdelay(1); + } else { + /* Force speed setting */ + /* PHY Reset */ + bmcr = BMCR_RESET; + mii_write (dev, phy_addr, MII_BMCR, bmcr); + mdelay(10); + if (np->full_duplex) { + bmcr = BMCR_FULLDPLX; + printk (KERN_INFO "Manual full duplex\n"); + } else { + bmcr = 0; + printk (KERN_INFO "Manual half duplex\n"); + } + mii_write (dev, phy_addr, MII_BMCR, bmcr); + mdelay(10); + + /* Advertise nothing */ + mii_write (dev, phy_addr, MII_ADVERTISE, 0); + } + return 0; +} + + +static int +rio_close (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + + struct pci_dev *pdev = np->pdev; + struct sk_buff *skb; + int i; + + netif_stop_queue (dev); + + /* Disable interrupts */ + dw16(IntEnable, 0); + + /* Stop Tx and Rx logics */ + dw32(MACCtrl, TxDisable | RxDisable | StatsDisable); + + free_irq(pdev->irq, dev); + del_timer_sync (&np->timer); + + /* Free all the skbuffs in the queue. 
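Rx buffers still on the ring are unmapped (PCI_DMA_FROMDEVICE) and freed, queued Tx buffers are unmapped (PCI_DMA_TODEVICE) and freed, and the Rx descriptors are cleared so that a later open() starts from an empty ring.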
*/ + for (i = 0; i < RX_RING_SIZE; i++) { + skb = np->rx_skbuff[i]; + if (skb) { + pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]), + skb->len, PCI_DMA_FROMDEVICE); + dev_kfree_skb (skb); + np->rx_skbuff[i] = NULL; + } + np->rx_ring[i].status = 0; + np->rx_ring[i].fraginfo = 0; + } + for (i = 0; i < TX_RING_SIZE; i++) { + skb = np->tx_skbuff[i]; + if (skb) { + pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]), + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb (skb); + np->tx_skbuff[i] = NULL; + } + } + + return 0; +} + +static void +rio_remove1 (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + + if (dev) { + struct netdev_private *np = netdev_priv(dev); + + unregister_netdev (dev); + pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, + np->rx_ring_dma); + pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, + np->tx_ring_dma); +#ifdef MEM_MAPPING + pci_iounmap(pdev, np->ioaddr); +#endif + pci_iounmap(pdev, np->eeprom_addr); + free_netdev (dev); + pci_release_regions (pdev); + pci_disable_device (pdev); + } +} + +static struct pci_driver rio_driver = { + .name = "dl2k", + .id_table = rio_pci_tbl, + .probe = rio_probe1, + .remove = rio_remove1, +}; + +module_pci_driver(rio_driver); +/* + +Compile command: + +gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c + +Read Documentation/networking/dl2k.txt for details. + +*/ + diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h new file mode 100644 index 000000000..23c07b007 --- /dev/null +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -0,0 +1,425 @@ +/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */ +/* + Copyright (c) 2001, 2002 by D-Link Corporation + Written by Edward Peng. + Created 03-May-2001, base on Linux' sundance.c. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. +*/ + +#ifndef __DL2K_H__ +#define __DL2K_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Processor type for cache alignment. */ +#include +#include +#include +#include +#include +#define TX_RING_SIZE 256 +#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/ +#define RX_RING_SIZE 256 +#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) +#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) + +/* Offsets to the device registers. + Unlike software-only systems, device drivers interact with complex hardware. + It's not useful to define symbolic names for every register bit in the + device. The name can only partially document the semantics and make + the driver longer and more difficult to read. + In general, only the important configuration values or bits changed + multiple times should be defined symbolically. 
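In dl2k.c these offsets are used through the dr8/dr16/dr32 and dw8/dw16/dw32 helpers, which presumably access the register at ioaddr + offset; dw16(ReceiveMode, rx_mode) in set_multicast(), for example, updates the 16-bit receive-mode register at offset 0x88.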
+*/ +enum dl2x_offsets { + /* I/O register offsets */ + DMACtrl = 0x00, + RxDMAStatus = 0x08, + TFDListPtr0 = 0x10, + TFDListPtr1 = 0x14, + TxDMABurstThresh = 0x18, + TxDMAUrgentThresh = 0x19, + TxDMAPollPeriod = 0x1a, + RFDListPtr0 = 0x1c, + RFDListPtr1 = 0x20, + RxDMABurstThresh = 0x24, + RxDMAUrgentThresh = 0x25, + RxDMAPollPeriod = 0x26, + RxDMAIntCtrl = 0x28, + DebugCtrl = 0x2c, + ASICCtrl = 0x30, + FifoCtrl = 0x38, + RxEarlyThresh = 0x3a, + FlowOffThresh = 0x3c, + FlowOnThresh = 0x3e, + TxStartThresh = 0x44, + EepromData = 0x48, + EepromCtrl = 0x4a, + ExpromAddr = 0x4c, + Exprodata = 0x50, + WakeEvent = 0x51, + CountDown = 0x54, + IntStatusAck = 0x5a, + IntEnable = 0x5c, + IntStatus = 0x5e, + TxStatus = 0x60, + MACCtrl = 0x6c, + VLANTag = 0x70, + PhyCtrl = 0x76, + StationAddr0 = 0x78, + StationAddr1 = 0x7a, + StationAddr2 = 0x7c, + VLANId = 0x80, + MaxFrameSize = 0x86, + ReceiveMode = 0x88, + HashTable0 = 0x8c, + HashTable1 = 0x90, + RmonStatMask = 0x98, + StatMask = 0x9c, + RxJumboFrames = 0xbc, + TCPCheckSumErrors = 0xc0, + IPCheckSumErrors = 0xc2, + UDPCheckSumErrors = 0xc4, + TxJumboFrames = 0xf4, + /* Ethernet MIB statistic register offsets */ + OctetRcvOk = 0xa8, + McstOctetRcvOk = 0xac, + BcstOctetRcvOk = 0xb0, + FramesRcvOk = 0xb4, + McstFramesRcvdOk = 0xb8, + BcstFramesRcvdOk = 0xbe, + MacControlFramesRcvd = 0xc6, + FrameTooLongErrors = 0xc8, + InRangeLengthErrors = 0xca, + FramesCheckSeqErrors = 0xcc, + FramesLostRxErrors = 0xce, + OctetXmtOk = 0xd0, + McstOctetXmtOk = 0xd4, + BcstOctetXmtOk = 0xd8, + FramesXmtOk = 0xdc, + McstFramesXmtdOk = 0xe0, + FramesWDeferredXmt = 0xe4, + LateCollisions = 0xe8, + MultiColFrames = 0xec, + SingleColFrames = 0xf0, + BcstFramesXmtdOk = 0xf6, + CarrierSenseErrors = 0xf8, + MacControlFramesXmtd = 0xfa, + FramesAbortXSColls = 0xfc, + FramesWEXDeferal = 0xfe, + /* RMON statistic register offsets */ + EtherStatsCollisions = 0x100, + EtherStatsOctetsTransmit = 0x104, + EtherStatsPktsTransmit = 0x108, + EtherStatsPkts64OctetTransmit = 0x10c, + EtherStats65to127OctetsTransmit = 0x110, + EtherStatsPkts128to255OctetsTransmit = 0x114, + EtherStatsPkts256to511OctetsTransmit = 0x118, + EtherStatsPkts512to1023OctetsTransmit = 0x11c, + EtherStatsPkts1024to1518OctetsTransmit = 0x120, + EtherStatsCRCAlignErrors = 0x124, + EtherStatsUndersizePkts = 0x128, + EtherStatsFragments = 0x12c, + EtherStatsJabbers = 0x130, + EtherStatsOctets = 0x134, + EtherStatsPkts = 0x138, + EtherStats64Octets = 0x13c, + EtherStatsPkts65to127Octets = 0x140, + EtherStatsPkts128to255Octets = 0x144, + EtherStatsPkts256to511Octets = 0x148, + EtherStatsPkts512to1023Octets = 0x14c, + EtherStatsPkts1024to1518Octets = 0x150, +}; + +/* Bits in the interrupt status/mask registers. */ +enum IntStatus_bits { + InterruptStatus = 0x0001, + HostError = 0x0002, + MACCtrlFrame = 0x0008, + TxComplete = 0x0004, + RxComplete = 0x0010, + RxEarly = 0x0020, + IntRequested = 0x0040, + UpdateStats = 0x0080, + LinkEvent = 0x0100, + TxDMAComplete = 0x0200, + RxDMAComplete = 0x0400, + RFDListEnd = 0x0800, + RxDMAPriority = 0x1000, +}; + +/* Bits in the ReceiveMode register. */ +enum ReceiveMode_bits { + ReceiveUnicast = 0x0001, + ReceiveMulticast = 0x0002, + ReceiveBroadcast = 0x0004, + ReceiveAllFrames = 0x0008, + ReceiveMulticastHash = 0x0010, + ReceiveIPMulticast = 0x0020, + ReceiveVLANMatch = 0x0100, + ReceiveVLANHash = 0x0200, +}; +/* Bits in MACCtrl. 
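rio_error() ORs DuplexSelect, TxFlowControlEnable, RxFlowControlEnable and AutoVLANuntagging into this register when the link comes up, and rio_close() writes TxDisable | RxDisable | StatsDisable to stop the MAC.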
*/ +enum MACCtrl_bits { + DuplexSelect = 0x20, + TxFlowControlEnable = 0x80, + RxFlowControlEnable = 0x0100, + RcvFCS = 0x200, + AutoVLANtagging = 0x1000, + AutoVLANuntagging = 0x2000, + StatsEnable = 0x00200000, + StatsDisable = 0x00400000, + StatsEnabled = 0x00800000, + TxEnable = 0x01000000, + TxDisable = 0x02000000, + TxEnabled = 0x04000000, + RxEnable = 0x08000000, + RxDisable = 0x10000000, + RxEnabled = 0x20000000, +}; + +enum ASICCtrl_LoWord_bits { + PhyMedia = 0x0080, +}; + +enum ASICCtrl_HiWord_bits { + GlobalReset = 0x0001, + RxReset = 0x0002, + TxReset = 0x0004, + DMAReset = 0x0008, + FIFOReset = 0x0010, + NetworkReset = 0x0020, + HostReset = 0x0040, + ResetBusy = 0x0400, +}; + +/* Transmit Frame Control bits */ +enum TFC_bits { + DwordAlign = 0x00000000, + WordAlignDisable = 0x00030000, + WordAlign = 0x00020000, + TCPChecksumEnable = 0x00040000, + UDPChecksumEnable = 0x00080000, + IPChecksumEnable = 0x00100000, + FCSAppendDisable = 0x00200000, + TxIndicate = 0x00400000, + TxDMAIndicate = 0x00800000, + FragCountShift = 24, + VLANTagInsert = 0x0000000010000000, + TFDDone = 0x80000000, + VIDShift = 32, + UsePriorityShift = 48, +}; + +/* Receive Frames Status bits */ +enum RFS_bits { + RxFIFOOverrun = 0x00010000, + RxRuntFrame = 0x00020000, + RxAlignmentError = 0x00040000, + RxFCSError = 0x00080000, + RxOverSizedFrame = 0x00100000, + RxLengthError = 0x00200000, + VLANDetected = 0x00400000, + TCPDetected = 0x00800000, + TCPError = 0x01000000, + UDPDetected = 0x02000000, + UDPError = 0x04000000, + IPDetected = 0x08000000, + IPError = 0x10000000, + FrameStart = 0x20000000, + FrameEnd = 0x40000000, + RFDDone = 0x80000000, + TCIShift = 32, + RFS_Errors = 0x003f0000, +}; + +#define MII_RESET_TIME_OUT 10000 +/* MII register */ +enum _mii_reg { + MII_PHY_SCR = 16, +}; + +/* PCS register */ +enum _pcs_reg { + PCS_BMCR = 0, + PCS_BMSR = 1, + PCS_ANAR = 4, + PCS_ANLPAR = 5, + PCS_ANER = 6, + PCS_ANNPT = 7, + PCS_ANLPRNP = 8, + PCS_ESR = 15, +}; + +/* IEEE Extened Status Register */ +enum _mii_esr { + MII_ESR_1000BX_FD = 0x8000, + MII_ESR_1000BX_HD = 0x4000, + MII_ESR_1000BT_FD = 0x2000, + MII_ESR_1000BT_HD = 0x1000, +}; +/* PHY Specific Control Register */ +#if 0 +typedef union t_MII_PHY_SCR { + u16 image; + struct { + u16 disable_jabber:1; // bit 0 + u16 polarity_reversal:1; // bit 1 + u16 SEQ_test:1; // bit 2 + u16 _bit_3:1; // bit 3 + u16 disable_CLK125:1; // bit 4 + u16 mdi_crossover_mode:2; // bit 6:5 + u16 enable_ext_dist:1; // bit 7 + u16 _bit_8_9:2; // bit 9:8 + u16 force_link:1; // bit 10 + u16 assert_CRS:1; // bit 11 + u16 rcv_fifo_depth:2; // bit 13:12 + u16 xmit_fifo_depth:2; // bit 15:14 + } bits; +} PHY_SCR_t, *PPHY_SCR_t; +#endif + +typedef enum t_MII_ADMIN_STATUS { + adm_reset, + adm_operational, + adm_loopback, + adm_power_down, + adm_isolate +} MII_ADMIN_t, *PMII_ADMIN_t; + +/* Physical Coding Sublayer Management (PCS) */ +/* PCS control and status registers bitmap as the same as MII */ +/* PCS Extended Status register bitmap as the same as MII */ +/* PCS ANAR */ +enum _pcs_anar { + PCS_ANAR_NEXT_PAGE = 0x8000, + PCS_ANAR_REMOTE_FAULT = 0x3000, + PCS_ANAR_ASYMMETRIC = 0x0100, + PCS_ANAR_PAUSE = 0x0080, + PCS_ANAR_HALF_DUPLEX = 0x0040, + PCS_ANAR_FULL_DUPLEX = 0x0020, +}; +/* PCS ANLPAR */ +enum _pcs_anlpar { + PCS_ANLPAR_NEXT_PAGE = PCS_ANAR_NEXT_PAGE, + PCS_ANLPAR_REMOTE_FAULT = PCS_ANAR_REMOTE_FAULT, + PCS_ANLPAR_ASYMMETRIC = PCS_ANAR_ASYMMETRIC, + PCS_ANLPAR_PAUSE = PCS_ANAR_PAUSE, + PCS_ANLPAR_HALF_DUPLEX = PCS_ANAR_HALF_DUPLEX, + PCS_ANLPAR_FULL_DUPLEX = 
PCS_ANAR_FULL_DUPLEX, +}; + +typedef struct t_SROM { + u16 config_param; /* 0x00 */ + u16 asic_ctrl; /* 0x02 */ + u16 sub_vendor_id; /* 0x04 */ + u16 sub_system_id; /* 0x06 */ + u16 reserved1[12]; /* 0x08-0x1f */ + u8 mac_addr[6]; /* 0x20-0x25 */ + u8 reserved2[10]; /* 0x26-0x2f */ + u8 sib[204]; /* 0x30-0xfb */ + u32 crc; /* 0xfc-0xff */ +} SROM_t, *PSROM_t; + +/* Ioctl custom data */ +struct ioctl_data { + char signature[10]; + int cmd; + int len; + char *data; +}; + +/* The Rx and Tx buffer descriptors. */ +struct netdev_desc { + __le64 next_desc; + __le64 status; + __le64 fraginfo; +}; + +#define PRIV_ALIGN 15 /* Required alignment mask */ +/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment + within the structure. */ +struct netdev_private { + /* Descriptor rings first for alignment. */ + struct netdev_desc *rx_ring; + struct netdev_desc *tx_ring; + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + dma_addr_t tx_ring_dma; + dma_addr_t rx_ring_dma; + struct pci_dev *pdev; + void __iomem *ioaddr; + void __iomem *eeprom_addr; + spinlock_t tx_lock; + spinlock_t rx_lock; + struct net_device_stats stats; + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + unsigned int speed; /* Operating speed */ + unsigned int vlan; /* VLAN Id */ + unsigned int chip_id; /* PCI table chip id */ + unsigned int rx_coalesce; /* Maximum frames each RxDMAComplete intr */ + unsigned int rx_timeout; /* Wait time between RxDMAComplete intr */ + unsigned int tx_coalesce; /* Maximum frames each tx interrupt */ + unsigned int full_duplex:1; /* Full-duplex operation requested. */ + unsigned int an_enable:2; /* Auto-Negotiated Enable */ + unsigned int jumbo:1; /* Jumbo frame enable */ + unsigned int coalesce:1; /* Rx coalescing enable */ + unsigned int tx_flow:1; /* Tx flow control enable */ + unsigned int rx_flow:1; /* Rx flow control enable */ + unsigned int phy_media:1; /* 1: fiber, 0: copper */ + unsigned int link_status:1; /* Current link status */ + struct netdev_desc *last_tx; /* Last Tx descriptor used. */ + unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */ + unsigned long cur_tx, old_tx; + struct timer_list timer; + int wake_polarity; + char name[256]; /* net device description */ + u8 duplex_polarity; + u16 mcast_filter[4]; + u16 advertising; /* NWay media advertisement */ + u16 negotiate; /* Negotiated media */ + int phy_addr; /* PHY addresses. */ +}; + +/* The station address location in the EEPROM. */ +/* The struct pci_device_id consist of: + vendor, device Vendor and device ID to match (or PCI_ANY_ID) + subvendor, subdevice Subsystem vendor and device ID to match (or PCI_ANY_ID) + class Device class to match. The class_mask tells which bits + class_mask of the class are honored during the comparison. + driver_data Data private to the driver. +*/ + +static const struct pci_device_id rio_pci_tbl[] = { + {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, + {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, + { } +}; +MODULE_DEVICE_TABLE (pci, rio_pci_tbl); +#define TX_TIMEOUT (4*HZ) +#define PACKET_SIZE 1536 +#define MAX_JUMBO 8000 +#define RIO_IO_SIZE 340 +#define DEFAULT_RXC 5 +#define DEFAULT_RXT 750 +#define DEFAULT_TXC 1 +#define MAX_TXC 8 +#endif /* __DL2K_H__ */ diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c new file mode 100644 index 000000000..a28a2e583 --- /dev/null +++ b/drivers/net/ethernet/dlink/sundance.c @@ -0,0 +1,2027 @@ +/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". 
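The same driver appears to cover the D-Link DFE-5xx boards and the IC Plus IP100A listed in pci_id_tbl below.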
*/ +/* + Written 1999-2000 by Donald Becker. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support and updates available at + http://www.scyld.com/network/sundance.html + [link no longer provides useful info -jgarzik] + Archives of the mailing list are still available at + http://www.beowulf.org/pipermail/netdrivers/ + +*/ + +#define DRV_NAME "sundance" +#define DRV_VERSION "1.2" +#define DRV_RELDATE "11-Sep-2006" + + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ +/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). + Typical is a 64 element hash table based on the Ethernet CRC. */ +static const int multicast_filter_limit = 32; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1518 effectively disables this feature. + This chip can receive into offset buffers, so the Alpha does not + need a copy-align. */ +static int rx_copybreak; +static int flowctrl=1; + +/* media[] specifies the media type the NIC operates at. + autosense Autosensing active media. + 10mbps_hd 10Mbps half duplex. + 10mbps_fd 10Mbps full duplex. + 100mbps_hd 100Mbps half duplex. + 100mbps_fd 100Mbps full duplex. + 0 Autosensing active media. + 1 10Mbps half duplex. + 2 10Mbps full duplex. + 3 100Mbps half duplex. + 4 100Mbps full duplex. +*/ +#define MAX_UNITS 8 +static char *media[MAX_UNITS]; + + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + The compiler will convert '%'<2^N> into a bit mask. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority, and more than 128 requires modifying the + Tx error recovery. + Large receive rings merely waste memory. */ +#define TX_RING_SIZE 32 +#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */ +#define RX_RING_SIZE 64 +#define RX_BUDGET 32 +#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) +#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (4*HZ) +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +/* Include files, designed to support most kernel versions 2.0.0 and later. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Processor type for cache alignment. */ +#include +#include +#include +#include +#include +#include +#include + +/* These identify the driver base version and may not be removed. 
*/ +static const char version[] = + KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE + " Written by Donald Becker\n"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param_array(media, charp, NULL, 0); +module_param(flowctrl, int, 0); +MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)"); +MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]"); + +/* + Theory of Operation + +I. Board Compatibility + +This driver is designed for the Sundance Technologies "Alta" ST201 chip. + +II. Board-specific settings + +III. Driver operation + +IIIa. Ring buffers + +This driver uses two statically allocated fixed-size descriptor lists +formed into rings by a branch from the final descriptor to the beginning of +the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. +Some chips explicitly use only 2^N sized rings, while others use a +'next descriptor' pointer that the driver forms into rings. + +IIIb/c. Transmit/Receive Structure + +This driver uses a zero-copy receive and transmit scheme. +The driver allocates full frame size skbuffs for the Rx ring buffers at +open() time and passes the skb->data field to the chip as receive data +buffers. When an incoming frame is less than RX_COPYBREAK bytes long, +a fresh skbuff is allocated and the frame is copied to the new skbuff. +When the incoming frame is larger, the skbuff is passed directly up the +protocol stack. Buffers consumed this way are replaced by newly allocated +skbuffs in a later phase of receives. + +The RX_COPYBREAK value is chosen to trade-off the memory wasted by +using a full-sized skbuff for small frames vs. the copying costs of larger +frames. New boards are typically used in generously configured machines +and the underfilled buffers have negligible impact compared to the benefit of +a single allocation size, so the default value of zero results in never +copying packets. When copying is done, the cost is usually mitigated by using +a combined copy/checksum routine. Copying also preloads the cache, which is +most useful with small frames. + +A subtle aspect of the operation is that the IP header at offset 14 in an +ethernet frame isn't longword aligned for further processing. +Unaligned buffers are permitted by the Sundance hardware, so +frames are received into the skbuff at an offset of "+2", 16-byte aligning +the IP header. + +IIId. Synchronization + +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and interrupt handling software. + +The send packet thread has partial control over the Tx ring and 'dev->tbusy' +flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next +queue slot is empty, it clears the tbusy flag when finished otherwise it sets +the 'lp->tx_full' flag. + +The interrupt handler has exclusive control over the Rx ring and records stats +from the Tx ring. After reaping the stats, it marks the Tx queue entry as +empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it +clears both the tx_full and tbusy flags. + +IV. Notes + +IVb. References + +The Sundance ST201 datasheet, preliminary version. 
+The Kendin KS8723 datasheet, preliminary version. +The ICplus IP100 datasheet, preliminary version. +http://www.scyld.com/expert/100mbps.html +http://www.scyld.com/expert/NWay.html + +IVc. Errata + +*/ + +/* Work-around for Kendin chip bugs. */ +#ifndef CONFIG_SUNDANCE_MMIO +#define USE_IO_OPS 1 +#endif + +static const struct pci_device_id sundance_pci_tbl[] = { + { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, + { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, + { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, + { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 }, + { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, + { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, + { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, + { } +}; +MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); + +enum { + netdev_io_size = 128 +}; + +struct pci_id_info { + const char *name; +}; +static const struct pci_id_info pci_id_tbl[] = { + {"D-Link DFE-550TX FAST Ethernet Adapter"}, + {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, + {"D-Link DFE-580TX 4 port Server Adapter"}, + {"D-Link DFE-530TXS FAST Ethernet Adapter"}, + {"D-Link DL10050-based FAST Ethernet Adapter"}, + {"Sundance Technology Alta"}, + {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, + { } /* terminate list. */ +}; + +/* This driver was written to use PCI memory space, however x86-oriented + hardware often uses I/O space accesses. */ + +/* Offsets to the device registers. + Unlike software-only systems, device drivers interact with complex hardware. + It's not useful to define symbolic names for every register bit in the + device. The name can only partially document the semantics and make + the driver longer and more difficult to read. + In general, only the important configuration values or bits changed + multiple times should be defined symbolically. +*/ +enum alta_offsets { + DMACtrl = 0x00, + TxListPtr = 0x04, + TxDMABurstThresh = 0x08, + TxDMAUrgentThresh = 0x09, + TxDMAPollPeriod = 0x0a, + RxDMAStatus = 0x0c, + RxListPtr = 0x10, + DebugCtrl0 = 0x1a, + DebugCtrl1 = 0x1c, + RxDMABurstThresh = 0x14, + RxDMAUrgentThresh = 0x15, + RxDMAPollPeriod = 0x16, + LEDCtrl = 0x1a, + ASICCtrl = 0x30, + EEData = 0x34, + EECtrl = 0x36, + FlashAddr = 0x40, + FlashData = 0x44, + WakeEvent = 0x45, + TxStatus = 0x46, + TxFrameId = 0x47, + DownCounter = 0x18, + IntrClear = 0x4a, + IntrEnable = 0x4c, + IntrStatus = 0x4e, + MACCtrl0 = 0x50, + MACCtrl1 = 0x52, + StationAddr = 0x54, + MaxFrameSize = 0x5A, + RxMode = 0x5c, + MIICtrl = 0x5e, + MulticastFilter0 = 0x60, + MulticastFilter1 = 0x64, + RxOctetsLow = 0x68, + RxOctetsHigh = 0x6a, + TxOctetsLow = 0x6c, + TxOctetsHigh = 0x6e, + TxFramesOK = 0x70, + RxFramesOK = 0x72, + StatsCarrierError = 0x74, + StatsLateColl = 0x75, + StatsMultiColl = 0x76, + StatsOneColl = 0x77, + StatsTxDefer = 0x78, + RxMissed = 0x79, + StatsTxXSDefer = 0x7a, + StatsTxAbort = 0x7b, + StatsBcastTx = 0x7c, + StatsBcastRx = 0x7d, + StatsMcastTx = 0x7e, + StatsMcastRx = 0x7f, + /* Aliased and bogus values! */ + RxStatus = 0x0c, +}; + +#define ASIC_HI_WORD(x) ((x) + 2) + +enum ASICCtrl_HiWord_bit { + GlobalReset = 0x0001, + RxReset = 0x0002, + TxReset = 0x0004, + DMAReset = 0x0008, + FIFOReset = 0x0010, + NetworkReset = 0x0020, + HostReset = 0x0040, + ResetBusy = 0x0400, +}; + +/* Bits in the interrupt status/mask registers. 
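The subset the driver actually unmasks at open time is collected in DEFAULT_INTR further down.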
*/ +enum intr_status_bits { + IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008, + IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020, + IntrDrvRqst=0x0040, + StatsMax=0x0080, LinkChange=0x0100, + IntrTxDMADone=0x0200, IntrRxDMADone=0x0400, +}; + +/* Bits in the RxMode register. */ +enum rx_mode_bits { + AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08, + AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01, +}; +/* Bits in MACCtrl. */ +enum mac_ctrl0_bits { + EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40, + EnbFlowCtrl=0x100, EnbPassRxCRC=0x200, +}; +enum mac_ctrl1_bits { + StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080, + TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400, + RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000, +}; + +/* Bits in WakeEvent register. */ +enum wake_event_bits { + WakePktEnable = 0x01, + MagicPktEnable = 0x02, + LinkEventEnable = 0x04, + WolEnable = 0x80, +}; + +/* The Rx and Tx buffer descriptors. */ +/* Note that using only 32 bit fields simplifies conversion to big-endian + architectures. */ +struct netdev_desc { + __le32 next_desc; + __le32 status; + struct desc_frag { __le32 addr, length; } frag[1]; +}; + +/* Bits in netdev_desc.status */ +enum desc_status_bits { + DescOwn=0x8000, + DescEndPacket=0x4000, + DescEndRing=0x2000, + LastFrag=0x80000000, + DescIntrOnTx=0x8000, + DescIntrOnDMADone=0x80000000, + DisableAlign = 0x00000001, +}; + +#define PRIV_ALIGN 15 /* Required alignment mask */ +/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment + within the structure. */ +#define MII_CNT 4 +struct netdev_private { + /* Descriptor rings first for alignment. */ + struct netdev_desc *rx_ring; + struct netdev_desc *tx_ring; + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + dma_addr_t tx_ring_dma; + dma_addr_t rx_ring_dma; + struct timer_list timer; /* Media monitoring timer. */ + /* ethtool extra stats */ + struct { + u64 tx_multiple_collisions; + u64 tx_single_collisions; + u64 tx_late_collisions; + u64 tx_deferred; + u64 tx_deferred_excessive; + u64 tx_aborted; + u64 tx_bcasts; + u64 rx_bcasts; + u64 tx_mcasts; + u64 rx_mcasts; + } xstats; + /* Frequently used values: keep some adjacent for cache effect. */ + spinlock_t lock; + int msg_enable; + int chip_id; + unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + struct netdev_desc *last_tx; /* Last Tx descriptor used. */ + unsigned int cur_tx, dirty_tx; + /* These values are keep track of the transceiver/media in use. */ + unsigned int flowctrl:1; + unsigned int default_port:4; /* Last dev->if_port value. */ + unsigned int an_enable:1; + unsigned int speed; + unsigned int wol_enabled:1; /* Wake on LAN enabled */ + struct tasklet_struct rx_tasklet; + struct tasklet_struct tx_tasklet; + int budget; + int cur_task; + /* Multicast and receive mode. */ + spinlock_t mcastlock; /* SMP lock multicast updates. */ + u16 mcast_filter[4]; + /* MII transceiver section. */ + struct mii_if_info mii_if; + int mii_preamble_required; + unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ + struct pci_dev *pci_dev; + void __iomem *base; + spinlock_t statlock; +}; + +/* The station address location in the EEPROM. 
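sundance_probe1() reads three 16-bit words starting at EEPROM_SA_OFFSET through eeprom_read() to recover dev->dev_addr.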
*/ +#define EEPROM_SA_OFFSET 0x10 +#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \ + IntrDrvRqst | IntrTxDone | StatsMax | \ + LinkChange) + +static int change_mtu(struct net_device *dev, int new_mtu); +static int eeprom_read(void __iomem *ioaddr, int location); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int location, int value); +static int mdio_wait_link(struct net_device *dev, int wait); +static int netdev_open(struct net_device *dev); +static void check_duplex(struct net_device *dev); +static void netdev_timer(unsigned long data); +static void tx_timeout(struct net_device *dev); +static void init_ring(struct net_device *dev); +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); +static int reset_tx (struct net_device *dev); +static irqreturn_t intr_handler(int irq, void *dev_instance); +static void rx_poll(unsigned long data); +static void tx_poll(unsigned long data); +static void refill_rx (struct net_device *dev); +static void netdev_error(struct net_device *dev, int intr_status); +static void netdev_error(struct net_device *dev, int intr_status); +static void set_rx_mode(struct net_device *dev); +static int __set_mac_addr(struct net_device *dev); +static int sundance_set_mac_addr(struct net_device *dev, void *data); +static struct net_device_stats *get_stats(struct net_device *dev); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int netdev_close(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; + +static void sundance_reset(struct net_device *dev, unsigned long reset_cmd) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base + ASICCtrl; + int countdown; + + /* ST201 documentation states ASICCtrl is a 32bit register */ + iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr); + /* ST201 documentation states reset can take up to 1 ms */ + countdown = 10 + 1; + while (ioread32 (ioaddr) & (ResetBusy << 16)) { + if (--countdown == 0) { + printk(KERN_WARNING "%s : reset not completed !!\n", dev->name); + break; + } + udelay(100); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sundance_poll_controller(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + disable_irq(np->pci_dev->irq); + intr_handler(np->pci_dev->irq, dev); + enable_irq(np->pci_dev->irq); +} +#endif + +static const struct net_device_ops netdev_ops = { + .ndo_open = netdev_open, + .ndo_stop = netdev_close, + .ndo_start_xmit = start_tx, + .ndo_get_stats = get_stats, + .ndo_set_rx_mode = set_rx_mode, + .ndo_do_ioctl = netdev_ioctl, + .ndo_tx_timeout = tx_timeout, + .ndo_change_mtu = change_mtu, + .ndo_set_mac_address = sundance_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sundance_poll_controller, +#endif +}; + +static int sundance_probe1(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + struct netdev_private *np; + static int card_idx; + int chip_idx = ent->driver_data; + int irq; + int i; + void __iomem *ioaddr; + u16 mii_ctl; + void *ring_space; + dma_addr_t ring_dma; +#ifdef USE_IO_OPS + int bar = 0; +#else + int bar = 1; +#endif + int phy, phy_end, phy_idx = 0; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + if (pci_enable_device(pdev)) + return -EIO; 
+ pci_set_master(pdev); + + irq = pdev->irq; + + dev = alloc_etherdev(sizeof(*np)); + if (!dev) + return -ENOMEM; + SET_NETDEV_DEV(dev, &pdev->dev); + + if (pci_request_regions(pdev, DRV_NAME)) + goto err_out_netdev; + + ioaddr = pci_iomap(pdev, bar, netdev_io_size); + if (!ioaddr) + goto err_out_res; + + for (i = 0; i < 3; i++) + ((__le16 *)dev->dev_addr)[i] = + cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); + + np = netdev_priv(dev); + np->base = ioaddr; + np->pci_dev = pdev; + np->chip_id = chip_idx; + np->msg_enable = (1 << debug) - 1; + spin_lock_init(&np->lock); + spin_lock_init(&np->statlock); + tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev); + tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev); + + ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, + &ring_dma, GFP_KERNEL); + if (!ring_space) + goto err_out_cleardev; + np->tx_ring = (struct netdev_desc *)ring_space; + np->tx_ring_dma = ring_dma; + + ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, + &ring_dma, GFP_KERNEL); + if (!ring_space) + goto err_out_unmap_tx; + np->rx_ring = (struct netdev_desc *)ring_space; + np->rx_ring_dma = ring_dma; + + np->mii_if.dev = dev; + np->mii_if.mdio_read = mdio_read; + np->mii_if.mdio_write = mdio_write; + np->mii_if.phy_id_mask = 0x1f; + np->mii_if.reg_num_mask = 0x1f; + + /* The chip-specific entries in the device structure. */ + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = ðtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + pci_set_drvdata(pdev, dev); + + i = register_netdev(dev); + if (i) + goto err_out_unmap_rx; + + printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, + dev->dev_addr, irq); + + np->phys[0] = 1; /* Default setting */ + np->mii_preamble_required++; + + /* + * It seems some phys doesn't deal well with address 0 being accessed + * first + */ + if (sundance_pci_tbl[np->chip_id].device == 0x0200) { + phy = 0; + phy_end = 31; + } else { + phy = 1; + phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ + } + for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { + int phyx = phy & 0x1f; + int mii_status = mdio_read(dev, phyx, MII_BMSR); + if (mii_status != 0xffff && mii_status != 0x0000) { + np->phys[phy_idx++] = phyx; + np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); + if ((mii_status & 0x0040) == 0) + np->mii_preamble_required++; + printk(KERN_INFO "%s: MII PHY found at address %d, status " + "0x%4.4x advertising %4.4x.\n", + dev->name, phyx, mii_status, np->mii_if.advertising); + } + } + np->mii_preamble_required--; + + if (phy_idx == 0) { + printk(KERN_INFO "%s: No MII transceiver found, aborting. 
ASIC status %x\n", + dev->name, ioread32(ioaddr + ASICCtrl)); + goto err_out_unregister; + } + + np->mii_if.phy_id = np->phys[0]; + + /* Parse override configuration */ + np->an_enable = 1; + if (card_idx < MAX_UNITS) { + if (media[card_idx] != NULL) { + np->an_enable = 0; + if (strcmp (media[card_idx], "100mbps_fd") == 0 || + strcmp (media[card_idx], "4") == 0) { + np->speed = 100; + np->mii_if.full_duplex = 1; + } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || + strcmp (media[card_idx], "3") == 0) { + np->speed = 100; + np->mii_if.full_duplex = 0; + } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || + strcmp (media[card_idx], "2") == 0) { + np->speed = 10; + np->mii_if.full_duplex = 1; + } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || + strcmp (media[card_idx], "1") == 0) { + np->speed = 10; + np->mii_if.full_duplex = 0; + } else { + np->an_enable = 1; + } + } + if (flowctrl == 1) + np->flowctrl = 1; + } + + /* Fibre PHY? */ + if (ioread32 (ioaddr + ASICCtrl) & 0x80) { + /* Default 100Mbps Full */ + if (np->an_enable) { + np->speed = 100; + np->mii_if.full_duplex = 1; + np->an_enable = 0; + } + } + /* Reset PHY */ + mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); + mdelay (300); + /* If flow control enabled, we need to advertise it.*/ + if (np->flowctrl) + mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); + mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); + /* Force media type */ + if (!np->an_enable) { + mii_ctl = 0; + mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; + mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; + mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); + printk (KERN_INFO "Override speed=%d, %s duplex\n", + np->speed, np->mii_if.full_duplex ? "Full" : "Half"); + + } + + /* Perhaps move the reset here? */ + /* Reset the chip to erase previous misconfiguration. */ + if (netif_msg_hw(np)) + printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); + sundance_reset(dev, 0x00ff << 16); + if (netif_msg_hw(np)) + printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); + + card_idx++; + return 0; + +err_out_unregister: + unregister_netdev(dev); +err_out_unmap_rx: + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, + np->rx_ring, np->rx_ring_dma); +err_out_unmap_tx: + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, + np->tx_ring, np->tx_ring_dma); +err_out_cleardev: + pci_iounmap(pdev, ioaddr); +err_out_res: + pci_release_regions(pdev); +err_out_netdev: + free_netdev (dev); + return -ENODEV; +} + +static int change_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */ + return -EINVAL; + if (netif_running(dev)) + return -EBUSY; + dev->mtu = new_mtu; + return 0; +} + +#define eeprom_delay(ee_addr) ioread32(ee_addr) +/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */ +static int eeprom_read(void __iomem *ioaddr, int location) +{ + int boguscnt = 10000; /* Typical 1900 ticks. */ + iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl); + do { + eeprom_delay(ioaddr + EECtrl); + if (! (ioread16(ioaddr + EECtrl) & 0x8000)) { + return ioread16(ioaddr + EEData); + } + } while (--boguscnt > 0); + return 0; +} + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. See the MII specifications or DP83840A data sheet + for details. + + The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back 33Mhz PCI cycles. 
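The mdio_delay() macro below is simply a dummy ioread8() of the MII control register, used to pace the bit-banged clock transitions.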
*/ +#define mdio_delay() ioread8(mdio_addr) + +enum mii_reg_bits { + MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004, +}; +#define MDIO_EnbIn (0) +#define MDIO_WRITE0 (MDIO_EnbOutput) +#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput) + +/* Generate the preamble required for initial synchronization and + a few older transceivers. */ +static void mdio_sync(void __iomem *mdio_addr) +{ + int bits = 32; + + /* Establish sync by sending at least 32 logic ones. */ + while (--bits >= 0) { + iowrite8(MDIO_WRITE1, mdio_addr); + mdio_delay(); + iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); + mdio_delay(); + } +} + +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *mdio_addr = np->base + MIICtrl; + int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; + int i, retval = 0; + + if (np->mii_preamble_required) + mdio_sync(mdio_addr); + + /* Shift the read command bits out. */ + for (i = 15; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + + iowrite8(dataval, mdio_addr); + mdio_delay(); + iowrite8(dataval | MDIO_ShiftClk, mdio_addr); + mdio_delay(); + } + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 19; i > 0; i--) { + iowrite8(MDIO_EnbIn, mdio_addr); + mdio_delay(); + retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0); + iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); + mdio_delay(); + } + return (retval>>1) & 0xffff; +} + +static void mdio_write(struct net_device *dev, int phy_id, int location, int value) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *mdio_addr = np->base + MIICtrl; + int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; + int i; + + if (np->mii_preamble_required) + mdio_sync(mdio_addr); + + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + + iowrite8(dataval, mdio_addr); + mdio_delay(); + iowrite8(dataval | MDIO_ShiftClk, mdio_addr); + mdio_delay(); + } + /* Clear out extra bits. */ + for (i = 2; i > 0; i--) { + iowrite8(MDIO_EnbIn, mdio_addr); + mdio_delay(); + iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); + mdio_delay(); + } +} + +static int mdio_wait_link(struct net_device *dev, int wait) +{ + int bmsr; + int phy_id; + struct netdev_private *np; + + np = netdev_priv(dev); + phy_id = np->phys[0]; + + do { + bmsr = mdio_read(dev, phy_id, MII_BMSR); + if (bmsr & 0x0004) + return 0; + mdelay(1); + } while (--wait > 0); + return -1; +} + +static int netdev_open(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + const int irq = np->pci_dev->irq; + unsigned long flags; + int i; + + sundance_reset(dev, 0x00ff << 16); + + i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); + if (i) + return i; + + if (netif_msg_ifup(np)) + printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq); + + init_ring(dev); + + iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); + /* The Tx list pointer is written as packets are queued. */ + + /* Initialize other registers. */ + __set_mac_addr(dev); +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); +#else + iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); +#endif + if (dev->mtu > 2047) + iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl); + + /* Configure the PCI bus bursts and FIFO thresholds. 
*/ + + if (dev->if_port == 0) + dev->if_port = np->default_port; + + spin_lock_init(&np->mcastlock); + + set_rx_mode(dev); + iowrite16(0, ioaddr + IntrEnable); + iowrite16(0, ioaddr + DownCounter); + /* Set the chip to poll every N*320nsec. */ + iowrite8(100, ioaddr + RxDMAPollPeriod); + iowrite8(127, ioaddr + TxDMAPollPeriod); + /* Fix DFE-580TX packet drop issue */ + if (np->pci_dev->revision >= 0x14) + iowrite8(0x01, ioaddr + DebugCtrl1); + netif_start_queue(dev); + + spin_lock_irqsave(&np->lock, flags); + reset_tx(dev); + spin_unlock_irqrestore(&np->lock, flags); + + iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); + + /* Disable Wol */ + iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent); + np->wol_enabled = 0; + + if (netif_msg_ifup(np)) + printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " + "MAC Control %x, %4.4x %4.4x.\n", + dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), + ioread32(ioaddr + MACCtrl0), + ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); + + /* Set the timer to check for link beat. */ + init_timer(&np->timer); + np->timer.expires = jiffies + 3*HZ; + np->timer.data = (unsigned long)dev; + np->timer.function = netdev_timer; /* timer handler */ + add_timer(&np->timer); + + /* Enable interrupts by setting the interrupt mask. */ + iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); + + return 0; +} + +static void check_duplex(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); + int negotiated = mii_lpa & np->mii_if.advertising; + int duplex; + + /* Force media */ + if (!np->an_enable || mii_lpa == 0xffff) { + if (np->mii_if.full_duplex) + iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex, + ioaddr + MACCtrl0); + return; + } + + /* Autonegotiation */ + duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; + if (np->mii_if.full_duplex != duplex) { + np->mii_if.full_duplex = duplex; + if (netif_msg_link(np)) + printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " + "negotiated capability %4.4x.\n", dev->name, + duplex ? "full" : "half", np->phys[0], negotiated); + iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 
0x20 : 0), ioaddr + MACCtrl0);
+	}
+}
+
+static void netdev_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->base;
+	int next_tick = 10*HZ;
+
+	if (netif_msg_timer(np)) {
+		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
+		       "Tx %x Rx %x.\n",
+		       dev->name, ioread16(ioaddr + IntrEnable),
+		       ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+	}
+	check_duplex(dev);
+	np->timer.expires = jiffies + next_tick;
+	add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->base;
+	unsigned long flag;
+
+	netif_stop_queue(dev);
+	tasklet_disable(&np->tx_tasklet);
+	iowrite16(0, ioaddr + IntrEnable);
+	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
+	       "TxFrameId %2.2x,"
+	       " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
+	       ioread8(ioaddr + TxFrameId));
+
+	{
+		int i;
+		for (i=0; i<TX_RING_SIZE; i++) {
+			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n",
+				i,
+				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
+				le32_to_cpu(np->tx_ring[i].next_desc),
+				le32_to_cpu(np->tx_ring[i].status),
+				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
+				le32_to_cpu(np->tx_ring[i].frag[0].addr),
+				le32_to_cpu(np->tx_ring[i].frag[0].length));
+		}
+		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+			ioread32(np->base + TxListPtr),
+			netif_queue_stopped(dev));
+		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+			np->cur_tx, np->cur_tx % TX_RING_SIZE,
+			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
+		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
+		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
+	}
+	spin_lock_irqsave(&np->lock, flag);
+
+	/* Stop and restart the chip's Tx processes. */
+	reset_tx(dev);
+	spin_unlock_irqrestore(&np->lock, flag);
+
+	dev->if_port = 0;
+
+	dev->trans_start = jiffies; /* prevent tx timeout */
+	dev->stats.tx_errors++;
+	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+		netif_wake_queue(dev);
+	}
+	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+	tasklet_enable(&np->tx_tasklet);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	int i;
+
+	np->cur_rx = np->cur_tx = 0;
+	np->dirty_rx = np->dirty_tx = 0;
+	np->cur_task = 0;
+
+	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
+
+	/* Initialize all Rx descriptors. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
+			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].frag[0].length = 0;
+		np->rx_skbuff[i] = NULL;
+	}
+
+	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct sk_buff *skb =
+			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
+		np->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;
+		skb_reserve(skb, 2);	/* 16 byte align the IP header.
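+		   The 2-byte reserve plus the 14-byte Ethernet header lands
+		   the IP header on a 16-byte boundary.  A generic sketch of
+		   the same idea (not this driver's code) would use
+		   NET_IP_ALIGN, which is typically 2:
+
+		     skb = netdev_alloc_skb(dev, np->rx_buf_sz + NET_IP_ALIGN);
+		     skb_reserve(skb, NET_IP_ALIGN);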
*/ + np->rx_ring[i].frag[0].addr = cpu_to_le32( + dma_map_single(&np->pci_dev->dev, skb->data, + np->rx_buf_sz, DMA_FROM_DEVICE)); + if (dma_mapping_error(&np->pci_dev->dev, + np->rx_ring[i].frag[0].addr)) { + dev_kfree_skb(skb); + np->rx_skbuff[i] = NULL; + break; + } + np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); + } + np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = NULL; + np->tx_ring[i].status = 0; + } +} + +static void tx_poll (unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct netdev_private *np = netdev_priv(dev); + unsigned head = np->cur_task % TX_RING_SIZE; + struct netdev_desc *txdesc = + &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; + + /* Chain the next pointer */ + for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { + int entry = np->cur_task % TX_RING_SIZE; + txdesc = &np->tx_ring[entry]; + if (np->last_tx) { + np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + + entry*sizeof(struct netdev_desc)); + } + np->last_tx = txdesc; + } + /* Indicate the latest descriptor of tx ring */ + txdesc->status |= cpu_to_le32(DescIntrOnTx); + + if (ioread32 (np->base + TxListPtr) == 0) + iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), + np->base + TxListPtr); +} + +static netdev_tx_t +start_tx (struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + struct netdev_desc *txdesc; + unsigned entry; + + /* Calculate the next Tx descriptor entry. */ + entry = np->cur_tx % TX_RING_SIZE; + np->tx_skbuff[entry] = skb; + txdesc = &np->tx_ring[entry]; + + txdesc->next_desc = 0; + txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); + txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, + skb->data, skb->len, DMA_TO_DEVICE)); + if (dma_mapping_error(&np->pci_dev->dev, + txdesc->frag[0].addr)) + goto drop_frame; + txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag); + + /* Increment cur_tx before tasklet_schedule() */ + np->cur_tx++; + mb(); + /* Schedule a tx_poll() task */ + tasklet_schedule(&np->tx_tasklet); + + /* On some architectures: explicitly flush cache lines here. 
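+	   The mb() above is what orders the descriptor and cur_tx updates
+	   against tx_poll() running on another CPU.  If a port of this code
+	   wrote TxListPtr directly here instead, the usual pattern before
+	   kicking the DMA engine would look roughly like the following
+	   hypothetical sketch, where entry_dma stands for the DMA address
+	   of this descriptor:
+
+	     dma_wmb();          make the descriptor writes visible first
+	     iowrite32(entry_dma, ioaddr + TxListPtr);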
*/ + if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && + !netif_queue_stopped(dev)) { + /* do nothing */ + } else { + netif_stop_queue (dev); + } + if (netif_msg_tx_queued(np)) { + printk (KERN_DEBUG + "%s: Transmit frame #%d queued in slot %d.\n", + dev->name, np->cur_tx, entry); + } + return NETDEV_TX_OK; + +drop_frame: + dev_kfree_skb_any(skb); + np->tx_skbuff[entry] = NULL; + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +/* Reset hardware tx and free all of tx buffers */ +static int +reset_tx (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + struct sk_buff *skb; + int i; + + /* Reset tx logic, TxListPtr will be cleaned */ + iowrite16 (TxDisable, ioaddr + MACCtrl1); + sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16); + + /* free all tx skbuff */ + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_ring[i].next_desc = 0; + + skb = np->tx_skbuff[i]; + if (skb) { + dma_unmap_single(&np->pci_dev->dev, + le32_to_cpu(np->tx_ring[i].frag[0].addr), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + np->tx_skbuff[i] = NULL; + dev->stats.tx_dropped++; + } + } + np->cur_tx = np->dirty_tx = 0; + np->cur_task = 0; + + np->last_tx = NULL; + iowrite8(127, ioaddr + TxDMAPollPeriod); + + iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); + return 0; +} + +/* The interrupt handler cleans up after the Tx thread, + and schedule a Rx thread work */ +static irqreturn_t intr_handler(int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *)dev_instance; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + int hw_frame_id; + int tx_cnt; + int tx_status; + int handled = 0; + int i; + + + do { + int intr_status = ioread16(ioaddr + IntrStatus); + iowrite16(intr_status, ioaddr + IntrStatus); + + if (netif_msg_intr(np)) + printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", + dev->name, intr_status); + + if (!(intr_status & DEFAULT_INTR)) + break; + + handled = 1; + + if (intr_status & (IntrRxDMADone)) { + iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone), + ioaddr + IntrEnable); + if (np->budget < 0) + np->budget = RX_BUDGET; + tasklet_schedule(&np->rx_tasklet); + } + if (intr_status & (IntrTxDone | IntrDrvRqst)) { + tx_status = ioread16 (ioaddr + TxStatus); + for (tx_cnt=32; tx_status & 0x80; --tx_cnt) { + if (netif_msg_tx_done(np)) + printk + ("%s: Transmit status is %2.2x.\n", + dev->name, tx_status); + if (tx_status & 0x1e) { + if (netif_msg_tx_err(np)) + printk("%s: Transmit error status %4.4x.\n", + dev->name, tx_status); + dev->stats.tx_errors++; + if (tx_status & 0x10) + dev->stats.tx_fifo_errors++; + if (tx_status & 0x08) + dev->stats.collisions++; + if (tx_status & 0x04) + dev->stats.tx_fifo_errors++; + if (tx_status & 0x02) + dev->stats.tx_window_errors++; + + /* + ** This reset has been verified on + ** DFE-580TX boards ! phdm@macqel.be. + */ + if (tx_status & 0x10) { /* TxUnderrun */ + /* Restart Tx FIFO and transmitter */ + sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16); + /* No need to reset the Tx pointer here */ + } + /* Restart the Tx. Need to make sure tx enabled */ + i = 10; + do { + iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1); + if (ioread16(ioaddr + MACCtrl1) & TxEnabled) + break; + mdelay(1); + } while (--i); + } + /* Yup, this is a documentation bug. It cost me *hours*. 
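+			   What the write of 0 below seems to do is acknowledge
+			   the current Tx status so the next queued frame's
+			   status becomes readable (TxStatus is re-read right
+			   after).  tx_cnt caps this at 32 iterations so a
+			   stuck status bit cannot wedge the handler; when the
+			   budget runs out, the DownCounter write below
+			   appears to arrange a deferred pass.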
*/ + iowrite16 (0, ioaddr + TxStatus); + if (tx_cnt < 0) { + iowrite32(5000, ioaddr + DownCounter); + break; + } + tx_status = ioread16 (ioaddr + TxStatus); + } + hw_frame_id = (tx_status >> 8) & 0xff; + } else { + hw_frame_id = ioread8(ioaddr + TxFrameId); + } + + if (np->pci_dev->revision >= 0x14) { + spin_lock(&np->lock); + for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { + int entry = np->dirty_tx % TX_RING_SIZE; + struct sk_buff *skb; + int sw_frame_id; + sw_frame_id = (le32_to_cpu( + np->tx_ring[entry].status) >> 2) & 0xff; + if (sw_frame_id == hw_frame_id && + !(le32_to_cpu(np->tx_ring[entry].status) + & 0x00010000)) + break; + if (sw_frame_id == (hw_frame_id + 1) % + TX_RING_SIZE) + break; + skb = np->tx_skbuff[entry]; + /* Free the original skb. */ + dma_unmap_single(&np->pci_dev->dev, + le32_to_cpu(np->tx_ring[entry].frag[0].addr), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_irq (np->tx_skbuff[entry]); + np->tx_skbuff[entry] = NULL; + np->tx_ring[entry].frag[0].addr = 0; + np->tx_ring[entry].frag[0].length = 0; + } + spin_unlock(&np->lock); + } else { + spin_lock(&np->lock); + for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { + int entry = np->dirty_tx % TX_RING_SIZE; + struct sk_buff *skb; + if (!(le32_to_cpu(np->tx_ring[entry].status) + & 0x00010000)) + break; + skb = np->tx_skbuff[entry]; + /* Free the original skb. */ + dma_unmap_single(&np->pci_dev->dev, + le32_to_cpu(np->tx_ring[entry].frag[0].addr), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb_irq (np->tx_skbuff[entry]); + np->tx_skbuff[entry] = NULL; + np->tx_ring[entry].frag[0].addr = 0; + np->tx_ring[entry].frag[0].length = 0; + } + spin_unlock(&np->lock); + } + + if (netif_queue_stopped(dev) && + np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { + /* The ring is no longer full, clear busy flag. */ + netif_wake_queue (dev); + } + /* Abnormal error summary/uncommon events handlers. */ + if (intr_status & (IntrPCIErr | LinkChange | StatsMax)) + netdev_error(dev, intr_status); + } while (0); + if (netif_msg_intr(np)) + printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", + dev->name, ioread16(ioaddr + IntrStatus)); + return IRQ_RETVAL(handled); +} + +static void rx_poll(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct netdev_private *np = netdev_priv(dev); + int entry = np->cur_rx % RX_RING_SIZE; + int boguscnt = np->budget; + void __iomem *ioaddr = np->base; + int received = 0; + + /* If EOP is set on the next entry, it's a new packet. Send it up. */ + while (1) { + struct netdev_desc *desc = &(np->rx_ring[entry]); + u32 frame_status = le32_to_cpu(desc->status); + int pkt_len; + + if (--boguscnt < 0) { + goto not_done; + } + if (!(frame_status & DescOwn)) + break; + pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */ + if (netif_msg_rx_status(np)) + printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", + frame_status); + if (frame_status & 0x001f4000) { + /* There was a error. 
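+				   Going by the handling below: bit 20 of
+				   frame_status counts as a length error (and
+				   is also reported as an oversized frame),
+				   bit 16 as a FIFO error, bits 17-18 as frame
+				   errors and bit 19 as a CRC error.  For
+				   example, a status of 0x00080000 ORed with
+				   the length is a frame that arrived complete
+				   but failed the CRC check.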
*/ + if (netif_msg_rx_err(np)) + printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", + frame_status); + dev->stats.rx_errors++; + if (frame_status & 0x00100000) + dev->stats.rx_length_errors++; + if (frame_status & 0x00010000) + dev->stats.rx_fifo_errors++; + if (frame_status & 0x00060000) + dev->stats.rx_frame_errors++; + if (frame_status & 0x00080000) + dev->stats.rx_crc_errors++; + if (frame_status & 0x00100000) { + printk(KERN_WARNING "%s: Oversized Ethernet frame," + " status %8.8x.\n", + dev->name, frame_status); + } + } else { + struct sk_buff *skb; +#ifndef final_version + if (netif_msg_rx_status(np)) + printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" + ", bogus_cnt %d.\n", + pkt_len, boguscnt); +#endif + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + dma_sync_single_for_cpu(&np->pci_dev->dev, + le32_to_cpu(desc->frag[0].addr), + np->rx_buf_sz, DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); + dma_sync_single_for_device(&np->pci_dev->dev, + le32_to_cpu(desc->frag[0].addr), + np->rx_buf_sz, DMA_FROM_DEVICE); + skb_put(skb, pkt_len); + } else { + dma_unmap_single(&np->pci_dev->dev, + le32_to_cpu(desc->frag[0].addr), + np->rx_buf_sz, DMA_FROM_DEVICE); + skb_put(skb = np->rx_skbuff[entry], pkt_len); + np->rx_skbuff[entry] = NULL; + } + skb->protocol = eth_type_trans(skb, dev); + /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ + netif_rx(skb); + } + entry = (entry + 1) % RX_RING_SIZE; + received++; + } + np->cur_rx = entry; + refill_rx (dev); + np->budget -= received; + iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); + return; + +not_done: + np->cur_rx = entry; + refill_rx (dev); + if (!received) + received = 1; + np->budget -= received; + if (np->budget <= 0) + np->budget = RX_BUDGET; + tasklet_schedule(&np->rx_tasklet); +} + +static void refill_rx (struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int entry; + int cnt = 0; + + /* Refill the Rx ring buffers. */ + for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; + np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { + struct sk_buff *skb; + entry = np->dirty_rx % RX_RING_SIZE; + if (np->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); + np->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. */ + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + np->rx_ring[entry].frag[0].addr = cpu_to_le32( + dma_map_single(&np->pci_dev->dev, skb->data, + np->rx_buf_sz, DMA_FROM_DEVICE)); + if (dma_mapping_error(&np->pci_dev->dev, + np->rx_ring[entry].frag[0].addr)) { + dev_kfree_skb_irq(skb); + np->rx_skbuff[entry] = NULL; + break; + } + } + /* Perhaps we need not reset this field. 
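+		   Writing the length word and then clearing status is what
+		   hands the descriptor back to the receive engine: rx_poll()
+		   above treats a non-zero status with DescOwn set as a
+		   completed frame, so a zero status marks the slot as
+		   available again.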
*/ + np->rx_ring[entry].frag[0].length = + cpu_to_le32(np->rx_buf_sz | LastFrag); + np->rx_ring[entry].status = 0; + cnt++; + } +} +static void netdev_error(struct net_device *dev, int intr_status) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + u16 mii_ctl, mii_advertise, mii_lpa; + int speed; + + if (intr_status & LinkChange) { + if (mdio_wait_link(dev, 10) == 0) { + printk(KERN_INFO "%s: Link up\n", dev->name); + if (np->an_enable) { + mii_advertise = mdio_read(dev, np->phys[0], + MII_ADVERTISE); + mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); + mii_advertise &= mii_lpa; + printk(KERN_INFO "%s: Link changed: ", + dev->name); + if (mii_advertise & ADVERTISE_100FULL) { + np->speed = 100; + printk("100Mbps, full duplex\n"); + } else if (mii_advertise & ADVERTISE_100HALF) { + np->speed = 100; + printk("100Mbps, half duplex\n"); + } else if (mii_advertise & ADVERTISE_10FULL) { + np->speed = 10; + printk("10Mbps, full duplex\n"); + } else if (mii_advertise & ADVERTISE_10HALF) { + np->speed = 10; + printk("10Mbps, half duplex\n"); + } else + printk("\n"); + + } else { + mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); + speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; + np->speed = speed; + printk(KERN_INFO "%s: Link changed: %dMbps ,", + dev->name, speed); + printk("%s duplex.\n", + (mii_ctl & BMCR_FULLDPLX) ? + "full" : "half"); + } + check_duplex(dev); + if (np->flowctrl && np->mii_if.full_duplex) { + iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, + ioaddr + MulticastFilter1+2); + iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, + ioaddr + MACCtrl0); + } + netif_carrier_on(dev); + } else { + printk(KERN_INFO "%s: Link down\n", dev->name); + netif_carrier_off(dev); + } + } + if (intr_status & StatsMax) { + get_stats(dev); + } + if (intr_status & IntrPCIErr) { + printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", + dev->name, intr_status); + /* We must do a global reset of DMA to continue. */ + } +} + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + unsigned long flags; + u8 late_coll, single_coll, mult_coll; + + spin_lock_irqsave(&np->statlock, flags); + /* The chip only need report frame silently dropped. 
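+	   The statistics registers are narrow (8- and 16-bit) counters, so
+	   they are read and accumulated here under statlock before they can
+	   wrap.  The octet counts are split across two 16-bit registers and
+	   reassembled as low | (high << 16); for example high = 0x0002 and
+	   low = 0x1f40 adds 0x00021f40 (139072) bytes.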
*/ + dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); + dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); + dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); + dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); + + mult_coll = ioread8(ioaddr + StatsMultiColl); + np->xstats.tx_multiple_collisions += mult_coll; + single_coll = ioread8(ioaddr + StatsOneColl); + np->xstats.tx_single_collisions += single_coll; + late_coll = ioread8(ioaddr + StatsLateColl); + np->xstats.tx_late_collisions += late_coll; + dev->stats.collisions += mult_coll + + single_coll + + late_coll; + + np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); + np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); + np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); + np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); + np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); + np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); + np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); + + dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); + dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; + dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); + dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; + + spin_unlock_irqrestore(&np->statlock, flags); + + return &dev->stats; +} + +static void set_rx_mode(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + u16 mc_filter[4]; /* Multicast hash filter */ + u32 rx_mode; + int i; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. 
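+		   For the hash case below: the top 6 bits of the little-endian
+		   CRC of each address, collected MSB-first by the bit loop,
+		   form an index 0-63 into the 64-bit hash filter; index/16
+		   selects one of the four 16-bit MulticastFilter registers
+		   and index%16 the bit within it.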
*/ + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + int bit; + int index; + int crc; + memset (mc_filter, 0, sizeof (mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + for (index=0, bit=0; bit < 6; bit++, crc <<= 1) + if (crc & 0x80000000) index |= 1 << bit; + mc_filter[index/16] |= (1 << (index % 16)); + } + rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys; + } else { + iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); + return; + } + if (np->mii_if.full_duplex && np->flowctrl) + mc_filter[3] |= 0x0200; + + for (i = 0; i < 4; i++) + iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2); + iowrite8(rx_mode, ioaddr + RxMode); +} + +static int __set_mac_addr(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + u16 addr16; + + addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); + iowrite16(addr16, np->base + StationAddr); + addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); + iowrite16(addr16, np->base + StationAddr+2); + addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); + iowrite16(addr16, np->base + StationAddr+4); + return 0; +} + +/* Invoked with rtnl_lock held */ +static int sundance_set_mac_addr(struct net_device *dev, void *data) +{ + const struct sockaddr *addr = data; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + __set_mac_addr(dev); + + return 0; +} + +static const struct { + const char name[ETH_GSTRING_LEN]; +} sundance_stats[] = { + { "tx_multiple_collisions" }, + { "tx_single_collisions" }, + { "tx_late_collisions" }, + { "tx_deferred" }, + { "tx_deferred_excessive" }, + { "tx_aborted" }, + { "tx_bcasts" }, + { "rx_bcasts" }, + { "tx_mcasts" }, + { "rx_mcasts" }, +}; + +static int check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EINVAL; + return 0; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct netdev_private *np = netdev_priv(dev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + spin_lock_irq(&np->lock); + mii_ethtool_gset(&np->mii_if, ecmd); + spin_unlock_irq(&np->lock); + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + int res; + spin_lock_irq(&np->lock); + res = mii_ethtool_sset(&np->mii_if, ecmd); + spin_unlock_irq(&np->lock); + return res; +} + +static int nway_reset(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_nway_restart(&np->mii_if); +} + +static u32 get_link(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_link_ok(&np->mii_if); +} + +static u32 get_msglevel(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return np->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + struct netdev_private *np = netdev_priv(dev); + np->msg_enable = val; +} + +static void get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, sundance_stats, 
sizeof(sundance_stats)); +} + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(sundance_stats); + default: + return -EOPNOTSUPP; + } +} + +static void get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct netdev_private *np = netdev_priv(dev); + int i = 0; + + get_stats(dev); + data[i++] = np->xstats.tx_multiple_collisions; + data[i++] = np->xstats.tx_single_collisions; + data[i++] = np->xstats.tx_late_collisions; + data[i++] = np->xstats.tx_deferred; + data[i++] = np->xstats.tx_deferred_excessive; + data[i++] = np->xstats.tx_aborted; + data[i++] = np->xstats.tx_bcasts; + data[i++] = np->xstats.rx_bcasts; + data[i++] = np->xstats.tx_mcasts; + data[i++] = np->xstats.rx_mcasts; +} + +#ifdef CONFIG_PM + +static void sundance_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + u8 wol_bits; + + wol->wolopts = 0; + + wol->supported = (WAKE_PHY | WAKE_MAGIC); + if (!np->wol_enabled) + return; + + wol_bits = ioread8(ioaddr + WakeEvent); + if (wol_bits & MagicPktEnable) + wol->wolopts |= WAKE_MAGIC; + if (wol_bits & LinkEventEnable) + wol->wolopts |= WAKE_PHY; +} + +static int sundance_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + u8 wol_bits; + + if (!device_can_wakeup(&np->pci_dev->dev)) + return -EOPNOTSUPP; + + np->wol_enabled = !!(wol->wolopts); + wol_bits = ioread8(ioaddr + WakeEvent); + wol_bits &= ~(WakePktEnable | MagicPktEnable | + LinkEventEnable | WolEnable); + + if (np->wol_enabled) { + if (wol->wolopts & WAKE_MAGIC) + wol_bits |= (MagicPktEnable | WolEnable); + if (wol->wolopts & WAKE_PHY) + wol_bits |= (LinkEventEnable | WolEnable); + } + iowrite8(wol_bits, ioaddr + WakeEvent); + + device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled); + + return 0; +} +#else +#define sundance_get_wol NULL +#define sundance_set_wol NULL +#endif /* CONFIG_PM */ + +static const struct ethtool_ops ethtool_ops = { + .begin = check_if_running, + .get_drvinfo = get_drvinfo, + .get_settings = get_settings, + .set_settings = set_settings, + .nway_reset = nway_reset, + .get_link = get_link, + .get_wol = sundance_get_wol, + .set_wol = sundance_set_wol, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_strings = get_strings, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_ethtool_stats, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct netdev_private *np = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_close(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + struct sk_buff *skb; + int i; + + /* Wait and kill tasklet */ + tasklet_kill(&np->rx_tasklet); + tasklet_kill(&np->tx_tasklet); + np->cur_tx = 0; + np->dirty_tx = 0; + np->cur_task = 0; + np->last_tx = NULL; + + netif_stop_queue(dev); + + if (netif_msg_ifdown(np)) { + printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x " + "Rx %4.4x Int %2.2x.\n", + dev->name, ioread8(ioaddr + TxStatus), + ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus)); + printk(KERN_DEBUG "%s: Queue pointers were Tx 
%d / %d, Rx %d / %d.\n", + dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); + } + + /* Disable interrupts by clearing the interrupt mask. */ + iowrite16(0x0000, ioaddr + IntrEnable); + + /* Disable Rx and Tx DMA for safely release resource */ + iowrite32(0x500, ioaddr + DMACtrl); + + /* Stop the chip's Tx and Rx processes. */ + iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); + + for (i = 2000; i > 0; i--) { + if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0) + break; + mdelay(1); + } + + iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, + ioaddr + ASIC_HI_WORD(ASICCtrl)); + + for (i = 2000; i > 0; i--) { + if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0) + break; + mdelay(1); + } + +#ifdef __i386__ + if (netif_msg_hw(np)) { + printk(KERN_DEBUG " Tx ring at %8.8x:\n", + (int)(np->tx_ring_dma)); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n", + i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr, + np->tx_ring[i].frag[0].length); + printk(KERN_DEBUG " Rx ring %8.8x:\n", + (int)(np->rx_ring_dma)); + for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { + printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", + i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr, + np->rx_ring[i].frag[0].length); + } + } +#endif /* __i386__ debugging only */ + + free_irq(np->pci_dev->irq, dev); + + del_timer_sync(&np->timer); + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].status = 0; + skb = np->rx_skbuff[i]; + if (skb) { + dma_unmap_single(&np->pci_dev->dev, + le32_to_cpu(np->rx_ring[i].frag[0].addr), + np->rx_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + np->rx_skbuff[i] = NULL; + } + np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */ + } + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_ring[i].next_desc = 0; + skb = np->tx_skbuff[i]; + if (skb) { + dma_unmap_single(&np->pci_dev->dev, + le32_to_cpu(np->tx_ring[i].frag[0].addr), + skb->len, DMA_TO_DEVICE); + dev_kfree_skb(skb); + np->tx_skbuff[i] = NULL; + } + } + + return 0; +} + +static void sundance_remove1(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct netdev_private *np = netdev_priv(dev); + unregister_netdev(dev); + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, + np->rx_ring, np->rx_ring_dma); + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, + np->tx_ring, np->tx_ring_dma); + pci_iounmap(pdev, np->base); + pci_release_regions(pdev); + free_netdev(dev); + } +} + +#ifdef CONFIG_PM + +static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + + if (!netif_running(dev)) + return 0; + + netdev_close(dev); + netif_device_detach(dev); + + pci_save_state(pci_dev); + if (np->wol_enabled) { + iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); + iowrite16(RxEnable, ioaddr + MACCtrl1); + } + pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state), + np->wol_enabled); + pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); + + return 0; +} + +static int sundance_resume(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + int err = 0; + + if (!netif_running(dev)) + return 0; + + pci_set_power_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); + pci_enable_wake(pci_dev, PCI_D0, 0); + + err = netdev_open(dev); + if (err) { + printk(KERN_ERR "%s: 
Can't resume interface!\n", + dev->name); + goto out; + } + + netif_device_attach(dev); + +out: + return err; +} + +#endif /* CONFIG_PM */ + +static struct pci_driver sundance_driver = { + .name = DRV_NAME, + .id_table = sundance_pci_tbl, + .probe = sundance_probe1, + .remove = sundance_remove1, +#ifdef CONFIG_PM + .suspend = sundance_suspend, + .resume = sundance_resume, +#endif /* CONFIG_PM */ +}; + +static int __init sundance_init(void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + return pci_register_driver(&sundance_driver); +} + +static void __exit sundance_exit(void) +{ + pci_unregister_driver(&sundance_driver); +} + +module_init(sundance_init); +module_exit(sundance_exit); + + diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c new file mode 100644 index 000000000..13d00a38a --- /dev/null +++ b/drivers/net/ethernet/dnet.c @@ -0,0 +1,945 @@ +/* + * Dave DNET Ethernet Controller driver + * + * Copyright (C) 2008 Dave S.r.l. + * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dnet.h" + +#undef DEBUG + +/* function for reading internal MAC register */ +static u16 dnet_readw_mac(struct dnet *bp, u16 reg) +{ + u16 data_read; + + /* issue a read */ + dnet_writel(bp, reg, MACREG_ADDR); + + /* since a read/write op to the MAC is very slow, + * we must wait before reading the data */ + ndelay(500); + + /* read data read from the MAC register */ + data_read = dnet_readl(bp, MACREG_DATA); + + /* all done */ + return data_read; +} + +/* function for writing internal MAC register */ +static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) +{ + /* load data to write */ + dnet_writel(bp, val, MACREG_DATA); + + /* issue a write */ + dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); + + /* since a read/write op to the MAC is very slow, + * we must wait before exiting */ + ndelay(500); +} + +static void __dnet_set_hwaddr(struct dnet *bp) +{ + u16 tmp; + + tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); + dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); + tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); + dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); + tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); + dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); +} + +static void dnet_get_hwaddr(struct dnet *bp) +{ + u16 tmp; + u8 addr[6]; + + /* + * from MAC docs: + * "Note that the MAC address is stored in the registers in Hexadecimal + * form. For example, to set the MAC Address to: AC-DE-48-00-00-80 + * would require writing 0xAC (octet 0) to address 0x0B (high byte of + * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of + * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of + * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of + * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of + * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of + * Mac_addr[15:0]). 
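+	 * In other words, for AC-DE-48-00-00-80 the three 16-bit registers
+	 * at DNET_INTERNAL_MAC_ADDR_0_REG, _1_REG and _2_REG read back as
+	 * 0xACDE, 0x4800 and 0x0080, which is what dnet_get_hwaddr() below
+	 * reassembles with cpu_to_be16().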
+ */ + tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); + *((__be16 *)addr) = cpu_to_be16(tmp); + tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); + *((__be16 *)(addr + 2)) = cpu_to_be16(tmp); + tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); + *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); + + if (is_valid_ether_addr(addr)) + memcpy(bp->dev->dev_addr, addr, sizeof(addr)); +} + +static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct dnet *bp = bus->priv; + u16 value; + + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + /* only 5 bits allowed for phy-addr and reg_offset */ + mii_id &= 0x1f; + regnum &= 0x1f; + + /* prepare reg_value for a read */ + value = (mii_id << 8); + value |= regnum; + + /* write control word */ + dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); + + /* wait for end of transfer */ + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); + + pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value); + + return value; +} + +static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct dnet *bp = bus->priv; + u16 tmp; + + pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value); + + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + /* prepare for a write operation */ + tmp = (1 << 13); + + /* only 5 bits allowed for phy-addr and reg_offset */ + mii_id &= 0x1f; + regnum &= 0x1f; + + /* only 16 bits on data */ + value &= 0xffff; + + /* prepare reg_value for a write */ + tmp |= (mii_id << 8); + tmp |= regnum; + + /* write data to write first */ + dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); + + /* write control word */ + dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); + + while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) + & DNET_INTERNAL_GMII_MNG_CMD_FIN)) + cpu_relax(); + + return 0; +} + +static void dnet_handle_link_change(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + unsigned long flags; + u32 mode_reg, ctl_reg; + + int status_change = 0; + + spin_lock_irqsave(&bp->lock, flags); + + mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); + ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); + + if (phydev->link) { + if (bp->duplex != phydev->duplex) { + if (phydev->duplex) + ctl_reg &= + ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP); + else + ctl_reg |= + DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP; + + bp->duplex = phydev->duplex; + status_change = 1; + } + + if (bp->speed != phydev->speed) { + status_change = 1; + switch (phydev->speed) { + case 1000: + mode_reg |= DNET_INTERNAL_MODE_GBITEN; + break; + case 100: + case 10: + mode_reg &= ~DNET_INTERNAL_MODE_GBITEN; + break; + default: + printk(KERN_WARNING + "%s: Ack! 
Speed (%d) is not " + "10/100/1000!\n", dev->name, + phydev->speed); + break; + } + bp->speed = phydev->speed; + } + } + + if (phydev->link != bp->link) { + if (phydev->link) { + mode_reg |= + (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); + } else { + mode_reg &= + ~(DNET_INTERNAL_MODE_RXEN | + DNET_INTERNAL_MODE_TXEN); + bp->speed = 0; + bp->duplex = -1; + } + bp->link = phydev->link; + + status_change = 1; + } + + if (status_change) { + dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); + dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); + } + + spin_unlock_irqrestore(&bp->lock, flags); + + if (status_change) { + if (phydev->link) + printk(KERN_INFO "%s: link up (%d/%s)\n", + dev->name, phydev->speed, + DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); + else + printk(KERN_INFO "%s: link down\n", dev->name); + } +} + +static int dnet_mii_probe(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + /* find the first phy */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (bp->mii_bus->phy_map[phy_addr]) { + phydev = bp->mii_bus->phy_map[phy_addr]; + break; + } + } + + if (!phydev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENODEV; + } + + /* TODO : add pin_irq */ + + /* attach the mac to the phy */ + if (bp->capabilities & DNET_HAS_RMII) { + phydev = phy_connect(dev, dev_name(&phydev->dev), + &dnet_handle_link_change, + PHY_INTERFACE_MODE_RMII); + } else { + phydev = phy_connect(dev, dev_name(&phydev->dev), + &dnet_handle_link_change, + PHY_INTERFACE_MODE_MII); + } + + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + if (bp->capabilities & DNET_HAS_GIGABIT) + phydev->supported &= PHY_GBIT_FEATURES; + else + phydev->supported &= PHY_BASIC_FEATURES; + + phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; + + phydev->advertising = phydev->supported; + + bp->link = 0; + bp->speed = 0; + bp->duplex = -1; + bp->phy_dev = phydev; + + return 0; +} + +static int dnet_mii_init(struct dnet *bp) +{ + int err, i; + + bp->mii_bus = mdiobus_alloc(); + if (bp->mii_bus == NULL) + return -ENOMEM; + + bp->mii_bus->name = "dnet_mii_bus"; + bp->mii_bus->read = &dnet_mdio_read; + bp->mii_bus->write = &dnet_mdio_write; + + snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + bp->pdev->name, bp->pdev->id); + + bp->mii_bus->priv = bp; + + bp->mii_bus->irq = devm_kmalloc(&bp->pdev->dev, + sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!bp->mii_bus->irq) { + err = -ENOMEM; + goto err_out; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + bp->mii_bus->irq[i] = PHY_POLL; + + if (mdiobus_register(bp->mii_bus)) { + err = -ENXIO; + goto err_out; + } + + if (dnet_mii_probe(bp->dev) != 0) { + err = -ENXIO; + goto err_out_unregister_bus; + } + + return 0; + +err_out_unregister_bus: + mdiobus_unregister(bp->mii_bus); +err_out: + mdiobus_free(bp->mii_bus); + return err; +} + +/* For Neptune board: LINK1000 as Link LED and TX as activity LED */ +static int dnet_phy_marvell_fixup(struct phy_device *phydev) +{ + return phy_write(phydev, 0x18, 0x4148); +} + +static void dnet_update_stats(struct dnet *bp) +{ + u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; + u32 *p = &bp->hw_stats.rx_pkt_ignr; + u32 *end = &bp->hw_stats.rx_byte + 1; + + WARN_ON((unsigned long)(end - p - 1) != + (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4); + + for (; p < end; p++, reg++) + *p += readl(reg); + + reg = 
bp->regs + DNET_TX_UNICAST_CNT; + p = &bp->hw_stats.tx_unicast; + end = &bp->hw_stats.tx_byte + 1; + + WARN_ON((unsigned long)(end - p - 1) != + (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4); + + for (; p < end; p++, reg++) + *p += readl(reg); +} + +static int dnet_poll(struct napi_struct *napi, int budget) +{ + struct dnet *bp = container_of(napi, struct dnet, napi); + struct net_device *dev = bp->dev; + int npackets = 0; + unsigned int pkt_len; + struct sk_buff *skb; + unsigned int *data_ptr; + u32 int_enable; + u32 cmd_word; + int i; + + while (npackets < budget) { + /* + * break out of while loop if there are no more + * packets waiting + */ + if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) + break; + + cmd_word = dnet_readl(bp, RX_LEN_FIFO); + pkt_len = cmd_word & 0xFFFF; + + if (cmd_word & 0xDF180000) + printk(KERN_ERR "%s packet receive error %x\n", + __func__, cmd_word); + + skb = netdev_alloc_skb(dev, pkt_len + 5); + if (skb != NULL) { + /* Align IP on 16 byte boundaries */ + skb_reserve(skb, 2); + /* + * 'skb_put()' points to the start of sk_buff + * data area. + */ + data_ptr = (unsigned int *)skb_put(skb, pkt_len); + for (i = 0; i < (pkt_len + 3) >> 2; i++) + *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + npackets++; + } else + printk(KERN_NOTICE + "%s: No memory to allocate a sk_buff of " + "size %u.\n", dev->name, pkt_len); + } + + if (npackets < budget) { + /* We processed all packets available. Tell NAPI it can + * stop polling then re-enable rx interrupts. + */ + napi_complete(napi); + int_enable = dnet_readl(bp, INTR_ENB); + int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; + dnet_writel(bp, int_enable, INTR_ENB); + } + + return npackets; +} + +static irqreturn_t dnet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct dnet *bp = netdev_priv(dev); + u32 int_src, int_enable, int_current; + unsigned long flags; + unsigned int handled = 0; + + spin_lock_irqsave(&bp->lock, flags); + + /* read and clear the DNET irq (clear on read) */ + int_src = dnet_readl(bp, INTR_SRC); + int_enable = dnet_readl(bp, INTR_ENB); + int_current = int_src & int_enable; + + /* restart the queue if we had stopped it for TX fifo almost full */ + if (int_current & DNET_INTR_SRC_TX_FIFOAE) { + int_enable = dnet_readl(bp, INTR_ENB); + int_enable &= ~DNET_INTR_ENB_TX_FIFOAE; + dnet_writel(bp, int_enable, INTR_ENB); + netif_wake_queue(dev); + handled = 1; + } + + /* RX FIFO error checking */ + if (int_current & + (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) { + printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__, + dnet_readl(bp, RX_STATUS), int_current); + /* we can only flush the RX FIFOs */ + dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); + ndelay(500); + dnet_writel(bp, 0, SYS_CTL); + handled = 1; + } + + /* TX FIFO error checking */ + if (int_current & + (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) { + printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__, + dnet_readl(bp, TX_STATUS), int_current); + /* we can only flush the TX FIFOs */ + dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); + ndelay(500); + dnet_writel(bp, 0, SYS_CTL); + handled = 1; + } + + if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) { + if (napi_schedule_prep(&bp->napi)) { + /* + * There's no point taking any more interrupts + * until we have processed the buffers + */ + /* Disable Rx interrupts and schedule NAPI poll */ + int_enable = dnet_readl(bp, INTR_ENB); + int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF; 
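+			/*
+			 * The almost-full source stays masked until dnet_poll()
+			 * has drained the RX FIFO and called napi_complete(),
+			 * at which point it re-enables this bit - the usual
+			 * NAPI handshake.
+			 */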
+ dnet_writel(bp, int_enable, INTR_ENB); + __napi_schedule(&bp->napi); + } + handled = 1; + } + + if (!handled) + pr_debug("%s: irq %x remains\n", __func__, int_current); + + spin_unlock_irqrestore(&bp->lock, flags); + + return IRQ_RETVAL(handled); +} + +#ifdef DEBUG +static inline void dnet_print_skb(struct sk_buff *skb) +{ + int k; + printk(KERN_DEBUG PFX "data:"); + for (k = 0; k < skb->len; k++) + printk(" %02x", (unsigned int)skb->data[k]); + printk("\n"); +} +#else +#define dnet_print_skb(skb) do {} while (0) +#endif + +static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + + struct dnet *bp = netdev_priv(dev); + u32 tx_status, irq_enable; + unsigned int len, i, tx_cmd, wrsz; + unsigned long flags; + unsigned int *bufp; + + tx_status = dnet_readl(bp, TX_STATUS); + + pr_debug("start_xmit: len %u head %p data %p\n", + skb->len, skb->head, skb->data); + dnet_print_skb(skb); + + /* frame size (words) */ + len = (skb->len + 3) >> 2; + + spin_lock_irqsave(&bp->lock, flags); + + tx_status = dnet_readl(bp, TX_STATUS); + + bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL); + wrsz = (u32) skb->len + 3; + wrsz += ((unsigned long) skb->data) & 0x3; + wrsz >>= 2; + tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len; + + /* check if there is enough room for the current frame */ + if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { + for (i = 0; i < wrsz; i++) + dnet_writel(bp, *bufp++, TX_DATA_FIFO); + + /* + * inform MAC that a packet's written and ready to be + * shipped out + */ + dnet_writel(bp, tx_cmd, TX_LEN_FIFO); + } + + if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { + netif_stop_queue(dev); + tx_status = dnet_readl(bp, INTR_SRC); + irq_enable = dnet_readl(bp, INTR_ENB); + irq_enable |= DNET_INTR_ENB_TX_FIFOAE; + dnet_writel(bp, irq_enable, INTR_ENB); + } + + skb_tx_timestamp(skb); + + /* free the buffer */ + dev_kfree_skb(skb); + + spin_unlock_irqrestore(&bp->lock, flags); + + return NETDEV_TX_OK; +} + +static void dnet_reset_hw(struct dnet *bp) +{ + /* put ts_mac in IDLE state i.e. 
disable rx/tx */ + dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); + + /* + * RX FIFO almost full threshold: only cmd FIFO almost full is + * implemented for RX side + */ + dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); + /* + * TX FIFO almost empty threshold: only data FIFO almost empty + * is implemented for TX side + */ + dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); + + /* flush rx/tx fifos */ + dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, + SYS_CTL); + msleep(1); + dnet_writel(bp, 0, SYS_CTL); +} + +static void dnet_init_hw(struct dnet *bp) +{ + u32 config; + + dnet_reset_hw(bp); + __dnet_set_hwaddr(bp); + + config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); + + if (bp->dev->flags & IFF_PROMISC) + /* Copy All Frames */ + config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC; + if (!(bp->dev->flags & IFF_BROADCAST)) + /* No BroadCast */ + config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST; + + config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE | + DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST | + DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL | + DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS; + + dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); + + /* clear irq before enabling them */ + config = dnet_readl(bp, INTR_SRC); + + /* enable RX/TX interrupt, recv packet ready interrupt */ + dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | + DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR | + DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL | + DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM | + DNET_INTR_ENB_RX_PKTRDY, INTR_ENB); +} + +static int dnet_open(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + + /* if the phy is not yet register, retry later */ + if (!bp->phy_dev) + return -EAGAIN; + + napi_enable(&bp->napi); + dnet_init_hw(bp); + + phy_start_aneg(bp->phy_dev); + + /* schedule a link state check */ + phy_start(bp->phy_dev); + + netif_start_queue(dev); + + return 0; +} + +static int dnet_close(struct net_device *dev) +{ + struct dnet *bp = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&bp->napi); + + if (bp->phy_dev) + phy_stop(bp->phy_dev); + + dnet_reset_hw(bp); + netif_carrier_off(dev); + + return 0; +} + +static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat) +{ + pr_debug("%s\n", __func__); + pr_debug("----------------------------- RX statistics " + "-------------------------------\n"); + pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr); + pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err); + pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm); + pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm); + pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol); + pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err); + pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt); + pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm); + pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm); + pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast); + pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast); + pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag); + pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink); + pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib); + pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd); + pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte); + pr_debug("----------------------------- TX statistics " + "-------------------------------\n"); + pr_debug("TX_UNICAST_CNT %-8x\n", 
hwstat->tx_unicast); + pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm); + pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast); + pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast); + pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag); + pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs); + pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo); + pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte); +} + +static struct net_device_stats *dnet_get_stats(struct net_device *dev) +{ + + struct dnet *bp = netdev_priv(dev); + struct net_device_stats *nstat = &dev->stats; + struct dnet_stats *hwstat = &bp->hw_stats; + + /* read stats from hardware */ + dnet_update_stats(bp); + + /* Convert HW stats into netdevice stats */ + nstat->rx_errors = (hwstat->rx_len_chk_err + + hwstat->rx_lng_frm + hwstat->rx_shrt_frm + + /* ignore IGP violation error + hwstat->rx_ipg_viol + */ + hwstat->rx_crc_err + + hwstat->rx_pre_shrink + + hwstat->rx_drib_nib + hwstat->rx_unsup_opcd); + nstat->tx_errors = hwstat->tx_bad_fcs; + nstat->rx_length_errors = (hwstat->rx_len_chk_err + + hwstat->rx_lng_frm + + hwstat->rx_shrt_frm + hwstat->rx_pre_shrink); + nstat->rx_crc_errors = hwstat->rx_crc_err; + nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib; + nstat->rx_packets = hwstat->rx_ok_pkt; + nstat->tx_packets = (hwstat->tx_unicast + + hwstat->tx_multicast + hwstat->tx_brdcast); + nstat->rx_bytes = hwstat->rx_byte; + nstat->tx_bytes = hwstat->tx_byte; + nstat->multicast = hwstat->rx_multicast; + nstat->rx_missed_errors = hwstat->rx_pkt_ignr; + + dnet_print_pretty_hwstats(hwstat); + + return nstat; +} + +static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dnet *bp = netdev_priv(dev); + struct phy_device *phydev = bp->phy_dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +static void dnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, "0", sizeof(info->bus_info)); +} + +static const struct ethtool_ops dnet_ethtool_ops = { + .get_settings = dnet_get_settings, + .set_settings = dnet_set_settings, + .get_drvinfo = dnet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct net_device_ops dnet_netdev_ops = { + .ndo_open = dnet_open, + .ndo_stop = dnet_close, + .ndo_get_stats = dnet_get_stats, + .ndo_start_xmit = dnet_start_xmit, + .ndo_do_ioctl = dnet_ioctl, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int dnet_probe(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *dev; + struct dnet *bp; + struct phy_device *phydev; + int err; + unsigned int irq; + + irq = platform_get_irq(pdev, 0); + + dev = alloc_etherdev(sizeof(*bp)); + if (!dev) + return -ENOMEM; + + /* 
TODO: Actually, we have some interesting features... */ + dev->features |= 0; + + bp = netdev_priv(dev); + bp->dev = dev; + + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + spin_lock_init(&bp->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bp->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(bp->regs)) { + err = PTR_ERR(bp->regs); + goto err_out_free_dev; + } + + dev->irq = irq; + err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev); + if (err) { + dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", + irq, err); + goto err_out_free_dev; + } + + dev->netdev_ops = &dnet_netdev_ops; + netif_napi_add(dev, &bp->napi, dnet_poll, 64); + dev->ethtool_ops = &dnet_ethtool_ops; + + dev->base_addr = (unsigned long)bp->regs; + + bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; + + dnet_get_hwaddr(bp); + + if (!is_valid_ether_addr(dev->dev_addr)) { + /* choose a random ethernet address */ + eth_hw_addr_random(dev); + __dnet_set_hwaddr(bp); + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_free_irq; + } + + /* register the PHY board fixup (for Marvell 88E1111) */ + err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0, + dnet_phy_marvell_fixup); + /* we can live without it, so just issue a warning */ + if (err) + dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); + + err = dnet_mii_init(bp); + if (err) + goto err_out_unregister_netdev; + + dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", + bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr); + dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n", + (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", + (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", + (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", + (bp->capabilities & DNET_HAS_DMA) ? "" : "no "); + phydev = bp->phy_dev; + dev_info(&pdev->dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + return 0; + +err_out_unregister_netdev: + unregister_netdev(dev); +err_out_free_irq: + free_irq(dev->irq, dev); +err_out_free_dev: + free_netdev(dev); + return err; +} + +static int dnet_remove(struct platform_device *pdev) +{ + + struct net_device *dev; + struct dnet *bp; + + dev = platform_get_drvdata(pdev); + + if (dev) { + bp = netdev_priv(dev); + if (bp->phy_dev) + phy_disconnect(bp->phy_dev); + mdiobus_unregister(bp->mii_bus); + mdiobus_free(bp->mii_bus); + unregister_netdev(dev); + free_irq(dev->irq, dev); + free_netdev(dev); + } + + return 0; +} + +static struct platform_driver dnet_driver = { + .probe = dnet_probe, + .remove = dnet_remove, + .driver = { + .name = "dnet", + }, +}; + +module_platform_driver(dnet_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Dave DNET Ethernet driver"); +MODULE_AUTHOR("Ilya Yanok , " + "Matteo Vit "); diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h new file mode 100644 index 000000000..37f5b30fa --- /dev/null +++ b/drivers/net/ethernet/dnet.h @@ -0,0 +1,225 @@ +/* + * Dave DNET Ethernet Controller driver + * + * Copyright (C) 2008 Dave S.r.l. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _DNET_H +#define _DNET_H + +#define DRV_NAME "dnet" +#define DRV_VERSION "0.9.1" +#define PFX DRV_NAME ": " + +/* Register access macros */ +#define dnet_writel(port, value, reg) \ + writel((value), (port)->regs + DNET_##reg) +#define dnet_readl(port, reg) readl((port)->regs + DNET_##reg) + +/* ALL DNET FIFO REGISTERS */ +#define DNET_RX_LEN_FIFO 0x000 /* RX_LEN_FIFO */ +#define DNET_RX_DATA_FIFO 0x004 /* RX_DATA_FIFO */ +#define DNET_TX_LEN_FIFO 0x008 /* TX_LEN_FIFO */ +#define DNET_TX_DATA_FIFO 0x00C /* TX_DATA_FIFO */ + +/* ALL DNET CONTROL/STATUS REGISTERS OFFSETS */ +#define DNET_VERCAPS 0x100 /* VERCAPS */ +#define DNET_INTR_SRC 0x104 /* INTR_SRC */ +#define DNET_INTR_ENB 0x108 /* INTR_ENB */ +#define DNET_RX_STATUS 0x10C /* RX_STATUS */ +#define DNET_TX_STATUS 0x110 /* TX_STATUS */ +#define DNET_RX_FRAMES_CNT 0x114 /* RX_FRAMES_CNT */ +#define DNET_TX_FRAMES_CNT 0x118 /* TX_FRAMES_CNT */ +#define DNET_RX_FIFO_TH 0x11C /* RX_FIFO_TH */ +#define DNET_TX_FIFO_TH 0x120 /* TX_FIFO_TH */ +#define DNET_SYS_CTL 0x124 /* SYS_CTL */ +#define DNET_PAUSE_TMR 0x128 /* PAUSE_TMR */ +#define DNET_RX_FIFO_WCNT 0x12C /* RX_FIFO_WCNT */ +#define DNET_TX_FIFO_WCNT 0x130 /* TX_FIFO_WCNT */ + +/* ALL DNET MAC REGISTERS */ +#define DNET_MACREG_DATA 0x200 /* Mac-Reg Data */ +#define DNET_MACREG_ADDR 0x204 /* Mac-Reg Addr */ + +/* ALL DNET RX STATISTICS COUNTERS */ +#define DNET_RX_PKT_IGNR_CNT 0x300 +#define DNET_RX_LEN_CHK_ERR_CNT 0x304 +#define DNET_RX_LNG_FRM_CNT 0x308 +#define DNET_RX_SHRT_FRM_CNT 0x30C +#define DNET_RX_IPG_VIOL_CNT 0x310 +#define DNET_RX_CRC_ERR_CNT 0x314 +#define DNET_RX_OK_PKT_CNT 0x318 +#define DNET_RX_CTL_FRM_CNT 0x31C +#define DNET_RX_PAUSE_FRM_CNT 0x320 +#define DNET_RX_MULTICAST_CNT 0x324 +#define DNET_RX_BROADCAST_CNT 0x328 +#define DNET_RX_VLAN_TAG_CNT 0x32C +#define DNET_RX_PRE_SHRINK_CNT 0x330 +#define DNET_RX_DRIB_NIB_CNT 0x334 +#define DNET_RX_UNSUP_OPCD_CNT 0x338 +#define DNET_RX_BYTE_CNT 0x33C + +/* DNET TX STATISTICS COUNTERS */ +#define DNET_TX_UNICAST_CNT 0x400 +#define DNET_TX_PAUSE_FRM_CNT 0x404 +#define DNET_TX_MULTICAST_CNT 0x408 +#define DNET_TX_BRDCAST_CNT 0x40C +#define DNET_TX_VLAN_TAG_CNT 0x410 +#define DNET_TX_BAD_FCS_CNT 0x414 +#define DNET_TX_JUMBO_CNT 0x418 +#define DNET_TX_BYTE_CNT 0x41C + +/* SOME INTERNAL MAC-CORE REGISTER */ +#define DNET_INTERNAL_MODE_REG 0x0 +#define DNET_INTERNAL_RXTX_CONTROL_REG 0x2 +#define DNET_INTERNAL_MAX_PKT_SIZE_REG 0x4 +#define DNET_INTERNAL_IGP_REG 0x8 +#define DNET_INTERNAL_MAC_ADDR_0_REG 0xa +#define DNET_INTERNAL_MAC_ADDR_1_REG 0xc +#define DNET_INTERNAL_MAC_ADDR_2_REG 0xe +#define DNET_INTERNAL_TX_RX_STS_REG 0x12 +#define DNET_INTERNAL_GMII_MNG_CTL_REG 0x14 +#define DNET_INTERNAL_GMII_MNG_DAT_REG 0x16 + +#define DNET_INTERNAL_GMII_MNG_CMD_FIN (1 << 14) + +#define DNET_INTERNAL_WRITE (1 << 31) + +/* MAC-CORE REGISTER FIELDS */ + +/* MAC-CORE MODE REGISTER FIELDS */ +#define DNET_INTERNAL_MODE_GBITEN (1 << 0) +#define DNET_INTERNAL_MODE_FCEN (1 << 1) +#define DNET_INTERNAL_MODE_RXEN (1 << 2) +#define DNET_INTERNAL_MODE_TXEN (1 << 3) + +/* MAC-CORE RXTX CONTROL REGISTER FIELDS */ +#define DNET_INTERNAL_RXTX_CONTROL_RXSHORTFRAME (1 << 8) +#define DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST (1 << 7) +#define DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST (1 << 4) +#define DNET_INTERNAL_RXTX_CONTROL_RXPAUSE (1 << 3) +#define DNET_INTERNAL_RXTX_CONTROL_DISTXFCS (1 << 2) +#define DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS (1 << 1) +#define DNET_INTERNAL_RXTX_CONTROL_ENPROMISC (1 << 0) +#define 
DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL (1 << 6) +#define DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP (1 << 5) + +/* SYSTEM CONTROL REGISTER FIELDS */ +#define DNET_SYS_CTL_IGNORENEXTPKT (1 << 0) +#define DNET_SYS_CTL_SENDPAUSE (1 << 2) +#define DNET_SYS_CTL_RXFIFOFLUSH (1 << 3) +#define DNET_SYS_CTL_TXFIFOFLUSH (1 << 4) + +/* TX STATUS REGISTER FIELDS */ +#define DNET_TX_STATUS_FIFO_ALMOST_EMPTY (1 << 2) +#define DNET_TX_STATUS_FIFO_ALMOST_FULL (1 << 1) + +/* INTERRUPT SOURCE REGISTER FIELDS */ +#define DNET_INTR_SRC_TX_PKTSENT (1 << 0) +#define DNET_INTR_SRC_TX_FIFOAF (1 << 1) +#define DNET_INTR_SRC_TX_FIFOAE (1 << 2) +#define DNET_INTR_SRC_TX_DISCFRM (1 << 3) +#define DNET_INTR_SRC_TX_FIFOFULL (1 << 4) +#define DNET_INTR_SRC_RX_CMDFIFOAF (1 << 8) +#define DNET_INTR_SRC_RX_CMDFIFOFF (1 << 9) +#define DNET_INTR_SRC_RX_DATAFIFOFF (1 << 10) +#define DNET_INTR_SRC_TX_SUMMARY (1 << 16) +#define DNET_INTR_SRC_RX_SUMMARY (1 << 17) +#define DNET_INTR_SRC_PHY (1 << 19) + +/* INTERRUPT ENABLE REGISTER FIELDS */ +#define DNET_INTR_ENB_TX_PKTSENT (1 << 0) +#define DNET_INTR_ENB_TX_FIFOAF (1 << 1) +#define DNET_INTR_ENB_TX_FIFOAE (1 << 2) +#define DNET_INTR_ENB_TX_DISCFRM (1 << 3) +#define DNET_INTR_ENB_TX_FIFOFULL (1 << 4) +#define DNET_INTR_ENB_RX_PKTRDY (1 << 8) +#define DNET_INTR_ENB_RX_FIFOAF (1 << 9) +#define DNET_INTR_ENB_RX_FIFOERR (1 << 10) +#define DNET_INTR_ENB_RX_ERROR (1 << 11) +#define DNET_INTR_ENB_RX_FIFOFULL (1 << 12) +#define DNET_INTR_ENB_RX_FIFOAE (1 << 13) +#define DNET_INTR_ENB_TX_SUMMARY (1 << 16) +#define DNET_INTR_ENB_RX_SUMMARY (1 << 17) +#define DNET_INTR_ENB_GLOBAL_ENABLE (1 << 18) + +/* default values: + * almost empty = less than one full sized ethernet frame (no jumbo) inside + * the fifo almost full = can write less than one full sized ethernet frame + * (no jumbo) inside the fifo + */ +#define DNET_CFG_TX_FIFO_FULL_THRES 25 +#define DNET_CFG_RX_FIFO_FULL_THRES 20 + +/* + * Capabilities. Used by the driver to know the capabilities that the ethernet + * controller inside the FPGA have. + */ + +#define DNET_HAS_MDIO (1 << 0) +#define DNET_HAS_IRQ (1 << 1) +#define DNET_HAS_GIGABIT (1 << 2) +#define DNET_HAS_DMA (1 << 3) + +#define DNET_HAS_MII (1 << 4) /* or GMII */ +#define DNET_HAS_RMII (1 << 5) /* or RGMII */ + +#define DNET_CAPS_MASK 0xFFFF + +#define DNET_FIFO_SIZE 1024 /* 1K x 32 bit */ +#define DNET_FIFO_TX_DATA_AF_TH (DNET_FIFO_SIZE - 384) /* 384 = 1536 / 4 */ +#define DNET_FIFO_TX_DATA_AE_TH 384 + +#define DNET_FIFO_RX_CMD_AF_TH (1 << 16) /* just one frame inside the FIFO */ + +/* + * Hardware-collected statistics. 
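+ *
+ * Each field mirrors one of the DNET_RX_*_CNT / DNET_TX_*_CNT counter
+ * registers defined above. As an illustrative example (not authoritative),
+ * the raw value behind rx_crc_err would be read as
+ * dnet_readl(bp, RX_CRC_ERR_CNT), which the register-access macro above
+ * expands to readl(bp->regs + DNET_RX_CRC_ERR_CNT); the accumulation into
+ * this structure is done by the driver code in dnet.c.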
+ */ +struct dnet_stats { + u32 rx_pkt_ignr; + u32 rx_len_chk_err; + u32 rx_lng_frm; + u32 rx_shrt_frm; + u32 rx_ipg_viol; + u32 rx_crc_err; + u32 rx_ok_pkt; + u32 rx_ctl_frm; + u32 rx_pause_frm; + u32 rx_multicast; + u32 rx_broadcast; + u32 rx_vlan_tag; + u32 rx_pre_shrink; + u32 rx_drib_nib; + u32 rx_unsup_opcd; + u32 rx_byte; + u32 tx_unicast; + u32 tx_pause_frm; + u32 tx_multicast; + u32 tx_brdcast; + u32 tx_vlan_tag; + u32 tx_bad_fcs; + u32 tx_jumbo; + u32 tx_byte; +}; + +struct dnet { + void __iomem *regs; + spinlock_t lock; + struct platform_device *pdev; + struct net_device *dev; + struct dnet_stats hw_stats; + unsigned int capabilities; /* read from FPGA */ + struct napi_struct napi; + + /* PHY stuff */ + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + unsigned int link; + unsigned int speed; + unsigned int duplex; +}; + +#endif /* _DNET_H */ diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c new file mode 100644 index 000000000..d1017509b --- /dev/null +++ b/drivers/net/ethernet/ec_bhf.c @@ -0,0 +1,625 @@ + /* + * drivers/net/ethernet/ec_bhf.c + * + * Copyright (C) 2014 Darek Marcinkiewicz + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* This is a driver for EtherCAT master module present on CCAT FPGA. + * Those can be found on Bechhoff CX50xx industrial PCs. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define TIMER_INTERVAL_NSEC 20000 + +#define INFO_BLOCK_SIZE 0x10 +#define INFO_BLOCK_TYPE 0x0 +#define INFO_BLOCK_REV 0x2 +#define INFO_BLOCK_BLK_CNT 0x4 +#define INFO_BLOCK_TX_CHAN 0x4 +#define INFO_BLOCK_RX_CHAN 0x5 +#define INFO_BLOCK_OFFSET 0x8 + +#define EC_MII_OFFSET 0x4 +#define EC_FIFO_OFFSET 0x8 +#define EC_MAC_OFFSET 0xc + +#define MAC_FRAME_ERR_CNT 0x0 +#define MAC_RX_ERR_CNT 0x1 +#define MAC_CRC_ERR_CNT 0x2 +#define MAC_LNK_LST_ERR_CNT 0x3 +#define MAC_TX_FRAME_CNT 0x10 +#define MAC_RX_FRAME_CNT 0x14 +#define MAC_TX_FIFO_LVL 0x20 +#define MAC_DROPPED_FRMS 0x28 +#define MAC_CONNECTED_CCAT_FLAG 0x78 + +#define MII_MAC_ADDR 0x8 +#define MII_MAC_FILT_FLAG 0xe +#define MII_LINK_STATUS 0xf + +#define FIFO_TX_REG 0x0 +#define FIFO_TX_RESET 0x8 +#define FIFO_RX_REG 0x10 +#define FIFO_RX_ADDR_VALID (1u << 31) +#define FIFO_RX_RESET 0x18 + +#define DMA_CHAN_OFFSET 0x1000 +#define DMA_CHAN_SIZE 0x8 + +#define DMA_WINDOW_SIZE_MASK 0xfffffffc + +#define ETHERCAT_MASTER_ID 0x14 + +static struct pci_device_id ids[] = { + { PCI_DEVICE(0x15ec, 0x5000), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ids); + +struct rx_header { +#define RXHDR_NEXT_ADDR_MASK 0xffffffu +#define RXHDR_NEXT_VALID (1u << 31) + __le32 next; +#define RXHDR_NEXT_RECV_FLAG 0x1 + __le32 recv; +#define RXHDR_LEN_MASK 0xfffu + __le16 len; + __le16 port; + __le32 reserved; + u8 timestamp[8]; +} __packed; + +#define PKT_PAYLOAD_SIZE 0x7e8 +struct rx_desc { + struct rx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +struct tx_header { + __le16 len; +#define TX_HDR_PORT_0 0x1 +#define TX_HDR_PORT_1 0x2 + u8 port; + u8 ts_enable; +#define TX_HDR_SENT 0x1 + __le32 sent; + 
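/* presumably written by the CCAT hardware; mirrors the 8-byte
+ * timestamp field of struct rx_header above */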
u8 timestamp[8]; +} __packed; + +struct tx_desc { + struct tx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +#define FIFO_SIZE 64 + +static long polling_frequency = TIMER_INTERVAL_NSEC; + +struct bhf_dma { + u8 *buf; + size_t len; + dma_addr_t buf_phys; + + u8 *alloc; + size_t alloc_len; + dma_addr_t alloc_phys; +}; + +struct ec_bhf_priv { + struct net_device *net_dev; + struct pci_dev *dev; + + void __iomem *io; + void __iomem *dma_io; + + struct hrtimer hrtimer; + + int tx_dma_chan; + int rx_dma_chan; + void __iomem *ec_io; + void __iomem *fifo_io; + void __iomem *mii_io; + void __iomem *mac_io; + + struct bhf_dma rx_buf; + struct rx_desc *rx_descs; + int rx_dnext; + int rx_dcount; + + struct bhf_dma tx_buf; + struct tx_desc *tx_descs; + int tx_dcount; + int tx_dnext; + + u64 stat_rx_bytes; + u64 stat_tx_bytes; +}; + +#define PRIV_TO_DEV(priv) (&(priv)->dev->dev) + +static void ec_bhf_reset(struct ec_bhf_priv *priv) +{ + iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT); + iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT); + iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT); + iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS); + + iowrite8(0, priv->fifo_io + FIFO_TX_RESET); + iowrite8(0, priv->fifo_io + FIFO_RX_RESET); + + iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL); +} + +static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) +{ + u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header); + u32 addr = (u8 *)desc - priv->tx_buf.buf; + + iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); +} + +static int ec_bhf_desc_sent(struct tx_desc *desc) +{ + return le32_to_cpu(desc->header.sent) & TX_HDR_SENT; +} + +static void ec_bhf_process_tx(struct ec_bhf_priv *priv) +{ + if (unlikely(netif_queue_stopped(priv->net_dev))) { + /* Make sure that we perceive changes to tx_dnext. 
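+ * The smp_rmb() is intended to pair with the smp_wmb() in
+ * ec_bhf_start_xmit() below, so that this timer-driven path sees the
+ * tx_dnext value that was published before the queue was stopped.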
*/ + smp_rmb(); + + if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) + netif_wake_queue(priv->net_dev); + } +} + +static int ec_bhf_pkt_received(struct rx_desc *desc) +{ + return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG; +} + +static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) +{ + iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf), + priv->fifo_io + FIFO_RX_REG); +} + +static void ec_bhf_process_rx(struct ec_bhf_priv *priv) +{ + struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; + + while (ec_bhf_pkt_received(desc)) { + int pkt_size = (le16_to_cpu(desc->header.len) & + RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4; + u8 *data = desc->data; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); + if (skb) { + memcpy(skb_put(skb, pkt_size), data, pkt_size); + skb->protocol = eth_type_trans(skb, priv->net_dev); + priv->stat_rx_bytes += pkt_size; + + netif_rx(skb); + } else { + dev_err_ratelimited(PRIV_TO_DEV(priv), + "Couldn't allocate a skb_buff for a packet of size %u\n", + pkt_size); + } + + desc->header.recv = 0; + + ec_bhf_add_rx_desc(priv, desc); + + priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; + desc = &priv->rx_descs[priv->rx_dnext]; + } +} + +static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) +{ + struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv, + hrtimer); + ec_bhf_process_rx(priv); + ec_bhf_process_tx(priv); + + if (!netif_running(priv->net_dev)) + return HRTIMER_NORESTART; + + hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); + return HRTIMER_RESTART; +} + +static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) +{ + struct device *dev = PRIV_TO_DEV(priv); + unsigned block_count, i; + void __iomem *ec_info; + + block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); + for (i = 0; i < block_count; i++) { + u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + + INFO_BLOCK_TYPE); + if (type == ETHERCAT_MASTER_ID) + break; + } + if (i == block_count) { + dev_err(dev, "EtherCAT master with DMA block not found\n"); + return -ENODEV; + } + + ec_info = priv->io + i * INFO_BLOCK_SIZE; + + priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); + priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); + + priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); + priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); + priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); + priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); + + return 0; +} + +static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct tx_desc *desc; + unsigned len; + + desc = &priv->tx_descs[priv->tx_dnext]; + + skb_copy_and_csum_dev(skb, desc->data); + len = skb->len; + + memset(&desc->header, 0, sizeof(desc->header)); + desc->header.len = cpu_to_le16(len); + desc->header.port = TX_HDR_PORT_0; + + ec_bhf_send_packet(priv, desc); + + priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; + + if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { + /* Make sure that updates to tx_dnext are perceived + * by timer routine. 
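+ * (This smp_wmb() pairs with the smp_rmb() in ec_bhf_process_tx(), which
+ * re-checks the descriptor at tx_dnext before waking the queue.)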
+ */ + smp_wmb(); + + netif_stop_queue(net_dev); + } + + priv->stat_tx_bytes += len; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, + struct bhf_dma *buf, + int channel, + int size) +{ + int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET; + struct device *dev = PRIV_TO_DEV(priv); + u32 mask; + + iowrite32(0xffffffff, priv->dma_io + offset); + + mask = ioread32(priv->dma_io + offset); + mask &= DMA_WINDOW_SIZE_MASK; + + /* We want to allocate a chunk of memory that is: + * - aligned to the mask we just read + * - is of size 2^mask bytes (at most) + * In order to ensure that we will allocate buffer of + * 2 * 2^mask bytes. + */ + buf->len = min_t(int, ~mask + 1, size); + buf->alloc_len = 2 * buf->len; + + buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, + GFP_KERNEL); + if (buf->alloc == NULL) { + dev_err(dev, "Failed to allocate buffer\n"); + return -ENOMEM; + } + + buf->buf_phys = (buf->alloc_phys + buf->len) & mask; + buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys); + + iowrite32(0, priv->dma_io + offset + 4); + iowrite32(buf->buf_phys, priv->dma_io + offset); + + return 0; +} + +static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) +{ + int i = 0; + + priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); + priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf; + priv->tx_dnext = 0; + + for (i = 0; i < priv->tx_dcount; i++) + priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT); +} + +static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) +{ + int i; + + priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); + priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf; + priv->rx_dnext = 0; + + for (i = 0; i < priv->rx_dcount; i++) { + struct rx_desc *desc = &priv->rx_descs[i]; + u32 next; + + if (i != priv->rx_dcount - 1) + next = (u8 *)(desc + 1) - priv->rx_buf.buf; + else + next = 0; + next |= RXHDR_NEXT_VALID; + desc->header.next = cpu_to_le32(next); + desc->header.recv = 0; + ec_bhf_add_rx_desc(priv, desc); + } +} + +static int ec_bhf_open(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + int err = 0; + + ec_bhf_reset(priv); + + err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, + FIFO_SIZE * sizeof(struct rx_desc)); + if (err) { + dev_err(dev, "Failed to allocate rx buffer\n"); + goto out; + } + ec_bhf_setup_rx_descs(priv); + + err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, + FIFO_SIZE * sizeof(struct tx_desc)); + if (err) { + dev_err(dev, "Failed to allocate tx buffer\n"); + goto error_rx_free; + } + iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); + ec_bhf_setup_tx_descs(priv); + + netif_start_queue(net_dev); + + hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + priv->hrtimer.function = ec_bhf_timer_fun; + hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), + HRTIMER_MODE_REL); + + return 0; + +error_rx_free: + dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc, + priv->rx_buf.alloc_len); +out: + return err; +} + +static int ec_bhf_stop(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + + hrtimer_cancel(&priv->hrtimer); + + ec_bhf_reset(priv); + + netif_tx_disable(net_dev); + + dma_free_coherent(dev, priv->tx_buf.alloc_len, + priv->tx_buf.alloc, priv->tx_buf.alloc_phys); + dma_free_coherent(dev, priv->rx_buf.alloc_len, + 
priv->rx_buf.alloc, priv->rx_buf.alloc_phys); + + return 0; +} + +static struct rtnl_link_stats64 * +ec_bhf_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + + stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) + + ioread8(priv->mac_io + MAC_CRC_ERR_CNT) + + ioread8(priv->mac_io + MAC_FRAME_ERR_CNT); + stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT); + stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT); + stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS); + + stats->tx_bytes = priv->stat_tx_bytes; + stats->rx_bytes = priv->stat_rx_bytes; + + return stats; +} + +static const struct net_device_ops ec_bhf_netdev_ops = { + .ndo_start_xmit = ec_bhf_start_xmit, + .ndo_open = ec_bhf_open, + .ndo_stop = ec_bhf_stop, + .ndo_get_stats64 = ec_bhf_get_stats, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr +}; + +static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct net_device *net_dev; + struct ec_bhf_priv *priv; + void __iomem *dma_io; + void __iomem *io; + int err = 0; + + err = pci_enable_device(dev); + if (err) + return err; + + pci_set_master(dev); + + err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + err = -EIO; + goto err_disable_dev; + } + + err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + goto err_disable_dev; + } + + err = pci_request_regions(dev, "ec_bhf"); + if (err) { + dev_err(&dev->dev, "Failed to request pci memory regions\n"); + goto err_disable_dev; + } + + io = pci_iomap(dev, 0, 0); + if (!io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 0"); + err = -EIO; + goto err_release_regions; + } + + dma_io = pci_iomap(dev, 2, 0); + if (!dma_io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 2"); + err = -EIO; + goto err_unmap; + } + + net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv)); + if (net_dev == NULL) { + err = -ENOMEM; + goto err_unmap_dma_io; + } + + pci_set_drvdata(dev, net_dev); + SET_NETDEV_DEV(net_dev, &dev->dev); + + net_dev->features = 0; + net_dev->flags |= IFF_NOARP; + + net_dev->netdev_ops = &ec_bhf_netdev_ops; + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + priv->io = io; + priv->dma_io = dma_io; + priv->dev = dev; + + err = ec_bhf_setup_offsets(priv); + if (err < 0) + goto err_free_net_dev; + + memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); + + err = register_netdev(net_dev); + if (err < 0) + goto err_free_net_dev; + + return 0; + +err_free_net_dev: + free_netdev(net_dev); +err_unmap_dma_io: + pci_iounmap(dev, dma_io); +err_unmap: + pci_iounmap(dev, io); +err_release_regions: + pci_release_regions(dev); +err_disable_dev: + pci_clear_master(dev); + pci_disable_device(dev); + + return err; +} + +static void ec_bhf_remove(struct pci_dev *dev) +{ + struct net_device *net_dev = pci_get_drvdata(dev); + struct ec_bhf_priv *priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); + free_netdev(net_dev); + + pci_iounmap(dev, priv->dma_io); + pci_iounmap(dev, priv->io); + pci_release_regions(dev); + pci_clear_master(dev); + pci_disable_device(dev); +} + +static struct pci_driver pci_driver = { + .name = "ec_bhf", + .id_table = ids, + .probe = ec_bhf_probe, + .remove = ec_bhf_remove, +}; 
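+
+/* The explicit init/exit pair below is the open-coded equivalent of
+ * module_pci_driver(pci_driver); it only registers and unregisters the
+ * PCI driver declared above.
+ *
+ * Illustrative (hypothetical) usage: the hrtimer polling interval can be
+ * tuned at load time through the polling_frequency parameter declared
+ * further down, e.g.
+ *
+ *   modprobe ec_bhf polling_frequency=50000
+ *
+ * to poll every 50 us instead of the default 20000 ns (20 us).
+ */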
+ +static int __init ec_bhf_init(void) +{ + return pci_register_driver(&pci_driver); +} + +static void __exit ec_bhf_exit(void) +{ + pci_unregister_driver(&pci_driver); +} + +module_init(ec_bhf_init); +module_exit(ec_bhf_exit); + +module_param(polling_frequency, long, S_IRUGO); +MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dariusz Marcinkiewicz "); diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig new file mode 100644 index 000000000..1b8d638c6 --- /dev/null +++ b/drivers/net/ethernet/emulex/Kconfig @@ -0,0 +1,23 @@ +# +# Emulex driver configuration +# + +config NET_VENDOR_EMULEX + bool "Emulex devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Emulex cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_EMULEX + +source "drivers/net/ethernet/emulex/benet/Kconfig" + +endif # NET_VENDOR_EMULEX diff --git a/drivers/net/ethernet/emulex/Makefile b/drivers/net/ethernet/emulex/Makefile new file mode 100644 index 000000000..ea8ec574d --- /dev/null +++ b/drivers/net/ethernet/emulex/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Emulex device drivers. +# + +obj-$(CONFIG_BE2NET) += benet/ diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig new file mode 100644 index 000000000..ea94a8eb6 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/Kconfig @@ -0,0 +1,14 @@ +config BE2NET + tristate "ServerEngines' 10Gbps NIC - BladeEngine" + depends on PCI + ---help--- + This driver implements the NIC functionality for ServerEngines' + 10Gbps network adapter - BladeEngine. + +config BE2NET_VXLAN + bool "VXLAN offload support on be2net driver" + default y + depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m) + ---help--- + Say Y here if you want to enable VXLAN offload support on + be2net driver. diff --git a/drivers/net/ethernet/emulex/benet/Makefile b/drivers/net/ethernet/emulex/benet/Makefile new file mode 100644 index 000000000..1a91b2769 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/Makefile @@ -0,0 +1,7 @@ +# +# Makefile to build the network driver for ServerEngine's BladeEngine. +# + +obj-$(CONFIG_BE2NET) += be2net.o + +be2net-y := be_main.o be_cmds.o be_ethtool.o be_roce.o diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h new file mode 100644 index 000000000..1bf1cdce7 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -0,0 +1,821 @@ +/* + * Copyright (C) 2005 - 2014 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. 
+ * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + */ + +#ifndef BE_H +#define BE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "be_hw.h" +#include "be_roce.h" + +#define DRV_VER "10.6.0.1" +#define DRV_NAME "be2net" +#define BE_NAME "Emulex BladeEngine2" +#define BE3_NAME "Emulex BladeEngine3" +#define OC_NAME "Emulex OneConnect" +#define OC_NAME_BE OC_NAME "(be3)" +#define OC_NAME_LANCER OC_NAME "(Lancer)" +#define OC_NAME_SH OC_NAME "(Skyhawk)" +#define DRV_DESC "Emulex OneConnect NIC Driver" + +#define BE_VENDOR_ID 0x19a2 +#define EMULEX_VENDOR_ID 0x10df +#define BE_DEVICE_ID1 0x211 +#define BE_DEVICE_ID2 0x221 +#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */ +#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */ +#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */ +#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */ +#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */ +#define OC_DEVICE_ID6 0x728 /* Device id for VF in SkyHawk */ +#define OC_SUBSYS_DEVICE_ID1 0xE602 +#define OC_SUBSYS_DEVICE_ID2 0xE642 +#define OC_SUBSYS_DEVICE_ID3 0xE612 +#define OC_SUBSYS_DEVICE_ID4 0xE652 + +/* Number of bytes of an RX frame that are copied to skb->data */ +#define BE_HDR_LEN ((u16) 64) +/* allocate extra space to allow tunneling decapsulation without head reallocation */ +#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64) + +#define BE_MAX_JUMBO_FRAME_SIZE 9018 +#define BE_MIN_MTU 256 +#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \ + (ETH_HLEN + ETH_FCS_LEN)) + +#define BE_NUM_VLANS_SUPPORTED 64 +#define BE_MAX_EQD 128u +#define BE_MAX_TX_FRAG_COUNT 30 + +#define EVNT_Q_LEN 1024 +#define TX_Q_LEN 2048 +#define TX_CQ_LEN 1024 +#define RX_Q_LEN 1024 /* Does not support any other value */ +#define RX_CQ_LEN 1024 +#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ +#define MCC_CQ_LEN 256 + +#define BE2_MAX_RSS_QS 4 +#define BE3_MAX_RSS_QS 16 +#define BE3_MAX_TX_QS 16 +#define BE3_MAX_EVT_QS 16 +#define BE3_SRIOV_MAX_EVT_QS 8 + +#define MAX_RSS_IFACES 15 +#define MAX_RX_QS 32 +#define MAX_EVT_QS 32 +#define MAX_TX_QS 32 + +#define MAX_ROCE_EQS 5 +#define MAX_MSIX_VECTORS 32 +#define MIN_MSIX_VECTORS 1 +#define BE_NAPI_WEIGHT 64 +#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ +#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) +#define MAX_NUM_POST_ERX_DB 255u + +#define MAX_VFS 30 /* Max VFs supported by BE3 FW */ +#define FW_VER_LEN 32 + +#define RSS_INDIR_TABLE_LEN 128 +#define RSS_HASH_KEY_LEN 40 + +struct be_dma_mem { + void *va; + dma_addr_t dma; + u32 size; +}; + +struct be_queue_info { + struct be_dma_mem dma_mem; + u16 len; + u16 entry_size; /* Size of an element in the queue */ + u16 id; + u16 tail, head; + bool created; + atomic_t used; /* Number of valid elements in the queue */ +}; + +static inline u32 MODULO(u16 val, u16 limit) +{ + BUG_ON(limit & (limit - 1)); + return val & (limit - 1); +} + +static inline void index_adv(u16 *index, u16 val, u16 limit) +{ + *index = MODULO((*index + val), limit); +} + +static inline void index_inc(u16 *index, u16 limit) +{ + *index = MODULO((*index + 1), limit); +} + +static inline void *queue_head_node(struct be_queue_info *q) +{ + return q->dma_mem.va + q->head * q->entry_size; +} + +static inline void *queue_tail_node(struct be_queue_info *q) +{ + return q->dma_mem.va + q->tail * q->entry_size; +} + +static inline 
void *queue_index_node(struct be_queue_info *q, u16 index) +{ + return q->dma_mem.va + index * q->entry_size; +} + +static inline void queue_head_inc(struct be_queue_info *q) +{ + index_inc(&q->head, q->len); +} + +static inline void index_dec(u16 *index, u16 limit) +{ + *index = MODULO((*index - 1), limit); +} + +static inline void queue_tail_inc(struct be_queue_info *q) +{ + index_inc(&q->tail, q->len); +} + +struct be_eq_obj { + struct be_queue_info q; + char desc[32]; + + /* Adaptive interrupt coalescing (AIC) info */ + bool enable_aic; + u32 min_eqd; /* in usecs */ + u32 max_eqd; /* in usecs */ + u32 eqd; /* configured val when aic is off */ + u32 cur_eqd; /* in usecs */ + + u8 idx; /* array index */ + u8 msix_idx; + u16 spurious_intr; + struct napi_struct napi; + struct be_adapter *adapter; + cpumask_var_t affinity_mask; + +#ifdef CONFIG_NET_RX_BUSY_POLL +#define BE_EQ_IDLE 0 +#define BE_EQ_NAPI 1 /* napi owns this EQ */ +#define BE_EQ_POLL 2 /* poll owns this EQ */ +#define BE_EQ_LOCKED (BE_EQ_NAPI | BE_EQ_POLL) +#define BE_EQ_NAPI_YIELD 4 /* napi yielded this EQ */ +#define BE_EQ_POLL_YIELD 8 /* poll yielded this EQ */ +#define BE_EQ_YIELD (BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD) +#define BE_EQ_USER_PEND (BE_EQ_POLL | BE_EQ_POLL_YIELD) + unsigned int state; + spinlock_t lock; /* lock to serialize napi and busy-poll */ +#endif /* CONFIG_NET_RX_BUSY_POLL */ +} ____cacheline_aligned_in_smp; + +struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ + bool enable; + u32 min_eqd; /* in usecs */ + u32 max_eqd; /* in usecs */ + u32 prev_eqd; /* in usecs */ + u32 et_eqd; /* configured val when aic is off */ + ulong jiffies; + u64 rx_pkts_prev; /* Used to calculate RX pps */ + u64 tx_reqs_prev; /* Used to calculate TX pps */ +}; + +enum { + NAPI_POLLING, + BUSY_POLLING +}; + +struct be_mcc_obj { + struct be_queue_info q; + struct be_queue_info cq; + bool rearm_cq; +}; + +struct be_tx_stats { + u64 tx_bytes; + u64 tx_pkts; + u64 tx_reqs; + u64 tx_compl; + ulong tx_jiffies; + u32 tx_stops; + u32 tx_drv_drops; /* pkts dropped by driver */ + /* the error counters are described in be_ethtool.c */ + u32 tx_hdr_parse_err; + u32 tx_dma_err; + u32 tx_tso_err; + u32 tx_spoof_check_err; + u32 tx_qinq_err; + u32 tx_internal_parity_err; + struct u64_stats_sync sync; + struct u64_stats_sync sync_compl; +}; + +/* Structure to hold some data of interest obtained from a TX CQE */ +struct be_tx_compl_info { + u8 status; /* Completion status */ + u16 end_index; /* Completed TXQ Index */ +}; + +struct be_tx_obj { + u32 db_offset; + struct be_queue_info q; + struct be_queue_info cq; + struct be_tx_compl_info txcp; + /* Remember the skbs that were transmitted */ + struct sk_buff *sent_skb_list[TX_Q_LEN]; + struct be_tx_stats stats; + u16 pend_wrb_cnt; /* Number of WRBs yet to be given to HW */ + u16 last_req_wrb_cnt; /* wrb cnt of the last req in the Q */ + u16 last_req_hdr; /* index of the last req's hdr-wrb */ +} ____cacheline_aligned_in_smp; + +/* Struct to remember the pages posted for rx frags */ +struct be_rx_page_info { + struct page *page; + /* set to page-addr for last frag of the page & frag-addr otherwise */ + DEFINE_DMA_UNMAP_ADDR(bus); + u16 page_offset; + bool last_frag; /* last frag of the page */ +}; + +struct be_rx_stats { + u64 rx_bytes; + u64 rx_pkts; + u32 rx_drops_no_skbs; /* skb allocation errors */ + u32 rx_drops_no_frags; /* HW has no fetched frags */ + u32 rx_post_fail; /* page post alloc failures */ + u32 rx_compl; + u32 rx_mcast_pkts; + u32 rx_compl_err; /* completions with err 
set */ + struct u64_stats_sync sync; +}; + +struct be_rx_compl_info { + u32 rss_hash; + u16 vlan_tag; + u16 pkt_size; + u16 port; + u8 vlanf; + u8 num_rcvd; + u8 err; + u8 ipf; + u8 tcpf; + u8 udpf; + u8 ip_csum; + u8 l4_csum; + u8 ipv6; + u8 qnq; + u8 pkt_type; + u8 ip_frag; + u8 tunneled; +}; + +struct be_rx_obj { + struct be_adapter *adapter; + struct be_queue_info q; + struct be_queue_info cq; + struct be_rx_compl_info rxcp; + struct be_rx_page_info page_info_tbl[RX_Q_LEN]; + struct be_rx_stats stats; + u8 rss_id; + bool rx_post_starved; /* Zero rx frags have been posted to BE */ +} ____cacheline_aligned_in_smp; + +struct be_drv_stats { + u32 be_on_die_temperature; + u32 eth_red_drops; + u32 dma_map_errors; + u32 rx_drops_no_pbuf; + u32 rx_drops_no_txpb; + u32 rx_drops_no_erx_descr; + u32 rx_drops_no_tpre_descr; + u32 rx_drops_too_many_frags; + u32 forwarded_packets; + u32 rx_drops_mtu; + u32 rx_crc_errors; + u32 rx_alignment_symbol_errors; + u32 rx_pause_frames; + u32 rx_priority_pause_frames; + u32 rx_control_frames; + u32 rx_in_range_errors; + u32 rx_out_range_errors; + u32 rx_frame_too_long; + u32 rx_address_filtered; + u32 rx_dropped_too_small; + u32 rx_dropped_too_short; + u32 rx_dropped_header_too_small; + u32 rx_dropped_tcp_length; + u32 rx_dropped_runt; + u32 rx_ip_checksum_errs; + u32 rx_tcp_checksum_errs; + u32 rx_udp_checksum_errs; + u32 tx_pauseframes; + u32 tx_priority_pauseframes; + u32 tx_controlframes; + u32 rxpp_fifo_overflow_drop; + u32 rx_input_fifo_overflow_drop; + u32 pmem_fifo_overflow_drop; + u32 jabber_events; + u32 rx_roce_bytes_lsd; + u32 rx_roce_bytes_msd; + u32 rx_roce_frames; + u32 roce_drops_payload_len; + u32 roce_drops_crc; +}; + +/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */ +#define BE_RESET_VLAN_TAG_ID 0xFFFF + +struct be_vf_cfg { + unsigned char mac_addr[ETH_ALEN]; + int if_handle; + int pmac_id; + u16 vlan_tag; + u32 tx_rate; + u32 plink_tracking; + u32 privileges; +}; + +enum vf_state { + ENABLED = 0, + ASSIGNED = 1 +}; + +#define BE_FLAGS_LINK_STATUS_INIT BIT(1) +#define BE_FLAGS_SRIOV_ENABLED BIT(2) +#define BE_FLAGS_WORKER_SCHEDULED BIT(3) +#define BE_FLAGS_NAPI_ENABLED BIT(6) +#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7) +#define BE_FLAGS_VXLAN_OFFLOADS BIT(8) +#define BE_FLAGS_SETUP_DONE BIT(9) +#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) +#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11) + +#define BE_UC_PMAC_COUNT 30 +#define BE_VF_UC_PMAC_COUNT 2 + +/* Ethtool set_dump flags */ +#define LANCER_INITIATE_FW_DUMP 0x1 +#define LANCER_DELETE_FW_DUMP 0x2 + +struct phy_info { +/* From SFF-8472 spec */ +#define SFP_VENDOR_NAME_LEN 17 + u8 transceiver; + u8 autoneg; + u8 fc_autoneg; + u8 port_type; + u16 phy_type; + u16 interface_type; + u32 misc_params; + u16 auto_speeds_supported; + u16 fixed_speeds_supported; + int link_speed; + u32 advertising; + u32 supported; + u8 cable_type; + u8 vendor_name[SFP_VENDOR_NAME_LEN]; + u8 vendor_pn[SFP_VENDOR_NAME_LEN]; +}; + +struct be_resources { + u16 max_vfs; /* Total VFs "really" supported by FW/HW */ + u16 max_mcast_mac; + u16 max_tx_qs; + u16 max_rss_qs; + u16 max_rx_qs; + u16 max_cq_count; + u16 max_uc_mac; /* Max UC MACs programmable */ + u16 max_vlans; /* Number of vlans supported */ + u16 max_iface_count; + u16 max_mcc_count; + u16 max_evt_qs; + u32 if_cap_flags; + u32 vf_if_cap_flags; /* VF if capability flags */ +}; + +struct rss_info { + u64 rss_flags; + u8 rsstable[RSS_INDIR_TABLE_LEN]; + u8 rss_queue[RSS_INDIR_TABLE_LEN]; + u8 rss_hkey[RSS_HASH_KEY_LEN]; +}; + +/* Macros 
to read/write the 'features' word of be_wrb_params structure. + */ +#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT +#define BE_WRB_F_MASK(name) BIT_MASK(BE_WRB_F_##name##_BIT) + +#define BE_WRB_F_GET(word, name) \ + (((word) & (BE_WRB_F_MASK(name))) >> BE_WRB_F_BIT(name)) + +#define BE_WRB_F_SET(word, name, val) \ + ((word) |= (((val) << BE_WRB_F_BIT(name)) & BE_WRB_F_MASK(name))) + +/* Feature/offload bits */ +enum { + BE_WRB_F_CRC_BIT, /* Ethernet CRC */ + BE_WRB_F_IPCS_BIT, /* IP csum */ + BE_WRB_F_TCPCS_BIT, /* TCP csum */ + BE_WRB_F_UDPCS_BIT, /* UDP csum */ + BE_WRB_F_LSO_BIT, /* LSO */ + BE_WRB_F_LSO6_BIT, /* LSO6 */ + BE_WRB_F_VLAN_BIT, /* VLAN */ + BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */ +}; + +/* The structure below provides a HW-agnostic abstraction of WRB params + * retrieved from a TX skb. This is in turn passed to chip specific routines + * during transmit, to set the corresponding params in the WRB. + */ +struct be_wrb_params { + u32 features; /* Feature bits */ + u16 vlan_tag; /* VLAN tag */ + u16 lso_mss; /* MSS for LSO */ +}; + +struct be_adapter { + struct pci_dev *pdev; + struct net_device *netdev; + + u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ + u8 __iomem *db; /* Door Bell */ + u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */ + + struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ + struct be_dma_mem mbox_mem; + /* Mbox mem is adjusted to align to 16 bytes. The allocated addr + * is stored for freeing purpose */ + struct be_dma_mem mbox_mem_alloced; + + struct be_mcc_obj mcc_obj; + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ + spinlock_t mcc_cq_lock; + + u16 cfg_num_qs; /* configured via set-channels */ + u16 num_evt_qs; + u16 num_msix_vec; + struct be_eq_obj eq_obj[MAX_EVT_QS]; + struct msix_entry msix_entries[MAX_MSIX_VECTORS]; + bool isr_registered; + + /* TX Rings */ + u16 num_tx_qs; + struct be_tx_obj tx_obj[MAX_TX_QS]; + + /* Rx rings */ + u16 num_rx_qs; + u16 num_rss_qs; + u16 need_def_rxq; + struct be_rx_obj rx_obj[MAX_RX_QS]; + u32 big_page_size; /* Compounded page size shared by rx wrbs */ + + struct be_drv_stats drv_stats; + struct be_aic_obj aic_obj[MAX_EVT_QS]; + u8 vlan_prio_bmap; /* Available Priority BitMap */ + u16 recommended_prio; /* Recommended Priority */ + struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */ + + struct be_dma_mem stats_cmd; + /* Work queue used to perform periodic tasks like getting statistics */ + struct delayed_work work; + u16 work_counter; + + struct delayed_work be_err_detection_work; + u32 flags; + u32 cmd_privileges; + /* Ethtool knobs and info */ + char fw_ver[FW_VER_LEN]; + char fw_on_flash[FW_VER_LEN]; + + /* IFACE filtering fields */ + int if_handle; /* Used to configure filtering */ + u32 if_flags; /* Interface filtering flags */ + u32 *pmac_id; /* MAC addr handle used by BE card */ + u32 uc_macs; /* Count of secondary UC MAC programmed */ + unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)]; + u16 vlans_added; + + u32 beacon_state; /* for set_phys_id */ + + bool eeh_error; + bool fw_timeout; + bool hw_error; + + u32 port_num; + char port_name; + u8 mc_type; + u32 function_mode; + u32 function_caps; + u32 rx_fc; /* Rx flow control */ + u32 tx_fc; /* Tx flow control */ + bool stats_cmd_sent; + struct { + u32 size; + u32 total_size; + u64 io_addr; + } roce_db; + u32 num_msix_roce_vec; + struct ocrdma_dev *ocrdma_dev; + struct list_head entry; + + u32 flash_status; + struct completion et_cmd_compl; + + struct be_resources pool_res; /* 
resources available for the port */ + struct be_resources res; /* resources available for the func */ + u16 num_vfs; /* Number of VFs provisioned by PF */ + u8 virtfn; + struct be_vf_cfg *vf_cfg; + bool be3_native; + u32 sli_family; + u8 hba_port_num; + u16 pvid; + __be16 vxlan_port; + int vxlan_port_count; + struct phy_info phy; + u8 wol_cap; + bool wol_en; + u16 asic_rev; + u16 qnq_vid; + u32 msg_enable; + int be_get_temp_freq; + u8 pf_number; + struct rss_info rss_info; +}; + +#define be_physfn(adapter) (!adapter->virtfn) +#define be_virtfn(adapter) (adapter->virtfn) +#define sriov_enabled(adapter) (adapter->flags & \ + BE_FLAGS_SRIOV_ENABLED) + +#define for_all_vfs(adapter, vf_cfg, i) \ + for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ + i++, vf_cfg++) + +#define ON 1 +#define OFF 0 + +#define be_max_vlans(adapter) (adapter->res.max_vlans) +#define be_max_uc(adapter) (adapter->res.max_uc_mac) +#define be_max_mc(adapter) (adapter->res.max_mcast_mac) +#define be_max_vfs(adapter) (adapter->pool_res.max_vfs) +#define be_max_rss(adapter) (adapter->res.max_rss_qs) +#define be_max_txqs(adapter) (adapter->res.max_tx_qs) +#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs) +#define be_max_rxqs(adapter) (adapter->res.max_rx_qs) +#define be_max_eqs(adapter) (adapter->res.max_evt_qs) +#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags) + +static inline u16 be_max_qs(struct be_adapter *adapter) +{ + /* If no RSS, need atleast the one def RXQ */ + u16 num = max_t(u16, be_max_rss(adapter), 1); + + num = min(num, be_max_eqs(adapter)); + return min_t(u16, num, num_online_cpus()); +} + +/* Is BE in pvid_tagging mode */ +#define be_pvid_tagging_enabled(adapter) (adapter->pvid) + +/* Is BE in QNQ multi-channel mode */ +#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE) + +#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ + adapter->pdev->device == OC_DEVICE_ID4) + +#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \ + adapter->pdev->device == OC_DEVICE_ID6) + +#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \ + adapter->pdev->device == OC_DEVICE_ID2) + +#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \ + adapter->pdev->device == OC_DEVICE_ID1) + +#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter)) + +#define be_roce_supported(adapter) (skyhawk_chip(adapter) && \ + (adapter->function_mode & RDMA_ENABLED)) + +extern const struct ethtool_ops be_ethtool_ops; + +#define msix_enabled(adapter) (adapter->num_msix_vec > 0) +#define num_irqs(adapter) (msix_enabled(adapter) ? 
\ + adapter->num_msix_vec : 1) +#define tx_stats(txo) (&(txo)->stats) +#define rx_stats(rxo) (&(rxo)->stats) + +/* The default RXQ is the last RXQ */ +#define default_rxo(adpt) (&adpt->rx_obj[adpt->num_rx_qs - 1]) + +#define for_all_rx_queues(adapter, rxo, i) \ + for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \ + i++, rxo++) + +#define for_all_rss_queues(adapter, rxo, i) \ + for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rss_qs; \ + i++, rxo++) + +#define for_all_tx_queues(adapter, txo, i) \ + for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \ + i++, txo++) + +#define for_all_evt_queues(adapter, eqo, i) \ + for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \ + i++, eqo++) + +#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i) \ + for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\ + i += adapter->num_evt_qs, rxo += adapter->num_evt_qs) + +#define for_all_tx_queues_on_eq(adapter, eqo, txo, i) \ + for (i = eqo->idx, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;\ + i += adapter->num_evt_qs, txo += adapter->num_evt_qs) + +#define is_mcc_eqo(eqo) (eqo->idx == 0) +#define mcc_eqo(adapter) (&adapter->eq_obj[0]) + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) + +/* Returns number of pages spanned by the data starting at the given addr */ +#define PAGES_4K_SPANNED(_address, size) \ + ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \ + (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K)) + +/* Returns bit offset within a DWORD of a bitfield */ +#define AMAP_BIT_OFFSET(_struct, field) \ + (((size_t)&(((_struct *)0)->field))%32) + +/* Returns the bit mask of the field that is NOT shifted into location. */ +static inline u32 amap_mask(u32 bitsize) +{ + return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1); +} + +static inline void +amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value) +{ + u32 *dw = (u32 *) ptr + dw_offset; + *dw &= ~(mask << offset); + *dw |= (mask & value) << offset; +} + +#define AMAP_SET_BITS(_struct, field, ptr, val) \ + amap_set(ptr, \ + offsetof(_struct, field)/32, \ + amap_mask(sizeof(((_struct *)0)->field)), \ + AMAP_BIT_OFFSET(_struct, field), \ + val) + +static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) +{ + u32 *dw = (u32 *) ptr; + return mask & (*(dw + dw_offset) >> offset); +} + +#define AMAP_GET_BITS(_struct, field, ptr) \ + amap_get(ptr, \ + offsetof(_struct, field)/32, \ + amap_mask(sizeof(((_struct *)0)->field)), \ + AMAP_BIT_OFFSET(_struct, field)) + +#define GET_RX_COMPL_V0_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr) + +#define GET_RX_COMPL_V1_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr) + +#define GET_TX_COMPL_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr) + +#define SET_TX_WRB_HDR_BITS(field, ptr, val) \ + AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val) + +#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len) +#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len) +static inline void swap_dws(void *wrb, int len) +{ +#ifdef __BIG_ENDIAN + u32 *dw = wrb; + BUG_ON(len % 4); + do { + *dw = cpu_to_le32(*dw); + dw++; + len -= 4; + } while (len); +#endif /* __BIG_ENDIAN */ +} + +#define be_cmd_status(status) (status > 0 ? 
-EIO : status) + +static inline u8 is_tcp_pkt(struct sk_buff *skb) +{ + u8 val = 0; + + if (ip_hdr(skb)->version == 4) + val = (ip_hdr(skb)->protocol == IPPROTO_TCP); + else if (ip_hdr(skb)->version == 6) + val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP); + + return val; +} + +static inline u8 is_udp_pkt(struct sk_buff *skb) +{ + u8 val = 0; + + if (ip_hdr(skb)->version == 4) + val = (ip_hdr(skb)->protocol == IPPROTO_UDP); + else if (ip_hdr(skb)->version == 6) + val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP); + + return val; +} + +static inline bool is_ipv4_pkt(struct sk_buff *skb) +{ + return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; +} + +static inline bool be_multi_rxq(const struct be_adapter *adapter) +{ + return adapter->num_rx_qs > 1; +} + +static inline bool be_error(struct be_adapter *adapter) +{ + return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout; +} + +static inline bool be_hw_error(struct be_adapter *adapter) +{ + return adapter->eeh_error || adapter->hw_error; +} + +static inline void be_clear_all_error(struct be_adapter *adapter) +{ + adapter->eeh_error = false; + adapter->hw_error = false; + adapter->fw_timeout = false; +} + +void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, + u16 num_popped); +void be_link_status_update(struct be_adapter *adapter, u8 link_status); +void be_parse_stats(struct be_adapter *adapter); +int be_load_fw(struct be_adapter *adapter, u8 *func); +bool be_is_wol_supported(struct be_adapter *adapter); +bool be_pause_supported(struct be_adapter *adapter); +u32 be_get_fw_log_level(struct be_adapter *adapter); +int be_update_queues(struct be_adapter *adapter); +int be_poll(struct napi_struct *napi, int budget); + +/* + * internal function to initialize-cleanup roce device. + */ +void be_roce_dev_add(struct be_adapter *); +void be_roce_dev_remove(struct be_adapter *); + +/* + * internal function to open-close roce device during ifup-ifdown. + */ +void be_roce_dev_open(struct be_adapter *); +void be_roce_dev_close(struct be_adapter *); +void be_roce_dev_shutdown(struct be_adapter *); + +#endif /* BE_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c new file mode 100644 index 000000000..c5e1d0ac7 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -0,0 +1,4248 @@ +/* + * Copyright (C) 2005 - 2014 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + */ + +#include +#include "be.h" +#include "be_cmds.h" + +static char *be_port_misconfig_evt_desc[] = { + "A valid SFP module detected", + "Optics faulted/ incorrectly installed/ not installed.", + "Optics of two types installed.", + "Incompatible optics.", + "Unknown port SFP status" +}; + +static char *be_port_misconfig_remedy_desc[] = { + "", + "Reseat optics. 
If issue not resolved, replace", + "Remove one optic or install matching pair of optics", + "Replace with compatible optics for card to function", + "" +}; + +static struct be_cmd_priv_map cmd_priv_map[] = { + { + OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, + CMD_SUBSYSTEM_ETH, + BE_PRIV_LNKMGMT | BE_PRIV_VHADM | + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_COMMON_GET_FLOW_CONTROL, + CMD_SUBSYSTEM_COMMON, + BE_PRIV_LNKQUERY | BE_PRIV_VHADM | + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_COMMON_SET_FLOW_CONTROL, + CMD_SUBSYSTEM_COMMON, + BE_PRIV_LNKMGMT | BE_PRIV_VHADM | + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_ETH_GET_PPORT_STATS, + CMD_SUBSYSTEM_ETH, + BE_PRIV_LNKMGMT | BE_PRIV_VHADM | + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_COMMON_GET_PHY_DETAILS, + CMD_SUBSYSTEM_COMMON, + BE_PRIV_LNKMGMT | BE_PRIV_VHADM | + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + } +}; + +static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) +{ + int i; + int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); + u32 cmd_privileges = adapter->cmd_privileges; + + for (i = 0; i < num_entries; i++) + if (opcode == cmd_priv_map[i].opcode && + subsystem == cmd_priv_map[i].subsystem) + if (!(cmd_privileges & cmd_priv_map[i].priv_mask)) + return false; + + return true; +} + +static inline void *embedded_payload(struct be_mcc_wrb *wrb) +{ + return wrb->payload.embedded_payload; +} + +static void be_mcc_notify(struct be_adapter *adapter) +{ + struct be_queue_info *mccq = &adapter->mcc_obj.q; + u32 val = 0; + + if (be_error(adapter)) + return; + + val |= mccq->id & DB_MCCQ_RING_ID_MASK; + val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; + + wmb(); + iowrite32(val, adapter->db + DB_MCCQ_OFFSET); +} + +/* To check if valid bit is set, check the entire word as we don't know + * the endianness of the data (old entry is host endian while a new entry is + * little endian) */ +static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) +{ + u32 flags; + + if (compl->flags != 0) { + flags = le32_to_cpu(compl->flags); + if (flags & CQE_FLAGS_VALID_MASK) { + compl->flags = flags; + return true; + } + } + return false; +} + +/* Need to reset the entire word that houses the valid bit */ +static inline void be_mcc_compl_use(struct be_mcc_compl *compl) +{ + compl->flags = 0; +} + +static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1) +{ + unsigned long addr; + + addr = tag1; + addr = ((addr << 16) << 16) | tag0; + return (void *)addr; +} + +static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status) +{ + if (base_status == MCC_STATUS_NOT_SUPPORTED || + base_status == MCC_STATUS_ILLEGAL_REQUEST || + addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES || + (opcode == OPCODE_COMMON_WRITE_FLASHROM && + (base_status == MCC_STATUS_ILLEGAL_FIELD || + addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH))) + return true; + else + return false; +} + +/* Place holder for all the async MCC cmds wherein the caller is not in a busy + * loop (has not issued be_mcc_notify_wait()) + */ +static void be_async_cmd_process(struct be_adapter *adapter, + struct be_mcc_compl *compl, + struct be_cmd_resp_hdr *resp_hdr) +{ + enum mcc_base_status base_status = base_status(compl->status); + u8 opcode = 0, subsystem = 0; + + if (resp_hdr) { + opcode = resp_hdr->opcode; + subsystem = resp_hdr->subsystem; + } + + if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && + subsystem == CMD_SUBSYSTEM_LOWLEVEL) { + complete(&adapter->et_cmd_compl); + return; + } + + if ((opcode == 
OPCODE_COMMON_WRITE_FLASHROM || + opcode == OPCODE_COMMON_WRITE_OBJECT) && + subsystem == CMD_SUBSYSTEM_COMMON) { + adapter->flash_status = compl->status; + complete(&adapter->et_cmd_compl); + return; + } + + if ((opcode == OPCODE_ETH_GET_STATISTICS || + opcode == OPCODE_ETH_GET_PPORT_STATS) && + subsystem == CMD_SUBSYSTEM_ETH && + base_status == MCC_STATUS_SUCCESS) { + be_parse_stats(adapter); + adapter->stats_cmd_sent = false; + return; + } + + if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && + subsystem == CMD_SUBSYSTEM_COMMON) { + if (base_status == MCC_STATUS_SUCCESS) { + struct be_cmd_resp_get_cntl_addnl_attribs *resp = + (void *)resp_hdr; + adapter->drv_stats.be_on_die_temperature = + resp->on_die_temperature; + } else { + adapter->be_get_temp_freq = 0; + } + return; + } +} + +static int be_mcc_compl_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + enum mcc_base_status base_status; + enum mcc_addl_status addl_status; + struct be_cmd_resp_hdr *resp_hdr; + u8 opcode = 0, subsystem = 0; + + /* Just swap the status to host endian; mcc tag is opaquely copied + * from mcc_wrb */ + be_dws_le_to_cpu(compl, 4); + + base_status = base_status(compl->status); + addl_status = addl_status(compl->status); + + resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); + if (resp_hdr) { + opcode = resp_hdr->opcode; + subsystem = resp_hdr->subsystem; + } + + be_async_cmd_process(adapter, compl, resp_hdr); + + if (base_status != MCC_STATUS_SUCCESS && + !be_skip_err_log(opcode, base_status, addl_status)) { + if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { + dev_warn(&adapter->pdev->dev, + "VF is not privileged to issue opcode %d-%d\n", + opcode, subsystem); + } else { + dev_err(&adapter->pdev->dev, + "opcode %d-%d failed:status %d-%d\n", + opcode, subsystem, base_status, addl_status); + } + } + return compl->status; +} + +/* Link state evt is a string of bytes; no need for endian swapping */ +static void be_async_link_state_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + struct be_async_event_link_state *evt = + (struct be_async_event_link_state *)compl; + + /* When link status changes, link speed must be re-queried from FW */ + adapter->phy.link_speed = -1; + + /* On BEx the FW does not send a separate link status + * notification for physical and logical link. + * On other chips just process the logical link + * status notification + */ + if (!BEx_chip(adapter) && + !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK)) + return; + + /* For the initial link status do not rely on the ASYNC event as + * it may not be received in some cases. 
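+ * (Async link-state events are only propagated to be_link_status_update()
+ * once BE_FLAGS_LINK_STATUS_INIT is set, presumably after the driver has
+ * fetched the initial link state itself.)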
+ */ + if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) + be_link_status_update(adapter, + evt->port_link_status & LINK_STATUS_MASK); +} + +static void be_async_port_misconfig_event_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + struct be_async_event_misconfig_port *evt = + (struct be_async_event_misconfig_port *)compl; + u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1); + struct device *dev = &adapter->pdev->dev; + u8 port_misconfig_evt; + + port_misconfig_evt = + ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff); + + /* Log an error message that would allow a user to determine + * whether the SFPs have an issue + */ + dev_info(dev, "Port %c: %s %s", adapter->port_name, + be_port_misconfig_evt_desc[port_misconfig_evt], + be_port_misconfig_remedy_desc[port_misconfig_evt]); + + if (port_misconfig_evt == INCOMPATIBLE_SFP) + adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP; +} + +/* Grp5 CoS Priority evt */ +static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + struct be_async_event_grp5_cos_priority *evt = + (struct be_async_event_grp5_cos_priority *)compl; + + if (evt->valid) { + adapter->vlan_prio_bmap = evt->available_priority_bmap; + adapter->recommended_prio &= ~VLAN_PRIO_MASK; + adapter->recommended_prio = + evt->reco_default_priority << VLAN_PRIO_SHIFT; + } +} + +/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */ +static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + struct be_async_event_grp5_qos_link_speed *evt = + (struct be_async_event_grp5_qos_link_speed *)compl; + + if (adapter->phy.link_speed >= 0 && + evt->physical_port == adapter->port_num) + adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10; +} + +/*Grp5 PVID evt*/ +static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + struct be_async_event_grp5_pvid_state *evt = + (struct be_async_event_grp5_pvid_state *)compl; + + if (evt->enabled) { + adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; + dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); + } else { + adapter->pvid = 0; + } +} + +static void be_async_grp5_evt_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) & + ASYNC_EVENT_TYPE_MASK; + + switch (event_type) { + case ASYNC_EVENT_COS_PRIORITY: + be_async_grp5_cos_priority_process(adapter, compl); + break; + case ASYNC_EVENT_QOS_SPEED: + be_async_grp5_qos_speed_process(adapter, compl); + break; + case ASYNC_EVENT_PVID_STATE: + be_async_grp5_pvid_state_process(adapter, compl); + break; + default: + break; + } +} + +static void be_async_dbg_evt_process(struct be_adapter *adapter, + struct be_mcc_compl *cmp) +{ + u8 event_type = 0; + struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp; + + event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & + ASYNC_EVENT_TYPE_MASK; + + switch (event_type) { + case ASYNC_DEBUG_EVENT_TYPE_QNQ: + if (evt->valid) + adapter->qnq_vid = le16_to_cpu(evt->vlan_tag); + adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD; + break; + default: + dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n", + event_type); + break; + } +} + +static void be_async_sliport_evt_process(struct be_adapter *adapter, + struct be_mcc_compl *cmp) +{ + u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & + ASYNC_EVENT_TYPE_MASK; + + if (event_type == ASYNC_EVENT_PORT_MISCONFIG) + 
be_async_port_misconfig_event_process(adapter, cmp); +} + +static inline bool is_link_state_evt(u32 flags) +{ + return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_LINK_STATE; +} + +static inline bool is_grp5_evt(u32 flags) +{ + return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_GRP_5; +} + +static inline bool is_dbg_evt(u32 flags) +{ + return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_QNQ; +} + +static inline bool is_sliport_evt(u32 flags) +{ + return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_SLIPORT; +} + +static void be_mcc_event_process(struct be_adapter *adapter, + struct be_mcc_compl *compl) +{ + if (is_link_state_evt(compl->flags)) + be_async_link_state_process(adapter, compl); + else if (is_grp5_evt(compl->flags)) + be_async_grp5_evt_process(adapter, compl); + else if (is_dbg_evt(compl->flags)) + be_async_dbg_evt_process(adapter, compl); + else if (is_sliport_evt(compl->flags)) + be_async_sliport_evt_process(adapter, compl); +} + +static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) +{ + struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; + struct be_mcc_compl *compl = queue_tail_node(mcc_cq); + + if (be_mcc_compl_is_new(compl)) { + queue_tail_inc(mcc_cq); + return compl; + } + return NULL; +} + +void be_async_mcc_enable(struct be_adapter *adapter) +{ + spin_lock_bh(&adapter->mcc_cq_lock); + + be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); + adapter->mcc_obj.rearm_cq = true; + + spin_unlock_bh(&adapter->mcc_cq_lock); +} + +void be_async_mcc_disable(struct be_adapter *adapter) +{ + spin_lock_bh(&adapter->mcc_cq_lock); + + adapter->mcc_obj.rearm_cq = false; + be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); + + spin_unlock_bh(&adapter->mcc_cq_lock); +} + +int be_process_mcc(struct be_adapter *adapter) +{ + struct be_mcc_compl *compl; + int num = 0, status = 0; + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + + spin_lock(&adapter->mcc_cq_lock); + + while ((compl = be_mcc_compl_get(adapter))) { + if (compl->flags & CQE_FLAGS_ASYNC_MASK) { + be_mcc_event_process(adapter, compl); + } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { + status = be_mcc_compl_process(adapter, compl); + atomic_dec(&mcc_obj->q.used); + } + be_mcc_compl_use(compl); + num++; + } + + if (num) + be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); + + spin_unlock(&adapter->mcc_cq_lock); + return status; +} + +/* Wait till no more pending mcc requests are present */ +static int be_mcc_wait_compl(struct be_adapter *adapter) +{ +#define mcc_timeout 120000 /* 12s timeout */ + int i, status = 0; + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + + for (i = 0; i < mcc_timeout; i++) { + if (be_error(adapter)) + return -EIO; + + local_bh_disable(); + status = be_process_mcc(adapter); + local_bh_enable(); + + if (atomic_read(&mcc_obj->q.used) == 0) + break; + udelay(100); + } + if (i == mcc_timeout) { + dev_err(&adapter->pdev->dev, "FW not responding\n"); + adapter->fw_timeout = true; + return -EIO; + } + return status; +} + +/* Notify MCC requests and wait for completion */ +static int be_mcc_notify_wait(struct be_adapter *adapter) +{ + int status; + struct be_mcc_wrb *wrb; + struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + u16 index = mcc_obj->q.head; + struct be_cmd_resp_hdr *resp; + + index_dec(&index, mcc_obj->q.len); + wrb = queue_index_node(&mcc_obj->q, index); + + resp = be_decode_resp_hdr(wrb->tag0, 
wrb->tag1); + + be_mcc_notify(adapter); + + status = be_mcc_wait_compl(adapter); + if (status == -EIO) + goto out; + + status = (resp->base_status | + ((resp->addl_status & CQE_ADDL_STATUS_MASK) << + CQE_ADDL_STATUS_SHIFT)); +out: + return status; +} + +static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) +{ + int msecs = 0; + u32 ready; + + do { + if (be_error(adapter)) + return -EIO; + + ready = ioread32(db); + if (ready == 0xffffffff) + return -1; + + ready &= MPU_MAILBOX_DB_RDY_MASK; + if (ready) + break; + + if (msecs > 4000) { + dev_err(&adapter->pdev->dev, "FW not responding\n"); + adapter->fw_timeout = true; + be_detect_error(adapter); + return -1; + } + + msleep(1); + msecs++; + } while (true); + + return 0; +} + +/* + * Insert the mailbox address into the doorbell in two steps + * Polls on the mbox doorbell till a command completion (or a timeout) occurs + */ +static int be_mbox_notify_wait(struct be_adapter *adapter) +{ + int status; + u32 val = 0; + void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; + struct be_dma_mem *mbox_mem = &adapter->mbox_mem; + struct be_mcc_mailbox *mbox = mbox_mem->va; + struct be_mcc_compl *compl = &mbox->compl; + + /* wait for ready to be set */ + status = be_mbox_db_ready_wait(adapter, db); + if (status != 0) + return status; + + val |= MPU_MAILBOX_DB_HI_MASK; + /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ + val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; + iowrite32(val, db); + + /* wait for ready to be set */ + status = be_mbox_db_ready_wait(adapter, db); + if (status != 0) + return status; + + val = 0; + /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ + val |= (u32)(mbox_mem->dma >> 4) << 2; + iowrite32(val, db); + + status = be_mbox_db_ready_wait(adapter, db); + if (status != 0) + return status; + + /* A cq entry has been made now */ + if (be_mcc_compl_is_new(compl)) { + status = be_mcc_compl_process(adapter, &mbox->compl); + be_mcc_compl_use(compl); + if (status) + return status; + } else { + dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); + return -1; + } + return 0; +} + +static u16 be_POST_stage_get(struct be_adapter *adapter) +{ + u32 sem; + + if (BEx_chip(adapter)) + sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); + else + pci_read_config_dword(adapter->pdev, + SLIPORT_SEMAPHORE_OFFSET_SH, &sem); + + return sem & POST_STAGE_MASK; +} + +static int lancer_wait_ready(struct be_adapter *adapter) +{ +#define SLIPORT_READY_TIMEOUT 30 + u32 sliport_status; + int i; + + for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + if (sliport_status & SLIPORT_STATUS_RDY_MASK) + return 0; + + if (sliport_status & SLIPORT_STATUS_ERR_MASK && + !(sliport_status & SLIPORT_STATUS_RN_MASK)) + return -EIO; + + msleep(1000); + } + + return sliport_status ? 
: -1; +} + +int be_fw_wait_ready(struct be_adapter *adapter) +{ + u16 stage; + int status, timeout = 0; + struct device *dev = &adapter->pdev->dev; + + if (lancer_chip(adapter)) { + status = lancer_wait_ready(adapter); + if (status) { + stage = status; + goto err; + } + return 0; + } + + do { + /* There's no means to poll POST state on BE2/3 VFs */ + if (BEx_chip(adapter) && be_virtfn(adapter)) + return 0; + + stage = be_POST_stage_get(adapter); + if (stage == POST_STAGE_ARMFW_RDY) + return 0; + + dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout); + if (msleep_interruptible(2000)) { + dev_err(dev, "Waiting for POST aborted\n"); + return -EINTR; + } + timeout += 2; + } while (timeout < 60); + +err: + dev_err(dev, "POST timeout; stage=%#x\n", stage); + return -ETIMEDOUT; +} + +static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) +{ + return &wrb->payload.sgl[0]; +} + +static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr) +{ + wrb->tag0 = addr & 0xFFFFFFFF; + wrb->tag1 = upper_32_bits(addr); +} + +/* Don't touch the hdr after it's prepared */ +/* mem will be NULL for embedded commands */ +static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, + u8 subsystem, u8 opcode, int cmd_len, + struct be_mcc_wrb *wrb, + struct be_dma_mem *mem) +{ + struct be_sge *sge; + + req_hdr->opcode = opcode; + req_hdr->subsystem = subsystem; + req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); + req_hdr->version = 0; + fill_wrb_tags(wrb, (ulong) req_hdr); + wrb->payload_length = cmd_len; + if (mem) { + wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << + MCC_WRB_SGE_CNT_SHIFT; + sge = nonembedded_sgl(wrb); + sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); + sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(mem->size); + } else + wrb->embedded |= MCC_WRB_EMBEDDED_MASK; + be_dws_cpu_to_le(wrb, 8); +} + +static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, + struct be_dma_mem *mem) +{ + int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); + u64 dma = (u64)mem->dma; + + for (i = 0; i < buf_pages; i++) { + pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); + pages[i].hi = cpu_to_le32(upper_32_bits(dma)); + dma += PAGE_SIZE_4K; + } +} + +static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) +{ + struct be_dma_mem *mbox_mem = &adapter->mbox_mem; + struct be_mcc_wrb *wrb + = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; + memset(wrb, 0, sizeof(*wrb)); + return wrb; +} + +static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) +{ + struct be_queue_info *mccq = &adapter->mcc_obj.q; + struct be_mcc_wrb *wrb; + + if (!mccq->created) + return NULL; + + if (atomic_read(&mccq->used) >= mccq->len) + return NULL; + + wrb = queue_head_node(mccq); + queue_head_inc(mccq); + atomic_inc(&mccq->used); + memset(wrb, 0, sizeof(*wrb)); + return wrb; +} + +static bool use_mcc(struct be_adapter *adapter) +{ + return adapter->mcc_obj.q.created; +} + +/* Must be used only in process context */ +static int be_cmd_lock(struct be_adapter *adapter) +{ + if (use_mcc(adapter)) { + spin_lock_bh(&adapter->mcc_lock); + return 0; + } else { + return mutex_lock_interruptible(&adapter->mbox_lock); + } +} + +/* Must be used only in process context */ +static void be_cmd_unlock(struct be_adapter *adapter) +{ + if (use_mcc(adapter)) + spin_unlock_bh(&adapter->mcc_lock); + else + return mutex_unlock(&adapter->mbox_lock); +} + +static struct be_mcc_wrb *be_cmd_copy(struct be_adapter 
*adapter, + struct be_mcc_wrb *wrb) +{ + struct be_mcc_wrb *dest_wrb; + + if (use_mcc(adapter)) { + dest_wrb = wrb_from_mccq(adapter); + if (!dest_wrb) + return NULL; + } else { + dest_wrb = wrb_from_mbox(adapter); + } + + memcpy(dest_wrb, wrb, sizeof(*wrb)); + if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK)) + fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb)); + + return dest_wrb; +} + +/* Must be used only in process context */ +static int be_cmd_notify_wait(struct be_adapter *adapter, + struct be_mcc_wrb *wrb) +{ + struct be_mcc_wrb *dest_wrb; + int status; + + status = be_cmd_lock(adapter); + if (status) + return status; + + dest_wrb = be_cmd_copy(adapter, wrb); + if (!dest_wrb) + return -EBUSY; + + if (use_mcc(adapter)) + status = be_mcc_notify_wait(adapter); + else + status = be_mbox_notify_wait(adapter); + + if (!status) + memcpy(wrb, dest_wrb, sizeof(*wrb)); + + be_cmd_unlock(adapter); + return status; +} + +/* Tell fw we're about to start firing cmds by writing a + * special pattern across the wrb hdr; uses mbox + */ +int be_cmd_fw_init(struct be_adapter *adapter) +{ + u8 *wrb; + int status; + + if (lancer_chip(adapter)) + return 0; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = (u8 *)wrb_from_mbox(adapter); + *wrb++ = 0xFF; + *wrb++ = 0x12; + *wrb++ = 0x34; + *wrb++ = 0xFF; + *wrb++ = 0xFF; + *wrb++ = 0x56; + *wrb++ = 0x78; + *wrb = 0xFF; + + status = be_mbox_notify_wait(adapter); + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +/* Tell fw we're done with firing cmds by writing a + * special pattern across the wrb hdr; uses mbox + */ +int be_cmd_fw_clean(struct be_adapter *adapter) +{ + u8 *wrb; + int status; + + if (lancer_chip(adapter)) + return 0; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = (u8 *)wrb_from_mbox(adapter); + *wrb++ = 0xFF; + *wrb++ = 0xAA; + *wrb++ = 0xBB; + *wrb++ = 0xFF; + *wrb++ = 0xFF; + *wrb++ = 0xCC; + *wrb++ = 0xDD; + *wrb = 0xFF; + + status = be_mbox_notify_wait(adapter); + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_eq_create *req; + struct be_dma_mem *q_mem = &eqo->q.dma_mem; + int status, ver = 0; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, + NULL); + + /* Support for EQ_CREATEv2 available only SH-R onwards */ + if (!(BEx_chip(adapter) || lancer_chip(adapter))) + ver = 2; + + req->hdr.version = ver; + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + + AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); + /* 4byte eqe*/ + AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); + AMAP_SET_BITS(struct amap_eq_context, count, req->context, + __ilog2_u32(eqo->q.len / 256)); + be_dws_cpu_to_le(req->context, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); + + eqo->q.id = le16_to_cpu(resp->eq_id); + eqo->msix_idx = + (ver == 2) ? 
le16_to_cpu(resp->msix_idx) : eqo->idx; + eqo->q.created = true; + } + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +/* Use MCC */ +int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, + bool permanent, u32 if_handle, u32 pmac_id) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_mac_query *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, + NULL); + req->type = MAC_ADDRESS_TYPE_NETWORK; + if (permanent) { + req->permanent = 1; + } else { + req->if_id = cpu_to_le16((u16)if_handle); + req->pmac_id = cpu_to_le32(pmac_id); + req->permanent = 0; + } + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); + + memcpy(mac_addr, resp->mac.addr, ETH_ALEN); + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses synchronous MCCQ */ +int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, + u32 if_id, u32 *pmac_id, u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_pmac_add *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, + NULL); + + req->hdr.domain = domain; + req->if_id = cpu_to_le32(if_id); + memcpy(req->mac_address, mac_addr, ETH_ALEN); + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); + + *pmac_id = le32_to_cpu(resp->pmac_id); + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + + if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) + status = -EPERM; + + return status; +} + +/* Uses synchronous MCCQ */ +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_pmac_del *req; + int status; + + if (pmac_id == -1) + return 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), + wrb, NULL); + + req->hdr.domain = dom; + req->if_id = cpu_to_le32(if_id); + req->pmac_id = cpu_to_le32(pmac_id); + + status = be_mcc_notify_wait(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses Mbox */ +int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, + struct be_queue_info *eq, bool no_delay, int coalesce_wm) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_cq_create *req; + struct be_dma_mem *q_mem = &cq->dma_mem; + void *ctxt; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + ctxt = &req->context; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, + NULL); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + + if (BEx_chip(adapter)) { + AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, + coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context_be, nodelay, + ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, + __ilog2_u32(cq->len / 
256)); + AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); + } else { + req->hdr.version = 2; + req->page_size = 1; /* 1 for 4K */ + + /* coalesce-wm field in this cmd is not relevant to Lancer. + * Lancer uses COMMON_MODIFY_CQ to set this field + */ + if (!lancer_chip(adapter)) + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, + no_delay); + AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, + __ilog2_u32(cq->len / 256)); + AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); + } + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); + + cq->id = le16_to_cpu(resp->cq_id); + cq->created = true; + } + + mutex_unlock(&adapter->mbox_lock); + + return status; +} + +static u32 be_encoded_q_len(int q_len) +{ + u32 len_encoded = fls(q_len); /* log2(len) + 1 */ + + if (len_encoded == 16) + len_encoded = 0; + return len_encoded; +} + +static int be_cmd_mccq_ext_create(struct be_adapter *adapter, + struct be_queue_info *mccq, + struct be_queue_info *cq) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_mcc_ext_create *req; + struct be_dma_mem *q_mem = &mccq->dma_mem; + void *ctxt; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + ctxt = &req->context; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, + NULL); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + if (BEx_chip(adapter)) { + AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); + } else { + req->hdr.version = 1; + req->cq_id = cpu_to_le16(cq->id); + + AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id, + ctxt, cq->id); + AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid, + ctxt, 1); + } + + /* Subscribe to Link State, Sliport Event and Group 5 Events + * (bits 1, 5 and 17 set) + */ + req->async_event_bitmap[0] = + cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) | + BIT(ASYNC_EVENT_CODE_GRP_5) | + BIT(ASYNC_EVENT_CODE_QNQ) | + BIT(ASYNC_EVENT_CODE_SLIPORT)); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + + mccq->id = le16_to_cpu(resp->id); + mccq->created = true; + } + mutex_unlock(&adapter->mbox_lock); + + return status; +} + +static int be_cmd_mccq_org_create(struct be_adapter *adapter, + struct be_queue_info *mccq, + struct be_queue_info *cq) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_mcc_create *req; + struct be_dma_mem *q_mem = &mccq->dma_mem; + void *ctxt; + int status; + + if 
(mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + ctxt = &req->context; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, + NULL); + + req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); + + AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + + mccq->id = le16_to_cpu(resp->id); + mccq->created = true; + } + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +int be_cmd_mccq_create(struct be_adapter *adapter, + struct be_queue_info *mccq, struct be_queue_info *cq) +{ + int status; + + status = be_cmd_mccq_ext_create(adapter, mccq, cq); + if (status && BEx_chip(adapter)) { + dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " + "or newer to avoid conflicting priorities between NIC " + "and FCoE traffic"); + status = be_cmd_mccq_org_create(adapter, mccq, cq); + } + return status; +} + +int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) +{ + struct be_mcc_wrb wrb = {0}; + struct be_cmd_req_eth_tx_create *req; + struct be_queue_info *txq = &txo->q; + struct be_queue_info *cq = &txo->cq; + struct be_dma_mem *q_mem = &txq->dma_mem; + int status, ver = 0; + + req = embedded_payload(&wrb); + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); + + if (lancer_chip(adapter)) { + req->hdr.version = 1; + } else if (BEx_chip(adapter)) { + if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) + req->hdr.version = 2; + } else { /* For SH */ + req->hdr.version = 2; + } + + if (req->hdr.version > 0) + req->if_id = cpu_to_le16(adapter->if_handle); + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + req->ulp_num = BE_ULP1_NUM; + req->type = BE_ETH_TX_RING_TYPE_STANDARD; + req->cq_id = cpu_to_le16(cq->id); + req->queue_size = be_encoded_q_len(txq->len); + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + ver = req->hdr.version; + + status = be_cmd_notify_wait(adapter, &wrb); + if (!status) { + struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb); + + txq->id = le16_to_cpu(resp->cid); + if (ver == 2) + txo->db_offset = le32_to_cpu(resp->db_offset); + else + txo->db_offset = DB_TXULP1_OFFSET; + txq->created = true; + } + + return status; +} + +/* Uses MCC */ +int be_cmd_rxq_create(struct be_adapter *adapter, + struct be_queue_info *rxq, u16 cq_id, u16 frag_size, + u32 if_id, u32 rss, u8 *rss_id) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_eth_rx_create *req; + struct be_dma_mem *q_mem = &rxq->dma_mem; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); + + req->cq_id = cpu_to_le16(cq_id); + req->frag_size = fls(frag_size) - 1; + req->num_pages = 2; + be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + req->interface_id = cpu_to_le32(if_id); + req->max_frame_size = 
cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); + req->rss_queue = cpu_to_le32(rss); + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); + + rxq->id = le16_to_cpu(resp->id); + rxq->created = true; + *rss_id = resp->rss_id; + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Generic destroyer function for all types of queues + * Uses Mbox + */ +int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, + int queue_type) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_q_destroy *req; + u8 subsys = 0, opcode = 0; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + + switch (queue_type) { + case QTYPE_EQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_EQ_DESTROY; + break; + case QTYPE_CQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_CQ_DESTROY; + break; + case QTYPE_TXQ: + subsys = CMD_SUBSYSTEM_ETH; + opcode = OPCODE_ETH_TX_DESTROY; + break; + case QTYPE_RXQ: + subsys = CMD_SUBSYSTEM_ETH; + opcode = OPCODE_ETH_RX_DESTROY; + break; + case QTYPE_MCCQ: + subsys = CMD_SUBSYSTEM_COMMON; + opcode = OPCODE_COMMON_MCC_DESTROY; + break; + default: + BUG(); + } + + be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, + NULL); + req->id = cpu_to_le16(q->id); + + status = be_mbox_notify_wait(adapter); + q->created = false; + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +/* Uses MCC */ +int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_q_destroy *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); + req->id = cpu_to_le16(q->id); + + status = be_mcc_notify_wait(adapter); + q->created = false; + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Create an rx filtering policy configuration on an i/f + * Will use MBOX only if MCCQ has not been created. 
+ */ +int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, + u32 *if_handle, u32 domain) +{ + struct be_mcc_wrb wrb = {0}; + struct be_cmd_req_if_create *req; + int status; + + req = embedded_payload(&wrb); + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_INTERFACE_CREATE, + sizeof(*req), &wrb, NULL); + req->hdr.domain = domain; + req->capability_flags = cpu_to_le32(cap_flags); + req->enable_flags = cpu_to_le32(en_flags); + req->pmac_invalid = true; + + status = be_cmd_notify_wait(adapter, &wrb); + if (!status) { + struct be_cmd_resp_if_create *resp = embedded_payload(&wrb); + + *if_handle = le32_to_cpu(resp->interface_id); + + /* Hack to retrieve VF's pmac-id on BE3 */ + if (BE3_chip(adapter) && !be_physfn(adapter)) + adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id); + } + return status; +} + +/* Uses MCCQ */ +int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_if_destroy *req; + int status; + + if (interface_id == -1) + return 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_INTERFACE_DESTROY, + sizeof(*req), wrb, NULL); + req->hdr.domain = domain; + req->interface_id = cpu_to_le32(interface_id); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Get stats is a non embedded command: the request is not embedded inside + * WRB but is a separate dma memory block + * Uses asynchronous MCC + */ +int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_hdr *hdr; + int status = 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + hdr = nonemb_cmd->va; + + be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, + nonemb_cmd); + + /* version 1 of the cmd is not supported only by BE2 */ + if (BE2_chip(adapter)) + hdr->version = 0; + if (BE3_chip(adapter) || lancer_chip(adapter)) + hdr->version = 1; + else + hdr->version = 2; + + be_mcc_notify(adapter); + adapter->stats_cmd_sent = true; + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Lancer Stats */ +int lancer_cmd_get_pport_stats(struct be_adapter *adapter, + struct be_dma_mem *nonemb_cmd) +{ + struct be_mcc_wrb *wrb; + struct lancer_cmd_req_pport_stats *req; + int status = 0; + + if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS, + CMD_SUBSYSTEM_ETH)) + return -EPERM; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = nonemb_cmd->va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, + wrb, nonemb_cmd); + + req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); + req->cmd_params.params.reset_stats = 0; + + be_mcc_notify(adapter); + adapter->stats_cmd_sent = true; + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +static int be_mac_to_link_speed(int mac_speed) +{ + switch (mac_speed) { + case PHY_LINK_SPEED_ZERO: + return 0; + case PHY_LINK_SPEED_10MBPS: + return 10; + case PHY_LINK_SPEED_100MBPS: + return 100; + case PHY_LINK_SPEED_1GBPS: + return 1000; + case PHY_LINK_SPEED_10GBPS: + return 
10000; + case PHY_LINK_SPEED_20GBPS: + return 20000; + case PHY_LINK_SPEED_25GBPS: + return 25000; + case PHY_LINK_SPEED_40GBPS: + return 40000; + } + return 0; +} + +/* Uses synchronous mcc + * Returns link_speed in Mbps + */ +int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, + u8 *link_status, u32 dom) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_link_status *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + if (link_status) + *link_status = LINK_DOWN; + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, + sizeof(*req), wrb, NULL); + + /* version 1 of the cmd is not supported only by BE2 */ + if (!BE2_chip(adapter)) + req->hdr.version = 1; + + req->hdr.domain = dom; + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_link_status *resp = embedded_payload(wrb); + + if (link_speed) { + *link_speed = resp->link_speed ? + le16_to_cpu(resp->link_speed) * 10 : + be_mac_to_link_speed(resp->mac_speed); + + if (!resp->logical_link_status) + *link_speed = 0; + } + if (link_status) + *link_status = resp->logical_link_status; + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses synchronous mcc */ +int be_cmd_get_die_temperature(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_cntl_addnl_attribs *req; + int status = 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, + sizeof(*req), wrb, NULL); + + be_mcc_notify(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses synchronous mcc */ +int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_fat *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, + NULL); + req->fat_operation = cpu_to_le32(QUERY_FAT); + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); + + if (log_size && resp->log_size) + *log_size = le32_to_cpu(resp->log_size) - + sizeof(u32); + } +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) +{ + struct be_dma_mem get_fat_cmd; + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_fat *req; + u32 offset = 0, total_size, buf_size, + log_offset = sizeof(u32), payload_len; + int status = 0; + + if (buf_len == 0) + return -EIO; + + total_size = buf_len; + + get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; + get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + get_fat_cmd.size, + &get_fat_cmd.dma, GFP_ATOMIC); + if (!get_fat_cmd.va) { + dev_err(&adapter->pdev->dev, + "Memory allocation failure while reading FAT data\n"); + return -ENOMEM; + } + + spin_lock_bh(&adapter->mcc_lock); + + while (total_size) { + buf_size = min(total_size, (u32)60*1024); + total_size -= buf_size; + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = get_fat_cmd.va; 
+
+		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
+		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+				OPCODE_COMMON_MANAGE_FAT, payload_len,
+				wrb, &get_fat_cmd);
+
+		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
+		req->read_log_offset = cpu_to_le32(log_offset);
+		req->read_log_length = cpu_to_le32(buf_size);
+		req->data_buffer_size = cpu_to_le32(buf_size);
+
+		status = be_mcc_notify_wait(adapter);
+		if (!status) {
+			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
+
+			memcpy(buf + offset,
+				resp->data_buffer,
+				le32_to_cpu(resp->read_log_length));
+		} else {
+			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
+			goto err;
+		}
+		offset += buf_size;
+		log_offset += buf_size;
+	}
+err:
+	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+			  get_fat_cmd.va, get_fat_cmd.dma);
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_get_fw_ver(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_fw_version *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+			NULL);
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
+
+		strlcpy(adapter->fw_ver, resp->firmware_version_string,
+			sizeof(adapter->fw_ver));
+		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+			sizeof(adapter->fw_on_flash));
+	}
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+/* set the EQ delay interval of an EQ to specified value
+ * Uses async mcc
+ */
+static int __be_cmd_modify_eqd(struct be_adapter *adapter,
+			struct be_set_eqd *set_eqd, int num)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_modify_eq_delay *req;
+	int status = 0, i;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+			NULL);
+
+	req->num_eq = cpu_to_le32(num);
+	for (i = 0; i < num; i++) {
+		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+		req->set_eqd[i].phase = 0;
+		req->set_eqd[i].delay_multiplier =
+				cpu_to_le32(set_eqd[i].delay_multiplier);
+	}
+
+	be_mcc_notify(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+			int num)
+{
+	int num_eqs, i = 0;
+
+	while (num) {
+		num_eqs = min(num, 8);
+		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
+		i += num_eqs;
+		num -= num_eqs;
+	}
+
+	return 0;
+}
+
+/* Uses synchronous mcc */
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+			u32 num, u32 domain)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_vlan_config *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+			wrb, NULL);
+	req->hdr.domain = domain;
+
+	req->interface_id = if_id;
+	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ?
+			1 : 0;
+	req->num_vlan = num;
+	memcpy(req->normal_vlan, vtag_array,
+		req->num_vlan * sizeof(vtag_array[0]));
+
+	status = be_mcc_notify_wait(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_dma_mem *mem = &adapter->rx_filter;
+	struct be_cmd_req_rx_filter *req = mem->va;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	memset(req, 0, sizeof(*req));
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+			wrb, mem);
+
+	req->if_id = cpu_to_le32(adapter->if_handle);
+	req->if_flags_mask = cpu_to_le32(flags);
+	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
+
+	if (flags & BE_IF_FLAGS_MULTICAST) {
+		struct netdev_hw_addr *ha;
+		int i = 0;
+
+		/* Reset mcast promisc mode if already set by setting mask
+		 * and not setting flags field
+		 */
+		req->if_flags_mask |=
+			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
+				    be_if_cap_flags(adapter));
+		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
+		netdev_for_each_mc_addr(ha, adapter->netdev)
+			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
+	}
+
+	status = be_mcc_notify_wait(adapter);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
+{
+	struct device *dev = &adapter->pdev->dev;
+
+	if ((flags & be_if_cap_flags(adapter)) != flags) {
+		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
+		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
+			 be_if_cap_flags(adapter));
+	}
+	flags &= be_if_cap_flags(adapter);
+
+	return __be_cmd_rx_filter(adapter, flags, value);
+}
+
+/* Uses synchronous mcc */
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_set_flow_control *req;
+	int status;
+
+	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
+			    CMD_SUBSYSTEM_COMMON))
+		return -EPERM;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+			wrb, NULL);
+
+	req->hdr.version = 1;
+	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
+	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
+
+	status = be_mcc_notify_wait(adapter);
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+
+	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
+		return -EOPNOTSUPP;
+
+	return status;
+}
+
+/* Uses sync mcc */
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_flow_control *req;
+	int status;
+
+	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
+			    CMD_SUBSYSTEM_COMMON))
+		return -EPERM;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+			wrb, NULL);
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_flow_control *resp =
+						embedded_payload(wrb);
+
+		*tx_fc = le16_to_cpu(resp->tx_flow_control);
+		*rx_fc = le16_to_cpu(resp->rx_flow_control);
+	}
+
+err:
+
spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses mbox */ +int be_cmd_query_fw_cfg(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_query_fw_cfg *req; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, + sizeof(*req), wrb, NULL); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); + + adapter->port_num = le32_to_cpu(resp->phys_port); + adapter->function_mode = le32_to_cpu(resp->function_mode); + adapter->function_caps = le32_to_cpu(resp->function_caps); + adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF; + dev_info(&adapter->pdev->dev, + "FW config: function_mode=0x%x, function_caps=0x%x\n", + adapter->function_mode, adapter->function_caps); + } + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +/* Uses mbox */ +int be_cmd_reset_function(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_hdr *req; + int status; + + if (lancer_chip(adapter)) { + iowrite32(SLI_PORT_CONTROL_IP_MASK, + adapter->db + SLIPORT_CONTROL_OFFSET); + status = lancer_wait_ready(adapter); + if (status) + dev_err(&adapter->pdev->dev, + "Adapter in non recoverable error\n"); + return status; + } + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, + NULL); + + status = be_mbox_notify_wait(adapter); + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, + u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_rss_config *req; + int status; + + if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) + return 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); + + req->if_id = cpu_to_le32(adapter->if_handle); + req->enable_rss = cpu_to_le16(rss_hash_opts); + req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); + + if (!BEx_chip(adapter)) + req->hdr.version = 1; + + memcpy(req->cpu_table, rsstable, table_size); + memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN); + be_dws_cpu_to_le(req->hash, sizeof(req->hash)); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses sync mcc */ +int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, + u8 bcn, u8 sts, u8 state) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_enable_disable_beacon *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_ENABLE_DISABLE_BEACON, + sizeof(*req), wrb, NULL); + + req->port_num = port_num; + req->beacon_state = state; + req->beacon_duration = bcn; + req->status_duration = sts; + + status = be_mcc_notify_wait(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses sync mcc */ +int 
be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_beacon_state *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), + wrb, NULL); + + req->port_num = port_num; + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_beacon_state *resp = + embedded_payload(wrb); + + *state = resp->beacon_state; + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Uses sync mcc */ +int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, + u8 page_num, u8 *data) +{ + struct be_dma_mem cmd; + struct be_mcc_wrb *wrb; + struct be_cmd_req_port_type *req; + int status; + + if (page_num > TR_PAGE_A2) + return -EINVAL; + + cmd.size = sizeof(struct be_cmd_resp_port_type); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); + if (!cmd.va) { + dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); + return -ENOMEM; + } + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_READ_TRANSRECV_DATA, + cmd.size, wrb, &cmd); + + req->port = cpu_to_le32(adapter->hba_port_num); + req->page_num = cpu_to_le32(page_num); + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_port_type *resp = cmd.va; + + memcpy(data, resp->page_data, PAGE_DATA_LEN); + } +err: + spin_unlock_bh(&adapter->mcc_lock); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); + return status; +} + +int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 data_size, u32 data_offset, + const char *obj_name, u32 *data_written, + u8 *change_status, u8 *addn_status) +{ + struct be_mcc_wrb *wrb; + struct lancer_cmd_req_write_object *req; + struct lancer_cmd_resp_write_object *resp; + void *ctxt = NULL; + int status; + + spin_lock_bh(&adapter->mcc_lock); + adapter->flash_status = 0; + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err_unlock; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_WRITE_OBJECT, + sizeof(struct lancer_cmd_req_write_object), wrb, + NULL); + + ctxt = &req->context; + AMAP_SET_BITS(struct amap_lancer_write_obj_context, + write_length, ctxt, data_size); + + if (data_size == 0) + AMAP_SET_BITS(struct amap_lancer_write_obj_context, + eof, ctxt, 1); + else + AMAP_SET_BITS(struct amap_lancer_write_obj_context, + eof, ctxt, 0); + + be_dws_cpu_to_le(ctxt, sizeof(req->context)); + req->write_offset = cpu_to_le32(data_offset); + strlcpy(req->object_name, obj_name, sizeof(req->object_name)); + req->descriptor_count = cpu_to_le32(1); + req->buf_len = cpu_to_le32(data_size); + req->addr_low = cpu_to_le32((cmd->dma + + sizeof(struct lancer_cmd_req_write_object)) + & 0xFFFFFFFF); + req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + + sizeof(struct lancer_cmd_req_write_object))); + + be_mcc_notify(adapter); + spin_unlock_bh(&adapter->mcc_lock); + + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, + msecs_to_jiffies(60000))) + status = -ETIMEDOUT; + else + status = adapter->flash_status; + + resp = embedded_payload(wrb); + if (!status) { + 
*data_written = le32_to_cpu(resp->actual_write_len); + *change_status = resp->change_status; + } else { + *addn_status = resp->additional_status; + } + + return status; + +err_unlock: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_query_cable_type(struct be_adapter *adapter) +{ + u8 page_data[PAGE_DATA_LEN]; + int status; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + page_data); + if (!status) { + switch (adapter->phy.interface_type) { + case PHY_TYPE_QSFP: + adapter->phy.cable_type = + page_data[QSFP_PLUS_CABLE_TYPE_OFFSET]; + break; + case PHY_TYPE_SFP_PLUS_10GB: + adapter->phy.cable_type = + page_data[SFP_PLUS_CABLE_TYPE_OFFSET]; + break; + default: + adapter->phy.cable_type = 0; + break; + } + } + return status; +} + +int be_cmd_query_sfp_info(struct be_adapter *adapter) +{ + u8 page_data[PAGE_DATA_LEN]; + int status; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + page_data); + if (!status) { + strlcpy(adapter->phy.vendor_name, page_data + + SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); + strlcpy(adapter->phy.vendor_pn, + page_data + SFP_VENDOR_PN_OFFSET, + SFP_VENDOR_NAME_LEN - 1); + } + + return status; +} + +int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) +{ + struct lancer_cmd_req_delete_object *req; + struct be_mcc_wrb *wrb; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_DELETE_OBJECT, + sizeof(*req), wrb, NULL); + + strlcpy(req->object_name, obj_name, sizeof(req->object_name)); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 data_size, u32 data_offset, const char *obj_name, + u32 *data_read, u32 *eof, u8 *addn_status) +{ + struct be_mcc_wrb *wrb; + struct lancer_cmd_req_read_object *req; + struct lancer_cmd_resp_read_object *resp; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err_unlock; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_READ_OBJECT, + sizeof(struct lancer_cmd_req_read_object), wrb, + NULL); + + req->desired_read_len = cpu_to_le32(data_size); + req->read_offset = cpu_to_le32(data_offset); + strcpy(req->object_name, obj_name); + req->descriptor_count = cpu_to_le32(1); + req->buf_len = cpu_to_le32(data_size); + req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); + req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); + + status = be_mcc_notify_wait(adapter); + + resp = embedded_payload(wrb); + if (!status) { + *data_read = le32_to_cpu(resp->actual_read_len); + *eof = le32_to_cpu(resp->eof); + } else { + *addn_status = resp->additional_status; + } + +err_unlock: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 flash_type, u32 flash_opcode, u32 img_offset, + u32 buf_size) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_write_flashrom *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + adapter->flash_status = 0; + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err_unlock; + } + req = cmd->va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + 
OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, + cmd); + + req->params.op_type = cpu_to_le32(flash_type); + if (flash_type == OPTYPE_OFFSET_SPECIFIED) + req->params.offset = cpu_to_le32(img_offset); + + req->params.op_code = cpu_to_le32(flash_opcode); + req->params.data_buf_size = cpu_to_le32(buf_size); + + be_mcc_notify(adapter); + spin_unlock_bh(&adapter->mcc_lock); + + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, + msecs_to_jiffies(40000))) + status = -ETIMEDOUT; + else + status = adapter->flash_status; + + return status; + +err_unlock: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, + u16 img_optype, u32 img_offset, u32 crc_offset) +{ + struct be_cmd_read_flash_crc *req; + struct be_mcc_wrb *wrb; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_READ_FLASHROM, sizeof(*req), + wrb, NULL); + + req->params.op_type = cpu_to_le32(img_optype); + if (img_optype == OPTYPE_OFFSET_SPECIFIED) + req->params.offset = cpu_to_le32(img_offset + crc_offset); + else + req->params.offset = cpu_to_le32(crc_offset); + + req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); + req->params.data_buf_size = cpu_to_le32(0x4); + + status = be_mcc_notify_wait(adapter); + if (!status) + memcpy(flashed_crc, req->crc, 4); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, + struct be_dma_mem *nonemb_cmd) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_acpi_wol_magic_config *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = nonemb_cmd->va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), + wrb, nonemb_cmd); + memcpy(req->magic_mac, mac, ETH_ALEN); + + status = be_mcc_notify_wait(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, + u8 loopback_type, u8 enable) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_lmode *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), + wrb, NULL); + + req->src_port = port_num; + req->dest_port = port_num; + req->loopback_type = loopback_type; + req->loopback_state = enable; + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, + u32 loopback_type, u32 pkt_size, u32 num_pkts, + u64 pattern) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_loopback_test *req; + struct be_cmd_resp_loopback_test *resp; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, + OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, + NULL); + + req->hdr.timeout = cpu_to_le32(15); + req->pattern = cpu_to_le64(pattern); + req->src_port = cpu_to_le32(port_num); + req->dest_port = 
cpu_to_le32(port_num); + req->pkt_size = cpu_to_le32(pkt_size); + req->num_pkts = cpu_to_le32(num_pkts); + req->loopback_type = cpu_to_le32(loopback_type); + + be_mcc_notify(adapter); + + spin_unlock_bh(&adapter->mcc_lock); + + wait_for_completion(&adapter->et_cmd_compl); + resp = embedded_payload(wrb); + status = le32_to_cpu(resp->status); + + return status; +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, + u32 byte_cnt, struct be_dma_mem *cmd) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_ddrdma_test *req; + int status; + int i, j = 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = cmd->va; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, + OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, + cmd); + + req->pattern = cpu_to_le64(pattern); + req->byte_count = cpu_to_le32(byte_cnt); + for (i = 0; i < byte_cnt; i++) { + req->snd_buff[i] = (u8)(pattern >> (j*8)); + j++; + if (j > 7) + j = 0; + } + + status = be_mcc_notify_wait(adapter); + + if (!status) { + struct be_cmd_resp_ddrdma_test *resp; + + resp = cmd->va; + if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || + resp->snd_err) { + status = -1; + } + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_get_seeprom_data(struct be_adapter *adapter, + struct be_dma_mem *nonemb_cmd) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_seeprom_read *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = nonemb_cmd->va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, + nonemb_cmd); + + status = be_mcc_notify_wait(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_get_phy_info(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_phy_info *req; + struct be_dma_mem cmd; + int status; + + if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS, + CMD_SUBSYSTEM_COMMON)) + return -EPERM; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + cmd.size = sizeof(struct be_cmd_req_get_phy_info); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); + if (!cmd.va) { + dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); + status = -ENOMEM; + goto err; + } + + req = cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), + wrb, &cmd); + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_phy_info *resp_phy_info = + cmd.va + sizeof(struct be_cmd_req_hdr); + + adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); + adapter->phy.interface_type = + le16_to_cpu(resp_phy_info->interface_type); + adapter->phy.auto_speeds_supported = + le16_to_cpu(resp_phy_info->auto_speeds_supported); + adapter->phy.fixed_speeds_supported = + le16_to_cpu(resp_phy_info->fixed_speeds_supported); + adapter->phy.misc_params = + le32_to_cpu(resp_phy_info->misc_params); + + if (BE2_chip(adapter)) { + adapter->phy.fixed_speeds_supported = + BE_SUPPORTED_SPEED_10GBPS | + BE_SUPPORTED_SPEED_1GBPS; + } + } + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +static int be_cmd_set_qos(struct 
be_adapter *adapter, u32 bps, u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_qos *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); + + req->hdr.domain = domain; + req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); + req->max_bps_nic = cpu_to_le32(bps); + + status = be_mcc_notify_wait(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_get_cntl_attributes(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_cntl_attribs *req; + struct be_cmd_resp_cntl_attribs *resp; + int status; + int payload_len = max(sizeof(*req), sizeof(*resp)); + struct mgmt_controller_attrib *attribs; + struct be_dma_mem attribs_cmd; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); + attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); + attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + attribs_cmd.size, + &attribs_cmd.dma, GFP_ATOMIC); + if (!attribs_cmd.va) { + dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); + status = -ENOMEM; + goto err; + } + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = attribs_cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, + wrb, &attribs_cmd); + + status = be_mbox_notify_wait(adapter); + if (!status) { + attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); + adapter->hba_port_num = attribs->hba_attribs.phy_port; + } + +err: + mutex_unlock(&adapter->mbox_lock); + if (attribs_cmd.va) + dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size, + attribs_cmd.va, attribs_cmd.dma); + return status; +} + +/* Uses mbox */ +int be_cmd_req_native_mode(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_func_cap *req; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, + sizeof(*req), wrb, NULL); + + req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | + CAPABILITY_BE3_NATIVE_ERX_API); + req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); + + adapter->be3_native = le32_to_cpu(resp->cap_flags) & + CAPABILITY_BE3_NATIVE_ERX_API; + if (!adapter->be3_native) + dev_warn(&adapter->pdev->dev, + "adapter not in advanced mode\n"); + } +err: + mutex_unlock(&adapter->mbox_lock); + return status; +} + +/* Get privilege(s) for a function */ +int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, + u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_fn_privileges *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), + wrb, NULL); + + req->hdr.domain = domain; + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct 
be_cmd_resp_get_fn_privileges *resp = + embedded_payload(wrb); + + *privilege = le32_to_cpu(resp->privilege_mask); + + /* In UMC mode FW does not return right privileges. + * Override with correct privilege equivalent to PF. + */ + if (BEx_chip(adapter) && be_is_mc(adapter) && + be_physfn(adapter)) + *privilege = MAX_PRIVILEGES; + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Set privilege(s) for a function */ +int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, + u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_fn_privileges *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), + wrb, NULL); + req->hdr.domain = domain; + if (lancer_chip(adapter)) + req->privileges_lancer = cpu_to_le32(privileges); + else + req->privileges = cpu_to_le32(privileges); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. + * pmac_id_valid: false => pmac_id or MAC address is requested. + * If pmac_id is returned, pmac_id_valid is returned as true + */ +int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, + bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, + u8 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_mac_list *req; + int status; + int mac_count; + struct be_dma_mem get_mac_list_cmd; + int i; + + memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); + get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); + get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + get_mac_list_cmd.size, + &get_mac_list_cmd.dma, + GFP_ATOMIC); + + if (!get_mac_list_cmd.va) { + dev_err(&adapter->pdev->dev, + "Memory allocation failure during GET_MAC_LIST\n"); + return -ENOMEM; + } + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto out; + } + + req = get_mac_list_cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_MAC_LIST, + get_mac_list_cmd.size, wrb, &get_mac_list_cmd); + req->hdr.domain = domain; + req->mac_type = MAC_ADDRESS_TYPE_NETWORK; + if (*pmac_id_valid) { + req->mac_id = cpu_to_le32(*pmac_id); + req->iface_id = cpu_to_le16(if_handle); + req->perm_override = 0; + } else { + req->perm_override = 1; + } + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_mac_list *resp = + get_mac_list_cmd.va; + + if (*pmac_id_valid) { + memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, + ETH_ALEN); + goto out; + } + + mac_count = resp->true_mac_count + resp->pseudo_mac_count; + /* Mac list returned could contain one or more active mac_ids + * or one or more true or pseudo permanent mac addresses. + * If an active mac_id is present, return first active mac_id + * found. 
+ */ + for (i = 0; i < mac_count; i++) { + struct get_list_macaddr *mac_entry; + u16 mac_addr_size; + u32 mac_id; + + mac_entry = &resp->macaddr_list[i]; + mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); + /* mac_id is a 32 bit value and mac_addr size + * is 6 bytes + */ + if (mac_addr_size == sizeof(u32)) { + *pmac_id_valid = true; + mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; + *pmac_id = le32_to_cpu(mac_id); + goto out; + } + } + /* If no active mac_id found, return first mac addr */ + *pmac_id_valid = false; + memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, + ETH_ALEN); + } + +out: + spin_unlock_bh(&adapter->mcc_lock); + dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, + get_mac_list_cmd.va, get_mac_list_cmd.dma); + return status; +} + +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, + u8 *mac, u32 if_handle, bool active, u32 domain) +{ + if (!active) + be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, + if_handle, domain); + if (BEx_chip(adapter)) + return be_cmd_mac_addr_query(adapter, mac, false, + if_handle, curr_pmac_id); + else + /* Fetch the MAC address using pmac_id */ + return be_cmd_get_mac_from_list(adapter, mac, &active, + &curr_pmac_id, + if_handle, domain); +} + +int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) +{ + int status; + bool pmac_valid = false; + + eth_zero_addr(mac); + + if (BEx_chip(adapter)) { + if (be_physfn(adapter)) + status = be_cmd_mac_addr_query(adapter, mac, true, 0, + 0); + else + status = be_cmd_mac_addr_query(adapter, mac, false, + adapter->if_handle, 0); + } else { + status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, + NULL, adapter->if_handle, 0); + } + + return status; +} + +/* Uses synchronous MCCQ */ +int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, + u8 mac_count, u32 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_mac_list *req; + int status; + struct be_dma_mem cmd; + + memset(&cmd, 0, sizeof(struct be_dma_mem)); + cmd.size = sizeof(struct be_cmd_req_set_mac_list); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); + if (!cmd.va) + return -ENOMEM; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd.va; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), + wrb, &cmd); + + req->hdr.domain = domain; + req->mac_count = mac_count; + if (mac_count) + memcpy(req->mac, mac_array, ETH_ALEN*mac_count); + + status = be_mcc_notify_wait(adapter); + +err: + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Wrapper to delete any active MACs and provision the new mac. + * Changes to MAC_LIST are allowed iff none of the MAC addresses in the + * current list are active. + */ +int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) +{ + bool active_mac = false; + u8 old_mac[ETH_ALEN]; + u32 pmac_id; + int status; + + status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, + &pmac_id, if_id, dom); + + if (!status && active_mac) + be_cmd_pmac_del(adapter, if_id, pmac_id, dom); + + return be_cmd_set_mac_list(adapter, mac, mac ? 
1 : 0, dom); +} + +int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, + u32 domain, u16 intf_id, u16 hsw_mode) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_hsw_config *req; + void *ctxt; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + ctxt = &req->context; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, + NULL); + + req->hdr.domain = domain; + AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); + if (pvid) { + AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); + } + if (!BEx_chip(adapter) && hsw_mode) { + AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, + ctxt, adapter->hba_port_num); + AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); + AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, + ctxt, hsw_mode); + } + + be_dws_cpu_to_le(req->context, sizeof(req->context)); + status = be_mcc_notify_wait(adapter); + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +/* Get Hyper switch config */ +int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, + u32 domain, u16 intf_id, u8 *mode) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_hsw_config *req; + void *ctxt; + int status; + u16 vid; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + ctxt = &req->context; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, + NULL); + + req->hdr.domain = domain; + AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, + ctxt, intf_id); + AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); + + if (!BEx_chip(adapter) && mode) { + AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, + ctxt, adapter->hba_port_num); + AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); + } + be_dws_cpu_to_le(req->context, sizeof(req->context)); + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_hsw_config *resp = + embedded_payload(wrb); + + be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); + vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, + pvid, &resp->context); + if (pvid) + *pvid = le16_to_cpu(vid); + if (mode) + *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, + port_fwd_type, &resp->context); + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +static bool be_is_wol_excluded(struct be_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + if (!be_physfn(adapter)) + return true; + + switch (pdev->subsystem_device) { + case OC_SUBSYS_DEVICE_ID1: + case OC_SUBSYS_DEVICE_ID2: + case OC_SUBSYS_DEVICE_ID3: + case OC_SUBSYS_DEVICE_ID4: + return true; + default: + return false; + } +} + +int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_acpi_wol_magic_config_v1 *req; + int status = 0; + struct be_dma_mem cmd; + + if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, + CMD_SUBSYSTEM_ETH)) + return -EPERM; + + if (be_is_wol_excluded(adapter)) + return status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + memset(&cmd, 0, sizeof(struct be_dma_mem)); + cmd.size = sizeof(struct 
be_cmd_resp_acpi_wol_magic_config_v1); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); + if (!cmd.va) { + dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); + status = -ENOMEM; + goto err; + } + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, + OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, + sizeof(*req), wrb, &cmd); + + req->hdr.version = 1; + req->query_options = BE_GET_WOL_CAP; + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; + + resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; + + adapter->wol_cap = resp->wol_settings; + if (adapter->wol_cap & BE_WOL_CAP) + adapter->wol_en = true; + } +err: + mutex_unlock(&adapter->mbox_lock); + if (cmd.va) + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); + return status; + +} + +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) +{ + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; + int status; + int i, j; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); + if (!extfat_cmd.va) + return -ENOMEM; + + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (status) + goto err; + + cfgs = (struct be_fat_conf_params *) + (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); + for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { + u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); + + for (j = 0; j < num_modes; j++) { + if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) + cfgs->module[i].trace_lvl[j].dbg_lvl = + cpu_to_le32(level); + } + } + + status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); +err: + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); + return status; +} + +int be_cmd_get_fw_log_level(struct be_adapter *adapter) +{ + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; + int status, j; + int level = 0; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); + + if (!extfat_cmd.va) { + dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", + __func__); + goto err; + } + + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (!status) { + cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + + sizeof(struct be_cmd_resp_hdr)); + + for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { + if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) + level = cfgs->module[0].trace_lvl[j].dbg_lvl; + } + } + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); +err: + return level; +} + +int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_ext_fat_caps *req; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd->va; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_EXT_FAT_CAPABILITES, + cmd->size, wrb, cmd); + req->parameter_type = cpu_to_le32(1); + 
+ status = be_mbox_notify_wait(adapter); +err: + mutex_unlock(&adapter->mbox_lock); + return status; +} + +int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd, + struct be_fat_conf_params *configs) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_ext_fat_caps *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd->va; + memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_EXT_FAT_CAPABILITES, + cmd->size, wrb, cmd); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_query_port_name(struct be_adapter *adapter) +{ + struct be_cmd_req_get_port_name *req; + struct be_mcc_wrb *wrb; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, + NULL); + if (!BEx_chip(adapter)) + req->hdr.version = 1; + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); + + adapter->port_name = resp->port_name[adapter->hba_port_num]; + } else { + adapter->port_name = adapter->hba_port_num + '0'; + } + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +/* Descriptor type */ +enum { + FUNC_DESC = 1, + VFT_DESC = 2 +}; + +static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, + int desc_type) +{ + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; + struct be_nic_res_desc *nic; + int i; + + for (i = 0; i < desc_count; i++) { + if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { + nic = (struct be_nic_res_desc *)hdr; + if (desc_type == FUNC_DESC || + (desc_type == VFT_DESC && + nic->flags & (1 << VFT_SHIFT))) + return nic; + } + + hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; + } + return NULL; +} + +static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count) +{ + return be_get_nic_desc(buf, desc_count, VFT_DESC); +} + +static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count) +{ + return be_get_nic_desc(buf, desc_count, FUNC_DESC); +} + +static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf, + u32 desc_count) +{ + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; + struct be_pcie_res_desc *pcie; + int i; + + for (i = 0; i < desc_count; i++) { + if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) { + pcie = (struct be_pcie_res_desc *)hdr; + if (pcie->pf_num == devfn) + return pcie; + } + + hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; + } + return NULL; +} + +static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) +{ + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; + int i; + + for (i = 0; i < desc_count; i++) { + if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) + return (struct be_port_res_desc *)hdr; + + hdr->desc_len = hdr->desc_len ? 
: RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; + } + return NULL; +} + +static void be_copy_nic_desc(struct be_resources *res, + struct be_nic_res_desc *desc) +{ + res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); + res->max_vlans = le16_to_cpu(desc->vlan_count); + res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); + res->max_tx_qs = le16_to_cpu(desc->txq_count); + res->max_rss_qs = le16_to_cpu(desc->rssq_count); + res->max_rx_qs = le16_to_cpu(desc->rq_count); + res->max_evt_qs = le16_to_cpu(desc->eq_count); + res->max_cq_count = le16_to_cpu(desc->cq_count); + res->max_iface_count = le16_to_cpu(desc->iface_count); + res->max_mcc_count = le16_to_cpu(desc->mcc_count); + /* Clear flags that driver is not interested in */ + res->if_cap_flags = le32_to_cpu(desc->cap_flags) & + BE_IF_CAP_FLAGS_WANT; +} + +/* Uses Mbox */ +int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_func_config *req; + int status; + struct be_dma_mem cmd; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + memset(&cmd, 0, sizeof(struct be_dma_mem)); + cmd.size = sizeof(struct be_cmd_resp_get_func_config); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); + if (!cmd.va) { + dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); + status = -ENOMEM; + goto err; + } + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_FUNC_CONFIG, + cmd.size, wrb, &cmd); + + if (skyhawk_chip(adapter)) + req->hdr.version = 1; + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_func_config *resp = cmd.va; + u32 desc_count = le32_to_cpu(resp->desc_count); + struct be_nic_res_desc *desc; + + desc = be_get_func_nic_desc(resp->func_param, desc_count); + if (!desc) { + status = -EINVAL; + goto err; + } + + adapter->pf_number = desc->pf_num; + be_copy_nic_desc(res, desc); + } +err: + mutex_unlock(&adapter->mbox_lock); + if (cmd.va) + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); + return status; +} + +/* Will use MBOX only if MCCQ has not been created */ +int be_cmd_get_profile_config(struct be_adapter *adapter, + struct be_resources *res, u8 query, u8 domain) +{ + struct be_cmd_resp_get_profile_config *resp; + struct be_cmd_req_get_profile_config *req; + struct be_nic_res_desc *vf_res; + struct be_pcie_res_desc *pcie; + struct be_port_res_desc *port; + struct be_nic_res_desc *nic; + struct be_mcc_wrb wrb = {0}; + struct be_dma_mem cmd; + u16 desc_count; + int status; + + memset(&cmd, 0, sizeof(struct be_dma_mem)); + cmd.size = sizeof(struct be_cmd_resp_get_profile_config); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); + if (!cmd.va) + return -ENOMEM; + + req = cmd.va; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_PROFILE_CONFIG, + cmd.size, &wrb, &cmd); + + req->hdr.domain = domain; + if (!lancer_chip(adapter)) + req->hdr.version = 1; + req->type = ACTIVE_PROFILE_TYPE; + + /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the + * descriptors with all bits set to "1" for the fields which can be + * modified using SET_PROFILE_CONFIG cmd. 
+ */ + if (query == RESOURCE_MODIFIABLE) + req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; + + status = be_cmd_notify_wait(adapter, &wrb); + if (status) + goto err; + + resp = cmd.va; + desc_count = le16_to_cpu(resp->desc_count); + + pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, + desc_count); + if (pcie) + res->max_vfs = le16_to_cpu(pcie->num_vfs); + + port = be_get_port_desc(resp->func_param, desc_count); + if (port) + adapter->mc_type = port->mc_type; + + nic = be_get_func_nic_desc(resp->func_param, desc_count); + if (nic) + be_copy_nic_desc(res, nic); + + vf_res = be_get_vft_desc(resp->func_param, desc_count); + if (vf_res) + res->vf_if_cap_flags = vf_res->cap_flags; +err: + if (cmd.va) + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); + return status; +} + +/* Will use MBOX only if MCCQ has not been created */ +static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, + int size, int count, u8 version, u8 domain) +{ + struct be_cmd_req_set_profile_config *req; + struct be_mcc_wrb wrb = {0}; + struct be_dma_mem cmd; + int status; + + memset(&cmd, 0, sizeof(struct be_dma_mem)); + cmd.size = sizeof(struct be_cmd_req_set_profile_config); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); + if (!cmd.va) + return -ENOMEM; + + req = cmd.va; + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size, + &wrb, &cmd); + req->hdr.version = version; + req->hdr.domain = domain; + req->desc_count = cpu_to_le32(count); + memcpy(req->desc, desc, size); + + status = be_cmd_notify_wait(adapter, &wrb); + + if (cmd.va) + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); + return status; +} + +/* Mark all fields invalid */ +static void be_reset_nic_desc(struct be_nic_res_desc *nic) +{ + memset(nic, 0, sizeof(*nic)); + nic->unicast_mac_count = 0xFFFF; + nic->mcc_count = 0xFFFF; + nic->vlan_count = 0xFFFF; + nic->mcast_mac_count = 0xFFFF; + nic->txq_count = 0xFFFF; + nic->rq_count = 0xFFFF; + nic->rssq_count = 0xFFFF; + nic->lro_count = 0xFFFF; + nic->cq_count = 0xFFFF; + nic->toe_conn_count = 0xFFFF; + nic->eq_count = 0xFFFF; + nic->iface_count = 0xFFFF; + nic->link_param = 0xFF; + nic->channel_id_param = cpu_to_le16(0xF000); + nic->acpi_params = 0xFF; + nic->wol_param = 0x0F; + nic->tunnel_iface_count = 0xFFFF; + nic->direct_tenant_iface_count = 0xFFFF; + nic->bw_min = 0xFFFFFFFF; + nic->bw_max = 0xFFFFFFFF; +} + +/* Mark all fields invalid */ +static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie) +{ + memset(pcie, 0, sizeof(*pcie)); + pcie->sriov_state = 0xFF; + pcie->pf_state = 0xFF; + pcie->pf_type = 0xFF; + pcie->num_vfs = 0xFFFF; +} + +int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, + u8 domain) +{ + struct be_nic_res_desc nic_desc; + u32 bw_percent; + u16 version = 0; + + if (BE3_chip(adapter)) + return be_cmd_set_qos(adapter, max_rate / 10, domain); + + be_reset_nic_desc(&nic_desc); + nic_desc.pf_num = adapter->pf_number; + nic_desc.vf_num = domain; + nic_desc.bw_min = 0; + if (lancer_chip(adapter)) { + nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; + nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; + nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | + (1 << NOSV_SHIFT); + nic_desc.bw_max = cpu_to_le32(max_rate / 10); + } else { + version = 1; + nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; + nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; + nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); + 
bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+		nic_desc.bw_max = cpu_to_le32(bw_percent);
+	}
+
+	return be_cmd_set_profile_config(adapter, &nic_desc,
+					 nic_desc.hdr.desc_len,
+					 1, version, domain);
+}
+
+static void be_fill_vf_res_template(struct be_adapter *adapter,
+				    struct be_resources pool_res,
+				    u16 num_vfs, u16 num_vf_qs,
+				    struct be_nic_res_desc *nic_vft)
+{
+	u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
+	struct be_resources res_mod = {0};
+
+	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+	 * which are modifiable using SET_PROFILE_CONFIG cmd.
+	 */
+	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
+
+	/* If RSS IFACE capability flags are modifiable for a VF, set the
+	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+	 * more than 1 RSSQ is available for a VF.
+	 * Otherwise, provision only 1 queue pair for VF.
+	 */
+	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+		if (num_vf_qs > 1) {
+			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+			if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+		} else {
+			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+					     BE_IF_FLAGS_DEFQ_RSS);
+		}
+
+		nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
+	} else {
+		num_vf_qs = 1;
+	}
+
+	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
+	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
+	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
+	nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
+					(num_vfs + 1));
+
+	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+	 * among the PF and its VFs, if the fields are changeable
+	 */
+	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+		nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
+							 (num_vfs + 1));
+
+	if (res_mod.max_vlans == FIELD_MODIFIABLE)
+		nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
+						  (num_vfs + 1));
+
+	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+		nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
+						   (num_vfs + 1));
+
+	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+		nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
+						 (num_vfs + 1));
+}
+
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+			    struct be_resources pool_res, u16 num_vfs,
+			    u16 num_vf_qs)
+{
+	struct {
+		struct be_pcie_res_desc pcie;
+		struct be_nic_res_desc nic_vft;
+	} __packed desc;
+
+	/* PF PCIE descriptor */
+	be_reset_pcie_desc(&desc.pcie);
+	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
+	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
+	desc.pcie.pf_num = adapter->pdev->devfn;
+	desc.pcie.sriov_state = num_vfs ?
1 : 0; + desc.pcie.num_vfs = cpu_to_le16(num_vfs); + + /* VF NIC Template descriptor */ + be_reset_nic_desc(&desc.nic_vft); + desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; + desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; + desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); + desc.nic_vft.pf_num = adapter->pdev->devfn; + desc.nic_vft.vf_num = 0; + + be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, + &desc.nic_vft); + + return be_cmd_set_profile_config(adapter, &desc, + 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); +} + +int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_manage_iface_filters *req; + int status; + + if (iface == 0xFFFFFFFF) + return -1; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), + wrb, NULL); + req->op = op; + req->target_iface_id = cpu_to_le32(iface); + + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) +{ + struct be_port_res_desc port_desc; + + memset(&port_desc, 0, sizeof(port_desc)); + port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; + port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; + port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); + port_desc.link_num = adapter->hba_port_num; + if (port) { + port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | + (1 << RCVID_SHIFT); + port_desc.nv_port = swab16(port); + } else { + port_desc.nv_flags = NV_TYPE_DISABLED; + port_desc.nv_port = 0; + } + + return be_cmd_set_profile_config(adapter, &port_desc, + RESOURCE_DESC_SIZE_V1, 1, 1, 0); +} + +int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, + int vf_num) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_iface_list *req; + struct be_cmd_resp_get_iface_list *resp; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), + wrb, NULL); + req->hdr.domain = vf_num + 1; + + status = be_mcc_notify_wait(adapter); + if (!status) { + resp = (struct be_cmd_resp_get_iface_list *)req; + vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); + } + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +static int lancer_wait_idle(struct be_adapter *adapter) +{ +#define SLIPORT_IDLE_TIMEOUT 30 + u32 reg_val; + int status = 0, i; + + for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { + reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); + if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) + break; + + ssleep(1); + } + + if (i == SLIPORT_IDLE_TIMEOUT) + status = -1; + + return status; +} + +int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) +{ + int status = 0; + + status = lancer_wait_idle(adapter); + if (status) + return status; + + iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); + + return status; +} + +/* Routine to check whether dump image is present or not */ +bool dump_present(struct be_adapter *adapter) +{ + u32 sliport_status = 0; + + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); +} + +int 
lancer_initiate_dump(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + if (dump_present(adapter)) { + dev_info(dev, "Previous dump not cleared, not forcing dump\n"); + return -EEXIST; + } + + /* give firmware reset and diagnostic dump */ + status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | + PHYSDEV_CONTROL_DD_MASK); + if (status < 0) { + dev_err(dev, "FW reset failed\n"); + return status; + } + + status = lancer_wait_idle(adapter); + if (status) + return status; + + if (!dump_present(adapter)) { + dev_err(dev, "FW dump not generated\n"); + return -EIO; + } + + return 0; +} + +int lancer_delete_dump(struct be_adapter *adapter) +{ + int status; + + status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE); + return be_cmd_status(status); +} + +/* Uses sync mcc */ +int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_enable_disable_vf *req; + int status; + + if (BEx_chip(adapter)) + return 0; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), + wrb, NULL); + + req->hdr.domain = domain; + req->enable = 1; + status = be_mcc_notify_wait(adapter); +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + +int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_intr_set *req; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), + wrb, NULL); + + req->intr_enabled = intr_enable; + + status = be_mbox_notify_wait(adapter); + + mutex_unlock(&adapter->mbox_lock); + return status; +} + +/* Uses MBOX */ +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) +{ + struct be_cmd_req_get_active_profile *req; + struct be_mcc_wrb *wrb; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), + wrb, NULL); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_active_profile *resp = + embedded_payload(wrb); + + *profile_id = le16_to_cpu(resp->active_profile_id); + } + +err: + mutex_unlock(&adapter->mbox_lock); + return status; +} + +int be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, u8 domain) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_ll_link *req; + int status; + + if (BEx_chip(adapter) || lancer_chip(adapter)) + return -EOPNOTSUPP; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, + sizeof(*req), wrb, NULL); + + req->hdr.version = 1; + req->hdr.domain = domain; + + if (link_state == IFLA_VF_LINK_STATE_ENABLE) + req->link_config |= 1; + + if (link_state == IFLA_VF_LINK_STATE_AUTO) + req->link_config |= 1 << PLINK_TRACK_SHIFT; + + status = be_mcc_notify_wait(adapter); +err: + 
spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
+int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
+		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+{
+	struct be_adapter *adapter = netdev_priv(netdev_handle);
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
+	struct be_cmd_req_hdr *req;
+	struct be_cmd_resp_hdr *resp;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+	resp = embedded_payload(wrb);
+
+	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
+			       hdr->opcode, wrb_payload_size, wrb, NULL);
+	memcpy(req, wrb_payload, wrb_payload_size);
+	be_dws_cpu_to_le(req, wrb_payload_size);
+
+	status = be_mcc_notify_wait(adapter);
+	if (cmd_status)
+		*cmd_status = (status & 0xffff);
+	if (ext_status)
+		*ext_status = 0;
+	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
+	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
new file mode 100644
index 000000000..1ec22300e
--- /dev/null
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -0,0 +1,2369 @@
+/*
+ * Copyright (C) 2005 - 2014 Emulex
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation. The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/*
+ * The driver sends configuration and management command requests to the
+ * firmware in the BE. These requests are communicated to the processor
+ * using Work Request Blocks (WRBs) submitted to the MCC-WRB ring or via one
+ * WRB inside a MAILBOX.
+ * The commands are serviced by the ARM processor in the BladeEngine's MPU.
+ */ + +struct be_sge { + u32 pa_lo; + u32 pa_hi; + u32 len; +}; + +#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/ +#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */ +#define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */ +struct be_mcc_wrb { + u32 embedded; /* dword 0 */ + u32 payload_length; /* dword 1 */ + u32 tag0; /* dword 2 */ + u32 tag1; /* dword 3 */ + u32 rsvd; /* dword 4 */ + union { + u8 embedded_payload[236]; /* used by embedded cmds */ + struct be_sge sgl[19]; /* used by non-embedded cmds */ + } payload; +}; + +#define CQE_FLAGS_VALID_MASK BIT(31) +#define CQE_FLAGS_ASYNC_MASK BIT(30) +#define CQE_FLAGS_COMPLETED_MASK BIT(28) +#define CQE_FLAGS_CONSUMED_MASK BIT(27) + +/* Completion Status */ +enum mcc_base_status { + MCC_STATUS_SUCCESS = 0, + MCC_STATUS_FAILED = 1, + MCC_STATUS_ILLEGAL_REQUEST = 2, + MCC_STATUS_ILLEGAL_FIELD = 3, + MCC_STATUS_INSUFFICIENT_BUFFER = 4, + MCC_STATUS_UNAUTHORIZED_REQUEST = 5, + MCC_STATUS_NOT_SUPPORTED = 66, + MCC_STATUS_FEATURE_NOT_SUPPORTED = 68 +}; + +/* Additional status */ +enum mcc_addl_status { + MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16, + MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d, + MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a +}; + +#define CQE_BASE_STATUS_MASK 0xFFFF +#define CQE_BASE_STATUS_SHIFT 0 /* bits 0 - 15 */ +#define CQE_ADDL_STATUS_MASK 0xFF +#define CQE_ADDL_STATUS_SHIFT 16 /* bits 16 - 31 */ + +#define base_status(status) \ + ((enum mcc_base_status) \ + (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0)) +#define addl_status(status) \ + ((enum mcc_addl_status) \ + (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \ + CQE_ADDL_STATUS_MASK : 0)) + +struct be_mcc_compl { + u32 status; /* dword 0 */ + u32 tag0; /* dword 1 */ + u32 tag1; /* dword 2 */ + u32 flags; /* dword 3 */ +}; + +/* When the async bit of mcc_compl flags is set, flags + * is interpreted as follows: + */ +#define ASYNC_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ +#define ASYNC_EVENT_CODE_MASK 0xFF +#define ASYNC_EVENT_TYPE_SHIFT 16 +#define ASYNC_EVENT_TYPE_MASK 0xFF +#define ASYNC_EVENT_CODE_LINK_STATE 0x1 +#define ASYNC_EVENT_CODE_GRP_5 0x5 +#define ASYNC_EVENT_QOS_SPEED 0x1 +#define ASYNC_EVENT_COS_PRIORITY 0x2 +#define ASYNC_EVENT_PVID_STATE 0x3 +#define ASYNC_EVENT_CODE_QNQ 0x6 +#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1 +#define ASYNC_EVENT_CODE_SLIPORT 0x11 +#define ASYNC_EVENT_PORT_MISCONFIG 0x9 + +enum { + LINK_DOWN = 0x0, + LINK_UP = 0x1 +}; +#define LINK_STATUS_MASK 0x1 +#define LOGICAL_LINK_STATUS_MASK 0x2 + +/* When the event code of compl->flags is link-state, the mcc_compl + * must be interpreted as follows + */ +struct be_async_event_link_state { + u8 physical_port; + u8 port_link_status; + u8 port_duplex; + u8 port_speed; + u8 port_fault; + u8 rsvd0[7]; + u32 flags; +} __packed; + +/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED + * the mcc_compl must be interpreted as follows + */ +struct be_async_event_grp5_qos_link_speed { + u8 physical_port; + u8 rsvd[5]; + u16 qos_link_speed; + u32 event_tag; + u32 flags; +} __packed; + +/* When the event code of compl->flags is GRP5 and event type is + * CoS-Priority, the mcc_compl must be interpreted as follows + */ +struct be_async_event_grp5_cos_priority { + u8 physical_port; + u8 available_priority_bmap; + u8 reco_default_priority; + u8 valid; + u8 rsvd0; + u8 event_tag; + u32 flags; +} __packed; + +/* When the event code of compl->flags is GRP5 and event type is + * PVID state, the mcc_compl must be interpreted as follows + */ +struct 
be_async_event_grp5_pvid_state { + u8 enabled; + u8 rsvd0; + u16 tag; + u32 event_tag; + u32 rsvd1; + u32 flags; +} __packed; + +/* async event indicating outer VLAN tag in QnQ */ +struct be_async_event_qnq { + u8 valid; /* Indicates if outer VLAN is valid */ + u8 rsvd0; + u16 vlan_tag; + u32 event_tag; + u8 rsvd1[4]; + u32 flags; +} __packed; + +#define INCOMPATIBLE_SFP 0x3 +/* async event indicating misconfigured port */ +struct be_async_event_misconfig_port { + u32 event_data_word1; + u32 event_data_word2; + u32 rsvd0; + u32 flags; +} __packed; + +struct be_mcc_mailbox { + struct be_mcc_wrb wrb; + struct be_mcc_compl compl; +}; + +#define CMD_SUBSYSTEM_COMMON 0x1 +#define CMD_SUBSYSTEM_ETH 0x3 +#define CMD_SUBSYSTEM_LOWLEVEL 0xb + +#define OPCODE_COMMON_NTWK_MAC_QUERY 1 +#define OPCODE_COMMON_NTWK_MAC_SET 2 +#define OPCODE_COMMON_NTWK_MULTICAST_SET 3 +#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4 +#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5 +#define OPCODE_COMMON_READ_FLASHROM 6 +#define OPCODE_COMMON_WRITE_FLASHROM 7 +#define OPCODE_COMMON_CQ_CREATE 12 +#define OPCODE_COMMON_EQ_CREATE 13 +#define OPCODE_COMMON_MCC_CREATE 21 +#define OPCODE_COMMON_SET_QOS 28 +#define OPCODE_COMMON_MCC_CREATE_EXT 90 +#define OPCODE_COMMON_SEEPROM_READ 30 +#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 +#define OPCODE_COMMON_NTWK_RX_FILTER 34 +#define OPCODE_COMMON_GET_FW_VERSION 35 +#define OPCODE_COMMON_SET_FLOW_CONTROL 36 +#define OPCODE_COMMON_GET_FLOW_CONTROL 37 +#define OPCODE_COMMON_SET_FRAME_SIZE 39 +#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 +#define OPCODE_COMMON_FIRMWARE_CONFIG 42 +#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50 +#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51 +#define OPCODE_COMMON_MCC_DESTROY 53 +#define OPCODE_COMMON_CQ_DESTROY 54 +#define OPCODE_COMMON_EQ_DESTROY 55 +#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 +#define OPCODE_COMMON_NTWK_PMAC_ADD 59 +#define OPCODE_COMMON_NTWK_PMAC_DEL 60 +#define OPCODE_COMMON_FUNCTION_RESET 61 +#define OPCODE_COMMON_MANAGE_FAT 68 +#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69 +#define OPCODE_COMMON_GET_BEACON_STATE 70 +#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 +#define OPCODE_COMMON_GET_PORT_NAME 77 +#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG 80 +#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 +#define OPCODE_COMMON_SET_FN_PRIVILEGES 100 +#define OPCODE_COMMON_GET_PHY_DETAILS 102 +#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 +#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 +#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITES 125 +#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITES 126 +#define OPCODE_COMMON_GET_MAC_LIST 147 +#define OPCODE_COMMON_SET_MAC_LIST 148 +#define OPCODE_COMMON_GET_HSW_CONFIG 152 +#define OPCODE_COMMON_GET_FUNC_CONFIG 160 +#define OPCODE_COMMON_GET_PROFILE_CONFIG 164 +#define OPCODE_COMMON_SET_PROFILE_CONFIG 165 +#define OPCODE_COMMON_GET_ACTIVE_PROFILE 167 +#define OPCODE_COMMON_SET_HSW_CONFIG 153 +#define OPCODE_COMMON_GET_FN_PRIVILEGES 170 +#define OPCODE_COMMON_READ_OBJECT 171 +#define OPCODE_COMMON_WRITE_OBJECT 172 +#define OPCODE_COMMON_DELETE_OBJECT 174 +#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193 +#define OPCODE_COMMON_GET_IFACE_LIST 194 +#define OPCODE_COMMON_ENABLE_DISABLE_VF 196 + +#define OPCODE_ETH_RSS_CONFIG 1 +#define OPCODE_ETH_ACPI_CONFIG 2 +#define OPCODE_ETH_PROMISCUOUS 3 +#define OPCODE_ETH_GET_STATISTICS 4 +#define OPCODE_ETH_TX_CREATE 7 +#define OPCODE_ETH_RX_CREATE 8 +#define OPCODE_ETH_TX_DESTROY 9 +#define OPCODE_ETH_RX_DESTROY 10 +#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12 +#define 
OPCODE_ETH_GET_PPORT_STATS 18 + +#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 +#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 +#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19 + +struct be_cmd_req_hdr { + u8 opcode; /* dword 0 */ + u8 subsystem; /* dword 0 */ + u8 port_number; /* dword 0 */ + u8 domain; /* dword 0 */ + u32 timeout; /* dword 1 */ + u32 request_length; /* dword 2 */ + u8 version; /* dword 3 */ + u8 rsvd[3]; /* dword 3 */ +}; + +#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ +#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */ +struct be_cmd_resp_hdr { + u8 opcode; /* dword 0 */ + u8 subsystem; /* dword 0 */ + u8 rsvd[2]; /* dword 0 */ + u8 base_status; /* dword 1 */ + u8 addl_status; /* dword 1 */ + u8 rsvd1[2]; /* dword 1 */ + u32 response_length; /* dword 2 */ + u32 actual_resp_len; /* dword 3 */ +}; + +struct phys_addr { + u32 lo; + u32 hi; +}; + +/************************** + * BE Command definitions * + **************************/ + +/* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +struct amap_eq_context { + u8 cidx[13]; /* dword 0*/ + u8 rsvd0[3]; /* dword 0*/ + u8 epidx[13]; /* dword 0*/ + u8 valid; /* dword 0*/ + u8 rsvd1; /* dword 0*/ + u8 size; /* dword 0*/ + u8 pidx[13]; /* dword 1*/ + u8 rsvd2[3]; /* dword 1*/ + u8 pd[10]; /* dword 1*/ + u8 count[3]; /* dword 1*/ + u8 solevent; /* dword 1*/ + u8 stalled; /* dword 1*/ + u8 armed; /* dword 1*/ + u8 rsvd3[4]; /* dword 2*/ + u8 func[8]; /* dword 2*/ + u8 rsvd4; /* dword 2*/ + u8 delaymult[10]; /* dword 2*/ + u8 rsvd5[2]; /* dword 2*/ + u8 phase[2]; /* dword 2*/ + u8 nodelay; /* dword 2*/ + u8 rsvd6[4]; /* dword 2*/ + u8 rsvd7[32]; /* dword 3*/ +} __packed; + +struct be_cmd_req_eq_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; /* sword */ + u16 rsvd0; /* sword */ + u8 context[sizeof(struct amap_eq_context) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_eq_create { + struct be_cmd_resp_hdr resp_hdr; + u16 eq_id; /* sword */ + u16 msix_idx; /* available only in v2 */ +} __packed; + +/******************** Mac query ***************************/ +enum { + MAC_ADDRESS_TYPE_STORAGE = 0x0, + MAC_ADDRESS_TYPE_NETWORK = 0x1, + MAC_ADDRESS_TYPE_PD = 0x2, + MAC_ADDRESS_TYPE_MANAGEMENT = 0x3 +}; + +struct mac_addr { + u16 size_of_struct; + u8 addr[ETH_ALEN]; +} __packed; + +struct be_cmd_req_mac_query { + struct be_cmd_req_hdr hdr; + u8 type; + u8 permanent; + u16 if_id; + u32 pmac_id; +} __packed; + +struct be_cmd_resp_mac_query { + struct be_cmd_resp_hdr hdr; + struct mac_addr mac; +}; + +/******************** PMac Add ***************************/ +struct be_cmd_req_pmac_add { + struct be_cmd_req_hdr hdr; + u32 if_id; + u8 mac_address[ETH_ALEN]; + u8 rsvd0[2]; +} __packed; + +struct be_cmd_resp_pmac_add { + struct be_cmd_resp_hdr hdr; + u32 pmac_id; +}; + +/******************** PMac Del ***************************/ +struct be_cmd_req_pmac_del { + struct be_cmd_req_hdr hdr; + u32 if_id; + u32 pmac_id; +}; + +/******************** Create CQ ***************************/ +/* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +struct amap_cq_context_be { + u8 cidx[11]; /* dword 0*/ + u8 rsvd0; /* dword 0*/ + u8 coalescwm[2]; /* dword 0*/ + u8 nodelay; /* dword 0*/ + u8 epidx[11]; /* dword 0*/ + u8 rsvd1; /* dword 0*/ + u8 count[2]; /* dword 0*/ + u8 valid; /* dword 0*/ + u8 solevent; /* dword 0*/ + u8 eventable; /* 
dword 0*/ + u8 pidx[11]; /* dword 1*/ + u8 rsvd2; /* dword 1*/ + u8 pd[10]; /* dword 1*/ + u8 eqid[8]; /* dword 1*/ + u8 stalled; /* dword 1*/ + u8 armed; /* dword 1*/ + u8 rsvd3[4]; /* dword 2*/ + u8 func[8]; /* dword 2*/ + u8 rsvd4[20]; /* dword 2*/ + u8 rsvd5[32]; /* dword 3*/ +} __packed; + +struct amap_cq_context_v2 { + u8 rsvd0[12]; /* dword 0*/ + u8 coalescwm[2]; /* dword 0*/ + u8 nodelay; /* dword 0*/ + u8 rsvd1[12]; /* dword 0*/ + u8 count[2]; /* dword 0*/ + u8 valid; /* dword 0*/ + u8 rsvd2; /* dword 0*/ + u8 eventable; /* dword 0*/ + u8 eqid[16]; /* dword 1*/ + u8 rsvd3[15]; /* dword 1*/ + u8 armed; /* dword 1*/ + u8 rsvd4[32]; /* dword 2*/ + u8 rsvd5[32]; /* dword 3*/ +} __packed; + +struct be_cmd_req_cq_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u8 page_size; + u8 rsvd0; + u8 context[sizeof(struct amap_cq_context_be) / 8]; + struct phys_addr pages[8]; +} __packed; + + +struct be_cmd_resp_cq_create { + struct be_cmd_resp_hdr hdr; + u16 cq_id; + u16 rsvd0; +} __packed; + +struct be_cmd_req_get_fat { + struct be_cmd_req_hdr hdr; + u32 fat_operation; + u32 read_log_offset; + u32 read_log_length; + u32 data_buffer_size; + u32 data_buffer[1]; +} __packed; + +struct be_cmd_resp_get_fat { + struct be_cmd_resp_hdr hdr; + u32 log_size; + u32 read_log_length; + u32 rsvd[2]; + u32 data_buffer[1]; +} __packed; + + +/******************** Create MCCQ ***************************/ +/* Pseudo amap definition in which each bit of the actual structure is defined + * as a byte: used to calculate offset/shift/mask of each field */ +struct amap_mcc_context_be { + u8 con_index[14]; + u8 rsvd0[2]; + u8 ring_size[4]; + u8 fetch_wrb; + u8 fetch_r2t; + u8 cq_id[10]; + u8 prod_index[14]; + u8 fid[8]; + u8 pdid[9]; + u8 valid; + u8 rsvd1[32]; + u8 rsvd2[32]; +} __packed; + +struct amap_mcc_context_v1 { + u8 async_cq_id[16]; + u8 ring_size[4]; + u8 rsvd0[12]; + u8 rsvd1[31]; + u8 valid; + u8 async_cq_valid[1]; + u8 rsvd2[31]; + u8 rsvd3[32]; +} __packed; + +struct be_cmd_req_mcc_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u16 cq_id; + u8 context[sizeof(struct amap_mcc_context_be) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_req_mcc_ext_create { + struct be_cmd_req_hdr hdr; + u16 num_pages; + u16 cq_id; + u32 async_event_bitmap[1]; + u8 context[sizeof(struct amap_mcc_context_v1) / 8]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_mcc_create { + struct be_cmd_resp_hdr hdr; + u16 id; + u16 rsvd0; +} __packed; + +/******************** Create TxQ ***************************/ +#define BE_ETH_TX_RING_TYPE_STANDARD 2 +#define BE_ULP1_NUM 1 + +struct be_cmd_req_eth_tx_create { + struct be_cmd_req_hdr hdr; + u8 num_pages; + u8 ulp_num; + u16 type; + u16 if_id; + u8 queue_size; + u8 rsvd0; + u32 rsvd1; + u16 cq_id; + u16 rsvd2; + u32 rsvd3[13]; + struct phys_addr pages[8]; +} __packed; + +struct be_cmd_resp_eth_tx_create { + struct be_cmd_resp_hdr hdr; + u16 cid; + u16 rid; + u32 db_offset; + u32 rsvd0[4]; +} __packed; + +/******************** Create RxQ ***************************/ +struct be_cmd_req_eth_rx_create { + struct be_cmd_req_hdr hdr; + u16 cq_id; + u8 frag_size; + u8 num_pages; + struct phys_addr pages[2]; + u32 interface_id; + u16 max_frame_size; + u16 rsvd0; + u32 rss_queue; +} __packed; + +struct be_cmd_resp_eth_rx_create { + struct be_cmd_resp_hdr hdr; + u16 id; + u8 rss_id; + u8 rsvd0; +} __packed; + +/******************** Q Destroy ***************************/ +/* Type of Queue to be destroyed */ +enum { + QTYPE_EQ = 1, + QTYPE_CQ, + 
QTYPE_TXQ, + QTYPE_RXQ, + QTYPE_MCCQ +}; + +struct be_cmd_req_q_destroy { + struct be_cmd_req_hdr hdr; + u16 id; + u16 bypass_flush; /* valid only for rx q destroy */ +} __packed; + +/************ I/f Create (it's actually I/f Config Create)**********/ + +/* Capability flags for the i/f */ +enum be_if_flags { + BE_IF_FLAGS_RSS = 0x4, + BE_IF_FLAGS_PROMISCUOUS = 0x8, + BE_IF_FLAGS_BROADCAST = 0x10, + BE_IF_FLAGS_UNTAGGED = 0x20, + BE_IF_FLAGS_ULP = 0x40, + BE_IF_FLAGS_VLAN_PROMISCUOUS = 0x80, + BE_IF_FLAGS_VLAN = 0x100, + BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, + BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, + BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800, + BE_IF_FLAGS_MULTICAST = 0x1000, + BE_IF_FLAGS_DEFQ_RSS = 0x1000000 +}; + +#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ + BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ + BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ + BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ + BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_DEFQ_RSS) + +#define BE_IF_FLAGS_ALL_PROMISCUOUS (BE_IF_FLAGS_PROMISCUOUS | \ + BE_IF_FLAGS_VLAN_PROMISCUOUS |\ + BE_IF_FLAGS_MCAST_PROMISCUOUS) + +/* An RX interface is an object with one or more MAC addresses and + * filtering capabilities. */ +struct be_cmd_req_if_create { + struct be_cmd_req_hdr hdr; + u32 version; /* ignore currently */ + u32 capability_flags; + u32 enable_flags; + u8 mac_addr[ETH_ALEN]; + u8 rsvd0; + u8 pmac_invalid; /* if set, don't attach the mac addr to the i/f */ + u32 vlan_tag; /* not used currently */ +} __packed; + +struct be_cmd_resp_if_create { + struct be_cmd_resp_hdr hdr; + u32 interface_id; + u32 pmac_id; +}; + +/****** I/f Destroy(it's actually I/f Config Destroy )**********/ +struct be_cmd_req_if_destroy { + struct be_cmd_req_hdr hdr; + u32 interface_id; +}; + +/*************** HW Stats Get **********************************/ +struct be_port_rxf_stats_v0 { + u32 rx_bytes_lsd; /* dword 0*/ + u32 rx_bytes_msd; /* dword 1*/ + u32 rx_total_frames; /* dword 2*/ + u32 rx_unicast_frames; /* dword 3*/ + u32 rx_multicast_frames; /* dword 4*/ + u32 rx_broadcast_frames; /* dword 5*/ + u32 rx_crc_errors; /* dword 6*/ + u32 rx_alignment_symbol_errors; /* dword 7*/ + u32 rx_pause_frames; /* dword 8*/ + u32 rx_control_frames; /* dword 9*/ + u32 rx_in_range_errors; /* dword 10*/ + u32 rx_out_range_errors; /* dword 11*/ + u32 rx_frame_too_long; /* dword 12*/ + u32 rx_address_filtered; /* dword 13*/ + u32 rx_vlan_filtered; /* dword 14*/ + u32 rx_dropped_too_small; /* dword 15*/ + u32 rx_dropped_too_short; /* dword 16*/ + u32 rx_dropped_header_too_small; /* dword 17*/ + u32 rx_dropped_tcp_length; /* dword 18*/ + u32 rx_dropped_runt; /* dword 19*/ + u32 rx_64_byte_packets; /* dword 20*/ + u32 rx_65_127_byte_packets; /* dword 21*/ + u32 rx_128_256_byte_packets; /* dword 22*/ + u32 rx_256_511_byte_packets; /* dword 23*/ + u32 rx_512_1023_byte_packets; /* dword 24*/ + u32 rx_1024_1518_byte_packets; /* dword 25*/ + u32 rx_1519_2047_byte_packets; /* dword 26*/ + u32 rx_2048_4095_byte_packets; /* dword 27*/ + u32 rx_4096_8191_byte_packets; /* dword 28*/ + u32 rx_8192_9216_byte_packets; /* dword 29*/ + u32 rx_ip_checksum_errs; /* dword 30*/ + u32 rx_tcp_checksum_errs; /* dword 31*/ + u32 rx_udp_checksum_errs; /* dword 32*/ + u32 rx_non_rss_packets; /* dword 33*/ + u32 rx_ipv4_packets; /* dword 34*/ + u32 rx_ipv6_packets; /* dword 35*/ + u32 rx_ipv4_bytes_lsd; /* dword 36*/ + u32 rx_ipv4_bytes_msd; /* dword 37*/ + u32 rx_ipv6_bytes_lsd; /* dword 38*/ + u32 rx_ipv6_bytes_msd; /* dword 39*/ + u32 
rx_chute1_packets; /* dword 40*/ + u32 rx_chute2_packets; /* dword 41*/ + u32 rx_chute3_packets; /* dword 42*/ + u32 rx_management_packets; /* dword 43*/ + u32 rx_switched_unicast_packets; /* dword 44*/ + u32 rx_switched_multicast_packets; /* dword 45*/ + u32 rx_switched_broadcast_packets; /* dword 46*/ + u32 tx_bytes_lsd; /* dword 47*/ + u32 tx_bytes_msd; /* dword 48*/ + u32 tx_unicastframes; /* dword 49*/ + u32 tx_multicastframes; /* dword 50*/ + u32 tx_broadcastframes; /* dword 51*/ + u32 tx_pauseframes; /* dword 52*/ + u32 tx_controlframes; /* dword 53*/ + u32 tx_64_byte_packets; /* dword 54*/ + u32 tx_65_127_byte_packets; /* dword 55*/ + u32 tx_128_256_byte_packets; /* dword 56*/ + u32 tx_256_511_byte_packets; /* dword 57*/ + u32 tx_512_1023_byte_packets; /* dword 58*/ + u32 tx_1024_1518_byte_packets; /* dword 59*/ + u32 tx_1519_2047_byte_packets; /* dword 60*/ + u32 tx_2048_4095_byte_packets; /* dword 61*/ + u32 tx_4096_8191_byte_packets; /* dword 62*/ + u32 tx_8192_9216_byte_packets; /* dword 63*/ + u32 rx_fifo_overflow; /* dword 64*/ + u32 rx_input_fifo_overflow; /* dword 65*/ +}; + +struct be_rxf_stats_v0 { + struct be_port_rxf_stats_v0 port[2]; + u32 rx_drops_no_pbuf; /* dword 132*/ + u32 rx_drops_no_txpb; /* dword 133*/ + u32 rx_drops_no_erx_descr; /* dword 134*/ + u32 rx_drops_no_tpre_descr; /* dword 135*/ + u32 management_rx_port_packets; /* dword 136*/ + u32 management_rx_port_bytes; /* dword 137*/ + u32 management_rx_port_pause_frames; /* dword 138*/ + u32 management_rx_port_errors; /* dword 139*/ + u32 management_tx_port_packets; /* dword 140*/ + u32 management_tx_port_bytes; /* dword 141*/ + u32 management_tx_port_pause; /* dword 142*/ + u32 management_rx_port_rxfifo_overflow; /* dword 143*/ + u32 rx_drops_too_many_frags; /* dword 144*/ + u32 rx_drops_invalid_ring; /* dword 145*/ + u32 forwarded_packets; /* dword 146*/ + u32 rx_drops_mtu; /* dword 147*/ + u32 rsvd0[7]; + u32 port0_jabber_events; + u32 port1_jabber_events; + u32 rsvd1[6]; +}; + +struct be_erx_stats_v0 { + u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/ + u32 rsvd[4]; +}; + +struct be_pmem_stats { + u32 eth_red_drops; + u32 rsvd[5]; +}; + +struct be_hw_stats_v0 { + struct be_rxf_stats_v0 rxf; + u32 rsvd[48]; + struct be_erx_stats_v0 erx; + struct be_pmem_stats pmem; +}; + +struct be_cmd_req_get_stats_v0 { + struct be_cmd_req_hdr hdr; + u8 rsvd[sizeof(struct be_hw_stats_v0)]; +}; + +struct be_cmd_resp_get_stats_v0 { + struct be_cmd_resp_hdr hdr; + struct be_hw_stats_v0 hw_stats; +}; + +struct lancer_pport_stats { + u32 tx_packets_lo; + u32 tx_packets_hi; + u32 tx_unicast_packets_lo; + u32 tx_unicast_packets_hi; + u32 tx_multicast_packets_lo; + u32 tx_multicast_packets_hi; + u32 tx_broadcast_packets_lo; + u32 tx_broadcast_packets_hi; + u32 tx_bytes_lo; + u32 tx_bytes_hi; + u32 tx_unicast_bytes_lo; + u32 tx_unicast_bytes_hi; + u32 tx_multicast_bytes_lo; + u32 tx_multicast_bytes_hi; + u32 tx_broadcast_bytes_lo; + u32 tx_broadcast_bytes_hi; + u32 tx_discards_lo; + u32 tx_discards_hi; + u32 tx_errors_lo; + u32 tx_errors_hi; + u32 tx_pause_frames_lo; + u32 tx_pause_frames_hi; + u32 tx_pause_on_frames_lo; + u32 tx_pause_on_frames_hi; + u32 tx_pause_off_frames_lo; + u32 tx_pause_off_frames_hi; + u32 tx_internal_mac_errors_lo; + u32 tx_internal_mac_errors_hi; + u32 tx_control_frames_lo; + u32 tx_control_frames_hi; + u32 tx_packets_64_bytes_lo; + u32 tx_packets_64_bytes_hi; + u32 tx_packets_65_to_127_bytes_lo; + u32 tx_packets_65_to_127_bytes_hi; + u32 tx_packets_128_to_255_bytes_lo; + u32 
tx_packets_128_to_255_bytes_hi; + u32 tx_packets_256_to_511_bytes_lo; + u32 tx_packets_256_to_511_bytes_hi; + u32 tx_packets_512_to_1023_bytes_lo; + u32 tx_packets_512_to_1023_bytes_hi; + u32 tx_packets_1024_to_1518_bytes_lo; + u32 tx_packets_1024_to_1518_bytes_hi; + u32 tx_packets_1519_to_2047_bytes_lo; + u32 tx_packets_1519_to_2047_bytes_hi; + u32 tx_packets_2048_to_4095_bytes_lo; + u32 tx_packets_2048_to_4095_bytes_hi; + u32 tx_packets_4096_to_8191_bytes_lo; + u32 tx_packets_4096_to_8191_bytes_hi; + u32 tx_packets_8192_to_9216_bytes_lo; + u32 tx_packets_8192_to_9216_bytes_hi; + u32 tx_lso_packets_lo; + u32 tx_lso_packets_hi; + u32 rx_packets_lo; + u32 rx_packets_hi; + u32 rx_unicast_packets_lo; + u32 rx_unicast_packets_hi; + u32 rx_multicast_packets_lo; + u32 rx_multicast_packets_hi; + u32 rx_broadcast_packets_lo; + u32 rx_broadcast_packets_hi; + u32 rx_bytes_lo; + u32 rx_bytes_hi; + u32 rx_unicast_bytes_lo; + u32 rx_unicast_bytes_hi; + u32 rx_multicast_bytes_lo; + u32 rx_multicast_bytes_hi; + u32 rx_broadcast_bytes_lo; + u32 rx_broadcast_bytes_hi; + u32 rx_unknown_protos; + u32 rsvd_69; /* Word 69 is reserved */ + u32 rx_discards_lo; + u32 rx_discards_hi; + u32 rx_errors_lo; + u32 rx_errors_hi; + u32 rx_crc_errors_lo; + u32 rx_crc_errors_hi; + u32 rx_alignment_errors_lo; + u32 rx_alignment_errors_hi; + u32 rx_symbol_errors_lo; + u32 rx_symbol_errors_hi; + u32 rx_pause_frames_lo; + u32 rx_pause_frames_hi; + u32 rx_pause_on_frames_lo; + u32 rx_pause_on_frames_hi; + u32 rx_pause_off_frames_lo; + u32 rx_pause_off_frames_hi; + u32 rx_frames_too_long_lo; + u32 rx_frames_too_long_hi; + u32 rx_internal_mac_errors_lo; + u32 rx_internal_mac_errors_hi; + u32 rx_undersize_packets; + u32 rx_oversize_packets; + u32 rx_fragment_packets; + u32 rx_jabbers; + u32 rx_control_frames_lo; + u32 rx_control_frames_hi; + u32 rx_control_frames_unknown_opcode_lo; + u32 rx_control_frames_unknown_opcode_hi; + u32 rx_in_range_errors; + u32 rx_out_of_range_errors; + u32 rx_address_filtered; + u32 rx_vlan_filtered; + u32 rx_dropped_too_small; + u32 rx_dropped_too_short; + u32 rx_dropped_header_too_small; + u32 rx_dropped_invalid_tcp_length; + u32 rx_dropped_runt; + u32 rx_ip_checksum_errors; + u32 rx_tcp_checksum_errors; + u32 rx_udp_checksum_errors; + u32 rx_non_rss_packets; + u32 rsvd_111; + u32 rx_ipv4_packets_lo; + u32 rx_ipv4_packets_hi; + u32 rx_ipv6_packets_lo; + u32 rx_ipv6_packets_hi; + u32 rx_ipv4_bytes_lo; + u32 rx_ipv4_bytes_hi; + u32 rx_ipv6_bytes_lo; + u32 rx_ipv6_bytes_hi; + u32 rx_nic_packets_lo; + u32 rx_nic_packets_hi; + u32 rx_tcp_packets_lo; + u32 rx_tcp_packets_hi; + u32 rx_iscsi_packets_lo; + u32 rx_iscsi_packets_hi; + u32 rx_management_packets_lo; + u32 rx_management_packets_hi; + u32 rx_switched_unicast_packets_lo; + u32 rx_switched_unicast_packets_hi; + u32 rx_switched_multicast_packets_lo; + u32 rx_switched_multicast_packets_hi; + u32 rx_switched_broadcast_packets_lo; + u32 rx_switched_broadcast_packets_hi; + u32 num_forwards_lo; + u32 num_forwards_hi; + u32 rx_fifo_overflow; + u32 rx_input_fifo_overflow; + u32 rx_drops_too_many_frags_lo; + u32 rx_drops_too_many_frags_hi; + u32 rx_drops_invalid_queue; + u32 rsvd_141; + u32 rx_drops_mtu_lo; + u32 rx_drops_mtu_hi; + u32 rx_packets_64_bytes_lo; + u32 rx_packets_64_bytes_hi; + u32 rx_packets_65_to_127_bytes_lo; + u32 rx_packets_65_to_127_bytes_hi; + u32 rx_packets_128_to_255_bytes_lo; + u32 rx_packets_128_to_255_bytes_hi; + u32 rx_packets_256_to_511_bytes_lo; + u32 rx_packets_256_to_511_bytes_hi; + u32 rx_packets_512_to_1023_bytes_lo; + u32 
rx_packets_512_to_1023_bytes_hi; + u32 rx_packets_1024_to_1518_bytes_lo; + u32 rx_packets_1024_to_1518_bytes_hi; + u32 rx_packets_1519_to_2047_bytes_lo; + u32 rx_packets_1519_to_2047_bytes_hi; + u32 rx_packets_2048_to_4095_bytes_lo; + u32 rx_packets_2048_to_4095_bytes_hi; + u32 rx_packets_4096_to_8191_bytes_lo; + u32 rx_packets_4096_to_8191_bytes_hi; + u32 rx_packets_8192_to_9216_bytes_lo; + u32 rx_packets_8192_to_9216_bytes_hi; +}; + +struct pport_stats_params { + u16 pport_num; + u8 rsvd; + u8 reset_stats; +}; + +struct lancer_cmd_req_pport_stats { + struct be_cmd_req_hdr hdr; + union { + struct pport_stats_params params; + u8 rsvd[sizeof(struct lancer_pport_stats)]; + } cmd_params; +}; + +struct lancer_cmd_resp_pport_stats { + struct be_cmd_resp_hdr hdr; + struct lancer_pport_stats pport_stats; +}; + +static inline struct lancer_pport_stats* + pport_stats_from_cmd(struct be_adapter *adapter) +{ + struct lancer_cmd_resp_pport_stats *cmd = adapter->stats_cmd.va; + return &cmd->pport_stats; +} + +struct be_cmd_req_get_cntl_addnl_attribs { + struct be_cmd_req_hdr hdr; + u8 rsvd[8]; +}; + +struct be_cmd_resp_get_cntl_addnl_attribs { + struct be_cmd_resp_hdr hdr; + u16 ipl_file_number; + u8 ipl_file_version; + u8 rsvd0; + u8 on_die_temperature; /* in degrees centigrade*/ + u8 rsvd1[3]; +}; + +struct be_cmd_req_vlan_config { + struct be_cmd_req_hdr hdr; + u8 interface_id; + u8 promiscuous; + u8 untagged; + u8 num_vlan; + u16 normal_vlan[64]; +} __packed; + +/******************* RX FILTER ******************************/ +#define BE_MAX_MC 64 /* set mcast promisc if > 64 */ +struct macaddr { + u8 byte[ETH_ALEN]; +}; + +struct be_cmd_req_rx_filter { + struct be_cmd_req_hdr hdr; + u32 global_flags_mask; + u32 global_flags; + u32 if_flags_mask; + u32 if_flags; + u32 if_id; + u32 mcast_num; + struct macaddr mcast_mac[BE_MAX_MC]; +}; + +/******************** Link Status Query *******************/ +struct be_cmd_req_link_status { + struct be_cmd_req_hdr hdr; + u32 rsvd; +}; + +enum { + PHY_LINK_DUPLEX_NONE = 0x0, + PHY_LINK_DUPLEX_HALF = 0x1, + PHY_LINK_DUPLEX_FULL = 0x2 +}; + +enum { + PHY_LINK_SPEED_ZERO = 0x0, /* => No link */ + PHY_LINK_SPEED_10MBPS = 0x1, + PHY_LINK_SPEED_100MBPS = 0x2, + PHY_LINK_SPEED_1GBPS = 0x3, + PHY_LINK_SPEED_10GBPS = 0x4, + PHY_LINK_SPEED_20GBPS = 0x5, + PHY_LINK_SPEED_25GBPS = 0x6, + PHY_LINK_SPEED_40GBPS = 0x7 +}; + +struct be_cmd_resp_link_status { + struct be_cmd_resp_hdr hdr; + u8 physical_port; + u8 mac_duplex; + u8 mac_speed; + u8 mac_fault; + u8 mgmt_mac_duplex; + u8 mgmt_mac_speed; + u16 link_speed; + u8 logical_link_status; + u8 rsvd1[3]; +} __packed; + +/******************** Port Identification ***************************/ +/* Identifies the type of port attached to NIC */ +struct be_cmd_req_port_type { + struct be_cmd_req_hdr hdr; + __le32 page_num; + __le32 port; +}; + +enum { + TR_PAGE_A0 = 0xa0, + TR_PAGE_A2 = 0xa2 +}; + +/* From SFF-8436 QSFP+ spec */ +#define QSFP_PLUS_CABLE_TYPE_OFFSET 0x83 +#define QSFP_PLUS_CR4_CABLE 0x8 +#define QSFP_PLUS_SR4_CABLE 0x4 +#define QSFP_PLUS_LR4_CABLE 0x2 + +/* From SFF-8472 spec */ +#define SFP_PLUS_SFF_8472_COMP 0x5E +#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8 +#define SFP_PLUS_COPPER_CABLE 0x4 +#define SFP_VENDOR_NAME_OFFSET 0x14 +#define SFP_VENDOR_PN_OFFSET 0x28 + +#define PAGE_DATA_LEN 256 +struct be_cmd_resp_port_type { + struct be_cmd_resp_hdr hdr; + u32 page_num; + u32 port; + u8 page_data[PAGE_DATA_LEN]; +}; + +/******************** Get FW Version *******************/ +struct be_cmd_req_get_fw_version { + struct 
be_cmd_req_hdr hdr; + u8 rsvd0[FW_VER_LEN]; + u8 rsvd1[FW_VER_LEN]; +} __packed; + +struct be_cmd_resp_get_fw_version { + struct be_cmd_resp_hdr hdr; + u8 firmware_version_string[FW_VER_LEN]; + u8 fw_on_flash_version_string[FW_VER_LEN]; +} __packed; + +/******************** Set Flow Control *******************/ +struct be_cmd_req_set_flow_control { + struct be_cmd_req_hdr hdr; + u16 tx_flow_control; + u16 rx_flow_control; +} __packed; + +/******************** Get Flow Control *******************/ +struct be_cmd_req_get_flow_control { + struct be_cmd_req_hdr hdr; + u32 rsvd; +}; + +struct be_cmd_resp_get_flow_control { + struct be_cmd_resp_hdr hdr; + u16 tx_flow_control; + u16 rx_flow_control; +} __packed; + +/******************** Modify EQ Delay *******************/ +struct be_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +}; + +struct be_cmd_req_modify_eq_delay { + struct be_cmd_req_hdr hdr; + u32 num_eq; + struct be_set_eqd set_eqd[MAX_EVT_QS]; +} __packed; + +/******************** Get FW Config *******************/ +/* The HW can come up in either of the following multi-channel modes + * based on the skew/IPL. + */ +#define RDMA_ENABLED 0x4 +#define QNQ_MODE 0x400 +#define VNIC_MODE 0x20000 +#define UMC_ENABLED 0x1000000 +struct be_cmd_req_query_fw_cfg { + struct be_cmd_req_hdr hdr; + u32 rsvd[31]; +}; + +/* ASIC revisions */ +#define ASIC_REV_B0 0x10 +#define ASIC_REV_P2 0x11 + +struct be_cmd_resp_query_fw_cfg { + struct be_cmd_resp_hdr hdr; + u32 be_config_number; + u32 asic_revision; + u32 phys_port; + u32 function_mode; + u32 rsvd[26]; + u32 function_caps; +}; + +/******************** RSS Config ****************************************/ +/* RSS type Input parameters used to compute RX hash + * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4 + * RSS_ENABLE_TCP_IPV4 SRC IPv4, DST IPv4, TCP SRC PORT, TCP DST PORT + * RSS_ENABLE_IPV6 SRC IPv6, DST IPv6 + * RSS_ENABLE_TCP_IPV6 SRC IPv6, DST IPv6, TCP SRC PORT, TCP DST PORT + * RSS_ENABLE_UDP_IPV4 SRC IPv4, DST IPv4, UDP SRC PORT, UDP DST PORT + * RSS_ENABLE_UDP_IPV6 SRC IPv6, DST IPv6, UDP SRC PORT, UDP DST PORT + * + * When multiple RSS types are enabled, HW picks the best hash policy + * based on the type of the received packet. 
+ */ +#define RSS_ENABLE_NONE 0x0 +#define RSS_ENABLE_IPV4 0x1 +#define RSS_ENABLE_TCP_IPV4 0x2 +#define RSS_ENABLE_IPV6 0x4 +#define RSS_ENABLE_TCP_IPV6 0x8 +#define RSS_ENABLE_UDP_IPV4 0x10 +#define RSS_ENABLE_UDP_IPV6 0x20 + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) + +struct be_cmd_req_rss_config { + struct be_cmd_req_hdr hdr; + u32 if_id; + u16 enable_rss; + u16 cpu_table_size_log2; + u32 hash[10]; + u8 cpu_table[128]; + u8 flush; + u8 rsvd0[3]; +}; + +/******************** Port Beacon ***************************/ + +#define BEACON_STATE_ENABLED 0x1 +#define BEACON_STATE_DISABLED 0x0 + +struct be_cmd_req_enable_disable_beacon { + struct be_cmd_req_hdr hdr; + u8 port_num; + u8 beacon_state; + u8 beacon_duration; + u8 status_duration; +} __packed; + +struct be_cmd_req_get_beacon_state { + struct be_cmd_req_hdr hdr; + u8 port_num; + u8 rsvd0; + u16 rsvd1; +} __packed; + +struct be_cmd_resp_get_beacon_state { + struct be_cmd_resp_hdr resp_hdr; + u8 beacon_state; + u8 rsvd0[3]; +} __packed; + +/* Flashrom related descriptors */ +#define MAX_FLASH_COMP 32 + +#define OPTYPE_ISCSI_ACTIVE 0 +#define OPTYPE_REDBOOT 1 +#define OPTYPE_BIOS 2 +#define OPTYPE_PXE_BIOS 3 +#define OPTYPE_OFFSET_SPECIFIED 7 +#define OPTYPE_FCOE_BIOS 8 +#define OPTYPE_ISCSI_BACKUP 9 +#define OPTYPE_FCOE_FW_ACTIVE 10 +#define OPTYPE_FCOE_FW_BACKUP 11 +#define OPTYPE_NCSI_FW 13 +#define OPTYPE_REDBOOT_DIR 18 +#define OPTYPE_REDBOOT_CONFIG 19 +#define OPTYPE_SH_PHY_FW 21 +#define OPTYPE_FLASHISM_JUMPVECTOR 22 +#define OPTYPE_UFI_DIR 23 +#define OPTYPE_PHY_FW 99 + +#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 262144 /* Max OPTION ROM image sz */ +#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 262144 /* Max Redboot image sz */ +#define FLASH_IMAGE_MAX_SIZE_g2 1310720 /* Max firmware image size */ + +#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 262144 +#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 262144 +#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 524288 /* Max OPTION ROM image sz */ +#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 1048576 /* Max Redboot image sz */ +#define FLASH_IMAGE_MAX_SIZE_g3 2097152 /* Max firmware image size */ + +/* Offsets for components on Flash. 
*/ +#define FLASH_REDBOOT_START_g2 0 +#define FLASH_FCoE_BIOS_START_g2 524288 +#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 1048576 +#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 2359296 +#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 3670016 +#define FLASH_FCoE_BACKUP_IMAGE_START_g2 4980736 +#define FLASH_iSCSI_BIOS_START_g2 7340032 +#define FLASH_PXE_BIOS_START_g2 7864320 + +#define FLASH_REDBOOT_START_g3 262144 +#define FLASH_PHY_FW_START_g3 1310720 +#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 2097152 +#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 4194304 +#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 6291456 +#define FLASH_FCoE_BACKUP_IMAGE_START_g3 8388608 +#define FLASH_iSCSI_BIOS_START_g3 12582912 +#define FLASH_PXE_BIOS_START_g3 13107200 +#define FLASH_FCoE_BIOS_START_g3 13631488 +#define FLASH_NCSI_START_g3 15990784 + +#define IMAGE_NCSI 16 +#define IMAGE_OPTION_ROM_PXE 32 +#define IMAGE_OPTION_ROM_FCoE 33 +#define IMAGE_OPTION_ROM_ISCSI 34 +#define IMAGE_FLASHISM_JUMPVECTOR 48 +#define IMAGE_FIRMWARE_iSCSI 160 +#define IMAGE_FIRMWARE_FCoE 162 +#define IMAGE_FIRMWARE_BACKUP_iSCSI 176 +#define IMAGE_FIRMWARE_BACKUP_FCoE 178 +#define IMAGE_FIRMWARE_PHY 192 +#define IMAGE_REDBOOT_DIR 208 +#define IMAGE_REDBOOT_CONFIG 209 +#define IMAGE_UFI_DIR 210 +#define IMAGE_BOOT_CODE 224 + +struct controller_id { + u32 vendor; + u32 device; + u32 subvendor; + u32 subdevice; +}; + +struct flash_comp { + unsigned long offset; + int optype; + int size; + int img_type; +}; + +struct image_hdr { + u32 imageid; + u32 imageoffset; + u32 imagelength; + u32 image_checksum; + u8 image_version[32]; +}; + +struct flash_file_hdr_g2 { + u8 sign[32]; + u32 cksum; + u32 antidote; + struct controller_id cont_id; + u32 file_len; + u32 chunk_num; + u32 total_chunks; + u32 num_imgs; + u8 build[24]; +}; + +/* First letter of the build version of the image */ +#define BLD_STR_UFI_TYPE_BE2 '2' +#define BLD_STR_UFI_TYPE_BE3 '3' +#define BLD_STR_UFI_TYPE_SH '4' + +struct flash_file_hdr_g3 { + u8 sign[52]; + u8 ufi_version[4]; + u32 file_len; + u32 cksum; + u32 antidote; + u32 num_imgs; + u8 build[24]; + u8 asic_type_rev; + u8 rsvd[31]; +}; + +struct flash_section_hdr { + u32 format_rev; + u32 cksum; + u32 antidote; + u32 num_images; + u8 id_string[128]; + u32 rsvd[4]; +} __packed; + +struct flash_section_hdr_g2 { + u32 format_rev; + u32 cksum; + u32 antidote; + u32 build_num; + u8 id_string[128]; + u32 rsvd[8]; +} __packed; + +struct flash_section_entry { + u32 type; + u32 offset; + u32 pad_size; + u32 image_size; + u32 cksum; + u32 entry_point; + u16 optype; + u16 rsvd0; + u32 rsvd1; + u8 ver_data[32]; +} __packed; + +struct flash_section_info { + u8 cookie[32]; + struct flash_section_hdr fsec_hdr; + struct flash_section_entry fsec_entry[32]; +} __packed; + +struct flash_section_info_g2 { + u8 cookie[32]; + struct flash_section_hdr_g2 fsec_hdr; + struct flash_section_entry fsec_entry[32]; +} __packed; + +/****************** Firmware Flash ******************/ +#define FLASHROM_OPER_FLASH 1 +#define FLASHROM_OPER_SAVE 2 +#define FLASHROM_OPER_REPORT 4 +#define FLASHROM_OPER_PHY_FLASH 9 +#define FLASHROM_OPER_PHY_SAVE 10 + +struct flashrom_params { + u32 op_code; + u32 op_type; + u32 data_buf_size; + u32 offset; +}; + +struct be_cmd_write_flashrom { + struct be_cmd_req_hdr hdr; + struct flashrom_params params; + u8 data_buf[32768]; + u8 rsvd[4]; +} __packed; + +/* cmd to read flash crc */ +struct be_cmd_read_flash_crc { + struct be_cmd_req_hdr hdr; + struct flashrom_params params; + u8 crc[4]; + u8 rsvd[4]; +} __packed; + +/**************** 
Lancer Firmware Flash ************/ +struct amap_lancer_write_obj_context { + u8 write_length[24]; + u8 reserved1[7]; + u8 eof; +} __packed; + +struct lancer_cmd_req_write_object { + struct be_cmd_req_hdr hdr; + u8 context[sizeof(struct amap_lancer_write_obj_context) / 8]; + u32 write_offset; + u8 object_name[104]; + u32 descriptor_count; + u32 buf_len; + u32 addr_low; + u32 addr_high; +}; + +#define LANCER_NO_RESET_NEEDED 0x00 +#define LANCER_FW_RESET_NEEDED 0x02 +struct lancer_cmd_resp_write_object { + u8 opcode; + u8 subsystem; + u8 rsvd1[2]; + u8 status; + u8 additional_status; + u8 rsvd2[2]; + u32 resp_len; + u32 actual_resp_len; + u32 actual_write_len; + u8 change_status; + u8 rsvd3[3]; +}; + +/************************ Lancer Read FW info **************/ +#define LANCER_READ_FILE_CHUNK (32*1024) +#define LANCER_READ_FILE_EOF_MASK 0x80000000 + +#define LANCER_FW_DUMP_FILE "/dbg/dump.bin" +#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd" +#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd" + +struct lancer_cmd_req_read_object { + struct be_cmd_req_hdr hdr; + u32 desired_read_len; + u32 read_offset; + u8 object_name[104]; + u32 descriptor_count; + u32 buf_len; + u32 addr_low; + u32 addr_high; +}; + +struct lancer_cmd_resp_read_object { + u8 opcode; + u8 subsystem; + u8 rsvd1[2]; + u8 status; + u8 additional_status; + u8 rsvd2[2]; + u32 resp_len; + u32 actual_resp_len; + u32 actual_read_len; + u32 eof; +}; + +struct lancer_cmd_req_delete_object { + struct be_cmd_req_hdr hdr; + u32 rsvd1; + u32 rsvd2; + u8 object_name[104]; +}; + +/************************ WOL *******************************/ +struct be_cmd_req_acpi_wol_magic_config{ + struct be_cmd_req_hdr hdr; + u32 rsvd0[145]; + u8 magic_mac[6]; + u8 rsvd2[2]; +} __packed; + +struct be_cmd_req_acpi_wol_magic_config_v1 { + struct be_cmd_req_hdr hdr; + u8 rsvd0[2]; + u8 query_options; + u8 rsvd1[5]; + u32 rsvd2[288]; + u8 magic_mac[6]; + u8 rsvd3[22]; +} __packed; + +struct be_cmd_resp_acpi_wol_magic_config_v1 { + struct be_cmd_resp_hdr hdr; + u8 rsvd0[2]; + u8 wol_settings; + u8 rsvd1[5]; + u32 rsvd2[295]; +} __packed; + +#define BE_GET_WOL_CAP 2 + +#define BE_WOL_CAP 0x1 +#define BE_PME_D0_CAP 0x8 +#define BE_PME_D1_CAP 0x10 +#define BE_PME_D2_CAP 0x20 +#define BE_PME_D3HOT_CAP 0x40 +#define BE_PME_D3COLD_CAP 0x80 + +/********************** LoopBack test *********************/ +struct be_cmd_req_loopback_test { + struct be_cmd_req_hdr hdr; + u32 loopback_type; + u32 num_pkts; + u64 pattern; + u32 src_port; + u32 dest_port; + u32 pkt_size; +}; + +struct be_cmd_resp_loopback_test { + struct be_cmd_resp_hdr resp_hdr; + u32 status; + u32 num_txfer; + u32 num_rx; + u32 miscomp_off; + u32 ticks_compl; +}; + +struct be_cmd_req_set_lmode { + struct be_cmd_req_hdr hdr; + u8 src_port; + u8 dest_port; + u8 loopback_type; + u8 loopback_state; +}; + +/********************** DDR DMA test *********************/ +struct be_cmd_req_ddrdma_test { + struct be_cmd_req_hdr hdr; + u64 pattern; + u32 byte_count; + u32 rsvd0; + u8 snd_buff[4096]; + u8 rsvd1[4096]; +}; + +struct be_cmd_resp_ddrdma_test { + struct be_cmd_resp_hdr hdr; + u64 pattern; + u32 byte_cnt; + u32 snd_err; + u8 rsvd0[4096]; + u8 rcv_buff[4096]; +}; + +/*********************** SEEPROM Read ***********************/ + +#define BE_READ_SEEPROM_LEN 1024 +struct be_cmd_req_seeprom_read { + struct be_cmd_req_hdr hdr; + u8 rsvd0[BE_READ_SEEPROM_LEN]; +}; + +struct be_cmd_resp_seeprom_read { + struct be_cmd_req_hdr hdr; + u8 seeprom_data[BE_READ_SEEPROM_LEN]; +}; + +enum { + PHY_TYPE_CX4_10GB = 0, + 
PHY_TYPE_XFP_10GB, + PHY_TYPE_SFP_1GB, + PHY_TYPE_SFP_PLUS_10GB, + PHY_TYPE_KR_10GB, + PHY_TYPE_KX4_10GB, + PHY_TYPE_BASET_10GB, + PHY_TYPE_BASET_1GB, + PHY_TYPE_BASEX_1GB, + PHY_TYPE_SGMII, + PHY_TYPE_QSFP, + PHY_TYPE_KR4_40GB, + PHY_TYPE_KR2_20GB, + PHY_TYPE_TN_8022, + PHY_TYPE_DISABLED = 255 +}; + +#define BE_SUPPORTED_SPEED_NONE 0 +#define BE_SUPPORTED_SPEED_10MBPS 1 +#define BE_SUPPORTED_SPEED_100MBPS 2 +#define BE_SUPPORTED_SPEED_1GBPS 4 +#define BE_SUPPORTED_SPEED_10GBPS 8 +#define BE_SUPPORTED_SPEED_20GBPS 0x10 +#define BE_SUPPORTED_SPEED_40GBPS 0x20 + +#define BE_AN_EN 0x2 +#define BE_PAUSE_SYM_EN 0x80 + +/* MAC speed valid values */ +#define SPEED_DEFAULT 0x0 +#define SPEED_FORCED_10GB 0x1 +#define SPEED_FORCED_1GB 0x2 +#define SPEED_AUTONEG_10GB 0x3 +#define SPEED_AUTONEG_1GB 0x4 +#define SPEED_AUTONEG_100MB 0x5 +#define SPEED_AUTONEG_10GB_1GB 0x6 +#define SPEED_AUTONEG_10GB_1GB_100MB 0x7 +#define SPEED_AUTONEG_1GB_100MB 0x8 +#define SPEED_AUTONEG_10MB 0x9 +#define SPEED_AUTONEG_1GB_100MB_10MB 0xa +#define SPEED_AUTONEG_100MB_10MB 0xb +#define SPEED_FORCED_100MB 0xc +#define SPEED_FORCED_10MB 0xd + +struct be_cmd_req_get_phy_info { + struct be_cmd_req_hdr hdr; + u8 rsvd0[24]; +}; + +struct be_phy_info { + u16 phy_type; + u16 interface_type; + u32 misc_params; + u16 ext_phy_details; + u16 rsvd; + u16 auto_speeds_supported; + u16 fixed_speeds_supported; + u32 future_use[2]; +}; + +struct be_cmd_resp_get_phy_info { + struct be_cmd_req_hdr hdr; + struct be_phy_info phy_info; +}; + +/*********************** Set QOS ***********************/ + +#define BE_QOS_BITS_NIC 1 + +struct be_cmd_req_set_qos { + struct be_cmd_req_hdr hdr; + u32 valid_bits; + u32 max_bps_nic; + u32 rsvd[7]; +}; + +/*********************** Controller Attributes ***********************/ +struct mgmt_hba_attribs { + u32 rsvd0[24]; + u8 controller_model_number[32]; + u32 rsvd1[79]; + u8 rsvd2[3]; + u8 phy_port; + u32 rsvd3[13]; +} __packed; + +struct mgmt_controller_attrib { + struct mgmt_hba_attribs hba_attribs; + u32 rsvd0[10]; +} __packed; + +struct be_cmd_req_cntl_attribs { + struct be_cmd_req_hdr hdr; +}; + +struct be_cmd_resp_cntl_attribs { + struct be_cmd_resp_hdr hdr; + struct mgmt_controller_attrib attribs; +}; + +/*********************** Set driver function ***********************/ +#define CAPABILITY_SW_TIMESTAMPS 2 +#define CAPABILITY_BE3_NATIVE_ERX_API 4 + +struct be_cmd_req_set_func_cap { + struct be_cmd_req_hdr hdr; + u32 valid_cap_flags; + u32 cap_flags; + u8 rsvd[212]; +}; + +struct be_cmd_resp_set_func_cap { + struct be_cmd_resp_hdr hdr; + u32 valid_cap_flags; + u32 cap_flags; + u8 rsvd[212]; +}; + +/*********************** Function Privileges ***********************/ +enum { + BE_PRIV_DEFAULT = 0x1, + BE_PRIV_LNKQUERY = 0x2, + BE_PRIV_LNKSTATS = 0x4, + BE_PRIV_LNKMGMT = 0x8, + BE_PRIV_LNKDIAG = 0x10, + BE_PRIV_UTILQUERY = 0x20, + BE_PRIV_FILTMGMT = 0x40, + BE_PRIV_IFACEMGMT = 0x80, + BE_PRIV_VHADM = 0x100, + BE_PRIV_DEVCFG = 0x200, + BE_PRIV_DEVSEC = 0x400 +}; +#define MAX_PRIVILEGES (BE_PRIV_VHADM | BE_PRIV_DEVCFG | \ + BE_PRIV_DEVSEC) +#define MIN_PRIVILEGES BE_PRIV_DEFAULT + +struct be_cmd_priv_map { + u8 opcode; + u8 subsystem; + u32 priv_mask; +}; + +struct be_cmd_req_get_fn_privileges { + struct be_cmd_req_hdr hdr; + u32 rsvd; +}; + +struct be_cmd_resp_get_fn_privileges { + struct be_cmd_resp_hdr hdr; + u32 privilege_mask; +}; + +struct be_cmd_req_set_fn_privileges { + struct be_cmd_req_hdr hdr; + u32 privileges; /* Used by BE3, SH-R */ + u32 privileges_lancer; /* Used by Lancer */ +}; + 
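/* Editor's illustrative sketch, not part of this patch: how a caller might
 * gate a filtering operation on the BE_PRIV_* bits defined above.  The
 * helper name has_filter_privilege() and its cmd_privileges parameter are
 * hypothetical; the driver itself uses check_privilege(adapter, ...)
 * (declared later in this header), which tests adapter->cmd_privileges in
 * the same way.
 */
static inline bool has_filter_privilege(u32 cmd_privileges)
{
	/* MAC/VLAN filter programming requires the BE_PRIV_FILTMGMT (0x40)
	 * bit in the privilege mask granted by firmware.
	 */
	return (cmd_privileges & BE_PRIV_FILTMGMT) != 0;
}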
+/******************** GET/SET_MACLIST **************************/ +#define BE_MAX_MAC 64 +struct be_cmd_req_get_mac_list { + struct be_cmd_req_hdr hdr; + u8 mac_type; + u8 perm_override; + u16 iface_id; + u32 mac_id; + u32 rsvd[3]; +} __packed; + +struct get_list_macaddr { + u16 mac_addr_size; + union { + u8 macaddr[6]; + struct { + u8 rsvd[2]; + u32 mac_id; + } __packed s_mac_id; + } __packed mac_addr_id; +} __packed; + +struct be_cmd_resp_get_mac_list { + struct be_cmd_resp_hdr hdr; + struct get_list_macaddr fd_macaddr; /* Factory default mac */ + struct get_list_macaddr macid_macaddr; /* soft mac */ + u8 true_mac_count; + u8 pseudo_mac_count; + u8 mac_list_size; + u8 rsvd; + /* perm override mac */ + struct get_list_macaddr macaddr_list[BE_MAX_MAC]; +} __packed; + +struct be_cmd_req_set_mac_list { + struct be_cmd_req_hdr hdr; + u8 mac_count; + u8 rsvd1; + u16 rsvd2; + struct macaddr mac[BE_MAX_MAC]; +} __packed; + +/*********************** HSW Config ***********************/ +#define PORT_FWD_TYPE_VEPA 0x3 +#define PORT_FWD_TYPE_VEB 0x2 + +struct amap_set_hsw_context { + u8 interface_id[16]; + u8 rsvd0[14]; + u8 pvid_valid; + u8 pport; + u8 rsvd1[6]; + u8 port_fwd_type[3]; + u8 rsvd2[7]; + u8 pvid[16]; + u8 rsvd3[32]; + u8 rsvd4[32]; + u8 rsvd5[32]; +} __packed; + +struct be_cmd_req_set_hsw_config { + struct be_cmd_req_hdr hdr; + u8 context[sizeof(struct amap_set_hsw_context) / 8]; +} __packed; + +struct amap_get_hsw_req_context { + u8 interface_id[16]; + u8 rsvd0[14]; + u8 pvid_valid; + u8 pport; +} __packed; + +struct amap_get_hsw_resp_context { + u8 rsvd0[6]; + u8 port_fwd_type[3]; + u8 rsvd1[7]; + u8 pvid[16]; + u8 rsvd2[32]; + u8 rsvd3[32]; + u8 rsvd4[32]; +} __packed; + +struct be_cmd_req_get_hsw_config { + struct be_cmd_req_hdr hdr; + u8 context[sizeof(struct amap_get_hsw_req_context) / 8]; +} __packed; + +struct be_cmd_resp_get_hsw_config { + struct be_cmd_resp_hdr hdr; + u8 context[sizeof(struct amap_get_hsw_resp_context) / 8]; + u32 rsvd; +}; + +/******************* get port names ***************/ +struct be_cmd_req_get_port_name { + struct be_cmd_req_hdr hdr; + u32 rsvd0; +}; + +struct be_cmd_resp_get_port_name { + struct be_cmd_req_hdr hdr; + u8 port_name[4]; +}; + +/*************** HW Stats Get v1 **********************************/ +#define BE_TXP_SW_SZ 48 +struct be_port_rxf_stats_v1 { + u32 rsvd0[12]; + u32 rx_crc_errors; + u32 rx_alignment_symbol_errors; + u32 rx_pause_frames; + u32 rx_priority_pause_frames; + u32 rx_control_frames; + u32 rx_in_range_errors; + u32 rx_out_range_errors; + u32 rx_frame_too_long; + u32 rx_address_filtered; + u32 rx_dropped_too_small; + u32 rx_dropped_too_short; + u32 rx_dropped_header_too_small; + u32 rx_dropped_tcp_length; + u32 rx_dropped_runt; + u32 rsvd1[10]; + u32 rx_ip_checksum_errs; + u32 rx_tcp_checksum_errs; + u32 rx_udp_checksum_errs; + u32 rsvd2[7]; + u32 rx_switched_unicast_packets; + u32 rx_switched_multicast_packets; + u32 rx_switched_broadcast_packets; + u32 rsvd3[3]; + u32 tx_pauseframes; + u32 tx_priority_pauseframes; + u32 tx_controlframes; + u32 rsvd4[10]; + u32 rxpp_fifo_overflow_drop; + u32 rx_input_fifo_overflow_drop; + u32 pmem_fifo_overflow_drop; + u32 jabber_events; + u32 rsvd5[3]; +}; + + +struct be_rxf_stats_v1 { + struct be_port_rxf_stats_v1 port[4]; + u32 rsvd0[2]; + u32 rx_drops_no_pbuf; + u32 rx_drops_no_txpb; + u32 rx_drops_no_erx_descr; + u32 rx_drops_no_tpre_descr; + u32 rsvd1[6]; + u32 rx_drops_too_many_frags; + u32 rx_drops_invalid_ring; + u32 forwarded_packets; + u32 rx_drops_mtu; + u32 rsvd2[14]; +}; + 
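/* Editor's illustrative sketch, not part of this patch: the v1 RXF stats
 * above carry one be_port_rxf_stats_v1 block per port (port[4] here versus
 * port[2] in the v0 layout).  A consumer of the GET_STATS v1 response would
 * pick out its own port's block as below; the port_num parameter is
 * hypothetical and stands in for the adapter's physical port number.
 */
static inline struct be_port_rxf_stats_v1 *
port_rxf_stats_v1(struct be_rxf_stats_v1 *rxf, u32 port_num)
{
	/* port[] is indexed by the physical port number of this function */
	return &rxf->port[port_num];
}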
+struct be_erx_stats_v1 { + u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/ + u32 rsvd[4]; +}; + +struct be_port_rxf_stats_v2 { + u32 rsvd0[10]; + u32 roce_bytes_received_lsd; + u32 roce_bytes_received_msd; + u32 rsvd1[5]; + u32 roce_frames_received; + u32 rx_crc_errors; + u32 rx_alignment_symbol_errors; + u32 rx_pause_frames; + u32 rx_priority_pause_frames; + u32 rx_control_frames; + u32 rx_in_range_errors; + u32 rx_out_range_errors; + u32 rx_frame_too_long; + u32 rx_address_filtered; + u32 rx_dropped_too_small; + u32 rx_dropped_too_short; + u32 rx_dropped_header_too_small; + u32 rx_dropped_tcp_length; + u32 rx_dropped_runt; + u32 rsvd2[10]; + u32 rx_ip_checksum_errs; + u32 rx_tcp_checksum_errs; + u32 rx_udp_checksum_errs; + u32 rsvd3[7]; + u32 rx_switched_unicast_packets; + u32 rx_switched_multicast_packets; + u32 rx_switched_broadcast_packets; + u32 rsvd4[3]; + u32 tx_pauseframes; + u32 tx_priority_pauseframes; + u32 tx_controlframes; + u32 rsvd5[10]; + u32 rxpp_fifo_overflow_drop; + u32 rx_input_fifo_overflow_drop; + u32 pmem_fifo_overflow_drop; + u32 jabber_events; + u32 rsvd6[3]; + u32 rx_drops_payload_size; + u32 rx_drops_clipped_header; + u32 rx_drops_crc; + u32 roce_drops_payload_len; + u32 roce_drops_crc; + u32 rsvd7[19]; +}; + +struct be_rxf_stats_v2 { + struct be_port_rxf_stats_v2 port[4]; + u32 rsvd0[2]; + u32 rx_drops_no_pbuf; + u32 rx_drops_no_txpb; + u32 rx_drops_no_erx_descr; + u32 rx_drops_no_tpre_descr; + u32 rsvd1[6]; + u32 rx_drops_too_many_frags; + u32 rx_drops_invalid_ring; + u32 forwarded_packets; + u32 rx_drops_mtu; + u32 rsvd2[35]; +}; + +struct be_hw_stats_v1 { + struct be_rxf_stats_v1 rxf; + u32 rsvd0[BE_TXP_SW_SZ]; + struct be_erx_stats_v1 erx; + struct be_pmem_stats pmem; + u32 rsvd1[18]; +}; + +struct be_cmd_req_get_stats_v1 { + struct be_cmd_req_hdr hdr; + u8 rsvd[sizeof(struct be_hw_stats_v1)]; +}; + +struct be_cmd_resp_get_stats_v1 { + struct be_cmd_resp_hdr hdr; + struct be_hw_stats_v1 hw_stats; +}; + +struct be_erx_stats_v2 { + u32 rx_drops_no_fragments[136]; /* dwordS 0 to 135*/ + u32 rsvd[3]; +}; + +struct be_hw_stats_v2 { + struct be_rxf_stats_v2 rxf; + u32 rsvd0[BE_TXP_SW_SZ]; + struct be_erx_stats_v2 erx; + struct be_pmem_stats pmem; + u32 rsvd1[18]; +}; + +struct be_cmd_req_get_stats_v2 { + struct be_cmd_req_hdr hdr; + u8 rsvd[sizeof(struct be_hw_stats_v2)]; +}; + +struct be_cmd_resp_get_stats_v2 { + struct be_cmd_resp_hdr hdr; + struct be_hw_stats_v2 hw_stats; +}; + +/************** get fat capabilities *******************/ +#define MAX_MODULES 27 +#define MAX_MODES 4 +#define MODE_UART 0 +#define FW_LOG_LEVEL_DEFAULT 48 +#define FW_LOG_LEVEL_FATAL 64 + +struct ext_fat_mode { + u8 mode; + u8 rsvd0; + u16 port_mask; + u32 dbg_lvl; + u64 fun_mask; +} __packed; + +struct ext_fat_modules { + u8 modules_str[32]; + u32 modules_id; + u32 num_modes; + struct ext_fat_mode trace_lvl[MAX_MODES]; +} __packed; + +struct be_fat_conf_params { + u32 max_log_entries; + u32 log_entry_size; + u8 log_type; + u8 max_log_funs; + u8 max_log_ports; + u8 rsvd0; + u32 supp_modes; + u32 num_modules; + struct ext_fat_modules module[MAX_MODULES]; +} __packed; + +struct be_cmd_req_get_ext_fat_caps { + struct be_cmd_req_hdr hdr; + u32 parameter_type; +}; + +struct be_cmd_resp_get_ext_fat_caps { + struct be_cmd_resp_hdr hdr; + struct be_fat_conf_params get_params; +}; + +struct be_cmd_req_set_ext_fat_caps { + struct be_cmd_req_hdr hdr; + struct be_fat_conf_params set_params; +}; + +#define RESOURCE_DESC_SIZE_V0 72 +#define RESOURCE_DESC_SIZE_V1 88 +#define 
PCIE_RESOURCE_DESC_TYPE_V0 0x40 +#define NIC_RESOURCE_DESC_TYPE_V0 0x41 +#define PCIE_RESOURCE_DESC_TYPE_V1 0x50 +#define NIC_RESOURCE_DESC_TYPE_V1 0x51 +#define PORT_RESOURCE_DESC_TYPE_V1 0x55 +#define MAX_RESOURCE_DESC 264 + +#define IF_CAPS_FLAGS_VALID_SHIFT 0 /* IF caps valid */ +#define VFT_SHIFT 3 /* VF template */ +#define IMM_SHIFT 6 /* Immediate */ +#define NOSV_SHIFT 7 /* No save */ + +struct be_res_desc_hdr { + u8 desc_type; + u8 desc_len; +} __packed; + +struct be_port_res_desc { + struct be_res_desc_hdr hdr; + u8 rsvd0; + u8 flags; + u8 link_num; + u8 mc_type; + u16 rsvd1; + +#define NV_TYPE_MASK 0x3 /* bits 0-1 */ +#define NV_TYPE_DISABLED 1 +#define NV_TYPE_VXLAN 3 +#define SOCVID_SHIFT 2 /* Strip outer vlan */ +#define RCVID_SHIFT 4 /* Report vlan */ + u8 nv_flags; + u8 rsvd2; + __le16 nv_port; /* vxlan/gre port */ + u32 rsvd3[19]; +} __packed; + +struct be_pcie_res_desc { + struct be_res_desc_hdr hdr; + u8 rsvd0; + u8 flags; + u16 rsvd1; + u8 pf_num; + u8 rsvd2; + u32 rsvd3; + u8 sriov_state; + u8 pf_state; + u8 pf_type; + u8 rsvd4; + u16 num_vfs; + u16 rsvd5; + u32 rsvd6[17]; +} __packed; + +struct be_nic_res_desc { + struct be_res_desc_hdr hdr; + u8 rsvd1; + +#define QUN_SHIFT 4 /* QoS is in absolute units */ + u8 flags; + u8 vf_num; + u8 rsvd2; + u8 pf_num; + u8 rsvd3; + u16 unicast_mac_count; + u8 rsvd4[6]; + u16 mcc_count; + u16 vlan_count; + u16 mcast_mac_count; + u16 txq_count; + u16 rq_count; + u16 rssq_count; + u16 lro_count; + u16 cq_count; + u16 toe_conn_count; + u16 eq_count; + u16 vlan_id; + u16 iface_count; + u32 cap_flags; + u8 link_param; + u8 rsvd6; + u16 channel_id_param; + u32 bw_min; + u32 bw_max; + u8 acpi_params; + u8 wol_param; + u16 rsvd7; + u16 tunnel_iface_count; + u16 direct_tenant_iface_count; + u32 rsvd8[6]; +} __packed; + +/************ Multi-Channel type ***********/ +enum mc_type { + MC_NONE = 0x01, + UMC = 0x02, + FLEX10 = 0x03, + vNIC1 = 0x04, + nPAR = 0x05, + UFP = 0x06, + vNIC2 = 0x07 +}; + +/* Is BE in a multi-channel mode */ +static inline bool be_is_mc(struct be_adapter *adapter) +{ + return adapter->mc_type > MC_NONE; +} + +struct be_cmd_req_get_func_config { + struct be_cmd_req_hdr hdr; +}; + +struct be_cmd_resp_get_func_config { + struct be_cmd_resp_hdr hdr; + u32 desc_count; + u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; +}; + +enum { + RESOURCE_LIMITS, + RESOURCE_MODIFIABLE +}; + +struct be_cmd_req_get_profile_config { + struct be_cmd_req_hdr hdr; + u8 rsvd; +#define ACTIVE_PROFILE_TYPE 0x2 +#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3) + u8 type; + u16 rsvd1; +}; + +struct be_cmd_resp_get_profile_config { + struct be_cmd_resp_hdr hdr; + __le16 desc_count; + u16 rsvd; + u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1]; +}; + +#define FIELD_MODIFIABLE 0xFFFF +struct be_cmd_req_set_profile_config { + struct be_cmd_req_hdr hdr; + u32 rsvd; + u32 desc_count; + u8 desc[2 * RESOURCE_DESC_SIZE_V1]; +} __packed; + +struct be_cmd_req_get_active_profile { + struct be_cmd_req_hdr hdr; + u32 rsvd; +} __packed; + +struct be_cmd_resp_get_active_profile { + struct be_cmd_resp_hdr hdr; + u16 active_profile_id; + u16 next_profile_id; +} __packed; + +struct be_cmd_enable_disable_vf { + struct be_cmd_req_hdr hdr; + u8 enable; + u8 rsvd[3]; +}; + +struct be_cmd_req_intr_set { + struct be_cmd_req_hdr hdr; + u8 intr_enabled; + u8 rsvd[3]; +}; + +static inline bool check_privilege(struct be_adapter *adapter, u32 flags) +{ + return flags & adapter->cmd_privileges ? 
true : false; +} + +/************** Get IFACE LIST *******************/ +struct be_if_desc { + u32 if_id; + u32 cap_flags; + u32 en_flags; +}; + +struct be_cmd_req_get_iface_list { + struct be_cmd_req_hdr hdr; +}; + +struct be_cmd_resp_get_iface_list { + struct be_cmd_req_hdr hdr; + u32 if_cnt; + struct be_if_desc if_desc; +}; + +/*************** Set logical link ********************/ +#define PLINK_TRACK_SHIFT 8 +struct be_cmd_req_set_ll_link { + struct be_cmd_req_hdr hdr; + u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */ +}; + +/************** Manage IFACE Filters *******************/ +#define OP_CONVERT_NORMAL_TO_TUNNEL 0 +#define OP_CONVERT_TUNNEL_TO_NORMAL 1 + +struct be_cmd_req_manage_iface_filters { + struct be_cmd_req_hdr hdr; + u8 op; + u8 rsvd0; + u8 flags; + u8 rsvd1; + u32 tunnel_iface_id; + u32 target_iface_id; + u8 mac[6]; + u16 vlan_tag; + u32 tenant_id; + u32 filter_id; + u32 cap_flags; + u32 cap_control_flags; +} __packed; + +int be_pci_fnum_get(struct be_adapter *adapter); +int be_fw_wait_ready(struct be_adapter *adapter); +int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, + bool permanent, u32 if_handle, u32 pmac_id); +int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, + u32 *pmac_id, u32 domain); +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, + u32 domain); +int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, + u32 *if_handle, u32 domain); +int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain); +int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo); +int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, + struct be_queue_info *eq, bool no_delay, + int num_cqe_dma_coalesce); +int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq, + struct be_queue_info *cq); +int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo); +int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq, + u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id); +int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, + int type); +int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q); +int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, + u8 *link_status, u32 dom); +int be_cmd_reset(struct be_adapter *adapter); +int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); +int lancer_cmd_get_pport_stats(struct be_adapter *adapter, + struct be_dma_mem *nonemb_cmd); +int be_cmd_get_fw_ver(struct be_adapter *adapter); +int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); +int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, + u32 num, u32 domain); +int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); +int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); +int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); +int be_cmd_query_fw_cfg(struct be_adapter *adapter); +int be_cmd_reset_function(struct be_adapter *adapter); +int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, + u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey); +int be_process_mcc(struct be_adapter *adapter); +int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, + u8 status, u8 state); +int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, + u32 *state); +int 
be_cmd_read_port_transceiver_data(struct be_adapter *adapter, + u8 page_num, u8 *data); +int be_cmd_query_cable_type(struct be_adapter *adapter); +int be_cmd_query_sfp_info(struct be_adapter *adapter); +int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 flash_oper, u32 flash_opcode, u32 img_offset, + u32 buf_size); +int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 data_size, u32 data_offset, + const char *obj_name, u32 *data_written, + u8 *change_status, u8 *addn_status); +int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, + u32 data_size, u32 data_offset, const char *obj_name, + u32 *data_read, u32 *eof, u8 *addn_status); +int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name); +int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, + u16 img_optype, u32 img_offset, u32 crc_offset); +int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, + struct be_dma_mem *nonemb_cmd); +int be_cmd_fw_init(struct be_adapter *adapter); +int be_cmd_fw_clean(struct be_adapter *adapter); +void be_async_mcc_enable(struct be_adapter *adapter); +void be_async_mcc_disable(struct be_adapter *adapter); +int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, + u32 loopback_type, u32 pkt_size, u32 num_pkts, + u64 pattern); +int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt, + struct be_dma_mem *cmd); +int be_cmd_get_seeprom_data(struct be_adapter *adapter, + struct be_dma_mem *nonemb_cmd); +int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, + u8 loopback_type, u8 enable); +int be_cmd_get_phy_info(struct be_adapter *adapter); +int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, + u16 link_speed, u8 domain); +void be_detect_error(struct be_adapter *adapter); +int be_cmd_get_die_temperature(struct be_adapter *adapter); +int be_cmd_get_cntl_attributes(struct be_adapter *adapter); +int be_cmd_req_native_mode(struct be_adapter *adapter); +int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); +int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); +int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, + u32 domain); +int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, + u32 vf_num); +int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, + bool *pmac_id_active, u32 *pmac_id, + u32 if_handle, u8 domain); +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac, + u32 if_handle, bool active, u32 domain); +int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); +int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, + u32 domain); +int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom); +int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, + u16 intf_id, u16 hsw_mode); +int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, + u16 intf_id, u8 *mode); +int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level); +int be_cmd_get_fw_log_level(struct be_adapter *adapter); +int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd); +int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, + struct be_dma_mem *cmd, + struct be_fat_conf_params *cfgs); +int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); +int 
lancer_initiate_dump(struct be_adapter *adapter); +int lancer_delete_dump(struct be_adapter *adapter); +bool dump_present(struct be_adapter *adapter); +int lancer_test_and_set_rdy_state(struct be_adapter *adapter); +int be_cmd_query_port_name(struct be_adapter *adapter); +int be_cmd_get_func_config(struct be_adapter *adapter, + struct be_resources *res); +int be_cmd_get_profile_config(struct be_adapter *adapter, + struct be_resources *res, u8 query, u8 domain); +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); +int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, + int vf_num); +int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); +int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); +int be_cmd_set_logical_link_config(struct be_adapter *adapter, + int link_state, u8 domain); +int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port); +int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); +int be_cmd_set_sriov_config(struct be_adapter *adapter, + struct be_resources res, u16 num_vfs, + u16 num_vf_qs); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c new file mode 100644 index 000000000..2835dee5d --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -0,0 +1,1324 @@ +/* + * Copyright (C) 2005 - 2014 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + */ + +#include "be.h" +#include "be_cmds.h" +#include <linux/ethtool.h> + +struct be_ethtool_stat { + char desc[ETH_GSTRING_LEN]; + int type; + int size; + int offset; +}; + +enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT}; +#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ + offsetof(_struct, field) +#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\ + FIELDINFO(struct be_tx_stats, field) +#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\ + FIELDINFO(struct be_rx_stats, field) +#define DRVSTAT_INFO(field) #field, DRVSTAT,\ + FIELDINFO(struct be_drv_stats, field) + +static const struct be_ethtool_stat et_stats[] = { + {DRVSTAT_INFO(rx_crc_errors)}, + {DRVSTAT_INFO(rx_alignment_symbol_errors)}, + {DRVSTAT_INFO(rx_pause_frames)}, + {DRVSTAT_INFO(rx_control_frames)}, + /* Received packets dropped when the Ethernet length field + * is not equal to the actual Ethernet data length. + */ + {DRVSTAT_INFO(rx_in_range_errors)}, + /* Received packets dropped when their length field is >= 1501 bytes + * and <= 1535 bytes. + */ + {DRVSTAT_INFO(rx_out_range_errors)}, + /* Received packets dropped when they are longer than 9216 bytes */ + {DRVSTAT_INFO(rx_frame_too_long)}, + /* Received packets dropped when they don't pass the unicast or + * multicast address filtering. + */ + {DRVSTAT_INFO(rx_address_filtered)}, + /* Received packets dropped when IP packet length field is less than + * the IP header length field. + */ + {DRVSTAT_INFO(rx_dropped_too_small)}, + /* Received packets dropped when IP length field is greater than + * the actual packet length. + */ + {DRVSTAT_INFO(rx_dropped_too_short)}, + /* Received packets dropped when the IP header length field is less + * than 5. 
+ */ + {DRVSTAT_INFO(rx_dropped_header_too_small)}, + /* Received packets dropped when the TCP header length field is less + * than 5 or the TCP header length + IP header length is more + * than IP packet length. + */ + {DRVSTAT_INFO(rx_dropped_tcp_length)}, + {DRVSTAT_INFO(rx_dropped_runt)}, + /* Number of received packets dropped when a fifo for descriptors going + * into the packet demux block overflows. In normal operation, this + * fifo must never overflow. + */ + {DRVSTAT_INFO(rxpp_fifo_overflow_drop)}, + /* Received packets dropped when the RX block runs out of space in + * one of its input FIFOs. This could happen due to a long burst of + * minimum-sized (64b) frames in the receive path. + * This counter may also be erroneously incremented rarely. + */ + {DRVSTAT_INFO(rx_input_fifo_overflow_drop)}, + {DRVSTAT_INFO(rx_ip_checksum_errs)}, + {DRVSTAT_INFO(rx_tcp_checksum_errs)}, + {DRVSTAT_INFO(rx_udp_checksum_errs)}, + {DRVSTAT_INFO(tx_pauseframes)}, + {DRVSTAT_INFO(tx_controlframes)}, + {DRVSTAT_INFO(rx_priority_pause_frames)}, + {DRVSTAT_INFO(tx_priority_pauseframes)}, + /* Received packets dropped when an internal fifo going into + * main packet buffer tank (PMEM) overflows. + */ + {DRVSTAT_INFO(pmem_fifo_overflow_drop)}, + {DRVSTAT_INFO(jabber_events)}, + /* Received packets dropped due to lack of available HW packet buffers + * used to temporarily hold the received packets. + */ + {DRVSTAT_INFO(rx_drops_no_pbuf)}, + /* Received packets dropped due to input receive buffer + * descriptor fifo overflowing. + */ + {DRVSTAT_INFO(rx_drops_no_erx_descr)}, + /* Packets dropped because the internal FIFO to the offloaded TCP + * receive processing block is full. This could happen only for + * offloaded iSCSI or FCoE traffic. + */ + {DRVSTAT_INFO(rx_drops_no_tpre_descr)}, + /* Received packets dropped when they need more than 8 + * receive buffers. This cannot happen as the driver configures + * 2048 byte receive buffers. + */ + {DRVSTAT_INFO(rx_drops_too_many_frags)}, + {DRVSTAT_INFO(forwarded_packets)}, + /* Received packets dropped when the frame length + * is more than 9018 bytes + */ + {DRVSTAT_INFO(rx_drops_mtu)}, + /* Number of dma mapping errors */ + {DRVSTAT_INFO(dma_map_errors)}, + /* Number of packets dropped due to random early drop function */ + {DRVSTAT_INFO(eth_red_drops)}, + {DRVSTAT_INFO(be_on_die_temperature)}, + {DRVSTAT_INFO(rx_roce_bytes_lsd)}, + {DRVSTAT_INFO(rx_roce_bytes_msd)}, + {DRVSTAT_INFO(rx_roce_frames)}, + {DRVSTAT_INFO(roce_drops_payload_len)}, + {DRVSTAT_INFO(roce_drops_crc)} +}; + +#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) + +/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts + * are first and second members respectively. + */ +static const struct be_ethtool_stat et_rx_stats[] = { + {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */ + {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */ + {DRVSTAT_RX_INFO(rx_compl)}, + {DRVSTAT_RX_INFO(rx_compl_err)}, + {DRVSTAT_RX_INFO(rx_mcast_pkts)}, + /* Number of page allocation failures while posting receive buffers + * to HW. + */ + {DRVSTAT_RX_INFO(rx_post_fail)}, + /* Received packets dropped due to skb allocation failure */ + {DRVSTAT_RX_INFO(rx_drops_no_skbs)}, + /* Received packets dropped due to lack of available fetched buffers + * posted by the driver. 
+ */ + {DRVSTAT_RX_INFO(rx_drops_no_frags)} +}; + +#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats)) + +/* Stats related to multi TX queues: get_stats routine assumes compl is the + * first member + */ +static const struct be_ethtool_stat et_tx_stats[] = { + {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */ + /* This counter is incremented when the HW encounters an error while + * parsing the packet header of an outgoing TX request. This counter is + * applicable only for BE2, BE3 and Skyhawk based adapters. + */ + {DRVSTAT_TX_INFO(tx_hdr_parse_err)}, + /* This counter is incremented when an error occurs in the DMA + * operation associated with the TX request from the host to the device. + */ + {DRVSTAT_TX_INFO(tx_dma_err)}, + /* This counter is incremented when MAC or VLAN spoof checking is + * enabled on the interface and the TX request fails the spoof check + * in HW. + */ + {DRVSTAT_TX_INFO(tx_spoof_check_err)}, + /* This counter is incremented when the HW encounters an error while + * performing TSO offload. This counter is applicable only for Lancer + * adapters. + */ + {DRVSTAT_TX_INFO(tx_tso_err)}, + /* This counter is incremented when the HW detects Q-in-Q style VLAN + * tagging in a packet and such tagging is not expected on the outgoing + * interface. This counter is applicable only for Lancer adapters. + */ + {DRVSTAT_TX_INFO(tx_qinq_err)}, + /* This counter is incremented when the HW detects parity errors in the + * packet data. This counter is applicable only for Lancer adapters. + */ + {DRVSTAT_TX_INFO(tx_internal_parity_err)}, + {DRVSTAT_TX_INFO(tx_bytes)}, + {DRVSTAT_TX_INFO(tx_pkts)}, + /* Number of skbs queued for transmission by the driver */ + {DRVSTAT_TX_INFO(tx_reqs)}, + /* Number of times the TX queue was stopped due to lack + * of space in the TXQ. 
+ */ + {DRVSTAT_TX_INFO(tx_stops)}, + /* Pkts dropped in the driver's transmit path */ + {DRVSTAT_TX_INFO(tx_drv_drops)} +}; + +#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) + +static const char et_self_tests[][ETH_GSTRING_LEN] = { + "MAC Loopback test", + "PHY Loopback test", + "External Loopback test", + "DDR DMA test", + "Link test" +}; + +#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests) +#define BE_MAC_LOOPBACK 0x0 +#define BE_PHY_LOOPBACK 0x1 +#define BE_ONE_PORT_EXT_LOOPBACK 0x2 +#define BE_NO_LOOPBACK 0xff + +static void be_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); + if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN)) + strlcpy(drvinfo->fw_version, adapter->fw_ver, + sizeof(drvinfo->fw_version)); + else + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%s [%s]", adapter->fw_ver, adapter->fw_on_flash); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) +{ + u32 data_read = 0, eof; + u8 addn_status; + struct be_dma_mem data_len_cmd; + int status; + + memset(&data_len_cmd, 0, sizeof(data_len_cmd)); + /* data_offset and data_size should be 0 to get reg len */ + status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, + file_name, &data_read, &eof, + &addn_status); + + return data_read; +} + +static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, + u32 buf_len, void *buf) +{ + struct be_dma_mem read_cmd; + u32 read_len = 0, total_read_len = 0, chunk_size; + u32 eof = 0; + u8 addn_status; + int status = 0; + + read_cmd.size = LANCER_READ_FILE_CHUNK; + read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, + &read_cmd.dma, GFP_ATOMIC); + + if (!read_cmd.va) { + dev_err(&adapter->pdev->dev, + "Memory allocation failure while reading dump\n"); + return -ENOMEM; + } + + while ((total_read_len < buf_len) && !eof) { + chunk_size = min_t(u32, (buf_len - total_read_len), + LANCER_READ_FILE_CHUNK); + chunk_size = ALIGN(chunk_size, 4); + status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, + total_read_len, file_name, + &read_len, &eof, &addn_status); + if (!status) { + memcpy(buf + total_read_len, read_cmd.va, read_len); + total_read_len += read_len; + eof &= LANCER_READ_FILE_EOF_MASK; + } else { + status = -EIO; + break; + } + } + dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va, + read_cmd.dma); + + return status; +} + +static int be_get_reg_len(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + u32 log_size = 0; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return 0; + + if (be_physfn(adapter)) { + if (lancer_chip(adapter)) + log_size = lancer_cmd_get_file_len(adapter, + LANCER_FW_DUMP_FILE); + else + be_cmd_get_reg_len(adapter, &log_size); + } + return log_size; +} + +static void +be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (be_physfn(adapter)) { + memset(buf, 0, regs->len); + if (lancer_chip(adapter)) + lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, + regs->len, buf); + else + be_cmd_get_regs(adapter, regs->len, buf); + } +} + +static int 
be_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *et) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_aic_obj *aic = &adapter->aic_obj[0]; + + et->rx_coalesce_usecs = aic->prev_eqd; + et->rx_coalesce_usecs_high = aic->max_eqd; + et->rx_coalesce_usecs_low = aic->min_eqd; + + et->tx_coalesce_usecs = aic->prev_eqd; + et->tx_coalesce_usecs_high = aic->max_eqd; + et->tx_coalesce_usecs_low = aic->min_eqd; + + et->use_adaptive_rx_coalesce = aic->enable; + et->use_adaptive_tx_coalesce = aic->enable; + + return 0; +} + +/* TX attributes are ignored. Only RX attributes are considered + * eqd cmd is issued in the worker thread. + */ +static int be_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *et) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_aic_obj *aic = &adapter->aic_obj[0]; + struct be_eq_obj *eqo; + int i; + + for_all_evt_queues(adapter, eqo, i) { + aic->enable = et->use_adaptive_rx_coalesce; + aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); + aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd); + aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd); + aic->et_eqd = max(aic->et_eqd, aic->min_eqd); + aic++; + } + + return 0; +} + +static void be_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_rx_obj *rxo; + struct be_tx_obj *txo; + void *p; + unsigned int i, j, base = 0, start; + + for (i = 0; i < ETHTOOL_STATS_NUM; i++) { + p = (u8 *)&adapter->drv_stats + et_stats[i].offset; + data[i] = *(u32 *)p; + } + base += ETHTOOL_STATS_NUM; + + for_all_rx_queues(adapter, rxo, j) { + struct be_rx_stats *stats = rx_stats(rxo); + + do { + start = u64_stats_fetch_begin_irq(&stats->sync); + data[base] = stats->rx_bytes; + data[base + 1] = stats->rx_pkts; + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); + + for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { + p = (u8 *)stats + et_rx_stats[i].offset; + data[base + i] = *(u32 *)p; + } + base += ETHTOOL_RXSTATS_NUM; + } + + for_all_tx_queues(adapter, txo, j) { + struct be_tx_stats *stats = tx_stats(txo); + + do { + start = u64_stats_fetch_begin_irq(&stats->sync_compl); + data[base] = stats->tx_compl; + } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start)); + + do { + start = u64_stats_fetch_begin_irq(&stats->sync); + for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { + p = (u8 *)stats + et_tx_stats[i].offset; + data[base + i] = + (et_tx_stats[i].size == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); + base += ETHTOOL_TXSTATS_NUM; + } +} + +static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset, + uint8_t *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ETHTOOL_STATS_NUM; i++) { + memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_qs; i++) { + for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) { + sprintf(data, "rxq%d: %s", i, + et_rx_stats[j].desc); + data += ETH_GSTRING_LEN; + } + } + for (i = 0; i < adapter->num_tx_qs; i++) { + for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) { + sprintf(data, "txq%d: %s", i, + et_tx_stats[j].desc); + data += ETH_GSTRING_LEN; + } + } + break; + case ETH_SS_TEST: + for (i = 0; i < ETHTOOL_TESTS_NUM; i++) { + memcpy(data, et_self_tests[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int be_get_sset_count(struct net_device *netdev, int stringset) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + switch (stringset) { + case ETH_SS_TEST: + return ETHTOOL_TESTS_NUM; + case ETH_SS_STATS: + return ETHTOOL_STATS_NUM + + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM + + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM; + default: + return -EINVAL; + } +} + +static u32 be_get_port_type(struct be_adapter *adapter) +{ + u32 port; + + switch (adapter->phy.interface_type) { + case PHY_TYPE_BASET_1GB: + case PHY_TYPE_BASEX_1GB: + case PHY_TYPE_SGMII: + port = PORT_TP; + break; + case PHY_TYPE_SFP_PLUS_10GB: + if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE) + port = PORT_DA; + else + port = PORT_FIBRE; + break; + case PHY_TYPE_QSFP: + if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE) + port = PORT_DA; + else + port = PORT_FIBRE; + break; + case PHY_TYPE_XFP_10GB: + case PHY_TYPE_SFP_1GB: + port = PORT_FIBRE; + break; + case PHY_TYPE_BASET_10GB: + port = PORT_TP; + break; + default: + port = PORT_OTHER; + } + + return port; +} + +static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds) +{ + u32 val = 0; + + switch (adapter->phy.interface_type) { + case PHY_TYPE_BASET_1GB: + case PHY_TYPE_BASEX_1GB: + case PHY_TYPE_SGMII: + val |= SUPPORTED_TP; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_100MBPS) + val |= SUPPORTED_100baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_10MBPS) + val |= SUPPORTED_10baseT_Full; + break; + case PHY_TYPE_KX4_10GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseKX_Full; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKX4_Full; + break; + case PHY_TYPE_KR2_20GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKR_Full; + if (if_speeds & BE_SUPPORTED_SPEED_20GBPS) + val |= SUPPORTED_20000baseKR2_Full; + break; + case PHY_TYPE_KR_10GB: + val |= SUPPORTED_Backplane | + SUPPORTED_10000baseKR_Full; + break; + case PHY_TYPE_KR4_40GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKR_Full; + if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) + val |= SUPPORTED_40000baseKR4_Full; + break; + case PHY_TYPE_QSFP: + if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) { + switch (adapter->phy.cable_type) { + case QSFP_PLUS_CR4_CABLE: + val |= SUPPORTED_40000baseCR4_Full; + break; + case QSFP_PLUS_LR4_CABLE: + val |= 
SUPPORTED_40000baseLR4_Full; + break; + default: + val |= SUPPORTED_40000baseSR4_Full; + break; + } + } + case PHY_TYPE_SFP_PLUS_10GB: + case PHY_TYPE_XFP_10GB: + case PHY_TYPE_SFP_1GB: + val |= SUPPORTED_FIBRE; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseT_Full; + break; + case PHY_TYPE_BASET_10GB: + val |= SUPPORTED_TP; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) + val |= SUPPORTED_1000baseT_Full; + if (if_speeds & BE_SUPPORTED_SPEED_100MBPS) + val |= SUPPORTED_100baseT_Full; + break; + default: + val |= SUPPORTED_TP; + } + + return val; +} + +bool be_pause_supported(struct be_adapter *adapter) +{ + return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB || + adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ? + false : true; +} + +static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct be_adapter *adapter = netdev_priv(netdev); + u8 link_status; + u16 link_speed = 0; + int status; + u32 auto_speeds; + u32 fixed_speeds; + + if (adapter->phy.link_speed < 0) { + status = be_cmd_link_status_query(adapter, &link_speed, + &link_status, 0); + if (!status) + be_link_status_update(adapter, link_status); + ethtool_cmd_speed_set(ecmd, link_speed); + + status = be_cmd_get_phy_info(adapter); + if (!status) { + auto_speeds = adapter->phy.auto_speeds_supported; + fixed_speeds = adapter->phy.fixed_speeds_supported; + + be_cmd_query_cable_type(adapter); + + ecmd->supported = + convert_to_et_setting(adapter, + auto_speeds | + fixed_speeds); + ecmd->advertising = + convert_to_et_setting(adapter, auto_speeds); + + ecmd->port = be_get_port_type(adapter); + + if (adapter->phy.auto_speeds_supported) { + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->advertising |= ADVERTISED_Autoneg; + } + + ecmd->supported |= SUPPORTED_Pause; + if (be_pause_supported(adapter)) + ecmd->advertising |= ADVERTISED_Pause; + + switch (adapter->phy.interface_type) { + case PHY_TYPE_KR_10GB: + case PHY_TYPE_KX4_10GB: + ecmd->transceiver = XCVR_INTERNAL; + break; + default: + ecmd->transceiver = XCVR_EXTERNAL; + break; + } + } else { + ecmd->port = PORT_OTHER; + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_DUMMY1; + } + + /* Save for future use */ + adapter->phy.link_speed = ethtool_cmd_speed(ecmd); + adapter->phy.port_type = ecmd->port; + adapter->phy.transceiver = ecmd->transceiver; + adapter->phy.autoneg = ecmd->autoneg; + adapter->phy.advertising = ecmd->advertising; + adapter->phy.supported = ecmd->supported; + } else { + ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed); + ecmd->port = adapter->phy.port_type; + ecmd->transceiver = adapter->phy.transceiver; + ecmd->autoneg = adapter->phy.autoneg; + ecmd->advertising = adapter->phy.advertising; + ecmd->supported = adapter->phy.supported; + } + + ecmd->duplex = netif_carrier_ok(netdev) ? 
DUPLEX_FULL : DUPLEX_UNKNOWN; + ecmd->phy_address = adapter->port_num; + + return 0; +} + +static void be_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = adapter->rx_obj[0].q.len; + ring->rx_pending = adapter->rx_obj[0].q.len; + ring->tx_max_pending = adapter->tx_obj[0].q.len; + ring->tx_pending = adapter->tx_obj[0].q.len; +} + +static void +be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); + ecmd->autoneg = adapter->phy.fc_autoneg; +} + +static int +be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (ecmd->autoneg != adapter->phy.fc_autoneg) + return -EINVAL; + + status = be_cmd_set_flow_control(adapter, ecmd->tx_pause, + ecmd->rx_pause); + if (status) { + dev_warn(&adapter->pdev->dev, "Pause param set failed\n"); + return be_cmd_status(status); + } + + adapter->tx_fc = ecmd->tx_pause; + adapter->rx_fc = ecmd->rx_pause; + return 0; +} + +static int be_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + be_cmd_get_beacon_state(adapter, adapter->hba_port_num, + &adapter->beacon_state); + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, + BEACON_STATE_ENABLED); + break; + + case ETHTOOL_ID_OFF: + be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, + BEACON_STATE_DISABLED); + break; + + case ETHTOOL_ID_INACTIVE: + be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, + adapter->beacon_state); + } + + return 0; +} + +static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + int status; + + if (!lancer_chip(adapter) || + !check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + switch (dump->flag) { + case LANCER_INITIATE_FW_DUMP: + status = lancer_initiate_dump(adapter); + if (!status) + dev_info(dev, "FW dump initiated successfully\n"); + break; + case LANCER_DELETE_FW_DUMP: + status = lancer_delete_dump(adapter); + if (!status) + dev_info(dev, "FW dump deleted successfully\n"); + break; + default: + dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag); + return -EINVAL; + } + return status; +} + +static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (adapter->wol_cap & BE_WOL_CAP) { + wol->supported |= WAKE_MAGIC; + if (adapter->wol_en) + wol->wolopts |= WAKE_MAGIC; + } else { + wol->wolopts = 0; + } + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + if (!(adapter->wol_cap & BE_WOL_CAP)) { + dev_warn(&adapter->pdev->dev, "WOL not supported\n"); + return -EOPNOTSUPP; + } + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol_en = true; + else + adapter->wol_en = false; + + return 0; +} + +static int be_test_ddr_dma(struct be_adapter *adapter) +{ + int ret, i; + struct be_dma_mem ddrdma_cmd; + static 
const u64 pattern[2] = { + 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL + }; + + ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); + ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + ddrdma_cmd.size, &ddrdma_cmd.dma, + GFP_KERNEL); + if (!ddrdma_cmd.va) + return -ENOMEM; + + for (i = 0; i < 2; i++) { + ret = be_cmd_ddr_dma_test(adapter, pattern[i], + 4096, &ddrdma_cmd); + if (ret != 0) + goto err; + } + +err: + dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va, + ddrdma_cmd.dma); + return be_cmd_status(ret); +} + +static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, + u64 *status) +{ + be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1); + *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, + loopback_type, 1500, 2, 0xabc); + be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); + return *status; +} + +static void be_self_test(struct net_device *netdev, struct ethtool_test *test, + u64 *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + u8 link_status = 0; + + if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { + dev_err(&adapter->pdev->dev, "Self test not supported\n"); + test->flags |= ETH_TEST_FL_FAILED; + return; + } + + memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); + + if (test->flags & ETH_TEST_FL_OFFLINE) { + if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) + test->flags |= ETH_TEST_FL_FAILED; + + if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0) + test->flags |= ETH_TEST_FL_FAILED; + + if (test->flags & ETH_TEST_FL_EXTERNAL_LB) { + if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, + &data[2]) != 0) + test->flags |= ETH_TEST_FL_FAILED; + test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + } + + if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) { + data[3] = 1; + test->flags |= ETH_TEST_FL_FAILED; + } + + status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); + if (status) { + test->flags |= ETH_TEST_FL_FAILED; + data[4] = -1; + } else if (!link_status) { + test->flags |= ETH_TEST_FL_FAILED; + data[4] = 1; + } +} + +static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + return be_load_fw(adapter, efl->data); +} + +static int be_get_eeprom_len(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return 0; + + if (lancer_chip(adapter)) { + if (be_physfn(adapter)) + return lancer_cmd_get_file_len(adapter, + LANCER_VPD_PF_FILE); + else + return lancer_cmd_get_file_len(adapter, + LANCER_VPD_VF_FILE); + } else { + return BE_READ_SEEPROM_LEN; + } +} + +static int be_read_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, uint8_t *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_dma_mem eeprom_cmd; + struct be_cmd_resp_seeprom_read *resp; + int status; + + if (!eeprom->len) + return -EINVAL; + + if (lancer_chip(adapter)) { + if (be_physfn(adapter)) + return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, + eeprom->len, data); + else + return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, + eeprom->len, data); + } + + eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); + + memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); + eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); + eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + eeprom_cmd.size, &eeprom_cmd.dma, + GFP_KERNEL); 
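+	/* eeprom_cmd now refers to a zeroed, DMA-able buffer used for the
+	 * SEEPROM read command; it is freed below whether or not the
+	 * firmware command succeeds.
+	 */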
+ + if (!eeprom_cmd.va) + return -ENOMEM; + + status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); + + if (!status) { + resp = eeprom_cmd.va; + memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); + } + dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va, + eeprom_cmd.dma); + + return be_cmd_status(status); +} + +static u32 be_get_msg_level(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void be_set_msg_level(struct net_device *netdev, u32 level) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (adapter->msg_enable == level) + return; + + if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW)) + if (BEx_chip(adapter)) + be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ? + FW_LOG_LEVEL_DEFAULT : + FW_LOG_LEVEL_FATAL); + adapter->msg_enable = level; +} + +static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + } + + return data; +} + +static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (!be_multi_rxq(adapter)) { + dev_info(&adapter->pdev->dev, + "ethtool::get_rxnfc: RX flow hashing is disabled\n"); + return -EINVAL; + } + + switch (cmd->cmd) { + case ETHTOOL_GRXFH: + cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type); + break; + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_qs - 1; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int be_set_rss_hash_opts(struct be_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct be_rx_obj *rxo; + int status = 0, i, j; + u8 rsstable[128]; + u32 rss_flags = adapter->rss_info.rss_flags; + + if (cmd->data != L3_RSS_FLAGS && + cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~RSS_ENABLE_TCP_IPV4; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= RSS_ENABLE_IPV4 | + RSS_ENABLE_TCP_IPV4; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~RSS_ENABLE_TCP_IPV6; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= RSS_ENABLE_IPV6 | + RSS_ENABLE_TCP_IPV6; + break; + case UDP_V4_FLOW: + if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) && + BEx_chip(adapter)) + return -EINVAL; + + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~RSS_ENABLE_UDP_IPV4; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= RSS_ENABLE_IPV4 | + RSS_ENABLE_UDP_IPV4; + break; + case UDP_V6_FLOW: + if ((cmd->data == 
(L3_RSS_FLAGS | L4_RSS_FLAGS)) && + BEx_chip(adapter)) + return -EINVAL; + + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~RSS_ENABLE_UDP_IPV6; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= RSS_ENABLE_IPV6 | + RSS_ENABLE_UDP_IPV6; + break; + default: + return -EINVAL; + } + + if (rss_flags == adapter->rss_info.rss_flags) + return status; + + if (be_multi_rxq(adapter)) { + for (j = 0; j < 128; j += adapter->num_rss_qs) { + for_all_rss_queues(adapter, rxo, i) { + if ((j + i) >= 128) + break; + rsstable[j + i] = rxo->rss_id; + } + } + } + + status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, + rss_flags, 128, adapter->rss_info.rss_hkey); + if (!status) + adapter->rss_info.rss_flags = rss_flags; + + return be_cmd_status(status); +} + +static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; + + if (!be_multi_rxq(adapter)) { + dev_err(&adapter->pdev->dev, + "ethtool::set_rxnfc: RX flow hashing is disabled\n"); + return -EINVAL; + } + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + status = be_set_rss_hash_opts(adapter, cmd); + break; + default: + return -EINVAL; + } + + return status; +} + +static void be_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + ch->combined_count = adapter->num_evt_qs; + ch->max_combined = be_max_qs(adapter); +} + +static int be_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (ch->rx_count || ch->tx_count || ch->other_count || + !ch->combined_count || ch->combined_count > be_max_qs(adapter)) + return -EINVAL; + + adapter->cfg_num_qs = ch->combined_count; + + status = be_update_queues(adapter); + return be_cmd_status(status); +} + +static u32 be_get_rxfh_indir_size(struct net_device *netdev) +{ + return RSS_INDIR_TABLE_LEN; +} + +static u32 be_get_rxfh_key_size(struct net_device *netdev) +{ + return RSS_HASH_KEY_LEN; +} + +static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey, + u8 *hfunc) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int i; + struct rss_info *rss = &adapter->rss_info; + + if (indir) { + for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) + indir[i] = rss->rss_queue[i]; + } + + if (hkey) + memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int be_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *hkey, const u8 hfunc) +{ + int rc = 0, i, j; + struct be_adapter *adapter = netdev_priv(netdev); + u8 rsstable[RSS_INDIR_TABLE_LEN]; + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (indir) { + struct be_rx_obj *rxo; + + for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) { + j = indir[i]; + rxo = &adapter->rx_obj[j]; + rsstable[i] = rxo->rss_id; + adapter->rss_info.rss_queue[i] = j; + } + } else { + memcpy(rsstable, adapter->rss_info.rsstable, + RSS_INDIR_TABLE_LEN); + } + + if (!hkey) + hkey = adapter->rss_info.rss_hkey; + + rc = be_cmd_rss_config(adapter, rsstable, + adapter->rss_info.rss_flags, + RSS_INDIR_TABLE_LEN, hkey); + if (rc) { + adapter->rss_info.rss_flags = RSS_ENABLE_NONE; + return -EIO; + } + memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN); + memcpy(adapter->rss_info.rsstable, rsstable, + RSS_INDIR_TABLE_LEN); + return 0; +} + +static int 
be_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct be_adapter *adapter = netdev_priv(netdev); + u8 page_data[PAGE_DATA_LEN]; + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + page_data); + if (!status) { + if (!page_data[SFP_PLUS_SFF_8472_COMP]) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = PAGE_DATA_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = 2 * PAGE_DATA_LEN; + } + } + return be_cmd_status(status); +} + +static int be_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + data); + if (status) + goto err; + + if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, + TR_PAGE_A2, + data + + PAGE_DATA_LEN); + if (status) + goto err; + } + if (eeprom->offset) + memcpy(data, data + eeprom->offset, eeprom->len); +err: + return be_cmd_status(status); +} + +const struct ethtool_ops be_ethtool_ops = { + .get_settings = be_get_settings, + .get_drvinfo = be_get_drvinfo, + .get_wol = be_get_wol, + .set_wol = be_set_wol, + .get_link = ethtool_op_get_link, + .get_eeprom_len = be_get_eeprom_len, + .get_eeprom = be_read_eeprom, + .get_coalesce = be_get_coalesce, + .set_coalesce = be_set_coalesce, + .get_ringparam = be_get_ringparam, + .get_pauseparam = be_get_pauseparam, + .set_pauseparam = be_set_pauseparam, + .get_strings = be_get_stat_strings, + .set_phys_id = be_set_phys_id, + .set_dump = be_set_dump, + .get_msglevel = be_get_msg_level, + .set_msglevel = be_set_msg_level, + .get_sset_count = be_get_sset_count, + .get_ethtool_stats = be_get_ethtool_stats, + .get_regs_len = be_get_reg_len, + .get_regs = be_get_regs, + .flash_device = be_do_flash, + .self_test = be_self_test, + .get_rxnfc = be_get_rxnfc, + .set_rxnfc = be_set_rxnfc, + .get_rxfh_indir_size = be_get_rxfh_indir_size, + .get_rxfh_key_size = be_get_rxfh_key_size, + .get_rxfh = be_get_rxfh, + .set_rxfh = be_set_rxfh, + .get_channels = be_get_channels, + .set_channels = be_set_channels, + .get_module_info = be_get_module_info, + .get_module_eeprom = be_get_module_eeprom +}; diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h new file mode 100644 index 000000000..48840889d --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -0,0 +1,354 @@ +/* + * Copyright (C) 2005 - 2014 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + */ + +/********* Mailbox door bell *************/ +/* Used for driver communication with the FW. + * The software must write this register twice to post any command. First, + * it writes the register with hi=1 and the upper bits of the physical address + * for the MAILBOX structure. Software must poll the ready bit until this + * is acknowledged. 
Then, software writes the register with hi=0 with the lower + * bits in the address. It must poll the ready bit until the command is + * complete. Upon completion, the MAILBOX will contain a valid completion + * queue entry. + */ +#define MPU_MAILBOX_DB_OFFSET 0x160 +#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */ +#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */ + +#define MPU_EP_CONTROL 0 + +/********** MPU semaphore: used for SH & BE *************/ +#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */ +#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */ +#define POST_STAGE_MASK 0x0000FFFF +#define POST_ERR_MASK 0x1 +#define POST_ERR_SHIFT 31 + +/* MPU semaphore POST stage values */ +#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */ +#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */ +#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ +#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ + + +/* Lancer SLIPORT registers */ +#define SLIPORT_STATUS_OFFSET 0x404 +#define SLIPORT_CONTROL_OFFSET 0x408 +#define SLIPORT_ERROR1_OFFSET 0x40C +#define SLIPORT_ERROR2_OFFSET 0x410 +#define PHYSDEV_CONTROL_OFFSET 0x414 + +#define SLIPORT_STATUS_ERR_MASK 0x80000000 +#define SLIPORT_STATUS_DIP_MASK 0x02000000 +#define SLIPORT_STATUS_RN_MASK 0x01000000 +#define SLIPORT_STATUS_RDY_MASK 0x00800000 +#define SLI_PORT_CONTROL_IP_MASK 0x08000000 +#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002 +#define PHYSDEV_CONTROL_DD_MASK 0x00000004 +#define PHYSDEV_CONTROL_INP_MASK 0x40000000 + +#define SLIPORT_ERROR_NO_RESOURCE1 0x2 +#define SLIPORT_ERROR_NO_RESOURCE2 0x9 + +#define SLIPORT_ERROR_FW_RESET1 0x2 +#define SLIPORT_ERROR_FW_RESET2 0x0 + +/********* Memory BAR register ************/ +#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc +/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt + * Disable" may still globally block interrupts in addition to individual + * interrupt masks; a mechanism for the device driver to block all interrupts + * atomically without having to arbitrate for the PCI Interrupt Disable bit + * with the OS. 
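+ * The driver normally enables/disables interrupts through a FW command
+ * (be_cmd_intr_set()); be_reg_intr_set() in be_main.c is the register-level
+ * fallback that reads the dword at PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
+ * sets or clears bit 29 and writes it back, leaving the other bits as read.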
+ */ +#define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK BIT(29) /* bit 29 */ + +/********* PCI Function Capability *********/ +#define BE_FUNCTION_CAPS_RSS 0x2 +#define BE_FUNCTION_CAPS_SUPER_NIC 0x40 + +/********* Power management (WOL) **********/ +#define PCICFG_PM_CONTROL_OFFSET 0x44 +#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */ + +/********* Online Control Registers *******/ +#define PCICFG_ONLINE0 0xB0 +#define PCICFG_ONLINE1 0xB4 + +/********* UE Status and Mask Registers ***/ +#define PCICFG_UE_STATUS_LOW 0xA0 +#define PCICFG_UE_STATUS_HIGH 0xA4 +#define PCICFG_UE_STATUS_LOW_MASK 0xA8 +#define PCICFG_UE_STATUS_HI_MASK 0xAC + +/******** SLI_INTF ***********************/ +#define SLI_INTF_REG_OFFSET 0x58 +#define SLI_INTF_VALID_MASK 0xE0000000 +#define SLI_INTF_VALID 0xC0000000 +#define SLI_INTF_HINT2_MASK 0x1F000000 +#define SLI_INTF_HINT2_SHIFT 24 +#define SLI_INTF_HINT1_MASK 0x00FF0000 +#define SLI_INTF_HINT1_SHIFT 16 +#define SLI_INTF_FAMILY_MASK 0x00000F00 +#define SLI_INTF_FAMILY_SHIFT 8 +#define SLI_INTF_IF_TYPE_MASK 0x0000F000 +#define SLI_INTF_IF_TYPE_SHIFT 12 +#define SLI_INTF_REV_MASK 0x000000F0 +#define SLI_INTF_REV_SHIFT 4 +#define SLI_INTF_FT_MASK 0x00000001 + +#define SLI_INTF_TYPE_2 2 +#define SLI_INTF_TYPE_3 3 + +/********* ISR0 Register offset **********/ +#define CEV_ISR0_OFFSET 0xC18 +#define CEV_ISR_SIZE 4 + +/********* Event Q door bell *************/ +#define DB_EQ_OFFSET DB_CQ_OFFSET +#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ +#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */ +#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */ + +/* Clear the interrupt for this eq */ +#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ +/* Must be 1 */ +#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ +/* Number of event entries processed */ +#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ +/* Rearm bit */ +#define DB_EQ_REARM_SHIFT (29) /* bit 29 */ + +/********* Compl Q door bell *************/ +#define DB_CQ_OFFSET 0x120 +#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ +#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */ +#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14 + placing at 11-15 */ + +/* Number of event entries processed */ +#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ +/* Rearm bit */ +#define DB_CQ_REARM_SHIFT (29) /* bit 29 */ + +/********** TX ULP door bell *************/ +#define DB_TXULP1_OFFSET 0x60 +#define DB_TXULP_RING_ID_MASK 0x7FF /* bits 0 - 10 */ +/* Number of tx entries posted */ +#define DB_TXULP_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ +#define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */ + +/********** RQ(erx) door bell ************/ +#define DB_RQ_OFFSET 0x100 +#define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ +/* Number of rx frags posted */ +#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */ + +/********** MCC door bell ************/ +#define DB_MCCQ_OFFSET 0x140 +#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */ +/* Number of entries posted */ +#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ + +/********** SRIOV VF PCICFG OFFSET ********/ +#define SRIOV_VF_PCICFG_OFFSET (4096) + +/********** FAT TABLE ********/ +#define RETRIEVE_FAT 0 +#define QUERY_FAT 1 + +/************* Rx Packet Type Encoding **************/ +#define BE_UNICAST_PACKET 0 +#define BE_MULTICAST_PACKET 1 +#define BE_BROADCAST_PACKET 2 +#define BE_RSVD_PACKET 3 + +/* + * BE descriptors: host memory data structures whose formats + * are hardwired in BE silicon. 
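+ * The amap_* structs below are layout descriptions only (one byte per
+ * hardware bit), used to derive field offsets, shifts and masks; at runtime
+ * each such descriptor is accessed through its companion be_eth_* struct,
+ * which holds the raw 32-bit words.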
+ */ +/* Event Queue Descriptor */ +#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */ +#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */ +#define EQ_ENTRY_RES_ID_SHIFT 16 + +struct be_eq_entry { + u32 evt; +}; + +/* TX Queue Descriptor */ +#define ETH_WRB_FRAG_LEN_MASK 0xFFFF +struct be_eth_wrb { + __le32 frag_pa_hi; /* dword 0 */ + __le32 frag_pa_lo; /* dword 1 */ + u32 rsvd0; /* dword 2 */ + __le32 frag_len; /* dword 3: bits 0 - 15 */ +} __packed; + +/* Pseudo amap definition for eth_hdr_wrb in which each bit of the + * actual structure is defined as a byte : used to calculate + * offset/shift/mask of each field */ +struct amap_eth_hdr_wrb { + u8 rsvd0[32]; /* dword 0 */ + u8 rsvd1[32]; /* dword 1 */ + u8 complete; /* dword 2 */ + u8 event; + u8 crc; + u8 forward; + u8 lso6; + u8 mgmt; + u8 ipcs; + u8 udpcs; + u8 tcpcs; + u8 lso; + u8 vlan; + u8 gso[2]; + u8 num_wrb[5]; + u8 lso_mss[14]; + u8 len[16]; /* dword 3 */ + u8 vlan_tag[16]; +} __packed; + +#define TX_HDR_WRB_COMPL 1 /* word 2 */ +#define TX_HDR_WRB_EVT BIT(1) /* word 2 */ +#define TX_HDR_WRB_NUM_SHIFT 13 /* word 2: bits 13:17 */ +#define TX_HDR_WRB_NUM_MASK 0x1F /* word 2: bits 13:17 */ + +struct be_eth_hdr_wrb { + __le32 dw[4]; +}; + +/********* Tx Compl Status Encoding *********/ +#define BE_TX_COMP_HDR_PARSE_ERR 0x2 +#define BE_TX_COMP_NDMA_ERR 0x3 +#define BE_TX_COMP_ACL_ERR 0x5 + +#define LANCER_TX_COMP_LSO_ERR 0x1 +#define LANCER_TX_COMP_HSW_DROP_MAC_ERR 0x3 +#define LANCER_TX_COMP_HSW_DROP_VLAN_ERR 0x5 +#define LANCER_TX_COMP_QINQ_ERR 0x7 +#define LANCER_TX_COMP_PARITY_ERR 0xb +#define LANCER_TX_COMP_DMA_ERR 0xd + +/* TX Compl Queue Descriptor */ + +/* Pseudo amap definition for eth_tx_compl in which each bit of the + * actual structure is defined as a byte: used to calculate + * offset/shift/mask of each field */ +struct amap_eth_tx_compl { + u8 wrb_index[16]; /* dword 0 */ + u8 ct[2]; /* dword 0 */ + u8 port[2]; /* dword 0 */ + u8 rsvd0[8]; /* dword 0 */ + u8 status[4]; /* dword 0 */ + u8 user_bytes[16]; /* dword 1 */ + u8 nwh_bytes[8]; /* dword 1 */ + u8 lso; /* dword 1 */ + u8 cast_enc[2]; /* dword 1 */ + u8 rsvd1[5]; /* dword 1 */ + u8 rsvd2[32]; /* dword 2 */ + u8 pkts[16]; /* dword 3 */ + u8 ringid[11]; /* dword 3 */ + u8 hash_val[4]; /* dword 3 */ + u8 valid; /* dword 3 */ +} __packed; + +struct be_eth_tx_compl { + u32 dw[4]; +}; + +/* RX Queue Descriptor */ +struct be_eth_rx_d { + u32 fragpa_hi; + u32 fragpa_lo; +}; + +/* RX Compl Queue Descriptor */ + +/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which + * each bit of the actual structure is defined as a byte: used to calculate + * offset/shift/mask of each field */ +struct amap_eth_rx_compl_v0 { + u8 vlan_tag[16]; /* dword 0 */ + u8 pktsize[14]; /* dword 0 */ + u8 port; /* dword 0 */ + u8 ip_opt; /* dword 0 */ + u8 err; /* dword 1 */ + u8 rsshp; /* dword 1 */ + u8 ipf; /* dword 1 */ + u8 tcpf; /* dword 1 */ + u8 udpf; /* dword 1 */ + u8 ipcksm; /* dword 1 */ + u8 l4_cksm; /* dword 1 */ + u8 ip_version; /* dword 1 */ + u8 macdst[6]; /* dword 1 */ + u8 vtp; /* dword 1 */ + u8 ip_frag; /* dword 1 */ + u8 fragndx[10]; /* dword 1 */ + u8 ct[2]; /* dword 1 */ + u8 sw; /* dword 1 */ + u8 numfrags[3]; /* dword 1 */ + u8 rss_flush; /* dword 2 */ + u8 cast_enc[2]; /* dword 2 */ + u8 qnq; /* dword 2 */ + u8 rss_bank; /* dword 2 */ + u8 rsvd1[23]; /* dword 2 */ + u8 lro_pkt; /* dword 2 */ + u8 rsvd2[2]; /* dword 2 */ + u8 valid; /* dword 2 */ + u8 rsshash[32]; /* dword 3 */ +} __packed; + +/* Pseudo amap definition for BE3 native mode eth_rx_compl 
in which + * each bit of the actual structure is defined as a byte: used to calculate + * offset/shift/mask of each field */ +struct amap_eth_rx_compl_v1 { + u8 vlan_tag[16]; /* dword 0 */ + u8 pktsize[14]; /* dword 0 */ + u8 vtp; /* dword 0 */ + u8 ip_opt; /* dword 0 */ + u8 err; /* dword 1 */ + u8 rsshp; /* dword 1 */ + u8 ipf; /* dword 1 */ + u8 tcpf; /* dword 1 */ + u8 udpf; /* dword 1 */ + u8 ipcksm; /* dword 1 */ + u8 l4_cksm; /* dword 1 */ + u8 ip_version; /* dword 1 */ + u8 macdst[7]; /* dword 1 */ + u8 rsvd0; /* dword 1 */ + u8 fragndx[10]; /* dword 1 */ + u8 ct[2]; /* dword 1 */ + u8 sw; /* dword 1 */ + u8 numfrags[3]; /* dword 1 */ + u8 rss_flush; /* dword 2 */ + u8 cast_enc[2]; /* dword 2 */ + u8 qnq; /* dword 2 */ + u8 rss_bank; /* dword 2 */ + u8 port[2]; /* dword 2 */ + u8 vntagp; /* dword 2 */ + u8 header_len[8]; /* dword 2 */ + u8 header_split[2]; /* dword 2 */ + u8 rsvd1[12]; /* dword 2 */ + u8 tunneled; + u8 valid; /* dword 2 */ + u8 rsshash[32]; /* dword 3 */ +} __packed; + +struct be_eth_rx_compl { + u32 dw[4]; +}; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c new file mode 100644 index 000000000..e43cc8a73 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -0,0 +1,5761 @@ +/* + * Copyright (C) 2005 - 2014 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. + * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + */ + +#include +#include +#include "be.h" +#include "be_cmds.h" +#include +#include +#include +#include +#include + +MODULE_VERSION(DRV_VER); +MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); +MODULE_AUTHOR("Emulex Corporation"); +MODULE_LICENSE("GPL"); + +/* num_vfs module param is obsolete. + * Use sysfs method to enable/disable VFs. 
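+ * (VFs are normally enabled through the standard PCI sysfs interface,
+ * e.g. by writing the desired count to the device's sriov_numvfs file.)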
+ */ +static unsigned int num_vfs; +module_param(num_vfs, uint, S_IRUGO); +MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); + +static ushort rx_frag_size = 2048; +module_param(rx_frag_size, ushort, S_IRUGO); +MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); + +static const struct pci_device_id be_dev_ids[] = { + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, + { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, + { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, + { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, + { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)}, + { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)}, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, be_dev_ids); +/* UE Status Low CSR */ +static const char * const ue_status_low_desc[] = { + "CEV", + "CTX", + "DBUF", + "ERX", + "Host", + "MPU", + "NDMA", + "PTC ", + "RDMA ", + "RXF ", + "RXIPS ", + "RXULP0 ", + "RXULP1 ", + "RXULP2 ", + "TIM ", + "TPOST ", + "TPRE ", + "TXIPS ", + "TXULP0 ", + "TXULP1 ", + "UC ", + "WDMA ", + "TXULP2 ", + "HOST1 ", + "P0_OB_LINK ", + "P1_OB_LINK ", + "HOST_GPIO ", + "MBOX ", + "ERX2 ", + "SPARE ", + "JTAG ", + "MPU_INTPEND " +}; + +/* UE Status High CSR */ +static const char * const ue_status_hi_desc[] = { + "LPCMEMHOST", + "MGMT_MAC", + "PCS0ONLINE", + "MPU_IRAM", + "PCS1ONLINE", + "PCTL0", + "PCTL1", + "PMEM", + "RR", + "TXPB", + "RXPP", + "XAUI", + "TXP", + "ARM", + "IPC", + "HOST2", + "HOST3", + "HOST4", + "HOST5", + "HOST6", + "HOST7", + "ECRC", + "Poison TLP", + "NETC", + "PERIPH", + "LLTXULP", + "D2P", + "RCON", + "LDMA", + "LLTXP", + "LLTXPB", + "Unknown" +}; + +static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) +{ + struct be_dma_mem *mem = &q->dma_mem; + + if (mem->va) { + dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, + mem->dma); + mem->va = NULL; + } +} + +static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, + u16 len, u16 entry_size) +{ + struct be_dma_mem *mem = &q->dma_mem; + + memset(q, 0, sizeof(*q)); + q->len = len; + q->entry_size = entry_size; + mem->size = len * entry_size; + mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, + GFP_KERNEL); + if (!mem->va) + return -ENOMEM; + return 0; +} + +static void be_reg_intr_set(struct be_adapter *adapter, bool enable) +{ + u32 reg, enabled; + + pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, + ®); + enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + + if (!enabled && enable) + reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + else if (enabled && !enable) + reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; + else + return; + + pci_write_config_dword(adapter->pdev, + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); +} + +static void be_intr_set(struct be_adapter *adapter, bool enable) +{ + int status = 0; + + /* On lancer interrupts can't be controlled via this register */ + if (lancer_chip(adapter)) + return; + + if (adapter->eeh_error) + return; + + status = be_cmd_intr_set(adapter, enable); + if (status) + be_reg_intr_set(adapter, enable); +} + +static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) +{ + u32 val = 0; + + val |= qid & DB_RQ_RING_ID_MASK; + val |= posted << DB_RQ_NUM_POSTED_SHIFT; + + wmb(); + iowrite32(val, adapter->db + DB_RQ_OFFSET); +} + +static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo, + u16 posted) +{ + u32 val = 0; + + val |= txo->q.id & DB_TXULP_RING_ID_MASK; + 
val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; + + wmb(); + iowrite32(val, adapter->db + txo->db_offset); +} + +static void be_eq_notify(struct be_adapter *adapter, u16 qid, + bool arm, bool clear_int, u16 num_popped) +{ + u32 val = 0; + + val |= qid & DB_EQ_RING_ID_MASK; + val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT); + + if (adapter->eeh_error) + return; + + if (arm) + val |= 1 << DB_EQ_REARM_SHIFT; + if (clear_int) + val |= 1 << DB_EQ_CLR_SHIFT; + val |= 1 << DB_EQ_EVNT_SHIFT; + val |= num_popped << DB_EQ_NUM_POPPED_SHIFT; + iowrite32(val, adapter->db + DB_EQ_OFFSET); +} + +void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) +{ + u32 val = 0; + + val |= qid & DB_CQ_RING_ID_MASK; + val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << + DB_CQ_RING_ID_EXT_MASK_SHIFT); + + if (adapter->eeh_error) + return; + + if (arm) + val |= 1 << DB_CQ_REARM_SHIFT; + val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; + iowrite32(val, adapter->db + DB_CQ_OFFSET); +} + +static int be_mac_addr_set(struct net_device *netdev, void *p) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + struct sockaddr *addr = p; + int status; + u8 mac[ETH_ALEN]; + u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* Proceed further only if, User provided MAC is different + * from active MAC + */ + if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) + return 0; + + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT + * privilege or if PF did not provision the new MAC address. + * On BE3, this cmd will always fail if the VF doesn't have the + * FILTMGMT privilege. This failure is OK, only if the PF programmed + * the MAC for the VF. + */ + status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, + adapter->if_handle, &adapter->pmac_id[0], 0); + if (!status) { + curr_pmac_id = adapter->pmac_id[0]; + + /* Delete the old programmed MAC. This call may fail if the + * old MAC was already deleted by the PF driver. + */ + if (adapter->pmac_id[0] != old_pmac_id) + be_cmd_pmac_del(adapter, adapter->if_handle, + old_pmac_id, 0); + } + + /* Decide if the new MAC is successfully activated only after + * querying the FW + */ + status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac, + adapter->if_handle, true, 0); + if (status) + goto err; + + /* The MAC change did not happen, either due to lack of privilege + * or PF didn't pre-provision. 
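+ * In that case the previously active MAC (re-read into 'mac' by
+ * be_cmd_get_active_mac() above) stays in effect and -EPERM is returned.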
+ */ + if (!ether_addr_equal(addr->sa_data, mac)) { + status = -EPERM; + goto err; + } + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + dev_info(dev, "MAC address changed to %pM\n", mac); + return 0; +err: + dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); + return status; +} + +/* BE2 supports only v0 cmd */ +static void *hw_stats_from_cmd(struct be_adapter *adapter) +{ + if (BE2_chip(adapter)) { + struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va; + + return &cmd->hw_stats; + } else if (BE3_chip(adapter)) { + struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va; + + return &cmd->hw_stats; + } else { + struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va; + + return &cmd->hw_stats; + } +} + +/* BE2 supports only v0 cmd */ +static void *be_erx_stats_from_cmd(struct be_adapter *adapter) +{ + if (BE2_chip(adapter)) { + struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); + + return &hw_stats->erx; + } else if (BE3_chip(adapter)) { + struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); + + return &hw_stats->erx; + } else { + struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); + + return &hw_stats->erx; + } +} + +static void populate_be_v0_stats(struct be_adapter *adapter) +{ + struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); + struct be_pmem_stats *pmem_sts = &hw_stats->pmem; + struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf; + struct be_port_rxf_stats_v0 *port_stats = + &rxf_stats->port[adapter->port_num]; + struct be_drv_stats *drvs = &adapter->drv_stats; + + be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); + drvs->rx_pause_frames = port_stats->rx_pause_frames; + drvs->rx_crc_errors = port_stats->rx_crc_errors; + drvs->rx_control_frames = port_stats->rx_control_frames; + drvs->rx_in_range_errors = port_stats->rx_in_range_errors; + drvs->rx_frame_too_long = port_stats->rx_frame_too_long; + drvs->rx_dropped_runt = port_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; + drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow; + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; + drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; + drvs->rx_out_range_errors = port_stats->rx_out_range_errors; + drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow; + drvs->rx_dropped_header_too_small = + port_stats->rx_dropped_header_too_small; + drvs->rx_address_filtered = + port_stats->rx_address_filtered + + port_stats->rx_vlan_filtered; + drvs->rx_alignment_symbol_errors = + port_stats->rx_alignment_symbol_errors; + + drvs->tx_pauseframes = port_stats->tx_pauseframes; + drvs->tx_controlframes = port_stats->tx_controlframes; + + if (adapter->port_num) + drvs->jabber_events = rxf_stats->port1_jabber_events; + else + drvs->jabber_events = rxf_stats->port0_jabber_events; + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; + drvs->forwarded_packets = rxf_stats->forwarded_packets; + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; + drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; + drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; +} + +static void 
populate_be_v1_stats(struct be_adapter *adapter) +{ + struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); + struct be_pmem_stats *pmem_sts = &hw_stats->pmem; + struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf; + struct be_port_rxf_stats_v1 *port_stats = + &rxf_stats->port[adapter->port_num]; + struct be_drv_stats *drvs = &adapter->drv_stats; + + be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); + drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; + drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; + drvs->rx_pause_frames = port_stats->rx_pause_frames; + drvs->rx_crc_errors = port_stats->rx_crc_errors; + drvs->rx_control_frames = port_stats->rx_control_frames; + drvs->rx_in_range_errors = port_stats->rx_in_range_errors; + drvs->rx_frame_too_long = port_stats->rx_frame_too_long; + drvs->rx_dropped_runt = port_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; + drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; + drvs->rx_out_range_errors = port_stats->rx_out_range_errors; + drvs->rx_dropped_header_too_small = + port_stats->rx_dropped_header_too_small; + drvs->rx_input_fifo_overflow_drop = + port_stats->rx_input_fifo_overflow_drop; + drvs->rx_address_filtered = port_stats->rx_address_filtered; + drvs->rx_alignment_symbol_errors = + port_stats->rx_alignment_symbol_errors; + drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; + drvs->tx_pauseframes = port_stats->tx_pauseframes; + drvs->tx_controlframes = port_stats->tx_controlframes; + drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes; + drvs->jabber_events = port_stats->jabber_events; + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; + drvs->forwarded_packets = rxf_stats->forwarded_packets; + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; + drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; + drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; +} + +static void populate_be_v2_stats(struct be_adapter *adapter) +{ + struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); + struct be_pmem_stats *pmem_sts = &hw_stats->pmem; + struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf; + struct be_port_rxf_stats_v2 *port_stats = + &rxf_stats->port[adapter->port_num]; + struct be_drv_stats *drvs = &adapter->drv_stats; + + be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); + drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; + drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; + drvs->rx_pause_frames = port_stats->rx_pause_frames; + drvs->rx_crc_errors = port_stats->rx_crc_errors; + drvs->rx_control_frames = port_stats->rx_control_frames; + drvs->rx_in_range_errors = port_stats->rx_in_range_errors; + drvs->rx_frame_too_long = port_stats->rx_frame_too_long; + drvs->rx_dropped_runt = port_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; + drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; + drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; + drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; + 
drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; + drvs->rx_out_range_errors = port_stats->rx_out_range_errors; + drvs->rx_dropped_header_too_small = + port_stats->rx_dropped_header_too_small; + drvs->rx_input_fifo_overflow_drop = + port_stats->rx_input_fifo_overflow_drop; + drvs->rx_address_filtered = port_stats->rx_address_filtered; + drvs->rx_alignment_symbol_errors = + port_stats->rx_alignment_symbol_errors; + drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; + drvs->tx_pauseframes = port_stats->tx_pauseframes; + drvs->tx_controlframes = port_stats->tx_controlframes; + drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes; + drvs->jabber_events = port_stats->jabber_events; + drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; + drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; + drvs->forwarded_packets = rxf_stats->forwarded_packets; + drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; + drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; + drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; + adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; + if (be_roce_supported(adapter)) { + drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; + drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; + drvs->rx_roce_frames = port_stats->roce_frames_received; + drvs->roce_drops_crc = port_stats->roce_drops_crc; + drvs->roce_drops_payload_len = + port_stats->roce_drops_payload_len; + } +} + +static void populate_lancer_stats(struct be_adapter *adapter) +{ + struct be_drv_stats *drvs = &adapter->drv_stats; + struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter); + + be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats)); + drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; + drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo; + drvs->rx_control_frames = pport_stats->rx_control_frames_lo; + drvs->rx_in_range_errors = pport_stats->rx_in_range_errors; + drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo; + drvs->rx_dropped_runt = pport_stats->rx_dropped_runt; + drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors; + drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors; + drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors; + drvs->rx_dropped_tcp_length = + pport_stats->rx_dropped_invalid_tcp_length; + drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small; + drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short; + drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors; + drvs->rx_dropped_header_too_small = + pport_stats->rx_dropped_header_too_small; + drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow; + drvs->rx_address_filtered = + pport_stats->rx_address_filtered + + pport_stats->rx_vlan_filtered; + drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo; + drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow; + drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo; + drvs->tx_controlframes = pport_stats->tx_control_frames_lo; + drvs->jabber_events = pport_stats->rx_jabbers; + drvs->forwarded_packets = pport_stats->num_forwards_lo; + drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo; + drvs->rx_drops_too_many_frags = + pport_stats->rx_drops_too_many_frags_lo; +} + +static void accumulate_16bit_val(u32 *acc, u16 val) +{ +#define lo(x) (x & 0xFFFF) +#define hi(x) (x & 0xFFFF0000) + bool wrapped = 
val < lo(*acc); + u32 newacc = hi(*acc) + val; + + if (wrapped) + newacc += 65536; + ACCESS_ONCE(*acc) = newacc; +} + +static void populate_erx_stats(struct be_adapter *adapter, + struct be_rx_obj *rxo, u32 erx_stat) +{ + if (!BEx_chip(adapter)) + rx_stats(rxo)->rx_drops_no_frags = erx_stat; + else + /* below erx HW counter can actually wrap around after + * 65535. Driver accumulates a 32-bit value + */ + accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, + (u16)erx_stat); +} + +void be_parse_stats(struct be_adapter *adapter) +{ + struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter); + struct be_rx_obj *rxo; + int i; + u32 erx_stat; + + if (lancer_chip(adapter)) { + populate_lancer_stats(adapter); + } else { + if (BE2_chip(adapter)) + populate_be_v0_stats(adapter); + else if (BE3_chip(adapter)) + /* for BE3 */ + populate_be_v1_stats(adapter); + else + populate_be_v2_stats(adapter); + + /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */ + for_all_rx_queues(adapter, rxo, i) { + erx_stat = erx->rx_drops_no_fragments[rxo->q.id]; + populate_erx_stats(adapter, rxo, erx_stat); + } + } +} + +static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_drv_stats *drvs = &adapter->drv_stats; + struct be_rx_obj *rxo; + struct be_tx_obj *txo; + u64 pkts, bytes; + unsigned int start; + int i; + + for_all_rx_queues(adapter, rxo, i) { + const struct be_rx_stats *rx_stats = rx_stats(rxo); + + do { + start = u64_stats_fetch_begin_irq(&rx_stats->sync); + pkts = rx_stats(rxo)->rx_pkts; + bytes = rx_stats(rxo)->rx_bytes; + } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); + stats->rx_packets += pkts; + stats->rx_bytes += bytes; + stats->multicast += rx_stats(rxo)->rx_mcast_pkts; + stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs + + rx_stats(rxo)->rx_drops_no_frags; + } + + for_all_tx_queues(adapter, txo, i) { + const struct be_tx_stats *tx_stats = tx_stats(txo); + + do { + start = u64_stats_fetch_begin_irq(&tx_stats->sync); + pkts = tx_stats(txo)->tx_pkts; + bytes = tx_stats(txo)->tx_bytes; + } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); + stats->tx_packets += pkts; + stats->tx_bytes += bytes; + } + + /* bad pkts received */ + stats->rx_errors = drvs->rx_crc_errors + + drvs->rx_alignment_symbol_errors + + drvs->rx_in_range_errors + + drvs->rx_out_range_errors + + drvs->rx_frame_too_long + + drvs->rx_dropped_too_small + + drvs->rx_dropped_too_short + + drvs->rx_dropped_header_too_small + + drvs->rx_dropped_tcp_length + + drvs->rx_dropped_runt; + + /* detailed rx errors */ + stats->rx_length_errors = drvs->rx_in_range_errors + + drvs->rx_out_range_errors + + drvs->rx_frame_too_long; + + stats->rx_crc_errors = drvs->rx_crc_errors; + + /* frame alignment errors */ + stats->rx_frame_errors = drvs->rx_alignment_symbol_errors; + + /* receiver fifo overrun */ + /* drops_no_pbuf is no per i/f, it's per BE card */ + stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + + drvs->rx_input_fifo_overflow_drop + + drvs->rx_drops_no_pbuf; + return stats; +} + +void be_link_status_update(struct be_adapter *adapter, u8 link_status) +{ + struct net_device *netdev = adapter->netdev; + + if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) { + netif_carrier_off(netdev); + adapter->flags |= BE_FLAGS_LINK_STATUS_INIT; + } + + if (link_status) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); +} + +static void be_tx_stats_update(struct be_tx_obj *txo, 
struct sk_buff *skb) +{ + struct be_tx_stats *stats = tx_stats(txo); + + u64_stats_update_begin(&stats->sync); + stats->tx_reqs++; + stats->tx_bytes += skb->len; + stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1); + u64_stats_update_end(&stats->sync); +} + +/* Returns number of WRBs needed for the skb */ +static u32 skb_wrb_cnt(struct sk_buff *skb) +{ + /* +1 for the header wrb */ + return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags; +} + +static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len) +{ + wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr)); + wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr)); + wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK); + wrb->rsvd0 = 0; +} + +/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb + * to avoid the swap and shift/mask operations in wrb_fill(). + */ +static inline void wrb_fill_dummy(struct be_eth_wrb *wrb) +{ + wrb->frag_pa_hi = 0; + wrb->frag_pa_lo = 0; + wrb->frag_len = 0; + wrb->rsvd0 = 0; +} + +static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, + struct sk_buff *skb) +{ + u8 vlan_prio; + u16 vlan_tag; + + vlan_tag = skb_vlan_tag_get(skb); + vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + /* If vlan priority provided by OS is NOT in available bmap */ + if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) + vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | + adapter->recommended_prio; + + return vlan_tag; +} + +/* Used only for IP tunnel packets */ +static u16 skb_inner_ip_proto(struct sk_buff *skb) +{ + return (inner_ip_hdr(skb)->version == 4) ? + inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr; +} + +static u16 skb_ip_proto(struct sk_buff *skb) +{ + return (ip_hdr(skb)->version == 4) ? + ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; +} + +static inline bool be_is_txq_full(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len; +} + +static inline bool be_can_txq_wake(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) < txo->q.len / 2; +} + +static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo) +{ + return atomic_read(&txo->q.used) > txo->pend_wrb_cnt; +} + +static void be_get_wrb_params_from_skb(struct be_adapter *adapter, + struct sk_buff *skb, + struct be_wrb_params *wrb_params) +{ + u16 proto; + + if (skb_is_gso(skb)) { + BE_WRB_F_SET(wrb_params->features, LSO, 1); + wrb_params->lso_mss = skb_shinfo(skb)->gso_size; + if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) + BE_WRB_F_SET(wrb_params->features, LSO6, 1); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->encapsulation) { + BE_WRB_F_SET(wrb_params->features, IPCS, 1); + proto = skb_inner_ip_proto(skb); + } else { + proto = skb_ip_proto(skb); + } + if (proto == IPPROTO_TCP) + BE_WRB_F_SET(wrb_params->features, TCPCS, 1); + else if (proto == IPPROTO_UDP) + BE_WRB_F_SET(wrb_params->features, UDPCS, 1); + } + + if (skb_vlan_tag_present(skb)) { + BE_WRB_F_SET(wrb_params->features, VLAN, 1); + wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb); + } + + BE_WRB_F_SET(wrb_params->features, CRC, 1); +} + +static void wrb_fill_hdr(struct be_adapter *adapter, + struct be_eth_hdr_wrb *hdr, + struct be_wrb_params *wrb_params, + struct sk_buff *skb) +{ + memset(hdr, 0, sizeof(*hdr)); + + SET_TX_WRB_HDR_BITS(crc, hdr, + BE_WRB_F_GET(wrb_params->features, CRC)); + SET_TX_WRB_HDR_BITS(ipcs, hdr, + BE_WRB_F_GET(wrb_params->features, IPCS)); + SET_TX_WRB_HDR_BITS(tcpcs, hdr, + BE_WRB_F_GET(wrb_params->features, TCPCS)); + 
SET_TX_WRB_HDR_BITS(udpcs, hdr, + BE_WRB_F_GET(wrb_params->features, UDPCS)); + + SET_TX_WRB_HDR_BITS(lso, hdr, + BE_WRB_F_GET(wrb_params->features, LSO)); + SET_TX_WRB_HDR_BITS(lso6, hdr, + BE_WRB_F_GET(wrb_params->features, LSO6)); + SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss); + + /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this + * hack is not needed, the evt bit is set while ringing DB. + */ + SET_TX_WRB_HDR_BITS(event, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW)); + SET_TX_WRB_HDR_BITS(vlan, hdr, + BE_WRB_F_GET(wrb_params->features, VLAN)); + SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag); + + SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb)); + SET_TX_WRB_HDR_BITS(len, hdr, skb->len); +} + +static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, + bool unmap_single) +{ + dma_addr_t dma; + u32 frag_len = le32_to_cpu(wrb->frag_len); + + + dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 | + (u64)le32_to_cpu(wrb->frag_pa_lo); + if (frag_len) { + if (unmap_single) + dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE); + } +} + +/* Grab a WRB header for xmit */ +static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) +{ + u16 head = txo->q.head; + + queue_head_inc(&txo->q); + return head; +} + +/* Set up the WRB header for xmit */ +static void be_tx_setup_wrb_hdr(struct be_adapter *adapter, + struct be_tx_obj *txo, + struct be_wrb_params *wrb_params, + struct sk_buff *skb, u16 head) +{ + u32 num_frags = skb_wrb_cnt(skb); + struct be_queue_info *txq = &txo->q; + struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); + + wrb_fill_hdr(adapter, hdr, wrb_params, skb); + be_dws_cpu_to_le(hdr, sizeof(*hdr)); + + BUG_ON(txo->sent_skb_list[head]); + txo->sent_skb_list[head] = skb; + txo->last_req_hdr = head; + atomic_add(num_frags, &txq->used); + txo->last_req_wrb_cnt = num_frags; + txo->pend_wrb_cnt += num_frags; +} + +/* Setup a WRB fragment (buffer descriptor) for xmit */ +static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, + int len) +{ + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + wrb = queue_head_node(txq); + wrb_fill(wrb, busaddr, len); + queue_head_inc(txq); +} + +/* Bring the queue back to the state it was in before be_xmit_enqueue() routine + * was invoked. The producer index is restored to the previous packet and the + * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. + */ +static void be_xmit_restore(struct be_adapter *adapter, + struct be_tx_obj *txo, u16 head, bool map_single, + u32 copied) +{ + struct device *dev; + struct be_eth_wrb *wrb; + struct be_queue_info *txq = &txo->q; + + dev = &adapter->pdev->dev; + txq->head = head; + + /* skip the first wrb (hdr); it's not mapped */ + queue_head_inc(txq); + while (copied) { + wrb = queue_head_node(txq); + unmap_tx_frag(dev, wrb, map_single); + map_single = false; + copied -= le32_to_cpu(wrb->frag_len); + queue_head_inc(txq); + } + + txq->head = head; +} + +/* Enqueue the given packet for transmit. This routine allocates WRBs for the + * packet, dma maps the packet buffers and sets up the WRBs. Returns the number + * of WRBs used up by the packet. 
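+ * On a DMA mapping error the partially set up WRBs are unwound via
+ * be_xmit_restore() and 0 is returned.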
+ */ +static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, + struct sk_buff *skb, + struct be_wrb_params *wrb_params) +{ + u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); + struct device *dev = &adapter->pdev->dev; + struct be_queue_info *txq = &txo->q; + bool map_single = false; + u16 head = txq->head; + dma_addr_t busaddr; + int len; + + head = be_tx_get_wrb_hdr(txo); + + if (skb->len > skb->data_len) { + len = skb_headlen(skb); + + busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, busaddr)) + goto dma_err; + map_single = true; + be_tx_setup_wrb_frag(txo, busaddr, len); + copied += len; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + len = skb_frag_size(frag); + + busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, busaddr)) + goto dma_err; + be_tx_setup_wrb_frag(txo, busaddr, len); + copied += len; + } + + be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head); + + be_tx_stats_update(txo, skb); + return wrb_cnt; + +dma_err: + adapter->drv_stats.dma_map_errors++; + be_xmit_restore(adapter, txo, head, map_single, copied); + return 0; +} + +static inline int qnq_async_evt_rcvd(struct be_adapter *adapter) +{ + return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD; +} + +static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, + struct sk_buff *skb, + struct be_wrb_params + *wrb_params) +{ + u16 vlan_tag = 0; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + return skb; + + if (skb_vlan_tag_present(skb)) + vlan_tag = be_get_tx_vlan_tag(adapter, skb); + + if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { + if (!vlan_tag) + vlan_tag = adapter->pvid; + /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to + * skip VLAN insertion + */ + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); + } + + if (vlan_tag) { + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), + vlan_tag); + if (unlikely(!skb)) + return skb; + skb->vlan_tci = 0; + } + + /* Insert the outer VLAN, if any */ + if (adapter->qnq_vid) { + vlan_tag = adapter->qnq_vid; + skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), + vlan_tag); + if (unlikely(!skb)) + return skb; + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); + } + + return skb; +} + +static bool be_ipv6_exthdr_check(struct sk_buff *skb) +{ + struct ethhdr *eh = (struct ethhdr *)skb->data; + u16 offset = ETH_HLEN; + + if (eh->h_proto == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset); + + offset += sizeof(struct ipv6hdr); + if (ip6h->nexthdr != NEXTHDR_TCP && + ip6h->nexthdr != NEXTHDR_UDP) { + struct ipv6_opt_hdr *ehdr = + (struct ipv6_opt_hdr *)(skb->data + offset); + + /* offending pkt: 2nd byte following IPv6 hdr is 0xff */ + if (ehdr->hdrlen == 0xff) + return true; + } + } + return false; +} + +static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb) +{ + return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid; +} + +static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) +{ + return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); +} + +static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + struct be_wrb_params + *wrb_params) +{ + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + unsigned int eth_hdr_len; + struct iphdr *ip; + + /* For padded packets, BE HW modifies tot_len 
field in IP header + * incorrecly when VLAN tag is inserted by HW. + * For padded packets, Lancer computes incorrect checksum. + */ + eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? + VLAN_ETH_HLEN : ETH_HLEN; + if (skb->len <= 60 && + (lancer_chip(adapter) || skb_vlan_tag_present(skb)) && + is_ipv4_pkt(skb)) { + ip = (struct iphdr *)ip_hdr(skb); + pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); + } + + /* If vlan tag is already inlined in the packet, skip HW VLAN + * tagging in pvid-tagging mode + */ + if (be_pvid_tagging_enabled(adapter) && + veh->h_vlan_proto == htons(ETH_P_8021Q)) + BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); + + /* HW has a bug wherein it will calculate CSUM for VLAN + * pkts even though it is disabled. + * Manually insert VLAN in pkt. + */ + if (skb->ip_summed != CHECKSUM_PARTIAL && + skb_vlan_tag_present(skb)) { + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); + if (unlikely(!skb)) + goto err; + } + + /* HW may lockup when VLAN HW tagging is requested on + * certain ipv6 packets. Drop such pkts if the HW workaround to + * skip HW tagging is not enabled by FW. + */ + if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && + (adapter->pvid || adapter->qnq_vid) && + !qnq_async_evt_rcvd(adapter))) + goto tx_drop; + + /* Manual VLAN tag insertion to prevent: + * ASIC lockup when the ASIC inserts VLAN tag into + * certain ipv6 packets. Insert VLAN tags in driver, + * and set event, completion, vlan bits accordingly + * in the Tx WRB. + */ + if (be_ipv6_tx_stall_chk(adapter, skb) && + be_vlan_tag_tx_chk(adapter, skb)) { + skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); + if (unlikely(!skb)) + goto err; + } + + return skb; +tx_drop: + dev_kfree_skb_any(skb); +err: + return NULL; +} + +static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + struct be_wrb_params *wrb_params) +{ + /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or + * less may cause a transmit stall on that port. So the work-around is + * to pad short packets (<= 32 bytes) to a 36-byte length. 
+ */ + if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { + if (skb_put_padto(skb, 36)) + return NULL; + } + + if (BEx_chip(adapter) || lancer_chip(adapter)) { + skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params); + if (!skb) + return NULL; + } + + return skb; +} + +static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo) +{ + struct be_queue_info *txq = &txo->q; + struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr); + + /* Mark the last request eventable if it hasn't been marked already */ + if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT))) + hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL); + + /* compose a dummy wrb if there are odd set of wrbs to notify */ + if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) { + wrb_fill_dummy(queue_head_node(txq)); + queue_head_inc(txq); + atomic_inc(&txq->used); + txo->pend_wrb_cnt++; + hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK << + TX_HDR_WRB_NUM_SHIFT); + hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) << + TX_HDR_WRB_NUM_SHIFT); + } + be_txq_notify(adapter, txo, txo->pend_wrb_cnt); + txo->pend_wrb_cnt = 0; +} + +static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + u16 q_idx = skb_get_queue_mapping(skb); + struct be_tx_obj *txo = &adapter->tx_obj[q_idx]; + struct be_wrb_params wrb_params = { 0 }; + bool flush = !skb->xmit_more; + u16 wrb_cnt; + + skb = be_xmit_workarounds(adapter, skb, &wrb_params); + if (unlikely(!skb)) + goto drop; + + be_get_wrb_params_from_skb(adapter, skb, &wrb_params); + + wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); + if (unlikely(!wrb_cnt)) { + dev_kfree_skb_any(skb); + goto drop; + } + + if (be_is_txq_full(txo)) { + netif_stop_subqueue(netdev, q_idx); + tx_stats(txo)->tx_stops++; + } + + if (flush || __netif_subqueue_stopped(netdev, q_idx)) + be_xmit_flush(adapter, txo); + + return NETDEV_TX_OK; +drop: + tx_stats(txo)->tx_drv_drops++; + /* Flush the already enqueued tx requests */ + if (flush && txo->pend_wrb_cnt) + be_xmit_flush(adapter, txo); + + return NETDEV_TX_OK; +} + +static int be_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + + if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) { + dev_info(dev, "MTU must be between %d and %d bytes\n", + BE_MIN_MTU, BE_MAX_MTU); + return -EINVAL; + } + + dev_info(dev, "MTU changed from %d to %d bytes\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + return 0; +} + +static inline bool be_in_all_promisc(struct be_adapter *adapter) +{ + return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) == + BE_IF_FLAGS_ALL_PROMISCUOUS; +} + +static int be_set_vlan_promisc(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) + return 0; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON); + if (!status) { + dev_info(dev, "Enabled VLAN promiscuous mode\n"); + adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS; + } else { + dev_err(dev, "Failed to enable VLAN promiscuous mode\n"); + } + return status; +} + +static int be_clear_vlan_promisc(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF); + if (!status) { + dev_info(dev, "Disabling VLAN promiscuous mode\n"); + adapter->if_flags &= 
~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + return status; +} + +/* + * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. + * If the user configures more, place BE in vlan promiscuous mode. + */ +static int be_vid_config(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + u16 vids[BE_NUM_VLANS_SUPPORTED]; + u16 num = 0, i = 0; + int status = 0; + + /* No need to further configure vids if in promiscuous mode */ + if (be_in_all_promisc(adapter)) + return 0; + + if (adapter->vlans_added > be_max_vlans(adapter)) + return be_set_vlan_promisc(adapter); + + /* Construct VLAN Table to give to HW */ + for_each_set_bit(i, adapter->vids, VLAN_N_VID) + vids[num++] = cpu_to_le16(i); + + status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); + if (status) { + dev_err(dev, "Setting HW VLAN filtering failed\n"); + /* Set to VLAN promisc mode as setting VLAN filter failed */ + if (addl_status(status) == + MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) + return be_set_vlan_promisc(adapter); + } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + status = be_clear_vlan_promisc(adapter); + } + return status; +} + +static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; + + /* Packets with VID 0 are always received by Lancer by default */ + if (lancer_chip(adapter) && vid == 0) + return status; + + if (test_bit(vid, adapter->vids)) + return status; + + set_bit(vid, adapter->vids); + adapter->vlans_added++; + + status = be_vid_config(adapter); + if (status) { + adapter->vlans_added--; + clear_bit(vid, adapter->vids); + } + + return status; +} + +static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + /* Packets with VID 0 are always received by Lancer by default */ + if (lancer_chip(adapter) && vid == 0) + return 0; + + clear_bit(vid, adapter->vids); + adapter->vlans_added--; + + return be_vid_config(adapter); +} + +static void be_clear_all_promisc(struct be_adapter *adapter) +{ + be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF); + adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS; +} + +static void be_set_all_promisc(struct be_adapter *adapter) +{ + be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON); + adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS; +} + +static void be_set_mc_promisc(struct be_adapter *adapter) +{ + int status; + + if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) + return; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON); + if (!status) + adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS; +} + +static void be_set_mc_list(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON); + if (!status) + adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS; + else + be_set_mc_promisc(adapter); +} + +static void be_set_uc_list(struct be_adapter *adapter) +{ + struct netdev_hw_addr *ha; + int i = 1; /* First slot is claimed by the Primary MAC */ + + for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + + if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) { + be_set_all_promisc(adapter); + return; + } + + netdev_for_each_uc_addr(ha, adapter->netdev) { + adapter->uc_macs++; /* First slot is for Primary MAC */ + be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle, + 
&adapter->pmac_id[adapter->uc_macs], 0); + } +} + +static void be_clear_uc_list(struct be_adapter *adapter) +{ + int i; + + for (i = 1; i < (adapter->uc_macs + 1); i++) + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[i], 0); + adapter->uc_macs = 0; +} + +static void be_set_rx_mode(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (netdev->flags & IFF_PROMISC) { + be_set_all_promisc(adapter); + return; + } + + /* Interface was previously in promiscuous mode; disable it */ + if (be_in_all_promisc(adapter)) { + be_clear_all_promisc(adapter); + if (adapter->vlans_added) + be_vid_config(adapter); + } + + /* Enable multicast promisc if num configured exceeds what we support */ + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > be_max_mc(adapter)) { + be_set_mc_promisc(adapter); + return; + } + + if (netdev_uc_count(netdev) != adapter->uc_macs) + be_set_uc_list(adapter); + + be_set_mc_list(adapter); +} + +static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + int status; + + if (!sriov_enabled(adapter)) + return -EPERM; + + if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) + return -EINVAL; + + /* Proceed further only if user provided MAC is different + * from active MAC + */ + if (ether_addr_equal(mac, vf_cfg->mac_addr)) + return 0; + + if (BEx_chip(adapter)) { + be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id, + vf + 1); + + status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle, + &vf_cfg->pmac_id, vf + 1); + } else { + status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, + vf + 1); + } + + if (status) { + dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x", + mac, vf, status); + return be_cmd_status(status); + } + + ether_addr_copy(vf_cfg->mac_addr, mac); + + return 0; +} + +static int be_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *vi) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + + if (!sriov_enabled(adapter)) + return -EPERM; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + vi->vf = vf; + vi->max_tx_rate = vf_cfg->tx_rate; + vi->min_tx_rate = 0; + vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; + vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; + memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); + vi->linkstate = adapter->vf_cfg[vf].plink_tracking; + + return 0; +} + +static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + u16 vids[BE_NUM_VLANS_SUPPORTED]; + int vf_if_id = vf_cfg->if_handle; + int status; + + /* Enable Transparent VLAN Tagging */ + status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); + if (status) + return status; + + /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ + vids[0] = 0; + status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); + if (!status) + dev_info(&adapter->pdev->dev, + "Cleared guest VLANs on VF%d", vf); + + /* After TVT is enabled, disallow VFs to program VLAN filters */ + if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & + ~BE_PRIV_FILTMGMT, vf + 1); + if (!status) + vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; + } + return 0; +} + +static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + struct device *dev = 
&adapter->pdev->dev; + int status; + + /* Reset Transparent VLAN Tagging. */ + status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, + vf_cfg->if_handle, 0); + if (status) + return status; + + /* Allow VFs to program VLAN filtering */ + if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | + BE_PRIV_FILTMGMT, vf + 1); + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; + dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); + } + } + + dev_info(dev, + "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); + return 0; +} + +static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + int status; + + if (!sriov_enabled(adapter)) + return -EPERM; + + if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7) + return -EINVAL; + + if (vlan || qos) { + vlan |= qos << VLAN_PRIO_SHIFT; + status = be_set_vf_tvt(adapter, vf, vlan); + } else { + status = be_clear_vf_tvt(adapter, vf); + } + + if (status) { + dev_err(&adapter->pdev->dev, + "VLAN %d config on VF %d failed : %#x\n", vlan, vf, + status); + return be_cmd_status(status); + } + + vf_cfg->vlan_tag = vlan; + return 0; +} + +static int be_set_vf_tx_rate(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + int percent_rate, status = 0; + u16 link_speed = 0; + u8 link_status; + + if (!sriov_enabled(adapter)) + return -EPERM; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + if (min_tx_rate) + return -EINVAL; + + if (!max_tx_rate) + goto config_qos; + + status = be_cmd_link_status_query(adapter, &link_speed, + &link_status, 0); + if (status) + goto err; + + if (!link_status) { + dev_err(dev, "TX-rate setting not allowed when link is down\n"); + status = -ENETDOWN; + goto err; + } + + if (max_tx_rate < 100 || max_tx_rate > link_speed) { + dev_err(dev, "TX-rate must be between 100 and %d Mbps\n", + link_speed); + status = -EINVAL; + goto err; + } + + /* On Skyhawk the QOS setting must be done only as a % value */ + percent_rate = link_speed / 100; + if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) { + dev_err(dev, "TX-rate must be a multiple of %d Mbps\n", + percent_rate); + status = -EINVAL; + goto err; + } + +config_qos: + status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1); + if (status) + goto err; + + adapter->vf_cfg[vf].tx_rate = max_tx_rate; + return 0; + +err: + dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n", + max_tx_rate, vf); + return be_cmd_status(status); +} + +static int be_set_vf_link_state(struct net_device *netdev, int vf, + int link_state) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!sriov_enabled(adapter)) + return -EPERM; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + status = be_cmd_set_logical_link_config(adapter, link_state, vf+1); + if (status) { + dev_err(&adapter->pdev->dev, + "Link state change on VF %d failed: %#x\n", vf, status); + return be_cmd_status(status); + } + + adapter->vf_cfg[vf].plink_tracking = link_state; + + return 0; +} + +static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts, + ulong now) +{ + aic->rx_pkts_prev = rx_pkts; + aic->tx_reqs_prev = tx_pkts; + aic->jiffies = now; +} + +static void be_eqd_update(struct be_adapter *adapter) +{ + struct be_set_eqd set_eqd[MAX_EVT_QS]; + int 
eqd, i, num = 0, start; + struct be_aic_obj *aic; + struct be_eq_obj *eqo; + struct be_rx_obj *rxo; + struct be_tx_obj *txo; + u64 rx_pkts, tx_pkts; + ulong now; + u32 pps, delta; + + for_all_evt_queues(adapter, eqo, i) { + aic = &adapter->aic_obj[eqo->idx]; + if (!aic->enable) { + if (aic->jiffies) + aic->jiffies = 0; + eqd = aic->et_eqd; + goto modify_eqd; + } + + rxo = &adapter->rx_obj[eqo->idx]; + do { + start = u64_stats_fetch_begin_irq(&rxo->stats.sync); + rx_pkts = rxo->stats.rx_pkts; + } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); + + txo = &adapter->tx_obj[eqo->idx]; + do { + start = u64_stats_fetch_begin_irq(&txo->stats.sync); + tx_pkts = txo->stats.tx_reqs; + } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); + + /* Skip, if wrapped around or first calculation */ + now = jiffies; + if (!aic->jiffies || time_before(now, aic->jiffies) || + rx_pkts < aic->rx_pkts_prev || + tx_pkts < aic->tx_reqs_prev) { + be_aic_update(aic, rx_pkts, tx_pkts, now); + continue; + } + + delta = jiffies_to_msecs(now - aic->jiffies); + pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) + + (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta); + eqd = (pps / 15000) << 2; + + if (eqd < 8) + eqd = 0; + eqd = min_t(u32, eqd, aic->max_eqd); + eqd = max_t(u32, eqd, aic->min_eqd); + + be_aic_update(aic, rx_pkts, tx_pkts, now); +modify_eqd: + if (eqd != aic->prev_eqd) { + set_eqd[num].delay_multiplier = (eqd * 65)/100; + set_eqd[num].eq_id = eqo->q.id; + aic->prev_eqd = eqd; + num++; + } + } + + if (num) + be_cmd_modify_eqd(adapter, set_eqd, num); +} + +static void be_rx_stats_update(struct be_rx_obj *rxo, + struct be_rx_compl_info *rxcp) +{ + struct be_rx_stats *stats = rx_stats(rxo); + + u64_stats_update_begin(&stats->sync); + stats->rx_compl++; + stats->rx_bytes += rxcp->pkt_size; + stats->rx_pkts++; + if (rxcp->pkt_type == BE_MULTICAST_PACKET) + stats->rx_mcast_pkts++; + if (rxcp->err) + stats->rx_compl_err++; + u64_stats_update_end(&stats->sync); +} + +static inline bool csum_passed(struct be_rx_compl_info *rxcp) +{ + /* L4 checksum is not reliable for non TCP/UDP packets. + * Also ignore ipcksm for ipv6 pkts + */ + return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum && + (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err; +} + +static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) +{ + struct be_adapter *adapter = rxo->adapter; + struct be_rx_page_info *rx_page_info; + struct be_queue_info *rxq = &rxo->q; + u16 frag_idx = rxq->tail; + + rx_page_info = &rxo->page_info_tbl[frag_idx]; + BUG_ON(!rx_page_info->page); + + if (rx_page_info->last_frag) { + dma_unmap_page(&adapter->pdev->dev, + dma_unmap_addr(rx_page_info, bus), + adapter->big_page_size, DMA_FROM_DEVICE); + rx_page_info->last_frag = false; + } else { + dma_sync_single_for_cpu(&adapter->pdev->dev, + dma_unmap_addr(rx_page_info, bus), + rx_frag_size, DMA_FROM_DEVICE); + } + + queue_tail_inc(rxq); + atomic_dec(&rxq->used); + return rx_page_info; +} + +/* Throwaway the data in the Rx completion */ +static void be_rx_compl_discard(struct be_rx_obj *rxo, + struct be_rx_compl_info *rxcp) +{ + struct be_rx_page_info *page_info; + u16 i, num_rcvd = rxcp->num_rcvd; + + for (i = 0; i < num_rcvd; i++) { + page_info = get_rx_page_info(rxo); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } +} + +/* + * skb_fill_rx_data forms a complete skb for an ether frame + * indicated by rxcp. 
+ */ +static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, + struct be_rx_compl_info *rxcp) +{ + struct be_rx_page_info *page_info; + u16 i, j; + u16 hdr_len, curr_frag_len, remaining; + u8 *start; + + page_info = get_rx_page_info(rxo); + start = page_address(page_info->page) + page_info->page_offset; + prefetch(start); + + /* Copy data in the first descriptor of this completion */ + curr_frag_len = min(rxcp->pkt_size, rx_frag_size); + + skb->len = curr_frag_len; + if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ + memcpy(skb->data, start, curr_frag_len); + /* Complete packet has now been moved to data */ + put_page(page_info->page); + skb->data_len = 0; + skb->tail += curr_frag_len; + } else { + hdr_len = ETH_HLEN; + memcpy(skb->data, start, hdr_len); + skb_shinfo(skb)->nr_frags = 1; + skb_frag_set_page(skb, 0, page_info->page); + skb_shinfo(skb)->frags[0].page_offset = + page_info->page_offset + hdr_len; + skb_frag_size_set(&skb_shinfo(skb)->frags[0], + curr_frag_len - hdr_len); + skb->data_len = curr_frag_len - hdr_len; + skb->truesize += rx_frag_size; + skb->tail += hdr_len; + } + page_info->page = NULL; + + if (rxcp->pkt_size <= rx_frag_size) { + BUG_ON(rxcp->num_rcvd != 1); + return; + } + + /* More frags present for this completion */ + remaining = rxcp->pkt_size - curr_frag_len; + for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { + page_info = get_rx_page_info(rxo); + curr_frag_len = min(remaining, rx_frag_size); + + /* Coalesce all frags from the same physical page in one slot */ + if (page_info->page_offset == 0) { + /* Fresh page */ + j++; + skb_frag_set_page(skb, j, page_info->page); + skb_shinfo(skb)->frags[j].page_offset = + page_info->page_offset; + skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); + skb_shinfo(skb)->nr_frags++; + } else { + put_page(page_info->page); + } + + skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); + skb->len += curr_frag_len; + skb->data_len += curr_frag_len; + skb->truesize += rx_frag_size; + remaining -= curr_frag_len; + page_info->page = NULL; + } + BUG_ON(j > MAX_SKB_FRAGS); +} + +/* Process the RX completion indicated by rxcp when GRO is disabled */ +static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, + struct be_rx_compl_info *rxcp) +{ + struct be_adapter *adapter = rxo->adapter; + struct net_device *netdev = adapter->netdev; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE); + if (unlikely(!skb)) { + rx_stats(rxo)->rx_drops_no_skbs++; + be_rx_compl_discard(rxo, rxcp); + return; + } + + skb_fill_rx_data(rxo, skb, rxcp); + + if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); + if (netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); + + skb->csum_level = rxcp->tunneled; + skb_mark_napi_id(skb, napi); + + if (rxcp->vlanf) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); + + netif_receive_skb(skb); +} + +/* Process the RX completion indicated by rxcp when GRO is enabled */ +static void be_rx_compl_process_gro(struct be_rx_obj *rxo, + struct napi_struct *napi, + struct be_rx_compl_info *rxcp) +{ + struct be_adapter *adapter = rxo->adapter; + struct be_rx_page_info *page_info; + struct sk_buff *skb = NULL; + u16 remaining, curr_frag_len; + u16 i, j; + + skb = napi_get_frags(napi); + if (!skb) { 
+ be_rx_compl_discard(rxo, rxcp); + return; + } + + remaining = rxcp->pkt_size; + for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { + page_info = get_rx_page_info(rxo); + + curr_frag_len = min(remaining, rx_frag_size); + + /* Coalesce all frags from the same physical page in one slot */ + if (i == 0 || page_info->page_offset == 0) { + /* First frag or Fresh page */ + j++; + skb_frag_set_page(skb, j, page_info->page); + skb_shinfo(skb)->frags[j].page_offset = + page_info->page_offset; + skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); + } else { + put_page(page_info->page); + } + skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); + skb->truesize += rx_frag_size; + remaining -= curr_frag_len; + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(j > MAX_SKB_FRAGS); + + skb_shinfo(skb)->nr_frags = j + 1; + skb->len = rxcp->pkt_size; + skb->data_len = rxcp->pkt_size; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); + if (adapter->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); + + skb->csum_level = rxcp->tunneled; + skb_mark_napi_id(skb, napi); + + if (rxcp->vlanf) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); + + napi_gro_frags(napi); +} + +static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl, + struct be_rx_compl_info *rxcp) +{ + rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl); + rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl); + rxcp->err = GET_RX_COMPL_V1_BITS(err, compl); + rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl); + rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl); + rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl); + rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl); + rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl); + rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl); + rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl); + rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl); + if (rxcp->vlanf) { + rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl); + rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl); + } + rxcp->port = GET_RX_COMPL_V1_BITS(port, compl); + rxcp->tunneled = + GET_RX_COMPL_V1_BITS(tunneled, compl); +} + +static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, + struct be_rx_compl_info *rxcp) +{ + rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl); + rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl); + rxcp->err = GET_RX_COMPL_V0_BITS(err, compl); + rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl); + rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl); + rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl); + rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl); + rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl); + rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl); + rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl); + rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl); + if (rxcp->vlanf) { + rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl); + rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl); + } + rxcp->port = GET_RX_COMPL_V0_BITS(port, compl); + rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl); +} + +static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) +{ + struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); + struct be_rx_compl_info *rxcp = &rxo->rxcp; + struct be_adapter *adapter = rxo->adapter; + + /* For checking the valid bit it is Ok to use either definition as the + * valid bit is at the same position in both v0 and v1 Rx compl */ + if 
(compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0) + return NULL; + + rmb(); + be_dws_le_to_cpu(compl, sizeof(*compl)); + + if (adapter->be3_native) + be_parse_rx_compl_v1(compl, rxcp); + else + be_parse_rx_compl_v0(compl, rxcp); + + if (rxcp->ip_frag) + rxcp->l4_csum = 0; + + if (rxcp->vlanf) { + /* In QNQ modes, if qnq bit is not set, then the packet was + * tagged only with the transparent outer vlan-tag and must + * not be treated as a vlan packet by host + */ + if (be_is_qnq_mode(adapter) && !rxcp->qnq) + rxcp->vlanf = 0; + + if (!lancer_chip(adapter)) + rxcp->vlan_tag = swab16(rxcp->vlan_tag); + + if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && + !test_bit(rxcp->vlan_tag, adapter->vids)) + rxcp->vlanf = 0; + } + + /* As the compl has been parsed, reset it; we wont touch it again */ + compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0; + + queue_tail_inc(&rxo->cq); + return rxcp; +} + +static inline struct page *be_alloc_pages(u32 size, gfp_t gfp) +{ + u32 order = get_order(size); + + if (order > 0) + gfp |= __GFP_COMP; + return alloc_pages(gfp, order); +} + +/* + * Allocate a page, split it to fragments of size rx_frag_size and post as + * receive buffers to BE + */ +static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) +{ + struct be_adapter *adapter = rxo->adapter; + struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; + struct be_queue_info *rxq = &rxo->q; + struct page *pagep = NULL; + struct device *dev = &adapter->pdev->dev; + struct be_eth_rx_d *rxd; + u64 page_dmaaddr = 0, frag_dmaaddr; + u32 posted, page_offset = 0, notify = 0; + + page_info = &rxo->page_info_tbl[rxq->head]; + for (posted = 0; posted < frags_needed && !page_info->page; posted++) { + if (!pagep) { + pagep = be_alloc_pages(adapter->big_page_size, gfp); + if (unlikely(!pagep)) { + rx_stats(rxo)->rx_post_fail++; + break; + } + page_dmaaddr = dma_map_page(dev, pagep, 0, + adapter->big_page_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, page_dmaaddr)) { + put_page(pagep); + pagep = NULL; + adapter->drv_stats.dma_map_errors++; + break; + } + page_offset = 0; + } else { + get_page(pagep); + page_offset += rx_frag_size; + } + page_info->page_offset = page_offset; + page_info->page = pagep; + + rxd = queue_head_node(rxq); + frag_dmaaddr = page_dmaaddr + page_info->page_offset; + rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); + rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); + + /* Any space left in the current big page for another frag? 
*/ + if ((page_offset + rx_frag_size + rx_frag_size) > + adapter->big_page_size) { + pagep = NULL; + page_info->last_frag = true; + dma_unmap_addr_set(page_info, bus, page_dmaaddr); + } else { + dma_unmap_addr_set(page_info, bus, frag_dmaaddr); + } + + prev_page_info = page_info; + queue_head_inc(rxq); + page_info = &rxo->page_info_tbl[rxq->head]; + } + + /* Mark the last frag of a page when we break out of the above loop + * with no more slots available in the RXQ + */ + if (pagep) { + prev_page_info->last_frag = true; + dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr); + } + + if (posted) { + atomic_add(posted, &rxq->used); + if (rxo->rx_post_starved) + rxo->rx_post_starved = false; + do { + notify = min(MAX_NUM_POST_ERX_DB, posted); + be_rxq_notify(adapter, rxq->id, notify); + posted -= notify; + } while (posted); + } else if (atomic_read(&rxq->used) == 0) { + /* Let be_worker replenish when memory is available */ + rxo->rx_post_starved = true; + } +} + +static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo) +{ + struct be_queue_info *tx_cq = &txo->cq; + struct be_tx_compl_info *txcp = &txo->txcp; + struct be_eth_tx_compl *compl = queue_tail_node(tx_cq); + + if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) + return NULL; + + /* Ensure load ordering of valid bit dword and other dwords below */ + rmb(); + be_dws_le_to_cpu(compl, sizeof(*compl)); + + txcp->status = GET_TX_COMPL_BITS(status, compl); + txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl); + + compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; + queue_tail_inc(tx_cq); + return txcp; +} + +static u16 be_tx_compl_process(struct be_adapter *adapter, + struct be_tx_obj *txo, u16 last_index) +{ + struct sk_buff **sent_skbs = txo->sent_skb_list; + struct be_queue_info *txq = &txo->q; + u16 frag_index, num_wrbs = 0; + struct sk_buff *skb = NULL; + bool unmap_skb_hdr = false; + struct be_eth_wrb *wrb; + + do { + if (sent_skbs[txq->tail]) { + /* Free skb from prev req */ + if (skb) + dev_consume_skb_any(skb); + skb = sent_skbs[txq->tail]; + sent_skbs[txq->tail] = NULL; + queue_tail_inc(txq); /* skip hdr wrb */ + num_wrbs++; + unmap_skb_hdr = true; + } + wrb = queue_tail_node(txq); + frag_index = txq->tail; + unmap_tx_frag(&adapter->pdev->dev, wrb, + (unmap_skb_hdr && skb_headlen(skb))); + unmap_skb_hdr = false; + queue_tail_inc(txq); + num_wrbs++; + } while (frag_index != last_index); + dev_consume_skb_any(skb); + + return num_wrbs; +} + +/* Return the number of events in the event queue */ +static inline int events_get(struct be_eq_obj *eqo) +{ + struct be_eq_entry *eqe; + int num = 0; + + do { + eqe = queue_tail_node(&eqo->q); + if (eqe->evt == 0) + break; + + rmb(); + eqe->evt = 0; + num++; + queue_tail_inc(&eqo->q); + } while (true); + + return num; +} + +/* Leaves the EQ is disarmed state */ +static void be_eq_clean(struct be_eq_obj *eqo) +{ + int num = events_get(eqo); + + be_eq_notify(eqo->adapter, eqo->q.id, false, true, num); +} + +static void be_rx_cq_clean(struct be_rx_obj *rxo) +{ + struct be_rx_page_info *page_info; + struct be_queue_info *rxq = &rxo->q; + struct be_queue_info *rx_cq = &rxo->cq; + struct be_rx_compl_info *rxcp; + struct be_adapter *adapter = rxo->adapter; + int flush_wait = 0; + + /* Consume pending rx completions. + * Wait for the flush completion (identified by zero num_rcvd) + * to arrive. Notify CQ even when there are no more CQ entries + * for HW to flush partially coalesced CQ entries. + * In Lancer, there is no need to wait for flush compl. 
+ */ + for (;;) { + rxcp = be_rx_compl_get(rxo); + if (!rxcp) { + if (lancer_chip(adapter)) + break; + + if (flush_wait++ > 10 || be_hw_error(adapter)) { + dev_warn(&adapter->pdev->dev, + "did not receive flush compl\n"); + break; + } + be_cq_notify(adapter, rx_cq->id, true, 0); + mdelay(1); + } else { + be_rx_compl_discard(rxo, rxcp); + be_cq_notify(adapter, rx_cq->id, false, 1); + if (rxcp->num_rcvd == 0) + break; + } + } + + /* After cleanup, leave the CQ in unarmed state */ + be_cq_notify(adapter, rx_cq->id, false, 0); + + /* Then free posted rx buffers that were not used */ + while (atomic_read(&rxq->used) > 0) { + page_info = get_rx_page_info(rxo); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(atomic_read(&rxq->used)); + rxq->tail = 0; + rxq->head = 0; +} + +static void be_tx_compl_clean(struct be_adapter *adapter) +{ + u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; + struct device *dev = &adapter->pdev->dev; + struct be_tx_compl_info *txcp; + struct be_queue_info *txq; + struct be_tx_obj *txo; + int i, pending_txqs; + + /* Stop polling for compls when HW has been silent for 10ms */ + do { + pending_txqs = adapter->num_tx_qs; + + for_all_tx_queues(adapter, txo, i) { + cmpl = 0; + num_wrbs = 0; + txq = &txo->q; + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += + be_tx_compl_process(adapter, txo, + txcp->end_index); + cmpl++; + } + if (cmpl) { + be_cq_notify(adapter, txo->cq.id, false, cmpl); + atomic_sub(num_wrbs, &txq->used); + timeo = 0; + } + if (!be_is_tx_compl_pending(txo)) + pending_txqs--; + } + + if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter)) + break; + + mdelay(1); + } while (true); + + /* Free enqueued TX that was never notified to HW */ + for_all_tx_queues(adapter, txo, i) { + txq = &txo->q; + + if (atomic_read(&txq->used)) { + dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n", + i, atomic_read(&txq->used)); + notified_idx = txq->tail; + end_idx = txq->tail; + index_adv(&end_idx, atomic_read(&txq->used) - 1, + txq->len); + /* Use the tx-compl process logic to handle requests + * that were not sent to the HW. 
+ */ + num_wrbs = be_tx_compl_process(adapter, txo, end_idx); + atomic_sub(num_wrbs, &txq->used); + BUG_ON(atomic_read(&txq->used)); + txo->pend_wrb_cnt = 0; + /* Since hw was never notified of these requests, + * reset TXQ indices + */ + txq->head = notified_idx; + txq->tail = notified_idx; + } + } +} + +static void be_evt_queues_destroy(struct be_adapter *adapter) +{ + struct be_eq_obj *eqo; + int i; + + for_all_evt_queues(adapter, eqo, i) { + if (eqo->q.created) { + be_eq_clean(eqo); + be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); + napi_hash_del(&eqo->napi); + netif_napi_del(&eqo->napi); + } + free_cpumask_var(eqo->affinity_mask); + be_queue_free(adapter, &eqo->q); + } +} + +static int be_evt_queues_create(struct be_adapter *adapter) +{ + struct be_queue_info *eq; + struct be_eq_obj *eqo; + struct be_aic_obj *aic; + int i, rc; + + adapter->num_evt_qs = min_t(u16, num_irqs(adapter), + adapter->cfg_num_qs); + + for_all_evt_queues(adapter, eqo, i) { + int numa_node = dev_to_node(&adapter->pdev->dev); + if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + eqo->affinity_mask); + netif_napi_add(adapter->netdev, &eqo->napi, be_poll, + BE_NAPI_WEIGHT); + napi_hash_add(&eqo->napi); + aic = &adapter->aic_obj[i]; + eqo->adapter = adapter; + eqo->idx = i; + aic->max_eqd = BE_MAX_EQD; + aic->enable = true; + + eq = &eqo->q; + rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, + sizeof(struct be_eq_entry)); + if (rc) + return rc; + + rc = be_cmd_eq_create(adapter, eqo); + if (rc) + return rc; + } + return 0; +} + +static void be_mcc_queues_destroy(struct be_adapter *adapter) +{ + struct be_queue_info *q; + + q = &adapter->mcc_obj.q; + if (q->created) + be_cmd_q_destroy(adapter, q, QTYPE_MCCQ); + be_queue_free(adapter, q); + + q = &adapter->mcc_obj.cq; + if (q->created) + be_cmd_q_destroy(adapter, q, QTYPE_CQ); + be_queue_free(adapter, q); +} + +/* Must be called only after TX qs are created as MCC shares TX EQ */ +static int be_mcc_queues_create(struct be_adapter *adapter) +{ + struct be_queue_info *q, *cq; + + cq = &adapter->mcc_obj.cq; + if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, + sizeof(struct be_mcc_compl))) + goto err; + + /* Use the default EQ for MCC completions */ + if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0)) + goto mcc_cq_free; + + q = &adapter->mcc_obj.q; + if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) + goto mcc_cq_destroy; + + if (be_cmd_mccq_create(adapter, q, cq)) + goto mcc_q_free; + + return 0; + +mcc_q_free: + be_queue_free(adapter, q); +mcc_cq_destroy: + be_cmd_q_destroy(adapter, cq, QTYPE_CQ); +mcc_cq_free: + be_queue_free(adapter, cq); +err: + return -1; +} + +static void be_tx_queues_destroy(struct be_adapter *adapter) +{ + struct be_queue_info *q; + struct be_tx_obj *txo; + u8 i; + + for_all_tx_queues(adapter, txo, i) { + q = &txo->q; + if (q->created) + be_cmd_q_destroy(adapter, q, QTYPE_TXQ); + be_queue_free(adapter, q); + + q = &txo->cq; + if (q->created) + be_cmd_q_destroy(adapter, q, QTYPE_CQ); + be_queue_free(adapter, q); + } +} + +static int be_tx_qs_create(struct be_adapter *adapter) +{ + struct be_queue_info *cq; + struct be_tx_obj *txo; + struct be_eq_obj *eqo; + int status, i; + + adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); + + for_all_tx_queues(adapter, txo, i) { + cq = &txo->cq; + status = be_queue_alloc(adapter, cq, TX_CQ_LEN, + sizeof(struct be_eth_tx_compl)); + if (status) + return status; + + 
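/*
 * A minimal sketch of the completion-queue to event-queue mapping used just
 * below in be_tx_qs_create() (and again in be_rx_cqs_create()): when there
 * are more TX/RX queues than event queues, the extra queues wrap around via
 * a modulo and share an EQ, and hence an interrupt. model_map_queues_to_eqs
 * is a hypothetical helper, not part of the driver.
 */
#include <stdio.h>

static void model_map_queues_to_eqs(int num_qs, int num_eqs)
{
	int i;

	for (i = 0; i < num_qs; i++)
		printf("queue %d -> eq %d\n", i, i % num_eqs);
}

/* e.g. model_map_queues_to_eqs(8, 4) pairs queues 4..7 back with EQs 0..3. */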
u64_stats_init(&txo->stats.sync); + u64_stats_init(&txo->stats.sync_compl); + + /* If num_evt_qs is less than num_tx_qs, then more than + * one txq share an eq + */ + eqo = &adapter->eq_obj[i % adapter->num_evt_qs]; + status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3); + if (status) + return status; + + status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN, + sizeof(struct be_eth_wrb)); + if (status) + return status; + + status = be_cmd_txq_create(adapter, txo); + if (status) + return status; + + netif_set_xps_queue(adapter->netdev, eqo->affinity_mask, + eqo->idx); + } + + dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n", + adapter->num_tx_qs); + return 0; +} + +static void be_rx_cqs_destroy(struct be_adapter *adapter) +{ + struct be_queue_info *q; + struct be_rx_obj *rxo; + int i; + + for_all_rx_queues(adapter, rxo, i) { + q = &rxo->cq; + if (q->created) + be_cmd_q_destroy(adapter, q, QTYPE_CQ); + be_queue_free(adapter, q); + } +} + +static int be_rx_cqs_create(struct be_adapter *adapter) +{ + struct be_queue_info *eq, *cq; + struct be_rx_obj *rxo; + int rc, i; + + /* We can create as many RSS rings as there are EQs. */ + adapter->num_rss_qs = adapter->num_evt_qs; + + /* We'll use RSS only if atleast 2 RSS rings are supported. */ + if (adapter->num_rss_qs <= 1) + adapter->num_rss_qs = 0; + + adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq; + + /* When the interface is not capable of RSS rings (and there is no + * need to create a default RXQ) we'll still need one RXQ + */ + if (adapter->num_rx_qs == 0) + adapter->num_rx_qs = 1; + + adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; + for_all_rx_queues(adapter, rxo, i) { + rxo->adapter = adapter; + cq = &rxo->cq; + rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, + sizeof(struct be_eth_rx_compl)); + if (rc) + return rc; + + u64_stats_init(&rxo->stats.sync); + eq = &adapter->eq_obj[i % adapter->num_evt_qs].q; + rc = be_cmd_cq_create(adapter, cq, eq, false, 3); + if (rc) + return rc; + } + + dev_info(&adapter->pdev->dev, + "created %d RX queue(s)\n", adapter->num_rx_qs); + return 0; +} + +static irqreturn_t be_intx(int irq, void *dev) +{ + struct be_eq_obj *eqo = dev; + struct be_adapter *adapter = eqo->adapter; + int num_evts = 0; + + /* IRQ is not expected when NAPI is scheduled as the EQ + * will not be armed. + * But, this can happen on Lancer INTx where it takes + * a while to de-assert INTx or in BE2 where occasionaly + * an interrupt may be raised even when EQ is unarmed. + * If NAPI is already scheduled, then counting & notifying + * events will orphan them. + */ + if (napi_schedule_prep(&eqo->napi)) { + num_evts = events_get(eqo); + __napi_schedule(&eqo->napi); + if (num_evts) + eqo->spurious_intr = 0; + } + be_eq_notify(adapter, eqo->q.id, false, true, num_evts); + + /* Return IRQ_HANDLED only for the the first spurious intr + * after a valid intr to stop the kernel from branding + * this irq as a bad one! + */ + if (num_evts || eqo->spurious_intr++ == 0) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +static irqreturn_t be_msix(int irq, void *dev) +{ + struct be_eq_obj *eqo = dev; + + be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0); + napi_schedule(&eqo->napi); + return IRQ_HANDLED; +} + +static inline bool do_gro(struct be_rx_compl_info *rxcp) +{ + return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? 
true : false; +} + +static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, + int budget, int polling) +{ + struct be_adapter *adapter = rxo->adapter; + struct be_queue_info *rx_cq = &rxo->cq; + struct be_rx_compl_info *rxcp; + u32 work_done; + u32 frags_consumed = 0; + + for (work_done = 0; work_done < budget; work_done++) { + rxcp = be_rx_compl_get(rxo); + if (!rxcp) + break; + + /* Is it a flush compl that has no data */ + if (unlikely(rxcp->num_rcvd == 0)) + goto loop_continue; + + /* Discard compl with partial DMA Lancer B0 */ + if (unlikely(!rxcp->pkt_size)) { + be_rx_compl_discard(rxo, rxcp); + goto loop_continue; + } + + /* On BE drop pkts that arrive due to imperfect filtering in + * promiscuous mode on some skews + */ + if (unlikely(rxcp->port != adapter->port_num && + !lancer_chip(adapter))) { + be_rx_compl_discard(rxo, rxcp); + goto loop_continue; + } + + /* Don't do gro when we're busy_polling */ + if (do_gro(rxcp) && polling != BUSY_POLLING) + be_rx_compl_process_gro(rxo, napi, rxcp); + else + be_rx_compl_process(rxo, napi, rxcp); + +loop_continue: + frags_consumed += rxcp->num_rcvd; + be_rx_stats_update(rxo, rxcp); + } + + if (work_done) { + be_cq_notify(adapter, rx_cq->id, true, work_done); + + /* When an rx-obj gets into post_starved state, just + * let be_worker do the posting. + */ + if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && + !rxo->rx_post_starved) + be_post_rx_frags(rxo, GFP_ATOMIC, + max_t(u32, MAX_RX_POST, + frags_consumed)); + } + + return work_done; +} + +static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status) +{ + switch (status) { + case BE_TX_COMP_HDR_PARSE_ERR: + tx_stats(txo)->tx_hdr_parse_err++; + break; + case BE_TX_COMP_NDMA_ERR: + tx_stats(txo)->tx_dma_err++; + break; + case BE_TX_COMP_ACL_ERR: + tx_stats(txo)->tx_spoof_check_err++; + break; + } +} + +static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status) +{ + switch (status) { + case LANCER_TX_COMP_LSO_ERR: + tx_stats(txo)->tx_tso_err++; + break; + case LANCER_TX_COMP_HSW_DROP_MAC_ERR: + case LANCER_TX_COMP_HSW_DROP_VLAN_ERR: + tx_stats(txo)->tx_spoof_check_err++; + break; + case LANCER_TX_COMP_QINQ_ERR: + tx_stats(txo)->tx_qinq_err++; + break; + case LANCER_TX_COMP_PARITY_ERR: + tx_stats(txo)->tx_internal_parity_err++; + break; + case LANCER_TX_COMP_DMA_ERR: + tx_stats(txo)->tx_dma_err++; + break; + } +} + +static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, + int idx) +{ + int num_wrbs = 0, work_done = 0; + struct be_tx_compl_info *txcp; + + while ((txcp = be_tx_compl_get(txo))) { + num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index); + work_done++; + + if (txcp->status) { + if (lancer_chip(adapter)) + lancer_update_tx_err(txo, txcp->status); + else + be_update_tx_err(txo, txcp->status); + } + } + + if (work_done) { + be_cq_notify(adapter, txo->cq.id, true, work_done); + atomic_sub(num_wrbs, &txo->q.used); + + /* As Tx wrbs have been freed up, wake up netdev queue + * if it was stopped due to lack of tx wrbs. 
*/ + if (__netif_subqueue_stopped(adapter->netdev, idx) && + be_can_txq_wake(txo)) { + netif_wake_subqueue(adapter->netdev, idx); + } + + u64_stats_update_begin(&tx_stats(txo)->sync_compl); + tx_stats(txo)->tx_compl += work_done; + u64_stats_update_end(&tx_stats(txo)->sync_compl); + } +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline bool be_lock_napi(struct be_eq_obj *eqo) +{ + bool status = true; + + spin_lock(&eqo->lock); /* BH is already disabled */ + if (eqo->state & BE_EQ_LOCKED) { + WARN_ON(eqo->state & BE_EQ_NAPI); + eqo->state |= BE_EQ_NAPI_YIELD; + status = false; + } else { + eqo->state = BE_EQ_NAPI; + } + spin_unlock(&eqo->lock); + return status; +} + +static inline void be_unlock_napi(struct be_eq_obj *eqo) +{ + spin_lock(&eqo->lock); /* BH is already disabled */ + + WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD)); + eqo->state = BE_EQ_IDLE; + + spin_unlock(&eqo->lock); +} + +static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) +{ + bool status = true; + + spin_lock_bh(&eqo->lock); + if (eqo->state & BE_EQ_LOCKED) { + eqo->state |= BE_EQ_POLL_YIELD; + status = false; + } else { + eqo->state |= BE_EQ_POLL; + } + spin_unlock_bh(&eqo->lock); + return status; +} + +static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) +{ + spin_lock_bh(&eqo->lock); + + WARN_ON(eqo->state & (BE_EQ_NAPI)); + eqo->state = BE_EQ_IDLE; + + spin_unlock_bh(&eqo->lock); +} + +static inline void be_enable_busy_poll(struct be_eq_obj *eqo) +{ + spin_lock_init(&eqo->lock); + eqo->state = BE_EQ_IDLE; +} + +static inline void be_disable_busy_poll(struct be_eq_obj *eqo) +{ + local_bh_disable(); + + /* It's enough to just acquire napi lock on the eqo to stop + * be_busy_poll() from processing any queueus. + */ + while (!be_lock_napi(eqo)) + mdelay(1); + + local_bh_enable(); +} + +#else /* CONFIG_NET_RX_BUSY_POLL */ + +static inline bool be_lock_napi(struct be_eq_obj *eqo) +{ + return true; +} + +static inline void be_unlock_napi(struct be_eq_obj *eqo) +{ +} + +static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) +{ + return false; +} + +static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) +{ +} + +static inline void be_enable_busy_poll(struct be_eq_obj *eqo) +{ +} + +static inline void be_disable_busy_poll(struct be_eq_obj *eqo) +{ +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +int be_poll(struct napi_struct *napi, int budget) +{ + struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); + struct be_adapter *adapter = eqo->adapter; + int max_work = 0, work, i, num_evts; + struct be_rx_obj *rxo; + struct be_tx_obj *txo; + + num_evts = events_get(eqo); + + for_all_tx_queues_on_eq(adapter, eqo, txo, i) + be_process_tx(adapter, txo, i); + + if (be_lock_napi(eqo)) { + /* This loop will iterate twice for EQ0 in which + * completions of the last RXQ (default one) are also processed + * For other EQs the loop iterates only once + */ + for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { + work = be_process_rx(rxo, napi, budget, NAPI_POLLING); + max_work = max(work, max_work); + } + be_unlock_napi(eqo); + } else { + max_work = budget; + } + + if (is_mcc_eqo(eqo)) + be_process_mcc(adapter); + + if (max_work < budget) { + napi_complete(napi); + be_eq_notify(adapter, eqo->q.id, true, false, num_evts); + } else { + /* As we'll continue in polling mode, count and clear events */ + be_eq_notify(adapter, eqo->q.id, false, false, num_evts); + } + return max_work; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static int be_busy_poll(struct napi_struct *napi) +{ + struct be_eq_obj *eqo = 
container_of(napi, struct be_eq_obj, napi); + struct be_adapter *adapter = eqo->adapter; + struct be_rx_obj *rxo; + int i, work = 0; + + if (!be_lock_busy_poll(eqo)) + return LL_FLUSH_BUSY; + + for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { + work = be_process_rx(rxo, napi, 4, BUSY_POLLING); + if (work) + break; + } + + be_unlock_busy_poll(eqo); + return work; +} +#endif + +void be_detect_error(struct be_adapter *adapter) +{ + u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; + u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; + u32 i; + bool error_detected = false; + struct device *dev = &adapter->pdev->dev; + struct net_device *netdev = adapter->netdev; + + if (be_hw_error(adapter)) + return; + + if (lancer_chip(adapter)) { + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + if (sliport_status & SLIPORT_STATUS_ERR_MASK) { + sliport_err1 = ioread32(adapter->db + + SLIPORT_ERROR1_OFFSET); + sliport_err2 = ioread32(adapter->db + + SLIPORT_ERROR2_OFFSET); + adapter->hw_error = true; + error_detected = true; + /* Do not log error messages if its a FW reset */ + if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && + sliport_err2 == SLIPORT_ERROR_FW_RESET2) { + dev_info(dev, "Firmware update in progress\n"); + } else { + dev_err(dev, "Error detected in the card\n"); + dev_err(dev, "ERR: sliport status 0x%x\n", + sliport_status); + dev_err(dev, "ERR: sliport error1 0x%x\n", + sliport_err1); + dev_err(dev, "ERR: sliport error2 0x%x\n", + sliport_err2); + } + } + } else { + ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); + ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); + ue_lo_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_LOW_MASK); + ue_hi_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_HI_MASK); + + ue_lo = (ue_lo & ~ue_lo_mask); + ue_hi = (ue_hi & ~ue_hi_mask); + + /* On certain platforms BE hardware can indicate spurious UEs. + * Allow HW to stop working completely in case of a real UE. + * Hence not setting the hw_error for UE detection. + */ + + if (ue_lo || ue_hi) { + error_detected = true; + dev_err(dev, + "Unrecoverable Error detected in the adapter"); + dev_err(dev, "Please reboot server to recover"); + if (skyhawk_chip(adapter)) + adapter->hw_error = true; + for (i = 0; ue_lo; ue_lo >>= 1, i++) { + if (ue_lo & 1) + dev_err(dev, "UE: %s bit set\n", + ue_status_low_desc[i]); + } + for (i = 0; ue_hi; ue_hi >>= 1, i++) { + if (ue_hi & 1) + dev_err(dev, "UE: %s bit set\n", + ue_status_hi_desc[i]); + } + } + } + if (error_detected) + netif_carrier_off(netdev); +} + +static void be_msix_disable(struct be_adapter *adapter) +{ + if (msix_enabled(adapter)) { + pci_disable_msix(adapter->pdev); + adapter->num_msix_vec = 0; + adapter->num_msix_roce_vec = 0; + } +} + +static int be_msix_enable(struct be_adapter *adapter) +{ + int i, num_vec; + struct device *dev = &adapter->pdev->dev; + + /* If RoCE is supported, program the max number of NIC vectors that + * may be configured via set-channels, along with vectors needed for + * RoCe. Else, just program the number we'll use initially. 
+ */ + if (be_roce_supported(adapter)) + num_vec = min_t(int, 2 * be_max_eqs(adapter), + 2 * num_online_cpus()); + else + num_vec = adapter->cfg_num_qs; + + for (i = 0; i < num_vec; i++) + adapter->msix_entries[i].entry = i; + + num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + MIN_MSIX_VECTORS, num_vec); + if (num_vec < 0) + goto fail; + + if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) { + adapter->num_msix_roce_vec = num_vec / 2; + dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n", + adapter->num_msix_roce_vec); + } + + adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec; + + dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n", + adapter->num_msix_vec); + return 0; + +fail: + dev_warn(dev, "MSIx enable failed\n"); + + /* INTx is not supported in VFs, so fail probe if enable_msix fails */ + if (!be_physfn(adapter)) + return num_vec; + return 0; +} + +static inline int be_msix_vec_get(struct be_adapter *adapter, + struct be_eq_obj *eqo) +{ + return adapter->msix_entries[eqo->msix_idx].vector; +} + +static int be_msix_register(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct be_eq_obj *eqo; + int status, i, vec; + + for_all_evt_queues(adapter, eqo, i) { + sprintf(eqo->desc, "%s-q%d", netdev->name, i); + vec = be_msix_vec_get(adapter, eqo); + status = request_irq(vec, be_msix, 0, eqo->desc, eqo); + if (status) + goto err_msix; + + irq_set_affinity_hint(vec, eqo->affinity_mask); + } + + return 0; +err_msix: + for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) + free_irq(be_msix_vec_get(adapter, eqo), eqo); + dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", + status); + be_msix_disable(adapter); + return status; +} + +static int be_irq_register(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + if (msix_enabled(adapter)) { + status = be_msix_register(adapter); + if (status == 0) + goto done; + /* INTx is not supported for VF */ + if (!be_physfn(adapter)) + return status; + } + + /* INTx: only the first EQ is used */ + netdev->irq = adapter->pdev->irq; + status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name, + &adapter->eq_obj[0]); + if (status) { + dev_err(&adapter->pdev->dev, + "INTx request IRQ failed - err %d\n", status); + return status; + } +done: + adapter->isr_registered = true; + return 0; +} + +static void be_irq_unregister(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct be_eq_obj *eqo; + int i, vec; + + if (!adapter->isr_registered) + return; + + /* INTx */ + if (!msix_enabled(adapter)) { + free_irq(netdev->irq, &adapter->eq_obj[0]); + goto done; + } + + /* MSIx */ + for_all_evt_queues(adapter, eqo, i) { + vec = be_msix_vec_get(adapter, eqo); + irq_set_affinity_hint(vec, NULL); + free_irq(vec, eqo); + } + +done: + adapter->isr_registered = false; +} + +static void be_rx_qs_destroy(struct be_adapter *adapter) +{ + struct be_queue_info *q; + struct be_rx_obj *rxo; + int i; + + for_all_rx_queues(adapter, rxo, i) { + q = &rxo->q; + if (q->created) { + be_cmd_rxq_destroy(adapter, q); + be_rx_cq_clean(rxo); + } + be_queue_free(adapter, q); + } +} + +static int be_close(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_eq_obj *eqo; + int i; + + /* This protection is needed as be_close() may be called even when the + * adapter is in cleared state (after eeh perm failure) + */ + if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) + return 0; + + 
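/*
 * A minimal sketch of the RSS indirection-table fill done a little further
 * below in be_rx_qs_create(): the table is walked in strides of the number
 * of RSS rings and each bucket is assigned a ring index round-robin, so
 * every hash bucket resolves to one of the RSS queues. The model_* names
 * are hypothetical and the 128-entry length mirrors the value passed to
 * be_cmd_rss_config() below; the helper itself is not part of the driver.
 */
#include <stdint.h>

#define MODEL_RSS_INDIR_TABLE_LEN 128	/* mirrors RSS_INDIR_TABLE_LEN usage below */

static void model_fill_rss_table(uint8_t *table, int num_rss_qs)
{
	int i, j;

	for (j = 0; j < MODEL_RSS_INDIR_TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs; i++) {
			if (j + i >= MODEL_RSS_INDIR_TABLE_LEN)
				break;
			table[j + i] = (uint8_t)i;	/* RSS ring for this hash bucket */
		}
}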
be_roce_dev_close(adapter); + + if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { + for_all_evt_queues(adapter, eqo, i) { + napi_disable(&eqo->napi); + be_disable_busy_poll(eqo); + } + adapter->flags &= ~BE_FLAGS_NAPI_ENABLED; + } + + be_async_mcc_disable(adapter); + + /* Wait for all pending tx completions to arrive so that + * all tx skbs are freed. + */ + netif_tx_disable(netdev); + be_tx_compl_clean(adapter); + + be_rx_qs_destroy(adapter); + be_clear_uc_list(adapter); + + for_all_evt_queues(adapter, eqo, i) { + if (msix_enabled(adapter)) + synchronize_irq(be_msix_vec_get(adapter, eqo)); + else + synchronize_irq(netdev->irq); + be_eq_clean(eqo); + } + + be_irq_unregister(adapter); + + return 0; +} + +static int be_rx_qs_create(struct be_adapter *adapter) +{ + struct rss_info *rss = &adapter->rss_info; + u8 rss_key[RSS_HASH_KEY_LEN]; + struct be_rx_obj *rxo; + int rc, i, j; + + for_all_rx_queues(adapter, rxo, i) { + rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, + sizeof(struct be_eth_rx_d)); + if (rc) + return rc; + } + + if (adapter->need_def_rxq || !adapter->num_rss_qs) { + rxo = default_rxo(adapter); + rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, + rx_frag_size, adapter->if_handle, + false, &rxo->rss_id); + if (rc) + return rc; + } + + for_all_rss_queues(adapter, rxo, i) { + rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, + rx_frag_size, adapter->if_handle, + true, &rxo->rss_id); + if (rc) + return rc; + } + + if (be_multi_rxq(adapter)) { + for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) { + for_all_rss_queues(adapter, rxo, i) { + if ((j + i) >= RSS_INDIR_TABLE_LEN) + break; + rss->rsstable[j + i] = rxo->rss_id; + rss->rss_queue[j + i] = i; + } + } + rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | + RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6; + + if (!BEx_chip(adapter)) + rss->rss_flags |= RSS_ENABLE_UDP_IPV4 | + RSS_ENABLE_UDP_IPV6; + } else { + /* Disable RSS, if only default RX Q is created */ + rss->rss_flags = RSS_ENABLE_NONE; + } + + netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); + rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, + 128, rss_key); + if (rc) { + rss->rss_flags = RSS_ENABLE_NONE; + return rc; + } + + memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN); + + /* First time posting */ + for_all_rx_queues(adapter, rxo, i) + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); + return 0; +} + +static int be_open(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_eq_obj *eqo; + struct be_rx_obj *rxo; + struct be_tx_obj *txo; + u8 link_status; + int status, i; + + status = be_rx_qs_create(adapter); + if (status) + goto err; + + status = be_irq_register(adapter); + if (status) + goto err; + + for_all_rx_queues(adapter, rxo, i) + be_cq_notify(adapter, rxo->cq.id, true, 0); + + for_all_tx_queues(adapter, txo, i) + be_cq_notify(adapter, txo->cq.id, true, 0); + + be_async_mcc_enable(adapter); + + for_all_evt_queues(adapter, eqo, i) { + napi_enable(&eqo->napi); + be_enable_busy_poll(eqo); + be_eq_notify(adapter, eqo->q.id, true, true, 0); + } + adapter->flags |= BE_FLAGS_NAPI_ENABLED; + + status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); + if (!status) + be_link_status_update(adapter, link_status); + + netif_tx_start_all_queues(netdev); + be_roce_dev_open(adapter); + +#ifdef CONFIG_BE2NET_VXLAN + if (skyhawk_chip(adapter)) + vxlan_get_rx_port(netdev); +#endif + + return 0; +err: + be_close(adapter->netdev); + return -EIO; +} + +static int be_setup_wol(struct be_adapter *adapter, 
bool enable) +{ + struct be_dma_mem cmd; + int status = 0; + u8 mac[ETH_ALEN]; + + eth_zero_addr(mac); + + cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); + if (!cmd.va) + return -ENOMEM; + + if (enable) { + status = pci_write_config_dword(adapter->pdev, + PCICFG_PM_CONTROL_OFFSET, + PCICFG_PM_CONTROL_MASK); + if (status) { + dev_err(&adapter->pdev->dev, + "Could not enable Wake-on-lan\n"); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); + return status; + } + status = be_cmd_enable_magic_wol(adapter, + adapter->netdev->dev_addr, + &cmd); + pci_enable_wake(adapter->pdev, PCI_D3hot, 1); + pci_enable_wake(adapter->pdev, PCI_D3cold, 1); + } else { + status = be_cmd_enable_magic_wol(adapter, mac, &cmd); + pci_enable_wake(adapter->pdev, PCI_D3hot, 0); + pci_enable_wake(adapter->pdev, PCI_D3cold, 0); + } + + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); + return status; +} + +static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) +{ + u32 addr; + + addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0); + + mac[5] = (u8)(addr & 0xFF); + mac[4] = (u8)((addr >> 8) & 0xFF); + mac[3] = (u8)((addr >> 16) & 0xFF); + /* Use the OUI from the current MAC address */ + memcpy(mac, adapter->netdev->dev_addr, 3); +} + +/* + * Generate a seed MAC address from the PF MAC Address using jhash. + * MAC Address for VFs are assigned incrementally starting from the seed. + * These addresses are programmed in the ASIC by the PF and the VF driver + * queries for the MAC address during its probe. + */ +static int be_vf_eth_addr_config(struct be_adapter *adapter) +{ + u32 vf; + int status = 0; + u8 mac[ETH_ALEN]; + struct be_vf_cfg *vf_cfg; + + be_vf_eth_addr_generate(adapter, mac); + + for_all_vfs(adapter, vf_cfg, vf) { + if (BEx_chip(adapter)) + status = be_cmd_pmac_add(adapter, mac, + vf_cfg->if_handle, + &vf_cfg->pmac_id, vf + 1); + else + status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle, + vf + 1); + + if (status) + dev_err(&adapter->pdev->dev, + "Mac address assignment failed for VF %d\n", + vf); + else + memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); + + mac[5] += 1; + } + return status; +} + +static int be_vfs_mac_query(struct be_adapter *adapter) +{ + int status, vf; + u8 mac[ETH_ALEN]; + struct be_vf_cfg *vf_cfg; + + for_all_vfs(adapter, vf_cfg, vf) { + status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id, + mac, vf_cfg->if_handle, + false, vf+1); + if (status) + return status; + memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); + } + return 0; +} + +static void be_vf_clear(struct be_adapter *adapter) +{ + struct be_vf_cfg *vf_cfg; + u32 vf; + + if (pci_vfs_assigned(adapter->pdev)) { + dev_warn(&adapter->pdev->dev, + "VFs are assigned to VMs: not disabling VFs\n"); + goto done; + } + + pci_disable_sriov(adapter->pdev); + + for_all_vfs(adapter, vf_cfg, vf) { + if (BEx_chip(adapter)) + be_cmd_pmac_del(adapter, vf_cfg->if_handle, + vf_cfg->pmac_id, vf + 1); + else + be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle, + vf + 1); + + be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); + } +done: + kfree(adapter->vf_cfg); + adapter->num_vfs = 0; + adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED; +} + +static void be_clear_queues(struct be_adapter *adapter) +{ + be_mcc_queues_destroy(adapter); + be_rx_cqs_destroy(adapter); + be_tx_queues_destroy(adapter); + be_evt_queues_destroy(adapter); +} + +static void be_cancel_worker(struct be_adapter *adapter) +{ + if 
(adapter->flags & BE_FLAGS_WORKER_SCHEDULED) { + cancel_delayed_work_sync(&adapter->work); + adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED; + } +} + +static void be_cancel_err_detection(struct be_adapter *adapter) +{ + if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) { + cancel_delayed_work_sync(&adapter->be_err_detection_work); + adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED; + } +} + +static void be_mac_clear(struct be_adapter *adapter) +{ + if (adapter->pmac_id) { + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[0], 0); + kfree(adapter->pmac_id); + adapter->pmac_id = NULL; + } +} + +#ifdef CONFIG_BE2NET_VXLAN +static void be_disable_vxlan_offloads(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) + be_cmd_manage_iface(adapter, adapter->if_handle, + OP_CONVERT_TUNNEL_TO_NORMAL); + + if (adapter->vxlan_port) + be_cmd_set_vxlan_port(adapter, 0); + + adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS; + adapter->vxlan_port = 0; + + netdev->hw_enc_features = 0; + netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); + netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL); +} +#endif + +static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) +{ + struct be_resources res = adapter->pool_res; + u16 num_vf_qs = 1; + + /* Distribute the queue resources equally among the PF and it's VFs + * Do not distribute queue resources in multi-channel configuration. + */ + if (num_vfs && !be_is_mc(adapter)) { + /* If number of VFs requested is 8 less than max supported, + * assign 8 queue pairs to the PF and divide the remaining + * resources evenly among the VFs + */ + if (num_vfs < (be_max_vfs(adapter) - 8)) + num_vf_qs = (res.max_rss_qs - 8) / num_vfs; + else + num_vf_qs = res.max_rss_qs / num_vfs; + + /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable + * interfaces per port. Provide RSS on VFs, only if number + * of VFs requested is less than MAX_RSS_IFACES limit. + */ + if (num_vfs >= MAX_RSS_IFACES) + num_vf_qs = 1; + } + return num_vf_qs; +} + +static int be_clear(struct be_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u16 num_vf_qs; + + be_cancel_worker(adapter); + + if (sriov_enabled(adapter)) + be_vf_clear(adapter); + + /* Re-configure FW to distribute resources evenly across max-supported + * number of VFs, only when VFs are not already enabled. 
+ */ + if (skyhawk_chip(adapter) && be_physfn(adapter) && + !pci_vfs_assigned(pdev)) { + num_vf_qs = be_calculate_vf_qs(adapter, + pci_sriov_get_totalvfs(pdev)); + be_cmd_set_sriov_config(adapter, adapter->pool_res, + pci_sriov_get_totalvfs(pdev), + num_vf_qs); + } + +#ifdef CONFIG_BE2NET_VXLAN + be_disable_vxlan_offloads(adapter); +#endif + /* delete the primary mac along with the uc-mac list */ + be_mac_clear(adapter); + + be_cmd_if_destroy(adapter, adapter->if_handle, 0); + + be_clear_queues(adapter); + + be_msix_disable(adapter); + adapter->flags &= ~BE_FLAGS_SETUP_DONE; + return 0; +} + +static int be_if_create(struct be_adapter *adapter, u32 *if_handle, + u32 cap_flags, u32 vf) +{ + u32 en_flags; + + en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | + BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; + + en_flags &= cap_flags; + + return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); +} + +static int be_vfs_if_create(struct be_adapter *adapter) +{ + struct be_resources res = {0}; + struct be_vf_cfg *vf_cfg; + u32 cap_flags, vf; + int status; + + /* If a FW profile exists, then cap_flags are updated */ + cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST; + + for_all_vfs(adapter, vf_cfg, vf) { + if (!BE3_chip(adapter)) { + status = be_cmd_get_profile_config(adapter, &res, + RESOURCE_LIMITS, + vf + 1); + if (!status) { + cap_flags = res.if_cap_flags; + /* Prevent VFs from enabling VLAN promiscuous + * mode + */ + cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + } + + status = be_if_create(adapter, &vf_cfg->if_handle, + cap_flags, vf + 1); + if (status) + return status; + } + + return 0; +} + +static int be_vf_setup_init(struct be_adapter *adapter) +{ + struct be_vf_cfg *vf_cfg; + int vf; + + adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg), + GFP_KERNEL); + if (!adapter->vf_cfg) + return -ENOMEM; + + for_all_vfs(adapter, vf_cfg, vf) { + vf_cfg->if_handle = -1; + vf_cfg->pmac_id = -1; + } + return 0; +} + +static int be_vf_setup(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + struct be_vf_cfg *vf_cfg; + int status, old_vfs, vf; + + old_vfs = pci_num_vf(adapter->pdev); + + status = be_vf_setup_init(adapter); + if (status) + goto err; + + if (old_vfs) { + for_all_vfs(adapter, vf_cfg, vf) { + status = be_cmd_get_if_id(adapter, vf_cfg, vf); + if (status) + goto err; + } + + status = be_vfs_mac_query(adapter); + if (status) + goto err; + } else { + status = be_vfs_if_create(adapter); + if (status) + goto err; + + status = be_vf_eth_addr_config(adapter); + if (status) + goto err; + } + + for_all_vfs(adapter, vf_cfg, vf) { + /* Allow VFs to program MAC/VLAN filters */ + status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, + vf + 1); + if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, + vf_cfg->privileges | + BE_PRIV_FILTMGMT, + vf + 1); + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; + dev_info(dev, "VF%d has FILTMGMT privilege\n", + vf); + } + } + + /* Allow full available bandwidth */ + if (!old_vfs) + be_cmd_config_qos(adapter, 0, 0, vf + 1); + + if (!old_vfs) { + be_cmd_enable_vf(adapter, vf + 1); + be_cmd_set_logical_link_config(adapter, + IFLA_VF_LINK_STATE_AUTO, + vf+1); + } + } + + if (!old_vfs) { + status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (status) { + dev_err(dev, "SRIOV enable failed\n"); + adapter->num_vfs = 0; + goto err; + } + } + +
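Since the new-VF path of be_vf_setup() above hands out addresses via be_vf_eth_addr_config()/be_vf_eth_addr_generate(), here is a small standalone sketch (editorial illustration only, not driver code) of that addressing scheme: the PF's OUI is kept in bytes 0-2, bytes 3-5 are seeded from a hash of the PF MAC (a fixed constant stands in for the jhash() result below), and consecutive VFs receive consecutive addresses by bumping the last byte.

#include <stdio.h>
#include <string.h>

typedef unsigned char u8;
typedef unsigned int u32;

/* Mirrors be_vf_eth_addr_generate()/be_vf_eth_addr_config(): OUI from the PF,
 * low 24 bits of the hash as the device-specific part, last byte incremented
 * once per VF (the driver lets the byte wrap naturally, as done here).
 */
static void vf_mac_from_seed(const u8 *pf_mac, u32 hash, int vf, u8 *out)
{
	memcpy(out, pf_mac, 3);			/* OUI copied from the PF MAC */
	out[3] = (hash >> 16) & 0xFF;
	out[4] = (hash >> 8) & 0xFF;
	out[5] = ((hash & 0xFF) + vf) & 0xFF;	/* seed byte, bumped per VF */
}

int main(void)
{
	const u8 pf_mac[6] = { 0x00, 0x90, 0xfa, 0x12, 0x34, 0x56 };	/* example PF MAC */
	u8 mac[6];
	int vf;

	for (vf = 0; vf < 4; vf++) {
		vf_mac_from_seed(pf_mac, 0x00a1b2c3, vf, mac);	/* hash value is a stand-in */
		printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}
	return 0;
}

In the driver the generated address is programmed through be_cmd_pmac_add() on BEx chips and be_cmd_set_mac() elsewhere, and the VF later reads it back during its own probe.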
adapter->flags |= BE_FLAGS_SRIOV_ENABLED; + return 0; +err: + dev_err(dev, "VF setup failed\n"); + be_vf_clear(adapter); + return status; +} + +/* Converting function_mode bits on BE3 to SH mc_type enums */ + +static u8 be_convert_mc_type(u32 function_mode) +{ + if (function_mode & VNIC_MODE && function_mode & QNQ_MODE) + return vNIC1; + else if (function_mode & QNQ_MODE) + return FLEX10; + else if (function_mode & VNIC_MODE) + return vNIC2; + else if (function_mode & UMC_ENABLED) + return UMC; + else + return MC_NONE; +} + +/* On BE2/BE3 FW does not suggest the supported limits */ +static void BEx_get_resources(struct be_adapter *adapter, + struct be_resources *res) +{ + bool use_sriov = adapter->num_vfs ? 1 : 0; + + if (be_physfn(adapter)) + res->max_uc_mac = BE_UC_PMAC_COUNT; + else + res->max_uc_mac = BE_VF_UC_PMAC_COUNT; + + adapter->mc_type = be_convert_mc_type(adapter->function_mode); + + if (be_is_mc(adapter)) { + /* Assuming that there are 4 channels per port, + * when multi-channel is enabled + */ + if (be_is_qnq_mode(adapter)) + res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; + else + /* In a non-qnq multichannel mode, the pvid + * takes up one vlan entry + */ + res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1; + } else { + res->max_vlans = BE_NUM_VLANS_SUPPORTED; + } + + res->max_mcast_mac = BE_MAX_MC; + + /* 1) For BE3 1Gb ports, FW does not support multiple TXQs + * 2) Create multiple TX rings on a BE3-R multi-channel interface + * *only* if it is RSS-capable. + */ + if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) || + !be_physfn(adapter) || (be_is_mc(adapter) && + !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) { + res->max_tx_qs = 1; + } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { + struct be_resources super_nic_res = {0}; + + /* On a SuperNIC profile, the driver needs to use the + * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits + */ + be_cmd_get_profile_config(adapter, &super_nic_res, + RESOURCE_LIMITS, 0); + /* Some old versions of BE3 FW don't report max_tx_qs value */ + res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; + } else { + res->max_tx_qs = BE3_MAX_TX_QS; + } + + if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && + !use_sriov && be_physfn(adapter)) + res->max_rss_qs = (adapter->be3_native) ? + BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; + res->max_rx_qs = res->max_rss_qs + 1; + + if (be_physfn(adapter)) + res->max_evt_qs = (be_max_vfs(adapter) > 0) ? + BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; + else + res->max_evt_qs = 1; + + res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; + res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS; + if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) + res->if_cap_flags &= ~BE_IF_FLAGS_RSS; +} + +static void be_setup_init(struct be_adapter *adapter) +{ + adapter->vlan_prio_bmap = 0xff; + adapter->phy.link_speed = -1; + adapter->if_handle = -1; + adapter->be3_native = false; + adapter->if_flags = 0; + if (be_physfn(adapter)) + adapter->cmd_privileges = MAX_PRIVILEGES; + else + adapter->cmd_privileges = MIN_PRIVILEGES; +} + +static int be_get_sriov_config(struct be_adapter *adapter) +{ + struct be_resources res = {0}; + int max_vfs, old_vfs; + + be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0); + + /* Some old versions of BE3 FW don't report max_vfs value */ + if (BE3_chip(adapter) && !res.max_vfs) { + max_vfs = pci_sriov_get_totalvfs(adapter->pdev); + res.max_vfs = max_vfs > 0 ? 
min(MAX_VFS, max_vfs) : 0; + } + + adapter->pool_res = res; + + /* If during previous unload of the driver, the VFs were not disabled, + * then we cannot rely on the PF POOL limits for the TotalVFs value. + * Instead use the TotalVFs value stored in the pci-dev struct. + */ + old_vfs = pci_num_vf(adapter->pdev); + if (old_vfs) { + dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n", + old_vfs); + + adapter->pool_res.max_vfs = + pci_sriov_get_totalvfs(adapter->pdev); + adapter->num_vfs = old_vfs; + } + + return 0; +} + +static void be_alloc_sriov_res(struct be_adapter *adapter) +{ + int old_vfs = pci_num_vf(adapter->pdev); + u16 num_vf_qs; + int status; + + be_get_sriov_config(adapter); + + if (!old_vfs) + pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter)); + + /* When the HW is in SRIOV capable configuration, the PF-pool + * resources are given to PF during driver load, if there are no + * old VFs. This facility is not available in BE3 FW. + * Also, this is done by FW in Lancer chip. + */ + if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { + num_vf_qs = be_calculate_vf_qs(adapter, 0); + status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0, + num_vf_qs); + if (status) + dev_err(&adapter->pdev->dev, + "Failed to optimize SRIOV resources\n"); + } +} + +static int be_get_resources(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + struct be_resources res = {0}; + int status; + + if (BEx_chip(adapter)) { + BEx_get_resources(adapter, &res); + adapter->res = res; + } + + /* For Lancer, SH etc read per-function resource limits from FW. + * GET_FUNC_CONFIG returns per function guaranteed limits. + * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits + */ + if (!BEx_chip(adapter)) { + status = be_cmd_get_func_config(adapter, &res); + if (status) + return status; + + /* If a default RXQ must be created, we'll use up one RSSQ */ + if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs && + !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)) + res.max_rss_qs -= 1; + + /* If RoCE may be enabled, stash away half the EQs for RoCE */ + if (be_roce_supported(adapter)) + res.max_evt_qs /= 2; + adapter->res = res; + } + + /* If FW supports RSS default queue, then skip creating non-RSS + * queue for non-IP traffic. + */ + adapter->need_def_rxq = (be_if_cap_flags(adapter) & + BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1; + + dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", + be_max_txqs(adapter), be_max_rxqs(adapter), + be_max_rss(adapter), be_max_eqs(adapter), + be_max_vfs(adapter)); + dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", + be_max_uc(adapter), be_max_mc(adapter), + be_max_vlans(adapter)); + + /* Sanitize cfg_num_qs based on HW and platform limits */ + adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(), + be_max_qs(adapter)); + return 0; +} + +static int be_get_config(struct be_adapter *adapter) +{ + int status, level; + u16 profile_id; + + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + + status = be_cmd_query_fw_cfg(adapter); + if (status) + return status; + + if (BEx_chip(adapter)) { + level = be_cmd_get_fw_log_level(adapter); + adapter->msg_enable = + level <= FW_LOG_LEVEL_DEFAULT ? 
NETIF_MSG_HW : 0; + } + + be_cmd_get_acpi_wol_cap(adapter); + + be_cmd_query_port_name(adapter); + + if (be_physfn(adapter)) { + status = be_cmd_get_active_profile(adapter, &profile_id); + if (!status) + dev_info(&adapter->pdev->dev, + "Using profile 0x%x\n", profile_id); + } + + status = be_get_resources(adapter); + if (status) + return status; + + adapter->pmac_id = kcalloc(be_max_uc(adapter), + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + + return 0; +} + +static int be_mac_setup(struct be_adapter *adapter) +{ + u8 mac[ETH_ALEN]; + int status; + + if (is_zero_ether_addr(adapter->netdev->dev_addr)) { + status = be_cmd_get_perm_mac(adapter, mac); + if (status) + return status; + + memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); + memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); + } else { + /* Maybe the HW was reset; dev_addr must be re-programmed */ + memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); + } + + /* For BE3-R VFs, the PF programs the initial MAC address */ + if (!(BEx_chip(adapter) && be_virtfn(adapter))) + be_cmd_pmac_add(adapter, mac, adapter->if_handle, + &adapter->pmac_id[0], 0); + return 0; +} + +static void be_schedule_worker(struct be_adapter *adapter) +{ + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); + adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; +} + +static void be_schedule_err_detection(struct be_adapter *adapter) +{ + schedule_delayed_work(&adapter->be_err_detection_work, + msecs_to_jiffies(1000)); + adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED; +} + +static int be_setup_queues(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + status = be_evt_queues_create(adapter); + if (status) + goto err; + + status = be_tx_qs_create(adapter); + if (status) + goto err; + + status = be_rx_cqs_create(adapter); + if (status) + goto err; + + status = be_mcc_queues_create(adapter); + if (status) + goto err; + + status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs); + if (status) + goto err; + + status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs); + if (status) + goto err; + + return 0; +err: + dev_err(&adapter->pdev->dev, "queue_setup failed\n"); + return status; +} + +int be_update_queues(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + if (netif_running(netdev)) + be_close(netdev); + + be_cancel_worker(adapter); + + /* If any vectors have been shared with RoCE we cannot re-program + * the MSIx table. + */ + if (!adapter->num_msix_roce_vec) + be_msix_disable(adapter); + + be_clear_queues(adapter); + + if (!msix_enabled(adapter)) { + status = be_msix_enable(adapter); + if (status) + return status; + } + + status = be_setup_queues(adapter); + if (status) + return status; + + be_schedule_worker(adapter); + + if (netif_running(netdev)) + status = be_open(netdev); + + return status; +} + +static inline int fw_major_num(const char *fw_ver) +{ + int fw_major = 0, i; + + i = sscanf(fw_ver, "%d.", &fw_major); + if (i != 1) + return 0; + + return fw_major; +} + +/* If any VFs are already enabled don't FLR the PF */ +static bool be_reset_required(struct be_adapter *adapter) +{ + return pci_num_vf(adapter->pdev) ? 
false : true; +} + +/* Wait for the FW to be ready and perform the required initialization */ +static int be_func_init(struct be_adapter *adapter) +{ + int status; + + status = be_fw_wait_ready(adapter); + if (status) + return status; + + if (be_reset_required(adapter)) { + status = be_cmd_reset_function(adapter); + if (status) + return status; + + /* Wait for interrupts to quiesce after an FLR */ + msleep(100); + + /* We can clear all errors when function reset succeeds */ + be_clear_all_error(adapter); + } + + /* Tell FW we're ready to fire cmds */ + status = be_cmd_fw_init(adapter); + if (status) + return status; + + /* Allow interrupts for other ULPs running on NIC function */ + be_intr_set(adapter, true); + + return 0; +} + +static int be_setup(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + status = be_func_init(adapter); + if (status) + return status; + + be_setup_init(adapter); + + if (!lancer_chip(adapter)) + be_cmd_req_native_mode(adapter); + + if (!BE2_chip(adapter) && be_physfn(adapter)) + be_alloc_sriov_res(adapter); + + status = be_get_config(adapter); + if (status) + goto err; + + status = be_msix_enable(adapter); + if (status) + goto err; + + status = be_if_create(adapter, &adapter->if_handle, + be_if_cap_flags(adapter), 0); + if (status) + goto err; + + /* Updating real_num_tx/rx_queues() requires rtnl_lock() */ + rtnl_lock(); + status = be_setup_queues(adapter); + rtnl_unlock(); + if (status) + goto err; + + be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0); + + status = be_mac_setup(adapter); + if (status) + goto err; + + be_cmd_get_fw_ver(adapter); + dev_info(dev, "FW version is %s\n", adapter->fw_ver); + + if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) { + dev_err(dev, "Firmware on card is old(%s), IRQs may not work", + adapter->fw_ver); + dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); + } + + if (adapter->vlans_added) + be_vid_config(adapter); + + be_set_rx_mode(adapter->netdev); + + status = be_cmd_set_flow_control(adapter, adapter->tx_fc, + adapter->rx_fc); + if (status) + be_cmd_get_flow_control(adapter, &adapter->tx_fc, + &adapter->rx_fc); + + dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n", + adapter->tx_fc, adapter->rx_fc); + + if (be_physfn(adapter)) + be_cmd_set_logical_link_config(adapter, + IFLA_VF_LINK_STATE_AUTO, 0); + + if (adapter->num_vfs) + be_vf_setup(adapter); + + status = be_cmd_get_phy_info(adapter); + if (!status && be_pause_supported(adapter)) + adapter->phy.fc_autoneg = 1; + + be_schedule_worker(adapter); + adapter->flags |= BE_FLAGS_SETUP_DONE; + return 0; +err: + be_clear(adapter); + return status; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void be_netpoll(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_eq_obj *eqo; + int i; + + for_all_evt_queues(adapter, eqo, i) { + be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0); + napi_schedule(&eqo->napi); + } +} +#endif + +static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "}; + +static bool phy_flashing_required(struct be_adapter *adapter) +{ + return (adapter->phy.phy_type == PHY_TYPE_TN_8022 && + adapter->phy.interface_type == PHY_TYPE_BASET_10GB); +} + +static bool is_comp_in_ufi(struct be_adapter *adapter, + struct flash_section_info *fsec, int type) +{ + int i = 0, img_type = 0; + struct flash_section_info_g2 *fsec_g2 = NULL; + + if (BE2_chip(adapter)) + fsec_g2 = (struct flash_section_info_g2 *)fsec; + + for (i = 0; i < 
MAX_FLASH_COMP; i++) { + if (fsec_g2) + img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type); + else + img_type = le32_to_cpu(fsec->fsec_entry[i].type); + + if (img_type == type) + return true; + } + return false; + +} + +static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, + int header_size, + const struct firmware *fw) +{ + struct flash_section_info *fsec = NULL; + const u8 *p = fw->data; + + p += header_size; + while (p < (fw->data + fw->size)) { + fsec = (struct flash_section_info *)p; + if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) + return fsec; + p += 32; + } + return NULL; +} + +static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p, + u32 img_offset, u32 img_size, int hdr_size, + u16 img_optype, bool *crc_match) +{ + u32 crc_offset; + int status; + u8 crc[4]; + + status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset, + img_size - 4); + if (status) + return status; + + crc_offset = hdr_size + img_offset + img_size - 4; + + /* Skip flashing, if crc of flashed region matches */ + if (!memcmp(crc, p + crc_offset, 4)) + *crc_match = true; + else + *crc_match = false; + + return status; +} + +static int be_flash(struct be_adapter *adapter, const u8 *img, + struct be_dma_mem *flash_cmd, int optype, int img_size, + u32 img_offset) +{ + u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0; + struct be_cmd_write_flashrom *req = flash_cmd->va; + int status; + + while (total_bytes) { + num_bytes = min_t(u32, 32*1024, total_bytes); + + total_bytes -= num_bytes; + + if (!total_bytes) { + if (optype == OPTYPE_PHY_FW) + flash_op = FLASHROM_OPER_PHY_FLASH; + else + flash_op = FLASHROM_OPER_FLASH; + } else { + if (optype == OPTYPE_PHY_FW) + flash_op = FLASHROM_OPER_PHY_SAVE; + else + flash_op = FLASHROM_OPER_SAVE; + } + + memcpy(req->data_buf, img, num_bytes); + img += num_bytes; + status = be_cmd_write_flashrom(adapter, flash_cmd, optype, + flash_op, img_offset + + bytes_sent, num_bytes); + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && + optype == OPTYPE_PHY_FW) + break; + else if (status) + return status; + + bytes_sent += num_bytes; + } + return 0; +} + +/* For BE2, BE3 and BE3-R */ +static int be_flash_BEx(struct be_adapter *adapter, + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) +{ + int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); + struct device *dev = &adapter->pdev->dev; + struct flash_section_info *fsec = NULL; + int status, i, filehdr_size, num_comp; + const struct flash_comp *pflashcomp; + bool crc_match; + const u8 *p; + + struct flash_comp gen3_flash_types[] = { + { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI}, + { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT, + FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE}, + { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI}, + { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE}, + { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE}, + { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI}, + { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE}, + { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP, + FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE}, + { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW, + 
FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI}, + { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW, + FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY} + }; + + struct flash_comp gen2_flash_types[] = { + { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI}, + { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT, + FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE}, + { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI}, + { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE}, + { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS, + FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE}, + { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI}, + { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE}, + { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP, + FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE} + }; + + if (BE3_chip(adapter)) { + pflashcomp = gen3_flash_types; + filehdr_size = sizeof(struct flash_file_hdr_g3); + num_comp = ARRAY_SIZE(gen3_flash_types); + } else { + pflashcomp = gen2_flash_types; + filehdr_size = sizeof(struct flash_file_hdr_g2); + num_comp = ARRAY_SIZE(gen2_flash_types); + img_hdrs_size = 0; + } + + /* Get flash section info*/ + fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); + if (!fsec) { + dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); + return -1; + } + for (i = 0; i < num_comp; i++) { + if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type)) + continue; + + if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) && + memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) + continue; + + if (pflashcomp[i].optype == OPTYPE_PHY_FW && + !phy_flashing_required(adapter)) + continue; + + if (pflashcomp[i].optype == OPTYPE_REDBOOT) { + status = be_check_flash_crc(adapter, fw->data, + pflashcomp[i].offset, + pflashcomp[i].size, + filehdr_size + + img_hdrs_size, + OPTYPE_REDBOOT, &crc_match); + if (status) { + dev_err(dev, + "Could not get CRC for 0x%x region\n", + pflashcomp[i].optype); + continue; + } + + if (crc_match) + continue; + } + + p = fw->data + filehdr_size + pflashcomp[i].offset + + img_hdrs_size; + if (p + pflashcomp[i].size > fw->data + fw->size) + return -1; + + status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, + pflashcomp[i].size, 0); + if (status) { + dev_err(dev, "Flashing section type 0x%x failed\n", + pflashcomp[i].img_type); + return status; + } + } + return 0; +} + +static u16 be_get_img_optype(struct flash_section_entry fsec_entry) +{ + u32 img_type = le32_to_cpu(fsec_entry.type); + u16 img_optype = le16_to_cpu(fsec_entry.optype); + + if (img_optype != 0xFFFF) + return img_optype; + + switch (img_type) { + case IMAGE_FIRMWARE_iSCSI: + img_optype = OPTYPE_ISCSI_ACTIVE; + break; + case IMAGE_BOOT_CODE: + img_optype = OPTYPE_REDBOOT; + break; + case IMAGE_OPTION_ROM_ISCSI: + img_optype = OPTYPE_BIOS; + break; + case IMAGE_OPTION_ROM_PXE: + img_optype = OPTYPE_PXE_BIOS; + break; + case IMAGE_OPTION_ROM_FCoE: + img_optype = OPTYPE_FCOE_BIOS; + break; + case IMAGE_FIRMWARE_BACKUP_iSCSI: + img_optype = OPTYPE_ISCSI_BACKUP; + break; + case IMAGE_NCSI: + img_optype = OPTYPE_NCSI_FW; + break; + case IMAGE_FLASHISM_JUMPVECTOR: + img_optype = OPTYPE_FLASHISM_JUMPVECTOR; + break; + case IMAGE_FIRMWARE_PHY: + img_optype = OPTYPE_SH_PHY_FW; + break; + case IMAGE_REDBOOT_DIR: + img_optype = 
OPTYPE_REDBOOT_DIR; + break; + case IMAGE_REDBOOT_CONFIG: + img_optype = OPTYPE_REDBOOT_CONFIG; + break; + case IMAGE_UFI_DIR: + img_optype = OPTYPE_UFI_DIR; + break; + default: + break; + } + + return img_optype; +} + +static int be_flash_skyhawk(struct be_adapter *adapter, + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) +{ + int img_hdrs_size = num_of_images * sizeof(struct image_hdr); + bool crc_match, old_fw_img, flash_offset_support = true; + struct device *dev = &adapter->pdev->dev; + struct flash_section_info *fsec = NULL; + u32 img_offset, img_size, img_type; + u16 img_optype, flash_optype; + int status, i, filehdr_size; + const u8 *p; + + filehdr_size = sizeof(struct flash_file_hdr_g3); + fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); + if (!fsec) { + dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); + return -EINVAL; + } + +retry_flash: + for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { + img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); + img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); + img_type = le32_to_cpu(fsec->fsec_entry[i].type); + img_optype = be_get_img_optype(fsec->fsec_entry[i]); + old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; + + if (img_optype == 0xFFFF) + continue; + + if (flash_offset_support) + flash_optype = OPTYPE_OFFSET_SPECIFIED; + else + flash_optype = img_optype; + + /* Don't bother verifying CRC if an old FW image is being + * flashed + */ + if (old_fw_img) + goto flash; + + status = be_check_flash_crc(adapter, fw->data, img_offset, + img_size, filehdr_size + + img_hdrs_size, flash_optype, + &crc_match); + if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || + base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { + /* The current FW image on the card does not support + * OFFSET based flashing. Retry using older mechanism + * of OPTYPE based flashing + */ + if (flash_optype == OPTYPE_OFFSET_SPECIFIED) { + flash_offset_support = false; + goto retry_flash; + } + + /* The current FW image on the card does not recognize + * the new FLASH op_type. The FW download is partially + * complete. Reboot the server now to enable FW image + * to recognize the new FLASH op_type. To complete the + * remaining process, download the same FW again after + * the reboot. + */ + dev_err(dev, "Flash incomplete. Reset the server\n"); + dev_err(dev, "Download FW image again after reset\n"); + return -EAGAIN; + } else if (status) { + dev_err(dev, "Could not get CRC for 0x%x region\n", + img_optype); + return -EFAULT; + } + + if (crc_match) + continue; + +flash: + p = fw->data + filehdr_size + img_offset + img_hdrs_size; + if (p + img_size > fw->data + fw->size) + return -1; + + status = be_flash(adapter, p, flash_cmd, flash_optype, img_size, + img_offset); + + /* The current FW image on the card does not support OFFSET + * based flashing. 
Retry using older mechanism of OPTYPE based + * flashing + */ + if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD && + flash_optype == OPTYPE_OFFSET_SPECIFIED) { + flash_offset_support = false; + goto retry_flash; + } + + /* For old FW images ignore ILLEGAL_FIELD error or errors on + * UFI_DIR region + */ + if (old_fw_img && + (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || + (img_optype == OPTYPE_UFI_DIR && + base_status(status) == MCC_STATUS_FAILED))) { + continue; + } else if (status) { + dev_err(dev, "Flashing section type 0x%x failed\n", + img_type); + return -EFAULT; + } + } + return 0; +} + +static int lancer_fw_download(struct be_adapter *adapter, + const struct firmware *fw) +{ +#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) +#define LANCER_FW_DOWNLOAD_LOCATION "/prg" + struct device *dev = &adapter->pdev->dev; + struct be_dma_mem flash_cmd; + const u8 *data_ptr = NULL; + u8 *dest_image_ptr = NULL; + size_t image_size = 0; + u32 chunk_size = 0; + u32 data_written = 0; + u32 offset = 0; + int status = 0; + u8 add_status = 0; + u8 change_status; + + if (!IS_ALIGNED(fw->size, sizeof(u32))) { + dev_err(dev, "FW image size should be multiple of 4\n"); + return -EINVAL; + } + + flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + + LANCER_FW_DOWNLOAD_CHUNK; + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); + if (!flash_cmd.va) + return -ENOMEM; + + dest_image_ptr = flash_cmd.va + + sizeof(struct lancer_cmd_req_write_object); + image_size = fw->size; + data_ptr = fw->data; + + while (image_size) { + chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); + + /* Copy the image chunk content. */ + memcpy(dest_image_ptr, data_ptr, chunk_size); + + status = lancer_cmd_write_object(adapter, &flash_cmd, + chunk_size, offset, + LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &change_status, + &add_status); + if (status) + break; + + offset += data_written; + data_ptr += data_written; + image_size -= data_written; + } + + if (!status) { + /* Commit the FW written */ + status = lancer_cmd_write_object(adapter, &flash_cmd, + 0, offset, + LANCER_FW_DOWNLOAD_LOCATION, + &data_written, &change_status, + &add_status); + } + + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); + if (status) { + dev_err(dev, "Firmware load error\n"); + return be_cmd_status(status); + } + + dev_info(dev, "Firmware flashed successfully\n"); + + if (change_status == LANCER_FW_RESET_NEEDED) { + dev_info(dev, "Resetting adapter to activate new FW\n"); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); + if (status) { + dev_err(dev, "Adapter busy, could not reset FW\n"); + dev_err(dev, "Reboot server to activate new FW\n"); + } + } else if (change_status != LANCER_NO_RESET_NEEDED) { + dev_info(dev, "Reboot server to activate new FW\n"); + } + + return 0; +} + +#define BE2_UFI 2 +#define BE3_UFI 3 +#define BE3R_UFI 10 +#define SH_UFI 4 +#define SH_P2_UFI 11 + +static int be_get_ufi_type(struct be_adapter *adapter, + struct flash_file_hdr_g3 *fhdr) +{ + if (!fhdr) { + dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); + return -1; + } + + /* First letter of the build version is used to identify + * which chip this image file is meant for. + */ + switch (fhdr->build[0]) { + case BLD_STR_UFI_TYPE_SH: + return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI : + SH_UFI; + case BLD_STR_UFI_TYPE_BE3: + return (fhdr->asic_type_rev == ASIC_REV_B0) ? 
BE3R_UFI : + BE3_UFI; + case BLD_STR_UFI_TYPE_BE2: + return BE2_UFI; + default: + return -1; + } +} + +/* Check if the flash image file is compatible with the adapter that + * is being flashed. + * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type. + * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type. + */ +static bool be_check_ufi_compatibility(struct be_adapter *adapter, + struct flash_file_hdr_g3 *fhdr) +{ + int ufi_type = be_get_ufi_type(adapter, fhdr); + + switch (ufi_type) { + case SH_P2_UFI: + return skyhawk_chip(adapter); + case SH_UFI: + return (skyhawk_chip(adapter) && + adapter->asic_rev < ASIC_REV_P2); + case BE3R_UFI: + return BE3_chip(adapter); + case BE3_UFI: + return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0); + case BE2_UFI: + return BE2_chip(adapter); + default: + return false; + } +} + +static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) +{ + struct device *dev = &adapter->pdev->dev; + struct flash_file_hdr_g3 *fhdr3; + struct image_hdr *img_hdr_ptr; + int status = 0, i, num_imgs; + struct be_dma_mem flash_cmd; + + fhdr3 = (struct flash_file_hdr_g3 *)fw->data; + if (!be_check_ufi_compatibility(adapter, fhdr3)) { + dev_err(dev, "Flash image is not compatible with adapter\n"); + return -EINVAL; + } + + flash_cmd.size = sizeof(struct be_cmd_write_flashrom); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); + if (!flash_cmd.va) + return -ENOMEM; + + num_imgs = le32_to_cpu(fhdr3->num_imgs); + for (i = 0; i < num_imgs; i++) { + img_hdr_ptr = (struct image_hdr *)(fw->data + + (sizeof(struct flash_file_hdr_g3) + + i * sizeof(struct image_hdr))); + if (!BE2_chip(adapter) && + le32_to_cpu(img_hdr_ptr->imageid) != 1) + continue; + + if (skyhawk_chip(adapter)) + status = be_flash_skyhawk(adapter, fw, &flash_cmd, + num_imgs); + else + status = be_flash_BEx(adapter, fw, &flash_cmd, + num_imgs); + } + + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); + if (!status) + dev_info(dev, "Firmware flashed successfully\n"); + + return status; +} + +int be_load_fw(struct be_adapter *adapter, u8 *fw_file) +{ + const struct firmware *fw; + int status; + + if (!netif_running(adapter->netdev)) { + dev_err(&adapter->pdev->dev, + "Firmware load not allowed (interface is down)\n"); + return -ENETDOWN; + } + + status = request_firmware(&fw, fw_file, &adapter->pdev->dev); + if (status) + goto fw_exit; + + dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); + + if (lancer_chip(adapter)) + status = lancer_fw_download(adapter, fw); + else + status = be_fw_download(adapter, fw); + + if (!status) + be_cmd_get_fw_ver(adapter); + +fw_exit: + release_firmware(fw); + return status; +} + +static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags) +{ + struct be_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + int status = 0; + u16 mode = 0; + + if (!sriov_enabled(adapter)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + + status = be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + mode == BRIDGE_MODE_VEPA ? 
+ PORT_FWD_TYPE_VEPA : + PORT_FWD_TYPE_VEB); + if (status) + goto err; + + dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return status; + } +err: + dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return status; +} + +static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, + int nlflags) +{ + struct be_adapter *adapter = netdev_priv(dev); + int status = 0; + u8 hsw_mode; + + if (!sriov_enabled(adapter)) + return 0; + + /* BE and Lancer chips support VEB mode only */ + if (BEx_chip(adapter) || lancer_chip(adapter)) { + hsw_mode = PORT_FWD_TYPE_VEB; + } else { + status = be_cmd_get_hsw_config(adapter, NULL, 0, + adapter->if_handle, &hsw_mode); + if (status) + return 0; + } + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + hsw_mode == PORT_FWD_TYPE_VEPA ? + BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, + 0, 0, nlflags); +} + +#ifdef CONFIG_BE2NET_VXLAN +/* VxLAN offload Notes: + * + * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't + * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload + * is expected to work across all types of IP tunnels once exported. Skyhawk + * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN + * offloads in hw_enc_features only when a VxLAN port is added. If other (non + * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for + * those other tunnels are unexported on the fly through ndo_features_check(). + * + * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack + * adds more than one port, disable offloads and don't re-enable them again + * until after all the tunnels are removed. 
+ */ +static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, + __be16 port) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + int status; + + if (lancer_chip(adapter) || BEx_chip(adapter)) + return; + + if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { + dev_info(dev, + "Only one UDP port supported for VxLAN offloads\n"); + dev_info(dev, "Disabling VxLAN offloads\n"); + adapter->vxlan_port_count++; + goto err; + } + + if (adapter->vxlan_port_count++ >= 1) + return; + + status = be_cmd_manage_iface(adapter, adapter->if_handle, + OP_CONVERT_NORMAL_TO_TUNNEL); + if (status) { + dev_warn(dev, "Failed to convert normal interface to tunnel\n"); + goto err; + } + + status = be_cmd_set_vxlan_port(adapter, port); + if (status) { + dev_warn(dev, "Failed to add VxLAN port\n"); + goto err; + } + adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; + adapter->vxlan_port = port; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->features |= NETIF_F_GSO_UDP_TUNNEL; + + dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", + be16_to_cpu(port)); + return; +err: + be_disable_vxlan_offloads(adapter); +} + +static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, + __be16 port) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + if (lancer_chip(adapter) || BEx_chip(adapter)) + return; + + if (adapter->vxlan_port != port) + goto done; + + be_disable_vxlan_offloads(adapter); + + dev_info(&adapter->pdev->dev, + "Disabled VxLAN offloads for UDP port %d\n", + be16_to_cpu(port)); +done: + adapter->vxlan_port_count--; +} + +static netdev_features_t be_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct be_adapter *adapter = netdev_priv(dev); + u8 l4_hdr = 0; + + /* The code below restricts offload features for some tunneled packets. + * Offload features for normal (non tunnel) packets are unchanged. + */ + if (!skb->encapsulation || + !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) + return features; + + /* It's an encapsulated packet and VxLAN offloads are enabled. We + * should disable tunnel offload features if it's not a VxLAN packet, + * as tunnel offloads have been enabled only for VxLAN. This is done to + * allow other tunneled traffic like GRE work fine while VxLAN + * offloads are configured in Skyhawk-R. 
+ */ + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + if (l4_hdr != IPPROTO_UDP || + skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)) + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + + return features; +} +#endif + +static const struct net_device_ops be_netdev_ops = { + .ndo_open = be_open, + .ndo_stop = be_close, + .ndo_start_xmit = be_xmit, + .ndo_set_rx_mode = be_set_rx_mode, + .ndo_set_mac_address = be_mac_addr_set, + .ndo_change_mtu = be_change_mtu, + .ndo_get_stats64 = be_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = be_vlan_add_vid, + .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, + .ndo_set_vf_mac = be_set_vf_mac, + .ndo_set_vf_vlan = be_set_vf_vlan, + .ndo_set_vf_rate = be_set_vf_tx_rate, + .ndo_get_vf_config = be_get_vf_config, + .ndo_set_vf_link_state = be_set_vf_link_state, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = be_netpoll, +#endif + .ndo_bridge_setlink = be_ndo_bridge_setlink, + .ndo_bridge_getlink = be_ndo_bridge_getlink, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = be_busy_poll, +#endif +#ifdef CONFIG_BE2NET_VXLAN + .ndo_add_vxlan_port = be_add_vxlan_port, + .ndo_del_vxlan_port = be_del_vxlan_port, + .ndo_features_check = be_features_check, +#endif +}; + +static void be_netdev_init(struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + + netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX; + if (be_multi_rxq(adapter)) + netdev->hw_features |= NETIF_F_RXHASH; + + netdev->features |= netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->flags |= IFF_MULTICAST; + + netif_set_gso_max_size(netdev, 65535 - ETH_HLEN); + + netdev->netdev_ops = &be_netdev_ops; + + netdev->ethtool_ops = &be_ethtool_ops; +} + +static void be_cleanup(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) + be_close(netdev); + rtnl_unlock(); + + be_clear(adapter); +} + +static int be_resume(struct be_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int status; + + status = be_setup(adapter); + if (status) + return status; + + if (netif_running(netdev)) { + status = be_open(netdev); + if (status) + return status; + } + + netif_device_attach(netdev); + + return 0; +} + +static int be_err_recover(struct be_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + int status; + + status = be_resume(adapter); + if (status) + goto err; + + dev_info(dev, "Adapter recovery successful\n"); + return 0; +err: + if (be_physfn(adapter)) + dev_err(dev, "Adapter recovery failed\n"); + else + dev_err(dev, "Re-trying adapter recovery\n"); + + return status; +} + +static void be_err_detection_task(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, + be_err_detection_work.work); + int status = 0; + + be_detect_error(adapter); + + if (adapter->hw_error) { + 
be_cleanup(adapter); + + /* As of now error recovery support is in Lancer only */ + if (lancer_chip(adapter)) + status = be_err_recover(adapter); + } + + /* Always attempt recovery on VFs */ + if (!status || be_virtfn(adapter)) + be_schedule_err_detection(adapter); +} + +static void be_log_sfp_info(struct be_adapter *adapter) +{ + int status; + + status = be_cmd_query_sfp_info(adapter); + if (!status) { + dev_err(&adapter->pdev->dev, + "Unqualified SFP+ detected on %c from %s part no: %s", + adapter->port_name, adapter->phy.vendor_name, + adapter->phy.vendor_pn); + } + adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; +} + +static void be_worker(struct work_struct *work) +{ + struct be_adapter *adapter = + container_of(work, struct be_adapter, work.work); + struct be_rx_obj *rxo; + int i; + + /* when interrupts are not yet enabled, just reap any pending + * mcc completions + */ + if (!netif_running(adapter->netdev)) { + local_bh_disable(); + be_process_mcc(adapter); + local_bh_enable(); + goto reschedule; + } + + if (!adapter->stats_cmd_sent) { + if (lancer_chip(adapter)) + lancer_cmd_get_pport_stats(adapter, + &adapter->stats_cmd); + else + be_cmd_get_stats(adapter, &adapter->stats_cmd); + } + + if (be_physfn(adapter) && + MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) + be_cmd_get_die_temperature(adapter); + + for_all_rx_queues(adapter, rxo, i) { + /* Replenish RX-queues starved due to memory + * allocation failures. + */ + if (rxo->rx_post_starved) + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); + } + + be_eqd_update(adapter); + + if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) + be_log_sfp_info(adapter); + +reschedule: + adapter->work_counter++; + schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); +} + +static void be_unmap_pci_bars(struct be_adapter *adapter) +{ + if (adapter->csr) + pci_iounmap(adapter->pdev, adapter->csr); + if (adapter->db) + pci_iounmap(adapter->pdev, adapter->db); +} + +static int db_bar(struct be_adapter *adapter) +{ + if (lancer_chip(adapter) || !be_physfn(adapter)) + return 0; + else + return 4; +} + +static int be_roce_map_pci_bars(struct be_adapter *adapter) +{ + if (skyhawk_chip(adapter)) { + adapter->roce_db.size = 4096; + adapter->roce_db.io_addr = pci_resource_start(adapter->pdev, + db_bar(adapter)); + adapter->roce_db.total_size = pci_resource_len(adapter->pdev, + db_bar(adapter)); + } + return 0; +} + +static int be_map_pci_bars(struct be_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + u8 __iomem *addr; + u32 sli_intf; + + pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); + adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> + SLI_INTF_FAMILY_SHIFT; + adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; + + if (BEx_chip(adapter) && be_physfn(adapter)) { + adapter->csr = pci_iomap(pdev, 2, 0); + if (!adapter->csr) + return -ENOMEM; + } + + addr = pci_iomap(pdev, db_bar(adapter), 0); + if (!addr) + goto pci_map_err; + adapter->db = addr; + + if (skyhawk_chip(adapter) || BEx_chip(adapter)) { + if (be_physfn(adapter)) { + /* PCICFG is the 2nd BAR in BE2 */ + addr = pci_iomap(pdev, BE2_chip(adapter) ? 
1 : 0, 0); + if (!addr) + goto pci_map_err; + adapter->pcicfg = addr; + } else { + adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + } + } + + be_roce_map_pci_bars(adapter); + return 0; + +pci_map_err: + dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); + be_unmap_pci_bars(adapter); + return -ENOMEM; +} + +static void be_drv_cleanup(struct be_adapter *adapter) +{ + struct be_dma_mem *mem = &adapter->mbox_mem_alloced; + struct device *dev = &adapter->pdev->dev; + + if (mem->va) + dma_free_coherent(dev, mem->size, mem->va, mem->dma); + + mem = &adapter->rx_filter; + if (mem->va) + dma_free_coherent(dev, mem->size, mem->va, mem->dma); + + mem = &adapter->stats_cmd; + if (mem->va) + dma_free_coherent(dev, mem->size, mem->va, mem->dma); +} + +/* Allocate and initialize various fields in be_adapter struct */ +static int be_drv_init(struct be_adapter *adapter) +{ + struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; + struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; + struct be_dma_mem *rx_filter = &adapter->rx_filter; + struct be_dma_mem *stats_cmd = &adapter->stats_cmd; + struct device *dev = &adapter->pdev->dev; + int status = 0; + + mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; + mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, + &mbox_mem_alloc->dma, + GFP_KERNEL); + if (!mbox_mem_alloc->va) + return -ENOMEM; + + mbox_mem_align->size = sizeof(struct be_mcc_mailbox); + mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); + mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); + + rx_filter->size = sizeof(struct be_cmd_req_rx_filter); + rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, + &rx_filter->dma, GFP_KERNEL); + if (!rx_filter->va) { + status = -ENOMEM; + goto free_mbox; + } + + if (lancer_chip(adapter)) + stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats); + else if (BE2_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0); + else if (BE3_chip(adapter)) + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); + else + stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); + stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, + &stats_cmd->dma, GFP_KERNEL); + if (!stats_cmd->va) { + status = -ENOMEM; + goto free_rx_filter; + } + + mutex_init(&adapter->mbox_lock); + spin_lock_init(&adapter->mcc_lock); + spin_lock_init(&adapter->mcc_cq_lock); + init_completion(&adapter->et_cmd_compl); + + pci_save_state(adapter->pdev); + + INIT_DELAYED_WORK(&adapter->work, be_worker); + INIT_DELAYED_WORK(&adapter->be_err_detection_work, + be_err_detection_task); + + adapter->rx_fc = true; + adapter->tx_fc = true; + + /* Must be a power of 2 or else MODULO will BUG_ON */ + adapter->be_get_temp_freq = 64; + + return 0; + +free_rx_filter: + dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma); +free_mbox: + dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va, + mbox_mem_alloc->dma); + return status; +} + +static void be_remove(struct pci_dev *pdev) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + + if (!adapter) + return; + + be_roce_dev_remove(adapter); + be_intr_set(adapter, false); + + be_cancel_err_detection(adapter); + + unregister_netdev(adapter->netdev); + + be_clear(adapter); + + /* tell fw we're done with firing cmds */ + be_cmd_fw_clean(adapter); + + be_unmap_pci_bars(adapter); + be_drv_cleanup(adapter); + + pci_disable_pcie_error_reporting(pdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + free_netdev(adapter->netdev); +} + 
+static char *mc_name(struct be_adapter *adapter) +{ + char *str = ""; /* default */ + + switch (adapter->mc_type) { + case UMC: + str = "UMC"; + break; + case FLEX10: + str = "FLEX10"; + break; + case vNIC1: + str = "vNIC-1"; + break; + case nPAR: + str = "nPAR"; + break; + case UFP: + str = "UFP"; + break; + case vNIC2: + str = "vNIC-2"; + break; + default: + str = ""; + } + + return str; +} + +static inline char *func_name(struct be_adapter *adapter) +{ + return be_physfn(adapter) ? "PF" : "VF"; +} + +static inline char *nic_name(struct pci_dev *pdev) +{ + switch (pdev->device) { + case OC_DEVICE_ID1: + return OC_NAME; + case OC_DEVICE_ID2: + return OC_NAME_BE; + case OC_DEVICE_ID3: + case OC_DEVICE_ID4: + return OC_NAME_LANCER; + case BE_DEVICE_ID2: + return BE3_NAME; + case OC_DEVICE_ID5: + case OC_DEVICE_ID6: + return OC_NAME_SH; + default: + return BE_NAME; + } +} + +static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) +{ + struct be_adapter *adapter; + struct net_device *netdev; + int status = 0; + + dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER); + + status = pci_enable_device(pdev); + if (status) + goto do_none; + + status = pci_request_regions(pdev, DRV_NAME); + if (status) + goto disable_dev; + pci_set_master(pdev); + + netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS); + if (!netdev) { + status = -ENOMEM; + goto rel_reg; + } + adapter = netdev_priv(netdev); + adapter->pdev = pdev; + pci_set_drvdata(pdev, adapter); + adapter->netdev = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!status) { + netdev->features |= NETIF_F_HIGHDMA; + } else { + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (status) { + dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); + goto free_netdev; + } + } + + status = pci_enable_pcie_error_reporting(pdev); + if (!status) + dev_info(&pdev->dev, "PCIe error reporting enabled\n"); + + status = be_map_pci_bars(adapter); + if (status) + goto free_netdev; + + status = be_drv_init(adapter); + if (status) + goto unmap_bars; + + status = be_setup(adapter); + if (status) + goto drv_cleanup; + + be_netdev_init(netdev); + status = register_netdev(netdev); + if (status != 0) + goto unsetup; + + be_roce_dev_add(adapter); + + be_schedule_err_detection(adapter); + + dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), + func_name(adapter), mc_name(adapter), adapter->port_name); + + return 0; + +unsetup: + be_clear(adapter); +drv_cleanup: + be_drv_cleanup(adapter); +unmap_bars: + be_unmap_pci_bars(adapter); +free_netdev: + free_netdev(netdev); +rel_reg: + pci_release_regions(pdev); +disable_dev: + pci_disable_device(pdev); +do_none: + dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev)); + return status; +} + +static int be_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + + if (adapter->wol_en) + be_setup_wol(adapter, true); + + be_intr_set(adapter, false); + be_cancel_err_detection(adapter); + + be_cleanup(adapter); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int be_pci_resume(struct pci_dev *pdev) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + int status = 0; + + status = pci_enable_device(pdev); + if (status) + return status; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + status = be_resume(adapter); + if 
(status) + return status; + + be_schedule_err_detection(adapter); + + if (adapter->wol_en) + be_setup_wol(adapter, false); + + return 0; +} + +/* + * An FLR will stop BE from DMAing any data. + */ +static void be_shutdown(struct pci_dev *pdev) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + + if (!adapter) + return; + + be_roce_dev_shutdown(adapter); + cancel_delayed_work_sync(&adapter->work); + be_cancel_err_detection(adapter); + + netif_device_detach(adapter->netdev); + + be_cmd_reset_function(adapter); + + pci_disable_device(pdev); +} + +static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + + dev_err(&adapter->pdev->dev, "EEH error detected\n"); + + if (!adapter->eeh_error) { + adapter->eeh_error = true; + + be_cancel_err_detection(adapter); + + be_cleanup(adapter); + } + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + pci_disable_device(pdev); + + /* The error could cause the FW to trigger a flash debug dump. + * Resetting the card while flash dump is in progress + * can cause it not to recover; wait for it to finish. + * Wait only for first function as it is needed only once per + * adapter. + */ + if (pdev->devfn == 0) + ssleep(30); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + int status; + + dev_info(&adapter->pdev->dev, "EEH reset\n"); + + status = pci_enable_device(pdev); + if (status) + return PCI_ERS_RESULT_DISCONNECT; + + pci_set_master(pdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* Check if card is ok and fw is ready */ + dev_info(&adapter->pdev->dev, + "Waiting for FW to be ready after EEH reset\n"); + status = be_fw_wait_ready(adapter); + if (status) + return PCI_ERS_RESULT_DISCONNECT; + + pci_cleanup_aer_uncorrect_error_status(pdev); + be_clear_all_error(adapter); + return PCI_ERS_RESULT_RECOVERED; +} + +static void be_eeh_resume(struct pci_dev *pdev) +{ + int status = 0; + struct be_adapter *adapter = pci_get_drvdata(pdev); + + dev_info(&adapter->pdev->dev, "EEH resume\n"); + + pci_save_state(pdev); + + status = be_resume(adapter); + if (status) + goto err; + + be_schedule_err_detection(adapter); + return; +err: + dev_err(&adapter->pdev->dev, "EEH resume failed\n"); +} + +static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct be_adapter *adapter = pci_get_drvdata(pdev); + u16 num_vf_qs; + int status; + + if (!num_vfs) + be_vf_clear(adapter); + + adapter->num_vfs = num_vfs; + + if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot disable VFs while they are assigned\n"); + return -EBUSY; + } + + /* When the HW is in SRIOV capable configuration, the PF-pool resources + * are equally distributed across the max-number of VFs. The user may + * request only a subset of the max-vfs to be enabled. + * Based on num_vfs, redistribute the resources across num_vfs so that + * each VF will have access to more number of resources. + * This facility is not available in BE3 FW. + * Also, this is done by FW in Lancer chip. 
+ */ + if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) { + num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs); + status = be_cmd_set_sriov_config(adapter, adapter->pool_res, + adapter->num_vfs, num_vf_qs); + if (status) + dev_err(&pdev->dev, + "Failed to optimize SR-IOV resources\n"); + } + + status = be_get_resources(adapter); + if (status) + return be_cmd_status(status); + + /* Updating real_num_tx/rx_queues() requires rtnl_lock() */ + rtnl_lock(); + status = be_update_queues(adapter); + rtnl_unlock(); + if (status) + return be_cmd_status(status); + + if (adapter->num_vfs) + status = be_vf_setup(adapter); + + if (!status) + return adapter->num_vfs; + + return 0; +} + +static const struct pci_error_handlers be_eeh_handlers = { + .error_detected = be_eeh_err_detected, + .slot_reset = be_eeh_reset, + .resume = be_eeh_resume, +}; + +static struct pci_driver be_driver = { + .name = DRV_NAME, + .id_table = be_dev_ids, + .probe = be_probe, + .remove = be_remove, + .suspend = be_suspend, + .resume = be_pci_resume, + .shutdown = be_shutdown, + .sriov_configure = be_pci_sriov_configure, + .err_handler = &be_eeh_handlers +}; + +static int __init be_init_module(void) +{ + if (rx_frag_size != 8192 && rx_frag_size != 4096 && + rx_frag_size != 2048) { + printk(KERN_WARNING DRV_NAME + " : Module param rx_frag_size must be 2048/4096/8192." + " Using 2048\n"); + rx_frag_size = 2048; + } + + if (num_vfs > 0) { + pr_info(DRV_NAME " : Module param num_vfs is obsolete."); + pr_info(DRV_NAME " : Use sysfs method to enable VFs\n"); + } + + return pci_register_driver(&be_driver); +} +module_init(be_init_module); + +static void __exit be_exit_module(void) +{ + pci_unregister_driver(&be_driver); +} +module_exit(be_exit_module); diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c new file mode 100644 index 000000000..132866433 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2005 - 2014 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. 
+ * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + */ + +#include +#include +#include +#include + +#include "be.h" +#include "be_cmds.h" + +static struct ocrdma_driver *ocrdma_drv; +static LIST_HEAD(be_adapter_list); +static DEFINE_MUTEX(be_adapter_list_lock); + +static void _be_roce_dev_add(struct be_adapter *adapter) +{ + struct be_dev_info dev_info; + int i, num_vec; + struct pci_dev *pdev = adapter->pdev; + + if (!ocrdma_drv) + return; + + if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) { + dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n"); + return; + } + + if (pdev->device == OC_DEVICE_ID5) { + /* only msix is supported on these devices */ + if (!msix_enabled(adapter)) + return; + /* DPP region address and length */ + dev_info.dpp_unmapped_addr = pci_resource_start(pdev, 2); + dev_info.dpp_unmapped_len = pci_resource_len(pdev, 2); + } else { + dev_info.dpp_unmapped_addr = 0; + dev_info.dpp_unmapped_len = 0; + } + dev_info.pdev = adapter->pdev; + dev_info.db = adapter->db; + dev_info.unmapped_db = adapter->roce_db.io_addr; + dev_info.db_page_size = adapter->roce_db.size; + dev_info.db_total_size = adapter->roce_db.total_size; + dev_info.netdev = adapter->netdev; + memcpy(dev_info.mac_addr, adapter->netdev->dev_addr, ETH_ALEN); + dev_info.dev_family = adapter->sli_family; + if (msix_enabled(adapter)) { + /* provide all the vectors, so that EQ creation response + * can decide which one to use. + */ + num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec; + dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX; + dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS); + /* provide start index of the vector, + * so in case of linear usage, + * it can use the base as starting point. + */ + dev_info.msix.start_vector = adapter->num_evt_qs; + for (i = 0; i < dev_info.msix.num_vectors; i++) { + dev_info.msix.vector_list[i] = + adapter->msix_entries[i].vector; + } + } else { + dev_info.msix.num_vectors = 0; + dev_info.intr_mode = BE_INTERRUPT_MODE_INTX; + } + adapter->ocrdma_dev = ocrdma_drv->add(&dev_info); +} + +void be_roce_dev_add(struct be_adapter *adapter) +{ + if (be_roce_supported(adapter)) { + INIT_LIST_HEAD(&adapter->entry); + mutex_lock(&be_adapter_list_lock); + list_add_tail(&adapter->entry, &be_adapter_list); + + /* invoke add() routine of roce driver only if + * valid driver registered with add method and add() is not yet + * invoked on a given adapter. 
+ */ + _be_roce_dev_add(adapter); + mutex_unlock(&be_adapter_list_lock); + } +} + +static void _be_roce_dev_remove(struct be_adapter *adapter) +{ + if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev) + ocrdma_drv->remove(adapter->ocrdma_dev); + adapter->ocrdma_dev = NULL; +} + +void be_roce_dev_remove(struct be_adapter *adapter) +{ + if (be_roce_supported(adapter)) { + mutex_lock(&be_adapter_list_lock); + _be_roce_dev_remove(adapter); + list_del(&adapter->entry); + mutex_unlock(&be_adapter_list_lock); + } +} + +static void _be_roce_dev_open(struct be_adapter *adapter) +{ + if (ocrdma_drv && adapter->ocrdma_dev && + ocrdma_drv->state_change_handler) + ocrdma_drv->state_change_handler(adapter->ocrdma_dev, + BE_DEV_UP); +} + +void be_roce_dev_open(struct be_adapter *adapter) +{ + if (be_roce_supported(adapter)) { + mutex_lock(&be_adapter_list_lock); + _be_roce_dev_open(adapter); + mutex_unlock(&be_adapter_list_lock); + } +} + +static void _be_roce_dev_close(struct be_adapter *adapter) +{ + if (ocrdma_drv && adapter->ocrdma_dev && + ocrdma_drv->state_change_handler) + ocrdma_drv->state_change_handler(adapter->ocrdma_dev, + BE_DEV_DOWN); +} + +void be_roce_dev_close(struct be_adapter *adapter) +{ + if (be_roce_supported(adapter)) { + mutex_lock(&be_adapter_list_lock); + _be_roce_dev_close(adapter); + mutex_unlock(&be_adapter_list_lock); + } +} + +void be_roce_dev_shutdown(struct be_adapter *adapter) +{ + if (be_roce_supported(adapter)) { + mutex_lock(&be_adapter_list_lock); + if (ocrdma_drv && adapter->ocrdma_dev && + ocrdma_drv->state_change_handler) + ocrdma_drv->state_change_handler(adapter->ocrdma_dev, + BE_DEV_SHUTDOWN); + mutex_unlock(&be_adapter_list_lock); + } +} + +int be_roce_register_driver(struct ocrdma_driver *drv) +{ + struct be_adapter *dev; + + mutex_lock(&be_adapter_list_lock); + if (ocrdma_drv) { + mutex_unlock(&be_adapter_list_lock); + return -EINVAL; + } + ocrdma_drv = drv; + list_for_each_entry(dev, &be_adapter_list, entry) { + struct net_device *netdev; + + _be_roce_dev_add(dev); + netdev = dev->netdev; + if (netif_running(netdev) && netif_oper_up(netdev)) + _be_roce_dev_open(dev); + } + mutex_unlock(&be_adapter_list_lock); + return 0; +} +EXPORT_SYMBOL(be_roce_register_driver); + +void be_roce_unregister_driver(struct ocrdma_driver *drv) +{ + struct be_adapter *dev; + + mutex_lock(&be_adapter_list_lock); + list_for_each_entry(dev, &be_adapter_list, entry) { + if (dev->ocrdma_dev) + _be_roce_dev_remove(dev); + } + ocrdma_drv = NULL; + mutex_unlock(&be_adapter_list_lock); +} +EXPORT_SYMBOL(be_roce_unregister_driver); diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h new file mode 100644 index 000000000..e6f7eb1a7 --- /dev/null +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2005 - 2014 Emulex + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. The full GNU General + * Public License is included in this distribution in the file called COPYING. 
+ * + * Contact Information: + * linux-drivers@emulex.com + * + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 + */ + +#ifndef BE_ROCE_H +#define BE_ROCE_H + +#include +#include + +#define BE_ROCE_ABI_VERSION 1 + +struct ocrdma_dev; + +enum be_interrupt_mode { + BE_INTERRUPT_MODE_MSIX = 0, + BE_INTERRUPT_MODE_INTX = 1, + BE_INTERRUPT_MODE_MSI = 2, +}; + +#define MAX_MSIX_VECTORS 32 +struct be_dev_info { + u8 __iomem *db; + u64 unmapped_db; + u32 db_page_size; + u32 db_total_size; + u64 dpp_unmapped_addr; + u32 dpp_unmapped_len; + struct pci_dev *pdev; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; + u32 dev_family; + enum be_interrupt_mode intr_mode; + struct { + int num_vectors; + int start_vector; + u32 vector_list[MAX_MSIX_VECTORS]; + } msix; +}; + +/* ocrdma driver register's the callback functions with nic driver. */ +struct ocrdma_driver { + unsigned char name[32]; + u32 be_abi_version; + struct ocrdma_dev *(*add) (struct be_dev_info *dev_info); + void (*remove) (struct ocrdma_dev *); + void (*state_change_handler) (struct ocrdma_dev *, u32 new_state); +}; + +enum { + BE_DEV_UP = 0, + BE_DEV_DOWN = 1, + BE_DEV_SHUTDOWN = 2 +}; + +/* APIs for RoCE driver to register callback handlers, + * which will be invoked when device is added, removed, ifup, ifdown + */ +int be_roce_register_driver(struct ocrdma_driver *drv); +void be_roce_unregister_driver(struct ocrdma_driver *drv); + +/* API for RoCE driver to issue mailbox commands */ +int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, + int wrb_payload_size, u16 *cmd_status, u16 *ext_status); + +#endif /* BE_ROCE_H */ diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c new file mode 100644 index 000000000..442410cd2 --- /dev/null +++ b/drivers/net/ethernet/ethoc.c @@ -0,0 +1,1324 @@ +/* + * linux/drivers/net/ethernet/ethoc.c + * + * Copyright (C) 2007-2008 Avionic Design Development GmbH + * Copyright (C) 2008-2009 Avionic Design GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Written by Thierry Reding + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int buffer_size = 0x8000; /* 32 KBytes */ +module_param(buffer_size, int, 0); +MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); + +/* register offsets */ +#define MODER 0x00 +#define INT_SOURCE 0x04 +#define INT_MASK 0x08 +#define IPGT 0x0c +#define IPGR1 0x10 +#define IPGR2 0x14 +#define PACKETLEN 0x18 +#define COLLCONF 0x1c +#define TX_BD_NUM 0x20 +#define CTRLMODER 0x24 +#define MIIMODER 0x28 +#define MIICOMMAND 0x2c +#define MIIADDRESS 0x30 +#define MIITX_DATA 0x34 +#define MIIRX_DATA 0x38 +#define MIISTATUS 0x3c +#define MAC_ADDR0 0x40 +#define MAC_ADDR1 0x44 +#define ETH_HASH0 0x48 +#define ETH_HASH1 0x4c +#define ETH_TXCTRL 0x50 +#define ETH_END 0x54 + +/* mode register */ +#define MODER_RXEN (1 << 0) /* receive enable */ +#define MODER_TXEN (1 << 1) /* transmit enable */ +#define MODER_NOPRE (1 << 2) /* no preamble */ +#define MODER_BRO (1 << 3) /* broadcast address */ +#define MODER_IAM (1 << 4) /* individual address mode */ +#define MODER_PRO (1 << 5) /* promiscuous mode */ +#define MODER_IFG (1 << 6) /* interframe gap for incoming frames */ +#define MODER_LOOP (1 << 7) /* loopback */ +#define MODER_NBO (1 << 8) /* no back-off */ +#define MODER_EDE (1 << 9) /* excess defer enable */ +#define MODER_FULLD (1 << 10) /* full duplex */ +#define MODER_RESET (1 << 11) /* FIXME: reset (undocumented) */ +#define MODER_DCRC (1 << 12) /* delayed CRC enable */ +#define MODER_CRC (1 << 13) /* CRC enable */ +#define MODER_HUGE (1 << 14) /* huge packets enable */ +#define MODER_PAD (1 << 15) /* padding enabled */ +#define MODER_RSM (1 << 16) /* receive small packets */ + +/* interrupt source and mask registers */ +#define INT_MASK_TXF (1 << 0) /* transmit frame */ +#define INT_MASK_TXE (1 << 1) /* transmit error */ +#define INT_MASK_RXF (1 << 2) /* receive frame */ +#define INT_MASK_RXE (1 << 3) /* receive error */ +#define INT_MASK_BUSY (1 << 4) +#define INT_MASK_TXC (1 << 5) /* transmit control frame */ +#define INT_MASK_RXC (1 << 6) /* receive control frame */ + +#define INT_MASK_TX (INT_MASK_TXF | INT_MASK_TXE) +#define INT_MASK_RX (INT_MASK_RXF | INT_MASK_RXE) + +#define INT_MASK_ALL ( \ + INT_MASK_TXF | INT_MASK_TXE | \ + INT_MASK_RXF | INT_MASK_RXE | \ + INT_MASK_TXC | INT_MASK_RXC | \ + INT_MASK_BUSY \ + ) + +/* packet length register */ +#define PACKETLEN_MIN(min) (((min) & 0xffff) << 16) +#define PACKETLEN_MAX(max) (((max) & 0xffff) << 0) +#define PACKETLEN_MIN_MAX(min, max) (PACKETLEN_MIN(min) | \ + PACKETLEN_MAX(max)) + +/* transmit buffer number register */ +#define TX_BD_NUM_VAL(x) (((x) <= 0x80) ? 
(x) : 0x80) + +/* control module mode register */ +#define CTRLMODER_PASSALL (1 << 0) /* pass all receive frames */ +#define CTRLMODER_RXFLOW (1 << 1) /* receive control flow */ +#define CTRLMODER_TXFLOW (1 << 2) /* transmit control flow */ + +/* MII mode register */ +#define MIIMODER_CLKDIV(x) ((x) & 0xfe) /* needs to be an even number */ +#define MIIMODER_NOPRE (1 << 8) /* no preamble */ + +/* MII command register */ +#define MIICOMMAND_SCAN (1 << 0) /* scan status */ +#define MIICOMMAND_READ (1 << 1) /* read status */ +#define MIICOMMAND_WRITE (1 << 2) /* write control data */ + +/* MII address register */ +#define MIIADDRESS_FIAD(x) (((x) & 0x1f) << 0) +#define MIIADDRESS_RGAD(x) (((x) & 0x1f) << 8) +#define MIIADDRESS_ADDR(phy, reg) (MIIADDRESS_FIAD(phy) | \ + MIIADDRESS_RGAD(reg)) + +/* MII transmit data register */ +#define MIITX_DATA_VAL(x) ((x) & 0xffff) + +/* MII receive data register */ +#define MIIRX_DATA_VAL(x) ((x) & 0xffff) + +/* MII status register */ +#define MIISTATUS_LINKFAIL (1 << 0) +#define MIISTATUS_BUSY (1 << 1) +#define MIISTATUS_INVALID (1 << 2) + +/* TX buffer descriptor */ +#define TX_BD_CS (1 << 0) /* carrier sense lost */ +#define TX_BD_DF (1 << 1) /* defer indication */ +#define TX_BD_LC (1 << 2) /* late collision */ +#define TX_BD_RL (1 << 3) /* retransmission limit */ +#define TX_BD_RETRY_MASK (0x00f0) +#define TX_BD_RETRY(x) (((x) & 0x00f0) >> 4) +#define TX_BD_UR (1 << 8) /* transmitter underrun */ +#define TX_BD_CRC (1 << 11) /* TX CRC enable */ +#define TX_BD_PAD (1 << 12) /* pad enable for short packets */ +#define TX_BD_WRAP (1 << 13) +#define TX_BD_IRQ (1 << 14) /* interrupt request enable */ +#define TX_BD_READY (1 << 15) /* TX buffer ready */ +#define TX_BD_LEN(x) (((x) & 0xffff) << 16) +#define TX_BD_LEN_MASK (0xffff << 16) + +#define TX_BD_STATS (TX_BD_CS | TX_BD_DF | TX_BD_LC | \ + TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR) + +/* RX buffer descriptor */ +#define RX_BD_LC (1 << 0) /* late collision */ +#define RX_BD_CRC (1 << 1) /* RX CRC error */ +#define RX_BD_SF (1 << 2) /* short frame */ +#define RX_BD_TL (1 << 3) /* too long */ +#define RX_BD_DN (1 << 4) /* dribble nibble */ +#define RX_BD_IS (1 << 5) /* invalid symbol */ +#define RX_BD_OR (1 << 6) /* receiver overrun */ +#define RX_BD_MISS (1 << 7) +#define RX_BD_CF (1 << 8) /* control frame */ +#define RX_BD_WRAP (1 << 13) +#define RX_BD_IRQ (1 << 14) /* interrupt request enable */ +#define RX_BD_EMPTY (1 << 15) +#define RX_BD_LEN(x) (((x) & 0xffff) << 16) + +#define RX_BD_STATS (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \ + RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS) + +#define ETHOC_BUFSIZ 1536 +#define ETHOC_ZLEN 64 +#define ETHOC_BD_BASE 0x400 +#define ETHOC_TIMEOUT (HZ / 2) +#define ETHOC_MII_TIMEOUT (1 + (HZ / 5)) + +/** + * struct ethoc - driver-private device structure + * @iobase: pointer to I/O memory region + * @membase: pointer to buffer memory region + * @dma_alloc: dma allocated buffer size + * @io_region_size: I/O memory region size + * @num_bd: number of buffer descriptors + * @num_tx: number of send buffers + * @cur_tx: last send buffer written + * @dty_tx: last buffer actually sent + * @num_rx: number of receive buffers + * @cur_rx: current receive buffer + * @vma: pointer to array of virtual memory addresses for buffers + * @netdev: pointer to network device structure + * @napi: NAPI structure + * @msg_enable: device state flags + * @lock: device lock + * @phy: attached PHY + * @mdio: MDIO bus for PHY access + * @phy_id: address of attached PHY + */ +struct ethoc { + 
void __iomem *iobase; + void __iomem *membase; + int dma_alloc; + resource_size_t io_region_size; + + unsigned int num_bd; + unsigned int num_tx; + unsigned int cur_tx; + unsigned int dty_tx; + + unsigned int num_rx; + unsigned int cur_rx; + + void **vma; + + struct net_device *netdev; + struct napi_struct napi; + u32 msg_enable; + + spinlock_t lock; + + struct phy_device *phy; + struct mii_bus *mdio; + struct clk *clk; + s8 phy_id; +}; + +/** + * struct ethoc_bd - buffer descriptor + * @stat: buffer statistics + * @addr: physical memory address + */ +struct ethoc_bd { + u32 stat; + u32 addr; +}; + +static inline u32 ethoc_read(struct ethoc *dev, loff_t offset) +{ + return ioread32(dev->iobase + offset); +} + +static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) +{ + iowrite32(data, dev->iobase + offset); +} + +static inline void ethoc_read_bd(struct ethoc *dev, int index, + struct ethoc_bd *bd) +{ + loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); + bd->stat = ethoc_read(dev, offset + 0); + bd->addr = ethoc_read(dev, offset + 4); +} + +static inline void ethoc_write_bd(struct ethoc *dev, int index, + const struct ethoc_bd *bd) +{ + loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); + ethoc_write(dev, offset + 0, bd->stat); + ethoc_write(dev, offset + 4, bd->addr); +} + +static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask) +{ + u32 imask = ethoc_read(dev, INT_MASK); + imask |= mask; + ethoc_write(dev, INT_MASK, imask); +} + +static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask) +{ + u32 imask = ethoc_read(dev, INT_MASK); + imask &= ~mask; + ethoc_write(dev, INT_MASK, imask); +} + +static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask) +{ + ethoc_write(dev, INT_SOURCE, mask); +} + +static inline void ethoc_enable_rx_and_tx(struct ethoc *dev) +{ + u32 mode = ethoc_read(dev, MODER); + mode |= MODER_RXEN | MODER_TXEN; + ethoc_write(dev, MODER, mode); +} + +static inline void ethoc_disable_rx_and_tx(struct ethoc *dev) +{ + u32 mode = ethoc_read(dev, MODER); + mode &= ~(MODER_RXEN | MODER_TXEN); + ethoc_write(dev, MODER, mode); +} + +static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start) +{ + struct ethoc_bd bd; + int i; + void *vma; + + dev->cur_tx = 0; + dev->dty_tx = 0; + dev->cur_rx = 0; + + ethoc_write(dev, TX_BD_NUM, dev->num_tx); + + /* setup transmission buffers */ + bd.addr = mem_start; + bd.stat = TX_BD_IRQ | TX_BD_CRC; + vma = dev->membase; + + for (i = 0; i < dev->num_tx; i++) { + if (i == dev->num_tx - 1) + bd.stat |= TX_BD_WRAP; + + ethoc_write_bd(dev, i, &bd); + bd.addr += ETHOC_BUFSIZ; + + dev->vma[i] = vma; + vma += ETHOC_BUFSIZ; + } + + bd.stat = RX_BD_EMPTY | RX_BD_IRQ; + + for (i = 0; i < dev->num_rx; i++) { + if (i == dev->num_rx - 1) + bd.stat |= RX_BD_WRAP; + + ethoc_write_bd(dev, dev->num_tx + i, &bd); + bd.addr += ETHOC_BUFSIZ; + + dev->vma[dev->num_tx + i] = vma; + vma += ETHOC_BUFSIZ; + } + + return 0; +} + +static int ethoc_reset(struct ethoc *dev) +{ + u32 mode; + + /* TODO: reset controller? 
*/ + + ethoc_disable_rx_and_tx(dev); + + /* TODO: setup registers */ + + /* enable FCS generation and automatic padding */ + mode = ethoc_read(dev, MODER); + mode |= MODER_CRC | MODER_PAD; + ethoc_write(dev, MODER, mode); + + /* set full-duplex mode */ + mode = ethoc_read(dev, MODER); + mode |= MODER_FULLD; + ethoc_write(dev, MODER, mode); + ethoc_write(dev, IPGT, 0x15); + + ethoc_ack_irq(dev, INT_MASK_ALL); + ethoc_enable_irq(dev, INT_MASK_ALL); + ethoc_enable_rx_and_tx(dev); + return 0; +} + +static unsigned int ethoc_update_rx_stats(struct ethoc *dev, + struct ethoc_bd *bd) +{ + struct net_device *netdev = dev->netdev; + unsigned int ret = 0; + + if (bd->stat & RX_BD_TL) { + dev_err(&netdev->dev, "RX: frame too long\n"); + netdev->stats.rx_length_errors++; + ret++; + } + + if (bd->stat & RX_BD_SF) { + dev_err(&netdev->dev, "RX: frame too short\n"); + netdev->stats.rx_length_errors++; + ret++; + } + + if (bd->stat & RX_BD_DN) { + dev_err(&netdev->dev, "RX: dribble nibble\n"); + netdev->stats.rx_frame_errors++; + } + + if (bd->stat & RX_BD_CRC) { + dev_err(&netdev->dev, "RX: wrong CRC\n"); + netdev->stats.rx_crc_errors++; + ret++; + } + + if (bd->stat & RX_BD_OR) { + dev_err(&netdev->dev, "RX: overrun\n"); + netdev->stats.rx_over_errors++; + ret++; + } + + if (bd->stat & RX_BD_MISS) + netdev->stats.rx_missed_errors++; + + if (bd->stat & RX_BD_LC) { + dev_err(&netdev->dev, "RX: late collision\n"); + netdev->stats.collisions++; + ret++; + } + + return ret; +} + +static int ethoc_rx(struct net_device *dev, int limit) +{ + struct ethoc *priv = netdev_priv(dev); + int count; + + for (count = 0; count < limit; ++count) { + unsigned int entry; + struct ethoc_bd bd; + + entry = priv->num_tx + priv->cur_rx; + ethoc_read_bd(priv, entry, &bd); + if (bd.stat & RX_BD_EMPTY) { + ethoc_ack_irq(priv, INT_MASK_RX); + /* If packet (interrupt) came in between checking + * BD_EMTPY and clearing the interrupt source, then we + * risk missing the packet as the RX interrupt won't + * trigger right away when we reenable it; hence, check + * BD_EMTPY here again to make sure there isn't such a + * packet waiting for us... 
+ */ + ethoc_read_bd(priv, entry, &bd); + if (bd.stat & RX_BD_EMPTY) + break; + } + + if (ethoc_update_rx_stats(priv, &bd) == 0) { + int size = bd.stat >> 16; + struct sk_buff *skb; + + size -= 4; /* strip the CRC */ + skb = netdev_alloc_skb_ip_align(dev, size); + + if (likely(skb)) { + void *src = priv->vma[entry]; + memcpy_fromio(skb_put(skb, size), src, size); + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; + netif_receive_skb(skb); + } else { + if (net_ratelimit()) + dev_warn(&dev->dev, + "low on memory - packet dropped\n"); + + dev->stats.rx_dropped++; + break; + } + } + + /* clear the buffer descriptor so it can be reused */ + bd.stat &= ~RX_BD_STATS; + bd.stat |= RX_BD_EMPTY; + ethoc_write_bd(priv, entry, &bd); + if (++priv->cur_rx == priv->num_rx) + priv->cur_rx = 0; + } + + return count; +} + +static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) +{ + struct net_device *netdev = dev->netdev; + + if (bd->stat & TX_BD_LC) { + dev_err(&netdev->dev, "TX: late collision\n"); + netdev->stats.tx_window_errors++; + } + + if (bd->stat & TX_BD_RL) { + dev_err(&netdev->dev, "TX: retransmit limit\n"); + netdev->stats.tx_aborted_errors++; + } + + if (bd->stat & TX_BD_UR) { + dev_err(&netdev->dev, "TX: underrun\n"); + netdev->stats.tx_fifo_errors++; + } + + if (bd->stat & TX_BD_CS) { + dev_err(&netdev->dev, "TX: carrier sense lost\n"); + netdev->stats.tx_carrier_errors++; + } + + if (bd->stat & TX_BD_STATS) + netdev->stats.tx_errors++; + + netdev->stats.collisions += (bd->stat >> 4) & 0xf; + netdev->stats.tx_bytes += bd->stat >> 16; + netdev->stats.tx_packets++; +} + +static int ethoc_tx(struct net_device *dev, int limit) +{ + struct ethoc *priv = netdev_priv(dev); + int count; + struct ethoc_bd bd; + + for (count = 0; count < limit; ++count) { + unsigned int entry; + + entry = priv->dty_tx & (priv->num_tx-1); + + ethoc_read_bd(priv, entry, &bd); + + if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) { + ethoc_ack_irq(priv, INT_MASK_TX); + /* If interrupt came in between reading in the BD + * and clearing the interrupt source, then we risk + * missing the event as the TX interrupt won't trigger + * right away when we reenable it; hence, check + * BD_EMPTY here again to make sure there isn't such an + * event pending... + */ + ethoc_read_bd(priv, entry, &bd); + if (bd.stat & TX_BD_READY || + (priv->dty_tx == priv->cur_tx)) + break; + } + + ethoc_update_tx_stats(priv, &bd); + priv->dty_tx++; + } + + if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) + netif_wake_queue(dev); + + return count; +} + +static irqreturn_t ethoc_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct ethoc *priv = netdev_priv(dev); + u32 pending; + u32 mask; + + /* Figure out what triggered the interrupt... + * The tricky bit here is that the interrupt source bits get + * set in INT_SOURCE for an event regardless of whether that + * event is masked or not. Thus, in order to figure out what + * triggered the interrupt, we need to remove the sources + * for all events that are currently masked. This behaviour + * is not particularly well documented but reasonable... 
+ */ + mask = ethoc_read(priv, INT_MASK); + pending = ethoc_read(priv, INT_SOURCE); + pending &= mask; + + if (unlikely(pending == 0)) + return IRQ_NONE; + + ethoc_ack_irq(priv, pending); + + /* We always handle the dropped packet interrupt */ + if (pending & INT_MASK_BUSY) { + dev_err(&dev->dev, "packet dropped\n"); + dev->stats.rx_dropped++; + } + + /* Handle receive/transmit event by switching to polling */ + if (pending & (INT_MASK_TX | INT_MASK_RX)) { + ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +static int ethoc_get_mac_address(struct net_device *dev, void *addr) +{ + struct ethoc *priv = netdev_priv(dev); + u8 *mac = (u8 *)addr; + u32 reg; + + reg = ethoc_read(priv, MAC_ADDR0); + mac[2] = (reg >> 24) & 0xff; + mac[3] = (reg >> 16) & 0xff; + mac[4] = (reg >> 8) & 0xff; + mac[5] = (reg >> 0) & 0xff; + + reg = ethoc_read(priv, MAC_ADDR1); + mac[0] = (reg >> 8) & 0xff; + mac[1] = (reg >> 0) & 0xff; + + return 0; +} + +static int ethoc_poll(struct napi_struct *napi, int budget) +{ + struct ethoc *priv = container_of(napi, struct ethoc, napi); + int rx_work_done = 0; + int tx_work_done = 0; + + rx_work_done = ethoc_rx(priv->netdev, budget); + tx_work_done = ethoc_tx(priv->netdev, budget); + + if (rx_work_done < budget && tx_work_done < budget) { + napi_complete(napi); + ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); + } + + return rx_work_done; +} + +static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) +{ + struct ethoc *priv = bus->priv; + int i; + + ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); + ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); + + for (i = 0; i < 5; i++) { + u32 status = ethoc_read(priv, MIISTATUS); + if (!(status & MIISTATUS_BUSY)) { + u32 data = ethoc_read(priv, MIIRX_DATA); + /* reset MII command register */ + ethoc_write(priv, MIICOMMAND, 0); + return data; + } + usleep_range(100, 200); + } + + return -EBUSY; +} + +static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) +{ + struct ethoc *priv = bus->priv; + int i; + + ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); + ethoc_write(priv, MIITX_DATA, val); + ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); + + for (i = 0; i < 5; i++) { + u32 stat = ethoc_read(priv, MIISTATUS); + if (!(stat & MIISTATUS_BUSY)) { + /* reset MII command register */ + ethoc_write(priv, MIICOMMAND, 0); + return 0; + } + usleep_range(100, 200); + } + + return -EBUSY; +} + +static void ethoc_mdio_poll(struct net_device *dev) +{ +} + +static int ethoc_mdio_probe(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + struct phy_device *phy; + int err; + + if (priv->phy_id != -1) + phy = priv->mdio->phy_map[priv->phy_id]; + else + phy = phy_find_first(priv->mdio); + + if (!phy) { + dev_err(&dev->dev, "no PHY found\n"); + return -ENXIO; + } + + err = phy_connect_direct(dev, phy, ethoc_mdio_poll, + PHY_INTERFACE_MODE_GMII); + if (err) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return err; + } + + priv->phy = phy; + phy->advertising &= ~(ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half); + phy->supported &= ~(SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half); + + return 0; +} + +static int ethoc_open(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + int ret; + + ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED, + dev->name, dev); + if (ret) + return ret; + + ethoc_init_ring(priv, dev->mem_start); + ethoc_reset(priv); + + if (netif_queue_stopped(dev)) { + 
dev_dbg(&dev->dev, " resuming queue\n"); + netif_wake_queue(dev); + } else { + dev_dbg(&dev->dev, " starting queue\n"); + netif_start_queue(dev); + } + + phy_start(priv->phy); + napi_enable(&priv->napi); + + if (netif_msg_ifup(priv)) { + dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", + dev->base_addr, dev->mem_start, dev->mem_end); + } + + return 0; +} + +static int ethoc_stop(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + + napi_disable(&priv->napi); + + if (priv->phy) + phy_stop(priv->phy); + + ethoc_disable_rx_and_tx(priv); + free_irq(dev->irq, dev); + + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + + return 0; +} + +static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ethoc *priv = netdev_priv(dev); + struct mii_ioctl_data *mdio = if_mii(ifr); + struct phy_device *phy = NULL; + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd != SIOCGMIIPHY) { + if (mdio->phy_id >= PHY_MAX_ADDR) + return -ERANGE; + + phy = priv->mdio->phy_map[mdio->phy_id]; + if (!phy) + return -ENODEV; + } else { + phy = priv->phy; + } + + return phy_mii_ioctl(phy, ifr, cmd); +} + +static void ethoc_do_set_mac_address(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + unsigned char *mac = dev->dev_addr; + + ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) | + (mac[4] << 8) | (mac[5] << 0)); + ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0)); +} + +static int ethoc_set_mac_address(struct net_device *dev, void *p) +{ + const struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + ethoc_do_set_mac_address(dev); + return 0; +} + +static void ethoc_set_multicast_list(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + u32 mode = ethoc_read(priv, MODER); + struct netdev_hw_addr *ha; + u32 hash[2] = { 0, 0 }; + + /* set loopback mode if requested */ + if (dev->flags & IFF_LOOPBACK) + mode |= MODER_LOOP; + else + mode &= ~MODER_LOOP; + + /* receive broadcast frames if requested */ + if (dev->flags & IFF_BROADCAST) + mode &= ~MODER_BRO; + else + mode |= MODER_BRO; + + /* enable promiscuous mode if requested */ + if (dev->flags & IFF_PROMISC) + mode |= MODER_PRO; + else + mode &= ~MODER_PRO; + + ethoc_write(priv, MODER, mode); + + /* receive multicast frames */ + if (dev->flags & IFF_ALLMULTI) { + hash[0] = 0xffffffff; + hash[1] = 0xffffffff; + } else { + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr); + int bit = (crc >> 26) & 0x3f; + hash[bit >> 5] |= 1 << (bit & 0x1f); + } + } + + ethoc_write(priv, ETH_HASH0, hash[0]); + ethoc_write(priv, ETH_HASH1, hash[1]); +} + +static int ethoc_change_mtu(struct net_device *dev, int new_mtu) +{ + return -ENOSYS; +} + +static void ethoc_tx_timeout(struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + u32 pending = ethoc_read(priv, INT_SOURCE); + if (likely(pending)) + ethoc_interrupt(dev->irq, dev); +} + +static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ethoc *priv = netdev_priv(dev); + struct ethoc_bd bd; + unsigned int entry; + void *dest; + + if (unlikely(skb->len > ETHOC_BUFSIZ)) { + dev->stats.tx_errors++; + goto out; + } + + entry = priv->cur_tx % priv->num_tx; + spin_lock_irq(&priv->lock); + priv->cur_tx++; + + ethoc_read_bd(priv, entry, &bd); + if (unlikely(skb->len < ETHOC_ZLEN)) + bd.stat |= TX_BD_PAD; + else + bd.stat &= ~TX_BD_PAD; + + dest = 
priv->vma[entry]; + memcpy_toio(dest, skb->data, skb->len); + + bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); + bd.stat |= TX_BD_LEN(skb->len); + ethoc_write_bd(priv, entry, &bd); + + bd.stat |= TX_BD_READY; + ethoc_write_bd(priv, entry, &bd); + + if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) { + dev_dbg(&dev->dev, "stopping queue\n"); + netif_stop_queue(dev); + } + + spin_unlock_irq(&priv->lock); + skb_tx_timestamp(skb); +out: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ethoc *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phy; + + if (!phydev) + return -EOPNOTSUPP; + + return phy_ethtool_gset(phydev, cmd); +} + +static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ethoc *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phy; + + if (!phydev) + return -EOPNOTSUPP; + + return phy_ethtool_sset(phydev, cmd); +} + +static int ethoc_get_regs_len(struct net_device *netdev) +{ + return ETH_END; +} + +static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct ethoc *priv = netdev_priv(dev); + u32 *regs_buff = p; + unsigned i; + + regs->version = 0; + for (i = 0; i < ETH_END / sizeof(u32); ++i) + regs_buff[i] = ethoc_read(priv, i * sizeof(u32)); +} + +static void ethoc_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct ethoc *priv = netdev_priv(dev); + + ring->rx_max_pending = priv->num_bd - 1; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->tx_max_pending = priv->num_bd - 1; + + ring->rx_pending = priv->num_rx; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; + ring->tx_pending = priv->num_tx; +} + +static int ethoc_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct ethoc *priv = netdev_priv(dev); + + if (ring->tx_pending < 1 || ring->rx_pending < 1 || + ring->tx_pending + ring->rx_pending > priv->num_bd) + return -EINVAL; + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(dev)) { + netif_tx_disable(dev); + ethoc_disable_rx_and_tx(priv); + ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX); + synchronize_irq(dev->irq); + } + + priv->num_tx = rounddown_pow_of_two(ring->tx_pending); + priv->num_rx = ring->rx_pending; + ethoc_init_ring(priv, dev->mem_start); + + if (netif_running(dev)) { + ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX); + ethoc_enable_rx_and_tx(priv); + netif_wake_queue(dev); + } + return 0; +} + +const struct ethtool_ops ethoc_ethtool_ops = { + .get_settings = ethoc_get_settings, + .set_settings = ethoc_set_settings, + .get_regs_len = ethoc_get_regs_len, + .get_regs = ethoc_get_regs, + .get_link = ethtool_op_get_link, + .get_ringparam = ethoc_get_ringparam, + .set_ringparam = ethoc_set_ringparam, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct net_device_ops ethoc_netdev_ops = { + .ndo_open = ethoc_open, + .ndo_stop = ethoc_stop, + .ndo_do_ioctl = ethoc_ioctl, + .ndo_set_mac_address = ethoc_set_mac_address, + .ndo_set_rx_mode = ethoc_set_multicast_list, + .ndo_change_mtu = ethoc_change_mtu, + .ndo_tx_timeout = ethoc_tx_timeout, + .ndo_start_xmit = ethoc_start_xmit, +}; + +/** + * ethoc_probe - initialize OpenCores ethernet MAC + * pdev: platform device + */ +static int ethoc_probe(struct platform_device *pdev) +{ + struct net_device *netdev = NULL; + struct resource *res = NULL; + struct resource *mmio = NULL; + 
struct resource *mem = NULL; + struct ethoc *priv = NULL; + unsigned int phy; + int num_bd; + int ret = 0; + bool random_mac = false; + struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev); + u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0; + + /* allocate networking device */ + netdev = alloc_etherdev(sizeof(struct ethoc)); + if (!netdev) { + ret = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + platform_set_drvdata(pdev, netdev); + + /* obtain I/O memory space */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "cannot obtain I/O memory space\n"); + ret = -ENXIO; + goto free; + } + + mmio = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), res->name); + if (!mmio) { + dev_err(&pdev->dev, "cannot request I/O memory space\n"); + ret = -ENXIO; + goto free; + } + + netdev->base_addr = mmio->start; + + /* obtain buffer memory space */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + mem = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), res->name); + if (!mem) { + dev_err(&pdev->dev, "cannot request memory space\n"); + ret = -ENXIO; + goto free; + } + + netdev->mem_start = mem->start; + netdev->mem_end = mem->end; + } + + + /* obtain device IRQ number */ + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(&pdev->dev, "cannot obtain IRQ\n"); + ret = -ENXIO; + goto free; + } + + netdev->irq = res->start; + + /* setup driver-private data */ + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dma_alloc = 0; + priv->io_region_size = resource_size(mmio); + + priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, + resource_size(mmio)); + if (!priv->iobase) { + dev_err(&pdev->dev, "cannot remap I/O memory space\n"); + ret = -ENXIO; + goto error; + } + + if (netdev->mem_end) { + priv->membase = devm_ioremap_nocache(&pdev->dev, + netdev->mem_start, resource_size(mem)); + if (!priv->membase) { + dev_err(&pdev->dev, "cannot remap memory space\n"); + ret = -ENXIO; + goto error; + } + } else { + /* Allocate buffer memory */ + priv->membase = dmam_alloc_coherent(&pdev->dev, + buffer_size, (void *)&netdev->mem_start, + GFP_KERNEL); + if (!priv->membase) { + dev_err(&pdev->dev, "cannot allocate %dB buffer\n", + buffer_size); + ret = -ENOMEM; + goto error; + } + netdev->mem_end = netdev->mem_start + buffer_size; + priv->dma_alloc = buffer_size; + } + + /* calculate the number of TX/RX buffers, maximum 128 supported */ + num_bd = min_t(unsigned int, + 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); + if (num_bd < 4) { + ret = -ENODEV; + goto error; + } + priv->num_bd = num_bd; + /* num_tx must be a power of two */ + priv->num_tx = rounddown_pow_of_two(num_bd >> 1); + priv->num_rx = num_bd - priv->num_tx; + + dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n", + priv->num_tx, priv->num_rx); + + priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL); + if (!priv->vma) { + ret = -ENOMEM; + goto error; + } + + /* Allow the platform setup code to pass in a MAC address. */ + if (pdata) { + memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); + priv->phy_id = pdata->phy_id; + } else { + priv->phy_id = -1; + +#ifdef CONFIG_OF + { + const uint8_t *mac; + + mac = of_get_property(pdev->dev.of_node, + "local-mac-address", + NULL); + if (mac) + memcpy(netdev->dev_addr, mac, IFHWADDRLEN); + } +#endif + } + + /* Check that the given MAC address is valid. 
If it isn't, read the + * current MAC from the controller. + */ + if (!is_valid_ether_addr(netdev->dev_addr)) + ethoc_get_mac_address(netdev, netdev->dev_addr); + + /* Check the MAC again for validity, if it still isn't choose and + * program a random one. + */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_random_addr(netdev->dev_addr); + random_mac = true; + } + + ethoc_do_set_mac_address(netdev); + + if (random_mac) + netdev->addr_assign_type = NET_ADDR_RANDOM; + + /* Allow the platform setup code to adjust MII management bus clock. */ + if (!eth_clkfreq) { + struct clk *clk = devm_clk_get(&pdev->dev, NULL); + + if (!IS_ERR(clk)) { + priv->clk = clk; + clk_prepare_enable(clk); + eth_clkfreq = clk_get_rate(clk); + } + } + if (eth_clkfreq) { + u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1); + + if (!clkdiv) + clkdiv = 2; + dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv); + ethoc_write(priv, MIIMODER, + (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) | + clkdiv); + } + + /* register MII bus */ + priv->mdio = mdiobus_alloc(); + if (!priv->mdio) { + ret = -ENOMEM; + goto free; + } + + priv->mdio->name = "ethoc-mdio"; + snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d", + priv->mdio->name, pdev->id); + priv->mdio->read = ethoc_mdio_read; + priv->mdio->write = ethoc_mdio_write; + priv->mdio->priv = priv; + + priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!priv->mdio->irq) { + ret = -ENOMEM; + goto free_mdio; + } + + for (phy = 0; phy < PHY_MAX_ADDR; phy++) + priv->mdio->irq[phy] = PHY_POLL; + + ret = mdiobus_register(priv->mdio); + if (ret) { + dev_err(&netdev->dev, "failed to register MDIO bus\n"); + goto free_mdio; + } + + ret = ethoc_mdio_probe(netdev); + if (ret) { + dev_err(&netdev->dev, "failed to probe MDIO bus\n"); + goto error; + } + + /* setup the net_device structure */ + netdev->netdev_ops = &ethoc_netdev_ops; + netdev->watchdog_timeo = ETHOC_TIMEOUT; + netdev->features |= 0; + netdev->ethtool_ops = &ethoc_ethtool_ops; + + /* setup NAPI */ + netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); + + spin_lock_init(&priv->lock); + + ret = register_netdev(netdev); + if (ret < 0) { + dev_err(&netdev->dev, "failed to register interface\n"); + goto error2; + } + + goto out; + +error2: + netif_napi_del(&priv->napi); +error: + mdiobus_unregister(priv->mdio); +free_mdio: + kfree(priv->mdio->irq); + mdiobus_free(priv->mdio); +free: + if (priv->clk) + clk_disable_unprepare(priv->clk); + free_netdev(netdev); +out: + return ret; +} + +/** + * ethoc_remove - shutdown OpenCores ethernet MAC + * @pdev: platform device + */ +static int ethoc_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + struct ethoc *priv = netdev_priv(netdev); + + if (netdev) { + netif_napi_del(&priv->napi); + phy_disconnect(priv->phy); + priv->phy = NULL; + + if (priv->mdio) { + mdiobus_unregister(priv->mdio); + kfree(priv->mdio->irq); + mdiobus_free(priv->mdio); + } + if (priv->clk) + clk_disable_unprepare(priv->clk); + unregister_netdev(netdev); + free_netdev(netdev); + } + + return 0; +} + +#ifdef CONFIG_PM +static int ethoc_suspend(struct platform_device *pdev, pm_message_t state) +{ + return -ENOSYS; +} + +static int ethoc_resume(struct platform_device *pdev) +{ + return -ENOSYS; +} +#else +# define ethoc_suspend NULL +# define ethoc_resume NULL +#endif + +static const struct of_device_id ethoc_match[] = { + { .compatible = "opencores,ethoc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, ethoc_match); + +static struct platform_driver 
ethoc_driver = { + .probe = ethoc_probe, + .remove = ethoc_remove, + .suspend = ethoc_suspend, + .resume = ethoc_resume, + .driver = { + .name = "ethoc", + .of_match_table = ethoc_match, + }, +}; + +module_platform_driver(ethoc_driver); + +MODULE_AUTHOR("Thierry Reding "); +MODULE_DESCRIPTION("OpenCores Ethernet MAC driver"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig new file mode 100644 index 000000000..5918c6891 --- /dev/null +++ b/drivers/net/ethernet/faraday/Kconfig @@ -0,0 +1,39 @@ +# +# Faraday device configuration +# + +config NET_VENDOR_FARADAY + bool "Faraday devices" + default y + depends on ARM + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Faraday cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_FARADAY + +config FTMAC100 + tristate "Faraday FTMAC100 10/100 Ethernet support" + depends on ARM + select MII + ---help--- + This driver supports the FTMAC100 10/100 Ethernet controller + from Faraday. It is used on Faraday A320, Andes AG101 and some + other ARM/NDS32 SoC's. + +config FTGMAC100 + tristate "Faraday FTGMAC100 Gigabit Ethernet support" + depends on ARM + select PHYLIB + ---help--- + This driver supports the FTGMAC100 Gigabit Ethernet controller + from Faraday. It is used on Faraday A369, Andes AG102 and some + other ARM/NDS32 SoC's. + +endif # NET_VENDOR_FARADAY diff --git a/drivers/net/ethernet/faraday/Makefile b/drivers/net/ethernet/faraday/Makefile new file mode 100644 index 000000000..408b53980 --- /dev/null +++ b/drivers/net/ethernet/faraday/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Faraday device drivers. +# + +obj-$(CONFIG_FTGMAC100) += ftgmac100.o +obj-$(CONFIG_FTMAC100) += ftmac100.o diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c new file mode 100644 index 000000000..6d0c5d5ee --- /dev/null +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -0,0 +1,1345 @@ +/* + * Faraday FTGMAC100 Gigabit Ethernet + * + * (C) Copyright 2009-2011 Faraday Technology + * Po-Yu Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ftgmac100.h" + +#define DRV_NAME "ftgmac100" +#define DRV_VERSION "0.7" + +#define RX_QUEUE_ENTRIES 256 /* must be power of 2 */ +#define TX_QUEUE_ENTRIES 512 /* must be power of 2 */ + +#define MAX_PKT_SIZE 1518 +#define RX_BUF_SIZE PAGE_SIZE /* must be smaller than 0x3fff */ + +/****************************************************************************** + * private data + *****************************************************************************/ +struct ftgmac100_descs { + struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; + struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES]; +}; + +struct ftgmac100 { + struct resource *res; + void __iomem *base; + int irq; + + struct ftgmac100_descs *descs; + dma_addr_t descs_dma_addr; + + unsigned int rx_pointer; + unsigned int tx_clean_pointer; + unsigned int tx_pointer; + unsigned int tx_pending; + + spinlock_t tx_lock; + + struct net_device *netdev; + struct device *dev; + struct napi_struct napi; + + struct mii_bus *mii_bus; + int phy_irq[PHY_MAX_ADDR]; + struct phy_device *phydev; + int old_speed; +}; + +static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, + struct ftgmac100_rxdes *rxdes, gfp_t gfp); + +/****************************************************************************** + * internal functions (hardware register access) + *****************************************************************************/ +#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \ + FTGMAC100_INT_XPKT_ETH | \ + FTGMAC100_INT_XPKT_LOST | \ + FTGMAC100_INT_AHB_ERR | \ + FTGMAC100_INT_PHYSTS_CHG | \ + FTGMAC100_INT_RPKT_BUF | \ + FTGMAC100_INT_NO_RXBUF) + +static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr) +{ + iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR); +} + +static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv, + unsigned int size) +{ + size = FTGMAC100_RBSR_SIZE(size); + iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR); +} + +static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv, + dma_addr_t addr) +{ + iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); +} + +static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv) +{ + iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD); +} + +static int ftgmac100_reset_hw(struct ftgmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + int i; + + /* NOTE: reset clears all registers */ + iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR); + for (i = 0; i < 5; i++) { + unsigned int maccr; + + maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); + if (!(maccr & FTGMAC100_MACCR_SW_RST)) + return 0; + + udelay(1000); + } + + netdev_err(netdev, "software reset failed\n"); + return -EIO; +} + +static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac) +{ + unsigned int maddr = mac[0] << 8 | mac[1]; + unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; + + iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR); + iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); +} + +static void ftgmac100_init_hw(struct ftgmac100 *priv) +{ + /* setup ring buffer base registers */ + ftgmac100_set_rx_ring_base(priv, + priv->descs_dma_addr + + offsetof(struct ftgmac100_descs, rxdes)); + ftgmac100_set_normal_prio_tx_ring_base(priv, + priv->descs_dma_addr + + offsetof(struct ftgmac100_descs, txdes)); 
+ + ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE); + + iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC); + + ftgmac100_set_mac(priv, priv->netdev->dev_addr); +} + +#define MACCR_ENABLE_ALL (FTGMAC100_MACCR_TXDMA_EN | \ + FTGMAC100_MACCR_RXDMA_EN | \ + FTGMAC100_MACCR_TXMAC_EN | \ + FTGMAC100_MACCR_RXMAC_EN | \ + FTGMAC100_MACCR_FULLDUP | \ + FTGMAC100_MACCR_CRC_APD | \ + FTGMAC100_MACCR_RX_RUNT | \ + FTGMAC100_MACCR_RX_BROADPKT) + +static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed) +{ + int maccr = MACCR_ENABLE_ALL; + + switch (speed) { + default: + case 10: + break; + + case 100: + maccr |= FTGMAC100_MACCR_FAST_MODE; + break; + + case 1000: + maccr |= FTGMAC100_MACCR_GIGA_MODE; + break; + } + + iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); +} + +static void ftgmac100_stop_hw(struct ftgmac100 *priv) +{ + iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR); +} + +/****************************************************************************** + * internal functions (receive descriptor) + *****************************************************************************/ +static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS); +} + +static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS); +} + +static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY); +} + +static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes) +{ + /* clear status bits */ + rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR); +} + +static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR); +} + +static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR); +} + +static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL); +} + +static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT); +} + +static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB); +} + +static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes) +{ + return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC; +} + +static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST); +} + +static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes) +{ + rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR); +} + +static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes, + dma_addr_t addr) +{ + rxdes->rxdes3 = cpu_to_le32(addr); +} + +static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes) +{ + return le32_to_cpu(rxdes->rxdes3); +} + +static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes) +{ + return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) == + cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP); +} + +static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes) +{ + return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) == + cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP); +} + +static bool ftgmac100_rxdes_tcpcs_err(struct 
ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR); +} + +static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR); +} + +static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes) +{ + return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR); +} + +/* + * rxdes2 is not used by hardware. We use it to keep track of page. + * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). + */ +static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page) +{ + rxdes->rxdes2 = (unsigned int)page; +} + +static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes) +{ + return (struct page *)rxdes->rxdes2; +} + +/****************************************************************************** + * internal functions (receive) + *****************************************************************************/ +static int ftgmac100_next_rx_pointer(int pointer) +{ + return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); +} + +static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv) +{ + priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer); +} + +static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv) +{ + return &priv->descs->rxdes[priv->rx_pointer]; +} + +static struct ftgmac100_rxdes * +ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv) +{ + struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv); + + while (ftgmac100_rxdes_packet_ready(rxdes)) { + if (ftgmac100_rxdes_first_segment(rxdes)) + return rxdes; + + ftgmac100_rxdes_set_dma_own(rxdes); + ftgmac100_rx_pointer_advance(priv); + rxdes = ftgmac100_current_rxdes(priv); + } + + return NULL; +} + +static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv, + struct ftgmac100_rxdes *rxdes) +{ + struct net_device *netdev = priv->netdev; + bool error = false; + + if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx err\n"); + + netdev->stats.rx_errors++; + error = true; + } + + if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx crc err\n"); + + netdev->stats.rx_crc_errors++; + error = true; + } else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx IP checksum err\n"); + + error = true; + } + + if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx frame too long\n"); + + netdev->stats.rx_length_errors++; + error = true; + } else if (unlikely(ftgmac100_rxdes_runt(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx runt\n"); + + netdev->stats.rx_length_errors++; + error = true; + } else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx odd nibble\n"); + + netdev->stats.rx_length_errors++; + error = true; + } + + return error; +} + +static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv); + bool done = false; + + if (net_ratelimit()) + netdev_dbg(netdev, "drop packet %p\n", rxdes); + + do { + if (ftgmac100_rxdes_last_segment(rxdes)) + done = true; + + ftgmac100_rxdes_set_dma_own(rxdes); + ftgmac100_rx_pointer_advance(priv); + rxdes = ftgmac100_current_rxdes(priv); + } while (!done && ftgmac100_rxdes_packet_ready(rxdes)); + + netdev->stats.rx_dropped++; +} 
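[Editor's note] The receive path above walks the descriptor ring with ftgmac100_next_rx_pointer(), which depends on RX_QUEUE_ENTRIES being a power of two (as the comment beside its definition requires): masking with (RX_QUEUE_ENTRIES - 1) wraps the index without a modulo or a branch. A minimal stand-alone sketch of that arithmetic follows; the QUEUE_ENTRIES value and the test harness are illustrative only and are not part of the driver.

#include <assert.h>

#define QUEUE_ENTRIES 256                 /* must be a power of 2, as in the driver */

/* same form as ftgmac100_next_rx_pointer()/ftgmac100_next_tx_pointer() */
static int next_pointer(int pointer)
{
        return (pointer + 1) & (QUEUE_ENTRIES - 1);
}

int main(void)
{
        int p = QUEUE_ENTRIES - 1;

        /* advancing past the last slot wraps back to slot 0 */
        assert(next_pointer(p) == 0);

        /* one full lap around the ring returns to the starting slot */
        for (int i = 0; i < QUEUE_ENTRIES; i++)
                p = next_pointer(p);
        assert(p == QUEUE_ENTRIES - 1);

        return 0;
}

If the ring size were not a power of two, (size - 1) would not be an all-ones mask and the AND would skip or repeat slots, which is why both queue-size constants carry the "must be power of 2" comment.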
+ +static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed) +{ + struct net_device *netdev = priv->netdev; + struct ftgmac100_rxdes *rxdes; + struct sk_buff *skb; + bool done = false; + + rxdes = ftgmac100_rx_locate_first_segment(priv); + if (!rxdes) + return false; + + if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) { + ftgmac100_rx_drop_packet(priv); + return true; + } + + /* start processing */ + skb = netdev_alloc_skb_ip_align(netdev, 128); + if (unlikely(!skb)) { + if (net_ratelimit()) + netdev_err(netdev, "rx skb alloc failed\n"); + + ftgmac100_rx_drop_packet(priv); + return true; + } + + if (unlikely(ftgmac100_rxdes_multicast(rxdes))) + netdev->stats.multicast++; + + /* + * It seems that HW does checksum incorrectly with fragmented packets, + * so we are conservative here - if HW checksum error, let software do + * the checksum again. + */ + if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) || + (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + do { + dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes); + struct page *page = ftgmac100_rxdes_get_page(rxdes); + unsigned int size; + + dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + + size = ftgmac100_rxdes_data_length(rxdes); + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size); + + skb->len += size; + skb->data_len += size; + skb->truesize += PAGE_SIZE; + + if (ftgmac100_rxdes_last_segment(rxdes)) + done = true; + + ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); + + ftgmac100_rx_pointer_advance(priv); + rxdes = ftgmac100_current_rxdes(priv); + } while (!done); + + /* Small frames are copied into linear part of skb to free one page */ + if (skb->len <= 128) { + skb->truesize -= PAGE_SIZE; + __pskb_pull_tail(skb, skb->len); + } else { + /* We pull the minimum amount into linear part */ + __pskb_pull_tail(skb, ETH_HLEN); + } + skb->protocol = eth_type_trans(skb, netdev); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + + /* push packet to protocol stack */ + napi_gro_receive(&priv->napi, skb); + + (*processed)++; + return true; +} + +/****************************************************************************** + * internal functions (transmit descriptor) + *****************************************************************************/ +static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes) +{ + /* clear all except end of ring bit */ + txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR); + txdes->txdes1 = 0; + txdes->txdes2 = 0; + txdes->txdes3 = 0; +} + +static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes) +{ + return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN); +} + +static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes) +{ + /* + * Make sure dma own bit will not be set before any other + * descriptor fields. 
+ */ + wmb(); + txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN); +} + +static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes) +{ + txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR); +} + +static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes) +{ + txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS); +} + +static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes) +{ + txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS); +} + +static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes, + unsigned int len) +{ + txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len)); +} + +static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC); +} + +static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM); +} + +static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM); +} + +static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM); +} + +static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes, + dma_addr_t addr) +{ + txdes->txdes3 = cpu_to_le32(addr); +} + +static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes) +{ + return le32_to_cpu(txdes->txdes3); +} + +/* + * txdes2 is not used by hardware. We use it to keep track of socket buffer. + * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). + */ +static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes, + struct sk_buff *skb) +{ + txdes->txdes2 = (unsigned int)skb; +} + +static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes) +{ + return (struct sk_buff *)txdes->txdes2; +} + +/****************************************************************************** + * internal functions (transmit) + *****************************************************************************/ +static int ftgmac100_next_tx_pointer(int pointer) +{ + return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); +} + +static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv) +{ + priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer); +} + +static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv) +{ + priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer); +} + +static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv) +{ + return &priv->descs->txdes[priv->tx_pointer]; +} + +static struct ftgmac100_txdes * +ftgmac100_current_clean_txdes(struct ftgmac100 *priv) +{ + return &priv->descs->txdes[priv->tx_clean_pointer]; +} + +static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + struct ftgmac100_txdes *txdes; + struct sk_buff *skb; + dma_addr_t map; + + if (priv->tx_pending == 0) + return false; + + txdes = ftgmac100_current_clean_txdes(priv); + + if (ftgmac100_txdes_owned_by_dma(txdes)) + return false; + + skb = ftgmac100_txdes_get_skb(txdes); + map = ftgmac100_txdes_get_dma_addr(txdes); + + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + + dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); + + dev_kfree_skb(skb); + + ftgmac100_txdes_reset(txdes); + + ftgmac100_tx_clean_pointer_advance(priv); + + spin_lock(&priv->tx_lock); + priv->tx_pending--; + 
spin_unlock(&priv->tx_lock); + netif_wake_queue(netdev); + + return true; +} + +static void ftgmac100_tx_complete(struct ftgmac100 *priv) +{ + while (ftgmac100_tx_complete_packet(priv)) + ; +} + +static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb, + dma_addr_t map) +{ + struct net_device *netdev = priv->netdev; + struct ftgmac100_txdes *txdes; + unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; + + txdes = ftgmac100_current_txdes(priv); + ftgmac100_tx_pointer_advance(priv); + + /* setup TX descriptor */ + ftgmac100_txdes_set_skb(txdes, skb); + ftgmac100_txdes_set_dma_addr(txdes, map); + ftgmac100_txdes_set_buffer_size(txdes, len); + + ftgmac100_txdes_set_first_segment(txdes); + ftgmac100_txdes_set_last_segment(txdes); + ftgmac100_txdes_set_txint(txdes); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + __be16 protocol = skb->protocol; + + if (protocol == cpu_to_be16(ETH_P_IP)) { + u8 ip_proto = ip_hdr(skb)->protocol; + + ftgmac100_txdes_set_ipcs(txdes); + if (ip_proto == IPPROTO_TCP) + ftgmac100_txdes_set_tcpcs(txdes); + else if (ip_proto == IPPROTO_UDP) + ftgmac100_txdes_set_udpcs(txdes); + } + } + + spin_lock(&priv->tx_lock); + priv->tx_pending++; + if (priv->tx_pending == TX_QUEUE_ENTRIES) + netif_stop_queue(netdev); + + /* start transmit */ + ftgmac100_txdes_set_dma_own(txdes); + spin_unlock(&priv->tx_lock); + + ftgmac100_txdma_normal_prio_start_polling(priv); + + return NETDEV_TX_OK; +} + +/****************************************************************************** + * internal functions (buffer) + *****************************************************************************/ +static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, + struct ftgmac100_rxdes *rxdes, gfp_t gfp) +{ + struct net_device *netdev = priv->netdev; + struct page *page; + dma_addr_t map; + + page = alloc_page(gfp); + if (!page) { + if (net_ratelimit()) + netdev_err(netdev, "failed to allocate rx page\n"); + return -ENOMEM; + } + + map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, map))) { + if (net_ratelimit()) + netdev_err(netdev, "failed to map rx page\n"); + __free_page(page); + return -ENOMEM; + } + + ftgmac100_rxdes_set_page(rxdes, page); + ftgmac100_rxdes_set_dma_addr(rxdes, map); + ftgmac100_rxdes_set_dma_own(rxdes); + return 0; +} + +static void ftgmac100_free_buffers(struct ftgmac100 *priv) +{ + int i; + + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { + struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i]; + struct page *page = ftgmac100_rxdes_get_page(rxdes); + dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes); + + if (!page) + continue; + + dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_page(page); + } + + for (i = 0; i < TX_QUEUE_ENTRIES; i++) { + struct ftgmac100_txdes *txdes = &priv->descs->txdes[i]; + struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes); + dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes); + + if (!skb) + continue; + + dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); + kfree_skb(skb); + } + + dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs), + priv->descs, priv->descs_dma_addr); +} + +static int ftgmac100_alloc_buffers(struct ftgmac100 *priv) +{ + int i; + + priv->descs = dma_zalloc_coherent(priv->dev, + sizeof(struct ftgmac100_descs), + &priv->descs_dma_addr, GFP_KERNEL); + if (!priv->descs) + return -ENOMEM; + + /* initialize RX ring */ + ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); + + for (i = 
0; i < RX_QUEUE_ENTRIES; i++) { + struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i]; + + if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) + goto err; + } + + /* initialize TX ring */ + ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); + return 0; + +err: + ftgmac100_free_buffers(priv); + return -ENOMEM; +} + +/****************************************************************************** + * internal functions (mdio) + *****************************************************************************/ +static void ftgmac100_adjust_link(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct phy_device *phydev = priv->phydev; + int ier; + + if (phydev->speed == priv->old_speed) + return; + + priv->old_speed = phydev->speed; + + ier = ioread32(priv->base + FTGMAC100_OFFSET_IER); + + /* disable all interrupts */ + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + + netif_stop_queue(netdev); + ftgmac100_stop_hw(priv); + + netif_start_queue(netdev); + ftgmac100_init_hw(priv); + ftgmac100_start_hw(priv, phydev->speed); + + /* re-enable interrupts */ + iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER); +} + +static int ftgmac100_mii_probe(struct ftgmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + struct phy_device *phydev = NULL; + int i; + + /* search for connect PHY device */ + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *tmp = priv->mii_bus->phy_map[i]; + + if (tmp) { + phydev = tmp; + break; + } + } + + /* now we are supposed to have a proper phydev, to attach to... */ + if (!phydev) { + netdev_info(netdev, "%s: no PHY found\n", netdev->name); + return -ENODEV; + } + + phydev = phy_connect(netdev, dev_name(&phydev->dev), + &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII); + + if (IS_ERR(phydev)) { + netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); + return PTR_ERR(phydev); + } + + priv->phydev = phydev; + return 0; +} + +/****************************************************************************** + * struct mii_bus functions + *****************************************************************************/ +static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct net_device *netdev = bus->priv; + struct ftgmac100 *priv = netdev_priv(netdev); + unsigned int phycr; + int i; + + phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); + + /* preserve MDC cycle threshold */ + phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; + + phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | + FTGMAC100_PHYCR_REGAD(regnum) | + FTGMAC100_PHYCR_MIIRD; + + iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); + + for (i = 0; i < 10; i++) { + phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); + + if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) { + int data; + + data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA); + return FTGMAC100_PHYDATA_MIIRDATA(data); + } + + udelay(100); + } + + netdev_err(netdev, "mdio read timed out\n"); + return -EIO; +} + +static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr, + int regnum, u16 value) +{ + struct net_device *netdev = bus->priv; + struct ftgmac100 *priv = netdev_priv(netdev); + unsigned int phycr; + int data; + int i; + + phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); + + /* preserve MDC cycle threshold */ + phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK; + + phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) | + FTGMAC100_PHYCR_REGAD(regnum) | + FTGMAC100_PHYCR_MIIWR; + + data = FTGMAC100_PHYDATA_MIIWDATA(value); + + iowrite32(data, 
priv->base + FTGMAC100_OFFSET_PHYDATA); + iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR); + + for (i = 0; i < 10; i++) { + phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR); + + if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0) + return 0; + + udelay(100); + } + + netdev_err(netdev, "mdio write timed out\n"); + return -EIO; +} + +/****************************************************************************** + * struct ethtool_ops functions + *****************************************************************************/ +static void ftgmac100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); +} + +static int ftgmac100_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int ftgmac100_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static const struct ethtool_ops ftgmac100_ethtool_ops = { + .set_settings = ftgmac100_set_settings, + .get_settings = ftgmac100_get_settings, + .get_drvinfo = ftgmac100_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +/****************************************************************************** + * interrupt handler + *****************************************************************************/ +static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct ftgmac100 *priv = netdev_priv(netdev); + + if (likely(netif_running(netdev))) { + /* Disable interrupts for polling */ + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +/****************************************************************************** + * struct napi_struct functions + *****************************************************************************/ +static int ftgmac100_poll(struct napi_struct *napi, int budget) +{ + struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi); + struct net_device *netdev = priv->netdev; + unsigned int status; + bool completed = true; + int rx = 0; + + status = ioread32(priv->base + FTGMAC100_OFFSET_ISR); + iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR); + + if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) { + /* + * FTGMAC100_INT_RPKT_BUF: + * RX DMA has received packets into RX buffer successfully + * + * FTGMAC100_INT_NO_RXBUF: + * RX buffer unavailable + */ + bool retry; + + do { + retry = ftgmac100_rx_packet(priv, &rx); + } while (retry && rx < budget); + + if (retry && rx == budget) + completed = false; + } + + if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) { + /* + * FTGMAC100_INT_XPKT_ETH: + * packet transmitted to ethernet successfully + * + * FTGMAC100_INT_XPKT_LOST: + * packet transmitted to ethernet lost due to late + * collision or excessive collision + */ + ftgmac100_tx_complete(priv); + } + + if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST | + FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) { + if (net_ratelimit()) + netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, + status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "", + status & FTGMAC100_INT_RPKT_LOST ? 
"RPKT_LOST " : "", + status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "", + status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : ""); + + if (status & FTGMAC100_INT_NO_RXBUF) { + /* RX buffer unavailable */ + netdev->stats.rx_over_errors++; + } + + if (status & FTGMAC100_INT_RPKT_LOST) { + /* received packet lost due to RX FIFO full */ + netdev->stats.rx_fifo_errors++; + } + } + + if (completed) { + napi_complete(napi); + + /* enable all interrupts */ + iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER); + } + + return rx; +} + +/****************************************************************************** + * struct net_device_ops functions + *****************************************************************************/ +static int ftgmac100_open(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + int err; + + err = ftgmac100_alloc_buffers(priv); + if (err) { + netdev_err(netdev, "failed to allocate buffers\n"); + goto err_alloc; + } + + err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev); + if (err) { + netdev_err(netdev, "failed to request irq %d\n", priv->irq); + goto err_irq; + } + + priv->rx_pointer = 0; + priv->tx_clean_pointer = 0; + priv->tx_pointer = 0; + priv->tx_pending = 0; + + err = ftgmac100_reset_hw(priv); + if (err) + goto err_hw; + + ftgmac100_init_hw(priv); + ftgmac100_start_hw(priv, 10); + + phy_start(priv->phydev); + + napi_enable(&priv->napi); + netif_start_queue(netdev); + + /* enable all interrupts */ + iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER); + return 0; + +err_hw: + free_irq(priv->irq, netdev); +err_irq: + ftgmac100_free_buffers(priv); +err_alloc: + return err; +} + +static int ftgmac100_stop(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + /* disable all interrupts */ + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); + + netif_stop_queue(netdev); + napi_disable(&priv->napi); + phy_stop(priv->phydev); + + ftgmac100_stop_hw(priv); + free_irq(priv->irq, netdev); + ftgmac100_free_buffers(priv); + + return 0; +} + +static int ftgmac100_hard_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + dma_addr_t map; + + if (unlikely(skb->len > MAX_PKT_SIZE)) { + if (net_ratelimit()) + netdev_dbg(netdev, "tx packet too big\n"); + + netdev->stats.tx_dropped++; + kfree_skb(skb); + return NETDEV_TX_OK; + } + + map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, map))) { + /* drop packet */ + if (net_ratelimit()) + netdev_err(netdev, "map socket buffer failed\n"); + + netdev->stats.tx_dropped++; + kfree_skb(skb); + return NETDEV_TX_OK; + } + + return ftgmac100_xmit(priv, skb, map); +} + +/* optional */ +static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + return phy_mii_ioctl(priv->phydev, ifr, cmd); +} + +static const struct net_device_ops ftgmac100_netdev_ops = { + .ndo_open = ftgmac100_open, + .ndo_stop = ftgmac100_stop, + .ndo_start_xmit = ftgmac100_hard_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = ftgmac100_do_ioctl, +}; + +/****************************************************************************** + * struct platform_driver functions + *****************************************************************************/ +static int ftgmac100_probe(struct platform_device *pdev) 
+{ + struct resource *res; + int irq; + struct net_device *netdev; + struct ftgmac100 *priv; + int err; + int i; + + if (!pdev) + return -ENODEV; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + /* setup net_device */ + netdev = alloc_etherdev(sizeof(*priv)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + netdev->ethtool_ops = &ftgmac100_ethtool_ops; + netdev->netdev_ops = &ftgmac100_netdev_ops; + netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; + + platform_set_drvdata(pdev, netdev); + + /* setup private data */ + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dev = &pdev->dev; + + spin_lock_init(&priv->tx_lock); + + /* initialize NAPI */ + netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64); + + /* map io memory */ + priv->res = request_mem_region(res->start, resource_size(res), + dev_name(&pdev->dev)); + if (!priv->res) { + dev_err(&pdev->dev, "Could not reserve memory region\n"); + err = -ENOMEM; + goto err_req_mem; + } + + priv->base = ioremap(res->start, resource_size(res)); + if (!priv->base) { + dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); + err = -EIO; + goto err_ioremap; + } + + priv->irq = irq; + + /* initialize mdio bus */ + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + err = -EIO; + goto err_alloc_mdiobus; + } + + priv->mii_bus->name = "ftgmac100_mdio"; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii"); + + priv->mii_bus->priv = netdev; + priv->mii_bus->read = ftgmac100_mdiobus_read; + priv->mii_bus->write = ftgmac100_mdiobus_write; + priv->mii_bus->irq = priv->phy_irq; + + for (i = 0; i < PHY_MAX_ADDR; i++) + priv->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(priv->mii_bus); + if (err) { + dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); + goto err_register_mdiobus; + } + + err = ftgmac100_mii_probe(priv); + if (err) { + dev_err(&pdev->dev, "MII Probe failed!\n"); + goto err_mii_probe; + } + + /* register network device */ + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto err_register_netdev; + } + + netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + netdev_info(netdev, "generated random MAC address %pM\n", + netdev->dev_addr); + } + + return 0; + +err_register_netdev: + phy_disconnect(priv->phydev); +err_mii_probe: + mdiobus_unregister(priv->mii_bus); +err_register_mdiobus: + mdiobus_free(priv->mii_bus); +err_alloc_mdiobus: + iounmap(priv->base); +err_ioremap: + release_resource(priv->res); +err_req_mem: + netif_napi_del(&priv->napi); + free_netdev(netdev); +err_alloc_etherdev: + return err; +} + +static int __exit ftgmac100_remove(struct platform_device *pdev) +{ + struct net_device *netdev; + struct ftgmac100 *priv; + + netdev = platform_get_drvdata(pdev); + priv = netdev_priv(netdev); + + unregister_netdev(netdev); + + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mii_bus); + mdiobus_free(priv->mii_bus); + + iounmap(priv->base); + release_resource(priv->res); + + netif_napi_del(&priv->napi); + free_netdev(netdev); + return 0; +} + +static struct platform_driver ftgmac100_driver = { + .probe = ftgmac100_probe, + .remove = __exit_p(ftgmac100_remove), + .driver = { + .name = DRV_NAME, + }, +}; + +module_platform_driver(ftgmac100_driver); + +MODULE_AUTHOR("Po-Yu 
Chuang "); +MODULE_DESCRIPTION("FTGMAC100 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h new file mode 100644 index 000000000..13408d448 --- /dev/null +++ b/drivers/net/ethernet/faraday/ftgmac100.h @@ -0,0 +1,246 @@ +/* + * Faraday FTGMAC100 Gigabit Ethernet + * + * (C) Copyright 2009-2011 Faraday Technology + * Po-Yu Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __FTGMAC100_H +#define __FTGMAC100_H + +#define FTGMAC100_OFFSET_ISR 0x00 +#define FTGMAC100_OFFSET_IER 0x04 +#define FTGMAC100_OFFSET_MAC_MADR 0x08 +#define FTGMAC100_OFFSET_MAC_LADR 0x0c +#define FTGMAC100_OFFSET_MAHT0 0x10 +#define FTGMAC100_OFFSET_MAHT1 0x14 +#define FTGMAC100_OFFSET_NPTXPD 0x18 +#define FTGMAC100_OFFSET_RXPD 0x1c +#define FTGMAC100_OFFSET_NPTXR_BADR 0x20 +#define FTGMAC100_OFFSET_RXR_BADR 0x24 +#define FTGMAC100_OFFSET_HPTXPD 0x28 +#define FTGMAC100_OFFSET_HPTXR_BADR 0x2c +#define FTGMAC100_OFFSET_ITC 0x30 +#define FTGMAC100_OFFSET_APTC 0x34 +#define FTGMAC100_OFFSET_DBLAC 0x38 +#define FTGMAC100_OFFSET_DMAFIFOS 0x3c +#define FTGMAC100_OFFSET_REVR 0x40 +#define FTGMAC100_OFFSET_FEAR 0x44 +#define FTGMAC100_OFFSET_TPAFCR 0x48 +#define FTGMAC100_OFFSET_RBSR 0x4c +#define FTGMAC100_OFFSET_MACCR 0x50 +#define FTGMAC100_OFFSET_MACSR 0x54 +#define FTGMAC100_OFFSET_TM 0x58 +#define FTGMAC100_OFFSET_PHYCR 0x60 +#define FTGMAC100_OFFSET_PHYDATA 0x64 +#define FTGMAC100_OFFSET_FCR 0x68 +#define FTGMAC100_OFFSET_BPR 0x6c +#define FTGMAC100_OFFSET_WOLCR 0x70 +#define FTGMAC100_OFFSET_WOLSR 0x74 +#define FTGMAC100_OFFSET_WFCRC 0x78 +#define FTGMAC100_OFFSET_WFBM1 0x80 +#define FTGMAC100_OFFSET_WFBM2 0x84 +#define FTGMAC100_OFFSET_WFBM3 0x88 +#define FTGMAC100_OFFSET_WFBM4 0x8c +#define FTGMAC100_OFFSET_NPTXR_PTR 0x90 +#define FTGMAC100_OFFSET_HPTXR_PTR 0x94 +#define FTGMAC100_OFFSET_RXR_PTR 0x98 +#define FTGMAC100_OFFSET_TX 0xa0 +#define FTGMAC100_OFFSET_TX_MCOL_SCOL 0xa4 +#define FTGMAC100_OFFSET_TX_ECOL_FAIL 0xa8 +#define FTGMAC100_OFFSET_TX_LCOL_UND 0xac +#define FTGMAC100_OFFSET_RX 0xb0 +#define FTGMAC100_OFFSET_RX_BC 0xb4 +#define FTGMAC100_OFFSET_RX_MC 0xb8 +#define FTGMAC100_OFFSET_RX_PF_AEP 0xbc +#define FTGMAC100_OFFSET_RX_RUNT 0xc0 +#define FTGMAC100_OFFSET_RX_CRCER_FTL 0xc4 +#define FTGMAC100_OFFSET_RX_COL_LOST 0xc8 + +/* + * Interrupt status register & interrupt enable register + */ +#define FTGMAC100_INT_RPKT_BUF (1 << 0) +#define FTGMAC100_INT_RPKT_FIFO (1 << 1) +#define FTGMAC100_INT_NO_RXBUF (1 << 2) +#define FTGMAC100_INT_RPKT_LOST (1 << 3) +#define FTGMAC100_INT_XPKT_ETH (1 << 4) +#define FTGMAC100_INT_XPKT_FIFO (1 << 5) +#define FTGMAC100_INT_NO_NPTXBUF (1 << 6) +#define FTGMAC100_INT_XPKT_LOST (1 << 7) +#define FTGMAC100_INT_AHB_ERR (1 << 8) +#define FTGMAC100_INT_PHYSTS_CHG (1 << 9) +#define FTGMAC100_INT_NO_HPTXBUF (1 << 10) + +/* + * 
Interrupt timer control register + */ +#define FTGMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0) +#define FTGMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4) +#define FTGMAC100_ITC_RXINT_TIME_SEL (1 << 7) +#define FTGMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8) +#define FTGMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12) +#define FTGMAC100_ITC_TXINT_TIME_SEL (1 << 15) + +/* + * Automatic polling timer control register + */ +#define FTGMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0) +#define FTGMAC100_APTC_RXPOLL_TIME_SEL (1 << 4) +#define FTGMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8) +#define FTGMAC100_APTC_TXPOLL_TIME_SEL (1 << 12) + +/* + * DMA burst length and arbitration control register + */ +#define FTGMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 0) +#define FTGMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 3) +#define FTGMAC100_DBLAC_RX_THR_EN (1 << 6) +#define FTGMAC100_DBLAC_RXBURST_SIZE(x) (((x) & 0x3) << 8) +#define FTGMAC100_DBLAC_TXBURST_SIZE(x) (((x) & 0x3) << 10) +#define FTGMAC100_DBLAC_RXDES_SIZE(x) (((x) & 0xf) << 12) +#define FTGMAC100_DBLAC_TXDES_SIZE(x) (((x) & 0xf) << 16) +#define FTGMAC100_DBLAC_IFG_CNT(x) (((x) & 0x7) << 20) +#define FTGMAC100_DBLAC_IFG_INC (1 << 23) + +/* + * DMA FIFO status register + */ +#define FTGMAC100_DMAFIFOS_RXDMA1_SM(dmafifos) ((dmafifos) & 0xf) +#define FTGMAC100_DMAFIFOS_RXDMA2_SM(dmafifos) (((dmafifos) >> 4) & 0xf) +#define FTGMAC100_DMAFIFOS_RXDMA3_SM(dmafifos) (((dmafifos) >> 8) & 0x7) +#define FTGMAC100_DMAFIFOS_TXDMA1_SM(dmafifos) (((dmafifos) >> 12) & 0xf) +#define FTGMAC100_DMAFIFOS_TXDMA2_SM(dmafifos) (((dmafifos) >> 16) & 0x3) +#define FTGMAC100_DMAFIFOS_TXDMA3_SM(dmafifos) (((dmafifos) >> 18) & 0xf) +#define FTGMAC100_DMAFIFOS_RXFIFO_EMPTY (1 << 26) +#define FTGMAC100_DMAFIFOS_TXFIFO_EMPTY (1 << 27) +#define FTGMAC100_DMAFIFOS_RXDMA_GRANT (1 << 28) +#define FTGMAC100_DMAFIFOS_TXDMA_GRANT (1 << 29) +#define FTGMAC100_DMAFIFOS_RXDMA_REQ (1 << 30) +#define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31) + +/* + * Receive buffer size register + */ +#define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff) + +/* + * MAC control register + */ +#define FTGMAC100_MACCR_TXDMA_EN (1 << 0) +#define FTGMAC100_MACCR_RXDMA_EN (1 << 1) +#define FTGMAC100_MACCR_TXMAC_EN (1 << 2) +#define FTGMAC100_MACCR_RXMAC_EN (1 << 3) +#define FTGMAC100_MACCR_RM_VLAN (1 << 4) +#define FTGMAC100_MACCR_HPTXR_EN (1 << 5) +#define FTGMAC100_MACCR_LOOP_EN (1 << 6) +#define FTGMAC100_MACCR_ENRX_IN_HALFTX (1 << 7) +#define FTGMAC100_MACCR_FULLDUP (1 << 8) +#define FTGMAC100_MACCR_GIGA_MODE (1 << 9) +#define FTGMAC100_MACCR_CRC_APD (1 << 10) +#define FTGMAC100_MACCR_RX_RUNT (1 << 12) +#define FTGMAC100_MACCR_JUMBO_LF (1 << 13) +#define FTGMAC100_MACCR_RX_ALL (1 << 14) +#define FTGMAC100_MACCR_HT_MULTI_EN (1 << 15) +#define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16) +#define FTGMAC100_MACCR_RX_BROADPKT (1 << 17) +#define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18) +#define FTGMAC100_MACCR_FAST_MODE (1 << 19) +#define FTGMAC100_MACCR_SW_RST (1 << 31) + +/* + * PHY control register + */ +#define FTGMAC100_PHYCR_MDC_CYCTHR_MASK 0x3f +#define FTGMAC100_PHYCR_MDC_CYCTHR(x) ((x) & 0x3f) +#define FTGMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16) +#define FTGMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21) +#define FTGMAC100_PHYCR_MIIRD (1 << 26) +#define FTGMAC100_PHYCR_MIIWR (1 << 27) + +/* + * PHY data register + */ +#define FTGMAC100_PHYDATA_MIIWDATA(x) ((x) & 0xffff) +#define FTGMAC100_PHYDATA_MIIRDATA(phydata) (((phydata) >> 16) & 0xffff) + +/* + * Transmit descriptor, aligned to 16 bytes + */ +struct ftgmac100_txdes { + 
unsigned int txdes0; + unsigned int txdes1; + unsigned int txdes2; /* not used by HW */ + unsigned int txdes3; /* TXBUF_BADR */ +} __attribute__ ((aligned(16))); + +#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff) +#define FTGMAC100_TXDES0_EDOTR (1 << 15) +#define FTGMAC100_TXDES0_CRC_ERR (1 << 19) +#define FTGMAC100_TXDES0_LTS (1 << 28) +#define FTGMAC100_TXDES0_FTS (1 << 29) +#define FTGMAC100_TXDES0_TXDMA_OWN (1 << 31) + +#define FTGMAC100_TXDES1_VLANTAG_CI(x) ((x) & 0xffff) +#define FTGMAC100_TXDES1_INS_VLANTAG (1 << 16) +#define FTGMAC100_TXDES1_TCP_CHKSUM (1 << 17) +#define FTGMAC100_TXDES1_UDP_CHKSUM (1 << 18) +#define FTGMAC100_TXDES1_IP_CHKSUM (1 << 19) +#define FTGMAC100_TXDES1_LLC (1 << 22) +#define FTGMAC100_TXDES1_TX2FIC (1 << 30) +#define FTGMAC100_TXDES1_TXIC (1 << 31) + +/* + * Receive descriptor, aligned to 16 bytes + */ +struct ftgmac100_rxdes { + unsigned int rxdes0; + unsigned int rxdes1; + unsigned int rxdes2; /* not used by HW */ + unsigned int rxdes3; /* RXBUF_BADR */ +} __attribute__ ((aligned(16))); + +#define FTGMAC100_RXDES0_VDBC 0x3fff +#define FTGMAC100_RXDES0_EDORR (1 << 15) +#define FTGMAC100_RXDES0_MULTICAST (1 << 16) +#define FTGMAC100_RXDES0_BROADCAST (1 << 17) +#define FTGMAC100_RXDES0_RX_ERR (1 << 18) +#define FTGMAC100_RXDES0_CRC_ERR (1 << 19) +#define FTGMAC100_RXDES0_FTL (1 << 20) +#define FTGMAC100_RXDES0_RUNT (1 << 21) +#define FTGMAC100_RXDES0_RX_ODD_NB (1 << 22) +#define FTGMAC100_RXDES0_FIFO_FULL (1 << 23) +#define FTGMAC100_RXDES0_PAUSE_OPCODE (1 << 24) +#define FTGMAC100_RXDES0_PAUSE_FRAME (1 << 25) +#define FTGMAC100_RXDES0_LRS (1 << 28) +#define FTGMAC100_RXDES0_FRS (1 << 29) +#define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31) + +#define FTGMAC100_RXDES1_VLANTAG_CI 0xffff +#define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20) +#define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20) +#define FTGMAC100_RXDES1_PROT_IP (0x1 << 20) +#define FTGMAC100_RXDES1_PROT_TCPIP (0x2 << 20) +#define FTGMAC100_RXDES1_PROT_UDPIP (0x3 << 20) +#define FTGMAC100_RXDES1_LLC (1 << 22) +#define FTGMAC100_RXDES1_DF (1 << 23) +#define FTGMAC100_RXDES1_VLANTAG_AVAIL (1 << 24) +#define FTGMAC100_RXDES1_TCP_CHKSUM_ERR (1 << 25) +#define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26) +#define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27) + +#endif /* __FTGMAC100_H */ diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c new file mode 100644 index 000000000..dce5f7b7f --- /dev/null +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -0,0 +1,1202 @@ +/* + * Faraday FTMAC100 10/100 Ethernet + * + * (C) Copyright 2009-2011 Faraday Technology + * Po-Yu Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ftmac100.h" + +#define DRV_NAME "ftmac100" +#define DRV_VERSION "0.2" + +#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */ +#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */ + +#define MAX_PKT_SIZE 1518 +#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */ + +#if MAX_PKT_SIZE > 0x7ff +#error invalid MAX_PKT_SIZE +#endif + +#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE +#error invalid RX_BUF_SIZE +#endif + +/****************************************************************************** + * private data + *****************************************************************************/ +struct ftmac100_descs { + struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; + struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES]; +}; + +struct ftmac100 { + struct resource *res; + void __iomem *base; + int irq; + + struct ftmac100_descs *descs; + dma_addr_t descs_dma_addr; + + unsigned int rx_pointer; + unsigned int tx_clean_pointer; + unsigned int tx_pointer; + unsigned int tx_pending; + + spinlock_t tx_lock; + + struct net_device *netdev; + struct device *dev; + struct napi_struct napi; + + struct mii_if_info mii; +}; + +static int ftmac100_alloc_rx_page(struct ftmac100 *priv, + struct ftmac100_rxdes *rxdes, gfp_t gfp); + +/****************************************************************************** + * internal functions (hardware register access) + *****************************************************************************/ +#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \ + FTMAC100_INT_NORXBUF | \ + FTMAC100_INT_XPKT_OK | \ + FTMAC100_INT_XPKT_LOST | \ + FTMAC100_INT_RPKT_LOST | \ + FTMAC100_INT_AHB_ERR | \ + FTMAC100_INT_PHYSTS_CHG) + +#define INT_MASK_ALL_DISABLED 0 + +static void ftmac100_enable_all_int(struct ftmac100 *priv) +{ + iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR); +} + +static void ftmac100_disable_all_int(struct ftmac100 *priv) +{ + iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR); +} + +static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr) +{ + iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR); +} + +static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr) +{ + iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR); +} + +static void ftmac100_txdma_start_polling(struct ftmac100 *priv) +{ + iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD); +} + +static int ftmac100_reset(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + int i; + + /* NOTE: reset clears all registers */ + iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR); + + for (i = 0; i < 5; i++) { + unsigned int maccr; + + maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR); + if (!(maccr & FTMAC100_MACCR_SW_RST)) { + /* + * FTMAC100_MACCR_SW_RST cleared does not indicate + * that hardware reset completed (what the f*ck). + * We still need to wait for a while. 
+ */ + udelay(500); + return 0; + } + + udelay(1000); + } + + netdev_err(netdev, "software reset failed\n"); + return -EIO; +} + +static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac) +{ + unsigned int maddr = mac[0] << 8 | mac[1]; + unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; + + iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR); + iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR); +} + +#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \ + FTMAC100_MACCR_RCV_EN | \ + FTMAC100_MACCR_XDMA_EN | \ + FTMAC100_MACCR_RDMA_EN | \ + FTMAC100_MACCR_CRC_APD | \ + FTMAC100_MACCR_FULLDUP | \ + FTMAC100_MACCR_RX_RUNT | \ + FTMAC100_MACCR_RX_BROADPKT) + +static int ftmac100_start_hw(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + + if (ftmac100_reset(priv)) + return -EIO; + + /* setup ring buffer base registers */ + ftmac100_set_rx_ring_base(priv, + priv->descs_dma_addr + + offsetof(struct ftmac100_descs, rxdes)); + ftmac100_set_tx_ring_base(priv, + priv->descs_dma_addr + + offsetof(struct ftmac100_descs, txdes)); + + iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC); + + ftmac100_set_mac(priv, netdev->dev_addr); + + iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR); + return 0; +} + +static void ftmac100_stop_hw(struct ftmac100 *priv) +{ + iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR); +} + +/****************************************************************************** + * internal functions (receive descriptor) + *****************************************************************************/ +static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS); +} + +static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS); +} + +static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); +} + +static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes) +{ + /* clear status bits */ + rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); +} + +static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR); +} + +static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR); +} + +static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL); +} + +static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT); +} + +static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB); +} + +static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes) +{ + return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL; +} + +static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST); +} + +static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes, + unsigned int size) +{ + rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR); + rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size)); +} + +static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes) +{ + rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR); +} 
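[Editor's note] Both drivers test and update descriptor words in the byte order the DMA engine expects: single-bit flags are checked by converting the constant once with cpu_to_le32() and ANDing it against the raw descriptor, while multi-bit fields are extracted by converting the descriptor with le32_to_cpu() first. The stand-alone sketch below mirrors that pattern using glibc's htole32()/le32toh() as userspace stand-ins; the bit definitions reuse the FTGMAC100_RXDES0 values from ftgmac100.h earlier in this patch purely for illustration.

#define _DEFAULT_SOURCE
#include <assert.h>
#include <endian.h>     /* htole32()/le32toh(): stand-ins for cpu_to_le32()/le32_to_cpu() */
#include <stdint.h>

#define RXDES0_LRS      (1u << 28)      /* last receive segment (FTGMAC100_RXDES0_LRS) */
#define RXDES0_VDBC     0x3fff          /* valid data byte count (FTGMAC100_RXDES0_VDBC) */

int main(void)
{
        /* descriptor word as the MAC would leave it in memory: little-endian */
        uint32_t rxdes0 = htole32(RXDES0_LRS | 60);

        /* single-bit flag: convert the constant, not the descriptor */
        assert(rxdes0 & htole32(RXDES0_LRS));

        /* multi-bit field: convert the descriptor to CPU order, then mask */
        assert((le32toh(rxdes0) & RXDES0_VDBC) == 60);

        return 0;
}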
+ +static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes, + dma_addr_t addr) +{ + rxdes->rxdes2 = cpu_to_le32(addr); +} + +static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes) +{ + return le32_to_cpu(rxdes->rxdes2); +} + +/* + * rxdes3 is not used by hardware. We use it to keep track of page. + * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). + */ +static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page) +{ + rxdes->rxdes3 = (unsigned int)page; +} + +static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes) +{ + return (struct page *)rxdes->rxdes3; +} + +/****************************************************************************** + * internal functions (receive) + *****************************************************************************/ +static int ftmac100_next_rx_pointer(int pointer) +{ + return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); +} + +static void ftmac100_rx_pointer_advance(struct ftmac100 *priv) +{ + priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer); +} + +static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv) +{ + return &priv->descs->rxdes[priv->rx_pointer]; +} + +static struct ftmac100_rxdes * +ftmac100_rx_locate_first_segment(struct ftmac100 *priv) +{ + struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); + + while (!ftmac100_rxdes_owned_by_dma(rxdes)) { + if (ftmac100_rxdes_first_segment(rxdes)) + return rxdes; + + ftmac100_rxdes_set_dma_own(rxdes); + ftmac100_rx_pointer_advance(priv); + rxdes = ftmac100_current_rxdes(priv); + } + + return NULL; +} + +static bool ftmac100_rx_packet_error(struct ftmac100 *priv, + struct ftmac100_rxdes *rxdes) +{ + struct net_device *netdev = priv->netdev; + bool error = false; + + if (unlikely(ftmac100_rxdes_rx_error(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx err\n"); + + netdev->stats.rx_errors++; + error = true; + } + + if (unlikely(ftmac100_rxdes_crc_error(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx crc err\n"); + + netdev->stats.rx_crc_errors++; + error = true; + } + + if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx frame too long\n"); + + netdev->stats.rx_length_errors++; + error = true; + } else if (unlikely(ftmac100_rxdes_runt(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx runt\n"); + + netdev->stats.rx_length_errors++; + error = true; + } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx odd nibble\n"); + + netdev->stats.rx_length_errors++; + error = true; + } + + return error; +} + +static void ftmac100_rx_drop_packet(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); + bool done = false; + + if (net_ratelimit()) + netdev_dbg(netdev, "drop packet %p\n", rxdes); + + do { + if (ftmac100_rxdes_last_segment(rxdes)) + done = true; + + ftmac100_rxdes_set_dma_own(rxdes); + ftmac100_rx_pointer_advance(priv); + rxdes = ftmac100_current_rxdes(priv); + } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes)); + + netdev->stats.rx_dropped++; +} + +static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_rxdes *rxdes; + struct sk_buff *skb; + struct page *page; + dma_addr_t map; + int length; + + rxdes = ftmac100_rx_locate_first_segment(priv); + if (!rxdes) + return false; + + if 
(unlikely(ftmac100_rx_packet_error(priv, rxdes))) { + ftmac100_rx_drop_packet(priv); + return true; + } + + /* + * It is impossible to get multi-segment packets + * because we always provide big enough receive buffers. + */ + if (unlikely(!ftmac100_rxdes_last_segment(rxdes))) + BUG(); + + /* start processing */ + skb = netdev_alloc_skb_ip_align(netdev, 128); + if (unlikely(!skb)) { + if (net_ratelimit()) + netdev_err(netdev, "rx skb alloc failed\n"); + + ftmac100_rx_drop_packet(priv); + return true; + } + + if (unlikely(ftmac100_rxdes_multicast(rxdes))) + netdev->stats.multicast++; + + map = ftmac100_rxdes_get_dma_addr(rxdes); + dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + + length = ftmac100_rxdes_frame_length(rxdes); + page = ftmac100_rxdes_get_page(rxdes); + skb_fill_page_desc(skb, 0, page, 0, length); + skb->len += length; + skb->data_len += length; + + if (length > 128) { + skb->truesize += PAGE_SIZE; + /* We pull the minimum amount into linear part */ + __pskb_pull_tail(skb, ETH_HLEN); + } else { + /* Small frames are copied into linear part to free one page */ + __pskb_pull_tail(skb, length); + } + ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); + + ftmac100_rx_pointer_advance(priv); + + skb->protocol = eth_type_trans(skb, netdev); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + + /* push packet to protocol stack */ + netif_receive_skb(skb); + + (*processed)++; + return true; +} + +/****************************************************************************** + * internal functions (transmit descriptor) + *****************************************************************************/ +static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes) +{ + /* clear all except end of ring bit */ + txdes->txdes0 = 0; + txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR); + txdes->txdes2 = 0; + txdes->txdes3 = 0; +} + +static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes) +{ + return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); +} + +static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes) +{ + /* + * Make sure dma own bit will not be set before any other + * descriptor fields. 
+ */ + wmb(); + txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); +} + +static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes) +{ + return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL); +} + +static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes) +{ + return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL); +} + +static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR); +} + +static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS); +} + +static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS); +} + +static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC); +} + +static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes, + unsigned int len) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len)); +} + +static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes, + dma_addr_t addr) +{ + txdes->txdes2 = cpu_to_le32(addr); +} + +static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes) +{ + return le32_to_cpu(txdes->txdes2); +} + +/* + * txdes3 is not used by hardware. We use it to keep track of socket buffer. + * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). + */ +static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb) +{ + txdes->txdes3 = (unsigned int)skb; +} + +static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes) +{ + return (struct sk_buff *)txdes->txdes3; +} + +/****************************************************************************** + * internal functions (transmit) + *****************************************************************************/ +static int ftmac100_next_tx_pointer(int pointer) +{ + return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); +} + +static void ftmac100_tx_pointer_advance(struct ftmac100 *priv) +{ + priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer); +} + +static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv) +{ + priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer); +} + +static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv) +{ + return &priv->descs->txdes[priv->tx_pointer]; +} + +static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv) +{ + return &priv->descs->txdes[priv->tx_clean_pointer]; +} + +static bool ftmac100_tx_complete_packet(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_txdes *txdes; + struct sk_buff *skb; + dma_addr_t map; + + if (priv->tx_pending == 0) + return false; + + txdes = ftmac100_current_clean_txdes(priv); + + if (ftmac100_txdes_owned_by_dma(txdes)) + return false; + + skb = ftmac100_txdes_get_skb(txdes); + map = ftmac100_txdes_get_dma_addr(txdes); + + if (unlikely(ftmac100_txdes_excessive_collision(txdes) || + ftmac100_txdes_late_collision(txdes))) { + /* + * packet transmitted to ethernet lost due to late collision + * or excessive collision + */ + netdev->stats.tx_aborted_errors++; + } else { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + } + + dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); + dev_kfree_skb(skb); + + ftmac100_txdes_reset(txdes); + + 
ftmac100_tx_clean_pointer_advance(priv); + + spin_lock(&priv->tx_lock); + priv->tx_pending--; + spin_unlock(&priv->tx_lock); + netif_wake_queue(netdev); + + return true; +} + +static void ftmac100_tx_complete(struct ftmac100 *priv) +{ + while (ftmac100_tx_complete_packet(priv)) + ; +} + +static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, + dma_addr_t map) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_txdes *txdes; + unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; + + txdes = ftmac100_current_txdes(priv); + ftmac100_tx_pointer_advance(priv); + + /* setup TX descriptor */ + ftmac100_txdes_set_skb(txdes, skb); + ftmac100_txdes_set_dma_addr(txdes, map); + + ftmac100_txdes_set_first_segment(txdes); + ftmac100_txdes_set_last_segment(txdes); + ftmac100_txdes_set_txint(txdes); + ftmac100_txdes_set_buffer_size(txdes, len); + + spin_lock(&priv->tx_lock); + priv->tx_pending++; + if (priv->tx_pending == TX_QUEUE_ENTRIES) + netif_stop_queue(netdev); + + /* start transmit */ + ftmac100_txdes_set_dma_own(txdes); + spin_unlock(&priv->tx_lock); + + ftmac100_txdma_start_polling(priv); + return NETDEV_TX_OK; +} + +/****************************************************************************** + * internal functions (buffer) + *****************************************************************************/ +static int ftmac100_alloc_rx_page(struct ftmac100 *priv, + struct ftmac100_rxdes *rxdes, gfp_t gfp) +{ + struct net_device *netdev = priv->netdev; + struct page *page; + dma_addr_t map; + + page = alloc_page(gfp); + if (!page) { + if (net_ratelimit()) + netdev_err(netdev, "failed to allocate rx page\n"); + return -ENOMEM; + } + + map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, map))) { + if (net_ratelimit()) + netdev_err(netdev, "failed to map rx page\n"); + __free_page(page); + return -ENOMEM; + } + + ftmac100_rxdes_set_page(rxdes, page); + ftmac100_rxdes_set_dma_addr(rxdes, map); + ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE); + ftmac100_rxdes_set_dma_own(rxdes); + return 0; +} + +static void ftmac100_free_buffers(struct ftmac100 *priv) +{ + int i; + + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { + struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; + struct page *page = ftmac100_rxdes_get_page(rxdes); + dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes); + + if (!page) + continue; + + dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_page(page); + } + + for (i = 0; i < TX_QUEUE_ENTRIES; i++) { + struct ftmac100_txdes *txdes = &priv->descs->txdes[i]; + struct sk_buff *skb = ftmac100_txdes_get_skb(txdes); + dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes); + + if (!skb) + continue; + + dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); + dev_kfree_skb(skb); + } + + dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs), + priv->descs, priv->descs_dma_addr); +} + +static int ftmac100_alloc_buffers(struct ftmac100 *priv) +{ + int i; + + priv->descs = dma_zalloc_coherent(priv->dev, + sizeof(struct ftmac100_descs), + &priv->descs_dma_addr, + GFP_KERNEL); + if (!priv->descs) + return -ENOMEM; + + /* initialize RX ring */ + ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); + + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { + struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; + + if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) + goto err; + } + + /* initialize TX ring */ + 
ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); + return 0; + +err: + ftmac100_free_buffers(priv); + return -ENOMEM; +} + +/****************************************************************************** + * struct mii_if_info functions + *****************************************************************************/ +static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg) +{ + struct ftmac100 *priv = netdev_priv(netdev); + unsigned int phycr; + int i; + + phycr = FTMAC100_PHYCR_PHYAD(phy_id) | + FTMAC100_PHYCR_REGAD(reg) | + FTMAC100_PHYCR_MIIRD; + + iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); + + for (i = 0; i < 10; i++) { + phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); + + if ((phycr & FTMAC100_PHYCR_MIIRD) == 0) + return phycr & FTMAC100_PHYCR_MIIRDATA; + + udelay(100); + } + + netdev_err(netdev, "mdio read timed out\n"); + return 0; +} + +static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg, + int data) +{ + struct ftmac100 *priv = netdev_priv(netdev); + unsigned int phycr; + int i; + + phycr = FTMAC100_PHYCR_PHYAD(phy_id) | + FTMAC100_PHYCR_REGAD(reg) | + FTMAC100_PHYCR_MIIWR; + + data = FTMAC100_PHYWDATA_MIIWDATA(data); + + iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA); + iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); + + for (i = 0; i < 10; i++) { + phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); + + if ((phycr & FTMAC100_PHYCR_MIIWR) == 0) + return; + + udelay(100); + } + + netdev_err(netdev, "mdio write timed out\n"); +} + +/****************************************************************************** + * struct ethtool_ops functions + *****************************************************************************/ +static void ftmac100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); +} + +static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_ethtool_gset(&priv->mii, cmd); +} + +static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_ethtool_sset(&priv->mii, cmd); +} + +static int ftmac100_nway_reset(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_nway_restart(&priv->mii); +} + +static u32 ftmac100_get_link(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_link_ok(&priv->mii); +} + +static const struct ethtool_ops ftmac100_ethtool_ops = { + .set_settings = ftmac100_set_settings, + .get_settings = ftmac100_get_settings, + .get_drvinfo = ftmac100_get_drvinfo, + .nway_reset = ftmac100_nway_reset, + .get_link = ftmac100_get_link, +}; + +/****************************************************************************** + * interrupt handler + *****************************************************************************/ +static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct ftmac100 *priv = netdev_priv(netdev); + + if (likely(netif_running(netdev))) { + /* Disable interrupts for polling */ + ftmac100_disable_all_int(priv); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + 
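+/* The hard interrupt handler above only masks the chip and schedules NAPI; receive processing, transmit reclaim and error accounting all happen in ftmac100_poll() below. */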
+/****************************************************************************** + * struct napi_struct functions + *****************************************************************************/ +static int ftmac100_poll(struct napi_struct *napi, int budget) +{ + struct ftmac100 *priv = container_of(napi, struct ftmac100, napi); + struct net_device *netdev = priv->netdev; + unsigned int status; + bool completed = true; + int rx = 0; + + status = ioread32(priv->base + FTMAC100_OFFSET_ISR); + + if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) { + /* + * FTMAC100_INT_RPKT_FINISH: + * RX DMA has received packets into RX buffer successfully + * + * FTMAC100_INT_NORXBUF: + * RX buffer unavailable + */ + bool retry; + + do { + retry = ftmac100_rx_packet(priv, &rx); + } while (retry && rx < budget); + + if (retry && rx == budget) + completed = false; + } + + if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) { + /* + * FTMAC100_INT_XPKT_OK: + * packet transmitted to ethernet successfully + * + * FTMAC100_INT_XPKT_LOST: + * packet transmitted to ethernet lost due to late + * collision or excessive collision + */ + ftmac100_tx_complete(priv); + } + + if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST | + FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) { + if (net_ratelimit()) + netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, + status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "", + status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "", + status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "", + status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : ""); + + if (status & FTMAC100_INT_NORXBUF) { + /* RX buffer unavailable */ + netdev->stats.rx_over_errors++; + } + + if (status & FTMAC100_INT_RPKT_LOST) { + /* received packet lost due to RX FIFO full */ + netdev->stats.rx_fifo_errors++; + } + + if (status & FTMAC100_INT_PHYSTS_CHG) { + /* PHY link status change */ + mii_check_link(&priv->mii); + } + } + + if (completed) { + /* stop polling */ + napi_complete(napi); + ftmac100_enable_all_int(priv); + } + + return rx; +} + +/****************************************************************************** + * struct net_device_ops functions + *****************************************************************************/ +static int ftmac100_open(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + int err; + + err = ftmac100_alloc_buffers(priv); + if (err) { + netdev_err(netdev, "failed to allocate buffers\n"); + goto err_alloc; + } + + err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev); + if (err) { + netdev_err(netdev, "failed to request irq %d\n", priv->irq); + goto err_irq; + } + + priv->rx_pointer = 0; + priv->tx_clean_pointer = 0; + priv->tx_pointer = 0; + priv->tx_pending = 0; + + err = ftmac100_start_hw(priv); + if (err) + goto err_hw; + + napi_enable(&priv->napi); + netif_start_queue(netdev); + + ftmac100_enable_all_int(priv); + + return 0; + +err_hw: + free_irq(priv->irq, netdev); +err_irq: + ftmac100_free_buffers(priv); +err_alloc: + return err; +} + +static int ftmac100_stop(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + + ftmac100_disable_all_int(priv); + netif_stop_queue(netdev); + napi_disable(&priv->napi); + ftmac100_stop_hw(priv); + free_irq(priv->irq, netdev); + ftmac100_free_buffers(priv); + + return 0; +} + +static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + dma_addr_t map; + + if (unlikely(skb->len 
> MAX_PKT_SIZE)) { + if (net_ratelimit()) + netdev_dbg(netdev, "tx packet too big\n"); + + netdev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, map))) { + /* drop packet */ + if (net_ratelimit()) + netdev_err(netdev, "map socket buffer failed\n"); + + netdev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + return ftmac100_xmit(priv, skb, map); +} + +/* optional */ +static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct ftmac100 *priv = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + return generic_mii_ioctl(&priv->mii, data, cmd, NULL); +} + +static const struct net_device_ops ftmac100_netdev_ops = { + .ndo_open = ftmac100_open, + .ndo_stop = ftmac100_stop, + .ndo_start_xmit = ftmac100_hard_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = ftmac100_do_ioctl, +}; + +/****************************************************************************** + * struct platform_driver functions + *****************************************************************************/ +static int ftmac100_probe(struct platform_device *pdev) +{ + struct resource *res; + int irq; + struct net_device *netdev; + struct ftmac100 *priv; + int err; + + if (!pdev) + return -ENODEV; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + /* setup net_device */ + netdev = alloc_etherdev(sizeof(*priv)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + netdev->ethtool_ops = &ftmac100_ethtool_ops; + netdev->netdev_ops = &ftmac100_netdev_ops; + + platform_set_drvdata(pdev, netdev); + + /* setup private data */ + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dev = &pdev->dev; + + spin_lock_init(&priv->tx_lock); + + /* initialize NAPI */ + netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64); + + /* map io memory */ + priv->res = request_mem_region(res->start, resource_size(res), + dev_name(&pdev->dev)); + if (!priv->res) { + dev_err(&pdev->dev, "Could not reserve memory region\n"); + err = -ENOMEM; + goto err_req_mem; + } + + priv->base = ioremap(res->start, resource_size(res)); + if (!priv->base) { + dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); + err = -EIO; + goto err_ioremap; + } + + priv->irq = irq; + + /* initialize struct mii_if_info */ + priv->mii.phy_id = 0; + priv->mii.phy_id_mask = 0x1f; + priv->mii.reg_num_mask = 0x1f; + priv->mii.dev = netdev; + priv->mii.mdio_read = ftmac100_mdio_read; + priv->mii.mdio_write = ftmac100_mdio_write; + + /* register network device */ + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto err_register_netdev; + } + + netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + netdev_info(netdev, "generated random MAC address %pM\n", + netdev->dev_addr); + } + + return 0; + +err_register_netdev: + iounmap(priv->base); +err_ioremap: + release_resource(priv->res); +err_req_mem: + netif_napi_del(&priv->napi); + free_netdev(netdev); +err_alloc_etherdev: + return err; +} + +static int __exit ftmac100_remove(struct platform_device *pdev) +{ + struct net_device *netdev; + 
struct ftmac100 *priv; + + netdev = platform_get_drvdata(pdev); + priv = netdev_priv(netdev); + + unregister_netdev(netdev); + + iounmap(priv->base); + release_resource(priv->res); + + netif_napi_del(&priv->napi); + free_netdev(netdev); + return 0; +} + +static struct platform_driver ftmac100_driver = { + .probe = ftmac100_probe, + .remove = __exit_p(ftmac100_remove), + .driver = { + .name = DRV_NAME, + }, +}; + +/****************************************************************************** + * initialization / finalization + *****************************************************************************/ +static int __init ftmac100_init(void) +{ + pr_info("Loading version " DRV_VERSION " ...\n"); + return platform_driver_register(&ftmac100_driver); +} + +static void __exit ftmac100_exit(void) +{ + platform_driver_unregister(&ftmac100_driver); +} + +module_init(ftmac100_init); +module_exit(ftmac100_exit); + +MODULE_AUTHOR("Po-Yu Chuang "); +MODULE_DESCRIPTION("FTMAC100 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h new file mode 100644 index 000000000..46a0c47b1 --- /dev/null +++ b/drivers/net/ethernet/faraday/ftmac100.h @@ -0,0 +1,180 @@ +/* + * Faraday FTMAC100 10/100 Ethernet + * + * (C) Copyright 2009-2011 Faraday Technology + * Po-Yu Chuang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __FTMAC100_H +#define __FTMAC100_H + +#define FTMAC100_OFFSET_ISR 0x00 +#define FTMAC100_OFFSET_IMR 0x04 +#define FTMAC100_OFFSET_MAC_MADR 0x08 +#define FTMAC100_OFFSET_MAC_LADR 0x0c +#define FTMAC100_OFFSET_MAHT0 0x10 +#define FTMAC100_OFFSET_MAHT1 0x14 +#define FTMAC100_OFFSET_TXPD 0x18 +#define FTMAC100_OFFSET_RXPD 0x1c +#define FTMAC100_OFFSET_TXR_BADR 0x20 +#define FTMAC100_OFFSET_RXR_BADR 0x24 +#define FTMAC100_OFFSET_ITC 0x28 +#define FTMAC100_OFFSET_APTC 0x2c +#define FTMAC100_OFFSET_DBLAC 0x30 +#define FTMAC100_OFFSET_MACCR 0x88 +#define FTMAC100_OFFSET_MACSR 0x8c +#define FTMAC100_OFFSET_PHYCR 0x90 +#define FTMAC100_OFFSET_PHYWDATA 0x94 +#define FTMAC100_OFFSET_FCR 0x98 +#define FTMAC100_OFFSET_BPR 0x9c +#define FTMAC100_OFFSET_TS 0xc4 +#define FTMAC100_OFFSET_DMAFIFOS 0xc8 +#define FTMAC100_OFFSET_TM 0xcc +#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4 +#define FTMAC100_OFFSET_RPF_AEP 0xd8 +#define FTMAC100_OFFSET_XM_PG 0xdc +#define FTMAC100_OFFSET_RUNT_TLCC 0xe0 +#define FTMAC100_OFFSET_CRCER_FTL 0xe4 +#define FTMAC100_OFFSET_RLC_RCC 0xe8 +#define FTMAC100_OFFSET_BROC 0xec +#define FTMAC100_OFFSET_MULCA 0xf0 +#define FTMAC100_OFFSET_RP 0xf4 +#define FTMAC100_OFFSET_XP 0xf8 + +/* + * Interrupt status register & interrupt mask register + */ +#define FTMAC100_INT_RPKT_FINISH (1 << 0) +#define FTMAC100_INT_NORXBUF (1 << 1) +#define FTMAC100_INT_XPKT_FINISH (1 << 2) +#define FTMAC100_INT_NOTXBUF (1 << 3) +#define FTMAC100_INT_XPKT_OK (1 << 4) +#define FTMAC100_INT_XPKT_LOST (1 << 5) +#define FTMAC100_INT_RPKT_SAV (1 << 6) +#define FTMAC100_INT_RPKT_LOST (1 << 7) +#define FTMAC100_INT_AHB_ERR (1 << 8) +#define FTMAC100_INT_PHYSTS_CHG (1 << 9) + +/* + * Interrupt timer control register + */ +#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0) +#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4) +#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7) +#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8) +#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12) +#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15) + +/* + * Automatic polling timer control register + */ +#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0) +#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4) +#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8) +#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12) + +/* + * DMA burst length and arbitration control register + */ +#define FTMAC100_DBLAC_INCR4_EN (1 << 0) +#define FTMAC100_DBLAC_INCR8_EN (1 << 1) +#define FTMAC100_DBLAC_INCR16_EN (1 << 2) +#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3) +#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6) +#define FTMAC100_DBLAC_RX_THR_EN (1 << 9) + +/* + * MAC control register + */ +#define FTMAC100_MACCR_XDMA_EN (1 << 0) +#define FTMAC100_MACCR_RDMA_EN (1 << 1) +#define FTMAC100_MACCR_SW_RST (1 << 2) +#define FTMAC100_MACCR_LOOP_EN (1 << 3) +#define FTMAC100_MACCR_CRC_DIS (1 << 4) +#define FTMAC100_MACCR_XMT_EN (1 << 5) +#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6) +#define FTMAC100_MACCR_RCV_EN (1 << 8) +#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9) +#define FTMAC100_MACCR_RX_RUNT (1 << 10) +#define FTMAC100_MACCR_RX_FTL (1 << 11) +#define FTMAC100_MACCR_RCV_ALL (1 << 12) +#define FTMAC100_MACCR_CRC_APD (1 << 14) +#define FTMAC100_MACCR_FULLDUP (1 << 15) +#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16) +#define FTMAC100_MACCR_RX_BROADPKT (1 << 17) + +/* + * PHY control register + */ +#define FTMAC100_PHYCR_MIIRDATA 0xffff +#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16) +#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) 
<< 21) +#define FTMAC100_PHYCR_MIIRD (1 << 26) +#define FTMAC100_PHYCR_MIIWR (1 << 27) + +/* + * PHY write data register + */ +#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff) + +/* + * Transmit descriptor, aligned to 16 bytes + */ +struct ftmac100_txdes { + unsigned int txdes0; + unsigned int txdes1; + unsigned int txdes2; /* TXBUF_BADR */ + unsigned int txdes3; /* not used by HW */ +} __attribute__ ((aligned(16))); + +#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0) +#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1) +#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31) + +#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff) +#define FTMAC100_TXDES1_LTS (1 << 27) +#define FTMAC100_TXDES1_FTS (1 << 28) +#define FTMAC100_TXDES1_TX2FIC (1 << 29) +#define FTMAC100_TXDES1_TXIC (1 << 30) +#define FTMAC100_TXDES1_EDOTR (1 << 31) + +/* + * Receive descriptor, aligned to 16 bytes + */ +struct ftmac100_rxdes { + unsigned int rxdes0; + unsigned int rxdes1; + unsigned int rxdes2; /* RXBUF_BADR */ + unsigned int rxdes3; /* not used by HW */ +} __attribute__ ((aligned(16))); + +#define FTMAC100_RXDES0_RFL 0x7ff +#define FTMAC100_RXDES0_MULTICAST (1 << 16) +#define FTMAC100_RXDES0_BROADCAST (1 << 17) +#define FTMAC100_RXDES0_RX_ERR (1 << 18) +#define FTMAC100_RXDES0_CRC_ERR (1 << 19) +#define FTMAC100_RXDES0_FTL (1 << 20) +#define FTMAC100_RXDES0_RUNT (1 << 21) +#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22) +#define FTMAC100_RXDES0_LRS (1 << 28) +#define FTMAC100_RXDES0_FRS (1 << 29) +#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31) + +#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff) +#define FTMAC100_RXDES1_EDORR (1 << 31) + +#endif /* __FTMAC100_H */ diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c new file mode 100644 index 000000000..b1b9ebafb --- /dev/null +++ b/drivers/net/ethernet/fealnx.c @@ -0,0 +1,1971 @@ +/* + Written 1998-2000 by Donald Becker. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support information and updates available at + http://www.scyld.com/network/pci-skeleton.html + + Linux kernel updates: + + Version 2.51, Nov 17, 2001 (jgarzik): + - Add ethtool support + - Replace some MII-related magic numbers with constants + +*/ + +#define DRV_NAME "fealnx" +#define DRV_VERSION "2.52" +#define DRV_RELDATE "Sep-11-2006" + +static int debug; /* 1-> print debug message */ +static int max_interrupt_work = 20; + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */ +static int multicast_filter_limit = 32; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */ +/* Setting to > 1518 effectively disables this feature. */ +static int rx_copybreak; + +/* Used to pass the media type, etc. */ +/* Both 'options[]' and 'full_duplex[]' should exist for driver */ +/* interoperability. */ +/* The media type is usually passed in 'options[]'. 
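   Each probed card gets its own options[]/full_duplex[] entry, indexed by probe order (card_idx in fealnx_init_one).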
 */ +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; + +/* Operational parameters that are set at compile time. */ +/* Keep the ring sizes a power of two for compile efficiency. */ +/* The compiler will convert '%'<2^N> into a bit mask. */ +/* Making the Tx ring too large decreases the effectiveness of channel */ +/* bonding and packet priority. */ +/* There are no ill effects from too-large receive rings. */ +// 88-12-9 modify, +// #define TX_RING_SIZE 16 +// #define RX_RING_SIZE 32 +#define TX_RING_SIZE 6 +#define RX_RING_SIZE 12 +#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc) +#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct fealnx_desc) + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ + + +/* Include files, designed to support most kernel versions 2.0.0 and later. */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/init.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/bitops.h> + +#include <asm/processor.h> /* Processor type for cache alignment. */ +#include <asm/io.h> +#include <asm/uaccess.h> +#include <asm/byteorder.h> + +/* These identify the driver base version and may not be removed. */ +static const char version[] = + KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; + + +/* This driver was written to use PCI memory space, however some x86 systems + work only with I/O space accesses. */ +#ifndef __alpha__ +#define USE_IO_OPS +#endif + +/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */ +/* This is only in the support-all-kernels source code. */ + +#define RUN_AT(x) (jiffies + (x)) + +MODULE_AUTHOR("Myson or whoever"); +MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver"); +MODULE_LICENSE("GPL"); +module_param(max_interrupt_work, int, 0); +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param(multicast_filter_limit, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt"); +MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)"); +MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses"); +MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex"); +MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)"); + +enum { + MIN_REGION_SIZE = 136, +}; + +/* A chip capabilities table, matching the entries in pci_tbl[] above. */ +enum chip_capability_flags { + HAS_MII_XCVR, + HAS_CHIP_XCVR, +}; + +/* 89/6/13 add, */ +/* for different PHY */ +enum phy_type_flags { + MysonPHY = 1, + AhdocPHY = 2, + SeeqPHY = 3, + MarvellPHY = 4, + Myson981 = 5, + LevelOnePHY = 6, + OtherPHY = 10, +}; + +struct chip_info { + char *chip_name; + int flags; +}; + +static const struct chip_info skel_netdrv_tbl[] = { + { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, + { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, + { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, +}; + +/* Offsets to the Command and Status Registers.
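   These are byte offsets from the register BAR mapped at np->mem and are accessed through the ioread/iowrite helpers.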
*/ +enum fealnx_offsets { + PAR0 = 0x0, /* physical address 0-3 */ + PAR1 = 0x04, /* physical address 4-5 */ + MAR0 = 0x08, /* multicast address 0-3 */ + MAR1 = 0x0C, /* multicast address 4-7 */ + FAR0 = 0x10, /* flow-control address 0-3 */ + FAR1 = 0x14, /* flow-control address 4-5 */ + TCRRCR = 0x18, /* receive & transmit configuration */ + BCR = 0x1C, /* bus command */ + TXPDR = 0x20, /* transmit polling demand */ + RXPDR = 0x24, /* receive polling demand */ + RXCWP = 0x28, /* receive current word pointer */ + TXLBA = 0x2C, /* transmit list base address */ + RXLBA = 0x30, /* receive list base address */ + ISR = 0x34, /* interrupt status */ + IMR = 0x38, /* interrupt mask */ + FTH = 0x3C, /* flow control high/low threshold */ + MANAGEMENT = 0x40, /* bootrom/eeprom and mii management */ + TALLY = 0x44, /* tally counters for crc and mpa */ + TSR = 0x48, /* tally counter for transmit status */ + BMCRSR = 0x4c, /* basic mode control and status */ + PHYIDENTIFIER = 0x50, /* phy identifier */ + ANARANLPAR = 0x54, /* auto-negotiation advertisement and link + partner ability */ + ANEROCR = 0x58, /* auto-negotiation expansion and pci conf. */ + BPREMRPSR = 0x5c, /* bypass & receive error mask and phy status */ +}; + +/* Bits in the interrupt status/enable registers. */ +/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */ +enum intr_status_bits { + RFCON = 0x00020000, /* receive flow control xon packet */ + RFCOFF = 0x00010000, /* receive flow control xoff packet */ + LSCStatus = 0x00008000, /* link status change */ + ANCStatus = 0x00004000, /* autonegotiation completed */ + FBE = 0x00002000, /* fatal bus error */ + FBEMask = 0x00001800, /* mask bit12-11 */ + ParityErr = 0x00000000, /* parity error */ + TargetErr = 0x00001000, /* target abort */ + MasterErr = 0x00000800, /* master error */ + TUNF = 0x00000400, /* transmit underflow */ + ROVF = 0x00000200, /* receive overflow */ + ETI = 0x00000100, /* transmit early int */ + ERI = 0x00000080, /* receive early int */ + CNTOVF = 0x00000040, /* counter overflow */ + RBU = 0x00000020, /* receive buffer unavailable */ + TBU = 0x00000010, /* transmit buffer unavilable */ + TI = 0x00000008, /* transmit interrupt */ + RI = 0x00000004, /* receive interrupt */ + RxErr = 0x00000002, /* receive error */ +}; + +/* Bits in the NetworkConfig register, W for writing, R for reading */ +/* FIXME: some names are invented by me. Marked with (name?) */ +/* If you have docs and know bit names, please fix 'em */ +enum rx_mode_bits { + CR_W_ENH = 0x02000000, /* enhanced mode (name?) */ + CR_W_FD = 0x00100000, /* full duplex */ + CR_W_PS10 = 0x00080000, /* 10 mbit */ + CR_W_TXEN = 0x00040000, /* tx enable (name?) */ + CR_W_PS1000 = 0x00010000, /* 1000 mbit */ + /* CR_W_RXBURSTMASK= 0x00000e00, Im unsure about this */ + CR_W_RXMODEMASK = 0x000000e0, + CR_W_PROM = 0x00000080, /* promiscuous mode */ + CR_W_AB = 0x00000040, /* accept broadcast */ + CR_W_AM = 0x00000020, /* accept mutlicast */ + CR_W_ARP = 0x00000008, /* receive runt pkt */ + CR_W_ALP = 0x00000004, /* receive long pkt */ + CR_W_SEP = 0x00000002, /* receive error pkt */ + CR_W_RXEN = 0x00000001, /* rx enable (unicast?) (name?) */ + + CR_R_TXSTOP = 0x04000000, /* tx stopped (name?) */ + CR_R_FD = 0x00100000, /* full duplex detected */ + CR_R_PS10 = 0x00080000, /* 10 mbit detected */ + CR_R_RXSTOP = 0x00008000, /* rx stopped (name?) */ +}; + +/* The Tulip Rx and Tx buffer descriptors. 
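   Only status, control, buffer and next_desc are seen by the chip; next_desc_logical and skbuff are driver-side bookkeeping.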
*/ +struct fealnx_desc { + s32 status; + s32 control; + u32 buffer; + u32 next_desc; + struct fealnx_desc *next_desc_logical; + struct sk_buff *skbuff; + u32 reserved1; + u32 reserved2; +}; + +/* Bits in network_desc.status */ +enum rx_desc_status_bits { + RXOWN = 0x80000000, /* own bit */ + FLNGMASK = 0x0fff0000, /* frame length */ + FLNGShift = 16, + MARSTATUS = 0x00004000, /* multicast address received */ + BARSTATUS = 0x00002000, /* broadcast address received */ + PHYSTATUS = 0x00001000, /* physical address received */ + RXFSD = 0x00000800, /* first descriptor */ + RXLSD = 0x00000400, /* last descriptor */ + ErrorSummary = 0x80, /* error summary */ + RUNT = 0x40, /* runt packet received */ + LONG = 0x20, /* long packet received */ + FAE = 0x10, /* frame align error */ + CRC = 0x08, /* crc error */ + RXER = 0x04, /* receive error */ +}; + +enum rx_desc_control_bits { + RXIC = 0x00800000, /* interrupt control */ + RBSShift = 0, +}; + +enum tx_desc_status_bits { + TXOWN = 0x80000000, /* own bit */ + JABTO = 0x00004000, /* jabber timeout */ + CSL = 0x00002000, /* carrier sense lost */ + LC = 0x00001000, /* late collision */ + EC = 0x00000800, /* excessive collision */ + UDF = 0x00000400, /* fifo underflow */ + DFR = 0x00000200, /* deferred */ + HF = 0x00000100, /* heartbeat fail */ + NCRMask = 0x000000ff, /* collision retry count */ + NCRShift = 0, +}; + +enum tx_desc_control_bits { + TXIC = 0x80000000, /* interrupt control */ + ETIControl = 0x40000000, /* early transmit interrupt */ + TXLD = 0x20000000, /* last descriptor */ + TXFD = 0x10000000, /* first descriptor */ + CRCEnable = 0x08000000, /* crc control */ + PADEnable = 0x04000000, /* padding control */ + RetryTxLC = 0x02000000, /* retry late collision */ + PKTSMask = 0x3ff800, /* packet size bit21-11 */ + PKTSShift = 11, + TBSMask = 0x000007ff, /* transmit buffer bit 10-0 */ + TBSShift = 0, +}; + +/* BootROM/EEPROM/MII Management Register */ +#define MASK_MIIR_MII_READ 0x00000000 +#define MASK_MIIR_MII_WRITE 0x00000008 +#define MASK_MIIR_MII_MDO 0x00000004 +#define MASK_MIIR_MII_MDI 0x00000002 +#define MASK_MIIR_MII_MDC 0x00000001 + +/* ST+OP+PHYAD+REGAD+TA */ +#define OP_READ 0x6000 /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */ +#define OP_WRITE 0x5002 /* ST:01+OP:01+PHYAD+REGAD+TA:10 */ + +/* ------------------------------------------------------------------------- */ +/* Constants for Myson PHY */ +/* ------------------------------------------------------------------------- */ +#define MysonPHYID 0xd0000302 +/* 89-7-27 add, (begin) */ +#define MysonPHYID0 0x0302 +#define StatusRegister 18 +#define SPEED100 0x0400 // bit10 +#define FULLMODE 0x0800 // bit11 +/* 89-7-27 add, (end) */ + +/* ------------------------------------------------------------------------- */ +/* Constants for Seeq 80225 PHY */ +/* ------------------------------------------------------------------------- */ +#define SeeqPHYID0 0x0016 + +#define MIIRegister18 18 +#define SPD_DET_100 0x80 +#define DPLX_DET_FULL 0x40 + +/* ------------------------------------------------------------------------- */ +/* Constants for Ahdoc 101 PHY */ +/* ------------------------------------------------------------------------- */ +#define AhdocPHYID0 0x0022 + +#define DiagnosticReg 18 +#define DPLX_FULL 0x0800 +#define Speed_100 0x0400 + +/* 89/6/13 add, */ +/* -------------------------------------------------------------------------- */ +/* Constants */ +/* -------------------------------------------------------------------------- */ +#define MarvellPHYID0 0x0141 +#define LevelOnePHYID0 
0x0013 + +#define MII1000BaseTControlReg 9 +#define MII1000BaseTStatusReg 10 +#define SpecificReg 17 + +/* for 1000BaseT Control Register */ +#define PHYAbletoPerform1000FullDuplex 0x0200 +#define PHYAbletoPerform1000HalfDuplex 0x0100 +#define PHY1000AbilityMask 0x300 + +// for phy specific status register, marvell phy. +#define SpeedMask 0x0c000 +#define Speed_1000M 0x08000 +#define Speed_100M 0x4000 +#define Speed_10M 0 +#define Full_Duplex 0x2000 + +// 89/12/29 add, for phy specific status register, levelone phy, (begin) +#define LXT1000_100M 0x08000 +#define LXT1000_1000M 0x0c000 +#define LXT1000_Full 0x200 +// 89/12/29 add, for phy specific status register, levelone phy, (end) + +/* for 3-in-1 case, BMCRSR register */ +#define LinkIsUp2 0x00040000 + +/* for PHY */ +#define LinkIsUp 0x0004 + + +struct netdev_private { + /* Descriptor rings first for alignment. */ + struct fealnx_desc *rx_ring; + struct fealnx_desc *tx_ring; + + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + + spinlock_t lock; + + /* Media monitoring timer. */ + struct timer_list timer; + + /* Reset timer */ + struct timer_list reset_timer; + int reset_timer_armed; + unsigned long crvalue_sv; + unsigned long imrvalue_sv; + + /* Frequently used values: keep some adjacent for cache effect. */ + int flags; + struct pci_dev *pci_dev; + unsigned long crvalue; + unsigned long bcrvalue; + unsigned long imrvalue; + struct fealnx_desc *cur_rx; + struct fealnx_desc *lack_rxbuf; + int really_rx_count; + struct fealnx_desc *cur_tx; + struct fealnx_desc *cur_tx_copy; + int really_tx_count; + int free_tx_count; + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + + /* These values are keep track of the transceiver/media in use. */ + unsigned int linkok; + unsigned int line_speed; + unsigned int duplexmode; + unsigned int default_port:4; /* Last dev->if_port value. */ + unsigned int PHYType; + + /* MII transceiver section. */ + int mii_cnt; /* MII device addresses. */ + unsigned char phys[2]; /* MII device addresses. 
*/ + struct mii_if_info mii; + void __iomem *mem; +}; + + +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int location, int value); +static int netdev_open(struct net_device *dev); +static void getlinktype(struct net_device *dev); +static void getlinkstatus(struct net_device *dev); +static void netdev_timer(unsigned long data); +static void reset_timer(unsigned long data); +static void fealnx_tx_timeout(struct net_device *dev); +static void init_ring(struct net_device *dev); +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t intr_handler(int irq, void *dev_instance); +static int netdev_rx(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static void __set_rx_mode(struct net_device *dev); +static struct net_device_stats *get_stats(struct net_device *dev); +static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static const struct ethtool_ops netdev_ethtool_ops; +static int netdev_close(struct net_device *dev); +static void reset_rx_descriptors(struct net_device *dev); +static void reset_tx_descriptors(struct net_device *dev); + +static void stop_nic_rx(void __iomem *ioaddr, long crvalue) +{ + int delay = 0x1000; + iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR); + while (--delay) { + if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP) + break; + } +} + + +static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue) +{ + int delay = 0x1000; + iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR); + while (--delay) { + if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP)) + == (CR_R_RXSTOP+CR_R_TXSTOP) ) + break; + } +} + +static const struct net_device_ops netdev_ops = { + .ndo_open = netdev_open, + .ndo_stop = netdev_close, + .ndo_start_xmit = start_tx, + .ndo_get_stats = get_stats, + .ndo_set_rx_mode = set_rx_mode, + .ndo_do_ioctl = mii_ioctl, + .ndo_tx_timeout = fealnx_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int fealnx_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct netdev_private *np; + int i, option, err, irq; + static int card_idx = -1; + char boardname[12]; + void __iomem *ioaddr; + unsigned long len; + unsigned int chip_id = ent->driver_data; + struct net_device *dev; + void *ring_space; + dma_addr_t ring_dma; +#ifdef USE_IO_OPS + int bar = 0; +#else + int bar = 1; +#endif + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + card_idx++; + sprintf(boardname, "fealnx%d", card_idx); + + option = card_idx < MAX_UNITS ? 
options[card_idx] : 0; + + i = pci_enable_device(pdev); + if (i) return i; + pci_set_master(pdev); + + len = pci_resource_len(pdev, bar); + if (len < MIN_REGION_SIZE) { + dev_err(&pdev->dev, + "region size %ld too small, aborting\n", len); + return -ENODEV; + } + + i = pci_request_regions(pdev, boardname); + if (i) + return i; + + irq = pdev->irq; + + ioaddr = pci_iomap(pdev, bar, len); + if (!ioaddr) { + err = -ENOMEM; + goto err_out_res; + } + + dev = alloc_etherdev(sizeof(struct netdev_private)); + if (!dev) { + err = -ENOMEM; + goto err_out_unmap; + } + SET_NETDEV_DEV(dev, &pdev->dev); + + /* read ethernet id */ + for (i = 0; i < 6; ++i) + dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i); + + /* Reset the chip to erase previous misconfiguration. */ + iowrite32(0x00000001, ioaddr + BCR); + + /* Make certain the descriptor lists are aligned. */ + np = netdev_priv(dev); + np->mem = ioaddr; + spin_lock_init(&np->lock); + np->pci_dev = pdev; + np->flags = skel_netdrv_tbl[chip_id].flags; + pci_set_drvdata(pdev, dev); + np->mii.dev = dev; + np->mii.mdio_read = mdio_read; + np->mii.mdio_write = mdio_write; + np->mii.phy_id_mask = 0x1f; + np->mii.reg_num_mask = 0x1f; + + ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + if (!ring_space) { + err = -ENOMEM; + goto err_out_free_dev; + } + np->rx_ring = ring_space; + np->rx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + if (!ring_space) { + err = -ENOMEM; + goto err_out_free_rx; + } + np->tx_ring = ring_space; + np->tx_ring_dma = ring_dma; + + /* find the connected MII xcvrs */ + if (np->flags == HAS_MII_XCVR) { + int phy, phy_idx = 0; + + for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys); + phy++) { + int mii_status = mdio_read(dev, phy, 1); + + if (mii_status != 0xffff && mii_status != 0x0000) { + np->phys[phy_idx++] = phy; + dev_info(&pdev->dev, + "MII PHY found at address %d, status " + "0x%4.4x.\n", phy, mii_status); + /* get phy type */ + { + unsigned int data; + + data = mdio_read(dev, np->phys[0], 2); + if (data == SeeqPHYID0) + np->PHYType = SeeqPHY; + else if (data == AhdocPHYID0) + np->PHYType = AhdocPHY; + else if (data == MarvellPHYID0) + np->PHYType = MarvellPHY; + else if (data == MysonPHYID0) + np->PHYType = Myson981; + else if (data == LevelOnePHYID0) + np->PHYType = LevelOnePHY; + else + np->PHYType = OtherPHY; + } + } + } + + np->mii_cnt = phy_idx; + if (phy_idx == 0) + dev_warn(&pdev->dev, + "MII PHY not found -- this device may " + "not operate correctly.\n"); + } else { + np->phys[0] = 32; +/* 89/6/23 add, (begin) */ + /* get phy type */ + if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID) + np->PHYType = MysonPHY; + else + np->PHYType = OtherPHY; + } + np->mii.phy_id = np->phys[0]; + + if (dev->mem_start) + option = dev->mem_start; + + /* The lower four bits are the media type. 
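   Bit 0x200 of the same option value forces full duplex; the low four bits are stored in np->default_port just below.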
*/ + if (option > 0) { + if (option & 0x200) + np->mii.full_duplex = 1; + np->default_port = option & 15; + } + + if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0) + np->mii.full_duplex = full_duplex[card_idx]; + + if (np->mii.full_duplex) { + dev_info(&pdev->dev, "Media type forced to Full Duplex.\n"); +/* 89/6/13 add, (begin) */ +// if (np->PHYType==MarvellPHY) + if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) { + unsigned int data; + + data = mdio_read(dev, np->phys[0], 9); + data = (data & 0xfcff) | 0x0200; + mdio_write(dev, np->phys[0], 9, data); + } +/* 89/6/13 add, (end) */ + if (np->flags == HAS_MII_XCVR) + mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL); + else + iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR); + np->mii.force_media = 1; + } + + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + err = register_netdev(dev); + if (err) + goto err_out_free_tx; + + printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", + dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr, + dev->dev_addr, irq); + + return 0; + +err_out_free_tx: + pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); +err_out_free_rx: + pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); +err_out_free_dev: + free_netdev(dev); +err_out_unmap: + pci_iounmap(pdev, ioaddr); +err_out_res: + pci_release_regions(pdev); + return err; +} + + +static void fealnx_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct netdev_private *np = netdev_priv(dev); + + pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, + np->tx_ring_dma); + pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, + np->rx_ring_dma); + unregister_netdev(dev); + pci_iounmap(pdev, np->mem); + free_netdev(dev); + pci_release_regions(pdev); + } else + printk(KERN_ERR "fealnx: remove for unknown device\n"); +} + + +static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad) +{ + ulong miir; + int i; + unsigned int mask, data; + + /* enable MII output */ + miir = (ulong) ioread32(miiport); + miir &= 0xfffffff0; + + miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO; + + /* send 32 1's preamble */ + for (i = 0; i < 32; i++) { + /* low MDC; MDO is already high (miir) */ + miir &= ~MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); + + /* high MDC */ + miir |= MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); + } + + /* calculate ST+OP+PHYAD+REGAD+TA */ + data = opcode | (phyad << 7) | (regad << 2); + + /* sent out */ + mask = 0x8000; + while (mask) { + /* low MDC, prepare MDO */ + miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO); + if (mask & data) + miir |= MASK_MIIR_MII_MDO; + + iowrite32(miir, miiport); + /* high MDC */ + miir |= MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); + udelay(30); + + /* next */ + mask >>= 1; + if (mask == 0x2 && opcode == OP_READ) + miir &= ~MASK_MIIR_MII_WRITE; + } + return miir; +} + + +static int mdio_read(struct net_device *dev, int phyad, int regad) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *miiport = np->mem + MANAGEMENT; + ulong miir; + unsigned int mask, data; + + miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad); + + /* read data */ + mask = 0x8000; + data = 0; + while (mask) { + /* low MDC */ + miir &= ~MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); + + /* read MDI */ + miir = ioread32(miiport); + if (miir & MASK_MIIR_MII_MDI) + data |= mask; + + /* high MDC, and wait */ + miir |= 
MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); + udelay(30); + + /* next */ + mask >>= 1; + } + + /* low MDC */ + miir &= ~MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); + + return data & 0xffff; +} + + +static void mdio_write(struct net_device *dev, int phyad, int regad, int data) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *miiport = np->mem + MANAGEMENT; + ulong miir; + unsigned int mask; + + miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad); + + /* write data */ + mask = 0x8000; + while (mask) { + /* low MDC, prepare MDO */ + miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO); + if (mask & data) + miir |= MASK_MIIR_MII_MDO; + iowrite32(miir, miiport); + + /* high MDC */ + miir |= MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); + + /* next */ + mask >>= 1; + } + + /* low MDC */ + miir &= ~MASK_MIIR_MII_MDC; + iowrite32(miir, miiport); +} + + +static int netdev_open(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + const int irq = np->pci_dev->irq; + int rc, i; + + iowrite32(0x00000001, ioaddr + BCR); /* Reset */ + + rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); + if (rc) + return -EAGAIN; + + for (i = 0; i < 3; i++) + iowrite16(((unsigned short*)dev->dev_addr)[i], + ioaddr + PAR0 + i*2); + + init_ring(dev); + + iowrite32(np->rx_ring_dma, ioaddr + RXLBA); + iowrite32(np->tx_ring_dma, ioaddr + TXLBA); + + /* Initialize other registers. */ + /* Configure the PCI bus bursts and FIFO thresholds. + 486: Set 8 longword burst. + 586: no burst limit. + Burst length 5:3 + 0 0 0 1 + 0 0 1 4 + 0 1 0 8 + 0 1 1 16 + 1 0 0 32 + 1 0 1 64 + 1 1 0 128 + 1 1 1 256 + Wait the specified 50 PCI cycles after a reset by initializing + Tx and Rx queues and the address filter list. + FIXME (Ueimor): optimistic for alpha + posted writes ? */ + + np->bcrvalue = 0x10; /* little-endian, 8 burst length */ +#ifdef __BIG_ENDIAN + np->bcrvalue |= 0x04; /* big-endian */ +#endif + +#if defined(__i386__) && !defined(MODULE) + if (boot_cpu_data.x86 <= 4) + np->crvalue = 0xa00; + else +#endif + np->crvalue = 0xe00; /* rx 128 burst length */ + + +// 89/12/29 add, +// 90/1/16 modify, +// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI; + np->imrvalue = TUNF | CNTOVF | RBU | TI | RI; + if (np->pci_dev->device == 0x891) { + np->bcrvalue |= 0x200; /* set PROG bit */ + np->crvalue |= CR_W_ENH; /* set enhanced bit */ + np->imrvalue |= ETI; + } + iowrite32(np->bcrvalue, ioaddr + BCR); + + if (dev->if_port == 0) + dev->if_port = np->default_port; + + iowrite32(0, ioaddr + RXPDR); +// 89/9/1 modify, +// np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */ + np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */ + np->mii.full_duplex = np->mii.force_media; + getlinkstatus(dev); + if (np->linkok) + getlinktype(dev); + __set_rx_mode(dev); + + netif_start_queue(dev); + + /* Clear and Enable interrupts by setting the interrupt mask. */ + iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR); + iowrite32(np->imrvalue, ioaddr + IMR); + + if (debug) + printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); + + /* Set the timer to check for link beat. 
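   It first fires after 3*HZ and then re-arms itself every 10*HZ from netdev_timer(); reset_timer is a separate one-shot armed only by the interrupt handler.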
*/ + init_timer(&np->timer); + np->timer.expires = RUN_AT(3 * HZ); + np->timer.data = (unsigned long) dev; + np->timer.function = netdev_timer; + + /* timer handler */ + add_timer(&np->timer); + + init_timer(&np->reset_timer); + np->reset_timer.data = (unsigned long) dev; + np->reset_timer.function = reset_timer; + np->reset_timer_armed = 0; + return rc; +} + + +static void getlinkstatus(struct net_device *dev) +/* function: Routine will read MII Status Register to get link status. */ +/* input : dev... pointer to the adapter block. */ +/* output : none. */ +{ + struct netdev_private *np = netdev_priv(dev); + unsigned int i, DelayTime = 0x1000; + + np->linkok = 0; + + if (np->PHYType == MysonPHY) { + for (i = 0; i < DelayTime; ++i) { + if (ioread32(np->mem + BMCRSR) & LinkIsUp2) { + np->linkok = 1; + return; + } + udelay(100); + } + } else { + for (i = 0; i < DelayTime; ++i) { + if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) { + np->linkok = 1; + return; + } + udelay(100); + } + } +} + + +static void getlinktype(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + if (np->PHYType == MysonPHY) { /* 3-in-1 case */ + if (ioread32(np->mem + TCRRCR) & CR_R_FD) + np->duplexmode = 2; /* full duplex */ + else + np->duplexmode = 1; /* half duplex */ + if (ioread32(np->mem + TCRRCR) & CR_R_PS10) + np->line_speed = 1; /* 10M */ + else + np->line_speed = 2; /* 100M */ + } else { + if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */ + unsigned int data; + + data = mdio_read(dev, np->phys[0], MIIRegister18); + if (data & SPD_DET_100) + np->line_speed = 2; /* 100M */ + else + np->line_speed = 1; /* 10M */ + if (data & DPLX_DET_FULL) + np->duplexmode = 2; /* full duplex mode */ + else + np->duplexmode = 1; /* half duplex mode */ + } else if (np->PHYType == AhdocPHY) { + unsigned int data; + + data = mdio_read(dev, np->phys[0], DiagnosticReg); + if (data & Speed_100) + np->line_speed = 2; /* 100M */ + else + np->line_speed = 1; /* 10M */ + if (data & DPLX_FULL) + np->duplexmode = 2; /* full duplex mode */ + else + np->duplexmode = 1; /* half duplex mode */ + } +/* 89/6/13 add, (begin) */ + else if (np->PHYType == MarvellPHY) { + unsigned int data; + + data = mdio_read(dev, np->phys[0], SpecificReg); + if (data & Full_Duplex) + np->duplexmode = 2; /* full duplex mode */ + else + np->duplexmode = 1; /* half duplex mode */ + data &= SpeedMask; + if (data == Speed_1000M) + np->line_speed = 3; /* 1000M */ + else if (data == Speed_100M) + np->line_speed = 2; /* 100M */ + else + np->line_speed = 1; /* 10M */ + } +/* 89/6/13 add, (end) */ +/* 89/7/27 add, (begin) */ + else if (np->PHYType == Myson981) { + unsigned int data; + + data = mdio_read(dev, np->phys[0], StatusRegister); + + if (data & SPEED100) + np->line_speed = 2; + else + np->line_speed = 1; + + if (data & FULLMODE) + np->duplexmode = 2; + else + np->duplexmode = 1; + } +/* 89/7/27 add, (end) */ +/* 89/12/29 add */ + else if (np->PHYType == LevelOnePHY) { + unsigned int data; + + data = mdio_read(dev, np->phys[0], SpecificReg); + if (data & LXT1000_Full) + np->duplexmode = 2; /* full duplex mode */ + else + np->duplexmode = 1; /* half duplex mode */ + data &= SpeedMask; + if (data == LXT1000_1000M) + np->line_speed = 3; /* 1000M */ + else if (data == LXT1000_100M) + np->line_speed = 2; /* 100M */ + else + np->line_speed = 1; /* 10M */ + } + np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000); + if (np->line_speed == 1) + np->crvalue |= CR_W_PS10; + else if (np->line_speed == 3) + np->crvalue |= 
CR_W_PS1000; + if (np->duplexmode == 2) + np->crvalue |= CR_W_FD; + } +} + + +/* Take lock before calling this */ +static void allocate_rx_buffers(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + /* allocate skb for rx buffers */ + while (np->really_rx_count != RX_RING_SIZE) { + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, np->rx_buf_sz); + if (skb == NULL) + break; /* Better luck next round. */ + + while (np->lack_rxbuf->skbuff) + np->lack_rxbuf = np->lack_rxbuf->next_desc_logical; + + np->lack_rxbuf->skbuff = skb; + np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data, + np->rx_buf_sz, PCI_DMA_FROMDEVICE); + np->lack_rxbuf->status = RXOWN; + ++np->really_rx_count; + } +} + + +static void netdev_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + int old_crvalue = np->crvalue; + unsigned int old_linkok = np->linkok; + unsigned long flags; + + if (debug) + printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " + "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR), + ioread32(ioaddr + TCRRCR)); + + spin_lock_irqsave(&np->lock, flags); + + if (np->flags == HAS_MII_XCVR) { + getlinkstatus(dev); + if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */ + getlinktype(dev); + if (np->crvalue != old_crvalue) { + stop_nic_rxtx(ioaddr, np->crvalue); + iowrite32(np->crvalue, ioaddr + TCRRCR); + } + } + } + + allocate_rx_buffers(dev); + + spin_unlock_irqrestore(&np->lock, flags); + + np->timer.expires = RUN_AT(10 * HZ); + add_timer(&np->timer); +} + + +/* Take lock before calling */ +/* Reset chip and disable rx, tx and interrupts */ +static void reset_and_disable_rxtx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + int delay=51; + + /* Reset the chip's Tx and Rx processes. */ + stop_nic_rxtx(ioaddr, 0); + + /* Disable interrupts by clearing the interrupt mask. */ + iowrite32(0, ioaddr + IMR); + + /* Reset the chip to erase previous misconfiguration. */ + iowrite32(0x00000001, ioaddr + BCR); + + /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw). + We surely wait too long (address+data phase). Who cares? */ + while (--delay) { + ioread32(ioaddr + BCR); + rmb(); + } +} + + +/* Take lock before calling */ +/* Restore chip after reset */ +static void enable_rxtx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + + reset_rx_descriptors(dev); + + iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring), + ioaddr + TXLBA); + iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), + ioaddr + RXLBA); + + iowrite32(np->bcrvalue, ioaddr + BCR); + + iowrite32(0, ioaddr + RXPDR); + __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */ + + /* Clear and Enable interrupts by setting the interrupt mask. 
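   Writing the event bits back to ISR acknowledges anything still pending before IMR re-enables delivery.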
*/ + iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR); + iowrite32(np->imrvalue, ioaddr + IMR); + + iowrite32(0, ioaddr + TXPDR); +} + + +static void reset_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct netdev_private *np = netdev_priv(dev); + unsigned long flags; + + printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name); + + spin_lock_irqsave(&np->lock, flags); + np->crvalue = np->crvalue_sv; + np->imrvalue = np->imrvalue_sv; + + reset_and_disable_rxtx(dev); + /* works for me without this: + reset_tx_descriptors(dev); */ + enable_rxtx(dev); + netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */ + + np->reset_timer_armed = 0; + + spin_unlock_irqrestore(&np->lock, flags); +} + + +static void fealnx_tx_timeout(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + unsigned long flags; + int i; + + printk(KERN_WARNING + "%s: Transmit timed out, status %8.8x, resetting...\n", + dev->name, ioread32(ioaddr + ISR)); + + { + printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) + printk(KERN_CONT " %8.8x", + (unsigned int) np->rx_ring[i].status); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_CONT " %4.4x", np->tx_ring[i].status); + printk(KERN_CONT "\n"); + } + + spin_lock_irqsave(&np->lock, flags); + + reset_and_disable_rxtx(dev); + reset_tx_descriptors(dev); + enable_rxtx(dev); + + spin_unlock_irqrestore(&np->lock, flags); + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + netif_wake_queue(dev); /* or .._start_.. ?? */ +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void init_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + /* initialize rx variables */ + np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + np->cur_rx = &np->rx_ring[0]; + np->lack_rxbuf = np->rx_ring; + np->really_rx_count = 0; + + /* initial rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].status = 0; + np->rx_ring[i].control = np->rx_buf_sz << RBSShift; + np->rx_ring[i].next_desc = np->rx_ring_dma + + (i + 1)*sizeof(struct fealnx_desc); + np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1]; + np->rx_ring[i].skbuff = NULL; + } + + /* for the last rx descriptor */ + np->rx_ring[i - 1].next_desc = np->rx_ring_dma; + np->rx_ring[i - 1].next_desc_logical = np->rx_ring; + + /* allocate skb for rx buffers */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz); + + if (skb == NULL) { + np->lack_rxbuf = &np->rx_ring[i]; + break; + } + + ++np->really_rx_count; + np->rx_ring[i].skbuff = skb; + np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data, + np->rx_buf_sz, PCI_DMA_FROMDEVICE); + np->rx_ring[i].status = RXOWN; + np->rx_ring[i].control |= RXIC; + } + + /* initialize tx variables */ + np->cur_tx = &np->tx_ring[0]; + np->cur_tx_copy = &np->tx_ring[0]; + np->really_tx_count = 0; + np->free_tx_count = TX_RING_SIZE; + + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_ring[i].status = 0; + /* do we need np->tx_ring[i].control = XXX; ?? 
*/ + np->tx_ring[i].next_desc = np->tx_ring_dma + + (i + 1)*sizeof(struct fealnx_desc); + np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1]; + np->tx_ring[i].skbuff = NULL; + } + + /* for the last tx descriptor */ + np->tx_ring[i - 1].next_desc = np->tx_ring_dma; + np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0]; +} + + +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&np->lock, flags); + + np->cur_tx_copy->skbuff = skb; + +#define one_buffer +#define BPT 1022 +#if defined(one_buffer) + np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, + skb->len, PCI_DMA_TODEVICE); + np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; + np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ + np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ +// 89/12/29 add, + if (np->pci_dev->device == 0x891) + np->cur_tx_copy->control |= ETIControl | RetryTxLC; + np->cur_tx_copy->status = TXOWN; + np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; + --np->free_tx_count; +#elif defined(two_buffer) + if (skb->len > BPT) { + struct fealnx_desc *next; + + /* for the first descriptor */ + np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, + BPT, PCI_DMA_TODEVICE); + np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable; + np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ + np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */ + + /* for the last descriptor */ + next = np->cur_tx_copy->next_desc_logical; + next->skbuff = skb; + next->control = TXIC | TXLD | CRCEnable | PADEnable; + next->control |= (skb->len << PKTSShift); /* pkt size */ + next->control |= ((skb->len - BPT) << TBSShift); /* buf size */ +// 89/12/29 add, + if (np->pci_dev->device == 0x891) + np->cur_tx_copy->control |= ETIControl | RetryTxLC; + next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT, + skb->len - BPT, PCI_DMA_TODEVICE); + + next->status = TXOWN; + np->cur_tx_copy->status = TXOWN; + + np->cur_tx_copy = next->next_desc_logical; + np->free_tx_count -= 2; + } else { + np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, + skb->len, PCI_DMA_TODEVICE); + np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; + np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ + np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ +// 89/12/29 add, + if (np->pci_dev->device == 0x891) + np->cur_tx_copy->control |= ETIControl | RetryTxLC; + np->cur_tx_copy->status = TXOWN; + np->cur_tx_copy = np->cur_tx_copy->next_desc_logical; + --np->free_tx_count; + } +#endif + + if (np->free_tx_count < 2) + netif_stop_queue(dev); + ++np->really_tx_count; + iowrite32(0, np->mem + TXPDR); + + spin_unlock_irqrestore(&np->lock, flags); + return NETDEV_TX_OK; +} + + +/* Take lock before calling */ +/* Chip probably hosed tx ring. Clean up. 
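   Every queued skb is unmapped and freed and the descriptor links are rebuilt before transmission restarts.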
*/ +static void reset_tx_descriptors(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + struct fealnx_desc *cur; + int i; + + /* initialize tx variables */ + np->cur_tx = &np->tx_ring[0]; + np->cur_tx_copy = &np->tx_ring[0]; + np->really_tx_count = 0; + np->free_tx_count = TX_RING_SIZE; + + for (i = 0; i < TX_RING_SIZE; i++) { + cur = &np->tx_ring[i]; + if (cur->skbuff) { + pci_unmap_single(np->pci_dev, cur->buffer, + cur->skbuff->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(cur->skbuff); + cur->skbuff = NULL; + } + cur->status = 0; + cur->control = 0; /* needed? */ + /* probably not needed. We do it for purely paranoid reasons */ + cur->next_desc = np->tx_ring_dma + + (i + 1)*sizeof(struct fealnx_desc); + cur->next_desc_logical = &np->tx_ring[i + 1]; + } + /* for the last tx descriptor */ + np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma; + np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0]; +} + + +/* Take lock and stop rx before calling this */ +static void reset_rx_descriptors(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + struct fealnx_desc *cur = np->cur_rx; + int i; + + allocate_rx_buffers(dev); + + for (i = 0; i < RX_RING_SIZE; i++) { + if (cur->skbuff) + cur->status = RXOWN; + cur = cur->next_desc_logical; + } + + iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring), + np->mem + RXLBA); +} + + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t intr_handler(int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + long boguscnt = max_interrupt_work; + unsigned int num_tx = 0; + int handled = 0; + + spin_lock(&np->lock); + + iowrite32(0, ioaddr + IMR); + + do { + u32 intr_status = ioread32(ioaddr + ISR); + + /* Acknowledge all of the current interrupt sources ASAP. 
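+ * Writing the value just read back to ISR clears exactly those bits
+ * (the register behaves as write-one-to-clear here), so events that
+ * arrive while this pass runs still show up on the next ioread32().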
*/ + iowrite32(intr_status, ioaddr + ISR); + + if (debug) + printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name, + intr_status); + + if (!(intr_status & np->imrvalue)) + break; + + handled = 1; + +// 90/1/16 delete, +// +// if (intr_status & FBE) +// { /* fatal error */ +// stop_nic_tx(ioaddr, 0); +// stop_nic_rx(ioaddr, 0); +// break; +// }; + + if (intr_status & TUNF) + iowrite32(0, ioaddr + TXPDR); + + if (intr_status & CNTOVF) { + /* missed pkts */ + dev->stats.rx_missed_errors += + ioread32(ioaddr + TALLY) & 0x7fff; + + /* crc error */ + dev->stats.rx_crc_errors += + (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; + } + + if (intr_status & (RI | RBU)) { + if (intr_status & RI) + netdev_rx(dev); + else { + stop_nic_rx(ioaddr, np->crvalue); + reset_rx_descriptors(dev); + iowrite32(np->crvalue, ioaddr + TCRRCR); + } + } + + while (np->really_tx_count) { + long tx_status = np->cur_tx->status; + long tx_control = np->cur_tx->control; + + if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */ + struct fealnx_desc *next; + + next = np->cur_tx->next_desc_logical; + tx_status = next->status; + tx_control = next->control; + } + + if (tx_status & TXOWN) + break; + + if (!(np->crvalue & CR_W_ENH)) { + if (tx_status & (CSL | LC | EC | UDF | HF)) { + dev->stats.tx_errors++; + if (tx_status & EC) + dev->stats.tx_aborted_errors++; + if (tx_status & CSL) + dev->stats.tx_carrier_errors++; + if (tx_status & LC) + dev->stats.tx_window_errors++; + if (tx_status & UDF) + dev->stats.tx_fifo_errors++; + if ((tx_status & HF) && np->mii.full_duplex == 0) + dev->stats.tx_heartbeat_errors++; + + } else { + dev->stats.tx_bytes += + ((tx_control & PKTSMask) >> PKTSShift); + + dev->stats.collisions += + ((tx_status & NCRMask) >> NCRShift); + dev->stats.tx_packets++; + } + } else { + dev->stats.tx_bytes += + ((tx_control & PKTSMask) >> PKTSShift); + dev->stats.tx_packets++; + } + + /* Free the original skb. */ + pci_unmap_single(np->pci_dev, np->cur_tx->buffer, + np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(np->cur_tx->skbuff); + np->cur_tx->skbuff = NULL; + --np->really_tx_count; + if (np->cur_tx->control & TXLD) { + np->cur_tx = np->cur_tx->next_desc_logical; + ++np->free_tx_count; + } else { + np->cur_tx = np->cur_tx->next_desc_logical; + np->cur_tx = np->cur_tx->next_desc_logical; + np->free_tx_count += 2; + } + num_tx++; + } /* end of for loop */ + + if (num_tx && np->free_tx_count >= 2) + netif_wake_queue(dev); + + /* read transmit status for enhanced mode only */ + if (np->crvalue & CR_W_ENH) { + long data; + + data = ioread32(ioaddr + TSR); + dev->stats.tx_errors += (data & 0xff000000) >> 24; + dev->stats.tx_aborted_errors += + (data & 0xff000000) >> 24; + dev->stats.tx_window_errors += + (data & 0x00ff0000) >> 16; + dev->stats.collisions += (data & 0x0000ffff); + } + + if (--boguscnt < 0) { + printk(KERN_WARNING "%s: Too much work at interrupt, " + "status=0x%4.4x.\n", dev->name, intr_status); + if (!np->reset_timer_armed) { + np->reset_timer_armed = 1; + np->reset_timer.expires = RUN_AT(HZ/2); + add_timer(&np->reset_timer); + stop_nic_rxtx(ioaddr, 0); + netif_stop_queue(dev); + /* or netif_tx_disable(dev); ?? */ + /* Prevent other paths from enabling tx,rx,intrs */ + np->crvalue_sv = np->crvalue; + np->imrvalue_sv = np->imrvalue; + np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? 
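+ * Clearing only the enable bits keeps the rest of the saved settings
+ * intact; reset_timer() above restores the full crvalue_sv/imrvalue_sv
+ * copies anyway before re-enabling rx/tx.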
*/ + np->imrvalue = 0; + } + + break; + } + } while (1); + + /* read the tally counters */ + /* missed pkts */ + dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff; + + /* crc error */ + dev->stats.rx_crc_errors += + (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; + + if (debug) + printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", + dev->name, ioread32(ioaddr + ISR)); + + iowrite32(np->imrvalue, ioaddr + IMR); + + spin_unlock(&np->lock); + + return IRQ_RETVAL(handled); +} + + +/* This routine is logically part of the interrupt handler, but separated + for clarity and better register allocation. */ +static int netdev_rx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + + /* If EOP is set on the next entry, it's a new packet. Send it up. */ + while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) { + s32 rx_status = np->cur_rx->status; + + if (np->really_rx_count == 0) + break; + + if (debug) + printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status); + + if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) || + (rx_status & ErrorSummary)) { + if (rx_status & ErrorSummary) { /* there was a fatal error */ + if (debug) + printk(KERN_DEBUG + "%s: Receive error, Rx status %8.8x.\n", + dev->name, rx_status); + + dev->stats.rx_errors++; /* end of a packet. */ + if (rx_status & (LONG | RUNT)) + dev->stats.rx_length_errors++; + if (rx_status & RXER) + dev->stats.rx_frame_errors++; + if (rx_status & CRC) + dev->stats.rx_crc_errors++; + } else { + int need_to_reset = 0; + int desno = 0; + + if (rx_status & RXFSD) { /* this pkt is too long, over one rx buffer */ + struct fealnx_desc *cur; + + /* check this packet is received completely? */ + cur = np->cur_rx; + while (desno <= np->really_rx_count) { + ++desno; + if ((!(cur->status & RXOWN)) && + (cur->status & RXLSD)) + break; + /* goto next rx descriptor */ + cur = cur->next_desc_logical; + } + if (desno > np->really_rx_count) + need_to_reset = 1; + } else /* RXLSD did not find, something error */ + need_to_reset = 1; + + if (need_to_reset == 0) { + int i; + + dev->stats.rx_length_errors++; + + /* free all rx descriptors related this long pkt */ + for (i = 0; i < desno; ++i) { + if (!np->cur_rx->skbuff) { + printk(KERN_DEBUG + "%s: I'm scared\n", dev->name); + break; + } + np->cur_rx->status = RXOWN; + np->cur_rx = np->cur_rx->next_desc_logical; + } + continue; + } else { /* rx error, need to reset this chip */ + stop_nic_rx(ioaddr, np->crvalue); + reset_rx_descriptors(dev); + iowrite32(np->crvalue, ioaddr + TCRRCR); + } + break; /* exit the while loop */ + } + } else { /* this received pkt is ok */ + + struct sk_buff *skb; + /* Omit the four octet CRC from the length. */ + short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4; + +#ifndef final_version + if (debug) + printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" + " status %x.\n", pkt_len, rx_status); +#endif + + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(np->pci_dev, + np->cur_rx->buffer, + np->rx_buf_sz, + PCI_DMA_FROMDEVICE); + /* Call copy + cksum if available. */ + +#if ! 
defined(__alpha__) + skb_copy_to_linear_data(skb, + np->cur_rx->skbuff->data, pkt_len); + skb_put(skb, pkt_len); +#else + memcpy(skb_put(skb, pkt_len), + np->cur_rx->skbuff->data, pkt_len); +#endif + pci_dma_sync_single_for_device(np->pci_dev, + np->cur_rx->buffer, + np->rx_buf_sz, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(np->pci_dev, + np->cur_rx->buffer, + np->rx_buf_sz, + PCI_DMA_FROMDEVICE); + skb_put(skb = np->cur_rx->skbuff, pkt_len); + np->cur_rx->skbuff = NULL; + --np->really_rx_count; + } + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + + np->cur_rx = np->cur_rx->next_desc_logical; + } /* end of while loop */ + + /* allocate skb for rx buffers */ + allocate_rx_buffers(dev); + + return 0; +} + + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + + /* The chip only need report frame silently dropped. */ + if (netif_running(dev)) { + dev->stats.rx_missed_errors += + ioread32(ioaddr + TALLY) & 0x7fff; + dev->stats.rx_crc_errors += + (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16; + } + + return &dev->stats; +} + + +/* for dev->set_multicast_list */ +static void set_rx_mode(struct net_device *dev) +{ + spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock; + unsigned long flags; + spin_lock_irqsave(lp, flags); + __set_rx_mode(dev); + spin_unlock_irqrestore(lp, flags); +} + + +/* Take lock before calling */ +static void __set_rx_mode(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + u32 mc_filter[2]; /* Multicast hash filter */ + u32 rx_mode; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. 
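+ * Filling the 64-bit hash table with ones lets every multicast frame
+ * through, which is the desired behaviour in both of these cases.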
*/ + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = CR_W_AB | CR_W_AM; + } else { + struct netdev_hw_addr *ha; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + unsigned int bit; + bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; + mc_filter[bit >> 5] |= (1 << bit); + } + rx_mode = CR_W_AB | CR_W_AM; + } + + stop_nic_rxtx(ioaddr, np->crvalue); + + iowrite32(mc_filter[0], ioaddr + MAR0); + iowrite32(mc_filter[1], ioaddr + MAR1); + np->crvalue &= ~CR_W_RXMODEMASK; + np->crvalue |= rx_mode; + iowrite32(np->crvalue, ioaddr + TCRRCR); +} + +static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct netdev_private *np = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct netdev_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_gset(&np->mii, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct netdev_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_sset(&np->mii, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_nway_reset(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_nway_restart(&np->mii); +} + +static u32 netdev_get_link(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return mii_link_ok(&np->mii); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 value) +{ + debug = value; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .nway_reset = netdev_nway_reset, + .get_link = netdev_get_link, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, +}; + +static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct netdev_private *np = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL); + spin_unlock_irq(&np->lock); + + return rc; +} + + +static int netdev_close(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->mem; + int i; + + netif_stop_queue(dev); + + /* Disable interrupts by clearing the interrupt mask. */ + iowrite32(0x0000, ioaddr + IMR); + + /* Stop the chip's Tx and Rx processes. */ + stop_nic_rxtx(ioaddr, 0); + + del_timer_sync(&np->timer); + del_timer_sync(&np->reset_timer); + + free_irq(np->pci_dev->irq, dev); + + /* Free all the skbuffs in the Rx queue. 
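+ * Only the skbs and their streaming DMA mappings are released here;
+ * the tx side gets the same treatment in the loop that follows.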
*/ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = np->rx_ring[i].skbuff; + + np->rx_ring[i].status = 0; + if (skb) { + pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer, + np->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + np->rx_ring[i].skbuff = NULL; + } + } + + for (i = 0; i < TX_RING_SIZE; i++) { + struct sk_buff *skb = np->tx_ring[i].skbuff; + + if (skb) { + pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + np->tx_ring[i].skbuff = NULL; + } + } + + return 0; +} + +static const struct pci_device_id fealnx_pci_tbl[] = { + {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, + {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, + {} /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl); + + +static struct pci_driver fealnx_driver = { + .name = "fealnx", + .id_table = fealnx_pci_tbl, + .probe = fealnx_init_one, + .remove = fealnx_remove_one, +}; + +static int __init fealnx_init(void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + + return pci_register_driver(&fealnx_driver); +} + +static void __exit fealnx_exit(void) +{ + pci_unregister_driver(&fealnx_driver); +} + +module_init(fealnx_init); +module_exit(fealnx_exit); diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig new file mode 100644 index 000000000..25e342572 --- /dev/null +++ b/drivers/net/ethernet/freescale/Kconfig @@ -0,0 +1,96 @@ +# +# Freescale device configuration +# + +config NET_VENDOR_FREESCALE + bool "Freescale devices" + default y + depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ + M523x || M527x || M5272 || M528x || M520x || M532x || \ + ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Freescale devices. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_FREESCALE + +config FEC + tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)" + depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ + ARCH_MXC || SOC_IMX28) + default ARCH_MXC || SOC_IMX28 if ARM + select PHYLIB + select PTP_1588_CLOCK + ---help--- + Say Y here if you want to use the built-in 10/100 Fast ethernet + controller on some Motorola ColdFire and Freescale i.MX processors. + +config FEC_MPC52xx + tristate "FEC MPC52xx driver" + depends on PPC_MPC52xx && PPC_BESTCOMM + select CRC32 + select PHYLIB + select PPC_BESTCOMM_FEC + ---help--- + This option enables support for the MPC5200's on-chip + Fast Ethernet Controller + If compiled as module, it will be called fec_mpc52xx. + +config FEC_MPC52xx_MDIO + bool "FEC MPC52xx MDIO bus driver" + depends on FEC_MPC52xx + default y + ---help--- + The MPC5200's FEC can connect to the Ethernet either with + an external MII PHY chip or 10 Mbps 7-wire interface + (Motorola? industry standard). + If your board uses an external PHY connected to FEC, enable this. + If not sure, enable. + If compiled as module, it will be called fec_mpc52xx_phy. 
+ +source "drivers/net/ethernet/freescale/fs_enet/Kconfig" + +config FSL_PQ_MDIO + tristate "Freescale PQ MDIO" + select PHYLIB + ---help--- + This driver supports the MDIO bus used by the gianfar and UCC drivers. + +config FSL_XGMAC_MDIO + tristate "Freescale XGMAC MDIO" + select PHYLIB + select OF_MDIO + ---help--- + This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and + on the FMan mEMAC (which supports both Clauses 22 and 45) + +config UCC_GETH + tristate "Freescale QE Gigabit Ethernet" + depends on QUICC_ENGINE + select FSL_PQ_MDIO + select PHYLIB + ---help--- + This driver supports the Gigabit Ethernet mode of the QUICC Engine, + which is available on some Freescale SOCs. + +config UGETH_TX_ON_DEMAND + bool "Transmit on Demand support" + depends on UCC_GETH + +config GIANFAR + tristate "Gianfar Ethernet" + depends on FSL_SOC + select FSL_PQ_MDIO + select PHYLIB + select CRC32 + ---help--- + This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, + and MPC86xx family of chips, and the FEC on the 8540. + +endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile new file mode 100644 index 000000000..71debd1c1 --- /dev/null +++ b/drivers/net/ethernet/freescale/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Freescale network device drivers. +# + +obj-$(CONFIG_FEC) += fec.o +fec-objs :=fec_main.o fec_ptp.o +obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o +ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) + obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o +endif +obj-$(CONFIG_FS_ENET) += fs_enet/ +obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o +obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o +obj-$(CONFIG_GIANFAR) += gianfar_driver.o +obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o +gianfar_driver-objs := gianfar.o \ + gianfar_ethtool.o +obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o +ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h new file mode 100644 index 000000000..a86af8a74 --- /dev/null +++ b/drivers/net/ethernet/freescale/fec.h @@ -0,0 +1,569 @@ +/****************************************************************************/ + +/* + * fec.h -- Fast Ethernet Controller for Motorola ColdFire SoC + * processors. + * + * (C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com) + * (C) Copyright 2000-2001, Lineo (www.lineo.com) + */ + +/****************************************************************************/ +#ifndef FEC_H +#define FEC_H +/****************************************************************************/ + +#include +#include +#include +#include + +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) +/* + * Just figures, Motorola would have to change the offsets for + * registers in the same peripheral device on different models + * of the ColdFire! 
+ */ +#define FEC_IEVENT 0x004 /* Interrupt event reg */ +#define FEC_IMASK 0x008 /* Interrupt mask reg */ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */ +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ +#define FEC_ECNTRL 0x024 /* Ethernet control reg */ +#define FEC_MII_DATA 0x040 /* MII manage frame reg */ +#define FEC_MII_SPEED 0x044 /* MII speed control reg */ +#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */ +#define FEC_R_CNTRL 0x084 /* Receive control reg */ +#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */ +#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ +#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ +#define FEC_OPD 0x0ec /* Opcode + Pause duration */ +#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */ +#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */ +#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */ +#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */ +#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */ +#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */ +#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */ +#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */ +#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */ +#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */ +#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */ +#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */ +#define FEC_R_FSTART 0x150 /* FIFO receive start reg */ +#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */ +#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */ +#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */ +#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */ +#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */ +#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */ +#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */ +#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */ +#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */ +#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ +#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ +#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ +#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ +#define FEC_RACC 0x1c4 /* Receive Accelerator function */ +#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ +#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ +#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */ +#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */ +#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */ +#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */ +#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */ +#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */ +#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */ +#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ +#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ + +#define BM_MIIGSK_CFGR_MII 0x00 +#define BM_MIIGSK_CFGR_RMII 0x01 +#define BM_MIIGSK_CFGR_FRCONT_10M 0x40 + +#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */ +#define RMON_T_PACKETS 0x204 /* RMON TX packet count */ +#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */ 
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */ +#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */ +#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */ +#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */ +#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */ +#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */ +#define RMON_T_COL 0x224 /* RMON TX collision count */ +#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */ +#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */ +#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */ +#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */ +#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */ +#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */ +#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */ +#define RMON_T_OCTETS 0x244 /* RMON TX octets */ +#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */ +#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */ +#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */ +#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */ +#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */ +#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */ +#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */ +#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */ +#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */ +#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */ +#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */ +#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */ +#define RMON_R_PACKETS 0x284 /* RMON RX packet count */ +#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */ +#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */ +#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */ +#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */ +#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */ +#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */ +#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */ +#define RMON_R_RESVD_O 0x2a4 /* Reserved */ +#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */ +#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */ +#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */ +#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */ +#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */ +#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */ +#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */ +#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */ +#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */ +#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */ +#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */ +#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */ +#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */ +#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */ +#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */ + +#else + +#define FEC_ECNTRL 0x000 /* Ethernet control reg */ +#define FEC_IEVENT 0x004 /* Interrupt even reg */ +#define FEC_IMASK 0x008 /* Interrupt mask reg */ +#define FEC_IVEC 0x00c /* Interrupt vec status reg */ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive 
descriptor reg */ +#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0 +#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ +#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0 +#define FEC_MII_DATA 0x040 /* MII manage frame reg */ +#define FEC_MII_SPEED 0x044 /* MII speed control reg */ +#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */ +#define FEC_R_FSTART 0x090 /* FIFO receive start reg */ +#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */ +#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */ +#define FEC_R_CNTRL 0x104 /* Receive control reg */ +#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */ +#define FEC_X_CNTRL 0x144 /* Transmit Control reg */ +#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */ +#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */ +#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */ +#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */ +#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */ +#define FEC_R_DES_START_1 FEC_R_DES_START_0 +#define FEC_R_DES_START_2 FEC_R_DES_START_0 +#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */ +#define FEC_X_DES_START_1 FEC_X_DES_START_0 +#define FEC_X_DES_START_2 FEC_X_DES_START_0 +#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */ +#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0 +#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0 +#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */ +/* Not existed in real chip + * Just for pass build. + */ +#define FEC_RCMR_1 0xfff +#define FEC_RCMR_2 0xfff +#define FEC_DMA_CFG_1 0xfff +#define FEC_DMA_CFG_2 0xfff +#define FEC_TXIC0 0xfff +#define FEC_TXIC1 0xfff +#define FEC_TXIC2 0xfff +#define FEC_RXIC0 0xfff +#define FEC_RXIC1 0xfff +#define FEC_RXIC2 0xfff +#endif /* CONFIG_M5272 */ + + +/* + * Define the buffer descriptor structure. + */ +#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) +struct bufdesc { + unsigned short cbd_datlen; /* Data length */ + unsigned short cbd_sc; /* Control and status info */ + unsigned long cbd_bufaddr; /* Buffer address */ +}; +#else +struct bufdesc { + unsigned short cbd_sc; /* Control and status info */ + unsigned short cbd_datlen; /* Data length */ + unsigned long cbd_bufaddr; /* Buffer address */ +}; +#endif + +struct bufdesc_ex { + struct bufdesc desc; + unsigned long cbd_esc; + unsigned long cbd_prot; + unsigned long cbd_bdu; + unsigned long ts; + unsigned short res0[4]; +}; + +/* + * The following definitions courtesy of commproc.h, which where + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). + */ +#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ +#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ +#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ +#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ +#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */ +#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ +#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ +#define BD_SC_BR ((ushort)0x0020) /* Break received */ +#define BD_SC_FR ((ushort)0x0010) /* Framing error */ +#define BD_SC_PR ((ushort)0x0008) /* Parity error */ +#define BD_SC_OV ((ushort)0x0002) /* Overrun */ +#define BD_SC_CD ((ushort)0x0001) /* ?? */ + +/* Buffer descriptor control/status used by Ethernet receive. 
+ */ +#define BD_ENET_RX_EMPTY ((ushort)0x8000) +#define BD_ENET_RX_WRAP ((ushort)0x2000) +#define BD_ENET_RX_INTR ((ushort)0x1000) +#define BD_ENET_RX_LAST ((ushort)0x0800) +#define BD_ENET_RX_FIRST ((ushort)0x0400) +#define BD_ENET_RX_MISS ((ushort)0x0100) +#define BD_ENET_RX_LG ((ushort)0x0020) +#define BD_ENET_RX_NO ((ushort)0x0010) +#define BD_ENET_RX_SH ((ushort)0x0008) +#define BD_ENET_RX_CR ((ushort)0x0004) +#define BD_ENET_RX_OV ((ushort)0x0002) +#define BD_ENET_RX_CL ((ushort)0x0001) +#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ + +/* Enhanced buffer descriptor control/status used by Ethernet receive */ +#define BD_ENET_RX_VLAN 0x00000004 + +/* Buffer descriptor control/status used by Ethernet transmit. + */ +#define BD_ENET_TX_READY ((ushort)0x8000) +#define BD_ENET_TX_PAD ((ushort)0x4000) +#define BD_ENET_TX_WRAP ((ushort)0x2000) +#define BD_ENET_TX_INTR ((ushort)0x1000) +#define BD_ENET_TX_LAST ((ushort)0x0800) +#define BD_ENET_TX_TC ((ushort)0x0400) +#define BD_ENET_TX_DEF ((ushort)0x0200) +#define BD_ENET_TX_HB ((ushort)0x0100) +#define BD_ENET_TX_LC ((ushort)0x0080) +#define BD_ENET_TX_RL ((ushort)0x0040) +#define BD_ENET_TX_RCMASK ((ushort)0x003c) +#define BD_ENET_TX_UN ((ushort)0x0002) +#define BD_ENET_TX_CSL ((ushort)0x0001) +#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */ + +/* enhanced buffer descriptor control/status used by Ethernet transmit */ +#define BD_ENET_TX_INT 0x40000000 +#define BD_ENET_TX_TS 0x20000000 +#define BD_ENET_TX_PINS 0x10000000 +#define BD_ENET_TX_IINS 0x08000000 + + +/* This device has up to three irqs on some platforms */ +#define FEC_IRQ_NUM 3 + +/* Maximum number of queues supported + * ENET with AVB IP can support up to 3 independent tx queues and rx queues. + * User can point the queue number that is less than or equal to 3. + */ +#define FEC_ENET_MAX_TX_QS 3 +#define FEC_ENET_MAX_RX_QS 3 + +#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \ + (((X) == 2) ? \ + FEC_R_DES_START_2 : FEC_R_DES_START_0)) +#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \ + (((X) == 2) ? \ + FEC_X_DES_START_2 : FEC_X_DES_START_0)) +#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \ + (((X) == 2) ? \ + FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0)) +#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \ + (((X) == 2) ? \ + FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) +#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \ + (((X) == 2) ? \ + FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) + +#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) + +#define DMA_CLASS_EN (1 << 16) +#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1) +#define IDLE_SLOPE_MASK 0xffff +#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE(X) (((X) == 1) ? \ + (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ + (IDLE_SLOPE_2 & IDLE_SLOPE_MASK)) +#define RCMR_MATCHEN (0x1 << 16) +#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2)) +#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \ + RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3)) +#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \ + RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3)) +#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2) +#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20) + +/* The number of Tx and Rx buffers. These are allocated from the page + * pool. The code may assume these are power of two, so it it best + * to keep them that size. 
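+ * (For example, with 4 KiB pages FEC_ENET_RX_FRPPG below is
+ * 4096 / 2048 = 2, so RX_RING_SIZE comes out to 2 * 256 = 512,
+ * a power of two as assumed.)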
+ * We don't need to allocate pages for the transmitter. We just use + * the skbuffer directly. + */ + +#define FEC_ENET_RX_PAGES 256 +#define FEC_ENET_RX_FRSIZE 2048 +#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) +#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) +#define FEC_ENET_TX_FRSIZE 2048 +#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) +#define TX_RING_SIZE 512 /* Must be power of two */ +#define TX_RING_MOD_MASK 511 /* for this to work */ + +#define BD_ENET_RX_INT 0x00800000 +#define BD_ENET_RX_PTP ((ushort)0x0400) +#define BD_ENET_RX_ICE 0x00000020 +#define BD_ENET_RX_PCR 0x00000010 +#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) + +/* Interrupt events/masks. */ +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ +#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */ +#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */ +#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */ +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ +#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */ +#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */ +#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */ +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ +#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ +#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) +#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_TS_AVAIL ((uint)0x00010000) +#define FEC_ENET_TS_TIMER ((uint)0x00008000) + +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) + +/* ENET interrupt coalescing macro define */ +#define FEC_ITR_CLK_SEL (0x1 << 30) +#define FEC_ITR_EN (0x1 << 31) +#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20) +#define FEC_ITR_ICTT(X) ((X) & 0xffff) +#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */ +#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */ + +#define FEC_VLAN_TAG_LEN 0x04 +#define FEC_ETHTYPE_LEN 0x02 + +/* Controller is ENET-MAC */ +#define FEC_QUIRK_ENET_MAC (1 << 0) +/* Controller needs driver to swap frame */ +#define FEC_QUIRK_SWAP_FRAME (1 << 1) +/* Controller uses gasket */ +#define FEC_QUIRK_USE_GASKET (1 << 2) +/* Controller has GBIT support */ +#define FEC_QUIRK_HAS_GBIT (1 << 3) +/* Controller has extend desc buffer */ +#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) +/* Controller has hardware checksum support */ +#define FEC_QUIRK_HAS_CSUM (1 << 5) +/* Controller has hardware vlan support */ +#define FEC_QUIRK_HAS_VLAN (1 << 6) +/* ENET IP errata ERR006358 + * + * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously + * detected as not set during a prior frame transmission, then the + * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs + * were added to the ring and the ENET_TDAR[TDAR] bit is set. 
This results in + * frames not being transmitted until there is a 0-to-1 transition on + * ENET_TDAR[TDAR]. + */ +#define FEC_QUIRK_ERR006358 (1 << 7) +/* ENET IP hw AVB + * + * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support. + * - Two class indicators on receive with configurable priority + * - Two class indicators and line speed timer on transmit allowing + * implementation class credit based shapers externally + * - Additional DMA registers provisioned to allow managing up to 3 + * independent rings + */ +#define FEC_QUIRK_HAS_AVB (1 << 8) +/* There is a TDAR race condition for mutliQ when the software sets TDAR + * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). + * This will cause the udma_tx and udma_tx_arbiter state machines to hang. + * The issue exist at i.MX6SX enet IP. + */ +#define FEC_QUIRK_ERR007885 (1 << 9) +/* ENET Block Guide/ Chapter for the iMX6SX (PELE) address one issue: + * After set ENET_ATCR[Capture], there need some time cycles before the counter + * value is capture in the register clock domain. + * The wait-time-cycles is at least 6 clock cycles of the slower clock between + * the register clock and the 1588 clock. The 1588 ts_clk is fixed to 25Mhz, + * register clock is 66Mhz, so the wait-time-cycles must be greater than 240ns + * (40ns * 6). + */ +#define FEC_QUIRK_BUG_CAPTURE (1 << 10) +/* Controller has only one MDIO bus */ +#define FEC_QUIRK_SINGLE_MDIO (1 << 11) + +struct fec_enet_priv_tx_q { + int index; + unsigned char *tx_bounce[TX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + + dma_addr_t bd_dma; + struct bufdesc *tx_bd_base; + uint tx_ring_size; + + unsigned short tx_stop_threshold; + unsigned short tx_wake_threshold; + + struct bufdesc *cur_tx; + struct bufdesc *dirty_tx; + char *tso_hdrs; + dma_addr_t tso_hdrs_dma; +}; + +struct fec_enet_priv_rx_q { + int index; + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + + dma_addr_t bd_dma; + struct bufdesc *rx_bd_base; + uint rx_ring_size; + + struct bufdesc *cur_rx; +}; + +/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and + * tx_bd_base always point to the base of the buffer descriptors. The + * cur_rx and cur_tx point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct fec_enet_private { + /* Hardware registers of the FEC device */ + void __iomem *hwp; + + struct net_device *netdev; + + struct clk *clk_ipg; + struct clk *clk_ahb; + struct clk *clk_ref; + struct clk *clk_enet_out; + struct clk *clk_ptp; + + bool ptp_clk_on; + struct mutex ptp_clk_mutex; + unsigned int num_tx_queues; + unsigned int num_rx_queues; + + /* The saved address of a sent-in-place packet/buffer, for skfree(). 
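+ * (The skb pointers themselves live in the per-queue tx_skbuff[] arrays
+ * of the fec_enet_priv_tx_q structures referenced just below.)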
*/ + struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS]; + struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS]; + + unsigned int total_tx_ring_size; + unsigned int total_rx_ring_size; + + unsigned long work_tx; + unsigned long work_rx; + unsigned long work_ts; + unsigned long work_mdio; + + unsigned short bufdesc_size; + + struct platform_device *pdev; + + int dev_id; + + /* Phylib and MDIO interface */ + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + int mii_timeout; + uint phy_speed; + phy_interface_t phy_interface; + struct device_node *phy_node; + int link; + int full_duplex; + int speed; + struct completion mdio_done; + int irq[FEC_IRQ_NUM]; + bool bufdesc_ex; + int pause_flag; + int wol_flag; + u32 quirks; + + struct napi_struct napi; + int csum_flags; + + struct work_struct tx_timeout_work; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + unsigned long last_overflow_check; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + int rx_hwtstamp_filter; + u32 base_incval; + u32 cycle_speed; + int hwts_rx_en; + int hwts_tx_en; + struct delayed_work time_keep; + struct regulator *reg_phy; + + unsigned int tx_align; + unsigned int rx_align; + + /* hw interrupt coalesce */ + unsigned int rx_pkts_itr; + unsigned int rx_time_itr; + unsigned int tx_pkts_itr; + unsigned int tx_time_itr; + unsigned int itr_clk_rate; + + u32 rx_copybreak; + + /* ptp clock period in ns*/ + unsigned int ptp_inc; + + /* pps */ + int pps_channel; + unsigned int reload_period; + int pps_enable; + unsigned int next_counter; +}; + +void fec_ptp_init(struct platform_device *pdev); +void fec_ptp_start_cyclecounter(struct net_device *ndev); +int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); +int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); +uint fec_ptp_check_pps_event(struct fec_enet_private *fep); + +/****************************************************************************/ +#endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c new file mode 100644 index 000000000..66d47e448 --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -0,0 +1,3503 @@ +/* + * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. + * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) + * + * Right now, I am very wasteful with the buffers. I allocate memory + * pages and then divide them into 2K frame buffers. This way I know I + * have buffers large enough to hold one frame within one buffer descriptor. + * Once I get this working, I will use 64 or 128 byte CPM buffers, which + * will be much more memory efficient and will easily handle lots of + * small packets. + * + * Much better multiple PHY support by Magnus Damm. + * Copyright (c) 2000 Ericsson Radio Systems AB. + * + * Support for FEC controller of ColdFire processors. + * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) + * + * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) + * Copyright (c) 2004-2006 Macq Electronique SA. + * + * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "fec.h" + +static void set_multicast_list(struct net_device *ndev); +static void fec_enet_itr_coal_init(struct net_device *ndev); + +#define DRIVER_NAME "fec" + +#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) + +/* Pause frame feild and FIFO threshold */ +#define FEC_ENET_FCE (1 << 5) +#define FEC_ENET_RSEM_V 0x84 +#define FEC_ENET_RSFL_V 16 +#define FEC_ENET_RAEM_V 0x8 +#define FEC_ENET_RAFL_V 0x8 +#define FEC_ENET_OPD_V 0xFFF0 + +static struct platform_device_id fec_devtype[] = { + { + /* keep it for coldfire */ + .name = DRIVER_NAME, + .driver_data = 0, + }, { + .name = "imx25-fec", + .driver_data = FEC_QUIRK_USE_GASKET, + }, { + .name = "imx27-fec", + .driver_data = 0, + }, { + .name = "imx28-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | + FEC_QUIRK_SINGLE_MDIO, + }, { + .name = "imx6q-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358, + }, { + .name = "mvf600-fec", + .driver_data = FEC_QUIRK_ENET_MAC, + }, { + .name = "imx6sx-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, fec_devtype); + +enum imx_fec_type { + IMX25_FEC = 1, /* runs on i.mx25/50/53 */ + IMX27_FEC, /* runs on i.mx27/35/51 */ + IMX28_FEC, + IMX6Q_FEC, + MVF600_FEC, + IMX6SX_FEC, +}; + +static const struct of_device_id fec_dt_ids[] = { + { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, + { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, + { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, + { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, + { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fec_dt_ids); + +static unsigned char macaddr[ETH_ALEN]; +module_param_array(macaddr, byte, NULL, 0); +MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); + +#if defined(CONFIG_M5272) +/* + * Some hardware gets it MAC address out of local flash memory. + * if this is non-zero then assume it is the address to get MAC from. + */ +#if defined(CONFIG_NETtel) +#define FEC_FLASHMAC 0xf0006006 +#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) +#define FEC_FLASHMAC 0xf0006000 +#elif defined(CONFIG_CANCam) +#define FEC_FLASHMAC 0xf0020000 +#elif defined (CONFIG_M5272C3) +#define FEC_FLASHMAC (0xffe04000 + 4) +#elif defined(CONFIG_MOD5272) +#define FEC_FLASHMAC 0xffc0406b +#else +#define FEC_FLASHMAC 0 +#endif +#endif /* CONFIG_M5272 */ + +/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 
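+ * PKT_MAXBUF_SIZE below is the usual worst case on the wire: 1500 bytes
+ * of payload + 14 byte Ethernet header + 4 byte VLAN tag + 4 byte FCS
+ * = 1522 bytes.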
+ */ +#define PKT_MAXBUF_SIZE 1522 +#define PKT_MINBUF_SIZE 64 +#define PKT_MAXBLR_SIZE 1536 + +/* FEC receive acceleration */ +#define FEC_RACC_IPDIS (1 << 1) +#define FEC_RACC_PRODIS (1 << 2) +#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) + +/* + * The 5270/5271/5280/5282/532x RX control register also contains maximum frame + * size bits. Other FEC hardware does not, so we need to take that into + * account when setting it. + */ +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) +#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) +#else +#define OPT_FRAME_SIZE 0 +#endif + +/* FEC MII MMFR bits definition */ +#define FEC_MMFR_ST (1 << 30) +#define FEC_MMFR_OP_READ (2 << 28) +#define FEC_MMFR_OP_WRITE (1 << 28) +#define FEC_MMFR_PA(v) ((v & 0x1f) << 23) +#define FEC_MMFR_RA(v) ((v & 0x1f) << 18) +#define FEC_MMFR_TA (2 << 16) +#define FEC_MMFR_DATA(v) (v & 0xffff) +/* FEC ECR bits definition */ +#define FEC_ECR_MAGICEN (1 << 2) +#define FEC_ECR_SLEEP (1 << 3) + +#define FEC_MII_TIMEOUT 30000 /* us */ + +/* Transmitter timeout */ +#define TX_TIMEOUT (2 * HZ) + +#define FEC_PAUSE_FLAG_AUTONEG 0x1 +#define FEC_PAUSE_FLAG_ENABLE 0x2 +#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) +#define FEC_WOL_FLAG_ENABLE (0x1 << 1) +#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) + +#define COPYBREAK_DEFAULT 256 + +#define TSO_HEADER_SIZE 128 +/* Max number of allowed TCP segments for software TSO */ +#define FEC_MAX_TSO_SEGS 100 +#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +#define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_dma) && \ + (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + +static int mii_cnt; + +static inline +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) +{ + struct bufdesc *new_bd = bdp + 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; + } else { + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? + ex_base : ex_new_bd); + else + return (new_bd >= (base + ring_size)) ? + base : new_bd; +} + +static inline +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) +{ + struct bufdesc *new_bd = bdp - 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; + } else { + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd < ex_base) ? + (ex_new_bd + ring_size) : ex_new_bd); + else + return (new_bd < base) ? 
(new_bd + ring_size) : new_bd; +} + +static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, + struct fec_enet_private *fep) +{ + return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; +} + +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, + struct fec_enet_priv_tx_q *txq) +{ + int entries; + + entries = ((const char *)txq->dirty_tx - + (const char *)txq->cur_tx) / fep->bufdesc_size - 1; + + return entries > 0 ? entries : entries + txq->tx_ring_size; +} + +static void swap_buffer(void *bufaddr, int len) +{ + int i; + unsigned int *buf = bufaddr; + + for (i = 0; i < len; i += 4, buf++) + swab32s(buf); +} + +static void swap_buffer2(void *dst_buf, void *src_buf, int len) +{ + int i; + unsigned int *src = src_buf; + unsigned int *dst = dst_buf; + + for (i = 0; i < len; i += 4, src++, dst++) + *dst = swab32p(src); +} + +static void fec_dump(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + int index = 0; + + netdev_info(ndev, "TX ring dump\n"); + pr_info("Nr SC addr len SKB\n"); + + txq = fep->tx_queue[0]; + bdp = txq->tx_bd_base; + + do { + pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", + index, + bdp == txq->cur_tx ? 'S' : ' ', + bdp == txq->dirty_tx ? 'H' : ' ', + bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, + txq->tx_skbuff[index]); + bdp = fec_enet_get_nextdesc(bdp, fep, 0); + index++; + } while (bdp != txq->tx_bd_base); +} + +static inline bool is_ipv4_pkt(struct sk_buff *skb) +{ + return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; +} + +static int +fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) +{ + /* Only run for packets requiring a checksum. */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (unlikely(skb_cow_head(skb, 0))) + return -1; + + if (is_ipv4_pkt(skb)) + ip_hdr(skb)->check = 0; + *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; + + return 0; +} + +static int +fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct bufdesc *bdp = txq->cur_tx; + struct bufdesc_ex *ebdp; + int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned short queue = skb_get_queue_mapping(skb); + int frag, frag_len; + unsigned short status; + unsigned int estatus = 0; + skb_frag_t *this_frag; + unsigned int index; + void *bufaddr; + dma_addr_t addr; + int i; + + for (frag = 0; frag < nr_frags; frag++) { + this_frag = &skb_shinfo(skb)->frags[frag]; + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + ebdp = (struct bufdesc_ex *)bdp; + + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); + frag_len = skb_shinfo(skb)->frags[frag].size; + + /* Handle the last BD specially */ + if (frag == nr_frags - 1) { + status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); + if (fep->bufdesc_ex) { + estatus |= BD_ENET_TX_INT; + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP && fep->hwts_tx_en)) + estatus |= BD_ENET_TX_TS; + } + } + + if (fep->bufdesc_ex) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; + + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & 
fep->tx_align || + fep->quirks & FEC_QUIRK_SWAP_FRAME) { + memcpy(txq->tx_bounce[index], bufaddr, frag_len); + bufaddr = txq->tx_bounce[index]; + + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, frag_len); + } + + addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, + DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + goto dma_mapping_error; + } + + bdp->cbd_bufaddr = addr; + bdp->cbd_datlen = frag_len; + bdp->cbd_sc = status; + } + + txq->cur_tx = bdp; + + return 0; + +dma_mapping_error: + bdp = txq->cur_tx; + for (i = 0; i < frag; i++) { + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + } + return NETDEV_TX_OK; +} + +static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int nr_frags = skb_shinfo(skb)->nr_frags; + struct bufdesc *bdp, *last_bdp; + void *bufaddr; + dma_addr_t addr; + unsigned short status; + unsigned short buflen; + unsigned short queue; + unsigned int estatus = 0; + unsigned int index; + int entries_free; + int ret; + + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free < MAX_SKB_FRAGS + 1) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "NOT enough BD for SG!\n"); + return NETDEV_TX_OK; + } + + /* Protocol checksum off-load for TCP and UDP. */ + if (fec_enet_clear_csum(skb, ndev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Fill in a Tx ring entry */ + bdp = txq->cur_tx; + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + + /* Set buffer length and buffer pointer */ + bufaddr = skb->data; + buflen = skb_headlen(skb); + + queue = skb_get_queue_mapping(skb); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & fep->tx_align || + fep->quirks & FEC_QUIRK_SWAP_FRAME) { + memcpy(txq->tx_bounce[index], skb->data, buflen); + bufaddr = txq->tx_bounce[index]; + + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, buflen); + } + + /* Push the data cache so the CPM does not get stale memory data. 
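+ * dma_map_single(..., DMA_TO_DEVICE) below handles any cache
+ * maintenance needed for the buffer as part of the streaming mapping.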
*/ + addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_OK; + } + + if (nr_frags) { + ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); + if (ret) + return ret; + } else { + status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); + if (fep->bufdesc_ex) { + estatus = BD_ENET_TX_INT; + if (unlikely(skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP && fep->hwts_tx_en)) + estatus |= BD_ENET_TX_TS; + } + } + + if (fep->bufdesc_ex) { + + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + fep->hwts_tx_en)) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + if (fep->quirks & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + last_bdp = txq->cur_tx; + index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); + /* Save skb pointer */ + txq->tx_skbuff[index] = skb; + + bdp->cbd_datlen = buflen; + bdp->cbd_bufaddr = addr; + + /* Send it on its way. Tell FEC it's ready, interrupt when done, + * it's the last BD of the frame, and to put the CRC on the end. + */ + status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); + bdp->cbd_sc = status; + + /* If this was the last BD in the ring, start at the beginning again. */ + bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); + + skb_tx_timestamp(skb); + + txq->cur_tx = bdp; + + /* Trigger transmission start */ + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); + + return 0; +} + +static int +fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, + struct net_device *ndev, + struct bufdesc *bdp, int index, char *data, + int size, bool last_tcp, bool is_last) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); + unsigned short queue = skb_get_queue_mapping(skb); + unsigned short status; + unsigned int estatus = 0; + dma_addr_t addr; + + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + + status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); + + if (((unsigned long) data) & fep->tx_align || + fep->quirks & FEC_QUIRK_SWAP_FRAME) { + memcpy(txq->tx_bounce[index], data, size); + data = txq->tx_bounce[index]; + + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(data, size); + } + + addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_BUSY; + } + + bdp->cbd_datlen = size; + bdp->cbd_bufaddr = addr; + + if (fep->bufdesc_ex) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + /* Handle the last BD specially */ + if (last_tcp) + status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); + if (is_last) { + status |= BD_ENET_TX_INTR; + if (fep->bufdesc_ex) + ebdp->cbd_esc |= BD_ENET_TX_INT; + } + + bdp->cbd_sc = status; + + return 0; +} + +static int +fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev, + struct bufdesc *bdp, int index) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int 
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); + unsigned short queue = skb_get_queue_mapping(skb); + void *bufaddr; + unsigned long dmabuf; + unsigned short status; + unsigned int estatus = 0; + + status = bdp->cbd_sc; + status &= ~BD_ENET_TX_STATS; + status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); + + bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; + dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; + if (((unsigned long)bufaddr) & fep->tx_align || + fep->quirks & FEC_QUIRK_SWAP_FRAME) { + memcpy(txq->tx_bounce[index], skb->data, hdr_len); + bufaddr = txq->tx_bounce[index]; + + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, hdr_len); + + dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, + hdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_BUSY; + } + } + + bdp->cbd_bufaddr = dmabuf; + bdp->cbd_datlen = hdr_len; + + if (fep->bufdesc_ex) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) + estatus |= FEC_TX_BD_FTYPE(queue); + if (skb->ip_summed == CHECKSUM_PARTIAL) + estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; + ebdp->cbd_bdu = 0; + ebdp->cbd_esc = estatus; + } + + bdp->cbd_sc = status; + + return 0; +} + +static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int total_len, data_left; + struct bufdesc *bdp = txq->cur_tx; + unsigned short queue = skb_get_queue_mapping(skb); + struct tso_t tso; + unsigned int index = 0; + int ret; + + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "NOT enough BD for TSO!\n"); + return NETDEV_TX_OK; + } + + /* Protocol checksum off-load for TCP and UDP. 
*/ + if (fec_enet_clear_csum(skb, ndev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Initialize the TSO handler, and prepare the first payload */ + tso_start(skb, &tso); + + total_len = skb->len - hdr_len; + while (total_len > 0) { + char *hdr; + + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); + if (ret) + goto err_release; + + while (data_left > 0) { + int size; + + size = min_t(int, tso.size, data_left); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + index = fec_enet_get_bd_index(txq->tx_bd_base, + bdp, fep); + ret = fec_enet_txq_put_data_tso(txq, skb, ndev, + bdp, index, + tso.data, size, + size == data_left, + total_len == 0); + if (ret) + goto err_release; + + data_left -= size; + tso_build_data(skb, &tso, size); + } + + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + } + + /* Save skb pointer */ + txq->tx_skbuff[index] = skb; + + skb_tx_timestamp(skb); + txq->cur_tx = bdp; + + /* Trigger transmission start */ + if (!(fep->quirks & FEC_QUIRK_ERR007885) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); + + return 0; + +err_release: + /* TODO: Release all used data descriptors for TSO */ + return ret; +} + +static netdev_tx_t +fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int entries_free; + unsigned short queue; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; + int ret; + + queue = skb_get_queue_mapping(skb); + txq = fep->tx_queue[queue]; + nq = netdev_get_tx_queue(ndev, queue); + + if (skb_is_gso(skb)) + ret = fec_enet_txq_submit_tso(txq, skb, ndev); + else + ret = fec_enet_txq_submit_skb(txq, skb, ndev); + if (ret) + return ret; + + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free <= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); + + return NETDEV_TX_OK; +} + +/* Init RX & TX buffer descriptors + */ +static void fec_enet_bd_init(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + struct bufdesc *bdp; + unsigned int i; + unsigned int q; + + for (q = 0; q < fep->num_rx_queues; q++) { + /* Initialize the receive buffer descriptors. */ + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; + + for (i = 0; i < rxq->rx_ring_size; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + + rxq->cur_rx = rxq->rx_bd_base; + } + + for (q = 0; q < fep->num_tx_queues; q++) { + /* ...and the same for transmit */ + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + txq->cur_tx = bdp; + + for (i = 0; i < txq->tx_ring_size; i++) { + /* Initialize the BD for every fragment in the page. 
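To make the fec_enet_txq_submit_tso() loop above easier to follow: the payload after the headers is cut into segments of at most gso_size bytes, and each segment consumes one header descriptor plus at least one data descriptor, which is why tso_count_descs() is checked against the free descriptor count before anything is queued. A small stand-alone model of the segmentation arithmetic only (the sizes in main() are hypothetical):

#include <stdio.h>

/* Model of the outer TSO loop above: payload bytes after 'hdr_len'
 * are emitted in chunks of at most 'gso_size' per segment. */
static int count_tso_segments(int len, int hdr_len, int gso_size)
{
	int total_len = len - hdr_len;
	int segs = 0;

	while (total_len > 0) {
		int data_left = total_len < gso_size ? total_len : gso_size;

		total_len -= data_left;
		segs++;
	}
	return segs;
}

int main(void)
{
	/* example: 10000-byte skb, 54 bytes of headers, MSS of 1448 */
	printf("%d segments\n", count_tso_segments(10000, 54, 1448));
	return 0;
}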
*/ + bdp->cbd_sc = 0; + if (txq->tx_skbuff[i]) { + dev_kfree_skb_any(txq->tx_skbuff[i]); + txq->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + txq->dirty_tx = bdp; + } +} + +static void fec_enet_active_rxring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + + for (i = 0; i < fep->num_rx_queues; i++) + writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); +} + +static void fec_enet_enable_ring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + int i; + + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); + + /* enable DMA1/2 */ + if (i) + writel(RCMR_MATCHEN | RCMR_CMP(i), + fep->hwp + FEC_RCMR(i)); + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(DMA_CLASS_EN | IDLE_SLOPE(i), + fep->hwp + FEC_DMA_CFG(i)); + } +} + +static void fec_enet_reset_skb(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + int i, j; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + + for (j = 0; j < txq->tx_ring_size; j++) { + if (txq->tx_skbuff[j]) { + dev_kfree_skb_any(txq->tx_skbuff[j]); + txq->tx_skbuff[j] = NULL; + } + } + } +} + +/* + * This function is called to start or restart the FEC during a link + * change, transmit timeout, or to reconfigure the FEC. The network + * packet processing for this device must be stopped before this call. + */ +static void +fec_restart(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u32 val; + u32 temp_mac[2]; + u32 rcntl = OPT_FRAME_SIZE | 0x04; + u32 ecntl = 0x2; /* ETHEREN */ + + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } + + /* + * enet-mac reset will reset mac address registers too, + * so need to reconfigure it. + */ + if (fep->quirks & FEC_QUIRK_ENET_MAC) { + memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); + writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); + writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); + } + + /* Clear any outstanding interrupt. */ + writel(0xffffffff, fep->hwp + FEC_IEVENT); + + fec_enet_bd_init(ndev); + + fec_enet_enable_ring(ndev); + + /* Reset tx SKB buffers. */ + fec_enet_reset_skb(ndev); + + /* Enable MII mode */ + if (fep->full_duplex == DUPLEX_FULL) { + /* FD enable */ + writel(0x04, fep->hwp + FEC_X_CNTRL); + } else { + /* No Rcv on Xmit */ + rcntl |= 0x02; + writel(0x0, fep->hwp + FEC_X_CNTRL); + } + + /* Set MII speed */ + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + +#if !defined(CONFIG_M5272) + /* set RX checksum */ + val = readl(fep->hwp + FEC_RACC); + if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + val |= FEC_RACC_OPTIONS; + else + val &= ~FEC_RACC_OPTIONS; + writel(val, fep->hwp + FEC_RACC); +#endif + + /* + * The phy interface and speed need to get configured + * differently on enet-mac. 
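fec_restart() above re-writes the station address because an enet-mac reset clears the address registers; fec_set_mac_address() later in this file writes the same layout with explicit shifts. A small sketch of that register layout, written shift-by-shift rather than with the cpu_to_be32() cast so it reads the same regardless of host endianness:

#include <stdint.h>

/* Pack a 6-byte MAC into the two 32-bit values written to the FEC's
 * ADDR_LOW/ADDR_HIGH registers above: bytes 0-3 fill ADDR_LOW with
 * byte 0 in the most significant position, bytes 4-5 fill the top
 * half of ADDR_HIGH. */
static void fec_pack_mac(const uint8_t mac[6], uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)mac[0] << 24 | (uint32_t)mac[1] << 16 |
	      (uint32_t)mac[2] << 8  | mac[3];
	*hi = (uint32_t)mac[4] << 24 | (uint32_t)mac[5] << 16;
}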
+ */ + if (fep->quirks & FEC_QUIRK_ENET_MAC) { + /* Enable flow control and length check */ + rcntl |= 0x40000000 | 0x00000020; + + /* RGMII, RMII or MII */ + if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) + rcntl |= (1 << 6); + else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + rcntl |= (1 << 8); + else + rcntl &= ~(1 << 8); + + /* 1G, 100M or 10M */ + if (fep->phy_dev) { + if (fep->phy_dev->speed == SPEED_1000) + ecntl |= (1 << 5); + else if (fep->phy_dev->speed == SPEED_100) + rcntl &= ~(1 << 9); + else + rcntl |= (1 << 9); + } + } else { +#ifdef FEC_MIIGSK_ENR + if (fep->quirks & FEC_QUIRK_USE_GASKET) { + u32 cfgr; + /* disable the gasket and wait */ + writel(0, fep->hwp + FEC_MIIGSK_ENR); + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) + udelay(1); + + /* + * configure the gasket: + * RMII, 50 MHz, no loopback, no echo + * MII, 25 MHz, no loopback, no echo + */ + cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; + if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) + cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; + writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); + + /* re-enable the gasket */ + writel(2, fep->hwp + FEC_MIIGSK_ENR); + } +#endif + } + +#if !defined(CONFIG_M5272) + /* enable pause frame*/ + if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || + ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && + fep->phy_dev && fep->phy_dev->pause)) { + rcntl |= FEC_ENET_FCE; + + /* set FIFO threshold parameter to reduce overrun */ + writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); + writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); + writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); + writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); + + /* OPD */ + writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); + } else { + rcntl &= ~FEC_ENET_FCE; + } +#endif /* !defined(CONFIG_M5272) */ + + writel(rcntl, fep->hwp + FEC_R_CNTRL); + + /* Setup multicast filter. */ + set_multicast_list(ndev); +#ifndef CONFIG_M5272 + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_HASH_TABLE_LOW); +#endif + + if (fep->quirks & FEC_QUIRK_ENET_MAC) { + /* enable ENET endian swap */ + ecntl |= (1 << 8); + /* enable ENET store and forward mode */ + writel(1 << 8, fep->hwp + FEC_X_WMRK); + } + + if (fep->bufdesc_ex) + ecntl |= (1 << 4); + +#ifndef CONFIG_M5272 + /* Enable the MIB statistic event counters */ + writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); +#endif + + /* And last, enable the transmit and receive processing */ + writel(ecntl, fep->hwp + FEC_ECNTRL); + fec_enet_active_rxring(ndev); + + if (fep->bufdesc_ex) + fec_ptp_start_cyclecounter(ndev); + + /* Enable interrupts we wish to service */ + if (fep->link) + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + else + writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); + + /* Init the interrupt coalescing */ + fec_enet_itr_coal_init(ndev); + +} + +static void +fec_stop(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; + u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); + u32 val; + + /* We cannot expect a graceful transmit stop without link !!! 
*/ + if (fep->link) { + writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ + udelay(10); + if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) + netdev_err(ndev, "Graceful transmit stop did not complete!\n"); + } + + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ + if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + } else { + writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); + val = readl(fep->hwp + FEC_ECNTRL); + val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + + if (pdata && pdata->sleep_mode_enable) + pdata->sleep_mode_enable(true); + } + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + + /* We have to keep ENET enabled to have MII interrupt stay working */ + if (fep->quirks & FEC_QUIRK_ENET_MAC && + !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + writel(2, fep->hwp + FEC_ECNTRL); + writel(rmii_mode, fep->hwp + FEC_R_CNTRL); + } +} + + +static void +fec_timeout(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + fec_dump(ndev); + + ndev->stats.tx_errors++; + + schedule_work(&fep->tx_timeout_work); +} + +static void fec_enet_timeout_work(struct work_struct *work) +{ + struct fec_enet_private *fep = + container_of(work, struct fec_enet_private, tx_timeout_work); + struct net_device *ndev = fep->netdev; + + rtnl_lock(); + if (netif_device_present(ndev) || netif_running(ndev)) { + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_restart(ndev); + netif_wake_queue(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + } + rtnl_unlock(); +} + +static void +fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, + struct skb_shared_hwtstamps *hwtstamps) +{ + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + ns = timecounter_cyc2time(&fep->tc, ts); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static void +fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) +{ + struct fec_enet_private *fep; + struct bufdesc *bdp; + unsigned short status; + struct sk_buff *skb; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; + int index = 0; + int entries_free; + + fep = netdev_priv(ndev); + + queue_id = FEC_ENET_GET_QUQUE(queue_id); + + txq = fep->tx_queue[queue_id]; + /* get next bdp of dirty_tx */ + nq = netdev_get_tx_queue(ndev, queue_id); + bdp = txq->dirty_tx; + + /* get next bdp of dirty_tx */ + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + + while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { + + /* current queue is empty */ + if (bdp == txq->cur_tx) + break; + + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + + skb = txq->tx_skbuff[index]; + txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; + if (!skb) { + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + continue; + } + + /* Check for errors. 
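fec_enet_hwtstamp() above turns the raw timestamp captured in the descriptor into nanoseconds via the kernel's timecounter under tmreg_lock. The following is only a simplified stand-in for what that conversion does, with illustrative field names that are not the kernel's:

#include <stdint.h>

/* Extend a free-running hardware cycle count into nanoseconds using a
 * cached reference point and a mult/shift scale, as the timecounter
 * used by the driver does internally. */
struct simple_tc {
	uint64_t cycle_last;	/* counter value at the last update */
	uint64_t nsec_base;	/* nanoseconds corresponding to cycle_last */
	uint32_t mult;		/* ns = cycles * mult >> shift */
	uint32_t shift;
	uint64_t mask;		/* width of the hardware counter */
};

static uint64_t simple_cyc2time(const struct simple_tc *tc, uint64_t cycles)
{
	uint64_t delta = (cycles - tc->cycle_last) & tc->mask;

	return tc->nsec_base + ((delta * tc->mult) >> tc->shift);
}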
*/ + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | + BD_ENET_TX_CSL)) { + ndev->stats.tx_errors++; + if (status & BD_ENET_TX_HB) /* No heartbeat */ + ndev->stats.tx_heartbeat_errors++; + if (status & BD_ENET_TX_LC) /* Late collision */ + ndev->stats.tx_window_errors++; + if (status & BD_ENET_TX_RL) /* Retrans limit */ + ndev->stats.tx_aborted_errors++; + if (status & BD_ENET_TX_UN) /* Underrun */ + ndev->stats.tx_fifo_errors++; + if (status & BD_ENET_TX_CSL) /* Carrier lost */ + ndev->stats.tx_carrier_errors++; + } else { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + } + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && + fep->bufdesc_ex) { + struct skb_shared_hwtstamps shhwtstamps; + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); + skb_tstamp_tx(skb, &shhwtstamps); + } + + /* Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (status & BD_ENET_TX_DEF) + ndev->stats.collisions++; + + /* Free the sk buffer associated with this last transmit */ + dev_kfree_skb_any(skb); + + txq->dirty_tx = bdp; + + /* Update pointer to next buffer descriptor to be transmitted */ + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + + /* Since we have freed up a buffer, the ring is no longer full + */ + if (netif_queue_stopped(ndev)) { + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free >= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); + } + } + + /* ERR006538: Keep the transmitter going */ + if (bdp != txq->cur_tx && + readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); +} + +static void +fec_enet_tx(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u16 queue_id; + /* First process class A queue, then Class B and Best Effort queue */ + for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { + clear_bit(queue_id, &fep->work_tx); + fec_enet_tx_queue(ndev, queue_id); + } + return; +} + +static int +fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int off; + + off = ((unsigned long)skb->data) & fep->rx_align; + if (off) + skb_reserve(skb, fep->rx_align + 1 - off); + + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + if (net_ratelimit()) + netdev_err(ndev, "Rx DMA memory map failed\n"); + return -ENOMEM; + } + + return 0; +} + +static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, + struct bufdesc *bdp, u32 length, bool swap) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct sk_buff *new_skb; + + if (length > fep->rx_copybreak) + return false; + + new_skb = netdev_alloc_skb(ndev, length); + if (!new_skb) + return false; + + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + if (!swap) + memcpy(new_skb->data, (*skb)->data, length); + else + swap_buffer2(new_skb->data, (*skb)->data, length); + *skb = new_skb; + + return true; +} + +/* During a receive, the cur_rx points to the current incoming buffer. + * When we update through the ring, if the next incoming buffer has + * not been given to the system, we just set the empty indicator, + * effectively tossing the packet. 
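The stop/wake logic around the TX ring works as hysteresis: fec_enet_start_xmit() stops the queue once entries_free drops to tx_stop_threshold, and the completion path above only wakes it again once at least tx_wake_threshold descriptors are free (the thresholds are set in fec_enet_alloc_queue() further down). An illustrative sketch of that policy; the free-count formula here is the usual producer/consumer ring arithmetic, not the driver's own helper, which is defined earlier in the file:

#include <stdbool.h>

/* One slot is kept unused so a full ring can be told from an empty one. */
static int ring_free(int prod, int cons, int ring_size)
{
	int used = prod - cons;

	if (used < 0)
		used += ring_size;
	return ring_size - 1 - used;
}

static bool should_stop(int free, int stop_thresh) { return free <= stop_thresh; }
static bool should_wake(int free, int wake_thresh) { return free >= wake_thresh; }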
+ */ +static int +fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_rx_q *rxq; + struct bufdesc *bdp; + unsigned short status; + struct sk_buff *skb_new = NULL; + struct sk_buff *skb; + ushort pkt_len; + __u8 *data; + int pkt_received = 0; + struct bufdesc_ex *ebdp = NULL; + bool vlan_packet_rcvd = false; + u16 vlan_tag; + int index = 0; + bool is_copybreak; + bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; + +#ifdef CONFIG_M532x + flush_cache_all(); +#endif + queue_id = FEC_ENET_GET_QUQUE(queue_id); + rxq = fep->rx_queue[queue_id]; + + /* First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = rxq->cur_rx; + + while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + + if (pkt_received >= budget) + break; + pkt_received++; + + /* Since we have allocated space to hold a complete frame, + * the last indicator should be set. + */ + if ((status & BD_ENET_RX_LAST) == 0) + netdev_err(ndev, "rcv is not +last\n"); + + + /* Check for errors. */ + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | + BD_ENET_RX_CR | BD_ENET_RX_OV)) { + ndev->stats.rx_errors++; + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { + /* Frame too long or too short. */ + ndev->stats.rx_length_errors++; + } + if (status & BD_ENET_RX_NO) /* Frame alignment */ + ndev->stats.rx_frame_errors++; + if (status & BD_ENET_RX_CR) /* CRC Error */ + ndev->stats.rx_crc_errors++; + if (status & BD_ENET_RX_OV) /* FIFO overrun */ + ndev->stats.rx_fifo_errors++; + } + + /* Report late collisions as a frame error. + * On this error, the BD is closed, but we don't know what we + * have in the buffer. So, just drop this frame on the floor. + */ + if (status & BD_ENET_RX_CL) { + ndev->stats.rx_errors++; + ndev->stats.rx_frame_errors++; + goto rx_processing_done; + } + + /* Process the incoming frame. */ + ndev->stats.rx_packets++; + pkt_len = bdp->cbd_datlen; + ndev->stats.rx_bytes += pkt_len; + + index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); + skb = rxq->rx_skbuff[index]; + + /* The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. 
+ */ + is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, + need_swap); + if (!is_copybreak) { + skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); + if (unlikely(!skb_new)) { + ndev->stats.rx_dropped++; + goto rx_processing_done; + } + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + } + + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, pkt_len - 4); + data = skb->data; + if (!is_copybreak && need_swap) + swap_buffer(data, pkt_len); + + /* Extract the enhanced buffer descriptor */ + ebdp = NULL; + if (fep->bufdesc_ex) + ebdp = (struct bufdesc_ex *)bdp; + + /* If this is a VLAN packet remove the VLAN Tag */ + vlan_packet_rcvd = false; + if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + /* Push and remove the vlan tag */ + struct vlan_hdr *vlan_header = + (struct vlan_hdr *) (data + ETH_HLEN); + vlan_tag = ntohs(vlan_header->h_vlan_TCI); + + vlan_packet_rcvd = true; + + memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + } + + skb->protocol = eth_type_trans(skb, ndev); + + /* Get receive timestamp from the skb */ + if (fep->hwts_rx_en && fep->bufdesc_ex) + fec_enet_hwtstamp(fep, ebdp->ts, + skb_hwtstamps(skb)); + + if (fep->bufdesc_ex && + (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { + if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { + /* don't check it */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + } + + /* Handle received VLAN packets */ + if (vlan_packet_rcvd) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + + napi_gro_receive(&fep->napi, skb); + + if (is_copybreak) { + dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + } else { + rxq->rx_skbuff[index] = skb_new; + fec_enet_new_rxbdp(ndev, bdp, skb_new); + } + +rx_processing_done: + /* Clear the status flags for this buffer */ + status &= ~BD_ENET_RX_STATS; + + /* Mark the buffer empty */ + status |= BD_ENET_RX_EMPTY; + bdp->cbd_sc = status; + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + + ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_prot = 0; + ebdp->cbd_bdu = 0; + } + + /* Update BD pointer to next entry */ + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + + /* Doing this here will keep the FEC running while we process + * incoming frames. On a heavily loaded network, we should be + * able to keep up at the expense of system resources. 
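The copybreak handling above implements a simple size-based policy: frames no larger than rx_copybreak (tunable through ETHTOOL_RX_COPYBREAK later in this file) are copied into a right-sized skb so the original DMA buffer can be handed straight back to the ring, while larger frames are passed up as-is and a fresh full-sized buffer is mapped in their place. A user-space sketch of that decision only, with malloc() standing in for the skb allocation:

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

/* Returns the buffer to deliver to the stack; *recycled tells the
 * caller whether the ring buffer can stay in place or must be refilled. */
static void *rx_deliver(void *ring_buf, size_t len, size_t copybreak,
			bool *recycled)
{
	if (len <= copybreak) {
		void *copy = malloc(len);

		if (copy) {
			memcpy(copy, ring_buf, len);
			*recycled = true;	/* ring_buf stays in the ring */
			return copy;
		}
	}
	*recycled = false;		/* caller must refill this ring slot */
	return ring_buf;
}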
+ */ + writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); + } + rxq->cur_rx = bdp; + return pkt_received; +} + +static int +fec_enet_rx(struct net_device *ndev, int budget) +{ + int pkt_received = 0; + u16 queue_id; + struct fec_enet_private *fep = netdev_priv(ndev); + + for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { + clear_bit(queue_id, &fep->work_rx); + pkt_received += fec_enet_rx_queue(ndev, + budget - pkt_received, queue_id); + } + return pkt_received; +} + +static bool +fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +{ + if (int_events == 0) + return false; + + if (int_events & FEC_ENET_RXF) + fep->work_rx |= (1 << 2); + if (int_events & FEC_ENET_RXF_1) + fep->work_rx |= (1 << 0); + if (int_events & FEC_ENET_RXF_2) + fep->work_rx |= (1 << 1); + + if (int_events & FEC_ENET_TXF) + fep->work_tx |= (1 << 2); + if (int_events & FEC_ENET_TXF_1) + fep->work_tx |= (1 << 0); + if (int_events & FEC_ENET_TXF_2) + fep->work_tx |= (1 << 1); + + return true; +} + +static irqreturn_t +fec_enet_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct fec_enet_private *fep = netdev_priv(ndev); + uint int_events; + irqreturn_t ret = IRQ_NONE; + + int_events = readl(fep->hwp + FEC_IEVENT); + writel(int_events, fep->hwp + FEC_IEVENT); + fec_enet_collect_events(fep, int_events); + + if ((fep->work_tx || fep->work_rx) && fep->link) { + ret = IRQ_HANDLED; + + if (napi_schedule_prep(&fep->napi)) { + /* Disable the NAPI interrupts */ + writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); + __napi_schedule(&fep->napi); + } + } + + if (int_events & FEC_ENET_MII) { + ret = IRQ_HANDLED; + complete(&fep->mdio_done); + } + + if (fep->ptp_clock) + fec_ptp_check_pps_event(fep); + + return ret; +} + +static int fec_enet_rx_napi(struct napi_struct *napi, int budget) +{ + struct net_device *ndev = napi->dev; + struct fec_enet_private *fep = netdev_priv(ndev); + int pkts; + + pkts = fec_enet_rx(ndev, budget); + + fec_enet_tx(ndev); + + if (pkts < budget) { + napi_complete(napi); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + } + return pkts; +} + +/* ------------------------------------------------------------------------- */ +static void fec_get_mac(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); + unsigned char *iap, tmpaddr[ETH_ALEN]; + + /* + * try to get mac address in following order: + * + * 1) module parameter via kernel command line in form + * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 + */ + iap = macaddr; + + /* + * 2) from device tree data + */ + if (!is_valid_ether_addr(iap)) { + struct device_node *np = fep->pdev->dev.of_node; + if (np) { + const char *mac = of_get_mac_address(np); + if (mac) + iap = (unsigned char *) mac; + } + } + + /* + * 3) from flash or fuse (via platform data) + */ + if (!is_valid_ether_addr(iap)) { +#ifdef CONFIG_M5272 + if (FEC_FLASHMAC) + iap = (unsigned char *)FEC_FLASHMAC; +#else + if (pdata) + iap = (unsigned char *)&pdata->mac; +#endif + } + + /* + * 4) FEC mac registers set by bootloader + */ + if (!is_valid_ether_addr(iap)) { + *((__be32 *) &tmpaddr[0]) = + cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); + *((__be16 *) &tmpaddr[4]) = + cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); + iap = &tmpaddr[0]; + } + + /* + * 5) random mac address + */ + if (!is_valid_ether_addr(iap)) { + /* Report it and use a random ethernet address instead */ + netdev_err(ndev, "Invalid MAC address: %pM\n", iap); + 
eth_hw_addr_random(ndev); + netdev_info(ndev, "Using random MAC address: %pM\n", + ndev->dev_addr); + return; + } + + memcpy(ndev->dev_addr, iap, ETH_ALEN); + + /* Adjust MAC if using macaddr */ + if (iap == macaddr) + ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; +} + +/* ------------------------------------------------------------------------- */ + +/* + * Phy section + */ +static void fec_enet_adjust_link(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = fep->phy_dev; + int status_change = 0; + + /* Prevent a state halted on mii error */ + if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { + phy_dev->state = PHY_RESUMING; + return; + } + + /* + * If the netdev is down, or is going down, we're not interested + * in link state events, so just mark our idea of the link as down + * and ignore the event. + */ + if (!netif_running(ndev) || !netif_device_present(ndev)) { + fep->link = 0; + } else if (phy_dev->link) { + if (!fep->link) { + fep->link = phy_dev->link; + status_change = 1; + } + + if (fep->full_duplex != phy_dev->duplex) { + fep->full_duplex = phy_dev->duplex; + status_change = 1; + } + + if (phy_dev->speed != fep->speed) { + fep->speed = phy_dev->speed; + status_change = 1; + } + + /* if any of the above changed restart the FEC */ + if (status_change) { + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_restart(ndev); + netif_wake_queue(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + } + } else { + if (fep->link) { + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_stop(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + fep->link = phy_dev->link; + status_change = 1; + } + } + + if (status_change) + phy_print_status(phy_dev); +} + +static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct fec_enet_private *fep = bus->priv; + unsigned long time_left; + + fep->mii_timeout = 0; + init_completion(&fep->mdio_done); + + /* start a read op */ + writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + fep->mii_timeout = 1; + netdev_err(fep->netdev, "MDIO read timeout\n"); + return -ETIMEDOUT; + } + + /* return value */ + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); +} + +static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct fec_enet_private *fep = bus->priv; + unsigned long time_left; + + fep->mii_timeout = 0; + init_completion(&fep->mdio_done); + + /* start a write op */ + writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA | FEC_MMFR_DATA(value), + fep->hwp + FEC_MII_DATA); + + /* wait for end of transfer */ + time_left = wait_for_completion_timeout(&fep->mdio_done, + usecs_to_jiffies(FEC_MII_TIMEOUT)); + if (time_left == 0) { + fep->mii_timeout = 1; + netdev_err(fep->netdev, "MDIO write timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int fec_enet_clk_enable(struct net_device *ndev, bool enable) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + if (enable) { + ret = clk_prepare_enable(fep->clk_ahb); + if (ret) + return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + if (fep->clk_enet_out) { + ret = 
clk_prepare_enable(fep->clk_enet_out); + if (ret) + goto failed_clk_enet_out; + } + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); + ret = clk_prepare_enable(fep->clk_ptp); + if (ret) { + mutex_unlock(&fep->ptp_clk_mutex); + goto failed_clk_ptp; + } else { + fep->ptp_clk_on = true; + } + mutex_unlock(&fep->ptp_clk_mutex); + } + if (fep->clk_ref) { + ret = clk_prepare_enable(fep->clk_ref); + if (ret) + goto failed_clk_ref; + } + } else { + clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); + if (fep->clk_ptp) { + mutex_lock(&fep->ptp_clk_mutex); + clk_disable_unprepare(fep->clk_ptp); + fep->ptp_clk_on = false; + mutex_unlock(&fep->ptp_clk_mutex); + } + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); + } + + return 0; + +failed_clk_ref: + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); +failed_clk_ptp: + if (fep->clk_enet_out) + clk_disable_unprepare(fep->clk_enet_out); +failed_clk_enet_out: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: + clk_disable_unprepare(fep->clk_ahb); + + return ret; +} + +static int fec_enet_mii_probe(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy_dev = NULL; + char mdio_bus_id[MII_BUS_ID_SIZE]; + char phy_name[MII_BUS_ID_SIZE + 3]; + int phy_id; + int dev_id = fep->dev_id; + + fep->phy_dev = NULL; + + if (fep->phy_node) { + phy_dev = of_phy_connect(ndev, fep->phy_node, + &fec_enet_adjust_link, 0, + fep->phy_interface); + if (!phy_dev) + return -ENODEV; + } else { + /* check for attached phy */ + for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { + if ((fep->mii_bus->phy_mask & (1 << phy_id))) + continue; + if (fep->mii_bus->phy_map[phy_id] == NULL) + continue; + if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) + continue; + if (dev_id--) + continue; + strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + break; + } + + if (phy_id >= PHY_MAX_ADDR) { + netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); + strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + phy_id = 0; + } + + snprintf(phy_name, sizeof(phy_name), + PHY_ID_FMT, mdio_bus_id, phy_id); + phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, + fep->phy_interface); + } + + if (IS_ERR(phy_dev)) { + netdev_err(ndev, "could not attach to PHY\n"); + return PTR_ERR(phy_dev); + } + + /* mask with MAC supported features */ + if (fep->quirks & FEC_QUIRK_HAS_GBIT) { + phy_dev->supported &= PHY_GBIT_FEATURES; + phy_dev->supported &= ~SUPPORTED_1000baseT_Half; +#if !defined(CONFIG_M5272) + phy_dev->supported |= SUPPORTED_Pause; +#endif + } + else + phy_dev->supported &= PHY_BASIC_FEATURES; + + phy_dev->advertising = phy_dev->supported; + + fep->phy_dev = phy_dev; + fep->link = 0; + fep->full_duplex = 0; + + netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), + fep->phy_dev->irq); + + return 0; +} + +static int fec_enet_mii_init(struct platform_device *pdev) +{ + static struct mii_bus *fec0_mii_bus; + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + struct device_node *node; + int err = -ENXIO, i; + u32 mii_speed, holdtime; + + /* + * The i.MX28 dual fec interfaces are not equal. 
+ * Here are the differences: + * + * - fec0 supports MII & RMII modes while fec1 only supports RMII + * - fec0 acts as the 1588 time master while fec1 is slave + * - external phys can only be configured by fec0 + * + * That is to say fec1 can not work independently. It only works + * when fec0 is working. The reason behind this design is that the + * second interface is added primarily for Switch mode. + * + * Because of the last point above, both phys are attached on fec0 + * mdio interface in board design, and need to be configured by + * fec0 mii_bus. + */ + if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { + /* fec1 uses fec0 mii_bus */ + if (mii_cnt && fec0_mii_bus) { + fep->mii_bus = fec0_mii_bus; + mii_cnt++; + return 0; + } + return -ENOENT; + } + + fep->mii_timeout = 0; + + /* + * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) + * + * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while + * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 + * Reference Manual has an error on this, and gets fixed on i.MX6Q + * document. + */ + mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); + if (fep->quirks & FEC_QUIRK_ENET_MAC) + mii_speed--; + if (mii_speed > 63) { + dev_err(&pdev->dev, + "fec clock (%lu) to fast to get right mii speed\n", + clk_get_rate(fep->clk_ipg)); + err = -EINVAL; + goto err_out; + } + + /* + * The i.MX28 and i.MX6 types have another filed in the MSCR (aka + * MII_SPEED) register that defines the MDIO output hold time. Earlier + * versions are RAZ there, so just ignore the difference and write the + * register always. + * The minimal hold time according to IEE802.3 (clause 22) is 10 ns. + * HOLDTIME + 1 is the number of clk cycles the fec is holding the + * output. + * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). + * Given that ceil(clkrate / 5000000) <= 64, the calculation for + * holdtime cannot result in a value greater than 3. 
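A worked example of the MSCR calculation described above, assuming a 66 MHz ipg clock on an ENET-MAC part purely for illustration (the rate is an example, not a value taken from a particular board). With that clock, MII_SPEED becomes 13, MDC runs at about 2.36 MHz (below the 2.5 MHz target), and HOLDTIME is 0, i.e. one clock of hold, roughly 15 ns:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t clk = 66000000;				/* assumed example rate */
	uint32_t mii_speed = DIV_ROUND_UP(clk, 5000000) - 1;	/* ENET-MAC: minus one */
	uint32_t holdtime  = DIV_ROUND_UP(clk, 100000000) - 1;
	uint32_t phy_speed = mii_speed << 1 | holdtime << 8;

	printf("MII_SPEED=%u HOLDTIME=%u MSCR=0x%x MDC=%.2f MHz\n",
	       mii_speed, holdtime, phy_speed,
	       clk / ((mii_speed + 1) * 2) / 1e6);
	return 0;
}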
+ */ + holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; + + fep->phy_speed = mii_speed << 1 | holdtime << 8; + + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + + fep->mii_bus = mdiobus_alloc(); + if (fep->mii_bus == NULL) { + err = -ENOMEM; + goto err_out; + } + + fep->mii_bus->name = "fec_enet_mii_bus"; + fep->mii_bus->read = fec_enet_mdio_read; + fep->mii_bus->write = fec_enet_mdio_write; + snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, fep->dev_id + 1); + fep->mii_bus->priv = fep; + fep->mii_bus->parent = &pdev->dev; + + fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!fep->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + fep->mii_bus->irq[i] = PHY_POLL; + + node = of_get_child_by_name(pdev->dev.of_node, "mdio"); + if (node) { + err = of_mdiobus_register(fep->mii_bus, node); + of_node_put(node); + } else { + err = mdiobus_register(fep->mii_bus); + } + + if (err) + goto err_out_free_mdio_irq; + + mii_cnt++; + + /* save fec0 mii_bus */ + if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) + fec0_mii_bus = fep->mii_bus; + + return 0; + +err_out_free_mdio_irq: + kfree(fep->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(fep->mii_bus); +err_out: + return err; +} + +static void fec_enet_mii_remove(struct fec_enet_private *fep) +{ + if (--mii_cnt == 0) { + mdiobus_unregister(fep->mii_bus); + kfree(fep->mii_bus->irq); + mdiobus_free(fep->mii_bus); + } +} + +static int fec_enet_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); +} + +static int fec_enet_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + +static void fec_enet_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + strlcpy(info->driver, fep->pdev->dev.driver->name, + sizeof(info->driver)); + strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); + strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); +} + +static int fec_enet_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->bufdesc_ex) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + if (fep->ptp_clock) + info->phc_index = ptp_clock_index(fep->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; + } else { + return ethtool_op_get_ts_info(ndev, info); + } +} + +#if !defined(CONFIG_M5272) + +static void fec_enet_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; + pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; + pause->rx_pause = pause->tx_pause; +} + +static int fec_enet_set_pauseparam(struct net_device *ndev, 
+ struct ethtool_pauseparam *pause) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!fep->phy_dev) + return -ENODEV; + + if (pause->tx_pause != pause->rx_pause) { + netdev_info(ndev, + "hardware only support enable/disable both tx and rx"); + return -EINVAL; + } + + fep->pause_flag = 0; + + /* tx pause must be same as rx pause */ + fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; + fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; + + if (pause->rx_pause || pause->autoneg) { + fep->phy_dev->supported |= ADVERTISED_Pause; + fep->phy_dev->advertising |= ADVERTISED_Pause; + } else { + fep->phy_dev->supported &= ~ADVERTISED_Pause; + fep->phy_dev->advertising &= ~ADVERTISED_Pause; + } + + if (pause->autoneg) { + if (netif_running(ndev)) + fec_stop(ndev); + phy_start_aneg(fep->phy_dev); + } + if (netif_running(ndev)) { + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_restart(ndev); + netif_wake_queue(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + } + + return 0; +} + +static const struct fec_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} fec_stats[] = { + /* RMON TX */ + { "tx_dropped", RMON_T_DROP }, + { "tx_packets", RMON_T_PACKETS }, + { "tx_broadcast", RMON_T_BC_PKT }, + { "tx_multicast", RMON_T_MC_PKT }, + { "tx_crc_errors", RMON_T_CRC_ALIGN }, + { "tx_undersize", RMON_T_UNDERSIZE }, + { "tx_oversize", RMON_T_OVERSIZE }, + { "tx_fragment", RMON_T_FRAG }, + { "tx_jabber", RMON_T_JAB }, + { "tx_collision", RMON_T_COL }, + { "tx_64byte", RMON_T_P64 }, + { "tx_65to127byte", RMON_T_P65TO127 }, + { "tx_128to255byte", RMON_T_P128TO255 }, + { "tx_256to511byte", RMON_T_P256TO511 }, + { "tx_512to1023byte", RMON_T_P512TO1023 }, + { "tx_1024to2047byte", RMON_T_P1024TO2047 }, + { "tx_GTE2048byte", RMON_T_P_GTE2048 }, + { "tx_octets", RMON_T_OCTETS }, + + /* IEEE TX */ + { "IEEE_tx_drop", IEEE_T_DROP }, + { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, + { "IEEE_tx_1col", IEEE_T_1COL }, + { "IEEE_tx_mcol", IEEE_T_MCOL }, + { "IEEE_tx_def", IEEE_T_DEF }, + { "IEEE_tx_lcol", IEEE_T_LCOL }, + { "IEEE_tx_excol", IEEE_T_EXCOL }, + { "IEEE_tx_macerr", IEEE_T_MACERR }, + { "IEEE_tx_cserr", IEEE_T_CSERR }, + { "IEEE_tx_sqe", IEEE_T_SQE }, + { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, + { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, + + /* RMON RX */ + { "rx_packets", RMON_R_PACKETS }, + { "rx_broadcast", RMON_R_BC_PKT }, + { "rx_multicast", RMON_R_MC_PKT }, + { "rx_crc_errors", RMON_R_CRC_ALIGN }, + { "rx_undersize", RMON_R_UNDERSIZE }, + { "rx_oversize", RMON_R_OVERSIZE }, + { "rx_fragment", RMON_R_FRAG }, + { "rx_jabber", RMON_R_JAB }, + { "rx_64byte", RMON_R_P64 }, + { "rx_65to127byte", RMON_R_P65TO127 }, + { "rx_128to255byte", RMON_R_P128TO255 }, + { "rx_256to511byte", RMON_R_P256TO511 }, + { "rx_512to1023byte", RMON_R_P512TO1023 }, + { "rx_1024to2047byte", RMON_R_P1024TO2047 }, + { "rx_GTE2048byte", RMON_R_P_GTE2048 }, + { "rx_octets", RMON_R_OCTETS }, + + /* IEEE RX */ + { "IEEE_rx_drop", IEEE_R_DROP }, + { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, + { "IEEE_rx_crc", IEEE_R_CRC }, + { "IEEE_rx_align", IEEE_R_ALIGN }, + { "IEEE_rx_macerr", IEEE_R_MACERR }, + { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, + { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, +}; + +static void fec_enet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct fec_enet_private *fep = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + data[i] = readl(fep->hwp + fec_stats[i].offset); +} + +static void fec_enet_get_strings(struct 
net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(fec_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + fec_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int fec_enet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(fec_stats); + default: + return -EOPNOTSUPP; + } +} +#endif /* !defined(CONFIG_M5272) */ + +static int fec_enet_nway_reset(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phy_dev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + +/* ITR clock source is enet system clock (clk_ahb). + * TCTT unit is cycle_ns * 64 cycle + * So, the ICTT value = X us / (cycle_ns * 64) + */ +static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + return us * (fep->itr_clk_rate / 64000) / 1000; +} + +/* Set threshold for interrupt coalescing */ +static void fec_enet_itr_coal_set(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int rx_itr, tx_itr; + + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + return; + + /* Must be greater than zero to avoid unpredictable behavior */ + if (!fep->rx_time_itr || !fep->rx_pkts_itr || + !fep->tx_time_itr || !fep->tx_pkts_itr) + return; + + /* Select enet system clock as Interrupt Coalescing + * timer Clock Source + */ + rx_itr = FEC_ITR_CLK_SEL; + tx_itr = FEC_ITR_CLK_SEL; + + /* set ICFT and ICTT */ + rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); + rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); + tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); + tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); + + rx_itr |= FEC_ITR_EN; + tx_itr |= FEC_ITR_EN; + + writel(tx_itr, fep->hwp + FEC_TXIC0); + writel(rx_itr, fep->hwp + FEC_RXIC0); + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); +} + +static int +fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + return -EOPNOTSUPP; + + ec->rx_coalesce_usecs = fep->rx_time_itr; + ec->rx_max_coalesced_frames = fep->rx_pkts_itr; + + ec->tx_coalesce_usecs = fep->tx_time_itr; + ec->tx_max_coalesced_frames = fep->tx_pkts_itr; + + return 0; +} + +static int +fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int cycle; + + if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + return -EOPNOTSUPP; + + if (ec->rx_max_coalesced_frames > 255) { + pr_err("Rx coalesced frames exceed hardware limiation"); + return -EINVAL; + } + + if (ec->tx_max_coalesced_frames > 255) { + pr_err("Tx coalesced frame exceed hardware limiation"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); + if (cycle > 0xFFFF) { + pr_err("Rx coalesed usec exceeed hardware limiation"); + return -EINVAL; + } + + cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); + if (cycle > 0xFFFF) { + pr_err("Rx coalesed usec exceeed hardware limiation"); + return -EINVAL; + } + + fep->rx_time_itr = ec->rx_coalesce_usecs; + fep->rx_pkts_itr = ec->rx_max_coalesced_frames; + + fep->tx_time_itr = ec->tx_coalesce_usecs; + fep->tx_pkts_itr = 
ec->tx_max_coalesced_frames; + + fec_enet_itr_coal_set(ndev); + + return 0; +} + +static void fec_enet_itr_coal_init(struct net_device *ndev) +{ + struct ethtool_coalesce ec; + + ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + + ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; + ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; + + fec_enet_set_coalesce(ndev, &ec); +} + +static int fec_enet_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = fep->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int fec_enet_set_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + fep->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void +fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; + } else { + wol->supported = wol->wolopts = 0; + } +} + +static int +fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) + return -EINVAL; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); + if (device_may_wakeup(&ndev->dev)) { + fep->wol_flag |= FEC_WOL_FLAG_ENABLE; + if (fep->irq[0] > 0) + enable_irq_wake(fep->irq[0]); + } else { + fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); + if (fep->irq[0] > 0) + disable_irq_wake(fep->irq[0]); + } + + return 0; +} + +static const struct ethtool_ops fec_enet_ethtool_ops = { + .get_settings = fec_enet_get_settings, + .set_settings = fec_enet_set_settings, + .get_drvinfo = fec_enet_get_drvinfo, + .nway_reset = fec_enet_nway_reset, + .get_link = ethtool_op_get_link, + .get_coalesce = fec_enet_get_coalesce, + .set_coalesce = fec_enet_set_coalesce, +#ifndef CONFIG_M5272 + .get_pauseparam = fec_enet_get_pauseparam, + .set_pauseparam = fec_enet_set_pauseparam, + .get_strings = fec_enet_get_strings, + .get_ethtool_stats = fec_enet_get_ethtool_stats, + .get_sset_count = fec_enet_get_sset_count, +#endif + .get_ts_info = fec_enet_get_ts_info, + .get_tunable = fec_enet_get_tunable, + .set_tunable = fec_enet_set_tunable, + .get_wol = fec_enet_get_wol, + .set_wol = fec_enet_set_wol, +}; + +static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phydev = fep->phy_dev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + if (fep->bufdesc_ex) { + if (cmd == SIOCSHWTSTAMP) + return fec_ptp_set(ndev, rq); + if (cmd == SIOCGHWTSTAMP) + return fec_ptp_get(ndev, rq); + } + + return phy_mii_ioctl(phydev, rq, cmd); +} + +static void fec_enet_free_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct sk_buff *skb; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + 
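For the interrupt-coalescing setup above: fec_enet_us_to_itr_clock() scales a microsecond interval into ITR timer ticks of 64 clock cycles each, and the 0xFFFF checks in fec_enet_set_coalesce() exist because the ICTT field is 16 bits wide (frame counts are likewise capped at 255). A small sketch of the conversion, assuming a 66 MHz ITR clock purely as an example:

#include <stdio.h>

/* Same arithmetic as fec_enet_us_to_itr_clock() above. */
static unsigned int us_to_itr_ticks(unsigned int us, unsigned long itr_clk_rate)
{
	return us * (itr_clk_rate / 64000) / 1000;
}

int main(void)
{
	unsigned long clk = 66000000;	/* assumed example clock rate */
	unsigned int us = 1000;		/* 1 ms coalescing interval */

	printf("%u us -> %u ITR ticks (hardware limit 0x%x)\n",
	       us, us_to_itr_ticks(us, clk), 0xFFFF);
	return 0;
}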
unsigned int q; + + for (q = 0; q < fep->num_rx_queues; q++) { + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { + skb = rxq->rx_skbuff[i]; + rxq->rx_skbuff[i] = NULL; + if (skb) { + dma_unmap_single(&fep->pdev->dev, + bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE - fep->rx_align, + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + } + + for (q = 0; q < fep->num_tx_queues; q++) { + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + kfree(txq->tx_bounce[i]); + txq->tx_bounce[i] = NULL; + skb = txq->tx_skbuff[i]; + txq->tx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } + } +} + +static void fec_enet_free_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { + txq = fep->tx_queue[i]; + dma_free_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, + txq->tso_hdrs_dma); + } + + for (i = 0; i < fep->num_rx_queues; i++) + kfree(fep->rx_queue[i]); + for (i = 0; i < fep->num_tx_queues; i++) + kfree(fep->tx_queue[i]); +} + +static int fec_enet_alloc_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + int ret = 0; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->tx_queue[i] = txq; + txq->tx_ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + + txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; + txq->tx_wake_threshold = + (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + + txq->tso_hdrs = dma_alloc_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, + GFP_KERNEL); + if (!txq->tso_hdrs) { + ret = -ENOMEM; + goto alloc_failed; + } + } + + for (i = 0; i < fep->num_rx_queues; i++) { + fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), + GFP_KERNEL); + if (!fep->rx_queue[i]) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; + } + return ret; + +alloc_failed: + fec_enet_free_queue(ndev); + return ret; +} + +static int +fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct sk_buff *skb; + struct bufdesc *bdp; + struct fec_enet_priv_rx_q *rxq; + + rxq = fep->rx_queue[queue]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { + skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); + if (!skb) + goto err_alloc; + + if (fec_enet_new_rxbdp(ndev, bdp, skb)) { + dev_kfree_skb(skb); + goto err_alloc; + } + + rxq->rx_skbuff[i] = skb; + bdp->cbd_sc = BD_ENET_RX_EMPTY; + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = BD_ENET_RX_INT; + } + + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + } + + /* Set the last buffer to wrap. 
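The ring setup routines here all follow the same shape: every descriptor in the array is initialised (RX descriptors handed to the hardware as EMPTY), and only the last one gets the WRAP flag so the controller walks the array as a circle. An illustrative sketch of that pattern; the structure and flag values are placeholders, not the driver's struct bufdesc or BD_ENET_* constants:

#include <stdint.h>
#include <stddef.h>

#define BD_EMPTY	0x8000	/* descriptor owned by the controller */
#define BD_WRAP		0x2000	/* last descriptor: hardware wraps to base */

struct bd {			/* minimal stand-in for a buffer descriptor */
	uint16_t sc;
	uint16_t len;
	uint32_t bufaddr;
};

static void init_rx_ring(struct bd *ring, size_t n, const uint32_t *bufs)
{
	for (size_t i = 0; i < n; i++) {
		ring[i].sc = BD_EMPTY;
		ring[i].len = 0;
		ring[i].bufaddr = bufs[i];
	}
	ring[n - 1].sc |= BD_WRAP;	/* close the ring */
}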
*/ + bdp = fec_enet_get_prevdesc(bdp, fep, queue); + bdp->cbd_sc |= BD_SC_WRAP; + return 0; + + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int +fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + + txq = fep->tx_queue[queue]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); + if (!txq->tx_bounce[i]) + goto err_alloc; + + bdp->cbd_sc = 0; + bdp->cbd_bufaddr = 0; + + if (fep->bufdesc_ex) { + struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + ebdp->cbd_esc = BD_ENET_TX_INT; + } + + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + } + + /* Set the last buffer to wrap. */ + bdp = fec_enet_get_prevdesc(bdp, fep, queue); + bdp->cbd_sc |= BD_SC_WRAP; + + return 0; + + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int fec_enet_alloc_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < fep->num_rx_queues; i++) + if (fec_enet_alloc_rxq_buffers(ndev, i)) + return -ENOMEM; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fec_enet_alloc_txq_buffers(ndev, i)) + return -ENOMEM; + return 0; +} + +static int +fec_enet_open(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int ret; + + pinctrl_pm_select_default_state(&fep->pdev->dev); + ret = fec_enet_clk_enable(ndev, true); + if (ret) + return ret; + + /* I should reset the ring buffers here, but I don't yet know + * a simple way to do that. + */ + + ret = fec_enet_alloc_buffers(ndev); + if (ret) + goto err_enet_alloc; + + /* Probe and connect to PHY when open the interface */ + ret = fec_enet_mii_probe(ndev); + if (ret) + goto err_enet_mii_probe; + + fec_restart(ndev); + napi_enable(&fep->napi); + phy_start(fep->phy_dev); + netif_tx_start_all_queues(ndev); + + device_set_wakeup_enable(&ndev->dev, fep->wol_flag & + FEC_WOL_FLAG_ENABLE); + + return 0; + +err_enet_mii_probe: + fec_enet_free_buffers(ndev); +err_enet_alloc: + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + return ret; +} + +static int +fec_enet_close(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + phy_stop(fep->phy_dev); + + if (netif_device_present(ndev)) { + napi_disable(&fep->napi); + netif_tx_disable(ndev); + fec_stop(ndev); + } + + phy_disconnect(fep->phy_dev); + fep->phy_dev = NULL; + + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + fec_enet_free_buffers(ndev); + + return 0; +} + +/* Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. Some of the drivers check to make sure it is + * a group multicast address, and discard those that are not. I guess I + * will do the same for now, but just remove the test if you want + * individual filtering as well (do the upper net layers want or support + * this kind of feature?). 
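As a stand-alone illustration of the hashing done by set_multicast_list() below: a bitwise CRC-32 (polynomial 0xEDB88320) is computed over the 6-byte MAC address, and the top 6 bits select one of 64 positions split across the HIGH and LOW group-hash registers. The example address in main() is an arbitrary multicast MAC:

#include <stdint.h>
#include <stdio.h>

#define CRC32_POLY	0xEDB88320
#define HASH_BITS	6

static unsigned int fec_mc_hash(const uint8_t mac[6])
{
	uint32_t crc = 0xffffffff;

	for (int i = 0; i < 6; i++) {
		uint32_t data = mac[i];

		for (int bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0);
	}
	return (crc >> (32 - HASH_BITS)) & 0x3f;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int hash = fec_mc_hash(mac);

	printf("hash=%u -> bit %u of the %s hash register\n", hash,
	       hash > 31 ? hash - 32 : hash, hash > 31 ? "HIGH" : "LOW");
	return 0;
}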
+ */ + +#define HASH_BITS 6 /* #bits in hash */ +#define CRC32_POLY 0xEDB88320 + +static void set_multicast_list(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct netdev_hw_addr *ha; + unsigned int i, bit, data, crc, tmp; + unsigned char hash; + + if (ndev->flags & IFF_PROMISC) { + tmp = readl(fep->hwp + FEC_R_CNTRL); + tmp |= 0x8; + writel(tmp, fep->hwp + FEC_R_CNTRL); + return; + } + + tmp = readl(fep->hwp + FEC_R_CNTRL); + tmp &= ~0x8; + writel(tmp, fep->hwp + FEC_R_CNTRL); + + if (ndev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's + */ + writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + + return; + } + + /* Clear filter and add the addresses in hash register + */ + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + + netdev_for_each_mc_addr(ha, ndev) { + /* calculate crc32 value of mac address */ + crc = 0xffffffff; + + for (i = 0; i < ndev->addr_len; i++) { + data = ha->addr[i]; + for (bit = 0; bit < 8; bit++, data >>= 1) { + crc = (crc >> 1) ^ + (((crc ^ data) & 1) ? CRC32_POLY : 0); + } + } + + /* only upper 6 bits (HASH_BITS) are used + * which point to specific bit in he hash registers + */ + hash = (crc >> (32 - HASH_BITS)) & 0x3f; + + if (hash > 31) { + tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + tmp |= 1 << (hash - 32); + writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + } else { + tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); + tmp |= 1 << hash; + writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); + } + } +} + +/* Set a MAC change in hardware. */ +static int +fec_set_mac_address(struct net_device *ndev, void *p) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct sockaddr *addr = p; + + if (addr) { + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + } + + writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | + (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), + fep->hwp + FEC_ADDR_LOW); + writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), + fep->hwp + FEC_ADDR_HIGH); + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * fec_poll_controller - FEC Poll controller function + * @dev: The FEC network adapter + * + * Polled functionality used by netconsole and others in non interrupt mode + * + */ +static void fec_poll_controller(struct net_device *dev) +{ + int i; + struct fec_enet_private *fep = netdev_priv(dev); + + for (i = 0; i < FEC_IRQ_NUM; i++) { + if (fep->irq[i] > 0) { + disable_irq(fep->irq[i]); + fec_enet_interrupt(fep->irq[i], dev); + enable_irq(fep->irq[i]); + } + } +} +#endif + +#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM +static inline void fec_enet_set_netdev_features(struct net_device *netdev, + netdev_features_t features) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + netdev->features = features; + + /* Receive checksum has been changed */ + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM) + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + else + fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; + } +} + +static int fec_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct fec_enet_private *fep = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { + 
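+ /* RXCSUM is in FEATURES_NEED_QUIESCE because its new setting only
+ * reaches the hardware via fec_restart(): stop NAPI and the TX
+ * queues, halt the MAC, update the feature flags, then restart and
+ * re-enable everything in the reverse order.  Feature bits that do
+ * not need this are simply recorded in the else branch below.
+ */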
napi_disable(&fep->napi); + netif_tx_lock_bh(netdev); + fec_stop(netdev); + fec_enet_set_netdev_features(netdev, features); + fec_restart(netdev); + netif_tx_wake_all_queues(netdev); + netif_tx_unlock_bh(netdev); + napi_enable(&fep->napi); + } else { + fec_enet_set_netdev_features(netdev, features); + } + + return 0; +} + +static const struct net_device_ops fec_netdev_ops = { + .ndo_open = fec_enet_open, + .ndo_stop = fec_enet_close, + .ndo_start_xmit = fec_enet_start_xmit, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = fec_timeout, + .ndo_set_mac_address = fec_set_mac_address, + .ndo_do_ioctl = fec_enet_ioctl, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fec_poll_controller, +#endif + .ndo_set_features = fec_set_features, +}; + + /* + * XXX: We need to clean up on failure exits here. + * + */ +static int fec_enet_init(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + struct bufdesc *cbd_base; + dma_addr_t bd_dma; + int bd_size; + unsigned int i; + +#if defined(CONFIG_ARM) + fep->rx_align = 0xf; + fep->tx_align = 0xf; +#else + fep->rx_align = 0x3; + fep->tx_align = 0x3; +#endif + + fec_enet_alloc_queue(ndev); + + if (fep->bufdesc_ex) + fep->bufdesc_size = sizeof(struct bufdesc_ex); + else + fep->bufdesc_size = sizeof(struct bufdesc); + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * + fep->bufdesc_size; + + /* Allocate memory for buffer descriptors. */ + cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, + GFP_KERNEL); + if (!cbd_base) { + return -ENOMEM; + } + + memset(cbd_base, 0, bd_size); + + /* Get the Ethernet address */ + fec_get_mac(ndev); + /* make sure MAC we just acquired is programmed into the hw */ + fec_set_mac_address(ndev, NULL); + + /* Set receive and transmit descriptor base. 
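+ *
+ * All descriptor rings live in the single coherent allocation made
+ * above (cbd_base/bd_dma): the RX rings of every queue come first,
+ * followed by the TX rings, e.g.
+ *
+ *   [RXQ0 ring][RXQ1 ring]...[TXQ0 ring][TXQ1 ring]...
+ *
+ * Each loop below records the current CPU and DMA cursors for its
+ * queue and then advances both by ring_size descriptors, using the
+ * extended descriptor size when fep->bufdesc_ex is set.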
*/ + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + rxq->index = i; + rxq->rx_bd_base = (struct bufdesc *)cbd_base; + rxq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; + cbd_base += rxq->rx_ring_size; + } + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + txq->index = i; + txq->tx_bd_base = (struct bufdesc *)cbd_base; + txq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; + cbd_base += txq->tx_ring_size; + } + } + + + /* The FEC Ethernet specific entries in the device structure */ + ndev->watchdog_timeo = TX_TIMEOUT; + ndev->netdev_ops = &fec_netdev_ops; + ndev->ethtool_ops = &fec_enet_ethtool_ops; + + writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); + netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); + + if (fep->quirks & FEC_QUIRK_HAS_VLAN) + /* enable hw VLAN support */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + + if (fep->quirks & FEC_QUIRK_HAS_CSUM) { + ndev->gso_max_segs = FEC_MAX_TSO_SEGS; + + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + } + + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + fep->tx_align = 0; + fep->rx_align = 0x3f; + } + + ndev->hw_features = ndev->features; + + fec_restart(ndev); + + return 0; +} + +#ifdef CONFIG_OF +static void fec_reset_phy(struct platform_device *pdev) +{ + int err, phy_reset; + int msec = 1; + struct device_node *np = pdev->dev.of_node; + + if (!np) + return; + + of_property_read_u32(np, "phy-reset-duration", &msec); + /* A sane reset duration should not be longer than 1s */ + if (msec > 1000) + msec = 1; + + phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); + if (!gpio_is_valid(phy_reset)) + return; + + err = devm_gpio_request_one(&pdev->dev, phy_reset, + GPIOF_OUT_INIT_LOW, "phy-reset"); + if (err) { + dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); + return; + } + msleep(msec); + gpio_set_value(phy_reset, 1); +} +#else /* CONFIG_OF */ +static void fec_reset_phy(struct platform_device *pdev) +{ + /* + * In case of platform probe, the reset has been done + * by machine code. 
+ */ +} +#endif /* CONFIG_OF */ + +static void +fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) +{ + struct device_node *np = pdev->dev.of_node; + int err; + + *num_tx = *num_rx = 1; + + if (!np || !of_device_is_available(np)) + return; + + /* parse the num of tx and rx queues */ + err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); + if (err) + *num_tx = 1; + + err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); + if (err) + *num_rx = 1; + + if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { + dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", + *num_tx); + *num_tx = 1; + return; + } + + if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { + dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", + *num_rx); + *num_rx = 1; + return; + } + +} + +static int +fec_probe(struct platform_device *pdev) +{ + struct fec_enet_private *fep; + struct fec_platform_data *pdata; + struct net_device *ndev; + int i, irq, ret = 0; + struct resource *r; + const struct of_device_id *of_id; + static int dev_id; + struct device_node *np = pdev->dev.of_node, *phy_node; + int num_tx_qs; + int num_rx_qs; + + fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); + + /* Init network device */ + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), + num_tx_qs, num_rx_qs); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* setup board info structure */ + fep = netdev_priv(ndev); + + of_id = of_match_device(fec_dt_ids, &pdev->dev); + if (of_id) + pdev->id_entry = of_id->data; + fep->quirks = pdev->id_entry->driver_data; + + fep->netdev = ndev; + fep->num_rx_queues = num_rx_qs; + fep->num_tx_queues = num_tx_qs; + +#if !defined(CONFIG_M5272) + /* default enable pause frame auto negotiation */ + if (fep->quirks & FEC_QUIRK_HAS_GBIT) + fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; +#endif + + /* Select default pin state */ + pinctrl_pm_select_default_state(&pdev->dev); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fep->hwp = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(fep->hwp)) { + ret = PTR_ERR(fep->hwp); + goto failed_ioremap; + } + + fep->pdev = pdev; + fep->dev_id = dev_id++; + + platform_set_drvdata(pdev, ndev); + + if (of_get_property(np, "fsl,magic-packet", NULL)) + fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; + + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(np)) { + ret = of_phy_register_fixed_link(np); + if (ret < 0) { + dev_err(&pdev->dev, + "broken fixed-link specification\n"); + goto failed_phy; + } + phy_node = of_node_get(np); + } + fep->phy_node = phy_node; + + ret = of_get_phy_mode(pdev->dev.of_node); + if (ret < 0) { + pdata = dev_get_platdata(&pdev->dev); + if (pdata) + fep->phy_interface = pdata->phy; + else + fep->phy_interface = PHY_INTERFACE_MODE_MII; + } else { + fep->phy_interface = ret; + } + + fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(fep->clk_ipg)) { + ret = PTR_ERR(fep->clk_ipg); + goto failed_clk; + } + + fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(fep->clk_ahb)) { + ret = PTR_ERR(fep->clk_ahb); + goto failed_clk; + } + + fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); + + /* enet_out is optional, depends on board */ + fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); + if (IS_ERR(fep->clk_enet_out)) + fep->clk_enet_out = NULL; + + fep->ptp_clk_on = false; + mutex_init(&fep->ptp_clk_mutex); + + /* clk_ref is optional, depends on board */ + fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); + 
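+ /* As with enet_out above, an absent clock is tolerated by NULLing
+ * the pointer; the common clock API treats a NULL struct clk as a
+ * no-op in clk_prepare_enable() and clk_disable_unprepare().
+ */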
if (IS_ERR(fep->clk_ref)) + fep->clk_ref = NULL; + + fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; + fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); + if (IS_ERR(fep->clk_ptp)) { + fep->clk_ptp = NULL; + fep->bufdesc_ex = false; + } + + ret = fec_enet_clk_enable(ndev, true); + if (ret) + goto failed_clk; + + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); + if (!IS_ERR(fep->reg_phy)) { + ret = regulator_enable(fep->reg_phy); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable phy regulator: %d\n", ret); + goto failed_regulator; + } + } else { + fep->reg_phy = NULL; + } + + fec_reset_phy(pdev); + + if (fep->bufdesc_ex) + fec_ptp_init(pdev); + + ret = fec_enet_init(ndev); + if (ret) + goto failed_init; + + for (i = 0; i < FEC_IRQ_NUM; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + if (i) + break; + ret = irq; + goto failed_irq; + } + ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, + 0, pdev->name, ndev); + if (ret) + goto failed_irq; + + fep->irq[i] = irq; + } + + init_completion(&fep->mdio_done); + ret = fec_enet_mii_init(pdev); + if (ret) + goto failed_mii_init; + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(ndev); + fec_enet_clk_enable(ndev, false); + pinctrl_pm_select_sleep_state(&pdev->dev); + + ret = register_netdev(ndev); + if (ret) + goto failed_register; + + device_init_wakeup(&ndev->dev, fep->wol_flag & + FEC_WOL_HAS_MAGIC_PACKET); + + if (fep->bufdesc_ex && fep->ptp_clock) + netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + + fep->rx_copybreak = COPYBREAK_DEFAULT; + INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); + return 0; + +failed_register: + fec_enet_mii_remove(fep); +failed_mii_init: +failed_irq: +failed_init: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); +failed_regulator: + fec_enet_clk_enable(ndev, false); +failed_clk: +failed_phy: + of_node_put(phy_node); +failed_ioremap: + free_netdev(ndev); + + return ret; +} + +static int +fec_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + cancel_delayed_work_sync(&fep->time_keep); + cancel_work_sync(&fep->tx_timeout_work); + unregister_netdev(ndev); + fec_enet_mii_remove(fep); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + if (fep->ptp_clock) + ptp_clock_unregister(fep->ptp_clock); + of_node_put(fep->phy_node); + free_netdev(ndev); + + return 0; +} + +static int __maybe_unused fec_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + rtnl_lock(); + if (netif_running(ndev)) { + if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) + fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; + phy_stop(fep->phy_dev); + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + netif_device_detach(ndev); + netif_tx_unlock_bh(ndev); + fec_stop(ndev); + fec_enet_clk_enable(ndev, false); + if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) + pinctrl_pm_select_sleep_state(&fep->pdev->dev); + } + rtnl_unlock(); + + if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) + regulator_disable(fep->reg_phy); + + /* SOC supply clock to phy, when clock is disabled, phy link down + * SOC control phy regulator, when regulator is disabled, phy link down + */ + if (fep->clk_enet_out || fep->reg_phy) + fep->link = 0; + + return 0; +} + +static int __maybe_unused fec_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = 
netdev_priv(ndev); + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; + int ret; + int val; + + if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { + ret = regulator_enable(fep->reg_phy); + if (ret) + return ret; + } + + rtnl_lock(); + if (netif_running(ndev)) { + ret = fec_enet_clk_enable(ndev, true); + if (ret) { + rtnl_unlock(); + goto failed_clk; + } + if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { + if (pdata && pdata->sleep_mode_enable) + pdata->sleep_mode_enable(false); + val = readl(fep->hwp + FEC_ECNTRL); + val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; + } else { + pinctrl_pm_select_default_state(&fep->pdev->dev); + } + fec_restart(ndev); + netif_tx_lock_bh(ndev); + netif_device_attach(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + phy_start(fep->phy_dev); + } + rtnl_unlock(); + + return 0; + +failed_clk: + if (fep->reg_phy) + regulator_disable(fep->reg_phy); + return ret; +} + +static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); + +static struct platform_driver fec_driver = { + .driver = { + .name = DRIVER_NAME, + .pm = &fec_pm_ops, + .of_match_table = fec_dt_ids, + }, + .id_table = fec_devtype, + .probe = fec_probe, + .remove = fec_drv_remove, +}; + +module_platform_driver(fec_driver); + +MODULE_ALIAS("platform:"DRIVER_NAME); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c new file mode 100644 index 000000000..afe7f39cd --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -0,0 +1,1116 @@ +/* + * Driver for the MPC5200 Fast Ethernet Controller + * + * Originally written by Dale Farnsworth and + * now maintained by Sylvain Munaut + * + * Copyright (C) 2007 Domen Puncer, Telargo, Inc. + * Copyright (C) 2007 Sylvain Munaut + * Copyright (C) 2003-2004 MontaVista, Software, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "fec_mpc52xx.h" + +#define DRIVER_NAME "mpc52xx-fec" + +/* Private driver data structure */ +struct mpc52xx_fec_priv { + struct net_device *ndev; + int duplex; + int speed; + int r_irq; + int t_irq; + struct mpc52xx_fec __iomem *fec; + struct bcom_task *rx_dmatsk; + struct bcom_task *tx_dmatsk; + spinlock_t lock; + int msg_enable; + + /* MDIO link details */ + unsigned int mdio_speed; + struct device_node *phy_node; + struct phy_device *phydev; + enum phy_state link; + int seven_wire_mode; +}; + + +static irqreturn_t mpc52xx_fec_interrupt(int, void *); +static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *); +static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *); +static void mpc52xx_fec_stop(struct net_device *dev); +static void mpc52xx_fec_start(struct net_device *dev); +static void mpc52xx_fec_reset(struct net_device *dev); + +#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) +static int debug = -1; /* the above default */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "debugging messages level"); + +static void mpc52xx_fec_tx_timeout(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + unsigned long flags; + + dev_warn(&dev->dev, "transmit timed out\n"); + + spin_lock_irqsave(&priv->lock, flags); + mpc52xx_fec_reset(dev); + dev->stats.tx_errors++; + spin_unlock_irqrestore(&priv->lock, flags); + + netif_wake_queue(dev); +} + +static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct mpc52xx_fec __iomem *fec = priv->fec; + + out_be32(&fec->paddr1, *(u32 *)(&mac[0])); + out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE); +} + +static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sock = addr; + + memcpy(dev->dev_addr, sock->sa_data, dev->addr_len); + + mpc52xx_fec_set_paddr(dev, sock->sa_data); + return 0; +} + +static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s) +{ + while (!bcom_queue_empty(s)) { + struct bcom_fec_bd *bd; + struct sk_buff *skb; + + skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd); + dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, + DMA_FROM_DEVICE); + kfree_skb(skb); + } +} + +static void +mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct bcom_fec_bd *bd; + + bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk); + bd->status = FEC_RX_BUFFER_SIZE; + bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data, + FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); + bcom_submit_next_buffer(priv->rx_dmatsk, rskb); +} + +static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk) +{ + struct sk_buff *skb; + + while (!bcom_queue_full(rxtsk)) { + skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE); + if (!skb) + return -EAGAIN; + + /* zero out the initial receive buffers to aid debugging */ + memset(skb->data, 0, FEC_RX_BUFFER_SIZE); + mpc52xx_fec_rx_submit(dev, skb); + } + return 0; +} + +/* based on generic_adjust_link from fs_enet-main.c */ +static void 
mpc52xx_fec_adjust_link(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + int new_state = 0; + + if (phydev->link != PHY_DOWN) { + if (phydev->duplex != priv->duplex) { + struct mpc52xx_fec __iomem *fec = priv->fec; + u32 rcntrl; + u32 tcntrl; + + new_state = 1; + priv->duplex = phydev->duplex; + + rcntrl = in_be32(&fec->r_cntrl); + tcntrl = in_be32(&fec->x_cntrl); + + rcntrl &= ~FEC_RCNTRL_DRT; + tcntrl &= ~FEC_TCNTRL_FDEN; + if (phydev->duplex == DUPLEX_FULL) + tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */ + else + rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ + + out_be32(&fec->r_cntrl, rcntrl); + out_be32(&fec->x_cntrl, tcntrl); + } + + if (phydev->speed != priv->speed) { + new_state = 1; + priv->speed = phydev->speed; + } + + if (priv->link == PHY_DOWN) { + new_state = 1; + priv->link = phydev->link; + } + + } else if (priv->link) { + new_state = 1; + priv->link = PHY_DOWN; + priv->speed = 0; + priv->duplex = -1; + } + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); +} + +static int mpc52xx_fec_open(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + int err = -EBUSY; + + if (priv->phy_node) { + priv->phydev = of_phy_connect(priv->ndev, priv->phy_node, + mpc52xx_fec_adjust_link, 0, 0); + if (!priv->phydev) { + dev_err(&dev->dev, "of_phy_connect failed\n"); + return -ENODEV; + } + phy_start(priv->phydev); + } + + if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED, + DRIVER_NAME "_ctrl", dev)) { + dev_err(&dev->dev, "ctrl interrupt request failed\n"); + goto free_phy; + } + if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0, + DRIVER_NAME "_rx", dev)) { + dev_err(&dev->dev, "rx interrupt request failed\n"); + goto free_ctrl_irq; + } + if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0, + DRIVER_NAME "_tx", dev)) { + dev_err(&dev->dev, "tx interrupt request failed\n"); + goto free_2irqs; + } + + bcom_fec_rx_reset(priv->rx_dmatsk); + bcom_fec_tx_reset(priv->tx_dmatsk); + + err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); + if (err) { + dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n"); + goto free_irqs; + } + + bcom_enable(priv->rx_dmatsk); + bcom_enable(priv->tx_dmatsk); + + mpc52xx_fec_start(dev); + + netif_start_queue(dev); + + return 0; + + free_irqs: + free_irq(priv->t_irq, dev); + free_2irqs: + free_irq(priv->r_irq, dev); + free_ctrl_irq: + free_irq(dev->irq, dev); + free_phy: + if (priv->phydev) { + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + return err; +} + +static int mpc52xx_fec_close(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + + mpc52xx_fec_stop(dev); + + mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); + + free_irq(dev->irq, dev); + free_irq(priv->r_irq, dev); + free_irq(priv->t_irq, dev); + + if (priv->phydev) { + /* power down phy */ + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + return 0; +} + +/* This will only be invoked if your driver is _not_ in XOFF state. + * What this means is that you need not check it, and that this + * invariant will hold if you make sure that the netif_*_queue() + * calls are done at the proper times. 
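+ *
+ * In this driver the invariant is kept by stopping the queue at the
+ * end of mpc52xx_fec_start_xmit() whenever the BestComm TX task
+ * becomes full and waking it again from mpc52xx_fec_tx_interrupt(),
+ * so the NETDEV_TX_BUSY path below is only a defensive check.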
+ */ +static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct bcom_fec_bd *bd; + unsigned long flags; + + if (bcom_queue_full(priv->tx_dmatsk)) { + if (net_ratelimit()) + dev_err(&dev->dev, "transmit queue overrun\n"); + return NETDEV_TX_BUSY; + } + + spin_lock_irqsave(&priv->lock, flags); + + bd = (struct bcom_fec_bd *) + bcom_prepare_next_buffer(priv->tx_dmatsk); + + bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC; + bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + + skb_tx_timestamp(skb); + bcom_submit_next_buffer(priv->tx_dmatsk, skb); + spin_unlock_irqrestore(&priv->lock, flags); + + if (bcom_queue_full(priv->tx_dmatsk)) { + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mpc52xx_fec_poll_controller(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + disable_irq(priv->t_irq); + mpc52xx_fec_tx_interrupt(priv->t_irq, dev); + enable_irq(priv->t_irq); + disable_irq(priv->r_irq); + mpc52xx_fec_rx_interrupt(priv->r_irq, dev); + enable_irq(priv->r_irq); +} +#endif + + +/* This handles BestComm transmit task interrupts + */ +static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + spin_lock(&priv->lock); + while (bcom_buffer_done(priv->tx_dmatsk)) { + struct sk_buff *skb; + struct bcom_fec_bd *bd; + skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL, + (struct bcom_bd **)&bd); + dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, + DMA_TO_DEVICE); + + dev_kfree_skb_irq(skb); + } + spin_unlock(&priv->lock); + + netif_wake_queue(dev); + + return IRQ_HANDLED; +} + +static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct sk_buff *rskb; /* received sk_buff */ + struct sk_buff *skb; /* new sk_buff to enqueue in its place */ + struct bcom_fec_bd *bd; + u32 status, physaddr; + int length; + + spin_lock(&priv->lock); + + while (bcom_buffer_done(priv->rx_dmatsk)) { + + rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status, + (struct bcom_bd **)&bd); + physaddr = bd->skb_pa; + + /* Test for errors in received frame */ + if (status & BCOM_FEC_RX_BD_ERRORS) { + /* Drop packet and reuse the buffer */ + mpc52xx_fec_rx_submit(dev, rskb); + dev->stats.rx_dropped++; + continue; + } + + /* skbs are allocated on open, so now we allocate a new one, + * and remove the old (with the packet) */ + skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE); + if (!skb) { + /* Can't get a new one : reuse the same & drop pkt */ + dev_notice(&dev->dev, "Low memory - dropped packet.\n"); + mpc52xx_fec_rx_submit(dev, rskb); + dev->stats.rx_dropped++; + continue; + } + + /* Enqueue the new sk_buff back on the hardware */ + mpc52xx_fec_rx_submit(dev, skb); + + /* Process the received skb - Drop the spin lock while + * calling into the network stack */ + spin_unlock(&priv->lock); + + dma_unmap_single(dev->dev.parent, physaddr, rskb->len, + DMA_FROM_DEVICE); + length = status & BCOM_FEC_RX_BD_LEN_MASK; + skb_put(rskb, length - 4); /* length without CRC32 */ + rskb->protocol = eth_type_trans(rskb, dev); + if (!skb_defer_rx_timestamp(rskb)) + netif_rx(rskb); + + spin_lock(&priv->lock); + } + + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mpc52xx_fec_interrupt(int irq, void 
*dev_id) +{ + struct net_device *dev = dev_id; + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct mpc52xx_fec __iomem *fec = priv->fec; + u32 ievent; + + ievent = in_be32(&fec->ievent); + + ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */ + if (!ievent) + return IRQ_NONE; + + out_be32(&fec->ievent, ievent); /* clear pending events */ + + /* on fifo error, soft-reset fec */ + if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) { + + if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR)) + dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n"); + if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) + dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); + + spin_lock(&priv->lock); + mpc52xx_fec_reset(dev); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; + } + + if (ievent & ~FEC_IEVENT_TFINT) + dev_dbg(&dev->dev, "ievent: %08x\n", ievent); + + return IRQ_HANDLED; +} + +/* + * Get the current statistics. + * This may be called with the card open or closed. + */ +static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct mpc52xx_fec __iomem *fec = priv->fec; + + stats->rx_bytes = in_be32(&fec->rmon_r_octets); + stats->rx_packets = in_be32(&fec->rmon_r_packets); + stats->rx_errors = in_be32(&fec->rmon_r_crc_align) + + in_be32(&fec->rmon_r_undersize) + + in_be32(&fec->rmon_r_oversize) + + in_be32(&fec->rmon_r_frag) + + in_be32(&fec->rmon_r_jab); + + stats->tx_bytes = in_be32(&fec->rmon_t_octets); + stats->tx_packets = in_be32(&fec->rmon_t_packets); + stats->tx_errors = in_be32(&fec->rmon_t_crc_align) + + in_be32(&fec->rmon_t_undersize) + + in_be32(&fec->rmon_t_oversize) + + in_be32(&fec->rmon_t_frag) + + in_be32(&fec->rmon_t_jab); + + stats->multicast = in_be32(&fec->rmon_r_mc_pkt); + stats->collisions = in_be32(&fec->rmon_t_col); + + /* detailed rx_errors: */ + stats->rx_length_errors = in_be32(&fec->rmon_r_undersize) + + in_be32(&fec->rmon_r_oversize) + + in_be32(&fec->rmon_r_frag) + + in_be32(&fec->rmon_r_jab); + stats->rx_over_errors = in_be32(&fec->r_macerr); + stats->rx_crc_errors = in_be32(&fec->ieee_r_crc); + stats->rx_frame_errors = in_be32(&fec->ieee_r_align); + stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop); + stats->rx_missed_errors = in_be32(&fec->rmon_r_drop); + + /* detailed tx_errors: */ + stats->tx_aborted_errors = 0; + stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr); + stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop); + stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe); + stats->tx_window_errors = in_be32(&fec->ieee_t_lcol); + + return stats; +} + +/* + * Read MIB counters in order to reset them, + * then zero all the stats fields in memory + */ +static void mpc52xx_fec_reset_stats(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct mpc52xx_fec __iomem *fec = priv->fec; + + out_be32(&fec->mib_control, FEC_MIB_DISABLE); + memset_io(&fec->rmon_t_drop, 0, + offsetof(struct mpc52xx_fec, reserved10) - + offsetof(struct mpc52xx_fec, rmon_t_drop)); + out_be32(&fec->mib_control, 0); + + memset(&dev->stats, 0, sizeof(dev->stats)); +} + +/* + * Set or clear the multicast filter for this adaptor. 
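+ *
+ * Promiscuous mode just sets FEC_RCNTRL_PROM.  IFF_ALLMULTI opens the
+ * group hash completely by writing all-ones to gaddr1/gaddr2; otherwise
+ * each multicast address is hashed with ether_crc_le() and the top six
+ * bits of the CRC pick one of the 64 group-hash bits (32-63 in gaddr1,
+ * 0-31 in gaddr2).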
+ */
+static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rx_control;
+
+ rx_control = in_be32(&fec->r_cntrl);
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_control |= FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+ } else {
+ rx_control &= ~FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ out_be32(&fec->gaddr1, 0xffffffff);
+ out_be32(&fec->gaddr2, 0xffffffff);
+ } else {
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ u32 gaddr1 = 0x00000000;
+ u32 gaddr2 = 0x00000000;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
+ if (crc >= 32)
+ gaddr1 |= 1 << (crc-32);
+ else
+ gaddr2 |= 1 << crc;
+ }
+ out_be32(&fec->gaddr1, gaddr1);
+ out_be32(&fec->gaddr2, gaddr2);
+ }
+ }
+}
+
+/**
+ * mpc52xx_fec_hw_init
+ * @dev: network device
+ *
+ * Set up various hardware settings, only needed once on start
+ */
+static void mpc52xx_fec_hw_init(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ int i;
+
+ /* Whack a reset. We should wait for this. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; ++i) {
+ if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
+ break;
+ udelay(1);
+ }
+ if (i == FEC_RESET_DELAY)
+ dev_err(&dev->dev, "FEC Reset timeout!\n");
+
+ /* set pause to 0x20 frames */
+ out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);
+
+ /* high service request will be deasserted when there's < 7 bytes in fifo
+ * low service request will be deasserted when there's < 4*7 bytes in fifo
+ */
+ out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+ out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+
+ /* alarm when <= x bytes in FIFO */
+ out_be32(&fec->rfifo_alarm, 0x0000030c);
+ out_be32(&fec->tfifo_alarm, 0x00000100);
+
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
+ out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
+
+ /* enable crc generation */
+ out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
+ out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */
+ out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */
+
+ /* set phy speed.
+ * this can't be done in phy driver, since it needs to be called
+ * before fec stuff (even on resume) */
+ out_be32(&fec->mii_speed, priv->mdio_speed);
+}
+
+/**
+ * mpc52xx_fec_start
+ * @dev: network device
+ *
+ * This function is called to start or restart the FEC during a link
+ * change. This happens on fifo errors or when switching between half
+ * and full duplex.
+ */
+static void mpc52xx_fec_start(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+ u32 tmp;
+
+ /* clear sticky error bits */
+ tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
+ out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);
+
+ /* FIFOs will reset on mpc52xx_fec_enable */
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);
+
+ /* Set station address.
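+ *
+ * mpc52xx_fec_set_paddr() packs the address for the big-endian MPC5200:
+ * the first four octets go into paddr1 and the last two into the upper
+ * half of paddr2 together with FEC_PADDR2_TYPE.  For example, with the
+ * purely illustrative address 00:11:22:33:44:55 this yields
+ * paddr1 = 0x00112233 and paddr2 = 0x44558808.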
*/ + mpc52xx_fec_set_paddr(dev, dev->dev_addr); + + mpc52xx_fec_set_multicast_list(dev); + + /* set max frame len, enable flow control, select mii mode */ + rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */ + rcntrl |= FEC_RCNTRL_FCE; + + if (!priv->seven_wire_mode) + rcntrl |= FEC_RCNTRL_MII_MODE; + + if (priv->duplex == DUPLEX_FULL) + tcntrl = FEC_TCNTRL_FDEN; /* FD enable */ + else { + rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ + tcntrl = 0; + } + out_be32(&fec->r_cntrl, rcntrl); + out_be32(&fec->x_cntrl, tcntrl); + + /* Clear any outstanding interrupt. */ + out_be32(&fec->ievent, 0xffffffff); + + /* Enable interrupts we wish to service. */ + out_be32(&fec->imask, FEC_IMASK_ENABLE); + + /* And last, enable the transmit and receive processing. */ + out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN); + out_be32(&fec->r_des_active, 0x01000000); +} + +/** + * mpc52xx_fec_stop + * @dev: network device + * + * stop all activity on fec and empty dma buffers + */ +static void mpc52xx_fec_stop(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct mpc52xx_fec __iomem *fec = priv->fec; + unsigned long timeout; + + /* disable all interrupts */ + out_be32(&fec->imask, 0); + + /* Disable the rx task. */ + bcom_disable(priv->rx_dmatsk); + + /* Wait for tx queue to drain, but only if we're in process context */ + if (!in_interrupt()) { + timeout = jiffies + msecs_to_jiffies(2000); + while (time_before(jiffies, timeout) && + !bcom_queue_empty(priv->tx_dmatsk)) + msleep(100); + + if (time_after_eq(jiffies, timeout)) + dev_err(&dev->dev, "queues didn't drain\n"); +#if 1 + if (time_after_eq(jiffies, timeout)) { + dev_err(&dev->dev, " tx: index: %i, outdex: %i\n", + priv->tx_dmatsk->index, + priv->tx_dmatsk->outdex); + dev_err(&dev->dev, " rx: index: %i, outdex: %i\n", + priv->rx_dmatsk->index, + priv->rx_dmatsk->outdex); + } +#endif + } + + bcom_disable(priv->tx_dmatsk); + + /* Stop FEC */ + out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN); +} + +/* reset fec and bestcomm tasks */ +static void mpc52xx_fec_reset(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct mpc52xx_fec __iomem *fec = priv->fec; + + mpc52xx_fec_stop(dev); + + out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status)); + out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO); + + mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); + + mpc52xx_fec_hw_init(dev); + + bcom_fec_rx_reset(priv->rx_dmatsk); + bcom_fec_tx_reset(priv->tx_dmatsk); + + mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); + + bcom_enable(priv->rx_dmatsk); + bcom_enable(priv->tx_dmatsk); + + mpc52xx_fec_start(dev); + + netif_wake_queue(dev); +} + + +/* ethtool interface */ + +static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENODEV; + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static u32 mpc52xx_fec_get_msglevel(struct net_device *dev) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + priv->msg_enable = level; +} + +static const struct 
ethtool_ops mpc52xx_fec_ethtool_ops = { + .get_settings = mpc52xx_fec_get_settings, + .set_settings = mpc52xx_fec_set_settings, + .get_link = ethtool_op_get_link, + .get_msglevel = mpc52xx_fec_get_msglevel, + .set_msglevel = mpc52xx_fec_set_msglevel, + .get_ts_info = ethtool_op_get_ts_info, +}; + + +static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct mpc52xx_fec_priv *priv = netdev_priv(dev); + + if (!priv->phydev) + return -ENOTSUPP; + + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static const struct net_device_ops mpc52xx_fec_netdev_ops = { + .ndo_open = mpc52xx_fec_open, + .ndo_stop = mpc52xx_fec_close, + .ndo_start_xmit = mpc52xx_fec_start_xmit, + .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list, + .ndo_set_mac_address = mpc52xx_fec_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mpc52xx_fec_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_tx_timeout = mpc52xx_fec_tx_timeout, + .ndo_get_stats = mpc52xx_fec_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mpc52xx_fec_poll_controller, +#endif +}; + +/* ======================================================================== */ +/* OF Driver */ +/* ======================================================================== */ + +static int mpc52xx_fec_probe(struct platform_device *op) +{ + int rv; + struct net_device *ndev; + struct mpc52xx_fec_priv *priv = NULL; + struct resource mem; + const u32 *prop; + int prop_size; + struct device_node *np = op->dev.of_node; + const char *mac_addr; + + phys_addr_t rx_fifo; + phys_addr_t tx_fifo; + + /* Get the ether ndev & it's private zone */ + ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv)); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->ndev = ndev; + + /* Reserve FEC control zone */ + rv = of_address_to_resource(np, 0, &mem); + if (rv) { + pr_err("Error while parsing device node resource\n"); + goto err_netdev; + } + if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) { + pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n", + (unsigned long)resource_size(&mem), + sizeof(struct mpc52xx_fec)); + rv = -EINVAL; + goto err_netdev; + } + + if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), + DRIVER_NAME)) { + rv = -EBUSY; + goto err_netdev; + } + + /* Init ether ndev with what we have */ + ndev->netdev_ops = &mpc52xx_fec_netdev_ops; + ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops; + ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; + ndev->base_addr = mem.start; + SET_NETDEV_DEV(ndev, &op->dev); + + spin_lock_init(&priv->lock); + + /* ioremap the zones */ + priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec)); + + if (!priv->fec) { + rv = -ENOMEM; + goto err_mem_region; + } + + /* Bestcomm init */ + rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data); + tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data); + + priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE); + priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo); + + if (!priv->rx_dmatsk || !priv->tx_dmatsk) { + pr_err("Can not init SDMA tasks\n"); + rv = -ENOMEM; + goto err_rx_tx_dmatsk; + } + + /* Get the IRQ we need one by one */ + /* Control */ + ndev->irq = irq_of_parse_and_map(np, 0); + + /* RX */ + priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); + + /* TX */ + priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk); + + /* + * MAC address init: + * + * First try to read MAC address from DT + */ + mac_addr = 
of_get_mac_address(np); + if (mac_addr) { + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + } else { + struct mpc52xx_fec __iomem *fec = priv->fec; + + /* + * If the MAC addresse is not provided via DT then read + * it back from the controller regs + */ + *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1); + *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16; + } + + /* + * Check if the MAC address is valid, if not get a random one + */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(&ndev->dev, "using random MAC address %pM\n", + ndev->dev_addr); + } + + priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); + + /* + * Link mode configuration + */ + + /* Start with safe defaults for link connection */ + priv->speed = 100; + priv->duplex = DUPLEX_HALF; + priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1; + + /* The current speed preconfigures the speed of the MII link */ + prop = of_get_property(np, "current-speed", &prop_size); + if (prop && (prop_size >= sizeof(u32) * 2)) { + priv->speed = prop[0]; + priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF; + } + + /* If there is a phy handle, then get the PHY node */ + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); + + /* the 7-wire property means don't use MII mode */ + if (of_find_property(np, "fsl,7-wire-mode", NULL)) { + priv->seven_wire_mode = 1; + dev_info(&ndev->dev, "using 7-wire PHY mode\n"); + } + + /* Hardware init */ + mpc52xx_fec_hw_init(ndev); + mpc52xx_fec_reset_stats(ndev); + + rv = register_netdev(ndev); + if (rv < 0) + goto err_node; + + /* We're done ! */ + platform_set_drvdata(op, ndev); + netdev_info(ndev, "%s MAC %pM\n", + op->dev.of_node->full_name, ndev->dev_addr); + + return 0; + +err_node: + of_node_put(priv->phy_node); + irq_dispose_mapping(ndev->irq); +err_rx_tx_dmatsk: + if (priv->rx_dmatsk) + bcom_fec_rx_release(priv->rx_dmatsk); + if (priv->tx_dmatsk) + bcom_fec_tx_release(priv->tx_dmatsk); + iounmap(priv->fec); +err_mem_region: + release_mem_region(mem.start, sizeof(struct mpc52xx_fec)); +err_netdev: + free_netdev(ndev); + + return rv; +} + +static int +mpc52xx_fec_remove(struct platform_device *op) +{ + struct net_device *ndev; + struct mpc52xx_fec_priv *priv; + + ndev = platform_get_drvdata(op); + priv = netdev_priv(ndev); + + unregister_netdev(ndev); + + of_node_put(priv->phy_node); + priv->phy_node = NULL; + + irq_dispose_mapping(ndev->irq); + + bcom_fec_rx_release(priv->rx_dmatsk); + bcom_fec_tx_release(priv->tx_dmatsk); + + iounmap(priv->fec); + + release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec)); + + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state) +{ + struct net_device *dev = platform_get_drvdata(op); + + if (netif_running(dev)) + mpc52xx_fec_close(dev); + + return 0; +} + +static int mpc52xx_fec_of_resume(struct platform_device *op) +{ + struct net_device *dev = platform_get_drvdata(op); + + mpc52xx_fec_hw_init(dev); + mpc52xx_fec_reset_stats(dev); + + if (netif_running(dev)) + mpc52xx_fec_open(dev); + + return 0; +} +#endif + +static const struct of_device_id mpc52xx_fec_match[] = { + { .compatible = "fsl,mpc5200b-fec", }, + { .compatible = "fsl,mpc5200-fec", }, + { .compatible = "mpc5200-fec", }, + { } +}; + +MODULE_DEVICE_TABLE(of, mpc52xx_fec_match); + +static struct platform_driver mpc52xx_fec_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = mpc52xx_fec_match, + }, + .probe = 
mpc52xx_fec_probe, + .remove = mpc52xx_fec_remove, +#ifdef CONFIG_PM + .suspend = mpc52xx_fec_of_suspend, + .resume = mpc52xx_fec_of_resume, +#endif +}; + + +/* ======================================================================== */ +/* Module */ +/* ======================================================================== */ + +static int __init +mpc52xx_fec_init(void) +{ +#ifdef CONFIG_FEC_MPC52xx_MDIO + int ret; + ret = platform_driver_register(&mpc52xx_fec_mdio_driver); + if (ret) { + pr_err("failed to register mdio driver\n"); + return ret; + } +#endif + return platform_driver_register(&mpc52xx_fec_driver); +} + +static void __exit +mpc52xx_fec_exit(void) +{ + platform_driver_unregister(&mpc52xx_fec_driver); +#ifdef CONFIG_FEC_MPC52xx_MDIO + platform_driver_unregister(&mpc52xx_fec_mdio_driver); +#endif +} + + +module_init(mpc52xx_fec_init); +module_exit(mpc52xx_fec_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dale Farnsworth"); +MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC"); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.h b/drivers/net/ethernet/freescale/fec_mpc52xx.h new file mode 100644 index 000000000..10afa54dd --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.h @@ -0,0 +1,294 @@ +/* + * drivers/net/ethernet/freescale/fec_mpc52xx.h + * + * Driver for the MPC5200 Fast Ethernet Controller + * + * Author: Dale Farnsworth + * + * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#ifndef __DRIVERS_NET_MPC52XX_FEC_H__ +#define __DRIVERS_NET_MPC52XX_FEC_H__ + +#include + +/* Tunable constant */ +/* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */ +#define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */ +#define FEC_RX_NUM_BD 256 +#define FEC_TX_NUM_BD 64 + +#define FEC_RESET_DELAY 50 /* uS */ + +#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000) + +/* ======================================================================== */ +/* Hardware register sets & bits */ +/* ======================================================================== */ + +struct mpc52xx_fec { + u32 fec_id; /* FEC + 0x000 */ + u32 ievent; /* FEC + 0x004 */ + u32 imask; /* FEC + 0x008 */ + + u32 reserved0[1]; /* FEC + 0x00C */ + u32 r_des_active; /* FEC + 0x010 */ + u32 x_des_active; /* FEC + 0x014 */ + u32 r_des_active_cl; /* FEC + 0x018 */ + u32 x_des_active_cl; /* FEC + 0x01C */ + u32 ivent_set; /* FEC + 0x020 */ + u32 ecntrl; /* FEC + 0x024 */ + + u32 reserved1[6]; /* FEC + 0x028-03C */ + u32 mii_data; /* FEC + 0x040 */ + u32 mii_speed; /* FEC + 0x044 */ + u32 mii_status; /* FEC + 0x048 */ + + u32 reserved2[5]; /* FEC + 0x04C-05C */ + u32 mib_data; /* FEC + 0x060 */ + u32 mib_control; /* FEC + 0x064 */ + + u32 reserved3[6]; /* FEC + 0x068-7C */ + u32 r_activate; /* FEC + 0x080 */ + u32 r_cntrl; /* FEC + 0x084 */ + u32 r_hash; /* FEC + 0x088 */ + u32 r_data; /* FEC + 0x08C */ + u32 ar_done; /* FEC + 0x090 */ + u32 r_test; /* FEC + 0x094 */ + u32 r_mib; /* FEC + 0x098 */ + u32 r_da_low; /* FEC + 0x09C */ + u32 r_da_high; /* FEC + 0x0A0 */ + + u32 reserved4[7]; /* FEC + 0x0A4-0BC */ + u32 x_activate; /* FEC + 0x0C0 */ + u32 x_cntrl; /* FEC + 0x0C4 */ + u32 backoff; /* FEC + 0x0C8 */ + u32 x_data; /* FEC + 0x0CC */ + u32 x_status; /* FEC + 0x0D0 */ + u32 x_mib; /* FEC + 0x0D4 */ + u32 x_test; /* FEC + 0x0D8 */ + u32 fdxfc_da1; /* FEC + 0x0DC */ + u32 fdxfc_da2; /* FEC + 0x0E0 */ + 
u32 paddr1; /* FEC + 0x0E4 */ + u32 paddr2; /* FEC + 0x0E8 */ + u32 op_pause; /* FEC + 0x0EC */ + + u32 reserved5[4]; /* FEC + 0x0F0-0FC */ + u32 instr_reg; /* FEC + 0x100 */ + u32 context_reg; /* FEC + 0x104 */ + u32 test_cntrl; /* FEC + 0x108 */ + u32 acc_reg; /* FEC + 0x10C */ + u32 ones; /* FEC + 0x110 */ + u32 zeros; /* FEC + 0x114 */ + u32 iaddr1; /* FEC + 0x118 */ + u32 iaddr2; /* FEC + 0x11C */ + u32 gaddr1; /* FEC + 0x120 */ + u32 gaddr2; /* FEC + 0x124 */ + u32 random; /* FEC + 0x128 */ + u32 rand1; /* FEC + 0x12C */ + u32 tmp; /* FEC + 0x130 */ + + u32 reserved6[3]; /* FEC + 0x134-13C */ + u32 fifo_id; /* FEC + 0x140 */ + u32 x_wmrk; /* FEC + 0x144 */ + u32 fcntrl; /* FEC + 0x148 */ + u32 r_bound; /* FEC + 0x14C */ + u32 r_fstart; /* FEC + 0x150 */ + u32 r_count; /* FEC + 0x154 */ + u32 r_lag; /* FEC + 0x158 */ + u32 r_read; /* FEC + 0x15C */ + u32 r_write; /* FEC + 0x160 */ + u32 x_count; /* FEC + 0x164 */ + u32 x_lag; /* FEC + 0x168 */ + u32 x_retry; /* FEC + 0x16C */ + u32 x_write; /* FEC + 0x170 */ + u32 x_read; /* FEC + 0x174 */ + + u32 reserved7[2]; /* FEC + 0x178-17C */ + u32 fm_cntrl; /* FEC + 0x180 */ + u32 rfifo_data; /* FEC + 0x184 */ + u32 rfifo_status; /* FEC + 0x188 */ + u32 rfifo_cntrl; /* FEC + 0x18C */ + u32 rfifo_lrf_ptr; /* FEC + 0x190 */ + u32 rfifo_lwf_ptr; /* FEC + 0x194 */ + u32 rfifo_alarm; /* FEC + 0x198 */ + u32 rfifo_rdptr; /* FEC + 0x19C */ + u32 rfifo_wrptr; /* FEC + 0x1A0 */ + u32 tfifo_data; /* FEC + 0x1A4 */ + u32 tfifo_status; /* FEC + 0x1A8 */ + u32 tfifo_cntrl; /* FEC + 0x1AC */ + u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */ + u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */ + u32 tfifo_alarm; /* FEC + 0x1B8 */ + u32 tfifo_rdptr; /* FEC + 0x1BC */ + u32 tfifo_wrptr; /* FEC + 0x1C0 */ + + u32 reset_cntrl; /* FEC + 0x1C4 */ + u32 xmit_fsm; /* FEC + 0x1C8 */ + + u32 reserved8[3]; /* FEC + 0x1CC-1D4 */ + u32 rdes_data0; /* FEC + 0x1D8 */ + u32 rdes_data1; /* FEC + 0x1DC */ + u32 r_length; /* FEC + 0x1E0 */ + u32 x_length; /* FEC + 0x1E4 */ + u32 x_addr; /* FEC + 0x1E8 */ + u32 cdes_data; /* FEC + 0x1EC */ + u32 status; /* FEC + 0x1F0 */ + u32 dma_control; /* FEC + 0x1F4 */ + u32 des_cmnd; /* FEC + 0x1F8 */ + u32 data; /* FEC + 0x1FC */ + + u32 rmon_t_drop; /* FEC + 0x200 */ + u32 rmon_t_packets; /* FEC + 0x204 */ + u32 rmon_t_bc_pkt; /* FEC + 0x208 */ + u32 rmon_t_mc_pkt; /* FEC + 0x20C */ + u32 rmon_t_crc_align; /* FEC + 0x210 */ + u32 rmon_t_undersize; /* FEC + 0x214 */ + u32 rmon_t_oversize; /* FEC + 0x218 */ + u32 rmon_t_frag; /* FEC + 0x21C */ + u32 rmon_t_jab; /* FEC + 0x220 */ + u32 rmon_t_col; /* FEC + 0x224 */ + u32 rmon_t_p64; /* FEC + 0x228 */ + u32 rmon_t_p65to127; /* FEC + 0x22C */ + u32 rmon_t_p128to255; /* FEC + 0x230 */ + u32 rmon_t_p256to511; /* FEC + 0x234 */ + u32 rmon_t_p512to1023; /* FEC + 0x238 */ + u32 rmon_t_p1024to2047; /* FEC + 0x23C */ + u32 rmon_t_p_gte2048; /* FEC + 0x240 */ + u32 rmon_t_octets; /* FEC + 0x244 */ + u32 ieee_t_drop; /* FEC + 0x248 */ + u32 ieee_t_frame_ok; /* FEC + 0x24C */ + u32 ieee_t_1col; /* FEC + 0x250 */ + u32 ieee_t_mcol; /* FEC + 0x254 */ + u32 ieee_t_def; /* FEC + 0x258 */ + u32 ieee_t_lcol; /* FEC + 0x25C */ + u32 ieee_t_excol; /* FEC + 0x260 */ + u32 ieee_t_macerr; /* FEC + 0x264 */ + u32 ieee_t_cserr; /* FEC + 0x268 */ + u32 ieee_t_sqe; /* FEC + 0x26C */ + u32 t_fdxfc; /* FEC + 0x270 */ + u32 ieee_t_octets_ok; /* FEC + 0x274 */ + + u32 reserved9[2]; /* FEC + 0x278-27C */ + u32 rmon_r_drop; /* FEC + 0x280 */ + u32 rmon_r_packets; /* FEC + 0x284 */ + u32 rmon_r_bc_pkt; /* FEC + 0x288 */ + u32 rmon_r_mc_pkt; /* FEC 
+ 0x28C */ + u32 rmon_r_crc_align; /* FEC + 0x290 */ + u32 rmon_r_undersize; /* FEC + 0x294 */ + u32 rmon_r_oversize; /* FEC + 0x298 */ + u32 rmon_r_frag; /* FEC + 0x29C */ + u32 rmon_r_jab; /* FEC + 0x2A0 */ + + u32 rmon_r_resvd_0; /* FEC + 0x2A4 */ + + u32 rmon_r_p64; /* FEC + 0x2A8 */ + u32 rmon_r_p65to127; /* FEC + 0x2AC */ + u32 rmon_r_p128to255; /* FEC + 0x2B0 */ + u32 rmon_r_p256to511; /* FEC + 0x2B4 */ + u32 rmon_r_p512to1023; /* FEC + 0x2B8 */ + u32 rmon_r_p1024to2047; /* FEC + 0x2BC */ + u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */ + u32 rmon_r_octets; /* FEC + 0x2C4 */ + u32 ieee_r_drop; /* FEC + 0x2C8 */ + u32 ieee_r_frame_ok; /* FEC + 0x2CC */ + u32 ieee_r_crc; /* FEC + 0x2D0 */ + u32 ieee_r_align; /* FEC + 0x2D4 */ + u32 r_macerr; /* FEC + 0x2D8 */ + u32 r_fdxfc; /* FEC + 0x2DC */ + u32 ieee_r_octets_ok; /* FEC + 0x2E0 */ + + u32 reserved10[7]; /* FEC + 0x2E4-2FC */ + + u32 reserved11[64]; /* FEC + 0x300-3FF */ +}; + +#define FEC_MIB_DISABLE 0x80000000 + +#define FEC_IEVENT_HBERR 0x80000000 +#define FEC_IEVENT_BABR 0x40000000 +#define FEC_IEVENT_BABT 0x20000000 +#define FEC_IEVENT_GRA 0x10000000 +#define FEC_IEVENT_TFINT 0x08000000 +#define FEC_IEVENT_MII 0x00800000 +#define FEC_IEVENT_LATE_COL 0x00200000 +#define FEC_IEVENT_COL_RETRY_LIM 0x00100000 +#define FEC_IEVENT_XFIFO_UN 0x00080000 +#define FEC_IEVENT_XFIFO_ERROR 0x00040000 +#define FEC_IEVENT_RFIFO_ERROR 0x00020000 + +#define FEC_IMASK_HBERR 0x80000000 +#define FEC_IMASK_BABR 0x40000000 +#define FEC_IMASK_BABT 0x20000000 +#define FEC_IMASK_GRA 0x10000000 +#define FEC_IMASK_MII 0x00800000 +#define FEC_IMASK_LATE_COL 0x00200000 +#define FEC_IMASK_COL_RETRY_LIM 0x00100000 +#define FEC_IMASK_XFIFO_UN 0x00080000 +#define FEC_IMASK_XFIFO_ERROR 0x00040000 +#define FEC_IMASK_RFIFO_ERROR 0x00020000 + +/* all but MII, which is enabled separately */ +#define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \ + FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \ + FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \ + FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR) + +#define FEC_RCNTRL_MAX_FL_SHIFT 16 +#define FEC_RCNTRL_LOOP 0x01 +#define FEC_RCNTRL_DRT 0x02 +#define FEC_RCNTRL_MII_MODE 0x04 +#define FEC_RCNTRL_PROM 0x08 +#define FEC_RCNTRL_BC_REJ 0x10 +#define FEC_RCNTRL_FCE 0x20 + +#define FEC_TCNTRL_GTS 0x00000001 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_TFC_PAUSE 0x00000008 +#define FEC_TCNTRL_RFC_PAUSE 0x00000010 + +#define FEC_ECNTRL_RESET 0x00000001 +#define FEC_ECNTRL_ETHER_EN 0x00000002 + +#define FEC_MII_DATA_ST 0x40000000 /* Start frame */ +#define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */ +#define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */ +#define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */ +#define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */ +#define FEC_MII_DATA_TA 0x00020000 /* Turnaround */ +#define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */ + +#define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA) +#define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA) + +#define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */ +#define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */ + +#define FEC_PADDR2_TYPE 0x8808 + +#define FEC_OP_PAUSE_OPCODE 0x00010000 + +#define FEC_FIFO_WMRK_256B 0x3 + +#define FEC_FIFO_STATUS_ERR 0x00400000 +#define FEC_FIFO_STATUS_UF 0x00200000 +#define FEC_FIFO_STATUS_OF 0x00100000 + +#define FEC_FIFO_CNTRL_FRAME 0x08000000 +#define 
FEC_FIFO_CNTRL_LTG_7 0x07000000 + +#define FEC_RESET_CNTRL_RESET_FIFO 0x02000000 +#define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000 + +#define FEC_XMIT_FSM_APPEND_CRC 0x02000000 +#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000 + + +extern struct platform_driver mpc52xx_fec_mdio_driver; + +#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */ diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c new file mode 100644 index 000000000..1e647beaf --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -0,0 +1,158 @@ +/* + * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver + * + * Copyright (C) 2007 Domen Puncer, Telargo, Inc. + * Copyright (C) 2008 Wolfram Sang, Pengutronix + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fec_mpc52xx.h" + +struct mpc52xx_fec_mdio_priv { + struct mpc52xx_fec __iomem *regs; + int mdio_irqs[PHY_MAX_ADDR]; +}; + +static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id, + int reg, u32 value) +{ + struct mpc52xx_fec_mdio_priv *priv = bus->priv; + struct mpc52xx_fec __iomem *fec = priv->regs; + int tries = 3; + + value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; + value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; + + out_be32(&fec->ievent, FEC_IEVENT_MII); + out_be32(&fec->mii_data, value); + + /* wait for it to finish, this takes about 23 us on lite5200b */ + while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) + msleep(1); + + if (!tries) + return -ETIMEDOUT; + + return value & FEC_MII_DATA_OP_RD ? 
+ in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0; +} + +static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME); +} + +static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, + u16 data) +{ + return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, + data | FEC_MII_WRITE_FRAME); +} + +static int mpc52xx_fec_mdio_probe(struct platform_device *of) +{ + struct device *dev = &of->dev; + struct device_node *np = of->dev.of_node; + struct mii_bus *bus; + struct mpc52xx_fec_mdio_priv *priv; + struct resource res; + int err; + + bus = mdiobus_alloc(); + if (bus == NULL) + return -ENOMEM; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (priv == NULL) { + err = -ENOMEM; + goto out_free; + } + + bus->name = "mpc52xx MII bus"; + bus->read = mpc52xx_fec_mdio_read; + bus->write = mpc52xx_fec_mdio_write; + + /* setup irqs */ + bus->irq = priv->mdio_irqs; + + /* setup registers */ + err = of_address_to_resource(np, 0, &res); + if (err) + goto out_free; + priv->regs = ioremap(res.start, resource_size(&res)); + if (priv->regs == NULL) { + err = -ENOMEM; + goto out_free; + } + + snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + bus->priv = priv; + + bus->parent = dev; + dev_set_drvdata(dev, bus); + + /* set MII speed */ + out_be32(&priv->regs->mii_speed, + ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1); + + err = of_mdiobus_register(bus, np); + if (err) + goto out_unmap; + + return 0; + + out_unmap: + iounmap(priv->regs); + out_free: + kfree(priv); + mdiobus_free(bus); + + return err; +} + +static int mpc52xx_fec_mdio_remove(struct platform_device *of) +{ + struct mii_bus *bus = platform_get_drvdata(of); + struct mpc52xx_fec_mdio_priv *priv = bus->priv; + + mdiobus_unregister(bus); + iounmap(priv->regs); + kfree(priv); + mdiobus_free(bus); + + return 0; +} + +static const struct of_device_id mpc52xx_fec_mdio_match[] = { + { .compatible = "fsl,mpc5200b-mdio", }, + { .compatible = "fsl,mpc5200-mdio", }, + { .compatible = "mpc5200b-fec-phy", }, + {} +}; +MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); + +struct platform_driver mpc52xx_fec_mdio_driver = { + .driver = { + .name = "mpc5200b-fec-phy", + .owner = THIS_MODULE, + .of_match_table = mpc52xx_fec_mdio_match, + }, + .probe = mpc52xx_fec_mdio_probe, + .remove = mpc52xx_fec_mdio_remove, +}; + +/* let fec driver call it, since this has to be registered before it */ +EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c new file mode 100644 index 000000000..a583d89b1 --- /dev/null +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -0,0 +1,637 @@ +/* + * Fast Ethernet Controller (ENET) PTP driver for MX6x. + * + * Copyright (C) 2012 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
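/* Editor's illustrative sketch (not part of the patch): how the mii_data word used by
 * mpc52xx_fec_mdio_transfer() above is composed for a clause-22 read.  Constants mirror
 * the FEC_MII_* defines earlier in this patch (0x17 = 23, 0x12 = 18); the helper name is
 * hypothetical and the code is a standalone model, not driver code. */
#include <stdint.h>

#define FEC_MII_DATA_ST       0x40000000  /* start of frame */
#define FEC_MII_DATA_OP_RD    0x20000000  /* read opcode */
#define FEC_MII_DATA_TA       0x00020000  /* turnaround */
#define FEC_MII_DATA_PA_MSK   0x0f800000  /* PHY address field */
#define FEC_MII_DATA_RA_MSK   0x007c0000  /* register address field */
#define FEC_MII_DATA_PA_SHIFT 23
#define FEC_MII_DATA_RA_SHIFT 18

/* Build the value written to the mii_data register for a read of register
 * 'reg' on PHY address 'phy'; after the transfer completes, the 16-bit
 * result sits in the low half of the same register. */
static uint32_t fec_mii_read_word(unsigned int phy, unsigned int reg)
{
	uint32_t v = FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA;

	v |= ((uint32_t)phy << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
	v |= ((uint32_t)reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
	return v;
}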
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fec.h" + +/* FEC 1588 register bits */ +#define FEC_T_CTRL_SLAVE 0x00002000 +#define FEC_T_CTRL_CAPTURE 0x00000800 +#define FEC_T_CTRL_RESTART 0x00000200 +#define FEC_T_CTRL_PERIOD_RST 0x00000030 +#define FEC_T_CTRL_PERIOD_EN 0x00000010 +#define FEC_T_CTRL_ENABLE 0x00000001 + +#define FEC_T_INC_MASK 0x0000007f +#define FEC_T_INC_OFFSET 0 +#define FEC_T_INC_CORR_MASK 0x00007f00 +#define FEC_T_INC_CORR_OFFSET 8 + +#define FEC_T_CTRL_PINPER 0x00000080 +#define FEC_T_TF0_MASK 0x00000001 +#define FEC_T_TF0_OFFSET 0 +#define FEC_T_TF1_MASK 0x00000002 +#define FEC_T_TF1_OFFSET 1 +#define FEC_T_TF2_MASK 0x00000004 +#define FEC_T_TF2_OFFSET 2 +#define FEC_T_TF3_MASK 0x00000008 +#define FEC_T_TF3_OFFSET 3 +#define FEC_T_TDRE_MASK 0x00000001 +#define FEC_T_TDRE_OFFSET 0 +#define FEC_T_TMODE_MASK 0x0000003C +#define FEC_T_TMODE_OFFSET 2 +#define FEC_T_TIE_MASK 0x00000040 +#define FEC_T_TIE_OFFSET 6 +#define FEC_T_TF_MASK 0x00000080 +#define FEC_T_TF_OFFSET 7 + +#define FEC_ATIME_CTRL 0x400 +#define FEC_ATIME 0x404 +#define FEC_ATIME_EVT_OFFSET 0x408 +#define FEC_ATIME_EVT_PERIOD 0x40c +#define FEC_ATIME_CORR 0x410 +#define FEC_ATIME_INC 0x414 +#define FEC_TS_TIMESTAMP 0x418 + +#define FEC_TGSR 0x604 +#define FEC_TCSR(n) (0x608 + n * 0x08) +#define FEC_TCCR(n) (0x60C + n * 0x08) +#define MAX_TIMER_CHANNEL 3 +#define FEC_TMODE_TOGGLE 0x05 +#define FEC_HIGH_PULSE 0x0F + +#define FEC_CC_MULT (1 << 31) +#define FEC_COUNTER_PERIOD (1 << 31) +#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC +#define FEC_CHANNLE_0 0 +#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0 + +/** + * fec_ptp_enable_pps + * @fep: the fec_enet_private structure handle + * @enable: enable the channel pps output + * + * This function enble the PPS ouput on the timer channel. + */ +static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) +{ + unsigned long flags; + u32 val, tempval; + int inc; + struct timespec ts; + u64 ns; + u32 remainder; + val = 0; + + if (!(fep->hwts_tx_en || fep->hwts_rx_en)) { + dev_err(&fep->pdev->dev, "No ptp stack is running\n"); + return -EINVAL; + } + + if (fep->pps_enable == enable) + return 0; + + fep->pps_channel = DEFAULT_PPS_CHANNEL; + fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; + inc = fep->ptp_inc; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + if (enable) { + /* clear capture or output compare interrupt status if have. + */ + writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel)); + + /* It is recommended to double check the TMODE field in the + * TCSR register to be cleared before the first compare counter + * is written into TCCR register. Just add a double check. 
+ */ + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + do { + val &= ~(FEC_T_TMODE_MASK); + writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + } while (val & FEC_T_TMODE_MASK); + + /* Dummy read counter to update the counter */ + timecounter_read(&fep->tc); + /* We want to find the first compare event in the next + * second point. So we need to know what the ptp time + * is now and how many nanoseconds is ahead to get next second. + * The remaining nanosecond ahead before the next second would be + * NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds + * to current timer would be next second. + */ + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + tempval = readl(fep->hwp + FEC_ATIME); + /* Convert the ptp local counter to 1588 timestamp */ + ns = timecounter_cyc2time(&fep->tc, tempval); + ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts.tv_nsec = remainder; + + /* The tempval is less than 3 seconds, and so val is less than + * 4 seconds. No overflow for 32bit calculation. + */ + val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval; + + /* Need to consider the situation that the current time is + * very close to the second point, which means NSEC_PER_SEC + * - ts.tv_nsec is close to be zero(For example 20ns); Since the timer + * is still running when we calculate the first compare event, it is + * possible that the remaining nanoseonds run out before the compare + * counter is calculated and written into TCCR register. To avoid + * this possibility, we will set the compare event to be the next + * of next second. The current setting is 31-bit timer and wrap + * around over 2 seconds. So it is okay to set the next of next + * seond for the timer. + */ + val += NSEC_PER_SEC; + + /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to current + * ptp counter, which maybe cause 32-bit wrap. Since the + * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 second. + * We can ensure the wrap will not cause issue. If the offset + * is bigger than fep->cc.mask would be a error. + */ + val &= fep->cc.mask; + writel(val, fep->hwp + FEC_TCCR(fep->pps_channel)); + + /* Calculate the second the compare event timestamp */ + fep->next_counter = (val + fep->reload_period) & fep->cc.mask; + + /* * Enable compare event when overflow */ + val = readl(fep->hwp + FEC_ATIME_CTRL); + val |= FEC_T_CTRL_PINPER; + writel(val, fep->hwp + FEC_ATIME_CTRL); + + /* Compare channel setting. */ + val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); + val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET); + val &= ~(1 << FEC_T_TDRE_OFFSET); + val &= ~(FEC_T_TMODE_MASK); + val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET); + writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); + + /* Write the second compare event timestamp and calculate + * the third timestamp. Refer the TCCR register detail in the spec. 
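/* Editor's illustrative sketch (not part of the patch): the compare-value arithmetic in
 * fec_ptp_enable_pps() above, assuming the 1 ns-per-tick, 31-bit wrapping counter that
 * fec_ptp_start_cyclecounter() programs.  Names are hypothetical; this is a standalone
 * model of the calculation, not driver code. */
#include <stdint.h>

#define NSEC_PER_SEC 1000000000U

/* 'counter' is the raw timer value just captured and 'tv_nsec' the nanosecond
 * part of the PTP time it corresponds to.  The first compare event is placed
 * one full second past the next second boundary so the free-running counter
 * cannot overtake it while it is being programmed. */
static uint32_t fec_pps_first_compare(uint32_t counter, uint32_t tv_nsec,
				      uint32_t counter_mask)
{
	uint32_t val = counter + (NSEC_PER_SEC - tv_nsec);	/* next 1 Hz edge */

	val += NSEC_PER_SEC;		/* skip one edge to avoid the race */
	return val & counter_mask;	/* counter_mask = 0x7fffffff here */
}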
+ */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel)); + fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; + } else { + writel(0, fep->hwp + FEC_TCSR(fep->pps_channel)); + } + + fep->pps_enable = enable; + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_read - read raw cycle counter (to be used by time counter) + * @cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static cycle_t fec_ptp_read(const struct cyclecounter *cc) +{ + struct fec_enet_private *fep = + container_of(cc, struct fec_enet_private, cc); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + u32 tempval; + + tempval = readl(fep->hwp + FEC_ATIME_CTRL); + tempval |= FEC_T_CTRL_CAPTURE; + writel(tempval, fep->hwp + FEC_ATIME_CTRL); + + if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE) + udelay(1); + + return readl(fep->hwp + FEC_ATIME); +} + +/** + * fec_ptp_start_cyclecounter - create the cycle counter from hw + * @ndev: network device + * + * this function initializes the timecounter and cyclecounter + * structures for use in generated a ns counter from the arbitrary + * fixed point cycles registers in the hardware. + */ +void fec_ptp_start_cyclecounter(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned long flags; + int inc; + + inc = 1000000000 / fep->cycle_speed; + + /* grab the ptp lock */ + spin_lock_irqsave(&fep->tmreg_lock, flags); + + /* 1ns counter */ + writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC); + + /* use 31-bit timer counter */ + writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD); + + writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST, + fep->hwp + FEC_ATIME_CTRL); + + memset(&fep->cc, 0, sizeof(fep->cc)); + fep->cc.read = fec_ptp_read; + fep->cc.mask = CLOCKSOURCE_MASK(31); + fep->cc.shift = 31; + fep->cc.mult = FEC_CC_MULT; + + /* reset the ns time counter */ + timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real())); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); +} + +/** + * fec_ptp_adjfreq - adjust ptp cycle frequency + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * Adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + * + * Because ENET hardware frequency adjust is complex, + * using software method to do that. + */ +static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + unsigned long flags; + int neg_adj = 0; + u32 i, tmp; + u32 corr_inc, corr_period; + u32 corr_ns; + u64 lhs, rhs; + + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + + if (ppb == 0) + return 0; + + if (ppb < 0) { + ppb = -ppb; + neg_adj = 1; + } + + /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC; + * Try to find the corr_inc between 1 to fep->ptp_inc to + * meet adjustment requirement. + */ + lhs = NSEC_PER_SEC; + rhs = (u64)ppb * (u64)fep->ptp_inc; + for (i = 1; i <= fep->ptp_inc; i++) { + if (lhs >= rhs) { + corr_inc = i; + corr_period = div_u64(lhs, rhs); + break; + } + lhs += NSEC_PER_SEC; + } + /* Not found? Set it to high value - double speed + * correct in every clock step. 
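/* Editor's illustrative sketch (not part of the patch): the correction search in
 * fec_ptp_adjfreq() above, including the too-large fallback described in the comment
 * just above.  Every corr_period increments, the hardware adds corr_ns = inc +/- corr_inc
 * instead of inc, so corr_inc / (corr_period * inc) approximates ppb / 1e9.  Names are
 * hypothetical; standalone model only. */
#include <stdint.h>

static void fec_pick_correction(uint32_t ppb, uint32_t inc_ns,
				uint32_t *corr_inc, uint32_t *corr_period)
{
	uint64_t lhs = 1000000000ULL;		/* i * NSEC_PER_SEC */
	uint64_t rhs = (uint64_t)ppb * inc_ns;
	uint32_t i;

	for (i = 1; i <= inc_ns; i++) {
		if (lhs >= rhs) {
			*corr_inc = i;
			*corr_period = (uint32_t)(lhs / rhs);
			return;
		}
		lhs += 1000000000ULL;
	}
	/* Requested adjustment too large: correct by a full increment on
	 * every tick, i.e. the coarsest (double-speed) setting. */
	*corr_inc = inc_ns;
	*corr_period = 1;
}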
+ */ + if (i > fep->ptp_inc) { + corr_inc = fep->ptp_inc; + corr_period = 1; + } + + if (neg_adj) + corr_ns = fep->ptp_inc - corr_inc; + else + corr_ns = fep->ptp_inc + corr_inc; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + + tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; + tmp |= corr_ns << FEC_T_INC_CORR_OFFSET; + writel(tmp, fep->hwp + FEC_ATIME_INC); + writel(corr_period, fep->hwp + FEC_ATIME_CORR); + /* dummy read to update the timer. */ + timecounter_read(&fep->tc); + + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. + */ +static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + timecounter_adjtime(&fep->tc, delta); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + return 0; +} + +/** + * fec_ptp_gettime + * @ptp: the ptp clock structure + * @ts: timespec structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec. + */ +static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct fec_enet_private *adapter = + container_of(ptp, struct fec_enet_private, ptp_caps); + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * fec_ptp_settime + * @ptp: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int fec_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + + u64 ns; + unsigned long flags; + u32 counter; + + mutex_lock(&fep->ptp_clk_mutex); + /* Check the ptp clock */ + if (!fep->ptp_clk_on) { + mutex_unlock(&fep->ptp_clk_mutex); + return -EINVAL; + } + + ns = timespec64_to_ns(ts); + /* Get the timer value based on timestamp. + * Update the counter with the masked value. 
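/* Editor's illustrative sketch (not part of the patch): the re-basing that
 * fec_ptp_settime() performs just below - only the low 31 bits of the new time fit in
 * the hardware counter, so the software timecounter is re-initialized at the full
 * 64-bit value.  Structure and names are simplified stand-ins, not kernel types. */
#include <stdint.h>

struct soft_timecounter {
	uint64_t base_ns;	/* ns value at the last re-base */
	uint32_t base_cyc;	/* raw counter value at the last re-base */
	uint32_t mask;		/* e.g. 0x7fffffff for a 31-bit counter */
};

/* Returns the value to write into the hardware counter register. */
static uint32_t settime_counter(struct soft_timecounter *tc, uint64_t ns)
{
	uint32_t counter = (uint32_t)(ns & tc->mask);

	tc->base_ns = ns;
	tc->base_cyc = counter;
	return counter;
}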
+ */ + counter = ns & fep->cc.mask; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + writel(counter, fep->hwp + FEC_ATIME); + timecounter_init(&fep->tc, &fep->cc, ns); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); + return 0; +} + +/** + * fec_ptp_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + */ +static int fec_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct fec_enet_private *fep = + container_of(ptp, struct fec_enet_private, ptp_caps); + int ret = 0; + + if (rq->type == PTP_CLK_REQ_PPS) { + ret = fec_ptp_enable_pps(fep, on); + + return ret; + } + return -EOPNOTSUPP; +} + +/** + * fec_ptp_hwtstamp_ioctl - control hardware time stamping + * @ndev: pointer to net_device + * @ifreq: ioctl data + * @cmd: particular ioctl requested + */ +int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + fep->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + fep->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + if (fep->hwts_rx_en) + fep->hwts_rx_en = 0; + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + default: + /* + * register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages and hardware does not support + * timestamping all packets => return error + */ + fep->hwts_rx_en = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = (fep->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * fec_time_keep - call timecounter_read every second to avoid timer overrun + * because ENET just support 32bit counter, will timeout in 4s + */ +static void fec_time_keep(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); + u64 ns; + unsigned long flags; + + mutex_lock(&fep->ptp_clk_mutex); + if (fep->ptp_clk_on) { + spin_lock_irqsave(&fep->tmreg_lock, flags); + ns = timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + } + mutex_unlock(&fep->ptp_clk_mutex); + + schedule_delayed_work(&fep->time_keep, HZ); +} + +/** + * fec_ptp_init + * @ndev: The FEC network adapter + * + * This function performs the required steps for enabling ptp + * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. 
+ */ + +void fec_ptp_init(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + fep->ptp_caps.owner = THIS_MODULE; + snprintf(fep->ptp_caps.name, 16, "fec ptp"); + + fep->ptp_caps.max_adj = 250000000; + fep->ptp_caps.n_alarm = 0; + fep->ptp_caps.n_ext_ts = 0; + fep->ptp_caps.n_per_out = 0; + fep->ptp_caps.n_pins = 0; + fep->ptp_caps.pps = 1; + fep->ptp_caps.adjfreq = fec_ptp_adjfreq; + fep->ptp_caps.adjtime = fec_ptp_adjtime; + fep->ptp_caps.gettime64 = fec_ptp_gettime; + fep->ptp_caps.settime64 = fec_ptp_settime; + fep->ptp_caps.enable = fec_ptp_enable; + + fep->cycle_speed = clk_get_rate(fep->clk_ptp); + fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; + + spin_lock_init(&fep->tmreg_lock); + + fec_ptp_start_cyclecounter(ndev); + + INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); + + fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); + if (IS_ERR(fep->ptp_clock)) { + fep->ptp_clock = NULL; + pr_err("ptp_clock_register failed\n"); + } + + schedule_delayed_work(&fep->time_keep, HZ); +} + +/** + * fec_ptp_check_pps_event + * @fep: the fec_enet_private structure handle + * + * This function check the pps event and reload the timer compare counter. + */ +uint fec_ptp_check_pps_event(struct fec_enet_private *fep) +{ + u32 val; + u8 channel = fep->pps_channel; + struct ptp_clock_event event; + + val = readl(fep->hwp + FEC_TCSR(channel)); + if (val & FEC_T_TF_MASK) { + /* Write the next next compare(not the next according the spec) + * value to the register + */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); + do { + writel(val, fep->hwp + FEC_TCSR(channel)); + } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); + + /* Update the counter; */ + fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; + + event.type = PTP_CLOCK_PPS; + ptp_clock_event(fep->ptp_clock, &event); + return 1; + } + + return 0; +} diff --git a/drivers/net/ethernet/freescale/fs_enet/Kconfig b/drivers/net/ethernet/freescale/fs_enet/Kconfig new file mode 100644 index 000000000..be92229f2 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/Kconfig @@ -0,0 +1,34 @@ +config FS_ENET + tristate "Freescale Ethernet Driver" + depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x) + select MII + select PHYLIB + +config FS_ENET_MPC5121_FEC + def_bool y if (FS_ENET && PPC_MPC512x) + select FS_ENET_HAS_FEC + +config FS_ENET_HAS_SCC + bool "Chip has an SCC usable for ethernet" + depends on FS_ENET && (CPM1 || CPM2) + default y + +config FS_ENET_HAS_FCC + bool "Chip has an FCC usable for ethernet" + depends on FS_ENET && CPM2 + default y + +config FS_ENET_HAS_FEC + bool "Chip has an FEC usable for ethernet" + depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC) + select FS_ENET_MDIO_FEC + default y + +config FS_ENET_MDIO_FEC + tristate "MDIO driver for FEC" + depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC) + +config FS_ENET_MDIO_FCC + tristate "MDIO driver for FCC" + depends on FS_ENET && CPM2 + select MDIO_BITBANG diff --git a/drivers/net/ethernet/freescale/fs_enet/Makefile b/drivers/net/ethernet/freescale/fs_enet/Makefile new file mode 100644 index 000000000..d4a305ee3 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Freescale Ethernet controllers +# + +obj-$(CONFIG_FS_ENET) += fs_enet.o + +fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o +fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o 
+fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o + +obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o +obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o + +fs_enet-objs := fs_enet-main.o $(fs_enet-m) diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h new file mode 100644 index 000000000..b9fe5bde4 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fec.h @@ -0,0 +1,44 @@ +#ifndef FS_ENET_FEC_H +#define FS_ENET_FEC_H + +/* CRC polynomium used by the FEC for the multicast group filtering */ +#define FEC_CRC_POLY 0x04C11DB7 + +#define FEC_MAX_MULTICAST_ADDRS 64 + +/* Interrupt events/masks. +*/ +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ +#define FEC_ENET_RXF 0x02000000U /* Full frame received */ +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ +#define FEC_ENET_MII 0x00800000U /* MII interrupt */ +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ + +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +/* RMII mode enabled only when MII_MODE bit is set too. */ +#define FEC_RCNTRL_RMII_MODE (0x00000100 | \ + FEC_RCNTRL_MII_MODE | FEC_RCNTRL_FCE) +#define FEC_RCNTRL_FCE 0x00000020 +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 +#endif diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c new file mode 100644 index 000000000..9b3639eae --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -0,0 +1,1141 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * Heavily based on original FEC driver by Dan Malek + * and modifications by Joakim Tjernlund + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
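/* Editor's illustrative sketch (not part of the patch): the multicast filtering scheme
 * implied by FEC_CRC_POLY and FEC_MAX_MULTICAST_ADDRS in fec.h above - a CRC-32 over the
 * six address bytes, with a few of its bits selecting one of 64 hash bins.  The exact
 * bit ordering and bin selection are controller-specific; this is an assumed, generic
 * model with hypothetical names, not the driver's hashing code. */
#include <stdint.h>

#define FEC_CRC_POLY 0x04C11DB7U	/* same polynomial as in fec.h above */

/* Plain MSB-first CRC-32 over the 6-byte station address. */
static uint32_t mcast_crc(const uint8_t *mac)
{
	uint32_t crc = 0xffffffffU;
	int i, bit;

	for (i = 0; i < 6; i++) {
		crc ^= (uint32_t)mac[i] << 24;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x80000000U) ?
				(crc << 1) ^ FEC_CRC_POLY : crc << 1;
	}
	return crc;
}

/* Six CRC bits pick one of the 64 bins spread across the two 32-bit
 * group-hash registers (high word for bins 32..63, low word otherwise). */
static void mcast_bin(uint32_t crc, int *use_high_reg, uint32_t *bit)
{
	unsigned int bin = crc >> 26;	/* top 6 bits; selection is hw-specific */

	*use_high_reg = bin >= 32;
	*bit = 1U << (bin & 31);
}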
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "fs_enet.h" + +/*************************************************/ + +MODULE_AUTHOR("Pantelis Antoniou "); +MODULE_DESCRIPTION("Freescale Ethernet Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ +module_param(fs_enet_debug, int, 0); +MODULE_PARM_DESC(fs_enet_debug, + "Freescale bitmapped debugging message enable value"); + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev); +#endif + +static void fs_set_multicast_list(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + (*fep->ops->set_multicast_list)(dev); +} + +static void skb_align(struct sk_buff *skb, int align) +{ + int off = ((unsigned long)skb->data) & (align - 1); + + if (off) + skb_reserve(skb, align - off); +} + +/* NAPI receive function */ +static int fs_enet_rx_napi(struct napi_struct *napi, int budget) +{ + struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); + struct net_device *dev = fep->ndev; + const struct fs_platform_info *fpi = fep->fpi; + cbd_t __iomem *bdp; + struct sk_buff *skb, *skbn, *skbt; + int received = 0; + u16 pkt_len, sc; + int curidx; + + if (budget <= 0) + return received; + + /* + * First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = fep->cur_rx; + + /* clear RX status bits for napi*/ + (*fep->ops->napi_clear_rx_event)(dev); + + while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { + curidx = bdp - fep->rx_bd_base; + + /* + * Since we have allocated space to hold a complete frame, + * the last indicator should be set. + */ + if ((sc & BD_ENET_RX_LAST) == 0) + dev_warn(fep->dev, "rcv is not +last\n"); + + /* + * Check for errors. + */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | + BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { + fep->stats.rx_errors++; + /* Frame too long or too short. */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) + fep->stats.rx_length_errors++; + /* Frame alignment */ + if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + fep->stats.rx_frame_errors++; + /* CRC Error */ + if (sc & BD_ENET_RX_CR) + fep->stats.rx_crc_errors++; + /* FIFO overrun */ + if (sc & BD_ENET_RX_OV) + fep->stats.rx_crc_errors++; + + skb = fep->rx_skbuff[curidx]; + + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + skbn = skb; + + } else { + skb = fep->rx_skbuff[curidx]; + + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + /* + * Process the incoming frame. 
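/* Editor's illustrative sketch (not part of the patch): the rx_copybreak decision the
 * receive path takes just below.  Plain malloc/memcpy stand in for skb allocation, and
 * the drop-on-allocation-failure handling is omitted; names are hypothetical. */
#include <stdlib.h>
#include <string.h>

struct rx_slot {
	void	*buf;		/* large, DMA-mapped receive buffer */
	size_t	buf_size;
};

/* Small frames are copied into a right-sized buffer so the big DMA buffer
 * stays in the ring; large frames are handed up as-is and the slot gets a
 * freshly allocated replacement. */
static void *rx_deliver(struct rx_slot *slot, size_t pkt_len, size_t copybreak)
{
	void *pkt;

	if (pkt_len <= copybreak) {
		void *copy = malloc(pkt_len);

		if (copy) {
			memcpy(copy, slot->buf, pkt_len);
			return copy;	/* slot->buf is reused unchanged */
		}
	}

	pkt = slot->buf;			/* pass the big buffer up */
	slot->buf = malloc(slot->buf_size);	/* refill the ring slot */
	return pkt;
}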
+ */ + fep->stats.rx_packets++; + pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ + fep->stats.rx_bytes += pkt_len + 4; + + if (pkt_len <= fpi->rx_copybreak) { + /* +2 to make IP header L1 cache aligned */ + skbn = netdev_alloc_skb(dev, pkt_len + 2); + if (skbn != NULL) { + skb_reserve(skbn, 2); /* align IP header */ + skb_copy_from_linear_data(skb, + skbn->data, pkt_len); + /* swap */ + skbt = skb; + skb = skbn; + skbn = skbt; + } + } else { + skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); + + if (skbn) + skb_align(skbn, ENET_RX_ALIGN); + } + + if (skbn != NULL) { + skb_put(skb, pkt_len); /* Make room */ + skb->protocol = eth_type_trans(skb, dev); + received++; + netif_receive_skb(skb); + } else { + fep->stats.rx_dropped++; + skbn = skb; + } + } + + fep->rx_skbuff[curidx] = skbn; + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); + CBDW_DATLEN(bdp, 0); + CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); + + /* + * Update BD pointer to next entry. + */ + if ((sc & BD_ENET_RX_WRAP) == 0) + bdp++; + else + bdp = fep->rx_bd_base; + + (*fep->ops->rx_bd_done)(dev); + + if (received >= budget) + break; + } + + fep->cur_rx = bdp; + + if (received < budget) { + /* done */ + napi_complete(napi); + (*fep->ops->napi_enable_rx)(dev); + } + return received; +} + +static int fs_enet_tx_napi(struct napi_struct *napi, int budget) +{ + struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, + napi_tx); + struct net_device *dev = fep->ndev; + cbd_t __iomem *bdp; + struct sk_buff *skb; + int dirtyidx, do_wake, do_restart; + u16 sc; + int has_tx_work = 0; + + spin_lock(&fep->tx_lock); + bdp = fep->dirty_tx; + + /* clear TX status bits for napi*/ + (*fep->ops->napi_clear_tx_event)(dev); + + do_wake = do_restart = 0; + while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { + dirtyidx = bdp - fep->tx_bd_base; + + if (fep->tx_free == fep->tx_ring) + break; + + skb = fep->tx_skbuff[dirtyidx]; + + /* + * Check for errors. + */ + if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { + + if (sc & BD_ENET_TX_HB) /* No heartbeat */ + fep->stats.tx_heartbeat_errors++; + if (sc & BD_ENET_TX_LC) /* Late collision */ + fep->stats.tx_window_errors++; + if (sc & BD_ENET_TX_RL) /* Retrans limit */ + fep->stats.tx_aborted_errors++; + if (sc & BD_ENET_TX_UN) /* Underrun */ + fep->stats.tx_fifo_errors++; + if (sc & BD_ENET_TX_CSL) /* Carrier lost */ + fep->stats.tx_carrier_errors++; + + if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { + fep->stats.tx_errors++; + do_restart = 1; + } + } else + fep->stats.tx_packets++; + + if (sc & BD_ENET_TX_READY) { + dev_warn(fep->dev, + "HEY! Enet xmit interrupt and TX_READY.\n"); + } + + /* + * Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (sc & BD_ENET_TX_DEF) + fep->stats.collisions++; + + /* unmap */ + if (fep->mapped_as_page[dirtyidx]) + dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp), + CBDR_DATLEN(bdp), DMA_TO_DEVICE); + else + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + CBDR_DATLEN(bdp), DMA_TO_DEVICE); + + /* + * Free the sk buffer associated with this last transmit. + */ + if (skb) { + dev_kfree_skb(skb); + fep->tx_skbuff[dirtyidx] = NULL; + } + + /* + * Update pointer to next buffer descriptor to be transmitted. + */ + if ((sc & BD_ENET_TX_WRAP) == 0) + bdp++; + else + bdp = fep->tx_bd_base; + + /* + * Since we have freed up a buffer, the ring is no longer + * full. 
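/* Editor's illustrative sketch (not part of the patch): the tx_free bookkeeping
 * completed just below - a frame may occupy the linear part plus MAX_SKB_FRAGS fragment
 * descriptors, so the queue is stopped when fewer slots remain and woken again only once
 * that many have been reclaimed.  MAX_FRAGS and all names here are illustrative. */
#include <stdbool.h>

#define MAX_FRAGS 16	/* stands in for MAX_SKB_FRAGS */

struct tx_ring {
	unsigned int size;	/* total descriptors */
	unsigned int free;	/* descriptors not owned by hardware */
	bool	     stopped;	/* queue state */
};

/* Called once per reclaimed descriptor from the TX-completion path. */
static void tx_reclaim_one(struct tx_ring *r)
{
	if (++r->free >= MAX_FRAGS && r->stopped)
		r->stopped = false;	/* netif_wake_queue() in the driver */
}

/* Called from the transmit path after consuming 'used' descriptors. */
static void tx_consume(struct tx_ring *r, unsigned int used)
{
	r->free -= used;
	if (r->free < MAX_FRAGS)
		r->stopped = true;	/* netif_stop_queue() in the driver */
}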
+ */ + if (++fep->tx_free >= MAX_SKB_FRAGS) + do_wake = 1; + has_tx_work = 1; + } + + fep->dirty_tx = bdp; + + if (do_restart) + (*fep->ops->tx_restart)(dev); + + if (!has_tx_work) { + napi_complete(napi); + (*fep->ops->napi_enable_tx)(dev); + } + + spin_unlock(&fep->tx_lock); + + if (do_wake) + netif_wake_queue(dev); + + if (has_tx_work) + return budget; + return 0; +} + +/* + * The interrupt handler. + * This is called from the MPC core interrupt. + */ +static irqreturn_t +fs_enet_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct fs_enet_private *fep; + const struct fs_platform_info *fpi; + u32 int_events; + u32 int_clr_events; + int nr, napi_ok; + int handled; + + fep = netdev_priv(dev); + fpi = fep->fpi; + + nr = 0; + while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { + nr++; + + int_clr_events = int_events; + int_clr_events &= ~fep->ev_napi_rx; + + (*fep->ops->clear_int_events)(dev, int_clr_events); + + if (int_events & fep->ev_err) + (*fep->ops->ev_error)(dev, int_events); + + if (int_events & fep->ev_rx) { + napi_ok = napi_schedule_prep(&fep->napi); + + (*fep->ops->napi_disable_rx)(dev); + (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); + + /* NOTE: it is possible for FCCs in NAPI mode */ + /* to submit a spurious interrupt while in poll */ + if (napi_ok) + __napi_schedule(&fep->napi); + } + + if (int_events & fep->ev_tx) { + napi_ok = napi_schedule_prep(&fep->napi_tx); + + (*fep->ops->napi_disable_tx)(dev); + (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx); + + /* NOTE: it is possible for FCCs in NAPI mode */ + /* to submit a spurious interrupt while in poll */ + if (napi_ok) + __napi_schedule(&fep->napi_tx); + } + } + + handled = nr > 0; + return IRQ_RETVAL(handled); +} + +void fs_init_bds(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + cbd_t __iomem *bdp; + struct sk_buff *skb; + int i; + + fs_cleanup_bds(dev); + + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->tx_free = fep->tx_ring; + fep->cur_rx = fep->rx_bd_base; + + /* + * Initialize the receive buffer descriptors. + */ + for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { + skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); + if (skb == NULL) + break; + + skb_align(skb, ENET_RX_ALIGN); + fep->rx_skbuff[i] = skb; + CBDW_BUFADDR(bdp, + dma_map_single(fep->dev, skb->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); + CBDW_DATLEN(bdp, 0); /* zero */ + CBDW_SC(bdp, BD_ENET_RX_EMPTY | + ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); + } + /* + * if we failed, fillup remainder + */ + for (; i < fep->rx_ring; i++, bdp++) { + fep->rx_skbuff[i] = NULL; + CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); + } + + /* + * ...and the same for transmit. + */ + for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { + fep->tx_skbuff[i] = NULL; + CBDW_BUFADDR(bdp, 0); + CBDW_DATLEN(bdp, 0); + CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); + } +} + +void fs_cleanup_bds(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct sk_buff *skb; + cbd_t __iomem *bdp; + int i; + + /* + * Reset SKB transmit buffers. 
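/* Editor's illustrative sketch (not part of the patch): the ring shape fs_init_bds()
 * above establishes - every receive descriptor is marked empty (owned by hardware) and
 * only the last one carries the WRAP bit, so the controller loops back to the ring base.
 * Field and constant names are simplified stand-ins for the cbd_t accessors. */
#include <stdint.h>
#include <stddef.h>

#define BD_EMPTY 0x8000		/* descriptor owned by hardware (RX) */
#define BD_WRAP  0x2000		/* last descriptor: wrap to ring base */

struct bd {
	uint16_t sc;		/* status and control */
	uint16_t len;
	uint32_t addr;		/* DMA address of the data buffer */
};

static void rx_ring_init(struct bd *ring, size_t n, const uint32_t *buf_addrs)
{
	size_t i;

	for (i = 0; i < n; i++) {
		ring[i].addr = buf_addrs[i];
		ring[i].len = 0;
		ring[i].sc = BD_EMPTY | (i == n - 1 ? BD_WRAP : 0);
	}
}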
+ */ + for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { + if ((skb = fep->tx_skbuff[i]) == NULL) + continue; + + /* unmap */ + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + skb->len, DMA_TO_DEVICE); + + fep->tx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } + + /* + * Reset SKB receive buffers + */ + for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { + if ((skb = fep->rx_skbuff[i]) == NULL) + continue; + + /* unmap */ + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE); + + fep->rx_skbuff[i] = NULL; + + dev_kfree_skb(skb); + } +} + +/**********************************************************************************/ + +#ifdef CONFIG_FS_ENET_MPC5121_FEC +/* + * MPC5121 FEC requeries 4-byte alignment for TX data buffer! + */ +static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, + struct sk_buff *skb) +{ + struct sk_buff *new_skb; + + /* Alloc new skb */ + new_skb = netdev_alloc_skb(dev, skb->len + 4); + if (!new_skb) + return NULL; + + /* Make sure new skb is properly aligned */ + skb_align(new_skb, 4); + + /* Copy data to new skb ... */ + skb_copy_from_linear_data(skb, new_skb->data, skb->len); + skb_put(new_skb, skb->len); + + /* ... and free an old one */ + dev_kfree_skb_any(skb); + + return new_skb; +} +#endif + +static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + cbd_t __iomem *bdp; + int curidx; + u16 sc; + int nr_frags = skb_shinfo(skb)->nr_frags; + skb_frag_t *frag; + int len; + +#ifdef CONFIG_FS_ENET_MPC5121_FEC + if (((unsigned long)skb->data) & 0x3) { + skb = tx_skb_align_workaround(dev, skb); + if (!skb) { + /* + * We have lost packet due to memory allocation error + * in tx_skb_align_workaround(). Hopefully original + * skb is still valid, so try transmit it later. + */ + return NETDEV_TX_BUSY; + } + } +#endif + spin_lock(&fep->tx_lock); + + /* + * Fill in a Tx ring entry + */ + bdp = fep->cur_tx; + + if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { + netif_stop_queue(dev); + spin_unlock(&fep->tx_lock); + + /* + * Ooops. All transmit buffers are full. Bail out. + * This should not happen, since the tx queue should be stopped. + */ + dev_warn(fep->dev, "tx queue full!.\n"); + return NETDEV_TX_BUSY; + } + + curidx = bdp - fep->tx_bd_base; + + len = skb->len; + fep->stats.tx_bytes += len; + if (nr_frags) + len -= skb->data_len; + fep->tx_free -= nr_frags + 1; + /* + * Push the data cache so the CPM does not get stale memory data. 
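/* Editor's illustrative sketch (not part of the patch): the 4-byte TX alignment
 * constraint that tx_skb_align_workaround() earlier in this file deals with - when the
 * payload start is misaligned, the data is copied into an aligned buffer before it is
 * handed to the DMA engine.  Plain C stand-ins replace the skb helpers; names are
 * hypothetical. */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Return a buffer whose start address is 4-byte aligned, copying the payload
 * only when the original start is misaligned.  '*to_free' is set when the
 * caller must release a temporary copy after transmission. */
static void *tx_align4(void *data, size_t len, void **to_free)
{
	void *copy;

	*to_free = NULL;
	if (((uintptr_t)data & 0x3) == 0)
		return data;		/* already aligned, use in place */

	copy = aligned_alloc(4, (len + 3) & ~(size_t)3);
	if (!copy)
		return NULL;		/* caller retries later (TX_BUSY) */
	memcpy(copy, data, len);
	*to_free = copy;
	return copy;
}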
+ */ + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, + skb->data, len, DMA_TO_DEVICE)); + CBDW_DATLEN(bdp, len); + + fep->mapped_as_page[curidx] = 0; + frag = skb_shinfo(skb)->frags; + while (nr_frags) { + CBDC_SC(bdp, + BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); + CBDS_SC(bdp, BD_ENET_TX_READY); + + if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) + bdp++, curidx++; + else + bdp = fep->tx_bd_base, curidx = 0; + + len = skb_frag_size(frag); + CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len, + DMA_TO_DEVICE)); + CBDW_DATLEN(bdp, len); + + fep->tx_skbuff[curidx] = NULL; + fep->mapped_as_page[curidx] = 1; + + frag++; + nr_frags--; + } + + /* Trigger transmission start */ + sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | + BD_ENET_TX_LAST | BD_ENET_TX_TC; + + /* note that while FEC does not have this bit + * it marks it as available for software use + * yay for hw reuse :) */ + if (skb->len <= 60) + sc |= BD_ENET_TX_PAD; + CBDC_SC(bdp, BD_ENET_TX_STATS); + CBDS_SC(bdp, sc); + + /* Save skb pointer. */ + fep->tx_skbuff[curidx] = skb; + + /* If this was the last BD in the ring, start at the beginning again. */ + if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) + bdp++; + else + bdp = fep->tx_bd_base; + fep->cur_tx = bdp; + + if (fep->tx_free < MAX_SKB_FRAGS) + netif_stop_queue(dev); + + skb_tx_timestamp(skb); + + (*fep->ops->tx_kickstart)(dev); + + spin_unlock(&fep->tx_lock); + + return NETDEV_TX_OK; +} + +static void fs_timeout(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + int wake = 0; + + fep->stats.tx_errors++; + + spin_lock_irqsave(&fep->lock, flags); + + if (dev->flags & IFF_UP) { + phy_stop(fep->phydev); + (*fep->ops->stop)(dev); + (*fep->ops->restart)(dev); + phy_start(fep->phydev); + } + + phy_start(fep->phydev); + wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); + spin_unlock_irqrestore(&fep->lock, flags); + + if (wake) + netif_wake_queue(dev); +} + +/*----------------------------------------------------------------------------- + * generic link-change handler - should be sufficient for most cases + *-----------------------------------------------------------------------------*/ +static void generic_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; + int new_state = 0; + + if (phydev->link) { + /* adjust to duplex mode */ + if (phydev->duplex != fep->oldduplex) { + new_state = 1; + fep->oldduplex = phydev->duplex; + } + + if (phydev->speed != fep->oldspeed) { + new_state = 1; + fep->oldspeed = phydev->speed; + } + + if (!fep->oldlink) { + new_state = 1; + fep->oldlink = 1; + } + + if (new_state) + fep->ops->restart(dev); + } else if (fep->oldlink) { + new_state = 1; + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + } + + if (new_state && netif_msg_link(fep)) + phy_print_status(phydev); +} + + +static void fs_adjust_link(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&fep->lock, flags); + + if(fep->ops->adjust_link) + fep->ops->adjust_link(dev); + else + generic_adjust_link(dev); + + spin_unlock_irqrestore(&fep->lock, flags); +} + +static int fs_init_phy(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev; + phy_interface_t iface; + + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + + iface = fep->fpi->use_rmii ? 
+ PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII; + + phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, + iface); + if (!phydev) { + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; + } + + fep->phydev = phydev; + + return 0; +} + +static int fs_enet_open(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + int r; + int err; + + /* to initialize the fep->cur_rx,... */ + /* not doing this, will cause a crash in fs_enet_rx_napi */ + fs_init_bds(fep->ndev); + + napi_enable(&fep->napi); + napi_enable(&fep->napi_tx); + + /* Install our interrupt handler. */ + r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, + "fs_enet-mac", dev); + if (r != 0) { + dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); + napi_disable(&fep->napi); + napi_disable(&fep->napi_tx); + return -EINVAL; + } + + err = fs_init_phy(dev); + if (err) { + free_irq(fep->interrupt, dev); + napi_disable(&fep->napi); + napi_disable(&fep->napi_tx); + return err; + } + phy_start(fep->phydev); + + netif_start_queue(dev); + + return 0; +} + +static int fs_enet_close(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + netif_stop_queue(dev); + netif_carrier_off(dev); + napi_disable(&fep->napi); + napi_disable(&fep->napi_tx); + phy_stop(fep->phydev); + + spin_lock_irqsave(&fep->lock, flags); + spin_lock(&fep->tx_lock); + (*fep->ops->stop)(dev); + spin_unlock(&fep->tx_lock); + spin_unlock_irqrestore(&fep->lock, flags); + + /* release any irqs */ + phy_disconnect(fep->phydev); + fep->phydev = NULL; + free_irq(fep->interrupt, dev); + + return 0; +} + +static struct net_device_stats *fs_enet_get_stats(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + return &fep->stats; +} + +/*************************************************************************/ + +static void fs_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static int fs_get_regs_len(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + return (*fep->ops->get_regs_len)(dev); +} + +static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct fs_enet_private *fep = netdev_priv(dev); + unsigned long flags; + int r, len; + + len = regs->len; + + spin_lock_irqsave(&fep->lock, flags); + r = (*fep->ops->get_regs)(dev, p, &len); + spin_unlock_irqrestore(&fep->lock, flags); + + if (r == 0) + regs->version = 0; +} + +static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (!fep->phydev) + return -ENODEV; + + return phy_ethtool_gset(fep->phydev, cmd); +} + +static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (!fep->phydev) + return -ENODEV; + + return phy_ethtool_sset(fep->phydev, cmd); +} + +static int fs_nway_reset(struct net_device *dev) +{ + return 0; +} + +static u32 fs_get_msglevel(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + return fep->msg_enable; +} + +static void fs_set_msglevel(struct net_device *dev, u32 value) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fep->msg_enable = value; +} + +static const struct ethtool_ops fs_ethtool_ops = { + .get_drvinfo = fs_get_drvinfo, + .get_regs_len = 
fs_get_regs_len, + .get_settings = fs_get_settings, + .set_settings = fs_set_settings, + .nway_reset = fs_nway_reset, + .get_link = ethtool_op_get_link, + .get_msglevel = fs_get_msglevel, + .set_msglevel = fs_set_msglevel, + .get_regs = fs_get_regs, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return phy_mii_ioctl(fep->phydev, rq, cmd); +} + +extern int fs_mii_connect(struct net_device *dev); +extern void fs_mii_disconnect(struct net_device *dev); + +/**************************************************************************************/ + +#ifdef CONFIG_FS_ENET_HAS_FEC +#define IS_FEC(match) ((match)->data == &fs_fec_ops) +#else +#define IS_FEC(match) 0 +#endif + +static const struct net_device_ops fs_enet_netdev_ops = { + .ndo_open = fs_enet_open, + .ndo_stop = fs_enet_close, + .ndo_get_stats = fs_enet_get_stats, + .ndo_start_xmit = fs_enet_start_xmit, + .ndo_tx_timeout = fs_timeout, + .ndo_set_rx_mode = fs_set_multicast_list, + .ndo_do_ioctl = fs_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fs_enet_netpoll, +#endif +}; + +static const struct of_device_id fs_enet_match[]; +static int fs_enet_probe(struct platform_device *ofdev) +{ + const struct of_device_id *match; + struct net_device *ndev; + struct fs_enet_private *fep; + struct fs_platform_info *fpi; + const u32 *data; + struct clk *clk; + int err; + const u8 *mac_addr; + const char *phy_connection_type; + int privsize, len, ret = -ENODEV; + + match = of_match_device(fs_enet_match, &ofdev->dev); + if (!match) + return -EINVAL; + + fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); + if (!fpi) + return -ENOMEM; + + if (!IS_FEC(match)) { + data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); + if (!data || len != 4) + goto out_free_fpi; + + fpi->cp_command = *data; + } + + fpi->rx_ring = 32; + fpi->tx_ring = 64; + fpi->rx_copybreak = 240; + fpi->napi_weight = 17; + fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); + if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) { + err = of_phy_register_fixed_link(ofdev->dev.of_node); + if (err) + goto out_free_fpi; + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
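/* Editor's illustrative sketch (not part of the patch): the single-allocation layout the
 * probe code just below relies on - the private struct is followed, in the same
 * allocation, by the RX skb pointer array, the TX skb pointer array, and the per-TX-slot
 * mapped_as_page flags.  Hypothetical names, plain C model. */
#include <stdlib.h>

struct priv_layout {
	/* ... driver private fields ... */
	void	**rx_bufs;	/* rx_ring pointers, start at &priv[1] */
	void	**tx_bufs;	/* tx_ring pointers, follow rx_bufs    */
	char	 *as_page;	/* tx_ring flags, follow tx_bufs       */
};

static struct priv_layout *alloc_priv(size_t rx_ring, size_t tx_ring)
{
	size_t size = sizeof(struct priv_layout)
		    + sizeof(void *) * (rx_ring + tx_ring)
		    + sizeof(char) * tx_ring;
	struct priv_layout *p = calloc(1, size);

	if (!p)
		return NULL;
	p->rx_bufs = (void **)&p[1];
	p->tx_bufs = p->rx_bufs + rx_ring;
	p->as_page = (char *)(p->tx_bufs + tx_ring);
	return p;
}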
+ */ + fpi->phy_node = of_node_get(ofdev->dev.of_node); + } + + if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) { + phy_connection_type = of_get_property(ofdev->dev.of_node, + "phy-connection-type", NULL); + if (phy_connection_type && !strcmp("rmii", phy_connection_type)) + fpi->use_rmii = 1; + } + + /* make clock lookup non-fatal (the driver is shared among platforms), + * but require enable to succeed when a clock was specified/found, + * keep a reference to the clock upon successful acquisition + */ + clk = devm_clk_get(&ofdev->dev, "per"); + if (!IS_ERR(clk)) { + err = clk_prepare_enable(clk); + if (err) { + ret = err; + goto out_free_fpi; + } + fpi->clk_per = clk; + } + + privsize = sizeof(*fep) + + sizeof(struct sk_buff **) * + (fpi->rx_ring + fpi->tx_ring) + + sizeof(char) * fpi->tx_ring; + + ndev = alloc_etherdev(privsize); + if (!ndev) { + ret = -ENOMEM; + goto out_put; + } + + SET_NETDEV_DEV(ndev, &ofdev->dev); + platform_set_drvdata(ofdev, ndev); + + fep = netdev_priv(ndev); + fep->dev = &ofdev->dev; + fep->ndev = ndev; + fep->fpi = fpi; + fep->ops = match->data; + + ret = fep->ops->setup_data(ndev); + if (ret) + goto out_free_dev; + + fep->rx_skbuff = (struct sk_buff **)&fep[1]; + fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; + fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring + + fpi->tx_ring); + + spin_lock_init(&fep->lock); + spin_lock_init(&fep->tx_lock); + + mac_addr = of_get_mac_address(ofdev->dev.of_node); + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + + ret = fep->ops->allocate_bd(ndev); + if (ret) + goto out_cleanup_data; + + fep->rx_bd_base = fep->ring_base; + fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; + + fep->tx_ring = fpi->tx_ring; + fep->rx_ring = fpi->rx_ring; + + ndev->netdev_ops = &fs_enet_netdev_ops; + ndev->watchdog_timeo = 2 * HZ; + netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); + netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); + + ndev->ethtool_ops = &fs_ethtool_ops; + + init_timer(&fep->phy_timer_list); + + netif_carrier_off(ndev); + + ndev->features |= NETIF_F_SG; + + ret = register_netdev(ndev); + if (ret) + goto out_free_bd; + + pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr); + + return 0; + +out_free_bd: + fep->ops->free_bd(ndev); +out_cleanup_data: + fep->ops->cleanup_data(ndev); +out_free_dev: + free_netdev(ndev); +out_put: + of_node_put(fpi->phy_node); + if (fpi->clk_per) + clk_disable_unprepare(fpi->clk_per); +out_free_fpi: + kfree(fpi); + return ret; +} + +static int fs_enet_remove(struct platform_device *ofdev) +{ + struct net_device *ndev = platform_get_drvdata(ofdev); + struct fs_enet_private *fep = netdev_priv(ndev); + + unregister_netdev(ndev); + + fep->ops->free_bd(ndev); + fep->ops->cleanup_data(ndev); + dev_set_drvdata(fep->dev, NULL); + of_node_put(fep->fpi->phy_node); + if (fep->fpi->clk_per) + clk_disable_unprepare(fep->fpi->clk_per); + free_netdev(ndev); + return 0; +} + +static const struct of_device_id fs_enet_match[] = { +#ifdef CONFIG_FS_ENET_HAS_SCC + { + .compatible = "fsl,cpm1-scc-enet", + .data = (void *)&fs_scc_ops, + }, + { + .compatible = "fsl,cpm2-scc-enet", + .data = (void *)&fs_scc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FCC + { + .compatible = "fsl,cpm2-fcc-enet", + .data = (void *)&fs_fcc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FEC +#ifdef CONFIG_FS_ENET_MPC5121_FEC + { + .compatible = "fsl,mpc5121-fec", + .data = (void *)&fs_fec_ops, + }, + { + .compatible = "fsl,mpc5125-fec", + .data = (void *)&fs_fec_ops, + }, 
+#else + { + .compatible = "fsl,pq1-fec-enet", + .data = (void *)&fs_fec_ops, + }, +#endif +#endif + {} +}; +MODULE_DEVICE_TABLE(of, fs_enet_match); + +static struct platform_driver fs_enet_driver = { + .driver = { + .name = "fs_enet", + .of_match_table = fs_enet_match, + }, + .probe = fs_enet_probe, + .remove = fs_enet_remove, +}; + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev) +{ + disable_irq(dev->irq); + fs_enet_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +module_platform_driver(fs_enet_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h new file mode 100644 index 000000000..f184d8f95 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -0,0 +1,250 @@ +#ifndef FS_ENET_H +#define FS_ENET_H + +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_CPM1 +#include +#endif + +#if defined(CONFIG_FS_ENET_HAS_FEC) +#include + +#if defined(CONFIG_FS_ENET_MPC5121_FEC) +/* MPC5121 FEC has different register layout */ +struct fec { + u32 fec_reserved0; + u32 fec_ievent; /* Interrupt event reg */ + u32 fec_imask; /* Interrupt mask reg */ + u32 fec_reserved1; + u32 fec_r_des_active; /* Receive descriptor reg */ + u32 fec_x_des_active; /* Transmit descriptor reg */ + u32 fec_reserved2[3]; + u32 fec_ecntrl; /* Ethernet control reg */ + u32 fec_reserved3[6]; + u32 fec_mii_data; /* MII manage frame reg */ + u32 fec_mii_speed; /* MII speed control reg */ + u32 fec_reserved4[7]; + u32 fec_mib_ctrlstat; /* MIB control/status reg */ + u32 fec_reserved5[7]; + u32 fec_r_cntrl; /* Receive control reg */ + u32 fec_reserved6[15]; + u32 fec_x_cntrl; /* Transmit Control reg */ + u32 fec_reserved7[7]; + u32 fec_addr_low; /* Low 32bits MAC address */ + u32 fec_addr_high; /* High 16bits MAC address */ + u32 fec_opd; /* Opcode + Pause duration */ + u32 fec_reserved8[10]; + u32 fec_hash_table_high; /* High 32bits hash table */ + u32 fec_hash_table_low; /* Low 32bits hash table */ + u32 fec_grp_hash_table_high; /* High 32bits hash table */ + u32 fec_grp_hash_table_low; /* Low 32bits hash table */ + u32 fec_reserved9[7]; + u32 fec_x_wmrk; /* FIFO transmit water mark */ + u32 fec_reserved10; + u32 fec_r_bound; /* FIFO receive bound reg */ + u32 fec_r_fstart; /* FIFO receive start reg */ + u32 fec_reserved11[11]; + u32 fec_r_des_start; /* Receive descriptor ring */ + u32 fec_x_des_start; /* Transmit descriptor ring */ + u32 fec_r_buff_size; /* Maximum receive buff size */ + u32 fec_reserved12[26]; + u32 fec_dma_control; /* DMA Endian and other ctrl */ +}; +#endif + +struct fec_info { + struct fec __iomem *fecp; + u32 mii_speed; +}; +#endif + +#ifdef CONFIG_CPM2 +#include +#endif + +/* hw driver ops */ +struct fs_ops { + int (*setup_data)(struct net_device *dev); + int (*allocate_bd)(struct net_device *dev); + void (*free_bd)(struct net_device *dev); + void (*cleanup_data)(struct net_device *dev); + void (*set_multicast_list)(struct net_device *dev); + void (*adjust_link)(struct net_device *dev); + void (*restart)(struct net_device *dev); + void (*stop)(struct net_device *dev); + void (*napi_clear_rx_event)(struct net_device *dev); + void (*napi_enable_rx)(struct net_device *dev); + void (*napi_disable_rx)(struct net_device *dev); + void (*napi_clear_tx_event)(struct net_device *dev); + void (*napi_enable_tx)(struct net_device *dev); + void (*napi_disable_tx)(struct net_device *dev); + void (*rx_bd_done)(struct net_device *dev); + 
void (*tx_kickstart)(struct net_device *dev); + u32 (*get_int_events)(struct net_device *dev); + void (*clear_int_events)(struct net_device *dev, u32 int_events); + void (*ev_error)(struct net_device *dev, u32 int_events); + int (*get_regs)(struct net_device *dev, void *p, int *sizep); + int (*get_regs_len)(struct net_device *dev); + void (*tx_restart)(struct net_device *dev); +}; + +struct phy_info { + unsigned int id; + const char *name; + void (*startup) (struct net_device * dev); + void (*shutdown) (struct net_device * dev); + void (*ack_int) (struct net_device * dev); +}; + +/* The FEC stores dest/src/type, data, and checksum for receive packets. + */ +#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ +#define MIN_MTU 46 /* this is data size */ +#define CRC_LEN 4 + +#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) +#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) + +/* Must be a multiple of 32 (to cover both FEC & FCC) */ +#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31) +/* This is needed so that invalidate_xxx wont invalidate too much */ +#define ENET_RX_ALIGN 16 +#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) + +struct fs_enet_private { + struct napi_struct napi; + struct napi_struct napi_tx; + struct device *dev; /* pointer back to the device (must be initialized first) */ + struct net_device *ndev; + spinlock_t lock; /* during all ops except TX pckt processing */ + spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ + struct fs_platform_info *fpi; + const struct fs_ops *ops; + int rx_ring, tx_ring; + dma_addr_t ring_mem_addr; + void __iomem *ring_base; + struct sk_buff **rx_skbuff; + struct sk_buff **tx_skbuff; + char *mapped_as_page; + cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t __iomem *tx_bd_base; + cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ + cbd_t __iomem *cur_rx; + cbd_t __iomem *cur_tx; + int tx_free; + struct net_device_stats stats; + struct timer_list phy_timer_list; + const struct phy_info *phy; + u32 msg_enable; + struct mii_if_info mii_if; + unsigned int last_mii_status; + int interrupt; + + struct phy_device *phydev; + int oldduplex, oldspeed, oldlink; /* current settings */ + + /* event masks */ + u32 ev_napi_rx; /* mask of NAPI rx events */ + u32 ev_napi_tx; /* mask of NAPI rx events */ + u32 ev_rx; /* rx event mask */ + u32 ev_tx; /* tx event mask */ + u32 ev_err; /* error event mask */ + + u16 bd_rx_empty; /* mask of BD rx empty */ + u16 bd_rx_err; /* mask of BD rx errors */ + + union { + struct { + int idx; /* FEC1 = 0, FEC2 = 1 */ + void __iomem *fecp; /* hw registers */ + u32 hthi, htlo; /* state for multicast */ + } fec; + + struct { + int idx; /* FCC1-3 = 0-2 */ + void __iomem *fccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ + void __iomem *fcccp; /* hw registers cont. 
*/ + void __iomem *mem; /* FCC DPRAM */ + u32 gaddrh, gaddrl; /* group address */ + } fcc; + + struct { + int idx; /* FEC1 = 0, FEC2 = 1 */ + void __iomem *sccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ + u32 hthi, htlo; /* state for multicast */ + } scc; + + }; +}; + +/***************************************************************************/ + +void fs_init_bds(struct net_device *dev); +void fs_cleanup_bds(struct net_device *dev); + +/***************************************************************************/ + +#define DRV_MODULE_NAME "fs_enet" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "Sep 22, 2014" + +/***************************************************************************/ + +int fs_enet_platform_init(void); +void fs_enet_platform_cleanup(void); + +/***************************************************************************/ +/* buffer descriptor access macros */ + +/* access macros */ +#if defined(CONFIG_CPM1) +/* for a a CPM1 __raw_xxx's are sufficient */ +#define __cbd_out32(addr, x) __raw_writel(x, addr) +#define __cbd_out16(addr, x) __raw_writew(x, addr) +#define __cbd_in32(addr) __raw_readl(addr) +#define __cbd_in16(addr) __raw_readw(addr) +#else +/* for others play it safe */ +#define __cbd_out32(addr, x) out_be32(addr, x) +#define __cbd_out16(addr, x) out_be16(addr, x) +#define __cbd_in32(addr) in_be32(addr) +#define __cbd_in16(addr) in_be16(addr) +#endif + +/* write */ +#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc)) +#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen)) +#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) + +/* read */ +#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc) +#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen) +#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr) + +/* set bits */ +#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) + +/* clear bits */ +#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) + +/*******************************************************************/ + +extern const struct fs_ops fs_fec_ops; +extern const struct fs_ops fs_fcc_ops; +extern const struct fs_ops fs_scc_ops; + +/*******************************************************************/ + +#endif diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c new file mode 100644 index 000000000..08f5b911d --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -0,0 +1,614 @@ +/* + * FCC driver for Motorola MPC82xx (PQ2). + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
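/* Editor's illustrative sketch (not part of the patch): the read-modify-write behind the
 * CBDS_SC()/CBDC_SC() macros defined in fs_enet.h above, written out as functions over a
 * plain volatile 16-bit status word.  The real macros route the access through __raw_*
 * or in_be16/out_be16 depending on the CPM variant; names here are hypothetical. */
#include <stdint.h>

static void bd_set_bits(volatile uint16_t *sc, uint16_t bits)
{
	*sc = (uint16_t)(*sc | bits);
}

static void bd_clear_bits(volatile uint16_t *sc, uint16_t bits)
{
	*sc = (uint16_t)(*sc & ~bits);
}

/* Typical use, mirroring the transmit path: clear the stale status bits,
 * then hand the descriptor to hardware in one set operation, e.g.
 *   bd_clear_bits(&bd->sc, TX_STATS_BITS);
 *   bd_set_bits(&bd->sc, TX_READY | TX_LAST | TX_INTR);
 */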
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "fs_enet.h" + +/*************************************************/ + +/* FCC access macros */ + +/* write, read, set bits, clear bits */ +#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v)) +#define R32(_p, _m) in_be32(&(_p)->_m) +#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) +#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) + +#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v)) +#define R16(_p, _m) in_be16(&(_p)->_m) +#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) +#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) + +#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v)) +#define R8(_p, _m) in_8(&(_p)->_m) +#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) +#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) + +/*************************************************/ + +#define FCC_MAX_MULTICAST_ADDRS 64 + +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +#define MAX_CR_CMD_LOOPS 10000 + +static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op) +{ + const struct fs_platform_info *fpi = fep->fpi; + + return cpm_command(fpi->cp_command, op); +} + +static int do_pd_setup(struct fs_enet_private *fep) +{ + struct platform_device *ofdev = to_platform_device(fep->dev); + struct fs_platform_info *fpi = fep->fpi; + int ret = -EINVAL; + + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); + if (fep->interrupt == NO_IRQ) + goto out; + + fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0); + if (!fep->fcc.fccp) + goto out; + + fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1); + if (!fep->fcc.ep) + goto out_fccp; + + fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2); + if (!fep->fcc.fcccp) + goto out_ep; + + fep->fcc.mem = (void __iomem *)cpm2_immr; + fpi->dpram_offset = cpm_dpalloc(128, 32); + if (IS_ERR_VALUE(fpi->dpram_offset)) { + ret = fpi->dpram_offset; + goto out_fcccp; + } + + return 0; + +out_fcccp: + iounmap(fep->fcc.fcccp); +out_ep: + iounmap(fep->fcc.ep); +out_fccp: + iounmap(fep->fcc.fccp); +out: + return ret; +} + +#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) +#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB) +#define FCC_RX_EVENT (FCC_ENET_RXF) +#define FCC_TX_EVENT (FCC_ENET_TXB) +#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) + +static int setup_data(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (do_pd_setup(fep) != 0) + return -EINVAL; + + fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK; + fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK; + fep->ev_rx = FCC_RX_EVENT; + fep->ev_tx = FCC_TX_EVENT; + fep->ev_err = FCC_ERR_EVENT_MSK; + + return 0; +} + +static int allocate_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev, + (fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), &fep->ring_mem_addr, + GFP_KERNEL); + if (fep->ring_base == NULL) + return -ENOMEM; + + return 0; +} + +static void free_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + if (fep->ring_base) + 
dma_free_coherent(fep->dev, + (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), + (void __force *)fep->ring_base, fep->ring_mem_addr); +} + +static void cleanup_data(struct net_device *dev) +{ + /* nothing */ +} + +static void set_promiscuous_mode(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S32(fccp, fcc_fpsmr, FCC_PSMR_PRO); +} + +static void set_multicast_start(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_enet_t __iomem *ep = fep->fcc.ep; + + W32(ep, fen_gaddrh, 0); + W32(ep, fen_gaddrl, 0); +} + +static void set_multicast_one(struct net_device *dev, const u8 *mac) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_enet_t __iomem *ep = fep->fcc.ep; + u16 taddrh, taddrm, taddrl; + + taddrh = ((u16)mac[5] << 8) | mac[4]; + taddrm = ((u16)mac[3] << 8) | mac[2]; + taddrl = ((u16)mac[1] << 8) | mac[0]; + + W16(ep, fen_taddrh, taddrh); + W16(ep, fen_taddrm, taddrm); + W16(ep, fen_taddrl, taddrl); + fcc_cr_cmd(fep, CPM_CR_SET_GADDR); +} + +static void set_multicast_finish(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; + + /* clear promiscuous always */ + C32(fccp, fcc_fpsmr, FCC_PSMR_PRO); + + /* if all multi or too many multicasts; just enable all */ + if ((dev->flags & IFF_ALLMULTI) != 0 || + netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) { + + W32(ep, fen_gaddrh, 0xffffffff); + W32(ep, fen_gaddrl, 0xffffffff); + } + + /* read back */ + fep->fcc.gaddrh = R32(ep, fen_gaddrh); + fep->fcc.gaddrl = R32(ep, fen_gaddrl); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + if ((dev->flags & IFF_PROMISC) == 0) { + set_multicast_start(dev); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); + set_multicast_finish(dev); + } else + set_promiscuous_mode(dev); +} + +static void restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_c_t __iomem *fcccp = fep->fcc.fcccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; + dma_addr_t rx_bd_base_phys, tx_bd_base_phys; + u16 paddrh, paddrm, paddrl; + const unsigned char *mac; + int i; + + C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); + + /* clear everything (slow & steady does it) */ + for (i = 0; i < sizeof(*ep); i++) + out_8((u8 __iomem *)ep + i, 0); + + /* get physical address */ + rx_bd_base_phys = fep->ring_mem_addr; + tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; + + /* point to bds */ + W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys); + W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys); + + /* Set maximum bytes per receive buffer. + * It must be a multiple of 32. + */ + W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE); + + W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24); + W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24); + + /* Allocate space in the reserved FCC area of DPRAM for the + * internal buffers. No one uses this space (yet), so we + * can do this. Later, we will add resource management for + * this area. + */ + + W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset); + W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32); + + W16(ep, fen_padptr, fpi->dpram_offset + 64); + + /* fill with special symbol... 
*/ + memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); + + W32(ep, fen_genfcc.fcc_rbptr, 0); + W32(ep, fen_genfcc.fcc_tbptr, 0); + W32(ep, fen_genfcc.fcc_rcrc, 0); + W32(ep, fen_genfcc.fcc_tcrc, 0); + W16(ep, fen_genfcc.fcc_res1, 0); + W32(ep, fen_genfcc.fcc_res2, 0); + + /* no CAM */ + W32(ep, fen_camptr, 0); + + /* Set CRC preset and mask */ + W32(ep, fen_cmask, 0xdebb20e3); + W32(ep, fen_cpres, 0xffffffff); + + W32(ep, fen_crcec, 0); /* CRC Error counter */ + W32(ep, fen_alec, 0); /* alignment error counter */ + W32(ep, fen_disfc, 0); /* discard frame counter */ + W16(ep, fen_retlim, 15); /* Retry limit threshold */ + W16(ep, fen_pper, 0); /* Normal persistence */ + + /* set group address */ + W32(ep, fen_gaddrh, fep->fcc.gaddrh); + W32(ep, fen_gaddrl, fep->fcc.gaddrl); + + /* Clear hash filter tables */ + W32(ep, fen_iaddrh, 0); + W32(ep, fen_iaddrl, 0); + + /* Clear the Out-of-sequence TxBD */ + W16(ep, fen_tfcstat, 0); + W16(ep, fen_tfclen, 0); + W32(ep, fen_tfcptr, 0); + + W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */ + W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ + + /* set address */ + mac = dev->dev_addr; + paddrh = ((u16)mac[5] << 8) | mac[4]; + paddrm = ((u16)mac[3] << 8) | mac[2]; + paddrl = ((u16)mac[1] << 8) | mac[0]; + + W16(ep, fen_paddrh, paddrh); + W16(ep, fen_paddrm, paddrm); + W16(ep, fen_paddrl, paddrl); + + W16(ep, fen_taddrh, 0); + W16(ep, fen_taddrm, 0); + W16(ep, fen_taddrl, 0); + + W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */ + W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */ + + /* Clear stat counters, in case we ever enable RMON */ + W32(ep, fen_octc, 0); + W32(ep, fen_colc, 0); + W32(ep, fen_broc, 0); + W32(ep, fen_mulc, 0); + W32(ep, fen_uspc, 0); + W32(ep, fen_frgc, 0); + W32(ep, fen_ospc, 0); + W32(ep, fen_jbrc, 0); + W32(ep, fen_p64c, 0); + W32(ep, fen_p65c, 0); + W32(ep, fen_p128c, 0); + W32(ep, fen_p256c, 0); + W32(ep, fen_p512c, 0); + W32(ep, fen_p1024c, 0); + + W16(ep, fen_rfthr, 0); /* Suggested by manual */ + W16(ep, fen_rfcnt, 0); + W16(ep, fen_cftype, 0); + + fs_init_bds(dev); + + /* adjust to speed (for RMII mode) */ + if (fpi->use_rmii) { + if (fep->phydev->speed == 100) + C8(fcccp, fcc_gfemr, 0x20); + else + S8(fcccp, fcc_gfemr, 0x20); + } + + fcc_cr_cmd(fep, CPM_CR_INIT_TRX); + + /* clear events */ + W16(fccp, fcc_fcce, 0xffff); + + /* Enable interrupts we wish to service */ + W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB); + + /* Set GFMR to enable Ethernet operating mode */ + W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET); + + /* set sync/delimiters */ + W16(fccp, fcc_fdsr, 0xd555); + + W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC); + + if (fpi->use_rmii) + S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); + + /* adjust to duplex mode */ + if (fep->phydev->duplex) + S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); + else + C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); + + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + + S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); +} + +static void stop(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + /* stop ethernet */ + C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); + + /* clear events */ + W16(fccp, fcc_fcce, 0xffff); + + /* clear interrupt mask */ + W16(fccp, fcc_fccm, 0); + + fs_cleanup_bds(dev); +} + +static void napi_clear_rx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + 
fcc_t __iomem *fccp = fep->fcc.fccp; + + W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK); +} + +static void napi_enable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); +} + +static void napi_disable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); +} + +static void napi_clear_tx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK); +} + +static void napi_enable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK); +} + +static void napi_disable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK); +} + +static void rx_bd_done(struct net_device *dev) +{ + /* nothing */ +} + +static void tx_kickstart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + S16(fccp, fcc_ftodr, 0x8000); +} + +static u32 get_int_events(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + return (u32)R16(fccp, fcc_fcce); +} + +static void clear_int_events(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + + W16(fccp, fcc_fcce, int_events & 0xffff); +} + +static void ev_error(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events); +} + +static int get_regs(struct net_device *dev, void *p, int *sizep) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1) + return -EINVAL; + + memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t)); + p = (char *)p + sizeof(fcc_t); + + memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); + p = (char *)p + sizeof(fcc_enet_t); + + memcpy_fromio(p, fep->fcc.fcccp, 1); + return 0; +} + +static int get_regs_len(struct net_device *dev) +{ + return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1; +} + +/* Some transmit errors cause the transmitter to shut + * down. We now issue a restart transmit. + * Also, to work around 8260 device erratum CPM37, we must + * disable and then re-enable the transmitter following a + * Late Collision, Underrun, or Retry Limit error. + * In addition, tbptr may point beyond BDs that are still marked + * as ready due to internal pipelining, so we need to look back + * through the BDs and adjust tbptr to point to the last BD + * marked as ready. This may result in some buffers being + * retransmitted. 
+ */ +static void tx_restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + fcc_t __iomem *fccp = fep->fcc.fccp; + const struct fs_platform_info *fpi = fep->fpi; + fcc_enet_t __iomem *ep = fep->fcc.ep; + cbd_t __iomem *curr_tbptr; + cbd_t __iomem *recheck_bd; + cbd_t __iomem *prev_bd; + cbd_t __iomem *last_tx_bd; + + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); + + /* get the current bd held in TBPTR and scan back from this point */ + recheck_bd = curr_tbptr = (cbd_t __iomem *) + ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) + + fep->ring_base); + + prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1; + + /* Move through the bds in reverse, look for the earliest buffer + * that is not ready. Adjust TBPTR to the following buffer */ + while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) { + /* Go back one buffer */ + recheck_bd = prev_bd; + + /* update the previous buffer */ + prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1; + + /* We should never see all bds marked as ready, check anyway */ + if (recheck_bd == curr_tbptr) + break; + } + /* Now update the TBPTR and dirty flag to the current buffer */ + W32(ep, fen_genfcc.fcc_tbptr, + (uint) (((void *)recheck_bd - fep->ring_base) + + fep->ring_mem_addr)); + fep->dirty_tx = recheck_bd; + + C32(fccp, fcc_gfmr, FCC_GFMR_ENT); + udelay(10); + S32(fccp, fcc_gfmr, FCC_GFMR_ENT); + + fcc_cr_cmd(fep, CPM_CR_RESTART_TX); +} + +/*************************************************************************/ + +const struct fs_ops fs_fcc_ops = { + .setup_data = setup_data, + .cleanup_data = cleanup_data, + .set_multicast_list = set_multicast_list, + .restart = restart, + .stop = stop, + .napi_clear_rx_event = napi_clear_rx_event, + .napi_enable_rx = napi_enable_rx, + .napi_disable_rx = napi_disable_rx, + .napi_clear_tx_event = napi_clear_tx_event, + .napi_enable_tx = napi_enable_tx, + .napi_disable_tx = napi_disable_tx, + .rx_bd_done = rx_bd_done, + .tx_kickstart = tx_kickstart, + .get_int_events = get_int_events, + .clear_int_events = clear_int_events, + .ev_error = ev_error, + .get_regs = get_regs, + .get_regs_len = get_regs_len, + .tx_restart = tx_restart, + .allocate_bd = allocate_bd, + .free_bd = free_bd, +}; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c new file mode 100644 index 000000000..b34214e2d --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -0,0 +1,533 @@ +/* + * Freescale Ethernet controllers + * + * Copyright (c) 2005 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_8xx +#include +#include +#include +#endif + +#include "fs_enet.h" +#include "fec.h" + +/*************************************************/ + +#if defined(CONFIG_CPM1) +/* for a CPM1 __raw_xxx's are sufficient */ +#define __fs_out32(addr, x) __raw_writel(x, addr) +#define __fs_out16(addr, x) __raw_writew(x, addr) +#define __fs_in32(addr) __raw_readl(addr) +#define __fs_in16(addr) __raw_readw(addr) +#else +/* for others play it safe */ +#define __fs_out32(addr, x) out_be32(addr, x) +#define __fs_out16(addr, x) out_be16(addr, x) +#define __fs_in32(addr) in_be32(addr) +#define __fs_in16(addr) in_be16(addr) +#endif + +/* write */ +#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v)) + +/* read */ +#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg) + +/* set bits */ +#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) + +/* clear bits */ +#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 + +static int whack_reset(struct fec __iomem *fecp) +{ + int i; + + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); + for (i = 0; i < FEC_RESET_DELAY; i++) { + if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0) + return 0; /* OK */ + udelay(1); + } + + return -1; +} + +static int do_pd_setup(struct fs_enet_private *fep) +{ + struct platform_device *ofdev = to_platform_device(fep->dev); + + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); + if (!fep->fcc.fccp) + return -EINVAL; + + return 0; +} + +#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) +#define FEC_RX_EVENT (FEC_ENET_RXF) +#define FEC_TX_EVENT (FEC_ENET_TXF) +#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ + FEC_ENET_BABT | FEC_ENET_EBERR) + +static int setup_data(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (do_pd_setup(fep) != 0) + return -EINVAL; + + fep->fec.hthi = 0; + fep->fec.htlo = 0; + + fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK; + fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK; + fep->ev_rx = FEC_RX_EVENT; + fep->ev_tx = FEC_TX_EVENT; + fep->ev_err = FEC_ERR_EVENT_MSK; + + return 0; +} + +static int allocate_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev, + (fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), &fep->ring_mem_addr, + GFP_KERNEL); + if (fep->ring_base == NULL) + return -ENOMEM; + + return 0; +} + +static void free_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + if(fep->ring_base) + dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) + * sizeof(cbd_t), + (void __force *)fep->ring_base, + fep->ring_mem_addr); +} + +static void cleanup_data(struct net_device *dev) +{ + /* nothing */ +} + +static void set_promiscuous_mode(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + 
FS(fecp, r_cntrl, FEC_RCNTRL_PROM); +} + +static void set_multicast_start(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + fep->fec.hthi = 0; + fep->fec.htlo = 0; +} + +static void set_multicast_one(struct net_device *dev, const u8 *mac) +{ + struct fs_enet_private *fep = netdev_priv(dev); + int temp, hash_index, i, j; + u32 crc, csrVal; + u8 byte, msb; + + crc = 0xffffffff; + for (i = 0; i < 6; i++) { + byte = mac[i]; + for (j = 0; j < 8; j++) { + msb = crc >> 31; + crc <<= 1; + if (msb ^ (byte & 0x1)) + crc ^= FEC_CRC_POLY; + byte >>= 1; + } + } + + temp = (crc & 0x3f) >> 1; + hash_index = ((temp & 0x01) << 4) | + ((temp & 0x02) << 2) | + ((temp & 0x04)) | + ((temp & 0x08) >> 2) | + ((temp & 0x10) >> 4); + csrVal = 1 << hash_index; + if (crc & 1) + fep->fec.hthi |= csrVal; + else + fep->fec.htlo |= csrVal; +} + +static void set_multicast_finish(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + /* if all multi or too many multicasts; just enable all */ + if ((dev->flags & IFF_ALLMULTI) != 0 || + netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) { + fep->fec.hthi = 0xffffffffU; + fep->fec.htlo = 0xffffffffU; + } + + FC(fecp, r_cntrl, FEC_RCNTRL_PROM); + FW(fecp, grp_hash_table_high, fep->fec.hthi); + FW(fecp, grp_hash_table_low, fep->fec.htlo); +} + +static void set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + if ((dev->flags & IFF_PROMISC) == 0) { + set_multicast_start(dev); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); + set_multicast_finish(dev); + } else + set_promiscuous_mode(dev); +} + +static void restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + const struct fs_platform_info *fpi = fep->fpi; + dma_addr_t rx_bd_base_phys, tx_bd_base_phys; + int r; + u32 addrhi, addrlo; + + struct mii_bus* mii = fep->phydev->bus; + struct fec_info* fec_inf = mii->priv; + + r = whack_reset(fep->fec.fecp); + if (r != 0) + dev_err(fep->dev, "FEC Reset FAILED!\n"); + /* + * Set station address. + */ + addrhi = ((u32) dev->dev_addr[0] << 24) | + ((u32) dev->dev_addr[1] << 16) | + ((u32) dev->dev_addr[2] << 8) | + (u32) dev->dev_addr[3]; + addrlo = ((u32) dev->dev_addr[4] << 24) | + ((u32) dev->dev_addr[5] << 16); + FW(fecp, addr_low, addrhi); + FW(fecp, addr_high, addrlo); + + /* + * Reset all multicast. + */ + FW(fecp, grp_hash_table_high, fep->fec.hthi); + FW(fecp, grp_hash_table_low, fep->fec.htlo); + + /* + * Set maximum receive buffer size. + */ + FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); +#ifdef CONFIG_FS_ENET_MPC5121_FEC + FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16); +#else + FW(fecp, r_hash, PKT_MAXBUF_SIZE); +#endif + + /* get physical address */ + rx_bd_base_phys = fep->ring_mem_addr; + tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; + + /* + * Set receive and transmit descriptor base. + */ + FW(fecp, r_des_start, rx_bd_base_phys); + FW(fecp, x_des_start, tx_bd_base_phys); + + fs_init_bds(dev); + + /* + * Enable big endian and don't care about SDMA FC. + */ +#ifdef CONFIG_FS_ENET_MPC5121_FEC + FS(fecp, dma_control, 0xC0000000); +#else + FW(fecp, fun_code, 0x78000000); +#endif + + /* + * Set MII speed. + */ + FW(fecp, mii_speed, fec_inf->mii_speed); + + /* + * Clear any outstanding interrupt. 
+ */ + FW(fecp, ievent, 0xffc0); +#ifndef CONFIG_FS_ENET_MPC5121_FEC + FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); + + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ +#else + /* + * Only set MII/RMII mode - do not touch maximum frame length + * configured before. + */ + FS(fecp, r_cntrl, fpi->use_rmii ? + FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE); +#endif + /* + * adjust to duplex mode + */ + if (fep->phydev->duplex) { + FC(fecp, r_cntrl, FEC_RCNTRL_DRT); + FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ + } else { + FS(fecp, r_cntrl, FEC_RCNTRL_DRT); + FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ + } + + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + + /* + * Enable interrupts we wish to service. + */ + FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | + FEC_ENET_RXF | FEC_ENET_RXB); + + /* + * And last, enable the transmit and receive processing. + */ + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + FW(fecp, r_des_active, 0x01000000); +} + +static void stop(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + struct fec __iomem *fecp = fep->fec.fecp; + + struct fec_info* feci= fep->phydev->bus->priv; + + int i; + + if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) + return; /* already down */ + + FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ + for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && + i < FEC_RESET_DELAY; i++) + udelay(1); + + if (i == FEC_RESET_DELAY) + dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n"); + /* + * Disable FEC. Let only MII interrupts. + */ + FW(fecp, imask, 0); + FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); + + fs_cleanup_bds(dev); + + /* shut down FEC1? that's where the mii bus is */ + if (fpi->has_phy) { + FS(fecp, r_cntrl, fpi->use_rmii ? 
+ FEC_RCNTRL_RMII_MODE : + FEC_RCNTRL_MII_MODE); /* MII/RMII enable */ + FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + FW(fecp, ievent, FEC_ENET_MII); + FW(fecp, mii_speed, feci->mii_speed); + } +} + +static void napi_clear_rx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); +} + +static void napi_enable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); +} + +static void napi_disable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); +} + +static void napi_clear_tx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK); +} + +static void napi_enable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK); +} + +static void napi_disable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK); +} + +static void rx_bd_done(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, r_des_active, 0x01000000); +} + +static void tx_kickstart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, x_des_active, 0x01000000); +} + +static u32 get_int_events(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + return FR(fecp, ievent) & FR(fecp, imask); +} + +static void clear_int_events(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + struct fec __iomem *fecp = fep->fec.fecp; + + FW(fecp, ievent, int_events); +} + +static void ev_error(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events); +} + +static int get_regs(struct net_device *dev, void *p, int *sizep) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (*sizep < sizeof(struct fec)) + return -EINVAL; + + memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec)); + + return 0; +} + +static int get_regs_len(struct net_device *dev) +{ + return sizeof(struct fec); +} + +static void tx_restart(struct net_device *dev) +{ + /* nothing */ +} + +/*************************************************************************/ + +const struct fs_ops fs_fec_ops = { + .setup_data = setup_data, + .cleanup_data = cleanup_data, + .set_multicast_list = set_multicast_list, + .restart = restart, + .stop = stop, + .napi_clear_rx_event = napi_clear_rx_event, + .napi_enable_rx = napi_enable_rx, + .napi_disable_rx = napi_disable_rx, + .napi_clear_tx_event = napi_clear_tx_event, + .napi_enable_tx = napi_enable_tx, + .napi_disable_tx = napi_disable_tx, + .rx_bd_done = rx_bd_done, + .tx_kickstart = tx_kickstart, + .get_int_events = get_int_events, + .clear_int_events = clear_int_events, + .ev_error = ev_error, + .get_regs = get_regs, + .get_regs_len = get_regs_len, + .tx_restart = tx_restart, + .allocate_bd = 
allocate_bd, + .free_bd = free_bd, +}; + diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c new file mode 100644 index 000000000..7a184e881 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -0,0 +1,516 @@ +/* + * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_8xx +#include +#include +#include +#endif + +#include "fs_enet.h" + +/*************************************************/ +#if defined(CONFIG_CPM1) +/* for a 8xx __raw_xxx's are sufficient */ +#define __fs_out32(addr, x) __raw_writel(x, addr) +#define __fs_out16(addr, x) __raw_writew(x, addr) +#define __fs_out8(addr, x) __raw_writeb(x, addr) +#define __fs_in32(addr) __raw_readl(addr) +#define __fs_in16(addr) __raw_readw(addr) +#define __fs_in8(addr) __raw_readb(addr) +#else +/* for others play it safe */ +#define __fs_out32(addr, x) out_be32(addr, x) +#define __fs_out16(addr, x) out_be16(addr, x) +#define __fs_in32(addr) in_be32(addr) +#define __fs_in16(addr) in_be16(addr) +#define __fs_out8(addr, x) out_8(addr, x) +#define __fs_in8(addr) in_8(addr) +#endif + +/* write, read, set bits, clear bits */ +#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v)) +#define R32(_p, _m) __fs_in32(&(_p)->_m) +#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) +#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) + +#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v)) +#define R16(_p, _m) __fs_in16(&(_p)->_m) +#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) +#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) + +#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v)) +#define R8(_p, _m) __fs_in8(&(_p)->_m) +#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) +#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) + +#define SCC_MAX_MULTICAST_ADDRS 64 + +/* + * Delay to wait for SCC reset command to complete (in us) + */ +#define SCC_RESET_DELAY 50 + +static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op) +{ + const struct fs_platform_info *fpi = fep->fpi; + + return cpm_command(fpi->cp_command, op); +} + +static int do_pd_setup(struct fs_enet_private *fep) +{ + struct platform_device *ofdev = to_platform_device(fep->dev); + + fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0); + if (!fep->scc.sccp) + return -EINVAL; + + fep->scc.ep = of_iomap(ofdev->dev.of_node, 1); + if (!fep->scc.ep) { + iounmap(fep->scc.sccp); + return -EINVAL; + } + + return 0; +} + +#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB) +#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB) +#define SCC_RX_EVENT (SCCE_ENET_RXF) +#define SCC_TX_EVENT (SCCE_ENET_TXB) +#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY) + +static int setup_data(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + do_pd_setup(fep); + + 
fep->scc.hthi = 0; + fep->scc.htlo = 0; + + fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; + fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK; + fep->ev_rx = SCC_RX_EVENT; + fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE; + fep->ev_err = SCC_ERR_EVENT_MSK; + + return 0; +} + +static int allocate_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + const struct fs_platform_info *fpi = fep->fpi; + + fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), 8); + if (IS_ERR_VALUE(fep->ring_mem_addr)) + return -ENOMEM; + + fep->ring_base = (void __iomem __force*) + cpm_dpram_addr(fep->ring_mem_addr); + + return 0; +} + +static void free_bd(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (fep->ring_base) + cpm_dpfree(fep->ring_mem_addr); +} + +static void cleanup_data(struct net_device *dev) +{ + /* nothing */ +} + +static void set_promiscuous_mode(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + S16(sccp, scc_psmr, SCC_PSMR_PRO); +} + +static void set_multicast_start(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_enet_t __iomem *ep = fep->scc.ep; + + W16(ep, sen_gaddr1, 0); + W16(ep, sen_gaddr2, 0); + W16(ep, sen_gaddr3, 0); + W16(ep, sen_gaddr4, 0); +} + +static void set_multicast_one(struct net_device *dev, const u8 * mac) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_enet_t __iomem *ep = fep->scc.ep; + u16 taddrh, taddrm, taddrl; + + taddrh = ((u16) mac[5] << 8) | mac[4]; + taddrm = ((u16) mac[3] << 8) | mac[2]; + taddrl = ((u16) mac[1] << 8) | mac[0]; + + W16(ep, sen_taddrh, taddrh); + W16(ep, sen_taddrm, taddrm); + W16(ep, sen_taddrl, taddrl); + scc_cr_cmd(fep, CPM_CR_SET_GADDR); +} + +static void set_multicast_finish(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; + + /* clear promiscuous always */ + C16(sccp, scc_psmr, SCC_PSMR_PRO); + + /* if all multi or too many multicasts; just enable all */ + if ((dev->flags & IFF_ALLMULTI) != 0 || + netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) { + + W16(ep, sen_gaddr1, 0xffff); + W16(ep, sen_gaddr2, 0xffff); + W16(ep, sen_gaddr3, 0xffff); + W16(ep, sen_gaddr4, 0xffff); + } +} + +static void set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + if ((dev->flags & IFF_PROMISC) == 0) { + set_multicast_start(dev); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); + set_multicast_finish(dev); + } else + set_promiscuous_mode(dev); +} + +/* + * This function is called to start or restart the FEC during a link + * change. This only happens when switching between half and full + * duplex. + */ +static void restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; + const struct fs_platform_info *fpi = fep->fpi; + u16 paddrh, paddrm, paddrl; + const unsigned char *mac; + int i; + + C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + /* clear everything (slow & steady does it) */ + for (i = 0; i < sizeof(*ep); i++) + __fs_out8((u8 __iomem *)ep + i, 0); + + /* point to bds */ + W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); + W16(ep, sen_genscc.scc_tbase, + fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); + + /* Initialize function code registers for big-endian. 
+ */ +#ifndef CONFIG_NOT_COHERENT_CACHE + W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL); + W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL); +#else + W8(ep, sen_genscc.scc_rfcr, SCC_EB); + W8(ep, sen_genscc.scc_tfcr, SCC_EB); +#endif + + /* Set maximum bytes per receive buffer. + * This appears to be an Ethernet frame size, not the buffer + * fragment size. It must be a multiple of four. + */ + W16(ep, sen_genscc.scc_mrblr, 0x5f0); + + /* Set CRC preset and mask. + */ + W32(ep, sen_cpres, 0xffffffff); + W32(ep, sen_cmask, 0xdebb20e3); + + W32(ep, sen_crcec, 0); /* CRC Error counter */ + W32(ep, sen_alec, 0); /* alignment error counter */ + W32(ep, sen_disfc, 0); /* discard frame counter */ + + W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */ + W16(ep, sen_retlim, 15); /* Retry limit threshold */ + + W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */ + + W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ + + W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */ + W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */ + + /* Clear hash tables. + */ + W16(ep, sen_gaddr1, 0); + W16(ep, sen_gaddr2, 0); + W16(ep, sen_gaddr3, 0); + W16(ep, sen_gaddr4, 0); + W16(ep, sen_iaddr1, 0); + W16(ep, sen_iaddr2, 0); + W16(ep, sen_iaddr3, 0); + W16(ep, sen_iaddr4, 0); + + /* set address + */ + mac = dev->dev_addr; + paddrh = ((u16) mac[5] << 8) | mac[4]; + paddrm = ((u16) mac[3] << 8) | mac[2]; + paddrl = ((u16) mac[1] << 8) | mac[0]; + + W16(ep, sen_paddrh, paddrh); + W16(ep, sen_paddrm, paddrm); + W16(ep, sen_paddrl, paddrl); + + W16(ep, sen_pper, 0); + W16(ep, sen_taddrl, 0); + W16(ep, sen_taddrm, 0); + W16(ep, sen_taddrh, 0); + + fs_init_bds(dev); + + scc_cr_cmd(fep, CPM_CR_INIT_TRX); + + W16(sccp, scc_scce, 0xffff); + + /* Enable interrupts we wish to service. + */ + W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); + + /* Set GSMR_H to enable all normal operating modes. + * Set GSMR_L to enable Ethernet to MC68160. + */ + W32(sccp, scc_gsmrh, 0); + W32(sccp, scc_gsmrl, + SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | + SCC_GSMRL_MODE_ENET); + + /* Set sync/delimiters. + */ + W16(sccp, scc_dsr, 0xd555); + + /* Set processing mode. Use Ethernet CRC, catch broadcast, and + * start frame search 22 bit times after RENA. 
+ */ + W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); + + /* Set full duplex mode if needed */ + if (fep->phydev->duplex) + S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); + + /* Restore multicast and promiscuous settings */ + set_multicast_list(dev); + + S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); +} + +static void stop(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + int i; + + for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++) + udelay(1); + + if (i == SCC_RESET_DELAY) + dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n"); + + W16(sccp, scc_sccm, 0); + C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + fs_cleanup_bds(dev); +} + +static void napi_clear_rx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK); +} + +static void napi_enable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); +} + +static void napi_disable_rx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); +} + +static void napi_clear_tx_event(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK); +} + +static void napi_enable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK); +} + +static void napi_disable_tx(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK); +} + +static void rx_bd_done(struct net_device *dev) +{ + /* nothing */ +} + +static void tx_kickstart(struct net_device *dev) +{ + /* nothing */ +} + +static u32 get_int_events(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + return (u32) R16(sccp, scc_scce); +} + +static void clear_int_events(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + scc_t __iomem *sccp = fep->scc.sccp; + + W16(sccp, scc_scce, int_events & 0xffff); +} + +static void ev_error(struct net_device *dev, u32 int_events) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events); +} + +static int get_regs(struct net_device *dev, void *p, int *sizep) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *)) + return -EINVAL; + + memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t)); + p = (char *)p + sizeof(scc_t); + + memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *)); + + return 0; +} + +static int get_regs_len(struct net_device *dev) +{ + return sizeof(scc_t) + sizeof(scc_enet_t __iomem *); +} + +static void tx_restart(struct net_device *dev) +{ + struct fs_enet_private *fep = netdev_priv(dev); + + scc_cr_cmd(fep, CPM_CR_RESTART_TX); +} + + + +/*************************************************************************/ + +const struct fs_ops fs_scc_ops = { + .setup_data = setup_data, + .cleanup_data = cleanup_data, + .set_multicast_list = set_multicast_list, + .restart = restart, + 
.stop = stop, + .napi_clear_rx_event = napi_clear_rx_event, + .napi_enable_rx = napi_enable_rx, + .napi_disable_rx = napi_disable_rx, + .napi_clear_tx_event = napi_clear_tx_event, + .napi_enable_tx = napi_enable_tx, + .napi_disable_tx = napi_disable_tx, + .rx_bd_done = rx_bd_done, + .tx_kickstart = tx_kickstart, + .get_int_events = get_int_events, + .clear_int_events = clear_int_events, + .ev_error = ev_error, + .get_regs = get_regs, + .get_regs_len = get_regs_len, + .tx_restart = tx_restart, + .allocate_bd = allocate_bd, + .free_bd = free_bd, +}; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c new file mode 100644 index 000000000..68a428de0 --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -0,0 +1,233 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fs_enet.h" + +struct bb_info { + struct mdiobb_ctrl ctrl; + __be32 __iomem *dir; + __be32 __iomem *dat; + u32 mdio_msk; + u32 mdc_msk; +}; + +/* FIXME: If any other users of GPIO crop up, then these will have to + * have some sort of global synchronization to avoid races with other + * pins on the same port. The ideal solution would probably be to + * bind the ports to a GPIO driver, and have this be a client of it. + */ +static inline void bb_set(u32 __iomem *p, u32 m) +{ + out_be32(p, in_be32(p) | m); +} + +static inline void bb_clr(u32 __iomem *p, u32 m) +{ + out_be32(p, in_be32(p) & ~m); +} + +static inline int bb_read(u32 __iomem *p, u32 m) +{ + return (in_be32(p) & m) != 0; +} + +static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (dir) + bb_set(bitbang->dir, bitbang->mdio_msk); + else + bb_clr(bitbang->dir, bitbang->mdio_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dir); +} + +static inline int mdio_read(struct mdiobb_ctrl *ctrl) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + return bb_read(bitbang->dat, bitbang->mdio_msk); +} + +static inline void mdio(struct mdiobb_ctrl *ctrl, int what) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (what) + bb_set(bitbang->dat, bitbang->mdio_msk); + else + bb_clr(bitbang->dat, bitbang->mdio_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dat); +} + +static inline void mdc(struct mdiobb_ctrl *ctrl, int what) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (what) + bb_set(bitbang->dat, bitbang->mdc_msk); + else + bb_clr(bitbang->dat, bitbang->mdc_msk); + + /* Read back to flush the write. 
*/ + in_be32(bitbang->dat); +} + +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = mdc, + .set_mdio_dir = mdio_dir, + .set_mdio_data = mdio, + .get_mdio_data = mdio_read, +}; + +static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np) +{ + struct resource res; + const u32 *data; + int mdio_pin, mdc_pin, len; + struct bb_info *bitbang = bus->priv; + + int ret = of_address_to_resource(np, 0, &res); + if (ret) + return ret; + + if (resource_size(&res) <= 13) + return -ENODEV; + + /* This should really encode the pin number as well, but all + * we get is an int, and the odds of multiple bitbang mdio buses + * is low enough that it's not worth going too crazy. + */ + snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + + data = of_get_property(np, "fsl,mdio-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdio_pin = *data; + + data = of_get_property(np, "fsl,mdc-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdc_pin = *data; + + bitbang->dir = ioremap(res.start, resource_size(&res)); + if (!bitbang->dir) + return -ENOMEM; + + bitbang->dat = bitbang->dir + 4; + bitbang->mdio_msk = 1 << (31 - mdio_pin); + bitbang->mdc_msk = 1 << (31 - mdc_pin); + + return 0; +} + +static int fs_enet_mdio_probe(struct platform_device *ofdev) +{ + struct mii_bus *new_bus; + struct bb_info *bitbang; + int ret = -ENOMEM; + + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) + goto out; + + bitbang->ctrl.ops = &bb_ops; + + new_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!new_bus) + goto out_free_priv; + + new_bus->name = "CPM2 Bitbanged MII", + + ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node); + if (ret) + goto out_free_bus; + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) { + ret = -ENOMEM; + goto out_unmap_regs; + } + + new_bus->parent = &ofdev->dev; + platform_set_drvdata(ofdev, new_bus); + + ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); + if (ret) + goto out_free_irqs; + + return 0; + +out_free_irqs: + kfree(new_bus->irq); +out_unmap_regs: + iounmap(bitbang->dir); +out_free_bus: + free_mdio_bitbang(new_bus); +out_free_priv: + kfree(bitbang); +out: + return ret; +} + +static int fs_enet_mdio_remove(struct platform_device *ofdev) +{ + struct mii_bus *bus = platform_get_drvdata(ofdev); + struct bb_info *bitbang = bus->priv; + + mdiobus_unregister(bus); + kfree(bus->irq); + free_mdio_bitbang(bus); + iounmap(bitbang->dir); + kfree(bitbang); + + return 0; +} + +static const struct of_device_id fs_enet_mdio_bb_match[] = { + { + .compatible = "fsl,cpm2-mdio-bitbang", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); + +static struct platform_driver fs_enet_bb_mdio_driver = { + .driver = { + .name = "fsl-bb-mdio", + .of_match_table = fs_enet_mdio_bb_match, + }, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +module_platform_driver(fs_enet_bb_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c new file mode 100644 index 000000000..2be383e6d --- /dev/null +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -0,0 +1,234 @@ +/* + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. 
This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "fs_enet.h" +#include "fec.h" + +/* Make MII read/write commands for the FEC. +*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +#define FEC_MII_LOOPS 10000 + +static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) +{ + struct fec_info* fec = bus->priv; + struct fec __iomem *fecp = fec->fecp; + int i, ret = -1; + + BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0); + + /* Add PHY address to register command. */ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) { + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + ret = in_be32(&fecp->fec_mii_data) & 0xffff; + } + + return ret; +} + +static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +{ + struct fec_info* fec = bus->priv; + struct fec __iomem *fecp = fec->fecp; + int i; + + /* this must never happen */ + BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0); + + /* Add PHY address to register command. */ + out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) + out_be32(&fecp->fec_ievent, FEC_ENET_MII); + + return 0; + +} + +static const struct of_device_id fs_enet_mdio_fec_match[]; +static int fs_enet_mdio_probe(struct platform_device *ofdev) +{ + const struct of_device_id *match; + struct resource res; + struct mii_bus *new_bus; + struct fec_info *fec; + int (*get_bus_freq)(struct device_node *); + int ret = -ENOMEM, clock, speed; + + match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); + if (!match) + return -EINVAL; + get_bus_freq = match->data; + + new_bus = mdiobus_alloc(); + if (!new_bus) + goto out; + + fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); + if (!fec) + goto out_mii; + + new_bus->priv = fec; + new_bus->name = "FEC MII Bus"; + new_bus->read = &fs_enet_fec_mii_read; + new_bus->write = &fs_enet_fec_mii_write; + + ret = of_address_to_resource(ofdev->dev.of_node, 0, &res); + if (ret) + goto out_res; + + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); + + fec->fecp = ioremap(res.start, resource_size(&res)); + if (!fec->fecp) { + ret = -ENOMEM; + goto out_fec; + } + + if (get_bus_freq) { + clock = get_bus_freq(ofdev->dev.of_node); + if (!clock) { + /* Use maximum divider if clock is unknown */ + dev_warn(&ofdev->dev, "could not determine IPS clock\n"); + clock = 0x3F * 5000000; + } + } else + clock = ppc_proc_freq; + + /* + * Scale for a MII clock <= 2.5 MHz + * Note that only 6 bits (25:30) are available for MII speed. 
+ */ + speed = (clock + 4999999) / 5000000; + if (speed > 0x3F) { + speed = 0x3F; + dev_err(&ofdev->dev, + "MII clock (%d Hz) exceeds max (2.5 MHz)\n", + clock / speed); + } + + fec->mii_speed = speed << 1; + + setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); + setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | + FEC_ECNTRL_ETHER_EN); + out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII); + clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed); + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) { + ret = -ENOMEM; + goto out_unmap_regs; + } + + new_bus->parent = &ofdev->dev; + platform_set_drvdata(ofdev, new_bus); + + ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); + if (ret) + goto out_free_irqs; + + return 0; + +out_free_irqs: + kfree(new_bus->irq); +out_unmap_regs: + iounmap(fec->fecp); +out_res: +out_fec: + kfree(fec); +out_mii: + mdiobus_free(new_bus); +out: + return ret; +} + +static int fs_enet_mdio_remove(struct platform_device *ofdev) +{ + struct mii_bus *bus = platform_get_drvdata(ofdev); + struct fec_info *fec = bus->priv; + + mdiobus_unregister(bus); + kfree(bus->irq); + iounmap(fec->fecp); + kfree(fec); + mdiobus_free(bus); + + return 0; +} + +static const struct of_device_id fs_enet_mdio_fec_match[] = { + { + .compatible = "fsl,pq1-fec-mdio", + }, +#if defined(CONFIG_PPC_MPC512x) + { + .compatible = "fsl,mpc5121-fec-mdio", + .data = mpc5xxx_get_bus_frequency, + }, +#endif + {}, +}; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); + +static struct platform_driver fs_enet_fec_mdio_driver = { + .driver = { + .name = "fsl-fec-mdio", + .of_match_table = fs_enet_mdio_fec_match, + }, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +module_platform_driver(fs_enet_fec_mdio_driver); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c new file mode 100644 index 000000000..3c40f6b99 --- /dev/null +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -0,0 +1,499 @@ +/* + * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation + * Provides Bus interface for MIIM regs + * + * Author: Andy Fleming + * Modifier: Sandeep Gopalpet + * + * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc. + * + * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#if IS_ENABLED(CONFIG_UCC_GETH) +#include /* for ucc_set_qe_mux_mii_mng() */ +#endif + +#include "gianfar.h" + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 +#define MIIMCFG_INIT_VALUE 0x00000007 +#define MIIMCFG_RESET 0x80000000 + +#define MII_READ_COMMAND 0x00000001 + +struct fsl_pq_mii { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ +}; + +struct fsl_pq_mdio { + u8 res1[16]; + u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ + u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ + u8 res2[4]; + u32 emapm; /* MDIO Event mapping register (for etsec2)*/ + u8 res3[1280]; + struct fsl_pq_mii mii; + u8 res4[28]; + u32 utbipar; /* TBI phy address reg (only on UCC) */ + u8 res5[2728]; +} __packed; + +/* Number of microseconds to wait for an MII register to respond */ +#define MII_TIMEOUT 1000 + +struct fsl_pq_mdio_priv { + void __iomem *map; + struct fsl_pq_mii __iomem *regs; + int irqs[PHY_MAX_ADDR]; +}; + +/* + * Per-device-type data. Each type of device tree node that we support gets + * one of these. + * + * @mii_offset: the offset of the MII registers within the memory map of the + * node. Some nodes define only the MII registers, and some define the whole + * MAC (which includes the MII registers). + * + * @get_tbipa: determines the address of the TBIPA register + * + * @ucc_configure: a special function for extra QE configuration + */ +struct fsl_pq_mdio_data { + unsigned int mii_offset; /* offset of the MII registers */ + uint32_t __iomem * (*get_tbipa)(void __iomem *p); + void (*ucc_configure)(phys_addr_t start, phys_addr_t end); +}; + +/* + * Write value to the PHY at mii_id at register regnum, on the bus attached + * to the local interface, which may be different from the generic mdio bus + * (tied to a single interface), waiting until the write is done before + * returning. This is helpful in programming interfaces like the TBI which + * control interfaces like onchip SERDES and are always tied to the local + * mdio pins, which may not be the same as system mdio bus, used for + * controlling the external PHYs, for example. + */ +static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; + unsigned int timeout; + + /* Set the PHY address and the register address we want to write */ + iowrite32be((mii_id << 8) | regnum, ®s->miimadd); + + /* Write out the value we want */ + iowrite32be(value, ®s->miimcon); + + /* Wait for the transaction to finish */ + timeout = MII_TIMEOUT; + while ((ioread32be(®s->miimind) & MIIMIND_BUSY) && timeout) { + cpu_relax(); + timeout--; + } + + return timeout ? 0 : -ETIMEDOUT; +} + +/* + * Read the bus for PHY at addr mii_id, register regnum, and return the value. + * Clears miimcom first. + * + * All PHY operation done on the bus attached to the local interface, which + * may be different from the generic mdio bus. 
This is helpful in programming + * interfaces like the TBI which, in turn, control interfaces like on-chip + * SERDES and are always tied to the local mdio pins, which may not be the + * same as system mdio bus, used for controlling the external PHYs, for eg. + */ +static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; + unsigned int timeout; + u16 value; + + /* Set the PHY address and the register address we want to read */ + iowrite32be((mii_id << 8) | regnum, ®s->miimadd); + + /* Clear miimcom, and then initiate a read */ + iowrite32be(0, ®s->miimcom); + iowrite32be(MII_READ_COMMAND, ®s->miimcom); + + /* Wait for the transaction to finish, normally less than 100us */ + timeout = MII_TIMEOUT; + while ((ioread32be(®s->miimind) & + (MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) { + cpu_relax(); + timeout--; + } + + if (!timeout) + return -ETIMEDOUT; + + /* Grab the value of the register from miimstat */ + value = ioread32be(®s->miimstat); + + dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum); + return value; +} + +/* Reset the MIIM registers, and wait for the bus to free */ +static int fsl_pq_mdio_reset(struct mii_bus *bus) +{ + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; + unsigned int timeout; + + mutex_lock(&bus->mdio_lock); + + /* Reset the management interface */ + iowrite32be(MIIMCFG_RESET, ®s->miimcfg); + + /* Setup the MII Mgmt clock speed */ + iowrite32be(MIIMCFG_INIT_VALUE, ®s->miimcfg); + + /* Wait until the bus is free */ + timeout = MII_TIMEOUT; + while ((ioread32be(®s->miimind) & MIIMIND_BUSY) && timeout) { + cpu_relax(); + timeout--; + } + + mutex_unlock(&bus->mdio_lock); + + if (!timeout) { + dev_err(&bus->dev, "timeout waiting for MII bus\n"); + return -EBUSY; + } + + return 0; +} + +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) +/* + * This is mildly evil, but so is our hardware for doing this. + * Also, we have to cast back to struct gfar because of + * definition weirdness done in gianfar.h. + */ +static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) +{ + struct gfar __iomem *enet_regs = p; + + return &enet_regs->tbipa; +} + +/* + * Return the TBIPAR address for an eTSEC2 node + */ +static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) +{ + return p; +} +#endif + +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) +/* + * Return the TBIPAR address for a QE MDIO node + */ +static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) +{ + struct fsl_pq_mdio __iomem *mdio = p; + + return &mdio->utbipar; +} + +/* + * Find the UCC node that controls the given MDIO node + * + * For some reason, the QE MDIO nodes are not children of the UCC devices + * that control them. Therefore, we need to scan all UCC nodes looking for + * the one that encompases the given MDIO node. We do this by comparing + * physical addresses. The 'start' and 'end' addresses of the MDIO node are + * passed, and the correct UCC node will cover the entire address range. + * + * This assumes that there is only one QE MDIO node in the entire device tree. 
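/*
 * Illustration only: the scan implemented just below answers "does this
 * UCC's register window contain the MDIO registers?" purely by comparing
 * physical addresses.  A freestanding model of that containment test with
 * hypothetical types (the driver uses struct resource and phys_addr_t):
 */
struct addr_range {
	unsigned long long start;
	unsigned long long end;		/* inclusive, as in struct resource */
};

static int range_contains(const struct addr_range *outer,
			  unsigned long long start, unsigned long long end)
{
	/* mirrors: if ((start < res.start) || (end > res.end)) continue; */
	return start >= outer->start && end <= outer->end;
}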
+ */ +static void ucc_configure(phys_addr_t start, phys_addr_t end) +{ + static bool found_mii_master; + struct device_node *np = NULL; + + if (found_mii_master) + return; + + for_each_compatible_node(np, NULL, "ucc_geth") { + struct resource res; + const uint32_t *iprop; + uint32_t id; + int ret; + + ret = of_address_to_resource(np, 0, &res); + if (ret < 0) { + pr_debug("fsl-pq-mdio: no address range in node %s\n", + np->full_name); + continue; + } + + /* if our mdio regs fall within this UCC regs range */ + if ((start < res.start) || (end > res.end)) + continue; + + iprop = of_get_property(np, "cell-index", NULL); + if (!iprop) { + iprop = of_get_property(np, "device-id", NULL); + if (!iprop) { + pr_debug("fsl-pq-mdio: no UCC ID in node %s\n", + np->full_name); + continue; + } + } + + id = be32_to_cpup(iprop); + + /* + * cell-index and device-id for QE nodes are + * numbered from 1, not 0. + */ + if (ucc_set_qe_mux_mii_mng(id - 1) < 0) { + pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n", + np->full_name); + continue; + } + + pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id); + found_mii_master = true; + } +} + +#endif + +static const struct of_device_id fsl_pq_mdio_match[] = { +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) + { + .compatible = "fsl,gianfar-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,gianfar-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .type = "mdio", + .compatible = "gianfar", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, +#endif +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) + { + .compatible = "fsl,ucc-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, + { + /* Legacy UCC MDIO node */ + .type = "mdio", + .compatible = "ucc_geth_phy", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, +#endif + /* No Kconfig option for Fman support yet */ + { + .compatible = "fsl,fman-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + /* Fman TBI operations are handled elsewhere */ + }, + }, + + {}, +}; +MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); + +static int fsl_pq_mdio_probe(struct platform_device *pdev) +{ + const struct of_device_id *id = + of_match_device(fsl_pq_mdio_match, &pdev->dev); + const struct fsl_pq_mdio_data *data = id->data; + struct device_node *np = pdev->dev.of_node; + struct resource res; + struct device_node *tbi; + struct fsl_pq_mdio_priv *priv; + struct mii_bus *new_bus; + int err; + + dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); + + new_bus = mdiobus_alloc_size(sizeof(*priv)); + if (!new_bus) + return -ENOMEM; + + priv = new_bus->priv; + new_bus->name = "Freescale PowerQUICC MII Bus", + new_bus->read = &fsl_pq_mdio_read; + new_bus->write = &fsl_pq_mdio_write; + new_bus->reset = &fsl_pq_mdio_reset; + new_bus->irq = priv->irqs; + + err = 
of_address_to_resource(np, 0, &res); + if (err < 0) { + dev_err(&pdev->dev, "could not obtain address information\n"); + goto error; + } + + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name, + (unsigned long long)res.start); + + priv->map = of_iomap(np, 0); + if (!priv->map) { + err = -ENOMEM; + goto error; + } + + /* + * Some device tree nodes represent only the MII registers, and + * others represent the MAC and MII registers. The 'mii_offset' field + * contains the offset of the MII registers inside the mapped register + * space. + */ + if (data->mii_offset > resource_size(&res)) { + dev_err(&pdev->dev, "invalid register map\n"); + err = -EINVAL; + goto error; + } + priv->regs = priv->map + data->mii_offset; + + new_bus->parent = &pdev->dev; + platform_set_drvdata(pdev, new_bus); + + if (data->get_tbipa) { + for_each_child_of_node(np, tbi) { + if (strcmp(tbi->type, "tbi-phy") == 0) { + dev_dbg(&pdev->dev, "found TBI PHY node %s\n", + strrchr(tbi->full_name, '/') + 1); + break; + } + } + + if (tbi) { + const u32 *prop = of_get_property(tbi, "reg", NULL); + uint32_t __iomem *tbipa; + + if (!prop) { + dev_err(&pdev->dev, + "missing 'reg' property in node %s\n", + tbi->full_name); + err = -EBUSY; + goto error; + } + + tbipa = data->get_tbipa(priv->map); + + iowrite32be(be32_to_cpup(prop), tbipa); + } + } + + if (data->ucc_configure) + data->ucc_configure(res.start, res.end); + + err = of_mdiobus_register(new_bus, np); + if (err) { + dev_err(&pdev->dev, "cannot register %s as MDIO bus\n", + new_bus->name); + goto error; + } + + return 0; + +error: + if (priv->map) + iounmap(priv->map); + + kfree(new_bus); + + return err; +} + + +static int fsl_pq_mdio_remove(struct platform_device *pdev) +{ + struct device *device = &pdev->dev; + struct mii_bus *bus = dev_get_drvdata(device); + struct fsl_pq_mdio_priv *priv = bus->priv; + + mdiobus_unregister(bus); + + iounmap(priv->map); + mdiobus_free(bus); + + return 0; +} + +static struct platform_driver fsl_pq_mdio_driver = { + .driver = { + .name = "fsl-pq_mdio", + .of_match_table = fsl_pq_mdio_match, + }, + .probe = fsl_pq_mdio_probe, + .remove = fsl_pq_mdio_remove, +}; + +module_platform_driver(fsl_pq_mdio_driver); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c new file mode 100644 index 000000000..4ee080d49 --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -0,0 +1,3621 @@ +/* drivers/net/ethernet/freescale/gianfar.c + * + * Gianfar Ethernet Driver + * This driver is designed for the non-CPM ethernet controllers + * on the 85xx and 83xx family of integrated processors + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala + * Modifier: Sandeep Gopalpet + * + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. + * Copyright 2007 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Gianfar: AKA Lambda Draconis, "Dragon" + * RA 11 31 24.2 + * Dec +69 19 52 + * V 3.84 + * B-V +1.62 + * + * Theory of operation + * + * The driver is initialized through of_device. Configuration information + * is therefore conveyed through an OF-style device tree. + * + * The Gianfar Ethernet Controller uses a ring of buffer + * descriptors. 
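/*
 * Illustration only: a freestanding model of the descriptor-ring convention
 * this theory-of-operation comment goes on to describe -- an array of
 * descriptors in which the last entry carries a "wrap" flag that sends the
 * consumer back to the base.  Structure layout, names and the flag value
 * are invented for the example.
 */
#include <stddef.h>

#define BD_WRAP		0x2000	/* placeholder, not the real wrap bit */

struct fake_bd {
	unsigned short flags;
	unsigned int bufptr;
};

static void ring_init(struct fake_bd *ring, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		ring[i].flags = 0;

	ring[n - 1].flags |= BD_WRAP;	/* last descriptor wraps to the base */
}

static size_t ring_next(const struct fake_bd *ring, size_t cur)
{
	/* software walks the ring the same way the DMA engine does */
	return (ring[cur].flags & BD_WRAP) ? 0 : cur + 1;
}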
The beginning is indicated by a register + * pointing to the physical address of the start of the ring. + * The end is determined by a "wrap" bit being set in the + * last descriptor of the ring. + * + * When a packet is received, the RXF bit in the + * IEVENT register is set, triggering an interrupt when the + * corresponding bit in the IMASK register is also set (if + * interrupt coalescing is active, then the interrupt may not + * happen immediately, but will wait until either a set number + * of frames or amount of time have passed). In NAPI, the + * interrupt handler will signal there is work to be done, and + * exit. This method will start at the last known empty + * descriptor, and process every subsequent descriptor until there + * are none left with data (NAPI will stop after a set number of + * packets to give time to other tasks, but will eventually + * process all the packets). The data arrives inside a + * pre-allocated skb, and so after the skb is passed up to the + * stack, a new skb must be allocated, and the address field in + * the buffer descriptor must be updated to indicate this new + * skb. + * + * When the kernel requests that a packet be transmitted, the + * driver starts where it left off last time, and points the + * descriptor at the buffer which was passed in. The driver + * then informs the DMA engine that there are packets ready to + * be transmitted. Once the controller is finished transmitting + * the packet, an interrupt may be triggered (under the same + * conditions as for reception, but depending on the TXF bit). + * The driver then cleans up the buffer. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef CONFIG_PPC +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" + +#define TX_TIMEOUT (1*HZ) + +const char gfar_driver_version[] = "1.3"; + +static int gfar_enet_open(struct net_device *dev); +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static void gfar_reset_task(struct work_struct *work); +static void gfar_timeout(struct net_device *dev); +static int gfar_close(struct net_device *dev); +static struct sk_buff *gfar_new_skb(struct net_device *dev, + dma_addr_t *bufaddr); +static int gfar_set_mac_address(struct net_device *dev); +static int gfar_change_mtu(struct net_device *dev, int new_mtu); +static irqreturn_t gfar_error(int irq, void *dev_id); +static irqreturn_t gfar_transmit(int irq, void *dev_id); +static irqreturn_t gfar_interrupt(int irq, void *dev_id); +static void adjust_link(struct net_device *dev); +static noinline void gfar_update_link_state(struct gfar_private *priv); +static int init_phy(struct net_device *dev); +static int gfar_probe(struct platform_device *ofdev); +static int gfar_remove(struct platform_device *ofdev); +static void free_skb_resources(struct gfar_private *priv); +static void gfar_set_multi(struct net_device *dev); +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); +static void gfar_configure_serdes(struct net_device *dev); +static int gfar_poll_rx(struct napi_struct *napi, int budget); +static int gfar_poll_tx(struct napi_struct *napi, int budget); +static int gfar_poll_rx_sq(struct napi_struct *napi, 
int budget); +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void gfar_netpoll(struct net_device *dev); +#endif +int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull, struct napi_struct *napi); +static void gfar_halt_nodisable(struct gfar_private *priv); +static void gfar_clear_exact_match(struct net_device *dev); +static void gfar_set_mac_for_addr(struct net_device *dev, int num, + const u8 *addr); +static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Gianfar Ethernet Driver"); +MODULE_LICENSE("GPL"); + +static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, + dma_addr_t buf) +{ + u32 lstatus; + + bdp->bufPtr = cpu_to_be32(buf); + + lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); + if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) + lstatus |= BD_LFLAG(RXBD_WRAP); + + gfar_wmb(); + + bdp->lstatus = cpu_to_be32(lstatus); +} + +static int gfar_init_bds(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + struct txbd8 *txbdp; + struct rxbd8 *rxbdp; + u32 __iomem *rfbptr; + int i, j; + dma_addr_t bufaddr; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + /* Initialize some variables in our dev structure */ + tx_queue->num_txbdfree = tx_queue->tx_ring_size; + tx_queue->dirty_tx = tx_queue->tx_bd_base; + tx_queue->cur_tx = tx_queue->tx_bd_base; + tx_queue->skb_curtx = 0; + tx_queue->skb_dirtytx = 0; + + /* Initialize Transmit Descriptor Ring */ + txbdp = tx_queue->tx_bd_base; + for (j = 0; j < tx_queue->tx_ring_size; j++) { + txbdp->lstatus = 0; + txbdp->bufPtr = 0; + txbdp++; + } + + /* Set the last descriptor in the ring to indicate wrap */ + txbdp--; + txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) | + TXBD_WRAP); + } + + rfbptr = ®s->rfbptr0; + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->cur_rx = rx_queue->rx_bd_base; + rx_queue->skb_currx = 0; + rxbdp = rx_queue->rx_bd_base; + + for (j = 0; j < rx_queue->rx_ring_size; j++) { + struct sk_buff *skb = rx_queue->rx_skbuff[j]; + + if (skb) { + bufaddr = be32_to_cpu(rxbdp->bufPtr); + } else { + skb = gfar_new_skb(ndev, &bufaddr); + if (!skb) { + netdev_err(ndev, "Can't allocate RX buffers\n"); + return -ENOMEM; + } + rx_queue->rx_skbuff[j] = skb; + } + + gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); + rxbdp++; + } + + rx_queue->rfbptr = rfbptr; + rfbptr += 2; + } + + return 0; +} + +static int gfar_alloc_skb_resources(struct net_device *ndev) +{ + void *vaddr; + dma_addr_t addr; + int i, j, k; + struct gfar_private *priv = netdev_priv(ndev); + struct device *dev = priv->dev; + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + + priv->total_tx_ring_size = 0; + for (i = 0; i < priv->num_tx_queues; i++) + priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size; + + priv->total_rx_ring_size = 0; + for (i = 0; i < priv->num_rx_queues; i++) + priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; + + /* Allocate memory for the buffer descriptors */ + vaddr = dma_alloc_coherent(dev, + 
(priv->total_tx_ring_size * + sizeof(struct txbd8)) + + (priv->total_rx_ring_size * + sizeof(struct rxbd8)), + &addr, GFP_KERNEL); + if (!vaddr) + return -ENOMEM; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_bd_base = vaddr; + tx_queue->tx_bd_dma_base = addr; + tx_queue->dev = ndev; + /* enet DMA only understands physical addresses */ + addr += sizeof(struct txbd8) * tx_queue->tx_ring_size; + vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size; + } + + /* Start the rx descriptor ring where the tx ring leaves off */ + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_bd_base = vaddr; + rx_queue->rx_bd_dma_base = addr; + rx_queue->dev = ndev; + addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; + vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; + } + + /* Setup the skbuff rings */ + for (i = 0; i < priv->num_tx_queues; i++) { + tx_queue = priv->tx_queue[i]; + tx_queue->tx_skbuff = + kmalloc_array(tx_queue->tx_ring_size, + sizeof(*tx_queue->tx_skbuff), + GFP_KERNEL); + if (!tx_queue->tx_skbuff) + goto cleanup; + + for (k = 0; k < tx_queue->tx_ring_size; k++) + tx_queue->tx_skbuff[k] = NULL; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + rx_queue->rx_skbuff = + kmalloc_array(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_skbuff), + GFP_KERNEL); + if (!rx_queue->rx_skbuff) + goto cleanup; + + for (j = 0; j < rx_queue->rx_ring_size; j++) + rx_queue->rx_skbuff[j] = NULL; + } + + if (gfar_init_bds(ndev)) + goto cleanup; + + return 0; + +cleanup: + free_skb_resources(priv); + return -ENOMEM; +} + +static void gfar_init_tx_rx_base(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i; + + baddr = ®s->tbase0; + for (i = 0; i < priv->num_tx_queues; i++) { + gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); + baddr += 2; + } + + baddr = ®s->rbase0; + for (i = 0; i < priv->num_rx_queues; i++) { + gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); + baddr += 2; + } +} + +static void gfar_init_rqprm(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + int i; + + baddr = ®s->rqprm0; + for (i = 0; i < priv->num_rx_queues; i++) { + gfar_write(baddr, priv->rx_queue[i]->rx_ring_size | + (DEFAULT_RX_LFC_THR << FBTHR_SHIFT)); + baddr++; + } +} + +static void gfar_rx_buff_size_config(struct gfar_private *priv) +{ + int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* set this when rx hw offload (TOE) functions are being used */ + priv->uses_rxfcb = 0; + + if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) + priv->uses_rxfcb = 1; + + if (priv->hwts_rx_en) + priv->uses_rxfcb = 1; + + if (priv->uses_rxfcb) + frame_size += GMAC_FCB_LEN; + + frame_size += priv->padding; + + frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + + INCREMENTAL_BUFFER_SIZE; + + priv->rx_buffer_size = frame_size; +} + +static void gfar_mac_rx_config(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 rctrl = 0; + + if (priv->rx_filer_enable) { + rctrl |= RCTRL_FILREN; + /* Program the RIR0 reg with the required distribution */ + if (priv->poll_mode == GFAR_SQ_POLLING) + gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); + else /* GFAR_MQ_POLLING */ + gfar_write(®s->rir0, DEFAULT_8RXQ_RIR0); + } + + /* Restore PROMISC mode */ + if (priv->ndev->flags & IFF_PROMISC) + rctrl |= RCTRL_PROM; + + if (priv->ndev->features 
& NETIF_F_RXCSUM) + rctrl |= RCTRL_CHECKSUMMING; + + if (priv->extended_hash) + rctrl |= RCTRL_EXTHASH | RCTRL_EMEN; + + if (priv->padding) { + rctrl &= ~RCTRL_PAL_MASK; + rctrl |= RCTRL_PADDING(priv->padding); + } + + /* Enable HW time stamping if requested from user space */ + if (priv->hwts_rx_en) + rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; + + if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; + + /* Clear the LFC bit */ + gfar_write(®s->rctrl, rctrl); + /* Init flow control threshold values */ + gfar_init_rqprm(priv); + gfar_write(®s->ptv, DEFAULT_LFC_PTVVAL); + rctrl |= RCTRL_LFC; + + /* Init rctrl based on our settings */ + gfar_write(®s->rctrl, rctrl); +} + +static void gfar_mac_tx_config(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tctrl = 0; + + if (priv->ndev->features & NETIF_F_IP_CSUM) + tctrl |= TCTRL_INIT_CSUM; + + if (priv->prio_sched_en) + tctrl |= TCTRL_TXSCHED_PRIO; + else { + tctrl |= TCTRL_TXSCHED_WRRS; + gfar_write(®s->tr03wt, DEFAULT_WRRS_WEIGHT); + gfar_write(®s->tr47wt, DEFAULT_WRRS_WEIGHT); + } + + if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) + tctrl |= TCTRL_VLINS; + + gfar_write(®s->tctrl, tctrl); +} + +static void gfar_configure_coalescing(struct gfar_private *priv, + unsigned long tx_mask, unsigned long rx_mask) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr; + + if (priv->mode == MQ_MG_MODE) { + int i = 0; + + baddr = ®s->txic0; + for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { + gfar_write(baddr + i, 0); + if (likely(priv->tx_queue[i]->txcoalescing)) + gfar_write(baddr + i, priv->tx_queue[i]->txic); + } + + baddr = ®s->rxic0; + for_each_set_bit(i, &rx_mask, priv->num_rx_queues) { + gfar_write(baddr + i, 0); + if (likely(priv->rx_queue[i]->rxcoalescing)) + gfar_write(baddr + i, priv->rx_queue[i]->rxic); + } + } else { + /* Backward compatible case -- even if we enable + * multiple queues, there's only single reg to program + */ + gfar_write(®s->txic, 0); + if (likely(priv->tx_queue[0]->txcoalescing)) + gfar_write(®s->txic, priv->tx_queue[0]->txic); + + gfar_write(®s->rxic, 0); + if (unlikely(priv->rx_queue[0]->rxcoalescing)) + gfar_write(®s->rxic, priv->rx_queue[0]->rxic); + } +} + +void gfar_configure_coalescing_all(struct gfar_private *priv) +{ + gfar_configure_coalescing(priv, 0xFF, 0xFF); +} + +static struct net_device_stats *gfar_get_stats(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; + unsigned long tx_packets = 0, tx_bytes = 0; + int i; + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_packets += priv->rx_queue[i]->stats.rx_packets; + rx_bytes += priv->rx_queue[i]->stats.rx_bytes; + rx_dropped += priv->rx_queue[i]->stats.rx_dropped; + } + + dev->stats.rx_packets = rx_packets; + dev->stats.rx_bytes = rx_bytes; + dev->stats.rx_dropped = rx_dropped; + + for (i = 0; i < priv->num_tx_queues; i++) { + tx_bytes += priv->tx_queue[i]->stats.tx_bytes; + tx_packets += priv->tx_queue[i]->stats.tx_packets; + } + + dev->stats.tx_bytes = tx_bytes; + dev->stats.tx_packets = tx_packets; + + return &dev->stats; +} + +static const struct net_device_ops gfar_netdev_ops = { + .ndo_open = gfar_enet_open, + .ndo_start_xmit = gfar_start_xmit, + .ndo_stop = gfar_close, + .ndo_change_mtu = gfar_change_mtu, + .ndo_set_features = gfar_set_features, + .ndo_set_rx_mode = gfar_set_multi, + .ndo_tx_timeout = gfar_timeout, + .ndo_do_ioctl = gfar_ioctl, + 
.ndo_get_stats = gfar_get_stats, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gfar_netpoll, +#endif +}; + +static void gfar_ints_disable(struct gfar_private *priv) +{ + int i; + for (i = 0; i < priv->num_grps; i++) { + struct gfar __iomem *regs = priv->gfargrp[i].regs; + /* Clear IEVENT */ + gfar_write(®s->ievent, IEVENT_INIT_CLEAR); + + /* Initialize IMASK */ + gfar_write(®s->imask, IMASK_INIT_CLEAR); + } +} + +static void gfar_ints_enable(struct gfar_private *priv) +{ + int i; + for (i = 0; i < priv->num_grps; i++) { + struct gfar __iomem *regs = priv->gfargrp[i].regs; + /* Unmask the interrupts we look for */ + gfar_write(®s->imask, IMASK_DEFAULT); + } +} + +static void lock_tx_qs(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_queues; i++) + spin_lock(&priv->tx_queue[i]->txlock); +} + +static void unlock_tx_qs(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_queues; i++) + spin_unlock(&priv->tx_queue[i]->txlock); +} + +static int gfar_alloc_tx_queues(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q), + GFP_KERNEL); + if (!priv->tx_queue[i]) + return -ENOMEM; + + priv->tx_queue[i]->tx_skbuff = NULL; + priv->tx_queue[i]->qindex = i; + priv->tx_queue[i]->dev = priv->ndev; + spin_lock_init(&(priv->tx_queue[i]->txlock)); + } + return 0; +} + +static int gfar_alloc_rx_queues(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q), + GFP_KERNEL); + if (!priv->rx_queue[i]) + return -ENOMEM; + + priv->rx_queue[i]->rx_skbuff = NULL; + priv->rx_queue[i]->qindex = i; + priv->rx_queue[i]->dev = priv->ndev; + } + return 0; +} + +static void gfar_free_tx_queues(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_queues; i++) + kfree(priv->tx_queue[i]); +} + +static void gfar_free_rx_queues(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_queues; i++) + kfree(priv->rx_queue[i]); +} + +static void unmap_group_regs(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < MAXGROUPS; i++) + if (priv->gfargrp[i].regs) + iounmap(priv->gfargrp[i].regs); +} + +static void free_gfar_dev(struct gfar_private *priv) +{ + int i, j; + + for (i = 0; i < priv->num_grps; i++) + for (j = 0; j < GFAR_NUM_IRQS; j++) { + kfree(priv->gfargrp[i].irqinfo[j]); + priv->gfargrp[i].irqinfo[j] = NULL; + } + + free_netdev(priv->ndev); +} + +static void disable_napi(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_grps; i++) { + napi_disable(&priv->gfargrp[i].napi_rx); + napi_disable(&priv->gfargrp[i].napi_tx); + } +} + +static void enable_napi(struct gfar_private *priv) +{ + int i; + + for (i = 0; i < priv->num_grps; i++) { + napi_enable(&priv->gfargrp[i].napi_rx); + napi_enable(&priv->gfargrp[i].napi_tx); + } +} + +static int gfar_parse_group(struct device_node *np, + struct gfar_private *priv, const char *model) +{ + struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; + int i; + + for (i = 0; i < GFAR_NUM_IRQS; i++) { + grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo), + GFP_KERNEL); + if (!grp->irqinfo[i]) + return -ENOMEM; + } + + grp->regs = of_iomap(np, 0); + if (!grp->regs) + return -ENOMEM; + + gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0); + + /* If we aren't the FEC we have multiple interrupts */ + if (model && 
strcasecmp(model, "FEC")) { + gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); + gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); + if (gfar_irq(grp, TX)->irq == NO_IRQ || + gfar_irq(grp, RX)->irq == NO_IRQ || + gfar_irq(grp, ER)->irq == NO_IRQ) + return -EINVAL; + } + + grp->priv = priv; + spin_lock_init(&grp->grplock); + if (priv->mode == MQ_MG_MODE) { + u32 rxq_mask, txq_mask; + int ret; + + grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + + ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask); + if (!ret) { + grp->rx_bit_map = rxq_mask ? + rxq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } + + ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask); + if (!ret) { + grp->tx_bit_map = txq_mask ? + txq_mask : (DEFAULT_MAPPING >> priv->num_grps); + } + + if (priv->poll_mode == GFAR_SQ_POLLING) { + /* One Q per interrupt group: Q0 to G0, Q1 to G1 */ + grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps); + } + } else { + grp->rx_bit_map = 0xFF; + grp->tx_bit_map = 0xFF; + } + + /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses + * right to left, so we need to revert the 8 bits to get the q index + */ + grp->rx_bit_map = bitrev8(grp->rx_bit_map); + grp->tx_bit_map = bitrev8(grp->tx_bit_map); + + /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, + * also assign queues to groups + */ + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { + if (!grp->rx_queue) + grp->rx_queue = priv->rx_queue[i]; + grp->num_rx_queues++; + grp->rstat |= (RSTAT_CLEAR_RHALT >> i); + priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i); + priv->rx_queue[i]->grp = grp; + } + + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { + if (!grp->tx_queue) + grp->tx_queue = priv->tx_queue[i]; + grp->num_tx_queues++; + grp->tstat |= (TSTAT_CLEAR_THALT >> i); + priv->tqueue |= (TQUEUE_EN0 >> i); + priv->tx_queue[i]->grp = grp; + } + + priv->num_grps++; + + return 0; +} + +static int gfar_of_group_count(struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + if (!of_node_cmp(child->name, "queue-group")) + num++; + + return num; +} + +static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) +{ + const char *model; + const char *ctype; + const void *mac_addr; + int err = 0, i; + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + struct device_node *np = ofdev->dev.of_node; + struct device_node *child = NULL; + struct property *stash; + u32 stash_len = 0; + u32 stash_idx = 0; + unsigned int num_tx_qs, num_rx_qs; + unsigned short mode, poll_mode; + + if (!np) + return -ENODEV; + + if (of_device_is_compatible(np, "fsl,etsec2")) { + mode = MQ_MG_MODE; + poll_mode = GFAR_SQ_POLLING; + } else { + mode = SQ_SG_MODE; + poll_mode = GFAR_SQ_POLLING; + } + + if (mode == SQ_SG_MODE) { + num_tx_qs = 1; + num_rx_qs = 1; + } else { /* MQ_MG_MODE */ + /* get the actual number of supported groups */ + unsigned int num_grps = gfar_of_group_count(np); + + if (num_grps == 0 || num_grps > MAXGROUPS) { + dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", + num_grps); + pr_err("Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + if (poll_mode == GFAR_SQ_POLLING) { + num_tx_qs = num_grps; /* one txq per int group */ + num_rx_qs = num_grps; /* one rxq per int group */ + } else { /* GFAR_MQ_POLLING */ + u32 tx_queues, rx_queues; + int ret; + + /* parse the 
num of HW tx and rx queues */ + ret = of_property_read_u32(np, "fsl,num_tx_queues", + &tx_queues); + num_tx_qs = ret ? 1 : tx_queues; + + ret = of_property_read_u32(np, "fsl,num_rx_queues", + &rx_queues); + num_rx_qs = ret ? 1 : rx_queues; + } + } + + if (num_tx_qs > MAX_TX_QS) { + pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", + num_tx_qs, MAX_TX_QS); + pr_err("Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + if (num_rx_qs > MAX_RX_QS) { + pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", + num_rx_qs, MAX_RX_QS); + pr_err("Cannot do alloc_etherdev, aborting\n"); + return -EINVAL; + } + + *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); + dev = *pdev; + if (NULL == dev) + return -ENOMEM; + + priv = netdev_priv(dev); + priv->ndev = dev; + + priv->mode = mode; + priv->poll_mode = poll_mode; + + priv->num_tx_queues = num_tx_qs; + netif_set_real_num_rx_queues(dev, num_rx_qs); + priv->num_rx_queues = num_rx_qs; + + err = gfar_alloc_tx_queues(priv); + if (err) + goto tx_alloc_failed; + + err = gfar_alloc_rx_queues(priv); + if (err) + goto rx_alloc_failed; + + err = of_property_read_string(np, "model", &model); + if (err) { + pr_err("Device model property missing, aborting\n"); + goto rx_alloc_failed; + } + + /* Init Rx queue filer rule set linked list */ + INIT_LIST_HEAD(&priv->rx_list.list); + priv->rx_list.count = 0; + mutex_init(&priv->rx_queue_access); + + for (i = 0; i < MAXGROUPS; i++) + priv->gfargrp[i].regs = NULL; + + /* Parse and initialize group specific information */ + if (priv->mode == MQ_MG_MODE) { + for_each_available_child_of_node(np, child) { + if (of_node_cmp(child->name, "queue-group")) + continue; + + err = gfar_parse_group(child, priv, model); + if (err) + goto err_grp_init; + } + } else { /* SQ_SG_MODE */ + err = gfar_parse_group(np, priv, model); + if (err) + goto err_grp_init; + } + + stash = of_find_property(np, "bd-stash", NULL); + + if (stash) { + priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; + priv->bd_stash_en = 1; + } + + err = of_property_read_u32(np, "rx-stash-len", &stash_len); + + if (err == 0) + priv->rx_stash_size = stash_len; + + err = of_property_read_u32(np, "rx-stash-idx", &stash_idx); + + if (err == 0) + priv->rx_stash_index = stash_idx; + + if (stash_len || stash_idx) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; + + mac_addr = of_get_mac_address(np); + + if (mac_addr) + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + + if (model && !strcasecmp(model, "TSEC")) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | + FSL_GIANFAR_DEV_HAS_COALESCE | + FSL_GIANFAR_DEV_HAS_RMON | + FSL_GIANFAR_DEV_HAS_MULTI_INTR; + + if (model && !strcasecmp(model, "eTSEC")) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | + FSL_GIANFAR_DEV_HAS_COALESCE | + FSL_GIANFAR_DEV_HAS_RMON | + FSL_GIANFAR_DEV_HAS_MULTI_INTR | + FSL_GIANFAR_DEV_HAS_CSUM | + FSL_GIANFAR_DEV_HAS_VLAN | + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | + FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | + FSL_GIANFAR_DEV_HAS_TIMER; + + err = of_property_read_string(np, "phy-connection-type", &ctype); + + /* We only care about rgmii-id. 
The rest are autodetected */ + if (err == 0 && !strcmp(ctype, "rgmii-id")) + priv->interface = PHY_INTERFACE_MODE_RGMII_ID; + else + priv->interface = PHY_INTERFACE_MODE_MII; + + if (of_find_property(np, "fsl,magic-packet", NULL)) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; + + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_node && of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + goto err_grp_init; + + priv->phy_node = of_node_get(np); + } + + /* Find the TBI PHY. If it's not there, we don't support SGMII */ + priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); + + return 0; + +err_grp_init: + unmap_group_regs(priv); +rx_alloc_failed: + gfar_free_rx_queues(priv); +tx_alloc_failed: + gfar_free_tx_queues(priv); + free_gfar_dev(priv); + return err; +} + +static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + priv->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + if (priv->hwts_rx_en) { + priv->hwts_rx_en = 0; + reset_gfar(netdev); + } + break; + default: + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) + return -ERANGE; + if (!priv->hwts_rx_en) { + priv->hwts_rx_en = 1; + reset_gfar(netdev); + } + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + struct gfar_private *priv = netdev_priv(netdev); + + config.flags = 0; + config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = (priv->hwts_rx_en ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd == SIOCSHWTSTAMP) + return gfar_hwtstamp_set(dev, rq); + if (cmd == SIOCGHWTSTAMP) + return gfar_hwtstamp_get(dev, rq); + + if (!priv->phydev) + return -ENODEV; + + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, + u32 class) +{ + u32 rqfpr = FPR_FILER_MASK; + u32 rqfcr = 0x0; + + rqfar--; + rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; + priv->ftp_rqfpr[rqfar] = rqfpr; + priv->ftp_rqfcr[rqfar] = rqfcr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_NOMATCH; + priv->ftp_rqfpr[rqfar] = rqfpr; + priv->ftp_rqfcr[rqfar] = rqfcr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; + rqfpr = class; + priv->ftp_rqfcr[rqfar] = rqfcr; + priv->ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar--; + rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; + rqfpr = class; + priv->ftp_rqfcr[rqfar] = rqfcr; + priv->ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + return rqfar; +} + +static void gfar_init_filer_table(struct gfar_private *priv) +{ + int i = 0x0; + u32 rqfar = MAX_FILER_IDX; + u32 rqfcr = 0x0; + u32 rqfpr = FPR_FILER_MASK; + + /* Default rule */ + rqfcr = RQFCR_CMP_MATCH; + priv->ftp_rqfcr[rqfar] = rqfcr; + priv->ftp_rqfpr[rqfar] = rqfpr; + gfar_write_filer(priv, rqfar, rqfcr, rqfpr); + + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); + rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); + + /* cur_filer_idx indicated the first non-masked rule */ + priv->cur_filer_idx = rqfar; + + /* Rest are masked rules */ + rqfcr = RQFCR_CMP_NOMATCH; + for (i = 0; i < rqfar; i++) { + priv->ftp_rqfcr[i] = rqfcr; + priv->ftp_rqfpr[i] = rqfpr; + gfar_write_filer(priv, i, rqfcr, rqfpr); + } +} + +#ifdef CONFIG_PPC +static void __gfar_detect_errata_83xx(struct gfar_private *priv) +{ + unsigned int pvr = mfspr(SPRN_PVR); + unsigned int svr = mfspr(SPRN_SVR); + unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */ + unsigned int rev = svr & 0xffff; + + /* MPC8313 Rev 2.0 and higher; All MPC837x */ + if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_74; + + /* MPC8313 and MPC837x all rev */ + if ((pvr == 0x80850010 && mod == 0x80b0) || + (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) + priv->errata |= GFAR_ERRATA_76; + + /* MPC8313 Rev < 2.0 */ + if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) + priv->errata |= GFAR_ERRATA_12; +} + +static void __gfar_detect_errata_85xx(struct gfar_private *priv) +{ + unsigned int svr = mfspr(SPRN_SVR); + + if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) + priv->errata |= GFAR_ERRATA_12; + if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || + ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) + priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ +} +#endif + +static void 
gfar_detect_errata(struct gfar_private *priv) +{ + struct device *dev = &priv->ofdev->dev; + + /* no plans to fix */ + priv->errata |= GFAR_ERRATA_A002; + +#ifdef CONFIG_PPC + if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2)) + __gfar_detect_errata_85xx(priv); + else /* non-mpc85xx parts, i.e. e300 core based */ + __gfar_detect_errata_83xx(priv); +#endif + + if (priv->errata) + dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", + priv->errata); +} + +void gfar_mac_reset(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + + /* Reset MAC layer */ + gfar_write(®s->maccfg1, MACCFG1_SOFT_RESET); + + /* We need to delay at least 3 TX clocks */ + udelay(3); + + /* the soft reset bit is not self-resetting, so we need to + * clear it before resuming normal operation + */ + gfar_write(®s->maccfg1, 0); + + udelay(3); + + /* Compute rx_buff_size based on config flags */ + gfar_rx_buff_size_config(priv); + + /* Initialize the max receive frame/buffer lengths */ + gfar_write(®s->maxfrm, priv->rx_buffer_size); + gfar_write(®s->mrblr, priv->rx_buffer_size); + + /* Initialize the Minimum Frame Length Register */ + gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); + + /* Initialize MACCFG2. */ + tempval = MACCFG2_INIT_SETTINGS; + + /* If the mtu is larger than the max size for standard + * ethernet frames (ie, a jumbo frame), then set maccfg2 + * to allow huge frames, and to check the length + */ + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || + gfar_has_errata(priv, GFAR_ERRATA_74)) + tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; + + gfar_write(®s->maccfg2, tempval); + + /* Clear mac addr hash registers */ + gfar_write(®s->igaddr0, 0); + gfar_write(®s->igaddr1, 0); + gfar_write(®s->igaddr2, 0); + gfar_write(®s->igaddr3, 0); + gfar_write(®s->igaddr4, 0); + gfar_write(®s->igaddr5, 0); + gfar_write(®s->igaddr6, 0); + gfar_write(®s->igaddr7, 0); + + gfar_write(®s->gaddr0, 0); + gfar_write(®s->gaddr1, 0); + gfar_write(®s->gaddr2, 0); + gfar_write(®s->gaddr3, 0); + gfar_write(®s->gaddr4, 0); + gfar_write(®s->gaddr5, 0); + gfar_write(®s->gaddr6, 0); + gfar_write(®s->gaddr7, 0); + + if (priv->extended_hash) + gfar_clear_exact_match(priv->ndev); + + gfar_mac_rx_config(priv); + + gfar_mac_tx_config(priv); + + gfar_set_mac_address(priv->ndev); + + gfar_set_multi(priv->ndev); + + /* clear ievent and imask before configuring coalescing */ + gfar_ints_disable(priv); + + /* Configure the coalescing support */ + gfar_configure_coalescing_all(priv); +} + +static void gfar_hw_init(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 attrs; + + /* Stop the DMA engine now, in case it was running before + * (The firmware could have used it, and left it running). 
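/*
 * Illustration only: the errata detection above reads the processor/system
 * version registers, splits out a part identifier and a revision, and ORs
 * per-revision workaround flags into one bitmask that is logged once.  A
 * freestanding model of that pattern; the flag values and the matches below
 * are invented stand-ins, loosely shaped after the MPC8313 checks above.
 */
#define ERRATA_A	0x01
#define ERRATA_B	0x02

static void split_svr(unsigned int svr, unsigned int *mod, unsigned int *rev)
{
	*mod = (svr >> 16) & 0xfff6;	/* part id, without the "E" suffix bit */
	*rev = svr & 0xffff;		/* silicon revision */
}

static unsigned int detect_errata(unsigned int svr)
{
	unsigned int mod, rev, errata = 0;

	split_svr(svr, &mod, &rev);

	if (mod == 0x80b0 && rev < 0x0020)	/* hypothetical old-revision flag */
		errata |= ERRATA_A;
	if (mod == 0x80b0)			/* hypothetical all-revision flag */
		errata |= ERRATA_B;

	return errata;
}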
+ */ + gfar_halt(priv); + + gfar_mac_reset(priv); + + /* Zero out the rmon mib registers if it has them */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { + memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib)); + + /* Mask off the CAM interrupts */ + gfar_write(®s->rmon.cam1, 0xffffffff); + gfar_write(®s->rmon.cam2, 0xffffffff); + } + + /* Initialize ECNTRL */ + gfar_write(®s->ecntrl, ECNTRL_INIT_SETTINGS); + + /* Set the extraction length and index */ + attrs = ATTRELI_EL(priv->rx_stash_size) | + ATTRELI_EI(priv->rx_stash_index); + + gfar_write(®s->attreli, attrs); + + /* Start with defaults, and add stashing + * depending on driver parameters + */ + attrs = ATTR_INIT_SETTINGS; + + if (priv->bd_stash_en) + attrs |= ATTR_BDSTASH; + + if (priv->rx_stash_size != 0) + attrs |= ATTR_BUFSTASH; + + gfar_write(®s->attr, attrs); + + /* FIFO configs */ + gfar_write(®s->fifo_tx_thr, DEFAULT_FIFO_TX_THR); + gfar_write(®s->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE); + gfar_write(®s->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF); + + /* Program the interrupt steering regs, only for MG devices */ + if (priv->num_grps > 1) + gfar_write_isrg(priv); +} + +static void gfar_init_addr_hash_table(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { + priv->extended_hash = 1; + priv->hash_width = 9; + + priv->hash_regs[0] = ®s->igaddr0; + priv->hash_regs[1] = ®s->igaddr1; + priv->hash_regs[2] = ®s->igaddr2; + priv->hash_regs[3] = ®s->igaddr3; + priv->hash_regs[4] = ®s->igaddr4; + priv->hash_regs[5] = ®s->igaddr5; + priv->hash_regs[6] = ®s->igaddr6; + priv->hash_regs[7] = ®s->igaddr7; + priv->hash_regs[8] = ®s->gaddr0; + priv->hash_regs[9] = ®s->gaddr1; + priv->hash_regs[10] = ®s->gaddr2; + priv->hash_regs[11] = ®s->gaddr3; + priv->hash_regs[12] = ®s->gaddr4; + priv->hash_regs[13] = ®s->gaddr5; + priv->hash_regs[14] = ®s->gaddr6; + priv->hash_regs[15] = ®s->gaddr7; + + } else { + priv->extended_hash = 0; + priv->hash_width = 8; + + priv->hash_regs[0] = ®s->gaddr0; + priv->hash_regs[1] = ®s->gaddr1; + priv->hash_regs[2] = ®s->gaddr2; + priv->hash_regs[3] = ®s->gaddr3; + priv->hash_regs[4] = ®s->gaddr4; + priv->hash_regs[5] = ®s->gaddr5; + priv->hash_regs[6] = ®s->gaddr6; + priv->hash_regs[7] = ®s->gaddr7; + } +} + +/* Set up the ethernet device structure, private data, + * and anything else we need before we start + */ +static int gfar_probe(struct platform_device *ofdev) +{ + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + int err = 0, i; + + err = gfar_of_init(ofdev, &dev); + + if (err) + return err; + + priv = netdev_priv(dev); + priv->ndev = dev; + priv->ofdev = ofdev; + priv->dev = &ofdev->dev; + SET_NETDEV_DEV(dev, &ofdev->dev); + + spin_lock_init(&priv->bflock); + INIT_WORK(&priv->reset_task, gfar_reset_task); + + platform_set_drvdata(ofdev, priv); + + gfar_detect_errata(priv); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long) priv->gfargrp[0].regs; + + /* Fill in the dev structure */ + dev->watchdog_timeo = TX_TIMEOUT; + dev->mtu = 1500; + dev->netdev_ops = &gfar_netdev_ops; + dev->ethtool_ops = &gfar_ethtool_ops; + + /* Register for napi ...We are registering NAPI for each grp */ + for (i = 0; i < priv->num_grps; i++) { + if (priv->poll_mode == GFAR_SQ_POLLING) { + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx_sq, GFAR_DEV_WEIGHT); + netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); + } else { + 
netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx, GFAR_DEV_WEIGHT); + netif_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx, 2); + } + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HIGHDMA; + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + gfar_init_addr_hash_table(priv); + + /* Insert receive time stamps into padding alignment bytes */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) + priv->padding = 8; + + if (dev->features & NETIF_F_IP_CSUM || + priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) + dev->needed_headroom = GMAC_FCB_LEN; + + priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; + + /* Initializing some of the rx/tx queue level parameters */ + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; + priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; + priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; + priv->tx_queue[i]->txic = DEFAULT_TXIC; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; + priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; + priv->rx_queue[i]->rxic = DEFAULT_RXIC; + } + + /* always enable rx filer */ + priv->rx_filer_enable = 1; + /* Enable most messages by default */ + priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; + /* use pritority h/w tx queue scheduling for single queue devices */ + if (priv->num_tx_queues == 1) + priv->prio_sched_en = 1; + + set_bit(GFAR_DOWN, &priv->state); + + gfar_hw_init(priv); + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(dev); + + err = register_netdev(dev); + + if (err) { + pr_err("%s: Cannot register net device, aborting\n", dev->name); + goto register_fail; + } + + device_init_wakeup(&dev->dev, + priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + + /* fill out IRQ number and name fields */ + for (i = 0; i < priv->num_grps; i++) { + struct gfar_priv_grp *grp = &priv->gfargrp[i]; + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s", + dev->name, "_g", '0' + i, "_tx"); + sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s", + dev->name, "_g", '0' + i, "_rx"); + sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s", + dev->name, "_g", '0' + i, "_er"); + } else + strcpy(gfar_irq(grp, TX)->name, dev->name); + } + + /* Initialize the filer table */ + gfar_init_filer_table(priv); + + /* Print out the device info */ + netdev_info(dev, "mac: %pM\n", dev->dev_addr); + + /* Even more device info helps when determining which kernel + * provided which set of benchmarks. 
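/*
 * Illustration only: the probe above initializes msg_enable to
 * (NETIF_MSG_IFUP << 1) - 1.  For a single-bit level constant this builds a
 * mask with that bit and every lower bit set, i.e. "this message level and
 * everything below it".  Standalone demonstration with an arbitrary bit:
 */
#include <stdio.h>

int main(void)
{
	unsigned int level = 0x0020;		/* some single-bit message level */
	unsigned int mask = (level << 1) - 1;	/* 0x003f: level and all below */

	printf("level 0x%04x -> default enable mask 0x%04x\n", level, mask);
	return 0;
}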
+ */ + netdev_info(dev, "Running with NAPI enabled\n"); + for (i = 0; i < priv->num_rx_queues; i++) + netdev_info(dev, "RX BD ring size for Q[%d]: %d\n", + i, priv->rx_queue[i]->rx_ring_size); + for (i = 0; i < priv->num_tx_queues; i++) + netdev_info(dev, "TX BD ring size for Q[%d]: %d\n", + i, priv->tx_queue[i]->tx_ring_size); + + return 0; + +register_fail: + unmap_group_regs(priv); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); + of_node_put(priv->phy_node); + of_node_put(priv->tbi_node); + free_gfar_dev(priv); + return err; +} + +static int gfar_remove(struct platform_device *ofdev) +{ + struct gfar_private *priv = platform_get_drvdata(ofdev); + + of_node_put(priv->phy_node); + of_node_put(priv->tbi_node); + + unregister_netdev(priv->ndev); + unmap_group_regs(priv); + gfar_free_rx_queues(priv); + gfar_free_tx_queues(priv); + free_gfar_dev(priv); + + return 0; +} + +#ifdef CONFIG_PM + +static int gfar_suspend(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + unsigned long flags; + u32 tempval; + + int magic_packet = priv->wol_en && + (priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + + netif_device_detach(ndev); + + if (netif_running(ndev)) { + + local_irq_save(flags); + lock_tx_qs(priv); + + gfar_halt_nodisable(priv); + + /* Disable Tx, and Rx if wake-on-LAN is disabled. */ + tempval = gfar_read(®s->maccfg1); + + tempval &= ~MACCFG1_TX_EN; + + if (!magic_packet) + tempval &= ~MACCFG1_RX_EN; + + gfar_write(®s->maccfg1, tempval); + + unlock_tx_qs(priv); + local_irq_restore(flags); + + disable_napi(priv); + + if (magic_packet) { + /* Enable interrupt on Magic Packet */ + gfar_write(®s->imask, IMASK_MAG); + + /* Enable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval |= MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); + } else { + phy_stop(priv->phydev); + } + } + + return 0; +} + +static int gfar_resume(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + unsigned long flags; + u32 tempval; + int magic_packet = priv->wol_en && + (priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + + if (!netif_running(ndev)) { + netif_device_attach(ndev); + return 0; + } + + if (!magic_packet && priv->phydev) + phy_start(priv->phydev); + + /* Disable Magic Packet mode, in case something + * else woke us up. 
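/*
 * Illustration only: the suspend path above always clears the Tx enable bit
 * but keeps Rx enabled when magic-packet wake-on-LAN is armed -- a plain
 * read-modify-write on the MAC control value.  Freestanding model with
 * placeholder bit values:
 */
#define FAKE_TX_EN	0x01
#define FAKE_RX_EN	0x02

static unsigned int maccfg_for_suspend(unsigned int val, int magic_packet)
{
	val &= ~FAKE_TX_EN;		/* never transmit while suspended */

	if (!magic_packet)
		val &= ~FAKE_RX_EN;	/* no WOL armed: stop the receiver too */

	return val;
}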
+ */ + local_irq_save(flags); + lock_tx_qs(priv); + + tempval = gfar_read(®s->maccfg2); + tempval &= ~MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); + + gfar_start(priv); + + unlock_tx_qs(priv); + local_irq_restore(flags); + + netif_device_attach(ndev); + + enable_napi(priv); + + return 0; +} + +static int gfar_restore(struct device *dev) +{ + struct gfar_private *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->ndev; + + if (!netif_running(ndev)) { + netif_device_attach(ndev); + + return 0; + } + + if (gfar_init_bds(ndev)) { + free_skb_resources(priv); + return -ENOMEM; + } + + gfar_mac_reset(priv); + + gfar_init_tx_rx_base(priv); + + gfar_start(priv); + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + if (priv->phydev) + phy_start(priv->phydev); + + netif_device_attach(ndev); + enable_napi(priv); + + return 0; +} + +static struct dev_pm_ops gfar_pm_ops = { + .suspend = gfar_suspend, + .resume = gfar_resume, + .freeze = gfar_suspend, + .thaw = gfar_resume, + .restore = gfar_restore, +}; + +#define GFAR_PM_OPS (&gfar_pm_ops) + +#else + +#define GFAR_PM_OPS NULL + +#endif + +/* Reads the controller's registers to determine what interface + * connects it to the PHY. + */ +static phy_interface_t gfar_get_interface(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 ecntrl; + + ecntrl = gfar_read(®s->ecntrl); + + if (ecntrl & ECNTRL_SGMII_MODE) + return PHY_INTERFACE_MODE_SGMII; + + if (ecntrl & ECNTRL_TBI_MODE) { + if (ecntrl & ECNTRL_REDUCED_MODE) + return PHY_INTERFACE_MODE_RTBI; + else + return PHY_INTERFACE_MODE_TBI; + } + + if (ecntrl & ECNTRL_REDUCED_MODE) { + if (ecntrl & ECNTRL_REDUCED_MII_MODE) { + return PHY_INTERFACE_MODE_RMII; + } + else { + phy_interface_t interface = priv->interface; + + /* This isn't autodetected right now, so it must + * be set by the device tree or platform code. + */ + if (interface == PHY_INTERFACE_MODE_RGMII_ID) + return PHY_INTERFACE_MODE_RGMII_ID; + + return PHY_INTERFACE_MODE_RGMII; + } + } + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + return PHY_INTERFACE_MODE_GMII; + + return PHY_INTERFACE_MODE_MII; +} + + +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. + */ +static int init_phy(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + uint gigabit_support = + priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? + GFAR_SUPPORTED_GBIT : 0; + phy_interface_t interface; + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + interface = gfar_get_interface(dev); + + priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!priv->phydev) { + dev_err(&dev->dev, "could not attach to PHY\n"); + return -ENODEV; + } + + if (interface == PHY_INTERFACE_MODE_SGMII) + gfar_configure_serdes(dev); + + /* Remove any features not supported by the controller */ + priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); + priv->phydev->advertising = priv->phydev->supported; + + /* Add support for flow control, but don't advertise it by default */ + priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + + return 0; +} + +/* Initialize TBI PHY interface for communicating with the + * SERDES lynx PHY on the chip. We communicate with this PHY + * through the MDIO bus on each controller, treating it as a + * "normal" PHY at the address found in the TBIPA register. We assume + * that the TBIPA register is valid. 
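/*
 * Illustration only: gfar_get_interface() above is a pure decode of ECNTRL
 * bits into a PHY interface type -- SGMII checked first, then TBI/RTBI,
 * then the reduced-pin variants, falling back to GMII or MII.  A
 * freestanding model of that decision order with stand-in names (the
 * RGMII-ID case, which comes from the device tree rather than ECNTRL, is
 * left out):
 */
enum fake_if { IF_SGMII, IF_TBI, IF_RTBI, IF_RMII, IF_RGMII, IF_GMII, IF_MII };

#define E_SGMII		0x01	/* placeholder bit values */
#define E_TBI		0x02
#define E_REDUCED	0x04
#define E_RMII		0x08

static enum fake_if decode_interface(unsigned int ecntrl, int has_gigabit)
{
	if (ecntrl & E_SGMII)
		return IF_SGMII;

	if (ecntrl & E_TBI)
		return (ecntrl & E_REDUCED) ? IF_RTBI : IF_TBI;

	if (ecntrl & E_REDUCED)
		return (ecntrl & E_RMII) ? IF_RMII : IF_RGMII;

	return has_gigabit ? IF_GMII : IF_MII;
}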
Either the MDIO bus code will set + * it to a value that doesn't conflict with other PHYs on the bus, or the + * value doesn't matter, as there are no other PHYs on the bus. + */ +static void gfar_configure_serdes(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *tbiphy; + + if (!priv->tbi_node) { + dev_warn(&dev->dev, "error: SGMII mode requires that the " + "device tree specify a tbi-handle\n"); + return; + } + + tbiphy = of_phy_find_device(priv->tbi_node); + if (!tbiphy) { + dev_err(&dev->dev, "error: Could not get TBI device\n"); + return; + } + + /* If the link is already up, we must already be ok, and don't need to + * configure and reset the TBI<->SerDes link. Maybe U-Boot configured + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) + return; + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); + + phy_write(tbiphy, MII_ADVERTISE, + ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM); + + phy_write(tbiphy, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | + BMCR_SPEED1000); +} + +static int __gfar_is_rx_idle(struct gfar_private *priv) +{ + u32 res; + + /* Normaly TSEC should not hang on GRS commands, so we should + * actually wait for IEVENT_GRSC flag. + */ + if (!gfar_has_errata(priv, GFAR_ERRATA_A002)) + return 0; + + /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are + * the same as bits 23-30, the eTSEC Rx is assumed to be idle + * and the Rx can be safely reset. + */ + res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c); + res &= 0x7f807f80; + if ((res & 0xffff) == (res >> 16)) + return 1; + + return 0; +} + +/* Halt the receive and transmit queues */ +static void gfar_halt_nodisable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + unsigned int timeout; + int stopped; + + gfar_ints_disable(priv); + + if (gfar_is_dma_stopped(priv)) + return; + + /* Stop the DMA, and wait for it to stop */ + tempval = gfar_read(®s->dmactrl); + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(®s->dmactrl, tempval); + +retry: + timeout = 1000; + while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) { + cpu_relax(); + timeout--; + } + + if (!timeout) + stopped = gfar_is_dma_stopped(priv); + + if (!stopped && !gfar_is_rx_dma_stopped(priv) && + !__gfar_is_rx_idle(priv)) + goto retry; +} + +/* Halt the receive and transmit queues */ +void gfar_halt(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + + /* Dissable the Rx/Tx hw queues */ + gfar_write(®s->rqueue, 0); + gfar_write(®s->tqueue, 0); + + mdelay(10); + + gfar_halt_nodisable(priv); + + /* Disable Rx/Tx DMA */ + tempval = gfar_read(®s->maccfg1); + tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(®s->maccfg1, tempval); +} + +void stop_gfar(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + netif_tx_stop_all_queues(dev); + + smp_mb__before_atomic(); + set_bit(GFAR_DOWN, &priv->state); + smp_mb__after_atomic(); + + disable_napi(priv); + + /* disable ints and gracefully shut down Rx/Tx DMA */ + gfar_halt(priv); + + phy_stop(priv->phydev); + + free_skb_resources(priv); +} + +static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) +{ + struct txbd8 *txbdp; + struct gfar_private *priv = netdev_priv(tx_queue->dev); + int i, j; + + 
txbdp = tx_queue->tx_bd_base; + + for (i = 0; i < tx_queue->tx_ring_size; i++) { + if (!tx_queue->tx_skbuff[i]) + continue; + + dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), DMA_TO_DEVICE); + txbdp->lstatus = 0; + for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; + j++) { + txbdp++; + dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr), + be16_to_cpu(txbdp->length), + DMA_TO_DEVICE); + } + txbdp++; + dev_kfree_skb_any(tx_queue->tx_skbuff[i]); + tx_queue->tx_skbuff[i] = NULL; + } + kfree(tx_queue->tx_skbuff); + tx_queue->tx_skbuff = NULL; +} + +static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) +{ + struct rxbd8 *rxbdp; + struct gfar_private *priv = netdev_priv(rx_queue->dev); + int i; + + rxbdp = rx_queue->rx_bd_base; + + for (i = 0; i < rx_queue->rx_ring_size; i++) { + if (rx_queue->rx_skbuff[i]) { + dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), + priv->rx_buffer_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_queue->rx_skbuff[i]); + rx_queue->rx_skbuff[i] = NULL; + } + rxbdp->lstatus = 0; + rxbdp->bufPtr = 0; + rxbdp++; + } + kfree(rx_queue->rx_skbuff); + rx_queue->rx_skbuff = NULL; +} + +/* If there are any tx skbs or rx skbs still around, free them. + * Then free tx_skbuff and rx_skbuff + */ +static void free_skb_resources(struct gfar_private *priv) +{ + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + + /* Go through all the buffer descriptors and free their data buffers */ + for (i = 0; i < priv->num_tx_queues; i++) { + struct netdev_queue *txq; + + tx_queue = priv->tx_queue[i]; + txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex); + if (tx_queue->tx_skbuff) + free_skb_tx_queue(tx_queue); + netdev_tx_reset_queue(txq); + } + + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + if (rx_queue->rx_skbuff) + free_skb_rx_queue(rx_queue); + } + + dma_free_coherent(priv->dev, + sizeof(struct txbd8) * priv->total_tx_ring_size + + sizeof(struct rxbd8) * priv->total_rx_ring_size, + priv->tx_queue[0]->tx_bd_base, + priv->tx_queue[0]->tx_bd_dma_base); +} + +void gfar_start(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + int i = 0; + + /* Enable Rx/Tx hw queues */ + gfar_write(®s->rqueue, priv->rqueue); + gfar_write(®s->tqueue, priv->tqueue); + + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(®s->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(®s->dmactrl, tempval); + + /* Make sure we aren't stopped */ + tempval = gfar_read(®s->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(®s->dmactrl, tempval); + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear THLT/RHLT, so that the DMA starts polling now */ + gfar_write(®s->tstat, priv->gfargrp[i].tstat); + gfar_write(®s->rstat, priv->gfargrp[i].rstat); + } + + /* Enable Rx/Tx DMA */ + tempval = gfar_read(®s->maccfg1); + tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(®s->maccfg1, tempval); + + gfar_ints_enable(priv); + + priv->ndev->trans_start = jiffies; /* prevent tx timeout */ +} + +static void free_grp_irqs(struct gfar_priv_grp *grp) +{ + free_irq(gfar_irq(grp, TX)->irq, grp); + free_irq(gfar_irq(grp, RX)->irq, grp); + free_irq(gfar_irq(grp, ER)->irq, grp); +} + +static int register_grp_irqs(struct gfar_priv_grp *grp) +{ + struct gfar_private *priv = grp->priv; + struct net_device *dev = priv->ndev; + int err; + + /* If the device has multiple interrupts, 
register for + * them. Otherwise, only register for the one + */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + /* Install our interrupt handlers for Error, + * Transmit, and Receive + */ + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, + gfar_irq(grp, ER)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, ER)->irq); + + goto err_irq_fail; + } + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, TX)->irq); + goto tx_irq_fail; + } + err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0, + gfar_irq(grp, RX)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, RX)->irq); + goto rx_irq_fail; + } + } else { + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, + gfar_irq(grp, TX)->name, grp); + if (err < 0) { + netif_err(priv, intr, dev, "Can't get IRQ %d\n", + gfar_irq(grp, TX)->irq); + goto err_irq_fail; + } + } + + return 0; + +rx_irq_fail: + free_irq(gfar_irq(grp, TX)->irq, grp); +tx_irq_fail: + free_irq(gfar_irq(grp, ER)->irq, grp); +err_irq_fail: + return err; + +} + +static void gfar_free_irq(struct gfar_private *priv) +{ + int i; + + /* Free the IRQs */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + for (i = 0; i < priv->num_grps; i++) + free_grp_irqs(&priv->gfargrp[i]); + } else { + for (i = 0; i < priv->num_grps; i++) + free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, + &priv->gfargrp[i]); + } +} + +static int gfar_request_irq(struct gfar_private *priv) +{ + int err, i, j; + + for (i = 0; i < priv->num_grps; i++) { + err = register_grp_irqs(&priv->gfargrp[i]); + if (err) { + for (j = 0; j < i; j++) + free_grp_irqs(&priv->gfargrp[j]); + return err; + } + } + + return 0; +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + int err; + + gfar_mac_reset(priv); + + err = gfar_alloc_skb_resources(ndev); + if (err) + return err; + + gfar_init_tx_rx_base(priv); + + smp_mb__before_atomic(); + clear_bit(GFAR_DOWN, &priv->state); + smp_mb__after_atomic(); + + /* Start Rx/Tx DMA and enable the interrupts */ + gfar_start(priv); + + phy_start(priv->phydev); + + enable_napi(priv); + + netif_tx_wake_all_queues(ndev); + + return 0; +} + +/* Called when something needs to use the ethernet device + * Returns 0 for success. + */ +static int gfar_enet_open(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + int err; + + err = init_phy(dev); + if (err) + return err; + + err = gfar_request_irq(priv); + if (err) + return err; + + err = startup_gfar(dev); + if (err) + return err; + + device_set_wakeup_enable(&dev->dev, priv->wol_en); + + return err; +} + +static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb) +{ + struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN); + + memset(fcb, 0, GMAC_FCB_LEN); + + return fcb; +} + +static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, + int fcb_length) +{ + /* If we're here, it's a IP packet with a TCP or UDP + * payload. 
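+ * (For reference, the 8-byte FCB written here is struct txfcb from
+ * gianfar.h: flags, ptp, l4os, l3os, phcs and vlctl.)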
We set it to checksum, using a pseudo-header + * we provide + */ + u8 flags = TXFCB_DEFAULT; + + /* Tell the controller what the protocol is + * And provide the already calculated phcs + */ + if (ip_hdr(skb)->protocol == IPPROTO_UDP) { + flags |= TXFCB_UDP; + fcb->phcs = (__force __be16)(udp_hdr(skb)->check); + } else + fcb->phcs = (__force __be16)(tcp_hdr(skb)->check); + + /* l3os is the distance between the start of the + * frame (skb->data) and the start of the IP hdr. + * l4os is the distance between the start of the + * l3 hdr and the l4 hdr + */ + fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length); + fcb->l4os = skb_network_header_len(skb); + + fcb->flags = flags; +} + +void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) +{ + fcb->flags |= TXFCB_VLN; + fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); +} + +static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride, + struct txbd8 *base, int ring_size) +{ + struct txbd8 *new_bd = bdp + stride; + + return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd; +} + +static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base, + int ring_size) +{ + return skip_txbd(bdp, 1, base, ring_size); +} + +/* eTSEC12: csum generation not supported for some fcb offsets */ +static inline bool gfar_csum_errata_12(struct gfar_private *priv, + unsigned long fcb_addr) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_12) && + (fcb_addr % 0x20) > 0x18); +} + +/* eTSEC76: csum generation for frames larger than 2500 may + * cause excess delays before start of transmission + */ +static inline bool gfar_csum_errata_76(struct gfar_private *priv, + unsigned int len) +{ + return (gfar_has_errata(priv, GFAR_ERRATA_76) && + (len > 2500)); +} + +/* This is called by the kernel when a frame is ready for transmission. 
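+ * (Each frame consumes one BD for the linear data plus one per page
+ * fragment, and one extra BD when a hardware Tx timestamp is requested;
+ * see the nr_txbds computation below.)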
+ * It is pointed to by the dev->hard_start_xmit function pointer + */ +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct netdev_queue *txq; + struct gfar __iomem *regs = NULL; + struct txfcb *fcb = NULL; + struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; + u32 lstatus; + int i, rq = 0; + int do_tstamp, do_csum, do_vlan; + u32 bufaddr; + unsigned long flags; + unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; + + rq = skb->queue_mapping; + tx_queue = priv->tx_queue[rq]; + txq = netdev_get_tx_queue(dev, rq); + base = tx_queue->tx_bd_base; + regs = tx_queue->grp->regs; + + do_csum = (CHECKSUM_PARTIAL == skb->ip_summed); + do_vlan = skb_vlan_tag_present(skb); + do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en; + + if (do_csum || do_vlan) + fcb_len = GMAC_FCB_LEN; + + /* check if time stamp should be generated */ + if (unlikely(do_tstamp)) + fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; + + /* make space for additional header when fcb is needed */ + if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, fcb_len); + if (!skb_new) { + dev->stats.tx_errors++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->sk) + skb_set_owner_w(skb_new, skb->sk); + dev_consume_skb_any(skb); + skb = skb_new; + } + + /* total number of fragments in the SKB */ + nr_frags = skb_shinfo(skb)->nr_frags; + + /* calculate the required number of TxBDs for this skb */ + if (unlikely(do_tstamp)) + nr_txbds = nr_frags + 2; + else + nr_txbds = nr_frags + 1; + + /* check if there is space to queue this packet */ + if (nr_txbds > tx_queue->num_txbdfree) { + /* no space, stop the queue */ + netif_tx_stop_queue(txq); + dev->stats.tx_fifo_errors++; + return NETDEV_TX_BUSY; + } + + /* Update transmit stats */ + bytes_sent = skb->len; + tx_queue->stats.tx_bytes += bytes_sent; + /* keep Tx bytes on wire for BQL accounting */ + GFAR_CB(skb)->bytes_sent = bytes_sent; + tx_queue->stats.tx_packets++; + + txbdp = txbdp_start = tx_queue->cur_tx; + lstatus = be32_to_cpu(txbdp->lstatus); + + /* Time stamp insertion requires one additional TxBD */ + if (unlikely(do_tstamp)) + txbdp_tstamp = txbdp = next_txbd(txbdp, base, + tx_queue->tx_ring_size); + + if (nr_frags == 0) { + if (unlikely(do_tstamp)) { + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); + + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); + } else { + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + } + } else { + /* Place the fragment addresses and lengths into the TxBDs */ + for (i = 0; i < nr_frags; i++) { + unsigned int frag_len; + /* Point at the next BD, wrapping as needed */ + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + + frag_len = skb_shinfo(skb)->frags[i].size; + + lstatus = be32_to_cpu(txbdp->lstatus) | frag_len | + BD_LFLAG(TXBD_READY); + + /* Handle the last BD specially */ + if (i == nr_frags - 1) + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); + + bufaddr = skb_frag_dma_map(priv->dev, + &skb_shinfo(skb)->frags[i], + 0, + frag_len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; + + /* set the TxBD length and buffer pointer */ + txbdp->bufPtr = cpu_to_be32(bufaddr); + txbdp->lstatus = cpu_to_be32(lstatus); + } + + lstatus = be32_to_cpu(txbdp_start->lstatus); + } + + /* Add TxPAL between FCB 
and frame if required */ + if (unlikely(do_tstamp)) { + skb_push(skb, GMAC_TXPAL_LEN); + memset(skb->data, 0, GMAC_TXPAL_LEN); + } + + /* Add TxFCB if required */ + if (fcb_len) { + fcb = gfar_add_fcb(skb); + lstatus |= BD_LFLAG(TXBD_TOE); + } + + /* Set up checksumming */ + if (do_csum) { + gfar_tx_checksum(skb, fcb, fcb_len); + + if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || + unlikely(gfar_csum_errata_76(priv, skb->len))) { + __skb_pull(skb, GMAC_FCB_LEN); + skb_checksum_help(skb); + if (do_vlan || do_tstamp) { + /* put back a new fcb for vlan/tstamp TOE */ + fcb = gfar_add_fcb(skb); + } else { + /* Tx TOE not used */ + lstatus &= ~(BD_LFLAG(TXBD_TOE)); + fcb = NULL; + } + } + } + + if (do_vlan) + gfar_tx_vlan(skb, fcb); + + /* Setup tx hardware time stamping if requested */ + if (unlikely(do_tstamp)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + fcb->ptp = 1; + } + + bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; + + txbdp_start->bufPtr = cpu_to_be32(bufaddr); + + /* If time stamping is requested one additional TxBD must be set up. The + * first TxBD points to the FCB and must have a data length of + * GMAC_FCB_LEN. The second TxBD points to the actual frame data with + * the full frame length. + */ + if (unlikely(do_tstamp)) { + u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); + + bufaddr = be32_to_cpu(txbdp_start->bufPtr); + bufaddr += fcb_len; + lstatus_ts |= BD_LFLAG(TXBD_READY) | + (skb_headlen(skb) - fcb_len); + + txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); + txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; + } else { + lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); + } + + netdev_tx_sent_queue(txq, bytes_sent); + + /* We can work in parallel with gfar_clean_tx_ring(), except + * when modifying num_txbdfree. Note that we didn't grab the lock + * when we were reading the num_txbdfree and checking for available + * space, that's because outside of this function it can only grow, + * and once we've got needed space, it cannot suddenly disappear. + * + * The lock also protects us from gfar_error(), which can modify + * regs->tstat and thus retrigger the transfers, which is why we + * also must grab the lock before setting ready bit for the first + * to be transmitted BD. + */ + spin_lock_irqsave(&tx_queue->txlock, flags); + + gfar_wmb(); + + txbdp_start->lstatus = cpu_to_be32(lstatus); + + gfar_wmb(); /* force lstatus write before tx_skbuff */ + + tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb; + + /* Update the current skb pointer to the next entry we will use + * (wrapping if necessary) + */ + tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) & + TX_RING_MOD_MASK(tx_queue->tx_ring_size); + + tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); + + /* reduce TxBD free count */ + tx_queue->num_txbdfree -= (nr_txbds); + + /* If the next BD still needs to be cleaned up, then the bds + * are full. We need to tell the kernel to stop sending us stuff. 
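+ * (gfar_clean_tx_ring() wakes the queue again once completed BDs have
+ * been reclaimed and num_txbdfree is non-zero.)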
+ */ + if (!tx_queue->num_txbdfree) { + netif_tx_stop_queue(txq); + + dev->stats.tx_fifo_errors++; + } + + /* Tell the DMA to go go go */ + gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); + + /* Unlock priv */ + spin_unlock_irqrestore(&tx_queue->txlock, flags); + + return NETDEV_TX_OK; + +dma_map_err: + txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size); + if (do_tstamp) + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + for (i = 0; i < nr_frags; i++) { + lstatus = be32_to_cpu(txbdp->lstatus); + if (!(lstatus & BD_LFLAG(TXBD_READY))) + break; + + lstatus &= ~BD_LFLAG(TXBD_READY); + txbdp->lstatus = cpu_to_be32(lstatus); + bufaddr = be32_to_cpu(txbdp->bufPtr); + dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length), + DMA_TO_DEVICE); + txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); + } + gfar_wmb(); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/* Stops the kernel queue, and halts the controller */ +static int gfar_close(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + cancel_work_sync(&priv->reset_task); + stop_gfar(dev); + + /* Disconnect from the PHY */ + phy_disconnect(priv->phydev); + priv->phydev = NULL; + + gfar_free_irq(priv); + + return 0; +} + +/* Changes the mac address if the controller is not running. */ +static int gfar_set_mac_address(struct net_device *dev) +{ + gfar_set_mac_for_addr(dev, 0, dev->dev_addr); + + return 0; +} + +static int gfar_change_mtu(struct net_device *dev, int new_mtu) +{ + struct gfar_private *priv = netdev_priv(dev); + int frame_size = new_mtu + ETH_HLEN; + + if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + netif_err(priv, drv, dev, "Invalid MTU setting\n"); + return -EINVAL; + } + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + if (dev->flags & IFF_UP) + stop_gfar(dev); + + dev->mtu = new_mtu; + + if (dev->flags & IFF_UP) + startup_gfar(dev); + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + + return 0; +} + +void reset_gfar(struct net_device *ndev) +{ + struct gfar_private *priv = netdev_priv(ndev); + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + stop_gfar(ndev); + startup_gfar(ndev); + + clear_bit_unlock(GFAR_RESETTING, &priv->state); +} + +/* gfar_reset_task gets scheduled when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. + */ +static void gfar_reset_task(struct work_struct *work) +{ + struct gfar_private *priv = container_of(work, struct gfar_private, + reset_task); + reset_gfar(priv->ndev); +} + +static void gfar_timeout(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + dev->stats.tx_errors++; + schedule_work(&priv->reset_task); +} + +static void gfar_align_skb(struct sk_buff *skb) +{ + /* We need the data buffer to be aligned properly. 
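+ * (RXBUF_ALIGNMENT is 64 in this driver; as a worked example, a data
+ * pointer ending in 0x04 gets 60 bytes reserved, while an already
+ * aligned pointer gets a full 64.)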
We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, RXBUF_ALIGNMENT - + (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); +} + +/* Interrupt Handler for Transmit complete */ +static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) +{ + struct net_device *dev = tx_queue->dev; + struct netdev_queue *txq; + struct gfar_private *priv = netdev_priv(dev); + struct txbd8 *bdp, *next = NULL; + struct txbd8 *lbdp = NULL; + struct txbd8 *base = tx_queue->tx_bd_base; + struct sk_buff *skb; + int skb_dirtytx; + int tx_ring_size = tx_queue->tx_ring_size; + int frags = 0, nr_txbds = 0; + int i; + int howmany = 0; + int tqi = tx_queue->qindex; + unsigned int bytes_sent = 0; + u32 lstatus; + size_t buflen; + + txq = netdev_get_tx_queue(dev, tqi); + bdp = tx_queue->dirty_tx; + skb_dirtytx = tx_queue->skb_dirtytx; + + while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { + unsigned long flags; + + frags = skb_shinfo(skb)->nr_frags; + + /* When time stamping, one additional TxBD must be freed. + * Also, we need to dma_unmap_single() the TxPAL. + */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + nr_txbds = frags + 2; + else + nr_txbds = frags + 1; + + lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size); + + lstatus = be32_to_cpu(lbdp->lstatus); + + /* Only clean completed frames */ + if ((lstatus & BD_LFLAG(TXBD_READY)) && + (lstatus & BD_LENGTH_MASK)) + break; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + next = next_txbd(bdp, base, tx_ring_size); + buflen = be16_to_cpu(next->length) + + GMAC_FCB_LEN + GMAC_TXPAL_LEN; + } else + buflen = be16_to_cpu(bdp->length); + + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), + buflen, DMA_TO_DEVICE); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + struct skb_shared_hwtstamps shhwtstamps; + u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(*ns); + skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); + skb_tstamp_tx(skb, &shhwtstamps); + gfar_clear_txbd_status(bdp); + bdp = next; + } + + gfar_clear_txbd_status(bdp); + bdp = next_txbd(bdp, base, tx_ring_size); + + for (i = 0; i < frags; i++) { + dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr), + be16_to_cpu(bdp->length), + DMA_TO_DEVICE); + gfar_clear_txbd_status(bdp); + bdp = next_txbd(bdp, base, tx_ring_size); + } + + bytes_sent += GFAR_CB(skb)->bytes_sent; + + dev_kfree_skb_any(skb); + + tx_queue->tx_skbuff[skb_dirtytx] = NULL; + + skb_dirtytx = (skb_dirtytx + 1) & + TX_RING_MOD_MASK(tx_ring_size); + + howmany++; + spin_lock_irqsave(&tx_queue->txlock, flags); + tx_queue->num_txbdfree += nr_txbds; + spin_unlock_irqrestore(&tx_queue->txlock, flags); + } + + /* If we freed a buffer, we can restart transmission, if necessary */ + if (tx_queue->num_txbdfree && + netif_tx_queue_stopped(txq) && + !(test_bit(GFAR_DOWN, &priv->state))) + netif_wake_subqueue(priv->ndev, tqi); + + /* Update dirty indicators */ + tx_queue->skb_dirtytx = skb_dirtytx; + tx_queue->dirty_tx = bdp; + + netdev_tx_completed_queue(txq, howmany, bytes_sent); +} + +static struct sk_buff *gfar_alloc_skb(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); + if (!skb) + return NULL; + + gfar_align_skb(skb); + + return skb; +} + +static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) +{ + struct gfar_private 
*priv = netdev_priv(dev); + struct sk_buff *skb; + dma_addr_t addr; + + skb = gfar_alloc_skb(dev); + if (!skb) + return NULL; + + addr = dma_map_single(priv->dev, skb->data, + priv->rx_buffer_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, addr))) { + dev_kfree_skb_any(skb); + return NULL; + } + + *bufaddr = addr; + return skb; +} + +static inline void count_errors(unsigned short status, struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct gfar_extra_stats *estats = &priv->extra_stats; + + /* If the packet was truncated, none of the other errors matter */ + if (status & RXBD_TRUNCATED) { + stats->rx_length_errors++; + + atomic64_inc(&estats->rx_trunc); + + return; + } + /* Count the errors, if there were any */ + if (status & (RXBD_LARGE | RXBD_SHORT)) { + stats->rx_length_errors++; + + if (status & RXBD_LARGE) + atomic64_inc(&estats->rx_large); + else + atomic64_inc(&estats->rx_short); + } + if (status & RXBD_NONOCTET) { + stats->rx_frame_errors++; + atomic64_inc(&estats->rx_nonoctet); + } + if (status & RXBD_CRCERR) { + atomic64_inc(&estats->rx_crcerr); + stats->rx_crc_errors++; + } + if (status & RXBD_OVERRUN) { + atomic64_inc(&estats->rx_overrun); + stats->rx_crc_errors++; + } +} + +irqreturn_t gfar_receive(int irq, void *grp_id) +{ + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask; + + if (likely(napi_schedule_prep(&grp->napi_rx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_RX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_rx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. + */ + gfar_write(&grp->regs->ievent, IEVENT_RX_MASK); + } + + return IRQ_HANDLED; +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *grp_id) +{ + struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; + unsigned long flags; + u32 imask; + + if (likely(napi_schedule_prep(&grp->napi_tx))) { + spin_lock_irqsave(&grp->grplock, flags); + imask = gfar_read(&grp->regs->imask); + imask &= IMASK_TX_DISABLED; + gfar_write(&grp->regs->imask, imask); + spin_unlock_irqrestore(&grp->grplock, flags); + __napi_schedule(&grp->napi_tx); + } else { + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived. + */ + gfar_write(&grp->regs->ievent, IEVENT_TX_MASK); + } + + return IRQ_HANDLED; +} + +static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) +{ + /* If valid headers were found, and valid sums + * were verified, then we tell the kernel that no + * checksumming is necessary. Otherwise, it is [FIXME] + */ + if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) == + (RXFCB_CIP | RXFCB_CTU)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. 
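+ * (Buffer layout consumed below: an optional 8-byte Rx FCB at the head of
+ * the buffer, the hardware Rx timestamp right after it when hwts_rx_en is
+ * set, and then the frame itself after priv->padding bytes.)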
*/ +static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int amount_pull, struct napi_struct *napi) +{ + struct gfar_private *priv = netdev_priv(dev); + struct rxfcb *fcb = NULL; + + /* fcb is at the beginning if exists */ + fcb = (struct rxfcb *)skb->data; + + /* Remove the FCB from the skb + * Remove the padded bytes, if there are any + */ + if (amount_pull) { + skb_record_rx_queue(skb, fcb->rq); + skb_pull(skb, amount_pull); + } + + /* Get receive timestamp from the skb */ + if (priv->hwts_rx_en) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + u64 *ns = (u64 *) skb->data; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(*ns); + } + + if (priv->padding) + skb_pull(skb, priv->padding); + + if (dev->features & NETIF_F_RXCSUM) + gfar_rx_checksum(skb, fcb); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, dev); + + /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. + * Even if vlan rx accel is disabled, on some chips + * RXFCB_VLN is pseudo randomly set. + */ + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && + be16_to_cpu(fcb->flags) & RXFCB_VLN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(fcb->vlctl)); + + /* Send the packet up the stack */ + napi_gro_receive(napi, skb); + +} + +/* gfar_clean_rx_ring() -- Processes each frame in the rx ring + * until the budget/quota has been reached. Returns the number + * of frames handled + */ +int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) +{ + struct net_device *dev = rx_queue->dev; + struct rxbd8 *bdp, *base; + struct sk_buff *skb; + int pkt_len; + int amount_pull; + int howmany = 0; + struct gfar_private *priv = netdev_priv(dev); + + /* Get the first full descriptor */ + bdp = rx_queue->cur_rx; + base = rx_queue->rx_bd_base; + + amount_pull = priv->uses_rxfcb ? 
GMAC_FCB_LEN : 0; + + while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { + struct sk_buff *newskb; + dma_addr_t bufaddr; + + rmb(); + + /* Add another skb for the future */ + newskb = gfar_new_skb(dev, &bufaddr); + + skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; + + dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), + priv->rx_buffer_size, DMA_FROM_DEVICE); + + if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && + be16_to_cpu(bdp->length) > priv->rx_buffer_size)) + bdp->status = cpu_to_be16(RXBD_LARGE); + + /* We drop the frame if we failed to allocate a new buffer */ + if (unlikely(!newskb || + !(be16_to_cpu(bdp->status) & RXBD_LAST) || + be16_to_cpu(bdp->status) & RXBD_ERR)) { + count_errors(be16_to_cpu(bdp->status), dev); + + if (unlikely(!newskb)) { + newskb = skb; + bufaddr = be32_to_cpu(bdp->bufPtr); + } else if (skb) + dev_kfree_skb(skb); + } else { + /* Increment the number of packets */ + rx_queue->stats.rx_packets++; + howmany++; + + if (likely(skb)) { + pkt_len = be16_to_cpu(bdp->length) - + ETH_FCS_LEN; + /* Remove the FCS from the packet length */ + skb_put(skb, pkt_len); + rx_queue->stats.rx_bytes += pkt_len; + skb_record_rx_queue(skb, rx_queue->qindex); + gfar_process_frame(dev, skb, amount_pull, + &rx_queue->grp->napi_rx); + + } else { + netif_warn(priv, rx_err, dev, "Missing skb!\n"); + rx_queue->stats.rx_dropped++; + atomic64_inc(&priv->extra_stats.rx_skbmissing); + } + + } + + rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; + + /* Setup the new bdp */ + gfar_init_rxbdp(rx_queue, bdp, bufaddr); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) + gfar_write(rx_queue->rfbptr, (u32)bdp); + + /* Update to the next pointer */ + bdp = next_bd(bdp, base, rx_queue->rx_ring_size); + + /* update to point at the next skb */ + rx_queue->skb_currx = (rx_queue->skb_currx + 1) & + RX_RING_MOD_MASK(rx_queue->rx_ring_size); + } + + /* Update the current rxbd pointer to be the next one */ + rx_queue->cur_rx = bdp; + + return howmany; +} + +static int gfar_poll_rx_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_rx); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue; + int work_done = 0; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(®s->ievent, IEVENT_RX_MASK); + + work_done = gfar_clean_rx_ring(rx_queue, budget); + + if (work_done < budget) { + u32 imask; + napi_complete(napi); + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(®s->imask); + imask |= IMASK_RX_DEFAULT; + gfar_write(®s->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + } + + return work_done; +} + +static int gfar_poll_tx_sq(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_tx); + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue; + u32 imask; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(®s->ievent, IEVENT_TX_MASK); + + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) + gfar_clean_tx_ring(tx_queue); + + napi_complete(napi); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(®s->imask); + imask |= IMASK_TX_DEFAULT; 
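+ /* Worked bit math, using the IMASK_* values from gianfar.h:
+ * IMASK_TX_DEFAULT = IMASK_TXFEN | IMASK_TXBEN = 0x00300000, so OR-ing
+ * it back in undoes the IMASK_TX_DISABLED masking applied in
+ * gfar_transmit() and re-arms Tx interrupts for this group.
+ */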
+ gfar_write(®s->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + + return 0; +} + +static int gfar_poll_rx(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_rx); + struct gfar_private *priv = gfargrp->priv; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_rx_q *rx_queue = NULL; + int work_done = 0, work_done_per_q = 0; + int i, budget_per_q = 0; + unsigned long rstat_rxf; + int num_act_queues; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(®s->ievent, IEVENT_RX_MASK); + + rstat_rxf = gfar_read(®s->rstat) & RSTAT_RXF_MASK; + + num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS); + if (num_act_queues) + budget_per_q = budget/num_act_queues; + + for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { + /* skip queue if not active */ + if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) + continue; + + rx_queue = priv->rx_queue[i]; + work_done_per_q = + gfar_clean_rx_ring(rx_queue, budget_per_q); + work_done += work_done_per_q; + + /* finished processing this queue */ + if (work_done_per_q < budget_per_q) { + /* clear active queue hw indication */ + gfar_write(®s->rstat, + RSTAT_CLEAR_RXF0 >> i); + num_act_queues--; + + if (!num_act_queues) + break; + } + } + + if (!num_act_queues) { + u32 imask; + napi_complete(napi); + + /* Clear the halt bit in RSTAT */ + gfar_write(®s->rstat, gfargrp->rstat); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(®s->imask); + imask |= IMASK_RX_DEFAULT; + gfar_write(®s->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + } + + return work_done; +} + +static int gfar_poll_tx(struct napi_struct *napi, int budget) +{ + struct gfar_priv_grp *gfargrp = + container_of(napi, struct gfar_priv_grp, napi_tx); + struct gfar_private *priv = gfargrp->priv; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_priv_tx_q *tx_queue = NULL; + int has_tx_work = 0; + int i; + + /* Clear IEVENT, so interrupts aren't called again + * because of the packets that have already arrived + */ + gfar_write(®s->ievent, IEVENT_TX_MASK); + + for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { + tx_queue = priv->tx_queue[i]; + /* run Tx cleanup to completion */ + if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { + gfar_clean_tx_ring(tx_queue); + has_tx_work = 1; + } + } + + if (!has_tx_work) { + u32 imask; + napi_complete(napi); + + spin_lock_irq(&gfargrp->grplock); + imask = gfar_read(®s->imask); + imask |= IMASK_TX_DEFAULT; + gfar_write(®s->imask, imask); + spin_unlock_irq(&gfargrp->grplock); + } + + return 0; +} + + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
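+ * (In practice this just masks the group's IRQ lines and calls
+ * gfar_interrupt() directly, so Rx/Tx completion is handled inline.)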
+ */ +static void gfar_netpoll(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + int i; + + /* If the device has multiple interrupts, run tx/rx */ + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { + for (i = 0; i < priv->num_grps; i++) { + struct gfar_priv_grp *grp = &priv->gfargrp[i]; + + disable_irq(gfar_irq(grp, TX)->irq); + disable_irq(gfar_irq(grp, RX)->irq); + disable_irq(gfar_irq(grp, ER)->irq); + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); + enable_irq(gfar_irq(grp, ER)->irq); + enable_irq(gfar_irq(grp, RX)->irq); + enable_irq(gfar_irq(grp, TX)->irq); + } + } else { + for (i = 0; i < priv->num_grps; i++) { + struct gfar_priv_grp *grp = &priv->gfargrp[i]; + + disable_irq(gfar_irq(grp, TX)->irq); + gfar_interrupt(gfar_irq(grp, TX)->irq, grp); + enable_irq(gfar_irq(grp, TX)->irq); + } + } +} +#endif + +/* The interrupt handler for devices with one interrupt */ +static irqreturn_t gfar_interrupt(int irq, void *grp_id) +{ + struct gfar_priv_grp *gfargrp = grp_id; + + /* Save ievent for future reference */ + u32 events = gfar_read(&gfargrp->regs->ievent); + + /* Check for reception */ + if (events & IEVENT_RX_MASK) + gfar_receive(irq, grp_id); + + /* Check for transmit completion */ + if (events & IEVENT_TX_MASK) + gfar_transmit(irq, grp_id); + + /* Check for errors */ + if (events & IEVENT_ERR_MASK) + gfar_error(irq, grp_id); + + return IRQ_HANDLED; +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the phydev structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (unlikely(phydev->link != priv->oldlink || + (phydev->link && (phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)))) + gfar_update_link_state(priv); +} + +/* Update the hash table based on the current list of multicast + * addresses we subscribe to. 
Also, change the promiscuity of + * the device based on the flags (this function is called + * whenever dev->flags is changed + */ +static void gfar_set_multi(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + + if (dev->flags & IFF_PROMISC) { + /* Set RCTRL to PROM */ + tempval = gfar_read(®s->rctrl); + tempval |= RCTRL_PROM; + gfar_write(®s->rctrl, tempval); + } else { + /* Set RCTRL to not PROM */ + tempval = gfar_read(®s->rctrl); + tempval &= ~(RCTRL_PROM); + gfar_write(®s->rctrl, tempval); + } + + if (dev->flags & IFF_ALLMULTI) { + /* Set the hash to rx all multicast frames */ + gfar_write(®s->igaddr0, 0xffffffff); + gfar_write(®s->igaddr1, 0xffffffff); + gfar_write(®s->igaddr2, 0xffffffff); + gfar_write(®s->igaddr3, 0xffffffff); + gfar_write(®s->igaddr4, 0xffffffff); + gfar_write(®s->igaddr5, 0xffffffff); + gfar_write(®s->igaddr6, 0xffffffff); + gfar_write(®s->igaddr7, 0xffffffff); + gfar_write(®s->gaddr0, 0xffffffff); + gfar_write(®s->gaddr1, 0xffffffff); + gfar_write(®s->gaddr2, 0xffffffff); + gfar_write(®s->gaddr3, 0xffffffff); + gfar_write(®s->gaddr4, 0xffffffff); + gfar_write(®s->gaddr5, 0xffffffff); + gfar_write(®s->gaddr6, 0xffffffff); + gfar_write(®s->gaddr7, 0xffffffff); + } else { + int em_num; + int idx; + + /* zero out the hash */ + gfar_write(®s->igaddr0, 0x0); + gfar_write(®s->igaddr1, 0x0); + gfar_write(®s->igaddr2, 0x0); + gfar_write(®s->igaddr3, 0x0); + gfar_write(®s->igaddr4, 0x0); + gfar_write(®s->igaddr5, 0x0); + gfar_write(®s->igaddr6, 0x0); + gfar_write(®s->igaddr7, 0x0); + gfar_write(®s->gaddr0, 0x0); + gfar_write(®s->gaddr1, 0x0); + gfar_write(®s->gaddr2, 0x0); + gfar_write(®s->gaddr3, 0x0); + gfar_write(®s->gaddr4, 0x0); + gfar_write(®s->gaddr5, 0x0); + gfar_write(®s->gaddr6, 0x0); + gfar_write(®s->gaddr7, 0x0); + + /* If we have extended hash tables, we need to + * clear the exact match registers to prepare for + * setting them + */ + if (priv->extended_hash) { + em_num = GFAR_EM_NUM + 1; + gfar_clear_exact_match(dev); + idx = 1; + } else { + idx = 0; + em_num = 0; + } + + if (netdev_mc_empty(dev)) + return; + + /* Parse the list, and set the appropriate bits */ + netdev_for_each_mc_addr(ha, dev) { + if (idx < em_num) { + gfar_set_mac_for_addr(dev, idx, ha->addr); + idx++; + } else + gfar_set_hash_for_addr(dev, ha->addr); + } + } +} + + +/* Clears each of the exact match registers to zero, so they + * don't interfere with normal reception + */ +static void gfar_clear_exact_match(struct net_device *dev) +{ + int idx; + static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; + + for (idx = 1; idx < GFAR_EM_NUM + 1; idx++) + gfar_set_mac_for_addr(dev, idx, zero_arr); +} + +/* Set the appropriate hash bit for the given addr */ +/* The algorithm works like so: + * 1) Take the Destination Address (ie the multicast address), and + * do a CRC on it (little endian), and reverse the bits of the + * result. + * 2) Use the 8 most significant bits as a hash into a 256-entry + * table. The table is controlled through 8 32-bit registers: + * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is + * gaddr7. This means that the 3 most significant bits in the + * hash index which gaddr register to use, and the 5 other bits + * indicate which bit (assuming an IBM numbering scheme, which + * for PowerPC (tm) is usually the case) in the register holds + * the entry. 
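+ * For example (a sketch assuming the non-extended 8-register table, i.e.
+ * hash_width == 8): if the 8 most significant bits of the CRC result are
+ * 0xA7 (binary 101 00111), the top 3 bits select gaddr5 and the low 5
+ * bits select bit 7, so gfar_set_hash_for_addr() below ORs
+ * 1 << (31 - 7) = 0x01000000 into that register.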
+ */ +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) +{ + u32 tempval; + struct gfar_private *priv = netdev_priv(dev); + u32 result = ether_crc(ETH_ALEN, addr); + int width = priv->hash_width; + u8 whichbit = (result >> (32 - width)) & 0x1f; + u8 whichreg = result >> (32 - width + 5); + u32 value = (1 << (31-whichbit)); + + tempval = gfar_read(priv->hash_regs[whichreg]); + tempval |= value; + gfar_write(priv->hash_regs[whichreg], tempval); +} + + +/* There are multiple MAC Address register pairs on some controllers + * This function sets the numth pair to a given address + */ +static void gfar_set_mac_for_addr(struct net_device *dev, int num, + const u8 *addr) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + u32 __iomem *macptr = ®s->macstnaddr1; + + macptr += num*2; + + /* For a station address of 0x12345678ABCD in transmission + * order (BE), MACnADDR1 is set to 0xCDAB7856 and + * MACnADDR2 is set to 0x34120000. + */ + tempval = (addr[5] << 24) | (addr[4] << 16) | + (addr[3] << 8) | addr[2]; + + gfar_write(macptr, tempval); + + tempval = (addr[1] << 24) | (addr[0] << 16); + + gfar_write(macptr+1, tempval); +} + +/* GFAR error interrupt handler */ +static irqreturn_t gfar_error(int irq, void *grp_id) +{ + struct gfar_priv_grp *gfargrp = grp_id; + struct gfar __iomem *regs = gfargrp->regs; + struct gfar_private *priv= gfargrp->priv; + struct net_device *dev = priv->ndev; + + /* Save ievent for future reference */ + u32 events = gfar_read(®s->ievent); + + /* Clear IEVENT */ + gfar_write(®s->ievent, events & IEVENT_ERR_MASK); + + /* Magic Packet is not an error. */ + if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && + (events & IEVENT_MAG)) + events &= ~IEVENT_MAG; + + /* Hmm... 
*/ + if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) + netdev_dbg(dev, + "error interrupt (ievent=0x%08x imask=0x%08x)\n", + events, gfar_read(®s->imask)); + + /* Update the error counters */ + if (events & IEVENT_TXE) { + dev->stats.tx_errors++; + + if (events & IEVENT_LC) + dev->stats.tx_window_errors++; + if (events & IEVENT_CRL) + dev->stats.tx_aborted_errors++; + if (events & IEVENT_XFUN) { + unsigned long flags; + + netif_dbg(priv, tx_err, dev, + "TX FIFO underrun, packet dropped\n"); + dev->stats.tx_dropped++; + atomic64_inc(&priv->extra_stats.tx_underrun); + + local_irq_save(flags); + lock_tx_qs(priv); + + /* Reactivate the Tx Queues */ + gfar_write(®s->tstat, gfargrp->tstat); + + unlock_tx_qs(priv); + local_irq_restore(flags); + } + netif_dbg(priv, tx_err, dev, "Transmit Error\n"); + } + if (events & IEVENT_BSY) { + dev->stats.rx_errors++; + atomic64_inc(&priv->extra_stats.rx_bsy); + + gfar_receive(irq, grp_id); + + netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", + gfar_read(®s->rstat)); + } + if (events & IEVENT_BABR) { + dev->stats.rx_errors++; + atomic64_inc(&priv->extra_stats.rx_babr); + + netif_dbg(priv, rx_err, dev, "babbling RX error\n"); + } + if (events & IEVENT_EBERR) { + atomic64_inc(&priv->extra_stats.eberr); + netif_dbg(priv, rx_err, dev, "bus error\n"); + } + if (events & IEVENT_RXC) + netif_dbg(priv, rx_status, dev, "control frame\n"); + + if (events & IEVENT_BABT) { + atomic64_inc(&priv->extra_stats.tx_babt); + netif_dbg(priv, tx_err, dev, "babbling TX error\n"); + } + return IRQ_HANDLED; +} + +static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +{ + struct phy_device *phydev = priv->phydev; + u32 val = 0; + + if (!phydev->duplex) + return val; + + if (!priv->pause_aneg_en) { + if (priv->tx_pause_en) + val |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + val |= MACCFG1_RX_FLOW; + } else { + u16 lcl_adv, rmt_adv; + u8 flowctrl; + /* get link partner capabilities */ + rmt_adv = 0; + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + lcl_adv = 0; + if (phydev->advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phydev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_TX) + val |= MACCFG1_TX_FLOW; + if (flowctrl & FLOW_CTRL_RX) + val |= MACCFG1_RX_FLOW; + } + + return val; +} + +static noinline void gfar_update_link_state(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct phy_device *phydev = priv->phydev; + struct gfar_priv_rx_q *rx_queue = NULL; + int i; + struct rxbd8 *bdp; + + if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) + return; + + if (phydev->link) { + u32 tempval1 = gfar_read(®s->maccfg1); + u32 tempval = gfar_read(®s->maccfg2); + u32 ecntrl = gfar_read(®s->ecntrl); + u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW); + + if (phydev->duplex != priv->oldduplex) { + if (!(phydev->duplex)) + tempval &= ~(MACCFG2_FULL_DUPLEX); + else + tempval |= MACCFG2_FULL_DUPLEX; + + priv->oldduplex = phydev->duplex; + } + + if (phydev->speed != priv->oldspeed) { + switch (phydev->speed) { + case 1000: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + + ecntrl &= ~(ECNTRL_R100); + break; + case 100: + case 10: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + + /* Reduced mode distinguishes + * between 10 and 100 + */ + if (phydev->speed == SPEED_100) + ecntrl |= ECNTRL_R100; + else + ecntrl &= ~(ECNTRL_R100); 
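+ /* Net effect of the cases above: 1000 Mbit/s -> MACCFG2[I/F] = GMII
+ * (0x200); 100 Mbit/s -> MII (0x100) with ECNTRL_R100 set; 10 Mbit/s ->
+ * MII with ECNTRL_R100 cleared.
+ */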
+ break; + default: + netif_warn(priv, link, priv->ndev, + "Ack! Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + + priv->oldspeed = phydev->speed; + } + + tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + tempval1 |= gfar_get_flowctrl_cfg(priv); + + /* Turn last free buffer recording on */ + if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { + for (i = 0; i < priv->num_rx_queues; i++) { + rx_queue = priv->rx_queue[i]; + bdp = rx_queue->cur_rx; + /* skip to previous bd */ + bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, + rx_queue->rx_bd_base, + rx_queue->rx_ring_size); + + if (rx_queue->rfbptr) + gfar_write(rx_queue->rfbptr, (u32)bdp); + } + + priv->tx_actual_en = 1; + } + + if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval)) + priv->tx_actual_en = 0; + + gfar_write(®s->maccfg1, tempval1); + gfar_write(®s->maccfg2, tempval); + gfar_write(®s->ecntrl, ecntrl); + + if (!priv->oldlink) + priv->oldlink = 1; + + } else if (priv->oldlink) { + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (netif_msg_link(priv)) + phy_print_status(phydev); +} + +static const struct of_device_id gfar_match[] = +{ + { + .type = "network", + .compatible = "gianfar", + }, + { + .compatible = "fsl,etsec2", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, gfar_match); + +/* Structure for a device driver */ +static struct platform_driver gfar_driver = { + .driver = { + .name = "fsl-gianfar", + .pm = GFAR_PM_OPS, + .of_match_table = gfar_match, + }, + .probe = gfar_probe, + .remove = gfar_remove, +}; + +module_platform_driver(gfar_driver); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h new file mode 100644 index 000000000..daa1d37de --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -0,0 +1,1343 @@ +/* + * drivers/net/ethernet/freescale/gianfar.h + * + * Gianfar Ethernet Driver + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala + * Modifier: Sandeep Gopalpet + * + * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Still left to do: + * -Add support for module parameters + * -Add patch for ethtool phys id + */ +#ifndef __GIANFAR_H +#define __GIANFAR_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +struct ethtool_flow_spec_container { + struct ethtool_rx_flow_spec fs; + struct list_head list; +}; + +struct ethtool_rx_list { + struct list_head list; + unsigned int count; +}; + +/* The maximum number of packets to be handled in one call of gfar_poll */ +#define GFAR_DEV_WEIGHT 64 + +/* Length for FCB */ +#define GMAC_FCB_LEN 8 + +/* Length for TxPAL */ +#define GMAC_TXPAL_LEN 16 + +/* Default padding amount */ +#define DEFAULT_PADDING 2 + +/* Number of bytes to align the rx bufs to */ +#define RXBUF_ALIGNMENT 64 + +/* The number of bytes which composes a unit for the purpose of + * allocating data buffers. ie-for any given MTU, the data buffer + * will be the next highest multiple of 512 bytes. 
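+ * For example, the 1536-byte DEFAULT_RX_BUFFER_SIZE below is the first
+ * multiple of 512 large enough for a standard 1518-byte frame, and the
+ * 9728-byte JUMBO_BUFFER_SIZE is the first multiple of 512 above the
+ * 9600-byte JUMBO_FRAME_SIZE.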
*/ +#define INCREMENTAL_BUFFER_SIZE 512 + +#define PHY_INIT_TIMEOUT 100000 + +#define DRV_NAME "gfar-enet" +extern const char gfar_driver_version[]; + +/* MAXIMUM NUMBER OF QUEUES SUPPORTED */ +#define MAX_TX_QS 0x8 +#define MAX_RX_QS 0x8 + +/* MAXIMUM NUMBER OF GROUPS SUPPORTED */ +#define MAXGROUPS 0x2 + +/* These need to be powers of 2 for this driver */ +#define DEFAULT_TX_RING_SIZE 256 +#define DEFAULT_RX_RING_SIZE 256 + +#define GFAR_RX_MAX_RING_SIZE 256 +#define GFAR_TX_MAX_RING_SIZE 256 + +#define GFAR_MAX_FIFO_THRESHOLD 511 +#define GFAR_MAX_FIFO_STARVE 511 +#define GFAR_MAX_FIFO_STARVE_OFF 511 + +#define FBTHR_SHIFT 24 +#define DEFAULT_RX_LFC_THR 16 +#define DEFAULT_LFC_PTVVAL 4 + +#define DEFAULT_RX_BUFFER_SIZE 1536 +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) +#define JUMBO_BUFFER_SIZE 9728 +#define JUMBO_FRAME_SIZE 9600 + +#define DEFAULT_FIFO_TX_THR 0x100 +#define DEFAULT_FIFO_TX_STARVE 0x40 +#define DEFAULT_FIFO_TX_STARVE_OFF 0x80 +#define DEFAULT_BD_STASH 1 +#define DEFAULT_STASH_LENGTH 96 +#define DEFAULT_STASH_INDEX 0 + +/* The number of Exact Match registers */ +#define GFAR_EM_NUM 15 + +/* Latency of interface clock in nanoseconds */ +/* Interface clock latency , in this case, means the + * time described by a value of 1 in the interrupt + * coalescing registers' time fields. Since those fields + * refer to the time it takes for 64 clocks to pass, the + * latencies are as such: + * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick + * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick + * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick + */ +#define GFAR_GBIT_TIME 512 +#define GFAR_100_TIME 2560 +#define GFAR_10_TIME 25600 + +#define DEFAULT_TX_COALESCE 1 +#define DEFAULT_TXCOUNT 16 +#define DEFAULT_TXTIME 21 + +#define DEFAULT_RXTIME 21 + +#define DEFAULT_RX_COALESCE 0 +#define DEFAULT_RXCOUNT 0 + +#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \ + | SUPPORTED_10baseT_Full \ + | SUPPORTED_100baseT_Half \ + | SUPPORTED_100baseT_Full \ + | SUPPORTED_Autoneg \ + | SUPPORTED_MII) + +#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full + +/* TBI register addresses */ +#define MII_TBICON 0x11 + +/* TBICON register bit fields */ +#define TBICON_CLK_SELECT 0x0020 + +/* MAC register bits */ +#define MACCFG1_SOFT_RESET 0x80000000 +#define MACCFG1_RESET_RX_MC 0x00080000 +#define MACCFG1_RESET_TX_MC 0x00040000 +#define MACCFG1_RESET_RX_FUN 0x00020000 +#define MACCFG1_RESET_TX_FUN 0x00010000 +#define MACCFG1_LOOPBACK 0x00000100 +#define MACCFG1_RX_FLOW 0x00000020 +#define MACCFG1_TX_FLOW 0x00000010 +#define MACCFG1_SYNCD_RX_EN 0x00000008 +#define MACCFG1_RX_EN 0x00000004 +#define MACCFG1_SYNCD_TX_EN 0x00000002 +#define MACCFG1_TX_EN 0x00000001 + +#define MACCFG2_INIT_SETTINGS 0x00007205 +#define MACCFG2_FULL_DUPLEX 0x00000001 +#define MACCFG2_IF 0x00000300 +#define MACCFG2_MII 0x00000100 +#define MACCFG2_GMII 0x00000200 +#define MACCFG2_HUGEFRAME 0x00000020 +#define MACCFG2_LENGTHCHECK 0x00000010 +#define MACCFG2_MPEN 0x00000008 + +#define ECNTRL_FIFM 0x00008000 +#define ECNTRL_INIT_SETTINGS 0x00001000 +#define ECNTRL_TBI_MODE 0x00000020 +#define ECNTRL_REDUCED_MODE 0x00000010 +#define ECNTRL_R100 0x00000008 +#define ECNTRL_REDUCED_MII_MODE 0x00000004 +#define ECNTRL_SGMII_MODE 0x00000002 + +#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE + +#define MINFLR_INIT_SETTINGS 0x00000040 + +/* Tqueue control */ +#define TQUEUE_EN0 0x00008000 +#define TQUEUE_EN1 0x00004000 +#define TQUEUE_EN2 0x00002000 +#define TQUEUE_EN3 0x00001000 +#define TQUEUE_EN4 
0x00000800 +#define TQUEUE_EN5 0x00000400 +#define TQUEUE_EN6 0x00000200 +#define TQUEUE_EN7 0x00000100 +#define TQUEUE_EN_ALL 0x0000FF00 + +#define TR03WT_WT0_MASK 0xFF000000 +#define TR03WT_WT1_MASK 0x00FF0000 +#define TR03WT_WT2_MASK 0x0000FF00 +#define TR03WT_WT3_MASK 0x000000FF + +#define TR47WT_WT4_MASK 0xFF000000 +#define TR47WT_WT5_MASK 0x00FF0000 +#define TR47WT_WT6_MASK 0x0000FF00 +#define TR47WT_WT7_MASK 0x000000FF + +/* Rqueue control */ +#define RQUEUE_EX0 0x00800000 +#define RQUEUE_EX1 0x00400000 +#define RQUEUE_EX2 0x00200000 +#define RQUEUE_EX3 0x00100000 +#define RQUEUE_EX4 0x00080000 +#define RQUEUE_EX5 0x00040000 +#define RQUEUE_EX6 0x00020000 +#define RQUEUE_EX7 0x00010000 +#define RQUEUE_EX_ALL 0x00FF0000 + +#define RQUEUE_EN0 0x00000080 +#define RQUEUE_EN1 0x00000040 +#define RQUEUE_EN2 0x00000020 +#define RQUEUE_EN3 0x00000010 +#define RQUEUE_EN4 0x00000008 +#define RQUEUE_EN5 0x00000004 +#define RQUEUE_EN6 0x00000002 +#define RQUEUE_EN7 0x00000001 +#define RQUEUE_EN_ALL 0x000000FF + +/* Init to do tx snooping for buffers and descriptors */ +#define DMACTRL_INIT_SETTINGS 0x000000c3 +#define DMACTRL_GRS 0x00000010 +#define DMACTRL_GTS 0x00000008 + +#define TSTAT_CLEAR_THALT_ALL 0xFF000000 +#define TSTAT_CLEAR_THALT 0x80000000 +#define TSTAT_CLEAR_THALT0 0x80000000 +#define TSTAT_CLEAR_THALT1 0x40000000 +#define TSTAT_CLEAR_THALT2 0x20000000 +#define TSTAT_CLEAR_THALT3 0x10000000 +#define TSTAT_CLEAR_THALT4 0x08000000 +#define TSTAT_CLEAR_THALT5 0x04000000 +#define TSTAT_CLEAR_THALT6 0x02000000 +#define TSTAT_CLEAR_THALT7 0x01000000 + +/* Interrupt coalescing macros */ +#define IC_ICEN 0x80000000 +#define IC_ICFT_MASK 0x1fe00000 +#define IC_ICFT_SHIFT 21 +#define mk_ic_icft(x) \ + (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK) +#define IC_ICTT_MASK 0x0000ffff +#define mk_ic_ictt(x) (x&IC_ICTT_MASK) + +#define mk_ic_value(count, time) (IC_ICEN | \ + mk_ic_icft(count) | \ + mk_ic_ictt(time)) +#define get_icft_value(ic) (((unsigned long)ic & IC_ICFT_MASK) >> \ + IC_ICFT_SHIFT) +#define get_ictt_value(ic) ((unsigned long)ic & IC_ICTT_MASK) + +#define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME) +#define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME) + +#define skip_bd(bdp, stride, base, ring_size) ({ \ + typeof(bdp) new_bd = (bdp) + (stride); \ + (new_bd >= (base) + (ring_size)) ? 
(new_bd - (ring_size)) : new_bd; }) + +#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size) + +#define RCTRL_TS_ENABLE 0x01000000 +#define RCTRL_PAL_MASK 0x001f0000 +#define RCTRL_LFC 0x00004000 +#define RCTRL_VLEX 0x00002000 +#define RCTRL_FILREN 0x00001000 +#define RCTRL_GHTX 0x00000400 +#define RCTRL_IPCSEN 0x00000200 +#define RCTRL_TUCSEN 0x00000100 +#define RCTRL_PRSDEP_MASK 0x000000c0 +#define RCTRL_PRSDEP_INIT 0x000000c0 +#define RCTRL_PRSFM 0x00000020 +#define RCTRL_PROM 0x00000008 +#define RCTRL_EMEN 0x00000002 +#define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \ + RCTRL_TUCSEN | RCTRL_FILREN) +#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \ + RCTRL_PRSDEP_INIT) +#define RCTRL_EXTHASH (RCTRL_GHTX) +#define RCTRL_VLAN (RCTRL_PRSDEP_INIT) +#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK) + + +#define RSTAT_CLEAR_RHALT 0x00800000 +#define RSTAT_CLEAR_RXF0 0x00000080 +#define RSTAT_RXF_MASK 0x000000ff + +#define TCTRL_IPCSEN 0x00004000 +#define TCTRL_TUCSEN 0x00002000 +#define TCTRL_VLINS 0x00001000 +#define TCTRL_THDF 0x00000800 +#define TCTRL_RFCPAUSE 0x00000010 +#define TCTRL_TFCPAUSE 0x00000008 +#define TCTRL_TXSCHED_MASK 0x00000006 +#define TCTRL_TXSCHED_INIT 0x00000000 +/* priority scheduling */ +#define TCTRL_TXSCHED_PRIO 0x00000002 +/* weighted round-robin scheduling (WRRS) */ +#define TCTRL_TXSCHED_WRRS 0x00000004 +/* default WRRS weight and policy setting, + * tailored to the tr03wt and tr47wt registers: + * equal weight for all Tx Qs, measured in 64byte units + */ +#define DEFAULT_WRRS_WEIGHT 0x18181818 + +#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) + +#define IEVENT_INIT_CLEAR 0xffffffff +#define IEVENT_BABR 0x80000000 +#define IEVENT_RXC 0x40000000 +#define IEVENT_BSY 0x20000000 +#define IEVENT_EBERR 0x10000000 +#define IEVENT_MSRO 0x04000000 +#define IEVENT_GTSC 0x02000000 +#define IEVENT_BABT 0x01000000 +#define IEVENT_TXC 0x00800000 +#define IEVENT_TXE 0x00400000 +#define IEVENT_TXB 0x00200000 +#define IEVENT_TXF 0x00100000 +#define IEVENT_LC 0x00040000 +#define IEVENT_CRL 0x00020000 +#define IEVENT_XFUN 0x00010000 +#define IEVENT_RXB0 0x00008000 +#define IEVENT_MAG 0x00000800 +#define IEVENT_GRSC 0x00000100 +#define IEVENT_RXF0 0x00000080 +#define IEVENT_FIR 0x00000008 +#define IEVENT_FIQ 0x00000004 +#define IEVENT_DPE 0x00000002 +#define IEVENT_PERR 0x00000001 +#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0 | IEVENT_BSY) +#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) +#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK) +#define IEVENT_ERR_MASK \ +(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ + IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ + | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \ + | IEVENT_MAG | IEVENT_BABR) + +#define IMASK_INIT_CLEAR 0x00000000 +#define IMASK_BABR 0x80000000 +#define IMASK_RXC 0x40000000 +#define IMASK_BSY 0x20000000 +#define IMASK_EBERR 0x10000000 +#define IMASK_MSRO 0x04000000 +#define IMASK_GTSC 0x02000000 +#define IMASK_BABT 0x01000000 +#define IMASK_TXC 0x00800000 +#define IMASK_TXEEN 0x00400000 +#define IMASK_TXBEN 0x00200000 +#define IMASK_TXFEN 0x00100000 +#define IMASK_LC 0x00040000 +#define IMASK_CRL 0x00020000 +#define IMASK_XFUN 0x00010000 +#define IMASK_RXB0 0x00008000 +#define IMASK_MAG 0x00000800 +#define IMASK_GRSC 0x00000100 +#define IMASK_RXFEN0 0x00000080 +#define IMASK_FIR 0x00000008 +#define IMASK_FIQ 0x00000004 +#define IMASK_DPE 0x00000002 +#define IMASK_PERR 0x00000001 +#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | 
IMASK_TXBEN | \ + IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ + IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ + | IMASK_PERR) +#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY) +#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN) + +#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT) +#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT) + +/* Fifo management */ +#define FIFO_TX_THR_MASK 0x01ff +#define FIFO_TX_STARVE_MASK 0x01ff +#define FIFO_TX_STARVE_OFF_MASK 0x01ff + +/* Attribute fields */ + +/* This enables rx snooping for buffers and descriptors */ +#define ATTR_BDSTASH 0x00000800 + +#define ATTR_BUFSTASH 0x00004000 + +#define ATTR_SNOOPING 0x000000c0 +#define ATTR_INIT_SETTINGS ATTR_SNOOPING + +#define ATTRELI_INIT_SETTINGS 0x0 +#define ATTRELI_EL_MASK 0x3fff0000 +#define ATTRELI_EL(x) (x << 16) +#define ATTRELI_EI_MASK 0x00003fff +#define ATTRELI_EI(x) (x) + +#define BD_LFLAG(flags) ((flags) << 16) +#define BD_LENGTH_MASK 0x0000ffff + +#define FPR_FILER_MASK 0xFFFFFFFF +#define MAX_FILER_IDX 0xFF + +/* This default RIR value directly corresponds + * to the 3-bit hash value generated */ +#define DEFAULT_8RXQ_RIR0 0x05397700 +/* Map even hash values to Q0, and odd ones to Q1 */ +#define DEFAULT_2RXQ_RIR0 0x04104100 + +/* RQFCR register bits */ +#define RQFCR_GPI 0x80000000 +#define RQFCR_HASHTBL_Q 0x00000000 +#define RQFCR_HASHTBL_0 0x00020000 +#define RQFCR_HASHTBL_1 0x00040000 +#define RQFCR_HASHTBL_2 0x00060000 +#define RQFCR_HASHTBL_3 0x00080000 +#define RQFCR_HASH 0x00010000 +#define RQFCR_QUEUE 0x0000FC00 +#define RQFCR_CLE 0x00000200 +#define RQFCR_RJE 0x00000100 +#define RQFCR_AND 0x00000080 +#define RQFCR_CMP_EXACT 0x00000000 +#define RQFCR_CMP_MATCH 0x00000020 +#define RQFCR_CMP_NOEXACT 0x00000040 +#define RQFCR_CMP_NOMATCH 0x00000060 + +/* RQFCR PID values */ +#define RQFCR_PID_MASK 0x00000000 +#define RQFCR_PID_PARSE 0x00000001 +#define RQFCR_PID_ARB 0x00000002 +#define RQFCR_PID_DAH 0x00000003 +#define RQFCR_PID_DAL 0x00000004 +#define RQFCR_PID_SAH 0x00000005 +#define RQFCR_PID_SAL 0x00000006 +#define RQFCR_PID_ETY 0x00000007 +#define RQFCR_PID_VID 0x00000008 +#define RQFCR_PID_PRI 0x00000009 +#define RQFCR_PID_TOS 0x0000000A +#define RQFCR_PID_L4P 0x0000000B +#define RQFCR_PID_DIA 0x0000000C +#define RQFCR_PID_SIA 0x0000000D +#define RQFCR_PID_DPT 0x0000000E +#define RQFCR_PID_SPT 0x0000000F + +/* RQFPR when PID is 0x0001 */ +#define RQFPR_HDR_GE_512 0x00200000 +#define RQFPR_LERR 0x00100000 +#define RQFPR_RAR 0x00080000 +#define RQFPR_RARQ 0x00040000 +#define RQFPR_AR 0x00020000 +#define RQFPR_ARQ 0x00010000 +#define RQFPR_EBC 0x00008000 +#define RQFPR_VLN 0x00004000 +#define RQFPR_CFI 0x00002000 +#define RQFPR_JUM 0x00001000 +#define RQFPR_IPF 0x00000800 +#define RQFPR_FIF 0x00000400 +#define RQFPR_IPV4 0x00000200 +#define RQFPR_IPV6 0x00000100 +#define RQFPR_ICC 0x00000080 +#define RQFPR_ICV 0x00000040 +#define RQFPR_TCP 0x00000020 +#define RQFPR_UDP 0x00000010 +#define RQFPR_TUC 0x00000008 +#define RQFPR_TUV 0x00000004 +#define RQFPR_PER 0x00000002 +#define RQFPR_EER 0x00000001 + +/* TxBD status field bits */ +#define TXBD_READY 0x8000 +#define TXBD_PADCRC 0x4000 +#define TXBD_WRAP 0x2000 +#define TXBD_INTERRUPT 0x1000 +#define TXBD_LAST 0x0800 +#define TXBD_CRC 0x0400 +#define TXBD_DEF 0x0200 +#define TXBD_HUGEFRAME 0x0080 +#define TXBD_LATECOLLISION 0x0080 +#define TXBD_RETRYLIMIT 0x0040 +#define TXBD_RETRYCOUNTMASK 0x003c +#define TXBD_UNDERRUN 0x0002 +#define TXBD_TOE 0x0002 + +/* Tx FCB param bits */ +#define 
TXFCB_VLN 0x80 +#define TXFCB_IP 0x40 +#define TXFCB_IP6 0x20 +#define TXFCB_TUP 0x10 +#define TXFCB_UDP 0x08 +#define TXFCB_CIP 0x04 +#define TXFCB_CTU 0x02 +#define TXFCB_NPH 0x01 +#define TXFCB_DEFAULT (TXFCB_IP|TXFCB_TUP|TXFCB_CTU|TXFCB_NPH) + +/* RxBD status field bits */ +#define RXBD_EMPTY 0x8000 +#define RXBD_RO1 0x4000 +#define RXBD_WRAP 0x2000 +#define RXBD_INTERRUPT 0x1000 +#define RXBD_LAST 0x0800 +#define RXBD_FIRST 0x0400 +#define RXBD_MISS 0x0100 +#define RXBD_BROADCAST 0x0080 +#define RXBD_MULTICAST 0x0040 +#define RXBD_LARGE 0x0020 +#define RXBD_NONOCTET 0x0010 +#define RXBD_SHORT 0x0008 +#define RXBD_CRCERR 0x0004 +#define RXBD_OVERRUN 0x0002 +#define RXBD_TRUNCATED 0x0001 +#define RXBD_STATS 0x01ff +#define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \ + | RXBD_CRCERR | RXBD_OVERRUN \ + | RXBD_TRUNCATED) + +/* Rx FCB status field bits */ +#define RXFCB_VLN 0x8000 +#define RXFCB_IP 0x4000 +#define RXFCB_IP6 0x2000 +#define RXFCB_TUP 0x1000 +#define RXFCB_CIP 0x0800 +#define RXFCB_CTU 0x0400 +#define RXFCB_EIP 0x0200 +#define RXFCB_ETU 0x0100 +#define RXFCB_CSUM_MASK 0x0f00 +#define RXFCB_PERR_MASK 0x000c +#define RXFCB_PERR_BADL3 0x0008 + +#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */ + +struct txbd8 +{ + union { + struct { + __be16 status; /* Status Fields */ + __be16 length; /* Buffer length */ + }; + __be32 lstatus; + }; + __be32 bufPtr; /* Buffer Pointer */ +}; + +struct txfcb { + u8 flags; + u8 ptp; /* Flag to enable tx timestamping */ + u8 l4os; /* Level 4 Header Offset */ + u8 l3os; /* Level 3 Header Offset */ + __be16 phcs; /* Pseudo-header Checksum */ + __be16 vlctl; /* VLAN control word */ +}; + +struct rxbd8 +{ + union { + struct { + __be16 status; /* Status Fields */ + __be16 length; /* Buffer Length */ + }; + __be32 lstatus; + }; + __be32 bufPtr; /* Buffer Pointer */ +}; + +struct rxfcb { + __be16 flags; + u8 rq; /* Receive Queue index */ + u8 pro; /* Layer 4 Protocol */ + u16 reserved; + __be16 vlctl; /* VLAN control word */ +}; + +struct gianfar_skb_cb { + unsigned int bytes_sent; /* bytes-on-wire (i.e. 
no FCB) */ +}; + +#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb)) + +struct rmon_mib +{ + u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ + u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */ + u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */ + u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */ + u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */ + u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */ + u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */ + u32 rbyt; /* 0x.69c - Receive Byte Counter */ + u32 rpkt; /* 0x.6a0 - Receive Packet Counter */ + u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */ + u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */ + u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */ + u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */ + u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */ + u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */ + u32 raln; /* 0x.6bc - Receive Alignment Error Counter */ + u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */ + u32 rcde; /* 0x.6c4 - Receive Code Error Counter */ + u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */ + u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */ + u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */ + u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */ + u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */ + u32 rdrp; /* 0x.6dc - Receive Drop Counter */ + u32 tbyt; /* 0x.6e0 - Transmit Byte Counter Counter */ + u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */ + u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */ + u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */ + u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */ + u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */ + u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */ + u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */ + u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */ + u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */ + u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */ + u32 tncl; /* 0x.70c - Transmit Total Collision Counter */ + u8 res1[4]; + u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */ + u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */ + u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */ + u32 txcf; /* 0x.720 - Transmit Control Frame Counter */ + u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */ + u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */ + u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */ + u32 car1; /* 0x.730 - Carry Register One */ + u32 car2; /* 0x.734 - Carry Register Two */ + u32 cam1; /* 0x.738 - Carry Mask Register One */ + u32 cam2; /* 0x.73c - Carry Mask Register Two */ +}; + +struct gfar_extra_stats { + atomic64_t rx_large; + atomic64_t rx_short; + atomic64_t rx_nonoctet; + atomic64_t rx_crcerr; + atomic64_t rx_overrun; + atomic64_t rx_bsy; + atomic64_t rx_babr; + atomic64_t rx_trunc; + atomic64_t eberr; + atomic64_t tx_babt; + atomic64_t tx_underrun; + atomic64_t rx_skbmissing; + atomic64_t tx_timeout; +}; + +#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32)) +#define GFAR_EXTRA_STATS_LEN \ + (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t)) + +/* Number of stats exported via ethtool */ +#define GFAR_STATS_LEN (GFAR_RMON_LEN + 
GFAR_EXTRA_STATS_LEN) + +struct gfar { + u32 tsec_id; /* 0x.000 - Controller ID register */ + u32 tsec_id2; /* 0x.004 - Controller ID2 register */ + u8 res1[8]; + u32 ievent; /* 0x.010 - Interrupt Event Register */ + u32 imask; /* 0x.014 - Interrupt Mask Register */ + u32 edis; /* 0x.018 - Error Disabled Register */ + u32 emapg; /* 0x.01c - Group Error mapping register */ + u32 ecntrl; /* 0x.020 - Ethernet Control Register */ + u32 minflr; /* 0x.024 - Minimum Frame Length Register */ + u32 ptv; /* 0x.028 - Pause Time Value Register */ + u32 dmactrl; /* 0x.02c - DMA Control Register */ + u32 tbipa; /* 0x.030 - TBI PHY Address Register */ + u8 res2[28]; + u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold + register */ + u32 fifo_rx_pause_shutoff; /* x.054 - FIFO receive starve shutoff + register */ + u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold + register */ + u32 fifo_rx_alarm_shutoff; /*0x.05c - FIFO receive alarm starve + shutoff register */ + u8 res3[44]; + u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ + u8 res4[8]; + u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ + u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ + u8 res5[96]; + u32 tctrl; /* 0x.100 - Transmit Control Register */ + u32 tstat; /* 0x.104 - Transmit Status Register */ + u32 dfvlan; /* 0x.108 - Default VLAN Control word */ + u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */ + u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */ + u32 tqueue; /* 0x.114 - Transmit queue control register */ + u8 res7[40]; + u32 tr03wt; /* 0x.140 - TxBD Rings 0-3 round-robin weightings */ + u32 tr47wt; /* 0x.144 - TxBD Rings 4-7 round-robin weightings */ + u8 res8[52]; + u32 tbdbph; /* 0x.17c - Tx data buffer pointer high */ + u8 res9a[4]; + u32 tbptr0; /* 0x.184 - TxBD Pointer for ring 0 */ + u8 res9b[4]; + u32 tbptr1; /* 0x.18c - TxBD Pointer for ring 1 */ + u8 res9c[4]; + u32 tbptr2; /* 0x.194 - TxBD Pointer for ring 2 */ + u8 res9d[4]; + u32 tbptr3; /* 0x.19c - TxBD Pointer for ring 3 */ + u8 res9e[4]; + u32 tbptr4; /* 0x.1a4 - TxBD Pointer for ring 4 */ + u8 res9f[4]; + u32 tbptr5; /* 0x.1ac - TxBD Pointer for ring 5 */ + u8 res9g[4]; + u32 tbptr6; /* 0x.1b4 - TxBD Pointer for ring 6 */ + u8 res9h[4]; + u32 tbptr7; /* 0x.1bc - TxBD Pointer for ring 7 */ + u8 res9[64]; + u32 tbaseh; /* 0x.200 - TxBD base address high */ + u32 tbase0; /* 0x.204 - TxBD Base Address of ring 0 */ + u8 res10a[4]; + u32 tbase1; /* 0x.20c - TxBD Base Address of ring 1 */ + u8 res10b[4]; + u32 tbase2; /* 0x.214 - TxBD Base Address of ring 2 */ + u8 res10c[4]; + u32 tbase3; /* 0x.21c - TxBD Base Address of ring 3 */ + u8 res10d[4]; + u32 tbase4; /* 0x.224 - TxBD Base Address of ring 4 */ + u8 res10e[4]; + u32 tbase5; /* 0x.22c - TxBD Base Address of ring 5 */ + u8 res10f[4]; + u32 tbase6; /* 0x.234 - TxBD Base Address of ring 6 */ + u8 res10g[4]; + u32 tbase7; /* 0x.23c - TxBD Base Address of ring 7 */ + u8 res10[192]; + u32 rctrl; /* 0x.300 - Receive Control Register */ + u32 rstat; /* 0x.304 - Receive Status Register */ + u8 res12[8]; + u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ + u32 rqueue; /* 0x.314 - Receive queue control register */ + u32 rir0; /* 0x.318 - Ring mapping register 0 */ + u32 rir1; /* 0x.31c - Ring mapping register 1 */ + u32 rir2; /* 0x.320 - Ring mapping register 2 */ + u32 rir3; /* 0x.324 - Ring mapping register 3 */ + u8 res13[8]; + u32 rbifx; /* 0x.330 - 
Receive bit field extract control register */ + u32 rqfar; /* 0x.334 - Receive queue filing table address register */ + u32 rqfcr; /* 0x.338 - Receive queue filing table control register */ + u32 rqfpr; /* 0x.33c - Receive queue filing table property register */ + u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */ + u8 res14[56]; + u32 rbdbph; /* 0x.37c - Rx data buffer pointer high */ + u8 res15a[4]; + u32 rbptr0; /* 0x.384 - RxBD pointer for ring 0 */ + u8 res15b[4]; + u32 rbptr1; /* 0x.38c - RxBD pointer for ring 1 */ + u8 res15c[4]; + u32 rbptr2; /* 0x.394 - RxBD pointer for ring 2 */ + u8 res15d[4]; + u32 rbptr3; /* 0x.39c - RxBD pointer for ring 3 */ + u8 res15e[4]; + u32 rbptr4; /* 0x.3a4 - RxBD pointer for ring 4 */ + u8 res15f[4]; + u32 rbptr5; /* 0x.3ac - RxBD pointer for ring 5 */ + u8 res15g[4]; + u32 rbptr6; /* 0x.3b4 - RxBD pointer for ring 6 */ + u8 res15h[4]; + u32 rbptr7; /* 0x.3bc - RxBD pointer for ring 7 */ + u8 res16[64]; + u32 rbaseh; /* 0x.400 - RxBD base address high */ + u32 rbase0; /* 0x.404 - RxBD base address of ring 0 */ + u8 res17a[4]; + u32 rbase1; /* 0x.40c - RxBD base address of ring 1 */ + u8 res17b[4]; + u32 rbase2; /* 0x.414 - RxBD base address of ring 2 */ + u8 res17c[4]; + u32 rbase3; /* 0x.41c - RxBD base address of ring 3 */ + u8 res17d[4]; + u32 rbase4; /* 0x.424 - RxBD base address of ring 4 */ + u8 res17e[4]; + u32 rbase5; /* 0x.42c - RxBD base address of ring 5 */ + u8 res17f[4]; + u32 rbase6; /* 0x.434 - RxBD base address of ring 6 */ + u8 res17g[4]; + u32 rbase7; /* 0x.43c - RxBD base address of ring 7 */ + u8 res17[192]; + u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */ + u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */ + u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */ + u32 hafdup; /* 0x.50c - Half Duplex Register */ + u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ + u8 res18[12]; + u8 gfar_mii_regs[24]; /* See gianfar_phy.h */ + u32 ifctrl; /* 0x.538 - Interface control register */ + u32 ifstat; /* 0x.53c - Interface Status Register */ + u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ + u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ + u32 mac01addr1; /* 0x.548 - MAC exact match address 1, part 1 */ + u32 mac01addr2; /* 0x.54c - MAC exact match address 1, part 2 */ + u32 mac02addr1; /* 0x.550 - MAC exact match address 2, part 1 */ + u32 mac02addr2; /* 0x.554 - MAC exact match address 2, part 2 */ + u32 mac03addr1; /* 0x.558 - MAC exact match address 3, part 1 */ + u32 mac03addr2; /* 0x.55c - MAC exact match address 3, part 2 */ + u32 mac04addr1; /* 0x.560 - MAC exact match address 4, part 1 */ + u32 mac04addr2; /* 0x.564 - MAC exact match address 4, part 2 */ + u32 mac05addr1; /* 0x.568 - MAC exact match address 5, part 1 */ + u32 mac05addr2; /* 0x.56c - MAC exact match address 5, part 2 */ + u32 mac06addr1; /* 0x.570 - MAC exact match address 6, part 1 */ + u32 mac06addr2; /* 0x.574 - MAC exact match address 6, part 2 */ + u32 mac07addr1; /* 0x.578 - MAC exact match address 7, part 1 */ + u32 mac07addr2; /* 0x.57c - MAC exact match address 7, part 2 */ + u32 mac08addr1; /* 0x.580 - MAC exact match address 8, part 1 */ + u32 mac08addr2; /* 0x.584 - MAC exact match address 8, part 2 */ + u32 mac09addr1; /* 0x.588 - MAC exact match address 9, part 1 */ + u32 mac09addr2; /* 0x.58c - MAC exact match address 9, part 2 */ + u32 mac10addr1; /* 0x.590 - MAC exact match address 10, part 1*/ + u32 mac10addr2; /* 0x.594 - MAC exact match address 10, part 2*/ + 
u32 mac11addr1; /* 0x.598 - MAC exact match address 11, part 1*/ + u32 mac11addr2; /* 0x.59c - MAC exact match address 11, part 2*/ + u32 mac12addr1; /* 0x.5a0 - MAC exact match address 12, part 1*/ + u32 mac12addr2; /* 0x.5a4 - MAC exact match address 12, part 2*/ + u32 mac13addr1; /* 0x.5a8 - MAC exact match address 13, part 1*/ + u32 mac13addr2; /* 0x.5ac - MAC exact match address 13, part 2*/ + u32 mac14addr1; /* 0x.5b0 - MAC exact match address 14, part 1*/ + u32 mac14addr2; /* 0x.5b4 - MAC exact match address 14, part 2*/ + u32 mac15addr1; /* 0x.5b8 - MAC exact match address 15, part 1*/ + u32 mac15addr2; /* 0x.5bc - MAC exact match address 15, part 2*/ + u8 res20[192]; + struct rmon_mib rmon; /* 0x.680-0x.73c */ + u32 rrej; /* 0x.740 - Receive filer rejected packet counter */ + u8 res21[188]; + u32 igaddr0; /* 0x.800 - Indivdual/Group address register 0*/ + u32 igaddr1; /* 0x.804 - Indivdual/Group address register 1*/ + u32 igaddr2; /* 0x.808 - Indivdual/Group address register 2*/ + u32 igaddr3; /* 0x.80c - Indivdual/Group address register 3*/ + u32 igaddr4; /* 0x.810 - Indivdual/Group address register 4*/ + u32 igaddr5; /* 0x.814 - Indivdual/Group address register 5*/ + u32 igaddr6; /* 0x.818 - Indivdual/Group address register 6*/ + u32 igaddr7; /* 0x.81c - Indivdual/Group address register 7*/ + u8 res22[96]; + u32 gaddr0; /* 0x.880 - Group address register 0 */ + u32 gaddr1; /* 0x.884 - Group address register 1 */ + u32 gaddr2; /* 0x.888 - Group address register 2 */ + u32 gaddr3; /* 0x.88c - Group address register 3 */ + u32 gaddr4; /* 0x.890 - Group address register 4 */ + u32 gaddr5; /* 0x.894 - Group address register 5 */ + u32 gaddr6; /* 0x.898 - Group address register 6 */ + u32 gaddr7; /* 0x.89c - Group address register 7 */ + u8 res23a[352]; + u32 fifocfg; /* 0x.a00 - FIFO interface config register */ + u8 res23b[252]; + u8 res23c[248]; + u32 attr; /* 0x.bf8 - Attributes Register */ + u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ + u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */ + u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */ + u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */ + u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */ + u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */ + u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */ + u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */ + u32 rqprm7; /* 0x.c1c - Receive queue parameters register 7 */ + u8 res24[36]; + u32 rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */ + u8 res24a[4]; + u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */ + u8 res24b[4]; + u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */ + u8 res24c[4]; + u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */ + u8 res24d[4]; + u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */ + u8 res24e[4]; + u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */ + u8 res24f[4]; + u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */ + u8 res24g[4]; + u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */ + u8 res24h[4]; + u8 res24x[556]; + u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */ + u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */ + u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */ + u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */ + u8 res25[16]; + u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */ + u32 rxic1; 
/* 0x.ed4 - Ring 1 Rx interrupt coalescing */ + u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */ + u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */ + u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */ + u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */ + u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */ + u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */ + u8 res26[32]; + u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */ + u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */ + u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */ + u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */ + u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */ + u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */ + u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */ + u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */ + u8 res27[208]; +}; + +/* Flags related to gianfar device features */ +#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001 +#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002 +#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004 +#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008 +#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 +#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 +#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 +#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 +#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 +#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 +#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 + +#if (MAXGROUPS == 2) +#define DEFAULT_MAPPING 0xAA +#else +#define DEFAULT_MAPPING 0xFF +#endif + +#define ISRG_RR0 0x80000000 +#define ISRG_TR0 0x00800000 + +/* The same driver can operate in two modes */ +/* SQ_SG_MODE: Single Queue Single Group Mode + * (Backward compatible mode) + * MQ_MG_MODE: Multi Queue Multi Group mode + */ +enum { + SQ_SG_MODE = 0, + MQ_MG_MODE +}; + +/* GFAR_SQ_POLLING: Single Queue NAPI polling mode + * The driver supports a single pair of RX/Tx queues + * per interrupt group (Rx/Tx int line). MQ_MG mode + * devices have 2 interrupt groups, so the device will + * have a total of 2 Tx and 2 Rx queues in this case. + * GFAR_MQ_POLLING: Multi Queue NAPI polling mode + * The driver supports all the 8 Rx and Tx HW queues + * each queue mapped by the Device Tree to one of + * the 2 interrupt groups. This mode implies significant + * processing overhead (CPU and controller level). 
+ */ +enum gfar_poll_mode { + GFAR_SQ_POLLING = 0, + GFAR_MQ_POLLING +}; + +/* + * Per TX queue stats + */ +struct tx_q_stats { + unsigned long tx_packets; + unsigned long tx_bytes; +}; + +/** + * struct gfar_priv_tx_q - per tx queue structure + * @txlock: per queue tx spin lock + * @tx_skbuff:skb pointers + * @skb_curtx: to be used skb pointer + * @skb_dirtytx:the last used skb pointer + * @stats: bytes/packets stats + * @qindex: index of this queue + * @dev: back pointer to the dev structure + * @grp: back pointer to the group to which this queue belongs + * @tx_bd_base: First tx buffer descriptor + * @cur_tx: Next free ring entry + * @dirty_tx: First buffer in line to be transmitted + * @tx_ring_size: Tx ring size + * @num_txbdfree: number of free TxBds + * @txcoalescing: enable/disable tx coalescing + * @txic: transmit interrupt coalescing value + * @txcount: coalescing value if based on tx frame count + * @txtime: coalescing value if based on time + */ +struct gfar_priv_tx_q { + /* cacheline 1 */ + spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES))); + struct txbd8 *tx_bd_base; + struct txbd8 *cur_tx; + unsigned int num_txbdfree; + unsigned short skb_curtx; + unsigned short tx_ring_size; + struct tx_q_stats stats; + struct gfar_priv_grp *grp; + /* cacheline 2 */ + struct net_device *dev; + struct sk_buff **tx_skbuff; + struct txbd8 *dirty_tx; + unsigned short skb_dirtytx; + unsigned short qindex; + /* Configuration info for the coalescing features */ + unsigned int txcoalescing; + unsigned long txic; + dma_addr_t tx_bd_dma_base; +}; + +/* + * Per RX queue stats + */ +struct rx_q_stats { + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long rx_dropped; +}; + +/** + * struct gfar_priv_rx_q - per rx queue structure + * @rx_skbuff: skb pointers + * @skb_currx: currently use skb pointer + * @rx_bd_base: First rx buffer descriptor + * @cur_rx: Next free rx ring entry + * @qindex: index of this queue + * @dev: back pointer to the dev structure + * @rx_ring_size: Rx ring size + * @rxcoalescing: enable/disable rx-coalescing + * @rxic: receive interrupt coalescing vlaue + */ + +struct gfar_priv_rx_q { + struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES); + dma_addr_t rx_bd_dma_base; + struct rxbd8 *rx_bd_base; + struct rxbd8 *cur_rx; + struct net_device *dev; + struct gfar_priv_grp *grp; + struct rx_q_stats stats; + u16 skb_currx; + u16 qindex; + unsigned int rx_ring_size; + /* RX Coalescing values */ + unsigned char rxcoalescing; + unsigned long rxic; + u32 __iomem *rfbptr; +}; + +enum gfar_irqinfo_id { + GFAR_TX = 0, + GFAR_RX = 1, + GFAR_ER = 2, + GFAR_NUM_IRQS = 3 +}; + +struct gfar_irqinfo { + unsigned int irq; + char name[GFAR_INT_NAME_MAX]; +}; + +/** + * struct gfar_priv_grp - per group structure + * @napi: the napi poll function + * @priv: back pointer to the priv structure + * @regs: the ioremapped register space for this group + * @irqinfo: TX/RX/ER irq data for this group + */ + +struct gfar_priv_grp { + spinlock_t grplock __aligned(SMP_CACHE_BYTES); + struct napi_struct napi_rx; + struct napi_struct napi_tx; + struct gfar __iomem *regs; + struct gfar_priv_tx_q *tx_queue; + struct gfar_priv_rx_q *rx_queue; + unsigned int tstat; + unsigned int rstat; + + struct gfar_private *priv; + unsigned long num_tx_queues; + unsigned long tx_bit_map; + unsigned long num_rx_queues; + unsigned long rx_bit_map; + + struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS]; +}; + +#define gfar_irq(grp, ID) \ + ((grp)->irqinfo[GFAR_##ID]) + +enum gfar_errata { + GFAR_ERRATA_74 = 0x01, 
+ GFAR_ERRATA_76 = 0x02, + GFAR_ERRATA_A002 = 0x04, + GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */ +}; + +enum gfar_dev_state { + GFAR_DOWN = 1, + GFAR_RESETTING +}; + +/* Struct stolen almost completely (and shamelessly) from the FCC enet source + * (Ok, that's not so true anymore, but there is a family resemblance) + * The GFAR buffer descriptors track the ring buffers. The rx_bd_base + * and tx_bd_base always point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct gfar_private { + struct device *dev; + struct net_device *ndev; + enum gfar_errata errata; + unsigned int rx_buffer_size; + + u16 uses_rxfcb; + u16 padding; + u32 device_flags; + + /* HW time stamping enabled flag */ + int hwts_rx_en; + int hwts_tx_en; + + struct gfar_priv_tx_q *tx_queue[MAX_TX_QS]; + struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; + struct gfar_priv_grp gfargrp[MAXGROUPS]; + + unsigned long state; + + unsigned short mode; + unsigned short poll_mode; + unsigned int num_tx_queues; + unsigned int num_rx_queues; + unsigned int num_grps; + int tx_actual_en; + + /* Network Statistics */ + struct gfar_extra_stats extra_stats; + + /* PHY stuff */ + phy_interface_t interface; + struct device_node *phy_node; + struct device_node *tbi_node; + struct phy_device *phydev; + struct mii_bus *mii_bus; + int oldspeed; + int oldduplex; + int oldlink; + + /* Bitfield update lock */ + spinlock_t bflock; + + uint32_t msg_enable; + + struct work_struct reset_task; + + struct platform_device *ofdev; + unsigned char + extended_hash:1, + bd_stash_en:1, + rx_filer_enable:1, + /* Wake-on-LAN enabled */ + wol_en:1, + /* Enable priorty based Tx scheduling in Hw */ + prio_sched_en:1, + /* Flow control flags */ + pause_aneg_en:1, + tx_pause_en:1, + rx_pause_en:1; + + /* The total tx and rx ring size for the enabled queues */ + unsigned int total_tx_ring_size; + unsigned int total_rx_ring_size; + + u32 rqueue; + u32 tqueue; + + /* RX per device parameters */ + unsigned int rx_stash_size; + unsigned int rx_stash_index; + + u32 cur_filer_idx; + + /* RX queue filer rule set*/ + struct ethtool_rx_list rx_list; + struct mutex rx_queue_access; + + /* Hash registers and their width */ + u32 __iomem *hash_regs[16]; + int hash_width; + + /*Filer table*/ + unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; + unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; +}; + + +static inline int gfar_has_errata(struct gfar_private *priv, + enum gfar_errata err) +{ + return priv->errata & err; +} + +static inline u32 gfar_read(unsigned __iomem *addr) +{ + u32 val; + val = ioread32be(addr); + return val; +} + +static inline void gfar_write(unsigned __iomem *addr, u32 val) +{ + iowrite32be(val, addr); +} + +static inline void gfar_write_filer(struct gfar_private *priv, + unsigned int far, unsigned int fcr, unsigned int fpr) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + + gfar_write(®s->rqfar, far); + gfar_write(®s->rqfcr, fcr); + gfar_write(®s->rqfpr, fpr); +} + +static inline void gfar_read_filer(struct gfar_private *priv, + unsigned int far, unsigned int *fcr, unsigned int *fpr) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + + gfar_write(®s->rqfar, far); + *fcr = gfar_read(®s->rqfcr); + *fpr = gfar_read(®s->rqfpr); +} + +static inline void gfar_write_isrg(struct gfar_private *priv) +{ + 
struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 __iomem *baddr = ®s->isrg0; + u32 isrg = 0; + int grp_idx, i; + + for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { + struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx]; + + for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) { + isrg |= (ISRG_RR0 >> i); + } + + for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) { + isrg |= (ISRG_TR0 >> i); + } + + gfar_write(baddr, isrg); + + baddr++; + isrg = 0; + } +} + +static inline int gfar_is_dma_stopped(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + + return ((gfar_read(®s->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) == + (IEVENT_GRSC | IEVENT_GTSC)); +} + +static inline int gfar_is_rx_dma_stopped(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + + return gfar_read(®s->ievent) & IEVENT_GRSC; +} + +static inline void gfar_wmb(void) +{ +#if defined(CONFIG_PPC) + /* The powerpc-specific eieio() is used, as wmb() has too strong + * semantics (it requires synchronization between cacheable and + * uncacheable mappings, which eieio() doesn't provide and which we + * don't need), thus requiring a more expensive sync instruction. At + * some point, the set of architecture-independent barrier functions + * should be expanded to include weaker barriers. + */ + eieio(); +#else + wmb(); /* order write acesses for BD (or FCB) fields */ +#endif +} + +static inline void gfar_clear_txbd_status(struct txbd8 *bdp) +{ + u32 lstatus = be32_to_cpu(bdp->lstatus); + + lstatus &= BD_LFLAG(TXBD_WRAP); + bdp->lstatus = cpu_to_be32(lstatus); +} + +irqreturn_t gfar_receive(int irq, void *dev_id); +int startup_gfar(struct net_device *dev); +void stop_gfar(struct net_device *dev); +void reset_gfar(struct net_device *dev); +void gfar_mac_reset(struct gfar_private *priv); +void gfar_halt(struct gfar_private *priv); +void gfar_start(struct gfar_private *priv); +void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, + u32 regnum, u32 read); +void gfar_configure_coalescing_all(struct gfar_private *priv); +int gfar_set_features(struct net_device *dev, netdev_features_t features); + +extern const struct ethtool_ops gfar_ethtool_ops; + +#define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX)) + +#define RQFCR_PID_PRI_MASK 0xFFFFFFF8 +#define RQFCR_PID_L4P_MASK 0xFFFFFF00 +#define RQFCR_PID_VID_MASK 0xFFFFF000 +#define RQFCR_PID_PORT_MASK 0xFFFF0000 +#define RQFCR_PID_MAC_MASK 0xFF000000 + +struct gfar_mask_entry { + unsigned int mask; /* The mask value which is valid form start to end */ + unsigned int start; + unsigned int end; + unsigned int block; /* Same block values indicate depended entries */ +}; + +/* Represents a receive filer table entry */ +struct gfar_filer_entry { + u32 ctrl; + u32 prop; +}; + + +/* The 20 additional entries are a shadow for one extra element */ +struct filer_table { + u32 index; + struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; +}; + +/* The gianfar_ptp module will set this variable */ +extern int gfar_phc_index; + +#endif /* __GIANFAR_H */ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c new file mode 100644 index 000000000..fda12fb32 --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -0,0 +1,1911 @@ +/* + * drivers/net/ethernet/freescale/gianfar_ethtool.c + * + * Gianfar Ethernet Driver + * Ethtool support for Gianfar Enet + * Based on e1000 ethtool support + * + * Author: Andy Fleming + * Maintainer: 
Kumar Gala + * Modifier: Sandeep Gopalpet + * + * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc. + * + * This software may be used and distributed according to + * the terms of the GNU Public License, Version 2, incorporated herein + * by reference. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" + +#define GFAR_MAX_COAL_USECS 0xffff +#define GFAR_MAX_COAL_FRAMES 0xff +static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 *buf); +static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); +static int gfar_gcoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals); +static int gfar_scoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals); +static void gfar_gringparam(struct net_device *dev, + struct ethtool_ringparam *rvals); +static int gfar_sringparam(struct net_device *dev, + struct ethtool_ringparam *rvals); +static void gfar_gdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo); + +static const char stat_gstrings[][ETH_GSTRING_LEN] = { + "rx-large-frame-errors", + "rx-short-frame-errors", + "rx-non-octet-errors", + "rx-crc-errors", + "rx-overrun-errors", + "rx-busy-errors", + "rx-babbling-errors", + "rx-truncated-frames", + "ethernet-bus-error", + "tx-babbling-errors", + "tx-underrun-errors", + "rx-skb-missing-errors", + "tx-timeout-errors", + "tx-rx-64-frames", + "tx-rx-65-127-frames", + "tx-rx-128-255-frames", + "tx-rx-256-511-frames", + "tx-rx-512-1023-frames", + "tx-rx-1024-1518-frames", + "tx-rx-1519-1522-good-vlan", + "rx-bytes", + "rx-packets", + "rx-fcs-errors", + "receive-multicast-packet", + "receive-broadcast-packet", + "rx-control-frame-packets", + "rx-pause-frame-packets", + "rx-unknown-op-code", + "rx-alignment-error", + "rx-frame-length-error", + "rx-code-error", + "rx-carrier-sense-error", + "rx-undersize-packets", + "rx-oversize-packets", + "rx-fragmented-frames", + "rx-jabber-frames", + "rx-dropped-frames", + "tx-byte-counter", + "tx-packets", + "tx-multicast-packets", + "tx-broadcast-packets", + "tx-pause-control-frames", + "tx-deferral-packets", + "tx-excessive-deferral-packets", + "tx-single-collision-packets", + "tx-multiple-collision-packets", + "tx-late-collision-packets", + "tx-excessive-collision-packets", + "tx-total-collision", + "reserved", + "tx-dropped-frames", + "tx-jabber-frames", + "tx-fcs-errors", + "tx-control-frames", + "tx-oversize-frames", + "tx-undersize-frames", + "tx-fragmented-frames", +}; + +/* Fill in a buffer with the strings which correspond to the + * stats */ +static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) + memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); + else + memcpy(buf, stat_gstrings, + GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); +} + +/* Fill in an array of 64-bit statistics from various sources. 
+ * This array will be appended to the end of the ethtool_stats + * structure, and returned to user space + */ +static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 *buf) +{ + int i; + struct gfar_private *priv = netdev_priv(dev); + struct gfar __iomem *regs = priv->gfargrp[0].regs; + atomic64_t *extra = (atomic64_t *)&priv->extra_stats; + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) + buf[i] = atomic64_read(&extra[i]); + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { + u32 __iomem *rmon = (u32 __iomem *) ®s->rmon; + + for (; i < GFAR_STATS_LEN; i++, rmon++) + buf[i] = (u64) gfar_read(rmon); + } +} + +static int gfar_sset_count(struct net_device *dev, int sset) +{ + struct gfar_private *priv = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) + return GFAR_STATS_LEN; + else + return GFAR_EXTRA_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +/* Fills in the drvinfo structure with some basic info */ +static void gfar_gdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, gfar_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + + +static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (NULL == phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + + +/* Return the current settings in the ethtool_cmd structure */ +static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + struct gfar_priv_rx_q *rx_queue = NULL; + struct gfar_priv_tx_q *tx_queue = NULL; + + if (NULL == phydev) + return -ENODEV; + tx_queue = priv->tx_queue[0]; + rx_queue = priv->rx_queue[0]; + + /* etsec-1.7 and older versions have only one txic + * and rxic regs although they support multiple queues */ + cmd->maxtxpkt = get_icft_value(tx_queue->txic); + cmd->maxrxpkt = get_icft_value(rx_queue->rxic); + + return phy_ethtool_gset(phydev, cmd); +} + +/* Return the length of the register structure */ +static int gfar_reglen(struct net_device *dev) +{ + return sizeof (struct gfar); +} + +/* Return a dump of the GFAR register space */ +static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *regbuf) +{ + int i; + struct gfar_private *priv = netdev_priv(dev); + u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs; + u32 *buf = (u32 *) regbuf; + + for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) + buf[i] = gfar_read(&theregs[i]); +} + +/* Convert microseconds to ethernet clock ticks, which changes + * depending on what speed the controller is running at */ +static unsigned int gfar_usecs2ticks(struct gfar_private *priv, + unsigned int usecs) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->phydev->speed) { + case SPEED_1000: + count = GFAR_GBIT_TIME; + break; + case SPEED_100: + count = GFAR_100_TIME; + break; + case SPEED_10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater than 0 + * if usecs > 0 */ + return (usecs * 1000 + count - 1) / count; +} 
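+/* Editor's note: a minimal illustrative sketch, not part of the upstream
+ * driver.  It only shows how an ethtool "frames"/"usecs" pair would be
+ * folded into one interrupt-coalescing register value by combining the
+ * speed-dependent tick conversion above with the mk_ic_value() macro from
+ * gianfar.h (the same pattern gfar_scoalesce() uses further below).
+ */
+static inline u32 gfar_coalesce_example(struct gfar_private *priv,
+					unsigned int usecs,
+					unsigned int frames)
+{
+	/* e.g. frames = 16, usecs = 100: the frame-count threshold is used
+	 * as-is, while the time threshold is first converted to controller
+	 * ticks for the current link speed by gfar_usecs2ticks().
+	 */
+	return mk_ic_value(frames, gfar_usecs2ticks(priv, usecs));
+}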
+ +/* Convert ethernet clock ticks to microseconds */ +static unsigned int gfar_ticks2usecs(struct gfar_private *priv, + unsigned int ticks) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->phydev->speed) { + case SPEED_1000: + count = GFAR_GBIT_TIME; + break; + case SPEED_100: + count = GFAR_100_TIME; + break; + case SPEED_10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater than 0 */ + /* if ticks is > 0 */ + return (ticks * count) / 1000; +} + +/* Get the coalescing parameters, and put them in the cvals + * structure. */ +static int gfar_gcoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_rx_q *rx_queue = NULL; + struct gfar_priv_tx_q *tx_queue = NULL; + unsigned long rxtime; + unsigned long rxcount; + unsigned long txtime; + unsigned long txcount; + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) + return -EOPNOTSUPP; + + if (NULL == priv->phydev) + return -ENODEV; + + rx_queue = priv->rx_queue[0]; + tx_queue = priv->tx_queue[0]; + + rxtime = get_ictt_value(rx_queue->rxic); + rxcount = get_icft_value(rx_queue->rxic); + txtime = get_ictt_value(tx_queue->txic); + txcount = get_icft_value(tx_queue->txic); + cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime); + cvals->rx_max_coalesced_frames = rxcount; + + cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime); + cvals->tx_max_coalesced_frames = txcount; + + cvals->use_adaptive_rx_coalesce = 0; + cvals->use_adaptive_tx_coalesce = 0; + + cvals->pkt_rate_low = 0; + cvals->rx_coalesce_usecs_low = 0; + cvals->rx_max_coalesced_frames_low = 0; + cvals->tx_coalesce_usecs_low = 0; + cvals->tx_max_coalesced_frames_low = 0; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + cvals->pkt_rate_high = 0; + cvals->rx_coalesce_usecs_high = 0; + cvals->rx_max_coalesced_frames_high = 0; + cvals->tx_coalesce_usecs_high = 0; + cvals->tx_max_coalesced_frames_high = 0; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + cvals->rate_sample_interval = 0; + + return 0; +} + +/* Change the coalescing values. 
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0 + * in order for coalescing to be active + */ +static int gfar_scoalesce(struct net_device *dev, + struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = netdev_priv(dev); + int i, err = 0; + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) + return -EOPNOTSUPP; + + if (NULL == priv->phydev) + return -ENODEV; + + /* Check the bounds of the values */ + if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + netdev_info(dev, "Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); + return -EINVAL; + } + + if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + netdev_info(dev, "Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + + /* Check the bounds of the values */ + if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + netdev_info(dev, "Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); + return -EINVAL; + } + + if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + netdev_info(dev, "Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + /* Set up rx coalescing */ + if ((cvals->rx_coalesce_usecs == 0) || + (cvals->rx_max_coalesced_frames == 0)) { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 0; + } else { + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rxcoalescing = 1; + } + + for (i = 0; i < priv->num_rx_queues; i++) { + priv->rx_queue[i]->rxic = mk_ic_value( + cvals->rx_max_coalesced_frames, + gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); + } + + /* Set up tx coalescing */ + if ((cvals->tx_coalesce_usecs == 0) || + (cvals->tx_max_coalesced_frames == 0)) { + for (i = 0; i < priv->num_tx_queues; i++) + priv->tx_queue[i]->txcoalescing = 0; + } else { + for (i = 0; i < priv->num_tx_queues; i++) + priv->tx_queue[i]->txcoalescing = 1; + } + + for (i = 0; i < priv->num_tx_queues; i++) { + priv->tx_queue[i]->txic = mk_ic_value( + cvals->tx_max_coalesced_frames, + gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); + } + + if (dev->flags & IFF_UP) { + stop_gfar(dev); + err = startup_gfar(dev); + } else { + gfar_mac_reset(priv); + } + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + + return err; +} + +/* Fills in rvals with the current ring parameters. Currently, + * rx, rx_mini, and rx_jumbo rings are the same size, as mini and + * jumbo are ignored by the driver */ +static void gfar_gringparam(struct net_device *dev, + struct ethtool_ringparam *rvals) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar_priv_tx_q *tx_queue = NULL; + struct gfar_priv_rx_q *rx_queue = NULL; + + tx_queue = priv->tx_queue[0]; + rx_queue = priv->rx_queue[0]; + + rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + rvals->rx_pending = rx_queue->rx_ring_size; + rvals->rx_mini_pending = rx_queue->rx_ring_size; + rvals->rx_jumbo_pending = rx_queue->rx_ring_size; + rvals->tx_pending = tx_queue->tx_ring_size; +} + +/* Change the current ring parameters, stopping the controller if + * necessary so that we don't mess things up while we're in motion. 
+ */ +static int gfar_sringparam(struct net_device *dev, + struct ethtool_ringparam *rvals) +{ + struct gfar_private *priv = netdev_priv(dev); + int err = 0, i; + + if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->rx_pending)) { + netdev_err(dev, "Ring sizes must be a power of 2\n"); + return -EINVAL; + } + + if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->tx_pending)) { + netdev_err(dev, "Ring sizes must be a power of 2\n"); + return -EINVAL; + } + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + if (dev->flags & IFF_UP) + stop_gfar(dev); + + /* Change the sizes */ + for (i = 0; i < priv->num_rx_queues; i++) + priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; + + for (i = 0; i < priv->num_tx_queues; i++) + priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; + + /* Rebuild the rings with the new size */ + if (dev->flags & IFF_UP) + err = startup_gfar(dev); + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + + return err; +} + +static void gfar_gpauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct gfar_private *priv = netdev_priv(dev); + + epause->autoneg = !!priv->pause_aneg_en; + epause->rx_pause = !!priv->rx_pause_en; + epause->tx_pause = !!priv->tx_pause_en; +} + +static int gfar_spauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 oldadv, newadv; + + if (!phydev) + return -ENODEV; + + if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + (epause->rx_pause != epause->tx_pause))) + return -EINVAL; + + priv->rx_pause_en = priv->tx_pause_en = 0; + if (epause->rx_pause) { + priv->rx_pause_en = 1; + + if (epause->tx_pause) { + priv->tx_pause_en = 1; + /* FLOW_CTRL_RX & TX */ + newadv = ADVERTISED_Pause; + } else /* FLOW_CTLR_RX */ + newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + priv->tx_pause_en = 1; + /* FLOW_CTLR_TX */ + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; + + if (epause->autoneg) + priv->pause_aneg_en = 1; + else + priv->pause_aneg_en = 0; + + oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + if (phydev->autoneg) + /* inform link partner of our + * new flow ctrl settings + */ + return phy_start_aneg(phydev); + + if (!epause->autoneg) { + u32 tempval; + tempval = gfar_read(®s->maccfg1); + tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + + priv->tx_actual_en = 0; + if (priv->tx_pause_en) { + priv->tx_actual_en = 1; + tempval |= MACCFG1_TX_FLOW; + } + + if (priv->rx_pause_en) + tempval |= MACCFG1_RX_FLOW; + gfar_write(®s->maccfg1, tempval); + } + } + + return 0; +} + +int gfar_set_features(struct net_device *dev, netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct gfar_private *priv = netdev_priv(dev); + int err = 0; + + if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_RXCSUM))) + return 0; + + while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state)) + cpu_relax(); + + dev->features = features; + + if (dev->flags & IFF_UP) { + /* Now we take down the rings to rebuild them */ + stop_gfar(dev); + err = startup_gfar(dev); + } 
else { + gfar_mac_reset(priv); + } + + clear_bit_unlock(GFAR_RESETTING, &priv->state); + + return err; +} + +static uint32_t gfar_get_msglevel(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void gfar_set_msglevel(struct net_device *dev, uint32_t data) +{ + struct gfar_private *priv = netdev_priv(dev); + + priv->msg_enable = data; +} + +#ifdef CONFIG_PM +static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0; + } else { + wol->supported = wol->wolopts = 0; + } +} + +static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct gfar_private *priv = netdev_priv(dev); + unsigned long flags; + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && + wol->wolopts != 0) + return -EINVAL; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); + + spin_lock_irqsave(&priv->bflock, flags); + priv->wol_en = !!device_may_wakeup(&dev->dev); + spin_unlock_irqrestore(&priv->bflock, flags); + + return 0; +} +#endif + +static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) +{ + u32 fcr = 0x0, fpr = FPR_FILER_MASK; + + if (ethflow & RXH_L2DA) { + fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | + RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + + fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | + RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_VLAN) { + fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_IP_SRC) { + fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & (RXH_IP_DST)) { + fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_L3_PROTO) { + fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_L4_B_0_1) { + fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + 
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + + if (ethflow & RXH_L4_B_2_3) { + fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | + RQFCR_AND | RQFCR_HASHTBL_0; + priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; + priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; + gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } +} + +static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, + u64 class) +{ + unsigned int last_rule_idx = priv->cur_filer_idx; + unsigned int cmp_rqfpr; + unsigned int *local_rqfpr; + unsigned int *local_rqfcr; + int i = 0x0, k = 0x0; + int j = MAX_FILER_IDX, l = 0x0; + int ret = 1; + + local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int), + GFP_KERNEL); + local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int), + GFP_KERNEL); + if (!local_rqfpr || !local_rqfcr) { + ret = 0; + goto err; + } + + switch (class) { + case TCP_V4_FLOW: + cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP; + break; + case UDP_V4_FLOW: + cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP; + break; + case TCP_V6_FLOW: + cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP; + break; + case UDP_V6_FLOW: + cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP; + break; + default: + netdev_err(priv->ndev, + "Right now this class is not supported\n"); + ret = 0; + goto err; + } + + for (i = 0; i < MAX_FILER_IDX + 1; i++) { + local_rqfpr[j] = priv->ftp_rqfpr[i]; + local_rqfcr[j] = priv->ftp_rqfcr[i]; + j--; + if ((priv->ftp_rqfcr[i] == + (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) && + (priv->ftp_rqfpr[i] == cmp_rqfpr)) + break; + } + + if (i == MAX_FILER_IDX + 1) { + netdev_err(priv->ndev, + "No parse rule found, can't create hash rules\n"); + ret = 0; + goto err; + } + + /* If a match was found, then it begins the starting of a cluster rule + * if it was already programmed, we need to overwrite these rules + */ + for (l = i+1; l < MAX_FILER_IDX; l++) { + if ((priv->ftp_rqfcr[l] & RQFCR_CLE) && + !(priv->ftp_rqfcr[l] & RQFCR_AND)) { + priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | + RQFCR_HASHTBL_0 | RQFCR_PID_MASK; + priv->ftp_rqfpr[l] = FPR_FILER_MASK; + gfar_write_filer(priv, l, priv->ftp_rqfcr[l], + priv->ftp_rqfpr[l]); + break; + } + + if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) && + (priv->ftp_rqfcr[l] & RQFCR_AND)) + continue; + else { + local_rqfpr[j] = priv->ftp_rqfpr[l]; + local_rqfcr[j] = priv->ftp_rqfcr[l]; + j--; + } + } + + priv->cur_filer_idx = l - 1; + last_rule_idx = l; + + /* hash rules */ + ethflow_to_filer_rules(priv, ethflow); + + /* Write back the popped out rules again */ + for (k = j+1; k < MAX_FILER_IDX; k++) { + priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; + priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; + gfar_write_filer(priv, priv->cur_filer_idx, + local_rqfcr[k], local_rqfpr[k]); + if (!priv->cur_filer_idx) + break; + priv->cur_filer_idx = priv->cur_filer_idx - 1; + } + +err: + kfree(local_rqfcr); + kfree(local_rqfpr); + return ret; +} + +static int gfar_set_hash_opts(struct gfar_private *priv, + struct ethtool_rxnfc *cmd) +{ + /* write the filer rules here */ + if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type)) + return -EINVAL; + + return 0; +} + +static int gfar_check_filer_hardware(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 i; + + /* Check if we are in FIFO mode */ + i = gfar_read(®s->ecntrl); + i &= ECNTRL_FIFM; + if (i == ECNTRL_FIFM) { + netdev_notice(priv->ndev, "Interface in FIFO mode\n"); + i = 
gfar_read(®s->rctrl); + i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM; + if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) { + netdev_info(priv->ndev, + "Receive Queue Filtering enabled\n"); + } else { + netdev_warn(priv->ndev, + "Receive Queue Filtering disabled\n"); + return -EOPNOTSUPP; + } + } + /* Or in standard mode */ + else { + i = gfar_read(®s->rctrl); + i &= RCTRL_PRSDEP_MASK; + if (i == RCTRL_PRSDEP_MASK) { + netdev_info(priv->ndev, + "Receive Queue Filtering enabled\n"); + } else { + netdev_warn(priv->ndev, + "Receive Queue Filtering disabled\n"); + return -EOPNOTSUPP; + } + } + + /* Sets the properties for arbitrary filer rule + * to the first 4 Layer 4 Bytes + */ + gfar_write(®s->rbifx, 0xC0C1C2C3); + return 0; +} + +static int gfar_comp_asc(const void *a, const void *b) +{ + return memcmp(a, b, 4); +} + +static int gfar_comp_desc(const void *a, const void *b) +{ + return -memcmp(a, b, 4); +} + +static void gfar_swap(void *a, void *b, int size) +{ + u32 *_a = a; + u32 *_b = b; + + swap(_a[0], _b[0]); + swap(_a[1], _b[1]); + swap(_a[2], _b[2]); + swap(_a[3], _b[3]); +} + +/* Write a mask to filer cache */ +static void gfar_set_mask(u32 mask, struct filer_table *tab) +{ + tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT; + tab->fe[tab->index].prop = mask; + tab->index++; +} + +/* Sets parse bits (e.g. IP or TCP) */ +static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab) +{ + gfar_set_mask(mask, tab); + tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | + RQFCR_AND; + tab->fe[tab->index].prop = value; + tab->index++; +} + +static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag, + struct filer_table *tab) +{ + gfar_set_mask(mask, tab); + tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag; + tab->fe[tab->index].prop = value; + tab->index++; +} + +/* For setting a tuple of value and mask of type flag + * Example: + * IP-Src = 10.0.0.0/255.0.0.0 + * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4 + * + * Ethtool gives us a value=0 and mask=~0 for don't care a tuple + * For a don't care mask it gives us a 0 + * + * The check if don't care and the mask adjustment if mask=0 is done for VLAN + * and MAC stuff on an upper level (due to missing information on this level). + * For these guys we can discard them if they are value=0 and mask=0. + * + * Further the all masks are one-padded for better hardware efficiency. 
+ */ +static void gfar_set_attribute(u32 value, u32 mask, u32 flag, + struct filer_table *tab) +{ + switch (flag) { + /* 3bit */ + case RQFCR_PID_PRI: + if (!(value | mask)) + return; + mask |= RQFCR_PID_PRI_MASK; + break; + /* 8bit */ + case RQFCR_PID_L4P: + case RQFCR_PID_TOS: + if (!~(mask | RQFCR_PID_L4P_MASK)) + return; + if (!mask) + mask = ~0; + else + mask |= RQFCR_PID_L4P_MASK; + break; + /* 12bit */ + case RQFCR_PID_VID: + if (!(value | mask)) + return; + mask |= RQFCR_PID_VID_MASK; + break; + /* 16bit */ + case RQFCR_PID_DPT: + case RQFCR_PID_SPT: + case RQFCR_PID_ETY: + if (!~(mask | RQFCR_PID_PORT_MASK)) + return; + if (!mask) + mask = ~0; + else + mask |= RQFCR_PID_PORT_MASK; + break; + /* 24bit */ + case RQFCR_PID_DAH: + case RQFCR_PID_DAL: + case RQFCR_PID_SAH: + case RQFCR_PID_SAL: + if (!(value | mask)) + return; + mask |= RQFCR_PID_MAC_MASK; + break; + /* for all real 32bit masks */ + default: + if (!~mask) + return; + if (!mask) + mask = ~0; + break; + } + gfar_set_general_attribute(value, mask, flag, tab); +} + +/* Translates value and mask for UDP, TCP or SCTP */ +static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value, + struct ethtool_tcpip4_spec *mask, + struct filer_table *tab) +{ + gfar_set_attribute(be32_to_cpu(value->ip4src), + be32_to_cpu(mask->ip4src), + RQFCR_PID_SIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4dst), + be32_to_cpu(mask->ip4dst), + RQFCR_PID_DIA, tab); + gfar_set_attribute(be16_to_cpu(value->pdst), + be16_to_cpu(mask->pdst), + RQFCR_PID_DPT, tab); + gfar_set_attribute(be16_to_cpu(value->psrc), + be16_to_cpu(mask->psrc), + RQFCR_PID_SPT, tab); + gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); +} + +/* Translates value and mask for RAW-IP4 */ +static void gfar_set_user_ip(struct ethtool_usrip4_spec *value, + struct ethtool_usrip4_spec *mask, + struct filer_table *tab) +{ + gfar_set_attribute(be32_to_cpu(value->ip4src), + be32_to_cpu(mask->ip4src), + RQFCR_PID_SIA, tab); + gfar_set_attribute(be32_to_cpu(value->ip4dst), + be32_to_cpu(mask->ip4dst), + RQFCR_PID_DIA, tab); + gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); + gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); + gfar_set_attribute(be32_to_cpu(value->l4_4_bytes), + be32_to_cpu(mask->l4_4_bytes), + RQFCR_PID_ARB, tab); + +} + +/* Translates value and mask for ETHER spec */ +static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask, + struct filer_table *tab) +{ + u32 upper_temp_mask = 0; + u32 lower_temp_mask = 0; + + /* Source address */ + if (!is_broadcast_ether_addr(mask->h_source)) { + if (is_zero_ether_addr(mask->h_source)) { + upper_temp_mask = 0xFFFFFFFF; + lower_temp_mask = 0xFFFFFFFF; + } else { + upper_temp_mask = mask->h_source[0] << 16 | + mask->h_source[1] << 8 | + mask->h_source[2]; + lower_temp_mask = mask->h_source[3] << 16 | + mask->h_source[4] << 8 | + mask->h_source[5]; + } + /* Upper 24bit */ + gfar_set_attribute(value->h_source[0] << 16 | + value->h_source[1] << 8 | + value->h_source[2], + upper_temp_mask, RQFCR_PID_SAH, tab); + /* And the same for the lower part */ + gfar_set_attribute(value->h_source[3] << 16 | + value->h_source[4] << 8 | + value->h_source[5], + lower_temp_mask, RQFCR_PID_SAL, tab); + } + /* Destination address */ + if (!is_broadcast_ether_addr(mask->h_dest)) { + /* Special for destination is limited broadcast */ + if ((is_broadcast_ether_addr(value->h_dest) && + is_zero_ether_addr(mask->h_dest))) { + gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab); + } else { + if 
(is_zero_ether_addr(mask->h_dest)) { + upper_temp_mask = 0xFFFFFFFF; + lower_temp_mask = 0xFFFFFFFF; + } else { + upper_temp_mask = mask->h_dest[0] << 16 | + mask->h_dest[1] << 8 | + mask->h_dest[2]; + lower_temp_mask = mask->h_dest[3] << 16 | + mask->h_dest[4] << 8 | + mask->h_dest[5]; + } + + /* Upper 24bit */ + gfar_set_attribute(value->h_dest[0] << 16 | + value->h_dest[1] << 8 | + value->h_dest[2], + upper_temp_mask, RQFCR_PID_DAH, tab); + /* And the same for the lower part */ + gfar_set_attribute(value->h_dest[3] << 16 | + value->h_dest[4] << 8 | + value->h_dest[5], + lower_temp_mask, RQFCR_PID_DAL, tab); + } + } + + gfar_set_attribute(be16_to_cpu(value->h_proto), + be16_to_cpu(mask->h_proto), + RQFCR_PID_ETY, tab); +} + +static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK; +} + +static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK; +} + +static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK; +} + +static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule) +{ + return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK; +} + +static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule) +{ + return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; +} + +static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule) +{ + return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >> + VLAN_PRIO_SHIFT; +} + +/* Convert a rule to binary filter format of gianfar */ +static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, + struct filer_table *tab) +{ + u32 vlan = 0, vlan_mask = 0; + u32 id = 0, id_mask = 0; + u32 cfi = 0, cfi_mask = 0; + u32 prio = 0, prio_mask = 0; + u32 old_index = tab->index; + + /* Check if vlan is wanted */ + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) { + if (!rule->m_ext.vlan_tci) + rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF); + + vlan = RQFPR_VLN; + vlan_mask = RQFPR_VLN; + + /* Separate the fields */ + id = vlan_tci_vid(rule); + id_mask = vlan_tci_vidm(rule); + cfi = vlan_tci_cfi(rule); + cfi_mask = vlan_tci_cfim(rule); + prio = vlan_tci_prio(rule); + prio_mask = vlan_tci_priom(rule); + + if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { + vlan |= RQFPR_CFI; + vlan_mask |= RQFPR_CFI; + } else if (cfi != VLAN_TAG_PRESENT && + cfi_mask == VLAN_TAG_PRESENT) { + vlan_mask |= RQFPR_CFI; + } + } + + switch (rule->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan, + RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); + gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec, + &rule->m_u.tcp_ip4_spec, tab); + break; + case UDP_V4_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan, + RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); + gfar_set_basic_ip(&rule->h_u.udp_ip4_spec, + &rule->m_u.udp_ip4_spec, tab); + break; + case SCTP_V4_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, + tab); + gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab); + gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u, + (struct ethtool_tcpip4_spec *)&rule->m_u, + tab); + break; + case IP_USER_FLOW: + gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask, + tab); + gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u, + (struct ethtool_usrip4_spec *) &rule->m_u, + tab); + break; + case ETHER_FLOW: + if (vlan) 
+ gfar_set_parse_bits(vlan, vlan_mask, tab); + gfar_set_ether((struct ethhdr *) &rule->h_u, + (struct ethhdr *) &rule->m_u, tab); + break; + default: + return -1; + } + + /* Set the vlan attributes in the end */ + if (vlan) { + gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab); + gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab); + } + + /* If there has been nothing written till now, it must be a default */ + if (tab->index == old_index) { + gfar_set_mask(0xFFFFFFFF, tab); + tab->fe[tab->index].ctrl = 0x20; + tab->fe[tab->index].prop = 0x0; + tab->index++; + } + + /* Remove last AND */ + tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND); + + /* Specify which queue to use or to drop */ + if (rule->ring_cookie == RX_CLS_FLOW_DISC) + tab->fe[tab->index - 1].ctrl |= RQFCR_RJE; + else + tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10); + + /* Only big enough entries can be clustered */ + if (tab->index > (old_index + 2)) { + tab->fe[old_index + 1].ctrl |= RQFCR_CLE; + tab->fe[tab->index - 1].ctrl |= RQFCR_CLE; + } + + /* In rare cases the cache can be full while there is + * free space in hw + */ + if (tab->index > MAX_FILER_CACHE_IDX - 1) + return -EBUSY; + + return 0; +} + +/* Copy size filer entries */ +static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], + struct gfar_filer_entry src[0], s32 size) +{ + while (size > 0) { + size--; + dst[size].ctrl = src[size].ctrl; + dst[size].prop = src[size].prop; + } +} + +/* Delete the contents of the filer-table between start and end + * and collapse them + */ +static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) +{ + int length; + + if (end > MAX_FILER_CACHE_IDX || end < begin) + return -EINVAL; + + end++; + length = end - begin; + + /* Copy */ + while (end < tab->index) { + tab->fe[begin].ctrl = tab->fe[end].ctrl; + tab->fe[begin++].prop = tab->fe[end++].prop; + + } + /* Fill up with don't cares */ + while (begin < tab->index) { + tab->fe[begin].ctrl = 0x60; + tab->fe[begin].prop = 0xFFFFFFFF; + begin++; + } + + tab->index -= length; + return 0; +} + +/* Make space on the wanted location */ +static int gfar_expand_filer_entries(u32 begin, u32 length, + struct filer_table *tab) +{ + if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || + begin > MAX_FILER_CACHE_IDX) + return -EINVAL; + + gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), + tab->index - length + 1); + + tab->index += length; + return 0; +} + +static int gfar_get_next_cluster_start(int start, struct filer_table *tab) +{ + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); + start++) { + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == + (RQFCR_AND | RQFCR_CLE)) + return start; + } + return -1; +} + +static int gfar_get_next_cluster_end(int start, struct filer_table *tab) +{ + for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); + start++) { + if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == + (RQFCR_CLE)) + return start; + } + return -1; +} + +/* Uses hardwares clustering option to reduce + * the number of filer table entries + */ +static void gfar_cluster_filer(struct filer_table *tab) +{ + s32 i = -1, j, iend, jend; + + while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { + j = i; + while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { + /* The cluster entries self and the previous one + * (a mask) must be identical! 
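+ * Only then do both clusters begin with the same compare + * entry and the same mask, which is what allows the second + * cluster's entries to be folded into the first one below.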
+ */ + if (tab->fe[i].ctrl != tab->fe[j].ctrl) + break; + if (tab->fe[i].prop != tab->fe[j].prop) + break; + if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) + break; + if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) + break; + iend = gfar_get_next_cluster_end(i, tab); + jend = gfar_get_next_cluster_end(j, tab); + if (jend == -1 || iend == -1) + break; + + /* First we make some free space, where our cluster + * element should be. Then we copy it there and finally + * delete in from its old location. + */ + if (gfar_expand_filer_entries(iend, (jend - j), tab) == + -EINVAL) + break; + + gfar_copy_filer_entries(&(tab->fe[iend + 1]), + &(tab->fe[jend + 1]), jend - j); + + if (gfar_trim_filer_entries(jend - 1, + jend + (jend - j), + tab) == -EINVAL) + return; + + /* Mask out cluster bit */ + tab->fe[iend].ctrl &= ~(RQFCR_CLE); + } + } +} + +/* Swaps the masked bits of a1<>a2 and b1<>b2 */ +static void gfar_swap_bits(struct gfar_filer_entry *a1, + struct gfar_filer_entry *a2, + struct gfar_filer_entry *b1, + struct gfar_filer_entry *b2, u32 mask) +{ + u32 temp[4]; + temp[0] = a1->ctrl & mask; + temp[1] = a2->ctrl & mask; + temp[2] = b1->ctrl & mask; + temp[3] = b2->ctrl & mask; + + a1->ctrl &= ~mask; + a2->ctrl &= ~mask; + b1->ctrl &= ~mask; + b2->ctrl &= ~mask; + + a1->ctrl |= temp[1]; + a2->ctrl |= temp[0]; + b1->ctrl |= temp[3]; + b2->ctrl |= temp[2]; +} + +/* Generate a list consisting of masks values with their start and + * end of validity and block as indicator for parts belonging + * together (glued by ANDs) in mask_table + */ +static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, + struct filer_table *tab) +{ + u32 i, and_index = 0, block_index = 1; + + for (i = 0; i < tab->index; i++) { + + /* LSByte of control = 0 sets a mask */ + if (!(tab->fe[i].ctrl & 0xF)) { + mask_table[and_index].mask = tab->fe[i].prop; + mask_table[and_index].start = i; + mask_table[and_index].block = block_index; + if (and_index >= 1) + mask_table[and_index - 1].end = i - 1; + and_index++; + } + /* cluster starts and ends will be separated because they should + * hold their position + */ + if (tab->fe[i].ctrl & RQFCR_CLE) + block_index++; + /* A not set AND indicates the end of a depended block */ + if (!(tab->fe[i].ctrl & RQFCR_AND)) + block_index++; + } + + mask_table[and_index - 1].end = i - 1; + + return and_index; +} + +/* Sorts the entries of mask_table by the values of the masks. + * Important: The 0xFF80 flags of the first and last entry of a + * block must hold their position (which queue, CLusterEnable, ReJEct, + * AND) + */ +static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, + struct filer_table *temp_table, u32 and_index) +{ + /* Pointer to compare function (_asc or _desc) */ + int (*gfar_comp)(const void *, const void *); + + u32 i, size = 0, start = 0, prev = 1; + u32 old_first, old_last, new_first, new_last; + + gfar_comp = &gfar_comp_desc; + + for (i = 0; i < and_index; i++) { + if (prev != mask_table[i].block) { + old_first = mask_table[start].start + 1; + old_last = mask_table[i - 1].end; + sort(mask_table + start, size, + sizeof(struct gfar_mask_entry), + gfar_comp, &gfar_swap); + + /* Toggle order for every block. This makes the + * thing more efficient! 
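+ * With alternating sort directions the masks at the border of + * two neighbouring blocks are more likely to be equal, so the + * duplicate-mask pass in gfar_optimize_filer_masks() can drop + * one of them.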
+ */ + if (gfar_comp == gfar_comp_desc) + gfar_comp = &gfar_comp_asc; + else + gfar_comp = &gfar_comp_desc; + + new_first = mask_table[start].start + 1; + new_last = mask_table[i - 1].end; + + gfar_swap_bits(&temp_table->fe[new_first], + &temp_table->fe[old_first], + &temp_table->fe[new_last], + &temp_table->fe[old_last], + RQFCR_QUEUE | RQFCR_CLE | + RQFCR_RJE | RQFCR_AND); + + start = i; + size = 0; + } + size++; + prev = mask_table[i].block; + } +} + +/* Reduces the number of masks needed in the filer table to save entries + * This is done by sorting the masks of a depended block. A depended block is + * identified by gluing ANDs or CLE. The sorting order toggles after every + * block. Of course entries in scope of a mask must change their location with + * it. + */ +static int gfar_optimize_filer_masks(struct filer_table *tab) +{ + struct filer_table *temp_table; + struct gfar_mask_entry *mask_table; + + u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; + s32 ret = 0; + + /* We need a copy of the filer table because + * we want to change its order + */ + temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); + if (temp_table == NULL) + return -ENOMEM; + + mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, + sizeof(struct gfar_mask_entry), GFP_KERNEL); + + if (mask_table == NULL) { + ret = -ENOMEM; + goto end; + } + + and_index = gfar_generate_mask_table(mask_table, tab); + + gfar_sort_mask_table(mask_table, temp_table, and_index); + + /* Now we can copy the data from our duplicated filer table to + * the real one in the order the mask table says + */ + for (i = 0; i < and_index; i++) { + size = mask_table[i].end - mask_table[i].start + 1; + gfar_copy_filer_entries(&(tab->fe[j]), + &(temp_table->fe[mask_table[i].start]), size); + j += size; + } + + /* And finally we just have to check for duplicated masks and drop the + * second ones + */ + for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { + if (tab->fe[i].ctrl == 0x80) { + previous_mask = i++; + break; + } + } + for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { + if (tab->fe[i].ctrl == 0x80) { + if (tab->fe[i].prop == tab->fe[previous_mask].prop) { + /* Two identical ones found! + * So drop the second one! + */ + gfar_trim_filer_entries(i, i, tab); + } else + /* Not identical! 
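+ * Keep this entry as the reference for the next + * comparison.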
*/ + previous_mask = i; + } + } + + kfree(mask_table); +end: kfree(temp_table); + return ret; +} + +/* Write the bit-pattern from software's buffer to hardware registers */ +static int gfar_write_filer_table(struct gfar_private *priv, + struct filer_table *tab) +{ + u32 i = 0; + if (tab->index > MAX_FILER_IDX - 1) + return -EBUSY; + + /* Fill regular entries */ + for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); + i++) + gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); + /* Fill the rest with fall-troughs */ + for (; i < MAX_FILER_IDX - 1; i++) + gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); + /* Last entry must be default accept + * because that's what people expect + */ + gfar_write_filer(priv, i, 0x20, 0x0); + + return 0; +} + +static int gfar_check_capability(struct ethtool_rx_flow_spec *flow, + struct gfar_private *priv) +{ + + if (flow->flow_type & FLOW_EXT) { + if (~flow->m_ext.data[0] || ~flow->m_ext.data[1]) + netdev_warn(priv->ndev, + "User-specific data not supported!\n"); + if (~flow->m_ext.vlan_etype) + netdev_warn(priv->ndev, + "VLAN-etype not supported!\n"); + } + if (flow->flow_type == IP_USER_FLOW) + if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) + netdev_warn(priv->ndev, + "IP-Version differing from IPv4 not supported!\n"); + + return 0; +} + +static int gfar_process_filer_changes(struct gfar_private *priv) +{ + struct ethtool_flow_spec_container *j; + struct filer_table *tab; + s32 i = 0; + s32 ret = 0; + + /* So index is set to zero, too! */ + tab = kzalloc(sizeof(*tab), GFP_KERNEL); + if (tab == NULL) + return -ENOMEM; + + /* Now convert the existing filer data from flow_spec into + * filer tables binary format + */ + list_for_each_entry(j, &priv->rx_list.list, list) { + ret = gfar_convert_to_filer(&j->fs, tab); + if (ret == -EBUSY) { + netdev_err(priv->ndev, + "Rule not added: No free space!\n"); + goto end; + } + if (ret == -1) { + netdev_err(priv->ndev, + "Rule not added: Unsupported Flow-type!\n"); + goto end; + } + } + + i = tab->index; + + /* Optimizations to save entries */ + gfar_cluster_filer(tab); + gfar_optimize_filer_masks(tab); + + pr_debug("\tSummary:\n" + "\tData on hardware: %d\n" + "\tCompression rate: %d%%\n", + tab->index, 100 - (100 * tab->index) / i); + + /* Write everything to hardware */ + ret = gfar_write_filer_table(priv, tab); + if (ret == -EBUSY) { + netdev_err(priv->ndev, "Rule not added: No free space!\n"); + goto end; + } + +end: + kfree(tab); + return ret; +} + +static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow) +{ + u32 i = 0; + + for (i = 0; i < sizeof(flow->m_u); i++) + flow->m_u.hdata[i] ^= 0xFF; + + flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF); + flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); +} + +static int gfar_add_cls(struct gfar_private *priv, + struct ethtool_rx_flow_spec *flow) +{ + struct ethtool_flow_spec_container *temp, *comp; + int ret = 0; + + temp = kmalloc(sizeof(*temp), GFP_KERNEL); + if (temp == NULL) + return -ENOMEM; + memcpy(&temp->fs, flow, sizeof(temp->fs)); + + gfar_invert_masks(&temp->fs); + ret = gfar_check_capability(&temp->fs, priv); + if (ret) + goto clean_mem; + /* Link in the new element at the right @location */ + if (list_empty(&priv->rx_list.list)) { + ret = gfar_check_filer_hardware(priv); + if (ret != 0) + goto clean_mem; + list_add(&temp->list, &priv->rx_list.list); + goto process; + } else { + list_for_each_entry(comp, &priv->rx_list.list, list) { + if 
(comp->fs.location > flow->location) { + list_add_tail(&temp->list, &comp->list); + goto process; + } + if (comp->fs.location == flow->location) { + netdev_err(priv->ndev, + "Rule not added: ID %d not free!\n", + flow->location); + ret = -EBUSY; + goto clean_mem; + } + } + list_add_tail(&temp->list, &priv->rx_list.list); + } + +process: + ret = gfar_process_filer_changes(priv); + if (ret) + goto clean_list; + priv->rx_list.count++; + return ret; + +clean_list: + list_del(&temp->list); +clean_mem: + kfree(temp); + return ret; +} + +static int gfar_del_cls(struct gfar_private *priv, u32 loc) +{ + struct ethtool_flow_spec_container *comp; + u32 ret = -EINVAL; + + if (list_empty(&priv->rx_list.list)) + return ret; + + list_for_each_entry(comp, &priv->rx_list.list, list) { + if (comp->fs.location == loc) { + list_del(&comp->list); + kfree(comp); + priv->rx_list.count--; + gfar_process_filer_changes(priv); + ret = 0; + break; + } + } + + return ret; +} + +static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_flow_spec_container *comp; + u32 ret = -EINVAL; + + list_for_each_entry(comp, &priv->rx_list.list, list) { + if (comp->fs.location == cmd->fs.location) { + memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs)); + gfar_invert_masks(&cmd->fs); + ret = 0; + break; + } + } + + return ret; +} + +static int gfar_get_cls_all(struct gfar_private *priv, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct ethtool_flow_spec_container *comp; + u32 i = 0; + + list_for_each_entry(comp, &priv->rx_list.list, list) { + if (i == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[i] = comp->fs.location; + i++; + } + + cmd->data = MAX_FILER_IDX; + cmd->rule_cnt = i; + + return 0; +} + +static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + int ret = 0; + + if (test_bit(GFAR_RESETTING, &priv->state)) + return -EBUSY; + + mutex_lock(&priv->rx_queue_access); + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = gfar_set_hash_opts(priv, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC && + cmd->fs.ring_cookie >= priv->num_rx_queues) || + cmd->fs.location >= MAX_FILER_IDX) { + ret = -EINVAL; + break; + } + ret = gfar_add_cls(priv, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = gfar_del_cls(priv, cmd->fs.location); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->rx_queue_access); + + return ret; +} + +static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct gfar_private *priv = netdev_priv(dev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->num_rx_queues; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = priv->rx_list.count; + break; + case ETHTOOL_GRXCLSRULE: + ret = gfar_get_cls(priv, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = gfar_get_cls_all(priv, cmd, rule_locs); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +int gfar_phc_index = -1; +EXPORT_SYMBOL(gfar_phc_index); + +static int gfar_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; + } + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + 
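/* gfar_phc_index is filled in by gianfar_ptp.c: gianfar_ptp_probe() + * stores ptp_clock_index() of the registered eTSEC clock here and + * gianfar_ptp_remove() resets it to -1. + */ +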
info->phc_index = gfar_phc_index; + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; +} + +const struct ethtool_ops gfar_ethtool_ops = { + .get_settings = gfar_gsettings, + .set_settings = gfar_ssettings, + .get_drvinfo = gfar_gdrvinfo, + .get_regs_len = gfar_reglen, + .get_regs = gfar_get_regs, + .get_link = ethtool_op_get_link, + .get_coalesce = gfar_gcoalesce, + .set_coalesce = gfar_scoalesce, + .get_ringparam = gfar_gringparam, + .set_ringparam = gfar_sringparam, + .get_pauseparam = gfar_gpauseparam, + .set_pauseparam = gfar_spauseparam, + .get_strings = gfar_gstrings, + .get_sset_count = gfar_sset_count, + .get_ethtool_stats = gfar_fill_stats, + .get_msglevel = gfar_get_msglevel, + .set_msglevel = gfar_set_msglevel, +#ifdef CONFIG_PM + .get_wol = gfar_get_wol, + .set_wol = gfar_set_wol, +#endif + .set_rxnfc = gfar_set_nfc, + .get_rxnfc = gfar_get_nfc, + .get_ts_info = gfar_get_ts_info, +}; diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c new file mode 100644 index 000000000..8e3cd77aa --- /dev/null +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -0,0 +1,574 @@ +/* + * PTP 1588 clock using the eTSEC + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "gianfar.h" + +/* + * gianfar ptp registers + * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 + */ +struct gianfar_ptp_registers { + u32 tmr_ctrl; /* Timer control register */ + u32 tmr_tevent; /* Timestamp event register */ + u32 tmr_temask; /* Timer event mask register */ + u32 tmr_pevent; /* Timestamp event register */ + u32 tmr_pemask; /* Timer event mask register */ + u32 tmr_stat; /* Timestamp status register */ + u32 tmr_cnt_h; /* Timer counter high register */ + u32 tmr_cnt_l; /* Timer counter low register */ + u32 tmr_add; /* Timer drift compensation addend register */ + u32 tmr_acc; /* Timer accumulator register */ + u32 tmr_prsc; /* Timer prescale */ + u8 res1[4]; + u32 tmroff_h; /* Timer offset high */ + u32 tmroff_l; /* Timer offset low */ + u8 res2[8]; + u32 tmr_alarm1_h; /* Timer alarm 1 high register */ + u32 tmr_alarm1_l; /* Timer alarm 1 high register */ + u32 tmr_alarm2_h; /* Timer alarm 2 high register */ + u32 tmr_alarm2_l; /* Timer alarm 2 high register */ + u8 res3[48]; + u32 tmr_fiper1; /* Timer fixed period interval */ + u32 tmr_fiper2; /* Timer fixed period interval */ + u32 tmr_fiper3; /* Timer fixed period interval */ + u8 res4[20]; + u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ +}; + +/* Bit definitions for the TMR_CTRL register */ +#define ALM1P (1<<31) /* Alarm1 output polarity */ +#define ALM2P (1<<30) /* Alarm2 output polarity */ +#define FS (1<<28) /* FIPER start indication */ +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ +#define TCLK_PERIOD_MASK (0x3ff) +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ +#define FRD (1<<14) /* FIPER Realignment Disable */ +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ +#define COPH (1<<7) /* Generated clock output phase. */ +#define CIPH (1<<6) /* External oscillator input clock phase */ +#define TMSR (1<<5) /* Timer soft reset. */ +#define BYP (1<<3) /* Bypass drift compensated clock */ +#define TE (1<<2) /* 1588 timer enable. 
*/ +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ +#define CKSEL_MASK (0x3) + +/* Bit definitions for the TMR_TEVENT register */ +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ +#define ALM2 (1<<17) /* Current time = alarm time register 2 */ +#define ALM1 (1<<16) /* Current time = alarm time register 1 */ +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ + +/* Bit definitions for the TMR_TEMASK register */ +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ +#define ALM2EN (1<<17) /* Timer ALM2 event enable */ +#define ALM1EN (1<<16) /* Timer ALM1 event enable */ +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ + +/* Bit definitions for the TMR_PEVENT register */ +#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ +#define RXP (1<<0) /* PTP frame has been received */ + +/* Bit definitions for the TMR_PEMASK register */ +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ +#define RXPEN (1<<0) /* Receive PTP packet event enable */ + +/* Bit definitions for the TMR_STAT register */ +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ +#define STAT_VEC_MASK (0x3f) + +/* Bit definitions for the TMR_PRSC register */ +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ +#define PRSC_OCK_MASK (0xffff) + + +#define DRIVER "gianfar_ptp" +#define DEFAULT_CKSEL 1 +#define N_EXT_TS 2 +#define REG_SIZE sizeof(struct gianfar_ptp_registers) + +struct etsects { + struct gianfar_ptp_registers __iomem *regs; + spinlock_t lock; /* protects regs */ + struct ptp_clock *clock; + struct ptp_clock_info caps; + struct resource *rsrc; + int irq; + u64 alarm_interval; /* for periodic alarm */ + u64 alarm_value; + u32 tclk_period; /* nanoseconds */ + u32 tmr_prsc; + u32 tmr_add; + u32 cksel; + u32 tmr_fiper1; + u32 tmr_fiper2; +}; + +/* + * Register access functions + */ + +/* Caller must hold etsects->lock. */ +static u64 tmr_cnt_read(struct etsects *etsects) +{ + u64 ns; + u32 lo, hi; + + lo = gfar_read(&etsects->regs->tmr_cnt_l); + hi = gfar_read(&etsects->regs->tmr_cnt_h); + ns = ((u64) hi) << 32; + ns |= lo; + return ns; +} + +/* Caller must hold etsects->lock. */ +static void tmr_cnt_write(struct etsects *etsects, u64 ns) +{ + u32 hi = ns >> 32; + u32 lo = ns & 0xffffffff; + + gfar_write(&etsects->regs->tmr_cnt_l, lo); + gfar_write(&etsects->regs->tmr_cnt_h, hi); +} + +/* Caller must hold etsects->lock. */ +static void set_alarm(struct etsects *etsects) +{ + u64 ns; + u32 lo, hi; + + ns = tmr_cnt_read(etsects) + 1500000000ULL; + ns = div_u64(ns, 1000000000UL) * 1000000000ULL; + ns -= etsects->tclk_period; + hi = ns >> 32; + lo = ns & 0xffffffff; + gfar_write(&etsects->regs->tmr_alarm1_l, lo); + gfar_write(&etsects->regs->tmr_alarm1_h, hi); +} + +/* Caller must hold etsects->lock. 
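+ * + * set_alarm() above programs alarm1 to a whole-second boundary roughly + * 0.5s to 1.5s in the future, minus one timer clock period; rewriting + * tmr_fiper1/2 here restarts the fixed-interval pulses relative to that + * alarm, which should keep them lined up with whole seconds.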
*/ +static void set_fipers(struct etsects *etsects) +{ + set_alarm(etsects); + gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); + gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); +} + +/* + * Interrupt service routine + */ + +static irqreturn_t isr(int irq, void *priv) +{ + struct etsects *etsects = priv; + struct ptp_clock_event event; + u64 ns; + u32 ack = 0, lo, hi, mask, val; + + val = gfar_read(&etsects->regs->tmr_tevent); + + if (val & ETS1) { + ack |= ETS1; + hi = gfar_read(&etsects->regs->tmr_etts1_h); + lo = gfar_read(&etsects->regs->tmr_etts1_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(etsects->clock, &event); + } + + if (val & ETS2) { + ack |= ETS2; + hi = gfar_read(&etsects->regs->tmr_etts2_h); + lo = gfar_read(&etsects->regs->tmr_etts2_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(etsects->clock, &event); + } + + if (val & ALM2) { + ack |= ALM2; + if (etsects->alarm_value) { + event.type = PTP_CLOCK_ALARM; + event.index = 0; + event.timestamp = etsects->alarm_value; + ptp_clock_event(etsects->clock, &event); + } + if (etsects->alarm_interval) { + ns = etsects->alarm_value + etsects->alarm_interval; + hi = ns >> 32; + lo = ns & 0xffffffff; + spin_lock(&etsects->lock); + gfar_write(&etsects->regs->tmr_alarm2_l, lo); + gfar_write(&etsects->regs->tmr_alarm2_h, hi); + spin_unlock(&etsects->lock); + etsects->alarm_value = ns; + } else { + gfar_write(&etsects->regs->tmr_tevent, ALM2); + spin_lock(&etsects->lock); + mask = gfar_read(&etsects->regs->tmr_temask); + mask &= ~ALM2EN; + gfar_write(&etsects->regs->tmr_temask, mask); + spin_unlock(&etsects->lock); + etsects->alarm_value = 0; + etsects->alarm_interval = 0; + } + } + + if (val & PP1) { + ack |= PP1; + event.type = PTP_CLOCK_PPS; + ptp_clock_event(etsects->clock, &event); + } + + if (ack) { + gfar_write(&etsects->regs->tmr_tevent, ack); + return IRQ_HANDLED; + } else + return IRQ_NONE; +} + +/* + * PTP clock operations + */ + +static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 adj; + u32 diff, tmr_add; + int neg_adj = 0; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + tmr_add = etsects->tmr_add; + adj = tmr_add; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + + tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; + + gfar_write(&etsects->regs->tmr_add, tmr_add); + + return 0; +} + +static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + s64 now; + unsigned long flags; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + spin_lock_irqsave(&etsects->lock, flags); + + now = tmr_cnt_read(etsects); + now += delta; + tmr_cnt_write(etsects, now); + + spin_unlock_irqrestore(&etsects->lock, flags); + + set_fipers(etsects); + + return 0; +} + +static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + spin_lock_irqsave(&etsects->lock, flags); + + ns = tmr_cnt_read(etsects); + + spin_unlock_irqrestore(&etsects->lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int ptp_gianfar_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&etsects->lock, flags); + + tmr_cnt_write(etsects, ns); + set_fipers(etsects); + + spin_unlock_irqrestore(&etsects->lock, flags); + + return 0; +} + +static int ptp_gianfar_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct etsects *etsects = container_of(ptp, struct etsects, caps); + unsigned long flags; + u32 bit, mask; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + switch (rq->extts.index) { + case 0: + bit = ETS1EN; + break; + case 1: + bit = ETS2EN; + break; + default: + return -EINVAL; + } + spin_lock_irqsave(&etsects->lock, flags); + mask = gfar_read(&etsects->regs->tmr_temask); + if (on) + mask |= bit; + else + mask &= ~bit; + gfar_write(&etsects->regs->tmr_temask, mask); + spin_unlock_irqrestore(&etsects->lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&etsects->lock, flags); + mask = gfar_read(&etsects->regs->tmr_temask); + if (on) + mask |= PP1EN; + else + mask &= ~PP1EN; + gfar_write(&etsects->regs->tmr_temask, mask); + spin_unlock_irqrestore(&etsects->lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static struct ptp_clock_info ptp_gianfar_caps = { + .owner = THIS_MODULE, + .name = "gianfar clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = N_EXT_TS, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfreq = ptp_gianfar_adjfreq, + .adjtime = ptp_gianfar_adjtime, + .gettime64 = ptp_gianfar_gettime, + .settime64 = ptp_gianfar_settime, + .enable = ptp_gianfar_enable, +}; + +/* OF device tree */ + +static int get_of_u32(struct device_node *node, char *str, u32 *val) +{ + int plen; + const u32 *prop = of_get_property(node, str, &plen); + + if (!prop || plen != sizeof(*prop)) + return -1; + *val = *prop; + return 0; +} + +static int gianfar_ptp_probe(struct platform_device *dev) +{ + struct device_node *node = dev->dev.of_node; + struct etsects *etsects; + struct timespec64 now; + int err = -ENOMEM; + u32 tmr_ctrl; + unsigned long flags; + + etsects = kzalloc(sizeof(*etsects), GFP_KERNEL); + if (!etsects) + goto no_memory; + + err = -ENODEV; + + etsects->caps = ptp_gianfar_caps; + + if (get_of_u32(node, "fsl,cksel", &etsects->cksel)) + etsects->cksel = DEFAULT_CKSEL; + + if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || + get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || + get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) || + get_of_u32(node, 
"fsl,tmr-fiper1", &etsects->tmr_fiper1) || + get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) || + get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) { + pr_err("device tree node missing required elements\n"); + goto no_node; + } + + etsects->irq = platform_get_irq(dev, 0); + + if (etsects->irq == NO_IRQ) { + pr_err("irq not in device tree\n"); + goto no_node; + } + if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) { + pr_err("request_irq failed\n"); + goto no_node; + } + + etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!etsects->rsrc) { + pr_err("no resource\n"); + goto no_resource; + } + if (request_resource(&iomem_resource, etsects->rsrc)) { + pr_err("resource busy\n"); + goto no_resource; + } + + spin_lock_init(&etsects->lock); + + etsects->regs = ioremap(etsects->rsrc->start, + resource_size(etsects->rsrc)); + if (!etsects->regs) { + pr_err("ioremap ptp registers failed\n"); + goto no_ioremap; + } + getnstimeofday64(&now); + ptp_gianfar_settime(&etsects->caps, &now); + + tmr_ctrl = + (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | + (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT; + + spin_lock_irqsave(&etsects->lock, flags); + + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl); + gfar_write(&etsects->regs->tmr_add, etsects->tmr_add); + gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); + gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); + gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); + set_alarm(etsects); + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD); + + spin_unlock_irqrestore(&etsects->lock, flags); + + etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev); + if (IS_ERR(etsects->clock)) { + err = PTR_ERR(etsects->clock); + goto no_clock; + } + gfar_phc_index = ptp_clock_index(etsects->clock); + + platform_set_drvdata(dev, etsects); + + return 0; + +no_clock: + iounmap(etsects->regs); +no_ioremap: + release_resource(etsects->rsrc); +no_resource: + free_irq(etsects->irq, etsects); +no_node: + kfree(etsects); +no_memory: + return err; +} + +static int gianfar_ptp_remove(struct platform_device *dev) +{ + struct etsects *etsects = platform_get_drvdata(dev); + + gfar_write(&etsects->regs->tmr_temask, 0); + gfar_write(&etsects->regs->tmr_ctrl, 0); + + gfar_phc_index = -1; + ptp_clock_unregister(etsects->clock); + iounmap(etsects->regs); + release_resource(etsects->rsrc); + free_irq(etsects->irq, etsects); + kfree(etsects); + + return 0; +} + +static const struct of_device_id match_table[] = { + { .compatible = "fsl,etsec-ptp" }, + {}, +}; + +static struct platform_driver gianfar_ptp_driver = { + .driver = { + .name = "gianfar_ptp", + .of_match_table = match_table, + }, + .probe = gianfar_ptp_probe, + .remove = gianfar_ptp_remove, +}; + +module_platform_driver(gianfar_ptp_driver); + +MODULE_AUTHOR("Richard Cochran "); +MODULE_DESCRIPTION("PTP clock using the eTSEC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c new file mode 100644 index 000000000..4dd40e057 --- /dev/null +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -0,0 +1,3983 @@ +/* + * Copyright (C) 2006-2009 Freescale Semicondutor, Inc. All rights reserved. 
+ * + * Author: Shlomi Gridish + * Li Yang + * + * Description: + * QE UCC Gigabit Ethernet Driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ucc_geth.h" + +#undef DEBUG + +#define ugeth_printk(level, format, arg...) \ + printk(level format "\n", ## arg) + +#define ugeth_dbg(format, arg...) \ + ugeth_printk(KERN_DEBUG , format , ## arg) + +#ifdef UGETH_VERBOSE_DEBUG +#define ugeth_vdbg ugeth_dbg +#else +#define ugeth_vdbg(fmt, args...) do { } while (0) +#endif /* UGETH_VERBOSE_DEBUG */ +#define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 + + +static DEFINE_SPINLOCK(ugeth_lock); + +static struct { + u32 msg_enable; +} debug = { -1 }; + +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); + +static struct ucc_geth_info ugeth_primary_info = { + .uf_info = { + .bd_mem_part = MEM_PART_SYSTEM, + .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, + .max_rx_buf_length = 1536, + /* adjusted at startup if max-speed 1000 */ + .urfs = UCC_GETH_URFS_INIT, + .urfet = UCC_GETH_URFET_INIT, + .urfset = UCC_GETH_URFSET_INIT, + .utfs = UCC_GETH_UTFS_INIT, + .utfet = UCC_GETH_UTFET_INIT, + .utftt = UCC_GETH_UTFTT_INIT, + .ufpt = 256, + .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, + .tenc = UCC_FAST_TX_ENCODING_NRZ, + .renc = UCC_FAST_RX_ENCODING_NRZ, + .tcrc = UCC_FAST_16_BIT_CRC, + .synl = UCC_FAST_SYNC_LEN_NOT_USED, + }, + .numQueuesTx = 1, + .numQueuesRx = 1, + .extendedFilteringChainPointer = ((uint32_t) NULL), + .typeorlen = 3072 /*1536 */ , + .nonBackToBackIfgPart1 = 0x40, + .nonBackToBackIfgPart2 = 0x60, + .miminumInterFrameGapEnforcement = 0x50, + .backToBackInterFrameGap = 0x60, + .mblinterval = 128, + .nortsrbytetime = 5, + .fracsiz = 1, + .strictpriorityq = 0xff, + .altBebTruncation = 0xa, + .excessDefer = 1, + .maxRetransmission = 0xf, + .collisionWindow = 0x37, + .receiveFlowControl = 1, + .transmitFlowControl = 1, + .maxGroupAddrInHash = 4, + .maxIndAddrInHash = 4, + .prel = 7, + .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ + .minFrameLength = 64, + .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */ + .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. 
*/ + .vlantype = 0x8100, + .ecamptr = ((uint32_t) NULL), + .eventRegMask = UCCE_OTHER, + .pausePeriod = 0xf000, + .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, + .bdRingLenTx = { + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN, + TX_BD_RING_LEN}, + + .bdRingLenRx = { + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN, + RX_BD_RING_LEN}, + + .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, + .largestexternallookupkeysize = + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, + .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, + .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, + .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, + .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, + .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, + .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, + .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1, + .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1, + .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, + .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, +}; + +static struct ucc_geth_info ugeth_info[8]; + +#ifdef DEBUG +static void mem_disp(u8 *addr, int size) +{ + u8 *i; + int size16Aling = (size >> 4) << 4; + int size4Aling = (size >> 2) << 2; + int notAlign = 0; + if (size % 16) + notAlign = 1; + + for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) + printk("0x%08x: %08x %08x %08x %08x\r\n", + (u32) i, + *((u32 *) (i)), + *((u32 *) (i + 4)), + *((u32 *) (i + 8)), *((u32 *) (i + 12))); + if (notAlign == 1) + printk("0x%08x: ", (u32) i); + for (; (u32) i < (u32) addr + size4Aling; i += 4) + printk("%08x ", *((u32 *) (i))); + for (; (u32) i < (u32) addr + size; i++) + printk("%02x", *((i))); + if (notAlign == 1) + printk("\r\n"); +} +#endif /* DEBUG */ + +static struct list_head *dequeue(struct list_head *lh) +{ + unsigned long flags; + + spin_lock_irqsave(&ugeth_lock, flags); + if (!list_empty(lh)) { + struct list_head *node = lh->next; + list_del(node); + spin_unlock_irqrestore(&ugeth_lock, flags); + return node; + } else { + spin_unlock_irqrestore(&ugeth_lock, flags); + return NULL; + } +} + +static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, + u8 __iomem *bd) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb(ugeth->ndev, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT); + if (!skb) + return NULL; + + /* We need the data buffer to be aligned properly. We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, + UCC_GETH_RX_DATA_BUF_ALIGNMENT - + (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - + 1))); + + out_be32(&((struct qe_bd __iomem *)bd)->buf, + dma_map_single(ugeth->dev, + skb->data, + ugeth->ug_info->uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE)); + + out_be32((u32 __iomem *)bd, + (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); + + return skb; +} + +static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) +{ + u8 __iomem *bd; + u32 bd_status; + struct sk_buff *skb; + int i; + + bd = ugeth->p_rx_bd_ring[rxQ]; + i = 0; + + do { + bd_status = in_be32((u32 __iomem *)bd); + skb = get_new_skb(ugeth, bd); + + if (!skb) /* If can not allocate data buffer, + abort. 
Cleanup will be elsewhere */ + return -ENOMEM; + + ugeth->rx_skbuff[rxQ][i] = skb; + + /* advance the BD pointer */ + bd += sizeof(struct qe_bd); + i++; + } while (!(bd_status & R_W)); + + return 0; +} + +static int fill_init_enet_entries(struct ucc_geth_private *ugeth, + u32 *p_start, + u8 num_entries, + u32 thread_size, + u32 thread_alignment, + unsigned int risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + if ((snum = qe_get_snum()) < 0) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not get SNUM\n"); + return snum; + } + if ((i == 0) && skip_page_for_first_entry) + /* First entry of Rx does not have page */ + init_enet_offset = 0; + else { + init_enet_offset = + qe_muram_alloc(thread_size, thread_alignment); + if (IS_ERR_VALUE(init_enet_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory\n"); + qe_put_snum((u8) snum); + return -ENOMEM; + } + } + *(p_start++) = + ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset + | risc; + } + + return 0; +} + +static int return_init_enet_entries(struct ucc_geth_private *ugeth, + u32 *p_start, + u8 num_entries, + unsigned int risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + u32 val = *p_start; + + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (val & ENET_INIT_PARAM_PTR_MASK); + qe_muram_free(init_enet_offset); + } + *p_start++ = 0; + } + } + + return 0; +} + +#ifdef DEBUG +static int dump_init_enet_entries(struct ucc_geth_private *ugeth, + u32 __iomem *p_start, + u8 num_entries, + u32 thread_size, + unsigned int risc, + int skip_page_for_first_entry) +{ + u32 init_enet_offset; + u8 i; + int snum; + + for (i = 0; i < num_entries; i++) { + u32 val = in_be32(p_start); + + /* Check that this entry was actually valid -- + needed in case failed in allocations */ + if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { + snum = + (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> + ENET_INIT_PARAM_SNUM_SHIFT; + qe_put_snum((u8) snum); + if (!((i == 0) && skip_page_for_first_entry)) { + /* First entry of Rx does not have page */ + init_enet_offset = + (in_be32(p_start) & + ENET_INIT_PARAM_PTR_MASK); + pr_info("Init enet entry %d:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)qe_muram_addr(init_enet_offset)); + mem_disp(qe_muram_addr(init_enet_offset), + thread_size); + } + p_start++; + } + } + + return 0; +} +#endif + +static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) +{ + kfree(enet_addr_cont); +} + +static void set_mac_addr(__be16 __iomem *reg, u8 *mac) +{ + out_be16(®[0], ((u16)mac[5] << 8) | mac[4]); + out_be16(®[1], ((u16)mac[3] << 8) | mac[2]); + out_be16(®[2], ((u16)mac[1] << 8) | mac[0]); +} + +static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) +{ + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + + if (paddr_num >= NUM_OF_PADDRS) { + pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num); + return -EINVAL; + } + + p_82xx_addr_filt = + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + /* Writing address 
ff.ff.ff.ff.ff.ff disables address + recognition for this register */ + out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); + out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); + + return 0; +} + +static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, + u8 *p_enet_addr) +{ + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + u32 cecr_subblock; + + p_82xx_addr_filt = + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> + addressfiltering; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + + /* Ethernet frames are defined in Little Endian mode, + therefore to insert */ + /* the address to the hash (Big Endian mode), we reverse the bytes.*/ + + set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); + + qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, + QE_CR_PROTOCOL_ETHERNET, 0); +} + +#ifdef DEBUG +static void get_statistics(struct ucc_geth_private *ugeth, + struct ucc_geth_tx_firmware_statistics * + tx_firmware_statistics, + struct ucc_geth_rx_firmware_statistics * + rx_firmware_statistics, + struct ucc_geth_hardware_statistics *hardware_statistics) +{ + struct ucc_fast __iomem *uf_regs; + struct ucc_geth __iomem *ug_regs; + struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; + struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; + + ug_regs = ugeth->ug_regs; + uf_regs = (struct ucc_fast __iomem *) ug_regs; + p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; + p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; + + /* Tx firmware only if user handed pointer and driver actually + gathers Tx firmware statistics */ + if (tx_firmware_statistics && p_tx_fw_statistics_pram) { + tx_firmware_statistics->sicoltx = + in_be32(&p_tx_fw_statistics_pram->sicoltx); + tx_firmware_statistics->mulcoltx = + in_be32(&p_tx_fw_statistics_pram->mulcoltx); + tx_firmware_statistics->latecoltxfr = + in_be32(&p_tx_fw_statistics_pram->latecoltxfr); + tx_firmware_statistics->frabortduecol = + in_be32(&p_tx_fw_statistics_pram->frabortduecol); + tx_firmware_statistics->frlostinmactxer = + in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); + tx_firmware_statistics->carriersenseertx = + in_be32(&p_tx_fw_statistics_pram->carriersenseertx); + tx_firmware_statistics->frtxok = + in_be32(&p_tx_fw_statistics_pram->frtxok); + tx_firmware_statistics->txfrexcessivedefer = + in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); + tx_firmware_statistics->txpkts256 = + in_be32(&p_tx_fw_statistics_pram->txpkts256); + tx_firmware_statistics->txpkts512 = + in_be32(&p_tx_fw_statistics_pram->txpkts512); + tx_firmware_statistics->txpkts1024 = + in_be32(&p_tx_fw_statistics_pram->txpkts1024); + tx_firmware_statistics->txpktsjumbo = + in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); + } + + /* Rx firmware only if user handed pointer and driver actually + * gathers Rx firmware statistics */ + if (rx_firmware_statistics && p_rx_fw_statistics_pram) { + int i; + rx_firmware_statistics->frrxfcser = + in_be32(&p_rx_fw_statistics_pram->frrxfcser); + rx_firmware_statistics->fraligner = + in_be32(&p_rx_fw_statistics_pram->fraligner); + rx_firmware_statistics->inrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); + rx_firmware_statistics->outrangelenrxer = + in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); + rx_firmware_statistics->frtoolong = + in_be32(&p_rx_fw_statistics_pram->frtoolong); + rx_firmware_statistics->runt 
= + in_be32(&p_rx_fw_statistics_pram->runt); + rx_firmware_statistics->verylongevent = + in_be32(&p_rx_fw_statistics_pram->verylongevent); + rx_firmware_statistics->symbolerror = + in_be32(&p_rx_fw_statistics_pram->symbolerror); + rx_firmware_statistics->dropbsy = + in_be32(&p_rx_fw_statistics_pram->dropbsy); + for (i = 0; i < 0x8; i++) + rx_firmware_statistics->res0[i] = + p_rx_fw_statistics_pram->res0[i]; + rx_firmware_statistics->mismatchdrop = + in_be32(&p_rx_fw_statistics_pram->mismatchdrop); + rx_firmware_statistics->underpkts = + in_be32(&p_rx_fw_statistics_pram->underpkts); + rx_firmware_statistics->pkts256 = + in_be32(&p_rx_fw_statistics_pram->pkts256); + rx_firmware_statistics->pkts512 = + in_be32(&p_rx_fw_statistics_pram->pkts512); + rx_firmware_statistics->pkts1024 = + in_be32(&p_rx_fw_statistics_pram->pkts1024); + rx_firmware_statistics->pktsjumbo = + in_be32(&p_rx_fw_statistics_pram->pktsjumbo); + rx_firmware_statistics->frlossinmacer = + in_be32(&p_rx_fw_statistics_pram->frlossinmacer); + rx_firmware_statistics->pausefr = + in_be32(&p_rx_fw_statistics_pram->pausefr); + for (i = 0; i < 0x4; i++) + rx_firmware_statistics->res1[i] = + p_rx_fw_statistics_pram->res1[i]; + rx_firmware_statistics->removevlan = + in_be32(&p_rx_fw_statistics_pram->removevlan); + rx_firmware_statistics->replacevlan = + in_be32(&p_rx_fw_statistics_pram->replacevlan); + rx_firmware_statistics->insertvlan = + in_be32(&p_rx_fw_statistics_pram->insertvlan); + } + + /* Hardware only if user handed pointer and driver actually + gathers hardware statistics */ + if (hardware_statistics && + (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { + hardware_statistics->tx64 = in_be32(&ug_regs->tx64); + hardware_statistics->tx127 = in_be32(&ug_regs->tx127); + hardware_statistics->tx255 = in_be32(&ug_regs->tx255); + hardware_statistics->rx64 = in_be32(&ug_regs->rx64); + hardware_statistics->rx127 = in_be32(&ug_regs->rx127); + hardware_statistics->rx255 = in_be32(&ug_regs->rx255); + hardware_statistics->txok = in_be32(&ug_regs->txok); + hardware_statistics->txcf = in_be16(&ug_regs->txcf); + hardware_statistics->tmca = in_be32(&ug_regs->tmca); + hardware_statistics->tbca = in_be32(&ug_regs->tbca); + hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); + hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); + hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); + hardware_statistics->rmca = in_be32(&ug_regs->rmca); + hardware_statistics->rbca = in_be32(&ug_regs->rbca); + } +} + +static void dump_bds(struct ucc_geth_private *ugeth) +{ + int i; + int length; + + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + if (ugeth->p_tx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenTx[i] * + sizeof(struct qe_bd)); + pr_info("TX BDs[%d]\n", i); + mem_disp(ugeth->p_tx_bd_ring[i], length); + } + } + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + length = + (ugeth->ug_info->bdRingLenRx[i] * + sizeof(struct qe_bd)); + pr_info("RX BDs[%d]\n", i); + mem_disp(ugeth->p_rx_bd_ring[i], length); + } + } +} + +static void dump_regs(struct ucc_geth_private *ugeth) +{ + int i; + + pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1); + pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs); + + pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->maccfg1, + in_be32(&ugeth->ug_regs->maccfg1)); + pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->maccfg2, + in_be32(&ugeth->ug_regs->maccfg2)); + pr_info("ipgifg : addr - 0x%08x, val - 
0x%08x\n", + (u32)&ugeth->ug_regs->ipgifg, + in_be32(&ugeth->ug_regs->ipgifg)); + pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->hafdup, + in_be32(&ugeth->ug_regs->hafdup)); + pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->ifctl, + in_be32(&ugeth->ug_regs->ifctl)); + pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->ifstat, + in_be32(&ugeth->ug_regs->ifstat)); + pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->macstnaddr1, + in_be32(&ugeth->ug_regs->macstnaddr1)); + pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->macstnaddr2, + in_be32(&ugeth->ug_regs->macstnaddr2)); + pr_info("uempr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->uempr, + in_be32(&ugeth->ug_regs->uempr)); + pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->utbipar, + in_be32(&ugeth->ug_regs->utbipar)); + pr_info("uescr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->ug_regs->uescr, + in_be16(&ugeth->ug_regs->uescr)); + pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tx64, + in_be32(&ugeth->ug_regs->tx64)); + pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tx127, + in_be32(&ugeth->ug_regs->tx127)); + pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tx255, + in_be32(&ugeth->ug_regs->tx255)); + pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rx64, + in_be32(&ugeth->ug_regs->rx64)); + pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rx127, + in_be32(&ugeth->ug_regs->rx127)); + pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rx255, + in_be32(&ugeth->ug_regs->rx255)); + pr_info("txok : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->txok, + in_be32(&ugeth->ug_regs->txok)); + pr_info("txcf : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->ug_regs->txcf, + in_be16(&ugeth->ug_regs->txcf)); + pr_info("tmca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tmca, + in_be32(&ugeth->ug_regs->tmca)); + pr_info("tbca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->tbca, + in_be32(&ugeth->ug_regs->tbca)); + pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rxfok, + in_be32(&ugeth->ug_regs->rxfok)); + pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rxbok, + in_be32(&ugeth->ug_regs->rxbok)); + pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rbyt, + in_be32(&ugeth->ug_regs->rbyt)); + pr_info("rmca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rmca, + in_be32(&ugeth->ug_regs->rmca)); + pr_info("rbca : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->rbca, + in_be32(&ugeth->ug_regs->rbca)); + pr_info("scar : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->scar, + in_be32(&ugeth->ug_regs->scar)); + pr_info("scam : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->ug_regs->scam, + in_be32(&ugeth->ug_regs->scam)); + + if (ugeth->p_thread_data_tx) { + int numThreadsTxNumerical; + switch (ugeth->ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + numThreadsTxNumerical = 0; + 
break; + } + + pr_info("Thread data TXs:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_thread_data_tx); + for (i = 0; i < numThreadsTxNumerical; i++) { + pr_info("Thread data TX[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_thread_data_tx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_tx[i], + sizeof(struct ucc_geth_thread_data_tx)); + } + } + if (ugeth->p_thread_data_rx) { + int numThreadsRxNumerical; + switch (ugeth->ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + numThreadsRxNumerical = 0; + break; + } + + pr_info("Thread data RX:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_thread_data_rx); + for (i = 0; i < numThreadsRxNumerical; i++) { + pr_info("Thread data RX[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_thread_data_rx[i]); + mem_disp((u8 *) & ugeth->p_thread_data_rx[i], + sizeof(struct ucc_geth_thread_data_rx)); + } + } + if (ugeth->p_exf_glbl_param) { + pr_info("EXF global param:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_exf_glbl_param); + mem_disp((u8 *) ugeth->p_exf_glbl_param, + sizeof(*ugeth->p_exf_glbl_param)); + } + if (ugeth->p_tx_glbl_pram) { + pr_info("TX global param:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram); + pr_info("temoder : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_tx_glbl_pram->temoder, + in_be16(&ugeth->p_tx_glbl_pram->temoder)); + pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->sqptr, + in_be32(&ugeth->p_tx_glbl_pram->sqptr)); + pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer, + in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer)); + pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr, + in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); + pr_info("tstate : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->tstate, + in_be32(&ugeth->p_tx_glbl_pram->tstate)); + pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[0], + ugeth->p_tx_glbl_pram->iphoffset[0]); + pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[1], + ugeth->p_tx_glbl_pram->iphoffset[1]); + pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[2], + ugeth->p_tx_glbl_pram->iphoffset[2]); + pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[3], + ugeth->p_tx_glbl_pram->iphoffset[3]); + pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[4], + ugeth->p_tx_glbl_pram->iphoffset[4]); + pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[5], + ugeth->p_tx_glbl_pram->iphoffset[5]); + pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[6], + ugeth->p_tx_glbl_pram->iphoffset[6]); + pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_tx_glbl_pram->iphoffset[7], + ugeth->p_tx_glbl_pram->iphoffset[7]); + pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n", + 
(u32)&ugeth->p_tx_glbl_pram->vtagtable[0], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); + pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[1], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); + pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[2], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); + pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[3], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); + pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[4], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); + pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[5], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); + pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[6], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); + pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->vtagtable[7], + in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); + pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_tx_glbl_pram->tqptr, + in_be32(&ugeth->p_tx_glbl_pram->tqptr)); + } + if (ugeth->p_rx_glbl_pram) { + pr_info("RX global param:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram); + pr_info("remoder : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->remoder, + in_be32(&ugeth->p_rx_glbl_pram->remoder)); + pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->rqptr, + in_be32(&ugeth->p_rx_glbl_pram->rqptr)); + pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->typeorlen, + in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); + pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_rx_glbl_pram->rxgstpack, + ugeth->p_rx_glbl_pram->rxgstpack); + pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr, + in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); + pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr, + in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); + pr_info("rstate : addr - 0x%08x, val - 0x%02x\n", + (u32)&ugeth->p_rx_glbl_pram->rstate, + ugeth->p_rx_glbl_pram->rstate); + pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->mrblr, + in_be16(&ugeth->p_rx_glbl_pram->mrblr)); + pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->rbdqptr, + in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); + pr_info("mflr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->mflr, + in_be16(&ugeth->p_rx_glbl_pram->mflr)); + pr_info("minflr : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->minflr, + in_be16(&ugeth->p_rx_glbl_pram->minflr)); + pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->maxd1, + in_be16(&ugeth->p_rx_glbl_pram->maxd1)); + pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->maxd2, + in_be16(&ugeth->p_rx_glbl_pram->maxd2)); + pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->ecamptr, + in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); + pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l2qt, + in_be32(&ugeth->p_rx_glbl_pram->l2qt)); + pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n", + 
(u32)&ugeth->p_rx_glbl_pram->l3qt[0], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); + pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[1], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); + pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[2], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); + pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[3], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); + pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[4], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); + pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[5], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); + pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[6], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); + pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->l3qt[7], + in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); + pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->vlantype, + in_be16(&ugeth->p_rx_glbl_pram->vlantype)); + pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n", + (u32)&ugeth->p_rx_glbl_pram->vlantci, + in_be16(&ugeth->p_rx_glbl_pram->vlantci)); + for (i = 0; i < 64; i++) + pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n", + i, + (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i], + ugeth->p_rx_glbl_pram->addressfiltering[i]); + pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam, + in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); + } + if (ugeth->p_send_q_mem_reg) { + pr_info("Send Q memory registers:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg); + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + pr_info("SQQD[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_send_q_mem_reg->sqqd[i]); + mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], + sizeof(struct ucc_geth_send_queue_qd)); + } + } + if (ugeth->p_scheduler) { + pr_info("Scheduler:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler); + mem_disp((u8 *) ugeth->p_scheduler, + sizeof(*ugeth->p_scheduler)); + } + if (ugeth->p_tx_fw_statistics_pram) { + pr_info("TX FW statistics pram:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_tx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, + sizeof(*ugeth->p_tx_fw_statistics_pram)); + } + if (ugeth->p_rx_fw_statistics_pram) { + pr_info("RX FW statistics pram:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_rx_fw_statistics_pram); + mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, + sizeof(*ugeth->p_rx_fw_statistics_pram)); + } + if (ugeth->p_rx_irq_coalescing_tbl) { + pr_info("RX IRQ coalescing tables:\n"); + pr_info("Base address: 0x%08x\n", + (u32)ugeth->p_rx_irq_coalescing_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + pr_info("RX IRQ coalescing table entry[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]); + pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingmaxvalue, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. 
+ interruptcoalescingmaxvalue)); + pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i].interruptcoalescingcounter, + in_be32(&ugeth->p_rx_irq_coalescing_tbl-> + coalescingentry[i]. + interruptcoalescingcounter)); + } + } + if (ugeth->p_rx_bd_qs_tbl) { + pr_info("RX BD QS tables:\n"); + pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl); + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + pr_info("RX BD QS table[%d]:\n", i); + pr_info("Base address: 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i]); + pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); + pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); + pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i]. + externalbdbaseptr)); + pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n", + (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr, + in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); + pr_info("ucode RX Prefetched BDs:\n"); + pr_info("Base address: 0x%08x\n", + (u32)qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. + bdbaseptr))); + mem_disp((u8 *) + qe_muram_addr(in_be32 + (&ugeth->p_rx_bd_qs_tbl[i]. + bdbaseptr)), + sizeof(struct ucc_geth_rx_prefetched_bds)); + } + } + if (ugeth->p_init_enet_param_shadow) { + int size; + pr_info("Init enet param shadow:\n"); + pr_info("Base address: 0x%08x\n", + (u32) ugeth->p_init_enet_param_shadow); + mem_disp((u8 *) ugeth->p_init_enet_param_shadow, + sizeof(*ugeth->p_init_enet_param_shadow)); + + size = sizeof(struct ucc_geth_thread_rx_pram); + if (ugeth->ug_info->rxExtendedFiltering) { + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ugeth->ug_info->largestexternallookupkeysize == + QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + sizeof(struct ucc_geth_thread_tx_pram), + ugeth->ug_info->riscTx, 0); + dump_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, size, + ugeth->ug_info->riscRx, 1); + } +} +#endif /* DEBUG */ + +static void init_default_reg_vals(u32 __iomem *upsmr_register, + u32 __iomem *maccfg1_register, + u32 __iomem *maccfg2_register) +{ + out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); + out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); + out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); +} + +static int init_half_duplex_params(int alt_beb, + int back_pressure_no_backoff, + int no_backoff, + int excess_defer, + u8 alt_beb_truncation, + u8 max_retransmissions, + u8 collision_window, + u32 __iomem *hafdup_register) +{ + u32 value = 0; + + if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || + (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || + (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) + return -EINVAL; + + value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); + + if (alt_beb) + value |= HALFDUP_ALT_BEB; + if (back_pressure_no_backoff) + value |= 
HALFDUP_BACK_PRESSURE_NO_BACKOFF; + if (no_backoff) + value |= HALFDUP_NO_BACKOFF; + if (excess_defer) + value |= HALFDUP_EXCESSIVE_DEFER; + + value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); + + value |= collision_window; + + out_be32(hafdup_register, value); + return 0; +} + +static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, + u8 non_btb_ipg, + u8 min_ifg, + u8 btb_ipg, + u32 __iomem *ipgifg_register) +{ + u32 value = 0; + + /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back + IPG part 2 */ + if (non_btb_cs_ipg > non_btb_ipg) + return -EINVAL; + + if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || + (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || + /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ + (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) + return -EINVAL; + + value |= + ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & + IPGIFG_NBTB_CS_IPG_MASK); + value |= + ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & + IPGIFG_NBTB_IPG_MASK); + value |= + ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & + IPGIFG_MIN_IFG_MASK); + value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); + + out_be32(ipgifg_register, value); + return 0; +} + +int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, + int tx_flow_control_enable, + u16 pause_period, + u16 extension_field, + u32 __iomem *upsmr_register, + u32 __iomem *uempr_register, + u32 __iomem *maccfg1_register) +{ + u32 value = 0; + + /* Set UEMPR register */ + value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; + value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; + out_be32(uempr_register, value); + + /* Set UPSMR register */ + setbits32(upsmr_register, automatic_flow_control_mode); + + value = in_be32(maccfg1_register); + if (rx_flow_control_enable) + value |= MACCFG1_FLOW_RX; + if (tx_flow_control_enable) + value |= MACCFG1_FLOW_TX; + out_be32(maccfg1_register, value); + + return 0; +} + +static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, + int auto_zero_hardware_statistics, + u32 __iomem *upsmr_register, + u16 __iomem *uescr_register) +{ + u16 uescr_value = 0; + + /* Enable hardware statistics gathering if requested */ + if (enable_hardware_statistics) + setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); + + /* Clear hardware statistics counters */ + uescr_value = in_be16(uescr_register); + uescr_value |= UESCR_CLRCNT; + /* Automatically zero hardware statistics counters on read, + if requested */ + if (auto_zero_hardware_statistics) + uescr_value |= UESCR_AUTOZ; + out_be16(uescr_register, uescr_value); + + return 0; +} + +static int init_firmware_statistics_gathering_mode(int + enable_tx_firmware_statistics, + int enable_rx_firmware_statistics, + u32 __iomem *tx_rmon_base_ptr, + u32 tx_firmware_statistics_structure_address, + u32 __iomem *rx_rmon_base_ptr, + u32 rx_firmware_statistics_structure_address, + u16 __iomem *temoder_register, + u32 __iomem *remoder_register) +{ + /* Note: this function does not check if */ + /* the parameters it receives are NULL */ + + if (enable_tx_firmware_statistics) { + out_be32(tx_rmon_base_ptr, + tx_firmware_statistics_structure_address); + setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); + } + + if (enable_rx_firmware_statistics) { + out_be32(rx_rmon_base_ptr, + rx_firmware_statistics_structure_address); + setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); + } + + return 0; +} + +static int init_mac_station_addr_regs(u8 
address_byte_0, + u8 address_byte_1, + u8 address_byte_2, + u8 address_byte_3, + u8 address_byte_4, + u8 address_byte_5, + u32 __iomem *macstnaddr1_register, + u32 __iomem *macstnaddr2_register) +{ + u32 value = 0; + + /* Example: for a station address of 0x12345678ABCD, */ + /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ + + /* MACSTNADDR1 Register: */ + + /* 0 7 8 15 */ + /* station address byte 5 station address byte 4 */ + /* 16 23 24 31 */ + /* station address byte 3 station address byte 2 */ + value |= (u32) ((address_byte_2 << 0) & 0x000000FF); + value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); + value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_5 << 24) & 0xFF000000); + + out_be32(macstnaddr1_register, value); + + /* MACSTNADDR2 Register: */ + + /* 0 7 8 15 */ + /* station address byte 1 station address byte 0 */ + /* 16 23 24 31 */ + /* reserved reserved */ + value = 0; + value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); + value |= (u32) ((address_byte_1 << 24) & 0xFF000000); + + out_be32(macstnaddr2_register, value); + + return 0; +} + +static int init_check_frame_length_mode(int length_check, + u32 __iomem *maccfg2_register) +{ + u32 value = 0; + + value = in_be32(maccfg2_register); + + if (length_check) + value |= MACCFG2_LC; + else + value &= ~MACCFG2_LC; + + out_be32(maccfg2_register, value); + return 0; +} + +static int init_preamble_length(u8 preamble_length, + u32 __iomem *maccfg2_register) +{ + if ((preamble_length < 3) || (preamble_length > 7)) + return -EINVAL; + + clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, + preamble_length << MACCFG2_PREL_SHIFT); + + return 0; +} + +static int init_rx_parameters(int reject_broadcast, + int receive_short_frames, + int promiscuous, u32 __iomem *upsmr_register) +{ + u32 value = 0; + + value = in_be32(upsmr_register); + + if (reject_broadcast) + value |= UCC_GETH_UPSMR_BRO; + else + value &= ~UCC_GETH_UPSMR_BRO; + + if (receive_short_frames) + value |= UCC_GETH_UPSMR_RSH; + else + value &= ~UCC_GETH_UPSMR_RSH; + + if (promiscuous) + value |= UCC_GETH_UPSMR_PRO; + else + value &= ~UCC_GETH_UPSMR_PRO; + + out_be32(upsmr_register, value); + + return 0; +} + +static int init_max_rx_buff_len(u16 max_rx_buf_len, + u16 __iomem *mrblr_register) +{ + /* max_rx_buf_len value must be a multiple of 128 */ + if ((max_rx_buf_len == 0) || + (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) + return -EINVAL; + + out_be16(mrblr_register, max_rx_buf_len); + return 0; +} + +static int init_min_frame_len(u16 min_frame_length, + u16 __iomem *minflr_register, + u16 __iomem *mrblr_register) +{ + u16 mrblr_value = 0; + + mrblr_value = in_be16(mrblr_register); + if (min_frame_length >= (mrblr_value - 4)) + return -EINVAL; + + out_be16(minflr_register, min_frame_length); + return 0; +} + +static int adjust_enet_interface(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_geth __iomem *ug_regs; + struct ucc_fast __iomem *uf_regs; + int ret_val; + u32 upsmr, maccfg2; + u16 value; + + ugeth_vdbg("%s: IN", __func__); + + ug_info = ugeth->ug_info; + ug_regs = ugeth->ug_regs; + uf_regs = ugeth->uccf->uf_regs; + + /* Set MACCFG2 */ + maccfg2 = in_be32(&ug_regs->maccfg2); + maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; + if ((ugeth->max_speed == SPEED_10) || + (ugeth->max_speed == SPEED_100)) + maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; + else if (ugeth->max_speed == SPEED_1000) + maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; + maccfg2 |= ug_info->padAndCrc; + out_be32(&ug_regs->maccfg2, 
maccfg2); + + /* Set UPSMR */ + upsmr = in_be32(&uf_regs->upsmr); + upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | + UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); + if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { + if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) + upsmr |= UCC_GETH_UPSMR_RPM; + switch (ugeth->max_speed) { + case SPEED_10: + upsmr |= UCC_GETH_UPSMR_R10M; + /* FALLTHROUGH */ + case SPEED_100: + if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) + upsmr |= UCC_GETH_UPSMR_RMM; + } + } + if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { + upsmr |= UCC_GETH_UPSMR_TBIM; + } + if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)) + upsmr |= UCC_GETH_UPSMR_SGMM; + + out_be32(&uf_regs->upsmr, upsmr); + + /* Disable autonegotiation in tbi mode, because by default it + comes up in autonegotiation mode. */ + /* Note that this depends on proper setting in utbipar register. */ + if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { + struct ucc_geth_info *ug_info = ugeth->ug_info; + struct phy_device *tbiphy; + + if (!ug_info->tbi_node) + pr_warn("TBI mode requires that the device tree specify a tbi-handle\n"); + + tbiphy = of_phy_find_device(ug_info->tbi_node); + if (!tbiphy) + pr_warn("Could not get TBI device\n"); + + value = phy_read(tbiphy, ENET_TBI_MII_CR); + value &= ~0x1000; /* Turn off autonegotiation */ + phy_write(tbiphy, ENET_TBI_MII_CR, value); + } + + init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); + + ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); + if (ret_val != 0) { + if (netif_msg_probe(ugeth)) + pr_err("Preamble length must be between 3 and 7 inclusive\n"); + return ret_val; + } + + return 0; +} + +static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) +{ + struct ucc_fast_private *uccf; + u32 cecr_subblock; + u32 temp; + int i = 10; + + uccf = ugeth->uccf; + + /* Mask GRACEFUL STOP TX interrupt bit and clear it */ + clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); + out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ + + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, + QE_CR_PROTOCOL_ETHERNET, 0); + + /* Wait for command to complete */ + do { + msleep(10); + temp = in_be32(uccf->p_ucce); + } while (!(temp & UCC_GETH_UCCE_GRA) && --i); + + uccf->stopped_tx = 1; + + return 0; +} + +static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) +{ + struct ucc_fast_private *uccf; + u32 cecr_subblock; + u8 temp; + int i = 10; + + uccf = ugeth->uccf; + + /* Clear acknowledge bit */ + temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); + temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; + out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); + + /* Keep issuing command and checking acknowledge bit until + it is asserted, according to spec */ + do { + /* Issue host command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. 
+ ucc_num); + qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, + QE_CR_PROTOCOL_ETHERNET, 0); + msleep(10); + temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); + } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); + + uccf->stopped_rx = 1; + + return 0; +} + +static int ugeth_restart_tx(struct ucc_geth_private *ugeth) +{ + struct ucc_fast_private *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); + uccf->stopped_tx = 0; + + return 0; +} + +static int ugeth_restart_rx(struct ucc_geth_private *ugeth) +{ + struct ucc_fast_private *uccf; + u32 cecr_subblock; + + uccf = ugeth->uccf; + + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, + 0); + uccf->stopped_rx = 0; + + return 0; +} + +static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) +{ + struct ucc_fast_private *uccf; + int enabled_tx, enabled_rx; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. */ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + if (netif_msg_probe(ugeth)) + pr_err("ucc_num out of range\n"); + return -EINVAL; + } + + enabled_tx = uccf->enabled_tx; + enabled_rx = uccf->enabled_rx; + + /* Get Tx and Rx going again, in case this channel was actively + disabled. */ + if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) + ugeth_restart_tx(ugeth); + if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) + ugeth_restart_rx(ugeth); + + ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ + + return 0; + +} + +static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) +{ + struct ucc_fast_private *uccf; + + uccf = ugeth->uccf; + + /* check if the UCC number is in range. */ + if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { + if (netif_msg_probe(ugeth)) + pr_err("ucc_num out of range\n"); + return -EINVAL; + } + + /* Stop any transmissions */ + if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) + ugeth_graceful_stop_tx(ugeth); + + /* Stop any receptions */ + if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) + ugeth_graceful_stop_rx(ugeth); + + ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ + + return 0; +} + +static void ugeth_quiesce(struct ucc_geth_private *ugeth) +{ + /* Prevent any further xmits, plus detach the device. */ + netif_device_detach(ugeth->ndev); + + /* Wait for any current xmits to finish. */ + netif_tx_disable(ugeth->ndev); + + /* Disable the interrupt to avoid NAPI rescheduling. */ + disable_irq(ugeth->ug_info->uf_info.irq); + + /* Stop NAPI, and possibly wait for its completion. */ + napi_disable(&ugeth->napi); +} + +static void ugeth_activate(struct ucc_geth_private *ugeth) +{ + napi_enable(&ugeth->napi); + enable_irq(ugeth->ug_info->uf_info.irq); + netif_device_attach(ugeth->ndev); +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the ugeth structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. 
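+ * In this driver the callback is registered from init_phy() via + * of_phy_connect(); on a link, speed or duplex change it rewrites + * MACCFG2 and UPSMR while the controller is quiesced.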
+ */ + +static void adjust_link(struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + struct ucc_geth __iomem *ug_regs; + struct ucc_fast __iomem *uf_regs; + struct phy_device *phydev = ugeth->phydev; + int new_state = 0; + + ug_regs = ugeth->ug_regs; + uf_regs = ugeth->uccf->uf_regs; + + if (phydev->link) { + u32 tempval = in_be32(&ug_regs->maccfg2); + u32 upsmr = in_be32(&uf_regs->upsmr); + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != ugeth->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) + tempval &= ~(MACCFG2_FDX); + else + tempval |= MACCFG2_FDX; + ugeth->oldduplex = phydev->duplex; + } + + if (phydev->speed != ugeth->oldspeed) { + new_state = 1; + switch (phydev->speed) { + case SPEED_1000: + tempval = ((tempval & + ~(MACCFG2_INTERFACE_MODE_MASK)) | + MACCFG2_INTERFACE_MODE_BYTE); + break; + case SPEED_100: + case SPEED_10: + tempval = ((tempval & + ~(MACCFG2_INTERFACE_MODE_MASK)) | + MACCFG2_INTERFACE_MODE_NIBBLE); + /* if reduced mode, re-set UPSMR.R10M */ + if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || + (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { + if (phydev->speed == SPEED_10) + upsmr |= UCC_GETH_UPSMR_R10M; + else + upsmr &= ~UCC_GETH_UPSMR_R10M; + } + break; + default: + if (netif_msg_link(ugeth)) + pr_warn( + "%s: Ack! Speed (%d) is not 10/100/1000!", + dev->name, phydev->speed); + break; + } + ugeth->oldspeed = phydev->speed; + } + + if (!ugeth->oldlink) { + new_state = 1; + ugeth->oldlink = 1; + } + + if (new_state) { + /* + * To change the MAC configuration we need to disable + * the controller. To do so, we have to either grab + * ugeth->lock, which is a bad idea since 'graceful + * stop' commands might take quite a while, or we can + * quiesce driver's activity. + */ + ugeth_quiesce(ugeth); + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + out_be32(&ug_regs->maccfg2, tempval); + out_be32(&uf_regs->upsmr, upsmr); + + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + ugeth_activate(ugeth); + } + } else if (ugeth->oldlink) { + new_state = 1; + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + } + + if (new_state && netif_msg_link(ugeth)) + phy_print_status(phydev); +} + +/* Initialize TBI PHY interface for communicating with the + * SERDES lynx PHY on the chip. We communicate with this PHY + * through the MDIO bus on each controller, treating it as a + * "normal" PHY at the address found in the UTBIPA register. We assume + * that the UTBIPA register is valid. Either the MDIO bus code will set + * it to a value that doesn't conflict with other PHYs on the bus, or the + * value doesn't matter, as there are no other PHYs on the bus. 
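+ * The function below leaves the TBI PHY alone when its link is already + * up (e.g. when the boot loader configured it), since resetting the + * TBI<->SerDes link would take it down for several seconds.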
+ */ +static void uec_configure_serdes(struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + struct phy_device *tbiphy; + + if (!ug_info->tbi_node) { + dev_warn(&dev->dev, "SGMII mode requires that the device " + "tree specify a tbi-handle\n"); + return; + } + + tbiphy = of_phy_find_device(ug_info->tbi_node); + if (!tbiphy) { + dev_err(&dev->dev, "error: Could not get TBI device\n"); + return; + } + + /* + * If the link is already up, we must already be ok, and don't need to + * configure and reset the TBI<->SerDes link. Maybe U-Boot configured + * everything for us? Resetting it takes the link down and requires + * several seconds for it to come back. + */ + if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) + return; + + /* Single clk mode, mii mode off(for serdes communication) */ + phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); + + phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); + + phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); +} + +/* Configure the PHY for dev. + * returns 0 if success. -1 if failure + */ +static int init_phy(struct net_device *dev) +{ + struct ucc_geth_private *priv = netdev_priv(dev); + struct ucc_geth_info *ug_info = priv->ug_info; + struct phy_device *phydev; + + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + + phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, + priv->phy_interface); + if (!phydev) { + dev_err(&dev->dev, "Could not attach to PHY\n"); + return -ENODEV; + } + + if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) + uec_configure_serdes(dev); + + phydev->supported &= (SUPPORTED_MII | + SUPPORTED_Autoneg | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full); + + if (priv->max_speed == SPEED_1000) + phydev->supported |= ADVERTISED_1000baseT_Full; + + phydev->advertising = phydev->supported; + + priv->phydev = phydev; + + return 0; +} + +static void ugeth_dump_regs(struct ucc_geth_private *ugeth) +{ +#ifdef DEBUG + ucc_fast_dump_regs(ugeth->uccf); + dump_regs(ugeth); + dump_bds(ugeth); +#endif +} + +static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * + ugeth, + enum enet_addr_type + enet_addr_type) +{ + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_fast_private *uccf; + enum comm_dir comm_dir; + struct list_head *p_lh; + u16 i, num; + u32 __iomem *addr_h; + u32 __iomem *addr_l; + u8 *p_counter; + + uccf = ugeth->uccf; + + p_82xx_addr_filt = + (struct ucc_geth_82xx_address_filtering_pram __iomem *) + ugeth->p_rx_glbl_pram->addressfiltering; + + if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { + addr_h = &(p_82xx_addr_filt->gaddr_h); + addr_l = &(p_82xx_addr_filt->gaddr_l); + p_lh = &ugeth->group_hash_q; + p_counter = &(ugeth->numGroupAddrInHash); + } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { + addr_h = &(p_82xx_addr_filt->iaddr_h); + addr_l = &(p_82xx_addr_filt->iaddr_l); + p_lh = &ugeth->ind_hash_q; + p_counter = &(ugeth->numIndAddrInHash); + } else + return -EINVAL; + + comm_dir = 0; + if (uccf->enabled_tx) + comm_dir |= COMM_DIR_TX; + if (uccf->enabled_rx) + comm_dir |= COMM_DIR_RX; + if (comm_dir) + ugeth_disable(ugeth, comm_dir); + + /* Clear the hash table. 
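Both 32-bit halves of the selected (group or individual) hash filter are zeroed while any enabled Tx/Rx directions are temporarily disabled, and the queued address containers are released below. 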
*/ + out_be32(addr_h, 0x00000000); + out_be32(addr_l, 0x00000000); + + if (!p_lh) + return 0; + + num = *p_counter; + + /* Delete all remaining CQ elements */ + for (i = 0; i < num; i++) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); + + *p_counter = 0; + + if (comm_dir) + ugeth_enable(ugeth, comm_dir); + + return 0; +} + +static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, + u8 paddr_num) +{ + ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ + return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ +} + +static void ucc_geth_free_rx(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + u16 i, j; + u8 __iomem *bd; + + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { + if (ugeth->p_rx_bd_ring[i]) { + /* Return existing data buffers in ring */ + bd = ugeth->p_rx_bd_ring[i]; + for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { + if (ugeth->rx_skbuff[i][j]) { + dma_unmap_single(ugeth->dev, + in_be32(&((struct qe_bd __iomem *)bd)->buf), + ugeth->ug_info-> + uf_info.max_rx_buf_length + + UCC_GETH_RX_DATA_BUF_ALIGNMENT, + DMA_FROM_DEVICE); + dev_kfree_skb_any( + ugeth->rx_skbuff[i][j]); + ugeth->rx_skbuff[i][j] = NULL; + } + bd += sizeof(struct qe_bd); + } + + kfree(ugeth->rx_skbuff[i]); + + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->rx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->rx_bd_ring_offset[i]); + ugeth->p_rx_bd_ring[i] = NULL; + } + } + +} + +static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + u16 i, j; + u8 __iomem *bd; + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { + bd = ugeth->p_tx_bd_ring[i]; + if (!bd) + continue; + for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { + if (ugeth->tx_skbuff[i][j]) { + dma_unmap_single(ugeth->dev, + in_be32(&((struct qe_bd __iomem *)bd)->buf), + (in_be32((u32 __iomem *)bd) & + BD_LENGTH_MASK), + DMA_TO_DEVICE); + dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); + ugeth->tx_skbuff[i][j] = NULL; + } + } + + kfree(ugeth->tx_skbuff[i]); + + if (ugeth->p_tx_bd_ring[i]) { + if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_SYSTEM) + kfree((void *)ugeth->tx_bd_ring_offset[i]); + else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) + qe_muram_free(ugeth->tx_bd_ring_offset[i]); + ugeth->p_tx_bd_ring[i] = NULL; + } + } + +} + +static void ucc_geth_memclean(struct ucc_geth_private *ugeth) +{ + if (!ugeth) + return; + + if (ugeth->uccf) { + ucc_fast_free(ugeth->uccf); + ugeth->uccf = NULL; + } + + if (ugeth->p_thread_data_tx) { + qe_muram_free(ugeth->thread_dat_tx_offset); + ugeth->p_thread_data_tx = NULL; + } + if (ugeth->p_thread_data_rx) { + qe_muram_free(ugeth->thread_dat_rx_offset); + ugeth->p_thread_data_rx = NULL; + } + if (ugeth->p_exf_glbl_param) { + qe_muram_free(ugeth->exf_glbl_param_offset); + ugeth->p_exf_glbl_param = NULL; + } + if (ugeth->p_rx_glbl_pram) { + qe_muram_free(ugeth->rx_glbl_pram_offset); + ugeth->p_rx_glbl_pram = NULL; + } + if (ugeth->p_tx_glbl_pram) { + qe_muram_free(ugeth->tx_glbl_pram_offset); + ugeth->p_tx_glbl_pram = NULL; + } + if (ugeth->p_send_q_mem_reg) { + qe_muram_free(ugeth->send_q_mem_reg_offset); + ugeth->p_send_q_mem_reg = NULL; + } + if 
(ugeth->p_scheduler) { + qe_muram_free(ugeth->scheduler_offset); + ugeth->p_scheduler = NULL; + } + if (ugeth->p_tx_fw_statistics_pram) { + qe_muram_free(ugeth->tx_fw_statistics_pram_offset); + ugeth->p_tx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_fw_statistics_pram) { + qe_muram_free(ugeth->rx_fw_statistics_pram_offset); + ugeth->p_rx_fw_statistics_pram = NULL; + } + if (ugeth->p_rx_irq_coalescing_tbl) { + qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); + ugeth->p_rx_irq_coalescing_tbl = NULL; + } + if (ugeth->p_rx_bd_qs_tbl) { + qe_muram_free(ugeth->rx_bd_qs_tbl_offset); + ugeth->p_rx_bd_qs_tbl = NULL; + } + if (ugeth->p_init_enet_param_shadow) { + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + rxthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_RX, + ugeth->ug_info->riscRx, 1); + return_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), + ENET_INIT_PARAM_MAX_ENTRIES_TX, + ugeth->ug_info->riscTx, 0); + kfree(ugeth->p_init_enet_param_shadow); + ugeth->p_init_enet_param_shadow = NULL; + } + ucc_geth_free_tx(ugeth); + ucc_geth_free_rx(ugeth); + while (!list_empty(&ugeth->group_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->group_hash_q))); + while (!list_empty(&ugeth->ind_hash_q)) + put_enet_addr_container(ENET_ADDR_CONT_ENTRY + (dequeue(&ugeth->ind_hash_q))); + if (ugeth->ug_regs) { + iounmap(ugeth->ug_regs); + ugeth->ug_regs = NULL; + } +} + +static void ucc_geth_set_multi(struct net_device *dev) +{ + struct ucc_geth_private *ugeth; + struct netdev_hw_addr *ha; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + + ugeth = netdev_priv(dev); + + uf_regs = ugeth->uccf->uf_regs; + + if (dev->flags & IFF_PROMISC) { + setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); + } else { + clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); + + p_82xx_addr_filt = + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + if (dev->flags & IFF_ALLMULTI) { + /* Catch all multicast addresses, so set the + * filter to all 1's. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); + out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); + } else { + /* Clear filter and add the addresses in the list. + */ + out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); + out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); + + netdev_for_each_mc_addr(ha, dev) { + /* Ask CPM to run CRC and set bit in + * filter mask. + */ + hw_add_addr_in_hash(ugeth, ha->addr); + } + } + } +} + +static void ucc_geth_stop(struct ucc_geth_private *ugeth) +{ + struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; + struct phy_device *phydev = ugeth->phydev; + + ugeth_vdbg("%s: IN", __func__); + + /* + * Tell the kernel the link is down. + * Must be done before disabling the controller + * or deadlock may happen. 
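+ * (phy_stop() halts the phylib state machine, so adjust_link(), which + * quiesces and re-enables the controller, cannot run concurrently with + * the teardown below.)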
+ */ + phy_stop(phydev); + + /* Disable the controller */ + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + /* Mask all interrupts */ + out_be32(ugeth->uccf->p_uccm, 0x00000000); + + /* Clear all interrupts */ + out_be32(ugeth->uccf->p_ucce, 0xffffffff); + + /* Disable Rx and Tx */ + clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); + + ucc_geth_memclean(ugeth); +} + +static int ucc_struct_init(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + int i; + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || + (uf_info->bd_mem_part == MEM_PART_MURAM))) { + if (netif_msg_probe(ugeth)) + pr_err("Bad memory partition value\n"); + return -EINVAL; + } + + /* Rx BD lengths */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || + (ug_info->bdRingLenRx[i] % + UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { + if (netif_msg_probe(ugeth)) + pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n"); + return -EINVAL; + } + } + + /* Tx BD lengths */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { + if (netif_msg_probe(ugeth)) + pr_err("Tx BD ring length must be no smaller than 2\n"); + return -EINVAL; + } + } + + /* mrblr */ + if ((uf_info->max_rx_buf_length == 0) || + (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { + if (netif_msg_probe(ugeth)) + pr_err("max_rx_buf_length must be non-zero multiple of 128\n"); + return -EINVAL; + } + + /* num Tx queues */ + if (ug_info->numQueuesTx > NUM_TX_QUEUES) { + if (netif_msg_probe(ugeth)) + pr_err("number of tx queues too large\n"); + return -EINVAL; + } + + /* num Rx queues */ + if (ug_info->numQueuesRx > NUM_RX_QUEUES) { + if (netif_msg_probe(ugeth)) + pr_err("number of rx queues too large\n"); + return -EINVAL; + } + + /* l2qt */ + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { + if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { + if (netif_msg_probe(ugeth)) + pr_err("VLAN priority table entry must not be larger than number of Rx queues\n"); + return -EINVAL; + } + } + + /* l3qt */ + for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { + if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { + if (netif_msg_probe(ugeth)) + pr_err("IP priority table entry must not be larger than number of Rx queues\n"); + return -EINVAL; + } + } + + if (ug_info->cam && !ug_info->ecamptr) { + if (netif_msg_probe(ugeth)) + pr_err("If cam mode is chosen, must supply cam ptr\n"); + return -EINVAL; + } + + if ((ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && + ug_info->rxExtendedFiltering) { + if (netif_msg_probe(ugeth)) + pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n"); + return -EINVAL; + } + + /* Generate uccm_mask for receive */ + uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ + for (i = 0; i < ug_info->numQueuesRx; i++) + uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); + + for (i = 0; i < ug_info->numQueuesTx; i++) + uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); + /* Initialize the general fast UCC block. 
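The uccm_mask built above (per-queue RXF/TXB events plus the error events selected by eventRegMask & UCCE_OTHER) is handed to ucc_fast_init() as part of uf_info. 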
*/ + if (ucc_fast_init(uf_info, &ugeth->uccf)) { + if (netif_msg_probe(ugeth)) + pr_err("Failed to init uccf\n"); + return -ENOMEM; + } + + /* read the number of risc engines, update the riscTx and riscRx + * if there are 4 riscs in QE + */ + if (qe_get_num_of_risc() == 4) { + ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; + ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; + } + + ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); + if (!ugeth->ug_regs) { + if (netif_msg_probe(ugeth)) + pr_err("Failed to ioremap regs\n"); + return -ENOMEM; + } + + return 0; +} + +static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + int length; + u16 i, j; + u8 __iomem *bd; + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + /* Allocate Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Allocate in multiple of + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, + according to spec */ + length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) + / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % + UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) + length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_TX_BD_RING_ALIGNMENT; + ugeth->tx_bd_ring_offset[j] = + (u32) kmalloc((u32) (length + align), GFP_KERNEL); + + if (ugeth->tx_bd_ring_offset[j] != 0) + ugeth->p_tx_bd_ring[j] = + (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->tx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_TX_BD_RING_ALIGNMENT); + if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) + ugeth->p_tx_bd_ring[j] = + (u8 __iomem *) qe_muram_addr(ugeth-> + tx_bd_ring_offset[j]); + } + if (!ugeth->p_tx_bd_ring[j]) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate memory for Tx bd rings\n"); + return -ENOMEM; + } + /* Zero unused end of bd ring, according to spec */ + memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + + ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, + length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); + } + + /* Init Tx bds */ + for (j = 0; j < ug_info->numQueuesTx; j++) { + /* Setup the skbuff rings */ + ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenTx[j], + GFP_KERNEL); + + if (ugeth->tx_skbuff[j] == NULL) { + if (netif_msg_ifup(ugeth)) + pr_err("Could not allocate tx_skbuff\n"); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) + ugeth->tx_skbuff[j][i] = NULL; + + ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; + bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { + /* clear bd buffer */ + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); + /* set bd status and length */ + out_be32((u32 __iomem *)bd, 0); + bd += sizeof(struct qe_bd); + } + bd -= sizeof(struct qe_bd); + /* set bd status and length */ + out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ + } + + return 0; +} + +static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + int length; + u16 i, j; + u8 __iomem *bd; + + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + + /* Allocate Rx bds */ + for (j = 0; j < 
ug_info->numQueuesRx; j++) { + length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); + if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { + u32 align = 4; + if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) + align = UCC_GETH_RX_BD_RING_ALIGNMENT; + ugeth->rx_bd_ring_offset[j] = + (u32) kmalloc((u32) (length + align), GFP_KERNEL); + if (ugeth->rx_bd_ring_offset[j] != 0) + ugeth->p_rx_bd_ring[j] = + (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + + align) & ~(align - 1)); + } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { + ugeth->rx_bd_ring_offset[j] = + qe_muram_alloc(length, + UCC_GETH_RX_BD_RING_ALIGNMENT); + if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) + ugeth->p_rx_bd_ring[j] = + (u8 __iomem *) qe_muram_addr(ugeth-> + rx_bd_ring_offset[j]); + } + if (!ugeth->p_rx_bd_ring[j]) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate memory for Rx bd rings\n"); + return -ENOMEM; + } + } + + /* Init Rx bds */ + for (j = 0; j < ug_info->numQueuesRx; j++) { + /* Setup the skbuff rings */ + ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * + ugeth->ug_info->bdRingLenRx[j], + GFP_KERNEL); + + if (ugeth->rx_skbuff[j] == NULL) { + if (netif_msg_ifup(ugeth)) + pr_err("Could not allocate rx_skbuff\n"); + return -ENOMEM; + } + + for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) + ugeth->rx_skbuff[j][i] = NULL; + + ugeth->skb_currx[j] = 0; + bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; + for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { + /* set bd status and length */ + out_be32((u32 __iomem *)bd, R_I); + /* clear bd buffer */ + out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); + bd += sizeof(struct qe_bd); + } + bd -= sizeof(struct qe_bd); + /* set bd status and length */ + out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ + } + + return 0; +} + +static int ucc_geth_startup(struct ucc_geth_private *ugeth) +{ + struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; + struct ucc_geth_init_pram __iomem *p_init_enet_pram; + struct ucc_fast_private *uccf; + struct ucc_geth_info *ug_info; + struct ucc_fast_info *uf_info; + struct ucc_fast __iomem *uf_regs; + struct ucc_geth __iomem *ug_regs; + int ret_val = -EINVAL; + u32 remoder = UCC_GETH_REMODER_INIT; + u32 init_enet_pram_offset, cecr_subblock, command; + u32 ifstat, i, j, size, l2qt, l3qt; + u16 temoder = UCC_GETH_TEMODER_INIT; + u16 test; + u8 function_code = 0; + u8 __iomem *endOfRing; + u8 numThreadsRxNumerical, numThreadsTxNumerical; + + ugeth_vdbg("%s: IN", __func__); + uccf = ugeth->uccf; + ug_info = ugeth->ug_info; + uf_info = &ug_info->uf_info; + uf_regs = uccf->uf_regs; + ug_regs = ugeth->ug_regs; + + switch (ug_info->numThreadsRx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsRxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsRxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsRxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsRxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsRxNumerical = 8; + break; + default: + if (netif_msg_ifup(ugeth)) + pr_err("Bad number of Rx threads value\n"); + return -EINVAL; + } + + switch (ug_info->numThreadsTx) { + case UCC_GETH_NUM_OF_THREADS_1: + numThreadsTxNumerical = 1; + break; + case UCC_GETH_NUM_OF_THREADS_2: + numThreadsTxNumerical = 2; + break; + case UCC_GETH_NUM_OF_THREADS_4: + numThreadsTxNumerical = 4; + break; + case UCC_GETH_NUM_OF_THREADS_6: + numThreadsTxNumerical = 6; + break; + case UCC_GETH_NUM_OF_THREADS_8: + numThreadsTxNumerical = 8; + break; + default: + if 
(netif_msg_ifup(ugeth)) + pr_err("Bad number of Tx threads value\n"); + return -EINVAL; + } + + /* Calculate rx_extended_features */ + ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || + ug_info->ipAddressAlignment || + (ug_info->numStationAddresses != + UCC_GETH_NUM_OF_STATION_ADDRESSES_1); + + ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || + (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || + (ug_info->vlanOperationNonTagged != + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); + + init_default_reg_vals(&uf_regs->upsmr, + &ug_regs->maccfg1, &ug_regs->maccfg2); + + /* Set UPSMR */ + /* For more details see the hardware spec. */ + init_rx_parameters(ug_info->bro, + ug_info->rsh, ug_info->pro, &uf_regs->upsmr); + + /* We're going to ignore other registers for now, */ + /* except as needed to get up and running */ + + /* Set MACCFG1 */ + /* For more details see the hardware spec. */ + init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + ug_info->transmitFlowControl, + ug_info->pausePeriod, + ug_info->extensionField, + &uf_regs->upsmr, + &ug_regs->uempr, &ug_regs->maccfg1); + + setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); + + /* Set IPGIFG */ + /* For more details see the hardware spec. */ + ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, + ug_info->nonBackToBackIfgPart2, + ug_info-> + miminumInterFrameGapEnforcement, + ug_info->backToBackInterFrameGap, + &ug_regs->ipgifg); + if (ret_val != 0) { + if (netif_msg_ifup(ugeth)) + pr_err("IPGIFG initialization parameter too large\n"); + return ret_val; + } + + /* Set HAFDUP */ + /* For more details see the hardware spec. */ + ret_val = init_half_duplex_params(ug_info->altBeb, + ug_info->backPressureNoBackoff, + ug_info->noBackoff, + ug_info->excessDefer, + ug_info->altBebTruncation, + ug_info->maxRetransmission, + ug_info->collisionWindow, + &ug_regs->hafdup); + if (ret_val != 0) { + if (netif_msg_ifup(ugeth)) + pr_err("Half Duplex initialization parameter too large\n"); + return ret_val; + } + + /* Set IFSTAT */ + /* For more details see the hardware spec. */ + /* Read only - resets upon read */ + ifstat = in_be32(&ug_regs->ifstat); + + /* Clear UEMPR */ + /* For more details see the hardware spec. */ + out_be32(&ug_regs->uempr, 0); + + /* Set UESCR */ + /* For more details see the hardware spec. 
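The call below sets UPSMR[HSE] when hardware statistics gathering is requested and writes UESCR[CLRCNT] to clear the hardware counters; auto-zero on read is not requested here (the second argument is 0). 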
*/ + init_hw_statistics_gathering_mode((ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), + 0, &uf_regs->upsmr, &ug_regs->uescr); + + ret_val = ucc_geth_alloc_tx(ugeth); + if (ret_val != 0) + return ret_val; + + ret_val = ucc_geth_alloc_rx(ugeth); + if (ret_val != 0) + return ret_val; + + /* + * Global PRAM + */ + /* Tx global PRAM */ + /* Allocate global tx parameter RAM page */ + ugeth->tx_glbl_pram_offset = + qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), + UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n"); + return -ENOMEM; + } + ugeth->p_tx_glbl_pram = + (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> + tx_glbl_pram_offset); + /* Zero out p_tx_glbl_pram */ + memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); + + /* Fill global PRAM */ + + /* TQPTR */ + /* Size varies with number of Tx threads */ + ugeth->thread_dat_tx_offset = + qe_muram_alloc(numThreadsTxNumerical * + sizeof(struct ucc_geth_thread_data_tx) + + 32 * (numThreadsTxNumerical == 1), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n"); + return -ENOMEM; + } + + ugeth->p_thread_data_tx = + (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> + thread_dat_tx_offset); + out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); + + /* vtagtable */ + for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) + out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], + ug_info->vtagtable[i]); + + /* iphoffset */ + for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) + out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], + ug_info->iphoffset[i]); + + /* SQPTR */ + /* Size varies with number of Tx queues */ + ugeth->send_q_mem_reg_offset = + qe_muram_alloc(ug_info->numQueuesTx * + sizeof(struct ucc_geth_send_queue_qd), + UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n"); + return -ENOMEM; + } + + ugeth->p_send_q_mem_reg = + (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> + send_q_mem_reg_offset); + out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesTx; i++) { + endOfRing = + ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - + 1) * sizeof(struct qe_bd); + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. + last_bd_completed_address, + (u32) virt_to_phys(endOfRing)); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, + (u32) immrbar_virt_to_phys(ugeth-> + p_tx_bd_ring[i])); + out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. 
+ last_bd_completed_address, + (u32) immrbar_virt_to_phys(endOfRing)); + } + } + + /* schedulerbasepointer */ + + if (ug_info->numQueuesTx > 1) { + /* scheduler exists only if more than 1 tx queue */ + ugeth->scheduler_offset = + qe_muram_alloc(sizeof(struct ucc_geth_scheduler), + UCC_GETH_SCHEDULER_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->scheduler_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_scheduler\n"); + return -ENOMEM; + } + + ugeth->p_scheduler = + (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> + scheduler_offset); + out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, + ugeth->scheduler_offset); + /* Zero out p_scheduler */ + memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); + + /* Set values in scheduler */ + out_be32(&ugeth->p_scheduler->mblinterval, + ug_info->mblinterval); + out_be16(&ugeth->p_scheduler->nortsrbytetime, + ug_info->nortsrbytetime); + out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); + out_8(&ugeth->p_scheduler->strictpriorityq, + ug_info->strictpriorityq); + out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); + out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); + for (i = 0; i < NUM_TX_QUEUES; i++) + out_8(&ugeth->p_scheduler->weightfactor[i], + ug_info->weightfactor[i]); + + /* Set pointers to cpucount registers in scheduler */ + ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); + ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); + ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); + ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); + ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); + ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); + ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); + ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); + } + + /* schedulerbasepointer */ + /* TxRMON_PTR (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + ugeth->tx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (struct ucc_geth_tx_firmware_statistics_pram), + UCC_GETH_TX_STATISTICS_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n"); + return -ENOMEM; + } + ugeth->p_tx_fw_statistics_pram = + (struct ucc_geth_tx_firmware_statistics_pram __iomem *) + qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); + /* Zero out p_tx_fw_statistics_pram */ + memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, + 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); + } + + /* temoder */ + /* Already has speed set */ + + if (ug_info->numQueuesTx > 1) + temoder |= TEMODER_SCHEDULER_ENABLE; + if (ug_info->ipCheckSumGenerate) + temoder |= TEMODER_IP_CHECKSUM_GENERATE; + temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); + out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); + + test = in_be16(&ugeth->p_tx_glbl_pram->temoder); + + /* Function code register value to be used later */ + function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; + /* Required for QE */ + + /* function code register */ + out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); + + /* Rx global PRAM */ + /* Allocate global rx parameter RAM page */ + ugeth->rx_glbl_pram_offset = + qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), + UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not 
allocate DPRAM memory for p_rx_glbl_pram\n"); + return -ENOMEM; + } + ugeth->p_rx_glbl_pram = + (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> + rx_glbl_pram_offset); + /* Zero out p_rx_glbl_pram */ + memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); + + /* Fill global PRAM */ + + /* RQPTR */ + /* Size varies with number of Rx threads */ + ugeth->thread_dat_rx_offset = + qe_muram_alloc(numThreadsRxNumerical * + sizeof(struct ucc_geth_thread_data_rx), + UCC_GETH_THREAD_DATA_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n"); + return -ENOMEM; + } + + ugeth->p_thread_data_rx = + (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> + thread_dat_rx_offset); + out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); + + /* typeorlen */ + out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); + + /* rxrmonbaseptr (statistics) */ + if (ug_info-> + statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { + ugeth->rx_fw_statistics_pram_offset = + qe_muram_alloc(sizeof + (struct ucc_geth_rx_firmware_statistics_pram), + UCC_GETH_RX_STATISTICS_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n"); + return -ENOMEM; + } + ugeth->p_rx_fw_statistics_pram = + (struct ucc_geth_rx_firmware_statistics_pram __iomem *) + qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); + /* Zero out p_rx_fw_statistics_pram */ + memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, + sizeof(struct ucc_geth_rx_firmware_statistics_pram)); + } + + /* intCoalescingPtr */ + + /* Size varies with number of Rx queues */ + ugeth->rx_irq_coalescing_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) + + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n"); + return -ENOMEM; + } + + ugeth->p_rx_irq_coalescing_tbl = + (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) + qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, + ugeth->rx_irq_coalescing_tbl_offset); + + /* Fill interrupt coalescing table */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. + interruptcoalescingmaxvalue, + ug_info->interruptcoalescingmaxvalue[i]); + out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. 
+ interruptcoalescingcounter, + ug_info->interruptcoalescingmaxvalue[i]); + } + + /* MRBLR */ + init_max_rx_buff_len(uf_info->max_rx_buf_length, + &ugeth->p_rx_glbl_pram->mrblr); + /* MFLR */ + out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); + /* MINFLR */ + init_min_frame_len(ug_info->minFrameLength, + &ugeth->p_rx_glbl_pram->minflr, + &ugeth->p_rx_glbl_pram->mrblr); + /* MAXD1 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); + /* MAXD2 */ + out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); + + /* l2qt */ + l2qt = 0; + for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) + l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); + + /* l3qt */ + for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { + l3qt = 0; + for (i = 0; i < 8; i++) + l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); + out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); + } + + /* vlantype */ + out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); + + /* vlantci */ + out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); + + /* ecamptr */ + out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); + + /* RBDQPTR */ + /* Size varies with number of Rx queues */ + ugeth->rx_bd_qs_tbl_offset = + qe_muram_alloc(ug_info->numQueuesRx * + (sizeof(struct ucc_geth_rx_bd_queues_entry) + + sizeof(struct ucc_geth_rx_prefetched_bds)), + UCC_GETH_RX_BD_QUEUES_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n"); + return -ENOMEM; + } + + ugeth->p_rx_bd_qs_tbl = + (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> + rx_bd_qs_tbl_offset); + out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); + /* Zero out p_rx_bd_qs_tbl */ + memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, + 0, + ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + + sizeof(struct ucc_geth_rx_prefetched_bds))); + + /* Setup the table */ + /* Assume BD rings are already established */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); + } else if (ugeth->ug_info->uf_info.bd_mem_part == + MEM_PART_MURAM) { + out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, + (u32) immrbar_virt_to_phys(ugeth-> + p_rx_bd_ring[i])); + } + /* rest of fields handled by QE */ + } + + /* remoder */ + /* Already has speed set */ + + if (ugeth->rx_extended_features) + remoder |= REMODER_RX_EXTENDED_FEATURES; + if (ug_info->rxExtendedFiltering) + remoder |= REMODER_RX_EXTENDED_FILTERING; + if (ug_info->dynamicMaxFrameLength) + remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; + if (ug_info->dynamicMinFrameLength) + remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; + remoder |= + ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; + remoder |= + ug_info-> + vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; + remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; + remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); + if (ug_info->ipCheckSumCheck) + remoder |= REMODER_IP_CHECKSUM_CHECK; + if (ug_info->ipAddressAlignment) + remoder |= REMODER_IP_ADDRESS_ALIGNMENT; + out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); + + /* Note that this function must be called */ + /* ONLY AFTER p_tx_fw_statistics_pram */ + /* 
andp_UccGethRxFirmwareStatisticsPram are allocated ! */ + init_firmware_statistics_gathering_mode((ug_info-> + statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), + (ug_info->statisticsMode & + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), + &ugeth->p_tx_glbl_pram->txrmonbaseptr, + ugeth->tx_fw_statistics_pram_offset, + &ugeth->p_rx_glbl_pram->rxrmonbaseptr, + ugeth->rx_fw_statistics_pram_offset, + &ugeth->p_tx_glbl_pram->temoder, + &ugeth->p_rx_glbl_pram->remoder); + + /* function code register */ + out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); + + /* initialize extended filtering */ + if (ug_info->rxExtendedFiltering) { + if (!ug_info->extendedFilteringChainPointer) { + if (netif_msg_ifup(ugeth)) + pr_err("Null Extended Filtering Chain Pointer\n"); + return -EINVAL; + } + + /* Allocate memory for extended filtering Mode Global + Parameters */ + ugeth->exf_glbl_param_offset = + qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), + UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); + if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n"); + return -ENOMEM; + } + + ugeth->p_exf_glbl_param = + (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> + exf_glbl_param_offset); + out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, + ugeth->exf_glbl_param_offset); + out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, + (u32) ug_info->extendedFilteringChainPointer); + + } else { /* initialize 82xx style address filtering */ + + /* Init individual address recognition registers to disabled */ + + for (j = 0; j < NUM_OF_PADDRS; j++) + ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); + + p_82xx_addr_filt = + (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> + p_rx_glbl_pram->addressfiltering; + + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_GROUP); + ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, + ENET_ADDR_TYPE_INDIVIDUAL); + } + + /* + * Initialize UCC at QE level + */ + + command = QE_INIT_TX_RX; + + /* Allocate shadow InitEnet command parameter structure. + * This is needed because after the InitEnet command is executed, + * the structure in DPRAM is released, because DPRAM is a premium + * resource. + * This shadow structure keeps a copy of what was done so that the + * allocated resources can be released when the channel is freed. 
+ */ + if (!(ugeth->p_init_enet_param_shadow = + kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n"); + return -ENOMEM; + } + /* Zero out *p_init_enet_param_shadow */ + memset((char *)ugeth->p_init_enet_param_shadow, + 0, sizeof(struct ucc_geth_init_pram)); + + /* Fill shadow InitEnet command parameter structure */ + + ugeth->p_init_enet_param_shadow->resinit1 = + ENET_INIT_PARAM_MAGIC_RES_INIT1; + ugeth->p_init_enet_param_shadow->resinit2 = + ENET_INIT_PARAM_MAGIC_RES_INIT2; + ugeth->p_init_enet_param_shadow->resinit3 = + ENET_INIT_PARAM_MAGIC_RES_INIT3; + ugeth->p_init_enet_param_shadow->resinit4 = + ENET_INIT_PARAM_MAGIC_RES_INIT4; + ugeth->p_init_enet_param_shadow->resinit5 = + ENET_INIT_PARAM_MAGIC_RES_INIT5; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; + + ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= + ugeth->rx_glbl_pram_offset | ug_info->riscRx; + if ((ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && + (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && + (ug_info->largestexternallookupkeysize != + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { + if (netif_msg_ifup(ugeth)) + pr_err("Invalid largest External Lookup Key Size\n"); + return -EINVAL; + } + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = + ug_info->largestexternallookupkeysize; + size = sizeof(struct ucc_geth_thread_rx_pram); + if (ug_info->rxExtendedFiltering) { + size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; + if (ug_info->largestexternallookupkeysize == + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + size += + THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; + } + + if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> + p_init_enet_param_shadow->rxthread[0]), + (u8) (numThreadsRxNumerical + 1) + /* Rx needs one extra for terminator */ + , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, + ug_info->riscRx, 1)) != 0) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not fill p_init_enet_param_shadow\n"); + return ret_val; + } + + ugeth->p_init_enet_param_shadow->txglobal = + ugeth->tx_glbl_pram_offset | ug_info->riscTx; + if ((ret_val = + fill_init_enet_entries(ugeth, + &(ugeth->p_init_enet_param_shadow-> + txthread[0]), numThreadsTxNumerical, + sizeof(struct ucc_geth_thread_tx_pram), + UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, + ug_info->riscTx, 0)) != 0) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not fill p_init_enet_param_shadow\n"); + return ret_val; + } + + /* Load Rx bds with buffers */ + for (i = 0; i < ug_info->numQueuesRx; i++) { + if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not fill Rx bds with buffers\n"); + return ret_val; + } + } + + /* Allocate InitEnet command parameter structure */ + init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); + if (IS_ERR_VALUE(init_enet_pram_offset)) { + if (netif_msg_ifup(ugeth)) + pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n"); + return -ENOMEM; + } + p_init_enet_pram = + (struct 
ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); + + /* Copy shadow InitEnet command parameter structure into PRAM */ + out_8(&p_init_enet_pram->resinit1, + ugeth->p_init_enet_param_shadow->resinit1); + out_8(&p_init_enet_pram->resinit2, + ugeth->p_init_enet_param_shadow->resinit2); + out_8(&p_init_enet_pram->resinit3, + ugeth->p_init_enet_param_shadow->resinit3); + out_8(&p_init_enet_pram->resinit4, + ugeth->p_init_enet_param_shadow->resinit4); + out_be16(&p_init_enet_pram->resinit5, + ugeth->p_init_enet_param_shadow->resinit5); + out_8(&p_init_enet_pram->largestexternallookupkeysize, + ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); + out_be32(&p_init_enet_pram->rgftgfrxglobal, + ugeth->p_init_enet_param_shadow->rgftgfrxglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) + out_be32(&p_init_enet_pram->rxthread[i], + ugeth->p_init_enet_param_shadow->rxthread[i]); + out_be32(&p_init_enet_pram->txglobal, + ugeth->p_init_enet_param_shadow->txglobal); + for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) + out_be32(&p_init_enet_pram->txthread[i], + ugeth->p_init_enet_param_shadow->txthread[i]); + + /* Issue QE command */ + cecr_subblock = + ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); + qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, + init_enet_pram_offset); + + /* Free InitEnet command parameter */ + qe_muram_free(init_enet_pram_offset); + + return 0; +} + +/* This is called by the kernel when a frame is ready for transmission. */ +/* It is pointed to by the dev->hard_start_xmit function pointer */ +static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); +#ifdef CONFIG_UGETH_TX_ON_DEMAND + struct ucc_fast_private *uccf; +#endif + u8 __iomem *bd; /* BD pointer */ + u32 bd_status; + u8 txQ = 0; + unsigned long flags; + + ugeth_vdbg("%s: IN", __func__); + + spin_lock_irqsave(&ugeth->lock, flags); + + dev->stats.tx_bytes += skb->len; + + /* Start from the next BD that should be filled */ + bd = ugeth->txBd[txQ]; + bd_status = in_be32((u32 __iomem *)bd); + /* Save the skb pointer so we can free it later */ + ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; + + /* Update the current skb pointer (wrapping if this was the last) */ + ugeth->skb_curtx[txQ] = + (ugeth->skb_curtx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* set up the buffer descriptor */ + out_be32(&((struct qe_bd __iomem *)bd)->buf, + dma_map_single(ugeth->dev, skb->data, + skb->len, DMA_TO_DEVICE)); + + /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ + + bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; + + /* set bd status and length */ + out_be32((u32 __iomem *)bd, bd_status); + + /* Move to next BD in the ring */ + if (!(bd_status & T_W)) + bd += sizeof(struct qe_bd); + else + bd = ugeth->p_tx_bd_ring[txQ]; + + /* If the next BD still needs to be cleaned up, then the bds + are full. We need to tell the kernel to stop sending us stuff. */ + if (bd == ugeth->confBd[txQ]) { + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + } + + ugeth->txBd[txQ] = bd; + + skb_tx_timestamp(skb); + + if (ugeth->p_scheduler) { + ugeth->cpucount[txQ]++; + /* Indicate to QE that there are more Tx bds ready for + transmission */ + /* This is done by writing a running counter of the bd + count to the scheduler PRAM. 
*/ + out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); + } + +#ifdef CONFIG_UGETH_TX_ON_DEMAND + uccf = ugeth->uccf; + out_be16(uccf->p_utodr, UCC_FAST_TOD); +#endif + spin_unlock_irqrestore(&ugeth->lock, flags); + + return NETDEV_TX_OK; +} + +static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) +{ + struct sk_buff *skb; + u8 __iomem *bd; + u16 length, howmany = 0; + u32 bd_status; + u8 *bdBuffer; + struct net_device *dev; + + ugeth_vdbg("%s: IN", __func__); + + dev = ugeth->ndev; + + /* collect received buffers */ + bd = ugeth->rxBd[rxQ]; + + bd_status = in_be32((u32 __iomem *)bd); + + /* while there are received buffers and BD is full (~R_E) */ + while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { + bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); + length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); + skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; + + /* determine whether buffer is first, last, first and last + (single buffer frame) or middle (not first and not last) */ + if (!skb || + (!(bd_status & (R_F | R_L))) || + (bd_status & R_ERRORS_FATAL)) { + if (netif_msg_rx_err(ugeth)) + pr_err("%d: ERROR!!! skb - 0x%08x\n", + __LINE__, (u32)skb); + dev_kfree_skb(skb); + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; + dev->stats.rx_dropped++; + } else { + dev->stats.rx_packets++; + howmany++; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, ugeth->ndev); + + dev->stats.rx_bytes += length; + /* Send the packet up the stack */ + netif_receive_skb(skb); + } + + skb = get_new_skb(ugeth, bd); + if (!skb) { + if (netif_msg_rx_err(ugeth)) + pr_warn("No Rx Data Buffer\n"); + dev->stats.rx_dropped++; + break; + } + + ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; + + /* update to point at the next skb */ + ugeth->skb_currx[rxQ] = + (ugeth->skb_currx[rxQ] + + 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); + + if (bd_status & R_W) + bd = ugeth->p_rx_bd_ring[rxQ]; + else + bd += sizeof(struct qe_bd); + + bd_status = in_be32((u32 __iomem *)bd); + } + + ugeth->rxBd[rxQ] = bd; + return howmany; +} + +static int ucc_geth_tx(struct net_device *dev, u8 txQ) +{ + /* Start from the next BD that should be filled */ + struct ucc_geth_private *ugeth = netdev_priv(dev); + u8 __iomem *bd; /* BD pointer */ + u32 bd_status; + + bd = ugeth->confBd[txQ]; + bd_status = in_be32((u32 __iomem *)bd); + + /* Normal processing. */ + while ((bd_status & T_R) == 0) { + struct sk_buff *skb; + + /* BD contains already transmitted buffer. 
*/ + /* Handle the transmitted buffer and release */ + /* the BD to be used with the current frame */ + + skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; + if (!skb) + break; + + dev->stats.tx_packets++; + + dev_consume_skb_any(skb); + + ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; + ugeth->skb_dirtytx[txQ] = + (ugeth->skb_dirtytx[txQ] + + 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + /* Advance the confirmation BD pointer */ + if (!(bd_status & T_W)) + bd += sizeof(struct qe_bd); + else + bd = ugeth->p_tx_bd_ring[txQ]; + bd_status = in_be32((u32 __iomem *)bd); + } + ugeth->confBd[txQ] = bd; + return 0; +} + +static int ucc_geth_poll(struct napi_struct *napi, int budget) +{ + struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); + struct ucc_geth_info *ug_info; + int howmany, i; + + ug_info = ugeth->ug_info; + + /* Tx event processing */ + spin_lock(&ugeth->lock); + for (i = 0; i < ug_info->numQueuesTx; i++) + ucc_geth_tx(ugeth->ndev, i); + spin_unlock(&ugeth->lock); + + howmany = 0; + for (i = 0; i < ug_info->numQueuesRx; i++) + howmany += ucc_geth_rx(ugeth, i, budget - howmany); + + if (howmany < budget) { + napi_complete(napi); + setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); + } + + return howmany; +} + +static irqreturn_t ucc_geth_irq_handler(int irq, void *info) +{ + struct net_device *dev = info; + struct ucc_geth_private *ugeth = netdev_priv(dev); + struct ucc_fast_private *uccf; + struct ucc_geth_info *ug_info; + register u32 ucce; + register u32 uccm; + + ugeth_vdbg("%s: IN", __func__); + + uccf = ugeth->uccf; + ug_info = ugeth->ug_info; + + /* read and clear events */ + ucce = (u32) in_be32(uccf->p_ucce); + uccm = (u32) in_be32(uccf->p_uccm); + ucce &= uccm; + out_be32(uccf->p_ucce, ucce); + + /* check for receive events that require processing */ + if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { + if (napi_schedule_prep(&ugeth->napi)) { + uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); + out_be32(uccf->p_uccm, uccm); + __napi_schedule(&ugeth->napi); + } + } + + /* Errors and other events */ + if (ucce & UCCE_OTHER) { + if (ucce & UCC_GETH_UCCE_BSY) + dev->stats.rx_errors++; + if (ucce & UCC_GETH_UCCE_TXE) + dev->stats.tx_errors++; + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ucc_netpoll(struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + int irq = ugeth->ug_info->uf_info.irq; + + disable_irq(irq); + ucc_geth_irq_handler(irq, dev); + enable_irq(irq); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + /* + * If device is not running, we will set mac addr register + * when opening the device. 
+ */ + if (!netif_running(dev)) + return 0; + + spin_lock_irq(&ugeth->lock); + init_mac_station_addr_regs(dev->dev_addr[0], + dev->dev_addr[1], + dev->dev_addr[2], + dev->dev_addr[3], + dev->dev_addr[4], + dev->dev_addr[5], + &ugeth->ug_regs->macstnaddr1, + &ugeth->ug_regs->macstnaddr2); + spin_unlock_irq(&ugeth->lock); + + return 0; +} + +static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) +{ + struct net_device *dev = ugeth->ndev; + int err; + + err = ucc_struct_init(ugeth); + if (err) { + netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n"); + goto err; + } + + err = ucc_geth_startup(ugeth); + if (err) { + netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); + goto err; + } + + err = adjust_enet_interface(ugeth); + if (err) { + netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n"); + goto err; + } + + /* Set MACSTNADDR1, MACSTNADDR2 */ + /* For more details see the hardware spec. */ + init_mac_station_addr_regs(dev->dev_addr[0], + dev->dev_addr[1], + dev->dev_addr[2], + dev->dev_addr[3], + dev->dev_addr[4], + dev->dev_addr[5], + &ugeth->ug_regs->macstnaddr1, + &ugeth->ug_regs->macstnaddr2); + + err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + if (err) { + netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n"); + goto err; + } + + return 0; +err: + ucc_geth_stop(ugeth); + return err; +} + +/* Called when something needs to use the ethernet device */ +/* Returns 0 for success. */ +static int ucc_geth_open(struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + int err; + + ugeth_vdbg("%s: IN", __func__); + + /* Test station address */ + if (dev->dev_addr[0] & ENET_GROUP_ADDR) { + netif_err(ugeth, ifup, dev, + "Multicast address used for station address - is this what you wanted?\n"); + return -EINVAL; + } + + err = init_phy(dev); + if (err) { + netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n"); + return err; + } + + err = ucc_geth_init_mac(ugeth); + if (err) { + netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n"); + goto err; + } + + err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, + 0, "UCC Geth", dev); + if (err) { + netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n"); + goto err; + } + + phy_start(ugeth->phydev); + napi_enable(&ugeth->napi); + netif_start_queue(dev); + + device_set_wakeup_capable(&dev->dev, + qe_alive_during_sleep() || ugeth->phydev->irq); + device_set_wakeup_enable(&dev->dev, ugeth->wol_en); + + return err; + +err: + ucc_geth_stop(ugeth); + return err; +} + +/* Stops the kernel queue, and halts the controller */ +static int ucc_geth_close(struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + + ugeth_vdbg("%s: IN", __func__); + + napi_disable(&ugeth->napi); + + cancel_work_sync(&ugeth->timeout_work); + ucc_geth_stop(ugeth); + phy_disconnect(ugeth->phydev); + ugeth->phydev = NULL; + + free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); + + netif_stop_queue(dev); + + return 0; +} + +/* Reopen device. This will reset the MAC and PHY. */ +static void ucc_geth_timeout_work(struct work_struct *work) +{ + struct ucc_geth_private *ugeth; + struct net_device *dev; + + ugeth = container_of(work, struct ucc_geth_private, timeout_work); + dev = ugeth->ndev; + + ugeth_vdbg("%s: IN", __func__); + + dev->stats.tx_errors++; + + ugeth_dump_regs(ugeth); + + if (dev->flags & IFF_UP) { + /* + * Must reset MAC *and* PHY. This is done by reopening + * the device. 
+ */ + netif_tx_stop_all_queues(dev); + ucc_geth_stop(ugeth); + ucc_geth_init_mac(ugeth); + /* Must start PHY here */ + phy_start(ugeth->phydev); + netif_tx_start_all_queues(dev); + } + + netif_tx_schedule_all(dev); +} + +/* + * ucc_geth_timeout gets called when a packet has not been + * transmitted after a set amount of time. + */ +static void ucc_geth_timeout(struct net_device *dev) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + + schedule_work(&ugeth->timeout_work); +} + + +#ifdef CONFIG_PM + +static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(ofdev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + napi_disable(&ugeth->napi); + + /* + * Disable the controller, otherwise we'll wakeup on any network + * activity. + */ + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); + + if (ugeth->wol_en & WAKE_MAGIC) { + setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); + setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); + ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); + } else if (!(ugeth->wol_en & WAKE_PHY)) { + phy_stop(ugeth->phydev); + } + + return 0; +} + +static int ucc_geth_resume(struct platform_device *ofdev) +{ + struct net_device *ndev = platform_get_drvdata(ofdev); + struct ucc_geth_private *ugeth = netdev_priv(ndev); + int err; + + if (!netif_running(ndev)) + return 0; + + if (qe_alive_during_sleep()) { + if (ugeth->wol_en & WAKE_MAGIC) { + ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX); + clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); + clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); + } + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); + } else { + /* + * Full reinitialization is required if QE shuts down + * during sleep. 
+ */ + ucc_geth_memclean(ugeth); + + err = ucc_geth_init_mac(ugeth); + if (err) { + netdev_err(ndev, "Cannot initialize MAC, aborting\n"); + return err; + } + } + + ugeth->oldlink = 0; + ugeth->oldspeed = 0; + ugeth->oldduplex = -1; + + phy_stop(ugeth->phydev); + phy_start(ugeth->phydev); + + napi_enable(&ugeth->napi); + netif_device_attach(ndev); + + return 0; +} + +#else +#define ucc_geth_suspend NULL +#define ucc_geth_resume NULL +#endif + +static phy_interface_t to_phy_interface(const char *phy_connection_type) +{ + if (strcasecmp(phy_connection_type, "mii") == 0) + return PHY_INTERFACE_MODE_MII; + if (strcasecmp(phy_connection_type, "gmii") == 0) + return PHY_INTERFACE_MODE_GMII; + if (strcasecmp(phy_connection_type, "tbi") == 0) + return PHY_INTERFACE_MODE_TBI; + if (strcasecmp(phy_connection_type, "rmii") == 0) + return PHY_INTERFACE_MODE_RMII; + if (strcasecmp(phy_connection_type, "rgmii") == 0) + return PHY_INTERFACE_MODE_RGMII; + if (strcasecmp(phy_connection_type, "rgmii-id") == 0) + return PHY_INTERFACE_MODE_RGMII_ID; + if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) + return PHY_INTERFACE_MODE_RGMII_TXID; + if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) + return PHY_INTERFACE_MODE_RGMII_RXID; + if (strcasecmp(phy_connection_type, "rtbi") == 0) + return PHY_INTERFACE_MODE_RTBI; + if (strcasecmp(phy_connection_type, "sgmii") == 0) + return PHY_INTERFACE_MODE_SGMII; + + return PHY_INTERFACE_MODE_MII; +} + +static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct ucc_geth_private *ugeth = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (!ugeth->phydev) + return -ENODEV; + + return phy_mii_ioctl(ugeth->phydev, rq, cmd); +} + +static const struct net_device_ops ucc_geth_netdev_ops = { + .ndo_open = ucc_geth_open, + .ndo_stop = ucc_geth_close, + .ndo_start_xmit = ucc_geth_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ucc_geth_set_mac_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_rx_mode = ucc_geth_set_multi, + .ndo_tx_timeout = ucc_geth_timeout, + .ndo_do_ioctl = ucc_geth_ioctl, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ucc_netpoll, +#endif +}; + +static int ucc_geth_probe(struct platform_device* ofdev) +{ + struct device *device = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct net_device *dev = NULL; + struct ucc_geth_private *ugeth = NULL; + struct ucc_geth_info *ug_info; + struct resource res; + int err, ucc_num, max_speed = 0; + const unsigned int *prop; + const char *sprop; + const void *mac_addr; + phy_interface_t phy_interface; + static const int enet_to_speed[] = { + SPEED_10, SPEED_10, SPEED_10, + SPEED_100, SPEED_100, SPEED_100, + SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, + }; + static const phy_interface_t enet_to_phy_interface[] = { + PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, + PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, + PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, + PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, + PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, + PHY_INTERFACE_MODE_SGMII, + }; + + ugeth_vdbg("%s: IN", __func__); + + prop = of_get_property(np, "cell-index", NULL); + if (!prop) { + prop = of_get_property(np, "device-id", NULL); + if (!prop) + return -ENODEV; + } + + ucc_num = *prop - 1; + if ((ucc_num < 0) || (ucc_num > 7)) + return -ENODEV; + + ug_info = &ugeth_info[ucc_num]; + if (ug_info == NULL) { + if (netif_msg_probe(&debug)) + pr_err("[%d] Missing additional data!\n", 
ucc_num); + return -ENODEV; + } + + ug_info->uf_info.ucc_num = ucc_num; + + sprop = of_get_property(np, "rx-clock-name", NULL); + if (sprop) { + ug_info->uf_info.rx_clock = qe_clock_source(sprop); + if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || + (ug_info->uf_info.rx_clock > QE_CLK24)) { + pr_err("invalid rx-clock-name property\n"); + return -EINVAL; + } + } else { + prop = of_get_property(np, "rx-clock", NULL); + if (!prop) { + /* If both rx-clock-name and rx-clock are missing, + we want to tell people to use rx-clock-name. */ + pr_err("missing rx-clock-name property\n"); + return -EINVAL; + } + if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { + pr_err("invalid rx-clock propperty\n"); + return -EINVAL; + } + ug_info->uf_info.rx_clock = *prop; + } + + sprop = of_get_property(np, "tx-clock-name", NULL); + if (sprop) { + ug_info->uf_info.tx_clock = qe_clock_source(sprop); + if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || + (ug_info->uf_info.tx_clock > QE_CLK24)) { + pr_err("invalid tx-clock-name property\n"); + return -EINVAL; + } + } else { + prop = of_get_property(np, "tx-clock", NULL); + if (!prop) { + pr_err("missing tx-clock-name property\n"); + return -EINVAL; + } + if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { + pr_err("invalid tx-clock property\n"); + return -EINVAL; + } + ug_info->uf_info.tx_clock = *prop; + } + + err = of_address_to_resource(np, 0, &res); + if (err) + return -EINVAL; + + ug_info->uf_info.regs = res.start; + ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); + + ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!ug_info->phy_node && of_phy_is_fixed_link(np)) { + /* + * In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + err = of_phy_register_fixed_link(np); + if (err) + return err; + ug_info->phy_node = of_node_get(np); + } + + /* Find the TBI PHY node. 
If it's not there, we don't support SGMII */ + ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); + + /* get the phy interface type, or default to MII */ + prop = of_get_property(np, "phy-connection-type", NULL); + if (!prop) { + /* handle interface property present in old trees */ + prop = of_get_property(ug_info->phy_node, "interface", NULL); + if (prop != NULL) { + phy_interface = enet_to_phy_interface[*prop]; + max_speed = enet_to_speed[*prop]; + } else + phy_interface = PHY_INTERFACE_MODE_MII; + } else { + phy_interface = to_phy_interface((const char *)prop); + } + + /* get speed, or derive from PHY interface */ + if (max_speed == 0) + switch (phy_interface) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_TBI: + case PHY_INTERFACE_MODE_RTBI: + case PHY_INTERFACE_MODE_SGMII: + max_speed = SPEED_1000; + break; + default: + max_speed = SPEED_100; + break; + } + + if (max_speed == SPEED_1000) { + unsigned int snums = qe_get_num_of_snums(); + + /* configure muram FIFOs for gigabit operation */ + ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; + ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; + ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; + ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; + ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; + ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; + ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; + + /* If QE's snum number is 46/76 which means we need to support + * 4 UECs at 1000Base-T simultaneously, we need to allocate + * more Threads to Rx. + */ + if ((snums == 76) || (snums == 46)) + ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; + else + ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; + } + + if (netif_msg_probe(&debug)) + pr_info("UCC%1d at 0x%8x (irq = %d)\n", + ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, + ug_info->uf_info.irq); + + /* Create an ethernet device instance */ + dev = alloc_etherdev(sizeof(*ugeth)); + + if (dev == NULL) { + of_node_put(ug_info->tbi_node); + of_node_put(ug_info->phy_node); + return -ENOMEM; + } + + ugeth = netdev_priv(dev); + spin_lock_init(&ugeth->lock); + + /* Create CQs for hash tables */ + INIT_LIST_HEAD(&ugeth->group_hash_q); + INIT_LIST_HEAD(&ugeth->ind_hash_q); + + dev_set_drvdata(device, dev); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long)(ug_info->uf_info.regs); + + SET_NETDEV_DEV(dev, device); + + /* Fill in the dev structure */ + uec_set_ethtool_ops(dev); + dev->netdev_ops = &ucc_geth_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); + netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64); + dev->mtu = 1500; + + ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); + ugeth->phy_interface = phy_interface; + ugeth->max_speed = max_speed; + + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(dev); + + err = register_netdev(dev); + if (err) { + if (netif_msg_probe(ugeth)) + pr_err("%s: Cannot register net device, aborting\n", + dev->name); + free_netdev(dev); + of_node_put(ug_info->tbi_node); + of_node_put(ug_info->phy_node); + return err; + } + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + + ugeth->ug_info = ug_info; + ugeth->dev = device; + ugeth->ndev = dev; + ugeth->node = np; + + return 0; +} + +static int 
ucc_geth_remove(struct platform_device* ofdev) +{ + struct net_device *dev = platform_get_drvdata(ofdev); + struct ucc_geth_private *ugeth = netdev_priv(dev); + + unregister_netdev(dev); + free_netdev(dev); + ucc_geth_memclean(ugeth); + of_node_put(ugeth->ug_info->tbi_node); + of_node_put(ugeth->ug_info->phy_node); + + return 0; +} + +static const struct of_device_id ucc_geth_match[] = { + { + .type = "network", + .compatible = "ucc_geth", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, ucc_geth_match); + +static struct platform_driver ucc_geth_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = ucc_geth_match, + }, + .probe = ucc_geth_probe, + .remove = ucc_geth_remove, + .suspend = ucc_geth_suspend, + .resume = ucc_geth_resume, +}; + +static int __init ucc_geth_init(void) +{ + int i, ret; + + if (netif_msg_drv(&debug)) + pr_info(DRV_DESC "\n"); + for (i = 0; i < 8; i++) + memcpy(&(ugeth_info[i]), &ugeth_primary_info, + sizeof(ugeth_primary_info)); + + ret = platform_driver_register(&ucc_geth_driver); + + return ret; +} + +static void __exit ucc_geth_exit(void) +{ + platform_driver_unregister(&ucc_geth_driver); +} + +module_init(ucc_geth_init); +module_exit(ucc_geth_exit); + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h new file mode 100644 index 000000000..75f337163 --- /dev/null +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -0,0 +1,1238 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2006-2009. All rights reserved. + * + * Author: Shlomi Gridish + * + * Description: + * Internal header file for UCC Gigabit Ethernet unit routines. + * + * Changelog: + * Jun 28, 2006 Li Yang + * - Rearrange code and style fixes + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __UCC_GETH_H__ +#define __UCC_GETH_H__ + +#include +#include +#include + +#include +#include + +#include +#include + +#define DRV_DESC "QE UCC Gigabit Ethernet Controller" +#define DRV_NAME "ucc_geth" +#define DRV_VERSION "1.1" + +#define NUM_TX_QUEUES 8 +#define NUM_RX_QUEUES 8 +#define NUM_BDS_IN_PREFETCHED_BDS 4 +#define TX_IP_OFFSET_ENTRY_MAX 8 +#define NUM_OF_PADDRS 4 +#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9 +#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8 + +struct ucc_geth { + struct ucc_fast uccf; + u8 res0[0x100 - sizeof(struct ucc_fast)]; + + u32 maccfg1; /* mac configuration reg. 1 */ + u32 maccfg2; /* mac configuration reg. 2 */ + u32 ipgifg; /* interframe gap reg. */ + u32 hafdup; /* half-duplex reg. 
*/ + u8 res1[0x10]; + u8 miimng[0x18]; /* MII management structure moved to _mii.h */ + u32 ifctl; /* interface control reg */ + u32 ifstat; /* interface statux reg */ + u32 macstnaddr1; /* mac station address part 1 reg */ + u32 macstnaddr2; /* mac station address part 2 reg */ + u8 res2[0x8]; + u32 uempr; /* UCC Ethernet Mac parameter reg */ + u32 utbipar; /* UCC tbi address reg */ + u16 uescr; /* UCC Ethernet statistics control reg */ + u8 res3[0x180 - 0x15A]; + u32 tx64; /* Total number of frames (including bad + frames) transmitted that were exactly of the + minimal length (64 for un tagged, 68 for + tagged, or with length exactly equal to the + parameter MINLength */ + u32 tx127; /* Total number of frames (including bad + frames) transmitted that were between + MINLength (Including FCS length==4) and 127 + octets */ + u32 tx255; /* Total number of frames (including bad + frames) transmitted that were between 128 + (Including FCS length==4) and 255 octets */ + u32 rx64; /* Total number of frames received including + bad frames that were exactly of the mninimal + length (64 bytes) */ + u32 rx127; /* Total number of frames (including bad + frames) received that were between MINLength + (Including FCS length==4) and 127 octets */ + u32 rx255; /* Total number of frames (including bad + frames) received that were between 128 + (Including FCS length==4) and 255 octets */ + u32 txok; /* Total number of octets residing in frames + that where involved in successful + transmission */ + u16 txcf; /* Total number of PAUSE control frames + transmitted by this MAC */ + u8 res4[0x2]; + u32 tmca; /* Total number of frames that were transmitted + successfully with the group address bit set + that are not broadcast frames */ + u32 tbca; /* Total number of frames transmitted + successfully that had destination address + field equal to the broadcast address */ + u32 rxfok; /* Total number of frames received OK */ + u32 rxbok; /* Total number of octets received OK */ + u32 rbyt; /* Total number of octets received including + octets in bad frames. 
Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + successfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received successfully + that had destination address equal to the + broadcast address */ + u32 scar; /* Statistics carry register */ + u32 scam; /* Statistics caryy mask register */ + u8 res5[0x200 - 0x1c4]; +} __packed; + +/* UCC GETH TEMODR Register */ +#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics + */ +#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */ +#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4 + checksums */ +#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance + optimization + enhancement (mode1) */ +#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics + */ +#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues << + shift */ + +/* UCC GETH TEMODR Register */ +#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx + statistics */ +#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable + extended + features */ +#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation + tagged << shift */ +#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non + tagged << shift */ +#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift + */ +#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx + statistics */ +#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended + filtering + vs. + mpc82xx-like + filtering */ +#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues << + shift */ +#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable + dynamic max + frame length + */ +#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable + dynamic min + frame length + */ +#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4 + checksums */ +#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip + address to + 4-byte + boundary */ + +/* UCC GETH Event Register */ +#define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \ + UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \ + UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \ + UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0) + +#define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \ + UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \ + UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \ + UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0) + +#define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \ + UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \ + UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \ + UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0) + +#define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \ + UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \ + UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE) + +#define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY) +#define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE) + +/* TBI defines */ +#define ENET_TBI_MII_CR 0x00 /* Control */ +#define ENET_TBI_MII_SR 0x01 /* Status */ +#define ENET_TBI_MII_ANA 0x04 /* AN advertisement */ +#define ENET_TBI_MII_ANLPBPA 0x05 /* AN link partner base page ability */ +#define ENET_TBI_MII_ANEX 0x06 /* AN expansion */ +#define ENET_TBI_MII_ANNPT 0x07 /* AN next page transmit */ +#define ENET_TBI_MII_ANLPANP 0x08 /* AN link partner ability next page */ +#define ENET_TBI_MII_EXST 0x0F /* Extended status */ +#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics 
*/ +#define ENET_TBI_MII_TBICON 0x11 /* TBI control */ + +/* TBI MDIO register bit fields*/ +#define TBISR_LSTATUS 0x0004 +#define TBICON_CLK_SELECT 0x0020 +#define TBIANA_ASYMMETRIC_PAUSE 0x0100 +#define TBIANA_SYMMETRIC_PAUSE 0x0080 +#define TBIANA_HALF_DUPLEX 0x0040 +#define TBIANA_FULL_DUPLEX 0x0020 +#define TBICR_PHY_RESET 0x8000 +#define TBICR_ANEG_ENABLE 0x1000 +#define TBICR_RESTART_ANEG 0x0200 +#define TBICR_FULL_DUPLEX 0x0100 +#define TBICR_SPEED1_SET 0x0040 + +#define TBIANA_SETTINGS ( \ + TBIANA_ASYMMETRIC_PAUSE \ + | TBIANA_SYMMETRIC_PAUSE \ + | TBIANA_FULL_DUPLEX \ + ) +#define TBICR_SETTINGS ( \ + TBICR_PHY_RESET \ + | TBICR_ANEG_ENABLE \ + | TBICR_FULL_DUPLEX \ + | TBICR_SPEED1_SET \ + ) + +/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ +#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control + Rx */ +#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control + Tx */ +#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable + synchronized + to Rx stream + */ +#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */ +#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable + synchronized + to Tx stream + */ +#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */ + +/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */ +#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble + Length << + shift */ +#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble + Length mask */ +#define MACCFG2_SRP 0x00000080 /* Soft Receive + Preamble */ +#define MACCFG2_STP 0x00000040 /* Soft + Transmit + Preamble */ +#define MACCFG2_RESERVED_1 0x00000020 /* Reserved - + must be set + to 1 */ +#define MACCFG2_LC 0x00000010 /* Length Check + */ +#define MACCFG2_MPE 0x00000008 /* Magic packet + detect */ +#define MACCFG2_FDX 0x00000001 /* Full Duplex */ +#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex + mask */ +#define MACCFG2_PAD_CRC 0x00000004 +#define MACCFG2_CRC_EN 0x00000002 +#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither + Padding + short frames + nor CRC */ +#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC + only */ +#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004 +#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode + (MII/RMII/RGMII + 10/100bps) */ +#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode + (GMII/TBI/RTB/RGMII + 1000bps ) */ +#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask + covering all + relevant + bits */ + +/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non + back-to-back + inter frame + gap part 1. + << shift */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non + back-to-back + inter frame + gap part 2. + << shift */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Mimimum IFG + Enforcement + << shift */ +#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back + inter frame + gap << shift + */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back + inter frame gap part + 1. max val */ +#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back + inter frame gap part + 2. 
max val */ +#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Mimimum IFG + Enforcement max val */ +#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter + frame gap max val */ +#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000 +#define IPGIFG_NBTB_IPG_MASK 0x007F0000 +#define IPGIFG_MIN_IFG_MASK 0x0000FF00 +#define IPGIFG_BTB_IPG_MASK 0x0000007F + +/* UCC GETH HAFDUP (Half Duplex Register) */ +#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate + Binary + Exponential + Backoff + Truncation + << shift */ +#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary + Exponential Backoff + Truncation max val */ +#define HALFDUP_ALT_BEB 0x00080000 /* Alternate + Binary + Exponential + Backoff */ +#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back + pressure no + backoff */ +#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */ +#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive + Defer */ +#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum + Retransmission + << shift */ +#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum + Retransmission max + val */ +#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision + Window << + shift */ +#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max + val */ +#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000 +#define HALFDUP_RETRANS_MASK 0x0000F000 +#define HALFDUP_COL_WINDOW_MASK 0x0000003F + +/* UCC GETH UCCS (Ethernet Status Register) */ +#define UCCS_BPR 0x02 /* Back pressure (in + half duplex mode) */ +#define UCCS_PAU 0x02 /* Pause state (in full + duplex mode) */ +#define UCCS_MPD 0x01 /* Magic Packet + Detected */ + +/* UCC GETH IFSTAT (Interface Status Register) */ +#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive + transmission + defer */ + +/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */ +#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station + address 6th + octet << + shift */ +#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station + address 5th + octet << + shift */ +#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station + address 4th + octet << + shift */ +#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station + address 3rd + octet << + shift */ + +/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */ +#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station + address 2nd + octet << + shift */ +#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station + address 1st + octet << + shift */ + +/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */ +#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time + value << + shift */ +#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended + pause time + value << + shift */ + +/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */ +#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address + << shift */ +#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address + mask */ + +/* UCC GETH UESCR (Ethernet Statistics Control Register) */ +#define UESCR_AUTOZ 0x8000 /* Automatically zero + addressed + statistical counter + values */ +#define UESCR_CLRCNT 0x4000 /* Clear all statistics + counters */ +#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max + Coalescing + Value << + shift */ +#define UESCR_SCOV_SHIFT (15 - 15) /* Status + Coalescing + Value << + shift */ + +/* UCC GETH UDSR (Data Synchronization Register) */ +#define UDSR_MAGIC 0x067E + +struct ucc_geth_thread_data_tx { + u8 res0[104]; +} __packed; + +struct ucc_geth_thread_data_rx { + u8 res0[40]; +} __packed; + +/* Send Queue Queue-Descriptor */ +struct 
ucc_geth_send_queue_qd { + u32 bd_ring_base; /* pointer to BD ring base address */ + u8 res0[0x8]; + u32 last_bd_completed_address;/* initialize to last entry in BD ring */ + u8 res1[0x30]; +} __packed; + +struct ucc_geth_send_queue_mem_region { + struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES]; +} __packed; + +struct ucc_geth_thread_tx_pram { + u8 res0[64]; +} __packed; + +struct ucc_geth_thread_rx_pram { + u8 res0[128]; +} __packed; + +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 +#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96 + +struct ucc_geth_scheduler { + u16 cpucount0; /* CPU packet counter */ + u16 cpucount1; /* CPU packet counter */ + u16 cecount0; /* QE packet counter */ + u16 cecount1; /* QE packet counter */ + u16 cpucount2; /* CPU packet counter */ + u16 cpucount3; /* CPU packet counter */ + u16 cecount2; /* QE packet counter */ + u16 cecount3; /* QE packet counter */ + u16 cpucount4; /* CPU packet counter */ + u16 cpucount5; /* CPU packet counter */ + u16 cecount4; /* QE packet counter */ + u16 cecount5; /* QE packet counter */ + u16 cpucount6; /* CPU packet counter */ + u16 cpucount7; /* CPU packet counter */ + u16 cecount6; /* QE packet counter */ + u16 cecount7; /* QE packet counter */ + u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */ + u32 rtsrshadow; /* temporary variable handled by QE */ + u32 time; /* temporary variable handled by QE */ + u32 ttl; /* temporary variable handled by QE */ + u32 mblinterval; /* max burst length interval */ + u16 nortsrbytetime; /* normalized value of byte time in tsr units */ + u8 fracsiz; /* radix 2 log value of denom. of + NorTSRByteTime */ + u8 res0[1]; + u8 strictpriorityq; /* Strict Priority Mask register */ + u8 txasap; /* Transmit ASAP register */ + u8 extrabw; /* Extra BandWidth register */ + u8 oldwfqmask; /* temporary variable handled by QE */ + u8 weightfactor[NUM_TX_QUEUES]; + /**< weight factor for queues */ + u32 minw; /* temporary variable handled by QE */ + u8 res1[0x70 - 0x64]; +} __packed; + +struct ucc_geth_tx_firmware_statistics_pram { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __packed; + +struct ucc_geth_rx_firmware_statistics_pram { + u32 frrxfcser; /* frames with crc error */ + u32 fraligner; /* frames with alignment error */ + u32 inrangelenrxer; /* in range length error */ + u32 outrangelenrxer; /* out of range length error */ + u32 frtoolong; /* frame too long */ + u32 runt; /* runt */ + u32 verylongevent; /* very long event */ + u32 symbolerror; /* symbol error */ + u32 dropbsy; /* drop because of BD not ready */ + u8 res0[0x8]; + u32 mismatchdrop; /* drop because of MAC filtering (e.g. 
address + or type mismatch) */ + u32 underpkts; /* total frames less than 64 octets */ + u32 pkts256; /* total frames (including bad) between 256 and + 511 octets */ + u32 pkts512; /* total frames (including bad) between 512 and + 1023 octets */ + u32 pkts1024; /* total frames (including bad) between 1024 + and 1518 octets */ + u32 pktsjumbo; /* total frames (including bad) between 1024 + and MAXLength octets */ + u32 frlossinmacer; /* frames lost because of internal MAC error + that is not counted in any other counter */ + u32 pausefr; /* pause frames */ + u8 res1[0x4]; + u32 removevlan; /* total frames that had their VLAN tag removed + */ + u32 replacevlan; /* total frames that had their VLAN tag + replaced */ + u32 insertvlan; /* total frames that had their VLAN tag + inserted */ +} __packed; + +struct ucc_geth_rx_interrupt_coalescing_entry { + u32 interruptcoalescingmaxvalue; /* interrupt coalescing max + value */ + u32 interruptcoalescingcounter; /* interrupt coalescing counter, + initialize to + interruptcoalescingmaxvalue */ +} __packed; + +struct ucc_geth_rx_interrupt_coalescing_table { + struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES]; + /**< interrupt coalescing entry */ +} __packed; + +struct ucc_geth_rx_prefetched_bds { + struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ +} __packed; + +struct ucc_geth_rx_bd_queues_entry { + u32 bdbaseptr; /* BD base pointer */ + u32 bdptr; /* BD pointer */ + u32 externalbdbaseptr; /* external BD base pointer */ + u32 externalbdptr; /* external BD pointer */ +} __packed; + +struct ucc_geth_tx_global_pram { + u16 temoder; + u8 res0[0x38 - 0x02]; + u32 sqptr; /* a base pointer to send queue memory region */ + u32 schedulerbasepointer; /* a base pointer to scheduler memory + region */ + u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */ + u32 tstate; /* tx internal state. High byte contains + function code */ + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ + u32 tqptr; /* a base pointer to the Tx Queues Memory + Region */ + u8 res2[0x80 - 0x74]; +} __packed; + +/* structure representing Extended Filtering Global Parameters in PRAM */ +struct ucc_geth_exf_global_pram { + u32 l2pcdptr; /* individual address filter, high */ + u8 res0[0x10 - 0x04]; +} __packed; + +struct ucc_geth_rx_global_pram { + u32 remoder; /* ethernet mode reg. */ + u32 rqptr; /* base pointer to the Rx Queues Memory Region*/ + u32 res0[0x1]; + u8 res1[0x20 - 0xC]; + u16 typeorlen; /* cutoff point less than which, type/len field + is considered length */ + u8 res2[0x1]; + u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/ + u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */ + u8 res3[0x30 - 0x28]; + u32 intcoalescingptr; /* Interrupt coalescing table pointer */ + u8 res4[0x36 - 0x34]; + u8 rstate; /* rx internal state. High byte contains + function code */ + u8 res5[0x46 - 0x37]; + u16 mrblr; /* max receive buffer length reg. */ + u32 rbdqptr; /* base pointer to RxBD parameter table + description */ + u16 mflr; /* max frame length reg. */ + u16 minflr; /* min frame length reg. */ + u16 maxd1; /* max dma1 length reg. */ + u16 maxd2; /* max dma2 length reg. */ + u32 ecamptr; /* external CAM address */ + u32 l2qt; /* VLAN priority mapping table. */ + u32 l3qt[0x8]; /* IP priority mapping table. 
*/ + u16 vlantype; /* vlan type */ + u16 vlantci; /* default vlan tci */ + u8 addressfiltering[64]; /* address filtering data structure */ + u32 exfGlobalParam; /* base address for extended filtering global + parameters */ + u8 res6[0x100 - 0xC4]; /* Initialize to zero */ +} __packed; + +#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 + +/* structure representing InitEnet command */ +struct ucc_geth_init_pram { + u8 resinit1; + u8 resinit2; + u8 resinit3; + u8 resinit4; + u16 resinit5; + u8 res1[0x1]; + u8 largestexternallookupkeysize; + u32 rgftgfrxglobal; + u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */ + u8 res2[0x38 - 0x30]; + u32 txglobal; /* tx global */ + u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ + u8 res3[0x1]; +} __packed; + +#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) +#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) + +#define ENET_INIT_PARAM_RISC_MASK 0x0000003f +#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0 +#define ENET_INIT_PARAM_SNUM_MASK 0xff000000 +#define ENET_INIT_PARAM_SNUM_SHIFT 24 + +#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06 +#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30 +#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff +#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00 +#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400 + +/* structure representing 82xx Address Filtering Enet Address in PRAM */ +struct ucc_geth_82xx_enet_address { + u8 res1[0x2]; + u16 h; /* address (MSB) */ + u16 m; /* address */ + u16 l; /* address (LSB) */ +} __packed; + +/* structure representing 82xx Address Filtering PRAM */ +struct ucc_geth_82xx_address_filtering_pram { + u32 iaddr_h; /* individual address filter, high */ + u32 iaddr_l; /* individual address filter, low */ + u32 gaddr_h; /* group address filter, high */ + u32 gaddr_l; /* group address filter, low */ + struct ucc_geth_82xx_enet_address __iomem taddr; + struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS]; + u8 res0[0x40 - 0x38]; +} __packed; + +/* GETH Tx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +struct ucc_geth_tx_firmware_statistics { + u32 sicoltx; /* single collision */ + u32 mulcoltx; /* multiple collision */ + u32 latecoltxfr; /* late collision */ + u32 frabortduecol; /* frames aborted due to transmit collision */ + u32 frlostinmactxer; /* frames lost due to internal MAC error + transmission that are not counted on any + other counter */ + u32 carriersenseertx; /* carrier sense error */ + u32 frtxok; /* frames transmitted OK */ + u32 txfrexcessivedefer; /* frames with defferal time greater than + specified threshold */ + u32 txpkts256; /* total packets (including bad) between 256 + and 511 octets */ + u32 txpkts512; /* total packets (including bad) between 512 + and 1023 octets */ + u32 txpkts1024; /* total packets (including bad) between 1024 + and 1518 octets */ + u32 txpktsjumbo; /* total packets (including bad) between 1024 + and MAXLength octets */ +} __packed; + +/* GETH Rx firmware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +struct ucc_geth_rx_firmware_statistics { + u32 frrxfcser; /* frames with crc error */ + u32 fraligner; /* frames with alignment error */ + u32 inrangelenrxer; /* in range length error */ + u32 outrangelenrxer; /* out of range length error */ + u32 frtoolong; /* frame too long */ + u32 runt; /* runt */ + u32 verylongevent; /* very long event */ + u32 symbolerror; /* symbol error */ + u32 dropbsy; /* drop because of BD not ready */ + u8 res0[0x8]; + u32 mismatchdrop; /* drop because of MAC filtering (e.g. 
address + or type mismatch) */ + u32 underpkts; /* total frames less than 64 octets */ + u32 pkts256; /* total frames (including bad) between 256 and + 511 octets */ + u32 pkts512; /* total frames (including bad) between 512 and + 1023 octets */ + u32 pkts1024; /* total frames (including bad) between 1024 + and 1518 octets */ + u32 pktsjumbo; /* total frames (including bad) between 1024 + and MAXLength octets */ + u32 frlossinmacer; /* frames lost because of internal MAC error + that is not counted in any other counter */ + u32 pausefr; /* pause frames */ + u8 res1[0x4]; + u32 removevlan; /* total frames that had their VLAN tag removed + */ + u32 replacevlan; /* total frames that had their VLAN tag + replaced */ + u32 insertvlan; /* total frames that had their VLAN tag + inserted */ +} __packed; + +/* GETH hardware statistics structure, used when calling + UCC_GETH_GetStatistics. */ +struct ucc_geth_hardware_statistics { + u32 tx64; /* Total number of frames (including bad + frames) transmitted that were exactly of the + minimal length (64 for untagged, 68 for + tagged, or with length exactly equal to the + parameter MINLength) */ + u32 tx127; /* Total number of frames (including bad + frames) transmitted that were between + MINLength (Including FCS length==4) and 127 + octets */ + u32 tx255; /* Total number of frames (including bad + frames) transmitted that were between 128 + (Including FCS length==4) and 255 octets */ + u32 rx64; /* Total number of frames received including + bad frames that were exactly of the minimal + length (64 bytes) */ + u32 rx127; /* Total number of frames (including bad + frames) received that were between MINLength + (Including FCS length==4) and 127 octets */ + u32 rx255; /* Total number of frames (including bad + frames) received that were between 128 + (Including FCS length==4) and 255 octets */ + u32 txok; /* Total number of octets residing in frames + that were involved in successful + transmission */ + u16 txcf; /* Total number of PAUSE control frames + transmitted by this MAC */ + u32 tmca; /* Total number of frames that were transmitted + successfully with the group address bit set + that are not broadcast frames */ + u32 tbca; /* Total number of frames transmitted + successfully that had destination address + field equal to the broadcast address */ + u32 rxfok; /* Total number of frames received OK */ + u32 rxbok; /* Total number of octets received OK */ + u32 rbyt; /* Total number of octets received including + octets in bad frames. Must be implemented in + HW because it includes octets in frames that + never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + successfully with the group address bit set + that are not broadcast frames */ + u32 rbca; /* Total number of frames received successfully + that had destination address equal to the + broadcast address */ +} __packed; + +/* UCC GETH Tx errors returned via TxConf callback */ +#define TX_ERRORS_DEF 0x0200 +#define TX_ERRORS_EXDEF 0x0100 +#define TX_ERRORS_LC 0x0080 +#define TX_ERRORS_RL 0x0040 +#define TX_ERRORS_RC_MASK 0x003C +#define TX_ERRORS_RC_SHIFT 2 +#define TX_ERRORS_UN 0x0002 +#define TX_ERRORS_CSL 0x0001 + +/* UCC GETH Rx errors returned via RxStore callback */ +#define RX_ERRORS_CMR 0x0200 +#define RX_ERRORS_M 0x0100 +#define RX_ERRORS_BC 0x0080 +#define RX_ERRORS_MC 0x0040 + +/* Transmit BD. These are in addition to values defined in uccf. */ +#define T_VID 0x003c0000 /* insert VLAN id index mask. 
*/ +#define T_DEF (((u32) TX_ERRORS_DEF ) << 16) +#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16) +#define T_LC (((u32) TX_ERRORS_LC ) << 16) +#define T_RL (((u32) TX_ERRORS_RL ) << 16) +#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16) +#define T_UN (((u32) TX_ERRORS_UN ) << 16) +#define T_CSL (((u32) TX_ERRORS_CSL ) << 16) +#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \ + | T_UN | T_CSL) /* transmit errors to report */ + +/* Receive BD. These are in addition to values defined in uccf. */ +#define R_LG 0x00200000 /* Frame length violation. */ +#define R_NO 0x00100000 /* Non-octet aligned frame. */ +#define R_SH 0x00080000 /* Short frame. */ +#define R_CR 0x00040000 /* CRC error. */ +#define R_OV 0x00020000 /* Overrun. */ +#define R_IPCH 0x00010000 /* IP checksum check failed. */ +#define R_CMR (((u32) RX_ERRORS_CMR ) << 16) +#define R_M (((u32) RX_ERRORS_M ) << 16) +#define R_BC (((u32) RX_ERRORS_BC ) << 16) +#define R_MC (((u32) RX_ERRORS_MC ) << 16) +#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to + report */ +#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \ + R_OV | R_IPCH) /* receive errors to discard */ + +/* Alignments */ +#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256 +#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128 +#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64 +#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values + based on num of + threads, but always + using the maximum is + easier */ +#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 +#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */ +#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ +#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64 +#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ +#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ +#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This + is a + guess + */ +#define UCC_GETH_RX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_TX_BD_RING_ALIGNMENT 32 +#define UCC_GETH_MRBLR_ALIGNMENT 128 +#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4 +#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32 +#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64 + +#define UCC_GETH_TAD_EF 0x80 +#define UCC_GETH_TAD_V 0x40 +#define UCC_GETH_TAD_REJ 0x20 +#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2 +#define UCC_GETH_TAD_VTAG_OP_SHIFT 6 +#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20 +#define UCC_GETH_TAD_RQOS_SHIFT 0 +#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5 +#define UCC_GETH_TAD_CFI 0x10 + +#define UCC_GETH_VLAN_PRIORITY_MAX 8 +#define UCC_GETH_IP_PRIORITY_MAX 64 +#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 +#define UCC_GETH_RX_BD_RING_SIZE_MIN 8 +#define UCC_GETH_TX_BD_RING_SIZE_MIN 2 +#define UCC_GETH_BD_RING_SIZE_MAX 0xffff + +#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD + +/* Driver definitions */ +#define TX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x20 + +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) + +#define ENET_GROUP_ADDR 0x01 /* Group address mask + for ethernet + addresses */ + +#define TX_TIMEOUT (1*HZ) +#define SKB_ALLOC_TIMEOUT 100000 +#define PHY_INIT_TIMEOUT 100000 +#define PHY_CHANGE_TIME 2 + +/* Fast Ethernet (10/100 Mbps) */ +#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size + */ +#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */ +#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */ +#define UCC_GETH_UTFS_INIT 512 /* 
Tx virtual FIFO size + */ +#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ +#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs + due to errata */ +/* Gigabit Ethernet (1000 Mbps) */ +#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual + FIFO size */ +#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ +#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ +#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual + FIFO size */ +#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */ +#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual + FIFO size */ + +#define UCC_GETH_REMODER_INIT 0 /* bits that must be + set */ +#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */ + +/* Initial value for UPSMR */ +#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1 + +#define UCC_GETH_MACCFG1_INIT 0 +#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) + +/* Ethernet Address Type. */ +enum enet_addr_type { + ENET_ADDR_TYPE_INDIVIDUAL, + ENET_ADDR_TYPE_GROUP, + ENET_ADDR_TYPE_BROADCAST +}; + +/* UCC GETH 82xx Ethernet Address Recognition Location */ +enum ucc_geth_enet_address_recognition_location { + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station + address */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional + station + address + paddr1 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional + station + address + paddr2 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional + station + address + paddr3 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional + station + address + paddr4 */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */ + UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual + hash */ +}; + +/* UCC GETH vlan operation tagged */ +enum ucc_geth_vlan_operation_tagged { + UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */ + UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG + = 0x1, /* Tagged - replace vid portion of q tag */ + UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE + = 0x2, /* Tagged - if vid0 replace vid with default value */ + UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME + = 0x3 /* Tagged - extract q tag from frame */ +}; + +/* UCC GETH vlan operation non-tagged */ +enum ucc_geth_vlan_operation_non_tagged { + UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */ + UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged - + q tag insert + */ +}; + +/* UCC GETH Rx Quality of Service Mode */ +enum ucc_geth_qos_mode { + UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue + determined + by L2 + criteria */ + UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue + determined + by L3 + criteria */ +}; + +/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together + for combined functionality */ +enum ucc_geth_statistics_gathering_mode { + UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No + statistics + gathering */ + UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable + hardware + statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable + firmware + tx + statistics + gathering + */ + UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable + firmware + rx + statistics + gathering + */ +}; + +/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */ +enum 
ucc_geth_maccfg2_pad_and_crc_mode { + UCC_GETH_PAD_AND_CRC_MODE_NONE + = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding + short frames + nor CRC */ + UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY + = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append + CRC only */ + UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC = + MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC +}; + +/* UCC GETH upsmr Flow Control Mode */ +enum ucc_geth_flow_control_mode { + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic + flow control + */ + UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY + = 0x00004000 /* Send pause frame when RxFIFO reaches its + emergency threshold */ +}; + +/* UCC GETH number of threads */ +enum ucc_geth_num_of_threads { + UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */ + UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */ + UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */ + UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */ + UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */ +}; + +/* UCC GETH number of station addresses */ +enum ucc_geth_num_of_station_addresses { + UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */ + UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */ +}; + +/* UCC GETH 82xx Ethernet Address Container */ +struct enet_addr_container { + u8 address[ETH_ALEN]; /* ethernet address */ + enum ucc_geth_enet_address_recognition_location location; /* location in + 82xx address + recognition + hardware */ + struct list_head node; +}; + +#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node) + +/* UCC GETH Termination Action Descriptor (TAD) structure. */ +struct ucc_geth_tad_params { + int rx_non_dynamic_extended_features_mode; + int reject_frame; + enum ucc_geth_vlan_operation_tagged vtag_op; + enum ucc_geth_vlan_operation_non_tagged vnontag_op; + enum ucc_geth_qos_mode rqos; + u8 vpri; + u16 vid; +}; + +/* GETH protocol initialization structure */ +struct ucc_geth_info { + struct ucc_fast_info uf_info; + u8 numQueuesTx; + u8 numQueuesRx; + int ipCheckSumCheck; + int ipCheckSumGenerate; + int rxExtendedFiltering; + u32 extendedFilteringChainPointer; + u16 typeorlen; + int dynamicMaxFrameLength; + int dynamicMinFrameLength; + u8 nonBackToBackIfgPart1; + u8 nonBackToBackIfgPart2; + u8 miminumInterFrameGapEnforcement; + u8 backToBackInterFrameGap; + int ipAddressAlignment; + int lengthCheckRx; + u32 mblinterval; + u16 nortsrbytetime; + u8 fracsiz; + u8 strictpriorityq; + u8 txasap; + u8 extrabw; + int miiPreambleSupress; + u8 altBebTruncation; + int altBeb; + int backPressureNoBackoff; + int noBackoff; + int excessDefer; + u8 maxRetransmission; + u8 collisionWindow; + int pro; + int cap; + int rsh; + int rlpb; + int cam; + int bro; + int ecm; + int receiveFlowControl; + int transmitFlowControl; + u8 maxGroupAddrInHash; + u8 maxIndAddrInHash; + u8 prel; + u16 maxFrameLength; + u16 minFrameLength; + u16 maxD1Length; + u16 maxD2Length; + u16 vlantype; + u16 vlantci; + u32 ecamptr; + u32 eventRegMask; + u16 pausePeriod; + u16 extensionField; + struct device_node *phy_node; + struct device_node *tbi_node; + u8 weightfactor[NUM_TX_QUEUES]; + u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; + u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; + u8 l3qt[UCC_GETH_IP_PRIORITY_MAX]; + u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX]; + u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; + u16 bdRingLenTx[NUM_TX_QUEUES]; + u16 bdRingLenRx[NUM_RX_QUEUES]; + enum ucc_geth_num_of_station_addresses numStationAddresses; + enum qe_fltr_largest_external_tbl_lookup_key_size + largestexternallookupkeysize; + enum ucc_geth_statistics_gathering_mode statisticsMode; + enum 
ucc_geth_vlan_operation_tagged vlanOperationTagged; + enum ucc_geth_vlan_operation_non_tagged vlanOperationNonTagged; + enum ucc_geth_qos_mode rxQoSMode; + enum ucc_geth_flow_control_mode aufc; + enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc; + enum ucc_geth_num_of_threads numThreadsTx; + enum ucc_geth_num_of_threads numThreadsRx; + unsigned int riscTx; + unsigned int riscRx; +}; + +/* structure representing UCC GETH */ +struct ucc_geth_private { + struct ucc_geth_info *ug_info; + struct ucc_fast_private *uccf; + struct device *dev; + struct net_device *ndev; + struct napi_struct napi; + struct work_struct timeout_work; + struct ucc_geth __iomem *ug_regs; + struct ucc_geth_init_pram *p_init_enet_param_shadow; + struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param; + u32 exf_glbl_param_offset; + struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram; + u32 rx_glbl_pram_offset; + struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram; + u32 tx_glbl_pram_offset; + struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg; + u32 send_q_mem_reg_offset; + struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx; + u32 thread_dat_tx_offset; + struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx; + u32 thread_dat_rx_offset; + struct ucc_geth_scheduler __iomem *p_scheduler; + u32 scheduler_offset; + struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram; + u32 tx_fw_statistics_pram_offset; + struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram; + u32 rx_fw_statistics_pram_offset; + struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl; + u32 rx_irq_coalescing_tbl_offset; + struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl; + u32 rx_bd_qs_tbl_offset; + u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES]; + u32 tx_bd_ring_offset[NUM_TX_QUEUES]; + u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES]; + u32 rx_bd_ring_offset[NUM_RX_QUEUES]; + u8 __iomem *confBd[NUM_TX_QUEUES]; + u8 __iomem *txBd[NUM_TX_QUEUES]; + u8 __iomem *rxBd[NUM_RX_QUEUES]; + int badFrame[NUM_RX_QUEUES]; + u16 cpucount[NUM_TX_QUEUES]; + u16 __iomem *p_cpucount[NUM_TX_QUEUES]; + int indAddrRegUsed[NUM_OF_PADDRS]; + u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */ + u8 numGroupAddrInHash; + u8 numIndAddrInHash; + u8 numIndAddrInReg; + int rx_extended_features; + int rx_non_dynamic_extended_features; + struct list_head conf_skbs; + struct list_head group_hash_q; + struct list_head ind_hash_q; + u32 saved_uccm; + spinlock_t lock; + /* pointers to arrays of skbuffs for tx and rx */ + struct sk_buff **tx_skbuff[NUM_TX_QUEUES]; + struct sk_buff **rx_skbuff[NUM_RX_QUEUES]; + /* indices pointing to the next free sbk in skb arrays */ + u16 skb_curtx[NUM_TX_QUEUES]; + u16 skb_currx[NUM_RX_QUEUES]; + /* index of the first skb which hasn't been transmitted yet. 
*/ + u16 skb_dirtytx[NUM_TX_QUEUES]; + + struct ugeth_mii_info *mii_info; + struct phy_device *phydev; + phy_interface_t phy_interface; + int max_speed; + uint32_t msg_enable; + int oldspeed; + int oldduplex; + int oldlink; + int wol_en; + + struct device_node *node; +}; + +void uec_set_ethtool_ops(struct net_device *netdev); +int init_flow_control_params(u32 automatic_flow_control_mode, + int rx_flow_control_enable, int tx_flow_control_enable, + u16 pause_period, u16 extension_field, + u32 __iomem *upsmr_register, u32 __iomem *uempr_register, + u32 __iomem *maccfg1_register); + + +#endif /* __UCC_GETH_H__ */ diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c new file mode 100644 index 000000000..cc83350d5 --- /dev/null +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * Description: QE UCC Gigabit Ethernet Ethtool API Set + * + * Author: Li Yang + * + * Limitation: + * Can only get/set settings of the first queue. + * Need to re-open the interface manually after changing some parameters. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ucc_geth.h" + +static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = { + "tx-64-frames", + "tx-65-127-frames", + "tx-128-255-frames", + "rx-64-frames", + "rx-65-127-frames", + "rx-128-255-frames", + "tx-bytes-ok", + "tx-pause-frames", + "tx-multicast-frames", + "tx-broadcast-frames", + "rx-frames", + "rx-bytes-ok", + "rx-bytes-all", + "rx-multicast-frames", + "rx-broadcast-frames", + "stats-counter-carry", + "stats-counter-mask", + "rx-dropped-frames", +}; + +static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { + "tx-single-collision", + "tx-multiple-collision", + "tx-late-collsion", + "tx-aborted-frames", + "tx-lost-frames", + "tx-carrier-sense-errors", + "tx-frames-ok", + "tx-excessive-differ-frames", + "tx-256-511-frames", + "tx-512-1023-frames", + "tx-1024-1518-frames", + "tx-jumbo-frames", +}; + +static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { + "rx-crc-errors", + "rx-alignment-errors", + "rx-in-range-length-errors", + "rx-out-of-range-length-errors", + "rx-too-long-frames", + "rx-runt", + "rx-very-long-event", + "rx-symbol-errors", + "rx-busy-drop-frames", + "reserved", + "reserved", + "rx-mismatch-drop-frames", + "rx-small-than-64", + "rx-256-511-frames", + "rx-512-1023-frames", + "rx-1024-1518-frames", + "rx-jumbo-frames", + "rx-mac-error-loss", + "rx-pause-frames", + "reserved", + "rx-vlan-removed", + "rx-vlan-replaced", + "rx-vlan-inserted", + "rx-ip-checksum-errors", +}; + +#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings) +#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) +#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) + +static int +uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct phy_device *phydev = ugeth->phydev; + struct ucc_geth_info *ug_info = ugeth->ug_info; + + if (!phydev) + return -ENODEV; + + ecmd->maxtxpkt = 1; + 
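+	/* maxtxpkt is fixed at one frame; maxrxpkt reports the Rx
+	 * interrupt-coalescing maximum of queue 0, in keeping with the
+	 * "first queue only" limitation noted in this file's header.
+	 */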
ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0]; + + return phy_ethtool_gset(phydev, ecmd); +} + +static int +uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct phy_device *phydev = ugeth->phydev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, ecmd); +} + +static void +uec_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + + pause->autoneg = ugeth->phydev->autoneg; + + if (ugeth->ug_info->receiveFlowControl) + pause->rx_pause = 1; + if (ugeth->ug_info->transmitFlowControl) + pause->tx_pause = 1; +} + +static int +uec_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + int ret = 0; + + ugeth->ug_info->receiveFlowControl = pause->rx_pause; + ugeth->ug_info->transmitFlowControl = pause->tx_pause; + + if (ugeth->phydev->autoneg) { + if (netif_running(netdev)) { + /* FIXME: automatically restart */ + netdev_info(netdev, "Please re-open the interface\n"); + } + } else { + struct ucc_geth_info *ug_info = ugeth->ug_info; + + ret = init_flow_control_params(ug_info->aufc, + ug_info->receiveFlowControl, + ug_info->transmitFlowControl, + ug_info->pausePeriod, + ug_info->extensionField, + &ugeth->uccf->uf_regs->upsmr, + &ugeth->ug_regs->uempr, + &ugeth->ug_regs->maccfg1); + } + + return ret; +} + +static uint32_t +uec_get_msglevel(struct net_device *netdev) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + return ugeth->msg_enable; +} + +static void +uec_set_msglevel(struct net_device *netdev, uint32_t data) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + ugeth->msg_enable = data; +} + +static int +uec_get_regs_len(struct net_device *netdev) +{ + return sizeof(struct ucc_geth); +} + +static void +uec_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + int i; + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs; + u32 *buff = p; + + for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++) + buff[i] = in_be32(&ug_regs[i]); +} + +static void +uec_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + int queue = 0; + + ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; + + ring->rx_pending = ug_info->bdRingLenRx[queue]; + ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; + ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; + ring->tx_pending = ug_info->bdRingLenTx[queue]; +} + +static int +uec_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct ucc_geth_info *ug_info = ugeth->ug_info; + int queue = 0, ret = 0; + + if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) { + netdev_info(netdev, "RxBD ring size must be no smaller than %d\n", + UCC_GETH_RX_BD_RING_SIZE_MIN); + return -EINVAL; + } + if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) { + netdev_info(netdev, "RxBD ring size must be multiple of %d\n", + UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT); + return -EINVAL; + } + if (ring->tx_pending < 
UCC_GETH_TX_BD_RING_SIZE_MIN) { + netdev_info(netdev, "TxBD ring size must be no smaller than %d\n", + UCC_GETH_TX_BD_RING_SIZE_MIN); + return -EINVAL; + } + + ug_info->bdRingLenRx[queue] = ring->rx_pending; + ug_info->bdRingLenTx[queue] = ring->tx_pending; + + if (netif_running(netdev)) { + /* FIXME: restart automatically */ + netdev_info(netdev, "Please re-open the interface\n"); + } + + return ret; +} + +static int uec_get_sset_count(struct net_device *netdev, int sset) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 stats_mode = ugeth->ug_info->statisticsMode; + int len = 0; + + switch (sset) { + case ETH_SS_STATS: + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) + len += UEC_HW_STATS_LEN; + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) + len += UEC_TX_FW_STATS_LEN; + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) + len += UEC_RX_FW_STATS_LEN; + + return len; + + default: + return -EOPNOTSUPP; + } +} + +static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 stats_mode = ugeth->ug_info->statisticsMode; + + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { + memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN * + ETH_GSTRING_LEN); + buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN; + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN * + ETH_GSTRING_LEN); + buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) + memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * + ETH_GSTRING_LEN); +} + +static void uec_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + u32 stats_mode = ugeth->ug_info->statisticsMode; + u32 __iomem *base; + int i, j = 0; + + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { + if (ugeth->ug_regs) + base = (u32 __iomem *)&ugeth->ug_regs->tx64; + else + base = NULL; + + for (i = 0; i < UEC_HW_STATS_LEN; i++) + data[j++] = base ? in_be32(&base[i]) : 0; + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { + base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram; + for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) + data[j++] = base ? in_be32(&base[i]) : 0; + } + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { + base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram; + for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) + data[j++] = base ? 
in_be32(&base[i]) : 0; + } +} + +static int uec_nway_reset(struct net_device *netdev) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + + return phy_start_aneg(ugeth->phydev); +} + +/* Report driver information */ +static void +uec_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info)); + drvinfo->eedump_len = 0; + drvinfo->regdump_len = uec_get_regs_len(netdev); +} + +#ifdef CONFIG_PM + +static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct phy_device *phydev = ugeth->phydev; + + if (phydev && phydev->irq) + wol->supported |= WAKE_PHY; + if (qe_alive_during_sleep()) + wol->supported |= WAKE_MAGIC; + + wol->wolopts = ugeth->wol_en; +} + +static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ucc_geth_private *ugeth = netdev_priv(netdev); + struct phy_device *phydev = ugeth->phydev; + + if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) + return -EINVAL; + else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq)) + return -EINVAL; + else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep()) + return -EINVAL; + + ugeth->wol_en = wol->wolopts; + device_set_wakeup_enable(&netdev->dev, ugeth->wol_en); + + return 0; +} + +#else +#define uec_get_wol NULL +#define uec_set_wol NULL +#endif /* CONFIG_PM */ + +static const struct ethtool_ops uec_ethtool_ops = { + .get_settings = uec_get_settings, + .set_settings = uec_set_settings, + .get_drvinfo = uec_get_drvinfo, + .get_regs_len = uec_get_regs_len, + .get_regs = uec_get_regs, + .get_msglevel = uec_get_msglevel, + .set_msglevel = uec_set_msglevel, + .nway_reset = uec_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = uec_get_ringparam, + .set_ringparam = uec_set_ringparam, + .get_pauseparam = uec_get_pauseparam, + .set_pauseparam = uec_set_pauseparam, + .get_sset_count = uec_get_sset_count, + .get_strings = uec_get_strings, + .get_ethtool_stats = uec_get_ethtool_stats, + .get_wol = uec_get_wol, + .set_wol = uec_set_wol, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void uec_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &uec_ethtool_ops; +} diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c new file mode 100644 index 000000000..7b8fe866f --- /dev/null +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -0,0 +1,333 @@ +/* + * QorIQ 10G MDIO Controller + * + * Copyright 2012 Freescale Semiconductor, Inc. + * + * Authors: Andy Fleming + * Timur Tabi + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Number of microseconds to wait for a register to respond */ +#define TIMEOUT 1000 + +struct tgec_mdio_controller { + __be32 reserved[12]; + __be32 mdio_stat; /* MDIO configuration and status */ + __be32 mdio_ctl; /* MDIO control */ + __be32 mdio_data; /* MDIO data */ + __be32 mdio_addr; /* MDIO address */ +} __packed; + +#define MDIO_STAT_ENC BIT(6) +#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) +#define MDIO_STAT_BSY BIT(0) +#define MDIO_STAT_RD_ER BIT(1) +#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) +#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) +#define MDIO_CTL_PRE_DIS BIT(10) +#define MDIO_CTL_SCAN_EN BIT(11) +#define MDIO_CTL_POST_INC BIT(14) +#define MDIO_CTL_READ BIT(15) + +#define MDIO_DATA(x) (x & 0xffff) +#define MDIO_DATA_BSY BIT(31) + +struct mdio_fsl_priv { + struct tgec_mdio_controller __iomem *mdio_base; + bool is_little_endian; +}; + +static u32 xgmac_read32(void __iomem *regs, + bool is_little_endian) +{ + if (is_little_endian) + return ioread32(regs); + else + return ioread32be(regs); +} + +static void xgmac_write32(u32 value, + void __iomem *regs, + bool is_little_endian) +{ + if (is_little_endian) + iowrite32(value, regs); + else + iowrite32be(value, regs); +} + +/* + * Wait until the MDIO bus is free + */ +static int xgmac_wait_until_free(struct device *dev, + struct tgec_mdio_controller __iomem *regs, + bool is_little_endian) +{ + unsigned int timeout; + + /* Wait till the bus is free */ + timeout = TIMEOUT; + while ((xgmac_read32(®s->mdio_stat, is_little_endian) & + MDIO_STAT_BSY) && timeout) { + cpu_relax(); + timeout--; + } + + if (!timeout) { + dev_err(dev, "timeout waiting for bus to be free\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * Wait till the MDIO read or write operation is complete + */ +static int xgmac_wait_until_done(struct device *dev, + struct tgec_mdio_controller __iomem *regs, + bool is_little_endian) +{ + unsigned int timeout; + + /* Wait till the MDIO write is complete */ + timeout = TIMEOUT; + while ((xgmac_read32(®s->mdio_stat, is_little_endian) & + MDIO_STAT_BSY) && timeout) { + cpu_relax(); + timeout--; + } + + if (!timeout) { + dev_err(dev, "timeout waiting for operation to complete\n"); + return -ETIMEDOUT; + } + + return 0; +} + +/* + * Write value to the PHY for this device to the register at regnum,waiting + * until the write is done before it returns. All PHY configuration has to be + * done through the TSEC1 MIIM regs. 
+ */ +static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) +{ + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; + uint16_t dev_addr; + u32 mdio_ctl, mdio_stat; + int ret; + bool endian = priv->is_little_endian; + + mdio_stat = xgmac_read32(®s->mdio_stat, endian); + if (regnum & MII_ADDR_C45) { + /* Clause 45 (ie 10G) */ + dev_addr = (regnum >> 16) & 0x1f; + mdio_stat |= MDIO_STAT_ENC; + } else { + /* Clause 22 (ie 1G) */ + dev_addr = regnum & 0x1f; + mdio_stat &= ~MDIO_STAT_ENC; + } + + xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + /* Set the register address */ + if (regnum & MII_ADDR_C45) { + xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + } + + /* Write the value to the register */ + xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian); + + ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + return ret; + + return 0; +} + +/* + * Reads from register regnum in the PHY for device dev, returning the value. + * Clears miimcom first. All PHY configuration has to be done through the + * TSEC1 MIIM regs. + */ +static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; + struct tgec_mdio_controller __iomem *regs = priv->mdio_base; + uint16_t dev_addr; + uint32_t mdio_stat; + uint32_t mdio_ctl; + uint16_t value; + int ret; + bool endian = priv->is_little_endian; + + mdio_stat = xgmac_read32(®s->mdio_stat, endian); + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_stat |= MDIO_STAT_ENC; + } else { + dev_addr = regnum & 0x1f; + mdio_stat &= ~MDIO_STAT_ENC; + } + + xgmac_write32(mdio_stat, ®s->mdio_stat, endian); + + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Set the Port and Device Addrs */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); + + /* Set the register address */ + if (regnum & MII_ADDR_C45) { + xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); + + ret = xgmac_wait_until_free(&bus->dev, regs, endian); + if (ret) + return ret; + } + + /* Initiate the read */ + xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); + + ret = xgmac_wait_until_done(&bus->dev, regs, endian); + if (ret) + return ret; + + /* Return all Fs if nothing was there */ + if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { + dev_err(&bus->dev, + "Error while reading PHY%d reg at %d.%hhu\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + + value = xgmac_read32(®s->mdio_data, endian) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", value); + + return value; +} + +static int xgmac_mdio_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mii_bus *bus; + struct resource res; + struct mdio_fsl_priv *priv; + int ret; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(&pdev->dev, "could not obtain address\n"); + return ret; + } + + bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale XGMAC MDIO Bus"; + bus->read 
= xgmac_mdio_read; + bus->write = xgmac_mdio_write; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); + + /* Set the PHY base address */ + priv = bus->priv; + priv->mdio_base = of_iomap(np, 0); + if (!priv->mdio_base) { + ret = -ENOMEM; + goto err_ioremap; + } + + if (of_get_property(pdev->dev.of_node, + "little-endian", NULL)) + priv->is_little_endian = true; + else + priv->is_little_endian = false; + + ret = of_mdiobus_register(bus, np); + if (ret) { + dev_err(&pdev->dev, "cannot register MDIO bus\n"); + goto err_registration; + } + + platform_set_drvdata(pdev, bus); + + return 0; + +err_registration: + iounmap(priv->mdio_base); + +err_ioremap: + mdiobus_free(bus); + + return ret; +} + +static int xgmac_mdio_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + + mdiobus_unregister(bus); + iounmap(bus->priv); + mdiobus_free(bus); + + return 0; +} + +static const struct of_device_id xgmac_mdio_match[] = { + { + .compatible = "fsl,fman-xmdio", + }, + { + .compatible = "fsl,fman-memac-mdio", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgmac_mdio_match); + +static struct platform_driver xgmac_mdio_driver = { + .driver = { + .name = "fsl-fman_xmdio", + .of_match_table = xgmac_mdio_match, + }, + .probe = xgmac_mdio_probe, + .remove = xgmac_mdio_remove, +}; + +module_platform_driver(xgmac_mdio_driver); + +MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig new file mode 100644 index 000000000..108525738 --- /dev/null +++ b/drivers/net/ethernet/fujitsu/Kconfig @@ -0,0 +1,31 @@ +# +# Fujitsu Network device configuration +# + +config NET_VENDOR_FUJITSU + bool "Fujitsu devices" + default y + depends on PCMCIA + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + the questions about Fujitsu cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_FUJITSU + +config PCMCIA_FMVJ18X + tristate "Fujitsu FMV-J18x PCMCIA support" + depends on PCMCIA + select CRC32 + ---help--- + Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible + PCMCIA (PC-card) Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called fmvj18x_cs. If unsure, say N. + +endif # NET_VENDOR_FUJITSU diff --git a/drivers/net/ethernet/fujitsu/Makefile b/drivers/net/ethernet/fujitsu/Makefile new file mode 100644 index 000000000..21561fdcc --- /dev/null +++ b/drivers/net/ethernet/fujitsu/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Fujitsu network device drivers. +# + +obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c new file mode 100644 index 000000000..a7139f588 --- /dev/null +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -0,0 +1,1172 @@ +/*====================================================================== + fmvj18x_cs.c 2.8 2002/03/23 + + A fmvj18x (and its compatibles) PCMCIA client driver + + Contributed by Shingo Fujimoto, shingo@flab.fujitsu.co.jp + + TDK LAK-CD021 and CONTEC C-NET(PC)C support added by + Nobuhiro Katayama, kata-n@po.iijnet.or.jp + + The PCMCIA client code is based on code written by David Hinds. 
+ Network code is based on the "FMV-18x driver" by Yutaka TAMIYA + but is actually largely Donald Becker's AT1700 driver, which + carries the following attribution: + + Written 1993-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + +======================================================================*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "fmvj18x_cs" +#define DRV_VERSION "2.9" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +/*====================================================================*/ + +/* Module parameters */ + +MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +/* SRAM configuration */ +/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ +INT_MODULE_PARM(sram_config, 0); + + +/*====================================================================*/ +/* + PCMCIA event handlers + */ +static int fmvj18x_config(struct pcmcia_device *link); +static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id); +static int fmvj18x_setup_mfc(struct pcmcia_device *link); +static void fmvj18x_release(struct pcmcia_device *link); +static void fmvj18x_detach(struct pcmcia_device *p_dev); + +/* + LAN controller(MBH86960A) specific routines + */ +static int fjn_config(struct net_device *dev, struct ifmap *map); +static int fjn_open(struct net_device *dev); +static int fjn_close(struct net_device *dev); +static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t fjn_interrupt(int irq, void *dev_id); +static void fjn_rx(struct net_device *dev); +static void fjn_reset(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static void fjn_tx_timeout(struct net_device *dev); +static const struct ethtool_ops netdev_ethtool_ops; + +/* + card type + */ +enum cardtype { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, + XXX10304, NEC, KME +}; + +/* + driver specific data structure +*/ +struct local_info { + struct pcmcia_device *p_dev; + long open_time; + uint tx_started:1; + uint tx_queue; + u_short tx_queue_len; + enum cardtype cardtype; + u_short sent; + u_char __iomem *base; +}; + +#define MC_FILTERBREAK 64 + +/*====================================================================*/ +/* + ioport offset from the base address + */ +#define TX_STATUS 0 /* transmit status register */ +#define RX_STATUS 1 /* receive status register */ +#define TX_INTR 2 /* transmit interrupt mask register */ +#define RX_INTR 3 /* receive interrupt mask register */ +#define TX_MODE 4 /* transmit mode register */ +#define RX_MODE 5 /* receive mode register */ +#define CONFIG_0 6 /* configuration register 0 */ +#define CONFIG_1 7 /* configuration register 1 */ + +#define NODE_ID 8 /* node ID register (bank 0) */ +#define MAR_ADR 8 /* multicast address registers (bank 1) */ + +#define DATAPORT 8 /* buffer mem port registers (bank 2) */ +#define TX_START 10 /* transmit start register */ 
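+/* Offsets 8..15 are bank-multiplexed: the value written to CONFIG_1
+ * (BANK_0/BANK_1/BANK_2 below, or the BANK_xU variants on newer card
+ * types) selects whether offset 8 exposes the node ID, the multicast
+ * address registers or the buffer-memory data port.  A minimal sketch
+ * of a banked access, assuming ioaddr holds dev->base_addr and
+ * mc_filter is a local multicast hash table:
+ *
+ *	outb(BANK_1, ioaddr + CONFIG_1);
+ *	for (i = 0; i < 6; i++)
+ *		outb(mc_filter[i], ioaddr + MAR_ADR + i);
+ *	outb(BANK_2, ioaddr + CONFIG_1);
+ */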
+#define COL_CTRL 11 /* 16 collision control register */ +#define BMPR12 12 /* reserved */ +#define BMPR13 13 /* reserved */ +#define RX_SKIP 14 /* skip received packet register */ + +#define LAN_CTRL 16 /* LAN card control register */ + +#define MAC_ID 0x1a /* hardware address */ +#define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */ + +/* + control bits + */ +#define ENA_TMT_OK 0x80 +#define ENA_TMT_REC 0x20 +#define ENA_COL 0x04 +#define ENA_16_COL 0x02 +#define ENA_TBUS_ERR 0x01 + +#define ENA_PKT_RDY 0x80 +#define ENA_BUS_ERR 0x40 +#define ENA_LEN_ERR 0x08 +#define ENA_ALG_ERR 0x04 +#define ENA_CRC_ERR 0x02 +#define ENA_OVR_FLO 0x01 + +/* flags */ +#define F_TMT_RDY 0x80 /* can accept new packet */ +#define F_NET_BSY 0x40 /* carrier is detected */ +#define F_TMT_OK 0x20 /* send packet successfully */ +#define F_SRT_PKT 0x10 /* short packet error */ +#define F_COL_ERR 0x04 /* collision error */ +#define F_16_COL 0x02 /* 16 collision error */ +#define F_TBUS_ERR 0x01 /* bus read error */ + +#define F_PKT_RDY 0x80 /* packet(s) in buffer */ +#define F_BUS_ERR 0x40 /* bus read error */ +#define F_LEN_ERR 0x08 /* short packet */ +#define F_ALG_ERR 0x04 /* frame error */ +#define F_CRC_ERR 0x02 /* CRC error */ +#define F_OVR_FLO 0x01 /* overflow error */ + +#define F_BUF_EMP 0x40 /* receive buffer is empty */ + +#define F_SKP_PKT 0x05 /* drop packet in buffer */ + +/* default bitmaps */ +#define D_TX_INTR ( ENA_TMT_OK ) +#define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \ + | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO ) +#define TX_STAT_M ( F_TMT_RDY ) +#define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \ + | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO ) + +/* commands */ +#define D_TX_MODE 0x06 /* no tests, detect carrier */ +#define ID_MATCHED 0x02 /* (RX_MODE) */ +#define RECV_ALL 0x03 /* (RX_MODE) */ +#define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */ +#define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */ +#define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */ +#define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */ +#define BANK_0 0xa0 /* bank 0 (CONFIG_1) */ +#define BANK_1 0xa4 /* bank 1 (CONFIG_1) */ +#define BANK_2 0xa8 /* bank 2 (CONFIG_1) */ +#define CHIP_OFF 0x80 /* contrl chip power off (CONFIG_1) */ +#define DO_TX 0x80 /* do transmit packet */ +#define SEND_PKT 0x81 /* send a packet */ +#define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */ +#define MANU_MODE 0x03 /* Stop and skip packet on 16 col */ +#define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */ +#define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */ +#define INTR_OFF 0x0d /* LAN controller ignores interrupts */ +#define INTR_ON 0x1d /* LAN controller will catch interrupts */ + +#define TX_TIMEOUT ((400*HZ)/1000) + +#define BANK_0U 0x20 /* bank 0 (CONFIG_1) */ +#define BANK_1U 0x24 /* bank 1 (CONFIG_1) */ +#define BANK_2U 0x28 /* bank 2 (CONFIG_1) */ + +static const struct net_device_ops fjn_netdev_ops = { + .ndo_open = fjn_open, + .ndo_stop = fjn_close, + .ndo_start_xmit = fjn_start_xmit, + .ndo_tx_timeout = fjn_tx_timeout, + .ndo_set_config = fjn_config, + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int fmvj18x_probe(struct pcmcia_device *link) +{ + struct local_info *lp; + struct net_device *dev; + + dev_dbg(&link->dev, "fmvj18x_attach()\n"); + + /* Make up a FMVJ18x specific data structure */ + dev = alloc_etherdev(sizeof(struct 
local_info)); + if (!dev) + return -ENOMEM; + lp = netdev_priv(dev); + link->priv = dev; + lp->p_dev = link; + lp->base = NULL; + + /* The io structure describes IO port mapping */ + link->resource[0]->end = 32; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + /* General socket configuration */ + link->config_flags |= CONF_ENABLE_IRQ; + + dev->netdev_ops = &fjn_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + dev->ethtool_ops = &netdev_ethtool_ops; + + return fmvj18x_config(link); +} /* fmvj18x_attach */ + +/*====================================================================*/ + +static void fmvj18x_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "fmvj18x_detach\n"); + + unregister_netdev(dev); + + fmvj18x_release(link); + + free_netdev(dev); +} /* fmvj18x_detach */ + +/*====================================================================*/ + +static int mfc_try_io_port(struct pcmcia_device *link) +{ + int i, ret; + static const unsigned int serial_base[5] = + { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; + + for (i = 0; i < 5; i++) { + link->resource[1]->start = serial_base[i]; + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + if (link->resource[1]->start == 0) { + link->resource[1]->end = 0; + pr_notice("out of resource for serial\n"); + } + ret = pcmcia_request_io(link); + if (ret == 0) + return ret; + } + return ret; +} + +static int ungermann_try_io_port(struct pcmcia_device *link) +{ + int ret; + unsigned int ioaddr; + /* + Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360 + 0x380,0x3c0 only for ioport. + */ + for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) { + link->resource[0]->start = ioaddr; + ret = pcmcia_request_io(link); + if (ret == 0) { + /* calculate ConfigIndex value */ + link->config_index = + ((link->resource[0]->start & 0x0f0) >> 3) | 0x22; + return ret; + } + } + return ret; /* RequestIO failed */ +} + +static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, void *priv_data) +{ + return 0; /* strange, but that's what the code did already before... */ +} + +static int fmvj18x_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct local_info *lp = netdev_priv(dev); + int i, ret; + unsigned int ioaddr; + enum cardtype cardtype; + char *card_name = "unknown"; + u8 *buf; + size_t len; + u_char buggybuf[32]; + + dev_dbg(&link->dev, "fmvj18x_config\n"); + + link->io_lines = 5; + + len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); + kfree(buf); + + if (len) { + /* Yes, I have CISTPL_FUNCE. 
Let's check CISTPL_MANFID */ + ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL); + if (ret != 0) + goto failed; + + switch (link->manf_id) { + case MANFID_TDK: + cardtype = TDK; + if (link->card_id == PRODID_TDK_GN3410 || + link->card_id == PRODID_TDK_NP9610 || + link->card_id == PRODID_TDK_MN3200) { + /* MultiFunction Card */ + link->config_base = 0x800; + link->config_index = 0x47; + link->resource[1]->end = 8; + } + break; + case MANFID_NEC: + cardtype = NEC; /* MultiFunction Card */ + link->config_base = 0x800; + link->config_index = 0x47; + link->resource[1]->end = 8; + break; + case MANFID_KME: + cardtype = KME; /* MultiFunction Card */ + link->config_base = 0x800; + link->config_index = 0x47; + link->resource[1]->end = 8; + break; + case MANFID_CONTEC: + cardtype = CONTEC; + break; + case MANFID_FUJITSU: + if (link->config_base == 0x0fe0) + cardtype = MBH10302; + else if (link->card_id == PRODID_FUJITSU_MBH10302) + /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), + but these are MBH10304 based card. */ + cardtype = MBH10304; + else if (link->card_id == PRODID_FUJITSU_MBH10304) + cardtype = MBH10304; + else + cardtype = LA501; + break; + default: + cardtype = MBH10304; + } + } else { + /* old type card */ + switch (link->manf_id) { + case MANFID_FUJITSU: + if (link->card_id == PRODID_FUJITSU_MBH10304) { + cardtype = XXX10304; /* MBH10304 with buggy CIS */ + link->config_index = 0x20; + } else { + cardtype = MBH10302; /* NextCom NC5310, etc. */ + link->config_index = 1; + } + break; + case MANFID_UNGERMANN: + cardtype = UNGERMANN; + break; + default: + cardtype = MBH10302; + link->config_index = 1; + } + } + + if (link->resource[1]->end != 0) { + ret = mfc_try_io_port(link); + if (ret != 0) goto failed; + } else if (cardtype == UNGERMANN) { + ret = ungermann_try_io_port(link); + if (ret != 0) goto failed; + } else { + ret = pcmcia_request_io(link); + if (ret) + goto failed; + } + ret = pcmcia_request_irq(link, fjn_interrupt); + if (ret) + goto failed; + ret = pcmcia_enable_device(link); + if (ret) + goto failed; + + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + if (resource_size(link->resource[1]) != 0) { + ret = fmvj18x_setup_mfc(link); + if (ret != 0) goto failed; + } + + ioaddr = dev->base_addr; + + /* Reset controller */ + if (sram_config == 0) + outb(CONFIG0_RST, ioaddr + CONFIG_0); + else + outb(CONFIG0_RST_1, ioaddr + CONFIG_0); + + /* Power On chip and select bank 0 */ + if (cardtype == MBH10302) + outb(BANK_0, ioaddr + CONFIG_1); + else + outb(BANK_0U, ioaddr + CONFIG_1); + + /* Set hardware address */ + switch (cardtype) { + case MBH10304: + case TDK: + case LA501: + case CONTEC: + case NEC: + case KME: + if (cardtype == MBH10304) { + card_name = "FMV-J182"; + + len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); + if (len < 11) { + kfree(buf); + goto failed; + } + /* Read MACID from CIS */ + for (i = 5; i < 11; i++) + dev->dev_addr[i] = buf[i]; + kfree(buf); + } else { + if (pcmcia_get_mac_from_cis(link, dev)) + goto failed; + if( cardtype == TDK ) { + card_name = "TDK LAK-CD021"; + } else if( cardtype == LA501 ) { + card_name = "LA501"; + } else if( cardtype == NEC ) { + card_name = "PK-UG-J001"; + } else if( cardtype == KME ) { + card_name = "Panasonic"; + } else { + card_name = "C-NET(PC)C"; + } + } + break; + case UNGERMANN: + /* Read MACID from register */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i); + card_name = "Access/CARD"; + break; + case XXX10304: + /* Read MACID from Buggy CIS */ + if 
(fmvj18x_get_hwinfo(link, buggybuf) == -1) { + pr_notice("unable to read hardware net address\n"); + goto failed; + } + for (i = 0 ; i < 6; i++) { + dev->dev_addr[i] = buggybuf[i]; + } + card_name = "FMV-J182"; + break; + case MBH10302: + default: + /* Read MACID from register */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + MAC_ID + i); + card_name = "FMV-J181"; + break; + } + + lp->cardtype = cardtype; + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + pr_notice("register_netdev() failed\n"); + goto failed; + } + + /* print current configuration */ + netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n", + card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", + dev->base_addr, dev->irq, dev->dev_addr); + + return 0; + +failed: + fmvj18x_release(link); + return -ENODEV; +} /* fmvj18x_config */ +/*====================================================================*/ + +static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) +{ + u_char __iomem *base; + int i, j; + + /* Allocate a small memory window */ + link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = 0; link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); + if (i != 0) + return -1; + + base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); + pcmcia_map_mem_page(link, link->resource[2], 0); + + /* + * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format + * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff + * 'xx' is garbage. + * 'yy' is MAC address. + */ + for (i = 0; i < 0x200; i++) { + if (readb(base+i*2) == 0x22) { + if (readb(base+(i-1)*2) == 0xff && + readb(base+(i+5)*2) == 0x04 && + readb(base+(i+6)*2) == 0x06 && + readb(base+(i+13)*2) == 0xff) + break; + } + } + + if (i != 0x200) { + for (j = 0 ; j < 6; j++,i++) { + node_id[j] = readb(base+(i+7)*2); + } + } + + iounmap(base); + j = pcmcia_release_window(link, link->resource[2]); + return (i != 0x200) ? 
0 : -1; + +} /* fmvj18x_get_hwinfo */ +/*====================================================================*/ + +static int fmvj18x_setup_mfc(struct pcmcia_device *link) +{ + int i; + struct net_device *dev = link->priv; + unsigned int ioaddr; + struct local_info *lp = netdev_priv(dev); + + /* Allocate a small memory window */ + link->resource[3]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[3]->start = link->resource[3]->end = 0; + i = pcmcia_request_window(link, link->resource[3], 0); + if (i != 0) + return -1; + + lp->base = ioremap(link->resource[3]->start, + resource_size(link->resource[3])); + if (lp->base == NULL) { + netdev_notice(dev, "ioremap failed\n"); + return -1; + } + + i = pcmcia_map_mem_page(link, link->resource[3], 0); + if (i != 0) { + iounmap(lp->base); + lp->base = NULL; + return -1; + } + + ioaddr = dev->base_addr; + writeb(0x47, lp->base+0x800); /* Config Option Register of LAN */ + writeb(0x0, lp->base+0x802); /* Config and Status Register */ + + writeb(ioaddr & 0xff, lp->base+0x80a); /* I/O Base(Low) of LAN */ + writeb((ioaddr >> 8) & 0xff, lp->base+0x80c); /* I/O Base(High) of LAN */ + + writeb(0x45, lp->base+0x820); /* Config Option Register of Modem */ + writeb(0x8, lp->base+0x822); /* Config and Status Register */ + + return 0; + +} +/*====================================================================*/ + +static void fmvj18x_release(struct pcmcia_device *link) +{ + + struct net_device *dev = link->priv; + struct local_info *lp = netdev_priv(dev); + u_char __iomem *tmp; + + dev_dbg(&link->dev, "fmvj18x_release\n"); + + if (lp->base != NULL) { + tmp = lp->base; + lp->base = NULL; /* set NULL before iounmap */ + iounmap(tmp); + } + + pcmcia_disable_device(link); + +} + +static int fmvj18x_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int fmvj18x_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + fjn_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + +/*====================================================================*/ + +static const struct pcmcia_device_id fmvj18x_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x0004, 0x0004), + PCMCIA_DEVICE_PROD_ID12("EAGLE Technology", "NE200 ETHERNET LAN MBH10302 04", 0x528c88c4, 0x74f91e59), + PCMCIA_DEVICE_PROD_ID12("Eiger Labs,Inc", "EPX-10BT PC Card Ethernet 10BT", 0x53af556e, 0x877f9922), + PCMCIA_DEVICE_PROD_ID12("Eiger labs,Inc.", "EPX-10BT PC Card Ethernet 10BT", 0xf47e6c66, 0x877f9922), + PCMCIA_DEVICE_PROD_ID12("FUJITSU", "LAN Card(FMV-J182)", 0x6ee5a3d8, 0x5baf31db), + PCMCIA_DEVICE_PROD_ID12("FUJITSU", "MBH10308", 0x6ee5a3d8, 0x3f04875e), + PCMCIA_DEVICE_PROD_ID12("FUJITSU TOWA", "LA501", 0xb8451188, 0x12939ba2), + PCMCIA_DEVICE_PROD_ID12("HITACHI", "HT-4840-11", 0xf4f43949, 0x773910f4), + PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310B Ver1.0 ", 0x8cef4d3a, 0x075fc7b6), + PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310 Ver1.0 ", 0x8cef4d3a, 0xbccf43e6), + PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "10BASE_T CARD R280", 0x85c10e17, 0xd9413666), + PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CD02x", 0x1eae9475, 0x8fa0ee70), + PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CF010", 0x1eae9475, 0x7683bc9a), + PCMCIA_DEVICE_PROD_ID1("CONTEC Co.,Ltd.", 0x58d8fee2), + PCMCIA_DEVICE_PROD_ID1("PCMCIA LAN MBH10304 ES", 0x2599f454), + PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da), + PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080), + 
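+	/* Multifunction (LAN + modem) cards: the PFC entries below bind
+	   this driver to function 0, the LAN function, only. */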
PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids); + +static struct pcmcia_driver fmvj18x_cs_driver = { + .owner = THIS_MODULE, + .name = "fmvj18x_cs", + .probe = fmvj18x_probe, + .remove = fmvj18x_detach, + .id_table = fmvj18x_ids, + .suspend = fmvj18x_suspend, + .resume = fmvj18x_resume, +}; +module_pcmcia_driver(fmvj18x_cs_driver); + +/*====================================================================*/ + +static irqreturn_t fjn_interrupt(int dummy, void *dev_id) +{ + struct net_device *dev = dev_id; + struct local_info *lp = netdev_priv(dev); + unsigned int ioaddr; + unsigned short tx_stat, rx_stat; + + ioaddr = dev->base_addr; + + /* avoid multiple interrupts */ + outw(0x0000, ioaddr + TX_INTR); + + /* wait for a while */ + udelay(1); + + /* get status */ + tx_stat = inb(ioaddr + TX_STATUS); + rx_stat = inb(ioaddr + RX_STATUS); + + /* clear status */ + outb(tx_stat, ioaddr + TX_STATUS); + outb(rx_stat, ioaddr + RX_STATUS); + + pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); + pr_debug(" tx_status %02x.\n", tx_stat); + + if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { + /* there is packet(s) in rx buffer */ + fjn_rx(dev); + } + if (tx_stat & F_TMT_RDY) { + dev->stats.tx_packets += lp->sent ; + lp->sent = 0 ; + if (lp->tx_queue) { + outb(DO_TX | lp->tx_queue, ioaddr + TX_START); + lp->sent = lp->tx_queue ; + lp->tx_queue = 0; + lp->tx_queue_len = 0; + dev->trans_start = jiffies; + } else { + lp->tx_started = 0; + } + netif_wake_queue(dev); + } + pr_debug("%s: exiting interrupt,\n", dev->name); + pr_debug(" tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat); + + outb(D_TX_INTR, ioaddr + TX_INTR); + outb(D_RX_INTR, ioaddr + RX_INTR); + + if (lp->base != NULL) { + /* Ack interrupt for multifunction card */ + writeb(0x01, lp->base+0x802); + writeb(0x09, lp->base+0x822); + } + + return IRQ_HANDLED; + +} /* fjn_interrupt */ + +/*====================================================================*/ + +static void fjn_tx_timeout(struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + netdev_notice(dev, "transmit timed out with status %04x, %s?\n", + htons(inw(ioaddr + TX_STATUS)), + inb(ioaddr + TX_STATUS) & F_TMT_RDY + ? "IRQ conflict" : "network cable problem"); + netdev_notice(dev, "timeout registers: %04x %04x %04x " + "%04x %04x %04x %04x %04x.\n", + htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)), + htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)), + htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)), + htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14))); + dev->stats.tx_errors++; + /* ToDo: We should try to restart the adaptor... 
*/ + local_irq_disable(); + fjn_reset(dev); + + lp->tx_started = 0; + lp->tx_queue = 0; + lp->tx_queue_len = 0; + lp->sent = 0; + lp->open_time = jiffies; + local_irq_enable(); + netif_wake_queue(dev); +} + +static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + short length = skb->len; + + if (length < ETH_ZLEN) + { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + + netif_stop_queue(dev); + + { + unsigned char *buf = skb->data; + + if (length > ETH_FRAME_LEN) { + netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n", + length); + return NETDEV_TX_BUSY; + } + + netdev_dbg(dev, "Transmitting a packet of length %lu\n", + (unsigned long)skb->len); + dev->stats.tx_bytes += skb->len; + + /* Disable both interrupts. */ + outw(0x0000, ioaddr + TX_INTR); + + /* wait for a while */ + udelay(1); + + outw(length, ioaddr + DATAPORT); + outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1); + + lp->tx_queue++; + lp->tx_queue_len += ((length+3) & ~1); + + if (lp->tx_started == 0) { + /* If the Tx is idle, always trigger a transmit. */ + outb(DO_TX | lp->tx_queue, ioaddr + TX_START); + lp->sent = lp->tx_queue ; + lp->tx_queue = 0; + lp->tx_queue_len = 0; + lp->tx_started = 1; + netif_start_queue(dev); + } else { + if( sram_config == 0 ) { + if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) ) + /* Yes, there is room for one more packet. */ + netif_start_queue(dev); + } else { + if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) && + lp->tx_queue < 127 ) + /* Yes, there is room for one more packet. */ + netif_start_queue(dev); + } + } + + /* Re-enable interrupts */ + outb(D_TX_INTR, ioaddr + TX_INTR); + outb(D_RX_INTR, ioaddr + RX_INTR); + } + dev_kfree_skb (skb); + + return NETDEV_TX_OK; +} /* fjn_start_xmit */ + +/*====================================================================*/ + +static void fjn_reset(struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + int i; + + netdev_dbg(dev, "fjn_reset() called\n"); + + /* Reset controller */ + if( sram_config == 0 ) + outb(CONFIG0_RST, ioaddr + CONFIG_0); + else + outb(CONFIG0_RST_1, ioaddr + CONFIG_0); + + /* Power On chip and select bank 0 */ + if (lp->cardtype == MBH10302) + outb(BANK_0, ioaddr + CONFIG_1); + else + outb(BANK_0U, ioaddr + CONFIG_1); + + /* Set Tx modes */ + outb(D_TX_MODE, ioaddr + TX_MODE); + /* set Rx modes */ + outb(ID_MATCHED, ioaddr + RX_MODE); + + /* Set hardware address */ + for (i = 0; i < 6; i++) + outb(dev->dev_addr[i], ioaddr + NODE_ID + i); + + /* (re)initialize the multicast table */ + set_rx_mode(dev); + + /* Switch to bank 2 (runtime mode) */ + if (lp->cardtype == MBH10302) + outb(BANK_2, ioaddr + CONFIG_1); + else + outb(BANK_2U, ioaddr + CONFIG_1); + + /* set 16col ctrl bits */ + if( lp->cardtype == TDK || lp->cardtype == CONTEC) + outb(TDK_AUTO_MODE, ioaddr + COL_CTRL); + else + outb(AUTO_MODE, ioaddr + COL_CTRL); + + /* clear Reserved Regs */ + outb(0x00, ioaddr + BMPR12); + outb(0x00, ioaddr + BMPR13); + + /* reset Skip packet reg. */ + outb(0x01, ioaddr + RX_SKIP); + + /* Enable Tx and Rx */ + if( sram_config == 0 ) + outb(CONFIG0_DFL, ioaddr + CONFIG_0); + else + outb(CONFIG0_DFL_1, ioaddr + CONFIG_0); + + /* Init receive pointer ? 
*/ + inw(ioaddr + DATAPORT); + inw(ioaddr + DATAPORT); + + /* Clear all status */ + outb(0xff, ioaddr + TX_STATUS); + outb(0xff, ioaddr + RX_STATUS); + + if (lp->cardtype == MBH10302) + outb(INTR_OFF, ioaddr + LAN_CTRL); + + /* Turn on Rx interrupts */ + outb(D_TX_INTR, ioaddr + TX_INTR); + outb(D_RX_INTR, ioaddr + RX_INTR); + + /* Turn on interrupts from LAN card controller */ + if (lp->cardtype == MBH10302) + outb(INTR_ON, ioaddr + LAN_CTRL); +} /* fjn_reset */ + +/*====================================================================*/ + +static void fjn_rx(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int boguscount = 10; /* 5 -> 10: by agy 19940922 */ + + pr_debug("%s: in rx_packet(), rx_status %02x.\n", + dev->name, inb(ioaddr + RX_STATUS)); + + while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { + u_short status = inw(ioaddr + DATAPORT); + + netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n", + inb(ioaddr + RX_MODE), status); +#ifndef final_version + if (status == 0) { + outb(F_SKP_PKT, ioaddr + RX_SKIP); + break; + } +#endif + if ((status & 0xF0) != 0x20) { /* There was an error. */ + dev->stats.rx_errors++; + if (status & F_LEN_ERR) dev->stats.rx_length_errors++; + if (status & F_ALG_ERR) dev->stats.rx_frame_errors++; + if (status & F_CRC_ERR) dev->stats.rx_crc_errors++; + if (status & F_OVR_FLO) dev->stats.rx_over_errors++; + } else { + u_short pkt_len = inw(ioaddr + DATAPORT); + /* Malloc up new buffer. */ + struct sk_buff *skb; + + if (pkt_len > 1550) { + netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n", + pkt_len); + outb(F_SKP_PKT, ioaddr + RX_SKIP); + dev->stats.rx_errors++; + break; + } + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { + outb(F_SKP_PKT, ioaddr + RX_SKIP); + dev->stats.rx_dropped++; + break; + } + + skb_reserve(skb, 2); + insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), + (pkt_len + 1) >> 1); + skb->protocol = eth_type_trans(skb, dev); + + { + int i; + pr_debug("%s: Rxed packet of length %d: ", + dev->name, pkt_len); + for (i = 0; i < 14; i++) + pr_debug(" %02x", skb->data[i]); + pr_debug(".\n"); + } + + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + if (--boguscount <= 0) + break; + } + + /* If any worth-while packets have been received, dev_rint() + has done a netif_wake_queue() for us and will work on them + when we get to the bottom-half routine. 
*/ +/* + if (lp->cardtype != TDK) { + int i; + for (i = 0; i < 20; i++) { + if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP) + break; + (void)inw(ioaddr + DATAPORT); /+ dummy status read +/ + outb(F_SKP_PKT, ioaddr + RX_SKIP); + } + + if (i > 0) + pr_debug("%s: Exint Rx packet with mode %02x after " + "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); + } +*/ +} /* fjn_rx */ + +/*====================================================================*/ + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), + "PCMCIA 0x%lx", dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +static int fjn_config(struct net_device *dev, struct ifmap *map){ + return 0; +} + +static int fjn_open(struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + pr_debug("fjn_open('%s').\n", dev->name); + + if (!pcmcia_dev_present(link)) + return -ENODEV; + + link->open++; + + fjn_reset(dev); + + lp->tx_started = 0; + lp->tx_queue = 0; + lp->tx_queue_len = 0; + lp->open_time = jiffies; + netif_start_queue(dev); + + return 0; +} /* fjn_open */ + +/*====================================================================*/ + +static int fjn_close(struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + unsigned int ioaddr = dev->base_addr; + + pr_debug("fjn_close('%s').\n", dev->name); + + lp->open_time = 0; + netif_stop_queue(dev); + + /* Set configuration register 0 to disable Tx and Rx. */ + if( sram_config == 0 ) + outb(CONFIG0_RST ,ioaddr + CONFIG_0); + else + outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0); + + /* Update the statistics -- ToDo. */ + + /* Power-down the chip. Green, green, green! */ + outb(CHIP_OFF ,ioaddr + CONFIG_1); + + /* Set the ethernet adaptor disable IRQ */ + if (lp->cardtype == MBH10302) + outb(INTR_OFF, ioaddr + LAN_CTRL); + + link->open--; + + return 0; +} /* fjn_close */ + +/*====================================================================*/ + +/* + Set the multicast/promiscuous mode for this adaptor. +*/ + +static void set_rx_mode(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + u_char mc_filter[8]; /* Multicast hash filter */ + u_long flags; + int i; + + int saved_bank; + int saved_config_0 = inb(ioaddr + CONFIG_0); + + local_irq_save(flags); + + /* Disable Tx and Rx */ + if (sram_config == 0) + outb(CONFIG0_RST, ioaddr + CONFIG_0); + else + outb(CONFIG0_RST_1, ioaddr + CONFIG_0); + + if (dev->flags & IFF_PROMISC) { + memset(mc_filter, 0xff, sizeof(mc_filter)); + outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ + } else if (netdev_mc_count(dev) > MC_FILTERBREAK || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + outb(2, ioaddr + RX_MODE); /* Use normal mode. */ + } else if (netdev_mc_empty(dev)) { + memset(mc_filter, 0x00, sizeof(mc_filter)); + outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ + } else { + struct netdev_hw_addr *ha; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit >> 3] |= (1 << (bit & 7)); + } + outb(2, ioaddr + RX_MODE); /* Use normal mode. 
*/ + } + + /* Switch to bank 1 and set the multicast table. */ + saved_bank = inb(ioaddr + CONFIG_1); + outb(0xe4, ioaddr + CONFIG_1); + + for (i = 0; i < 8; i++) + outb(mc_filter[i], ioaddr + MAR_ADR + i); + outb(saved_bank, ioaddr + CONFIG_1); + + outb(saved_config_0, ioaddr + CONFIG_0); + + local_irq_restore(flags); +} diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig new file mode 100644 index 000000000..a54d89791 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -0,0 +1,36 @@ +# +# HISILICON device configuration +# + +config NET_VENDOR_HISILICON + bool "Hisilicon devices" + default y + depends on ARM + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Hisilicon devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_HISILICON + +config HIX5HD2_GMAC + tristate "Hisilicon HIX5HD2 Family Network Device Support" + select PHYLIB + help + This selects the hix5hd2 mac family network device. + +config HIP04_ETH + tristate "HISILICON P04 Ethernet support" + select PHYLIB + select MARVELL_PHY + select MFD_SYSCON + ---help--- + If you wish to compile a kernel for a hardware with hisilicon p04 SoC and + want to use the internal ethernet then you should answer Y to this. + +endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile new file mode 100644 index 000000000..6c14540a4 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the HISILICON network device drivers. +# + +obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o +obj-$(CONFIG_HIP04_ETH) += hip04_mdio.o hip04_eth.o diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c new file mode 100644 index 000000000..3b39fddde --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -0,0 +1,975 @@ + +/* Copyright (c) 2014 Linaro Ltd. + * Copyright (c) 2014 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PPE_CFG_RX_ADDR 0x100 +#define PPE_CFG_POOL_GRP 0x300 +#define PPE_CFG_RX_BUF_SIZE 0x400 +#define PPE_CFG_RX_FIFO_SIZE 0x500 +#define PPE_CURR_BUF_CNT 0xa200 + +#define GE_DUPLEX_TYPE 0x08 +#define GE_MAX_FRM_SIZE_REG 0x3c +#define GE_PORT_MODE 0x40 +#define GE_PORT_EN 0x44 +#define GE_SHORT_RUNTS_THR_REG 0x50 +#define GE_TX_LOCAL_PAGE_REG 0x5c +#define GE_TRANSMIT_CONTROL_REG 0x60 +#define GE_CF_CRC_STRIP_REG 0x1b0 +#define GE_MODE_CHANGE_REG 0x1b4 +#define GE_RECV_CONTROL_REG 0x1e0 +#define GE_STATION_MAC_ADDRESS 0x210 +#define PPE_CFG_CPU_ADD_ADDR 0x580 +#define PPE_CFG_MAX_FRAME_LEN_REG 0x408 +#define PPE_CFG_BUS_CTRL_REG 0x424 +#define PPE_CFG_RX_CTRL_REG 0x428 +#define PPE_CFG_RX_PKT_MODE_REG 0x438 +#define PPE_CFG_QOS_VMID_GEN 0x500 +#define PPE_CFG_RX_PKT_INT 0x538 +#define PPE_INTEN 0x600 +#define PPE_INTSTS 0x608 +#define PPE_RINT 0x604 +#define PPE_CFG_STS_MODE 0x700 +#define PPE_HIS_RX_PKT_CNT 0x804 + +/* REG_INTERRUPT */ +#define RCV_INT BIT(10) +#define RCV_NOBUF BIT(8) +#define RCV_DROP BIT(7) +#define TX_DROP BIT(6) +#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP) +#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR) + +/* TX descriptor config */ +#define TX_FREE_MEM BIT(0) +#define TX_READ_ALLOC_L3 BIT(1) +#define TX_FINISH_CACHE_INV BIT(2) +#define TX_CLEAR_WB BIT(4) +#define TX_L3_CHECKSUM BIT(5) +#define TX_LOOP_BACK BIT(11) + +/* RX error */ +#define RX_PKT_DROP BIT(0) +#define RX_L2_ERR BIT(1) +#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR) + +#define SGMII_SPEED_1000 0x08 +#define SGMII_SPEED_100 0x07 +#define SGMII_SPEED_10 0x06 +#define MII_SPEED_100 0x01 +#define MII_SPEED_10 0x00 + +#define GE_DUPLEX_FULL BIT(0) +#define GE_DUPLEX_HALF 0x00 +#define GE_MODE_CHANGE_EN BIT(0) + +#define GE_TX_AUTO_NEG BIT(5) +#define GE_TX_ADD_CRC BIT(6) +#define GE_TX_SHORT_PAD_THROUGH BIT(7) + +#define GE_RX_STRIP_CRC BIT(0) +#define GE_RX_STRIP_PAD BIT(3) +#define GE_RX_PAD_EN BIT(4) + +#define GE_AUTO_NEG_CTL BIT(0) + +#define GE_RX_INT_THRESHOLD BIT(6) +#define GE_RX_TIMEOUT 0x04 + +#define GE_RX_PORT_EN BIT(1) +#define GE_TX_PORT_EN BIT(2) + +#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12) + +#define PPE_CFG_RX_PKT_ALIGN BIT(18) +#define PPE_CFG_QOS_VMID_MODE BIT(14) +#define PPE_CFG_QOS_VMID_GRP_SHIFT 8 + +#define PPE_CFG_RX_FIFO_FSFU BIT(11) +#define PPE_CFG_RX_DEPTH_SHIFT 16 +#define PPE_CFG_RX_START_SHIFT 0 +#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11 + +#define PPE_CFG_BUS_LOCAL_REL BIT(14) +#define PPE_CFG_BUS_BIG_ENDIEN BIT(0) + +#define RX_DESC_NUM 128 +#define TX_DESC_NUM 256 +#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1)) +#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1)) + +#define GMAC_PPE_RX_PKT_MAX_LEN 379 +#define GMAC_MAX_PKT_LEN 1516 +#define GMAC_MIN_PKT_LEN 31 +#define RX_BUF_SIZE 1600 +#define RESET_TIMEOUT 1000 +#define TX_TIMEOUT (6 * HZ) + +#define DRV_NAME "hip04-ether" +#define DRV_VERSION "v1.0" + +#define HIP04_MAX_TX_COALESCE_USECS 200 +#define HIP04_MIN_TX_COALESCE_USECS 100 +#define HIP04_MAX_TX_COALESCE_FRAMES 200 +#define HIP04_MIN_TX_COALESCE_FRAMES 100 + +struct tx_desc { + u32 send_addr; + u32 send_size; + u32 next_addr; + u32 cfg; + u32 wb_addr; +} __aligned(64); + +struct rx_desc { + u16 reserved_16; + u16 pkt_len; + u32 reserve1[3]; + u32 pkt_err; + u32 reserve2[4]; +}; + +struct hip04_priv { + void __iomem *base; + int phy_mode; + int chan; + unsigned int port; + unsigned int speed; + unsigned int duplex; + unsigned int 
reg_inten; + + struct napi_struct napi; + struct net_device *ndev; + + struct tx_desc *tx_desc; + dma_addr_t tx_desc_dma; + struct sk_buff *tx_skb[TX_DESC_NUM]; + dma_addr_t tx_phys[TX_DESC_NUM]; + unsigned int tx_head; + + int tx_coalesce_frames; + int tx_coalesce_usecs; + struct hrtimer tx_coalesce_timer; + + unsigned char *rx_buf[RX_DESC_NUM]; + dma_addr_t rx_phys[RX_DESC_NUM]; + unsigned int rx_head; + unsigned int rx_buf_size; + + struct device_node *phy_node; + struct phy_device *phy; + struct regmap *map; + struct work_struct tx_timeout_task; + + /* written only by tx cleanup */ + unsigned int tx_tail ____cacheline_aligned_in_smp; +}; + +static inline unsigned int tx_count(unsigned int head, unsigned int tail) +{ + return (head - tail) % (TX_DESC_NUM - 1); +} + +static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex) +{ + struct hip04_priv *priv = netdev_priv(ndev); + u32 val; + + priv->speed = speed; + priv->duplex = duplex; + + switch (priv->phy_mode) { + case PHY_INTERFACE_MODE_SGMII: + if (speed == SPEED_1000) + val = SGMII_SPEED_1000; + else if (speed == SPEED_100) + val = SGMII_SPEED_100; + else + val = SGMII_SPEED_10; + break; + case PHY_INTERFACE_MODE_MII: + if (speed == SPEED_100) + val = MII_SPEED_100; + else + val = MII_SPEED_10; + break; + default: + netdev_warn(ndev, "not supported mode\n"); + val = MII_SPEED_10; + break; + } + writel_relaxed(val, priv->base + GE_PORT_MODE); + + val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF; + writel_relaxed(val, priv->base + GE_DUPLEX_TYPE); + + val = GE_MODE_CHANGE_EN; + writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG); +} + +static void hip04_reset_ppe(struct hip04_priv *priv) +{ + u32 val, tmp, timeout = 0; + + do { + regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val); + regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp); + if (timeout++ > RESET_TIMEOUT) + break; + } while (val & 0xfff); +} + +static void hip04_config_fifo(struct hip04_priv *priv) +{ + u32 val; + + val = readl_relaxed(priv->base + PPE_CFG_STS_MODE); + val |= PPE_CFG_STS_RX_PKT_CNT_RC; + writel_relaxed(val, priv->base + PPE_CFG_STS_MODE); + + val = BIT(priv->port); + regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val); + + val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT; + val |= PPE_CFG_QOS_VMID_MODE; + writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN); + + val = RX_BUF_SIZE; + regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val); + + val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT; + val |= PPE_CFG_RX_FIFO_FSFU; + val |= priv->chan << PPE_CFG_RX_START_SHIFT; + regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val); + + val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT; + writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG); + + val = PPE_CFG_RX_PKT_ALIGN; + writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG); + + val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN; + writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG); + + val = GMAC_PPE_RX_PKT_MAX_LEN; + writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG); + + val = GMAC_MAX_PKT_LEN; + writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG); + + val = GMAC_MIN_PKT_LEN; + writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG); + + val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG); + val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH; + writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG); + + val = GE_RX_STRIP_CRC; + writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG); + + val = 
readl_relaxed(priv->base + GE_RECV_CONTROL_REG); + val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN; + writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG); + + val = GE_AUTO_NEG_CTL; + writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG); +} + +static void hip04_mac_enable(struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + u32 val; + + /* enable tx & rx */ + val = readl_relaxed(priv->base + GE_PORT_EN); + val |= GE_RX_PORT_EN | GE_TX_PORT_EN; + writel_relaxed(val, priv->base + GE_PORT_EN); + + /* clear rx int */ + val = RCV_INT; + writel_relaxed(val, priv->base + PPE_RINT); + + /* config recv int */ + val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT; + writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT); + + /* enable interrupt */ + priv->reg_inten = DEF_INT_MASK; + writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); +} + +static void hip04_mac_disable(struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + u32 val; + + /* disable int */ + priv->reg_inten &= ~(DEF_INT_MASK); + writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); + + /* disable tx & rx */ + val = readl_relaxed(priv->base + GE_PORT_EN); + val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN); + writel_relaxed(val, priv->base + GE_PORT_EN); +} + +static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys) +{ + writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR); +} + +static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys) +{ + regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys); +} + +static u32 hip04_recv_cnt(struct hip04_priv *priv) +{ + return readl(priv->base + PPE_HIS_RX_PKT_CNT); +} + +static void hip04_update_mac_address(struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + + writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])), + priv->base + GE_STATION_MAC_ADDRESS); + writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])), + priv->base + GE_STATION_MAC_ADDRESS + 4); +} + +static int hip04_set_mac_address(struct net_device *ndev, void *addr) +{ + eth_mac_addr(ndev, addr); + hip04_update_mac_address(ndev); + return 0; +} + +static int hip04_tx_reclaim(struct net_device *ndev, bool force) +{ + struct hip04_priv *priv = netdev_priv(ndev); + unsigned tx_tail = priv->tx_tail; + struct tx_desc *desc; + unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned int count; + + smp_rmb(); + count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail); + if (count == 0) + goto out; + + while (count) { + desc = &priv->tx_desc[tx_tail]; + if (desc->send_addr != 0) { + if (force) + desc->send_addr = 0; + else + break; + } + + if (priv->tx_phys[tx_tail]) { + dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail], + priv->tx_skb[tx_tail]->len, + DMA_TO_DEVICE); + priv->tx_phys[tx_tail] = 0; + } + pkts_compl++; + bytes_compl += priv->tx_skb[tx_tail]->len; + dev_kfree_skb(priv->tx_skb[tx_tail]); + priv->tx_skb[tx_tail] = NULL; + tx_tail = TX_NEXT(tx_tail); + count--; + } + + priv->tx_tail = tx_tail; + smp_wmb(); /* Ensure tx_tail visible to xmit */ + +out: + if (pkts_compl || bytes_compl) + netdev_completed_queue(ndev, pkts_compl, bytes_compl); + + if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1))) + netif_wake_queue(ndev); + + return count; +} + +static void hip04_start_tx_timer(struct hip04_priv *priv) +{ + unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2; + + /* allow timer to fire after half the time at the earliest */ + 
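+	/* hrtimer_start_range_ns() takes a soft expiry plus a slack window;
+	 * passing tx_coalesce_usecs/2 for both lets the timer fire anywhere
+	 * in [tx_coalesce_usecs/2, tx_coalesce_usecs], so the core may merge
+	 * it with other expiring timers while TX reclaim latency stays bounded.
+	 */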
hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns), + ns, HRTIMER_MODE_REL); +} + +static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned int tx_head = priv->tx_head, count; + struct tx_desc *desc = &priv->tx_desc[tx_head]; + dma_addr_t phys; + + smp_rmb(); + count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail)); + if (count == (TX_DESC_NUM - 1)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, phys)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + priv->tx_skb[tx_head] = skb; + priv->tx_phys[tx_head] = phys; + desc->send_addr = cpu_to_be32(phys); + desc->send_size = cpu_to_be32(skb->len); + desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV); + phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc); + desc->wb_addr = cpu_to_be32(phys); + skb_tx_timestamp(skb); + + hip04_set_xmit_desc(priv, phys); + priv->tx_head = TX_NEXT(tx_head); + count++; + netdev_sent_queue(ndev, skb->len); + + stats->tx_bytes += skb->len; + stats->tx_packets++; + + /* Ensure tx_head update visible to tx reclaim */ + smp_wmb(); + + /* queue is getting full, better start cleaning up now */ + if (count >= priv->tx_coalesce_frames) { + if (napi_schedule_prep(&priv->napi)) { + /* disable rx interrupt and timer */ + priv->reg_inten &= ~(RCV_INT); + writel_relaxed(DEF_INT_MASK & ~RCV_INT, + priv->base + PPE_INTEN); + hrtimer_cancel(&priv->tx_coalesce_timer); + __napi_schedule(&priv->napi); + } + } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) { + /* cleanup not pending yet, start a new timer */ + hip04_start_tx_timer(priv); + } + + return NETDEV_TX_OK; +} + +static int hip04_rx_poll(struct napi_struct *napi, int budget) +{ + struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi); + struct net_device *ndev = priv->ndev; + struct net_device_stats *stats = &ndev->stats; + unsigned int cnt = hip04_recv_cnt(priv); + struct rx_desc *desc; + struct sk_buff *skb; + unsigned char *buf; + bool last = false; + dma_addr_t phys; + int rx = 0; + int tx_remaining; + u16 len; + u32 err; + + while (cnt && !last) { + buf = priv->rx_buf[priv->rx_head]; + skb = build_skb(buf, priv->rx_buf_size); + if (unlikely(!skb)) + net_dbg_ratelimited("build_skb failed\n"); + + dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head], + RX_BUF_SIZE, DMA_FROM_DEVICE); + priv->rx_phys[priv->rx_head] = 0; + + desc = (struct rx_desc *)skb->data; + len = be16_to_cpu(desc->pkt_len); + err = be32_to_cpu(desc->pkt_err); + + if (0 == len) { + dev_kfree_skb_any(skb); + last = true; + } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) { + dev_kfree_skb_any(skb); + stats->rx_dropped++; + stats->rx_errors++; + } else { + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi, skb); + stats->rx_packets++; + stats->rx_bytes += len; + rx++; + } + + buf = netdev_alloc_frag(priv->rx_buf_size); + if (!buf) + goto done; + phys = dma_map_single(&ndev->dev, buf, + RX_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, phys)) + goto done; + priv->rx_buf[priv->rx_head] = buf; + priv->rx_phys[priv->rx_head] = phys; + hip04_set_recv_desc(priv, phys); + + priv->rx_head = RX_NEXT(priv->rx_head); + if (rx >= budget) + goto done; + + if (--cnt == 0) + cnt = 
hip04_recv_cnt(priv); + } + + if (!(priv->reg_inten & RCV_INT)) { + /* enable rx interrupt */ + priv->reg_inten |= RCV_INT; + writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN); + } + napi_complete(napi); +done: + /* clean up tx descriptors and start a new timer if necessary */ + tx_remaining = hip04_tx_reclaim(ndev, false); + if (rx < budget && tx_remaining) + hip04_start_tx_timer(priv); + + return rx; +} + +static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct hip04_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + u32 ists = readl_relaxed(priv->base + PPE_INTSTS); + + if (!ists) + return IRQ_NONE; + + writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT); + + if (unlikely(ists & DEF_INT_ERR)) { + if (ists & (RCV_NOBUF | RCV_DROP)) { + stats->rx_errors++; + stats->rx_dropped++; + netdev_err(ndev, "rx drop\n"); + } + if (ists & TX_DROP) { + stats->tx_dropped++; + netdev_err(ndev, "tx drop\n"); + } + } + + if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) { + /* disable rx interrupt */ + priv->reg_inten &= ~(RCV_INT); + writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN); + hrtimer_cancel(&priv->tx_coalesce_timer); + __napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +enum hrtimer_restart tx_done(struct hrtimer *hrtimer) +{ + struct hip04_priv *priv; + + priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer); + + if (napi_schedule_prep(&priv->napi)) { + /* disable rx interrupt */ + priv->reg_inten &= ~(RCV_INT); + writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN); + __napi_schedule(&priv->napi); + } + + return HRTIMER_NORESTART; +} + +static void hip04_adjust_link(struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + struct phy_device *phy = priv->phy; + + if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) { + hip04_config_port(ndev, phy->speed, phy->duplex); + phy_print_status(phy); + } +} + +static int hip04_mac_open(struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + int i; + + priv->rx_head = 0; + priv->tx_head = 0; + priv->tx_tail = 0; + hip04_reset_ppe(priv); + + for (i = 0; i < RX_DESC_NUM; i++) { + dma_addr_t phys; + + phys = dma_map_single(&ndev->dev, priv->rx_buf[i], + RX_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, phys)) + return -EIO; + + priv->rx_phys[i] = phys; + hip04_set_recv_desc(priv, phys); + } + + if (priv->phy) + phy_start(priv->phy); + + netdev_reset_queue(ndev); + netif_start_queue(ndev); + hip04_mac_enable(ndev); + napi_enable(&priv->napi); + + return 0; +} + +static int hip04_mac_stop(struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + int i; + + napi_disable(&priv->napi); + netif_stop_queue(ndev); + hip04_mac_disable(ndev); + hip04_tx_reclaim(ndev, true); + hip04_reset_ppe(priv); + + if (priv->phy) + phy_stop(priv->phy); + + for (i = 0; i < RX_DESC_NUM; i++) { + if (priv->rx_phys[i]) { + dma_unmap_single(&ndev->dev, priv->rx_phys[i], + RX_BUF_SIZE, DMA_FROM_DEVICE); + priv->rx_phys[i] = 0; + } + } + + return 0; +} + +static void hip04_timeout(struct net_device *ndev) +{ + struct hip04_priv *priv = netdev_priv(ndev); + + schedule_work(&priv->tx_timeout_task); +} + +static void hip04_tx_timeout_task(struct work_struct *work) +{ + struct hip04_priv *priv; + + priv = container_of(work, struct hip04_priv, tx_timeout_task); + hip04_mac_stop(priv->ndev); + hip04_mac_open(priv->ndev); +} + +static 
struct net_device_stats *hip04_get_stats(struct net_device *ndev) +{ + return &ndev->stats; +} + +static int hip04_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct hip04_priv *priv = netdev_priv(netdev); + + ec->tx_coalesce_usecs = priv->tx_coalesce_usecs; + ec->tx_max_coalesced_frames = priv->tx_coalesce_frames; + + return 0; +} + +static int hip04_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct hip04_priv *priv = netdev_priv(netdev); + + /* Check not supported parameters */ + if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) || + (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) || + (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) || + (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) || + (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) || + (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) || + (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) || + (ec->tx_max_coalesced_frames_irq) || + (ec->stats_block_coalesce_usecs) || + (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval)) + return -EOPNOTSUPP; + + if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS || + ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) || + (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES || + ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES)) + return -EINVAL; + + priv->tx_coalesce_usecs = ec->tx_coalesce_usecs; + priv->tx_coalesce_frames = ec->tx_max_coalesced_frames; + + return 0; +} + +static void hip04_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); +} + +static struct ethtool_ops hip04_ethtool_ops = { + .get_coalesce = hip04_get_coalesce, + .set_coalesce = hip04_set_coalesce, + .get_drvinfo = hip04_get_drvinfo, +}; + +static struct net_device_ops hip04_netdev_ops = { + .ndo_open = hip04_mac_open, + .ndo_stop = hip04_mac_stop, + .ndo_get_stats = hip04_get_stats, + .ndo_start_xmit = hip04_mac_start_xmit, + .ndo_set_mac_address = hip04_set_mac_address, + .ndo_tx_timeout = hip04_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int hip04_alloc_ring(struct net_device *ndev, struct device *d) +{ + struct hip04_priv *priv = netdev_priv(ndev); + int i; + + priv->tx_desc = dma_alloc_coherent(d, + TX_DESC_NUM * sizeof(struct tx_desc), + &priv->tx_desc_dma, GFP_KERNEL); + if (!priv->tx_desc) + return -ENOMEM; + + priv->rx_buf_size = RX_BUF_SIZE + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + for (i = 0; i < RX_DESC_NUM; i++) { + priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size); + if (!priv->rx_buf[i]) + return -ENOMEM; + } + + return 0; +} + +static void hip04_free_ring(struct net_device *ndev, struct device *d) +{ + struct hip04_priv *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < RX_DESC_NUM; i++) + if (priv->rx_buf[i]) + put_page(virt_to_head_page(priv->rx_buf[i])); + + for (i = 0; i < TX_DESC_NUM; i++) + if (priv->tx_skb[i]) + dev_kfree_skb_any(priv->tx_skb[i]); + + dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc), + priv->tx_desc, priv->tx_desc_dma); +} + +static int hip04_mac_probe(struct platform_device *pdev) +{ + struct device *d = &pdev->dev; + struct device_node *node = d->of_node; + struct of_phandle_args arg; + struct 
net_device *ndev; + struct hip04_priv *priv; + struct resource *res; + unsigned int irq; + int ret; + + ndev = alloc_etherdev(sizeof(struct hip04_priv)); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->ndev = ndev; + platform_set_drvdata(pdev, ndev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(d, res); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto init_fail; + } + + ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg); + if (ret < 0) { + dev_warn(d, "no port-handle\n"); + goto init_fail; + } + + priv->port = arg.args[0]; + priv->chan = arg.args[1] * RX_DESC_NUM; + + hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + /* BQL will try to keep the TX queue as short as possible, but it can't + * be faster than tx_coalesce_usecs, so we need a fast timeout here, + * but also long enough to gather up enough frames to ensure we don't + * get more interrupts than necessary. + * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate + */ + priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4; + priv->tx_coalesce_usecs = 200; + priv->tx_coalesce_timer.function = tx_done; + + priv->map = syscon_node_to_regmap(arg.np); + if (IS_ERR(priv->map)) { + dev_warn(d, "no syscon hisilicon,hip04-ppe\n"); + ret = PTR_ERR(priv->map); + goto init_fail; + } + + priv->phy_mode = of_get_phy_mode(node); + if (priv->phy_mode < 0) { + dev_warn(d, "not find phy-mode\n"); + ret = -EINVAL; + goto init_fail; + } + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + ret = -EINVAL; + goto init_fail; + } + + ret = devm_request_irq(d, irq, hip04_mac_interrupt, + 0, pdev->name, ndev); + if (ret) { + netdev_err(ndev, "devm_request_irq failed\n"); + goto init_fail; + } + + priv->phy_node = of_parse_phandle(node, "phy-handle", 0); + if (priv->phy_node) { + priv->phy = of_phy_connect(ndev, priv->phy_node, + &hip04_adjust_link, + 0, priv->phy_mode); + if (!priv->phy) { + ret = -EPROBE_DEFER; + goto init_fail; + } + } + + INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task); + + ether_setup(ndev); + ndev->netdev_ops = &hip04_netdev_ops; + ndev->ethtool_ops = &hip04_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + ndev->priv_flags |= IFF_UNICAST_FLT; + ndev->irq = irq; + netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT); + SET_NETDEV_DEV(ndev, &pdev->dev); + + hip04_reset_ppe(priv); + if (priv->phy_mode == PHY_INTERFACE_MODE_MII) + hip04_config_port(ndev, SPEED_100, DUPLEX_FULL); + + hip04_config_fifo(priv); + random_ether_addr(ndev->dev_addr); + hip04_update_mac_address(ndev); + + ret = hip04_alloc_ring(ndev, d); + if (ret) { + netdev_err(ndev, "alloc ring fail\n"); + goto alloc_fail; + } + + ret = register_netdev(ndev); + if (ret) { + free_netdev(ndev); + goto alloc_fail; + } + + return 0; + +alloc_fail: + hip04_free_ring(ndev, d); +init_fail: + of_node_put(priv->phy_node); + free_netdev(ndev); + return ret; +} + +static int hip04_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hip04_priv *priv = netdev_priv(ndev); + struct device *d = &pdev->dev; + + if (priv->phy) + phy_disconnect(priv->phy); + + hip04_free_ring(ndev, d); + unregister_netdev(ndev); + free_irq(ndev->irq, ndev); + of_node_put(priv->phy_node); + cancel_work_sync(&priv->tx_timeout_task); + free_netdev(ndev); + + return 0; +} + +static const struct of_device_id hip04_mac_match[] = { + { .compatible = "hisilicon,hip04-mac" }, + { } +}; + 
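+/* Devicetree resources consumed by hip04_mac_probe() above: one MMIO
+ * region and one interrupt for the MAC; a "port-handle" phandle to the
+ * hisilicon,hip04-ppe syscon with two cells giving the port number and
+ * the RX channel group (scaled by RX_DESC_NUM); a "phy-mode", and an
+ * optional "phy-handle" for the attached PHY.
+ */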
+MODULE_DEVICE_TABLE(of, hip04_mac_match); + +static struct platform_driver hip04_mac_driver = { + .probe = hip04_mac_probe, + .remove = hip04_remove, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = hip04_mac_match, + }, +}; +module_platform_driver(hip04_mac_driver); + +MODULE_DESCRIPTION("HISILICON P04 Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c new file mode 100644 index 000000000..b3bac25db --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c @@ -0,0 +1,186 @@ +/* Copyright (c) 2014 Linaro Ltd. + * Copyright (c) 2014 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +#define MDIO_CMD_REG 0x0 +#define MDIO_ADDR_REG 0x4 +#define MDIO_WDATA_REG 0x8 +#define MDIO_RDATA_REG 0xc +#define MDIO_STA_REG 0x10 + +#define MDIO_START BIT(14) +#define MDIO_R_VALID BIT(1) +#define MDIO_READ (BIT(12) | BIT(11) | MDIO_START) +#define MDIO_WRITE (BIT(12) | BIT(10) | MDIO_START) + +struct hip04_mdio_priv { + void __iomem *base; +}; + +#define WAIT_TIMEOUT 10 +static int hip04_mdio_wait_ready(struct mii_bus *bus) +{ + struct hip04_mdio_priv *priv = bus->priv; + int i; + + for (i = 0; readl_relaxed(priv->base + MDIO_CMD_REG) & MDIO_START; i++) { + if (i == WAIT_TIMEOUT) + return -ETIMEDOUT; + msleep(20); + } + + return 0; +} + +static int hip04_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct hip04_mdio_priv *priv = bus->priv; + u32 val; + int ret; + + ret = hip04_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + val = regnum | (mii_id << 5) | MDIO_READ; + writel_relaxed(val, priv->base + MDIO_CMD_REG); + + ret = hip04_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + val = readl_relaxed(priv->base + MDIO_STA_REG); + if (val & MDIO_R_VALID) { + dev_err(bus->parent, "SMI bus read not valid\n"); + ret = -ENODEV; + goto out; + } + + val = readl_relaxed(priv->base + MDIO_RDATA_REG); + ret = val & 0xFFFF; +out: + return ret; +} + +static int hip04_mdio_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + struct hip04_mdio_priv *priv = bus->priv; + u32 val; + int ret; + + ret = hip04_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + writel_relaxed(value, priv->base + MDIO_WDATA_REG); + val = regnum | (mii_id << 5) | MDIO_WRITE; + writel_relaxed(val, priv->base + MDIO_CMD_REG); +out: + return ret; +} + +static int hip04_mdio_reset(struct mii_bus *bus) +{ + int temp, i; + + for (i = 0; i < PHY_MAX_ADDR; i++) { + hip04_mdio_write(bus, i, 22, 0); + temp = hip04_mdio_read(bus, i, MII_BMCR); + if (temp < 0) + continue; + + temp |= BMCR_RESET; + if (hip04_mdio_write(bus, i, MII_BMCR, temp) < 0) + continue; + } + + mdelay(500); + return 0; +} + +static int hip04_mdio_probe(struct platform_device *pdev) +{ + struct resource *r; + struct mii_bus *bus; + struct hip04_mdio_priv *priv; + int ret; + + bus = mdiobus_alloc_size(sizeof(struct hip04_mdio_priv)); + if (!bus) { + dev_err(&pdev->dev, "Cannot allocate MDIO bus\n"); + return -ENOMEM; + } + + bus->name = "hip04_mdio_bus"; + bus->read = hip04_mdio_read; + bus->write = hip04_mdio_write; + bus->reset = hip04_mdio_reset; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); + bus->parent = 
&pdev->dev; + priv = bus->priv; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto out_mdio; + } + + ret = of_mdiobus_register(bus, pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); + goto out_mdio; + } + + platform_set_drvdata(pdev, bus); + + return 0; + +out_mdio: + mdiobus_free(bus); + return ret; +} + +static int hip04_mdio_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + + mdiobus_unregister(bus); + mdiobus_free(bus); + + return 0; +} + +static const struct of_device_id hip04_mdio_match[] = { + { .compatible = "hisilicon,hip04-mdio" }, + { } +}; +MODULE_DEVICE_TABLE(of, hip04_mdio_match); + +static struct platform_driver hip04_mdio_driver = { + .probe = hip04_mdio_probe, + .remove = hip04_mdio_remove, + .driver = { + .name = "hip04-mdio", + .owner = THIS_MODULE, + .of_match_table = hip04_mdio_match, + }, +}; + +module_platform_driver(hip04_mdio_driver); + +MODULE_DESCRIPTION("HISILICON P04 MDIO interface driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:hip04-mdio"); diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c new file mode 100644 index 000000000..0ffdcd381 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -0,0 +1,1066 @@ +/* Copyright (c) 2014 Linaro Ltd. + * Copyright (c) 2014 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define STATION_ADDR_LOW 0x0000 +#define STATION_ADDR_HIGH 0x0004 +#define MAC_DUPLEX_HALF_CTRL 0x0008 +#define MAX_FRM_SIZE 0x003c +#define PORT_MODE 0x0040 +#define PORT_EN 0x0044 +#define BITS_TX_EN BIT(2) +#define BITS_RX_EN BIT(1) +#define REC_FILT_CONTROL 0x0064 +#define BIT_CRC_ERR_PASS BIT(5) +#define BIT_PAUSE_FRM_PASS BIT(4) +#define BIT_VLAN_DROP_EN BIT(3) +#define BIT_BC_DROP_EN BIT(2) +#define BIT_MC_MATCH_EN BIT(1) +#define BIT_UC_MATCH_EN BIT(0) +#define PORT_MC_ADDR_LOW 0x0068 +#define PORT_MC_ADDR_HIGH 0x006C +#define CF_CRC_STRIP 0x01b0 +#define MODE_CHANGE_EN 0x01b4 +#define BIT_MODE_CHANGE_EN BIT(0) +#define COL_SLOT_TIME 0x01c0 +#define RECV_CONTROL 0x01e0 +#define BIT_STRIP_PAD_EN BIT(3) +#define BIT_RUNT_PKT_EN BIT(4) +#define CONTROL_WORD 0x0214 +#define MDIO_SINGLE_CMD 0x03c0 +#define MDIO_SINGLE_DATA 0x03c4 +#define MDIO_CTRL 0x03cc +#define MDIO_RDATA_STATUS 0x03d0 + +#define MDIO_START BIT(20) +#define MDIO_R_VALID BIT(0) +#define MDIO_READ (BIT(17) | MDIO_START) +#define MDIO_WRITE (BIT(16) | MDIO_START) + +#define RX_FQ_START_ADDR 0x0500 +#define RX_FQ_DEPTH 0x0504 +#define RX_FQ_WR_ADDR 0x0508 +#define RX_FQ_RD_ADDR 0x050c +#define RX_FQ_VLDDESC_CNT 0x0510 +#define RX_FQ_ALEMPTY_TH 0x0514 +#define RX_FQ_REG_EN 0x0518 +#define BITS_RX_FQ_START_ADDR_EN BIT(2) +#define BITS_RX_FQ_DEPTH_EN BIT(1) +#define BITS_RX_FQ_RD_ADDR_EN BIT(0) +#define RX_FQ_ALFULL_TH 0x051c +#define RX_BQ_START_ADDR 0x0520 +#define RX_BQ_DEPTH 0x0524 +#define RX_BQ_WR_ADDR 0x0528 +#define RX_BQ_RD_ADDR 0x052c +#define RX_BQ_FREE_DESC_CNT 0x0530 +#define RX_BQ_ALEMPTY_TH 0x0534 +#define RX_BQ_REG_EN 0x0538 +#define BITS_RX_BQ_START_ADDR_EN BIT(2) +#define BITS_RX_BQ_DEPTH_EN BIT(1) +#define BITS_RX_BQ_WR_ADDR_EN BIT(0) +#define RX_BQ_ALFULL_TH 0x053c +#define TX_BQ_START_ADDR 0x0580 +#define TX_BQ_DEPTH 0x0584 +#define TX_BQ_WR_ADDR 0x0588 +#define TX_BQ_RD_ADDR 0x058c +#define TX_BQ_VLDDESC_CNT 0x0590 +#define TX_BQ_ALEMPTY_TH 0x0594 +#define TX_BQ_REG_EN 0x0598 +#define BITS_TX_BQ_START_ADDR_EN BIT(2) +#define BITS_TX_BQ_DEPTH_EN BIT(1) +#define BITS_TX_BQ_RD_ADDR_EN BIT(0) +#define TX_BQ_ALFULL_TH 0x059c +#define TX_RQ_START_ADDR 0x05a0 +#define TX_RQ_DEPTH 0x05a4 +#define TX_RQ_WR_ADDR 0x05a8 +#define TX_RQ_RD_ADDR 0x05ac +#define TX_RQ_FREE_DESC_CNT 0x05b0 +#define TX_RQ_ALEMPTY_TH 0x05b4 +#define TX_RQ_REG_EN 0x05b8 +#define BITS_TX_RQ_START_ADDR_EN BIT(2) +#define BITS_TX_RQ_DEPTH_EN BIT(1) +#define BITS_TX_RQ_WR_ADDR_EN BIT(0) +#define TX_RQ_ALFULL_TH 0x05bc +#define RAW_PMU_INT 0x05c0 +#define ENA_PMU_INT 0x05c4 +#define STATUS_PMU_INT 0x05c8 +#define MAC_FIFO_ERR_IN BIT(30) +#define TX_RQ_IN_TIMEOUT_INT BIT(29) +#define RX_BQ_IN_TIMEOUT_INT BIT(28) +#define TXOUTCFF_FULL_INT BIT(27) +#define TXOUTCFF_EMPTY_INT BIT(26) +#define TXCFF_FULL_INT BIT(25) +#define TXCFF_EMPTY_INT BIT(24) +#define RXOUTCFF_FULL_INT BIT(23) +#define RXOUTCFF_EMPTY_INT BIT(22) +#define RXCFF_FULL_INT BIT(21) +#define RXCFF_EMPTY_INT BIT(20) +#define TX_RQ_IN_INT BIT(19) +#define TX_BQ_OUT_INT BIT(18) +#define RX_BQ_IN_INT BIT(17) +#define RX_FQ_OUT_INT BIT(16) +#define TX_RQ_EMPTY_INT BIT(15) +#define TX_RQ_FULL_INT BIT(14) +#define TX_RQ_ALEMPTY_INT BIT(13) +#define TX_RQ_ALFULL_INT BIT(12) +#define TX_BQ_EMPTY_INT BIT(11) +#define TX_BQ_FULL_INT BIT(10) +#define TX_BQ_ALEMPTY_INT BIT(9) +#define TX_BQ_ALFULL_INT BIT(8) +#define RX_BQ_EMPTY_INT BIT(7) +#define RX_BQ_FULL_INT BIT(6) +#define RX_BQ_ALEMPTY_INT BIT(5) 
+#define RX_BQ_ALFULL_INT BIT(4) +#define RX_FQ_EMPTY_INT BIT(3) +#define RX_FQ_FULL_INT BIT(2) +#define RX_FQ_ALEMPTY_INT BIT(1) +#define RX_FQ_ALFULL_INT BIT(0) + +#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \ + TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT) + +#define DESC_WR_RD_ENA 0x05cc +#define IN_QUEUE_TH 0x05d8 +#define OUT_QUEUE_TH 0x05dc +#define QUEUE_TX_BQ_SHIFT 16 +#define RX_BQ_IN_TIMEOUT_TH 0x05e0 +#define TX_RQ_IN_TIMEOUT_TH 0x05e4 +#define STOP_CMD 0x05e8 +#define BITS_TX_STOP BIT(1) +#define BITS_RX_STOP BIT(0) +#define FLUSH_CMD 0x05eC +#define BITS_TX_FLUSH_CMD BIT(5) +#define BITS_RX_FLUSH_CMD BIT(4) +#define BITS_TX_FLUSH_FLAG_DOWN BIT(3) +#define BITS_TX_FLUSH_FLAG_UP BIT(2) +#define BITS_RX_FLUSH_FLAG_DOWN BIT(1) +#define BITS_RX_FLUSH_FLAG_UP BIT(0) +#define RX_CFF_NUM_REG 0x05f0 +#define PMU_FSM_REG 0x05f8 +#define RX_FIFO_PKT_IN_NUM 0x05fc +#define RX_FIFO_PKT_OUT_NUM 0x0600 + +#define RGMII_SPEED_1000 0x2c +#define RGMII_SPEED_100 0x2f +#define RGMII_SPEED_10 0x2d +#define MII_SPEED_100 0x0f +#define MII_SPEED_10 0x0d +#define GMAC_SPEED_1000 0x05 +#define GMAC_SPEED_100 0x01 +#define GMAC_SPEED_10 0x00 +#define GMAC_FULL_DUPLEX BIT(4) + +#define RX_BQ_INT_THRESHOLD 0x01 +#define TX_RQ_INT_THRESHOLD 0x01 +#define RX_BQ_IN_TIMEOUT 0x10000 +#define TX_RQ_IN_TIMEOUT 0x50000 + +#define MAC_MAX_FRAME_SIZE 1600 +#define DESC_SIZE 32 +#define RX_DESC_NUM 1024 +#define TX_DESC_NUM 1024 + +#define DESC_VLD_FREE 0 +#define DESC_VLD_BUSY 0x80000000 +#define DESC_FL_MID 0 +#define DESC_FL_LAST 0x20000000 +#define DESC_FL_FIRST 0x40000000 +#define DESC_FL_FULL 0x60000000 +#define DESC_DATA_LEN_OFF 16 +#define DESC_BUFF_LEN_OFF 0 +#define DESC_DATA_MASK 0x7ff + +/* DMA descriptor ring helpers */ +#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1)) +#define dma_cnt(n) ((n) >> 5) +#define dma_byte(n) ((n) << 5) + +struct hix5hd2_desc { + __le32 buff_addr; + __le32 cmd; +} __aligned(32); + +struct hix5hd2_desc_sw { + struct hix5hd2_desc *desc; + dma_addr_t phys_addr; + unsigned int count; + unsigned int size; +}; + +#define QUEUE_NUMS 4 +struct hix5hd2_priv { + struct hix5hd2_desc_sw pool[QUEUE_NUMS]; +#define rx_fq pool[0] +#define rx_bq pool[1] +#define tx_bq pool[2] +#define tx_rq pool[3] + + void __iomem *base; + void __iomem *ctrl_base; + + struct sk_buff *tx_skb[TX_DESC_NUM]; + struct sk_buff *rx_skb[RX_DESC_NUM]; + + struct device *dev; + struct net_device *netdev; + + struct phy_device *phy; + struct device_node *phy_node; + phy_interface_t phy_mode; + + unsigned int speed; + unsigned int duplex; + + struct clk *clk; + struct mii_bus *bus; + struct napi_struct napi; + struct work_struct tx_timeout_task; +}; + +static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + u32 val; + + priv->speed = speed; + priv->duplex = duplex; + + switch (priv->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + if (speed == SPEED_1000) + val = RGMII_SPEED_1000; + else if (speed == SPEED_100) + val = RGMII_SPEED_100; + else + val = RGMII_SPEED_10; + break; + case PHY_INTERFACE_MODE_MII: + if (speed == SPEED_100) + val = MII_SPEED_100; + else + val = MII_SPEED_10; + break; + default: + netdev_warn(dev, "not supported mode\n"); + val = MII_SPEED_10; + break; + } + + if (duplex) + val |= GMAC_FULL_DUPLEX; + writel_relaxed(val, priv->ctrl_base); + + writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN); + if (speed == SPEED_1000) + val = GMAC_SPEED_1000; + else if (speed == SPEED_100) + val = GMAC_SPEED_100; 
+ else + val = GMAC_SPEED_10; + writel_relaxed(val, priv->base + PORT_MODE); + writel_relaxed(0, priv->base + MODE_CHANGE_EN); + writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL); +} + +static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx) +{ + writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN); + writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH); + writel_relaxed(0, priv->base + RX_FQ_REG_EN); + + writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN); + writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH); + writel_relaxed(0, priv->base + RX_BQ_REG_EN); + + writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN); + writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH); + writel_relaxed(0, priv->base + TX_BQ_REG_EN); + + writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN); + writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH); + writel_relaxed(0, priv->base + TX_RQ_REG_EN); +} + +static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr) +{ + writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN); + writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR); + writel_relaxed(0, priv->base + RX_FQ_REG_EN); +} + +static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr) +{ + writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN); + writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR); + writel_relaxed(0, priv->base + RX_BQ_REG_EN); +} + +static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr) +{ + writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN); + writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR); + writel_relaxed(0, priv->base + TX_BQ_REG_EN); +} + +static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr) +{ + writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN); + writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR); + writel_relaxed(0, priv->base + TX_RQ_REG_EN); +} + +static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv) +{ + hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr); + hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr); + hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr); + hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr); +} + +static void hix5hd2_hw_init(struct hix5hd2_priv *priv) +{ + u32 val; + + /* disable and clear all interrupts */ + writel_relaxed(0, priv->base + ENA_PMU_INT); + writel_relaxed(~0, priv->base + RAW_PMU_INT); + + writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL); + writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD); + writel_relaxed(0, priv->base + COL_SLOT_TIME); + + val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT; + writel_relaxed(val, priv->base + IN_QUEUE_TH); + + writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH); + writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH); + + hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM); + hix5hd2_set_desc_addr(priv); +} + +static void hix5hd2_irq_enable(struct hix5hd2_priv *priv) +{ + writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT); +} + +static void hix5hd2_irq_disable(struct hix5hd2_priv *priv) +{ + writel_relaxed(0, priv->base + ENA_PMU_INT); +} + +static void hix5hd2_port_enable(struct hix5hd2_priv *priv) +{ + writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA); + writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN); +} + +static void hix5hd2_port_disable(struct hix5hd2_priv *priv) +{ + writel_relaxed(~(BITS_RX_EN 
| BITS_TX_EN), priv->base + PORT_EN); + writel_relaxed(0, priv->base + DESC_WR_RD_ENA); +} + +static void hix5hd2_hw_set_mac_addr(struct net_device *dev) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + unsigned char *mac = dev->dev_addr; + u32 val; + + val = mac[1] | (mac[0] << 8); + writel_relaxed(val, priv->base + STATION_ADDR_HIGH); + + val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24); + writel_relaxed(val, priv->base + STATION_ADDR_LOW); +} + +static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p) +{ + int ret; + + ret = eth_mac_addr(dev, p); + if (!ret) + hix5hd2_hw_set_mac_addr(dev); + + return ret; +} + +static void hix5hd2_adjust_link(struct net_device *dev) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + struct phy_device *phy = priv->phy; + + if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) { + hix5hd2_config_port(dev, phy->speed, phy->duplex); + phy_print_status(phy); + } +} + +static void hix5hd2_rx_refill(struct hix5hd2_priv *priv) +{ + struct hix5hd2_desc *desc; + struct sk_buff *skb; + u32 start, end, num, pos, i; + u32 len = MAC_MAX_FRAME_SIZE; + dma_addr_t addr; + + /* software write pointer */ + start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR)); + /* logic read pointer */ + end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR)); + num = CIRC_SPACE(start, end, RX_DESC_NUM); + + for (i = 0, pos = start; i < num; i++) { + if (priv->rx_skb[pos]) { + break; + } else { + skb = netdev_alloc_skb_ip_align(priv->netdev, len); + if (unlikely(skb == NULL)) + break; + } + + addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(priv->dev, addr)) { + dev_kfree_skb_any(skb); + break; + } + + desc = priv->rx_fq.desc + pos; + desc->buff_addr = cpu_to_le32(addr); + priv->rx_skb[pos] = skb; + desc->cmd = cpu_to_le32(DESC_VLD_FREE | + (len - 1) << DESC_BUFF_LEN_OFF); + pos = dma_ring_incr(pos, RX_DESC_NUM); + } + + /* ensure desc updated */ + wmb(); + + if (pos != start) + writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR); +} + +static int hix5hd2_rx(struct net_device *dev, int limit) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + struct sk_buff *skb; + struct hix5hd2_desc *desc; + dma_addr_t addr; + u32 start, end, num, pos, i, len; + + /* software read pointer */ + start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR)); + /* logic write pointer */ + end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR)); + num = CIRC_CNT(end, start, RX_DESC_NUM); + if (num > limit) + num = limit; + + /* ensure get updated desc */ + rmb(); + for (i = 0, pos = start; i < num; i++) { + skb = priv->rx_skb[pos]; + if (unlikely(!skb)) { + netdev_err(dev, "inconsistent rx_skb\n"); + break; + } + priv->rx_skb[pos] = NULL; + + desc = priv->rx_bq.desc + pos; + len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) & + DESC_DATA_MASK; + addr = le32_to_cpu(desc->buff_addr); + dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE, + DMA_FROM_DEVICE); + + skb_put(skb, len); + if (skb->len > MAC_MAX_FRAME_SIZE) { + netdev_err(dev, "rcv len err, len = %d\n", skb->len); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev_kfree_skb_any(skb); + goto next; + } + + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(&priv->napi, skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + dev->last_rx = jiffies; +next: + pos = dma_ring_incr(pos, RX_DESC_NUM); + } + + if (pos != start) + writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR); + + hix5hd2_rx_refill(priv); + + 
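+	/* Report how many packets were consumed so hix5hd2_poll() can
+	 * charge them against its NAPI budget.
+	 */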
return num; +} + +static void hix5hd2_xmit_reclaim(struct net_device *dev) +{ + struct sk_buff *skb; + struct hix5hd2_desc *desc; + struct hix5hd2_priv *priv = netdev_priv(dev); + unsigned int bytes_compl = 0, pkts_compl = 0; + u32 start, end, num, pos, i; + dma_addr_t addr; + + netif_tx_lock(dev); + + /* software read */ + start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR)); + /* logic write */ + end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR)); + num = CIRC_CNT(end, start, TX_DESC_NUM); + + for (i = 0, pos = start; i < num; i++) { + skb = priv->tx_skb[pos]; + if (unlikely(!skb)) { + netdev_err(dev, "inconsistent tx_skb\n"); + break; + } + + pkts_compl++; + bytes_compl += skb->len; + desc = priv->tx_rq.desc + pos; + addr = le32_to_cpu(desc->buff_addr); + dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE); + priv->tx_skb[pos] = NULL; + dev_consume_skb_any(skb); + pos = dma_ring_incr(pos, TX_DESC_NUM); + } + + if (pos != start) + writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR); + + netif_tx_unlock(dev); + + if (pkts_compl || bytes_compl) + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl) + netif_wake_queue(priv->netdev); +} + +static int hix5hd2_poll(struct napi_struct *napi, int budget) +{ + struct hix5hd2_priv *priv = container_of(napi, + struct hix5hd2_priv, napi); + struct net_device *dev = priv->netdev; + int work_done = 0, task = budget; + int ints, num; + + do { + hix5hd2_xmit_reclaim(dev); + num = hix5hd2_rx(dev, task); + work_done += num; + task -= num; + if ((work_done >= budget) || (num == 0)) + break; + + ints = readl_relaxed(priv->base + RAW_PMU_INT); + writel_relaxed(ints, priv->base + RAW_PMU_INT); + } while (ints & DEF_INT_MASK); + + if (work_done < budget) { + napi_complete(napi); + hix5hd2_irq_enable(priv); + } + + return work_done; +} + +static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct hix5hd2_priv *priv = netdev_priv(dev); + int ints = readl_relaxed(priv->base + RAW_PMU_INT); + + writel_relaxed(ints, priv->base + RAW_PMU_INT); + if (likely(ints & DEF_INT_MASK)) { + hix5hd2_irq_disable(priv); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + struct hix5hd2_desc *desc; + dma_addr_t addr; + u32 pos; + + /* software write pointer */ + pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR)); + if (unlikely(priv->tx_skb[pos])) { + dev->stats.tx_dropped++; + dev->stats.tx_fifo_errors++; + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, addr)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + desc = priv->tx_bq.desc + pos; + desc->buff_addr = cpu_to_le32(addr); + priv->tx_skb[pos] = skb; + desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL | + (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF | + (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF); + + /* ensure desc updated */ + wmb(); + + pos = dma_ring_incr(pos, TX_DESC_NUM); + writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR); + + dev->trans_start = jiffies; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + netdev_sent_queue(dev, skb->len); + + return NETDEV_TX_OK; +} + +static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv) +{ + struct hix5hd2_desc 
*desc; + dma_addr_t addr; + int i; + + for (i = 0; i < RX_DESC_NUM; i++) { + struct sk_buff *skb = priv->rx_skb[i]; + if (skb == NULL) + continue; + + desc = priv->rx_fq.desc + i; + addr = le32_to_cpu(desc->buff_addr); + dma_unmap_single(priv->dev, addr, + MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + priv->rx_skb[i] = NULL; + } + + for (i = 0; i < TX_DESC_NUM; i++) { + struct sk_buff *skb = priv->tx_skb[i]; + if (skb == NULL) + continue; + + desc = priv->tx_rq.desc + i; + addr = le32_to_cpu(desc->buff_addr); + dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + priv->tx_skb[i] = NULL; + } +} + +static int hix5hd2_net_open(struct net_device *dev) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + netdev_err(dev, "failed to enable clk %d\n", ret); + return ret; + } + + priv->phy = of_phy_connect(dev, priv->phy_node, + &hix5hd2_adjust_link, 0, priv->phy_mode); + if (!priv->phy) + return -ENODEV; + + phy_start(priv->phy); + hix5hd2_hw_init(priv); + hix5hd2_rx_refill(priv); + + netdev_reset_queue(dev); + netif_start_queue(dev); + napi_enable(&priv->napi); + + hix5hd2_port_enable(priv); + hix5hd2_irq_enable(priv); + + return 0; +} + +static int hix5hd2_net_close(struct net_device *dev) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + + hix5hd2_port_disable(priv); + hix5hd2_irq_disable(priv); + napi_disable(&priv->napi); + netif_stop_queue(dev); + hix5hd2_free_dma_desc_rings(priv); + + if (priv->phy) { + phy_stop(priv->phy); + phy_disconnect(priv->phy); + } + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static void hix5hd2_tx_timeout_task(struct work_struct *work) +{ + struct hix5hd2_priv *priv; + + priv = container_of(work, struct hix5hd2_priv, tx_timeout_task); + hix5hd2_net_close(priv->netdev); + hix5hd2_net_open(priv->netdev); +} + +static void hix5hd2_net_timeout(struct net_device *dev) +{ + struct hix5hd2_priv *priv = netdev_priv(dev); + + schedule_work(&priv->tx_timeout_task); +} + +static const struct net_device_ops hix5hd2_netdev_ops = { + .ndo_open = hix5hd2_net_open, + .ndo_stop = hix5hd2_net_close, + .ndo_start_xmit = hix5hd2_net_xmit, + .ndo_tx_timeout = hix5hd2_net_timeout, + .ndo_set_mac_address = hix5hd2_net_set_mac_address, +}; + +static int hix5hd2_get_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct hix5hd2_priv *priv = netdev_priv(net_dev); + + if (!priv->phy) + return -ENODEV; + + return phy_ethtool_gset(priv->phy, cmd); +} + +static int hix5hd2_set_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct hix5hd2_priv *priv = netdev_priv(net_dev); + + if (!priv->phy) + return -ENODEV; + + return phy_ethtool_sset(priv->phy, cmd); +} + +static struct ethtool_ops hix5hd2_ethtools_ops = { + .get_link = ethtool_op_get_link, + .get_settings = hix5hd2_get_settings, + .set_settings = hix5hd2_set_settings, +}; + +static int hix5hd2_mdio_wait_ready(struct mii_bus *bus) +{ + struct hix5hd2_priv *priv = bus->priv; + void __iomem *base = priv->base; + int i, timeout = 10000; + + for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) { + if (i == timeout) + return -ETIMEDOUT; + usleep_range(10, 20); + } + + return 0; +} + +static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg) +{ + struct hix5hd2_priv *priv = bus->priv; + void __iomem *base = priv->base; + int val, ret; + + ret = hix5hd2_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + writel_relaxed(MDIO_READ | phy << 8 | 
reg, base + MDIO_SINGLE_CMD); + ret = hix5hd2_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + val = readl_relaxed(base + MDIO_RDATA_STATUS); + if (val & MDIO_R_VALID) { + dev_err(bus->parent, "SMI bus read not valid\n"); + ret = -ENODEV; + goto out; + } + + val = readl_relaxed(priv->base + MDIO_SINGLE_DATA); + ret = (val >> 16) & 0xFFFF; +out: + return ret; +} + +static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) +{ + struct hix5hd2_priv *priv = bus->priv; + void __iomem *base = priv->base; + int ret; + + ret = hix5hd2_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + writel_relaxed(val, base + MDIO_SINGLE_DATA); + writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD); + ret = hix5hd2_mdio_wait_ready(bus); +out: + return ret; +} + +static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv) +{ + int i; + + for (i = 0; i < QUEUE_NUMS; i++) { + if (priv->pool[i].desc) { + dma_free_coherent(priv->dev, priv->pool[i].size, + priv->pool[i].desc, + priv->pool[i].phys_addr); + priv->pool[i].desc = NULL; + } + } +} + +static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv) +{ + struct device *dev = priv->dev; + struct hix5hd2_desc *virt_addr; + dma_addr_t phys_addr; + int size, i; + + priv->rx_fq.count = RX_DESC_NUM; + priv->rx_bq.count = RX_DESC_NUM; + priv->tx_bq.count = TX_DESC_NUM; + priv->tx_rq.count = TX_DESC_NUM; + + for (i = 0; i < QUEUE_NUMS; i++) { + size = priv->pool[i].count * sizeof(struct hix5hd2_desc); + virt_addr = dma_alloc_coherent(dev, size, &phys_addr, + GFP_KERNEL); + if (virt_addr == NULL) + goto error_free_pool; + + memset(virt_addr, 0, size); + priv->pool[i].size = size; + priv->pool[i].desc = virt_addr; + priv->pool[i].phys_addr = phys_addr; + } + return 0; + +error_free_pool: + hix5hd2_destroy_hw_desc_queue(priv); + + return -ENOMEM; +} + +static int hix5hd2_dev_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct net_device *ndev; + struct hix5hd2_priv *priv; + struct resource *res; + struct mii_bus *bus; + const char *mac_addr; + int ret; + + ndev = alloc_etherdev(sizeof(struct hix5hd2_priv)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(pdev, ndev); + + priv = netdev_priv(ndev); + priv->dev = dev; + priv->netdev = ndev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto out_free_netdev; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->ctrl_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->ctrl_base)) { + ret = PTR_ERR(priv->ctrl_base); + goto out_free_netdev; + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + netdev_err(ndev, "failed to get clk\n"); + ret = -ENODEV; + goto out_free_netdev; + } + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + netdev_err(ndev, "failed to enable clk %d\n", ret); + goto out_free_netdev; + } + + bus = mdiobus_alloc(); + if (bus == NULL) { + ret = -ENOMEM; + goto out_free_netdev; + } + + bus->priv = priv; + bus->name = "hix5hd2_mii_bus"; + bus->read = hix5hd2_mdio_read; + bus->write = hix5hd2_mdio_write; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); + priv->bus = bus; + + ret = of_mdiobus_register(bus, node); + if (ret) + goto err_free_mdio; + + priv->phy_mode = of_get_phy_mode(node); + if (priv->phy_mode < 0) { + netdev_err(ndev, "not find phy-mode\n"); + 
ret = -EINVAL; + goto err_mdiobus; + } + + priv->phy_node = of_parse_phandle(node, "phy-handle", 0); + if (!priv->phy_node) { + netdev_err(ndev, "not find phy-handle\n"); + ret = -EINVAL; + goto err_mdiobus; + } + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { + netdev_err(ndev, "No irq resource\n"); + ret = -EINVAL; + goto out_phy_node; + } + + ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt, + 0, pdev->name, ndev); + if (ret) { + netdev_err(ndev, "devm_request_irq failed\n"); + goto out_phy_node; + } + + mac_addr = of_get_mac_address(node); + if (mac_addr) + ether_addr_copy(ndev->dev_addr, mac_addr); + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + netdev_warn(ndev, "using random MAC address %pM\n", + ndev->dev_addr); + } + + INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task); + ndev->watchdog_timeo = 6 * HZ; + ndev->priv_flags |= IFF_UNICAST_FLT; + ndev->netdev_ops = &hix5hd2_netdev_ops; + ndev->ethtool_ops = &hix5hd2_ethtools_ops; + SET_NETDEV_DEV(ndev, dev); + + ret = hix5hd2_init_hw_desc_queue(priv); + if (ret) + goto out_phy_node; + + netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT); + ret = register_netdev(priv->netdev); + if (ret) { + netdev_err(ndev, "register_netdev failed!"); + goto out_destroy_queue; + } + + clk_disable_unprepare(priv->clk); + + return ret; + +out_destroy_queue: + netif_napi_del(&priv->napi); + hix5hd2_destroy_hw_desc_queue(priv); +out_phy_node: + of_node_put(priv->phy_node); +err_mdiobus: + mdiobus_unregister(bus); +err_free_mdio: + mdiobus_free(bus); +out_free_netdev: + free_netdev(ndev); + + return ret; +} + +static int hix5hd2_dev_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hix5hd2_priv *priv = netdev_priv(ndev); + + netif_napi_del(&priv->napi); + unregister_netdev(ndev); + mdiobus_unregister(priv->bus); + mdiobus_free(priv->bus); + + hix5hd2_destroy_hw_desc_queue(priv); + of_node_put(priv->phy_node); + cancel_work_sync(&priv->tx_timeout_task); + free_netdev(ndev); + + return 0; +} + +static const struct of_device_id hix5hd2_of_match[] = { + {.compatible = "hisilicon,hix5hd2-gmac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, hix5hd2_of_match); + +static struct platform_driver hix5hd2_dev_driver = { + .driver = { + .name = "hix5hd2-gmac", + .of_match_table = hix5hd2_of_match, + }, + .probe = hix5hd2_dev_probe, + .remove = hix5hd2_dev_remove, +}; + +module_platform_driver(hix5hd2_dev_driver); + +MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:hix5hd2-gmac"); diff --git a/drivers/net/ethernet/hp/Kconfig b/drivers/net/ethernet/hp/Kconfig new file mode 100644 index 000000000..a0b8ece1e --- /dev/null +++ b/drivers/net/ethernet/hp/Kconfig @@ -0,0 +1,32 @@ +# +# HP network device configuration +# + +config NET_VENDOR_HP + bool "HP devices" + default y + depends on ISA || EISA || PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about HP cards. If you say Y, you will be asked for + your specific card in the following questions. 
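+
+# A minimal, illustrative .config fragment for these adapters (both symbols
+# are defined in this file; "=m" builds the hp100 driver as a module):
+#
+#   CONFIG_NET_VENDOR_HP=y
+#   CONFIG_HP100=m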
+ +if NET_VENDOR_HP + +config HP100 + tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support" + depends on (ISA || EISA || PCI) + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called hp100. + +endif # NET_VENDOR_HP diff --git a/drivers/net/ethernet/hp/Makefile b/drivers/net/ethernet/hp/Makefile new file mode 100644 index 000000000..20b6918b5 --- /dev/null +++ b/drivers/net/ethernet/hp/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the HP network device drivers. +# + +obj-$(CONFIG_HP100) += hp100.o diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c new file mode 100644 index 000000000..ae6e30d39 --- /dev/null +++ b/drivers/net/ethernet/hp/hp100.c @@ -0,0 +1,3069 @@ +/* +** hp100.c +** HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters +** +** $Id: hp100.c,v 1.58 2001/09/24 18:03:01 perex Exp perex $ +** +** Based on the HP100 driver written by Jaroslav Kysela +** Extended for new busmaster capable chipsets by +** Siegfried "Frieder" Loeffler (dg1sek) +** +** Maintained by: Jaroslav Kysela +** +** This driver has only been tested with +** -- HP J2585B 10/100 Mbit/s PCI Busmaster +** -- HP J2585A 10/100 Mbit/s PCI +** -- HP J2970A 10 Mbit/s PCI Combo 10base-T/BNC +** -- HP J2973A 10 Mbit/s PCI 10base-T +** -- HP J2573 10/100 ISA +** -- Compex ReadyLink ENET100-VG4 10/100 Mbit/s PCI / EISA +** -- Compex FreedomLine 100/VG 10/100 Mbit/s ISA / EISA / PCI +** +** but it should also work with the other CASCADE based adapters. +** +** TODO: +** - J2573 seems to hang sometimes when in shared memory mode. +** - Mode for Priority TX +** - Check PCI registers, performance might be improved? +** - To reduce interrupt load in busmaster, one could switch off +** the interrupts that are used to refill the queues whenever the +** queues are filled up to more than a certain threshold. +** - some updates for EISA version of card +** +** +** This code is free software; you can redistribute it and/or modify +** it under the terms of the GNU General Public License as published by +** the Free Software Foundation; either version 2 of the License, or +** (at your option) any later version. +** +** This code is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +** GNU General Public License for more details. +** +** You should have received a copy of the GNU General Public License +** along with this program; if not, write to the Free Software +** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +** +** 1.57c -> 1.58 +** - used indent to change coding-style +** - added KTI DP-200 EISA ID +** - ioremap is also used for low (<1MB) memory (multi-architecture support) +** +** 1.57b -> 1.57c - Arnaldo Carvalho de Melo +** - release resources on failure in init_module +** +** 1.57 -> 1.57b - Jean II +** - fix spinlocks, SMP is now working ! +** +** 1.56 -> 1.57 +** - updates for new PCI interface for 2.1 kernels +** +** 1.55 -> 1.56 +** - removed printk in misc. 
interrupt and update statistics to allow
+** monitoring of card status
+** - timing changes in xmit routines, relogin to 100VG hub added when
+** driver does reset
+** - included fix for Compex FreedomLine PCI adapter
+**
+** 1.54 -> 1.55
+** - fixed bad initialization in init_module
+** - added Compex FreedomLine adapter
+** - some fixes in card initialization
+**
+** 1.53 -> 1.54
+** - added hardware multicast filter support (doesn't work)
+** - little changes in hp100_sense_lan routine
+** - added support for Coax and AUI (J2970)
+** - fix for multiple cards and hp100_mode parameter (insmod)
+** - fix for shared IRQ
+**
+** 1.52 -> 1.53
+** - fixed bug in multicast support
+**
+*/
+
+#define HP100_DEFAULT_PRIORITY_TX 0
+
+#undef HP100_DEBUG
+#undef HP100_DEBUG_B		/* Trace */
+#undef HP100_DEBUG_BM		/* Debug busmaster code (PDL stuff) */
+
+#undef HP100_DEBUG_TRAINING	/* Debug login-to-hub procedure */
+#undef HP100_DEBUG_TX
+#undef HP100_DEBUG_IRQ
+#undef HP100_DEBUG_RX
+
+#undef HP100_MULTICAST_FILTER	/* Need to be debugged... */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include <asm/io.h>
+
+#include "hp100.h"
+
+/*
+ * defines
+ */
+
+#define HP100_BUS_ISA		0
+#define HP100_BUS_EISA		1
+#define HP100_BUS_PCI		2
+
+#define HP100_REGION_SIZE	0x20	/* for ioports */
+#define HP100_SIG_LEN		8	/* same as EISA_SIG_LEN */
+
+#define HP100_MAX_PACKET_SIZE	(1536+4)
+#define HP100_MIN_PACKET_SIZE	60
+
+#ifndef HP100_DEFAULT_RX_RATIO
+/* default - 75% onboard memory on the card are used for RX packets */
+#define HP100_DEFAULT_RX_RATIO	75
+#endif
+
+#ifndef HP100_DEFAULT_PRIORITY_TX
+/* default - don't enable transmit outgoing packets as priority */
+#define HP100_DEFAULT_PRIORITY_TX	0
+#endif
+
+/*
+ * structures
+ */
+
+struct hp100_private {
+	spinlock_t lock;
+	char id[HP100_SIG_LEN];
+	u_short chip;
+	u_short soft_model;
+	u_int memory_size;
+	u_int virt_memory_size;
+	u_short rx_ratio;	/* 1 - 99 */
+	u_short priority_tx;	/* != 0 - priority tx */
+	u_short mode;		/* PIO, Shared Mem or Busmaster */
+	u_char bus;
+	struct pci_dev *pci_dev;
+	short mem_mapped;	/* memory mapped access */
+	void __iomem *mem_ptr_virt;	/* virtual memory mapped area, maybe NULL */
+	unsigned long mem_ptr_phys;	/* physical memory mapped area */
+	short lan_type;		/* 10Mb/s, 100Mb/s or -1 (error) */
+	int hub_status;		/* was login to hub successful?
*/ + u_char mac1_mode; + u_char mac2_mode; + u_char hash_bytes[8]; + + /* Rings for busmaster mode: */ + hp100_ring_t *rxrhead; /* Head (oldest) index into rxring */ + hp100_ring_t *rxrtail; /* Tail (newest) index into rxring */ + hp100_ring_t *txrhead; /* Head (oldest) index into txring */ + hp100_ring_t *txrtail; /* Tail (newest) index into txring */ + + hp100_ring_t rxring[MAX_RX_PDL]; + hp100_ring_t txring[MAX_TX_PDL]; + + u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */ + u_long whatever_offset; /* Offset to bus/phys/dma address */ + int rxrcommit; /* # Rx PDLs committed to adapter */ + int txrcommit; /* # Tx PDLs committed to adapter */ +}; + +/* + * variables + */ +#ifdef CONFIG_ISA +static const char *hp100_isa_tbl[] = { + "HWPF150", /* HP J2573 rev A */ + "HWP1950", /* HP J2573 */ +}; +#endif + +#ifdef CONFIG_EISA +static struct eisa_device_id hp100_eisa_tbl[] = { + { "HWPF180" }, /* HP J2577 rev A */ + { "HWP1920" }, /* HP 27248B */ + { "HWP1940" }, /* HP J2577 */ + { "HWP1990" }, /* HP J2577 */ + { "CPX0301" }, /* ReadyLink ENET100-VG4 */ + { "CPX0401" }, /* FreedomLine 100/VG */ + { "" } /* Mandatory final entry ! */ +}; +MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); +#endif + +#ifdef CONFIG_PCI +static const struct pci_device_id hp100_pci_tbl[] = { + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2973A, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_ENET100VG4, PCI_ANY_ID, PCI_ANY_ID,}, + {PCI_VENDOR_ID_COMPEX2, PCI_DEVICE_ID_COMPEX2_100VG, PCI_ANY_ID, PCI_ANY_ID,}, +/* {PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_DP200, PCI_ANY_ID, PCI_ANY_ID }, */ + {} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); +#endif + +static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; +static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; +static int hp100_mode = 1; + +module_param(hp100_rx_ratio, int, 0); +module_param(hp100_priority_tx, int, 0); +module_param(hp100_mode, int, 0); + +/* + * prototypes + */ + +static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, + struct pci_dev *pci_dev); + + +static int hp100_open(struct net_device *dev); +static int hp100_close(struct net_device *dev); +static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb, + struct net_device *dev); +static void hp100_rx(struct net_device *dev); +static struct net_device_stats *hp100_get_stats(struct net_device *dev); +static void hp100_misc_interrupt(struct net_device *dev); +static void hp100_update_stats(struct net_device *dev); +static void hp100_clear_stats(struct hp100_private *lp, int ioaddr); +static void hp100_set_multicast_list(struct net_device *dev); +static irqreturn_t hp100_interrupt(int irq, void *dev_id); +static void hp100_start_interface(struct net_device *dev); +static void hp100_stop_interface(struct net_device *dev); +static void hp100_load_eeprom(struct net_device *dev, u_short ioaddr); +static int hp100_sense_lan(struct net_device *dev); +static int hp100_login_to_vg_hub(struct net_device *dev, + u_short force_relogin); +static int hp100_down_vg_link(struct net_device *dev); +static void hp100_cascade_reset(struct net_device *dev, u_short enable); +static void hp100_BM_shutdown(struct net_device *dev); +static void 
hp100_mmuinit(struct net_device *dev); +static void hp100_init_pdls(struct net_device *dev); +static int hp100_init_rxpdl(struct net_device *dev, + register hp100_ring_t * ringptr, + register u_int * pdlptr); +static int hp100_init_txpdl(struct net_device *dev, + register hp100_ring_t * ringptr, + register u_int * pdlptr); +static void hp100_rxfill(struct net_device *dev); +static void hp100_hwinit(struct net_device *dev); +static void hp100_clean_txring(struct net_device *dev); +#ifdef HP100_DEBUG +static void hp100_RegisterDump(struct net_device *dev); +#endif + +/* Conversion to new PCI API : + * Convert an address in a kernel buffer to a bus/phys/dma address. + * This work *only* for memory fragments part of lp->page_vaddr, + * because it was properly DMA allocated via pci_alloc_consistent(), + * so we just need to "retrieve" the original mapping to bus/phys/dma + * address - Jean II */ +static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr) +{ + struct hp100_private *lp = netdev_priv(dev); + return ((u_long) ptr) + lp->whatever_offset; +} + +static inline u_int pdl_map_data(struct hp100_private *lp, void *data) +{ + return pci_map_single(lp->pci_dev, data, + MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE); +} + +/* TODO: This function should not really be needed in a good design... */ +static void wait(void) +{ + mdelay(1); +} + +/* + * probe functions + * These functions should - if possible - avoid doing write operations + * since this could cause problems when the card is not installed. + */ + +/* + * Read board id and convert to string. + * Effectively same code as decode_eisa_sig + */ +static const char *hp100_read_id(int ioaddr) +{ + int i; + static char str[HP100_SIG_LEN]; + unsigned char sig[4], sum; + unsigned short rev; + + hp100_page(ID_MAC_ADDR); + sum = 0; + for (i = 0; i < 4; i++) { + sig[i] = hp100_inb(BOARD_ID + i); + sum += sig[i]; + } + + sum += hp100_inb(BOARD_ID + i); + if (sum != 0xff) + return NULL; /* bad checksum */ + + str[0] = ((sig[0] >> 2) & 0x1f) + ('A' - 1); + str[1] = (((sig[0] & 3) << 3) | (sig[1] >> 5)) + ('A' - 1); + str[2] = (sig[1] & 0x1f) + ('A' - 1); + rev = (sig[2] << 8) | sig[3]; + sprintf(str + 3, "%04X", rev); + + return str; +} + +#ifdef CONFIG_ISA +static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr) +{ + const char *sig; + int i; + + if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100")) + goto err; + + if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) { + release_region(ioaddr, HP100_REGION_SIZE); + goto err; + } + + sig = hp100_read_id(ioaddr); + release_region(ioaddr, HP100_REGION_SIZE); + + if (sig == NULL) + goto err; + + for (i = 0; i < ARRAY_SIZE(hp100_isa_tbl); i++) { + if (!strcmp(hp100_isa_tbl[i], sig)) + break; + + } + + if (i < ARRAY_SIZE(hp100_isa_tbl)) + return hp100_probe1(dev, ioaddr, HP100_BUS_ISA, NULL); + err: + return -ENODEV; + +} +/* + * Probe for ISA board. + * EISA and PCI are handled by device infrastructure. 
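+ * Each candidate port is screened cheaply first: hp100_isa_probe1() above
+ * checks the HW_ID register and the board-ID signature before committing
+ * to a full hp100_probe1().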
+ */ + +static int __init hp100_isa_probe(struct net_device *dev, int addr) +{ + int err = -ENODEV; + + /* Probe for a specific ISA address */ + if (addr > 0xff && addr < 0x400) + err = hp100_isa_probe1(dev, addr); + + else if (addr != 0) + err = -ENXIO; + + else { + /* Probe all ISA possible port regions */ + for (addr = 0x100; addr < 0x400; addr += 0x20) { + err = hp100_isa_probe1(dev, addr); + if (!err) + break; + } + } + return err; +} +#endif /* CONFIG_ISA */ + +#if !defined(MODULE) && defined(CONFIG_ISA) +struct net_device * __init hp100_probe(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); + int err; + + if (!dev) + return ERR_PTR(-ENODEV); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4200, TRACE); + printk("hp100: %s: probe\n", dev->name); +#endif + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + err = hp100_isa_probe(dev, dev->base_addr); + if (err) + goto out; + + return dev; + out: + free_netdev(dev); + return ERR_PTR(err); +} +#endif /* !MODULE && CONFIG_ISA */ + +static const struct net_device_ops hp100_bm_netdev_ops = { + .ndo_open = hp100_open, + .ndo_stop = hp100_close, + .ndo_start_xmit = hp100_start_xmit_bm, + .ndo_get_stats = hp100_get_stats, + .ndo_set_rx_mode = hp100_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static const struct net_device_ops hp100_netdev_ops = { + .ndo_open = hp100_open, + .ndo_stop = hp100_close, + .ndo_start_xmit = hp100_start_xmit, + .ndo_get_stats = hp100_get_stats, + .ndo_set_rx_mode = hp100_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus, + struct pci_dev *pci_dev) +{ + int i; + int err = -ENODEV; + const char *eid; + u_int chip; + u_char uc; + u_int memory_size = 0, virt_memory_size = 0; + u_short local_mode, lsw; + short mem_mapped; + unsigned long mem_ptr_phys; + void __iomem *mem_ptr_virt; + struct hp100_private *lp; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4201, TRACE); + printk("hp100: %s: probe1\n", dev->name); +#endif + + /* memory region for programmed i/o */ + if (!request_region(ioaddr, HP100_REGION_SIZE, "hp100")) + goto out1; + + if (hp100_inw(HW_ID) != HP100_HW_ID_CASCADE) + goto out2; + + chip = hp100_inw(PAGING) & HP100_CHIPID_MASK; +#ifdef HP100_DEBUG + if (chip == HP100_CHIPID_SHASTA) + printk("hp100: %s: Shasta Chip detected. (This is a pre 802.12 chip)\n", dev->name); + else if (chip == HP100_CHIPID_RAINIER) + printk("hp100: %s: Rainier Chip detected. (This is a pre 802.12 chip)\n", dev->name); + else if (chip == HP100_CHIPID_LASSEN) + printk("hp100: %s: Lassen Chip detected.\n", dev->name); + else + printk("hp100: %s: Warning: Unknown CASCADE chip (id=0x%.4x).\n", dev->name, chip); +#endif + + dev->base_addr = ioaddr; + + eid = hp100_read_id(ioaddr); + if (eid == NULL) { /* bad checksum? */ + printk(KERN_WARNING "%s: bad ID checksum at base port 0x%x\n", + __func__, ioaddr); + goto out2; + } + + hp100_page(ID_MAC_ADDR); + for (i = uc = 0; i < 7; i++) + uc += hp100_inb(LAN_ADDR + i); + if (uc != 0xff) { + printk(KERN_WARNING + "%s: bad lan address checksum at port 0x%x)\n", + __func__, ioaddr); + err = -EIO; + goto out2; + } + + /* Make sure, that all registers are correctly updated... 
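+	   (hp100_load_eeprom() below asks the card to reload its settings
+	   from the on-board EEPROM, and wait() gives it time to settle)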
*/ + + hp100_load_eeprom(dev, ioaddr); + wait(); + + /* + * Determine driver operation mode + * + * Use the variable "hp100_mode" upon insmod or as kernel parameter to + * force driver modes: + * hp100_mode=1 -> default, use busmaster mode if configured. + * hp100_mode=2 -> enable shared memory mode + * hp100_mode=3 -> force use of i/o mapped mode. + * hp100_mode=4 -> same as 1, but re-set the enable bit on the card. + */ + + /* + * LSW values: + * 0x2278 -> J2585B, PnP shared memory mode + * 0x2270 -> J2585B, shared memory mode, 0xdc000 + * 0xa23c -> J2585B, I/O mapped mode + * 0x2240 -> EISA COMPEX, BusMaster (Shasta Chip) + * 0x2220 -> EISA HP, I/O (Shasta Chip) + * 0x2260 -> EISA HP, BusMaster (Shasta Chip) + */ + +#if 0 + local_mode = 0x2270; + hp100_outw(0xfefe, OPTION_LSW); + hp100_outw(local_mode | HP100_SET_LB | HP100_SET_HB, OPTION_LSW); +#endif + + /* hp100_mode value maybe used in future by another card */ + local_mode = hp100_mode; + if (local_mode < 1 || local_mode > 4) + local_mode = 1; /* default */ +#ifdef HP100_DEBUG + printk("hp100: %s: original LSW = 0x%x\n", dev->name, + hp100_inw(OPTION_LSW)); +#endif + + if (local_mode == 3) { + hp100_outw(HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW); + hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW); + hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW); + printk("hp100: IO mapped mode forced.\n"); + } else if (local_mode == 2) { + hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW); + hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW); + hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_RESET_HB, OPTION_LSW); + printk("hp100: Shared memory mode requested.\n"); + } else if (local_mode == 4) { + if (chip == HP100_CHIPID_LASSEN) { + hp100_outw(HP100_BM_WRITE | HP100_BM_READ | HP100_SET_HB, OPTION_LSW); + hp100_outw(HP100_IO_EN | HP100_MEM_EN | HP100_RESET_LB, OPTION_LSW); + printk("hp100: Busmaster mode requested.\n"); + } + local_mode = 1; + } + + if (local_mode == 1) { /* default behaviour */ + lsw = hp100_inw(OPTION_LSW); + + if ((lsw & HP100_IO_EN) && (~lsw & HP100_MEM_EN) && + (~lsw & (HP100_BM_WRITE | HP100_BM_READ))) { +#ifdef HP100_DEBUG + printk("hp100: %s: IO_EN bit is set on card.\n", dev->name); +#endif + local_mode = 3; + } else if (chip == HP100_CHIPID_LASSEN && + (lsw & (HP100_BM_WRITE | HP100_BM_READ)) == (HP100_BM_WRITE | HP100_BM_READ)) { + /* Conversion to new PCI API : + * I don't have the doc, but I assume that the card + * can map the full 32bit address space. + * Also, we can have EISA Busmaster cards (not tested), + * so beware !!! 
- Jean II */ + if((bus == HP100_BUS_PCI) && + (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)))) { + /* Gracefully fallback to shared memory */ + goto busmasterfail; + } + printk("hp100: Busmaster mode enabled.\n"); + hp100_outw(HP100_MEM_EN | HP100_IO_EN | HP100_RESET_LB, OPTION_LSW); + } else { + busmasterfail: +#ifdef HP100_DEBUG + printk("hp100: %s: Card not configured for BM or BM not supported with this card.\n", dev->name); + printk("hp100: %s: Trying shared memory mode.\n", dev->name); +#endif + /* In this case, try shared memory mode */ + local_mode = 2; + hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW); + /* hp100_outw(HP100_IO_EN|HP100_RESET_LB, OPTION_LSW); */ + } + } +#ifdef HP100_DEBUG + printk("hp100: %s: new LSW = 0x%x\n", dev->name, hp100_inw(OPTION_LSW)); +#endif + + /* Check for shared memory on the card, eventually remap it */ + hp100_page(HW_MAP); + mem_mapped = ((hp100_inw(OPTION_LSW) & (HP100_MEM_EN)) != 0); + mem_ptr_phys = 0UL; + mem_ptr_virt = NULL; + memory_size = (8192 << ((hp100_inb(SRAM) >> 5) & 0x07)); + virt_memory_size = 0; + + /* For memory mapped or busmaster mode, we want the memory address */ + if (mem_mapped || (local_mode == 1)) { + mem_ptr_phys = (hp100_inw(MEM_MAP_LSW) | (hp100_inw(MEM_MAP_MSW) << 16)); + mem_ptr_phys &= ~0x1fff; /* 8k alignment */ + + if (bus == HP100_BUS_ISA && (mem_ptr_phys & ~0xfffff) != 0) { + printk("hp100: Can only use programmed i/o mode.\n"); + mem_ptr_phys = 0; + mem_mapped = 0; + local_mode = 3; /* Use programmed i/o */ + } + + /* We do not need access to shared memory in busmaster mode */ + /* However in slave mode we need to remap high (>1GB) card memory */ + if (local_mode != 1) { /* = not busmaster */ + /* We try with smaller memory sizes, if ioremap fails */ + for (virt_memory_size = memory_size; virt_memory_size > 16383; virt_memory_size >>= 1) { + if ((mem_ptr_virt = ioremap((u_long) mem_ptr_phys, virt_memory_size)) == NULL) { +#ifdef HP100_DEBUG + printk("hp100: %s: ioremap for 0x%x bytes high PCI memory at 0x%lx failed\n", dev->name, virt_memory_size, mem_ptr_phys); +#endif + } else { +#ifdef HP100_DEBUG + printk("hp100: %s: remapped 0x%x bytes high PCI memory at 0x%lx to %p.\n", dev->name, virt_memory_size, mem_ptr_phys, mem_ptr_virt); +#endif + break; + } + } + + if (mem_ptr_virt == NULL) { /* all ioremap tries failed */ + printk("hp100: Failed to ioremap the PCI card memory. Will have to use i/o mapped mode.\n"); + local_mode = 3; + virt_memory_size = 0; + } + } + } + + if (local_mode == 3) { /* io mapped forced */ + mem_mapped = 0; + mem_ptr_phys = 0; + mem_ptr_virt = NULL; + printk("hp100: Using (slow) programmed i/o mode.\n"); + } + + /* Initialise the "private" data structure for this card. 
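+	   (everything probed above - chip id, bus type, access mode and the
+	   memory layout - is cached in struct hp100_private so the runtime
+	   paths do not have to probe it again)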
*/ + lp = netdev_priv(dev); + + spin_lock_init(&lp->lock); + strlcpy(lp->id, eid, HP100_SIG_LEN); + lp->chip = chip; + lp->mode = local_mode; + lp->bus = bus; + lp->pci_dev = pci_dev; + lp->priority_tx = hp100_priority_tx; + lp->rx_ratio = hp100_rx_ratio; + lp->mem_ptr_phys = mem_ptr_phys; + lp->mem_ptr_virt = mem_ptr_virt; + hp100_page(ID_MAC_ADDR); + lp->soft_model = hp100_inb(SOFT_MODEL); + lp->mac1_mode = HP100_MAC1MODE3; + lp->mac2_mode = HP100_MAC2MODE3; + memset(&lp->hash_bytes, 0x00, 8); + + dev->base_addr = ioaddr; + + lp->memory_size = memory_size; + lp->virt_memory_size = virt_memory_size; + lp->rx_ratio = hp100_rx_ratio; /* can be conf'd with insmod */ + + if (lp->mode == 1) /* busmaster */ + dev->netdev_ops = &hp100_bm_netdev_ops; + else + dev->netdev_ops = &hp100_netdev_ops; + + /* Ask the card for which IRQ line it is configured */ + if (bus == HP100_BUS_PCI) { + dev->irq = pci_dev->irq; + } else { + hp100_page(HW_MAP); + dev->irq = hp100_inb(IRQ_CHANNEL) & HP100_IRQMASK; + if (dev->irq == 2) + dev->irq = 9; + } + + if (lp->mode == 1) /* busmaster */ + dev->dma = 4; + + /* Ask the card for its MAC address and store it for later use. */ + hp100_page(ID_MAC_ADDR); + for (i = uc = 0; i < 6; i++) + dev->dev_addr[i] = hp100_inb(LAN_ADDR + i); + + /* Reset statistics (counters) */ + hp100_clear_stats(lp, ioaddr); + + /* If busmaster mode is wanted, a dma-capable memory area is needed for + * the rx and tx PDLs + * PCI cards can access the whole PC memory. Therefore GFP_DMA is not + * needed for the allocation of the memory area. + */ + + /* TODO: We do not need this with old cards, where PDLs are stored + * in the cards shared memory area. But currently, busmaster has been + * implemented/tested only with the lassen chip anyway... */ + if (lp->mode == 1) { /* busmaster */ + dma_addr_t page_baddr; + /* Get physically continuous memory for TX & RX PDLs */ + /* Conversion to new PCI API : + * Pages are always aligned and zeroed, no need to it ourself. + * Doc says should be OK for EISA bus as well - Jean II */ + lp->page_vaddr_algn = pci_alloc_consistent(lp->pci_dev, MAX_RINGSIZE, &page_baddr); + if (!lp->page_vaddr_algn) { + err = -ENOMEM; + goto out_mem_ptr; + } + lp->whatever_offset = ((u_long) page_baddr) - ((u_long) lp->page_vaddr_algn); + +#ifdef HP100_DEBUG_BM + printk("hp100: %s: Reserved DMA memory from 0x%x to 0x%x\n", dev->name, (u_int) lp->page_vaddr_algn, (u_int) lp->page_vaddr_algn + MAX_RINGSIZE); +#endif + lp->rxrcommit = lp->txrcommit = 0; + lp->rxrhead = lp->rxrtail = &(lp->rxring[0]); + lp->txrhead = lp->txrtail = &(lp->txring[0]); + } + + /* Initialise the card. */ + /* (I'm not really sure if it's a good idea to do this during probing, but + * like this it's assured that the lan connection type can be sensed + * correctly) + */ + hp100_hwinit(dev); + + /* Try to find out which kind of LAN the card is connected to. */ + lp->lan_type = hp100_sense_lan(dev); + + /* Print out a message what about what we think we have probed. */ + printk("hp100: at 0x%x, IRQ %d, ", ioaddr, dev->irq); + switch (bus) { + case HP100_BUS_EISA: + printk("EISA"); + break; + case HP100_BUS_PCI: + printk("PCI"); + break; + default: + printk("ISA"); + break; + } + printk(" bus, %dk SRAM (rx/tx %d%%).\n", lp->memory_size >> 10, lp->rx_ratio); + + if (lp->mode == 2) { /* memory mapped */ + printk("hp100: Memory area at 0x%lx-0x%lx", mem_ptr_phys, + (mem_ptr_phys + (mem_ptr_phys > 0x100000 ? 
(u_long) lp->memory_size : 16 * 1024)) - 1); + if (mem_ptr_virt) + printk(" (virtual base %p)", mem_ptr_virt); + printk(".\n"); + + /* Set for info when doing ifconfig */ + dev->mem_start = mem_ptr_phys; + dev->mem_end = mem_ptr_phys + lp->memory_size; + } + + printk("hp100: "); + if (lp->lan_type != HP100_LAN_ERR) + printk("Adapter is attached to "); + switch (lp->lan_type) { + case HP100_LAN_100: + printk("100Mb/s Voice Grade AnyLAN network.\n"); + break; + case HP100_LAN_10: + printk("10Mb/s network (10baseT).\n"); + break; + case HP100_LAN_COAX: + printk("10Mb/s network (coax).\n"); + break; + default: + printk("Warning! Link down.\n"); + } + + err = register_netdev(dev); + if (err) + goto out3; + + return 0; +out3: + if (local_mode == 1) + pci_free_consistent(lp->pci_dev, MAX_RINGSIZE + 0x0f, + lp->page_vaddr_algn, + virt_to_whatever(dev, lp->page_vaddr_algn)); +out_mem_ptr: + if (mem_ptr_virt) + iounmap(mem_ptr_virt); +out2: + release_region(ioaddr, HP100_REGION_SIZE); +out1: + return err; +} + +/* This procedure puts the card into a stable init state */ +static void hp100_hwinit(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4202, TRACE); + printk("hp100: %s: hwinit\n", dev->name); +#endif + + /* Initialise the card. -------------------------------------------- */ + + /* Clear all pending Ints and disable Ints */ + hp100_page(PERFORMANCE); + hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ + hp100_outw(0xffff, IRQ_STATUS); /* clear all pending ints */ + + hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); + hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW); + + if (lp->mode == 1) { + hp100_BM_shutdown(dev); /* disables BM, puts cascade in reset */ + wait(); + } else { + hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); + hp100_cascade_reset(dev, 1); + hp100_page(MAC_CTRL); + hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); + } + + /* Initiate EEPROM reload */ + hp100_load_eeprom(dev, 0); + + wait(); + + /* Go into reset again. */ + hp100_cascade_reset(dev, 1); + + /* Set Option Registers to a safe state */ + hp100_outw(HP100_DEBUG_EN | + HP100_RX_HDR | + HP100_EE_EN | + HP100_BM_WRITE | + HP100_BM_READ | HP100_RESET_HB | + HP100_FAKE_INT | + HP100_INT_EN | + HP100_MEM_EN | + HP100_IO_EN | HP100_RESET_LB, OPTION_LSW); + + hp100_outw(HP100_TRI_INT | + HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW); + + hp100_outb(HP100_PRIORITY_TX | + HP100_ADV_NXT_PKT | + HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW); + + /* TODO: Configure MMU for Ram Test. */ + /* TODO: Ram Test. */ + + /* Re-check if adapter is still at same i/o location */ + /* (If the base i/o in eeprom has been changed but the */ + /* registers had not been changed, a reload of the eeprom */ + /* would move the adapter to the address stored in eeprom */ + + /* TODO: Code to implement. */ + + /* Until here it was code from HWdiscover procedure. */ + /* Next comes code from mmuinit procedure of SCO BM driver which is + * called from HWconfigure in the SCO driver. */ + + /* Initialise MMU, eventually switch on Busmaster Mode, initialise + * multicast filter... + */ + hp100_mmuinit(dev); + + /* We don't turn the interrupts on here - this is done by start_interface. */ + wait(); /* TODO: Do we really need this? */ + + /* Enable Hardware (e.g. unreset) */ + hp100_cascade_reset(dev, 0); + + /* ------- initialisation complete ----------- */ + + /* Finally try to log in the Hub if there may be a VG connection. 
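+	   (100VG-AnyLAN nodes have to train/log in to the hub before they may
+	   transmit, so this is retried whenever the hardware is re-initialised)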
*/ + if ((lp->lan_type == HP100_LAN_100) || (lp->lan_type == HP100_LAN_ERR)) + hp100_login_to_vg_hub(dev, 0); /* relogin */ + +} + + +/* + * mmuinit - Reinitialise Cascade MMU and MAC settings. + * Note: Must already be in reset and leaves card in reset. + */ +static void hp100_mmuinit(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + int i; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4203, TRACE); + printk("hp100: %s: mmuinit\n", dev->name); +#endif + +#ifdef HP100_DEBUG + if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) { + printk("hp100: %s: Not in reset when entering mmuinit. Fix me.\n", dev->name); + return; + } +#endif + + /* Make sure IRQs are masked off and ack'ed. */ + hp100_page(PERFORMANCE); + hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ + hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */ + + /* + * Enable Hardware + * - Clear Debug En, Rx Hdr Pipe, EE En, I/O En, Fake Int and Intr En + * - Set Tri-State Int, Bus Master Rd/Wr, and Mem Map Disable + * - Clear Priority, Advance Pkt and Xmit Cmd + */ + + hp100_outw(HP100_DEBUG_EN | + HP100_RX_HDR | + HP100_EE_EN | HP100_RESET_HB | + HP100_IO_EN | + HP100_FAKE_INT | + HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); + + hp100_outw(HP100_TRI_INT | HP100_SET_HB, OPTION_LSW); + + if (lp->mode == 1) { /* busmaster */ + hp100_outw(HP100_BM_WRITE | + HP100_BM_READ | + HP100_MMAP_DIS | HP100_SET_HB, OPTION_LSW); + } else if (lp->mode == 2) { /* memory mapped */ + hp100_outw(HP100_BM_WRITE | + HP100_BM_READ | HP100_RESET_HB, OPTION_LSW); + hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW); + hp100_outw(HP100_MEM_EN | HP100_SET_LB, OPTION_LSW); + hp100_outw(HP100_IO_EN | HP100_SET_LB, OPTION_LSW); + } else if (lp->mode == 3) { /* i/o mapped mode */ + hp100_outw(HP100_MMAP_DIS | HP100_SET_HB | + HP100_IO_EN | HP100_SET_LB, OPTION_LSW); + } + + hp100_page(HW_MAP); + hp100_outb(0, EARLYRXCFG); + hp100_outw(0, EARLYTXCFG); + + /* + * Enable Bus Master mode + */ + if (lp->mode == 1) { /* busmaster */ + /* Experimental: Set some PCI configuration bits */ + hp100_page(HW_MAP); + hp100_andb(~HP100_PDL_USE3, MODECTRL1); /* BM engine read maximum */ + hp100_andb(~HP100_TX_DUALQ, MODECTRL1); /* No Queue for Priority TX */ + + /* PCI Bus failures should result in a Misc. Interrupt */ + hp100_orb(HP100_EN_BUS_FAIL, MODECTRL2); + + hp100_outw(HP100_BM_READ | HP100_BM_WRITE | HP100_SET_HB, OPTION_LSW); + hp100_page(HW_MAP); + /* Use Burst Mode and switch on PAGE_CK */ + hp100_orb(HP100_BM_BURST_RD | HP100_BM_BURST_WR, BM); + if ((lp->chip == HP100_CHIPID_RAINIER) || (lp->chip == HP100_CHIPID_SHASTA)) + hp100_orb(HP100_BM_PAGE_CK, BM); + hp100_orb(HP100_BM_MASTER, BM); + } else { /* not busmaster */ + + hp100_page(HW_MAP); + hp100_andb(~HP100_BM_MASTER, BM); + } + + /* + * Divide card memory into regions for Rx, Tx and, if non-ETR chip, PDLs + */ + hp100_page(MMU_CFG); + if (lp->mode == 1) { /* only needed for Busmaster */ + int xmit_stop, recv_stop; + + if ((lp->chip == HP100_CHIPID_RAINIER) || + (lp->chip == HP100_CHIPID_SHASTA)) { + int pdl_stop; + + /* + * Each pdl is 508 bytes long. (63 frags * 4 bytes for address and + * 4 bytes for header). We will leave NUM_RXPDLS * 508 (rounded + * to the next higher 1k boundary) bytes for the rx-pdl's + * Note: For non-etr chips the transmit stop register must be + * programmed on a 1k boundary, i.e. bits 9:0 must be zero. 
+ */ + pdl_stop = lp->memory_size; + xmit_stop = (pdl_stop - 508 * (MAX_RX_PDL) - 16) & ~(0x03ff); + recv_stop = (xmit_stop * (lp->rx_ratio) / 100) & ~(0x03ff); + hp100_outw((pdl_stop >> 4) - 1, PDL_MEM_STOP); +#ifdef HP100_DEBUG_BM + printk("hp100: %s: PDL_STOP = 0x%x\n", dev->name, pdl_stop); +#endif + } else { + /* ETR chip (Lassen) in busmaster mode */ + xmit_stop = (lp->memory_size) - 1; + recv_stop = ((lp->memory_size * lp->rx_ratio) / 100) & ~(0x03ff); + } + + hp100_outw(xmit_stop >> 4, TX_MEM_STOP); + hp100_outw(recv_stop >> 4, RX_MEM_STOP); +#ifdef HP100_DEBUG_BM + printk("hp100: %s: TX_STOP = 0x%x\n", dev->name, xmit_stop >> 4); + printk("hp100: %s: RX_STOP = 0x%x\n", dev->name, recv_stop >> 4); +#endif + } else { + /* Slave modes (memory mapped and programmed io) */ + hp100_outw((((lp->memory_size * lp->rx_ratio) / 100) >> 4), RX_MEM_STOP); + hp100_outw(((lp->memory_size - 1) >> 4), TX_MEM_STOP); +#ifdef HP100_DEBUG + printk("hp100: %s: TX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(TX_MEM_STOP)); + printk("hp100: %s: RX_MEM_STOP: 0x%x\n", dev->name, hp100_inw(RX_MEM_STOP)); +#endif + } + + /* Write MAC address into page 1 */ + hp100_page(MAC_ADDRESS); + for (i = 0; i < 6; i++) + hp100_outb(dev->dev_addr[i], MAC_ADDR + i); + + /* Zero the multicast hash registers */ + for (i = 0; i < 8; i++) + hp100_outb(0x0, HASH_BYTE0 + i); + + /* Set up MAC defaults */ + hp100_page(MAC_CTRL); + + /* Go to LAN Page and zero all filter bits */ + /* Zero accept error, accept multicast, accept broadcast and accept */ + /* all directed packet bits */ + hp100_andb(~(HP100_RX_EN | + HP100_TX_EN | + HP100_ACC_ERRORED | + HP100_ACC_MC | + HP100_ACC_BC | HP100_ACC_PHY), MAC_CFG_1); + + hp100_outb(0x00, MAC_CFG_2); + + /* Zero the frame format bit. This works around a training bug in the */ + /* new hubs. */ + hp100_outb(0x00, VG_LAN_CFG_2); /* (use 802.3) */ + + if (lp->priority_tx) + hp100_outb(HP100_PRIORITY_TX | HP100_SET_LB, OPTION_MSW); + else + hp100_outb(HP100_PRIORITY_TX | HP100_RESET_LB, OPTION_MSW); + + hp100_outb(HP100_ADV_NXT_PKT | + HP100_TX_CMD | HP100_RESET_LB, OPTION_MSW); + + /* If busmaster, initialize the PDLs */ + if (lp->mode == 1) + hp100_init_pdls(dev); + + /* Go to performance page and initialize isr and imr registers */ + hp100_page(PERFORMANCE); + hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ + hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */ +} + +/* + * open/close functions + */ + +static int hp100_open(struct net_device *dev) +{ + struct hp100_private *lp = netdev_priv(dev); +#ifdef HP100_DEBUG_B + int ioaddr = dev->base_addr; +#endif + +#ifdef HP100_DEBUG_B + hp100_outw(0x4204, TRACE); + printk("hp100: %s: open\n", dev->name); +#endif + + /* New: if bus is PCI or EISA, interrupts might be shared interrupts */ + if (request_irq(dev->irq, hp100_interrupt, + lp->bus == HP100_BUS_PCI || lp->bus == + HP100_BUS_EISA ? 
IRQF_SHARED : 0, + dev->name, dev)) { + printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq); + return -EAGAIN; + } + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_start_queue(dev); + + lp->lan_type = hp100_sense_lan(dev); + lp->mac1_mode = HP100_MAC1MODE3; + lp->mac2_mode = HP100_MAC2MODE3; + memset(&lp->hash_bytes, 0x00, 8); + + hp100_stop_interface(dev); + + hp100_hwinit(dev); + + hp100_start_interface(dev); /* sets mac modes, enables interrupts */ + + return 0; +} + +/* The close function is called when the interface is to be brought down */ +static int hp100_close(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4205, TRACE); + printk("hp100: %s: close\n", dev->name); +#endif + + hp100_page(PERFORMANCE); + hp100_outw(0xfefe, IRQ_MASK); /* mask off all IRQs */ + + hp100_stop_interface(dev); + + if (lp->lan_type == HP100_LAN_100) + lp->hub_status = hp100_login_to_vg_hub(dev, 0); + + netif_stop_queue(dev); + + free_irq(dev->irq, dev); + +#ifdef HP100_DEBUG + printk("hp100: %s: close LSW = 0x%x\n", dev->name, + hp100_inw(OPTION_LSW)); +#endif + + return 0; +} + + +/* + * Configure the PDL Rx rings and LAN + */ +static void hp100_init_pdls(struct net_device *dev) +{ + struct hp100_private *lp = netdev_priv(dev); + hp100_ring_t *ringptr; + u_int *pageptr; /* Warning : increment by 4 - Jean II */ + int i; + +#ifdef HP100_DEBUG_B + int ioaddr = dev->base_addr; +#endif + +#ifdef HP100_DEBUG_B + hp100_outw(0x4206, TRACE); + printk("hp100: %s: init pdls\n", dev->name); +#endif + + if (!lp->page_vaddr_algn) + printk("hp100: %s: Warning: lp->page_vaddr_algn not initialised!\n", dev->name); + else { + /* pageptr shall point into the DMA accessible memory region */ + /* we use this pointer to status the upper limit of allocated */ + /* memory in the allocated page. */ + /* note: align the pointers to the pci cache line size */ + memset(lp->page_vaddr_algn, 0, MAX_RINGSIZE); /* Zero Rx/Tx ring page */ + pageptr = lp->page_vaddr_algn; + + lp->rxrcommit = 0; + ringptr = lp->rxrhead = lp->rxrtail = &(lp->rxring[0]); + + /* Initialise Rx Ring */ + for (i = MAX_RX_PDL - 1; i >= 0; i--) { + lp->rxring[i].next = ringptr; + ringptr = &(lp->rxring[i]); + pageptr += hp100_init_rxpdl(dev, ringptr, pageptr); + } + + /* Initialise Tx Ring */ + lp->txrcommit = 0; + ringptr = lp->txrhead = lp->txrtail = &(lp->txring[0]); + for (i = MAX_TX_PDL - 1; i >= 0; i--) { + lp->txring[i].next = ringptr; + ringptr = &(lp->txring[i]); + pageptr += hp100_init_txpdl(dev, ringptr, pageptr); + } + } +} + + +/* These functions "format" the entries in the pdl structure */ +/* They return how much memory the fragments need. */ +static int hp100_init_rxpdl(struct net_device *dev, + register hp100_ring_t * ringptr, + register u32 * pdlptr) +{ + /* pdlptr is starting address for this pdl */ + + if (0 != (((unsigned long) pdlptr) & 0xf)) + printk("hp100: %s: Init rxpdl: Unaligned pdlptr 0x%lx.\n", + dev->name, (unsigned long) pdlptr); + + ringptr->pdl = pdlptr + 1; + ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1); + ringptr->skb = NULL; + + /* + * Write address and length of first PDL Fragment (which is used for + * storing the RX-Header + * We use the 4 bytes _before_ the PDH in the pdl memory area to + * store this information. 
(PDH is at offset 0x04) + */ + /* Note that pdlptr+1 and not pdlptr is the pointer to the PDH */ + + *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr); /* Address Frag 1 */ + *(pdlptr + 3) = 4; /* Length Frag 1 */ + + return roundup(MAX_RX_FRAG * 2 + 2, 4); +} + + +static int hp100_init_txpdl(struct net_device *dev, + register hp100_ring_t * ringptr, + register u32 * pdlptr) +{ + if (0 != (((unsigned long) pdlptr) & 0xf)) + printk("hp100: %s: Init txpdl: Unaligned pdlptr 0x%lx.\n", dev->name, (unsigned long) pdlptr); + + ringptr->pdl = pdlptr; /* +1; */ + ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */ + ringptr->skb = NULL; + + return roundup(MAX_TX_FRAG * 2 + 2, 4); +} + +/* + * hp100_build_rx_pdl allocates an skb_buff of maximum size plus two bytes + * for possible odd word alignment rounding up to next dword and set PDL + * address for fragment#2 + * Returns: 0 if unable to allocate skb_buff + * 1 if successful + */ +static int hp100_build_rx_pdl(hp100_ring_t * ringptr, + struct net_device *dev) +{ +#ifdef HP100_DEBUG_B + int ioaddr = dev->base_addr; +#endif +#ifdef HP100_DEBUG_BM + u_int *p; +#endif + +#ifdef HP100_DEBUG_B + hp100_outw(0x4207, TRACE); + printk("hp100: %s: build rx pdl\n", dev->name); +#endif + + /* Allocate skb buffer of maximum size */ + /* Note: This depends on the alloc_skb functions allocating more + * space than requested, i.e. aligning to 16bytes */ + + ringptr->skb = netdev_alloc_skb(dev, roundup(MAX_ETHER_SIZE + 2, 4)); + + if (NULL != ringptr->skb) { + /* + * Reserve 2 bytes at the head of the buffer to land the IP header + * on a long word boundary (According to the Network Driver section + * in the Linux KHG, this should help to increase performance.) + */ + skb_reserve(ringptr->skb, 2); + + ringptr->skb->data = (u_char *) skb_put(ringptr->skb, MAX_ETHER_SIZE); + + /* ringptr->pdl points to the beginning of the PDL, i.e. the PDH */ + /* Note: 1st Fragment is used for the 4 byte packet status + * (receive header). Its PDL entries are set up by init_rxpdl. So + * here we only have to set up the PDL fragment entries for the data + * part. Those 4 bytes will be stored in the DMA memory region + * directly before the PDL. + */ +#ifdef HP100_DEBUG_BM + printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n", + dev->name, (u_int) ringptr->pdl, + roundup(MAX_ETHER_SIZE + 2, 4), + (unsigned int) ringptr->skb->data); +#endif + + /* Conversion to new PCI API : map skbuf data to PCI bus. + * Doc says it's OK for EISA as well - Jean II */ + ringptr->pdl[0] = 0x00020000; /* Write PDH */ + ringptr->pdl[3] = pdl_map_data(netdev_priv(dev), + ringptr->skb->data); + ringptr->pdl[4] = MAX_ETHER_SIZE; /* Length of Data */ + +#ifdef HP100_DEBUG_BM + for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++) + printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p); +#endif + return 1; + } + /* else: */ + /* alloc_skb failed (no memory) -> still can receive the header + * fragment into PDL memory. make PDL safe by clearing msgptr and + * making the PDL only 1 fragment (i.e. the 4 byte packet status) + */ +#ifdef HP100_DEBUG_BM + printk("hp100: %s: build_rx_pdl: PDH@0x%x, No space for skb.\n", dev->name, (u_int) ringptr->pdl); +#endif + + ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */ + + return 0; +} + +/* + * hp100_rxfill - attempt to fill the Rx Ring will empty skb's + * + * Makes assumption that skb's are always contiguous memory areas and + * therefore PDLs contain only 2 physical fragments. 
+ * - While the number of Rx PDLs with buffers is less than maximum + * a. Get a maximum packet size skb + * b. Put the physical address of the buffer into the PDL. + * c. Output physical address of PDL to adapter. + */ +static void hp100_rxfill(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + struct hp100_private *lp = netdev_priv(dev); + hp100_ring_t *ringptr; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4208, TRACE); + printk("hp100: %s: rxfill\n", dev->name); +#endif + + hp100_page(PERFORMANCE); + + while (lp->rxrcommit < MAX_RX_PDL) { + /* + ** Attempt to get a buffer and build a Rx PDL. + */ + ringptr = lp->rxrtail; + if (0 == hp100_build_rx_pdl(ringptr, dev)) { + return; /* None available, return */ + } + + /* Hand this PDL over to the card */ + /* Note: This needs performance page selected! */ +#ifdef HP100_DEBUG_BM + printk("hp100: %s: rxfill: Hand to card: pdl #%d @0x%x phys:0x%x, buffer: 0x%x\n", + dev->name, lp->rxrcommit, (u_int) ringptr->pdl, + (u_int) ringptr->pdl_paddr, (u_int) ringptr->pdl[3]); +#endif + + hp100_outl((u32) ringptr->pdl_paddr, RX_PDA); + + lp->rxrcommit += 1; + lp->rxrtail = ringptr->next; + } +} + +/* + * BM_shutdown - shutdown bus mastering and leave chip in reset state + */ + +static void hp100_BM_shutdown(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + unsigned long time; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4209, TRACE); + printk("hp100: %s: bm shutdown\n", dev->name); +#endif + + hp100_page(PERFORMANCE); + hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ + hp100_outw(0xffff, IRQ_STATUS); /* Ack all ints */ + + /* Ensure Interrupts are off */ + hp100_outw(HP100_INT_EN | HP100_RESET_LB, OPTION_LSW); + + /* Disable all MAC activity */ + hp100_page(MAC_CTRL); + hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */ + + /* If cascade MMU is not already in reset */ + if (0 != (hp100_inw(OPTION_LSW) & HP100_HW_RST)) { + /* Wait 1.3ms (10Mb max packet time) to ensure MAC is idle so + * MMU pointers will not be reset out from underneath + */ + hp100_page(MAC_CTRL); + for (time = 0; time < 5000; time++) { + if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) + break; + } + + /* Shutdown algorithm depends on the generation of Cascade */ + if (lp->chip == HP100_CHIPID_LASSEN) { /* ETR shutdown/reset */ + /* Disable Busmaster mode and wait for bit to go to zero. */ + hp100_page(HW_MAP); + hp100_andb(~HP100_BM_MASTER, BM); + /* 100 ms timeout */ + for (time = 0; time < 32000; time++) { + if (0 == (hp100_inb(BM) & HP100_BM_MASTER)) + break; + } + } else { /* Shasta or Rainier Shutdown/Reset */ + /* To ensure all bus master inloading activity has ceased, + * wait for no Rx PDAs or no Rx packets on card. + */ + hp100_page(PERFORMANCE); + /* 100 ms timeout */ + for (time = 0; time < 10000; time++) { + /* RX_PDL: PDLs not executed. */ + /* RX_PKT_CNT: RX'd packets on card. */ + if ((hp100_inb(RX_PDL) == 0) && (hp100_inb(RX_PKT_CNT) == 0)) + break; + } + + if (time >= 10000) + printk("hp100: %s: BM shutdown error.\n", dev->name); + + /* To ensure all bus master outloading activity has ceased, + * wait until the Tx PDA count goes to zero or no more Tx space + * available in the Tx region of the card. 
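+ * (The loop that follows exits once TX_PKT_CNT reads zero and the
+ * HP100_AUTO_COMPARE bit of TX_MEM_FREE reports free transmit space again.)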
+ */ + /* 100 ms timeout */ + for (time = 0; time < 10000; time++) { + if ((0 == hp100_inb(TX_PKT_CNT)) && + (0 != (hp100_inb(TX_MEM_FREE) & HP100_AUTO_COMPARE))) + break; + } + + /* Disable Busmaster mode */ + hp100_page(HW_MAP); + hp100_andb(~HP100_BM_MASTER, BM); + } /* end of shutdown procedure for non-etr parts */ + + hp100_cascade_reset(dev, 1); + } + hp100_page(PERFORMANCE); + /* hp100_outw( HP100_BM_READ | HP100_BM_WRITE | HP100_RESET_HB, OPTION_LSW ); */ + /* Busmaster mode should be shut down now. */ +} + +static int hp100_check_lan(struct net_device *dev) +{ + struct hp100_private *lp = netdev_priv(dev); + + if (lp->lan_type < 0) { /* no LAN type detected yet? */ + hp100_stop_interface(dev); + if ((lp->lan_type = hp100_sense_lan(dev)) < 0) { + printk("hp100: %s: no connection found - check wire\n", dev->name); + hp100_start_interface(dev); /* 10Mb/s RX packets maybe handled */ + return -EIO; + } + if (lp->lan_type == HP100_LAN_100) + lp->hub_status = hp100_login_to_vg_hub(dev, 0); /* relogin */ + hp100_start_interface(dev); + } + return 0; +} + +/* + * transmit functions + */ + +/* tx function for busmaster mode */ +static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned long flags; + int i, ok_flag; + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + hp100_ring_t *ringptr; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4210, TRACE); + printk("hp100: %s: start_xmit_bm\n", dev->name); +#endif + if (skb->len <= 0) + goto drop; + + if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + /* Get Tx ring tail pointer */ + if (lp->txrtail->next == lp->txrhead) { + /* No memory. */ +#ifdef HP100_DEBUG + printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); +#endif + /* not waited long enough since last tx? */ + if (time_before(jiffies, dev_trans_start(dev) + HZ)) + goto drop; + + if (hp100_check_lan(dev)) + goto drop; + + if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { + /* we have a 100Mb/s adapter but it isn't connected to hub */ + printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name); + hp100_stop_interface(dev); + lp->hub_status = hp100_login_to_vg_hub(dev, 0); + hp100_start_interface(dev); + } else { + spin_lock_irqsave(&lp->lock, flags); + hp100_ints_off(); /* Useful ? Jean II */ + i = hp100_sense_lan(dev); + hp100_ints_on(); + spin_unlock_irqrestore(&lp->lock, flags); + if (i == HP100_LAN_ERR) + printk("hp100: %s: link down detected\n", dev->name); + else if (lp->lan_type != i) { /* cable change! */ + /* it's very hard - all network settings must be changed!!! */ + printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name); + lp->lan_type = i; + hp100_stop_interface(dev); + if (lp->lan_type == HP100_LAN_100) + lp->hub_status = hp100_login_to_vg_hub(dev, 0); + hp100_start_interface(dev); + } else { + printk("hp100: %s: interface reset\n", dev->name); + hp100_stop_interface(dev); + if (lp->lan_type == HP100_LAN_100) + lp->hub_status = hp100_login_to_vg_hub(dev, 0); + hp100_start_interface(dev); + } + } + + goto drop; + } + + /* + * we have to turn int's off before modifying this, otherwise + * a tx_pdl_cleanup could occur at the same time + */ + spin_lock_irqsave(&lp->lock, flags); + ringptr = lp->txrtail; + lp->txrtail = ringptr->next; + + /* Check whether packet has minimal packet size */ + ok_flag = skb->len >= HP100_MIN_PACKET_SIZE; + i = ok_flag ? 
skb->len : HP100_MIN_PACKET_SIZE; + + ringptr->skb = skb; + ringptr->pdl[0] = ((1 << 16) | i); /* PDH: 1 Fragment & length */ + if (lp->chip == HP100_CHIPID_SHASTA) { + /* TODO:Could someone who has the EISA card please check if this works? */ + ringptr->pdl[2] = i; + } else { /* Lassen */ + /* In the PDL, don't use the padded size but the real packet size: */ + ringptr->pdl[2] = skb->len; /* 1st Frag: Length of frag */ + } + /* Conversion to new PCI API : map skbuf data to PCI bus. + * Doc says it's OK for EISA as well - Jean II */ + ringptr->pdl[1] = ((u32) pci_map_single(lp->pci_dev, skb->data, ringptr->pdl[2], PCI_DMA_TODEVICE)); /* 1st Frag: Adr. of data */ + + /* Hand this PDL to the card. */ + hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */ + + lp->txrcommit++; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_OK; + +drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + + +/* clean_txring checks if packets have been sent by the card by reading + * the TX_PDL register from the performance page and comparing it to the + * number of committed packets. It then frees the skb's of the packets that + * obviously have been sent to the network. + * + * Needs the PERFORMANCE page selected. + */ +static void hp100_clean_txring(struct net_device *dev) +{ + struct hp100_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + int donecount; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4211, TRACE); + printk("hp100: %s: clean txring\n", dev->name); +#endif + + /* How many PDLs have been transmitted? */ + donecount = (lp->txrcommit) - hp100_inb(TX_PDL); + +#ifdef HP100_DEBUG + if (donecount > MAX_TX_PDL) + printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name); +#endif + + for (; 0 != donecount; donecount--) { +#ifdef HP100_DEBUG_BM + printk("hp100: %s: Free skb: data @0x%.8x txrcommit=0x%x TXPDL=0x%x, done=0x%x\n", + dev->name, (u_int) lp->txrhead->skb->data, + lp->txrcommit, hp100_inb(TX_PDL), donecount); +#endif + /* Conversion to new PCI API : NOP */ + pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE); + dev_consume_skb_any(lp->txrhead->skb); + lp->txrhead->skb = NULL; + lp->txrhead = lp->txrhead->next; + lp->txrcommit--; + } +} + +/* tx function for slave modes */ +static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + unsigned long flags; + int i, ok_flag; + int ioaddr = dev->base_addr; + u_short val; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4212, TRACE); + printk("hp100: %s: start_xmit\n", dev->name); +#endif + if (skb->len <= 0) + goto drop; + + if (hp100_check_lan(dev)) + goto drop; + + /* If there is not enough free memory on the card... */ + i = hp100_inl(TX_MEM_FREE) & 0x7fffffff; + if (!(((i / 2) - 539) > (skb->len + 16) && (hp100_inb(TX_PKT_CNT) < 255))) { +#ifdef HP100_DEBUG + printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); +#endif + /* not waited long enough since last failed tx try? 
*/ + if (time_before(jiffies, dev_trans_start(dev) + HZ)) { +#ifdef HP100_DEBUG + printk("hp100: %s: trans_start timing problem\n", + dev->name); +#endif + goto drop; + } + if (lp->lan_type == HP100_LAN_100 && lp->hub_status < 0) { + /* we have a 100Mb/s adapter but it isn't connected to hub */ + printk("hp100: %s: login to 100Mb/s hub retry\n", dev->name); + hp100_stop_interface(dev); + lp->hub_status = hp100_login_to_vg_hub(dev, 0); + hp100_start_interface(dev); + } else { + spin_lock_irqsave(&lp->lock, flags); + hp100_ints_off(); /* Useful ? Jean II */ + i = hp100_sense_lan(dev); + hp100_ints_on(); + spin_unlock_irqrestore(&lp->lock, flags); + if (i == HP100_LAN_ERR) + printk("hp100: %s: link down detected\n", dev->name); + else if (lp->lan_type != i) { /* cable change! */ + /* it's very hard - all network setting must be changed!!! */ + printk("hp100: %s: cable change 10Mb/s <-> 100Mb/s detected\n", dev->name); + lp->lan_type = i; + hp100_stop_interface(dev); + if (lp->lan_type == HP100_LAN_100) + lp->hub_status = hp100_login_to_vg_hub(dev, 0); + hp100_start_interface(dev); + } else { + printk("hp100: %s: interface reset\n", dev->name); + hp100_stop_interface(dev); + if (lp->lan_type == HP100_LAN_100) + lp->hub_status = hp100_login_to_vg_hub(dev, 0); + hp100_start_interface(dev); + mdelay(1); + } + } + goto drop; + } + + for (i = 0; i < 6000 && (hp100_inb(OPTION_MSW) & HP100_TX_CMD); i++) { +#ifdef HP100_DEBUG_TX + printk("hp100: %s: start_xmit: busy\n", dev->name); +#endif + } + + spin_lock_irqsave(&lp->lock, flags); + hp100_ints_off(); + val = hp100_inw(IRQ_STATUS); + /* Ack / clear the interrupt TX_COMPLETE interrupt - this interrupt is set + * when the current packet being transmitted on the wire is completed. */ + hp100_outw(HP100_TX_COMPLETE, IRQ_STATUS); +#ifdef HP100_DEBUG_TX + printk("hp100: %s: start_xmit: irq_status=0x%.4x, irqmask=0x%.4x, len=%d\n", + dev->name, val, hp100_inw(IRQ_MASK), (int) skb->len); +#endif + + ok_flag = skb->len >= HP100_MIN_PACKET_SIZE; + i = ok_flag ? skb->len : HP100_MIN_PACKET_SIZE; + + hp100_outw(i, DATA32); /* tell card the total packet length */ + hp100_outw(i, FRAGMENT_LEN); /* and first/only fragment length */ + + if (lp->mode == 2) { /* memory mapped */ + /* Note: The J2585B needs alignment to 32bits here! */ + memcpy_toio(lp->mem_ptr_virt, skb->data, (skb->len + 3) & ~3); + if (!ok_flag) + memset_io(lp->mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb->len); + } else { /* programmed i/o */ + outsl(ioaddr + HP100_REG_DATA32, skb->data, + (skb->len + 3) >> 2); + if (!ok_flag) + for (i = (skb->len + 3) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4) + hp100_outl(0, DATA32); + } + + hp100_outb(HP100_TX_CMD | HP100_SET_LB, OPTION_MSW); /* send packet */ + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + hp100_ints_on(); + spin_unlock_irqrestore(&lp->lock, flags); + + dev_consume_skb_any(skb); + +#ifdef HP100_DEBUG_TX + printk("hp100: %s: start_xmit: end\n", dev->name); +#endif + + return NETDEV_TX_OK; + +drop: + dev_kfree_skb(skb); + return NETDEV_TX_OK; + +} + + +/* + * Receive Function (Non-Busmaster mode) + * Called when an "Receive Packet" interrupt occurs, i.e. the receive + * packet counter is non-zero. + * For non-busmaster, this function does the whole work of transferring + * the packet to the host memory and then up to higher layers via skb + * and netif_rx. 
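+ * (In memory-mapped mode the data is copied out with memcpy_fromio();
+ * in programmed-I/O mode it is read longword-wise from the DATA32 port.)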
+ */ + +static void hp100_rx(struct net_device *dev) +{ + int packets, pkt_len; + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + u_int header; + struct sk_buff *skb; + +#ifdef DEBUG_B + hp100_outw(0x4213, TRACE); + printk("hp100: %s: rx\n", dev->name); +#endif + + /* First get indication of received lan packet */ + /* RX_PKT_CND indicates the number of packets which have been fully */ + /* received onto the card but have not been fully transferred of the card */ + packets = hp100_inb(RX_PKT_CNT); +#ifdef HP100_DEBUG_RX + if (packets > 1) + printk("hp100: %s: rx: waiting packets = %d\n", dev->name, packets); +#endif + + while (packets-- > 0) { + /* If ADV_NXT_PKT is still set, we have to wait until the card has */ + /* really advanced to the next packet. */ + for (pkt_len = 0; pkt_len < 6000 && (hp100_inb(OPTION_MSW) & HP100_ADV_NXT_PKT); pkt_len++) { +#ifdef HP100_DEBUG_RX + printk ("hp100: %s: rx: busy, remaining packets = %d\n", dev->name, packets); +#endif + } + + /* First we get the header, which contains information about the */ + /* actual length of the received packet. */ + if (lp->mode == 2) { /* memory mapped mode */ + header = readl(lp->mem_ptr_virt); + } else /* programmed i/o */ + header = hp100_inl(DATA32); + + pkt_len = ((header & HP100_PKT_LEN_MASK) + 3) & ~3; + +#ifdef HP100_DEBUG_RX + printk("hp100: %s: rx: new packet - length=%d, errors=0x%x, dest=0x%x\n", + dev->name, header & HP100_PKT_LEN_MASK, + (header >> 16) & 0xfff8, (header >> 16) & 7); +#endif + + /* Now we allocate the skb and transfer the data into it. */ + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { /* Not enough memory->drop packet */ +#ifdef HP100_DEBUG + printk("hp100: %s: rx: couldn't allocate a sk_buff of size %d\n", + dev->name, pkt_len); +#endif + dev->stats.rx_dropped++; + } else { /* skb successfully allocated */ + + u_char *ptr; + + skb_reserve(skb,2); + + /* ptr to start of the sk_buff data area */ + skb_put(skb, pkt_len); + ptr = skb->data; + + /* Now transfer the data from the card into that area */ + if (lp->mode == 2) + memcpy_fromio(ptr, lp->mem_ptr_virt,pkt_len); + else /* io mapped */ + insl(ioaddr + HP100_REG_DATA32, ptr, pkt_len >> 2); + + skb->protocol = eth_type_trans(skb, dev); + +#ifdef HP100_DEBUG_RX + printk("hp100: %s: rx: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + dev->name, ptr[0], ptr[1], ptr[2], ptr[3], + ptr[4], ptr[5], ptr[6], ptr[7], ptr[8], + ptr[9], ptr[10], ptr[11]); +#endif + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + + /* Indicate the card that we have got the packet */ + hp100_outb(HP100_ADV_NXT_PKT | HP100_SET_LB, OPTION_MSW); + + switch (header & 0x00070000) { + case (HP100_MULTI_ADDR_HASH << 16): + case (HP100_MULTI_ADDR_NO_HASH << 16): + dev->stats.multicast++; + break; + } + } /* end of while(there are packets) loop */ +#ifdef HP100_DEBUG_RX + printk("hp100_rx: %s: end\n", dev->name); +#endif +} + +/* + * Receive Function for Busmaster Mode + */ +static void hp100_rx_bm(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + hp100_ring_t *ptr; + u_int header; + int pkt_len; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4214, TRACE); + printk("hp100: %s: rx_bm\n", dev->name); +#endif + +#ifdef HP100_DEBUG + if (0 == lp->rxrcommit) { + printk("hp100: %s: rx_bm called although no PDLs were committed to adapter?\n", dev->name); + return; + } else + /* RX_PKT_CNT states how many PDLs are currently formatted and 
available to + * the cards BM engine */ + if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) { + printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n", + dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff, + lp->rxrcommit); + return; + } +#endif + + while ((lp->rxrcommit > hp100_inb(RX_PDL))) { + /* + * The packet was received into the pdl pointed to by lp->rxrhead ( + * the oldest pdl in the ring + */ + + /* First we get the header, which contains information about the */ + /* actual length of the received packet. */ + + ptr = lp->rxrhead; + + header = *(ptr->pdl - 1); + pkt_len = (header & HP100_PKT_LEN_MASK); + + /* Conversion to new PCI API : NOP */ + pci_unmap_single(lp->pci_dev, (dma_addr_t) ptr->pdl[3], MAX_ETHER_SIZE, PCI_DMA_FROMDEVICE); + +#ifdef HP100_DEBUG_BM + printk("hp100: %s: rx_bm: header@0x%x=0x%x length=%d, errors=0x%x, dest=0x%x\n", + dev->name, (u_int) (ptr->pdl - 1), (u_int) header, + pkt_len, (header >> 16) & 0xfff8, (header >> 16) & 7); + printk("hp100: %s: RX_PDL_COUNT:0x%x TX_PDL_COUNT:0x%x, RX_PKT_CNT=0x%x PDH=0x%x, Data@0x%x len=0x%x\n", + dev->name, hp100_inb(RX_PDL), hp100_inb(TX_PDL), + hp100_inb(RX_PKT_CNT), (u_int) * (ptr->pdl), + (u_int) * (ptr->pdl + 3), (u_int) * (ptr->pdl + 4)); +#endif + + if ((pkt_len >= MIN_ETHER_SIZE) && + (pkt_len <= MAX_ETHER_SIZE)) { + if (ptr->skb == NULL) { + printk("hp100: %s: rx_bm: skb null\n", dev->name); + /* can happen if we only allocated room for the pdh due to memory shortage. */ + dev->stats.rx_dropped++; + } else { + skb_trim(ptr->skb, pkt_len); /* Shorten it */ + ptr->skb->protocol = + eth_type_trans(ptr->skb, dev); + + netif_rx(ptr->skb); /* Up and away... */ + + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + + switch (header & 0x00070000) { + case (HP100_MULTI_ADDR_HASH << 16): + case (HP100_MULTI_ADDR_NO_HASH << 16): + dev->stats.multicast++; + break; + } + } else { +#ifdef HP100_DEBUG + printk("hp100: %s: rx_bm: Received bad packet (length=%d)\n", dev->name, pkt_len); +#endif + if (ptr->skb != NULL) + dev_kfree_skb_any(ptr->skb); + dev->stats.rx_errors++; + } + + lp->rxrhead = lp->rxrhead->next; + + /* Allocate a new rx PDL (so lp->rxrcommit stays the same) */ + if (0 == hp100_build_rx_pdl(lp->rxrtail, dev)) { + /* No space for skb, header can still be received. */ +#ifdef HP100_DEBUG + printk("hp100: %s: rx_bm: No space for new PDL.\n", dev->name); +#endif + return; + } else { /* successfully allocated new PDL - put it in ringlist at tail. */ + hp100_outl((u32) lp->rxrtail->pdl_paddr, RX_PDA); + lp->rxrtail = lp->rxrtail->next; + } + + } +} + +/* + * statistics + */ +static struct net_device_stats *hp100_get_stats(struct net_device *dev) +{ + unsigned long flags; + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4215, TRACE); +#endif + + spin_lock_irqsave(&lp->lock, flags); + hp100_ints_off(); /* Useful ? Jean II */ + hp100_update_stats(dev); + hp100_ints_on(); + spin_unlock_irqrestore(&lp->lock, flags); + return &(dev->stats); +} + +static void hp100_update_stats(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + u_short val; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4216, TRACE); + printk("hp100: %s: update-stats\n", dev->name); +#endif + + /* Note: Statistics counters clear when read. 
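+ * Each read therefore consumes the hardware count, which is why the
+ * values below are added into dev->stats immediately rather than
+ * being re-read later.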
*/ + hp100_page(MAC_CTRL); + val = hp100_inw(DROPPED) & 0x0fff; + dev->stats.rx_errors += val; + dev->stats.rx_over_errors += val; + val = hp100_inb(CRC); + dev->stats.rx_errors += val; + dev->stats.rx_crc_errors += val; + val = hp100_inb(ABORT); + dev->stats.tx_errors += val; + dev->stats.tx_aborted_errors += val; + hp100_page(PERFORMANCE); +} + +static void hp100_misc_interrupt(struct net_device *dev) +{ +#ifdef HP100_DEBUG_B + int ioaddr = dev->base_addr; +#endif + +#ifdef HP100_DEBUG_B + int ioaddr = dev->base_addr; + hp100_outw(0x4216, TRACE); + printk("hp100: %s: misc_interrupt\n", dev->name); +#endif + + /* Note: Statistics counters clear when read. */ + dev->stats.rx_errors++; + dev->stats.tx_errors++; +} + +static void hp100_clear_stats(struct hp100_private *lp, int ioaddr) +{ + unsigned long flags; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4217, TRACE); + printk("hp100: %s: clear_stats\n", dev->name); +#endif + + spin_lock_irqsave(&lp->lock, flags); + hp100_page(MAC_CTRL); /* get all statistics bytes */ + hp100_inw(DROPPED); + hp100_inb(CRC); + hp100_inb(ABORT); + hp100_page(PERFORMANCE); + spin_unlock_irqrestore(&lp->lock, flags); +} + + +/* + * multicast setup + */ + +/* + * Set or clear the multicast filter for this adapter. + */ + +static void hp100_set_multicast_list(struct net_device *dev) +{ + unsigned long flags; + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4218, TRACE); + printk("hp100: %s: set_mc_list\n", dev->name); +#endif + + spin_lock_irqsave(&lp->lock, flags); + hp100_ints_off(); + hp100_page(MAC_CTRL); + hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); /* stop rx/tx */ + + if (dev->flags & IFF_PROMISC) { + lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */ + lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */ + memset(&lp->hash_bytes, 0xff, 8); + } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) { + lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */ + lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */ +#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! 
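+ * with the filter left undefined, the #else branch below simply sets
+ * every hash byte to 0xff, i.e. all multicast frames are accepted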
*/ + if (dev->flags & IFF_ALLMULTI) { + /* set hash filter to receive all multicast packets */ + memset(&lp->hash_bytes, 0xff, 8); + } else { + int i, idx; + u_char *addrs; + struct netdev_hw_addr *ha; + + memset(&lp->hash_bytes, 0x00, 8); +#ifdef HP100_DEBUG + printk("hp100: %s: computing hash filter - mc_count = %i\n", + dev->name, netdev_mc_count(dev)); +#endif + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; +#ifdef HP100_DEBUG + printk("hp100: %s: multicast = %pM, ", + dev->name, addrs); +#endif + for (i = idx = 0; i < 6; i++) { + idx ^= *addrs++ & 0x3f; + printk(":%02x:", idx); + } +#ifdef HP100_DEBUG + printk("idx = %i\n", idx); +#endif + lp->hash_bytes[idx >> 3] |= (1 << (idx & 7)); + } + } +#else + memset(&lp->hash_bytes, 0xff, 8); +#endif + } else { + lp->mac2_mode = HP100_MAC2MODE3; /* normal mode = get packets for me */ + lp->mac1_mode = HP100_MAC1MODE3; /* and broadcasts */ + memset(&lp->hash_bytes, 0x00, 8); + } + + if (((hp100_inb(MAC_CFG_1) & 0x0f) != lp->mac1_mode) || + (hp100_inb(MAC_CFG_2) != lp->mac2_mode)) { + int i; + + hp100_outb(lp->mac2_mode, MAC_CFG_2); + hp100_andb(HP100_MAC1MODEMASK, MAC_CFG_1); /* clear mac1 mode bits */ + hp100_orb(lp->mac1_mode, MAC_CFG_1); /* and set the new mode */ + + hp100_page(MAC_ADDRESS); + for (i = 0; i < 8; i++) + hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i); +#ifdef HP100_DEBUG + printk("hp100: %s: mac1 = 0x%x, mac2 = 0x%x, multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + dev->name, lp->mac1_mode, lp->mac2_mode, + lp->hash_bytes[0], lp->hash_bytes[1], + lp->hash_bytes[2], lp->hash_bytes[3], + lp->hash_bytes[4], lp->hash_bytes[5], + lp->hash_bytes[6], lp->hash_bytes[7]); +#endif + + if (lp->lan_type == HP100_LAN_100) { +#ifdef HP100_DEBUG + printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name); +#endif + lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */ + } + } else { + int i; + u_char old_hash_bytes[8]; + + hp100_page(MAC_ADDRESS); + for (i = 0; i < 8; i++) + old_hash_bytes[i] = hp100_inb(HASH_BYTE0 + i); + if (memcmp(old_hash_bytes, &lp->hash_bytes, 8)) { + for (i = 0; i < 8; i++) + hp100_outb(lp->hash_bytes[i], HASH_BYTE0 + i); +#ifdef HP100_DEBUG + printk("hp100: %s: multicast hash = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + dev->name, lp->hash_bytes[0], + lp->hash_bytes[1], lp->hash_bytes[2], + lp->hash_bytes[3], lp->hash_bytes[4], + lp->hash_bytes[5], lp->hash_bytes[6], + lp->hash_bytes[7]); +#endif + + if (lp->lan_type == HP100_LAN_100) { +#ifdef HP100_DEBUG + printk("hp100: %s: 100VG MAC settings have changed - relogin.\n", dev->name); +#endif + lp->hub_status = hp100_login_to_vg_hub(dev, 1); /* force a relogin to the hub */ + } + } + } + + hp100_page(MAC_CTRL); + hp100_orb(HP100_RX_EN | HP100_RX_IDLE | /* enable rx */ + HP100_TX_EN | HP100_TX_IDLE, MAC_CFG_1); /* enable tx */ + + hp100_page(PERFORMANCE); + hp100_ints_on(); + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* + * hardware interrupt handling + */ + +static irqreturn_t hp100_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct hp100_private *lp = netdev_priv(dev); + + int ioaddr; + u_int val; + + if (dev == NULL) + return IRQ_NONE; + ioaddr = dev->base_addr; + + spin_lock(&lp->lock); + + hp100_ints_off(); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4219, TRACE); +#endif + + /* hp100_page( PERFORMANCE ); */ + val = hp100_inw(IRQ_STATUS); +#ifdef HP100_DEBUG_IRQ + printk("hp100: %s: mode=%x,IRQ_STAT=0x%.4x,RXPKTCNT=0x%.2x RXPDL=0x%.2x 
TXPKTCNT=0x%.2x TXPDL=0x%.2x\n", + dev->name, lp->mode, (u_int) val, hp100_inb(RX_PKT_CNT), + hp100_inb(RX_PDL), hp100_inb(TX_PKT_CNT), hp100_inb(TX_PDL)); +#endif + + if (val == 0) { /* might be a shared interrupt */ + spin_unlock(&lp->lock); + hp100_ints_on(); + return IRQ_NONE; + } + /* We're only interested in those interrupts we really enabled. */ + /* val &= hp100_inw( IRQ_MASK ); */ + + /* + * RX_PDL_FILL_COMPL is set whenever a RX_PDL has been executed. A RX_PDL + * is considered executed whenever the RX_PDL data structure is no longer + * needed. + */ + if (val & HP100_RX_PDL_FILL_COMPL) { + if (lp->mode == 1) + hp100_rx_bm(dev); + else { + printk("hp100: %s: rx_pdl_fill_compl interrupt although not busmaster?\n", dev->name); + } + } + + /* + * The RX_PACKET interrupt is set, when the receive packet counter is + * non zero. We use this interrupt for receiving in slave mode. In + * busmaster mode, we use it to make sure we did not miss any rx_pdl_fill + * interrupts. If rx_pdl_fill_compl is not set and rx_packet is set, then + * we somehow have missed a rx_pdl_fill_compl interrupt. + */ + + if (val & HP100_RX_PACKET) { /* Receive Packet Counter is non zero */ + if (lp->mode != 1) /* non busmaster */ + hp100_rx(dev); + else if (!(val & HP100_RX_PDL_FILL_COMPL)) { + /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt? */ + hp100_rx_bm(dev); + } + } + + /* + * Ack. that we have noticed the interrupt and thereby allow next one. + * Note that this is now done after the slave rx function, since first + * acknowledging and then setting ADV_NXT_PKT caused an extra interrupt + * on the J2573. + */ + hp100_outw(val, IRQ_STATUS); + + /* + * RX_ERROR is set when a packet is dropped due to no memory resources on + * the card or when a RCV_ERR occurs. + * TX_ERROR is set when a TX_ABORT condition occurs in the MAC->exists + * only in the 802.3 MAC and happens when 16 collisions occur during a TX + */ + if (val & (HP100_TX_ERROR | HP100_RX_ERROR)) { +#ifdef HP100_DEBUG_IRQ + printk("hp100: %s: TX/RX Error IRQ\n", dev->name); +#endif + hp100_update_stats(dev); + if (lp->mode == 1) { + hp100_rxfill(dev); + hp100_clean_txring(dev); + } + } + + /* + * RX_PDA_ZERO is set when the PDA count goes from non-zero to zero. + */ + if ((lp->mode == 1) && (val & (HP100_RX_PDA_ZERO))) + hp100_rxfill(dev); + + /* + * HP100_TX_COMPLETE interrupt occurs when packet transmitted on wire + * is completed + */ + if ((lp->mode == 1) && (val & (HP100_TX_COMPLETE))) + hp100_clean_txring(dev); + + /* + * MISC_ERROR is set when either the LAN link goes down or a detected + * bus error occurs. + */ + if (val & HP100_MISC_ERROR) { /* New for J2585B */ +#ifdef HP100_DEBUG_IRQ + printk + ("hp100: %s: Misc. Error Interrupt - Check cabling.\n", + dev->name); +#endif + if (lp->mode == 1) { + hp100_clean_txring(dev); + hp100_rxfill(dev); + } + hp100_misc_interrupt(dev); + } + + spin_unlock(&lp->lock); + hp100_ints_on(); + return IRQ_HANDLED; +} + +/* + * some misc functions + */ + +static void hp100_start_interface(struct net_device *dev) +{ + unsigned long flags; + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4220, TRACE); + printk("hp100: %s: hp100_start_interface\n", dev->name); +#endif + + spin_lock_irqsave(&lp->lock, flags); + + /* Ensure the adapter does not want to request an interrupt when */ + /* enabling the IRQ line to be active on the bus (i.e. 
not tri-stated) */ + hp100_page(PERFORMANCE); + hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ + hp100_outw(0xffff, IRQ_STATUS); /* ack all IRQs */ + hp100_outw(HP100_FAKE_INT | HP100_INT_EN | HP100_RESET_LB, + OPTION_LSW); + /* Un Tri-state int. TODO: Check if shared interrupts can be realised? */ + hp100_outw(HP100_TRI_INT | HP100_RESET_HB, OPTION_LSW); + + if (lp->mode == 1) { + /* Make sure BM bit is set... */ + hp100_page(HW_MAP); + hp100_orb(HP100_BM_MASTER, BM); + hp100_rxfill(dev); + } else if (lp->mode == 2) { + /* Enable memory mapping. Note: Don't do this when busmaster. */ + hp100_outw(HP100_MMAP_DIS | HP100_RESET_HB, OPTION_LSW); + } + + hp100_page(PERFORMANCE); + hp100_outw(0xfefe, IRQ_MASK); /* mask off all ints */ + hp100_outw(0xffff, IRQ_STATUS); /* ack IRQ */ + + /* enable a few interrupts: */ + if (lp->mode == 1) { /* busmaster mode */ + hp100_outw(HP100_RX_PDL_FILL_COMPL | + HP100_RX_PDA_ZERO | HP100_RX_ERROR | + /* HP100_RX_PACKET | */ + /* HP100_RX_EARLY_INT | */ HP100_SET_HB | + /* HP100_TX_PDA_ZERO | */ + HP100_TX_COMPLETE | + /* HP100_MISC_ERROR | */ + HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK); + } else { + hp100_outw(HP100_RX_PACKET | + HP100_RX_ERROR | HP100_SET_HB | + HP100_TX_ERROR | HP100_SET_LB, IRQ_MASK); + } + + /* Note : before hp100_set_multicast_list(), because it will play with + * spinlock itself... Jean II */ + spin_unlock_irqrestore(&lp->lock, flags); + + /* Enable MAC Tx and RX, set MAC modes, ... */ + hp100_set_multicast_list(dev); +} + +static void hp100_stop_interface(struct net_device *dev) +{ + struct hp100_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + u_int val; + +#ifdef HP100_DEBUG_B + printk("hp100: %s: hp100_stop_interface\n", dev->name); + hp100_outw(0x4221, TRACE); +#endif + + if (lp->mode == 1) + hp100_BM_shutdown(dev); + else { + /* Note: MMAP_DIS will be reenabled by start_interface */ + hp100_outw(HP100_INT_EN | HP100_RESET_LB | + HP100_TRI_INT | HP100_MMAP_DIS | HP100_SET_HB, + OPTION_LSW); + val = hp100_inw(OPTION_LSW); + + hp100_page(MAC_CTRL); + hp100_andb(~(HP100_RX_EN | HP100_TX_EN), MAC_CFG_1); + + if (!(val & HP100_HW_RST)) + return; /* If reset, imm. return ... */ + /* ... else: busy wait until idle */ + for (val = 0; val < 6000; val++) + if ((hp100_inb(MAC_CFG_1) & (HP100_TX_IDLE | HP100_RX_IDLE)) == (HP100_TX_IDLE | HP100_RX_IDLE)) { + hp100_page(PERFORMANCE); + return; + } + printk("hp100: %s: hp100_stop_interface - timeout\n", dev->name); + hp100_page(PERFORMANCE); + } +} + +static void hp100_load_eeprom(struct net_device *dev, u_short probe_ioaddr) +{ + int i; + int ioaddr = probe_ioaddr > 0 ? probe_ioaddr : dev->base_addr; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4222, TRACE); +#endif + + hp100_page(EEPROM_CTRL); + hp100_andw(~HP100_EEPROM_LOAD, EEPROM_CTRL); + hp100_orw(HP100_EEPROM_LOAD, EEPROM_CTRL); + for (i = 0; i < 10000; i++) + if (!(hp100_inb(OPTION_MSW) & HP100_EE_LOAD)) + return; + printk("hp100: %s: hp100_load_eeprom - timeout\n", dev->name); +} + +/* Sense connection status. 
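+ * (The status is taken from the link status bits of 10_LAN_CFG_1 and
+ * VG_LAN_CFG_1 on the MAC_CTRL page.)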
+ * return values: LAN_10 - Connected to 10Mbit/s network + * LAN_100 - Connected to 100Mbit/s network + * LAN_ERR - not connected or 100Mbit/s Hub down + */ +static int hp100_sense_lan(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + u_short val_VG, val_10; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4223, TRACE); +#endif + + hp100_page(MAC_CTRL); + val_10 = hp100_inb(10_LAN_CFG_1); + val_VG = hp100_inb(VG_LAN_CFG_1); + hp100_page(PERFORMANCE); +#ifdef HP100_DEBUG + printk("hp100: %s: sense_lan: val_VG = 0x%04x, val_10 = 0x%04x\n", + dev->name, val_VG, val_10); +#endif + + if (val_10 & HP100_LINK_BEAT_ST) /* 10Mb connection is active */ + return HP100_LAN_10; + + if (val_10 & HP100_AUI_ST) { /* have we BNC or AUI onboard? */ + /* + * This can be overriden by dos utility, so if this has no effect, + * perhaps you need to download that utility from HP and set card + * back to "auto detect". + */ + val_10 |= HP100_AUI_SEL | HP100_LOW_TH; + hp100_page(MAC_CTRL); + hp100_outb(val_10, 10_LAN_CFG_1); + hp100_page(PERFORMANCE); + return HP100_LAN_COAX; + } + + /* Those cards don't have a 100 Mbit connector */ + if ( !strcmp(lp->id, "HWP1920") || + (lp->pci_dev && + lp->pci_dev->vendor == PCI_VENDOR_ID && + (lp->pci_dev->device == PCI_DEVICE_ID_HP_J2970A || + lp->pci_dev->device == PCI_DEVICE_ID_HP_J2973A))) + return HP100_LAN_ERR; + + if (val_VG & HP100_LINK_CABLE_ST) /* Can hear the HUBs tone. */ + return HP100_LAN_100; + return HP100_LAN_ERR; +} + +static int hp100_down_vg_link(struct net_device *dev) +{ + struct hp100_private *lp = netdev_priv(dev); + int ioaddr = dev->base_addr; + unsigned long time; + long savelan, newlan; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4224, TRACE); + printk("hp100: %s: down_vg_link\n", dev->name); +#endif + + hp100_page(MAC_CTRL); + time = jiffies + (HZ / 4); + do { + if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) + break; + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_after(time, jiffies)); + + if (time_after_eq(jiffies, time)) /* no signal->no logout */ + return 0; + + /* Drop the VG Link by clearing the link up cmd and load addr. */ + + hp100_andb(~(HP100_LOAD_ADDR | HP100_LINK_CMD), VG_LAN_CFG_1); + hp100_orb(HP100_VG_SEL, VG_LAN_CFG_1); + + /* Conditionally stall for >250ms on Link-Up Status (to go down) */ + time = jiffies + (HZ / 2); + do { + if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) + break; + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_after(time, jiffies)); + +#ifdef HP100_DEBUG + if (time_after_eq(jiffies, time)) + printk("hp100: %s: down_vg_link: Link does not go down?\n", dev->name); +#endif + + /* To prevent condition where Rev 1 VG MAC and old hubs do not complete */ + /* logout under traffic (even though all the status bits are cleared), */ + /* do this workaround to get the Rev 1 MAC in its idle state */ + if (lp->chip == HP100_CHIPID_LASSEN) { + /* Reset VG MAC to insure it leaves the logoff state even if */ + /* the Hub is still emitting tones */ + hp100_andb(~HP100_VG_RESET, VG_LAN_CFG_1); + udelay(1500); /* wait for >1ms */ + hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); /* Release Reset */ + udelay(1500); + } + + /* New: For lassen, switch to 10 Mbps mac briefly to clear training ACK */ + /* to get the VG mac to full reset. This is not req.d with later chips */ + /* Note: It will take the between 1 and 2 seconds for the VG mac to be */ + /* selected again! 
This will be left to the connect hub function to */ + /* perform if desired. */ + if (lp->chip == HP100_CHIPID_LASSEN) { + /* Have to write to 10 and 100VG control registers simultaneously */ + savelan = newlan = hp100_inl(10_LAN_CFG_1); /* read 10+100 LAN_CFG regs */ + newlan &= ~(HP100_VG_SEL << 16); + newlan |= (HP100_DOT3_MAC) << 8; + hp100_andb(~HP100_AUTO_MODE, MAC_CFG_3); /* Autosel off */ + hp100_outl(newlan, 10_LAN_CFG_1); + + /* Conditionally stall for 5sec on VG selected. */ + time = jiffies + (HZ * 5); + do { + if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) + break; + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_after(time, jiffies)); + + hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */ + hp100_outl(savelan, 10_LAN_CFG_1); + } + + time = jiffies + (3 * HZ); /* Timeout 3s */ + do { + if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0) + break; + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_after(time, jiffies)); + + if (time_before_eq(time, jiffies)) { +#ifdef HP100_DEBUG + printk("hp100: %s: down_vg_link: timeout\n", dev->name); +#endif + return -EIO; + } + + time = jiffies + (2 * HZ); /* This seems to take a while.... */ + do { + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_after(time, jiffies)); + + return 0; +} + +static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) +{ + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + u_short val = 0; + unsigned long time; + int startst; + +#ifdef HP100_DEBUG_B + hp100_outw(0x4225, TRACE); + printk("hp100: %s: login_to_vg_hub\n", dev->name); +#endif + + /* Initiate a login sequence iff VG MAC is enabled and either Load Address + * bit is zero or the force relogin flag is set (e.g. due to MAC address or + * promiscuous mode change) + */ + hp100_page(MAC_CTRL); + startst = hp100_inb(VG_LAN_CFG_1); + if ((force_relogin == 1) || (hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) { +#ifdef HP100_DEBUG_TRAINING + printk("hp100: %s: Start training\n", dev->name); +#endif + + /* Ensure VG Reset bit is 1 (i.e., do not reset) */ + hp100_orb(HP100_VG_RESET, VG_LAN_CFG_1); + + /* If Lassen AND auto-select-mode AND VG tones were sensed on */ + /* entry then temporarily put them into force 100Mbit mode */ + if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) + hp100_andb(~HP100_DOT3_MAC, 10_LAN_CFG_2); + + /* Drop the VG link by zeroing Link Up Command and Load Address */ + hp100_andb(~(HP100_LINK_CMD /* |HP100_LOAD_ADDR */ ), VG_LAN_CFG_1); + +#ifdef HP100_DEBUG_TRAINING + printk("hp100: %s: Bring down the link\n", dev->name); +#endif + + /* Wait for link to drop */ + time = jiffies + (HZ / 10); + do { + if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) + break; + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_after(time, jiffies)); + + /* Start an addressed training and optionally request promiscuous port */ + if ((dev->flags) & IFF_PROMISC) { + hp100_orb(HP100_PROM_MODE, VG_LAN_CFG_2); + if (lp->chip == HP100_CHIPID_LASSEN) + hp100_orw(HP100_MACRQ_PROMSC, TRAIN_REQUEST); + } else { + hp100_andb(~HP100_PROM_MODE, VG_LAN_CFG_2); + /* For ETR parts we need to reset the prom. bit in the training + * register, otherwise promiscious mode won't be disabled. + */ + if (lp->chip == HP100_CHIPID_LASSEN) { + hp100_andw(~HP100_MACRQ_PROMSC, TRAIN_REQUEST); + } + } + + /* With ETR parts, frame format request bits can be set. 
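+ * (HP100_MACRQ_FRAMEFMT_EITHER presumably tells the hub that either
+ * frame format is acceptable; the hub's choice is reported in the
+ * TRAIN_ALLOW register, see the 802.3/802.5 debug output further down.)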
*/ + if (lp->chip == HP100_CHIPID_LASSEN) + hp100_orb(HP100_MACRQ_FRAMEFMT_EITHER, TRAIN_REQUEST); + + hp100_orb(HP100_LINK_CMD | HP100_LOAD_ADDR | HP100_VG_RESET, VG_LAN_CFG_1); + + /* Note: Next wait could be omitted for Hood and earlier chips under */ + /* certain circumstances */ + /* TODO: check if hood/earlier and skip wait. */ + + /* Wait for either short timeout for VG tones or long for login */ + /* Wait for the card hardware to signalise link cable status ok... */ + hp100_page(MAC_CTRL); + time = jiffies + (1 * HZ); /* 1 sec timeout for cable st */ + do { + if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) + break; + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_before(jiffies, time)); + + if (time_after_eq(jiffies, time)) { +#ifdef HP100_DEBUG_TRAINING + printk("hp100: %s: Link cable status not ok? Training aborted.\n", dev->name); +#endif + } else { +#ifdef HP100_DEBUG_TRAINING + printk + ("hp100: %s: HUB tones detected. Trying to train.\n", + dev->name); +#endif + + time = jiffies + (2 * HZ); /* again a timeout */ + do { + val = hp100_inb(VG_LAN_CFG_1); + if ((val & (HP100_LINK_UP_ST))) { +#ifdef HP100_DEBUG_TRAINING + printk("hp100: %s: Passed training.\n", dev->name); +#endif + break; + } + if (!in_interrupt()) + schedule_timeout_interruptible(1); + } while (time_after(time, jiffies)); + } + + /* If LINK_UP_ST is set, then we are logged into the hub. */ + if (time_before_eq(jiffies, time) && (val & HP100_LINK_UP_ST)) { +#ifdef HP100_DEBUG_TRAINING + printk("hp100: %s: Successfully logged into the HUB.\n", dev->name); + if (lp->chip == HP100_CHIPID_LASSEN) { + val = hp100_inw(TRAIN_ALLOW); + printk("hp100: %s: Card supports 100VG MAC Version \"%s\" ", + dev->name, (hp100_inw(TRAIN_REQUEST) & HP100_CARD_MACVER) ? "802.12" : "Pre"); + printk("Driver will use MAC Version \"%s\"\n", (val & HP100_HUB_MACVER) ? "802.12" : "Pre"); + printk("hp100: %s: Frame format is %s.\n", dev->name, (val & HP100_MALLOW_FRAMEFMT) ? "802.5" : "802.3"); + } +#endif + } else { + /* If LINK_UP_ST is not set, login was not successful */ + printk("hp100: %s: Problem logging into the HUB.\n", dev->name); + if (lp->chip == HP100_CHIPID_LASSEN) { + /* Check allowed Register to find out why there is a problem. */ + val = hp100_inw(TRAIN_ALLOW); /* won't work on non-ETR card */ +#ifdef HP100_DEBUG_TRAINING + printk("hp100: %s: MAC Configuration requested: 0x%04x, HUB allowed: 0x%04x\n", dev->name, hp100_inw(TRAIN_REQUEST), val); +#endif + if (val & HP100_MALLOW_ACCDENIED) + printk("hp100: %s: HUB access denied.\n", dev->name); + if (val & HP100_MALLOW_CONFIGURE) + printk("hp100: %s: MAC Configuration is incompatible with the Network.\n", dev->name); + if (val & HP100_MALLOW_DUPADDR) + printk("hp100: %s: Duplicate MAC Address on the Network.\n", dev->name); + } + } + + /* If we have put the chip into forced 100 Mbit mode earlier, go back */ + /* to auto-select mode */ + + if ((lp->chip == HP100_CHIPID_LASSEN) && (startst & HP100_LINK_CABLE_ST)) { + hp100_page(MAC_CTRL); + hp100_orb(HP100_DOT3_MAC, 10_LAN_CFG_2); + } + + val = hp100_inb(VG_LAN_CFG_1); + + /* Clear the MISC_ERROR Interrupt, which might be generated when doing the relogin */ + hp100_page(PERFORMANCE); + hp100_outw(HP100_MISC_ERROR, IRQ_STATUS); + + if (val & HP100_LINK_UP_ST) + return 0; /* login was ok */ + else { + printk("hp100: %s: Training failed.\n", dev->name); + hp100_down_vg_link(dev); + return -EIO; + } + } + /* no forced relogin & already link there->no training. 
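+ * (The -EIO ends up in lp->hub_status via the callers, and a negative
+ * hub_status makes the transmit paths retry the hub login later.)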
*/ + return -EIO; +} + +static void hp100_cascade_reset(struct net_device *dev, u_short enable) +{ + int ioaddr = dev->base_addr; + struct hp100_private *lp = netdev_priv(dev); + +#ifdef HP100_DEBUG_B + hp100_outw(0x4226, TRACE); + printk("hp100: %s: cascade_reset\n", dev->name); +#endif + + if (enable) { + hp100_outw(HP100_HW_RST | HP100_RESET_LB, OPTION_LSW); + if (lp->chip == HP100_CHIPID_LASSEN) { + /* Lassen requires a PCI transmit fifo reset */ + hp100_page(HW_MAP); + hp100_andb(~HP100_PCI_RESET, PCICTRL2); + hp100_orb(HP100_PCI_RESET, PCICTRL2); + /* Wait for min. 300 ns */ + /* we can't use jiffies here, because it may be */ + /* that we have disabled the timer... */ + udelay(400); + hp100_andb(~HP100_PCI_RESET, PCICTRL2); + hp100_page(PERFORMANCE); + } + } else { /* bring out of reset */ + hp100_outw(HP100_HW_RST | HP100_SET_LB, OPTION_LSW); + udelay(400); + hp100_page(PERFORMANCE); + } +} + +#ifdef HP100_DEBUG +void hp100_RegisterDump(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int Page; + int Register; + + /* Dump common registers */ + printk("hp100: %s: Cascade Register Dump\n", dev->name); + printk("hardware id #1: 0x%.2x\n", hp100_inb(HW_ID)); + printk("hardware id #2/paging: 0x%.2x\n", hp100_inb(PAGING)); + printk("option #1: 0x%.4x\n", hp100_inw(OPTION_LSW)); + printk("option #2: 0x%.4x\n", hp100_inw(OPTION_MSW)); + + /* Dump paged registers */ + for (Page = 0; Page < 8; Page++) { + /* Dump registers */ + printk("page: 0x%.2x\n", Page); + outw(Page, ioaddr + 0x02); + for (Register = 0x8; Register < 0x22; Register += 2) { + /* Display Register contents except data port */ + if (((Register != 0x10) && (Register != 0x12)) || (Page > 0)) { + printk("0x%.2x = 0x%.4x\n", Register, inw(ioaddr + Register)); + } + } + } + hp100_page(PERFORMANCE); +} +#endif + + +static void cleanup_dev(struct net_device *d) +{ + struct hp100_private *p = netdev_priv(d); + + unregister_netdev(d); + release_region(d->base_addr, HP100_REGION_SIZE); + + if (p->mode == 1) /* busmaster */ + pci_free_consistent(p->pci_dev, MAX_RINGSIZE + 0x0f, + p->page_vaddr_algn, + virt_to_whatever(d, p->page_vaddr_algn)); + if (p->mem_ptr_virt) + iounmap(p->mem_ptr_virt); + + free_netdev(d); +} + +#ifdef CONFIG_EISA +static int __init hp100_eisa_probe (struct device *gendev) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); + struct eisa_device *edev = to_eisa_device(gendev); + int err; + + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &edev->dev); + + err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL); + if (err) + goto out1; + +#ifdef HP100_DEBUG + printk("hp100: %s: EISA adapter found at 0x%x\n", dev->name, + dev->base_addr); +#endif + dev_set_drvdata(gendev, dev); + return 0; + out1: + free_netdev(dev); + return err; +} + +static int hp100_eisa_remove(struct device *gendev) +{ + struct net_device *dev = dev_get_drvdata(gendev); + cleanup_dev(dev); + return 0; +} + +static struct eisa_driver hp100_eisa_driver = { + .id_table = hp100_eisa_tbl, + .driver = { + .name = "hp100", + .probe = hp100_eisa_probe, + .remove = hp100_eisa_remove, + } +}; +#endif + +#ifdef CONFIG_PCI +static int hp100_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + int ioaddr; + u_short pci_command; + int err; + + if (pci_enable_device(pdev)) + return -ENODEV; + + dev = alloc_etherdev(sizeof(struct hp100_private)); + if (!dev) { + err = -ENOMEM; + goto out0; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + pci_read_config_word(pdev, 
PCI_COMMAND, &pci_command); + if (!(pci_command & PCI_COMMAND_IO)) { +#ifdef HP100_DEBUG + printk("hp100: %s: PCI I/O Bit has not been set. Setting...\n", dev->name); +#endif + pci_command |= PCI_COMMAND_IO; + pci_write_config_word(pdev, PCI_COMMAND, pci_command); + } + + if (!(pci_command & PCI_COMMAND_MASTER)) { +#ifdef HP100_DEBUG + printk("hp100: %s: PCI Master Bit has not been set. Setting...\n", dev->name); +#endif + pci_command |= PCI_COMMAND_MASTER; + pci_write_config_word(pdev, PCI_COMMAND, pci_command); + } + + ioaddr = pci_resource_start(pdev, 0); + err = hp100_probe1(dev, ioaddr, HP100_BUS_PCI, pdev); + if (err) + goto out1; + +#ifdef HP100_DEBUG + printk("hp100: %s: PCI adapter found at 0x%x\n", dev->name, ioaddr); +#endif + pci_set_drvdata(pdev, dev); + return 0; + out1: + free_netdev(dev); + out0: + pci_disable_device(pdev); + return err; +} + +static void hp100_pci_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + cleanup_dev(dev); + pci_disable_device(pdev); +} + + +static struct pci_driver hp100_pci_driver = { + .name = "hp100", + .id_table = hp100_pci_tbl, + .probe = hp100_pci_probe, + .remove = hp100_pci_remove, +}; +#endif + +/* + * module section + */ + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jaroslav Kysela , " + "Siegfried \"Frieder\" Loeffler (dg1sek) "); +MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters"); + +/* + * Note: to register three isa devices, use: + * option hp100 hp100_port=0,0,0 + * to register one card at io 0x280 as eth239, use: + * option hp100 hp100_port=0x280 + */ +#if defined(MODULE) && defined(CONFIG_ISA) +#define HP100_DEVICES 5 +/* Parameters set by insmod */ +static int hp100_port[HP100_DEVICES] = { 0, [1 ... (HP100_DEVICES-1)] = -1 }; +module_param_array(hp100_port, int, NULL, 0); + +/* List of devices */ +static struct net_device *hp100_devlist[HP100_DEVICES]; + +static int __init hp100_isa_init(void) +{ + struct net_device *dev; + int i, err, cards = 0; + + /* Don't autoprobe ISA bus */ + if (hp100_port[0] == 0) + return -ENODEV; + + /* Loop on all possible base addresses */ + for (i = 0; i < HP100_DEVICES && hp100_port[i] != -1; ++i) { + dev = alloc_etherdev(sizeof(struct hp100_private)); + if (!dev) { + while (cards > 0) + cleanup_dev(hp100_devlist[--cards]); + + return -ENOMEM; + } + + err = hp100_isa_probe(dev, hp100_port[i]); + if (!err) + hp100_devlist[cards++] = dev; + else + free_netdev(dev); + } + + return cards > 0 ? 
0 : -ENODEV; +} + +static void hp100_isa_cleanup(void) +{ + int i; + + for (i = 0; i < HP100_DEVICES; i++) { + struct net_device *dev = hp100_devlist[i]; + if (dev) + cleanup_dev(dev); + } +} +#else +#define hp100_isa_init() (0) +#define hp100_isa_cleanup() do { } while(0) +#endif + +static int __init hp100_module_init(void) +{ + int err; + + err = hp100_isa_init(); + if (err && err != -ENODEV) + goto out; +#ifdef CONFIG_EISA + err = eisa_driver_register(&hp100_eisa_driver); + if (err && err != -ENODEV) + goto out2; +#endif +#ifdef CONFIG_PCI + err = pci_register_driver(&hp100_pci_driver); + if (err && err != -ENODEV) + goto out3; +#endif + out: + return err; + out3: +#ifdef CONFIG_EISA + eisa_driver_unregister (&hp100_eisa_driver); + out2: +#endif + hp100_isa_cleanup(); + goto out; +} + + +static void __exit hp100_module_exit(void) +{ + hp100_isa_cleanup(); +#ifdef CONFIG_EISA + eisa_driver_unregister (&hp100_eisa_driver); +#endif +#ifdef CONFIG_PCI + pci_unregister_driver (&hp100_pci_driver); +#endif +} + +module_init(hp100_module_init) +module_exit(hp100_module_exit) diff --git a/drivers/net/ethernet/hp/hp100.h b/drivers/net/ethernet/hp/hp100.h new file mode 100644 index 000000000..b60e96fe3 --- /dev/null +++ b/drivers/net/ethernet/hp/hp100.h @@ -0,0 +1,615 @@ +/* + * hp100.h: Hewlett Packard HP10/100VG ANY LAN ethernet driver for Linux. + * + * $Id: hp100.h,v 1.51 1997/04/08 14:26:42 floeff Exp floeff $ + * + * Authors: Jaroslav Kysela, + * Siegfried Loeffler + * + * This driver is based on the 'hpfepkt' crynwr packet driver. + * + * This source/code is public free; you can distribute it and/or modify + * it under terms of the GNU General Public License (published by the + * Free Software Foundation) either version two of this License, or any + * later version. + */ + +/**************************************************************************** + * Hardware Constants + ****************************************************************************/ + +/* + * Page Identifiers + * (Swap Paging Register, PAGING, bits 3:0, Offset 0x02) + */ + +#define HP100_PAGE_PERFORMANCE 0x0 /* Page 0 */ +#define HP100_PAGE_MAC_ADDRESS 0x1 /* Page 1 */ +#define HP100_PAGE_HW_MAP 0x2 /* Page 2 */ +#define HP100_PAGE_EEPROM_CTRL 0x3 /* Page 3 */ +#define HP100_PAGE_MAC_CTRL 0x4 /* Page 4 */ +#define HP100_PAGE_MMU_CFG 0x5 /* Page 5 */ +#define HP100_PAGE_ID_MAC_ADDR 0x6 /* Page 6 */ +#define HP100_PAGE_MMU_POINTER 0x7 /* Page 7 */ + + +/* Registers that are present on all pages */ + +#define HP100_REG_HW_ID 0x00 /* R: (16) Unique card ID */ +#define HP100_REG_TRACE 0x00 /* W: (16) Used for debug output */ +#define HP100_REG_PAGING 0x02 /* R: (16),15:4 Card ID */ + /* W: (16),3:0 Switch pages */ +#define HP100_REG_OPTION_LSW 0x04 /* RW: (16) Select card functions */ +#define HP100_REG_OPTION_MSW 0x06 /* RW: (16) Select card functions */ + +/* Page 0 - Performance */ + +#define HP100_REG_IRQ_STATUS 0x08 /* RW: (16) Which ints are pending */ +#define HP100_REG_IRQ_MASK 0x0a /* RW: (16) Select ints to allow */ +#define HP100_REG_FRAGMENT_LEN 0x0c /* W: (16)12:0 Current fragment len */ +/* Note: For 32 bit systems, fragment len and offset registers are available */ +/* at offset 0x28 and 0x2c, where they can be written as 32bit values. 
*/ +#define HP100_REG_OFFSET 0x0e /* RW: (16)12:0 Offset to start read */ +#define HP100_REG_DATA32 0x10 /* RW: (32) I/O mode data port */ +#define HP100_REG_DATA16 0x12 /* RW: WORDs must be read from here */ +#define HP100_REG_TX_MEM_FREE 0x14 /* RD: (32) Amount of free Tx mem */ +#define HP100_REG_TX_PDA_L 0x14 /* W: (32) BM: Ptr to PDL, Low Pri */ +#define HP100_REG_TX_PDA_H 0x1c /* W: (32) BM: Ptr to PDL, High Pri */ +#define HP100_REG_RX_PKT_CNT 0x18 /* RD: (8) Rx count of pkts on card */ +#define HP100_REG_TX_PKT_CNT 0x19 /* RD: (8) Tx count of pkts on card */ +#define HP100_REG_RX_PDL 0x1a /* R: (8) BM: # rx pdl not executed */ +#define HP100_REG_TX_PDL 0x1b /* R: (8) BM: # tx pdl not executed */ +#define HP100_REG_RX_PDA 0x18 /* W: (32) BM: Up to 31 addresses */ + /* which point to a PDL */ +#define HP100_REG_SL_EARLY 0x1c /* (32) Enhanced Slave Early Rx */ +#define HP100_REG_STAT_DROPPED 0x20 /* R (12) Dropped Packet Counter */ +#define HP100_REG_STAT_ERRORED 0x22 /* R (8) Errored Packet Counter */ +#define HP100_REG_STAT_ABORT 0x23 /* R (8) Abort Counter/OW Coll. Flag */ +#define HP100_REG_RX_RING 0x24 /* W (32) Slave: RX Ring Pointers */ +#define HP100_REG_32_FRAGMENT_LEN 0x28 /* W (13) Slave: Fragment Length Reg */ +#define HP100_REG_32_OFFSET 0x2c /* W (16) Slave: Offset Register */ + +/* Page 1 - MAC Address/Hash Table */ + +#define HP100_REG_MAC_ADDR 0x08 /* RW: (8) Cards MAC address */ +#define HP100_REG_HASH_BYTE0 0x10 /* RW: (8) Cards multicast filter */ + +/* Page 2 - Hardware Mapping */ + +#define HP100_REG_MEM_MAP_LSW 0x08 /* RW: (16) LSW of cards mem addr */ +#define HP100_REG_MEM_MAP_MSW 0x0a /* RW: (16) MSW of cards mem addr */ +#define HP100_REG_IO_MAP 0x0c /* RW: (8) Cards I/O address */ +#define HP100_REG_IRQ_CHANNEL 0x0d /* RW: (8) IRQ and edge/level int */ +#define HP100_REG_SRAM 0x0e /* RW: (8) How much RAM on card */ +#define HP100_REG_BM 0x0f /* RW: (8) Controls BM functions */ + +/* New on Page 2 for ETR chips: */ +#define HP100_REG_MODECTRL1 0x10 /* RW: (8) Mode Control 1 */ +#define HP100_REG_MODECTRL2 0x11 /* RW: (8) Mode Control 2 */ +#define HP100_REG_PCICTRL1 0x12 /* RW: (8) PCI Cfg 1 */ +#define HP100_REG_PCICTRL2 0x13 /* RW: (8) PCI Cfg 2 */ +#define HP100_REG_PCIBUSMLAT 0x15 /* RW: (8) PCI Bus Master Latency */ +#define HP100_REG_EARLYTXCFG 0x16 /* RW: (16) Early TX Cfg/Cntrl Reg */ +#define HP100_REG_EARLYRXCFG 0x18 /* RW: (8) Early RX Cfg/Cntrl Reg */ +#define HP100_REG_ISAPNPCFG1 0x1a /* RW: (8) ISA PnP Cfg/Cntrl Reg 1 */ +#define HP100_REG_ISAPNPCFG2 0x1b /* RW: (8) ISA PnP Cfg/Cntrl Reg 2 */ + +/* Page 3 - EEPROM/Boot ROM */ + +#define HP100_REG_EEPROM_CTRL 0x08 /* RW: (16) Used to load EEPROM */ +#define HP100_REG_BOOTROM_CTRL 0x0a + +/* Page 4 - LAN Configuration (MAC_CTRL) */ + +#define HP100_REG_10_LAN_CFG_1 0x08 /* RW: (8) Set 10M XCVR functions */ +#define HP100_REG_10_LAN_CFG_2 0x09 /* RW: (8) 10M XCVR functions */ +#define HP100_REG_VG_LAN_CFG_1 0x0a /* RW: (8) Set 100M XCVR functions */ +#define HP100_REG_VG_LAN_CFG_2 0x0b /* RW: (8) 100M LAN Training cfgregs */ +#define HP100_REG_MAC_CFG_1 0x0c /* RW: (8) Types of pkts to accept */ +#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */ +#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */ +#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */ +#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts can't fit in mem */ +#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */ +#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */ +#define 
HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register. */ +#define HP100_REG_TRAIN_ALLOW 0x16 /* R: (16) Hub allowed register */ + +/* Page 5 - MMU */ + +#define HP100_REG_RX_MEM_STOP 0x0c /* RW: (16) End of Rx ring addr */ +#define HP100_REG_TX_MEM_STOP 0x0e /* RW: (16) End of Tx ring addr */ +#define HP100_REG_PDL_MEM_STOP 0x10 /* Not used by 802.12 devices */ +#define HP100_REG_ECB_MEM_STOP 0x14 /* I've no idea what this is */ + +/* Page 6 - Card ID/Physical LAN Address */ + +#define HP100_REG_BOARD_ID 0x08 /* R: (8) EISA/ISA card ID */ +#define HP100_REG_BOARD_IO_CHCK 0x0c /* R: (8) Added to ID to get FFh */ +#define HP100_REG_SOFT_MODEL 0x0d /* R: (8) Config program defined */ +#define HP100_REG_LAN_ADDR 0x10 /* R: (8) MAC addr of card */ +#define HP100_REG_LAN_ADDR_CHCK 0x16 /* R: (8) Added to addr to get FFh */ + +/* Page 7 - MMU Current Pointers */ + +#define HP100_REG_PTR_RXSTART 0x08 /* R: (16) Current begin of Rx ring */ +#define HP100_REG_PTR_RXEND 0x0a /* R: (16) Current end of Rx ring */ +#define HP100_REG_PTR_TXSTART 0x0c /* R: (16) Current begin of Tx ring */ +#define HP100_REG_PTR_TXEND 0x0e /* R: (16) Current end of Tx ring */ +#define HP100_REG_PTR_RPDLSTART 0x10 +#define HP100_REG_PTR_RPDLEND 0x12 +#define HP100_REG_PTR_RINGPTRS 0x14 +#define HP100_REG_PTR_MEMDEBUG 0x1a +/* ------------------------------------------------------------------------ */ + + +/* + * Hardware ID Register I (Always available, HW_ID, Offset 0x00) + */ +#define HP100_HW_ID_CASCADE 0x4850 /* Identifies Cascade Chip */ + +/* + * Hardware ID Register 2 & Paging Register + * (Always available, PAGING, Offset 0x02) + * Bits 15:4 are for the Chip ID + */ +#define HP100_CHIPID_MASK 0xFFF0 +#define HP100_CHIPID_SHASTA 0x5350 /* Not 802.12 compliant */ + /* EISA BM/SL, MCA16/32 SL, ISA SL */ +#define HP100_CHIPID_RAINIER 0x5360 /* Not 802.12 compliant EISA BM, */ + /* PCI SL, MCA16/32 SL, ISA SL */ +#define HP100_CHIPID_LASSEN 0x5370 /* 802.12 compliant PCI BM, PCI SL */ + /* LRF supported */ + +/* + * Option Registers I and II + * (Always available, OPTION_LSW, Offset 0x04-0x05) + */ +#define HP100_DEBUG_EN 0x8000 /* 0:Dis., 1:Enable Debug Dump Ptr. */ +#define HP100_RX_HDR 0x4000 /* 0:Dis., 1:Enable putting pkt into */ + /* system mem. before Rx interrupt */ +#define HP100_MMAP_DIS 0x2000 /* 0:Enable, 1:Disable mem.mapping. */ + /* MMAP_DIS must be 0 and MEM_EN */ + /* must be 1 for memory-mapped */ + /* mode to be enabled */ +#define HP100_EE_EN 0x1000 /* 0:Disable,1:Enable EEPROM writing */ +#define HP100_BM_WRITE 0x0800 /* 0:Slave, 1:Bus Master for Tx data */ +#define HP100_BM_READ 0x0400 /* 0:Slave, 1:Bus Master for Rx data */ +#define HP100_TRI_INT 0x0200 /* 0:Don't, 1:Do tri-state the int */ +#define HP100_MEM_EN 0x0040 /* Config program set this to */ + /* 0:Disable, 1:Enable mem map. */ + /* See MMAP_DIS. 
*/ +#define HP100_IO_EN 0x0020 /* 1:Enable I/O transfers */ +#define HP100_BOOT_EN 0x0010 /* 1:Enable boot ROM access */ +#define HP100_FAKE_INT 0x0008 /* 1:int */ +#define HP100_INT_EN 0x0004 /* 1:Enable ints from card */ +#define HP100_HW_RST 0x0002 /* 0:Reset, 1:Out of reset */ + /* NIC reset on 0 to 1 transition */ + +/* + * Option Register III + * (Always available, OPTION_MSW, Offset 0x06) + */ +#define HP100_PRIORITY_TX 0x0080 /* 1:Do all Tx pkts as priority */ +#define HP100_EE_LOAD 0x0040 /* 1:EEPROM loading, 0 when done */ +#define HP100_ADV_NXT_PKT 0x0004 /* 1:Advance to next pkt in Rx queue */ + /* h/w will set to 0 when done */ +#define HP100_TX_CMD 0x0002 /* 1:Tell h/w download done, h/w */ + /* will set to 0 when done */ + +/* + * Interrupt Status Registers I and II + * (Page PERFORMANCE, IRQ_STATUS, Offset 0x08-0x09) + * Note: With old chips, these Registers will clear when 1 is written to them + * with new chips this depends on setting of CLR_ISMODE + */ +#define HP100_RX_EARLY_INT 0x2000 +#define HP100_RX_PDA_ZERO 0x1000 +#define HP100_RX_PDL_FILL_COMPL 0x0800 +#define HP100_RX_PACKET 0x0400 /* 0:No, 1:Yes pkt has been Rx */ +#define HP100_RX_ERROR 0x0200 /* 0:No, 1:Yes Rx pkt had error */ +#define HP100_TX_PDA_ZERO 0x0020 /* 1 when PDA count goes to zero */ +#define HP100_TX_SPACE_AVAIL 0x0010 /* 0:<8192, 1:>=8192 Tx free bytes */ +#define HP100_TX_COMPLETE 0x0008 /* 0:No, 1:Yes a Tx has completed */ +#define HP100_MISC_ERROR 0x0004 /* 0:No, 1:Lan Link down or bus error */ +#define HP100_TX_ERROR 0x0002 /* 0:No, 1:Yes Tx pkt had error */ + +/* + * Xmit Memory Free Count + * (Page PERFORMANCE, TX_MEM_FREE, Offset 0x14) (Read only, 32bit) + */ +#define HP100_AUTO_COMPARE 0x80000000 /* Tx Space avail & pkts<255 */ +#define HP100_FREE_SPACE 0x7fffffe0 /* Tx free memory */ + +/* + * IRQ Channel + * (Page HW_MAP, IRQ_CHANNEL, Offset 0x0d) + */ +#define HP100_ZERO_WAIT_EN 0x80 /* 0:No, 1:Yes asserts NOWS signal */ +#define HP100_IRQ_SCRAMBLE 0x40 +#define HP100_BOND_HP 0x20 +#define HP100_LEVEL_IRQ 0x10 /* 0:Edge, 1:Level type interrupts. */ + /* (Only valid on EISA cards) */ +#define HP100_IRQMASK 0x0F /* Isolate the IRQ bits */ + +/* + * SRAM Parameters + * (Page HW_MAP, SRAM, Offset 0x0e) + */ +#define HP100_RAM_SIZE_MASK 0xe0 /* AND to get SRAM size index */ +#define HP100_RAM_SIZE_SHIFT 0x05 /* Shift count(put index in lwr bits) */ + +/* + * Bus Master Register + * (Page HW_MAP, BM, Offset 0x0f) + */ +#define HP100_BM_BURST_RD 0x01 /* EISA only: 1=Use burst trans. fm system */ + /* memory to chip (tx) */ +#define HP100_BM_BURST_WR 0x02 /* EISA only: 1=Use burst trans. fm system */ + /* memory to chip (rx) */ +#define HP100_BM_MASTER 0x04 /* 0:Slave, 1:BM mode */ +#define HP100_BM_PAGE_CK 0x08 /* This bit should be set whenever in */ + /* an EISA system */ +#define HP100_BM_PCI_8CLK 0x40 /* ... cycles 8 clocks apart */ + + +/* + * Mode Control Register I + * (Page HW_MAP, MODECTRL1, Offset0x10) + */ +#define HP100_TX_DUALQ 0x10 + /* If set and BM -> dual tx pda queues */ +#define HP100_ISR_CLRMODE 0x02 /* If set ISR will clear all pending */ + /* interrupts on read (etr only?) */ +#define HP100_EE_NOLOAD 0x04 /* Status whether res will be loaded */ + /* from the eeprom */ +#define HP100_TX_CNT_FLG 0x08 /* Controls Early TX Reg Cnt Field */ +#define HP100_PDL_USE3 0x10 /* If set BM engine will read only */ + /* first three data elements of a PDL */ + /* on the first access. 
*/ +#define HP100_BUSTYPE_MASK 0xe0 /* Three bit bus type info */ + +/* + * Mode Control Register II + * (Page HW_MAP, MODECTRL2, Offset0x11) + */ +#define HP100_EE_MASK 0x0f /* Tell EEPROM circuit not to load */ + /* certain resources */ +#define HP100_DIS_CANCEL 0x20 /* For tx dualq mode operation */ +#define HP100_EN_PDL_WB 0x40 /* 1: Status of PDL completion may be */ + /* written back to system mem */ +#define HP100_EN_BUS_FAIL 0x80 /* Enables bus-fail portion of misc */ + /* interrupt */ + +/* + * PCI Configuration and Control Register I + * (Page HW_MAP, PCICTRL1, Offset 0x12) + */ +#define HP100_LO_MEM 0x01 /* 1: Mapped Mem requested below 1MB */ +#define HP100_NO_MEM 0x02 /* 1: Disables Req for sysmem to PCI */ + /* bios */ +#define HP100_USE_ISA 0x04 /* 1: isa type decodes will occur */ + /* simultaneously with PCI decodes */ +#define HP100_IRQ_HI_MASK 0xf0 /* pgmed by pci bios */ +#define HP100_PCI_IRQ_HI_MASK 0x78 /* Isolate 4 bits for PCI IRQ */ + +/* + * PCI Configuration and Control Register II + * (Page HW_MAP, PCICTRL2, Offset 0x13) + */ +#define HP100_RD_LINE_PDL 0x01 /* 1: PCI command Memory Read Line en */ +#define HP100_RD_TX_DATA_MASK 0x06 /* choose PCI memread cmds for TX */ +#define HP100_MWI 0x08 /* 1: en. PCI memory write invalidate */ +#define HP100_ARB_MODE 0x10 /* Select PCI arbitor type */ +#define HP100_STOP_EN 0x20 /* Enables PCI state machine to issue */ + /* pci stop if cascade not ready */ +#define HP100_IGNORE_PAR 0x40 /* 1: PCI state machine ignores parity */ +#define HP100_PCI_RESET 0x80 /* 0->1: Reset PCI block */ + +/* + * Early TX Configuration and Control Register + * (Page HW_MAP, EARLYTXCFG, Offset 0x16) + */ +#define HP100_EN_EARLY_TX 0x8000 /* 1=Enable Early TX */ +#define HP100_EN_ADAPTIVE 0x4000 /* 1=Enable adaptive mode */ +#define HP100_EN_TX_UR_IRQ 0x2000 /* reserved, must be 0 */ +#define HP100_EN_LOW_TX 0x1000 /* reserved, must be 0 */ +#define HP100_ET_CNT_MASK 0x0fff /* bits 11..0: ET counters */ + +/* + * Early RX Configuration and Control Register + * (Page HW_MAP, EARLYRXCFG, Offset 0x18) + */ +#define HP100_EN_EARLY_RX 0x80 /* 1=Enable Early RX */ +#define HP100_EN_LOW_RX 0x40 /* reserved, must be 0 */ +#define HP100_RX_TRIP_MASK 0x1f /* bits 4..0: threshold at which the + * early rx circuit will start the + * dma of received packet into system + * memory for BM */ + +/* + * Serial Devices Control Register + * (Page EEPROM_CTRL, EEPROM_CTRL, Offset 0x08) + */ +#define HP100_EEPROM_LOAD 0x0001 /* 0->1 loads EEPROM into registers. */ + /* When it goes back to 0, load is */ + /* complete. This should take ~600us. */ + +/* + * 10MB LAN Control and Configuration Register I + * (Page MAC_CTRL, 10_LAN_CFG_1, Offset 0x08) + */ +#define HP100_MAC10_SEL 0xc0 /* Get bits to indicate MAC */ +#define HP100_AUI_SEL 0x20 /* Status of AUI selection */ +#define HP100_LOW_TH 0x10 /* 0:No, 1:Yes allow better cabling */ +#define HP100_LINK_BEAT_DIS 0x08 /* 0:Enable, 1:Disable link beat */ +#define HP100_LINK_BEAT_ST 0x04 /* 0:No, 1:Yes link beat being Rx */ +#define HP100_R_ROL_ST 0x02 /* 0:No, 1:Yes Rx twisted pair has */ + /* been reversed */ +#define HP100_AUI_ST 0x01 /* 0:No, 1:Yes use AUI on TP card */ + +/* + * 10 MB LAN Control and Configuration Register II + * (Page MAC_CTRL, 10_LAN_CFG_2, Offset 0x09) + */ +#define HP100_SQU_ST 0x01 /* 0:No, 1:Yes collision signal sent */ + /* after Tx.Only used for AUI. */ +#define HP100_FULLDUP 0x02 /* 1: LXT901 XCVR fullduplx enabled */ +#define HP100_DOT3_MAC 0x04 /* 1: DOT 3 Mac sel. 
unless Autosel */ + +/* + * MAC Selection, use with MAC10_SEL bits + */ +#define HP100_AUTO_SEL_10 0x0 /* Auto select */ +#define HP100_XCVR_LXT901_10 0x1 /* LXT901 10BaseT transceiver */ +#define HP100_XCVR_7213 0x2 /* 7213 transceiver */ +#define HP100_XCVR_82503 0x3 /* 82503 transceiver */ + +/* + * 100MB LAN Training Register + * (Page MAC_CTRL, VG_LAN_CFG_2, Offset 0x0b) (old, pre 802.12) + */ +#define HP100_FRAME_FORMAT 0x08 /* 0:802.3, 1:802.5 frames */ +#define HP100_BRIDGE 0x04 /* 0:No, 1:Yes tell hub i am a bridge */ +#define HP100_PROM_MODE 0x02 /* 0:No, 1:Yes tell hub card is */ + /* promiscuous */ +#define HP100_REPEATER 0x01 /* 0:No, 1:Yes tell hub MAC wants to */ + /* be a cascaded repeater */ + +/* + * 100MB LAN Control and Configuration Register + * (Page MAC_CTRL, VG_LAN_CFG_1, Offset 0x0a) + */ +#define HP100_VG_SEL 0x80 /* 0:No, 1:Yes use 100 Mbit MAC */ +#define HP100_LINK_UP_ST 0x40 /* 0:No, 1:Yes endnode logged in */ +#define HP100_LINK_CABLE_ST 0x20 /* 0:No, 1:Yes cable can hear tones */ + /* from hub */ +#define HP100_LOAD_ADDR 0x10 /* 0->1 card addr will be sent */ + /* 100ms later the link status */ + /* bits are valid */ +#define HP100_LINK_CMD 0x08 /* 0->1 link will attempt to log in. */ + /* 100ms later the link status */ + /* bits are valid */ +#define HP100_TRN_DONE 0x04 /* NEW ETR-Chips only: Will be reset */ + /* after LinkUp Cmd is given and set */ + /* when training has completed. */ +#define HP100_LINK_GOOD_ST 0x02 /* 0:No, 1:Yes cable passed training */ +#define HP100_VG_RESET 0x01 /* 0:Yes, 1:No reset the 100VG MAC */ + + +/* + * MAC Configuration Register I + * (Page MAC_CTRL, MAC_CFG_1, Offset 0x0c) + */ +#define HP100_RX_IDLE 0x80 /* 0:Yes, 1:No currently receiving pkts */ +#define HP100_TX_IDLE 0x40 /* 0:Yes, 1:No currently Txing pkts */ +#define HP100_RX_EN 0x20 /* 1: allow receiving of pkts */ +#define HP100_TX_EN 0x10 /* 1: allow transmitting of pkts */ +#define HP100_ACC_ERRORED 0x08 /* 0:No, 1:Yes allow Rx of errored pkts */ +#define HP100_ACC_MC 0x04 /* 0:No, 1:Yes allow Rx of multicast pkts */ +#define HP100_ACC_BC 0x02 /* 0:No, 1:Yes allow Rx of broadcast pkts */ +#define HP100_ACC_PHY 0x01 /* 0:No, 1:Yes allow Rx of ALL phys. pkts */ +#define HP100_MAC1MODEMASK 0xf0 /* Hide ACC bits */ +#define HP100_MAC1MODE1 0x00 /* Receive nothing, must also disable RX */ +#define HP100_MAC1MODE2 0x00 +#define HP100_MAC1MODE3 HP100_MAC1MODE2 | HP100_ACC_BC +#define HP100_MAC1MODE4 HP100_MAC1MODE3 | HP100_ACC_MC +#define HP100_MAC1MODE5 HP100_MAC1MODE4 /* set mc hash to all ones also */ +#define HP100_MAC1MODE6 HP100_MAC1MODE5 | HP100_ACC_PHY /* Promiscuous */ +/* Note MODE6 will receive all GOOD packets on the LAN. This really needs + a mode 7 defined to be LAN Analyzer mode, which will receive errored and + runt packets, and keep the CRC bytes. */ +#define HP100_MAC1MODE7 HP100_MAC1MODE6 | HP100_ACC_ERRORED + +/* + * MAC Configuration Register II + * (Page MAC_CTRL, MAC_CFG_2, Offset 0x0d) + */ +#define HP100_TR_MODE 0x80 /* 0:No, 1:Yes support Token Ring formats */ +#define HP100_TX_SAME 0x40 /* 0:No, 1:Yes Tx same packet continuous */ +#define HP100_LBK_XCVR 0x20 /* 0:No, 1:Yes loopback through MAC & */ + /* transceiver */ +#define HP100_LBK_MAC 0x10 /* 0:No, 1:Yes loopback through MAC */ +#define HP100_CRC_I 0x08 /* 0:No, 1:Yes inhibit CRC on Tx packets */ +#define HP100_ACCNA 0x04 /* 1: For 802.5: Accept only token ring + * group addr that maches NA mask */ +#define HP100_KEEP_CRC 0x02 /* 0:No, 1:Yes keep CRC on Rx packets. 
*/ + /* The length will reflect this. */ +#define HP100_ACCFA 0x01 /* 1: For 802.5: Accept only functional + * addrs that match FA mask (page1) */ +#define HP100_MAC2MODEMASK 0x02 +#define HP100_MAC2MODE1 0x00 +#define HP100_MAC2MODE2 0x00 +#define HP100_MAC2MODE3 0x00 +#define HP100_MAC2MODE4 0x00 +#define HP100_MAC2MODE5 0x00 +#define HP100_MAC2MODE6 0x00 +#define HP100_MAC2MODE7 HP100_KEEP_CRC + +/* + * MAC Configuration Register III + * (Page MAC_CTRL, MAC_CFG_3, Offset 0x0e) + */ +#define HP100_PACKET_PACE 0x03 /* Packet Pacing: + * 00: No packet pacing + * 01: 8 to 16 uS delay + * 10: 16 to 32 uS delay + * 11: 32 to 64 uS delay + */ +#define HP100_LRF_EN 0x04 /* 1: External LAN Rcv Filter and + * TCP/IP Checksumming enabled. */ +#define HP100_AUTO_MODE 0x10 /* 1: AutoSelect between 10/100 */ + +/* + * MAC Configuration Register IV + * (Page MAC_CTRL, MAC_CFG_4, Offset 0x0f) + */ +#define HP100_MAC_SEL_ST 0x01 /* (R): Status of external VGSEL + * Signal, 1=100VG, 0=10Mbit sel. */ +#define HP100_LINK_FAIL_ST 0x02 /* (R): Status of Link Fail portion + * of the Misc. Interrupt */ + +/* + * 100 MB LAN Training Request/Allowed Registers + * (Page MAC_CTRL, TRAIN_REQUEST and TRAIN_ALLOW, Offset 0x14-0x16)(ETR parts only) + */ +#define HP100_MACRQ_REPEATER 0x0001 /* 1: MAC tells HUB it wants to be + * a cascaded repeater + * 0: ... wants to be a DTE */ +#define HP100_MACRQ_PROMSC 0x0006 /* 2 bits: Promiscuous mode + * 00: Rcv only unicast packets + * specifically addr to this + * endnode + * 10: Rcv all pckts fwded by + * the local repeater */ +#define HP100_MACRQ_FRAMEFMT_EITHER 0x0018 /* 11: either format allowed */ +#define HP100_MACRQ_FRAMEFMT_802_3 0x0000 /* 00: 802.3 is requested */ +#define HP100_MACRQ_FRAMEFMT_802_5 0x0010 /* 10: 802.5 format is requested */ +#define HP100_CARD_MACVER 0xe000 /* R: 3 bit Cards 100VG MAC version */ +#define HP100_MALLOW_REPEATER 0x0001 /* If reset, requested access as an + * end node is allowed */ +#define HP100_MALLOW_PROMSC 0x0004 /* 2 bits: Promiscuous mode + * 00: Rcv only unicast packets + * specifically addr to this + * endnode + * 10: Rcv all pckts fwded by + * the local repeater */ +#define HP100_MALLOW_FRAMEFMT 0x00e0 /* 2 bits: Frame Format + * 00: 802.3 format will be used + * 10: 802.5 format will be used */ +#define HP100_MALLOW_ACCDENIED 0x0400 /* N bit */ +#define HP100_MALLOW_CONFIGURE 0x0f00 /* C bit */ +#define HP100_MALLOW_DUPADDR 0x1000 /* D bit */ +#define HP100_HUB_MACVER 0xe000 /* R: 3 bit 802.12 MAC/RMAC training */ + /* protocol of repeater */ + +/* ****************************************************************************** */ + +/* + * Set/Reset bits + */ +#define HP100_SET_HB 0x0100 /* 0:Set fields to 0 whose mask is 1 */ +#define HP100_SET_LB 0x0001 /* HB sets upper byte, LB sets lower byte */ +#define HP100_RESET_HB 0x0000 /* For readability when resetting bits */ +#define HP100_RESET_LB 0x0000 /* For readability when resetting bits */ + +/* + * Misc. Constants + */ +#define HP100_LAN_100 100 /* lan_type value for VG */ +#define HP100_LAN_10 10 /* lan_type value for 10BaseT */ +#define HP100_LAN_COAX 9 /* lan_type value for Coax */ +#define HP100_LAN_ERR (-1) /* lan_type value for link down */ + +/* + * Bus Master Data Structures ---------------------------------------------- + */ + +#define MAX_RX_PDL 30 /* Card limit = 31 */ +#define MAX_RX_FRAG 2 /* Don't need more... 
*/ +#define MAX_TX_PDL 29 +#define MAX_TX_FRAG 2 /* Limit = 31 */ + +/* Define total PDL area size in bytes (should be 4096) */ +/* This is the size of kernel (dma) memory that will be allocated. */ +#define MAX_RINGSIZE ((MAX_RX_FRAG*8+4+4)*MAX_RX_PDL+(MAX_TX_FRAG*8+4+4)*MAX_TX_PDL)+16 + +/* Ethernet Packet Sizes */ +#define MIN_ETHER_SIZE 60 +#define MAX_ETHER_SIZE 1514 /* Needed for preallocation of */ + /* skb buffer when busmastering */ + +/* Tx or Rx Ring Entry */ +typedef struct hp100_ring { + u_int *pdl; /* Address of PDLs PDH, dword before + * this address is used for rx hdr */ + u_int pdl_paddr; /* Physical address of PDL */ + struct sk_buff *skb; + struct hp100_ring *next; +} hp100_ring_t; + + + +/* Mask for Header Descriptor */ +#define HP100_PKT_LEN_MASK 0x1FFF /* AND with RxLength to get length */ + + +/* Receive Packet Status. Note, the error bits are only valid if ACC_ERRORED + bit in the MAC Configuration Register 1 is set. */ +#define HP100_RX_PRI 0x8000 /* 0:No, 1:Yes packet is priority */ +#define HP100_SDF_ERR 0x4000 /* 0:No, 1:Yes start of frame error */ +#define HP100_SKEW_ERR 0x2000 /* 0:No, 1:Yes skew out of range */ +#define HP100_BAD_SYMBOL_ERR 0x1000 /* 0:No, 1:Yes invalid symbol received */ +#define HP100_RCV_IPM_ERR 0x0800 /* 0:No, 1:Yes pkt had an invalid packet */ + /* marker */ +#define HP100_SYMBOL_BAL_ERR 0x0400 /* 0:No, 1:Yes symbol balance error */ +#define HP100_VG_ALN_ERR 0x0200 /* 0:No, 1:Yes non-octet received */ +#define HP100_TRUNC_ERR 0x0100 /* 0:No, 1:Yes the packet was truncated */ +#define HP100_RUNT_ERR 0x0040 /* 0:No, 1:Yes pkt length < Min Pkt */ + /* Length Reg. */ +#define HP100_ALN_ERR 0x0010 /* 0:No, 1:Yes align error. */ +#define HP100_CRC_ERR 0x0008 /* 0:No, 1:Yes CRC occurred. */ + +/* The last three bits indicate the type of destination address */ + +#define HP100_MULTI_ADDR_HASH 0x0006 /* 110: Addr multicast, matched hash */ +#define HP100_BROADCAST_ADDR 0x0003 /* x11: Addr broadcast */ +#define HP100_MULTI_ADDR_NO_HASH 0x0002 /* 010: Addr multicast, didn't match hash */ +#define HP100_PHYS_ADDR_MATCH 0x0001 /* x01: Addr was physical and mine */ +#define HP100_PHYS_ADDR_NO_MATCH 0x0000 /* x00: Addr was physical but not mine */ + +/* + * macros + */ + +#define hp100_inb( reg ) \ + inb( ioaddr + HP100_REG_##reg ) +#define hp100_inw( reg ) \ + inw( ioaddr + HP100_REG_##reg ) +#define hp100_inl( reg ) \ + inl( ioaddr + HP100_REG_##reg ) +#define hp100_outb( data, reg ) \ + outb( data, ioaddr + HP100_REG_##reg ) +#define hp100_outw( data, reg ) \ + outw( data, ioaddr + HP100_REG_##reg ) +#define hp100_outl( data, reg ) \ + outl( data, ioaddr + HP100_REG_##reg ) +#define hp100_orb( data, reg ) \ + outb( inb( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg ) +#define hp100_orw( data, reg ) \ + outw( inw( ioaddr + HP100_REG_##reg ) | (data), ioaddr + HP100_REG_##reg ) +#define hp100_andb( data, reg ) \ + outb( inb( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg ) +#define hp100_andw( data, reg ) \ + outw( inw( ioaddr + HP100_REG_##reg ) & (data), ioaddr + HP100_REG_##reg ) + +#define hp100_page( page ) \ + outw( HP100_PAGE_##page, ioaddr + HP100_REG_PAGING ) +#define hp100_ints_off() \ + outw( HP100_INT_EN | HP100_RESET_LB, ioaddr + HP100_REG_OPTION_LSW ) +#define hp100_ints_on() \ + outw( HP100_INT_EN | HP100_SET_LB, ioaddr + HP100_REG_OPTION_LSW ) +#define hp100_mem_map_enable() \ + outw( HP100_MMAP_DIS | HP100_RESET_HB, ioaddr + HP100_REG_OPTION_LSW ) +#define hp100_mem_map_disable() \ + outw( HP100_MMAP_DIS 
| HP100_SET_HB, ioaddr + HP100_REG_OPTION_LSW ) diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c new file mode 100644 index 000000000..7ce6379fd --- /dev/null +++ b/drivers/net/ethernet/i825xx/82596.c @@ -0,0 +1,1548 @@ +/* 82596.c: A generic 82596 ethernet driver for linux. */ +/* + Based on Apricot.c + Written 1994 by Mark Evans. + This driver is for the Apricot 82596 bus-master interface + + Modularised 12/94 Mark Evans + + + Modified to support the 82596 ethernet chips on 680x0 VME boards. + by Richard Hirst + Renamed to be 82596.c + + 980825: Changed to receive directly in to sk_buffs which are + allocated at open() time. Eliminates copy on incoming frames + (small ones are still copied). Shared data now held in a + non-cached page, so we can run on 68060 in copyback mode. + + TBD: + * look at deferring rx frames rather than discarding (as per tulip) + * handle tx ring full as per tulip + * performance test to tune rx_copybreak + + Most of my modifications relate to the braindead big-endian + implementation by Intel. When the i596 is operating in + 'big-endian' mode, it thinks a 32 bit value of 0x12345678 + should be stored as 0x56781234. This is a real pain, when + you have linked lists which are shared by the 680x0 and the + i596. + + Driver skeleton + Written 1993 by Donald Becker. + Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and distributed + according to the terms of the GNU General Public License as modified by SRC, + incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static char version[] __initdata = + "82596.c $Revision: 1.5 $\n"; + +#define DRV_NAME "82596" + +/* DEBUG flags + */ + +#define DEB_INIT 0x0001 +#define DEB_PROBE 0x0002 +#define DEB_SERIOUS 0x0004 +#define DEB_ERRORS 0x0008 +#define DEB_MULTI 0x0010 +#define DEB_TDR 0x0020 +#define DEB_OPEN 0x0040 +#define DEB_RESET 0x0080 +#define DEB_ADDCMD 0x0100 +#define DEB_STATUS 0x0200 +#define DEB_STARTTX 0x0400 +#define DEB_RXADDR 0x0800 +#define DEB_TXADDR 0x1000 +#define DEB_RXFRAME 0x2000 +#define DEB_INTS 0x4000 +#define DEB_STRUCT 0x8000 +#define DEB_ANY 0xffff + + +#define DEB(x,y) if (i596_debug & (x)) y + + +#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE) +#define ENABLE_MVME16x_NET +#endif +#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE) +#define ENABLE_BVME6000_NET +#endif + +#ifdef ENABLE_MVME16x_NET +#include +#endif +#ifdef ENABLE_BVME6000_NET +#include +#endif + +/* + * Define various macros for Channel Attention, word swapping etc., dependent + * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel. 
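 *
 * As a small illustrative sketch (not part of the original driver code):
 * a CPU-side bus address such as 0x12345678 has to be presented to the
 * i596 as 0x56781234, i.e. with its two 16-bit halves exchanged:
 *
 *	u32 cpu_addr  = 0x12345678;
 *	u32 i596_addr = (cpu_addr << 16) | (cpu_addr >> 16);	yields 0x56781234
 *
 * which is what the WSWAP* macros below do before an address is handed
 * to the chip.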
+ */ + +#ifdef __mc68000__ +#define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) +#define ISCP_BUSY 0x00010000 +#else +#error 82596.c: unknown architecture +#endif + +/* + * These were the intel versions, left here for reference. There + * are currently no x86 users of this legacy i82596 chip. + */ +#if 0 +#define WSWAPrfd(x) ((struct i596_rfd *)((long)x)) +#define WSWAPrbd(x) ((struct i596_rbd *)((long)x)) +#define WSWAPiscp(x) ((struct i596_iscp *)((long)x)) +#define WSWAPscb(x) ((struct i596_scb *)((long)x)) +#define WSWAPcmd(x) ((struct i596_cmd *)((long)x)) +#define WSWAPtbd(x) ((struct i596_tbd *)((long)x)) +#define WSWAPchar(x) ((char *)((long)x)) +#define ISCP_BUSY 0x0001 +#endif + +/* + * The MPU_PORT command allows direct access to the 82596. With PORT access + * the following commands are available (p5-18). The 32-bit port command + * must be word-swapped with the most significant word written first. + * This only applies to VME boards. + */ +#define PORT_RESET 0x00 /* reset 82596 */ +#define PORT_SELFTEST 0x01 /* selftest */ +#define PORT_ALTSCP 0x02 /* alternate SCB address */ +#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */ + +static int i596_debug = (DEB_SERIOUS|DEB_PROBE); + +MODULE_AUTHOR("Richard Hirst"); +MODULE_DESCRIPTION("i82596 driver"); +MODULE_LICENSE("GPL"); + +module_param(i596_debug, int, 0); +MODULE_PARM_DESC(i596_debug, "i82596 debug mask"); + + +/* Copy frames shorter than rx_copybreak, otherwise pass on up in + * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). + */ +static int rx_copybreak = 100; + +#define PKT_BUF_SZ 1536 +#define MAX_MC_CNT 64 + +#define I596_TOTAL_SIZE 17 + +#define I596_NULL ((void *)0xffffffff) + +#define CMD_EOL 0x8000 /* The last command of the list, stop. */ +#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ +#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ + +#define CMD_FLEX 0x0008 /* Enable flexible memory model */ + +enum commands { + CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, + CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7 +}; + +#define STAT_C 0x8000 /* Set to 0 after execution */ +#define STAT_B 0x4000 /* Command being executed */ +#define STAT_OK 0x2000 /* Command executed ok */ +#define STAT_A 0x1000 /* Command aborted */ + +#define CUC_START 0x0100 +#define CUC_RESUME 0x0200 +#define CUC_SUSPEND 0x0300 +#define CUC_ABORT 0x0400 +#define RX_START 0x0010 +#define RX_RESUME 0x0020 +#define RX_SUSPEND 0x0030 +#define RX_ABORT 0x0040 + +#define TX_TIMEOUT (HZ/20) + + +struct i596_reg { + unsigned short porthi; + unsigned short portlo; + unsigned long ca; +}; + +#define EOF 0x8000 +#define SIZE_MASK 0x3fff + +struct i596_tbd { + unsigned short size; + unsigned short pad; + struct i596_tbd *next; + char *data; +}; + +/* The command structure has two 'next' pointers; v_next is the address of + * the next command as seen by the CPU, b_next is the address of the next + * command as seen by the 82596. 
The b_next pointer, as used by the 82596 + * always references the status field of the next command, rather than the + * v_next field, because the 82596 is unaware of v_next. It may seem more + * logical to put v_next at the end of the structure, but we cannot do that + * because the 82596 expects other fields to be there, depending on command + * type. + */ + +struct i596_cmd { + struct i596_cmd *v_next; /* Address from CPUs viewpoint */ + unsigned short status; + unsigned short command; + struct i596_cmd *b_next; /* Address from i596 viewpoint */ +}; + +struct tx_cmd { + struct i596_cmd cmd; + struct i596_tbd *tbd; + unsigned short size; + unsigned short pad; + struct sk_buff *skb; /* So we can free it after tx */ +}; + +struct tdr_cmd { + struct i596_cmd cmd; + unsigned short status; + unsigned short pad; +}; + +struct mc_cmd { + struct i596_cmd cmd; + short mc_cnt; + char mc_addrs[MAX_MC_CNT*6]; +}; + +struct sa_cmd { + struct i596_cmd cmd; + char eth_addr[8]; +}; + +struct cf_cmd { + struct i596_cmd cmd; + char i596_config[16]; +}; + +struct i596_rfd { + unsigned short stat; + unsigned short cmd; + struct i596_rfd *b_next; /* Address from i596 viewpoint */ + struct i596_rbd *rbd; + unsigned short count; + unsigned short size; + struct i596_rfd *v_next; /* Address from CPUs viewpoint */ + struct i596_rfd *v_prev; +}; + +struct i596_rbd { + unsigned short count; + unsigned short zero1; + struct i596_rbd *b_next; + unsigned char *b_data; /* Address from i596 viewpoint */ + unsigned short size; + unsigned short zero2; + struct sk_buff *skb; + struct i596_rbd *v_next; + struct i596_rbd *b_addr; /* This rbd addr from i596 view */ + unsigned char *v_data; /* Address from CPUs viewpoint */ +}; + +#define TX_RING_SIZE 64 +#define RX_RING_SIZE 16 + +struct i596_scb { + unsigned short status; + unsigned short command; + struct i596_cmd *cmd; + struct i596_rfd *rfd; + unsigned long crc_err; + unsigned long align_err; + unsigned long resource_err; + unsigned long over_err; + unsigned long rcvdt_err; + unsigned long short_err; + unsigned short t_on; + unsigned short t_off; +}; + +struct i596_iscp { + unsigned long stat; + struct i596_scb *scb; +}; + +struct i596_scp { + unsigned long sysbus; + unsigned long pad; + struct i596_iscp *iscp; +}; + +struct i596_private { + volatile struct i596_scp scp; + volatile struct i596_iscp iscp; + volatile struct i596_scb scb; + struct sa_cmd sa_cmd; + struct cf_cmd cf_cmd; + struct tdr_cmd tdr_cmd; + struct mc_cmd mc_cmd; + unsigned long stat; + int last_restart __attribute__((aligned(4))); + struct i596_rfd *rfd_head; + struct i596_rbd *rbd_head; + struct i596_cmd *cmd_tail; + struct i596_cmd *cmd_head; + int cmd_backlog; + unsigned long last_cmd; + struct i596_rfd rfds[RX_RING_SIZE]; + struct i596_rbd rbds[RX_RING_SIZE]; + struct tx_cmd tx_cmds[TX_RING_SIZE]; + struct i596_tbd tbds[TX_RING_SIZE]; + int next_tx_cmd; + spinlock_t lock; +}; + +static char init_setup[] = +{ + 0x8E, /* length, prefetch on */ + 0xC8, /* fifo to 8, monitor off */ +#ifdef CONFIG_VME + 0xc0, /* don't save bad frames */ +#else + 0x80, /* don't save bad frames */ +#endif + 0x2E, /* No source address insertion, 8 byte preamble */ + 0x00, /* priority and backoff defaults */ + 0x60, /* interframe spacing */ + 0x00, /* slot time LSB */ + 0xf2, /* slot time and retries */ + 0x00, /* promiscuous mode */ + 0x00, /* collision detect */ + 0x40, /* minimum frame length */ + 0xff, + 0x00, + 0x7f /* *multi IA */ }; + +static int i596_open(struct net_device *dev); +static netdev_tx_t 
i596_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t i596_interrupt(int irq, void *dev_id); +static int i596_close(struct net_device *dev); +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); +static void i596_tx_timeout (struct net_device *dev); +static void print_eth(unsigned char *buf, char *str); +static void set_multicast_list(struct net_device *dev); + +static int rx_ring_size = RX_RING_SIZE; +static int ticks_limit = 25; +static int max_cmd_backlog = TX_RING_SIZE-1; + + +static inline void CA(struct net_device *dev) +{ +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + ((struct i596_reg *) dev->base_addr)->ca = 1; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile u32 i; + + i = *(volatile u32 *) (dev->base_addr); + } +#endif +} + + +static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x) +{ +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + struct i596_reg *p = (struct i596_reg *) (dev->base_addr); + p->porthi = ((c) | (u32) (x)) & 0xffff; + p->portlo = ((c) | (u32) (x)) >> 16; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + u32 v = (u32) (c) | (u32) (x); + v = ((u32) (v) << 16) | ((u32) (v) >> 16); + *(volatile u32 *) dev->base_addr = v; + udelay(1); + *(volatile u32 *) dev->base_addr = v; + } +#endif +} + + +static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) +{ + while (--delcnt && lp->iscp.stat) + udelay(10); + if (!delcnt) { + printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", + dev->name, str, lp->scb.status, lp->scb.command); + return -1; + } + else + return 0; +} + + +static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) +{ + while (--delcnt && lp->scb.command) + udelay(10); + if (!delcnt) { + printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", + dev->name, str, lp->scb.status, lp->scb.command); + return -1; + } + else + return 0; +} + + +static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str) +{ + volatile struct i596_cmd *c = cmd; + + while (--delcnt && c->command) + udelay(10); + if (!delcnt) { + printk(KERN_ERR "%s: %s.\n", dev->name, str); + return -1; + } + else + return 0; +} + + +static void i596_display_data(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct i596_cmd *cmd; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n", + &lp->scp, lp->scp.sysbus, lp->scp.iscp); + printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n", + &lp->iscp, lp->iscp.stat, lp->iscp.scb); + printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x," + " .cmd = %p, .rfd = %p\n", + &lp->scb, lp->scb.status, lp->scb.command, + lp->scb.cmd, lp->scb.rfd); + printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx," + " over %lx, rcvdt %lx, short %lx\n", + lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err, + lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err); + cmd = lp->cmd_head; + while (cmd != I596_NULL) { + printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n", + cmd, cmd->status, cmd->command, cmd->b_next); + cmd = cmd->v_next; + } + rfd = lp->rfd_head; + printk(KERN_ERR "rfd_head = %p\n", rfd); + do { + printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p," + " count %04x\n", + rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd, + rfd->count); + rfd = rfd->v_next; + } while (rfd != 
lp->rfd_head); + rbd = lp->rbd_head; + printk(KERN_ERR "rbd_head = %p\n", rbd); + do { + printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n", + rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size); + rbd = rbd->v_next; + } while (rbd != lp->rbd_head); +} + + +#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET) +static irqreturn_t i596_error(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + pcc2[0x28] = 1; + pcc2[0x2b] = 0x1d; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + *ethirq = 3; + } +#endif + printk(KERN_ERR "%s: Error interrupt\n", dev->name); + i596_display_data(dev); + return IRQ_HANDLED; +} +#endif + +static inline void remove_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct i596_rbd *rbd; + int i; + + for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { + if (rbd->skb == NULL) + break; + dev_kfree_skb(rbd->skb); + rbd->skb = NULL; + } +} + +static inline int init_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int i; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + /* First build the Receive Buffer Descriptor List */ + + for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { + struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ); + + if (skb == NULL) { + remove_rx_bufs(dev); + return -ENOMEM; + } + + rbd->v_next = rbd+1; + rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1)); + rbd->b_addr = WSWAPrbd(virt_to_bus(rbd)); + rbd->skb = skb; + rbd->v_data = skb->data; + rbd->b_data = WSWAPchar(virt_to_bus(skb->data)); + rbd->size = PKT_BUF_SZ; +#ifdef __mc68000__ + cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ); +#endif + } + lp->rbd_head = lp->rbds; + rbd = lp->rbds + rx_ring_size - 1; + rbd->v_next = lp->rbds; + rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds)); + + /* Now build the Receive Frame Descriptor List */ + + for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) { + rfd->rbd = I596_NULL; + rfd->v_next = rfd+1; + rfd->v_prev = rfd-1; + rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1)); + rfd->cmd = CMD_FLEX; + } + lp->rfd_head = lp->rfds; + lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); + rfd = lp->rfds; + rfd->rbd = lp->rbd_head; + rfd->v_prev = lp->rfds + rx_ring_size - 1; + rfd = lp->rfds + rx_ring_size - 1; + rfd->v_next = lp->rfds; + rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds)); + rfd->cmd = CMD_EOL|CMD_FLEX; + + return 0; +} + + +static void rebuild_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int i; + + /* Ensure rx frame/buffer descriptors are tidy */ + + for (i = 0; i < rx_ring_size; i++) { + lp->rfds[i].rbd = I596_NULL; + lp->rfds[i].cmd = CMD_FLEX; + } + lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX; + lp->rfd_head = lp->rfds; + lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); + lp->rbd_head = lp->rbds; + lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds)); +} + + +static int init_i596_mem(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + unsigned long flags; + + MPU_PORT(dev, PORT_RESET, NULL); + + udelay(100); /* Wait 100us - seems to help */ + +#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET) +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + /* Disable all ints for now */ + pcc2[0x28] = 1; + 
pcc2[0x2a] = 0x48; + /* Following disables snooping. Snooping is not required + * as we make appropriate use of non-cached pages for + * shared data, and cache_push/cache_clear. + */ + pcc2[0x2b] = 0x08; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + } +#endif + + /* change the scp address */ + + MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp)); + +#endif + + lp->last_cmd = jiffies; + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) + lp->scp.sysbus = 0x00000054; +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) + lp->scp.sysbus = 0x0000004c; +#endif + + lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp)); + lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb)); + lp->iscp.stat = ISCP_BUSY; + lp->cmd_backlog = 0; + + lp->cmd_head = lp->scb.cmd = I596_NULL; + +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + lp->scb.t_on = 7 * 25; + lp->scb.t_off = 1 * 25; + } +#endif + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); + + CA(dev); + + if (wait_istat(dev,lp,1000,"initialization timed out")) + goto failed; + DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name)); + + /* Ensure rx frame/buffer descriptors are tidy */ + rebuild_rx_bufs(dev); + lp->scb.command = 0; + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + /* Enable ints, etc. now */ + pcc2[0x2a] = 0x55; /* Edge sensitive */ + pcc2[0x2b] = 0x15; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 3; + } +#endif + + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name)); + memcpy(lp->cf_cmd.i596_config, init_setup, 14); + lp->cf_cmd.cmd.command = CmdConfigure; + i596_add_cmd(dev, &lp->cf_cmd.cmd); + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); + memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN); + lp->sa_cmd.cmd.command = CmdSASetup; + i596_add_cmd(dev, &lp->sa_cmd.cmd); + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); + lp->tdr_cmd.cmd.command = CmdTDR; + i596_add_cmd(dev, &lp->tdr_cmd.cmd); + + spin_lock_irqsave (&lp->lock, flags); + + if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) { + spin_unlock_irqrestore (&lp->lock, flags); + goto failed; + } + DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); + lp->scb.command = RX_START; + CA(dev); + + spin_unlock_irqrestore (&lp->lock, flags); + + if (wait_cmd(dev,lp,1000,"RX_START not processed")) + goto failed; + DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name)); + return 0; + +failed: + printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name); + MPU_PORT(dev, PORT_RESET, NULL); + return -1; +} + +static inline int i596_rx(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + int frames = 0; + + DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n", + lp->rfd_head, lp->rbd_head)); + + rfd = lp->rfd_head; /* Ref next frame to check */ + + while ((rfd->stat) & STAT_C) { /* Loop while complete frames */ + if (rfd->rbd == I596_NULL) + rbd = I596_NULL; + else if (rfd->rbd == lp->rbd_head->b_addr) + rbd = lp->rbd_head; + else { + printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name); + /* XXX Now what? 
*/ + rbd = I596_NULL; + } + DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n", + rfd, rfd->rbd, rfd->stat)); + + if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) { + /* a good frame */ + int pkt_len = rbd->count & 0x3fff; + struct sk_buff *skb = rbd->skb; + int rx_in_place = 0; + + DEB(DEB_RXADDR,print_eth(rbd->v_data, "received")); + frames++; + + /* Check if the packet is long enough to just accept + * without copying to a properly sized skbuff. + */ + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + + /* Get fresh skbuff to replace filled one. */ + newskb = netdev_alloc_skb(dev, PKT_BUF_SZ); + if (newskb == NULL) { + skb = NULL; /* drop pkt */ + goto memory_squeeze; + } + /* Pass up the skb already on the Rx ring. */ + skb_put(skb, pkt_len); + rx_in_place = 1; + rbd->skb = newskb; + rbd->v_data = newskb->data; + rbd->b_data = WSWAPchar(virt_to_bus(newskb->data)); +#ifdef __mc68000__ + cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); +#endif + } else { + skb = netdev_alloc_skb(dev, pkt_len + 2); + } +memory_squeeze: + if (skb == NULL) { + /* XXX tulip.c can defer packets here!! */ + dev->stats.rx_dropped++; + } else { + if (!rx_in_place) { + /* 16 byte align the data fields */ + skb_reserve(skb, 2); + memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len); + } + skb->protocol=eth_type_trans(skb,dev); + skb->len = pkt_len; +#ifdef __mc68000__ + cache_clear(virt_to_phys(rbd->skb->data), + pkt_len); +#endif + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes+=pkt_len; + } + } + else { + DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n", + dev->name, rfd->stat)); + dev->stats.rx_errors++; + if ((rfd->stat) & 0x0001) + dev->stats.collisions++; + if ((rfd->stat) & 0x0080) + dev->stats.rx_length_errors++; + if ((rfd->stat) & 0x0100) + dev->stats.rx_over_errors++; + if ((rfd->stat) & 0x0200) + dev->stats.rx_fifo_errors++; + if ((rfd->stat) & 0x0400) + dev->stats.rx_frame_errors++; + if ((rfd->stat) & 0x0800) + dev->stats.rx_crc_errors++; + if ((rfd->stat) & 0x1000) + dev->stats.rx_length_errors++; + } + + /* Clear the buffer descriptor count and EOF + F flags */ + + if (rbd != I596_NULL && (rbd->count & 0x4000)) { + rbd->count = 0; + lp->rbd_head = rbd->v_next; + } + + /* Tidy the frame descriptor, marking it as end of list */ + + rfd->rbd = I596_NULL; + rfd->stat = 0; + rfd->cmd = CMD_EOL|CMD_FLEX; + rfd->count = 0; + + /* Remove end-of-list from old end descriptor */ + + rfd->v_prev->cmd = CMD_FLEX; + + /* Update record of next frame descriptor to process */ + + lp->scb.rfd = rfd->b_next; + lp->rfd_head = rfd->v_next; + rfd = lp->rfd_head; + } + + DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames)); + + return 0; +} + + +static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) +{ + struct i596_cmd *ptr; + + while (lp->cmd_head != I596_NULL) { + ptr = lp->cmd_head; + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + switch ((ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + + dev_kfree_skb(skb); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + ptr->v_next = ptr->b_next = I596_NULL; + tx_cmd->cmd.command = 0; /* Mark as free */ + break; + } + default: + ptr->v_next = ptr->b_next = I596_NULL; + } + } + + wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out"); + lp->scb.cmd = I596_NULL; +} + +static void i596_reset(struct net_device *dev, struct i596_private *lp, + int ioaddr) +{ + unsigned long flags; + + 
DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n")); + + spin_lock_irqsave (&lp->lock, flags); + + wait_cmd(dev,lp,100,"i596_reset timed out"); + + netif_stop_queue(dev); + + lp->scb.command = CUC_ABORT | RX_ABORT; + CA(dev); + + /* wait for shutdown */ + wait_cmd(dev,lp,1000,"i596_reset 2 timed out"); + spin_unlock_irqrestore (&lp->lock, flags); + + i596_cleanup_cmd(dev,lp); + i596_rx(dev); + + netif_start_queue(dev); + init_i596_mem(dev); +} + +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) +{ + struct i596_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + unsigned long flags; + + DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n")); + + cmd->status = 0; + cmd->command |= (CMD_EOL | CMD_INTR); + cmd->v_next = cmd->b_next = I596_NULL; + + spin_lock_irqsave (&lp->lock, flags); + + if (lp->cmd_head != I596_NULL) { + lp->cmd_tail->v_next = cmd; + lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status)); + } else { + lp->cmd_head = cmd; + wait_cmd(dev,lp,100,"i596_add_cmd timed out"); + lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status)); + lp->scb.command = CUC_START; + CA(dev); + } + lp->cmd_tail = cmd; + lp->cmd_backlog++; + + spin_unlock_irqrestore (&lp->lock, flags); + + if (lp->cmd_backlog > max_cmd_backlog) { + unsigned long tickssofar = jiffies - lp->last_cmd; + + if (tickssofar < ticks_limit) + return; + + printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name); + + i596_reset(dev, lp, ioaddr); + } +} + +static int i596_open(struct net_device *dev) +{ + int res = 0; + + DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq)); + + if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { + printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); + return -EAGAIN; + } +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) { + res = -EAGAIN; + goto err_irq_dev; + } + } +#endif + res = init_rx_bufs(dev); + if (res) + goto err_irq_56; + + netif_start_queue(dev); + + if (init_i596_mem(dev)) { + res = -EAGAIN; + goto err_queue; + } + + return 0; + +err_queue: + netif_stop_queue(dev); + remove_rx_bufs(dev); +err_irq_56: +#ifdef ENABLE_MVME16x_NET + free_irq(0x56, dev); +err_irq_dev: +#endif + free_irq(dev->irq, dev); + + return res; +} + +static void i596_tx_timeout (struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int ioaddr = dev->base_addr; + + /* Transmitter timeout, serious problems. 
*/ + DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n", + dev->name)); + + dev->stats.tx_errors++; + + /* Try to restart the adaptor */ + if (lp->last_restart == dev->stats.tx_packets) { + DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n")); + /* Shutdown and restart */ + i596_reset (dev, lp, ioaddr); + } else { + /* Issue a channel attention signal */ + DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n")); + lp->scb.command = CUC_START | RX_START; + CA (dev); + lp->last_restart = dev->stats.tx_packets; + } + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + +static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + struct tx_cmd *tx_cmd; + struct i596_tbd *tbd; + short length = skb->len; + + DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n", + dev->name, skb->len, skb->data)); + + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + netif_stop_queue(dev); + + tx_cmd = lp->tx_cmds + lp->next_tx_cmd; + tbd = lp->tbds + lp->next_tx_cmd; + + if (tx_cmd->cmd.command) { + printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n", + dev->name); + dev->stats.tx_dropped++; + + dev_kfree_skb(skb); + } else { + if (++lp->next_tx_cmd == TX_RING_SIZE) + lp->next_tx_cmd = 0; + tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd)); + tbd->next = I596_NULL; + + tx_cmd->cmd.command = CMD_FLEX | CmdTx; + tx_cmd->skb = skb; + + tx_cmd->pad = 0; + tx_cmd->size = 0; + tbd->pad = 0; + tbd->size = EOF | length; + + tbd->data = WSWAPchar(virt_to_bus(skb->data)); + +#ifdef __mc68000__ + cache_push(virt_to_phys(skb->data), length); +#endif + DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued")); + i596_add_cmd(dev, &tx_cmd->cmd); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; + } + + netif_start_queue(dev); + + return NETDEV_TX_OK; +} + +static void print_eth(unsigned char *add, char *str) +{ + printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", + add, add + 6, add, add[12], add[13], str); +} + +static int io = 0x300; +static int irq = 10; + +static const struct net_device_ops i596_netdev_ops = { + .ndo_open = i596_open, + .ndo_stop = i596_close, + .ndo_start_xmit = i596_start_xmit, + .ndo_set_rx_mode = set_multicast_list, + .ndo_tx_timeout = i596_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +struct net_device * __init i82596_probe(int unit) +{ + struct net_device *dev; + int i; + struct i596_private *lp; + char eth_addr[8]; + static int probed; + int err; + + if (probed) + return ERR_PTR(-ENODEV); + probed++; + + dev = alloc_etherdev(0); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } else { + dev->base_addr = io; + dev->irq = irq; + } + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) { + printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n"); + err = -ENODEV; + goto out; + } + memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! 
Get addr from NOVRAM */ + dev->base_addr = MVME_I596_BASE; + dev->irq = (unsigned) MVME16x_IRQ_I596; + goto found; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE; + unsigned char msr = rtc[3]; + int i; + + rtc[3] |= 0x80; + for (i = 0; i < 6; i++) + eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */ + rtc[3] = msr; + dev->base_addr = BVME_I596_BASE; + dev->irq = (unsigned) BVME_IRQ_I596; + goto found; + } +#endif + err = -ENODEV; + goto out; + +found: + dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0); + if (!dev->mem_start) { + err = -ENOMEM; + goto out1; + } + + DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr)); + + for (i = 0; i < 6; i++) + DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i])); + + DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq)); + + DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); + + /* The 82596-specific entries in the device structure. */ + dev->netdev_ops = &i596_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + dev->ml_priv = (void *)(dev->mem_start); + + lp = dev->ml_priv; + DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), " + "lp->scb at 0x%08lx\n", + dev->name, (unsigned long)lp, + sizeof(struct i596_private), (unsigned long)&lp->scb)); + memset((void *) lp, 0, sizeof(struct i596_private)); + +#ifdef __mc68000__ + cache_push(virt_to_phys((void *)(dev->mem_start)), 4096); + cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096); + kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER); +#endif + lp->scb.command = 0; + lp->scb.cmd = I596_NULL; + lp->scb.rfd = I596_NULL; + spin_lock_init(&lp->lock); + + err = register_netdev(dev); + if (err) + goto out2; + return dev; +out2: +#ifdef __mc68000__ + /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, + * XXX which may be invalid (CONFIG_060_WRITETHROUGH) + */ + kernel_set_cachemode((void *)(dev->mem_start), 4096, + IOMAP_FULL_CACHING); +#endif + free_page ((u32)(dev->mem_start)); +out1: +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static irqreturn_t i596_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct i596_private *lp; + short ioaddr; + unsigned short status, ack_cmd = 0; + int handled = 0; + +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) { + i596_error(irq, dev_id); + return IRQ_HANDLED; + } + } +#endif + if (dev == NULL) { + printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq); + return IRQ_NONE; + } + + ioaddr = dev->base_addr; + lp = dev->ml_priv; + + spin_lock (&lp->lock); + + wait_cmd(dev,lp,100,"i596 interrupt, timeout"); + status = lp->scb.status; + + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n", + dev->name, irq, status)); + + ack_cmd = status & 0xf000; + + if ((status & 0x8000) || (status & 0x2000)) { + struct i596_cmd *ptr; + + handled = 1; + if ((status & 0x8000)) + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name)); + if ((status & 0x2000)) + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700)); + + while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) { + ptr = lp->cmd_head; + + DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n", + lp->cmd_head->status, lp->cmd_head->command)); + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + 
switch ((ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + + if ((ptr->status) & STAT_OK) { + DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); + } else { + dev->stats.tx_errors++; + if ((ptr->status) & 0x0020) + dev->stats.collisions++; + if (!((ptr->status) & 0x0040)) + dev->stats.tx_heartbeat_errors++; + if ((ptr->status) & 0x0400) + dev->stats.tx_carrier_errors++; + if ((ptr->status) & 0x0800) + dev->stats.collisions++; + if ((ptr->status) & 0x1000) + dev->stats.tx_aborted_errors++; + } + + dev_kfree_skb_irq(skb); + + tx_cmd->cmd.command = 0; /* Mark free */ + break; + } + case CmdTDR: + { + unsigned short status = ((struct tdr_cmd *)ptr)->status; + + if (status & 0x8000) { + DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name)); + } else { + if (status & 0x4000) + printk(KERN_ERR "%s: Transceiver problem.\n", dev->name); + if (status & 0x2000) + printk(KERN_ERR "%s: Termination problem.\n", dev->name); + if (status & 0x1000) + printk(KERN_ERR "%s: Short circuit.\n", dev->name); + + DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff)); + } + break; + } + case CmdConfigure: + case CmdMulticastList: + /* Zap command so set_multicast_list() knows it is free */ + ptr->command = 0; + break; + } + ptr->v_next = ptr->b_next = I596_NULL; + lp->last_cmd = jiffies; + } + + ptr = lp->cmd_head; + while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) { + ptr->command &= 0x1fff; + ptr = ptr->v_next; + } + + if ((lp->cmd_head != I596_NULL)) + ack_cmd |= CUC_START; + lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status)); + } + if ((status & 0x1000) || (status & 0x4000)) { + if ((status & 0x4000)) + DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name)); + i596_rx(dev); + /* Only RX_START if stopped - RGH 07-07-96 */ + if (status & 0x1000) { + if (netif_running(dev)) { + DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); + ack_cmd |= RX_START; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + rebuild_rx_bufs(dev); + } + } + } + wait_cmd(dev,lp,100,"i596 interrupt, timeout"); + lp->scb.command = ack_cmd; + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + /* Ack the interrupt */ + + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + pcc2[0x2a] |= 0x08; + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + *ethirq = 3; + } +#endif + CA(dev); + + DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); + + spin_unlock (&lp->lock); + return IRQ_RETVAL(handled); +} + +static int i596_close(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + unsigned long flags; + + netif_stop_queue(dev); + + DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", + dev->name, lp->scb.status)); + + spin_lock_irqsave(&lp->lock, flags); + + wait_cmd(dev,lp,100,"close1 timed out"); + lp->scb.command = CUC_ABORT | RX_ABORT; + CA(dev); + + wait_cmd(dev,lp,100,"close2 timed out"); + + spin_unlock_irqrestore(&lp->lock, flags); + DEB(DEB_STRUCT,i596_display_data(dev)); + i596_cleanup_cmd(dev,lp); + +#ifdef ENABLE_MVME16x_NET + if (MACH_IS_MVME16x) { + volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; + + /* Disable all ints */ + pcc2[0x28] = 1; + pcc2[0x2a] = 0x40; + pcc2[0x2b] = 0x40; /* Set snooping bits now! 
*/ + } +#endif +#ifdef ENABLE_BVME6000_NET + if (MACH_IS_BVME6000) { + volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; + + *ethirq = 1; + } +#endif + +#ifdef ENABLE_MVME16x_NET + free_irq(0x56, dev); +#endif + free_irq(dev->irq, dev); + remove_rx_bufs(dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. + */ + +static void set_multicast_list(struct net_device *dev) +{ + struct i596_private *lp = dev->ml_priv; + int config = 0, cnt; + + DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", + dev->name, netdev_mc_count(dev), + dev->flags & IFF_PROMISC ? "ON" : "OFF", + dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); + + if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out")) + return; + + if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) { + lp->cf_cmd.i596_config[8] |= 0x01; + config = 1; + } + if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) { + lp->cf_cmd.i596_config[8] &= ~0x01; + config = 1; + } + if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) { + lp->cf_cmd.i596_config[11] &= ~0x20; + config = 1; + } + if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) { + lp->cf_cmd.i596_config[11] |= 0x20; + config = 1; + } + if (config) { + lp->cf_cmd.cmd.command = CmdConfigure; + i596_add_cmd(dev, &lp->cf_cmd.cmd); + } + + cnt = netdev_mc_count(dev); + if (cnt > MAX_MC_CNT) + { + cnt = MAX_MC_CNT; + printk(KERN_ERR "%s: Only %d multicast addresses supported", + dev->name, cnt); + } + + if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + unsigned char *cp; + struct mc_cmd *cmd; + + if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) + return; + cmd = &lp->mc_cmd; + cmd->cmd.command = CmdMulticastList; + cmd->mc_cnt = cnt * ETH_ALEN; + cp = cmd->mc_addrs; + netdev_for_each_mc_addr(ha, dev) { + if (!cnt--) + break; + memcpy(cp, ha->addr, ETH_ALEN); + if (i596_debug > 1) + DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", + dev->name, cp)); + cp += ETH_ALEN; + } + i596_add_cmd(dev, &cmd->cmd); + } +} + +#ifdef MODULE +static struct net_device *dev_82596; + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "i82596 debug mask"); + +int __init init_module(void) +{ + if (debug >= 0) + i596_debug = debug; + dev_82596 = i82596_probe(-1); + return PTR_ERR_OR_ZERO(dev_82596); +} + +void __exit cleanup_module(void) +{ + unregister_netdev(dev_82596); +#ifdef __mc68000__ + /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, + * XXX which may be invalid (CONFIG_060_WRITETHROUGH) + */ + + kernel_set_cachemode((void *)(dev_82596->mem_start), 4096, + IOMAP_FULL_CACHING); +#endif + free_page ((u32)(dev_82596->mem_start)); + free_netdev(dev_82596); +} + +#endif /* MODULE */ diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig new file mode 100644 index 000000000..9521e68aa --- /dev/null +++ b/drivers/net/ethernet/i825xx/Kconfig @@ -0,0 +1,69 @@ +# +# Intel 82596/82593/82596 network device configuration +# + +config NET_VENDOR_I825XX + bool "Intel (82586/82593/82596) devices" + default y + depends on NET_VENDOR_INTEL + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . 
+ + Note that the answer to this question does not directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about these devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_I825XX + +config ARM_ETHER1 + tristate "Acorn Ether1 support" + depends on ARM && ARCH_ACORN + ---help--- + If you have an Acorn system with one of these (AKA25) network cards, + you should say Y to this option if you wish to use it with Linux. + +config BVME6000_NET + tristate "BVME6000 Ethernet support" + depends on BVME6000 + ---help--- + This is the driver for the Ethernet interface on BVME4000 and + BVME6000 VME boards. Say Y here to include the driver for this chip + in your kernel. + To compile this driver as a module, choose M here. + +config LASI_82596 + tristate "Lasi ethernet" + depends on GSC + ---help--- + Say Y here to support the builtin Intel 82596 ethernet controller + found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet. + +config MVME16x_NET + tristate "MVME16x Ethernet support" + depends on MVME16x + ---help--- + This is the driver for the Ethernet interface on the Motorola + MVME162, 166, 167, 172 and 177 boards. Say Y here to include the + driver for this chip in your kernel. + To compile this driver as a module, choose M here. + +config SNI_82596 + tristate "SNI RM ethernet" + depends on SNI_RM + ---help--- + Say Y here to support the on-board Intel 82596 ethernet controller + built into SNI RM machines. + +config SUN3_82586 + bool "Sun3 on-board Intel 82586 support" + depends on SUN3 + ---help--- + This driver enables support for the on-board Intel 82586 based + Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note + that this driver does not support 82586-based adapters on additional + VME boards. + +endif # NET_VENDOR_I825XX diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile new file mode 100644 index 000000000..8c8dcd29c --- /dev/null +++ b/drivers/net/ethernet/i825xx/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the Intel 82586/82593/82596 chipset device drivers. +# + +obj-$(CONFIG_ARM_ETHER1) += ether1.o +obj-$(CONFIG_SUN3_82586) += sun3_82586.o +obj-$(CONFIG_LASI_82596) += lasi_82596.o +obj-$(CONFIG_SNI_82596) += sni_82596.o +obj-$(CONFIG_MVME16x_NET) += 82596.o +obj-$(CONFIG_BVME6000_NET) += 82596.o diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c new file mode 100644 index 000000000..5d353c660 --- /dev/null +++ b/drivers/net/ethernet/i825xx/ether1.c @@ -0,0 +1,1087 @@ +/* + * linux/drivers/acorn/net/ether1.c + * + * Copyright (C) 1996-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Acorn ether1 driver (82586 chip) for Acorn machines + * + * We basically keep two queues in the cards memory - one for transmit + * and one for receive. Each has a head and a tail. The head is where + * we/the chip adds packets to be transmitted/received, and the tail + * is where the transmitter has got to/where the receiver will stop. + * Both of these queues are circular, and since the chip is running + * all the time, we have to be careful when we modify the pointers etc + * so that the buffer memory contents is valid all the time. 
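+ *
+ * (In this driver the two queues are tracked as head/tail offsets into
+ * the card RAM -- see tx_head/tx_tail and rx_head/rx_tail in struct
+ * ether1_priv -- rather than as pointers into kernel memory.)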
+ * + * Change log: + * 1.00 RMK Released + * 1.01 RMK 19/03/1996 Transfers the last odd byte onto/off of the card now. + * 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready + * 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt. + * Should prevent lockup. + * 1.04 RMK 17/09/1997 Added more info when initialsation of chip goes wrong. + * TDR now only reports failure when chip reports non-zero + * TDR time-distance. + * 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1 + * 1.06 RMK 10/02/2000 Updated for 2.3.43 + * 1.07 RMK 13/05/2000 Updated for 2.3.99-pre8 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define __ETHER1_C +#include "ether1.h" + +static unsigned int net_debug = NET_DEBUG; + +#define BUFFER_SIZE 0x10000 +#define TX_AREA_START 0x00100 +#define TX_AREA_END 0x05000 +#define RX_AREA_START 0x05000 +#define RX_AREA_END 0x0fc00 + +static int ether1_open(struct net_device *dev); +static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t ether1_interrupt(int irq, void *dev_id); +static int ether1_close(struct net_device *dev); +static void ether1_setmulticastlist(struct net_device *dev); +static void ether1_timeout(struct net_device *dev); + +/* ------------------------------------------------------------------------- */ + +static char version[] = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; + +#define BUS_16 16 +#define BUS_8 8 + +/* ------------------------------------------------------------------------- */ + +#define DISABLEIRQS 1 +#define NORMALIRQS 0 + +#define ether1_readw(dev, addr, type, offset, svflgs) ether1_inw_p (dev, addr + (int)(&((type *)0)->offset), svflgs) +#define ether1_writew(dev, val, addr, type, offset, svflgs) ether1_outw_p (dev, val, addr + (int)(&((type *)0)->offset), svflgs) + +static inline unsigned short +ether1_inw_p (struct net_device *dev, int addr, int svflgs) +{ + unsigned long flags; + unsigned short ret; + + if (svflgs) + local_irq_save (flags); + + writeb(addr >> 12, REG_PAGE); + ret = readw(ETHER1_RAM + ((addr & 4095) << 1)); + if (svflgs) + local_irq_restore (flags); + return ret; +} + +static inline void +ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs) +{ + unsigned long flags; + + if (svflgs) + local_irq_save (flags); + + writeb(addr >> 12, REG_PAGE); + writew(val, ETHER1_RAM + ((addr & 4095) << 1)); + if (svflgs) + local_irq_restore (flags); +} + +/* + * Some inline assembler to allow fast transfers on to/off of the card. + * Since this driver depends on some features presented by the ARM + * specific architecture, and that you can't configure this driver + * without specifiing ARM mode, this is not a problem. + * + * This routine is essentially an optimised memcpy from the card's + * onboard RAM to kernel memory. 
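+ * (ether1_writebuffer below copies kernel memory on to the card;
+ * ether1_readbuffer performs the transfer in the other direction.)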
+ */ +static void +ether1_writebuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) +{ + unsigned int page, thislen, offset; + void __iomem *addr; + + offset = start & 4095; + page = start >> 12; + addr = ETHER1_RAM + (offset << 1); + + if (offset + length > 4096) + thislen = 4096 - offset; + else + thislen = length; + + do { + int used; + + writeb(page, REG_PAGE); + length -= thislen; + + __asm__ __volatile__( + "subs %3, %3, #2\n\ + bmi 2f\n\ +1: ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%1], #2\n\ + mov %0, %0, lsl #16\n\ + orr %0, %0, %0, lsr #16\n\ + str %0, [%2], #4\n\ + subs %3, %3, #2\n\ + bpl 1b\n\ +2: adds %3, %3, #1\n\ + ldreqb %0, [%1]\n\ + streqb %0, [%2]" + : "=&r" (used), "=&r" (data) + : "r" (addr), "r" (thislen), "1" (data)); + + addr = ETHER1_RAM; + + thislen = length; + if (thislen > 4096) + thislen = 4096; + page++; + } while (thislen); +} + +static void +ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsigned int length) +{ + unsigned int page, thislen, offset; + void __iomem *addr; + + offset = start & 4095; + page = start >> 12; + addr = ETHER1_RAM + (offset << 1); + + if (offset + length > 4096) + thislen = 4096 - offset; + else + thislen = length; + + do { + int used; + + writeb(page, REG_PAGE); + length -= thislen; + + __asm__ __volatile__( + "subs %3, %3, #2\n\ + bmi 2f\n\ +1: ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bmi 2f\n\ + ldr %0, [%2], #4\n\ + strb %0, [%1], #1\n\ + mov %0, %0, lsr #8\n\ + strb %0, [%1], #1\n\ + subs %3, %3, #2\n\ + bpl 1b\n\ +2: adds %3, %3, #1\n\ + ldreqb %0, [%2]\n\ + streqb %0, [%1]" + : "=&r" (used), "=&r" (data) + : "r" (addr), "r" (thislen), "1" (data)); + + addr = ETHER1_RAM; + + thislen = length; + if (thislen > 4096) + thislen = 4096; + page++; + } while (thislen); +} + +static int +ether1_ramtest(struct net_device *dev, unsigned char byte) +{ + unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL); + int i, ret = BUFFER_SIZE; + int max_errors = 15; + int bad = -1; + int bad_start = 0; + + if (!buffer) + return 1; + + memset (buffer, byte, BUFFER_SIZE); + ether1_writebuffer (dev, buffer, 0, BUFFER_SIZE); + memset (buffer, byte ^ 0xff, BUFFER_SIZE); + ether1_readbuffer (dev, buffer, 0, BUFFER_SIZE); + + for (i = 0; i < BUFFER_SIZE; i++) { + if (buffer[i] != byte) { + if (max_errors >= 0 && bad != buffer[i]) { + if (bad != -1) + printk ("\n"); + printk (KERN_CRIT "%s: RAM failed with (%02X instead of %02X) at 0x%04X", + dev->name, buffer[i], byte, i); + ret = -ENODEV; + max_errors --; + bad = buffer[i]; + bad_start = i; + } + } else { + if (bad != -1) { + if (bad_start == i - 1) + printk ("\n"); + else + printk (" - 0x%04X\n", i - 1); + bad = -1; + } + } + } + + if (bad != -1) + printk (" - 0x%04X\n", BUFFER_SIZE); + kfree (buffer); + + return ret; +} + +static int +ether1_reset (struct net_device 
*dev) +{ + writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); + return BUS_16; +} + +static int +ether1_init_2(struct net_device *dev) +{ + int i; + dev->mem_start = 0; + + i = ether1_ramtest (dev, 0x5a); + + if (i > 0) + i = ether1_ramtest (dev, 0x1e); + + if (i <= 0) + return -ENODEV; + + dev->mem_end = i; + return 0; +} + +/* + * These are the structures that are loaded into the ether RAM card to + * initialise the 82586 + */ + +/* at 0x0100 */ +#define NOP_ADDR (TX_AREA_START) +#define NOP_SIZE (0x06) +static nop_t init_nop = { + 0, + CMD_NOP, + NOP_ADDR +}; + +/* at 0x003a */ +#define TDR_ADDR (0x003a) +#define TDR_SIZE (0x08) +static tdr_t init_tdr = { + 0, + CMD_TDR | CMD_INTR, + NOP_ADDR, + 0 +}; + +/* at 0x002e */ +#define MC_ADDR (0x002e) +#define MC_SIZE (0x0c) +static mc_t init_mc = { + 0, + CMD_SETMULTICAST, + TDR_ADDR, + 0, + { { 0, } } +}; + +/* at 0x0022 */ +#define SA_ADDR (0x0022) +#define SA_SIZE (0x0c) +static sa_t init_sa = { + 0, + CMD_SETADDRESS, + MC_ADDR, + { 0, } +}; + +/* at 0x0010 */ +#define CFG_ADDR (0x0010) +#define CFG_SIZE (0x12) +static cfg_t init_cfg = { + 0, + CMD_CONFIG, + SA_ADDR, + 8, + 8, + CFG8_SRDY, + CFG9_PREAMB8 | CFG9_ADDRLENBUF | CFG9_ADDRLEN(6), + 0, + 0x60, + 0, + CFG13_RETRY(15) | CFG13_SLOTH(2), + 0, +}; + +/* at 0x0000 */ +#define SCB_ADDR (0x0000) +#define SCB_SIZE (0x10) +static scb_t init_scb = { + 0, + SCB_CMDACKRNR | SCB_CMDACKCNA | SCB_CMDACKFR | SCB_CMDACKCX, + CFG_ADDR, + RX_AREA_START, + 0, + 0, + 0, + 0 +}; + +/* at 0xffee */ +#define ISCP_ADDR (0xffee) +#define ISCP_SIZE (0x08) +static iscp_t init_iscp = { + 1, + SCB_ADDR, + 0x0000, + 0x0000 +}; + +/* at 0xfff6 */ +#define SCP_ADDR (0xfff6) +#define SCP_SIZE (0x0a) +static scp_t init_scp = { + SCP_SY_16BBUS, + { 0, 0 }, + ISCP_ADDR, + 0 +}; + +#define RFD_SIZE (0x16) +static rfd_t init_rfd = { + 0, + 0, + 0, + 0, + { 0, }, + { 0, }, + 0 +}; + +#define RBD_SIZE (0x0a) +static rbd_t init_rbd = { + 0, + 0, + 0, + 0, + ETH_FRAME_LEN + 8 +}; + +#define TX_SIZE (0x08) +#define TBD_SIZE (0x08) + +static int +ether1_init_for_open (struct net_device *dev) +{ + int i, status, addr, next, next2; + int failures = 0; + unsigned long timeout; + + writeb(CTRL_RST|CTRL_ACK, REG_CONTROL); + + for (i = 0; i < 6; i++) + init_sa.sa_addr[i] = dev->dev_addr[i]; + + /* load data structures into ether1 RAM */ + ether1_writebuffer (dev, &init_scp, SCP_ADDR, SCP_SIZE); + ether1_writebuffer (dev, &init_iscp, ISCP_ADDR, ISCP_SIZE); + ether1_writebuffer (dev, &init_scb, SCB_ADDR, SCB_SIZE); + ether1_writebuffer (dev, &init_cfg, CFG_ADDR, CFG_SIZE); + ether1_writebuffer (dev, &init_sa, SA_ADDR, SA_SIZE); + ether1_writebuffer (dev, &init_mc, MC_ADDR, MC_SIZE); + ether1_writebuffer (dev, &init_tdr, TDR_ADDR, TDR_SIZE); + ether1_writebuffer (dev, &init_nop, NOP_ADDR, NOP_SIZE); + + if (ether1_readw(dev, CFG_ADDR, cfg_t, cfg_command, NORMALIRQS) != CMD_CONFIG) { + printk (KERN_ERR "%s: detected either RAM fault or compiler bug\n", + dev->name); + return 1; + } + + /* + * setup circularly linked list of { rfd, rbd, buffer }, with + * all rfds circularly linked, rbds circularly linked. + * First rfd is linked to scp, first rbd is linked to first + * rfd. Last rbd has a suspend command. 
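+ * Each slot takes RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10 bytes of
+ * card RAM, which is the step the 'next' arithmetic below uses.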
+ */ + addr = RX_AREA_START; + do { + next = addr + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; + next2 = next + RFD_SIZE + RBD_SIZE + ETH_FRAME_LEN + 10; + + if (next2 >= RX_AREA_END) { + next = RX_AREA_START; + init_rfd.rfd_command = RFD_CMDEL | RFD_CMDSUSPEND; + priv(dev)->rx_tail = addr; + } else + init_rfd.rfd_command = 0; + if (addr == RX_AREA_START) + init_rfd.rfd_rbdoffset = addr + RFD_SIZE; + else + init_rfd.rfd_rbdoffset = 0; + init_rfd.rfd_link = next; + init_rbd.rbd_link = next + RFD_SIZE; + init_rbd.rbd_bufl = addr + RFD_SIZE + RBD_SIZE; + + ether1_writebuffer (dev, &init_rfd, addr, RFD_SIZE); + ether1_writebuffer (dev, &init_rbd, addr + RFD_SIZE, RBD_SIZE); + addr = next; + } while (next2 < RX_AREA_END); + + priv(dev)->tx_link = NOP_ADDR; + priv(dev)->tx_head = NOP_ADDR + NOP_SIZE; + priv(dev)->tx_tail = TDR_ADDR; + priv(dev)->rx_head = RX_AREA_START; + + /* release reset & give 586 a prod */ + priv(dev)->resetting = 1; + priv(dev)->initialising = 1; + writeb(CTRL_RST, REG_CONTROL); + writeb(0, REG_CONTROL); + writeb(CTRL_CA, REG_CONTROL); + + /* 586 should now unset iscp.busy */ + timeout = jiffies + HZ/2; + while (ether1_readw(dev, ISCP_ADDR, iscp_t, iscp_busy, DISABLEIRQS) == 1) { + if (time_after(jiffies, timeout)) { + printk (KERN_WARNING "%s: can't initialise 82586: iscp is busy\n", dev->name); + return 1; + } + } + + /* check status of commands that we issued */ + timeout += HZ/10; + while (((status = ether1_readw(dev, CFG_ADDR, cfg_t, cfg_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't initialise 82586: config status %04X\n", dev->name, status); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + failures += 1; + } + + timeout += HZ/10; + while (((status = ether1_readw(dev, SA_ADDR, sa_t, sa_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't initialise 82586: set address status %04X\n", dev->name, status); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + failures += 1; + } + + timeout += HZ/10; + while (((status = ether1_readw(dev, MC_ADDR, mc_t, mc_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't initialise 82586: set multicast status %04X\n", dev->name, status); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + failures += 1; + } + + 
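+	/* Finally wait (a full HZ - the cable test is slow) for the TDR
+	 * command; a failure here is only reported, never treated as fatal.
+	 */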
timeout += HZ; + while (((status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_status, DISABLEIRQS)) + & STAT_COMPLETE) == 0) { + if (time_after(jiffies, timeout)) + break; + } + + if ((status & (STAT_COMPLETE | STAT_OK)) != (STAT_COMPLETE | STAT_OK)) { + printk (KERN_WARNING "%s: can't tdr (ignored)\n", dev->name); + printk (KERN_DEBUG "%s: SCB=[STS=%04X CMD=%04X CBL=%04X RFA=%04X]\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_command, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS), + ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, NORMALIRQS)); + } else { + status = ether1_readw(dev, TDR_ADDR, tdr_t, tdr_result, DISABLEIRQS); + if (status & TDR_XCVRPROB) + printk (KERN_WARNING "%s: i/f failed tdr: transceiver problem\n", dev->name); + else if ((status & (TDR_SHORT|TDR_OPEN)) && (status & TDR_TIME)) { +#ifdef FANCY + printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d.%d us away\n", dev->name, + status & TDR_SHORT ? "short" : "open", (status & TDR_TIME) / 10, + (status & TDR_TIME) % 10); +#else + printk (KERN_WARNING "%s: i/f failed tdr: cable %s %d clks away\n", dev->name, + status & TDR_SHORT ? "short" : "open", (status & TDR_TIME)); +#endif + } + } + + if (failures) + ether1_reset (dev); + return failures ? 1 : 0; +} + +/* ------------------------------------------------------------------------- */ + +static int +ether1_txalloc (struct net_device *dev, int size) +{ + int start, tail; + + size = (size + 1) & ~1; + tail = priv(dev)->tx_tail; + + if (priv(dev)->tx_head + size > TX_AREA_END) { + if (tail > priv(dev)->tx_head) + return -1; + start = TX_AREA_START; + if (start + size > tail) + return -1; + priv(dev)->tx_head = start + size; + } else { + if (priv(dev)->tx_head < tail && (priv(dev)->tx_head + size) > tail) + return -1; + start = priv(dev)->tx_head; + priv(dev)->tx_head += size; + } + + return start; +} + +static int +ether1_open (struct net_device *dev) +{ + if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev)) + return -EAGAIN; + + if (ether1_init_for_open (dev)) { + free_irq (dev->irq, dev); + return -EAGAIN; + } + + netif_start_queue(dev); + + return 0; +} + +static void +ether1_timeout(struct net_device *dev) +{ + printk(KERN_WARNING "%s: transmit timeout, network cable problem?\n", + dev->name); + printk(KERN_WARNING "%s: resetting device\n", dev->name); + + ether1_reset (dev); + + if (ether1_init_for_open (dev)) + printk (KERN_ERR "%s: unable to restart interface\n", dev->name); + + dev->stats.tx_errors++; + netif_wake_queue(dev); +} + +static int +ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) +{ + int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; + unsigned long flags; + tx_t tx; + tbd_t tbd; + nop_t nop; + + if (priv(dev)->restart) { + printk(KERN_WARNING "%s: resetting device\n", dev->name); + + ether1_reset(dev); + + if (ether1_init_for_open(dev)) + printk(KERN_ERR "%s: unable to restart interface\n", dev->name); + else + priv(dev)->restart = 0; + } + + if (skb->len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + goto out; + } + + /* + * insert packet followed by a nop + */ + txaddr = ether1_txalloc (dev, TX_SIZE); + tbdaddr = ether1_txalloc (dev, TBD_SIZE); + dataddr = ether1_txalloc (dev, skb->len); + nopaddr = ether1_txalloc (dev, NOP_SIZE); + + tx.tx_status = 0; + tx.tx_command = CMD_TX | CMD_INTR; + tx.tx_link = nopaddr; + tx.tx_tbdoffset = tbdaddr; + tbd.tbd_opts = TBD_EOL | skb->len; + tbd.tbd_link = I82586_NULL; + tbd.tbd_bufl = 
dataddr; + tbd.tbd_bufh = 0; + nop.nop_status = 0; + nop.nop_command = CMD_NOP; + nop.nop_link = nopaddr; + + local_irq_save(flags); + ether1_writebuffer (dev, &tx, txaddr, TX_SIZE); + ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE); + ether1_writebuffer (dev, skb->data, dataddr, skb->len); + ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE); + tmp = priv(dev)->tx_link; + priv(dev)->tx_link = nopaddr; + + /* now reset the previous nop pointer */ + ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS); + + local_irq_restore(flags); + + /* handle transmit */ + + /* check to see if we have room for a full sized ether frame */ + tmp = priv(dev)->tx_head; + tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); + priv(dev)->tx_head = tmp; + dev_kfree_skb (skb); + + if (tst == -1) + netif_stop_queue(dev); + + out: + return NETDEV_TX_OK; +} + +static void +ether1_xmit_done (struct net_device *dev) +{ + nop_t nop; + int caddr, tst; + + caddr = priv(dev)->tx_tail; + +again: + ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); + + switch (nop.nop_command & CMD_MASK) { + case CMD_TDR: + /* special case */ + if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) + != (unsigned short)I82586_NULL) { + ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR, scb_t, + scb_command, NORMALIRQS); + writeb(CTRL_CA, REG_CONTROL); + } + priv(dev)->tx_tail = NOP_ADDR; + return; + + case CMD_NOP: + if (nop.nop_link == caddr) { + if (priv(dev)->initialising == 0) + printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name); + else + priv(dev)->initialising = 0; + return; + } + if (caddr == nop.nop_link) + return; + caddr = nop.nop_link; + goto again; + + case CMD_TX: + if (nop.nop_status & STAT_COMPLETE) + break; + printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name); + priv(dev)->restart = 1; + return; + + default: + printk (KERN_WARNING "%s: strange command %d complete! 
(offset %04X)", dev->name, + nop.nop_command & CMD_MASK, caddr); + priv(dev)->restart = 1; + return; + } + + while (nop.nop_status & STAT_COMPLETE) { + if (nop.nop_status & STAT_OK) { + dev->stats.tx_packets++; + dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS); + } else { + dev->stats.tx_errors++; + + if (nop.nop_status & STAT_COLLAFTERTX) + dev->stats.collisions++; + if (nop.nop_status & STAT_NOCARRIER) + dev->stats.tx_carrier_errors++; + if (nop.nop_status & STAT_TXLOSTCTS) + printk (KERN_WARNING "%s: cts lost\n", dev->name); + if (nop.nop_status & STAT_TXSLOWDMA) + dev->stats.tx_fifo_errors++; + if (nop.nop_status & STAT_COLLEXCESSIVE) + dev->stats.collisions += 16; + } + + if (nop.nop_link == caddr) { + printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name); + break; + } + + caddr = nop.nop_link; + ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); + if ((nop.nop_command & CMD_MASK) != CMD_NOP) { + printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name); + break; + } + + if (caddr == nop.nop_link) + break; + + caddr = nop.nop_link; + ether1_readbuffer (dev, &nop, caddr, NOP_SIZE); + if ((nop.nop_command & CMD_MASK) != CMD_TX) { + printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name); + break; + } + } + priv(dev)->tx_tail = caddr; + + caddr = priv(dev)->tx_head; + tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN); + priv(dev)->tx_head = caddr; + if (tst != -1) + netif_wake_queue(dev); +} + +static void +ether1_recv_done (struct net_device *dev) +{ + int status; + int nexttail, rbdaddr; + rbd_t rbd; + + do { + status = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_status, NORMALIRQS); + if ((status & RFD_COMPLETE) == 0) + break; + + rbdaddr = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_rbdoffset, NORMALIRQS); + ether1_readbuffer (dev, &rbd, rbdaddr, RBD_SIZE); + + if ((rbd.rbd_status & (RBD_EOF | RBD_ACNTVALID)) == (RBD_EOF | RBD_ACNTVALID)) { + int length = rbd.rbd_status & RBD_ACNT; + struct sk_buff *skb; + + length = (length + 1) & ~1; + skb = netdev_alloc_skb(dev, length + 2); + + if (skb) { + skb_reserve (skb, 2); + + ether1_readbuffer (dev, skb_put (skb, length), rbd.rbd_bufl, length); + + skb->protocol = eth_type_trans (skb, dev); + netif_rx (skb); + dev->stats.rx_packets++; + } else + dev->stats.rx_dropped++; + } else { + printk(KERN_WARNING "%s: %s\n", dev->name, + (rbd.rbd_status & RBD_EOF) ? 
"oversized packet" : "acnt not valid"); + dev->stats.rx_dropped++; + } + + nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS); + /* nexttail should be rx_head */ + if (nexttail != priv(dev)->rx_head) + printk(KERN_ERR "%s: receiver buffer chaining error (%04X != %04X)\n", + dev->name, nexttail, priv(dev)->rx_head); + ether1_writew(dev, RFD_CMDEL | RFD_CMDSUSPEND, nexttail, rfd_t, rfd_command, NORMALIRQS); + ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_command, NORMALIRQS); + ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_status, NORMALIRQS); + ether1_writew(dev, 0, priv(dev)->rx_tail, rfd_t, rfd_rbdoffset, NORMALIRQS); + + priv(dev)->rx_tail = nexttail; + priv(dev)->rx_head = ether1_readw(dev, priv(dev)->rx_head, rfd_t, rfd_link, NORMALIRQS); + } while (1); +} + +static irqreturn_t +ether1_interrupt (int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + int status; + + status = ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS); + + if (status) { + ether1_writew(dev, status & (SCB_STRNR | SCB_STCNA | SCB_STFR | SCB_STCX), + SCB_ADDR, scb_t, scb_command, NORMALIRQS); + writeb(CTRL_CA | CTRL_ACK, REG_CONTROL); + if (status & SCB_STCX) { + ether1_xmit_done (dev); + } + if (status & SCB_STCNA) { + if (priv(dev)->resetting == 0) + printk (KERN_WARNING "%s: CU went not ready ???\n", dev->name); + else + priv(dev)->resetting += 1; + if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS) + != (unsigned short)I82586_NULL) { + ether1_writew(dev, SCB_CMDCUCSTART, SCB_ADDR, scb_t, scb_command, NORMALIRQS); + writeb(CTRL_CA, REG_CONTROL); + } + if (priv(dev)->resetting == 2) + priv(dev)->resetting = 0; + } + if (status & SCB_STFR) { + ether1_recv_done (dev); + } + if (status & SCB_STRNR) { + if (ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS) & SCB_STRXSUSP) { + printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name); + ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS); + writeb(CTRL_CA, REG_CONTROL); + dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */ + } else + printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name, + ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS)); + printk (KERN_WARNING "RU ptr = %04X\n", ether1_readw(dev, SCB_ADDR, scb_t, scb_rfa_offset, + NORMALIRQS)); + } + } else + writeb(CTRL_ACK, REG_CONTROL); + + return IRQ_HANDLED; +} + +static int +ether1_close (struct net_device *dev) +{ + ether1_reset (dev); + + free_irq(dev->irq, dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. + * num_addrs == -1 Promiscuous mode, receive all packets. + * num_addrs == 0 Normal mode, clear multicast list. + * num_addrs > 0 Multicast mode, receive normal and MC packets, and do + * best-effort filtering. 
+ */ +static void +ether1_setmulticastlist (struct net_device *dev) +{ +} + +/* ------------------------------------------------------------------------- */ + +static void ether1_banner(void) +{ + static unsigned int version_printed = 0; + + if (net_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + +static const struct net_device_ops ether1_netdev_ops = { + .ndo_open = ether1_open, + .ndo_stop = ether1_close, + .ndo_start_xmit = ether1_sendpacket, + .ndo_set_rx_mode = ether1_setmulticastlist, + .ndo_tx_timeout = ether1_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int +ether1_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + struct net_device *dev; + int i, ret = 0; + + ether1_banner(); + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + dev = alloc_etherdev(sizeof(struct ether1_priv)); + if (!dev) { + ret = -ENOMEM; + goto release; + } + + SET_NETDEV_DEV(dev, &ec->dev); + + dev->irq = ec->irq; + priv(dev)->base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); + if (!priv(dev)->base) { + ret = -ENOMEM; + goto free; + } + + if ((priv(dev)->bus_type = ether1_reset(dev)) == 0) { + ret = -ENODEV; + goto free; + } + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2)); + + if (ether1_init_2(dev)) { + ret = -ENODEV; + goto free; + } + + dev->netdev_ops = ðer1_netdev_ops; + dev->watchdog_timeo = 5 * HZ / 100; + + ret = register_netdev(dev); + if (ret) + goto free; + + printk(KERN_INFO "%s: ether1 in slot %d, %pM\n", + dev->name, ec->slot_no, dev->dev_addr); + + ecard_set_drvdata(ec, dev); + return 0; + + free: + free_netdev(dev); + release: + ecard_release_resources(ec); + out: + return ret; +} + +static void ether1_remove(struct expansion_card *ec) +{ + struct net_device *dev = ecard_get_drvdata(ec); + + ecard_set_drvdata(ec, NULL); + + unregister_netdev(dev); + free_netdev(dev); + ecard_release_resources(ec); +} + +static const struct ecard_id ether1_ids[] = { + { MANU_ACORN, PROD_ACORN_ETHER1 }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver ether1_driver = { + .probe = ether1_probe, + .remove = ether1_remove, + .id_table = ether1_ids, + .drv = { + .name = "ether1", + }, +}; + +static int __init ether1_init(void) +{ + return ecard_register_driver(ðer1_driver); +} + +static void __exit ether1_exit(void) +{ + ecard_remove_driver(ðer1_driver); +} + +module_init(ether1_init); +module_exit(ether1_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/i825xx/ether1.h b/drivers/net/ethernet/i825xx/ether1.h new file mode 100644 index 000000000..3a5830ab3 --- /dev/null +++ b/drivers/net/ethernet/i825xx/ether1.h @@ -0,0 +1,280 @@ +/* + * linux/drivers/acorn/net/ether1.h + * + * Copyright (C) 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Network driver for Acorn Ether1 cards. 
+ */ + +#ifndef _LINUX_ether1_H +#define _LINUX_ether1_H + +#ifdef __ETHER1_C +/* use 0 for production, 1 for verification, >2 for debug */ +#ifndef NET_DEBUG +#define NET_DEBUG 0 +#endif + +#define priv(dev) ((struct ether1_priv *)netdev_priv(dev)) + +/* Page register */ +#define REG_PAGE (priv(dev)->base + 0x0000) + +/* Control register */ +#define REG_CONTROL (priv(dev)->base + 0x0004) +#define CTRL_RST 0x01 +#define CTRL_LOOPBACK 0x02 +#define CTRL_CA 0x04 +#define CTRL_ACK 0x08 + +#define ETHER1_RAM (priv(dev)->base + 0x2000) + +/* HW address */ +#define IDPROM_ADDRESS (priv(dev)->base + 0x0024) + +struct ether1_priv { + void __iomem *base; + unsigned int tx_link; + unsigned int tx_head; + volatile unsigned int tx_tail; + volatile unsigned int rx_head; + volatile unsigned int rx_tail; + unsigned char bus_type; + unsigned char resetting; + unsigned char initialising : 1; + unsigned char restart : 1; +}; + +#define I82586_NULL (-1) + +typedef struct { /* tdr */ + unsigned short tdr_status; + unsigned short tdr_command; + unsigned short tdr_link; + unsigned short tdr_result; +#define TDR_TIME (0x7ff) +#define TDR_SHORT (1 << 12) +#define TDR_OPEN (1 << 13) +#define TDR_XCVRPROB (1 << 14) +#define TDR_LNKOK (1 << 15) +} tdr_t; + +typedef struct { /* transmit */ + unsigned short tx_status; + unsigned short tx_command; + unsigned short tx_link; + unsigned short tx_tbdoffset; +} tx_t; + +typedef struct { /* tbd */ + unsigned short tbd_opts; +#define TBD_CNT (0x3fff) +#define TBD_EOL (1 << 15) + unsigned short tbd_link; + unsigned short tbd_bufl; + unsigned short tbd_bufh; +} tbd_t; + +typedef struct { /* rfd */ + unsigned short rfd_status; +#define RFD_NOEOF (1 << 6) +#define RFD_FRAMESHORT (1 << 7) +#define RFD_DMAOVRN (1 << 8) +#define RFD_NORESOURCES (1 << 9) +#define RFD_ALIGNERROR (1 << 10) +#define RFD_CRCERROR (1 << 11) +#define RFD_OK (1 << 13) +#define RFD_FDCONSUMED (1 << 14) +#define RFD_COMPLETE (1 << 15) + unsigned short rfd_command; +#define RFD_CMDSUSPEND (1 << 14) +#define RFD_CMDEL (1 << 15) + unsigned short rfd_link; + unsigned short rfd_rbdoffset; + unsigned char rfd_dest[6]; + unsigned char rfd_src[6]; + unsigned short rfd_len; +} rfd_t; + +typedef struct { /* rbd */ + unsigned short rbd_status; +#define RBD_ACNT (0x3fff) +#define RBD_ACNTVALID (1 << 14) +#define RBD_EOF (1 << 15) + unsigned short rbd_link; + unsigned short rbd_bufl; + unsigned short rbd_bufh; + unsigned short rbd_len; +} rbd_t; + +typedef struct { /* nop */ + unsigned short nop_status; + unsigned short nop_command; + unsigned short nop_link; +} nop_t; + +typedef struct { /* set multicast */ + unsigned short mc_status; + unsigned short mc_command; + unsigned short mc_link; + unsigned short mc_cnt; + unsigned char mc_addrs[1][6]; +} mc_t; + +typedef struct { /* set address */ + unsigned short sa_status; + unsigned short sa_command; + unsigned short sa_link; + unsigned char sa_addr[6]; +} sa_t; + +typedef struct { /* config command */ + unsigned short cfg_status; + unsigned short cfg_command; + unsigned short cfg_link; + unsigned char cfg_bytecnt; /* size foll data: 4 - 12 */ + unsigned char cfg_fifolim; /* FIFO threshold */ + unsigned char cfg_byte8; +#define CFG8_SRDY (1 << 6) +#define CFG8_SAVEBADF (1 << 7) + unsigned char cfg_byte9; +#define CFG9_ADDRLEN(x) (x) +#define CFG9_ADDRLENBUF (1 << 3) +#define CFG9_PREAMB2 (0 << 4) +#define CFG9_PREAMB4 (1 << 4) +#define CFG9_PREAMB8 (2 << 4) +#define CFG9_PREAMB16 (3 << 4) +#define CFG9_ILOOPBACK (1 << 6) +#define CFG9_ELOOPBACK (1 << 7) + unsigned char 
cfg_byte10; +#define CFG10_LINPRI(x) (x) +#define CFG10_ACR(x) (x << 4) +#define CFG10_BOFMET (1 << 7) + unsigned char cfg_ifs; + unsigned char cfg_slotl; + unsigned char cfg_byte13; +#define CFG13_SLOTH(x) (x) +#define CFG13_RETRY(x) (x << 4) + unsigned char cfg_byte14; +#define CFG14_PROMISC (1 << 0) +#define CFG14_DISBRD (1 << 1) +#define CFG14_MANCH (1 << 2) +#define CFG14_TNCRS (1 << 3) +#define CFG14_NOCRC (1 << 4) +#define CFG14_CRC16 (1 << 5) +#define CFG14_BTSTF (1 << 6) +#define CFG14_FLGPAD (1 << 7) + unsigned char cfg_byte15; +#define CFG15_CSTF(x) (x) +#define CFG15_ICSS (1 << 3) +#define CFG15_CDTF(x) (x << 4) +#define CFG15_ICDS (1 << 7) + unsigned short cfg_minfrmlen; +} cfg_t; + +typedef struct { /* scb */ + unsigned short scb_status; /* status of 82586 */ +#define SCB_STRXMASK (7 << 4) /* Receive unit status */ +#define SCB_STRXIDLE (0 << 4) /* Idle */ +#define SCB_STRXSUSP (1 << 4) /* Suspended */ +#define SCB_STRXNRES (2 << 4) /* No resources */ +#define SCB_STRXRDY (4 << 4) /* Ready */ +#define SCB_STCUMASK (7 << 8) /* Command unit status */ +#define SCB_STCUIDLE (0 << 8) /* Idle */ +#define SCB_STCUSUSP (1 << 8) /* Suspended */ +#define SCB_STCUACTV (2 << 8) /* Active */ +#define SCB_STRNR (1 << 12) /* Receive unit not ready */ +#define SCB_STCNA (1 << 13) /* Command unit not ready */ +#define SCB_STFR (1 << 14) /* Frame received */ +#define SCB_STCX (1 << 15) /* Command completed */ + unsigned short scb_command; /* Next command */ +#define SCB_CMDRXSTART (1 << 4) /* Start (at rfa_offset) */ +#define SCB_CMDRXRESUME (2 << 4) /* Resume reception */ +#define SCB_CMDRXSUSPEND (3 << 4) /* Suspend reception */ +#define SCB_CMDRXABORT (4 << 4) /* Abort reception */ +#define SCB_CMDCUCSTART (1 << 8) /* Start (at cbl_offset) */ +#define SCB_CMDCUCRESUME (2 << 8) /* Resume execution */ +#define SCB_CMDCUCSUSPEND (3 << 8) /* Suspend execution */ +#define SCB_CMDCUCABORT (4 << 8) /* Abort execution */ +#define SCB_CMDACKRNR (1 << 12) /* Ack RU not ready */ +#define SCB_CMDACKCNA (1 << 13) /* Ack CU not ready */ +#define SCB_CMDACKFR (1 << 14) /* Ack Frame received */ +#define SCB_CMDACKCX (1 << 15) /* Ack Command complete */ + unsigned short scb_cbl_offset; /* Offset of first command unit */ + unsigned short scb_rfa_offset; /* Offset of first receive frame area */ + unsigned short scb_crc_errors; /* Properly aligned frame with CRC error*/ + unsigned short scb_aln_errors; /* Misaligned frames */ + unsigned short scb_rsc_errors; /* Frames lost due to no space */ + unsigned short scb_ovn_errors; /* Frames lost due to slow bus */ +} scb_t; + +typedef struct { /* iscp */ + unsigned short iscp_busy; /* set by CPU before CA */ + unsigned short iscp_offset; /* offset of SCB */ + unsigned short iscp_basel; /* base of SCB */ + unsigned short iscp_baseh; +} iscp_t; + + /* this address must be 0xfff6 */ +typedef struct { /* scp */ + unsigned short scp_sysbus; /* bus size */ +#define SCP_SY_16BBUS 0x00 +#define SCP_SY_8BBUS 0x01 + unsigned short scp_junk[2]; /* junk */ + unsigned short scp_iscpl; /* lower 16 bits of iscp */ + unsigned short scp_iscph; /* upper 16 bits of iscp */ +} scp_t; + +/* commands */ +#define CMD_NOP 0 +#define CMD_SETADDRESS 1 +#define CMD_CONFIG 2 +#define CMD_SETMULTICAST 3 +#define CMD_TX 4 +#define CMD_TDR 5 +#define CMD_DUMP 6 +#define CMD_DIAGNOSE 7 + +#define CMD_MASK 7 + +#define CMD_INTR (1 << 13) +#define CMD_SUSP (1 << 14) +#define CMD_EOL (1 << 15) + +#define STAT_COLLISIONS (15) +#define STAT_COLLEXCESSIVE (1 << 5) +#define STAT_COLLAFTERTX (1 << 6) 
+#define STAT_TXDEFERRED (1 << 7) +#define STAT_TXSLOWDMA (1 << 8) +#define STAT_TXLOSTCTS (1 << 9) +#define STAT_NOCARRIER (1 << 10) +#define STAT_FAIL (1 << 11) +#define STAT_ABORTED (1 << 12) +#define STAT_OK (1 << 13) +#define STAT_BUSY (1 << 14) +#define STAT_COMPLETE (1 << 15) +#endif +#endif + +/* + * Ether1 card definitions: + * + * FAST accesses: + * +0 Page register + * 16 pages + * +4 Control + * '1' = reset + * '2' = loopback + * '4' = CA + * '8' = int ack + * + * RAM at address + 0x2000 + * Pod. Prod id = 3 + * Words after ID block [base + 8 words] + * +0 pcb issue (0x0c and 0xf3 invalid) + * +1 - +6 eth hw address + */ diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c new file mode 100644 index 000000000..d787fdd5d --- /dev/null +++ b/drivers/net/ethernet/i825xx/lasi_82596.c @@ -0,0 +1,237 @@ +/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as + munged into HPPA boxen . + + This driver is based upon 82596.c, original credits are below... + but there were too many hoops which HP wants jumped through to + keep this code in there in a sane manner. + + 3 primary sources of the mess -- + 1) hppa needs *lots* of cacheline flushing to keep this kind of + MMIO running. + + 2) The 82596 needs to see all of its pointers as their physical + address. Thus virt_to_bus/bus_to_virt are *everywhere*. + + 3) The implementation HP is using seems to be significantly pickier + about when and how the command and RX units are started. some + command ordering was changed. + + Examination of the mach driver leads one to believe that there + might be a saner way to pull this off... anyone who feels like a + full rewrite can be my guest. + + Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) + + 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) + 03/02/2000 changes for better/correct(?) cache-flushing (deller) +*/ + +/* 82596.c: A generic 82596 ethernet driver for linux. */ +/* + Based on Apricot.c + Written 1994 by Mark Evans. + This driver is for the Apricot 82596 bus-master interface + + Modularised 12/94 Mark Evans + + + Modified to support the 82596 ethernet chips on 680x0 VME boards. + by Richard Hirst + Renamed to be 82596.c + + 980825: Changed to receive directly in to sk_buffs which are + allocated at open() time. Eliminates copy on incoming frames + (small ones are still copied). Shared data now held in a + non-cached page, so we can run on 68060 in copyback mode. + + TBD: + * look at deferring rx frames rather than discarding (as per tulip) + * handle tx ring full as per tulip + * performance test to tune rx_copybreak + + Most of my modifications relate to the braindead big-endian + implementation by Intel. When the i596 is operating in + 'big-endian' mode, it thinks a 32 bit value of 0x12345678 + should be stored as 0x56781234. This is a real pain, when + you have linked lists which are shared by the 680x0 and the + i596. + + Driver skeleton + Written 1993 by Donald Becker. + Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and distributed + according to the terms of the GNU General Public License as modified by SRC, + incorporated herein by reference. 
+ + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30" + +#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/ +#define PA_CPU_PORT_L_ACCESS 4 +#define PA_CHANNEL_ATTENTION 8 + +#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */ + +#define DMA_ALLOC dma_alloc_noncoherent +#define DMA_FREE dma_free_noncoherent +#define DMA_WBACK(ndev, addr, len) \ + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0) + +#define DMA_INV(ndev, addr, len) \ + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0) + +#define DMA_WBACK_INV(ndev, addr, len) \ + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0) + +#define SYSBUS 0x0000006c; + +/* big endian CPU, 82596 "big" endian mode */ +#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16)) +#define SWAP16(x) (x) + +#include "lib82596.c" + +MODULE_AUTHOR("Richard Hirst"); +MODULE_DESCRIPTION("i82596 driver"); +MODULE_LICENSE("GPL"); +module_param(i596_debug, int, 0); +MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask"); + +static inline void ca(struct net_device *dev) +{ + gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION); +} + + +static void mpu_port(struct net_device *dev, int c, dma_addr_t x) +{ + struct i596_private *lp = netdev_priv(dev); + + u32 v = (u32) (c) | (u32) (x); + u16 a, b; + + if (lp->options & OPT_SWAP_PORT) { + a = v >> 16; + b = v & 0xffff; + } else { + a = v & 0xffff; + b = v >> 16; + } + + gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS); + udelay(1); + gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS); +} + +#define LAN_PROM_ADDR 0xF0810000 + +static int +lan_init_chip(struct parisc_device *dev) +{ + struct net_device *netdevice; + struct i596_private *lp; + int retval; + int i; + + if (!dev->irq) { + printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", + __FILE__, (unsigned long)dev->hpa.start); + return -ENODEV; + } + + printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", + (unsigned long)dev->hpa.start, dev->irq); + + netdevice = alloc_etherdev(sizeof(struct i596_private)); + if (!netdevice) + return -ENOMEM; + SET_NETDEV_DEV(netdevice, &dev->dev); + parisc_set_drvdata (dev, netdevice); + + netdevice->base_addr = dev->hpa.start; + netdevice->irq = dev->irq; + + if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) { + for (i = 0; i < 6; i++) { + netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i); + } + printk(KERN_INFO + "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__); + } + + lp = netdev_priv(netdevice); + lp->options = dev->id.sversion == 0x72 ? 
OPT_SWAP_PORT : 0; + + retval = i82596_probe(netdevice); + if (retval) { + free_netdev(netdevice); + return -ENODEV; + } + return retval; +} + +static int lan_remove_chip(struct parisc_device *pdev) +{ + struct net_device *dev = parisc_get_drvdata(pdev); + struct i596_private *lp = netdev_priv(dev); + + unregister_netdev (dev); + DMA_FREE(&pdev->dev, sizeof(struct i596_private), + (void *)lp->dma, lp->dma_addr); + free_netdev (dev); + return 0; +} + +static struct parisc_device_id lan_tbl[] = { + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a }, + { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(parisc, lan_tbl); + +static struct parisc_driver lan_driver = { + .name = "lasi_82596", + .id_table = lan_tbl, + .probe = lan_init_chip, + .remove = lan_remove_chip, +}; + +static int lasi_82596_init(void) +{ + printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n"); + return register_parisc_driver(&lan_driver); +} + +module_init(lasi_82596_init); + +static void __exit lasi_82596_exit(void) +{ + unregister_parisc_driver(&lan_driver); +} + +module_exit(lasi_82596_exit); diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c new file mode 100644 index 000000000..c984998b3 --- /dev/null +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -0,0 +1,1409 @@ +/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as + munged into HPPA boxen . + + This driver is based upon 82596.c, original credits are below... + but there were too many hoops which HP wants jumped through to + keep this code in there in a sane manner. + + 3 primary sources of the mess -- + 1) hppa needs *lots* of cacheline flushing to keep this kind of + MMIO running. + + 2) The 82596 needs to see all of its pointers as their physical + address. Thus virt_to_bus/bus_to_virt are *everywhere*. + + 3) The implementation HP is using seems to be significantly pickier + about when and how the command and RX units are started. some + command ordering was changed. + + Examination of the mach driver leads one to believe that there + might be a saner way to pull this off... anyone who feels like a + full rewrite can be my guest. + + Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) + + 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) + 03/02/2000 changes for better/correct(?) cache-flushing (deller) +*/ + +/* 82596.c: A generic 82596 ethernet driver for linux. */ +/* + Based on Apricot.c + Written 1994 by Mark Evans. + This driver is for the Apricot 82596 bus-master interface + + Modularised 12/94 Mark Evans + + + Modified to support the 82596 ethernet chips on 680x0 VME boards. + by Richard Hirst + Renamed to be 82596.c + + 980825: Changed to receive directly in to sk_buffs which are + allocated at open() time. Eliminates copy on incoming frames + (small ones are still copied). Shared data now held in a + non-cached page, so we can run on 68060 in copyback mode. + + TBD: + * look at deferring rx frames rather than discarding (as per tulip) + * handle tx ring full as per tulip + * performance test to tune rx_copybreak + + Most of my modifications relate to the braindead big-endian + implementation by Intel. When the i596 is operating in + 'big-endian' mode, it thinks a 32 bit value of 0x12345678 + should be stored as 0x56781234. This is a real pain, when + you have linked lists which are shared by the 680x0 and the + i596. + + Driver skeleton + Written 1993 by Donald Becker. 
+ Copyright 1993 United States Government as represented by the Director, + National Security Agency. This software may only be used and distributed + according to the terms of the GNU General Public License as modified by SRC, + incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* DEBUG flags + */ + +#define DEB_INIT 0x0001 +#define DEB_PROBE 0x0002 +#define DEB_SERIOUS 0x0004 +#define DEB_ERRORS 0x0008 +#define DEB_MULTI 0x0010 +#define DEB_TDR 0x0020 +#define DEB_OPEN 0x0040 +#define DEB_RESET 0x0080 +#define DEB_ADDCMD 0x0100 +#define DEB_STATUS 0x0200 +#define DEB_STARTTX 0x0400 +#define DEB_RXADDR 0x0800 +#define DEB_TXADDR 0x1000 +#define DEB_RXFRAME 0x2000 +#define DEB_INTS 0x4000 +#define DEB_STRUCT 0x8000 +#define DEB_ANY 0xffff + + +#define DEB(x, y) if (i596_debug & (x)) { y; } + + +/* + * The MPU_PORT command allows direct access to the 82596. With PORT access + * the following commands are available (p5-18). The 32-bit port command + * must be word-swapped with the most significant word written first. + * This only applies to VME boards. + */ +#define PORT_RESET 0x00 /* reset 82596 */ +#define PORT_SELFTEST 0x01 /* selftest */ +#define PORT_ALTSCP 0x02 /* alternate SCB address */ +#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */ + +static int i596_debug = (DEB_SERIOUS|DEB_PROBE); + +/* Copy frames shorter than rx_copybreak, otherwise pass on up in + * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). + */ +static int rx_copybreak = 100; + +#define PKT_BUF_SZ 1536 +#define MAX_MC_CNT 64 + +#define ISCP_BUSY 0x0001 + +#define I596_NULL ((u32)0xffffffff) + +#define CMD_EOL 0x8000 /* The last command of the list, stop. */ +#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ +#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ + +#define CMD_FLEX 0x0008 /* Enable flexible memory model */ + +enum commands { + CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, + CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7 +}; + +#define STAT_C 0x8000 /* Set to 0 after execution */ +#define STAT_B 0x4000 /* Command being executed */ +#define STAT_OK 0x2000 /* Command executed ok */ +#define STAT_A 0x1000 /* Command aborted */ + +#define CUC_START 0x0100 +#define CUC_RESUME 0x0200 +#define CUC_SUSPEND 0x0300 +#define CUC_ABORT 0x0400 +#define RX_START 0x0010 +#define RX_RESUME 0x0020 +#define RX_SUSPEND 0x0030 +#define RX_ABORT 0x0040 + +#define TX_TIMEOUT (HZ/20) + + +struct i596_reg { + unsigned short porthi; + unsigned short portlo; + u32 ca; +}; + +#define EOF 0x8000 +#define SIZE_MASK 0x3fff + +struct i596_tbd { + unsigned short size; + unsigned short pad; + u32 next; + u32 data; + u32 cache_pad[5]; /* Total 32 bytes... */ +}; + +/* The command structure has two 'next' pointers; v_next is the address of + * the next command as seen by the CPU, b_next is the address of the next + * command as seen by the 82596. The b_next pointer, as used by the 82596 + * always references the status field of the next command, rather than the + * v_next field, because the 82596 is unaware of v_next. 
It may seem more + * logical to put v_next at the end of the structure, but we cannot do that + * because the 82596 expects other fields to be there, depending on command + * type. + */ + +struct i596_cmd { + struct i596_cmd *v_next; /* Address from CPUs viewpoint */ + unsigned short status; + unsigned short command; + u32 b_next; /* Address from i596 viewpoint */ +}; + +struct tx_cmd { + struct i596_cmd cmd; + u32 tbd; + unsigned short size; + unsigned short pad; + struct sk_buff *skb; /* So we can free it after tx */ + dma_addr_t dma_addr; +#ifdef __LP64__ + u32 cache_pad[6]; /* Total 64 bytes... */ +#else + u32 cache_pad[1]; /* Total 32 bytes... */ +#endif +}; + +struct tdr_cmd { + struct i596_cmd cmd; + unsigned short status; + unsigned short pad; +}; + +struct mc_cmd { + struct i596_cmd cmd; + short mc_cnt; + char mc_addrs[MAX_MC_CNT*6]; +}; + +struct sa_cmd { + struct i596_cmd cmd; + char eth_addr[8]; +}; + +struct cf_cmd { + struct i596_cmd cmd; + char i596_config[16]; +}; + +struct i596_rfd { + unsigned short stat; + unsigned short cmd; + u32 b_next; /* Address from i596 viewpoint */ + u32 rbd; + unsigned short count; + unsigned short size; + struct i596_rfd *v_next; /* Address from CPUs viewpoint */ + struct i596_rfd *v_prev; +#ifndef __LP64__ + u32 cache_pad[2]; /* Total 32 bytes... */ +#endif +}; + +struct i596_rbd { + /* hardware data */ + unsigned short count; + unsigned short zero1; + u32 b_next; + u32 b_data; /* Address from i596 viewpoint */ + unsigned short size; + unsigned short zero2; + /* driver data */ + struct sk_buff *skb; + struct i596_rbd *v_next; + u32 b_addr; /* This rbd addr from i596 view */ + unsigned char *v_data; /* Address from CPUs viewpoint */ + /* Total 32 bytes... */ +#ifdef __LP64__ + u32 cache_pad[4]; +#endif +}; + +/* These values as chosen so struct i596_dma fits in one page... 
*/ + +#define TX_RING_SIZE 32 +#define RX_RING_SIZE 16 + +struct i596_scb { + unsigned short status; + unsigned short command; + u32 cmd; + u32 rfd; + u32 crc_err; + u32 align_err; + u32 resource_err; + u32 over_err; + u32 rcvdt_err; + u32 short_err; + unsigned short t_on; + unsigned short t_off; +}; + +struct i596_iscp { + u32 stat; + u32 scb; +}; + +struct i596_scp { + u32 sysbus; + u32 pad; + u32 iscp; +}; + +struct i596_dma { + struct i596_scp scp __attribute__((aligned(32))); + volatile struct i596_iscp iscp __attribute__((aligned(32))); + volatile struct i596_scb scb __attribute__((aligned(32))); + struct sa_cmd sa_cmd __attribute__((aligned(32))); + struct cf_cmd cf_cmd __attribute__((aligned(32))); + struct tdr_cmd tdr_cmd __attribute__((aligned(32))); + struct mc_cmd mc_cmd __attribute__((aligned(32))); + struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32))); + struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32))); + struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32))); + struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32))); +}; + +struct i596_private { + struct i596_dma *dma; + u32 stat; + int last_restart; + struct i596_rfd *rfd_head; + struct i596_rbd *rbd_head; + struct i596_cmd *cmd_tail; + struct i596_cmd *cmd_head; + int cmd_backlog; + u32 last_cmd; + int next_tx_cmd; + int options; + spinlock_t lock; /* serialize access to chip */ + dma_addr_t dma_addr; + void __iomem *mpu_port; + void __iomem *ca; +}; + +static const char init_setup[] = +{ + 0x8E, /* length, prefetch on */ + 0xC8, /* fifo to 8, monitor off */ + 0x80, /* don't save bad frames */ + 0x2E, /* No source address insertion, 8 byte preamble */ + 0x00, /* priority and backoff defaults */ + 0x60, /* interframe spacing */ + 0x00, /* slot time LSB */ + 0xf2, /* slot time and retries */ + 0x00, /* promiscuous mode */ + 0x00, /* collision detect */ + 0x40, /* minimum frame length */ + 0xff, + 0x00, + 0x7f /* *multi IA */ }; + +static int i596_open(struct net_device *dev); +static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t i596_interrupt(int irq, void *dev_id); +static int i596_close(struct net_device *dev); +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); +static void i596_tx_timeout (struct net_device *dev); +static void print_eth(unsigned char *buf, char *str); +static void set_multicast_list(struct net_device *dev); +static inline void ca(struct net_device *dev); +static void mpu_port(struct net_device *dev, int c, dma_addr_t x); + +static int rx_ring_size = RX_RING_SIZE; +static int ticks_limit = 100; +static int max_cmd_backlog = TX_RING_SIZE-1; + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void i596_poll_controller(struct net_device *dev); +#endif + + +static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str) +{ + DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp)); + while (--delcnt && dma->iscp.stat) { + udelay(10); + DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp)); + } + if (!delcnt) { + printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n", + dev->name, str, SWAP16(dma->iscp.stat)); + return -1; + } else + return 0; +} + + +static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str) +{ + DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb)); + while (--delcnt && dma->scb.command) { + udelay(10); + DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb)); + } + if (!delcnt) { + printk(KERN_ERR "%s: %s, status %4.4x, cmd 
%4.4x.\n", + dev->name, str, + SWAP16(dma->scb.status), + SWAP16(dma->scb.command)); + return -1; + } else + return 0; +} + + +static void i596_display_data(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + struct i596_cmd *cmd; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n", + &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp)); + printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n", + &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb)); + printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x," + " .cmd = %08x, .rfd = %08x\n", + &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command), + SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd)); + printk(KERN_DEBUG " errors: crc %x, align %x, resource %x," + " over %x, rcvdt %x, short %x\n", + SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err), + SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err), + SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err)); + cmd = lp->cmd_head; + while (cmd != NULL) { + printk(KERN_DEBUG + "cmd at %p, .status = %04x, .command = %04x," + " .b_next = %08x\n", + cmd, SWAP16(cmd->status), SWAP16(cmd->command), + SWAP32(cmd->b_next)); + cmd = cmd->v_next; + } + rfd = lp->rfd_head; + printk(KERN_DEBUG "rfd_head = %p\n", rfd); + do { + printk(KERN_DEBUG + " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x," + " count %04x\n", + rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd), + SWAP32(rfd->b_next), SWAP32(rfd->rbd), + SWAP16(rfd->count)); + rfd = rfd->v_next; + } while (rfd != lp->rfd_head); + rbd = lp->rbd_head; + printk(KERN_DEBUG "rbd_head = %p\n", rbd); + do { + printk(KERN_DEBUG + " %p .count %04x, b_next %08x, b_data %08x," + " size %04x\n", + rbd, SWAP16(rbd->count), SWAP32(rbd->b_next), + SWAP32(rbd->b_data), SWAP16(rbd->size)); + rbd = rbd->v_next; + } while (rbd != lp->rbd_head); + DMA_INV(dev, dma, sizeof(struct i596_dma)); +} + + +#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma))) + +static inline int init_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + int i; + struct i596_rfd *rfd; + struct i596_rbd *rbd; + + /* First build the Receive Buffer Descriptor List */ + + for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { + dma_addr_t dma_addr; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); + if (skb == NULL) + return -1; + dma_addr = dma_map_single(dev->dev.parent, skb->data, + PKT_BUF_SZ, DMA_FROM_DEVICE); + rbd->v_next = rbd+1; + rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1)); + rbd->b_addr = SWAP32(virt_to_dma(lp, rbd)); + rbd->skb = skb; + rbd->v_data = skb->data; + rbd->b_data = SWAP32(dma_addr); + rbd->size = SWAP16(PKT_BUF_SZ); + } + lp->rbd_head = dma->rbds; + rbd = dma->rbds + rx_ring_size - 1; + rbd->v_next = dma->rbds; + rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds)); + + /* Now build the Receive Frame Descriptor List */ + + for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) { + rfd->rbd = I596_NULL; + rfd->v_next = rfd+1; + rfd->v_prev = rfd-1; + rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1)); + rfd->cmd = SWAP16(CMD_FLEX); + } + lp->rfd_head = dma->rfds; + dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); + rfd = dma->rfds; + rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head)); + rfd->v_prev = dma->rfds + rx_ring_size - 1; + rfd = dma->rfds + rx_ring_size - 1; + rfd->v_next = 
dma->rfds; + rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds)); + rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); + + DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); + return 0; +} + +static inline void remove_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_rbd *rbd; + int i; + + for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) { + if (rbd->skb == NULL) + break; + dma_unmap_single(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + dev_kfree_skb(rbd->skb); + } +} + + +static void rebuild_rx_bufs(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + int i; + + /* Ensure rx frame/buffer descriptors are tidy */ + + for (i = 0; i < rx_ring_size; i++) { + dma->rfds[i].rbd = I596_NULL; + dma->rfds[i].cmd = SWAP16(CMD_FLEX); + } + dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX); + lp->rfd_head = dma->rfds; + dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); + lp->rbd_head = dma->rbds; + dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds)); + + DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); +} + + +static int init_i596_mem(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + unsigned long flags; + + mpu_port(dev, PORT_RESET, 0); + udelay(100); /* Wait 100us - seems to help */ + + /* change the scp address */ + + lp->last_cmd = jiffies; + + dma->scp.sysbus = SYSBUS; + dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp))); + dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb))); + dma->iscp.stat = SWAP32(ISCP_BUSY); + lp->cmd_backlog = 0; + + lp->cmd_head = NULL; + dma->scb.cmd = I596_NULL; + + DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); + + DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp)); + DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp)); + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + + mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp)); + ca(dev); + if (wait_istat(dev, dma, 1000, "initialization timed out")) + goto failed; + DEB(DEB_INIT, printk(KERN_DEBUG + "%s: i82596 initialization successful\n", + dev->name)); + + if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { + printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); + goto failed; + } + + /* Ensure rx frame/buffer descriptors are tidy */ + rebuild_rx_bufs(dev); + + dma->scb.command = 0; + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + + DEB(DEB_INIT, printk(KERN_DEBUG + "%s: queuing CmdConfigure\n", dev->name)); + memcpy(dma->cf_cmd.i596_config, init_setup, 14); + dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); + DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd)); + i596_add_cmd(dev, &dma->cf_cmd.cmd); + + DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); + memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN); + dma->sa_cmd.cmd.command = SWAP16(CmdSASetup); + DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd)); + i596_add_cmd(dev, &dma->sa_cmd.cmd); + + DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); + dma->tdr_cmd.cmd.command = SWAP16(CmdTDR); + DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd)); + i596_add_cmd(dev, &dma->tdr_cmd.cmd); + + spin_lock_irqsave (&lp->lock, flags); + + if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) { + spin_unlock_irqrestore (&lp->lock, flags); + goto failed_free_irq; + } + DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); + 
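/* With CmdConfigure, CmdSASetup and CmdTDR queued, point the SCB at
+	 * the first receive frame descriptor and bring up the receive unit
+	 * with RX_START and a channel attention. */
+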
dma->scb.command = SWAP16(RX_START); + dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + + ca(dev); + + spin_unlock_irqrestore (&lp->lock, flags); + if (wait_cmd(dev, dma, 1000, "RX_START not processed")) + goto failed_free_irq; + DEB(DEB_INIT, printk(KERN_DEBUG + "%s: Receive unit started OK\n", dev->name)); + return 0; + +failed_free_irq: + free_irq(dev->irq, dev); +failed: + printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name); + mpu_port(dev, PORT_RESET, 0); + return -1; +} + + +static inline int i596_rx(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_rfd *rfd; + struct i596_rbd *rbd; + int frames = 0; + + DEB(DEB_RXFRAME, printk(KERN_DEBUG + "i596_rx(), rfd_head %p, rbd_head %p\n", + lp->rfd_head, lp->rbd_head)); + + + rfd = lp->rfd_head; /* Ref next frame to check */ + + DMA_INV(dev, rfd, sizeof(struct i596_rfd)); + while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */ + if (rfd->rbd == I596_NULL) + rbd = NULL; + else if (rfd->rbd == lp->rbd_head->b_addr) { + rbd = lp->rbd_head; + DMA_INV(dev, rbd, sizeof(struct i596_rbd)); + } else { + printk(KERN_ERR "%s: rbd chain broken!\n", dev->name); + /* XXX Now what? */ + rbd = NULL; + } + DEB(DEB_RXFRAME, printk(KERN_DEBUG + " rfd %p, rfd.rbd %08x, rfd.stat %04x\n", + rfd, rfd->rbd, rfd->stat)); + + if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) { + /* a good frame */ + int pkt_len = SWAP16(rbd->count) & 0x3fff; + struct sk_buff *skb = rbd->skb; + int rx_in_place = 0; + + DEB(DEB_RXADDR, print_eth(rbd->v_data, "received")); + frames++; + + /* Check if the packet is long enough to just accept + * without copying to a properly sized skbuff. + */ + + if (pkt_len > rx_copybreak) { + struct sk_buff *newskb; + dma_addr_t dma_addr; + + dma_unmap_single(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + /* Get fresh skbuff to replace filled one. */ + newskb = netdev_alloc_skb_ip_align(dev, + PKT_BUF_SZ); + if (newskb == NULL) { + skb = NULL; /* drop pkt */ + goto memory_squeeze; + } + + /* Pass up the skb already on the Rx ring. */ + skb_put(skb, pkt_len); + rx_in_place = 1; + rbd->skb = newskb; + dma_addr = dma_map_single(dev->dev.parent, + newskb->data, + PKT_BUF_SZ, + DMA_FROM_DEVICE); + rbd->v_data = newskb->data; + rbd->b_data = SWAP32(dma_addr); + DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); + } else { + skb = netdev_alloc_skb_ip_align(dev, pkt_len); + } +memory_squeeze: + if (skb == NULL) { + /* XXX tulip.c can defer packets here!! 
*/ + dev->stats.rx_dropped++; + } else { + if (!rx_in_place) { + /* 16 byte align the data fields */ + dma_sync_single_for_cpu(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); + dma_sync_single_for_device(dev->dev.parent, + (dma_addr_t)SWAP32(rbd->b_data), + PKT_BUF_SZ, DMA_FROM_DEVICE); + } + skb->len = pkt_len; + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + } else { + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: Error, rfd.stat = 0x%04x\n", + dev->name, rfd->stat)); + dev->stats.rx_errors++; + if (rfd->stat & SWAP16(0x0100)) + dev->stats.collisions++; + if (rfd->stat & SWAP16(0x8000)) + dev->stats.rx_length_errors++; + if (rfd->stat & SWAP16(0x0001)) + dev->stats.rx_over_errors++; + if (rfd->stat & SWAP16(0x0002)) + dev->stats.rx_fifo_errors++; + if (rfd->stat & SWAP16(0x0004)) + dev->stats.rx_frame_errors++; + if (rfd->stat & SWAP16(0x0008)) + dev->stats.rx_crc_errors++; + if (rfd->stat & SWAP16(0x0010)) + dev->stats.rx_length_errors++; + } + + /* Clear the buffer descriptor count and EOF + F flags */ + + if (rbd != NULL && (rbd->count & SWAP16(0x4000))) { + rbd->count = 0; + lp->rbd_head = rbd->v_next; + DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); + } + + /* Tidy the frame descriptor, marking it as end of list */ + + rfd->rbd = I596_NULL; + rfd->stat = 0; + rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); + rfd->count = 0; + + /* Update record of next frame descriptor to process */ + + lp->dma->scb.rfd = rfd->b_next; + lp->rfd_head = rfd->v_next; + DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd)); + + /* Remove end-of-list from old end descriptor */ + + rfd->v_prev->cmd = SWAP16(CMD_FLEX); + DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd)); + rfd = lp->rfd_head; + DMA_INV(dev, rfd, sizeof(struct i596_rfd)); + } + + DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames)); + + return 0; +} + + +static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) +{ + struct i596_cmd *ptr; + + while (lp->cmd_head != NULL) { + ptr = lp->cmd_head; + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + switch (SWAP16(ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + dma_unmap_single(dev->dev.parent, + tx_cmd->dma_addr, + skb->len, DMA_TO_DEVICE); + + dev_kfree_skb(skb); + + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + + ptr->v_next = NULL; + ptr->b_next = I596_NULL; + tx_cmd->cmd.command = 0; /* Mark as free */ + break; + } + default: + ptr->v_next = NULL; + ptr->b_next = I596_NULL; + } + DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd)); + } + + wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out"); + lp->dma->scb.cmd = I596_NULL; + DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); +} + + +static inline void i596_reset(struct net_device *dev, struct i596_private *lp) +{ + unsigned long flags; + + DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n")); + + spin_lock_irqsave (&lp->lock, flags); + + wait_cmd(dev, lp->dma, 100, "i596_reset timed out"); + + netif_stop_queue(dev); + + /* FIXME: this command might cause an lpmc */ + lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); + DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb)); + ca(dev); + + /* wait for shutdown */ + wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out"); + spin_unlock_irqrestore (&lp->lock, flags); + + i596_cleanup_cmd(dev, lp); + 
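/* With the command queue flushed, pick up any frames the receive
+	 * unit has already completed, then restart the queue and
+	 * reinitialise the chip from scratch. */
+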
i596_rx(dev); + + netif_start_queue(dev); + init_i596_mem(dev); +} + + +static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + unsigned long flags; + + DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n", + lp->cmd_head)); + + cmd->status = 0; + cmd->command |= SWAP16(CMD_EOL | CMD_INTR); + cmd->v_next = NULL; + cmd->b_next = I596_NULL; + DMA_WBACK(dev, cmd, sizeof(struct i596_cmd)); + + spin_lock_irqsave (&lp->lock, flags); + + if (lp->cmd_head != NULL) { + lp->cmd_tail->v_next = cmd; + lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status)); + DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd)); + } else { + lp->cmd_head = cmd; + wait_cmd(dev, dma, 100, "i596_add_cmd timed out"); + dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status)); + dma->scb.command = SWAP16(CUC_START); + DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb)); + ca(dev); + } + lp->cmd_tail = cmd; + lp->cmd_backlog++; + + spin_unlock_irqrestore (&lp->lock, flags); + + if (lp->cmd_backlog > max_cmd_backlog) { + unsigned long tickssofar = jiffies - lp->last_cmd; + + if (tickssofar < ticks_limit) + return; + + printk(KERN_ERR + "%s: command unit timed out, status resetting.\n", + dev->name); +#if 1 + i596_reset(dev, lp); +#endif + } +} + +static int i596_open(struct net_device *dev) +{ + DEB(DEB_OPEN, printk(KERN_DEBUG + "%s: i596_open() irq %d.\n", dev->name, dev->irq)); + + if (init_rx_bufs(dev)) { + printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name); + return -EAGAIN; + } + if (init_i596_mem(dev)) { + printk(KERN_ERR "%s: Failed to init memory\n", dev->name); + goto out_remove_rx_bufs; + } + netif_start_queue(dev); + + return 0; + +out_remove_rx_bufs: + remove_rx_bufs(dev); + return -EAGAIN; +} + +static void i596_tx_timeout (struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + + /* Transmitter timeout, serious problems. 
*/ + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: transmit timed out, status resetting.\n", + dev->name)); + + dev->stats.tx_errors++; + + /* Try to restart the adaptor */ + if (lp->last_restart == dev->stats.tx_packets) { + DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); + /* Shutdown and restart */ + i596_reset (dev, lp); + } else { + /* Issue a channel attention signal */ + DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n")); + lp->dma->scb.command = SWAP16(CUC_START | RX_START); + DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); + ca (dev); + lp->last_restart = dev->stats.tx_packets; + } + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue (dev); +} + + +static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct tx_cmd *tx_cmd; + struct i596_tbd *tbd; + short length = skb->len; + + DEB(DEB_STARTTX, printk(KERN_DEBUG + "%s: i596_start_xmit(%x,%p) called\n", + dev->name, skb->len, skb->data)); + + if (length < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + + netif_stop_queue(dev); + + tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd; + tbd = lp->dma->tbds + lp->next_tx_cmd; + + if (tx_cmd->cmd.command) { + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: xmit ring full, dropping packet.\n", + dev->name)); + dev->stats.tx_dropped++; + + dev_kfree_skb_any(skb); + } else { + if (++lp->next_tx_cmd == TX_RING_SIZE) + lp->next_tx_cmd = 0; + tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd)); + tbd->next = I596_NULL; + + tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx); + tx_cmd->skb = skb; + + tx_cmd->pad = 0; + tx_cmd->size = 0; + tbd->pad = 0; + tbd->size = SWAP16(EOF | length); + + tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data, + skb->len, DMA_TO_DEVICE); + tbd->data = SWAP32(tx_cmd->dma_addr); + + DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued")); + DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd)); + DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); + i596_add_cmd(dev, &tx_cmd->cmd); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; + } + + netif_start_queue(dev); + + return NETDEV_TX_OK; +} + +static void print_eth(unsigned char *add, char *str) +{ + printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", + add, add + 6, add, add[12], add[13], str); +} +static const struct net_device_ops i596_netdev_ops = { + .ndo_open = i596_open, + .ndo_stop = i596_close, + .ndo_start_xmit = i596_start_xmit, + .ndo_set_rx_mode = set_multicast_list, + .ndo_tx_timeout = i596_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i596_poll_controller, +#endif +}; + +static int i82596_probe(struct net_device *dev) +{ + int i; + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma; + + /* This lot is ensure things have been cache line aligned. 
*/ + BUILD_BUG_ON(sizeof(struct i596_rfd) != 32); + BUILD_BUG_ON(sizeof(struct i596_rbd) & 31); + BUILD_BUG_ON(sizeof(struct tx_cmd) & 31); + BUILD_BUG_ON(sizeof(struct i596_tbd) != 32); +#ifndef __LP64__ + BUILD_BUG_ON(sizeof(struct i596_dma) > 4096); +#endif + + if (!dev->base_addr || !dev->irq) + return -ENODEV; + + dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent, + sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL); + if (!dma) { + printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__); + return -ENOMEM; + } + + dev->netdev_ops = &i596_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + memset(dma, 0, sizeof(struct i596_dma)); + lp->dma = dma; + + dma->scb.command = 0; + dma->scb.cmd = I596_NULL; + dma->scb.rfd = I596_NULL; + spin_lock_init(&lp->lock); + + DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma)); + + i = register_netdev(dev); + if (i) { + DMA_FREE(dev->dev.parent, sizeof(struct i596_dma), + (void *)dma, lp->dma_addr); + return i; + } + + DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n", + dev->name, dev->base_addr, dev->dev_addr, + dev->irq)); + DEB(DEB_INIT, printk(KERN_INFO + "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", + dev->name, dma, (int)sizeof(struct i596_dma), + &dma->scb)); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void i596_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + i596_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static irqreturn_t i596_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct i596_private *lp; + struct i596_dma *dma; + unsigned short status, ack_cmd = 0; + + lp = netdev_priv(dev); + dma = lp->dma; + + spin_lock (&lp->lock); + + wait_cmd(dev, dma, 100, "i596 interrupt, timeout"); + status = SWAP16(dma->scb.status); + + DEB(DEB_INTS, printk(KERN_DEBUG + "%s: i596 interrupt, IRQ %d, status %4.4x.\n", + dev->name, dev->irq, status)); + + ack_cmd = status & 0xf000; + + if (!ack_cmd) { + DEB(DEB_ERRORS, printk(KERN_DEBUG + "%s: interrupt with no events\n", + dev->name)); + spin_unlock (&lp->lock); + return IRQ_NONE; + } + + if ((status & 0x8000) || (status & 0x2000)) { + struct i596_cmd *ptr; + + if ((status & 0x8000)) + DEB(DEB_INTS, + printk(KERN_DEBUG + "%s: i596 interrupt completed command.\n", + dev->name)); + if ((status & 0x2000)) + DEB(DEB_INTS, + printk(KERN_DEBUG + "%s: i596 interrupt command unit inactive %x.\n", + dev->name, status & 0x0700)); + + while (lp->cmd_head != NULL) { + DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd)); + if (!(lp->cmd_head->status & SWAP16(STAT_C))) + break; + + ptr = lp->cmd_head; + + DEB(DEB_STATUS, + printk(KERN_DEBUG + "cmd_head->status = %04x, ->command = %04x\n", + SWAP16(lp->cmd_head->status), + SWAP16(lp->cmd_head->command))); + lp->cmd_head = ptr->v_next; + lp->cmd_backlog--; + + switch (SWAP16(ptr->command) & 0x7) { + case CmdTx: + { + struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; + struct sk_buff *skb = tx_cmd->skb; + + if (ptr->status & SWAP16(STAT_OK)) { + DEB(DEB_TXADDR, + print_eth(skb->data, "tx-done")); + } else { + dev->stats.tx_errors++; + if (ptr->status & SWAP16(0x0020)) + dev->stats.collisions++; + if (!(ptr->status & SWAP16(0x0040))) + dev->stats.tx_heartbeat_errors++; + if (ptr->status & SWAP16(0x0400)) + dev->stats.tx_carrier_errors++; + if (ptr->status & SWAP16(0x0800)) + dev->stats.collisions++; + if (ptr->status & SWAP16(0x1000)) + dev->stats.tx_aborted_errors++; + } + dma_unmap_single(dev->dev.parent, + tx_cmd->dma_addr, + skb->len, DMA_TO_DEVICE); + 
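/* The buffer has been unmapped, so the skb can be freed from
+				 * interrupt context and the tx command slot reused. */
+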
dev_kfree_skb_irq(skb); + + tx_cmd->cmd.command = 0; /* Mark free */ + break; + } + case CmdTDR: + { + unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status); + + if (status & 0x8000) { + DEB(DEB_ANY, + printk(KERN_DEBUG "%s: link ok.\n", + dev->name)); + } else { + if (status & 0x4000) + printk(KERN_ERR + "%s: Transceiver problem.\n", + dev->name); + if (status & 0x2000) + printk(KERN_ERR + "%s: Termination problem.\n", + dev->name); + if (status & 0x1000) + printk(KERN_ERR + "%s: Short circuit.\n", + dev->name); + + DEB(DEB_TDR, + printk(KERN_DEBUG "%s: Time %d.\n", + dev->name, status & 0x07ff)); + } + break; + } + case CmdConfigure: + /* + * Zap command so set_multicast_list() know + * it is free + */ + ptr->command = 0; + break; + } + ptr->v_next = NULL; + ptr->b_next = I596_NULL; + DMA_WBACK(dev, ptr, sizeof(struct i596_cmd)); + lp->last_cmd = jiffies; + } + + /* This mess is arranging that only the last of any outstanding + * commands has the interrupt bit set. Should probably really + * only add to the cmd queue when the CU is stopped. + */ + ptr = lp->cmd_head; + while ((ptr != NULL) && (ptr != lp->cmd_tail)) { + struct i596_cmd *prev = ptr; + + ptr->command &= SWAP16(0x1fff); + ptr = ptr->v_next; + DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd)); + } + + if (lp->cmd_head != NULL) + ack_cmd |= CUC_START; + dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status)); + DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb)); + } + if ((status & 0x1000) || (status & 0x4000)) { + if ((status & 0x4000)) + DEB(DEB_INTS, + printk(KERN_DEBUG + "%s: i596 interrupt received a frame.\n", + dev->name)); + i596_rx(dev); + /* Only RX_START if stopped - RGH 07-07-96 */ + if (status & 0x1000) { + if (netif_running(dev)) { + DEB(DEB_ERRORS, + printk(KERN_DEBUG + "%s: i596 interrupt receive unit inactive, status 0x%x\n", + dev->name, status)); + ack_cmd |= RX_START; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + rebuild_rx_bufs(dev); + } + } + } + wait_cmd(dev, dma, 100, "i596 interrupt, timeout"); + dma->scb.command = SWAP16(ack_cmd); + DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb)); + + /* DANGER: I suspect that some kind of interrupt + acknowledgement aside from acking the 82596 might be needed + here... but it's running acceptably without */ + + ca(dev); + + wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout"); + DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); + + spin_unlock (&lp->lock); + return IRQ_HANDLED; +} + +static int i596_close(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + unsigned long flags; + + netif_stop_queue(dev); + + DEB(DEB_INIT, + printk(KERN_DEBUG + "%s: Shutting down ethercard, status was %4.4x.\n", + dev->name, SWAP16(lp->dma->scb.status))); + + spin_lock_irqsave(&lp->lock, flags); + + wait_cmd(dev, lp->dma, 100, "close1 timed out"); + lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); + DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb)); + + ca(dev); + + wait_cmd(dev, lp->dma, 100, "close2 timed out"); + spin_unlock_irqrestore(&lp->lock, flags); + DEB(DEB_STRUCT, i596_display_data(dev)); + i596_cleanup_cmd(dev, lp); + + free_irq(dev->irq, dev); + remove_rx_bufs(dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adaptor. 
+ */ + +static void set_multicast_list(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + struct i596_dma *dma = lp->dma; + int config = 0, cnt; + + DEB(DEB_MULTI, + printk(KERN_DEBUG + "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", + dev->name, netdev_mc_count(dev), + dev->flags & IFF_PROMISC ? "ON" : "OFF", + dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); + + if ((dev->flags & IFF_PROMISC) && + !(dma->cf_cmd.i596_config[8] & 0x01)) { + dma->cf_cmd.i596_config[8] |= 0x01; + config = 1; + } + if (!(dev->flags & IFF_PROMISC) && + (dma->cf_cmd.i596_config[8] & 0x01)) { + dma->cf_cmd.i596_config[8] &= ~0x01; + config = 1; + } + if ((dev->flags & IFF_ALLMULTI) && + (dma->cf_cmd.i596_config[11] & 0x20)) { + dma->cf_cmd.i596_config[11] &= ~0x20; + config = 1; + } + if (!(dev->flags & IFF_ALLMULTI) && + !(dma->cf_cmd.i596_config[11] & 0x20)) { + dma->cf_cmd.i596_config[11] |= 0x20; + config = 1; + } + if (config) { + if (dma->cf_cmd.cmd.command) + printk(KERN_INFO + "%s: config change request already queued\n", + dev->name); + else { + dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); + DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd)); + i596_add_cmd(dev, &dma->cf_cmd.cmd); + } + } + + cnt = netdev_mc_count(dev); + if (cnt > MAX_MC_CNT) { + cnt = MAX_MC_CNT; + printk(KERN_NOTICE "%s: Only %d multicast addresses supported", + dev->name, cnt); + } + + if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + unsigned char *cp; + struct mc_cmd *cmd; + + cmd = &dma->mc_cmd; + cmd->cmd.command = SWAP16(CmdMulticastList); + cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); + cp = cmd->mc_addrs; + netdev_for_each_mc_addr(ha, dev) { + if (!cnt--) + break; + memcpy(cp, ha->addr, ETH_ALEN); + if (i596_debug > 1) + DEB(DEB_MULTI, + printk(KERN_DEBUG + "%s: Adding address %pM\n", + dev->name, cp)); + cp += ETH_ALEN; + } + DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); + i596_add_cmd(dev, &cmd->cmd); + } +} diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c new file mode 100644 index 000000000..2af7f7734 --- /dev/null +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -0,0 +1,184 @@ +/* + * sni_82596.c -- driver for intel 82596 ethernet controller, as + * used in older SNI RM machines + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01" + +static const char sni_82596_string[] = "snirm_82596"; + +#define DMA_ALLOC dma_alloc_coherent +#define DMA_FREE dma_free_coherent +#define DMA_WBACK(priv, addr, len) do { } while (0) +#define DMA_INV(priv, addr, len) do { } while (0) +#define DMA_WBACK_INV(priv, addr, len) do { } while (0) + +#define SYSBUS 0x00004400 + +/* big endian CPU, 82596 little endian */ +#define SWAP32(x) cpu_to_le32((u32)(x)) +#define SWAP16(x) cpu_to_le16((u16)(x)) + +#define OPT_MPU_16BIT 0x01 + +#include "lib82596.c" + +MODULE_AUTHOR("Thomas Bogendoerfer"); +MODULE_DESCRIPTION("i82596 driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:snirm_82596"); +module_param(i596_debug, int, 0); +MODULE_PARM_DESC(i596_debug, "82596 debug mask"); + +static inline void ca(struct net_device *dev) +{ + struct i596_private *lp = netdev_priv(dev); + + writel(0, lp->ca); +} + + +static void mpu_port(struct net_device *dev, int c, dma_addr_t x) +{ + struct i596_private *lp = netdev_priv(dev); + + u32 v = (u32) (c) | (u32) (x); + + if 
(lp->options & OPT_MPU_16BIT) { + writew(v & 0xffff, lp->mpu_port); + wmb(); /* order writes to MPU port */ + udelay(1); + writew(v >> 16, lp->mpu_port); + } else { + writel(v, lp->mpu_port); + wmb(); /* order writes to MPU port */ + udelay(1); + writel(v, lp->mpu_port); + } +} + + +static int sni_82596_probe(struct platform_device *dev) +{ + struct net_device *netdevice; + struct i596_private *lp; + struct resource *res, *ca, *idprom, *options; + int retval = -ENOMEM; + void __iomem *mpu_addr; + void __iomem *ca_addr; + u8 __iomem *eth_addr; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + ca = platform_get_resource(dev, IORESOURCE_MEM, 1); + options = platform_get_resource(dev, 0, 0); + idprom = platform_get_resource(dev, IORESOURCE_MEM, 2); + if (!res || !ca || !options || !idprom) + return -ENODEV; + mpu_addr = ioremap_nocache(res->start, 4); + if (!mpu_addr) + return -ENOMEM; + ca_addr = ioremap_nocache(ca->start, 4); + if (!ca_addr) + goto probe_failed_free_mpu; + + printk(KERN_INFO "Found i82596 at 0x%x\n", res->start); + + netdevice = alloc_etherdev(sizeof(struct i596_private)); + if (!netdevice) + goto probe_failed_free_ca; + + SET_NETDEV_DEV(netdevice, &dev->dev); + platform_set_drvdata (dev, netdevice); + + netdevice->base_addr = res->start; + netdevice->irq = platform_get_irq(dev, 0); + + eth_addr = ioremap_nocache(idprom->start, 0x10); + if (!eth_addr) + goto probe_failed; + + /* someone seems to like messed up stuff */ + netdevice->dev_addr[0] = readb(eth_addr + 0x0b); + netdevice->dev_addr[1] = readb(eth_addr + 0x0a); + netdevice->dev_addr[2] = readb(eth_addr + 0x09); + netdevice->dev_addr[3] = readb(eth_addr + 0x08); + netdevice->dev_addr[4] = readb(eth_addr + 0x07); + netdevice->dev_addr[5] = readb(eth_addr + 0x06); + iounmap(eth_addr); + + if (!netdevice->irq) { + printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", + __FILE__, netdevice->base_addr); + goto probe_failed; + } + + lp = netdev_priv(netdevice); + lp->options = options->flags & IORESOURCE_BITS; + lp->ca = ca_addr; + lp->mpu_port = mpu_addr; + + retval = i82596_probe(netdevice); + if (retval == 0) + return 0; + +probe_failed: + free_netdev(netdevice); +probe_failed_free_ca: + iounmap(ca_addr); +probe_failed_free_mpu: + iounmap(mpu_addr); + return retval; +} + +static int sni_82596_driver_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct i596_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + DMA_FREE(dev->dev.parent, sizeof(struct i596_private), + lp->dma, lp->dma_addr); + iounmap(lp->ca); + iounmap(lp->mpu_port); + free_netdev (dev); + return 0; +} + +static struct platform_driver sni_82596_driver = { + .probe = sni_82596_probe, + .remove = sni_82596_driver_remove, + .driver = { + .name = sni_82596_string, + }, +}; + +static int sni_82596_init(void) +{ + printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n"); + return platform_driver_register(&sni_82596_driver); +} + + +static void __exit sni_82596_exit(void) +{ + platform_driver_unregister(&sni_82596_driver); +} + +module_init(sni_82596_init); +module_exit(sni_82596_exit); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c new file mode 100644 index 000000000..353f57f67 --- /dev/null +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -0,0 +1,1188 @@ +/* + * Sun3 i82586 Ethernet driver + * + * Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net) + * + * Original copyright follows: + * -------------------------- + * + * net-3-driver 
for the NI5210 card (i82586 Ethernet chip) + * + * This is an extension to the Linux operating system, and is covered by the + * same Gnu Public License that covers that work. + * + * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later) + * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de) + * -------------------------- + * + * Consult ni52.c for further notes from the original driver. + * + * This incarnation currently supports the OBIO version of the i82586 chip + * used in certain sun3 models. It should be fairly doable to expand this + * to support VME if I should every acquire such a board. + * + */ + +static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */ +static int automatic_resume = 0; /* experimental .. better should be zero */ +static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ +static int fifo=0x8; /* don't change */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sun3_82586.h" + +#define DRV_NAME "sun3_82586" + +#define DEBUG /* debug on */ +#define SYSBUSVAL 0 /* 16 Bit */ +#define SUN3_82586_TOTAL_SIZE PAGE_SIZE + +#define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;} +#define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;} +#define sun3_disint() {*(volatile unsigned char *)(dev->base_addr) &= ~IEOB_IENAB;} +#define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;} +#define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);} + +#define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) ) +#define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base)) +#define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop ))) + +/******************* how to calculate the buffers ***************************** + + * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works + * --------------- in a different (more stable?) mode. Only in this mode it's + * possible to configure the driver with 'NO_NOPCOMMANDS' + +sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8; +sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT +sizeof(rfd) = 24; sizeof(rbd) = 12; +sizeof(tbd) = 8; sizeof(transmit_cmd) = 16; +sizeof(nop_cmd) = 8; + + * if you don't know the driver, better do not change these values: */ + +#define RECV_BUFF_SIZE 1536 /* slightly oversized */ +#define XMIT_BUFF_SIZE 1536 /* slightly oversized */ +#define NUM_XMIT_BUFFS 1 /* config for 32K shmem */ +#define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */ +#define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */ +#define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */ +#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */ + +/**************************************************************************/ + +/* different DELAYs */ +#define DELAY(x) mdelay(32 * x); +#define DELAY_16(); { udelay(16); } +#define DELAY_18(); { udelay(4); } + +/* wait for command with timeout: */ +#define WAIT_4_SCB_CMD() \ +{ int i; \ + for(i=0;i<16384;i++) { \ + if(!p->scb->cmd_cuc) break; \ + DELAY_18(); \ + if(i == 16383) { \ + printk("%s: scb_cmd timed out: %04x,%04x .. 
disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \ + if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } + +#define WAIT_4_SCB_CMD_RUC() { int i; \ + for(i=0;i<16384;i++) { \ + if(!p->scb->cmd_ruc) break; \ + DELAY_18(); \ + if(i == 16383) { \ + printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \ + if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } } + +#define WAIT_4_STAT_COMPL(addr) { int i; \ + for(i=0;i<32767;i++) { \ + if(swab16((addr)->cmd_status) & STAT_COMPL) break; \ + DELAY_16(); DELAY_16(); } } + +static int sun3_82586_probe1(struct net_device *dev,int ioaddr); +static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); +static int sun3_82586_open(struct net_device *dev); +static int sun3_82586_close(struct net_device *dev); +static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); +static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static void sun3_82586_timeout(struct net_device *dev); +#if 0 +static void sun3_82586_dump(struct net_device *,void *); +#endif + +/* helper-functions */ +static int init586(struct net_device *dev); +static int check586(struct net_device *dev,char *where,unsigned size); +static void alloc586(struct net_device *dev); +static void startrecv586(struct net_device *dev); +static void *alloc_rfa(struct net_device *dev,void *ptr); +static void sun3_82586_rcv_int(struct net_device *dev); +static void sun3_82586_xmt_int(struct net_device *dev); +static void sun3_82586_rnr_int(struct net_device *dev); + +struct priv +{ + unsigned long base; + char *memtop; + long int lock; + int reseted; + volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; + volatile struct scp_struct *scp; /* volatile is important */ + volatile struct iscp_struct *iscp; /* volatile is important */ + volatile struct scb_struct *scb; /* volatile is important */ + volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; + volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; +#if (NUM_XMIT_BUFFS == 1) + volatile struct nop_cmd_struct *nop_cmds[2]; +#else + volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; +#endif + volatile int nop_point,num_recv_buffs; + volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; + volatile int xmit_count,xmit_last; +}; + +/********************************************** + * close device + */ +static int sun3_82586_close(struct net_device *dev) +{ + free_irq(dev->irq, dev); + + sun3_reset586(); /* the hard way to stop the receiver */ + + netif_stop_queue(dev); + + return 0; +} + +/********************************************** + * open device + */ +static int sun3_82586_open(struct net_device *dev) +{ + int ret; + + sun3_disint(); + alloc586(dev); + init586(dev); + startrecv586(dev); + sun3_enaint(); + + ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev); + if (ret) + { + sun3_reset586(); + return ret; + } + + netif_start_queue(dev); + + return 0; /* most done by init */ +} + +/********************************************** + * Check to see if there's an 82586 out there. 
+ */ +static int check586(struct net_device *dev,char *where,unsigned size) +{ + struct priv pb; + struct priv *p = &pb; + char *iscp_addr; + int i; + + p->base = (unsigned long) dvma_btov(0); + p->memtop = (char *)dvma_btov((unsigned long)where); + p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); + memset((char *)p->scp,0, sizeof(struct scp_struct)); + for(i=0;iscp)[i]) + return 0; + p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ + if(p->scp->sysbus != SYSBUSVAL) + return 0; + + iscp_addr = (char *)dvma_btov((unsigned long)where); + + p->iscp = (struct iscp_struct *) iscp_addr; + memset((char *)p->iscp,0, sizeof(struct iscp_struct)); + + p->scp->iscp = make24(p->iscp); + p->iscp->busy = 1; + + sun3_reset586(); + sun3_attn586(); + DELAY(1); /* wait a while... */ + + if(p->iscp->busy) /* i82586 clears 'busy' after successful init */ + return 0; + + return 1; +} + +/****************************************************************** + * set iscp at the right place, called by sun3_82586_probe1 and open586. + */ +static void alloc586(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + sun3_reset586(); + DELAY(1); + + p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); + p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start); + p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct)); + + memset((char *) p->iscp,0,sizeof(struct iscp_struct)); + memset((char *) p->scp ,0,sizeof(struct scp_struct)); + + p->scp->iscp = make24(p->iscp); + p->scp->sysbus = SYSBUSVAL; + p->iscp->scb_offset = make16(p->scb); + p->iscp->scb_base = make24(dvma_btov(dev->mem_start)); + + p->iscp->busy = 1; + sun3_reset586(); + sun3_attn586(); + + DELAY(1); + + if(p->iscp->busy) + printk("%s: Init-Problems (alloc).\n",dev->name); + + p->reseted = 0; + + memset((char *)p->scb,0,sizeof(struct scb_struct)); +} + +struct net_device * __init sun3_82586_probe(int unit) +{ + struct net_device *dev; + unsigned long ioaddr; + static int found = 0; + int err = -ENOMEM; + + /* check that this machine has an onboard 82586 */ + switch(idprom->id_machtype) { + case SM_SUN3|SM_3_160: + case SM_SUN3|SM_3_260: + /* these machines have 82586 */ + break; + + default: + return ERR_PTR(-ENODEV); + } + + if (found) + return ERR_PTR(-ENODEV); + + ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE); + if (!ioaddr) + return ERR_PTR(-ENOMEM); + found = 1; + + dev = alloc_etherdev(sizeof(struct priv)); + if (!dev) + goto out; + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + } + + dev->irq = IE_IRQ; + dev->base_addr = ioaddr; + err = sun3_82586_probe1(dev, ioaddr); + if (err) + goto out1; + err = register_netdev(dev); + if (err) + goto out2; + return dev; + +out2: + release_region(ioaddr, SUN3_82586_TOTAL_SIZE); +out1: + free_netdev(dev); +out: + iounmap((void __iomem *)ioaddr); + return ERR_PTR(err); +} + +static const struct net_device_ops sun3_82586_netdev_ops = { + .ndo_open = sun3_82586_open, + .ndo_stop = sun3_82586_close, + .ndo_start_xmit = sun3_82586_send_packet, + .ndo_set_rx_mode = set_multicast_list, + .ndo_tx_timeout = sun3_82586_timeout, + .ndo_get_stats = sun3_82586_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) +{ + int i, size, retval; + + if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME)) + return -EBUSY; + + /* copy in the ethernet 
address from the prom */ + for(i = 0; i < 6 ; i++) + dev->dev_addr[i] = idprom->id_ethaddr[i]; + + printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr); + + /* + * check (or search) IO-Memory, 32K + */ + size = 0x8000; + + dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000); + dev->mem_end = dev->mem_start + size; + + if(size != 0x2000 && size != 0x4000 && size != 0x8000) { + printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 or 0x8000 bytes.\n",dev->name,size); + retval = -ENODEV; + goto out; + } + if(!check586(dev,(char *) dev->mem_start,size)) { + printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size); + retval = -ENODEV; + goto out; + } + + ((struct priv *)netdev_priv(dev))->memtop = + (char *)dvma_btov(dev->mem_start); + ((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0); + alloc586(dev); + + /* set number of receive-buffs according to memsize */ + if(size == 0x2000) + ((struct priv *)netdev_priv(dev))->num_recv_buffs = + NUM_RECV_BUFFS_8; + else if(size == 0x4000) + ((struct priv *)netdev_priv(dev))->num_recv_buffs = + NUM_RECV_BUFFS_16; + else + ((struct priv *)netdev_priv(dev))->num_recv_buffs = + NUM_RECV_BUFFS_32; + + printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); + + dev->netdev_ops = &sun3_82586_netdev_ops; + dev->watchdog_timeo = HZ/20; + + dev->if_port = 0; + return 0; +out: + release_region(ioaddr, SUN3_82586_TOTAL_SIZE); + return retval; +} + + +static int init586(struct net_device *dev) +{ + void *ptr; + int i,result=0; + struct priv *p = netdev_priv(dev); + volatile struct configure_cmd_struct *cfg_cmd; + volatile struct iasetup_cmd_struct *ias_cmd; + volatile struct tdr_cmd_struct *tdr_cmd; + volatile struct mcsetup_cmd_struct *mc_cmd; + struct netdev_hw_addr *ha; + int num_addrs=netdev_mc_count(dev); + + ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); + + cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ + cfg_cmd->cmd_status = 0; + cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST); + cfg_cmd->cmd_link = 0xffff; + + cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ + cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */ + cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ + cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ + cfg_cmd->priority = 0x00; + cfg_cmd->ifs = 0x60; + cfg_cmd->time_low = 0x00; + cfg_cmd->time_high = 0xf2; + cfg_cmd->promisc = 0; + if(dev->flags & IFF_ALLMULTI) { + int len = ((char *) p->iscp - (char *) ptr - 8) / 6; + if(num_addrs > len) { + printk("%s: switching to promisc. 
mode\n",dev->name); + cfg_cmd->promisc = 1; + } + } + if(dev->flags&IFF_PROMISC) + cfg_cmd->promisc = 1; + cfg_cmd->carr_coll = 0x00; + + p->scb->cbl_offset = make16(cfg_cmd); + p->scb->cmd_ruc = 0; + + p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ + sun3_attn586(); + + WAIT_4_STAT_COMPL(cfg_cmd); + + if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK)) + { + printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status)); + return 1; + } + + /* + * individual address setup + */ + + ias_cmd = (struct iasetup_cmd_struct *)ptr; + + ias_cmd->cmd_status = 0; + ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST); + ias_cmd->cmd_link = 0xffff; + + memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); + + p->scb->cbl_offset = make16(ias_cmd); + + p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ + sun3_attn586(); + + WAIT_4_STAT_COMPL(ias_cmd); + + if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) { + printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status)); + return 1; + } + + /* + * TDR, wire check .. e.g. no resistor e.t.c + */ + + tdr_cmd = (struct tdr_cmd_struct *)ptr; + + tdr_cmd->cmd_status = 0; + tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST); + tdr_cmd->cmd_link = 0xffff; + tdr_cmd->status = 0; + + p->scb->cbl_offset = make16(tdr_cmd); + p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ + sun3_attn586(); + + WAIT_4_STAT_COMPL(tdr_cmd); + + if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL)) + { + printk("%s: Problems while running the TDR.\n",dev->name); + } + else + { + DELAY_16(); /* wait for result */ + result = swab16(tdr_cmd->status); + + p->scb->cmd_cuc = p->scb->cus & STAT_MASK; + sun3_attn586(); /* ack the interrupts */ + + if(result & TDR_LNK_OK) + ; + else if(result & TDR_XCVR_PRB) + printk("%s: TDR: Transceiver problem. 
Check the cable(s)!\n",dev->name); + else if(result & TDR_ET_OPN) + printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK); + else if(result & TDR_ET_SRT) + { + if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ + printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK); + } + else + printk("%s: TDR: Unknown status %04x\n",dev->name,result); + } + + /* + * Multicast setup + */ + if(num_addrs && !(dev->flags & IFF_PROMISC) ) + { + mc_cmd = (struct mcsetup_cmd_struct *) ptr; + mc_cmd->cmd_status = 0; + mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST); + mc_cmd->cmd_link = 0xffff; + mc_cmd->mc_cnt = swab16(num_addrs * 6); + + i = 0; + netdev_for_each_mc_addr(ha, dev) + memcpy((char *) mc_cmd->mc_list[i++], + ha->addr, ETH_ALEN); + + p->scb->cbl_offset = make16(mc_cmd); + p->scb->cmd_cuc = CUC_START; + sun3_attn586(); + + WAIT_4_STAT_COMPL(mc_cmd); + + if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) + printk("%s: Can't apply multicast-address-list.\n",dev->name); + } + + /* + * alloc nop/xmit-cmds + */ +#if (NUM_XMIT_BUFFS == 1) + for(i=0;i<2;i++) + { + p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; + p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP); + p->nop_cmds[i]->cmd_status = 0; + p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); + ptr = (char *) ptr + sizeof(struct nop_cmd_struct); + } +#else + for(i=0;inop_cmds[i] = (struct nop_cmd_struct *)ptr; + p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP); + p->nop_cmds[i]->cmd_status = 0; + p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); + ptr = (char *) ptr + sizeof(struct nop_cmd_struct); + } +#endif + + ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */ + + /* + * alloc xmit-buffs / init xmit_cmds + */ + for(i=0;ixmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/ + ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); + p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ + ptr = (char *) ptr + XMIT_BUFF_SIZE; + p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ + ptr = (char *) ptr + sizeof(struct tbd_struct); + if(ptr > (void *)dev->mem_end) + { + printk("%s: not enough shared-mem for your configuration!\n",dev->name); + return 1; + } + memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct)); + memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct)); + p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]); + p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL); + p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT); + p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); + p->xmit_buffs[i]->next = 0xffff; + p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); + } + + p->xmit_count = 0; + p->xmit_last = 0; +#ifndef NO_NOPCOMMANDS + p->nop_point = 0; +#endif + + /* + * 'start transmitter' + */ +#ifndef NO_NOPCOMMANDS + p->scb->cbl_offset = make16(p->nop_cmds[0]); + p->scb->cmd_cuc = CUC_START; + sun3_attn586(); + WAIT_4_SCB_CMD(); +#else + p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]); + p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT); +#endif + + /* + * ack. interrupts + */ + p->scb->cmd_cuc = p->scb->cus & STAT_MASK; + sun3_attn586(); + DELAY_16(); + + sun3_enaint(); + sun3_active(); + + return 0; +} + +/****************************************************** + * This is a helper routine for sun3_82586_rnr_int() and init586(). + * It sets up the Receive Frame Area (RFA). 
+ */ + +static void *alloc_rfa(struct net_device *dev,void *ptr) +{ + volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; + volatile struct rbd_struct *rbd; + int i; + struct priv *p = netdev_priv(dev); + + memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); + p->rfd_first = rfd; + + for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) { + rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) ); + rfd[i].rbd_offset = 0xffff; + } + rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */ + + ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) ); + + rbd = (struct rbd_struct *) ptr; + ptr = (void *) (rbd + p->num_recv_buffs); + + /* clr descriptors */ + memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs)); + + for(i=0;inum_recv_buffs;i++) + { + rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs)); + rbd[i].size = swab16(RECV_BUFF_SIZE); + rbd[i].buffer = make24(ptr); + ptr = (char *) ptr + RECV_BUFF_SIZE; + } + + p->rfd_top = p->rfd_first; + p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); + + p->scb->rfa_offset = make16(p->rfd_first); + p->rfd_first->rbd_offset = make16(rbd); + + return ptr; +} + + +/************************************************** + * Interrupt Handler ... + */ + +static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id) +{ + struct net_device *dev = dev_id; + unsigned short stat; + int cnt=0; + struct priv *p; + + if (!dev) { + printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq); + return IRQ_NONE; + } + p = netdev_priv(dev); + + if(debuglevel > 1) + printk("I"); + + WAIT_4_SCB_CMD(); /* wait for last command */ + + while((stat=p->scb->cus & STAT_MASK)) + { + p->scb->cmd_cuc = stat; + sun3_attn586(); + + if(stat & STAT_FR) /* received a frame */ + sun3_82586_rcv_int(dev); + + if(stat & STAT_RNR) /* RU went 'not ready' */ + { + printk("(R)"); + if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */ + { + WAIT_4_SCB_CMD(); + p->scb->cmd_ruc = RUC_RESUME; + sun3_attn586(); + WAIT_4_SCB_CMD_RUC(); + } + else + { + printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus); + sun3_82586_rnr_int(dev); + } + } + + if(stat & STAT_CX) /* command with I-bit set complete */ + sun3_82586_xmt_int(dev); + +#ifndef NO_NOPCOMMANDS + if(stat & STAT_CNA) /* CU went 'not ready' */ + { + if(netif_running(dev)) + printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus); + } +#endif + + if(debuglevel > 1) + printk("%d",cnt++); + + WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */ + if(p->scb->cmd_cuc) /* timed out? */ + { + printk("%s: Acknowledge timed out.\n",dev->name); + sun3_disint(); + break; + } + } + + if(debuglevel > 1) + printk("i"); + return IRQ_HANDLED; +} + +/******************************************************* + * receive-interrupt + */ + +static void sun3_82586_rcv_int(struct net_device *dev) +{ + int status,cnt=0; + unsigned short totlen; + struct sk_buff *skb; + struct rbd_struct *rbd; + struct priv *p = netdev_priv(dev); + + if(debuglevel > 0) + printk("R"); + + for(;(status = p->rfd_top->stat_high) & RFD_COMPL;) + { + rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); + + if(status & RFD_OK) /* frame received without error? */ + { + if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? 
*/ + { + totlen &= RBD_MASK; /* length of this frame */ + rbd->status = 0; + skb = netdev_alloc_skb(dev, totlen + 2); + if(skb != NULL) + { + skb_reserve(skb,2); + skb_put(skb,totlen); + skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); + skb->protocol=eth_type_trans(skb,dev); + netif_rx(skb); + dev->stats.rx_packets++; + } + else + dev->stats.rx_dropped++; + } + else + { + int rstat; + /* free all RBD's until RBD_LAST is set */ + totlen = 0; + while(!((rstat=swab16(rbd->status)) & RBD_LAST)) + { + totlen += rstat & RBD_MASK; + if(!rstat) + { + printk("%s: Whoops .. no end mark in RBD list\n",dev->name); + break; + } + rbd->status = 0; + rbd = (struct rbd_struct *) make32(rbd->next); + } + totlen += rstat & RBD_MASK; + rbd->status = 0; + printk("%s: received oversized frame! length: %d\n",dev->name,totlen); + dev->stats.rx_dropped++; + } + } + else /* frame !(ok), only with 'save-bad-frames' */ + { + printk("%s: oops! rfd-error-status: %04x\n",dev->name,status); + dev->stats.rx_errors++; + } + p->rfd_top->stat_high = 0; + p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ + p->rfd_top->rbd_offset = 0xffff; + p->rfd_last->last = 0; /* delete RFD_SUSP */ + p->rfd_last = p->rfd_top; + p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ + p->scb->rfa_offset = make16(p->rfd_top); + + if(debuglevel > 0) + printk("%d",cnt++); + } + + if(automatic_resume) + { + WAIT_4_SCB_CMD(); + p->scb->cmd_ruc = RUC_RESUME; + sun3_attn586(); + WAIT_4_SCB_CMD_RUC(); + } + +#ifdef WAIT_4_BUSY + { + int i; + for(i=0;i<1024;i++) + { + if(p->rfd_top->status) + break; + DELAY_16(); + if(i == 1023) + printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name); + } + } +#endif + +#if 0 + if(!at_least_one) + { + int i; + volatile struct rfd_struct *rfds=p->rfd_top; + volatile struct rbd_struct *rbds; + printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least); + for(i=0;i< (p->num_recv_buffs+4);i++) + { + rbds = (struct rbd_struct *) make32(rfds->rbd_offset); + printk("%04x:%04x ",rfds->status,rbds->status); + rfds = (struct rfd_struct *) make32(rfds->next); + } + printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status); + printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus); + } + old_at_least = at_least_one; +#endif + + if(debuglevel > 0) + printk("r"); +} + +/********************************************************** + * handle 'Receiver went not ready'. + */ + +static void sun3_82586_rnr_int(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + dev->stats.rx_errors++; + + WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ + p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ + sun3_attn586(); + WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */ + + alloc_rfa(dev,(char *)p->rfd_first); +/* maybe add a check here, before restarting the RU */ + startrecv586(dev); /* restart RU */ + + printk("%s: Receive-Unit restarted. 
Status: %04x\n",dev->name,p->scb->rus); + +} + +/********************************************************** + * handle xmit - interrupt + */ + +static void sun3_82586_xmt_int(struct net_device *dev) +{ + int status; + struct priv *p = netdev_priv(dev); + + if(debuglevel > 0) + printk("X"); + + status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status); + if(!(status & STAT_COMPL)) + printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name); + + if(status & STAT_OK) + { + dev->stats.tx_packets++; + dev->stats.collisions += (status & TCMD_MAXCOLLMASK); + } + else + { + dev->stats.tx_errors++; + if(status & TCMD_LATECOLL) { + printk("%s: late collision detected.\n",dev->name); + dev->stats.collisions++; + } + else if(status & TCMD_NOCARRIER) { + dev->stats.tx_carrier_errors++; + printk("%s: no carrier detected.\n",dev->name); + } + else if(status & TCMD_LOSTCTS) + printk("%s: loss of CTS detected.\n",dev->name); + else if(status & TCMD_UNDERRUN) { + dev->stats.tx_fifo_errors++; + printk("%s: DMA underrun detected.\n",dev->name); + } + else if(status & TCMD_MAXCOLL) { + printk("%s: Max. collisions exceeded.\n",dev->name); + dev->stats.collisions += 16; + } + } + +#if (NUM_XMIT_BUFFS > 1) + if( (++p->xmit_last) == NUM_XMIT_BUFFS) + p->xmit_last = 0; +#endif + netif_wake_queue(dev); +} + +/*********************************************************** + * (re)start the receiver + */ + +static void startrecv586(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + + WAIT_4_SCB_CMD(); + WAIT_4_SCB_CMD_RUC(); + p->scb->rfa_offset = make16(p->rfd_first); + p->scb->cmd_ruc = RUC_START; + sun3_attn586(); /* start cmd. */ + WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */ +} + +static void sun3_82586_timeout(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); +#ifndef NO_NOPCOMMANDS + if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ + { + netif_wake_queue(dev); +#ifdef DEBUG + printk("%s: strange ... timeout with CU active?!?\n",dev->name); + printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point); +#endif + p->scb->cmd_cuc = CUC_ABORT; + sun3_attn586(); + WAIT_4_SCB_CMD(); + p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); + p->scb->cmd_cuc = CUC_START; + sun3_attn586(); + WAIT_4_SCB_CMD(); + dev->trans_start = jiffies; /* prevent tx timeout */ + return 0; + } +#endif + { +#ifdef DEBUG + printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus); + printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status)); + printk("%s: check, whether you set the right interrupt number!\n",dev->name); +#endif + sun3_82586_close(dev); + sun3_82586_open(dev); + } + dev->trans_start = jiffies; /* prevent tx timeout */ +} + +/****************************************************** + * send frame + */ + +static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + int len,i; +#ifndef NO_NOPCOMMANDS + int next_nop; +#endif + struct priv *p = netdev_priv(dev); + + if(skb->len > XMIT_BUFF_SIZE) + { + printk("%s: Sorry, max. framelength is %d bytes. 
The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); + return NETDEV_TX_OK; + } + + netif_stop_queue(dev); + +#if(NUM_XMIT_BUFFS > 1) + if(test_and_set_bit(0,(void *) &p->lock)) { + printk("%s: Queue was locked\n",dev->name); + return NETDEV_TX_BUSY; + } + else +#endif + { + len = skb->len; + if (len < ETH_ZLEN) { + memset((void *)p->xmit_cbuffs[p->xmit_count], 0, + ETH_ZLEN); + len = ETH_ZLEN; + } + skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len); + +#if (NUM_XMIT_BUFFS == 1) +# ifdef NO_NOPCOMMANDS + +#ifdef DEBUG + if(p->scb->cus & CU_ACTIVE) + { + printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name); + printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status)); + } +#endif + + p->xmit_buffs[0]->size = swab16(TBD_LAST | len); + for(i=0;i<16;i++) + { + p->xmit_cmds[0]->cmd_status = 0; + WAIT_4_SCB_CMD(); + if( (p->scb->cus & CU_STATUS) == CU_SUSPEND) + p->scb->cmd_cuc = CUC_RESUME; + else + { + p->scb->cbl_offset = make16(p->xmit_cmds[0]); + p->scb->cmd_cuc = CUC_START; + } + + sun3_attn586(); + if(!i) + dev_kfree_skb(skb); + WAIT_4_SCB_CMD(); + if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */ + break; + if(p->xmit_cmds[0]->cmd_status) + break; + if(i==15) + printk("%s: Can't start transmit-command.\n",dev->name); + } +# else + next_nop = (p->nop_point + 1) & 0x1; + p->xmit_buffs[0]->size = swab16(TBD_LAST | len); + + p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link + = make16((p->nop_cmds[next_nop])); + p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; + + p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); + p->nop_point = next_nop; + dev_kfree_skb(skb); +# endif +#else + p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len); + if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS ) + next_nop = 0; + + p->xmit_cmds[p->xmit_count]->cmd_status = 0; + /* linkpointer of xmit-command already points to next nop cmd */ + p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); + p->nop_cmds[next_nop]->cmd_status = 0; + + p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); + p->xmit_count = next_nop; + + { + unsigned long flags; + local_irq_save(flags); + if(p->xmit_count != p->xmit_last) + netif_wake_queue(dev); + p->lock = 0; + local_irq_restore(flags); + } + dev_kfree_skb(skb); +#endif + } + return NETDEV_TX_OK; +} + +/******************************************* + * Someone wanna have the statistics + */ + +static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev) +{ + struct priv *p = netdev_priv(dev); + unsigned short crc,aln,rsc,ovrn; + + crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */ + p->scb->crc_errs = 0; + aln = swab16(p->scb->aln_errs); + p->scb->aln_errs = 0; + rsc = swab16(p->scb->rsc_errs); + p->scb->rsc_errs = 0; + ovrn = swab16(p->scb->ovrn_errs); + p->scb->ovrn_errs = 0; + + dev->stats.rx_crc_errors += crc; + dev->stats.rx_fifo_errors += ovrn; + dev->stats.rx_frame_errors += aln; + dev->stats.rx_dropped += rsc; + + return &dev->stats; +} + +/******************************************************** + * Set MC list .. + */ + +static void set_multicast_list(struct net_device *dev) +{ + netif_stop_queue(dev); + sun3_disint(); + alloc586(dev); + init586(dev); + startrecv586(dev); + sun3_enaint(); + netif_wake_queue(dev); +} + +#if 0 +/* + * DUMP .. 
we expect a not running CMD unit and enough space + */ +void sun3_82586_dump(struct net_device *dev,void *ptr) +{ + struct priv *p = netdev_priv(dev); + struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr; + int i; + + p->scb->cmd_cuc = CUC_ABORT; + sun3_attn586(); + WAIT_4_SCB_CMD(); + WAIT_4_SCB_CMD_RUC(); + + dump_cmd->cmd_status = 0; + dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST; + dump_cmd->dump_offset = make16((dump_cmd + 1)); + dump_cmd->cmd_link = 0xffff; + + p->scb->cbl_offset = make16(dump_cmd); + p->scb->cmd_cuc = CUC_START; + sun3_attn586(); + WAIT_4_STAT_COMPL(dump_cmd); + + if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) + printk("%s: Can't get dump information.\n",dev->name); + + for(i=0;i<170;i++) { + printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]); + if(i % 24 == 23) + printk("\n"); + } + printk("\n"); +} +#endif diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h new file mode 100644 index 000000000..79aef681a --- /dev/null +++ b/drivers/net/ethernet/i825xx/sun3_82586.h @@ -0,0 +1,318 @@ +/* + * Intel i82586 Ethernet definitions + * + * This is an extension to the Linux operating system, and is covered by the + * same Gnu Public License that covers that work. + * + * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de) + * + * I have done a look in the following sources: + * crynwr-packet-driver by Russ Nelson + * Garret A. Wollman's i82586-driver for BSD + */ + +/* + * Cloned from ni52.h, copyright as above. + * + * Modified for Sun3 OBIO i82586 by Sam Creasey (sammy@sammy.net) + */ + + +/* defines for the obio chip (not vme) */ +#define IEOB_NORSET 0x80 /* don't reset the board */ +#define IEOB_ONAIR 0x40 /* put us on the air */ +#define IEOB_ATTEN 0x20 /* attention! */ +#define IEOB_IENAB 0x10 /* interrupt enable */ +#define IEOB_XXXXX 0x08 /* free bit */ +#define IEOB_XCVRL2 0x04 /* level 2 transceiver? 
*/ +#define IEOB_BUSERR 0x02 /* bus error */ +#define IEOB_INT 0x01 /* interrupt */ + +/* where the obio one lives */ +#define IE_OBIO 0xc0000 +#define IE_IRQ 3 + +/* + * where to find the System Configuration Pointer (SCP) + */ +#define SCP_DEFAULT_ADDRESS 0xfffff4 + + +/* + * System Configuration Pointer Struct + */ + +struct scp_struct +{ + unsigned short zero_dum0; /* has to be zero */ + unsigned char sysbus; /* 0=16Bit,1=8Bit */ + unsigned char zero_dum1; /* has to be zero for 586 */ + unsigned short zero_dum2; + unsigned short zero_dum3; + char *iscp; /* pointer to the iscp-block */ +}; + + +/* + * Intermediate System Configuration Pointer (ISCP) + */ +struct iscp_struct +{ + unsigned char busy; /* 586 clears after successful init */ + unsigned char zero_dummy; /* has to be zero */ + unsigned short scb_offset; /* pointeroffset to the scb_base */ + char *scb_base; /* base-address of all 16-bit offsets */ +}; + +/* + * System Control Block (SCB) + */ +struct scb_struct +{ + unsigned char rus; + unsigned char cus; + unsigned char cmd_ruc; /* command word: RU part */ + unsigned char cmd_cuc; /* command word: CU part & ACK */ + unsigned short cbl_offset; /* pointeroffset, command block list */ + unsigned short rfa_offset; /* pointeroffset, receive frame area */ + unsigned short crc_errs; /* CRC-Error counter */ + unsigned short aln_errs; /* allignmenterror counter */ + unsigned short rsc_errs; /* Resourceerror counter */ + unsigned short ovrn_errs; /* OVerrunerror counter */ +}; + +/* + * possible command values for the command word + */ +#define RUC_MASK 0x0070 /* mask for RU commands */ +#define RUC_NOP 0x0000 /* NOP-command */ +#define RUC_START 0x0010 /* start RU */ +#define RUC_RESUME 0x0020 /* resume RU after suspend */ +#define RUC_SUSPEND 0x0030 /* suspend RU */ +#define RUC_ABORT 0x0040 /* abort receiver operation immediately */ + +#define CUC_MASK 0x07 /* mask for CU command */ +#define CUC_NOP 0x00 /* NOP-command */ +#define CUC_START 0x01 /* start execution of 1. cmd on the CBL */ +#define CUC_RESUME 0x02 /* resume after suspend */ +#define CUC_SUSPEND 0x03 /* Suspend CU */ +#define CUC_ABORT 0x04 /* abort command operation immediately */ + +#define ACK_MASK 0xf0 /* mask for ACK command */ +#define ACK_CX 0x80 /* acknowledges STAT_CX */ +#define ACK_FR 0x40 /* ack. STAT_FR */ +#define ACK_CNA 0x20 /* ack. STAT_CNA */ +#define ACK_RNR 0x10 /* ack. 
STAT_RNR */ + +/* + * possible status values for the status word + */ +#define STAT_MASK 0xf0 /* mask for cause of interrupt */ +#define STAT_CX 0x80 /* CU finished cmd with its I bit set */ +#define STAT_FR 0x40 /* RU finished receiving a frame */ +#define STAT_CNA 0x20 /* CU left active state */ +#define STAT_RNR 0x10 /* RU left ready state */ + +#define CU_STATUS 0x7 /* CU status, 0=idle */ +#define CU_SUSPEND 0x1 /* CU is suspended */ +#define CU_ACTIVE 0x2 /* CU is active */ + +#define RU_STATUS 0x70 /* RU status, 0=idle */ +#define RU_SUSPEND 0x10 /* RU suspended */ +#define RU_NOSPACE 0x20 /* RU no resources */ +#define RU_READY 0x40 /* RU is ready */ + +/* + * Receive Frame Descriptor (RFD) + */ +struct rfd_struct +{ + unsigned char stat_low; /* status word */ + unsigned char stat_high; /* status word */ + unsigned char rfd_sf; /* 82596 mode only */ + unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ + unsigned short next; /* linkoffset to next RFD */ + unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ + unsigned char dest[ETH_ALEN]; /* ethernet-address, destination */ + unsigned char source[ETH_ALEN]; /* ethernet-address, source */ + unsigned short length; /* 802.3 frame-length */ + unsigned short zero_dummy; /* dummy */ +}; + +#define RFD_LAST 0x80 /* last: last rfd in the list */ +#define RFD_SUSP 0x40 /* last: suspend RU after */ +#define RFD_COMPL 0x80 +#define RFD_OK 0x20 +#define RFD_BUSY 0x40 +#define RFD_ERR_LEN 0x10 /* Length error (if enabled length-checking */ +#define RFD_ERR_CRC 0x08 /* CRC error */ +#define RFD_ERR_ALGN 0x04 /* Alignment error */ +#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */ +#define RFD_ERR_OVR 0x01 /* DMA Overrun! */ + +#define RFD_ERR_FTS 0x0080 /* Frame to short */ +#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */ +#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */ +#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */ +#define RFD_COLLDET 0x0001 /* Detected collision during reception */ + +/* + * Receive Buffer Descriptor (RBD) + */ +struct rbd_struct +{ + unsigned short status; /* status word,number of used bytes in buff */ + unsigned short next; /* pointeroffset to next RBD */ + char *buffer; /* receive buffer address pointer */ + unsigned short size; /* size of this buffer */ + unsigned short zero_dummy; /* dummy */ +}; + +#define RBD_LAST 0x8000 /* last buffer */ +#define RBD_USED 0x4000 /* this buffer has data */ +#define RBD_MASK 0x3fff /* size-mask for length */ + +/* + * Statusvalues for Commands/RFD + */ +#define STAT_COMPL 0x8000 /* status: frame/command is complete */ +#define STAT_BUSY 0x4000 /* status: frame/command is busy */ +#define STAT_OK 0x2000 /* status: frame/command is ok */ + +/* + * Action-Commands + */ +#define CMD_NOP 0x0000 /* NOP */ +#define CMD_IASETUP 0x0001 /* initial address setup command */ +#define CMD_CONFIGURE 0x0002 /* configure command */ +#define CMD_MCSETUP 0x0003 /* MC setup command */ +#define CMD_XMIT 0x0004 /* transmit command */ +#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */ +#define CMD_DUMP 0x0006 /* dump command */ +#define CMD_DIAGNOSE 0x0007 /* diagnose command */ + +/* + * Action command bits + */ +#define CMD_LAST 0x8000 /* indicates last command in the CBL */ +#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */ +#define CMD_INT 0x2000 /* generate interrupt after execution */ + +/* + * NOP - command + */ +struct nop_cmd_struct +{ + 
unsigned short cmd_status; /* status of this command */ + unsigned short cmd_cmd; /* the command itself (+bits) */ + unsigned short cmd_link; /* offsetpointer to next command */ +}; + +/* + * IA Setup command + */ +struct iasetup_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned char iaddr[6]; +}; + +/* + * Configure command + */ +struct configure_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned char byte_cnt; /* size of the config-cmd */ + unsigned char fifo; /* fifo/recv monitor */ + unsigned char sav_bf; /* save bad frames (bit7=1)*/ + unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ + unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ + unsigned char ifs; /* inter frame spacing */ + unsigned char time_low; /* slot time low */ + unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */ + unsigned char promisc; /* promisc-mode(0) , et al (1-7) */ + unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */ + unsigned char fram_len; /* minimal frame len */ + unsigned char dummy; /* dummy */ +}; + +/* + * Multicast Setup command + */ +struct mcsetup_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short mc_cnt; /* number of bytes in the MC-List */ + unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ +}; + +/* + * DUMP command + */ +struct dump_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short dump_offset; /* pointeroffset to DUMP space */ +}; + +/* + * transmit command + */ +struct transmit_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short tbd_offset; /* pointeroffset to TBD */ + unsigned char dest[6]; /* destination address of the frame */ + unsigned short length; /* user defined: 802.3 length / Ether type */ +}; + +#define TCMD_ERRMASK 0x0fa0 +#define TCMD_MAXCOLLMASK 0x000f +#define TCMD_MAXCOLL 0x0020 +#define TCMD_HEARTBEAT 0x0040 +#define TCMD_DEFERRED 0x0080 +#define TCMD_UNDERRUN 0x0100 +#define TCMD_LOSTCTS 0x0200 +#define TCMD_NOCARRIER 0x0400 +#define TCMD_LATECOLL 0x0800 + +struct tdr_cmd_struct +{ + unsigned short cmd_status; + unsigned short cmd_cmd; + unsigned short cmd_link; + unsigned short status; +}; + +#define TDR_LNK_OK 0x8000 /* No link problem identified */ +#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */ +#define TDR_ET_OPN 0x2000 /* open, no correct termination */ +#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */ +#define TDR_TIMEMASK 0x07ff /* mask for the time field */ + +/* + * Transmit Buffer Descriptor (TBD) + */ +struct tbd_struct +{ + unsigned short size; /* size + EOF-Flag(15) */ + unsigned short next; /* pointeroffset to next TBD */ + char *buffer; /* pointer to buffer */ +}; + +#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ + + + + diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig new file mode 100644 index 000000000..563a1ac71 --- /dev/null +++ b/drivers/net/ethernet/ibm/Kconfig @@ -0,0 +1,42 @@ +# +# IBM device configuration. +# + +config NET_VENDOR_IBM + bool "IBM devices" + default y + depends on PPC_PSERIES || PPC_DCR || (IBMEBUS && SPARSEMEM) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about IBM devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_IBM + +config IBMVETH + tristate "IBM LAN Virtual Ethernet support" + depends on PPC_PSERIES + ---help--- + This driver supports virtual ethernet adapters on newer IBM iSeries + and pSeries systems. + + To compile this driver as a module, choose M here. The module will + be called ibmveth. + +source "drivers/net/ethernet/ibm/emac/Kconfig" + +config EHEA + tristate "eHEA Ethernet support" + depends on IBMEBUS && SPARSEMEM + ---help--- + This driver supports the IBM pSeries eHEA ethernet adapter. + + To compile the driver as a module, choose M here. The module + will be called ehea. + +endif # NET_VENDOR_IBM diff --git a/drivers/net/ethernet/ibm/Makefile b/drivers/net/ethernet/ibm/Makefile new file mode 100644 index 000000000..2f04e71a5 --- /dev/null +++ b/drivers/net/ethernet/ibm/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for th IBM network device drivers. +# + +obj-$(CONFIG_IBMVETH) += ibmveth.o +obj-$(CONFIG_IBM_EMAC) += emac/ +obj-$(CONFIG_EHEA) += ehea/ diff --git a/drivers/net/ethernet/ibm/ehea/Makefile b/drivers/net/ethernet/ibm/ehea/Makefile new file mode 100644 index 000000000..cd473e295 --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the eHEA ethernet device driver for IBM eServer System p +# +ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o +obj-$(CONFIG_EHEA) += ehea.o + diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h new file mode 100644 index 000000000..6be7b9839 --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea.h @@ -0,0 +1,490 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea.h + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __EHEA_H__ +#define __EHEA_H__ + +#include +#include +#include +#include + +#include +#include + +#define DRV_NAME "ehea" +#define DRV_VERSION "EHEA_0107" + +/* eHEA capability flags */ +#define DLPAR_PORT_ADD_REM 1 +#define DLPAR_MEM_ADD 2 +#define DLPAR_MEM_REM 4 +#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM) + +#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ + | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +#define EHEA_MAX_ENTRIES_RQ1 32767 +#define EHEA_MAX_ENTRIES_RQ2 16383 +#define EHEA_MAX_ENTRIES_RQ3 16383 +#define EHEA_MAX_ENTRIES_SQ 32767 +#define EHEA_MIN_ENTRIES_QP 127 + +#define EHEA_SMALL_QUEUES + +#ifdef EHEA_SMALL_QUEUES +#define EHEA_MAX_CQE_COUNT 1023 +#define EHEA_DEF_ENTRIES_SQ 1023 +#define EHEA_DEF_ENTRIES_RQ1 1023 +#define EHEA_DEF_ENTRIES_RQ2 1023 +#define EHEA_DEF_ENTRIES_RQ3 511 +#else +#define EHEA_MAX_CQE_COUNT 4080 +#define EHEA_DEF_ENTRIES_SQ 4080 +#define EHEA_DEF_ENTRIES_RQ1 8160 +#define EHEA_DEF_ENTRIES_RQ2 2040 +#define EHEA_DEF_ENTRIES_RQ3 2040 +#endif + +#define EHEA_MAX_ENTRIES_EQ 20 + +#define EHEA_SG_SQ 2 +#define EHEA_SG_RQ1 1 +#define EHEA_SG_RQ2 0 +#define EHEA_SG_RQ3 0 + +#define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */ +#define EHEA_RQ2_PKT_SIZE 2048 +#define EHEA_L_PKT_SIZE 256 /* low latency */ + +/* Send completion signaling */ + +/* Protection Domain Identifier */ +#define EHEA_PD_ID 0xaabcdeff + +#define EHEA_RQ2_THRESHOLD 1 +#define EHEA_RQ3_THRESHOLD 4 /* use RQ3 threshold of 2048 bytes */ + +#define EHEA_SPEED_10G 10000 +#define EHEA_SPEED_1G 1000 +#define EHEA_SPEED_100M 100 +#define EHEA_SPEED_10M 10 +#define EHEA_SPEED_AUTONEG 0 + +/* Broadcast/Multicast registration types */ +#define EHEA_BCMC_SCOPE_ALL 0x08 +#define EHEA_BCMC_SCOPE_SINGLE 0x00 +#define EHEA_BCMC_MULTICAST 0x04 +#define EHEA_BCMC_BROADCAST 0x00 +#define EHEA_BCMC_UNTAGGED 0x02 +#define EHEA_BCMC_TAGGED 0x00 +#define EHEA_BCMC_VLANID_ALL 0x01 +#define EHEA_BCMC_VLANID_SINGLE 0x00 + +#define EHEA_CACHE_LINE 128 + +/* Memory Regions */ +#define EHEA_MR_ACC_CTRL 0x00800000 + +#define EHEA_BUSMAP_START 0x8000000000000000ULL +#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL +#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */ +#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2) +#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT) +#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */ +#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1) + + +#define EHEA_WATCH_DOG_TIMEOUT 10*HZ + +/* utility functions */ + +void ehea_dump(void *adr, int len, char *msg); + +#define EHEA_BMASK(pos, length) (((pos) << 16) + (length)) + +#define EHEA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1)) + +#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff) + +#define EHEA_BMASK_MASK(mask) \ + (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff)) + +#define EHEA_BMASK_SET(mask, value) \ + ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask)) + +#define EHEA_BMASK_GET(mask, value) \ + (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask))) + +/* + * Generic ehea page + */ +struct ehea_page { + u8 entries[PAGE_SIZE]; +}; + +/* + * Generic queue in linux kernel virtual memory + */ +struct hw_queue { + u64 current_q_offset; /* current queue entry */ + struct ehea_page **queue_pages; /* array of pages belonging to queue */ + u32 qe_size; /* queue entry size */ + u32 queue_length; /* queue length allocated in bytes */ + u32 pagesize; + u32 toggle_state; /* toggle flag - per 
page */ + u32 reserved; /* 64 bit alignment */ +}; + +/* + * For pSeries this is a 64bit memory address where + * I/O memory is mapped into CPU address space + */ +struct h_epa { + void __iomem *addr; +}; + +struct h_epa_user { + u64 addr; +}; + +struct h_epas { + struct h_epa kernel; /* kernel space accessible resource, + set to 0 if unused */ + struct h_epa_user user; /* user space accessible resource + set to 0 if unused */ +}; + +/* + * Memory map data structures + */ +struct ehea_dir_bmap +{ + u64 ent[EHEA_MAP_ENTRIES]; +}; +struct ehea_top_bmap +{ + struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES]; +}; +struct ehea_bmap +{ + struct ehea_top_bmap *top[EHEA_MAP_ENTRIES]; +}; + +struct ehea_qp; +struct ehea_cq; +struct ehea_eq; +struct ehea_port; +struct ehea_av; + +/* + * Queue attributes passed to ehea_create_qp() + */ +struct ehea_qp_init_attr { + /* input parameter */ + u32 qp_token; /* queue token */ + u8 low_lat_rq1; + u8 signalingtype; /* cqe generation flag */ + u8 rq_count; /* num of receive queues */ + u8 eqe_gen; /* eqe generation flag */ + u16 max_nr_send_wqes; /* max number of send wqes */ + u16 max_nr_rwqes_rq1; /* max number of receive wqes */ + u16 max_nr_rwqes_rq2; + u16 max_nr_rwqes_rq3; + u8 wqe_size_enc_sq; + u8 wqe_size_enc_rq1; + u8 wqe_size_enc_rq2; + u8 wqe_size_enc_rq3; + u8 swqe_imm_data_len; /* immediate data length for swqes */ + u16 port_nr; + u16 rq2_threshold; + u16 rq3_threshold; + u64 send_cq_handle; + u64 recv_cq_handle; + u64 aff_eq_handle; + + /* output parameter */ + u32 qp_nr; + u16 act_nr_send_wqes; + u16 act_nr_rwqes_rq1; + u16 act_nr_rwqes_rq2; + u16 act_nr_rwqes_rq3; + u8 act_wqe_size_enc_sq; + u8 act_wqe_size_enc_rq1; + u8 act_wqe_size_enc_rq2; + u8 act_wqe_size_enc_rq3; + u32 nr_sq_pages; + u32 nr_rq1_pages; + u32 nr_rq2_pages; + u32 nr_rq3_pages; + u32 liobn_sq; + u32 liobn_rq1; + u32 liobn_rq2; + u32 liobn_rq3; +}; + +/* + * Event Queue attributes, passed as parameter + */ +struct ehea_eq_attr { + u32 type; + u32 max_nr_of_eqes; + u8 eqe_gen; /* generate eqe flag */ + u64 eq_handle; + u32 act_nr_of_eqes; + u32 nr_pages; + u32 ist1; /* Interrupt service token */ + u32 ist2; + u32 ist3; + u32 ist4; +}; + + +/* + * Event Queue + */ +struct ehea_eq { + struct ehea_adapter *adapter; + struct hw_queue hw_queue; + u64 fw_handle; + struct h_epas epas; + spinlock_t spinlock; + struct ehea_eq_attr attr; +}; + +/* + * HEA Queues + */ +struct ehea_qp { + struct ehea_adapter *adapter; + u64 fw_handle; /* QP handle for firmware calls */ + struct hw_queue hw_squeue; + struct hw_queue hw_rqueue1; + struct hw_queue hw_rqueue2; + struct hw_queue hw_rqueue3; + struct h_epas epas; + struct ehea_qp_init_attr init_attr; +}; + +/* + * Completion Queue attributes + */ +struct ehea_cq_attr { + /* input parameter */ + u32 max_nr_of_cqes; + u32 cq_token; + u64 eq_handle; + + /* output parameter */ + u32 act_nr_of_cqes; + u32 nr_pages; +}; + +/* + * Completion Queue + */ +struct ehea_cq { + struct ehea_adapter *adapter; + u64 fw_handle; + struct hw_queue hw_queue; + struct h_epas epas; + struct ehea_cq_attr attr; +}; + +/* + * Memory Region + */ +struct ehea_mr { + struct ehea_adapter *adapter; + u64 handle; + u64 vaddr; + u32 lkey; +}; + +/* + * Port state information + */ +struct port_stats { + int poll_receive_errors; + int queue_stopped; + int err_tcp_cksum; + int err_ip_cksum; + int err_frame_crc; +}; + +#define EHEA_IRQ_NAME_SIZE 20 + +/* + * Queue SKB Array + */ +struct ehea_q_skb_arr { + struct sk_buff **arr; /* skb array for queue */ + int len; /* array length */ + 
int index; /* array index */ + int os_skbs; /* rq2/rq3 only: outstanding skbs */ +}; + +/* + * Port resources + */ +struct ehea_port_res { + struct napi_struct napi; + struct port_stats p_stats; + struct ehea_mr send_mr; /* send memory region */ + struct ehea_mr recv_mr; /* receive memory region */ + struct ehea_port *port; + char int_recv_name[EHEA_IRQ_NAME_SIZE]; + char int_send_name[EHEA_IRQ_NAME_SIZE]; + struct ehea_qp *qp; + struct ehea_cq *send_cq; + struct ehea_cq *recv_cq; + struct ehea_eq *eq; + struct ehea_q_skb_arr rq1_skba; + struct ehea_q_skb_arr rq2_skba; + struct ehea_q_skb_arr rq3_skba; + struct ehea_q_skb_arr sq_skba; + int sq_skba_size; + int swqe_refill_th; + atomic_t swqe_avail; + int swqe_ll_count; + u32 swqe_id_counter; + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; + int sq_restart_flag; +}; + + +#define EHEA_MAX_PORTS 16 + +#define EHEA_NUM_PORTRES_FW_HANDLES 6 /* QP handle, SendCQ handle, + RecvCQ handle, EQ handle, + SendMR handle, RecvMR handle */ +#define EHEA_NUM_PORT_FW_HANDLES 1 /* EQ handle */ +#define EHEA_NUM_ADAPTER_FW_HANDLES 2 /* MR handle, NEQ handle */ + +struct ehea_adapter { + u64 handle; + struct platform_device *ofdev; + struct ehea_port *port[EHEA_MAX_PORTS]; + struct ehea_eq *neq; /* notification event queue */ + struct tasklet_struct neq_tasklet; + struct ehea_mr mr; + u32 pd; /* protection domain */ + u64 max_mc_mac; /* max number of multicast mac addresses */ + int active_ports; + struct list_head list; +}; + + +struct ehea_mc_list { + struct list_head list; + u64 macaddr; +}; + +/* kdump support */ +struct ehea_fw_handle_entry { + u64 adh; /* Adapter Handle */ + u64 fwh; /* Firmware Handle */ +}; + +struct ehea_fw_handle_array { + struct ehea_fw_handle_entry *arr; + int num_entries; + struct mutex lock; +}; + +struct ehea_bcmc_reg_entry { + u64 adh; /* Adapter Handle */ + u32 port_id; /* Logical Port Id */ + u8 reg_type; /* Registration Type */ + u64 macaddr; +}; + +struct ehea_bcmc_reg_array { + struct ehea_bcmc_reg_entry *arr; + int num_entries; + spinlock_t lock; +}; + +#define EHEA_PORT_UP 1 +#define EHEA_PORT_DOWN 0 +#define EHEA_PHY_LINK_UP 1 +#define EHEA_PHY_LINK_DOWN 0 +#define EHEA_MAX_PORT_RES 16 +struct ehea_port { + struct ehea_adapter *adapter; /* adapter that owns this port */ + struct net_device *netdev; + struct rtnl_link_stats64 stats; + struct ehea_port_res port_res[EHEA_MAX_PORT_RES]; + struct platform_device ofdev; /* Open Firmware Device */ + struct ehea_mc_list *mc_list; /* Multicast MAC addresses */ + struct ehea_eq *qp_eq; + struct work_struct reset_task; + struct delayed_work stats_work; + struct mutex port_lock; + char int_aff_name[EHEA_IRQ_NAME_SIZE]; + int allmulti; /* Indicates IFF_ALLMULTI state */ + int promisc; /* Indicates IFF_PROMISC state */ + int num_mcs; + int resets; + unsigned long flags; + u64 mac_addr; + u32 logical_port_id; + u32 port_speed; + u32 msg_enable; + u32 sig_comp_iv; + u32 state; + u8 phy_link; + u8 full_duplex; + u8 autoneg; + u8 num_def_qps; + wait_queue_head_t swqe_avail_wq; + wait_queue_head_t restart_wq; +}; + +struct port_res_cfg { + int max_entries_rcq; + int max_entries_scq; + int max_entries_sq; + int max_entries_rq1; + int max_entries_rq2; + int max_entries_rq3; +}; + +enum ehea_flag_bits { + __EHEA_STOP_XFER, + __EHEA_DISABLE_PORT_RESET +}; + +void ehea_set_ethtool_ops(struct net_device *netdev); +int ehea_sense_port_attr(struct ehea_port *port); +int ehea_set_portspeed(struct ehea_port *port, u32 port_speed); + +#endif /* __EHEA_H__ */ diff --git 
a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c new file mode 100644 index 000000000..85a386645 --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c @@ -0,0 +1,282 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "ehea.h" +#include "ehea_phyp.h" + +static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ehea_port *port = netdev_priv(dev); + u32 speed; + int ret; + + ret = ehea_sense_port_attr(port); + + if (ret) + return ret; + + if (netif_carrier_ok(dev)) { + switch (port->port_speed) { + case EHEA_SPEED_10M: + speed = SPEED_10; + break; + case EHEA_SPEED_100M: + speed = SPEED_100; + break; + case EHEA_SPEED_1G: + speed = SPEED_1000; + break; + case EHEA_SPEED_10G: + speed = SPEED_10000; + break; + default: + speed = -1; + break; /* BUG */ + } + cmd->duplex = port->full_duplex == 1 ? + DUPLEX_FULL : DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->duplex = DUPLEX_UNKNOWN; + } + ethtool_cmd_speed_set(cmd, speed); + + if (cmd->speed == SPEED_10000) { + cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + cmd->port = PORT_FIBRE; + } else { + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full + | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg + | SUPPORTED_TP); + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg + | ADVERTISED_TP); + cmd->port = PORT_TP; + } + + cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ehea_port *port = netdev_priv(dev); + int ret = 0; + u32 sp; + + if (cmd->autoneg == AUTONEG_ENABLE) { + sp = EHEA_SPEED_AUTONEG; + goto doit; + } + + switch (cmd->speed) { + case SPEED_10: + if (cmd->duplex == DUPLEX_FULL) + sp = H_SPEED_10M_F; + else + sp = H_SPEED_10M_H; + break; + + case SPEED_100: + if (cmd->duplex == DUPLEX_FULL) + sp = H_SPEED_100M_F; + else + sp = H_SPEED_100M_H; + break; + + case SPEED_1000: + if (cmd->duplex == DUPLEX_FULL) + sp = H_SPEED_1G_F; + else + ret = -EINVAL; + break; + + case SPEED_10000: + if (cmd->duplex == DUPLEX_FULL) + sp = H_SPEED_10G_F; + else + ret = -EINVAL; + break; + + default: + ret = -EINVAL; + break; + } + + if (ret) + goto out; +doit: + ret = ehea_set_portspeed(port, sp); + + if (!ret) + netdev_info(dev, + "Port speed successfully set: %dMbps %s Duplex\n", + port->port_speed, + port->full_duplex == 1 ? 
"Full" : "Half"); +out: + return ret; +} + +static int ehea_nway_reset(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + int ret; + + ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); + + if (!ret) + netdev_info(port->netdev, + "Port speed successfully set: %dMbps %s Duplex\n", + port->port_speed, + port->full_duplex == 1 ? "Full" : "Half"); + return ret; +} + +static void ehea_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static u32 ehea_get_msglevel(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + return port->msg_enable; +} + +static void ehea_set_msglevel(struct net_device *dev, u32 value) +{ + struct ehea_port *port = netdev_priv(dev); + port->msg_enable = value; +} + +static const char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = { + {"sig_comp_iv"}, + {"swqe_refill_th"}, + {"port resets"}, + {"Receive errors"}, + {"TCP cksum errors"}, + {"IP cksum errors"}, + {"Frame cksum errors"}, + {"num SQ stopped"}, + {"PR0 free_swqes"}, + {"PR1 free_swqes"}, + {"PR2 free_swqes"}, + {"PR3 free_swqes"}, + {"PR4 free_swqes"}, + {"PR5 free_swqes"}, + {"PR6 free_swqes"}, + {"PR7 free_swqes"}, + {"PR8 free_swqes"}, + {"PR9 free_swqes"}, + {"PR10 free_swqes"}, + {"PR11 free_swqes"}, + {"PR12 free_swqes"}, + {"PR13 free_swqes"}, + {"PR14 free_swqes"}, + {"PR15 free_swqes"}, +}; + +static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) { + memcpy(data, &ehea_ethtool_stats_keys, + sizeof(ehea_ethtool_stats_keys)); + } +} + +static int ehea_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ehea_ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } +} + +static void ehea_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + int i, k, tmp; + struct ehea_port *port = netdev_priv(dev); + + for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++) + data[i] = 0; + i = 0; + + data[i++] = port->sig_comp_iv; + data[i++] = port->port_res[0].swqe_refill_th; + data[i++] = port->resets; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.poll_receive_errors; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.err_tcp_cksum; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.err_ip_cksum; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.err_frame_crc; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp += port->port_res[k].p_stats.queue_stopped; + data[i++] = tmp; + + for (k = 0; k < 16; k++) + data[i++] = atomic_read(&port->port_res[k].swqe_avail); +} + +static const struct ethtool_ops ehea_ethtool_ops = { + .get_settings = ehea_get_settings, + .get_drvinfo = ehea_get_drvinfo, + .get_msglevel = ehea_get_msglevel, + .set_msglevel = ehea_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = ehea_get_strings, + .get_sset_count = ehea_get_sset_count, + .get_ethtool_stats = ehea_get_ethtool_stats, + .set_settings = ehea_set_settings, + .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ +}; + +void ehea_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ehea_ethtool_ops; +} diff --git 
a/drivers/net/ethernet/ibm/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h new file mode 100644 index 000000000..180d4128a --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h @@ -0,0 +1,267 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __EHEA_HW_H__ +#define __EHEA_HW_H__ + +#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63) +#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63) +#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63) +#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63) + +#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x) + +struct ehea_qptemm { + u64 qpx_hcr; + u64 qpx_c; + u64 qpx_herr; + u64 qpx_aer; + u64 qpx_sqa; + u64 qpx_sqc; + u64 qpx_rq1a; + u64 qpx_rq1c; + u64 qpx_st; + u64 qpx_aerr; + u64 qpx_tenure; + u64 qpx_reserved1[(0x098 - 0x058) / 8]; + u64 qpx_portp; + u64 qpx_reserved2[(0x100 - 0x0A0) / 8]; + u64 qpx_t; + u64 qpx_sqhp; + u64 qpx_sqptp; + u64 qpx_reserved3[(0x140 - 0x118) / 8]; + u64 qpx_sqwsize; + u64 qpx_reserved4[(0x170 - 0x148) / 8]; + u64 qpx_sqsize; + u64 qpx_reserved5[(0x1B0 - 0x178) / 8]; + u64 qpx_sigt; + u64 qpx_wqecnt; + u64 qpx_rq1hp; + u64 qpx_rq1ptp; + u64 qpx_rq1size; + u64 qpx_reserved6[(0x220 - 0x1D8) / 8]; + u64 qpx_rq1wsize; + u64 qpx_reserved7[(0x240 - 0x228) / 8]; + u64 qpx_pd; + u64 qpx_scqn; + u64 qpx_rcqn; + u64 qpx_aeqn; + u64 reserved49; + u64 qpx_ram; + u64 qpx_reserved8[(0x300 - 0x270) / 8]; + u64 qpx_rq2a; + u64 qpx_rq2c; + u64 qpx_rq2hp; + u64 qpx_rq2ptp; + u64 qpx_rq2size; + u64 qpx_rq2wsize; + u64 qpx_rq2th; + u64 qpx_rq3a; + u64 qpx_rq3c; + u64 qpx_rq3hp; + u64 qpx_rq3ptp; + u64 qpx_rq3size; + u64 qpx_rq3wsize; + u64 qpx_rq3th; + u64 qpx_lpn; + u64 qpx_reserved9[(0x400 - 0x378) / 8]; + u64 reserved_ext[(0x500 - 0x400) / 8]; + u64 reserved2[(0x1000 - 0x500) / 8]; +}; + +#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0) + +#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x) + +struct ehea_mrmwmm { + u64 mrx_hcr; + u64 mrx_c; + u64 mrx_herr; + u64 mrx_aer; + u64 mrx_pp; + u64 reserved1; + u64 reserved2; + u64 reserved3; + u64 reserved4[(0x200 - 0x40) / 8]; + u64 mrx_ctl[64]; +}; + +#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x) + +struct ehea_qpedmm { + + u64 reserved0[(0x400) / 8]; + u64 qpedx_phh; + u64 qpedx_ppsgp; + u64 qpedx_ppsgu; + u64 qpedx_ppdgp; + u64 qpedx_ppdgu; + u64 qpedx_aph; + u64 qpedx_apsgp; + u64 qpedx_apsgu; + u64 qpedx_apdgp; + u64 qpedx_apdgu; + u64 qpedx_apav; + u64 qpedx_apsav; + u64 qpedx_hcr; + u64 reserved1[4]; + u64 qpedx_rrl0; + u64 qpedx_rrrkey0; + u64 qpedx_rrva0; + u64 reserved2; + u64 qpedx_rrl1; + u64 qpedx_rrrkey1; + u64 qpedx_rrva1; + u64 reserved3; + u64 qpedx_rrl2; + u64 qpedx_rrrkey2; + u64 
qpedx_rrva2; + u64 reserved4; + u64 qpedx_rrl3; + u64 qpedx_rrrkey3; + u64 qpedx_rrva3; +}; + +#define CQX_FECADDER EHEA_BMASK_IBM(32, 63) +#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63) +#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0) +#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0) + +#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x) + +struct ehea_cqtemm { + u64 cqx_hcr; + u64 cqx_c; + u64 cqx_herr; + u64 cqx_aer; + u64 cqx_ptp; + u64 cqx_tp; + u64 cqx_fec; + u64 cqx_feca; + u64 cqx_ep; + u64 cqx_eq; + u64 reserved1; + u64 cqx_n0; + u64 cqx_n1; + u64 reserved2[(0x1000 - 0x60) / 8]; +}; + +#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x) + +struct ehea_eqtemm { + u64 eqx_hcr; + u64 eqx_c; + u64 eqx_herr; + u64 eqx_aer; + u64 eqx_ptp; + u64 eqx_tp; + u64 eqx_ssba; + u64 eqx_psba; + u64 eqx_cec; + u64 eqx_meql; + u64 eqx_xisbi; + u64 eqx_xisc; + u64 eqx_it; +}; + +/* + * These access functions will be changed when the dissuccsion about + * the new access methods for POWER has settled. + */ + +static inline u64 epa_load(struct h_epa epa, u32 offset) +{ + return __raw_readq((void __iomem *)(epa.addr + offset)); +} + +static inline void epa_store(struct h_epa epa, u32 offset, u64 value) +{ + __raw_writeq(value, (void __iomem *)(epa.addr + offset)); + epa_load(epa, offset); /* synchronize explicitly to eHEA */ +} + +static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value) +{ + __raw_writeq(value, (void __iomem *)(epa.addr + offset)); +} + +#define epa_store_cq(epa, offset, value)\ + epa_store(epa, CQTEMM_OFFSET(offset), value) +#define epa_load_cq(epa, offset)\ + epa_load(epa, CQTEMM_OFFSET(offset)) + +static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes) +{ + struct h_epa epa = qp->epas.kernel; + epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa), + EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes)); +} + +static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes) +{ + struct h_epa epa = qp->epas.kernel; + epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a), + EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes)); +} + +static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes) +{ + struct h_epa epa = qp->epas.kernel; + epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a), + EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes)); +} + +static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes) +{ + struct h_epa epa = qp->epas.kernel; + epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a), + EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes)); +} + +static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes) +{ + struct h_epa epa = cq->epas.kernel; + epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca), + EHEA_BMASK_SET(CQX_FECADDER, nr_cqes)); +} + +static inline void ehea_reset_cq_n1(struct ehea_cq *cq) +{ + struct h_epa epa = cq->epas.kernel; + epa_store_cq(epa, cqx_n1, + EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1)); +} + +static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq) +{ + struct h_epa epa = my_cq->epas.kernel; + epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep), + EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0)); +} + +#endif /* __EHEA_HW_H__ */ diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c new file mode 100644 index 000000000..2a0dc127d --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -0,0 +1,3620 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 
2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ehea.h" +#include "ehea_qmr.h" +#include "ehea_phyp.h" + + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Christoph Raisch "); +MODULE_DESCRIPTION("IBM eServer HEA Driver"); +MODULE_VERSION(DRV_VERSION); + + +static int msg_level = -1; +static int rq1_entries = EHEA_DEF_ENTRIES_RQ1; +static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; +static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; +static int sq_entries = EHEA_DEF_ENTRIES_SQ; +static int use_mcs = 1; +static int prop_carrier_state; + +module_param(msg_level, int, 0); +module_param(rq1_entries, int, 0); +module_param(rq2_entries, int, 0); +module_param(rq3_entries, int, 0); +module_param(sq_entries, int, 0); +module_param(prop_carrier_state, int, 0); +module_param(use_mcs, int, 0); + +MODULE_PARM_DESC(msg_level, "msg_level"); +MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical " + "port to stack. 1:yes, 0:no. Default = 0 "); +MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " + "[2^x - 1], x = [7..14]. Default = " + __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")"); +MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 " + "[2^x - 1], x = [7..14]. Default = " + __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")"); +MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 " + "[2^x - 1], x = [7..14]. Default = " + __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")"); +MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " + "[2^x - 1], x = [7..14]. 
Default = " + __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); +MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, " + "Default = 1"); + +static int port_name_cnt; +static LIST_HEAD(adapter_list); +static unsigned long ehea_driver_flags; +static DEFINE_MUTEX(dlpar_mem_lock); +static struct ehea_fw_handle_array ehea_fw_handles; +static struct ehea_bcmc_reg_array ehea_bcmc_regs; + + +static int ehea_probe_adapter(struct platform_device *dev); + +static int ehea_remove(struct platform_device *dev); + +static const struct of_device_id ehea_module_device_table[] = { + { + .name = "lhea", + .compatible = "IBM,lhea", + }, + { + .type = "network", + .compatible = "IBM,lhea-ethernet", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, ehea_module_device_table); + +static const struct of_device_id ehea_device_table[] = { + { + .name = "lhea", + .compatible = "IBM,lhea", + }, + {}, +}; + +static struct platform_driver ehea_driver = { + .driver = { + .name = "ehea", + .owner = THIS_MODULE, + .of_match_table = ehea_device_table, + }, + .probe = ehea_probe_adapter, + .remove = ehea_remove, +}; + +void ehea_dump(void *adr, int len, char *msg) +{ + int x; + unsigned char *deb = adr; + for (x = 0; x < len; x += 16) { + pr_info("%s adr=%p ofs=%04x %016llx %016llx\n", + msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); + deb += 16; + } +} + +static void ehea_schedule_port_reset(struct ehea_port *port) +{ + if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) + schedule_work(&port->reset_task); +} + +static void ehea_update_firmware_handles(void) +{ + struct ehea_fw_handle_entry *arr = NULL; + struct ehea_adapter *adapter; + int num_adapters = 0; + int num_ports = 0; + int num_portres = 0; + int i = 0; + int num_fw_handles, k, l; + + /* Determine number of handles */ + mutex_lock(&ehea_fw_handles.lock); + + list_for_each_entry(adapter, &adapter_list, list) { + num_adapters++; + + for (k = 0; k < EHEA_MAX_PORTS; k++) { + struct ehea_port *port = adapter->port[k]; + + if (!port || (port->state != EHEA_PORT_UP)) + continue; + + num_ports++; + num_portres += port->num_def_qps; + } + } + + num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES + + num_ports * EHEA_NUM_PORT_FW_HANDLES + + num_portres * EHEA_NUM_PORTRES_FW_HANDLES; + + if (num_fw_handles) { + arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL); + if (!arr) + goto out; /* Keep the existing array */ + } else + goto out_update; + + list_for_each_entry(adapter, &adapter_list, list) { + if (num_adapters == 0) + break; + + for (k = 0; k < EHEA_MAX_PORTS; k++) { + struct ehea_port *port = adapter->port[k]; + + if (!port || (port->state != EHEA_PORT_UP) || + (num_ports == 0)) + continue; + + for (l = 0; l < port->num_def_qps; l++) { + struct ehea_port_res *pr = &port->port_res[l]; + + arr[i].adh = adapter->handle; + arr[i++].fwh = pr->qp->fw_handle; + arr[i].adh = adapter->handle; + arr[i++].fwh = pr->send_cq->fw_handle; + arr[i].adh = adapter->handle; + arr[i++].fwh = pr->recv_cq->fw_handle; + arr[i].adh = adapter->handle; + arr[i++].fwh = pr->eq->fw_handle; + arr[i].adh = adapter->handle; + arr[i++].fwh = pr->send_mr.handle; + arr[i].adh = adapter->handle; + arr[i++].fwh = pr->recv_mr.handle; + } + arr[i].adh = adapter->handle; + arr[i++].fwh = port->qp_eq->fw_handle; + num_ports--; + } + + arr[i].adh = adapter->handle; + arr[i++].fwh = adapter->neq->fw_handle; + + if (adapter->mr.handle) { + arr[i].adh = adapter->handle; + arr[i++].fwh = adapter->mr.handle; + } + num_adapters--; + } + +out_update: + kfree(ehea_fw_handles.arr); + 
ehea_fw_handles.arr = arr; + ehea_fw_handles.num_entries = i; +out: + mutex_unlock(&ehea_fw_handles.lock); +} + +static void ehea_update_bcmc_registrations(void) +{ + unsigned long flags; + struct ehea_bcmc_reg_entry *arr = NULL; + struct ehea_adapter *adapter; + struct ehea_mc_list *mc_entry; + int num_registrations = 0; + int i = 0; + int k; + + spin_lock_irqsave(&ehea_bcmc_regs.lock, flags); + + /* Determine number of registrations */ + list_for_each_entry(adapter, &adapter_list, list) + for (k = 0; k < EHEA_MAX_PORTS; k++) { + struct ehea_port *port = adapter->port[k]; + + if (!port || (port->state != EHEA_PORT_UP)) + continue; + + num_registrations += 2; /* Broadcast registrations */ + + list_for_each_entry(mc_entry, &port->mc_list->list,list) + num_registrations += 2; + } + + if (num_registrations) { + arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC); + if (!arr) + goto out; /* Keep the existing array */ + } else + goto out_update; + + list_for_each_entry(adapter, &adapter_list, list) { + for (k = 0; k < EHEA_MAX_PORTS; k++) { + struct ehea_port *port = adapter->port[k]; + + if (!port || (port->state != EHEA_PORT_UP)) + continue; + + if (num_registrations == 0) + goto out_update; + + arr[i].adh = adapter->handle; + arr[i].port_id = port->logical_port_id; + arr[i].reg_type = EHEA_BCMC_BROADCAST | + EHEA_BCMC_UNTAGGED; + arr[i++].macaddr = port->mac_addr; + + arr[i].adh = adapter->handle; + arr[i].port_id = port->logical_port_id; + arr[i].reg_type = EHEA_BCMC_BROADCAST | + EHEA_BCMC_VLANID_ALL; + arr[i++].macaddr = port->mac_addr; + num_registrations -= 2; + + list_for_each_entry(mc_entry, + &port->mc_list->list, list) { + if (num_registrations == 0) + goto out_update; + + arr[i].adh = adapter->handle; + arr[i].port_id = port->logical_port_id; + arr[i].reg_type = EHEA_BCMC_MULTICAST | + EHEA_BCMC_UNTAGGED; + if (mc_entry->macaddr == 0) + arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL; + arr[i++].macaddr = mc_entry->macaddr; + + arr[i].adh = adapter->handle; + arr[i].port_id = port->logical_port_id; + arr[i].reg_type = EHEA_BCMC_MULTICAST | + EHEA_BCMC_VLANID_ALL; + if (mc_entry->macaddr == 0) + arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL; + arr[i++].macaddr = mc_entry->macaddr; + num_registrations -= 2; + } + } + } + +out_update: + kfree(ehea_bcmc_regs.arr); + ehea_bcmc_regs.arr = arr; + ehea_bcmc_regs.num_entries = i; +out: + spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags); +} + +static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ehea_port *port = netdev_priv(dev); + u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0; + int i; + + for (i = 0; i < port->num_def_qps; i++) { + rx_packets += port->port_res[i].rx_packets; + rx_bytes += port->port_res[i].rx_bytes; + } + + for (i = 0; i < port->num_def_qps; i++) { + tx_packets += port->port_res[i].tx_packets; + tx_bytes += port->port_res[i].tx_bytes; + } + + stats->tx_packets = tx_packets; + stats->rx_bytes = rx_bytes; + stats->tx_bytes = tx_bytes; + stats->rx_packets = rx_packets; + + stats->multicast = port->stats.multicast; + stats->rx_errors = port->stats.rx_errors; + return stats; +} + +static void ehea_update_stats(struct work_struct *work) +{ + struct ehea_port *port = + container_of(work, struct ehea_port, stats_work.work); + struct net_device *dev = port->netdev; + struct rtnl_link_stats64 *stats = &port->stats; + struct hcp_ehea_port_cb2 *cb2; + u64 hret; + + cb2 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb2) { + netdev_err(dev, "No mem for cb2. 
Some interface statistics were not updated\n"); + goto resched; + } + + hret = ehea_h_query_ehea_port(port->adapter->handle, + port->logical_port_id, + H_PORT_CB2, H_PORT_CB2_ALL, cb2); + if (hret != H_SUCCESS) { + netdev_err(dev, "query_ehea_port failed\n"); + goto out_herr; + } + + if (netif_msg_hw(port)) + ehea_dump(cb2, sizeof(*cb2), "net_device_stats"); + + stats->multicast = cb2->rxmcp; + stats->rx_errors = cb2->rxuerr; + +out_herr: + free_page((unsigned long)cb2); +resched: + schedule_delayed_work(&port->stats_work, + round_jiffies_relative(msecs_to_jiffies(1000))); +} + +static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) +{ + struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; + struct net_device *dev = pr->port->netdev; + int max_index_mask = pr->rq1_skba.len - 1; + int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; + int adder = 0; + int i; + + pr->rq1_skba.os_skbs = 0; + + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + if (nr_of_wqes > 0) + pr->rq1_skba.index = index; + pr->rq1_skba.os_skbs = fill_wqes; + return; + } + + for (i = 0; i < fill_wqes; i++) { + if (!skb_arr_rq1[index]) { + skb_arr_rq1[index] = netdev_alloc_skb(dev, + EHEA_L_PKT_SIZE); + if (!skb_arr_rq1[index]) { + pr->rq1_skba.os_skbs = fill_wqes - i; + break; + } + } + index--; + index &= max_index_mask; + adder++; + } + + if (adder == 0) + return; + + /* Ring doorbell */ + ehea_update_rq1a(pr->qp, adder); +} + +static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) +{ + struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; + struct net_device *dev = pr->port->netdev; + int i; + + if (nr_rq1a > pr->rq1_skba.len) { + netdev_err(dev, "NR_RQ1A bigger than skb array len\n"); + return; + } + + for (i = 0; i < nr_rq1a; i++) { + skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); + if (!skb_arr_rq1[i]) + break; + } + /* Ring doorbell */ + ehea_update_rq1a(pr->qp, i - 1); +} + +static int ehea_refill_rq_def(struct ehea_port_res *pr, + struct ehea_q_skb_arr *q_skba, int rq_nr, + int num_wqes, int wqe_type, int packet_size) +{ + struct net_device *dev = pr->port->netdev; + struct ehea_qp *qp = pr->qp; + struct sk_buff **skb_arr = q_skba->arr; + struct ehea_rwqe *rwqe; + int i, index, max_index_mask, fill_wqes; + int adder = 0; + int ret = 0; + + fill_wqes = q_skba->os_skbs + num_wqes; + q_skba->os_skbs = 0; + + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + q_skba->os_skbs = fill_wqes; + return ret; + } + + index = q_skba->index; + max_index_mask = q_skba->len - 1; + for (i = 0; i < fill_wqes; i++) { + u64 tmp_addr; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, packet_size); + if (!skb) { + q_skba->os_skbs = fill_wqes - i; + if (q_skba->os_skbs == q_skba->len - 2) { + netdev_info(pr->port->netdev, + "rq%i ran dry - no mem for skb\n", + rq_nr); + ret = -ENOMEM; + } + break; + } + + skb_arr[index] = skb; + tmp_addr = ehea_map_vaddr(skb->data); + if (tmp_addr == -1) { + dev_consume_skb_any(skb); + q_skba->os_skbs = fill_wqes - i; + ret = 0; + break; + } + + rwqe = ehea_get_next_rwqe(qp, rq_nr); + rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) + | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); + rwqe->sg_list[0].l_key = pr->recv_mr.lkey; + rwqe->sg_list[0].vaddr = tmp_addr; + rwqe->sg_list[0].len = packet_size; + rwqe->data_segments = 1; + + index++; + index &= max_index_mask; + adder++; + } + + q_skba->index = index; + if (adder == 0) + goto out; + + /* Ring doorbell */ + iosync(); + if (rq_nr == 2) + ehea_update_rq2a(pr->qp, 
adder); + else + ehea_update_rq3a(pr->qp, adder); +out: + return ret; +} + + +static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) +{ + return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, + nr_of_wqes, EHEA_RWQE2_TYPE, + EHEA_RQ2_PKT_SIZE); +} + + +static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) +{ + return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, + nr_of_wqes, EHEA_RWQE3_TYPE, + EHEA_MAX_PACKET_SIZE); +} + +static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) +{ + *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; + if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) + return 0; + if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && + (cqe->header_length == 0)) + return 0; + return -EINVAL; +} + +static inline void ehea_fill_skb(struct net_device *dev, + struct sk_buff *skb, struct ehea_cqe *cqe, + struct ehea_port_res *pr) +{ + int length = cqe->num_bytes_transfered - 4; /*remove CRC */ + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, dev); + + /* The packet was not an IPV4 packet so a complemented checksum was + calculated. The value is found in the Internet Checksum field. */ + if (cqe->status & EHEA_CQE_BLIND_CKSUM) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold(~cqe->inet_checksum_value); + } else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); +} + +static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, + int arr_len, + struct ehea_cqe *cqe) +{ + int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); + struct sk_buff *skb; + void *pref; + int x; + + x = skb_index + 1; + x &= (arr_len - 1); + + pref = skb_array[x]; + if (pref) { + prefetchw(pref); + prefetchw(pref + EHEA_CACHE_LINE); + + pref = (skb_array[x]->data); + prefetch(pref); + prefetch(pref + EHEA_CACHE_LINE); + prefetch(pref + EHEA_CACHE_LINE * 2); + prefetch(pref + EHEA_CACHE_LINE * 3); + } + + skb = skb_array[skb_index]; + skb_array[skb_index] = NULL; + return skb; +} + +static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array, + int arr_len, int wqe_index) +{ + struct sk_buff *skb; + void *pref; + int x; + + x = wqe_index + 1; + x &= (arr_len - 1); + + pref = skb_array[x]; + if (pref) { + prefetchw(pref); + prefetchw(pref + EHEA_CACHE_LINE); + + pref = (skb_array[x]->data); + prefetchw(pref); + prefetchw(pref + EHEA_CACHE_LINE); + } + + skb = skb_array[wqe_index]; + skb_array[wqe_index] = NULL; + return skb; +} + +static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, + struct ehea_cqe *cqe, int *processed_rq2, + int *processed_rq3) +{ + struct sk_buff *skb; + + if (cqe->status & EHEA_CQE_STAT_ERR_TCP) + pr->p_stats.err_tcp_cksum++; + if (cqe->status & EHEA_CQE_STAT_ERR_IP) + pr->p_stats.err_ip_cksum++; + if (cqe->status & EHEA_CQE_STAT_ERR_CRC) + pr->p_stats.err_frame_crc++; + + if (rq == 2) { + *processed_rq2 += 1; + skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); + dev_kfree_skb(skb); + } else if (rq == 3) { + *processed_rq3 += 1; + skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); + dev_kfree_skb(skb); + } + + if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { + if (netif_msg_rx_err(pr->port)) { + pr_err("Critical receive error for QP %d. 
Resetting port.\n", + pr->qp->init_attr.qp_nr); + ehea_dump(cqe, sizeof(*cqe), "CQE"); + } + ehea_schedule_port_reset(pr->port); + return 1; + } + + return 0; +} + +static int ehea_proc_rwqes(struct net_device *dev, + struct ehea_port_res *pr, + int budget) +{ + struct ehea_port *port = pr->port; + struct ehea_qp *qp = pr->qp; + struct ehea_cqe *cqe; + struct sk_buff *skb; + struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; + struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; + struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; + int skb_arr_rq1_len = pr->rq1_skba.len; + int skb_arr_rq2_len = pr->rq2_skba.len; + int skb_arr_rq3_len = pr->rq3_skba.len; + int processed, processed_rq1, processed_rq2, processed_rq3; + u64 processed_bytes = 0; + int wqe_index, last_wqe_index, rq, port_reset; + + processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; + last_wqe_index = 0; + + cqe = ehea_poll_rq1(qp, &wqe_index); + while ((processed < budget) && cqe) { + ehea_inc_rq1(qp); + processed_rq1++; + processed++; + if (netif_msg_rx_status(port)) + ehea_dump(cqe, sizeof(*cqe), "CQE"); + + last_wqe_index = wqe_index; + rmb(); + if (!ehea_check_cqe(cqe, &rq)) { + if (rq == 1) { + /* LL RQ1 */ + skb = get_skb_by_index_ll(skb_arr_rq1, + skb_arr_rq1_len, + wqe_index); + if (unlikely(!skb)) { + netif_info(port, rx_err, dev, + "LL rq1: skb=NULL\n"); + + skb = netdev_alloc_skb(dev, + EHEA_L_PKT_SIZE); + if (!skb) + break; + } + skb_copy_to_linear_data(skb, ((char *)cqe) + 64, + cqe->num_bytes_transfered - 4); + ehea_fill_skb(dev, skb, cqe, pr); + } else if (rq == 2) { + /* RQ2 */ + skb = get_skb_by_index(skb_arr_rq2, + skb_arr_rq2_len, cqe); + if (unlikely(!skb)) { + netif_err(port, rx_err, dev, + "rq2: skb=NULL\n"); + break; + } + ehea_fill_skb(dev, skb, cqe, pr); + processed_rq2++; + } else { + /* RQ3 */ + skb = get_skb_by_index(skb_arr_rq3, + skb_arr_rq3_len, cqe); + if (unlikely(!skb)) { + netif_err(port, rx_err, dev, + "rq3: skb=NULL\n"); + break; + } + ehea_fill_skb(dev, skb, cqe, pr); + processed_rq3++; + } + + processed_bytes += skb->len; + + if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + cqe->vlan_tag); + + napi_gro_receive(&pr->napi, skb); + } else { + pr->p_stats.poll_receive_errors++; + port_reset = ehea_treat_poll_error(pr, rq, cqe, + &processed_rq2, + &processed_rq3); + if (port_reset) + break; + } + cqe = ehea_poll_rq1(qp, &wqe_index); + } + + pr->rx_packets += processed; + pr->rx_bytes += processed_bytes; + + ehea_refill_rq1(pr, last_wqe_index, processed_rq1); + ehea_refill_rq2(pr, processed_rq2); + ehea_refill_rq3(pr, processed_rq3); + + return processed; +} + +#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull + +static void reset_sq_restart_flag(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) { + struct ehea_port_res *pr = &port->port_res[i]; + pr->sq_restart_flag = 0; + } + wake_up(&port->restart_wq); +} + +static void check_sqs(struct ehea_port *port) +{ + struct ehea_swqe *swqe; + int swqe_index; + int i, k; + + for (i = 0; i < port->num_def_qps; i++) { + struct ehea_port_res *pr = &port->port_res[i]; + int ret; + k = 0; + swqe = ehea_get_swqe(pr->qp, &swqe_index); + memset(swqe, 0, SWQE_HEADER_SIZE); + atomic_dec(&pr->swqe_avail); + + swqe->tx_control |= EHEA_SWQE_PURGE; + swqe->wr_id = SWQE_RESTART_CHECK; + swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; + swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT; + swqe->immediate_data_length = 80; + + ehea_post_swqe(pr->qp, swqe); + + ret = 
wait_event_timeout(port->restart_wq, + pr->sq_restart_flag == 0, + msecs_to_jiffies(100)); + + if (!ret) { + pr_err("HW/SW queues out of sync\n"); + ehea_schedule_port_reset(pr->port); + return; + } + } +} + + +static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) +{ + struct sk_buff *skb; + struct ehea_cq *send_cq = pr->send_cq; + struct ehea_cqe *cqe; + int quota = my_quota; + int cqe_counter = 0; + int swqe_av = 0; + int index; + struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, + pr - &pr->port->port_res[0]); + + cqe = ehea_poll_cq(send_cq); + while (cqe && (quota > 0)) { + ehea_inc_cq(send_cq); + + cqe_counter++; + rmb(); + + if (cqe->wr_id == SWQE_RESTART_CHECK) { + pr->sq_restart_flag = 1; + swqe_av++; + break; + } + + if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { + pr_err("Bad send completion status=0x%04X\n", + cqe->status); + + if (netif_msg_tx_err(pr->port)) + ehea_dump(cqe, sizeof(*cqe), "Send CQE"); + + if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { + pr_err("Resetting port\n"); + ehea_schedule_port_reset(pr->port); + break; + } + } + + if (netif_msg_tx_done(pr->port)) + ehea_dump(cqe, sizeof(*cqe), "CQE"); + + if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) + == EHEA_SWQE2_TYPE)) { + + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); + skb = pr->sq_skba.arr[index]; + dev_consume_skb_any(skb); + pr->sq_skba.arr[index] = NULL; + } + + swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); + quota--; + + cqe = ehea_poll_cq(send_cq); + } + + ehea_update_feca(send_cq, cqe_counter); + atomic_add(swqe_av, &pr->swqe_avail); + + if (unlikely(netif_tx_queue_stopped(txq) && + (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } + + wake_up(&pr->port->swqe_avail_wq); + + return cqe; +} + +#define EHEA_POLL_MAX_CQES 65535 + +static int ehea_poll(struct napi_struct *napi, int budget) +{ + struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, + napi); + struct net_device *dev = pr->port->netdev; + struct ehea_cqe *cqe; + struct ehea_cqe *cqe_skb = NULL; + int wqe_index; + int rx = 0; + + cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); + rx += ehea_proc_rwqes(dev, pr, budget - rx); + + while (rx != budget) { + napi_complete(napi); + ehea_reset_cq_ep(pr->recv_cq); + ehea_reset_cq_ep(pr->send_cq); + ehea_reset_cq_n1(pr->recv_cq); + ehea_reset_cq_n1(pr->send_cq); + rmb(); + cqe = ehea_poll_rq1(pr->qp, &wqe_index); + cqe_skb = ehea_poll_cq(pr->send_cq); + + if (!cqe && !cqe_skb) + return rx; + + if (!napi_reschedule(napi)) + return rx; + + cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); + rx += ehea_proc_rwqes(dev, pr, budget - rx); + } + + return rx; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ehea_netpoll(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + int i; + + for (i = 0; i < port->num_def_qps; i++) + napi_schedule(&port->port_res[i].napi); +} +#endif + +static irqreturn_t ehea_recv_irq_handler(int irq, void *param) +{ + struct ehea_port_res *pr = param; + + napi_schedule(&pr->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) +{ + struct ehea_port *port = param; + struct ehea_eqe *eqe; + struct ehea_qp *qp; + u32 qp_token; + u64 resource_type, aer, aerr; + int reset_port = 0; + + eqe = ehea_poll_eq(port->qp_eq); + + while (eqe) { + qp_token 
= EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); + pr_err("QP aff_err: entry=0x%llx, token=0x%x\n", + eqe->entry, qp_token); + + qp = port->port_res[qp_token].qp; + + resource_type = ehea_error_data(port->adapter, qp->fw_handle, + &aer, &aerr); + + if (resource_type == EHEA_AER_RESTYPE_QP) { + if ((aer & EHEA_AER_RESET_MASK) || + (aerr & EHEA_AERR_RESET_MASK)) + reset_port = 1; + } else + reset_port = 1; /* Reset in case of CQ or EQ error */ + + eqe = ehea_poll_eq(port->qp_eq); + } + + if (reset_port) { + pr_err("Resetting port\n"); + ehea_schedule_port_reset(port); + } + + return IRQ_HANDLED; +} + +static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter, + int logical_port) +{ + int i; + + for (i = 0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i]) + if (adapter->port[i]->logical_port_id == logical_port) + return adapter->port[i]; + return NULL; +} + +int ehea_sense_port_attr(struct ehea_port *port) +{ + int ret; + u64 hret; + struct hcp_ehea_port_cb0 *cb0; + + /* may be called via ehea_neq_tasklet() */ + cb0 = (void *)get_zeroed_page(GFP_ATOMIC); + if (!cb0) { + pr_err("no mem for cb0\n"); + ret = -ENOMEM; + goto out; + } + + hret = ehea_h_query_ehea_port(port->adapter->handle, + port->logical_port_id, H_PORT_CB0, + EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ret = -EIO; + goto out_free; + } + + /* MAC address */ + port->mac_addr = cb0->port_mac_addr << 16; + + if (!is_valid_ether_addr((u8 *)&port->mac_addr)) { + ret = -EADDRNOTAVAIL; + goto out_free; + } + + /* Port speed */ + switch (cb0->port_speed) { + case H_SPEED_10M_H: + port->port_speed = EHEA_SPEED_10M; + port->full_duplex = 0; + break; + case H_SPEED_10M_F: + port->port_speed = EHEA_SPEED_10M; + port->full_duplex = 1; + break; + case H_SPEED_100M_H: + port->port_speed = EHEA_SPEED_100M; + port->full_duplex = 0; + break; + case H_SPEED_100M_F: + port->port_speed = EHEA_SPEED_100M; + port->full_duplex = 1; + break; + case H_SPEED_1G_F: + port->port_speed = EHEA_SPEED_1G; + port->full_duplex = 1; + break; + case H_SPEED_10G_F: + port->port_speed = EHEA_SPEED_10G; + port->full_duplex = 1; + break; + default: + port->port_speed = 0; + port->full_duplex = 0; + break; + } + + port->autoneg = 1; + port->num_mcs = cb0->num_default_qps; + + /* Number of default QPs */ + if (use_mcs) + port->num_def_qps = cb0->num_default_qps; + else + port->num_def_qps = 1; + + if (!port->num_def_qps) { + ret = -EINVAL; + goto out_free; + } + + ret = 0; +out_free: + if (ret || netif_msg_probe(port)) + ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); + free_page((unsigned long)cb0); +out: + return ret; +} + +int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) +{ + struct hcp_ehea_port_cb4 *cb4; + u64 hret; + int ret = 0; + + cb4 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb4) { + pr_err("no mem for cb4\n"); + ret = -ENOMEM; + goto out; + } + + cb4->port_speed = port_speed; + + netif_carrier_off(port->netdev); + + hret = ehea_h_modify_ehea_port(port->adapter->handle, + port->logical_port_id, + H_PORT_CB4, H_PORT_CB4_SPEED, cb4); + if (hret == H_SUCCESS) { + port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 
1 : 0; + + hret = ehea_h_query_ehea_port(port->adapter->handle, + port->logical_port_id, + H_PORT_CB4, H_PORT_CB4_SPEED, + cb4); + if (hret == H_SUCCESS) { + switch (cb4->port_speed) { + case H_SPEED_10M_H: + port->port_speed = EHEA_SPEED_10M; + port->full_duplex = 0; + break; + case H_SPEED_10M_F: + port->port_speed = EHEA_SPEED_10M; + port->full_duplex = 1; + break; + case H_SPEED_100M_H: + port->port_speed = EHEA_SPEED_100M; + port->full_duplex = 0; + break; + case H_SPEED_100M_F: + port->port_speed = EHEA_SPEED_100M; + port->full_duplex = 1; + break; + case H_SPEED_1G_F: + port->port_speed = EHEA_SPEED_1G; + port->full_duplex = 1; + break; + case H_SPEED_10G_F: + port->port_speed = EHEA_SPEED_10G; + port->full_duplex = 1; + break; + default: + port->port_speed = 0; + port->full_duplex = 0; + break; + } + } else { + pr_err("Failed sensing port speed\n"); + ret = -EIO; + } + } else { + if (hret == H_AUTHORITY) { + pr_info("Hypervisor denied setting port speed\n"); + ret = -EPERM; + } else { + ret = -EIO; + pr_err("Failed setting port speed\n"); + } + } + if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) + netif_carrier_on(port->netdev); + + free_page((unsigned long)cb4); +out: + return ret; +} + +static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) +{ + int ret; + u8 ec; + u8 portnum; + struct ehea_port *port; + struct net_device *dev; + + ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); + portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); + port = ehea_get_port(adapter, portnum); + dev = port->netdev; + + switch (ec) { + case EHEA_EC_PORTSTATE_CHG: /* port state change */ + + if (!port) { + netdev_err(dev, "unknown portnum %x\n", portnum); + break; + } + + if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { + if (!netif_carrier_ok(dev)) { + ret = ehea_sense_port_attr(port); + if (ret) { + netdev_err(dev, "failed resensing port attributes\n"); + break; + } + + netif_info(port, link, dev, + "Logical port up: %dMbps %s Duplex\n", + port->port_speed, + port->full_duplex == 1 ? 
+ "Full" : "Half"); + + netif_carrier_on(dev); + netif_wake_queue(dev); + } + } else + if (netif_carrier_ok(dev)) { + netif_info(port, link, dev, + "Logical port down\n"); + netif_carrier_off(dev); + netif_tx_disable(dev); + } + + if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { + port->phy_link = EHEA_PHY_LINK_UP; + netif_info(port, link, dev, + "Physical port up\n"); + if (prop_carrier_state) + netif_carrier_on(dev); + } else { + port->phy_link = EHEA_PHY_LINK_DOWN; + netif_info(port, link, dev, + "Physical port down\n"); + if (prop_carrier_state) + netif_carrier_off(dev); + } + + if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) + netdev_info(dev, + "External switch port is primary port\n"); + else + netdev_info(dev, + "External switch port is backup port\n"); + + break; + case EHEA_EC_ADAPTER_MALFUNC: + netdev_err(dev, "Adapter malfunction\n"); + break; + case EHEA_EC_PORT_MALFUNC: + netdev_info(dev, "Port malfunction\n"); + netif_carrier_off(dev); + netif_tx_disable(dev); + break; + default: + netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe); + break; + } +} + +static void ehea_neq_tasklet(unsigned long data) +{ + struct ehea_adapter *adapter = (struct ehea_adapter *)data; + struct ehea_eqe *eqe; + u64 event_mask; + + eqe = ehea_poll_eq(adapter->neq); + pr_debug("eqe=%p\n", eqe); + + while (eqe) { + pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry); + ehea_parse_eqe(adapter, eqe->entry); + eqe = ehea_poll_eq(adapter->neq); + pr_debug("next eqe=%p\n", eqe); + } + + event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) + | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1) + | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1); + + ehea_h_reset_events(adapter->handle, + adapter->neq->fw_handle, event_mask); +} + +static irqreturn_t ehea_interrupt_neq(int irq, void *param) +{ + struct ehea_adapter *adapter = param; + tasklet_hi_schedule(&adapter->neq_tasklet); + return IRQ_HANDLED; +} + + +static int ehea_fill_port_res(struct ehea_port_res *pr) +{ + int ret; + struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; + + ehea_init_fill_rq1(pr, pr->rq1_skba.len); + + ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); + + ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); + + return ret; +} + +static int ehea_reg_interrupts(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_port_res *pr; + int i, ret; + + + snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", + dev->name); + + ret = ibmebus_request_irq(port->qp_eq->attr.ist1, + ehea_qp_aff_irq_handler, + 0, port->int_aff_name, port); + if (ret) { + netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n", + port->qp_eq->attr.ist1); + goto out_free_qpeq; + } + + netif_info(port, ifup, dev, + "irq_handle 0x%X for function qp_aff_irq_handler registered\n", + port->qp_eq->attr.ist1); + + + for (i = 0; i < port->num_def_qps; i++) { + pr = &port->port_res[i]; + snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, + "%s-queue%d", dev->name, i); + ret = ibmebus_request_irq(pr->eq->attr.ist1, + ehea_recv_irq_handler, + 0, pr->int_send_name, pr); + if (ret) { + netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", + i, pr->eq->attr.ist1); + goto out_free_req; + } + netif_info(port, ifup, dev, + "irq_handle 0x%X for function ehea_queue_int %d registered\n", + pr->eq->attr.ist1, i); + } +out: + return ret; + + +out_free_req: + while (--i >= 0) { + u32 ist = port->port_res[i].eq->attr.ist1; + ibmebus_free_irq(ist, &port->port_res[i]); + } + +out_free_qpeq: + 
ibmebus_free_irq(port->qp_eq->attr.ist1, port); + i = port->num_def_qps; + + goto out; + +} + +static void ehea_free_interrupts(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_port_res *pr; + int i; + + /* send */ + + for (i = 0; i < port->num_def_qps; i++) { + pr = &port->port_res[i]; + ibmebus_free_irq(pr->eq->attr.ist1, pr); + netif_info(port, intr, dev, + "free send irq for res %d with handle 0x%X\n", + i, pr->eq->attr.ist1); + } + + /* associated events */ + ibmebus_free_irq(port->qp_eq->attr.ist1, port); + netif_info(port, intr, dev, + "associated event interrupt for handle 0x%X freed\n", + port->qp_eq->attr.ist1); +} + +static int ehea_configure_port(struct ehea_port *port) +{ + int ret, i; + u64 hret, mask; + struct hcp_ehea_port_cb0 *cb0; + + ret = -ENOMEM; + cb0 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb0) + goto out; + + cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1) + | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1) + | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1) + | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1) + | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER, + PXLY_RC_VLAN_FILTER) + | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1); + + for (i = 0; i < port->num_mcs; i++) + if (use_mcs) + cb0->default_qpn_arr[i] = + port->port_res[i].qp->init_attr.qp_nr; + else + cb0->default_qpn_arr[i] = + port->port_res[0].qp->init_attr.qp_nr; + + if (netif_msg_ifup(port)) + ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port"); + + mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1) + | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1); + + hret = ehea_h_modify_ehea_port(port->adapter->handle, + port->logical_port_id, + H_PORT_CB0, mask, cb0); + ret = -EIO; + if (hret != H_SUCCESS) + goto out_free; + + ret = 0; + +out_free: + free_page((unsigned long)cb0); +out: + return ret; +} + +static int ehea_gen_smrs(struct ehea_port_res *pr) +{ + int ret; + struct ehea_adapter *adapter = pr->port->adapter; + + ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); + if (ret) + goto out; + + ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); + if (ret) + goto out_free; + + return 0; + +out_free: + ehea_rem_mr(&pr->send_mr); +out: + pr_err("Generating SMRS failed\n"); + return -EIO; +} + +static int ehea_rem_smrs(struct ehea_port_res *pr) +{ + if ((ehea_rem_mr(&pr->send_mr)) || + (ehea_rem_mr(&pr->recv_mr))) + return -EIO; + else + return 0; +} + +static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) +{ + int arr_size = sizeof(void *) * max_q_entries; + + q_skba->arr = vzalloc(arr_size); + if (!q_skba->arr) + return -ENOMEM; + + q_skba->len = max_q_entries; + q_skba->index = 0; + q_skba->os_skbs = 0; + + return 0; +} + +static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, + struct port_res_cfg *pr_cfg, int queue_token) +{ + struct ehea_adapter *adapter = port->adapter; + enum ehea_eq_type eq_type = EHEA_EQ; + struct ehea_qp_init_attr *init_attr = NULL; + int ret = -EIO; + u64 tx_bytes, rx_bytes, tx_packets, rx_packets; + + tx_bytes = pr->tx_bytes; + tx_packets = pr->tx_packets; + rx_bytes = pr->rx_bytes; + rx_packets = pr->rx_packets; + + memset(pr, 0, sizeof(struct ehea_port_res)); + + pr->tx_bytes = rx_bytes; + pr->tx_packets = tx_packets; + pr->rx_bytes = rx_bytes; + pr->rx_packets = rx_packets; + + pr->port = port; + + pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); + if (!pr->eq) { + pr_err("create_eq failed (eq)\n"); + goto out_free; + } + + pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, + pr->eq->fw_handle, + 
port->logical_port_id); + if (!pr->recv_cq) { + pr_err("create_cq failed (cq_recv)\n"); + goto out_free; + } + + pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, + pr->eq->fw_handle, + port->logical_port_id); + if (!pr->send_cq) { + pr_err("create_cq failed (cq_send)\n"); + goto out_free; + } + + if (netif_msg_ifup(port)) + pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n", + pr->send_cq->attr.act_nr_of_cqes, + pr->recv_cq->attr.act_nr_of_cqes); + + init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); + if (!init_attr) { + ret = -ENOMEM; + pr_err("no mem for ehea_qp_init_attr\n"); + goto out_free; + } + + init_attr->low_lat_rq1 = 1; + init_attr->signalingtype = 1; /* generate CQE if specified in WQE */ + init_attr->rq_count = 3; + init_attr->qp_token = queue_token; + init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq; + init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1; + init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2; + init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3; + init_attr->wqe_size_enc_sq = EHEA_SG_SQ; + init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1; + init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2; + init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3; + init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD; + init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD; + init_attr->port_nr = port->logical_port_id; + init_attr->send_cq_handle = pr->send_cq->fw_handle; + init_attr->recv_cq_handle = pr->recv_cq->fw_handle; + init_attr->aff_eq_handle = port->qp_eq->fw_handle; + + pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); + if (!pr->qp) { + pr_err("create_qp failed\n"); + ret = -EIO; + goto out_free; + } + + if (netif_msg_ifup(port)) + pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n", + init_attr->qp_nr, + init_attr->act_nr_send_wqes, + init_attr->act_nr_rwqes_rq1, + init_attr->act_nr_rwqes_rq2, + init_attr->act_nr_rwqes_rq3); + + pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; + + ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); + ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); + ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); + ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); + if (ret) + goto out_free; + + pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; + if (ehea_gen_smrs(pr) != 0) { + ret = -EIO; + goto out_free; + } + + atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); + + kfree(init_attr); + + netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); + + ret = 0; + goto out; + +out_free: + kfree(init_attr); + vfree(pr->sq_skba.arr); + vfree(pr->rq1_skba.arr); + vfree(pr->rq2_skba.arr); + vfree(pr->rq3_skba.arr); + ehea_destroy_qp(pr->qp); + ehea_destroy_cq(pr->send_cq); + ehea_destroy_cq(pr->recv_cq); + ehea_destroy_eq(pr->eq); +out: + return ret; +} + +static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) +{ + int ret, i; + + if (pr->qp) + netif_napi_del(&pr->napi); + + ret = ehea_destroy_qp(pr->qp); + + if (!ret) { + ehea_destroy_cq(pr->send_cq); + ehea_destroy_cq(pr->recv_cq); + ehea_destroy_eq(pr->eq); + + for (i = 0; i < pr->rq1_skba.len; i++) + if (pr->rq1_skba.arr[i]) + dev_kfree_skb(pr->rq1_skba.arr[i]); + + for (i = 0; i < pr->rq2_skba.len; i++) + if (pr->rq2_skba.arr[i]) + dev_kfree_skb(pr->rq2_skba.arr[i]); + + for (i = 0; i < pr->rq3_skba.len; i++) + if (pr->rq3_skba.arr[i]) + dev_kfree_skb(pr->rq3_skba.arr[i]); + + for (i = 0; i < pr->sq_skba.len; i++) + if 
(pr->sq_skba.arr[i]) + dev_kfree_skb(pr->sq_skba.arr[i]); + + vfree(pr->rq1_skba.arr); + vfree(pr->rq2_skba.arr); + vfree(pr->rq3_skba.arr); + vfree(pr->sq_skba.arr); + ret = ehea_rem_smrs(pr); + } + return ret; +} + +static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe, + u32 lkey) +{ + int skb_data_size = skb_headlen(skb); + u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; + struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; + unsigned int immediate_len = SWQE2_MAX_IMM; + + swqe->descriptors = 0; + + if (skb_is_gso(skb)) { + swqe->tx_control |= EHEA_SWQE_TSO; + swqe->mss = skb_shinfo(skb)->gso_size; + /* + * For TSO packets we only copy the headers into the + * immediate area. + */ + immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); + } + + if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) { + skb_copy_from_linear_data(skb, imm_data, immediate_len); + swqe->immediate_data_length = immediate_len; + + if (skb_data_size > immediate_len) { + sg1entry->l_key = lkey; + sg1entry->len = skb_data_size - immediate_len; + sg1entry->vaddr = + ehea_map_vaddr(skb->data + immediate_len); + swqe->descriptors++; + } + } else { + skb_copy_from_linear_data(skb, imm_data, skb_data_size); + swqe->immediate_data_length = skb_data_size; + } +} + +static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, + struct ehea_swqe *swqe, u32 lkey) +{ + struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; + skb_frag_t *frag; + int nfrags, sg1entry_contains_frag_data, i; + + nfrags = skb_shinfo(skb)->nr_frags; + sg1entry = &swqe->u.immdata_desc.sg_entry; + sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; + sg1entry_contains_frag_data = 0; + + write_swqe2_immediate(skb, swqe, lkey); + + /* write descriptors */ + if (nfrags > 0) { + if (swqe->descriptors == 0) { + /* sg1entry not yet used */ + frag = &skb_shinfo(skb)->frags[0]; + + /* copy sg1entry data */ + sg1entry->l_key = lkey; + sg1entry->len = skb_frag_size(frag); + sg1entry->vaddr = + ehea_map_vaddr(skb_frag_address(frag)); + swqe->descriptors++; + sg1entry_contains_frag_data = 1; + } + + for (i = sg1entry_contains_frag_data; i < nfrags; i++) { + + frag = &skb_shinfo(skb)->frags[i]; + sgentry = &sg_list[i - sg1entry_contains_frag_data]; + + sgentry->l_key = lkey; + sgentry->len = skb_frag_size(frag); + sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag)); + swqe->descriptors++; + } + } +} + +static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) +{ + int ret = 0; + u64 hret; + u8 reg_type; + + /* De/Register untagged packets */ + reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED; + hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, + port->logical_port_id, + reg_type, port->mac_addr, 0, hcallid); + if (hret != H_SUCCESS) { + pr_err("%sregistering bc address failed (tagged)\n", + hcallid == H_REG_BCMC ? "" : "de"); + ret = -EIO; + goto out_herr; + } + + /* De/Register VLAN packets */ + reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL; + hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, + port->logical_port_id, + reg_type, port->mac_addr, 0, hcallid); + if (hret != H_SUCCESS) { + pr_err("%sregistering bc address failed (vlan)\n", + hcallid == H_REG_BCMC ? 
"" : "de"); + ret = -EIO; + } +out_herr: + return ret; +} + +static int ehea_set_mac_addr(struct net_device *dev, void *sa) +{ + struct ehea_port *port = netdev_priv(dev); + struct sockaddr *mac_addr = sa; + struct hcp_ehea_port_cb0 *cb0; + int ret; + u64 hret; + + if (!is_valid_ether_addr(mac_addr->sa_data)) { + ret = -EADDRNOTAVAIL; + goto out; + } + + cb0 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb0) { + pr_err("no mem for cb0\n"); + ret = -ENOMEM; + goto out; + } + + memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN); + + cb0->port_mac_addr = cb0->port_mac_addr >> 16; + + hret = ehea_h_modify_ehea_port(port->adapter->handle, + port->logical_port_id, H_PORT_CB0, + EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0); + if (hret != H_SUCCESS) { + ret = -EIO; + goto out_free; + } + + memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); + + /* Deregister old MAC in pHYP */ + if (port->state == EHEA_PORT_UP) { + ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); + if (ret) + goto out_upregs; + } + + port->mac_addr = cb0->port_mac_addr << 16; + + /* Register new MAC in pHYP */ + if (port->state == EHEA_PORT_UP) { + ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); + if (ret) + goto out_upregs; + } + + ret = 0; + +out_upregs: + ehea_update_bcmc_registrations(); +out_free: + free_page((unsigned long)cb0); +out: + return ret; +} + +static void ehea_promiscuous_error(u64 hret, int enable) +{ + if (hret == H_AUTHORITY) + pr_info("Hypervisor denied %sabling promiscuous mode\n", + enable == 1 ? "en" : "dis"); + else + pr_err("failed %sabling promiscuous mode\n", + enable == 1 ? "en" : "dis"); +} + +static void ehea_promiscuous(struct net_device *dev, int enable) +{ + struct ehea_port *port = netdev_priv(dev); + struct hcp_ehea_port_cb7 *cb7; + u64 hret; + + if (enable == port->promisc) + return; + + cb7 = (void *)get_zeroed_page(GFP_ATOMIC); + if (!cb7) { + pr_err("no mem for cb7\n"); + goto out; + } + + /* Modify Pxs_DUCQPN in CB7 */ + cb7->def_uc_qpn = enable == 1 ? 
port->port_res[0].qp->fw_handle : 0; + + hret = ehea_h_modify_ehea_port(port->adapter->handle, + port->logical_port_id, + H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7); + if (hret) { + ehea_promiscuous_error(hret, enable); + goto out; + } + + port->promisc = enable; +out: + free_page((unsigned long)cb7); +} + +static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, + u32 hcallid) +{ + u64 hret; + u8 reg_type; + + reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED; + if (mc_mac_addr == 0) + reg_type |= EHEA_BCMC_SCOPE_ALL; + + hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, + port->logical_port_id, + reg_type, mc_mac_addr, 0, hcallid); + if (hret) + goto out; + + reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL; + if (mc_mac_addr == 0) + reg_type |= EHEA_BCMC_SCOPE_ALL; + + hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, + port->logical_port_id, + reg_type, mc_mac_addr, 0, hcallid); +out: + return hret; +} + +static int ehea_drop_multicast_list(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_mc_list *mc_entry = port->mc_list; + struct list_head *pos; + struct list_head *temp; + int ret = 0; + u64 hret; + + list_for_each_safe(pos, temp, &(port->mc_list->list)) { + mc_entry = list_entry(pos, struct ehea_mc_list, list); + + hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, + H_DEREG_BCMC); + if (hret) { + pr_err("failed deregistering mcast MAC\n"); + ret = -EIO; + } + + list_del(pos); + kfree(mc_entry); + } + return ret; +} + +static void ehea_allmulti(struct net_device *dev, int enable) +{ + struct ehea_port *port = netdev_priv(dev); + u64 hret; + + if (!port->allmulti) { + if (enable) { + /* Enable ALLMULTI */ + ehea_drop_multicast_list(dev); + hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC); + if (!hret) + port->allmulti = 1; + else + netdev_err(dev, + "failed enabling IFF_ALLMULTI\n"); + } + } else { + if (!enable) { + /* Disable ALLMULTI */ + hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); + if (!hret) + port->allmulti = 0; + else + netdev_err(dev, + "failed disabling IFF_ALLMULTI\n"); + } + } +} + +static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) +{ + struct ehea_mc_list *ehea_mcl_entry; + u64 hret; + + ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); + if (!ehea_mcl_entry) + return; + + INIT_LIST_HEAD(&ehea_mcl_entry->list); + + memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); + + hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, + H_REG_BCMC); + if (!hret) + list_add(&ehea_mcl_entry->list, &port->mc_list->list); + else { + pr_err("failed registering mcast MAC\n"); + kfree(ehea_mcl_entry); + } +} + +static void ehea_set_multicast_list(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct netdev_hw_addr *ha; + int ret; + + ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC)); + + if (dev->flags & IFF_ALLMULTI) { + ehea_allmulti(dev, 1); + goto out; + } + ehea_allmulti(dev, 0); + + if (!netdev_mc_empty(dev)) { + ret = ehea_drop_multicast_list(dev); + if (ret) { + /* Dropping the current multicast list failed. + * Enabling ALL_MULTI is the best we can do. + */ + ehea_allmulti(dev, 1); + } + + if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { + pr_info("Mcast registration limit reached (0x%llx). 
Use ALLMULTI!\n", + port->adapter->max_mc_mac); + goto out; + } + + netdev_for_each_mc_addr(ha, dev) + ehea_add_multicast_entry(port, ha->addr); + + } +out: + ehea_update_bcmc_registrations(); +} + +static int ehea_change_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE)) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe) +{ + swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; + + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + return; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM; + + swqe->ip_start = skb_network_offset(skb); + swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1; + + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + if (skb->ip_summed == CHECKSUM_PARTIAL) + swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM; + + swqe->tcp_offset = swqe->ip_end + 1 + + offsetof(struct udphdr, check); + break; + + case IPPROTO_TCP: + if (skb->ip_summed == CHECKSUM_PARTIAL) + swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM; + + swqe->tcp_offset = swqe->ip_end + 1 + + offsetof(struct tcphdr, check); + break; + } +} + +static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, + struct ehea_swqe *swqe, u32 lkey) +{ + swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT; + + xmit_common(skb, swqe); + + write_swqe2_data(skb, dev, swqe, lkey); +} + +static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, + struct ehea_swqe *swqe) +{ + u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; + + xmit_common(skb, swqe); + + if (!skb->data_len) + skb_copy_from_linear_data(skb, imm_data, skb->len); + else + skb_copy_bits(skb, 0, imm_data, skb->len); + + swqe->immediate_data_length = skb->len; + dev_consume_skb_any(skb); +} + +static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_swqe *swqe; + u32 lkey; + int swqe_index; + struct ehea_port_res *pr; + struct netdev_queue *txq; + + pr = &port->port_res[skb_get_queue_mapping(skb)]; + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + + swqe = ehea_get_swqe(pr->qp, &swqe_index); + memset(swqe, 0, SWQE_HEADER_SIZE); + atomic_dec(&pr->swqe_avail); + + if (skb_vlan_tag_present(skb)) { + swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; + swqe->vlan_tag = skb_vlan_tag_get(skb); + } + + pr->tx_packets++; + pr->tx_bytes += skb->len; + + if (skb->len <= SWQE3_MAX_IMM) { + u32 sig_iv = port->sig_comp_iv; + u32 swqe_num = pr->swqe_id_counter; + ehea_xmit3(skb, dev, swqe); + swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) + | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num); + if (pr->swqe_ll_count >= (sig_iv - 1)) { + swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, + sig_iv); + swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; + pr->swqe_ll_count = 0; + } else + pr->swqe_ll_count += 1; + } else { + swqe->wr_id = + EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) + | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) + | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1) + | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); + pr->sq_skba.arr[pr->sq_skba.index] = skb; + + pr->sq_skba.index++; + pr->sq_skba.index &= (pr->sq_skba.len - 1); + + lkey = pr->send_mr.lkey; + ehea_xmit2(skb, dev, swqe, lkey); + swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; + } + pr->swqe_id_counter += 1; + + netif_info(port, tx_queued, dev, + "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); + if 
(netif_msg_tx_queued(port)) + ehea_dump(swqe, 512, "swqe"); + + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + netif_tx_stop_queue(txq); + swqe->tx_control |= EHEA_SWQE_PURGE; + } + + ehea_post_swqe(pr->qp, swqe); + + if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { + pr->p_stats.queue_stopped++; + netif_tx_stop_queue(txq); + } + + return NETDEV_TX_OK; +} + +static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + struct hcp_ehea_port_cb1 *cb1; + int index; + u64 hret; + int err = 0; + + cb1 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb1) { + pr_err("no mem for cb1\n"); + err = -ENOMEM; + goto out; + } + + hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, + H_PORT_CB1, H_PORT_CB1_ALL, cb1); + if (hret != H_SUCCESS) { + pr_err("query_ehea_port failed\n"); + err = -EINVAL; + goto out; + } + + index = (vid / 64); + cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F))); + + hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, + H_PORT_CB1, H_PORT_CB1_ALL, cb1); + if (hret != H_SUCCESS) { + pr_err("modify_ehea_port failed\n"); + err = -EINVAL; + } +out: + free_page((unsigned long)cb1); + return err; +} + +static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + struct hcp_ehea_port_cb1 *cb1; + int index; + u64 hret; + int err = 0; + + cb1 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb1) { + pr_err("no mem for cb1\n"); + err = -ENOMEM; + goto out; + } + + hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, + H_PORT_CB1, H_PORT_CB1_ALL, cb1); + if (hret != H_SUCCESS) { + pr_err("query_ehea_port failed\n"); + err = -EINVAL; + goto out; + } + + index = (vid / 64); + cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F))); + + hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, + H_PORT_CB1, H_PORT_CB1_ALL, cb1); + if (hret != H_SUCCESS) { + pr_err("modify_ehea_port failed\n"); + err = -EINVAL; + } +out: + free_page((unsigned long)cb1); + return err; +} + +static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) +{ + int ret = -EIO; + u64 hret; + u16 dummy16 = 0; + u64 dummy64 = 0; + struct hcp_modify_qp_cb0 *cb0; + + cb0 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb0) { + ret = -ENOMEM; + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); + if (hret != H_SUCCESS) { + pr_err("query_ehea_qp failed (1)\n"); + goto out; + } + + cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, + &dummy64, &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + pr_err("modify_ehea_qp failed (1)\n"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); + if (hret != H_SUCCESS) { + pr_err("query_ehea_qp failed (2)\n"); + goto out; + } + + cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, + &dummy64, &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + pr_err("modify_ehea_qp failed (2)\n"); + goto out; + } + + hret = 
ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); + if (hret != H_SUCCESS) { + pr_err("query_ehea_qp failed (3)\n"); + goto out; + } + + cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, + &dummy64, &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + pr_err("modify_ehea_qp failed (3)\n"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); + if (hret != H_SUCCESS) { + pr_err("query_ehea_qp failed (4)\n"); + goto out; + } + + ret = 0; +out: + free_page((unsigned long)cb0); + return ret; +} + +static int ehea_port_res_setup(struct ehea_port *port, int def_qps) +{ + int ret, i; + struct port_res_cfg pr_cfg, pr_cfg_small_rx; + enum ehea_eq_type eq_type = EHEA_EQ; + + port->qp_eq = ehea_create_eq(port->adapter, eq_type, + EHEA_MAX_ENTRIES_EQ, 1); + if (!port->qp_eq) { + ret = -EINVAL; + pr_err("ehea_create_eq failed (qp_eq)\n"); + goto out_kill_eq; + } + + pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; + pr_cfg.max_entries_scq = sq_entries * 2; + pr_cfg.max_entries_sq = sq_entries; + pr_cfg.max_entries_rq1 = rq1_entries; + pr_cfg.max_entries_rq2 = rq2_entries; + pr_cfg.max_entries_rq3 = rq3_entries; + + pr_cfg_small_rx.max_entries_rcq = 1; + pr_cfg_small_rx.max_entries_scq = sq_entries; + pr_cfg_small_rx.max_entries_sq = sq_entries; + pr_cfg_small_rx.max_entries_rq1 = 1; + pr_cfg_small_rx.max_entries_rq2 = 1; + pr_cfg_small_rx.max_entries_rq3 = 1; + + for (i = 0; i < def_qps; i++) { + ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); + if (ret) + goto out_clean_pr; + } + for (i = def_qps; i < def_qps; i++) { + ret = ehea_init_port_res(port, &port->port_res[i], + &pr_cfg_small_rx, i); + if (ret) + goto out_clean_pr; + } + + return 0; + +out_clean_pr: + while (--i >= 0) + ehea_clean_portres(port, &port->port_res[i]); + +out_kill_eq: + ehea_destroy_eq(port->qp_eq); + return ret; +} + +static int ehea_clean_all_portres(struct ehea_port *port) +{ + int ret = 0; + int i; + + for (i = 0; i < port->num_def_qps; i++) + ret |= ehea_clean_portres(port, &port->port_res[i]); + + ret |= ehea_destroy_eq(port->qp_eq); + + return ret; +} + +static void ehea_remove_adapter_mr(struct ehea_adapter *adapter) +{ + if (adapter->active_ports) + return; + + ehea_rem_mr(&adapter->mr); +} + +static int ehea_add_adapter_mr(struct ehea_adapter *adapter) +{ + if (adapter->active_ports) + return 0; + + return ehea_reg_kernel_mr(adapter, &adapter->mr); +} + +static int ehea_up(struct net_device *dev) +{ + int ret, i; + struct ehea_port *port = netdev_priv(dev); + + if (port->state == EHEA_PORT_UP) + return 0; + + ret = ehea_port_res_setup(port, port->num_def_qps); + if (ret) { + netdev_err(dev, "port_res_failed\n"); + goto out; + } + + /* Set default QP for this port */ + ret = ehea_configure_port(port); + if (ret) { + netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret); + goto out_clean_pr; + } + + ret = ehea_reg_interrupts(dev); + if (ret) { + netdev_err(dev, "reg_interrupts failed. 
ret:%d\n", ret); + goto out_clean_pr; + } + + for (i = 0; i < port->num_def_qps; i++) { + ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); + if (ret) { + netdev_err(dev, "activate_qp failed\n"); + goto out_free_irqs; + } + } + + for (i = 0; i < port->num_def_qps; i++) { + ret = ehea_fill_port_res(&port->port_res[i]); + if (ret) { + netdev_err(dev, "out_free_irqs\n"); + goto out_free_irqs; + } + } + + ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); + if (ret) { + ret = -EIO; + goto out_free_irqs; + } + + port->state = EHEA_PORT_UP; + + ret = 0; + goto out; + +out_free_irqs: + ehea_free_interrupts(dev); + +out_clean_pr: + ehea_clean_all_portres(port); +out: + if (ret) + netdev_info(dev, "Failed starting. ret=%i\n", ret); + + ehea_update_bcmc_registrations(); + ehea_update_firmware_handles(); + + return ret; +} + +static void port_napi_disable(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) + napi_disable(&port->port_res[i].napi); +} + +static void port_napi_enable(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) + napi_enable(&port->port_res[i].napi); +} + +static int ehea_open(struct net_device *dev) +{ + int ret; + struct ehea_port *port = netdev_priv(dev); + + mutex_lock(&port->port_lock); + + netif_info(port, ifup, dev, "enabling port\n"); + + ret = ehea_up(dev); + if (!ret) { + port_napi_enable(port); + netif_tx_start_all_queues(dev); + } + + mutex_unlock(&port->port_lock); + schedule_delayed_work(&port->stats_work, + round_jiffies_relative(msecs_to_jiffies(1000))); + + return ret; +} + +static int ehea_down(struct net_device *dev) +{ + int ret; + struct ehea_port *port = netdev_priv(dev); + + if (port->state == EHEA_PORT_DOWN) + return 0; + + ehea_drop_multicast_list(dev); + ehea_allmulti(dev, 0); + ehea_broadcast_reg_helper(port, H_DEREG_BCMC); + + ehea_free_interrupts(dev); + + port->state = EHEA_PORT_DOWN; + + ehea_update_bcmc_registrations(); + + ret = ehea_clean_all_portres(port); + if (ret) + netdev_info(dev, "Failed freeing resources. 
ret=%i\n", ret); + + ehea_update_firmware_handles(); + + return ret; +} + +static int ehea_stop(struct net_device *dev) +{ + int ret; + struct ehea_port *port = netdev_priv(dev); + + netif_info(port, ifdown, dev, "disabling port\n"); + + set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); + cancel_work_sync(&port->reset_task); + cancel_delayed_work_sync(&port->stats_work); + mutex_lock(&port->port_lock); + netif_tx_stop_all_queues(dev); + port_napi_disable(port); + ret = ehea_down(dev); + mutex_unlock(&port->port_lock); + clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); + return ret; +} + +static void ehea_purge_sq(struct ehea_qp *orig_qp) +{ + struct ehea_qp qp = *orig_qp; + struct ehea_qp_init_attr *init_attr = &qp.init_attr; + struct ehea_swqe *swqe; + int wqe_index; + int i; + + for (i = 0; i < init_attr->act_nr_send_wqes; i++) { + swqe = ehea_get_swqe(&qp, &wqe_index); + swqe->tx_control |= EHEA_SWQE_PURGE; + } +} + +static void ehea_flush_sq(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) { + struct ehea_port_res *pr = &port->port_res[i]; + int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; + int ret; + + ret = wait_event_timeout(port->swqe_avail_wq, + atomic_read(&pr->swqe_avail) >= swqe_max, + msecs_to_jiffies(100)); + + if (!ret) { + pr_err("WARNING: sq not flushed completely\n"); + break; + } + } +} + +static int ehea_stop_qps(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + struct hcp_modify_qp_cb0 *cb0; + int ret = -EIO; + int dret; + int i; + u64 hret; + u64 dummy64 = 0; + u16 dummy16 = 0; + + cb0 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb0) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < (port->num_def_qps); i++) { + struct ehea_port_res *pr = &port->port_res[i]; + struct ehea_qp *qp = pr->qp; + + /* Purge send queue */ + ehea_purge_sq(qp); + + /* Disable queue pair */ + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + pr_err("query_ehea_qp failed (1)\n"); + goto out; + } + + cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; + cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED; + + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, + 1), cb0, &dummy64, + &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + pr_err("modify_ehea_qp failed (1)\n"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + pr_err("query_ehea_qp failed (2)\n"); + goto out; + } + + /* deregister shared memory regions */ + dret = ehea_rem_smrs(pr); + if (dret) { + pr_err("unreg shared memory region failed\n"); + goto out; + } + } + + ret = 0; +out: + free_page((unsigned long)cb0); + + return ret; +} + +static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) +{ + struct ehea_qp qp = *orig_qp; + struct ehea_qp_init_attr *init_attr = &qp.init_attr; + struct ehea_rwqe *rwqe; + struct sk_buff **skba_rq2 = pr->rq2_skba.arr; + struct sk_buff **skba_rq3 = pr->rq3_skba.arr; + struct sk_buff *skb; + u32 lkey = pr->recv_mr.lkey; + + + int i; + int index; + + for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) { + rwqe = ehea_get_next_rwqe(&qp, 2); + rwqe->sg_list[0].l_key = lkey; + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); + skb = skba_rq2[index]; + if (skb) + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); 
+ } + + for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) { + rwqe = ehea_get_next_rwqe(&qp, 3); + rwqe->sg_list[0].l_key = lkey; + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); + skb = skba_rq3[index]; + if (skb) + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); + } +} + +static int ehea_restart_qps(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + int ret = 0; + int i; + + struct hcp_modify_qp_cb0 *cb0; + u64 hret; + u64 dummy64 = 0; + u16 dummy16 = 0; + + cb0 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb0) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < (port->num_def_qps); i++) { + struct ehea_port_res *pr = &port->port_res[i]; + struct ehea_qp *qp = pr->qp; + + ret = ehea_gen_smrs(pr); + if (ret) { + netdev_err(dev, "creation of shared memory regions failed\n"); + goto out; + } + + ehea_update_rqs(qp, pr); + + /* Enable queue pair */ + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + netdev_err(dev, "query_ehea_qp failed (1)\n"); + goto out; + } + + cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; + cb0->qp_ctl_reg |= H_QP_CR_ENABLED; + + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, + 1), cb0, &dummy64, + &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + netdev_err(dev, "modify_ehea_qp failed (1)\n"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + netdev_err(dev, "query_ehea_qp failed (2)\n"); + goto out; + } + + /* refill entire queue */ + ehea_refill_rq1(pr, pr->rq1_skba.index, 0); + ehea_refill_rq2(pr, 0); + ehea_refill_rq3(pr, 0); + } +out: + free_page((unsigned long)cb0); + + return ret; +} + +static void ehea_reset_port(struct work_struct *work) +{ + int ret; + struct ehea_port *port = + container_of(work, struct ehea_port, reset_task); + struct net_device *dev = port->netdev; + + mutex_lock(&dlpar_mem_lock); + port->resets++; + mutex_lock(&port->port_lock); + netif_tx_disable(dev); + + port_napi_disable(port); + + ehea_down(dev); + + ret = ehea_up(dev); + if (ret) + goto out; + + ehea_set_multicast_list(dev); + + netif_info(port, timer, dev, "reset successful\n"); + + port_napi_enable(port); + + netif_tx_wake_all_queues(dev); +out: + mutex_unlock(&port->port_lock); + mutex_unlock(&dlpar_mem_lock); +} + +static void ehea_rereg_mrs(void) +{ + int ret, i; + struct ehea_adapter *adapter; + + pr_info("LPAR memory changed - re-initializing driver\n"); + + list_for_each_entry(adapter, &adapter_list, list) + if (adapter->active_ports) { + /* Shutdown all ports */ + for (i = 0; i < EHEA_MAX_PORTS; i++) { + struct ehea_port *port = adapter->port[i]; + struct net_device *dev; + + if (!port) + continue; + + dev = port->netdev; + + if (dev->flags & IFF_UP) { + mutex_lock(&port->port_lock); + netif_tx_disable(dev); + ehea_flush_sq(port); + ret = ehea_stop_qps(dev); + if (ret) { + mutex_unlock(&port->port_lock); + goto out; + } + port_napi_disable(port); + mutex_unlock(&port->port_lock); + } + reset_sq_restart_flag(port); + } + + /* Unregister old memory region */ + ret = ehea_rem_mr(&adapter->mr); + if (ret) { + pr_err("unregister MR failed - driver inoperable!\n"); + goto out; + } + } + + clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + + list_for_each_entry(adapter, &adapter_list, list) + if (adapter->active_ports) 
{ + /* Register new memory region */ + ret = ehea_reg_kernel_mr(adapter, &adapter->mr); + if (ret) { + pr_err("register MR failed - driver inoperable!\n"); + goto out; + } + + /* Restart all ports */ + for (i = 0; i < EHEA_MAX_PORTS; i++) { + struct ehea_port *port = adapter->port[i]; + + if (port) { + struct net_device *dev = port->netdev; + + if (dev->flags & IFF_UP) { + mutex_lock(&port->port_lock); + ret = ehea_restart_qps(dev); + if (!ret) { + check_sqs(port); + port_napi_enable(port); + netif_tx_wake_all_queues(dev); + } else { + netdev_err(dev, "Unable to restart QPS\n"); + } + mutex_unlock(&port->port_lock); + } + } + } + } + pr_info("re-initializing driver complete\n"); +out: + return; +} + +static void ehea_tx_watchdog(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + + if (netif_carrier_ok(dev) && + !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) + ehea_schedule_port_reset(port); +} + +static int ehea_sense_adapter_attr(struct ehea_adapter *adapter) +{ + struct hcp_query_ehea *cb; + u64 hret; + int ret; + + cb = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb) { + ret = -ENOMEM; + goto out; + } + + hret = ehea_h_query_ehea(adapter->handle, cb); + + if (hret != H_SUCCESS) { + ret = -EIO; + goto out_herr; + } + + adapter->max_mc_mac = cb->max_mc_mac - 1; + ret = 0; + +out_herr: + free_page((unsigned long)cb); +out: + return ret; +} + +static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) +{ + struct hcp_ehea_port_cb4 *cb4; + u64 hret; + int ret = 0; + + *jumbo = 0; + + /* (Try to) enable *jumbo frames */ + cb4 = (void *)get_zeroed_page(GFP_KERNEL); + if (!cb4) { + pr_err("no mem for cb4\n"); + ret = -ENOMEM; + goto out; + } else { + hret = ehea_h_query_ehea_port(port->adapter->handle, + port->logical_port_id, + H_PORT_CB4, + H_PORT_CB4_JUMBO, cb4); + if (hret == H_SUCCESS) { + if (cb4->jumbo_frame) + *jumbo = 1; + else { + cb4->jumbo_frame = 1; + hret = ehea_h_modify_ehea_port(port->adapter-> + handle, + port-> + logical_port_id, + H_PORT_CB4, + H_PORT_CB4_JUMBO, + cb4); + if (hret == H_SUCCESS) + *jumbo = 1; + } + } else + ret = -EINVAL; + + free_page((unsigned long)cb4); + } +out: + return ret; +} + +static ssize_t ehea_show_port_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); + return sprintf(buf, "%d", port->logical_port_id); +} + +static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, + NULL); + +static void logical_port_release(struct device *dev) +{ + struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); + of_node_put(port->ofdev.dev.of_node); +} + +static struct device *ehea_register_port(struct ehea_port *port, + struct device_node *dn) +{ + int ret; + + port->ofdev.dev.of_node = of_node_get(dn); + port->ofdev.dev.parent = &port->adapter->ofdev->dev; + port->ofdev.dev.bus = &ibmebus_bus_type; + + dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++); + port->ofdev.dev.release = logical_port_release; + + ret = of_device_register(&port->ofdev); + if (ret) { + pr_err("failed to register device. 
ret=%d\n", ret); + goto out; + } + + ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); + if (ret) { + pr_err("failed to register attributes, ret=%d\n", ret); + goto out_unreg_of_dev; + } + + return &port->ofdev.dev; + +out_unreg_of_dev: + of_device_unregister(&port->ofdev); +out: + return NULL; +} + +static void ehea_unregister_port(struct ehea_port *port) +{ + device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); + of_device_unregister(&port->ofdev); +} + +static const struct net_device_ops ehea_netdev_ops = { + .ndo_open = ehea_open, + .ndo_stop = ehea_stop, + .ndo_start_xmit = ehea_start_xmit, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ehea_netpoll, +#endif + .ndo_get_stats64 = ehea_get_stats64, + .ndo_set_mac_address = ehea_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = ehea_set_multicast_list, + .ndo_change_mtu = ehea_change_mtu, + .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid, + .ndo_tx_timeout = ehea_tx_watchdog, +}; + +static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, + u32 logical_port_id, + struct device_node *dn) +{ + int ret; + struct net_device *dev; + struct ehea_port *port; + struct device *port_dev; + int jumbo; + + /* allocate memory for the port structures */ + dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES); + + if (!dev) { + ret = -ENOMEM; + goto out_err; + } + + port = netdev_priv(dev); + + mutex_init(&port->port_lock); + port->state = EHEA_PORT_DOWN; + port->sig_comp_iv = sq_entries / 10; + + port->adapter = adapter; + port->netdev = dev; + port->logical_port_id = logical_port_id; + + port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); + + port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); + if (!port->mc_list) { + ret = -ENOMEM; + goto out_free_ethdev; + } + + INIT_LIST_HEAD(&port->mc_list->list); + + ret = ehea_sense_port_attr(port); + if (ret) + goto out_free_mc_list; + + netif_set_real_num_rx_queues(dev, port->num_def_qps); + netif_set_real_num_tx_queues(dev, port->num_def_qps); + + port_dev = ehea_register_port(port, dn); + if (!port_dev) + goto out_free_mc_list; + + SET_NETDEV_DEV(dev, port_dev); + + /* initialize net_device structure */ + memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); + + dev->netdev_ops = &ehea_netdev_ops; + ehea_set_ethtool_ops(dev); + + dev->hw_features = NETIF_F_SG | NETIF_F_TSO | + NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX; + dev->features = NETIF_F_SG | NETIF_F_TSO | + NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM; + dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM; + dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; + + INIT_WORK(&port->reset_task, ehea_reset_port); + INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats); + + init_waitqueue_head(&port->swqe_avail_wq); + init_waitqueue_head(&port->restart_wq); + + memset(&port->stats, 0, sizeof(struct net_device_stats)); + ret = register_netdev(dev); + if (ret) { + pr_err("register_netdev failed. ret=%d\n", ret); + goto out_unreg_port; + } + + ret = ehea_get_jumboframe_status(port, &jumbo); + if (ret) + netdev_err(dev, "failed determining jumbo frame status\n"); + + netdev_info(dev, "Jumbo frames are %sabled\n", + jumbo == 1 ? 
"en" : "dis"); + + adapter->active_ports++; + + return port; + +out_unreg_port: + ehea_unregister_port(port); + +out_free_mc_list: + kfree(port->mc_list); + +out_free_ethdev: + free_netdev(dev); + +out_err: + pr_err("setting up logical port with id=%d failed, ret=%d\n", + logical_port_id, ret); + return NULL; +} + +static void ehea_shutdown_single_port(struct ehea_port *port) +{ + struct ehea_adapter *adapter = port->adapter; + + cancel_work_sync(&port->reset_task); + cancel_delayed_work_sync(&port->stats_work); + unregister_netdev(port->netdev); + ehea_unregister_port(port); + kfree(port->mc_list); + free_netdev(port->netdev); + adapter->active_ports--; +} + +static int ehea_setup_ports(struct ehea_adapter *adapter) +{ + struct device_node *lhea_dn; + struct device_node *eth_dn = NULL; + + const u32 *dn_log_port_id; + int i = 0; + + lhea_dn = adapter->ofdev->dev.of_node; + while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { + + dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", + NULL); + if (!dn_log_port_id) { + pr_err("bad device node: eth_dn name=%s\n", + eth_dn->full_name); + continue; + } + + if (ehea_add_adapter_mr(adapter)) { + pr_err("creating MR failed\n"); + of_node_put(eth_dn); + return -EIO; + } + + adapter->port[i] = ehea_setup_single_port(adapter, + *dn_log_port_id, + eth_dn); + if (adapter->port[i]) + netdev_info(adapter->port[i]->netdev, + "logical port id #%d\n", *dn_log_port_id); + else + ehea_remove_adapter_mr(adapter); + + i++; + } + return 0; +} + +static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter, + u32 logical_port_id) +{ + struct device_node *lhea_dn; + struct device_node *eth_dn = NULL; + const u32 *dn_log_port_id; + + lhea_dn = adapter->ofdev->dev.of_node; + while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { + + dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", + NULL); + if (dn_log_port_id) + if (*dn_log_port_id == logical_port_id) + return eth_dn; + } + + return NULL; +} + +static ssize_t ehea_probe_port(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehea_adapter *adapter = dev_get_drvdata(dev); + struct ehea_port *port; + struct device_node *eth_dn = NULL; + int i; + + u32 logical_port_id; + + sscanf(buf, "%d", &logical_port_id); + + port = ehea_get_port(adapter, logical_port_id); + + if (port) { + netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", + logical_port_id); + return -EINVAL; + } + + eth_dn = ehea_get_eth_dn(adapter, logical_port_id); + + if (!eth_dn) { + pr_info("no logical port with id %d found\n", logical_port_id); + return -EINVAL; + } + + if (ehea_add_adapter_mr(adapter)) { + pr_err("creating MR failed\n"); + return -EIO; + } + + port = ehea_setup_single_port(adapter, logical_port_id, eth_dn); + + of_node_put(eth_dn); + + if (port) { + for (i = 0; i < EHEA_MAX_PORTS; i++) + if (!adapter->port[i]) { + adapter->port[i] = port; + break; + } + + netdev_info(port->netdev, "added: (logical port id=%d)\n", + logical_port_id); + } else { + ehea_remove_adapter_mr(adapter); + return -EIO; + } + + return (ssize_t) count; +} + +static ssize_t ehea_remove_port(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehea_adapter *adapter = dev_get_drvdata(dev); + struct ehea_port *port; + int i; + u32 logical_port_id; + + sscanf(buf, "%d", &logical_port_id); + + port = ehea_get_port(adapter, logical_port_id); + + if (port) { + netdev_info(port->netdev, "removed: (logical 
port id=%d)\n", + logical_port_id); + + ehea_shutdown_single_port(port); + + for (i = 0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i] == port) { + adapter->port[i] = NULL; + break; + } + } else { + pr_err("removing port with logical port id=%d failed. port not configured.\n", + logical_port_id); + return -EINVAL; + } + + ehea_remove_adapter_mr(adapter); + + return (ssize_t) count; +} + +static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port); +static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port); + +static int ehea_create_device_sysfs(struct platform_device *dev) +{ + int ret = device_create_file(&dev->dev, &dev_attr_probe_port); + if (ret) + goto out; + + ret = device_create_file(&dev->dev, &dev_attr_remove_port); +out: + return ret; +} + +static void ehea_remove_device_sysfs(struct platform_device *dev) +{ + device_remove_file(&dev->dev, &dev_attr_probe_port); + device_remove_file(&dev->dev, &dev_attr_remove_port); +} + +static int ehea_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + if (action == SYS_RESTART) { + pr_info("Reboot: freeing all eHEA resources\n"); + ibmebus_unregister_driver(&ehea_driver); + } + return NOTIFY_DONE; +} + +static struct notifier_block ehea_reboot_nb = { + .notifier_call = ehea_reboot_notifier, +}; + +static int ehea_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int ret = NOTIFY_BAD; + struct memory_notify *arg = data; + + mutex_lock(&dlpar_mem_lock); + + switch (action) { + case MEM_CANCEL_OFFLINE: + pr_info("memory offlining canceled"); + /* Fall through: re-add canceled memory block */ + + case MEM_ONLINE: + pr_info("memory is going online"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + case MEM_GOING_OFFLINE: + pr_info("memory is going offline"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + default: + break; + } + + ehea_update_firmware_handles(); + ret = NOTIFY_OK; + +out_unlock: + mutex_unlock(&dlpar_mem_lock); + return ret; +} + +static struct notifier_block ehea_mem_nb = { + .notifier_call = ehea_mem_notifier, +}; + +static void ehea_crash_handler(void) +{ + int i; + + if (ehea_fw_handles.arr) + for (i = 0; i < ehea_fw_handles.num_entries; i++) + ehea_h_free_resource(ehea_fw_handles.arr[i].adh, + ehea_fw_handles.arr[i].fwh, + FORCE_FREE); + + if (ehea_bcmc_regs.arr) + for (i = 0; i < ehea_bcmc_regs.num_entries; i++) + ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, + ehea_bcmc_regs.arr[i].port_id, + ehea_bcmc_regs.arr[i].reg_type, + ehea_bcmc_regs.arr[i].macaddr, + 0, H_DEREG_BCMC); +} + +static atomic_t ehea_memory_hooks_registered; + +/* Register memory hooks on probe of first adapter */ +static int ehea_register_memory_hooks(void) +{ + int ret = 0; + + if (atomic_inc_return(&ehea_memory_hooks_registered) > 1) + return 0; + + ret = ehea_create_busmap(); + if (ret) { + pr_info("ehea_create_busmap failed\n"); + goto out; + } + + ret = register_reboot_notifier(&ehea_reboot_nb); + if (ret) { + pr_info("register_reboot_notifier failed\n"); + goto out; + } + + ret = register_memory_notifier(&ehea_mem_nb); + if (ret) { + pr_info("register_memory_notifier failed\n"); + goto out2; + } + + ret = crash_shutdown_register(ehea_crash_handler); + if (ret) { + pr_info("crash_shutdown_register failed\n"); + goto out3; + } + + return 0; + +out3: + 
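+	/*
+	 * Note (annotation, not part of the original patch): the labels below
+	 * unwind in reverse order of registration -- drop the memory notifier,
+	 * then the reboot notifier -- and finally release the reference taken
+	 * on ehea_memory_hooks_registered so a later adapter probe can retry.
+	 */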
unregister_memory_notifier(&ehea_mem_nb); +out2: + unregister_reboot_notifier(&ehea_reboot_nb); +out: + atomic_dec(&ehea_memory_hooks_registered); + return ret; +} + +static void ehea_unregister_memory_hooks(void) +{ + /* Only remove the hooks if we've registered them */ + if (atomic_read(&ehea_memory_hooks_registered) == 0) + return; + + unregister_reboot_notifier(&ehea_reboot_nb); + if (crash_shutdown_unregister(ehea_crash_handler)) + pr_info("failed unregistering crash handler\n"); + unregister_memory_notifier(&ehea_mem_nb); +} + +static int ehea_probe_adapter(struct platform_device *dev) +{ + struct ehea_adapter *adapter; + const u64 *adapter_handle; + int ret; + int i; + + ret = ehea_register_memory_hooks(); + if (ret) + return ret; + + if (!dev || !dev->dev.of_node) { + pr_err("Invalid ibmebus device probed\n"); + return -EINVAL; + } + + adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + ret = -ENOMEM; + dev_err(&dev->dev, "no mem for ehea_adapter\n"); + goto out; + } + + list_add(&adapter->list, &adapter_list); + + adapter->ofdev = dev; + + adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle", + NULL); + if (adapter_handle) + adapter->handle = *adapter_handle; + + if (!adapter->handle) { + dev_err(&dev->dev, "failed getting handle for adapter" + " '%s'\n", dev->dev.of_node->full_name); + ret = -ENODEV; + goto out_free_ad; + } + + adapter->pd = EHEA_PD_ID; + + platform_set_drvdata(dev, adapter); + + + /* initialize adapter and ports */ + /* get adapter properties */ + ret = ehea_sense_adapter_attr(adapter); + if (ret) { + dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret); + goto out_free_ad; + } + + adapter->neq = ehea_create_eq(adapter, + EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); + if (!adapter->neq) { + ret = -EIO; + dev_err(&dev->dev, "NEQ creation failed\n"); + goto out_free_ad; + } + + tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, + (unsigned long)adapter); + + ret = ehea_create_device_sysfs(dev); + if (ret) + goto out_kill_eq; + + ret = ehea_setup_ports(adapter); + if (ret) { + dev_err(&dev->dev, "setup_ports failed\n"); + goto out_rem_dev_sysfs; + } + + ret = ibmebus_request_irq(adapter->neq->attr.ist1, + ehea_interrupt_neq, 0, + "ehea_neq", adapter); + if (ret) { + dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); + goto out_shutdown_ports; + } + + /* Handle any events that might be pending. 
*/ + tasklet_hi_schedule(&adapter->neq_tasklet); + + ret = 0; + goto out; + +out_shutdown_ports: + for (i = 0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i]) { + ehea_shutdown_single_port(adapter->port[i]); + adapter->port[i] = NULL; + } + +out_rem_dev_sysfs: + ehea_remove_device_sysfs(dev); + +out_kill_eq: + ehea_destroy_eq(adapter->neq); + +out_free_ad: + list_del(&adapter->list); + +out: + ehea_update_firmware_handles(); + + return ret; +} + +static int ehea_remove(struct platform_device *dev) +{ + struct ehea_adapter *adapter = platform_get_drvdata(dev); + int i; + + for (i = 0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i]) { + ehea_shutdown_single_port(adapter->port[i]); + adapter->port[i] = NULL; + } + + ehea_remove_device_sysfs(dev); + + ibmebus_free_irq(adapter->neq->attr.ist1, adapter); + tasklet_kill(&adapter->neq_tasklet); + + ehea_destroy_eq(adapter->neq); + ehea_remove_adapter_mr(adapter); + list_del(&adapter->list); + + ehea_update_firmware_handles(); + + return 0; +} + +static int check_module_parm(void) +{ + int ret = 0; + + if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || + (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { + pr_info("Bad parameter: rq1_entries\n"); + ret = -EINVAL; + } + if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || + (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { + pr_info("Bad parameter: rq2_entries\n"); + ret = -EINVAL; + } + if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || + (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { + pr_info("Bad parameter: rq3_entries\n"); + ret = -EINVAL; + } + if ((sq_entries < EHEA_MIN_ENTRIES_QP) || + (sq_entries > EHEA_MAX_ENTRIES_SQ)) { + pr_info("Bad parameter: sq_entries\n"); + ret = -EINVAL; + } + + return ret; +} + +static ssize_t ehea_show_capabilities(struct device_driver *drv, + char *buf) +{ + return sprintf(buf, "%d", EHEA_CAPABILITIES); +} + +static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH, + ehea_show_capabilities, NULL); + +static int __init ehea_module_init(void) +{ + int ret; + + pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); + + memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); + memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); + + mutex_init(&ehea_fw_handles.lock); + spin_lock_init(&ehea_bcmc_regs.lock); + + ret = check_module_parm(); + if (ret) + goto out; + + ret = ibmebus_register_driver(&ehea_driver); + if (ret) { + pr_err("failed registering eHEA device driver on ebus\n"); + goto out; + } + + ret = driver_create_file(&ehea_driver.driver, + &driver_attr_capabilities); + if (ret) { + pr_err("failed to register capabilities attribute, ret=%d\n", + ret); + goto out2; + } + + return ret; + +out2: + ibmebus_unregister_driver(&ehea_driver); +out: + return ret; +} + +static void __exit ehea_module_exit(void) +{ + driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); + ibmebus_unregister_driver(&ehea_driver); + ehea_unregister_memory_hooks(); + kfree(ehea_fw_handles.arr); + kfree(ehea_bcmc_regs.arr); + ehea_destroy_busmap(); +} + +module_init(ehea_module_init); +module_exit(ehea_module_exit); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c new file mode 100644 index 000000000..d3a130ccd --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c @@ -0,0 +1,626 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.c + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 
2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "ehea_phyp.h" + + +static inline u16 get_order_of_qentries(u16 queue_entries) +{ + u8 ld = 1; /* logarithmus dualis */ + while (((1U << ld) - 1) < queue_entries) + ld++; + return ld - 1; +} + +/* Defines for H_CALL H_ALLOC_RESOURCE */ +#define H_ALL_RES_TYPE_QP 1 +#define H_ALL_RES_TYPE_CQ 2 +#define H_ALL_RES_TYPE_EQ 3 +#define H_ALL_RES_TYPE_MR 5 +#define H_ALL_RES_TYPE_MW 6 + +static long ehea_plpar_hcall_norets(unsigned long opcode, + unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6, + unsigned long arg7) +{ + long ret; + int i, sleep_msecs; + + for (i = 0; i < 5; i++) { + ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4, + arg5, arg6, arg7); + + if (H_IS_LONG_BUSY(ret)) { + sleep_msecs = get_longbusy_msecs(ret); + msleep_interruptible(sleep_msecs); + continue; + } + + if (ret < H_SUCCESS) + pr_err("opcode=%lx ret=%lx" + " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" + " arg5=%lx arg6=%lx arg7=%lx\n", + opcode, ret, + arg1, arg2, arg3, arg4, arg5, arg6, arg7); + + return ret; + } + + return H_BUSY; +} + +static long ehea_plpar_hcall9(unsigned long opcode, + unsigned long *outs, /* array of 9 outputs */ + unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6, + unsigned long arg7, + unsigned long arg8, + unsigned long arg9) +{ + long ret; + int i, sleep_msecs; + u8 cb_cat; + + for (i = 0; i < 5; i++) { + ret = plpar_hcall9(opcode, outs, + arg1, arg2, arg3, arg4, arg5, + arg6, arg7, arg8, arg9); + + if (H_IS_LONG_BUSY(ret)) { + sleep_msecs = get_longbusy_msecs(ret); + msleep_interruptible(sleep_msecs); + continue; + } + + cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2); + + if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY) + && (opcode == H_MODIFY_HEA_PORT)) + && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) + || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) + && (arg3 == H_PORT_CB7_DUCQPN))))) + pr_err("opcode=%lx ret=%lx" + " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" + " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" + " arg9=%lx" + " out1=%lx out2=%lx out3=%lx out4=%lx" + " out5=%lx out6=%lx out7=%lx out8=%lx" + " out9=%lx\n", + opcode, ret, + arg1, arg2, arg3, arg4, arg5, + arg6, arg7, arg8, arg9, + outs[0], outs[1], outs[2], outs[3], outs[4], + outs[5], outs[6], outs[7], outs[8]); + return ret; + } + + return H_BUSY; +} + +u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category, + const u64 qp_handle, const u64 sel_mask, void *cb_addr) +{ + return ehea_plpar_hcall_norets(H_QUERY_HEA_QP, + adapter_handle, /* R4 */ + qp_category, /* R5 */ + qp_handle, /* R6 */ + sel_mask, /* R7 */ + 
__pa(cb_addr), /* R8 */ + 0, 0); +} + +/* input param R5 */ +#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11) +#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12) +#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15) +#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16) +#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17) +#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19) +#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21) +#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23) +#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55) +#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63) + +/* input param R9 */ +#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31) +#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63) + +/* input param R10 */ +#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7) +#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15) +#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23) +#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31) +/* Max Send Scatter Gather Elements */ +#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39) +#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47) +/* Max Receive SG Elements RQ1 */ +#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55) +#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63) + +/* input param R11 */ +#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7) +/* max swqe immediate data length */ +#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63) + +/* input param R12 */ +#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15) +/* Threshold RQ2 */ +#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31) +/* Threshold RQ3 */ + +/* output param R6 */ +#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15) +#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31) +#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47) +#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63) + +/* output param, R7 */ +#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7) +#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15) +#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23) +#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31) +#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39) + +/* output param R8,R9 */ +#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31) +#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63) +#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31) +#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63) + +/* output param R11,R12 */ +#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31) +#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63) +#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31) +#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63) + +u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, + struct ehea_qp_init_attr *init_attr, const u32 pd, + u64 *qp_handle, struct h_epas *h_epas) +{ + u64 hret; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + + u64 allocate_controls = + EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0) + | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0) + | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */ + | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. 
disabled */ + | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1) + | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0) + | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0) + | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype) + | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP); + + u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd) + | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token); + + u64 max_r10_reg = + EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE, + get_order_of_qentries(init_attr->max_nr_send_wqes)) + | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE, + get_order_of_qentries(init_attr->max_nr_rwqes_rq1)) + | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE, + get_order_of_qentries(init_attr->max_nr_rwqes_rq2)) + | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE, + get_order_of_qentries(init_attr->max_nr_rwqes_rq3)) + | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq) + | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE, + init_attr->wqe_size_enc_rq1) + | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE, + init_attr->wqe_size_enc_rq2) + | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE, + init_attr->wqe_size_enc_rq3); + + u64 r11_in = + EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len) + | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr); + u64 threshold = + EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold) + | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold); + + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, + outs, + adapter_handle, /* R4 */ + allocate_controls, /* R5 */ + init_attr->send_cq_handle, /* R6 */ + init_attr->recv_cq_handle, /* R7 */ + init_attr->aff_eq_handle, /* R8 */ + r9_reg, /* R9 */ + max_r10_reg, /* R10 */ + r11_in, /* R11 */ + threshold); /* R12 */ + + *qp_handle = outs[0]; + init_attr->qp_nr = (u32)outs[1]; + + init_attr->act_nr_send_wqes = + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]); + init_attr->act_nr_rwqes_rq1 = + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]); + init_attr->act_nr_rwqes_rq2 = + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]); + init_attr->act_nr_rwqes_rq3 = + (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]); + + init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq; + init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1; + init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2; + init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3; + + init_attr->nr_sq_pages = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]); + init_attr->nr_rq1_pages = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]); + init_attr->nr_rq2_pages = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]); + init_attr->nr_rq3_pages = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]); + + init_attr->liobn_sq = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]); + init_attr->liobn_rq1 = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]); + init_attr->liobn_rq2 = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]); + init_attr->liobn_rq3 = + (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]); + + if (!hret) + hcp_epas_ctor(h_epas, outs[6], outs[6]); + + return hret; +} + +u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, + struct ehea_cq_attr *cq_attr, + u64 *cq_handle, struct h_epas *epas) +{ + u64 hret; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, + outs, + adapter_handle, /* R4 */ + H_ALL_RES_TYPE_CQ, /* R5 */ + cq_attr->eq_handle, /* R6 */ + cq_attr->cq_token, /* R7 */ + cq_attr->max_nr_of_cqes, /* R8 */ + 0, 0, 0, 0); /* R9-R12 */ + + 
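+	/*
+	 * Note (annotation, not part of the original patch): plpar_hcall9()
+	 * hands back the hypervisor's output registers in outs[].  The slots
+	 * used below are outs[0] (new CQ handle), outs[3] (actual number of
+	 * CQEs granted), outs[4] (number of queue pages to register) and
+	 * outs[5]/outs[6] (EPA addresses passed to hcp_epas_ctor()).  The
+	 * in-driver caller is ehea_create_cq(), roughly:
+	 *
+	 *	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
+	 *					&cq->fw_handle, &cq->epas);
+	 *
+	 * which then registers cq->attr.nr_pages queue pages via
+	 * ehea_h_register_rpage().
+	 */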
*cq_handle = outs[0]; + cq_attr->act_nr_of_cqes = outs[3]; + cq_attr->nr_pages = outs[4]; + + if (!hret) + hcp_epas_ctor(epas, outs[5], outs[6]); + + return hret; +} + +/* Defines for H_CALL H_ALLOC_RESOURCE */ +#define H_ALL_RES_TYPE_QP 1 +#define H_ALL_RES_TYPE_CQ 2 +#define H_ALL_RES_TYPE_EQ 3 +#define H_ALL_RES_TYPE_MR 5 +#define H_ALL_RES_TYPE_MW 6 + +/* input param R5 */ +#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0) +#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7) +#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16) +#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63) +/* input param R6 */ +#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63) + +/* output param R6 */ +#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63) + +/* output param R7 */ +#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63) + +/* output param R8 */ +#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63) + +/* output param R9 */ +#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31) +#define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63) + +/* output param R10 */ +#define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63) + +/* output param R11 */ +#define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63) + +/* output param R12 */ +#define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63) + +u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, + struct ehea_eq_attr *eq_attr, u64 *eq_handle) +{ + u64 hret, allocate_controls; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + + /* resource type */ + allocate_controls = + EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ) + | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0) + | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen) + | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1); + + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, + outs, + adapter_handle, /* R4 */ + allocate_controls, /* R5 */ + eq_attr->max_nr_of_eqes, /* R6 */ + 0, 0, 0, 0, 0, 0); /* R7-R10 */ + + *eq_handle = outs[0]; + eq_attr->act_nr_of_eqes = outs[3]; + eq_attr->nr_pages = outs[4]; + eq_attr->ist1 = outs[5]; + eq_attr->ist2 = outs[6]; + eq_attr->ist3 = outs[7]; + eq_attr->ist4 = outs[8]; + + return hret; +} + +u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat, + const u64 qp_handle, const u64 sel_mask, + void *cb_addr, u64 *inv_attr_id, u64 *proc_mask, + u16 *out_swr, u16 *out_rwr) +{ + u64 hret; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + + hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP, + outs, + adapter_handle, /* R4 */ + (u64) cat, /* R5 */ + qp_handle, /* R6 */ + sel_mask, /* R7 */ + __pa(cb_addr), /* R8 */ + 0, 0, 0, 0); /* R9-R12 */ + + *inv_attr_id = outs[0]; + *out_swr = outs[3]; + *out_rwr = outs[4]; + *proc_mask = outs[5]; + + return hret; +} + +u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize, + const u8 queue_type, const u64 resource_handle, + const u64 log_pageaddr, u64 count) +{ + u64 reg_control; + + reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize) + | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type); + + return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES, + adapter_handle, /* R4 */ + reg_control, /* R5 */ + resource_handle, /* R6 */ + log_pageaddr, /* R7 */ + count, /* R8 */ + 0, 0); /* R9-R10 */ +} + +u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle, + const u64 vaddr_in, const u32 access_ctrl, const u32 pd, + struct ehea_mr *mr) +{ + u64 hret; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + + hret = ehea_plpar_hcall9(H_REGISTER_SMR, + outs, + adapter_handle , /* R4 */ + orig_mr_handle, /* 
R5 */ + vaddr_in, /* R6 */ + (((u64)access_ctrl) << 32ULL), /* R7 */ + pd, /* R8 */ + 0, 0, 0, 0); /* R9-R12 */ + + mr->handle = outs[0]; + mr->lkey = (u32)outs[2]; + + return hret; +} + +u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle) +{ + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + + return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA, + outs, + adapter_handle, /* R4 */ + H_DISABLE_GET_EHEA_WQE_P, /* R5 */ + qp_handle, /* R6 */ + 0, 0, 0, 0, 0, 0); /* R7-R12 */ +} + +u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, + u64 force_bit) +{ + return ehea_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle, /* R4 */ + res_handle, /* R5 */ + force_bit, + 0, 0, 0, 0); /* R7-R10 */ +} + +u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, + const u64 length, const u32 access_ctrl, + const u32 pd, u64 *mr_handle, u32 *lkey) +{ + u64 hret; + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + + hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, + outs, + adapter_handle, /* R4 */ + 5, /* R5 */ + vaddr, /* R6 */ + length, /* R7 */ + (((u64) access_ctrl) << 32ULL), /* R8 */ + pd, /* R9 */ + 0, 0, 0); /* R10-R12 */ + + *mr_handle = outs[0]; + *lkey = (u32)outs[2]; + return hret; +} + +u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, + const u8 pagesize, const u8 queue_type, + const u64 log_pageaddr, const u64 count) +{ + if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { + pr_err("not on pageboundary\n"); + return H_PARAMETER; + } + + return ehea_h_register_rpage(adapter_handle, pagesize, + queue_type, mr_handle, + log_pageaddr, count); +} + +u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr) +{ + u64 hret, cb_logaddr; + + cb_logaddr = __pa(cb_addr); + + hret = ehea_plpar_hcall_norets(H_QUERY_HEA, + adapter_handle, /* R4 */ + cb_logaddr, /* R5 */ + 0, 0, 0, 0, 0); /* R6-R10 */ +#ifdef DEBUG + ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea"); +#endif + return hret; +} + +u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, + const u8 cb_cat, const u64 select_mask, + void *cb_addr) +{ + u64 port_info; + u64 cb_logaddr = __pa(cb_addr); + u64 arr_index = 0; + + port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) + | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num); + + return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT, + adapter_handle, /* R4 */ + port_info, /* R5 */ + select_mask, /* R6 */ + arr_index, /* R7 */ + cb_logaddr, /* R8 */ + 0, 0); /* R9-R10 */ +} + +u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, + const u8 cb_cat, const u64 select_mask, + void *cb_addr) +{ + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; + u64 port_info; + u64 arr_index = 0; + u64 cb_logaddr = __pa(cb_addr); + + port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat) + | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num); +#ifdef DEBUG + ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL"); +#endif + return ehea_plpar_hcall9(H_MODIFY_HEA_PORT, + outs, + adapter_handle, /* R4 */ + port_info, /* R5 */ + select_mask, /* R6 */ + arr_index, /* R7 */ + cb_logaddr, /* R8 */ + 0, 0, 0, 0); /* R9-R12 */ +} + +u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num, + const u8 reg_type, const u64 mc_mac_addr, + const u16 vlan_id, const u32 hcall_id) +{ + u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id; + u64 mac_addr = mc_mac_addr >> 16; + + r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num); + r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type); + 
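+	/*
+	 * Note (annotation, not part of the original patch): EHEA_BMASK_SET()
+	 * places each value in an IBM-numbered bit range (bit 0 is the most
+	 * significant bit of the 64-bit register), so H_REGBCMC_PN =
+	 * EHEA_BMASK_IBM(48, 63) puts the port number into the low 16 bits
+	 * of hcall register R5, and likewise for R6-R8 below.
+	 */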
r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr); + r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id); + + return ehea_plpar_hcall_norets(hcall_id, + adapter_handle, /* R4 */ + r5_port_num, /* R5 */ + r6_reg_type, /* R6 */ + r7_mc_mac_addr, /* R7 */ + r8_vlan_id, /* R8 */ + 0, 0); /* R9-R12 */ +} + +u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle, + const u64 event_mask) +{ + return ehea_plpar_hcall_norets(H_RESET_EVENTS, + adapter_handle, /* R4 */ + neq_handle, /* R5 */ + event_mask, /* R6 */ + 0, 0, 0, 0); /* R7-R12 */ +} + +u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle, + void *rblock) +{ + return ehea_plpar_hcall_norets(H_ERROR_DATA, + adapter_handle, /* R4 */ + ressource_handle, /* R5 */ + __pa(rblock), /* R6 */ + 0, 0, 0, 0); /* R7-R12 */ +} diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h new file mode 100644 index 000000000..99b6c2a38 --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h @@ -0,0 +1,447 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea_phyp.h + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __EHEA_PHYP_H__ +#define __EHEA_PHYP_H__ + +#include +#include +#include "ehea.h" +#include "ehea_hw.h" + +/* Some abbreviations used here: + * + * hcp_* - structures, variables and functions releated to Hypervisor Calls + */ + +/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */ +#define EHEA_MAX_RPAGE 512 + +/* Notification Event Queue (NEQ) Entry bit masks */ +#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7) +#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47) +#define NEQE_PORT_UP EHEA_BMASK_IBM(16, 16) +#define NEQE_EXTSWITCH_PORT_UP EHEA_BMASK_IBM(17, 17) +#define NEQE_EXTSWITCH_PRIMARY EHEA_BMASK_IBM(18, 18) +#define NEQE_PLID EHEA_BMASK_IBM(16, 47) + +/* Notification Event Codes */ +#define EHEA_EC_PORTSTATE_CHG 0x30 +#define EHEA_EC_ADAPTER_MALFUNC 0x32 +#define EHEA_EC_PORT_MALFUNC 0x33 + +/* Notification Event Log Register (NELR) bit masks */ +#define NELR_PORT_MALFUNC EHEA_BMASK_IBM(61, 61) +#define NELR_ADAPTER_MALFUNC EHEA_BMASK_IBM(62, 62) +#define NELR_PORTSTATE_CHG EHEA_BMASK_IBM(63, 63) + +static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel, + u64 paddr_user) +{ + /* To support 64k pages we must round to 64k page boundary */ + epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) + + (paddr_kernel & ~PAGE_MASK); + epas->user.addr = paddr_user; +} + +static inline void hcp_epas_dtor(struct h_epas *epas) +{ + if (epas->kernel.addr) + iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK)); + + epas->user.addr = 0; + epas->kernel.addr = 0; +} + +struct hcp_modify_qp_cb0 { + u64 qp_ctl_reg; /* 00 */ + u32 max_swqe; /* 02 */ + u32 max_rwqe; /* 03 */ + u32 port_nb; /* 04 */ + u32 reserved0; /* 05 */ + u64 qp_aer; /* 06 */ + u64 qp_tenure; /* 08 */ +}; + +/* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */ +#define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5) +#define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0) +#define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1) +#define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2) +#define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3) +#define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4) +#define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5) + +/* Queue Pair Control Register Status Bits */ +#define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */ + /* QP States: */ +#define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */ +#define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */ +#define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */ +#define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */ +#define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */ +#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */ + +struct hcp_modify_qp_cb1 { + u32 qpn; /* 00 */ + u32 qp_asyn_ev_eq_nb; /* 01 */ + u64 sq_cq_handle; /* 02 */ + u64 rq_cq_handle; /* 04 */ + /* sgel = scatter gather element */ + u32 sgel_nb_sq; /* 06 */ + u32 sgel_nb_rq1; /* 07 */ + u32 sgel_nb_rq2; /* 08 */ + u32 sgel_nb_rq3; /* 09 */ +}; + +/* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */ +#define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7) +#define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0) +#define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1) +#define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2) +#define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3) +#define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4) +#define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5) +#define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6) +#define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7) + +struct hcp_query_ehea { + u32 
cur_num_qps; /* 00 */ + u32 cur_num_cqs; /* 01 */ + u32 cur_num_eqs; /* 02 */ + u32 cur_num_mrs; /* 03 */ + u32 auth_level; /* 04 */ + u32 max_num_qps; /* 05 */ + u32 max_num_cqs; /* 06 */ + u32 max_num_eqs; /* 07 */ + u32 max_num_mrs; /* 08 */ + u32 reserved0; /* 09 */ + u32 int_clock_freq; /* 10 */ + u32 max_num_pds; /* 11 */ + u32 max_num_addr_handles; /* 12 */ + u32 max_num_cqes; /* 13 */ + u32 max_num_wqes; /* 14 */ + u32 max_num_sgel_rq1wqe; /* 15 */ + u32 max_num_sgel_rq2wqe; /* 16 */ + u32 max_num_sgel_rq3wqe; /* 17 */ + u32 mr_page_size; /* 18 */ + u32 reserved1; /* 19 */ + u64 max_mr_size; /* 20 */ + u64 reserved2; /* 22 */ + u32 num_ports; /* 24 */ + u32 reserved3; /* 25 */ + u32 reserved4; /* 26 */ + u32 reserved5; /* 27 */ + u64 max_mc_mac; /* 28 */ + u64 ehea_cap; /* 30 */ + u32 max_isn_per_eq; /* 32 */ + u32 max_num_neq; /* 33 */ + u64 max_num_vlan_ids; /* 34 */ + u32 max_num_port_group; /* 36 */ + u32 max_num_phys_port; /* 37 */ + +}; + +/* Hcall Query/Modify Port Control Block defines */ +#define H_PORT_CB0 0 +#define H_PORT_CB1 1 +#define H_PORT_CB2 2 +#define H_PORT_CB3 3 +#define H_PORT_CB4 4 +#define H_PORT_CB5 5 +#define H_PORT_CB6 6 +#define H_PORT_CB7 7 + +struct hcp_ehea_port_cb0 { + u64 port_mac_addr; + u64 port_rc; + u64 reserved0; + u32 port_op_state; + u32 port_speed; + u32 ext_swport_op_state; + u32 neg_tpf_prpf; + u32 num_default_qps; + u32 reserved1; + u64 default_qpn_arr[16]; +}; + +/* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */ +#define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */ +#define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */ +#define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */ +#define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */ + +/* Hcall Query Port: Returned port speed values */ +#define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */ +#define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */ +#define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */ +#define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */ +#define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */ +#define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */ + +/* Port Receive Control Status Bits */ +#define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49) +#define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50) +#define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51) +#define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52) +#define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53) +#define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54) +#define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55) +#define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56) +#define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57) +#define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58) +#define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59) +#define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60) +#define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61) +#define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63) + +#define PXLY_RC_VLAN_FILTER 2 +#define PXLY_RC_VLAN_PERM 0 + + +#define H_PORT_CB1_ALL 0x8000000000000000ULL + +struct hcp_ehea_port_cb1 { + u64 vlan_filter[64]; +}; + +#define H_PORT_CB2_ALL 0xFFE0000000000000ULL + +struct hcp_ehea_port_cb2 { + u64 rxo; + u64 rxucp; + u64 rxufd; + u64 rxuerr; + u64 rxftl; + u64 rxmcp; + u64 rxbcp; + u64 txo; + u64 txucp; + u64 txmcp; + u64 txbcp; +}; + +struct hcp_ehea_port_cb3 { + u64 vlan_bc_filter[64]; + u64 vlan_mc_filter[64]; + u64 vlan_un_filter[64]; + u64 port_mac_hash_array[64]; +}; + +#define H_PORT_CB4_ALL 0xF000000000000000ULL +#define H_PORT_CB4_JUMBO 0x1000000000000000ULL +#define 
H_PORT_CB4_SPEED 0x8000000000000000ULL + +struct hcp_ehea_port_cb4 { + u32 port_speed; + u32 pause_frame; + u32 ens_port_op_state; + u32 jumbo_frame; + u32 ens_port_wrap; +}; + +/* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */ +#define H_PORT_CB5_RCU 0x0001000000000000ULL +#define PXS_RCU EHEA_BMASK_IBM(61, 63) + +struct hcp_ehea_port_cb5 { + u64 prc; /* 00 */ + u64 uaa; /* 01 */ + u64 macvc; /* 02 */ + u64 xpcsc; /* 03 */ + u64 xpcsp; /* 04 */ + u64 pcsid; /* 05 */ + u64 xpcsst; /* 06 */ + u64 pthlb; /* 07 */ + u64 pthrb; /* 08 */ + u64 pqu; /* 09 */ + u64 pqd; /* 10 */ + u64 prt; /* 11 */ + u64 wsth; /* 12 */ + u64 rcb; /* 13 */ + u64 rcm; /* 14 */ + u64 rcu; /* 15 */ + u64 macc; /* 16 */ + u64 pc; /* 17 */ + u64 pst; /* 18 */ + u64 ducqpn; /* 19 */ + u64 mcqpn; /* 20 */ + u64 mma; /* 21 */ + u64 pmc0h; /* 22 */ + u64 pmc0l; /* 23 */ + u64 lbc; /* 24 */ +}; + +#define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL + +struct hcp_ehea_port_cb6 { + u64 rxo; /* 00 */ + u64 rx64; /* 01 */ + u64 rx65; /* 02 */ + u64 rx128; /* 03 */ + u64 rx256; /* 04 */ + u64 rx512; /* 05 */ + u64 rx1024; /* 06 */ + u64 rxbfcs; /* 07 */ + u64 rxime; /* 08 */ + u64 rxrle; /* 09 */ + u64 rxorle; /* 10 */ + u64 rxftl; /* 11 */ + u64 rxjab; /* 12 */ + u64 rxse; /* 13 */ + u64 rxce; /* 14 */ + u64 rxrf; /* 15 */ + u64 rxfrag; /* 16 */ + u64 rxuoc; /* 17 */ + u64 rxcpf; /* 18 */ + u64 rxsb; /* 19 */ + u64 rxfd; /* 20 */ + u64 rxoerr; /* 21 */ + u64 rxaln; /* 22 */ + u64 ducqpn; /* 23 */ + u64 reserved0; /* 24 */ + u64 rxmcp; /* 25 */ + u64 rxbcp; /* 26 */ + u64 txmcp; /* 27 */ + u64 txbcp; /* 28 */ + u64 txo; /* 29 */ + u64 tx64; /* 30 */ + u64 tx65; /* 31 */ + u64 tx128; /* 32 */ + u64 tx256; /* 33 */ + u64 tx512; /* 34 */ + u64 tx1024; /* 35 */ + u64 txbfcs; /* 36 */ + u64 txcpf; /* 37 */ + u64 txlf; /* 38 */ + u64 txrf; /* 39 */ + u64 txime; /* 40 */ + u64 txsc; /* 41 */ + u64 txmc; /* 42 */ + u64 txsqe; /* 43 */ + u64 txdef; /* 44 */ + u64 txlcol; /* 45 */ + u64 txexcol; /* 46 */ + u64 txcse; /* 47 */ + u64 txbor; /* 48 */ +}; + +#define H_PORT_CB7_DUCQPN 0x8000000000000000ULL + +struct hcp_ehea_port_cb7 { + u64 def_uc_qpn; +}; + +u64 ehea_h_query_ehea_qp(const u64 adapter_handle, + const u8 qp_category, + const u64 qp_handle, const u64 sel_mask, + void *cb_addr); + +u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, + const u8 cat, + const u64 qp_handle, + const u64 sel_mask, + void *cb_addr, + u64 *inv_attr_id, + u64 *proc_mask, u16 *out_swr, u16 *out_rwr); + +u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, + struct ehea_eq_attr *eq_attr, u64 *eq_handle); + +u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, + struct ehea_cq_attr *cq_attr, + u64 *cq_handle, struct h_epas *epas); + +u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, + struct ehea_qp_init_attr *init_attr, + const u32 pd, + u64 *qp_handle, struct h_epas *h_epas); + +#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55) +#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63) + +u64 ehea_h_register_rpage(const u64 adapter_handle, + const u8 pagesize, + const u8 queue_type, + const u64 resource_handle, + const u64 log_pageaddr, u64 count); + +#define H_DISABLE_GET_EHEA_WQE_P 1 +#define H_DISABLE_GET_SQ_WQE_P 2 +#define H_DISABLE_GET_RQC 3 + +u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle); + +#define FORCE_FREE 1 +#define NORMAL_FREE 0 + +u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, + u64 force_bit); + +u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, + 
const u64 length, const u32 access_ctrl, + const u32 pd, u64 *mr_handle, u32 *lkey); + +u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, + const u8 pagesize, const u8 queue_type, + const u64 log_pageaddr, const u64 count); + +u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle, + const u64 vaddr_in, const u32 access_ctrl, const u32 pd, + struct ehea_mr *mr); + +u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr); + +/* output param R5 */ +#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47) +#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63) + +u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, + const u8 cb_cat, const u64 select_mask, + void *cb_addr); + +u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, + const u8 cb_cat, const u64 select_mask, + void *cb_addr); + +#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63) +#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(60, 63) +#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63) +#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63) + +u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num, + const u8 reg_type, const u64 mc_mac_addr, + const u16 vlan_id, const u32 hcall_id); + +u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle, + const u64 event_mask); + +u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle, + void *rblock); + +#endif /* __EHEA_PHYP_H__ */ diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c new file mode 100644 index 000000000..a0820f72b --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -0,0 +1,1022 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include "ehea.h" +#include "ehea_phyp.h" +#include "ehea_qmr.h" + +static struct ehea_bmap *ehea_bmap; + +static void *hw_qpageit_get_inc(struct hw_queue *queue) +{ + void *retvalue = hw_qeit_get(queue); + + queue->current_q_offset += queue->pagesize; + if (queue->current_q_offset > queue->queue_length) { + queue->current_q_offset -= queue->pagesize; + retvalue = NULL; + } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { + pr_err("not on pageboundary\n"); + retvalue = NULL; + } + return retvalue; +} + +static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, + const u32 pagesize, const u32 qe_size) +{ + int pages_per_kpage = PAGE_SIZE / pagesize; + int i, k; + + if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { + pr_err("pagesize conflict! 
kernel pagesize=%d, ehea pagesize=%d\n", + (int)PAGE_SIZE, (int)pagesize); + return -EINVAL; + } + + queue->queue_length = nr_of_pages * pagesize; + queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *), + GFP_KERNEL); + if (!queue->queue_pages) + return -ENOMEM; + + /* + * allocate pages for queue: + * outer loop allocates whole kernel pages (page aligned) and + * inner loop divides a kernel page into smaller hea queue pages + */ + i = 0; + while (i < nr_of_pages) { + u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL); + if (!kpage) + goto out_nomem; + for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) { + (queue->queue_pages)[i] = (struct ehea_page *)kpage; + kpage += pagesize; + i++; + } + } + + queue->current_q_offset = 0; + queue->qe_size = qe_size; + queue->pagesize = pagesize; + queue->toggle_state = 1; + + return 0; +out_nomem: + for (i = 0; i < nr_of_pages; i += pages_per_kpage) { + if (!(queue->queue_pages)[i]) + break; + free_page((unsigned long)(queue->queue_pages)[i]); + } + return -ENOMEM; +} + +static void hw_queue_dtor(struct hw_queue *queue) +{ + int pages_per_kpage; + int i, nr_pages; + + if (!queue || !queue->queue_pages) + return; + + pages_per_kpage = PAGE_SIZE / queue->pagesize; + + nr_pages = queue->queue_length / queue->pagesize; + + for (i = 0; i < nr_pages; i += pages_per_kpage) + free_page((unsigned long)(queue->queue_pages)[i]); + + kfree(queue->queue_pages); +} + +struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, + int nr_of_cqe, u64 eq_handle, u32 cq_token) +{ + struct ehea_cq *cq; + struct h_epa epa; + u64 *cq_handle_ref, hret, rpage; + u32 act_nr_of_entries, act_pages, counter; + int ret; + void *vpage; + + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + goto out_nomem; + + cq->attr.max_nr_of_cqes = nr_of_cqe; + cq->attr.cq_token = cq_token; + cq->attr.eq_handle = eq_handle; + + cq->adapter = adapter; + + cq_handle_ref = &cq->fw_handle; + act_nr_of_entries = 0; + act_pages = 0; + + hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, + &cq->fw_handle, &cq->epas); + if (hret != H_SUCCESS) { + pr_err("alloc_resource_cq failed\n"); + goto out_freemem; + } + + ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages, + EHEA_PAGESIZE, sizeof(struct ehea_cqe)); + if (ret) + goto out_freeres; + + for (counter = 0; counter < cq->attr.nr_pages; counter++) { + vpage = hw_qpageit_get_inc(&cq->hw_queue); + if (!vpage) { + pr_err("hw_qpageit_get_inc failed\n"); + goto out_kill_hwq; + } + + rpage = __pa(vpage); + hret = ehea_h_register_rpage(adapter->handle, + 0, EHEA_CQ_REGISTER_ORIG, + cq->fw_handle, rpage, 1); + if (hret < H_SUCCESS) { + pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n", + cq, hret, counter, cq->attr.nr_pages); + goto out_kill_hwq; + } + + if (counter == (cq->attr.nr_pages - 1)) { + vpage = hw_qpageit_get_inc(&cq->hw_queue); + + if ((hret != H_SUCCESS) || (vpage)) { + pr_err("registration of pages not complete hret=%llx\n", + hret); + goto out_kill_hwq; + } + } else { + if (hret != H_PAGE_REGISTERED) { + pr_err("CQ: registration of page failed hret=%llx\n", + hret); + goto out_kill_hwq; + } + } + } + + hw_qeit_reset(&cq->hw_queue); + epa = cq->epas.kernel; + ehea_reset_cq_ep(cq); + ehea_reset_cq_n1(cq); + + return cq; + +out_kill_hwq: + hw_queue_dtor(&cq->hw_queue); + +out_freeres: + ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE); + +out_freemem: + kfree(cq); + +out_nomem: + return NULL; +} + +static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force) +{ + u64 hret; + u64 
adapter_handle = cq->adapter->handle; + + /* deregister all previous registered pages */ + hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force); + if (hret != H_SUCCESS) + return hret; + + hw_queue_dtor(&cq->hw_queue); + kfree(cq); + + return hret; +} + +int ehea_destroy_cq(struct ehea_cq *cq) +{ + u64 hret, aer, aerr; + if (!cq) + return 0; + + hcp_epas_dtor(&cq->epas); + hret = ehea_destroy_cq_res(cq, NORMAL_FREE); + if (hret == H_R_STATE) { + ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr); + hret = ehea_destroy_cq_res(cq, FORCE_FREE); + } + + if (hret != H_SUCCESS) { + pr_err("destroy CQ failed\n"); + return -EIO; + } + + return 0; +} + +struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, + const enum ehea_eq_type type, + const u32 max_nr_of_eqes, const u8 eqe_gen) +{ + int ret, i; + u64 hret, rpage; + void *vpage; + struct ehea_eq *eq; + + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) + return NULL; + + eq->adapter = adapter; + eq->attr.type = type; + eq->attr.max_nr_of_eqes = max_nr_of_eqes; + eq->attr.eqe_gen = eqe_gen; + spin_lock_init(&eq->spinlock); + + hret = ehea_h_alloc_resource_eq(adapter->handle, + &eq->attr, &eq->fw_handle); + if (hret != H_SUCCESS) { + pr_err("alloc_resource_eq failed\n"); + goto out_freemem; + } + + ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, + EHEA_PAGESIZE, sizeof(struct ehea_eqe)); + if (ret) { + pr_err("can't allocate eq pages\n"); + goto out_freeres; + } + + for (i = 0; i < eq->attr.nr_pages; i++) { + vpage = hw_qpageit_get_inc(&eq->hw_queue); + if (!vpage) { + pr_err("hw_qpageit_get_inc failed\n"); + hret = H_RESOURCE; + goto out_kill_hwq; + } + + rpage = __pa(vpage); + + hret = ehea_h_register_rpage(adapter->handle, 0, + EHEA_EQ_REGISTER_ORIG, + eq->fw_handle, rpage, 1); + + if (i == (eq->attr.nr_pages - 1)) { + /* last page */ + vpage = hw_qpageit_get_inc(&eq->hw_queue); + if ((hret != H_SUCCESS) || (vpage)) + goto out_kill_hwq; + + } else { + if (hret != H_PAGE_REGISTERED) + goto out_kill_hwq; + + } + } + + hw_qeit_reset(&eq->hw_queue); + return eq; + +out_kill_hwq: + hw_queue_dtor(&eq->hw_queue); + +out_freeres: + ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE); + +out_freemem: + kfree(eq); + return NULL; +} + +struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq) +{ + struct ehea_eqe *eqe; + unsigned long flags; + + spin_lock_irqsave(&eq->spinlock, flags); + eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue); + spin_unlock_irqrestore(&eq->spinlock, flags); + + return eqe; +} + +static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force) +{ + u64 hret; + unsigned long flags; + + spin_lock_irqsave(&eq->spinlock, flags); + + hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force); + spin_unlock_irqrestore(&eq->spinlock, flags); + + if (hret != H_SUCCESS) + return hret; + + hw_queue_dtor(&eq->hw_queue); + kfree(eq); + + return hret; +} + +int ehea_destroy_eq(struct ehea_eq *eq) +{ + u64 hret, aer, aerr; + if (!eq) + return 0; + + hcp_epas_dtor(&eq->epas); + + hret = ehea_destroy_eq_res(eq, NORMAL_FREE); + if (hret == H_R_STATE) { + ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr); + hret = ehea_destroy_eq_res(eq, FORCE_FREE); + } + + if (hret != H_SUCCESS) { + pr_err("destroy EQ failed\n"); + return -EIO; + } + + return 0; +} + +/* allocates memory for a queue and registers pages in phyp */ +static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, + int nr_pages, int wqe_size, int act_nr_sges, + struct ehea_adapter *adapter, int h_call_q_selector) 
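+/*
+ * Note (annotation, not part of the original patch): h_call_q_selector is
+ * forwarded as the queue_type argument of ehea_h_register_rpage(); the
+ * callers in ehea_create_qp() pass 0 for the send queue and 1..3 for
+ * receive queues RQ1..RQ3.
+ */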
+{ + u64 hret, rpage; + int ret, cnt; + void *vpage; + + ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size); + if (ret) + return ret; + + for (cnt = 0; cnt < nr_pages; cnt++) { + vpage = hw_qpageit_get_inc(hw_queue); + if (!vpage) { + pr_err("hw_qpageit_get_inc failed\n"); + goto out_kill_hwq; + } + rpage = __pa(vpage); + hret = ehea_h_register_rpage(adapter->handle, + 0, h_call_q_selector, + qp->fw_handle, rpage, 1); + if (hret < H_SUCCESS) { + pr_err("register_rpage_qp failed\n"); + goto out_kill_hwq; + } + } + hw_qeit_reset(hw_queue); + return 0; + +out_kill_hwq: + hw_queue_dtor(hw_queue); + return -EIO; +} + +static inline u32 map_wqe_size(u8 wqe_enc_size) +{ + return 128 << wqe_enc_size; +} + +struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, + u32 pd, struct ehea_qp_init_attr *init_attr) +{ + int ret; + u64 hret; + struct ehea_qp *qp; + u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1; + u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3; + + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return NULL; + + qp->adapter = adapter; + + hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, + &qp->fw_handle, &qp->epas); + if (hret != H_SUCCESS) { + pr_err("ehea_h_alloc_resource_qp failed\n"); + goto out_freemem; + } + + wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq); + wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1); + wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2); + wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3); + + ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages, + wqe_size_in_bytes_sq, + init_attr->act_wqe_size_enc_sq, adapter, + 0); + if (ret) { + pr_err("can't register for sq ret=%x\n", ret); + goto out_freeres; + } + + ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1, + init_attr->nr_rq1_pages, + wqe_size_in_bytes_rq1, + init_attr->act_wqe_size_enc_rq1, + adapter, 1); + if (ret) { + pr_err("can't register for rq1 ret=%x\n", ret); + goto out_kill_hwsq; + } + + if (init_attr->rq_count > 1) { + ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2, + init_attr->nr_rq2_pages, + wqe_size_in_bytes_rq2, + init_attr->act_wqe_size_enc_rq2, + adapter, 2); + if (ret) { + pr_err("can't register for rq2 ret=%x\n", ret); + goto out_kill_hwr1q; + } + } + + if (init_attr->rq_count > 2) { + ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3, + init_attr->nr_rq3_pages, + wqe_size_in_bytes_rq3, + init_attr->act_wqe_size_enc_rq3, + adapter, 3); + if (ret) { + pr_err("can't register for rq3 ret=%x\n", ret); + goto out_kill_hwr2q; + } + } + + qp->init_attr = *init_attr; + + return qp; + +out_kill_hwr2q: + hw_queue_dtor(&qp->hw_rqueue2); + +out_kill_hwr1q: + hw_queue_dtor(&qp->hw_rqueue1); + +out_kill_hwsq: + hw_queue_dtor(&qp->hw_squeue); + +out_freeres: + ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle); + ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE); + +out_freemem: + kfree(qp); + return NULL; +} + +static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force) +{ + u64 hret; + struct ehea_qp_init_attr *qp_attr = &qp->init_attr; + + + ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle); + hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force); + if (hret != H_SUCCESS) + return hret; + + hw_queue_dtor(&qp->hw_squeue); + hw_queue_dtor(&qp->hw_rqueue1); + + if (qp_attr->rq_count > 1) + hw_queue_dtor(&qp->hw_rqueue2); + if (qp_attr->rq_count > 2) + hw_queue_dtor(&qp->hw_rqueue3); + kfree(qp); + + return 
hret; +} + +int ehea_destroy_qp(struct ehea_qp *qp) +{ + u64 hret, aer, aerr; + if (!qp) + return 0; + + hcp_epas_dtor(&qp->epas); + + hret = ehea_destroy_qp_res(qp, NORMAL_FREE); + if (hret == H_R_STATE) { + ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr); + hret = ehea_destroy_qp_res(qp, FORCE_FREE); + } + + if (hret != H_SUCCESS) { + pr_err("destroy QP failed\n"); + return -EIO; + } + + return 0; +} + +static inline int ehea_calc_index(unsigned long i, unsigned long s) +{ + return (i >> s) & EHEA_INDEX_MASK; +} + +static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, + int dir) +{ + if (!ehea_top_bmap->dir[dir]) { + ehea_top_bmap->dir[dir] = + kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); + if (!ehea_top_bmap->dir[dir]) + return -ENOMEM; + } + return 0; +} + +static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) +{ + if (!ehea_bmap->top[top]) { + ehea_bmap->top[top] = + kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); + if (!ehea_bmap->top[top]) + return -ENOMEM; + } + return ehea_init_top_bmap(ehea_bmap->top[top], dir); +} + +static DEFINE_MUTEX(ehea_busmap_mutex); +static unsigned long ehea_mr_len; + +#define EHEA_BUSMAP_ADD_SECT 1 +#define EHEA_BUSMAP_REM_SECT 0 + +static void ehea_rebuild_busmap(void) +{ + u64 vaddr = EHEA_BUSMAP_START; + int top, dir, idx; + + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { + struct ehea_top_bmap *ehea_top; + int valid_dir_entries = 0; + + if (!ehea_bmap->top[top]) + continue; + ehea_top = ehea_bmap->top[top]; + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { + struct ehea_dir_bmap *ehea_dir; + int valid_entries = 0; + + if (!ehea_top->dir[dir]) + continue; + valid_dir_entries++; + ehea_dir = ehea_top->dir[dir]; + for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) { + if (!ehea_dir->ent[idx]) + continue; + valid_entries++; + ehea_dir->ent[idx] = vaddr; + vaddr += EHEA_SECTSIZE; + } + if (!valid_entries) { + ehea_top->dir[dir] = NULL; + kfree(ehea_dir); + } + } + if (!valid_dir_entries) { + ehea_bmap->top[top] = NULL; + kfree(ehea_top); + } + } +} + +static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add) +{ + unsigned long i, start_section, end_section; + + if (!nr_pages) + return 0; + + if (!ehea_bmap) { + ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); + if (!ehea_bmap) + return -ENOMEM; + } + + start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; + end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); + /* Mark entries as valid or invalid only; address is assigned later */ + for (i = start_section; i < end_section; i++) { + u64 flag; + int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); + int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); + int idx = i & EHEA_INDEX_MASK; + + if (add) { + int ret = ehea_init_bmap(ehea_bmap, top, dir); + if (ret) + return ret; + flag = 1; /* valid */ + ehea_mr_len += EHEA_SECTSIZE; + } else { + if (!ehea_bmap->top[top]) + continue; + if (!ehea_bmap->top[top]->dir[dir]) + continue; + flag = 0; /* invalid */ + ehea_mr_len -= EHEA_SECTSIZE; + } + + ehea_bmap->top[top]->dir[dir]->ent[idx] = flag; + } + ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */ + return 0; +} + +int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages) +{ + int ret; + + mutex_lock(&ehea_busmap_mutex); + ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); + mutex_unlock(&ehea_busmap_mutex); + return ret; +} + +int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) +{ + int ret; + + 
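/*
 * Rough sketch of how the three-level busmap above is indexed (the shift
 * and mask constants are defined elsewhere in the driver headers and are
 * only assumed here to line up with the code):
 *
 *   section = physical_address >> SECTION_SIZE_BITS;
 *   top     = (section >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
 *   dir     = (section >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
 *   idx     =  section                          & EHEA_INDEX_MASK;
 *
 * ehea_update_busmap() above only marks ent[idx] as valid or invalid;
 * ehea_rebuild_busmap() then hands out the contiguous bus addresses that
 * ehea_map_vaddr() further down returns, plus the offset inside the
 * 16 MB section.
 */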
mutex_lock(&ehea_busmap_mutex); + ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT); + mutex_unlock(&ehea_busmap_mutex); + return ret; +} + +static int ehea_is_hugepage(unsigned long pfn) +{ + int page_order; + + if (pfn & EHEA_HUGEPAGE_PFN_MASK) + return 0; + + page_order = compound_order(pfn_to_page(pfn)); + if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT) + return 0; + + return 1; +} + +static int ehea_create_busmap_callback(unsigned long initial_pfn, + unsigned long total_nr_pages, void *arg) +{ + int ret; + unsigned long pfn, start_pfn, end_pfn, nr_pages; + + if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE) + return ehea_update_busmap(initial_pfn, total_nr_pages, + EHEA_BUSMAP_ADD_SECT); + + /* Given chunk is >= 16GB -> check for hugepages */ + start_pfn = initial_pfn; + end_pfn = initial_pfn + total_nr_pages; + pfn = start_pfn; + + while (pfn < end_pfn) { + if (ehea_is_hugepage(pfn)) { + /* Add mem found in front of the hugepage */ + nr_pages = pfn - start_pfn; + ret = ehea_update_busmap(start_pfn, nr_pages, + EHEA_BUSMAP_ADD_SECT); + if (ret) + return ret; + + /* Skip the hugepage */ + pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE); + start_pfn = pfn; + } else + pfn += (EHEA_SECTSIZE / PAGE_SIZE); + } + + /* Add mem found behind the hugepage(s) */ + nr_pages = pfn - start_pfn; + return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT); +} + +int ehea_create_busmap(void) +{ + int ret; + + mutex_lock(&ehea_busmap_mutex); + ehea_mr_len = 0; + ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL, + ehea_create_busmap_callback); + mutex_unlock(&ehea_busmap_mutex); + return ret; +} + +void ehea_destroy_busmap(void) +{ + int top, dir; + mutex_lock(&ehea_busmap_mutex); + if (!ehea_bmap) + goto out_destroy; + + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { + if (!ehea_bmap->top[top]) + continue; + + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { + if (!ehea_bmap->top[top]->dir[dir]) + continue; + + kfree(ehea_bmap->top[top]->dir[dir]); + } + + kfree(ehea_bmap->top[top]); + } + + kfree(ehea_bmap); + ehea_bmap = NULL; +out_destroy: + mutex_unlock(&ehea_busmap_mutex); +} + +u64 ehea_map_vaddr(void *caddr) +{ + int top, dir, idx; + unsigned long index, offset; + + if (!ehea_bmap) + return EHEA_INVAL_ADDR; + + index = __pa(caddr) >> SECTION_SIZE_BITS; + top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]) + return EHEA_INVAL_ADDR; + + dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]->dir[dir]) + return EHEA_INVAL_ADDR; + + idx = index & EHEA_INDEX_MASK; + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) + return EHEA_INVAL_ADDR; + + offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1); + return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset; +} + +static inline void *ehea_calc_sectbase(int top, int dir, int idx) +{ + unsigned long ret = idx; + ret |= dir << EHEA_DIR_INDEX_SHIFT; + ret |= top << EHEA_TOP_INDEX_SHIFT; + return __va(ret << SECTION_SIZE_BITS); +} + +static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + void *pg; + u64 j, m, hret; + unsigned long k = 0; + u64 pt_abs = __pa(pt); + + void *sectbase = ehea_calc_sectbase(top, dir, idx); + + for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) { + + for (m = 0; m < EHEA_MAX_RPAGE; m++) { + pg = sectbase + ((k++) * EHEA_PAGESIZE); + pt[m] = __pa(pg); + } + hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, + 0, pt_abs, EHEA_MAX_RPAGE); + + if 
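/*
 * For scale, derived from the EHEA_* defines in ehea_qmr.h further down in
 * this patch: one 16 MB section holds EHEA_PAGES_PER_SECTION = 2^24 >> 12 =
 * 4096 pages of 4 KB, and the loop above registers them EHEA_MAX_RPAGE at a
 * time by filling the scratch page 'pt' with their physical addresses and
 * passing its own physical address (pt_abs) to ehea_h_register_rpage_mr().
 */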
((hret != H_SUCCESS) && + (hret != H_PAGE_REGISTERED)) { + ehea_h_free_resource(adapter->handle, mr->handle, + FORCE_FREE); + pr_err("register_rpage_mr failed\n"); + return hret; + } + } + return hret; +} + +static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + u64 hret = H_SUCCESS; + int idx; + + for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) { + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) + continue; + + hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr); + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) + return hret; + } + return hret; +} + +static u64 ehea_reg_mr_dir_sections(int top, u64 *pt, + struct ehea_adapter *adapter, + struct ehea_mr *mr) +{ + u64 hret = H_SUCCESS; + int dir; + + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { + if (!ehea_bmap->top[top]->dir[dir]) + continue; + + hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr); + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) + return hret; + } + return hret; +} + +int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) +{ + int ret; + u64 *pt; + u64 hret; + u32 acc_ctrl = EHEA_MR_ACC_CTRL; + + unsigned long top; + + pt = (void *)get_zeroed_page(GFP_KERNEL); + if (!pt) { + pr_err("no mem\n"); + ret = -ENOMEM; + goto out; + } + + hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START, + ehea_mr_len, acc_ctrl, adapter->pd, + &mr->handle, &mr->lkey); + + if (hret != H_SUCCESS) { + pr_err("alloc_resource_mr failed\n"); + ret = -EIO; + goto out; + } + + if (!ehea_bmap) { + ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); + pr_err("no busmap available\n"); + ret = -EIO; + goto out; + } + + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { + if (!ehea_bmap->top[top]) + continue; + + hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr); + if((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS)) + break; + } + + if (hret != H_SUCCESS) { + ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); + pr_err("registering mr failed\n"); + ret = -EIO; + goto out; + } + + mr->vaddr = EHEA_BUSMAP_START; + mr->adapter = adapter; + ret = 0; +out: + free_page((unsigned long)pt); + return ret; +} + +int ehea_rem_mr(struct ehea_mr *mr) +{ + u64 hret; + + if (!mr || !mr->adapter) + return -EINVAL; + + hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, + FORCE_FREE); + if (hret != H_SUCCESS) { + pr_err("destroy MR failed\n"); + return -EIO; + } + + return 0; +} + +int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr, + struct ehea_mr *shared_mr) +{ + u64 hret; + + hret = ehea_h_register_smr(adapter->handle, old_mr->handle, + old_mr->vaddr, EHEA_MR_ACC_CTRL, + adapter->pd, shared_mr); + if (hret != H_SUCCESS) + return -EIO; + + shared_mr->adapter = adapter; + + return 0; +} + +static void print_error_data(u64 *data) +{ + int length; + u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]); + u64 resource = data[1]; + + length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]); + + if (length > EHEA_PAGESIZE) + length = EHEA_PAGESIZE; + + if (type == EHEA_AER_RESTYPE_QP) + pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n", + resource, data[6], data[12], data[22]); + else if (type == EHEA_AER_RESTYPE_CQ) + pr_err("CQ (resource=%llX) state: AER=0x%llX\n", + resource, data[6]); + else if (type == EHEA_AER_RESTYPE_EQ) + pr_err("EQ (resource=%llX) state: AER=0x%llX\n", + resource, data[6]); + + ehea_dump(data, length, "error data"); +} + +u64 ehea_error_data(struct ehea_adapter 
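/*
 * Layout of the error-data block used by print_error_data() above and
 * ehea_error_data() here (indices are 64-bit words of the rblock page):
 * data[0] carries the length field, data[1] the resource handle, data[2]
 * the resource type, data[6] the AER and data[12] the AERR values that the
 * destroy paths fetch before retrying with FORCE_FREE.
 */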
*adapter, u64 res_handle, + u64 *aer, u64 *aerr) +{ + unsigned long ret; + u64 *rblock; + u64 type = 0; + + rblock = (void *)get_zeroed_page(GFP_KERNEL); + if (!rblock) { + pr_err("Cannot allocate rblock memory\n"); + goto out; + } + + ret = ehea_h_error_data(adapter->handle, res_handle, rblock); + + if (ret == H_SUCCESS) { + type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]); + *aer = rblock[6]; + *aerr = rblock[12]; + print_error_data(rblock); + } else if (ret == H_R_STATE) { + pr_err("No error data available: %llX\n", res_handle); + } else + pr_err("Error data could not be fetched: %llX\n", res_handle); + + free_page((unsigned long)rblock); +out: + return type; +} diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h new file mode 100644 index 000000000..8e4a70c20 --- /dev/null +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h @@ -0,0 +1,404 @@ +/* + * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h + * + * eHEA ethernet device driver for IBM eServer System p + * + * (C) Copyright IBM Corp. 2006 + * + * Authors: + * Christoph Raisch + * Jan-Bernd Themann + * Thomas Klein + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __EHEA_QMR_H__ +#define __EHEA_QMR_H__ + +#include +#include "ehea.h" +#include "ehea_hw.h" + +/* + * page size of ehea hardware queues + */ + +#define EHEA_PAGESHIFT 12 +#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT) +#define EHEA_SECTSIZE (1UL << 24) +#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) +#define EHEA_HUGEPAGESHIFT 34 +#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT) +#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT) + +#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE) +#error eHEA module cannot work if kernel sectionsize < ehea sectionsize +#endif + +/* Some abbreviations used here: + * + * WQE - Work Queue Entry + * SWQE - Send Work Queue Entry + * RWQE - Receive Work Queue Entry + * CQE - Completion Queue Entry + * EQE - Event Queue Entry + * MR - Memory Region + */ + +/* Use of WR_ID field for EHEA */ +#define EHEA_WR_ID_COUNT EHEA_BMASK_IBM(0, 19) +#define EHEA_WR_ID_TYPE EHEA_BMASK_IBM(20, 23) +#define EHEA_SWQE2_TYPE 0x1 +#define EHEA_SWQE3_TYPE 0x2 +#define EHEA_RWQE2_TYPE 0x3 +#define EHEA_RWQE3_TYPE 0x4 +#define EHEA_WR_ID_INDEX EHEA_BMASK_IBM(24, 47) +#define EHEA_WR_ID_REFILL EHEA_BMASK_IBM(48, 63) + +struct ehea_vsgentry { + u64 vaddr; + u32 l_key; + u32 len; +}; + +/* maximum number of sg entries allowed in a WQE */ +#define EHEA_MAX_WQE_SG_ENTRIES 252 +#define SWQE2_MAX_IMM (0xD0 - 0x30) +#define SWQE3_MAX_IMM 224 + +/* tx control flags for swqe */ +#define EHEA_SWQE_CRC 0x8000 +#define EHEA_SWQE_IP_CHECKSUM 0x4000 +#define EHEA_SWQE_TCP_CHECKSUM 0x2000 +#define EHEA_SWQE_TSO 0x1000 +#define EHEA_SWQE_SIGNALLED_COMPLETION 0x0800 +#define EHEA_SWQE_VLAN_INSERT 0x0400 +#define EHEA_SWQE_IMM_DATA_PRESENT 0x0200 +#define EHEA_SWQE_DESCRIPTORS_PRESENT 0x0100 +#define EHEA_SWQE_WRAP_CTL_REC 0x0080 +#define EHEA_SWQE_WRAP_CTL_FORCE 0x0040 +#define EHEA_SWQE_BIND 0x0020 +#define EHEA_SWQE_PURGE 0x0010 + +/* sizeof(struct ehea_swqe) less the union */ +#define SWQE_HEADER_SIZE 32 + +struct ehea_swqe { + u64 wr_id; + u16 tx_control; + u16 vlan_tag; + u8 reserved1; + u8 ip_start; + u8 ip_end; + u8 immediate_data_length; + u8 tcp_offset; + u8 reserved2; + u16 reserved2b; + u8 wrap_tag; + u8 descriptors; /* number of valid descriptors in WQE */ + u16 reserved3; + u16 reserved4; + u16 mss; + u32 reserved5; + union { + /* Send WQE Format 1 */ + struct { + struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; + } no_immediate_data; + + /* Send WQE Format 2 */ + struct { + struct ehea_vsgentry sg_entry; + /* 0x30 */ + u8 immediate_data[SWQE2_MAX_IMM]; + /* 0xd0 */ + struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1]; + } immdata_desc __packed; + + /* Send WQE Format 3 */ + struct { + u8 immediate_data[SWQE3_MAX_IMM]; + } immdata_nodesc; + } u; +}; + +struct ehea_rwqe { + u64 wr_id; /* work request ID */ + u8 reserved1[5]; + u8 data_segments; + u16 reserved2; + u64 reserved3; + u64 reserved4; + struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES]; +}; + +#define EHEA_CQE_VLAN_TAG_XTRACT 0x0400 + +#define EHEA_CQE_TYPE_RQ 0x60 +#define EHEA_CQE_STAT_ERR_MASK 0x700F +#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF +#define EHEA_CQE_BLIND_CKSUM 0x8000 +#define EHEA_CQE_STAT_ERR_TCP 0x4000 +#define EHEA_CQE_STAT_ERR_IP 0x2000 +#define EHEA_CQE_STAT_ERR_CRC 0x1000 + +/* Defines which bad send cqe stati lead to a port reset */ +#define EHEA_CQE_STAT_RESET_MASK 0x0002 + +struct ehea_cqe { + u64 wr_id; /* work request ID from WQE */ + u8 type; + u8 valid; + u16 status; + u16 reserved1; + u16 num_bytes_transfered; + u16 vlan_tag; + 
u16 inet_checksum_value; + u8 reserved2; + u8 header_length; + u16 reserved3; + u16 page_offset; + u16 wqe_count; + u32 qp_token; + u32 timestamp; + u32 reserved4; + u64 reserved5[3]; +}; + +#define EHEA_EQE_VALID EHEA_BMASK_IBM(0, 0) +#define EHEA_EQE_IS_CQE EHEA_BMASK_IBM(1, 1) +#define EHEA_EQE_IDENTIFIER EHEA_BMASK_IBM(2, 7) +#define EHEA_EQE_QP_CQ_NUMBER EHEA_BMASK_IBM(8, 31) +#define EHEA_EQE_QP_TOKEN EHEA_BMASK_IBM(32, 63) +#define EHEA_EQE_CQ_TOKEN EHEA_BMASK_IBM(32, 63) +#define EHEA_EQE_KEY EHEA_BMASK_IBM(32, 63) +#define EHEA_EQE_PORT_NUMBER EHEA_BMASK_IBM(56, 63) +#define EHEA_EQE_EQ_NUMBER EHEA_BMASK_IBM(48, 63) +#define EHEA_EQE_SM_ID EHEA_BMASK_IBM(48, 63) +#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55) +#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63) + +#define EHEA_AER_RESTYPE_QP 0x8 +#define EHEA_AER_RESTYPE_CQ 0x4 +#define EHEA_AER_RESTYPE_EQ 0x3 + +/* Defines which affiliated errors lead to a port reset */ +#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL +#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL + +struct ehea_eqe { + u64 entry; +}; + +#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63) +#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7) + +static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) +{ + struct ehea_page *current_page; + + if (q_offset >= queue->queue_length) + q_offset -= queue->queue_length; + current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; + return ¤t_page->entries[q_offset & (EHEA_PAGESIZE - 1)]; +} + +static inline void *hw_qeit_get(struct hw_queue *queue) +{ + return hw_qeit_calc(queue, queue->current_q_offset); +} + +static inline void hw_qeit_inc(struct hw_queue *queue) +{ + queue->current_q_offset += queue->qe_size; + if (queue->current_q_offset >= queue->queue_length) { + queue->current_q_offset = 0; + /* toggle the valid flag */ + queue->toggle_state = (~queue->toggle_state) & 1; + } +} + +static inline void *hw_qeit_get_inc(struct hw_queue *queue) +{ + void *retvalue = hw_qeit_get(queue); + hw_qeit_inc(queue); + return retvalue; +} + +static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue) +{ + struct ehea_cqe *retvalue = hw_qeit_get(queue); + u8 valid = retvalue->valid; + void *pref; + + if ((valid >> 7) == (queue->toggle_state & 1)) { + /* this is a good one */ + hw_qeit_inc(queue); + pref = hw_qeit_calc(queue, queue->current_q_offset); + prefetch(pref); + prefetch(pref + 128); + } else + retvalue = NULL; + return retvalue; +} + +static inline void *hw_qeit_get_valid(struct hw_queue *queue) +{ + struct ehea_cqe *retvalue = hw_qeit_get(queue); + void *pref; + u8 valid; + + pref = hw_qeit_calc(queue, queue->current_q_offset); + prefetch(pref); + prefetch(pref + 128); + prefetch(pref + 256); + valid = retvalue->valid; + if (!((valid >> 7) == (queue->toggle_state & 1))) + retvalue = NULL; + return retvalue; +} + +static inline void *hw_qeit_reset(struct hw_queue *queue) +{ + queue->current_q_offset = 0; + return hw_qeit_get(queue); +} + +static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue) +{ + u64 last_entry_in_q = queue->queue_length - queue->qe_size; + void *retvalue; + + retvalue = hw_qeit_get(queue); + queue->current_q_offset += queue->qe_size; + if (queue->current_q_offset > last_entry_in_q) { + queue->current_q_offset = 0; + queue->toggle_state = (~queue->toggle_state) & 1; + } + return retvalue; +} + +static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue) +{ + void *retvalue = hw_qeit_get(queue); + u32 qe = *(u8 *)retvalue; + if ((qe >> 7) == 
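/*
 * Validity check, same idea as hw_qeit_get_inc_valid() above: consumed
 * entries are not cleared. The top bit of the first byte carries a
 * generation flag, and hw_qeit_inc()/hw_qeit_eq_get_inc() flip
 * queue->toggle_state every time the queue wraps, so an entry is fresh
 * exactly when that bit matches the current toggle_state.
 */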
(queue->toggle_state & 1)) + hw_qeit_eq_get_inc(queue); + else + retvalue = NULL; + return retvalue; +} + +static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp, + int rq_nr) +{ + struct hw_queue *queue; + + if (rq_nr == 1) + queue = &qp->hw_rqueue1; + else if (rq_nr == 2) + queue = &qp->hw_rqueue2; + else + queue = &qp->hw_rqueue3; + + return hw_qeit_get_inc(queue); +} + +static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp, + int *wqe_index) +{ + struct hw_queue *queue = &my_qp->hw_squeue; + struct ehea_swqe *wqe_p; + + *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ); + wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue); + + return wqe_p; +} + +static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe) +{ + iosync(); + ehea_update_sqa(my_qp, 1); +} + +static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index) +{ + struct hw_queue *queue = &qp->hw_rqueue1; + + *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1); + return hw_qeit_get_valid(queue); +} + +static inline void ehea_inc_cq(struct ehea_cq *cq) +{ + hw_qeit_inc(&cq->hw_queue); +} + +static inline void ehea_inc_rq1(struct ehea_qp *qp) +{ + hw_qeit_inc(&qp->hw_rqueue1); +} + +static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq) +{ + return hw_qeit_get_valid(&my_cq->hw_queue); +} + +#define EHEA_CQ_REGISTER_ORIG 0 +#define EHEA_EQ_REGISTER_ORIG 0 + +enum ehea_eq_type { + EHEA_EQ = 0, /* event queue */ + EHEA_NEQ /* notification event queue */ +}; + +struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, + enum ehea_eq_type type, + const u32 length, const u8 eqe_gen); + +int ehea_destroy_eq(struct ehea_eq *eq); + +struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq); + +struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe, + u64 eq_handle, u32 cq_token); + +int ehea_destroy_cq(struct ehea_cq *cq); + +struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd, + struct ehea_qp_init_attr *init_attr); + +int ehea_destroy_qp(struct ehea_qp *qp); + +int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr); + +int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr, + struct ehea_mr *shared_mr); + +int ehea_rem_mr(struct ehea_mr *mr); + +u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle, + u64 *aer, u64 *aerr); + +int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages); +int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages); +int ehea_create_busmap(void); +void ehea_destroy_busmap(void); +u64 ehea_map_vaddr(void *caddr); + +#endif /* __EHEA_QMR_H__ */ diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig new file mode 100644 index 000000000..3f44a30e0 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/Kconfig @@ -0,0 +1,76 @@ +config IBM_EMAC + tristate "IBM EMAC Ethernet support" + depends on PPC_DCR + select CRC32 + help + This driver supports the IBM EMAC family of Ethernet controllers + typically found on 4xx embedded PowerPC chips, but also on the + Axon southbridge for Cell. 
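# For illustration only: a .config fragment that enables the driver with the
# default tunables declared below. The options end up as CONFIG_IBM_EMAC_*
# macros that core.c reads at build time (e.g. CONFIG_IBM_EMAC_RX_COPY_THRESHOLD).
#
#   CONFIG_IBM_EMAC=y
#   CONFIG_IBM_EMAC_RXB=128
#   CONFIG_IBM_EMAC_TXB=64
#   CONFIG_IBM_EMAC_POLL_WEIGHT=32
#   CONFIG_IBM_EMAC_RX_COPY_THRESHOLD=256
#   CONFIG_IBM_EMAC_RX_SKB_HEADROOM=0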
+ +config IBM_EMAC_RXB + int "Number of receive buffers" + depends on IBM_EMAC + default "128" + +config IBM_EMAC_TXB + int "Number of transmit buffers" + depends on IBM_EMAC + default "64" + +config IBM_EMAC_POLL_WEIGHT + int "MAL NAPI polling weight" + depends on IBM_EMAC + default "32" + +config IBM_EMAC_RX_COPY_THRESHOLD + int "RX skb copy threshold (bytes)" + depends on IBM_EMAC + default "256" + +config IBM_EMAC_RX_SKB_HEADROOM + int "Additional RX skb headroom (bytes)" + depends on IBM_EMAC + default "0" + help + Additional receive skb headroom. Note, that driver + will always reserve at least 2 bytes to make IP header + aligned, so usually there is no need to add any additional + headroom. + + If unsure, set to 0. + +config IBM_EMAC_DEBUG + bool "Debugging" + depends on IBM_EMAC + default n + +# The options below has to be select'ed by the respective +# processor types or platforms + +config IBM_EMAC_ZMII + bool + default n + +config IBM_EMAC_RGMII + bool + default n + +config IBM_EMAC_TAH + bool + default n + +config IBM_EMAC_EMAC4 + bool + default n + +config IBM_EMAC_NO_FLOW_CTRL + bool + default n + +config IBM_EMAC_MAL_CLR_ICINTSTAT + bool + default n + +config IBM_EMAC_MAL_COMMON_ERR + bool + default n diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile new file mode 100644 index 000000000..eba21835d --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the PowerPC 4xx on-chip ethernet driver +# + +obj-$(CONFIG_IBM_EMAC) += ibm_emac.o + +ibm_emac-y := mal.o core.o phy.o +ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += zmii.o +ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += rgmii.o +ibm_emac-$(CONFIG_IBM_EMAC_TAH) += tah.o +ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += debug.o diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c new file mode 100644 index 000000000..b9df0cbd0 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -0,0 +1,3100 @@ +/* + * drivers/net/ethernet/ibm/emac/core.c + * + * Driver for PowerPC 4xx on-chip ethernet controller. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Matt Porter + * (c) 2003 Benjamin Herrenschmidt + * Armin Kuster + * Johnnie Peters + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "core.h" + +/* + * Lack of dma_unmap_???? calls is intentional. + * + * API-correct usage requires additional support state information to be + * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to + * EMAC design (e.g. TX buffer passed from network stack can be split into + * several BDs, dma_map_single/dma_map_page can be used to map particular BD), + * maintaining such information will add additional overhead. + * Current DMA API implementation for 4xx processors only ensures cache coherency + * and dma_unmap_???? routines are empty and are likely to stay this way. 
+ * I decided to omit dma_unmap_??? calls because I don't want to add additional + * complexity just for the sake of following some abstract API, when it doesn't + * add any real benefit to the driver. I understand that this decision maybe + * controversial, but I really tried to make code API-correct and efficient + * at the same time and didn't come up with code I liked :(. --ebs + */ + +#define DRV_NAME "emac" +#define DRV_VERSION "3.54" +#define DRV_DESC "PPC 4xx OCP EMAC driver" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR + ("Eugene Surovegin or "); +MODULE_LICENSE("GPL"); + +/* minimum number of free TX descriptors required to wake up TX process */ +#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4) + +/* If packet size is less than this number, we allocate small skb and copy packet + * contents into it instead of just sending original big skb up + */ +#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD + +/* Since multiple EMACs share MDIO lines in various ways, we need + * to avoid re-using the same PHY ID in cases where the arch didn't + * setup precise phy_map entries + * + * XXX This is something that needs to be reworked as we can have multiple + * EMAC "sets" (multiple ASICs containing several EMACs) though we can + * probably require in that case to have explicit PHY IDs in the device-tree + */ +static u32 busy_phy_map; +static DEFINE_MUTEX(emac_phy_map_lock); + +/* This is the wait queue used to wait on any event related to probe, that + * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc... + */ +static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait); + +/* Having stable interface names is a doomed idea. However, it would be nice + * if we didn't have completely random interface names at boot too :-) It's + * just a matter of making everybody's life easier. Since we are doing + * threaded probing, it's a bit harder though. The base idea here is that + * we make up a list of all emacs in the device-tree before we register the + * driver. Every emac will then wait for the previous one in the list to + * initialize before itself. We should also keep that list ordered by + * cell_index. + * That list is only 4 entries long, meaning that additional EMACs don't + * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased. + */ + +#define EMAC_BOOT_LIST_SIZE 4 +static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE]; + +/* How long should I wait for dependent devices ? */ +#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5) + +/* I don't want to litter system log with timeout errors + * when we have brain-damaged PHY. 
+ */ +static inline void emac_report_timeout_error(struct emac_instance *dev, + const char *error) +{ + if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX | + EMAC_FTR_460EX_PHY_CLK_FIX | + EMAC_FTR_440EP_PHY_CLK_FIX)) + DBG(dev, "%s" NL, error); + else if (net_ratelimit()) + printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name, + error); +} + +/* EMAC PHY clock workaround: + * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, + * which allows controlling each EMAC clock + */ +static inline void emac_rx_clk_tx(struct emac_instance *dev) +{ +#ifdef CONFIG_PPC_DCR_NATIVE + if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX)) + dcri_clrset(SDR0, SDR0_MFR, + 0, SDR0_MFR_ECS >> dev->cell_index); +#endif +} + +static inline void emac_rx_clk_default(struct emac_instance *dev) +{ +#ifdef CONFIG_PPC_DCR_NATIVE + if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX)) + dcri_clrset(SDR0, SDR0_MFR, + SDR0_MFR_ECS >> dev->cell_index, 0); +#endif +} + +/* PHY polling intervals */ +#define PHY_POLL_LINK_ON HZ +#define PHY_POLL_LINK_OFF (HZ / 5) + +/* Graceful stop timeouts in us. + * We should allow up to 1 frame time (full-duplex, ignoring collisions) + */ +#define STOP_TIMEOUT_10 1230 +#define STOP_TIMEOUT_100 124 +#define STOP_TIMEOUT_1000 13 +#define STOP_TIMEOUT_1000_JUMBO 73 + +static unsigned char default_mcast_addr[] = { + 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 +}; + +/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */ +static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = { + "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum", + "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom", + "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu", + "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet", + "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error", + "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range", + "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun", + "rx_bad_packet", "rx_runt_packet", "rx_short_event", + "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long", + "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors", + "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral", + "tx_bd_excessive_collisions", "tx_bd_late_collision", + "tx_bd_multple_collisions", "tx_bd_single_collision", + "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe", + "tx_errors" +}; + +static irqreturn_t emac_irq(int irq, void *dev_instance); +static void emac_clean_tx_ring(struct emac_instance *dev); +static void __emac_set_multicast_list(struct emac_instance *dev); + +static inline int emac_phy_supports_gige(int phy_mode) +{ + return phy_mode == PHY_MODE_GMII || + phy_mode == PHY_MODE_RGMII || + phy_mode == PHY_MODE_SGMII || + phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline int emac_phy_gpcs(int phy_mode) +{ + return phy_mode == PHY_MODE_SGMII || + phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline void emac_tx_enable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "tx_enable" NL); + + r = in_be32(&p->mr0); + if (!(r & EMAC_MR0_TXE)) + out_be32(&p->mr0, r | EMAC_MR0_TXE); +} + +static void emac_tx_disable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "tx_disable" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_TXE) { + int n = dev->stop_timeout; + out_be32(&p->mr0, 
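/*
 * Disable pattern used for TX here and for RX below: clear the enable bit
 * in MR0, then poll the matching idle bit (EMAC_MR0_TXI or EMAC_MR0_RXI)
 * for up to dev->stop_timeout microseconds before reporting a disable
 * timeout; emac_configure() sizes that budget from the STOP_TIMEOUT_*
 * values above, roughly one maximum frame time at the negotiated speed.
 */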
r & ~EMAC_MR0_TXE); + while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, "TX disable timeout"); + } +} + +static void emac_rx_enable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) + goto out; + + DBG(dev, "rx_enable" NL); + + r = in_be32(&p->mr0); + if (!(r & EMAC_MR0_RXE)) { + if (unlikely(!(r & EMAC_MR0_RXI))) { + /* Wait if previous async disable is still in progress */ + int n = dev->stop_timeout; + while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, + "RX disable timeout"); + } + out_be32(&p->mr0, r | EMAC_MR0_RXE); + } + out: + ; +} + +static void emac_rx_disable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "rx_disable" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_RXE) { + int n = dev->stop_timeout; + out_be32(&p->mr0, r & ~EMAC_MR0_RXE); + while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, "RX disable timeout"); + } +} + +static inline void emac_netif_stop(struct emac_instance *dev) +{ + netif_tx_lock_bh(dev->ndev); + netif_addr_lock(dev->ndev); + dev->no_mcast = 1; + netif_addr_unlock(dev->ndev); + netif_tx_unlock_bh(dev->ndev); + dev->ndev->trans_start = jiffies; /* prevent tx timeout */ + mal_poll_disable(dev->mal, &dev->commac); + netif_tx_disable(dev->ndev); +} + +static inline void emac_netif_start(struct emac_instance *dev) +{ + netif_tx_lock_bh(dev->ndev); + netif_addr_lock(dev->ndev); + dev->no_mcast = 0; + if (dev->mcast_pending && netif_running(dev->ndev)) + __emac_set_multicast_list(dev); + netif_addr_unlock(dev->ndev); + netif_tx_unlock_bh(dev->ndev); + + netif_wake_queue(dev->ndev); + + /* NOTE: unconditional netif_wake_queue is only appropriate + * so long as all callers are assured to have free tx slots + * (taken from tg3... though the case where that is wrong is + * not terribly harmful) + */ + mal_poll_enable(dev->mal, &dev->commac); +} + +static inline void emac_rx_disable_async(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "rx_disable_async" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_RXE) + out_be32(&p->mr0, r & ~EMAC_MR0_RXE); +} + +static int emac_reset(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + int n = 20; + + DBG(dev, "reset" NL); + + if (!dev->reset_failed) { + /* 40x erratum suggests stopping RX channel before reset, + * we stop TX as well + */ + emac_rx_disable(dev); + emac_tx_disable(dev); + } + +#ifdef CONFIG_PPC_DCR_NATIVE + /* + * PPC460EX/GT Embedded Processor Advanced User's Manual + * section 28.10.1 Mode Register 0 (EMACx_MR0) states: + * Note: The PHY must provide a TX Clk in order to perform a soft reset + * of the EMAC. If none is present, select the internal clock + * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). + * After a soft reset, select the external clock. 
+ */ + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: select internal loop clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + 0, SDR0_ETH_CFG_ECS << dev->cell_index); + } else { + /* PHY present: select external clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } +#endif + + out_be32(&p->mr0, EMAC_MR0_SRST); + while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n) + --n; + +#ifdef CONFIG_PPC_DCR_NATIVE + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: restore external clock source after reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } +#endif + + if (n) { + dev->reset_failed = 0; + return 0; + } else { + emac_report_timeout_error(dev, "reset timeout"); + dev->reset_failed = 1; + return -ETIMEDOUT; + } +} + +static void emac_hash_mc(struct emac_instance *dev) +{ + const int regs = EMAC_XAHT_REGS(dev); + u32 *gaht_base = emac_gaht_base(dev); + u32 gaht_temp[regs]; + struct netdev_hw_addr *ha; + int i; + + DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev)); + + memset(gaht_temp, 0, sizeof (gaht_temp)); + + netdev_for_each_mc_addr(ha, dev->ndev) { + int slot, reg, mask; + DBG2(dev, "mc %pM" NL, ha->addr); + + slot = EMAC_XAHT_CRC_TO_SLOT(dev, + ether_crc(ETH_ALEN, ha->addr)); + reg = EMAC_XAHT_SLOT_TO_REG(dev, slot); + mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot); + + gaht_temp[reg] |= mask; + } + + for (i = 0; i < regs; i++) + out_be32(gaht_base + i, gaht_temp[i]); +} + +static inline u32 emac_iff2rmr(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + u32 r; + + r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE; + + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r |= EMAC4_RMR_BASE; + else + r |= EMAC_RMR_BASE; + + if (ndev->flags & IFF_PROMISC) + r |= EMAC_RMR_PME; + else if (ndev->flags & IFF_ALLMULTI || + (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev))) + r |= EMAC_RMR_PMME; + else if (!netdev_mc_empty(ndev)) + r |= EMAC_RMR_MAE; + + if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) { + r &= ~EMAC4_RMR_MJS_MASK; + r |= EMAC4_RMR_MJS(ndev->mtu); + } + + return r; +} + +static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT; + + DBG2(dev, "__emac_calc_base_mr1" NL); + + switch(tx_size) { + case 2048: + ret |= EMAC_MR1_TFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", + dev->ndev->name, tx_size); + } + + switch(rx_size) { + case 16384: + ret |= EMAC_MR1_RFS_16K; + break; + case 4096: + ret |= EMAC_MR1_RFS_4K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, rx_size); + } + + return ret; +} + +static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR | + EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000); + + DBG2(dev, "__emac4_calc_base_mr1" NL); + + switch(tx_size) { + case 16384: + ret |= EMAC4_MR1_TFS_16K; + break; + case 4096: + ret |= EMAC4_MR1_TFS_4K; + break; + case 2048: + ret |= EMAC4_MR1_TFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", + dev->ndev->name, tx_size); + } + + switch(rx_size) { + case 16384: + ret |= EMAC4_MR1_RFS_16K; + break; + case 4096: + ret |= 
EMAC4_MR1_RFS_4K; + break; + case 2048: + ret |= EMAC4_MR1_RFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, rx_size); + } + + return ret; +} + +static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + return emac_has_feature(dev, EMAC_FTR_EMAC4) ? + __emac4_calc_base_mr1(dev, tx_size, rx_size) : + __emac_calc_base_mr1(dev, tx_size, rx_size); +} + +static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4; + else + return ((size >> 6) - 1) << EMAC_TRTR_SHIFT; +} + +static inline u32 emac_calc_rwmr(struct emac_instance *dev, + unsigned int low, unsigned int high) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return (low << 22) | ( (high & 0x3ff) << 6); + else + return (low << 23) | ( (high & 0x1ff) << 7); +} + +static int emac_configure(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + struct net_device *ndev = dev->ndev; + int tx_size, rx_size, link = netif_carrier_ok(dev->ndev); + u32 r, mr1 = 0; + + DBG(dev, "configure" NL); + + if (!link) { + out_be32(&p->mr1, in_be32(&p->mr1) + | EMAC_MR1_FDE | EMAC_MR1_ILE); + udelay(100); + } else if (emac_reset(dev) < 0) + return -ETIMEDOUT; + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_reset(dev->tah_dev); + + DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n", + link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause); + + /* Default fifo sizes */ + tx_size = dev->tx_fifo_size; + rx_size = dev->rx_fifo_size; + + /* No link, force loopback */ + if (!link) + mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE; + + /* Check for full duplex */ + else if (dev->phy.duplex == DUPLEX_FULL) + mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001; + + /* Adjust fifo sizes, mr1 and timeouts based on link speed */ + dev->stop_timeout = STOP_TIMEOUT_10; + switch (dev->phy.speed) { + case SPEED_1000: + if (emac_phy_gpcs(dev->phy.mode)) { + mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA( + (dev->phy.gpcs_address != 0xffffffff) ? + dev->phy.gpcs_address : dev->phy.address); + + /* Put some arbitrary OUI, Manuf & Rev IDs so we can + * identify this GPCS PHY later. 
+ */ + out_be32(&p->u1.emac4.ipcr, 0xdeadbeef); + } else + mr1 |= EMAC_MR1_MF_1000; + + /* Extended fifo sizes */ + tx_size = dev->tx_fifo_size_gige; + rx_size = dev->rx_fifo_size_gige; + + if (dev->ndev->mtu > ETH_DATA_LEN) { + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + mr1 |= EMAC4_MR1_JPSM; + else + mr1 |= EMAC_MR1_JPSM; + dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO; + } else + dev->stop_timeout = STOP_TIMEOUT_1000; + break; + case SPEED_100: + mr1 |= EMAC_MR1_MF_100; + dev->stop_timeout = STOP_TIMEOUT_100; + break; + default: /* make gcc happy */ + break; + } + + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port, + dev->phy.speed); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed); + + /* on 40x erratum forces us to NOT use integrated flow control, + * let's hope it works on 44x ;) + */ + if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) && + dev->phy.duplex == DUPLEX_FULL) { + if (dev->phy.pause) + mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP; + else if (dev->phy.asym_pause) + mr1 |= EMAC_MR1_APP; + } + + /* Add base settings & fifo sizes & program MR1 */ + mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size); + out_be32(&p->mr1, mr1); + + /* Set individual MAC address */ + out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]); + out_be32(&p->ialr, (ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]); + + /* VLAN Tag Protocol ID */ + out_be32(&p->vtpid, 0x8100); + + /* Receive mode register */ + r = emac_iff2rmr(ndev); + if (r & EMAC_RMR_MAE) + emac_hash_mc(dev); + out_be32(&p->rmr, r); + + /* FIFOs thresholds */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1, + tx_size / 2 / dev->fifo_entry_size); + else + r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1, + tx_size / 2 / dev->fifo_entry_size); + out_be32(&p->tmr1, r); + out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2)); + + /* PAUSE frame is sent when RX FIFO reaches its high-water mark, + there should be still enough space in FIFO to allow the our link + partner time to process this frame and also time to send PAUSE + frame itself. 
+ + Here is the worst case scenario for the RX FIFO "headroom" + (from "The Switch Book") (100Mbps, without preamble, inter-frame gap): + + 1) One maximum-length frame on TX 1522 bytes + 2) One PAUSE frame time 64 bytes + 3) PAUSE frame decode time allowance 64 bytes + 4) One maximum-length frame on RX 1522 bytes + 5) Round-trip propagation delay of the link (100Mb) 15 bytes + ---------- + 3187 bytes + + I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes) + low-water mark to RX_FIFO_SIZE / 8 (512 bytes) + */ + r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size, + rx_size / 4 / dev->fifo_entry_size); + out_be32(&p->rwmr, r); + + /* Set PAUSE timer to the maximum */ + out_be32(&p->ptr, 0xffff); + + /* IRQ sources */ + r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE | + EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE | + EMAC_ISR_IRE | EMAC_ISR_TE; + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE | + EMAC4_ISR_RXOE | */; + out_be32(&p->iser, r); + + /* We need to take GPCS PHY out of isolate mode after EMAC reset */ + if (emac_phy_gpcs(dev->phy.mode)) { + if (dev->phy.gpcs_address != 0xffffffff) + emac_mii_reset_gpcs(&dev->phy); + else + emac_mii_reset_phy(&dev->phy); + } + + return 0; +} + +static void emac_reinitialize(struct emac_instance *dev) +{ + DBG(dev, "reinitialize" NL); + + emac_netif_stop(dev); + if (!emac_configure(dev)) { + emac_tx_enable(dev); + emac_rx_enable(dev); + } + emac_netif_start(dev); +} + +static void emac_full_tx_reset(struct emac_instance *dev) +{ + DBG(dev, "full_tx_reset" NL); + + emac_tx_disable(dev); + mal_disable_tx_channel(dev->mal, dev->mal_tx_chan); + emac_clean_tx_ring(dev); + dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0; + + emac_configure(dev); + + mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); + emac_tx_enable(dev); + emac_rx_enable(dev); +} + +static void emac_reset_work(struct work_struct *work) +{ + struct emac_instance *dev = container_of(work, struct emac_instance, reset_work); + + DBG(dev, "reset_work" NL); + + mutex_lock(&dev->link_lock); + if (dev->opened) { + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + } + mutex_unlock(&dev->link_lock); +} + +static void emac_tx_timeout(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "tx_timeout" NL); + + schedule_work(&dev->reset_work); +} + + +static inline int emac_phy_done(struct emac_instance *dev, u32 stacr) +{ + int done = !!(stacr & EMAC_STACR_OC); + + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + done = !done; + + return done; +}; + +static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r = 0; + int n, err = -ETIMEDOUT; + + mutex_lock(&dev->mdio_lock); + + DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg); + + /* Enable proper MDIO port */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_get_mdio(dev->zmii_dev, dev->zmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); + + /* Wait for management interface to become idle */ + n = 20; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait idle\n"); + goto bail; + } + } + + /* Issue read command */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_STACR_BASE(dev->opb_bus_freq); + else + r = EMAC_STACR_BASE(dev->opb_bus_freq); + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + r |= 
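/*
 * STACR access sequence used by this read path and by __emac_mdio_write()
 * below: wait for the OC bit to report an idle management interface,
 * program STACR with the base value derived from the OPB bus frequency,
 * the read/write command and the PHY and register addresses (plus the data
 * word for writes), poll OC again for completion, then check PHYE for a
 * PHY error and, on reads, pull the result out of the PHYD field.
 */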
EMAC_STACR_OC; + if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR)) + r |= EMACX_STACR_STAC_READ; + else + r |= EMAC_STACR_STAC_READ; + r |= (reg & EMAC_STACR_PRA_MASK) + | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT); + out_be32(&p->stacr, r); + + /* Wait for read to complete */ + n = 200; + while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait complete\n"); + goto bail; + } + } + + if (unlikely(r & EMAC_STACR_PHYE)) { + DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg); + err = -EREMOTEIO; + goto bail; + } + + r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK); + + DBG2(dev, "mdio_read -> %04x" NL, r); + err = 0; + bail: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_put_mdio(dev->zmii_dev, dev->zmii_port); + mutex_unlock(&dev->mdio_lock); + + return err == 0 ? r : err; +} + +static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg, + u16 val) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r = 0; + int n, err = -ETIMEDOUT; + + mutex_lock(&dev->mdio_lock); + + DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val); + + /* Enable proper MDIO port */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_get_mdio(dev->zmii_dev, dev->zmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); + + /* Wait for management interface to be idle */ + n = 20; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait idle\n"); + goto bail; + } + } + + /* Issue write command */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_STACR_BASE(dev->opb_bus_freq); + else + r = EMAC_STACR_BASE(dev->opb_bus_freq); + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + r |= EMAC_STACR_OC; + if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR)) + r |= EMACX_STACR_STAC_WRITE; + else + r |= EMAC_STACR_STAC_WRITE; + r |= (reg & EMAC_STACR_PRA_MASK) | + ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) | + (val << EMAC_STACR_PHYD_SHIFT); + out_be32(&p->stacr, r); + + /* Wait for write to complete */ + n = 200; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait complete\n"); + goto bail; + } + } + err = 0; + bail: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_put_mdio(dev->zmii_dev, dev->zmii_port); + mutex_unlock(&dev->mdio_lock); +} + +static int emac_mdio_read(struct net_device *ndev, int id, int reg) +{ + struct emac_instance *dev = netdev_priv(ndev); + int res; + + res = __emac_mdio_read((dev->mdio_instance && + dev->phy.gpcs_address != id) ? + dev->mdio_instance : dev, + (u8) id, (u8) reg); + return res; +} + +static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val) +{ + struct emac_instance *dev = netdev_priv(ndev); + + __emac_mdio_write((dev->mdio_instance && + dev->phy.gpcs_address != id) ? + dev->mdio_instance : dev, + (u8) id, (u8) reg, (u16) val); +} + +/* Tx lock BH */ +static void __emac_set_multicast_list(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 rmr = emac_iff2rmr(dev->ndev); + + DBG(dev, "__multicast %08x" NL, rmr); + + /* I decided to relax register access rules here to avoid + * full EMAC reset. 
+ * + * There is a real problem with EMAC4 core if we use MWSW_001 bit + * in MR1 register and do a full EMAC reset. + * One TX BD status update is delayed and, after EMAC reset, it + * never happens, resulting in TX hung (it'll be recovered by TX + * timeout handler eventually, but this is just gross). + * So we either have to do full TX reset or try to cheat here :) + * + * The only required change is to RX mode register, so I *think* all + * we need is just to stop RX channel. This seems to work on all + * tested SoCs. --ebs + * + * If we need the full reset, we might just trigger the workqueue + * and do it async... a bit nasty but should work --BenH + */ + dev->mcast_pending = 0; + emac_rx_disable(dev); + if (rmr & EMAC_RMR_MAE) + emac_hash_mc(dev); + out_be32(&p->rmr, rmr); + emac_rx_enable(dev); +} + +/* Tx lock BH */ +static void emac_set_multicast_list(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "multicast" NL); + + BUG_ON(!netif_running(dev->ndev)); + + if (dev->no_mcast) { + dev->mcast_pending = 1; + return; + } + __emac_set_multicast_list(dev); +} + +static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) +{ + int rx_sync_size = emac_rx_sync_size(new_mtu); + int rx_skb_size = emac_rx_skb_size(new_mtu); + int i, ret = 0; + int mr1_jumbo_bit_change = 0; + + mutex_lock(&dev->link_lock); + emac_netif_stop(dev); + emac_rx_disable(dev); + mal_disable_rx_channel(dev->mal, dev->mal_rx_chan); + + if (dev->rx_sg_skb) { + ++dev->estats.rx_dropped_resize; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } + + /* Make a first pass over RX ring and mark BDs ready, dropping + * non-processed packets on the way. We need this as a separate pass + * to simplify error recovery in the case of allocation failure later. + */ + for (i = 0; i < NUM_RX_BUFF; ++i) { + if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST) + ++dev->estats.rx_dropped_resize; + + dev->rx_desc[i].data_len = 0; + dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | + (i == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); + } + + /* Reallocate RX ring only if bigger skb buffers are required */ + if (rx_skb_size <= dev->rx_skb_size) + goto skip; + + /* Second pass, allocate new skbs */ + for (i = 0; i < NUM_RX_BUFF; ++i) { + struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC); + if (!skb) { + ret = -ENOMEM; + goto oom; + } + + BUG_ON(!dev->rx_skb[i]); + dev_kfree_skb(dev->rx_skb[i]); + + skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); + dev->rx_desc[i].data_ptr = + dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size, + DMA_FROM_DEVICE) + 2; + dev->rx_skb[i] = skb; + } + skip: + /* Check if we need to change "Jumbo" bit in MR1 */ + if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) { + mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) || + (dev->ndev->mtu > ETH_DATA_LEN); + } else { + mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^ + (dev->ndev->mtu > ETH_DATA_LEN); + } + + if (mr1_jumbo_bit_change) { + /* This is to prevent starting RX channel in emac_rx_enable() */ + set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + + dev->ndev->mtu = new_mtu; + emac_full_tx_reset(dev); + } + + mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu)); + oom: + /* Restart RX */ + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + dev->rx_slot = 0; + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_rx_enable(dev); + emac_netif_start(dev); + mutex_unlock(&dev->link_lock); + + return ret; +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct emac_instance *dev = netdev_priv(ndev); + int ret = 0; + + if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu) + return -EINVAL; + + DBG(dev, "change_mtu(%d)" NL, new_mtu); + + if (netif_running(ndev)) { + /* Check if we really need to reinitialize RX ring */ + if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu)) + ret = emac_resize_rx_ring(dev, new_mtu); + } + + if (!ret) { + ndev->mtu = new_mtu; + dev->rx_skb_size = emac_rx_skb_size(new_mtu); + dev->rx_sync_size = emac_rx_sync_size(new_mtu); + } + + return ret; +} + +static void emac_clean_tx_ring(struct emac_instance *dev) +{ + int i; + + for (i = 0; i < NUM_TX_BUFF; ++i) { + if (dev->tx_skb[i]) { + dev_kfree_skb(dev->tx_skb[i]); + dev->tx_skb[i] = NULL; + if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY) + ++dev->estats.tx_dropped; + } + dev->tx_desc[i].ctrl = 0; + dev->tx_desc[i].data_ptr = 0; + } +} + +static void emac_clean_rx_ring(struct emac_instance *dev) +{ + int i; + + for (i = 0; i < NUM_RX_BUFF; ++i) + if (dev->rx_skb[i]) { + dev->rx_desc[i].ctrl = 0; + dev_kfree_skb(dev->rx_skb[i]); + dev->rx_skb[i] = NULL; + dev->rx_desc[i].data_ptr = 0; + } + + if (dev->rx_sg_skb) { + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } +} + +static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot, + gfp_t flags) +{ + struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); + if (unlikely(!skb)) + return -ENOMEM; + + dev->rx_skb[slot] = skb; + dev->rx_desc[slot].data_len = 0; + + skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); + dev->rx_desc[slot].data_ptr = + dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size, + DMA_FROM_DEVICE) + 2; + wmb(); + dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | + (slot == (NUM_RX_BUFF - 1) ? 
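/*
 * Two details worth noting here, both shared with the resize path above:
 * the last ring slot gets MAL_RX_CTRL_WRAP so the ring wraps back to the
 * first descriptor, and the 2-byte shuffle around the DMA mapping (map
 * skb->data minus 2, then add 2 back to data_ptr) keeps the IP header
 * 4-byte aligned behind the 14-byte Ethernet header, matching the
 * skb_reserve(... + 2) above.
 */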
MAL_RX_CTRL_WRAP : 0); + + return 0; +} + +static void emac_print_link_status(struct emac_instance *dev) +{ + if (netif_carrier_ok(dev->ndev)) + printk(KERN_INFO "%s: link is up, %d %s%s\n", + dev->ndev->name, dev->phy.speed, + dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX", + dev->phy.pause ? ", pause enabled" : + dev->phy.asym_pause ? ", asymmetric pause enabled" : ""); + else + printk(KERN_INFO "%s: link is down\n", dev->ndev->name); +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_open(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int err, i; + + DBG(dev, "open" NL); + + /* Setup error IRQ handler */ + err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev); + if (err) { + printk(KERN_ERR "%s: failed to request IRQ %d\n", + ndev->name, dev->emac_irq); + return err; + } + + /* Allocate RX ring */ + for (i = 0; i < NUM_RX_BUFF; ++i) + if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) { + printk(KERN_ERR "%s: failed to allocate RX ring\n", + ndev->name); + goto oom; + } + + dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0; + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + dev->rx_sg_skb = NULL; + + mutex_lock(&dev->link_lock); + dev->opened = 1; + + /* Start PHY polling now. + */ + if (dev->phy.address >= 0) { + int link_poll_interval; + if (dev->phy.def->ops->poll_link(&dev->phy)) { + dev->phy.def->ops->read_link(&dev->phy); + emac_rx_clk_default(dev); + netif_carrier_on(dev->ndev); + link_poll_interval = PHY_POLL_LINK_ON; + } else { + emac_rx_clk_tx(dev); + netif_carrier_off(dev->ndev); + link_poll_interval = PHY_POLL_LINK_OFF; + } + dev->link_polling = 1; + wmb(); + schedule_delayed_work(&dev->link_work, link_poll_interval); + emac_print_link_status(dev); + } else + netif_carrier_on(dev->ndev); + + /* Required for Pause packet support in EMAC */ + dev_mc_add_global(ndev, default_mcast_addr); + + emac_configure(dev); + mal_poll_add(dev->mal, &dev->commac); + mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); + mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu)); + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_tx_enable(dev); + emac_rx_enable(dev); + emac_netif_start(dev); + + mutex_unlock(&dev->link_lock); + + return 0; + oom: + emac_clean_rx_ring(dev); + free_irq(dev->emac_irq, dev); + + return -ENOMEM; +} + +/* BHs disabled */ +#if 0 +static int emac_link_differs(struct emac_instance *dev) +{ + u32 r = in_be32(&dev->emacp->mr1); + + int duplex = r & EMAC_MR1_FDE ? 
DUPLEX_FULL : DUPLEX_HALF; + int speed, pause, asym_pause; + + if (r & EMAC_MR1_MF_1000) + speed = SPEED_1000; + else if (r & EMAC_MR1_MF_100) + speed = SPEED_100; + else + speed = SPEED_10; + + switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) { + case (EMAC_MR1_EIFC | EMAC_MR1_APP): + pause = 1; + asym_pause = 0; + break; + case EMAC_MR1_APP: + pause = 0; + asym_pause = 1; + break; + default: + pause = asym_pause = 0; + } + return speed != dev->phy.speed || duplex != dev->phy.duplex || + pause != dev->phy.pause || asym_pause != dev->phy.asym_pause; +} +#endif + +static void emac_link_timer(struct work_struct *work) +{ + struct emac_instance *dev = + container_of(to_delayed_work(work), + struct emac_instance, link_work); + int link_poll_interval; + + mutex_lock(&dev->link_lock); + DBG2(dev, "link timer" NL); + + if (!dev->opened) + goto bail; + + if (dev->phy.def->ops->poll_link(&dev->phy)) { + if (!netif_carrier_ok(dev->ndev)) { + emac_rx_clk_default(dev); + /* Get new link parameters */ + dev->phy.def->ops->read_link(&dev->phy); + + netif_carrier_on(dev->ndev); + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + emac_print_link_status(dev); + } + link_poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(dev->ndev)) { + emac_rx_clk_tx(dev); + netif_carrier_off(dev->ndev); + netif_tx_disable(dev->ndev); + emac_reinitialize(dev); + emac_print_link_status(dev); + } + link_poll_interval = PHY_POLL_LINK_OFF; + } + schedule_delayed_work(&dev->link_work, link_poll_interval); + bail: + mutex_unlock(&dev->link_lock); +} + +static void emac_force_link_update(struct emac_instance *dev) +{ + netif_carrier_off(dev->ndev); + smp_rmb(); + if (dev->link_polling) { + cancel_delayed_work_sync(&dev->link_work); + if (dev->link_polling) + schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF); + } +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_close(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "close" NL); + + if (dev->phy.address >= 0) { + dev->link_polling = 0; + cancel_delayed_work_sync(&dev->link_work); + } + mutex_lock(&dev->link_lock); + emac_netif_stop(dev); + dev->opened = 0; + mutex_unlock(&dev->link_lock); + + emac_rx_disable(dev); + emac_tx_disable(dev); + mal_disable_rx_channel(dev->mal, dev->mal_rx_chan); + mal_disable_tx_channel(dev->mal, dev->mal_tx_chan); + mal_poll_del(dev->mal, &dev->commac); + + emac_clean_tx_ring(dev); + emac_clean_rx_ring(dev); + + free_irq(dev->emac_irq, dev); + + netif_carrier_off(ndev); + + return 0; +} + +static inline u16 emac_tx_csum(struct emac_instance *dev, + struct sk_buff *skb) +{ + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) && + (skb->ip_summed == CHECKSUM_PARTIAL)) { + ++dev->stats.tx_packets_csum; + return EMAC_TX_CTRL_TAH_CSUM; + } + return 0; +} + +static inline int emac_xmit_finish(struct emac_instance *dev, int len) +{ + struct emac_regs __iomem *p = dev->emacp; + struct net_device *ndev = dev->ndev; + + /* Send the packet out. 
If the if makes a significant perf + * difference, then we can store the TMR0 value in "dev" + * instead + */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + out_be32(&p->tmr0, EMAC4_TMR0_XMIT); + else + out_be32(&p->tmr0, EMAC_TMR0_XMIT); + + if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) { + netif_stop_queue(ndev); + DBG2(dev, "stopped TX queue" NL); + } + + ndev->trans_start = jiffies; + ++dev->stats.tx_packets; + dev->stats.tx_bytes += len; + + return NETDEV_TX_OK; +} + +/* Tx lock BH */ +static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + unsigned int len = skb->len; + int slot; + + u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | + MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb); + + slot = dev->tx_slot++; + if (dev->tx_slot == NUM_TX_BUFF) { + dev->tx_slot = 0; + ctrl |= MAL_TX_CTRL_WRAP; + } + + DBG2(dev, "xmit(%u) %d" NL, len, slot); + + dev->tx_skb[slot] = skb; + dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev, + skb->data, len, + DMA_TO_DEVICE); + dev->tx_desc[slot].data_len = (u16) len; + wmb(); + dev->tx_desc[slot].ctrl = ctrl; + + return emac_xmit_finish(dev, len); +} + +static inline int emac_xmit_split(struct emac_instance *dev, int slot, + u32 pd, int len, int last, u16 base_ctrl) +{ + while (1) { + u16 ctrl = base_ctrl; + int chunk = min(len, MAL_MAX_TX_SIZE); + len -= chunk; + + slot = (slot + 1) % NUM_TX_BUFF; + + if (last && !len) + ctrl |= MAL_TX_CTRL_LAST; + if (slot == NUM_TX_BUFF - 1) + ctrl |= MAL_TX_CTRL_WRAP; + + dev->tx_skb[slot] = NULL; + dev->tx_desc[slot].data_ptr = pd; + dev->tx_desc[slot].data_len = (u16) chunk; + dev->tx_desc[slot].ctrl = ctrl; + ++dev->tx_cnt; + + if (!len) + break; + + pd += chunk; + } + return slot; +} + +/* Tx lock BH disabled (SG version for TAH equipped EMACs) */ +static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int nr_frags = skb_shinfo(skb)->nr_frags; + int len = skb->len, chunk; + int slot, i; + u16 ctrl; + u32 pd; + + /* This is common "fast" path */ + if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE)) + return emac_start_xmit(skb, ndev); + + len -= skb->data_len; + + /* Note, this is only an *estimation*, we can still run out of empty + * slots because of the additional fragmentation into + * MAL_MAX_TX_SIZE-sized chunks + */ + if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF)) + goto stop_queue; + + ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | + emac_tx_csum(dev, skb); + slot = dev->tx_slot; + + /* skb data */ + dev->tx_skb[slot] = NULL; + chunk = min(len, MAL_MAX_TX_SIZE); + dev->tx_desc[slot].data_ptr = pd = + dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE); + dev->tx_desc[slot].data_len = (u16) chunk; + len -= chunk; + if (unlikely(len)) + slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags, + ctrl); + /* skb fragments */ + for (i = 0; i < nr_frags; ++i) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + len = skb_frag_size(frag); + + if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF)) + goto undo_frame; + + pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len, + DMA_TO_DEVICE); + + slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1, + ctrl); + } + + DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot); + + /* Attach skb to the last slot so we don't release it too early */ + dev->tx_skb[slot] = skb; + + /* Send the packet out */ + if 
(dev->tx_slot == NUM_TX_BUFF - 1) + ctrl |= MAL_TX_CTRL_WRAP; + wmb(); + dev->tx_desc[dev->tx_slot].ctrl = ctrl; + dev->tx_slot = (slot + 1) % NUM_TX_BUFF; + + return emac_xmit_finish(dev, skb->len); + + undo_frame: + /* Well, too bad. Our previous estimation was overly optimistic. + * Undo everything. + */ + while (slot != dev->tx_slot) { + dev->tx_desc[slot].ctrl = 0; + --dev->tx_cnt; + if (--slot < 0) + slot = NUM_TX_BUFF - 1; + } + ++dev->estats.tx_undo; + + stop_queue: + netif_stop_queue(ndev); + DBG2(dev, "stopped TX queue" NL); + return NETDEV_TX_BUSY; +} + +/* Tx lock BHs */ +static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl) +{ + struct emac_error_stats *st = &dev->estats; + + DBG(dev, "BD TX error %04x" NL, ctrl); + + ++st->tx_bd_errors; + if (ctrl & EMAC_TX_ST_BFCS) + ++st->tx_bd_bad_fcs; + if (ctrl & EMAC_TX_ST_LCS) + ++st->tx_bd_carrier_loss; + if (ctrl & EMAC_TX_ST_ED) + ++st->tx_bd_excessive_deferral; + if (ctrl & EMAC_TX_ST_EC) + ++st->tx_bd_excessive_collisions; + if (ctrl & EMAC_TX_ST_LC) + ++st->tx_bd_late_collision; + if (ctrl & EMAC_TX_ST_MC) + ++st->tx_bd_multple_collisions; + if (ctrl & EMAC_TX_ST_SC) + ++st->tx_bd_single_collision; + if (ctrl & EMAC_TX_ST_UR) + ++st->tx_bd_underrun; + if (ctrl & EMAC_TX_ST_SQE) + ++st->tx_bd_sqe; +} + +static void emac_poll_tx(void *param) +{ + struct emac_instance *dev = param; + u32 bad_mask; + + DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot); + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + bad_mask = EMAC_IS_BAD_TX_TAH; + else + bad_mask = EMAC_IS_BAD_TX; + + netif_tx_lock_bh(dev->ndev); + if (dev->tx_cnt) { + u16 ctrl; + int slot = dev->ack_slot, n = 0; + again: + ctrl = dev->tx_desc[slot].ctrl; + if (!(ctrl & MAL_TX_CTRL_READY)) { + struct sk_buff *skb = dev->tx_skb[slot]; + ++n; + + if (skb) { + dev_kfree_skb(skb); + dev->tx_skb[slot] = NULL; + } + slot = (slot + 1) % NUM_TX_BUFF; + + if (unlikely(ctrl & bad_mask)) + emac_parse_tx_error(dev, ctrl); + + if (--dev->tx_cnt) + goto again; + } + if (n) { + dev->ack_slot = slot; + if (netif_queue_stopped(dev->ndev) && + dev->tx_cnt < EMAC_TX_WAKEUP_THRESH) + netif_wake_queue(dev->ndev); + + DBG2(dev, "tx %d pkts" NL, n); + } + } + netif_tx_unlock_bh(dev->ndev); +} + +static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot, + int len) +{ + struct sk_buff *skb = dev->rx_skb[slot]; + + DBG2(dev, "recycle %d %d" NL, slot, len); + + if (len) + dma_map_single(&dev->ofdev->dev, skb->data - 2, + EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE); + + dev->rx_desc[slot].data_len = 0; + wmb(); + dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | + (slot == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); +} + +static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl) +{ + struct emac_error_stats *st = &dev->estats; + + DBG(dev, "BD RX error %04x" NL, ctrl); + + ++st->rx_bd_errors; + if (ctrl & EMAC_RX_ST_OE) + ++st->rx_bd_overrun; + if (ctrl & EMAC_RX_ST_BP) + ++st->rx_bd_bad_packet; + if (ctrl & EMAC_RX_ST_RP) + ++st->rx_bd_runt_packet; + if (ctrl & EMAC_RX_ST_SE) + ++st->rx_bd_short_event; + if (ctrl & EMAC_RX_ST_AE) + ++st->rx_bd_alignment_error; + if (ctrl & EMAC_RX_ST_BFCS) + ++st->rx_bd_bad_fcs; + if (ctrl & EMAC_RX_ST_PTL) + ++st->rx_bd_packet_too_long; + if (ctrl & EMAC_RX_ST_ORE) + ++st->rx_bd_out_of_range; + if (ctrl & EMAC_RX_ST_IRE) + ++st->rx_bd_in_range; +} + +static inline void emac_rx_csum(struct emac_instance *dev, + struct sk_buff *skb, u16 ctrl) +{ +#ifdef CONFIG_IBM_EMAC_TAH + if (!ctrl && dev->tah_dev) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + ++dev->stats.rx_packets_csum; + } +#endif +} + +static inline int emac_rx_sg_append(struct emac_instance *dev, int slot) +{ + if (likely(dev->rx_sg_skb != NULL)) { + int len = dev->rx_desc[slot].data_len; + int tot_len = dev->rx_sg_skb->len + len; + + if (unlikely(tot_len + 2 > dev->rx_skb_size)) { + ++dev->estats.rx_dropped_mtu; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } else { + memcpy(skb_tail_pointer(dev->rx_sg_skb), + dev->rx_skb[slot]->data, len); + skb_put(dev->rx_sg_skb, len); + emac_recycle_rx_skb(dev, slot, len); + return 0; + } + } + emac_recycle_rx_skb(dev, slot, 0); + return -1; +} + +/* NAPI poll context */ +static int emac_poll_rx(void *param, int budget) +{ + struct emac_instance *dev = param; + int slot = dev->rx_slot, received = 0; + + DBG2(dev, "poll_rx(%d)" NL, budget); + + again: + while (budget > 0) { + int len; + struct sk_buff *skb; + u16 ctrl = dev->rx_desc[slot].ctrl; + + if (ctrl & MAL_RX_CTRL_EMPTY) + break; + + skb = dev->rx_skb[slot]; + mb(); + len = dev->rx_desc[slot].data_len; + + if (unlikely(!MAL_IS_SINGLE_RX(ctrl))) + goto sg; + + ctrl &= EMAC_BAD_RX_MASK; + if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) { + emac_parse_rx_error(dev, ctrl); + ++dev->estats.rx_dropped_error; + emac_recycle_rx_skb(dev, slot, 0); + len = 0; + goto next; + } + + if (len < ETH_HLEN) { + ++dev->estats.rx_dropped_stack; + emac_recycle_rx_skb(dev, slot, len); + goto next; + } + + if (len && len < EMAC_RX_COPY_THRESH) { + struct sk_buff *copy_skb = + alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC); + if (unlikely(!copy_skb)) + goto oom; + + skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2); + memcpy(copy_skb->data - 2, skb->data - 2, len + 2); + emac_recycle_rx_skb(dev, slot, len); + skb = copy_skb; + } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) + goto oom; + + skb_put(skb, len); + push_packet: + skb->protocol = eth_type_trans(skb, dev->ndev); + emac_rx_csum(dev, skb, ctrl); + + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + ++dev->estats.rx_dropped_stack; + next: + ++dev->stats.rx_packets; + skip: + dev->stats.rx_bytes += len; + slot = (slot + 1) % NUM_RX_BUFF; + --budget; + ++received; + continue; + sg: + if (ctrl & MAL_RX_CTRL_FIRST) { + BUG_ON(dev->rx_sg_skb); + if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) { + DBG(dev, "rx OOM %d" NL, slot); + ++dev->estats.rx_dropped_oom; + emac_recycle_rx_skb(dev, slot, 0); + } else { + dev->rx_sg_skb = skb; + skb_put(skb, len); + } + } else if (!emac_rx_sg_append(dev, slot) && + (ctrl & MAL_RX_CTRL_LAST)) { + + skb = dev->rx_sg_skb; + dev->rx_sg_skb = NULL; + + ctrl &= 
EMAC_BAD_RX_MASK; + if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) { + emac_parse_rx_error(dev, ctrl); + ++dev->estats.rx_dropped_error; + dev_kfree_skb(skb); + len = 0; + } else + goto push_packet; + } + goto skip; + oom: + DBG(dev, "rx OOM %d" NL, slot); + /* Drop the packet and recycle skb */ + ++dev->estats.rx_dropped_oom; + emac_recycle_rx_skb(dev, slot, 0); + goto next; + } + + if (received) { + DBG2(dev, "rx %d BDs" NL, received); + dev->rx_slot = slot; + } + + if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) { + mb(); + if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) { + DBG2(dev, "rx restart" NL); + received = 0; + goto again; + } + + if (dev->rx_sg_skb) { + DBG2(dev, "dropping partial rx packet" NL); + ++dev->estats.rx_dropped_error; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } + + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_rx_enable(dev); + dev->rx_slot = 0; + } + return received; +} + +/* NAPI poll context */ +static int emac_peek_rx(void *param) +{ + struct emac_instance *dev = param; + + return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY); +} + +/* NAPI poll context */ +static int emac_peek_rx_sg(void *param) +{ + struct emac_instance *dev = param; + + int slot = dev->rx_slot; + while (1) { + u16 ctrl = dev->rx_desc[slot].ctrl; + if (ctrl & MAL_RX_CTRL_EMPTY) + return 0; + else if (ctrl & MAL_RX_CTRL_LAST) + return 1; + + slot = (slot + 1) % NUM_RX_BUFF; + + /* I'm just being paranoid here :) */ + if (unlikely(slot == dev->rx_slot)) + return 0; + } +} + +/* Hard IRQ */ +static void emac_rxde(void *param) +{ + struct emac_instance *dev = param; + + ++dev->estats.rx_stopped; + emac_rx_disable_async(dev); +} + +/* Hard IRQ */ +static irqreturn_t emac_irq(int irq, void *dev_instance) +{ + struct emac_instance *dev = dev_instance; + struct emac_regs __iomem *p = dev->emacp; + struct emac_error_stats *st = &dev->estats; + u32 isr; + + spin_lock(&dev->lock); + + isr = in_be32(&p->isr); + out_be32(&p->isr, isr); + + DBG(dev, "isr = %08x" NL, isr); + + if (isr & EMAC4_ISR_TXPE) + ++st->tx_parity; + if (isr & EMAC4_ISR_RXPE) + ++st->rx_parity; + if (isr & EMAC4_ISR_TXUE) + ++st->tx_underrun; + if (isr & EMAC4_ISR_RXOE) + ++st->rx_fifo_overrun; + if (isr & EMAC_ISR_OVR) + ++st->rx_overrun; + if (isr & EMAC_ISR_BP) + ++st->rx_bad_packet; + if (isr & EMAC_ISR_RP) + ++st->rx_runt_packet; + if (isr & EMAC_ISR_SE) + ++st->rx_short_event; + if (isr & EMAC_ISR_ALE) + ++st->rx_alignment_error; + if (isr & EMAC_ISR_BFCS) + ++st->rx_bad_fcs; + if (isr & EMAC_ISR_PTLE) + ++st->rx_packet_too_long; + if (isr & EMAC_ISR_ORE) + ++st->rx_out_of_range; + if (isr & EMAC_ISR_IRE) + ++st->rx_in_range; + if (isr & EMAC_ISR_SQE) + ++st->tx_sqe; + if (isr & EMAC_ISR_TE) + ++st->tx_errors; + + spin_unlock(&dev->lock); + + return IRQ_HANDLED; +} + +static struct net_device_stats *emac_stats(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct emac_stats *st = &dev->stats; + struct emac_error_stats *est = &dev->estats; + struct net_device_stats *nst = &dev->nstats; + unsigned long flags; + + DBG2(dev, "stats" NL); + + /* Compute "legacy" statistics */ + spin_lock_irqsave(&dev->lock, flags); + nst->rx_packets = (unsigned long)st->rx_packets; + nst->rx_bytes = (unsigned long)st->rx_bytes; + nst->tx_packets = (unsigned long)st->tx_packets; + nst->tx_bytes = (unsigned long)st->tx_bytes; + nst->rx_dropped = (unsigned long)(est->rx_dropped_oom + + 
est->rx_dropped_error + + est->rx_dropped_resize + + est->rx_dropped_mtu); + nst->tx_dropped = (unsigned long)est->tx_dropped; + + nst->rx_errors = (unsigned long)est->rx_bd_errors; + nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun + + est->rx_fifo_overrun + + est->rx_overrun); + nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error + + est->rx_alignment_error); + nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs + + est->rx_bad_fcs); + nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet + + est->rx_bd_short_event + + est->rx_bd_packet_too_long + + est->rx_bd_out_of_range + + est->rx_bd_in_range + + est->rx_runt_packet + + est->rx_short_event + + est->rx_packet_too_long + + est->rx_out_of_range + + est->rx_in_range); + + nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors); + nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun + + est->tx_underrun); + nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss; + nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral + + est->tx_bd_excessive_collisions + + est->tx_bd_late_collision + + est->tx_bd_multple_collisions); + spin_unlock_irqrestore(&dev->lock, flags); + return nst; +} + +static struct mal_commac_ops emac_commac_ops = { + .poll_tx = &emac_poll_tx, + .poll_rx = &emac_poll_rx, + .peek_rx = &emac_peek_rx, + .rxde = &emac_rxde, +}; + +static struct mal_commac_ops emac_commac_sg_ops = { + .poll_tx = &emac_poll_tx, + .poll_rx = &emac_poll_rx, + .peek_rx = &emac_peek_rx_sg, + .rxde = &emac_rxde, +}; + +/* Ethtool support */ +static int emac_ethtool_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + + cmd->supported = dev->phy.features; + cmd->port = PORT_MII; + cmd->phy_address = dev->phy.address; + cmd->transceiver = + dev->phy.address >= 0 ? 
XCVR_EXTERNAL : XCVR_INTERNAL; + + mutex_lock(&dev->link_lock); + cmd->advertising = dev->phy.advertising; + cmd->autoneg = dev->phy.autoneg; + cmd->speed = dev->phy.speed; + cmd->duplex = dev->phy.duplex; + mutex_unlock(&dev->link_lock); + + return 0; +} + +static int emac_ethtool_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + u32 f = dev->phy.features; + + DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL, + cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); + + /* Basic sanity checks */ + if (dev->phy.address < 0) + return -EOPNOTSUPP; + if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) + return -EINVAL; + if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE) { + switch (cmd->speed) { + case SPEED_10: + if (cmd->duplex == DUPLEX_HALF && + !(f & SUPPORTED_10baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL && + !(f & SUPPORTED_10baseT_Full)) + return -EINVAL; + break; + case SPEED_100: + if (cmd->duplex == DUPLEX_HALF && + !(f & SUPPORTED_100baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL && + !(f & SUPPORTED_100baseT_Full)) + return -EINVAL; + break; + case SPEED_1000: + if (cmd->duplex == DUPLEX_HALF && + !(f & SUPPORTED_1000baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL && + !(f & SUPPORTED_1000baseT_Full)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + mutex_lock(&dev->link_lock); + dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed, + cmd->duplex); + mutex_unlock(&dev->link_lock); + + } else { + if (!(f & SUPPORTED_Autoneg)) + return -EINVAL; + + mutex_lock(&dev->link_lock); + dev->phy.def->ops->setup_aneg(&dev->phy, + (cmd->advertising & f) | + (dev->phy.advertising & + (ADVERTISED_Pause | + ADVERTISED_Asym_Pause))); + mutex_unlock(&dev->link_lock); + } + emac_force_link_update(dev); + + return 0; +} + +static void emac_ethtool_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *rp) +{ + rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF; + rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF; +} + +static void emac_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pp) +{ + struct emac_instance *dev = netdev_priv(ndev); + + mutex_lock(&dev->link_lock); + if ((dev->phy.features & SUPPORTED_Autoneg) && + (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause))) + pp->autoneg = 1; + + if (dev->phy.duplex == DUPLEX_FULL) { + if (dev->phy.pause) + pp->rx_pause = pp->tx_pause = 1; + else if (dev->phy.asym_pause) + pp->tx_pause = 1; + } + mutex_unlock(&dev->link_lock); +} + +static int emac_get_regs_len(struct emac_instance *dev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct emac_regs); +} + +static int emac_ethtool_get_regs_len(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int size; + + size = sizeof(struct emac_ethtool_regs_hdr) + + emac_get_regs_len(dev) + mal_get_regs_len(dev->mal); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + size += zmii_get_regs_len(dev->zmii_dev); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + size += rgmii_get_regs_len(dev->rgmii_dev); + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + size += tah_get_regs_len(dev->tah_dev); + + return size; +} + +static void *emac_dump_regs(struct emac_instance *dev, void *buf) +{ + struct 
emac_ethtool_regs_subhdr *hdr = buf; + + hdr->index = dev->cell_index; + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { + hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER; + } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { + hdr->version = EMAC4_ETHTOOL_REGS_VER; + } else { + hdr->version = EMAC_ETHTOOL_REGS_VER; + } + memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs)); + return (void *)(hdr + 1) + sizeof(struct emac_regs); +} + +static void emac_ethtool_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *buf) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct emac_ethtool_regs_hdr *hdr = buf; + + hdr->components = 0; + buf = hdr + 1; + + buf = mal_dump_regs(dev->mal, buf); + buf = emac_dump_regs(dev, buf); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) { + hdr->components |= EMAC_ETHTOOL_REGS_ZMII; + buf = zmii_dump_regs(dev->zmii_dev, buf); + } + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) { + hdr->components |= EMAC_ETHTOOL_REGS_RGMII; + buf = rgmii_dump_regs(dev->rgmii_dev, buf); + } + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) { + hdr->components |= EMAC_ETHTOOL_REGS_TAH; + buf = tah_dump_regs(dev->tah_dev, buf); + } +} + +static int emac_ethtool_nway_reset(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int res = 0; + + DBG(dev, "nway_reset" NL); + + if (dev->phy.address < 0) + return -EOPNOTSUPP; + + mutex_lock(&dev->link_lock); + if (!dev->phy.autoneg) { + res = -EINVAL; + goto out; + } + + dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising); + out: + mutex_unlock(&dev->link_lock); + emac_force_link_update(dev); + return res; +} + +static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset) +{ + if (stringset == ETH_SS_STATS) + return EMAC_ETHTOOL_STATS_COUNT; + else + return -EINVAL; +} + +static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset, + u8 * buf) +{ + if (stringset == ETH_SS_STATS) + memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys)); +} + +static void emac_ethtool_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *estats, + u64 * tmp_stats) +{ + struct emac_instance *dev = netdev_priv(ndev); + + memcpy(tmp_stats, &dev->stats, sizeof(dev->stats)); + tmp_stats += sizeof(dev->stats) / sizeof(u64); + memcpy(tmp_stats, &dev->estats, sizeof(dev->estats)); +} + +static void emac_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct emac_instance *dev = netdev_priv(ndev); + + strlcpy(info->driver, "ibm_emac", sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s", + dev->cell_index, dev->ofdev->dev.of_node->full_name); + info->regdump_len = emac_ethtool_get_regs_len(ndev); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_settings = emac_ethtool_get_settings, + .set_settings = emac_ethtool_set_settings, + .get_drvinfo = emac_ethtool_get_drvinfo, + + .get_regs_len = emac_ethtool_get_regs_len, + .get_regs = emac_ethtool_get_regs, + + .nway_reset = emac_ethtool_nway_reset, + + .get_ringparam = emac_ethtool_get_ringparam, + .get_pauseparam = emac_ethtool_get_pauseparam, + + .get_strings = emac_ethtool_get_strings, + .get_sset_count = emac_ethtool_get_sset_count, + .get_ethtool_stats = emac_ethtool_get_ethtool_stats, + + .get_link = ethtool_op_get_link, +}; + +static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct 
mii_ioctl_data *data = if_mii(rq); + + DBG(dev, "ioctl %08x" NL, cmd); + + if (dev->phy.address < 0) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = dev->phy.address; + /* Fall through */ + case SIOCGMIIREG: + data->val_out = emac_mdio_read(ndev, dev->phy.address, + data->reg_num); + return 0; + + case SIOCSMIIREG: + emac_mdio_write(ndev, dev->phy.address, data->reg_num, + data->val_in); + return 0; + default: + return -EOPNOTSUPP; + } +} + +struct emac_depentry { + u32 phandle; + struct device_node *node; + struct platform_device *ofdev; + void *drvdata; +}; + +#define EMAC_DEP_MAL_IDX 0 +#define EMAC_DEP_ZMII_IDX 1 +#define EMAC_DEP_RGMII_IDX 2 +#define EMAC_DEP_TAH_IDX 3 +#define EMAC_DEP_MDIO_IDX 4 +#define EMAC_DEP_PREV_IDX 5 +#define EMAC_DEP_COUNT 6 + +static int emac_check_deps(struct emac_instance *dev, + struct emac_depentry *deps) +{ + int i, there = 0; + struct device_node *np; + + for (i = 0; i < EMAC_DEP_COUNT; i++) { + /* no dependency on that item, allright */ + if (deps[i].phandle == 0) { + there++; + continue; + } + /* special case for blist as the dependency might go away */ + if (i == EMAC_DEP_PREV_IDX) { + np = *(dev->blist - 1); + if (np == NULL) { + deps[i].phandle = 0; + there++; + continue; + } + if (deps[i].node == NULL) + deps[i].node = of_node_get(np); + } + if (deps[i].node == NULL) + deps[i].node = of_find_node_by_phandle(deps[i].phandle); + if (deps[i].node == NULL) + continue; + if (deps[i].ofdev == NULL) + deps[i].ofdev = of_find_device_by_node(deps[i].node); + if (deps[i].ofdev == NULL) + continue; + if (deps[i].drvdata == NULL) + deps[i].drvdata = platform_get_drvdata(deps[i].ofdev); + if (deps[i].drvdata != NULL) + there++; + } + return there == EMAC_DEP_COUNT; +} + +static void emac_put_deps(struct emac_instance *dev) +{ + of_dev_put(dev->mal_dev); + of_dev_put(dev->zmii_dev); + of_dev_put(dev->rgmii_dev); + of_dev_put(dev->mdio_dev); + of_dev_put(dev->tah_dev); +} + +static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + /* We are only intereted in device addition */ + if (action == BUS_NOTIFY_BOUND_DRIVER) + wake_up_all(&emac_probe_wait); + return 0; +} + +static struct notifier_block emac_of_bus_notifier = { + .notifier_call = emac_of_bus_notify +}; + +static int emac_wait_deps(struct emac_instance *dev) +{ + struct emac_depentry deps[EMAC_DEP_COUNT]; + int i, err; + + memset(&deps, 0, sizeof(deps)); + + deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph; + deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph; + deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph; + if (dev->tah_ph) + deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph; + if (dev->mdio_ph) + deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph; + if (dev->blist && dev->blist > emac_boot_list) + deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu; + bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier); + wait_event_timeout(emac_probe_wait, + emac_check_deps(dev, deps), + EMAC_PROBE_DEP_TIMEOUT); + bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier); + err = emac_check_deps(dev, deps) ? 
0 : -ENODEV; + for (i = 0; i < EMAC_DEP_COUNT; i++) { + of_node_put(deps[i].node); + if (err) + of_dev_put(deps[i].ofdev); + } + if (err == 0) { + dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev; + dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev; + dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev; + dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev; + dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev; + } + of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev); + return err; +} + +static int emac_read_uint_prop(struct device_node *np, const char *name, + u32 *val, int fatal) +{ + int len; + const u32 *prop = of_get_property(np, name, &len); + if (prop == NULL || len < sizeof(u32)) { + if (fatal) + printk(KERN_ERR "%s: missing %s property\n", + np->full_name, name); + return -ENODEV; + } + *val = *prop; + return 0; +} + +static int emac_init_phy(struct emac_instance *dev) +{ + struct device_node *np = dev->ofdev->dev.of_node; + struct net_device *ndev = dev->ndev; + u32 phy_map, adv; + int i; + + dev->phy.dev = ndev; + dev->phy.mode = dev->phy_mode; + + /* PHY-less configuration. + * XXX I probably should move these settings to the dev tree + */ + if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) { + emac_reset(dev); + + /* PHY-less configuration. + * XXX I probably should move these settings to the dev tree + */ + dev->phy.address = -1; + dev->phy.features = SUPPORTED_MII; + if (emac_phy_supports_gige(dev->phy_mode)) + dev->phy.features |= SUPPORTED_1000baseT_Full; + else + dev->phy.features |= SUPPORTED_100baseT_Full; + dev->phy.pause = 1; + + return 0; + } + + mutex_lock(&emac_phy_map_lock); + phy_map = dev->phy_map | busy_phy_map; + + DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map); + + dev->phy.mdio_read = emac_mdio_read; + dev->phy.mdio_write = emac_mdio_write; + + /* Enable internal clock source */ +#ifdef CONFIG_PPC_DCR_NATIVE + if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX)) + dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS); +#endif + /* PHY clock workaround */ + emac_rx_clk_tx(dev); + + /* Enable internal clock source on 440GX*/ +#ifdef CONFIG_PPC_DCR_NATIVE + if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX)) + dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS); +#endif + /* Configure EMAC with defaults so we can at least use MDIO + * This is needed mostly for 440GX + */ + if (emac_phy_gpcs(dev->phy.mode)) { + /* XXX + * Make GPCS PHY address equal to EMAC index. + * We probably should take into account busy_phy_map + * and/or phy_map here. + * + * Note that the busy_phy_map is currently global + * while it should probably be per-ASIC... 
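+ *
+ * Concretely: with no "gpcs-address" property (gpcs_address left at
+ * 0xffffffff), an EMAC with cell-index 2 simply ends up on PHY address 2
+ * below, without consulting either phy_map or the global busy_phy_map.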
+ */ + dev->phy.gpcs_address = dev->gpcs_address; + if (dev->phy.gpcs_address == 0xffffffff) + dev->phy.address = dev->cell_index; + } + + emac_configure(dev); + + if (dev->phy_address != 0xffffffff) + phy_map = ~(1 << dev->phy_address); + + for (i = 0; i < 0x20; phy_map >>= 1, ++i) + if (!(phy_map & 1)) { + int r; + busy_phy_map |= 1 << i; + + /* Quick check if there is a PHY at the address */ + r = emac_mdio_read(dev->ndev, i, MII_BMCR); + if (r == 0xffff || r < 0) + continue; + if (!emac_mii_phy_probe(&dev->phy, i)) + break; + } + + /* Enable external clock source */ +#ifdef CONFIG_PPC_DCR_NATIVE + if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX)) + dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0); +#endif + mutex_unlock(&emac_phy_map_lock); + if (i == 0x20) { + printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name); + return -ENXIO; + } + + /* Init PHY */ + if (dev->phy.def->ops->init) + dev->phy.def->ops->init(&dev->phy); + + /* Disable any PHY features not supported by the platform */ + dev->phy.def->features &= ~dev->phy_feat_exc; + dev->phy.features &= ~dev->phy_feat_exc; + + /* Setup initial link parameters */ + if (dev->phy.features & SUPPORTED_Autoneg) { + adv = dev->phy.features; + if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x)) + adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + /* Restart autonegotiation */ + dev->phy.def->ops->setup_aneg(&dev->phy, adv); + } else { + u32 f = dev->phy.def->features; + int speed = SPEED_10, fd = DUPLEX_HALF; + + /* Select highest supported speed/duplex */ + if (f & SUPPORTED_1000baseT_Full) { + speed = SPEED_1000; + fd = DUPLEX_FULL; + } else if (f & SUPPORTED_1000baseT_Half) + speed = SPEED_1000; + else if (f & SUPPORTED_100baseT_Full) { + speed = SPEED_100; + fd = DUPLEX_FULL; + } else if (f & SUPPORTED_100baseT_Half) + speed = SPEED_100; + else if (f & SUPPORTED_10baseT_Full) + fd = DUPLEX_FULL; + + /* Force link parameters */ + dev->phy.def->ops->setup_forced(&dev->phy, speed, fd); + } + return 0; +} + +static int emac_init_config(struct emac_instance *dev) +{ + struct device_node *np = dev->ofdev->dev.of_node; + const void *p; + + /* Read config from device-tree */ + if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0)) + dev->max_mtu = 1500; + if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0)) + dev->rx_fifo_size = 2048; + if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0)) + dev->tx_fifo_size = 2048; + if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0)) + dev->rx_fifo_size_gige = dev->rx_fifo_size; + if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0)) + dev->tx_fifo_size_gige = dev->tx_fifo_size; + if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0)) + dev->phy_address = 0xffffffff; + if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0)) + dev->phy_map = 0xffffffff; + if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0)) + dev->gpcs_address = 0xffffffff; + if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0)) + dev->tah_ph = 0; + if (emac_read_uint_prop(np, 
"tah-channel", &dev->tah_port, 0)) + dev->tah_port = 0; + if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0)) + dev->mdio_ph = 0; + if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0)) + dev->zmii_ph = 0; + if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0)) + dev->zmii_port = 0xffffffff; + if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0)) + dev->rgmii_ph = 0; + if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0)) + dev->rgmii_port = 0xffffffff; + if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0)) + dev->fifo_entry_size = 16; + if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0)) + dev->mal_burst_size = 256; + + /* PHY mode needs some decoding */ + dev->phy_mode = of_get_phy_mode(np); + if (dev->phy_mode < 0) + dev->phy_mode = PHY_MODE_NA; + + /* Check EMAC version */ + if (of_device_is_compatible(np, "ibm,emac4sync")) { + dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC); + if (of_device_is_compatible(np, "ibm,emac-460ex") || + of_device_is_compatible(np, "ibm,emac-460gt")) + dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX; + if (of_device_is_compatible(np, "ibm,emac-405ex") || + of_device_is_compatible(np, "ibm,emac-405exr")) + dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; + if (of_device_is_compatible(np, "ibm,emac-apm821xx")) { + dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE | + EMAC_FTR_APM821XX_NO_HALF_DUPLEX | + EMAC_FTR_460EX_PHY_CLK_FIX); + } + } else if (of_device_is_compatible(np, "ibm,emac4")) { + dev->features |= EMAC_FTR_EMAC4; + if (of_device_is_compatible(np, "ibm,emac-440gx")) + dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX; + } else { + if (of_device_is_compatible(np, "ibm,emac-440ep") || + of_device_is_compatible(np, "ibm,emac-440gr")) + dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; + if (of_device_is_compatible(np, "ibm,emac-405ez")) { +#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL + dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; +#else + printk(KERN_ERR "%s: Flow control not disabled!\n", + np->full_name); + return -ENXIO; +#endif + } + + } + + /* Fixup some feature bits based on the device tree */ + if (of_get_property(np, "has-inverted-stacr-oc", NULL)) + dev->features |= EMAC_FTR_STACR_OC_INVERT; + if (of_get_property(np, "has-new-stacr-staopc", NULL)) + dev->features |= EMAC_FTR_HAS_NEW_STACR; + + /* CAB lacks the appropriate properties */ + if (of_device_is_compatible(np, "ibm,emac-axon")) + dev->features |= EMAC_FTR_HAS_NEW_STACR | + EMAC_FTR_STACR_OC_INVERT; + + /* Enable TAH/ZMII/RGMII features as found */ + if (dev->tah_ph != 0) { +#ifdef CONFIG_IBM_EMAC_TAH + dev->features |= EMAC_FTR_HAS_TAH; +#else + printk(KERN_ERR "%s: TAH support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + if (dev->zmii_ph != 0) { +#ifdef CONFIG_IBM_EMAC_ZMII + dev->features |= EMAC_FTR_HAS_ZMII; +#else + printk(KERN_ERR "%s: ZMII support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + if (dev->rgmii_ph != 0) { +#ifdef CONFIG_IBM_EMAC_RGMII + dev->features |= EMAC_FTR_HAS_RGMII; +#else + printk(KERN_ERR "%s: RGMII support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + /* Read MAC-address */ + p = of_get_property(np, "local-mac-address", NULL); + if (p == NULL) { + printk(KERN_ERR "%s: Can't find local-mac-address property\n", + np->full_name); + return -ENXIO; + } + memcpy(dev->ndev->dev_addr, p, ETH_ALEN); + + /* IAHT and GAHT filter parameterization */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { + 
dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT; + dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT; + } else { + dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT; + dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT; + } + + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); + DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); + DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); + DBG(dev, "max_mtu : %d\n", dev->max_mtu); + DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq); + + return 0; +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_close, + .ndo_get_stats = emac_stats, + .ndo_set_rx_mode = emac_set_multicast_list, + .ndo_do_ioctl = emac_ioctl, + .ndo_tx_timeout = emac_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_start_xmit = emac_start_xmit, + .ndo_change_mtu = eth_change_mtu, +}; + +static const struct net_device_ops emac_gige_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_close, + .ndo_get_stats = emac_stats, + .ndo_set_rx_mode = emac_set_multicast_list, + .ndo_do_ioctl = emac_ioctl, + .ndo_tx_timeout = emac_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_start_xmit = emac_start_xmit_sg, + .ndo_change_mtu = emac_change_mtu, +}; + +static int emac_probe(struct platform_device *ofdev) +{ + struct net_device *ndev; + struct emac_instance *dev; + struct device_node *np = ofdev->dev.of_node; + struct device_node **blist = NULL; + int err, i; + + /* Skip unused/unwired EMACS. We leave the check for an unused + * property here for now, but new flat device trees should set a + * status property to "disabled" instead. + */ + if (of_get_property(np, "unused", NULL) || !of_device_is_available(np)) + return -ENODEV; + + /* Find ourselves in the bootlist if we are there */ + for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) + if (emac_boot_list[i] == np) + blist = &emac_boot_list[i]; + + /* Allocate our net_device structure */ + err = -ENOMEM; + ndev = alloc_etherdev(sizeof(struct emac_instance)); + if (!ndev) + goto err_gone; + + dev = netdev_priv(ndev); + dev->ndev = ndev; + dev->ofdev = ofdev; + dev->blist = blist; + SET_NETDEV_DEV(ndev, &ofdev->dev); + + /* Initialize some embedded data structures */ + mutex_init(&dev->mdio_lock); + mutex_init(&dev->link_lock); + spin_lock_init(&dev->lock); + INIT_WORK(&dev->reset_work, emac_reset_work); + + /* Init various config data based on device-tree */ + err = emac_init_config(dev); + if (err != 0) + goto err_free; + + /* Get interrupts. 
EMAC irq is mandatory, WOL irq is optional */ + dev->emac_irq = irq_of_parse_and_map(np, 0); + dev->wol_irq = irq_of_parse_and_map(np, 1); + if (dev->emac_irq == NO_IRQ) { + printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name); + goto err_free; + } + ndev->irq = dev->emac_irq; + + /* Map EMAC regs */ + if (of_address_to_resource(np, 0, &dev->rsrc_regs)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_irq_unmap; + } + // TODO : request_mem_region + dev->emacp = ioremap(dev->rsrc_regs.start, + resource_size(&dev->rsrc_regs)); + if (dev->emacp == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + err = -ENOMEM; + goto err_irq_unmap; + } + + /* Wait for dependent devices */ + err = emac_wait_deps(dev); + if (err) { + printk(KERN_ERR + "%s: Timeout waiting for dependent devices\n", + np->full_name); + /* display more info about what's missing ? */ + goto err_reg_unmap; + } + dev->mal = platform_get_drvdata(dev->mal_dev); + if (dev->mdio_dev != NULL) + dev->mdio_instance = platform_get_drvdata(dev->mdio_dev); + + /* Register with MAL */ + dev->commac.ops = &emac_commac_ops; + dev->commac.dev = dev; + dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan); + dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan); + err = mal_register_commac(dev->mal, &dev->commac); + if (err) { + printk(KERN_ERR "%s: failed to register with mal %s!\n", + np->full_name, dev->mal_dev->dev.of_node->full_name); + goto err_rel_deps; + } + dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); + dev->rx_sync_size = emac_rx_sync_size(ndev->mtu); + + /* Get pointers to BD rings */ + dev->tx_desc = + dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan); + dev->rx_desc = + dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan); + + DBG(dev, "tx_desc %p" NL, dev->tx_desc); + DBG(dev, "rx_desc %p" NL, dev->rx_desc); + + /* Clean rings */ + memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor)); + memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor)); + memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *)); + memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *)); + + /* Attach to ZMII, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) && + (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0) + goto err_unreg_commac; + + /* Attach to RGMII, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) && + (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0) + goto err_detach_zmii; + + /* Attach to TAH, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) && + (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0) + goto err_detach_rgmii; + + /* Set some link defaults before we can find out real parameters */ + dev->phy.speed = SPEED_100; + dev->phy.duplex = DUPLEX_FULL; + dev->phy.autoneg = AUTONEG_DISABLE; + dev->phy.pause = dev->phy.asym_pause = 0; + dev->stop_timeout = STOP_TIMEOUT_100; + INIT_DELAYED_WORK(&dev->link_work, emac_link_timer); + + /* Some SoCs like APM821xx does not support Half Duplex mode. 
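+ * The SUPPORTED_*_Half bits collected in phy_feat_exc below are masked
+ * out of both the PHY driver features and the advertised feature set in
+ * emac_init_phy(), so autonegotiation on these SoCs only advertises
+ * full-duplex modes.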
*/ + if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) { + dev->phy_feat_exc = (SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + } + + /* Find PHY if any */ + err = emac_init_phy(dev); + if (err != 0) + goto err_detach_tah; + + if (dev->tah_dev) { + ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG; + ndev->features |= ndev->hw_features | NETIF_F_RXCSUM; + } + ndev->watchdog_timeo = 5 * HZ; + if (emac_phy_supports_gige(dev->phy_mode)) { + ndev->netdev_ops = &emac_gige_netdev_ops; + dev->commac.ops = &emac_commac_sg_ops; + } else + ndev->netdev_ops = &emac_netdev_ops; + ndev->ethtool_ops = &emac_ethtool_ops; + + netif_carrier_off(ndev); + + err = register_netdev(ndev); + if (err) { + printk(KERN_ERR "%s: failed to register net device (%d)!\n", + np->full_name, err); + goto err_detach_tah; + } + + /* Set our drvdata last as we don't want them visible until we are + * fully initialized + */ + wmb(); + platform_set_drvdata(ofdev, dev); + + /* There's a new kid in town ! Let's tell everybody */ + wake_up_all(&emac_probe_wait); + + + printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n", + ndev->name, dev->cell_index, np->full_name, ndev->dev_addr); + + if (dev->phy_mode == PHY_MODE_SGMII) + printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name); + + if (dev->phy.address >= 0) + printk("%s: found %s PHY (0x%02x)\n", ndev->name, + dev->phy.def->name, dev->phy.address); + + emac_dbg_register(dev); + + /* Life is good */ + return 0; + + /* I have a bad feeling about this ... */ + + err_detach_tah: + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_detach(dev->tah_dev, dev->tah_port); + err_detach_rgmii: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_detach(dev->rgmii_dev, dev->rgmii_port); + err_detach_zmii: + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_detach(dev->zmii_dev, dev->zmii_port); + err_unreg_commac: + mal_unregister_commac(dev->mal, &dev->commac); + err_rel_deps: + emac_put_deps(dev); + err_reg_unmap: + iounmap(dev->emacp); + err_irq_unmap: + if (dev->wol_irq != NO_IRQ) + irq_dispose_mapping(dev->wol_irq); + if (dev->emac_irq != NO_IRQ) + irq_dispose_mapping(dev->emac_irq); + err_free: + free_netdev(ndev); + err_gone: + /* if we were on the bootlist, remove us as we won't show up and + * wake up all waiters to notify them in case they were waiting + * on us + */ + if (blist) { + *blist = NULL; + wake_up_all(&emac_probe_wait); + } + return err; +} + +static int emac_remove(struct platform_device *ofdev) +{ + struct emac_instance *dev = platform_get_drvdata(ofdev); + + DBG(dev, "remove" NL); + + unregister_netdev(dev->ndev); + + cancel_work_sync(&dev->reset_work); + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_detach(dev->tah_dev, dev->tah_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_detach(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_detach(dev->zmii_dev, dev->zmii_port); + + busy_phy_map &= ~(1 << dev->phy.address); + DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map); + + mal_unregister_commac(dev->mal, &dev->commac); + emac_put_deps(dev); + + emac_dbg_unregister(dev); + iounmap(dev->emacp); + + if (dev->wol_irq != NO_IRQ) + irq_dispose_mapping(dev->wol_irq); + if (dev->emac_irq != NO_IRQ) + irq_dispose_mapping(dev->emac_irq); + + free_netdev(dev->ndev); + + return 0; +} + +/* XXX Features in here should be replaced by properties... 
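+ * (emac_init_config() already takes this route for two STACR quirks via
+ * the "has-inverted-stacr-oc" and "has-new-stacr-staopc" properties.)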
*/ +static const struct of_device_id emac_match[] = +{ + { + .type = "network", + .compatible = "ibm,emac", + }, + { + .type = "network", + .compatible = "ibm,emac4", + }, + { + .type = "network", + .compatible = "ibm,emac4sync", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, emac_match); + +static struct platform_driver emac_driver = { + .driver = { + .name = "emac", + .of_match_table = emac_match, + }, + .probe = emac_probe, + .remove = emac_remove, +}; + +static void __init emac_make_bootlist(void) +{ + struct device_node *np = NULL; + int j, max, i = 0, k; + int cell_indices[EMAC_BOOT_LIST_SIZE]; + + /* Collect EMACs */ + while((np = of_find_all_nodes(np)) != NULL) { + const u32 *idx; + + if (of_match_node(emac_match, np) == NULL) + continue; + if (of_get_property(np, "unused", NULL)) + continue; + idx = of_get_property(np, "cell-index", NULL); + if (idx == NULL) + continue; + cell_indices[i] = *idx; + emac_boot_list[i++] = of_node_get(np); + if (i >= EMAC_BOOT_LIST_SIZE) { + of_node_put(np); + break; + } + } + max = i; + + /* Bubble sort them (doh, what a creative algorithm :-) */ + for (i = 0; max > 1 && (i < (max - 1)); i++) + for (j = i; j < max; j++) { + if (cell_indices[i] > cell_indices[j]) { + np = emac_boot_list[i]; + emac_boot_list[i] = emac_boot_list[j]; + emac_boot_list[j] = np; + k = cell_indices[i]; + cell_indices[i] = cell_indices[j]; + cell_indices[j] = k; + } + } +} + +static int __init emac_init(void) +{ + int rc; + + printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n"); + + /* Init debug stuff */ + emac_init_debug(); + + /* Build EMAC boot list */ + emac_make_bootlist(); + + /* Init submodules */ + rc = mal_init(); + if (rc) + goto err; + rc = zmii_init(); + if (rc) + goto err_mal; + rc = rgmii_init(); + if (rc) + goto err_zmii; + rc = tah_init(); + if (rc) + goto err_rgmii; + rc = platform_driver_register(&emac_driver); + if (rc) + goto err_tah; + + return 0; + + err_tah: + tah_exit(); + err_rgmii: + rgmii_exit(); + err_zmii: + zmii_exit(); + err_mal: + mal_exit(); + err: + return rc; +} + +static void __exit emac_exit(void) +{ + int i; + + platform_driver_unregister(&emac_driver); + + tah_exit(); + rgmii_exit(); + zmii_exit(); + mal_exit(); + emac_fini_debug(); + + /* Destroy EMAC boot list */ + for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) + of_node_put(emac_boot_list[i]); +} + +module_init(emac_init); +module_exit(emac_exit); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h new file mode 100644 index 000000000..28df37420 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -0,0 +1,467 @@ +/* + * drivers/net/ethernet/ibm/emac/core.h + * + * Driver for PowerPC 4xx on-chip ethernet controller. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Johnnie Peters + * Copyright 2000, 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_CORE_H +#define __IBM_NEWEMAC_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "emac.h" +#include "phy.h" +#include "zmii.h" +#include "rgmii.h" +#include "mal.h" +#include "tah.h" +#include "debug.h" + +#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB +#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB + +/* Simple sanity check */ +#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256 +#error Invalid number of buffer descriptors (greater than 256) +#endif + +#define EMAC_MIN_MTU 46 + +/* Maximum L2 header length (VLAN tagged, no FCS) */ +#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4) + +/* RX BD size for the given MTU */ +static inline int emac_rx_size(int mtu) +{ + if (mtu > ETH_DATA_LEN) + return MAL_MAX_RX_SIZE; + else + return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD); +} + +#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment()) + +#define EMAC_RX_SKB_HEADROOM \ + EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM) + +/* Size of RX skb for the given MTU */ +static inline int emac_rx_skb_size(int mtu) +{ + int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu)); + return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM; +} + +/* RX DMA sync size */ +static inline int emac_rx_sync_size(int mtu) +{ + return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2); +} + +/* Driver statistcs is split into two parts to make it more cache friendly: + * - normal statistics (packet count, etc) + * - error statistics + * + * When statistics is requested by ethtool, these parts are concatenated, + * normal one goes first. + * + * Please, keep these structures in sync with emac_stats_keys. + */ + +/* Normal TX/RX Statistics */ +struct emac_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets_csum; + u64 tx_packets_csum; +}; + +/* Error statistics */ +struct emac_error_stats { + u64 tx_undo; + + /* Software RX Errors */ + u64 rx_dropped_stack; + u64 rx_dropped_oom; + u64 rx_dropped_error; + u64 rx_dropped_resize; + u64 rx_dropped_mtu; + u64 rx_stopped; + /* BD reported RX errors */ + u64 rx_bd_errors; + u64 rx_bd_overrun; + u64 rx_bd_bad_packet; + u64 rx_bd_runt_packet; + u64 rx_bd_short_event; + u64 rx_bd_alignment_error; + u64 rx_bd_bad_fcs; + u64 rx_bd_packet_too_long; + u64 rx_bd_out_of_range; + u64 rx_bd_in_range; + /* EMAC IRQ reported RX errors */ + u64 rx_parity; + u64 rx_fifo_overrun; + u64 rx_overrun; + u64 rx_bad_packet; + u64 rx_runt_packet; + u64 rx_short_event; + u64 rx_alignment_error; + u64 rx_bad_fcs; + u64 rx_packet_too_long; + u64 rx_out_of_range; + u64 rx_in_range; + + /* Software TX Errors */ + u64 tx_dropped; + /* BD reported TX errors */ + u64 tx_bd_errors; + u64 tx_bd_bad_fcs; + u64 tx_bd_carrier_loss; + u64 tx_bd_excessive_deferral; + u64 tx_bd_excessive_collisions; + u64 tx_bd_late_collision; + u64 tx_bd_multple_collisions; + u64 tx_bd_single_collision; + u64 tx_bd_underrun; + u64 tx_bd_sqe; + /* EMAC IRQ reported TX errors */ + u64 tx_parity; + u64 tx_underrun; + u64 tx_sqe; + u64 tx_errors; +}; + +#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \ + sizeof(struct emac_error_stats)) \ + / sizeof(u64)) + +struct emac_instance { + struct net_device *ndev; + struct resource rsrc_regs; + struct emac_regs __iomem *emacp; + struct platform_device *ofdev; + struct device_node **blist; /* bootlist entry */ + + /* MAL linkage */ + u32 mal_ph; + struct platform_device *mal_dev; + u32 mal_rx_chan; + u32 mal_tx_chan; + struct mal_instance *mal; + struct 
mal_commac commac; + + /* PHY infos */ + u32 phy_mode; + u32 phy_map; + u32 phy_address; + u32 phy_feat_exc; + struct mii_phy phy; + struct mutex link_lock; + struct delayed_work link_work; + int link_polling; + + /* GPCS PHY infos */ + u32 gpcs_address; + + /* Shared MDIO if any */ + u32 mdio_ph; + struct platform_device *mdio_dev; + struct emac_instance *mdio_instance; + struct mutex mdio_lock; + + /* ZMII infos if any */ + u32 zmii_ph; + u32 zmii_port; + struct platform_device *zmii_dev; + + /* RGMII infos if any */ + u32 rgmii_ph; + u32 rgmii_port; + struct platform_device *rgmii_dev; + + /* TAH infos if any */ + u32 tah_ph; + u32 tah_port; + struct platform_device *tah_dev; + + /* IRQs */ + int wol_irq; + int emac_irq; + + /* OPB bus frequency in Mhz */ + u32 opb_bus_freq; + + /* Cell index within an ASIC (for clk mgmnt) */ + u32 cell_index; + + /* Max supported MTU */ + u32 max_mtu; + + /* Feature bits (from probe table) */ + unsigned int features; + + /* Tx and Rx fifo sizes & other infos in bytes */ + u32 tx_fifo_size; + u32 tx_fifo_size_gige; + u32 rx_fifo_size; + u32 rx_fifo_size_gige; + u32 fifo_entry_size; + u32 mal_burst_size; /* move to MAL ? */ + + /* IAHT and GAHT filter parameterization */ + u32 xaht_slots_shift; + u32 xaht_width_shift; + + /* Descriptor management + */ + struct mal_descriptor *tx_desc; + int tx_cnt; + int tx_slot; + int ack_slot; + + struct mal_descriptor *rx_desc; + int rx_slot; + struct sk_buff *rx_sg_skb; /* 1 */ + int rx_skb_size; + int rx_sync_size; + + struct sk_buff *tx_skb[NUM_TX_BUFF]; + struct sk_buff *rx_skb[NUM_RX_BUFF]; + + /* Stats + */ + struct emac_error_stats estats; + struct net_device_stats nstats; + struct emac_stats stats; + + /* Misc + */ + int reset_failed; + int stop_timeout; /* in us */ + int no_mcast; + int mcast_pending; + int opened; + struct work_struct reset_work; + spinlock_t lock; +}; + +/* + * Features of various EMAC implementations + */ + +/* + * No flow control on 40x according to the original driver + */ +#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001 +/* + * Cell is an EMAC4 + */ +#define EMAC_FTR_EMAC4 0x00000002 +/* + * For the 440SPe, AMCC inexplicably changed the polarity of + * the "operation complete" bit in the MII control register. + */ +#define EMAC_FTR_STACR_OC_INVERT 0x00000004 +/* + * Set if we have a TAH. + */ +#define EMAC_FTR_HAS_TAH 0x00000008 +/* + * Set if we have a ZMII. + */ +#define EMAC_FTR_HAS_ZMII 0x00000010 +/* + * Set if we have a RGMII. + */ +#define EMAC_FTR_HAS_RGMII 0x00000020 +/* + * Set if we have new type STACR with STAOPC + */ +#define EMAC_FTR_HAS_NEW_STACR 0x00000040 +/* + * Set if we need phy clock workaround for 440gx + */ +#define EMAC_FTR_440GX_PHY_CLK_FIX 0x00000080 +/* + * Set if we need phy clock workaround for 440ep or 440gr + */ +#define EMAC_FTR_440EP_PHY_CLK_FIX 0x00000100 +/* + * The 405EX and 460EX contain the EMAC4SYNC core + */ +#define EMAC_FTR_EMAC4SYNC 0x00000200 +/* + * Set if we need phy clock workaround for 460ex or 460gt + */ +#define EMAC_FTR_460EX_PHY_CLK_FIX 0x00000400 +/* + * APM821xx requires Jumbo frame size set explicitly + */ +#define EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE 0x00000800 +/* + * APM821xx does not support Half Duplex mode + */ +#define EMAC_FTR_APM821XX_NO_HALF_DUPLEX 0x00001000 + +/* Right now, we don't quite handle the always/possible masks on the + * most optimal way as we don't have a way to say something like + * always EMAC4. Patches welcome. 
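+ * The idea is that emac_has_feature() below can fold to a compile-time
+ * constant whenever a feature bit is either set in EMAC_FTRS_ALWAYS or
+ * absent from EMAC_FTRS_POSSIBLE; with EMAC_FTRS_ALWAYS fixed at 0, only
+ * the second, "compiled out entirely" case is currently reachable.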
+ */ +enum { + EMAC_FTRS_ALWAYS = 0, + + EMAC_FTRS_POSSIBLE = +#ifdef CONFIG_IBM_EMAC_EMAC4 + EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC | + EMAC_FTR_HAS_NEW_STACR | + EMAC_FTR_STACR_OC_INVERT | EMAC_FTR_440GX_PHY_CLK_FIX | +#endif +#ifdef CONFIG_IBM_EMAC_TAH + EMAC_FTR_HAS_TAH | +#endif +#ifdef CONFIG_IBM_EMAC_ZMII + EMAC_FTR_HAS_ZMII | +#endif +#ifdef CONFIG_IBM_EMAC_RGMII + EMAC_FTR_HAS_RGMII | +#endif +#ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL + EMAC_FTR_NO_FLOW_CONTROL_40x | +#endif + EMAC_FTR_460EX_PHY_CLK_FIX | + EMAC_FTR_440EP_PHY_CLK_FIX | + EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE | + EMAC_FTR_APM821XX_NO_HALF_DUPLEX, +}; + +static inline int emac_has_feature(struct emac_instance *dev, + unsigned long feature) +{ + return (EMAC_FTRS_ALWAYS & feature) || + (EMAC_FTRS_POSSIBLE & dev->features & feature); +} + +/* + * Various instances of the EMAC core have varying 1) number of + * address match slots, 2) width of the registers for handling address + * match slots, 3) number of registers for handling address match + * slots and 4) base offset for those registers. + * + * These macros and inlines handle these differences based on + * parameters supplied by the device structure which are, in turn, + * initialized based on the "compatible" entry in the device tree. + */ + +#define EMAC4_XAHT_SLOTS_SHIFT 6 +#define EMAC4_XAHT_WIDTH_SHIFT 4 + +#define EMAC4SYNC_XAHT_SLOTS_SHIFT 8 +#define EMAC4SYNC_XAHT_WIDTH_SHIFT 5 + +#define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift) +#define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift) +#define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \ + (dev)->xaht_width_shift)) + +#define EMAC_XAHT_CRC_TO_SLOT(dev, crc) \ + ((EMAC_XAHT_SLOTS(dev) - 1) - \ + ((crc) >> ((sizeof (u32) * BITS_PER_BYTE) - \ + (dev)->xaht_slots_shift))) + +#define EMAC_XAHT_SLOT_TO_REG(dev, slot) \ + ((slot) >> (dev)->xaht_width_shift) + +#define EMAC_XAHT_SLOT_TO_MASK(dev, slot) \ + ((u32)(1 << (EMAC_XAHT_WIDTH(dev) - 1)) >> \ + ((slot) & (u32)(EMAC_XAHT_WIDTH(dev) - 1))) + +static inline u32 *emac_xaht_base(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + int offset; + + /* The first IAHT entry always is the base of the block of + * IAHT and GAHT registers. + */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) + offset = offsetof(struct emac_regs, u1.emac4sync.iaht1); + else + offset = offsetof(struct emac_regs, u0.emac4.iaht1); + + return (u32 *)((ptrdiff_t)p + offset); +} + +static inline u32 *emac_gaht_base(struct emac_instance *dev) +{ + /* GAHT registers always come after an identical number of + * IAHT registers. + */ + return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev); +} + +static inline u32 *emac_iaht_base(struct emac_instance *dev) +{ + /* IAHT registers always come before an identical number of + * GAHT registers. + */ + return emac_xaht_base(dev); +} + +/* Ethtool get_regs complex data. + * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH + * when available. + * + * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr, + * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers. + * Each register component is preceded with emac_ethtool_regs_subhdr. 
+ * Order of the optional headers follows their relative bit posititions + * in emac_ethtool_regs_hdr.components + */ +#define EMAC_ETHTOOL_REGS_ZMII 0x00000001 +#define EMAC_ETHTOOL_REGS_RGMII 0x00000002 +#define EMAC_ETHTOOL_REGS_TAH 0x00000004 + +struct emac_ethtool_regs_hdr { + u32 components; +}; + +struct emac_ethtool_regs_subhdr { + u32 version; + u32 index; +}; + +#define EMAC_ETHTOOL_REGS_VER 0 +#define EMAC4_ETHTOOL_REGS_VER 1 +#define EMAC4SYNC_ETHTOOL_REGS_VER 2 + +#endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ethernet/ibm/emac/debug.c b/drivers/net/ethernet/ibm/emac/debug.c new file mode 100644 index 000000000..a559f326b --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/debug.c @@ -0,0 +1,270 @@ +/* + * drivers/net/ethernet/ibm/emac/debug.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies + * Eugene Surovegin or + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include +#include +#include +#include +#include +#include + +#include "core.h" + +static DEFINE_SPINLOCK(emac_dbg_lock); + +static void emac_desc_dump(struct emac_instance *p) +{ + int i; + printk("** EMAC %s TX BDs **\n" + " tx_cnt = %d tx_slot = %d ack_slot = %d\n", + p->ofdev->dev.of_node->full_name, + p->tx_cnt, p->tx_slot, p->ack_slot); + for (i = 0; i < NUM_TX_BUFF / 2; ++i) + printk + ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", + i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ', + p->tx_desc[i].ctrl, p->tx_desc[i].data_len, + NUM_TX_BUFF / 2 + i, + p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr, + p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ', + p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl, + p->tx_desc[NUM_TX_BUFF / 2 + i].data_len); + + printk("** EMAC %s RX BDs **\n" + " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" + " rx_sg_skb = 0x%p\n", + p->ofdev->dev.of_node->full_name, + p->rx_slot, p->commac.flags, p->rx_skb_size, + p->rx_sync_size, p->rx_sg_skb); + for (i = 0; i < NUM_RX_BUFF / 2; ++i) + printk + ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", + i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ', + p->rx_desc[i].ctrl, p->rx_desc[i].data_len, + NUM_RX_BUFF / 2 + i, + p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr, + p->rx_skb[NUM_RX_BUFF / 2 + i] ? 
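+/*
+ * The TX and RX loops here print two descriptors per output line, pairing
+ * bd[i] with bd[i + NUM_TX_BUFF/2] (resp. NUM_RX_BUFF/2).  For each
+ * descriptor the columns are: data_ptr, 'V' when an skb is still attached
+ * to that slot, the MAL ctrl word, and data_len.
+ */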
'V' : ' ', + p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl, + p->rx_desc[NUM_RX_BUFF / 2 + i].data_len); +} + +static void emac_mac_dump(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + const int xaht_regs = EMAC_XAHT_REGS(dev); + u32 *gaht_base = emac_gaht_base(dev); + u32 *iaht_base = emac_iaht_base(dev); + int emac4sync = emac_has_feature(dev, EMAC_FTR_EMAC4SYNC); + int n; + + printk("** EMAC %s registers **\n" + "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" + "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" + "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n", + dev->ofdev->dev.of_node->full_name, + in_be32(&p->mr0), in_be32(&p->mr1), + in_be32(&p->tmr0), in_be32(&p->tmr1), + in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), + in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), + in_be32(&p->vtci) + ); + + if (emac4sync) + printk("MAR = %04x%08x MMAR = %04x%08x\n", + in_be32(&p->u0.emac4sync.mahr), + in_be32(&p->u0.emac4sync.malr), + in_be32(&p->u0.emac4sync.mmahr), + in_be32(&p->u0.emac4sync.mmalr) + ); + + for (n = 0; n < xaht_regs; n++) + printk("IAHT%02d = 0x%08x\n", n + 1, in_be32(iaht_base + n)); + + for (n = 0; n < xaht_regs; n++) + printk("GAHT%02d = 0x%08x\n", n + 1, in_be32(gaht_base + n)); + + printk("LSA = %04x%08x IPGVR = 0x%04x\n" + "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n" + "OCTX = 0x%08x OCRX = 0x%08x\n", + in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr), + in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr), + in_be32(&p->octx), in_be32(&p->ocrx) + ); + + if (!emac4sync) { + printk("IPCR = 0x%08x\n", + in_be32(&p->u1.emac4.ipcr) + ); + } else { + printk("REVID = 0x%08x TPC = 0x%08x\n", + in_be32(&p->u1.emac4sync.revid), + in_be32(&p->u1.emac4sync.tpc) + ); + } + + emac_desc_dump(dev); +} + +static void emac_mal_dump(struct mal_instance *mal) +{ + int i; + + printk("** MAL %s Registers **\n" + "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" + "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" + "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", + mal->ofdev->dev.of_node->full_name, + get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR), + get_mal_dcrn(mal, MAL_IER), + get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), + get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR), + get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR), + get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR) + ); + + printk("TX|"); + for (i = 0; i < mal->num_tx_chans; ++i) { + if (i && !(i % 4)) + printk("\n "); + printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i))); + } + printk("\nRX|"); + for (i = 0; i < mal->num_rx_chans; ++i) { + if (i && !(i % 4)) + printk("\n "); + printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i))); + } + printk("\n "); + for (i = 0; i < mal->num_rx_chans; ++i) { + u32 r = get_mal_dcrn(mal, MAL_RCBS(i)); + if (i && !(i % 3)) + printk("\n "); + printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16); + } + printk("\n"); +} + +static struct emac_instance *__emacs[4]; +static struct mal_instance *__mals[1]; + +void emac_dbg_register(struct emac_instance *dev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__emacs); i++) + if (__emacs[i] == NULL) { + __emacs[i] = dev; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void emac_dbg_unregister(struct emac_instance *dev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < 
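+/*
+ * __emacs[] and __mals[] are fixed-size slot tables guarded by emac_dbg_lock;
+ * the register helpers take the first free slot and the unregister helpers
+ * clear it again, so emac_dbg_dump_all() (and the SysRq 'c' handler) can walk
+ * every live instance without any refcounting.
+ */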
ARRAY_SIZE(__emacs); i++) + if (__emacs[i] == dev) { + __emacs[i] = NULL; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void mal_dbg_register(struct mal_instance *mal) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__mals); i++) + if (__mals[i] == NULL) { + __mals[i] = mal; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void mal_dbg_unregister(struct mal_instance *mal) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__mals); i++) + if (__mals[i] == mal) { + __mals[i] = NULL; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void emac_dbg_dump_all(void) +{ + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&emac_dbg_lock, flags); + + for (i = 0; i < ARRAY_SIZE(__mals); ++i) + if (__mals[i]) + emac_mal_dump(__mals[i]); + + for (i = 0; i < ARRAY_SIZE(__emacs); ++i) + if (__emacs[i]) + emac_mac_dump(__emacs[i]); + + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +#if defined(CONFIG_MAGIC_SYSRQ) +static void emac_sysrq_handler(int key) +{ + emac_dbg_dump_all(); +} + +static struct sysrq_key_op emac_sysrq_op = { + .handler = emac_sysrq_handler, + .help_msg = "emac(c)", + .action_msg = "Show EMAC(s) status", +}; + +int __init emac_init_debug(void) +{ + return register_sysrq_key('c', &emac_sysrq_op); +} + +void __exit emac_fini_debug(void) +{ + unregister_sysrq_key('c', &emac_sysrq_op); +} + +#else +int __init emac_init_debug(void) +{ + return 0; +} +void __exit emac_fini_debug(void) +{ +} +#endif /* CONFIG_MAGIC_SYSRQ */ diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h new file mode 100644 index 000000000..9c45efe4c --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/debug.h @@ -0,0 +1,83 @@ +/* + * drivers/net/ethernet/ibm/emac/debug.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies + * Eugene Surovegin or + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __IBM_NEWEMAC_DEBUG_H +#define __IBM_NEWEMAC_DEBUG_H + +#include + +#include "core.h" + +#if defined(CONFIG_IBM_EMAC_DEBUG) + +struct emac_instance; +struct mal_instance; + +void emac_dbg_register(struct emac_instance *dev); +void emac_dbg_unregister(struct emac_instance *dev); +void mal_dbg_register(struct mal_instance *mal); +void mal_dbg_unregister(struct mal_instance *mal); +int emac_init_debug(void) __init; +void emac_fini_debug(void) __exit; +void emac_dbg_dump_all(void); + +# define DBG_LEVEL 1 + +#else + +# define emac_dbg_register(x) do { } while(0) +# define emac_dbg_unregister(x) do { } while(0) +# define mal_dbg_register(x) do { } while(0) +# define mal_dbg_unregister(x) do { } while(0) +# define emac_init_debug() do { } while(0) +# define emac_fini_debug() do { } while(0) +# define emac_dbg_dump_all() do { } while(0) + +# define DBG_LEVEL 0 + +#endif + +#define EMAC_DBG(d, name, fmt, arg...) \ + printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg) + +#if DBG_LEVEL > 0 +# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x) +# define MAL_DBG(d,f,x...) 
EMAC_DBG(d, mal, f, ##x) +# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x) +# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x) +# define NL "\n" +#else +# define DBG(f,x...) ((void)0) +# define MAL_DBG(d,f,x...) ((void)0) +# define ZMII_DBG(d,f,x...) ((void)0) +# define RGMII_DBG(d,f,x...) ((void)0) +#endif +#if DBG_LEVEL > 1 +# define DBG2(d,f,x...) DBG(d,f, ##x) +# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x) +# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x) +# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x) +#else +# define DBG2(f,x...) ((void)0) +# define MAL_DBG2(d,f,x...) ((void)0) +# define ZMII_DBG2(d,f,x...) ((void)0) +# define RGMII_DBG2(d,f,x...) ((void)0) +#endif + +#endif /* __IBM_NEWEMAC_DEBUG_H */ diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h new file mode 100644 index 000000000..5afcc27ce --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/emac.h @@ -0,0 +1,314 @@ +/* + * drivers/net/ethernet/ibm/emac/emac.h + * + * Register definitions for PowerPC 4xx on-chip ethernet contoller + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Matt Porter + * Armin Kuster + * Copyright 2002-2004 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __IBM_NEWEMAC_H +#define __IBM_NEWEMAC_H + +#include +#include + +/* EMAC registers Write Access rules */ +struct emac_regs { + /* Common registers across all EMAC implementations. */ + u32 mr0; /* Special */ + u32 mr1; /* Reset */ + u32 tmr0; /* Special */ + u32 tmr1; /* Special */ + u32 rmr; /* Reset */ + u32 isr; /* Always */ + u32 iser; /* Reset */ + u32 iahr; /* Reset, R, T */ + u32 ialr; /* Reset, R, T */ + u32 vtpid; /* Reset, R, T */ + u32 vtci; /* Reset, R, T */ + u32 ptr; /* Reset, T */ + union { + /* Registers unique to EMAC4 implementations */ + struct { + u32 iaht1; /* Reset, R */ + u32 iaht2; /* Reset, R */ + u32 iaht3; /* Reset, R */ + u32 iaht4; /* Reset, R */ + u32 gaht1; /* Reset, R */ + u32 gaht2; /* Reset, R */ + u32 gaht3; /* Reset, R */ + u32 gaht4; /* Reset, R */ + } emac4; + /* Registers unique to EMAC4SYNC implementations */ + struct { + u32 mahr; /* Reset, R, T */ + u32 malr; /* Reset, R, T */ + u32 mmahr; /* Reset, R, T */ + u32 mmalr; /* Reset, R, T */ + u32 rsvd0[4]; + } emac4sync; + } u0; + /* Common registers across all EMAC implementations. 
*/ + u32 lsah; + u32 lsal; + u32 ipgvr; /* Reset, T */ + u32 stacr; /* Special */ + u32 trtr; /* Special */ + u32 rwmr; /* Reset */ + u32 octx; + u32 ocrx; + union { + /* Registers unique to EMAC4 implementations */ + struct { + u32 ipcr; + } emac4; + /* Registers unique to EMAC4SYNC implementations */ + struct { + u32 rsvd1; + u32 revid; + u32 rsvd2[2]; + u32 iaht1; /* Reset, R */ + u32 iaht2; /* Reset, R */ + u32 iaht3; /* Reset, R */ + u32 iaht4; /* Reset, R */ + u32 iaht5; /* Reset, R */ + u32 iaht6; /* Reset, R */ + u32 iaht7; /* Reset, R */ + u32 iaht8; /* Reset, R */ + u32 gaht1; /* Reset, R */ + u32 gaht2; /* Reset, R */ + u32 gaht3; /* Reset, R */ + u32 gaht4; /* Reset, R */ + u32 gaht5; /* Reset, R */ + u32 gaht6; /* Reset, R */ + u32 gaht7; /* Reset, R */ + u32 gaht8; /* Reset, R */ + u32 tpc; /* Reset, T */ + } emac4sync; + } u1; +}; + +/* + * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY) + */ +#define PHY_MODE_NA PHY_INTERFACE_MODE_NA +#define PHY_MODE_MII PHY_INTERFACE_MODE_MII +#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII +#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII +#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII +#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI +#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII +#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI +#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII + +/* EMACx_MR0 */ +#define EMAC_MR0_RXI 0x80000000 +#define EMAC_MR0_TXI 0x40000000 +#define EMAC_MR0_SRST 0x20000000 +#define EMAC_MR0_TXE 0x10000000 +#define EMAC_MR0_RXE 0x08000000 +#define EMAC_MR0_WKE 0x04000000 + +/* EMACx_MR1 */ +#define EMAC_MR1_FDE 0x80000000 +#define EMAC_MR1_ILE 0x40000000 +#define EMAC_MR1_VLE 0x20000000 +#define EMAC_MR1_EIFC 0x10000000 +#define EMAC_MR1_APP 0x08000000 +#define EMAC_MR1_IST 0x01000000 + +#define EMAC_MR1_MF_MASK 0x00c00000 +#define EMAC_MR1_MF_10 0x00000000 +#define EMAC_MR1_MF_100 0x00400000 +#define EMAC_MR1_MF_1000 0x00800000 +#define EMAC_MR1_MF_1000GPCS 0x00c00000 +#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6) + +#define EMAC_MR1_RFS_4K 0x00300000 +#define EMAC_MR1_RFS_16K 0x00000000 +#define EMAC_MR1_TFS_2K 0x00080000 +#define EMAC_MR1_TR0_MULT 0x00008000 +#define EMAC_MR1_JPSM 0x00000000 +#define EMAC_MR1_MWSW_001 0x00000000 +#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT) + + +#define EMAC4_MR1_RFS_2K 0x00100000 +#define EMAC4_MR1_RFS_4K 0x00180000 +#define EMAC4_MR1_RFS_16K 0x00280000 +#define EMAC4_MR1_TFS_2K 0x00020000 +#define EMAC4_MR1_TFS_4K 0x00030000 +#define EMAC4_MR1_TFS_16K 0x00050000 +#define EMAC4_MR1_TR 0x00008000 +#define EMAC4_MR1_MWSW_001 0x00001000 +#define EMAC4_MR1_JPSM 0x00000800 +#define EMAC4_MR1_OBCI_MASK 0x00000038 +#define EMAC4_MR1_OBCI_50 0x00000000 +#define EMAC4_MR1_OBCI_66 0x00000008 +#define EMAC4_MR1_OBCI_83 0x00000010 +#define EMAC4_MR1_OBCI_100 0x00000018 +#define EMAC4_MR1_OBCI_100P 0x00000020 +#define EMAC4_MR1_OBCI(freq) ((freq) <= 50 ? EMAC4_MR1_OBCI_50 : \ + (freq) <= 66 ? EMAC4_MR1_OBCI_66 : \ + (freq) <= 83 ? EMAC4_MR1_OBCI_83 : \ + (freq) <= 100 ? 
EMAC4_MR1_OBCI_100 : \ + EMAC4_MR1_OBCI_100P) + +/* EMACx_TMR0 */ +#define EMAC_TMR0_GNP 0x80000000 +#define EMAC_TMR0_DEFAULT 0x00000000 +#define EMAC4_TMR0_TFAE_2_32 0x00000001 +#define EMAC4_TMR0_TFAE_4_64 0x00000002 +#define EMAC4_TMR0_TFAE_8_128 0x00000003 +#define EMAC4_TMR0_TFAE_16_256 0x00000004 +#define EMAC4_TMR0_TFAE_32_512 0x00000005 +#define EMAC4_TMR0_TFAE_64_1024 0x00000006 +#define EMAC4_TMR0_TFAE_128_2048 0x00000007 +#define EMAC4_TMR0_DEFAULT EMAC4_TMR0_TFAE_2_32 +#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT) +#define EMAC4_TMR0_XMIT (EMAC_TMR0_GNP | EMAC4_TMR0_DEFAULT) + +/* EMACx_TMR1 */ + +#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16)) +#define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14)) + +/* EMACx_RMR */ +#define EMAC_RMR_SP 0x80000000 +#define EMAC_RMR_SFCS 0x40000000 +#define EMAC_RMR_RRP 0x20000000 +#define EMAC_RMR_RFP 0x10000000 +#define EMAC_RMR_ROP 0x08000000 +#define EMAC_RMR_RPIR 0x04000000 +#define EMAC_RMR_PPP 0x02000000 +#define EMAC_RMR_PME 0x01000000 +#define EMAC_RMR_PMME 0x00800000 +#define EMAC_RMR_IAE 0x00400000 +#define EMAC_RMR_MIAE 0x00200000 +#define EMAC_RMR_BAE 0x00100000 +#define EMAC_RMR_MAE 0x00080000 +#define EMAC_RMR_BASE 0x00000000 +#define EMAC4_RMR_RFAF_2_32 0x00000001 +#define EMAC4_RMR_RFAF_4_64 0x00000002 +#define EMAC4_RMR_RFAF_8_128 0x00000003 +#define EMAC4_RMR_RFAF_16_256 0x00000004 +#define EMAC4_RMR_RFAF_32_512 0x00000005 +#define EMAC4_RMR_RFAF_64_1024 0x00000006 +#define EMAC4_RMR_RFAF_128_2048 0x00000007 +#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048 +#define EMAC4_RMR_MJS_MASK 0x0001fff8 +#define EMAC4_RMR_MJS(s) (((s) << 3) & EMAC4_RMR_MJS_MASK) + +/* EMACx_ISR & EMACx_ISER */ +#define EMAC4_ISR_TXPE 0x20000000 +#define EMAC4_ISR_RXPE 0x10000000 +#define EMAC4_ISR_TXUE 0x08000000 +#define EMAC4_ISR_RXOE 0x04000000 +#define EMAC_ISR_OVR 0x02000000 +#define EMAC_ISR_PP 0x01000000 +#define EMAC_ISR_BP 0x00800000 +#define EMAC_ISR_RP 0x00400000 +#define EMAC_ISR_SE 0x00200000 +#define EMAC_ISR_ALE 0x00100000 +#define EMAC_ISR_BFCS 0x00080000 +#define EMAC_ISR_PTLE 0x00040000 +#define EMAC_ISR_ORE 0x00020000 +#define EMAC_ISR_IRE 0x00010000 +#define EMAC_ISR_SQE 0x00000080 +#define EMAC_ISR_TE 0x00000040 +#define EMAC_ISR_MOS 0x00000002 +#define EMAC_ISR_MOF 0x00000001 + +/* EMACx_STACR */ +#define EMAC_STACR_PHYD_MASK 0xffff +#define EMAC_STACR_PHYD_SHIFT 16 +#define EMAC_STACR_OC 0x00008000 +#define EMAC_STACR_PHYE 0x00004000 +#define EMAC_STACR_STAC_MASK 0x00003000 +#define EMAC_STACR_STAC_READ 0x00001000 +#define EMAC_STACR_STAC_WRITE 0x00002000 +#define EMAC_STACR_OPBC_MASK 0x00000C00 +#define EMAC_STACR_OPBC_50 0x00000000 +#define EMAC_STACR_OPBC_66 0x00000400 +#define EMAC_STACR_OPBC_83 0x00000800 +#define EMAC_STACR_OPBC_100 0x00000C00 +#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \ + (freq) <= 66 ? EMAC_STACR_OPBC_66 : \ + (freq) <= 83 ? 
EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100) +#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb) +#define EMAC4_STACR_BASE(opb) 0x00000000 +#define EMAC_STACR_PCDA_MASK 0x1f +#define EMAC_STACR_PCDA_SHIFT 5 +#define EMAC_STACR_PRA_MASK 0x1f +#define EMACX_STACR_STAC_MASK 0x00003800 +#define EMACX_STACR_STAC_READ 0x00001000 +#define EMACX_STACR_STAC_WRITE 0x00000800 +#define EMACX_STACR_STAC_IND_ADDR 0x00002000 +#define EMACX_STACR_STAC_IND_READ 0x00003800 +#define EMACX_STACR_STAC_IND_READINC 0x00003000 +#define EMACX_STACR_STAC_IND_WRITE 0x00002800 + + +/* EMACx_TRTR */ +#define EMAC_TRTR_SHIFT_EMAC4 24 +#define EMAC_TRTR_SHIFT 27 + +/* EMAC specific TX descriptor control fields (write access) */ +#define EMAC_TX_CTRL_GFCS 0x0200 +#define EMAC_TX_CTRL_GP 0x0100 +#define EMAC_TX_CTRL_ISA 0x0080 +#define EMAC_TX_CTRL_RSA 0x0040 +#define EMAC_TX_CTRL_IVT 0x0020 +#define EMAC_TX_CTRL_RVT 0x0010 +#define EMAC_TX_CTRL_TAH_CSUM 0x000e + +/* EMAC specific TX descriptor status fields (read access) */ +#define EMAC_TX_ST_BFCS 0x0200 +#define EMAC_TX_ST_LCS 0x0080 +#define EMAC_TX_ST_ED 0x0040 +#define EMAC_TX_ST_EC 0x0020 +#define EMAC_TX_ST_LC 0x0010 +#define EMAC_TX_ST_MC 0x0008 +#define EMAC_TX_ST_SC 0x0004 +#define EMAC_TX_ST_UR 0x0002 +#define EMAC_TX_ST_SQE 0x0001 +#define EMAC_IS_BAD_TX (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \ + EMAC_TX_ST_EC | EMAC_TX_ST_LC | \ + EMAC_TX_ST_MC | EMAC_TX_ST_UR) +#define EMAC_IS_BAD_TX_TAH (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \ + EMAC_TX_ST_EC | EMAC_TX_ST_LC) + +/* EMAC specific RX descriptor status fields (read access) */ +#define EMAC_RX_ST_OE 0x0200 +#define EMAC_RX_ST_PP 0x0100 +#define EMAC_RX_ST_BP 0x0080 +#define EMAC_RX_ST_RP 0x0040 +#define EMAC_RX_ST_SE 0x0020 +#define EMAC_RX_ST_AE 0x0010 +#define EMAC_RX_ST_BFCS 0x0008 +#define EMAC_RX_ST_PTL 0x0004 +#define EMAC_RX_ST_ORE 0x0002 +#define EMAC_RX_ST_IRE 0x0001 +#define EMAC_RX_TAH_BAD_CSUM 0x0003 +#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \ + EMAC_RX_ST_RP | EMAC_RX_ST_SE | \ + EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \ + EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \ + EMAC_RX_ST_IRE ) +#endif /* __IBM_NEWEMAC_H */ diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c new file mode 100644 index 000000000..fdb5cdb3c --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -0,0 +1,793 @@ +/* + * drivers/net/ethernet/ibm/emac/mal.c + * + * Memory Access Layer (MAL) support + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Benjamin Herrenschmidt , + * David Gibson , + * + * Armin Kuster + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#include +#include +#include + +#include "core.h" +#include + +static int mal_count; + +int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "reg(%08x, %08x)" NL, + commac->tx_chan_mask, commac->rx_chan_mask); + + /* Don't let multiple commacs claim the same channel(s) */ + if ((mal->tx_chan_mask & commac->tx_chan_mask) || + (mal->rx_chan_mask & commac->rx_chan_mask)) { + spin_unlock_irqrestore(&mal->lock, flags); + printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n", + mal->index); + return -EBUSY; + } + + if (list_empty(&mal->list)) + napi_enable(&mal->napi); + mal->tx_chan_mask |= commac->tx_chan_mask; + mal->rx_chan_mask |= commac->rx_chan_mask; + list_add(&commac->list, &mal->list); + + spin_unlock_irqrestore(&mal->lock, flags); + + return 0; +} + +void mal_unregister_commac(struct mal_instance *mal, + struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "unreg(%08x, %08x)" NL, + commac->tx_chan_mask, commac->rx_chan_mask); + + mal->tx_chan_mask &= ~commac->tx_chan_mask; + mal->rx_chan_mask &= ~commac->rx_chan_mask; + list_del_init(&commac->list); + if (list_empty(&mal->list)) + napi_disable(&mal->napi); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size) +{ + BUG_ON(channel < 0 || channel >= mal->num_rx_chans || + size > MAL_MAX_RX_SIZE); + + MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size); + + if (size & 0xf) { + printk(KERN_WARNING + "mal%d: incorrect RX size %lu for the channel %d\n", + mal->index, size, channel); + return -EINVAL; + } + + set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4); + return 0; +} + +int mal_tx_bd_offset(struct mal_instance *mal, int channel) +{ + BUG_ON(channel < 0 || channel >= mal->num_tx_chans); + + return channel * NUM_TX_BUFF; +} + +int mal_rx_bd_offset(struct mal_instance *mal, int channel) +{ + BUG_ON(channel < 0 || channel >= mal->num_rx_chans); + return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF; +} + +void mal_enable_tx_channel(struct mal_instance *mal, int channel) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "enable_tx(%d)" NL, channel); + + set_mal_dcrn(mal, MAL_TXCASR, + get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel)); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_disable_tx_channel(struct mal_instance *mal, int channel) +{ + set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel)); + + MAL_DBG(mal, "disable_tx(%d)" NL, channel); +} + +void mal_enable_rx_channel(struct mal_instance *mal, int channel) +{ + unsigned long flags; + + /* + * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple + * of 8, but enabling in MAL_RXCASR needs the divided by 8 value + * for the bitmask + */ + if (!(channel % 8)) + channel >>= 3; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "enable_rx(%d)" NL, channel); + + set_mal_dcrn(mal, MAL_RXCASR, + get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel)); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_disable_rx_channel(struct mal_instance *mal, int channel) +{ + /* + * On some 4xx PPC's (e.g. 
460EX/GT), the rx channel is a multiple + * of 8, but enabling in MAL_RXCASR needs the divided by 8 value + * for the bitmask + */ + if (!(channel % 8)) + channel >>= 3; + + set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel)); + + MAL_DBG(mal, "disable_rx(%d)" NL, channel); +} + +void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "poll_add(%p)" NL, commac); + + /* starts disabled */ + set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); + + list_add_tail(&commac->poll_list, &mal->poll_list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "poll_del(%p)" NL, commac); + + list_del(&commac->poll_list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +/* synchronized by mal_poll() */ +static inline void mal_enable_eob_irq(struct mal_instance *mal) +{ + MAL_DBG2(mal, "enable_irq" NL); + + // XXX might want to cache MAL_CFG as the DCR read can be slooooow + set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE); +} + +/* synchronized by NAPI state */ +static inline void mal_disable_eob_irq(struct mal_instance *mal) +{ + // XXX might want to cache MAL_CFG as the DCR read can be slooooow + set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE); + + MAL_DBG2(mal, "disable_irq" NL); +} + +static irqreturn_t mal_serr(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 esr = get_mal_dcrn(mal, MAL_ESR); + + /* Clear the error status register */ + set_mal_dcrn(mal, MAL_ESR, esr); + + MAL_DBG(mal, "SERR %08x" NL, esr); + + if (esr & MAL_ESR_EVB) { + if (esr & MAL_ESR_DE) { + /* We ignore Descriptor error, + * TXDE or RXDE interrupt will be generated anyway. + */ + return IRQ_HANDLED; + } + + if (esr & MAL_ESR_PEIN) { + /* PLB error, it's probably buggy hardware or + * incorrect physical address in BD (i.e. 
bug) + */ + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: system error, " + "PLB (ESR = 0x%08x)\n", + mal->index, esr); + return IRQ_HANDLED; + } + + /* OPB error, it's probably buggy hardware or incorrect + * EBC setup + */ + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: system error, OPB (ESR = 0x%08x)\n", + mal->index, esr); + } + return IRQ_HANDLED; +} + +static inline void mal_schedule_poll(struct mal_instance *mal) +{ + if (likely(napi_schedule_prep(&mal->napi))) { + MAL_DBG2(mal, "schedule_poll" NL); + spin_lock(&mal->lock); + mal_disable_eob_irq(mal); + spin_unlock(&mal->lock); + __napi_schedule(&mal->napi); + } else + MAL_DBG2(mal, "already in poll" NL); +} + +static irqreturn_t mal_txeob(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 r = get_mal_dcrn(mal, MAL_TXEOBISR); + + MAL_DBG2(mal, "txeob %08x" NL, r); + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_TXEOBISR, r); + +#ifdef CONFIG_PPC_DCR_NATIVE + if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) + mtdcri(SDR0, DCRN_SDR_ICINTSTAT, + (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX)); +#endif + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxeob(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 r = get_mal_dcrn(mal, MAL_RXEOBISR); + + MAL_DBG2(mal, "rxeob %08x" NL, r); + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_RXEOBISR, r); + +#ifdef CONFIG_PPC_DCR_NATIVE + if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) + mtdcri(SDR0, DCRN_SDR_ICINTSTAT, + (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX)); +#endif + + return IRQ_HANDLED; +} + +static irqreturn_t mal_txde(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 deir = get_mal_dcrn(mal, MAL_TXDEIR); + set_mal_dcrn(mal, MAL_TXDEIR, deir); + + MAL_DBG(mal, "txde %08x" NL, deir); + + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n", + mal->index, deir); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxde(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + struct list_head *l; + + u32 deir = get_mal_dcrn(mal, MAL_RXDEIR); + + MAL_DBG(mal, "rxde %08x" NL, deir); + + list_for_each(l, &mal->list) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + if (deir & mc->rx_chan_mask) { + set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags); + mc->ops->rxde(mc->dev); + } + } + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_RXDEIR, deir); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_int(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + u32 esr = get_mal_dcrn(mal, MAL_ESR); + + if (esr & MAL_ESR_EVB) { + /* descriptor error */ + if (esr & MAL_ESR_DE) { + if (esr & MAL_ESR_CIDT) + return mal_rxde(irq, dev_instance); + else + return mal_txde(irq, dev_instance); + } else { /* SERR */ + return mal_serr(irq, dev_instance); + } + } + return IRQ_HANDLED; +} + +void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac) +{ + /* Spinlock-type semantics: only one caller disable poll at a time */ + while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags)) + msleep(1); + + /* Synchronize with the MAL NAPI poller */ + napi_synchronize(&mal->napi); +} + +void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) +{ + smp_wmb(); + clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); + + /* Feels better to trigger a poll here to catch up with events that + * may have happened on this channel while disabled. 
It will most + * probably be delayed until the next interrupt but that's mostly a + * non-issue in the context where this is called. + */ + napi_schedule(&mal->napi); +} + +static int mal_poll(struct napi_struct *napi, int budget) +{ + struct mal_instance *mal = container_of(napi, struct mal_instance, napi); + struct list_head *l; + int received = 0; + unsigned long flags; + + MAL_DBG2(mal, "poll(%d)" NL, budget); + again: + /* Process TX skbs */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + mc->ops->poll_tx(mc->dev); + } + + /* Process RX skbs. + * + * We _might_ need something more smart here to enforce polling + * fairness. + */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + int n; + if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) + continue; + n = mc->ops->poll_rx(mc->dev, budget); + if (n) { + received += n; + budget -= n; + if (budget <= 0) + goto more_work; // XXX What if this is the last one ? + } + } + + /* We need to disable IRQs to protect from RXDE IRQ here */ + spin_lock_irqsave(&mal->lock, flags); + __napi_complete(napi); + mal_enable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + + /* Check for "rotting" packet(s) */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) + continue; + if (unlikely(mc->ops->peek_rx(mc->dev) || + test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { + MAL_DBG2(mal, "rotting packet" NL); + if (!napi_reschedule(napi)) + goto more_work; + + spin_lock_irqsave(&mal->lock, flags); + mal_disable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + goto again; + } + mc->ops->poll_tx(mc->dev); + } + + more_work: + MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received); + return received; +} + +static void mal_reset(struct mal_instance *mal) +{ + int n = 10; + + MAL_DBG(mal, "reset" NL); + + set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR); + + /* Wait for reset to complete (1 system clock) */ + while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n) + --n; + + if (unlikely(!n)) + printk(KERN_ERR "mal%d: reset timeout\n", mal->index); +} + +int mal_get_regs_len(struct mal_instance *mal) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct mal_regs); +} + +void *mal_dump_regs(struct mal_instance *mal, void *buf) +{ + struct emac_ethtool_regs_subhdr *hdr = buf; + struct mal_regs *regs = (struct mal_regs *)(hdr + 1); + int i; + + hdr->version = mal->version; + hdr->index = mal->index; + + regs->tx_count = mal->num_tx_chans; + regs->rx_count = mal->num_rx_chans; + + regs->cfg = get_mal_dcrn(mal, MAL_CFG); + regs->esr = get_mal_dcrn(mal, MAL_ESR); + regs->ier = get_mal_dcrn(mal, MAL_IER); + regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR); + regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR); + regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR); + regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR); + regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR); + regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR); + regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR); + regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR); + + for (i = 0; i < regs->tx_count; ++i) + regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i)); + + for (i = 0; i < regs->rx_count; ++i) { + regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i)); + regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i)); + } + return regs + 1; +} + +static int mal_probe(struct 
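+/*
+ * mal_dump_regs() above fills an emac_ethtool_regs_subhdr followed by a
+ * struct mal_regs snapshot and returns the address just past what it wrote
+ * (regs + 1), so callers can append further register blocks to the same
+ * buffer; mal_get_regs_len() sizes this MAL contribution.
+ */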
platform_device *ofdev) +{ + struct mal_instance *mal; + int err = 0, i, bd_size; + int index = mal_count++; + unsigned int dcr_base; + const u32 *prop; + u32 cfg; + unsigned long irqflags; + irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde; + + mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL); + if (!mal) + return -ENOMEM; + + mal->index = index; + mal->ofdev = ofdev; + mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1; + + MAL_DBG(mal, "probe" NL); + + prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL); + if (prop == NULL) { + printk(KERN_ERR + "mal%d: can't find MAL num-tx-chans property!\n", + index); + err = -ENODEV; + goto fail; + } + mal->num_tx_chans = prop[0]; + + prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL); + if (prop == NULL) { + printk(KERN_ERR + "mal%d: can't find MAL num-rx-chans property!\n", + index); + err = -ENODEV; + goto fail; + } + mal->num_rx_chans = prop[0]; + + dcr_base = dcr_resource_start(ofdev->dev.of_node, 0); + if (dcr_base == 0) { + printk(KERN_ERR + "mal%d: can't find DCR resource!\n", index); + err = -ENODEV; + goto fail; + } + mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100); + if (!DCR_MAP_OK(mal->dcr_host)) { + printk(KERN_ERR + "mal%d: failed to map DCRs !\n", index); + err = -ENODEV; + goto fail; + } + + if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) { +#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \ + defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR) + mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | + MAL_FTR_COMMON_ERR_INT); +#else + printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", + ofdev->dev.of_node->full_name); + err = -ENODEV; + goto fail; +#endif + } + + mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); + mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); + mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2); + + if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) { + mal->txde_irq = mal->rxde_irq = mal->serr_irq; + } else { + mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3); + mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4); + } + + if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ || + mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ || + mal->rxde_irq == NO_IRQ) { + printk(KERN_ERR + "mal%d: failed to map interrupts !\n", index); + err = -ENODEV; + goto fail_unmap; + } + + INIT_LIST_HEAD(&mal->poll_list); + INIT_LIST_HEAD(&mal->list); + spin_lock_init(&mal->lock); + + init_dummy_netdev(&mal->dummy_dev); + + netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll, + CONFIG_IBM_EMAC_POLL_WEIGHT); + + /* Load power-on reset defaults */ + mal_reset(mal); + + /* Set the MAL configuration register */ + cfg = (mal->version == 2) ? 
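+/*
+ * Summary of the device-tree contract read above: the MAL node must carry
+ * "num-tx-chans" and "num-rx-chans" cells, a DCR resource for the register
+ * block, and five interrupt specifiers in the order TXEOB, RXEOB, SERR,
+ * TXDE, RXDE (with MAL_FTR_COMMON_ERR_INT the TXDE/RXDE handlers share the
+ * SERR line and only the first three specifiers are mapped).
+ */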
MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT; + cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA; + + /* Current Axon is not happy with priority being non-0, it can + * deadlock, fix it up here + */ + if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon")) + cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10); + + /* Apply configuration */ + set_mal_dcrn(mal, MAL_CFG, cfg); + + /* Allocate space for BD rings */ + BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32); + BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32); + + bd_size = sizeof(struct mal_descriptor) * + (NUM_TX_BUFF * mal->num_tx_chans + + NUM_RX_BUFF * mal->num_rx_chans); + mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, + GFP_KERNEL); + if (mal->bd_virt == NULL) { + err = -ENOMEM; + goto fail_unmap; + } + + for (i = 0; i < mal->num_tx_chans; ++i) + set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + + sizeof(struct mal_descriptor) * + mal_tx_bd_offset(mal, i)); + + for (i = 0; i < mal->num_rx_chans; ++i) + set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma + + sizeof(struct mal_descriptor) * + mal_rx_bd_offset(mal, i)); + + if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) { + irqflags = IRQF_SHARED; + hdlr_serr = hdlr_txde = hdlr_rxde = mal_int; + } else { + irqflags = 0; + hdlr_serr = mal_serr; + hdlr_txde = mal_txde; + hdlr_rxde = mal_rxde; + } + + err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal); + if (err) + goto fail2; + err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal); + if (err) + goto fail3; + err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); + if (err) + goto fail4; + err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal); + if (err) + goto fail5; + err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); + if (err) + goto fail6; + + /* Enable all MAL SERR interrupt sources */ + set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS); + + /* Enable EOB interrupt */ + mal_enable_eob_irq(mal); + + printk(KERN_INFO + "MAL v%d %s, %d TX channels, %d RX channels\n", + mal->version, ofdev->dev.of_node->full_name, + mal->num_tx_chans, mal->num_rx_chans); + + /* Advertise this instance to the rest of the world */ + wmb(); + platform_set_drvdata(ofdev, mal); + + mal_dbg_register(mal); + + return 0; + + fail6: + free_irq(mal->rxde_irq, mal); + fail5: + free_irq(mal->txeob_irq, mal); + fail4: + free_irq(mal->txde_irq, mal); + fail3: + free_irq(mal->serr_irq, mal); + fail2: + dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma); + fail_unmap: + dcr_unmap(mal->dcr_host, 0x100); + fail: + kfree(mal); + + return err; +} + +static int mal_remove(struct platform_device *ofdev) +{ + struct mal_instance *mal = platform_get_drvdata(ofdev); + + MAL_DBG(mal, "remove" NL); + + /* Synchronize with scheduled polling */ + napi_disable(&mal->napi); + + if (!list_empty(&mal->list)) + /* This is *very* bad */ + WARN(1, KERN_EMERG + "mal%d: commac list is not empty on remove!\n", + mal->index); + + free_irq(mal->serr_irq, mal); + free_irq(mal->txde_irq, mal); + free_irq(mal->txeob_irq, mal); + free_irq(mal->rxde_irq, mal); + free_irq(mal->rxeob_irq, mal); + + mal_reset(mal); + + mal_dbg_unregister(mal); + + dma_free_coherent(&ofdev->dev, + sizeof(struct mal_descriptor) * + (NUM_TX_BUFF * mal->num_tx_chans + + NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt, + mal->bd_dma); + kfree(mal); + + return 0; +} + +static const struct of_device_id mal_platform_match[] = +{ + { + .compatible = "ibm,mcmal", + }, + { + .compatible = "ibm,mcmal2", + 
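+/*
+ * Worked example of the descriptor-ring layout programmed in mal_probe()
+ * (assuming, for illustration, NUM_TX_BUFF = 64, NUM_RX_BUFF = 128, one TX
+ * and one RX channel; struct mal_descriptor is 8 bytes):
+ *
+ *   bd_size                  = 8 * (64 * 1 + 128 * 1) = 1536 bytes
+ *   mal_tx_bd_offset(mal, 0) = 0   -> MAL_TXCTPR(0) = bd_dma
+ *   mal_rx_bd_offset(mal, 0) = 64  -> MAL_RXCTPR(0) = bd_dma + 512
+ *
+ * i.e. all TX rings are laid out first, followed by all RX rings, in one
+ * coherent DMA allocation.
+ */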
}, + /* Backward compat */ + { + .type = "mcmal-dma", + .compatible = "ibm,mcmal", + }, + { + .type = "mcmal-dma", + .compatible = "ibm,mcmal2", + }, + {}, +}; + +static struct platform_driver mal_of_driver = { + .driver = { + .name = "mcmal", + .of_match_table = mal_platform_match, + }, + .probe = mal_probe, + .remove = mal_remove, +}; + +int __init mal_init(void) +{ + return platform_driver_register(&mal_of_driver); +} + +void mal_exit(void) +{ + platform_driver_unregister(&mal_of_driver); +} diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h new file mode 100644 index 000000000..eeade2ea8 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/mal.h @@ -0,0 +1,312 @@ +/* + * drivers/net/ethernet/ibm/emac/mal.h + * + * Memory Access Layer (MAL) support + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __IBM_NEWEMAC_MAL_H +#define __IBM_NEWEMAC_MAL_H + +/* + * There are some variations on the MAL, we express them in this driver as + * MAL Version 1 and 2 though that doesn't match any IBM terminology. + * + * We call MAL 1 the version in 405GP, 405GPR, 405EP, 440EP, 440GR and + * NP405H. + * + * We call MAL 2 the version in 440GP, 440GX, 440SP, 440SPE and Axon + * + * The driver expects a "version" property in the emac node containing + * a number 1 or 2. New device-trees for EMAC capable platforms are thus + * required to include that when porting to arch/powerpc. 
+ */ + +/* MALx DCR registers */ +#define MAL_CFG 0x00 +#define MAL_CFG_SR 0x80000000 +#define MAL_CFG_PLBB 0x00004000 +#define MAL_CFG_OPBBL 0x00000080 +#define MAL_CFG_EOPIE 0x00000004 +#define MAL_CFG_LEA 0x00000002 +#define MAL_CFG_SD 0x00000001 + +/* MAL V1 CFG bits */ +#define MAL1_CFG_PLBP_MASK 0x00c00000 +#define MAL1_CFG_PLBP_10 0x00800000 +#define MAL1_CFG_GA 0x00200000 +#define MAL1_CFG_OA 0x00100000 +#define MAL1_CFG_PLBLE 0x00080000 +#define MAL1_CFG_PLBT_MASK 0x00078000 +#define MAL1_CFG_DEFAULT (MAL1_CFG_PLBP_10 | MAL1_CFG_PLBT_MASK) + +/* MAL V2 CFG bits */ +#define MAL2_CFG_RPP_MASK 0x00c00000 +#define MAL2_CFG_RPP_10 0x00800000 +#define MAL2_CFG_RMBS_MASK 0x00300000 +#define MAL2_CFG_WPP_MASK 0x000c0000 +#define MAL2_CFG_WPP_10 0x00080000 +#define MAL2_CFG_WMBS_MASK 0x00030000 +#define MAL2_CFG_PLBLE 0x00008000 +#define MAL2_CFG_DEFAULT (MAL2_CFG_RMBS_MASK | MAL2_CFG_WMBS_MASK | \ + MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10) + +#define MAL_ESR 0x01 +#define MAL_ESR_EVB 0x80000000 +#define MAL_ESR_CIDT 0x40000000 +#define MAL_ESR_CID_MASK 0x3e000000 +#define MAL_ESR_CID_SHIFT 25 +#define MAL_ESR_DE 0x00100000 +#define MAL_ESR_OTE 0x00040000 +#define MAL_ESR_OSE 0x00020000 +#define MAL_ESR_PEIN 0x00010000 +#define MAL_ESR_DEI 0x00000010 +#define MAL_ESR_OTEI 0x00000004 +#define MAL_ESR_OSEI 0x00000002 +#define MAL_ESR_PBEI 0x00000001 + +/* MAL V1 ESR bits */ +#define MAL1_ESR_ONE 0x00080000 +#define MAL1_ESR_ONEI 0x00000008 + +/* MAL V2 ESR bits */ +#define MAL2_ESR_PTE 0x00800000 +#define MAL2_ESR_PRE 0x00400000 +#define MAL2_ESR_PWE 0x00200000 +#define MAL2_ESR_PTEI 0x00000080 +#define MAL2_ESR_PREI 0x00000040 +#define MAL2_ESR_PWEI 0x00000020 + + +#define MAL_IER 0x02 +/* MAL IER bits */ +#define MAL_IER_DE 0x00000010 +#define MAL_IER_OTE 0x00000004 +#define MAL_IER_OE 0x00000002 +#define MAL_IER_PE 0x00000001 + +/* PLB read/write/timeout errors */ +#define MAL_IER_PTE 0x00000080 +#define MAL_IER_PRE 0x00000040 +#define MAL_IER_PWE 0x00000020 + +#define MAL_IER_SOC_EVENTS (MAL_IER_PTE | MAL_IER_PRE | MAL_IER_PWE) +#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_DE | \ + MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) + +#define MAL_TXCASR 0x04 +#define MAL_TXCARR 0x05 +#define MAL_TXEOBISR 0x06 +#define MAL_TXDEIR 0x07 +#define MAL_RXCASR 0x10 +#define MAL_RXCARR 0x11 +#define MAL_RXEOBISR 0x12 +#define MAL_RXDEIR 0x13 +#define MAL_TXCTPR(n) ((n) + 0x20) +#define MAL_RXCTPR(n) ((n) + 0x40) +#define MAL_RCBS(n) ((n) + 0x60) + +/* In reality MAL can handle TX buffers up to 4095 bytes long, + * but this isn't a good round number :) --ebs + */ +#define MAL_MAX_TX_SIZE 4080 +#define MAL_MAX_RX_SIZE 4080 + +static inline int mal_rx_size(int len) +{ + len = (len + 0xf) & ~0xf; + return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len; +} + +static inline int mal_tx_chunks(int len) +{ + return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE; +} + +#define MAL_CHAN_MASK(n) (0x80000000 >> (n)) + +/* MAL Buffer Descriptor structure */ +struct mal_descriptor { + u16 ctrl; /* MAL / Commac status control bits */ + u16 data_len; /* Max length is 4K-1 (12 bits) */ + u32 data_ptr; /* pointer to actual data buffer */ +}; + +/* the following defines are for the MadMAL status and control registers. 
*/ +/* MADMAL transmit and receive status/control bits */ +#define MAL_RX_CTRL_EMPTY 0x8000 +#define MAL_RX_CTRL_WRAP 0x4000 +#define MAL_RX_CTRL_CM 0x2000 +#define MAL_RX_CTRL_LAST 0x1000 +#define MAL_RX_CTRL_FIRST 0x0800 +#define MAL_RX_CTRL_INTR 0x0400 +#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST) +#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE) + +#define MAL_TX_CTRL_READY 0x8000 +#define MAL_TX_CTRL_WRAP 0x4000 +#define MAL_TX_CTRL_CM 0x2000 +#define MAL_TX_CTRL_LAST 0x1000 +#define MAL_TX_CTRL_INTR 0x0400 + +struct mal_commac_ops { + void (*poll_tx) (void *dev); + int (*poll_rx) (void *dev, int budget); + int (*peek_rx) (void *dev); + void (*rxde) (void *dev); +}; + +struct mal_commac { + struct mal_commac_ops *ops; + void *dev; + struct list_head poll_list; + long flags; +#define MAL_COMMAC_RX_STOPPED 0 +#define MAL_COMMAC_POLL_DISABLED 1 + u32 tx_chan_mask; + u32 rx_chan_mask; + struct list_head list; +}; + +struct mal_instance { + int version; + dcr_host_t dcr_host; + + int num_tx_chans; /* Number of TX channels */ + int num_rx_chans; /* Number of RX channels */ + int txeob_irq; /* TX End Of Buffer IRQ */ + int rxeob_irq; /* RX End Of Buffer IRQ */ + int txde_irq; /* TX Descriptor Error IRQ */ + int rxde_irq; /* RX Descriptor Error IRQ */ + int serr_irq; /* MAL System Error IRQ */ + + struct list_head poll_list; + struct napi_struct napi; + + struct list_head list; + u32 tx_chan_mask; + u32 rx_chan_mask; + + dma_addr_t bd_dma; + struct mal_descriptor *bd_virt; + + struct platform_device *ofdev; + int index; + spinlock_t lock; + + struct net_device dummy_dev; + + unsigned int features; +}; + +static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg) +{ + return dcr_read(mal->dcr_host, reg); +} + +static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val) +{ + dcr_write(mal->dcr_host, reg, val); +} + +/* Features of various MAL implementations */ + +/* Set if you have interrupt coalescing and you have to clear the SDR + * register for TXEOB and RXEOB interrupts to work + */ +#define MAL_FTR_CLEAR_ICINTSTAT 0x00000001 + +/* Set if your MAL has SERR, TXDE, and RXDE OR'd into a single UIC + * interrupt + */ +#define MAL_FTR_COMMON_ERR_INT 0x00000002 + +enum { + MAL_FTRS_ALWAYS = 0, + + MAL_FTRS_POSSIBLE = +#ifdef CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT + MAL_FTR_CLEAR_ICINTSTAT | +#endif +#ifdef CONFIG_IBM_EMAC_MAL_COMMON_ERR + MAL_FTR_COMMON_ERR_INT | +#endif + 0, +}; + +static inline int mal_has_feature(struct mal_instance *dev, + unsigned long feature) +{ + return (MAL_FTRS_ALWAYS & feature) || + (MAL_FTRS_POSSIBLE & dev->features & feature); +} + +/* Register MAL devices */ +int mal_init(void); +void mal_exit(void); + +int mal_register_commac(struct mal_instance *mal, + struct mal_commac *commac); +void mal_unregister_commac(struct mal_instance *mal, + struct mal_commac *commac); +int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size); + +/* Returns BD ring offset for a particular channel + (in 'struct mal_descriptor' elements) +*/ +int mal_tx_bd_offset(struct mal_instance *mal, int channel); +int mal_rx_bd_offset(struct mal_instance *mal, int channel); + +void mal_enable_tx_channel(struct mal_instance *mal, int channel); +void mal_disable_tx_channel(struct mal_instance *mal, int channel); +void mal_enable_rx_channel(struct mal_instance *mal, int channel); +void mal_disable_rx_channel(struct mal_instance *mal, int channel); + +void mal_poll_disable(struct mal_instance *mal, struct 
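+/*
+ * Quick worked examples for the helpers defined earlier in this header:
+ *
+ *   mal_rx_size(1518)   = (1518 + 0xf) & ~0xf = 1520 (16-byte granularity,
+ *                         capped at MAL_MAX_RX_SIZE)
+ *   mal_tx_chunks(9000) = (9000 + 4079) / 4080 = 3 descriptors per frame
+ *   MAL_CHAN_MASK(2)    = 0x80000000 >> 2 = 0x20000000
+ *
+ * mal_set_rcbs() writes size >> 4 into MAL_RCBS(channel), so the value
+ * programmed for a 1520-byte buffer is 95.
+ */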
mal_commac *commac); +void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac); + +/* Add/remove EMAC to/from MAL polling list */ +void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac); +void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac); + +/* Ethtool MAL registers */ +struct mal_regs { + u32 tx_count; + u32 rx_count; + + u32 cfg; + u32 esr; + u32 ier; + u32 tx_casr; + u32 tx_carr; + u32 tx_eobisr; + u32 tx_deir; + u32 rx_casr; + u32 rx_carr; + u32 rx_eobisr; + u32 rx_deir; + u32 tx_ctpr[32]; + u32 rx_ctpr[32]; + u32 rcbs[32]; +}; + +int mal_get_regs_len(struct mal_instance *mal); +void *mal_dump_regs(struct mal_instance *mal, void *buf); + +#endif /* __IBM_NEWEMAC_MAL_H */ diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c new file mode 100644 index 000000000..d3b9d1033 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/phy.c @@ -0,0 +1,541 @@ +/* + * drivers/net/ethernet/ibm/emac/phy.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, PHY support. + * Borrowed from sungem_phy.c, though I only kept the generic MII + * driver for now. + * + * This file should be shared with other drivers or eventually + * merged as the "low level" part of miilib + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) + * (c) 2004-2005, Eugene Surovegin + * + */ +#include +#include +#include +#include +#include +#include +#include + +#include "emac.h" +#include "phy.h" + +#define phy_read _phy_read +#define phy_write _phy_write + +static inline int _phy_read(struct mii_phy *phy, int reg) +{ + return phy->mdio_read(phy->dev, phy->address, reg); +} + +static inline void _phy_write(struct mii_phy *phy, int reg, int val) +{ + phy->mdio_write(phy->dev, phy->address, reg, val); +} + +static inline int gpcs_phy_read(struct mii_phy *phy, int reg) +{ + return phy->mdio_read(phy->dev, phy->gpcs_address, reg); +} + +static inline void gpcs_phy_write(struct mii_phy *phy, int reg, int val) +{ + phy->mdio_write(phy->dev, phy->gpcs_address, reg, val); +} + +int emac_mii_reset_phy(struct mii_phy *phy) +{ + int val; + int limit = 10000; + + val = phy_read(phy, MII_BMCR); + val &= ~(BMCR_ISOLATE | BMCR_ANENABLE); + val |= BMCR_RESET; + phy_write(phy, MII_BMCR, val); + + udelay(300); + + while (--limit) { + val = phy_read(phy, MII_BMCR); + if (val >= 0 && (val & BMCR_RESET) == 0) + break; + udelay(10); + } + if ((val & BMCR_ISOLATE) && limit > 0) + phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); + + return limit <= 0; +} + +int emac_mii_reset_gpcs(struct mii_phy *phy) +{ + int val; + int limit = 10000; + + val = gpcs_phy_read(phy, MII_BMCR); + val &= ~(BMCR_ISOLATE | BMCR_ANENABLE); + val |= BMCR_RESET; + gpcs_phy_write(phy, MII_BMCR, val); + + udelay(300); + + while (--limit) { + val = gpcs_phy_read(phy, MII_BMCR); + if (val >= 0 && (val & BMCR_RESET) == 0) + break; + udelay(10); + } + if ((val & BMCR_ISOLATE) && limit > 0) + gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); + + if (limit > 0 && phy->mode == PHY_MODE_SGMII) { + /* Configure GPCS interface to recommended setting for SGMII */ + gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */ + gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */ + gpcs_phy_write(phy, 0x00, 0x0140); /* 1Gbps, FDX */ + } + + return limit <= 0; +} + +static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) +{ + int ctl, adv; + + phy->autoneg = 
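+/*
+ * Note on the reset helpers above: after setting BMCR_RESET they busy-wait
+ * up to roughly 100 ms (10000 iterations of udelay(10), after an initial
+ * 300 us delay) for the bit to self-clear, and return non-zero only on
+ * timeout, so a zero return means the PHY (or GPCS) came out of reset.
+ */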
AUTONEG_ENABLE; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + phy->advertising = advertise; + + ctl = phy_read(phy, MII_BMCR); + if (ctl < 0) + return ctl; + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + + /* First clear the PHY */ + phy_write(phy, MII_BMCR, ctl); + + /* Setup standard advertise */ + adv = phy_read(phy, MII_ADVERTISE); + if (adv < 0) + return adv; + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (advertise & ADVERTISED_Pause) + adv |= ADVERTISE_PAUSE_CAP; + if (advertise & ADVERTISED_Asym_Pause) + adv |= ADVERTISE_PAUSE_ASYM; + phy_write(phy, MII_ADVERTISE, adv); + + if (phy->features & + (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { + adv = phy_read(phy, MII_CTRL1000); + if (adv < 0) + return adv; + adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + if (advertise & ADVERTISED_1000baseT_Full) + adv |= ADVERTISE_1000FULL; + if (advertise & ADVERTISED_1000baseT_Half) + adv |= ADVERTISE_1000HALF; + phy_write(phy, MII_CTRL1000, adv); + } + + /* Start/Restart aneg */ + ctl = phy_read(phy, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) +{ + int ctl; + + phy->autoneg = AUTONEG_DISABLE; + phy->speed = speed; + phy->duplex = fd; + phy->pause = phy->asym_pause = 0; + + ctl = phy_read(phy, MII_BMCR); + if (ctl < 0) + return ctl; + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + + /* First clear the PHY */ + phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + + /* Select speed & duplex */ + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + ctl |= BMCR_SPEED100; + break; + case SPEED_1000: + ctl |= BMCR_SPEED1000; + break; + default: + return -EINVAL; + } + if (fd == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_poll_link(struct mii_phy *phy) +{ + int status; + + /* Clear latched value with dummy read */ + phy_read(phy, MII_BMSR); + status = phy_read(phy, MII_BMSR); + if (status < 0 || (status & BMSR_LSTATUS) == 0) + return 0; + if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE)) + return 0; + return 1; +} + +static int genmii_read_link(struct mii_phy *phy) +{ + if (phy->autoneg == AUTONEG_ENABLE) { + int glpa = 0; + int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); + if (lpa < 0) + return lpa; + + if (phy->features & + (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { + int adv = phy_read(phy, MII_CTRL1000); + glpa = phy_read(phy, MII_STAT1000); + + if (glpa < 0 || adv < 0) + return adv; + + glpa &= adv << 2; + } + + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + + if (glpa & (LPA_1000FULL | LPA_1000HALF)) { + phy->speed = SPEED_1000; + if (glpa & LPA_1000FULL) + phy->duplex = DUPLEX_FULL; + } else if (lpa & (LPA_100FULL | LPA_100HALF)) { + phy->speed = SPEED_100; + if (lpa & LPA_100FULL) + phy->duplex = DUPLEX_FULL; + } else if (lpa & LPA_10FULL) + phy->duplex = DUPLEX_FULL; + + if (phy->duplex == DUPLEX_FULL) { + phy->pause = lpa & LPA_PAUSE_CAP ? 
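+/*
+ * The "glpa &= adv << 2" step above lines up the local MII_CTRL1000
+ * advertisement bits with the partner's MII_STAT1000 bits, e.g.
+ * ADVERTISE_1000FULL (0x0200) << 2 == LPA_1000FULL (0x0800) and
+ * ADVERTISE_1000HALF (0x0100) << 2 == LPA_1000HALF (0x0400), so glpa ends
+ * up holding only the gigabit modes both ends agreed on.
+ */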
1 : 0; + phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; + } + } else { + int bmcr = phy_read(phy, MII_BMCR); + if (bmcr < 0) + return bmcr; + + if (bmcr & BMCR_FULLDPLX) + phy->duplex = DUPLEX_FULL; + else + phy->duplex = DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + phy->speed = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + phy->speed = SPEED_100; + else + phy->speed = SPEED_10; + + phy->pause = phy->asym_pause = 0; + } + return 0; +} + +/* Generic implementation for most 10/100/1000 PHYs */ +static struct mii_phy_ops generic_phy_ops = { + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def genmii_phy_def = { + .phy_id = 0x00000000, + .phy_id_mask = 0x00000000, + .name = "Generic MII", + .ops = &generic_phy_ops +}; + +/* CIS8201 */ +#define MII_CIS8201_10BTCSR 0x16 +#define TENBTCSR_ECHO_DISABLE 0x2000 +#define MII_CIS8201_EPCR 0x17 +#define EPCR_MODE_MASK 0x3000 +#define EPCR_GMII_MODE 0x0000 +#define EPCR_RGMII_MODE 0x1000 +#define EPCR_TBI_MODE 0x2000 +#define EPCR_RTBI_MODE 0x3000 +#define MII_CIS8201_ACSR 0x1c +#define ACSR_PIN_PRIO_SELECT 0x0004 + +static int cis8201_init(struct mii_phy *phy) +{ + int epcr; + + epcr = phy_read(phy, MII_CIS8201_EPCR); + if (epcr < 0) + return epcr; + + epcr &= ~EPCR_MODE_MASK; + + switch (phy->mode) { + case PHY_MODE_TBI: + epcr |= EPCR_TBI_MODE; + break; + case PHY_MODE_RTBI: + epcr |= EPCR_RTBI_MODE; + break; + case PHY_MODE_GMII: + epcr |= EPCR_GMII_MODE; + break; + case PHY_MODE_RGMII: + default: + epcr |= EPCR_RGMII_MODE; + } + + phy_write(phy, MII_CIS8201_EPCR, epcr); + + /* MII regs override strap pins */ + phy_write(phy, MII_CIS8201_ACSR, + phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT); + + /* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */ + phy_write(phy, MII_CIS8201_10BTCSR, + phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE); + + return 0; +} + +static struct mii_phy_ops cis8201_phy_ops = { + .init = cis8201_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def cis8201_phy_def = { + .phy_id = 0x000fc410, + .phy_id_mask = 0x000ffff0, + .name = "CIS8201 Gigabit Ethernet", + .ops = &cis8201_phy_ops +}; + +static struct mii_phy_def bcm5248_phy_def = { + + .phy_id = 0x0143bc00, + .phy_id_mask = 0x0ffffff0, + .name = "BCM5248 10/100 SMII Ethernet", + .ops = &generic_phy_ops +}; + +static int m88e1111_init(struct mii_phy *phy) +{ + pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__); + phy_write(phy, 0x14, 0x0ce3); + phy_write(phy, 0x18, 0x4101); + phy_write(phy, 0x09, 0x0e00); + phy_write(phy, 0x04, 0x01e1); + phy_write(phy, 0x00, 0x9140); + phy_write(phy, 0x00, 0x1140); + + return 0; +} + +static int m88e1112_init(struct mii_phy *phy) +{ + /* + * Marvell 88E1112 PHY needs to have the SGMII MAC + * interace (page 2) properly configured to + * communicate with the 460EX/GT GPCS interface. 
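+ * (Summary of the writes below, per their inline notes: select page 2,
+ * force 1000 Mb/s, set the auto-negotiation bypass bit, reset the
+ * interface, then switch back to page 0.)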
+ */ + + u16 reg_short; + + pr_debug("%s: Marvell 88E1112 Ethernet\n", __func__); + + /* Set access to Page 2 */ + phy_write(phy, 0x16, 0x0002); + + phy_write(phy, 0x00, 0x0040); /* 1Gbps */ + reg_short = (u16)(phy_read(phy, 0x1a)); + reg_short |= 0x8000; /* bypass Auto-Negotiation */ + phy_write(phy, 0x1a, reg_short); + emac_mii_reset_phy(phy); /* reset MAC interface */ + + /* Reset access to Page 0 */ + phy_write(phy, 0x16, 0x0000); + + return 0; +} + +static int et1011c_init(struct mii_phy *phy) +{ + u16 reg_short; + + reg_short = (u16)(phy_read(phy, 0x16)); + reg_short &= ~(0x7); + reg_short |= 0x6; /* RGMII Trace Delay*/ + phy_write(phy, 0x16, reg_short); + + reg_short = (u16)(phy_read(phy, 0x17)); + reg_short &= ~(0x40); + phy_write(phy, 0x17, reg_short); + + phy_write(phy, 0x1c, 0x74f0); + return 0; +} + +static struct mii_phy_ops et1011c_phy_ops = { + .init = et1011c_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def et1011c_phy_def = { + .phy_id = 0x0282f000, + .phy_id_mask = 0x0fffff00, + .name = "ET1011C Gigabit Ethernet", + .ops = &et1011c_phy_ops +}; + + + + + +static struct mii_phy_ops m88e1111_phy_ops = { + .init = m88e1111_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def m88e1111_phy_def = { + + .phy_id = 0x01410CC0, + .phy_id_mask = 0x0ffffff0, + .name = "Marvell 88E1111 Ethernet", + .ops = &m88e1111_phy_ops, +}; + +static struct mii_phy_ops m88e1112_phy_ops = { + .init = m88e1112_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def m88e1112_phy_def = { + .phy_id = 0x01410C90, + .phy_id_mask = 0x0ffffff0, + .name = "Marvell 88E1112 Ethernet", + .ops = &m88e1112_phy_ops, +}; + +static struct mii_phy_def *mii_phy_table[] = { + &et1011c_phy_def, + &cis8201_phy_def, + &bcm5248_phy_def, + &m88e1111_phy_def, + &m88e1112_phy_def, + &genmii_phy_def, + NULL +}; + +int emac_mii_phy_probe(struct mii_phy *phy, int address) +{ + struct mii_phy_def *def; + int i; + u32 id; + + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->address = address; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + + /* Take PHY out of isolate mode and reset it. */ + if (emac_mii_reset_phy(phy)) + return -ENODEV; + + /* Read ID and find matching entry */ + id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2); + for (i = 0; (def = mii_phy_table[i]) != NULL; i++) + if ((id & def->phy_id_mask) == def->phy_id) + break; + /* Should never be NULL (we have a generic entry), but... 
*/ + if (!def) + return -ENODEV; + + phy->def = def; + + /* Determine PHY features if needed */ + phy->features = def->features; + if (!phy->features) { + u16 bmsr = phy_read(phy, MII_BMSR); + if (bmsr & BMSR_ANEGCAPABLE) + phy->features |= SUPPORTED_Autoneg; + if (bmsr & BMSR_10HALF) + phy->features |= SUPPORTED_10baseT_Half; + if (bmsr & BMSR_10FULL) + phy->features |= SUPPORTED_10baseT_Full; + if (bmsr & BMSR_100HALF) + phy->features |= SUPPORTED_100baseT_Half; + if (bmsr & BMSR_100FULL) + phy->features |= SUPPORTED_100baseT_Full; + if (bmsr & BMSR_ESTATEN) { + u16 esr = phy_read(phy, MII_ESTATUS); + if (esr & ESTATUS_1000_TFULL) + phy->features |= SUPPORTED_1000baseT_Full; + if (esr & ESTATUS_1000_THALF) + phy->features |= SUPPORTED_1000baseT_Half; + } + phy->features |= SUPPORTED_MII; + } + + /* Setup default advertising */ + phy->advertising = phy->features; + + return 0; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ibm/emac/phy.h b/drivers/net/ethernet/ibm/emac/phy.h new file mode 100644 index 000000000..d7e41ec37 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/phy.h @@ -0,0 +1,87 @@ +/* + * drivers/net/ethernet/ibm/emac/phy.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, PHY support + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Benjamin Herrenschmidt + * February 2003 + * + * Minor additions by Eugene Surovegin , 2004 + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This file basically duplicates sungem_phy.{c,h} with different PHYs + * supported. 
I'm looking into merging that in a single mii layer more + * flexible than mii.c + */ + +#ifndef __IBM_NEWEMAC_PHY_H +#define __IBM_NEWEMAC_PHY_H + +struct mii_phy; + +/* Operations supported by any kind of PHY */ +struct mii_phy_ops { + int (*init) (struct mii_phy * phy); + int (*suspend) (struct mii_phy * phy, int wol_options); + int (*setup_aneg) (struct mii_phy * phy, u32 advertise); + int (*setup_forced) (struct mii_phy * phy, int speed, int fd); + int (*poll_link) (struct mii_phy * phy); + int (*read_link) (struct mii_phy * phy); +}; + +/* Structure used to statically define an mii/gii based PHY */ +struct mii_phy_def { + u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ + u32 phy_id_mask; /* Significant bits */ + u32 features; /* Ethtool SUPPORTED_* defines or + 0 for autodetect */ + int magic_aneg; /* Autoneg does all speed test for us */ + const char *name; + const struct mii_phy_ops *ops; +}; + +/* An instance of a PHY, partially borrowed from mii_if_info */ +struct mii_phy { + struct mii_phy_def *def; + u32 advertising; /* Ethtool ADVERTISED_* defines */ + u32 features; /* Copied from mii_phy_def.features + or determined automaticaly */ + int address; /* PHY address */ + int mode; /* PHY mode */ + int gpcs_address; /* GPCS PHY address */ + + /* 1: autoneg enabled, 0: disabled */ + int autoneg; + + /* forced speed & duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + int asym_pause; + + /* Provided by host chip */ + struct net_device *dev; + int (*mdio_read) (struct net_device * dev, int addr, int reg); + void (*mdio_write) (struct net_device * dev, int addr, int reg, + int val); +}; + +/* Pass in a struct mii_phy with dev, mdio_read and mdio_write + * filled, the remaining fields will be filled on return + */ +int emac_mii_phy_probe(struct mii_phy *phy, int address); +int emac_mii_reset_phy(struct mii_phy *phy); +int emac_mii_reset_gpcs(struct mii_phy *phy); + +#endif /* __IBM_NEWEMAC_PHY_H */ diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c new file mode 100644 index 000000000..206ccbbae --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -0,0 +1,336 @@ +/* + * drivers/net/ethernet/ibm/emac/rgmii.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Matt Porter + * Copyright 2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#include +#include +#include +#include +#include + +#include "emac.h" +#include "debug.h" + +// XXX FIXME: Axon seems to support a subset of the RGMII, we +// thus need to take that into account and possibly change some +// of the bit settings below that don't seem to quite match the +// AXON spec + +/* RGMIIx_FER */ +#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4)) +#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4)) +#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4)) +#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4)) +#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4)) +#define RGMII_FER_MII(idx) RGMII_FER_GMII(idx) + +/* RGMIIx_SSR */ +#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) +#define RGMII_SSR_10(idx) (0x1 << ((idx) * 8)) +#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8)) +#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8)) + +/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */ +static inline int rgmii_valid_mode(int phy_mode) +{ + return phy_mode == PHY_MODE_GMII || + phy_mode == PHY_MODE_MII || + phy_mode == PHY_MODE_RGMII || + phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline const char *rgmii_mode_name(int mode) +{ + switch (mode) { + case PHY_MODE_RGMII: + return "RGMII"; + case PHY_MODE_TBI: + return "TBI"; + case PHY_MODE_GMII: + return "GMII"; + case PHY_MODE_MII: + return "MII"; + case PHY_MODE_RTBI: + return "RTBI"; + default: + BUG(); + } +} + +static inline u32 rgmii_mode_mask(int mode, int input) +{ + switch (mode) { + case PHY_MODE_RGMII: + return RGMII_FER_RGMII(input); + case PHY_MODE_TBI: + return RGMII_FER_TBI(input); + case PHY_MODE_GMII: + return RGMII_FER_GMII(input); + case PHY_MODE_MII: + return RGMII_FER_MII(input); + case PHY_MODE_RTBI: + return RGMII_FER_RTBI(input); + default: + BUG(); + } +} + +int rgmii_attach(struct platform_device *ofdev, int input, int mode) +{ + struct rgmii_instance *dev = platform_get_drvdata(ofdev); + struct rgmii_regs __iomem *p = dev->base; + + RGMII_DBG(dev, "attach(%d)" NL, input); + + /* Check if we need to attach to a RGMII */ + if (input < 0 || !rgmii_valid_mode(mode)) { + printk(KERN_ERR "%s: unsupported settings !\n", + ofdev->dev.of_node->full_name); + return -ENODEV; + } + + mutex_lock(&dev->lock); + + /* Enable this input */ + out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); + + printk(KERN_NOTICE "%s: input %d in %s mode\n", + ofdev->dev.of_node->full_name, input, rgmii_mode_name(mode)); + + ++dev->users; + + mutex_unlock(&dev->lock); + + return 0; +} + +void rgmii_set_speed(struct platform_device *ofdev, int input, int speed) +{ + struct rgmii_instance *dev = platform_get_drvdata(ofdev); + struct rgmii_regs __iomem *p = dev->base; + u32 ssr; + + mutex_lock(&dev->lock); + + ssr = in_be32(&p->ssr) & ~RGMII_SSR_MASK(input); + + RGMII_DBG(dev, "speed(%d, %d)" NL, input, speed); + + if (speed == SPEED_1000) + ssr |= RGMII_SSR_1000(input); + else if (speed == SPEED_100) + ssr |= RGMII_SSR_100(input); + else if (speed == SPEED_10) + ssr |= RGMII_SSR_10(input); + + out_be32(&p->ssr, ssr); + + mutex_unlock(&dev->lock); +} + +void rgmii_get_mdio(struct platform_device *ofdev, int input) +{ + struct rgmii_instance *dev = platform_get_drvdata(ofdev); + struct rgmii_regs __iomem *p = dev->base; + u32 fer; + + RGMII_DBG2(dev, "get_mdio(%d)" NL, input); + + if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO)) + return; + + mutex_lock(&dev->lock); + + fer = in_be32(&p->fer); + fer |= 0x00080000u >> input; + out_be32(&p->fer, fer); + (void)in_be32(&p->fer); + + DBG2(dev, " fer = 
0x%08x\n", fer); +} + +void rgmii_put_mdio(struct platform_device *ofdev, int input) +{ + struct rgmii_instance *dev = platform_get_drvdata(ofdev); + struct rgmii_regs __iomem *p = dev->base; + u32 fer; + + RGMII_DBG2(dev, "put_mdio(%d)" NL, input); + + if (!(dev->flags & EMAC_RGMII_FLAG_HAS_MDIO)) + return; + + fer = in_be32(&p->fer); + fer &= ~(0x00080000u >> input); + out_be32(&p->fer, fer); + (void)in_be32(&p->fer); + + DBG2(dev, " fer = 0x%08x\n", fer); + + mutex_unlock(&dev->lock); +} + +void rgmii_detach(struct platform_device *ofdev, int input) +{ + struct rgmii_instance *dev = platform_get_drvdata(ofdev); + struct rgmii_regs __iomem *p; + + BUG_ON(!dev || dev->users == 0); + p = dev->base; + + mutex_lock(&dev->lock); + + RGMII_DBG(dev, "detach(%d)" NL, input); + + /* Disable this input */ + out_be32(&p->fer, in_be32(&p->fer) & ~RGMII_FER_MASK(input)); + + --dev->users; + + mutex_unlock(&dev->lock); +} + +int rgmii_get_regs_len(struct platform_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct rgmii_regs); +} + +void *rgmii_dump_regs(struct platform_device *ofdev, void *buf) +{ + struct rgmii_instance *dev = platform_get_drvdata(ofdev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * rgmii ? if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs)); + return regs + 1; +} + + +static int rgmii_probe(struct platform_device *ofdev) +{ + struct device_node *np = ofdev->dev.of_node; + struct rgmii_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL); + if (dev == NULL) + goto err_gone; + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start, + sizeof(struct rgmii_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* Check for RGMII flags */ + if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL)) + dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; + + /* CAB lacks the right properties, fix this up */ + if (of_device_is_compatible(ofdev->dev.of_node, "ibm,rgmii-axon")) + dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; + + DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n", + in_be32(&dev->base->fer), in_be32(&dev->base->ssr)); + + /* Disable all inputs by default */ + out_be32(&dev->base->fer, 0); + + printk(KERN_INFO + "RGMII %s initialized with%s MDIO support\n", + ofdev->dev.of_node->full_name, + (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? 
"" : "out"); + + wmb(); + platform_set_drvdata(ofdev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int rgmii_remove(struct platform_device *ofdev) +{ + struct rgmii_instance *dev = platform_get_drvdata(ofdev); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static const struct of_device_id rgmii_match[] = +{ + { + .compatible = "ibm,rgmii", + }, + { + .type = "emac-rgmii", + }, + {}, +}; + +static struct platform_driver rgmii_driver = { + .driver = { + .name = "emac-rgmii", + .of_match_table = rgmii_match, + }, + .probe = rgmii_probe, + .remove = rgmii_remove, +}; + +int __init rgmii_init(void) +{ + return platform_driver_register(&rgmii_driver); +} + +void rgmii_exit(void) +{ + platform_driver_unregister(&rgmii_driver); +} diff --git a/drivers/net/ethernet/ibm/emac/rgmii.h b/drivers/net/ethernet/ibm/emac/rgmii.h new file mode 100644 index 000000000..d4f1374d1 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/rgmii.h @@ -0,0 +1,82 @@ +/* + * drivers/net/ethernet/ibm/emac/rgmii.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Based on ocp_zmii.h/ibm_emac_zmii.h + * Armin Kuster akuster@mvista.com + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __IBM_NEWEMAC_RGMII_H +#define __IBM_NEWEMAC_RGMII_H + +/* RGMII bridge type */ +#define RGMII_STANDARD 0 +#define RGMII_AXON 1 + +/* RGMII bridge */ +struct rgmii_regs { + u32 fer; /* Function enable register */ + u32 ssr; /* Speed select register */ +}; + +/* RGMII device */ +struct rgmii_instance { + struct rgmii_regs __iomem *base; + + /* RGMII bridge flags */ + int flags; +#define EMAC_RGMII_FLAG_HAS_MDIO 0x00000001 + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* number of EMACs using this RGMII bridge */ + int users; + + /* OF device instance */ + struct platform_device *ofdev; +}; + +#ifdef CONFIG_IBM_EMAC_RGMII + +int rgmii_init(void); +void rgmii_exit(void); +int rgmii_attach(struct platform_device *ofdev, int input, int mode); +void rgmii_detach(struct platform_device *ofdev, int input); +void rgmii_get_mdio(struct platform_device *ofdev, int input); +void rgmii_put_mdio(struct platform_device *ofdev, int input); +void rgmii_set_speed(struct platform_device *ofdev, int input, int speed); +int rgmii_get_regs_len(struct platform_device *ofdev); +void *rgmii_dump_regs(struct platform_device *ofdev, void *buf); + +#else + +# define rgmii_init() 0 +# define rgmii_exit() do { } while(0) +# define rgmii_attach(x,y,z) (-ENXIO) +# define rgmii_detach(x,y) do { } while(0) +# define rgmii_get_mdio(o,i) do { } while (0) +# define rgmii_put_mdio(o,i) do { } while (0) +# define rgmii_set_speed(x,y,z) do { } while(0) +# define rgmii_get_regs_len(x) 0 +# define rgmii_dump_regs(x,buf) (buf) +#endif /* !CONFIG_IBM_EMAC_RGMII */ + +#endif /* __IBM_NEWEMAC_RGMII_H */ diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c new file mode 100644 index 000000000..32cb6c900 --- /dev/null +++ 
b/drivers/net/ethernet/ibm/emac/tah.c @@ -0,0 +1,180 @@ +/* + * drivers/net/ethernet/ibm/emac/tah.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * Copyright (c) 2005 Eugene Surovegin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include + +#include "emac.h" +#include "core.h" + +int tah_attach(struct platform_device *ofdev, int channel) +{ + struct tah_instance *dev = platform_get_drvdata(ofdev); + + mutex_lock(&dev->lock); + /* Reset has been done at probe() time... nothing else to do for now */ + ++dev->users; + mutex_unlock(&dev->lock); + + return 0; +} + +void tah_detach(struct platform_device *ofdev, int channel) +{ + struct tah_instance *dev = platform_get_drvdata(ofdev); + + mutex_lock(&dev->lock); + --dev->users; + mutex_unlock(&dev->lock); +} + +void tah_reset(struct platform_device *ofdev) +{ + struct tah_instance *dev = platform_get_drvdata(ofdev); + struct tah_regs __iomem *p = dev->base; + int n; + + /* Reset TAH */ + out_be32(&p->mr, TAH_MR_SR); + n = 100; + while ((in_be32(&p->mr) & TAH_MR_SR) && n) + --n; + + if (unlikely(!n)) + printk(KERN_ERR "%s: reset timeout\n", + ofdev->dev.of_node->full_name); + + /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */ + out_be32(&p->mr, + TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | + TAH_MR_DIG); +} + +int tah_get_regs_len(struct platform_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct tah_regs); +} + +void *tah_dump_regs(struct platform_device *ofdev, void *buf) +{ + struct tah_instance *dev = platform_get_drvdata(ofdev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct tah_regs *regs = (struct tah_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * zmii ? 
if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct tah_regs)); + return regs + 1; +} + +static int tah_probe(struct platform_device *ofdev) +{ + struct device_node *np = ofdev->dev.of_node; + struct tah_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL); + if (dev == NULL) + goto err_gone; + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct tah_regs __iomem *)ioremap(regs.start, + sizeof(struct tah_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + platform_set_drvdata(ofdev, dev); + + /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ + tah_reset(ofdev); + + printk(KERN_INFO + "TAH %s initialized\n", ofdev->dev.of_node->full_name); + wmb(); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int tah_remove(struct platform_device *ofdev) +{ + struct tah_instance *dev = platform_get_drvdata(ofdev); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static const struct of_device_id tah_match[] = +{ + { + .compatible = "ibm,tah", + }, + /* For backward compat with old DT */ + { + .type = "tah", + }, + {}, +}; + +static struct platform_driver tah_driver = { + .driver = { + .name = "emac-tah", + .of_match_table = tah_match, + }, + .probe = tah_probe, + .remove = tah_remove, +}; + +int __init tah_init(void) +{ + return platform_driver_register(&tah_driver); +} + +void tah_exit(void) +{ + platform_driver_unregister(&tah_driver); +} diff --git a/drivers/net/ethernet/ibm/emac/tah.h b/drivers/net/ethernet/ibm/emac/tah.h new file mode 100644 index 000000000..4d5f336f0 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/tah.h @@ -0,0 +1,95 @@ +/* + * drivers/net/ethernet/ibm/emac/tah.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * Copyright (c) 2005 Eugene Surovegin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __IBM_NEWEMAC_TAH_H +#define __IBM_NEWEMAC_TAH_H + +/* TAH */ +struct tah_regs { + u32 revid; + u32 pad[3]; + u32 mr; + u32 ssr0; + u32 ssr1; + u32 ssr2; + u32 ssr3; + u32 ssr4; + u32 ssr5; + u32 tsr; +}; + + +/* TAH device */ +struct tah_instance { + struct tah_regs __iomem *base; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* number of EMACs using this TAH */ + int users; + + /* OF device instance */ + struct platform_device *ofdev; +}; + + +/* TAH engine */ +#define TAH_MR_CVR 0x80000000 +#define TAH_MR_SR 0x40000000 +#define TAH_MR_ST_256 0x01000000 +#define TAH_MR_ST_512 0x02000000 +#define TAH_MR_ST_768 0x03000000 +#define TAH_MR_ST_1024 0x04000000 +#define TAH_MR_ST_1280 0x05000000 +#define TAH_MR_ST_1536 0x06000000 +#define TAH_MR_TFS_16KB 0x00000000 +#define TAH_MR_TFS_2KB 0x00200000 +#define TAH_MR_TFS_4KB 0x00400000 +#define TAH_MR_TFS_6KB 0x00600000 +#define TAH_MR_TFS_8KB 0x00800000 +#define TAH_MR_TFS_10KB 0x00a00000 +#define TAH_MR_DTFP 0x00100000 +#define TAH_MR_DIG 0x00080000 + +#ifdef CONFIG_IBM_EMAC_TAH + +int tah_init(void); +void tah_exit(void); +int tah_attach(struct platform_device *ofdev, int channel); +void tah_detach(struct platform_device *ofdev, int channel); +void tah_reset(struct platform_device *ofdev); +int tah_get_regs_len(struct platform_device *ofdev); +void *tah_dump_regs(struct platform_device *ofdev, void *buf); + +#else + +# define tah_init() 0 +# define tah_exit() do { } while(0) +# define tah_attach(x,y) (-ENXIO) +# define tah_detach(x,y) do { } while(0) +# define tah_reset(x) do { } while(0) +# define tah_get_regs_len(x) 0 +# define tah_dump_regs(x,buf) (buf) + +#endif /* !CONFIG_IBM_EMAC_TAH */ + +#endif /* __IBM_NEWEMAC_TAH_H */ diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c new file mode 100644 index 000000000..8727b865e --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -0,0 +1,327 @@ +/* + * drivers/net/ethernet/ibm/emac/zmii.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Copyright 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#include +#include +#include +#include +#include + +#include "emac.h" +#include "core.h" + +/* ZMIIx_FER */ +#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4)) +#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \ + ZMII_FER_MDI(2) | ZMII_FER_MDI(3)) + +#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4)) +#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4)) +#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4)) + +/* ZMIIx_SSR */ +#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4)) +#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4)) +#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4)) + +/* ZMII only supports MII, RMII and SMII + * we also support autodetection for backward compatibility + */ +static inline int zmii_valid_mode(int mode) +{ + return mode == PHY_MODE_MII || + mode == PHY_MODE_RMII || + mode == PHY_MODE_SMII || + mode == PHY_MODE_NA; +} + +static inline const char *zmii_mode_name(int mode) +{ + switch (mode) { + case PHY_MODE_MII: + return "MII"; + case PHY_MODE_RMII: + return "RMII"; + case PHY_MODE_SMII: + return "SMII"; + default: + BUG(); + } +} + +static inline u32 zmii_mode_mask(int mode, int input) +{ + switch (mode) { + case PHY_MODE_MII: + return ZMII_FER_MII(input); + case PHY_MODE_RMII: + return ZMII_FER_RMII(input); + case PHY_MODE_SMII: + return ZMII_FER_SMII(input); + default: + return 0; + } +} + +int zmii_attach(struct platform_device *ofdev, int input, int *mode) +{ + struct zmii_instance *dev = platform_get_drvdata(ofdev); + struct zmii_regs __iomem *p = dev->base; + + ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); + + if (!zmii_valid_mode(*mode)) { + /* Probably an EMAC connected to RGMII, + * but it still may need ZMII for MDIO so + * we don't fail here. + */ + dev->users++; + return 0; + } + + mutex_lock(&dev->lock); + + /* Autodetect ZMII mode if not specified. + * This is only for backward compatibility with the old driver. + * Please, always specify PHY mode in your board port to avoid + * any surprises. + */ + if (dev->mode == PHY_MODE_NA) { + if (*mode == PHY_MODE_NA) { + u32 r = dev->fer_save; + + ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r); + + if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1))) + dev->mode = PHY_MODE_MII; + else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1))) + dev->mode = PHY_MODE_RMII; + else + dev->mode = PHY_MODE_SMII; + } else + dev->mode = *mode; + + printk(KERN_NOTICE "%s: bridge in %s mode\n", + ofdev->dev.of_node->full_name, + zmii_mode_name(dev->mode)); + } else { + /* All inputs must use the same mode */ + if (*mode != PHY_MODE_NA && *mode != dev->mode) { + printk(KERN_ERR + "%s: invalid mode %d specified for input %d\n", + ofdev->dev.of_node->full_name, *mode, input); + mutex_unlock(&dev->lock); + return -EINVAL; + } + } + + /* Report back correct PHY mode, + * it may be used during PHY initialization. 
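+ * For a bridge that is already configured, this is simply the mode
+ * chosen by (or autodetected for) the first EMAC that attached.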
+ */ + *mode = dev->mode; + + /* Enable this input */ + out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input)); + ++dev->users; + + mutex_unlock(&dev->lock); + + return 0; +} + +void zmii_get_mdio(struct platform_device *ofdev, int input) +{ + struct zmii_instance *dev = platform_get_drvdata(ofdev); + u32 fer; + + ZMII_DBG2(dev, "get_mdio(%d)" NL, input); + + mutex_lock(&dev->lock); + + fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL; + out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input)); +} + +void zmii_put_mdio(struct platform_device *ofdev, int input) +{ + struct zmii_instance *dev = platform_get_drvdata(ofdev); + + ZMII_DBG2(dev, "put_mdio(%d)" NL, input); + mutex_unlock(&dev->lock); +} + + +void zmii_set_speed(struct platform_device *ofdev, int input, int speed) +{ + struct zmii_instance *dev = platform_get_drvdata(ofdev); + u32 ssr; + + mutex_lock(&dev->lock); + + ssr = in_be32(&dev->base->ssr); + + ZMII_DBG(dev, "speed(%d, %d)" NL, input, speed); + + if (speed == SPEED_100) + ssr |= ZMII_SSR_SP(input); + else + ssr &= ~ZMII_SSR_SP(input); + + out_be32(&dev->base->ssr, ssr); + + mutex_unlock(&dev->lock); +} + +void zmii_detach(struct platform_device *ofdev, int input) +{ + struct zmii_instance *dev = platform_get_drvdata(ofdev); + + BUG_ON(!dev || dev->users == 0); + + mutex_lock(&dev->lock); + + ZMII_DBG(dev, "detach(%d)" NL, input); + + /* Disable this input */ + out_be32(&dev->base->fer, + in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input)); + + --dev->users; + + mutex_unlock(&dev->lock); +} + +int zmii_get_regs_len(struct platform_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct zmii_regs); +} + +void *zmii_dump_regs(struct platform_device *ofdev, void *buf) +{ + struct zmii_instance *dev = platform_get_drvdata(ofdev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * zmii ? 
if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs)); + return regs + 1; +} + +static int zmii_probe(struct platform_device *ofdev) +{ + struct device_node *np = ofdev->dev.of_node; + struct zmii_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL); + if (dev == NULL) + goto err_gone; + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + dev->mode = PHY_MODE_NA; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct zmii_regs __iomem *)ioremap(regs.start, + sizeof(struct zmii_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* We may need FER value for autodetection later */ + dev->fer_save = in_be32(&dev->base->fer); + + /* Disable all inputs by default */ + out_be32(&dev->base->fer, 0); + + printk(KERN_INFO + "ZMII %s initialized\n", ofdev->dev.of_node->full_name); + wmb(); + platform_set_drvdata(ofdev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int zmii_remove(struct platform_device *ofdev) +{ + struct zmii_instance *dev = platform_get_drvdata(ofdev); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static const struct of_device_id zmii_match[] = +{ + { + .compatible = "ibm,zmii", + }, + /* For backward compat with old DT */ + { + .type = "emac-zmii", + }, + {}, +}; + +static struct platform_driver zmii_driver = { + .driver = { + .name = "emac-zmii", + .of_match_table = zmii_match, + }, + .probe = zmii_probe, + .remove = zmii_remove, +}; + +int __init zmii_init(void) +{ + return platform_driver_register(&zmii_driver); +} + +void zmii_exit(void) +{ + platform_driver_unregister(&zmii_driver); +} diff --git a/drivers/net/ethernet/ibm/emac/zmii.h b/drivers/net/ethernet/ibm/emac/zmii.h new file mode 100644 index 000000000..0959c55b1 --- /dev/null +++ b/drivers/net/ethernet/ibm/emac/zmii.h @@ -0,0 +1,78 @@ +/* + * drivers/net/ethernet/ibm/emac/zmii.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * + * + * Based on the arch/ppc version of the driver: + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Copyright 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_ZMII_H +#define __IBM_NEWEMAC_ZMII_H + +/* ZMII bridge registers */ +struct zmii_regs { + u32 fer; /* Function enable reg */ + u32 ssr; /* Speed select reg */ + u32 smiirs; /* SMII status reg */ +}; + +/* ZMII device */ +struct zmii_instance { + struct zmii_regs __iomem *base; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* subset of PHY_MODE_XXXX */ + int mode; + + /* number of EMACs using this ZMII bridge */ + int users; + + /* FER value left by firmware */ + u32 fer_save; + + /* OF device instance */ + struct platform_device *ofdev; +}; + +#ifdef CONFIG_IBM_EMAC_ZMII + +int zmii_init(void); +void zmii_exit(void); +int zmii_attach(struct platform_device *ofdev, int input, int *mode); +void zmii_detach(struct platform_device *ofdev, int input); +void zmii_get_mdio(struct platform_device *ofdev, int input); +void zmii_put_mdio(struct platform_device *ofdev, int input); +void zmii_set_speed(struct platform_device *ofdev, int input, int speed); +int zmii_get_regs_len(struct platform_device *ocpdev); +void *zmii_dump_regs(struct platform_device *ofdev, void *buf); + +#else +# define zmii_init() 0 +# define zmii_exit() do { } while(0) +# define zmii_attach(x,y,z) (-ENXIO) +# define zmii_detach(x,y) do { } while(0) +# define zmii_get_mdio(x,y) do { } while(0) +# define zmii_put_mdio(x,y) do { } while(0) +# define zmii_set_speed(x,y,z) do { } while(0) +# define zmii_get_regs_len(x) 0 +# define zmii_dump_regs(x,buf) (buf) +#endif /* !CONFIG_IBM_EMAC_ZMII */ + +#endif /* __IBM_NEWEMAC_ZMII_H */ diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c new file mode 100644 index 000000000..18134766a --- /dev/null +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -0,0 +1,1663 @@ +/* + * IBM Power Virtual Ethernet Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ * + * Copyright (C) IBM Corporation, 2003, 2010 + * + * Authors: Dave Larson + * Santiago Leon + * Brian King + * Robert Jennings + * Anton Blanchard + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ibmveth.h" + +static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); + +static struct kobj_type ktype_veth_pool; + + +static const char ibmveth_driver_name[] = "ibmveth"; +static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver"; +#define ibmveth_driver_version "1.04" + +MODULE_AUTHOR("Santiago Leon "); +MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ibmveth_driver_version); + +static unsigned int tx_copybreak __read_mostly = 128; +module_param(tx_copybreak, uint, 0644); +MODULE_PARM_DESC(tx_copybreak, + "Maximum size of packet that is copied to a new buffer on transmit"); + +static unsigned int rx_copybreak __read_mostly = 128; +module_param(rx_copybreak, uint, 0644); +MODULE_PARM_DESC(rx_copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static unsigned int rx_flush __read_mostly = 0; +module_param(rx_flush, uint, 0644); +MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use"); + +struct ibmveth_stat { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat) +#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off)) + +struct ibmveth_stat ibmveth_stats[] = { + { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, + { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, + { "replenish_add_buff_failure", + IBMVETH_STAT_OFF(replenish_add_buff_failure) }, + { "replenish_add_buff_success", + IBMVETH_STAT_OFF(replenish_add_buff_success) }, + { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, + { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, + { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, + { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, + { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) }, + { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) }, +}; + +/* simple methods of getting data from the current rxq entry */ +static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter) +{ + return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off); +} + +static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) +{ + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> + IBMVETH_RXQ_TOGGLE_SHIFT; +} + +static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) +{ + return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle; +} + +static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) +{ + return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID; +} + +static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) +{ + return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK; +} + +static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) +{ + return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); +} + +static inline int 
ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) +{ + return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD; +} + +/* setup the initial settings for a buffer pool */ +static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, + u32 pool_index, u32 pool_size, + u32 buff_size, u32 pool_active) +{ + pool->size = pool_size; + pool->index = pool_index; + pool->buff_size = buff_size; + pool->threshold = pool_size * 7 / 8; + pool->active = pool_active; +} + +/* allocate and setup an buffer pool - called during open */ +static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool) +{ + int i; + + pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); + + if (!pool->free_map) + return -1; + + pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); + if (!pool->dma_addr) { + kfree(pool->free_map); + pool->free_map = NULL; + return -1; + } + + pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); + + if (!pool->skbuff) { + kfree(pool->dma_addr); + pool->dma_addr = NULL; + + kfree(pool->free_map); + pool->free_map = NULL; + return -1; + } + + memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); + + for (i = 0; i < pool->size; ++i) + pool->free_map[i] = i; + + atomic_set(&pool->available, 0); + pool->producer_index = 0; + pool->consumer_index = 0; + + return 0; +} + +static inline void ibmveth_flush_buffer(void *addr, unsigned long length) +{ + unsigned long offset; + + for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) + asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); +} + +/* replenish the buffers for a pool. note that we don't need to + * skb_reserve these since they are used for incoming... + */ +static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, + struct ibmveth_buff_pool *pool) +{ + u32 i; + u32 count = pool->size - atomic_read(&pool->available); + u32 buffers_added = 0; + struct sk_buff *skb; + unsigned int free_index, index; + u64 correlator; + unsigned long lpar_rc; + dma_addr_t dma_addr; + + mb(); + + for (i = 0; i < count; ++i) { + union ibmveth_buf_desc desc; + + skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); + + if (!skb) { + netdev_dbg(adapter->netdev, + "replenish: unable to allocate skb\n"); + adapter->replenish_no_mem++; + break; + } + + free_index = pool->consumer_index; + pool->consumer_index++; + if (pool->consumer_index >= pool->size) + pool->consumer_index = 0; + index = pool->free_map[free_index]; + + BUG_ON(index == IBM_VETH_INVALID_MAP); + BUG_ON(pool->skbuff[index] != NULL); + + dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, + pool->buff_size, DMA_FROM_DEVICE); + + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) + goto failure; + + pool->free_map[free_index] = IBM_VETH_INVALID_MAP; + pool->dma_addr[index] = dma_addr; + pool->skbuff[index] = skb; + + correlator = ((u64)pool->index << 32) | index; + *(u64 *)skb->data = correlator; + + desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; + desc.fields.address = dma_addr; + + if (rx_flush) { + unsigned int len = min(pool->buff_size, + adapter->netdev->mtu + + IBMVETH_BUFF_OH); + ibmveth_flush_buffer(skb->data, len); + } + lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, + desc.desc); + + if (lpar_rc != H_SUCCESS) { + goto failure; + } else { + buffers_added++; + adapter->replenish_add_buff_success++; + } + } + + mb(); + atomic_add(buffers_added, &(pool->available)); + return; + +failure: + pool->free_map[free_index] = index; + pool->skbuff[index] = NULL; + if (pool->consumer_index 
== 0) + pool->consumer_index = pool->size - 1; + else + pool->consumer_index--; + if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) + dma_unmap_single(&adapter->vdev->dev, + pool->dma_addr[index], pool->buff_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + adapter->replenish_add_buff_failure++; + + mb(); + atomic_add(buffers_added, &(pool->available)); +} + +/* + * The final 8 bytes of the buffer list is a counter of frames dropped + * because there was not a buffer in the buffer list capable of holding + * the frame. + */ +static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter) +{ + __be64 *p = adapter->buffer_list_addr + 4096 - 8; + + adapter->rx_no_buffer = be64_to_cpup(p); +} + +/* replenish routine */ +static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) +{ + int i; + + adapter->replenish_task_cycles++; + + for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) { + struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i]; + + if (pool->active && + (atomic_read(&pool->available) < pool->threshold)) + ibmveth_replenish_buffer_pool(adapter, pool); + } + + ibmveth_update_rx_no_buffer(adapter); +} + +/* empty and free ana buffer pool - also used to do cleanup in error paths */ +static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, + struct ibmveth_buff_pool *pool) +{ + int i; + + kfree(pool->free_map); + pool->free_map = NULL; + + if (pool->skbuff && pool->dma_addr) { + for (i = 0; i < pool->size; ++i) { + struct sk_buff *skb = pool->skbuff[i]; + if (skb) { + dma_unmap_single(&adapter->vdev->dev, + pool->dma_addr[i], + pool->buff_size, + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + pool->skbuff[i] = NULL; + } + } + } + + if (pool->dma_addr) { + kfree(pool->dma_addr); + pool->dma_addr = NULL; + } + + if (pool->skbuff) { + kfree(pool->skbuff); + pool->skbuff = NULL; + } +} + +/* remove a buffer from a pool */ +static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, + u64 correlator) +{ + unsigned int pool = correlator >> 32; + unsigned int index = correlator & 0xffffffffUL; + unsigned int free_index; + struct sk_buff *skb; + + BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); + BUG_ON(index >= adapter->rx_buff_pool[pool].size); + + skb = adapter->rx_buff_pool[pool].skbuff[index]; + + BUG_ON(skb == NULL); + + adapter->rx_buff_pool[pool].skbuff[index] = NULL; + + dma_unmap_single(&adapter->vdev->dev, + adapter->rx_buff_pool[pool].dma_addr[index], + adapter->rx_buff_pool[pool].buff_size, + DMA_FROM_DEVICE); + + free_index = adapter->rx_buff_pool[pool].producer_index; + adapter->rx_buff_pool[pool].producer_index++; + if (adapter->rx_buff_pool[pool].producer_index >= + adapter->rx_buff_pool[pool].size) + adapter->rx_buff_pool[pool].producer_index = 0; + adapter->rx_buff_pool[pool].free_map[free_index] = index; + + mb(); + + atomic_dec(&(adapter->rx_buff_pool[pool].available)); +} + +/* get the current buffer on the rx queue */ +static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter) +{ + u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator; + unsigned int pool = correlator >> 32; + unsigned int index = correlator & 0xffffffffUL; + + BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); + BUG_ON(index >= adapter->rx_buff_pool[pool].size); + + return adapter->rx_buff_pool[pool].skbuff[index]; +} + +/* recycle the current buffer on the rx queue */ +static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) +{ + u32 q_index = adapter->rx_queue.index; + u64 correlator = 
adapter->rx_queue.queue_addr[q_index].correlator; + unsigned int pool = correlator >> 32; + unsigned int index = correlator & 0xffffffffUL; + union ibmveth_buf_desc desc; + unsigned long lpar_rc; + int ret = 1; + + BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS); + BUG_ON(index >= adapter->rx_buff_pool[pool].size); + + if (!adapter->rx_buff_pool[pool].active) { + ibmveth_rxq_harvest_buffer(adapter); + ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); + goto out; + } + + desc.fields.flags_len = IBMVETH_BUF_VALID | + adapter->rx_buff_pool[pool].buff_size; + desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; + + lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); + + if (lpar_rc != H_SUCCESS) { + netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed " + "during recycle rc=%ld", lpar_rc); + ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); + ret = 0; + } + + if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { + adapter->rx_queue.index = 0; + adapter->rx_queue.toggle = !adapter->rx_queue.toggle; + } + +out: + return ret; +} + +static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) +{ + ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); + + if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) { + adapter->rx_queue.index = 0; + adapter->rx_queue.toggle = !adapter->rx_queue.toggle; + } +} + +static void ibmveth_cleanup(struct ibmveth_adapter *adapter) +{ + int i; + struct device *dev = &adapter->vdev->dev; + + if (adapter->buffer_list_addr != NULL) { + if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { + dma_unmap_single(dev, adapter->buffer_list_dma, 4096, + DMA_BIDIRECTIONAL); + adapter->buffer_list_dma = DMA_ERROR_CODE; + } + free_page((unsigned long)adapter->buffer_list_addr); + adapter->buffer_list_addr = NULL; + } + + if (adapter->filter_list_addr != NULL) { + if (!dma_mapping_error(dev, adapter->filter_list_dma)) { + dma_unmap_single(dev, adapter->filter_list_dma, 4096, + DMA_BIDIRECTIONAL); + adapter->filter_list_dma = DMA_ERROR_CODE; + } + free_page((unsigned long)adapter->filter_list_addr); + adapter->filter_list_addr = NULL; + } + + if (adapter->rx_queue.queue_addr != NULL) { + dma_free_coherent(dev, adapter->rx_queue.queue_len, + adapter->rx_queue.queue_addr, + adapter->rx_queue.queue_dma); + adapter->rx_queue.queue_addr = NULL; + } + + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) + if (adapter->rx_buff_pool[i].active) + ibmveth_free_buffer_pool(adapter, + &adapter->rx_buff_pool[i]); + + if (adapter->bounce_buffer != NULL) { + if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + dma_unmap_single(&adapter->vdev->dev, + adapter->bounce_buffer_dma, + adapter->netdev->mtu + IBMVETH_BUFF_OH, + DMA_BIDIRECTIONAL); + adapter->bounce_buffer_dma = DMA_ERROR_CODE; + } + kfree(adapter->bounce_buffer); + adapter->bounce_buffer = NULL; + } +} + +static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter, + union ibmveth_buf_desc rxq_desc, u64 mac_address) +{ + int rc, try_again = 1; + + /* + * After a kexec the adapter will still be open, so our attempt to + * open it will fail. So if we get a failure we free the adapter and + * try again, but only once. 
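+ * The busy-retry loop below frees the stale logical LAN registration,
+ * then the register call is retried exactly once.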
+ */ +retry: + rc = h_register_logical_lan(adapter->vdev->unit_address, + adapter->buffer_list_dma, rxq_desc.desc, + adapter->filter_list_dma, mac_address); + + if (rc != H_SUCCESS && try_again) { + do { + rc = h_free_logical_lan(adapter->vdev->unit_address); + } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); + + try_again = 0; + goto retry; + } + + return rc; +} + +static u64 ibmveth_encode_mac_addr(u8 *mac) +{ + int i; + u64 encoded = 0; + + for (i = 0; i < ETH_ALEN; i++) + encoded = (encoded << 8) | mac[i]; + + return encoded; +} + +static int ibmveth_open(struct net_device *netdev) +{ + struct ibmveth_adapter *adapter = netdev_priv(netdev); + u64 mac_address; + int rxq_entries = 1; + unsigned long lpar_rc; + int rc; + union ibmveth_buf_desc rxq_desc; + int i; + struct device *dev; + + netdev_dbg(netdev, "open starting\n"); + + napi_enable(&adapter->napi); + + for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) + rxq_entries += adapter->rx_buff_pool[i].size; + + adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); + adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); + + if (!adapter->buffer_list_addr || !adapter->filter_list_addr) { + netdev_err(netdev, "unable to allocate filter or buffer list " + "pages\n"); + rc = -ENOMEM; + goto err_out; + } + + dev = &adapter->vdev->dev; + + adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * + rxq_entries; + adapter->rx_queue.queue_addr = + dma_alloc_coherent(dev, adapter->rx_queue.queue_len, + &adapter->rx_queue.queue_dma, GFP_KERNEL); + if (!adapter->rx_queue.queue_addr) { + rc = -ENOMEM; + goto err_out; + } + + adapter->buffer_list_dma = dma_map_single(dev, + adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); + adapter->filter_list_dma = dma_map_single(dev, + adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); + + if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || + (dma_mapping_error(dev, adapter->filter_list_dma))) { + netdev_err(netdev, "unable to map filter or buffer list " + "pages\n"); + rc = -ENOMEM; + goto err_out; + } + + adapter->rx_queue.index = 0; + adapter->rx_queue.num_slots = rxq_entries; + adapter->rx_queue.toggle = 1; + + mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); + + rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | + adapter->rx_queue.queue_len; + rxq_desc.fields.address = adapter->rx_queue.queue_dma; + + netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr); + netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr); + netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr); + + h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); + + lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); + + if (lpar_rc != H_SUCCESS) { + netdev_err(netdev, "h_register_logical_lan failed with %ld\n", + lpar_rc); + netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq " + "desc:0x%llx MAC:0x%llx\n", + adapter->buffer_list_dma, + adapter->filter_list_dma, + rxq_desc.desc, + mac_address); + rc = -ENONET; + goto err_out; + } + + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { + if (!adapter->rx_buff_pool[i].active) + continue; + if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { + netdev_err(netdev, "unable to alloc pool\n"); + adapter->rx_buff_pool[i].active = 0; + rc = -ENOMEM; + goto err_out; + } + } + + netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq); + rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, + netdev); + if (rc != 0) { + netdev_err(netdev, "unable to request irq 0x%x, rc 
%d\n", + netdev->irq, rc); + do { + lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); + } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); + + goto err_out; + } + + adapter->bounce_buffer = + kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); + if (!adapter->bounce_buffer) { + rc = -ENOMEM; + goto err_out_free_irq; + } + adapter->bounce_buffer_dma = + dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, + netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { + netdev_err(netdev, "unable to map bounce buffer\n"); + rc = -ENOMEM; + goto err_out_free_irq; + } + + netdev_dbg(netdev, "initial replenish cycle\n"); + ibmveth_interrupt(netdev->irq, netdev); + + netif_start_queue(netdev); + + netdev_dbg(netdev, "open complete\n"); + + return 0; + +err_out_free_irq: + free_irq(netdev->irq, netdev); +err_out: + ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); + return rc; +} + +static int ibmveth_close(struct net_device *netdev) +{ + struct ibmveth_adapter *adapter = netdev_priv(netdev); + long lpar_rc; + + netdev_dbg(netdev, "close starting\n"); + + napi_disable(&adapter->napi); + + if (!adapter->pool_config) + netif_stop_queue(netdev); + + h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); + + do { + lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); + } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); + + if (lpar_rc != H_SUCCESS) { + netdev_err(netdev, "h_free_logical_lan failed with %lx, " + "continuing with close\n", lpar_rc); + } + + free_irq(netdev->irq, netdev); + + ibmveth_update_rx_no_buffer(adapter); + + ibmveth_cleanup(adapter); + + netdev_dbg(netdev, "close complete\n"); + + return 0; +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | + ADVERTISED_FIBRE); + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_FIBRE; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 1; + return 0; +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver)); + strlcpy(info->version, ibmveth_driver_version, sizeof(info->version)); +} + +static netdev_features_t ibmveth_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* + * Since the ibmveth firmware interface does not have the + * concept of separate tx/rx checksum offload enable, if rx + * checksum is disabled we also have to disable tx checksum + * offload. Once we disable rx checksum offload, we are no + * longer allowed to send tx buffers that are not properly + * checksummed. 
+ */ + + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_ALL_CSUM; + + return features; +} + +static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + unsigned long set_attr, clr_attr, ret_attr; + unsigned long set_attr6, clr_attr6; + long ret, ret4, ret6; + int rc1 = 0, rc2 = 0; + int restart = 0; + + if (netif_running(dev)) { + restart = 1; + adapter->pool_config = 1; + ibmveth_close(dev); + adapter->pool_config = 0; + } + + set_attr = 0; + clr_attr = 0; + set_attr6 = 0; + clr_attr6 = 0; + + if (data) { + set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM; + } else { + clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM; + } + + ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && + !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && + (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { + ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, + set_attr, &ret_attr); + + if (ret4 != H_SUCCESS) { + netdev_err(dev, "unable to change IPv4 checksum " + "offload settings. %d rc=%ld\n", + data, ret4); + + h_illan_attributes(adapter->vdev->unit_address, + set_attr, clr_attr, &ret_attr); + + if (data == 1) + dev->features &= ~NETIF_F_IP_CSUM; + + } else { + adapter->fw_ipv4_csum_support = data; + } + + ret6 = h_illan_attributes(adapter->vdev->unit_address, + clr_attr6, set_attr6, &ret_attr); + + if (ret6 != H_SUCCESS) { + netdev_err(dev, "unable to change IPv6 checksum " + "offload settings. %d rc=%ld\n", + data, ret6); + + h_illan_attributes(adapter->vdev->unit_address, + set_attr6, clr_attr6, &ret_attr); + + if (data == 1) + dev->features &= ~NETIF_F_IPV6_CSUM; + + } else + adapter->fw_ipv6_csum_support = data; + + if (ret4 == H_SUCCESS || ret6 == H_SUCCESS) + adapter->rx_csum = data; + else + rc1 = -EIO; + } else { + rc1 = -EIO; + netdev_err(dev, "unable to change checksum offload settings." + " %d rc=%ld ret_attr=%lx\n", data, ret, + ret_attr); + } + + if (restart) + rc2 = ibmveth_open(dev); + + return rc1 ? 
rc1 : rc2; +} + +static int ibmveth_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + int rx_csum = !!(features & NETIF_F_RXCSUM); + int rc; + + if (rx_csum == adapter->rx_csum) + return 0; + + rc = ibmveth_set_csum_offload(dev, rx_csum); + if (rc && !adapter->rx_csum) + dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + + return rc; +} + +static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN); +} + +static int ibmveth_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ibmveth_stats); + default: + return -EOPNOTSUPP; + } +} + +static void ibmveth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + int i; + struct ibmveth_adapter *adapter = netdev_priv(dev); + + for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++) + data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .get_link = ethtool_op_get_link, + .get_strings = ibmveth_get_strings, + .get_sset_count = ibmveth_get_sset_count, + .get_ethtool_stats = ibmveth_get_ethtool_stats, +}; + +static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return -EOPNOTSUPP; +} + +#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) + +static int ibmveth_send(struct ibmveth_adapter *adapter, + union ibmveth_buf_desc *descs) +{ + unsigned long correlator; + unsigned int retry_count; + unsigned long ret; + + /* + * The retry count sets a maximum for the number of broadcast and + * multicast destinations within the system. + */ + retry_count = 1024; + correlator = 0; + do { + ret = h_send_logical_lan(adapter->vdev->unit_address, + descs[0].desc, descs[1].desc, + descs[2].desc, descs[3].desc, + descs[4].desc, descs[5].desc, + correlator, &correlator); + } while ((ret == H_BUSY) && (retry_count--)); + + if (ret != H_SUCCESS && ret != H_DROPPED) { + netdev_err(adapter->netdev, "tx: h_send_logical_lan failed " + "with rc=%ld\n", ret); + return 1; + } + + return 0; +} + +static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct ibmveth_adapter *adapter = netdev_priv(netdev); + unsigned int desc_flags; + union ibmveth_buf_desc descs[6]; + int last, i; + int force_bounce = 0; + dma_addr_t dma_addr; + + /* + * veth handles a maximum of 6 segments including the header, so + * we have to linearize the skb if there are more than this. 
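+ * The six-entry descs[] array below holds one descriptor for the linear
+ * part of the skb plus up to five page fragments, which is why more
+ * than five frags forces __skb_linearize() here.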
+ */ + if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) { + netdev->stats.tx_dropped++; + goto out; + } + + /* veth can't checksum offload UDP */ + if (skb->ip_summed == CHECKSUM_PARTIAL && + ((skb->protocol == htons(ETH_P_IP) && + ip_hdr(skb)->protocol != IPPROTO_TCP) || + (skb->protocol == htons(ETH_P_IPV6) && + ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) && + skb_checksum_help(skb)) { + + netdev_err(netdev, "tx: failed to checksum packet\n"); + netdev->stats.tx_dropped++; + goto out; + } + + desc_flags = IBMVETH_BUF_VALID; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + unsigned char *buf = skb_transport_header(skb) + + skb->csum_offset; + + desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); + + /* Need to zero out the checksum */ + buf[0] = 0; + buf[1] = 0; + } + +retry_bounce: + memset(descs, 0, sizeof(descs)); + + /* + * If a linear packet is below the rx threshold then + * copy it into the static bounce buffer. This avoids the + * cost of a TCE insert and remove. + */ + if (force_bounce || (!skb_is_nonlinear(skb) && + (skb->len < tx_copybreak))) { + skb_copy_from_linear_data(skb, adapter->bounce_buffer, + skb->len); + + descs[0].fields.flags_len = desc_flags | skb->len; + descs[0].fields.address = adapter->bounce_buffer_dma; + + if (ibmveth_send(adapter, descs)) { + adapter->tx_send_failed++; + netdev->stats.tx_dropped++; + } else { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + } + + goto out; + } + + /* Map the header */ + dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) + goto map_failed; + + descs[0].fields.flags_len = desc_flags | skb_headlen(skb); + descs[0].fields.address = dma_addr; + + /* Map the frags */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) + goto map_failed_frags; + + descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag); + descs[i+1].fields.address = dma_addr; + } + + if (ibmveth_send(adapter, descs)) { + adapter->tx_send_failed++; + netdev->stats.tx_dropped++; + } else { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + } + + dma_unmap_single(&adapter->vdev->dev, + descs[0].fields.address, + descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK, + DMA_TO_DEVICE); + + for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++) + dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, + descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, + DMA_TO_DEVICE); + +out: + dev_consume_skb_any(skb); + return NETDEV_TX_OK; + +map_failed_frags: + last = i+1; + for (i = 0; i < last; i++) + dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address, + descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK, + DMA_TO_DEVICE); + +map_failed: + if (!firmware_has_feature(FW_FEATURE_CMO)) + netdev_err(netdev, "tx: unable to map xmit buffer\n"); + adapter->tx_map_failed++; + skb_linearize(skb); + force_bounce = 1; + goto retry_bounce; +} + +static int ibmveth_poll(struct napi_struct *napi, int budget) +{ + struct ibmveth_adapter *adapter = + container_of(napi, struct ibmveth_adapter, napi); + struct net_device *netdev = adapter->netdev; + int frames_processed = 0; + unsigned long lpar_rc; + +restart_poll: + while (frames_processed < budget) { + if (!ibmveth_rxq_pending_buffer(adapter)) + break; + + smp_rmb(); + if 
(!ibmveth_rxq_buffer_valid(adapter)) { + wmb(); /* suggested by larson1 */ + adapter->rx_invalid_buffer++; + netdev_dbg(netdev, "recycling invalid buffer\n"); + ibmveth_rxq_recycle_buffer(adapter); + } else { + struct sk_buff *skb, *new_skb; + int length = ibmveth_rxq_frame_length(adapter); + int offset = ibmveth_rxq_frame_offset(adapter); + int csum_good = ibmveth_rxq_csum_good(adapter); + + skb = ibmveth_rxq_get_buffer(adapter); + + new_skb = NULL; + if (length < rx_copybreak) + new_skb = netdev_alloc_skb(netdev, length); + + if (new_skb) { + skb_copy_to_linear_data(new_skb, + skb->data + offset, + length); + if (rx_flush) + ibmveth_flush_buffer(skb->data, + length + offset); + if (!ibmveth_rxq_recycle_buffer(adapter)) + kfree_skb(skb); + skb = new_skb; + } else { + ibmveth_rxq_harvest_buffer(adapter); + skb_reserve(skb, offset); + } + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, netdev); + + if (csum_good) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netif_receive_skb(skb); /* send it up */ + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += length; + frames_processed++; + } + } + + ibmveth_replenish_task(adapter); + + if (frames_processed < budget) { + napi_complete(napi); + + /* We think we are done - reenable interrupts, + * then check once more to make sure we are done. + */ + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_ENABLE); + + BUG_ON(lpar_rc != H_SUCCESS); + + if (ibmveth_rxq_pending_buffer(adapter) && + napi_reschedule(napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); + goto restart_poll; + } + } + + return frames_processed; +} + +static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) +{ + struct net_device *netdev = dev_instance; + struct ibmveth_adapter *adapter = netdev_priv(netdev); + unsigned long lpar_rc; + + if (napi_schedule_prep(&adapter->napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); + BUG_ON(lpar_rc != H_SUCCESS); + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +static void ibmveth_set_multicast_list(struct net_device *netdev) +{ + struct ibmveth_adapter *adapter = netdev_priv(netdev); + unsigned long lpar_rc; + + if ((netdev->flags & IFF_PROMISC) || + (netdev_mc_count(netdev) > adapter->mcastFilterSize)) { + lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, + IbmVethMcastEnableRecv | + IbmVethMcastDisableFiltering, + 0); + if (lpar_rc != H_SUCCESS) { + netdev_err(netdev, "h_multicast_ctrl rc=%ld when " + "entering promisc mode\n", lpar_rc); + } + } else { + struct netdev_hw_addr *ha; + /* clear the filter table & disable filtering */ + lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, + IbmVethMcastEnableRecv | + IbmVethMcastDisableFiltering | + IbmVethMcastClearFilterTable, + 0); + if (lpar_rc != H_SUCCESS) { + netdev_err(netdev, "h_multicast_ctrl rc=%ld when " + "attempting to clear filter table\n", + lpar_rc); + } + /* add the addresses to the filter table */ + netdev_for_each_mc_addr(ha, netdev) { + /* add the multicast address to the filter table */ + u64 mcast_addr; + mcast_addr = ibmveth_encode_mac_addr(ha->addr); + lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, + IbmVethMcastAddFilter, + mcast_addr); + if (lpar_rc != H_SUCCESS) { + netdev_err(netdev, "h_multicast_ctrl rc=%ld " + "when adding an entry to the filter " + "table\n", lpar_rc); + } + } + + /* re-enable filtering */ + lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, + IbmVethMcastEnableFiltering, + 0); + if (lpar_rc != H_SUCCESS) 
{ + netdev_err(netdev, "h_multicast_ctrl rc=%ld when " + "enabling filtering\n", lpar_rc); + } + } +} + +static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + struct vio_dev *viodev = adapter->vdev; + int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; + int i, rc; + int need_restart = 0; + + if (new_mtu < IBMVETH_MIN_MTU) + return -EINVAL; + + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) + if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) + break; + + if (i == IBMVETH_NUM_BUFF_POOLS) + return -EINVAL; + + /* Deactivate all the buffer pools so that the next loop can activate + only the buffer pools necessary to hold the new MTU */ + if (netif_running(adapter->netdev)) { + need_restart = 1; + adapter->pool_config = 1; + ibmveth_close(adapter->netdev); + adapter->pool_config = 0; + } + + /* Look for an active buffer pool that can hold the new MTU */ + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { + adapter->rx_buff_pool[i].active = 1; + + if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { + dev->mtu = new_mtu; + vio_cmo_set_dev_desired(viodev, + ibmveth_get_desired_dma + (viodev)); + if (need_restart) { + return ibmveth_open(adapter->netdev); + } + return 0; + } + } + + if (need_restart && (rc = ibmveth_open(adapter->netdev))) + return rc; + + return -EINVAL; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ibmveth_poll_controller(struct net_device *dev) +{ + ibmveth_replenish_task(netdev_priv(dev)); + ibmveth_interrupt(dev->irq, dev); +} +#endif + +/** + * ibmveth_get_desired_dma - Calculate IO memory desired by the driver + * + * @vdev: struct vio_dev for the device whose desired IO mem is to be returned + * + * Return value: + * Number of bytes of IO data the driver will need to perform well. + */ +static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) +{ + struct net_device *netdev = dev_get_drvdata(&vdev->dev); + struct ibmveth_adapter *adapter; + struct iommu_table *tbl; + unsigned long ret; + int i; + int rxqentries = 1; + + tbl = get_iommu_table_base(&vdev->dev); + + /* netdev inits at probe time along with the structures we need below*/ + if (netdev == NULL) + return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); + + adapter = netdev_priv(netdev); + + ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; + ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); + + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { + /* add the size of the active receive buffers */ + if (adapter->rx_buff_pool[i].active) + ret += + adapter->rx_buff_pool[i].size * + IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 
+ buff_size, tbl); + rxqentries += adapter->rx_buff_pool[i].size; + } + /* add the size of the receive queue entries */ + ret += IOMMU_PAGE_ALIGN( + rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); + + return ret; +} + +static int ibmveth_set_mac_addr(struct net_device *dev, void *p) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + struct sockaddr *addr = p; + u64 mac_address; + int rc; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mac_address = ibmveth_encode_mac_addr(addr->sa_data); + rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); + if (rc) { + netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); + return rc; + } + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + +static const struct net_device_ops ibmveth_netdev_ops = { + .ndo_open = ibmveth_open, + .ndo_stop = ibmveth_close, + .ndo_start_xmit = ibmveth_start_xmit, + .ndo_set_rx_mode = ibmveth_set_multicast_list, + .ndo_do_ioctl = ibmveth_ioctl, + .ndo_change_mtu = ibmveth_change_mtu, + .ndo_fix_features = ibmveth_fix_features, + .ndo_set_features = ibmveth_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ibmveth_set_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ibmveth_poll_controller, +#endif +}; + +static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) +{ + int rc, i, mac_len; + struct net_device *netdev; + struct ibmveth_adapter *adapter; + unsigned char *mac_addr_p; + unsigned int *mcastFilterSize_p; + + dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", + dev->unit_address); + + mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, + &mac_len); + if (!mac_addr_p) { + dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); + return -EINVAL; + } + /* Workaround for old/broken pHyp */ + if (mac_len == 8) + mac_addr_p += 2; + else if (mac_len != 6) { + dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", + mac_len); + return -EINVAL; + } + + mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, + VETH_MCAST_FILTER_SIZE, NULL); + if (!mcastFilterSize_p) { + dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " + "attribute\n"); + return -EINVAL; + } + + netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); + + if (!netdev) + return -ENOMEM; + + adapter = netdev_priv(netdev); + dev_set_drvdata(&dev->dev, netdev); + + adapter->vdev = dev; + adapter->netdev = netdev; + adapter->mcastFilterSize = *mcastFilterSize_p; + adapter->pool_config = 0; + + netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); + + netdev->irq = dev->irq; + netdev->netdev_ops = &ibmveth_netdev_ops; + netdev->ethtool_ops = &netdev_ethtool_ops; + SET_NETDEV_DEV(netdev, &dev->dev); + netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= netdev->hw_features; + + memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); + + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { + struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; + int error; + + ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, + pool_count[i], pool_size[i], + pool_active[i]); + error = kobject_init_and_add(kobj, &ktype_veth_pool, + &dev->dev.kobj, "pool%d", i); + if (!error) + kobject_uevent(kobj, KOBJ_ADD); + } + + netdev_dbg(netdev, "adapter @ 0x%p\n", adapter); + + adapter->buffer_list_dma = DMA_ERROR_CODE; + adapter->filter_list_dma = DMA_ERROR_CODE; + adapter->rx_queue.queue_dma = DMA_ERROR_CODE; + + 
netdev_dbg(netdev, "registering netdev...\n"); + + ibmveth_set_features(netdev, netdev->features); + + rc = register_netdev(netdev); + + if (rc) { + netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc); + free_netdev(netdev); + return rc; + } + + netdev_dbg(netdev, "registered\n"); + + return 0; +} + +static int ibmveth_remove(struct vio_dev *dev) +{ + struct net_device *netdev = dev_get_drvdata(&dev->dev); + struct ibmveth_adapter *adapter = netdev_priv(netdev); + int i; + + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) + kobject_put(&adapter->rx_buff_pool[i].kobj); + + unregister_netdev(netdev); + + free_netdev(netdev); + dev_set_drvdata(&dev->dev, NULL); + + return 0; +} + +static struct attribute veth_active_attr; +static struct attribute veth_num_attr; +static struct attribute veth_size_attr; + +static ssize_t veth_pool_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct ibmveth_buff_pool *pool = container_of(kobj, + struct ibmveth_buff_pool, + kobj); + + if (attr == &veth_active_attr) + return sprintf(buf, "%d\n", pool->active); + else if (attr == &veth_num_attr) + return sprintf(buf, "%d\n", pool->size); + else if (attr == &veth_size_attr) + return sprintf(buf, "%d\n", pool->buff_size); + return 0; +} + +static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct ibmveth_buff_pool *pool = container_of(kobj, + struct ibmveth_buff_pool, + kobj); + struct net_device *netdev = dev_get_drvdata( + container_of(kobj->parent, struct device, kobj)); + struct ibmveth_adapter *adapter = netdev_priv(netdev); + long value = simple_strtol(buf, NULL, 10); + long rc; + + if (attr == &veth_active_attr) { + if (value && !pool->active) { + if (netif_running(netdev)) { + if (ibmveth_alloc_buffer_pool(pool)) { + netdev_err(netdev, + "unable to alloc pool\n"); + return -ENOMEM; + } + pool->active = 1; + adapter->pool_config = 1; + ibmveth_close(netdev); + adapter->pool_config = 0; + if ((rc = ibmveth_open(netdev))) + return rc; + } else { + pool->active = 1; + } + } else if (!value && pool->active) { + int mtu = netdev->mtu + IBMVETH_BUFF_OH; + int i; + /* Make sure there is a buffer pool with buffers that + can hold a packet of the size of the MTU */ + for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { + if (pool == &adapter->rx_buff_pool[i]) + continue; + if (!adapter->rx_buff_pool[i].active) + continue; + if (mtu <= adapter->rx_buff_pool[i].buff_size) + break; + } + + if (i == IBMVETH_NUM_BUFF_POOLS) { + netdev_err(netdev, "no active pool >= MTU\n"); + return -EPERM; + } + + if (netif_running(netdev)) { + adapter->pool_config = 1; + ibmveth_close(netdev); + pool->active = 0; + adapter->pool_config = 0; + if ((rc = ibmveth_open(netdev))) + return rc; + } + pool->active = 0; + } + } else if (attr == &veth_num_attr) { + if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) { + return -EINVAL; + } else { + if (netif_running(netdev)) { + adapter->pool_config = 1; + ibmveth_close(netdev); + adapter->pool_config = 0; + pool->size = value; + if ((rc = ibmveth_open(netdev))) + return rc; + } else { + pool->size = value; + } + } + } else if (attr == &veth_size_attr) { + if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) { + return -EINVAL; + } else { + if (netif_running(netdev)) { + adapter->pool_config = 1; + ibmveth_close(netdev); + adapter->pool_config = 0; + pool->buff_size = value; + if ((rc = ibmveth_open(netdev))) + return rc; + } else { + pool->buff_size = value; + } + } + } + + /* kick the interrupt handler 
to allocate/deallocate pools */ + ibmveth_interrupt(netdev->irq, netdev); + return count; +} + + +#define ATTR(_name, _mode) \ + struct attribute veth_##_name##_attr = { \ + .name = __stringify(_name), .mode = _mode, \ + }; + +static ATTR(active, 0644); +static ATTR(num, 0644); +static ATTR(size, 0644); + +static struct attribute *veth_pool_attrs[] = { + &veth_active_attr, + &veth_num_attr, + &veth_size_attr, + NULL, +}; + +static const struct sysfs_ops veth_pool_ops = { + .show = veth_pool_show, + .store = veth_pool_store, +}; + +static struct kobj_type ktype_veth_pool = { + .release = NULL, + .sysfs_ops = &veth_pool_ops, + .default_attrs = veth_pool_attrs, +}; + +static int ibmveth_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + ibmveth_interrupt(netdev->irq, netdev); + return 0; +} + +static struct vio_device_id ibmveth_device_table[] = { + { "network", "IBM,l-lan"}, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmveth_device_table); + +static struct dev_pm_ops ibmveth_pm_ops = { + .resume = ibmveth_resume +}; + +static struct vio_driver ibmveth_driver = { + .id_table = ibmveth_device_table, + .probe = ibmveth_probe, + .remove = ibmveth_remove, + .get_desired_dma = ibmveth_get_desired_dma, + .name = ibmveth_driver_name, + .pm = &ibmveth_pm_ops, +}; + +static int __init ibmveth_module_init(void) +{ + printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name, + ibmveth_driver_string, ibmveth_driver_version); + + return vio_register_driver(&ibmveth_driver); +} + +static void __exit ibmveth_module_exit(void) +{ + vio_unregister_driver(&ibmveth_driver); +} + +module_init(ibmveth_module_init); +module_exit(ibmveth_module_exit); diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h new file mode 100644 index 000000000..1f37499d4 --- /dev/null +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -0,0 +1,206 @@ +/* + * IBM Power Virtual Ethernet Device Driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ * + * Copyright (C) IBM Corporation, 2003, 2010 + * + * Authors: Dave Larson + * Santiago Leon + * Brian King + * Robert Jennings + * Anton Blanchard + */ + +#ifndef _IBMVETH_H +#define _IBMVETH_H + +/* constants for H_MULTICAST_CTRL */ +#define IbmVethMcastReceptionModifyBit 0x80000UL +#define IbmVethMcastReceptionEnableBit 0x20000UL +#define IbmVethMcastFilterModifyBit 0x40000UL +#define IbmVethMcastFilterEnableBit 0x10000UL + +#define IbmVethMcastEnableRecv (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit) +#define IbmVethMcastDisableRecv (IbmVethMcastReceptionModifyBit) +#define IbmVethMcastEnableFiltering (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit) +#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit) +#define IbmVethMcastAddFilter 0x1UL +#define IbmVethMcastRemoveFilter 0x2UL +#define IbmVethMcastClearFilterTable 0x3UL + +#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL +#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL +#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL +#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002UL +#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001UL + +/* hcall macros */ +#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ + plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac) + +#define h_free_logical_lan(ua) \ + plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua) + +#define h_add_logical_lan_buffer(ua, buf) \ + plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf) + +static inline long h_send_logical_lan(unsigned long unit_address, + unsigned long desc1, unsigned long desc2, unsigned long desc3, + unsigned long desc4, unsigned long desc5, unsigned long desc6, + unsigned long corellator_in, unsigned long *corellator_out) +{ + long rc; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1, + desc2, desc3, desc4, desc5, desc6, corellator_in); + + *corellator_out = retbuf[0]; + + return rc; +} + +static inline long h_illan_attributes(unsigned long unit_address, + unsigned long reset_mask, unsigned long set_mask, + unsigned long *ret_attributes) +{ + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address, + reset_mask, set_mask); + + *ret_attributes = retbuf[0]; + + return rc; +} + +#define h_multicast_ctrl(ua, cmd, mac) \ + plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac) + +#define h_change_logical_lan_mac(ua, mac) \ + plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) + +#define IBMVETH_NUM_BUFF_POOLS 5 +#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */ +#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ +#define IBMVETH_MIN_MTU 68 +#define IBMVETH_MAX_POOL_COUNT 4096 +#define IBMVETH_BUFF_LIST_SIZE 4096 +#define IBMVETH_FILT_LIST_SIZE 4096 +#define IBMVETH_MAX_BUF_SIZE (1024 * 128) + +static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; +static int pool_count[] = { 256, 512, 256, 256, 256 }; +static int pool_active[] = { 1, 1, 0, 0, 0}; + +#define IBM_VETH_INVALID_MAP ((u16)0xffff) + +struct ibmveth_buff_pool { + u32 size; + u32 index; + u32 buff_size; + u32 threshold; + atomic_t available; + u32 consumer_index; + u32 producer_index; + u16 *free_map; + dma_addr_t *dma_addr; + struct sk_buff **skbuff; + int active; + struct kobject kobj; +}; + +struct ibmveth_rx_q { + u64 index; + u64 num_slots; + u64 toggle; + dma_addr_t queue_dma; + u32 queue_len; + 
struct ibmveth_rx_q_entry *queue_addr; +}; + +struct ibmveth_adapter { + struct vio_dev *vdev; + struct net_device *netdev; + struct napi_struct napi; + struct net_device_stats stats; + unsigned int mcastFilterSize; + void * buffer_list_addr; + void * filter_list_addr; + dma_addr_t buffer_list_dma; + dma_addr_t filter_list_dma; + struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS]; + struct ibmveth_rx_q rx_queue; + int pool_config; + int rx_csum; + void *bounce_buffer; + dma_addr_t bounce_buffer_dma; + + u64 fw_ipv6_csum_support; + u64 fw_ipv4_csum_support; + /* adapter specific stats */ + u64 replenish_task_cycles; + u64 replenish_no_mem; + u64 replenish_add_buff_failure; + u64 replenish_add_buff_success; + u64 rx_invalid_buffer; + u64 rx_no_buffer; + u64 tx_map_failed; + u64 tx_send_failed; +}; + +/* + * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers, + * so we don't need to byteswap the two elements. However since we use + * a union (ibmveth_buf_desc) to convert from the struct to a u64 we + * do end up with endian specific ordering of the elements and that + * needs correcting. + */ +struct ibmveth_buf_desc_fields { +#ifdef __BIG_ENDIAN + u32 flags_len; + u32 address; +#else + u32 address; + u32 flags_len; +#endif +#define IBMVETH_BUF_VALID 0x80000000 +#define IBMVETH_BUF_TOGGLE 0x40000000 +#define IBMVETH_BUF_NO_CSUM 0x02000000 +#define IBMVETH_BUF_CSUM_GOOD 0x01000000 +#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF +}; + +union ibmveth_buf_desc { + u64 desc; + struct ibmveth_buf_desc_fields fields; +}; + +struct ibmveth_rx_q_entry { + __be32 flags_off; +#define IBMVETH_RXQ_TOGGLE 0x80000000 +#define IBMVETH_RXQ_TOGGLE_SHIFT 31 +#define IBMVETH_RXQ_VALID 0x40000000 +#define IBMVETH_RXQ_NO_CSUM 0x02000000 +#define IBMVETH_RXQ_CSUM_GOOD 0x01000000 +#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF + + __be32 length; + /* correlator is only used by the OS, no need to byte swap */ + u64 correlator; +}; + +#endif /* _IBMVETH_H */ diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig new file mode 100644 index 000000000..14a66e9d2 --- /dev/null +++ b/drivers/net/ethernet/icplus/Kconfig @@ -0,0 +1,13 @@ +# +# IC Plus device configuration +# + +config IP1000 + tristate "IP1000 Gigabit Ethernet support" + depends on PCI + select MII + ---help--- + This driver supports IP1000 gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called ipg. This is recommended. diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile new file mode 100644 index 000000000..5bc87c1f3 --- /dev/null +++ b/drivers/net/ethernet/icplus/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the IC Plus device drivers +# + +obj-$(CONFIG_IP1000) += ipg.o diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c new file mode 100644 index 000000000..ff2903652 --- /dev/null +++ b/drivers/net/ethernet/icplus/ipg.c @@ -0,0 +1,2300 @@ +/* + * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter + * + * Copyright (C) 2003, 2007 IC Plus Corp + * + * Original Author: + * + * Craig Rich + * Sundance Technology, Inc. + * www.sundanceti.com + * craig_rich@sundanceti.com + * + * Current Maintainer: + * + * Sorbica Shieh. 
+ * http://www.icplus.com.tw + * sorbica@icplus.com.tw + * + * Jesse Huang + * http://www.icplus.com.tw + * jesse@icplus.com.tw + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include + +#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) +#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) +#define IPG_RESET_MASK \ + (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ + IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ + IPG_AC_AUTO_INIT) + +#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg)) +#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg)) +#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg)) + +#define ipg_r32(reg) ioread32(ioaddr + (reg)) +#define ipg_r16(reg) ioread16(ioaddr + (reg)) +#define ipg_r8(reg) ioread8(ioaddr + (reg)) + +enum { + netdev_io_size = 128 +}; + +#include "ipg.h" +#define DRV_NAME "ipg" + +MODULE_AUTHOR("IC Plus Corp. 2003"); +MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); +MODULE_LICENSE("GPL"); + +/* + * Defaults + */ +#define IPG_MAX_RXFRAME_SIZE 0x0600 +#define IPG_RXFRAG_SIZE 0x0600 +#define IPG_RXSUPPORT_SIZE 0x0600 +#define IPG_IS_JUMBO false + +/* + * Variable record -- index by leading revision/length + * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN + */ +static const unsigned short DefaultPhyParam[] = { + /* 11/12/03 IP1000A v1-3 rev=0x40 */ + /*-------------------------------------------------------------------------- + (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, + 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, + 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, + --------------------------------------------------------------------------*/ + /* 12/17/03 IP1000A v1-4 rev=0x40 */ + (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + /* 01/09/04 IP1000A v1-5 rev=0x41 */ + (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + 0x0000 +}; + +static const char * const ipg_brand_name[] = { + "IC PLUS IP1000 1000/100/10 based NIC", + "Sundance Technology ST2021 based NIC", + "Tamarack Microelectronics TC9020/9021 based NIC", + "D-Link NIC IP1000A" +}; + +static const struct pci_device_id ipg_pci_tbl[] = { + { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, + { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, + { PCI_VDEVICE(DLINK, 0x9021), 2 }, + { PCI_VDEVICE(DLINK, 0x4020), 3 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); + +static inline void __iomem *ipg_ioaddr(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + return sp->ioaddr; +} + +#ifdef IPG_DEBUG +static void ipg_dump_rfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + u32 offset; + + IPG_DEBUG_MSG("_dump_rfdlist\n"); + + netdev_info(dev, "rx_current = %02x\n", sp->rx_current); + netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty); + netdev_info(dev, "RFDList start address = %016lx\n", + (unsigned long)sp->rxd_map); + netdev_info(dev, "RFDListPtr register = %08x%08x\n", + ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; + netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n", + i, offset, (unsigned 
long)sp->rxd[i].next_desc); + offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; + netdev_info(dev, "%02x %04x RFS = %016lx\n", + i, offset, (unsigned long)sp->rxd[i].rfs); + offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; + netdev_info(dev, "%02x %04x frag_info = %016lx\n", + i, offset, (unsigned long)sp->rxd[i].frag_info); + } +} + +static void ipg_dump_tfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + u32 offset; + + IPG_DEBUG_MSG("_dump_tfdlist\n"); + + netdev_info(dev, "tx_current = %02x\n", sp->tx_current); + netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty); + netdev_info(dev, "TFDList start address = %016lx\n", + (unsigned long) sp->txd_map); + netdev_info(dev, "TFDListPtr register = %08x%08x\n", + ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; + netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n", + i, offset, (unsigned long)sp->txd[i].next_desc); + + offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; + netdev_info(dev, "%02x %04x TFC = %016lx\n", + i, offset, (unsigned long) sp->txd[i].tfc); + offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; + netdev_info(dev, "%02x %04x frag_info = %016lx\n", + i, offset, (unsigned long) sp->txd[i].frag_info); + } +} +#endif + +static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) +{ + ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); + ndelay(IPG_PC_PHYCTRLWAIT_NS); +} + +static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) +{ + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); +} + +static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) +{ + phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); +} + +static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) +{ + ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | + phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); +} + +static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity) +{ + u16 bit_data; + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); + + bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); + + return bit_data; +} + +/* + * Read a register from the Physical Layer device located + * on the IPG NIC, using the IPG PHYCTRL register. + */ +static int mdio_read(struct net_device *dev, int phy_id, int phy_reg) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + /* + * The GMII mangement frame structure for a read is as follows: + * + * |Preamble|st|op|phyad|regad|ta| data |idle| + * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | + * + * <32 1s> = 32 consecutive logic 1 values + * A = bit of Physical Layer device address (MSB first) + * R = bit of register address (MSB first) + * z = High impedance state + * D = bit of read data (MSB first) + * + * Transmission order is 'Preamble' field first, bits transmitted + * left to right (first to last). 
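+ * Each field bit below is shifted into the MgmtData position of PHY_CTRL
+ * and clocked out by driving MgmtClk low then high
+ * (ipg_drive_phy_ctl_low_high); the TA and DATA bits are then sampled
+ * back from the PHY with read_phy_bit().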
+ */ + struct { + u32 field; + unsigned int len; + } p[] = { + { GMII_PREAMBLE, 32 }, /* Preamble */ + { GMII_ST, 2 }, /* ST */ + { GMII_READ, 2 }, /* OP */ + { phy_id, 5 }, /* PHYAD */ + { phy_reg, 5 }, /* REGAD */ + { 0x0000, 2 }, /* TA */ + { 0x0000, 16 }, /* DATA */ + { 0x0000, 1 } /* IDLE */ + }; + unsigned int i, j; + u8 polarity, data; + + polarity = ipg_r8(PHY_CTRL); + polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); + + /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ + for (j = 0; j < 5; j++) { + for (i = 0; i < p[j].len; i++) { + /* For each variable length field, the MSB must be + * transmitted first. Rotate through the field bits, + * starting with the MSB, and move each bit into the + * the 1st (2^1) bit position (this is the bit position + * corresponding to the MgmtData bit of the PhyCtrl + * register for the IPG). + * + * Example: ST = 01; + * + * First write a '0' to bit 1 of the PhyCtrl + * register, then write a '1' to bit 1 of the + * PhyCtrl register. + * + * To do this, right shift the MSB of ST by the value: + * [field length - 1 - #ST bits already written] + * then left shift this result by 1. + */ + data = (p[j].field >> (p[j].len - 1 - i)) << 1; + data &= IPG_PC_MGMTDATA; + data |= polarity | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, data); + } + } + + send_three_state(ioaddr, polarity); + + read_phy_bit(ioaddr, polarity); + + /* + * For a read cycle, the bits for the next two fields (TA and + * DATA) are driven by the PHY (the IPG reads these bits). + */ + for (i = 0; i < p[6].len; i++) { + p[6].field |= + (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); + } + + send_three_state(ioaddr, polarity); + send_three_state(ioaddr, polarity); + send_three_state(ioaddr, polarity); + send_end(ioaddr, polarity); + + /* Return the value of the DATA field. */ + return p[6].field; +} + +/* + * Write to a register from the Physical Layer device located + * on the IPG NIC, using the IPG PHYCTRL register. + */ +static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + /* + * The GMII mangement frame structure for a read is as follows: + * + * |Preamble|st|op|phyad|regad|ta| data |idle| + * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | + * + * <32 1s> = 32 consecutive logic 1 values + * A = bit of Physical Layer device address (MSB first) + * R = bit of register address (MSB first) + * z = High impedance state + * D = bit of write data (MSB first) + * + * Transmission order is 'Preamble' field first, bits transmitted + * left to right (first to last). + */ + struct { + u32 field; + unsigned int len; + } p[] = { + { GMII_PREAMBLE, 32 }, /* Preamble */ + { GMII_ST, 2 }, /* ST */ + { GMII_WRITE, 2 }, /* OP */ + { phy_id, 5 }, /* PHYAD */ + { phy_reg, 5 }, /* REGAD */ + { 0x0002, 2 }, /* TA */ + { val & 0xffff, 16 }, /* DATA */ + { 0x0000, 1 } /* IDLE */ + }; + unsigned int i, j; + u8 polarity, data; + + polarity = ipg_r8(PHY_CTRL); + polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); + + /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ + for (j = 0; j < 7; j++) { + for (i = 0; i < p[j].len; i++) { + /* For each variable length field, the MSB must be + * transmitted first. Rotate through the field bits, + * starting with the MSB, and move each bit into the + * the 1st (2^1) bit position (this is the bit position + * corresponding to the MgmtData bit of the PhyCtrl + * register for the IPG). 
+ * + * Example: ST = 01; + * + * First write a '0' to bit 1 of the PhyCtrl + * register, then write a '1' to bit 1 of the + * PhyCtrl register. + * + * To do this, right shift the MSB of ST by the value: + * [field length - 1 - #ST bits already written] + * then left shift this result by 1. + */ + data = (p[j].field >> (p[j].len - 1 - i)) << 1; + data &= IPG_PC_MGMTDATA; + data |= polarity | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, data); + } + } + + /* The last cycle is a tri-state, so read from the PHY. */ + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); + ipg_r8(PHY_CTRL); + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); +} + +static void ipg_set_led_mode(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + u32 mode; + + mode = ipg_r32(ASIC_CTRL); + mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); + + if ((sp->led_mode & 0x03) > 1) + mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ + + if ((sp->led_mode & 0x01) == 1) + mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ + + if ((sp->led_mode & 0x08) == 8) + mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ + + ipg_w32(mode, ASIC_CTRL); +} + +static void ipg_set_phy_set(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + int physet; + + physet = ipg_r8(PHY_SET); + physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); + physet |= ((sp->led_mode & 0x70) >> 4); + ipg_w8(physet, PHY_SET); +} + +static int ipg_reset(struct net_device *dev, u32 resetflags) +{ + /* Assert functional resets via the IPG AsicCtrl + * register as specified by the 'resetflags' input + * parameter. + */ + void __iomem *ioaddr = ipg_ioaddr(dev); + unsigned int timeout_count = 0; + + IPG_DEBUG_MSG("_reset\n"); + + ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); + + /* Delay added to account for problem with 10Mbps reset. */ + mdelay(IPG_AC_RESETWAIT); + + while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { + mdelay(IPG_AC_RESETWAIT); + if (++timeout_count > IPG_AC_RESET_TIMEOUT) + return -ETIME; + } + /* Set LED Mode in Asic Control */ + ipg_set_led_mode(dev); + + /* Set PHYSet Register Value */ + ipg_set_phy_set(dev); + return 0; +} + +/* Find the GMII PHY address. */ +static int ipg_find_phyaddr(struct net_device *dev) +{ + unsigned int phyaddr, i; + + for (i = 0; i < 32; i++) { + u32 status; + + /* Search for the correct PHY address among 32 possible. */ + phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; + + /* 10/22/03 Grace change verify from GMII_PHY_STATUS to + GMII_PHY_ID1 + */ + + status = mdio_read(dev, phyaddr, MII_BMSR); + + if ((status != 0xFFFF) && (status != 0)) + return phyaddr; + } + + return 0x1f; +} + +/* + * Configure IPG based on result of IEEE 802.3 PHY + * auto-negotiation. + */ +static int ipg_config_autoneg(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int txflowcontrol; + unsigned int rxflowcontrol; + unsigned int fullduplex; + u32 mac_ctrl_val; + u32 asicctrl; + u8 phyctrl; + const char *speed; + const char *duplex; + const char *tx_desc; + const char *rx_desc; + + IPG_DEBUG_MSG("_config_autoneg\n"); + + asicctrl = ipg_r32(ASIC_CTRL); + phyctrl = ipg_r8(PHY_CTRL); + mac_ctrl_val = ipg_r32(MAC_CTRL); + + /* Set flags for use in resolving auto-negotiation, assuming + * non-1000Mbps, half duplex, no flow control. 
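+ * The resolved speed and duplex are read back from PHY_CTRL below
+ * (IPG_PC_LINK_SPEED and IPG_PC_DUPLEX_STATUS), and flow control is
+ * only turned on when the link resolves to full duplex.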
+ */ + fullduplex = 0; + txflowcontrol = 0; + rxflowcontrol = 0; + + /* To accommodate a problem in 10Mbps operation, + * set a global flag if PHY running in 10Mbps mode. + */ + sp->tenmbpsmode = 0; + + /* Determine actual speed of operation. */ + switch (phyctrl & IPG_PC_LINK_SPEED) { + case IPG_PC_LINK_SPEED_10MBPS: + speed = "10Mbps"; + sp->tenmbpsmode = 1; + break; + case IPG_PC_LINK_SPEED_100MBPS: + speed = "100Mbps"; + break; + case IPG_PC_LINK_SPEED_1000MBPS: + speed = "1000Mbps"; + break; + default: + speed = "undefined!"; + return 0; + } + + netdev_info(dev, "Link speed = %s\n", speed); + if (sp->tenmbpsmode == 1) + netdev_info(dev, "10Mbps operational mode enabled\n"); + + if (phyctrl & IPG_PC_DUPLEX_STATUS) { + fullduplex = 1; + txflowcontrol = 1; + rxflowcontrol = 1; + } + + /* Configure full duplex, and flow control. */ + if (fullduplex == 1) { + + /* Configure IPG for full duplex operation. */ + + duplex = "full"; + + mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; + + if (txflowcontrol == 1) { + tx_desc = ""; + mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; + } else { + tx_desc = "no "; + mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; + } + + if (rxflowcontrol == 1) { + rx_desc = ""; + mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; + } else { + rx_desc = "no "; + mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; + } + } else { + duplex = "half"; + tx_desc = "no "; + rx_desc = "no "; + mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD & + ~IPG_MC_TX_FLOW_CONTROL_ENABLE & + ~IPG_MC_RX_FLOW_CONTROL_ENABLE); + } + + netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n", + duplex, tx_desc, rx_desc); + ipg_w32(mac_ctrl_val, MAC_CTRL); + + return 0; +} + +/* Determine and configure multicast operation and set + * receive mode for IPG. + */ +static void ipg_nic_set_multicast_list(struct net_device *dev) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + struct netdev_hw_addr *ha; + unsigned int hashindex; + u32 hashtable[2]; + u8 receivemode; + + IPG_DEBUG_MSG("_nic_set_multicast_list\n"); + + receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; + + if (dev->flags & IFF_PROMISC) { + /* NIC to be configured in promiscuous mode. */ + receivemode = IPG_RM_RECEIVEALLFRAMES; + } else if ((dev->flags & IFF_ALLMULTI) || + ((dev->flags & IFF_MULTICAST) && + (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) { + /* NIC to be configured to receive all multicast + * frames. */ + receivemode |= IPG_RM_RECEIVEMULTICAST; + } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { + /* NIC to be configured to receive selected + * multicast addresses. */ + receivemode |= IPG_RM_RECEIVEMULTICASTHASH; + } + + /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. + * The IPG applies a cyclic-redundancy-check (the same CRC + * used to calculate the frame data FCS) to the destination + * address all incoming multicast frames whose destination + * address has the multicast bit set. The least significant + * 6 bits of the CRC result are used as an addressing index + * into the hash table. If the value of the bit addressed by + * this index is a 1, the frame is passed to the host system. + */ + + /* Clear hashtable. */ + hashtable[0] = 0x00000000; + hashtable[1] = 0x00000000; + + /* Cycle through all multicast addresses to filter. */ + netdev_for_each_mc_addr(ha, dev) { + /* Calculate CRC result for each multicast address. */ + hashindex = crc32_le(0xffffffff, ha->addr, + ETH_ALEN); + + /* Use only the least significant 6 bits. 
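+ * The 0x3F mask below keeps those six bits, selecting one of the 64
+ * filter bits that are then written out through the two 32-bit
+ * HASHTABLE_0/HASHTABLE_1 writes further down.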
*/ + hashindex = hashindex & 0x3F; + + /* Within "hashtable", set bit number "hashindex" + * to a logic 1. + */ + set_bit(hashindex, (void *)hashtable); + } + + /* Write the value of the hashtable, to the 4, 16 bit + * HASHTABLE IPG registers. + */ + ipg_w32(hashtable[0], HASHTABLE_0); + ipg_w32(hashtable[1], HASHTABLE_1); + + ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); + + IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); +} + +static int ipg_io_config(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = ipg_ioaddr(dev); + u32 origmacctrl; + u32 restoremacctrl; + + IPG_DEBUG_MSG("_io_config\n"); + + origmacctrl = ipg_r32(MAC_CTRL); + + restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; + + /* Based on compilation option, determine if FCS is to be + * stripped on receive frames by IPG. + */ + if (!IPG_STRIP_FCS_ON_RX) + restoremacctrl |= IPG_MC_RCV_FCS; + + /* Determine if transmitter and/or receiver are + * enabled so we may restore MACCTRL correctly. + */ + if (origmacctrl & IPG_MC_TX_ENABLED) + restoremacctrl |= IPG_MC_TX_ENABLE; + + if (origmacctrl & IPG_MC_RX_ENABLED) + restoremacctrl |= IPG_MC_RX_ENABLE; + + /* Transmitter and receiver must be disabled before setting + * IFSSelect. + */ + ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & + IPG_MC_RSVD_MASK, MAC_CTRL); + + /* Now that transmitter and receiver are disabled, write + * to IFSSelect. + */ + ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); + + /* Set RECEIVEMODE register. */ + ipg_nic_set_multicast_list(dev); + + ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE); + + ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); + ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); + ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); + ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); + ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); + ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); + ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | + IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | + IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | + IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); + ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); + ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); + + /* IPG multi-frag frame bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); + + /* IPG TX poll now bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); + + /* IPG RX poll now bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); + + /* Now restore MACCTRL to original setting. */ + ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); + + /* Disable unused RMON statistics. */ + ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); + + /* Disable unused MIB statistics. */ + ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | + IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | + IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | + IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | + IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | + IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); + + return 0; +} + +/* + * Create a receive buffer within system memory and update + * NIC private structure appropriately. 
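+ * The new buffer's DMA address is stored in the RFD frag_info field,
+ * with the fragment length encoded in the upper bits under the
+ * IPG_RFI_FRAGLEN mask (the rxfragsize << 48 below).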
+ */ +static int ipg_get_rxbuff(struct net_device *dev, int entry) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct ipg_rx *rxfd = sp->rxd + entry; + struct sk_buff *skb; + u64 rxfragsize; + + IPG_DEBUG_MSG("_get_rxbuff\n"); + + skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); + if (!skb) { + sp->rx_buff[entry] = NULL; + return -ENOMEM; + } + + /* Save the address of the sk_buff structure. */ + sp->rx_buff[entry] = skb; + + rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + + /* Set the RFD fragment length. */ + rxfragsize = sp->rxfrag_size; + rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); + + return 0; +} + +static int init_rfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_init_rfdlist\n"); + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + struct ipg_rx *rxfd = sp->rxd + i; + + if (sp->rx_buff[i]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_irq(sp->rx_buff[i]); + sp->rx_buff[i] = NULL; + } + + /* Clear out the RFS field. */ + rxfd->rfs = 0x0000000000000000; + + if (ipg_get_rxbuff(dev, i) < 0) { + /* + * A receive buffer was not ready, break the + * RFD list here. + */ + IPG_DEBUG_MSG("Cannot allocate Rx buffer\n"); + + /* Just in case we cannot allocate a single RFD. + * Should not occur. + */ + if (i == 0) { + netdev_err(dev, "No memory available for RFD list\n"); + return -ENOMEM; + } + } + + rxfd->next_desc = cpu_to_le64(sp->rxd_map + + sizeof(struct ipg_rx)*(i + 1)); + } + sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); + + sp->rx_current = 0; + sp->rx_dirty = 0; + + /* Write the location of the RFDList to the IPG. */ + ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); + ipg_w32(0x00000000, RFD_LIST_PTR_1); + + return 0; +} + +static void init_tfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_init_tfdlist\n"); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + struct ipg_tx *txfd = sp->txd + i; + + txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); + + if (sp->tx_buff[i]) { + dev_kfree_skb_irq(sp->tx_buff[i]); + sp->tx_buff[i] = NULL; + } + + txfd->next_desc = cpu_to_le64(sp->txd_map + + sizeof(struct ipg_tx)*(i + 1)); + } + sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); + + sp->tx_current = 0; + sp->tx_dirty = 0; + + /* Write the location of the TFDList to the IPG. */ + IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n", + (u32) sp->txd_map); + ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); + ipg_w32(0x00000000, TFD_LIST_PTR_1); + + sp->reset_current_tfd = 1; +} + +/* + * Free all transmit buffers which have already been transferred + * via DMA to the IPG. + */ +static void ipg_nic_txfree(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int released, pending, dirty; + + IPG_DEBUG_MSG("_nic_txfree\n"); + + pending = sp->tx_current - sp->tx_dirty; + dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; + + for (released = 0; released < pending; released++) { + struct sk_buff *skb = sp->tx_buff[dirty]; + struct ipg_tx *txfd = sp->txd + dirty; + + IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc); + + /* Look at each TFD's TFC field beginning + * at the last freed TFD up to the current TFD. + * If the TFDDone bit is set, free the associated + * buffer. 
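+ * tx_current and tx_dirty are free-running counters; dirty is taken
+ * modulo IPG_TFDLIST_LENGTH above to find the ring slot, and tx_dirty
+ * is advanced afterwards by the number of descriptors released.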
+ */ + if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE))) + break; + + /* Free the transmit buffer. */ + if (skb) { + pci_unmap_single(sp->pdev, + le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, + skb->len, PCI_DMA_TODEVICE); + + dev_kfree_skb_irq(skb); + + sp->tx_buff[dirty] = NULL; + } + dirty = (dirty + 1) % IPG_TFDLIST_LENGTH; + } + + sp->tx_dirty += released; + + if (netif_queue_stopped(dev) && + (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { + netif_wake_queue(dev); + } +} + +static void ipg_tx_timeout(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + + ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | + IPG_AC_FIFO); + + spin_lock_irq(&sp->lock); + + /* Re-configure after DMA reset. */ + if (ipg_io_config(dev) < 0) + netdev_info(dev, "Error during re-configuration\n"); + + init_tfdlist(dev); + + spin_unlock_irq(&sp->lock); + + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, + MAC_CTRL); +} + +/* + * For TxComplete interrupts, free all transmit + * buffers which have already been transferred via DMA + * to the IPG. + */ +static void ipg_nic_txcleanup(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_nic_txcleanup\n"); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + /* Reading the TXSTATUS register clears the + * TX_COMPLETE interrupt. + */ + u32 txstatusdword = ipg_r32(TX_STATUS); + + IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword); + + /* Check for Transmit errors. Error bits only valid if + * TX_COMPLETE bit in the TXSTATUS register is a 1. + */ + if (!(txstatusdword & IPG_TS_TX_COMPLETE)) + break; + + /* If in 10Mbps mode, indicate transmit is ready. */ + if (sp->tenmbpsmode) { + netif_wake_queue(dev); + } + + /* Transmit error, increment stat counters. */ + if (txstatusdword & IPG_TS_TX_ERROR) { + IPG_DEBUG_MSG("Transmit error\n"); + sp->stats.tx_errors++; + } + + /* Late collision, re-enable transmitter. */ + if (txstatusdword & IPG_TS_LATE_COLLISION) { + IPG_DEBUG_MSG("Late collision on transmit\n"); + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + + /* Maximum collisions, re-enable transmitter. */ + if (txstatusdword & IPG_TS_TX_MAX_COLL) { + IPG_DEBUG_MSG("Maximum collisions on transmit\n"); + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + + /* Transmit underrun, reset and re-enable + * transmitter. + */ + if (txstatusdword & IPG_TS_TX_UNDERRUN) { + IPG_DEBUG_MSG("Transmitter underrun\n"); + sp->stats.tx_fifo_errors++; + ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | + IPG_AC_NETWORK | IPG_AC_FIFO); + + /* Re-configure after DMA reset. */ + if (ipg_io_config(dev) < 0) { + netdev_info(dev, "Error during re-configuration\n"); + } + init_tfdlist(dev); + + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + } + + ipg_nic_txfree(dev); +} + +/* Provides statistical information about the IPG NIC. */ +static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + u16 temp1; + u16 temp2; + + IPG_DEBUG_MSG("_nic_get_stats\n"); + + /* Check to see if the NIC has been initialized via nic_open, + * before trying to read statistic registers. 
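+ * The hardware counters read below are accumulated into the software
+ * sp->stats structure on each call rather than being returned raw.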
+ */ + if (!netif_running(dev)) + return &sp->stats; + + sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); + sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); + sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); + sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); + temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); + sp->stats.rx_errors += temp1; + sp->stats.rx_missed_errors += temp1; + temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + + ipg_r32(IPG_LATECOLLISIONS); + temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); + sp->stats.collisions += temp1; + sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); + sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + + ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; + sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); + + /* detailed tx_errors */ + sp->stats.tx_carrier_errors += temp2; + + /* detailed rx_errors */ + sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + + ipg_r16(IPG_FRAMETOOLONGERRRORS); + sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); + + /* Unutilized IPG statistic registers. */ + ipg_r32(IPG_MCSTFRAMESRCVDOK); + + return &sp->stats; +} + +/* Restore used receive buffers. */ +static int ipg_nic_rxrestore(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + const unsigned int curr = sp->rx_current; + unsigned int dirty = sp->rx_dirty; + + IPG_DEBUG_MSG("_nic_rxrestore\n"); + + for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { + unsigned int entry = dirty % IPG_RFDLIST_LENGTH; + + /* rx_copybreak may poke hole here and there. */ + if (sp->rx_buff[entry]) + continue; + + /* Generate a new receive buffer to replace the + * current buffer (which will be released by the + * Linux system). + */ + if (ipg_get_rxbuff(dev, entry) < 0) { + IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n"); + + break; + } + + /* Reset the RFS field. */ + sp->rxd[entry].rfs = 0x0000000000000000; + } + sp->rx_dirty = dirty; + + return 0; +} + +/* use jumboindex and jumbosize to control jumbo frame status + * initial status is jumboindex=-1 and jumbosize=0 + * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. + * 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving + * 3. 
jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump + * previous receiving and need to continue dumping the current one + */ +enum { + NORMAL_PACKET, + ERROR_PACKET +}; + +enum { + FRAME_NO_START_NO_END = 0, + FRAME_WITH_START = 1, + FRAME_WITH_END = 10, + FRAME_WITH_START_WITH_END = 11 +}; + +static void ipg_nic_rx_free_skb(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; + + if (sp->rx_buff[entry]) { + struct ipg_rx *rxfd = sp->rxd + entry; + + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_irq(sp->rx_buff[entry]); + sp->rx_buff[entry] = NULL; + } +} + +static int ipg_nic_rx_check_frame_type(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); + int type = FRAME_NO_START_NO_END; + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) + type += FRAME_WITH_START; + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) + type += FRAME_WITH_END; + return type; +} + +static int ipg_nic_rx_check_error(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; + struct ipg_rx *rxfd = sp->rxd + entry; + + if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & + (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | + IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | + IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { + IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", + (unsigned long) rxfd->rfs); + + /* Increment general receive error statistic. */ + sp->stats.rx_errors++; + + /* Increment detailed receive error statistics. */ + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { + IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); + + sp->stats.rx_fifo_errors++; + } + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { + IPG_DEBUG_MSG("RX runt occurred\n"); + sp->stats.rx_length_errors++; + } + + /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, + * error count handled by a IPG statistic register. + */ + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { + IPG_DEBUG_MSG("RX alignment error occurred\n"); + sp->stats.rx_frame_errors++; + } + + /* Do nothing for IPG_RFS_RXFCSERROR, error count + * handled by a IPG statistic register. + */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. 
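+ * The RFD itself stays in the ring; the emptied slot is refilled later by ipg_nic_rxrestore().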
+ */ + if (sp->rx_buff[entry]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + dev_kfree_skb_irq(sp->rx_buff[entry]); + sp->rx_buff[entry] = NULL; + } + return ERROR_PACKET; + } + return NORMAL_PACKET; +} + +static void ipg_nic_rx_with_start_and_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct ipg_jumbo *jumbo = &sp->jumbo; + struct sk_buff *skb; + int framelen; + + if (jumbo->found_start) { + dev_kfree_skb_irq(jumbo->skb); + jumbo->found_start = 0; + jumbo->current_size = 0; + jumbo->skb = NULL; + } + + /* 1: found error, 0 no error */ + if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) + return; + + skb = sp->rx_buff[entry]; + if (!skb) + return; + + /* accept this frame and send to upper layer */ + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + if (framelen > sp->rxfrag_size) + framelen = sp->rxfrag_size; + + skb_put(skb, framelen); + skb->protocol = eth_type_trans(skb, dev); + skb_checksum_none_assert(skb); + netif_rx(skb); + sp->rx_buff[entry] = NULL; +} + +static void ipg_nic_rx_with_start(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct ipg_jumbo *jumbo = &sp->jumbo; + struct pci_dev *pdev = sp->pdev; + struct sk_buff *skb; + + /* 1: found error, 0 no error */ + if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) + return; + + /* accept this frame and send to upper layer */ + skb = sp->rx_buff[entry]; + if (!skb) + return; + + if (jumbo->found_start) + dev_kfree_skb_irq(jumbo->skb); + + pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + skb_put(skb, sp->rxfrag_size); + + jumbo->found_start = 1; + jumbo->current_size = sp->rxfrag_size; + jumbo->skb = skb; + + sp->rx_buff[entry] = NULL; +} + +static void ipg_nic_rx_with_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct ipg_jumbo *jumbo = &sp->jumbo; + + /* 1: found error, 0 no error */ + if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { + struct sk_buff *skb = sp->rx_buff[entry]; + + if (!skb) + return; + + if (jumbo->found_start) { + int framelen, endframelen; + + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + + endframelen = framelen - jumbo->current_size; + if (framelen > sp->rxsupport_size) + dev_kfree_skb_irq(jumbo->skb); + else { + memcpy(skb_put(jumbo->skb, endframelen), + skb->data, endframelen); + + jumbo->skb->protocol = + eth_type_trans(jumbo->skb, dev); + + skb_checksum_none_assert(jumbo->skb); + netif_rx(jumbo->skb); + } + } + + jumbo->found_start = 0; + jumbo->current_size = 0; + jumbo->skb = NULL; + + ipg_nic_rx_free_skb(dev); + } else { + dev_kfree_skb_irq(jumbo->skb); + jumbo->found_start = 0; + jumbo->current_size = 0; + jumbo->skb = NULL; + } +} + +static void ipg_nic_rx_no_start_no_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct ipg_jumbo *jumbo = &sp->jumbo; + + /* 1: found error, 0 no error */ + if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { + struct sk_buff *skb = sp->rx_buff[entry]; + + if (skb) { + if (jumbo->found_start) { + jumbo->current_size += sp->rxfrag_size; + if (jumbo->current_size <= sp->rxsupport_size) { + memcpy(skb_put(jumbo->skb, + sp->rxfrag_size), + skb->data, sp->rxfrag_size); + } + } + ipg_nic_rx_free_skb(dev); + } + } else { + dev_kfree_skb_irq(jumbo->skb); + jumbo->found_start = 0; + 
jumbo->current_size = 0; + jumbo->skb = NULL; + } +} + +static int ipg_nic_rx_jumbo(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int curr = sp->rx_current; + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_nic_rx\n"); + + for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { + unsigned int entry = curr % IPG_RFDLIST_LENGTH; + struct ipg_rx *rxfd = sp->rxd + entry; + + if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE))) + break; + + switch (ipg_nic_rx_check_frame_type(dev)) { + case FRAME_WITH_START_WITH_END: + ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); + break; + case FRAME_WITH_START: + ipg_nic_rx_with_start(dev, sp, rxfd, entry); + break; + case FRAME_WITH_END: + ipg_nic_rx_with_end(dev, sp, rxfd, entry); + break; + case FRAME_NO_START_NO_END: + ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); + break; + } + } + + sp->rx_current = curr; + + if (i == IPG_MAXRFDPROCESS_COUNT) { + /* There are more RFDs to process, however the + * allocated amount of RFD processing time has + * expired. Assert Interrupt Requested to make + * sure we come back to process the remaining RFDs. + */ + ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); + } + + ipg_nic_rxrestore(dev); + + return 0; +} + +static int ipg_nic_rx(struct net_device *dev) +{ + /* Transfer received Ethernet frames to higher network layers. */ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int curr = sp->rx_current; + void __iomem *ioaddr = sp->ioaddr; + struct ipg_rx *rxfd; + unsigned int i; + + IPG_DEBUG_MSG("_nic_rx\n"); + +#define __RFS_MASK \ + cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) + + for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { + unsigned int entry = curr % IPG_RFDLIST_LENGTH; + struct sk_buff *skb = sp->rx_buff[entry]; + unsigned int framelen; + + rxfd = sp->rxd + entry; + + if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) + break; + + /* Get received frame length. */ + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + + /* Check for jumbo frame arrival with too small + * RXFRAG_SIZE. + */ + if (framelen > sp->rxfrag_size) { + IPG_DEBUG_MSG + ("RFS FrameLen > allocated fragment size\n"); + + framelen = sp->rxfrag_size; + } + + if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & + (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | + IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | + IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { + + IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", + (unsigned long int) rxfd->rfs); + + /* Increment general receive error statistic. */ + sp->stats.rx_errors++; + + /* Increment detailed receive error statistics. */ + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { + IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); + sp->stats.rx_fifo_errors++; + } + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { + IPG_DEBUG_MSG("RX runt occurred\n"); + sp->stats.rx_length_errors++; + } + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; + /* Do nothing, error count handled by a IPG + * statistic register. + */ + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { + IPG_DEBUG_MSG("RX alignment error occurred\n"); + sp->stats.rx_frame_errors++; + } + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; + /* Do nothing, error count handled by a IPG + * statistic register. + */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. 
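+ * As in the jumbo error path, only the buffer is dropped; the descriptor is reused once ipg_nic_rxrestore() replenishes the entry.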
+ */ + if (skb) { + __le64 info = rxfd->frag_info; + + pci_unmap_single(sp->pdev, + le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + dev_kfree_skb_irq(skb); + } + } else { + + /* Adjust the new buffer length to accommodate the size + * of the received frame. + */ + skb_put(skb, framelen); + + /* Set the buffer's protocol field to Ethernet. */ + skb->protocol = eth_type_trans(skb, dev); + + /* The IPG encountered an error with (or + * there were no) IP/TCP/UDP checksums. + * This may or may not indicate an invalid + * IP/TCP/UDP frame was received. Let the + * upper layer decide. + */ + skb_checksum_none_assert(skb); + + /* Hand off frame for higher layer processing. + * The function netif_rx() releases the sk_buff + * when processing completes. + */ + netif_rx(skb); + } + + /* Assure RX buffer is not reused by IPG. */ + sp->rx_buff[entry] = NULL; + } + + /* + * If there are more RFDs to process and the allocated amount of RFD + * processing time has expired, assert Interrupt Requested to make + * sure we come back to process the remaining RFDs. + */ + if (i == IPG_MAXRFDPROCESS_COUNT) + ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); + +#ifdef IPG_DEBUG + /* Check if the RFD list contained no receive frame data. */ + if (!i) + sp->EmptyRFDListCount++; +#endif + while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && + !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && + (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { + unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; + + rxfd = sp->rxd + entry; + + IPG_DEBUG_MSG("Frame requires multiple RFDs\n"); + + /* An unexpected event, additional code needed to handle + * properly. So for the time being, just disregard the + * frame. + */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. + */ + if (sp->rx_buff[entry]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_irq(sp->rx_buff[entry]); + } + + /* Assure RX buffer is not reused by IPG. */ + sp->rx_buff[entry] = NULL; + } + + sp->rx_current = curr; + + /* Check to see if there are a minimum number of used + * RFDs before restoring any (should improve performance.) + */ + if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) + ipg_nic_rxrestore(dev); + + return 0; +} + +static void ipg_reset_after_host_error(struct work_struct *work) +{ + struct ipg_nic_private *sp = + container_of(work, struct ipg_nic_private, task.work); + struct net_device *dev = sp->dev; + + /* + * Acknowledge HostError interrupt by resetting + * IPG DMA and HOST. + */ + ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); + + init_rfdlist(dev); + init_tfdlist(dev); + + if (ipg_io_config(dev) < 0) { + netdev_info(dev, "Cannot recover from PCI error\n"); + schedule_delayed_work(&sp->task, HZ); + } +} + +static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) +{ + struct net_device *dev = dev_inst; + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int handled = 0; + u16 status; + + IPG_DEBUG_MSG("_interrupt_handler\n"); + + if (sp->is_jumbo) + ipg_nic_rxrestore(dev); + + spin_lock(&sp->lock); + + /* Get interrupt source information, and acknowledge + * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, + * IntRequested, MacControlFrame, LinkEvent) interrupts + * if issued. Also, all IPG interrupts are disabled by + * reading IntStatusAck. 
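+ * Interrupts stay masked for the duration of the handler and are restored at the out_enable label below.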
+ */ + status = ipg_r16(INT_STATUS_ACK); + + IPG_DEBUG_MSG("IntStatusAck = %04x\n", status); + + /* Shared IRQ of remove event. */ + if (!(status & IPG_IS_RSVD_MASK)) + goto out_enable; + + handled = 1; + + if (unlikely(!netif_running(dev))) + goto out_unlock; + + /* If RFDListEnd interrupt, restore all used RFDs. */ + if (status & IPG_IS_RFD_LIST_END) { + IPG_DEBUG_MSG("RFDListEnd Interrupt\n"); + + /* The RFD list end indicates an RFD was encountered + * with a 0 NextPtr, or with an RFDDone bit set to 1 + * (indicating the RFD is not read for use by the + * IPG.) Try to restore all RFDs. + */ + ipg_nic_rxrestore(dev); + +#ifdef IPG_DEBUG + /* Increment the RFDlistendCount counter. */ + sp->RFDlistendCount++; +#endif + } + + /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or + * IntRequested interrupt, process received frames. */ + if ((status & IPG_IS_RX_DMA_PRIORITY) || + (status & IPG_IS_RFD_LIST_END) || + (status & IPG_IS_RX_DMA_COMPLETE) || + (status & IPG_IS_INT_REQUESTED)) { +#ifdef IPG_DEBUG + /* Increment the RFD list checked counter if interrupted + * only to check the RFD list. */ + if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | + IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & + (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | + IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | + IPG_IS_UPDATE_STATS))) + sp->RFDListCheckedCount++; +#endif + + if (sp->is_jumbo) + ipg_nic_rx_jumbo(dev); + else + ipg_nic_rx(dev); + } + + /* If TxDMAComplete interrupt, free used TFDs. */ + if (status & IPG_IS_TX_DMA_COMPLETE) + ipg_nic_txfree(dev); + + /* TxComplete interrupts indicate one of numerous actions. + * Determine what action to take based on TXSTATUS register. + */ + if (status & IPG_IS_TX_COMPLETE) + ipg_nic_txcleanup(dev); + + /* If UpdateStats interrupt, update Linux Ethernet statistics */ + if (status & IPG_IS_UPDATE_STATS) + ipg_nic_get_stats(dev); + + /* If HostError interrupt, reset IPG. */ + if (status & IPG_IS_HOST_ERROR) { + IPG_DDEBUG_MSG("HostError Interrupt\n"); + + schedule_delayed_work(&sp->task, 0); + } + + /* If LinkEvent interrupt, resolve autonegotiation. */ + if (status & IPG_IS_LINK_EVENT) { + if (ipg_config_autoneg(dev) < 0) + netdev_info(dev, "Auto-negotiation error\n"); + } + + /* If MACCtrlFrame interrupt, do nothing. */ + if (status & IPG_IS_MAC_CTRL_FRAME) + IPG_DEBUG_MSG("MACCtrlFrame interrupt\n"); + + /* If RxComplete interrupt, do nothing. */ + if (status & IPG_IS_RX_COMPLETE) + IPG_DEBUG_MSG("RxComplete interrupt\n"); + + /* If RxEarly interrupt, do nothing. */ + if (status & IPG_IS_RX_EARLY) + IPG_DEBUG_MSG("RxEarly interrupt\n"); + +out_enable: + /* Re-enable IPG interrupts. 
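+ * Writing the enable mask re-arms the sources that the read of IntStatusAck disabled on entry.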
*/ + ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | + IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | + IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); +out_unlock: + spin_unlock(&sp->lock); + + return IRQ_RETVAL(handled); +} + +static void ipg_rx_clear(struct ipg_nic_private *sp) +{ + unsigned int i; + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + if (sp->rx_buff[i]) { + struct ipg_rx *rxfd = sp->rxd + i; + + dev_kfree_skb_irq(sp->rx_buff[i]); + sp->rx_buff[i] = NULL; + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + } + } +} + +static void ipg_tx_clear(struct ipg_nic_private *sp) +{ + unsigned int i; + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + if (sp->tx_buff[i]) { + struct ipg_tx *txfd = sp->txd + i; + + pci_unmap_single(sp->pdev, + le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, + sp->tx_buff[i]->len, PCI_DMA_TODEVICE); + + dev_kfree_skb_irq(sp->tx_buff[i]); + + sp->tx_buff[i] = NULL; + } + } +} + +static int ipg_nic_open(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + struct pci_dev *pdev = sp->pdev; + int rc; + + IPG_DEBUG_MSG("_nic_open\n"); + + sp->rx_buf_sz = sp->rxsupport_size; + + /* Check for interrupt line conflicts, and request interrupt + * line for IPG. + * + * IMPORTANT: Disable IPG interrupts prior to registering + * IRQ. + */ + ipg_w16(0x0000, INT_ENABLE); + + /* Register the interrupt line to be used by the IPG within + * the Linux system. + */ + rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED, + dev->name, dev); + if (rc < 0) { + netdev_info(dev, "Error when requesting interrupt\n"); + goto out; + } + + dev->irq = pdev->irq; + + rc = -ENOMEM; + + sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, + &sp->rxd_map, GFP_KERNEL); + if (!sp->rxd) + goto err_free_irq_0; + + sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, + &sp->txd_map, GFP_KERNEL); + if (!sp->txd) + goto err_free_rx_1; + + rc = init_rfdlist(dev); + if (rc < 0) { + netdev_info(dev, "Error during configuration\n"); + goto err_free_tx_2; + } + + init_tfdlist(dev); + + rc = ipg_io_config(dev); + if (rc < 0) { + netdev_info(dev, "Error during configuration\n"); + goto err_release_tfdlist_3; + } + + /* Resolve autonegotiation. */ + if (ipg_config_autoneg(dev) < 0) + netdev_info(dev, "Auto-negotiation error\n"); + + /* initialize JUMBO Frame control variable */ + sp->jumbo.found_start = 0; + sp->jumbo.current_size = 0; + sp->jumbo.skb = NULL; + + /* Enable transmit and receive operation of the IPG. 
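+ * As elsewhere in the driver, MAC_CTRL is read-modify-written with the reserved bits cleared via IPG_MC_RSVD_MASK.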
*/ + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + + netif_start_queue(dev); +out: + return rc; + +err_release_tfdlist_3: + ipg_tx_clear(sp); + ipg_rx_clear(sp); +err_free_tx_2: + dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); +err_free_rx_1: + dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); +err_free_irq_0: + free_irq(pdev->irq, dev); + goto out; +} + +static int ipg_nic_stop(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + struct pci_dev *pdev = sp->pdev; + + IPG_DEBUG_MSG("_nic_stop\n"); + + netif_stop_queue(dev); + + IPG_DUMPTFDLIST(dev); + + do { + (void) ipg_r16(INT_STATUS_ACK); + + ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); + + synchronize_irq(pdev->irq); + } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); + + ipg_rx_clear(sp); + + ipg_tx_clear(sp); + + pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); + pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); + + free_irq(pdev->irq, dev); + + return 0; +} + +static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; + unsigned long flags; + struct ipg_tx *txfd; + + IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); + + /* If in 10Mbps mode, stop the transmit queue so + * no more transmit frames are accepted. + */ + if (sp->tenmbpsmode) + netif_stop_queue(dev); + + if (sp->reset_current_tfd) { + sp->reset_current_tfd = 0; + entry = 0; + } + + txfd = sp->txd + entry; + + sp->tx_buff[entry] = skb; + + /* Clear all TFC fields, except TFDDONE. */ + txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); + + /* Specify the TFC field within the TFD. */ + txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | + (IPG_TFC_FRAMEID & sp->tx_current) | + (IPG_TFC_FRAGCOUNT & (1 << 24))); + /* + * 16--17 (WordAlign) <- 3 (disable), + * 0--15 (FrameId) <- sp->tx_current, + * 24--27 (FragCount) <- 1 + */ + + /* Request TxComplete interrupts at an interval defined + * by the constant IPG_FRAMESBETWEENTXCOMPLETES. + * Request TxComplete interrupt for every frame + * if in 10Mbps mode to accommodate problem with 10Mbps + * processing. + */ + if (sp->tenmbpsmode) + txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); + txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); + /* Based on compilation option, determine if FCS is to be + * appended to transmit frame by IPG. + */ + if (!(IPG_APPEND_FCS_ON_TX)) + txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); + + /* Based on compilation option, determine if IP, TCP and/or + * UDP checksums are to be added to transmit frame by IPG. + */ + if (IPG_ADD_IPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); + + if (IPG_ADD_TCPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); + + if (IPG_ADD_UDPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); + + /* Based on compilation option, determine if VLAN tag info is to be + * inserted into transmit frame by IPG. + */ + if (IPG_INSERT_MANUAL_VLAN_TAG) { + txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | + ((u64) IPG_MANUAL_VLAN_VID << 32) | + ((u64) IPG_MANUAL_VLAN_CFI << 44) | + ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); + } + + /* The fragment start location within system memory is defined + * by the sk_buff structure's data field. 
The physical address + * of this location within the system's virtual memory space + * is determined using the IPG_HOST2BUS_MAP function. + */ + txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE)); + + /* The length of the fragment within system memory is defined by + * the sk_buff structure's len field. + */ + txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & + ((u64) (skb->len & 0xffff) << 48)); + + /* Clear the TFDDone bit last to indicate the TFD is ready + * for transfer to the IPG. + */ + txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); + + spin_lock_irqsave(&sp->lock, flags); + + sp->tx_current++; + + mmiowb(); + + ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); + + if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) + netif_stop_queue(dev); + + spin_unlock_irqrestore(&sp->lock, flags); + + return NETDEV_TX_OK; +} + +static void ipg_set_phy_default_param(unsigned char rev, + struct net_device *dev, int phy_address) +{ + unsigned short length; + unsigned char revision; + const unsigned short *phy_param; + unsigned short address, value; + + phy_param = &DefaultPhyParam[0]; + length = *phy_param & 0x00FF; + revision = (unsigned char)((*phy_param) >> 8); + phy_param++; + while (length != 0) { + if (rev == revision) { + while (length > 1) { + address = *phy_param; + value = *(phy_param + 1); + phy_param += 2; + mdio_write(dev, phy_address, address, value); + length -= 4; + } + break; + } else { + phy_param += length / 2; + length = *phy_param & 0x00FF; + revision = (unsigned char)((*phy_param) >> 8); + phy_param++; + } + } +} + +static int read_eeprom(struct net_device *dev, int eep_addr) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + unsigned int i; + int ret = 0; + u16 value; + + value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); + ipg_w16(value, EEPROM_CTRL); + + for (i = 0; i < 1000; i++) { + u16 data; + + mdelay(10); + data = ipg_r16(EEPROM_CTRL); + if (!(data & IPG_EC_EEPROM_BUSY)) { + ret = ipg_r16(EEPROM_DATA); + break; + } + } + return ret; +} + +static void ipg_init_mii(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct mii_if_info *mii_if = &sp->mii_if; + int phyaddr; + + mii_if->dev = dev; + mii_if->mdio_read = mdio_read; + mii_if->mdio_write = mdio_write; + mii_if->phy_id_mask = 0x1f; + mii_if->reg_num_mask = 0x1f; + + mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); + + if (phyaddr != 0x1f) { + u16 mii_phyctrl, mii_1000cr; + + mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); + mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | + GMII_PHY_1000BASETCONTROL_PreferMaster; + mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); + + mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); + + /* Set default phyparam */ + ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr); + + /* Reset PHY */ + mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; + mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); + + } +} + +static int ipg_hw_init(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + int rc; + + /* Read/Write and Reset EEPROM Value */ + /* Read LED Mode Configuration from EEPROM */ + sp->led_mode = read_eeprom(dev, 6); + + /* Reset all functions within the IPG. Do not assert + * RST_OUT as not compatible with some PHYs. 
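+ * A failed reset aborts hardware initialization here rather than continuing with a half-programmed device.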
+ */ + rc = ipg_reset(dev, IPG_RESET_MASK); + if (rc < 0) + goto out; + + ipg_init_mii(dev); + + /* Read MAC Address from EEPROM */ + for (i = 0; i < 3; i++) + sp->station_addr[i] = read_eeprom(dev, 16 + i); + + for (i = 0; i < 3; i++) + ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); + + /* Set station address in ethernet_device structure. */ + dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; + dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; + dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; + dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; + dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; + dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; +out: + return rc; +} + +static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int err; + + /* Function to accommodate changes to Maximum Transfer Unit + * (or MTU) of IPG NIC. Cannot use default function since + * the default will not allow for MTU > 1500 bytes. + */ + + IPG_DEBUG_MSG("_nic_change_mtu\n"); + + /* + * Check that the new MTU value is between 68 (14 byte header, 46 byte + * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU. + */ + if (new_mtu < 68 || new_mtu > 10240) + return -EINVAL; + + err = ipg_nic_stop(dev); + if (err) + return err; + + dev->mtu = new_mtu; + + sp->max_rxframe_size = new_mtu; + + sp->rxfrag_size = new_mtu; + if (sp->rxfrag_size > 4088) + sp->rxfrag_size = 4088; + + sp->rxsupport_size = sp->max_rxframe_size; + + if (new_mtu > 0x0600) + sp->is_jumbo = true; + else + sp->is_jumbo = false; + + return ipg_nic_open(dev); +} + +static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_ethtool_gset(&sp->mii_if, cmd); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_ethtool_sset(&sp->mii_if, cmd); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_nway_reset(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_nway_restart(&sp->mii_if); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static const struct ethtool_ops ipg_ethtool_ops = { + .get_settings = ipg_get_settings, + .set_settings = ipg_set_settings, + .nway_reset = ipg_nway_reset, +}; + +static void ipg_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct ipg_nic_private *sp = netdev_priv(dev); + + IPG_DEBUG_MSG("_remove\n"); + + /* Un-register Ethernet device. 
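+ * Tear down in the reverse order of probe: unregister, unmap the registers, release the regions, free the netdev, then disable the PCI device.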
*/ + unregister_netdev(dev); + + pci_iounmap(pdev, sp->ioaddr); + + pci_release_regions(pdev); + + free_netdev(dev); + pci_disable_device(pdev); +} + +static const struct net_device_ops ipg_netdev_ops = { + .ndo_open = ipg_nic_open, + .ndo_stop = ipg_nic_stop, + .ndo_start_xmit = ipg_nic_hard_start_xmit, + .ndo_get_stats = ipg_nic_get_stats, + .ndo_set_rx_mode = ipg_nic_set_multicast_list, + .ndo_do_ioctl = ipg_ioctl, + .ndo_tx_timeout = ipg_tx_timeout, + .ndo_change_mtu = ipg_nic_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + unsigned int i = id->driver_data; + struct ipg_nic_private *sp; + struct net_device *dev; + void __iomem *ioaddr; + int rc; + + rc = pci_enable_device(pdev); + if (rc < 0) + goto out; + + pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]); + + pci_set_master(pdev); + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); + if (rc < 0) { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + pr_err("%s: DMA config failed\n", pci_name(pdev)); + goto err_disable_0; + } + } + + /* + * Initialize net device. + */ + dev = alloc_etherdev(sizeof(struct ipg_nic_private)); + if (!dev) { + rc = -ENOMEM; + goto err_disable_0; + } + + sp = netdev_priv(dev); + spin_lock_init(&sp->lock); + mutex_init(&sp->mii_mutex); + + sp->is_jumbo = IPG_IS_JUMBO; + sp->rxfrag_size = IPG_RXFRAG_SIZE; + sp->rxsupport_size = IPG_RXSUPPORT_SIZE; + sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE; + + /* Declare IPG NIC functions for Ethernet device methods. + */ + dev->netdev_ops = &ipg_netdev_ops; + SET_NETDEV_DEV(dev, &pdev->dev); + dev->ethtool_ops = &ipg_ethtool_ops; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_free_dev_1; + + ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); + if (!ioaddr) { + pr_err("%s: cannot map MMIO\n", pci_name(pdev)); + rc = -EIO; + goto err_release_regions_2; + } + + /* Save the pointer to the PCI device information. */ + sp->ioaddr = ioaddr; + sp->pdev = pdev; + sp->dev = dev; + + INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); + + pci_set_drvdata(pdev, dev); + + rc = ipg_hw_init(dev); + if (rc < 0) + goto err_unmap_3; + + rc = register_netdev(dev); + if (rc < 0) + goto err_unmap_3; + + netdev_info(dev, "Ethernet device registered\n"); +out: + return rc; + +err_unmap_3: + pci_iounmap(pdev, ioaddr); +err_release_regions_2: + pci_release_regions(pdev); +err_free_dev_1: + free_netdev(dev); +err_disable_0: + pci_disable_device(pdev); + goto out; +} + +static struct pci_driver ipg_pci_driver = { + .name = IPG_DRIVER_NAME, + .id_table = ipg_pci_tbl, + .probe = ipg_probe, + .remove = ipg_remove, +}; + +module_pci_driver(ipg_pci_driver); diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h new file mode 100644 index 000000000..a21e4f570 --- /dev/null +++ b/drivers/net/ethernet/icplus/ipg.h @@ -0,0 +1,748 @@ +/* + * Include file for Gigabit Ethernet device driver for Network + * Interface Cards (NICs) utilizing the Tamarack Microelectronics + * Inc. IPG Gigabit or Triple Speed Ethernet Media Access + * Controller. + */ +#ifndef __LINUX_IPG_H +#define __LINUX_IPG_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Constants + */ + +/* GMII based PHY IDs */ +#define NS 0x2000 +#define MARVELL 0x0141 +#define ICPLUS_PHY 0x243 + +/* NIC Physical Layer Device MII register fields. 
*/ +#define MII_PHY_SELECTOR_IEEE8023 0x0001 +#define MII_PHY_TECHABILITYFIELD 0x1FE0 + +/* GMII_PHY_1000 need to set to prefer master */ +#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400 + +/* NIC Physical Layer Device GMII constants. */ +#define GMII_PREAMBLE 0xFFFFFFFF +#define GMII_ST 0x1 +#define GMII_READ 0x2 +#define GMII_WRITE 0x1 +#define GMII_TA_READ_MASK 0x1 +#define GMII_TA_WRITE 0x2 + +/* I/O register offsets. */ +enum ipg_regs { + DMA_CTRL = 0x00, + RX_DMA_STATUS = 0x08, /* Unused + reserved */ + TFD_LIST_PTR_0 = 0x10, + TFD_LIST_PTR_1 = 0x14, + TX_DMA_BURST_THRESH = 0x18, + TX_DMA_URGENT_THRESH = 0x19, + TX_DMA_POLL_PERIOD = 0x1a, + RFD_LIST_PTR_0 = 0x1c, + RFD_LIST_PTR_1 = 0x20, + RX_DMA_BURST_THRESH = 0x24, + RX_DMA_URGENT_THRESH = 0x25, + RX_DMA_POLL_PERIOD = 0x26, + DEBUG_CTRL = 0x2c, + ASIC_CTRL = 0x30, + FIFO_CTRL = 0x38, /* Unused */ + FLOW_OFF_THRESH = 0x3c, + FLOW_ON_THRESH = 0x3e, + EEPROM_DATA = 0x48, + EEPROM_CTRL = 0x4a, + EXPROM_ADDR = 0x4c, /* Unused */ + EXPROM_DATA = 0x50, /* Unused */ + WAKE_EVENT = 0x51, /* Unused */ + COUNTDOWN = 0x54, /* Unused */ + INT_STATUS_ACK = 0x5a, + INT_ENABLE = 0x5c, + INT_STATUS = 0x5e, /* Unused */ + TX_STATUS = 0x60, + MAC_CTRL = 0x6c, + VLAN_TAG = 0x70, /* Unused */ + PHY_SET = 0x75, + PHY_CTRL = 0x76, + STATION_ADDRESS_0 = 0x78, + STATION_ADDRESS_1 = 0x7a, + STATION_ADDRESS_2 = 0x7c, + MAX_FRAME_SIZE = 0x86, + RECEIVE_MODE = 0x88, + HASHTABLE_0 = 0x8c, + HASHTABLE_1 = 0x90, + RMON_STATISTICS_MASK = 0x98, + STATISTICS_MASK = 0x9c, + RX_JUMBO_FRAMES = 0xbc, /* Unused */ + TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */ + IP_CHECKSUM_ERRORS = 0xc2, /* Unused */ + UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */ + TX_JUMBO_FRAMES = 0xf4 /* Unused */ +}; + +/* Ethernet MIB statistic register offsets. */ +#define IPG_OCTETRCVOK 0xA8 +#define IPG_MCSTOCTETRCVDOK 0xAC +#define IPG_BCSTOCTETRCVOK 0xB0 +#define IPG_FRAMESRCVDOK 0xB4 +#define IPG_MCSTFRAMESRCVDOK 0xB8 +#define IPG_BCSTFRAMESRCVDOK 0xBE +#define IPG_MACCONTROLFRAMESRCVD 0xC6 +#define IPG_FRAMETOOLONGERRRORS 0xC8 +#define IPG_INRANGELENGTHERRORS 0xCA +#define IPG_FRAMECHECKSEQERRORS 0xCC +#define IPG_FRAMESLOSTRXERRORS 0xCE +#define IPG_OCTETXMTOK 0xD0 +#define IPG_MCSTOCTETXMTOK 0xD4 +#define IPG_BCSTOCTETXMTOK 0xD8 +#define IPG_FRAMESXMTDOK 0xDC +#define IPG_MCSTFRAMESXMTDOK 0xE0 +#define IPG_FRAMESWDEFERREDXMT 0xE4 +#define IPG_LATECOLLISIONS 0xE8 +#define IPG_MULTICOLFRAMES 0xEC +#define IPG_SINGLECOLFRAMES 0xF0 +#define IPG_BCSTFRAMESXMTDOK 0xF6 +#define IPG_CARRIERSENSEERRORS 0xF8 +#define IPG_MACCONTROLFRAMESXMTDOK 0xFA +#define IPG_FRAMESABORTXSCOLLS 0xFC +#define IPG_FRAMESWEXDEFERRAL 0xFE + +/* RMON statistic register offsets. 
*/ +#define IPG_ETHERSTATSCOLLISIONS 0x100 +#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104 +#define IPG_ETHERSTATSPKTSTRANSMIT 0x108 +#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C +#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110 +#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114 +#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118 +#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C +#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 +#define IPG_ETHERSTATSCRCALIGNERRORS 0x124 +#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 +#define IPG_ETHERSTATSFRAGMENTS 0x12C +#define IPG_ETHERSTATSJABBERS 0x130 +#define IPG_ETHERSTATSOCTETS 0x134 +#define IPG_ETHERSTATSPKTS 0x138 +#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C +#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140 +#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144 +#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148 +#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C +#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150 + +/* RMON statistic register equivalents. */ +#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0 +#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6 +#define IPG_ETHERSTATSMULTICASTPKTS 0xB8 +#define IPG_ETHERSTATSBROADCASTPKTS 0xBE +#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8 +#define IPG_ETHERSTATSDROPEVENTS 0xCE + +/* Serial EEPROM offsets */ +#define IPG_EEPROM_CONFIGPARAM 0x00 +#define IPG_EEPROM_ASICCTRL 0x01 +#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 +#define IPG_EEPROM_SUBSYSTEMID 0x03 +#define IPG_EEPROM_STATIONADDRESS0 0x10 +#define IPG_EEPROM_STATIONADDRESS1 0x11 +#define IPG_EEPROM_STATIONADDRESS2 0x12 + +/* Register & data structure bit masks */ + +/* PCI register masks. */ + +/* IOBaseAddress */ +#define IPG_PIB_RSVD_MASK 0xFFFFFE01 +#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 +#define IPG_PIB_IOBASEADDRIND 0x00000001 + +/* MemBaseAddress */ +#define IPG_PMB_RSVD_MASK 0xFFFFFE07 +#define IPG_PMB_MEMBASEADDRIND 0x00000001 +#define IPG_PMB_MEMMAPTYPE 0x00000006 +#define IPG_PMB_MEMMAPTYPE0 0x00000002 +#define IPG_PMB_MEMMAPTYPE1 0x00000004 +#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 + +/* ConfigStatus */ +#define IPG_CS_RSVD_MASK 0xFFB0 +#define IPG_CS_CAPABILITIES 0x0010 +#define IPG_CS_66MHZCAPABLE 0x0020 +#define IPG_CS_FASTBACK2BACK 0x0080 +#define IPG_CS_DATAPARITYREPORTED 0x0100 +#define IPG_CS_DEVSELTIMING 0x0600 +#define IPG_CS_SIGNALEDTARGETABORT 0x0800 +#define IPG_CS_RECEIVEDTARGETABORT 0x1000 +#define IPG_CS_RECEIVEDMASTERABORT 0x2000 +#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000 +#define IPG_CS_DETECTEDPARITYERROR 0x8000 + +/* TFD data structure masks. 
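+ * (Bit fields within the 64-bit TFC and FragInfo words of struct ipg_tx.)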
*/ + +/* TFDList, TFC */ +#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL +#define IPG_TFC_FRAMEID 0x000000000000FFFFULL +#define IPG_TFC_WORDALIGN 0x0000000000030000ULL +#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL +#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL +#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL +#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL +#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL +#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL +#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL +#define IPG_TFC_TXINDICATE 0x0000000000400000ULL +#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL +#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL +#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL +#define IPG_TFC_TFDDONE 0x0000000080000000ULL +#define IPG_TFC_VID 0x00000FFF00000000ULL +#define IPG_TFC_CFI 0x0000100000000000ULL +#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL + +/* TFDList, FragInfo */ +#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL +#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL +#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL + +/* RFD data structure masks. */ + +/* RFDList, RFS */ +#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL +#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL +#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL +#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL +#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL +#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL +#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL +#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL +#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL +#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL +#define IPG_RFS_TCPERROR 0x0000000001000000ULL +#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL +#define IPG_RFS_UDPERROR 0x0000000004000000ULL +#define IPG_RFS_IPDETECTED 0x0000000008000000ULL +#define IPG_RFS_IPERROR 0x0000000010000000ULL +#define IPG_RFS_FRAMESTART 0x0000000020000000ULL +#define IPG_RFS_FRAMEEND 0x0000000040000000ULL +#define IPG_RFS_RFDDONE 0x0000000080000000ULL +#define IPG_RFS_TCI 0x0000FFFF00000000ULL + +/* RFDList, FragInfo */ +#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL +#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL +#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL + +/* I/O Register masks. 
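+ * (Masks for fields within the memory-mapped registers listed in enum ipg_regs above.)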
*/ + +/* RMON Statistics Mask */ +#define IPG_RZ_ALL 0x0FFFFFFF + +/* Statistics Mask */ +#define IPG_SM_ALL 0x0FFFFFFF +#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 +#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 +#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 +#define IPG_SM_RXJUMBOFRAMES 0x00000008 +#define IPG_SM_TCPCHECKSUMERRORS 0x00000010 +#define IPG_SM_IPCHECKSUMERRORS 0x00000020 +#define IPG_SM_UDPCHECKSUMERRORS 0x00000040 +#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 +#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 +#define IPG_SM_INRANGELENGTHERRORS 0x00000200 +#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 +#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 +#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 +#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 +#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 +#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 +#define IPG_SM_LATECOLLISIONS 0x00010000 +#define IPG_SM_MULTICOLFRAMES 0x00020000 +#define IPG_SM_SINGLECOLFRAMES 0x00040000 +#define IPG_SM_TXJUMBOFRAMES 0x00080000 +#define IPG_SM_CARRIERSENSEERRORS 0x00100000 +#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 +#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 +#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 + +/* Countdown */ +#define IPG_CD_RSVD_MASK 0x0700FFFF +#define IPG_CD_COUNT 0x0000FFFF +#define IPG_CD_COUNTDOWNSPEED 0x01000000 +#define IPG_CD_COUNTDOWNMODE 0x02000000 +#define IPG_CD_COUNTINTENABLED 0x04000000 + +/* TxDMABurstThresh */ +#define IPG_TB_RSVD_MASK 0xFF + +/* TxDMAUrgentThresh */ +#define IPG_TU_RSVD_MASK 0xFF + +/* TxDMAPollPeriod */ +#define IPG_TP_RSVD_MASK 0xFF + +/* RxDMAUrgentThresh */ +#define IPG_RU_RSVD_MASK 0xFF + +/* RxDMAPollPeriod */ +#define IPG_RP_RSVD_MASK 0xFF + +/* ReceiveMode */ +#define IPG_RM_RSVD_MASK 0x3F +#define IPG_RM_RECEIVEUNICAST 0x01 +#define IPG_RM_RECEIVEMULTICAST 0x02 +#define IPG_RM_RECEIVEBROADCAST 0x04 +#define IPG_RM_RECEIVEALLFRAMES 0x08 +#define IPG_RM_RECEIVEMULTICASTHASH 0x10 +#define IPG_RM_RECEIVEIPMULTICAST 0x20 + +/* PhySet */ +#define IPG_PS_MEM_LENB9B 0x01 +#define IPG_PS_MEM_LEN9 0x02 +#define IPG_PS_NON_COMPDET 0x04 + +/* PhyCtrl */ +#define IPG_PC_RSVD_MASK 0xFF +#define IPG_PC_MGMTCLK_LO 0x00 +#define IPG_PC_MGMTCLK_HI 0x01 +#define IPG_PC_MGMTCLK 0x01 +#define IPG_PC_MGMTDATA 0x02 +#define IPG_PC_MGMTDIR 0x04 +#define IPG_PC_DUPLEX_POLARITY 0x08 +#define IPG_PC_DUPLEX_STATUS 0x10 +#define IPG_PC_LINK_POLARITY 0x20 +#define IPG_PC_LINK_SPEED 0xC0 +#define IPG_PC_LINK_SPEED_10MBPS 0x40 +#define IPG_PC_LINK_SPEED_100MBPS 0x80 +#define IPG_PC_LINK_SPEED_1000MBPS 0xC0 + +/* DMACtrl */ +#define IPG_DC_RSVD_MASK 0xC07D9818 +#define IPG_DC_RX_DMA_COMPLETE 0x00000008 +#define IPG_DC_RX_DMA_POLL_NOW 0x00000010 +#define IPG_DC_TX_DMA_COMPLETE 0x00000800 +#define IPG_DC_TX_DMA_POLL_NOW 0x00001000 +#define IPG_DC_TX_DMA_IN_PROG 0x00008000 +#define IPG_DC_RX_EARLY_DISABLE 0x00010000 +#define IPG_DC_MWI_DISABLE 0x00040000 +#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000 +#define IPG_DC_TX_BURST_LIMIT 0x00700000 +#define IPG_DC_TARGET_ABORT 0x40000000 +#define IPG_DC_MASTER_ABORT 0x80000000 + +/* ASICCtrl */ +#define IPG_AC_RSVD_MASK 0x07FFEFF2 +#define IPG_AC_EXP_ROM_SIZE 0x00000002 +#define IPG_AC_PHY_SPEED10 0x00000010 +#define IPG_AC_PHY_SPEED100 0x00000020 +#define IPG_AC_PHY_SPEED1000 0x00000040 +#define IPG_AC_PHY_MEDIA 0x00000080 +#define IPG_AC_FORCED_CFG 0x00000700 +#define IPG_AC_D3RESETDISABLE 0x00000800 +#define IPG_AC_SPEED_UP_MODE 0x00002000 +#define IPG_AC_LED_MODE 0x00004000 
+#define IPG_AC_RST_OUT_POLARITY 0x00008000 +#define IPG_AC_GLOBAL_RESET 0x00010000 +#define IPG_AC_RX_RESET 0x00020000 +#define IPG_AC_TX_RESET 0x00040000 +#define IPG_AC_DMA 0x00080000 +#define IPG_AC_FIFO 0x00100000 +#define IPG_AC_NETWORK 0x00200000 +#define IPG_AC_HOST 0x00400000 +#define IPG_AC_AUTO_INIT 0x00800000 +#define IPG_AC_RST_OUT 0x01000000 +#define IPG_AC_INT_REQUEST 0x02000000 +#define IPG_AC_RESET_BUSY 0x04000000 +#define IPG_AC_LED_SPEED 0x08000000 +#define IPG_AC_LED_MODE_BIT_1 0x20000000 + +/* EepromCtrl */ +#define IPG_EC_RSVD_MASK 0x83FF +#define IPG_EC_EEPROM_ADDR 0x00FF +#define IPG_EC_EEPROM_OPCODE 0x0300 +#define IPG_EC_EEPROM_SUBCOMMAD 0x0000 +#define IPG_EC_EEPROM_WRITEOPCODE 0x0100 +#define IPG_EC_EEPROM_READOPCODE 0x0200 +#define IPG_EC_EEPROM_ERASEOPCODE 0x0300 +#define IPG_EC_EEPROM_BUSY 0x8000 + +/* FIFOCtrl */ +#define IPG_FC_RSVD_MASK 0xC001 +#define IPG_FC_RAM_TEST_MODE 0x0001 +#define IPG_FC_TRANSMITTING 0x4000 +#define IPG_FC_RECEIVING 0x8000 + +/* TxStatus */ +#define IPG_TS_RSVD_MASK 0xFFFF00DD +#define IPG_TS_TX_ERROR 0x00000001 +#define IPG_TS_LATE_COLLISION 0x00000004 +#define IPG_TS_TX_MAX_COLL 0x00000008 +#define IPG_TS_TX_UNDERRUN 0x00000010 +#define IPG_TS_TX_IND_REQD 0x00000040 +#define IPG_TS_TX_COMPLETE 0x00000080 +#define IPG_TS_TX_FRAMEID 0xFFFF0000 + +/* WakeEvent */ +#define IPG_WE_WAKE_PKT_ENABLE 0x01 +#define IPG_WE_MAGIC_PKT_ENABLE 0x02 +#define IPG_WE_LINK_EVT_ENABLE 0x04 +#define IPG_WE_WAKE_POLARITY 0x08 +#define IPG_WE_WAKE_PKT_EVT 0x10 +#define IPG_WE_MAGIC_PKT_EVT 0x20 +#define IPG_WE_LINK_EVT 0x40 +#define IPG_WE_WOL_ENABLE 0x80 + +/* IntEnable */ +#define IPG_IE_RSVD_MASK 0x1FFE +#define IPG_IE_HOST_ERROR 0x0002 +#define IPG_IE_TX_COMPLETE 0x0004 +#define IPG_IE_MAC_CTRL_FRAME 0x0008 +#define IPG_IE_RX_COMPLETE 0x0010 +#define IPG_IE_RX_EARLY 0x0020 +#define IPG_IE_INT_REQUESTED 0x0040 +#define IPG_IE_UPDATE_STATS 0x0080 +#define IPG_IE_LINK_EVENT 0x0100 +#define IPG_IE_TX_DMA_COMPLETE 0x0200 +#define IPG_IE_RX_DMA_COMPLETE 0x0400 +#define IPG_IE_RFD_LIST_END 0x0800 +#define IPG_IE_RX_DMA_PRIORITY 0x1000 + +/* IntStatus */ +#define IPG_IS_RSVD_MASK 0x1FFF +#define IPG_IS_INTERRUPT_STATUS 0x0001 +#define IPG_IS_HOST_ERROR 0x0002 +#define IPG_IS_TX_COMPLETE 0x0004 +#define IPG_IS_MAC_CTRL_FRAME 0x0008 +#define IPG_IS_RX_COMPLETE 0x0010 +#define IPG_IS_RX_EARLY 0x0020 +#define IPG_IS_INT_REQUESTED 0x0040 +#define IPG_IS_UPDATE_STATS 0x0080 +#define IPG_IS_LINK_EVENT 0x0100 +#define IPG_IS_TX_DMA_COMPLETE 0x0200 +#define IPG_IS_RX_DMA_COMPLETE 0x0400 +#define IPG_IS_RFD_LIST_END 0x0800 +#define IPG_IS_RX_DMA_PRIORITY 0x1000 + +/* MACCtrl */ +#define IPG_MC_RSVD_MASK 0x7FE33FA3 +#define IPG_MC_IFS_SELECT 0x00000003 +#define IPG_MC_IFS_4352BIT 0x00000003 +#define IPG_MC_IFS_1792BIT 0x00000002 +#define IPG_MC_IFS_1024BIT 0x00000001 +#define IPG_MC_IFS_96BIT 0x00000000 +#define IPG_MC_DUPLEX_SELECT 0x00000020 +#define IPG_MC_DUPLEX_SELECT_FD 0x00000020 +#define IPG_MC_DUPLEX_SELECT_HD 0x00000000 +#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080 +#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100 +#define IPG_MC_RCV_FCS 0x00000200 +#define IPG_MC_FIFO_LOOPBACK 0x00000400 +#define IPG_MC_MAC_LOOPBACK 0x00000800 +#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000 +#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000 +#define IPG_MC_COLLISION_DETECT 0x00010000 +#define IPG_MC_CARRIER_SENSE 0x00020000 +#define IPG_MC_STATISTICS_ENABLE 0x00200000 +#define IPG_MC_STATISTICS_DISABLE 0x00400000 +#define IPG_MC_STATISTICS_ENABLED 0x00800000 +#define 
IPG_MC_TX_ENABLE 0x01000000 +#define IPG_MC_TX_DISABLE 0x02000000 +#define IPG_MC_TX_ENABLED 0x04000000 +#define IPG_MC_RX_ENABLE 0x08000000 +#define IPG_MC_RX_DISABLE 0x10000000 +#define IPG_MC_RX_ENABLED 0x20000000 +#define IPG_MC_PAUSED 0x40000000 + +/* + * Tune + */ + +/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */ +#define IPG_APPEND_FCS_ON_TX 1 + +/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS strip on RX. */ +#define IPG_STRIP_FCS_ON_RX 1 + +/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with + * Ethernet errors. + */ +#define IPG_DROP_ON_RX_ETH_ERRORS 1 + +/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually + * (via TFC). + */ +#define IPG_INSERT_MANUAL_VLAN_TAG 0 + +/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */ +#define IPG_ADD_IPCHECKSUM_ON_TX 0 + +/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX. + * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. + */ +#define IPG_ADD_TCPCHECKSUM_ON_TX 0 + +/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX. + * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. + */ +#define IPG_ADD_UDPCHECKSUM_ON_TX 0 + +/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx + * constants as desired. + */ +#define IPG_MANUAL_VLAN_VID 0xABC +#define IPG_MANUAL_VLAN_CFI 0x1 +#define IPG_MANUAL_VLAN_USERPRIORITY 0x5 + +#define IPG_IO_REG_RANGE 0xFF +#define IPG_MEM_REG_RANGE 0x154 +#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet" +#define IPG_NIC_PHY_ADDRESS 0x01 +#define IPG_DMALIST_ALIGN_PAD 0x07 +#define IPG_MULTICAST_HASHTABLE_SIZE 0x40 + +/* Number of milliseconds to wait after issuing a software reset. + * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. + */ +#define IPG_AC_RESETWAIT 0x05 + +/* Number of IPG_AC_RESETWAIT timeperiods before declaring timeout. */ +#define IPG_AC_RESET_TIMEOUT 0x0A + +/* Minimum number of nanoseconds used to toggle MDC clock during + * MII/GMII register access. + */ +#define IPG_PC_PHYCTRLWAIT_NS 200 + +#define IPG_TFDLIST_LENGTH 0x100 + +/* Number of frames between TxDMAComplete interrupt. + * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH + */ +#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 + +#define IPG_RFDLIST_LENGTH 0x100 + +/* Maximum number of RFDs to process per interrupt. + * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH + */ +#define IPG_MAXRFDPROCESS_COUNT 0x80 + +/* Minimum margin between last freed RFD, and current RFD. + * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH + */ +#define IPG_MINUSEDRFDSTOFREE 0x80 + +/* specify the jumbo frame maximum size + * per unit is 0x600 (the rx_buffer size that one RFD can carry) + */ +#define MAX_JUMBOSIZE 0x8 /* max is 12K */ + +/* Key register values loaded at driver start up. */ + +/* TXDMAPollPeriod is specified in 320ns increments. + * + * Value Time + * --------------------- + * 0x00-0x01 320ns + * 0x03 ~1us + * 0x1F ~10us + * 0xFF ~82us + */ +#define IPG_TXDMAPOLLPERIOD_VALUE 0x26 + +/* TxDMAUrgentThresh specifies the minimum amount of + * data in the transmit FIFO before asserting an + * urgent transmit DMA request. 
+ * + * Value Min TxFIFO occupied space before urgent TX request + * --------------------------------------------------------------- + * 0x00-0x04 128 bytes (1024 bits) + * 0x27 1248 bytes (~10000 bits) + * 0x30 1536 bytes (12288 bits) + * 0xFF 8192 bytes (65535 bits) + */ +#define IPG_TXDMAURGENTTHRESH_VALUE 0x04 + +/* TxDMABurstThresh specifies the minimum amount of + * free space in the transmit FIFO before asserting an + * transmit DMA request. + * + * Value Min TxFIFO free space before TX request + * ---------------------------------------------------- + * 0x00-0x08 256 bytes + * 0x30 1536 bytes + * 0xFF 8192 bytes + */ +#define IPG_TXDMABURSTTHRESH_VALUE 0x30 + +/* RXDMAPollPeriod is specified in 320ns increments. + * + * Value Time + * --------------------- + * 0x00-0x01 320ns + * 0x03 ~1us + * 0x1F ~10us + * 0xFF ~82us + */ +#define IPG_RXDMAPOLLPERIOD_VALUE 0x01 + +/* RxDMAUrgentThresh specifies the minimum amount of + * free space within the receive FIFO before asserting + * a urgent receive DMA request. + * + * Value Min RxFIFO free space before urgent RX request + * --------------------------------------------------------------- + * 0x00-0x04 128 bytes (1024 bits) + * 0x27 1248 bytes (~10000 bits) + * 0x30 1536 bytes (12288 bits) + * 0xFF 8192 bytes (65535 bits) + */ +#define IPG_RXDMAURGENTTHRESH_VALUE 0x30 + +/* RxDMABurstThresh specifies the minimum amount of + * occupied space within the receive FIFO before asserting + * a receive DMA request. + * + * Value Min TxFIFO free space before TX request + * ---------------------------------------------------- + * 0x00-0x08 256 bytes + * 0x30 1536 bytes + * 0xFF 8192 bytes + */ +#define IPG_RXDMABURSTTHRESH_VALUE 0x30 + +/* FlowOnThresh specifies the maximum amount of occupied + * space in the receive FIFO before a PAUSE frame with + * maximum pause time transmitted. + * + * Value Max RxFIFO occupied space before PAUSE + * --------------------------------------------------- + * 0x0000 0 bytes + * 0x0740 29,696 bytes + * 0x07FF 32,752 bytes + */ +#define IPG_FLOWONTHRESH_VALUE 0x0740 + +/* FlowOffThresh specifies the minimum amount of occupied + * space in the receive FIFO before a PAUSE frame with + * zero pause time is transmitted. + * + * Value Max RxFIFO occupied space before PAUSE + * --------------------------------------------------- + * 0x0000 0 bytes + * 0x00BF 3056 bytes + * 0x07FF 32,752 bytes + */ +#define IPG_FLOWOFFTHRESH_VALUE 0x00BF + +/* + * Miscellaneous macros. + */ + +/* Macros for printing debug statements. */ +#ifdef IPG_DEBUG +# define IPG_DEBUG_MSG(fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "IPG: " fmt, ##args); \ +} while (0) +# define IPG_DDEBUG_MSG(fmt, args...) \ + printk(KERN_DEBUG "IPG: " fmt, ##args) +# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) +# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) +#else +# define IPG_DEBUG_MSG(fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "IPG: " fmt, ##args); \ +} while (0) +# define IPG_DDEBUG_MSG(fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "IPG: " fmt, ##args); \ +} while (0) +# define IPG_DUMPRFDLIST(args) +# define IPG_DUMPTFDLIST(args) +#endif + +/* + * End miscellaneous macros. + */ + +/* Transmit Frame Descriptor. The IPG supports 15 fragments, + * however Linux requires only a single fragment. Note, each + * TFD field is 64 bits wide. + */ +struct ipg_tx { + __le64 next_desc; + __le64 tfc; + __le64 frag_info; +}; + +/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. 
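+ * The layout mirrors struct ipg_tx above: a next-descriptor pointer, a status word (RFS) and a fragment address/length word.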
+ */ +struct ipg_rx { + __le64 next_desc; + __le64 rfs; + __le64 frag_info; +}; + +struct ipg_jumbo { + int found_start; + int current_size; + struct sk_buff *skb; +}; + +/* Structure of IPG NIC specific data. */ +struct ipg_nic_private { + void __iomem *ioaddr; + struct ipg_tx *txd; + struct ipg_rx *rxd; + dma_addr_t txd_map; + dma_addr_t rxd_map; + struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH]; + struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH]; + unsigned int tx_current; + unsigned int tx_dirty; + unsigned int rx_current; + unsigned int rx_dirty; + bool is_jumbo; + struct ipg_jumbo jumbo; + unsigned long rxfrag_size; + unsigned long rxsupport_size; + unsigned long max_rxframe_size; + unsigned int rx_buf_sz; + struct pci_dev *pdev; + struct net_device *dev; + struct net_device_stats stats; + spinlock_t lock; + int tenmbpsmode; + + u16 led_mode; + u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */ + + struct mutex mii_mutex; + struct mii_if_info mii_if; + int reset_current_tfd; +#ifdef IPG_DEBUG + int RFDlistendCount; + int RFDListCheckedCount; + int EmptyRFDListCount; +#endif + struct delayed_work task; +}; + +#endif /* __LINUX_IPG_H */ diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig new file mode 100644 index 000000000..f4ff46558 --- /dev/null +++ b/drivers/net/ethernet/intel/Kconfig @@ -0,0 +1,356 @@ +# +# Intel network device configuration +# + +config NET_VENDOR_INTEL + bool "Intel devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Intel cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_INTEL + +config E100 + tristate "Intel(R) PRO/100+ support" + depends on PCI + select MII + ---help--- + This driver supports Intel(R) PRO/100 family of adapters. + To verify that your adapter is supported, find the board ID number + on the adapter. Look for a label that has a barcode and a number + in the format 123456-001 (six digits hyphen three digits). + + Use the above information and the Adapter & Driver ID Guide at: + + + + to identify the adapter. + + For the latest Intel PRO/100 network driver for Linux, see: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called e100. + +config E1000 + tristate "Intel(R) PRO/1000 Gigabit Ethernet support" + depends on PCI + ---help--- + This driver supports Intel(R) PRO/1000 gigabit ethernet family of + adapters. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called e1000. + +config E1000E + tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" + depends on PCI && (!SPARC32 || BROKEN) + select CRC32 + select PTP_1588_CLOCK + ---help--- + This driver supports the PCI-Express Intel(R) PRO/1000 gigabit + ethernet family of adapters. 
For PCI or PCI-X e1000 adapters, + use the regular e1000 driver For more information on how to + identify your adapter, go to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + To compile this driver as a module, choose M here. The module + will be called e1000e. + +config IGB + tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" + depends on PCI + select PTP_1588_CLOCK + select I2C + select I2C_ALGOBIT + ---help--- + This driver supports Intel(R) 82575/82576 gigabit ethernet family of + adapters. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called igb. + +config IGB_HWMON + bool "Intel(R) PCI-Express Gigabit adapters HWMON support" + default y + depends on IGB && HWMON && !(IGB=y && HWMON=m) + ---help--- + Say Y if you want to expose thermal sensor data on Intel devices. + + Some of our devices contain thermal sensors, both external and internal. + This data is available via the hwmon sysfs interface and exposes + the onboard sensors. + +config IGB_DCA + bool "Direct Cache Access (DCA) Support" + default y + depends on IGB && DCA && !(IGB=y && DCA=m) + ---help--- + Say Y here if you want to use Direct Cache Access (DCA) in the + driver. DCA is a method for warming the CPU cache before data + is used, with the intent of lessening the impact of cache misses. + +config IGBVF + tristate "Intel(R) 82576 Virtual Function Ethernet support" + depends on PCI + ---help--- + This driver supports Intel(R) 82576 virtual functions. For more + information on how to identify your adapter, go to the Adapter & + Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called igbvf. + +config IXGB + tristate "Intel(R) PRO/10GbE support" + depends on PCI + ---help--- + This driver supports Intel(R) PRO/10GbE family of adapters for + PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver + instead. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called ixgb. + +config IXGBE + tristate "Intel(R) 10GbE PCI Express adapters support" + depends on PCI + select MDIO + select PTP_1588_CLOCK + ---help--- + This driver supports Intel(R) 10GbE PCI Express family of + adapters. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + To compile this driver as a module, choose M here. The module + will be called ixgbe. + +config IXGBE_VXLAN + bool "Virtual eXtensible Local Area Network Support" + default n + depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m) + ---help--- + This allows one to create VXLAN virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. 
VXLAN is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use Virtual eXtensible Local Area Network + (VXLAN) in the driver. + +config IXGBE_HWMON + bool "Intel(R) 10GbE PCI Express adapters HWMON support" + default y + depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m) + ---help--- + Say Y if you want to expose the thermal sensor data on some of + our cards, via a hwmon sysfs interface. + +config IXGBE_DCA + bool "Direct Cache Access (DCA) Support" + default y + depends on IXGBE && DCA && !(IXGBE=y && DCA=m) + ---help--- + Say Y here if you want to use Direct Cache Access (DCA) in the + driver. DCA is a method for warming the CPU cache before data + is used, with the intent of lessening the impact of cache misses. + +config IXGBE_DCB + bool "Data Center Bridging (DCB) Support" + default n + depends on IXGBE && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. + + If unsure, say N. + +config IXGBEVF + tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support" + depends on PCI_MSI + ---help--- + This driver supports Intel(R) PCI Express virtual functions for the + Intel(R) ixgbe driver. For more information on how to identify your + adapter, go to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called ixgbevf. MSI-X interrupt support is required + for this driver to work correctly. + +config I40E + tristate "Intel(R) Ethernet Controller XL710 Family support" + select PTP_1588_CLOCK + depends on PCI + ---help--- + This driver supports Intel(R) Ethernet Controller XL710 Family of + devices. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + To compile this driver as a module, choose M here. The module + will be called i40e. + +config I40E_VXLAN + bool "Virtual eXtensible Local Area Network Support" + default n + depends on I40E && VXLAN && !(I40E=y && VXLAN=m) + ---help--- + This allows one to create VXLAN virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. VXLAN is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use Virtual eXtensible Local Area Network + (VXLAN) in the driver. + +config I40E_DCB + bool "Data Center Bridging (DCB) Support" + default n + depends on I40E && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. + + If unsure, say N. + +config I40E_FCOE + bool "Fibre Channel over Ethernet (FCoE)" + default n + depends on I40E && DCB && FCOE + ---help--- + Say Y here if you want to use Fibre Channel over Ethernet (FCoE) + in the driver. This will create new netdev for exclusive FCoE + use with XL710 FCoE offloads enabled. + + If unsure, say N. + +config I40EVF + tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" + depends on PCI_MSI + ---help--- + This driver supports Intel(R) XL710 and X710 virtual functions. + For more information on how to identify your adapter, go to the + Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + To compile this driver as a module, choose M here. The module + will be called i40evf. 
MSI-X interrupt support is required + for this driver to work correctly. + +config FM10K + tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" + default n + depends on PCI_MSI + select PTP_1588_CLOCK + ---help--- + This driver supports Intel(R) FM10000 Ethernet Switch Host + Interface. For more information on how to identify your adapter, + go to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + To compile this driver as a module, choose M here. The module + will be called fm10k. MSI-X interrupt support is required + +config FM10K_VXLAN + bool "Virtual eXtensible Local Area Network Support" + default n + depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m) + ---help--- + This allows one to create VXLAN virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. VXLAN is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use Virtual eXtensible Local Area Network + (VXLAN) in the driver. + +endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile new file mode 100644 index 000000000..5ea764d85 --- /dev/null +++ b/drivers/net/ethernet/intel/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for the Intel network device drivers. +# + +obj-$(CONFIG_E100) += e100.o +obj-$(CONFIG_E1000) += e1000/ +obj-$(CONFIG_E1000E) += e1000e/ +obj-$(CONFIG_IGB) += igb/ +obj-$(CONFIG_IGBVF) += igbvf/ +obj-$(CONFIG_IXGBE) += ixgbe/ +obj-$(CONFIG_IXGBEVF) += ixgbevf/ +obj-$(CONFIG_I40E) += i40e/ +obj-$(CONFIG_IXGB) += ixgb/ +obj-$(CONFIG_I40EVF) += i40evf/ +obj-$(CONFIG_FM10K) += fm10k/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c new file mode 100644 index 000000000..f9fae3e6f --- /dev/null +++ b/drivers/net/ethernet/intel/e100.c @@ -0,0 +1,3201 @@ +/******************************************************************************* + + Intel PRO/100 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * e100.c: Intel(R) PRO/100 ethernet driver + * + * (Re)written 2003 by scott.feldman@intel.com. Based loosely on + * original e100 driver, but better described as a munging of + * e100, e1000, eepro100, tg3, 8139cp, and other drivers. + * + * References: + * Intel 8255x 10/100 Mbps Ethernet Controller Family, + * Open Source Software Developers Manual, + * http://sourceforge.net/projects/e1000 + * + * + * Theory of Operation + * + * I. 
General + * + * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet + * controller family, which includes the 82557, 82558, 82559, 82550, + * 82551, and 82562 devices. 82558 and greater controllers + * integrate the Intel 82555 PHY. The controllers are used in + * server and client network interface cards, as well as in + * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx + * configurations. 8255x supports a 32-bit linear addressing + * mode and operates at 33Mhz PCI clock rate. + * + * II. Driver Operation + * + * Memory-mapped mode is used exclusively to access the device's + * shared-memory structure, the Control/Status Registers (CSR). All + * setup, configuration, and control of the device, including queuing + * of Tx, Rx, and configuration commands is through the CSR. + * cmd_lock serializes accesses to the CSR command register. cb_lock + * protects the shared Command Block List (CBL). + * + * 8255x is highly MII-compliant and all access to the PHY go + * through the Management Data Interface (MDI). Consequently, the + * driver leverages the mii.c library shared with other MII-compliant + * devices. + * + * Big- and Little-Endian byte order as well as 32- and 64-bit + * archs are supported. Weak-ordered memory and non-cache-coherent + * archs are supported. + * + * III. Transmit + * + * A Tx skb is mapped and hangs off of a TCB. TCBs are linked + * together in a fixed-size ring (CBL) thus forming the flexible mode + * memory structure. A TCB marked with the suspend-bit indicates + * the end of the ring. The last TCB processed suspends the + * controller, and the controller can be restarted by issue a CU + * resume command to continue from the suspend point, or a CU start + * command to start at a given position in the ring. + * + * Non-Tx commands (config, multicast setup, etc) are linked + * into the CBL ring along with Tx commands. The common structure + * used for both Tx and non-Tx commands is the Command Block (CB). + * + * cb_to_use is the next CB to use for queuing a command; cb_to_clean + * is the next CB to check for completion; cb_to_send is the first + * CB to start on in case of a previous failure to resume. CB clean + * up happens in interrupt context in response to a CU interrupt. + * cbs_avail keeps track of number of free CB resources available. + * + * Hardware padding of short packets to minimum packet size is + * enabled. 82557 pads with 7Eh, while the later controllers pad + * with 00h. + * + * IV. Receive + * + * The Receive Frame Area (RFA) comprises a ring of Receive Frame + * Descriptors (RFD) + data buffer, thus forming the simplified mode + * memory structure. Rx skbs are allocated to contain both the RFD + * and the data buffer, but the RFD is pulled off before the skb is + * indicated. The data buffer is aligned such that encapsulated + * protocol headers are u32-aligned. Since the RFD is part of the + * mapped shared memory, and completion status is contained within + * the RFD, the RFD must be dma_sync'ed to maintain a consistent + * view from software and hardware. + * + * In order to keep updates to the RFD link field from colliding with + * hardware writes to mark packets complete, we use the feature that + * hardware will not write to a size 0 descriptor and mark the previous + * packet as end-of-list (EL). After updating the link, we remove EL + * and only then restore the size such that hardware may use the + * previous-to-end RFD. 
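 *
 * Roughly sketched (the names below are illustrative only, not the
 * driver's actual helpers; the fields are those of struct rfd defined
 * later in this file), that re-link sequence is:
 *
 *	new_guard->size = 0;				(new stopping point:
 *	new_guard->command |= cpu_to_le16(cb_el);	 size 0 plus EL)
 *	old_guard->link = cpu_to_le32(new_rfd_dma);	(chain in new RFD)
 *	old_guard->command &= ~cpu_to_le16(cb_el);	(drop EL first ...)
 *	old_guard->size =
 *		cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);	(... then size)
 *
 * so hardware never finds a usable, non-zero size on an RFD whose link
 * field may still be in flux.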
+ * + * Under typical operation, the receive unit (RU) is start once, + * and the controller happily fills RFDs as frames arrive. If + * replacement RFDs cannot be allocated, or the RU goes non-active, + * the RU must be restarted. Frame arrival generates an interrupt, + * and Rx indication and re-allocation happen in the same context, + * therefore no locking is required. A software-generated interrupt + * is generated from the watchdog to recover from a failed allocation + * scenario where all Rx resources have been indicated and none re- + * placed. + * + * V. Miscellaneous + * + * VLAN offloading of tagging, stripping and filtering is not + * supported, but driver will accommodate the extra 4-byte VLAN tag + * for processing by upper layers. Tx/Rx Checksum offloading is not + * supported. Tx Scatter/Gather is not supported. Jumbo Frames is + * not supported (hardware limitation). + * + * MagicPacket(tm) WoL support is enabled/disabled via ethtool. + * + * Thanks to JC (jchapman@katalix.com) for helping with + * testing/troubleshooting the development driver. + * + * TODO: + * o several entry points race with dev->close + * o check for tx-no-resources/stop Q races with tx clean/wake Q + * + * FIXES: + * 2005/12/02 - Michael O'Donnell + * - Stratus87247: protect MDI control register manipulations + * 2009/06/01 - Andreas Mohr + * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define DRV_NAME "e100" +#define DRV_EXT "-NAPI" +#define DRV_VERSION "3.5.24-k2"DRV_EXT +#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" +#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" + +#define E100_WATCHDOG_PERIOD (2 * HZ) +#define E100_NAPI_WEIGHT 16 + +#define FIRMWARE_D101M "/*(DEBLOBBED)*/" +#define FIRMWARE_D101S "/*(DEBLOBBED)*/" +#define FIRMWARE_D102E "/*(DEBLOBBED)*/" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +/*(DEBLOBBED)*/ + +static int debug = 3; +static int eeprom_bad_csum_allow = 0; +static int use_io = 0; +module_param(debug, int, 0); +module_param(eeprom_bad_csum_allow, int, 0); +module_param(use_io, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); +MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); +MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); + +#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ + PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } +static const struct pci_device_id e100_id_table[] = { + INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), + INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), + INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), + INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), + 
INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), + INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), + INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), + INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), + INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), + INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), + INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), + INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), + INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), + { 0, } +}; +MODULE_DEVICE_TABLE(pci, e100_id_table); + +enum mac { + mac_82557_D100_A = 0, + mac_82557_D100_B = 1, + mac_82557_D100_C = 2, + mac_82558_D101_A4 = 4, + mac_82558_D101_B0 = 5, + mac_82559_D101M = 8, + mac_82559_D101S = 9, + mac_82550_D102 = 12, + mac_82550_D102_C = 13, + mac_82551_E = 14, + mac_82551_F = 15, + mac_82551_10 = 16, + mac_unknown = 0xFF, +}; + +enum phy { + phy_100a = 0x000003E0, + phy_100c = 0x035002A8, + phy_82555_tx = 0x015002A8, + phy_nsc_tx = 0x5C002000, + phy_82562_et = 0x033002A8, + phy_82562_em = 0x032002A8, + phy_82562_ek = 0x031002A8, + phy_82562_eh = 0x017002A8, + phy_82552_v = 0xd061004d, + phy_unknown = 0xFFFFFFFF, +}; + +/* CSR (Control/Status Registers) */ +struct csr { + struct { + u8 status; + u8 stat_ack; + u8 cmd_lo; + u8 cmd_hi; + u32 gen_ptr; + } scb; + u32 port; + u16 flash_ctrl; + u8 eeprom_ctrl_lo; + u8 eeprom_ctrl_hi; + u32 mdi_ctrl; + u32 rx_dma_count; +}; + +enum scb_status { + rus_no_res = 0x08, + rus_ready = 0x10, + rus_mask = 0x3C, +}; + +enum ru_state { + RU_SUSPENDED = 0, + RU_RUNNING = 1, + RU_UNINITIALIZED = -1, +}; + +enum scb_stat_ack { + stat_ack_not_ours = 0x00, + stat_ack_sw_gen = 0x04, + stat_ack_rnr = 0x10, + stat_ack_cu_idle = 0x20, + stat_ack_frame_rx = 0x40, + stat_ack_cu_cmd_done = 0x80, + stat_ack_not_present = 0xFF, + stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), + stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), +}; + +enum scb_cmd_hi { + irq_mask_none = 0x00, + irq_mask_all = 0x01, + irq_sw_gen = 0x02, +}; + +enum scb_cmd_lo { + cuc_nop = 0x00, + ruc_start = 0x01, + ruc_load_base = 0x06, + cuc_start = 0x10, + cuc_resume = 0x20, + cuc_dump_addr = 0x40, + cuc_dump_stats = 0x50, + cuc_load_base = 0x60, + cuc_dump_reset = 0x70, +}; + +enum cuc_dump { + cuc_dump_complete = 0x0000A005, + cuc_dump_reset_complete = 0x0000A007, +}; + +enum port { + software_reset = 0x0000, + selftest = 0x0001, + selective_reset = 0x0002, +}; + +enum eeprom_ctrl_lo { + eesk = 0x01, + eecs = 0x02, + eedi = 0x04, + eedo = 0x08, +}; + +enum mdi_ctrl { + mdi_write = 0x04000000, + mdi_read = 0x08000000, + mdi_ready = 0x10000000, +}; + +enum eeprom_op { + op_write = 0x05, + op_read = 0x06, + op_ewds = 0x10, + op_ewen = 0x13, +}; + +enum eeprom_offsets { + eeprom_cnfg_mdix = 0x03, + eeprom_phy_iface = 0x06, + eeprom_id = 0x0A, + eeprom_config_asf = 0x0D, + eeprom_smbus_addr = 0x90, +}; + +enum eeprom_cnfg_mdix { + eeprom_mdix_enabled = 0x0080, +}; + 
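/* Reading aid, derived from the constants above and mdio_ctrl_hw()
 * further down: an MDI transaction is a single 32-bit write of
 *
 *	(reg << 16) | (addr << 21) | dir | data
 *
 * to csr->mdi_ctrl, i.e. bits 15:0 carry the data word, 20:16 the MII
 * register, 25:21 the PHY address, bits 27:26 the direction
 * (mdi_read/mdi_write), and the device raises bit 28 (mdi_ready) when
 * the transaction has completed.
 */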
+enum eeprom_phy_iface { + NoSuchPhy = 0, + I82553AB, + I82553C, + I82503, + DP83840, + S80C240, + S80C24, + I82555, + DP83840A = 10, +}; + +enum eeprom_id { + eeprom_id_wol = 0x0020, +}; + +enum eeprom_config_asf { + eeprom_asf = 0x8000, + eeprom_gcl = 0x4000, +}; + +enum cb_status { + cb_complete = 0x8000, + cb_ok = 0x2000, +}; + +/** + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ +enum cb_command { + cb_nop = 0x0000, + cb_iaaddr = 0x0001, + cb_config = 0x0002, + cb_multi = 0x0003, + cb_tx = 0x0004, + cb_ucode = 0x0005, + cb_dump = 0x0006, + cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, + cb_cid = 0x1f00, + cb_i = 0x2000, + cb_s = 0x4000, + cb_el = 0x8000, +}; + +struct rfd { + __le16 status; + __le16 command; + __le32 link; + __le32 rbd; + __le16 actual_size; + __le16 size; +}; + +struct rx { + struct rx *next, *prev; + struct sk_buff *skb; + dma_addr_t dma_addr; +}; + +#if defined(__BIG_ENDIAN_BITFIELD) +#define X(a,b) b,a +#else +#define X(a,b) a,b +#endif +struct config { +/*0*/ u8 X(byte_count:6, pad0:2); +/*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); +/*2*/ u8 adaptive_ifs; +/*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), + term_write_cache_line:1), pad3:4); +/*4*/ u8 X(rx_dma_max_count:7, pad4:1); +/*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); +/*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), + tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), + rx_save_overruns : 1), rx_save_bad_frames : 1); +/*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), + pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), + tx_dynamic_tbd:1); +/*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); +/*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), + link_status_wake:1), arp_wake:1), mcmatch_wake:1); +/*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), + loopback:2); +/*11*/ u8 X(linear_priority:3, pad11:5); +/*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); +/*13*/ u8 ip_addr_lo; +/*14*/ u8 ip_addr_hi; +/*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), + wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), + pad15_2:1), crs_or_cdt:1); +/*16*/ u8 fc_delay_lo; +/*17*/ u8 fc_delay_hi; +/*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), + rx_long_ok:1), fc_priority_threshold:3), pad18:1); +/*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), + fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), + full_duplex_force:1), full_duplex_pin:1); +/*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); +/*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); +/*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); + u8 pad_d102[9]; +}; + +#define E100_MAX_MULTICAST_ADDRS 64 +struct multi { + __le16 count; + u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; +}; + +/* Important: keep total struct u32-aligned */ +#define UCODE_SIZE 134 +struct cb { + __le16 status; + __le16 command; + __le32 link; + union { + u8 iaaddr[ETH_ALEN]; + __le32 ucode[UCODE_SIZE]; + struct config config; + struct multi multi; + struct { + u32 tbd_array; + u16 tcb_byte_count; + u8 threshold; + u8 tbd_count; + struct { + __le32 buf_addr; + __le16 size; + u16 eol; + } tbd; + } tcb; + __le32 dump_buffer_addr; + } u; + struct cb *next, *prev; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +enum loopback { + lb_none = 0, lb_mac = 1, lb_phy = 3, +}; + +struct stats { 
+ __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, + tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, + tx_multiple_collisions, tx_total_collisions; + __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, + rx_resource_errors, rx_overrun_errors, rx_cdt_errors, + rx_short_frame_errors; + __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; + __le16 xmt_tco_frames, rcv_tco_frames; + __le32 complete; +}; + +struct mem { + struct { + u32 signature; + u32 result; + } selftest; + struct stats stats; + u8 dump_buf[596]; +}; + +struct param_range { + u32 min; + u32 max; + u32 count; +}; + +struct params { + struct param_range rfds; + struct param_range cbs; +}; + +struct nic { + /* Begin: frequently used values: keep adjacent for cache effect */ + u32 msg_enable ____cacheline_aligned; + struct net_device *netdev; + struct pci_dev *pdev; + u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); + + struct rx *rxs ____cacheline_aligned; + struct rx *rx_to_use; + struct rx *rx_to_clean; + struct rfd blank_rfd; + enum ru_state ru_running; + + spinlock_t cb_lock ____cacheline_aligned; + spinlock_t cmd_lock; + struct csr __iomem *csr; + enum scb_cmd_lo cuc_cmd; + unsigned int cbs_avail; + struct napi_struct napi; + struct cb *cbs; + struct cb *cb_to_use; + struct cb *cb_to_send; + struct cb *cb_to_clean; + __le16 tx_command; + /* End: frequently used values: keep adjacent for cache effect */ + + enum { + ich = (1 << 0), + promiscuous = (1 << 1), + multicast_all = (1 << 2), + wol_magic = (1 << 3), + ich_10h_workaround = (1 << 4), + } flags ____cacheline_aligned; + + enum mac mac; + enum phy phy; + struct params params; + struct timer_list watchdog; + struct mii_if_info mii; + struct work_struct tx_timeout_task; + enum loopback loopback; + + struct mem *mem; + dma_addr_t dma_addr; + + struct pci_pool *cbs_pool; + dma_addr_t cbs_dma_addr; + u8 adaptive_ifs; + u8 tx_threshold; + u32 tx_frames; + u32 tx_collisions; + u32 tx_deferred; + u32 tx_single_collisions; + u32 tx_multiple_collisions; + u32 tx_fc_pause; + u32 tx_tco_frames; + + u32 rx_fc_pause; + u32 rx_fc_unsupported; + u32 rx_tco_frames; + u32 rx_short_frame_errors; + u32 rx_over_length_errors; + + u16 eeprom_wc; + __le16 eeprom[256]; + spinlock_t mdio_lock; + const struct firmware *fw; +}; + +static inline void e100_write_flush(struct nic *nic) +{ + /* Flush previous PCI writes through intermediate bridges + * by doing a benign read */ + (void)ioread8(&nic->csr->scb.status); +} + +static void e100_enable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_disable_irq(struct nic *nic) +{ + unsigned long flags; + + spin_lock_irqsave(&nic->cmd_lock, flags); + iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irqrestore(&nic->cmd_lock, flags); +} + +static void e100_hw_reset(struct nic *nic) +{ + /* Put CU and RU into idle with a selective reset to get + * device off of PCI bus */ + iowrite32(selective_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Now fully reset device */ + iowrite32(software_reset, &nic->csr->port); + e100_write_flush(nic); udelay(20); + + /* Mask off our interrupt line - it's unmasked after reset */ + e100_disable_irq(nic); +} + +static int e100_self_test(struct nic *nic) +{ + u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); + + 
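	/* (The PORT register written below takes a dword whose low bits
	 * select the command from enum port above and whose remaining
	 * bits carry a DMA pointer, which is why 'selftest' can simply
	 * be OR'd into dma_addr.) */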
/* Passing the self-test is a pretty good indication + * that the device can DMA to/from host memory */ + + nic->mem->selftest.signature = 0; + nic->mem->selftest.result = 0xFFFFFFFF; + + iowrite32(selftest | dma_addr, &nic->csr->port); + e100_write_flush(nic); + /* Wait 10 msec for self-test to complete */ + msleep(10); + + /* Interrupts are enabled after self-test */ + e100_disable_irq(nic); + + /* Check results of self-test */ + if (nic->mem->selftest.result != 0) { + netif_err(nic, hw, nic->netdev, + "Self-test failed: result=0x%08X\n", + nic->mem->selftest.result); + return -ETIMEDOUT; + } + if (nic->mem->selftest.signature == 0) { + netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) +{ + u32 cmd_addr_data[3]; + u8 ctrl; + int i, j; + + /* Three cmds: write/erase enable, write data, write/erase disable */ + cmd_addr_data[0] = op_ewen << (addr_len - 2); + cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | + le16_to_cpu(data); + cmd_addr_data[2] = op_ewds << (addr_len - 2); + + /* Bit-bang cmds to write word to eeprom */ + for (j = 0; j < 3; j++) { + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data[j] & (1 << i)) ? + eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } + /* Wait 10 msec for cmd to complete */ + msleep(10); + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + } +}; + +/* General technique stolen from the eepro100 driver - very clever */ +static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) +{ + u32 cmd_addr_data; + u16 data = 0; + u8 ctrl; + int i; + + cmd_addr_data = ((op_read << *addr_len) | addr) << 16; + + /* Chip select */ + iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Bit-bang to read word from eeprom */ + for (i = 31; i >= 0; i--) { + ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; + iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + /* Eeprom drives a dummy zero to EEDO after receiving + * complete address. Use this to adjust addr_len. */ + ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); + if (!(ctrl & eedo) && i > 16) { + *addr_len -= (i - 16); + i = 17; + } + + data = (data << 1) | (ctrl & eedo ? 
1 : 0); + } + + /* Chip deselect */ + iowrite8(0, &nic->csr->eeprom_ctrl_lo); + e100_write_flush(nic); udelay(4); + + return cpu_to_le16(data); +}; + +/* Load entire EEPROM image into driver cache and validate checksum */ +static int e100_eeprom_load(struct nic *nic) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + for (addr = 0; addr < nic->eeprom_wc; addr++) { + nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr); + if (addr < nic->eeprom_wc - 1) + checksum += le16_to_cpu(nic->eeprom[addr]); + } + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { + netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n"); + if (!eeprom_bad_csum_allow) + return -EAGAIN; + } + + return 0; +} + +/* Save (portion of) driver EEPROM cache to device and update checksum */ +static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) +{ + u16 addr, addr_len = 8, checksum = 0; + + /* Try reading with an 8-bit addr len to discover actual addr len */ + e100_eeprom_read(nic, &addr_len, 0); + nic->eeprom_wc = 1 << addr_len; + + if (start + count >= nic->eeprom_wc) + return -EINVAL; + + for (addr = start; addr < start + count; addr++) + e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]); + + /* The checksum, stored in the last word, is calculated such that + * the sum of words should be 0xBABA */ + for (addr = 0; addr < nic->eeprom_wc - 1; addr++) + checksum += le16_to_cpu(nic->eeprom[addr]); + nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); + e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1, + nic->eeprom[nic->eeprom_wc - 1]); + + return 0; +} + +#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ +#define E100_WAIT_SCB_FAST 20 /* delay like the old code */ +static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) +{ + unsigned long flags; + unsigned int i; + int err = 0; + + spin_lock_irqsave(&nic->cmd_lock, flags); + + /* Previous command is accepted when SCB clears */ + for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { + if (likely(!ioread8(&nic->csr->scb.cmd_lo))) + break; + cpu_relax(); + if (unlikely(i > E100_WAIT_SCB_FAST)) + udelay(5); + } + if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { + err = -EAGAIN; + goto err_unlock; + } + + if (unlikely(cmd != cuc_resume)) + iowrite32(dma_addr, &nic->csr->scb.gen_ptr); + iowrite8(cmd, &nic->csr->scb.cmd_lo); + +err_unlock: + spin_unlock_irqrestore(&nic->cmd_lock, flags); + + return err; +} + +static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, + int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) +{ + struct cb *cb; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&nic->cb_lock, flags); + + if (unlikely(!nic->cbs_avail)) { + err = -ENOMEM; + goto err_unlock; + } + + cb = nic->cb_to_use; + nic->cb_to_use = cb->next; + nic->cbs_avail--; + cb->skb = skb; + + err = cb_prepare(nic, cb, skb); + if (err) + goto err_unlock; + + if (unlikely(!nic->cbs_avail)) + err = -ENOSPC; + + + /* Order is important otherwise we'll be in a race with h/w: + * set S-bit in current first, then clear S-bit in previous. 
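	 * Otherwise a still-running CU could chase the link past the
	 * previous CB the instant its S-bit disappeared and start on this
	 * CB before its own S-bit is visible, leaving no safe stopping
	 * point; the dma_wmb() keeps the two stores in that order on
	 * weakly-ordered archs.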
*/ + cb->command |= cpu_to_le16(cb_s); + dma_wmb(); + cb->prev->command &= cpu_to_le16(~cb_s); + + while (nic->cb_to_send != nic->cb_to_use) { + if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, + nic->cb_to_send->dma_addr))) { + /* Ok, here's where things get sticky. It's + * possible that we can't schedule the command + * because the controller is too busy, so + * let's just queue the command and try again + * when another command is scheduled. */ + if (err == -ENOSPC) { + //request a reset + schedule_work(&nic->tx_timeout_task); + } + break; + } else { + nic->cuc_cmd = cuc_resume; + nic->cb_to_send = nic->cb_to_send->next; + } + } + +err_unlock: + spin_unlock_irqrestore(&nic->cb_lock, flags); + + return err; +} + +static int mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct nic *nic = netdev_priv(netdev); + return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); +} + +static void mdio_write(struct net_device *netdev, int addr, int reg, int data) +{ + struct nic *nic = netdev_priv(netdev); + + nic->mdio_ctrl(nic, addr, mdi_write, reg, data); +} + +/* the standard mdio_ctrl() function for usual MII-compliant hardware */ +static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) +{ + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + + /* + * Stratus87247: we shouldn't be writing the MDI control + * register until the Ready bit shows True. Also, since + * manipulation of the MDI control registers is a multi-step + * procedure it should be done under lock. + */ + spin_lock_irqsave(&nic->mdio_lock, flags); + for (i = 100; i; --i) { + if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) + break; + udelay(20); + } + if (unlikely(!i)) { + netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n"); + spin_unlock_irqrestore(&nic->mdio_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); + + for (i = 0; i < 100; i++) { + udelay(20); + if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) + break; + } + spin_unlock_irqrestore(&nic->mdio_lock, flags); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data, data_out); + return (u16)data_out; +} + +/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ +static u16 mdio_ctrl_phy_82552_v(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + if ((reg == MII_BMCR) && (dir == mdi_write)) { + if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { + u16 advert = mdio_read(nic->netdev, nic->mii.phy_id, + MII_ADVERTISE); + + /* + * Workaround Si issue where sometimes the part will not + * autoneg to 100Mbps even when advertised. + */ + if (advert & ADVERTISE_100FULL) + data |= BMCR_SPEED100 | BMCR_FULLDPLX; + else if (advert & ADVERTISE_100HALF) + data |= BMCR_SPEED100; + } + } + return mdio_ctrl_hw(nic, addr, dir, reg, data); +} + +/* Fully software-emulated mdio_ctrl() function for cards without + * MII-compliant PHYs. + * For now, this is mainly geared towards 80c24 support; in case of further + * requirements for other types (i82503, ...?) either extend this mechanism + * or split it, whichever is cleaner. 
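 * For orientation: mii_link_ok(&nic->mii) reaches this code via
 * nic->mii.mdio_read, i.e. mdio_read() above, which forwards to
 * nic->mdio_ctrl(); answering MII_BMSR with BMSR_LSTATUS below is what
 * makes such a card always report link-up to the mii library.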
+ */ +static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, + u32 addr, + u32 dir, + u32 reg, + u16 data) +{ + /* might need to allocate a netdev_priv'ed register array eventually + * to be able to record state changes, but for now + * some fully hardcoded register handling ought to be ok I guess. */ + + if (dir == mdi_read) { + switch (reg) { + case MII_BMCR: + /* Auto-negotiation, right? */ + return BMCR_ANENABLE | + BMCR_FULLDPLX; + case MII_BMSR: + return BMSR_LSTATUS /* for mii_link_ok() */ | + BMSR_ANEGCAPABLE | + BMSR_10FULL; + case MII_ADVERTISE: + /* 80c24 is a "combo card" PHY, right? */ + return ADVERTISE_10HALF | + ADVERTISE_10FULL; + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } else { + switch (reg) { + default: + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", + dir == mdi_read ? "READ" : "WRITE", + addr, reg, data); + return 0xFFFF; + } + } +} +static inline int e100_phy_supports_mii(struct nic *nic) +{ + /* for now, just check it by comparing whether we + are using MII software emulation. + */ + return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); +} + +static void e100_get_defaults(struct nic *nic) +{ + struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + + /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ + nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; + if (nic->mac == mac_unknown) + nic->mac = mac_82557_D100_A; + + nic->params.rfds = rfds; + nic->params.cbs = cbs; + + /* Quadwords to DMA into FIFO before starting frame transmit */ + nic->tx_threshold = 0xE0; + + /* no interrupt for every tx completion, delay = 256us if not 557 */ + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); + + /* Template for a freshly allocated RFD */ + nic->blank_rfd.command = 0; + nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); + + /* MII setup */ + nic->mii.phy_id_mask = 0x1F; + nic->mii.reg_num_mask = 0x1F; + nic->mii.dev = nic->netdev; + nic->mii.mdio_read = mdio_read; + nic->mii.mdio_write = mdio_write; +} + +static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct config *config = &cb->u.config; + u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; + + cb->command = cpu_to_le16(cb_config); + + memset(config, 0, sizeof(struct config)); + + config->byte_count = 0x16; /* bytes in this struct */ + config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ + config->direct_rx_dma = 0x1; /* reserved */ + config->standard_tcb = 0x1; /* 1=standard, 0=extended */ + config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ + config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ + config->tx_underrun_retry = 0x3; /* # of underrun retries */ + if (e100_phy_supports_mii(nic)) + config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ + config->pad10 = 0x6; + config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ + config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ + config->ifs = 0x6; /* x16 = inter frame spacing */ + config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ + config->pad15_1 = 0x1; + config->pad15_2 = 0x1; + config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ + config->fc_delay_hi = 0x40; /* time delay for fc frame */ + config->tx_padding = 0x1; /* 1=pad short frames */ + config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ + config->pad18 = 0x1; + config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ + config->pad20_1 = 0x1F; + config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ + config->pad21_1 = 0x5; + + config->adaptive_ifs = nic->adaptive_ifs; + config->loopback = nic->loopback; + + if (nic->mii.force_media && nic->mii.full_duplex) + config->full_duplex_force = 0x1; /* 1=force, 0=auto */ + + if (nic->flags & promiscuous || nic->loopback) { + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + config->promiscuous_mode = 0x1; /* 1=on, 0=off */ + } + + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + + if (nic->flags & multicast_all) + config->multicast_all = 0x1; /* 1=accept, 0=no */ + + /* disable WoL when up */ + if (netif_running(nic->netdev) || !(nic->flags & wol_magic)) + config->magic_packet_disable = 0x1; /* 1=off, 0=on */ + + if (nic->mac >= mac_82558_D101_A4) { + config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ + config->mwi_enable = 0x1; /* 1=enable, 0=disable */ + config->standard_tcb = 0x0; /* 1=standard, 0=extended */ + config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ + if (nic->mac >= mac_82559_D101M) { + config->tno_intr = 0x1; /* TCO stats enable */ + /* Enable TCO in extended config */ + if (nic->mac >= mac_82551_10) { + config->byte_count = 0x20; /* extended bytes */ + config->rx_d102_mode = 0x1; /* GMRC for TCO */ + } + } else { + config->standard_stat_counter = 0x0; + } + } + + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + + netif_printk(nic, 
hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n", + c + 0); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n", + c + 8); + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n", + c + 16); + return 0; +} + +/************************************************************************* +* CPUSaver parameters +* +* All CPUSaver parameters are 16-bit literals that are part of a +* "move immediate value" instruction. By changing the value of +* the literal in the instruction before the code is loaded, the +* driver can change the algorithm. +* +* INTDELAY - This loads the dead-man timer with its initial value. +* When this timer expires the interrupt is asserted, and the +* timer is reset each time a new packet is received. (see +* BUNDLEMAX below to set the limit on number of chained packets) +* The current default is 0x600 or 1536. Experiments show that +* the value should probably stay within the 0x200 - 0x1000. +* +* BUNDLEMAX - +* This sets the maximum number of frames that will be bundled. In +* some situations, such as the TCP windowing algorithm, it may be +* better to limit the growth of the bundle size than let it go as +* high as it can, because that could cause too much added latency. +* The default is six, because this is the number of packets in the +* default TCP window size. A value of 1 would make CPUSaver indicate +* an interrupt for every frame received. If you do not want to put +* a limit on the bundle size, set this value to xFFFF. +* +* BUNDLESMALL - +* This contains a bit-mask describing the minimum size frame that +* will be bundled. The default masks the lower 7 bits, which means +* that any frame less than 128 bytes in length will not be bundled, +* but will instead immediately generate an interrupt. This does +* not affect the current bundle in any way. Any frame that is 128 +* bytes or large will be bundled normally. This feature is meant +* to provide immediate indication of ACK frames in a TCP environment. +* Customers were seeing poor performance when a machine with CPUSaver +* enabled was sending but not receiving. The delay introduced when +* the ACKs were received was enough to reduce total throughput, because +* the sender would sit idle until the ACK was finally seen. +* +* The current default is 0xFF80, which masks out the lower 7 bits. +* This means that any frame which is x7F (127) bytes or smaller +* will cause an immediate interrupt. Because this value must be a +* bit mask, there are only a few valid values that can be used. To +* turn this feature off, the driver can write the value xFFFF to the +* lower word of this instruction (in the same way that the other +* parameters are used). Likewise, a value of 0xF800 (2047) would +* cause an interrupt to be generated for every frame, because all +* standard Ethernet frames are <= 2047 bytes in length. 
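* Concretely, with the default 0xFF80 mask a 64-byte TCP ACK falls under
* the 128-byte cut-off and is signalled immediately, while full-sized
* data frames are held until either BUNDLEMAX frames have accumulated or
* the INTDELAY dead-man timer expires.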
+*************************************************************************/ + +/* if you wish to disable the ucode functionality, while maintaining the + * workarounds it provides, set the following defines to: + * BUNDLESMALL 0 + * BUNDLEMAX 1 + * INTDELAY 1 + */ +#define BUNDLESMALL 1 +#define BUNDLEMAX (u16)6 +#define INTDELAY (u16)1536 /* 0x600 */ + +/* Initialize firmware */ +static const struct firmware *e100_request_firmware(struct nic *nic) +{ + const char *fw_name; + const struct firmware *fw = nic->fw; + u8 timer, bundle, min_size; + int err = 0; + bool required = false; + + /* do not load u-code for ICH devices */ + if (nic->flags & ich) + return NULL; + + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { + fw_name = FIRMWARE_D101M; + } else if (nic->mac == mac_82559_D101S) { + fw_name = FIRMWARE_D101S; + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + fw_name = FIRMWARE_D102E; + required = true; + } else { /* No ucode on other devices */ + return NULL; + } + + /* If the firmware has not previously been loaded, request a pointer + * to it. If it was previously loaded, we are reinitializing the + * adapter, possibly in a resume from hibernate, in which case + * reject_firmware() cannot be used. + */ + if (!fw) + err = reject_firmware(&fw, fw_name, &nic->pdev->dev); + + if (err) { + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + netif_err(nic, probe, nic->netdev, "Proceeding without firmware\n"); + return NULL; + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } + } + + /* Firmware should be precisely UCODE_SIZE (words) plus three bytes + indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ + if (fw->size != UCODE_SIZE * 4 + 3) { + netif_err(nic, probe, nic->netdev, + "Firmware \"%s\" has wrong size %zu\n", + fw_name, fw->size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || + min_size >= UCODE_SIZE) { + netif_err(nic, probe, nic->netdev, + "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", + fw_name, timer, bundle, min_size); + release_firmware(fw); + return ERR_PTR(-EINVAL); + } + + /* OK, firmware is validated and ready to use. Save a pointer + * to it in the nic */ + nic->fw = fw; + return fw; +} + +static int e100_setup_ucode(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + const struct firmware *fw = (void *)skb; + u8 timer, bundle, min_size; + + /* It's not a real skb; we just abused the fact that e100_exec_cb + will pass it through to here... 
*/ + cb->skb = NULL; + + /* firmware is stored as little endian already */ + memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); + + /* Read timer, bundle and min_size from end of firmware blob */ + timer = fw->data[UCODE_SIZE * 4]; + bundle = fw->data[UCODE_SIZE * 4 + 1]; + min_size = fw->data[UCODE_SIZE * 4 + 2]; + + /* Insert user-tunable settings in cb->u.ucode */ + cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); + cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); + cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); + cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); + + cb->command = cpu_to_le16(cb_ucode | cb_el); + return 0; +} + +static inline int e100_load_ucode_wait(struct nic *nic) +{ + const struct firmware *fw; + int err = 0, counter = 50; + struct cb *cb = nic->cb_to_clean; + + fw = e100_request_firmware(nic); + /* If it's NULL, then no ucode is required */ + if (!fw || IS_ERR(fw)) + return PTR_ERR(fw); + + if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) + netif_err(nic, probe, nic->netdev, + "ucode cmd failed with error %d\n", err); + + /* must restart cuc */ + nic->cuc_cmd = cuc_start; + + /* wait for completion */ + e100_write_flush(nic); + udelay(10); + + /* wait for possibly (ouch) 500ms */ + while (!(cb->status & cpu_to_le16(cb_complete))) { + msleep(10); + if (!--counter) break; + } + + /* ack any interrupts, something could have been set */ + iowrite8(~0, &nic->csr->scb.stat_ack); + + /* if the command failed, or is not OK, notify and return */ + if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { + netif_err(nic, probe, nic->netdev, "ucode load failed\n"); + err = -EPERM; + } + + return err; +} + +static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_iaaddr); + memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); + return 0; +} + +static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + cb->command = cpu_to_le16(cb_dump); + cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + + offsetof(struct mem, dump_buf)); + return 0; +} + +static int e100_phy_check_without_mii(struct nic *nic) +{ + u8 phy_type; + int without_mii; + + phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f; + + switch (phy_type) { + case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ + case I82503: /* Non-MII PHY; UNTESTED! */ + case S80C24: /* Non-MII PHY; tested and working */ + /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": + * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter + * doesn't have a programming interface of any sort. The + * media is sensed automatically based on how the link partner + * is configured. This is, in essence, manual configuration. + */ + netif_info(nic, probe, nic->netdev, + "found MII-less i82503 or 80c24 or other PHY\n"); + + nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; + nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ + + /* these might be needed for certain MII-less cards... 
+ * nic->flags |= ich; + * nic->flags |= ich_10h_workaround; */ + + without_mii = 1; + break; + default: + without_mii = 0; + break; + } + return without_mii; +} + +#define NCONFIG_AUTO_SWITCH 0x0080 +#define MII_NSC_CONG MII_RESV1 +#define NSC_CONG_ENABLE 0x0100 +#define NSC_CONG_TXREADY 0x0400 +#define ADVERTISE_FC_SUPPORTED 0x0400 +static int e100_phy_init(struct nic *nic) +{ + struct net_device *netdev = nic->netdev; + u32 addr; + u16 bmcr, stat, id_lo, id_hi, cong; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < 32; addr++) { + nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + if (addr == 32) { + /* uhoh, no PHY detected: check whether we seem to be some + * weird, rare variant which is *known* to not have any MII. + * But do this AFTER MII checking only, since this does + * lookup of EEPROM values which may easily be unreliable. */ + if (e100_phy_check_without_mii(nic)) + return 0; /* simply return and hope for the best */ + else { + /* for unknown cases log a fatal error */ + netif_err(nic, hw, nic->netdev, + "Failed to locate any known PHY, aborting\n"); + return -EAGAIN; + } + } else + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy_addr = %d\n", nic->mii.phy_id); + + /* Get phy ID */ + id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); + id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); + nic->phy = (u32)id_hi << 16 | (u32)id_lo; + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "phy ID = 0x%08X\n", nic->phy); + + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + + /* Handle National tx phys */ +#define NCS_PHY_MODEL_MASK 0xFFF0FFFF + if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { + /* Disable congestion control */ + cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG); + cong |= NSC_CONG_TXREADY; + cong &= ~NSC_CONG_ENABLE; + mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); + } + + if (nic->phy == phy_82552_v) { + u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE); + + /* assign special tweaked mdio_ctrl() function */ + nic->mdio_ctrl = mdio_ctrl_phy_82552_v; + + /* Workaround Si not advertising flow-control during autoneg */ + advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert); + + /* Reset for the above changes to take effect */ + bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR); + bmcr |= BMCR_RESET; + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); + } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && + (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && + (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { + /* enable/disable MDI/MDI-X auto-switching. 
*/ + mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, + nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); + } + + return 0; +} + +static int e100_hw_init(struct nic *nic) +{ + int err = 0; + + e100_hw_reset(nic); + + netif_err(nic, hw, nic->netdev, "e100_hw_init\n"); + if (!in_interrupt() && (err = e100_self_test(nic))) + return err; + + if ((err = e100_phy_init(nic))) + return err; + if ((err = e100_exec_cmd(nic, cuc_load_base, 0))) + return err; + if ((err = e100_exec_cmd(nic, ruc_load_base, 0))) + return err; + if ((err = e100_load_ucode_wait(nic))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_configure))) + return err; + if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_addr, + nic->dma_addr + offsetof(struct mem, stats)))) + return err; + if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0))) + return err; + + e100_disable_irq(nic); + + return 0; +} + +static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) +{ + struct net_device *netdev = nic->netdev; + struct netdev_hw_addr *ha; + u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); + + cb->command = cpu_to_le16(cb_multi); + cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == count) + break; + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, + ETH_ALEN); + } + return 0; +} + +static void e100_set_multicast_list(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, + "mc_count=%d, flags=0x%04X\n", + netdev_mc_count(netdev), netdev->flags); + + if (netdev->flags & IFF_PROMISC) + nic->flags |= promiscuous; + else + nic->flags &= ~promiscuous; + + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) + nic->flags |= multicast_all; + else + nic->flags &= ~multicast_all; + + e100_exec_cb(nic, NULL, e100_configure); + e100_exec_cb(nic, NULL, e100_multi); +} + +static void e100_update_stats(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; + struct stats *s = &nic->mem->stats; + __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : + (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : + &s->complete; + + /* Device's stats reporting may take several microseconds to + * complete, so we're always waiting for results of the + * previous command. 
*/ + + if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { + *complete = 0; + nic->tx_frames = le32_to_cpu(s->tx_good_frames); + nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); + ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); + ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); + ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); + ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); + ns->collisions += nic->tx_collisions; + ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + + le32_to_cpu(s->tx_lost_crs); + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + + nic->rx_over_length_errors; + ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); + ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); + ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); + ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); + ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + + le32_to_cpu(s->rx_alignment_errors) + + le32_to_cpu(s->rx_short_frame_errors) + + le32_to_cpu(s->rx_cdt_errors); + nic->tx_deferred += le32_to_cpu(s->tx_deferred); + nic->tx_single_collisions += + le32_to_cpu(s->tx_single_collisions); + nic->tx_multiple_collisions += + le32_to_cpu(s->tx_multiple_collisions); + if (nic->mac >= mac_82558_D101_A4) { + nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); + nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); + nic->rx_fc_unsupported += + le32_to_cpu(s->fc_rcv_unsupported); + if (nic->mac >= mac_82559_D101M) { + nic->tx_tco_frames += + le16_to_cpu(s->xmt_tco_frames); + nic->rx_tco_frames += + le16_to_cpu(s->rcv_tco_frames); + } + } + } + + + if (e100_exec_cmd(nic, cuc_dump_reset, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_dump_reset failed\n"); +} + +static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) +{ + /* Adjust inter-frame-spacing (IFS) between two transmits if + * we're getting collisions on a half-duplex connection. */ + + if (duplex == DUPLEX_HALF) { + u32 prev = nic->adaptive_ifs; + u32 min_frames = (speed == SPEED_100) ? 1000 : 100; + + if ((nic->tx_frames / 32 < nic->tx_collisions) && + (nic->tx_frames > min_frames)) { + if (nic->adaptive_ifs < 60) + nic->adaptive_ifs += 5; + } else if (nic->tx_frames < min_frames) { + if (nic->adaptive_ifs >= 5) + nic->adaptive_ifs -= 5; + } + if (nic->adaptive_ifs != prev) + e100_exec_cb(nic, NULL, e100_configure); + } +} + +static void e100_watchdog(unsigned long data) +{ + struct nic *nic = (struct nic *)data; + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + u32 speed; + + netif_printk(nic, timer, KERN_DEBUG, nic->netdev, + "right now = %ld\n", jiffies); + + /* mii library handles link maintenance tasks */ + + mii_ethtool_gset(&nic->mii, &cmd); + speed = ethtool_cmd_speed(&cmd); + + if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n", + speed == SPEED_100 ? 100 : 10, + cmd.duplex == DUPLEX_FULL ? "Full" : "Half"); + } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { + netdev_info(nic->netdev, "NIC Link is Down\n"); + } + + mii_check_link(&nic->mii); + + /* Software generated interrupt to recover from (rare) Rx + * allocation failure. 
+ * Unfortunately have to use a spinlock to not re-enable interrupts + * accidentally, due to hardware that shares a register between the + * interrupt mask bit and the SW Interrupt generation bit */ + spin_lock_irq(&nic->cmd_lock); + iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi); + e100_write_flush(nic); + spin_unlock_irq(&nic->cmd_lock); + + e100_update_stats(nic); + e100_adjust_adaptive_ifs(nic, speed, cmd.duplex); + + if (nic->mac <= mac_82557_D100_C) + /* Issue a multicast command to workaround a 557 lock up */ + e100_set_multicast_list(nic->netdev); + + if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) + /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ + nic->flags |= ich_10h_workaround; + else + nic->flags &= ~ich_10h_workaround; + + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); +} + +static int e100_xmit_prepare(struct nic *nic, struct cb *cb, + struct sk_buff *skb) +{ + dma_addr_t dma_addr; + cb->command = nic->tx_command; + + dma_addr = pci_map_single(nic->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE); + /* If we can't map the skb, have the upper layer try later */ + if (pci_dma_mapping_error(nic->pdev, dma_addr)) + return -ENOMEM; + + /* + * Use the last 4 bytes of the SKB payload packet as the CRC, used for + * testing, ie sending frames with bad CRC. + */ + if (unlikely(skb->no_fcs)) + cb->command |= cpu_to_le16(cb_tx_nc); + else + cb->command &= ~cpu_to_le16(cb_tx_nc); + + /* interrupt every 16 packets regardless of delay */ + if ((nic->cbs_avail & ~15) == nic->cbs_avail) + cb->command |= cpu_to_le16(cb_i); + cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); + cb->u.tcb.tcb_byte_count = 0; + cb->u.tcb.threshold = nic->tx_threshold; + cb->u.tcb.tbd_count = 1; + cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); + cb->u.tcb.tbd.size = cpu_to_le16(skb->len); + skb_tx_timestamp(skb); + return 0; +} + +static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + if (nic->flags & ich_10h_workaround) { + /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. + Issue a NOP command followed by a 1us delay before + issuing the Tx command. */ + if (e100_exec_cmd(nic, cuc_nop, 0)) + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "exec cuc_nop failed\n"); + udelay(1); + } + + err = e100_exec_cb(nic, skb, e100_xmit_prepare); + + switch (err) { + case -ENOSPC: + /* We queued the skb, but now we're out of space. */ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "No space for CB\n"); + netif_stop_queue(netdev); + break; + case -ENOMEM: + /* This is a hard error - log it. 
*/ + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "Out of Tx resources, returning skb\n"); + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +static int e100_tx_clean(struct nic *nic) +{ + struct net_device *dev = nic->netdev; + struct cb *cb; + int tx_cleaned = 0; + + spin_lock(&nic->cb_lock); + + /* Clean CBs marked complete */ + for (cb = nic->cb_to_clean; + cb->status & cpu_to_le16(cb_complete); + cb = nic->cb_to_clean = cb->next) { + dma_rmb(); /* read skb after status */ + netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, + "cb[%d]->status = 0x%04X\n", + (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), + cb->status); + + if (likely(cb->skb != NULL)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; + + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(cb->skb); + cb->skb = NULL; + tx_cleaned = 1; + } + cb->status = 0; + nic->cbs_avail++; + } + + spin_unlock(&nic->cb_lock); + + /* Recover from running out of Tx resources in xmit_frame */ + if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) + netif_wake_queue(nic->netdev); + + return tx_cleaned; +} + +static void e100_clean_cbs(struct nic *nic) +{ + if (nic->cbs) { + while (nic->cbs_avail != nic->params.cbs.count) { + struct cb *cb = nic->cb_to_clean; + if (cb->skb) { + pci_unmap_single(nic->pdev, + le32_to_cpu(cb->u.tcb.tbd.buf_addr), + le16_to_cpu(cb->u.tcb.tbd.size), + PCI_DMA_TODEVICE); + dev_kfree_skb(cb->skb); + } + nic->cb_to_clean = nic->cb_to_clean->next; + nic->cbs_avail++; + } + pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr); + nic->cbs = NULL; + nic->cbs_avail = 0; + } + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = + nic->cbs; +} + +static int e100_alloc_cbs(struct nic *nic) +{ + struct cb *cb; + unsigned int i, count = nic->params.cbs.count; + + nic->cuc_cmd = cuc_start; + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; + nic->cbs_avail = 0; + + nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); + if (!nic->cbs) + return -ENOMEM; + memset(nic->cbs, 0, count * sizeof(struct cb)); + + for (cb = nic->cbs, i = 0; i < count; cb++, i++) { + cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; + cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; + + cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); + cb->link = cpu_to_le32(nic->cbs_dma_addr + + ((i+1) % count) * sizeof(struct cb)); + } + + nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; + nic->cbs_avail = count; + + return 0; +} + +static inline void e100_start_receiver(struct nic *nic, struct rx *rx) +{ + if (!nic->rxs) return; + if (RU_SUSPENDED != nic->ru_running) return; + + /* handle init time starts */ + if (!rx) rx = nic->rxs; + + /* (Re)start RU if suspended or idle and RFA is non-NULL */ + if (rx->skb) { + e100_exec_cmd(nic, ruc_start, rx->dma_addr); + nic->ru_running = RU_RUNNING; + } +} + +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) +{ + if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) + return -ENOMEM; + + /* Init, and map the RFD. 
*/ + skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); + rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { + dev_kfree_skb_any(rx->skb); + rx->skb = NULL; + rx->dma_addr = 0; + return -ENOMEM; + } + + /* Link the RFD to end of RFA by linking previous RFD to + * this one. We are safe to touch the previous RFD because + * it is protected by the before last buffer's el bit being set */ + if (rx->prev->skb) { + struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; + put_unaligned_le32(rx->dma_addr, &prev_rfd->link); + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + } + + return 0; +} + +static int e100_rx_indicate(struct nic *nic, struct rx *rx, + unsigned int *work_done, unsigned int work_to_do) +{ + struct net_device *dev = nic->netdev; + struct sk_buff *skb = rx->skb; + struct rfd *rfd = (struct rfd *)skb->data; + u16 rfd_status, actual_size; + u16 fcs_pad = 0; + + if (unlikely(work_done && *work_done >= work_to_do)) + return -EAGAIN; + + /* Need to sync before taking a peek at cb_complete bit */ + pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + rfd_status = le16_to_cpu(rfd->status); + + netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, + "status=0x%04X\n", rfd_status); + dma_rmb(); /* read size after status bit */ + + /* If data isn't ready, nothing to indicate */ + if (unlikely(!(rfd_status & cb_complete))) { + /* If the next buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling + * interrupts */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), + PCI_DMA_FROMDEVICE); + return -ENODATA; + } + + /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; + actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; + if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) + actual_size = RFD_BUF_LEN - sizeof(struct rfd); + + /* Get data */ + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + /* If this buffer has the el bit, but we think the receiver + * is still running, check to see if it really stopped while + * we had interrupts off. + * This allows for a fast restart without re-enabling interrupts. + * This can happen when the RU sees the size change but also sees + * the el bit set. */ + if ((le16_to_cpu(rfd->command) & cb_el) && + (RU_RUNNING == nic->ru_running)) { + + if (ioread8(&nic->csr->scb.status) & rus_no_res) + nic->ru_running = RU_SUSPENDED; + } + + /* Pull off the RFD and put the actual data (minus eth hdr) */ + skb_reserve(skb, sizeof(struct rfd)); + skb_put(skb, actual_size); + skb->protocol = eth_type_trans(skb, nic->netdev); + + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. 
*/ + nic->rx_over_length_errors++; + goto process_skb; + } + + if (unlikely(!(rfd_status & cb_ok))) { + /* Don't indicate if hardware indicates errors */ + dev_kfree_skb_any(skb); + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { + /* Don't indicate oversized frames */ + nic->rx_over_length_errors++; + dev_kfree_skb_any(skb); + } else { +process_skb: + dev->stats.rx_packets++; + dev->stats.rx_bytes += (actual_size - fcs_pad); + netif_receive_skb(skb); + if (work_done) + (*work_done)++; + } + + rx->skb = NULL; + + return 0; +} + +static void e100_rx_clean(struct nic *nic, unsigned int *work_done, + unsigned int work_to_do) +{ + struct rx *rx; + int restart_required = 0, err = 0; + struct rx *old_before_last_rx, *new_before_last_rx; + struct rfd *old_before_last_rfd, *new_before_last_rfd; + + /* Indicate newly arrived packets */ + for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { + err = e100_rx_indicate(nic, rx, work_done, work_to_do); + /* Hit quota or no more to clean */ + if (-EAGAIN == err || -ENODATA == err) + break; + } + + + /* On EAGAIN, hit quota so have more work to do, restart once + * cleanup is complete. + * Else, are we already rnr? then pay attention!!! this ensures that + * the state machine progression never allows a start with a + * partially cleaned list, avoiding a race between hardware + * and rx_to_clean when in NAPI mode */ + if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) + restart_required = 1; + + old_before_last_rx = nic->rx_to_use->prev->prev; + old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; + + /* Alloc new skbs to refill list */ + for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { + if (unlikely(e100_rx_alloc_skb(nic, rx))) + break; /* Better luck next time (see watchdog) */ + } + + new_before_last_rx = nic->rx_to_use->prev->prev; + if (new_before_last_rx != old_before_last_rx) { + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer + * without worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this + * buffer. + * When the hardware hits the before last buffer with el-bit + * and size of 0, it will RNR interrupt, the RUS will go into + * the No Resources state. It will not complete nor write to + * this buffer. */ + new_before_last_rfd = + (struct rfd *)new_before_last_rx->skb->data; + new_before_last_rfd->size = 0; + new_before_last_rfd->command |= cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + new_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + + /* Now that we have a new stopping point, we can clear the old + * stopping point. We must sync twice to get the proper + * ordering on the hardware side of things. */ + old_before_last_rfd->command &= ~cpu_to_le16(cb_el); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); + pci_dma_sync_single_for_device(nic->pdev, + old_before_last_rx->dma_addr, sizeof(struct rfd), + PCI_DMA_BIDIRECTIONAL); + } + + if (restart_required) { + // ack the rnr? 
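+ /* Descriptive note (editor-added, describes the existing code below): the
+ * receive unit stopped with "no resources" (RNR), so acknowledge the pending
+ * RNR interrupt in the STAT/ACK byte, restart the RU at the next RFD to
+ * clean, and count the restart toward the NAPI budget. */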
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); + e100_start_receiver(nic, nic->rx_to_clean); + if (work_done) + (*work_done)++; + } +} + +static void e100_rx_clean_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + + nic->ru_running = RU_UNINITIALIZED; + + if (nic->rxs) { + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + if (rx->skb) { + pci_unmap_single(nic->pdev, rx->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + dev_kfree_skb(rx->skb); + } + } + kfree(nic->rxs); + nic->rxs = NULL; + } + + nic->rx_to_use = nic->rx_to_clean = NULL; +} + +static int e100_rx_alloc_list(struct nic *nic) +{ + struct rx *rx; + unsigned int i, count = nic->params.rfds.count; + struct rfd *before_last; + + nic->rx_to_use = nic->rx_to_clean = NULL; + nic->ru_running = RU_UNINITIALIZED; + + if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC))) + return -ENOMEM; + + for (rx = nic->rxs, i = 0; i < count; rx++, i++) { + rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; + rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; + if (e100_rx_alloc_skb(nic, rx)) { + e100_rx_clean_list(nic); + return -ENOMEM; + } + } + /* Set the el-bit on the buffer that is before the last buffer. + * This lets us update the next pointer on the last buffer without + * worrying about hardware touching it. + * We set the size to 0 to prevent hardware from touching this buffer. + * When the hardware hits the before last buffer with el-bit and size + * of 0, it will RNR interrupt, the RU will go into the No Resources + * state. It will not complete nor write to this buffer. */ + rx = nic->rxs->prev->prev; + before_last = (struct rfd *)rx->skb->data; + before_last->command |= cpu_to_le16(cb_el); + before_last->size = 0; + pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, + sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); + + nic->rx_to_use = nic->rx_to_clean = nic->rxs; + nic->ru_running = RU_SUSPENDED; + + return 0; +} + +static irqreturn_t e100_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct nic *nic = netdev_priv(netdev); + u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); + + netif_printk(nic, intr, KERN_DEBUG, nic->netdev, + "stat_ack = 0x%02X\n", stat_ack); + + if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ + stat_ack == stat_ack_not_present) /* Hardware is ejected */ + return IRQ_NONE; + + /* Ack interrupt(s) */ + iowrite8(stat_ack, &nic->csr->scb.stat_ack); + + /* We hit Receive No Resource (RNR); restart RU after cleaning */ + if (stat_ack & stat_ack_rnr) + nic->ru_running = RU_SUSPENDED; + + if (likely(napi_schedule_prep(&nic->napi))) { + e100_disable_irq(nic); + __napi_schedule(&nic->napi); + } + + return IRQ_HANDLED; +} + +static int e100_poll(struct napi_struct *napi, int budget) +{ + struct nic *nic = container_of(napi, struct nic, napi); + unsigned int work_done = 0; + + e100_rx_clean(nic, &work_done, budget); + e100_tx_clean(nic); + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + e100_enable_irq(nic); + } + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + e100_disable_irq(nic); + e100_intr(nic->pdev->irq, netdev); + e100_tx_clean(nic); + e100_enable_irq(nic); +} +#endif + +static int e100_set_mac_address(struct net_device *netdev, void *p) +{ + struct nic *nic = netdev_priv(netdev); + struct sockaddr *addr = p; + + if 
(!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + e100_exec_cb(nic, NULL, e100_setup_iaaddr); + + return 0; +} + +static int e100_change_mtu(struct net_device *netdev, int new_mtu) +{ + if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN) + return -EINVAL; + netdev->mtu = new_mtu; + return 0; +} + +static int e100_asf(struct nic *nic) +{ + /* ASF can be enabled from eeprom */ + return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && + (nic->eeprom[eeprom_config_asf] & eeprom_asf) && + !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && + ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE); +} + +static int e100_up(struct nic *nic) +{ + int err; + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_rx_clean_list; + if ((err = e100_hw_init(nic))) + goto err_clean_cbs; + e100_set_multicast_list(nic->netdev); + e100_start_receiver(nic, NULL); + mod_timer(&nic->watchdog, jiffies); + if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED, + nic->netdev->name, nic->netdev))) + goto err_no_irq; + netif_wake_queue(nic->netdev); + napi_enable(&nic->napi); + /* enable ints _after_ enabling poll, preventing a race between + * disable ints+schedule */ + e100_enable_irq(nic); + return 0; + +err_no_irq: + del_timer_sync(&nic->watchdog); +err_clean_cbs: + e100_clean_cbs(nic); +err_rx_clean_list: + e100_rx_clean_list(nic); + return err; +} + +static void e100_down(struct nic *nic) +{ + /* wait here for poll to complete */ + napi_disable(&nic->napi); + netif_stop_queue(nic->netdev); + e100_hw_reset(nic); + free_irq(nic->pdev->irq, nic->netdev); + del_timer_sync(&nic->watchdog); + netif_carrier_off(nic->netdev); + e100_clean_cbs(nic); + e100_rx_clean_list(nic); +} + +static void e100_tx_timeout(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + + /* Reset outside of interrupt context, to avoid request_irq + * in interrupt context */ + schedule_work(&nic->tx_timeout_task); +} + +static void e100_tx_timeout_task(struct work_struct *work) +{ + struct nic *nic = container_of(work, struct nic, tx_timeout_task); + struct net_device *netdev = nic->netdev; + + netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, + "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status)); + + rtnl_lock(); + if (netif_running(netdev)) { + e100_down(netdev_priv(netdev)); + e100_up(netdev_priv(netdev)); + } + rtnl_unlock(); +} + +static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) +{ + int err; + struct sk_buff *skb; + + /* Use driver resources to perform internal MAC or PHY + * loopback test. A single packet is prepared and transmitted + * in loopback mode, and the test passes if the received + * packet compares byte-for-byte to the transmitted packet. 
*/ + + if ((err = e100_rx_alloc_list(nic))) + return err; + if ((err = e100_alloc_cbs(nic))) + goto err_clean_rx; + + /* ICH PHY loopback is broken so do MAC loopback instead */ + if (nic->flags & ich && loopback_mode == lb_phy) + loopback_mode = lb_mac; + + nic->loopback = loopback_mode; + if ((err = e100_hw_init(nic))) + goto err_loopback_none; + + if (loopback_mode == lb_phy) + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, + BMCR_LOOPBACK); + + e100_start_receiver(nic, NULL); + + if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) { + err = -ENOMEM; + goto err_loopback_none; + } + skb_put(skb, ETH_DATA_LEN); + memset(skb->data, 0xFF, ETH_DATA_LEN); + e100_xmit_frame(skb, nic->netdev); + + msleep(10); + + pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr, + RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); + + if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd), + skb->data, ETH_DATA_LEN)) + err = -EAGAIN; + +err_loopback_none: + mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0); + nic->loopback = lb_none; + e100_clean_cbs(nic); + e100_hw_reset(nic); +err_clean_rx: + e100_rx_clean_list(nic); + return err; +} + +#define MII_LED_CONTROL 0x1B +#define E100_82552_LED_OVERRIDE 0x19 +#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ +#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ + +static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct nic *nic = netdev_priv(netdev); + return mii_ethtool_gset(&nic->mii, cmd); +} + +static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct nic *nic = netdev_priv(netdev); + int err; + + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET); + err = mii_ethtool_sset(&nic->mii, cmd); + e100_exec_cb(nic, NULL, e100_configure); + + return err; +} + +static void e100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct nic *nic = netdev_priv(netdev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(nic->pdev), + sizeof(info->bus_info)); +} + +#define E100_PHY_REGS 0x1C +static int e100_get_regs_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf); +} + +static void e100_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct nic *nic = netdev_priv(netdev); + u32 *buff = p; + int i; + + regs->version = (1 << 24) | nic->pdev->revision; + buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | + ioread8(&nic->csr->scb.cmd_lo) << 16 | + ioread16(&nic->csr->scb.status); + for (i = E100_PHY_REGS; i >= 0; i--) + buff[1 + E100_PHY_REGS - i] = + mdio_read(netdev, nic->mii.phy_id, i); + memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); + e100_exec_cb(nic, NULL, e100_dump); + msleep(10); + memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); +} + +static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; + wol->wolopts = (nic->flags & wol_magic) ? 
WAKE_MAGIC : 0; +} + +static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct nic *nic = netdev_priv(netdev); + + if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) || + !device_can_wakeup(&nic->pdev->dev)) + return -EOPNOTSUPP; + + if (wol->wolopts) + nic->flags |= wol_magic; + else + nic->flags &= ~wol_magic; + + device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts); + + e100_exec_cb(nic, NULL, e100_configure); + + return 0; +} + +static u32 e100_get_msglevel(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->msg_enable; +} + +static void e100_set_msglevel(struct net_device *netdev, u32 value) +{ + struct nic *nic = netdev_priv(netdev); + nic->msg_enable = value; +} + +static int e100_nway_reset(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_nway_restart(&nic->mii); +} + +static u32 e100_get_link(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return mii_link_ok(&nic->mii); +} + +static int e100_get_eeprom_len(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + return nic->eeprom_wc << 1; +} + +#define E100_EEPROM_MAGIC 0x1234 +static int e100_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + eeprom->magic = E100_EEPROM_MAGIC; + memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); + + return 0; +} + +static int e100_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct nic *nic = netdev_priv(netdev); + + if (eeprom->magic != E100_EEPROM_MAGIC) + return -EINVAL; + + memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); + + return e100_eeprom_save(nic, eeprom->offset >> 1, + (eeprom->len >> 1) + 1); +} + +static void e100_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + ring->rx_max_pending = rfds->max; + ring->tx_max_pending = cbs->max; + ring->rx_pending = rfds->count; + ring->tx_pending = cbs->count; +} + +static int e100_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nic *nic = netdev_priv(netdev); + struct param_range *rfds = &nic->params.rfds; + struct param_range *cbs = &nic->params.cbs; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(netdev)) + e100_down(nic); + rfds->count = max(ring->rx_pending, rfds->min); + rfds->count = min(rfds->count, rfds->max); + cbs->count = max(ring->tx_pending, cbs->min); + cbs->count = min(cbs->count, cbs->max); + netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n", + rfds->count, cbs->count); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} + +static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)", + "Eeprom test (on/offline)", + "Self test (offline)", + "Mac loopback (offline)", + "Phy loopback (offline)", +}; +#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) + +static void e100_diag_test(struct net_device *netdev, + struct ethtool_test *test, u64 *data) +{ + struct ethtool_cmd cmd; + struct nic *nic = netdev_priv(netdev); + int i, err; + + memset(data, 0, E100_TEST_LEN * sizeof(u64)); + data[0] = !mii_link_ok(&nic->mii); + data[1] = e100_eeprom_load(nic); + if (test->flags & ETH_TEST_FL_OFFLINE) { + + /* save speed, duplex & autoneg 
settings */ + err = mii_ethtool_gset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_down(nic); + data[2] = e100_self_test(nic); + data[3] = e100_loopback_test(nic, lb_mac); + data[4] = e100_loopback_test(nic, lb_phy); + + /* restore speed, duplex & autoneg settings */ + err = mii_ethtool_sset(&nic->mii, &cmd); + + if (netif_running(netdev)) + e100_up(nic); + } + for (i = 0; i < E100_TEST_LEN; i++) + test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; + + msleep_interruptible(4 * 1000); +} + +static int e100_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct nic *nic = netdev_priv(netdev); + enum led_state { + led_on = 0x01, + led_off = 0x04, + led_on_559 = 0x05, + led_on_557 = 0x07, + }; + u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : + MII_LED_CONTROL; + u16 leds = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case ETHTOOL_ID_ON: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : + (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; + break; + + case ETHTOOL_ID_OFF: + leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; + break; + + case ETHTOOL_ID_INACTIVE: + break; + } + + mdio_write(netdev, nic->mii.phy_id, led_reg, leds); + return 0; +} + +static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_deferred", "tx_single_collisions", "tx_multi_collisions", + "tx_flow_control_pause", "rx_flow_control_pause", + "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", +}; +#define E100_NET_STATS_LEN 21 +#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) + +static int e100_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e100_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nic *nic = netdev_priv(netdev); + int i; + + for (i = 0; i < E100_NET_STATS_LEN; i++) + data[i] = ((unsigned long *)&netdev->stats)[i]; + + data[i++] = nic->tx_deferred; + data[i++] = nic->tx_single_collisions; + data[i++] = nic->tx_multiple_collisions; + data[i++] = nic->tx_fc_pause; + data[i++] = nic->rx_fc_pause; + data[i++] = nic->rx_fc_unsupported; + data[i++] = nic->tx_tco_frames; + data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; +} + +static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + break; + case ETH_SS_STATS: + memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + break; + } +} + +static const struct ethtool_ops e100_ethtool_ops = { + .get_settings = e100_get_settings, + .set_settings = e100_set_settings, + .get_drvinfo = e100_get_drvinfo, + .get_regs_len = e100_get_regs_len, + .get_regs = e100_get_regs, + .get_wol = e100_get_wol, + .set_wol = e100_set_wol, + .get_msglevel = e100_get_msglevel, + .set_msglevel = 
e100_set_msglevel, + .nway_reset = e100_nway_reset, + .get_link = e100_get_link, + .get_eeprom_len = e100_get_eeprom_len, + .get_eeprom = e100_get_eeprom, + .set_eeprom = e100_set_eeprom, + .get_ringparam = e100_get_ringparam, + .set_ringparam = e100_set_ringparam, + .self_test = e100_diag_test, + .get_strings = e100_get_strings, + .set_phys_id = e100_set_phys_id, + .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct nic *nic = netdev_priv(netdev); + + return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL); +} + +static int e100_alloc(struct nic *nic) +{ + nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem), + &nic->dma_addr); + return nic->mem ? 0 : -ENOMEM; +} + +static void e100_free(struct nic *nic) +{ + if (nic->mem) { + pci_free_consistent(nic->pdev, sizeof(struct mem), + nic->mem, nic->dma_addr); + nic->mem = NULL; + } +} + +static int e100_open(struct net_device *netdev) +{ + struct nic *nic = netdev_priv(netdev); + int err = 0; + + netif_carrier_off(netdev); + if ((err = e100_up(nic))) + netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n"); + return err; +} + +static int e100_close(struct net_device *netdev) +{ + e100_down(netdev_priv(netdev)); + return 0; +} + +static int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 0; +} + +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_xmit_frame, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = e100_set_multicast_list, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_change_mtu = e100_change_mtu, + .ndo_do_ioctl = e100_do_ioctl, + .ndo_tx_timeout = e100_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif + .ndo_set_features = e100_set_features, +}; + +static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct nic *nic; + int err; + + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) + return -ENOMEM; + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + netdev->netdev_ops = &e100_netdev_ops; + netdev->ethtool_ops = &e100_ethtool_ops; + netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); + nic->netdev = netdev; + nic->pdev = pdev; + nic->msg_enable = (1 << debug) - 1; + nic->mdio_ctrl = mdio_ctrl_hw; + pci_set_drvdata(pdev, netdev); + + if ((err = pci_enable_device(pdev))) { + netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n"); + goto err_out_free_dev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, DRV_NAME))) { + netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n"); + goto 
err_out_disable_pdev; + } + + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n"); + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + if (use_io) + netif_info(nic, probe, nic->netdev, "using i/o access mode\n"); + + nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); + if (!nic->csr) { + netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_res; + } + + if (ent->driver_data) + nic->flags |= ich; + else + nic->flags &= ~ich; + + e100_get_defaults(nic); + + /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ + if (nic->mac < mac_82558_D101_A4) + netdev->features |= NETIF_F_VLAN_CHALLENGED; + + /* locks must be initialized before calling hw_reset */ + spin_lock_init(&nic->cb_lock); + spin_lock_init(&nic->cmd_lock); + spin_lock_init(&nic->mdio_lock); + + /* Reset the device before pci_set_master() in case device is in some + * funky state and has an interrupt pending - hint: we don't have the + * interrupt handler registered yet. */ + e100_hw_reset(nic); + + pci_set_master(pdev); + + init_timer(&nic->watchdog); + nic->watchdog.function = e100_watchdog; + nic->watchdog.data = (unsigned long)nic; + + INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); + + if ((err = e100_alloc(nic))) { + netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n"); + goto err_out_iounmap; + } + + if ((err = e100_eeprom_load(nic))) + goto err_out_free; + + e100_phy_init(nic); + + memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); + if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!eeprom_bad_csum_allow) { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n"); + err = -EAGAIN; + goto err_out_free; + } else { + netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n"); + } + } + + /* Wol magic packet can be enabled from eeprom */ + if ((nic->mac >= mac_82558_D101_A4) && + (nic->eeprom[eeprom_id] & eeprom_id_wol)) { + nic->flags |= wol_magic; + device_set_wakeup_enable(&pdev->dev, true); + } + + /* ack any pending wake events, disable PME */ + pci_pme_active(pdev, false); + + strcpy(netdev->name, "eth%d"); + if ((err = register_netdev(netdev))) { + netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n"); + goto err_out_free; + } + nic->cbs_pool = pci_pool_create(netdev->name, + nic->pdev, + nic->params.cbs.max * sizeof(struct cb), + sizeof(u32), + 0); + netif_info(nic, probe, nic->netdev, + "addr 0x%llx, irq %d, MAC addr %pM\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), + pdev->irq, netdev->dev_addr); + + return 0; + +err_out_free: + e100_free(nic); +err_out_iounmap: + pci_iounmap(pdev, nic->csr); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(netdev); + return err; +} + +static void e100_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + if (netdev) { + struct nic *nic = netdev_priv(netdev); + unregister_netdev(netdev); + e100_free(nic); + pci_iounmap(pdev, nic->csr); + pci_pool_destroy(nic->cbs_pool); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ +#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ +#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ +static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (netif_running(netdev)) + e100_down(nic); + netif_device_detach(netdev); + + pci_save_state(pdev); + + if ((nic->flags & wol_magic) | e100_asf(nic)) { + /* enable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, smartspeed | + E100_82552_REV_ANEG | E100_82552_ANEG_NOW); + } + *enable_wake = true; + } else { + *enable_wake = false; + } + + pci_clear_master(pdev); +} + +static int __e100_power_off(struct pci_dev *pdev, bool wake) +{ + if (wake) + return pci_prepare_to_sleep(pdev); + + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +#ifdef CONFIG_PM +static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +{ + bool wake; + __e100_shutdown(pdev, &wake); + return __e100_power_off(pdev, wake); +} + +static int e100_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + /* disable reverse auto-negotiation */ + if (nic->phy == phy_82552_v) { + u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED); + + mdio_write(netdev, nic->mii.phy_id, + E100_82552_SMARTSPEED, + smartspeed & ~(E100_82552_REV_ANEG)); + } + + netif_device_attach(netdev); + if (netif_running(netdev)) + e100_up(nic); + + return 0; +} +#endif /* CONFIG_PM */ + +static void e100_shutdown(struct pci_dev *pdev) +{ + bool wake; + __e100_shutdown(pdev, &wake); + if (system_state == SYSTEM_POWER_OFF) + __e100_power_off(pdev, wake); +} + +/* ------------------ PCI Error Recovery infrastructure -------------- */ +/** + * e100_io_error_detected - called when PCI error is detected. + * @pdev: Pointer to PCI device + * @state: The current pci connection state + */ +static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e100_down(nic); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e100_io_slot_reset - called after the pci bus has been reset. 
+ * @pdev: Pointer to PCI device + * + * Restart the card from scratch. + */ +static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + /* Only one device per card can do a reset */ + if (0 != PCI_FUNC(pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + e100_hw_reset(nic); + e100_phy_init(nic); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e100_io_resume - resume normal operations + * @pdev: Pointer to PCI device + * + * Resume normal operations after an error recovery + * sequence has been completed. + */ +static void e100_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); + + /* ack any pending wake events, disable PME */ + pci_enable_wake(pdev, PCI_D0, 0); + + netif_device_attach(netdev); + if (netif_running(netdev)) { + e100_open(netdev); + mod_timer(&nic->watchdog, jiffies); + } +} + +static const struct pci_error_handlers e100_err_handler = { + .error_detected = e100_io_error_detected, + .slot_reset = e100_io_slot_reset, + .resume = e100_io_resume, +}; + +static struct pci_driver e100_driver = { + .name = DRV_NAME, + .id_table = e100_id_table, + .probe = e100_probe, + .remove = e100_remove, +#ifdef CONFIG_PM + /* Power Management hooks */ + .suspend = e100_suspend, + .resume = e100_resume, +#endif + .shutdown = e100_shutdown, + .err_handler = &e100_err_handler, +}; + +static int __init e100_init_module(void) +{ + if (((1 << debug) - 1) & NETIF_MSG_DRV) { + pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s\n", DRV_COPYRIGHT); + } + return pci_register_driver(&e100_driver); +} + +static void __exit e100_cleanup_module(void) +{ + pci_unregister_driver(&e100_driver); +} + +module_init(e100_init_module); +module_exit(e100_cleanup_module); diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile new file mode 100644 index 000000000..4a6ab1522 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/Makefile @@ -0,0 +1,35 @@ +################################################################################ +# +# Intel PRO/1000 Linux driver +# Copyright(c) 1999 - 2006 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/1000 ethernet driver +# + +obj-$(CONFIG_E1000) += e1000.o + +e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h new file mode 100644 index 000000000..69707108d --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -0,0 +1,374 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + 
int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) \ + ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) 
\ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; +extern const char e1000_driver_version[]; + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c new file mode 100644 index 000000000..4270ad2d4 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -0,0 +1,1911 @@ +/******************************************************************************* + * Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2006 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* ethtool support for e1000 */ + +#include "e1000.h" +#include <linux/jiffies.h> +#include <linux/uaccess.h> + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) E1000_STATS, \ + sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct net_device *)0)->m), \ + offsetof(struct net_device, m) + +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(stats.rxerrc) }, + { "tx_errors", E1000_STAT(stats.txerrc) }, + { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(stats.rlerrc) }, + { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, +}; + +#define E1000_QUEUE_STATS_LEN 0 +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, +
struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->media_type == e1000_media_type_copper) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + + if (hw->mac_type == e1000_82543) + ecmd->transceiver = XCVR_EXTERNAL; + else + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + + if (hw->mac_type >= e1000_82545) + ecmd->transceiver = XCVR_INTERNAL; + else + ecmd->transceiver = XCVR_EXTERNAL; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->mdix; + return 0; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->autoneg_advertised; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = ecmd->eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part 
of the EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000_driver_version, + sizeof(drvinfo->version)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + if (err) + goto err_setup; + } + + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + e1000_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + if (txdr->buffer_info[i].skb) + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + 
txdr->next_to_use = txdr->next_to_clean = 0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber NIC if a half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* The default PHY loopback method is to read the MII + * control register and assert bit 14 (loopback mode).
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break 
and error off + */ + } while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + dev_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, 
&adapter->flags); + } + msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + char *p = NULL; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + break; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + + stat++; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, 
e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c new file mode 100644 index 000000000..45c8c8641 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -0,0 +1,5689 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + + */ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 
40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_SPINLOCK(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u32 ret_val; + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
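+ * The script below first disables the PHY transmitter through register
+ * 0x2F5B, applies the mac-type specific fix-up writes, re-enables and
+ * restarts auto-negotiation through PHY control register 0x0000, and
+ * finally restores the transmitter setting saved here. For the original
+ * 82547 it also re-centres the analog fuse fine/coarse values when the
+ * spare fuse is not enabled.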
+ */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 icr; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
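+ * The 82542/82543/82544 need an explicit EEPROM reset through
+ * CTRL_EXT.EE_RST followed by a short delay, the 82541/82547 families
+ * only need a fixed delay, and newer parts are polled through
+ * e1000_get_auto_rd_done() below.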
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + icr = er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
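+ * e1000_init_rx_addrs() loads the current station address into RAR0 and
+ * clears the remaining entries so that no stale unicast address from a
+ * previous run can still match incoming frames.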
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix + && e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) { + return ret_val; + } + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
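+ * The requested flow control mode is taken from hw->fc if the caller has
+ * overridden it, otherwise from EEPROM word 0x0F. The flow control
+ * address, type and pause-timer registers are always programmed, even
+ * when flow control is disabled, so that any PAUSE frame we do send is
+ * well formed.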
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
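+ * Once the receive FIFO fills past FCRTH the hardware sends an XOFF
+ * (pause) frame; when it drains back below FCRTL, and XON transmission
+ * is enabled through the E1000_FCRTL_XONE bit, an XON frame is sent to
+ * let the partner resume.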
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Tranmsit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. 
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds seconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. 
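+ * On MACs newer than 82543 it is enough to set SLU and clear the force
+ * speed/duplex bits so the MAC simply follows the PHY; on 82543 the
+ * force bits are set and the PHY is hardware-reset before it can be
+ * probed.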
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
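+ * SmartSpeed would otherwise let the PHY fall back to 100/10 after
+ * repeated failed gigabit attempts, which makes no sense when only
+ * 1000Mbps full duplex is being advertised; clearing CR_1000T_MS_ENABLE
+ * returns master/slave resolution to the normal automatic scheme.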
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
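+ * The same register also holds the downshift counters: the M88E1111
+ * ("Vidalia") part uses a single 5x counter, while the older
+ * M88E1000/M88E1011 revisions use separate 1x master and slave
+ * downshift values.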
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
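+ *
+ * Called from e1000_setup_copper_link() once PHY_STATUS reports that the
+ * link is up; the DSP work-around in step 3 only applies to the IGP PHY.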
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. 
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
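+ * Advertising both PAUSE and ASM_DIR tells the partner we can both send
+ * and receive PAUSE frames; the mode actually used is still decided by
+ * the resolution done in e1000_config_fc_after_link_up().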
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed are duplex are forced. 
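+ * With auto-crossover disabled the PHY is pinned to MDI, so reaching a
+ * partner that is also wired MDI needs a crossover cable for the forced
+ * link to come up.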
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
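+ * The software reset issued through MII_CR_RESET returns the PHY
+ * specific control register to its defaults, so the ASSERT_CRS_ON_TX bit
+ * set in e1000_copper_link_mgp_setup() has to be applied again here.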
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) + && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * @mii_reg: data to write to the MII control register + * + * Sets MAC speed and duplex settings to reflect the those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings. 
+ * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. TFCE and RFCE need to be explicitly set by + * software when a Copper PHY is used because autonegotiation is managed + * by the PHY rather than the MAC. Software must also configure these + * bits when link is forced on a fiber connection. + */ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + /* Get the current configuration of the Device Control Register */ + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + + switch (hw->fc) { + case E1000_FC_NONE: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case E1000_FC_RX_PAUSE: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case E1000_FC_TX_PAUSE: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case E1000_FC_FULL: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Disable TX Flow Control for 82542 (rev 2.0) */ + if (hw->mac_type == e1000_82542_rev2_0) + ctrl &= (~E1000_CTRL_TFCE); + + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up - configure flow control after autoneg + * @hw: Struct containing variables accessed by shared code + * + * Configures flow control settings after link is established + * Should be called immediately after a valid link has been established. + * Forces MAC flow control settings if link was forced. When in MII/GMII mode + * and autonegotiation is enabled, the MAC flow control settings will be set + * based on the flow control negotiated by the PHY. In TBI mode, the TFCE + * and RFCE bits will be automatically set to the negotiated flow control mode. + */ +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 mii_nway_adv_reg; + u16 mii_nway_lp_ability_reg; + u16 speed; + u16 duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) + || ((hw->media_type == e1000_media_type_internal_serdes) + && (hw->autoneg_failed)) + || ((hw->media_type == e1000_media_type_copper) + && (!hw->autoneg))) { + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. 
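+ * The local advertisement (PHY_AUTONEG_ADV) and the partner's base page
+ * (PHY_LP_ABILITY) are compared against the IEEE PAUSE/ASM_DIR table
+ * below; for example, local PAUSE=1/ASM_DIR=1 against partner
+ * PAUSE=0/ASM_DIR=1 resolves to E1000_FC_RX_PAUSE. The result replaces
+ * hw->fc before e1000_force_mac_fc() programs RFCE/TFCE.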
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid" + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 rxcw = 0; + u32 ctrl; + u32 status; + u32 rctl; + u32 icr; + u32 signal = 0; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + rxcw = er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + signal = + (hw->mac_type > + e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed. 
The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 + || hw->mac_type == e1000_82543) && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. 
+ */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. + */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility is was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 + && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) + || (*speed == SPEED_10 + && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. 
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) { + return E1000_SUCCESS; + } + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * + * followed by a shift in of 18 bits. This first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits. 
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * . + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32) phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard. 
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32) (phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~E1000_EECD_DO; + } else if (eeprom->type == e1000_eeprom_spi) { + eecd |= E1000_EECD_DO; + } + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* If eeprom is not yet detected, do so now */ + if (eeprom->word_size == 0) + e1000_init_eeprom_params(hw); + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) + || (words > eeprom->word_size - offset) || (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16) (offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16) (offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16) EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16) EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
+ */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* If eeprom is not yet detected, do so now */ + if (eeprom->word_size == 0) + e1000_init_eeprom_params(hw); + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) + || (words > eeprom->word_size - offset) || (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16) (eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16) (offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16) (eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. 
+ * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the other 15 receive addresses. */ + e_dbg("Clearing RAR[1-15]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an Ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persist across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid (AV) bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way.
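The mc_filter_type == 0 case of e1000_hash_mc_addr() above can be checked in isolation against the worked example in its own comment (01:AA:00:12:34:56 hashing to 0x563). A small, self-contained sketch, not driver code:

#include <assert.h>
#include <stdint.h>

/* Bits [47:36] of the address, i.e. the top 12 bits of the last two
 * bytes, exactly as in the mc_filter_type == 0 case above.
 */
static uint16_t mc_hash_type0(const uint8_t mc_addr[6])
{
	uint16_t hash = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4);

	return hash & 0xFFF;	/* 12-bit index into the 4096-bit MTA */
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

	assert(mc_hash_type0(addr) == 0x563);	/* matches the comment above */
	return 0;
}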
+ */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. + * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filer table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filer table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of the + * manageability unit + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16) (hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + /* Fall Through */ + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + /* Fall Through */ + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + volatile u32 temp; + + temp = er32(CRCERRS); + temp = er32(SYMERRS); + temp = er32(MPC); + temp = er32(SCC); + temp = er32(ECOL); + temp = er32(MCC); + temp = er32(LATECOL); + temp = er32(COLC); + temp = er32(DC); + temp = er32(SEC); + temp = er32(RLEC); + temp = er32(XONRXC); + temp = er32(XONTXC); + temp = er32(XOFFRXC); + temp = er32(XOFFTXC); + temp = er32(FCRUC); + + temp = er32(PRC64); + temp = er32(PRC127); + temp = er32(PRC255); + temp = er32(PRC511); + temp = er32(PRC1023); + temp = er32(PRC1522); + + temp = er32(GPRC); + temp = er32(BPRC); + temp = er32(MPRC); + temp = er32(GPTC); + temp = er32(GORCL); + temp = er32(GORCH); + temp = er32(GOTCL); + temp = er32(GOTCH); + temp = er32(RNBC); + temp = er32(RUC); + temp = er32(RFC); + temp = er32(ROC); + temp = er32(RJC); + temp = er32(TORL); + temp = er32(TORH); + temp = er32(TOTL); + temp = er32(TOTH); + temp = er32(TPR); + temp = er32(TPT); + + temp = er32(PTC64); + temp = er32(PTC127); + temp = er32(PTC255); + temp = er32(PTC511); + temp = er32(PTC1023); + temp = er32(PTC1522); + + temp = er32(MPTC); + temp = er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * @tx_packets: Number of transmits since last callback + * @total_collisions: Number of collisions since last callback + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
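The update below reduces to one rule: while collisions are high relative to transmits (and there is enough traffic to judge by), step the extra inter-frame spacing up toward its maximum; once collisions subside and traffic quiets down, drop it back to zero. A condensed restatement of that rule, assuming a stripped-down state struct in place of struct e1000_hw (illustrative only; the function below remains the reference, and the caller would program the returned value into the AIT register):

#include <stdbool.h>
#include <stdint.h>

struct ifs_state {
	bool     in_ifs_mode;
	uint16_t current_ifs_val, ifs_min_val, ifs_max_val;
	uint16_t ifs_step_size, ifs_ratio;
};

uint16_t ifs_update(struct ifs_state *s, uint32_t tx_packets,
		    uint32_t collisions, uint32_t min_num_xmits)
{
	if (collisions * s->ifs_ratio > tx_packets) {
		if (tx_packets > min_num_xmits) {
			/* Enough traffic and too many collisions: back off. */
			s->in_ifs_mode = true;
			if (s->current_ifs_val < s->ifs_max_val)
				s->current_ifs_val = s->current_ifs_val ?
					s->current_ifs_val + s->ifs_step_size :
					s->ifs_min_val;
		}
	} else if (s->in_ifs_mode && tx_packets <= min_num_xmits) {
		/* Collision rate is back to normal: drop the extra IFS. */
		s->in_ifs_mode = false;
		s->current_ifs_val = 0;
	}
	return s->current_ifs_val;
}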
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode + && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) + || (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * @downshift: output parameter : 0 - No Downshift occurred. + * 1 - Downshift occurred. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) { + return ret_val; + } + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) + || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) + || (hw->autoneg_advertised == + AUTONEG_ADVERTISE_10_100_ALL)) { + + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
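e1000_set_vco_speed() below follows the same Marvell M88 idiom already seen in e1000_set_phy_mode() and e1000_polarity_reversal_workaround() above: select a page through the page-select register, read-modify-write register 30, then restore the original page. A generic sketch of that idiom; the register numbers, the m88_rmw() helper, and the read/write callbacks are illustrative stand-ins, not the driver's own definitions:

#include <stdint.h>

/* Illustrative register numbers only; the driver uses its own
 * M88E1000_PHY_PAGE_SELECT / M88E1000_PHY_GEN_CONTROL definitions.
 */
#define PHY_PAGE_SELECT 29
#define PHY_GEN_CONTROL 30

typedef int (*phy_rd)(void *ctx, uint32_t reg, uint16_t *val);
typedef int (*phy_wr)(void *ctx, uint32_t reg, uint16_t val);

/* Read-modify-write PHY register 30 on the given page, then restore the
 * previously selected page so later accesses see the default page.
 */
int m88_rmw(void *ctx, phy_rd rd, phy_wr wr, uint16_t page,
	    uint16_t clear_bits, uint16_t set_bits)
{
	uint16_t saved_page, val;
	int ret;

	if ((ret = rd(ctx, PHY_PAGE_SELECT, &saved_page)))
		return ret;
	if ((ret = wr(ctx, PHY_PAGE_SELECT, page)))
		return ret;
	if ((ret = rd(ctx, PHY_GEN_CONTROL, &val)))
		return ret;
	val = (val & ~clear_bits) | set_bits;
	if ((ret = wr(ctx, PHY_GEN_CONTROL, val)))
		return ret;
	return wr(ctx, PHY_PAGE_SELECT, saved_page);
}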
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h new file mode 100644 index 000000000..5cf7268cc --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -0,0 +1,3110 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + 
e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* 
EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) + +/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define 
E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of a ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. 
We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to 
determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile 
__le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + 
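A minimal, self-contained sketch of how software consumes the legacy struct e1000_rx_desc defined earlier in this header: hardware sets E1000_RXD_STAT_DD when it writes a descriptor back, E1000_RXD_STAT_EOP marks the last descriptor of a frame, and E1000_RXD_ERR_FRAME_ERR_MASK gathers the error bits that should cause the frame to be dropped. The struct layout and constants are copied from this header; plain fixed-width types stand in for the kernel's __le* types (so a little-endian host is assumed), and the descriptor contents are faked in main() instead of being written by a NIC.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copied from the header above; __le64/__le16 replaced by host-endian
 * fixed-width types for this standalone sketch (little-endian assumed). */
struct e1000_rx_desc {
	uint64_t buffer_addr;	/* Address of the descriptor's data buffer */
	uint16_t length;	/* Length of data DMAed into data buffer */
	uint16_t csum;		/* Packet checksum */
	uint8_t  status;	/* Descriptor status */
	uint8_t  errors;	/* Descriptor Errors */
	uint16_t special;
};

#define E1000_RXD_STAT_DD	0x01	/* Descriptor Done */
#define E1000_RXD_STAT_EOP	0x02	/* End of Packet */
#define E1000_RXD_ERR_CE	0x01	/* CRC Error */
#define E1000_RXD_ERR_SE	0x02	/* Symbol Error */
#define E1000_RXD_ERR_SEQ	0x04	/* Sequence Error */
#define E1000_RXD_ERR_CXE	0x10	/* Carrier Extension Error */
#define E1000_RXD_ERR_RXE	0x80	/* Rx Data Error */
#define E1000_RXD_ERR_FRAME_ERR_MASK \
	(E1000_RXD_ERR_CE | E1000_RXD_ERR_SE | E1000_RXD_ERR_SEQ | \
	 E1000_RXD_ERR_CXE | E1000_RXD_ERR_RXE)

/* Walk one writeback pass: keep a frame only if DD and EOP are set and
 * no frame-error bit is raised. */
static void rx_poll(const struct e1000_rx_desc *ring, int count)
{
	for (int i = 0; i < count; i++) {
		const struct e1000_rx_desc *d = &ring[i];

		if (!(d->status & E1000_RXD_STAT_DD))
			break;		/* hardware has not written this one yet */
		if (d->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			printf("desc %d: dropped, errors=0x%02x\n", i, d->errors);
			continue;
		}
		if (d->status & E1000_RXD_STAT_EOP)
			printf("desc %d: frame of %u bytes\n", i, d->length);
	}
}

int main(void)
{
	struct e1000_rx_desc ring[4];

	memset(ring, 0, sizeof(ring));
	ring[0].status = E1000_RXD_STAT_DD | E1000_RXD_STAT_EOP;
	ring[0].length = 60;
	ring[1].status = E1000_RXD_STAT_DD | E1000_RXD_STAT_EOP;
	ring[1].errors = E1000_RXD_ERR_CE;	/* simulated CRC error */
	rx_poll(ring, 4);
	return 0;
}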
+/* Auxiliary Control Register. This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ 
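The receive-queue registers listed above (E1000_RDBAL/RDBAH/RDLEN/RDH/RDT) follow the register-set note earlier: each is a 32-bit MMIO register, the ring's DMA base address is split into a low and a high word, RDLEN holds the ring size in bytes, and head/tail index into the ring. The sketch below only computes the values a driver would typically write; the DMA address is made up, the ring is 256 legacy 16-byte descriptors, and write_reg() is a hypothetical stand-in for a real 32-bit write to the mapped register BAR (hw_addr + offset), not the driver's own accessor.

#include <stdint.h>
#include <stdio.h>

/* Offsets copied from the header above. */
#define E1000_RDBAL	0x02800	/* RX Descriptor Base Address Low - RW */
#define E1000_RDBAH	0x02804	/* RX Descriptor Base Address High - RW */
#define E1000_RDLEN	0x02808	/* RX Descriptor Length - RW */
#define E1000_RDH	0x02810	/* RX Descriptor Head - RW */
#define E1000_RDT	0x02818	/* RX Descriptor Tail - RW */

#define RX_RING_ENTRIES	256
#define RX_DESC_SIZE	16	/* sizeof(struct e1000_rx_desc) */

/* Stand-in for a 32-bit MMIO write to the mapped register space. */
static void write_reg(uint32_t offset, uint32_t value)
{
	printf("write 0x%08x to offset 0x%05x\n", value, offset);
}

int main(void)
{
	/* Hypothetical bus address of the descriptor ring; in a real driver
	 * this would come from a coherent DMA allocation. */
	uint64_t ring_dma = 0x0000000123456000ULL;

	write_reg(E1000_RDBAL, (uint32_t)(ring_dma & 0xffffffff));
	write_reg(E1000_RDBAH, (uint32_t)(ring_dma >> 32));
	write_reg(E1000_RDLEN, RX_RING_ENTRIES * RX_DESC_SIZE);
	write_reg(E1000_RDH, 0);			/* head: next descriptor HW will fill */
	write_reg(E1000_RDT, RX_RING_ENTRIES - 1);	/* tail: last descriptor SW made available */
	return 0;
}

In the driver proper these writes go through 32-bit accessors against the BAR mapped at hw_addr, as the register-set comment above requires.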
+#define E1000_RDFHS 0x02420 /* RX Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* 
TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count 
*/ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
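
The Word 0x0F masks just above encode the adapter's default flow-control advertisement in the EEPROM. A minimal sketch of decoding them follows, assuming a caller-supplied read_word() EEPROM accessor and a placeholder fc_setting enum; neither is part of this driver, and the sketch is illustrative only, not code from the patch.

/* Illustrative sketch only -- not part of this patch.  It relies on the
 * EEPROM_INIT_CONTROL2_REG and EEPROM_WORD0F_* definitions above; the
 * read_word() accessor and the fc_setting enum are hypothetical stand-ins.
 */
enum fc_setting { FC_SETTING_NONE, FC_SETTING_TX_PAUSE, FC_SETTING_FULL };

static enum fc_setting decode_eeprom_word0f_pause(u16 (*read_word)(u16 offset))
{
	u16 word = read_word(EEPROM_INIT_CONTROL2_REG);	/* EEPROM word 0x0F */

	if ((word & EEPROM_WORD0F_PAUSE_MASK) == 0)
		return FC_SETTING_NONE;		/* no pause bits set */
	if ((word & EEPROM_WORD0F_PAUSE_MASK) == EEPROM_WORD0F_ASM_DIR)
		return FC_SETTING_TX_PAUSE;	/* asymmetric direction only */
	return FC_SETTING_FULL;			/* PAUSE advertised */
}

The same bit layout is what the EEPROM_WORD0F_ANE and EEPROM_WORD0F_LPLU masks extend; only the pause-related bits are shown here.
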
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c new file mode 100644 index 000000000..983eb4e6f --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -0,0 +1,5322 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" +#include +#include +#include +#include +#include + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +#define DRV_VERSION "7.3.21-k8-NAPI" +const char e1000_driver_version[] = DRV_VERSION; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... + * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct 
e1000_adapter *adapter); +static int e1000_open(struct net_device *netdev); +static int e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); +static int e1000_resume(struct pci_dev *pdev); +#endif +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; 
+module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = e1000_suspend, + .resume = e1000_resume, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - return device + * used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory.
+ **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter = private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + 
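	/* [Editor's note -- illustrative aside, not part of the imported driver.]
	 * e1000_configure() above refills each Rx ring with
	 * E1000_DESC_UNUSED(ring) buffers; leaving at least one slot empty
	 * is what lets the driver tell a full ring from an empty one, since
	 * next_to_use == next_to_clean then always means "empty".  The
	 * unused count works out to roughly (sketch of the macro defined
	 * in e1000.h):
	 *
	 *   unused = (next_to_clean > next_to_use ? 0 : count)
	 *            + next_to_clean - next_to_use - 1;
	 *
	 * e.g. count = 256, next_to_use = 10, next_to_clean = 4 gives
	 * 256 + 4 - 10 - 1 = 249 descriptors available for new buffers.
	 */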
napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + netif_carrier_off(netdev); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. 
+ */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + e1000_down(adapter); + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... 
e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 0; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats = e1000_get_stats, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + + static int cards_found = 0; + static int global_quad_port_a = 0; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1){ + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + /* Fall Through */ + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + if (tmp == 0 || tmp == 0xFF) { + if (i == 31) + goto err_eeprom; + continue; + } else + break; + } + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 
64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, 
txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + 
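	/* [Editor's note -- illustrative aside, not part of the imported driver.]
	 * The teardown paths above and below mirror how each buffer was
	 * mapped: dma_map_single() pairs with dma_unmap_single(), and
	 * dma_map_page() with dma_unmap_page(), always using the same
	 * device, length and direction, e.g.:
	 *
	 *   dma = dma_map_single(&pdev->dev, data, len, DMA_FROM_DEVICE);
	 *   if (dma_mapping_error(&pdev->dev, dma))
	 *           goto drop;
	 *   ...
	 *   dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);
	 */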
+ vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +static void e1000_free_frag(const void *data) +{ + put_page(virt_to_head_page(data)); +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + e1000_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw 
*hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
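+ * On ce4100 parts get_link_status is forced back to 1 on every call
+ * below, so the link state is re-checked each time.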
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + bool txb2b = true; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = false; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = false; + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
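+ * Queued work is detected as E1000_DESC_UNUSED(txdr) + 1 < txdr->count,
+ * i.e. at least one descriptor is still outstanding while carrier
+ * is off.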
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
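+ * For example, in the low_latency state a poll that moved more than
+ * 10000 bytes at an average of over 1200 bytes per packet is promoted
+ * to bulk_latency, while 2 or fewer packets totalling under 512 bytes
+ * drop back to lowest_latency.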
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + 
context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (unlikely(++i == tx_ring->count)) i = 0; + tx_ring->next_to_use = i; + + return true; +} + +#define E1000_MAX_TXD_PWR 12 +#define E1000_MAX_DATA_PER_TXD (1<hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_tx_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for Controller erratum -- + * descriptor for non-tso packet in a linear SKB that follows a + * tso gets written back prematurely before the data is fully + * DMA'd to the controller + */ + if (!skb->data_len && tx_ring->last_tx_tso && + !skb_is_gso(skb)) { + tx_ring->last_tx_tso = false; + size -= 4; + } + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && !nr_frags && size == len && size > 8)) + size -= 4; + /* work-around for errata 10 and it applies + * to all controllers in PCI-X mode + * The fix is to make sure that the first descriptor of a + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (size > 2015) && count == 0)) + size = 2015; + + /* Workaround for potential 82544 hang in PCI-X. Avoid + * terminating buffers within evenly-aligned dwords. + */ + if (unlikely(adapter->pcix_82544 && + !((unsigned long)(skb->data + offset + size - 1) & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + if (len) { + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + offset = 0; + + while (len) { + unsigned long bufend; + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && f == (nr_frags-1) && + size == len && size > 8)) + size -= 4; + /* Workaround for potential 82544 hang in PCI-X. + * Avoid terminating buffers within evenly-aligned + * dwords. 
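+ * A fragment is trimmed when its last byte (bufend below) lands on
+ * an address with bit 2 clear; shortening the mapping by 4 bytes
+ * pushes the tail into the following descriptor.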
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i==0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
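+ * A stall is requested when skb->len plus the 16-byte FIFO header,
+ * aligned up to that header size, is at least E1000_82547_PAD_LEN
+ * bytes more than the space remaining before the head wraps.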
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
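+ * In practice this caps max_per_txd at four times the MSS (mss << 2)
+ * and recomputes max_txd_pwr as fls(max_per_txd) - 1 below.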
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + unsigned int pull_size; + case e1000_82544: + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + /* fall through */ + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + /* Make sure there is space in the ring for the next send. 
*/ + e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); + + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + /* we need this if more than one processor can write to + * our tail at a time, it synchronizes IO on IA64/Altix + * systems + */ + mmiowb(); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * 
+----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - 
Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the watchdog. + **/ +static struct net_device_stats *e1000_get_stats(struct net_device *netdev) +{ + /* only return the current stats */ + return &netdev->stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + + if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + e_err(probe, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + pr_info("%s changing MTU from %d to %d\n", + netdev->name, netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* 
used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + 
adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + napi_complete(napi); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
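+ * The smp_mb() below pairs with the barrier in __e1000_maybe_stop_tx()
+ * so a stopping transmitter either observes the freed descriptors or
+ * we observe its stopped queue and wake it.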
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * 
e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
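+ * For example, a frame the hardware saw as 65 bytes was counted in
+ * prc127; after the one-byte adjustment frame_len is 64, so prc64 is
+ * incremented and prc127 decremented.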
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + 
if (!rxtop) + break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + memcpy(skb_put(skb, length), data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi 
work completed this call + * @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if thats the case we need to toss it. In fact, we + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + dev_kfree_skb(skb); + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = 
E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
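+ * The wmb() that follows orders those descriptor writes ahead of the
+ * tail-pointer update; writing the new index to RDT is what tells the
+ * hardware that fresh receive buffers are available.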
+ */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + e1000_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + e1000_free_frag(data); + e1000_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + e1000_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + e1000_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + e1000_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
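+ *
+ * If auto-negotiation keeps reporting a 1000T master/slave
+ * configuration fault, the logic below toggles the PHY master/slave
+ * setting and restarts auto-negotiation so a gigabit link can still
+ * be established.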
+ * @adapter: + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? 
DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, 
adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define 
E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. 
+ * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h new file mode 100644 index 000000000..33e7c45a4 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h @@ -0,0 +1,109 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c new file mode 100644 index 000000000..c9cde352b --- /dev/null +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c @@ -0,0 +1,754 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. 
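+ * For example, the copper default of 0x2F advertises 1000/Full,
+ * 100/Full, 100/Half, 10/Full and 10/Half, while the fiber default of
+ * 0x20 advertises 1000/Full only.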
+ * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case 
list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt 
Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber" + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = 
list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + 
adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + /* fall through */ + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c new file mode 100644 index 000000000..08f22f348 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -0,0 +1,1418 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include "e1000.h" + +/* A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". 
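+ * For instance, a raw index of 1 maps to a reported cable length
+ * between 60 and 115 (table[1] .. table[1 + 5]).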
+ */ +static const u16 e1000_gg82563_cable_length_table[] = { + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF +}; + +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_gg82563_cable_length_table) + +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000e_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + return 0; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.check_for_link = e1000e_check_for_serdes_link; + mac->ops.setup_physical_interface = + e1000e_setup_fiber_serdes_link; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_80003es2lan; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); + /* Adaptive IFS not supported */ + mac->adaptive_ifs = false; + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_80003es2lan(hw); + if (rc) + return rc; + + return 0; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the Kumeran interface. + * + **/ +static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the Kumeran interface + **/ +static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. 
+ **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 50; + + while (i < timeout) { + if (e1000e_get_hw_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = er32(SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000e_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); + + return 0; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (e1000e_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = er32(SW_FW_SYNC); + swfw_sync &= ~mask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. + **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... 
+ */ + usleep_range(200, 400); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usleep_range(200, 400); + + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usleep_range(200, 400); + } else { + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. + **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usleep_range(200, 400); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usleep_range(200, 400); + + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & + offset, data); + + usleep_range(200, 400); + } else { + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & + offset, data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. + **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return e1000e_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. 
+ **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (er32(EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("GG82563 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= BMCR_RESET; + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (hw->phy.autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. 
+ **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + return ret_val; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + hw->phy.ops.cfg_on_link_up(hw); + } else { + ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 kum_reg_data; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + ctrl = er32(CTRL); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + e1000_release_phy_80003es2lan(hw); + + /* Disable IBIST slave mode (far-end loopback) */ + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (ret_val) + return ret_val; + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + return e1000_check_alt_mac_addr_generic(hw); +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 kum_reg_data; + u16 i; + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. 
*/ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + if (ret_val) + return ret_val; + + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = er32(TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = er32(TCTL); + reg_data |= E1000_TCTL_RTLC; + ew32(TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = er32(TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + ew32(TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = er32(TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* default to true to enable the MDIC W/A */ + hw->dev_spec.e80003es2lan.mdic_wa_enable = true; + + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec.e80003es2lan.mdic_wa_enable = false; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. + **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. 
+ */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 reg; + u16 data; + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass Rx and Tx FIFO's */ + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); + if (ret_val) + return ret_val; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + ew32(CTRL_EXT, reg); + + ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. 
+ */
+	if (!hw->mac.ops.check_mng_mode(hw)) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	/* Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+						   0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+						  &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+						   reg_data);
+	if (ret_val)
+		return ret_val;
+	ret_val =
+	    e1000_read_kmrn_reg_80003es2lan(hw,
+					    E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					    &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val =
+	    e1000_write_kmrn_reg_80003es2lan(hw,
+					     E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					     reg_data);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 or gigabit operation, depending on the negotiated link speed.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 speed;
+	u16 duplex;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+							     &duplex);
+		if (ret_val)
+			return ret_val;
+
+		if (speed == SPEED_1000)
+			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+		else
+			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val =
+	    e1000_write_kmrn_reg_80003es2lan(hw,
+					     E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					     reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	ew32(TIPG, tipg);
+
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val =
+	    e1000_write_kmrn_reg_80003es2lan(hw,
+					     E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					     reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	ew32(TIPG, tipg);
+
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					   u16 *data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val;
+
+	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	e1000_release_mac_csr_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
+ **/ +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_read_mac_addr_80003es2lan - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static const struct e1000_mac_operations es2_mac_ops = { + .read_mac_addr = e1000_read_mac_addr_80003es2lan, + .id_led_init = e1000e_id_led_init_generic, + .blink_led = e1000e_blink_led_generic, + .check_mng_mode = e1000e_check_mng_mode_generic, + /* check_for_link dependent on media type */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + .get_link_up_info = e1000_get_link_up_info_80003es2lan, + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_generic, + .reset_hw = e1000_reset_hw_80003es2lan, + .init_hw = e1000_init_hw_80003es2lan, + .setup_link = e1000e_setup_link_generic, + /* setup_physical_interface dependent on media type */ + .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, + .rar_get_count = e1000e_rar_get_count_generic, +}; + +static const struct e1000_phy_operations es2_phy_ops = { + .acquire = e1000_acquire_phy_80003es2lan, + .check_polarity = e1000_check_polarity_m88, + 
.check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .release = e1000_release_phy_80003es2lan, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, +}; + +static const struct e1000_nvm_operations es2_nvm_ops = { + .acquire = e1000_acquire_nvm_80003es2lan, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_80003es2lan, + .reload = e1000e_reload_nvm_generic, + .update = e1000e_update_nvm_checksum_generic, + .valid_led_default = e1000e_valid_led_default, + .validate = e1000e_validate_nvm_checksum_generic, + .write = e1000_write_nvm_80003es2lan, +}; + +const struct e1000_info e1000_es2_info = { + .mac = e1000_80003es2lan, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_RX_NEEDS_RESTART /* errata */ + | FLAG_TARC_SET_BIT_ZERO /* errata */ + | FLAG_APME_CHECK_PORT_B + | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */ + .flags2 = FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_80003es2lan, + .mac_ops = &es2_mac_ops, + .phy_ops = &es2_phy_ops, + .nvm_ops = &es2_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h new file mode 100644 index 000000000..535a94309 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -0,0 +1,88 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_80003ES2LAN_H_ +#define _E1000E_80003ES2LAN_H_ + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 +#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 +#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 + +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) + * 0 = <50M + * 1 = 50-80M + * 2 = 80-100M + * 3 = 110-140M + * 4 = >140M + */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Max number of times Kumeran read/write should be validated */ +#define GG82563_MAX_KMRN_RETRY 0x5 + +/* Power Management Control Register (Page 193, Register 20) */ +/* 1=Enable SERDES Electrical Idle */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c new file mode 100644 index 000000000..32e77755a --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -0,0 +1,2063 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000.h" + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +static s32 e1000_led_on_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + case e1000_82574: + case e1000_82583: + phy->type = e1000_phy_bm; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + } + + /* This can only be done after all function pointers are setup. 
*/ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + e_dbg("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000e_check_for_fiber_link; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82572EI_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_serdes_link_82571; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_82571; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* MAC-specific function pointers */ + switch (hw->mac.type) { + case e1000_82573: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = !!(er32(FWSM) & + E1000_FWSM_MODE_MASK); + break; + case e1000_82574: + case e1000_82583: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_82574; + mac->ops.led_on = e1000_led_on_82574; + break; + default: + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM. 
+ */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = er32(SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else { + force_clear_smbi = false; + } + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = er32(SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + e_dbg("Please update your 82571 Bootagent\n"); + } + ew32(SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* Initialize device specific counter of SMBI acquisition timeouts. */ + hw->dev_spec.e82571.smb_counter = 0; + + return 0; +} + +static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) + adapter->flags &= ~FLAG_HAS_WOL; + /* Does not support WoL on any port */ + if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) + adapter->flags &= ~FLAG_HAS_WOL; + break; + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + adapter->flags |= FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = DEFAULT_JUMBO; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
+ */ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000e_get_phy_id(hw); + case e1000_82574: + case e1000_82583: + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usleep_range(20, 40); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + break; + default: + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 sw_timeout = hw->nvm.word_size + 1; + s32 fw_timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* If we have timedout 3 times on trying to acquire + * the inter-port SMBI semaphore, there is old code + * operating on the other port, and it is not + * releasing SMBI. Modify the number of times that + * we try for the semaphore to interwork with this + * older code. + */ + if (hw->dev_spec.e82571.smb_counter > 2) + sw_timeout = 1; + + /* Get the SW semaphore */ + while (i < sw_timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usleep_range(50, 100); + i++; + } + + if (i == sw_timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + hw->dev_spec.e82571.smb_counter++; + } + /* Get the FW semaphore. */ + for (i = 0; i < fw_timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + usleep_range(50, 100); + } + + if (i == fw_timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_82571(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore during reset. + * + **/ +static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + s32 i = 0; + + extcnf_ctrl = er32(EXTCNF_CTRL); + do { + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + usleep_range(2000, 4000); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + + if (i == MDIO_OWNERSHIP_TIMEOUT) { + /* Release semaphores */ + e1000_put_hw_semaphore_82573(hw); + e_dbg("Driver can't access the PHY\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82573 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used during reset. 
+ * + **/ +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + mutex_lock(&swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + mutex_unlock(&swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + e1000_put_hw_semaphore_82573(hw); + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000e_acquire_nvm(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
+ **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return 0; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if (!(er32(EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if (!(er32(EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. 
+ * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext, eecd, tctl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + e1e_flush(); + + usleep_range(10000, 20000); + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + + ctrl = er32(CTRL); + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82573: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + usleep_range(10, 20); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* REQ and GNT bits need to be cleared when using AUTO_RD + * to access the EEPROM. + */ + eecd = er32(EECD); + eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); + ew32(EECD, eecd); + break; + case e1000_82573: + case e1000_82574: + case e1000_82583: + msleep(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. 
*/ + ew32(IMC, 0xffffffff); + er32(ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000e_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000e_enable_tx_pkt_filtering(hw); + /* fall through */ + case e1000_82574: + case e1000_82583: + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + break; + default: + reg_data = er32(TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(1), reg_data); + break; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/ +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + case e1000_82574: + case e1000_82583: + reg |= (1 << 26); + break; + default: + break; + } + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); + break; + default: + break; + } + + /* Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL); + reg &= ~(1 << 29); + ew32(CTRL, reg); + break; + default: + break; + } + + /* Extended Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + break; + default: + break; + } + + if (hw->mac.type == e1000_82571) { + reg = er32(PBA_ECC); + reg |= E1000_PBA_ECC_CORR_EN; + ew32(PBA_ECC, reg); + } + + /* Workaround for hardware errata. + * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 + */ + if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) { + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; + ew32(CTRL_EXT, reg); + } + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type <= e1000_82573) { + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); + } + + /* PCI-Ex Control Registers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + reg = er32(GCR); + reg |= (1 << 22); + ew32(GCR, reg); + + /* Workaround for hardware errata. + * apply workaround for hardware errata documented in errata + * docs Fixes issue where some error prone or unreliable PCIe + * completions are occurring, particularly with ASPM enabled. + * Without fix, issue can cause Tx timeouts. + */ + reg = er32(GCR2); + reg |= 1; + ew32(GCR2, reg); + break; + default: + break; + } +} + +/** + * e1000_clear_vfta_82571 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +static void e1000_clear_vfta_82571(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. 
+ */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = + 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + break; + default: + break; + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + e1e_flush(); + } +} + +/** + * e1000_check_mng_mode_82574 - Check manageability is enabled + * @hw: pointer to the HW structure + * + * Reads the NVM Initialization Control Word 2 and returns true + * (>0) if any manageability is enabled, else false (0). + **/ +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) +{ + u16 data; + + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; +} + +/** + * e1000_led_on_82574 - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +static s32 e1000_led_on_82574(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + + ctrl = hw->mac.ledctl_mode2; + if (!(E1000_STATUS_LU & er32(STATUS))) { + /* If no link, then turn LED on by setting the invert bit + * for each LED that's "on" (0x0E) in ledctl_mode2. + */ + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); + } + ew32(LEDCTL, ctrl); + + return 0; +} + +/** + * e1000_check_phy_82574 - check 82574 phy hung state + * @hw: pointer to the HW structure + * + * Returns whether phy is hung or not + **/ +bool e1000_check_phy_82574(struct e1000_hw *hw) +{ + u16 status_1kbt = 0; + u16 receive_errors = 0; + s32 ret_val; + + /* Read PHY Receive Error counter first, if its is max - all F's then + * read the Base1000T status register If both are max then PHY is hung. + */ + ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); + if (ret_val) + return false; + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); + if (ret_val) + return false; + if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == + E1000_IDLE_ERROR_COUNT_MASK) + return true; + } + + return false; +} + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + /* 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + break; + default: + break; + } + + return e1000e_setup_link_generic(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. 
Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + } + + if (ret_val) + return ret_val; + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. + * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = 0; + + ctrl = er32(CTRL); + status = er32(STATUS); + er32(RXCW); + /* SYNCH bit and IV bit are sticky */ + usleep_range(10, 20); + rxcw = er32(RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner. 
+ */ + if (rxcw & E1000_RXCW_C) { + /* Enable autoneg, and unforce link up */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("FORCED_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_autoneg_progress: + if (rxcw & E1000_RXCW_C) { + /* We received /C/ ordered sets, meaning the + * link partner has autonegotiated, and we can + * trust the Link Up (LU) status bit. + */ + if (status & E1000_STATUS_LU) { + mac->serdes_link_state = + e1000_serdes_link_autoneg_complete; + e_dbg("AN_PROG -> AN_UP\n"); + mac->serdes_has_link = true; + } else { + /* Autoneg completed, but failed. */ + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("AN_PROG -> DOWN\n"); + } + } else { + /* The link partner did not autoneg. + * Force link up and full duplex, and change + * state to forced. + */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error config flow control\n"); + break; + } + mac->serdes_link_state = + e1000_serdes_link_forced_up; + mac->serdes_has_link = true; + e_dbg("AN_PROG -> FORCED_UP\n"); + } + break; + + case e1000_serdes_link_down: + default: + /* The link was down but the receiver has now gained + * valid sync, so lets see if we can bring the link + * up. + */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("DOWN -> AN_PROG\n"); + break; + } + } else { + if (!(rxcw & E1000_RXCW_SYNCH)) { + mac->serdes_has_link = false; + mac->serdes_link_state = e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + } else { + /* Check several times, if SYNCH bit and CONFIG + * bit both are consistently 1 then simply ignore + * the IV bit and restart Autoneg + */ + for (i = 0; i < AN_RETRY_COUNT; i++) { + usleep_range(10, 20); + rxcw = er32(RXCW); + if ((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C)) + continue; + + if (rxcw & E1000_RXCW_IV) { + mac->serdes_has_link = false; + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + break; + } + } + + if (i == AN_RETRY_COUNT) { + txcw = er32(TXCW); + txcw |= E1000_TXCW_ANE; + ew32(TXCW, txcw); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("ANYSTATE -> AN_PROG\n"); + } + } + } + + return ret_val; +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (*data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + break; + default: + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + break; + } + + return 0; +} + +/** + * e1000e_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administered address state. + **/ +bool e1000e_get_laa_state_82571(struct e1000_hw *hw) +{ + if (hw->mac.type != e1000_82571) + return false; + + return hw->dev_spec.e82571.laa_is_present; +} + +/** + * e1000e_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administered address state. + **/ +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + if (hw->mac.type != e1000_82571) + return; + + hw->dev_spec.e82571.laa_is_present = state; + + /* If workaround is activated... */ + if (state) + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. + */ + hw->mac.ops.rar_set(hw, hw->mac.addr, + hw->mac.rar_entry_count - 1); +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in work 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 data; + + if (nvm->type != e1000_nvm_flash_hw) + return 0; + + /* Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = e1000_read_nvm(hw, 0x10, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x10)) { + /* Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. Otherwise, + * we need to set this bit to a 1 and update the + * checksum. + */ + ret_val = e1000_read_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = e1000_write_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_read_mac_addr_82571 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82571) { + s32 ret_val; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
+ */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + } + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_mac_info *mac = &hw->mac; + + if (!phy->ops.check_reset_block) + return; + + /* If the management interface is not enabled, then power down */ + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static const struct e1000_mac_operations e82571_mac_ops = { + /* .check_mng_mode: mac type dependent */ + /* .check_for_link: media type dependent */ + .id_led_init = e1000e_id_led_init_generic, + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + /* .get_link_up_info: media type dependent */ + /* .led_on: mac type dependent */ + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_82571, + .reset_hw = e1000_reset_hw_82571, + .init_hw = e1000_init_hw_82571, + .setup_link = e1000_setup_link_82571, + /* .setup_physical_interface: media type dependent */ + .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, + .read_mac_addr = e1000_read_mac_addr_82571, + .rar_set = e1000e_rar_set_generic, + .rar_get_count = e1000e_rar_get_count_generic, +}; + +static const struct e1000_phy_operations e82_phy_ops_igp = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_igp, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = NULL, + .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, + .get_cfg_done = e1000_get_cfg_done_82571, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_info = e1000e_get_phy_info_igp, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_igp, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = 
e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done_generic, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_m88, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_m88, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_phy_operations e82_phy_ops_bm = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done_generic, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_bm2, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_bm2, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_nvm_operations e82571_nvm_ops = { + .acquire = e1000_acquire_nvm_82571, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_82571, + .reload = e1000e_reload_nvm_generic, + .update = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate = e1000_validate_nvm_checksum_82571, + .write = e1000_write_nvm_82571, +}; + +const struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1 + | FLAG2_DISABLE_ASPM_L0S, + .pba = 20, + .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82574_info = { + .mac = e1000_82574, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_MSIX + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | 
FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_CHECK_PHY_HANG + | FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1 + | FLAG2_NO_DISABLE_RX + | FLAG2_DMA_BURST, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82583_info = { + .mac = e1000_82583, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1 + | FLAG2_NO_DISABLE_RX, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h new file mode 100644 index 000000000..2e758f796 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -0,0 +1,53 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_82571_H_ +#define _E1000E_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ + +/* Intr Throttling - RW */ +#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) + +#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAC_MASK_82574 0x01F00000 + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 + +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF +bool e1000_check_phy_82574(struct e1000_hw *hw); +bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile new file mode 100644 index 000000000..106de4933 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -0,0 +1,37 @@ +################################################################################ +# +# Intel PRO/1000 Linux driver +# Copyright(c) 1999 - 2014 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/1000 ethernet driver +# + +obj-$(CONFIG_E1000E) += e1000e.o + +e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ + mac.o manage.o nvm.o phy.o \ + param.o ethtool.o netdev.o ptp.o + diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h new file mode 100644 index 000000000..0570c668e --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -0,0 +1,800 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + 
E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * 
E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_CSR_SM 0x8 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ +#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 + +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define 
E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */ + +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_PHY_LED0_MODE_MASK 0x00000007 +#define E1000_PHY_LED0_IVRT 0x00000008 +#define E1000_PHY_LED0_MASK 0x0000001F + +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 
+#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define E1000_TX_PTR_GAP 0x1F + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* Low Power IDLE Control */ +#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +/* PBA ECC Register */ +#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */ +#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of desc. still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. 
However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 
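+/* Illustrative sketch (not from the original source): the E1000_GCR_*_NO_SNOOP
+ * bits in this group are OR'd together as PCIE_NO_SNOOP_ALL below, and MAC
+ * setup code typically writes that mask into the GCR register to disable PCIe
+ * no-snoop for descriptor and packet traffic, roughly (assuming a local
+ * struct e1000_hw *hw):
+ *
+ *	u32 gcr = er32(GCR);
+ *	gcr |= PCIE_NO_SNOOP_ALL;
+ *	ew32(GCR, gcr);
+ *
+ * er32()/ew32() are the MMIO accessors defined in e1000.h later in this patch.
+ */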
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +/* NVM Addressing bits based on type (0-small, 1-large) */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define 
M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ + +/* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_POLL_TIMEOUT 640 + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h new file mode 100644 index 000000000..0abc942c9 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -0,0 +1,595 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hw.h" + +struct e1000_info; + +#define e_dbg(format, arg...) \ + netdev_dbg(hw->adapter->netdev, format, ## arg) +#define e_err(format, arg...) \ + netdev_err(adapter->netdev, format, ## arg) +#define e_info(format, arg...) \ + netdev_info(adapter->netdev, format, ## arg) +#define e_warn(format, arg...) \ + netdev_warn(adapter->netdev, format, ## arg) +#define e_notice(format, arg...) \ + netdev_notice(adapter->netdev, format, ## arg) + +/* Interrupt modes, as used by the IntMode parameter */ +#define E1000E_INT_MODE_LEGACY 0 +#define E1000E_INT_MODE_MSI 1 +#define E1000E_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 4096 +#define E1000_MIN_TXD 64 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 4096 +#define E1000_MIN_RXD 64 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_APME 0x0400 + +#define E1000_MNG_VLAN_NONE (-1) + +#define DEFAULT_JUMBO 9234 + +/* Time to wait before putting the device into D3 if there's no link (in ms). */ +#define LINK_TIMEOUT 100 + +/* Count for polling __E1000_RESET condition every 10-20msec. + * Experimentation has shown the reset can take approximately 210msec. 
+ */ +#define E1000_CHECK_RESET_COUNT 25 + +#define DEFAULT_RDTR 0 +#define DEFAULT_RADV 8 +#define BURST_RDTR 0x20 +#define BURST_RADV 0x20 + +/* in the case of WTHRESH, it appears at least the 82571/2 hardware + * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when + * WTHRESH=4, so a setting of 5 gives the most efficient bus + * utilization but to avoid possible Tx stalls, set it to 1 + */ +#define E1000_TXDCTL_DMA_BURST_ENABLE \ + (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ + E1000_TXDCTL_COUNT_DESC | \ + (1 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ + +#define E1000_RXDCTL_DMA_BURST_ENABLE \ + (0x01000000 | /* set descriptor granularity */ \ + (4 << 16) | /* set writeback threshold */ \ + (4 << 8) | /* set prefetch threshold */ \ + 0x20) /* set hthresh */ + +#define E1000_TIDV_FPD (1 << 31) +#define E1000_RDTR_FPD (1 << 31) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_82574, + board_82583, + board_80003es2lan, + board_ich8lan, + board_ich9lan, + board_ich10lan, + board_pchlan, + board_pch2lan, + board_pch_lpt, + board_pch_spt +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + /* Rx */ + struct { + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_ring { + struct e1000_adapter *adapter; /* back pointer to adapter */ + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + void __iomem *head; + void __iomem *tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + char name[IFNAMSIZ + 5]; + u32 ims_val; + u32 itr_val; + void __iomem *itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; +}; + +/* PHY register snapshot values */ +struct e1000_phy_regs { + u16 bmcr; /* basic mode control register */ + u16 bmsr; /* basic mode status register */ + u16 advertise; /* auto-negotiation advertisement */ + u16 lpa; /* link partner ability register */ + u16 expansion; /* auto-negotiation expansion reg */ + u16 ctrl1000; /* 1000BASE-T control register */ + u16 stat1000; /* 1000BASE-T status register */ + u16 estatus; /* extended status register */ +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* Tx - one ring per active queue */ + struct e1000_ring *tx_ring ____cacheline_aligned_in_smp; + u32 tx_fifo_limit; + + struct napi_struct napi; + + unsigned int uncorr_errors; /* uncorrectable ECC errors */ + unsigned int corr_errors; /* correctable ECC errors */ + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + bool tx_hang_recheck; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u64 tpt_old; + u64 colc_old; + u32 gotc; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + u32 tx_hwtstamp_timeouts; + + /* Rx */ + bool (*clean_rx)(struct e1000_ring *ring, int *work_done, + int work_to_do) ____cacheline_aligned_in_smp; + void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count, + gfp_t gfp); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + spinlock_t stats64_lock; /* protects statistics counters */ + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + /* Snapshot of PHY registers */ + struct e1000_phy_regs phy_regs; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + + bool fc_autoneg; + + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + + 
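+	/* Counts consecutive checks that have found the PHY hung; used by the
+	 * 82574 PHY-hang workaround selected with FLAG2_CHECK_PHY_HANG (defined
+	 * further down in this file).
+	 */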
int phy_hang_count; + + u16 tx_ring_count; + u16 rx_ring_count; + + struct hwtstamp_config hwtstamp_config; + struct delayed_work systim_overflow_work; + struct sk_buff *tx_hwtstamp_skb; + unsigned long tx_hwtstamp_start; + struct work_struct tx_hwtstamp_work; + spinlock_t systim_lock; /* protects SYSTIML/H registers */ + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct pm_qos_request pm_qos_req; + + u16 eee_advert; +}; + +struct e1000_info { + enum e1000_mac_type mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + const struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + const struct e1000_nvm_operations *nvm_ops; +}; + +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); + +/* The system time is maintained by a 64-bit counter comprised of the 32-bit + * SYSTIMH and SYSTIML registers. How the counter increments (and therefore + * its resolution) is based on the contents of the TIMINCA register - it + * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0). + * For the best accuracy, the incperiod should be as small as possible. The + * incvalue is scaled by a factor as large as possible (while still fitting + * in bits 23:0) so that relatively small clock corrections can be made. + * + * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of + * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) + * bits to count nanoseconds leaving the rest for fractional nanoseconds. + */ +#define INCVALUE_96MHz 125 +#define INCVALUE_SHIFT_96MHz 17 +#define INCPERIOD_SHIFT_96MHz 2 +#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz) + +#define INCVALUE_25MHz 40 +#define INCVALUE_SHIFT_25MHz 18 +#define INCPERIOD_25MHz 1 + +/* Another drawback of scaling the incvalue by a large factor is the + * 64-bit SYSTIM register overflows more quickly. This is dealt with + * by simply reading the clock before it overflows.
+ * + * Clock ns bits Overflows after + * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~ + * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs + * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours + */ +#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4) +#define E1000_MAX_82574_SYSTIM_REREADS 50 +#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL) + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +/* reserved bit4 */ +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_READ_ONLY_NVM (1 << 8) +#define FLAG_IS_ICH (1 << 9) +#define FLAG_HAS_MSIX (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 13) +#define FLAG_HAS_HW_TIMESTAMP (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +/* reserved (1 << 28) */ +#define FLAG_TSO_FORCE (1 << 29) +#define FLAG_RESTART_NOW (1 << 30) +#define FLAG_MSI_TEST_FAILED (1 << 31) + +#define FLAG2_CRC_STRIPPING (1 << 0) +#define FLAG2_HAS_PHY_WAKEUP (1 << 1) +#define FLAG2_IS_DISCARDING (1 << 2) +#define FLAG2_DISABLE_ASPM_L1 (1 << 3) +#define FLAG2_HAS_PHY_STATS (1 << 4) +#define FLAG2_HAS_EEE (1 << 5) +#define FLAG2_DMA_BURST (1 << 6) +#define FLAG2_DISABLE_ASPM_L0S (1 << 7) +#define FLAG2_DISABLE_AIM (1 << 8) +#define FLAG2_CHECK_PHY_HANG (1 << 9) +#define FLAG2_NO_DISABLE_RX (1 << 10) +#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) +#define FLAG2_DFLT_CRC_STRIPPING (1 << 12) +#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_ACCESS_SHARED_RESOURCE, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +void e1000e_check_options(struct e1000_adapter *adapter); +void e1000e_set_ethtool_ops(struct net_device *netdev); + +int e1000e_up(struct e1000_adapter *adapter); +void e1000e_down(struct e1000_adapter *adapter, bool reset); +void e1000e_reinit_locked(struct e1000_adapter *adapter); +void e1000e_reset(struct e1000_adapter *adapter); +void e1000e_power_up_phy(struct e1000_adapter *adapter); +int e1000e_setup_rx_resources(struct e1000_ring *ring); +int e1000e_setup_tx_resources(struct e1000_ring *ring); +void e1000e_free_rx_resources(struct e1000_ring *ring); +void e1000e_free_tx_resources(struct e1000_ring *ring); +struct rtnl_link_stats64 *e1000e_get_stats64(struct 
net_device *netdev, + struct rtnl_link_stats64 *stats); +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +void e1000e_get_hw_control(struct e1000_adapter *adapter); +void e1000e_release_hw_control(struct e1000_adapter *adapter); +void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); + +extern unsigned int copybreak; + +extern const struct e1000_info e1000_82571_info; +extern const struct e1000_info e1000_82572_info; +extern const struct e1000_info e1000_82573_info; +extern const struct e1000_info e1000_82574_info; +extern const struct e1000_info e1000_82583_info; +extern const struct e1000_info e1000_ich8_info; +extern const struct e1000_info e1000_ich9_info; +extern const struct e1000_info e1000_ich10_info; +extern const struct e1000_info e1000_pch_info; +extern const struct e1000_info e1000_pch2_info; +extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_pch_spt_info; +extern const struct e1000_info e1000_es2_info; + +void e1000e_ptp_init(struct e1000_adapter *adapter); +void e1000e_ptp_remove(struct e1000_adapter *adapter); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset(hw); +} + +static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg(hw, offset, data); +} + +static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg_locked(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg(hw, offset, data); +} + +static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg_locked(hw, offset, data); +} + +void e1000e_reload_nvm_generic(struct e1000_hw *hw); + +static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return hw->nvm.ops.read(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return hw->nvm.ops.write(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_info(hw); +} + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +#define er32(reg) __er32(hw, E1000_##reg) + +s32 __ew32_prepare(struct e1000_hw *hw); +void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); + +#define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) + +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (__ew32((a), (reg + ((offset) << 2)), (value))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + +#endif /* _E1000_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c new file mode 100644 index 000000000..11f486e4f --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -0,0 +1,2322 @@ +/* Intel PRO/1000 Linux driver + * 
Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* ethtool support for e1000 */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000.h" + +enum { NETDEV_STATS, E1000_STATS }; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(str, m) { \ + .stat_string = str, \ + .type = E1000_STATS, \ + .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ + .stat_offset = offsetof(struct e1000_adapter, m) } +#define E1000_NETDEV_STAT(str, m) { \ + .stat_string = str, \ + .type = NETDEV_STATS, \ + .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, m) } + +static const struct e1000_stats e1000_gstrings_stats[] = { + E1000_STAT("rx_packets", stats.gprc), + E1000_STAT("tx_packets", stats.gptc), + E1000_STAT("rx_bytes", stats.gorc), + E1000_STAT("tx_bytes", stats.gotc), + E1000_STAT("rx_broadcast", stats.bprc), + E1000_STAT("tx_broadcast", stats.bptc), + E1000_STAT("rx_multicast", stats.mprc), + E1000_STAT("tx_multicast", stats.mptc), + E1000_NETDEV_STAT("rx_errors", rx_errors), + E1000_NETDEV_STAT("tx_errors", tx_errors), + E1000_NETDEV_STAT("tx_dropped", tx_dropped), + E1000_STAT("multicast", stats.mprc), + E1000_STAT("collisions", stats.colc), + E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), + E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), + E1000_STAT("rx_crc_errors", stats.crcerrs), + E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), + E1000_STAT("rx_no_buffer_count", stats.rnbc), + E1000_STAT("rx_missed_errors", stats.mpc), + E1000_STAT("tx_aborted_errors", stats.ecol), + E1000_STAT("tx_carrier_errors", stats.tncrs), + E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), + E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), + E1000_STAT("tx_window_errors", stats.latecol), + E1000_STAT("tx_abort_late_coll", stats.latecol), + E1000_STAT("tx_deferred_ok", stats.dc), + E1000_STAT("tx_single_coll_ok", stats.scc), + E1000_STAT("tx_multi_coll_ok", stats.mcc), + E1000_STAT("tx_timeout_count", tx_timeout_count), + E1000_STAT("tx_restart_queue", restart_queue), + E1000_STAT("rx_long_length_errors", stats.roc), + E1000_STAT("rx_short_length_errors", stats.ruc), + E1000_STAT("rx_align_errors", stats.algnerrc), + E1000_STAT("tx_tcp_seg_good", stats.tsctc), + E1000_STAT("tx_tcp_seg_failed", stats.tsctfc), + E1000_STAT("rx_flow_control_xon", stats.xonrxc), + E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), + E1000_STAT("tx_flow_control_xon", stats.xontxc), + E1000_STAT("tx_flow_control_xoff", stats.xofftxc), + E1000_STAT("rx_csum_offload_good", hw_csum_good), + E1000_STAT("rx_csum_offload_errors", hw_csum_err), + E1000_STAT("rx_header_split", rx_hdr_split), + 
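+	/* Each entry records the size and struct offset of its backing counter
+	 * (via the E1000_STAT()/E1000_NETDEV_STAT() macros above), which lets
+	 * the ethtool statistics handler copy values generically instead of
+	 * naming every field explicitly.
+	 */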
E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + E1000_STAT("tx_smbus", stats.mgptc), + E1000_STAT("rx_smbus", stats.mgprc), + E1000_STAT("dropped_smbus", stats.mgpdc), + E1000_STAT("rx_dma_failed", rx_dma_failed), + E1000_STAT("tx_dma_failed", tx_dma_failed), + E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + E1000_STAT("uncorr_ecc_errors", uncorr_errors), + E1000_STAT("corr_ecc_errors", corr_errors), + E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), +}; + +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 speed; + + if (hw->phy.media_type == e1000_media_type_copper) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + + speed = SPEED_UNKNOWN; + ecmd->duplex = DUPLEX_UNKNOWN; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + speed = adapter->link_speed; + ecmd->duplex = adapter->link_duplex - 1; + } + } else if (!pm_runtime_suspended(netdev->dev.parent)) { + u32 status = er32(STATUS); + + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + speed = SPEED_100; + else + speed = SPEED_10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } + } + + ethtool_cmd_speed_set(ecmd, speed); + ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy.media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 Mbps full duplex */ + if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && + (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) { + goto err_inval; + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err("Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int ret_val = 0; + + pm_runtime_get_sync(netdev->dev.parent); + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (hw->phy.ops.check_reset_block && + hw->phy.ops.check_reset_block(hw)) { + e_err("Cannot change link characteristics when SoL/IDER is active.\n"); + ret_val = -EINVAL; + goto out; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced.
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = -EOPNOTSUPP; + goto out; + } + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + ret_val = -EINVAL; + goto out; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + ret_val = -EINVAL; + goto out; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + e1000e_down(adapter, true); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + +out: + pm_runtime_put_sync(netdev->dev.parent); + clear_bit(__E1000_RESETTING, &adapter->state); + return ret_val; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + pm_runtime_get_sync(netdev->dev.parent); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000e_down(adapter, true); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + if (hw->phy.media_type == e1000_media_type_fiber) { + retval = hw->mac.ops.setup_link(hw); + /* implicit goto out */ + } else { + retval = e1000e_force_mac_fc(hw); + if (retval) + goto out; + e1000e_set_fc_watermarks(hw); + } + } + +out: + pm_runtime_put_sync(netdev->dev.parent); + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device __always_unused *netdev) +{ +#define E1000_REGS_LEN 32 /* overestimate */ + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + pm_runtime_get_sync(netdev->dev.parent); + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN(0)); + regs_buff[4] = er32(RDH(0)); + regs_buff[5] = er32(RDT(0)); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN(0)); + regs_buff[9] = er32(TDH(0)); + regs_buff[10] = er32(TDT(0)); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + + /* ethtool doesn't use anything past this point, so all this + * code is likely legacy junk for apps that may or may not exist + */ + if (hw->phy.type == e1000_phy_m88) { + e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + 
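+		/* The remaining slots follow the historical e1000 register-dump
+		 * layout (hence the dummy and duplicated entries below, kept to
+		 * align with the IGP-style PHY register dump).
+		 */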
regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = 0; /* was idle_errors */ + e1e_rphy(hw, MII_STAT1000, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + + pm_runtime_put_sync(netdev->dev.parent); +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + pm_runtime_get_sync(netdev->dev.parent); + + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + pm_runtime_put_sync(netdev->dev.parent); + + if (ret_val) { + /* a read error occurred, throw away the result */ + memset(eeprom_buff, 0xff, sizeof(u16) * + (last_word - first_word + 1)); + } else { + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != + (adapter->pdev->vendor | (adapter->pdev->device << 16))) + return -EFAULT; + + if (adapter->flags & FLAG_READ_ONLY_NVM) + return -EINVAL; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + pm_runtime_get_sync(netdev->dev.parent); + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (!ret_val)) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + + if (ret_val) + 
goto out; + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + if (ret_val) + goto out; + + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for applicable controllers + */ + if ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82583) || + (hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82573)) + ret_val = e1000e_update_nvm_checksum(hw); + +out: + pm_runtime_put_sync(netdev->dev.parent); + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version)); + + /* EEPROM image version # is reported as firmware version # for + * PCI-E controllers + */ + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d-%d", + (adapter->eeprom_vers & 0xF000) >> 12, + (adapter->eeprom_vers & 0x0FF0) >> 4, + (adapter->eeprom_vers & 0x000F)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = E1000_MAX_RXD; + ring->tx_max_pending = E1000_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; + int err = 0, size = sizeof(struct e1000_ring); + bool set_tx = false, set_rx = false; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD, + E1000_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD, + E1000_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) + /* nothing to do */ + return 0; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + /* Set counts now and allocate resources during open() */ + adapter->tx_ring->count = new_tx_count; + adapter->rx_ring->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + set_tx = (new_tx_count != adapter->tx_ring_count); + set_rx = (new_rx_count != adapter->rx_ring_count); + + /* Allocate temporary storage for ring updates */ + if (set_tx) { + temp_tx = vmalloc(size); + if (!temp_tx) { + err = -ENOMEM; + goto free_temp; + } + } + if (set_rx) { + temp_rx = vmalloc(size); + if (!temp_rx) { + err = -ENOMEM; + goto free_temp; + } + } + + 
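+	/* From here the interface is taken down with a runtime-PM reference
+	 * held, the resized rings are built in the temporary structures first,
+	 * and the old rings are freed only after the new ones have been set up,
+	 * so a failed allocation leaves the current rings usable.
+	 */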
pm_runtime_get_sync(netdev->dev.parent); + + e1000e_down(adapter, true); + + /* We can't just free everything and then setup again, because the + * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring + * structs. First, attempt to allocate new resources... + */ + if (set_tx) { + memcpy(temp_tx, adapter->tx_ring, size); + temp_tx->count = new_tx_count; + err = e1000e_setup_tx_resources(temp_tx); + if (err) + goto err_setup; + } + if (set_rx) { + memcpy(temp_rx, adapter->rx_ring, size); + temp_rx->count = new_rx_count; + err = e1000e_setup_rx_resources(temp_rx); + if (err) + goto err_setup_rx; + } + + /* ...then free the old resources and copy back any new ring data */ + if (set_tx) { + e1000e_free_tx_resources(adapter->tx_ring); + memcpy(adapter->tx_ring, temp_tx, size); + adapter->tx_ring_count = new_tx_count; + } + if (set_rx) { + e1000e_free_rx_resources(adapter->rx_ring); + memcpy(adapter->rx_ring, temp_rx, size); + adapter->rx_ring_count = new_rx_count; + } + +err_setup_rx: + if (err && set_tx) + e1000e_free_tx_resources(temp_tx); +err_setup: + e1000e_up(adapter); + pm_runtime_put_sync(netdev->dev.parent); +free_temp: + vfree(temp_tx); + vfree(temp_rx); +clear_reset: + clear_bit(__E1000_RESETTING, &adapter->state); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, + int reg, int offset, u32 mask, u32 write) +{ + u32 pat, val; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + for (pat = 0; pat < ARRAY_SIZE(test); pat++) { + E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, + (test[pat] & write)); + val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); + if (val != (test[pat] & write & mask)) { + e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", + reg + (offset << 2), val, + (test[pat] & write & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val; + + __ew32(&adapter->hw, reg, write & mask); + val = __er32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ + return 1; \ + } while (0) +#define REG_PATTERN_TEST(reg, mask, write) \ + REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &adapter->hw.mac; + u32 value; + u32 before; + u32 after; + u32 i; + u32 toggle; + u32 mask; + u32 wlock_mac = 0; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. There are several bits + * on newer hardware that are r/w. 
+ */ + switch (mac->type) { + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + default: + toggle = 0x7FFFF033; + break; + } + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + if (!(adapter->flags & FLAG_IS_ICH)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + if (!(adapter->flags & FLAG_IS_ICH)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + mask = 0x8003FFFF; + switch (mac->type) { + case e1000_ich10lan: + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + mask |= (1 << 18); + break; + default: + break; + } + + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) + wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> + E1000_FWSM_WLOCK_MAC_SHIFT; + + for (i = 0; i < mac->rar_entry_count; i++) { + if ((mac->type == e1000_pch_lpt) || + (mac->type == e1000_pch_spt)) { + /* Cannot test write-protected SHRAL[n] registers */ + if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) + continue; + + /* SHRAH[9] different than the others */ + if (i == 10) + mask |= (1 << 30); + else + mask &= ~(1 << 30); + } + if (mac->type == e1000_pch2lan) { + /* SHRAH[0,1,2] different than previous */ + if (i == 1) + mask &= 0xFFF4FFFF; + /* SHRAH[3] different than SHRAH[0,1,2] */ + if (i == 4) + mask |= (1 << 30); + /* RAR[1-6] owned by management engine - skipping */ + if (i > 0) + i += 6; + } + + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, + 0xFFFFFFFF); + /* reset index to actual value */ + if ((mac->type == e1000_pch2lan) && (i > 6)) + i -= 6; + } + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + return *data; + } + 
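+		/* Sum every NVM word from 0x00 through NVM_CHECKSUM_REG (0x3F),
+		 * including the stored checksum word itself; a valid image makes
+		 * the 16-bit total equal NVM_SUM (0xBABA).
+		 */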
checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 mask; + u32 shared_int = 1; + u32 irq = adapter->pdev->irq; + int i; + int ret_val = 0; + int int_mode = E1000E_INT_MODE_LEGACY; + + *data = 0; + + /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ + if (adapter->int_mode == E1000E_INT_MODE_MSIX) { + int_mode = adapter->int_mode; + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e1000e_set_interrupt_capability(adapter); + } + /* Hook up test interrupt handler just for this test */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) { + shared_int = 0; + } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name, + netdev)) { + *data = 1; + ret_val = -1; + goto out; + } + e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (i = 0; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (adapter->flags & FLAG_IS_ICH) { + switch (mask) { + case E1000_ICR_RXSEQ: + continue; + case 0x00000100: + if (adapter->hw.mac.type == e1000_ich8lan || + adapter->hw.mac.type == e1000_ich9lan) + continue; + break; + default: + break; + } + } + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + +out: + if (int_mode == E1000E_INT_MODE_MSIX) { + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = int_mode; + e1000e_set_interrupt_capability(adapter); + } + + return ret_val; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, + buffer_info->dma, + 2048, DMA_FROM_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + int i; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + + tx_ring->buffer_info = kcalloc(tx_ring->count, + sizeof(struct e1000_buffer), GFP_KERNEL); + if (!tx_ring->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + ret_val = 2; + goto err_nomem; + } + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH(0), ((u64)tx_ring->dma >> 32)); + ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int skb_size = 1024; + + skb = alloc_skb(skb_size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, skb_size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 
skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, + tx_ring->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + rx_ring->buffer_info = kcalloc(rx_ring->count, + sizeof(struct e1000_buffer), GFP_KERNEL); + if (!rx_ring->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + ret_val = 6; + goto err_nomem; + } + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); + ew32(RDLEN(0), rx_ring->size); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | + E1000_RCTL_SBP | E1000_RCTL_SECRC | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + union e1000_rx_desc_extended *rx_desc; + struct sk_buff *skb; + + skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 2048, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + rx_ring->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = + cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1e_wphy(&adapter->hw, 29, 0x001F); + e1e_wphy(&adapter->hw, 30, 0x8FFC); + e1e_wphy(&adapter->hw, 29, 0x001A); + e1e_wphy(&adapter->hw, 30, 0x8FF0); +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u16 phy_reg = 0; + s32 ret_val = 0; + + hw->mac.autoneg = 0; + + if (hw->phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1e_wphy(hw, MII_BMCR, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + e1e_flush(); + usleep_range(500, 1000); + + return 0; + } + + /* Specific PHY configuration for loopback */ + switch (hw->phy.type) { + case e1000_phy_m88: + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, MII_BMCR, 0x9140); + /* autoneg off */ + e1e_wphy(hw, MII_BMCR, 0x8140); + break; + case e1000_phy_gg82563: + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); + break; + case e1000_phy_bm: + /* Set Default MAC Interface speed to 1GB */ + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); + phy_reg &= ~0x0007; + phy_reg |= 0x006; + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); + /* Assert SW reset for above settings to take effect */ + hw->phy.ops.commit(hw); + usleep_range(1000, 2000); + /* Force Full Duplex */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); + /* Set Link Up (in force link) */ + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); + /* Force Link */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); + /* Set Early Link Enable */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); + break; + case e1000_phy_82577: + case e1000_phy_82578: + /* Workaround: K1 must be disabled for stable 1Gbps operation */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Cannot setup 1Gbps loopback.\n"); + return ret_val; + } + e1000_configure_k1_ich8lan(hw, false); + hw->phy.ops.release(hw); + break; + case e1000_phy_82579: + /* Disable PHY energy detect power down */ + e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + /* Disable full chip energy detect */ + e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); + /* Enable loopback on the PHY */ + e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); + break; + default: + break; + } + + /* force 1000, set loopback */ + e1e_wphy(hw, MII_BMCR, 0x4140); + msleep(250); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (adapter->flags & FLAG_IS_ICH) + ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ + + if (hw->phy.media_type == e1000_media_type_copper && + hw->phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* Set the ILOS bit on the fiber Nic if half duplex link is + * detected. + */ + if ((er32(STATUS) & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
+ */ + if (hw->phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + usleep_range(500, 1000); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + int link; + + /* special requirements for 82571/82572 fiber adapters */ + + /* jump through hoops to make sure link is up because serdes + * link is hardwired up + */ + ctrl |= E1000_CTRL_SLU; + ew32(CTRL, ctrl); + + /* disable autoneg */ + ctrl = er32(TXCW); + ctrl &= ~(1 << 31); + ew32(TXCW, ctrl); + + link = (er32(STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ILOS; + ew32(CTRL, ctrl); + } + + /* special write to serdes control register to enable SerDes analog + * loopback + */ + ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); + e1e_flush(); + usleep_range(10000, 20000); + + return 0; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = er32(CTRL_EXT); + u32 ctrl = er32(CTRL); + + /* save CTRL_EXT to restore later, reuse an empty variable (unused + * on mac_type 80003es2lan) + */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + ew32(CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* set mac loopback */ + ctrl = er32(RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + ew32(RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + ew32(KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->phy.media_type == e1000_media_type_copper) { + return e1000_integrated_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac.type) { + case e1000_80003es2lan: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + ew32(CTRL_EXT, adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + /* fall through */ + case e1000_82571: + case e1000_82572: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + e1e_flush(); + usleep_range(10000, 20000); + break; + } + /* Fall Through */ + default: + 
hw->mac.autoneg = 1; + if (hw->phy.type == e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); + e1e_rphy(hw, MII_BMCR, &phy_reg); + if (phy_reg & BMCR_LOOPBACK) { + phy_reg &= ~BMCR_LOOPBACK; + e1e_wphy(hw, MII_BMCR, phy_reg); + if (hw->phy.ops.commit) + hw->phy.ops.commit(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) + return 0; + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; + int i, j, k, l; + int lc; + int good_cnt; + int ret_val = 0; + unsigned long time; + + ew32(RDT(0), rx_ring->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = 0; + l = 0; + /* loop count loop */ + for (j = 0; j <= lc; j++) { + /* send the packets */ + for (i = 0; i < 64; i++) { + buffer_info = &tx_ring->buffer_info[k]; + + e1000_create_lbtest_frame(buffer_info->skb, 1024); + dma_sync_single_for_device(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + k++; + if (k == tx_ring->count) + k = 0; + } + ew32(TDT(0), k); + e1e_flush(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + /* receive the sent packets */ + do { + buffer_info = &rx_ring->buffer_info[l]; + + dma_sync_single_for_cpu(&pdev->dev, + buffer_info->dma, 2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame(buffer_info->skb, + 1024); + if (!ret_val) + good_cnt++; + l++; + if (l == rx_ring->count) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after(jiffies, time + 20)) { + ret_val = 14; /* error code for time out error */ + break; + } + } + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + /* PHY loopback cannot be performed if SoL/IDER sessions are active */ + if (hw->phy.ops.check_reset_block && + hw->phy.ops.check_reset_block(hw)) { + e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + 
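+/* The register, EEPROM, interrupt, loopback and link tests in this file are
+ * driven from user space through ethtool's self-test interface: the
+ * .self_test callback (e1000_diag_test, registered at the end of this file)
+ * dispatches to them, e.g. "ethtool -t eth0 offline" ("eth0" is only an
+ * example interface name).
+ */
+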
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(hw); + if (hw->mac.autoneg) + /* On some Phy/switch combinations, link establishment + * can take a few seconds more than expected. + */ + msleep_interruptible(5000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000e_get_sset_count(struct net_device __always_unused *netdev, + int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex; + u8 autoneg; + bool if_running = netif_running(netdev); + + pm_runtime_get_sync(netdev->dev.parent); + + set_bit(__E1000_TESTING, &adapter->state); + + if (!if_running) { + /* Get control of and reset hardware */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + e1000e_power_up_phy(adapter); + + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + } + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + e_info("offline testing starting\n"); + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + e1000e_reset(adapter); + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + /* Online tests */ + + e_info("online testing starting\n"); + + /* register, eeprom, intr and loopback tests not run online */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__E1000_TESTING, &adapter->state); + } + + if (!if_running) { + e1000e_reset(adapter); + + if (adapter->flags & FLAG_HAS_AMT) + e1000e_release_hw_control(adapter); + } + + 
msleep_interruptible(4 * 1000); + + pm_runtime_put_sync(netdev->dev.parent); +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; + + /* apply any specific unsupported masks here */ + if (adapter->flags & FLAG_NO_WAKE_UCAST) { + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err("Interface does not support directed (unicast) frame wake-up packets\n"); + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev) || + (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_MAGIC | WAKE_PHY))) + return -EOPNOTSUPP; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + pm_runtime_get_sync(netdev->dev.parent); + + if (!hw->mac.ops.blink_led) + return 2; /* cycle on/off twice per second */ + + hw->mac.ops.blink_led(hw); + break; + + case ETHTOOL_ID_INACTIVE: + if (hw->phy.type == e1000_phy_ife) + e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + hw->mac.ops.led_off(hw); + hw->mac.ops.cleanup_led(hw); + pm_runtime_put_sync(netdev->dev.parent); + break; + + case ETHTOOL_ID_ON: + hw->mac.ops.led_on(hw); + break; + + case ETHTOOL_ID_OFF: + hw->mac.ops.led_off(hw); + break; + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr_setting = 4; + adapter->itr = adapter->itr_setting; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = 
(1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + pm_runtime_get_sync(netdev->dev.parent); + + if (adapter->itr_setting != 0) + e1000e_write_itr(adapter, adapter->itr); + else + e1000e_write_itr(adapter, 0); + + pm_runtime_put_sync(netdev->dev.parent); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + if (!adapter->hw.mac.autoneg) + return -EINVAL; + + pm_runtime_get_sync(netdev->dev.parent); + e1000e_reinit_locked(adapter); + pm_runtime_put_sync(netdev->dev.parent); + + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 net_stats; + int i; + char *p = NULL; + + pm_runtime_get_sync(netdev->dev.parent); + + e1000e_get_stats64(netdev, &net_stats); + + pm_runtime_put_sync(netdev->dev.parent); + + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (e1000_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *)&net_stats + + e1000_gstrings_stats[i].stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + + e1000_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +} + +static void e1000_get_strings(struct net_device __always_unused *netdev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int e1000_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, + u32 __always_unused *rule_locs) +{ + info->data = 0; + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 mrqc; + + pm_runtime_get_sync(netdev->dev.parent); + mrqc = er32(MRQC); + pm_runtime_put_sync(netdev->dev.parent); + + if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) + return 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data; + u32 ret_val; + + if (!(adapter->flags2 & FLAG2_HAS_EEE)) + return -EOPNOTSUPP; + + switch (hw->phy.type) { + case e1000_phy_82579: + cap_addr = I82579_EEE_CAPABILITY; + lpa_addr = I82579_EEE_LP_ABILITY; + pcs_stat_addr = 
I82579_EEE_PCS_STATUS; + break; + case e1000_phy_i217: + cap_addr = I217_EEE_CAPABILITY; + lpa_addr = I217_EEE_LP_ABILITY; + pcs_stat_addr = I217_EEE_PCS_STATUS; + break; + default: + return -EOPNOTSUPP; + } + + pm_runtime_get_sync(netdev->dev.parent); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + pm_runtime_put_sync(netdev->dev.parent); + return -EBUSY; + } + + /* EEE Capability */ + ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); + if (ret_val) + goto release; + edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); + + /* EEE Advertised */ + edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* EEE Link Partner Advertised */ + ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); + if (ret_val) + goto release; + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + /* EEE PCS Status */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); + if (ret_val) + goto release; + if (hw->phy.type == e1000_phy_82579) + phy_data <<= 8; + + /* Result of the EEE auto negotiation - there is no register that + * has the status of the EEE negotiation so do a best-guess based + * on whether Tx or Rx LPI indications have been received. + */ + if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD)) + edata->eee_active = true; + + edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; + edata->tx_lpi_enabled = true; + edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; + +release: + hw->phy.ops.release(hw); + if (ret_val) + ret_val = -ENODATA; + + pm_runtime_put_sync(netdev->dev.parent); + + return ret_val; +} + +static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + ret_val = e1000e_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) { + e_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { + e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + + hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; + + pm_runtime_get_sync(netdev->dev.parent); + + /* reset the link */ + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + + pm_runtime_put_sync(netdev->dev.parent); + + return 0; +} + +static int e1000e_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + ethtool_op_get_ts_info(netdev, info); + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return 0; + + info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << 
HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + + return 0; +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000e_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_rxnfc = e1000_get_rxnfc, + .get_ts_info = e1000e_get_ts_info, + .get_eee = e1000e_get_eee, + .set_eee = e1000e_set_eee, +}; + +void e1000e_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h new file mode 100644 index 000000000..19e8c487d --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -0,0 +1,698 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "regs.h" +#include "defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82574LA 0x10F6 +#define E1000_DEV_ID_82583V 0x150C +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#define E1000_DEV_ID_ICH8_82567V_3 0x1501 +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_BM 0x10E5 +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE +#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE +#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF +#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 +#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA +#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB +#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF +#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 +#define E1000_DEV_ID_PCH_I218_V2 0x15A1 +#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ + +#define E1000_REVISION_4 4 + +#define E1000_FUNC_1 1 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 + +enum e1000_mac_type { + e1000_82571, + e1000_82572, + e1000_82573, + e1000_82574, + e1000_82583, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_ich10lan, + e1000_pchlan, + e1000_pch2lan, + e1000_pch_lpt, + e1000_pch_spt, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + 
e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm, + e1000_phy_82578, + e1000_phy_82577, + e1000_phy_82579, + e1000_phy_i217, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 
ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "mac.h" +#include "phy.h" +#include "nvm.h" +#include "manage.h" + +/* Function pointers for the MAC. 
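+ * Each supported MAC family (82571/82574, 80003es2lan and the ICH/PCH
+ * parts) fills in its own implementations; MAC-specific behaviour is
+ * reached through this table rather than by calling family code directly.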
*/ +struct e1000_mac_operations { + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + int (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + u32 (*rar_get_count)(struct e1000_hw *); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); +}; + +/* Function pointers for the NVM. 
*/ +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + + u16 func; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_ICH8_SHADOW_RAM_WORDS 2048 + +/* I218 PHY Ultra Low Power (ULP) states */ +enum e1000_ulp_state { + e1000_ulp_state_unknown, + e1000_ulp_state_off, + e1000_ulp_state_on, +}; + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; +}; + +struct e1000_hw { + struct e1000_adapter *adapter; + + void __iomem *hw_addr; + void __iomem *flash_address; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + 
struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +#include "82571.h" +#include "80003es2lan.h" +#include "ich8lan.h" + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c new file mode 100644 index 000000000..e2498dbf3 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -0,0 +1,5802 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + * Ethernet Connection I217-LM + * Ethernet Connection I217-V + * Ethernet Connection I218-V + * Ethernet Connection I218-LM + * Ethernet Connection (2) I218-LM + * Ethernet Connection (2) I218-V + * Ethernet Connection (3) I218-LM + * Ethernet Connection (3) I218-V + */ + +#include "e1000.h" + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone:1; /* bit 0 Flash Cycle Done */ + u16 flcerr:1; /* bit 1 Flash Cycle Error */ + u16 dael:1; /* bit 2 Direct Access error Log */ + u16 berasesz:2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog:1; /* bit 5 flash cycle in Progress */ + u16 reserved1:2; /* bit 13:6 Reserved */ + u16 reserved2:6; /* bit 13:6 Reserved */ + u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct 
ich8_hsflctl { + u16 flcgo:1; /* 0 Flash Cycle Go */ + u16 flcycle:2; /* 2:1 Flash Cycle */ + u16 reserved:5; /* 7:3 Reserved */ + u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ + u16 flockdn:6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra:8; /* 0:7 GbE region Read Access */ + u32 grwa:8; /* 8:15 GbE region Write Access */ + u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +/* ICH Flash Protected Region */ +union ich8_flash_protected_range { + struct ich8_pr { + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; /* 16:28 Protected Range Limit */ + u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ + } range; + u32 regval; +}; + +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data); +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_led_on_pchlan(struct e1000_hw *hw); +static s32 e1000_led_off_pchlan(struct e1000_hw *hw); +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw); +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); +static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); +static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state); + +static inline u16 __er16flash(struct e1000_hw *hw, 
unsigned long reg) +{ + return readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) + +/** + * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers + * @hw: pointer to the HW structure + * + * Test access to the PHY registers by reading the PHY ID registers. If + * the PHY ID is already known (e.g. resume path) compare it with known ID, + * otherwise assume the read PHY ID is correct if it is valid. + * + * Assumes the sw/fw/hw semaphore is already acquired. + **/ +static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) +{ + u16 phy_reg = 0; + u32 phy_id = 0; + s32 ret_val = 0; + u16 retry_count; + u32 mac_reg = 0; + + for (retry_count = 0; retry_count < 2; retry_count++) { + ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) + continue; + phy_id = (u32)(phy_reg << 16); + + ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) { + phy_id = 0; + continue; + } + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); + break; + } + + if (hw->phy.id) { + if (hw->phy.id == phy_id) + goto out; + } else if (phy_id) { + hw->phy.id = phy_id; + hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); + goto out; + } + + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + if (hw->mac.type < e1000_pch_lpt) { + hw->phy.ops.release(hw); + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (!ret_val) + ret_val = e1000e_get_phy_id(hw); + hw->phy.ops.acquire(hw); + } + + if (ret_val) + return false; +out: + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + + return true; +} + +/** + * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value + * @hw: pointer to the HW structure + * + * Toggling the LANPHYPC pin value fully power-cycles the PHY and is + * used to reset the PHY to a quiescent state when necessary. 
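+ * In outline, the sequence below is: program FEXTNVM3 for a 50 msec PHY
+ * configuration counter, assert CTRL.LANPHYPC_OVERRIDE with
+ * CTRL.LANPHYPC_VALUE cleared, release the override, and then let the PHY
+ * come back up (a fixed 50 msec wait on pre-LPT parts, or by polling
+ * CTRL_EXT.LPCD on LPT and later parts).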
+ **/ +static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) +{ + u32 mac_reg; + + /* Set Phy Config Counter to 50msec */ + mac_reg = er32(FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = er32(CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, mac_reg); + e1e_flush(); + usleep_range(10, 20); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, mac_reg); + e1e_flush(); + + if (hw->mac.type < e1000_pch_lpt) { + msleep(50); + } else { + u16 count = 20; + + do { + usleep_range(5000, 10000); + } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); + + msleep(30); + } +} + +/** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds + * @hw: pointer to the HW structure + * + * Workarounds/flow necessary for PHY initialization during driver load + * and resume paths. + **/ +static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->adapter; + u32 mac_reg, fwsm = er32(FWSM); + s32 ret_val; + + /* Gate automatic PHY configuration by hardware on managed and + * non-managed 82579 and newer adapters. + */ + e1000_gate_hw_phy_config_ich8lan(hw, true); + + /* It is not possible to be certain of the current state of ULP + * so forcibly disable it. + */ + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; + e1000_disable_ulp_lpt_lp(hw, true); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to initialize PHY flow\n"); + goto out; + } + + /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + case e1000_pch_spt: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. + */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. + */ + msleep(50); + + /* fall-through */ + case e1000_pch2lan: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* fall-through */ + case e1000_pchlan: + if ((hw->mac.type == e1000_pchlan) && + (fwsm & E1000_ICH_FWSM_FW_VALID)) + break; + + if (hw->phy.ops.check_reset_block(hw)) { + e_dbg("Required LANPHYPC toggle blocked by ME\n"); + ret_val = -E1000_ERR_PHY; + break; + } + + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + if (hw->mac.type >= e1000_pch_lpt) { + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * so ensure that the MAC is also out of SMBus mode + */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + ret_val = -E1000_ERR_PHY; + } + break; + default: + break; + } + + hw->phy.ops.release(hw); + if (!ret_val) { + + /* Check to see if able to reset PHY. Print error if not */ + if (hw->phy.ops.check_reset_block(hw)) { + e_err("Reset blocked by ME\n"); + goto out; + } + + /* Reset the PHY before any access to it. 
Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* On a successful reset, possibly need to wait for the PHY + * to quiesce to an accessible state before returning control + * to the calling function. If the PHY does not quiesce, then + * return E1000E_BLK_PHY_RESET, as this is the condition that + * the PHY is in. + */ + ret_val = hw->phy.ops.check_reset_block(hw); + if (ret_val) + e_err("ME blocked access to PHY after reset\n"); + } + +out: + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + return ret_val; +} + +/** + * e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + phy->id = e1000_phy_unknown; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) + return ret_val; + + if (phy->id == e1000_phy_unknown) + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. 
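+ * (e1000_set_mdio_slow_mode_hv(), defined further below, simply sets
+ * HV_KMRN_MDIO_SLOW in HV_KMRN_MODE_CTRL before the ID read is retried.)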
+ */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + break; + } + phy->type = e1000e_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + case e1000_phy_i217: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000e_phy_sw_reset; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000e_get_cable_length_m88; + phy->ops.get_info = e1000e_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + e_dbg("Cannot determine PHY addr. Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + usleep_range(1000, 2000); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; + phy->ops.get_info = e1000e_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.commit = e1000e_phy_sw_reset; + phy->ops.get_info = e1000e_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. 
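+ * On SPT the NVM size is derived from the STRAP register rather than from
+ * GFPREG: nvm_size = (((STRAP >> 1) & 0x1F) + 1) * NVM_SIZE_MULTIPLIER,
+ * split into two banks of nvm_size / 2 bytes (tracked in words). Purely as
+ * an illustration, a strap field value of 1 would give a total of
+ * 2 * NVM_SIZE_MULTIPLIER bytes. Older parts instead read the bank layout
+ * from GFPREG through the mapped flash BAR.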
+ **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + u32 nvm_size; + + nvm->type = e1000_nvm_flash_sw; + + if (hw->mac.type == e1000_pch_spt) { + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register. This is because in SPT the GbE Flash region + * is no longer accessed through the flash registers. Instead, + * the mechanism has changed, and the Flash region access + * registers are now implemented in GbE memory space. + */ + nvm->flash_base_addr = 0; + nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + /* Can't read flash registers if register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. 
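+ * The receive address register count is sized per MAC type below:
+ * E1000_ICH_RAR_ENTRIES (reduced by one on e1000_ich8lan),
+ * E1000_PCH2_RAR_ENTRIES on e1000_pch2lan, and E1000_PCH_LPT_RAR_ENTRIES
+ * on the LPT/SPT parts.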
+ **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* LED and other operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000e_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000e_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000e_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pch2lan: + mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch2lan; + /* fall-through */ + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pchlan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + default: + break; + } + + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_pch_lpt; + mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + return 0; +} + +/** + * __e1000_access_emi_reg_locked - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + * + * This helper function assumes the SW/FW/HW Semaphore is already acquired. + **/ +static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data); + else + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg_locked - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. 
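+ * The EMI access pattern used by the helper above is: write the EMI
+ * address to I82579_EMI_ADDR, then read or write the value through
+ * I82579_EMI_DATA. A typical locked read looks like:
+ *
+ *	ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data);
+ *
+ * (the same call pattern appears in e1000_set_eee_pchlan() below).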
+ **/ +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __e1000_access_emi_reg_locked(hw, addr, data, true); +} + +/** + * e1000_write_emi_reg_locked - Write Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be written to the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +{ + return __e1000_access_emi_reg_locked(hw, addr, &data, false); +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure, the duplex of + * the link and the EEE capabilities of the link partner. The LPI Control + * register bits will remain set only if/when link is up. + * + * EEE LPI must not be asserted earlier than one second after link is up. + * On 82579, EEE LPI should not be enabled until such time otherwise there + * can be link issues with some switches. Other devices can have EEE LPI + * enabled immediately upon link up since they have a timer in hardware which + * prevents LPI from being asserted too early. + **/ +s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val; + u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; + + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + return 0; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); + if (ret_val) + goto release; + + /* Clear bits that enable EEE in various speeds */ + lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; + + /* Enable EEE if not disabled by user */ + if (!dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = e1000_read_emi_reg_locked(hw, lpa, + &dev_spec->eee_lp_ability); + if (ret_val) + goto release; + + /* Read EEE advertisement */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); + if (ret_val) + goto release; + + /* Enable EEE only for speeds in which the link partner is + * EEE capable and for which we advertise EEE. + */ + if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + e1e_rphy_locked(hw, MII_LPA, &data); + if (data & LPA_100FULL) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + else + /* EEE is not supported in 100Half, so ignore + * partner's EEE in 100 ability if full-duplex + * is not advertised. 
+ */ + dev_spec->eee_lp_ability &= + ~I82579_EEE_100_SUPPORTED; + } + } + + if (hw->phy.type == e1000_phy_82579) { + ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + &data); + if (ret_val) + goto release; + + data &= ~I82579_LPI_100_PLL_SHUT; + ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + data); + } + + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; + + ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gpbs mode. + * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. + **/ +static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = er32(FEXTNVM6); + u32 status = er32(STATUS); + s32 ret_val = 0; + u16 reg; + + if (link && (status & E1000_STATUS_SPEED_1000)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + ®); + if (ret_val) + goto release; + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usleep_range(10, 20); + + ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; + + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) + goto update_fextnvm6; + + ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); + if (ret_val) + return ret_val; + + /* Clear link status transmit timeout */ + reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + + if (status & E1000_STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + reg |= 50 << + I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg); + if (ret_val) + return ret_val; + +update_fextnvm6: + ew32(FEXTNVM6, fextnvm6); + } + + return ret_val; +} + +/** + * e1000_platform_pm_pch_lpt - Set platform power management values + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" + * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed + * when link is up (which must not exceed the maximum latency supported + * by the platform), otherwise specify there is no LTR requirement. 
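+ * The latency itself is encoded as a 10-bit value scaled by 2^(5 * scale)
+ * nanoseconds (scale 0..5), as computed in the function body below. Purely
+ * as a worked example, value = 100 with scale = 2 encodes 100 * 2^10 ns,
+ * i.e. roughly 102 usec.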
+ * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop + * latencies in the LTR Extended Capability Structure in the PCIe Extended + * Capability register set, on this device LTR is set by writing the + * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and + * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) + * message to the PMC. + **/ +static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) +{ + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 lat_enc = 0; /* latency encoded */ + + if (link) { + u16 speed, duplex, scale = 0; + u16 max_snoop, max_nosnoop; + u16 max_ltr_enc; /* max LTR latency encoded */ + s64 lat_ns; /* latency (ns) */ + s64 value; + u32 rxa; + + if (!hw->adapter->max_frame_size) { + e_dbg("max_frame_size not set.\n"); + return -E1000_ERR_CONFIG; + } + + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + if (!speed) { + e_dbg("Speed not set.\n"); + return -E1000_ERR_CONFIG; + } + + /* Rx Packet Buffer Allocation size (KB) */ + rxa = er32(PBA) & E1000_PBA_RXA_MASK; + + /* Determine the maximum latency tolerated by the device. + * + * Per the PCIe spec, the tolerated latencies are encoded as + * a 3-bit encoded scale (only 0-5 are valid) multiplied by + * a 10-bit value (0-1023) to provide a range from 1 ns to + * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, + * 1=2^5ns, 2=2^10ns,...5=2^25ns. + */ + lat_ns = ((s64)rxa * 1024 - + (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; + if (lat_ns < 0) + lat_ns = 0; + else + do_div(lat_ns, speed); + + value = lat_ns; + while (value > PCI_LTR_VALUE_MASK) { + scale++; + value = DIV_ROUND_UP(value, (1 << 5)); + } + if (scale > E1000_LTRV_SCALE_MAX) { + e_dbg("Invalid LTR latency scale %d\n", scale); + return -E1000_ERR_CONFIG; + } + lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value); + + /* Determine the maximum latency tolerated by the platform */ + pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT, + &max_snoop); + pci_read_config_word(hw->adapter->pdev, + E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); + max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); + + if (lat_enc > max_ltr_enc) + lat_enc = max_ltr_enc; + } + + /* Set Snoop and No-Snoop latencies the same */ + reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); + ew32(LTRV, reg); + + return 0; +} + +/** + * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @to_sx: boolean indicating a system power state transition to Sx + * + * When link is down, configure ULP mode to significantly reduce the power + * to the PHY. If on a Manageability Engine (ME) enabled system, tell the + * ME firmware to start the ULP configuration. If not on an ME enabled + * system, configure the ULP mode by software. 
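+ * In the software flow below, ULP entry amounts to: force SMBus mode in
+ * both the PHY (CV_SMB_CTRL) and the MAC (CTRL_EXT), program
+ * I218_ULP_CONFIG1 (reset-to-SMBus and disable-SMBus-release-on-PERST#,
+ * plus the sticky-ULP/WoL bits for an Sx transition or the inband-exit
+ * bit otherwise), and finally set I218_ULP_CONFIG1_START to kick off the
+ * auto ULP configuration.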
+ */ +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) +{ + u32 mac_reg; + s32 ret_val = 0; + u16 phy_reg; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) + return 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure ULP mode in the PHY */ + mac_reg = er32(H2ME); + mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + + goto out; + } + + if (!to_sx) { + int i = 0; + + /* Poll up to 5 seconds for Cable Disconnected indication */ + while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) { + /* Bail if link is re-acquired */ + if (er32(STATUS) & E1000_STATUS_LU) + return -E1000_ERR_PHY; + + if (i++ == 100) + break; + + msleep(50); + } + e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n", + (er32(FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + if (ret_val) + goto release; + } + + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* Set Inband ULP Exit, Reset to SMBus mode and + * Disable SMBus Release on PERST# in PHY + */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + if (to_sx) { + if (er32(WUFC) & E1000_WUFC_LNKC) + phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + + phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + } else { + phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + } + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Set Disable SMBus Release on PERST# in MAC */ + mac_reg = er32(FEXTNVM7); + mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; + ew32(FEXTNVM7, mac_reg); + + /* Commit ULP changes in PHY by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); +release: + hw->phy.ops.release(hw); +out: + if (ret_val) + e_dbg("Error in ULP enable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; + + return ret_val; +} + +/** + * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @force: boolean indicating whether or not to force disabling ULP + * + * Un-configure ULP mode when link is up, the system is transitioned from + * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled + * system, poll for an indication from ME that ULP has been un-configured. 
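+ * (The poll below allows roughly 100-200 msec for FWSM ULP_CFG_DONE to
+ * clear.)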
+ * If not on an ME enabled system, un-configure the ULP mode by software. + * + * During nominal operation, this function is called when link is acquired + * to disable ULP mode (force=false); otherwise, for example when unloading + * the driver or during Sx->S0 transitions, this is called with force=true + * to forcibly disable ULP. + */ +static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 phy_reg; + int i = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) + return 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (force) { + /* Request ME un-configure ULP mode in the PHY */ + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ULP; + mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + } + + /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { + if (i++ == 10) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + usleep_range(10000, 20000); + } + e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + + if (force) { + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + } else { + /* Clear H2ME.ULP after ME ULP configuration */ + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ULP; + ew32(H2ME, mac_reg); + } + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (force) + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + + /* Unforce SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) { + /* The MAC might be in PCIe mode, so temporarily force to + * SMBus mode in order to access the PHY. + */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + msleep(50); + + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, + &phy_reg); + if (ret_val) + goto release; + } + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* When ULP mode was previously entered, K1 was disabled by the + * hardware. Re-Enable K1 in the PHY when exiting ULP. 
+ */ + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_PM_CTRL_K1_ENABLE; + e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); + + /* Clear ULP enabled configuration */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~(I218_ULP_CONFIG1_IND | + I218_ULP_CONFIG1_STICKY_ULP | + I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_WOL_HOST | + I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Commit ULP changes by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Clear Disable SMBus Release on PERST# in MAC */ + mac_reg = er32(FEXTNVM7); + mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; + ew32(FEXTNVM7, mac_reg); + +release: + hw->phy.ops.release(hw); + if (force) { + e1000_phy_hw_reset(hw); + msleep(50); + } +out: + if (ret_val) + e_dbg("Error in ULP disable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; + + return ret_val; +} + +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; + bool link; + u16 phy_reg; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + return ret_val; + } + + /* When connected at 10Mbps half-duplex, some parts are excessively + * aggressive resulting in many collisions. To avoid this, increase + * the IPG and reduce Rx latency in the PHY. 
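+ * (Below: the TIPG IPGT field is raised to 0xFF and the analog Rx latency
+ * is reduced via the I82579/I217 RX_CONFIG EMI register at 10 Mbps
+ * half-duplex; both are rolled back to the 0x08 / 1 defaults otherwise.)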
+ */ + if (((hw->mac.type == e1000_pch2lan) || + (hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && link) { + u32 reg; + + reg = er32(STATUS); + tipg_reg = er32(TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + + if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else { + + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } + + ew32(TIPG, tipg_reg); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); + + hw->phy.ops.release(hw); + + if (ret_val) + return ret_val; + } + + /* Work-around I218 hang issue */ + if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + /* Set platform power management values for + * Latency Tolerance Reporting (LTR) + */ + ret_val = e1000_platform_pm_pch_lpt(hw, link); + if (ret_val) + return ret_val; + } + + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + + /* FEXTNVM6 K1-off workaround */ + if (hw->mac.type == e1000_pch_spt) { + u32 pcieanacfg = er32(PCIEANACFG); + u32 fextnvm6 = er32(FEXTNVM6); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + ew32(FEXTNVM6, fextnvm6); + } + + if (!link) + return 0; /* No link detected */ + + mac->get_link_status = false; + + switch (hw->mac.type) { + case e1000_pch2lan: + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + return ret_val; + /* fall-thru */ + case e1000_pchlan: + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + return ret_val; + } + + /* Workaround for PCHx parts in half-duplex: + * Set the number of preambles removed from the packet + * when it is passed from the PHY to the MAC to prevent + * the MAC from misinterpreting the packet type. + */ + e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); + phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + + if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) + phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + + e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); + break; + default: + break; + } + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* Enable/Disable EEE after link up */ + if (hw->phy.type > e1000_phy_82579) { + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + return ret_val; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. 
+ * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + + return ret_val; +} + +static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + rc = e1000_init_phy_params_ich8lan(hw); + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + rc = e1000_init_phy_params_pchlan(hw); + break; + default: + break; + } + if (rc) + return rc; + + /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or + * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). + */ + if ((adapter->hw.phy.type == e1000_phy_ife) || + ((adapter->hw.mac.type >= e1000_pch2lan) && + (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; + + hw->mac.ops.blink_led = NULL; + } + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type != e1000_phy_ife)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + + /* Enable workaround for 82579 w/ ME enabled */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; + + return 0; +} + +static DEFINE_MUTEX(nvm_mutex); + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw) +{ + mutex_lock(&nvm_mutex); + + return 0; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw) +{ + mutex_unlock(&nvm_mutex); +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. 
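+ * A minimal usage sketch (the same pattern used by the RAR helpers below):
+ *
+ *	ret_val = e1000_acquire_swflag_ich8lan(hw);
+ *	if (!ret_val) {
+ *		... PHY / select MAC CSR accesses ...
+ *		e1000_release_swflag_ich8lan(hw);
+ *	}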
+ **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; + + if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE, + &hw->adapter->state)) { + e_dbg("contention for Phy access\n"); + return -E1000_ERR_PHY; + } + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("SW has already locked the resource.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", + er32(FWSM), extcnf_ctrl); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + } else { + e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_rar_set_pch2lan - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. For 82579, RAR[0] is the base address register that is to + * contain the MAC address but RAR[1-6] are reserved for manageability (ME). + * Use SHRA[0-3] in place of those reserved for ME. 
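+ * Concretely, index 0 is written straight to RAL/RAH(0), while indices
+ * 1..rar_entry_count-1 are redirected to SHRAL/SHRAH(index - 1) under the
+ * software control flag, and the write is read back to detect an ME lock.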
+ **/ +static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return 0; + } + + /* RAR[1-6] are owned by manageability. Skip those and program the + * next address into the SHRA register array. + */ + if (index < (u32)(hw->mac.rar_entry_count)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + goto out; + + ew32(SHRAL(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL(index - 1)) == rar_low) && + (er32(SHRAH(index - 1)) == rar_high)) + return 0; + + e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", + (index - 1), er32(FWSM)); + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +/** + * e1000_rar_get_count_pch_lpt - Get the number of available SHRA + * @hw: pointer to the HW structure + * + * Get the number of available receive registers that the Host can + * program. SHRA[0-10] are the shared receive address registers + * that are shared between the Host and manageability engine (ME). + * ME can reserve any number of addresses and the host needs to be + * able to tell how many available registers it has access to. + **/ +static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw) +{ + u32 wlock_mac; + u32 num_entries; + + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + switch (wlock_mac) { + case 0: + /* All SHRA[0..10] and RAR[0] available */ + num_entries = hw->mac.rar_entry_count; + break; + case 1: + /* Only RAR[0] available */ + num_entries = 1; + break; + default: + /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */ + num_entries = wlock_mac + 1; + break; + } + + return num_entries; +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). 
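+ * The FWSM WLOCK_MAC field gates which of these are writable: 0 means
+ * RAR[0] plus all SHRA registers are available, 1 means only RAR[0], and a
+ * value n > 1 leaves RAR[0] plus SHRA[0..n-1] available (see
+ * e1000_rar_get_count_pch_lpt() above).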
+ **/ +static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return 0; + } + + /* The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. + */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + ew32(SHRAL_PCH_LPT(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH_PCH_LPT(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && + (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) + return 0; + } + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + bool blocked = false; + int i = 0; + + while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && + (i++ < 10)) + usleep_range(10000, 20000); + return blocked ? E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. + * + **/ +static s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = er32(STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; + s32 ret_val; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + e_dbg("Unsupported SMB frequency in PHY\n"); + } + } + + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. 
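+ * The extended configuration region read below is laid out as
+ * (data, address) word pairs starting at the EXTCNF_CTRL pointer
+ * (converted from dwords to words); IGP01E1000_PHY_PAGE_SELECT entries in
+ * the stream select the PHY page used for the writes that follow.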
+ **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = 0; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + /* Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. + */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto release; + + /* Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto release; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto release; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { + /* HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto release; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto release; + } + + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, ®_data); + if (ret_val) + goto release; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, ®_addr); + if (ret_val) + goto release; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); + if (ret_val) + goto release; + } + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. 
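+ * Called with the current link state from
+ * e1000_check_for_copper_link_ich8lan(), and with link assumed up from
+ * e1000_hv_phy_workarounds_ich8lan(); it writes PHY_REG(770, 19) with
+ * 0x0100 for link up or 0x4100 for link down before (re)configuring K1.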
+ **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = 0; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + return 0; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); + if (ret_val) + goto release; + + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + return ret_val; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + return ret_val; + + usleep_range(20, 40); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + e1e_flush(); + usleep_range(20, 40); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + usleep_range(20, 40); + + return 0; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. 
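+ * The mapping applied below is: the PHY_CTRL gigabit-disable bits set
+ * HV_OEM_BITS_GBE_DIS and the LPLU bits set HV_OEM_BITS_LPLU (the NOND0A
+ * variants are considered only when d0_state is false); where permitted,
+ * HV_OEM_BITS_RESTART_AN is also set so the new values take effect.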
+ **/ +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if (hw->mac.type < e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto release; + } + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto release; + + mac_reg = er32(PHY_CTRL); + + ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto release; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU)) + oem_reg |= HV_OEM_BITS_LPLU; + } + + /* Set Restart auto-neg to activate the bits */ + if ((d0_state || (hw->mac.type != e1000_pchlan)) && + !hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + +/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_data; + + if (hw->mac.type != e1000_pchlan) + return 0; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ + ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. + */ + if (hw->phy.revision < 2) { + e1000e_phy_sw_reset(hw); + ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + + /* Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. 
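[Editor's illustrative sketch] e1000_oem_bits_config_ich8lan derives the PHY's GbE-disable and LPLU OEM bits from the MAC's PHY_CTRL register, using different source bits for D0 and D3/Dx. The same selection written as a pure function; all constant values here are illustrative placeholders, not the hardware definitions.

    #include <stdbool.h>
    #include <stdint.h>

    #define EX_PHY_CTRL_GBE_DISABLE        0x00000040u  /* placeholders */
    #define EX_PHY_CTRL_D0A_LPLU           0x00000002u
    #define EX_PHY_CTRL_NOND0A_LPLU        0x00000004u
    #define EX_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008u

    #define EX_OEM_BITS_LPLU     0x0004
    #define EX_OEM_BITS_GBE_DIS  0x0040

    static uint16_t oem_bits_from_phy_ctrl(uint32_t phy_ctrl, bool d0_state,
                                           uint16_t oem_reg)
    {
        /* Start from the current OEM register with both bits cleared. */
        oem_reg &= ~(EX_OEM_BITS_GBE_DIS | EX_OEM_BITS_LPLU);

        if (d0_state) {
            if (phy_ctrl & EX_PHY_CTRL_GBE_DISABLE)
                oem_reg |= EX_OEM_BITS_GBE_DIS;
            if (phy_ctrl & EX_PHY_CTRL_D0A_LPLU)
                oem_reg |= EX_OEM_BITS_LPLU;
        } else {
            if (phy_ctrl & (EX_PHY_CTRL_GBE_DISABLE |
                            EX_PHY_CTRL_NOND0A_GBE_DISABLE))
                oem_reg |= EX_OEM_BITS_GBE_DIS;
            if (phy_ctrl & (EX_PHY_CTRL_D0A_LPLU | EX_PHY_CTRL_NOND0A_LPLU))
                oem_reg |= EX_OEM_BITS_LPLU;
        }

        return oem_reg;
    }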
+ */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + return ret_val; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); + if (ret_val) + goto release; + + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count); i++) { + mac_reg = er32(RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = er32(RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = 0; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + if (hw->mac.type < e1000_pch2lan) + return 0; + + /* disable Rx path while enabling/disabling workaround */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + if (ret_val) + return ret_val; + + if (enable) { + /* Write Rx addresses (rar_entry_count for RAL/H, and + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < hw->mac.rar_entry_count; i++) { + u8 mac_addr[ETH_ALEN] = { 0 }; + u32 addr_high, addr_low; + + addr_high = er32(RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = er32(RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg |= E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + 
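[Editor's illustrative sketch] The jumbo-frame workaround above rebuilds each receive address from its RAL/RAH register pair before feeding it to the CRC calculation. A standalone sketch of that unpacking; the address-valid bit value is a placeholder for E1000_RAH_AV.

    #include <stdint.h>

    #define EX_RAH_AV  0x80000000u   /* placeholder for the address-valid bit */

    static int rar_to_mac(uint32_t ral, uint32_t rah, uint8_t mac[6])
    {
        if (!(rah & EX_RAH_AV))
            return -1;               /* entry not valid, caller skips it */

        /* RAL holds the first four address bytes, RAH the last two. */
        mac[0] = ral & 0xFF;
        mac[1] = (ral >> 8) & 0xFF;
        mac[2] = (ral >> 16) & 0xFF;
        mac[3] = (ral >> 24) & 0xFF;
        mac[4] = rah & 0xFF;
        mac[5] = (rah >> 8) & 0xFF;
        return 0;
    }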
data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Enable jumbo frame workaround in the PHY */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (E1000_TX_PTR_GAP << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); + if (ret_val) + return ret_val; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + if (ret_val) + return ret_val; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(0xF << 14); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Write PHY register values back to h/w defaults */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + return ret_val; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + if (ret_val) + return ret_val; + } + + /* re-enable Rx path after enabling/disabling workaround */ + return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. 
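[Editor's illustrative sketch] Both the enable and disable paths of the jumbo-frame workaround repeat the same clear-then-insert update on register fields, e.g. "data &= ~(0xF << 8); data |= (0xB << 8);". A small helper, illustrative only and not part of the driver, makes the pattern explicit:

    #include <stdint.h>

    static uint16_t set_field16(uint16_t reg, uint16_t mask, unsigned int shift,
                                uint16_t value)
    {
        reg &= ~(mask << shift);             /* clear the field */
        reg |= (uint16_t)((value & mask) << shift); /* insert the new value */
        return reg;
    }

    /* e.g. set_field16(data, 0xF, 8, 0xB) mirrors the HD_CTRL update above. */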
+ **/ +static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (hw->mac.type != e1000_pch2lan) + return 0; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); + if (ret_val) + goto release; + /* drop link after 5 times MSE threshold was reached */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_gig_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps + * Disable K1 in 1000Mbps and 100Mbps + **/ +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 status_reg = 0; + + if (hw->mac.type != e1000_pch2lan) + return 0; + + /* Set K1 beacon duration based on 10Mbs speed */ + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); + if (ret_val) + return ret_val; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + if (status_reg & + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { + u16 pm_phy_reg; + + /* LV 1G/100 Packet drop issue wa */ + ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); + if (ret_val) + return ret_val; + pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; + ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); + if (ret_val) + return ret_val; + } else { + u32 mac_reg; + + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; + ew32(FEXTNVM4, mac_reg); + } + } + + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. + **/ +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + if (hw->mac.type < e1000_pch2lan) + return; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication the MAC has finished configuring the + * PHY after a software reset. + **/ +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) +{ + u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; + + /* Wait for basic configuration completes before proceeding */ + do { + data = er32(STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + usleep_range(100, 200); + } while ((!data) && --loop); + + /* If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. 
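[Editor's illustrative sketch] e1000_k1_workaround_lv acts only once HV_M_STATUS reports link up with autonegotiation complete: at 1000/100 Mb/s it clears the K1 enable bit, while at 10 Mb/s it only widens the K1 beacon duration. A sketch of that decision, with placeholder constants and the two actions stubbed out as callbacks:

    #include <stdint.h>

    #define EX_LINK_UP     0x0040   /* placeholder HV_M_STATUS bit values */
    #define EX_AN_COMPLETE 0x0020
    #define EX_SPEED_1000  0x0200
    #define EX_SPEED_100   0x0100

    static void k1_workaround_lv_sketch(uint16_t hv_m_status,
                                        void (*disable_k1)(void),
                                        void (*set_beacon_16usec)(void))
    {
        if ((hv_m_status & (EX_LINK_UP | EX_AN_COMPLETE)) !=
            (EX_LINK_UP | EX_AN_COMPLETE))
            return;                     /* no resolved link, nothing to do */

        if (hv_m_status & (EX_SPEED_1000 | EX_SPEED_100))
            disable_k1();               /* clear the K1 enable in HV_PM_CTRL */
        else
            set_beacon_16usec();        /* 10 Mb/s: widen the K1 beacon */
    }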
+ */ + if (loop == 0) + e_dbg("LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); +} + +/** + * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset + * @hw: pointer to the HW structure + **/ +static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 reg; + + if (hw->phy.ops.check_reset_block(hw)) + return 0; + + /* Allow time for h/w to get to quiescent state after reset */ + usleep_range(10000, 20000); + + /* Perform any necessary post-reset workarounds */ + switch (hw->mac.type) { + case e1000_pchlan: + ret_val = e1000_hv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + default: + break; + } + + /* Clear the host wakeup bit after lcd reset */ + if (hw->mac.type >= e1000_pchlan) { + e1e_rphy(hw, BM_PORT_GEN_CFG, ®); + reg &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Configure the LCD with the OEM bits in NVM */ + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1000_write_emi_reg_locked(hw, + I82579_LPI_UPDATE_TIMER, + 0x1387); + hw->phy.ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; + + return e1000_post_phy_reset_ich8lan(hw); +} + +/** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if OEM write + * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. 
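[Editor's illustrative sketch] e1000_lan_init_done_ich8lan is a bounded poll: read a status bit with a short sleep in between, give up after a fixed loop count, then clear the bit for the next init event. A generic sketch of that pattern; the register accessors stand in for er32()/ew32() and are hypothetical.

    #include <stdbool.h>
    #include <stdint.h>

    static bool poll_bit_set(uint32_t (*read_reg)(void),
                             void (*write_reg)(uint32_t),
                             void (*delay_us)(unsigned int),
                             uint32_t bit, unsigned int max_loops)
    {
        unsigned int loop = max_loops;
        uint32_t val = 0;

        do {
            val = read_reg() & bit;
            delay_us(100);
        } while (!val && --loop);

        /* Clear the done bit so the next init event starts from a known state. */
        write_reg(read_reg() & ~bit);

        return loop != 0;    /* false means the bit never came up in time */
    }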
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + return ret_val; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + if (!hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + return e1e_wphy(hw, HV_OEM_BITS, oem_reg); +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type == e1000_phy_ife) + return 0; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
+ **/ +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u8 sig_byte = 0; + s32 ret_val; + + switch (hw->mac.type) { + /* In SPT, read from the CTRL_EXT reg instead of + * accessing the sector valid bits from the nvm + */ + case e1000_pch_spt: + *bank = er32(CTRL_EXT) + & E1000_CTRL_EXT_NVMVS; + if ((*bank == 0) || (*bank == 1)) { + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } else { + *bank = *bank - 2; + return 0; + } + break; + case e1000_ich8lan: + case e1000_ich9lan: + eecd = er32(EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return 0; + } + e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); + /* fall-thru */ + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return 0; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } +} + +/** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? 
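[Editor's illustrative sketch] For non-SPT parts the fallback path decides bank validity by the top two bits of the signature byte in NVM word 0x13. The test in isolation, with placeholder values standing in for E1000_ICH_NVM_VALID_SIG_MASK and E1000_ICH_NVM_SIG_VALUE:

    #include <stdbool.h>
    #include <stdint.h>

    #define EX_SIG_MASK   0xC0   /* placeholder: top two bits of the byte */
    #define EX_SIG_VALUE  0x80   /* placeholder: 10b pattern marks "valid" */

    static bool bank_signature_valid(uint8_t sig_byte)
    {
        return (sig_byte & EX_SIG_MASK) == EX_SIG_VALUE;
    }

    /* Bank 0 is checked first; if its signature fails, the same test is run
     * on the byte at the same offset plus one flash-bank size for bank 1. */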
nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = + dev_spec->shadow_ram[offset + i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset + i].modified) || + !(dev_spec->shadow_ram[offset + i + 1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset + i].modified) + data[i] = + dev_spec->shadow_ram[offset + i].value; + else + data[i] = (u16)(dword & 0xFFFF); + if (dev_spec->shadow_ram[offset + i].modified) + data[i + 1] = + dev_spec->shadow_ram[offset + i + 1].value; + else + data[i + 1] = (u16)(dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. + **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = dev_spec->shadow_ram[offset + i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (!hsfsts.hsf_status.fldesvalid) { + e_dbg("Flash descriptor invalid. 
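[Editor's illustrative sketch] Because SPT flash is only dword-addressable, e1000_read_nvm_spt rounds the requested word offset down to a dword boundary and then picks the low or high 16 bits of what it read. That selection as a standalone helper:

    #include <stdint.h>

    static uint16_t word_from_flash_dword(uint32_t word_offset, uint32_t dword)
    {
        /* Even word offsets live in the low half of the dword, odd ones in
         * the high half. */
        if (word_offset % 2 == 0)
            return (uint16_t)(dword & 0xFFFF);

        return (uint16_t)((dword >> 16) & 0xFFFF);
    }

    /* The dword itself is fetched from (word_offset - word_offset % 2),
     * converted to a byte address, just as the driver does above. */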
SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (!hsfsts.hsf_status.flcinprog) { + /* There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + s32 i; + + /* Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (!hsfsts.hsf_status.flcinprog) { + ret_val = 0; + break; + } + udelay(1); + } + if (!ret_val) { + /* Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + e_dbg("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. + **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) + return 0; + + return -E1000_ERR_NVM; +} + +/** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + /* Must convert word offset into bytes. */ + offset <<= 1; + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. 
Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + /* In SPT, only 32 bits access is supported, + * so this function should not be called. + */ + if (hw->mac.type == e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + + if (ret_val) + return ret_val; + + *data = (u8)word; + + return 0; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_read_flash_data32_ich8lan - Read dword from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the dword to read. + * @data: Pointer to the dword to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. 
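[Editor's illustrative sketch] The byte/word and dword read paths share one retry skeleton: run a flash cycle, retry on FCERR up to a fixed repeat count, and bail out immediately if the cycle times out without FLCDONE. A generic sketch of that control flow, with the hardware hidden behind a hypothetical callback:

    #include <stdbool.h>

    #define EX_REPEAT_COUNT 10   /* placeholder for ICH_FLASH_CYCLE_REPEAT_COUNT */

    enum cycle_result { CYCLE_OK, CYCLE_FCERR, CYCLE_TIMEOUT };

    static bool run_flash_cycle_with_retries(enum cycle_result (*one_cycle)(void))
    {
        int count = 0;

        do {
            switch (one_cycle()) {
            case CYCLE_OK:
                return true;
            case CYCLE_FCERR:
                /* Error was flagged and cleared; try the sequence again. */
                continue;
            case CYCLE_TIMEOUT:
                /* The cycle never completed; retrying will not help. */
                return false;
            }
        } while (count++ < EX_REPEAT_COUNT);

        return false;
    }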
+ **/ + +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || + hw->mac.type != e1000_pch_spt) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + *data = er32flash(ICH_FLASH_FDATA0); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. + **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset + i].modified = true; + dev_spec->shadow_ram[offset + i].value = data[i]; + } + + nvm->ops.release(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_spt - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. 
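[Editor's illustrative sketch] e1000_write_nvm_ich8lan never touches the flash directly; it only stages the new words in a shadow RAM, and the update-checksum routines commit them later. A minimal sketch of that staging step, assuming a fixed shadow size (the constant is a placeholder for E1000_ICH8_SHADOW_RAM_WORDS):

    #include <stdbool.h>
    #include <stdint.h>

    #define EX_SHADOW_WORDS 2048   /* placeholder shadow-RAM size in words */

    struct shadow_word {
        bool modified;
        uint16_t value;
    };

    static struct shadow_word shadow_ram[EX_SHADOW_WORDS];

    static int stage_nvm_write(uint16_t offset, uint16_t words,
                               const uint16_t *data)
    {
        uint16_t i;

        /* Mirrors the driver's bounds check before any staging happens. */
        if (offset >= EX_SHADOW_WORDS || words > EX_SHADOW_WORDS - offset ||
            words == 0)
            return -1;

        for (i = 0; i < words; i++) {
            shadow_ram[offset + i].modified = true;
            shadow_ram[offset + i].value = data[i];
        }
        return 0;
    }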
+ **/ +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u32 dword = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + + /* Write the data to the new bank. Offset in words */ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword */ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. 
We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword */ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + /* Write the bytes to the new bank. 
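[Editor's illustrative sketch] Committing a bank swap ends with two signature updates: the new bank's word 0x13 is downgraded from 11b to the valid 10b pattern by clearing bit 14, and the old bank is invalidated by zeroing the upper byte of its signature word (1-to-0 writes need no erase). The word-level arithmetic, shown standalone:

    #include <stdint.h>

    static uint16_t mark_new_bank_valid(uint16_t sig_word)
    {
        /* 11b -> 10b: clear bit 14, keep bit 15 set. */
        return sig_word & 0xBFFF;
    }

    static uint16_t invalidate_old_bank(uint16_t sig_word)
    {
        /* Flash erase leaves all bits at 1, so clearing bits needs no erase;
         * zeroing the high byte destroys the 10b signature. */
        return sig_word & 0x00FF;
    }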
*/ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + usleep_range(100, 200); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); + if (ret_val) + goto release; + + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. + * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + u16 word; + u16 valid_csum_mask; + + /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, + * the checksum needs to be fixed. This bit is an indication that + * the NVM was prepared by OEM software and did not calculate + * the checksum...a likely scenario. 
+ */ + switch (hw->mac.type) { + case e1000_pch_lpt: + case e1000_pch_spt: + word = NVM_COMPAT; + valid_csum_mask = NVM_COMPAT_VALID_CSUM; + break; + default: + word = NVM_FUTURE_INIT_WORD1; + valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; + break; + } + + ret_val = e1000_read_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & valid_csum_mask)) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only + * @hw: pointer to the HW structure + * + * To prevent malicious write/erase of the NVM, set it to be read-only + * so that the hardware ignores all write/erase cycles of the NVM via + * the flash control registers. The shadow-ram copy of the NVM will + * still be updated, however any updates to this copy will not stick + * across driver reloads. + **/ +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_flash_protected_range pr0; + union ich8_hws_flash_status hsfsts; + u32 gfpreg; + + nvm->ops.acquire(hw); + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* Write-protect GbE Sector of NVM */ + pr0.regval = er32flash(ICH_FLASH_PR0); + pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; + pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); + pr0.range.wpe = true; + ew32flash(ICH_FLASH_PR0, pr0.regval); + + /* Lock down a subset of GbE Flash Control Registers, e.g. + * PR0 to prevent the write-protection from being lifted. + * Once FLOCKDN is set, the registers protected by it cannot + * be written until FLOCKDN is cleared by a hardware reset. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + hsfsts.hsf_status.flockdn = true; + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + nvm->ops.release(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. + * + * Writes one/two bytes to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + /* In SPT, This register is in Lan memory space, + * not flash. 
Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + ew32flash(ICH_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** +* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM +* @hw: pointer to the HW structure +* @offset: The offset (in bytes) of the dwords to read. +* @data: The 4 bytes to write to the NVM. +* +* Writes one/two/four bytes to the NVM using the flash access registers. +**/ +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ew32flash(ICH_FLASH_FDATA0, data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. 
*/ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** +* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM +* @hw: pointer to the HW structure +* @offset: The offset of the word to write. +* @dword: The dword to write to the NVM. +* +* Writes a single dword to the NVM using the flash access registers. +* Goes through a retry algorithm before giving up. +**/ +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + + /* Must convert word offset into bytes. */ + offset <<= 1; + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 
+ * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration; j++) { + do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. + */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); + if (!ret_val) + break; + + /* Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* repeat for some time before giving up */ + continue; + else if (!hsfsts.hsf_status.flcdone) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. + **/ +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return 0; +} + +/** + * e1000_id_led_init_pchlan - store LED configurations + * @hw: pointer to the HW structure + * + * PCH does not control LEDs via the LEDCTL register, rather it uses + * the PHY LED configuration register. + * + * PCH also does not have an "always on" or "always off" mode which + * complicates the ID feature. Instead of using the "on" mode to indicate + * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()), + * use "link_up" mode. The LEDs will still ID on request if there is no + * link based on logic in e1000_led_[on|off]_pchlan(). 
+ **/ +static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; + const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; + u16 data, i, temp, shift; + + /* Get default ID LED modes */ + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; + shift = (i * 5); + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_on << shift); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_on << shift); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000_get_bus_info_ich8lan - Get/Set the bus type and width + * @hw: pointer to the HW structure + * + * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability + * register, so the the bus width is hard coded. + **/ +static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + + ret_val = e1000e_get_bus_info_pcie(hw); + + /* ICH devices are "PCI Express"-ish. They have + * a configuration space, but do not contain + * PCI Express Capability registers, so bus width + * must be hardcoded. + */ + if (bus->width == e1000_bus_width_unknown) + bus->width = e1000_bus_width_pcie_x1; + + return ret_val; +} + +/** + * e1000_reset_hw_ich8lan - Reset the hardware + * @hw: pointer to the HW structure + * + * Does a full reset of the hardware which includes a reset of the PHY and + * MAC. + **/ +static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 kum_cfg; + u32 ctrl, reg; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC + * with the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac.type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. */ + ew32(PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k. 
*/ + ew32(PBS, E1000_PBS_16K); + } + + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting */ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); + if (ret_val) + return ret_val; + + if (kum_cfg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + + ctrl = er32(CTRL); + + if (!hw->phy.ops.check_reset_block(hw)) { + /* Full-chip reset requires MAC and PHY reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + + /* Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + e_dbg("Issuing a global reset to ich8lan\n"); + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ + msleep(20); + + /* Set Phy Config Counter to 50msec */ + if (hw->mac.type == e1000_pch2lan) { + reg = er32(FEXTNVM3); + reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, reg); + } + + if (!ret_val) + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); + + if (ctrl & E1000_CTRL_PHY_RST) { + ret_val = hw->phy.ops.get_cfg_done(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. + */ + if (hw->mac.type == e1000_pchlan) + ew32(CRC_OFFSET, 0x65656565); + + ew32(IMC, 0xffffffff); + er32(ICR); + + reg = er32(KABGTXD); + reg |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, reg); + + return 0; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Disable wakeup by clearing the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. 
+ */ + if (hw->phy.type == e1000_phy_82578) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL(0)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + ew32(TXDCTL(0), txdctl); + txdctl = er32(TXDCTL(1)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + ew32(TXDCTL(1), txdctl); + + /* ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32)~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. + **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } + + /* work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. 
+ */ + if (hw->mac.type == e1000_ich8lan) + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); + + /* Enable ECC on Lynxpoint */ + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + reg = er32(PBECCSTS); + reg |= E1000_PBECCSTS_ECC_ENABLE; + ew32(PBECCSTS, reg); + + reg = er32(CTRL); + reg |= E1000_CTRL_MEHE; + ew32(CTRL, reg); + } +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (hw->phy.ops.check_reset_block(hw)) + return 0; + + /* ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + /* Workaround h/w hang when Tx flow control enabled */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Continue to configure the copper link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ew32(FCRTV_PCH, hw->fc.refresh_time); + + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. 
+ */ + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + ®_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + reg_data); + if (ret_val) + return ret_val; + + switch (hw->phy.type) { + case e1000_phy_igp_3: + ret_val = e1000e_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_bm: + case e1000_phy_82578: + ret_val = e1000e_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_82577: + case e1000_phy_82579: + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_ife: + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); + if (ret_val) + return ret_val; + break; + default: + break; + } + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Calls the PHY specific link setup function and then calls the + * generic setup_copper_link to finish configuring the link for + * Lynxpoint PCH devices + **/ +static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. + **/ +static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. 
+ **/ +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return 0; + + /* Make sure link is up before proceeding. If not just return. + * Attempting this while link is negotiating fouled up link + * stability + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (!link) + return 0; + + for (i = 0; i < 10; i++) { + /* read once to clear */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return 0; + + /* Issue PHY reset */ + e1000_phy_hw_reset(hw); + mdelay(5); + } + /* Disable GigE link negotiation */ + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on Gig disable before accessing + * any PHY registers + */ + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* unable to acquire PCS lock */ + return -E1000_ERR_PHY; +} + +/** + * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * @hw: pointer to the HW structure + * @state: boolean value used to set the current Kumeran workaround state + * + * If ICH8, set the current Kumeran workaround state (enabled - true + * /disabled - false). + **/ +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + if (hw->mac.type != e1000_ich8lan) { + e_dbg("Workaround applies to ICH8 only.\n"); + return; + } + + dev_spec->kmrn_lock_loss_workaround_enabled = state; +} + +/** + * e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * @hw: pointer to the HW structure + * + * Workaround for 82566 power-down on D3 entry: + * 1) disable gigabit link + * 2) write VR power-down enable + * 3) read it back + * Continue if successful, else issue LCD reset and repeat + **/ +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + u16 data; + u8 retry = 0; + + if (hw->phy.type != e1000_phy_igp_3) + return; + + /* Try the workaround twice (if needed) */ + do { + /* Disable link */ + reg = er32(PHY_CTRL); + reg |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, reg); + + /* Call gig speed drop workaround on Gig disable before + * accessing any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* Write VR power-down enable */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); + + /* Read it back and test */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = er32(CTRL); + ew32(CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); +} + +/** + * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @hw: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (eg. 
link cable removal (LSC), + * LPLU, Gig disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with any 1G Phy. + **/ +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + + if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife)) + return; + + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + ®_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); +} + +/** + * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'Gig Disable' to force link speed negotiation to a lower speed based on + * the LPLU setting in the NVM or custom setting. For PCH and newer parts, + * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also + * needs to be written. + * Parts that support (and are linked to a partner which support) EEE in + * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power + * than 10Mbps w/o EEE. + **/ +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg, device_id = hw->adapter->pdev->device; + + if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (device_id == E1000_DEV_ID_PCH_I218_LM3) || + (device_id == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { + u32 fextnvm6 = er32(FEXTNVM6); + + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (!dev_spec->eee_disable) { + u16 eee_advert; + + ret_val = + e1000_read_emi_reg_locked(hw, + I217_EEE_ADVERTISEMENT, + &eee_advert); + if (ret_val) + goto release; + + /* Disable LPLU if both link partners support 100BaseT + * EEE and 100Full is advertised on both ends of the + * link, and enable Auto Enable LPI since there will + * be no driver to enable LPI while in Sx. + */ + if ((eee_advert & I82579_EEE_100_SUPPORTED) && + (dev_spec->eee_lp_ability & + I82579_EEE_100_SUPPORTED) && + (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { + phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU); + + /* Set Auto Enable LPI after link up */ + e1e_rphy_locked(hw, + I217_LPI_GPIO_CTRL, &phy_reg); + phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + e1e_wphy_locked(hw, + I217_LPI_GPIO_CTRL, phy_reg); + } + } + + /* For i217 Intel Rapid Start Technology support, + * when the system is going into Sx and no manageability engine + * is present, the driver must configure proxy to reset only on + * power good. LPI (Low Power Idle) state must also reset only + * on power good, as well as the MTA (Multicast table array). + * The SMBus release must also be disabled on LCD reset. + */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Enable proxy to reset only on power good. 
*/ + e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); + phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; + e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); + + /* Set bit enable LPI (EEE) to reset only on + * power good. + */ + e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; + e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); + + /* Disable the SMB release on LCD reset. */ + e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + } + + /* Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: + ew32(PHY_CTRL, phy_ctrl); + + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + + /* Reset PHY to activate OEM bits on 82577/8 */ + if (hw->mac.type == e1000_pchlan) + e1000e_phy_hw_reset_generic(hw); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + * On i217, setup Intel Rapid Start Technology. + **/ +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (hw->mac.type < e1000_pch2lan) + return; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) { + e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); + return; + } + + /* For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). + */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to setup iRST\n"); + return; + } + + /* Clear Auto Enable LPI after link up */ + e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); + phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); + + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Restore clear on SMB if no manageability engine + * is present + */ + ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + if (ret_val) + goto release; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + + /* Disable Proxy */ + e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); +release: + if (ret_val) + e_dbg("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. 
+ **/ +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + ew32(LEDCTL, hw->mac.ledctl_mode2); + return 0; +} + +/** + * e1000_led_off_ich8lan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | + IFE_PSCL_PROBE_LEDS_OFF)); + + ew32(LEDCTL, hw->mac.ledctl_mode1); + return 0; +} + +/** + * e1000_setup_led_pchlan - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use. + **/ +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); +} + +/** + * e1000_cleanup_led_pchlan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); +} + +/** + * e1000_led_on_pchlan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode2; + u32 i, led; + + /* If no link, then turn LED on by setting the invert bit + * for each LED that's mode is "link_up" in ledctl_mode2. + */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_led_off_pchlan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode1; + u32 i, led; + + /* If no link, then turn LED off by clearing the invert bit + * for each LED that's mode is "link_up" in ledctl_mode1. + */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset + * @hw: pointer to the HW structure + * + * Read appropriate register for the config done bit for completion status + * and configure the PHY through s/w for EEPROM-less parts. + * + * NOTE: some silicon which is EEPROM-less will fail trying to read the + * config done bit, so only an error is logged and continues. If we were + * to return with error, EEPROM-less silicon would not be able to be reset + * or change link. 
+ **/ +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 bank = 0; + u32 status; + + e1000e_get_cfg_done_generic(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + e_dbg("Auto Read Done did not complete\n"); + ret_val = 0; + } + } + + /* Clear PHY Reset Asserted bit */ + status = er32(STATUS); + if (status & E1000_STATUS_PHYRA) + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + else + e_dbg("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (!(er32(EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000e_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + e_dbg("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. 
+ **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + e1000e_clear_hw_cntrs_base(hw); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +static const struct e1000_mac_operations ich8_mac_ops = { + /* check_mng_mode dependent on mac type */ + .check_for_link = e1000_check_for_copper_link_ich8lan, + /* cleanup_led dependent on mac type */ + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .set_lan_id = e1000_set_lan_id_single_port, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + /* led_on dependent on mac type */ + /* led_off dependent on mac type */ + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface = e1000_setup_copper_link_ich8lan, + /* id_led_init dependent on mac type */ + .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, + .rar_get_count = e1000e_rar_get_count_generic, +}; + +static const struct e1000_phy_operations ich8_phy_ops = { + .acquire = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit = NULL, + .get_cfg_done = e1000_get_cfg_done_ich8lan, + .get_cable_length = e1000e_get_cable_length_igp_2, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_release_swflag_ich8lan, + .reset = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_reg = e1000e_write_phy_reg_igp, +}; + +static const struct e1000_nvm_operations ich8_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .reload = e1000e_reload_nvm_generic, + .update = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +static const struct e1000_nvm_operations spt_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .release = 
e1000_release_nvm_ich8lan, + .read = e1000_read_nvm_spt, + .update = e1000_update_nvm_checksum_spt, + .reload = e1000e_reload_nvm_generic, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +const struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_IS_ICH + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 18, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich10_info = { + .mac = e1000_ich10lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 18, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_info = { + .mac = e1000_pchlan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS, + .pba = 26, + .max_hw_frame_size = 4096, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch2_info = { + .mac = e1000_pch2lan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_lpt_info = { + .mac = e1000_pch_lpt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_spt_info = { + .mac = e1000_pch_spt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h new file mode 100644 index 
000000000..770a573b9 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -0,0 +1,303 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_ICH8LAN_H_ +#define _E1000E_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 + +/* Requires up to 10 seconds when MNG might be accessing part. */ +#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 +#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ + +/* Shared Receive Address Registers */ +#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) +#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) + +#define E1000_H2ME 0x05B50 /* Host to ME */ +#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ +#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_OFF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +/* FEXT register bit definition */ +#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ + +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit 
for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 + +#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 + +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 +#define HV_STATS_PAGE 778 +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. 
*/ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +/* I218 Ultra Low Power Configuration 1 Register */ +#define I218_ULP_CONFIG1 PHY_REG(779, 16) +#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ +#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ +#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ +#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ +#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ +#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_ENABLE 0x4000 + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ + +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + +/* Low Power Idle GPIO Control */ +#define I217_LPI_GPIO_CTRL PHY_REG(772, 18) +#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800 + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_100_ENABLE 0x2000 +#define I82579_LPI_CTRL_1000_ENABLE 0x4000 +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 + +/* Extended Management Interface (EMI) Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 
0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ +#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ +#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ +#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ +#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ +#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_RX_CONFIG 0xB20C /* Receive configuration */ + +#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ +#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 + +/* Receive Address Initial CRC Calculation */ +#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) + +/* Latency Tolerance Reporting */ +#define E1000_LTRV 0x000F8 +#define E1000_LTRV_SCALE_MAX 5 +#define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_REQ_SHIFT 15 +#define E1000_LTRV_NOSNOOP_SHIFT 16 +#define E1000_LTRV_SEND (1 << 30) + +/* Proprietary Latency Tolerance Reporting PCI Capability */ +#define E1000_PCI_LTR_CAP_LPT 0xA8 + +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); +s32 e1000_set_eee_pchlan(struct e1000_hw *hw); +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx); +#endif /* _E1000E_ICH8LAN_H_ */ diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c new file mode 100644 index 000000000..30b74d590 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -0,0 +1,1800 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +/** + * e1000e_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + struct e1000_adapter *adapter = hw->adapter; + u16 pcie_link_status, cap_offset; + + cap_offset = adapter->pdev->pcie_cap; + if (!cap_offset) { + bus->width = e1000_bus_width_unknown; + } else { + pci_read_config_word(adapter->pdev, + cap_offset + PCIE_LINK_STATUS, + &pcie_link_status); + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return 0; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = er32(STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + e1e_flush(); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + e1e_flush(); +} + +/** + * e1000e_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = { 0 }; + + /* Setup the receive address */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + /* not supported on 82573 */ + if (hw->mac.type == e1000_82573) + return 0; + + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + return 0; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + return 0; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + + return 0; +} + +u32 e1000e_rar_get_count_generic(struct e1000_hw *hw) +{ + return hw->mac.rar_entry_count; +} + +/** + * e1000e_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. 
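/*
 * Illustrative sketch (not part of the patch): unpacking three 16-bit NVM
 * words into a 6-byte MAC address, low byte first, the way the loop in
 * e1000_check_alt_mac_addr_generic() does, plus the multicast-bit test that
 * decides whether the alternate address is usable (bit 0 of the first octet).
 * The example NVM words are made up for the demonstration.
 */
#include <stdint.h>
#include <stdio.h>

static void nvm_words_to_mac(const uint16_t words[3], uint8_t mac[6])
{
	int i;

	for (i = 0; i < 6; i += 2) {
		mac[i]     = (uint8_t)(words[i >> 1] & 0xFF);
		mac[i + 1] = (uint8_t)(words[i >> 1] >> 8);
	}
}

int main(void)
{
	const uint16_t words[3] = { 0x1B00, 0x3C21, 0x5D4E };
	uint8_t mac[6];

	nvm_words_to_mac(words, mac);
	/* prints 00:1b:21:3c:4e:5d; bit 0 of mac[0] clear -> unicast, usable */
	printf("%02x:%02x:%02x:%02x:%02x:%02x  multicast=%d\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], mac[0] & 1);
	return 0;
}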
+ **/ +int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + + return 0; +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
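/*
 * Illustrative sketch (not part of the patch): the multicast hash from
 * e1000_hash_mc_addr() reimplemented for the worked example in the comment
 * above (address 01:AA:00:12:34:56, 128 MTA registers -> 0xFFF mask, base
 * bit_shift of 4).  Filter types 0, 1 and 3 reproduce the documented 0x563,
 * 0xAC6 and 0x634; type 2 evaluates to 0x58D rather than the 0x163 shown in
 * the comment, per the comment's own formula ((0x34 >> 2) | (0x56 << 6)) & 0xFFF.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mc_hash(const uint8_t *mc, uint32_t mta_reg_count,
			unsigned int filter_type)
{
	uint32_t hash_mask = mta_reg_count * 32 - 1;
	unsigned int bit_shift = 0;
	static const unsigned int extra[4] = { 0, 1, 2, 4 };

	while ((hash_mask >> bit_shift) != 0xFF)
		bit_shift++;
	bit_shift += extra[filter_type & 3];

	return hash_mask & ((mc[4] >> (8 - bit_shift)) |
			    ((uint16_t)mc[5] << bit_shift));
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	unsigned int type;

	for (type = 0; type < 4; type++)
		printf("type %u: 0x%03X\n", type,
		       (unsigned)mc_hash(mc, 128, type));
	return 0;
}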
+ **/ +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + e1e_flush(); +} + +/** + * e1000e_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + er32(MPTC); + er32(BPTC); +} + +/** + * e1000e_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000e_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return 0; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
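/*
 * Illustrative sketch (not part of the patch): how a hash value is turned
 * into a Multicast Table Array position in
 * e1000e_update_mc_addr_list_generic() -- the upper bits select one of the
 * 32-bit MTA registers, the low five bits select the bit inside it.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hash_value = 0x563;     /* example hash from the sketch above */
	uint32_t mta_reg_count = 128;
	uint32_t hash_reg = (hash_value >> 5) & (mta_reg_count - 1);
	uint32_t hash_bit = hash_value & 0x1F;
	uint32_t mta_shadow[128] = { 0 };

	mta_shadow[hash_reg] |= 1u << hash_bit;
	/* hash 0x563 -> MTA[43], bit 3 */
	printf("MTA[%u] bit %u -> 0x%08X\n", (unsigned)hash_reg,
	       (unsigned)hash_bit, (unsigned)mta_shadow[hash_reg]);
	return 0;
}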
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
*/ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usleep_range(10, 20); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usleep_range(10, 20); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + return 0; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return 0; +} + +/** + * e1000e_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
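/*
 * Illustrative sketch (not part of the patch): the decision
 * e1000_set_default_fc_generic() makes from NVM word 0x0F.  The two PAUSE
 * bits select none / tx_pause / full; the numeric bit positions below
 * (bits 13:12, with ASM_DIR as bit 13) are assumptions chosen to make the
 * example runnable, not values taken from this patch.
 */
#include <stdint.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

#define WORD0F_PAUSE_MASK 0x3000   /* assumed: bits 13:12 */
#define WORD0F_ASM_DIR    0x2000   /* assumed: bit 13 */

static enum fc_mode default_fc(uint16_t nvm_word)
{
	if (!(nvm_word & WORD0F_PAUSE_MASK))
		return FC_NONE;
	if ((nvm_word & WORD0F_PAUSE_MASK) == WORD0F_ASM_DIR)
		return FC_TX_PAUSE;
	return FC_FULL;
}

int main(void)
{
	/* only ASM_DIR set -> tx_pause (prints 2) */
	printf("%d\n", default_fc(0x2000));
	return 0;
}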
+ **/ +s32 e1000e_setup_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return 0; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc.pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. 
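/*
 * Illustrative sketch (not part of the patch): the advertisement that
 * e1000_commit_fc_settings_generic() builds.  Rx-only pause cannot be
 * advertised by itself, so it is advertised as symmetric plus asymmetric
 * PAUSE; Tx-only pause advertises ASM_DIR alone.  The bit values below are
 * placeholders for the example, not the driver's E1000_TXCW_* defines.
 */
#include <stdint.h>
#include <stdio.h>

#define TXCW_ANE     (1u << 31)   /* placeholder: autoneg enable */
#define TXCW_FD      (1u << 5)    /* placeholder: full duplex */
#define TXCW_PAUSE   (1u << 7)    /* placeholder: symmetric pause */
#define TXCW_ASM_DIR (1u << 8)    /* placeholder: asymmetric pause */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static uint32_t fc_to_txcw(enum fc_mode mode)
{
	uint32_t txcw = TXCW_ANE | TXCW_FD;

	switch (mode) {
	case FC_NONE:
		break;
	case FC_RX_PAUSE:              /* advertise both PAUSE bits */
	case FC_FULL:
		txcw |= TXCW_PAUSE | TXCW_ASM_DIR;
		break;
	case FC_TX_PAUSE:
		txcw |= TXCW_ASM_DIR;
		break;
	}
	return txcw;
}

int main(void)
{
	printf("0x%08X\n", (unsigned)fc_to_txcw(FC_RX_PAUSE));
	return 0;
}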
+ */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ew32(TXCW, txcw); + mac->txcw = txcw; + + return 0; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + usleep_range(10000, 20000); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + e_dbg("Valid Link Found\n"); + } + + return 0; +} + +/** + * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(CTRL, ctrl); + e1e_flush(); + usleep_range(1000, 2000); + + /* For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + e_dbg("No signal detected\n"); + } + + return ret_val; +} + +/** + * e1000e_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
+ **/ +void e1000e_config_collision_dist_generic(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + e1e_flush(); +} + +/** + * e1000e_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + ew32(FCRTL, fcrtl); + ew32(FCRTH, fcrth); + + return 0; +} + +/** + * e1000e_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 e1000e_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ew32(CTRL, ctrl); + + return 0; +} + +/** + * e1000e_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. 
If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) { + e_dbg("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = er32(PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + e_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. 
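/*
 * Illustrative sketch (not part of the patch): the PAUSE/ASM_DIR resolution
 * table from the comment above as a standalone function.  "req_full" stands
 * in for hw->fc.requested_mode == e1000_fc_full, which decides whether a
 * symmetric result is taken as full or Rx-only flow control.
 */
#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
			       bool lp_pause, bool lp_asm, bool req_full)
{
	if (loc_pause && lp_pause)
		return req_full ? FC_FULL : FC_RX_PAUSE;
	if (!loc_pause && loc_asm && lp_pause && lp_asm)
		return FC_TX_PAUSE;
	if (loc_pause && loc_asm && !lp_pause && lp_asm)
		return FC_RX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	/* row "1 | 1 | 0 | 1" of the table -> rx_pause (prints 1) */
	printf("%d\n", resolve_fc(true, true, false, true, false));
	return 0;
}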
+ */ + pcs_adv_reg = er32(PCS_ANADV); + pcs_lp_ability_reg = er32(PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. 
+ */ + pcs_ctrl_reg = er32(PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + ew32(PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + e_dbg("%u Mbps, %s Duplex\n", + *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, + *duplex == FULL_DUPLEX ? "Full" : "Half"); + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused + *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usleep_range(50, 100); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + usleep_range(50, 100); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. 
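/*
 * Illustrative sketch (not part of the patch): decoding speed and duplex
 * from a STATUS register value the way e1000e_get_speed_and_duplex_copper()
 * does.  The bit masks are placeholders chosen for the example, not the
 * driver's E1000_STATUS_* defines.
 */
#include <stdint.h>
#include <stdio.h>

#define STATUS_FD         0x0001   /* placeholder: full duplex */
#define STATUS_SPEED_100  0x0040   /* placeholder */
#define STATUS_SPEED_1000 0x0080   /* placeholder */

static void decode_status(uint32_t status, unsigned int *speed,
			  const char **duplex)
{
	if (status & STATUS_SPEED_1000)
		*speed = 1000;
	else if (status & STATUS_SPEED_100)
		*speed = 100;
	else
		*speed = 10;
	*duplex = (status & STATUS_FD) ? "Full" : "Half";
}

int main(void)
{
	unsigned int speed;
	const char *duplex;

	decode_status(0x0081, &speed, &duplex);   /* 1000 Mbps, Full */
	printf("%u Mbps, %s Duplex\n", speed, duplex);
	return 0;
}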
+ **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + e_dbg("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +s32 e1000e_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. 
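/*
 * Illustrative sketch (not part of the patch): how
 * e1000e_id_led_init_generic() walks the NVM ID LED word -- one 4-bit field
 * per LED, each of which selects the mode written into an 8-bit per-LED
 * field of LEDCTL.  The loop below just prints the per-LED nibble and the
 * LEDCTL bit range it would rewrite; the example word is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t id_led_word = 0x8421;   /* example NVM ID LED settings word */
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int nibble = (id_led_word >> (i << 2)) & 0x0F;
		unsigned int lo_bit = i << 3;

		printf("LED%d: id nibble 0x%X, LEDCTL bits %u-%u\n",
		       i, nibble, lo_bit, lo_bit + 7);
	}
	return 0;
}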
+ **/ +s32 e1000e_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + if (hw->mac.ops.setup_led != e1000e_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->mac.ledctl_mode1); + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000e_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. 
+ **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(100, 200); + timeout--; + } + + if (!timeout) { + e_dbg("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000e_reset_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + ew32(AIT, 0); +} + +/** + * e1000e_update_adaptive - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000e_update_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + ew32(AIT, mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + ew32(AIT, 0); + } + } +} diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h new file mode 100644 index 000000000..0513d90cd --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -0,0 +1,68 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_MAC_H_ +#define _E1000E_MAC_H_ + +s32 e1000e_blink_led_generic(struct e1000_hw *hw); +s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +s32 e1000e_force_mac_fc(struct e1000_hw *hw); +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000e_id_led_init_generic(struct e1000_hw *hw); +s32 e1000e_led_on_generic(struct e1000_hw *hw); +s32 e1000e_led_off_generic(struct e1000_hw *hw); +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +s32 e1000e_setup_led_generic(struct e1000_hw *hw); +s32 e1000e_setup_link_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); + +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void e1000e_put_hw_semaphore(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000e_reset_adaptive(struct e1000_hw *hw); +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +void e1000e_update_adaptive(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +u32 e1000e_rar_get_count_generic(struct e1000_hw *hw); +int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +void e1000e_config_collision_dist_generic(struct e1000_hw *hw); + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c new file mode 100644 index 000000000..06edfca1a --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -0,0 +1,347 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8)(0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + if (!hw->mac.arc_subsystem_valid) { + e_dbg("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if (!(hicr & E1000_HICR_EN)) { + e_dbg("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + e_dbg("Previous command timeout failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. 
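/*
 * Illustrative sketch (not part of the patch): the 8-bit checksum used by
 * e1000_calculate_checksum() and verified over the DHCP cookie -- the stored
 * checksum is the two's complement of the byte sum, so summing the whole
 * block including the checksum field yields zero.  The cookie bytes are
 * made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t calc_checksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t cookie[8] = { 0x64, 0x12, 0x00, 0x5A, 0x01, 0x00, 0x00, 0x00 };
	uint8_t sum = 0;
	size_t i;

	cookie[7] = calc_checksum(cookie, sizeof(cookie));  /* checksum byte */
	for (i = 0; i < sizeof(cookie); i++)
		sum += cookie[i];
	/* prints checksum=0x2F, total=0x00 */
	printf("checksum=0x%02X, total=0x%02X\n", cookie[7], sum);
	return 0;
}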
+ */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. 
+ **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + s32 ret_val; + + factps = er32(FACTPS); + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return false; + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h new file mode 100644 index 000000000..a8c27f98f --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -0,0 +1,65 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_MANAGE_H_ +#define _E1000E_MANAGE_H_ + +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c new file mode 100644 index 000000000..68913d103 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -0,0 +1,7314 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000.h" + +#define DRV_EXTRAVERSION "-k" + +#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION +char e1000e_driver_name[] = "e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_82574] = &e1000_82574_info, + [board_82583] = &e1000_82583_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, + [board_ich10lan] = &e1000_ich10_info, + [board_pchlan] = &e1000_pch_info, + [board_pch2lan] = &e1000_pch2_info, + [board_pch_lpt] = &e1000_pch_lpt_info, + [board_pch_spt] = &e1000_pch_spt_info, +}; + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +static const struct e1000_reg_info e1000_reg_info_tbl[] = { + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* Rx Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RDTR, "RDTR"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_ERT, "ERT"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + {E1000_RDFH, "RDFH"}, + {E1000_RDFT, "RDFT"}, + {E1000_RDFHS, "RDFHS"}, + {E1000_RDFTS, "RDFTS"}, + {E1000_RDFPC, "RDFPC"}, + + /* Tx Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TIDV, "TIDV"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TADV, "TADV"}, + {E1000_TARC(0), "TARC"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFTS, "TDFTS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {0, NULL} +}; + +/** + * __ew32_prepare - prepare to write to MAC CSR register on certain parts + * @hw: pointer to the HW structure + * + * When updating the MAC CSR registers, the Manageability Engine (ME) could + * be accessing the registers at the same time. Normally, this is handled in + * h/w by an arbiter but on some parts there is a bug that acknowledges Host + * accesses later than it should which could result in the register to have + * an incorrect value. Workaround this by checking the FWSM register which + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
+ **/ +s32 __ew32_prepare(struct e1000_hw *hw) +{ + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); + + return i; +} + +void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + __ew32_prepare(hw); + + writel(val, hw->hw_addr + reg); +} + +/** + * e1000_regdump - register printout routine + * @hw: pointer to the HW structure + * @reginfo: pointer to the register info table + **/ +static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_RXDCTL(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TXDCTL(n)); + break; + case E1000_TARC(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TARC(n)); + break; + default: + pr_info("%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); + pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); +} + +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) { + pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + +/** + * e1000e_dump - Print registers, Tx-ring and Rx-ring + * @adapter: board private structure + **/ +static void e1000e_dump(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_reg_info *reginfo; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc; + struct my_u0 { + __le64 a; + __le64 b; + } *u0; + struct e1000_buffer *buffer_info; + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_packet_split *rx_desc_ps; + union e1000_rx_desc_extended *rx_desc; + struct my_u1 { + __le64 a; + __le64 b; + __le64 c; + __le64 d; + } *u1; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; + reginfo->name; reginfo++) { + e1000_regdump(hw, reginfo); + } + + /* Print Tx Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); + + /* Print Tx Ring */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 
(Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 
'd' : 'c')), + i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); + } + + /* Print Rx Ring Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + pr_info(" %5d %5X %5X\n", + 0, rx_ring->next_to_use, rx_ring->next_to_clean); + + /* Print Rx Ring */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); + switch (adapter->rx_ps_pages) { + case 1: + case 2: + case 3: + /* [Extended] Packet Split Receive Descriptor Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address 0 [63:0] | + * +-----------------------------------------------------+ + * 8 | Buffer Address 1 [63:0] | + * +-----------------------------------------------------+ + * 16 | Buffer Address 2 [63:0] | + * +-----------------------------------------------------+ + * 24 | Buffer Address 3 [63:0] | + * +-----------------------------------------------------+ + */ + pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); + /* [Extended] Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 13 12 8 7 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | + * | Checksum | Ident | | Queue | | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + buffer_info = &rx_ring->buffer_info[i]; + rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc_ps; + staterr = + le32_to_cpu(rx_desc_ps->wb.middle.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb, next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter)) + e1000e_dump_ps_pages(adapter, + buffer_info); + } + } + break; + default: + case 0: + /* Extended Receive Descriptor (Read) Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * 8 | Reserved | + * 
+-----------------------------------------------------+ + */ + pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); + /* Extended Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 24 23 4 3 0 + * +------------------------------------------------------+ + * | RSS Hash | | | | + * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | + * | Packet | IP | | | Type | + * | Checksum | Ident | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + + buffer_info = &rx_ring->buffer_info[i]; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + buffer_info->skb, next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, + 1, + buffer_info->skb->data, + adapter->rx_buffer_len, + true); + } + } + } +} + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp + * @adapter: board private structure + * @hwtstamps: time stamp structure to update + * @systim: unsigned 64bit system time value. + * + * Convert the system time value stored in the RX/TXSTMP registers into a + * hwtstamp which can be used by the upper level time stamping functions. + * + * The 'systim_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two 32 bit registers. The first read latches the + * value. 
+ **/ +static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, systim); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +/** + * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp + * @adapter: board private structure + * @status: descriptor extended error and status field + * @skb: particular skb to include time stamp + * + * If the time stamp is valid, convert it into the timecounter ns value + * and store that result into the shhwtstamps structure which is passed + * up the network stack. + **/ +static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rxstmp; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || + !(status & E1000_RXDEXT_STATERR_TST) || + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + /* The Rx time stamp registers contain the time stamp. No other + * received packet will be time stamped until the Rx time stamp + * registers are read. Because only one packet can be time stamped + * at a time, the register values must belong to this packet and + * therefore none of the other additional attributes need to be + * compared. + */ + rxstmp = (u64)er32(RXSTMPL); + rxstmp |= (u64)er32(RXSTMPH) << 32; + e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp); + + adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; +} + +/** + * e1000_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @staterr: descriptor extended error and status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, + struct net_device *netdev, struct sk_buff *skb, + u32 staterr, __le16 vlan) +{ + u16 tag = le16_to_cpu(vlan); + + e1000e_rx_hwtstamp(adapter, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + + if (staterr & E1000_RXD_STAT_VP) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); + + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_rx_checksum - Receive Checksum Offload + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* Rx checksum disabled */ + if (!(adapter->netdev->features & NETIF_F_RXCSUM)) + return; + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + + /* TCP/UDP checksum error bit or IP checksum error bit is set */ + if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; +} + +static void e1000e_update_rdt_wa(struct 
e1000_ring *rx_ring, unsigned int i) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, rx_ring->tail); + + if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e_err("ME firmware caused invalid RDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, tx_ring->tail); + + if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + u32 tctl = er32(TCTL); + + ew32(TCTL, tctl & ~E1000_TCTL_EN); + e_err("ME firmware caused invalid TDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: Rx descriptor ring + **/ +static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i); + else + writel(i, rx_ring->tail); + } + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: Rx descriptor ring + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (j >= adapter->rx_ps_pages) { + /* all unused desc entries get hw null ptr */ + rx_desc->read.buffer_addr[j + 1] = + ~cpu_to_le64(0); + continue; + } + if (!ps_page->page) { + ps_page->page = alloc_page(gfp); + if (!ps_page->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + ps_page->dma = dma_map_page(&pdev->dev, + ps_page->page, + 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + ps_page->dma)) { + dev_err(&adapter->pdev->dev, + "Rx DMA page map failed\n"); + adapter->rx_dma_failed++; + goto no_buffers; + } + } + /* Refresh the desc even if buffer_addrs + * didn't change because each write-back + * erases this info. + */ + rx_desc->read.buffer_addr[j + 1] = + cpu_to_le64(ps_page->dma); + } + + skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, + gfp); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + /* cleanup skb */ + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + break; + } + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i << 1); + else + writel(i << 1, rx_ring->tail); + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @rx_ring: Rx descriptor ring + * @cleaned_count: number of buffers to allocate this pass + **/ + +static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - 16; /* for skb_reserve */ + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(gfp); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i); + else + writel(i, rx_ring->tail); + } +} + +static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, + struct sk_buff *skb) +{ + if (netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3); +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack + * @rx_ring: Rx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_extended *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length, staterr; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->wb.upper.length); + + /* !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. 
In fact, we + * need to toss every packet with the EOP bit clear and the + * next frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + /* All receives must fit into a single buffer */ + e_dbg("Receive packet consumed multiple buffers\n"); + /* recycle */ + buffer_info->skb = skb; + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + /* If configured to store CRC, don't subtract FCS, + * but keep the FCS bytes out of the total_rx_bytes + * counter + */ + if (netdev->features & NETIF_F_RXFCS) + total_rx_bytes -= 4; + else + length -= 4; + } + + total_rx_bytes += length; + total_rx_packets++; + + /* code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack + */ + if (length < copybreak) { + struct sk_buff *new_skb = + napi_alloc_skb(&adapter->napi, length); + if (new_skb) { + skb_copy_to_linear_data_offset(new_skb, + -NET_IP_ALIGN, + (skb->data - + NET_IP_ALIGN), + (length + + NET_IP_ALIGN)); + /* save the skb in buffer_info as good */ + buffer_info->skb = skb; + skb = new_skb; + } + /* else just continue with the old one */ + } + /* end copybreak code */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +static void e1000_put_txbuf(struct e1000_ring *tx_ring, + struct e1000_buffer *buffer_info) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +static void e1000_print_hw_hang(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + print_hang_task); + struct net_device *netdev = adapter->netdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + struct e1000_tx_desc 
*eop_desc = E1000_TX_DESC(*tx_ring, eop); + struct e1000_hw *hw = &adapter->hw; + u16 phy_status, phy_1000t_status, phy_ext_status; + u16 pci_status; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { + /* May be block on write-back, flush and detect again + * flush pending descriptor writebacks to memory + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + /* Due to rare timing issues, write to TIDV again to ensure + * the write is successful + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + adapter->tx_hang_recheck = true; + return; + } + adapter->tx_hang_recheck = false; + + if (er32(TDH(0)) == er32(TDT(0))) { + e_dbg("false hang detected, ignoring\n"); + return; + } + + /* Real hang detected */ + netif_stop_queue(netdev); + + e1e_rphy(hw, MII_BMSR, &phy_status); + e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); + e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); + + pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); + + /* detected Hardware unit hang */ + e_err("Detected Hardware Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n" + "MAC Status <%x>\n" + "PHY Status <%x>\n" + "PHY 1000BASE-T Status <%x>\n" + "PHY Extended Status <%x>\n" + "PCI Status <%x>\n", + readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, + tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), + phy_status, phy_1000t_status, phy_ext_status, pci_status); + + e1000e_dump(adapter); + + /* Suggest workaround for known h/w issue */ + if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) + e_err("Try turning off Tx pause (flow control) via ethtool\n"); +} + +/** + * e1000e_tx_hwtstamp_work - check for Tx time stamp + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. The timestamp must + * be for this skb because only one such packet is allowed in the queue. 
+ */ +static void e1000e_tx_hwtstamp_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + tx_hwtstamp_work); + struct e1000_hw *hw = &adapter->hw; + + if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp; + + txstmp = er32(TXSTMPL); + txstmp |= (u64)er32(TXSTMPH) << 32; + + e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); + + skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } else if (time_after(jiffies, adapter->tx_hwtstamp_start + + adapter->tx_timeout_factor * HZ)) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + e_warn("clearing Tx timestamp hang\n"); + } else { + /* reschedule to check later */ + schedule_work(&adapter->tx_hwtstamp_work); + } +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @tx_ring: Tx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + + dma_rmb(); /* read buffer_info after eop_desc */ + for (; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + } + + e1000_put_txbuf(tx_ring, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + if (i == tx_ring->next_to_use) + break; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (count && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) + schedule_work(&adapter->print_hang_task); + else + adapter->tx_hang_recheck = false; + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @rx_ring: Rx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + skb = buffer_info->skb; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + /* see !EOP comment in other Rx routine */ + if (!(staterr & E1000_RXD_STAT_EOP)) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + e_dbg("Packet Split buffers didn't pick up the full packet\n"); + dev_kfree_skb_irq(skb); + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (!length) { + e_dbg("Last part of the packet spanning multiple descriptors\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + { + /* this looks ugly, but it seems compiler issues make + * it more efficient than reusing j + */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* page alloc/put takes too long and effects small + * packet throughput, so unsplit small packets and + * save the alloc/put only valid in softirq (napi) + * context to call kmap_* + */ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= 
adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = &buffer_info->ps_pages[0]; + + /* there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long + */ + dma_sync_single_for_cpu(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + vaddr = kmap_atomic(ps_page->page); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr); + dma_sync_single_for_device(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + + /* remove the CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + l1 -= 4; + } + + skb_put(skb, l1); + goto copydone; + } /* if */ + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + length = le16_to_cpu(rx_desc->wb.upper.length[j]); + if (!length) + break; + + ps_page = &buffer_info->ps_pages[j]; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + skb_fill_page_desc(skb, j, ps_page->page, 0, length); + ps_page->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive + */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + pskb_trim(skb, skb->len - 4); + } + +copydone: + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + if (rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.middle.vlan); + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length, staterr; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct skb_shared_info *shinfo; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + staterr = 
le32_to_cpu(rx_desc->wb.upper.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + buffer_info->skb = NULL; + + ++i; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->wb.upper.length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((staterr & E1000_RXD_STAT_EOP) && + ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL)))) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb_irq(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } +#define rxtop (rx_ring->rx_skb_top) + if (!(staterr & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); + /* re-use the current skb, we only consumed the + * page + */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->page + */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + e_err("pskb_may_pull failed.\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = 
e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: Rx descriptor ring + **/ +static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct pci_dev *pdev = adapter->pdev; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->clean_rx == e1000_clean_rx_irq) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) + dma_unmap_page(&pdev->dev, buffer_info->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_ps) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (!ps_page->page) + break; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + put_page(ps_page->page); + ps_page->page = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + + writel(0, rx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); +} + +static void e1000e_downshift_workaround(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + downshift_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + /* read ICR disables interrupts using IAM */ + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = true; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RESTART_NOW; + } + /* guard against 
interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl, icr = er32(ICR); + + if (!icr || test_bit(__E1000_DOWN, &adapter->state)) + return IRQ_NONE; /* Not our interrupt */ + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. No need for the + * IMC write + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = true; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); 
+ + if (!(icr & E1000_ICR_INT_ASSERTED)) { + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_OTHER); + return IRQ_NONE; + } + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + + if (icr & E1000_ICR_OTHER) { + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +no_link_interrupt: + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + if (!e1000_clean_tx_irq(tx_ring)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, tx_ring->ims_val); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *rx_ring = adapter->rx_ring; + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (rx_ring->set_itr) { + writel(1000000000 / (rx_ring->itr_val * 256), + rx_ring->itr_register); + rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * e1000_configure_msix - Configure MSI-X hardware + * + * e1000_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void e1000_configure_msix(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_ring *tx_ring = adapter->tx_ring; + int vector = 0; + u32 ctrl_ext, ivar = 0; + + adapter->eiac_mask = 0; + + /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ + if (hw->mac.type == e1000_82574) { + u32 rfctl = er32(RFCTL); + + rfctl |= E1000_RFCTL_ACK_DIS; + ew32(RFCTL, rfctl); + } + + /* Configure Rx vector */ + rx_ring->ims_val = E1000_IMS_RXQ0; + adapter->eiac_mask |= rx_ring->ims_val; + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + rx_ring->itr_register); + else + writel(1, rx_ring->itr_register); + ivar = E1000_IVAR_INT_ALLOC_VALID | vector; + + /* Configure Tx vector */ + tx_ring->ims_val = E1000_IMS_TXQ0; + vector++; + if (tx_ring->itr_val) + writel(1000000000 / (tx_ring->itr_val * 256), + tx_ring->itr_register); + else + writel(1, tx_ring->itr_register); + adapter->eiac_mask |= tx_ring->ims_val; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); + + /* set vector for Other Causes, e.g. 
link changes */ + vector++; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + E1000_EITR_82574(vector)); + else + writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + + /* Cause Tx interrupts on every write back */ + ivar |= (1 << 31); + + ew32(IVAR, ivar); + + /* enable MSI-X PBA support */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask Other interrupts upon ICR read */ + ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); + ctrl_ext |= E1000_CTRL_EXT_EIAME; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) +{ + int err; + int i; + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + if (adapter->flags & FLAG_HAS_MSIX) { + adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(adapter->num_vectors, + sizeof(struct + msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + struct e1000_adapter *a = adapter; + + for (i = 0; i < adapter->num_vectors; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(a->pdev, + a->msix_entries, + a->num_vectors, + a->num_vectors); + if (err > 0) + return; + } + /* MSI-X failed, so fall through and try MSI */ + e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); + e1000e_reset_interrupt_capability(adapter); + } + adapter->int_mode = E1000E_INT_MODE_MSI; + /* Fall through */ + case E1000E_INT_MODE_MSI: + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + } else { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); + } + /* Fall through */ + case E1000E_INT_MODE_LEGACY: + /* Don't do anything; this is the system default */ + break; + } + + /* store the number of vectors being used */ + adapter->num_vectors = 1; +} + +/** + * e1000_request_msix - Initialize MSI-X interrupts + * + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
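As an aside on the per-vector setup this routine performs: each MSI-X vector gets its own request_irq() call with an "<ifname>-rx-0" / "<ifname>-tx-0" label, falling back to the bare interface name when the suffix would not fit. A minimal stand-alone sketch of that naming convention follows; the 21-byte buffer size and the helper name are assumptions, not driver code.

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16            /* kernel interface-name limit */
#define RING_NAME_LEN 21       /* assumption: room for a "-rx-0"/"-tx-0" suffix */

/* Build "eth0-rx-0"-style labels; fall back to the bare interface name when
 * the suffix would not fit, mirroring the strlen() guard used by the driver.
 */
static void build_vector_name(char *dst, const char *ifname, const char *suffix)
{
	if (strlen(ifname) < IFNAMSIZ - 5)
		snprintf(dst, RING_NAME_LEN, "%s-%s", ifname, suffix);
	else
		snprintf(dst, RING_NAME_LEN, "%s", ifname);
}

int main(void)
{
	char rx[RING_NAME_LEN], tx[RING_NAME_LEN];

	build_vector_name(rx, "eth0", "rx-0");
	build_vector_name(tx, "eth0", "tx-0");
	printf("%s / %s\n", rx, tx);   /* eth0-rx-0 / eth0-tx-0 */
	return 0;
}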
+ **/ +static int e1000_request_msix(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->rx_ring->name, + sizeof(adapter->rx_ring->name) - 1, + "%s-rx-0", netdev->name); + else + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + return err; + adapter->rx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); + adapter->rx_ring->itr_val = adapter->itr; + vector++; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->tx_ring->name, + sizeof(adapter->tx_ring->name) - 1, + "%s-tx-0", netdev->name); + else + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + return err; + adapter->tx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); + adapter->tx_ring->itr_val = adapter->itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + e1000_msix_other, 0, netdev->name, netdev); + if (err) + return err; + + e1000_configure_msix(adapter); + + return 0; +} + +/** + * e1000_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->msix_entries) { + err = e1000_request_msix(adapter); + if (!err) + return err; + /* fall back to MSI */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_MSI; + e1000e_set_interrupt_capability(adapter); + } + if (adapter->flags & FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, + netdev->name, netdev); + if (!err) + return err; + + /* fall back to legacy interrupt */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + } + + err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, + netdev->name, netdev); + if (err) + e_err("Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->msix_entries) { + int vector = 0; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + /* Other Causes interrupt vector */ + free_irq(adapter->msix_entries[vector].vector, netdev); + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + if (adapter->msix_entries) + ew32(EIAC_82574, 0); + e1e_flush(); + + if (adapter->msix_entries) { + int i; + + for (i = 0; i < adapter->num_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); + 
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); + } else { + ew32(IMS, IMS_ENABLE_MASK); + } + e1e_flush(); +} + +/** + * e1000e_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. + **/ +void e1000e_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000e_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) i + * of the f/w this means that the network i/f is closed. + * + **/ +void e1000e_release_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware taken over control of h/w */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000_alloc_ring_dma - allocate memory for a ring structure + **/ +static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + struct e1000_ring *ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +/** + * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: Tx descriptor ring + * + * Return 0 on success, negative on failure + **/ +int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + int err = -ENOMEM, size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, tx_ring); + if (err) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + e_err("Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring + * + * Returns 0 on success, negative on failure + **/ +int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_buffer *buffer_info; + int i, size, desc_len, err = -ENOMEM; + + size = sizeof(struct e1000_buffer) * rx_ring->count; + rx_ring->buffer_info = 
vzalloc(size); + if (!rx_ring->buffer_info) + goto err; + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, + sizeof(struct e1000_ps_page), + GFP_KERNEL); + if (!buffer_info->ps_pages) + goto err_pages; + } + + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, rx_ring); + if (err) + goto err_pages; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + + return 0; + +err_pages: + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + kfree(buffer_info->ps_pages); + } +err: + vfree(rx_ring->buffer_info); + e_err("Unable to allocate memory for the receive descriptor ring\n"); + return err; +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @tx_ring: Tx descriptor ring + **/ +static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(tx_ring, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, tx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring + * + * Free all transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @rx_ring: Rx descriptor ring + * + * Free all receive software resources + **/ +void e1000e_free_rx_resources(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + int i; + + e1000_clean_rx_ring(rx_ring); + + for (i = 0; i < rx_ring->count; i++) + kfree(rx_ring->buffer_info[i].ps_pages); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
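To make the throttling numbers concrete: the ITR values chosen by this heuristic are interrupt rates in interrupts per second, while the hardware register is programmed with the interval between interrupts in 256 ns units, i.e. 10^9 / (rate * 256), which is the same conversion the driver applies when writing the register. A minimal sketch of that conversion; the helper name and the printed operating points are illustrative only.

#include <stdio.h>

/* Convert a target interrupt rate (interrupts/second) into the interval
 * value the throttling register expects, in 256-nanosecond units.
 * A rate of 0 means "no throttling" and is written as 0.
 */
static unsigned int itr_rate_to_interval(unsigned int ints_per_sec)
{
	return ints_per_sec ? 1000000000u / (ints_per_sec * 256u) : 0;
}

int main(void)
{
	/* the three operating points used by the classification heuristic */
	printf("70000 ints/s -> %u\n", itr_rate_to_interval(70000)); /* 55  */
	printf("20000 ints/s -> %u\n", itr_rate_to_interval(20000)); /* 195 */
	printf(" 4000 ints/s -> %u\n", itr_rate_to_interval(4000));  /* 976 */
	return 0;
}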
This functionality is controlled + * by the InterruptThrottleRate module parameter. + **/ +static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + return itr_setting; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes / packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + u16 current_itr; + u32 new_itr = adapter->itr; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + if (adapter->flags2 & FLAG2_DISABLE_AIM) { + new_itr = 0; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + /* counts and packets in update_itr are dependent on these numbers */ + switch (current_itr) { + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? + min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = new_itr; + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + e1000e_write_itr(adapter, new_itr); + } +} + +/** + * e1000e_write_itr - write the ITR value to the appropriate registers + * @adapter: address of board private structure + * @itr: new ITR value to program + * + * e1000e_write_itr determines if the adapter is in MSI-X mode + * and, if so, writes the EITR registers with the ITR value. + * Otherwise, it writes the ITR value into the ITR register. + **/ +void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) +{ + struct e1000_hw *hw = &adapter->hw; + u32 new_itr = itr ? 
1000000000 / (itr * 256) : 0; + + if (adapter->msix_entries) { + int vector; + + for (vector = 0; vector < adapter->num_vectors; vector++) + writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); + } else { + ew32(ITR, new_itr); + } +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + int size = sizeof(struct e1000_ring); + + adapter->tx_ring = kzalloc(size, GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + adapter->tx_ring->count = adapter->tx_ring_count; + adapter->tx_ring->adapter = adapter; + + adapter->rx_ring = kzalloc(size, GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + adapter->rx_ring->count = adapter->rx_ring_count; + adapter->rx_ring->adapter = adapter; + + return 0; +err: + e_err("Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** + * e1000e_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @weight: number of packets driver is allowed to process this poll + **/ +static int e1000e_poll(struct napi_struct *napi, int weight) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + struct e1000_hw *hw = &adapter->hw; + struct net_device *poll_dev = adapter->netdev; + int tx_cleaned = 1, work_done = 0; + + adapter = netdev_priv(poll_dev); + + if (!adapter->msix_entries || + (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); + + adapter->clean_rx(adapter->rx_ring, &work_done, weight); + + if (!tx_cleaned) + work_done = weight; + + /* If weight not fully consumed, exit the polling mode */ + if (work_done < weight) { + if (adapter->itr_setting & 3) + e1000_set_itr(adapter); + napi_complete(napi); + if (!test_bit(__E1000_DOWN, &adapter->state)) { + if (adapter->msix_entries) + ew32(IMS, adapter->rx_ring->ims_val); + else + e1000_irq_enable(adapter); + } + } + + return work_done; +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return 0; + + /* add VID to filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000e_release_hw_control(adapter); + return 0; + } + + /* remove VID from filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +/** + * e1000e_vlan_filter_disable - helper to 
disable hw VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); + ew32(RCTL, rctl); + + if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } +} + +/** + * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + } +} + +/** + * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +/** + * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +static void e1000_init_manageability_pt(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h, mdef, i, j; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS + */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); + + switch (hw->mac.type) { + default: + manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); + break; + case e1000_82574: + case e1000_82583: + /* Check if IPMI pass-through decision filter already exists; + * if so, enable it. 
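The scan that follows accumulates the port bits of every usable decision filter and only creates a new filter when ports 623 and 664 are not already both covered. A compressed stand-alone sketch of that accumulate-then-check pattern; the register file is replaced by a plain array and the port bit values are placeholders, not the real MDEF encodings.

#include <stdio.h>

#define PORT_623_BIT (1u << 0)   /* placeholder for E1000_MDEF_PORT_623 */
#define PORT_664_BIT (1u << 1)   /* placeholder for E1000_MDEF_PORT_664 */
#define WANTED       (PORT_623_BIT | PORT_664_BIT)

int main(void)
{
	/* stand-in for the eight MDEF decision-filter registers */
	unsigned int mdef[8] = { 0, PORT_623_BIT, 0, PORT_664_BIT, 0, 0, 0, 0 };
	unsigned int covered = 0, manc2h = 0;
	int i;

	for (i = 0; i < 8; i++) {
		/* skip filters that match anything beyond the IPMI ports */
		if (mdef[i] & ~WANTED)
			continue;
		if (mdef[i])
			manc2h |= 1u << i;   /* route this filter to the host */
		covered |= mdef[i];
	}

	if (covered == WANTED)
		printf("both IPMI ports already filtered, manc2h=0x%x\n", manc2h);
	else
		printf("need to program a new decision filter\n");
	return 0;
}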
+ */ + for (i = 0, j = 0; i < 8; i++) { + mdef = er32(MDEF(i)); + + /* Ignore filters with anything other than IPMI ports */ + if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + continue; + + /* Enable this decision filter in MANC2H */ + if (mdef) + manc2h |= (1 << i); + + j |= mdef; + } + + if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + break; + + /* Create new decision filter in an empty filter */ + for (i = 0, j = 0; i < 8; i++) + if (er32(MDEF(i)) == 0) { + ew32(MDEF(i), (E1000_MDEF_PORT_623 | + E1000_MDEF_PORT_664)); + manc2h |= (1 << 1); + j++; + break; + } + + if (!j) + e_warn("Unable to create IPMI pass-through filter\n"); + break; + } + + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tctl, tarc; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDLEN(0), tdlen); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); + tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* Tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + u32 txdctl = er32(TXDCTL(0)); + + txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | + E1000_TXDCTL_WTHRESH); + /* set up some performance related parameters to encourage the + * hardware to use the bus more efficiently in bursts, depends + * on the tx_int_delay to be enabled, + * wthresh = 1 ==> burst write is disabled to avoid Tx stalls + * hthresh = 1 ==> prefetch when one or more available + * pthresh = 0x1f ==> prefetch if internal cache 31 or less + * BEWARE: this seems to work but should be considered first if + * there are Tx hangs or other Tx related bugs + */ + txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; + ew32(TXDCTL(0), txdctl); + } + /* erratum work around: set txdctl the same for both queues */ + ew32(TXDCTL(1), er32(TXDCTL(0))); + + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC(0)); + /* set the speed mode bit, we'll clear it if we're not at + * gigabit link later + */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC(0), tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC(0)); + tarc |= 1; + ew32(TARC(0), tarc); + tarc = er32(TARC(1)); + tarc |= 1; + ew32(TARC(1), tarc); + } + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + ew32(TCTL, tctl); + + hw->mac.ops.config_collision_dist(hw); + + /* SPT Si errata workaround to avoid data corruption */ + 
if (hw->mac.type == e1000_pch_spt) { + u32 reg_val; + + reg_val = er32(IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + ew32(IOSFPC, reg_val); + + reg_val = er32(TARC(0)); + reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + ew32(TARC(0), reg_val); + } +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + u32 pages = 0; + + /* Workaround Si errata on PCHx - configure jumbo frame flow. + * If jumbo frames not set, program related MAC/PHY registers + * to h/w defaults + */ + if (hw->mac.type >= e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable|disable jumbo frame workaround mode\n"); + } + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Some systems expect that the CRC is included in SMBUS traffic. The + * hardware strips the CRC before sending to both SMBUS (BMC) and to + * host memory when this is enabled + */ + if (adapter->flags2 & FLAG2_CRC_STRIPPING) + rctl |= E1000_RCTL_SECRC; + + /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ + if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { + u16 phy_data; + + e1e_rphy(hw, PHY_REG(770, 26), &phy_data); + phy_data &= 0xfff8; + phy_data |= (1 << 2); + e1e_wphy(hw, PHY_REG(770, 26), phy_data); + + e1e_rphy(hw, 22, &phy_data); + phy_data &= 0x0fff; + phy_data |= (1 << 14); + e1e_wphy(hw, 0x10, 0x2823); + e1e_wphy(hw, 0x11, 0x0003); + e1e_wphy(hw, 22, phy_data); + } + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* Enable Extended Status in all Receive Descriptors */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + ew32(RFCTL, rfctl); + + /* 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. + * + * allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames + * + * Using pages when the page size is greater than 16k wastes + * a lot of memory, since we allocate 3 pages at all times + * per packet. 
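The PAGE_USE_COUNT() macro defined above is a plain ceiling division by the page size, and its result decides below whether packet split is worthwhile. A quick stand-alone check of the values it produces for common MTUs, assuming 4 KiB pages.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)        /* assume 4 KiB pages */

/* same shape as the driver's PAGE_USE_COUNT(): pages needed to hold S bytes */
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

int main(void)
{
	printf("MTU 1500 -> %u page(s)\n", PAGE_USE_COUNT(1500u)); /* 1 */
	printf("MTU 9000 -> %u page(s)\n", PAGE_USE_COUNT(9000u)); /* 3 */
	/* packet split is only enabled when this count is <= 3 and
	 * jumbo frames (LPE) are in use
	 */
	return 0;
}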
+ */ + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; + + if (adapter->rx_ps_pages) { + u32 psrctl = 0; + + /* Enable Packet split descriptors */ + rctl |= E1000_RCTL_DTYP_PS; + + psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; + /* fall-through */ + case 2: + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; + /* fall-through */ + case 1: + psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + ew32(PSRCTL, psrctl); + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); + /* just started the receive unit, no need to restart */ + adapter->flags &= ~FLAG_RESTART_NOW; +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = rx_ring->count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { + rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + /* set the writeback threshold (only takes effect if the RDTR + * is set). 
set GRAN=1 and write back up to 0x4 worth, and + * enable prefetching of 0x20 Rx descriptors + * granularity = 01 + * wthresh = 04, + * hthresh = 04, + * pthresh = 0x20 + */ + ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); + ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); + + /* override the delay timers for enabling bursting, only if + * the value was not set by the user via module options + */ + if (adapter->rx_int_delay == DEFAULT_RDTR) + adapter->rx_int_delay = BURST_RDTR; + if (adapter->rx_abs_int_delay == DEFAULT_RADV) + adapter->rx_abs_int_delay = BURST_RADV; + } + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if ((adapter->itr_setting != 0) && (adapter->itr != 0)) + e1000e_write_itr(adapter, adapter->itr); + + ctrl_ext = er32(CTRL_EXT); + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + ew32(IAM, 0xffffffff); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rdlen); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); + rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->netdev->features & NETIF_F_RXCSUM) + rxcsum |= E1000_RXCSUM_TUOFL; + else + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + + /* With jumbo frames, excessive C-state transition latencies result + * in dropped transactions. + */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + u32 lat = + ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - + adapter->max_frame_size) * 8 / 1000; + + if (adapter->flags & FLAG_IS_ICH) { + u32 rxdctl = er32(RXDCTL(0)); + + ew32(RXDCTL(0), rxdctl | 0x3); + } + + pm_qos_update_request(&adapter->pm_qos_req, lat); + } else { + pm_qos_update_request(&adapter->pm_qos_req, + PM_QOS_DEFAULT_VALUE); + } + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000e_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + */ +static int e1000e_write_mc_addr_list(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + hw->mac.ops.update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* update_mc_addr_list expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +/** + * e1000e_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
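A compressed sketch of the entry-budget check described here: one receive-address register is always kept for the device's own MAC address and, when the LAA-reset workaround is active, one more is held back; if the secondary unicast list does not fit, the caller falls back to unicast promiscuous mode. The helper name and the example counts are illustrative assumptions.

#include <stdio.h>

/* Returns the number of usable RAR entries, or -1 when the unicast list
 * cannot fit (reported by the driver as -ENOMEM).
 */
static int rar_entries_available(int rar_total, int has_laa_workaround,
				 int uc_count)
{
	int avail = rar_total - 1 - (has_laa_workaround ? 1 : 0);

	return (uc_count > avail) ? -1 : avail;
}

int main(void)
{
	/* e.g. 15 RAR registers, LAA workaround active */
	printf("%d\n", rar_entries_available(15, 1, 12)); /* 13 -> fits */
	printf("%d\n", rar_entries_available(15, 1, 14)); /* -1 -> too many */
	return 0;
}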
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int e1000e_write_uc_addr_list(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int rar_entries; + int count = 0; + + rar_entries = hw->mac.ops.rar_get_count(hw); + + /* save a rar entry for our hardware address */ + rar_entries--; + + /* save a rar entry for the LAA workaround */ + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) + rar_entries--; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + + /* write the addresses in reverse order to avoid write + * combining + */ + netdev_for_each_uc_addr(ha, netdev) { + int rval; + + if (!rar_entries) + break; + rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); + if (rval < 0) + return -ENOMEM; + count++; + } + } + + /* zero out the remaining RAR entries not used above */ + for (; rar_entries > 0; rar_entries--) { + ew32(RAH(rar_entries), 0); + ew32(RAL(rar_entries), 0); + } + e1e_flush(); + + return count; +} + +/** + * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The ndo_set_rx_mode entry point is called whenever the unicast or multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000e_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (pm_runtime_suspended(netdev->dev.parent)) + return; + + /* Check for Promiscuous and All Multicast modes */ + rctl = er32(RCTL); + + /* clear the affected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + /* Do not hardware filter VLANs in promisc mode */ + e1000e_vlan_filter_disable(adapter); + } else { + int count; + + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = e1000e_write_mc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_MPE; + } + e1000e_vlan_filter_enable(adapter); + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = e1000e_write_uc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_UPE; + } + + ew32(RCTL, rctl); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + e1000e_vlan_strip_enable(adapter); + else + e1000e_vlan_strip_disable(adapter); +} + +static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 rss_key[10]; + int i; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i < 10; i++) + ew32(RSSRK(i), rss_key[i]); + + /* Direct all traffic to queue 0 */ + for (i = 0; i < 32; i++) + ew32(RETA(i), 0); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
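To make the RSS programming above concrete: the hash key is 40 bytes written as ten 32-bit words, and clearing all 32 redirection-table registers steers every hash bucket to Rx queue 0, matching the single-queue setup of this driver. A stand-alone sketch with the register writes replaced by arrays; the random fill stands in for what the stack provides via netdev_rss_key_fill() and is only an assumption.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define RSS_KEY_WORDS 10   /* 10 x 32-bit words = 40-byte hash key */
#define RETA_REGS     32   /* redirection-table registers cleared by the driver */

int main(void)
{
	uint32_t rss_key[RSS_KEY_WORDS];
	uint32_t reta[RETA_REGS];
	int i;

	/* fill the key with random material (the driver asks the stack for this) */
	for (i = 0; i < RSS_KEY_WORDS; i++)
		rss_key[i] = (uint32_t)rand();

	/* all-zero redirection registers: every hash bucket maps to Rx queue 0 */
	for (i = 0; i < RETA_REGS; i++)
		reta[i] = 0;

	printf("key[0]=0x%08x, %d redirection registers -> queue 0\n",
	       (unsigned int)rss_key[0], RETA_REGS);
	return 0;
}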
+ */ + rxcsum = er32(RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + ew32(RXCSUM, rxcsum); + + mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + + ew32(MRQC, mrqc); +} + +/** + * e1000e_get_base_timinca - get default SYSTIM time increment attributes + * @adapter: board private structure + * @timinca: pointer to returned time increment attributes + * + * Get attributes for incrementing the System Time Register SYSTIML/H at + * the default base frequency, and set the cyclecounter shift value. + **/ +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) +{ + struct e1000_hw *hw = &adapter->hw; + u32 incvalue, incperiod, shift; + + /* Make sure clock is enabled on I217/I218/I219 before checking + * the frequency + */ + if (((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && + !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { + u32 fextnvm7 = er32(FEXTNVM7); + + if (!(fextnvm7 & (1 << 0))) { + ew32(FEXTNVM7, fextnvm7 | (1 << 0)); + e1e_flush(); + } + } + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + /* On I217, I218 and I219, the clock frequency is 25MHz + * or 96MHz as indicated by the System Clock Frequency + * Indication + */ + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + break; + default: + return -EINVAL; + } + + *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | + ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); + + return 0; +} + +/** + * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable + * @adapter: board private structure + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". 
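As a counterpart to the configuration path below, this is roughly how an application requests the hardware time stamping handled here, via the standard SIOCSHWTSTAMP ioctl; the interface name "eth0" is an assumption and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;                  /* stamp outgoing packets */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;  /* all PTPv2 event packets */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumption: device name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}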
+ **/ +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 rxmtrl = 0; + u16 rxudp = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + s32 ret_val; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return -EINVAL; + + /* flags reserved for future extensions - must be zero */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + /* Also time stamps V2 L2 Path Delay Request/Response */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + /* Also time stamps V2 L2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* Hardware cannot filter just V2 L4 Sync messages; + * fall-through to V2 (both L2 and L4) Sync. + */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* Hardware cannot filter just V2 L4 Delay Request messages; + * fall-through to V2 (both L2 and L4) Delay Request. + */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + /* Hardware cannot filter just V2 L4 or L2 Event messages; + * fall-through to all V2 (both L2 and L4) Events. + */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* For V1, the hardware can only filter Sync messages or + * Delay Request messages but not both so fall-through to + * time stamp all packets. 
+ */ + case HWTSTAMP_FILTER_ALL: + is_l2 = true; + is_l4 = true; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + adapter->hwtstamp_config = *config; + + /* enable/disable Tx h/w time stamping */ + regval = er32(TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + ew32(TSYNCTXCTL, regval); + if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != + (regval & E1000_TSYNCTXCTL_ENABLED)) { + e_err("Timesync Tx Control register not set as expected\n"); + return -EAGAIN; + } + + /* enable/disable Rx h/w time stamping */ + regval = er32(TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + ew32(TSYNCRXCTL, regval); + if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK)) != + (regval & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK))) { + e_err("Timesync Rx Control register not set as expected\n"); + return -EAGAIN; + } + + /* L2: define ethertype filter for time stamped packets */ + if (is_l2) + rxmtrl |= ETH_P_1588; + + /* define which PTP packets get time stamped */ + ew32(RXMTRL, rxmtrl); + + /* Filter by destination port */ + if (is_l4) { + rxudp = PTP_EV_PORT; + cpu_to_be16s(&rxudp); + } + ew32(RXUDP, rxudp); + + e1e_flush(); + + /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ + er32(RXSTMPH); + er32(TXSTMPH); + + /* Get and set the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, ®val); + if (ret_val) + return ret_val; + ew32(TIMINCA, regval); + + /* reset the ns time counter */ + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + return 0; +} + +/** + * e1000_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + + e1000e_set_rx_mode(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability_pt(adapter); + + e1000_configure_tx(adapter); + + if (adapter->netdev->features & NETIF_F_RXHASH) + e1000e_setup_rss_hash(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_up) + adapter->hw.phy.ops.power_up(&adapter->hw); + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down. + * The PHY cannot be powered down if management or WoL is active. + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_down) + adapter->hw.phy.ops.power_down(&adapter->hw); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. 
After reset the device needs to be + * properly configured for Rx, Tx etc. + */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_fc_info *fc = &adapter->hw.fc; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u32 pba = adapter->pba; + u16 hwm; + + /* reset Packet Buffer Allocation to default */ + ew32(PBA, pba); + + if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if ((tx_space < min_tx_space) && + ((min_tx_space - tx_space) < pba)) { + pba -= min_tx_space - tx_space; + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + + ew32(PBA, pba); + } + + /* flow control settings + * + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus one full frame + */ + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = true; + fc->current_mode = fc->requested_mode; + + switch (hw->mac.type) { + case e1000_ich9lan: + case e1000_ich10lan: + if (adapter->netdev->mtu > ETH_DATA_LEN) { + pba = 14; + ew32(PBA, pba); + fc->high_water = 0x2800; + fc->low_water = fc->high_water - 8; + break; + } + /* fall-through */ + default: + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); + + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + break; + case e1000_pchlan: + /* Workaround PCH LOM adapter hangs with certain network + * loads. If hangs persist, try disabling Tx flow control. 
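For the generic branch just above, a worked version of the high/low water-mark arithmetic: the high mark is the lower of 90% of the Rx packet-buffer size and the buffer size minus one maximum frame, rounded down to 8-byte granularity, and the low mark sits 8 bytes beneath it. The granularity mask below is a simplification standing in for the FCRTH field mask; the 20 KB / 1522-byte example values are illustrative.

#include <stdio.h>

/* Simplified high-water-mark selection for the default MAC types: keep one
 * full frame of headroom, cap at 90% of the Rx buffer, round down to 8 bytes.
 */
static unsigned int fc_high_water(unsigned int pba_kbytes, unsigned int max_frame)
{
	unsigned int rx_bytes = pba_kbytes << 10;
	unsigned int hwm = rx_bytes * 9 / 10;

	if (rx_bytes - max_frame < hwm)
		hwm = rx_bytes - max_frame;

	return hwm & ~7u;
}

int main(void)
{
	unsigned int high = fc_high_water(20, 1522);

	printf("high water = %u bytes, low water = %u bytes\n",
	       high, high - 8);   /* 18432 and 18424 */
	return 0;
}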
+ */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + fc->high_water = 0x3500; + fc->low_water = 0x1500; + } else { + fc->high_water = 0x5000; + fc->low_water = 0x3000; + } + fc->refresh_time = 0x1000; + break; + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + fc->refresh_time = 0x0400; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) { + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + break; + } + + pba = 14; + ew32(PBA, pba); + fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; + fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; + break; + } + + /* Alignment of Tx data is on an arbitrary byte boundary with the + * maximum size per Tx descriptor limited only to the transmit + * allocation of the packet buffer minus 96 bytes with an upper + * limit of 24KB due to receive synchronization limitations. + */ + adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, + 24 << 10); + + /* Disable Adaptive Interrupt Moderation if 2 full packets cannot + * fit in receive buffer. + */ + if (adapter->itr_setting & 0x3) { + if ((adapter->max_frame_size * 2) > (pba << 10)) { + if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate off\n"); + adapter->flags2 |= FLAG2_DISABLE_AIM; + e1000e_write_itr(adapter, 0); + } + } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate on\n"); + adapter->flags2 &= ~FLAG2_DISABLE_AIM; + adapter->itr = 20000; + e1000e_write_itr(adapter, adapter->itr); + } + } + + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + + /* For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + e_err("Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + + /* initialize systim and reset the ns time counter */ + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); + + /* Set EEE advertisement as appropriate */ + if (adapter->flags2 & FLAG2_HAS_EEE) { + s32 ret_val; + u16 adv_addr; + + switch (hw->phy.type) { + case e1000_phy_82579: + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + dev_err(&adapter->pdev->dev, + "Invalid PHY type setting EEE advertisement\n"); + return; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + dev_err(&adapter->pdev->dev, + "EEE advertisement - unable to acquire PHY\n"); + return; + } + + e1000_write_emi_reg_locked(hw, adv_addr, + hw->dev_spec.ich8lan.eee_disable ? 
+ 0 : adapter->eee_advert); + + hw->phy.ops.release(hw); + } + + if (!netif_running(adapter->netdev) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000_power_down_phy(adapter); + + e1000_get_phy_info(hw); + + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && + !(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed + */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + if (adapter->msix_entries) + e1000_configure_msix(adapter); + e1000_irq_enable(adapter); + + netif_start_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; +} + +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); + + /* due to rare timing issues, write to TIDV/RDTR again to ensure the + * write is successful + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + +static void e1000e_update_stats(struct e1000_adapter *adapter); + +/** + * e1000e_down - quiesce the device and optionally reset the hardware + * @adapter: board private structure + * @reset: boolean flag to reset the hardware or not + */ +void e1000e_down(struct e1000_adapter *adapter, bool reset) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__E1000_DOWN, &adapter->state); + + netif_carrier_off(netdev); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + usleep_range(10000, 20000); + + e1000_irq_disable(adapter); + + napi_synchronize(&adapter->napi); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + e1000e_flush_descriptors(adapter); + e1000_clean_tx_ring(adapter->tx_ring); + e1000_clean_rx_ring(adapter->rx_ring); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Disable Si errata workaround on PCHx for jumbo frame flow */ + if ((hw->mac.type >= e1000_pch2lan) && + (adapter->netdev->mtu > ETH_DATA_LEN) && + e1000_lv_jumbo_workaround_ich8lan(hw, false)) + e_dbg("failed to disable jumbo 
frame workaround mode\n"); + + if (reset && !pci_channel_offline(adapter->pdev)) + e1000e_reset(adapter); +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + e1000e_down(adapter, true); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) + * @cc: cyclecounter structure + **/ +static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) +{ + struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, + cc); + struct e1000_hw *hw = &adapter->hw; + cycle_t systim, systim_next; + + /* latch SYSTIMH on read of SYSTIML */ + systim = (cycle_t)er32(SYSTIML); + systim |= (cycle_t)er32(SYSTIMH) << 32; + + if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { + u64 incvalue, time_delta, rem, temp; + int i; + + /* errata for 82574/82583 possible bad bits read from SYSTIMH/L + * check to see that the time is incrementing at a reasonable + * rate and is a multiple of incvalue + */ + incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; + for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { + /* latch SYSTIMH on read of SYSTIML */ + systim_next = (cycle_t)er32(SYSTIML); + systim_next |= (cycle_t)er32(SYSTIMH) << 32; + + time_delta = systim_next - systim; + temp = time_delta; + rem = do_div(temp, incvalue); + + systim = systim_next; + + if ((time_delta < E1000_82574_SYSTIM_EPSILON) && + (rem == 0)) + break; + } + } + return systim; +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + adapter->tx_ring_count = E1000_DEFAULT_TXD; + adapter->rx_ring_count = E1000_DEFAULT_RXD; + + spin_lock_init(&adapter->stats64_lock); + + e1000e_set_interrupt_capability(adapter); + + if (e1000_alloc_queues(adapter)) + return -ENOMEM; + + /* Setup hardware time stamping cyclecounter */ + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + adapter->cc.read = e1000e_cyclecounter_read; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.mult = 1; + /* cc.shift set in e1000e_get_base_tininca() */ + + spin_lock_init(&adapter->systim_lock); + INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); + } + + /* Explicitly disable IRQ since the NIC can be in any state. 
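As an aside, a compact sketch of the acceptance test used in the 82574/82583 SYSTIM re-read loop above: a sample is trusted only when the delta from the previous latch is below the epsilon bound and an exact multiple of the TIMINCA increment. Plain `%` stands in here for the kernel's do_div().

        static int systim_delta_plausible(unsigned long long delta,
                                          unsigned long long incvalue,
                                          unsigned long long epsilon)
        {
                if (!incvalue)          /* guard the sketch against divide-by-zero */
                        return 0;

                return (delta < epsilon) && (delta % incvalue == 0);
        }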
*/ + e1000_irq_disable(adapter); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; +} + +/** + * e1000_intr_msi_test - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + e_dbg("icr is %08X\n", icr); + if (icr & E1000_ICR_RXSEQ) { + adapter->flags &= ~FLAG_MSI_TEST_FAILED; + /* Force memory writes to complete before acknowledging the + * interrupt is handled. + */ + wmb(); + } + + return IRQ_HANDLED; +} + +/** + * e1000_test_msi_interrupt - Returns 0 for successful test + * @adapter: board private struct + * + * code flow taken from tg3.c + **/ +static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* poll_enable hasn't been called yet, so don't need disable */ + /* clear any pending events */ + er32(ICR); + + /* free the real vector and request a test handler */ + e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); + + /* Assume that the test fails, if it succeeds then the test + * MSI irq handler will unset this flag + */ + adapter->flags |= FLAG_MSI_TEST_FAILED; + + err = pci_enable_msi(adapter->pdev); + if (err) + goto msi_test_failed; + + err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, + netdev->name, netdev); + if (err) { + pci_disable_msi(adapter->pdev); + goto msi_test_failed; + } + + /* Force memory writes to complete before enabling and firing an + * interrupt. + */ + wmb(); + + e1000_irq_enable(adapter); + + /* fire an unusual interrupt on the test handler */ + ew32(ICS, E1000_ICS_RXSEQ); + e1e_flush(); + msleep(100); + + e1000_irq_disable(adapter); + + rmb(); /* read flags after interrupt has been fired */ + + if (adapter->flags & FLAG_MSI_TEST_FAILED) { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_info("MSI interrupt test failed, using legacy interrupt.\n"); + } else { + e_dbg("MSI interrupt test succeeded!\n"); + } + + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + +msi_test_failed: + e1000e_set_interrupt_capability(adapter); + return e1000_request_irq(adapter); +} + +/** + * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored + * @adapter: board private struct + * + * code flow taken from tg3.c, called with e1000 interrupts disabled. + **/ +static int e1000_test_msi(struct e1000_adapter *adapter) +{ + int err; + u16 pci_cmd; + + if (!(adapter->flags & FLAG_MSI_ENABLED)) + return 0; + + /* disable SERR in case the MSI write causes a master abort */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = e1000_test_msi_interrupt(adapter); + + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } + + return err; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter->tx_ring); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter->rx_ring); + if (err) + goto err_setup_rx; + + /* If AMT is enabled, let the firmware know that the network + * interface is now open and reset the part to a known state. + */ + if (adapter->flags & FLAG_HAS_AMT) { + e1000e_get_hw_control(adapter); + e1000e_reset(adapter); + } + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* DMA latency requirement to workaround jumbo issue */ + pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Work around PCIe errata with MSI interrupts causing some chipsets to + * ignore e1000e MSI messages, which means we need to test our MSI + * interrupt now + */ + if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { + err = e1000_test_msi(adapter); + if (err) { + e_err("Interrupt allocation failed\n"); + goto err_req_irq; + } + } + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + adapter->tx_hang_recheck = false; + netif_start_queue(netdev); + + hw->mac.get_link_status = true; + pm_runtime_put(&pdev->dev); + + /* fire a link status change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + e1000e_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter->rx_ring); +err_setup_rx: + e1000e_free_tx_resources(adapter->tx_ring); +err_setup_tx: + e1000e_reset(adapter); + pm_runtime_put_sync(&pdev->dev); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
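As an aside, the error path of e1000_open() above uses the label-per-stage unwind idiom: each failure jumps to a label that releases only what was already acquired. A minimal sketch with hypothetical alloc_a()/alloc_b()/free_a() stand-ins (not driver functions):

        static int alloc_a(void) { return 0; }
        static int alloc_b(void) { return -1; }  /* pretend the second step fails */
        static void free_a(void) { }

        static int example_open_unwind(void)
        {
                int err;

                err = alloc_a();
                if (err)
                        goto err_a;
                err = alloc_b();
                if (err)
                        goto err_b;
                return 0;

        err_b:
                free_a();               /* undo only what already succeeded */
        err_a:
                return err;
        }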
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + pm_runtime_get_sync(&pdev->dev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) { + e1000e_down(adapter, true); + e1000_free_irq(adapter); + + /* Link status message must follow this format */ + pr_info("%s NIC Link is Down\n", adapter->netdev->name); + } + + napi_disable(&adapter->napi); + + e1000e_free_tx_resources(adapter->tx_ring); + e1000e_free_rx_resources(adapter->rx_ring); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + + /* If AMT is enabled, let the firmware know that the network + * interface is now closed + */ + if ((adapter->flags & FLAG_HAS_AMT) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000e_release_hw_control(adapter); + + pm_qos_remove_request(&adapter->pm_qos_req); + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { + /* activate the work around */ + e1000e_set_laa_state_82571(&adapter->hw, 1); + + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. Eventually the LAA will be in RAR[0] and + * RAR[14] + */ + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + return 0; +} + +/** + * e1000e_update_phy_task - work thread to update phy + * @work: pointer to our work struct + * + * this worker thread exists because we must acquire a + * semaphore to read the phy, which we could msleep while + * waiting for it, and we can't msleep in a timer. 
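As an aside, a self-contained approximation of the check is_valid_ether_addr() applies to the new address in e1000_set_mac() above: reject group (multicast/broadcast) addresses and the all-zero address. The exact kernel semantics are assumed here, and the helper name is illustrative.

        static int looks_like_valid_ether_addr(const unsigned char addr[6])
        {
                unsigned int i, nonzero = 0;

                if (addr[0] & 0x01)     /* group bit set: multicast/broadcast */
                        return 0;

                for (i = 0; i < 6; i++)
                        nonzero |= addr[i];

                return nonzero != 0;    /* all-zero addresses are rejected too */
        }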
+ **/ +static void e1000e_update_phy_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + update_phy_task); + struct e1000_hw *hw = &adapter->hw; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000_get_phy_info(hw); + + /* Enable EEE on 82579 after link up */ + if (hw->phy.type >= e1000_phy_82579) + e1000_set_eee_pchlan(hw); +} + +/** + * e1000_update_phy_info - timre call-back to update PHY info + * @data: pointer to adapter cast into an unsigned long + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + **/ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + schedule_work(&adapter->update_phy_task); +} + +/** + * e1000e_update_phy_stats - Update the PHY statistics counters + * @adapter: board private structure + * + * Read/clear the upper 16-bit PHY registers and read/accumulate lower + **/ +static void e1000e_update_phy_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val; + u16 phy_data; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + /* A page set is expensive so check if already on desired page. + * If not, set to the page with the PHY status registers. + */ + hw->phy.addr = 1; + ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + &phy_data); + if (ret_val) + goto release; + if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + } + + /* Single Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.scc += phy_data; + + /* Excessive Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.ecol += phy_data; + + /* Multiple Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.mcc += phy_data; + + /* Late Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.latecol += phy_data; + + /* Collision Count - also used for adaptive IFS */ + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + if (!ret_val) + hw->mac.collision_delta = phy_data; + + /* Defer Count */ + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.dc += phy_data; + + /* Transmit with no CRS */ + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); + if (!ret_val) + adapter->stats.tncrs += phy_data; + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000e_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +static void e1000e_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* Prevent 
stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorc += er32(GORCL); + er32(GORCH); /* Clear gorc */ + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.mpc += er32(MPC); + + /* Half-duplex statistics */ + if (adapter->link_duplex == HALF_DUPLEX) { + if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { + e1000e_update_phy_stats(adapter); + } else { + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + + hw->mac.collision_delta = er32(COLC); + + if ((hw->mac.type != e1000_82574) && + (hw->mac.type != e1000_82583)) + adapter->stats.tncrs += er32(TNCRS); + } + adapter->stats.colc += hw->mac.collision_delta; + } + + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotc += er32(GOTCL); + er32(GOTCH); /* Clear gotc */ + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->mac.tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->mac.tx_packet_delta; + + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + netdev->stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + + /* Correctable ECC Errors */ + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + } +} + +/** + * e1000_phy_read_status - Update the PHY register status snapshot + * @adapter: board private structure + **/ +static void e1000_phy_read_status(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_regs 
*phy = &adapter->phy_regs; + + if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) && + (er32(STATUS) & E1000_STATUS_LU) && + (adapter->hw.phy.media_type == e1000_media_type_copper)) { + int ret_val; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); + ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); + ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); + ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); + ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); + ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); + ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); + if (ret_val) + e_warn("Error reading PHY register\n"); + } else { + /* Do not read PHY registers if link is not up + * Set values to typical power-on defaults + */ + phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); + phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | + BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | + BMSR_ERCAP); + phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | + ADVERTISE_ALL | ADVERTISE_CSMA); + phy->lpa = 0; + phy->expansion = EXPANSION_ENABLENPAGE; + phy->ctrl1000 = ADVERTISE_1000FULL; + phy->stat1000 = 0; + phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); + } +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + /* Link status message must follow this format for user tools */ + pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : + (ctrl & E1000_CTRL_RFCE) ? "Rx" : + (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); +} + +static bool e1000e_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + s32 ret_val = 0; + + /* get_link_status is set on LSC (link status) interrupt or + * Rx sequence error interrupt. 
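As an aside, the flow-control string chosen by e1000_print_link_info() above reduces to a two-bit decode of CTRL.RFCE/CTRL.TFCE; a minimal sketch with the two bits passed in as booleans:

        static const char *flow_control_string(int rx_pause, int tx_pause)
        {
                if (rx_pause && tx_pause)
                        return "Rx/Tx";
                if (rx_pause)
                        return "Rx";
                if (tx_pause)
                        return "Tx";
                return "None";
        }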
get_link_status will stay + * false until the check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = adapter->hw.mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + e_info("Gigabit has been disabled, downgrading speed\n"); + } + + return link_active; +} + +static void e1000e_enable_receives(struct e1000_adapter *adapter) +{ + /* make sure the receive unit is started */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RESTART_NOW)) { + struct e1000_hw *hw = &adapter->hw; + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl | E1000_RCTL_EN); + adapter->flags &= ~FLAG_RESTART_NOW; + } +} + +static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* With 82574 controllers, PHY needs to be checked periodically + * for hung state and reset, if two calls return true + */ + if (e1000_check_phy_82574(hw)) + adapter->phy_hang_count++; + else + adapter->phy_hang_count = 0; + + if (adapter->phy_hang_count > 1) { + adapter->phy_hang_count = 0; + e_dbg("PHY appears hung - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_phy_info *phy = &adapter->hw.phy; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + link = e1000e_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + e1000e_enable_receives(adapter); + goto link_up; + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = true; + + /* Cancel scheduled suspend requests. 
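As an aside, a sketch of the two-strikes heuristic in e1000e_check_82574_phy_workaround() above: the counter resets on any healthy check, so a reset is requested only after two consecutive hung readings.

        static int phy_hang_check(unsigned int *hang_count, int check_says_hung)
        {
                if (check_says_hung)
                        (*hang_count)++;
                else
                        *hang_count = 0;

                if (*hang_count > 1) {
                        *hang_count = 0;
                        return 1;       /* caller schedules the reset task */
                }
                return 0;
        }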
*/ + pm_runtime_resume(netdev->dev.parent); + + /* update snapshot of PHY registers on LSC */ + e1000_phy_read_status(adapter); + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + + /* check if SmartSpeed worked */ + e1000e_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, + "Link Speed was downgraded by SmartSpeed\n"); + + /* On supported PHYs, check for duplex mismatch only + * if link has autonegotiated at 10/100 half + */ + if ((hw->phy.type == e1000_phy_igp_3 || + hw->phy.type == e1000_phy_bm) && + hw->mac.autoneg && + (adapter->link_speed == SPEED_10 || + adapter->link_speed == SPEED_100) && + (adapter->link_duplex == HALF_DUPLEX)) { + u16 autoneg_exp; + + e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); + + if (!(autoneg_exp & EXPANSION_NWAY)) + e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); + } + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = false; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = false; + adapter->tx_timeout_factor = 10; + break; + } + + /* workaround: re-program speed mode bit after + * link-up event + */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + + tarc0 = er32(TARC(0)); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC(0), tarc0); + } + + /* disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues + */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + e_info("10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* enable transmits in the hardware, need to do this + * after setting TARC(0) + */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* Perform any post-link-up configuration before + * reporting link up. + */ + if (phy->ops.cfg_on_link_up) + phy->ops.cfg_on_link_up(hw); + + netif_carrier_on(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + /* Link status message must follow this format */ + pr_info("%s NIC Link is Down\n", adapter->netdev->name); + netif_carrier_off(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* 8000ES2LAN requires a Rx packet buffer work-around + * on link down event; reset the controller to flush + * the Rx packet buffer. 
+ */ + if (adapter->flags & FLAG_RX_NEEDS_RESTART) + adapter->flags |= FLAG_RESTART_NOW; + else + pm_schedule_suspend(netdev->dev.parent, + LINK_TIMEOUT); + } + } + +link_up: + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorc = adapter->stats.gorc - adapter->gorc_old; + adapter->gorc_old = adapter->stats.gorc; + adapter->gotc = adapter->stats.gotc - adapter->gotc_old; + adapter->gotc_old = adapter->stats.gotc; + spin_unlock(&adapter->stats64_lock); + + /* If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. So + * reset the controller to flush the Tx packet buffers. + */ + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; + + /* If reset is necessary, do it outside of interrupt context. */ + if (adapter->flags & FLAG_RESTART_NOW) { + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + + e1000e_update_adaptive(&adapter->hw); + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotc + adapter->gorc) / 10000; + u32 dif = (adapter->gotc > adapter->gorc ? + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + e1000e_write_itr(adapter, itr); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->msix_entries) + ew32(ICS, adapter->rx_ring->ims_val); + else + ew32(ICS, E1000_ICS_RXDMT0); + + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
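As an aside, the simple-mode ITR heuristic above (itr_setting == 4) is easiest to see with concrete numbers; this standalone sketch reuses the same arithmetic, with the per-interval byte counts as hypothetical inputs.

        static unsigned int simple_mode_itr(unsigned int tx_bytes,
                                            unsigned int rx_bytes)
        {
                unsigned int goc = (tx_bytes + rx_bytes) / 10000;
                unsigned int dif = (tx_bytes > rx_bytes ? tx_bytes - rx_bytes
                                                        : rx_bytes - tx_bytes) / 10000;

                return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
        }

        /* e.g. tx_bytes = 3000000, rx_bytes = 1000000: goc = 400, dif = 200,
         * itr = 200 * 6000 / 400 + 2000 = 5000; perfectly symmetric traffic
         * gives 2000 and fully one-sided traffic gives 8000, matching the
         * comment above
         */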
Set the appropriate LAA in RAR[0] + */ + if (e1000e_get_laa_state_82571(hw)) + hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); + + if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) + e1000e_check_82574_phy_workaround(adapter); + + /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ + if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { + er32(RXSTMPH); + adapter->rx_hwtstamp_cleared++; + } else { + adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; + } + } + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_HWTSTAMP 0x00000020 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == 
IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn("checksum_partial proto=%x!\n", + be16_to_cpu(protocol)); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; +} + +static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, + unsigned int first, unsigned int max_per_txd, + unsigned int nr_frags) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + buffer_info->mapped_as_page = false; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + offset = 0; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, + DMA_TO_DEVICE); + buffer_info->mapped_as_page = true; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ? 
: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "Tx DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(tx_ring, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_TSO) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (tx_flags & E1000_TX_FLAGS_IPV4) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_EXTCMD_TSTAMP; + } + + i = tx_ring->next_to_use; + + do { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = cpu_to_le32(txd_lower | + buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i == tx_ring->count) + i = 0; + } while (--count > 0); + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
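As an aside, the bytecount bookkeeping at the top of this hunk, (segs - 1) * headlen + skb->len, is clearer with numbers; a standalone sketch with hypothetical sizes:

        static unsigned int example_tx_bytecount(unsigned int segs,
                                                 unsigned int headlen,
                                                 unsigned int skb_len)
        {
                /* header bytes are replicated once per additional TSO segment */
                return (segs - 1) * headlen + skb_len;
        }

        /* e.g. a 3000-byte TSO skb with a 54-byte header cut into segs = 2
         * on-wire frames accounts for (2 - 1) * 54 + 3000 = 3054 bytes
         */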
+ */ + wmb(); + + tx_ring->next_to_use = i; +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (skb_vlan_tag_present(skb) && + !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + + netif_stop_queue(adapter->netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (e1000_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(adapter->netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) +{ + BUG_ON(size > tx_ring->count); + + if (e1000_desc_unused(tx_ring) >= size) + return 0; + return __e1000_maybe_stop_tx(tx_ring, size); +} + +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int first; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + if (test_bit(__E1000_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size with TCTL.PSP set is 17 bytes so + * pad skb in order to meet this minimum size requirement + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + u8 hdr_len; + + /* TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data + */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + /* we do this workaround for ES2LAN, but it is un-necessary, + * avoiding it could save a lot of cycles + */ + if (skb->data_len && (hdr_len == len)) { + unsigned int pull_size; + + pull_size = min_t(unsigned int, 4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err("__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), + 
adapter->tx_fifo_limit); + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (e1000_maybe_stop_tx(tx_ring, count + 2)) + return NETDEV_TX_BUSY; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= E1000_TX_FLAGS_TSO; + else if (e1000_tx_csum(tx_ring, skb, protocol)) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. + */ + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + /* if count is 0 then mapping error has occurred */ + count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, + nr_frags); + if (count) { + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (adapter->flags & FLAG_HAS_HW_TIMESTAMP) && + !adapter->tx_hwtstamp_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= E1000_TX_FLAGS_HWTSTAMP; + adapter->tx_hwtstamp_skb = skb_get(skb); + adapter->tx_hwtstamp_start = jiffies; + schedule_work(&adapter->tx_hwtstamp_work); + } else { + skb_tx_timestamp(skb); + } + + netdev_sent_queue(netdev, skb->len); + e1000_tx_queue(tx_ring, tx_flags, count); + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(tx_ring, + (MAX_SKB_FRAGS * + DIV_ROUND_UP(PAGE_SIZE, + adapter->tx_fifo_limit) + 2)); + + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, + tx_ring->next_to_use); + else + writel(tx_ring->next_to_use, tx_ring->tail); + + /* we need this if more than one processor can write + * to our tail at a time, it synchronizes IO on + *IA64/Altix systems + */ + mmiowb(); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!(adapter->flags & FLAG_RESTART_NOW)) { + e1000e_dump(adapter); + e_err("Reset adapter unexpectedly\n"); + } + e1000e_reinit_locked(adapter); +} + +/** + * e1000_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. 
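As an aside, the descriptor budgeting earlier in e1000_xmit_frame() leans on DIV_ROUND_UP(); a standalone equivalent and a worked count, with the 4096-byte tx_fifo_limit used below as an assumed value:

        static unsigned int div_round_up_u32(unsigned int len, unsigned int chunk)
        {
                return (len + chunk - 1) / chunk;
        }

        /* e.g. with tx_fifo_limit = 4096: a 6000-byte linear area costs
         * (6000 + 4095) / 4096 = 2 data descriptors and a 1500-byte frag
         * costs 1; the driver then reserves one more for the offload
         * context and keeps a 2-descriptor gap before the tail
         */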
+ **/ +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + /* Fill out the OS statistics structure */ + stats->rx_bytes = adapter->stats.gorc; + stats->rx_packets = adapter->stats.gprc; + stats->tx_bytes = adapter->stats.gotc; + stats->tx_packets = adapter->stats.gptc; + stats->multicast = adapter->stats.mprc; + stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; + stats->rx_crc_errors = adapter->stats.crcerrs; + stats->rx_frame_errors = adapter->stats.algnerrc; + stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; + stats->tx_aborted_errors = adapter->stats.ecol; + stats->tx_window_errors = adapter->stats.latecol; + stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + spin_unlock(&adapter->stats64_lock); + return stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + + /* Jumbo frame support */ + if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) && + !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + e_err("Jumbo Frames not supported.\n"); + return -EINVAL; + } + + /* Supported frame sizes */ + if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) || + (max_frame > adapter->max_hw_frame_size)) { + e_err("Unsupported MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ + if ((adapter->hw.mac.type >= e1000_pch2lan) && + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && + (new_mtu > ETH_DATA_LEN)) { + e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ + adapter->max_frame_size = max_frame; + e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + pm_runtime_get_sync(netdev->dev.parent); + + if (netif_running(netdev)) + e1000e_down(adapter, true); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else + adapter->rx_buffer_len = 4096; + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; + + if (netif_running(netdev)) + e1000e_up(adapter); + else + e1000e_reset(adapter); + + pm_runtime_put_sync(netdev->dev.parent); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + e1000_phy_read_status(adapter); + + switch (data->reg_num & 0x1F) { + case MII_BMCR: + data->val_out = adapter->phy_regs.bmcr; + break; + case MII_BMSR: + data->val_out = adapter->phy_regs.bmsr; + break; + case MII_PHYSID1: + data->val_out = (adapter->hw.phy.id >> 16); + break; + case MII_PHYSID2: + data->val_out = (adapter->hw.phy.id & 0xFFFF); + break; + case MII_ADVERTISE: + data->val_out = adapter->phy_regs.advertise; + break; + case MII_LPA: + data->val_out = adapter->phy_regs.lpa; + break; + case MII_EXPANSION: + data->val_out = adapter->phy_regs.expansion; + break; + case MII_CTRL1000: + data->val_out = adapter->phy_regs.ctrl1000; + break; + case MII_STAT1000: + data->val_out = adapter->phy_regs.stat1000; + break; + case MII_ESTATUS: + data->val_out = adapter->phy_regs.estatus; + break; + default: + return -EIO; + } + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * e1000e_hwtstamp_ioctl - control hardware time stamping + * @netdev: network interface device structure + * @ifreq: interface request + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". 
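As an aside, a compact sketch of the Rx buffer sizing decision in e1000_change_mtu() above, assuming 1522 bytes for VLAN_ETH_FRAME_LEN + ETH_FCS_LEN:

        static unsigned int example_rx_buffer_len(unsigned int max_frame)
        {
                unsigned int len = (max_frame <= 2048) ? 2048 : 4096;

                if (max_frame <= 1522)  /* standard frames get an exact fit */
                        len = 1522;

                return len;
        }

        /* e.g. MTU 1500 -> max_frame 1522 -> 1522-byte buffers, while a
         * 9000-byte jumbo MTU -> max_frame 9022 -> 4096-byte buffers with
         * the fragmented jumbo Rx routines
         */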
+ **/ +static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int ret_val; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + ret_val = e1000e_config_hwtstamp(adapter, &config); + if (ret_val) + return ret_val; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* With V2 type filters which specify a Sync or Delay Request, + * Path Delay Request/Response messages are also time stamped + * by hardware so notify the caller the requested packets plus + * some others are time stamped. + */ + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + default: + break; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, + sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; +} + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + case SIOCSHWTSTAMP: + return e1000e_hwtstamp_set(netdev, ifr); + case SIOCGHWTSTAMP: + return e1000e_hwtstamp_get(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, mac_reg, wuc; + u16 phy_reg, wuc_enable; + int retval; + + /* copy MAC RARs to PHY RARs */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + retval = hw->phy.ops.acquire(hw); + if (retval) { + e_err("Could not acquire PHY\n"); + return retval; + } + + /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ + retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + goto release; + + /* copy MAC MTA to PHY MTA - only needed for pchlan */ + for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { + mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); + hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, + (u16)((mac_reg >> 16) & 0xFFFF)); + } + + /* configure PHY Rx Control register */ + hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); + mac_reg = er32(RCTL); + if (mac_reg & E1000_RCTL_UPE) + phy_reg |= BM_RCTL_UPE; + if (mac_reg & E1000_RCTL_MPE) + phy_reg |= BM_RCTL_MPE; + phy_reg &= ~(BM_RCTL_MO_MASK); + if (mac_reg & E1000_RCTL_MO_3) + phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) + << BM_RCTL_MO_SHIFT); + if (mac_reg & E1000_RCTL_BAM) + phy_reg |= BM_RCTL_BAM; + if (mac_reg & E1000_RCTL_PMCF) + phy_reg |= BM_RCTL_PMCF; + mac_reg = er32(CTRL); + if (mac_reg & E1000_CTRL_RFCE) + phy_reg |= BM_RCTL_RFCE; + hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + + wuc = E1000_WUC_PME_EN; + if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) + wuc |= E1000_WUC_APME; + + /* enable PHY wakeup in MAC register */ + ew32(WUFC, wufc); + ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | + E1000_WUC_PME_STATUS | wuc)); + + /* configure and enable PHY wakeup in PHY registers */ + hw->phy.ops.write_reg_page(&adapter->hw, 
BM_WUFC, wufc); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); + + /* activate PHY wakeup */ + wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; + retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + e_err("Could not set PHY Host Wakeup bit\n"); +release: + hw->phy.ops.release(hw); + + return retval; +} + +static void e1000e_flush_lpic(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ret_val; + + pm_runtime_get_sync(netdev->dev.parent); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto fl_out; + + pr_info("EEE TX LPI TIMER: %08X\n", + er32(LPIC) >> E1000_LPIC_LPIET_SHIFT); + + hw->phy.ops.release(hw); + +fl_out: + pm_runtime_put_sync(netdev->dev.parent); +} + +static int e1000e_pm_freeze(struct device *dev) +{ + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + /* Quiesce the device without resetting the hardware */ + e1000e_down(adapter, false); + e1000_free_irq(adapter); + } + e1000e_reset_interrupt_capability(adapter); + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + return 0; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + int retval = 0; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000e_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ADVD3WUC; + if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) + ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.phy.media_type == e1000_media_type_fiber || + adapter->hw.phy.media_type == + e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + if (!runtime) + e1000e_power_up_phy(adapter); + + if (adapter->flags & FLAG_IS_ICH) + e1000_suspend_workarounds_ich8lan(&adapter->hw); + + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + /* enable wakeup by the PHY */ + retval = e1000_init_phy_wakeup(adapter, wufc); + if (retval) + return retval; + } else { + /* enable wakeup by the MAC */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PME_EN); + } + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + + e1000_power_down_phy(adapter); + } + + if (adapter->hw.phy.type == e1000_phy_igp_3) { + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + /* ULP does not support wake from unicast, multicast + * or broadcast. 
+ */ + retval = e1000_enable_ulp_lpt_lp(hw, !runtime); + + if (retval) + return retval; + } + + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + pci_clear_master(pdev); + + /* The pci-e switch on some quad port adapters will report a + * correctable error when the MAC transitions from D0 to D3. To + * prevent this we need to mask off the correctable errors on the + * downstream port of the pci-e switch. + * + * We don't have the associated upstream bridge while assigning + * the PCI device into guest. For example, the KVM on power is + * one of the cases. + */ + if (adapter->flags & FLAG_IS_QUAD_PORT) { + struct pci_dev *us_dev = pdev->bus->self; + u16 devctl; + + if (!us_dev) + return 0; + + pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE)); + + pci_save_state(pdev); + pci_prepare_to_sleep(pdev); + + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); + } + + return 0; +} + +/** + * e1000e_disable_aspm - Disable ASPM states + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + struct pci_dev *parent = pdev->bus->self; + u16 aspm_dis_mask = 0; + u16 pdev_aspmc, parent_aspmc; + + switch (state) { + case PCIE_LINK_STATE_L0S: + case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; + /* fall-through - can't have L1 without L0s */ + case PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; + break; + default: + return; + } + + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; + + if (parent) { + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, + &parent_aspmc); + parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; + } + + /* Nothing to do if the ASPM states to be disabled already are */ + if (!(pdev_aspmc & aspm_dis_mask) && + (!parent || !(parent_aspmc & aspm_dis_mask))) + return; + + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? + "L0s" : "", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? + "L1" : ""); + +#ifdef CONFIG_PCIEASPM + pci_disable_link_state_locked(pdev, state); + + /* Double-check ASPM control. If not disabled by the above, the + * BIOS is preventing that from happening (or CONFIG_PCIEASPM is + * not enabled); override by writing PCI config space directly. + */ + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; + + if (!(aspm_dis_mask & pdev_aspmc)) + return; +#endif + + /* Both device and parent should have the same ASPM setting. + * Disable ASPM in downstream component first and then upstream. 
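+ *
+ * For reference (illustrative equivalent only, not additional driver
+ * code; "lnkctl" is just a local name for the example), the clear_word
+ * helper used below behaves like an open-coded read-modify-write of the
+ * Link Control register:
+ *
+ *        u16 lnkctl;
+ *
+ *        pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
+ *        lnkctl &= ~aspm_dis_mask;
+ *        pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);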
+ */ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); + + if (parent) + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, + aspm_dis_mask); +} + +#ifdef CONFIG_PM +static int __e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + pci_set_master(pdev); + + if (hw->mac.type >= e1000_pch2lan) + e1000_resume_workarounds_pchlan(&adapter->hw); + + e1000e_power_up_phy(adapter); + + /* report the system wakeup cause from S3/S4 */ + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + u16 phy_data; + + e1e_rphy(&adapter->hw, BM_WUS, &phy_data); + if (phy_data) { + e_info("PHY Wakeup cause - %s\n", + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? "Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? + "Link Status Change" : "other"); + } + e1e_wphy(&adapter->hw, BM_WUS, ~0); + } else { + u32 wus = er32(WUS); + + if (wus) { + e_info("MAC Wakeup cause - %s\n", + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? "Link Status Change" : + "other"); + } + ew32(WUS, ~0); + } + + e1000e_reset(adapter); + + e1000_init_manageability_pt(adapter); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int e1000e_pm_thaw(struct device *dev) +{ + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000e_set_interrupt_capability(adapter); + if (netif_running(netdev)) { + u32 err = e1000_request_irq(adapter); + + if (err) + return err; + + e1000e_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} + +static int e1000e_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + e1000e_flush_lpic(pdev); + + e1000e_pm_freeze(dev); + + return __e1000_shutdown(pdev, false); +} + +static int e1000e_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int rc; + + rc = __e1000_resume(pdev); + if (rc) + return rc; + + return e1000e_pm_thaw(dev); +} +#endif /* CONFIG_PM_SLEEP */ + +static int e1000e_pm_runtime_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 eee_lp; + + eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability; + + if (!e1000e_has_link(adapter)) { + adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp; + pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); + } + + return -EBUSY; +} + +static int e1000e_pm_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + int rc; + + rc = __e1000_resume(pdev); + if (rc) + return rc; + + if (netdev->flags & IFF_UP) + rc = e1000e_up(adapter); + + return rc; +} + +static int e1000e_pm_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netdev->flags & IFF_UP) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + /* Down the device without resetting the hardware */ + e1000e_down(adapter, false); + } + + if (__e1000_shutdown(pdev, true)) { + e1000e_pm_runtime_resume(dev); + return -EBUSY; + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void e1000_shutdown(struct pci_dev *pdev) +{ + e1000e_flush_lpic(pdev); + + e1000e_pm_freeze(&pdev->dev); + + __e1000_shutdown(pdev, false); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->msix_entries) { + int vector, msix_irq; + + vector = 0; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_rx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_tx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_msix_other(msix_irq, netdev); + enable_irq(msix_irq); + } + + return IRQ_HANDLED; +} + +/** + * e1000_netpoll + * @netdev: network interface device structure + * + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. 
It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + e1000_intr_msix(adapter->pdev->irq, netdev); + break; + case E1000E_INT_MODE_MSI: + disable_irq(adapter->pdev->irq); + e1000_intr_msi(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); + break; + default: /* E1000E_INT_MODE_LEGACY */ + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); + break; + } +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000e_down(adapter, true); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000e_pm_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + int err; + pci_ers_result_t result; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pdev->state_saved = true; + pci_restore_state(pdev); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000e_reset(adapter); + ew32(WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the e1000e_pm_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability_pt(adapter); + + if (netif_running(netdev)) { + if (e1000e_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u8 pba_str[E1000_PBANUM_LENGTH]; + + /* print bus type/speed/width info */ + e_info("(PCI Express:2.5GT/s:%s) %pM\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr); + e_info("Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); + ret_val = e1000_read_pba_string_generic(hw, pba_str, + E1000_PBANUM_LENGTH); + if (ret_val) + strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); + e_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, pba_str); +} + +static void e1000_eeprom_checks(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int ret_val; + u16 buf = 0; + + if (hw->mac.type != e1000_82573) + return; + + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); + le16_to_cpus(&buf); + if (!ret_val && (!(buf & (1 << 0)))) { + /* Deep Smart Power Down (DSPD) */ + dev_warn(&adapter->pdev->dev, + "Warning: detected DSPD enabled in EEPROM\n"); + } +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->flags |= FLAG_TSO_FORCE; + + if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | + NETIF_F_RXALL))) + return 0; + + if (changed & NETIF_F_RXFCS) { + if (features & NETIF_F_RXFCS) { + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } else { + /* We need to take it back to defaults, which might mean + * stripping is still disabled at the adapter level. + */ + if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) + adapter->flags2 |= FLAG2_CRC_STRIPPING; + else + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } + } + + netdev->features = features; + + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + + return 0; +} + +static const struct net_device_ops e1000e_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats64 = e1000e_get_stats64, + .ndo_set_rx_mode = e1000e_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
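+ *
+ * For example (illustrative walk-through): a NIC matched by the id-table
+ * entry
+ *
+ *        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }
+ *
+ * arrives here with ent->driver_data == board_82574, so the lookup below
+ * resolves to
+ *
+ *        ei = e1000_info_tbl[board_82574];
+ *
+ * and the flags, MAC ops and PHY ops used for the rest of probe all come
+ * from that board description.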
+ **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + resource_size_t mmio_start, mmio_len; + resource_size_t flash_start, flash_len; + static int cards_found; + u16 aspm_disable_flag = 0; + int bars, i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + s32 rval = 0; + + if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions_exclusive(pdev, bars, + e1000e_driver_name); + if (err) + goto err_pci_reg; + + /* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + /* PCI config space info */ + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + netdev->irq = pdev->irq; + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->flags2 = ei->flags2; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->max_hw_frame_size = ei->max_hw_frame_size; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && + (hw->mac.type < e1000_pch_spt)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* Set default EEE advertisement */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + /* construct the net_device struct */ + netdev->netdev_ops = &e1000e_netdev_ops; + e1000e_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + e1000e_check_options(adapter); + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + err = ei->get_variants(adapter); + if (err) + goto 
err_hw_init; + + if ((adapter->flags & FLAG_IS_ICH) && + (adapter->flags & FLAG_READ_ONLY_NVM) && + (hw->mac.type < e1000_pch_spt)) + e1000e_write_protect_nvm_ich8lan(&adapter->hw); + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.autoneg_wait_to_complete = 0; + + /* Copper options */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* Set initial default active device features */ + netdev->features = (NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM); + + /* Set user-changeable features (subset of all device features) */ + netdev->hw_features = netdev->features; + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= (NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* before reading the NVM, reset the controller to + * put the device in a known good starting state + */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* systems with ASPM and others may see the checksum fail on the first + * attempt. Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + e1000_eeprom_checks(adapter); + + /* copy the MAC address */ + if (e1000e_read_mac_addr(&adapter->hw)) + dev_err(&pdev->dev, + "NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", + netdev->dev_addr); + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); + INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); + INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); + + /* Initialize link parameters. 
User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = true; + adapter->hw.fc.requested_mode = e1000_fc_default; + adapter->hw.fc.current_mode = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) + adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + rval = e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, + 1, &eeprom_data); + else + rval = e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, + 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (rval) + e_dbg("NVM read error getting WoL initial values: %d\n", rval); + else if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + device_wakeup_enable(&pdev->dev); + + /* save off EEPROM version number */ + rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + + if (rval) { + e_dbg("NVM read error getting EEPROM version: %d\n", rval); + adapter->eeprom_vers = 0; + } + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* init PTP hardware clock */ + e1000e_ptp_init(adapter); + + e1000_print_device_info(adapter); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_release_hw_control(adapter); +err_eeprom: + if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) + e1000_phy_hw_reset(&adapter->hw); +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) + iounmap(adapter->hw.flash_address); + e1000e_reset_interrupt_capability(adapter); +err_flashmap: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + bool down = test_bit(__E1000_DOWN, &adapter->state); + + e1000e_ptp_remove(adapter); + + /* The timers may be rescheduled, so explicitly disable them + * from being rescheduled. + */ + if (!down) + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->downshift_task); + cancel_work_sync(&adapter->update_phy_task); + cancel_work_sync(&adapter->print_hang_task); + + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + cancel_work_sync(&adapter->tx_hwtstamp_work); + if (adapter->tx_hwtstamp_skb) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } + } + + /* Don't lie to e1000_close() down the road. */ + if (!down) + clear_bit(__E1000_DOWN, &adapter->state); + unregister_netdev(netdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
+ */ + e1000e_release_hw_control(adapter); + + e1000e_reset_interrupt_capability(adapter); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if ((adapter->hw.flash_address) && + (adapter->hw.mac.type < e1000_pch_spt)) + iounmap(adapter->hw.flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + free_netdev(netdev); + + /* AER disable */ + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static const struct pci_device_id e1000_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), + board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, + + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +static const struct dev_pm_ops e1000_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = e1000e_pm_suspend, + .resume = e1000e_pm_resume, + .freeze = e1000e_pm_freeze, + .thaw = e1000e_pm_thaw, + .poweroff = e1000e_pm_suspend, + .restore = e1000e_pm_resume, +#endif + SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, + e1000e_pm_runtime_idle) +}; + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = &e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_version); + pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. 
+ **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* netdev.c */ diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c new file mode 100644 index 000000000..fa6b1036a --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -0,0 +1,633 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
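+ *
+ * Worked example (illustrative): with count == 8 and the EEPROM driving
+ * the value 0xA5 on "DO", the bits arrive MSB first and the accumulator
+ * grows as
+ *
+ *        1 -> 10 -> 101 -> 1010 -> 10100 -> 101001 -> 1010010 -> 10100101
+ *
+ * so the returned data is 0xA5.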
+ **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
+ **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + e1e_flush(); + udelay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + e_dbg("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) { + e_dbg("NVM read error: %d\n", ret_val); + break; + } + + data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
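+ *
+ * Worked example (illustrative numbers): with word_size == 64, a request
+ * for offset == 60 and words == 8 is rejected because 8 > (64 - 60),
+ * while offset == 60 and words == 4 passes the check.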
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + usleep_range(10000, 20000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + e_dbg("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + e_dbg("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + e_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + 
e_dbg("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + e_dbg("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = er32(RAH(0)); + rar_low = er32(RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + e_dbg("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + e_dbg("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/ +void e1000e_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + usleep_range(10, 20); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h new file mode 100644 index 000000000..342bf69ef --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -0,0 +1,40 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_NVM_H_ +#define _E1000E_NVM_H_ + +s32 e1000e_acquire_nvm(struct e1000_hw *hw); + +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000e_release_nvm(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c new file mode 100644 index 000000000..aa1923f7e --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -0,0 +1,533 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include +#include + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. 
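+ *
+ * Per-port values are supplied as a comma-separated list at module load
+ * time, one entry per NIC in probe order, so the value below allows up
+ * to 32 ports to carry their own setting. The argument values here are
+ * illustrative only:
+ *
+ *        modprobe e1000e TxIntDelay=8,16 InterruptThrottleRate=3000,3000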
+ */ +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define COPYBREAK_DEFAULT 256 +unsigned int copybreak = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non-zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* IntMode (Interrupt Mode) + * + * Valid Range: varies depending on kernel configuration & hardware support + * + * legacy=0, MSI=1, MSI-X=2 + * + * When MSI/MSI-X support is enabled in kernel- + * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise + * When MSI/MSI-X support is not enabled in kernel- + * Default Value: 0 (legacy) + * + * When a mode is specified that is not allowed/supported, it will be + * demoted to the most advanced interrupt mode available. 
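+ *
+ * For example, "modprobe e1000e IntMode=2,1" requests MSI-X on the first
+ * port and MSI on the second; on hardware without MSI-X support the
+ * first value is out of range and falls back to the default (MSI), and
+ * with CONFIG_PCI_MSI disabled both ports fall back to legacy interrupts.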
+ */ +E1000_PARAM(IntMode, "Interrupt Mode"); +#define MAX_INTMODE 2 +#define MIN_INTMODE 0 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + +/* Write Protect NVM + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(WriteProtectNVM, + "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); + +/* Enable CRC Stripping + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(CrcStripping, + "Enable CRC Stripping, disable if your BMC needs the CRC"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + /* range_option info */ + struct { + int min; + int max; + } r; + /* list_option info */ + struct { + int nr; + struct e1000_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + dev_info(&adapter->pdev->dev, "%s Enabled\n", + opt->name); + return 0; + case OPTION_DISABLED: + dev_info(&adapter->pdev->dev, "%s Disabled\n", + opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + dev_info(&adapter->pdev->dev, "%s set to %i\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + dev_info(&adapter->pdev->dev, "%s\n", + ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * e1000e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
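+ *
+ * The module parameters are per-port arrays, so for example
+ * "modprobe e1000e TxIntDelay=0,64" gives the first port a zero transmit
+ * interrupt delay and the second a delay of 64; ports beyond the number
+ * of values supplied simply keep the defaults documented above.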
+ **/ +void e1000e_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + dev_notice(&adapter->pdev->dev, + "Warning: no configuration for board #%i\n", bd); + dev_notice(&adapter->pdev->dev, + "Using defaults for all values\n"); + } + + /* Transmit Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY } } + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + /* Transmit Absolute Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY } } + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + /* Receive Interrupt Delay */ + { + static struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY } } + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + /* Receive Absolute Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY } } + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + /* Interrupt Throttling Rate */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " + __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + + /* Make sure a message is printed for non-special + * values. 
And in case of an invalid option, display + * warning, use default and go through itr/itr_setting + * adjustment logic below + */ + if ((adapter->itr > 4) && + e1000_validate_option(&adapter->itr, &opt, adapter)) + adapter->itr = opt.def; + } else { + /* If no option specified, use default value and go + * through the logic below to adjust itr/itr_setting + */ + adapter->itr = opt.def; + + /* Make sure a message is printed for non-special + * default values + */ + if (adapter->itr > 4) + dev_info(&adapter->pdev->dev, + "%s set to default %d\n", opt.name, + adapter->itr); + } + + adapter->itr_setting = adapter->itr; + switch (adapter->itr) { + case 0: + dev_info(&adapter->pdev->dev, "%s turned off\n", + opt.name); + break; + case 1: + dev_info(&adapter->pdev->dev, + "%s set to dynamic mode\n", opt.name); + adapter->itr = 20000; + break; + case 2: + dev_info(&adapter->pdev->dev, + "%s Invalid mode - setting default\n", + opt.name); + adapter->itr_setting = opt.def; + /* fall-through */ + case 3: + dev_info(&adapter->pdev->dev, + "%s set to dynamic conservative mode\n", + opt.name); + adapter->itr = 20000; + break; + case 4: + dev_info(&adapter->pdev->dev, + "%s set to simplified (2000-8000 ints) mode\n", + opt.name); + break; + default: + /* Save the setting, because the dynamic bits + * change itr. + * + * Clear the lower two bits because + * they are used as control. + */ + adapter->itr_setting &= ~3; + break; + } + } + /* Interrupt Mode */ + { + static struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Mode", +#ifndef CONFIG_PCI_MSI + .err = "defaulting to 0 (legacy)", + .def = E1000E_INT_MODE_LEGACY, + .arg = { .r = { .min = 0, + .max = 0 } } +#endif + }; + +#ifdef CONFIG_PCI_MSI + if (adapter->flags & FLAG_HAS_MSIX) { + opt.err = kstrdup("defaulting to 2 (MSI-X)", + GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSIX; + opt.arg.r.max = E1000E_INT_MODE_MSIX; + } else { + opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSI; + opt.arg.r.max = E1000E_INT_MODE_MSI; + } + + if (!opt.err) { + dev_err(&adapter->pdev->dev, + "Failed to allocate memory\n"); + return; + } +#endif + + if (num_IntMode > bd) { + unsigned int int_mode = IntMode[bd]; + + e1000_validate_option(&int_mode, &opt, adapter); + adapter->int_mode = int_mode; + } else { + adapter->int_mode = opt.def; + } + +#ifdef CONFIG_PCI_MSI + kfree(opt.err); +#endif + } + /* Smart Power Down */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + + e1000_validate_option(&spd, &opt, adapter); + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) + adapter->flags |= FLAG_SMART_POWER_DOWN; + } + } + /* CRC Stripping */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "CRC Stripping", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_CrcStripping > bd) { + unsigned int crc_stripping = CrcStripping[bd]; + + e1000_validate_option(&crc_stripping, &opt, adapter); + if (crc_stripping == OPTION_ENABLED) { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; + } + } else { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; + } + } + /* Kumeran Lock Loss Workaround */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock 
Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + bool enabled = opt.def; + + if (num_KumeranLockLoss > bd) { + unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; + + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + enabled = kmrn_lock_loss; + } + + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + enabled); + } + /* Write-protect NVM */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "Write-protect NVM", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (adapter->flags & FLAG_IS_ICH) { + if (num_WriteProtectNVM > bd) { + unsigned int write_protect_nvm = + WriteProtectNVM[bd]; + e1000_validate_option(&write_protect_nvm, &opt, + adapter); + if (write_protect_nvm) + adapter->flags |= FLAG_READ_ONLY_NVM; + } else { + if (opt.def) + adapter->flags |= FLAG_READ_ONLY_NVM; + } + } + } +} diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c new file mode 100644 index 000000000..b2005e13f --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -0,0 +1,3237 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +static u32 e1000_get_phy_addr_for_hv_page(u32 page); +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED +}; + +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_m88_cable_length_table) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124 +}; + +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_igp_2_cable_length_table) + +/** + * e1000e_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). 
+ **/ +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + manc = er32(MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000e_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000e_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u16 retry_count = 0; + + if (!phy->ops.read_reg) + return 0; + + while (retry_count < 2) { + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usleep_range(20, 40); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + return 0; + + retry_count++; + } + + return 0; +} + +/** + * e1000e_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000e_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16)mdic; + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
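+ *
+ * As on the read side, the access is a single 32-bit MDIC write packing
+ * the 16-bit data, the register offset (E1000_MDIC_REG_SHIFT), the PHY
+ * address (E1000_MDIC_PHY_SHIFT) and the write op-code, after which the
+ * READY bit is polled; a PHY soft reset, for instance, would reach this
+ * path as a write of BMCR_RESET to MII_BMCR.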
+ **/ +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. 
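+ *
+ * On IGP-style PHYs the page lives in the upper bits of the register
+ * offset while the register number is the low bits masked by
+ * MAX_PHY_REG_ADDRESS, which is why the multi-page helpers below first
+ * write IGP01E1000_PHY_PAGE_SELECT with the full offset and then access
+ * (MAX_PHY_REG_ADDRESS & offset).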
+ **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + e_dbg("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
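+ *
+ * The _locked variants below assume the caller already holds the PHY
+ * semaphore, which keeps a multi-register update atomic with respect to
+ * other users; an illustrative sketch (offsets and data are placeholders):
+ *
+ *	ret_val = hw->phy.ops.acquire(hw);
+ *	if (!ret_val) {
+ *		e1000e_write_phy_reg_igp_locked(hw, offset1, data1);
+ *		e1000e_write_phy_reg_igp_locked(hw, offset2, data2);
+ *		hw->phy.ops.release(hw);
+ *	}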
+ **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + if (!locked) { + s32 ret_val = 0; + + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + kmrnctrlsta = er32(KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return 0; +} + +/** + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. 
+ **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + if (!locked) { + s32 ret_val = 0; + + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return 0; +} + +/** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ? + ((phy_data & CTL1000_AS_MASTER) ? + e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + break; + case e1000_ms_force_slave: + phy_data |= CTL1000_ENABLE_MASTER; + phy_data &= ~(CTL1000_AS_MASTER); + break; + case e1000_ms_auto: + phy_data &= ~CTL1000_ENABLE_MASTER; + /* fall-through */ + default: + break; + } + + return e1e_wphy(hw, MII_CTRL1000, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); +} + +/** + * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != e1000_phy_bm) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BME1000_E_PHY_ID_R2) { + phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + } + + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. 
*/ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = e1e_wphy(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = e1e_wphy(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + if (phy->ops.commit) { + ret_val = phy->ops.commit(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + if (phy->type == e1000_phy_82578) { + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. */ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + e_dbg("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &data); + if (ret_val) + return ret_val; + + data &= ~CTL1000_ENABLE_MASTER; + ret_val = e1e_wphy(hw, MII_CTRL1000, data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF | + ADVERTISE_10FULL | ADVERTISE_10HALF); + mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + + e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_10HALF; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_10FULL; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_100HALF; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_100FULL; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + e_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (MII_ADVERTISE) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= + ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000e_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; + mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART); + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * e1000e_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000e_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + e_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + e_dbg("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = e1000e_config_fc_after_link_up(hw); + } else { + e_dbg("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. */ + if (hw->phy.ops.commit) { + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + if (hw->phy.type != e1000_phy_m88) { + e_dbg("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return 0; + + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. 
+ */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &data); + + ret_val = e1e_wphy(hw, MII_BMCR, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + e_dbg("IFE PMC: %X\n", data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of MII_BMCR + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the MII_BMCR register for these settings to + * take affect. + **/ +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~BMCR_ANENABLE; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~BMCR_FULLDPLX; + e_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= BMCR_FULLDPLX; + e_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? 
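+ * Only 10 and 100 Mb/s can be forced through this path; 1000 Mb/s
+ * operation relies on auto-negotiation (master/slave resolution), so
+ * the else branch below clears the gigabit speed bits in both CTRL and
+ * the PHY control word and forces 10 Mb/s instead.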
*/ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= BMCR_SPEED100; + *phy_ctrl &= ~BMCR_SPEED1000; + e_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100); + e_dbg("Forcing 10mb\n"); + } + + hw->mac.ops.config_collision_dist(hw); + + ew32(CTRL, ctrl); +} + +/** + * e1000e_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000e_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
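+ *
+ * For M88, GG82563, BM and 82578 PHYs the downshift bit is read from the
+ * PHY specific status register (M88E1000_PSSR_DOWNSHIFT); for IGP-2 and
+ * IGP-3 it comes from the link health register
+ * (IGP01E1000_PLHR_SS_DOWNGRADE). The result is cached in
+ * phy->speed_downgraded, and PHY types without downshift support simply
+ * report false.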
+ **/ +s32 e1000e_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + case e1000_phy_82578: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return 0; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = !!(phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = e1e_rphy(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + /* Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. 
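+ *
+ * MII_BMSR is read twice per iteration (its status bits are latched on
+ * some PHYs) with a 100 ms sleep between attempts, for at most
+ * PHY_AUTO_NEG_LIMIT iterations; running out of attempts is not treated
+ * as an error here, only a failed register read is.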
+ **/ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + if (phy_status & BMSR_ANEGCOMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000e_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the MII_BMSR register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + if (phy_status & BMSR_LSTATUS) + break; + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. 
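+ *
+ * The highest and lowest per-channel readings are discarded, the remaining
+ * channels are averaged, and the reported min/max range is that average
+ * plus or minus IGP02E1000_AGC_RANGE meters.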
+ **/ +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000e_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. 
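+ *
+ * At 10/100 Mbps the cable length and 1000T local/remote receiver status
+ * fields are reported as undefined.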
+ **/ +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + e_dbg("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. 
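+ *
+ * Cable length and 1000T receiver status do not apply to this 10/100 PHY
+ * and are always reported as undefined.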
+ **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + } + + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return 0; +} + +/** + * e1000e_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +s32 e1000e_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= BMCR_RESET; + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); + if (ret_val) + return ret_val; + + udelay(1); + + return ret_val; +} + +/** + * e1000e_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + if (phy->ops.check_reset_block) { + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) + return 0; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; + + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + e1e_flush(); + + udelay(phy->reset_delay_us); + + ew32(CTRL, ctrl); + e1e_flush(); + + usleep_range(150, 300); + + phy->ops.release(hw); + + return phy->ops.get_cfg_done(hw); +} + +/** + * e1000e_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw) +{ + mdelay(10); + + return 0; +} + +/** + * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
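+ *
+ * The script temporarily forces 1000 Mb/s so the PHY registers can be
+ * configured, then restarts auto-negotiation as its final step.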
+ **/ +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) +{ + e_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + e1e_wphy(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + e1e_wphy(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + e1e_wphy(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + e1e_wphy(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + e1e_wphy(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + e1e_wphy(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + e1e_wphy(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + e1e_wphy(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + e1e_wphy(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + e1e_wphy(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + e1e_wphy(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + e1e_wphy(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + e1e_wphy(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + e1e_wphy(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + e1e_wphy(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + e1e_wphy(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + e1e_wphy(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + e1e_wphy(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + e1e_wphy(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + e1e_wphy(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + e1e_wphy(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + e1e_wphy(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + e1e_wphy(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + e1e_wphy(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + e1e_wphy(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + e1e_wphy(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + e1e_wphy(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + e1e_wphy(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + e1e_wphy(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + e1e_wphy(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + e1e_wphy(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + e1e_wphy(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * e1000e_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. 
+ **/ +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; + case I82578_E_PHY_ID: + phy_type = e1000_phy_82578; + break; + case I82577_E_PHY_ID: + phy_type = e1000_phy_82577; + break; + case I82579_E_PHY_ID: + phy_type = e1000_phy_82579; + break; + case I217_E_PHY_ID: + phy_type = e1000_phy_i217; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000e_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000e_determine_phy_address(struct e1000_hw *hw) +{ + u32 phy_addr = 0; + u32 i; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + e1000e_get_phy_id(hw); + phy_type = e1000e_get_phy_type_from_id(hw->phy.id); + + /* If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) + return 0; + + usleep_range(1000, 2000); + i++; + } while (i < 10); + } + + return -E1000_ERR_PHY_TYPE; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. + **/ +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000e_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
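+ *
+ * BM2 accesses always use PHY address 1, with register 22
+ * (BM_PHY_PAGE_SELECT) used for page selection.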
+ **/ +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + return ret_val; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + e_dbg("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. + */ + temp = *phy_reg; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); + + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); + if (ret_val) { + e_dbg("Could not write PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page + */ + return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); +} + +/** + * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @hw: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. 
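+ *
+ * On return the PHY is left on the Port Control Registers page, not the
+ * wakeup registers page.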
+ **/ +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + return ret_val; + } + + /* Restore 769.17 to its original value */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); + if (ret_val) + e_dbg("Could not restore PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different than reading the other PHY + * registers. It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==true, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). 
+ **/ +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set) +{ + s32 ret_val; + u16 reg = BM_PHY_REG_NUM(offset); + u16 page = BM_PHY_REG_PAGE(offset); + u16 phy_reg = 0; + + /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ + if ((hw->mac.type == e1000_pchlan) && + (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) + e_dbg("Attempting to access page %d while gig enabled.\n", + page); + + if (!page_set) { + /* Enable access to PHY wakeup registers */ + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) { + e_dbg("Could not enable PHY wakeup reg access\n"); + return ret_val; + } + } + + e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg); + + /* Write the Wakeup register page offset value using opcode 0x11 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) { + e_dbg("Could not write address opcode to page %d\n", page); + return ret_val; + } + + if (read) { + /* Read the Wakeup register page value using opcode 0x12 */ + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + data); + } else { + /* Write the Wakeup register page value using opcode 0x12 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + *data); + } + + if (ret_val) { + e_dbg("Could not access PHY reg %d.%d\n", page, reg); + return ret_val; + } + + if (!page_set) + ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + + return ret_val; +} + +/** + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void e1000_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + e1e_rphy(hw, MII_BMCR, &mii_reg); + mii_reg &= ~BMCR_PDOWN; + e1e_wphy(hw, MII_BMCR, mii_reg); +} + +/** + * e1000_power_down_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void e1000_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + e1e_rphy(hw, MII_BMCR, &mii_reg); + mii_reg |= BMCR_PDOWN; + e1e_wphy(hw, MII_BMCR, mii_reg); + usleep_range(1000, 2000); +} + +/** + * __e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphore before exiting. 
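+ *
+ * When page_set is true the PHY page is assumed to have been selected
+ * already (including wakeup register access being enabled, if applicable)
+ * and no page select is performed here.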
+ **/ +static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + data, true); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_read_phy_reg_page_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired and page already set. + **/ +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * __e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
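+ *
+ * On 82578 PHYs (revision 1 and later) a workaround is applied before
+ * writing the IEEE power-down bit so that MDIO accesses are not disabled.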
+ **/ +static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + &data, false); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* Workaround MDIO accesses being disabled after entering IEEE + * Power Down (when bit 11 of the PHY Control register is set) + */ + if ((hw->phy.type == e1000_phy_82578) && + (hw->phy.revision >= 1) && + (hw->phy.addr == 2) && + !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { + u16 data2 = 0x7EFF; + + ret_val = e1000_access_phy_debug_regs_hv(hw, + (1 << 6) | 0x3, + &data2, false); + if (ret_val) + goto out; + } + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); + +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_write_phy_reg_page_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired and page already set. 
+ **/ +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * e1000_get_phy_addr_for_hv_page - Get PHY address based on page + * @page: page to be accessed + **/ +static u32 e1000_get_phy_addr_for_hv_page(u32 page) +{ + u32 phy_addr = 2; + + if (page >= HV_INTC_FC_PAGE_START) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to be read or written + * @read: determines if operation is read or write + * + * Reads the PHY register at offset and stores the retreived information + * in data. Assumes semaphore already acquired. Note that the procedure + * to access these regs uses the address port and data port to read/write. + * These accesses done with PHY address 2 and without using pages. + **/ +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read) +{ + s32 ret_val; + u32 addr_reg; + u32 data_reg; + + /* This takes care of the difference with desktop vs mobile phy */ + addr_reg = ((hw->phy.type == e1000_phy_82578) ? + I82578_ADDR_REG : I82577_ADDR_REG); + data_reg = addr_reg + 1; + + /* All operations in this function are phy address 2 */ + hw->phy.addr = 2; + + /* masking with 0x3F to remove the page from offset */ + ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); + if (ret_val) { + e_dbg("Could not write the Address Offset port register\n"); + return ret_val; + } + + /* Read or write the data value next */ + if (read) + ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data); + else + ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); + + if (ret_val) + e_dbg("Could not access the Data port register\n"); + + return ret_val; +} + +/** + * e1000_link_stall_workaround_hv - Si workaround + * @hw: pointer to the HW structure + * + * This function works around a Si bug where the link partner can get + * a link up indication before the PHY does. If small packets are sent + * by the link partner they can be placed in the packet buffer without + * being properly accounted for by the PHY and will stall preventing + * further packets from being received. The workaround is to clear the + * packet buffer after the PHY detects link up. + **/ +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 data; + + if (hw->phy.type != e1000_phy_82578) + return 0; + + /* Do not apply workaround if in PHY loopback bit 14 set */ + e1e_rphy(hw, MII_BMCR, &data); + if (data & BMCR_LOOPBACK) + return 0; + + /* check if link is up and at 1Gbps */ + ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); + if (ret_val) + return ret_val; + + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + return 0; + + msleep(200); + + /* flush the packets in the fifo buffer */ + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, + (HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED)); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); +} + +/** + * e1000_check_polarity_82577 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. 
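+ *
+ * The reversal bit is read from I82577_PHY_STATUS_2.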
+ **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return 0; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
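+ *
+ * A raw reading equal to E1000_CABLE_LENGTH_UNDEFINED is rejected and
+ * -E1000_ERR_PHY is returned.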
+ **/ +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + return ret_val; + + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + return -E1000_ERR_PHY; + + phy->cable_length = length; + + return 0; +} diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h new file mode 100644 index 000000000..537d2780b --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -0,0 +1,236 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_PHY_H_ +#define _E1000E_PHY_H_ + +s32 e1000e_check_downshift(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000e_get_phy_id(struct e1000_hw *hw); +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000e_setup_copper_link(struct e1000_hw *hw); +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 
e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +s32 e1000e_determine_phy_address(struct e1000_hw *hw); +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 
+#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_SPEED_100 0x0100 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */ +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ 
+ +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +#endif diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c new file mode 100644 index 000000000..8d7b21dc7 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -0,0 +1,273 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* PTP 1588 Hardware Clock (PHC) + * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) + * Copyright (C) 2011 Richard Cochran + */ + +#include "e1000.h" + +/** + * e1000e_phc_adjfreq - adjust the frequency of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated delta from + * the base frequency. + **/ +static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + struct e1000_hw *hw = &adapter->hw; + bool neg_adj = false; + unsigned long flags; + u64 adjustment; + u32 timinca, incvalue; + s32 ret_val; + + if ((delta > ptp->max_adj) || (delta <= -1000000000)) + return -EINVAL; + + if (delta < 0) { + neg_adj = true; + delta = -delta; + } + + /* Get the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, &timinca); + if (ret_val) + return ret_val; + + spin_lock_irqsave(&adapter->systim_lock, flags); + + incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; + + adjustment = incvalue; + adjustment *= delta; + adjustment = div_u64(adjustment, 1000000000); + + incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment); + + timinca &= ~E1000_TIMINCA_INCVALUE_MASK; + timinca |= incvalue; + + ew32(TIMINCA, timinca); + + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. 
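+ *
+ * The delta is applied to the timecounter via timecounter_adjtime() while
+ * holding the systim_lock spinlock.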
+ **/ +static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_adjtime(&adapter->tc, delta); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. + **/ +static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * e1000e_phc_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset the timecounter to use a new base value instead of the kernel + * wall timer value. + **/ +static int e1000e_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, ns); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + **/ +static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -EOPNOTSUPP; +} + +static void e1000e_systim_overflow_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + systim_overflow_work.work); + struct e1000_hw *hw = &adapter->hw; + struct timespec64 ts; + + adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); + + e_dbg("SYSTIM overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); +} + +static const struct ptp_clock_info e1000e_ptp_clock_info = { + .owner = THIS_MODULE, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = e1000e_phc_adjfreq, + .adjtime = e1000e_phc_adjtime, + .gettime64 = e1000e_phc_gettime, + .settime64 = e1000e_phc_settime, + .enable = e1000e_phc_enable, +}; + +/** + * e1000e_ptp_init - initialize PTP for devices which support it + * @adapter: board private structure + * + * This function performs the required steps for enabling PTP support. + * If PTP support has already been loaded it simply calls the cyclecounter + * init routine and exits. 
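+ *
+ * Adapters without FLAG_HAS_HW_TIMESTAMP return without registering a
+ * PHC clock.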
+ **/ +void e1000e_ptp_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->ptp_clock = NULL; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + adapter->ptp_clock_info = e1000e_ptp_clock_info; + + snprintf(adapter->ptp_clock_info.name, + sizeof(adapter->ptp_clock_info.name), "%pm", + adapter->netdev->perm_addr); + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + adapter->ptp_clock_info.max_adj = 24000000 - 1; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + adapter->ptp_clock_info.max_adj = 600000000 - 1; + break; + default: + break; + } + + INIT_DELAYED_WORK(&adapter->systim_overflow_work, + e1000e_systim_overflow_work); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + e_err("ptp_clock_register failed\n"); + } else { + e_info("registered PHC clock\n"); + } +} + +/** + * e1000e_ptp_remove - disable PTP device and stop the overflow check + * @adapter: board private structure + * + * Stop the PTP support, and cancel the delayed work. + **/ +void e1000e_ptp_remove(struct e1000_adapter *adapter) +{ + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + cancel_delayed_work_sync(&adapter->systim_overflow_work); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_info("removed PHC\n"); + } +} diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h new file mode 100644 index 000000000..85eefc483 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -0,0 +1,250 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_REGS_H_ +#define _E1000E_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXT 0x0002C /* Future Extended - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ +#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_SVCR 0x000F0 +#define E1000_SVT 0x000F4 +#define E1000_LPIC 0x000FC /* Low Power IDLE control */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail 
Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* 
Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ + +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - 
RW */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ + +#endif diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile new file mode 100644 index 000000000..08859dd22 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -0,0 +1,33 @@ +################################################################################ +# +# Intel Ethernet Switch Host Interface Driver +# Copyright(c) 2013 - 2014 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver +# + +obj-$(CONFIG_FM10K) += fm10k.o + +fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \ + fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \ + fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \ + fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h new file mode 100644 index 000000000..c8c8c5bae --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -0,0 +1,536 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_H_ +#define _FM10K_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */ + +#define MAX_QUEUES FM10K_MAX_QUEUES_PF + +#define FM10K_MIN_RXD 128 +#define FM10K_MAX_RXD 4096 +#define FM10K_DEFAULT_RXD 256 + +#define FM10K_MIN_TXD 128 +#define FM10K_MAX_TXD 4096 +#define FM10K_DEFAULT_TXD 256 +#define FM10K_DEFAULT_TX_WORK 256 + +#define FM10K_RXBUFFER_256 256 +#define FM10K_RX_HDR_LEN FM10K_RXBUFFER_256 +#define FM10K_RXBUFFER_2048 2048 +#define FM10K_RX_BUFSZ FM10K_RXBUFFER_2048 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define FM10K_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define FM10K_MAX_STATIONS 63 +struct fm10k_l2_accel { + int size; + u16 count; + u16 dglort; + struct rcu_head rcu; + struct net_device *macvlan[0]; +}; + +enum fm10k_ring_state_t { + __FM10K_TX_DETECT_HANG, + __FM10K_HANG_CHECK_ARMED, +}; + +#define check_for_tx_hang(ring) \ + test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state) + +struct fm10k_tx_buffer { + struct fm10k_tx_desc *next_to_watch; + struct sk_buff *skb; + unsigned int bytecount; + u16 gso_segs; + u16 tx_flags; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct fm10k_rx_buffer { + dma_addr_t dma; + struct page *page; + u32 page_offset; +}; + +struct fm10k_queue_stats { + u64 packets; + u64 bytes; +}; + +struct fm10k_tx_queue_stats { + u64 restart_queue; + u64 csum_err; + u64 tx_busy; + u64 tx_done_old; +}; + +struct fm10k_rx_queue_stats { + u64 alloc_failed; + u64 csum_err; + u64 errors; +}; + +struct fm10k_ring { + struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct fm10k_l2_accel __rcu *l2_accel; /* L2 acceleration list */ + void *desc; /* descriptor ring memory */ + union { + struct fm10k_tx_buffer *tx_buffer; + struct fm10k_rx_buffer *rx_buffer; + }; + u32 __iomem *tail; + unsigned long state; + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u8 queue_index; /* needed for queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u8 qos_pc; /* priority class of queue */ + u16 vid; /* default vlan ID of queue */ + u16 count; /* amount of descriptors */ + + u16 next_to_alloc; + u16 next_to_use; + u16 next_to_clean; + + struct fm10k_queue_stats stats; + struct u64_stats_sync syncp; + union { + /* Tx */ + struct fm10k_tx_queue_stats tx_stats; + /* Rx */ + struct { + struct fm10k_rx_queue_stats rx_stats; + struct sk_buff *skb; + }; + }; +} ____cacheline_internodealigned_in_smp; + +struct fm10k_ring_container { + struct fm10k_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u16 itr; /* interrupt throttle rate value */ + u8 count; /* total number of rings in vector */ +}; + +#define FM10K_ITR_MAX 0x0FFF /* maximum value for ITR */ +#define FM10K_ITR_10K 100 /* 100us */ +#define FM10K_ITR_20K 50 /* 50us */ +#define FM10K_ITR_ADAPTIVE 0x8000 /* adaptive interrupt moderation flag */ + +#define FM10K_ITR_ENABLE (FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR) + +static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring) +{ + return &ring->netdev->_tx[ring->queue_index]; +} + +/* iterator for handling rings in ring container */ +#define fm10k_for_each_ring(pos, head) \ + for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;) + +#define MAX_Q_VECTORS 256 +#define MIN_Q_VECTORS 1 +enum fm10k_non_q_vectors { + FM10K_MBX_VECTOR, +#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF + NON_Q_VECTORS_PF +}; + +#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? 
\ + NON_Q_VECTORS_PF : \ + NON_Q_VECTORS_VF) +#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw)) + +struct fm10k_q_vector { + struct fm10k_intfc *interface; + u32 __iomem *itr; /* pointer to ITR register for this vector */ + u16 v_idx; /* index of q_vector within interface array */ + struct fm10k_ring_container rx, tx; + + struct napi_struct napi; + char name[IFNAMSIZ + 9]; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbg_q_vector; +#endif /* CONFIG_DEBUG_FS */ + struct rcu_head rcu; /* to avoid race with update stats on free */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +enum fm10k_ring_f_enum { + RING_F_RSS, + RING_F_QOS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +struct fm10k_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +struct fm10k_iov_data { + unsigned int num_vfs; + unsigned int next_vf_mbx; + struct rcu_head rcu; + struct fm10k_vf_info vf_info[0]; +}; + +#define fm10k_vxlan_port_for_each(vp, intfc) \ + list_for_each_entry(vp, &(intfc)->vxlan_port, list) +struct fm10k_vxlan_port { + struct list_head list; + sa_family_t sa_family; + __be16 port; +}; + +/* one work queue for entire driver */ +extern struct workqueue_struct *fm10k_workqueue; + +struct fm10k_intfc { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *netdev; + struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */ + struct pci_dev *pdev; + unsigned long state; + + u32 flags; +#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0) +#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1) +#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2) +#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3) +#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4) + int xcast_mode; + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr; + + /* TX */ + struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 tx_busy; + u64 tx_csum_errors; + u64 alloc_failed; + u64 rx_csum_errors; + + u64 tx_bytes_nic; + u64 tx_packets_nic; + u64 rx_bytes_nic; + u64 rx_packets_nic; + u64 rx_drops_nic; + u64 rx_overrun_pf; + u64 rx_overrun_vf; + u32 tx_timeout_count; + + /* RX */ + struct fm10k_ring *rx_ring[MAX_QUEUES]; + + /* Queueing vectors */ + struct fm10k_q_vector *q_vector[MAX_Q_VECTORS]; + struct msix_entry *msix_entries; + int num_q_vectors; /* current number of q_vectors for device */ + struct fm10k_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + + /* SR-IOV information management structure */ + struct fm10k_iov_data *iov_data; + + struct fm10k_hw_stats stats; + struct fm10k_hw hw; + u32 __iomem *uc_addr; + u32 __iomem *sw_addr; + u16 msg_enable; + u16 tx_ring_count; + u16 rx_ring_count; + struct timer_list service_timer; + struct work_struct service_task; + unsigned long next_stats_update; + unsigned long next_tx_hang_check; + unsigned long last_reset; + unsigned long link_down_event; + bool host_ready; + + u32 reta[FM10K_RETA_SIZE]; + u32 rssrk[FM10K_RSSRK_SIZE]; + + /* VXLAN port tracking information */ + struct list_head vxlan_port; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbg_intfc; + +#endif /* CONFIG_DEBUG_FS */ + struct ptp_clock_info ptp_caps; + struct ptp_clock *ptp_clock; + + struct sk_buff_head ts_tx_skb_queue; 
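+ /* Tx timestamp requests that expired before a hardware timestamp arrived */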
+ u32 tx_hwtstamp_timeouts; + + struct hwtstamp_config ts_config; + /* We are unable to actually adjust the clock beyond the frequency + * value. Once the clock is started there is no resetting it. As + * such we maintain a separate offset from the actual hardware clock + * to allow for offset adjustment. + */ + s64 ptp_adjust; + rwlock_t systime_lock; +#ifdef CONFIG_DCB + u8 pfc_en; +#endif + u8 rx_pause; + + /* GLORT resources in use by PF */ + u16 glort; + u16 glort_count; + + /* VLAN ID for updating multicast/unicast lists */ + u16 vid; +}; + +enum fm10k_state_t { + __FM10K_RESETTING, + __FM10K_DOWN, + __FM10K_SERVICE_SCHED, + __FM10K_SERVICE_DISABLE, + __FM10K_MBX_LOCK, + __FM10K_LINK_DOWN, +}; + +static inline void fm10k_mbx_lock(struct fm10k_intfc *interface) +{ + /* busy loop if we cannot obtain the lock as some calls + * such as ndo_set_rx_mode may be made in atomic context + */ + while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state)) + udelay(20); +} + +static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface) +{ + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__FM10K_MBX_LOCK, &interface->state); +} + +static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface) +{ + return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state); +} + +/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */ +static inline __le32 fm10k_test_staterr(union fm10k_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->d.staterr & cpu_to_le32(stat_err_bits); +} + +/* fm10k_desc_unused - calculate if we have unused descriptors */ +static inline u16 fm10k_desc_unused(struct fm10k_ring *ring) +{ + s16 unused = ring->next_to_clean - ring->next_to_use - 1; + + return likely(unused < 0) ? unused + ring->count : unused; +} + +#define FM10K_TX_DESC(R, i) \ + (&(((struct fm10k_tx_desc *)((R)->desc))[i])) +#define FM10K_RX_DESC(R, i) \ + (&(((union fm10k_rx_desc *)((R)->desc))[i])) + +#define FM10K_MAX_TXD_PWR 14 +#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +enum fm10k_tx_flags { + /* Tx offload flags */ + FM10K_TX_FLAGS_CSUM = 0x01, +}; + +/* This structure is stored as little endian values as that is the native + * format of the Rx descriptor. 
The ordering of these fields is reversed + * from the actual ftag header to allow for a single bswap to take care + * of placing all of the values in network order + */ +union fm10k_ftag_info { + __le64 ftag; + struct { + /* dglort and sglort combined into a single 32bit desc read */ + __le32 glort; + /* upper 16 bits of vlan are reserved 0 for swpri_type_user */ + __le32 vlan; + } d; + struct { + __le16 dglort; + __le16 sglort; + __le16 vlan; + __le16 swpri_type_user; + } w; +}; + +struct fm10k_cb { + union { + __le64 tstamp; + unsigned long ts_tx_timeout; + }; + union fm10k_ftag_info fi; +}; + +#define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb) + +/* main */ +extern char fm10k_driver_name[]; +extern const char fm10k_driver_version[]; +int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); +void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); +__be16 fm10k_tx_encap_offload(struct sk_buff *skb); +netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, + struct fm10k_ring *tx_ring); +void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); +bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring); +void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count); + +/* PCI */ +void fm10k_mbx_free_irq(struct fm10k_intfc *); +int fm10k_mbx_request_irq(struct fm10k_intfc *); +void fm10k_qv_free_irq(struct fm10k_intfc *interface); +int fm10k_qv_request_irq(struct fm10k_intfc *interface); +int fm10k_register_pci_driver(void); +void fm10k_unregister_pci_driver(void); +void fm10k_up(struct fm10k_intfc *interface); +void fm10k_down(struct fm10k_intfc *interface); +void fm10k_update_stats(struct fm10k_intfc *interface); +void fm10k_service_event_schedule(struct fm10k_intfc *interface); +void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); +#ifdef CONFIG_NET_POLL_CONTROLLER +void fm10k_netpoll(struct net_device *netdev); +#endif + +/* Netdev */ +struct net_device *fm10k_alloc_netdev(void); +int fm10k_setup_rx_resources(struct fm10k_ring *); +int fm10k_setup_tx_resources(struct fm10k_ring *); +void fm10k_free_rx_resources(struct fm10k_ring *); +void fm10k_free_tx_resources(struct fm10k_ring *); +void fm10k_clean_all_rx_rings(struct fm10k_intfc *); +void fm10k_clean_all_tx_rings(struct fm10k_intfc *); +void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *, + struct fm10k_tx_buffer *); +void fm10k_restore_rx_state(struct fm10k_intfc *); +void fm10k_reset_rx_state(struct fm10k_intfc *); +int fm10k_setup_tc(struct net_device *dev, u8 tc); +int fm10k_open(struct net_device *netdev); +int fm10k_close(struct net_device *netdev); + +/* Ethtool */ +void fm10k_set_ethtool_ops(struct net_device *dev); + +/* IOV */ +s32 fm10k_iov_event(struct fm10k_intfc *interface); +s32 fm10k_iov_mbx(struct fm10k_intfc *interface); +void fm10k_iov_suspend(struct pci_dev *pdev); +int fm10k_iov_resume(struct pci_dev *pdev); +void fm10k_iov_disable(struct pci_dev *pdev); +int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs); +s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid); +int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); +int fm10k_ndo_set_vf_vlan(struct net_device *netdev, + int vf_idx, u16 vid, u8 qos); +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate, + int unused); +int fm10k_ndo_get_vf_config(struct net_device *netdev, + int vf_idx, struct ifla_vf_info *ivi); + +/* DebugFS */ +#ifdef CONFIG_DEBUG_FS +void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector); +void fm10k_dbg_q_vector_exit(struct 
fm10k_q_vector *q_vector); +void fm10k_dbg_intfc_init(struct fm10k_intfc *interface); +void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface); +void fm10k_dbg_init(void); +void fm10k_dbg_exit(void); +#else +static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {} +static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {} +static inline void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) {} +static inline void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) {} +static inline void fm10k_dbg_init(void) {} +static inline void fm10k_dbg_exit(void) {} +#endif /* CONFIG_DEBUG_FS */ + +/* Time Stamping */ +void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, + struct skb_shared_hwtstamps *hwtstamp, + u64 systime); +void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb); +void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, + u64 systime); +void fm10k_ts_reset(struct fm10k_intfc *interface); +void fm10k_ts_init(struct fm10k_intfc *interface); +void fm10k_ts_tx_subtask(struct fm10k_intfc *interface); +void fm10k_ptp_register(struct fm10k_intfc *interface); +void fm10k_ptp_unregister(struct fm10k_intfc *interface); +int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr); + +/* DCB */ +void fm10k_dcbnl_set_ops(struct net_device *dev); +#endif /* _FM10K_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c new file mode 100644 index 000000000..6cfae6ac0 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -0,0 +1,533 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_common.h" + +/** + * fm10k_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Gets the PCI bus info (speed, width, type) then calls helper function to + * store this data within the fm10k_hw structure. 
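+ * Both the device capabilities (PCIE_LINK_CAP/DEV_CAP) and the currently
+ * negotiated values (PCIE_LINK_STATUS/DEV_CTRL) are recorded.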
+ **/ +s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw) +{ + u16 link_cap, link_status, device_cap, device_control; + + /* Get the maximum link width and speed from PCIe config space */ + link_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_CAP); + + switch (link_cap & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus_caps.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus_caps.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus_caps.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus_caps.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus_caps.width = fm10k_bus_width_unknown; + break; + } + + switch (link_cap & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus_caps.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus_caps.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus_caps.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus_caps.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the PCIe maximum payload size for the PCIe function */ + device_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CAP); + + switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) { + case FM10K_PCIE_DEV_CAP_PAYLOAD_128: + hw->bus_caps.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_256: + hw->bus_caps.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CAP_PAYLOAD_512: + hw->bus_caps.payload = fm10k_bus_payload_512; + break; + default: + hw->bus_caps.payload = fm10k_bus_payload_unknown; + break; + } + + /* Get the negotiated link width and speed from PCIe config space */ + link_status = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_STATUS); + + switch (link_status & FM10K_PCIE_LINK_WIDTH) { + case FM10K_PCIE_LINK_WIDTH_1: + hw->bus.width = fm10k_bus_width_pcie_x1; + break; + case FM10K_PCIE_LINK_WIDTH_2: + hw->bus.width = fm10k_bus_width_pcie_x2; + break; + case FM10K_PCIE_LINK_WIDTH_4: + hw->bus.width = fm10k_bus_width_pcie_x4; + break; + case FM10K_PCIE_LINK_WIDTH_8: + hw->bus.width = fm10k_bus_width_pcie_x8; + break; + default: + hw->bus.width = fm10k_bus_width_unknown; + break; + } + + switch (link_status & FM10K_PCIE_LINK_SPEED) { + case FM10K_PCIE_LINK_SPEED_2500: + hw->bus.speed = fm10k_bus_speed_2500; + break; + case FM10K_PCIE_LINK_SPEED_5000: + hw->bus.speed = fm10k_bus_speed_5000; + break; + case FM10K_PCIE_LINK_SPEED_8000: + hw->bus.speed = fm10k_bus_speed_8000; + break; + default: + hw->bus.speed = fm10k_bus_speed_unknown; + break; + } + + /* Get the negotiated PCIe maximum payload size for the PCIe function */ + device_control = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CTRL); + + switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) { + case FM10K_PCIE_DEV_CTRL_PAYLOAD_128: + hw->bus.payload = fm10k_bus_payload_128; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_256: + hw->bus.payload = fm10k_bus_payload_256; + break; + case FM10K_PCIE_DEV_CTRL_PAYLOAD_512: + hw->bus.payload = fm10k_bus_payload_512; + break; + default: + hw->bus.payload = fm10k_bus_payload_unknown; + break; + } + + return 0; +} + +static u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw) +{ + u16 msix_count; + + /* read in value from MSI-X capability register */ + msix_count = fm10k_read_pci_cfg_word(hw, FM10K_PCI_MSIX_MSG_CTRL); + msix_count &= FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > 
FM10K_MAX_MSIX_VECTORS) + msix_count = FM10K_MAX_MSIX_VECTORS; + + return msix_count; +} + +/** + * fm10k_get_invariants_generic - Inits constant values + * @hw: pointer to the hardware structure + * + * Initialize the common invariants for the device. + **/ +s32 fm10k_get_invariants_generic(struct fm10k_hw *hw) +{ + struct fm10k_mac_info *mac = &hw->mac; + + /* initialize GLORT state to avoid any false hits */ + mac->dglort_map = FM10K_DGLORTMAP_NONE; + + /* record maximum number of MSI-X vectors */ + mac->max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * fm10k_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * This function sets the Tx ready flag to indicate that the Tx path has + * been initialized. + **/ +s32 fm10k_start_hw_generic(struct fm10k_hw *hw) +{ + /* set flag indicating we are beginning Tx */ + hw->mac.tx_ready = true; + + return 0; +} + +/** + * fm10k_disable_queues_generic - Stop Tx/Rx queues + * @hw: pointer to hardware structure + * @q_cnt: number of queues to be disabled + * + **/ +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt) +{ + u32 reg; + u16 i, time; + + /* clear tx_ready to prevent any false hits for reset */ + hw->mac.tx_ready = false; + + /* clear the enable bit for all rings */ + for (i = 0; i < q_cnt; i++) { + reg = fm10k_read_reg(hw, FM10K_TXDCTL(i)); + fm10k_write_reg(hw, FM10K_TXDCTL(i), + reg & ~FM10K_TXDCTL_ENABLE); + reg = fm10k_read_reg(hw, FM10K_RXQCTL(i)); + fm10k_write_reg(hw, FM10K_RXQCTL(i), + reg & ~FM10K_RXQCTL_ENABLE); + } + + fm10k_write_flush(hw); + udelay(1); + + /* loop through all queues to verify that they are all disabled */ + for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) { + /* if we are at end of rings all rings are disabled */ + if (i == q_cnt) + return 0; + + /* if queue enables cleared, then move to next ring pair */ + reg = fm10k_read_reg(hw, FM10K_TXDCTL(i)); + if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) { + reg = fm10k_read_reg(hw, FM10K_RXQCTL(i)); + if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) { + i++; + continue; + } + } + + /* decrement time and wait 1 usec */ + time--; + if (time) + udelay(1); + } + + return FM10K_ERR_REQUESTS_PENDING; +} + +/** + * fm10k_stop_hw_generic - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw) +{ + return fm10k_disable_queues_generic(hw, hw->mac.max_queues); +} + +/** + * fm10k_read_hw_stats_32b - Reads value of 32-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing a 32-bit value + * + * Function reads the content of the register and returns the delta + * between the base and the current value. + * **/ +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 delta = fm10k_read_reg(hw, addr) - stat->base_l; + + if (FM10K_REMOVED(hw->hw_addr)) + stat->base_h = 0; + + return delta; +} + +/** + * fm10k_read_hw_stats_48b - Reads value of 48-bit registers + * @hw: pointer to the hardware structure + * @addr: address of register containing the lower 32-bit value + * + * Function reads the content of 2 registers, combined to represent a 48-bit + * statistical value. Extra processing is required to handle overflowing. + * Finally, a delta value is returned representing the difference between the + * values stored in registers and values stored in the statistic counters. 
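+ * The upper 32 bits are sampled before and after the lower 32 bits; if the
+ * two samples differ, a rollover occurred during the read and the registers
+ * are read again.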
+ * **/ +static u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat) +{ + u32 count_l; + u32 count_h; + u32 count_tmp; + u64 delta; + + count_h = fm10k_read_reg(hw, addr + 1); + + /* Check for overflow */ + do { + count_tmp = count_h; + count_l = fm10k_read_reg(hw, addr); + count_h = fm10k_read_reg(hw, addr + 1); + } while (count_h != count_tmp); + + delta = ((u64)(count_h - stat->base_h) << 32) + count_l; + delta -= stat->base_l; + + return delta & FM10K_48_BIT_MASK; +} + +/** + * fm10k_update_hw_base_48b - Updates 48-bit statistic base value + * @stat: pointer to the hardware statistic structure + * @delta: value to be updated into the hardware statistic structure + * + * Function receives a value and determines if an update is required based on + * a delta calculation. Only the base value will be updated. + **/ +static void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta) +{ + if (!delta) + return; + + /* update lower 32 bits */ + delta += stat->base_l; + stat->base_l = (u32)delta; + + /* update upper 32 bits */ + stat->base_h += (u32)(delta >> 32); +} + +/** + * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the TX queue statistics counters that are related to the + * hardware. + **/ +static void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_tx, id_tx_prev, tx_packets; + u64 tx_bytes = 0; + + /* Retrieve TX Owner Data */ + id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx)); + + /* Process TX Ring */ + do { + tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx), + &q->tx_packets); + + if (tx_packets) + tx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBTC_L(idx), + &q->tx_bytes); + + /* Re-Check Owner Data */ + id_tx_prev = id_tx; + id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx)); + } while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_tx &= FM10K_TXQCTL_ID_MASK; + id_tx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->tx_stats_idx == id_tx) { + q->tx_packets.count += tx_packets; + q->tx_bytes.count += tx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->tx_packets, tx_packets); + fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes); + + q->tx_stats_idx = id_tx; +} + +/** + * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * + * Function updates the RX queue statistics counters that are related to the + * hardware. 
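+ * The owner ID in RXQCTL is re-read until it is stable, and the counts are
+ * only accumulated when the queue is still bound to the owner recorded on
+ * the previous update.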
+ **/ +static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u32 idx) +{ + u32 id_rx, id_rx_prev, rx_packets, rx_drops; + u64 rx_bytes = 0; + + /* Retrieve RX Owner Data */ + id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); + + /* Process RX Ring */ + do { + rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), + &q->rx_drops); + + rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx), + &q->rx_packets); + + if (rx_packets) + rx_bytes = fm10k_read_hw_stats_48b(hw, + FM10K_QBRC_L(idx), + &q->rx_bytes); + + /* Re-Check Owner Data */ + id_rx_prev = id_rx; + id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); + } while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id_rx &= FM10K_RXQCTL_ID_MASK; + id_rx |= FM10K_STAT_VALID; + + /* update packet counts */ + if (q->rx_stats_idx == id_rx) { + q->rx_drops.count += rx_drops; + q->rx_packets.count += rx_packets; + q->rx_bytes.count += rx_bytes; + } + + /* update bases and record ID */ + fm10k_update_hw_base_32b(&q->rx_drops, rx_drops); + fm10k_update_hw_base_32b(&q->rx_packets, rx_packets); + fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes); + + q->rx_stats_idx = id_rx; +} + +/** + * fm10k_update_hw_stats_q - Updates queue statistics counters + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function updates the queue statistics counters that are related to the + * hardware. + **/ +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++, idx++, q++) { + fm10k_update_hw_stats_tx_q(hw, q, idx); + fm10k_update_hw_stats_rx_q(hw, q, idx); + } +} + +/** + * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues + * @hw: pointer to the hardware structure + * @q: pointer to the ring of hardware statistics queue + * @idx: index pointing to the start of the ring iteration + * @count: number of queues to iterate over + * + * Function invalidates the index values for the queues so any updates that + * may have happened are ignored and the base for the queue stats is reset. + **/ +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++, idx++, q++) { + q->rx_stats_idx = 0; + q->tx_stats_idx = 0; + } +} + +/** + * fm10k_get_host_state_generic - Returns the state of the host + * @hw: pointer to hardware structure + * @host_ready: pointer to boolean value that will record host state + * + * This function will check the health of the mailbox and Tx queue 0 + * in order to determine if we should report that the link is up or not. 
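+ * A reset is requested if the Tx enable bit was unexpectedly lost or if the
+ * mailbox has timed out.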
+ **/ +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(0)); + + /* process upstream mailbox in case interrupts were disabled */ + mbx->ops.process(hw, mbx); + + /* If Tx is no longer enabled link should come down */ + if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE)) + mac->get_host_state = true; + + /* exit if not checking for link, or link cannot be changed */ + if (!mac->get_host_state || !(~txdctl)) + goto out; + + /* if we somehow dropped the Tx enable we should reset */ + if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* if Mailbox timed out we should request reset */ + if (!mbx->timeout) { + ret_val = FM10K_ERR_RESET_REQUESTED; + goto out; + } + + /* verify Mailbox is still valid */ + if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU)) + goto out; + + /* interface cannot receive traffic without logical ports */ + if (mac->dglort_map == FM10K_DGLORTMAP_NONE) + goto out; + + /* if we passed all the tests above then the switch is ready and we no + * longer need to check for link + */ + mac->get_host_state = false; + +out: + *host_ready = !mac->get_host_state; + return ret_val; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h new file mode 100644 index 000000000..45e4e5b1f --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h @@ -0,0 +1,65 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_COMMON_H_ +#define _FM10K_COMMON_H_ + +#include "fm10k_type.h" + +#define FM10K_REMOVED(hw_addr) unlikely(!(hw_addr)) + +/* PCI configuration read */ +u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg); + +/* read operations, indexed using DWORDS */ +u32 fm10k_read_reg(struct fm10k_hw *hw, int reg); + +/* write operations, indexed using DWORDS */ +#define fm10k_write_reg(hw, reg, val) \ +do { \ + u32 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ + if (!FM10K_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +/* Switch register write operations, index using DWORDS */ +#define fm10k_write_sw_reg(hw, reg, val) \ +do { \ + u32 __iomem *sw_addr = ACCESS_ONCE((hw)->sw_addr); \ + if (!FM10K_REMOVED(sw_addr)) \ + writel((val), &sw_addr[(reg)]); \ +} while (0) + +/* read ctrl register which has no clear on read fields as PCIe flush */ +#define fm10k_write_flush(hw) fm10k_read_reg((hw), FM10K_CTRL) +s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw); +s32 fm10k_get_invariants_generic(struct fm10k_hw *hw); +s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt); +s32 fm10k_start_hw_generic(struct fm10k_hw *hw); +s32 fm10k_stop_hw_generic(struct fm10k_hw *hw); +u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr, + struct fm10k_hw_stat *stat); +#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta)) +void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, + u32 idx, u32 count); +#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0) +void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count); +s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready); +#endif /* _FM10K_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c new file mode 100644 index 000000000..5c7a4d766 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -0,0 +1,174 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k.h" + +#ifdef CONFIG_DCB +/** + * fm10k_dcbnl_ieee_getets - get the ETS configuration for the device + * @dev: netdev interface for the device + * @ets: ETS structure to push configuration to + **/ +static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) +{ + int i; + + /* we support 8 TCs in all modes */ + ets->ets_cap = IEEE_8021QAZ_MAX_TCS; + ets->cbs = 0; + + /* we only support strict priority and cannot do traffic shaping */ + memset(ets->tc_tx_bw, 0, sizeof(ets->tc_tx_bw)); + memset(ets->tc_rx_bw, 0, sizeof(ets->tc_rx_bw)); + memset(ets->tc_tsa, IEEE_8021QAZ_TSA_STRICT, sizeof(ets->tc_tsa)); + + /* populate the prio map based on the netdev */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + ets->prio_tc[i] = netdev_get_prio_tc_map(dev, i); + + return 0; +} + +/** + * fm10k_dcbnl_ieee_setets - set the ETS configuration for the device + * @dev: netdev interface for the device + * @ets: ETS structure to pull configuration from + **/ +static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) +{ + u8 num_tc = 0; + int i, err; + + /* verify type and determine num_tcs needed */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tx_bw[i] || ets->tc_rx_bw[i]) + return -EINVAL; + if (ets->tc_tsa[i] != IEEE_8021QAZ_TSA_STRICT) + return -EINVAL; + if (ets->prio_tc[i] > num_tc) + num_tc = ets->prio_tc[i]; + } + + /* if requested TC is greater than 0 then num_tcs is max + 1 */ + if (num_tc) + num_tc++; + + if (num_tc > IEEE_8021QAZ_MAX_TCS) + return -EINVAL; + + /* update TC hardware mapping if necessary */ + if (num_tc != netdev_get_num_tc(dev)) { + err = fm10k_setup_tc(dev, num_tc); + if (err) + return err; + } + + /* update priority mapping */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]); + + return 0; +} + +/** + * fm10k_dcbnl_ieee_getpfc - get the PFC configuration for the device + * @dev: netdev interface for the device + * @pfc: PFC structure to push configuration to + **/ +static int fm10k_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* record flow control max count and state of TCs */ + pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + pfc->pfc_en = interface->pfc_en; + + return 0; +} + +/** + * fm10k_dcbnl_ieee_setpfc - set the PFC configuration for the device + * @dev: netdev interface for the device + * @pfc: PFC structure to pull configuration from + **/ +static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* record PFC configuration to interface */ + interface->pfc_en = pfc->pfc_en; + + /* if we are running update the drop_en state for all queues */ + if (netif_running(dev)) + fm10k_update_rx_drop_en(interface); + + return 0; +} + +/** + * fm10k_dcbnl_ieee_getdcbx - get the DCBX configuration for the device + * @dev: netdev interface for the device + * + * Returns that we support only IEEE DCB for this interface + **/ +static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +/** + * fm10k_dcbnl_ieee_setdcbx - get the DCBX configuration for the device + * @dev: netdev interface for the device + * @mode: new mode for this device + * + * Returns error on attempt to enable anything but IEEE DCB for this interface + **/ +static u8 fm10k_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 
mode) +{ + return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0; +} + +static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = { + .ieee_getets = fm10k_dcbnl_ieee_getets, + .ieee_setets = fm10k_dcbnl_ieee_setets, + .ieee_getpfc = fm10k_dcbnl_ieee_getpfc, + .ieee_setpfc = fm10k_dcbnl_ieee_setpfc, + + .getdcbx = fm10k_dcbnl_getdcbx, + .setdcbx = fm10k_dcbnl_setdcbx, +}; + +#endif /* CONFIG_DCB */ +/** + * fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev + * @dev: netdev interface for the device + * + * Enables PF for DCB by assigning DCBNL ops pointer. + **/ +void fm10k_dcbnl_set_ops(struct net_device *dev) +{ +#ifdef CONFIG_DCB + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + if (hw->mac.type == fm10k_mac_pf) + dev->dcbnl_ops = &fm10k_dcbnl_ops; +#endif /* CONFIG_DCB */ +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c new file mode 100644 index 000000000..f45b4d71a --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -0,0 +1,261 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifdef CONFIG_DEBUG_FS + +#include "fm10k.h" + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +static struct dentry *dbg_root; + +/* Descriptor Seq Functions */ + +static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos) +{ + struct fm10k_ring *ring = s->private; + + return (*pos < ring->count) ? pos : NULL; +} + +static void *fm10k_dbg_desc_seq_next(struct seq_file *s, + void __always_unused *v, loff_t *pos) +{ + struct fm10k_ring *ring = s->private; + + return (++(*pos) < ring->count) ? pos : NULL; +} + +static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s, + __always_unused void *v) +{ + /* Do nothing. 
*/ +} + +static void fm10k_dbg_desc_break(struct seq_file *s, int i) +{ + while (i--) + seq_puts(s, "-"); + + seq_puts(s, "\n"); +} + +static int fm10k_dbg_tx_desc_seq_show(struct seq_file *s, void *v) +{ + struct fm10k_ring *ring = s->private; + int i = *(loff_t *)v; + static const char tx_desc_hdr[] = + "DES BUFFER_ADDRESS LENGTH VLAN MSS HDRLEN FLAGS\n"; + + /* Generate header */ + if (!i) { + seq_printf(s, tx_desc_hdr); + fm10k_dbg_desc_break(s, sizeof(tx_desc_hdr) - 1); + } + + /* Validate descriptor allocation */ + if (!ring->desc) { + seq_printf(s, "%03X Descriptor ring not allocated.\n", i); + } else { + struct fm10k_tx_desc *txd = FM10K_TX_DESC(ring, i); + + seq_printf(s, "%03X %#018llx %#06x %#06x %#06x %#06x %#04x\n", + i, txd->buffer_addr, txd->buflen, txd->vlan, + txd->mss, txd->hdrlen, txd->flags); + } + + return 0; +} + +static int fm10k_dbg_rx_desc_seq_show(struct seq_file *s, void *v) +{ + struct fm10k_ring *ring = s->private; + int i = *(loff_t *)v; + static const char rx_desc_hdr[] = + "DES DATA RSS STATERR LENGTH VLAN DGLORT SGLORT TIMESTAMP\n"; + + /* Generate header */ + if (!i) { + seq_printf(s, rx_desc_hdr); + fm10k_dbg_desc_break(s, sizeof(rx_desc_hdr) - 1); + } + + /* Validate descriptor allocation */ + if (!ring->desc) { + seq_printf(s, "%03X Descriptor ring not allocated.\n", i); + } else { + union fm10k_rx_desc *rxd = FM10K_RX_DESC(ring, i); + + seq_printf(s, + "%03X %#010x %#010x %#010x %#06x %#06x %#06x %#06x %#018llx\n", + i, rxd->d.data, rxd->d.rss, rxd->d.staterr, + rxd->w.length, rxd->w.vlan, rxd->w.dglort, + rxd->w.sglort, rxd->q.timestamp); + } + + return 0; +} + +static const struct seq_operations fm10k_dbg_tx_desc_seq_ops = { + .start = fm10k_dbg_desc_seq_start, + .next = fm10k_dbg_desc_seq_next, + .stop = fm10k_dbg_desc_seq_stop, + .show = fm10k_dbg_tx_desc_seq_show, +}; + +static const struct seq_operations fm10k_dbg_rx_desc_seq_ops = { + .start = fm10k_dbg_desc_seq_start, + .next = fm10k_dbg_desc_seq_next, + .stop = fm10k_dbg_desc_seq_stop, + .show = fm10k_dbg_rx_desc_seq_show, +}; + +static int fm10k_dbg_desc_open(struct inode *inode, struct file *filep) +{ + struct fm10k_ring *ring = inode->i_private; + struct fm10k_q_vector *q_vector = ring->q_vector; + const struct seq_operations *desc_seq_ops; + int err; + + if (ring < q_vector->rx.ring) + desc_seq_ops = &fm10k_dbg_tx_desc_seq_ops; + else + desc_seq_ops = &fm10k_dbg_rx_desc_seq_ops; + + err = seq_open(filep, desc_seq_ops); + if (err) + return err; + + ((struct seq_file *)filep->private_data)->private = ring; + + return 0; +} + +static const struct file_operations fm10k_dbg_desc_fops = { + .owner = THIS_MODULE, + .open = fm10k_dbg_desc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/** + * fm10k_dbg_q_vector_init - setup debugfs for the q_vectors + * @q_vector: q_vector to allocate directories for + * + * A folder is created for each q_vector found. In each q_vector + * folder, a debugfs file is created for each tx and rx ring + * allocated to the q_vector. 
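+ * Nothing is created if the parent interface directory is missing from debugfs.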
+ **/ +void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) +{ + struct fm10k_intfc *interface = q_vector->interface; + char name[16]; + int i; + + if (!interface->dbg_intfc) + return; + + /* Generate a folder for each q_vector */ + sprintf(name, "q_vector.%03d", q_vector->v_idx); + + q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc); + if (!q_vector->dbg_q_vector) + return; + + /* Generate a file for each tx ring in the q_vector */ + for (i = 0; i < q_vector->tx.count; i++) { + struct fm10k_ring *ring = &q_vector->tx.ring[i]; + + sprintf(name, "tx_ring.%03d", ring->queue_index); + + debugfs_create_file(name, 0600, + q_vector->dbg_q_vector, ring, + &fm10k_dbg_desc_fops); + } + + /* Generate a file for each rx ring in the q_vector */ + for (i = 0; i < q_vector->rx.count; i++) { + struct fm10k_ring *ring = &q_vector->rx.ring[i]; + + sprintf(name, "rx_ring.%03d", ring->queue_index); + + debugfs_create_file(name, 0600, + q_vector->dbg_q_vector, ring, + &fm10k_dbg_desc_fops); + } +} + +/** + * fm10k_dbg_q_vector_exit - clean out debugfs entries for the q_vector + * @q_vector: q_vector whose debugfs directory is removed + **/ +void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) +{ + struct fm10k_intfc *interface = q_vector->interface; + + if (interface->dbg_intfc) + debugfs_remove_recursive(q_vector->dbg_q_vector); + q_vector->dbg_q_vector = NULL; +} + +/** + * fm10k_dbg_intfc_init - setup the debugfs directory for the interface + * @interface: the interface that is starting up + **/ + +void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) +{ + const char *name = pci_name(interface->pdev); + + if (dbg_root) + interface->dbg_intfc = debugfs_create_dir(name, dbg_root); +} + +/** + * fm10k_dbg_intfc_exit - clean out the interface's debugfs entries + * @interface: the interface that is stopping + **/ +void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) +{ + if (dbg_root) + debugfs_remove_recursive(interface->dbg_intfc); + interface->dbg_intfc = NULL; +} + +/** + * fm10k_dbg_init - start up debugfs for the driver + **/ +void fm10k_dbg_init(void) +{ + dbg_root = debugfs_create_dir(fm10k_driver_name, NULL); +} + +/** + * fm10k_dbg_exit - clean out the driver's debugfs entries + **/ +void fm10k_dbg_exit(void) +{ + debugfs_remove_recursive(dbg_root); + dbg_root = NULL; +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c new file mode 100644 index 000000000..4b9d9f88a --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -0,0 +1,1108 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include + +#include "fm10k.h" + +struct fm10k_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define FM10K_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_net_stats[] = { + FM10K_NETDEV_STAT(tx_packets), + FM10K_NETDEV_STAT(tx_bytes), + FM10K_NETDEV_STAT(tx_errors), + FM10K_NETDEV_STAT(rx_packets), + FM10K_NETDEV_STAT(rx_bytes), + FM10K_NETDEV_STAT(rx_errors), + FM10K_NETDEV_STAT(rx_dropped), + + /* detailed Rx errors */ + FM10K_NETDEV_STAT(rx_length_errors), + FM10K_NETDEV_STAT(rx_crc_errors), + FM10K_NETDEV_STAT(rx_fifo_errors), +}; + +#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats) + +#define FM10K_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \ + .stat_offset = offsetof(struct fm10k_intfc, _stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_global_stats[] = { + FM10K_STAT("tx_restart_queue", restart_queue), + FM10K_STAT("tx_busy", tx_busy), + FM10K_STAT("tx_csum_errors", tx_csum_errors), + FM10K_STAT("rx_alloc_failed", alloc_failed), + FM10K_STAT("rx_csum_errors", rx_csum_errors), + + FM10K_STAT("tx_packets_nic", tx_packets_nic), + FM10K_STAT("tx_bytes_nic", tx_bytes_nic), + FM10K_STAT("rx_packets_nic", rx_packets_nic), + FM10K_STAT("rx_bytes_nic", rx_bytes_nic), + FM10K_STAT("rx_drops_nic", rx_drops_nic), + FM10K_STAT("rx_overrun_pf", rx_overrun_pf), + FM10K_STAT("rx_overrun_vf", rx_overrun_vf), + + FM10K_STAT("swapi_status", hw.swapi.status), + FM10K_STAT("mac_rules_used", hw.swapi.mac.used), + FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), + + FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy), + FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped), + FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages), + FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords), + FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages), + FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords), + FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err), + + FM10K_STAT("tx_hang_count", tx_timeout_count), + + FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), +}; + +static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { + FM10K_STAT("timeout", stats.timeout.count), + FM10K_STAT("ur", stats.ur.count), + FM10K_STAT("ca", stats.ca.count), + FM10K_STAT("um", stats.um.count), + FM10K_STAT("xec", stats.xec.count), + FM10K_STAT("vlan_drop", stats.vlan_drop.count), + FM10K_STAT("loopback_drop", stats.loopback_drop.count), + FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), +}; + +#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) +#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) + +#define FM10K_QUEUE_STATS_LEN(_n) \ + ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) + +#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ + FM10K_NETDEV_STATS_LEN) + +static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = { + "Mailbox test (on/offline)" +}; + +#define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN) + +enum fm10k_self_test_types { + FM10K_TEST_MBX, + FM10K_TEST_MAX = FM10K_TEST_LEN +}; + +static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + char *p = (char *)data; + int i; + + switch (stringset) { + case 
ETH_SS_TEST: + memcpy(data, *fm10k_gstrings_test, + FM10K_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_global_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + if (interface->hw.mac.type != fm10k_mac_vf) + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < interface->hw.mac.max_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int fm10k_get_sset_count(struct net_device *dev, int sset) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + int stats_len = FM10K_STATIC_STATS_LEN; + + switch (sset) { + case ETH_SS_TEST: + return FM10K_TEST_LEN; + case ETH_SS_STATS: + stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues); + + if (hw->mac.type != fm10k_mac_vf) + stats_len += FM10K_PF_STATS_LEN; + + return stats_len; + default: + return -EOPNOTSUPP; + } +} + +static void fm10k_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); + struct fm10k_intfc *interface = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + char *p; + int i, j; + + fm10k_update_stats(interface); + + for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { + p = (char *)interface + + fm10k_gstrings_global_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + if (interface->hw.mac.type != fm10k_mac_vf) + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + p = (char *)interface + + fm10k_gstrings_pf_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (i = 0; i < interface->hw.mac.max_queues; i++) { + struct fm10k_ring *ring; + u64 *queue_stat; + + ring = interface->tx_ring[i]; + if (ring) + queue_stat = (u64 *)&ring->stats; + for (j = 0; j < stat_count; j++) + *(data++) = ring ? queue_stat[j] : 0; + + ring = interface->rx_ring[i]; + if (ring) + queue_stat = (u64 *)&ring->stats; + for (j = 0; j < stat_count; j++) + *(data++) = ring ? 
queue_stat[j] : 0; + } +} + +/* If function below adds more registers this define needs to be updated */ +#define FM10K_REGS_LEN_Q 29 + +static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i) +{ + int idx = 0; + + buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i)); + buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i)); + + BUG_ON(idx != FM10K_REGS_LEN_Q); +} + +/* If function above adds more registers this define needs to be updated */ +#define FM10K_REGS_LEN_VSI 43 + +static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i) +{ + int idx = 0, j; + + buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i)); + for (j = 0; j < 10; j++) + buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j)); + for (j = 0; j < 32; j++) + buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j)); + + BUG_ON(idx != FM10K_REGS_LEN_VSI); +} + +static void fm10k_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + u32 *buff = p; + u16 i; + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + switch (hw->mac.type) { + case fm10k_mac_pf: + /* General PF Registers */ + *(buff++) = fm10k_read_reg(hw, FM10K_CTRL); + *(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT); + *(buff++) = fm10k_read_reg(hw, FM10K_GCR); + *(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT); + + for (i = 0; i < 8; i++) { + *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i)); + *(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i)); + } + + for (i = 0; i < 65; i++) { + fm10k_get_reg_vsi(hw, buff, i); + buff += FM10K_REGS_LEN_VSI; + } + + *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL); + *(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2); + + for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) { + fm10k_get_reg_q(hw, buff, i); + buff += FM10K_REGS_LEN_Q; + } + + *(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL); + + for (i = 0; i < 8; i++) + *(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i)); + + /* Interrupt Throttling Registers */ + for (i = 0; i < 130; i++) + *(buff++) = fm10k_read_reg(hw, FM10K_ITR(i)); + + break; + case fm10k_mac_vf: + /* 
General VF registers */ + *(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL); + *(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP); + *(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME); + + /* Interrupt Throttling Registers */ + for (i = 0; i < 8; i++) + *(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i)); + + fm10k_get_reg_vsi(hw, buff, 0); + buff += FM10K_REGS_LEN_VSI; + + for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) { + if (i < hw->mac.max_queues) + fm10k_get_reg_q(hw, buff, i); + else + memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q); + buff += FM10K_REGS_LEN_Q; + } + + break; + default: + return; + } +} + +/* If function above adds more registers these define need to be updated */ +#define FM10K_REGS_LEN_PF \ +(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q)) +#define FM10K_REGS_LEN_VF \ +(11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q)) + +static int fm10k_get_regs_len(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + + switch (hw->mac.type) { + case fm10k_mac_pf: + return FM10K_REGS_LEN_PF * sizeof(u32); + case fm10k_mac_vf: + return FM10K_REGS_LEN_VF * sizeof(u32); + default: + return 0; + } +} + +static void fm10k_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + strncpy(info->driver, fm10k_driver_name, + sizeof(info->driver) - 1); + strncpy(info->version, fm10k_driver_version, + sizeof(info->version) - 1); + strncpy(info->bus_info, pci_name(interface->pdev), + sizeof(info->bus_info) - 1); + + info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS); + + info->regdump_len = fm10k_get_regs_len(dev); +} + +static void fm10k_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* record fixed values for autoneg and tx pause */ + pause->autoneg = 0; + pause->tx_pause = 1; + + pause->rx_pause = interface->rx_pause ? 1 : 0; +} + +static int fm10k_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + if (pause->autoneg || !pause->tx_pause) + return -EINVAL; + + /* we can only support pause on the PF to avoid head-of-line blocking */ + if (hw->mac.type == fm10k_mac_pf) + interface->rx_pause = pause->rx_pause ? 
~0 : 0; + else if (pause->rx_pause) + return -EINVAL; + + if (netif_running(dev)) + fm10k_update_rx_drop_en(interface); + + return 0; +} + +static u32 fm10k_get_msglevel(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + return interface->msg_enable; +} + +static void fm10k_set_msglevel(struct net_device *netdev, u32 data) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + interface->msg_enable = data; +} + +static void fm10k_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + ring->rx_max_pending = FM10K_MAX_RXD; + ring->tx_max_pending = FM10K_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = interface->rx_ring_count; + ring->tx_pending = interface->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int fm10k_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + FM10K_MIN_TXD, FM10K_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + FM10K_MIN_RXD, FM10K_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == interface->tx_ring_count) && + (new_rx_count == interface->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) + usleep_range(1000, 2000); + + if (!netif_running(interface->netdev)) { + for (i = 0; i < interface->num_tx_queues; i++) + interface->tx_ring[i]->count = new_tx_count; + for (i = 0; i < interface->num_rx_queues; i++) + interface->rx_ring[i]->count = new_rx_count; + interface->tx_ring_count = new_tx_count; + interface->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, interface->num_tx_queues, interface->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct fm10k_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + fm10k_down(interface); + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
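+ * The interface is brought down around the swap, so the rings are idle while their descriptor memory is replaced.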
+ */ + if (new_tx_count != interface->tx_ring_count) { + for (i = 0; i < interface->num_tx_queues; i++) { + memcpy(&temp_ring[i], interface->tx_ring[i], + sizeof(struct fm10k_ring)); + + temp_ring[i].count = new_tx_count; + err = fm10k_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + fm10k_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < interface->num_tx_queues; i++) { + fm10k_free_tx_resources(interface->tx_ring[i]); + + memcpy(interface->tx_ring[i], &temp_ring[i], + sizeof(struct fm10k_ring)); + } + + interface->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != interface->rx_ring_count) { + for (i = 0; i < interface->num_rx_queues; i++) { + memcpy(&temp_ring[i], interface->rx_ring[i], + sizeof(struct fm10k_ring)); + + temp_ring[i].count = new_rx_count; + err = fm10k_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + fm10k_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < interface->num_rx_queues; i++) { + fm10k_free_rx_resources(interface->rx_ring[i]); + + memcpy(interface->rx_ring[i], &temp_ring[i], + sizeof(struct fm10k_ring)); + } + + interface->rx_ring_count = new_rx_count; + } + +err_setup: + fm10k_up(interface); + vfree(temp_ring); +clear_reset: + clear_bit(__FM10K_RESETTING, &interface->state); + return err; +} + +static int fm10k_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + ec->use_adaptive_tx_coalesce = + !!(interface->tx_itr & FM10K_ITR_ADAPTIVE); + ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE; + + ec->use_adaptive_rx_coalesce = + !!(interface->rx_itr & FM10K_ITR_ADAPTIVE); + ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE; + + return 0; +} + +static int fm10k_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_q_vector *qv; + u16 tx_itr, rx_itr; + int i; + + /* verify limits */ + if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) || + (ec->tx_coalesce_usecs > FM10K_ITR_MAX)) + return -EINVAL; + + /* record settings */ + tx_itr = ec->tx_coalesce_usecs; + rx_itr = ec->rx_coalesce_usecs; + + /* set initial values for adaptive ITR */ + if (ec->use_adaptive_tx_coalesce) + tx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_10K; + + if (ec->use_adaptive_rx_coalesce) + rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + + /* update interface */ + interface->tx_itr = tx_itr; + interface->rx_itr = rx_itr; + + /* update q_vectors */ + for (i = 0; i < interface->num_q_vectors; i++) { + qv = interface->q_vector[i]; + qv->tx.itr = tx_itr; + qv->rx.itr = rx_itr; + } + + return 0; +} + +static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on fm10k */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V4_FLOW: + case AH_V6_FLOW: + case ESP_V4_FLOW: + case ESP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + 
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 __always_unused *rule_locs) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = interface->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = fm10k_get_rss_hash_opts(interface, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \ + FM10K_FLAG_RSS_FIELD_IPV6_UDP) +static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface, + struct ethtool_rxnfc *nfc) +{ + u32 flags = interface->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != interface->flags) { + struct fm10k_hw *hw = &interface->hw; + u32 mrqc; + + if ((flags & UDP_RSS_FLAGS) && + !(interface->flags & UDP_RSS_FLAGS)) + netif_warn(interface, drv, interface->netdev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + interface->flags = flags; + + /* Perform hash on these packet types */ + mrqc = FM10K_MRQC_IPV4 | + FM10K_MRQC_TCP_IPV4 | + FM10K_MRQC_IPV6 | + FM10K_MRQC_TCP_IPV6; + + if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= FM10K_MRQC_UDP_IPV4; + if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= FM10K_MRQC_UDP_IPV6; + + fm10k_write_reg(hw, FM10K_MRQC(0), mrqc); + } + + return 0; +} + +static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = fm10k_set_rss_hash_opt(interface, cmd); + break; + default: + break; + } + + return ret; +} + +static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 attr_flag, test_msg[6]; + unsigned long timeout; + int err; + + /* 
For now this is a VF only feature */ + if (hw->mac.type != fm10k_mac_vf) + return 0; + + /* loop through both nested and unnested attribute types */ + for (attr_flag = (1 << FM10K_TEST_MSG_UNSET); + attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED)); + attr_flag += attr_flag) { + /* generate message to be tested */ + fm10k_tlv_msg_test_create(test_msg, attr_flag); + + fm10k_mbx_lock(interface); + mbx->test_result = FM10K_NOT_IMPLEMENTED; + err = mbx->ops.enqueue_tx(hw, mbx, test_msg); + fm10k_mbx_unlock(interface); + + /* wait up to 1 second for response */ + timeout = jiffies + HZ; + do { + if (err < 0) + goto err_out; + + usleep_range(500, 1000); + + fm10k_mbx_lock(interface); + mbx->ops.process(hw, mbx); + fm10k_mbx_unlock(interface); + + err = mbx->test_result; + if (!err) + break; + } while (time_is_after_jiffies(timeout)); + + /* reporting errors */ + if (err) + goto err_out; + } + +err_out: + *data = err < 0 ? (attr_flag) : (err > 0); + return err; +} + +static void fm10k_self_test(struct net_device *dev, + struct ethtool_test *eth_test, u64 *data) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + memset(data, 0, sizeof(*data) * FM10K_TEST_LEN); + + if (FM10K_REMOVED(hw)) { + netif_err(interface, drv, dev, + "Interface removed - test blocked\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + + if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX])) + eth_test->flags |= ETH_TEST_FL_FAILED; +} + +static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) +{ + return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; +} + +static int fm10k_get_reta(struct net_device *netdev, u32 *indir) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int i; + + if (!indir) + return 0; + + for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { + u32 reta = interface->reta[i]; + + indir[0] = (reta << 24) >> 24; + indir[1] = (reta << 16) >> 24; + indir[2] = (reta << 8) >> 24; + indir[3] = (reta) >> 24; + } + + return 0; +} + +static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + int i; + u16 rss_i; + + if (!indir) + return 0; + + /* Verify user input. 
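+ * Each redirection entry must reference one of the RSS queues currently in use.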
*/ + rss_i = interface->ring_feature[RING_F_RSS].indices; + for (i = fm10k_get_reta_size(netdev); i--;) { + if (indir[i] < rss_i) + continue; + return -EINVAL; + } + + /* record entries to reta table */ + for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { + u32 reta = indir[0] | + (indir[1] << 8) | + (indir[2] << 16) | + (indir[3] << 24); + + if (interface->reta[i] == reta) + continue; + + interface->reta[i] = reta; + fm10k_write_reg(hw, FM10K_RETA(0, i), reta); + } + + return 0; +} + +static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev) +{ + return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; +} + +static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int i, err; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + err = fm10k_get_reta(netdev, indir); + if (err || !key) + return err; + + for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) + *(__le32 *)key = cpu_to_le32(interface->rssrk[i]); + + return 0; +} + +static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + int i, err; + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + err = fm10k_set_reta(netdev, indir); + if (err || !key) + return err; + + for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) { + u32 rssrk = le32_to_cpu(*(__le32 *)key); + + if (interface->rssrk[i] == rssrk) + continue; + + interface->rssrk[i] = rssrk; + fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk); + } + + return 0; +} + +static unsigned int fm10k_max_channels(struct net_device *dev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + unsigned int max_combined = interface->hw.mac.max_queues; + u8 tcs = netdev_get_num_tc(dev); + + /* For QoS report channels per traffic class */ + if (tcs > 1) + max_combined = 1 << (fls(max_combined / tcs) - 1); + + return max_combined; +} + +static void fm10k_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + + /* report maximum channels */ + ch->max_combined = fm10k_max_channels(dev); + + /* report info for other vector */ + ch->max_other = NON_Q_VECTORS(hw); + ch->other_count = ch->max_other; + + /* record RSS queues */ + ch->combined_count = interface->ring_feature[RING_F_RSS].indices; +} + +static int fm10k_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + unsigned int count = ch->combined_count; + struct fm10k_hw *hw = &interface->hw; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS(hw)) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > fm10k_max_channels(dev)) + return -EINVAL; + + interface->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return fm10k_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int fm10k_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + 
SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (interface->ptp_clock) + info->phc_index = ptp_clock_index(interface->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops fm10k_ethtool_ops = { + .get_strings = fm10k_get_strings, + .get_sset_count = fm10k_get_sset_count, + .get_ethtool_stats = fm10k_get_ethtool_stats, + .get_drvinfo = fm10k_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_pauseparam = fm10k_get_pauseparam, + .set_pauseparam = fm10k_set_pauseparam, + .get_msglevel = fm10k_get_msglevel, + .set_msglevel = fm10k_set_msglevel, + .get_ringparam = fm10k_get_ringparam, + .set_ringparam = fm10k_set_ringparam, + .get_coalesce = fm10k_get_coalesce, + .set_coalesce = fm10k_set_coalesce, + .get_rxnfc = fm10k_get_rxnfc, + .set_rxnfc = fm10k_set_rxnfc, + .get_regs = fm10k_get_regs, + .get_regs_len = fm10k_get_regs_len, + .self_test = fm10k_self_test, + .get_rxfh_indir_size = fm10k_get_reta_size, + .get_rxfh_key_size = fm10k_get_rssrk_size, + .get_rxfh = fm10k_get_rssh, + .set_rxfh = fm10k_set_rssh, + .get_channels = fm10k_get_channels, + .set_channels = fm10k_set_channels, + .get_ts_info = fm10k_get_ts_info, +}; + +void fm10k_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &fm10k_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c new file mode 100644 index 000000000..5b08e6284 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -0,0 +1,517 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k.h" +#include "fm10k_vf.h" +#include "fm10k_pf.h" + +static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + struct fm10k_intfc *interface = hw->back; + struct pci_dev *pdev = interface->pdev; + + dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n", + **results & FM10K_TLV_ID_MASK, vf_info->vf_idx); + + return fm10k_tlv_msg_error(hw, results, mbx); +} + +static const struct fm10k_msg_data iov_mbx_data[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error), +}; + +s32 fm10k_iov_event(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_iov_data *iov_data; + s64 vflre; + int i; + + /* if there is no iov_data then there is no mailboxes to process */ + if (!ACCESS_ONCE(interface->iov_data)) + return 0; + + rcu_read_lock(); + + iov_data = interface->iov_data; + + /* check again now that we are in the RCU block */ + if (!iov_data) + goto read_unlock; + + if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR)) + goto read_unlock; + + /* read VFLRE to determine if any VFs have been reset */ + do { + vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0)); + vflre <<= 32; + vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1)); + vflre = (vflre << 32) | (vflre >> 32); + vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0)); + + i = iov_data->num_vfs; + + for (vflre <<= 64 - i; vflre && i--; vflre += vflre) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + if (vflre >= 0) + continue; + + hw->iov.ops.reset_resources(hw, vf_info); + vf_info->mbx.ops.connect(hw, &vf_info->mbx); + } + } while (i != iov_data->num_vfs); + +read_unlock: + rcu_read_unlock(); + + return 0; +} + +s32 fm10k_iov_mbx(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_iov_data *iov_data; + int i; + + /* if there is no iov_data then there is no mailboxes to process */ + if (!ACCESS_ONCE(interface->iov_data)) + return 0; + + rcu_read_lock(); + + iov_data = interface->iov_data; + + /* check again now that we are in the RCU block */ + if (!iov_data) + goto read_unlock; + + /* lock the mailbox for transmit and receive */ + fm10k_mbx_lock(interface); + + /* Most VF messages sent to the PF cause the PF to respond by + * requesting from the SM mailbox. This means that too many VF + * messages processed at once could cause a mailbox timeout on the PF. + * To prevent this, store a pointer to the next VF mbx to process. Use + * that as the start of the loop so that we don't starve whichever VF + * got ignored on the previous run. + */ +process_mbx: + for (i = iov_data->next_vf_mbx ? 
: iov_data->num_vfs; i--;) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + struct fm10k_mbx_info *mbx = &vf_info->mbx; + u16 glort = vf_info->glort; + + /* verify port mapping is valid, if not reset port */ + if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) + hw->iov.ops.reset_lport(hw, vf_info); + + /* reset VFs that have mailbox timed out */ + if (!mbx->timeout) { + hw->iov.ops.reset_resources(hw, vf_info); + mbx->ops.connect(hw, mbx); + } + + /* guarantee we have free space in the SM mailbox */ + if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) + break; + + /* cleanup mailbox and process received messages */ + mbx->ops.process(hw, mbx); + } + + /* if we stopped processing mailboxes early, update next_vf_mbx. + * Otherwise, reset next_vf_mbx, and restart loop so that we process + * the remaining mailboxes we skipped at the start. + */ + if (i >= 0) { + iov_data->next_vf_mbx = i + 1; + } else if (iov_data->next_vf_mbx) { + iov_data->next_vf_mbx = 0; + goto process_mbx; + } + + /* free the lock */ + fm10k_mbx_unlock(interface); + +read_unlock: + rcu_read_unlock(); + + return 0; +} + +void fm10k_iov_suspend(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + int num_vfs, i; + + /* pull out num_vfs from iov_data */ + num_vfs = iov_data ? iov_data->num_vfs : 0; + + /* shut down queue mapping for VFs */ + fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss), + FM10K_DGLORTMAP_NONE); + + /* Stop any active VFs and reset their resources */ + for (i = 0; i < num_vfs; i++) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + hw->iov.ops.reset_resources(hw, vf_info); + hw->iov.ops.reset_lport(hw, vf_info); + } +} + +int fm10k_iov_resume(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + int num_vfs, i; + + /* pull out num_vfs from iov_data */ + num_vfs = iov_data ? 
iov_data->num_vfs : 0; + + /* return error if iov_data is not already populated */ + if (!iov_data) + return -ENOMEM; + + /* allocate hardware resources for the VFs */ + hw->iov.ops.assign_resources(hw, num_vfs, num_vfs); + + /* configure DGLORT mapping for RSS */ + dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE; + dglort.idx = fm10k_dglort_vf_rss; + dglort.inner_rss = 1; + dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1); + dglort.queue_b = fm10k_vf_queue_index(hw, 0); + dglort.vsi_l = fls(hw->iov.total_vfs - 1); + dglort.vsi_b = 1; + + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* assign resources to the device */ + for (i = 0; i < num_vfs; i++) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + /* allocate all but the last GLORT to the VFs */ + if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT)) + break; + + /* assign GLORT to VF, and restrict it to multicast */ + hw->iov.ops.set_lport(hw, vf_info, i, + FM10K_VF_FLAG_MULTI_CAPABLE); + + /* assign our default vid to the VF following reset */ + vf_info->sw_vid = hw->mac.default_vid; + + /* mailbox is disconnected so we don't send a message */ + hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + + /* now we are ready so we can connect */ + vf_info->mbx.ops.connect(hw, &vf_info->mbx); + } + + return 0; +} + +s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) +{ + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vf_info *vf_info; + u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE; + + /* no IOV support, not our message to process */ + if (!iov_data) + return FM10K_ERR_PARAM; + + /* glort outside our range, not our message to process */ + if (vf_idx >= iov_data->num_vfs) + return FM10K_ERR_PARAM; + + /* determine if an update has occurred and if so notify the VF */ + vf_info = &iov_data->vf_info[vf_idx]; + if (vf_info->sw_vid != pvid) { + vf_info->sw_vid = pvid; + hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + } + + return 0; +} + +static void fm10k_iov_free_data(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + + if (!interface->iov_data) + return; + + /* reclaim hardware resources */ + fm10k_iov_suspend(pdev); + + /* drop iov_data from interface */ + kfree_rcu(interface->iov_data, rcu); + interface->iov_data = NULL; +} + +static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + size_t size; + int i, err; + + /* return error if iov_data is already populated */ + if (iov_data) + return -EBUSY; + + /* The PF should always be able to assign resources */ + if (!hw->iov.ops.assign_resources) + return -ENODEV; + + /* nothing to do if no VFs are requested */ + if (!num_vfs) + return 0; + + /* allocate memory for VF storage */ + size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]); + iov_data = kzalloc(size, GFP_KERNEL); + if (!iov_data) + return -ENOMEM; + + /* record number of VFs */ + iov_data->num_vfs = num_vfs; + + /* loop through vf_info structures initializing each entry */ + for (i = 0; i < num_vfs; i++) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + + /* Record VF VSI value */ + vf_info->vsi = i + 1; + vf_info->vf_idx = i; + + /* initialize mailbox memory */ + err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i); + if (err) { + dev_err(&pdev->dev, + "Unable to 
initialize SR-IOV mailbox\n"); + kfree(iov_data); + return err; + } + } + + /* assign iov_data to interface */ + interface->iov_data = iov_data; + + /* allocate hardware resources for the VFs */ + fm10k_iov_resume(pdev); + + return 0; +} + +void fm10k_iov_disable(struct pci_dev *pdev) +{ + if (pci_num_vf(pdev) && pci_vfs_assigned(pdev)) + dev_err(&pdev->dev, + "Cannot disable SR-IOV while VFs are assigned\n"); + else + pci_disable_sriov(pdev); + + fm10k_iov_free_data(pdev); +} + +static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev) +{ + u32 err_sev; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return; + + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev); + err_sev &= ~PCI_ERR_UNC_COMP_ABORT; + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev); +} + +int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs) +{ + int current_vfs = pci_num_vf(pdev); + int err = 0; + + if (current_vfs && pci_vfs_assigned(pdev)) { + dev_err(&pdev->dev, + "Cannot modify SR-IOV while VFs are assigned\n"); + num_vfs = current_vfs; + } else { + pci_disable_sriov(pdev); + fm10k_iov_free_data(pdev); + } + + /* allocate resources for the VFs */ + err = fm10k_iov_alloc_data(pdev, num_vfs); + if (err) + return err; + + /* allocate VFs if not already allocated */ + if (num_vfs && (num_vfs != current_vfs)) { + /* Disable completer abort error reporting as + * the VFs can trigger this any time they read a queue + * that they don't own. + */ + fm10k_disable_aer_comp_abort(pdev); + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_err(&pdev->dev, + "Enable PCI SR-IOV failed: %d\n", err); + return err; + } + } + + return num_vfs; +} + +int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vf_info *vf_info; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + /* verify MAC addr is valid */ + if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac)) + return -EINVAL; + + /* record new MAC address */ + vf_info = &iov_data->vf_info[vf_idx]; + ether_addr_copy(vf_info->mac, mac); + + /* assigning the MAC will send a mailbox message so lock is needed */ + fm10k_mbx_lock(interface); + + /* assign MAC address to VF */ + hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + + fm10k_mbx_unlock(interface); + + return 0; +} + +int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, + u8 qos) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vf_info *vf_info; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + /* QOS is unsupported and VLAN IDs accepted range 0-4094 */ + if (qos || (vid > (VLAN_VID_MASK - 1))) + return -EINVAL; + + vf_info = &iov_data->vf_info[vf_idx]; + + /* exit if there is nothing to do */ + if (vf_info->pf_vid == vid) + return 0; + + /* record default VLAN ID for VF */ + vf_info->pf_vid = vid; + + /* assigning the VLAN will send a mailbox message so lock is needed */ + fm10k_mbx_lock(interface); + + /* Clear the VLAN table for the VF */ + hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false); + + /* Update VF assignment and trigger reset */ + 
hw->iov.ops.assign_default_mac_vlan(hw, vf_info); + + fm10k_mbx_unlock(interface); + + return 0; +} + +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, + int __always_unused unused, int rate) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_hw *hw = &interface->hw; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) + return -EINVAL; + + /* store values */ + iov_data->vf_info[vf_idx].rate = rate; + + /* update hardware configuration */ + hw->iov.ops.configure_tc(hw, vf_idx, rate); + + return 0; +} + +int fm10k_ndo_get_vf_config(struct net_device *netdev, + int vf_idx, struct ifla_vf_info *ivi) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_iov_data *iov_data = interface->iov_data; + struct fm10k_vf_info *vf_info; + + /* verify SR-IOV is active and that vf idx is valid */ + if (!iov_data || vf_idx >= iov_data->num_vfs) + return -EINVAL; + + vf_info = &iov_data->vf_info[vf_idx]; + + ivi->vf = vf_idx; + ivi->max_tx_rate = vf_info->rate; + ivi->min_tx_rate = 0; + ether_addr_copy(ivi->mac, vf_info->mac); + ivi->vlan = vf_info->pf_vid; + ivi->qos = 0; + + return 0; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c new file mode 100644 index 000000000..c754b2027 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -0,0 +1,2002 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "fm10k.h" + +#define DRV_VERSION "0.15.2-k" +const char fm10k_driver_version[] = DRV_VERSION; +char fm10k_driver_name[] = "fm10k"; +static const char fm10k_driver_string[] = + "Intel(R) Ethernet Switch Host Interface Driver"; +static const char fm10k_copyright[] = + "Copyright (c) 2013 Intel Corporation."; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* single workqueue for entire fm10k driver */ +struct workqueue_struct *fm10k_workqueue = NULL; + +/** + * fm10k_init_module - Driver Registration Routine + * + * fm10k_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
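+ * The driver workqueue and the debugfs root directory are also created here.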
+ **/ +static int __init fm10k_init_module(void) +{ + pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); + pr_info("%s\n", fm10k_copyright); + + /* create driver workqueue */ + if (!fm10k_workqueue) + fm10k_workqueue = create_workqueue("fm10k"); + + fm10k_dbg_init(); + + return fm10k_register_pci_driver(); +} +module_init(fm10k_init_module); + +/** + * fm10k_exit_module - Driver Exit Cleanup Routine + * + * fm10k_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit fm10k_exit_module(void) +{ + fm10k_unregister_pci_driver(); + + fm10k_dbg_exit(); + + /* destroy driver workqueue */ + flush_workqueue(fm10k_workqueue); + destroy_workqueue(fm10k_workqueue); + fm10k_workqueue = NULL; +} +module_exit(fm10k_exit_module); + +static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, + struct fm10k_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* Only page will be NULL if buffer was consumed */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * fm10k_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count) +{ + union fm10k_rx_desc *rx_desc; + struct fm10k_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = FM10K_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer[i]; + i -= rx_ring->count; + + do { + if (!fm10k_alloc_mapped_page(rx_ring, bi)) + break; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = FM10K_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->d.staterr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
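+ * The barrier pairs with the writel() to the ring tail just below.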
+ */ + wmb(); + + /* notify hardware of new descriptors */ + writel(i, rx_ring->tail); + } +} + +/** + * fm10k_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the interface + **/ +static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, + struct fm10k_rx_buffer *old_buff) +{ + struct fm10k_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, + old_buff->page_offset, + FM10K_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static inline bool fm10k_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, + struct page *page, + unsigned int __maybe_unused truesize) +{ + /* avoid re-using remote pages */ + if (unlikely(fm10k_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= FM10K_RX_BUFSZ; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ)) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +/** + * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the interface. 
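+ * A false return tells the caller to unmap the page rather than recycle it back to the ring.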
+ **/ +static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->w.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = FM10K_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!fm10k_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + return fm10k_can_reuse_rx_page(rx_buffer, page, truesize); +} + +static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct fm10k_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + FM10K_RX_HDR_LEN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + FM10K_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + fm10k_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + + return skb; +} + +static inline void fm10k_rx_checksum(struct fm10k_ring *ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (fm10k_test_staterr(rx_desc, + FM10K_RXD_STATUS_L4E | + FM10K_RXD_STATUS_L4E2 | + FM10K_RXD_STATUS_IPE | + FM10K_RXD_STATUS_IPE2)) { + ring->rx_stats.csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2)) + skb->encapsulation = true; + else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS)) + return; + + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +#define FM10K_RSS_L4_TYPES_MASK \ + ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \ + (1ul << FM10K_RSSTYPE_IPV4_UDP) | \ + (1ul << FM10K_RSSTYPE_IPV6_TCP) | \ + (1ul << FM10K_RSSTYPE_IPV6_UDP)) + +static inline void fm10k_rx_hash(struct fm10k_ring *ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & 
NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK; + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), + (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct fm10k_intfc *interface = rx_ring->q_vector->interface; + + FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; + + if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED)) + fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb), + le64_to_cpu(rx_desc->q.timestamp)); +} + +static void fm10k_type_trans(struct fm10k_ring *rx_ring, + union fm10k_rx_desc __maybe_unused *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel); + + /* check to see if DGLORT belongs to a MACVLAN */ + if (l2_accel) { + u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1; + + idx -= l2_accel->dglort; + if (idx < l2_accel->size && l2_accel->macvlan[idx]) + dev = l2_accel->macvlan[idx]; + else + l2_accel = NULL; + } + + skb->protocol = eth_type_trans(skb, dev); + + if (!l2_accel) + return; + + /* update MACVLAN statistics */ + macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1, + !!(rx_desc->w.hdr_info & + cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK))); +} + +/** + * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + unsigned int len = skb->len; + + fm10k_rx_hash(rx_ring, rx_desc, skb); + + fm10k_rx_checksum(rx_ring, rx_desc, skb); + + fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); + + FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; + + skb_record_rx_queue(skb, rx_ring->queue_index); + + FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort; + + if (rx_desc->w.vlan) { + u16 vid = le16_to_cpu(rx_desc->w.vlan); + + if (vid != rx_ring->vid) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + fm10k_type_trans(rx_ring, rx_desc, skb); + + return len; +} + +/** + * fm10k_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(FM10K_RX_DESC(rx_ring, ntc)); + + if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP))) + return false; + + return true; +} + +/** + * fm10k_pull_tail - fm10k specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an fm10k specific version of __pskb_pull_tail. 
The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void fm10k_pull_tail(struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * fm10k_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, + union fm10k_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely((fm10k_test_staterr(rx_desc, + FM10K_RXD_STATUS_RXE)))) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.errors++; + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + fm10k_pull_tail(skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * fm10k_receive_skb - helper function to handle rx indications + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, + struct sk_buff *skb) +{ + napi_gro_receive(&q_vector->napi, skb); +} + +static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, + struct fm10k_ring *rx_ring, + int budget) +{ + struct sk_buff *skb = rx_ring->skb; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = fm10k_desc_unused(rx_ring); + + while (likely(total_packets < budget)) { + union fm10k_rx_desc *rx_desc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= FM10K_RX_BUFFER_WRITE) { + fm10k_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!rx_desc->d.staterr) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + /* retrieve a buffer from the ring */ + skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* fetch next buffer in frame 
if non-eop */ + if (fm10k_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* populate checksum, timestamp, VLAN, and protocol */ + total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb); + + fm10k_receive_skb(q_vector, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_packets; + rx_ring->stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + return total_packets < budget; +} + +#define VXLAN_HLEN (sizeof(struct udphdr) + 8) +static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb) +{ + struct fm10k_intfc *interface = netdev_priv(skb->dev); + struct fm10k_vxlan_port *vxlan_port; + + /* we can only offload a vxlan if we recognize it as such */ + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, list); + + if (!vxlan_port) + return NULL; + if (vxlan_port->port != udp_hdr(skb)->dest) + return NULL; + + /* return offset of udp_hdr plus 8 bytes for VXLAN header */ + return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN); +} + +#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF) +#define NVGRE_TNI htons(0x2000) +struct fm10k_nvgre_hdr { + __be16 flags; + __be16 proto; + __be32 tni; +}; + +static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) +{ + struct fm10k_nvgre_hdr *nvgre_hdr; + int hlen = ip_hdrlen(skb); + + /* currently only IPv4 is supported due to hlen above */ + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + return NULL; + + /* our transport header should be NVGRE */ + nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen); + + /* verify all reserved flags are 0 */ + if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) + return NULL; + + /* report start of ethernet header */ + if (nvgre_hdr->flags & NVGRE_TNI) + return (struct ethhdr *)(nvgre_hdr + 1); + + return (struct ethhdr *)(&nvgre_hdr->tni); +} + +__be16 fm10k_tx_encap_offload(struct sk_buff *skb) +{ + u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; + struct ethhdr *eth_hdr; + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) + return 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return 0; + } + + switch (l4_hdr) { + case IPPROTO_UDP: + eth_hdr = fm10k_port_is_vxlan(skb); + break; + case IPPROTO_GRE: + eth_hdr = fm10k_gre_is_nvgre(skb); + break; + default: + return 0; + } + + if (!eth_hdr) + return 0; + + switch (eth_hdr->h_proto) { + case htons(ETH_P_IP): + inner_l4_hdr = inner_ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; + break; + default: + return 0; + } + + switch (inner_l4_hdr) { + case IPPROTO_TCP: + inner_l4_hlen = inner_tcp_hdrlen(skb); + break; + case IPPROTO_UDP: + inner_l4_hlen = 8; + break; + default: + return 0; + } + + /* The hardware allows tunnel offloads only if the combined inner and + * outer header is 184 bytes or less + */ + if (skb_inner_transport_header(skb) + inner_l4_hlen - + skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) + 
return 0; + + return eth_hdr->h_proto; +} + +static int fm10k_tso(struct fm10k_ring *tx_ring, + struct fm10k_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct fm10k_tx_desc *tx_desc; + unsigned char *th; + u8 hdrlen; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + /* compute header lengths */ + if (skb->encapsulation) { + if (!fm10k_tx_encap_offload(skb)) + goto err_vxlan; + th = skb_inner_transport_header(skb); + } else { + th = skb_transport_header(skb); + } + + /* compute offset from SOF to transport header and add header len */ + hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2); + + first->tx_flags |= FM10K_TX_FLAGS_CSUM; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * hdrlen; + + /* populate Tx descriptor header size and mss */ + tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); + tx_desc->hdrlen = hdrlen; + tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + + return 1; +err_vxlan: + tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; + if (!net_ratelimit()) + netdev_err(tx_ring->netdev, + "TSO requested for unsupported tunnel, disabling offload\n"); + return -1; +} + +static void fm10k_tx_csum(struct fm10k_ring *tx_ring, + struct fm10k_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct fm10k_tx_desc *tx_desc; + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + __be16 protocol; + u8 l4_hdr = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + goto no_csum; + + if (skb->encapsulation) { + protocol = fm10k_tx_encap_offload(skb); + if (!protocol) { + if (skb_checksum_help(skb)) { + dev_warn(tx_ring->dev, + "failed to offload encap csum!\n"); + tx_ring->tx_stats.csum_err++; + } + goto no_csum; + } + network_hdr.raw = skb_inner_network_header(skb); + } else { + protocol = vlan_get_protocol(skb); + network_hdr.raw = skb_network_header(skb); + } + + switch (protocol) { + case htons(ETH_P_IP): + l4_hdr = network_hdr.ipv4->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = network_hdr.ipv6->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but ip version=%x!\n", + protocol); + } + tx_ring->tx_stats.csum_err++; + goto no_csum; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + case IPPROTO_UDP: + break; + case IPPROTO_GRE: + if (skb->encapsulation) + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + l4_hdr); + } + tx_ring->tx_stats.csum_err++; + goto no_csum; + } + + /* update TX checksum flag */ + first->tx_flags |= FM10K_TX_FLAGS_CSUM; + +no_csum: + /* populate Tx descriptor header size and mss */ + tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use); + tx_desc->hdrlen = 0; + tx_desc->mss = 0; +} + +#define FM10K_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 desc_flags = 0; + + /* set timestamping bits */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + desc_flags |= FM10K_TXD_FLAG_TIME; + + /* set checksum offload bits */ + desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, + FM10K_TXD_FLAG_CSUM); + + return desc_flags; +} + +static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring, + struct fm10k_tx_desc *tx_desc, u16 i, + dma_addr_t dma, unsigned int size, u8 desc_flags) +{ + /* set RS and INT for last frame in a cache line */ + if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0) + desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT; + + /* record values to descriptor */ + tx_desc->buffer_addr = cpu_to_le64(dma); + tx_desc->flags = desc_flags; + tx_desc->buflen = cpu_to_le16(size); + + /* return true if we just wrapped the ring */ + return i == tx_ring->count; +} + +static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available */ + if (likely(fm10k_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) +{ + if (likely(fm10k_desc_unused(tx_ring) >= size)) + return 0; + return __fm10k_maybe_stop_tx(tx_ring, size); +} + +static void fm10k_tx_map(struct fm10k_ring *tx_ring, + struct fm10k_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + struct fm10k_tx_buffer *tx_buffer; + struct fm10k_tx_desc *tx_desc; + struct skb_frag_struct *frag; + unsigned char *data; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u16 i = tx_ring->next_to_use; + u8 flags = fm10k_tx_desc_flags(skb, tx_flags); + + tx_desc = FM10K_TX_DESC(tx_ring, i); + + /* add HW VLAN tag */ + if (skb_vlan_tag_present(skb)) + tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + else + tx_desc->vlan = 0; + + size = skb_headlen(skb); + data = skb->data; + + dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE); + + data_len = skb->data_len; + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) { + if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma, + FM10K_MAX_DATA_PER_TXD, flags)) { + tx_desc = FM10K_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += FM10K_MAX_DATA_PER_TXD; + size -= FM10K_MAX_DATA_PER_TXD; + } + + if (likely(!data_len)) + break; + + if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, + dma, size, flags)) { + tx_desc = FM10K_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer[i]; + } + + /* write last descriptor with LAST bit set */ + flags |= 
FM10K_TXD_FLAG_LAST; + + if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags)) + i = 0; + + /* record bytecount for BQL */ + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* record SW timestamp if HW timestamp is not available */ + skb_tx_timestamp(first->skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. */ + fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer[i]; + fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, + struct fm10k_ring *tx_ring) +{ + struct fm10k_tx_buffer *first; + int tso; + u32 tx_flags = 0; +#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD + unsigned short f; +#endif + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + + /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head + * otherwise try next time + */ +#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (fm10k_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); + first->gso_segs = 1; + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + + tso = fm10k_tso(tx_ring, first); + if (tso < 0) + goto out_drop; + else if (!tso) + fm10k_tx_csum(tx_ring, first); + + fm10k_tx_map(tx_ring, first); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) +{ + return ring->stats.packets; +} + +static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) +{ + /* use SW head and tail until we have real hardware */ + u32 head = ring->next_to_clean; + u32 tail = ring->next_to_use; + + return ((head <= tail) ? tail : tail + ring->count) - head; +} + +bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring) +{ + u32 tx_done = fm10k_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = fm10k_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. 
This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. By + * requiring this to fail twice we avoid races with + * clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if (!tx_pending || (tx_done_old != tx_done)) { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); + + return false; + } + + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state); +} + +/** + * fm10k_tx_timeout_reset - initiate reset due to Tx timeout + * @interface: driver private struct + **/ +void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__FM10K_DOWN, &interface->state)) { + interface->tx_timeout_count++; + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + fm10k_service_event_schedule(interface); + } +} + +/** + * fm10k_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, + struct fm10k_ring *tx_ring) +{ + struct fm10k_intfc *interface = q_vector->interface; + struct fm10k_tx_buffer *tx_buffer; + struct fm10k_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__FM10K_DOWN, &interface->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer[i]; + tx_desc = FM10K_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer; + tx_desc = FM10K_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer; + tx_desc = FM10K_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while 
(likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct fm10k_hw *hw = &interface->hw; + + netif_err(interface, drv, tx_ring->netdev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n", + tx_ring->queue_index, + fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)), + fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)), + tx_ring->next_to_use, i); + + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + netif_info(interface, probe, tx_ring->netdev, + "tx hang %d detected on queue %d, resetting interface\n", + interface->tx_timeout_count + 1, + tx_ring->queue_index); + + fm10k_tx_timeout_reset(interface); + + /* the netdev is about to reset, no point in enabling stuff */ + return true; + } + + /* notify netdev of completed buffers */ + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__FM10K_DOWN, &interface->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +/** + * fm10k_update_itr - update the dynamic ITR value based on packet size + * + * Stores a new ITR value based on strictly on packet size. The + * divisors and thresholds used by this function were determined based + * on theoretical maximum wire speed and testing data, in order to + * minimize response time while increasing bulk throughput. 
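+ *
+ * As a rough worked example of the math below: 64 byte frames average
+ * 64 + 24 = 88 bytes on the wire and are halved to an ITR value of 44,
+ * a 600 byte average becomes 624 and is divided by 3 down to 208, and a
+ * 1500 byte average becomes 1524 and is halved to 762; anything above
+ * 3000 is clamped to 3000 first.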
+ * + * @ring_container: Container for rings to have ITR updated + **/ +static void fm10k_update_itr(struct fm10k_ring_container *ring_container) +{ + unsigned int avg_wire_size, packets; + + /* Only update ITR if we are using adaptive setting */ + if (!(ring_container->itr & FM10K_ITR_ADAPTIVE)) + goto clear_counts; + + packets = ring_container->total_packets; + if (!packets) + goto clear_counts; + + avg_wire_size = ring_container->total_bytes / packets; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + if (avg_wire_size > 3000) + avg_wire_size = 3000; + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + avg_wire_size /= 3; + else + avg_wire_size /= 2; + + /* write back value and retain adaptive flag */ + ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE; + +clear_counts: + ring_container->total_bytes = 0; + ring_container->total_packets = 0; +} + +static void fm10k_qv_enable(struct fm10k_q_vector *q_vector) +{ + /* Enable auto-mask and clear the current mask */ + u32 itr = FM10K_ITR_ENABLE; + + /* Update Tx ITR */ + fm10k_update_itr(&q_vector->tx); + + /* Update Rx ITR */ + fm10k_update_itr(&q_vector->rx); + + /* Store Tx itr in timer slot 0 */ + itr |= (q_vector->tx.itr & FM10K_ITR_MAX); + + /* Shift Rx itr to timer slot 1 */ + itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT; + + /* Write the final value to the ITR register */ + writel(itr, q_vector->itr); +} + +static int fm10k_poll(struct napi_struct *napi, int budget) +{ + struct fm10k_q_vector *q_vector = + container_of(napi, struct fm10k_q_vector, napi); + struct fm10k_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + fm10k_for_each_ring(ring, q_vector->tx) + clean_complete &= fm10k_clean_tx_irq(q_vector, ring); + + /* attempt to distribute budget to each queue fairly, but don't + * allow the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + fm10k_for_each_ring(ring, q_vector->rx) + clean_complete &= fm10k_clean_rx_irq(q_vector, ring, + per_ring_budget); + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + + /* re-enable the q_vector */ + fm10k_qv_enable(q_vector); + + return 0; +} + +/** + * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device + * @interface: board private structure to initialize + * + * When QoS (Quality of Service) is enabled, allocate queues for + * each traffic class. If multiqueue isn't available,then abort QoS + * initialization. + * + * This function handles all combinations of Qos and RSS. 
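+ *
+ * As an illustration, assuming hardware that exposes 128 queues and a
+ * netdev configured with 4 traffic classes, rss_i starts at
+ * 128 / 4 = 32, is rounded down to a power of two and capped by the RSS
+ * limit, and the interface then uses rss_i Rx/Tx queues per class for a
+ * total of 4 * rss_i queues.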
+ * + **/ +static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) +{ + struct net_device *dev = interface->netdev; + struct fm10k_ring_feature *f; + int rss_i, i; + int pcs; + + /* Map queue offset and counts onto allocated tx queues */ + pcs = netdev_get_num_tc(dev); + + if (pcs <= 1) + return false; + + /* set QoS mask and indices */ + f = &interface->ring_feature[RING_F_QOS]; + f->indices = pcs; + f->mask = (1 << fls(pcs - 1)) - 1; + + /* determine the upper limit for our current DCB mode */ + rss_i = interface->hw.mac.max_queues / pcs; + rss_i = 1 << (fls(rss_i) - 1); + + /* set RSS mask and indices */ + f = &interface->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, rss_i, f->limit); + f->indices = rss_i; + f->mask = (1 << fls(rss_i - 1)) - 1; + + /* configure pause class to queue mapping */ + for (i = 0; i < pcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + interface->num_rx_queues = rss_i * pcs; + interface->num_tx_queues = rss_i * pcs; + + return true; +} + +/** + * fm10k_set_rss_queues: Allocate queues for RSS + * @interface: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) +{ + struct fm10k_ring_feature *f; + u16 rss_i; + + f = &interface->ring_feature[RING_F_RSS]; + rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); + + /* record indices and power of 2 mask for RSS */ + f->indices = rss_i; + f->mask = (1 << fls(rss_i - 1)) - 1; + + interface->num_rx_queues = rss_i; + interface->num_tx_queues = rss_i; + + return true; +} + +/** + * fm10k_set_num_queues: Allocate queues for device, feature dependent + * @interface: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void fm10k_set_num_queues(struct fm10k_intfc *interface) +{ + /* Start with base case */ + interface->num_rx_queues = 1; + interface->num_tx_queues = 1; + + if (fm10k_set_qos_queues(interface)) + return; + + fm10k_set_rss_queues(interface); +} + +/** + * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector + * @interface: board private structure to initialize + * @v_count: q_vectors allocated on interface, used for ring interleaving + * @v_idx: index of vector in interface struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
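+ *
+ * Ring indices are interleaved by v_count: for instance, a q_vector
+ * allocated with v_count = 4, txr_count = 2 and txr_idx = 1 owns Tx
+ * rings 1 and 5, and the Rx rings are laid out the same way.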
+ **/ +static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, + unsigned int v_count, unsigned int v_idx, + unsigned int txr_count, unsigned int txr_idx, + unsigned int rxr_count, unsigned int rxr_idx) +{ + struct fm10k_q_vector *q_vector; + struct fm10k_ring *ring; + int ring_count, size; + + ring_count = txr_count + rxr_count; + size = sizeof(struct fm10k_q_vector) + + (sizeof(struct fm10k_ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(interface->netdev, &q_vector->napi, + fm10k_poll, NAPI_POLL_WEIGHT); + + /* tie q_vector and interface together */ + interface->q_vector[v_idx] = q_vector; + q_vector->interface = interface; + q_vector->v_idx = v_idx; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* save Tx ring container info */ + q_vector->tx.ring = ring; + q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK; + q_vector->tx.itr = interface->tx_itr; + q_vector->tx.count = txr_count; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &interface->pdev->dev; + ring->netdev = interface->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* apply Tx specific ring traits */ + ring->count = interface->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to interface */ + interface->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + /* save Rx ring container info */ + q_vector->rx.ring = ring; + q_vector->rx.itr = interface->rx_itr; + q_vector->rx.count = rxr_count; + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &interface->pdev->dev; + ring->netdev = interface->netdev; + rcu_assign_pointer(ring->l2_accel, interface->l2_accel); + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* apply Rx specific ring traits */ + ring->count = interface->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to interface */ + interface->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + fm10k_dbg_q_vector_init(q_vector); + + return 0; +} + +/** + * fm10k_free_q_vector - Free memory allocated for specific interrupt vector + * @interface: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx) +{ + struct fm10k_q_vector *q_vector = interface->q_vector[v_idx]; + struct fm10k_ring *ring; + + fm10k_dbg_q_vector_exit(q_vector); + + fm10k_for_each_ring(ring, q_vector->tx) + interface->tx_ring[ring->queue_index] = NULL; + + fm10k_for_each_ring(ring, q_vector->rx) + interface->rx_ring[ring->queue_index] = NULL; + + interface->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors + * @interface: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
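+ *
+ * Queues are spread as evenly as possible; for example, with 4 q_vectors
+ * and 6 Rx plus 6 Tx queues the DIV_ROUND_UP() split below assigns
+ * 2/2/1/1 Rx and Tx queues to vectors 0 through 3. Only when there are
+ * at least as many vectors as total queues does each Rx queue first get
+ * a vector of its own.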
+ **/ +static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) +{ + unsigned int q_vectors = interface->num_q_vectors; + unsigned int rxr_remaining = interface->num_rx_queues; + unsigned int txr_remaining = interface->num_tx_queues; + unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = fm10k_alloc_q_vector(interface, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + interface->num_tx_queues = 0; + interface->num_rx_queues = 0; + interface->num_q_vectors = 0; + + while (v_idx--) + fm10k_free_q_vector(interface, v_idx); + + return -ENOMEM; +} + +/** + * fm10k_free_q_vectors - Free memory allocated for interrupt vectors + * @interface: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void fm10k_free_q_vectors(struct fm10k_intfc *interface) +{ + int v_idx = interface->num_q_vectors; + + interface->num_tx_queues = 0; + interface->num_rx_queues = 0; + interface->num_q_vectors = 0; + + while (v_idx--) + fm10k_free_q_vector(interface, v_idx); +} + +/** + * f10k_reset_msix_capability - reset MSI-X capability + * @interface: board private structure to initialize + * + * Reset the MSI-X capability back to its starting state + **/ +static void fm10k_reset_msix_capability(struct fm10k_intfc *interface) +{ + pci_disable_msix(interface->pdev); + kfree(interface->msix_entries); + interface->msix_entries = NULL; +} + +/** + * f10k_init_msix_capability - configure MSI-X capability + * @interface: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int fm10k_init_msix_capability(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + int v_budget, vector; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + * the default is to use pairs of vectors + */ + v_budget = max(interface->num_rx_queues, interface->num_tx_queues); + v_budget = min_t(u16, v_budget, num_online_cpus()); + + /* account for vectors not related to queues */ + v_budget += NON_Q_VECTORS(hw); + + /* At the same time, hardware can only support a maximum of + * hw.mac->max_msix_vectors vectors. With features + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx + * descriptor queues supported by our device. Thus, we cap it off in + * those rare cases where the cpu count also exceeds our vector limit. + */ + v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + + /* A failure in MSI-X entry allocation is fatal. 
*/ + interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), + GFP_KERNEL); + if (!interface->msix_entries) + return -ENOMEM; + + /* populate entry values */ + for (vector = 0; vector < v_budget; vector++) + interface->msix_entries[vector].entry = vector; + + /* Attempt to enable MSI-X with requested value */ + v_budget = pci_enable_msix_range(interface->pdev, + interface->msix_entries, + MIN_MSIX_COUNT(hw), + v_budget); + if (v_budget < 0) { + kfree(interface->msix_entries); + interface->msix_entries = NULL; + return -ENOMEM; + } + + /* record the number of queues available for q_vectors */ + interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw); + + return 0; +} + +/** + * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS + * @interface: Interface structure continaining rings and devices + * + * Cache the descriptor ring offsets for Qos + **/ +static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface) +{ + struct net_device *dev = interface->netdev; + int pc, offset, rss_i, i, q_idx; + u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1; + u8 num_pcs = netdev_get_num_tc(dev); + + if (num_pcs <= 1) + return false; + + rss_i = interface->ring_feature[RING_F_RSS].indices; + + for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) { + q_idx = pc; + for (i = 0; i < rss_i; i++) { + interface->tx_ring[offset + i]->reg_idx = q_idx; + interface->tx_ring[offset + i]->qos_pc = pc; + interface->rx_ring[offset + i]->reg_idx = q_idx; + interface->rx_ring[offset + i]->qos_pc = pc; + q_idx += pc_stride; + } + } + + return true; +} + +/** + * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS + * @interface: Interface structure continaining rings and devices + * + * Cache the descriptor ring offsets for RSS + **/ +static void fm10k_cache_ring_rss(struct fm10k_intfc *interface) +{ + int i; + + for (i = 0; i < interface->num_rx_queues; i++) + interface->rx_ring[i]->reg_idx = i; + + for (i = 0; i < interface->num_tx_queues; i++) + interface->tx_ring[i]->reg_idx = i; +} + +/** + * fm10k_assign_rings - Map rings to network devices + * @interface: Interface structure containing rings and devices + * + * This function is meant to go though and configure both the network + * devices so that they contain rings, and configure the rings so that + * they function with their network devices. + **/ +static void fm10k_assign_rings(struct fm10k_intfc *interface) +{ + if (fm10k_cache_ring_qos(interface)) + return; + + fm10k_cache_ring_rss(interface); +} + +static void fm10k_init_reta(struct fm10k_intfc *interface) +{ + u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; + u32 reta, base; + + /* If the netdev is initialized we have to maintain table if possible */ + if (interface->netdev->reg_state) { + for (i = FM10K_RETA_SIZE; i--;) { + reta = interface->reta[i]; + if ((((reta << 24) >> 24) < rss_i) && + (((reta << 16) >> 24) < rss_i) && + (((reta << 8) >> 24) < rss_i) && + (((reta) >> 24) < rss_i)) + continue; + goto repopulate_reta; + } + + /* do nothing if all of the elements are in bounds */ + return; + } + +repopulate_reta: + /* Populate the redirection table 4 entries at a time. To do this + * we are generating the results for n and n+2 and then interleaving + * those with the results with n+1 and n+3. 
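+ *
+ * The net effect is a block distribution in which table slot j resolves
+ * to queue (j * rss_i) / 128. With rss_i = 16, for instance, each run of
+ * eight consecutive slots shares a queue, so entry 0 works out to
+ * {0, 0, 0, 0} and entry 31 to {15, 15, 15, 15}.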
+ */ + for (i = FM10K_RETA_SIZE; i--;) { + /* first pass generates n and n+2 */ + base = ((i * 0x00040004) + 0x00020000) * rss_i; + reta = (base & 0x3F803F80) >> 7; + + /* second pass generates n+1 and n+3 */ + base += 0x00010001 * rss_i; + reta |= (base & 0x3F803F80) << 1; + + interface->reta[i] = reta; + } +} + +/** + * fm10k_init_queueing_scheme - Determine proper queueing scheme + * @interface: board private structure to initialize + * + * We determine which queueing scheme to use based on... + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) +{ + int err; + + /* Number of supported queues */ + fm10k_set_num_queues(interface); + + /* Configure MSI-X capability */ + err = fm10k_init_msix_capability(interface); + if (err) { + dev_err(&interface->pdev->dev, + "Unable to initialize MSI-X capability\n"); + return err; + } + + /* Allocate memory for queues */ + err = fm10k_alloc_q_vectors(interface); + if (err) + return err; + + /* Map rings to devices, and map devices to physical queues */ + fm10k_assign_rings(interface); + + /* Initialize RSS redirection table */ + fm10k_init_reta(interface); + + return 0; +} + +/** + * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings + * @interface: board private structure to clear queueing scheme on + * + * We go through and clear queueing specific resources and reset the structure + * to pre-load conditions + **/ +void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface) +{ + fm10k_free_q_vectors(interface); + fm10k_reset_msix_capability(interface); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c new file mode 100644 index 000000000..1b2738380 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -0,0 +1,2140 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_common.h" + +/** + * fm10k_fifo_init - Initialize a message FIFO + * @fifo: pointer to FIFO + * @buffer: pointer to memory to be used to store FIFO + * @size: maximum message size to store in FIFO, must be 2^n - 1 + **/ +static void fm10k_fifo_init(struct fm10k_mbx_fifo *fifo, u32 *buffer, u16 size) +{ + fifo->buffer = buffer; + fifo->size = size; + fifo->head = 0; + fifo->tail = 0; +} + +/** + * fm10k_fifo_used - Retrieve used space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of DWORDs used in the FIFO + **/ +static u16 fm10k_fifo_used(struct fm10k_mbx_fifo *fifo) +{ + return fifo->tail - fifo->head; +} + +/** + * fm10k_fifo_unused - Retrieve unused space in FIFO + * @fifo: pointer to FIFO + * + * This function returns the number of unused DWORDs in the FIFO + **/ +static u16 fm10k_fifo_unused(struct fm10k_mbx_fifo *fifo) +{ + return fifo->size + fifo->head - fifo->tail; +} + +/** + * fm10k_fifo_empty - Test to verify if fifo is empty + * @fifo: pointer to FIFO + * + * This function returns true if the FIFO is empty, else false + **/ +static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) +{ + return fifo->head == fifo->tail; +} + +/** + * fm10k_fifo_head_offset - returns indices of head with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to head + * + * This function returns the indices into the fifo based on head + offset + **/ +static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->head + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_tail_offset - returns indices of tail with given offset + * @fifo: pointer to FIFO + * @offset: offset to add to tail + * + * This function returns the indices into the fifo based on tail + offset + **/ +static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) +{ + return (fifo->tail + offset) & (fifo->size - 1); +} + +/** + * fm10k_fifo_head_len - Retrieve length of first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the first message in the FIFO + **/ +static u16 fm10k_fifo_head_len(struct fm10k_mbx_fifo *fifo) +{ + u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); + + /* verify there is at least 1 DWORD in the fifo so *head is valid */ + if (fm10k_fifo_empty(fifo)) + return 0; + + /* retieve the message length */ + return FM10K_TLV_DWORD_LEN(*head); +} + +/** + * fm10k_fifo_head_drop - Drop the first message in FIFO + * @fifo: pointer to FIFO + * + * This function returns the size of the message dropped from the FIFO + **/ +static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) +{ + u16 len = fm10k_fifo_head_len(fifo); + + /* update head so it is at the start of next frame */ + fifo->head += len; + + return len; +} + +/** + * fm10k_fifo_drop_all - Drop all messages in FIFO + * @fifo: pointer to FIFO + * + * This function resets the head pointer to drop all messages in the FIFO, + * and ensure the FIFO is empty. + **/ +static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo) +{ + fifo->head = fifo->tail; +} + +/** + * fm10k_mbx_index_len - Convert a head/tail index into a length value + * @mbx: pointer to mailbox + * @head: head index + * @tail: head index + * + * This function takes the head and tail index and determines the length + * of the data indicated by this pair. 
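+ *
+ * Because index 0 and the all-ones index are never used, a wrapped pair
+ * still yields the distance actually walked. Assuming mbmem_len = 16
+ * (mask 31, valid indices 1-30): head 5, tail 9 gives a length of 4,
+ * while head 30, tail 3 gives (3 - 30) - 2, masked down to 3.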
+ **/ +static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) +{ + u16 len = tail - head; + + /* we wrapped so subtract 2, one for index 0, one for all 1s index */ + if (len > tail) + len -= 2; + + return len & ((mbx->mbmem_len << 1) - 1); +} + +/** + * fm10k_mbx_tail_add - Determine new tail value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_tail_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (tail > mbx->tail) ? --tail : ++tail; +} + +/** + * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local tail index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_tail_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (tail < mbx->tail) ? ++tail : --tail; +} + +/** + * fm10k_mbx_head_add - Determine new head value with added offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_head_add(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1); + + /* add/sub 1 because we cannot have offset 0 or all 1s */ + return (head > mbx->head) ? --head : ++head; +} + +/** + * fm10k_mbx_head_sub - Determine new head value with subtracted offset + * @mbx: pointer to mailbox + * @offset: length to add to head offset + * + * This function takes the local head index and recomputes it for + * a given length added as an offset. + **/ +static u16 fm10k_mbx_head_sub(struct fm10k_mbx_info *mbx, u16 offset) +{ + u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1); + + /* sub/add 1 because we cannot have offset 0 or all 1s */ + return (head < mbx->head) ? ++head : --head; +} + +/** + * fm10k_mbx_pushed_tail_len - Retrieve the length of message being pushed + * @mbx: pointer to mailbox + * + * This function will return the length of the message currently being + * pushed onto the tail of the Rx queue. + **/ +static u16 fm10k_mbx_pushed_tail_len(struct fm10k_mbx_info *mbx) +{ + u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0); + + /* pushed tail is only valid if pushed is set */ + if (!mbx->pushed) + return 0; + + return FM10K_TLV_DWORD_LEN(*tail); +} + +/** + * fm10k_fifo_write_copy - pulls data off of msg and places it in fifo + * @fifo: pointer to FIFO + * @msg: message array to populate + * @tail_offset: additional offset to add to tail pointer + * @len: length of FIFO to copy into message header + * + * This function will take a message and copy it into a section of the + * FIFO. In order to get something into a location other than just + * the tail you can use tail_offset to adjust the pointer. 
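+ *
+ * The copy handles wrap-around; for example, in an 8-entry FIFO whose
+ * effective tail sits at offset 6, a 4 DWORD message lands as msg[0..1]
+ * in buffer[6..7] and msg[2..3] in buffer[0..1].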
+ **/ +static void fm10k_fifo_write_copy(struct fm10k_mbx_fifo *fifo, + const u32 *msg, u16 tail_offset, u16 len) +{ + u16 end = fm10k_fifo_tail_offset(fifo, tail_offset); + u32 *tail = fifo->buffer + end; + + /* track when we should cross the end of the FIFO */ + end = fifo->size - end; + + /* copy end of message before start of message */ + if (end < len) + memcpy(fifo->buffer, msg + end, (len - end) << 2); + else + end = len; + + /* Copy remaining message into Tx FIFO */ + memcpy(tail, msg, end << 2); +} + +/** + * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO + * @fifo: pointer to FIFO + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. + **/ +static s32 fm10k_fifo_enqueue(struct fm10k_mbx_fifo *fifo, const u32 *msg) +{ + u16 len = FM10K_TLV_DWORD_LEN(*msg); + + /* verify parameters */ + if (len > fifo->size) + return FM10K_MBX_ERR_SIZE; + + /* verify there is room for the message */ + if (len > fm10k_fifo_unused(fifo)) + return FM10K_MBX_ERR_NO_SPACE; + + /* Copy message into FIFO */ + fm10k_fifo_write_copy(fifo, msg, 0, len); + + /* memory barrier to guarantee FIFO is written before tail update */ + wmb(); + + /* Update Tx FIFO tail */ + fifo->tail += len; + + return 0; +} + +/** + * fm10k_mbx_validate_msg_size - Validate incoming message based on size + * @mbx: pointer to mailbox + * @len: length of data pushed onto buffer + * + * This function analyzes the frame and will return a non-zero value when + * the start of a message larger than the mailbox is detected. + **/ +static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 total_len = 0, msg_len; + u32 *msg; + + /* length should include previous amounts pushed */ + len += mbx->pushed; + + /* offset in message is based off of current message size */ + do { + msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len); + msg_len = FM10K_TLV_DWORD_LEN(*msg); + total_len += msg_len; + } while (total_len < len); + + /* message extends out of pushed section, but fits in FIFO */ + if ((len < total_len) && (msg_len <= mbx->max_size)) + return 0; + + /* return length of invalid section */ + return (len < total_len) ? len : (len - total_len); +} + +/** + * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem + * @mbx: pointer to mailbox + * + * This function will take a section of the Tx FIFO and copy it into the + * mailbox memory. The offset in mbmem is based on the lower bits of the + * tail and len determines the length to copy. 
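+ *
+ * Note that mbmem word 0 is never written here; it is reserved for the
+ * mailbox header, which is why fm10k_mbx_pull_head() only pulls up to
+ * mbmem_len - 1 DWORDs per pass.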
+ **/ +static void fm10k_mbx_write_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u32 mbmem = mbx->mbmem_reg; + u32 *head = fifo->buffer; + u16 end, len, tail, mask; + + if (!mbx->tail_len) + return; + + /* determine data length and mbmem tail index */ + mask = mbx->mbmem_len - 1; + len = mbx->tail_len; + tail = fm10k_mbx_tail_sub(mbx, len); + if (tail > mask) + tail++; + + /* determine offset in the ring */ + end = fm10k_fifo_head_offset(fifo, mbx->pulled); + head += end; + + /* memory barrier to guarantee data is ready to be read */ + rmb(); + + /* Copy message from Tx FIFO */ + for (end = fifo->size - end; len; head = fifo->buffer) { + do { + /* adjust tail to match offset for FIFO */ + tail &= mask; + if (!tail) + tail++; + + /* write message to hardware FIFO */ + fm10k_write_reg(hw, mbmem + tail++, *(head++)); + } while (--len && --end); + } +} + +/** + * fm10k_mbx_pull_head - Pulls data off of head of Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @head: acknowledgement number last received + * + * This function will push the tail index forward based on the remote + * head index. It will then pull up to mbmem_len DWORDs off of the + * head of the FIFO and will place it in the MBMEM registers + * associated with the mailbox. + **/ +static void fm10k_mbx_pull_head(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + + /* update number of bytes pulled and update bytes in transit */ + mbx->pulled += mbx->tail_len - ack; + + /* determine length of data to pull, reserve space for mbmem header */ + mbmem_len = mbx->mbmem_len - 1; + len = fm10k_fifo_used(fifo) - mbx->pulled; + if (len > mbmem_len) + len = mbmem_len; + + /* update tail and record number of bytes in transit */ + mbx->tail = fm10k_mbx_tail_add(mbx, len - ack); + mbx->tail_len = len; + + /* drop pulled messages from the FIFO */ + for (len = fm10k_fifo_head_len(fifo); + len && (mbx->pulled >= len); + len = fm10k_fifo_head_len(fifo)) { + mbx->pulled -= fm10k_fifo_head_drop(fifo); + mbx->tx_messages++; + mbx->tx_dwords += len; + } + + /* Copy message out from the Tx FIFO */ + fm10k_mbx_write_copy(hw, mbx); +} + +/** + * fm10k_mbx_read_copy - pulls data off of mbmem and places it in Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will take a section of the mailbox memory and copy it + * into the Rx FIFO. The offset is based on the lower bits of the + * head and len determines the length to copy. 
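+ *
+ * The remote half of the mailbox memory is addressed by XORing
+ * mbmem_reg with mbmem_len; as on the Tx side, index 0 of that half
+ * carries the remote header, so a masked head index of 0 is bumped to
+ * 1 before reading data words.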
+ **/ +static void fm10k_mbx_read_copy(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u32 mbmem = mbx->mbmem_reg ^ mbx->mbmem_len; + u32 *tail = fifo->buffer; + u16 end, len, head; + + /* determine data length and mbmem head index */ + len = mbx->head_len; + head = fm10k_mbx_head_sub(mbx, len); + if (head >= mbx->mbmem_len) + head++; + + /* determine offset in the ring */ + end = fm10k_fifo_tail_offset(fifo, mbx->pushed); + tail += end; + + /* Copy message into Rx FIFO */ + for (end = fifo->size - end; len; tail = fifo->buffer) { + do { + /* adjust head to match offset for FIFO */ + head &= mbx->mbmem_len - 1; + if (!head) + head++; + + /* read message from hardware FIFO */ + *(tail++) = fm10k_read_reg(hw, mbmem + head++); + } while (--len && --end); + } + + /* memory barrier to guarantee FIFO is written before tail update */ + wmb(); +} + +/** + * fm10k_mbx_push_tail - Pushes up to 15 DWORDs on to tail of FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @tail: tail index of message + * + * This function will first validate the tail index and size for the + * incoming message. It then updates the acknowledgment number and + * copies the data into the FIFO. It will return the number of messages + * dequeued on success and a negative value on error. + **/ +static s32 fm10k_mbx_push_tail(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); + + /* determine length of data to push */ + len = fm10k_fifo_unused(fifo) - mbx->pushed; + if (len > seq) + len = seq; + + /* update head and record bytes received */ + mbx->head = fm10k_mbx_head_add(mbx, len); + mbx->head_len = len; + + /* nothing to do if there is no data */ + if (!len) + return 0; + + /* Copy msg into Rx FIFO */ + fm10k_mbx_read_copy(hw, mbx); + + /* determine if there are any invalid lengths in message */ + if (fm10k_mbx_validate_msg_size(mbx, len)) + return FM10K_MBX_ERR_SIZE; + + /* Update pushed */ + mbx->pushed += len; + + /* flush any completed messages */ + for (len = fm10k_mbx_pushed_tail_len(mbx); + len && (mbx->pushed >= len); + len = fm10k_mbx_pushed_tail_len(mbx)) { + fifo->tail += len; + mbx->pushed -= len; + mbx->rx_messages++; + mbx->rx_dwords += len; + } + + return 0; +} + +/* pre-generated data for generating the CRC based on the poly 0xAC9A. 
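+ *
+ * A table like this can be regenerated with the usual reflected
+ * byte-wise construction (sketch for reference only, this is not
+ * driver code):
+ *
+ *	for (i = 0; i < 256; i++) {
+ *		u16 crc = i;
+ *
+ *		for (bit = 0; bit < 8; bit++)
+ *			crc = (crc & 1) ? (crc >> 1) ^ 0xAC9A : crc >> 1;
+ *		table[i] = crc;
+ *	}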
*/ +static const u16 fm10k_crc_16b_table[256] = { + 0x0000, 0x7956, 0xF2AC, 0x8BFA, 0xBC6D, 0xC53B, 0x4EC1, 0x3797, + 0x21EF, 0x58B9, 0xD343, 0xAA15, 0x9D82, 0xE4D4, 0x6F2E, 0x1678, + 0x43DE, 0x3A88, 0xB172, 0xC824, 0xFFB3, 0x86E5, 0x0D1F, 0x7449, + 0x6231, 0x1B67, 0x909D, 0xE9CB, 0xDE5C, 0xA70A, 0x2CF0, 0x55A6, + 0x87BC, 0xFEEA, 0x7510, 0x0C46, 0x3BD1, 0x4287, 0xC97D, 0xB02B, + 0xA653, 0xDF05, 0x54FF, 0x2DA9, 0x1A3E, 0x6368, 0xE892, 0x91C4, + 0xC462, 0xBD34, 0x36CE, 0x4F98, 0x780F, 0x0159, 0x8AA3, 0xF3F5, + 0xE58D, 0x9CDB, 0x1721, 0x6E77, 0x59E0, 0x20B6, 0xAB4C, 0xD21A, + 0x564D, 0x2F1B, 0xA4E1, 0xDDB7, 0xEA20, 0x9376, 0x188C, 0x61DA, + 0x77A2, 0x0EF4, 0x850E, 0xFC58, 0xCBCF, 0xB299, 0x3963, 0x4035, + 0x1593, 0x6CC5, 0xE73F, 0x9E69, 0xA9FE, 0xD0A8, 0x5B52, 0x2204, + 0x347C, 0x4D2A, 0xC6D0, 0xBF86, 0x8811, 0xF147, 0x7ABD, 0x03EB, + 0xD1F1, 0xA8A7, 0x235D, 0x5A0B, 0x6D9C, 0x14CA, 0x9F30, 0xE666, + 0xF01E, 0x8948, 0x02B2, 0x7BE4, 0x4C73, 0x3525, 0xBEDF, 0xC789, + 0x922F, 0xEB79, 0x6083, 0x19D5, 0x2E42, 0x5714, 0xDCEE, 0xA5B8, + 0xB3C0, 0xCA96, 0x416C, 0x383A, 0x0FAD, 0x76FB, 0xFD01, 0x8457, + 0xAC9A, 0xD5CC, 0x5E36, 0x2760, 0x10F7, 0x69A1, 0xE25B, 0x9B0D, + 0x8D75, 0xF423, 0x7FD9, 0x068F, 0x3118, 0x484E, 0xC3B4, 0xBAE2, + 0xEF44, 0x9612, 0x1DE8, 0x64BE, 0x5329, 0x2A7F, 0xA185, 0xD8D3, + 0xCEAB, 0xB7FD, 0x3C07, 0x4551, 0x72C6, 0x0B90, 0x806A, 0xF93C, + 0x2B26, 0x5270, 0xD98A, 0xA0DC, 0x974B, 0xEE1D, 0x65E7, 0x1CB1, + 0x0AC9, 0x739F, 0xF865, 0x8133, 0xB6A4, 0xCFF2, 0x4408, 0x3D5E, + 0x68F8, 0x11AE, 0x9A54, 0xE302, 0xD495, 0xADC3, 0x2639, 0x5F6F, + 0x4917, 0x3041, 0xBBBB, 0xC2ED, 0xF57A, 0x8C2C, 0x07D6, 0x7E80, + 0xFAD7, 0x8381, 0x087B, 0x712D, 0x46BA, 0x3FEC, 0xB416, 0xCD40, + 0xDB38, 0xA26E, 0x2994, 0x50C2, 0x6755, 0x1E03, 0x95F9, 0xECAF, + 0xB909, 0xC05F, 0x4BA5, 0x32F3, 0x0564, 0x7C32, 0xF7C8, 0x8E9E, + 0x98E6, 0xE1B0, 0x6A4A, 0x131C, 0x248B, 0x5DDD, 0xD627, 0xAF71, + 0x7D6B, 0x043D, 0x8FC7, 0xF691, 0xC106, 0xB850, 0x33AA, 0x4AFC, + 0x5C84, 0x25D2, 0xAE28, 0xD77E, 0xE0E9, 0x99BF, 0x1245, 0x6B13, + 0x3EB5, 0x47E3, 0xCC19, 0xB54F, 0x82D8, 0xFB8E, 0x7074, 0x0922, + 0x1F5A, 0x660C, 0xEDF6, 0x94A0, 0xA337, 0xDA61, 0x519B, 0x28CD }; + +/** + * fm10k_crc_16b - Generate a 16 bit CRC for a region of 16 bit data + * @data: pointer to data to process + * @seed: seed value for CRC + * @len: length measured in 16 bits words + * + * This function will generate a CRC based on the polynomial 0xAC9A and + * whatever value is stored in the seed variable. Note that this + * value inverts the local seed and the result in order to capture all + * leading and trailing zeros. 
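+ *
+ * Note that len is a count of 16 bit words rather than DWORDs, which is
+ * why callers such as fm10k_fifo_crc() pass their DWORD counts
+ * multiplied by 2.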
+ */ +static u16 fm10k_crc_16b(const u32 *data, u16 seed, u16 len) +{ + u32 result = seed; + + while (len--) { + result ^= *(data++); + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + + if (!(len--)) + break; + + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + result = (result >> 8) ^ fm10k_crc_16b_table[result & 0xFF]; + } + + return (u16)result; +} + +/** + * fm10k_fifo_crc - generate a CRC based off of FIFO data + * @fifo: pointer to FIFO + * @offset: offset point for start of FIFO + * @len: number of DWORDS words to process + * @seed: seed value for CRC + * + * This function generates a CRC for some region of the FIFO + **/ +static u16 fm10k_fifo_crc(struct fm10k_mbx_fifo *fifo, u16 offset, + u16 len, u16 seed) +{ + u32 *data = fifo->buffer + offset; + + /* track when we should cross the end of the FIFO */ + offset = fifo->size - offset; + + /* if we are in 2 blocks process the end of the FIFO first */ + if (offset < len) { + seed = fm10k_crc_16b(data, seed, offset * 2); + data = fifo->buffer; + len -= offset; + } + + /* process any remaining bits */ + return fm10k_crc_16b(data, seed, len * 2); +} + +/** + * fm10k_mbx_update_local_crc - Update the local CRC for outgoing data + * @mbx: pointer to mailbox + * @head: head index provided by remote mailbox + * + * This function will generate the CRC for all data from the end of the + * last head update to the current one. It uses the result of the + * previous CRC as the seed for this update. The result is stored in + * mbx->local. + **/ +static void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head) +{ + u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); + + /* determine the offset for the start of the region to be pulled */ + head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled); + + /* update local CRC to include all of the pulled data */ + mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local); +} + +/** + * fm10k_mbx_verify_remote_crc - Verify the CRC is correct for current data + * @mbx: pointer to mailbox + * + * This function will take all data that has been provided from the remote + * end and generate a CRC for it. This is stored in mbx->remote. The + * CRC for the header is then computed and if the result is non-zero this + * is an error and we signal an error dropping all data and resetting the + * connection. + */ +static s32 fm10k_mbx_verify_remote_crc(struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + u16 len = mbx->head_len; + u16 offset = fm10k_fifo_tail_offset(fifo, mbx->pushed) - len; + u16 crc; + + /* update the remote CRC if new data has been received */ + if (len) + mbx->remote = fm10k_fifo_crc(fifo, offset, len, mbx->remote); + + /* process the full header as we have to validate the CRC */ + crc = fm10k_crc_16b(&mbx->mbx_hdr, mbx->remote, 1); + + /* notify other end if we have a problem */ + return crc ? FM10K_MBX_ERR_CRC : 0; +} + +/** + * fm10k_mbx_rx_ready - Indicates that a message is ready in the Rx FIFO + * @mbx: pointer to mailbox + * + * This function returns true if there is a message in the Rx FIFO to dequeue. 
+ **/ +static bool fm10k_mbx_rx_ready(struct fm10k_mbx_info *mbx) +{ + u16 msg_size = fm10k_fifo_head_len(&mbx->rx); + + return msg_size && (fm10k_fifo_used(&mbx->rx) >= msg_size); +} + +/** + * fm10k_mbx_tx_ready - Indicates that the mailbox is in state ready for Tx + * @mbx: pointer to mailbox + * @len: verify free space is >= this value + * + * This function returns true if the mailbox is in a state ready to transmit. + **/ +static bool fm10k_mbx_tx_ready(struct fm10k_mbx_info *mbx, u16 len) +{ + u16 fifo_unused = fm10k_fifo_unused(&mbx->tx); + + return (mbx->state == FM10K_STATE_OPEN) && (fifo_unused >= len); +} + +/** + * fm10k_mbx_tx_complete - Indicates that the Tx FIFO has been emptied + * @mbx: pointer to mailbox + * + * This function returns true if the Tx FIFO is empty. + **/ +static bool fm10k_mbx_tx_complete(struct fm10k_mbx_info *mbx) +{ + return fm10k_fifo_empty(&mbx->tx); +} + +/** + * fm10k_mbx_deqeueue_rx - Dequeues the message from the head in the Rx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function dequeues messages and hands them off to the tlv parser. + * It will return the number of messages processed when called. + **/ +static u16 fm10k_mbx_dequeue_rx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_mbx_fifo *fifo = &mbx->rx; + s32 err; + u16 cnt; + + /* parse Rx messages out of the Rx FIFO to empty it */ + for (cnt = 0; !fm10k_fifo_empty(fifo); cnt++) { + err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head, + mbx, mbx->msg_data); + if (err < 0) + mbx->rx_parse_err++; + + fm10k_fifo_head_drop(fifo); + } + + /* shift remaining bytes back to start of FIFO */ + memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2); + + /* shift head and tail based on the memory we moved */ + fifo->tail -= fifo->head; + fifo->head = 0; + + return cnt; +} + +/** + * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg: message array to read + * + * This function enqueues a message up to the size specified by the length + * contained in the first DWORD of the message and will place at the tail + * of the FIFO. It will return 0 on success, or a negative value on error. 
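+ *
+ * Callers normally reach this through the ops table; a minimal sketch
+ * of such a call site (hypothetical, the message is assumed to have
+ * already been built with the fm10k_tlv helpers) looks like:
+ *
+ *	err = mbx->ops.enqueue_tx(hw, mbx, msg);
+ *	if (err)
+ *		return err;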
+ **/ +static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, const u32 *msg) +{ + u32 countdown = mbx->timeout; + s32 err; + + switch (mbx->state) { + case FM10K_STATE_CLOSED: + case FM10K_STATE_DISCONNECT: + return FM10K_MBX_ERR_NO_MBX; + default: + break; + } + + /* enqueue the message on the Tx FIFO */ + err = fm10k_fifo_enqueue(&mbx->tx, msg); + + /* if it failed give the FIFO a chance to drain */ + while (err && countdown) { + countdown--; + udelay(mbx->udelay); + mbx->ops.process(hw, mbx); + err = fm10k_fifo_enqueue(&mbx->tx, msg); + } + + /* if we failed treat the error */ + if (err) { + mbx->timeout = 0; + mbx->tx_busy++; + } + + /* begin processing message, ignore errors as this is just meant + * to start the mailbox flow so we are not concerned if there + * is a bad error, or the mailbox is already busy with a request + */ + if (!mbx->tail_len) + mbx->ops.process(hw, mbx); + + return 0; +} + +/** + * fm10k_mbx_read - Copies the mbmem to local message buffer + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the mbmem to the message array + **/ +static s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + /* only allow one reader in here at a time */ + if (mbx->mbx_hdr) + return FM10K_MBX_ERR_BUSY; + + /* read to capture initial interrupt bits */ + if (fm10k_read_reg(hw, mbx->mbx_reg) & FM10K_MBX_REQ_INTERRUPT) + mbx->mbx_lock = FM10K_MBX_ACK; + + /* write back interrupt bits to clear */ + fm10k_write_reg(hw, mbx->mbx_reg, + FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT); + + /* read remote header */ + mbx->mbx_hdr = fm10k_read_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len); + + return 0; +} + +/** + * fm10k_mbx_write - Copies the local message buffer to mbmem + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function copies the message from the the message array to mbmem + **/ +static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + u32 mbmem = mbx->mbmem_reg; + + /* write new msg header to notify recipient of change */ + fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); + + /* write mailbox to send interrupt */ + if (mbx->mbx_lock) + fm10k_write_reg(hw, mbx->mbx_reg, mbx->mbx_lock); + + /* we no longer are using the header so free it */ + mbx->mbx_hdr = 0; + mbx->mbx_lock = 0; +} + +/** + * fm10k_mbx_create_connect_hdr - Generate a connect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a connection mailbox header + **/ +static void fm10k_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx) +{ + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_CONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) | + FM10K_MSG_HDR_FIELD_SET(mbx->rx.size - 1, CONNECT_SIZE); +} + +/** + * fm10k_mbx_create_data_hdr - Generate a data mailbox header + * @mbx: pointer to mailbox + * + * This function returns a data mailbox header + **/ +static void fm10k_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + struct fm10k_mbx_fifo *fifo = &mbx->tx; + u16 crc; + + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + /* generate CRC for data in flight and header */ + crc = fm10k_fifo_crc(fifo, fm10k_fifo_head_offset(fifo, mbx->pulled), + mbx->tail_len, mbx->local); + crc = fm10k_crc_16b(&hdr, crc, 1); + + /* load header to memory to be 
written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_disconnect_hdr - Generate a disconnect mailbox header + * @mbx: pointer to mailbox + * + * This function returns a disconnect mailbox header + **/ +static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx) +{ + u32 hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DISCONNECT, TYPE) | + FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); + u16 crc = fm10k_crc_16b(&hdr, mbx->local, 1); + + mbx->mbx_lock |= FM10K_MBX_ACK; + + /* load header to memory to be written */ + mbx->mbx_hdr = hdr | FM10K_MSG_HDR_FIELD_SET(crc, CRC); +} + +/** + * fm10k_mbx_create_error_msg - Generate a error message + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may shift the message by 1 DWORD and then place an error header + * at the start of the message. + **/ +static void fm10k_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_TYPE: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + case FM10K_MBX_ERR_CRC: + break; + default: + return; + } + + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_ERROR, TYPE) | + FM10K_MSG_HDR_FIELD_SET(err, ERR_NO) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); +} + +/** + * fm10k_mbx_validate_msg_hdr - Validate common fields in the message header + * @mbx: pointer to mailbox + * @msg: message array to read + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized type, invalid route, or a malformed message. 
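+ *
+ * In short: RSVD0 must be clear and head may be neither 0 nor all ones;
+ * for DATA/DISCONNECT the tail has the same restriction and both
+ * indices must be consistent with the local state; CONNECT must
+ * advertise a size of at least FM10K_VFMBX_MSG_MTU that is one less
+ * than a power of 2; and CONNECT/ERROR headers must carry a tail of 0.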
+ **/ +static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) +{ + u16 type, rsvd0, head, tail, size; + const u32 *hdr = &mbx->mbx_hdr; + + type = FM10K_MSG_HDR_FIELD_GET(*hdr, TYPE); + rsvd0 = FM10K_MSG_HDR_FIELD_GET(*hdr, RSVD0); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + + if (rsvd0) + return FM10K_MBX_ERR_RSVD0; + + switch (type) { + case FM10K_MSG_DISCONNECT: + /* validate that all data has been received */ + if (tail != mbx->head) + return FM10K_MBX_ERR_TAIL; + + /* fall through */ + case FM10K_MSG_DATA: + /* validate that head is moving correctly */ + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + + /* validate that tail is moving correctly */ + if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL))) + return FM10K_MBX_ERR_TAIL; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + + return FM10K_MBX_ERR_TAIL; + case FM10K_MSG_CONNECT: + /* validate size is in range and is power of 2 mask */ + if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1))) + return FM10K_MBX_ERR_SIZE; + + /* fall through */ + case FM10K_MSG_ERROR: + if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) + return FM10K_MBX_ERR_HEAD; + /* neither create nor error include a tail offset */ + if (tail) + return FM10K_MBX_ERR_TAIL; + + break; + default: + return FM10K_MBX_ERR_TYPE; + } + + return 0; +} + +/** + * fm10k_mbx_create_reply - Generate reply based on state and remote head + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote fifo head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +static s32 fm10k_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* update our checksum for the outgoing data */ + fm10k_mbx_update_local_crc(mbx, head); + + /* as long as other end recognizes us keep sending data */ + fm10k_mbx_pull_head(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) + fm10k_mbx_create_data_hdr(mbx); + else + fm10k_mbx_create_disconnect_hdr(mbx); + break; + case FM10K_STATE_CONNECT: + /* send disconnect even if we aren't connected */ + fm10k_mbx_create_connect_hdr(mbx); + break; + case FM10K_STATE_CLOSED: + /* generate new header based on data */ + fm10k_mbx_create_disconnect_hdr(mbx); + default: + break; + } + + return 0; +} + +/** + * fm10k_mbx_reset_work- Reset internal pointers for any pending work + * @mbx: pointer to mailbox + * + * This function will reset all internal pointers so any work in progress + * is dropped. This call should occur every time we transition from the + * open state to the connect state. 
+ **/ +static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) +{ + /* reset our outgoing max size back to Rx limits */ + mbx->max_size = mbx->rx.size - 1; + + /* just do a quick resysnc to start of message */ + mbx->pushed = 0; + mbx->pulled = 0; + mbx->tail_len = 0; + mbx->head_len = 0; + mbx->rx.tail = 0; + mbx->rx.head = 0; +} + +/** + * fm10k_mbx_update_max_size - Update the max_size and drop any large messages + * @mbx: pointer to mailbox + * @size: new value for max_size + * + * This function updates the max_size value and drops any outgoing messages + * at the head of the Tx FIFO if they are larger than max_size. It does not + * drop all messages, as this is too difficult to parse and remove them from + * the FIFO. Instead, rely on the checking to ensure that messages larger + * than max_size aren't pushed into the memory buffer. + **/ +static void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size) +{ + u16 len; + + mbx->max_size = size; + + /* flush any oversized messages from the queue */ + for (len = fm10k_fifo_head_len(&mbx->tx); + len > size; + len = fm10k_fifo_head_len(&mbx->tx)) { + fm10k_fifo_head_drop(&mbx->tx); + mbx->tx_dropped++; + } +} + +/** + * fm10k_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to either a disconnected state + * or a connect state depending on the current mailbox state + **/ +static void fm10k_mbx_connect_reset(struct fm10k_mbx_info *mbx) +{ + /* just do a quick resysnc to start of frame */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* we cannot exit connect until the size is good */ + if (mbx->state == FM10K_STATE_OPEN) + mbx->state = FM10K_STATE_CONNECT; + else + mbx->state = FM10K_STATE_CLOSED; +} + +/** + * fm10k_mbx_process_connect - Process connect header + * @mbx: pointer to mailbox + * @msg: message array to process + * + * This function will read an incoming connect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +static s32 fm10k_mbx_process_connect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 size, head; + + /* we will need to pull all of the fields for verification */ + size = FM10K_MSG_HDR_FIELD_GET(*hdr, CONNECT_SIZE); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* we cannot exit connect until the size is good */ + if (size > mbx->rx.size) { + mbx->max_size = mbx->rx.size - 1; + } else { + /* record the remote system requesting connection */ + mbx->state = FM10K_STATE_OPEN; + + fm10k_mbx_update_max_size(mbx, size); + } + break; + default: + break; + } + + /* align our tail index to remote head index */ + mbx->tail = head; + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_data - Process data header + * @mbx: pointer to mailbox + * + * This function will read an incoming data header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. 
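+ *
+ * Processing proceeds in order: new data is pushed onto the Rx FIFO,
+ * the running CRC is checked against the received header, completed
+ * messages are handed to the TLV parser, and finally a reply
+ * acknowledging the remote head is generated.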
+ **/ +static s32 fm10k_mbx_process_data(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 err; + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); + + /* if we are in connect just update our data and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + mbx->tail = head; + mbx->state = FM10K_STATE_OPEN; + } + + /* abort on message size errors */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* verify the checksum on the incoming data */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_disconnect - Process disconnect header + * @mbx: pointer to mailbox + * + * This function will read an incoming disconnect header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. + **/ +static s32 fm10k_mbx_process_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + const u32 *hdr = &mbx->mbx_hdr; + u16 head; + s32 err; + + /* we will need to pull the header field for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + /* We should not be receiving disconnect if Rx is incomplete */ + if (mbx->pushed) + return FM10K_MBX_ERR_TAIL; + + /* we have already verified mbx->head == tail so we know this is 0 */ + mbx->head_len = 0; + + /* verify the checksum on the incoming header is correct */ + err = fm10k_mbx_verify_remote_crc(mbx); + if (err) + return err; + + switch (state) { + case FM10K_STATE_DISCONNECT: + case FM10K_STATE_OPEN: + /* state doesn't change if we still have work to do */ + if (!fm10k_mbx_tx_complete(mbx)) + break; + + /* verify the head indicates we completed all transmits */ + if (head != mbx->tail) + return FM10K_MBX_ERR_HEAD; + + /* reset any in-progress work */ + fm10k_mbx_connect_reset(mbx); + break; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, head); +} + +/** + * fm10k_mbx_process_error - Process error header + * @mbx: pointer to mailbox + * + * This function will read an incoming error header and reply with the + * appropriate message. It will return a value indicating the number of + * data DWORDs on success, or will return a negative value on failure. 
+ **/ +static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + s32 err_no; + u16 head; + + /* we will need to pull all of the fields for verification */ + head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); + + /* we only have lower 10 bits of error number so add upper bits */ + err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); + err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); + + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* reset CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* reset tail index and size to prepare for reconnect */ + mbx->tail = head; + + /* if open then reset max_size and go back to connect */ + if (mbx->state == FM10K_STATE_OPEN) { + mbx->state = FM10K_STATE_CONNECT; + break; + } + + /* send a connect message to get data flowing again */ + fm10k_mbx_create_connect_hdr(mbx); + return 0; + default: + break; + } + + return fm10k_mbx_create_reply(hw, mbx, mbx->tail); +} + +/** + * fm10k_mbx_process - Process mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. + **/ +static s32 fm10k_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return 0; + + /* copy data from mailbox */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + /* validate type, source, and destination */ + err = fm10k_mbx_validate_msg_hdr(mbx); + if (err < 0) + goto msg_err; + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, TYPE)) { + case FM10K_MSG_CONNECT: + err = fm10k_mbx_process_connect(hw, mbx); + break; + case FM10K_MSG_DATA: + err = fm10k_mbx_process_data(hw, mbx); + break; + case FM10K_MSG_DISCONNECT: + err = fm10k_mbx_process_disconnect(hw, mbx); + break; + case FM10K_MSG_ERROR: + err = fm10k_mbx_process_error(hw, mbx); + break; + default: + err = FM10K_MBX_ERR_TYPE; + break; + } + +msg_err: + /* notify partner of errors on our end */ + if (err < 0) + fm10k_mbx_create_error_msg(mbx, err); + + /* copy data from mailbox */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +static void fm10k_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? 
FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + fm10k_write_reg(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + udelay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close, just force the mailbox into shutdown and + * drop all left over messages in the FIFO. + */ + fm10k_mbx_connect_reset(mbx); + fm10k_fifo_drop_all(&mbx->tx); + + fm10k_write_reg(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_mbx_connect - Start mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection. It will populate the + * mailbox with a broadcast connect message and then initialize the lock. + * This is safe since the connect message is a single DWORD so the mailbox + * transaction is guaranteed to be atomic. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. + **/ +static s32 fm10k_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + + /* initialize header of remote mailbox */ + fm10k_mbx_create_disconnect_hdr(mbx); + fm10k_write_reg(hw, mbx->mbmem_reg ^ mbx->mbmem_len, mbx->mbx_hdr); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_mbx_create_connect_hdr(mbx); + fm10k_mbx_write(hw, mbx); + + return 0; +} + +/** + * fm10k_mbx_validate_handlers - Validate layout of message parsing data + * @msg_data: handlers for mailbox events + * + * This function validates the layout of the message parsing data. This + * should be mostly static, but it is important to catch any errors that + * are made when constructing the parsers. 
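+ *
+ * A conforming table keeps ids strictly increasing and ends with an
+ * FM10K_TLV_ERROR entry that still carries a handler; a minimal sketch
+ * (handler and attribute names here are hypothetical) would be:
+ *
+ *	static const struct fm10k_msg_data example_msg_data[] = {
+ *		{ .id = 1, .attr = example_attr, .func = example_handler },
+ *		{ .id = FM10K_TLV_ERROR, .func = example_error_handler },
+ *	};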
+ **/ +static s32 fm10k_mbx_validate_handlers(const struct fm10k_msg_data *msg_data) +{ + const struct fm10k_tlv_attr *attr; + unsigned int id; + + /* Allow NULL mailboxes that transmit but don't receive */ + if (!msg_data) + return 0; + + while (msg_data->id != FM10K_TLV_ERROR) { + /* all messages should have a function handler */ + if (!msg_data->func) + return FM10K_ERR_PARAM; + + /* parser is optional */ + attr = msg_data->attr; + if (attr) { + while (attr->id != FM10K_TLV_ERROR) { + id = attr->id; + attr++; + /* ID should always be increasing */ + if (id >= attr->id) + return FM10K_ERR_PARAM; + /* ID should fit in results array */ + if (id >= FM10K_TLV_RESULTS_MAX) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if (attr->id != FM10K_TLV_ERROR) + return FM10K_ERR_PARAM; + } + + id = msg_data->id; + msg_data++; + /* ID should always be increasing */ + if (id >= msg_data->id) + return FM10K_ERR_PARAM; + } + + /* verify terminator is in the list */ + if ((msg_data->id != FM10K_TLV_ERROR) || !msg_data->func) + return FM10K_ERR_PARAM; + + return 0; +} + +/** + * fm10k_mbx_register_handlers - Register a set of handler ops for mailbox + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function associates a set of message handling ops with a mailbox. + **/ +static s32 fm10k_mbx_register_handlers(struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + return 0; +} + +/** + * fm10k_pfvf_mbx_init - Initialize mailbox memory for PF/VF mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * @id: ID reference for PF as it supports up to 64 PF/VF mailboxes + * + * This function initializes the mailbox for use. It will split the + * buffer provided an use that th populate both the Tx and Rx FIFO by + * evenly splitting it. In order to allow for easy masking of head/tail + * the value reported in size must be a power of 2 and is reported in + * DWORDs, not bytes. Any invalid values will cause the mailbox to return + * error. 
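+ *
+ * A minimal sketch of VF-side usage (hypothetical handler table, error
+ * handling trimmed, and assuming the hw struct embeds its mailbox info
+ * as hw->mbx) might look like:
+ *
+ *	err = fm10k_pfvf_mbx_init(hw, &hw->mbx, vf_msg_data, 0);
+ *	if (!err)
+ *		err = hw->mbx.ops.connect(hw, &hw->mbx);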
+ **/ +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data, u8 id) +{ + /* initialize registers */ + switch (hw->mac.type) { + case fm10k_mac_vf: + mbx->mbx_reg = FM10K_VFMBX; + mbx->mbmem_reg = FM10K_VFMBMEM(FM10K_VFMBMEM_VF_XOR); + break; + case fm10k_mac_pf: + /* there are only 64 VF <-> PF mailboxes */ + if (id < 64) { + mbx->mbx_reg = FM10K_MBX(id); + mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0); + break; + } + /* fallthough */ + default: + return FM10K_MBX_ERR_NO_MBX; + } + + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = FM10K_MBX_INIT_DELAY; + + /* initialize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* initialize CRC seeds */ + mbx->local = FM10K_MBX_CRC_SEED; + mbx->remote = FM10K_MBX_CRC_SEED; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_VFMBMEM_VF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_mbx_connect; + mbx->ops.disconnect = fm10k_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return 0; +} + +/** + * fm10k_sm_mbx_create_data_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * + * This function returns a connection mailbox header + **/ +static void fm10k_sm_mbx_create_data_hdr(struct fm10k_mbx_info *mbx) +{ + if (mbx->tail_len) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD); +} + +/** + * fm10k_sm_mbx_create_connect_hdr - Generate a mailbox header for local FIFO + * @mbx: pointer to mailbox + * @err: error flags to report if any + * + * This function returns a connection mailbox header + **/ +static void fm10k_sm_mbx_create_connect_hdr(struct fm10k_mbx_info *mbx, u8 err) +{ + if (mbx->local) + mbx->mbx_lock |= FM10K_MBX_REQ; + + mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | + FM10K_MSG_HDR_FIELD_SET(mbx->remote, SM_VER) | + FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) | + FM10K_MSG_HDR_FIELD_SET(err, SM_ERR); +} + +/** + * fm10k_sm_mbx_connect_reset - Reset following request for reset + * @mbx: pointer to mailbox + * + * This function resets the mailbox to a just connected state + **/ +static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx) +{ + /* flush any uncompleted work */ + fm10k_mbx_reset_work(mbx); + + /* set local version to max and remote version to 0 */ + mbx->local = FM10K_SM_MBX_VERSION; + mbx->remote = 0; + + /* initialize tail and head */ + mbx->tail = 1; + mbx->head = 1; + + /* reset state back to connect */ + mbx->state = FM10K_STATE_CONNECT; +} + +/** + * 
fm10k_sm_mbx_connect - Start switch manager mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will initiate a mailbox connection with the switch + * manager. To do this it will first disconnect the mailbox, and then + * reconnect it in order to complete a reset of the mailbox. + * + * This function will return an error if the mailbox has not been initiated + * or is currently in use. + **/ +static s32 fm10k_sm_mbx_connect(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) +{ + /* we cannot connect an uninitialized mailbox */ + if (!mbx->rx.buffer) + return FM10K_MBX_ERR_NO_SPACE; + + /* we cannot connect an already connected mailbox */ + if (mbx->state != FM10K_STATE_CLOSED) + return FM10K_MBX_ERR_BUSY; + + /* mailbox timeout can now become active */ + mbx->timeout = FM10K_MBX_INIT_TIMEOUT; + + /* Place mbx in ready to connect state */ + mbx->state = FM10K_STATE_CONNECT; + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + + /* reset interface back to connect */ + fm10k_sm_mbx_connect_reset(mbx); + + /* enable interrupt and notify other party of new message */ + mbx->mbx_lock = FM10K_MBX_REQ_INTERRUPT | FM10K_MBX_ACK_INTERRUPT | + FM10K_MBX_INTERRUPT_ENABLE; + + /* generate and load connect header into mailbox */ + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + fm10k_mbx_write(hw, mbx); + + /* enable interrupt and notify other party of new message */ + + return 0; +} + +/** + * fm10k_sm_mbx_disconnect - Shutdown mailbox connection + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will shut down the mailbox. It places the mailbox first + * in the disconnect state, it then allows up to a predefined timeout for + * the mailbox to transition to close on its own. If this does not occur + * then the mailbox will be forced into the closed state. + * + * Any mailbox transactions not completed before calling this function + * are not guaranteed to complete and may be dropped. + **/ +static void fm10k_sm_mbx_disconnect(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + int timeout = mbx->timeout ? FM10K_MBX_DISCONNECT_TIMEOUT : 0; + + /* Place mbx in ready to disconnect state */ + mbx->state = FM10K_STATE_DISCONNECT; + + /* trigger interrupt to start shutdown process */ + fm10k_write_reg(hw, mbx->mbx_reg, FM10K_MBX_REQ | + FM10K_MBX_INTERRUPT_DISABLE); + do { + udelay(FM10K_MBX_POLL_DELAY); + mbx->ops.process(hw, mbx); + timeout -= FM10K_MBX_POLL_DELAY; + } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); + + /* in case we didn't close just force the mailbox into shutdown */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + fm10k_mbx_reset_work(mbx); + fm10k_mbx_update_max_size(mbx, 0); + + fm10k_write_reg(hw, mbx->mbmem_reg, 0); +} + +/** + * fm10k_mbx_validate_fifo_hdr - Validate fields in the remote FIFO header + * @mbx: pointer to mailbox + * + * This function will parse up the fields in the mailbox header and return + * an error if the header contains any of a number of invalid configurations + * including unrecognized offsets or version numbers. 
+ **/ +static s32 fm10k_sm_mbx_validate_fifo_hdr(struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 tail, head, ver; + + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + ver = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_VER); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + switch (ver) { + case 0: + break; + case FM10K_SM_MBX_VERSION: + if (!head || head > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_HEAD; + if (!tail || tail > FM10K_SM_MBX_FIFO_LEN) + return FM10K_MBX_ERR_TAIL; + if (mbx->tail < head) + head += mbx->mbmem_len - 1; + if (tail < mbx->head) + tail += mbx->mbmem_len - 1; + if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) + return FM10K_MBX_ERR_HEAD; + if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) + break; + return FM10K_MBX_ERR_TAIL; + default: + return FM10K_MBX_ERR_SRC; + } + + return 0; +} + +/** + * fm10k_sm_mbx_process_error - Process header with error flag set + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the error flag + * is set. As a result we will terminate a connection if one is present + * and fall back into the reset state with a connection header of version + * 0 (RESET). + **/ +static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* if there is an error just disconnect */ + mbx->remote = 0; + break; + case FM10K_STATE_OPEN: + /* flush any uncompleted work */ + fm10k_sm_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* try connnecting at lower version */ + if (mbx->remote) { + while (mbx->local > 1) + mbx->local--; + mbx->remote = 0; + } + break; + default: + break; + } + + fm10k_sm_mbx_create_connect_hdr(mbx, 0); +} + +/** + * fm10k_sm_mbx_create_error_message - Process an error in FIFO hdr + * @mbx: pointer to mailbox + * @err: local error encountered + * + * This function will interpret the error provided by err, and based on + * that it may set the error bit in the local message header + **/ +static void fm10k_sm_mbx_create_error_msg(struct fm10k_mbx_info *mbx, s32 err) +{ + /* only generate an error message for these types */ + switch (err) { + case FM10K_MBX_ERR_TAIL: + case FM10K_MBX_ERR_HEAD: + case FM10K_MBX_ERR_SRC: + case FM10K_MBX_ERR_SIZE: + case FM10K_MBX_ERR_RSVD0: + break; + default: + return; + } + + /* process it as though we received an error, and send error reply */ + fm10k_sm_mbx_process_error(mbx); + fm10k_sm_mbx_create_connect_hdr(mbx, 1); +} + +/** + * fm10k_sm_mbx_receive - Take message from Rx mailbox FIFO and put it in Rx + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will dequeue one message from the Rx switch manager mailbox + * FIFO and place it in the Rx mailbox FIFO for processing by software. 
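+ *
+ * The switch manager reports tail as an index into its half of the
+ * mailbox memory, so a tail that has wrapped behind the local head is
+ * first unwound by adding mbmem_len - 1 before being handed to
+ * fm10k_mbx_push_tail().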
+ **/ +static s32 fm10k_sm_mbx_receive(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, + u16 tail) +{ + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + s32 err; + + /* push tail in front of head */ + if (tail < mbx->head) + tail += mbmem_len; + + /* copy data to the Rx FIFO */ + err = fm10k_mbx_push_tail(hw, mbx, tail); + if (err < 0) + return err; + + /* process messages if we have received any */ + fm10k_mbx_dequeue_rx(hw, mbx); + + /* guarantee head aligns with the end of the last message */ + mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed); + mbx->pushed = 0; + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->head > mbmem_len) + mbx->head -= mbmem_len; + + return err; +} + +/** + * fm10k_sm_mbx_transmit - Take message from Tx and put it in Tx mailbox FIFO + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will dequeue one message from the Tx mailbox FIFO and place + * it in the Tx switch manager mailbox FIFO for processing by hardware. + **/ +static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + struct fm10k_mbx_fifo *fifo = &mbx->tx; + /* reduce length by 1 to convert to a mask */ + u16 mbmem_len = mbx->mbmem_len - 1; + u16 tail_len, len = 0; + u32 *msg; + + /* push head behind tail */ + if (mbx->tail < head) + head += mbmem_len; + + fm10k_mbx_pull_head(hw, mbx, head); + + /* determine msg aligned offset for end of buffer */ + do { + msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len); + tail_len = len; + len += FM10K_TLV_DWORD_LEN(*msg); + } while ((len <= mbx->tail_len) && (len < mbmem_len)); + + /* guarantee we stop on a message boundary */ + if (mbx->tail_len > tail_len) { + mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len); + mbx->tail_len = tail_len; + } + + /* clear any extra bits left over since index adds 1 extra bit */ + if (mbx->tail > mbmem_len) + mbx->tail -= mbmem_len; +} + +/** + * fm10k_sm_mbx_create_reply - Generate reply based on state and remote head + * @mbx: pointer to mailbox + * @head: acknowledgement number + * + * This function will generate an outgoing message based on the current + * mailbox state and the remote fifo head. It will return the length + * of the outgoing message excluding header on success, and a negative value + * on error. + **/ +static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx, u16 head) +{ + switch (mbx->state) { + case FM10K_STATE_OPEN: + case FM10K_STATE_DISCONNECT: + /* flush out Tx data */ + fm10k_sm_mbx_transmit(hw, mbx, head); + + /* generate new header based on data */ + if (mbx->tail_len || (mbx->state == FM10K_STATE_OPEN)) { + fm10k_sm_mbx_create_data_hdr(mbx); + } else { + mbx->remote = 0; + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + } + break; + case FM10K_STATE_CONNECT: + case FM10K_STATE_CLOSED: + fm10k_sm_mbx_create_connect_hdr(mbx, 0); + break; + default: + break; + } +} + +/** + * fm10k_sm_mbx_process_reset - Process header with version == 0 (RESET) + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to respond to a request where the version data + * is set to 0. As such we will either terminate the connection or go + * into the connect state in order to re-establish the connection. This + * function can also be used to respond to an error as the connection + * resetting would also be a means of dealing with errors. 
+ **/ +static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const enum fm10k_mbx_state state = mbx->state; + + switch (state) { + case FM10K_STATE_DISCONNECT: + /* drop remote connections and disconnect */ + mbx->state = FM10K_STATE_CLOSED; + mbx->remote = 0; + mbx->local = 0; + break; + case FM10K_STATE_OPEN: + /* flush any incomplete work */ + fm10k_sm_mbx_connect_reset(mbx); + break; + case FM10K_STATE_CONNECT: + /* Update remote value to match local value */ + mbx->remote = mbx->local; + default: + break; + } + + fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); +} + +/** + * fm10k_sm_mbx_process_version_1 - Process header with version == 1 + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function is meant to process messages received when the remote + * mailbox is active. + **/ +static s32 fm10k_sm_mbx_process_version_1(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + const u32 *hdr = &mbx->mbx_hdr; + u16 head, tail; + s32 len; + + /* pull all fields needed for verification */ + tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); + head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); + + /* if we are in connect and wanting version 1 then start up and go */ + if (mbx->state == FM10K_STATE_CONNECT) { + if (!mbx->remote) + goto send_reply; + if (mbx->remote != 1) + return FM10K_MBX_ERR_SRC; + + mbx->state = FM10K_STATE_OPEN; + } + + do { + /* abort on message size errors */ + len = fm10k_sm_mbx_receive(hw, mbx, tail); + if (len < 0) + return len; + + /* continue until we have flushed the Rx FIFO */ + } while (len); + +send_reply: + fm10k_sm_mbx_create_reply(hw, mbx, head); + + return 0; +} + +/** + * fm10k_sm_mbx_process - Process mailbox switch mailbox interrupt + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * + * This function will process incoming mailbox events and generate mailbox + * replies. It will return a value indicating the number of DWORDs + * transmitted excluding header on success or a negative value on error. 
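+ *
+ * Headers with the error bit set are handled first, version 0 headers
+ * reset the connection, and FM10K_SM_MBX_VERSION headers are processed
+ * as data; any other version is rejected during header validation with
+ * FM10K_MBX_ERR_SRC.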
+ **/ +static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw, + struct fm10k_mbx_info *mbx) +{ + s32 err; + + /* we do not read mailbox if closed */ + if (mbx->state == FM10K_STATE_CLOSED) + return 0; + + /* retrieve data from switch manager */ + err = fm10k_mbx_read(hw, mbx); + if (err) + return err; + + err = fm10k_sm_mbx_validate_fifo_hdr(mbx); + if (err < 0) + goto fifo_err; + + if (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_ERR)) { + fm10k_sm_mbx_process_error(mbx); + goto fifo_err; + } + + switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) { + case 0: + fm10k_sm_mbx_process_reset(hw, mbx); + break; + case FM10K_SM_MBX_VERSION: + err = fm10k_sm_mbx_process_version_1(hw, mbx); + break; + } + +fifo_err: + if (err < 0) + fm10k_sm_mbx_create_error_msg(mbx, err); + + /* report data to switch manager */ + fm10k_mbx_write(hw, mbx); + + return err; +} + +/** + * fm10k_sm_mbx_init - Initialize mailbox memory for PF/SM mailbox + * @hw: pointer to hardware structure + * @mbx: pointer to mailbox + * @msg_data: handlers for mailbox events + * + * This function for now is used to stub out the PF/SM mailbox + **/ +s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *msg_data) +{ + mbx->mbx_reg = FM10K_GMBX; + mbx->mbmem_reg = FM10K_MBMEM_PF(0); + /* start out in closed state */ + mbx->state = FM10K_STATE_CLOSED; + + /* validate layout of handlers before assigning them */ + if (fm10k_mbx_validate_handlers(msg_data)) + return FM10K_ERR_PARAM; + + /* initialize the message handlers */ + mbx->msg_data = msg_data; + + /* start mailbox as timed out and let the reset_hw call + * set the timeout value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = FM10K_MBX_INIT_DELAY; + + /* Split buffer for use by Tx/Rx FIFOs */ + mbx->max_size = FM10K_MBX_MSG_MAX_SIZE; + mbx->mbmem_len = FM10K_MBMEM_PF_XOR; + + /* initialize the FIFOs, sizes are in 4 byte increments */ + fm10k_fifo_init(&mbx->tx, mbx->buffer, FM10K_MBX_TX_BUFFER_SIZE); + fm10k_fifo_init(&mbx->rx, &mbx->buffer[FM10K_MBX_TX_BUFFER_SIZE], + FM10K_MBX_RX_BUFFER_SIZE); + + /* initialize function pointers */ + mbx->ops.connect = fm10k_sm_mbx_connect; + mbx->ops.disconnect = fm10k_sm_mbx_disconnect; + mbx->ops.rx_ready = fm10k_mbx_rx_ready; + mbx->ops.tx_ready = fm10k_mbx_tx_ready; + mbx->ops.tx_complete = fm10k_mbx_tx_complete; + mbx->ops.enqueue_tx = fm10k_mbx_enqueue_tx; + mbx->ops.process = fm10k_sm_mbx_process; + mbx->ops.register_handlers = fm10k_mbx_register_handlers; + + return 0; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h new file mode 100644 index 000000000..0419a7f00 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -0,0 +1,307 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_MBX_H_ +#define _FM10K_MBX_H_ + +/* forward declaration */ +struct fm10k_mbx_info; + +#include "fm10k_type.h" +#include "fm10k_tlv.h" + +/* PF Mailbox Registers */ +#define FM10K_MBMEM(_n) ((_n) + 0x18000) +#define FM10K_MBMEM_VF(_n, _m) (((_n) * 0x10) + (_m) + 0x18000) +#define FM10K_MBMEM_SM(_n) ((_n) + 0x18400) +#define FM10K_MBMEM_PF(_n) ((_n) + 0x18600) +/* XOR provides means of switching from Tx to Rx FIFO */ +#define FM10K_MBMEM_PF_XOR (FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0)) +#define FM10K_MBX(_n) ((_n) + 0x18800) +#define FM10K_MBX_REQ 0x00000002 +#define FM10K_MBX_ACK 0x00000004 +#define FM10K_MBX_REQ_INTERRUPT 0x00000008 +#define FM10K_MBX_ACK_INTERRUPT 0x00000010 +#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020 +#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040 +#define FM10K_MBICR(_n) ((_n) + 0x18840) +#define FM10K_GMBX 0x18842 + +/* VF Mailbox Registers */ +#define FM10K_VFMBX 0x00010 +#define FM10K_VFMBMEM(_n) ((_n) + 0x00020) +#define FM10K_VFMBMEM_LEN 16 +#define FM10K_VFMBMEM_VF_XOR (FM10K_VFMBMEM_LEN / 2) + +/* Delays/timeouts */ +#define FM10K_MBX_DISCONNECT_TIMEOUT 500 +#define FM10K_MBX_POLL_DELAY 19 +#define FM10K_MBX_INT_DELAY 20 + +/* PF/VF Mailbox state machine + * + * +----------+ connect() +----------+ + * | CLOSED | --------------> | CONNECT | + * +----------+ +----------+ + * ^ ^ | + * | rcv: rcv: | | rcv: + * | Connect Disconnect | | Connect + * | Disconnect Error | | Data + * | | | + * | | V + * +----------+ disconnect() +----------+ + * |DISCONNECT| <-------------- | OPEN | + * +----------+ +----------+ + * + * The diagram above describes the PF/VF mailbox state machine. There + * are four main states to this machine. + * Closed: This state represents a mailbox that is in a standby state + * with interrupts disabled. In this state the mailbox should not + * read the mailbox or write any data. The only means of exiting + * this state is for the system to make the connect() call for the + * mailbox, it will then transition to the connect state. + * Connect: In this state the mailbox is seeking a connection. It will + * post a connect message with no specified destination and will + * wait for a reply from the other side of the mailbox. This state + * is exited when either a connect with the local mailbox as the + * destination is received or when a data message is received with + * a valid sequence number. + * Open: In this state the mailbox is able to transfer data between the local + * entity and the remote. It will fall back to connect in the event of + * receiving either an error message, or a disconnect message. It will + * transition to disconnect on a call to disconnect(); + * Disconnect: In this state the mailbox is attempting to gracefully terminate + * the connection. It will do so at the first point where it knows + * that the remote endpoint is either done sending, or when the + * remote endpoint has fallen back into connect. + */ +enum fm10k_mbx_state { + FM10K_STATE_CLOSED, + FM10K_STATE_CONNECT, + FM10K_STATE_OPEN, + FM10K_STATE_DISCONNECT, +}; + +/* PF/VF Mailbox header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The layout above describes the format for the header used in the PF/VF + * mailbox. 
The header is broken out into the following fields: + * Type: There are 4 supported message types + * 0x8: Data header - used to transport message data + * 0xC: Connect header - used to establish connection + * 0xD: Disconnect header - used to tear down a connection + * 0xE: Error header - used to address message exceptions + * Tail: Tail index for local FIFO + * Tail index actually consists of two parts. The MSB of + * the head is a loop tracker, it is 0 on an even numbered + * loop through the FIFO, and 1 on the odd numbered loops. + * To get the actual mailbox offset based on the tail it + * is necessary to add bit 3 to bit 0 and clear bit 3. This + * gives us a valid range of 0x1 - 0xE. + * Head: Head index for remote FIFO + * Head index follows the same format as the tail index. + * Rsvd0: Reserved 0 portion of the mailbox header + * CRC: Running CRC for all data since connect plus current message header + * Size: Maximum message size - Applies only to connect headers + * The maximum message size is provided during connect to avoid + * jamming the mailbox with messages that do not fit. + * Err_no: Error number - Applies only to error headers + * The error number provides a indication of the type of error + * experienced. + */ + +/* macros for retriving and setting header values */ +#define FM10K_MSG_HDR_MASK(name) \ + ((0x1u << FM10K_MSG_##name##_SIZE) - 1) +#define FM10K_MSG_HDR_FIELD_SET(value, name) \ + (((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT) +#define FM10K_MSG_HDR_FIELD_GET(value, name) \ + ((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name)) + +/* offsets shared between all headers */ +#define FM10K_MSG_TYPE_SHIFT 0 +#define FM10K_MSG_TYPE_SIZE 4 +#define FM10K_MSG_TAIL_SHIFT 4 +#define FM10K_MSG_TAIL_SIZE 4 +#define FM10K_MSG_HEAD_SHIFT 8 +#define FM10K_MSG_HEAD_SIZE 4 +#define FM10K_MSG_RSVD0_SHIFT 12 +#define FM10K_MSG_RSVD0_SIZE 4 + +/* offsets for data/disconnect headers */ +#define FM10K_MSG_CRC_SHIFT 16 +#define FM10K_MSG_CRC_SIZE 16 + +/* offsets for connect headers */ +#define FM10K_MSG_CONNECT_SIZE_SHIFT 16 +#define FM10K_MSG_CONNECT_SIZE_SIZE 16 + +/* offsets for error headers */ +#define FM10K_MSG_ERR_NO_SHIFT 16 +#define FM10K_MSG_ERR_NO_SIZE 16 + +enum fm10k_msg_type { + FM10K_MSG_DATA = 0x8, + FM10K_MSG_CONNECT = 0xC, + FM10K_MSG_DISCONNECT = 0xD, + FM10K_MSG_ERROR = 0xE, +}; + +/* HNI/SM Mailbox FIFO format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-------+-----------------------+-------+-----------------------+ + * | Error | Remote Head |Version| Local Tail | + * +-------+-----------------------+-------+-----------------------+ + * | | + * . Local FIFO Data . + * . . + * +-------+-----------------------+-------+-----------------------+ + * + * The layout above describes the format for the FIFOs used by the host + * network interface and the switch manager to communicate messages back + * and forth. Both the HNI and the switch maintain one such FIFO. The + * layout in memory has the switch manager FIFO followed immediately by + * the HNI FIFO. For this reason I am using just the pointer to the + * HNI FIFO in the mailbox ops as the offset between the two is fixed. + * + * The header for the FIFO is broken out into the following fields: + * Local Tail: Offset into FIFO region for next DWORD to write. + * Version: Version info for mailbox, only values of 0/1 are supported. + * Remote Head: Offset into remote FIFO to indicate how much we have read. 
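/* Worked example (hypothetical helper, not part of this driver) of the
 * FM10K_MSG_HDR_FIELD_SET()/FM10K_MSG_HDR_FIELD_GET() macros defined above:
 * the Type, Tail, Head and CRC fields of a data header all share one 32-bit
 * word, packed at the shifts and sizes listed above.
 */
static u32 fm10k_example_data_hdr(u16 tail, u16 head, u16 crc)
{
        return FM10K_MSG_HDR_FIELD_SET(FM10K_MSG_DATA, TYPE) |
               FM10K_MSG_HDR_FIELD_SET(tail, TAIL) |
               FM10K_MSG_HDR_FIELD_SET(head, HEAD) |
               FM10K_MSG_HDR_FIELD_SET(crc, CRC);
}

/* e.g. with hdr = fm10k_example_data_hdr(0x9, 0x3, 0xBEEF):
 *      FM10K_MSG_HDR_FIELD_GET(hdr, TYPE) == FM10K_MSG_DATA (0x8)
 *      FM10K_MSG_HDR_FIELD_GET(hdr, HEAD) == 0x3
 */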
+ * Error: Error indication, values TBD. + */ + +/* version number for switch manager mailboxes */ +#define FM10K_SM_MBX_VERSION 1 +#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1) + +/* offsets shared between all SM FIFO headers */ +#define FM10K_MSG_SM_TAIL_SHIFT 0 +#define FM10K_MSG_SM_TAIL_SIZE 12 +#define FM10K_MSG_SM_VER_SHIFT 12 +#define FM10K_MSG_SM_VER_SIZE 4 +#define FM10K_MSG_SM_HEAD_SHIFT 16 +#define FM10K_MSG_SM_HEAD_SIZE 12 +#define FM10K_MSG_SM_ERR_SHIFT 28 +#define FM10K_MSG_SM_ERR_SIZE 4 + +/* All error messages returned by mailbox functions + * The value -511 is 0xFE01 in hex. The idea is to order the errors + * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox + * messages. This also helps to avoid error number collisions as Linux + * doesn't appear to use error numbers 256 - 511. + */ +#define FM10K_MBX_ERR(_n) ((_n) - 512) +#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01) +#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03) +#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05) +#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06) +#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08) +#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09) +#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B) +#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C) +#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E) +#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F) + +#define FM10K_MBX_CRC_SEED 0xFFFF + +struct fm10k_mbx_ops { + s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *); + void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *); + bool (*rx_ready)(struct fm10k_mbx_info *); + bool (*tx_ready)(struct fm10k_mbx_info *, u16); + bool (*tx_complete)(struct fm10k_mbx_info *); + s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *, + const u32 *); + s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *); + s32 (*register_handlers)(struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +}; + +struct fm10k_mbx_fifo { + u32 *buffer; + u16 head; + u16 tail; + u16 size; +}; + +/* size of buffer to be stored in mailbox for FIFOs */ +#define FM10K_MBX_TX_BUFFER_SIZE 512 +#define FM10K_MBX_RX_BUFFER_SIZE 128 +#define FM10K_MBX_BUFFER_SIZE \ + (FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE) + +/* minimum and maximum message size in dwords */ +#define FM10K_MBX_MSG_MAX_SIZE \ + ((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1)) +#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1) + +#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */ + +struct fm10k_mbx_info { + /* function pointers for mailbox operations */ + struct fm10k_mbx_ops ops; + const struct fm10k_msg_data *msg_data; + + /* message FIFOs */ + struct fm10k_mbx_fifo rx; + struct fm10k_mbx_fifo tx; + + /* delay for handling timeouts */ + u32 timeout; + u32 udelay; + + /* mailbox state info */ + u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr; + u16 max_size, mbmem_len; + u16 tail, tail_len, pulled; + u16 head, head_len, pushed; + u16 local, remote; + enum fm10k_mbx_state state; + + /* result of last mailbox test */ + s32 test_result; + + /* statistics */ + u64 tx_busy; + u64 tx_dropped; + u64 tx_messages; + u64 tx_dwords; + u64 rx_messages; + u64 rx_dwords; + u64 rx_parse_err; + + /* Buffer to store messages */ + u32 buffer[FM10K_MBX_BUFFER_SIZE]; +}; + +s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *, u8); +s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *, + 
const struct fm10k_msg_data *); + +#endif /* _FM10K_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c new file mode 100644 index 000000000..2f4f41b7e --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -0,0 +1,1449 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k.h" +#include +#if IS_ENABLED(CONFIG_FM10K_VXLAN) +#include +#endif /* CONFIG_FM10K_VXLAN */ + +/** + * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer = vzalloc(size); + if (!tx_ring->tx_buffer) + goto err; + + u64_stats_init(&tx_ring->syncp); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; + return -ENOMEM; +} + +/** + * fm10k_setup_all_tx_resources - allocate all queues Tx resources + * @interface: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
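/* Quick illustration (hypothetical helper, not part of this driver) of the
 * FM10K_MBX_ERR() numbering scheme defined in fm10k_mbx.h above: offsetting
 * the codes by -512 puts them in the 0xFE01-0xFEFF range once truncated to
 * the 16-bit Err_no header field, so they stand out in mailbox traces.
 */
static u16 fm10k_example_mbx_err_no(s32 mbx_err)
{
        /* e.g. FM10K_MBX_ERR_NO_MBX (-511) -> 0xFE01,
         *      FM10K_MBX_ERR_CRC    (-497) -> 0xFE0F
         */
        return (u16)mbx_err;
}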
+ * + * Return 0 on success, negative on failure + **/ +static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface) +{ + int i, err = 0; + + for (i = 0; i < interface->num_tx_queues; i++) { + err = fm10k_setup_tx_resources(interface->tx_ring[i]); + if (!err) + continue; + + netif_err(interface, probe, interface->netdev, + "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + fm10k_free_tx_resources(interface->tx_ring[i]); + return err; +} + +/** + * fm10k_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer = vzalloc(size); + if (!rx_ring->rx_buffer) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + return 0; +err: + vfree(rx_ring->rx_buffer); + rx_ring->rx_buffer = NULL; + return -ENOMEM; +} + +/** + * fm10k_setup_all_rx_resources - allocate all queues Rx resources + * @interface: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface) +{ + int i, err = 0; + + for (i = 0; i < interface->num_rx_queues; i++) { + err = fm10k_setup_rx_resources(interface->rx_ring[i]); + if (!err) + continue; + + netif_err(interface, probe, interface->netdev, + "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + fm10k_free_rx_resources(interface->rx_ring[i]); + return err; +} + +void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring, + struct fm10k_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +/** + * fm10k_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring) +{ + struct fm10k_tx_buffer *tx_buffer; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer = &tx_ring->tx_buffer[i]; + fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer); + } + + /* reset BQL values */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; + 
memset(tx_ring->tx_buffer, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * fm10k_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void fm10k_free_tx_resources(struct fm10k_ring *tx_ring) +{ + fm10k_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues + * @interface: board private structure + **/ +void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface) +{ + int i; + + for (i = 0; i < interface->num_tx_queues; i++) + fm10k_clean_tx_ring(interface->tx_ring[i]); + + /* remove any stale timestamp buffers and free them */ + skb_queue_purge(&interface->ts_tx_skb_queue); +} + +/** + * fm10k_free_all_tx_resources - Free Tx Resources for All Queues + * @interface: board private structure + * + * Free all transmit software resources + **/ +static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface) +{ + int i = interface->num_tx_queues; + + while (i--) + fm10k_free_tx_resources(interface->tx_ring[i]); +} + +/** + * fm10k_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring) +{ + unsigned long size; + u16 i; + + if (!rx_ring->rx_buffer) + return; + + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i]; + /* clean-up will only set page pointer to NULL */ + if (!buffer->page) + continue; + + dma_unmap_page(rx_ring->dev, buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(buffer->page); + + buffer->page = NULL; + } + + size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * fm10k_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void fm10k_free_rx_resources(struct fm10k_ring *rx_ring) +{ + fm10k_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer); + rx_ring->rx_buffer = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues + * @interface: board private structure + **/ +void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface) +{ + int i; + + for (i = 0; i < interface->num_rx_queues; i++) + fm10k_clean_rx_ring(interface->rx_ring[i]); +} + +/** + * fm10k_free_all_rx_resources - Free Rx Resources for All Queues + * @interface: board private structure + * + * Free all receive software resources + **/ +static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) +{ + int i = interface->num_rx_queues; + + while (i--) + fm10k_free_rx_resources(interface->rx_ring[i]); +} + +/** + * fm10k_request_glort_range - Request GLORTs for use in configuring rules + * @interface: board private 
structure + * + * This function allocates a range of glorts for this interface to use. + **/ +static void fm10k_request_glort_range(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT; + + /* establish GLORT base */ + interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE; + interface->glort_count = 0; + + /* nothing we can do until mask is allocated */ + if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) + return; + + /* we support 3 possible GLORT configurations. + * 1: VFs consume all but the last 1 + * 2: VFs and PF split glorts with possible gap between + * 3: VFs allocated first 64, all others belong to PF + */ + if (mask <= hw->iov.total_vfs) { + interface->glort_count = 1; + interface->glort += mask; + } else if (mask < 64) { + interface->glort_count = (mask + 1) / 2; + interface->glort += interface->glort_count; + } else { + interface->glort_count = mask - 63; + interface->glort += 64; + } +} + +/** + * fm10k_del_vxlan_port_all + * @interface: board private structure + * + * This function frees the entire vxlan_port list + **/ +static void fm10k_del_vxlan_port_all(struct fm10k_intfc *interface) +{ + struct fm10k_vxlan_port *vxlan_port; + + /* flush all entries from list */ + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, list); + while (vxlan_port) { + list_del(&vxlan_port->list); + kfree(vxlan_port); + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, + list); + } +} + +/** + * fm10k_restore_vxlan_port + * @interface: board private structure + * + * This function restores the value in the tunnel_cfg register after reset + **/ +static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_vxlan_port *vxlan_port; + + /* only the PF supports configuring tunnels */ + if (hw->mac.type != fm10k_mac_pf) + return; + + vxlan_port = list_first_entry_or_null(&interface->vxlan_port, + struct fm10k_vxlan_port, list); + + /* restore tunnel configuration register */ + fm10k_write_reg(hw, FM10K_TUNNEL_CFG, + (vxlan_port ? ntohs(vxlan_port->port) : 0) | + (ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT)); +} + +/** + * fm10k_add_vxlan_port + * @netdev: network interface device structure + * @sa_family: Address family of new port + * @port: port number used for VXLAN + * + * This funciton is called when a new VXLAN interface has added a new port + * number to the range that is currently in use for VXLAN. The new port + * number is always added to the tail so that the port number list should + * match the order in which the ports were allocated. The head of the list + * is always used as the VXLAN port number for offloads. 
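/* Illustrative restatement (hypothetical helper, not part of this driver) of
 * the three GLORT layouts handled by fm10k_request_glort_range() above; the
 * return value mirrors interface->glort_count for a given DGLORT mask width
 * and VF count.
 */
static u16 fm10k_example_pf_glort_count(u16 mask, u16 total_vfs)
{
        if (mask <= total_vfs)          /* VFs consume all but the last one */
                return 1;
        if (mask < 64)                  /* VFs and PF split the range */
                return (mask + 1) / 2;
        return mask - 63;               /* VFs take the first 64, rest is PF */
}
/* e.g. mask = 63 with 16 VFs leaves 32 GLORTs for the PF. */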
+ **/ +static void fm10k_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) { + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_vxlan_port *vxlan_port; + + /* only the PF supports configuring tunnels */ + if (interface->hw.mac.type != fm10k_mac_pf) + return; + + /* existing ports are pulled out so our new entry is always last */ + fm10k_vxlan_port_for_each(vxlan_port, interface) { + if ((vxlan_port->port == port) && + (vxlan_port->sa_family == sa_family)) { + list_del(&vxlan_port->list); + goto insert_tail; + } + } + + /* allocate memory to track ports */ + vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC); + if (!vxlan_port) + return; + vxlan_port->port = port; + vxlan_port->sa_family = sa_family; + +insert_tail: + /* add new port value to list */ + list_add_tail(&vxlan_port->list, &interface->vxlan_port); + + fm10k_restore_vxlan_port(interface); +} + +/** + * fm10k_del_vxlan_port + * @netdev: network interface device structure + * @sa_family: Address family of freed port + * @port: port number used for VXLAN + * + * This funciton is called when a new VXLAN interface has freed a port + * number from the range that is currently in use for VXLAN. The freed + * port is removed from the list and the new head is used to determine + * the port number for offloads. + **/ +static void fm10k_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) { + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_vxlan_port *vxlan_port; + + if (interface->hw.mac.type != fm10k_mac_pf) + return; + + /* find the port in the list and free it */ + fm10k_vxlan_port_for_each(vxlan_port, interface) { + if ((vxlan_port->port == port) && + (vxlan_port->sa_family == sa_family)) { + list_del(&vxlan_port->list); + kfree(vxlan_port); + break; + } + } + + fm10k_restore_vxlan_port(interface); +} + +/** + * fm10k_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
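/* Worked trace (illustration only) of the VXLAN port bookkeeping implemented
 * by fm10k_add_vxlan_port()/fm10k_del_vxlan_port() above: new ports go to the
 * tail of interface->vxlan_port, while fm10k_restore_vxlan_port() always
 * programs the head of the list into TUNNEL_CFG. Port numbers below are just
 * sample values.
 *
 *   add 4789  -> list: [4789]        offloaded port: 4789
 *   add 8472  -> list: [4789, 8472]  offloaded port: 4789
 *   del 4789  -> list: [8472]        offloaded port: 8472
 *   del 8472  -> list: []            port field written as 0
 */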
+ **/ +int fm10k_open(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int err; + + /* allocate transmit descriptors */ + err = fm10k_setup_all_tx_resources(interface); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = fm10k_setup_all_rx_resources(interface); + if (err) + goto err_setup_rx; + + /* allocate interrupt resources */ + err = fm10k_qv_request_irq(interface); + if (err) + goto err_req_irq; + + /* setup GLORT assignment for this port */ + fm10k_request_glort_range(interface); + + /* Notify the stack of the actual queue counts */ + err = netif_set_real_num_tx_queues(netdev, + interface->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, + interface->num_rx_queues); + if (err) + goto err_set_queues; + +#if IS_ENABLED(CONFIG_FM10K_VXLAN) + /* update VXLAN port configuration */ + vxlan_get_rx_port(netdev); + +#endif + fm10k_up(interface); + + return 0; + +err_set_queues: + fm10k_qv_free_irq(interface); +err_req_irq: + fm10k_free_all_rx_resources(interface); +err_setup_rx: + fm10k_free_all_tx_resources(interface); +err_setup_tx: + return err; +} + +/** + * fm10k_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +int fm10k_close(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + + fm10k_down(interface); + + fm10k_qv_free_irq(interface); + + fm10k_del_vxlan_port_all(interface); + + fm10k_free_all_tx_resources(interface); + fm10k_free_all_rx_resources(interface); + + return 0; +} + +static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + unsigned int r_idx = skb->queue_mapping; + int err; + + if ((skb->protocol == htons(ETH_P_8021Q)) && + !skb_vlan_tag_present(skb)) { + /* FM10K only supports hardware tagging, any tags in frame + * are considered 2nd level or "outer" tags + */ + struct vlan_hdr *vhdr; + __be16 proto; + + /* make sure skb is not shared */ + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return NETDEV_TX_OK; + + /* make sure there is enough room to move the ethernet header */ + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) + return NETDEV_TX_OK; + + /* verify the skb head is not shared */ + err = skb_cow_head(skb, 0); + if (err) + return NETDEV_TX_OK; + + /* locate vlan header */ + vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); + + /* pull the 2 key pieces of data out of it */ + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + ntohs(vhdr->h_vlan_TCI)); + proto = vhdr->h_vlan_encapsulated_proto; + skb->protocol = (ntohs(proto) >= 1536) ? proto : + htons(ETH_P_802_2); + + /* squash it by moving the ethernet addresses up 4 bytes */ + memmove(skb->data + VLAN_HLEN, skb->data, 12); + __skb_pull(skb, VLAN_HLEN); + skb_reset_mac_header(skb); + } + + /* The minimum packet size for a single buffer is 17B so pad the skb + * in order to meet this minimum size requirement. 
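/* Stand-alone sketch (user-space C, hypothetical names, not driver code) of
 * the outer-VLAN squash performed earlier in fm10k_xmit_frame() with
 * memmove() and __skb_pull(): the two MAC addresses slide 4 bytes toward the
 * payload, overwriting the 802.1Q TPID/TCI, and the frame then starts 4
 * bytes later.
 */
#include <stddef.h>
#include <string.h>

static size_t example_strip_outer_vlan(unsigned char *frame, size_t len)
{
        if (len < 18 || frame[12] != 0x81 || frame[13] != 0x00)
                return len;                     /* not an 802.1Q frame */

        memmove(frame + 4, frame, 12);          /* slide dst/src MAC over the tag */
        memmove(frame, frame + 4, len - 4);     /* emulate __skb_pull(skb, VLAN_HLEN) */
        return len - 4;
}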
+ */ + if (unlikely(skb->len < 17)) { + int pad_len = 17 - skb->len; + + if (skb_pad(skb, pad_len)) + return NETDEV_TX_OK; + __skb_put(skb, pad_len); + } + + /* prepare packet for hardware time stamping */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + fm10k_ts_tx_enqueue(interface, skb); + + if (r_idx >= interface->num_tx_queues) + r_idx %= interface->num_tx_queues; + + err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]); + + return err; +} + +static int fm10k_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE) + return -EINVAL; + + dev->mtu = new_mtu; + + return 0; +} + +/** + * fm10k_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void fm10k_tx_timeout(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + bool real_tx_hang = false; + int i; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < interface->num_tx_queues; i++) { + struct fm10k_ring *tx_ring = interface->tx_ring[i]; + + if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) + real_tx_hang = true; + } + + if (real_tx_hang) { + fm10k_tx_timeout_reset(interface); + } else { + netif_info(interface, drv, netdev, + "Fake Tx hang detected with timeout of %d seconds\n", + netdev->watchdog_timeo/HZ); + + /* fake Tx hang - increase the kernel timeout */ + if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) + netdev->watchdog_timeo *= 2; + } +} + +static int fm10k_uc_vlan_unsync(struct net_device *netdev, + const unsigned char *uc_addr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + u16 glort = interface->glort; + u16 vid = interface->vid; + bool set = !!(vid / VLAN_N_VID); + int err; + + /* drop any leading bits on the VLAN ID */ + vid &= VLAN_N_VID - 1; + + err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0); + if (err) + return err; + + /* return non-zero value as we are only doing a partial sync/unsync */ + return 1; +} + +static int fm10k_mc_vlan_unsync(struct net_device *netdev, + const unsigned char *mc_addr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + u16 glort = interface->glort; + u16 vid = interface->vid; + bool set = !!(vid / VLAN_N_VID); + int err; + + /* drop any leading bits on the VLAN ID */ + vid &= VLAN_N_VID - 1; + + err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set); + if (err) + return err; + + /* return non-zero value as we are only doing a partial sync/unsync */ + return 1; +} + +static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_hw *hw = &interface->hw; + s32 err; + + /* updates do not apply to VLAN 0 */ + if (!vid) + return 0; + + if (vid >= VLAN_N_VID) + return -EINVAL; + + /* Verify we have permission to add VLANs */ + if (hw->mac.vlan_override) + return -EACCES; + + /* update active_vlans bitmask */ + set_bit(vid, interface->active_vlans); + if (!set) + clear_bit(vid, interface->active_vlans); + + /* if default VLAN is already present do nothing */ + if (vid == hw->mac.default_vid) + return 0; + + fm10k_mbx_lock(interface); + + /* only need to update the VLAN if not in promiscuous mode */ + if (!(netdev->flags & IFF_PROMISC)) { + err = hw->mac.ops.update_vlan(hw, vid, 0, set); + if (err) + goto err_out; + } + + /* update our base MAC address */ + err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr, + vid, 
set, 0); + if (err) + goto err_out; + + /* set vid prior to syncing/unsyncing the VLAN */ + interface->vid = vid + (set ? VLAN_N_VID : 0); + + /* Update the unicast and multicast address list to add/drop VLAN */ + __dev_uc_unsync(netdev, fm10k_uc_vlan_unsync); + __dev_mc_unsync(netdev, fm10k_mc_vlan_unsync); + +err_out: + fm10k_mbx_unlock(interface); + + return err; +} + +static int fm10k_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + /* update VLAN and address table based on changes */ + return fm10k_update_vid(netdev, vid, true); +} + +static int fm10k_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + /* update VLAN and address table based on changes */ + return fm10k_update_vid(netdev, vid, false); +} + +static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid) +{ + struct fm10k_hw *hw = &interface->hw; + u16 default_vid = hw->mac.default_vid; + u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID; + + vid = find_next_bit(interface->active_vlans, vid_limit, ++vid); + + return vid; +} + +static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + u32 vid, prev_vid; + + /* loop through and find any gaps in the table */ + for (vid = 0, prev_vid = 0; + prev_vid < VLAN_N_VID; + prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) { + if (prev_vid == vid) + continue; + + /* send request to clear multiple bits at a time */ + prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT; + hw->mac.ops.update_vlan(hw, prev_vid, 0, false); + } +} + +static int __fm10k_uc_sync(struct net_device *dev, + const unsigned char *addr, bool sync) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + u16 vid, glort = interface->glort; + s32 err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + /* update table with current entries */ + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) { + err = hw->mac.ops.update_uc_addr(hw, glort, addr, + vid, sync, 0); + if (err) + return err; + } + + return 0; +} + +static int fm10k_uc_sync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_uc_sync(dev, addr, true); +} + +static int fm10k_uc_unsync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_uc_sync(dev, addr, false); +} + +static int fm10k_set_mac(struct net_device *dev, void *p) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + struct sockaddr *addr = p; + s32 err = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (dev->flags & IFF_UP) { + /* setting MAC address requires mailbox */ + fm10k_mbx_lock(interface); + + err = fm10k_uc_sync(dev, addr->sa_data); + if (!err) + fm10k_uc_unsync(dev, hw->mac.addr); + + fm10k_mbx_unlock(interface); + } + + if (!err) { + ether_addr_copy(dev->dev_addr, addr->sa_data); + ether_addr_copy(hw->mac.addr, addr->sa_data); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; + } + + /* if we had a mailbox error suggest trying again */ + return err ? 
-EAGAIN : 0; +} + +static int __fm10k_mc_sync(struct net_device *dev, + const unsigned char *addr, bool sync) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + u16 vid, glort = interface->glort; + s32 err; + + if (!is_multicast_ether_addr(addr)) + return -EADDRNOTAVAIL; + + /* update table with current entries */ + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) { + err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); + if (err) + return err; + } + + return 0; +} + +static int fm10k_mc_sync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_mc_sync(dev, addr, true); +} + +static int fm10k_mc_unsync(struct net_device *dev, + const unsigned char *addr) +{ + return __fm10k_mc_sync(dev, addr, false); +} + +static void fm10k_set_rx_mode(struct net_device *dev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + int xcast_mode; + + /* no need to update the harwdare if we are not running */ + if (!(dev->flags & IFF_UP)) + return; + + /* determine new mode based on flags */ + xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC : + (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI : + (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ? + FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE; + + fm10k_mbx_lock(interface); + + /* update xcast mode first, but only if it changed */ + if (interface->xcast_mode != xcast_mode) { + /* update VLAN table */ + if (xcast_mode == FM10K_XCAST_MODE_PROMISC) + hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true); + if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) + fm10k_clear_unused_vlans(interface); + + /* update xcast mode */ + hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode); + + /* record updated xcast mode state */ + interface->xcast_mode = xcast_mode; + } + + /* synchronize all of the addresses */ + if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { + __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); + if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) + __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); + } + + fm10k_mbx_unlock(interface); +} + +void fm10k_restore_rx_state(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int xcast_mode; + u16 vid, glort; + + /* restore our address if perm_addr is set */ + if (hw->mac.type == fm10k_mac_vf) { + if (is_valid_ether_addr(hw->mac.perm_addr)) { + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr); + ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr); + netdev->addr_assign_type &= ~NET_ADDR_RANDOM; + } + + if (hw->mac.vlan_override) + netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + else + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; + } + + /* record glort for this interface */ + glort = interface->glort; + + /* convert interface flags to xcast mode */ + if (netdev->flags & IFF_PROMISC) + xcast_mode = FM10K_XCAST_MODE_PROMISC; + else if (netdev->flags & IFF_ALLMULTI) + xcast_mode = FM10K_XCAST_MODE_ALLMULTI; + else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST)) + xcast_mode = FM10K_XCAST_MODE_MULTI; + else + xcast_mode = FM10K_XCAST_MODE_NONE; + + fm10k_mbx_lock(interface); + + /* Enable logical port */ + hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true); + + /* update VLAN table */ + hw->mac.ops.update_vlan(hw, 
FM10K_VLAN_ALL, 0, + xcast_mode == FM10K_XCAST_MODE_PROMISC); + + /* Add filter for VLAN 0 */ + hw->mac.ops.update_vlan(hw, 0, 0, true); + + /* update table with current entries */ + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) { + hw->mac.ops.update_vlan(hw, vid, 0, true); + hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr, + vid, true, 0); + } + + /* update xcast mode before syncronizing addresses */ + hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); + + /* synchronize all of the addresses */ + if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { + __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); + if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) + __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); + } + + fm10k_mbx_unlock(interface); + + /* record updated xcast mode state */ + interface->xcast_mode = xcast_mode; + + /* Restore tunnel configuration */ + fm10k_restore_vxlan_port(interface); +} + +void fm10k_reset_rx_state(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + + fm10k_mbx_lock(interface); + + /* clear the logical port state on lower device */ + hw->mac.ops.update_lport_state(hw, interface->glort, + interface->glort_count, false); + + fm10k_mbx_unlock(interface); + + /* reset flags to default state */ + interface->xcast_mode = FM10K_XCAST_MODE_NONE; + + /* clear the sync flag since the lport has been dropped */ + __dev_uc_unsync(netdev, NULL); + __dev_mc_unsync(netdev, NULL); +} + +/** + * fm10k_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: storage space for 64bit statistics + * + * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This + * function replaces fm10k_get_stats for kernels which support it. + */ +static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_ring *ring; + unsigned int start, i; + u64 bytes, packets; + + rcu_read_lock(); + + for (i = 0; i < interface->num_rx_queues; i++) { + ring = ACCESS_ONCE(interface->rx_ring[i]); + + if (!ring) + continue; + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + for (i = 0; i < interface->num_tx_queues; i++) { + ring = ACCESS_ONCE(interface->tx_ring[i]); + + if (!ring) + continue; + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + + rcu_read_unlock(); + + /* following stats updated by fm10k_service_task() */ + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + + return stats; +} + +int fm10k_setup_tc(struct net_device *dev, u8 tc) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + + /* Currently only the PF supports priority classes */ + if (tc && (interface->hw.mac.type != fm10k_mac_pf)) + return -EINVAL; + + /* Hardware supports up to 8 traffic classes */ + if (tc > 8) + return -EINVAL; + + /* Hardware has to reinitialize queues to match packet + * buffer alignment. Unfortunately, the hardware is not + * flexible enough to do this dynamically. 
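/* Illustration (hypothetical helper, not part of this driver) of the VID
 * encoding used by fm10k_update_vid() and the fm10k_*_vlan_unsync() callbacks
 * above: the set/clear flag rides just above the 12-bit VLAN ID, so a single
 * value stashed in interface->vid carries both through __dev_uc_unsync() and
 * __dev_mc_unsync(). Assumes VLAN_N_VID from <linux/if_vlan.h>.
 */
static void fm10k_example_vid_encoding(u16 vid, bool set,
                                       u16 *decoded_vid, bool *decoded_set)
{
        u16 encoded = vid + (set ? VLAN_N_VID : 0); /* as in fm10k_update_vid() */

        *decoded_set = !!(encoded / VLAN_N_VID);    /* as in the unsync callbacks */
        *decoded_vid = encoded & (VLAN_N_VID - 1);
}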
+ */ + if (netif_running(dev)) + fm10k_close(dev); + + fm10k_mbx_free_irq(interface); + + fm10k_clear_queueing_scheme(interface); + + /* we expect the prio_tc map to be repopulated later */ + netdev_reset_tc(dev); + netdev_set_num_tc(dev, tc); + + fm10k_init_queueing_scheme(interface); + + fm10k_mbx_request_irq(interface); + + if (netif_running(dev)) + fm10k_open(dev); + + /* flag to indicate SWPRI has yet to be updated */ + interface->flags |= FM10K_FLAG_SWPRI_CONFIG; + + return 0; +} + +static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return fm10k_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return fm10k_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, + struct fm10k_l2_accel *l2_accel) +{ + struct fm10k_ring *ring; + int i; + + for (i = 0; i < interface->num_rx_queues; i++) { + ring = interface->rx_ring[i]; + rcu_assign_pointer(ring->l2_accel, l2_accel); + } + + interface->l2_accel = l2_accel; +} + +static void *fm10k_dfwd_add_station(struct net_device *dev, + struct net_device *sdev) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_l2_accel *l2_accel = interface->l2_accel; + struct fm10k_l2_accel *old_l2_accel = NULL; + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + int size = 0, i; + u16 glort; + + /* allocate l2 accel structure if it is not available */ + if (!l2_accel) { + /* verify there is enough free GLORTs to support l2_accel */ + if (interface->glort_count < 7) + return ERR_PTR(-EBUSY); + + size = offsetof(struct fm10k_l2_accel, macvlan[7]); + l2_accel = kzalloc(size, GFP_KERNEL); + if (!l2_accel) + return ERR_PTR(-ENOMEM); + + l2_accel->size = 7; + l2_accel->dglort = interface->glort; + + /* update pointers */ + fm10k_assign_l2_accel(interface, l2_accel); + /* do not expand if we are at our limit */ + } else if ((l2_accel->count == FM10K_MAX_STATIONS) || + (l2_accel->count == (interface->glort_count - 1))) { + return ERR_PTR(-EBUSY); + /* expand if we have hit the size limit */ + } else if (l2_accel->count == l2_accel->size) { + old_l2_accel = l2_accel; + size = offsetof(struct fm10k_l2_accel, + macvlan[(l2_accel->size * 2) + 1]); + l2_accel = kzalloc(size, GFP_KERNEL); + if (!l2_accel) + return ERR_PTR(-ENOMEM); + + memcpy(l2_accel, old_l2_accel, + offsetof(struct fm10k_l2_accel, + macvlan[old_l2_accel->size])); + + l2_accel->size = (old_l2_accel->size * 2) + 1; + + /* update pointers */ + fm10k_assign_l2_accel(interface, l2_accel); + kfree_rcu(old_l2_accel, rcu); + } + + /* add macvlan to accel table, and record GLORT for position */ + for (i = 0; i < l2_accel->size; i++) { + if (!l2_accel->macvlan[i]) + break; + } + + /* record station */ + l2_accel->macvlan[i] = sdev; + l2_accel->count++; + + /* configure default DGLORT mapping for RSS/DCB */ + dglort.idx = fm10k_dglort_pf_rss; + dglort.inner_rss = 1; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + dglort.glort = interface->glort; + dglort.shared_l = fls(l2_accel->size); + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* Add rules for this specific dglort to the switch */ + fm10k_mbx_lock(interface); + + glort = l2_accel->dglort + 1 + i; + hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI); + hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, true, 0); + + fm10k_mbx_unlock(interface); + + return 
sdev; +} + +static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) +{ + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_l2_accel *l2_accel = ACCESS_ONCE(interface->l2_accel); + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + struct net_device *sdev = priv; + int i; + u16 glort; + + if (!l2_accel) + return; + + /* search table for matching interface */ + for (i = 0; i < l2_accel->size; i++) { + if (l2_accel->macvlan[i] == sdev) + break; + } + + /* exit if macvlan not found */ + if (i == l2_accel->size) + return; + + /* Remove any rules specific to this dglort */ + fm10k_mbx_lock(interface); + + glort = l2_accel->dglort + 1 + i; + hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); + hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, 0, false, 0); + + fm10k_mbx_unlock(interface); + + /* record removal */ + l2_accel->macvlan[i] = NULL; + l2_accel->count--; + + /* configure default DGLORT mapping for RSS/DCB */ + dglort.idx = fm10k_dglort_pf_rss; + dglort.inner_rss = 1; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + dglort.glort = interface->glort; + if (l2_accel) + dglort.shared_l = fls(l2_accel->size); + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* If table is empty remove it */ + if (l2_accel->count == 0) { + fm10k_assign_l2_accel(interface, NULL); + kfree_rcu(l2_accel, rcu); + } +} + +static netdev_features_t fm10k_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) + return features; + + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); +} + +static const struct net_device_ops fm10k_netdev_ops = { + .ndo_open = fm10k_open, + .ndo_stop = fm10k_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = fm10k_xmit_frame, + .ndo_set_mac_address = fm10k_set_mac, + .ndo_change_mtu = fm10k_change_mtu, + .ndo_tx_timeout = fm10k_tx_timeout, + .ndo_vlan_rx_add_vid = fm10k_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, + .ndo_set_rx_mode = fm10k_set_rx_mode, + .ndo_get_stats64 = fm10k_get_stats64, + .ndo_setup_tc = fm10k_setup_tc, + .ndo_set_vf_mac = fm10k_ndo_set_vf_mac, + .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, + .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, + .ndo_get_vf_config = fm10k_ndo_get_vf_config, + .ndo_add_vxlan_port = fm10k_add_vxlan_port, + .ndo_del_vxlan_port = fm10k_del_vxlan_port, + .ndo_do_ioctl = fm10k_ioctl, + .ndo_dfwd_add_station = fm10k_dfwd_add_station, + .ndo_dfwd_del_station = fm10k_dfwd_del_station, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fm10k_netpoll, +#endif + .ndo_features_check = fm10k_features_check, +}; + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + +struct net_device *fm10k_alloc_netdev(void) +{ + struct fm10k_intfc *interface; + struct net_device *dev; + + dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES); + if (!dev) + return NULL; + + /* set net device and ethtool ops */ + dev->netdev_ops = &fm10k_netdev_ops; + fm10k_set_ethtool_ops(dev); + + /* configure default debug level */ + interface = netdev_priv(dev); + interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + /* configure default features */ + dev->features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_RXHASH | + NETIF_F_RXCSUM; + + /* all features defined to this 
point should be changeable */ + dev->hw_features |= dev->features; + + /* allow user to enable L2 forwarding acceleration */ + dev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* configure VLAN features */ + dev->vlan_features |= dev->features; + + /* configure tunnel offloads */ + dev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_IPV6_CSUM; + + /* we want to leave these both on as we cannot disable VLAN tag + * insertion or stripping on the hardware since it is contained + * in the FTAG and not in the frame itself. + */ + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + dev->priv_flags |= IFF_UNICAST_FLT; + + return dev; +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c new file mode 100644 index 000000000..df9fda38b --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -0,0 +1,2190 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include + +#include "fm10k.h" + +static const struct fm10k_info *fm10k_info_tbl[] = { + [fm10k_device_pf] = &fm10k_pf_info, + [fm10k_device_vf] = &fm10k_vf_info, +}; + +/** + * fm10k_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id fm10k_pci_tbl[] = { + { PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf }, + { PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf }, + /* required last entry */ + { 0, } +}; +MODULE_DEVICE_TABLE(pci, fm10k_pci_tbl); + +u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg) +{ + struct fm10k_intfc *interface = hw->back; + u16 value = 0; + + if (FM10K_REMOVED(hw->hw_addr)) + return ~value; + + pci_read_config_word(interface->pdev, reg, &value); + if (value == 0xFFFF) + fm10k_write_flush(hw); + + return value; +} + +u32 fm10k_read_reg(struct fm10k_hw *hw, int reg) +{ + u32 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); + u32 value = 0; + + if (FM10K_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct fm10k_intfc *interface = hw->back; + struct net_device *netdev = interface->netdev; + + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + } + + return value; +} + +static int fm10k_hw_ready(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + + fm10k_write_flush(hw); + + return FM10K_REMOVED(hw->hw_addr) ? 
-ENODEV : 0; +} + +void fm10k_service_event_schedule(struct fm10k_intfc *interface) +{ + if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) && + !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state)) + queue_work(fm10k_workqueue, &interface->service_task); +} + +static void fm10k_service_event_complete(struct fm10k_intfc *interface) +{ + BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); + + /* flush memory to make sure state is correct before next watchog */ + smp_mb__before_atomic(); + clear_bit(__FM10K_SERVICE_SCHED, &interface->state); +} + +/** + * fm10k_service_timer - Timer Call-back + * @data: pointer to interface cast into an unsigned long + **/ +static void fm10k_service_timer(unsigned long data) +{ + struct fm10k_intfc *interface = (struct fm10k_intfc *)data; + + /* Reset the timer */ + mod_timer(&interface->service_timer, (HZ * 2) + jiffies); + + fm10k_service_event_schedule(interface); +} + +static void fm10k_detach_subtask(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + + /* do nothing if device is still present or hw_addr is set */ + if (netif_device_present(netdev) || interface->hw.hw_addr) + return; + + rtnl_lock(); + + if (netif_running(netdev)) + dev_close(netdev); + + rtnl_unlock(); +} + +static void fm10k_reinit(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + WARN_ON(in_interrupt()); + + /* put off any impending NetWatchDogTimeout */ + netdev->trans_start = jiffies; + + while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) + usleep_range(1000, 2000); + + rtnl_lock(); + + fm10k_iov_suspend(interface->pdev); + + if (netif_running(netdev)) + fm10k_close(netdev); + + fm10k_mbx_free_irq(interface); + + /* delay any future reset requests */ + interface->last_reset = jiffies + (10 * HZ); + + /* reset and initialize the hardware so it is in a known state */ + err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw); + if (err) + dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err); + + /* reassociate interrupts */ + fm10k_mbx_request_irq(interface); + + /* reset clock */ + fm10k_ts_reset(interface); + + if (netif_running(netdev)) + fm10k_open(netdev); + + fm10k_iov_resume(interface->pdev); + + rtnl_unlock(); + + clear_bit(__FM10K_RESETTING, &interface->state); +} + +static void fm10k_reset_subtask(struct fm10k_intfc *interface) +{ + if (!(interface->flags & FM10K_FLAG_RESET_REQUESTED)) + return; + + interface->flags &= ~FM10K_FLAG_RESET_REQUESTED; + + netdev_err(interface->netdev, "Reset interface\n"); + + fm10k_reinit(interface); +} + +/** + * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping + * @interface: board private structure + * + * Configure the SWPRI to PC mapping for the port. + **/ +static void fm10k_configure_swpri_map(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int i; + + /* clear flag indicating update is needed */ + interface->flags &= ~FM10K_FLAG_SWPRI_CONFIG; + + /* these registers are only available on the PF */ + if (hw->mac.type != fm10k_mac_pf) + return; + + /* configure SWPRI to PC map */ + for (i = 0; i < FM10K_SWPRI_MAX; i++) + fm10k_write_reg(hw, FM10K_SWPRI_MAP(i), + netdev_get_prio_tc_map(netdev, i)); +} + +/** + * fm10k_watchdog_update_host_state - Update the link status based on host. 
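/* Simplified restatement (hypothetical helper, not the driver's exact logic)
 * of the surprise-removal test in fm10k_read_reg() above: a PCIe read from a
 * removed device completes as all-ones, so an all-ones value is only treated
 * as "device gone" after register 0 also reads back as all-ones (the driver
 * skips the second read when register 0 was the register being read).
 */
static bool fm10k_example_device_removed(u32 value, u32 reg0_value)
{
        return !(~value) && !(~reg0_value);
}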
+ * @interface: board private structure + **/ +static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + s32 err; + + if (test_bit(__FM10K_LINK_DOWN, &interface->state)) { + interface->host_ready = false; + if (time_is_after_jiffies(interface->link_down_event)) + return; + clear_bit(__FM10K_LINK_DOWN, &interface->state); + } + + if (interface->flags & FM10K_FLAG_SWPRI_CONFIG) { + if (rtnl_trylock()) { + fm10k_configure_swpri_map(interface); + rtnl_unlock(); + } + } + + /* lock the mailbox for transmit and receive */ + fm10k_mbx_lock(interface); + + err = hw->mac.ops.get_host_state(hw, &interface->host_ready); + if (err && time_is_before_jiffies(interface->last_reset)) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + /* free the lock */ + fm10k_mbx_unlock(interface); +} + +/** + * fm10k_mbx_subtask - Process upstream and downstream mailboxes + * @interface: board private structure + * + * This function will process both the upstream and downstream mailboxes. + * It is necessary for us to hold the rtnl_lock while doing this as the + * mailbox accesses are protected by this lock. + **/ +static void fm10k_mbx_subtask(struct fm10k_intfc *interface) +{ + /* process upstream mailbox and update device state */ + fm10k_watchdog_update_host_state(interface); + + /* process downstream mailboxes */ + fm10k_iov_mbx(interface); +} + +/** + * fm10k_watchdog_host_is_ready - Update netdev status based on host ready + * @interface: board private structure + **/ +static void fm10k_watchdog_host_is_ready(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + + /* only continue if link state is currently down */ + if (netif_carrier_ok(netdev)) + return; + + netif_info(interface, drv, netdev, "NIC Link is up\n"); + + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); +} + +/** + * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready + * @interface: board private structure + **/ +static void fm10k_watchdog_host_not_ready(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + + /* only continue if link state is currently up */ + if (!netif_carrier_ok(netdev)) + return; + + netif_info(interface, drv, netdev, "NIC Link is down\n"); + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); +} + +/** + * fm10k_update_stats - Update the board statistics counters. 
+ * @interface: board private structure + **/ +void fm10k_update_stats(struct fm10k_intfc *interface) +{ + struct net_device_stats *net_stats = &interface->netdev->stats; + struct fm10k_hw *hw = &interface->hw; + u64 rx_errors = 0, rx_csum_errors = 0, tx_csum_errors = 0; + u64 restart_queue = 0, tx_busy = 0, alloc_failed = 0; + u64 rx_bytes_nic = 0, rx_pkts_nic = 0, rx_drops_nic = 0; + u64 tx_bytes_nic = 0, tx_pkts_nic = 0; + u64 bytes, pkts; + int i; + + /* do not allow stats update via service task for next second */ + interface->next_stats_update = jiffies + HZ; + + /* gather some stats to the interface struct that are per queue */ + for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) { + struct fm10k_ring *tx_ring = interface->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_csum_errors += tx_ring->tx_stats.csum_err; + bytes += tx_ring->stats.bytes; + pkts += tx_ring->stats.packets; + } + + interface->restart_queue = restart_queue; + interface->tx_busy = tx_busy; + net_stats->tx_bytes = bytes; + net_stats->tx_packets = pkts; + interface->tx_csum_errors = tx_csum_errors; + /* gather some stats to the interface struct that are per queue */ + for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { + struct fm10k_ring *rx_ring = interface->rx_ring[i]; + + bytes += rx_ring->stats.bytes; + pkts += rx_ring->stats.packets; + alloc_failed += rx_ring->rx_stats.alloc_failed; + rx_csum_errors += rx_ring->rx_stats.csum_err; + rx_errors += rx_ring->rx_stats.errors; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = pkts; + interface->alloc_failed = alloc_failed; + interface->rx_csum_errors = rx_csum_errors; + + hw->mac.ops.update_hw_stats(hw, &interface->stats); + + for (i = 0; i < hw->mac.max_queues; i++) { + struct fm10k_hw_stats_q *q = &interface->stats.q[i]; + + tx_bytes_nic += q->tx_bytes.count; + tx_pkts_nic += q->tx_packets.count; + rx_bytes_nic += q->rx_bytes.count; + rx_pkts_nic += q->rx_packets.count; + rx_drops_nic += q->rx_drops.count; + } + + interface->tx_bytes_nic = tx_bytes_nic; + interface->tx_packets_nic = tx_pkts_nic; + interface->rx_bytes_nic = rx_bytes_nic; + interface->rx_packets_nic = rx_pkts_nic; + interface->rx_drops_nic = rx_drops_nic; + + /* Fill out the OS statistics structure */ + net_stats->rx_errors = rx_errors; + net_stats->rx_dropped = interface->stats.nodesc_drop.count; +} + +/** + * fm10k_watchdog_flush_tx - flush queues on host not ready + * @interface - pointer to the device interface structure + **/ +static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface) +{ + int some_tx_pending = 0; + int i; + + /* nothing to do if carrier is up */ + if (netif_carrier_ok(interface->netdev)) + return; + + for (i = 0; i < interface->num_tx_queues; i++) { + struct fm10k_ring *tx_ring = interface->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) { + some_tx_pending = 1; + break; + } + } + + /* We've lost link, so the controller stops DMA, but we've got + * queued Tx work that's never going to get done, so reset + * controller to flush Tx. 
+ */ + if (some_tx_pending) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; +} + +/** + * fm10k_watchdog_subtask - check and bring link up + * @interface - pointer to the device interface structure + **/ +static void fm10k_watchdog_subtask(struct fm10k_intfc *interface) +{ + /* if interface is down do nothing */ + if (test_bit(__FM10K_DOWN, &interface->state) || + test_bit(__FM10K_RESETTING, &interface->state)) + return; + + if (interface->host_ready) + fm10k_watchdog_host_is_ready(interface); + else + fm10k_watchdog_host_not_ready(interface); + + /* update stats only once every second */ + if (time_is_before_jiffies(interface->next_stats_update)) + fm10k_update_stats(interface); + + /* flush any uncompleted work */ + fm10k_watchdog_flush_tx(interface); +} + +/** + * fm10k_check_hang_subtask - check for hung queues and dropped interrupts + * @interface - pointer to the device interface structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. + */ +static void fm10k_check_hang_subtask(struct fm10k_intfc *interface) +{ + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__FM10K_DOWN, &interface->state) || + test_bit(__FM10K_RESETTING, &interface->state)) + return; + + /* rate limit tx hang checks to only once every 2 seconds */ + if (time_is_after_eq_jiffies(interface->next_tx_hang_check)) + return; + interface->next_tx_hang_check = jiffies + (2 * HZ); + + if (netif_carrier_ok(interface->netdev)) { + /* Force detection of hung controller */ + for (i = 0; i < interface->num_tx_queues; i++) + set_check_for_tx_hang(interface->tx_ring[i]); + + /* Rearm all in-use q_vectors for immediate firing */ + for (i = 0; i < interface->num_q_vectors; i++) { + struct fm10k_q_vector *qv = interface->q_vector[i]; + + if (!qv->tx.count && !qv->rx.count) + continue; + writel(FM10K_ITR_ENABLE | FM10K_ITR_PENDING2, qv->itr); + } + } +} + +/** + * fm10k_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void fm10k_service_task(struct work_struct *work) +{ + struct fm10k_intfc *interface; + + interface = container_of(work, struct fm10k_intfc, service_task); + + /* tasks always capable of running, but must be rtnl protected */ + fm10k_mbx_subtask(interface); + fm10k_detach_subtask(interface); + fm10k_reset_subtask(interface); + + /* tasks only run when interface is up */ + fm10k_watchdog_subtask(interface); + fm10k_check_hang_subtask(interface); + fm10k_ts_tx_subtask(interface); + + /* release lock on service events to allow scheduling next event */ + fm10k_service_event_complete(interface); +} + +/** + * fm10k_configure_tx_ring - Configure Tx ring after Reset + * @interface: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
+ **/ +static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, + struct fm10k_ring *ring) +{ + struct fm10k_hw *hw = &interface->hw; + u64 tdba = ring->dma; + u32 size = ring->count * sizeof(struct fm10k_tx_desc); + u32 txint = FM10K_INT_MAP_DISABLE; + u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT); + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), 0); + fm10k_write_flush(hw); + + /* possible poll here to verify ring resources have been cleaned */ + + /* set location and size for descriptor ring */ + fm10k_write_reg(hw, FM10K_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); + fm10k_write_reg(hw, FM10K_TDBAH(reg_idx), tdba >> 32); + fm10k_write_reg(hw, FM10K_TDLEN(reg_idx), size); + + /* reset head and tail pointers */ + fm10k_write_reg(hw, FM10K_TDH(reg_idx), 0); + fm10k_write_reg(hw, FM10K_TDT(reg_idx), 0); + + /* store tail pointer */ + ring->tail = &interface->uc_addr[FM10K_TDT(reg_idx)]; + + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + /* Map interrupt */ + if (ring->q_vector) { + txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + txint |= FM10K_INT_MAP_TIMER0; + } + + fm10k_write_reg(hw, FM10K_TXINT(reg_idx), txint); + + /* enable use of FTAG bit in Tx descriptor, register is RO for VF */ + fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx), + FM10K_PFVTCTL_FTAG_DESC_ENABLE); + + /* enable queue */ + fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl); +} + +/** + * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration + * @interface: board private structure + * @ring: structure containing ring specific data + * + * Verify the Tx descriptor ring is ready for transmit. + **/ +static void fm10k_enable_tx_ring(struct fm10k_intfc *interface, + struct fm10k_ring *ring) +{ + struct fm10k_hw *hw = &interface->hw; + int wait_loop = 10; + u32 txdctl; + u8 reg_idx = ring->reg_idx; + + /* if we are already enabled just exit */ + if (fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)) & FM10K_TXDCTL_ENABLE) + return; + + /* poll to verify queue is enabled */ + do { + usleep_range(1000, 2000); + txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(reg_idx)); + } while (!(txdctl & FM10K_TXDCTL_ENABLE) && --wait_loop); + if (!wait_loop) + netif_err(interface, drv, interface->netdev, + "Could not enable Tx Queue %d\n", reg_idx); +} + +/** + * fm10k_configure_tx - Configure Transmit Unit after Reset + * @interface: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void fm10k_configure_tx(struct fm10k_intfc *interface) +{ + int i; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < interface->num_tx_queues; i++) + fm10k_configure_tx_ring(interface, interface->tx_ring[i]); + + /* poll here to verify that Tx rings are now enabled */ + for (i = 0; i < interface->num_tx_queues; i++) + fm10k_enable_tx_ring(interface, interface->tx_ring[i]); +} + +/** + * fm10k_configure_rx_ring - Configure Rx ring after Reset + * @interface: board private structure + * @ring: structure containing ring specific data + * + * Configure the Rx descriptor ring after a reset. 
+ **/ +static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, + struct fm10k_ring *ring) +{ + u64 rdba = ring->dma; + struct fm10k_hw *hw = &interface->hw; + u32 size = ring->count * sizeof(union fm10k_rx_desc); + u32 rxqctl = FM10K_RXQCTL_ENABLE | FM10K_RXQCTL_PF; + u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + u32 srrctl = FM10K_SRRCTL_BUFFER_CHAINING_EN; + u32 rxint = FM10K_INT_MAP_DISABLE; + u8 rx_pause = interface->rx_pause; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), 0); + fm10k_write_flush(hw); + + /* possible poll here to verify ring resources have been cleaned */ + + /* set location and size for descriptor ring */ + fm10k_write_reg(hw, FM10K_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); + fm10k_write_reg(hw, FM10K_RDBAH(reg_idx), rdba >> 32); + fm10k_write_reg(hw, FM10K_RDLEN(reg_idx), size); + + /* reset head and tail pointers */ + fm10k_write_reg(hw, FM10K_RDH(reg_idx), 0); + fm10k_write_reg(hw, FM10K_RDT(reg_idx), 0); + + /* store tail pointer */ + ring->tail = &interface->uc_addr[FM10K_RDT(reg_idx)]; + + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + + /* Configure the Rx buffer size for one buff without split */ + srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; + + /* Configure the Rx ring to suppress loopback packets */ + srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; + fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); + + /* Enable drop on empty */ +#ifdef CONFIG_DCB + if (interface->pfc_en) + rx_pause = interface->pfc_en; +#endif + if (!(rx_pause & (1 << ring->qos_pc))) + rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; + + fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); + + /* assign default VLAN to queue */ + ring->vid = hw->mac.default_vid; + + /* Map interrupt */ + if (ring->q_vector) { + rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw); + rxint |= FM10K_INT_MAP_TIMER1; + } + + fm10k_write_reg(hw, FM10K_RXINT(reg_idx), rxint); + + /* enable queue */ + fm10k_write_reg(hw, FM10K_RXQCTL(reg_idx), rxqctl); + + /* place buffers on ring for receive data */ + fm10k_alloc_rx_buffers(ring, fm10k_desc_unused(ring)); +} + +/** + * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings + * @interface: board private structure + * + * Configure the drop enable bits for the Rx rings. + **/ +void fm10k_update_rx_drop_en(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + u8 rx_pause = interface->rx_pause; + int i; + +#ifdef CONFIG_DCB + if (interface->pfc_en) + rx_pause = interface->pfc_en; + +#endif + for (i = 0; i < interface->num_rx_queues; i++) { + struct fm10k_ring *ring = interface->rx_ring[i]; + u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + u8 reg_idx = ring->reg_idx; + + if (!(rx_pause & (1 << ring->qos_pc))) + rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; + + fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); + } +} + +/** + * fm10k_configure_dglort - Configure Receive DGLORT after reset + * @interface: board private structure + * + * Configure the DGLORT description and RSS tables. 
+ **/ +static void fm10k_configure_dglort(struct fm10k_intfc *interface) +{ + struct fm10k_dglort_cfg dglort = { 0 }; + struct fm10k_hw *hw = &interface->hw; + int i; + u32 mrqc; + + /* Fill out hash function seeds */ + for (i = 0; i < FM10K_RSSRK_SIZE; i++) + fm10k_write_reg(hw, FM10K_RSSRK(0, i), interface->rssrk[i]); + + /* Write RETA table to hardware */ + for (i = 0; i < FM10K_RETA_SIZE; i++) + fm10k_write_reg(hw, FM10K_RETA(0, i), interface->reta[i]); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = FM10K_MRQC_IPV4 | + FM10K_MRQC_TCP_IPV4 | + FM10K_MRQC_IPV6 | + FM10K_MRQC_TCP_IPV6; + + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= FM10K_MRQC_UDP_IPV4; + if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= FM10K_MRQC_UDP_IPV6; + + fm10k_write_reg(hw, FM10K_MRQC(0), mrqc); + + /* configure default DGLORT mapping for RSS/DCB */ + dglort.inner_rss = 1; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + hw->mac.ops.configure_dglort_map(hw, &dglort); + + /* assign GLORT per queue for queue mapped testing */ + if (interface->glort_count > 64) { + memset(&dglort, 0, sizeof(dglort)); + dglort.inner_rss = 1; + dglort.glort = interface->glort + 64; + dglort.idx = fm10k_dglort_pf_queue; + dglort.queue_l = fls(interface->num_rx_queues - 1); + hw->mac.ops.configure_dglort_map(hw, &dglort); + } + + /* assign glort value for RSS/DCB specific to this interface */ + memset(&dglort, 0, sizeof(dglort)); + dglort.inner_rss = 1; + dglort.glort = interface->glort; + dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); + dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); + /* configure DGLORT mapping for RSS/DCB */ + dglort.idx = fm10k_dglort_pf_rss; + if (interface->l2_accel) + dglort.shared_l = fls(interface->l2_accel->size); + hw->mac.ops.configure_dglort_map(hw, &dglort); +} + +/** + * fm10k_configure_rx - Configure Receive Unit after Reset + * @interface: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void fm10k_configure_rx(struct fm10k_intfc *interface) +{ + int i; + + /* Configure SWPRI to PC map */ + fm10k_configure_swpri_map(interface); + + /* Configure RSS and DGLORT map */ + fm10k_configure_dglort(interface); + + /* Setup the HW Rx Head and Tail descriptor pointers */ + for (i = 0; i < interface->num_rx_queues; i++) + fm10k_configure_rx_ring(interface, interface->rx_ring[i]); + + /* possible poll here to verify that Rx rings are now enabled */ +} + +static void fm10k_napi_enable_all(struct fm10k_intfc *interface) +{ + struct fm10k_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { + q_vector = interface->q_vector[q_idx]; + napi_enable(&q_vector->napi); + } +} + +static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data) +{ + struct fm10k_q_vector *q_vector = data; + + if (q_vector->rx.count || q_vector->tx.count) + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) +{ + struct fm10k_intfc *interface = data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_mbx_info *mbx = &hw->mbx; + + /* re-enable mailbox interrupt and indicate 20us delay */ + fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), + FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + + /* service upstream mailbox */ + if (fm10k_mbx_trylock(interface)) { + mbx->ops.process(hw, mbx); + fm10k_mbx_unlock(interface); + } + + hw->mac.get_host_state = 1; + fm10k_service_event_schedule(interface); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * fm10k_netpoll - A Polling 'interrupt' handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. 
+ **/ +void fm10k_netpoll(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__FM10K_DOWN, &interface->state)) + return; + + for (i = 0; i < interface->num_q_vectors; i++) + fm10k_msix_clean_rings(0, interface->q_vector[i]); +} + +#endif +#define FM10K_ERR_MSG(type) case (type): error = #type; break +static void fm10k_print_fault(struct fm10k_intfc *interface, int type, + struct fm10k_fault *fault) +{ + struct pci_dev *pdev = interface->pdev; + char *error; + + switch (type) { + case FM10K_PCA_FAULT: + switch (fault->type) { + default: + error = "Unknown PCA error"; + break; + FM10K_ERR_MSG(PCA_NO_FAULT); + FM10K_ERR_MSG(PCA_UNMAPPED_ADDR); + FM10K_ERR_MSG(PCA_BAD_QACCESS_PF); + FM10K_ERR_MSG(PCA_BAD_QACCESS_VF); + FM10K_ERR_MSG(PCA_MALICIOUS_REQ); + FM10K_ERR_MSG(PCA_POISONED_TLP); + FM10K_ERR_MSG(PCA_TLP_ABORT); + } + break; + case FM10K_THI_FAULT: + switch (fault->type) { + default: + error = "Unknown THI error"; + break; + FM10K_ERR_MSG(THI_NO_FAULT); + FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT); + } + break; + case FM10K_FUM_FAULT: + switch (fault->type) { + default: + error = "Unknown FUM error"; + break; + FM10K_ERR_MSG(FUM_NO_FAULT); + FM10K_ERR_MSG(FUM_UNMAPPED_ADDR); + FM10K_ERR_MSG(FUM_BAD_VF_QACCESS); + FM10K_ERR_MSG(FUM_ADD_DECODE_ERR); + FM10K_ERR_MSG(FUM_RO_ERROR); + FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR); + FM10K_ERR_MSG(FUM_CSR_TIMEOUT); + FM10K_ERR_MSG(FUM_INVALID_TYPE); + FM10K_ERR_MSG(FUM_INVALID_LENGTH); + FM10K_ERR_MSG(FUM_INVALID_BE); + FM10K_ERR_MSG(FUM_INVALID_ALIGN); + } + break; + default: + error = "Undocumented fault"; + break; + } + + dev_warn(&pdev->dev, + "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n", + error, fault->address, fault->specinfo, + PCI_SLOT(fault->func), PCI_FUNC(fault->func)); +} + +static void fm10k_report_fault(struct fm10k_intfc *interface, u32 eicr) +{ + struct fm10k_hw *hw = &interface->hw; + struct fm10k_fault fault = { 0 }; + int type, err; + + for (eicr &= FM10K_EICR_FAULT_MASK, type = FM10K_PCA_FAULT; + eicr; + eicr >>= 1, type += FM10K_FAULT_SIZE) { + /* only check if there is an error reported */ + if (!(eicr & 0x1)) + continue; + + /* retrieve fault info */ + err = hw->mac.ops.get_fault(hw, type, &fault); + if (err) { + dev_err(&interface->pdev->dev, + "error reading fault\n"); + continue; + } + + fm10k_print_fault(interface, type, &fault); + } +} + +static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr) +{ + struct fm10k_hw *hw = &interface->hw; + const u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + u32 maxholdq; + int q; + + if (!(eicr & FM10K_EICR_MAXHOLDTIME)) + return; + + maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(7)); + if (maxholdq) + fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq); + for (q = 255;;) { + if (maxholdq & (1 << 31)) { + if (q < FM10K_MAX_QUEUES_PF) { + interface->rx_overrun_pf++; + fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl); + } else { + interface->rx_overrun_vf++; + } + } + + maxholdq *= 2; + if (!maxholdq) + q &= ~(32 - 1); + + if (!q) + break; + + if (q-- % 32) + continue; + + maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32)); + if (maxholdq) + fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq); + } +} + +static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) +{ + struct fm10k_intfc *interface = data; + struct fm10k_hw *hw = &interface->hw; + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 eicr; + + /* unmask any set bits related to this interrupt */ + eicr = 
fm10k_read_reg(hw, FM10K_EICR); + fm10k_write_reg(hw, FM10K_EICR, eicr & (FM10K_EICR_MAILBOX | + FM10K_EICR_SWITCHREADY | + FM10K_EICR_SWITCHNOTREADY)); + + /* report any faults found to the message log */ + fm10k_report_fault(interface, eicr); + + /* reset any queues disabled due to receiver overrun */ + fm10k_reset_drop_on_empty(interface, eicr); + + /* service mailboxes */ + if (fm10k_mbx_trylock(interface)) { + mbx->ops.process(hw, mbx); + /* handle VFLRE events */ + fm10k_iov_event(interface); + fm10k_mbx_unlock(interface); + } + + /* if switch toggled state we should reset GLORTs */ + if (eicr & FM10K_EICR_SWITCHNOTREADY) { + /* force link down for at least 4 seconds */ + interface->link_down_event = jiffies + (4 * HZ); + set_bit(__FM10K_LINK_DOWN, &interface->state); + + /* reset dglort_map back to no config */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + } + + /* we should validate host state after interrupt event */ + hw->mac.get_host_state = 1; + + /* validate host state, and handle VF mailboxes in the service task */ + fm10k_service_event_schedule(interface); + + /* re-enable mailbox interrupt and indicate 20us delay */ + fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), + FM10K_ITR_ENABLE | FM10K_MBX_INT_DELAY); + + return IRQ_HANDLED; +} + +void fm10k_mbx_free_irq(struct fm10k_intfc *interface) +{ + struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + struct fm10k_hw *hw = &interface->hw; + int itr_reg; + + /* disconnect the mailbox */ + hw->mbx.ops.disconnect(hw, &hw->mbx); + + /* disable Mailbox cause */ + if (hw->mac.type == fm10k_mac_pf) { + fm10k_write_reg(hw, FM10K_EIMR, + FM10K_EIMR_DISABLE(PCA_FAULT) | + FM10K_EIMR_DISABLE(FUM_FAULT) | + FM10K_EIMR_DISABLE(MAILBOX) | + FM10K_EIMR_DISABLE(SWITCHREADY) | + FM10K_EIMR_DISABLE(SWITCHNOTREADY) | + FM10K_EIMR_DISABLE(SRAMERROR) | + FM10K_EIMR_DISABLE(VFLR) | + FM10K_EIMR_DISABLE(MAXHOLDTIME)); + itr_reg = FM10K_ITR(FM10K_MBX_VECTOR); + } else { + itr_reg = FM10K_VFITR(FM10K_MBX_VECTOR); + } + + fm10k_write_reg(hw, itr_reg, FM10K_ITR_MASK_SET); + + free_irq(entry->vector, interface); +} + +static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + bool vlan_override = hw->mac.vlan_override; + u16 default_vid = hw->mac.default_vid; + struct fm10k_intfc *interface; + s32 err; + + err = fm10k_msg_mac_vlan_vf(hw, results, mbx); + if (err) + return err; + + interface = container_of(hw, struct fm10k_intfc, hw); + + /* MAC was changed so we need reset */ + if (is_valid_ether_addr(hw->mac.perm_addr) && + memcmp(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN)) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + /* VLAN override was changed, or default VLAN changed */ + if ((vlan_override != hw->mac.vlan_override) || + (default_vid != hw->mac.default_vid)) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + return 0; +} + +static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info __always_unused *mbx) +{ + struct fm10k_intfc *interface; + u64 timestamp; + s32 err; + + err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP], + ×tamp); + if (err) + return err; + + interface = container_of(hw, struct fm10k_intfc, hw); + + fm10k_ts_tx_hwtstamp(interface, 0, timestamp); + + return 0; +} + +/* generic error handler for mailbox issues */ +static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info __always_unused *mbx) +{ + struct fm10k_intfc *interface; + struct pci_dev *pdev; + + interface = container_of(hw, struct 
fm10k_intfc, hw); + pdev = interface->pdev; + + dev_err(&pdev->dev, "Unknown message ID %u\n", + **results & FM10K_TLV_ID_MASK); + + return 0; +} + +static const struct fm10k_msg_data vf_mbx_data[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), +}; + +static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface) +{ + struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + struct net_device *dev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + /* Use timer0 for interrupt moderation on the mailbox */ + u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry; + + /* register mailbox handlers */ + err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data); + if (err) + return err; + + /* request the IRQ */ + err = request_irq(entry->vector, fm10k_msix_mbx_vf, 0, + dev->name, interface); + if (err) { + netif_err(interface, probe, dev, + "request_irq for msix_mbx failed: %d\n", err); + return err; + } + + /* map all of the interrupt sources */ + fm10k_write_reg(hw, FM10K_VFINT_MAP, itr); + + /* enable interrupt */ + fm10k_write_reg(hw, FM10K_VFITR(entry->entry), FM10K_ITR_ENABLE); + + return 0; +} + +static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_intfc *interface; + u32 dglort_map = hw->mac.dglort_map; + s32 err; + + err = fm10k_msg_lport_map_pf(hw, results, mbx); + if (err) + return err; + + interface = container_of(hw, struct fm10k_intfc, hw); + + /* we need to reset if port count was just updated */ + if (dglort_map != hw->mac.dglort_map) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + return 0; +} + +static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info __always_unused *mbx) +{ + struct fm10k_intfc *interface; + u16 glort, pvid; + u32 pvid_update; + s32 err; + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], + &pvid_update); + if (err) + return err; + + /* extract values from the pvid update */ + glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); + pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* verify VID is valid */ + if (pvid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + interface = container_of(hw, struct fm10k_intfc, hw); + + /* check to see if this belongs to one of the VFs */ + err = fm10k_iov_update_pvid(interface, glort, pvid); + if (!err) + return 0; + + /* we need to reset if default VLAN was just updated */ + if (pvid != hw->mac.default_vid) + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + + hw->mac.default_vid = pvid; + + return 0; +} + +static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info __always_unused *mbx) +{ + struct fm10k_swapi_1588_timestamp timestamp; + struct fm10k_iov_data *iov_data; + struct fm10k_intfc *interface; + u16 sglort, vf_idx; + s32 err; + + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_PF_ATTR_ID_1588_TIMESTAMP], + ×tamp, sizeof(timestamp)); + if (err) + return err; + + interface = container_of(hw, struct fm10k_intfc, hw); + + if (timestamp.dglort) { + fm10k_ts_tx_hwtstamp(interface, timestamp.dglort, + le64_to_cpu(timestamp.egress)); + return 0; + } + + /* either dglort or 
sglort must be set */ + if (!timestamp.sglort) + return FM10K_ERR_PARAM; + + /* verify GLORT is at least one of the ones we own */ + sglort = le16_to_cpu(timestamp.sglort); + if (!fm10k_glort_valid_pf(hw, sglort)) + return FM10K_ERR_PARAM; + + if (sglort == interface->glort) { + fm10k_ts_tx_hwtstamp(interface, 0, + le64_to_cpu(timestamp.ingress)); + return 0; + } + + /* if there is no iov_data then there is no mailboxes to process */ + if (!ACCESS_ONCE(interface->iov_data)) + return FM10K_ERR_PARAM; + + rcu_read_lock(); + + /* notify VF if this timestamp belongs to it */ + iov_data = interface->iov_data; + vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort; + + if (!iov_data || vf_idx >= iov_data->num_vfs) { + err = FM10K_ERR_PARAM; + goto err_unlock; + } + + err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx], + le64_to_cpu(timestamp.ingress)); + +err_unlock: + rcu_read_unlock(); + + return err; +} + +static const struct fm10k_msg_data pf_mbx_data[] = { + FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), + FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map), + FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), + FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid), + FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), +}; + +static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) +{ + struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + struct net_device *dev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + /* Use timer0 for interrupt moderation on the mailbox */ + u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry; + u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry; + + /* register mailbox handlers */ + err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data); + if (err) + return err; + + /* request the IRQ */ + err = request_irq(entry->vector, fm10k_msix_mbx_pf, 0, + dev->name, interface); + if (err) { + netif_err(interface, probe, dev, + "request_irq for msix_mbx failed: %d\n", err); + return err; + } + + /* Enable interrupts w/ no moderation for "other" interrupts */ + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr); + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr); + + /* Enable interrupts w/ moderation for mailbox */ + fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr); + + /* Enable individual interrupt causes */ + fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | + FM10K_EIMR_ENABLE(FUM_FAULT) | + FM10K_EIMR_ENABLE(MAILBOX) | + FM10K_EIMR_ENABLE(SWITCHREADY) | + FM10K_EIMR_ENABLE(SWITCHNOTREADY) | + FM10K_EIMR_ENABLE(SRAMERROR) | + FM10K_EIMR_ENABLE(VFLR) | + FM10K_EIMR_ENABLE(MAXHOLDTIME)); + + /* enable interrupt */ + fm10k_write_reg(hw, FM10K_ITR(entry->entry), FM10K_ITR_ENABLE); + + return 0; +} + +int fm10k_mbx_request_irq(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + int err; + + /* enable Mailbox cause */ + if (hw->mac.type == fm10k_mac_pf) + err = fm10k_mbx_request_irq_pf(interface); + else + err = fm10k_mbx_request_irq_vf(interface); + + /* connect mailbox */ + if (!err) + err = hw->mbx.ops.connect(hw, 
&hw->mbx); + + return err; +} + +/** + * fm10k_qv_free_irq - release interrupts associated with queue vectors + * @interface: board private structure + * + * Release all interrupts associated with this interface + **/ +void fm10k_qv_free_irq(struct fm10k_intfc *interface) +{ + int vector = interface->num_q_vectors; + struct fm10k_hw *hw = &interface->hw; + struct msix_entry *entry; + + entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector]; + + while (vector) { + struct fm10k_q_vector *q_vector; + + vector--; + entry--; + q_vector = interface->q_vector[vector]; + + if (!q_vector->tx.count && !q_vector->rx.count) + continue; + + /* disable interrupts */ + + writel(FM10K_ITR_MASK_SET, q_vector->itr); + + free_irq(entry->vector, q_vector); + } +} + +/** + * fm10k_qv_request_irq - initialize interrupts for queue vectors + * @interface: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +int fm10k_qv_request_irq(struct fm10k_intfc *interface) +{ + struct net_device *dev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + struct msix_entry *entry; + int ri = 0, ti = 0; + int vector, err; + + entry = &interface->msix_entries[NON_Q_VECTORS(hw)]; + + for (vector = 0; vector < interface->num_q_vectors; vector++) { + struct fm10k_q_vector *q_vector = interface->q_vector[vector]; + + /* name the vector */ + if (q_vector->tx.count && q_vector->rx.count) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-TxRx-%d", dev->name, ri++); + ti++; + } else if (q_vector->rx.count) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-rx-%d", dev->name, ri++); + } else if (q_vector->tx.count) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-tx-%d", dev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; + } + + /* Assign ITR register to q_vector */ + q_vector->itr = (hw->mac.type == fm10k_mac_pf) ? 
+ &interface->uc_addr[FM10K_ITR(entry->entry)] : + &interface->uc_addr[FM10K_VFITR(entry->entry)]; + + /* request the IRQ */ + err = request_irq(entry->vector, &fm10k_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + netif_err(interface, probe, dev, + "request_irq failed for MSIX interrupt Error: %d\n", + err); + goto err_out; + } + + /* Enable q_vector */ + writel(FM10K_ITR_ENABLE, q_vector->itr); + + entry++; + } + + return 0; + +err_out: + /* wind through the ring freeing all entries and vectors */ + while (vector) { + struct fm10k_q_vector *q_vector; + + entry--; + vector--; + q_vector = interface->q_vector[vector]; + + if (!q_vector->tx.count && !q_vector->rx.count) + continue; + + /* disable interrupts */ + + writel(FM10K_ITR_MASK_SET, q_vector->itr); + + free_irq(entry->vector, q_vector); + } + + return err; +} + +void fm10k_up(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + + /* Enable Tx/Rx DMA */ + hw->mac.ops.start_hw(hw); + + /* configure Tx descriptor rings */ + fm10k_configure_tx(interface); + + /* configure Rx descriptor rings */ + fm10k_configure_rx(interface); + + /* configure interrupts */ + hw->mac.ops.update_int_moderator(hw); + + /* clear down bit to indicate we are ready to go */ + clear_bit(__FM10K_DOWN, &interface->state); + + /* enable polling cleanups */ + fm10k_napi_enable_all(interface); + + /* re-establish Rx filters */ + fm10k_restore_rx_state(interface); + + /* enable transmits */ + netif_tx_start_all_queues(interface->netdev); + + /* kick off the service timer now */ + hw->mac.get_host_state = 1; + mod_timer(&interface->service_timer, jiffies); +} + +static void fm10k_napi_disable_all(struct fm10k_intfc *interface) +{ + struct fm10k_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < interface->num_q_vectors; q_idx++) { + q_vector = interface->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +void fm10k_down(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + + /* signal that we are down to the interrupt handler and service task */ + set_bit(__FM10K_DOWN, &interface->state); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + + /* disable transmits */ + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + + /* reset Rx filters */ + fm10k_reset_rx_state(interface); + + /* allow 10ms for device to quiesce */ + usleep_range(10000, 20000); + + /* disable polling routines */ + fm10k_napi_disable_all(interface); + + /* capture stats one last time before stopping interface */ + fm10k_update_stats(interface); + + /* Disable DMA engine for Tx/Rx */ + hw->mac.ops.stop_hw(hw); + + /* free any buffers still on the rings */ + fm10k_clean_all_tx_rings(interface); +} + +/** + * fm10k_sw_init - Initialize general software structures + * @interface: host interface private structure to initialize + * + * fm10k_sw_init initializes the interface private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
+ **/ +static int fm10k_sw_init(struct fm10k_intfc *interface, + const struct pci_device_id *ent) +{ + const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; + struct fm10k_hw *hw = &interface->hw; + struct pci_dev *pdev = interface->pdev; + struct net_device *netdev = interface->netdev; + u32 rss_key[FM10K_RSSRK_SIZE]; + unsigned int rss; + int err; + + /* initialize back pointer */ + hw->back = interface; + hw->hw_addr = interface->uc_addr; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Setup hw api */ + memcpy(&hw->mac.ops, fi->mac_ops, sizeof(hw->mac.ops)); + hw->mac.type = fi->mac; + + /* Setup IOV handlers */ + if (fi->iov_ops) + memcpy(&hw->iov.ops, fi->iov_ops, sizeof(hw->iov.ops)); + + /* Set common capability flags and settings */ + rss = min_t(int, FM10K_MAX_RSS_INDICES, num_online_cpus()); + interface->ring_feature[RING_F_RSS].limit = rss; + fi->get_invariants(hw); + + /* pick up the PCIe bus settings for reporting later */ + if (hw->mac.ops.get_bus_info) + hw->mac.ops.get_bus_info(hw); + + /* limit the usable DMA range */ + if (hw->mac.ops.set_dma_mask) + hw->mac.ops.set_dma_mask(hw, dma_get_mask(&pdev->dev)); + + /* update netdev with DMA restrictions */ + if (dma_get_mask(&pdev->dev) > DMA_BIT_MASK(32)) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + /* delay any future reset requests */ + interface->last_reset = jiffies + (10 * HZ); + + /* reset and initialize the hardware so it is in a known state */ + err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw); + if (err) { + dev_err(&pdev->dev, "init_hw failed: %d\n", err); + return err; + } + + /* initialize hardware statistics */ + hw->mac.ops.update_hw_stats(hw, &interface->stats); + + /* Set upper limit on IOV VFs that can be allocated */ + pci_sriov_set_totalvfs(pdev, hw->iov.total_vfs); + + /* Start with random Ethernet address */ + eth_random_addr(hw->mac.addr); + + /* Initialize MAC address from hardware */ + err = hw->mac.ops.read_mac_addr(hw); + if (err) { + dev_warn(&pdev->dev, + "Failed to obtain MAC address defaulting to random\n"); + /* tag address assignment as random */ + netdev->addr_assign_type |= NET_ADDR_RANDOM; + } + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + return -EIO; + } + + /* assign BAR 4 resources for use with PTP */ + if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED) + interface->sw_addr = ioremap(pci_resource_start(pdev, 4), + pci_resource_len(pdev, 4)); + hw->sw_addr = interface->sw_addr; + + /* Only the PF can support VXLAN and NVGRE offloads */ + if (hw->mac.type != fm10k_mac_pf) { + netdev->hw_enc_features = 0; + netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; + } + + /* initialize DCBNL interface */ + fm10k_dcbnl_set_ops(netdev); + + /* Initialize service timer and service task */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + setup_timer(&interface->service_timer, &fm10k_service_timer, + (unsigned long)interface); + INIT_WORK(&interface->service_task, fm10k_service_task); + + /* kick off service timer now, even when interface is down */ + mod_timer(&interface->service_timer, (HZ * 2) + jiffies); + 
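+ /* The timer armed above fires while __FM10K_SERVICE_DISABLE (set earlier in this function) is still set; the service task is expected to stay idle until fm10k_probe() clears that bit once the netdev has been registered. */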
+ /* Initialize timestamp data */ + fm10k_ts_init(interface); + + /* set default ring sizes */ + interface->tx_ring_count = FM10K_DEFAULT_TXD; + interface->rx_ring_count = FM10K_DEFAULT_RXD; + + /* set default interrupt moderation */ + interface->tx_itr = FM10K_ITR_10K; + interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K; + + /* initialize vxlan_port list */ + INIT_LIST_HEAD(&interface->vxlan_port); + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + memcpy(interface->rssrk, rss_key, sizeof(rss_key)); + + /* Start off interface as being down */ + set_bit(__FM10K_DOWN, &interface->state); + + return 0; +} + +static void fm10k_slot_warn(struct fm10k_intfc *interface) +{ + struct device *dev = &interface->pdev->dev; + struct fm10k_hw *hw = &interface->hw; + + if (hw->mac.ops.is_slot_appropriate(hw)) + return; + + dev_warn(dev, + "For optimal performance, a %s %s slot is recommended.\n", + (hw->bus_caps.width == fm10k_bus_width_pcie_x1 ? "x1" : + hw->bus_caps.width == fm10k_bus_width_pcie_x4 ? "x4" : + "x8"), + (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : + hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : + "8.0GT/s")); + dev_warn(dev, + "A slot with more lanes and/or higher speed is suggested.\n"); +} + +/** + * fm10k_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in fm10k_pci_tbl + * + * Returns 0 on success, negative on failure + * + * fm10k_probe initializes an interface identified by a pci_dev structure. + * The OS initialization, configuring of the interface private structure, + * and a hardware reset occur. + **/ +static int fm10k_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct fm10k_intfc *interface; + struct fm10k_hw *hw; + int err; + u64 dma_mask; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* By default fm10k only supports a 48 bit DMA mask */ + dma_mask = DMA_BIT_MASK(48) | dma_get_required_mask(&pdev->dev); + + if ((dma_mask <= DMA_BIT_MASK(32)) || + dma_set_mask_and_coherent(&pdev->dev, dma_mask)) { + dma_mask &= DMA_BIT_MASK(32); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, + IORESOURCE_MEM), + fm10k_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + netdev = fm10k_alloc_netdev(); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_netdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + interface = netdev_priv(netdev); + pci_set_drvdata(pdev, interface); + + interface->netdev = netdev; + interface->pdev = pdev; + hw = &interface->hw; + + interface->uc_addr = ioremap(pci_resource_start(pdev, 0), + FM10K_UC_ADDR_SIZE); + if (!interface->uc_addr) { + err = -EIO; + goto err_ioremap; + } + + err = fm10k_sw_init(interface, ent); + if (err) + goto err_sw_init; + + /* enable debugfs support */ + fm10k_dbg_intfc_init(interface); + + err = fm10k_init_queueing_scheme(interface); + if (err) + goto err_sw_init; + + err = fm10k_mbx_request_irq(interface); + if (err) + goto err_mbx_interrupt; + + /* final check of hardware state before registering 
the interface */ + err = fm10k_hw_ready(interface); + if (err) + goto err_register; + + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* stop all the transmit queues from transmitting until link is up */ + netif_tx_stop_all_queues(netdev); + + /* Register PTP interface */ + fm10k_ptp_register(interface); + + /* print bus type/speed/width info */ + dev_info(&pdev->dev, "(PCI Express:%s Width: %s Payload: %s)\n", + (hw->bus.speed == fm10k_bus_speed_8000 ? "8.0GT/s" : + hw->bus.speed == fm10k_bus_speed_5000 ? "5.0GT/s" : + hw->bus.speed == fm10k_bus_speed_2500 ? "2.5GT/s" : + "Unknown"), + (hw->bus.width == fm10k_bus_width_pcie_x8 ? "x8" : + hw->bus.width == fm10k_bus_width_pcie_x4 ? "x4" : + hw->bus.width == fm10k_bus_width_pcie_x1 ? "x1" : + "Unknown"), + (hw->bus.payload == fm10k_bus_payload_128 ? "128B" : + hw->bus.payload == fm10k_bus_payload_256 ? "256B" : + hw->bus.payload == fm10k_bus_payload_512 ? "512B" : + "Unknown")); + + /* print warning for non-optimal configurations */ + fm10k_slot_warn(interface); + + /* enable SR-IOV after registering netdev to enforce PF/VF ordering */ + fm10k_iov_configure(pdev, 0); + + /* clear the service task disable bit to allow service task to start */ + clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + + return 0; + +err_register: + fm10k_mbx_free_irq(interface); +err_mbx_interrupt: + fm10k_clear_queueing_scheme(interface); +err_sw_init: + if (interface->sw_addr) + iounmap(interface->sw_addr); + iounmap(interface->uc_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_netdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * fm10k_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * fm10k_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void fm10k_remove(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + + del_timer_sync(&interface->service_timer); + + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + cancel_work_sync(&interface->service_task); + + /* free netdev, this may bounce the interrupts due to setup_tc */ + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + /* cleanup timestamp handling */ + fm10k_ptp_unregister(interface); + + /* release VFs */ + fm10k_iov_disable(pdev); + + /* disable mailbox interrupt */ + fm10k_mbx_free_irq(interface); + + /* free interrupts */ + fm10k_clear_queueing_scheme(interface); + + /* remove any debugfs interfaces */ + fm10k_dbg_intfc_exit(interface); + + if (interface->sw_addr) + iounmap(interface->sw_addr); + iounmap(interface->uc_addr); + + free_netdev(netdev); + + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +/** + * fm10k_resume - Restore device to pre-sleep state + * @pdev: PCI device information struct + * + * fm10k_resume is called after the system has powered back up from a sleep + * state and is ready to resume operation. This function is meant to restore + * the device back to its pre-sleep state. 
+ **/ +static int fm10k_resume(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + /* refresh hw_addr in case it was dropped */ + hw->hw_addr = interface->uc_addr; + + /* reset hardware to known state */ + err = hw->mac.ops.init_hw(&interface->hw); + if (err) + return err; + + /* reset statistics starting values */ + hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + + /* reset clock */ + fm10k_ts_reset(interface); + + rtnl_lock(); + + err = fm10k_init_queueing_scheme(interface); + if (!err) { + fm10k_mbx_request_irq(interface); + if (netif_running(netdev)) + err = fm10k_open(netdev); + } + + rtnl_unlock(); + + if (err) + return err; + + /* restore SR-IOV interface */ + fm10k_iov_resume(pdev); + + netif_device_attach(netdev); + + return 0; +} + +/** + * fm10k_suspend - Prepare the device for a system sleep state + * @pdev: PCI device information struct + * + * fm10k_suspend is meant to shutdown the device prior to the system entering + * a sleep state. The fm10k hardware does not support wake on lan so the + * driver simply needs to shut down the device so it is in a low power state. + **/ +static int fm10k_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + int err = 0; + + netif_device_detach(netdev); + + fm10k_iov_suspend(pdev); + + rtnl_lock(); + + if (netif_running(netdev)) + fm10k_close(netdev); + + fm10k_mbx_free_irq(interface); + + fm10k_clear_queueing_scheme(interface); + + rtnl_unlock(); + + err = pci_save_state(pdev); + if (err) + return err; + + pci_disable_device(pdev); + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +#endif /* CONFIG_PM */ +/** + * fm10k_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + fm10k_close(netdev); + + fm10k_mbx_free_irq(interface); + + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * fm10k_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. 
+ */ +static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + pci_ers_result_t result; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + + /* After second error pci->state_saved is false, this + * resets it so EEH doesn't break. + */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + /* refresh hw_addr in case it was dropped */ + interface->hw.hw_addr = interface->uc_addr; + + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + fm10k_service_event_schedule(interface); + + result = PCI_ERS_RESULT_RECOVERED; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; +} + +/** + * fm10k_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. + */ +static void fm10k_io_resume(struct pci_dev *pdev) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err = 0; + + /* reset hardware to known state */ + hw->mac.ops.init_hw(&interface->hw); + + /* reset statistics starting values */ + hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + + /* reassociate interrupts */ + fm10k_mbx_request_irq(interface); + + /* reset clock */ + fm10k_ts_reset(interface); + + if (netif_running(netdev)) + err = fm10k_open(netdev); + + /* final check of hardware state before registering the interface */ + err = err ? : fm10k_hw_ready(interface); + + if (!err) + netif_device_attach(netdev); +} + +static const struct pci_error_handlers fm10k_err_handler = { + .error_detected = fm10k_io_error_detected, + .slot_reset = fm10k_io_slot_reset, + .resume = fm10k_io_resume, +}; + +static struct pci_driver fm10k_driver = { + .name = fm10k_driver_name, + .id_table = fm10k_pci_tbl, + .probe = fm10k_probe, + .remove = fm10k_remove, +#ifdef CONFIG_PM + .suspend = fm10k_suspend, + .resume = fm10k_resume, +#endif + .sriov_configure = fm10k_iov_configure, + .err_handler = &fm10k_err_handler +}; + +/** + * fm10k_register_pci_driver - register driver interface + * + * This function is called on module load in order to register the driver. + **/ +int fm10k_register_pci_driver(void) +{ + return pci_register_driver(&fm10k_driver); +} + +/** + * fm10k_unregister_pci_driver - unregister driver interface + * + * This function is called on module unload in order to remove the driver. + **/ +void fm10k_unregister_pci_driver(void) +{ + pci_unregister_driver(&fm10k_driver); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c new file mode 100644 index 000000000..891e21874 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -0,0 +1,1880 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_pf.h" +#include "fm10k_vf.h" + +/** + * fm10k_reset_hw_pf - PF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after being powered on. + **/ +static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) +{ + s32 err; + u32 reg; + u16 i; + + /* Disable interrupts */ + fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL)); + + /* Lock ITR2 reg 0 into itself and disable interrupt moderation */ + fm10k_write_reg(hw, FM10K_ITR2(0), 0); + fm10k_write_reg(hw, FM10K_INT_CTRL, 0); + + /* We assume here Tx and Rx queue 0 are owned by the PF */ + + /* Shut off VF access to their queues forcing them to queue 0 */ + for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) { + fm10k_write_reg(hw, FM10K_TQMAP(i), 0); + fm10k_write_reg(hw, FM10K_RQMAP(i), 0); + } + + /* shut down all rings */ + err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); + if (err) + return err; + + /* Verify that DMA is no longer active */ + reg = fm10k_read_reg(hw, FM10K_DMA_CTRL); + if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) + return FM10K_ERR_DMA_PENDING; + + /* Initiate data path reset */ + reg |= FM10K_DMA_CTRL_DATAPATH_RESET; + fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); + + /* Flush write and allow 100us for reset to complete */ + fm10k_write_flush(hw); + udelay(FM10K_RESET_TIMEOUT); + + /* Verify we made it out of reset */ + reg = fm10k_read_reg(hw, FM10K_IP); + if (!(reg & FM10K_IP_NOTINRESET)) + err = FM10K_ERR_RESET_FAILED; + + return err; +} + +/** + * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support + * @hw: pointer to hardware structure + * + * Looks at the ARI hierarchy bit to determine whether ARI is supported or not. 
+ **/ +static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw) +{ + u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL); + + return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI); +} + +/** + * fm10k_init_hw_pf - PF hardware initialization + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_init_hw_pf(struct fm10k_hw *hw) +{ + u32 dma_ctrl, txqctl; + u16 i; + + /* Establish default VSI as valid */ + fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0); + fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default), + FM10K_DGLORTMAP_ANY); + + /* Invalidate all other GLORT entries */ + for (i = 1; i < FM10K_DGLORT_COUNT; i++) + fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE); + + /* reset ITR2(0) to point to itself */ + fm10k_write_reg(hw, FM10K_ITR2(0), 0); + + /* reset VF ITR2(0) to point to 0 avoid PF registers */ + fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0); + + /* loop through all PF ITR2 registers pointing them to the previous */ + for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++) + fm10k_write_reg(hw, FM10K_ITR2(i), i - 1); + + /* Enable interrupt moderator if not already enabled */ + fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); + + /* compute the default txqctl configuration */ + txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW | + (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT); + + for (i = 0; i < FM10K_MAX_QUEUES; i++) { + /* configure rings for 256 Queue / 32 Descriptor cache mode */ + fm10k_write_reg(hw, FM10K_TQDLOC(i), + (i * FM10K_TQDLOC_BASE_32_DESC) | + FM10K_TQDLOC_SIZE_32_DESC); + fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl); + + /* configure rings to provide TPH processing hints */ + fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i), + FM10K_TPH_TXCTRL_DESC_TPHEN | + FM10K_TPH_TXCTRL_DESC_RROEN | + FM10K_TPH_TXCTRL_DESC_WROEN | + FM10K_TPH_TXCTRL_DATA_RROEN); + fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i), + FM10K_TPH_RXCTRL_DESC_TPHEN | + FM10K_TPH_RXCTRL_DESC_RROEN | + FM10K_TPH_RXCTRL_DATA_WROEN | + FM10K_TPH_RXCTRL_HDR_WROEN); + } + + /* set max hold interval to align with 1.024 usec in all modes */ + switch (hw->bus.speed) { + case fm10k_bus_speed_2500: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1; + break; + case fm10k_bus_speed_5000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2; + break; + case fm10k_bus_speed_8000: + dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3; + break; + default: + dma_ctrl = 0; + break; + } + + /* Configure TSO flags */ + fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW); + fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI); + + /* Enable DMA engine + * Set Rx Descriptor size to 32 + * Set Minimum MSS to 64 + * Set Maximum number of Rx queues to 256 / 32 Descriptor + */ + dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE | + FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 | + FM10K_DMA_CTRL_32_DESC; + + fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl); + + /* record maximum queue count, we limit ourselves to 128 */ + hw->mac.max_queues = FM10K_MAX_QUEUES_PF; + + /* We support either 64 VFs or 7 VFs depending on if we have ARI */ + hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7; + + return 0; +} + +/** + * fm10k_is_slot_appropriate_pf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. 
+ **/ +static bool fm10k_is_slot_appropriate_pf(struct fm10k_hw *hw) +{ + return (hw->bus.speed == hw->bus_caps.speed) && + (hw->bus.width == hw->bus_caps.width); +} + +/** + * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Index indicating VF ID or PF ID in table + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for the corresponding function. In addition to the + * standard set/clear that supports one bit a multi-bit write is + * supported to set 64 bits at a time. + **/ +static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + u32 vlan_table, reg, mask, bit, len; + + /* verify the VSI index is valid */ + if (vsi > FM10K_VLAN_TABLE_VSI_MAX) + return FM10K_ERR_PARAM; + + /* VLAN multi-bit write: + * The multi-bit write has several parts to it. + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | RSVD0 | Length |C|RSVD0| VLAN ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * VLAN ID: Vlan Starting value + * RSVD0: Reserved section, must be 0 + * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message) + * Length: Number of times to repeat the bit being set + */ + len = vid >> 16; + vid = (vid << 17) >> 17; + + /* verify the reserved 0 fields are 0 */ + if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* Loop through the table updating all required VLANs */ + for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32; + len < FM10K_VLAN_TABLE_VID_MAX; + len -= 32 - bit, reg++, bit = 0) { + /* record the initial state of the register */ + vlan_table = fm10k_read_reg(hw, reg); + + /* truncate mask if we are at the start or end of the run */ + mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit; + + /* make necessary modifications to the register */ + mask &= set ? ~vlan_table : vlan_table; + if (mask) + fm10k_write_reg(hw, reg, vlan_table ^ mask); + } + + return 0; +} + +/** + * fm10k_read_mac_addr_pf - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the SM_AREA and stores the value. 
+ **/ +static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 serial_num; + int i; + + serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1)); + + /* last byte should be all 1's */ + if ((~serial_num) << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(serial_num >> 24); + perm_addr[1] = (u8)(serial_num >> 16); + perm_addr[2] = (u8)(serial_num >> 8); + + serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0)); + + /* first byte should be all 1's */ + if ((~serial_num) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(serial_num >> 16); + perm_addr[4] = (u8)(serial_num >> 8); + perm_addr[5] = (u8)(serial_num); + + for (i = 0; i < ETH_ALEN; i++) { + hw->mac.perm_addr[i] = perm_addr[i]; + hw->mac.addr[i] = perm_addr[i]; + } + + return 0; +} + +/** + * fm10k_glort_valid_pf - Validate that the provided glort is valid + * @hw: pointer to the HW structure + * @glort: base glort to be validated + * + * This function will return an error if the provided glort is invalid + **/ +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) +{ + glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT; + + return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE); +} + +/** + * fm10k_update_xc_addr_pf - Update device addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function generates a message to the Switch API requesting + * that the given logical port add/remove the given L2 MAC/VLAN address. + **/ +static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + struct fm10k_mac_update mac_update; + u32 msg[5]; + + /* clear set bit from VLAN ID */ + vid &= ~FM10K_VLAN_CLEAR; + + /* if glort or vlan are not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* record fields */ + mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) | + ((u32)mac[3] << 16) | + ((u32)mac[4] << 8) | + ((u32)mac[5])); + mac_update.mac_upper = cpu_to_le16(((u32)mac[0] << 8) | + ((u32)mac[1])); + mac_update.vlan = cpu_to_le16(vid); + mac_update.glort = cpu_to_le16(glort); + mac_update.action = add ? 0 : 1; + mac_update.flags = flags; + + /* populate mac_update fields */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE); + fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE, + &mac_update, sizeof(mac_update)); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_uc_addr_pf - Update device unicast addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure + * + * This function is used to add or remove unicast addresses for + * the PF. 
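/*
 * Illustrative sketch, not from the driver: the SM_AREA layout consumed by
 * fm10k_read_mac_addr_pf() above. Word 1 holds MAC bytes 0-2 above an
 * all-ones low byte, word 0 holds an all-ones high byte above MAC bytes 3-5;
 * the all-ones bytes are what the driver's (~serial_num) checks validate.
 * Function names are hypothetical.
 */
#include <stdint.h>

void example_pack_mac(const uint8_t mac[6], uint32_t *w1, uint32_t *w0)
{
        *w1 = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
              ((uint32_t)mac[2] << 8)  | 0xFFu;
        *w0 = 0xFF000000u | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8)  | mac[5];
}

int example_unpack_mac(uint32_t w1, uint32_t w0, uint8_t mac[6])
{
        if ((~w1) << 24 || (~w0) >> 24) /* guard bytes must be all ones */
                return -1;
        mac[0] = w1 >> 24; mac[1] = w1 >> 16; mac[2] = w1 >> 8;
        mac[3] = w0 >> 16; mac[4] = w0 >> 8;  mac[5] = w0;
        return 0;
}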
+ **/ +static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + /* verify MAC address is valid */ + if (!is_valid_ether_addr(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags); +} + +/** + * fm10k_update_mc_addr_pf - Update device multicast addresses + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the PF. + **/ +static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + /* verify multicast address is valid */ + if (!is_multicast_ether_addr(mac)) + return FM10K_ERR_PARAM; + + return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0); +} + +/** + * fm10k_update_xcast_mode_pf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: base resource tag for this request + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. + **/ +static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], xcast_mode; + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* write xcast mode as a single u32 value, + * lower 16 bits: glort + * upper 16 bits: mode + */ + xcast_mode = ((u32)mode << 16) | glort; + + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_pf - Update interrupt moderator linked list + * @hw: pointer to hardware structure + * + * This function walks through the MSI-X vector table to determine the + * number of active interrupts and based on that information updates the + * interrupt moderator linked list. + **/ +static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) +{ + u32 i; + + /* Disable interrupt moderator */ + fm10k_write_reg(hw, FM10K_INT_CTRL, 0); + + /* loop through PF from last to first looking enabled vectors */ + for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) { + if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* always reset VFITR2[0] to point to last enabled PF vector */ + fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); + + /* reset ITR2[0] to point to last enabled PF vector */ + if (!hw->iov.num_vfs) + fm10k_write_reg(hw, FM10K_ITR2(0), i); + + /* Enable interrupt moderator */ + fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR); +} + +/** + * fm10k_update_lport_state_pf - Notify the switch of a change in port state + * @hw: pointer to the HW structure + * @glort: base resource tag for this request + * @count: number of logical ports being updated + * @enable: boolean value indicating enable or disable + * + * This function is used to add/remove a logical port from the switch. 
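/*
 * Illustrative sketch, not from the driver: several of the PF's switch
 * requests above are single u32 attributes with the GLORT in the low half
 * and a second field in the high half -- (mode << 16) | glort for the xcast
 * mode request, and (count << 16) | glort for the lport create/delete
 * message that follows. The helper name below is hypothetical.
 */
#include <stdint.h>

uint32_t example_glort_word(uint16_t high, uint16_t glort)
{
        return ((uint32_t)high << 16) | glort;
}

/* usage: example_glort_word(mode, glort) or example_glort_word(count, glort) */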
+ **/ +static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3], lport_msg; + + /* do nothing if we are being asked to create or destroy 0 ports */ + if (!count) + return 0; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* construct the lport message from the 2 pieces of data we have */ + lport_msg = ((u32)count << 16) | glort; + + /* generate lport create/delete message */ + fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE : + FM10K_PF_MSG_ID_LPORT_DELETE); + fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. + **/ +static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + u16 glort, queue_count, vsi_count, pc_count; + u16 vsi, queue, pc, q_idx; + u32 txqctl, dglortdec, dglortmap; + + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* verify the dglort values */ + if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) || + (dglort->vsi_l > 6) || (dglort->vsi_b > 64) || + (dglort->queue_l > 8) || (dglort->queue_b >= 256)) + return FM10K_ERR_PARAM; + + /* determine count of VSIs and queues */ + queue_count = 1 << (dglort->rss_l + dglort->pc_l); + vsi_count = 1 << (dglort->vsi_l + dglort->queue_l); + glort = dglort->glort; + q_idx = dglort->queue_b; + + /* configure SGLORT for queues */ + for (vsi = 0; vsi < vsi_count; vsi++, glort++) { + for (queue = 0; queue < queue_count; queue++, q_idx++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort); + fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort); + } + } + + /* determine count of PCs and queues */ + queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l); + pc_count = 1 << dglort->pc_l; + + /* configure PC for Tx queues */ + for (pc = 0; pc < pc_count; pc++) { + q_idx = pc + dglort->queue_b; + for (queue = 0; queue < queue_count; queue++) { + if (q_idx >= FM10K_MAX_QUEUES) + break; + + txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx)); + txqctl &= ~FM10K_TXQCTL_PC_MASK; + txqctl |= pc << FM10K_TXQCTL_PC_SHIFT; + fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl); + + q_idx += pc_count; + } + } + + /* configure DGLORTDEC */ + dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | + ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) | + ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) | + ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) | + ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) | + ((u32)(dglort->queue_l)); + if (dglort->inner_rss) + dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE; + + /* configure DGLORTMAP */ + dglortmap = (dglort->idx == fm10k_dglort_default) ? 
+ FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO; + dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l; + dglortmap |= dglort->glort; + + /* write values to hardware */ + fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec); + fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap); + + return 0; +} + +u16 fm10k_queues_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ? + 8 : FM10K_MAX_QUEUES_POOL; +} + +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 num_vfs = hw->iov.num_vfs; + u16 vf_q_idx = FM10K_MAX_QUEUES; + + vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx); + + return vf_q_idx; +} + +static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw) +{ + u16 num_pools = hw->iov.num_pools; + + return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 : + FM10K_MAX_VECTORS_POOL; +} + +static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx = FM10K_MAX_VECTORS_PF; + + vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx; + + return vf_v_idx; +} + +/** + * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization + * @hw: pointer to the HW structure + * @num_vfs: number of VFs to be allocated + * @num_pools: number of virtualization pools to be allocated + * + * Allocates queues and traffic classes to virtualization entities to prepare + * the PF for SR-IOV and VMDq + **/ +static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, + u16 num_pools) +{ + u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx; + u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT; + int i, j; + + /* hardware only supports up to 64 pools */ + if (num_pools > 64) + return FM10K_ERR_PARAM; + + /* the number of VFs cannot exceed the number of pools */ + if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs)) + return FM10K_ERR_PARAM; + + /* record number of virtualization entities */ + hw->iov.num_vfs = num_vfs; + hw->iov.num_pools = num_pools; + + /* determine qmap offsets and counts */ + qmap_stride = (num_vfs > 8) ? 
32 : 256; + qpp = fm10k_queues_per_pool(hw); + vpp = fm10k_vectors_per_pool(hw); + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, 0); + qmap_idx = 0; + + /* establish TCs with -1 credits and no quanta to prevent transmit */ + for (i = 0; i < num_vfs; i++) { + fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0); + fm10k_write_reg(hw, FM10K_TC_RATE(i), 0); + fm10k_write_reg(hw, FM10K_TC_CREDIT(i), + FM10K_TC_CREDIT_CREDIT_MASK); + } + + /* zero out all mbmem registers */ + for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;) + fm10k_write_reg(hw, FM10K_MBMEM(i), 0); + + /* clear event notification of VF FLR */ + fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0); + fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0); + + /* loop through unallocated rings assigning them back to PF */ + for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) { + fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); + fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | + FM10K_TXQCTL_UNLIMITED_BW | vid); + fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF); + } + + /* PF should have already updated VFITR2[0] */ + + /* update all ITR registers to flow to VFITR2[0] */ + for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) { + if (!(i & (vpp - 1))) + fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp); + else + fm10k_write_reg(hw, FM10K_ITR2(i), i - 1); + } + + /* update PF ITR2[0] to reference the last vector */ + fm10k_write_reg(hw, FM10K_ITR2(0), + fm10k_vf_vector_index(hw, num_vfs - 1)); + + /* loop through rings populating rings and TCs */ + for (i = 0; i < num_vfs; i++) { + /* record index for VF queue 0 for use in end of loop */ + vf_q_idx0 = vf_q_idx; + + for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) { + /* assign VF and locked TC to queues */ + fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); + fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx), + (i << FM10K_TXQCTL_TC_SHIFT) | i | + FM10K_TXQCTL_VF | vid); + fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx), + FM10K_RXQCTL_VF | + (i << FM10K_RXQCTL_VF_SHIFT)); + + /* map queue pair to VF */ + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx); + } + + /* repeat the first ring for all of the remaining VF rings */ + for (; j < qmap_stride; j++, qmap_idx++) { + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0); + } + } + + /* loop through remaining indexes assigning all to queue 0 */ + while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) { + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0); + qmap_idx++; + } + + return 0; +} + +/** + * fm10k_iov_configure_tc_pf - Configure the shaping group for VF + * @hw: pointer to the HW structure + * @vf_idx: index of VF receiving GLORT + * @rate: Rate indicated in Mb/s + * + * Configured the TC for a given VF to allow only up to a given number + * of Mb/s of outgoing Tx throughput. 
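/*
 * Illustrative sketch, not from the driver: how the pool-sizing helpers
 * shown earlier carve up the queue space. VF queues are taken from the top
 * of the range downward, so VF i starts at MAX_QUEUES - qpp * (num_vfs - i).
 * The constants 256 (total queues) and 16 (FM10K_MAX_QUEUES_POOL) are
 * assumed values for the example; the threshold logic is copied from
 * fm10k_queues_per_pool() above.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAX_QUEUES       256 /* assumed */
#define EXAMPLE_MAX_QUEUES_POOL   16 /* assumed */

static uint16_t example_queues_per_pool(uint16_t num_pools)
{
        return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 :
               (num_pools > 8) ? 8 : EXAMPLE_MAX_QUEUES_POOL;
}

static uint16_t example_vf_queue_index(uint16_t num_pools, uint16_t num_vfs,
                                       uint16_t vf_idx)
{
        return EXAMPLE_MAX_QUEUES -
               example_queues_per_pool(num_pools) * (num_vfs - vf_idx);
}

int main(void)
{
        /* 16 VFs => 8 queues per pool; VF 0 starts at 128, leaving 0-127 to the PF */
        for (unsigned vf = 0; vf < 4; vf++)
                printf("VF %u first queue: %u\n", vf,
                       (unsigned)example_vf_queue_index(16, 16, (uint16_t)vf));
        return 0;
}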
+ **/ +static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate) +{ + /* configure defaults */ + u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3; + u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* set interval to align with 4.096 usec in all modes */ + switch (hw->bus.speed) { + case fm10k_bus_speed_2500: + interval = FM10K_TC_RATE_INTERVAL_4US_GEN1; + break; + case fm10k_bus_speed_5000: + interval = FM10K_TC_RATE_INTERVAL_4US_GEN2; + break; + default: + break; + } + + if (rate) { + if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN) + return FM10K_ERR_PARAM; + + /* The quanta is measured in Bytes per 4.096 or 8.192 usec + * The rate is provided in Mbits per second + * To tralslate from rate to quanta we need to multiply the + * rate by 8.192 usec and divide by 8 bits/byte. To avoid + * dealing with floating point we can round the values up + * to the nearest whole number ratio which gives us 128 / 125. + */ + tc_rate = (rate * 128) / 125; + + /* try to keep the rate limiting accurate by increasing + * the number of credits and interval for rates less than 4Gb/s + */ + if (rate < 4000) + interval <<= 1; + else + tc_rate >>= 1; + } + + /* update rate limiter with new values */ + fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval); + fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K); + fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K); + + return 0; +} + +/** + * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list + * @hw: pointer to the HW structure + * @vf_idx: index of VF receiving GLORT + * + * Update the interrupt moderator linked list to include any MSI-X + * interrupts which the VF has enabled in the MSI-X vector table. + **/ +static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) +{ + u16 vf_v_idx, vf_v_limit, i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine vector offset and count */ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* search for first vector that is not masked */ + for (i = vf_v_limit - 1; i > vf_v_idx; i--) { + if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i))) + break; + } + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + fm10k_write_reg(hw, FM10K_ITR2(0), i); + else + fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i); + + return 0; +} + +/** + * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Assign a MAC address and default VLAN to a VF and notify it of the update + **/ +static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i; + u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0; + s32 err = 0; + u16 vf_idx, vf_vid; + + /* verify vf is in range */ + if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 
32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + + /* calculate starting index for queues */ + vf_idx = vf_info->vf_idx; + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + qmap_idx = qmap_stride * vf_idx; + + /* MAP Tx queue back to 0 temporarily, and disable it */ + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); + fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); + + /* determine correct default VLAN ID */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR; + else + vf_vid = vf_info->sw_vid; + + /* generate MAC_ADDR request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + vf_info->mac, vf_vid); + + /* load onto outgoing mailbox, ignore any errors on enqueue */ + if (vf_info->mbx.ops.enqueue_tx) + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + + /* verify ring has disabled before modifying base address registers */ + txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx)); + for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) { + /* limit ourselves to a 1ms timeout */ + if (timeout == 10) { + err = FM10K_ERR_DMA_PENDING; + goto err_out; + } + + usleep_range(100, 200); + txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx)); + } + + /* Update base address registers to contain MAC address */ + if (is_valid_ether_addr(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* Record the base address into queue 0 */ + fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal); + fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah); + +err_out: + /* configure Queue control register */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) & + FM10K_TXQCTL_VID_MASK; + txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + + /* assign VID */ + for (i = 0; i < queues_per_pool; i++) + fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl); + + /* restore the queue back to VF ownership */ + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); + return err; +} + +/** + * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF + * @hw: pointer to the HW structure + * @vf_info: pointer to VF information structure + * + * Reassign the interrupts and queues to a VF following an FLR + **/ +static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx; + u32 tdbal = 0, tdbah = 0, txqctl, rxqctl; + u16 vf_v_idx, vf_v_limit, vf_vid; + u8 vf_idx = vf_info->vf_idx; + int i; + + /* verify vf is in range */ + if (vf_idx >= hw->iov.num_vfs) + return FM10K_ERR_PARAM; + + /* clear event notification of VF FLR */ + fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32)); + + /* force timeout and then disconnect the mailbox */ + vf_info->mbx.timeout = 0; + if (vf_info->mbx.ops.disconnect) + vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); + + /* determine vector offset and count */ + vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); + vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); + + /* determine qmap offsets and counts */ + qmap_stride = (hw->iov.num_vfs > 8) ? 
32 : 256; + queues_per_pool = fm10k_queues_per_pool(hw); + qmap_idx = qmap_stride * vf_idx; + + /* make all the queues inaccessible to the VF */ + for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) { + fm10k_write_reg(hw, FM10K_TQMAP(i), 0); + fm10k_write_reg(hw, FM10K_RQMAP(i), 0); + } + + /* calculate starting index for queues */ + vf_q_idx = fm10k_vf_queue_index(hw, vf_idx); + + /* determine correct default VLAN ID */ + if (vf_info->pf_vid) + vf_vid = vf_info->pf_vid; + else + vf_vid = vf_info->sw_vid; + + /* configure Queue control register */ + txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | + (vf_idx << FM10K_TXQCTL_TC_SHIFT) | + FM10K_TXQCTL_VF | vf_idx; + rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT); + + /* stop further DMA and reset queue ownership back to VF */ + for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { + fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); + fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl); + fm10k_write_reg(hw, FM10K_RXDCTL(i), + FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | + FM10K_RXDCTL_DROP_ON_EMPTY); + fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl); + } + + /* reset TC with -1 credits and no quanta to prevent transmit */ + fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0); + fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0); + fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), + FM10K_TC_CREDIT_CREDIT_MASK); + + /* update our first entry in the table based on previous VF */ + if (!vf_idx) + hw->mac.ops.update_int_moderator(hw); + else + hw->iov.ops.assign_int_moderator(hw, vf_idx - 1); + + /* reset linked list so it now includes our active vectors */ + if (vf_idx == (hw->iov.num_vfs - 1)) + fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx); + else + fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx); + + /* link remaining vectors so that next points to previous */ + for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++) + fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1); + + /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */ + for (i = FM10K_VFMBMEM_LEN; i--;) + fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0); + for (i = FM10K_VLAN_TABLE_SIZE; i--;) + fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0); + for (i = FM10K_RETA_SIZE; i--;) + fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0); + for (i = FM10K_RSSRK_SIZE; i--;) + fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0); + fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0); + + /* Update base address registers to contain MAC address */ + if (is_valid_ether_addr(vf_info->mac)) { + tdbal = (((u32)vf_info->mac[3]) << 24) | + (((u32)vf_info->mac[4]) << 16) | + (((u32)vf_info->mac[5]) << 8); + tdbah = (((u32)0xFF) << 24) | + (((u32)vf_info->mac[0]) << 16) | + (((u32)vf_info->mac[1]) << 8) | + ((u32)vf_info->mac[2]); + } + + /* map queue pairs back to VF from last to first */ + for (i = queues_per_pool; i--;) { + fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); + fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); + fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i); + fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i); + } + + return 0; +} + +/** + * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * @lport_idx: Logical port offset from the hardware glort + * @flags: Set of capability flags to extend port beyond basic functionality + * + * This function allows enabling a VF port by assigning it a GLORT and + * setting the flags so 
that it can enable an Rx mode. + **/ +static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u16 lport_idx, u8 flags) +{ + u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE; + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE; + vf_info->glort = glort; + + return 0; +} + +/** + * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF + * @hw: pointer to hardware structure + * @vf_info: pointer to VF information structure + * + * This function disables a VF port by stripping it of a GLORT and + * setting the flags so that it cannot enable any Rx mode. + **/ +static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info) +{ + u32 msg[1]; + + /* need to disable the port if it is already enabled */ + if (FM10K_VF_FLAG_ENABLED(vf_info)) { + /* notify switch that this port has been disabled */ + fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false); + + /* generate port state response to notify VF it is not ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); + } + + /* clear flags and glort if it exists */ + vf_info->vf_flags = 0; + vf_info->glort = 0; +} + +/** + * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs + * @hw: pointer to hardware structure + * @q: stats for all queues of a VF + * @vf_idx: index of VF + * + * This function collects queue stats for VFs. + **/ +static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats_q *q, + u16 vf_idx) +{ + u32 idx, qpp; + + /* get stats for all of the queues */ + qpp = fm10k_queues_per_pool(hw); + idx = fm10k_vf_queue_index(hw, vf_idx); + fm10k_update_hw_stats_q(hw, q, idx, qpp); +} + +static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, + struct fm10k_vf_info *vf_info, + u64 timestamp) +{ + u32 msg[4]; + + /* generate port state response to notify VF it is not ready */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588); + fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp); + + return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); +} + +/** + * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MSI-X requests from the VF. The + * assumption is that in this case it is acceptable to just directly + * hand off the message from the VF to the underlying shared code. + **/ +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + u8 vf_idx = vf_info->vf_idx; + + return hw->iov.ops.assign_int_moderator(hw, vf_idx); +} + +/** + * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a default handler for MAC/VLAN requests from the VF. + * The assumption is that in this case it is acceptable to just directly + * hand off the message from the VF to the underlying shared code. 
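/*
 * Illustrative sketch, not from the driver: fm10k_iov_assign_default_mac_vlan_pf()
 * and fm10k_iov_reset_resources_pf() above store the VF's MAC address in the
 * TDBAL/TDBAH base-address registers of the VF's queues, with an all-ones
 * filler byte in the high word. The byte packing is reproduced here; the
 * helper name is hypothetical.
 */
#include <stdint.h>

void example_mac_to_tdba(const uint8_t mac[6], uint32_t *tdbal, uint32_t *tdbah)
{
        *tdbal = ((uint32_t)mac[3] << 24) | ((uint32_t)mac[4] << 16) |
                 ((uint32_t)mac[5] << 8);
        *tdbah = 0xFF000000u | ((uint32_t)mac[0] << 16) |
                 ((uint32_t)mac[1] << 8) | mac[2];
}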
+ **/ +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + int err = 0; + u8 mac[ETH_ALEN]; + u32 *result; + u16 vlan; + u32 vid; + + /* we shouldn't be updating rules on a disabled interface */ + if (!FM10K_VF_FLAG_ENABLED(vf_info)) + err = FM10K_ERR_PARAM; + + if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { + result = results[FM10K_MAC_VLAN_MSG_VLAN]; + + /* record VLAN id requested */ + err = fm10k_tlv_attr_get_u32(result, &vid); + if (err) + return err; + + /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ + if (!vid || (vid == FM10K_VLAN_CLEAR)) { + if (vf_info->pf_vid) + vid |= vf_info->pf_vid; + else + vid |= vf_info->sw_vid; + } else if (vid != vf_info->pf_vid) { + return FM10K_ERR_PARAM; + } + + /* update VSI info for VF in regards to VLAN table */ + err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, + !(vid & FM10K_VLAN_CLEAR)); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { + result = results[FM10K_MAC_VLAN_MSG_MAC]; + + /* record unicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* block attempts to set MAC for a locked device */ + if (is_valid_ether_addr(vf_info->mac) && + memcmp(mac, vf_info->mac, ETH_ALEN)) + return FM10K_ERR_PARAM; + + /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ + if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { + if (vf_info->pf_vid) + vlan |= vf_info->pf_vid; + else + vlan |= vf_info->sw_vid; + } else if (vf_info->pf_vid) { + return FM10K_ERR_PARAM; + } + + /* notify switch of request for new unicast address */ + err = hw->mac.ops.update_uc_addr(hw, vf_info->glort, mac, vlan, + !(vlan & FM10K_VLAN_CLEAR), 0); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { + result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; + + /* record multicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* verify that the VF is allowed to request multicast */ + if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) + return FM10K_ERR_PARAM; + + /* if VLAN ID is 0, set the default VLAN ID instead of 0 */ + if (!vlan || (vlan == FM10K_VLAN_CLEAR)) { + if (vf_info->pf_vid) + vlan |= vf_info->pf_vid; + else + vlan |= vf_info->sw_vid; + } else if (vf_info->pf_vid) { + return FM10K_ERR_PARAM; + } + + /* notify switch of request for new multicast address */ + err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan, + !(vlan & FM10K_VLAN_CLEAR)); + } + + return err; +} + +/** + * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode + * @vf_info: VF info structure containing capability flags + * @mode: Requested xcast mode + * + * This function outputs the mode that most closely matches the requested + * mode. 
If no modes match it will request we disable the port
+ **/
+static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
+                                            u8 mode)
+{
+        u8 vf_flags = vf_info->vf_flags;
+
+        /* match up mode to capabilities as best as possible */
+        switch (mode) {
+        case FM10K_XCAST_MODE_PROMISC:
+                if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
+                        return FM10K_XCAST_MODE_PROMISC;
+                /* fall through */
+        case FM10K_XCAST_MODE_ALLMULTI:
+                if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
+                        return FM10K_XCAST_MODE_ALLMULTI;
+                /* fall through */
+        case FM10K_XCAST_MODE_MULTI:
+                if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
+                        return FM10K_XCAST_MODE_MULTI;
+                /* fall through */
+        case FM10K_XCAST_MODE_NONE:
+                if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
+                        return FM10K_XCAST_MODE_NONE;
+                /* fall through */
+        default:
+                break;
+        }
+
+        /* disable interface as it should not be able to request any */
+        return FM10K_XCAST_MODE_DISABLE;
+}
+
+/**
+ * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a default handler for port state requests. The port
+ * state requests for now are basic and consist of enabling or disabling
+ * the port.
+ **/
+s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
+                                 struct fm10k_mbx_info *mbx)
+{
+        struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+        u32 *result;
+        s32 err = 0;
+        u32 msg[2];
+        u8 mode = 0;
+
+        /* verify VF is allowed to enable even minimal mode */
+        if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
+                return FM10K_ERR_PARAM;
+
+        if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
+                result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+
+                /* XCAST mode update requested */
+                err = fm10k_tlv_attr_get_u8(result, &mode);
+                if (err)
+                        return FM10K_ERR_PARAM;
+
+                /* prep for possible demotion depending on capabilities */
+                mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
+
+                /* if mode is not currently enabled, enable it */
+                if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
+                        fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
+
+                /* swap mode back to a bit flag */
+                mode = FM10K_VF_FLAG_SET_MODE(mode);
+        } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
+                /* need to disable the port if it is already enabled */
+                if (FM10K_VF_FLAG_ENABLED(vf_info))
+                        err = fm10k_update_lport_state_pf(hw, vf_info->glort,
+                                                          1, false);
+
+                /* when enabling the port we should reset the rate limiters */
+                hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
+
+                /* set mode for minimal functionality */
+                mode = FM10K_VF_FLAG_SET_MODE_NONE;
+
+                /* generate port state response to notify VF it is ready */
+                fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
+                fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
+                mbx->ops.enqueue_tx(hw, mbx, msg);
+        }
+
+        /* if enable state toggled note the update */
+        if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
+                err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
+                                                  !!mode);
+
+        /* if state change succeeded, then update our stored state */
+        mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
+        if (!err)
+                vf_info->vf_flags = mode;
+
+        return err;
+}
+
+const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
+        FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+        FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
+        FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
+
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +/** + * fm10k_update_stats_hw_pf - Updates hardware related statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function collects and aggregates global and per queue hardware + * statistics. + **/ +static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop; + u32 id, id_prev; + + /* Use Tx queue 0 as a canary to detect a reset */ + id = fm10k_read_reg(hw, FM10K_TXQCTL(0)); + + /* Read Global Statistics */ + do { + timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT, + &stats->timeout); + ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur); + ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca); + um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um); + xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec); + vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP, + &stats->vlan_drop); + loopback_drop = fm10k_read_hw_stats_32b(hw, + FM10K_STATS_LOOPBACK_DROP, + &stats->loopback_drop); + nodesc_drop = fm10k_read_hw_stats_32b(hw, + FM10K_STATS_NODESC_DROP, + &stats->nodesc_drop); + + /* if value has not changed then we have consistent data */ + id_prev = id; + id = fm10k_read_reg(hw, FM10K_TXQCTL(0)); + } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK); + + /* drop non-ID bits and set VALID ID bit */ + id &= FM10K_TXQCTL_ID_MASK; + id |= FM10K_STAT_VALID; + + /* Update Global Statistics */ + if (stats->stats_idx == id) { + stats->timeout.count += timeout; + stats->ur.count += ur; + stats->ca.count += ca; + stats->um.count += um; + stats->xec.count += xec; + stats->vlan_drop.count += vlan_drop; + stats->loopback_drop.count += loopback_drop; + stats->nodesc_drop.count += nodesc_drop; + } + + /* Update bases and record current PF id */ + fm10k_update_hw_base_32b(&stats->timeout, timeout); + fm10k_update_hw_base_32b(&stats->ur, ur); + fm10k_update_hw_base_32b(&stats->ca, ca); + fm10k_update_hw_base_32b(&stats->um, um); + fm10k_update_hw_base_32b(&stats->xec, xec); + fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop); + fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop); + fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop); + stats->stats_idx = id; + + /* Update Queue Statistics */ + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for global and per queue hardware + * statistics. 
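/*
 * Illustrative sketch, not from the driver: fm10k_update_hw_stats_pf() above
 * treats the ID field of TXQCTL(0) as a canary -- if it changes while the
 * global counters are being read, a reset happened mid-read and the whole
 * batch is read again. The same pattern in miniature, with caller-supplied
 * accessors (names hypothetical):
 */
#include <stdint.h>

struct example_stats { uint32_t a, b, c; };

void example_read_consistent(uint32_t (*read_canary)(void),
                             void (*read_stats)(struct example_stats *),
                             struct example_stats *out)
{
        uint32_t id = read_canary(), id_prev;

        do {
                read_stats(out);    /* may be torn by a device reset */
                id_prev = id;
                id = read_canary(); /* unchanged canary => consistent batch */
        } while (id != id_prev);
}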
+ **/ +static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + /* Unbind Global Statistics */ + fm10k_unbind_hw_stats_32b(&stats->timeout); + fm10k_unbind_hw_stats_32b(&stats->ur); + fm10k_unbind_hw_stats_32b(&stats->ca); + fm10k_unbind_hw_stats_32b(&stats->um); + fm10k_unbind_hw_stats_32b(&stats->xec); + fm10k_unbind_hw_stats_32b(&stats->vlan_drop); + fm10k_unbind_hw_stats_32b(&stats->loopback_drop); + fm10k_unbind_hw_stats_32b(&stats->nodesc_drop); + + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_pf(hw, stats); +} + +/** + * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system + * @hw: pointer to hardware structure + * @dma_mask: 64 bit DMA mask required for platform + * + * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order + * to limit the access to memory beyond what is physically in the system. + **/ +static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask) +{ + /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */ + u32 phyaddr = (u32)(dma_mask >> 32); + + fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr); +} + +/** + * fm10k_get_fault_pf - Record a fault in one of the interface units + * @hw: pointer to hardware structure + * @type: pointer to fault type register offset + * @fault: pointer to memory location to record the fault + * + * Record the fault register contents to the fault data structure and + * clear the entry from the register. + * + * Returns ERR_PARAM if invalid register is specified or no error is present. + **/ +static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type, + struct fm10k_fault *fault) +{ + u32 func; + + /* verify the fault register is in range and is aligned */ + switch (type) { + case FM10K_PCA_FAULT: + case FM10K_THI_FAULT: + case FM10K_FUM_FAULT: + break; + default: + return FM10K_ERR_PARAM; + } + + /* only service faults that are valid */ + func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC); + if (!(func & FM10K_FAULT_FUNC_VALID)) + return FM10K_ERR_PARAM; + + /* read remaining fields */ + fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI); + fault->address <<= 32; + fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO); + fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO); + + /* clear valid bit to allow for next error */ + fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID); + + /* Record which function triggered the error */ + if (func & FM10K_FAULT_FUNC_PF) + fault->func = 0; + else + fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >> + FM10K_FAULT_FUNC_VF_SHIFT); + + /* record fault type */ + fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK; + + return 0; +} + +/** + * fm10k_request_lport_map_pf - Request LPORT map from the switch API + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[1]; + + /* issue request asking for LPORT map */ + fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_get_host_state_pf - Returns the state of the switch and mailbox + * @hw: pointer to hardware structure + * @switch_ready: pointer to boolean value that will record switch state + * + * This funciton will check the DMA_CTRL2 register and mailbox in order + * to determine if the switch 
is ready for the PF to begin requesting + * addresses and mapping traffic to the local interface. + **/ +static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) +{ + s32 ret_val = 0; + u32 dma_ctrl2; + + /* verify the switch is ready for interaction */ + dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); + if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) + goto out; + + /* retrieve generic host state info */ + ret_val = fm10k_get_host_state_generic(hw, switch_ready); + if (ret_val) + goto out; + + /* interface cannot receive traffic without logical ports */ + if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) + ret_val = fm10k_request_lport_map_pf(hw); + +out: + return ret_val; +} + +/* This structure defines the attibutes to be parsed below */ +const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler configures the lport mapping based on the reply from the + * switch API. + **/ +s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u16 glort, mask; + u32 dglort_map; + s32 err; + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP], + &dglort_map); + if (err) + return err; + + /* extract values out of the header */ + glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT); + mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK); + + /* verify mask is set and none of the masked bits in glort are set */ + if (!mask || (glort & ~mask)) + return FM10K_ERR_PARAM; + + /* verify the mask is contiguous, and that it is 1's followed by 0's */ + if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE) + return FM10K_ERR_PARAM; + + /* record the glort, mask, and port count */ + hw->mac.dglort_map = dglort_map; + + return 0; +} + +const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler configures the default VLAN for the PF + **/ +s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u16 glort, pvid; + u32 pvid_update; + s32 err; + + err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID], + &pvid_update); + if (err) + return err; + + /* extract values from the pvid update */ + glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT); + pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID); + + /* if glort is not valid return error */ + if (!fm10k_glort_valid_pf(hw, glort)) + return FM10K_ERR_PARAM; + + /* verify VID is valid */ + if (pvid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* record the port VLAN ID value */ + hw->mac.default_vid = pvid; + + return 0; +} + +/** + * fm10k_record_global_table_data - Move global table data to swapi table info + * @from: pointer to source table data structure + * @to: pointer to destination table info structure + * + * This function is will copy table_data to the table_info contained in + * the hw struct. 
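/*
 * Illustrative sketch, not from the driver: the mask sanity check in
 * fm10k_msg_lport_map_pf() above. "~(mask - 1) & mask" isolates the lowest
 * set bit of the mask; adding that bit back carries out of the 16-bit glort
 * field only when the mask is a contiguous run of 1s down from the top bit,
 * so any residue left in the low 16 bits marks a malformed mask.
 */
#include <stdint.h>
#include <assert.h>

static int example_mask_is_contiguous(uint16_t mask)
{
        uint32_t low_bit = ~((uint32_t)mask - 1) & mask;

        return mask && !(((uint32_t)mask + low_bit) & 0xFFFFu);
}

int main(void)
{
        assert(example_mask_is_contiguous(0xFF00));  /* 1s then 0s: valid */
        assert(example_mask_is_contiguous(0xFFFF));
        assert(!example_mask_is_contiguous(0x0FF0)); /* not top-aligned */
        assert(!example_mask_is_contiguous(0xF0F0)); /* gap in the run */
        return 0;
}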
+ **/ +static void fm10k_record_global_table_data(struct fm10k_global_table_data *from, + struct fm10k_swapi_table_info *to) +{ + /* convert from le32 struct to CPU byte ordered values */ + to->used = le32_to_cpu(from->used); + to->avail = le32_to_cpu(from->avail); +} + +const struct fm10k_tlv_attr fm10k_err_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, + sizeof(struct fm10k_swapi_error)), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_err_pf - Message handler for error reply + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler will capture the data for any error replies to previous + * messages that the PF has sent. + **/ +s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_swapi_error err_msg; + s32 err; + + /* extract structure from message */ + err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR], + &err_msg, sizeof(err_msg)); + if (err) + return err; + + /* record table status */ + fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac); + fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop); + fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu); + + /* record SW API status value */ + hw->swapi.status = le32_to_cpu(err_msg.status); + + return 0; +} + +const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP, + sizeof(struct fm10k_swapi_1588_timestamp)), + FM10K_TLV_ATTR_LAST +}; + +/* currently there is no shared 1588 timestamp handler */ + +/** + * fm10k_adjust_systime_pf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function will adjust the SYSTIME_CFG register contained in BAR 4 + * if this function is supported for BAR 4 access. The adjustment amount + * is based on the parts per billion value provided and adjusted to a + * value based on parts per 2^48 clock cycles. + * + * If adjustment is not supported or the requested value is too large + * we will return an error. + **/ +static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb) +{ + u64 systime_adjust; + + /* if sw_addr is not set we don't have switch register access */ + if (!hw->sw_addr) + return ppb ? FM10K_ERR_PARAM : 0; + + /* we must convert the value from parts per billion to parts per + * 2^48 cycles. In addition I have opted to only use the 30 most + * significant bits of the adjustment value as the 8 least + * significant bits are located in another register and represent + * a value significantly less than a part per billion, the result + * of dropping the 8 least significant bits is that the adjustment + * value is effectively multiplied by 2^8 when we write it. + * + * As a result of all this the math for this breaks down as follows: + * ppb / 10^9 == adjust * 2^8 / 2^48 + * If we solve this for adjust, and simplify it comes out as: + * ppb * 2^31 / 5^9 == adjust + */ + systime_adjust = (ppb < 0) ? 
-ppb : ppb; + systime_adjust <<= 31; + do_div(systime_adjust, 1953125); + + /* verify the requested adjustment value is in range */ + if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK) + return FM10K_ERR_PARAM; + + if (ppb < 0) + systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE; + + fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust); + + return 0; +} + +/** + * fm10k_read_systime_pf - Reads value of systime registers + * @hw: pointer to the hardware structure + * + * Function reads the content of 2 registers, combined to represent a 64 bit + * value measured in nanosecods. In order to guarantee the value is accurate + * we check the 32 most significant bits both before and after reading the + * 32 least significant bits to verify they didn't change as we were reading + * the registers. + **/ +static u64 fm10k_read_systime_pf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_SYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_pf[] = { + FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), + FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), + FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), + FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +static struct fm10k_mac_ops mac_ops_pf = { + .get_bus_info = &fm10k_get_bus_info_generic, + .reset_hw = &fm10k_reset_hw_pf, + .init_hw = &fm10k_init_hw_pf, + .start_hw = &fm10k_start_hw_generic, + .stop_hw = &fm10k_stop_hw_generic, + .is_slot_appropriate = &fm10k_is_slot_appropriate_pf, + .update_vlan = &fm10k_update_vlan_pf, + .read_mac_addr = &fm10k_read_mac_addr_pf, + .update_uc_addr = &fm10k_update_uc_addr_pf, + .update_mc_addr = &fm10k_update_mc_addr_pf, + .update_xcast_mode = &fm10k_update_xcast_mode_pf, + .update_int_moderator = &fm10k_update_int_moderator_pf, + .update_lport_state = &fm10k_update_lport_state_pf, + .update_hw_stats = &fm10k_update_hw_stats_pf, + .rebind_hw_stats = &fm10k_rebind_hw_stats_pf, + .configure_dglort_map = &fm10k_configure_dglort_map_pf, + .set_dma_mask = &fm10k_set_dma_mask_pf, + .get_fault = &fm10k_get_fault_pf, + .get_host_state = &fm10k_get_host_state_pf, + .adjust_systime = &fm10k_adjust_systime_pf, + .read_systime = &fm10k_read_systime_pf, +}; + +static struct fm10k_iov_ops iov_ops_pf = { + .assign_resources = &fm10k_iov_assign_resources_pf, + .configure_tc = &fm10k_iov_configure_tc_pf, + .assign_int_moderator = &fm10k_iov_assign_int_moderator_pf, + .assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf, + .reset_resources = &fm10k_iov_reset_resources_pf, + .set_lport = &fm10k_iov_set_lport_pf, + .reset_lport = &fm10k_iov_reset_lport_pf, + .update_stats = &fm10k_iov_update_stats_pf, + .report_timestamp = &fm10k_iov_report_timestamp_pf, +}; + +static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) +{ + fm10k_get_invariants_generic(hw); + + return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf); +} + +struct fm10k_info fm10k_pf_info = { + .mac = fm10k_mac_pf, + .get_invariants = &fm10k_get_invariants_pf, + .mac_ops = &mac_ops_pf, + .iov_ops = &iov_ops_pf, +}; diff --git 
a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h new file mode 100644 index 000000000..7ab1db4ff --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -0,0 +1,135 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_PF_H_ +#define _FM10K_PF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort); +u16 fm10k_queues_per_pool(struct fm10k_hw *hw); +u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx); + +enum fm10k_pf_tlv_msg_id_v1 { + FM10K_PF_MSG_ID_TEST = 0x000, /* msg ID reserved */ + FM10K_PF_MSG_ID_XCAST_MODES = 0x001, + FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE = 0x002, + FM10K_PF_MSG_ID_LPORT_MAP = 0x100, + FM10K_PF_MSG_ID_LPORT_CREATE = 0x200, + FM10K_PF_MSG_ID_LPORT_DELETE = 0x201, + FM10K_PF_MSG_ID_CONFIG = 0x300, + FM10K_PF_MSG_ID_UPDATE_PVID = 0x400, + FM10K_PF_MSG_ID_CREATE_FLOW_TABLE = 0x501, + FM10K_PF_MSG_ID_DELETE_FLOW_TABLE = 0x502, + FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503, + FM10K_PF_MSG_ID_DELETE_FLOW = 0x504, + FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505, + FM10K_PF_MSG_ID_GET_1588_INFO = 0x506, + FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701, +}; + +enum fm10k_pf_tlv_attr_id_v1 { + FM10K_PF_ATTR_ID_ERR = 0x00, + FM10K_PF_ATTR_ID_LPORT_MAP = 0x01, + FM10K_PF_ATTR_ID_XCAST_MODE = 0x02, + FM10K_PF_ATTR_ID_MAC_UPDATE = 0x03, + FM10K_PF_ATTR_ID_VLAN_UPDATE = 0x04, + FM10K_PF_ATTR_ID_CONFIG = 0x05, + FM10K_PF_ATTR_ID_CREATE_FLOW_TABLE = 0x06, + FM10K_PF_ATTR_ID_DELETE_FLOW_TABLE = 0x07, + FM10K_PF_ATTR_ID_UPDATE_FLOW = 0x08, + FM10K_PF_ATTR_ID_FLOW_STATE = 0x09, + FM10K_PF_ATTR_ID_FLOW_HANDLE = 0x0A, + FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B, + FM10K_PF_ATTR_ID_PORT = 0x0C, + FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D, + FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10, +}; + +#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0 +#define FM10K_MSG_LPORT_MAP_GLORT_SIZE 16 +#define FM10K_MSG_LPORT_MAP_MASK_SHIFT 16 +#define FM10K_MSG_LPORT_MAP_MASK_SIZE 16 + +#define FM10K_MSG_UPDATE_PVID_GLORT_SHIFT 0 +#define FM10K_MSG_UPDATE_PVID_GLORT_SIZE 16 +#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 +#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 + +struct fm10k_mac_update { + __le32 mac_lower; + __le16 mac_upper; + __le16 vlan; + __le16 glort; + u8 flags; + u8 action; +}; + +struct fm10k_global_table_data { + __le32 used; + __le32 avail; +}; + +struct fm10k_swapi_error { + __le32 status; + struct fm10k_global_table_data mac; + struct fm10k_global_table_data nexthop; + struct fm10k_global_table_data ffu; +}; + +struct fm10k_swapi_1588_timestamp { + __le64 egress; + __le64 ingress; + __le16 dglort; + __le16 sglort; +}; + +s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; 
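/*
 * Worked derivation, not from the driver sources: the constant 1953125 in
 * fm10k_adjust_systime_pf() above is 5^9. The adjustment register counts in
 * parts per 2^48 clock cycles and its 8 least-significant bits live
 * elsewhere, so the written value is effectively scaled by 2^8:
 *
 *     ppb / 10^9 = adjust * 2^8 / 2^48
 *     adjust     = ppb * 2^40 / 10^9 = ppb * 2^31 / 5^9
 *
 * which matches the shift-by-31 and do_div(..., 1953125) above. A quick
 * self-check:
 */
#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint64_t ppb = 12345;
        uint64_t adjust = (ppb << 31) / 1953125u;

        /* same value computed directly from the defining ratio */
        uint64_t check = ppb * (1ull << 40) / 1000000000u;

        assert(adjust == check);
        return 0;
}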
+#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \ + fm10k_lport_map_msg_attr, func) +s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[]; +#define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \ + fm10k_update_pvid_msg_attr, func) + +s32 fm10k_msg_err_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; +#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[]; +#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \ + fm10k_1588_timestamp_msg_attr, func) + +s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; + +extern struct fm10k_info fm10k_pf_info; +#endif /* _FM10K_PF_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c new file mode 100644 index 000000000..9043633c3 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c @@ -0,0 +1,461 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include + +#include "fm10k.h" + +#define FM10K_TS_TX_TIMEOUT (HZ * 15) + +void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, + struct skb_shared_hwtstamps *hwtstamp, + u64 systime) +{ + unsigned long flags; + + read_lock_irqsave(&interface->systime_lock, flags); + systime += interface->ptp_adjust; + read_unlock_irqrestore(&interface->systime_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(systime); +} + +static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface, + __le16 dglort) +{ + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *skb; + + skb_queue_walk(list, skb) { + if (FM10K_CB(skb)->fi.w.dglort == dglort) + return skb; + } + + return NULL; +} + +void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb) +{ + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *clone; + unsigned long flags; + + /* create clone for us to return on the Tx path */ + clone = skb_clone_sk(skb); + if (!clone) + return; + + FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT; + spin_lock_irqsave(&list->lock, flags); + + /* attempt to locate any buffers with the same dglort, + * if none are present then insert skb in tail of list + */ + skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort); + if (!skb) + __skb_queue_tail(list, clone); + + spin_unlock_irqrestore(&list->lock, flags); + + /* if list is already has one then we just free the clone */ + if (skb) + kfree_skb(skb); + else + skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; +} + +void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, + u64 systime) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&list->lock, flags); + + /* attempt to locate and pull the sk_buff out of the list */ + skb = fm10k_ts_tx_skb(interface, dglort); + if (skb) + __skb_unlink(skb, list); + + spin_unlock_irqrestore(&list->lock, flags); + + /* if not found do nothing */ + if (!skb) + return; + + /* timestamp the sk_buff and return it to the socket */ + fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime); + skb_complete_tx_timestamp(skb, &shhwtstamps); +} + +void fm10k_ts_tx_subtask(struct fm10k_intfc *interface) +{ + struct sk_buff_head *list = &interface->ts_tx_skb_queue; + struct sk_buff *skb, *tmp; + unsigned long flags; + + /* If we're down or resetting, just bail */ + if (test_bit(__FM10K_DOWN, &interface->state) || + test_bit(__FM10K_RESETTING, &interface->state)) + return; + + spin_lock_irqsave(&list->lock, flags); + + /* walk though the list and flush any expired timestamp packets */ + skb_queue_walk_safe(list, skb, tmp) { + if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout)) + continue; + __skb_unlink(skb, list); + kfree_skb(skb); + interface->tx_hwtstamp_timeouts++; + } + + spin_unlock_irqrestore(&list->lock, flags); +} + +static u64 fm10k_systime_read(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + + return hw->mac.ops.read_systime(hw); +} + +void fm10k_ts_reset(struct fm10k_intfc *interface) +{ + s64 ns = ktime_to_ns(ktime_get_real()); + unsigned long flags; + + /* reinitialize the clock */ + write_lock_irqsave(&interface->systime_lock, flags); + interface->ptp_adjust = fm10k_systime_read(interface) - ns; + write_unlock_irqrestore(&interface->systime_lock, flags); +} + +void fm10k_ts_init(struct fm10k_intfc 
*interface) +{ + /* Initialize lock protecting systime access */ + rwlock_init(&interface->systime_lock); + + /* Initialize skb queue for pending timestamp requests */ + skb_queue_head_init(&interface->ts_tx_skb_queue); + + /* reset the clock to current kernel time */ + fm10k_ts_reset(interface); +} + +/** + * fm10k_get_ts_config - get current hardware timestamping configuration + * @netdev: network interface device structure + * @ifreq: ioctl data + * + * This function returns the current timestamping settings. Rather than + * attempt to deconstruct registers to fill in the values, simply keep a copy + * of the old settings around, and return a copy when requested. + */ +int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct hwtstamp_config *config = &interface->ts_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * fm10k_set_ts_config - control hardware time stamping + * @netdev: network interface device structure + * @ifreq: ioctl data + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. + */ +int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct hwtstamp_config ts_config; + + if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config))) + return -EFAULT; + + /* reserved for future extensions */ + if (ts_config.flags) + return -EINVAL; + + switch (ts_config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ON: + /* we likely need some check here to see if this is supported */ + break; + default: + return -ERANGE; + } + + switch (ts_config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_ALL: + interface->flags |= FM10K_FLAG_RX_TS_ENABLED; + ts_config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* save these settings for future reference */ + interface->ts_config = ts_config; + + return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ? 
+ -EFAULT : 0; +} + +static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct fm10k_intfc *interface; + struct fm10k_hw *hw; + int err; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + hw = &interface->hw; + + err = hw->mac.ops.adjust_systime(hw, ppb); + + /* the only error we should see is if the value is out of range */ + return (err == FM10K_ERR_PARAM) ? -ERANGE : err; +} + +static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct fm10k_intfc *interface; + unsigned long flags; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + + write_lock_irqsave(&interface->systime_lock, flags); + interface->ptp_adjust += delta; + write_unlock_irqrestore(&interface->systime_lock, flags); + + return 0; +} + +static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct fm10k_intfc *interface; + unsigned long flags; + u64 now; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + + read_lock_irqsave(&interface->systime_lock, flags); + now = fm10k_systime_read(interface) + interface->ptp_adjust; + read_unlock_irqrestore(&interface->systime_lock, flags); + + *ts = ns_to_timespec64(now); + + return 0; +} + +static int fm10k_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct fm10k_intfc *interface; + unsigned long flags; + u64 ns = timespec64_to_ns(ts); + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + + write_lock_irqsave(&interface->systime_lock, flags); + interface->ptp_adjust = fm10k_systime_read(interface) - ns; + write_unlock_irqrestore(&interface->systime_lock, flags); + + return 0; +} + +static int fm10k_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int __always_unused on) +{ + struct ptp_clock_time *t = &rq->perout.period; + struct fm10k_intfc *interface; + struct fm10k_hw *hw; + u64 period; + u32 step; + + /* we can only support periodic output */ + if (rq->type != PTP_CLK_REQ_PEROUT) + return -EINVAL; + + /* verify the requested channel is there */ + if (rq->perout.index >= ptp->n_per_out) + return -EINVAL; + + /* we cannot enforce start time as there is no + * mechanism for that in the hardware, we can only control + * the period. 
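+ * For example, a requested period of 1 s is programmed as 1,000,000,000 ns;
+ * the checks below reject periods above 4 s since the 32-bit pulse
+ * register can only hold values up to U32_MAX ns (roughly 4.29 s).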
+ */ + + /* we cannot support periods greater than 4 seconds due to reg limit */ + if (t->sec > 4 || t->sec < 0) + return -ERANGE; + + interface = container_of(ptp, struct fm10k_intfc, ptp_caps); + hw = &interface->hw; + + /* we simply cannot support the operation if we don't have BAR4 */ + if (!hw->sw_addr) + return -ENOTSUPP; + + /* convert to unsigned 64b ns, verify we can put it in a 32b register */ + period = t->sec * 1000000000LL + t->nsec; + + /* determine the minimum size for period */ + step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) & + FM10K_SYSTIME_CFG_STEP_MASK); + + /* verify the value is in range supported by hardware */ + if ((period && (period < step)) || (period > U32_MAX)) + return -ERANGE; + + /* notify hardware of request to being sending pulses */ + fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index), + (u32)period); + + return 0; +} + +static struct ptp_pin_desc fm10k_ptp_pd[2] = { + { + .name = "IEEE1588_PULSE0", + .index = 0, + .func = PTP_PF_PEROUT, + .chan = 0 + }, + { + .name = "IEEE1588_PULSE1", + .index = 1, + .func = PTP_PF_PEROUT, + .chan = 1 + } +}; + +static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + /* verify the requested pin is there */ + if (pin >= ptp->n_pins || !ptp->pin_config) + return -EINVAL; + + /* enforce locked channels, no changing them */ + if (chan != ptp->pin_config[pin].chan) + return -EINVAL; + + /* we want to keep the functions locked as well */ + if (func != ptp->pin_config[pin].func) + return -EINVAL; + + return 0; +} + +void fm10k_ptp_register(struct fm10k_intfc *interface) +{ + struct ptp_clock_info *ptp_caps = &interface->ptp_caps; + struct device *dev = &interface->pdev->dev; + struct ptp_clock *ptp_clock; + + snprintf(ptp_caps->name, sizeof(ptp_caps->name), + "%s", interface->netdev->name); + ptp_caps->owner = THIS_MODULE; + /* This math is simply the inverse of the math in + * fm10k_adjust_systime_pf applied to an adjustment value + * of 2^30 - 1 which is the maximum value of the register: + * max_ppb == ((2^30 - 1) * 5^9) / 2^31 + */ + ptp_caps->max_adj = 976562; + ptp_caps->adjfreq = fm10k_ptp_adjfreq; + ptp_caps->adjtime = fm10k_ptp_adjtime; + ptp_caps->gettime64 = fm10k_ptp_gettime; + ptp_caps->settime64 = fm10k_ptp_settime; + + /* provide pins if BAR4 is accessible */ + if (interface->sw_addr) { + /* enable periodic outputs */ + ptp_caps->n_per_out = 2; + ptp_caps->enable = fm10k_ptp_enable; + + /* enable clock pins */ + ptp_caps->verify = fm10k_ptp_verify; + ptp_caps->n_pins = 2; + ptp_caps->pin_config = fm10k_ptp_pd; + } + + ptp_clock = ptp_clock_register(ptp_caps, dev); + if (IS_ERR(ptp_clock)) { + ptp_clock = NULL; + dev_err(dev, "ptp_clock_register failed\n"); + } else { + dev_info(dev, "registered PHC device %s\n", ptp_caps->name); + } + + interface->ptp_clock = ptp_clock; +} + +void fm10k_ptp_unregister(struct fm10k_intfc *interface) +{ + struct ptp_clock *ptp_clock = interface->ptp_clock; + struct device *dev = &interface->pdev->dev; + + if (!ptp_clock) + return; + + interface->ptp_clock = NULL; + + ptp_clock_unregister(ptp_clock); + dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c new file mode 100644 index 000000000..9b29d7b03 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -0,0 +1,863 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_tlv.h" + +/** + * fm10k_tlv_msg_init - Initialize message block for TLV data storage + * @msg: Pointer to message block + * @msg_id: Message ID indicating message type + * + * This function return success if provided with a valid message pointer + **/ +s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id) +{ + /* verify pointer is not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + *msg = (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT) | msg_id; + + return 0; +} + +/** + * fm10k_tlv_attr_put_null_string - Place null terminated string on message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @string: Pointer to string to be stored in attribute + * + * This function will reorder a string to be CPU endian and store it in + * the attribute buffer. It will return success if provided with a valid + * pointers. + **/ +s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id, + const unsigned char *string) +{ + u32 attr_data = 0, len = 0; + u32 *attr; + + /* verify pointers are not NULL */ + if (!string || !msg) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy string into local variable and then write to msg */ + do { + /* write data to message */ + if (len && !(len % 4)) { + attr[len / 4] = attr_data; + attr_data = 0; + } + + /* record character to offset location */ + attr_data |= (u32)(*string) << (8 * (len % 4)); + len++; + + /* test for NULL and then increment */ + } while (*(string++)); + + /* write last piece of data to message */ + attr[(len + 3) / 4] = attr_data; + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_null_string - Get null terminated string from attribute + * @attr: Pointer to attribute + * @string: Pointer to location of destination string + * + * This function pulls the string back out of the attribute and will place + * it in the array pointed by by string. It will return success if provided + * with a valid pointers. + **/ +s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string) +{ + u32 len; + + /* verify pointers are not NULL */ + if (!string || !attr) + return FM10K_ERR_PARAM; + + len = *attr >> FM10K_TLV_LEN_SHIFT; + attr++; + + while (len--) + string[len] = (u8)(attr[len / 4] >> (8 * (len % 4))); + + return 0; +} + +/** + * fm10k_tlv_attr_put_mac_vlan - Store MAC/VLAN attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @mac_addr: MAC address to be stored + * + * This function will reorder a MAC address to be CPU endian and store it + * in the attribute buffer. It will return success if provided with a + * valid pointers. 
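+ *
+ * For example, the TLV self-test later in this file stores its test
+ * address and VLAN with:
+ *	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
+ *				    test_mac, test_vlan);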
+ **/ +s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id, + const u8 *mac_addr, u16 vlan) +{ + u32 len = ETH_ALEN << FM10K_TLV_LEN_SHIFT; + u32 *attr; + + /* verify pointers are not NULL */ + if (!msg || !mac_addr) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* record attribute header, update message length */ + attr[0] = len | attr_id; + + /* copy value into local variable and then write to msg */ + attr[1] = le32_to_cpu(*(const __le32 *)&mac_addr[0]); + attr[2] = le16_to_cpu(*(const __le16 *)&mac_addr[4]); + attr[2] |= (u32)vlan << 16; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute + * @attr: Pointer to attribute + * @attr_id: Attribute ID + * @mac_addr: location of buffer to store MAC address + * + * This function pulls the MAC address back out of the attribute and will + * place it in the array pointed by by mac_addr. It will return success + * if provided with a valid pointers. + **/ +s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan) +{ + /* verify pointers are not NULL */ + if (!mac_addr || !attr) + return FM10K_ERR_PARAM; + + *(__le32 *)&mac_addr[0] = cpu_to_le32(attr[1]); + *(__le16 *)&mac_addr[4] = cpu_to_le16((u16)(attr[2])); + *vlan = (u16)(attr[2] >> 16); + + return 0; +} + +/** + * fm10k_tlv_attr_put_bool - Add header indicating value "true" + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will simply add an attribute header, the fact + * that the header is here means the attribute value is true, else + * it is false. The function will return success if provided with a + * valid pointers. + **/ +s32 fm10k_tlv_attr_put_bool(u32 *msg, u16 attr_id) +{ + /* verify pointers are not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* record attribute header */ + msg[FM10K_TLV_DWORD_LEN(*msg)] = attr_id; + + /* add header length to length */ + *msg += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + + return 0; +} + +/** + * fm10k_tlv_attr_put_value - Store integer value attribute in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @value: Value to be written + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in a message attribute. The function will return success provided + * that msg is a valid pointer, and len is 1, 2, 4, or 8. + **/ +s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) +{ + u32 *attr; + + /* verify non-null msg and len is 1, 2, 4, or 8 */ + if (!msg || !len || len > 8 || (len & (len - 1))) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + if (len < 4) { + attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1); + } else { + attr[1] = (u32)value; + if (len > 4) + attr[2] = (u32)(value >> 32); + } + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_value - Get integer value stored in attribute + * @attr: Pointer to attribute + * @value: Pointer to destination buffer + * @len: Size of value + * + * This function will place an integer value of up to 8 bytes in size + * in the offset pointed to by value. 
The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. + **/ +s32 fm10k_tlv_attr_get_value(u32 *attr, void *value, u32 len) +{ + /* verify pointers are not NULL */ + if (!attr || !value) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + if (len == 8) + *(u64 *)value = ((u64)attr[2] << 32) | attr[1]; + else if (len == 4) + *(u32 *)value = attr[1]; + else if (len == 2) + *(u16 *)value = (u16)attr[1]; + else + *(u8 *)value = (u8)attr[1]; + + return 0; +} + +/** + * fm10k_tlv_attr_put_le_struct - Store little endian structure in message + * @msg: Pointer to message block + * @attr_id: Attribute ID + * @le_struct: Pointer to structure to be written + * @len: Size of le_struct + * + * This function will place a little endian structure value in a message + * attribute. The function will return success provided that all pointers + * are valid and length is a non-zero multiple of 4. + **/ +s32 fm10k_tlv_attr_put_le_struct(u32 *msg, u16 attr_id, + const void *le_struct, u32 len) +{ + const __le32 *le32_ptr = (const __le32 *)le_struct; + u32 *attr; + u32 i; + + /* verify non-null msg and len is in 32 bit words */ + if (!msg || !len || (len % 4)) + return FM10K_ERR_PARAM; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + /* copy le32 structure into host byte order at 32b boundaries */ + for (i = 0; i < (len / 4); i++) + attr[i + 1] = le32_to_cpu(le32_ptr[i]); + + /* record attribute header, update message length */ + len <<= FM10K_TLV_LEN_SHIFT; + attr[0] = len | attr_id; + + /* add header length to length */ + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += FM10K_TLV_LEN_ALIGN(len); + + return 0; +} + +/** + * fm10k_tlv_attr_get_le_struct - Get little endian struct form attribute + * @attr: Pointer to attribute + * @le_struct: Pointer to structure to be written + * @len: Size of structure + * + * This function will place a little endian structure in the buffer + * pointed to by le_struct. The function will return success + * provided that pointers are valid and the len value matches the + * attribute length. + **/ +s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len) +{ + __le32 *le32_ptr = (__le32 *)le_struct; + u32 i; + + /* verify pointers are not NULL */ + if (!le_struct || !attr) + return FM10K_ERR_PARAM; + + if ((*attr >> FM10K_TLV_LEN_SHIFT) != len) + return FM10K_ERR_PARAM; + + attr++; + + for (i = 0; len; i++, len -= 4) + le32_ptr[i] = cpu_to_le32(attr[i]); + + return 0; +} + +/** + * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * @msg: Pointer to message block + * @attr_id: Attribute ID + * + * This function will mark off a new nested region for encapsulating + * a given set of attributes. The idea is if you wish to place a secondary + * structure within the message this mechanism allows for that. The + * function will return NULL on failure, and a pointer to the start + * of the nested attributes on success. + **/ +u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id) +{ + u32 *attr; + + /* verify pointer is not NULL */ + if (!msg) + return NULL; + + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + + attr[0] = attr_id; + + /* return pointer to nest header */ + return attr; +} + +/** + * fm10k_tlv_attr_nest_start - Start a set of nested attributes + * @msg: Pointer to message block + * + * This function closes off an existing set of nested attributes. The + * message pointer should be pointing to the parent of the nest. 
So in + * the case of a nest within the nest this would be the outer nest pointer. + * This function will return success provided all pointers are valid. + **/ +s32 fm10k_tlv_attr_nest_stop(u32 *msg) +{ + u32 *attr; + u32 len; + + /* verify pointer is not NULL */ + if (!msg) + return FM10K_ERR_PARAM; + + /* locate the nested header and retrieve its length */ + attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; + len = (attr[0] >> FM10K_TLV_LEN_SHIFT) << FM10K_TLV_LEN_SHIFT; + + /* only include nest if data was added to it */ + if (len) { + len += FM10K_TLV_HDR_LEN << FM10K_TLV_LEN_SHIFT; + *msg += len; + } + + return 0; +} + +/** + * fm10k_tlv_attr_validate - Validate attribute metadata + * @attr: Pointer to attribute + * @tlv_attr: Type and length info for attribute + * + * This function does some basic validation of the input TLV. It + * verifies the length, and in the case of null terminated strings + * it verifies that the last byte is null. The function will + * return FM10K_ERR_PARAM if any attribute is malformed, otherwise + * it returns 0. + **/ +static s32 fm10k_tlv_attr_validate(u32 *attr, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 attr_id = *attr & FM10K_TLV_ID_MASK; + u16 len = *attr >> FM10K_TLV_LEN_SHIFT; + + /* verify this is an attribute and not a message */ + if (*attr & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT)) + return FM10K_ERR_PARAM; + + /* search through the list of attributes to find a matching ID */ + while (tlv_attr->id < attr_id) + tlv_attr++; + + /* if didn't find a match then we should exit */ + if (tlv_attr->id != attr_id) + return FM10K_NOT_IMPLEMENTED; + + /* move to start of attribute data */ + attr++; + + switch (tlv_attr->type) { + case FM10K_TLV_NULL_STRING: + if (!len || + (attr[(len - 1) / 4] & (0xFF << (8 * ((len - 1) % 4))))) + return FM10K_ERR_PARAM; + if (len > tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_MAC_ADDR: + if (len != ETH_ALEN) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_BOOL: + if (len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_UNSIGNED: + case FM10K_TLV_SIGNED: + if (len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_LE_STRUCT: + /* struct must be 4 byte aligned */ + if ((len % 4) || len != tlv_attr->len) + return FM10K_ERR_PARAM; + break; + case FM10K_TLV_NESTED: + /* nested attributes must be 4 byte aligned */ + if (len % 4) + return FM10K_ERR_PARAM; + break; + default: + /* attribute id is mapped to bad value */ + return FM10K_ERR_PARAM; + } + + return 0; +} + +/** + * fm10k_tlv_attr_parse - Parses stream of attribute data + * @attr: Pointer to attribute list + * @results: Pointer array to store pointers to attributes + * @tlv_attr: Type and length info for attributes + * + * This function validates a stream of attributes and parses them + * up into an array of pointers stored in results. The function will + * return FM10K_ERR_PARAM on any input or message error, + * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array + * and 0 on success. 
+ **/ +s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, + const struct fm10k_tlv_attr *tlv_attr) +{ + u32 i, attr_id, offset = 0; + s32 err = 0; + u16 len; + + /* verify pointers are not NULL */ + if (!attr || !results) + return FM10K_ERR_PARAM; + + /* initialize results to NULL */ + for (i = 0; i < FM10K_TLV_RESULTS_MAX; i++) + results[i] = NULL; + + /* pull length from the message header */ + len = *attr >> FM10K_TLV_LEN_SHIFT; + + /* no attributes to parse if there is no length */ + if (!len) + return 0; + + /* no attributes to parse, just raw data, message becomes attribute */ + if (!tlv_attr) { + results[0] = attr; + return 0; + } + + /* move to start of attribute data */ + attr++; + + /* run through list parsing all attributes */ + while (offset < len) { + attr_id = *attr & FM10K_TLV_ID_MASK; + + if (attr_id < FM10K_TLV_RESULTS_MAX) + err = fm10k_tlv_attr_validate(attr, tlv_attr); + else + err = FM10K_NOT_IMPLEMENTED; + + if (err < 0) + return err; + if (!err) + results[attr_id] = attr; + + /* update offset */ + offset += FM10K_TLV_DWORD_LEN(*attr) * 4; + + /* move to next attribute */ + attr = &attr[FM10K_TLV_DWORD_LEN(*attr)]; + } + + /* we should find ourselves at the end of the list */ + if (offset != len) + return FM10K_ERR_PARAM; + + return 0; +} + +/** + * fm10k_tlv_msg_parse - Parses message header and calls function handler + * @hw: Pointer to hardware structure + * @msg: Pointer to message + * @mbx: Pointer to mailbox information structure + * @func: Function array containing list of message handling functions + * + * This function should be the first function called upon receiving a + * message. The handler will identify the message type and call the correct + * handler for the given message. It will return the value from the function + * call on a recognized message type, otherwise it will return + * FM10K_NOT_IMPLEMENTED on an unrecognized type. + **/ +s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg, + struct fm10k_mbx_info *mbx, + const struct fm10k_msg_data *data) +{ + u32 *results[FM10K_TLV_RESULTS_MAX]; + u32 msg_id; + s32 err; + + /* verify pointer is not NULL */ + if (!msg || !data) + return FM10K_ERR_PARAM; + + /* verify this is a message and not an attribute */ + if (!(*msg & (FM10K_TLV_FLAGS_MSG << FM10K_TLV_FLAGS_SHIFT))) + return FM10K_ERR_PARAM; + + /* grab message ID */ + msg_id = *msg & FM10K_TLV_ID_MASK; + + while (data->id < msg_id) + data++; + + /* if we didn't find it then pass it up as an error */ + if (data->id != msg_id) { + while (data->id != FM10K_TLV_ERROR) + data++; + } + + /* parse the attributes into the results list */ + err = fm10k_tlv_attr_parse(msg, results, data->attr); + if (err < 0) + return err; + + return data->func(hw, results, mbx); +} + +/** + * fm10k_tlv_msg_error - Default handler for unrecognized TLV message IDs + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Unused mailbox pointer + * + * This function is a default handler for unrecognized messages. At a + * a minimum it just indicates that the message requested was + * unimplemented. 
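+ *
+ * Handler tables generally terminate with this function so that unknown
+ * message IDs are still caught, e.g. (illustrative table, name hypothetical):
+ *	static const struct fm10k_msg_data example_msg_data[] = {
+ *		FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
+ *		FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
+ *	};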
+ **/ +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + return FM10K_NOT_IMPLEMENTED; +} + +static const unsigned char test_str[] = "fm10k"; +static const unsigned char test_mac[ETH_ALEN] = { 0x12, 0x34, 0x56, + 0x78, 0x9a, 0xbc }; +static const u16 test_vlan = 0x0FED; +static const u64 test_u64 = 0xfedcba9876543210ull; +static const u32 test_u32 = 0x87654321; +static const u16 test_u16 = 0x8765; +static const u8 test_u8 = 0x87; +static const s64 test_s64 = -0x123456789abcdef0ll; +static const s32 test_s32 = -0x1235678; +static const s16 test_s16 = -0x1234; +static const s8 test_s8 = -0x12; +static const __le32 test_le[2] = { cpu_to_le32(0x12345678), + cpu_to_le32(0x9abcdef0)}; + +/* The message below is meant to be used as a test message to demonstrate + * how to use the TLV interface and to test the types. Normally this code + * be compiled out by stripping the code wrapped in FM10K_TLV_TEST_MSG + */ +const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = { + FM10K_TLV_ATTR_NULL_STRING(FM10K_TEST_MSG_STRING, 80), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_TEST_MSG_MAC_ADDR), + FM10K_TLV_ATTR_U8(FM10K_TEST_MSG_U8), + FM10K_TLV_ATTR_U16(FM10K_TEST_MSG_U16), + FM10K_TLV_ATTR_U32(FM10K_TEST_MSG_U32), + FM10K_TLV_ATTR_U64(FM10K_TEST_MSG_U64), + FM10K_TLV_ATTR_S8(FM10K_TEST_MSG_S8), + FM10K_TLV_ATTR_S16(FM10K_TEST_MSG_S16), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_S32), + FM10K_TLV_ATTR_S64(FM10K_TEST_MSG_S64), + FM10K_TLV_ATTR_LE_STRUCT(FM10K_TEST_MSG_LE_STRUCT, 8), + FM10K_TLV_ATTR_NESTED(FM10K_TEST_MSG_NESTED), + FM10K_TLV_ATTR_S32(FM10K_TEST_MSG_RESULT), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_tlv_msg_test_generate_data - Stuff message with data + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function is meant to load a message buffer with attribute data + **/ +static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) +{ + if (attr_flags & (1 << FM10K_TEST_MSG_STRING)) + fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, + test_str); + if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR)) + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, + test_mac, test_vlan); + if (attr_flags & (1 << FM10K_TEST_MSG_U8)) + fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); + if (attr_flags & (1 << FM10K_TEST_MSG_U16)) + fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); + if (attr_flags & (1 << FM10K_TEST_MSG_U32)) + fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); + if (attr_flags & (1 << FM10K_TEST_MSG_U64)) + fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); + if (attr_flags & (1 << FM10K_TEST_MSG_S8)) + fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); + if (attr_flags & (1 << FM10K_TEST_MSG_S16)) + fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); + if (attr_flags & (1 << FM10K_TEST_MSG_S32)) + fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); + if (attr_flags & (1 << FM10K_TEST_MSG_S64)) + fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); + if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT)) + fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, + test_le, 8); +} + +/** + * fm10k_tlv_msg_test_create - Create a test message testing all attributes + * @msg: Pointer to message + * @attr_flags: List of flags indicating what attributes to add + * + * This function is meant to load a message buffer with all attribute types + * including a nested attribute. 
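+ *
+ * For illustration, a caller wanting only the string and MAC/VLAN test
+ * attributes could request:
+ *	fm10k_tlv_msg_test_create(msg, (1 << FM10K_TEST_MSG_STRING) |
+ *				       (1 << FM10K_TEST_MSG_MAC_ADDR));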
+ **/ +void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) +{ + u32 *nest = NULL; + + fm10k_tlv_msg_init(msg, FM10K_TLV_MSG_ID_TEST); + + fm10k_tlv_msg_test_generate_data(msg, attr_flags); + + /* check for nested attributes */ + attr_flags >>= FM10K_TEST_MSG_NESTED; + + if (attr_flags) { + nest = fm10k_tlv_attr_nest_start(msg, FM10K_TEST_MSG_NESTED); + + fm10k_tlv_msg_test_generate_data(nest, attr_flags); + + fm10k_tlv_attr_nest_stop(msg); + } +} + +/** + * fm10k_tlv_msg_test - Validate all results on test message receive + * @hw: Pointer to hardware structure + * @results: Pointer array to attributes in the message + * @mbx: Pointer to mailbox information structure + * + * This function does a check to verify all attributes match what the test + * message placed in the message buffer. It is the default handler + * for TLV test messages. + **/ +s32 fm10k_tlv_msg_test(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u32 *nest_results[FM10K_TLV_RESULTS_MAX]; + unsigned char result_str[80]; + unsigned char result_mac[ETH_ALEN]; + s32 err = 0; + __le32 result_le[2]; + u16 result_vlan; + u64 result_u64; + u32 result_u32; + u16 result_u16; + u8 result_u8; + s64 result_s64; + s32 result_s32; + s16 result_s16; + s8 result_s8; + u32 reply[3]; + + /* retrieve results of a previous test */ + if (!!results[FM10K_TEST_MSG_RESULT]) + return fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_RESULT], + &mbx->test_result); + +parse_nested: + if (!!results[FM10K_TEST_MSG_STRING]) { + err = fm10k_tlv_attr_get_null_string( + results[FM10K_TEST_MSG_STRING], + result_str); + if (!err && memcmp(test_str, result_str, sizeof(test_str))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_MAC_ADDR]) { + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_TEST_MSG_MAC_ADDR], + result_mac, &result_vlan); + if (!err && memcmp(test_mac, result_mac, ETH_ALEN)) + err = FM10K_ERR_INVALID_VALUE; + if (!err && test_vlan != result_vlan) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U8]) { + err = fm10k_tlv_attr_get_u8(results[FM10K_TEST_MSG_U8], + &result_u8); + if (!err && test_u8 != result_u8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U16]) { + err = fm10k_tlv_attr_get_u16(results[FM10K_TEST_MSG_U16], + &result_u16); + if (!err && test_u16 != result_u16) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U32]) { + err = fm10k_tlv_attr_get_u32(results[FM10K_TEST_MSG_U32], + &result_u32); + if (!err && test_u32 != result_u32) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_U64]) { + err = fm10k_tlv_attr_get_u64(results[FM10K_TEST_MSG_U64], + &result_u64); + if (!err && test_u64 != result_u64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S8]) { + err = fm10k_tlv_attr_get_s8(results[FM10K_TEST_MSG_S8], + &result_s8); + if (!err && test_s8 != result_s8) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S16]) { + err = fm10k_tlv_attr_get_s16(results[FM10K_TEST_MSG_S16], + &result_s16); + if (!err && test_s16 != result_s16) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S32]) { + err = fm10k_tlv_attr_get_s32(results[FM10K_TEST_MSG_S32], + &result_s32); + if (!err && test_s32 != result_s32) 
+ err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_S64]) { + err = fm10k_tlv_attr_get_s64(results[FM10K_TEST_MSG_S64], + &result_s64); + if (!err && test_s64 != result_s64) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + if (!!results[FM10K_TEST_MSG_LE_STRUCT]) { + err = fm10k_tlv_attr_get_le_struct( + results[FM10K_TEST_MSG_LE_STRUCT], + result_le, + sizeof(result_le)); + if (!err && memcmp(test_le, result_le, sizeof(test_le))) + err = FM10K_ERR_INVALID_VALUE; + if (err) + goto report_result; + } + + if (!!results[FM10K_TEST_MSG_NESTED]) { + /* clear any pointers */ + memset(nest_results, 0, sizeof(nest_results)); + + /* parse the nested attributes into the nest results list */ + err = fm10k_tlv_attr_parse(results[FM10K_TEST_MSG_NESTED], + nest_results, + fm10k_tlv_msg_test_attr); + if (err) + goto report_result; + + /* loop back through to the start */ + results = nest_results; + goto parse_nested; + } + +report_result: + /* generate reply with test result */ + fm10k_tlv_msg_init(reply, FM10K_TLV_MSG_ID_TEST); + fm10k_tlv_attr_put_s32(reply, FM10K_TEST_MSG_RESULT, err); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, reply); +} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h new file mode 100644 index 000000000..7e045e8bf --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -0,0 +1,186 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_TLV_H_ +#define _FM10K_TLV_H_ + +/* forward declaration */ +struct fm10k_msg_data; + +#include "fm10k_type.h" + +/* Message / Argument header format + * 3 2 1 0 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Length | Flags | Type / ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The message header format described here is used for messages that are + * passed between the PF and the VF. To allow for messages larger then + * mailbox size we will provide a message with the above header and it + * will be segmented and transported to the mailbox to the other side where + * it is reassembled. 
It contains the following fields: + * Len: Length of the message in bytes excluding the message header + * Flags: TBD + * Rule: These will be the message/argument types we pass + */ +/* message data header */ +#define FM10K_TLV_ID_SHIFT 0 +#define FM10K_TLV_ID_SIZE 16 +#define FM10K_TLV_ID_MASK ((1u << FM10K_TLV_ID_SIZE) - 1) +#define FM10K_TLV_FLAGS_SHIFT 16 +#define FM10K_TLV_FLAGS_MSG 0x1 +#define FM10K_TLV_FLAGS_SIZE 4 +#define FM10K_TLV_LEN_SHIFT 20 +#define FM10K_TLV_LEN_SIZE 12 + +#define FM10K_TLV_HDR_LEN 4ul +#define FM10K_TLV_LEN_ALIGN_MASK \ + ((FM10K_TLV_HDR_LEN - 1) << FM10K_TLV_LEN_SHIFT) +#define FM10K_TLV_LEN_ALIGN(tlv) \ + (((tlv) + FM10K_TLV_LEN_ALIGN_MASK) & ~FM10K_TLV_LEN_ALIGN_MASK) +#define FM10K_TLV_DWORD_LEN(tlv) \ + ((u16)((FM10K_TLV_LEN_ALIGN(tlv)) >> (FM10K_TLV_LEN_SHIFT + 2)) + 1) + +#define FM10K_TLV_RESULTS_MAX 32 + +enum fm10k_tlv_type { + FM10K_TLV_NULL_STRING, + FM10K_TLV_MAC_ADDR, + FM10K_TLV_BOOL, + FM10K_TLV_UNSIGNED, + FM10K_TLV_SIGNED, + FM10K_TLV_LE_STRUCT, + FM10K_TLV_NESTED, + FM10K_TLV_MAX_TYPE +}; + +#define FM10K_TLV_ERROR (~0u) + +struct fm10k_tlv_attr { + unsigned int id; + enum fm10k_tlv_type type; + u16 len; +}; + +#define FM10K_TLV_ATTR_NULL_STRING(id, len) { id, FM10K_TLV_NULL_STRING, len } +#define FM10K_TLV_ATTR_MAC_ADDR(id) { id, FM10K_TLV_MAC_ADDR, 6 } +#define FM10K_TLV_ATTR_BOOL(id) { id, FM10K_TLV_BOOL, 0 } +#define FM10K_TLV_ATTR_U8(id) { id, FM10K_TLV_UNSIGNED, 1 } +#define FM10K_TLV_ATTR_U16(id) { id, FM10K_TLV_UNSIGNED, 2 } +#define FM10K_TLV_ATTR_U32(id) { id, FM10K_TLV_UNSIGNED, 4 } +#define FM10K_TLV_ATTR_U64(id) { id, FM10K_TLV_UNSIGNED, 8 } +#define FM10K_TLV_ATTR_S8(id) { id, FM10K_TLV_SIGNED, 1 } +#define FM10K_TLV_ATTR_S16(id) { id, FM10K_TLV_SIGNED, 2 } +#define FM10K_TLV_ATTR_S32(id) { id, FM10K_TLV_SIGNED, 4 } +#define FM10K_TLV_ATTR_S64(id) { id, FM10K_TLV_SIGNED, 8 } +#define FM10K_TLV_ATTR_LE_STRUCT(id, len) { id, FM10K_TLV_LE_STRUCT, len } +#define FM10K_TLV_ATTR_NESTED(id) { id, FM10K_TLV_NESTED } +#define FM10K_TLV_ATTR_LAST { FM10K_TLV_ERROR } + +struct fm10k_msg_data { + unsigned int id; + const struct fm10k_tlv_attr *attr; + s32 (*func)(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +}; + +#define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func } + +s32 fm10k_tlv_msg_init(u32 *, u16); +s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *); +s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *); +s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16); +s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *); +s32 fm10k_tlv_attr_put_bool(u32 *, u16); +s32 fm10k_tlv_attr_put_value(u32 *, u16, s64, u32); +#define fm10k_tlv_attr_put_u8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_u16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 2) +#define fm10k_tlv_attr_put_u32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_u64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +#define fm10k_tlv_attr_put_s8(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 1) +#define fm10k_tlv_attr_put_s16(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 2) +#define fm10k_tlv_attr_put_s32(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 4) +#define fm10k_tlv_attr_put_s64(msg, attr_id, val) \ + fm10k_tlv_attr_put_value(msg, attr_id, val, 8) +s32 fm10k_tlv_attr_get_value(u32 *, void *, u32); +#define 
fm10k_tlv_attr_get_u8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u8)) +#define fm10k_tlv_attr_get_u16(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u16)) +#define fm10k_tlv_attr_get_u32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u32)) +#define fm10k_tlv_attr_get_u64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(u64)) +#define fm10k_tlv_attr_get_s8(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s8)) +#define fm10k_tlv_attr_get_s16(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s16)) +#define fm10k_tlv_attr_get_s32(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s32)) +#define fm10k_tlv_attr_get_s64(attr, ptr) \ + fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64)) +s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32); +s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32); +u32 *fm10k_tlv_attr_nest_start(u32 *, u16); +s32 fm10k_tlv_attr_nest_stop(u32 *); +s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *); +s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *, + const struct fm10k_msg_data *); +s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_ID_TEST 0 + +enum fm10k_tlv_test_attr_id { + FM10K_TEST_MSG_UNSET, + FM10K_TEST_MSG_STRING, + FM10K_TEST_MSG_MAC_ADDR, + FM10K_TEST_MSG_U8, + FM10K_TEST_MSG_U16, + FM10K_TEST_MSG_U32, + FM10K_TEST_MSG_U64, + FM10K_TEST_MSG_S8, + FM10K_TEST_MSG_S16, + FM10K_TEST_MSG_S32, + FM10K_TEST_MSG_S64, + FM10K_TEST_MSG_LE_STRUCT, + FM10K_TEST_MSG_NESTED, + FM10K_TEST_MSG_RESULT, + FM10K_TEST_MSG_MAX +}; + +extern const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[]; +void fm10k_tlv_msg_test_create(u32 *, u32); +s32 fm10k_tlv_msg_test(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); + +#define FM10K_TLV_MSG_TEST_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_MSG_ID_TEST, fm10k_tlv_msg_test_attr, func) +#define FM10K_TLV_MSG_ERROR_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_TLV_ERROR, NULL, func) +#endif /* _FM10K_MSG_H_ */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h new file mode 100644 index 000000000..4af96686c --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -0,0 +1,773 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_TYPE_H_ +#define _FM10K_TYPE_H_ + +/* forward declaration */ +struct fm10k_hw; + +#include +#include +#include + +#include "fm10k_mbx.h" + +#define FM10K_DEV_ID_PF 0x15A4 +#define FM10K_DEV_ID_VF 0x15A5 + +#define FM10K_MAX_QUEUES 256 +#define FM10K_MAX_QUEUES_PF 128 +#define FM10K_MAX_QUEUES_POOL 16 + +#define FM10K_48_BIT_MASK 0x0000FFFFFFFFFFFFull +#define FM10K_STAT_VALID 0x80000000 + +/* PCI Bus Info */ +#define FM10K_PCIE_LINK_CAP 0x7C +#define FM10K_PCIE_LINK_STATUS 0x82 +#define FM10K_PCIE_LINK_WIDTH 0x3F0 +#define FM10K_PCIE_LINK_WIDTH_1 0x10 +#define FM10K_PCIE_LINK_WIDTH_2 0x20 +#define FM10K_PCIE_LINK_WIDTH_4 0x40 +#define FM10K_PCIE_LINK_WIDTH_8 0x80 +#define FM10K_PCIE_LINK_SPEED 0xF +#define FM10K_PCIE_LINK_SPEED_2500 0x1 +#define FM10K_PCIE_LINK_SPEED_5000 0x2 +#define FM10K_PCIE_LINK_SPEED_8000 0x3 + +/* PCIe payload size */ +#define FM10K_PCIE_DEV_CAP 0x74 +#define FM10K_PCIE_DEV_CAP_PAYLOAD 0x07 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_256 0x01 +#define FM10K_PCIE_DEV_CAP_PAYLOAD_512 0x02 +#define FM10K_PCIE_DEV_CTRL 0x78 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD 0xE0 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_128 0x00 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_256 0x20 +#define FM10K_PCIE_DEV_CTRL_PAYLOAD_512 0x40 + +/* PCIe MSI-X Capability info */ +#define FM10K_PCI_MSIX_MSG_CTRL 0xB2 +#define FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK 0x7FF +#define FM10K_MAX_MSIX_VECTORS 256 +#define FM10K_MAX_VECTORS_PF 256 +#define FM10K_MAX_VECTORS_POOL 32 + +/* PCIe SR-IOV Info */ +#define FM10K_PCIE_SRIOV_CTRL 0x190 +#define FM10K_PCIE_SRIOV_CTRL_VFARI 0x10 + +#define FM10K_ERR_PARAM -2 +#define FM10K_ERR_REQUESTS_PENDING -4 +#define FM10K_ERR_RESET_REQUESTED -5 +#define FM10K_ERR_DMA_PENDING -6 +#define FM10K_ERR_RESET_FAILED -7 +#define FM10K_ERR_INVALID_MAC_ADDR -8 +#define FM10K_ERR_INVALID_VALUE -9 +#define FM10K_NOT_IMPLEMENTED 0x7FFFFFFF + +/* Start of PF registers */ +#define FM10K_CTRL 0x0000 +#define FM10K_CTRL_BAR4_ALLOWED 0x00000004 + +#define FM10K_CTRL_EXT 0x0001 +#define FM10K_GCR 0x0003 +#define FM10K_GCR_EXT 0x0005 + +/* Interrupt control registers */ +#define FM10K_EICR 0x0006 +#define FM10K_EICR_FAULT_MASK 0x0000003F +#define FM10K_EICR_MAILBOX 0x00000040 +#define FM10K_EICR_SWITCHREADY 0x00000080 +#define FM10K_EICR_SWITCHNOTREADY 0x00000100 +#define FM10K_EICR_SWITCHINTERRUPT 0x00000200 +#define FM10K_EICR_VFLR 0x00000800 +#define FM10K_EICR_MAXHOLDTIME 0x00001000 +#define FM10K_EIMR 0x0007 +#define FM10K_EIMR_PCA_FAULT 0x00000001 +#define FM10K_EIMR_THI_FAULT 0x00000010 +#define FM10K_EIMR_FUM_FAULT 0x00000400 +#define FM10K_EIMR_MAILBOX 0x00001000 +#define FM10K_EIMR_SWITCHREADY 0x00004000 +#define FM10K_EIMR_SWITCHNOTREADY 0x00010000 +#define FM10K_EIMR_SWITCHINTERRUPT 0x00040000 +#define FM10K_EIMR_SRAMERROR 0x00100000 +#define FM10K_EIMR_VFLR 0x00400000 +#define FM10K_EIMR_MAXHOLDTIME 0x01000000 +#define FM10K_EIMR_ALL 0x55555555 +#define FM10K_EIMR_DISABLE(NAME) ((FM10K_EIMR_ ## NAME) << 0) +#define FM10K_EIMR_ENABLE(NAME) ((FM10K_EIMR_ ## NAME) << 1) +#define FM10K_FAULT_ADDR_LO 0x0 +#define FM10K_FAULT_ADDR_HI 0x1 +#define FM10K_FAULT_SPECINFO 0x2 +#define FM10K_FAULT_FUNC 0x3 +#define FM10K_FAULT_SIZE 0x4 +#define FM10K_FAULT_FUNC_VALID 0x00008000 +#define FM10K_FAULT_FUNC_PF 0x00004000 +#define FM10K_FAULT_FUNC_VF_MASK 0x00003F00 +#define FM10K_FAULT_FUNC_VF_SHIFT 8 +#define FM10K_FAULT_FUNC_TYPE_MASK 0x000000FF + +#define FM10K_PCA_FAULT 0x0008 +#define 
FM10K_THI_FAULT 0x0010 +#define FM10K_FUM_FAULT 0x001C + +/* Rx queue timeout indicator */ +#define FM10K_MAXHOLDQ(_n) ((_n) + 0x0020) + +/* Switch Manager info */ +#define FM10K_SM_AREA(_n) ((_n) + 0x0028) + +/* GLORT mapping registers */ +#define FM10K_DGLORTMAP(_n) ((_n) + 0x0030) +#define FM10K_DGLORT_COUNT 8 +#define FM10K_DGLORTMAP_MASK_SHIFT 16 +#define FM10K_DGLORTMAP_ANY 0x00000000 +#define FM10K_DGLORTMAP_NONE 0x0000FFFF +#define FM10K_DGLORTMAP_ZERO 0xFFFF0000 +#define FM10K_DGLORTDEC(_n) ((_n) + 0x0038) +#define FM10K_DGLORTDEC_VSILENGTH_SHIFT 4 +#define FM10K_DGLORTDEC_VSIBASE_SHIFT 7 +#define FM10K_DGLORTDEC_PCLENGTH_SHIFT 14 +#define FM10K_DGLORTDEC_QBASE_SHIFT 16 +#define FM10K_DGLORTDEC_RSSLENGTH_SHIFT 24 +#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000 +#define FM10K_TUNNEL_CFG 0x0040 +#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16 +#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050) +#define FM10K_SWPRI_MAX 16 +#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800) +#define FM10K_RSSRK_SIZE 10 +#define FM10K_RSSRK_ENTRIES_PER_REG 4 +#define FM10K_RETA(_n, _m) (((_n) * 0x20) + (_m) + 0x1000) +#define FM10K_RETA_SIZE 32 +#define FM10K_RETA_ENTRIES_PER_REG 4 +#define FM10K_MAX_RSS_INDICES 128 + +/* Rate limiting registers */ +#define FM10K_TC_CREDIT(_n) ((_n) + 0x2000) +#define FM10K_TC_CREDIT_CREDIT_MASK 0x001FFFFF +#define FM10K_TC_MAXCREDIT(_n) ((_n) + 0x2040) +#define FM10K_TC_MAXCREDIT_64K 0x00010000 +#define FM10K_TC_RATE(_n) ((_n) + 0x2080) +#define FM10K_TC_RATE_QUANTA_MASK 0x0000FFFF +#define FM10K_TC_RATE_INTERVAL_4US_GEN1 0x00020000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN2 0x00040000 +#define FM10K_TC_RATE_INTERVAL_4US_GEN3 0x00080000 + +/* DMA control registers */ +#define FM10K_DMA_CTRL 0x20C3 +#define FM10K_DMA_CTRL_TX_ENABLE 0x00000001 +#define FM10K_DMA_CTRL_TX_ACTIVE 0x00000008 +#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010 +#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080 +#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100 +#define FM10K_DMA_CTRL_MINMSS_64 0x00008000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000 +#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1 0x03800000 +#define FM10K_DMA_CTRL_DATAPATH_RESET 0x20000000 +#define FM10K_DMA_CTRL_32_DESC 0x00000000 + +#define FM10K_DMA_CTRL2 0x20C4 +#define FM10K_DMA_CTRL2_SWITCH_READY 0x00002000 + +/* TSO flags configuration + * First packet contains all flags except for fin and psh + * Middle packet contains only urg and ack + * Last packet contains urg, ack, fin, and psh + */ +#define FM10K_TSO_FLAGS_LOW 0x00300FF6 +#define FM10K_TSO_FLAGS_HI 0x00000039 +#define FM10K_DTXTCPFLGL 0x20C5 +#define FM10K_DTXTCPFLGH 0x20C6 + +#define FM10K_TPH_CTRL 0x20C7 +#define FM10K_MRQC(_n) ((_n) + 0x2100) +#define FM10K_MRQC_TCP_IPV4 0x00000001 +#define FM10K_MRQC_IPV4 0x00000002 +#define FM10K_MRQC_IPV6 0x00000010 +#define FM10K_MRQC_TCP_IPV6 0x00000020 +#define FM10K_MRQC_UDP_IPV4 0x00000040 +#define FM10K_MRQC_UDP_IPV6 0x00000080 + +#define FM10K_TQMAP(_n) ((_n) + 0x2800) +#define FM10K_TQMAP_TABLE_SIZE 2048 +#define FM10K_RQMAP(_n) ((_n) + 0x3000) + +/* Hardware Statistics */ +#define FM10K_STATS_TIMEOUT 0x3800 +#define FM10K_STATS_UR 0x3801 +#define FM10K_STATS_CA 0x3802 +#define FM10K_STATS_UM 0x3803 +#define FM10K_STATS_XEC 0x3804 +#define FM10K_STATS_VLAN_DROP 0x3805 +#define FM10K_STATS_LOOPBACK_DROP 0x3806 +#define FM10K_STATS_NODESC_DROP 0x3807 + +/* Timesync registers */ +#define FM10K_SYSTIME 0x3814 +#define FM10K_SYSTIME_CFG 0x3818 +#define FM10K_SYSTIME_CFG_STEP_MASK 
0x0000000F + +/* PCIe state registers */ +#define FM10K_PHYADDR 0x381C + +/* Rx ring registers */ +#define FM10K_RDBAL(_n) ((0x40 * (_n)) + 0x4000) +#define FM10K_RDBAH(_n) ((0x40 * (_n)) + 0x4001) +#define FM10K_RDLEN(_n) ((0x40 * (_n)) + 0x4002) +#define FM10K_TPH_RXCTRL(_n) ((0x40 * (_n)) + 0x4003) +#define FM10K_TPH_RXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_RXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_RXCTRL_DATA_WROEN 0x00002000 +#define FM10K_TPH_RXCTRL_HDR_WROEN 0x00008000 +#define FM10K_RDH(_n) ((0x40 * (_n)) + 0x4004) +#define FM10K_RDT(_n) ((0x40 * (_n)) + 0x4005) +#define FM10K_RXQCTL(_n) ((0x40 * (_n)) + 0x4006) +#define FM10K_RXQCTL_ENABLE 0x00000001 +#define FM10K_RXQCTL_PF 0x000000FC +#define FM10K_RXQCTL_VF_SHIFT 2 +#define FM10K_RXQCTL_VF 0x00000100 +#define FM10K_RXQCTL_ID_MASK (FM10K_RXQCTL_PF | FM10K_RXQCTL_VF) +#define FM10K_RXDCTL(_n) ((0x40 * (_n)) + 0x4007) +#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001 +#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200 +#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008) +#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009) +#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */ +#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000 +#define FM10K_SRRCTL_BUFFER_CHAINING_EN 0x80000000 + +/* Rx Statistics */ +#define FM10K_QPRC(_n) ((0x40 * (_n)) + 0x400A) +#define FM10K_QPRDC(_n) ((0x40 * (_n)) + 0x400B) +#define FM10K_QBRC_L(_n) ((0x40 * (_n)) + 0x400C) +#define FM10K_QBRC_H(_n) ((0x40 * (_n)) + 0x400D) + +/* Rx GLORT register */ +#define FM10K_RX_SGLORT(_n) ((0x40 * (_n)) + 0x400E) + +/* Tx ring registers */ +#define FM10K_TDBAL(_n) ((0x40 * (_n)) + 0x8000) +#define FM10K_TDBAH(_n) ((0x40 * (_n)) + 0x8001) +#define FM10K_TDLEN(_n) ((0x40 * (_n)) + 0x8002) +#define FM10K_TPH_TXCTRL(_n) ((0x40 * (_n)) + 0x8003) +#define FM10K_TPH_TXCTRL_DESC_TPHEN 0x00000020 +#define FM10K_TPH_TXCTRL_DESC_RROEN 0x00000200 +#define FM10K_TPH_TXCTRL_DESC_WROEN 0x00000800 +#define FM10K_TPH_TXCTRL_DATA_RROEN 0x00002000 +#define FM10K_TDH(_n) ((0x40 * (_n)) + 0x8004) +#define FM10K_TDT(_n) ((0x40 * (_n)) + 0x8005) +#define FM10K_TXDCTL(_n) ((0x40 * (_n)) + 0x8006) +#define FM10K_TXDCTL_ENABLE 0x00004000 +#define FM10K_TXDCTL_MAX_TIME_SHIFT 16 +#define FM10K_TXQCTL(_n) ((0x40 * (_n)) + 0x8007) +#define FM10K_TXQCTL_PF 0x0000003F +#define FM10K_TXQCTL_VF 0x00000040 +#define FM10K_TXQCTL_ID_MASK (FM10K_TXQCTL_PF | FM10K_TXQCTL_VF) +#define FM10K_TXQCTL_PC_SHIFT 7 +#define FM10K_TXQCTL_PC_MASK 0x00000380 +#define FM10K_TXQCTL_TC_SHIFT 10 +#define FM10K_TXQCTL_VID_SHIFT 16 +#define FM10K_TXQCTL_VID_MASK 0x0FFF0000 +#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000 +#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008) + +/* Tx Statistics */ +#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009) +#define FM10K_QBTC_L(_n) ((0x40 * (_n)) + 0x800A) +#define FM10K_QBTC_H(_n) ((0x40 * (_n)) + 0x800B) + +/* Tx Push registers */ +#define FM10K_TQDLOC(_n) ((0x40 * (_n)) + 0x800C) +#define FM10K_TQDLOC_BASE_32_DESC 0x08 +#define FM10K_TQDLOC_SIZE_32_DESC 0x00050000 + +/* Tx GLORT registers */ +#define FM10K_TX_SGLORT(_n) ((0x40 * (_n)) + 0x800D) +#define FM10K_PFVTCTL(_n) ((0x40 * (_n)) + 0x800E) +#define FM10K_PFVTCTL_FTAG_DESC_ENABLE 0x00000001 + +/* Interrupt moderation and control registers */ +#define FM10K_INT_MAP(_n) ((_n) + 0x10080) +#define FM10K_INT_MAP_TIMER0 0x00000000 +#define FM10K_INT_MAP_TIMER1 0x00000100 +#define FM10K_INT_MAP_IMMEDIATE 0x00000200 +#define FM10K_INT_MAP_DISABLE 0x00000300 +#define FM10K_MSIX_VECTOR_MASK(_n) ((0x4 * (_n)) + 0x11003) +#define 
FM10K_INT_CTRL 0x12000 +#define FM10K_INT_CTRL_ENABLEMODERATOR 0x00000400 +#define FM10K_ITR(_n) ((_n) + 0x12400) +#define FM10K_ITR_INTERVAL1_SHIFT 12 +#define FM10K_ITR_PENDING2 0x10000000 +#define FM10K_ITR_AUTOMASK 0x20000000 +#define FM10K_ITR_MASK_SET 0x40000000 +#define FM10K_ITR_MASK_CLEAR 0x80000000 +#define FM10K_ITR2(_n) ((0x2 * (_n)) + 0x12800) +#define FM10K_ITR_REG_COUNT 768 +#define FM10K_ITR_REG_COUNT_PF 256 + +/* Switch manager interrupt registers */ +#define FM10K_IP 0x13000 +#define FM10K_IP_NOTINRESET 0x00000100 + +/* VLAN registers */ +#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000) +#define FM10K_VLAN_TABLE_SIZE 128 + +/* VLAN specific message offsets */ +#define FM10K_VLAN_TABLE_VID_MAX 4096 +#define FM10K_VLAN_TABLE_VSI_MAX 64 +#define FM10K_VLAN_LENGTH_SHIFT 16 +#define FM10K_VLAN_CLEAR (1 << 15) +#define FM10K_VLAN_ALL \ + ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) + +/* VF FLR event notification registers */ +#define FM10K_PFVFLRE(_n) ((0x1 * (_n)) + 0x18844) +#define FM10K_PFVFLREC(_n) ((0x1 * (_n)) + 0x18846) + +/* Defines for size of uncacheable memories */ +#define FM10K_UC_ADDR_START 0x000000 /* start of standard regs */ +#define FM10K_UC_ADDR_END 0x100000 /* end of standard regs */ +#define FM10K_UC_ADDR_SIZE (FM10K_UC_ADDR_END - FM10K_UC_ADDR_START) + +/* Define timeouts for resets and disables */ +#define FM10K_QUEUE_DISABLE_TIMEOUT 100 +#define FM10K_RESET_TIMEOUT 150 + +/* Maximum supported combined inner and outer header length for encapsulation */ +#define FM10K_TUNNEL_HEADER_LENGTH 184 + +/* VF registers */ +#define FM10K_VFCTRL 0x00000 +#define FM10K_VFCTRL_RST 0x00000008 +#define FM10K_VFINT_MAP 0x00030 +#define FM10K_VFSYSTIME 0x00040 +#define FM10K_VFITR(_n) ((_n) + 0x00060) + +/* Registers contained in BAR 4 for Switch management */ +#define FM10K_SW_SYSTIME_ADJUST 0x0224D +#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF +#define FM10K_SW_SYSTIME_ADJUST_DIR_NEGATIVE 0x80000000 +#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) + +enum fm10k_int_source { + fm10k_int_Mailbox = 0, + fm10k_int_PCIeFault = 1, + fm10k_int_SwitchUpDown = 2, + fm10k_int_SwitchEvent = 3, + fm10k_int_SRAM = 4, + fm10k_int_VFLR = 5, + fm10k_int_MaxHoldTime = 6, + fm10k_int_sources_max_pf +}; + +/* PCIe bus speeds */ +enum fm10k_bus_speed { + fm10k_bus_speed_unknown = 0, + fm10k_bus_speed_2500 = 2500, + fm10k_bus_speed_5000 = 5000, + fm10k_bus_speed_8000 = 8000, + fm10k_bus_speed_reserved +}; + +/* PCIe bus widths */ +enum fm10k_bus_width { + fm10k_bus_width_unknown = 0, + fm10k_bus_width_pcie_x1 = 1, + fm10k_bus_width_pcie_x2 = 2, + fm10k_bus_width_pcie_x4 = 4, + fm10k_bus_width_pcie_x8 = 8, + fm10k_bus_width_reserved +}; + +/* PCIe payload sizes */ +enum fm10k_bus_payload { + fm10k_bus_payload_unknown = 0, + fm10k_bus_payload_128 = 1, + fm10k_bus_payload_256 = 2, + fm10k_bus_payload_512 = 3, + fm10k_bus_payload_reserved +}; + +/* Bus parameters */ +struct fm10k_bus_info { + enum fm10k_bus_speed speed; + enum fm10k_bus_width width; + enum fm10k_bus_payload payload; +}; + +/* Statistics related declarations */ +struct fm10k_hw_stat { + u64 count; + u32 base_l; + u32 base_h; +}; + +struct fm10k_hw_stats_q { + struct fm10k_hw_stat tx_bytes; + struct fm10k_hw_stat tx_packets; +#define tx_stats_idx tx_packets.base_h + struct fm10k_hw_stat rx_bytes; + struct fm10k_hw_stat rx_packets; +#define rx_stats_idx rx_packets.base_h + struct fm10k_hw_stat rx_drops; +}; + +struct fm10k_hw_stats { + struct fm10k_hw_stat timeout; +#define stats_idx 
timeout.base_h + struct fm10k_hw_stat ur; + struct fm10k_hw_stat ca; + struct fm10k_hw_stat um; + struct fm10k_hw_stat xec; + struct fm10k_hw_stat vlan_drop; + struct fm10k_hw_stat loopback_drop; + struct fm10k_hw_stat nodesc_drop; + struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF]; +}; + +/* Establish DGLORT feature priority */ +enum fm10k_dglortdec_idx { + fm10k_dglort_default = 0, + fm10k_dglort_vf_rsvd0 = 1, + fm10k_dglort_vf_rss = 2, + fm10k_dglort_pf_rsvd0 = 3, + fm10k_dglort_pf_queue = 4, + fm10k_dglort_pf_vsi = 5, + fm10k_dglort_pf_rsvd1 = 6, + fm10k_dglort_pf_rss = 7 +}; + +struct fm10k_dglort_cfg { + u16 glort; /* GLORT base */ + u16 queue_b; /* Base value for queue */ + u8 vsi_b; /* Base value for VSI */ + u8 idx; /* index of DGLORTDEC entry */ + u8 rss_l; /* RSS indices */ + u8 pc_l; /* Priority Class indices */ + u8 vsi_l; /* Number of bits from GLORT used to determine VSI */ + u8 queue_l; /* Number of bits from GLORT used to determine queue */ + u8 shared_l; /* Ignored bits from GLORT resulting in shared VSI */ + u8 inner_rss; /* Boolean value if inner header is used for RSS */ +}; + +enum fm10k_pca_fault { + PCA_NO_FAULT, + PCA_UNMAPPED_ADDR, + PCA_BAD_QACCESS_PF, + PCA_BAD_QACCESS_VF, + PCA_MALICIOUS_REQ, + PCA_POISONED_TLP, + PCA_TLP_ABORT, + __PCA_MAX +}; + +enum fm10k_thi_fault { + THI_NO_FAULT, + THI_MAL_DIS_Q_FAULT, + __THI_MAX +}; + +enum fm10k_fum_fault { + FUM_NO_FAULT, + FUM_UNMAPPED_ADDR, + FUM_POISONED_TLP, + FUM_BAD_VF_QACCESS, + FUM_ADD_DECODE_ERR, + FUM_RO_ERROR, + FUM_QPRC_CRC_ERROR, + FUM_CSR_TIMEOUT, + FUM_INVALID_TYPE, + FUM_INVALID_LENGTH, + FUM_INVALID_BE, + FUM_INVALID_ALIGN, + __FUM_MAX +}; + +struct fm10k_fault { + u64 address; /* Address at the time fault was detected */ + u32 specinfo; /* Extra info on this fault (fault dependent) */ + u8 type; /* Fault value dependent on subunit */ + u8 func; /* Function number of the fault */ +}; + +struct fm10k_mac_ops { + /* basic bring-up and tear-down */ + s32 (*reset_hw)(struct fm10k_hw *); + s32 (*init_hw)(struct fm10k_hw *); + s32 (*start_hw)(struct fm10k_hw *); + s32 (*stop_hw)(struct fm10k_hw *); + s32 (*get_bus_info)(struct fm10k_hw *); + s32 (*get_host_state)(struct fm10k_hw *, bool *); + bool (*is_slot_appropriate)(struct fm10k_hw *); + s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); + s32 (*read_mac_addr)(struct fm10k_hw *); + s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, + u16, bool, u8); + s32 (*update_mc_addr)(struct fm10k_hw *, u16, const u8 *, u16, bool); + s32 (*update_xcast_mode)(struct fm10k_hw *, u16, u8); + void (*update_int_moderator)(struct fm10k_hw *); + s32 (*update_lport_state)(struct fm10k_hw *, u16, u16, bool); + void (*update_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + void (*rebind_hw_stats)(struct fm10k_hw *, struct fm10k_hw_stats *); + s32 (*configure_dglort_map)(struct fm10k_hw *, + struct fm10k_dglort_cfg *); + void (*set_dma_mask)(struct fm10k_hw *, u64); + s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); + void (*request_lport_map)(struct fm10k_hw *); + s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); + u64 (*read_systime)(struct fm10k_hw *); +}; + +enum fm10k_mac_type { + fm10k_mac_unknown = 0, + fm10k_mac_pf, + fm10k_mac_vf, + fm10k_num_macs +}; + +struct fm10k_mac_info { + struct fm10k_mac_ops ops; + enum fm10k_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u16 default_vid; + u16 max_msix_vectors; + u16 max_queues; + bool vlan_override; + bool get_host_state; + bool tx_ready; + u32 dglort_map; +}; + +struct 
fm10k_swapi_table_info { + u32 used; + u32 avail; +}; + +struct fm10k_swapi_info { + u32 status; + struct fm10k_swapi_table_info mac; + struct fm10k_swapi_table_info nexthop; + struct fm10k_swapi_table_info ffu; +}; + +enum fm10k_xcast_modes { + FM10K_XCAST_MODE_ALLMULTI = 0, + FM10K_XCAST_MODE_MULTI = 1, + FM10K_XCAST_MODE_PROMISC = 2, + FM10K_XCAST_MODE_NONE = 3, + FM10K_XCAST_MODE_DISABLE = 4 +}; + +#define FM10K_VF_TC_MAX 100000 /* 100,000 Mb/s aka 100Gb/s */ +#define FM10K_VF_TC_MIN 1 /* 1 Mb/s is the slowest rate */ + +struct fm10k_vf_info { + /* mbx must be first field in struct unless all default IOV message + * handlers are redone as the assumption is that vf_info starts + * at the same offset as the mailbox + */ + struct fm10k_mbx_info mbx; /* PF side of VF mailbox */ + int rate; /* Tx BW cap as defined by OS */ + u16 glort; /* resource tag for this VF */ + u16 sw_vid; /* Switch API assigned VLAN */ + u16 pf_vid; /* PF assigned Default VLAN */ + u8 mac[ETH_ALEN]; /* PF Default MAC address */ + u8 vsi; /* VSI identifier */ + u8 vf_idx; /* which VF this is */ + u8 vf_flags; /* flags indicating what modes + * are supported for the port + */ +}; + +#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI) +#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI) +#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC) +#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF) +#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4) +#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode)) +#define FM10K_VF_FLAG_SET_MODE_NONE \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_MULTI_ENABLED \ + (FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_ALLMULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_MULTI) | \ + FM10K_VF_FLAG_SET_MODE(FM10K_XCAST_MODE_PROMISC)) + +struct fm10k_iov_ops { + /* IOV related bring-up and tear-down */ + s32 (*assign_resources)(struct fm10k_hw *, u16, u16); + s32 (*configure_tc)(struct fm10k_hw *, u16, int); + s32 (*assign_int_moderator)(struct fm10k_hw *, u16); + s32 (*assign_default_mac_vlan)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*reset_resources)(struct fm10k_hw *, + struct fm10k_vf_info *); + s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8); + void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *); + void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16); + s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64); +}; + +struct fm10k_iov_info { + struct fm10k_iov_ops ops; + u16 total_vfs; + u16 num_vfs; + u16 num_pools; +}; + +enum fm10k_devices { + fm10k_device_pf, + fm10k_device_vf, +}; + +struct fm10k_info { + enum fm10k_mac_type mac; + s32 (*get_invariants)(struct fm10k_hw *); + struct fm10k_mac_ops *mac_ops; + struct fm10k_iov_ops *iov_ops; +}; + +struct fm10k_hw { + u32 __iomem *hw_addr; + u32 __iomem *sw_addr; + void *back; + struct fm10k_mac_info mac; + struct fm10k_bus_info bus; + struct fm10k_bus_info bus_caps; + struct fm10k_iov_info iov; + struct fm10k_mbx_info mbx; + struct fm10k_swapi_info swapi; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; +}; + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define FM10K_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define FM10K_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Transmit Descriptor */ +struct fm10k_tx_desc { 
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 buflen; /* Length of data to be DMAed */ + __le16 vlan; /* VLAN_ID and VPRI to be inserted in FTAG */ + __le16 mss; /* MSS for segmentation offload */ + u8 hdrlen; /* Header size for segmentation offload */ + u8 flags; /* Status and offload request flags */ +}; + +/* Transmit Descriptor Cache Structure */ +struct fm10k_tx_desc_cache { + struct fm10k_tx_desc tx_desc[256]; +}; + +#define FM10K_TXD_FLAG_INT 0x01 +#define FM10K_TXD_FLAG_TIME 0x02 +#define FM10K_TXD_FLAG_CSUM 0x04 +#define FM10K_TXD_FLAG_FTAG 0x10 +#define FM10K_TXD_FLAG_RS 0x20 +#define FM10K_TXD_FLAG_LAST 0x40 +#define FM10K_TXD_FLAG_DONE 0x80 + +/* These macros are meant to enable optimal placement of the RS and INT + * bits. It will point us to the last descriptor in the cache for either the + * start of the packet, or the end of the packet. If the index is actually + * at the start of the FIFO it will point to the offset for the last index + * in the FIFO to prevent an unnecessary write. + */ +#define FM10K_TXD_WB_FIFO_SIZE 4 + +/* Receive Descriptor - 32B */ +union fm10k_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + __le64 reserved; /* Empty space, RSS hash */ + __le64 timestamp; + } q; /* Read, Writeback, 64b quad-words */ + struct { + __le32 data; /* RSS and header data */ + __le32 rss; /* RSS Hash */ + __le32 staterr; + __le32 vlan_len; + __le32 glort; /* sglort/dglort */ + } d; /* Writeback, 32b double-words */ + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen, xC */ + __le16 rss_lower; + __le16 rss_upper; + __le16 status; /* status/error */ + __le16 csum_err; /* checksum or extended error value */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + __le16 dglort; + __le16 sglort; + } w; /* Writeback, 16b words */ +}; + +#define FM10K_RXD_RSSTYPE_MASK 0x000F +enum fm10k_rdesc_rss_type { + FM10K_RSSTYPE_NONE = 0x0, + FM10K_RSSTYPE_IPV4_TCP = 0x1, + FM10K_RSSTYPE_IPV4 = 0x2, + FM10K_RSSTYPE_IPV6_TCP = 0x3, + /* Reserved 0x4 */ + FM10K_RSSTYPE_IPV6 = 0x5, + /* Reserved 0x6 */ + FM10K_RSSTYPE_IPV4_UDP = 0x7, + FM10K_RSSTYPE_IPV6_UDP = 0x8 + /* Reserved 0x9 - 0xF */ +}; + +#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006 +enum fm10k_rxdesc_xc { + FM10K_XC_UNICAST = 0x0, + FM10K_XC_MULTICAST = 0x4, + FM10K_XC_BROADCAST = 0x6 +}; + +#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */ +#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */ +#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */ +#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */ +#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */ +#define FM10K_RXD_STATUS_IPE2 0x1000 /* Inner header IPv4 csum err */ +#define FM10K_RXD_STATUS_RXE 0x2000 /* Generic Rx error */ +#define FM10K_RXD_STATUS_L4E 0x4000 /* L4 csum error */ +#define FM10K_RXD_STATUS_IPE 0x8000 /* IPv4 csum error */ + +struct fm10k_ftag { + __be16 swpri_type_user; + __be16 vlan; + __be16 sglort; + __be16 dglort; +}; + +#endif /* _FM10K_TYPE_H */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c new file mode 100644 index 000000000..94f0f6a14 --- /dev/null +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -0,0 +1,582 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2015 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "fm10k_vf.h" + +/** + * fm10k_stop_hw_vf - Stop Tx/Rx units + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) +{ + u8 *perm_addr = hw->mac.perm_addr; + u32 bal = 0, bah = 0; + s32 err; + u16 i; + + /* we need to disable the queues before taking further steps */ + err = fm10k_stop_hw_generic(hw); + if (err) + return err; + + /* If permanent address is set then we need to restore it */ + if (is_valid_ether_addr(perm_addr)) { + bal = (((u32)perm_addr[3]) << 24) | + (((u32)perm_addr[4]) << 16) | + (((u32)perm_addr[5]) << 8); + bah = (((u32)0xFF) << 24) | + (((u32)perm_addr[0]) << 16) | + (((u32)perm_addr[1]) << 8) | + ((u32)perm_addr[2]); + } + + /* The queues have already been disabled so we just need to + * update their base address registers + */ + for (i = 0; i < hw->mac.max_queues; i++) { + fm10k_write_reg(hw, FM10K_TDBAL(i), bal); + fm10k_write_reg(hw, FM10K_TDBAH(i), bah); + fm10k_write_reg(hw, FM10K_RDBAL(i), bal); + fm10k_write_reg(hw, FM10K_RDBAH(i), bah); + } + + return 0; +} + +/** + * fm10k_reset_hw_vf - VF hardware reset + * @hw: pointer to hardware structure + * + * This function should return the hardware to a state similar to the + * one it is in after just being initialized. 
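+ * In outline (as implemented below): the queues owned by the VF are first
+ * stopped, FM10K_VFCTRL_RST is written to request the reset, the write is
+ * flushed and the hardware is given FM10K_RESET_TIMEOUT microseconds to
+ * settle, and finally the reset bit is cleared and read back to confirm
+ * that the device actually came out of reset.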
+ **/ +static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) +{ + s32 err; + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_stop_hw_vf(hw); + if (err) + return err; + + /* Initiate VF reset */ + fm10k_write_reg(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST); + + /* Flush write and allow 100us for reset to complete */ + fm10k_write_flush(hw); + udelay(FM10K_RESET_TIMEOUT); + + /* Clear reset bit and verify it was cleared */ + fm10k_write_reg(hw, FM10K_VFCTRL, 0); + if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) + err = FM10K_ERR_RESET_FAILED; + + return err; +} + +/** + * fm10k_init_hw_vf - VF hardware initialization + * @hw: pointer to hardware structure + * + **/ +static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) +{ + u32 tqdloc, tqdloc0 = ~fm10k_read_reg(hw, FM10K_TQDLOC(0)); + s32 err; + u16 i; + + /* assume we always have at least 1 queue */ + for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { + /* verify the Descriptor cache offsets are increasing */ + tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i)); + if (!tqdloc || (tqdloc == tqdloc0)) + break; + + /* check to verify the PF doesn't own any of our queues */ + if (!~fm10k_read_reg(hw, FM10K_TXQCTL(i)) || + !~fm10k_read_reg(hw, FM10K_RXQCTL(i))) + break; + } + + /* shut down queues we own and reset DMA configuration */ + err = fm10k_disable_queues_generic(hw, i); + if (err) + return err; + + /* record maximum queue count */ + hw->mac.max_queues = i; + + /* fetch default VLAN */ + hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) & + FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + + return 0; +} + +/** + * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU + * @hw: pointer to hardware structure + * + * Looks at the PCIe bus info to confirm whether or not this slot can support + * the necessary bandwidth for this device. Since the VF has no control over + * the "slot" it is in, always indicate that the slot is appropriate. + **/ +static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw) +{ + return true; +} + +/* This structure defines the attributes to be parsed below */ +const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { + FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), + FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC), + FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table + * @hw: pointer to hardware structure + * @vid: VLAN ID to add to table + * @vsi: Reserved, should always be 0 + * @set: Indicates if this is a set or clear operation + * + * This function adds or removes the corresponding VLAN ID from the VLAN + * filter table for this VF.
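+ *
+ * Rather than touching a register directly, the VF encodes the request as a
+ * MAC/VLAN mailbox message to the PF; when @set is false the VID field is
+ * tagged with FM10K_VLAN_CLEAR so the PF knows to remove the entry.  As an
+ * illustration only (VLAN 100 chosen arbitrarily), a caller would normally
+ * go through the MAC ops table:
+ *
+ *	err = hw->mac.ops.update_vlan(hw, 100, 0, true);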
+ **/ +static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[4]; + + /* verify the index is not set */ + if (vsi) + return FM10K_ERR_PARAM; + + /* verify upper 4 bits of vid and length are 0 */ + if ((vid << 16 | vid) >> 28) + return FM10K_ERR_PARAM; + + /* encode set bit into the VLAN ID */ + if (!set) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message + * @hw: pointer to the HW structure + * @results: Attributes for message + * @mbx: unused mailbox data + * + * This function should determine the MAC address for the VF + **/ +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + u8 perm_addr[ETH_ALEN]; + u16 vid; + s32 err; + + /* record MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan( + results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC], + perm_addr, &vid); + if (err) + return err; + + ether_addr_copy(hw->mac.perm_addr, perm_addr); + hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1); + hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR); + + return 0; +} + +/** + * fm10k_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + * + * This function should determine the MAC address for the VF + **/ +static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) +{ + u8 perm_addr[ETH_ALEN]; + u32 base_addr; + + base_addr = fm10k_read_reg(hw, FM10K_TDBAL(0)); + + /* last byte should be 0 */ + if (base_addr << 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[3] = (u8)(base_addr >> 24); + perm_addr[4] = (u8)(base_addr >> 16); + perm_addr[5] = (u8)(base_addr >> 8); + + base_addr = fm10k_read_reg(hw, FM10K_TDBAH(0)); + + /* first byte should be all 1's */ + if ((~base_addr) >> 24) + return FM10K_ERR_INVALID_MAC_ADDR; + + perm_addr[0] = (u8)(base_addr >> 16); + perm_addr[1] = (u8)(base_addr >> 8); + perm_addr[2] = (u8)(base_addr); + + ether_addr_copy(hw->mac.perm_addr, perm_addr); + ether_addr_copy(hw->mac.addr, perm_addr); + + return 0; +} + +/** + * fm10k_update_uc_addr_vf - Update device unicast addresses + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * @flags: flags field to indicate add and secure - unused + * + * This function is used to add or remove unicast MAC addresses for + * the VF. 
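+ *
+ * Note that the VF cannot pick an arbitrary address: if the PF has assigned
+ * a permanent MAC address, any @mac that differs from it is rejected with
+ * FM10K_ERR_PARAM.  As an illustration only (arguments chosen arbitrarily),
+ * a caller re-adding its own permanent address would look like:
+ *
+ *	err = hw->mac.ops.update_uc_addr(hw, 0, hw->mac.addr, 0, true, 0);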
+ **/ +static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add, u8 flags) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify MAC address is valid */ + if (!is_valid_ether_addr(mac)) + return FM10K_ERR_PARAM; + + /* verify we are not locked down on the MAC address */ + if (is_valid_ether_addr(hw->mac.perm_addr) && + memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_mc_addr_vf - Update device multicast addresses + * @hw: pointer to the HW structure + * @glort: unused + * @mac: MAC address to add/remove from table + * @vid: VLAN ID to add/remove from table + * @add: Indicates if this is an add or remove operation + * + * This function is used to add or remove multicast MAC addresses for + * the VF. + **/ +static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, + const u8 *mac, u16 vid, bool add) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[7]; + + /* verify VLAN ID is valid */ + if (vid >= FM10K_VLAN_TABLE_VID_MAX) + return FM10K_ERR_PARAM; + + /* verify multicast address is valid */ + if (!is_multicast_ether_addr(mac)) + return FM10K_ERR_PARAM; + + /* add bit to notify us if this is a set or clear operation */ + if (!add) + vid |= FM10K_VLAN_CLEAR; + + /* generate VLAN request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); + fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST, + mac, vid); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_int_moderator_vf - Request update of interrupt moderator list + * @hw: pointer to hardware structure + * + * This function will issue a request to the PF to rescan our MSI-X table + * and to update the interrupt moderator linked list. + **/ +static void fm10k_update_int_moderator_vf(struct fm10k_hw *hw) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[1]; + + /* generate MSI-X request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX); + + /* load onto outgoing mailbox */ + mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/* This structure defines the attributes to be parsed below */ +const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = { + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE), + FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE), + FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY), + FM10K_TLV_ATTR_LAST +}; + +/** + * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF + * @hw: Pointer to hardware structure + * @results: pointer array containing parsed data + * @mbx: Pointer to mailbox information structure + * + * This handler is meant to capture the indication from the PF that we + * are ready to bring up the interface. + **/ +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
+ FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO; + + return 0; +} + +/** + * fm10k_update_lport_state_vf - Update device state in lower device + * @hw: pointer to the HW structure + * @glort: unused + * @count: number of logical ports to enable - unused (always 1) + * @enable: boolean value indicating if this is an enable or disable request + * + * Notify the lower device of a state change. If the lower device is + * enabled we can add filters, if it is disabled all filters for this + * logical port are flushed. + **/ +static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort, + u16 count, bool enable) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[2]; + + /* reset glort mask 0 as we have to wait to be enabled */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + + /* generate port state request */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + if (!enable) + fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +/** + * fm10k_update_xcast_mode_vf - Request update of multicast mode + * @hw: pointer to hardware structure + * @glort: unused + * @mode: integer value indicating mode being requested + * + * This function will attempt to request a higher mode for the port + * so that it can enable either multicast, multicast promiscuous, or + * promiscuous mode of operation. + **/ +static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) +{ + struct fm10k_mbx_info *mbx = &hw->mbx; + u32 msg[3]; + + if (mode > FM10K_XCAST_MODE_NONE) + return FM10K_ERR_PARAM; + /* generate message requesting to change xcast mode */ + fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE); + fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode); + + /* load onto outgoing mailbox */ + return mbx->ops.enqueue_tx(hw, mbx, msg); +} + +const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = { + FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP), + FM10K_TLV_ATTR_LAST +}; + +/* currently there is no shared 1588 timestamp handler */ + +/** + * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to statistics structure + * + * This function collects and aggregates per queue hardware statistics. + **/ +static void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); +} + +/** + * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF + * @hw: pointer to hardware structure + * @stats: pointer to the stats structure to update + * + * This function resets the base for queue hardware statistics. + **/ +static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats) +{ + /* Unbind Queue Statistics */ + fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); + + /* Reinitialize bases for all stats */ + fm10k_update_hw_stats_vf(hw, stats); +} + +/** + * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues + * @hw: pointer to hardware structure + * @dglort: pointer to dglort configuration structure + * + * Reads the configuration structure contained in dglort_cfg and uses + * that information to then populate a DGLORTMAP/DEC entry and the queues + * to which it has been assigned. 
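+ *
+ * On the VF this is currently a stub: the @dglort pointer is validated and
+ * the call succeeds without sending a configuration message (see the note
+ * in the body below).  For reference, the width fields of struct
+ * fm10k_dglort_cfg such as queue_l and vsi_l give the number of GLORT bits
+ * used for the queue and VSI lookups, so a configuration with queue_b = 0
+ * and queue_l = 8, for instance, would roughly map the low 8 bits of the
+ * GLORT onto queues 0-255.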
+ **/ +static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, + struct fm10k_dglort_cfg *dglort) +{ + /* verify the dglort pointer */ + if (!dglort) + return FM10K_ERR_PARAM; + + /* stub for now until we determine correct message for this */ + + return 0; +} + +/** + * fm10k_adjust_systime_vf - Adjust systime frequency + * @hw: pointer to hardware structure + * @ppb: adjustment rate in parts per billion + * + * This function takes an adjustment rate in parts per billion and will + * verify that this value is 0 as the VF cannot support adjusting the + * systime clock. + * + * If the ppb value is non-zero the return is ERR_PARAM else success + **/ +static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) +{ + /* The VF cannot adjust the clock frequency, however it should + * already have a syntonic clock with whichever host interface is + * running as the master for the host interface clock domain so + * there should be not frequency adjustment necessary. + */ + return ppb ? FM10K_ERR_PARAM : 0; +} + +/** + * fm10k_read_systime_vf - Reads value of systime registers + * @hw: pointer to the hardware structure + * + * Function reads the content of 2 registers, combined to represent a 64 bit + * value measured in nanoseconds. In order to guarantee the value is accurate + * we check the 32 most significant bits both before and after reading the + * 32 least significant bits to verify they didn't change as we were reading + * the registers. + **/ +static u64 fm10k_read_systime_vf(struct fm10k_hw *hw) +{ + u32 systime_l, systime_h, systime_tmp; + + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + + do { + systime_tmp = systime_h; + systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME); + systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); + } while (systime_tmp != systime_h); + + return ((u64)systime_h << 32) | systime_l; +} + +static const struct fm10k_msg_data fm10k_msg_data_vf[] = { + FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), + FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), + FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), +}; + +static struct fm10k_mac_ops mac_ops_vf = { + .get_bus_info = &fm10k_get_bus_info_generic, + .reset_hw = &fm10k_reset_hw_vf, + .init_hw = &fm10k_init_hw_vf, + .start_hw = &fm10k_start_hw_generic, + .stop_hw = &fm10k_stop_hw_vf, + .is_slot_appropriate = &fm10k_is_slot_appropriate_vf, + .update_vlan = &fm10k_update_vlan_vf, + .read_mac_addr = &fm10k_read_mac_addr_vf, + .update_uc_addr = &fm10k_update_uc_addr_vf, + .update_mc_addr = &fm10k_update_mc_addr_vf, + .update_xcast_mode = &fm10k_update_xcast_mode_vf, + .update_int_moderator = &fm10k_update_int_moderator_vf, + .update_lport_state = &fm10k_update_lport_state_vf, + .update_hw_stats = &fm10k_update_hw_stats_vf, + .rebind_hw_stats = &fm10k_rebind_hw_stats_vf, + .configure_dglort_map = &fm10k_configure_dglort_map_vf, + .get_host_state = &fm10k_get_host_state_generic, + .adjust_systime = &fm10k_adjust_systime_vf, + .read_systime = &fm10k_read_systime_vf, +}; + +static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) +{ + fm10k_get_invariants_generic(hw); + + return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0); +} + +struct fm10k_info fm10k_vf_info = { + .mac = fm10k_mac_vf, + .get_invariants = &fm10k_get_invariants_vf, + .mac_ops = &mac_ops_vf, +}; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h new file mode 100644 index 000000000..06a99d794 --- /dev/null +++ 
b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -0,0 +1,78 @@ +/* Intel Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _FM10K_VF_H_ +#define _FM10K_VF_H_ + +#include "fm10k_type.h" +#include "fm10k_common.h" + +enum fm10k_vf_tlv_msg_id { + FM10K_VF_MSG_ID_TEST = 0, /* msg ID reserved for testing */ + FM10K_VF_MSG_ID_MSIX, + FM10K_VF_MSG_ID_MAC_VLAN, + FM10K_VF_MSG_ID_LPORT_STATE, + FM10K_VF_MSG_ID_1588, + FM10K_VF_MSG_ID_MAX, +}; + +enum fm10k_tlv_mac_vlan_attr_id { + FM10K_MAC_VLAN_MSG_VLAN, + FM10K_MAC_VLAN_MSG_SET, + FM10K_MAC_VLAN_MSG_MAC, + FM10K_MAC_VLAN_MSG_DEFAULT_MAC, + FM10K_MAC_VLAN_MSG_MULTICAST, + FM10K_MAC_VLAN_MSG_ID_MAX +}; + +enum fm10k_tlv_lport_state_attr_id { + FM10K_LPORT_STATE_MSG_DISABLE, + FM10K_LPORT_STATE_MSG_XCAST_MODE, + FM10K_LPORT_STATE_MSG_READY, + FM10K_LPORT_STATE_MSG_MAX +}; + +enum fm10k_tlv_1588_attr_id { + FM10K_1588_MSG_TIMESTAMP, + FM10K_1588_MSG_MAX +}; + +#define FM10K_VF_MSG_MSIX_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func) + +s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[]; +#define FM10K_VF_MSG_MAC_VLAN_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MAC_VLAN, \ + fm10k_mac_vlan_msg_attr, func) + +s32 fm10k_msg_lport_state_vf(struct fm10k_hw *, u32 **, + struct fm10k_mbx_info *); +extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[]; +#define FM10K_VF_MSG_LPORT_STATE_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \ + fm10k_lport_state_msg_attr, func) + +extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; +#define FM10K_VF_MSG_1588_HANDLER(func) \ + FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) + +extern struct fm10k_info fm10k_vf_info; +#endif /* _FM10K_VF_H */ diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile new file mode 100644 index 000000000..b4729ba57 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -0,0 +1,47 @@ +################################################################################ +# +# Intel Ethernet Controller XL710 Family Linux Driver +# Copyright(c) 2013 - 2015 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . 
+# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver +# + +obj-$(CONFIG_I40E) += i40e.o + +i40e-objs := i40e_main.o \ + i40e_ethtool.o \ + i40e_adminq.o \ + i40e_common.o \ + i40e_hmc.o \ + i40e_lan_hmc.o \ + i40e_nvm.o \ + i40e_debugfs.o \ + i40e_diag.o \ + i40e_txrx.o \ + i40e_ptp.o \ + i40e_virtchnl_pf.o + +i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o +i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h new file mode 100644 index 000000000..5d4730712 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -0,0 +1,759 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_H_ +#define _I40E_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "i40e_type.h" +#include "i40e_prototype.h" +#ifdef I40E_FCOE +#include "i40e_fcoe.h" +#endif +#include "i40e_virtchnl.h" +#include "i40e_virtchnl_pf.h" +#include "i40e_txrx.h" +#include "i40e_dcb.h" + +/* Useful i40e defaults */ +#define I40E_BASE_PF_SEID 16 +#define I40E_BASE_VSI_SEID 512 +#define I40E_BASE_VEB_SEID 288 +#define I40E_MAX_VEB 16 + +#define I40E_MAX_NUM_DESCRIPTORS 4096 +#define I40E_MAX_REGISTER 0x800000 +#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) +#define I40E_DEFAULT_NUM_DESCRIPTORS 512 +#define I40E_REQ_DESCRIPTOR_MULTIPLE 32 +#define I40E_MIN_NUM_DESCRIPTORS 64 +#define I40E_MIN_MSIX 2 +#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ +#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ +#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ +#define I40E_DEFAULT_QUEUES_PER_VF 4 +#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ +#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */ +#define I40E_FDIR_RING 0 +#define I40E_FDIR_RING_COUNT 32 +#ifdef I40E_FCOE +#define I40E_DEFAULT_FCOE 8 /* default number of QPs for FCoE */ +#define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ +#endif /* I40E_FCOE */ +#define I40E_MAX_AQ_BUF_SIZE 4096 +#define I40E_AQ_LEN 256 +#define I40E_AQ_WORK_LIMIT 32 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DEFAULT_MSG_ENABLE 4 +#define I40E_QUEUE_WAIT_RETRY_LIMIT 10 +#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) + +/* Ethtool Private Flags */ +#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) + +#define I40E_NVM_VERSION_LO_SHIFT 0 +#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) +#define I40E_NVM_VERSION_HI_SHIFT 12 +#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) + +/* The values in here are decimal coded as hex as is the case in the NVM map*/ +#define I40E_CURRENT_NVM_VERSION_HI 0x2 +#define I40E_CURRENT_NVM_VERSION_LO 0x40 + +/* magic for getting defines into strings */ +#define STRINGIFY(foo) #foo +#define XSTRINGIFY(bar) STRINGIFY(bar) + +#define I40E_RX_DESC(R, i) \ + ((ring_is_16byte_desc_enabled(R)) \ + ? 
(union i40e_32byte_rx_desc *) \ + (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \ + : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))) +#define I40E_TX_DESC(R, i) \ + (&(((struct i40e_tx_desc *)((R)->desc))[i])) +#define I40E_TX_CTXTDESC(R, i) \ + (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) +#define I40E_TX_FDIRDESC(R, i) \ + (&(((struct i40e_filter_program_desc *)((R)->desc))[i])) + +/* default to trying for four seconds */ +#define I40E_TRY_LINK_TIMEOUT (4 * HZ) + +/* driver state flags */ +enum i40e_state_t { + __I40E_TESTING, + __I40E_CONFIG_BUSY, + __I40E_CONFIG_DONE, + __I40E_DOWN, + __I40E_NEEDS_RESTART, + __I40E_SERVICE_SCHED, + __I40E_ADMINQ_EVENT_PENDING, + __I40E_MDD_EVENT_PENDING, + __I40E_VFLR_EVENT_PENDING, + __I40E_RESET_RECOVERY_PENDING, + __I40E_RESET_INTR_RECEIVED, + __I40E_REINIT_REQUESTED, + __I40E_PF_RESET_REQUESTED, + __I40E_CORE_RESET_REQUESTED, + __I40E_GLOBAL_RESET_REQUESTED, + __I40E_EMP_RESET_REQUESTED, + __I40E_EMP_RESET_INTR_RECEIVED, + __I40E_FILTER_OVERFLOW_PROMISC, + __I40E_SUSPENDED, + __I40E_PTP_TX_IN_PROGRESS, + __I40E_BAD_EEPROM, + __I40E_DOWN_REQUESTED, + __I40E_FD_FLUSH_REQUESTED, + __I40E_RESET_FAILED, + __I40E_PORT_TX_SUSPENDED, + __I40E_VF_DISABLE, +}; + +enum i40e_interrupt_policy { + I40E_INTERRUPT_BEST_CASE, + I40E_INTERRUPT_MEDIUM, + I40E_INTERRUPT_LOWEST +}; + +struct i40e_lump_tracking { + u16 num_entries; + u16 search_hint; + u16 list[0]; +#define I40E_PILE_VALID_BIT 0x8000 +}; + +#define I40E_DEFAULT_ATR_SAMPLE_RATE 20 +#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 +#define I40E_FDIR_BUFFER_FULL_MARGIN 10 +#define I40E_FDIR_BUFFER_HEAD_ROOM 32 +#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) + +#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) + +enum i40e_fd_stat_idx { + I40E_FD_STAT_ATR, + I40E_FD_STAT_SB, + I40E_FD_STAT_PF_COUNT +}; +#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT) +#define I40E_FD_ATR_STAT_IDX(pf_id) \ + (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR) +#define I40E_FD_SB_STAT_IDX(pf_id) \ + (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB) + +struct i40e_fdir_filter { + struct hlist_node fdir_node; + /* filter ipnut set */ + u8 flow_type; + u8 ip4_proto; + /* TX packet view of src and dst */ + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be32 sctp_v_tag; + /* filter control */ + u16 q_index; + u8 flex_off; + u8 pctype; + u16 dest_vsi; + u8 dest_ctl; + u8 fd_status; + u16 cnt_index; + u32 fd_id; +}; + +#define I40E_ETH_P_LLDP 0x88cc + +#define I40E_DCB_PRIO_TYPE_STRICT 0 +#define I40E_DCB_PRIO_TYPE_ETS 1 +#define I40E_DCB_STRICT_PRIO_CREDITS 127 +#define I40E_MAX_USER_PRIORITY 8 +/* DCB per TC information data structure */ +struct i40e_tc_info { + u16 qoffset; /* Queue offset from base queue */ + u16 qcount; /* Total Queues */ + u8 netdev_tc; /* Netdev TC index if netdev associated */ +}; + +/* TC configuration data structure */ +struct i40e_tc_configuration { + u8 numtc; /* Total number of enabled TCs */ + u8 enabled_tc; /* TC map */ + struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* struct that defines the Ethernet device */ +struct i40e_pf { + struct pci_dev *pdev; + struct i40e_hw hw; + unsigned long state; + unsigned long link_check_timeout; + struct msix_entry *msix_entries; + bool fc_autoneg_status; + + u16 eeprom_version; + u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ + u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ + u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ 
+ u16 num_req_vfs; /* num VFs requested for this VF */ + u16 num_vf_qps; /* num queue pairs per VF */ +#ifdef I40E_FCOE + u16 num_fcoe_qps; /* num fcoe queues this PF has set up */ + u16 num_fcoe_msix; /* num queue vectors per fcoe pool */ +#endif /* I40E_FCOE */ + u16 num_lan_qps; /* num lan queues this PF has set up */ + u16 num_lan_msix; /* num queue vectors for the base PF vsi */ + int queues_left; /* queues left unclaimed */ + u16 rss_size; /* num queues in the RSS array */ + u16 rss_size_max; /* HW defined max RSS queues */ + u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ + u16 num_alloc_vsi; /* num VSIs this driver supports */ + u8 atr_sample_rate; + bool wol_en; + + struct hlist_head fdir_filter_list; + u16 fdir_pf_active_filters; + u16 fd_sb_cnt_idx; + u16 fd_atr_cnt_idx; + unsigned long fd_flush_timestamp; + u32 fd_flush_cnt; + u32 fd_add_err; + u32 fd_atr_cnt; + u32 fd_tcp_rule; + +#ifdef CONFIG_I40E_VXLAN + __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + u16 pending_vxlan_bitmap; + +#endif + enum i40e_interrupt_policy int_policy; + u16 rx_itr_default; + u16 tx_itr_default; + u32 msg_enable; + char int_name[I40E_INT_NAME_STR_LEN]; + u16 adminq_work_limit; /* num of admin receive queue desc to process */ + unsigned long service_timer_period; + unsigned long service_timer_previous; + struct timer_list service_timer; + struct work_struct service_task; + + u64 flags; +#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1) +#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2) +#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3) +#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4) +#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5) +#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6) +#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) +#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) +#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) +#ifdef I40E_FCOE +#define I40E_FLAG_FCOE_ENABLED (u64)(1 << 11) +#endif /* I40E_FCOE */ +#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) +#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) +#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) +#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15) +#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17) +#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18) +#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19) +#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) +#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21) +#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22) +#define I40E_FLAG_PTP (u64)(1 << 25) +#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) +#ifdef CONFIG_I40E_VXLAN +#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) +#endif +#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) +#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) + + /* tracks features that get auto disabled by errors */ + u64 auto_disable_flags; + +#ifdef I40E_FCOE + struct i40e_fcoe fcoe; + +#endif /* I40E_FCOE */ + bool stat_offsets_loaded; + struct i40e_hw_port_stats stats; + struct i40e_hw_port_stats stats_offsets; + u32 tx_timeout_count; + u32 tx_timeout_recovery_level; + unsigned long tx_timeout_last_recovery; + u32 tx_sluggish_count; + u32 hw_csum_rx_error; + u32 led_status; + u16 corer_count; /* Core reset count */ + u16 globr_count; /* Global reset count */ + u16 empr_count; /* EMP reset count */ + u16 pfr_count; /* PF reset count */ + u16 sw_int_count; /* SW interrupt count */ + + struct mutex switch_mutex; + u16 lan_vsi; /* our default LAN VSI */ + u16 lan_veb; /* initial relay, if exists */ +#define I40E_NO_VEB 0xffff +#define I40E_NO_VSI 
0xffff + u16 next_vsi; /* Next unallocated VSI - 0-based! */ + struct i40e_vsi **vsi; + struct i40e_veb *veb[I40E_MAX_VEB]; + + struct i40e_lump_tracking *qp_pile; + struct i40e_lump_tracking *irq_pile; + + /* switch config info */ + u16 pf_seid; + u16 main_vsi_seid; + u16 mac_seid; + struct kobject *switch_kobj; +#ifdef CONFIG_DEBUG_FS + struct dentry *i40e_dbg_pf; +#endif /* CONFIG_DEBUG_FS */ + + u16 instance; /* A unique number per i40e_pf instance in the system */ + + /* sr-iov config info */ + struct i40e_vf *vf; + int num_alloc_vfs; /* actual number of VFs allocated */ + u32 vf_aq_requests; + + /* DCBx/DCBNL capability for PF that indicates + * whether DCBx is managed by firmware or host + * based agent (LLDPAD). Also, indicates what + * flavor of DCBx protocol (IEEE/CEE) is supported + * by the device. For now we're supporting IEEE + * mode only. + */ + u16 dcbx_cap; + + u32 fcoe_hmc_filt_num; + u32 fcoe_hmc_cntx_num; + struct i40e_filter_control_settings filter_settings; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; /* Used to protect the device time registers. */ + u64 ptp_base_adj; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + bool ptp_tx; + bool ptp_rx; + u16 rss_table_size; + /* These are only valid in NPAR modes */ + u32 npar_max_bw; + u32 npar_min_bw; +}; + +struct i40e_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; +#define I40E_VLAN_ANY -1 + s16 vlan; + u8 counter; /* number of instances of this filter */ + bool is_vf; /* filter belongs to a VF */ + bool is_netdev; /* filter belongs to a netdev */ + bool changed; /* filter needs to be sync'd to the HW */ + bool is_laa; /* filter is a Locally Administered Address */ +}; + +struct i40e_veb { + struct i40e_pf *pf; + u16 idx; + u16 veb_idx; /* index of VEB parent */ + u16 seid; + u16 uplink_seid; + u16 stats_idx; /* index of VEB parent */ + u8 enabled_tc; + u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ + u16 flags; + u16 bw_limit; + u8 bw_max_quanta; + bool is_abs_credits; + u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS]; + u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS]; + u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS]; + struct kobject *kobj; + bool stat_offsets_loaded; + struct i40e_eth_stats stats; + struct i40e_eth_stats stats_offsets; +}; + +/* struct that defines a VSI, associated with a dev */ +struct i40e_vsi { + struct net_device *netdev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + bool netdev_registered; + bool stat_offsets_loaded; + + u32 current_netdev_flags; + unsigned long state; +#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0) +#define I40E_VSI_FLAG_VEB_OWNER (1<<1) + unsigned long flags; + + struct list_head mac_filter_list; + + /* VSI stats */ + struct rtnl_link_stats64 net_stats; + struct rtnl_link_stats64 net_stats_offsets; + struct i40e_eth_stats eth_stats; + struct i40e_eth_stats eth_stats_offsets; +#ifdef I40E_FCOE + struct i40e_fcoe_stats fcoe_stats; + struct i40e_fcoe_stats fcoe_stats_offsets; + bool fcoe_stat_offsets_loaded; +#endif + u32 tx_restart; + u32 tx_busy; + u32 rx_buf_failed; + u32 rx_page_failed; + + /* These are containers of ring pointers, allocated at run-time */ + struct i40e_ring **rx_rings; + struct i40e_ring **tx_rings; + + u16 work_limit; + /* high bit set means dynamic, use accessor routines to read/write. + * hardware only supports 2us resolution for the ITR registers. 
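+ * (as an arithmetic example, a 50 usec interval corresponds to a register
+ * count of 25).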
+ * these values always store the USER setting, and must be converted + * before programming to a register. + */ + u16 rx_itr_setting; + u16 tx_itr_setting; + + u16 rss_table_size; + u16 rss_size; + + u16 max_frame; + u16 rx_hdr_len; + u16 rx_buf_len; + u8 dtype; + + /* List of q_vectors allocated to this VSI */ + struct i40e_q_vector **q_vectors; + int num_q_vectors; + int base_vector; + bool irqs_ready; + + u16 seid; /* HW index of this VSI (absolute index) */ + u16 id; /* VSI number */ + u16 uplink_seid; + + u16 base_queue; /* vsi's first queue in hw array */ + u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ + u16 num_queue_pairs; /* Used tx and rx pairs */ + u16 num_desc; + enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ + u16 vf_id; /* Virtual function ID for SRIOV VSIs */ + + struct i40e_tc_configuration tc_config; + struct i40e_aqc_vsi_properties_data info; + + /* VSI BW limit (absolute across all TCs) */ + u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */ + + /* Relative TC credits across VSIs */ + u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS]; + /* TC BW limit credits within VSI */ + u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS]; + /* TC BW limit max quanta within VSI */ + u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS]; + + struct i40e_pf *back; /* Backreference to associated PF */ + u16 idx; /* index in pf->vsi[] */ + u16 veb_idx; /* index of VEB parent */ + struct kobject *kobj; /* sysfs object */ + + /* VSI specific handlers */ + irqreturn_t (*irq_handler)(int irq, void *data); + + /* current rxnfc data */ + struct ethtool_rxnfc rxnfc; /* current rss hash opts */ +} ____cacheline_internodealigned_in_smp; + +struct i40e_netdev_priv { + struct i40e_vsi *vsi; +}; + +/* struct that defines an interrupt vector */ +struct i40e_q_vector { + struct i40e_vsi *vsi; + + u16 v_idx; /* index in the vsi->q_vector array. 
*/ + u16 reg_idx; /* register index of the interrupt */ + + struct napi_struct napi; + + struct i40e_ring_container rx; + struct i40e_ring_container tx; + + u8 num_ringpairs; /* total number of ring pairs in vector */ + + cpumask_t affinity_mask; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[I40E_INT_NAME_STR_LEN]; +} ____cacheline_internodealigned_in_smp; + +/* lan device */ +struct i40e_device { + struct list_head list; + struct i40e_pf *pf; +}; + +/** + * i40e_fw_version_str - format the FW and NVM version strings + * @hw: ptr to the hardware info + **/ +static inline char *i40e_fw_version_str(struct i40e_hw *hw) +{ + static char buf[32]; + + snprintf(buf, sizeof(buf), + "f%d.%d.%05d a%d.%d n%x.%02x e%x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, + hw->aq.api_maj_ver, hw->aq.api_min_ver, + (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> + I40E_NVM_VERSION_HI_SHIFT, + (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> + I40E_NVM_VERSION_LO_SHIFT, + (hw->nvm.eetrack & 0xffffff)); + + return buf; +} + +/** + * i40e_netdev_to_pf: Retrieve the PF struct for given netdev + * @netdev: the corresponding netdev + * + * Return the PF struct for the given netdev + **/ +static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + return vsi->back; +} + +static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi, + irqreturn_t (*irq_handler)(int, void *)) +{ + vsi->irq_handler = irq_handler; +} + +/** + * i40e_rx_is_programming_status - check for programming status descriptor + * @qw: the first quad word of the program status descriptor + * + * A value of I40E_RX_PROG_STATUS_DESC_LENGTH in the descriptor length + * field indicates that this is a programming status descriptor for flow + * director or FCoE; otherwise it is a packet descriptor.
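+ *
+ * (The helper below recovers the length field simply by shifting the first
+ * quad word right by I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT; callers are
+ * expected to treat matching descriptors as status reports rather than
+ * received frames.)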
+ **/ +static inline bool i40e_rx_is_programming_status(u64 qw) +{ + return I40E_RX_PROG_STATUS_DESC_LENGTH == + (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT); +} + +/** + * i40e_get_fd_cnt_all - get the total FD filter space available + * @pf: pointer to the PF struct + **/ +static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) +{ + return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count; +} + +/* needed by i40e_ethtool.c */ +int i40e_up(struct i40e_vsi *vsi); +void i40e_down(struct i40e_vsi *vsi); +extern const char i40e_driver_name[]; +extern const char i40e_driver_version_str[]; +void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); +void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); +void i40e_update_stats(struct i40e_vsi *vsi); +void i40e_update_eth_stats(struct i40e_vsi *vsi); +struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); +int i40e_fetch_switch_configuration(struct i40e_pf *pf, + bool printconfig); + +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, + struct i40e_pf *pf, bool add); +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add); +void i40e_fdir_check_and_reenable(struct i40e_pf *pf); +u32 i40e_get_current_fd_count(struct i40e_pf *pf); +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_global_fd_count(struct i40e_pf *pf); +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); +void i40e_set_ethtool_ops(struct net_device *netdev); +struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev); +void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev); +int i40e_sync_vsi_filters(struct i40e_vsi *vsi); +struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, + u16 uplink, u32 param1); +int i40e_vsi_release(struct i40e_vsi *vsi); +struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type, + struct i40e_vsi *start_vsi); +#ifdef I40E_FCOE +void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt, + u8 enabled_tc, bool is_add); +#endif +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc); +void i40e_veb_release(struct i40e_veb *veb); + +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc); +i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); +void i40e_vsi_remove_pvid(struct i40e_vsi *vsi); +void i40e_vsi_reset_stats(struct i40e_vsi *vsi); +void i40e_pf_reset_stats(struct i40e_pf *pf); +#ifdef CONFIG_DEBUG_FS +void i40e_dbg_pf_init(struct i40e_pf *pf); +void i40e_dbg_pf_exit(struct i40e_pf *pf); +void i40e_dbg_init(void); +void i40e_dbg_exit(void); +#else +static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {} +static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {} +static inline void i40e_dbg_init(void) {} +static inline void i40e_dbg_exit(void) {} +#endif /* CONFIG_DEBUG_FS*/ +void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); +void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); +#ifdef I40E_FCOE +struct 
rtnl_link_stats64 *i40e_get_netdev_stats_struct( + struct net_device *netdev, + struct rtnl_link_stats64 *storage); +int i40e_set_mac(struct net_device *netdev, void *p); +void i40e_set_rx_mode(struct net_device *netdev); +#endif +int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +#ifdef I40E_FCOE +void i40e_tx_timeout(struct net_device *netdev); +int i40e_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid); +int i40e_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid); +#endif +int i40e_open(struct net_device *netdev); +int i40e_vsi_open(struct i40e_vsi *vsi); +void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); +int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); +struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev); +bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); +struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev); +#ifdef I40E_FCOE +int i40e_close(struct net_device *netdev); +int i40e_setup_tc(struct net_device *netdev, u8 tc); +void i40e_netpoll(struct net_device *netdev); +int i40e_fcoe_enable(struct net_device *netdev); +int i40e_fcoe_disable(struct net_device *netdev); +int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt); +u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf); +void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi); +void i40e_fcoe_vsi_setup(struct i40e_pf *pf); +int i40e_init_pf_fcoe(struct i40e_pf *pf); +int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi); +void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi); +int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb); +void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id); +#endif /* I40E_FCOE */ +void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); +#ifdef CONFIG_I40E_DCB +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg); +void i40e_dcbnl_set_all(struct i40e_vsi *vsi); +void i40e_dcbnl_setup(struct i40e_vsi *vsi); +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg); +#endif /* CONFIG_I40E_DCB */ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi); +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf); +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index); +void i40e_ptp_set_increment(struct i40e_pf *pf); +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +void i40e_ptp_init(struct i40e_pf *pf); +void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); +#endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c new file mode 100644 index 000000000..3e0d20037 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -0,0 +1,1034 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 
2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_status.h" +#include "i40e_type.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" + +static void i40e_resume_aq(struct i40e_hw *hw); + +/** + * i40e_is_nvm_update_op - return true if this is an NVM update operation + * @desc: API request descriptor + **/ +static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) +{ + return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) || + (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update)); +} + +/** + * i40e_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_asq and alloc_arq functions have already been called + **/ +static void i40e_adminq_init_regs(struct i40e_hw *hw) +{ + /* set head and tail registers in our local struct */ + if (i40e_is_vf(hw)) { + hw->aq.asq.tail = I40E_VF_ATQT1; + hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; + hw->aq.asq.bal = I40E_VF_ATQBAL1; + hw->aq.asq.bah = I40E_VF_ATQBAH1; + hw->aq.arq.tail = I40E_VF_ARQT1; + hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; + hw->aq.arq.bal = I40E_VF_ARQBAL1; + hw->aq.arq.bah = I40E_VF_ARQBAH1; + } else { + hw->aq.asq.tail = I40E_PF_ATQT; + hw->aq.asq.head = I40E_PF_ATQH; + hw->aq.asq.len = I40E_PF_ATQLEN; + hw->aq.asq.bal = I40E_PF_ATQBAL; + hw->aq.asq.bah = I40E_PF_ATQBAH; + hw->aq.arq.tail = I40E_PF_ARQT; + hw->aq.arq.head = I40E_PF_ARQH; + hw->aq.arq.len = I40E_PF_ARQLEN; + hw->aq.arq.bal = I40E_PF_ARQBAL; + hw->aq.arq.bah = I40E_PF_ARQBAH; + } +} + +/** + * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + i40e_mem_atq_ring, + (hw->aq.num_asq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + return ret_code; + + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + (hw->aq.num_asq_entries * + sizeof(struct i40e_asq_cmd_details))); + if (ret_code) { + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + return ret_code; + } + + return ret_code; +} + +/** + * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + i40e_mem_arq_ring, + (hw->aq.num_arq_entries * + sizeof(struct i40e_aq_desc)), + 
I40E_ADMINQ_DESC_ALIGNMENT); + + return ret_code; +} + +/** + * i40e_free_adminq_asq - Free Admin Queue send rings + * @hw: pointer to the hardware structure + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_asq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); +} + +/** + * i40e_free_adminq_arq - Free Admin Queue receive rings + * @hw: pointer to the hardware structure + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_arq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); +} + +/** + * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + + /* buffer_info structures do not need alignment */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_arq_bufs; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_arq_entries; i++) { + bi = &hw->aq.arq.r.arq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_arq_buf, + hw->aq.arq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_arq_bufs; + + /* now configure the descriptors for use */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = cpu_to_le16((u16)bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.external.addr_high = + cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = + cpu_to_le32(lower_32_bits(bi->pa)); + desc->params.external.param0 = 0; + desc->params.external.param1 = 0; + } + +alloc_arq_bufs: + return ret_code; + +unwind_alloc_arq_bufs: + /* don't try to free the one that failed... 
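The pre-posting loop above hands every receive descriptor the bus address of its pre-allocated buffer, split into 32-bit halves, plus the buffer length. A minimal standalone sketch of that split; the demo_* names are illustrative stand-ins, with plain uint32_t fields in place of the driver's __le32, cpu_to_le32() and upper_32_bits()/lower_32_bits() helpers.

#include <stdint.h>
#include <stdio.h>

struct demo_desc {                 /* stand-in for params.external in i40e_aq_desc */
	uint32_t addr_high;
	uint32_t addr_low;
	uint16_t datalen;
};

static void demo_post_buffer(struct demo_desc *d, uint64_t bus_addr, uint16_t size)
{
	d->addr_high = (uint32_t)(bus_addr >> 32);   /* like upper_32_bits() */
	d->addr_low  = (uint32_t)bus_addr;           /* like lower_32_bits() */
	d->datalen   = size;                         /* buffer length reported to FW */
}

int main(void)
{
	struct demo_desc d;

	demo_post_buffer(&d, 0x0000001234567890ULL, 512);
	printf("high=0x%08x low=0x%08x len=%u\n", d.addr_high, d.addr_low, d.datalen);
	return 0;
}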
*/ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + + return ret_code; +} + +/** + * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_dma_mem *bi; + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_asq_bufs; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_asq_entries; i++) { + bi = &hw->aq.asq.r.asq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_asq_buf, + hw->aq.asq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_asq_bufs; + } +alloc_asq_bufs: + return ret_code; + +unwind_alloc_asq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + + return ret_code; +} + +/** + * i40e_free_arq_bufs - Free receive queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_arq_bufs(struct i40e_hw *hw) +{ + int i; + + /* free descriptors */ + for (i = 0; i < hw->aq.num_arq_entries; i++) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); +} + +/** + * i40e_free_asq_bufs - Free send queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_asq_bufs(struct i40e_hw *hw) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < hw->aq.num_asq_entries; i++) + if (hw->aq.asq.r.asq_bi[i].pa) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); +} + +/** + * i40e_config_asq_regs - configure ASQ registers + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the transmit queue + **/ +static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); + wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.asq.bal); + if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_config_arq_regs - ARQ register configuration + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the receive (event queue) + **/ +static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.arq.head, 0); + 
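i40e_config_asq_regs() above programs head, tail, length and ring base, then reads one register back to confirm the device accepted the write. A standalone sketch of that program-then-verify pattern, with the MMIO accessors stubbed by an array; the DEMO_LEN_ENABLE bit position is an assumed stand-in, not the real I40E_PF_ATQLEN_ATQENABLE_MASK value.

#include <stdint.h>
#include <stdio.h>

/* Tiny stand-ins for the driver's wr32()/rd32() MMIO accessors. */
static uint32_t regs[8];
static void demo_wr32(unsigned int off, uint32_t val) { regs[off] = val; }
static uint32_t demo_rd32(unsigned int off)           { return regs[off]; }

enum { REG_HEAD, REG_TAIL, REG_LEN, REG_BAL, REG_BAH };
#define DEMO_LEN_ENABLE (1u << 31)      /* assumed stand-in for the enable bit */

/* Program base/length, then read one register back to confirm the write took. */
static int demo_config_queue(uint64_t ring_pa, uint16_t entries)
{
	demo_wr32(REG_HEAD, 0);
	demo_wr32(REG_TAIL, 0);
	demo_wr32(REG_LEN, entries | DEMO_LEN_ENABLE);
	demo_wr32(REG_BAL, (uint32_t)ring_pa);
	demo_wr32(REG_BAH, (uint32_t)(ring_pa >> 32));

	return demo_rd32(REG_BAL) == (uint32_t)ring_pa ? 0 : -1;
}

int main(void)
{
	printf("config %s\n", demo_config_queue(0x12345000ULL, 64) ? "failed" : "ok");
	return 0;
}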
wr32(hw, hw->aq.arq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); + wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.arq.bal); + if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_init_asq - main initialization routine for ASQ + * @hw: pointer to the hardware structure + * + * This is the main initialization routine for the Admin Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_asq_entries == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + hw->aq.asq.count = hw->aq.num_asq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_asq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_asq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_asq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success! */ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_asq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_init_arq - initialize ARQ + * @hw: pointer to the hardware structure + * + * The main initialization routine for the Admin Receive (Event) Queue. + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.arq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + hw->aq.arq.count = hw->aq.num_arq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_arq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_arq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_arq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success! 
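i40e_init_asq()/i40e_init_arq() above bring a queue up in stages (ring memory, buffers, registers) and unwind with gotos so a failure only frees what has already succeeded. A compilable sketch of the same idiom using plain calloc()/free() in place of the driver's DMA allocators; all demo_* names are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Staged bring-up with goto-based unwind: each exit path undoes only the
 * steps that completed before the failure.
 */
static int demo_init_queue(void **ring, void **bufs, size_t entries)
{
	int err = -1;

	*ring = calloc(entries, 32);          /* step 1: descriptor ring */
	if (!*ring)
		goto out;

	*bufs = calloc(entries, 512);         /* step 2: data buffers */
	if (!*bufs)
		goto free_ring;

	/* step 3: would program hardware registers here; pretend it succeeded */
	return 0;

free_ring:
	free(*ring);
	*ring = NULL;
out:
	return err;
}

int main(void)
{
	void *ring, *bufs;

	if (!demo_init_queue(&ring, &bufs, 64)) {
		puts("queue initialized");
		free(bufs);
		free(ring);
	}
	return 0;
}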
*/ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_arq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_asq - shutdown the ASQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Send Queue + **/ +static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); + wr32(hw, hw->aq.asq.bal, 0); + wr32(hw, hw->aq.asq.bah, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.asq_mutex); + + hw->aq.asq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_asq_bufs(hw); + + mutex_unlock(&hw->aq.asq_mutex); + + return ret_code; +} + +/** + * i40e_shutdown_arq - shutdown ARQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Receive Queue + **/ +static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); + wr32(hw, hw->aq.arq.bal, 0); + wr32(hw, hw->aq.arq.bah, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.arq_mutex); + + hw->aq.arq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_arq_bufs(hw); + + mutex_unlock(&hw->aq.arq_mutex); + + return ret_code; +} + +/** + * i40e_init_adminq - main initialization routine for Admin Queue + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * - hw->aq.asq_buf_size + **/ +i40e_status i40e_init_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code; + u16 eetrack_lo, eetrack_hi; + int retry = 0; + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.num_asq_entries == 0) || + (hw->aq.arq_buf_size == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + /* initialize locks */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + + /* Set up register offsets */ + i40e_adminq_init_regs(hw); + + /* setup ASQ command write back timeout */ + hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; + + /* allocate the ASQ */ + ret_code = i40e_init_asq(hw); + if (ret_code) + goto init_adminq_destroy_locks; + + /* allocate the ARQ */ + ret_code = i40e_init_arq(hw); + if (ret_code) + goto init_adminq_free_asq; + + /* There are some cases where the firmware may not be quite ready + * for AdminQ operations, so we retry the AdminQ setup a few times + * if we see timeouts in this first AQ call. 
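The comment on i40e_init_adminq() spells out the fields a caller must fill in before the call. A hedged usage sketch of that contract: it assumes the driver's own headers are available and that i40e_prototype.h declares i40e_init_adminq(), as the include list of i40e_adminq.c suggests; the depths and buffer sizes are arbitrary example values, not the driver's real defaults.

#include "i40e_type.h"
#include "i40e_prototype.h"

/* Example depths and buffer sizes only; the real driver picks its own. */
static i40e_status demo_bring_up_adminq(struct i40e_hw *hw)
{
	hw->aq.num_asq_entries = 128;
	hw->aq.num_arq_entries = 128;
	hw->aq.asq_buf_size = 4096;
	hw->aq.arq_buf_size = 4096;

	/* i40e_init_adminq() bails out with I40E_ERR_CONFIG if any of the
	 * four fields above is still zero.
	 */
	return i40e_init_adminq(hw);
}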
+ */ + do { + ret_code = i40e_aq_get_firmware_version(hw, + &hw->aq.fw_maj_ver, + &hw->aq.fw_min_ver, + &hw->aq.fw_build, + &hw->aq.api_maj_ver, + &hw->aq.api_min_ver, + NULL); + if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) + break; + retry++; + msleep(100); + i40e_resume_aq(hw); + } while (retry < 10); + if (ret_code != I40E_SUCCESS) + goto init_adminq_free_arq; + + /* get the NVM version info */ + i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, + &hw->nvm.version); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); + i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); + hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + + if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { + ret_code = I40E_ERR_FIRMWARE_API_VERSION; + goto init_adminq_free_arq; + } + + /* pre-emptive resource lock release */ + i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + hw->aq.nvm_release_on_done = false; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + + ret_code = i40e_aq_set_hmc_resource_profile(hw, + I40E_HMC_PROFILE_DEFAULT, + 0, + NULL); + ret_code = 0; + + /* success! */ + goto init_adminq_exit; + +init_adminq_free_arq: + i40e_shutdown_arq(hw); +init_adminq_free_asq: + i40e_shutdown_asq(hw); +init_adminq_destroy_locks: + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_adminq - shutdown routine for the Admin Queue + * @hw: pointer to the hardware structure + **/ +i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (i40e_check_asq_alive(hw)) + i40e_aq_queue_shutdown(hw, true); + + i40e_shutdown_asq(hw); + i40e_shutdown_arq(hw); + + /* destroy the locks */ + + return ret_code; +} + +/** + * i40e_clean_asq - cleans Admin send queue + * @hw: pointer to the hardware structure + * + * returns the number of free desc + **/ +static u16 i40e_clean_asq(struct i40e_hw *hw) +{ + struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_asq_cmd_details *details; + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + while (rd32(hw, hw->aq.asq.head) != ntc) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "%s: ntc %d head %d.\n", __func__, ntc, + rd32(hw, hw->aq.asq.head)); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = + (I40E_ADMINQ_CALLBACK)details->callback; + desc_cb = *desc; + cb_func(hw, &desc_cb); + } + memset(desc, 0, sizeof(*desc)); + memset(details, 0, sizeof(*details)); + ntc++; + if (ntc == asq->count) + ntc = 0; + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + } + + asq->next_to_clean = ntc; + + return I40E_DESC_UNUSED(asq); +} + +/** + * i40e_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. 
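i40e_clean_asq() above advances next_to_clean up to the hardware head and reports the remaining space via I40E_DESC_UNUSED(), a macro defined elsewhere in the driver. A standalone sketch of the usual circular-ring arithmetic behind such a macro: one slot is always left unused so that next_to_clean == next_to_use unambiguously means "empty".

#include <stdint.h>
#include <stdio.h>

static uint16_t demo_desc_unused(uint16_t count, uint16_t next_to_clean,
				 uint16_t next_to_use)
{
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}

int main(void)
{
	/* empty 64-entry ring: 63 usable slots */
	printf("%u\n", demo_desc_unused(64, 10, 10));
	/* three commands in flight, none cleaned yet: 60 slots left */
	printf("%u\n", demo_desc_unused(64, 10, 13));
	return 0;
}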
+ **/ +static bool i40e_asq_done(struct i40e_hw *hw) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + +} + +/** + * i40e_asq_send_command - send command to Admin Queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buff: buffer to use for indirect commands + * @buff_size: size of buffer for indirect commands + * @cmd_details: pointer to command details structure + * + * This is the main send command driver routine for the Admin Queue send + * queue. It runs the queue, cleans the queue, etc + **/ +i40e_status i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status = 0; + struct i40e_dma_mem *dma_buff = NULL; + struct i40e_asq_cmd_details *details; + struct i40e_aq_desc *desc_on_ring; + bool cmd_completed = false; + u16 retval = 0; + u32 val = 0; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: head overrun at %d\n", val); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_exit; + } + + if (hw->aq.asq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Admin queue not initialized.\n"); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_exit; + } + + details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + if (cmd_details) { + *details = *cmd_details; + + /* If the cmd_details are defined copy the cookie. The + * cpu_to_le32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + cpu_to_le32(upper_32_bits(details->cookie)); + desc->cookie_low = + cpu_to_le32(lower_32_bits(details->cookie)); + } + } else { + memset(details, 0, sizeof(struct i40e_asq_cmd_details)); + } + + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~cpu_to_le16(details->flags_dis); + desc->flags |= cpu_to_le16(details->flags_ena); + + mutex_lock(&hw->aq.asq_mutex); + + if (buff_size > hw->aq.asq_buf_size) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Invalid buffer size: %d.\n", + buff_size); + status = I40E_ERR_INVALID_SIZE; + goto asq_send_command_error; + } + + if (details->postpone && !details->async) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Async flag not set along with postpone flag"); + status = I40E_ERR_PARAM; + goto asq_send_command_error; + } + + /* call clean and check queue available function to reclaim the + * descriptors that were processed by FW, the function returns the + * number of desc available + */ + /* the clean function called here could be called in a separate thread + * in case of asynchronous completions + */ + if (i40e_clean_asq(hw) == 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Error queue is full.\n"); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + *desc_on_ring = *desc; + + /* if buff is not NULL assume indirect command */ + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + /* copy the user buff into the respective DMA buff */ + memcpy(dma_buff->va, buff, buff_size); + 
desc_on_ring->datalen = cpu_to_le16(buff_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.external.addr_high = + cpu_to_le32(upper_32_bits(dma_buff->pa)); + desc_on_ring->params.external.addr_low = + cpu_to_le32(lower_32_bits(dma_buff->pa)); + } + + /* bump the tail */ + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); + (hw->aq.asq.next_to_use)++; + if (hw->aq.asq.next_to_use == hw->aq.asq.count) + hw->aq.asq.next_to_use = 0; + if (!details->postpone) + wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + + /* if cmd_details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + u32 total_delay = 0; + + do { + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + if (i40e_asq_done(hw)) + break; + usleep_range(1000, 2000); + total_delay++; + } while (total_delay < hw->aq.asq_cmd_timeout); + } + + /* if ready, copy the desc back to temp */ + if (i40e_asq_done(hw)) { + *desc = *desc_on_ring; + if (buff != NULL) + memcpy(buff, dma_buff->va, buff_size); + retval = le16_to_cpu(desc->retval); + if (retval != 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Command completed with error 0x%X.\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + status = 0; + else + status = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + } + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: desc and buffer writeback:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); + + /* update the error if time out occurred */ + if ((!cmd_completed) && + (!details->async && !details->postpone)) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } + +asq_send_command_error: + mutex_unlock(&hw->aq.asq_mutex); +asq_send_command_exit: + return status; +} + +/** + * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + **/ +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode) +{ + /* zero out the desc */ + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); +} + +/** + * i40e_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
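The send path above polls the head register for descriptor writeback, sleeping roughly a millisecond per iteration up to asq_cmd_timeout before declaring a timeout. A standalone sketch of that bounded polling loop, with the hardware "done" test stubbed and the sleep elided so the example stays self-contained.

#include <stdbool.h>
#include <stdio.h>

/* Stub for checking the head register; pretends the FW finishes after 3 polls. */
static bool demo_desc_done(int *polls)
{
	return ++(*polls) >= 3;
}

static int demo_wait_for_writeback(unsigned int timeout_ms)
{
	unsigned int elapsed = 0;
	int polls = 0;

	do {
		if (demo_desc_done(&polls))
			return 0;               /* command completed */
		elapsed++;                      /* the driver sleeps ~1 ms here */
	} while (elapsed < timeout_ms);

	return -1;                              /* writeback timeout */
}

int main(void)
{
	printf("%s\n", demo_wait_for_writeback(250) ? "timed out" : "completed");
	return 0;
}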
It can also return how many events are + * left to process through 'pending' + **/ +i40e_status i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) +{ + i40e_status ret_code = 0; + u16 ntc = hw->aq.arq.next_to_clean; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* take the lock before we start messing with the ring */ + mutex_lock(&hw->aq.arq_mutex); + + /* set next_to_use to head */ + ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + goto clean_arq_element_out; + } + + /* now clean the next descriptor */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc_idx = ntc; + + flags = le16_to_cpu(desc->flags); + if (flags & I40E_AQ_FLAG_ERR) { + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.arq_last_status = + (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Event received with error 0x%X.\n", + hw->aq.arq_last_status); + } + + e->desc = *desc; + datalen = le16_to_cpu(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) + memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_len); + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message + * size + */ + bi = &hw->aq.arq.r.arq_bi[ntc]; + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->datalen = cpu_to_le16((u16)bi->size); + desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, hw->aq.arq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == hw->aq.num_arq_entries) + ntc = 0; + hw->aq.arq.next_to_clean = ntc; + hw->aq.arq.next_to_use = ntu; + +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending != NULL) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + mutex_unlock(&hw->aq.arq_mutex); + + if (i40e_is_nvm_update_op(&e->desc)) { + if (hw->aq.nvm_release_on_done) { + i40e_release_nvm(hw); + hw->aq.nvm_release_on_done = false; + } + } + + return ret_code; +} + +static void i40e_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h new file mode 100644 index 000000000..28e519a50 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -0,0 +1,157 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
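i40e_clean_arq_element() above reports how many events remain as (ntc > ntu ? count : 0) + (ntu - ntc). The same arithmetic as a standalone function, to make the wrap-around case explicit; demo_arq_pending() is an illustrative name, not a driver symbol.

#include <stdint.h>
#include <stdio.h>

/* Distance from the just-cleaned index (ntc) forward to the hardware head
 * (ntu), adding 'count' when the window wraps past the end of the ring.
 */
static uint16_t demo_arq_pending(uint16_t count, uint16_t ntc, uint16_t ntu)
{
	return (ntc > ntu ? count : 0) + (ntu - ntc);
}

int main(void)
{
	printf("%u\n", demo_arq_pending(64, 5, 9));    /* 4 events still queued */
	printf("%u\n", demo_arq_pending(64, 62, 1));   /* wrapped: 3 events */
	return 0;
}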
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_H_ +#define _I40E_ADMINQ_H_ + +#include "i40e_osdep.h" +#include "i40e_status.h" +#include "i40e_adminq_cmd.h" + +#define I40E_ADMINQ_DESC(R, i) \ + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + +#define I40E_ADMINQ_DESC_ALIGNMENT 4096 + +struct i40e_adminq_ring { + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ + + union { + struct i40e_dma_mem *asq_bi; + struct i40e_dma_mem *arq_bi; + } r; + + u16 count; /* Number of descriptors */ + u16 rx_buf_len; /* Admin Receive Queue buffer length */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; +}; + +/* ASQ transaction details */ +struct i40e_asq_cmd_details { + void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ + u64 cookie; + u16 flags_ena; + u16 flags_dis; + bool async; + bool postpone; +}; + +#define I40E_ADMINQ_DETAILS(R, i) \ + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) + +/* ARQ event information */ +struct i40e_arq_event_info { + struct i40e_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Admin Queue information */ +struct i40e_adminq_info { + struct i40e_adminq_ring arq; /* receive queue */ + struct i40e_adminq_ring asq; /* send queue */ + u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ + u16 num_arq_entries; /* receive queue depth */ + u16 num_asq_entries; /* send queue depth */ + u16 arq_buf_size; /* receive queue buffer size */ + u16 asq_buf_size; /* send queue buffer size */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ + u16 api_maj_ver; /* api major version */ + u16 api_min_ver; /* api minor version */ + bool nvm_release_on_done; + + struct mutex asq_mutex; /* Send queue lock */ + struct mutex arq_mutex; /* Receive queue lock */ + + /* last status values on send and receive queues */ + enum i40e_admin_queue_err asq_last_status; + enum i40e_admin_queue_err arq_last_status; +}; + +/** + * i40e_aq_rc_to_posix - convert errors to user-land codes + * aq_rc: AdminQ error code to convert + **/ +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) +{ + int aq_to_posix[] = { + 0, /* I40E_AQ_RC_OK */ + -EPERM, /* I40E_AQ_RC_EPERM */ + -ENOENT, /* I40E_AQ_RC_ENOENT */ + -ESRCH, /* I40E_AQ_RC_ESRCH */ + -EINTR, /* I40E_AQ_RC_EINTR */ + -EIO, /* I40E_AQ_RC_EIO */ + -ENXIO, /* I40E_AQ_RC_ENXIO */ + -E2BIG, /* 
I40E_AQ_RC_E2BIG */ + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ + -EACCES, /* I40E_AQ_RC_EACCES */ + -EFAULT, /* I40E_AQ_RC_EFAULT */ + -EBUSY, /* I40E_AQ_RC_EBUSY */ + -EEXIST, /* I40E_AQ_RC_EEXIST */ + -EINVAL, /* I40E_AQ_RC_EINVAL */ + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ + -ENOSYS, /* I40E_AQ_RC_ENOSYS */ + -ERANGE, /* I40E_AQ_RC_ERANGE */ + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ + -EROFS, /* I40E_AQ_RC_EMODE */ + -EFBIG, /* I40E_AQ_RC_EFBIG */ + }; + + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; + return aq_to_posix[aq_rc]; +} + +/* general information */ +#define I40E_AQ_LARGE_BUF 512 +#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ + +void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode); + +#endif /* _I40E_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h new file mode 100644 index 000000000..929e3d72a --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -0,0 +1,2337 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. 
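i40e_aq_rc_to_posix() above maps firmware return codes onto negative errno values through a bounds-checked table. A stripped-down standalone rendering of that table-lookup idea, covering only the first few codes; the demo_* names are illustrative.

#include <errno.h>
#include <stdio.h>

static const int demo_aq_to_posix[] = {
	0,        /* OK */
	-EPERM,   /* operation not permitted */
	-ENOENT,  /* no such element */
	-ESRCH,   /* bad opcode */
	-EINTR,   /* operation interrupted */
	-EIO,     /* I/O error */
};

static int demo_rc_to_posix(unsigned int aq_rc)
{
	/* bounds-check before indexing, as the real helper does */
	if (aq_rc >= sizeof(demo_aq_to_posix) / sizeof(demo_aq_to_posix[0]))
		return -ERANGE;
	return demo_aq_to_posix[aq_rc];
}

int main(void)
{
	printf("%d %d\n", demo_rc_to_posix(1), demo_rc_to_posix(99));
	return 0;
}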
+ */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + 
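The descriptor above packs flags, opcode, lengths, a 64-bit cookie and a 16-byte params area into 32 bytes. A host-endian mirror of that layout with a compile-time size check; the real structure uses __le16/__le32 with cpu_to_le16() stores, and the flag comments only restate the bit values listed above.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_aq_desc {
	uint16_t flags;
	uint16_t opcode;
	uint16_t datalen;
	uint16_t retval;
	uint32_t cookie_high;
	uint32_t cookie_low;
	uint8_t  params[16];
};

static_assert(sizeof(struct demo_aq_desc) == 32, "descriptor must be 32 bytes");

#define DEMO_FLAG_BUF (1u << 12)   /* I40E_AQ_FLAG_BUF, 0x1000 */
#define DEMO_FLAG_SI  (1u << 13)   /* I40E_AQ_FLAG_SI,  0x2000 */

int main(void)
{
	struct demo_aq_desc d = { .flags = DEMO_FLAG_BUF | DEMO_FLAG_SI };

	printf("flags=0x%04x size=%zu\n", d.flags, sizeof(d));
	return 0;
}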
i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 
0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, + i40e_aqc_opc_lldp_set_local_mib = 0x0A08, + i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
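I40E_CHECK_STRUCT_LEN() above turns a size mismatch into a division by zero inside a constant expression, so the build fails immediately while a correct size just leaves an unused enum behind. A standalone reproduction of the trick with a demo macro and a 16-byte struct.

#include <stdio.h>

/* If sizeof(struct X) != n the divisor becomes 0 and the compiler rejects the
 * constant expression; otherwise the enum is valid and simply never used.
 */
#define DEMO_CHECK_STRUCT_LEN(n, X) \
	enum demo_assert_##X { demo_assert_val_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

struct demo_cmd {
	unsigned int a;
	unsigned int b;
	unsigned char pad[8];
};

DEMO_CHECK_STRUCT_LEN(16, demo_cmd);      /* compiles: struct is 16 bytes */
/* DEMO_CHECK_STRUCT_LEN(20, demo_cmd);      would fail: division by zero */

int main(void)
{
	printf("sizeof(struct demo_cmd) = %zu\n", sizeof(struct demo_cmd));
	return 0;
}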
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { + u8 pf_id; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_ISCSI 0x0022 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + 
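As the naming-convention comment above says, direct command structures overlay the descriptor's 16-byte params area, which is why each one is pinned with I40E_CHECK_CMD_LENGTH(). A standalone illustration using a union and a host-endian stand-in modeled on i40e_aqc_queue_shutdown; the demo_* names are illustrative, and the real fields are little-endian.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_queue_shutdown {           /* modeled on i40e_aqc_queue_shutdown */
	uint32_t driver_unloading;
	uint8_t  reserved[12];
};

union demo_params {                    /* the 16-byte params area of a descriptor */
	uint8_t raw[16];
	struct demo_queue_shutdown qs;
};

int main(void)
{
	union demo_params p;

	memset(&p, 0, sizeof(p));
	p.qs.driver_unloading = 0x1;       /* I40E_AQ_DRIVER_UNLOADING */

	printf("sizeof(cmd)=%zu raw[0]=0x%02x\n",
	       sizeof(struct demo_queue_shutdown), p.raw[0]);
	return 0;
}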
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; + u8 reserved[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); + +/* Set NS Proxy Table Entry Command (indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +/* Switch configuration commands (0x02xx) */ + +/* Used by many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define 
I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and 
data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 
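The ingress/egress tables above store one 3-bit value per user priority at shifts 0, 3, ..., 21 with a 0x7 mask. A standalone sketch of packing such a table with generic shift/mask helpers; the demo_* macros are illustrative, not the driver's defines.

#include <stdint.h>
#include <stdio.h>

#define DEMO_UP_SHIFT(up)   ((up) * 3)
#define DEMO_UP_MASK(up)    (0x7u << DEMO_UP_SHIFT(up))

static uint32_t demo_set_up_entry(uint32_t table, unsigned int up, uint32_t val)
{
	table &= ~DEMO_UP_MASK(up);                      /* clear the 3-bit slot */
	table |= (val << DEMO_UP_SHIFT(up)) & DEMO_UP_MASK(up);
	return table;
}

int main(void)
{
	uint32_t table = 0;
	unsigned int up;

	for (up = 0; up < 8; up++)                       /* identity mapping 0..7 */
		table = demo_set_up_entry(table, up, up);

	printf("ingress table = 0x%08x\n", table);       /* prints 0x00fac688 */
	return 0;
}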
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define 
I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* 
flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define 
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_add_remove_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define 
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data); + +/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 
reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, + i40e_aqc_configure_switching_comp_ets_bw_limit_data); + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); + +/* Query Switching Component Bandwidth Allocation per Traffic Type + * (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 
0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ +}; + +I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); + +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_10GBASE_AOC = 0xC, + I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_1000BASE_SX = 0x1B, + I40E_PHY_TYPE_1000BASE_LX = 0x1C, + I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_MAX +}; + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) +}; + +struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + 
__le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; + u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define 
I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { + u8 command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ + I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* NVM Config Read (indirect 0x0704) */ +struct i40e_aqc_nvm_config_read { + __le16 cmd_flags; +#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 +#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + __le16 element_id_msw; /* MSWord of field ID */ + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write (indirect 0x0705) */ +struct i40e_aqc_nvm_config_write { + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +/* Used for 0x0704 as well as for 0x0705 commands */ +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ + (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) +#define 
I40E_AQ_ANVM_FEATURE 0 +#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +struct i40e_aqc_nvm_config_data_feature { + __le16 feature_id; +#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 +#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 +#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 + __le16 feature_options; + __le16 feature_selection; +}; + +I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); + +struct i40e_aqc_nvm_config_data_immediate_field { + __le32 field_id; + __le32 field_value; + __le16 field_options; + __le16 reserved; +}; + +I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +/* Alternate structure */ + +/* Direct write (direct 0x0900) + * Direct read (direct 0x0902) + */ +struct i40e_aqc_alternate_write { + __le32 address0; + __le32 data0; + __le32 address1; + __le32 data1; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); + +/* Indirect write (indirect 0x0901) + * Indirect read (indirect 0x0903) + */ + +struct i40e_aqc_alternate_ind_write { + __le32 address; + __le32 length; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); + +/* Done alternate write (direct 0x0904) + * uses i40e_aq_desc + */ +struct i40e_aqc_alternate_write_done { + __le16 cmd_flags; +#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 +#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 +#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 +#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); + +/* Set OEM mode (direct 0x0905) */ +struct i40e_aqc_alternate_set_mode { + __le32 mode; +#define I40E_AQ_ALTERNATE_MODE_NONE 0 +#define I40E_AQ_ALTERNATE_MODE_OEM 1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); + +/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ + +/* async events 0x10xx */ + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct i40e_aqc_lan_overflow { + __le32 prtdcb_rupto; + __le32 otx_ctl; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); + +/* Get LLDP MIB (indirect 0x0A00) */ +struct i40e_aqc_lldp_get_mib { + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) +/* TX pause flags use I40E_AQ_LINK_TX_* above */ + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); + +/* Configure LLDP MIB Change Event (direct 0x0A01) + * also used for the event (with type in the command field) + */ +struct i40e_aqc_lldp_update_mib { + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); + +/* Add 
LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct i40e_aqc_lldp_add_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); + +/* Update LLDP TLV (indirect 0x0A03) */ +struct i40e_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); + +/* Stop LLDP (direct 0x0A05) */ +struct i40e_aqc_lldp_stop { + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); + +/* Start LLDP (direct 0x0A06) */ + +struct i40e_aqc_lldp_start { + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); + +/* Get CEE DCBX Oper Config (0x0A07) + * uses the generic descriptor struct + * returns below as indirect response + */ + +#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) +#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) +#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) +#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) +#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) +#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +struct i40e_aqc_get_cee_dcb_cfg_v1_resp { + u8 reserved1; + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 reserved2; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + u8 reserved3; + __le16 oper_app_prio; + u8 reserved4; + __le16 tlv_status; +}; + +I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp); + +struct i40e_aqc_get_cee_dcb_cfg_resp { + u8 oper_num_tc; + u8 oper_prio_tc[4]; + u8 oper_tc_bw[8]; + u8 oper_pfc_en; + __le16 oper_app_prio; +#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0 +#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT) +#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3 +#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT) +#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8 +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) +#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT) + __le32 tlv_status; +#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0 +#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT) +#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3 +#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) +#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) + u8 reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); + +/* Set Local LLDP MIB (indirect 0x0A08) + * Used to replace the local MIB of a given LLDP agent. e.g. 
DCBx + */ +struct i40e_aqc_lldp_set_local_mib { +#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) + u8 type; + u8 reserved0; + __le16 length; + u8 reserved1[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); + +/* Stop/Start LLDP Agent (direct 0x0A09) + * Used for stopping/starting specific LLDP agent. e.g. DCBx + */ +struct i40e_aqc_lldp_stop_start_specific_agent { +#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 +#define I40E_AQC_START_SPECIFIC_AGENT_MASK \ + (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) + u8 command; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); + +/* Add Udp Tunnel command and completion (direct 0x0B00) */ +struct i40e_aqc_add_udp_tunnel { + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 + u8 reserved1[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); + +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + +/* remove UDP Tunnel command (0x0B01) */ +struct i40e_aqc_remove_udp_tunnel { + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); + +struct i40e_aqc_del_udp_tunnel_completion { + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); + +/* tunnel key structure 0x0B10 */ + +struct i40e_aqc_tunnel_key_structure { + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); + +/* OEM mode commands (direct 0xFE0x) */ +struct i40e_aqc_oem_param_change { + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + __le16 param_value2; + u8 reserved[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); + +struct i40e_aqc_oem_state_change { + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); + +/* Initialize OCSD (0xFE02, direct) */ +struct i40e_aqc_opc_oem_ocsd_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocsd_memory_block_addr_high; + __le32 ocsd_memory_block_addr_low; + __le32 requested_update_interval; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); + +/* Initialize OCBB (0xFE03, direct) */ +struct i40e_aqc_opc_oem_ocbb_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocbb_memory_block_addr_high; + __le32 ocbb_memory_block_addr_low; + u8 
reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); + +/* debug commands */ + +/* get device id (0xFF00) uses the generic structure */ + +/* set test more (0xFF01, internal) */ + +struct i40e_acq_set_test_mode { + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h new file mode 100644 index 000000000..926811ad4 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -0,0 +1,58 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ALLOC_H_ +#define _I40E_ALLOC_H_ + +struct i40e_hw; + +/* Memory allocation types */ +enum i40e_memory_type { + i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ + i40e_mem_asq_buf = 1, + i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ + i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ + i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ + i40e_mem_pd = 5, /* Page Descriptor */ + i40e_mem_bp = 6, /* Backing Page - 4KB */ + i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + i40e_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +i40e_status i40e_free_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem); +i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size); +i40e_status i40e_free_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem); + +#endif /* _I40E_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c new file mode 100644 index 000000000..0bae22da0 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -0,0 +1,3644 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_type.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_virtchnl.h" + +/** + * i40e_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. 
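+ *
+ * Illustrative usage sketch (hedged; pdev and status are assumed caller
+ * context rather than part of this helper):
+ *
+ *	hw->vendor_id = pdev->vendor;
+ *	hw->device_id = pdev->device;
+ *	status = i40e_set_mac_type(hw);
+ *	if (status)
+ *		return status;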
+ **/ +static i40e_status i40e_set_mac_type(struct i40e_hw *hw) +{ + i40e_status status = 0; + + if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { + switch (hw->device_id) { + case I40E_DEV_ID_SFP_XL710: + case I40E_DEV_ID_QEMU: + case I40E_DEV_ID_KX_A: + case I40E_DEV_ID_KX_B: + case I40E_DEV_ID_KX_C: + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_20G_KR2: + hw->mac.type = I40E_MAC_XL710; + break; + case I40E_DEV_ID_VF: + case I40E_DEV_ID_VF_HV: + hw->mac.type = I40E_MAC_VF; + break; + default: + hw->mac.type = I40E_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, status); + return status; +} + +/** + * i40e_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * @buf_len: max length of buffer + * + * Dumps debug log about adminq command with descriptor contents. + **/ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = le16_to_cpu(aq_desc->datalen); + u8 *buf = (u8 *)buffer; + u16 i = 0; + + if ((!(mask & hw->debug_mask)) || (desc == NULL)) + return; + + i40e_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); + i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); + i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); + i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); + + if ((buffer != NULL) && (aq_desc->datalen != 0)) { + i40e_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); + } + } +} + +/** + * i40e_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40e_check_asq_alive(struct i40e_hw *hw) +{ + if (hw->aq.asq.len) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_PF_ATQLEN_ATQENABLE_MASK); + else + return false; +} + +/** + * i40e_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
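+ *
+ * Illustrative teardown sketch (hedged; the exact call site is an assumption,
+ * though both helpers are defined in this file):
+ *
+ *	if (i40e_check_asq_alive(hw))
+ *		i40e_aq_queue_shutdown(hw, true);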
+ **/ +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT i40e_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, 
IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + 
I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> 
MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + 
I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + +/** + * i40e_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This assigns the MAC type and PHY code and inits the NVM. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The i40e_hw structure should be + * memset to 0 prior to calling this function. The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +i40e_status i40e_init_shared_code(struct i40e_hw *hw) +{ + i40e_status status = 0; + u32 port, ari, func_rid; + + i40e_set_mac_type(hw); + + switch (hw->mac.type) { + case I40E_MAC_XL710: + break; + default: + return I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw->phy.get_link_info = true; + + /* Determine port number and PF number*/ + port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) + >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + hw->port = (u8)port; + ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> + I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + func_rid = rd32(hw, I40E_PF_FUNC_RID); + if (ari) + hw->pf_id = (u8)(func_rid & 0xff); + else + hw->pf_id = (u8)(func_rid & 0x7); + + status = i40e_init_nvm(hw); + return status; +} + +/** + * i40e_aq_mac_address_read - Retrieve the MAC addresses + * @hw: pointer to the hw struct + * @flags: a return indicator of what addresses were added to the addr store + * @addrs: the requestor's mac addr store + * @cmd_details: pointer to command details structure or NULL + **/ +static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw, + u16 *flags, + struct i40e_aqc_mac_address_read_data *addrs, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_read *cmd_data = + (struct i40e_aqc_mac_address_read *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read); + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, addrs, + sizeof(*addrs), cmd_details); + *flags = le16_to_cpu(cmd_data->command_flags); + + return status; +} + +/** + * i40e_aq_mac_address_write - Change the MAC addresses + * @hw: pointer to the hw struct + * @flags: indicates which 
MAC to be written + * @mac_addr: address to write + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_mac_address_write *cmd_data = + (struct i40e_aqc_mac_address_write *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_mac_address_write); + cmd_data->command_flags = cpu_to_le16(flags); + cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); + cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | + ((u32)mac_addr[3] << 16) | + ((u32)mac_addr[4] << 8) | + mac_addr[5]); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_mac_addr - get MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to MAC address + * + * Reads the adapter's MAC address from register + **/ +i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + i40e_status status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + + if (flags & I40E_AQC_LAN_ADDR_VALID) + memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac)); + + return status; +} + +/** + * i40e_get_port_mac_addr - get Port MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to Port MAC address + * + * Reads the adapter's Port MAC address + **/ +i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + i40e_status status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_PORT_ADDR_VALID) + memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac)); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * i40e_pre_tx_queue_cfg - pre tx queue configure + * @hw: pointer to the HW structure + * @queue: target PF queue index + * @enable: state change request + * + * Handles hw requirement to indicate intention to enable + * or disable target queue. 
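The MAC write command above packs the six address bytes into a 16-bit high word (bytes 0-1) and a 32-bit low word (bytes 2-5). A minimal user-space model of that packing and its inverse follows; the helper names are the example's own, and the cpu_to_le16()/cpu_to_le32() byte swapping the driver applies on top is omitted.

/* Standalone model of the byte packing used by i40e_aq_mac_address_write():
 * bytes 0-1 land in the 16-bit "SAH" word, bytes 2-5 in the 32-bit "SAL"
 * word.  Endianness conversion is left out on purpose.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void pack_mac(const uint8_t mac[6], uint16_t *sah, uint32_t *sal)
{
        *sah = (uint16_t)((uint16_t)mac[0] << 8 | mac[1]);
        *sal = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
               ((uint32_t)mac[4] << 8) | mac[5];
}

static void unpack_mac(uint16_t sah, uint32_t sal, uint8_t mac[6])
{
        mac[0] = sah >> 8;
        mac[1] = sah & 0xff;
        mac[2] = sal >> 24;
        mac[3] = (sal >> 16) & 0xff;
        mac[4] = (sal >> 8) & 0xff;
        mac[5] = sal & 0xff;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint8_t out[6];
        uint16_t sah;
        uint32_t sal;

        pack_mac(mac, &sah, &sal);
        unpack_mac(sah, sal, out);
        for (int i = 0; i < 6; i++)
                assert(out[i] == mac[i]);       /* round trip is lossless */
        printf("sah=0x%04x sal=0x%08x\n", sah, sal);
        return 0;
}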
+ **/ +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) +{ + u32 abs_queue_idx = hw->func_caps.base_queue + queue; + u32 reg_block = 0; + u32 reg_val; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + + if (enable) + reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; + else + reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); +} +#ifdef I40E_FCOE + +/** + * i40e_get_san_mac_addr - get SAN MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to SAN MAC address + * + * Reads the adapter's SAN MAC address from NVM + **/ +i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + i40e_status status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_SAN_ADDR_VALID) + memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac)); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} +#endif + +/** + * i40e_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + i40e_status status = 0; + u16 pba_word = 0; + u16 pba_size = 0; + u16 pba_ptr = 0; + u16 i = 0; + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word); + if (status || (pba_word != 0xFAFA)) { + hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr); + if (status) { + hw_dbg(hw, "Failed to read PBA Block pointer.\n"); + return status; + } + + status = i40e_read_nvm_word(hw, pba_ptr, &pba_size); + if (status) { + hw_dbg(hw, "Failed to read PBA Block size.\n"); + return status; + } + + /* Subtract one to get PBA word count (PBA Size word is included in + * total size) + */ + pba_size--; + if (pba_num_size < (((u32)pba_size * 2) + 1)) { + hw_dbg(hw, "Buffer to small for PBA data.\n"); + return I40E_ERR_PARAM; + } + + for (i = 0; i < pba_size; i++) { + status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word); + if (status) { + hw_dbg(hw, "Failed to read PBA Block word %d.\n", i); + return status; + } + + pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; + pba_num[(i * 2) + 1] = pba_word & 0xFF; + } + pba_num[(pba_size * 2)] = '\0'; + + return status; +} + +/** + * i40e_get_media_type - Gets media type + * @hw: pointer to the hardware structure + **/ +static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) +{ + enum i40e_media_type media; + + switch (hw->phy.link_info.phy_type) { + case I40E_PHY_TYPE_10GBASE_SR: + case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: + case I40E_PHY_TYPE_40GBASE_SR4: + case I40E_PHY_TYPE_40GBASE_LR4: + media = I40E_MEDIA_TYPE_FIBER; + break; + case I40E_PHY_TYPE_100BASE_TX: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_10GBASE_T: + media = I40E_MEDIA_TYPE_BASET; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + 
case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_40GBASE_AOC: + case I40E_PHY_TYPE_10GBASE_AOC: + media = I40E_MEDIA_TYPE_DA; + break; + case I40E_PHY_TYPE_1000BASE_KX: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: + media = I40E_MEDIA_TYPE_BACKPLANE; + break; + case I40E_PHY_TYPE_SGMII: + case I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + default: + media = I40E_MEDIA_TYPE_UNKNOWN; + break; + } + + return media; +} + +#define I40E_PF_RESET_WAIT_COUNT_A0 200 +#define I40E_PF_RESET_WAIT_COUNT 200 +/** + * i40e_pf_reset - Reset the PF + * @hw: pointer to the hardware structure + * + * Assuming someone else has triggered a global reset, + * assure the global reset is complete and then reset the PF + **/ +i40e_status i40e_pf_reset(struct i40e_hw *hw) +{ + u32 cnt = 0; + u32 cnt1 = 0; + u32 reg = 0; + u32 grst_del; + + /* Poll for Global Reset steady state in case of recent GRST. + * The grst delay value is in 100ms units, and we'll wait a + * couple counts longer to be sure we don't just miss the end. + */ + grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + for (cnt = 0; cnt < grst_del + 2; cnt++) { + reg = rd32(hw, I40E_GLGEN_RSTAT); + if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) + break; + msleep(100); + } + if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + hw_dbg(hw, "Global reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + + /* Now Wait for the FW to be ready */ + for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { + reg = rd32(hw, I40E_GLNVM_ULD); + reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); + if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { + hw_dbg(hw, "Core and Global modules ready %d\n", cnt1); + break; + } + usleep_range(10000, 20000); + } + if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { + hw_dbg(hw, "wait for FW Reset complete timedout\n"); + hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); + return I40E_ERR_RESET_FAILED; + } + + /* If there was a Global Reset in progress when we got here, + * we don't need to do the PF Reset + */ + if (!cnt) { + if (hw->revision_id == 0) + cnt = I40E_PF_RESET_WAIT_COUNT_A0; + else + cnt = I40E_PF_RESET_WAIT_COUNT; + reg = rd32(hw, I40E_PFGEN_CTRL); + wr32(hw, I40E_PFGEN_CTRL, + (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); + for (; cnt; cnt--) { + reg = rd32(hw, I40E_PFGEN_CTRL); + if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) + break; + usleep_range(1000, 2000); + } + if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { + hw_dbg(hw, "PF reset polling failed to complete.\n"); + return I40E_ERR_RESET_FAILED; + } + } + + i40e_clear_pxe_mode(hw); + + return 0; +} + +/** + * i40e_clear_hw - clear out any left over hw state + * @hw: pointer to the hw struct + * + * Clear queues and interrupts, typically called at init time, + * but after the capabilities have been found so we know how many + * queues and msix vectors have been allocated. 
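i40e_pf_reset() above first polls the global-reset status with a count derived from the GRST delay (read in 100 ms units) plus two iterations of slack, sleeping 100 ms per try and exiting early once the DEVSTATE bit clears. A small standalone sketch of that bounded-poll shape, with fake_rstat() standing in for rd32(hw, I40E_GLGEN_RSTAT):

/* Minimal user-space model of the global-reset wait in i40e_pf_reset().
 * fake_rstat() pretends the reset finishes after a few polls.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEVSTATE_MASK 0x1u

static uint32_t fake_rstat(void)
{
        static int polls_left = 3;      /* reset "completes" after 3 polls */

        return polls_left-- > 0 ? DEVSTATE_MASK : 0;
}

static bool wait_global_reset_done(uint32_t grst_del_100ms)
{
        uint32_t reg = 0;

        for (uint32_t cnt = 0; cnt < grst_del_100ms + 2; cnt++) {
                reg = fake_rstat();
                if (!(reg & DEVSTATE_MASK))
                        return true;    /* device left the reset state */
                /* the driver sleeps 100 ms here (msleep(100)) */
        }
        return !(reg & DEVSTATE_MASK);
}

int main(void)
{
        printf("reset %s\n", wait_global_reset_done(10) ? "done" : "timed out");
        return 0;
}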
+ **/ +void i40e_clear_hw(struct i40e_hw *hw) +{ + u32 num_queues, base_queue; + u32 num_pf_int; + u32 num_vf_int; + u32 num_vfs; + u32 i, j; + u32 val; + u32 eol = 0x7ff; + + /* get number of interrupts, queues, and VFs */ + val = rd32(hw, I40E_GLPCI_CNF2); + num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; + num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> + I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; + + val = rd32(hw, I40E_PFLAN_QALLOC); + base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> + I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; + j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> + I40E_PFLAN_QALLOC_LASTQ_SHIFT; + if (val & I40E_PFLAN_QALLOC_VALID_MASK) + num_queues = (j - base_queue) + 1; + else + num_queues = 0; + + val = rd32(hw, I40E_PF_VT_PFALLOC); + i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >> + I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT; + j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >> + I40E_PF_VT_PFALLOC_LASTVF_SHIFT; + if (val & I40E_PF_VT_PFALLOC_VALID_MASK) + num_vfs = (j - i) + 1; + else + num_vfs = 0; + + /* stop all the interrupts */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_DYN_CTLN(i), val); + + /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */ + val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLST0, val); + for (i = 0; i < num_pf_int - 2; i++) + wr32(hw, I40E_PFINT_LNKLSTN(i), val); + val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT; + for (i = 0; i < num_vfs; i++) + wr32(hw, I40E_VPINT_LNKLST0(i), val); + for (i = 0; i < num_vf_int - 2; i++) + wr32(hw, I40E_VPINT_LNKLSTN(i), val); + + /* warn the HW of the coming Tx disables */ + for (i = 0; i < num_queues; i++) { + u32 abs_queue_idx = base_queue + i; + u32 reg_block = 0; + + if (abs_queue_idx >= 128) { + reg_block = abs_queue_idx / 128; + abs_queue_idx %= 128; + } + + val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); + val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; + val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + + wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val); + } + udelay(400); + + /* stop all the queues */ + for (i = 0; i < num_queues; i++) { + wr32(hw, I40E_QINT_TQCTL(i), 0); + wr32(hw, I40E_QTX_ENA(i), 0); + wr32(hw, I40E_QINT_RQCTL(i), 0); + wr32(hw, I40E_QRX_ENA(i), 0); + } + + /* short wait for all queue disables to settle */ + udelay(50); +} + +/** + * i40e_clear_pxe_mode - clear pxe operations mode + * @hw: pointer to the hw struct + * + * Make sure all PXE mode settings are cleared, including things + * like descriptor fetch/write-back mode. 
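The GLLAN_TXPRE_QDIS registers are banked in groups of 128 queues, so both i40e_pre_tx_queue_cfg() and the Tx-disable loop in i40e_clear_hw() above split an absolute queue index into a register block and an index within that block. The arithmetic on its own:

/* Queue index to register-block mapping used before Tx queue disables. */
#include <stdint.h>
#include <stdio.h>

static void split_queue_index(uint32_t abs_queue_idx,
                              uint32_t *reg_block, uint32_t *queue_in_block)
{
        *reg_block = abs_queue_idx / 128;       /* which QDIS register */
        *queue_in_block = abs_queue_idx % 128;  /* QINDX field value */
}

int main(void)
{
        uint32_t block, idx;

        split_queue_index(200, &block, &idx);
        printf("queue 200 -> block %u, index %u\n", block, idx);  /* 1, 72 */
        return 0;
}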
+ **/ +void i40e_clear_pxe_mode(struct i40e_hw *hw) +{ + u32 reg; + + if (i40e_check_asq_alive(hw)) + i40e_aq_clear_pxe_mode(hw, NULL); + + /* Clear single descriptor fetch/write-back mode */ + reg = rd32(hw, I40E_GLLAN_RCTL_0); + + if (hw->revision_id == 0) { + /* As a work around clear PXE_MODE instead of setting it */ + wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); + } else { + wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); + } +} + +/** + * i40e_led_is_mine - helper to find matching led + * @hw: pointer to the hw struct + * @idx: index into GPIO registers + * + * returns: 0 if no match, otherwise the value of the GPIO_CTL register + */ +static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) +{ + u32 gpio_val = 0; + u32 port; + + if (!hw->func_caps.led[idx]) + return 0; + + gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); + port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> + I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + + /* if PRT_NUM_NA is 1 then this LED is not port specific, OR + * if it is not our port then ignore + */ + if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || + (port != hw->port)) + return 0; + + return gpio_val; +} + +#define I40E_COMBINED_ACTIVITY 0xA +#define I40E_FILTER_ACTIVITY 0xE +#define I40E_LINK_ACTIVITY 0xC +#define I40E_MAC_ACTIVITY 0xD +#define I40E_LED0 22 + +/** + * i40e_led_get - return current on/off mode + * @hw: pointer to the hw struct + * + * The value returned is the 'mode' field as defined in the + * GPIO register definitions: 0x0 = off, 0xf = on, and other + * values are variations of possible behaviors relating to + * blink, link, and wire. + **/ +u32 i40e_led_get(struct i40e_hw *hw) +{ + u32 current_mode = 0; + u32 mode = 0; + int i; + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; + break; + } + + return mode; +} + +/** + * i40e_led_set - set new on/off mode + * @hw: pointer to the hw struct + * @mode: 0=off, 0xf=on (else see manual for mode details) + * @blink: true if the LED should blink when on, false if steady + * + * if this function is used to turn on the blink it should + * be used to disable the blink when restoring the original state. 
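i40e_led_get() above and i40e_led_set(), whose body follows, work on the same LED_MODE bit field of a GPIO_CTL word. A standalone model of the read-modify-write that i40e_led_set() performs; the mask, shift and blink-bit values here are illustrative stand-ins, not the real I40E_GLGEN_GPIO_CTL_* layout.

/* Clear the LED_MODE field, insert the new mode, set or clear the blink
 * bit; the masking of the new mode doubles as a range check, as the
 * driver's own comment notes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LED_MODE_SHIFT  6                       /* illustrative values */
#define LED_MODE_MASK   (0xfu << LED_MODE_SHIFT)
#define LED_BLINK_BIT   (1u << 12)

static uint32_t led_set(uint32_t gpio_val, uint32_t mode, bool blink)
{
        gpio_val &= ~LED_MODE_MASK;
        gpio_val |= (mode << LED_MODE_SHIFT) & LED_MODE_MASK;
        if (blink)
                gpio_val |= LED_BLINK_BIT;
        else
                gpio_val &= ~LED_BLINK_BIT;
        return gpio_val;
}

int main(void)
{
        uint32_t reg = 0x00000041;              /* arbitrary starting value */

        reg = led_set(reg, 0xf, true);          /* 0xf = LED on, blinking */
        printf("gpio_ctl = 0x%08x, mode = %u\n",
               reg, (reg & LED_MODE_MASK) >> LED_MODE_SHIFT);
        return 0;
}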
+ **/ +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) +{ + u32 current_mode = 0; + int i; + + if (mode & 0xfffffff0) + hw_dbg(hw, "invalid mode passed in %X\n", mode); + + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); + + if (!gpio_val) + continue; + + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + + gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; + /* this & is a bit of paranoia, but serves as a range check */ + gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & + I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); + + if (mode == I40E_LINK_ACTIVITY) + blink = false; + + if (blink) + gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + else + gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); + + wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); + break; + } +} + +/* Admin command wrappers */ + +/** + * i40e_aq_get_phy_capabilities + * @hw: pointer to the hw struct + * @abilities: structure for PHY capabilities to be filled + * @qualified_modules: report Qualified Modules + * @report_init: report init capabilities (active are default) + * @cmd_details: pointer to command details structure or NULL + * + * Returns the various PHY abilities supported on the Port. + **/ +i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); + + if (!abilities) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_abilities); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (abilities_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + if (qualified_modules) + desc.params.external.param0 |= + cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); + + if (report_init) + desc.params.external.param0 |= + cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); + + status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, + cmd_details); + + if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) + status = I40E_ERR_UNKNOWN_PHY; + + return status; +} + +/** + * i40e_aq_set_phy_config + * @hw: pointer to the hw struct + * @config: structure with PHY configuration to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set the various PHY configuration parameters + * supported on the Port.One or more of the Set PHY config parameters may be + * ignored in an MFP mode as the PF may not have the privilege to set some + * of the PHY Config parameters. This status will be indicated by the + * command response. 
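i40e_aq_get_phy_capabilities() above shows the buffer-flag convention used by every indirect admin-queue command in this file: set the BUF flag when a data buffer is attached, add the RD flag when the firmware is expected to read that buffer, and add the large-buffer flag when the buffer exceeds I40E_AQ_LARGE_BUF. A sketch of that selection; the flag values and the 512-byte threshold are assumptions for illustration, not taken from the driver headers.

/* Buffer-flag selection for an indirect admin-queue command. */
#include <stdint.h>
#include <stdio.h>

#define AQ_FLAG_BUF     0x1000u         /* a buffer is attached */
#define AQ_FLAG_RD      0x0400u         /* firmware reads the buffer */
#define AQ_FLAG_LB      0x0200u         /* buffer is larger than default */
#define AQ_LARGE_BUF    512u            /* assumed threshold */

static uint16_t aq_buffer_flags(uint16_t buf_size, int fw_reads_buffer)
{
        uint16_t flags = AQ_FLAG_BUF;

        if (fw_reads_buffer)
                flags |= AQ_FLAG_RD;
        if (buf_size > AQ_LARGE_BUF)
                flags |= AQ_FLAG_LB;
        return flags;
}

int main(void)
{
        printf("flags(100)  = 0x%04x\n", aq_buffer_flags(100, 0));
        printf("flags(1024) = 0x%04x\n", aq_buffer_flags(1024, 1));
        return 0;
}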
+ **/ +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_set_phy_config *cmd = + (struct i40e_aq_set_phy_config *)&desc.params.raw; + enum i40e_status_code status; + + if (!config) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_config); + + *cmd = *config; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_fc + * @hw: pointer to the hw struct + * + * Set the requested flow control mode using set_phy_config. + **/ +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_restart) +{ + enum i40e_fc_mode fc_mode = hw->fc.requested_mode; + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + enum i40e_status_code status; + u8 pause_mask = 0x0; + + *aq_failures = 0x0; + + switch (fc_mode) { + case I40E_FC_FULL: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_RX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX; + break; + case I40E_FC_TX_PAUSE: + pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX; + break; + default: + break; + } + + /* Get the current phy config */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) { + *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; + return status; + } + + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + /* clear the old pause settings */ + config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) & + ~(I40E_AQ_PHY_FLAG_PAUSE_RX); + /* set the new abilities */ + config.abilities |= pause_mask; + /* If the abilities have changed, then set the new config */ + if (config.abilities != abilities.abilities) { + /* Auto restart link so settings take effect */ + if (atomic_restart) + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* Copy over all the old settings */ + config.phy_type = abilities.phy_type; + config.link_speed = abilities.link_speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + status = i40e_aq_set_phy_config(hw, &config, NULL); + + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; + } + /* Update the link info */ + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + if (status) { + /* Wait a little bit (on 40G cards it sometimes takes a really + * long time for link to come back from the atomic reset) + * and try once more + */ + msleep(1000); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + } + if (status) + *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; + + return status; +} + +/** + * i40e_aq_clear_pxe_mode + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * Tell the firmware that the driver is taking over from PXE + **/ +i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_clear_pxe *cmd = + (struct i40e_aqc_clear_pxe *)&desc.params.raw; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_clear_pxe_mode); + + cmd->rx_cnt = 0x2; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + wr32(hw, I40E_GLLAN_RCTL_0, 0x1); + + return status; +} + +/** + * i40e_aq_set_link_restart_an + * @hw: pointer to the hw struct + * 
@enable_link: if true: enable link, if false: disable link + * @cmd_details: pointer to command details structure or NULL + * + * Sets up the link and restarts the Auto-Negotiation over the link. + **/ +i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_link_restart_an *cmd = + (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_link_restart_an); + + cmd->command = I40E_AQ_PHY_RESTART_AN; + if (enable_link) + cmd->command |= I40E_AQ_PHY_LINK_ENABLE; + else + cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_link_info + * @hw: pointer to the hw struct + * @enable_lse: enable/disable LinkStatusEvent reporting + * @link: pointer to link status structure - optional + * @cmd_details: pointer to command details structure or NULL + * + * Returns the link status of the adapter. + **/ +i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_link_status *resp = + (struct i40e_aqc_get_link_status *)&desc.params.raw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + i40e_status status; + bool tx_pause, rx_pause; + u16 command_flags; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); + + if (enable_lse) + command_flags = I40E_AQ_LSE_ENABLE; + else + command_flags = I40E_AQ_LSE_DISABLE; + resp->command_flags = cpu_to_le16(command_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status) + goto aq_get_link_info_exit; + + /* save off old link status information */ + hw->phy.link_info_old = *hw_link_info; + + /* update link status */ + hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; + hw->phy.media_type = i40e_get_media_type(hw); + hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; + hw_link_info->link_info = resp->link_info; + hw_link_info->an_info = resp->an_info; + hw_link_info->ext_info = resp->ext_info; + hw_link_info->loopback = resp->loopback; + hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); + hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; + + /* update fc info */ + tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); + rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); + if (tx_pause & rx_pause) + hw->fc.current_mode = I40E_FC_FULL; + else if (tx_pause) + hw->fc.current_mode = I40E_FC_TX_PAUSE; + else if (rx_pause) + hw->fc.current_mode = I40E_FC_RX_PAUSE; + else + hw->fc.current_mode = I40E_FC_NONE; + + if (resp->config & I40E_AQ_CONFIG_CRC_ENA) + hw_link_info->crc_enable = true; + else + hw_link_info->crc_enable = false; + + if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE)) + hw_link_info->lse_enable = true; + else + hw_link_info->lse_enable = false; + + if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && + hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + + /* save link status information */ + if (link) + *link = *hw_link_info; + + /* flag cleared so helper functions don't call AQ again */ + hw->phy.get_link_info = false; + +aq_get_link_info_exit: + return status; +} + +/** + * i40e_aq_set_phy_int_mask + * 
@hw: pointer to the hw struct + * @mask: interrupt mask to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set link interrupt mask. + **/ +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_int_mask *cmd = + (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_int_mask); + + cmd->event_mask = cpu_to_le16(mask); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_add_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Add a VSI context to the hardware. +**/ +i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_vsi); + + cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); + cmd->connection_type = vsi_ctx->connection_type; + cmd->vf_id = vsi_ctx->vf_num; + cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + if (status) + goto aq_add_vsi_exit; + + vsi_ctx->seid = le16_to_cpu(resp->seid); + vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); + vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + +aq_add_vsi_exit: + return status; +} + +/** + * i40e_aq_set_vsi_unicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set unicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + + cmd->seid = cpu_to_le16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_multicast_promiscuous + * @hw: pointer to the hw struct + * @seid: vsi number + * @set: set multicast promiscuous enable/disable + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + 
i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + + cmd->seid = cpu_to_le16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_broadcast + * @hw: pointer to the hw struct + * @seid: vsi number + * @set_filter: true to set filter, false to clear filter + * @cmd_details: pointer to command details structure or NULL + * + * Set or clear the broadcast promiscuous flag (filter) for a given VSI. + **/ +i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 seid, bool set_filter, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (set_filter) + cmd->promiscuous_flags + |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + else + cmd->promiscuous_flags + &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); + + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); + cmd->seid = cpu_to_le16(seid); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_get_vsi_params - get VSI configuration info + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_vsi_parameters); + + cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), NULL); + + if (status) + goto aq_get_vsi_params_exit; + + vsi_ctx->seid = le16_to_cpu(resp->seid); + vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); + vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + +aq_get_vsi_params_exit: + return status; +} + +/** + * i40e_aq_update_vsi_params + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a vsi context struct + * @cmd_details: pointer to command details structure or NULL + * + * Update a VSI context. 
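The three VSI promiscuous-mode helpers above share one idiom: valid_flags tells the firmware which promiscuity bits to act on, while promiscuous_flags carries the desired state of those bits, so unicast, multicast and broadcast can each be toggled without disturbing the others. A standalone restatement; the bit values are illustrative, not the driver's actual constants.

/* Pairing of "which bits matter" and "what state those bits should have". */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PROMISC_UNICAST         0x01u
#define PROMISC_MULTICAST       0x02u
#define PROMISC_BROADCAST       0x04u

struct promisc_cmd {
        uint16_t promiscuous_flags;     /* desired state of selected bits */
        uint16_t valid_flags;           /* bits the firmware may change */
};

static void set_promisc_bit(struct promisc_cmd *cmd, uint16_t bit, bool on)
{
        cmd->valid_flags |= bit;
        if (on)
                cmd->promiscuous_flags |= bit;
        else
                cmd->promiscuous_flags &= ~bit;
}

int main(void)
{
        struct promisc_cmd cmd = { 0, 0 };

        set_promisc_bit(&cmd, PROMISC_UNICAST, true);    /* enable unicast */
        set_promisc_bit(&cmd, PROMISC_BROADCAST, false); /* clear broadcast */
        printf("promisc=0x%02x valid=0x%02x\n",
               cmd.promiscuous_flags, cmd.valid_flags);
        return 0;
}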
+ **/ +i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_update_vsi_parameters); + cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + + status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, + sizeof(vsi_ctx->info), cmd_details); + + return status; +} + +/** + * i40e_aq_get_switch_config + * @hw: pointer to the hardware structure + * @buf: pointer to the result buffer + * @buf_size: length of input buffer + * @start_seid: seid to start for the report, 0 == beginning + * @cmd_details: pointer to command details structure or NULL + * + * Fill the buf with switch configuration returned from AdminQ command + **/ +i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *scfg = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_switch_config); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + scfg->seid = cpu_to_le16(*start_seid); + + status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); + *start_seid = le16_to_cpu(scfg->seid); + + return status; +} + +/** + * i40e_aq_get_firmware_version + * @hw: pointer to the hw struct + * @fw_major_version: firmware major version + * @fw_minor_version: firmware minor version + * @fw_build: firmware build number + * @api_major_version: major queue version + * @api_minor_version: minor queue version + * @cmd_details: pointer to command details structure or NULL + * + * Get the firmware version from the admin queue commands + **/ +i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_version *resp = + (struct i40e_aqc_get_version *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + if (fw_major_version) + *fw_major_version = le16_to_cpu(resp->fw_major); + if (fw_minor_version) + *fw_minor_version = le16_to_cpu(resp->fw_minor); + if (fw_build) + *fw_build = le32_to_cpu(resp->fw_build); + if (api_major_version) + *api_major_version = le16_to_cpu(resp->api_major); + if (api_minor_version) + *api_minor_version = le16_to_cpu(resp->api_minor); + } + + return status; +} + +/** + * i40e_aq_send_driver_version + * @hw: pointer to the hw struct + * @dv: driver's major, minor version + * @cmd_details: pointer to command details structure or NULL + * + * Send the driver version to the firmware + **/ +i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_driver_version *cmd = + (struct i40e_aqc_driver_version *)&desc.params.raw; + 
i40e_status status; + u16 len; + + if (dv == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); + + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); + cmd->driver_major_ver = dv->major_version; + cmd->driver_minor_ver = dv->minor_version; + cmd->driver_build_ver = dv->build_version; + cmd->driver_subbuild_ver = dv->subbuild_version; + + len = 0; + while (len < sizeof(dv->driver_string) && + (dv->driver_string[len] < 0x80) && + dv->driver_string[len]) + len++; + status = i40e_asq_send_command(hw, &desc, dv->driver_string, + len, cmd_details); + + return status; +} + +/** + * i40e_get_link_status - get status of the HW network link + * @hw: pointer to the hw struct + * + * Returns true if link is up, false if link is down. + * + * Side effect: LinkStatusEvent reporting becomes enabled + **/ +bool i40e_get_link_status(struct i40e_hw *hw) +{ + i40e_status status = 0; + bool link_status = false; + + if (hw->phy.get_link_info) { + status = i40e_aq_get_link_info(hw, true, NULL, NULL); + + if (status) + goto i40e_get_link_status_exit; + } + + link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + +i40e_get_link_status_exit: + return link_status; +} + +/** + * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC + * @hw: pointer to the hw struct + * @uplink_seid: the MAC or other gizmo SEID + * @downlink_seid: the VSI SEID + * @enabled_tc: bitmap of TCs to be enabled + * @default_port: true for default port VSI, false for control port + * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support + * @veb_seid: pointer to where to put the resulting VEB SEID + * @cmd_details: pointer to command details structure or NULL + * + * This asks the FW to add a VEB between the uplink and downlink + * elements. If the uplink SEID is 0, this will be a floating VEB. 
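i40e_aq_send_driver_version() above sizes its buffer with a bounded scan that stops at the end of the array, at a NUL, or at the first byte that is not plain ASCII (>= 0x80). The same loop lifted into a standalone helper:

/* Bounded printable-ASCII string length, as used for the driver string. */
#include <stdint.h>
#include <stdio.h>

static uint16_t ascii_strnlen(const uint8_t *s, uint16_t max_len)
{
        uint16_t len = 0;

        while (len < max_len && s[len] < 0x80 && s[len])
                len++;
        return len;
}

int main(void)
{
        const uint8_t version[32] = "i40e 1.3.4-k";     /* sample string */

        printf("len = %u\n", ascii_strnlen(version, sizeof(version)));
        return 0;
}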
+ **/ +i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, bool enable_l2_filtering, + u16 *veb_seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_veb *cmd = + (struct i40e_aqc_add_veb *)&desc.params.raw; + struct i40e_aqc_add_veb_completion *resp = + (struct i40e_aqc_add_veb_completion *)&desc.params.raw; + i40e_status status; + u16 veb_flags = 0; + + /* SEIDs need to either both be set or both be 0 for floating VEB */ + if (!!uplink_seid != !!downlink_seid) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); + + cmd->uplink_seid = cpu_to_le16(uplink_seid); + cmd->downlink_seid = cpu_to_le16(downlink_seid); + cmd->enable_tcs = enabled_tc; + if (!uplink_seid) + veb_flags |= I40E_AQC_ADD_VEB_FLOATING; + if (default_port) + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; + else + veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; + + if (enable_l2_filtering) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; + + cmd->veb_flags = cpu_to_le16(veb_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && veb_seid) + *veb_seid = le16_to_cpu(resp->veb_seid); + + return status; +} + +/** + * i40e_aq_get_veb_parameters - Retrieve VEB parameters + * @hw: pointer to the hw struct + * @veb_seid: the SEID of the VEB to query + * @switch_id: the uplink switch id + * @floating: set to true if the VEB is floating + * @statistic_index: index of the stats counter block for this VEB + * @vebs_used: number of VEB's used by function + * @vebs_free: total VEB's not reserved by any function + * @cmd_details: pointer to command details structure or NULL + * + * This retrieves the parameters for a particular VEB, specified by + * uplink_seid, and returns them to the caller. 
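i40e_aq_add_veb() above accepts either two non-zero SEIDs or two zero SEIDs (the floating-VEB case) and rejects a mix; the double negation normalizes each SEID to 0 or 1 before comparing, which is all the check needs. Restated on its own:

/* "Both set or both zero" validation for the uplink/downlink SEID pair. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool veb_seids_valid(uint16_t uplink_seid, uint16_t downlink_seid)
{
        /* both zero (floating VEB) or both non-zero (regular VEB) */
        return !!uplink_seid == !!downlink_seid;
}

int main(void)
{
        printf("%d %d %d\n",
               veb_seids_valid(0, 0),   /* 1: floating VEB */
               veb_seids_valid(5, 7),   /* 1: regular VEB */
               veb_seids_valid(5, 0));  /* 0: rejected */
        return 0;
}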
+ **/ +i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, + bool *floating, u16 *statistic_index, + u16 *vebs_used, u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_get_veb_parameters_completion *cmd_resp = + (struct i40e_aqc_get_veb_parameters_completion *) + &desc.params.raw; + i40e_status status; + + if (veb_seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_veb_parameters); + cmd_resp->seid = cpu_to_le16(veb_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (status) + goto get_veb_exit; + + if (switch_id) + *switch_id = le16_to_cpu(cmd_resp->switch_id); + if (statistic_index) + *statistic_index = le16_to_cpu(cmd_resp->statistic_index); + if (vebs_used) + *vebs_used = le16_to_cpu(cmd_resp->vebs_used); + if (vebs_free) + *vebs_free = le16_to_cpu(cmd_resp->vebs_free); + if (floating) { + u16 flags = le16_to_cpu(cmd_resp->veb_flags); + if (flags & I40E_AQC_ADD_VEB_FLOATING) + *floating = true; + else + *floating = false; + } + +get_veb_exit: + return status; +} + +/** + * i40e_aq_add_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be added + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Add MAC/VLAN addresses to the HW filtering + **/ +i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + i40e_status status; + u16 buf_size; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); + cmd->num_addresses = cpu_to_le16(count); + cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_remove_macvlan + * @hw: pointer to the hw struct + * @seid: VSI for the mac address + * @mv_list: list of macvlans to be removed + * @count: length of the list + * @cmd_details: pointer to command details structure or NULL + * + * Remove MAC/VLAN addresses from the HW filtering + **/ +i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_macvlan *cmd = + (struct i40e_aqc_macvlan *)&desc.params.raw; + i40e_status status; + u16 buf_size; + + if (count == 0 || !mv_list || !hw) + return I40E_ERR_PARAM; + + buf_size = count * sizeof(*mv_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); + cmd->num_addresses = cpu_to_le16(count); + cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); + cmd->seid[1] = 0; + cmd->seid[2] = 0; + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= 
cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_send_msg_to_vf + * @hw: pointer to the hardware structure + * @vfid: VF id to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * send msg to vf + **/ +i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_pf_vf_message *cmd = + (struct i40e_aqc_pf_vf_message *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); + cmd->id = cpu_to_le32(vfid); + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + } + status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); + + return status; +} + +/** + * i40e_aq_debug_read_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Read the register using the admin queue commands + **/ +i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd_resp = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + i40e_status status; + + if (reg_val == NULL) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); + + cmd_resp->address = cpu_to_le32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) { + *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | + (u64)le32_to_cpu(cmd_resp->value_low); + } + + return status; +} + +/** + * i40e_aq_debug_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Write to a register using the admin queue commands + **/ +i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_reg_read_write *cmd = + (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); + + cmd->address = cpu_to_le32(reg_addr); + cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); + cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_hmc_resource_profile + * @hw: pointer to the hw struct + * @profile: type of profile the HMC is to be set as + * @pe_vf_enabled_count: the number of PE enabled VFs the system has + * @cmd_details: pointer to command details structure or NULL + * + * set the HMC profile of the device. 
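The debug register read and write commands above carry a 64-bit register value as two 32-bit descriptor fields. A round-trip model of the split and reassembly; the cpu_to_le32()/le32_to_cpu() byte swapping the driver applies to each half is omitted.

/* Split a 64-bit value into high/low 32-bit halves and put it back. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void split_u64(uint64_t val, uint32_t *high, uint32_t *low)
{
        *high = (uint32_t)(val >> 32);
        *low = (uint32_t)(val & 0xFFFFFFFFu);
}

static uint64_t join_u64(uint32_t high, uint32_t low)
{
        return ((uint64_t)high << 32) | low;
}

int main(void)
{
        uint64_t reg_val = 0x123456789abcdef0ULL;
        uint32_t high, low;

        split_u64(reg_val, &high, &low);
        assert(join_u64(high, low) == reg_val);
        printf("high=0x%08x low=0x%08x\n", high, low);
        return 0;
}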
+ **/ +i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile profile, + u8 pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aq_get_set_hmc_resource_profile *cmd = + (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_hmc_resource_profile); + + cmd->pm_profile = (u8)profile; + cmd->pe_vf_enabled = pe_vf_enabled_count; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_request_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @access: access type + * @sdp_number: resource number + * @timeout: the maximum time in ms that the driver may hold the resource + * @cmd_details: pointer to command details structure or NULL + * + * requests common resource using the admin queue commands + **/ +i40e_status i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd_resp = + (struct i40e_aqc_request_resource *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); + + cmd_resp->resource_id = cpu_to_le16(resource); + cmd_resp->access_type = cpu_to_le16(access); + cmd_resp->resource_number = cpu_to_le32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + /* The completion specifies the maximum time in ms that the driver + * may hold the resource in the Timeout field. + * If the resource is held by someone else, the command completes with + * busy return value and the timeout field indicates the maximum time + * the current owner of the resource has to free it. 
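The comment above implies the usage pattern for callers of the resource request: when the command completes busy, the returned timeout is roughly how long the current owner may still hold the resource, so the caller waits about that long and retries. A sketch of that loop with a stand-in request function, not the driver's real API:

/* Acquire-with-retry built on a fake resource-request call. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_hw { int busy_polls; };

/* returns 0 on success, -1 on busy; *timeout_ms = remaining hold time */
static int fake_request_resource(struct fake_hw *hw, uint64_t *timeout_ms)
{
        if (hw->busy_polls-- > 0) {
                *timeout_ms = 50;       /* current owner's remaining time */
                return -1;
        }
        *timeout_ms = 120000;           /* how long we may now hold it */
        return 0;
}

static bool acquire_with_retry(struct fake_hw *hw, int max_tries)
{
        uint64_t timeout_ms;

        for (int i = 0; i < max_tries; i++) {
                if (!fake_request_resource(hw, &timeout_ms))
                        return true;
                /* a real caller would sleep ~timeout_ms before retrying */
        }
        return false;
}

int main(void)
{
        struct fake_hw hw = { .busy_polls = 2 };

        printf("acquired: %d\n", acquire_with_retry(&hw, 5));
        return 0;
}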
+ */ + if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) + *timeout = le32_to_cpu(cmd_resp->timeout); + + return status; +} + +/** + * i40e_aq_release_resource + * @hw: pointer to the hw struct + * @resource: resource id + * @sdp_number: resource number + * @cmd_details: pointer to command details structure or NULL + * + * release common resource using the admin queue commands + **/ +i40e_status i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_request_resource *cmd = + (struct i40e_aqc_request_resource *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); + + cmd->resource_id = cpu_to_le16(resource); + cmd->resource_number = cpu_to_le32(sdp_number); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_read_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be read (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Read the NVM using the admin queue commands + **/ +i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + i40e_status status; + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_read_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = cpu_to_le32(offset); + cmd->length = cpu_to_le16(length); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_read_nvm_exit: + return status; +} + +/** + * i40e_aq_erase_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in the module (expressed in 4 KB from module's beginning) + * @length: length of the section to be erased (expressed in 4 KB) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Erase the NVM sector using the admin queue commands + **/ +i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + i40e_status status; + + /* In offset the highest byte must be zeroed. 
*/ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_erase_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = cpu_to_le32(offset); + cmd->length = cpu_to_le16(length); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +i40e_aq_erase_nvm_exit: + return status; +} + +#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01 +#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02 +#define I40E_DEV_FUNC_CAP_NPAR 0x03 +#define I40E_DEV_FUNC_CAP_OS2BMC 0x04 +#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05 +#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12 +#define I40E_DEV_FUNC_CAP_VF 0x13 +#define I40E_DEV_FUNC_CAP_VMDQ 0x14 +#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15 +#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16 +#define I40E_DEV_FUNC_CAP_VSI 0x17 +#define I40E_DEV_FUNC_CAP_DCB 0x18 +#define I40E_DEV_FUNC_CAP_FCOE 0x21 +#define I40E_DEV_FUNC_CAP_ISCSI 0x22 +#define I40E_DEV_FUNC_CAP_RSS 0x40 +#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41 +#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42 +#define I40E_DEV_FUNC_CAP_MSIX 0x43 +#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 +#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 +#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 +#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1 +#define I40E_DEV_FUNC_CAP_CEM 0xF2 +#define I40E_DEV_FUNC_CAP_IWARP 0x51 +#define I40E_DEV_FUNC_CAP_LED 0x61 +#define I40E_DEV_FUNC_CAP_SDP 0x62 +#define I40E_DEV_FUNC_CAP_MDIO 0x63 +#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 + +/** + * i40e_parse_discover_capabilities + * @hw: pointer to the hw struct + * @buff: pointer to a buffer containing device/function capability records + * @cap_count: number of capability records in the list + * @list_type_opc: type of capabilities list to parse + * + * Parse the device/function capabilities list. 
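This parser is fed by i40e_aq_discover_capabilities(), defined further down; because the firmware reports the size it needs through an ENOMEM admin-queue error, callers typically retry with a larger buffer. A minimal sketch of that loop (buffer handling simplified; the initial size and retry count are arbitrary assumptions):

static i40e_status example_get_dev_caps(struct i40e_hw *hw)
{
	u16 buf_size = 1024, needed = 0;
	i40e_status ret;
	int tries = 3;
	void *buf;

	do {
		buf = kzalloc(buf_size, GFP_KERNEL);
		if (!buf)
			return I40E_ERR_NO_MEMORY;

		ret = i40e_aq_discover_capabilities(hw, buf, buf_size, &needed,
					i40e_aqc_opc_list_dev_capabilities,
					NULL);
		kfree(buf);

		if (hw->aq.asq_last_status != I40E_AQ_RC_ENOMEM)
			break;

		/* firmware reported the buffer size it really needs */
		buf_size = needed;
	} while (--tries);

	return ret;
}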
+ **/ +static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, + u32 cap_count, + enum i40e_admin_queue_opc list_type_opc) +{ + struct i40e_aqc_list_capabilities_element_resp *cap; + u32 valid_functions, num_functions; + u32 number, logical_id, phys_id; + struct i40e_hw_capabilities *p; + u32 i = 0; + u16 id; + + cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; + + if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) + p = &hw->dev_caps; + else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) + p = &hw->func_caps; + else + return; + + for (i = 0; i < cap_count; i++, cap++) { + id = le16_to_cpu(cap->id); + number = le32_to_cpu(cap->number); + logical_id = le32_to_cpu(cap->logical_id); + phys_id = le32_to_cpu(cap->phys_id); + + switch (id) { + case I40E_DEV_FUNC_CAP_SWITCH_MODE: + p->switch_mode = number; + break; + case I40E_DEV_FUNC_CAP_MGMT_MODE: + p->management_mode = number; + break; + case I40E_DEV_FUNC_CAP_NPAR: + p->npar_enable = number; + break; + case I40E_DEV_FUNC_CAP_OS2BMC: + p->os2bmc = number; + break; + case I40E_DEV_FUNC_CAP_VALID_FUNC: + p->valid_functions = number; + break; + case I40E_DEV_FUNC_CAP_SRIOV_1_1: + if (number == 1) + p->sr_iov_1_1 = true; + break; + case I40E_DEV_FUNC_CAP_VF: + p->num_vfs = number; + p->vf_base_id = logical_id; + break; + case I40E_DEV_FUNC_CAP_VMDQ: + if (number == 1) + p->vmdq = true; + break; + case I40E_DEV_FUNC_CAP_802_1_QBG: + if (number == 1) + p->evb_802_1_qbg = true; + break; + case I40E_DEV_FUNC_CAP_802_1_QBH: + if (number == 1) + p->evb_802_1_qbh = true; + break; + case I40E_DEV_FUNC_CAP_VSI: + p->num_vsis = number; + break; + case I40E_DEV_FUNC_CAP_DCB: + if (number == 1) { + p->dcb = true; + p->enabled_tcmap = logical_id; + p->maxtc = phys_id; + } + break; + case I40E_DEV_FUNC_CAP_FCOE: + if (number == 1) + p->fcoe = true; + break; + case I40E_DEV_FUNC_CAP_ISCSI: + if (number == 1) + p->iscsi = true; + break; + case I40E_DEV_FUNC_CAP_RSS: + p->rss = true; + p->rss_table_size = number; + p->rss_table_entry_width = logical_id; + break; + case I40E_DEV_FUNC_CAP_RX_QUEUES: + p->num_rx_qp = number; + p->base_queue = phys_id; + break; + case I40E_DEV_FUNC_CAP_TX_QUEUES: + p->num_tx_qp = number; + p->base_queue = phys_id; + break; + case I40E_DEV_FUNC_CAP_MSIX: + p->num_msix_vectors = number; + break; + case I40E_DEV_FUNC_CAP_MSIX_VF: + p->num_msix_vectors_vf = number; + break; + case I40E_DEV_FUNC_CAP_MFP_MODE_1: + if (number == 1) + p->mfp_mode_1 = true; + break; + case I40E_DEV_FUNC_CAP_CEM: + if (number == 1) + p->mgmt_cem = true; + break; + case I40E_DEV_FUNC_CAP_IWARP: + if (number == 1) + p->iwarp = true; + break; + case I40E_DEV_FUNC_CAP_LED: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->led[phys_id] = true; + break; + case I40E_DEV_FUNC_CAP_SDP: + if (phys_id < I40E_HW_CAP_MAX_GPIO) + p->sdp[phys_id] = true; + break; + case I40E_DEV_FUNC_CAP_MDIO: + if (number == 1) { + p->mdio_port_num = phys_id; + p->mdio_port_mode = logical_id; + } + break; + case I40E_DEV_FUNC_CAP_IEEE_1588: + if (number == 1) + p->ieee_1588 = true; + break; + case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR: + p->fd = true; + p->fd_filters_guaranteed = number; + p->fd_filters_best_effort = logical_id; + break; + case I40E_DEV_FUNC_CAP_WR_CSR_PROT: + p->wr_csr_prot = (u64)number; + p->wr_csr_prot |= (u64)logical_id << 32; + break; + default: + break; + } + } + + if (p->fcoe) + i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); + + /* Software override ensuring FCoE is disabled if npar or mfp + * mode because it is 
not supported in these modes. + */ + if (p->npar_enable || p->mfp_mode_1) + p->fcoe = false; + + /* count the enabled ports (aka the "not disabled" ports) */ + hw->num_ports = 0; + for (i = 0; i < 4; i++) { + u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); + u64 port_cfg = 0; + + /* use AQ read to get the physical register offset instead + * of the port relative offset + */ + i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); + if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) + hw->num_ports++; + } + + valid_functions = p->valid_functions; + num_functions = 0; + while (valid_functions) { + if (valid_functions & 1) + num_functions++; + valid_functions >>= 1; + } + + /* partition id is 1-based, and functions are evenly spread + * across the ports as partitions + */ + hw->partition_id = (hw->pf_id / hw->num_ports) + 1; + hw->num_partitions = num_functions / hw->num_ports; + + /* additional HW specific goodies that might + * someday be HW version specific + */ + p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; +} + +/** + * i40e_aq_discover_capabilities + * @hw: pointer to the hw struct + * @buff: a virtual buffer to hold the capabilities + * @buff_size: Size of the virtual buffer + * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM + * @list_type_opc: capabilities type to discover - pass in the command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Get the device capabilities descriptions from the firmware + **/ +i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aqc_list_capabilites *cmd; + struct i40e_aq_desc desc; + i40e_status status = 0; + + cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; + + if (list_type_opc != i40e_aqc_opc_list_func_capabilities && + list_type_opc != i40e_aqc_opc_list_dev_capabilities) { + status = I40E_ERR_PARAM; + goto exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + *data_size = le16_to_cpu(desc.datalen); + + if (status) + goto exit; + + i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), + list_type_opc); + +exit: + return status; +} + +/** + * i40e_aq_update_nvm + * @hw: pointer to the hw struct + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be written (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @cmd_details: pointer to command details structure or NULL + * + * Update the NVM using the admin queue commands + **/ +i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_nvm_update *cmd = + (struct i40e_aqc_nvm_update *)&desc.params.raw; + i40e_status status; + + /* In offset the highest byte must be zeroed. 
*/ + if (offset & 0xFF000000) { + status = I40E_ERR_PARAM; + goto i40e_aq_update_nvm_exit; + } + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); + + /* If this is the last command in a series, set the proper flag. */ + if (last_command) + cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + cmd->module_pointer = module_pointer; + cmd->offset = cpu_to_le32(offset); + cmd->length = cpu_to_le16(length); + + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + if (length > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); + +i40e_aq_update_nvm_exit: + return status; +} + +/** + * i40e_aq_get_lldp_mib + * @hw: pointer to the hw struct + * @bridge_type: type of bridge requested + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buff: pointer to a user supplied buffer to store the MIB block + * @buff_size: size of the buffer (in bytes) + * @local_len : length of the returned Local LLDP MIB + * @remote_len: length of the returned Remote LLDP MIB + * @cmd_details: pointer to command details structure or NULL + * + * Requests the complete LLDP MIB (entire packet). + **/ +i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_get_mib *cmd = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + struct i40e_aqc_lldp_get_mib *resp = + (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); + /* Indirect Command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + + cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; + cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & + I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + + desc.datalen = cpu_to_le16(buff_size); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (local_len != NULL) + *local_len = le16_to_cpu(resp->local_len); + if (remote_len != NULL) + *remote_len = le16_to_cpu(resp->remote_len); + } + + return status; +} + +/** + * i40e_aq_cfg_lldp_mib_change_event + * @hw: pointer to the hw struct + * @enable_update: Enable or Disable event posting + * @cmd_details: pointer to command details structure or NULL + * + * Enable or Disable posting of an event on ARQ when LLDP MIB + * associated with the interface changes + **/ +i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_update_mib *cmd = + (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); + + if (!enable_update) + cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_stop_lldp + * @hw: pointer to the hw struct + * @shutdown_agent: True if LLDP Agent needs to be Shutdown + * @cmd_details: pointer to command details structure or NULL + * + * Stop or Shutdown the embedded LLDP Agent + **/ +i40e_status 
i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_stop *cmd = + (struct i40e_aqc_lldp_stop *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); + + if (shutdown_agent) + cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_start_lldp + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * Start the embedded LLDP Agent on all ports. + **/ +i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_lldp_start *cmd = + (struct i40e_aqc_lldp_start *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); + + cmd->command = I40E_AQ_LLDP_AGENT_START; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_cee_dcb_config + * @hw: pointer to the hw struct + * @buff: response buffer that stores CEE operational configuration + * @buff_size: size of the buffer passed + * @cmd_details: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware + **/ +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, + cmd_details); + + return status; +} + +/** + * i40e_aq_add_udp_tunnel + * @hw: pointer to the hw struct + * @udp_port: the UDP port to add + * @header_len: length of the tunneling header length in DWords + * @protocol_index: protocol index type + * @filter_index: pointer to filter index + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_udp_tunnel *cmd = + (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; + struct i40e_aqc_del_udp_tunnel_completion *resp = + (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); + + cmd->udp_port = cpu_to_le16(udp_port); + cmd->protocol_type = protocol_index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && filter_index) + *filter_index = resp->index; + + return status; +} + +/** + * i40e_aq_del_udp_tunnel + * @hw: pointer to the hw struct + * @index: filter index + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_remove_udp_tunnel *cmd = + (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); + + cmd->index = index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return 
status; +} + +/** + * i40e_aq_delete_element - Delete switch element + * @hw: pointer to the hw struct + * @seid: the SEID to delete from the switch + * @cmd_details: pointer to command details structure or NULL + * + * This deletes a switch element from the switch. + **/ +i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_switch_seid *cmd = + (struct i40e_aqc_switch_seid *)&desc.params.raw; + i40e_status status; + + if (seid == 0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); + + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_dcb_updated - DCB Updated Command + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * EMP will return when the shared RPB settings have been + * recomputed and modified. The retval field in the descriptor + * will be set to 0 when RPB is modified. + **/ +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler + * @hw: pointer to the hw struct + * @seid: seid for the physical port/switching component/vsi + * @buff: Indirect buffer to hold data parameters and response + * @buff_size: Indirect buffer size + * @opcode: Tx scheduler AQ command opcode + * @cmd_details: pointer to command details structure or NULL + * + * Generic command handler for Tx scheduler AQ commands + **/ +static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, + void *buff, u16 buff_size, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_tx_sched_ind *cmd = + (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; + i40e_status status; + bool cmd_param_flag = false; + + switch (opcode) { + case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: + case i40e_aqc_opc_configure_vsi_tc_bw: + case i40e_aqc_opc_enable_switching_comp_ets: + case i40e_aqc_opc_modify_switching_comp_ets: + case i40e_aqc_opc_disable_switching_comp_ets: + case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: + case i40e_aqc_opc_configure_switching_comp_bw_config: + cmd_param_flag = true; + break; + case i40e_aqc_opc_query_vsi_bw_config: + case i40e_aqc_opc_query_vsi_ets_sla_config: + case i40e_aqc_opc_query_switching_comp_ets_config: + case i40e_aqc_opc_query_port_ets_config: + case i40e_aqc_opc_query_switching_comp_bw_config: + cmd_param_flag = false; + break; + default: + return I40E_ERR_PARAM; + } + + i40e_fill_default_direct_cmd_desc(&desc, opcode); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (cmd_param_flag) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(buff_size); + + cmd->vsi_seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit + * @hw: pointer to the hw struct + * @seid: VSI seid + * @credit: BW limit credits (0 = 
disabled) + * @max_credit: Max BW limit credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_configure_vsi_bw_limit *cmd = + (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_vsi_bw_limit); + + cmd->vsi_seid = cpu_to_le16(seid); + cmd->credit = cpu_to_le16(credit); + cmd->max_credit = max_credit; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC + * @hw: pointer to the hw struct + * @seid: VSI seid + * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_vsi_tc_bw, + cmd_details); +} + +/** + * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port + * @hw: pointer to the hw struct + * @seid: seid of the switching component connected to Physical Port + * @ets_data: Buffer holding ETS parameters + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, + sizeof(*ets_data), opcode, cmd_details); +} + +/** + * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_aq_query_vsi_bw_config - Query VSI BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_bw_config, + cmd_details); +} + +/** + * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC + * @hw: pointer to the hw struct + * @seid: seid of the VSI + * @bw_data: Buffer to hold VSI BW configuration per TC + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + 
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_vsi_ets_sla_config, + cmd_details); +} + +/** + * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's per TC BW config + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration + * @hw: pointer to the hw struct + * @seid: seid of the VSI or switching component connected to Physical Port + * @bw_data: Buffer to hold current ETS configuration for the Physical Port + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_port_ets_config, + cmd_details); +} + +/** + * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer to hold switching component's BW configuration + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_query_switching_comp_bw_config, + cmd_details); +} + +/** + * i40e_validate_filter_settings + * @hw: pointer to the hardware structure + * @settings: Filter control settings + * + * Check and validate the filter control settings passed. + * The function checks for the valid filter/context sizes being + * passed for FCoE and PE. + * + * Returns 0 if the values passed are valid and within + * range else returns an error. 
+ **/ +static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + u32 fcoe_cntx_size, fcoe_filt_size; + u32 pe_cntx_size, pe_filt_size; + u32 fcoe_fmax; + u32 val; + + /* Validate FCoE settings passed */ + switch (settings->fcoe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + fcoe_filt_size <<= (u32)settings->fcoe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->fcoe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* Validate PE settings passed */ + switch (settings->pe_filt_num) { + case I40E_HASH_FILTER_SIZE_1K: + case I40E_HASH_FILTER_SIZE_2K: + case I40E_HASH_FILTER_SIZE_4K: + case I40E_HASH_FILTER_SIZE_8K: + case I40E_HASH_FILTER_SIZE_16K: + case I40E_HASH_FILTER_SIZE_32K: + case I40E_HASH_FILTER_SIZE_64K: + case I40E_HASH_FILTER_SIZE_128K: + case I40E_HASH_FILTER_SIZE_256K: + case I40E_HASH_FILTER_SIZE_512K: + case I40E_HASH_FILTER_SIZE_1M: + pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; + pe_filt_size <<= (u32)settings->pe_filt_num; + break; + default: + return I40E_ERR_PARAM; + } + + switch (settings->pe_cntx_num) { + case I40E_DMA_CNTX_SIZE_512: + case I40E_DMA_CNTX_SIZE_1K: + case I40E_DMA_CNTX_SIZE_2K: + case I40E_DMA_CNTX_SIZE_4K: + case I40E_DMA_CNTX_SIZE_8K: + case I40E_DMA_CNTX_SIZE_16K: + case I40E_DMA_CNTX_SIZE_32K: + case I40E_DMA_CNTX_SIZE_64K: + case I40E_DMA_CNTX_SIZE_128K: + case I40E_DMA_CNTX_SIZE_256K: + pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; + pe_cntx_size <<= (u32)settings->pe_cntx_num; + break; + default: + return I40E_ERR_PARAM; + } + + /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ + val = rd32(hw, I40E_GLHMC_FCOEFMAX); + fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) + >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; + if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) + return I40E_ERR_INVALID_SIZE; + + return 0; +} + +/** + * i40e_set_filter_control + * @hw: pointer to the hardware structure + * @settings: Filter control settings + * + * Set the Queue Filters for PE/FCoE and enable filters required + * for a single PF. It is expected that these settings are programmed + * at the driver initialization time. 
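A caller-side sketch of i40e_set_filter_control() below, using only field and enumerator names that appear in this file; the particular sizes chosen are illustrative, not recommendations:

static i40e_status example_init_filter_control(struct i40e_hw *hw)
{
	struct i40e_filter_control_settings settings = {0};

	/* smallest PE/FCoE hash filter and DMA context tables */
	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;

	/* 512-entry hash LUT plus the optional filter types */
	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	settings.enable_fdir = true;
	settings.enable_ethtype = true;
	settings.enable_macvlan = true;

	return i40e_set_filter_control(hw, &settings);
}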
+ **/ +i40e_status i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings) +{ + i40e_status ret = 0; + u32 hash_lut_size = 0; + u32 val; + + if (!settings) + return I40E_ERR_PARAM; + + /* Validate the input settings */ + ret = i40e_validate_filter_settings(hw, settings); + if (ret) + return ret; + + /* Read the PF Queue Filter control register */ + val = rd32(hw, I40E_PFQF_CTL_0); + + /* Program required PE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; + val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEHSIZE_MASK; + /* Program required PE contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; + val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PEDSIZE_MASK; + + /* Program required FCoE hash buckets for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + val |= ((u32)settings->fcoe_filt_num << + I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCHSIZE_MASK; + /* Program required FCoE DDP contexts for the PF */ + val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + val |= ((u32)settings->fcoe_cntx_num << + I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & + I40E_PFQF_CTL_0_PFFCDSIZE_MASK; + + /* Program Hash LUT size for the PF */ + val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) + hash_lut_size = 1; + val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & + I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; + + /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ + if (settings->enable_fdir) + val |= I40E_PFQF_CTL_0_FD_ENA_MASK; + if (settings->enable_ethtype) + val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; + if (settings->enable_macvlan) + val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; + + wr32(hw, I40E_PFQF_CTL_0, val); + + return 0; +} + +/** + * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter + * @hw: pointer to the hw struct + * @mac_addr: MAC address to use in the filter + * @ethtype: Ethertype to use in the filter + * @flags: Flags that needs to be applied to the filter + * @vsi_seid: seid of the control VSI + * @queue: VSI queue number to send the packet to + * @is_add: Add control packet filter if True else remove + * @stats: Structure to hold information on control filter counts + * @cmd_details: pointer to command details structure or NULL + * + * This command will Add or Remove control packet filter for a control VSI. + * In return it will update the total number of perfect filter count in + * the stats member. 
+ **/ +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_control_packet_filter *cmd = + (struct i40e_aqc_add_remove_control_packet_filter *) + &desc.params.raw; + struct i40e_aqc_add_remove_control_packet_filter_completion *resp = + (struct i40e_aqc_add_remove_control_packet_filter_completion *) + &desc.params.raw; + i40e_status status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + if (is_add) { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_control_packet_filter); + cmd->queue = cpu_to_le16(queue); + } else { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_control_packet_filter); + } + + if (mac_addr) + memcpy(cmd->mac, mac_addr, ETH_ALEN); + + cmd->etype = cpu_to_le16(ethtype); + cmd->flags = cpu_to_le16(flags); + cmd->seid = cpu_to_le16(vsi_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && stats) { + stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); + stats->etype_used = le16_to_cpu(resp->etype_used); + stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); + stats->etype_free = le16_to_cpu(resp->etype_free); + } + + return status; +} + +/** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. 
+ * + **/ +static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = cpu_to_le32(reg_addr0); + cmd_resp->address1 = cpu_to_le32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (!status) { + *reg_val0 = le32_to_cpu(cmd_resp->data0); + + if (reg_val1) + *reg_val1 = le32_to_cpu(cmd_resp->data1); + } + + return status; +} + +/** + * i40e_aq_resume_port_tx + * @hw: pointer to the hardware structure + * @cmd_details: pointer to command details structure or NULL + * + * Resume port's Tx traffic + **/ +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_set_pci_config_data - store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status word from PCI config space + * + * Stores the PCI bus info (speed, width, type) within the i40e_hw structure + **/ +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) +{ + hw->bus.type = i40e_bus_type_pci_express; + + switch (link_status & PCI_EXP_LNKSTA_NLW) { + case PCI_EXP_LNKSTA_NLW_X1: + hw->bus.width = i40e_bus_width_pcie_x1; + break; + case PCI_EXP_LNKSTA_NLW_X2: + hw->bus.width = i40e_bus_width_pcie_x2; + break; + case PCI_EXP_LNKSTA_NLW_X4: + hw->bus.width = i40e_bus_width_pcie_x4; + break; + case PCI_EXP_LNKSTA_NLW_X8: + hw->bus.width = i40e_bus_width_pcie_x8; + break; + default: + hw->bus.width = i40e_bus_width_unknown; + break; + } + + switch (link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + hw->bus.speed = i40e_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + hw->bus.speed = i40e_bus_speed_5000; + break; + case PCI_EXP_LNKSTA_CLS_8_0GB: + hw->bus.speed = i40e_bus_speed_8000; + break; + default: + hw->bus.speed = i40e_bus_speed_unknown; + break; + } +} + +/** + * i40e_aq_debug_dump + * @hw: pointer to the hardware structure + * @cluster_id: specific cluster to dump + * @table_id: table id within cluster + * @start_index: index of line in the block to read + * @buff_size: dump buffer size + * @buff: dump buffer + * @ret_buff_size: actual buffer size returned + * @ret_next_table: next block to read + * @ret_next_index: next index to read + * + * Dump internal FW/HW data for debug purposes. 
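The ret_next_table/ret_next_index outputs of the dump command defined just below let a debug tool walk a cluster block by block. A hedged sketch; the termination test (stop when firmware reports no forward progress) is an assumption, not documented behaviour:

static void example_dump_cluster(struct i40e_hw *hw, u8 cluster_id,
				 void *buf, u16 buf_len)
{
	u32 next_idx = 0, idx = 0;
	u8 next_table = 0, table = 0;
	u16 ret_len = 0;

	while (!i40e_aq_debug_dump(hw, cluster_id, table, idx, buf_len, buf,
				   &ret_len, &next_table, &next_idx, NULL)) {
		/* ret_len bytes of dump data are now in buf */
		if (next_table == table && next_idx == idx)
			break;
		table = next_table;
		idx = next_idx;
	}
}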
+ * + **/ +i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_debug_dump_internals *cmd = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + struct i40e_aqc_debug_dump_internals *resp = + (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_debug_dump_internals); + /* Indirect Command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (buff_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + cmd->cluster_id = cluster_id; + cmd->table_id = table_id; + cmd->idx = cpu_to_le32(start_index); + + desc.datalen = cpu_to_le16(buff_size); + + status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); + if (!status) { + if (ret_buff_size) + *ret_buff_size = le16_to_cpu(desc.datalen); + if (ret_next_table) + *ret_next_table = resp->table_id; + if (ret_next_index) + *ret_next_index = le32_to_cpu(resp->idx); + } + + return status; +} + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + i40e_status status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + if (bwd_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, + cmd_details); + + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c new file mode 100644 index 000000000..2547aa21b 
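Before the DCB code in i40e_dcb.c starts below, a short sketch of how the alternate-RAM bandwidth helper above might be used: read this PF's min/max bandwidth words and trust them only when their VALID bits are set (illustrative only, not from the driver):

static void example_read_partition_bw(struct i40e_hw *hw)
{
	bool min_valid = false, max_valid = false;
	u32 max_bw = 0, min_bw = 0;

	if (i40e_read_bw_from_alt_ram(hw, &max_bw, &min_bw,
				      &min_valid, &max_valid))
		return;

	if (min_valid)
		i40e_debug(hw, I40E_DEBUG_ALL, "min_bw word: 0x%08x\n", min_bw);
	if (max_valid)
		i40e_debug(hw, I40E_DEBUG_ALL, "max_bw word: 0x%08x\n", max_bw);
}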
--- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -0,0 +1,720 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_dcb.h" + +/** + * i40e_get_dcbx_status + * @hw: pointer to the hw struct + * @status: Embedded DCBX Engine Status + * + * Get the DCBX status from the Firmware + **/ +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +{ + u32 reg; + + if (!status) + return I40E_ERR_PARAM; + + reg = rd32(hw, I40E_PRTDCB_GENS); + *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> + I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); + + return 0; +} + +/** + * i40e_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_dcb_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> + I40E_IEEE_ETS_WILLING_SHIFT); + etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> + I40E_IEEE_ETS_CBS_SHIFT); + etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> + I40E_IEEE_ETS_MAXTC_SHIFT); + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = 
buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* Move offset to priority table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + dcbcfg->etsrec.prioritytable[i*2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> + I40E_IEEE_PFC_WILLING_SHIFT); + dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> + I40E_IEEE_PFC_MBC_SHIFT); + dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> + I40E_IEEE_PFC_CAP_SHIFT); + dcbcfg->pfc.pfcenable = buf[1]; +} + +/** + * i40e_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + **/ +static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength; + u16 offset = 0; + u16 length; + int i = 0; + u8 *buf; + + typelength = ntohs(tlv->typelength); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + buf = tlv->tlvinfo; + + /* The App priority table starts 5 octets after TLV header */ + length -= (sizeof(tlv->ouisubtype) + 1); + + /* Move offset to App Priority Table */ + offset++; + + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * 
|Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (offset < length) { + dcbcfg->app[i].priority = (u8)((buf[offset] & + I40E_IEEE_APP_PRIO_MASK) >> + I40E_IEEE_APP_PRIO_SHIFT); + dcbcfg->app[i].selector = (u8)((buf[offset] & + I40E_IEEE_APP_SEL_MASK) >> + I40E_IEEE_APP_SEL_SHIFT); + dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | + buf[offset + 2]; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + switch (subtype) { + case I40E_IEEE_SUBTYPE_ETS_CFG: + i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_ETS_REC: + i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_PFC_CFG: + i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_APP_PRI: + i40e_parse_ieee_app_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * Currently only IEEE 802.1Qaz TLV is supported, all others + * will be returned + **/ +static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u32 oui; + + ouisubtype = ntohl(tlv->ouisubtype); + oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> + I40E_LLDP_TLV_OUI_SHIFT); + switch (oui) { + case I40E_IEEE_8021QAZ_OUI: + i40e_parse_ieee_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_lldp_to_dcb_config + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + **/ +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg) +{ + i40e_status ret = 0; + struct i40e_lldp_org_tlv *tlv; + u16 type; + u16 length; + u16 typelength; + u16 offset = 0; + + if (!lldpmib || !dcbcfg) + return I40E_ERR_PARAM; + + /* set to the start of LLDPDU */ + lldpmib += ETH_HLEN; + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + typelength = ntohs(tlv->typelength); + type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + offset += sizeof(typelength) + length; + + /* END TLV or beyond LLDPDU size */ + if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) + break; + + switch (type) { + case I40E_TLV_TYPE_ORG: + i40e_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + /* Move to next TLV */ + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + + length); + } + + return ret; +} + +/** + * i40e_aq_get_dcb_config + * @hw: pointer to the hw struct + * @mib_type: mib type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the Firmware + **/ +i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct 
i40e_dcbx_config *dcbcfg) +{ + i40e_status ret = 0; + struct i40e_virt_mem mem; + u8 *lldpmib; + + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, + (void *)lldpmib, I40E_LLDPDU_SIZE, + NULL, NULL, NULL); + if (ret) + goto free_mem; + + /* Parse LLDP MIB to get dcb configuration */ + ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg); + +free_mem: + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_cee_to_dcb_v1_config + * @cee_cfg: pointer to CEE v1 response configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE v1 configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_v1_config( + struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 
1 : 0; + /* Add APPs if Error is False */ + if (!err) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_cee_to_dcb_config + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_config( + struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_get_dcb_config + * @hw: pointer to the hw struct + * + * Get DCB configuration from the Firmware + **/ +i40e_status i40e_get_dcb_config(struct i40e_hw *hw) +{ + i40e_status ret = 0; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + + /* If Firmware version < v4.33 IEEE only */ + if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4)) + goto ieee; + + /* If Firmware version == v4.33 use old CEE struct */ + if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, + sizeof(cee_v1_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_v1_config(&cee_v1_cfg, + &hw->local_dcbx_config); + } + } else { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, + sizeof(cee_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_config(&cee_cfg, + &hw->local_dcbx_config); + } + } + + /* CEE mode not enabled try querying IEEE data */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + goto ieee; + else + goto out; + +ieee: + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; + /* Get Local DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = 0; + +out: + return ret; +} + +/** + * i40e_init_dcb + * @hw: pointer to the hw struct + * + * Update DCB configuration from the Firmware + **/ +i40e_status i40e_init_dcb(struct i40e_hw *hw) +{ + i40e_status ret = 0; + struct i40e_lldp_variables lldp_cfg; + u8 adminstatus = 0; + + if (!hw->func_caps.dcb) + return ret; + + /* Read LLDP NVM area */ + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (ret) + return ret; + + /* Get the LLDP AdminStatus for the current port */ + adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); + adminstatus &= 0xF; + + /* LLDP agent disabled */ + if (!adminstatus) { + hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; + return ret; + } + + /* Get DCBX status */ + ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); + if (ret) + return ret; + + /* Check the DCBX Status */ + switch (hw->dcbx_status) { + case I40E_DCBX_STATUS_DONE: + case I40E_DCBX_STATUS_IN_PROGRESS: + /* Get current DCBX configuration */ + ret = i40e_get_dcb_config(hw); + 
if (ret) + return ret; + break; + case I40E_DCBX_STATUS_DISABLED: + return ret; + case I40E_DCBX_STATUS_NOT_STARTED: + case I40E_DCBX_STATUS_MULTIPLE_PEERS: + default: + break; + } + + /* Configure the LLDP MIB change event */ + ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); + if (ret) + return ret; + + return ret; +} + +/** + * i40e_read_lldp_cfg - read LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * + * Reads the LLDP configuration data from NVM + **/ +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) +{ + i40e_status ret = 0; + u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + + if (!lldp_cfg) + return I40E_ERR_PARAM; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, + sizeof(struct i40e_lldp_variables), + (u8 *)lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h new file mode 100644 index 000000000..e137e3fac --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -0,0 +1,112 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DCB_H_ +#define _I40E_DCB_H_ + +#include "i40e_type.h" + +#define I40E_DCBX_STATUS_NOT_STARTED 0 +#define I40E_DCBX_STATUS_IN_PROGRESS 1 +#define I40E_DCBX_STATUS_DONE 2 +#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 +#define I40E_DCBX_STATUS_DISABLED 7 + +#define I40E_TLV_TYPE_END 0 +#define I40E_TLV_TYPE_ORG 127 + +#define I40E_IEEE_8021QAZ_OUI 0x0080C2 +#define I40E_IEEE_SUBTYPE_ETS_CFG 9 +#define I40E_IEEE_SUBTYPE_ETS_REC 10 +#define I40E_IEEE_SUBTYPE_PFC_CFG 11 +#define I40E_IEEE_SUBTYPE_APP_PRI 12 + +/* Defines for LLDP TLV header */ +#define I40E_LLDP_TLV_LEN_SHIFT 0 +#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) +#define I40E_LLDP_TLV_TYPE_SHIFT 9 +#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT) +#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 +#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) +#define I40E_LLDP_TLV_OUI_SHIFT 8 +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) + +/* Defines for IEEE ETS TLV */ +#define I40E_IEEE_ETS_MAXTC_SHIFT 0 +#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) +#define I40E_IEEE_ETS_CBS_SHIFT 6 +#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_WILLING_SHIFT 7 +#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_PRIO_0_SHIFT 0 +#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) +#define I40E_IEEE_ETS_PRIO_1_SHIFT 4 +#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) +#define I40E_CEE_PGID_PRIO_0_SHIFT 0 +#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) +#define I40E_CEE_PGID_PRIO_1_SHIFT 4 +#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) +#define I40E_CEE_PGID_STRICT 15 + +/* Defines for IEEE TSA types */ +#define I40E_IEEE_TSA_STRICT 0 +#define I40E_IEEE_TSA_ETS 2 + +/* Defines for IEEE PFC TLV */ +#define I40E_IEEE_PFC_CAP_SHIFT 0 +#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) +#define I40E_IEEE_PFC_MBC_SHIFT 6 +#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_WILLING_SHIFT 7 +#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT) + +/* Defines for IEEE APP TLV */ +#define I40E_IEEE_APP_SEL_SHIFT 0 +#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT) +#define I40E_IEEE_APP_PRIO_SHIFT 5 +#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT) + + +#pragma pack(1) + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct i40e_lldp_org_tlv { + __be16 typelength; + __be32 ouisubtype; + u8 tlvinfo[1]; +}; +#pragma pack() + +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, + u16 *status); +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_get_dcb_config(struct i40e_hw *hw); +i40e_status i40e_init_dcb(struct i40e_hw *hw); +#endif /* _I40E_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c new file mode 100644 index 000000000..bd5079d5c --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -0,0 +1,321 @@ +/******************************************************************************* + * + * Intel Ethernet 
Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifdef CONFIG_I40E_DCB +#include "i40e.h" +#include + +/** + * i40e_get_pfc_delay - retrieve PFC Link Delay + * @hw: pointer to hardware struct + * @delay: holds the PFC Link delay value + * + * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA + **/ +static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) +{ + u32 val; + + val = rd32(hw, I40E_PRTDCB_GENC); + *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> + I40E_PRTDCB_GENC_PFCLDA_SHIFT); +} + +/** + * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration + * @netdev: the corresponding netdev + * @ets: structure to hold the ETS information + * + * Returns local IEEE ETS configuration + **/ +static int i40e_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + dcbxcfg = &hw->local_dcbx_config; + ets->willing = dcbxcfg->etscfg.willing; + ets->ets_cap = dcbxcfg->etscfg.maxtcs; + ets->cbs = dcbxcfg->etscfg.cbs; + memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, + sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, + sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, + sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable, + sizeof(ets->prio_tc)); + memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable, + sizeof(ets->tc_reco_bw)); + memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable, + sizeof(ets->tc_reco_tsa)); + memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prioritytable, + sizeof(ets->reco_prio_tc)); + + return 0; +} + +/** + * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration + * @netdev: the corresponding netdev + * @ets: structure to hold the PFC information + * + * Returns local IEEE PFC configuration + **/ +static int i40e_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + int i; + + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + dcbxcfg = &hw->local_dcbx_config; + pfc->pfc_cap = dcbxcfg->pfc.pfccap; + pfc->pfc_en = dcbxcfg->pfc.pfcenable; + pfc->mbc = dcbxcfg->pfc.mbc; + i40e_get_pfc_delay(hw, &pfc->delay); + + /* Get Requests/Indicatiosn */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = pf->stats.priority_xoff_tx[i]; + pfc->indications[i] = pf->stats.priority_xoff_rx[i]; + } + + return 0; +} + 
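The ieee_getets/ieee_getpfc callbacks above hand user space the prioritytable[] that the CEE-to-DCB helpers earlier in this patch (i40e_cee_to_dcb_v1_config() / i40e_cee_to_dcb_config()) build by unpacking two 4-bit priority group IDs from each byte of oper_prio_tc[], high nibble first. A minimal standalone sketch of that unpacking, with the masks redefined locally so the example is self-contained rather than taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for I40E_CEE_PGID_PRIO_0/1_MASK and _SHIFT, defined here
 * only for illustration.
 */
#define CEE_PGID_PRIO_0_SHIFT	0
#define CEE_PGID_PRIO_0_MASK	(0xF << CEE_PGID_PRIO_0_SHIFT)
#define CEE_PGID_PRIO_1_SHIFT	4
#define CEE_PGID_PRIO_1_MASK	(0xF << CEE_PGID_PRIO_1_SHIFT)

/* Unpack eight per-priority PG IDs from four nibble-packed bytes, mirroring
 * the prioritytable[] loop in i40e_cee_to_dcb_config(): the even-numbered
 * priority comes from the high nibble, the odd one from the low nibble.
 */
static void cee_unpack_prio_tc(const uint8_t packed[4], uint8_t prio_tc[8])
{
	int i;

	for (i = 0; i < 4; i++) {
		prio_tc[i * 2] = (packed[i] & CEE_PGID_PRIO_1_MASK) >>
				 CEE_PGID_PRIO_1_SHIFT;
		prio_tc[i * 2 + 1] = (packed[i] & CEE_PGID_PRIO_0_MASK) >>
				     CEE_PGID_PRIO_0_SHIFT;
	}
}

int main(void)
{
	const uint8_t packed[4] = { 0x10, 0x32, 0x54, 0x76 };	/* sample data */
	uint8_t prio_tc[8];
	int i;

	cee_unpack_prio_tc(packed, prio_tc);
	for (i = 0; i < 8; i++)
		printf("prio %d -> TC %d\n", i, prio_tc[i]);
	return 0;
}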
+/** + * i40e_dcbnl_getdcbx - retrieve current DCBx capability + * @netdev: the corresponding netdev + * + * Returns DCBx capability features + **/ +static u8 i40e_dcbnl_getdcbx(struct net_device *dev) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + return pf->dcbx_cap; +} + +/** + * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx + * @netdev: the corresponding netdev + * + * Returns the SAN MAC address used for LLDP exchange + **/ +static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev, + u8 *perm_addr) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < dev->addr_len; i++) + perm_addr[i] = pf->hw.mac.perm_addr[i]; + + for (j = 0; j < dev->addr_len; j++, i++) + perm_addr[i] = pf->hw.mac.san_addr[j]; +} + +static const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = i40e_dcbnl_ieee_getets, + .ieee_getpfc = i40e_dcbnl_ieee_getpfc, + .getdcbx = i40e_dcbnl_getdcbx, + .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr, +}; + +/** + * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config + * @vsi: the corresponding vsi + * + * Set up all the IEEE APPs in the DCBNL App Table and generate event for + * other settings + **/ +void i40e_dcbnl_set_all(struct i40e_vsi *vsi) +{ + struct net_device *dev = vsi->netdev; + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + struct dcb_app sapp; + u8 prio, tc_map; + int i; + + /* DCB not enabled */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return; + + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + + dcbxcfg = &hw->local_dcbx_config; + + /* Set up all the App TLVs if DCBx is negotiated */ + for (i = 0; i < dcbxcfg->numapps; i++) { + prio = dcbxcfg->app[i].priority; + tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); + + /* Add APP only if the TC is enabled for this VSI */ + if (tc_map & vsi->tc_config.enabled_tc) { + sapp.selector = dcbxcfg->app[i].selector; + sapp.protocol = dcbxcfg->app[i].protocolid; + sapp.priority = prio; + dcb_ieee_setapp(dev, &sapp); + } + } + + /* Notify user-space of the changes */ + dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); +} + +/** + * i40e_dcbnl_vsi_del_app - Delete APP for given VSI + * @vsi: the corresponding vsi + * @app: APP to delete + * + * Delete given APP from the DCBNL APP table for given + * VSI + **/ +static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, + struct i40e_dcb_app_priority_table *app) +{ + struct net_device *dev = vsi->netdev; + struct dcb_app sapp; + + if (!dev) + return -EINVAL; + + sapp.selector = app->selector; + sapp.protocol = app->protocolid; + sapp.priority = app->priority; + return dcb_ieee_delapp(dev, &sapp); +} + +/** + * i40e_dcbnl_del_app - Delete APP on all VSIs + * @pf: the corresponding PF + * @app: APP to delete + * + * Delete given APP from all the VSIs for given PF + **/ +static void i40e_dcbnl_del_app(struct i40e_pf *pf, + struct i40e_dcb_app_priority_table *app) +{ + int v, err; + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v] && pf->vsi[v]->netdev) { + err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); + if (err) + dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", + __func__, pf->vsi[v]->seid, + err, app->selector, + app->protocolid, app->priority); + } + } +} + +/** + * i40e_dcbnl_find_app - Search APP in given DCB config + * @cfg: DCBX 
configuration data + * @app: APP to search for + * + * Find given APP in the DCB configuration + **/ +static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, + struct i40e_dcb_app_priority_table *app) +{ + int i; + + for (i = 0; i < cfg->numapps; i++) { + if (app->selector == cfg->app[i].selector && + app->protocolid == cfg->app[i].protocolid && + app->priority == cfg->app[i].priority) + return true; + } + + return false; +} + +/** + * i40e_dcbnl_flush_apps - Delete all removed APPs + * @pf: the corresponding PF + * @old_cfg: old DCBX configuration data + * @new_cfg: new DCBX configuration data + * + * Find and delete all APPs that are not present in the passed + * DCB configuration + **/ +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg) +{ + struct i40e_dcb_app_priority_table app; + int i; + + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + + for (i = 0; i < old_cfg->numapps; i++) { + app = old_cfg->app[i]; + /* The APP is not available anymore delete it */ + if (!i40e_dcbnl_find_app(new_cfg, &app)) + i40e_dcbnl_del_app(pf, &app); + } +} + +/** + * i40e_dcbnl_setup - DCBNL setup + * @vsi: the corresponding vsi + * + * Set up DCBNL ops and initial APP TLVs + **/ +void i40e_dcbnl_setup(struct i40e_vsi *vsi) +{ + struct net_device *dev = vsi->netdev; + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + /* Not DCB capable */ + if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + return; + + dev->dcbnl_ops = &dcbnl_ops; + + /* Set initial IEEE DCB settings */ + i40e_dcbnl_set_all(vsi); +} +#endif /* CONFIG_I40E_DCB */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c new file mode 100644 index 000000000..da0faf478 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -0,0 +1,2258 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifdef CONFIG_DEBUG_FS + +#include +#include + +#include "i40e.h" + +static struct dentry *i40e_dbg_root; + +/** + * i40e_dbg_find_vsi - searches for the vsi with the given seid + * @pf - the PF structure to search for the vsi + * @seid - seid of the vsi it is searching for + **/ +static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) +{ + int i; + + if (seid < 0) + dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); + else + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) + return pf->vsi[i]; + + return NULL; +} + +/** + * i40e_dbg_find_veb - searches for the veb with the given seid + * @pf - the PF structure to search for the veb + * @seid - seid of the veb it is searching for + **/ +static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) +{ + int i; + + if ((seid < I40E_BASE_VEB_SEID) || + (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB))) + dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); + else + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == seid) + return pf->veb[i]; + return NULL; +} + +/************************************************************** + * dump + * The dump entry in debugfs is for getting a data snapshow of + * the driver's current configuration and runtime details. + * When the filesystem entry is written, a snapshot is taken. + * When the entry is read, the most recent snapshot data is dumped. + **************************************************************/ +static char *i40e_dbg_dump_buf; +static ssize_t i40e_dbg_dump_data_len; +static ssize_t i40e_dbg_dump_buffer_len; + +/** + * i40e_dbg_dump_read - read the dump data + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int bytes_not_copied; + int len; + + /* is *ppos bigger than the available data? 
*/ + if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf) + return 0; + + /* be sure to not read beyond the end of available data */ + len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); + + bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos += len; + return len; +} + +/** + * i40e_dbg_prep_dump_buf + * @pf: the PF we're working with + * @buflen: the desired buffer length + * + * Return positive if success, 0 if failed + **/ +static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen) +{ + /* if not already big enough, prep for re alloc */ + if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) { + kfree(i40e_dbg_dump_buf); + i40e_dbg_dump_buffer_len = 0; + i40e_dbg_dump_buf = NULL; + } + + /* get a new buffer if needed */ + if (!i40e_dbg_dump_buf) { + i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL); + if (i40e_dbg_dump_buf != NULL) + i40e_dbg_dump_buffer_len = buflen; + } + + return i40e_dbg_dump_buffer_len; +} + +/** + * i40e_dbg_dump_write - trigger a datadump snapshot + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + * + * Any write clears the stats + **/ +static ssize_t i40e_dbg_dump_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + bool seid_found = false; + long seid = -1; + int buflen = 0; + int i, ret; + int len; + u8 *p; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* decode the SEID given to be dumped */ + ret = kstrtol_from_user(buffer, count, 0, &seid); + + if (ret) { + dev_info(&pf->pdev->dev, "bad seid value\n"); + } else if (seid == 0) { + seid_found = true; + + kfree(i40e_dbg_dump_buf); + i40e_dbg_dump_buffer_len = 0; + i40e_dbg_dump_data_len = 0; + i40e_dbg_dump_buf = NULL; + dev_info(&pf->pdev->dev, "debug buffer freed\n"); + + } else if (seid == pf->pf_seid || seid == 1) { + seid_found = true; + + buflen = sizeof(struct i40e_pf); + buflen += (sizeof(struct i40e_aq_desc) + * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries)); + + if (i40e_dbg_prep_dump_buf(pf, buflen)) { + p = i40e_dbg_dump_buf; + + len = sizeof(struct i40e_pf); + memcpy(p, pf, len); + p += len; + + len = (sizeof(struct i40e_aq_desc) + * pf->hw.aq.num_asq_entries); + memcpy(p, pf->hw.aq.asq.desc_buf.va, len); + p += len; + + len = (sizeof(struct i40e_aq_desc) + * pf->hw.aq.num_arq_entries); + memcpy(p, pf->hw.aq.arq.desc_buf.va, len); + p += len; + + i40e_dbg_dump_data_len = buflen; + dev_info(&pf->pdev->dev, + "PF seid %ld dumped %d bytes\n", + seid, (int)i40e_dbg_dump_data_len); + } + } else if (seid >= I40E_BASE_VSI_SEID) { + struct i40e_vsi *vsi = NULL; + struct i40e_mac_filter *f; + int filter_count = 0; + + mutex_lock(&pf->switch_mutex); + vsi = i40e_dbg_find_vsi(pf, seid); + if (!vsi) { + mutex_unlock(&pf->switch_mutex); + goto write_exit; + } + + buflen = sizeof(struct i40e_vsi); + buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors; + buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs; + buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs; + buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs; + list_for_each_entry(f, &vsi->mac_filter_list, list) + filter_count++; + buflen += sizeof(struct i40e_mac_filter) * filter_count; + + if (i40e_dbg_prep_dump_buf(pf, buflen)) { + p = i40e_dbg_dump_buf; + seid_found = true; + + len = 
sizeof(struct i40e_vsi); + memcpy(p, vsi, len); + p += len; + + if (vsi->num_q_vectors) { + len = (sizeof(struct i40e_q_vector) + * vsi->num_q_vectors); + memcpy(p, vsi->q_vectors, len); + p += len; + } + + if (vsi->num_queue_pairs) { + len = (sizeof(struct i40e_ring) * + vsi->num_queue_pairs); + memcpy(p, vsi->tx_rings, len); + p += len; + memcpy(p, vsi->rx_rings, len); + p += len; + } + + if (vsi->tx_rings[0]) { + len = sizeof(struct i40e_tx_buffer); + for (i = 0; i < vsi->num_queue_pairs; i++) { + memcpy(p, vsi->tx_rings[i]->tx_bi, len); + p += len; + } + len = sizeof(struct i40e_rx_buffer); + for (i = 0; i < vsi->num_queue_pairs; i++) { + memcpy(p, vsi->rx_rings[i]->rx_bi, len); + p += len; + } + } + + /* macvlan filter list */ + len = sizeof(struct i40e_mac_filter); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + memcpy(p, f, len); + p += len; + } + + i40e_dbg_dump_data_len = buflen; + dev_info(&pf->pdev->dev, + "VSI seid %ld dumped %d bytes\n", + seid, (int)i40e_dbg_dump_data_len); + } + mutex_unlock(&pf->switch_mutex); + } else if (seid >= I40E_BASE_VEB_SEID) { + struct i40e_veb *veb = NULL; + + mutex_lock(&pf->switch_mutex); + veb = i40e_dbg_find_veb(pf, seid); + if (!veb) { + mutex_unlock(&pf->switch_mutex); + goto write_exit; + } + + buflen = sizeof(struct i40e_veb); + if (i40e_dbg_prep_dump_buf(pf, buflen)) { + seid_found = true; + memcpy(i40e_dbg_dump_buf, veb, buflen); + i40e_dbg_dump_data_len = buflen; + dev_info(&pf->pdev->dev, + "VEB seid %ld dumped %d bytes\n", + seid, (int)i40e_dbg_dump_data_len); + } + mutex_unlock(&pf->switch_mutex); + } + +write_exit: + if (!seid_found) + dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid); + + return count; +} + +static const struct file_operations i40e_dbg_dump_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = i40e_dbg_dump_read, + .write = i40e_dbg_dump_write, +}; + +/************************************************************** + * command + * The command entry in debugfs is for giving the driver commands + * to be executed - these may be for changing the internal switch + * setup, adding or removing filters, or other things. Many of + * these will be useful for some forms of unit testing. 
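+ *
+ * As a usage sketch (the debugfs node name "command" and the SEID value
+ * below are assumptions for illustration, not taken from this file):
+ *   echo "dump vsi" > command         - list the known VSI SEIDs
+ *   echo "dump vsi 390" > command     - dump that VSI's state to the kernel log
+ *   echo "read 0x00000000" > command  - read a device register
+ *   echo "pfr" > command              - request a PF reset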
+ **************************************************************/ +static char i40e_dbg_command_buf[256] = ""; + +/** + * i40e_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + int bytes_not_copied; + int buf_size = 256; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + if (count < buf_size) + return -ENOSPC; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + + len = snprintf(buf, buf_size, "%s: %s\n", + pf->vsi[pf->lan_vsi]->netdev->name, + i40e_dbg_command_buf); + + bytes_not_copied = copy_to_user(buffer, buf, len); + kfree(buf); + + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum + * @pf: the i40e_pf created in command write + * @seid: the seid the user put in + **/ +static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) +{ + struct rtnl_link_stats64 *nstat; + struct i40e_mac_filter *f; + struct i40e_vsi *vsi; + int i; + + vsi = i40e_dbg_find_vsi(pf, seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "dump %d: seid not found\n", seid); + return; + } + dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); + if (vsi->netdev) + dev_info(&pf->pdev->dev, + " netdev: name = %s\n", + vsi->netdev->name); + if (vsi->active_vlans) + dev_info(&pf->pdev->dev, + " vlgrp: & = %p\n", vsi->active_vlans); + dev_info(&pf->pdev->dev, + " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", + vsi->netdev_registered, + vsi->current_netdev_flags, vsi->state, vsi->flags); + if (vsi == pf->vsi[pf->lan_vsi]) + dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + pf->hw.mac.addr, + pf->hw.mac.san_addr, + pf->hw.mac.port_addr); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + dev_info(&pf->pdev->dev, + " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", + f->macaddr, f->vlan, f->is_netdev, f->is_vf, + f->counter); + } + nstat = i40e_get_vsi_stats_struct(vsi); + dev_info(&pf->pdev->dev, + " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", + (long unsigned int)nstat->rx_packets, + (long unsigned int)nstat->rx_bytes, + (long unsigned int)nstat->rx_errors, + (long unsigned int)nstat->rx_dropped); + dev_info(&pf->pdev->dev, + " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", + (long unsigned int)nstat->tx_packets, + (long unsigned int)nstat->tx_bytes, + (long unsigned int)nstat->tx_errors, + (long unsigned int)nstat->tx_dropped); + dev_info(&pf->pdev->dev, + " net_stats: multicast = %lu, collisions = %lu\n", + (long unsigned int)nstat->multicast, + (long unsigned int)nstat->collisions); + dev_info(&pf->pdev->dev, + " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", + (long unsigned int)nstat->rx_length_errors, + (long unsigned int)nstat->rx_over_errors, + (long unsigned int)nstat->rx_crc_errors); + dev_info(&pf->pdev->dev, + " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", + (long unsigned int)nstat->rx_frame_errors, + (long unsigned int)nstat->rx_fifo_errors, + (long unsigned int)nstat->rx_missed_errors); + 
dev_info(&pf->pdev->dev, + " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", + (long unsigned int)nstat->tx_aborted_errors, + (long unsigned int)nstat->tx_carrier_errors, + (long unsigned int)nstat->tx_fifo_errors); + dev_info(&pf->pdev->dev, + " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", + (long unsigned int)nstat->tx_heartbeat_errors, + (long unsigned int)nstat->tx_window_errors); + dev_info(&pf->pdev->dev, + " net_stats: rx_compressed = %lu, tx_compressed = %lu\n", + (long unsigned int)nstat->rx_compressed, + (long unsigned int)nstat->tx_compressed); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_packets, + (long unsigned int)vsi->net_stats_offsets.rx_bytes, + (long unsigned int)vsi->net_stats_offsets.rx_errors, + (long unsigned int)vsi->net_stats_offsets.rx_dropped); + dev_info(&pf->pdev->dev, + " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", + (long unsigned int)vsi->net_stats_offsets.tx_packets, + (long unsigned int)vsi->net_stats_offsets.tx_bytes, + (long unsigned int)vsi->net_stats_offsets.tx_errors, + (long unsigned int)vsi->net_stats_offsets.tx_dropped); + dev_info(&pf->pdev->dev, + " net_stats_offsets: multicast = %lu, collisions = %lu\n", + (long unsigned int)vsi->net_stats_offsets.multicast, + (long unsigned int)vsi->net_stats_offsets.collisions); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_length_errors, + (long unsigned int)vsi->net_stats_offsets.rx_over_errors, + (long unsigned int)vsi->net_stats_offsets.rx_crc_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_frame_errors, + (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors, + (long unsigned int)vsi->net_stats_offsets.rx_missed_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors, + (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors, + (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", + (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors, + (long unsigned int)vsi->net_stats_offsets.tx_window_errors); + dev_info(&pf->pdev->dev, + " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n", + (long unsigned int)vsi->net_stats_offsets.rx_compressed, + (long unsigned int)vsi->net_stats_offsets.tx_compressed); + dev_info(&pf->pdev->dev, + " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", + vsi->tx_restart, vsi->tx_busy, + vsi->rx_buf_failed, vsi->rx_page_failed); + rcu_read_lock(); + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + if (!rx_ring) + continue; + + dev_info(&pf->pdev->dev, + " rx_rings[%i]: desc = %p\n", + i, rx_ring->desc); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", + i, rx_ring->dev, + rx_ring->netdev, + rx_ring->rx_bi); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", + i, rx_ring->state, + 
rx_ring->queue_index, + rx_ring->reg_idx); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", + i, rx_ring->rx_hdr_len, + rx_ring->rx_buf_len, + rx_ring->dtype); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, rx_ring->hsplit, + rx_ring->next_to_use, + rx_ring->next_to_clean, + rx_ring->ring_active); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", + i, rx_ring->stats.packets, + rx_ring->stats.bytes, + rx_ring->rx_stats.non_eop_descs); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n", + i, + rx_ring->rx_stats.alloc_page_failed, + rx_ring->rx_stats.alloc_buff_failed); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: size = %i, dma = 0x%08lx\n", + i, rx_ring->size, + (long unsigned int)rx_ring->dma); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: vsi = %p, q_vector = %p\n", + i, rx_ring->vsi, + rx_ring->q_vector); + } + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) + continue; + + dev_info(&pf->pdev->dev, + " tx_rings[%i]: desc = %p\n", + i, tx_ring->desc); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", + i, tx_ring->dev, + tx_ring->netdev, + tx_ring->tx_bi); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", + i, tx_ring->state, + tx_ring->queue_index, + tx_ring->reg_idx); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: dtype = %d\n", + i, tx_ring->dtype); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, tx_ring->hsplit, + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->ring_active); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", + i, tx_ring->stats.packets, + tx_ring->stats.bytes, + tx_ring->tx_stats.restart_queue); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", + i, + tx_ring->tx_stats.tx_busy, + tx_ring->tx_stats.tx_done_old); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: size = %i, dma = 0x%08lx\n", + i, tx_ring->size, + (long unsigned int)tx_ring->dma); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: vsi = %p, q_vector = %p\n", + i, tx_ring->vsi, + tx_ring->q_vector); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: DCB tc = %d\n", + i, tx_ring->dcb_tc); + } + rcu_read_unlock(); + dev_info(&pf->pdev->dev, + " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", + vsi->work_limit, vsi->rx_itr_setting, + ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed", + vsi->tx_itr_setting, + ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? 
"dynamic" : "fixed"); + dev_info(&pf->pdev->dev, + " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", + vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); + dev_info(&pf->pdev->dev, + " num_q_vectors = %i, base_vector = %i\n", + vsi->num_q_vectors, vsi->base_vector); + dev_info(&pf->pdev->dev, + " seid = %d, id = %d, uplink_seid = %d\n", + vsi->seid, vsi->id, vsi->uplink_seid); + dev_info(&pf->pdev->dev, + " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n", + vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc); + dev_info(&pf->pdev->dev, " type = %i\n", vsi->type); + dev_info(&pf->pdev->dev, + " info: valid_sections = 0x%04x, switch_id = 0x%04x\n", + vsi->info.valid_sections, vsi->info.switch_id); + dev_info(&pf->pdev->dev, + " info: sw_reserved[] = 0x%02x 0x%02x\n", + vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]); + dev_info(&pf->pdev->dev, + " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n", + vsi->info.sec_flags, vsi->info.sec_reserved); + dev_info(&pf->pdev->dev, + " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n", + vsi->info.pvid, vsi->info.fcoe_pvid, + vsi->info.port_vlan_flags); + dev_info(&pf->pdev->dev, + " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n", + vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1], + vsi->info.pvlan_reserved[2]); + dev_info(&pf->pdev->dev, + " info: ingress_table = 0x%08x, egress_table = 0x%08x\n", + vsi->info.ingress_table, vsi->info.egress_table); + dev_info(&pf->pdev->dev, + " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n", + vsi->info.cas_pv_tag, vsi->info.cas_pv_flags, + vsi->info.cas_pv_reserved); + dev_info(&pf->pdev->dev, + " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.queue_mapping[0], vsi->info.queue_mapping[1], + vsi->info.queue_mapping[2], vsi->info.queue_mapping[3], + vsi->info.queue_mapping[4], vsi->info.queue_mapping[5], + vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]); + dev_info(&pf->pdev->dev, + " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.queue_mapping[8], vsi->info.queue_mapping[9], + vsi->info.queue_mapping[10], vsi->info.queue_mapping[11], + vsi->info.queue_mapping[12], vsi->info.queue_mapping[13], + vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]); + dev_info(&pf->pdev->dev, + " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.tc_mapping[0], vsi->info.tc_mapping[1], + vsi->info.tc_mapping[2], vsi->info.tc_mapping[3], + vsi->info.tc_mapping[4], vsi->info.tc_mapping[5], + vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]); + dev_info(&pf->pdev->dev, + " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n", + vsi->info.queueing_opt_flags, + vsi->info.queueing_opt_reserved[0], + vsi->info.queueing_opt_reserved[1], + vsi->info.queueing_opt_reserved[2]); + dev_info(&pf->pdev->dev, + " info: up_enable_bits = 0x%02x\n", + vsi->info.up_enable_bits); + dev_info(&pf->pdev->dev, + " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n", + vsi->info.sched_reserved, vsi->info.outer_up_table); + dev_info(&pf->pdev->dev, + " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n", + vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1], + vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3], + vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5], + vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]); + 
dev_info(&pf->pdev->dev, + " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", + vsi->info.qs_handle[0], vsi->info.qs_handle[1], + vsi->info.qs_handle[2], vsi->info.qs_handle[3], + vsi->info.qs_handle[4], vsi->info.qs_handle[5], + vsi->info.qs_handle[6], vsi->info.qs_handle[7]); + dev_info(&pf->pdev->dev, + " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n", + vsi->info.stat_counter_idx, vsi->info.sched_id); + dev_info(&pf->pdev->dev, + " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", + vsi->info.resp_reserved[0], vsi->info.resp_reserved[1], + vsi->info.resp_reserved[2], vsi->info.resp_reserved[3], + vsi->info.resp_reserved[4], vsi->info.resp_reserved[5], + vsi->info.resp_reserved[6], vsi->info.resp_reserved[7], + vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], + vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); + if (vsi->back) + dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back); + dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); + dev_info(&pf->pdev->dev, + " tc_config: numtc = %d, enabled_tc = 0x%x\n", + vsi->tc_config.numtc, vsi->tc_config.enabled_tc); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, + " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n", + i, vsi->tc_config.tc_info[i].qoffset, + vsi->tc_config.tc_info[i].qcount, + vsi->tc_config.tc_info[i].netdev_tc); + } + dev_info(&pf->pdev->dev, + " bw: bw_limit = %d, bw_max_quanta = %d\n", + vsi->bw_limit, vsi->bw_max_quanta); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, + " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n", + i, vsi->bw_ets_share_credits[i], + vsi->bw_ets_limit_credits[i], + vsi->bw_ets_max_quanta[i]); + } +#ifdef I40E_FCOE + if (vsi->type == I40E_VSI_FCOE) { + dev_info(&pf->pdev->dev, + " fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n", + vsi->fcoe_stats.rx_fcoe_packets, + vsi->fcoe_stats.rx_fcoe_dwords, + vsi->fcoe_stats.rx_fcoe_dropped); + dev_info(&pf->pdev->dev, + " fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n", + vsi->fcoe_stats.tx_fcoe_packets, + vsi->fcoe_stats.tx_fcoe_dwords); + dev_info(&pf->pdev->dev, + " fcoe_stats: bad_crc = %llu, last_error = %llu\n", + vsi->fcoe_stats.fcoe_bad_fccrc, + vsi->fcoe_stats.fcoe_last_error); + dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n", + vsi->fcoe_stats.fcoe_ddp_count); + } +#endif +} + +/** + * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) +{ + struct i40e_adminq_ring *ring; + struct i40e_hw *hw = &pf->hw; + char hdr[32]; + int i; + + snprintf(hdr, sizeof(hdr), "%s %s: ", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); + + /* first the send (command) ring, then the receive (event) ring */ + dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n"); + ring = &(hw->aq.asq); + for (i = 0; i < ring->count; i++) { + struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, + " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", + i, d->flags, d->opcode, d->datalen, d->retval, + d->cookie_high, d->cookie_low); + print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, + 16, 1, d->params.raw, 16, 0); + } + + dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); + ring = &(hw->aq.arq); + for (i = 0; i < ring->count; i++) { + struct i40e_aq_desc 
*d = I40E_ADMINQ_DESC(*ring, i); + dev_info(&pf->pdev->dev, + " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", + i, d->flags, d->opcode, d->datalen, d->retval, + d->cookie_high, d->cookie_low); + print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, + 16, 1, d->params.raw, 16, 0); + } +} + +/** + * i40e_dbg_dump_desc - handles dump desc write into command datum + * @cnt: number of arguments that the user supplied + * @vsi_seid: vsi id entered by user + * @ring_id: ring id entered by user + * @desc_n: descriptor number entered by user + * @pf: the i40e_pf created in command write + * @is_rx_ring: true if rx, false if tx + **/ +static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, + struct i40e_pf *pf, bool is_rx_ring) +{ + struct i40e_tx_desc *txd; + union i40e_rx_desc *rxd; + struct i40e_ring *ring; + struct i40e_vsi *vsi; + int i; + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); + return; + } + if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { + dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); + return; + } + if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) { + dev_info(&pf->pdev->dev, + "descriptor rings have not been allocated for vsi %d\n", + vsi_seid); + return; + } + + ring = kmemdup(is_rx_ring + ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], + sizeof(*ring), GFP_KERNEL); + if (!ring) + return; + + if (cnt == 2) { + dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", + vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); + for (i = 0; i < ring->count; i++) { + if (!is_rx_ring) { + txd = I40E_TX_DESC(ring, i); + dev_info(&pf->pdev->dev, + " d[%03i] = 0x%016llx 0x%016llx\n", + i, txd->buffer_addr, + txd->cmd_type_offset_bsz); + } else if (sizeof(union i40e_rx_desc) == + sizeof(union i40e_16byte_rx_desc)) { + rxd = I40E_RX_DESC(ring, i); + dev_info(&pf->pdev->dev, + " d[%03i] = 0x%016llx 0x%016llx\n", + i, rxd->read.pkt_addr, + rxd->read.hdr_addr); + } else { + rxd = I40E_RX_DESC(ring, i); + dev_info(&pf->pdev->dev, + " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + i, rxd->read.pkt_addr, + rxd->read.hdr_addr, + rxd->read.rsvd1, rxd->read.rsvd2); + } + } + } else if (cnt == 3) { + if (desc_n >= ring->count || desc_n < 0) { + dev_info(&pf->pdev->dev, + "descriptor %d not found\n", desc_n); + goto out; + } + if (!is_rx_ring) { + txd = I40E_TX_DESC(ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + txd->buffer_addr, txd->cmd_type_offset_bsz); + } else if (sizeof(union i40e_rx_desc) == + sizeof(union i40e_16byte_rx_desc)) { + rxd = I40E_RX_DESC(ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + rxd->read.pkt_addr, rxd->read.hdr_addr); + } else { + rxd = I40E_RX_DESC(ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + rxd->read.pkt_addr, rxd->read.hdr_addr, + rxd->read.rsvd1, rxd->read.rsvd2); + } + } else { + dev_info(&pf->pdev->dev, "dump desc rx/tx []\n"); + } + +out: + kfree(ring); +} + +/** + * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i]) + 
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", + i, pf->vsi[i]->seid); +} + +/** + * i40e_dbg_dump_stats - handles dump stats write into command datum + * @pf: the i40e_pf created in command write + * @estats: the eth stats structure to be dumped + **/ +static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, + struct i40e_eth_stats *estats) +{ + dev_info(&pf->pdev->dev, " ethstats:\n"); + dev_info(&pf->pdev->dev, + " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n", + estats->rx_bytes, estats->rx_unicast, estats->rx_multicast); + dev_info(&pf->pdev->dev, + " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n", + estats->rx_broadcast, estats->rx_discards); + dev_info(&pf->pdev->dev, + " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", + estats->rx_unknown_protocol, estats->tx_bytes); + dev_info(&pf->pdev->dev, + " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n", + estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); + dev_info(&pf->pdev->dev, + " tx_discards = \t%lld \ttx_errors = \t\t%lld\n", + estats->tx_discards, estats->tx_errors); +} + +/** + * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb + * @pf: the i40e_pf created in command write + * @seid: the seid the user put in + **/ +static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) +{ + struct i40e_veb *veb; + + if ((seid < I40E_BASE_VEB_SEID) || + (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) { + dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); + return; + } + + veb = i40e_dbg_find_veb(pf, seid); + if (!veb) { + dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); + return; + } + dev_info(&pf->pdev->dev, + "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", + veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, + veb->uplink_seid, + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + i40e_dbg_dump_eth_stats(pf, &veb->stats); +} + +/** + * i40e_dbg_dump_veb_all - dumps all known veb's stats + * @pf: the i40e_pf created in command write + **/ +static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) +{ + struct i40e_veb *veb; + int i; + + for (i = 0; i < I40E_MAX_VEB; i++) { + veb = pf->veb[i]; + if (veb) + i40e_dbg_dump_veb_seid(pf, veb->seid); + } +} + +/** + * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR + * @pf: the PF that would be altered + * @flag: flag that needs enabling or disabling + * @enable: Enable/disable FD SD/ATR + **/ +static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) +{ + if (enable) { + pf->flags |= flag; + } else { + pf->flags &= ~flag; + pf->auto_disable_flags |= flag; + } + dev_info(&pf->pdev->dev, "requesting a PF reset\n"); + i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); +} + +#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) +/** + * i40e_dbg_command_write - write into command datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_command_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + struct i40e_vsi *vsi; + int vsi_seid; + int veb_seid; + int cnt; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied < 0) { + kfree(cmd_buf); + return bytes_not_copied; + } + if (bytes_not_copied > 0) + count -= bytes_not_copied; + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "add vsi", 7) == 0) { + vsi_seid = -1; + cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); + if (cnt == 0) { + /* default to PF VSI */ + vsi_seid = pf->vsi[pf->lan_vsi]->seid; + } else if (vsi_seid < 0) { + dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", + vsi_seid); + goto command_write_done; + } + + /* By default we are in VEPA mode, if this is the first VF/VMDq + * VSI to be added switch to VEB mode. 
+ */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } + + vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); + if (vsi) + dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", + vsi->seid, vsi->uplink_seid); + else + dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf); + + } else if (strncmp(cmd_buf, "del vsi", 7) == 0) { + sscanf(&cmd_buf[7], "%i", &vsi_seid); + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n", + vsi_seid); + goto command_write_done; + } + + dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid); + i40e_vsi_release(vsi); + + } else if (strncmp(cmd_buf, "add relay", 9) == 0) { + struct i40e_veb *veb; + int uplink_seid, i; + + cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "add relay: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } else if (uplink_seid < 0) { + dev_info(&pf->pdev->dev, + "add relay %d: bad uplink seid\n", + uplink_seid); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "add relay: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) + break; + if (i >= I40E_MAX_VEB && uplink_seid != 0 && + uplink_seid != pf->mac_seid) { + dev_info(&pf->pdev->dev, + "add relay: relay uplink %d not found\n", + uplink_seid); + goto command_write_done; + } + + veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, + vsi->tc_config.enabled_tc); + if (veb) + dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); + else + dev_info(&pf->pdev->dev, "add relay failed\n"); + + } else if (strncmp(cmd_buf, "del relay", 9) == 0) { + int i; + cnt = sscanf(&cmd_buf[9], "%i", &veb_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, + "del relay: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } else if (veb_seid < 0) { + dev_info(&pf->pdev->dev, + "del relay %d: bad relay seid\n", veb_seid); + goto command_write_done; + } + + /* find the veb */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == veb_seid) + break; + if (i >= I40E_MAX_VEB) { + dev_info(&pf->pdev->dev, + "del relay: relay %d not found\n", veb_seid); + goto command_write_done; + } + + dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); + i40e_veb_release(pf->veb[i]); + + } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { + struct i40e_mac_filter *f; + int vlan = 0; + u8 ma[6]; + int ret; + + cnt = sscanf(&cmd_buf[11], + "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i", + &vsi_seid, + &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5], + &vlan); + if (cnt == 7) { + vlan = 0; + } else if (cnt != 8) { + dev_info(&pf->pdev->dev, + "add macaddr: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "add macaddr: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + f = i40e_add_filter(vsi, ma, vlan, false, false); + ret = i40e_sync_vsi_filters(vsi); + if (f && !ret) + dev_info(&pf->pdev->dev, + "add macaddr: %pM vlan=%d added to VSI %d\n", + ma, vlan, vsi_seid); + else + dev_info(&pf->pdev->dev, + "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n", + ma, vlan, vsi_seid, f, ret); + + } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { + int vlan 
= 0; + u8 ma[6]; + int ret; + + cnt = sscanf(&cmd_buf[11], + "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i", + &vsi_seid, + &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5], + &vlan); + if (cnt == 7) { + vlan = 0; + } else if (cnt != 8) { + dev_info(&pf->pdev->dev, + "del macaddr: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "del macaddr: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + i40e_del_filter(vsi, ma, vlan, false, false); + ret = i40e_sync_vsi_filters(vsi); + if (!ret) + dev_info(&pf->pdev->dev, + "del macaddr: %pM vlan=%d removed from VSI %d\n", + ma, vlan, vsi_seid); + else + dev_info(&pf->pdev->dev, + "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n", + ma, vlan, vsi_seid, ret); + + } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { + i40e_status ret; + u16 vid; + unsigned int v; + + cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); + if (cnt != 2) { + dev_info(&pf->pdev->dev, + "add pvid: bad command string, cnt=%d\n", cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n", + vsi_seid); + goto command_write_done; + } + + vid = v; + ret = i40e_vsi_add_pvid(vsi, vid); + if (!ret) + dev_info(&pf->pdev->dev, + "add pvid: %d added to VSI %d\n", + vid, vsi_seid); + else + dev_info(&pf->pdev->dev, + "add pvid: %d to VSI %d failed, ret=%d\n", + vid, vsi_seid, ret); + + } else if (strncmp(cmd_buf, "del pvid", 8) == 0) { + + cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, + "del pvid: bad command string, cnt=%d\n", + cnt); + goto command_write_done; + } + + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "del pvid: VSI %d not found\n", vsi_seid); + goto command_write_done; + } + + i40e_vsi_remove_pvid(vsi); + dev_info(&pf->pdev->dev, + "del pvid: removed from VSI %d\n", vsi_seid); + + } else if (strncmp(cmd_buf, "dump", 4) == 0) { + if (strncmp(&cmd_buf[5], "switch", 6) == 0) { + i40e_fetch_switch_configuration(pf, true); + } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) { + cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); + if (cnt > 0) + i40e_dbg_dump_vsi_seid(pf, vsi_seid); + else + i40e_dbg_dump_vsi_no_seid(pf); + } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) { + cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); + if (cnt > 0) + i40e_dbg_dump_veb_seid(pf, vsi_seid); + else + i40e_dbg_dump_veb_all(pf); + } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) { + int ring_id, desc_n; + if (strncmp(&cmd_buf[10], "rx", 2) == 0) { + cnt = sscanf(&cmd_buf[12], "%i %i %i", + &vsi_seid, &ring_id, &desc_n); + i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, + desc_n, pf, true); + } else if (strncmp(&cmd_buf[10], "tx", 2) + == 0) { + cnt = sscanf(&cmd_buf[12], "%i %i %i", + &vsi_seid, &ring_id, &desc_n); + i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, + desc_n, pf, false); + } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) { + i40e_dbg_dump_aq_desc(pf); + } else { + dev_info(&pf->pdev->dev, + "dump desc tx []\n"); + dev_info(&pf->pdev->dev, + "dump desc rx []\n"); + dev_info(&pf->pdev->dev, "dump desc aq\n"); + } + } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { + dev_info(&pf->pdev->dev, + "core reset count: %d\n", pf->corer_count); + dev_info(&pf->pdev->dev, + "global reset count: %d\n", pf->globr_count); + dev_info(&pf->pdev->dev, + "emp reset count: %d\n", pf->empr_count); + dev_info(&pf->pdev->dev, + "pf 
reset count: %d\n", pf->pfr_count); + dev_info(&pf->pdev->dev, + "pf tx sluggish count: %d\n", + pf->tx_sluggish_count); + } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { + struct i40e_aqc_query_port_ets_config_resp *bw_data; + struct i40e_dcbx_config *cfg = + &pf->hw.local_dcbx_config; + struct i40e_dcbx_config *r_cfg = + &pf->hw.remote_dcbx_config; + int i, ret; + + bw_data = kzalloc(sizeof( + struct i40e_aqc_query_port_ets_config_resp), + GFP_KERNEL); + if (!bw_data) { + ret = -ENOMEM; + goto command_write_done; + } + + ret = i40e_aq_query_port_ets_config(&pf->hw, + pf->mac_seid, + bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Query Port ETS Config AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + kfree(bw_data); + bw_data = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, + "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n", + bw_data->tc_valid_bits, + bw_data->tc_strict_priority_bits, + le16_to_cpu(bw_data->tc_bw_max[0]), + le16_to_cpu(bw_data->tc_bw_max[1])); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n", + bw_data->tc_bw_share_credits[i], + le16_to_cpu(bw_data->tc_bw_limits[i])); + } + + kfree(bw_data); + bw_data = NULL; + + dev_info(&pf->pdev->dev, + "port dcbx_mode=%d\n", cfg->dcbx_mode); + dev_info(&pf->pdev->dev, + "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", + cfg->etscfg.willing, cfg->etscfg.cbs, + cfg->etscfg.maxtcs); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, cfg->etscfg.prioritytable[i], + cfg->etscfg.tcbwtable[i], + cfg->etscfg.tsatable[i]); + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, cfg->etsrec.prioritytable[i], + cfg->etsrec.tcbwtable[i], + cfg->etsrec.tsatable[i]); + } + dev_info(&pf->pdev->dev, + "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", + cfg->pfc.willing, cfg->pfc.mbc, + cfg->pfc.pfccap, cfg->pfc.pfcenable); + dev_info(&pf->pdev->dev, + "port app_table: num_apps=%d\n", cfg->numapps); + for (i = 0; i < cfg->numapps; i++) { + dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n", + i, cfg->app[i].priority, + cfg->app[i].selector, + cfg->app[i].protocolid); + } + /* Peer TLV DCBX data */ + dev_info(&pf->pdev->dev, + "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", + r_cfg->etscfg.willing, + r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, r_cfg->etscfg.prioritytable[i], + r_cfg->etscfg.tcbwtable[i], + r_cfg->etscfg.tsatable[i]); + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", + i, r_cfg->etsrec.prioritytable[i], + r_cfg->etsrec.tcbwtable[i], + r_cfg->etsrec.tsatable[i]); + } + dev_info(&pf->pdev->dev, + "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", + r_cfg->pfc.willing, + r_cfg->pfc.mbc, + r_cfg->pfc.pfccap, + r_cfg->pfc.pfcenable); + dev_info(&pf->pdev->dev, + "remote port app_table: num_apps=%d\n", + r_cfg->numapps); + for (i = 0; i < r_cfg->numapps; i++) { + dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n", + i, r_cfg->app[i].priority, + r_cfg->app[i].selector, + r_cfg->app[i].protocolid); + } + } else if 
(strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) { + int cluster_id, table_id; + int index, ret; + u16 buff_len = 4096; + u32 next_index; + u8 next_table; + u8 *buff; + u16 rlen; + + cnt = sscanf(&cmd_buf[18], "%i %i %i", + &cluster_id, &table_id, &index); + if (cnt != 3) { + dev_info(&pf->pdev->dev, + "dump debug fwdata \n"); + goto command_write_done; + } + + dev_info(&pf->pdev->dev, + "AQ debug dump fwdata params %x %x %x %x\n", + cluster_id, table_id, index, buff_len); + buff = kzalloc(buff_len, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id, + index, buff_len, buff, &rlen, + &next_table, &next_index, + NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "debug dump fwdata AQ Failed %d 0x%x\n", + ret, pf->hw.aq.asq_last_status); + kfree(buff); + buff = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, + "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n", + rlen, next_table, next_index); + print_hex_dump(KERN_INFO, "AQ buffer WB: ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, rlen, true); + kfree(buff); + buff = NULL; + } else { + dev_info(&pf->pdev->dev, + "dump desc tx [], dump desc rx [],\n"); + dev_info(&pf->pdev->dev, "dump switch\n"); + dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); + dev_info(&pf->pdev->dev, "dump reset stats\n"); + dev_info(&pf->pdev->dev, "dump port\n"); + dev_info(&pf->pdev->dev, + "dump debug fwdata \n"); + } + + } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) { + u32 level; + cnt = sscanf(&cmd_buf[10], "%i", &level); + if (cnt) { + if (I40E_DEBUG_USER & level) { + pf->hw.debug_mask = level; + dev_info(&pf->pdev->dev, + "set hw.debug_mask = 0x%08x\n", + pf->hw.debug_mask); + } + pf->msg_enable = level; + dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n", + pf->msg_enable); + } else { + dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n", + pf->msg_enable); + } + } else if (strncmp(cmd_buf, "pfr", 3) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "corer", 5) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "globr", 5) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "empr", 4) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); + + } else if (strncmp(cmd_buf, "read", 4) == 0) { + u32 address; + u32 value; + cnt = sscanf(&cmd_buf[4], "%i", &address); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + /* check the range on address */ + if (address >= I40E_MAX_REGISTER) { + dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n", + address); + goto command_write_done; + } + + value = rd32(&pf->hw, address); + dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n", + address, value); + + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 address, value; + cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); + if (cnt != 2) { + dev_info(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + + /* check the range on address */ + if (address >= I40E_MAX_REGISTER) { + dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n", + address); + goto command_write_done; + } + wr32(&pf->hw, address, value); + value = rd32(&pf->hw, 
address); + dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n", + address, value); + } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) { + if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { + cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); + if (cnt == 0) { + int i; + for (i = 0; i < pf->num_alloc_vsi; i++) + i40e_vsi_reset_stats(pf->vsi[i]); + dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); + } else if (cnt == 1) { + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "clear_stats vsi: bad vsi %d\n", + vsi_seid); + goto command_write_done; + } + i40e_vsi_reset_stats(vsi); + dev_info(&pf->pdev->dev, + "vsi clear stats called for vsi %d\n", + vsi_seid); + } else { + dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); + } + } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { + if (pf->hw.partition_id == 1) { + i40e_pf_reset_stats(pf); + dev_info(&pf->pdev->dev, "port stats cleared\n"); + } else { + dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); + } + } else { + dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); + } + } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { + struct i40e_aq_desc *desc; + i40e_status ret; + + desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + if (!desc) + goto command_write_done; + cnt = sscanf(&cmd_buf[11], + "%hi %hi %hi %hi %i %i %i %i %i %i", + &desc->flags, + &desc->opcode, &desc->datalen, &desc->retval, + &desc->cookie_high, &desc->cookie_low, + &desc->params.internal.param0, + &desc->params.internal.param1, + &desc->params.internal.param2, + &desc->params.internal.param3); + if (cnt != 10) { + dev_info(&pf->pdev->dev, + "send aq_cmd: bad command string, cnt=%d\n", + cnt); + kfree(desc); + desc = NULL; + goto command_write_done; + } + ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); + if (!ret) { + dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); + } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x AQ Error: %d\n", + desc->opcode, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x Status: %d\n", + desc->opcode, ret); + } + dev_info(&pf->pdev->dev, + "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + desc->flags, desc->opcode, desc->datalen, desc->retval, + desc->cookie_high, desc->cookie_low, + desc->params.internal.param0, + desc->params.internal.param1, + desc->params.internal.param2, + desc->params.internal.param3); + kfree(desc); + desc = NULL; + } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) { + struct i40e_aq_desc *desc; + i40e_status ret; + u16 buffer_len; + u8 *buff; + + desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + if (!desc) + goto command_write_done; + cnt = sscanf(&cmd_buf[20], + "%hi %hi %hi %hi %i %i %i %i %i %i %hi", + &desc->flags, + &desc->opcode, &desc->datalen, &desc->retval, + &desc->cookie_high, &desc->cookie_low, + &desc->params.internal.param0, + &desc->params.internal.param1, + &desc->params.internal.param2, + &desc->params.internal.param3, + &buffer_len); + if (cnt != 11) { + dev_info(&pf->pdev->dev, + "send indirect aq_cmd: bad command string, cnt=%d\n", + cnt); + kfree(desc); + desc = NULL; + goto command_write_done; + } + /* Just stub a buffer big enough in case user messed up */ + if (buffer_len == 0) + buffer_len = 1280; + + buff = kzalloc(buffer_len, GFP_KERNEL); + if (!buff) { + kfree(desc); + desc = NULL; + goto 
command_write_done; + } + desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + ret = i40e_asq_send_command(&pf->hw, desc, buff, + buffer_len, NULL); + if (!ret) { + dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); + } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x AQ Error: %d\n", + desc->opcode, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x Status: %d\n", + desc->opcode, ret); + } + dev_info(&pf->pdev->dev, + "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + desc->flags, desc->opcode, desc->datalen, desc->retval, + desc->cookie_high, desc->cookie_low, + desc->params.internal.param0, + desc->params.internal.param1, + desc->params.internal.param2, + desc->params.internal.param3); + print_hex_dump(KERN_INFO, "AQ buffer WB: ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, buffer_len, true); + kfree(buff); + buff = NULL; + kfree(desc); + desc = NULL; + } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || + (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { + struct i40e_fdir_filter fd_data; + u16 packet_len, i, j = 0; + char *asc_packet; + u8 *raw_packet; + bool add = false; + int ret; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + if (strncmp(cmd_buf, "add", 3) == 0) + add = true; + + if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, + GFP_KERNEL); + if (!asc_packet) + goto command_write_done; + + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, + GFP_KERNEL); + + if (!raw_packet) { + kfree(asc_packet); + asc_packet = NULL; + goto command_write_done; + } + + cnt = sscanf(&cmd_buf[13], + "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", + &fd_data.q_index, + &fd_data.flex_off, &fd_data.pctype, + &fd_data.dest_vsi, &fd_data.dest_ctl, + &fd_data.fd_status, &fd_data.cnt_index, + &fd_data.fd_id, &packet_len, asc_packet); + if (cnt != 10) { + dev_info(&pf->pdev->dev, + "program fd_filter: bad command string, cnt=%d\n", + cnt); + kfree(asc_packet); + asc_packet = NULL; + kfree(raw_packet); + goto command_write_done; + } + + /* fix packet length if user entered 0 */ + if (packet_len == 0) + packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE; + + /* make sure to check the max as well */ + packet_len = min_t(u16, + packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE); + + for (i = 0; i < packet_len; i++) { + sscanf(&asc_packet[j], "%2hhx ", + &raw_packet[i]); + j += 3; + } + dev_info(&pf->pdev->dev, "FD raw packet dump\n"); + print_hex_dump(KERN_INFO, "FD raw packet: ", + DUMP_PREFIX_OFFSET, 16, 1, + raw_packet, packet_len, true); + ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add); + if (!ret) { + dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); + } else { + dev_info(&pf->pdev->dev, + "Filter command send failed %d\n", ret); + } + kfree(raw_packet); + raw_packet = NULL; + kfree(asc_packet); + asc_packet = NULL; + } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); + } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); + } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) { + dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", + i40e_get_current_fd_count(pf)); + } else if (strncmp(cmd_buf, "lldp", 4) == 0) { + if (strncmp(&cmd_buf[5], "stop", 
4) == 0) { + int ret; + ret = i40e_aq_stop_lldp(&pf->hw, false, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Stop LLDP AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, + pf->hw.mac.addr, + I40E_ETH_P_LLDP, 0, + pf->vsi[pf->lan_vsi]->seid, + 0, true, NULL, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: Add Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); + goto command_write_done; + } +#ifdef CONFIG_I40E_DCB + pf->dcbx_cap = DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */ + } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { + int ret; + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, + pf->hw.mac.addr, + I40E_ETH_P_LLDP, 0, + pf->vsi[pf->lan_vsi]->seid, + 0, false, NULL, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: Remove Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); + /* Continue and start FW LLDP anyways */ + } + + ret = i40e_aq_start_lldp(&pf->hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Start LLDP AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } +#ifdef CONFIG_I40E_DCB + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */ + } else if (strncmp(&cmd_buf[5], + "get local", 9) == 0) { + u16 llen, rlen; + int ret; + u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_aq_get_lldp_mib(&pf->hw, 0, + I40E_AQ_LLDP_MIB_LOCAL, + buff, I40E_LLDPDU_SIZE, + &llen, &rlen, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Get LLDP MIB (local) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + kfree(buff); + buff = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, "LLDP MIB (local)\n"); + print_hex_dump(KERN_INFO, "LLDP MIB (local): ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, I40E_LLDPDU_SIZE, true); + kfree(buff); + buff = NULL; + } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { + u16 llen, rlen; + int ret; + u8 *buff; + buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_aq_get_lldp_mib(&pf->hw, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + I40E_AQ_LLDP_MIB_REMOTE, + buff, I40E_LLDPDU_SIZE, + &llen, &rlen, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Get LLDP MIB (remote) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + kfree(buff); + buff = NULL; + goto command_write_done; + } + dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n"); + print_hex_dump(KERN_INFO, "LLDP MIB (remote): ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, I40E_LLDPDU_SIZE, true); + kfree(buff); + buff = NULL; + } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { + int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, + true, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } + } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) { + int ret; + ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, + false, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n", + pf->hw.aq.asq_last_status); + goto command_write_done; + } + } + } else if (strncmp(cmd_buf, "nvm read", 8) == 0) { + u16 buffer_len, bytes; + u16 module; + u32 offset; + u16 *buff; + int ret; + + cnt = sscanf(&cmd_buf[8], "%hx %x 
%hx", + &module, &offset, &buffer_len); + if (cnt == 0) { + module = 0; + offset = 0; + buffer_len = 0; + } else if (cnt == 1) { + offset = 0; + buffer_len = 0; + } else if (cnt == 2) { + buffer_len = 0; + } else if (cnt > 3) { + dev_info(&pf->pdev->dev, + "nvm read: bad command string, cnt=%d\n", cnt); + goto command_write_done; + } + + /* set the max length */ + buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2); + + bytes = 2 * buffer_len; + + /* read at least 1k bytes, no more than 4kB */ + bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE); + buff = kzalloc(bytes, GFP_KERNEL); + if (!buff) + goto command_write_done; + + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed Acquiring NVM resource for read err=%d status=0x%x\n", + ret, pf->hw.aq.asq_last_status); + kfree(buff); + goto command_write_done; + } + + ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset), + bytes, (u8 *)buff, true, NULL); + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, + "Read NVM AQ failed err=%d status=0x%x\n", + ret, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "Read NVM module=0x%x offset=0x%x words=%d\n", + module, offset, buffer_len); + if (bytes) + print_hex_dump(KERN_INFO, "NVM Dump: ", + DUMP_PREFIX_OFFSET, 16, 2, + buff, bytes, true); + } + kfree(buff); + buff = NULL; + } else { + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + dev_info(&pf->pdev->dev, "available commands\n"); + dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n"); + dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n"); + dev_info(&pf->pdev->dev, " add relay \n"); + dev_info(&pf->pdev->dev, " del relay \n"); + dev_info(&pf->pdev->dev, " add macaddr [vlan]\n"); + dev_info(&pf->pdev->dev, " del macaddr [vlan]\n"); + dev_info(&pf->pdev->dev, " add pvid \n"); + dev_info(&pf->pdev->dev, " del pvid \n"); + dev_info(&pf->pdev->dev, " dump switch\n"); + dev_info(&pf->pdev->dev, " dump vsi [seid]\n"); + dev_info(&pf->pdev->dev, " dump desc tx []\n"); + dev_info(&pf->pdev->dev, " dump desc rx []\n"); + dev_info(&pf->pdev->dev, " dump desc aq\n"); + dev_info(&pf->pdev->dev, " dump reset stats\n"); + dev_info(&pf->pdev->dev, " dump debug fwdata \n"); + dev_info(&pf->pdev->dev, " msg_enable [level]\n"); + dev_info(&pf->pdev->dev, " read \n"); + dev_info(&pf->pdev->dev, " write \n"); + dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); + dev_info(&pf->pdev->dev, " clear_stats port\n"); + dev_info(&pf->pdev->dev, " pfr\n"); + dev_info(&pf->pdev->dev, " corer\n"); + dev_info(&pf->pdev->dev, " globr\n"); + dev_info(&pf->pdev->dev, " send aq_cmd \n"); + dev_info(&pf->pdev->dev, " send indirect aq_cmd \n"); + dev_info(&pf->pdev->dev, " add fd_filter \n"); + dev_info(&pf->pdev->dev, " rem fd_filter \n"); + dev_info(&pf->pdev->dev, " fd-atr off\n"); + dev_info(&pf->pdev->dev, " fd-atr on\n"); + dev_info(&pf->pdev->dev, " fd current cnt"); + dev_info(&pf->pdev->dev, " lldp start\n"); + dev_info(&pf->pdev->dev, " lldp stop\n"); + dev_info(&pf->pdev->dev, " lldp get local\n"); + dev_info(&pf->pdev->dev, " lldp get remote\n"); + dev_info(&pf->pdev->dev, " lldp event on\n"); + dev_info(&pf->pdev->dev, " lldp event off\n"); + dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n"); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations i40e_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = i40e_dbg_command_read, + .write = 
i40e_dbg_command_write, +}; + +/************************************************************** + * netdev_ops + * The netdev_ops entry in debugfs is for giving the driver commands + * to be executed from the netdev operations. + **************************************************************/ +static char i40e_dbg_netdev_ops_buf[256] = ""; + +/** + * i40e_dbg_netdev_ops - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + int bytes_not_copied; + int buf_size = 256; + char *buf; + int len; + + /* don't allow partal reads */ + if (*ppos != 0) + return 0; + if (count < buf_size) + return -ENOSPC; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) + return -ENOSPC; + + len = snprintf(buf, buf_size, "%s: %s\n", + pf->vsi[pf->lan_vsi]->netdev->name, + i40e_dbg_netdev_ops_buf); + + bytes_not_copied = copy_to_user(buffer, buf, len); + kfree(buf); + + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * i40e_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct i40e_pf *pf = filp->private_data; + int bytes_not_copied; + struct i40e_vsi *vsi; + char *buf_tmp; + int vsi_seid; + int i, cnt; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(i40e_dbg_netdev_ops_buf)) + return -ENOSPC; + + memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf)); + bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf, + buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied > 0) + count -= bytes_not_copied; + i40e_dbg_netdev_ops_buf[count] = '\0'; + + buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); + if (buf_tmp) { + *buf_tmp = '\0'; + count = buf_tmp - i40e_dbg_netdev_ops_buf + 1; + } + + if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "tx_timeout \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "tx_timeout: VSI %d not found\n", vsi_seid); + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", + vsi_seid); + } else if (test_bit(__I40E_DOWN, &vsi->state)) { + dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", + vsi_seid); + } else if (rtnl_trylock()) { + vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev); + rtnl_unlock(); + dev_info(&pf->pdev->dev, "tx_timeout called\n"); + } else { + dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); + } + } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { + int mtu; + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i", + &vsi_seid, &mtu); + if (cnt != 2) { + dev_info(&pf->pdev->dev, "change_mtu \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "change_mtu: VSI %d not found\n", vsi_seid); + } else if (!vsi->netdev) { + 
dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { + vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, + mtu); + rtnl_unlock(); + dev_info(&pf->pdev->dev, "change_mtu called\n"); + } else { + dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); + } + + } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) { + cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "set_rx_mode \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "set_rx_mode: VSI %d not found\n", vsi_seid); + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { + vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev); + rtnl_unlock(); + dev_info(&pf->pdev->dev, "set_rx_mode called\n"); + } else { + dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); + } + + } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) { + cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid); + if (cnt != 1) { + dev_info(&pf->pdev->dev, "napi \n"); + goto netdev_ops_write_done; + } + vsi = i40e_dbg_find_vsi(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, "napi: VSI %d not found\n", + vsi_seid); + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n", + vsi_seid); + } else { + for (i = 0; i < vsi->num_q_vectors; i++) + napi_schedule(&vsi->q_vectors[i]->napi); + dev_info(&pf->pdev->dev, "napi called\n"); + } + } else { + dev_info(&pf->pdev->dev, "unknown command '%s'\n", + i40e_dbg_netdev_ops_buf); + dev_info(&pf->pdev->dev, "available commands\n"); + dev_info(&pf->pdev->dev, " tx_timeout \n"); + dev_info(&pf->pdev->dev, " change_mtu \n"); + dev_info(&pf->pdev->dev, " set_rx_mode \n"); + dev_info(&pf->pdev->dev, " napi \n"); + } +netdev_ops_write_done: + return count; +} + +static const struct file_operations i40e_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = i40e_dbg_netdev_ops_read, + .write = i40e_dbg_netdev_ops_write, +}; + +/** + * i40e_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void i40e_dbg_pf_init(struct i40e_pf *pf) +{ + struct dentry *pfile; + const char *name = pci_name(pf->pdev); + const struct device *dev = &pf->pdev->dev; + + pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); + if (!pf->i40e_dbg_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_command_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_dump_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, + &i40e_dbg_netdev_ops_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->i40e_dbg_pf); + return; +} + +/** + * i40e_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void i40e_dbg_pf_exit(struct i40e_pf *pf) +{ + debugfs_remove_recursive(pf->i40e_dbg_pf); + pf->i40e_dbg_pf = NULL; + + kfree(i40e_dbg_dump_buf); + i40e_dbg_dump_buf = NULL; +} + +/** + * i40e_dbg_init - start up debugfs for the driver + **/ +void i40e_dbg_init(void) +{ + i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL); + if 
(!i40e_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * i40e_dbg_exit - clean out the driver's debugfs entries + **/ +void i40e_dbg_exit(void) +{ + debugfs_remove_recursive(i40e_dbg_root); + i40e_dbg_root = NULL; +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c new file mode 100644 index 000000000..56438bd57 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -0,0 +1,154 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_diag.h" +#include "i40e_prototype.h" + +/** + * i40e_diag_reg_pattern_test + * @hw: pointer to the hw struct + * @reg: reg to be tested + * @mask: bits to be touched + **/ +static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, + u32 reg, u32 mask) +{ + const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + u32 pat, val, orig_val; + int i; + + orig_val = rd32(hw, reg); + for (i = 0; i < ARRAY_SIZE(patterns); i++) { + pat = patterns[i]; + wr32(hw, reg, (pat & mask)); + val = rd32(hw, reg); + if ((val & mask) != (pat & mask)) { + i40e_debug(hw, I40E_DEBUG_DIAG, + "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n", + __func__, reg, pat, val); + return I40E_ERR_DIAG_TEST_FAILED; + } + } + + wr32(hw, reg, orig_val); + val = rd32(hw, reg); + if (val != orig_val) { + i40e_debug(hw, I40E_DEBUG_DIAG, + "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n", + __func__, reg, orig_val, val); + return I40E_ERR_DIAG_TEST_FAILED; + } + + return 0; +} + +struct i40e_diag_reg_test_info i40e_reg_list[] = { + /* offset mask elements stride */ + {I40E_QTX_CTL(0), 0x0000FFBF, 1, + I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_PFINT_ITR0(0), 0x00000FFF, 3, + I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, + {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, + I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, + {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, + I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, + {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, + I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, + {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, + {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, + {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, + I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, + {I40E_QINT_TQCTL(0), 0x000000FF, 1, + I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, + {I40E_QINT_RQCTL(0), 0x000000FF, 1, + I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, + {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, + { 0 } +}; + +/** + * 
i40e_diag_reg_test + * @hw: pointer to the hw struct + * + * Perform registers diagnostic test + **/ +i40e_status i40e_diag_reg_test(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg, mask; + u32 i, j; + + for (i = 0; i40e_reg_list[i].offset != 0 && + !ret_code; i++) { + + /* set actual reg range for dynamically allocated resources */ + if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && + hw->func_caps.num_tx_qp != 0) + i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; + if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || + i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || + i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && + hw->func_caps.num_msix_vectors != 0) + i40e_reg_list[i].elements = + hw->func_caps.num_msix_vectors - 1; + + /* test register access */ + mask = i40e_reg_list[i].mask; + for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); + } + } + + return ret_code; +} + +/** + * i40e_diag_eeprom_test + * @hw: pointer to the hw struct + * + * Perform EEPROM diagnostic test + **/ +i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw) +{ + i40e_status ret_code; + u16 reg_val; + + /* read NVM control word and if NVM valid, validate EEPROM checksum*/ + ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); + if (!ret_code && + ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == + (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) { + ret_code = i40e_validate_nvm_checksum(hw, NULL); + } else { + ret_code = I40E_ERR_DIAG_TEST_FAILED; + } + + return ret_code; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h new file mode 100644 index 000000000..0b5911652 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -0,0 +1,51 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DIAG_H_ +#define _I40E_DIAG_H_ + +#include "i40e_type.h" + +enum i40e_lb_mode { + I40E_LB_MODE_NONE = 0x0, + I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL, + I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE, + I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL, +}; + +struct i40e_diag_reg_test_info { + u32 offset; /* the base register */ + u32 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +extern struct i40e_diag_reg_test_info i40e_reg_list[]; + +i40e_status i40e_diag_reg_test(struct i40e_hw *hw); +i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw); + +#endif /* _I40E_DIAG_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c new file mode 100644 index 000000000..4cbaaeb90 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -0,0 +1,2587 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* ethtool support for i40e */ + +#include "i40e.h" +#include "i40e_diag.h" + +struct i40e_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define I40E_STAT(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +#define I40E_NETDEV_STAT(_net_stat) \ + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) +#define I40E_PF_STAT(_name, _stat) \ + I40E_STAT(struct i40e_pf, _name, _stat) +#define I40E_VSI_STAT(_name, _stat) \ + I40E_STAT(struct i40e_vsi, _name, _stat) +#define I40E_VEB_STAT(_name, _stat) \ + I40E_STAT(struct i40e_veb, _name, _stat) + +static const struct i40e_stats i40e_gstrings_net_stats[] = { + I40E_NETDEV_STAT(rx_packets), + I40E_NETDEV_STAT(tx_packets), + I40E_NETDEV_STAT(rx_bytes), + I40E_NETDEV_STAT(tx_bytes), + I40E_NETDEV_STAT(rx_errors), + I40E_NETDEV_STAT(tx_errors), + I40E_NETDEV_STAT(rx_dropped), + I40E_NETDEV_STAT(tx_dropped), + I40E_NETDEV_STAT(collisions), + I40E_NETDEV_STAT(rx_length_errors), + I40E_NETDEV_STAT(rx_crc_errors), +}; + +static const struct i40e_stats i40e_gstrings_veb_stats[] = { + I40E_VEB_STAT("rx_bytes", stats.rx_bytes), + I40E_VEB_STAT("tx_bytes", stats.tx_bytes), + I40E_VEB_STAT("rx_unicast", stats.rx_unicast), + I40E_VEB_STAT("tx_unicast", stats.tx_unicast), + I40E_VEB_STAT("rx_multicast", stats.rx_multicast), + I40E_VEB_STAT("tx_multicast", stats.tx_multicast), + I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast), + I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast), + I40E_VEB_STAT("rx_discards", stats.rx_discards), + I40E_VEB_STAT("tx_discards", stats.tx_discards), + I40E_VEB_STAT("tx_errors", stats.tx_errors), + I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol), +}; + +static const struct i40e_stats i40e_gstrings_misc_stats[] = { + I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast), + I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast), + I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast), + I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast), + I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), + I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), + I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), +}; + +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd); + +/* These PF_STATs might look like duplicates of some NETDEV_STATs, + * but they are separate. This device supports Virtualization, and + * as such might have several netdevs supporting VMDq and FCoE going + * through a single port. The NETDEV_STATs are for individual netdevs + * seen at the top of the stack, and the PF_STATs are for the physical + * function at the bottom of the stack hosting those netdevs. + * + * The PF_STATs are appended to the netdev stats only when ethtool -S + * is queried on the base PF netdev, not on the VMDq or FCoE netdev. 
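+ *
+ * A minimal sketch of how entries like these are typically consumed (the
+ * helper name i40e_stat_value and its "base" parameter are illustrative
+ * assumptions, not part of this file): each entry contributes one value,
+ * read as sizeof_stat bytes at stat_offset from the backing structure,
+ * which lets one table drive both the ethtool string list and the values
+ * reported for "ethtool -S":
+ *
+ *	static u64 i40e_stat_value(const void *base,
+ *				   const struct i40e_stats *s)
+ *	{
+ *		const char *p = (const char *)base + s->stat_offset;
+ *
+ *		return (s->sizeof_stat == sizeof(u64)) ?
+ *		       *(const u64 *)p : *(const u32 *)p;
+ *	}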
+ */ +static struct i40e_stats i40e_gstrings_stats[] = { + I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes), + I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes), + I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast), + I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast), + I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast), + I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast), + I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), + I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), + I40E_PF_STAT("tx_errors", stats.eth.tx_errors), + I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), + I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), + I40E_PF_STAT("crc_errors", stats.crc_errors), + I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), + I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), + I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), + I40E_PF_STAT("tx_timeout", tx_timeout_count), + I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error), + I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), + I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), + I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), + I40E_PF_STAT("link_xon_tx", stats.link_xon_tx), + I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx), + I40E_PF_STAT("rx_size_64", stats.rx_size_64), + I40E_PF_STAT("rx_size_127", stats.rx_size_127), + I40E_PF_STAT("rx_size_255", stats.rx_size_255), + I40E_PF_STAT("rx_size_511", stats.rx_size_511), + I40E_PF_STAT("rx_size_1023", stats.rx_size_1023), + I40E_PF_STAT("rx_size_1522", stats.rx_size_1522), + I40E_PF_STAT("rx_size_big", stats.rx_size_big), + I40E_PF_STAT("tx_size_64", stats.tx_size_64), + I40E_PF_STAT("tx_size_127", stats.tx_size_127), + I40E_PF_STAT("tx_size_255", stats.tx_size_255), + I40E_PF_STAT("tx_size_511", stats.tx_size_511), + I40E_PF_STAT("tx_size_1023", stats.tx_size_1023), + I40E_PF_STAT("tx_size_1522", stats.tx_size_1522), + I40E_PF_STAT("tx_size_big", stats.tx_size_big), + I40E_PF_STAT("rx_undersize", stats.rx_undersize), + I40E_PF_STAT("rx_fragments", stats.rx_fragments), + I40E_PF_STAT("rx_oversize", stats.rx_oversize), + I40E_PF_STAT("rx_jabber", stats.rx_jabber), + I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), + I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), + I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), + + /* LPI stats */ + I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), + I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), + I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count), + I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), +}; + +#ifdef I40E_FCOE +static const struct i40e_stats i40e_gstrings_fcoe_stats[] = { + I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc), + I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped), + I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets), + I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords), + I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count), + I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error), + I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets), + I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords), +}; + +#endif /* I40E_FCOE */ +#define I40E_QUEUE_STATS_LEN(n) \ + (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ + * 2 /* Tx and Rx together */ \ + * (sizeof(struct i40e_queue_stats) / sizeof(u64))) +#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) +#define I40E_NETDEV_STATS_LEN 
ARRAY_SIZE(i40e_gstrings_net_stats) +#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) +#ifdef I40E_FCOE +#define I40E_FCOE_STATS_LEN ARRAY_SIZE(i40e_gstrings_fcoe_stats) +#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ + I40E_FCOE_STATS_LEN + \ + I40E_MISC_STATS_LEN + \ + I40E_QUEUE_STATS_LEN((n))) +#else +#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ + I40E_MISC_STATS_LEN + \ + I40E_QUEUE_STATS_LEN((n))) +#endif /* I40E_FCOE */ +#define I40E_PFC_STATS_LEN ( \ + (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \ + FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \ + / sizeof(u64)) +#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats) +#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ + I40E_PFC_STATS_LEN + \ + I40E_VSI_STATS_LEN((n))) + +enum i40e_ethtool_test_id { + I40E_ETH_TEST_REG = 0, + I40E_ETH_TEST_EEPROM, + I40E_ETH_TEST_INTR, + I40E_ETH_TEST_LOOPBACK, + I40E_ETH_TEST_LINK, +}; + +static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Eeprom test (offline)", + "Interrupt test (offline)", + "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) + +static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { + "NPAR", +}; + +#define I40E_PRIV_FLAGS_STR_LEN \ + (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) + +/** + * i40e_partition_setting_complaint - generic complaint for MFP restriction + * @pf: the PF struct + **/ +static void i40e_partition_setting_complaint(struct i40e_pf *pf) +{ + dev_info(&pf->pdev->dev, + "The link settings are allowed to be changed only from the first partition of a given port. 
Please switch to the first partition in order to change the setting.\n"); +} + +/** + * i40e_get_settings_link_up - Get the Link settings for when link is up + * @hw: hw structure + * @ecmd: ethtool command to fill in + * @netdev: network interface device structure + * + **/ +static void i40e_get_settings_link_up(struct i40e_hw *hw, + struct ethtool_cmd *ecmd, + struct net_device *netdev) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + u32 link_speed = hw_link_info->link_speed; + + /* Initialize supported and advertised settings based on phy settings */ + switch (hw_link_info->phy_type) { + case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; + break; + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + case I40E_PHY_TYPE_40GBASE_AOC: + ecmd->supported = SUPPORTED_40000baseCR4_Full; + break; + case I40E_PHY_TYPE_40GBASE_KR4: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_40000baseKR4_Full; + break; + case I40E_PHY_TYPE_40GBASE_SR4: + ecmd->supported = SUPPORTED_40000baseSR4_Full; + break; + case I40E_PHY_TYPE_40GBASE_LR4: + ecmd->supported = SUPPORTED_40000baseLR4_Full; + break; + case I40E_PHY_TYPE_20GBASE_KR2: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_20000baseKR2_Full; + break; + case I40E_PHY_TYPE_10GBASE_KX4: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseKX4_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseKX4_Full; + break; + case I40E_PHY_TYPE_10GBASE_KR: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseKR_Full; + break; + case I40E_PHY_TYPE_10GBASE_SR: + case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseKX_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseKX_Full; + break; + case I40E_PHY_TYPE_10GBASE_T: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; + break; + case I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_SFI: + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_10GBASE_AOC: + ecmd->supported = 
SUPPORTED_10000baseT_Full; + break; + case I40E_PHY_TYPE_SGMII: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + default: + /* if we got here and link is up something bad is afoot */ + netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n", + hw_link_info->phy_type); + } + + /* Set speed and duplex */ + switch (link_speed) { + case I40E_LINK_SPEED_40GB: + ethtool_cmd_speed_set(ecmd, SPEED_40000); + break; + case I40E_LINK_SPEED_20GB: + ethtool_cmd_speed_set(ecmd, SPEED_20000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; +} + +/** + * i40e_get_settings_link_down - Get the Link settings for when link is down + * @hw: hw structure + * @ecmd: ethtool command to fill in + * + * Reports link settings that can be determined when link is down + **/ +static void i40e_get_settings_link_down(struct i40e_hw *hw, + struct ethtool_cmd *ecmd) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + + /* link is down and the driver needs to fall back on + * device ID to determine what kinds of info to display, + * it's mostly a guess that may change when link is up + */ + switch (hw->device_id) { + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + /* pluggable QSFP */ + ecmd->supported = SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseLR4_Full; + ecmd->advertising = ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseLR4_Full; + break; + case I40E_DEV_ID_KX_B: + /* backplane 40G */ + ecmd->supported = SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_40000baseKR4_Full; + break; + case I40E_DEV_ID_KX_C: + /* backplane 10G */ + ecmd->supported = SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_10000baseKR_Full; + break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_DEV_ID_20G_KR2: + /* backplane 20G */ + ecmd->supported = SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_20000baseKR2_Full; + break; + default: + /* all the rest are 10G/1G */ + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + } + + /* With no link speed and duplex are unknown */ + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; +} + +/** + * 
i40e_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type + **/ +static int i40e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + + if (link_up) + i40e_get_settings_link_up(hw, ecmd, netdev); + else + i40e_get_settings_link_down(hw, ecmd); + + /* Now set the settings that don't rely on link being up/down */ + + /* Set autoneg settings */ + ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE); + + switch (hw->phy.media_type) { + case I40E_MEDIA_TYPE_BACKPLANE: + ecmd->supported |= SUPPORTED_Autoneg | + SUPPORTED_Backplane; + ecmd->advertising |= ADVERTISED_Autoneg | + ADVERTISED_Backplane; + ecmd->port = PORT_NONE; + break; + case I40E_MEDIA_TYPE_BASET: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case I40E_MEDIA_TYPE_DA: + case I40E_MEDIA_TYPE_CX4: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case I40E_MEDIA_TYPE_FIBER: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case I40E_MEDIA_TYPE_UNKNOWN: + default: + ecmd->port = PORT_OTHER; + break; + } + + /* Set transceiver */ + ecmd->transceiver = XCVR_EXTERNAL; + + /* Set flow control settings */ + ecmd->supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case I40E_FC_FULL: + ecmd->advertising |= ADVERTISED_Pause; + break; + case I40E_FC_TX_PAUSE: + ecmd->advertising |= ADVERTISED_Asym_Pause; + break; + case I40E_FC_RX_PAUSE: + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + default: + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + } + + return 0; +} + +/** + * i40e_set_settings - Set Speed and Duplex + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Set speed/duplex per media_types advertised/forced + **/ +static int i40e_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_aq_set_phy_config config; + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw = &pf->hw; + struct ethtool_cmd safe_ecmd; + i40e_status status = 0; + bool change = false; + int err = 0; + u8 autoneg; + u32 advertise; + + /* Changing port settings is not supported if this isn't the + * port's controlling PF + */ + if (hw->partition_id != 1) { + i40e_partition_setting_complaint(pf); + return -EOPNOTSUPP; + } + + if (vsi != pf->vsi[pf->lan_vsi]) + return -EOPNOTSUPP; + + if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && + hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && + hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && + hw->phy.link_info.link_info & I40E_AQ_LINK_UP) + return -EOPNOTSUPP; + + /* get our own copy of the bits to check against */ + memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd)); + i40e_get_settings(netdev, &safe_ecmd); + + /* save autoneg and speed out of ecmd */ + autoneg = ecmd->autoneg; + advertise = ecmd->advertising; + + /* set autoneg and speed back to what they currently 
are */ + ecmd->autoneg = safe_ecmd.autoneg; + ecmd->advertising = safe_ecmd.advertising; + + ecmd->cmd = safe_ecmd.cmd; + /* If ecmd and safe_ecmd are not the same now, then they are + * trying to set something that we do not support + */ + if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd))) + return -EOPNOTSUPP; + + while (test_bit(__I40E_CONFIG_BUSY, &vsi->state)) + usleep_range(1000, 2000); + + /* Get the current phy config */ + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) + return -EAGAIN; + + /* Copy abilities to config in case autoneg is not + * set below + */ + memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); + config.abilities = abilities.abilities; + + /* Check autoneg */ + if (autoneg == AUTONEG_ENABLE) { + /* If autoneg is not supported, return error */ + if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy\n"); + return -EINVAL; + } + /* If autoneg was not already enabled */ + if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { + config.abilities = abilities.abilities | + I40E_AQ_PHY_ENABLE_AN; + change = true; + } + } else { + /* If autoneg is supported 10GBASE_T is the only phy that + * can disable it, so otherwise return error + */ + if (safe_ecmd.supported & SUPPORTED_Autoneg && + hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + return -EINVAL; + } + /* If autoneg is currently enabled */ + if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { + config.abilities = abilities.abilities & + ~I40E_AQ_PHY_ENABLE_AN; + change = true; + } + } + + if (advertise & ~safe_ecmd.supported) + return -EINVAL; + + if (advertise & ADVERTISED_100baseT_Full) + config.link_speed |= I40E_LINK_SPEED_100MB; + if (advertise & ADVERTISED_1000baseT_Full || + advertise & ADVERTISED_1000baseKX_Full) + config.link_speed |= I40E_LINK_SPEED_1GB; + if (advertise & ADVERTISED_10000baseT_Full || + advertise & ADVERTISED_10000baseKX4_Full || + advertise & ADVERTISED_10000baseKR_Full) + config.link_speed |= I40E_LINK_SPEED_10GB; + if (advertise & ADVERTISED_20000baseKR2_Full) + config.link_speed |= I40E_LINK_SPEED_20GB; + if (advertise & ADVERTISED_40000baseKR4_Full || + advertise & ADVERTISED_40000baseCR4_Full || + advertise & ADVERTISED_40000baseSR4_Full || + advertise & ADVERTISED_40000baseLR4_Full) + config.link_speed |= I40E_LINK_SPEED_40GB; + + if (change || (abilities.link_speed != config.link_speed)) { + /* copy over the rest of the abilities */ + config.phy_type = abilities.phy_type; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + + /* save the requested speeds */ + hw->phy.link_info.requested_speeds = config.link_speed; + /* set link and auto negotiation so changes take effect */ + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* If link is up put link down */ + if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* make the aq call */ + status = i40e_aq_set_phy_config(hw, &config, NULL); + if (status) { + netdev_info(netdev, "Set phy config failed with error %d.\n", + status); + return -EAGAIN; + } + + status = i40e_aq_get_link_info(hw, true, 
NULL, NULL); + if (status) + netdev_info(netdev, "Updating link info failed with error %d\n", + status); + + } else { + netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); + } + + return err; +} + +static int i40e_nway_reset(struct net_device *netdev) +{ + /* restart autonegotiation */ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; + i40e_status ret = 0; + + ret = i40e_aq_set_link_restart_an(hw, link_up, NULL); + if (ret) { + netdev_info(netdev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + return -EIO; + } + + return 0; +} + +/** + * i40e_get_pauseparam - Get Flow Control status + * @netdev: network interface device structure + * @pause: buffer in which to return the tx/rx-pause status + **/ +static void i40e_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; + + pause->autoneg = + ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE); + + /* PFC enabled so report LFC as off */ + if (dcbx_cfg->pfc.pfcenable) { + pause->rx_pause = 0; + pause->tx_pause = 0; + return; + } + + if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == I40E_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +/** + * i40e_set_pauseparam - Set Flow Control parameter + * @netdev: network interface device structure + * @pause: requested tx/rx flow control parameters + **/ +static int i40e_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + i40e_status status; + u8 aq_failures; + int err = 0; + + /* Changing the port's flow control is not supported if this isn't the + * port's controlling PF + */ + if (hw->partition_id != 1) { + i40e_partition_setting_complaint(pf); + return -EOPNOTSUPP; + } + + if (vsi != pf->vsi[pf->lan_vsi]) + return -EOPNOTSUPP; + + if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE)) { + netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); + return -EOPNOTSUPP; + } + + /* If we have link and don't have autoneg */ + if (!test_bit(__I40E_DOWN, &pf->state) && + !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) { + /* Send message that it might not necessarily work */ + netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); + } + + if (dcbx_cfg->pfc.pfcenable) { + netdev_info(netdev, + "Priority flow control enabled. 
Cannot set link flow control.\n"); + return -EOPNOTSUPP; + } + + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = I40E_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = I40E_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = I40E_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = I40E_FC_NONE; + else + return -EINVAL; + + /* Tell the OS link is going down, the link will go back up when fw + * says it is ready asynchronously + */ + netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* Set the fc mode and only restart an if link is up*/ + status = i40e_set_fc(hw, &aq_failures, link_up); + + if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) { + netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n", + status, hw->aq.asq_last_status); + err = -EAGAIN; + } + if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) { + netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n", + status, hw->aq.asq_last_status); + err = -EAGAIN; + } + if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { + netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", + status, hw->aq.asq_last_status); + err = -EAGAIN; + } + + if (!test_bit(__I40E_DOWN, &pf->state)) { + /* Give it a little more time to try to come back */ + msleep(75); + if (!test_bit(__I40E_DOWN, &pf->state)) + return i40e_nway_reset(netdev); + } + + return err; +} + +static u32 i40e_get_msglevel(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + return pf->msg_enable; +} + +static void i40e_set_msglevel(struct net_device *netdev, u32 data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + if (I40E_DEBUG_USER & data) + pf->hw.debug_mask = data; + pf->msg_enable = data; +} + +static int i40e_get_regs_len(struct net_device *netdev) +{ + int reg_count = 0; + int i; + + for (i = 0; i40e_reg_list[i].offset != 0; i++) + reg_count += i40e_reg_list[i].elements; + + return reg_count * sizeof(u32); +} + +static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 *reg_buf = p; + int i, j, ri; + u32 reg; + + /* Tell ethtool which driver-version-specific regs output we have. + * + * At some point, if we have ethtool doing special formatting of + * this data, it will rely on this version number to know how to + * interpret things. Hence, this needs to be updated if/when the + * diags register table is changed. 
+ */ + regs->version = 1; + + /* loop through the diags reg table for what to print */ + ri = 0; + for (i = 0; i40e_reg_list[i].offset != 0; i++) { + for (j = 0; j < i40e_reg_list[i].elements; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + reg_buf[ri++] = rd32(hw, reg); + } + } + +} + +static int i40e_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_hw *hw = &np->vsi->back->hw; + struct i40e_pf *pf = np->vsi->back; + int ret_val = 0, len, offset; + u8 *eeprom_buff; + u16 i, sectors; + bool last; + u32 magic; + +#define I40E_NVM_SECTOR_SIZE 4096 + if (eeprom->len == 0) + return -EINVAL; + + /* check for NVMUpdate access method */ + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic && eeprom->magic != magic) { + struct i40e_nvm_access *cmd; + int errno; + + /* make sure it is the right magic for NVMUpdate */ + if ((eeprom->magic >> 16) != hw->device_id) + return -EINVAL; + + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || + (hw->debug_mask & I40E_DEBUG_NVM))) + dev_info(&pf->pdev->dev, + "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); + + return errno; + } + + /* normal ethtool get_eeprom support */ + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret_val) { + dev_info(&pf->pdev->dev, + "Failed Acquiring NVM resource for read err=%d status=0x%x\n", + ret_val, hw->aq.asq_last_status); + goto free_buff; + } + + sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; + sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0; + len = I40E_NVM_SECTOR_SIZE; + last = false; + for (i = 0; i < sectors; i++) { + if (i == (sectors - 1)) { + len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); + last = true; + } + offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), + ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len, + (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), + last, NULL); + if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + dev_info(&pf->pdev->dev, + "read NVM failed, invalid offset 0x%x\n", + offset); + break; + } else if (ret_val && + hw->aq.asq_last_status == I40E_AQ_RC_EACCES) { + dev_info(&pf->pdev->dev, + "read NVM failed, access, offset 0x%x\n", + offset); + break; + } else if (ret_val) { + dev_info(&pf->pdev->dev, + "read NVM failed offset %d err=%d status=0x%x\n", + offset, ret_val, hw->aq.asq_last_status); + break; + } + } + + i40e_release_nvm(hw); + memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); +free_buff: + kfree(eeprom_buff); + return ret_val; +} + +static int i40e_get_eeprom_len(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_hw *hw = &np->vsi->back->hw; + u32 val; + + val = (rd32(hw, I40E_GLPCI_LBARCTRL) + & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) + >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; + /* register returns value in power of 2, 64Kbyte chunks. 
*/ + val = (64 * 1024) * (1 << val); + return val; +} + +static int i40e_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_hw *hw = &np->vsi->back->hw; + struct i40e_pf *pf = np->vsi->back; + struct i40e_nvm_access *cmd; + int ret_val = 0; + int errno; + u32 magic; + + /* normal ethtool set_eeprom is not supported */ + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic == magic) + return -EOPNOTSUPP; + + /* check for NVMUpdate access method */ + if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) + return -EINVAL; + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && + hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || + (hw->debug_mask & I40E_DEBUG_NVM))) + dev_info(&pf->pdev->dev, + "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); + + return errno; +} + +static void i40e_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, i40e_driver_version_str, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw), + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(pf->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; +} + +static void i40e_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + + ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = vsi->rx_rings[0]->count; + ring->tx_pending = vsi->tx_rings[0]->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int i40e_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40e_ring *tx_rings = NULL, *rx_rings = NULL; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 new_rx_count, new_tx_count; + int i, err = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || + ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, + I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == vsi->tx_rings[0]->count) && + (new_rx_count == vsi->rx_rings[0]->count)) + return 
0; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) + usleep_range(1000, 2000); + + if (!netif_running(vsi->netdev)) { + /* simple case - set for the next time the netdev is started */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + vsi->tx_rings[i]->count = new_tx_count; + vsi->rx_rings[i]->count = new_rx_count; + } + goto done; + } + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + + /* alloc updated Tx resources */ + if (new_tx_count != vsi->tx_rings[0]->count) { + netdev_info(netdev, + "Changing Tx descriptor count from %d to %d.\n", + vsi->tx_rings[0]->count, new_tx_count); + tx_rings = kcalloc(vsi->alloc_queue_pairs, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!tx_rings) { + err = -ENOMEM; + goto done; + } + + for (i = 0; i < vsi->num_queue_pairs; i++) { + /* clone ring and setup updated count */ + tx_rings[i] = *vsi->tx_rings[i]; + tx_rings[i].count = new_tx_count; + err = i40e_setup_tx_descriptors(&tx_rings[i]); + if (err) { + while (i) { + i--; + i40e_free_tx_resources(&tx_rings[i]); + } + kfree(tx_rings); + tx_rings = NULL; + + goto done; + } + } + } + + /* alloc updated Rx resources */ + if (new_rx_count != vsi->rx_rings[0]->count) { + netdev_info(netdev, + "Changing Rx descriptor count from %d to %d\n", + vsi->rx_rings[0]->count, new_rx_count); + rx_rings = kcalloc(vsi->alloc_queue_pairs, + sizeof(struct i40e_ring), GFP_KERNEL); + if (!rx_rings) { + err = -ENOMEM; + goto free_tx; + } + + for (i = 0; i < vsi->num_queue_pairs; i++) { + /* clone ring and setup updated count */ + rx_rings[i] = *vsi->rx_rings[i]; + rx_rings[i].count = new_rx_count; + err = i40e_setup_rx_descriptors(&rx_rings[i]); + if (err) { + while (i) { + i--; + i40e_free_rx_resources(&rx_rings[i]); + } + kfree(rx_rings); + rx_rings = NULL; + + goto free_tx; + } + } + } + + /* Bring interface down, copy in the new ring info, + * then restore the interface + */ + i40e_down(vsi); + + if (tx_rings) { + for (i = 0; i < vsi->num_queue_pairs; i++) { + i40e_free_tx_resources(vsi->tx_rings[i]); + *vsi->tx_rings[i] = tx_rings[i]; + } + kfree(tx_rings); + tx_rings = NULL; + } + + if (rx_rings) { + for (i = 0; i < vsi->num_queue_pairs; i++) { + i40e_free_rx_resources(vsi->rx_rings[i]); + *vsi->rx_rings[i] = rx_rings[i]; + } + kfree(rx_rings); + rx_rings = NULL; + } + + i40e_up(vsi); + +free_tx: + /* error cleanup if the Rx allocations failed after getting Tx */ + if (tx_rings) { + for (i = 0; i < vsi->num_queue_pairs; i++) + i40e_free_tx_resources(&tx_rings[i]); + kfree(tx_rings); + tx_rings = NULL; + } + +done: + clear_bit(__I40E_CONFIG_BUSY, &pf->state); + + return err; +} + +static int i40e_get_sset_count(struct net_device *netdev, int sset) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + switch (sset) { + case ETH_SS_TEST: + return I40E_TEST_LEN; + case ETH_SS_STATS: + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { + int len = I40E_PF_STATS_LEN(netdev); + + if (pf->lan_veb != I40E_NO_VEB) + len += I40E_VEB_STATS_LEN; + return len; + } else { + return I40E_VSI_STATS_LEN(netdev); + } + case ETH_SS_PRIV_FLAGS: + return I40E_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void i40e_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_ring *tx_ring, *rx_ring; + struct i40e_vsi *vsi = 
np->vsi; + struct i40e_pf *pf = vsi->back; + int i = 0; + char *p; + int j; + struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); + unsigned int start; + + i40e_update_stats(vsi); + + for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) { + p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset; + data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < I40E_MISC_STATS_LEN; j++) { + p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset; + data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +#ifdef I40E_FCOE + for (j = 0; j < I40E_FCOE_STATS_LEN; j++) { + p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset; + data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +#endif + rcu_read_lock(); + for (j = 0; j < vsi->num_queue_pairs; j++) { + tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); + + if (!tx_ring) + continue; + + /* process Tx ring statistics */ + do { + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); + data[i] = tx_ring->stats.packets; + data[i + 1] = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); + i += 2; + + /* Rx ring is the 2nd half of the queue pair */ + rx_ring = &tx_ring[1]; + do { + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + data[i] = rx_ring->stats.packets; + data[i + 1] = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + i += 2; + } + rcu_read_unlock(); + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + return; + + if (pf->lan_veb != I40E_NO_VEB) { + struct i40e_veb *veb = pf->veb[pf->lan_veb]; + for (j = 0; j < I40E_VEB_STATS_LEN; j++) { + p = (char *)veb; + p += i40e_gstrings_veb_stats[j].stat_offset; + data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { + p = (char *)pf + i40e_gstrings_stats[j].stat_offset; + data[i++] = (i40e_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { + data[i++] = pf->stats.priority_xon_tx[j]; + data[i++] = pf->stats.priority_xoff_tx[j]; + } + for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { + data[i++] = pf->stats.priority_xon_rx[j]; + data[i++] = pf->stats.priority_xoff_rx[j]; + } + for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) + data[i++] = pf->stats.priority_xon_2_xoff[j]; +} + +static void i40e_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + for (i = 0; i < I40E_TEST_LEN; i++) { + memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_STATS: + for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_net_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MISC_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_misc_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } +#ifdef I40E_FCOE + for (i = 0; i < I40E_FCOE_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_fcoe_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } +#endif + for (i = 0; i < vsi->num_queue_pairs; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); + p += ETH_GSTRING_LEN; + } + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + return; + + if (pf->lan_veb != I40E_NO_VEB) { + for (i = 0; i < I40E_VEB_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "veb.%s", + i40e_gstrings_veb_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + } + for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "port.%s", + i40e_gstrings_stats[i].stat_string); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.tx_priority_%u_xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.tx_priority_%u_xoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(p, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon_2_xoff", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + default: + break; + } +} + +static int i40e_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + /* only report HW timestamping if PTP is enabled */ + if (!(pf->flags & I40E_FLAG_PTP)) + return ethtool_op_get_ts_info(dev, info); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (pf->ptp_clock) + 
info->phc_index = ptp_clock_index(pf->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + return 0; +} + +static int i40e_link_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "link test\n"); + if (i40e_get_link_status(&pf->hw)) + *data = 0; + else + *data = 1; + + return *data; +} + +static int i40e_reg_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "register test\n"); + *data = i40e_diag_reg_test(&pf->hw); + + return *data; +} + +static int i40e_eeprom_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "eeprom test\n"); + *data = i40e_diag_eeprom_test(&pf->hw); + + /* forcebly clear the NVM Update state machine */ + pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; + + return *data; +} + +static int i40e_intr_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + u16 swc_old = pf->sw_int_count; + + netif_info(pf, hw, netdev, "interrupt test\n"); + wr32(&pf->hw, I40E_PFINT_DYN_CTL0, + (I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); + usleep_range(1000, 2000); + *data = (swc_old == pf->sw_int_count); + + return *data; +} + +static int i40e_loopback_test(struct net_device *netdev, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "loopback test not implemented\n"); + *data = 0; + + return *data; +} + +static void i40e_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct i40e_pf *pf = np->vsi->back; + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + netif_info(pf, drv, netdev, "offline testing starting\n"); + + set_bit(__I40E_TESTING, &pf->state); + /* If the device is online then take it offline */ + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + /* Link test performed before hardware reset + * so autoneg doesn't interfere with test result + */ + if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (i40e_loopback_test(netdev, 
&data[I40E_ETH_TEST_LOOPBACK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* run reg test last, a reset is required after it */ + if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__I40E_TESTING, &pf->state); + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + if (if_running) + dev_open(netdev); + } else { + /* Online tests */ + netif_info(pf, drv, netdev, "online testing starting\n"); + + if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline only tests, not run in online; pass by default */ + data[I40E_ETH_TEST_REG] = 0; + data[I40E_ETH_TEST_EEPROM] = 0; + data[I40E_ETH_TEST_INTR] = 0; + data[I40E_ETH_TEST_LOOPBACK] = 0; + } + + netif_info(pf, drv, netdev, "testing finished\n"); +} + +static void i40e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 wol_nvm_bits; + + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) { + wol->supported = 0; + wol->wolopts = 0; + } else { + wol->supported = WAKE_MAGIC; + wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); + } +} + +/** + * i40e_set_wol - set the WakeOnLAN configuration + * @netdev: the netdev in question + * @wol: the ethtool WoL setting data + **/ +static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi = np->vsi; + struct i40e_hw *hw = &pf->hw; + u16 wol_nvm_bits; + + /* WoL not supported if this isn't the controlling PF on the port */ + if (hw->partition_id != 1) { + i40e_partition_setting_complaint(pf); + return -EOPNOTSUPP; + } + + if (vsi != pf->vsi[pf->lan_vsi]) + return -EOPNOTSUPP; + + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if (((1 << hw->port) & wol_nvm_bits)) + return -EOPNOTSUPP; + + /* only magic packet is supported */ + if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) + return -EOPNOTSUPP; + + /* is this a new value? */ + if (pf->wol_en != !!wol->wolopts) { + pf->wol_en = !!wol->wolopts; + device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); + } + + return 0; +} + +static int i40e_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + int blink_freq = 2; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + pf->led_status = i40e_led_get(hw); + return blink_freq; + case ETHTOOL_ID_ON: + i40e_led_set(hw, 0xF, false); + break; + case ETHTOOL_ID_OFF: + i40e_led_set(hw, 0x0, false); + break; + case ETHTOOL_ID_INACTIVE: + i40e_led_set(hw, pf->led_status, false); + break; + default: + break; + } + + return 0; +} + +/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt + * Throttle Rate (ITR) ie. 
ITR(1) = 2us ITR(10) = 20 us, and also + * 125us (8000 interrupts per second) == ITR(62) + */ + +static int i40e_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + ec->tx_max_coalesced_frames_irq = vsi->work_limit; + ec->rx_max_coalesced_frames_irq = vsi->work_limit; + + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) + ec->use_adaptive_rx_coalesce = 1; + + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + ec->use_adaptive_tx_coalesce = 1; + + ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + + return 0; +} + +static int i40e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_q_vector *q_vector; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 vector; + int i; + + if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + vsi->work_limit = ec->tx_max_coalesced_frames_irq; + + vector = vsi->base_vector; + if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && + (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { + vsi->rx_itr_setting = ec->rx_coalesce_usecs; + } else if (ec->rx_coalesce_usecs == 0) { + vsi->rx_itr_setting = ec->rx_coalesce_usecs; + if (ec->use_adaptive_rx_coalesce) + netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); + } else { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && + (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { + vsi->tx_itr_setting = ec->tx_coalesce_usecs; + } else if (ec->tx_coalesce_usecs == 0) { + vsi->tx_itr_setting = ec->tx_coalesce_usecs; + if (ec->use_adaptive_tx_coalesce) + netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); + } else { + netif_info(pf, drv, netdev, + "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if (ec->use_adaptive_rx_coalesce) + vsi->rx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + + if (ec->use_adaptive_tx_coalesce) + vsi->tx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; + + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + q_vector = vsi->q_vectors[i]; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr); + i40e_flush(hw); + } + + return 0; +} + +/** + * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type + * @pf: pointer to the physical function struct + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow is supported, else Invalid Input. 
+ **/ +static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { + cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; + cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; + return 0; + } + /* Report default options for RSS on i40e */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through to add IP fields */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through to add IP fields */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * i40e_get_ethtool_fdir_all - Populates the rule count of a command + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * @rule_locs: Array of used rule locations + * + * This function populates both the total and actual rule count of + * the ethtool flow classification command + * + * Returns 0 on success or -EMSGSIZE if entry not found + **/ +static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40e_fdir_filter *rule; + struct hlist_node *node2; + int cnt = 0; + + /* report total rule count */ + cmd->data = i40e_get_fd_cnt_all(pf); + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = rule->fd_id; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +/** + * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * + * This function looks up a filter based on the Rx flow classification + * command and fills the flow spec info for it if found + * + * Returns 0 on success or -EINVAL if filter not found + **/ +static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct i40e_fdir_filter *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->fd_id) + break; + } + + if (!rule || fsp->location != rule->fd_id) + return -EINVAL; + + fsp->flow_type = rule->flow_type; + if (fsp->flow_type == IP_USER_FLOW) { + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + } + + /* Reverse the src and dest notion, since the HW views them from + * Tx perspective where as the user expects it from Rx filter view. 
+ */ + fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0]; + + if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->q_index; + + if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { + struct i40e_vsi *vsi; + + vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); + if (vsi && vsi->type == I40E_VSI_SRIOV) { + fsp->h_ext.data[1] = htonl(vsi->vf_id); + fsp->m_ext.data[1] = htonl(0x1); + } + } + + return 0; +} + +/** + * i40e_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the command is supported. + **/ +static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = vsi->alloc_queue_pairs; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = i40e_get_rss_hash_opts(pf, cmd); + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = pf->fdir_pf_active_filters; + /* report total rule count */ + cmd->data = i40e_get_fd_cnt_all(pf); + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = i40e_get_ethtool_fdir_entry(pf, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); + break; + default: + break; + } + + return ret; +} + +/** + * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash + * @pf: pointer to the physical function struct + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow input set is supported. 
+ **/ +static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) +{ + struct i40e_hw *hw = &pf->hw; + u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | + ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + break; + default: + return -EINVAL; + } + break; + case TCP_V6_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + break; + default: + return -EINVAL; + } + break; + case UDP_V4_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + break; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + break; + case IPV4_FLOW: + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + break; + case IPV6_FLOW: + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + break; + default: + return -EINVAL; + } + + wr32(hw, I40E_PFQF_HENA(0), (u32)hena); + wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + i40e_flush(hw); + + /* Save setting for future output/update */ + pf->vsi[pf->lan_vsi]->rxnfc = *nfc; + + return 0; +} + +/** + * i40e_match_fdir_input_set - Match a new filter against an existing one + * @rule: The filter already added + * @input: The new filter to comapre against + * + * Returns true if the two input set match + **/ +static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule, + struct i40e_fdir_filter *input) +{ + if ((rule->dst_ip[0] != input->dst_ip[0]) || + (rule->src_ip[0] != input->src_ip[0]) || + (rule->dst_port != input->dst_port) || + (rule->src_port != input->src_port)) + return false; + return true; +} + +/** + * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry + * @vsi: 
Pointer to the targeted VSI + * @input: The filter to update or NULL to indicate deletion + * @sw_idx: Software index to the filter + * @cmd: The command to get or set Rx flow classification rules + * + * This function updates (or deletes) a Flow Director entry from + * the hlist of the corresponding PF + * + * Returns 0 on success + **/ +static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, + u16 sw_idx, + struct ethtool_rxnfc *cmd) +{ + struct i40e_fdir_filter *rule, *parent; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node2; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->fd_id >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->fd_id == sw_idx)) { + if (input && !i40e_match_fdir_input_set(rule, input)) + err = i40e_add_del_fdir(vsi, rule, false); + else if (!input) + err = i40e_add_del_fdir(vsi, rule, false); + hlist_del(&rule->fdir_node); + kfree(rule); + pf->fdir_pf_active_filters--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, + &pf->fdir_filter_list); + + /* update counts */ + pf->fdir_pf_active_filters++; + + return 0; +} + +/** + * i40e_del_fdir_entry - Deletes a Flow Director filter entry + * @vsi: Pointer to the targeted VSI + * @cmd: The command to get or set Rx flow classification rules + * + * The function removes a Flow Director filter entry from the + * hlist of the corresponding PF + * + * Returns 0 on success + */ +static int i40e_del_fdir_entry(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct i40e_pf *pf = vsi->back; + int ret = 0; + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + + ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); + + i40e_fdir_check_and_reenable(pf); + return ret; +} + +/** + * i40e_add_fdir_ethtool - Add/Remove Flow Director filters + * @vsi: pointer to the targeted VSI + * @cmd: command to get or set RX flow classification rules + * + * Add Flow Director filters for a specific flow spec based on their + * protocol. Returns 0 if the filters were successfully added. 
+ **/ +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp; + struct i40e_fdir_filter *input; + struct i40e_pf *pf; + int ret = -EINVAL; + u16 vf_id; + + if (!vsi) + return -EINVAL; + + pf = vsi->back; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return -EOPNOTSUPP; + + if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) + return -ENOSPC; + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + + pf->hw.func_caps.fd_filters_guaranteed)) { + return -EINVAL; + } + + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= vsi->num_queue_pairs)) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + + if (!input) + return -ENOMEM; + + input->fd_id = fsp->location; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else + input->dest_ctl = + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + + input->q_index = fsp->ring_cookie; + input->flex_off = 0; + input->pctype = 0; + input->dest_vsi = vsi->id; + input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; + input->cnt_index = pf->fd_sb_cnt_idx; + input->flow_type = fsp->flow_type; + input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; + + /* Reverse the src and dest notion, since the HW expects them to be from + * Tx perspective where as the input from user is from Rx filter view. + */ + input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; + input->src_port = fsp->h_u.tcp_ip4_spec.pdst; + input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + + if (ntohl(fsp->m_ext.data[1])) { + if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) { + netif_info(pf, drv, vsi->netdev, "Invalid VF id\n"); + goto free_input; + } + vf_id = ntohl(fsp->h_ext.data[1]); + /* Find vsi id from vf id and override dest vsi */ + input->dest_vsi = pf->vf[vf_id].lan_vsi_id; + if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { + netif_info(pf, drv, vsi->netdev, "Invalid queue id\n"); + goto free_input; + } + } + + ret = i40e_add_del_fdir(vsi, input, true); +free_input: + if (ret) + kfree(input); + else + i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); + + return ret; +} + +/** + * i40e_set_rxnfc - command to set RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the command is supported. + **/ +static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = i40e_set_rss_hash_opt(pf, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + ret = i40e_add_fdir_ethtool(vsi, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = i40e_del_fdir_entry(vsi, cmd); + break; + default: + break; + } + + return ret; +} + +/** + * i40e_max_channels - get Max number of combined channels supported + * @vsi: vsi pointer + **/ +static unsigned int i40e_max_channels(struct i40e_vsi *vsi) +{ + /* TODO: This code assumes DCB and FD is disabled for now. 
*/ + return vsi->alloc_queue_pairs; +} + +/** + * i40e_get_channels - Get the current channels enabled and max supported etc. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * We don't support separate tx and rx queues as channels. The other count + * represents how many queues are being used for control. max_combined counts + * how many queue pairs we can support. They may not be mapped 1 to 1 with + * q_vectors since we support a lot more queue pairs than q_vectors. + **/ +static void i40e_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + /* report maximum channels */ + ch->max_combined = i40e_max_channels(vsi); + + /* report info for other vector */ + ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; + ch->max_other = ch->other_count; + + /* Note: This code assumes DCB is disabled for now. */ + ch->combined_count = vsi->num_queue_pairs; +} + +/** + * i40e_set_channels - Set the new channels count. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * The new channels count may not be the same as requested by the user + * since it gets rounded down to a power of 2 value. + **/ +static int i40e_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + unsigned int count = ch->combined_count; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int new_count; + + /* We do not support setting channels for any other VSI at present */ + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0)) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > i40e_max_channels(vsi)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + /* TODO: Flow director limit, DCB etc */ + + /* use rss_reconfig to rebuild with new queue count and update traffic + * class queue mapping + */ + new_count = i40e_reconfig_rss_queues(pf, count); + if (new_count > 0) + return 0; + else + return -EINVAL; +} + +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +/** + * i40e_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40e_get_rxfh_key_size(struct net_device *netdev) +{ + return I40E_HKEY_ARRAY_SIZE; +} + +/** + * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. 
+ **/ +static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) +{ + return I40E_HLUT_ARRAY_SIZE; +} + +static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HLUT(i)); + indir[j++] = reg_val & 0xff; + indir[j++] = (reg_val >> 8) & 0xff; + indir[j++] = (reg_val >> 16) & 0xff; + indir[j++] = (reg_val >> 24) & 0xff; + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HKEY(i)); + key[j++] = (u8)(reg_val & 0xff); + key[j++] = (u8)((reg_val >> 8) & 0xff); + key[j++] = (u8)((reg_val >> 16) & 0xff); + key[j++] = (u8)((reg_val >> 24) & 0xff); + } + } + return 0; +} + +/** + * i40e_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function to use + * + * Returns -EINVAL if the table specifies an invalid queue id, otherwise + * returns 0 after programming the table. + **/ +static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = indir[j++]; + reg_val |= indir[j++] << 8; + reg_val |= indir[j++] << 16; + reg_val |= indir[j++] << 24; + wr32(hw, I40E_PFQF_HLUT(i), reg_val); + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = key[j++]; + reg_val |= key[j++] << 8; + reg_val |= key[j++] << 16; + reg_val |= key[j++] << 24; + wr32(hw, I40E_PFQF_HKEY(i), reg_val); + } + } + return 0; +} + +/** + * i40e_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 i40e_get_priv_flags(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 ret_flags = 0; + + ret_flags |= pf->hw.func_caps.npar_enable ? 
+ I40E_PRIV_FLAGS_NPAR_FLAG : 0; + + return ret_flags; +} + +static const struct ethtool_ops i40e_ethtool_ops = { + .get_settings = i40e_get_settings, + .set_settings = i40e_set_settings, + .get_drvinfo = i40e_get_drvinfo, + .get_regs_len = i40e_get_regs_len, + .get_regs = i40e_get_regs, + .nway_reset = i40e_nway_reset, + .get_link = ethtool_op_get_link, + .get_wol = i40e_get_wol, + .set_wol = i40e_set_wol, + .set_eeprom = i40e_set_eeprom, + .get_eeprom_len = i40e_get_eeprom_len, + .get_eeprom = i40e_get_eeprom, + .get_ringparam = i40e_get_ringparam, + .set_ringparam = i40e_set_ringparam, + .get_pauseparam = i40e_get_pauseparam, + .set_pauseparam = i40e_set_pauseparam, + .get_msglevel = i40e_get_msglevel, + .set_msglevel = i40e_set_msglevel, + .get_rxnfc = i40e_get_rxnfc, + .set_rxnfc = i40e_set_rxnfc, + .self_test = i40e_diag_test, + .get_strings = i40e_get_strings, + .set_phys_id = i40e_set_phys_id, + .get_sset_count = i40e_get_sset_count, + .get_ethtool_stats = i40e_get_ethtool_stats, + .get_coalesce = i40e_get_coalesce, + .set_coalesce = i40e_set_coalesce, + .get_rxfh_key_size = i40e_get_rxfh_key_size, + .get_rxfh_indir_size = i40e_get_rxfh_indir_size, + .get_rxfh = i40e_get_rxfh, + .set_rxfh = i40e_set_rxfh, + .get_channels = i40e_get_channels, + .set_channels = i40e_set_channels, + .get_ts_info = i40e_get_ts_info, + .get_priv_flags = i40e_get_priv_flags, +}; + +void i40e_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &i40e_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c new file mode 100644 index 000000000..1803afeef --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -0,0 +1,1565 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e.h" +#include "i40e_fcoe.h" + +/** + * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE + * @ptype: the packet type field from rx descriptor write-back + **/ +static inline bool i40e_rx_is_fcoe(u16 ptype) +{ + return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && + (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); +} + +/** + * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF + * @sof: the FCoE start of frame delimiter + **/ +static inline bool i40e_fcoe_sof_is_class2(u8 sof) +{ + return (sof == FC_SOF_I2) || (sof == FC_SOF_N2); +} + +/** + * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF + * @sof: the FCoE start of frame delimiter + **/ +static inline bool i40e_fcoe_sof_is_class3(u8 sof) +{ + return (sof == FC_SOF_I3) || (sof == FC_SOF_N3); +} + +/** + * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW + * @sof: the input SOF value from the frame + **/ +static inline bool i40e_fcoe_sof_is_supported(u8 sof) +{ + return i40e_fcoe_sof_is_class2(sof) || + i40e_fcoe_sof_is_class3(sof); +} + +/** + * i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame + * @skb: the frame whose EOF is to be pulled from + **/ +static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof) +{ + *sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; + + if (!i40e_fcoe_sof_is_supported(*sof)) + return -EINVAL; + return 0; +} + +/** + * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW + * @eof: the input EOF value from the frame + **/ +static inline bool i40e_fcoe_eof_is_supported(u8 eof) +{ + return (eof == FC_EOF_N) || (eof == FC_EOF_T) || + (eof == FC_EOF_NI) || (eof == FC_EOF_A); +} + +/** + * i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame + * @skb: the frame whose EOF is to be pulled from + **/ +static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof) +{ + /* the first byte of the last dword is EOF */ + skb_copy_bits(skb, skb->len - 4, eof, 1); + + if (!i40e_fcoe_eof_is_supported(*eof)) + return -EINVAL; + return 0; +} + +/** + * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming + * @eof: the input eof value from the frame + * + * The FC EOF is converted to the value understood by HW for descriptor + * programming. Never call this w/o calling i40e_fcoe_eof_is_supported() + * first. 
+ **/ +static inline u32 i40e_fcoe_ctxt_eof(u8 eof) +{ + switch (eof) { + case FC_EOF_N: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N; + case FC_EOF_T: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T; + case FC_EOF_NI: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI; + case FC_EOF_A: + return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A; + default: + /* FIXME: still returns 0 */ + pr_err("Unrecognized EOF %x\n", eof); + return 0; + } +} + +/** + * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid + * @xid: the exchange id + **/ +static inline bool i40e_fcoe_xid_is_valid(u16 xid) +{ + return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX); +} + +/** + * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated + * @pf: pointer to PF + * @ddp: sw DDP context + * + * Unmap the scatter-gather list associated with the given SW DDP context + * + * Returns: data length already ddp-ed in bytes + * + **/ +static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf, + struct i40e_fcoe_ddp *ddp) +{ + if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags)) + return; + + if (ddp->sgl) { + dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc, + DMA_FROM_DEVICE); + ddp->sgl = NULL; + ddp->sgc = 0; + } + + if (ddp->pool) { + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ddp->pool = NULL; + } +} + +/** + * i40e_fcoe_ddp_clear - clear the given SW DDP context + * @ddp - SW DDP context + **/ +static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp) +{ + memset(ddp, 0, sizeof(struct i40e_fcoe_ddp)); + ddp->xid = FC_XID_UNKNOWN; + ddp->flags = __I40E_FCOE_DDP_NONE; +} + +/** + * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE + * @id: the prog id for the programming status Rx descriptor write-back + **/ +static inline bool i40e_fcoe_progid_is_fcoe(u8 id) +{ + return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) || + (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS); +} + +/** + * i40e_fcoe_fc_get_xid - get xid from the frame header + * @fh: the fc frame header + * + * In case the incoming frame's exchange is originated from + * the initiator, then received frame's exchange id is ANDed + * with fc_cpu_mask bits to get the same cpu on which exchange + * was originated, otherwise just use the current cpu. + * + * Returns ox_id if exchange originator, rx_id if responder + **/ +static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh) +{ + u32 f_ctl = ntoh24(fh->fh_f_ctl); + + return (f_ctl & FC_FC_EX_CTX) ? + be16_to_cpu(fh->fh_ox_id) : + be16_to_cpu(fh->fh_rx_id); +} + +/** + * i40e_fcoe_fc_frame_header - get fc frame header from skb + * @skb: packet + * + * This checks if there is a VLAN header and returns the data + * pointer to the start of the fc_frame_header. + * + * Returns pointer to the fc_frame_header + **/ +static inline struct fc_frame_header *i40e_fcoe_fc_frame_header( + struct sk_buff *skb) +{ + void *fh = skb->data + sizeof(struct fcoe_hdr); + + if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) + fh += sizeof(struct vlan_hdr); + + return (struct fc_frame_header *)fh; +} + +/** + * i40e_fcoe_ddp_put - release the DDP context for a given exchange id + * @netdev: the corresponding net_device + * @xid: the exchange id that corresponding DDP context will be released + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. 
+ * + * Returns : data length already ddp-ed in bytes + **/ +static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + int len = 0; + struct i40e_fcoe_ddp *ddp = &fcoe->ddp[xid]; + + if (!fcoe || !ddp) + goto out; + + if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags)) + len = ddp->len; + i40e_fcoe_ddp_unmap(pf, ddp); +out: + return len; +} + +/** + * i40e_fcoe_sw_init - sets up the HW for FCoE + * @pf: pointer to PF + * + * Returns 0 if FCoE is supported otherwise the error code + **/ +int i40e_init_pf_fcoe(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 val; + + pf->flags &= ~I40E_FLAG_FCOE_ENABLED; + pf->num_fcoe_qps = 0; + pf->fcoe_hmc_cntx_num = 0; + pf->fcoe_hmc_filt_num = 0; + + if (!pf->hw.func_caps.fcoe) { + dev_info(&pf->pdev->dev, "FCoE capability is disabled\n"); + return 0; + } + + if (!pf->hw.func_caps.dcb) { + dev_warn(&pf->pdev->dev, + "Hardware is not DCB capable not enabling FCoE.\n"); + return 0; + } + + /* enable FCoE hash filter */ + val = rd32(hw, I40E_PFQF_HENA(1)); + val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32); + val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32); + val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; + wr32(hw, I40E_PFQF_HENA(1), val); + + /* enable flag */ + pf->flags |= I40E_FLAG_FCOE_ENABLED; + pf->num_fcoe_qps = I40E_DEFAULT_FCOE; + + /* Reserve 4K DDP contexts and 20K filter size for FCoE */ + pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) * + I40E_DMA_CNTX_BASE_SIZE; + pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + + (1 << I40E_HASH_FILTER_SIZE_16K) * + I40E_HASH_FILTER_BASE_SIZE; + + /* FCoE object: max 16K filter buckets and 4K DMA contexts */ + pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K; + pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K; + + /* Setup max frame with FCoE_MTU plus L2 overheads */ + val = rd32(hw, I40E_GLFCOE_RCTL); + val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK; + val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) + << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT); + wr32(hw, I40E_GLFCOE_RCTL, val); + + dev_info(&pf->pdev->dev, "FCoE is supported.\n"); + return 0; +} + +/** + * i40e_get_fcoe_tc_map - Return TC map for FCoE APP + * @pf: pointer to PF + * + **/ +u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) +{ + struct i40e_dcb_app_priority_table app; + struct i40e_hw *hw = &pf->hw; + u8 enabled_tc = 0; + u8 tc, i; + /* Get the FCoE APP TLV */ + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + for (i = 0; i < dcbcfg->numapps; i++) { + app = dcbcfg->app[i]; + if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app.protocolid == ETH_P_FCOE) { + tc = dcbcfg->etscfg.prioritytable[app.priority]; + enabled_tc |= (1 << tc); + break; + } + } + + /* TC0 if there is no TC defined for FCoE APP TLV */ + enabled_tc = enabled_tc ? 
enabled_tc : 0x1; + + return enabled_tc; +} + +/** + * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI + * @vsi: pointer to the associated VSI struct + * @ctxt: pointer to the associated VSI context to be passed to HW + * + * Returns 0 on success or < 0 on error + **/ +int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) +{ + struct i40e_aqc_vsi_properties_data *info = &ctxt->info; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u8 enabled_tc = 0; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { + dev_err(&pf->pdev->dev, + "FCoE is not enabled for this device\n"); + return -EPERM; + } + + /* initialize the hardware for FCoE */ + ctxt->pf_num = hw->pf_id; + ctxt->vf_num = 0; + ctxt->uplink_seid = vsi->uplink_seid; + ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt->flags = I40E_AQ_VSI_TYPE_PF; + + /* FCoE VSI would need the following sections */ + info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + + /* FCoE VSI does not need these sections */ + info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | + I40E_AQ_VSI_PROP_VLAN_VALID | + I40E_AQ_VSI_PROP_CAS_PV_VALID | + I40E_AQ_VSI_PROP_INGRESS_UP_VALID | + I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); + + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + info->valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + info->switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + enabled_tc = i40e_get_fcoe_tc_map(pf); + i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); + + /* set up queue option section: only enable FCoE */ + info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA; + + return 0; +} + +/** + * i40e_fcoe_enable - this is the implementation of ndo_fcoe_enable, + * indicating the upper FCoE protocol stack is ready to use FCoE + * offload features. + * + * @netdev: pointer to the netdev that FCoE is created on + * + * Returns 0 on success + * + * in RTNL + * + **/ +int i40e_fcoe_enable(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { + netdev_err(netdev, "HW does not support FCoE.\n"); + return -ENODEV; + } + + if (vsi->type != I40E_VSI_FCOE) { + netdev_err(netdev, "interface does not support FCoE.\n"); + return -EBUSY; + } + + atomic_inc(&fcoe->refcnt); + + return 0; +} + +/** + * i40e_fcoe_disable- disables FCoE for upper FCoE protocol stack. 
+ * @dev: pointer to the netdev that FCoE is created on + * + * Returns 0 on success + * + **/ +int i40e_fcoe_disable(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { + netdev_err(netdev, "device does not support FCoE\n"); + return -ENODEV; + } + if (vsi->type != I40E_VSI_FCOE) + return -EBUSY; + + if (!atomic_dec_and_test(&fcoe->refcnt)) + return -EINVAL; + + netdev_info(netdev, "FCoE disabled\n"); + + return 0; +} + +/** + * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP + * @fcoe: the FCoE sw object + * @dev: the device that the pool is associated with + * @cpu: the cpu for this pool + * + **/ +static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct i40e_fcoe_ddp_pool *ddp_pool; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + if (!ddp_pool->pool) { + dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu); + return; + } + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; +} + +/** + * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP + * @fcoe: the FCoE sw object + * @dev: the device that the pool is associated with + * @cpu: the cpu for this pool + * + * Returns 0 on successful or non zero on failure + * + **/ +static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct i40e_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; + char pool_name[32]; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + if (ddp_pool && ddp_pool->pool) { + dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu); + return 0; + } + snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu); + pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX, + I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE); + if (!pool) { + dev_err(dev, "dma_pool_create %s failed\n", pool_name); + return -ENOMEM; + } + ddp_pool->pool = pool; + return 0; +} + +/** + * i40e_fcoe_free_ddp_resources - release FCoE DDP resources + * @vsi: the vsi FCoE is associated with + * + **/ +void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + int cpu, i; + + /* do nothing if not FCoE VSI */ + if (vsi->type != I40E_VSI_FCOE) + return; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return; + + for (i = 0; i < I40E_FCOE_DDP_MAX; i++) + i40e_fcoe_ddp_put(vsi->netdev, i); + + for_each_possible_cpu(cpu) + i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu); + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; + + netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n", + vsi->id, vsi->seid); +} + +/** + * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources + * @vsi: the VSI FCoE is associated with + * + * Returns 0 on successful or non zero on failure + * + **/ +int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct device *dev = &pf->pdev->dev; + struct i40e_fcoe *fcoe = &pf->fcoe; + unsigned int cpu; + int i; + + if (vsi->type != I40E_VSI_FCOE) + return -ENODEV; + + /* do nothing if no DDP pools were allocated */ + if (fcoe->ddp_pool) + return -EEXIST; + + /* allocate per CPU memory to track DDP pools */ + fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool); + if (!fcoe->ddp_pool) { + dev_err(&pf->pdev->dev, "failed to allocate percpu 
DDP\n"); + return -ENOMEM; + } + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu)) + continue; + + dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu); + i40e_fcoe_free_ddp_resources(vsi); + return -ENOMEM; + } + + /* initialize the sw context */ + for (i = 0; i < I40E_FCOE_DDP_MAX; i++) + i40e_fcoe_ddp_clear(&fcoe->ddp[i]); + + netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n", + vsi->id, vsi->seid); + + return 0; +} + +/** + * i40e_fcoe_handle_status - check the Programming Status for FCoE + * @rx_ring: the Rx ring for this descriptor + * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor. + * + * Check if this is the Rx Programming Status descriptor write-back for FCoE. + * This is used to verify if the context/filter programming or invalidation + * requested by SW to the HW is successful or not and take actions accordingly. + **/ +void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) +{ + struct i40e_pf *pf = rx_ring->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + struct i40e_fcoe_ddp *ddp; + u32 error; + u16 xid; + u64 qw; + + /* we only care for FCoE here */ + if (!i40e_fcoe_progid_is_fcoe(prog_id)) + return; + + xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) & + (I40E_FCOE_DDP_MAX - 1); + + if (!i40e_fcoe_xid_is_valid(xid)) + return; + + ddp = &fcoe->ddp[xid]; + WARN_ON(xid != ddp->xid); + + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + + /* DDP context programming status: failure or success */ + if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) { + if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) { + dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n", + xid, ddp->xid); + ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT; + } + if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) { + dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n", + xid, ddp->xid); + ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT; + } + } + + /* DDP context invalidation status: failure or success */ + if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) { + if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) { + dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n", + xid, ddp->xid); + ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT; + } + /* clear the flag so we can retry invalidation */ + clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags); + } + + /* unmap DMA */ + i40e_fcoe_ddp_unmap(pf, ddp); + i40e_fcoe_ddp_clear(ddp); +} + +/** + * i40e_fcoe_handle_offload - check ddp status and mark it done + * @adapter: i40e adapter + * @rx_desc: advanced rx descriptor + * @skb: the skb holding the received data + * + * This checks ddp status. + * + * Returns : < 0 indicates an error or not a FCOE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. 
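[Editor's note] The programming-status handling above is mostly bit-field surgery on the 64-bit descriptor write-back word: mask, shift, then compare against the documented error bits, while the exchange id is folded into the DDP table range with a power-of-two mask. The sketch below shows only that extraction pattern; the mask and shift values are made up for the example and are not the real I40E_RX_PROG_STATUS_DESC_* definitions, although the table size does match I40E_FCOE_DDP_MAX (2048) from the header.

	/* Mask-and-shift sketch for a descriptor write-back word.
	 * EXAMPLE_* values are invented; only the pattern matches the driver.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define EXAMPLE_ERROR_SHIFT	19
	#define EXAMPLE_ERROR_MASK	(0x3FULL << EXAMPLE_ERROR_SHIFT)
	#define EXAMPLE_DDP_MAX		2048	/* matches I40E_FCOE_DDP_MAX */

	static unsigned int decode_error(uint64_t qword)
	{
		return (unsigned int)((qword & EXAMPLE_ERROR_MASK) >>
				      EXAMPLE_ERROR_SHIFT);
	}

	static uint16_t decode_xid(uint32_t fcoe_param)
	{
		/* exchange id folded into the DDP table range (power of two) */
		return (uint16_t)(fcoe_param & (EXAMPLE_DDP_MAX - 1));
	}

	int main(void)
	{
		uint64_t qword = 0x5ULL << EXAMPLE_ERROR_SHIFT;

		printf("error bits = %#x\n", decode_error(qword));
		printf("xid        = %#x\n", (unsigned int)decode_xid(0x12345));
		return 0;
	}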
+ * + **/ +int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct i40e_pf *pf = rx_ring->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + struct fc_frame_header *fh = NULL; + struct i40e_fcoe_ddp *ddp = NULL; + u32 status, fltstat; + u32 error, fcerr; + int rc = -EINVAL; + u16 ptype; + u16 xid; + u64 qw; + + /* check this rxd is for programming status */ + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + /* packet descriptor, check packet type */ + ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + if (!i40e_rx_is_fcoe(ptype)) + goto out_no_ddp; + + error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; + fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) & + I40E_RX_DESC_FCOE_ERROR_MASK; + + /* check stateless offload error */ + if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) { + dev_err(&pf->pdev->dev, "Protocol Error\n"); + skb->ip_summed = CHECKSUM_NONE; + } else { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + /* check hw status on ddp */ + status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; + fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & + I40E_RX_DESC_FLTSTAT_FCMASK; + + /* now we are ready to check DDP */ + fh = i40e_fcoe_fc_frame_header(skb); + xid = i40e_fcoe_fc_get_xid(fh); + if (!i40e_fcoe_xid_is_valid(xid)) + goto out_no_ddp; + + /* non DDP normal receive, return to the protocol stack */ + if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH) + goto out_no_ddp; + + /* do we have a sw ddp context setup ? */ + ddp = &fcoe->ddp[xid]; + if (!ddp->sgl) + goto out_no_ddp; + + /* fetch xid from hw rxd wb, which should match up the sw ctxt */ + xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id); + if (ddp->xid != xid) { + dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n", + ddp->xid, xid); + goto out_put_ddp; + } + + /* the same exchange has already errored out */ + if (ddp->fcerr) { + dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcer 0x%x\n", + xid, ddp->fcerr, fcerr); + goto out_put_ddp; + } + + /* fcoe param is valid by now with correct DDPed length */ + ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param); + ddp->fcerr = fcerr; + /* header posting only, useful only for target mode and debugging */ + if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) { + /* For target mode, we get header of the last packet but it + * does not have the FCoE trailer field, i.e., CRC and EOF + * Ordered Set since they are offloaded by the HW, so fill + * it up correspondingly to allow the packet to pass through + * to the upper protocol stack. 
+ */ + u32 f_ctl = ntoh24(fh->fh_f_ctl); + + if ((f_ctl & FC_FC_END_SEQ) && + (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) { + struct fcoe_crc_eof *crc = NULL; + + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } else { + /* otherwise, drop the header only frame */ + rc = 0; + goto out_no_ddp; + } + } + +out_put_ddp: + /* either we got RSP or we have an error, unmap DMA in both cases */ + i40e_fcoe_ddp_unmap(pf, ddp); + if (ddp->len && !ddp->fcerr) { + int pkts; + + rc = ddp->len; + i40e_fcoe_ddp_clear(ddp); + ddp->len = rc; + pkts = DIV_ROUND_UP(rc, 2048); + rx_ring->stats.bytes += rc; + rx_ring->stats.packets += pkts; + rx_ring->q_vector->rx.total_bytes += rc; + rx_ring->q_vector->rx.total_packets += pkts; + set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags); + } + +out_no_ddp: + return rc; +} + +/** + * i40e_fcoe_ddp_setup - called to set up ddp context + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * @target_mode: indicates this is a DDP request for target + * + * Returns : 1 for success and 0 for no DDP on this I/O + **/ +static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc, + int target_mode) +{ + static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_fcoe_ddp_pool *ddp_pool; + struct i40e_pf *pf = np->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + unsigned int i, j, dmacount; + struct i40e_fcoe_ddp *ddp; + unsigned int firstoff = 0; + unsigned int thisoff = 0; + unsigned int thislen = 0; + struct scatterlist *sg; + dma_addr_t addr = 0; + unsigned int len; + + if (xid >= I40E_FCOE_DDP_MAX) { + dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid); + return 0; + } + + /* no DDP if we are already down or resetting */ + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_NEEDS_RESTART, &pf->state)) { + dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n", + xid); + return 0; + } + + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); + return 0; + } + i40e_fcoe_ddp_clear(ddp); + + if (!fcoe->ddp_pool) { + dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); + if (!ddp_pool->pool) { + dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid); + goto out_noddp; + } + + /* setup dma from scsi command sgl */ + dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + if (dmacount == 0) { + dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n", + sgl, sgc); + goto out_noddp_unmap; + } + + /* alloc the udl from our ddp pool */ + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); + if (!ddp->udl) { + dev_info(&pf->pdev->dev, + "Failed allocated ddp context, xid 0x%x\n", xid); + goto out_noddp_unmap; + } + + j = 0; + ddp->len = 0; + for_each_sg(sgl, sg, dmacount, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + ddp->len += len; + while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) { + dev_info(&pf->pdev->dev, + "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + + /* get the offset of length of current buffer */ + thisoff = addr & ((dma_addr_t)bufflen - 1); + 
thislen = min_t(unsigned int, (bufflen - thisoff), len); + /* all but the 1st buffer (j == 0) + * must be aligned on bufflen + */ + if ((j != 0) && (thisoff)) + goto out_noddp_free; + + /* all but the last buffer + * ((i == (dmacount - 1)) && (thislen == len)) + * must end at bufflen + */ + if (((i != (dmacount - 1)) || (thislen != len)) && + ((thislen + thisoff) != bufflen)) + goto out_noddp_free; + + ddp->udl[j] = (u64)(addr - thisoff); + /* only the first buffer may have none-zero offset */ + if (j == 0) + firstoff = thisoff; + len -= thislen; + addr += thislen; + j++; + } + } + /* only the last buffer may have non-full bufflen */ + ddp->lastsize = thisoff + thislen; + ddp->firstoff = firstoff; + ddp->list_len = j; + ddp->pool = ddp_pool->pool; + ddp->sgl = sgl; + ddp->sgc = sgc; + ddp->xid = xid; + if (target_mode) + set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags); + set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags); + + put_cpu(); + return 1; /* Success */ + +out_noddp_free: + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + i40e_fcoe_ddp_clear(ddp); + +out_noddp_unmap: + dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); +out_noddp: + put_cpu(); + return 0; +} + +/** + * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. + * + * Returns : 1 for success and 0 for no ddp + **/ +static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +/** + * i40e_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + **/ +static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +/** + * i40e_fcoe_program_ddp - programs the HW DDP related descriptors + * @tx_ring: transmit ring for this packet + * @skb: the packet to be sent out + * @sof: the SOF to indicate class of service + * + * Determine if it is READ/WRITE command, and finds out if there is + * a matching SW DDP context for this command. DDP is applicable + * only in case of READ if initiator or WRITE in case of + * responder (via checking XFER_RDY). 
+ * + * Note: caller checks sof and ddp sw context + * + * Returns : none + * + **/ +static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring, + struct sk_buff *skb, + struct i40e_fcoe_ddp *ddp, u8 sof) +{ + struct i40e_fcoe_filter_context_desc *filter_desc = NULL; + struct i40e_fcoe_queue_context_desc *queue_desc = NULL; + struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL; + struct i40e_pf *pf = tx_ring->vsi->back; + u16 i = tx_ring->next_to_use; + struct fc_frame_header *fh; + u64 flags_rsvd_lanq = 0; + bool target_mode; + + /* check if abort is still pending */ + if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) { + dev_warn(&pf->pdev->dev, + "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n", + ddp->xid, ddp->flags); + return; + } + + /* set the flag to indicate this is programmed */ + if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) { + dev_warn(&pf->pdev->dev, + "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n", + ddp->xid, ddp->flags); + return; + } + + /* Prepare the DDP context descriptor */ + ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i); + i++; + if (i == tx_ring->count) + i = 0; + + ddp_desc->type_cmd_foff_lsize = + cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX | + ((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K << + I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) | + ((u64)ddp->firstoff << + I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) | + ((u64)ddp->lastsize << + I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)); + ddp_desc->rsvd = cpu_to_le64(0); + + /* target mode needs last packet in the sequence */ + target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags); + if (target_mode) + ddp_desc->type_cmd_foff_lsize |= + cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH); + + /* Prepare queue_context descriptor */ + queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++); + if (i == tx_ring->count) + i = 0; + queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp)); + queue_desc->flen_tph = cpu_to_le64(ddp->list_len | + ((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC | + I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) << + I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)); + + /* Prepare filter_context_desc */ + filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i); + i++; + if (i == tx_ring->count) + i = 0; + + fh = (struct fc_frame_header *)skb_transport_header(skb); + filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset)); + filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt)); + filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid << + I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT); + + flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP; + flags_rsvd_lanq |= (u64)(target_mode ? + I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP : + I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT); + + flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ? + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 : + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3); + + flags_rsvd_lanq |= ((u64)skb->queue_mapping << + I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT); + filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq); + + /* By this time, all offload related descriptors has been programmed */ + tx_ring->next_to_use = i; +} + +/** + * i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort + * @tx_ring: transmit ring for this packet + * @skb: the packet associated w/ this DDP invalidation, i.e., ABTS + * @ddp: the SW DDP context for this DDP + * + * Programs the Tx context descriptor to do DDP invalidation. 
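[Editor's note] i40e_fcoe_program_ddp above claims three consecutive context descriptors (DDP, queue, filter) from the transmit ring, each time advancing next_to_use and wrapping back to zero at the end of the ring. The sketch below isolates just that ring arithmetic with a toy structure; it is not struct i40e_ring and the names are placeholders.

	/* Minimal ring-index sketch: advance a next_to_use cursor with
	 * wrap-around, the way consecutive context descriptors are claimed.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct toy_ring {
		uint16_t count;		/* number of descriptors in the ring */
		uint16_t next_to_use;	/* next free descriptor index */
	};

	static uint16_t ring_claim(struct toy_ring *ring)
	{
		uint16_t idx = ring->next_to_use;

		ring->next_to_use = (uint16_t)(idx + 1);
		if (ring->next_to_use == ring->count)
			ring->next_to_use = 0;
		return idx;
	}

	int main(void)
	{
		struct toy_ring ring = { .count = 4, .next_to_use = 3 };
		uint16_t a = ring_claim(&ring);
		uint16_t b = ring_claim(&ring);
		uint16_t c = ring_claim(&ring);

		/* claiming three descriptors from index 3 wraps to 0 and 1 */
		printf("%u %u %u\n", (unsigned)a, (unsigned)b, (unsigned)c);
		return 0;
	}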
+ **/ +static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring, + struct sk_buff *skb, + struct i40e_fcoe_ddp *ddp) +{ + struct i40e_tx_context_desc *context_desc; + int i; + + if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) + return; + + i = tx_ring->next_to_use; + context_desc = I40E_TX_CTXTDESC(tx_ring, i); + i++; + if (i == tx_ring->count) + i = 0; + + context_desc->tunneling_params = cpu_to_le32(0); + context_desc->l2tag2 = cpu_to_le16(0); + context_desc->rsvd = cpu_to_le16(0); + context_desc->type_cmd_tso_mss = cpu_to_le64( + I40E_TX_DESC_DTYPE_FCOE_CTX | + (I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL << + I40E_TXD_CTX_QW1_CMD_SHIFT) | + (I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND << + I40E_TXD_CTX_QW1_CMD_SHIFT)); + tx_ring->next_to_use = i; +} + +/** + * i40e_fcoe_handle_ddp - check we should setup or invalidate DDP + * @tx_ring: transmit ring for this packet + * @skb: the packet to be sent out + * @sof: the SOF to indicate class of service + * + * Determine if it is ABTS/READ/XFER_RDY, and finds out if there is + * a matching SW DDP context for this command. DDP is applicable + * only in case of READ if initiator or WRITE in case of + * responder (via checking XFER_RDY). In case this is an ABTS, send + * just invalidate the context. + **/ +static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring, + struct sk_buff *skb, u8 sof) +{ + struct i40e_pf *pf = tx_ring->vsi->back; + struct i40e_fcoe *fcoe = &pf->fcoe; + struct fc_frame_header *fh; + struct i40e_fcoe_ddp *ddp; + u32 f_ctl; + u8 r_ctl; + u16 xid; + + fh = (struct fc_frame_header *)skb_transport_header(skb); + f_ctl = ntoh24(fh->fh_f_ctl); + r_ctl = fh->fh_r_ctl; + ddp = NULL; + + if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) { + /* exchange responder? if so, XFER_RDY for write */ + xid = ntohs(fh->fh_rx_id); + if (i40e_fcoe_xid_is_valid(xid)) { + ddp = &fcoe->ddp[xid]; + if ((ddp->xid == xid) && + (test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) + i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof); + } + } else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) { + /* exchange originator, check READ cmd */ + xid = ntohs(fh->fh_ox_id); + if (i40e_fcoe_xid_is_valid(xid)) { + ddp = &fcoe->ddp[xid]; + if ((ddp->xid == xid) && + (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) + i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof); + } + } else if (r_ctl == FC_RCTL_BA_ABTS) { + /* exchange originator, check ABTS */ + xid = ntohs(fh->fh_ox_id); + if (i40e_fcoe_xid_is_valid(xid)) { + ddp = &fcoe->ddp[xid]; + if ((ddp->xid == xid) && + (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags))) + i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp); + } + } +} + +/** + * i40e_fcoe_tso - set up FCoE TSO + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: the tso header length + * @sof: the SOF to indicate class of service + * + * Note must already have sof checked to be either class 2 or class 3 before + * calling this function. + * + * Returns 1 to indicate sequence segmentation offload is properly setup + * or returns 0 to indicate no tso is needed, otherwise returns error + * code to drop the frame. 
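[Editor's note] The sequence-offload helper documented below uses a three-way return: negative to drop the frame, 1 when a TSO context descriptor was set up, and 0 when the frame is not GSO so the DDP path should run instead. The sketch that follows shows how a caller branches on that contract, mirroring the flow in i40e_fcoe_xmit_frame further down; fake_fcoe_tso() is a stand-in, not the driver function.

	/* Three-way return convention used by the FCoE TSO path:
	 *   < 0 -> error, drop the frame
	 *     0 -> no TSO needed, fall back to DDP handling
	 *   > 0 -> TSO context programmed, mark the frame as FSO
	 */
	#include <stdio.h>

	enum tx_action { TX_DROP, TX_FSO, TX_DDP };

	static int fake_fcoe_tso(int gso, int bad_gso_type)
	{
		if (!gso)
			return 0;	/* not a segmented frame */
		if (bad_gso_type)
			return -22;	/* -EINVAL: wrong gso type */
		return 1;		/* TSO context descriptor set up */
	}

	static enum tx_action classify(int gso, int bad_gso_type)
	{
		int fso = fake_fcoe_tso(gso, bad_gso_type);

		if (fso < 0)
			return TX_DROP;
		return fso ? TX_FSO : TX_DDP;
	}

	int main(void)
	{
		printf("non-gso frame  -> %d (expect TX_DDP=2)\n", classify(0, 0));
		printf("good gso frame -> %d (expect TX_FSO=1)\n", classify(1, 0));
		printf("bad gso type   -> %d (expect TX_DROP=0)\n", classify(1, 1));
		return 0;
	}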
+ **/ +static int i40e_fcoe_tso(struct i40e_ring *tx_ring, + struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len, u8 sof) +{ + struct i40e_tx_context_desc *context_desc; + u32 cd_type, cd_cmd, cd_tso_len, cd_mss; + struct fc_frame_header *fh; + u64 cd_type_cmd_tso_mss; + + /* must match gso type as FCoE */ + if (!skb_is_gso(skb)) + return 0; + + /* is it the expected gso type for FCoE ?*/ + if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { + netdev_err(skb->dev, + "wrong gso type %d:expecting SKB_GSO_FCOE\n", + skb_shinfo(skb)->gso_type); + return -EINVAL; + } + + /* header and trailer are inserted by hw */ + *hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) + + sizeof(struct fcoe_crc_eof); + + /* check sof to decide a class 2 or 3 TSO */ + if (likely(i40e_fcoe_sof_is_class3(sof))) + cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3; + else + cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2; + + /* param field valid? */ + fh = (struct fc_frame_header *)skb_transport_header(skb); + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) + cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF; + + /* fill the field values */ + cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX; + cd_tso_len = skb->len - *hdr_len; + cd_mss = skb_shinfo(skb)->gso_size; + cd_type_cmd_tso_mss = + ((u64)cd_type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) | + ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + + /* grab the next descriptor */ + context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); + tx_ring->next_to_use++; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + + context_desc->tunneling_params = 0; + context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK) + >> I40E_TX_FLAGS_VLAN_SHIFT); + context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); + + return 1; +} + +/** + * i40e_fcoe_tx_map - build the tx descriptor + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @first: first buffer info buffer to use + * @tx_flags: collected send information + * @hdr_len: ptr to the size of the packet header + * @eof: the frame eof value + * + * Note, for FCoE, sof and eof are already checked + **/ +static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring, + struct sk_buff *skb, + struct i40e_tx_buffer *first, + u32 tx_flags, u8 hdr_len, u8 eof) +{ + u32 td_offset = 0; + u32 td_cmd = 0; + u32 maclen; + + /* insert CRC */ + td_cmd = I40E_TX_DESC_CMD_ICRC; + + /* setup MACLEN */ + maclen = skb_network_offset(skb); + if (tx_flags & I40E_TX_FLAGS_SW_VLAN) + maclen += sizeof(struct vlan_hdr); + + if (skb->protocol == htons(ETH_P_FCOE)) { + /* for FCoE, maclen should exclude ether type */ + maclen -= 2; + /* setup type as FCoE and EOF insertion */ + td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof)); + /* setup FCoELEN and FCLEN */ + td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT) | + ((sizeof(struct fc_frame_header) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)); + /* trim to exclude trailer */ + pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof)); + } + + /* MACLEN is ether header length in words not bytes */ + td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); +} + +/** + * i40e_fcoe_set_skb_header - adjust skb header point for FIP/FCoE/FC + * @skb: the skb to be adjusted + * + * Returns true if this skb is a FCoE/FIP or VLAN carried FCoE/FIP 
and then + * adjusts the skb header pointers correspondingly. Otherwise, returns false. + **/ +static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + + skb_reset_mac_header(skb); + skb->mac_len = sizeof(struct ethhdr); + if (protocol == htons(ETH_P_8021Q)) { + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb); + + protocol = veth->h_vlan_encapsulated_proto; + skb->mac_len += sizeof(struct vlan_hdr); + } + + /* FCoE or FIP only */ + if ((protocol != htons(ETH_P_FIP)) && + (protocol != htons(ETH_P_FCOE))) + return -EINVAL; + + /* set header to L2 of FCoE/FIP */ + skb_set_network_header(skb, skb->mac_len); + if (protocol == htons(ETH_P_FIP)) + return 0; + + /* set header to L3 of FC */ + skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); + return 0; +} + +/** + * i40e_fcoe_xmit_frame - transmit buffer + * @skb: send buffer + * @netdev: the fcoe netdev + * + * Returns 0 if sent, else an error code + **/ +static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(skb->dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; + struct i40e_tx_buffer *first; + u32 tx_flags = 0; + u8 hdr_len = 0; + u8 sof = 0; + u8 eof = 0; + int fso; + + if (i40e_fcoe_set_skb_header(skb)) + goto out_drop; + + if (!i40e_xmit_descriptor_count(skb, tx_ring)) + return NETDEV_TX_BUSY; + + /* prepare the xmit flags */ + if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + goto out_drop; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + + /* FIP is a regular L2 traffic w/o offload */ + if (skb->protocol == htons(ETH_P_FIP)) + goto out_send; + + /* check sof and eof, only supports FC Class 2 or 3 */ + if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) { + netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof); + goto out_drop; + } + + /* always do FCCRC for FCoE */ + tx_flags |= I40E_TX_FLAGS_FCCRC; + + /* check we should do sequence offload */ + fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof); + if (fso < 0) + goto out_drop; + else if (fso) + tx_flags |= I40E_TX_FLAGS_FSO; + else + i40e_fcoe_handle_ddp(tx_ring, skb, sof); + +out_send: + /* send out the packet */ + i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof); + + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns error as operation not permitted + * + **/ +static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu) +{ + netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n"); + return -EPERM; +} + +/** + * i40e_fcoe_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * + **/ +static int i40e_fcoe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); + + return 0; +} + +static const struct net_device_ops i40e_fcoe_netdev_ops = { + 
.ndo_open = i40e_open, + .ndo_stop = i40e_close, + .ndo_get_stats64 = i40e_get_netdev_stats_struct, + .ndo_set_rx_mode = i40e_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = i40e_set_mac, + .ndo_change_mtu = i40e_fcoe_change_mtu, + .ndo_do_ioctl = i40e_ioctl, + .ndo_tx_timeout = i40e_tx_timeout, + .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, + .ndo_setup_tc = i40e_setup_tc, + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i40e_netpoll, +#endif + .ndo_start_xmit = i40e_fcoe_xmit_frame, + .ndo_fcoe_enable = i40e_fcoe_enable, + .ndo_fcoe_disable = i40e_fcoe_disable, + .ndo_fcoe_ddp_setup = i40e_fcoe_ddp_get, + .ndo_fcoe_ddp_done = i40e_fcoe_ddp_put, + .ndo_fcoe_ddp_target = i40e_fcoe_ddp_target, + .ndo_set_features = i40e_fcoe_set_features, +}; + +/* fcoe network device type */ +static struct device_type fcoe_netdev_type = { + .name = "fcoe", +}; + +/** + * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI + * @vsi: pointer to the associated VSI struct + * @ctxt: pointer to the associated VSI context to be passed to HW + * + * Returns 0 on success or < 0 on error + **/ +void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) +{ + struct i40e_hw *hw = &vsi->back->hw; + struct i40e_pf *pf = vsi->back; + + if (vsi->type != I40E_VSI_FCOE) + return; + + netdev->features = (NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + + netdev->vlan_features = netdev->features; + netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1; + netdev->features |= NETIF_F_ALL_FCOE; + netdev->vlan_features |= NETIF_F_ALL_FCOE; + netdev->hw_features |= netdev->features; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); + netdev->mtu = FCOE_MTU; + SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); + /* set different dev_port value 1 for FCoE netdev than the default + * zero dev_port value for PF netdev, this helps biosdevname user + * tool to differentiate them correctly while both attached to the + * same PCI function. 
+ */ + netdev->dev_port = 1; + i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); + i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); + i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); + i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); + + /* use san mac */ + ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); + ether_addr_copy(netdev->perm_addr, hw->mac.san_addr); + /* fcoe netdev ops */ + netdev->netdev_ops = &i40e_fcoe_netdev_ops; +} + +/** + * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI + * @pf: the PF that VSI is associated with + * + **/ +void i40e_fcoe_vsi_setup(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi; + u16 seid; + int i; + + if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) + return; + + BUG_ON(!pf->vsi[pf->lan_vsi]); + + for (i = 0; i < pf->num_alloc_vsi; i++) { + vsi = pf->vsi[i]; + if (vsi && vsi->type == I40E_VSI_FCOE) { + dev_warn(&pf->pdev->dev, + "FCoE VSI already created\n"); + return; + } + } + + seid = pf->vsi[pf->lan_vsi]->seid; + vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0); + if (vsi) { + dev_dbg(&pf->pdev->dev, + "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n", + vsi->seid, vsi->id, vsi->uplink_seid, seid); + } else { + dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n"); + } +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h new file mode 100644 index 000000000..0d49e2d15 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -0,0 +1,127 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_FCOE_H_ +#define _I40E_FCOE_H_ + +/* FCoE HW context helper macros */ +#define I40E_DDP_CONTEXT_DESC(R, i) \ + (&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i])) + +#define I40E_QUEUE_CONTEXT_DESC(R, i) \ + (&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i])) + +#define I40E_FILTER_CONTEXT_DESC(R, i) \ + (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i])) + +/* receive queue descriptor filter status for FCoE */ +#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3 +#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */ +#define I40E_RX_DESC_FLTSTAT_NODDP 0x1 /* no ddp due to error */ +#define I40E_RX_DESC_FLTSTAT_DDP 0x2 /* DDPed payload, post header */ +#define I40E_RX_DESC_FLTSTAT_FCPRSP 0x3 /* FCP_RSP */ + +/* receive queue descriptor error codes for FCoE */ +#define I40E_RX_DESC_FCOE_ERROR_MASK \ + (I40E_RX_DESC_ERROR_L3L4E_PROT | \ + I40E_RX_DESC_ERROR_L3L4E_FC | \ + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR | \ + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN) + +/* receive queue descriptor programming error */ +#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e) \ + (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1) + +#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) \ + (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1) + +#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT \ + (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) +#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT \ + (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) + +#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e) \ + I40E_RX_PROG_FCOE_ERROR_CONFLICT(e) +#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT \ + I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT + +/* FCoE DDP related definitions */ +#define I40E_FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ +#define I40E_FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ +#define I40E_FCOE_DDP_BUFFCNT_MAX 512 /* 9 bits bufcnt */ +#define I40E_FCOE_DDP_PTR_ALIGN 16 +#define I40E_FCOE_DDP_PTR_MAX (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define I40E_FCOE_DDP_BUF_MIN 4096 +#define I40E_FCOE_DDP_MAX 2048 +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 + +/* supported netdev features for FCoE */ +#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_FILTER) + +/* DDP context flags */ +enum i40e_fcoe_ddp_flags { + __I40E_FCOE_DDP_NONE = 1, + __I40E_FCOE_DDP_TARGET, + __I40E_FCOE_DDP_INITALIZED, + __I40E_FCOE_DDP_PROGRAMMED, + __I40E_FCOE_DDP_DONE, + __I40E_FCOE_DDP_ABORTED, + __I40E_FCOE_DDP_UNMAPPED, +}; + +/* DDP SW context struct */ +struct i40e_fcoe_ddp { + int len; + u16 xid; + u16 firstoff; + u16 lastsize; + u16 list_len; + u8 fcerr; + u8 prerr; + unsigned long flags; + unsigned int sgc; + struct scatterlist *sgl; + dma_addr_t udp; + u64 *udl; + struct dma_pool *pool; + +}; + +struct i40e_fcoe_ddp_pool { + struct dma_pool *pool; +}; + +struct i40e_fcoe { + unsigned long mode; + atomic_t refcnt; + struct i40e_fcoe_ddp_pool __percpu *ddp_pool; + struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX]; +}; + +#endif /* _I40E_FCOE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c new file mode 100644 index 000000000..9b987ccc9 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -0,0 +1,360 @@ +/******************************************************************************* + 
* + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_status.h" +#include "i40e_alloc.h" +#include "i40e_hmc.h" +#include "i40e_type.h" + +/** + * i40e_add_sd_table_entry - Adds a segment descriptor to the table + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: segment descriptor index to manipulate + * @type: what type of segment descriptor we're manipulating + * @direct_mode_sz: size to alloc in direct mode + **/ +i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz) +{ + enum i40e_memory_type mem_type __attribute__((unused)); + struct i40e_hmc_sd_entry *sd_entry; + bool dma_mem_alloc_done = false; + struct i40e_dma_mem mem; + i40e_status ret_code; + u64 alloc_len; + + if (NULL == hmc_info->sd_table.sd_entry) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n"); + goto exit; + } + + if (sd_index >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n"); + goto exit; + } + + sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; + if (!sd_entry->valid) { + if (I40E_SD_TYPE_PAGED == type) { + mem_type = i40e_mem_pd; + alloc_len = I40E_HMC_PAGED_BP_SIZE; + } else { + mem_type = i40e_mem_bp_jumbo; + alloc_len = direct_mode_sz; + } + + /* allocate a 4K pd page or 2M backing page */ + ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + dma_mem_alloc_done = true; + if (I40E_SD_TYPE_PAGED == type) { + ret_code = i40e_allocate_virt_mem(hw, + &sd_entry->u.pd_table.pd_entry_virt_mem, + sizeof(struct i40e_hmc_pd_entry) * 512); + if (ret_code) + goto exit; + sd_entry->u.pd_table.pd_entry = + (struct i40e_hmc_pd_entry *) + sd_entry->u.pd_table.pd_entry_virt_mem.va; + sd_entry->u.pd_table.pd_page_addr = mem; + } else { + sd_entry->u.bp.addr = mem; + sd_entry->u.bp.sd_pd_index = sd_index; + } + /* initialize the sd entry */ + hmc_info->sd_table.sd_entry[sd_index].entry_type = type; + + /* increment the ref count */ + I40E_INC_SD_REFCNT(&hmc_info->sd_table); + } + /* Increment backing page reference count */ + if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type) + I40E_INC_BP_REFCNT(&sd_entry->u.bp); +exit: + if (ret_code) + if (dma_mem_alloc_done) + i40e_free_dma_mem(hw, &mem); + + return ret_code; +} + +/** + * i40e_add_pd_table_entry - Adds page 
descriptor to the specified table + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @pd_index: which page descriptor index to manipulate + * + * This function: + * 1. Initializes the pd entry + * 2. Adds pd_entry in the pd_table + * 3. Mark the entry valid in i40e_hmc_pd_entry structure + * 4. Initializes the pd_entry's ref count to 1 + * assumptions: + * 1. The memory for pd should be pinned down, physically contiguous and + * aligned on 4K boundary and zeroed memory. + * 2. It should be 4K in size. + **/ +i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index) +{ + i40e_status ret_code = 0; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_dma_mem mem; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + u64 page_desc; + + if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n"); + goto exit; + } + + /* find corresponding sd */ + sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD); + if (I40E_SD_TYPE_PAGED != + hmc_info->sd_table.sd_entry[sd_idx].entry_type) + goto exit; + + rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD); + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + if (!pd_entry->valid) { + /* allocate a 4K backing page */ + ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp, + I40E_HMC_PAGED_BP_SIZE, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + + pd_entry->bp.addr = mem; + pd_entry->bp.sd_pd_index = pd_index; + pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; + /* Set page address and valid bit */ + page_desc = mem.pa | 0x1; + + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + + /* Add the backing page physical address in the pd entry */ + memcpy(pd_addr, &page_desc, sizeof(u64)); + + pd_entry->sd_index = sd_idx; + pd_entry->valid = true; + I40E_INC_PD_REFCNT(pd_table); + } + I40E_INC_BP_REFCNT(&pd_entry->bp); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_bp - remove a backing page from a page descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: distinguishes a VF from a PF + * + * This function: + * 1. Marks the entry in pd tabe (for paged address mode) or in sd table + * (for direct address mode) invalid. + * 2. Write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for the pd _entry + * assumptions: + * 1. Caller can deallocate the memory used by backing storage after this + * function returns. 
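[Editor's note] The HMC add and remove paths around this point keep reducing a flat page-descriptor index into a (segment descriptor, relative PD) pair: sd_idx = pd_index / 512 and rel_pd_idx = pd_index % 512, with the backing-page entry written as the page physical address OR'ed with a valid bit in bit 0. The short arithmetic sketch below uses toy inputs, not real hardware state, but follows the same mapping.

	/* Index arithmetic sketch for the HMC page-descriptor table:
	 * each segment descriptor covers 512 page descriptors, and a PD entry
	 * is the 4K-aligned backing-page physical address with bit 0 as valid.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define PD_CNT_IN_SD	512u	/* matches I40E_HMC_PD_CNT_IN_SD */

	int main(void)
	{
		uint32_t pd_index = 1537;			/* flat PD index */
		uint32_t sd_idx = pd_index / PD_CNT_IN_SD;	/* which SD: 3 */
		uint32_t rel_pd_idx = pd_index % PD_CNT_IN_SD;	/* slot in SD: 1 */
		uint64_t page_pa = 0x12345000ULL;		/* 4K-aligned page */
		uint64_t page_desc = page_pa | 0x1;		/* set valid bit */

		printf("sd_idx=%u rel_pd_idx=%u page_desc=%#llx\n",
		       sd_idx, rel_pd_idx, (unsigned long long)page_desc);
		return 0;
	}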
+ **/ +i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + struct i40e_hmc_pd_entry *pd_entry; + struct i40e_hmc_pd_table *pd_table; + struct i40e_hmc_sd_entry *sd_entry; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + + /* calculate index */ + sd_idx = idx / I40E_HMC_PD_CNT_IN_SD; + rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD; + if (sd_idx >= hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX; + hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n"); + goto exit; + } + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) { + ret_code = I40E_ERR_INVALID_SD_TYPE; + hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n"); + goto exit; + } + /* get the entry and decrease its ref counter */ + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + I40E_DEC_BP_REFCNT(&pd_entry->bp); + if (pd_entry->bp.ref_cnt) + goto exit; + + /* mark the entry invalid */ + pd_entry->valid = false; + I40E_DEC_PD_REFCNT(pd_table); + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + memset(pd_addr, 0, sizeof(u64)); + I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); + + /* free memory here */ + ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (ret_code) + goto exit; + if (!pd_table->ref_cnt) + i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); +exit: + return ret_code; +} + +/** + * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + **/ +i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40E_DEC_BP_REFCNT(&sd_entry->u.bp); + if (sd_entry->u.bp.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); + + /* mark the entry invalid */ + sd_entry->valid = false; +exit: + return ret_code; +} + +/** + * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: used to distinguish between VF and PF + **/ +i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + struct i40e_hmc_sd_entry *sd_entry; + i40e_status ret_code = 0; + + /* get the entry and decrease its ref counter */ + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + if (is_pf) { + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT); + } else { + ret_code = I40E_NOT_SUPPORTED; + goto exit; + } + ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr)); + if (ret_code) + goto exit; +exit: + return ret_code; +} + +/** + * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry. 
+ * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + **/ +i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + if (sd_entry->u.pd_table.ref_cnt) { + ret_code = I40E_ERR_NOT_READY; + goto exit; + } + + /* mark the entry invalid */ + sd_entry->valid = false; + + I40E_DEC_SD_REFCNT(&hmc_info->sd_table); +exit: + return ret_code; +} + +/** + * i40e_remove_pd_page_new - Removes a PD page from sd entry. + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * @is_pf: used to distinguish between VF and PF + **/ +i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + if (is_pf) { + I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED); + } else { + ret_code = I40E_NOT_SUPPORTED; + goto exit; + } + /* free memory here */ + ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr)); + if (ret_code) + goto exit; +exit: + return ret_code; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h new file mode 100644 index 000000000..732a02660 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -0,0 +1,236 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_HMC_H_ +#define _I40E_HMC_H_ + +#define I40E_HMC_MAX_BP_COUNT 512 + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ +#define I40E_HMC_PD_CNT_IN_SD 512 +#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ +#define I40E_HMC_PAGED_BP_SIZE 4096 +#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40E_FIRST_VF_FPM_ID 16 + +struct i40e_hmc_obj_info { + u64 base; /* base addr in FPM */ + u32 max_cnt; /* max count available for this hmc func */ + u32 cnt; /* count of objects driver actually wants to create */ + u64 size; /* size in bytes of one object */ +}; + +enum i40e_sd_entry_type { + I40E_SD_TYPE_INVALID = 0, + I40E_SD_TYPE_PAGED = 1, + I40E_SD_TYPE_DIRECT = 2 +}; + +struct i40e_hmc_bp { + enum i40e_sd_entry_type entry_type; + struct i40e_dma_mem addr; /* populate to be used by hw */ + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40e_hmc_pd_entry { + struct i40e_hmc_bp bp; + u32 sd_index; + bool valid; +}; + +struct i40e_hmc_pd_table { + struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ + struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ + struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ + + u32 ref_cnt; + u32 sd_index; +}; + +struct i40e_hmc_sd_entry { + enum i40e_sd_entry_type entry_type; + bool valid; + + union { + struct i40e_hmc_pd_table pd_table; + struct i40e_hmc_bp bp; + } u; +}; + +struct i40e_hmc_sd_table { + struct i40e_virt_mem addr; /* used to track sd_entry allocations */ + u32 sd_cnt; + u32 ref_cnt; + struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ +}; + +struct i40e_hmc_info { + u32 signature; + /* equals to pci func num for PF and dynamically allocated for VFs */ + u8 hmc_fn_id; + u16 first_sd_index; /* index of the first available SD */ + + /* hmc objects */ + struct i40e_hmc_obj_info *hmc_obj; + struct i40e_virt_mem hmc_obj_virt_mem; + struct i40e_hmc_sd_table sd_table; +}; + +#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware + * @hw: pointer to our hw struct + * @pa: pointer to physical address + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ +{ \ + u32 val1, val2, val3; \ + val1 = (u32)(upper_32_bits(pa)); \ + val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ + (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware + * @hw: pointer to our hw struct + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ +{ \ + u32 val2, val3; \ + val2 = (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ + val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + **/ +#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40e_hmc_rsrc_type. + **/ +#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ +{ \ + u64 fpm_addr, fpm_limit; \ + fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (index); \ + fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ + *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ + *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(sd_limit) += 1; \ +} + +/** + * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40e_hmc_rsrc_type. 
+ **/ +#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ +{ \ + u64 fpm_adr, fpm_limit; \ + fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (idx); \ + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ + *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ + *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(pd_limit) += 1; \ +} +i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); + +i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index); +i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); + +#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c new file mode 100644 index 000000000..0079ad7bc --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -0,0 +1,1143 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_type.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" +#include "i40e_prototype.h" + +/* lan specific interface functions */ + +/** + * i40e_align_l2obj_base - aligns base object pointer to 512 bytes + * @offset: base address offset needing alignment + * + * Aligns the layer 2 function private memory so it's 512-byte aligned. 
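[Editor's illustration, not part of the patch] The two FIND_*_INDEX_LIMIT macros above are plain address arithmetic over the function-private memory (FPM) space: the object range [base + size*index, base + size*(index+cnt)) is converted into a half-open range of 2 MB segment descriptors or 4 KB page descriptors. A standalone restatement of that arithmetic, with the sample queue count chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

#define DIRECT_BP_SIZE 0x200000ull      /* 2 MB, mirrors I40E_HMC_DIRECT_BP_SIZE */
#define PAGED_BP_SIZE  4096ull          /* mirrors I40E_HMC_PAGED_BP_SIZE */

/* Convert an object range in FPM space into a half-open [start, limit)
 * range of backing-page indexes of the given granularity. */
static void find_index_limit(uint64_t base, uint64_t obj_size,
                             uint32_t index, uint32_t cnt, uint64_t bp_size,
                             uint32_t *start, uint32_t *limit)
{
        uint64_t fpm_addr = base + obj_size * index;
        uint64_t fpm_limit = fpm_addr + obj_size * cnt;

        *start = (uint32_t)(fpm_addr / bp_size);
        /* last byte used is fpm_limit - 1; +1 makes the range half-open */
        *limit = (uint32_t)((fpm_limit - 1) / bp_size) + 1;
}

int main(void)
{
        uint32_t sd_idx, sd_lmt, pd_idx, pd_lmt;

        /* e.g. 1536 Tx queue contexts of 128 bytes starting at FPM offset 0 */
        find_index_limit(0, 128, 0, 1536, DIRECT_BP_SIZE, &sd_idx, &sd_lmt);
        find_index_limit(0, 128, 0, 1536, PAGED_BP_SIZE, &pd_idx, &pd_lmt);
        printf("SDs [%u,%u), PDs [%u,%u)\n", sd_idx, sd_lmt, pd_idx, pd_lmt);
        return 0;
}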
+ **/ +static u64 i40e_align_l2obj_base(u64 offset) +{ + u64 aligned_offset = offset; + + if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0) + aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT - + (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT)); + + return aligned_offset; +} + +/** + * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * Calculates the maximum amount of memory for the function required, based + * on the number of resources it must provide context for. + **/ +static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num, + u32 fcoe_cntx_num, u32 fcoe_filt_num) +{ + u64 fpm_size = 0; + + fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ; + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX); + fpm_size = i40e_align_l2obj_base(fpm_size); + + fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT); + fpm_size = i40e_align_l2obj_base(fpm_size); + + return fpm_size; +} + +/** + * i40e_init_lan_hmc - initialize i40e_hmc_info struct + * @hw: pointer to the HW structure + * @txq_num: number of Tx queues needing backing context + * @rxq_num: number of Rx queues needing backing context + * @fcoe_cntx_num: amount of FCoE statefull contexts needing backing context + * @fcoe_filt_num: number of FCoE filters needing backing context + * + * This function will be called once per physical function initialization. + * It will fill out the i40e_hmc_obj_info structure for LAN objects based on + * the driver's provided input, as well as information from the HMC itself + * loaded from NVRAM. + * + * Assumptions: + * - HMC Resource Profile has been selected before calling this function. 
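[Editor's illustration, not part of the patch] i40e_calculate_l2fpm_size() above is a running sum in which each object group is re-aligned to 512 bytes before the next group is added. Using the per-object context sizes declared later in i40e_lan_hmc.h (Tx queue 128 B, Rx queue 32 B, FCoE context and filter 64 B each), a rough standalone equivalent looks like this; the queue counts in main() are made up for the example:

#include <stdint.h>
#include <stdio.h>

#define L2OBJ_ALIGN      512ull  /* I40E_HMC_L2OBJ_BASE_ALIGNMENT */
#define OBJ_SZ_TXQ       128ull  /* I40E_HMC_OBJ_SIZE_TXQ */
#define OBJ_SZ_RXQ        32ull  /* I40E_HMC_OBJ_SIZE_RXQ */
#define OBJ_SZ_FCOE_CTX   64ull  /* I40E_HMC_OBJ_SIZE_FCOE_CNTX */
#define OBJ_SZ_FCOE_FILT  64ull  /* I40E_HMC_OBJ_SIZE_FCOE_FILT */

/* round an FPM offset up to the next 512-byte boundary */
static uint64_t align_up(uint64_t off)
{
        return (off + L2OBJ_ALIGN - 1) / L2OBJ_ALIGN * L2OBJ_ALIGN;
}

static uint64_t l2fpm_size(uint64_t txq, uint64_t rxq,
                           uint64_t fcoe_cntx, uint64_t fcoe_filt)
{
        uint64_t size = align_up(txq * OBJ_SZ_TXQ);

        size = align_up(size + rxq * OBJ_SZ_RXQ);
        size = align_up(size + fcoe_cntx * OBJ_SZ_FCOE_CTX);
        size = align_up(size + fcoe_filt * OBJ_SZ_FCOE_FILT);
        return size;
}

int main(void)
{
        printf("FPM bytes for 64 Tx / 64 Rx queues: %llu\n",
               (unsigned long long)l2fpm_size(64, 64, 0, 0));
        return 0;
}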
+ **/ +i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num) +{ + struct i40e_hmc_obj_info *obj, *full_obj; + i40e_status ret_code = 0; + u64 l2fpm_size; + u32 size_exp; + + hw->hmc.signature = I40E_HMC_INFO_SIGNATURE; + hw->hmc.hmc_fn_id = hw->pf_id; + + /* allocate memory for hmc_obj */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem, + sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX); + if (ret_code) + goto init_lan_hmc_out; + hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *) + hw->hmc.hmc_obj_virt_mem.va; + + /* The full object will be used to create the LAN HMC SD */ + full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL]; + full_obj->max_cnt = 0; + full_obj->cnt = 0; + full_obj->base = 0; + full_obj->size = 0; + + /* Tx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = txq_num; + obj->base = 0; + size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (txq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + txq_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* Rx queue context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX); + obj->cnt = rxq_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (rxq_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + rxq_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE context information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX); + obj->cnt = fcoe_cntx_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base + + (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt * + hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size); + obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_cntx_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_cntx_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + /* FCoE filter information */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX); + obj->cnt = fcoe_filt_num; + obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base + + (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt * + hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size); + 
obj->base = i40e_align_l2obj_base(obj->base); + size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ); + obj->size = (u64)1 << size_exp; + + /* validate values requested by driver don't exceed HMC capacity */ + if (fcoe_filt_num > obj->max_cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n", + fcoe_filt_num, obj->max_cnt, ret_code); + goto init_lan_hmc_out; + } + + /* aggregate values into the full LAN object for later */ + full_obj->max_cnt += obj->max_cnt; + full_obj->cnt += obj->cnt; + + hw->hmc.first_sd_index = 0; + hw->hmc.sd_table.ref_cnt = 0; + l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num, + fcoe_filt_num); + if (NULL == hw->hmc.sd_table.sd_entry) { + hw->hmc.sd_table.sd_cnt = (u32) + (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) / + I40E_HMC_DIRECT_BP_SIZE; + + /* allocate the sd_entry members in the sd_table */ + ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr, + (sizeof(struct i40e_hmc_sd_entry) * + hw->hmc.sd_table.sd_cnt)); + if (ret_code) + goto init_lan_hmc_out; + hw->hmc.sd_table.sd_entry = + (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va; + } + /* store in the LAN full object for later */ + full_obj->size = l2fpm_size; + +init_lan_hmc_out: + return ret_code; +} + +/** + * i40e_remove_pd_page - Remove a page from the page descriptor table + * @hw: pointer to the HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + * + * This function: + * 1. Marks the entry in pd table (for paged address mode) invalid + * 2. write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for pd_entry + * assumptions: + * 1. caller can deallocate the memory used by pd after this function + * returns. + **/ +static i40e_status i40e_remove_pd_page(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + + if (!i40e_prep_remove_pd_page(hmc_info, idx)) + ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_remove_sd_bp - remove a backing page from a segment descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * + * This function: + * 1. Marks the entry in sd table (for direct address mode) invalid + * 2. write to register PMSDCMD, PMSDDATALOW(PMSDDATALOW.PMSDVALID set + * to 0) and PMSDDATAHIGH to invalidate the sd page + * 3. Decrement the ref count for the sd_entry + * assumptions: + * 1. caller can deallocate the memory used by backing storage after this + * function returns. + **/ +static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx) +{ + i40e_status ret_code = 0; + + if (!i40e_prep_remove_sd_bp(hmc_info, idx)) + ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true); + + return ret_code; +} + +/** + * i40e_create_lan_hmc_object - allocate backing store for hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. 
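[Editor's illustration, not part of the patch] In i40e_init_lan_hmc() above, the segment-descriptor table is sized by rounding the total FPM footprint up to whole 2 MB backing pages. The same ceiling division stated on its own, with the constant taken from the header:

#include <stdint.h>
#include <stdio.h>

#define DIRECT_BP_SIZE 0x200000ull   /* 2 MB per segment descriptor */

/* ceil(l2fpm_size / 2 MB), as used for hw->hmc.sd_table.sd_cnt */
static uint32_t sd_count(uint64_t l2fpm_size)
{
        return (uint32_t)((l2fpm_size + DIRECT_BP_SIZE - 1) / DIRECT_BP_SIZE);
}

int main(void)
{
        printf("%u\n", sd_count(1));          /* 1 */
        printf("%u\n", sd_count(0x200000));   /* 1 */
        printf("%u\n", sd_count(0x200001));   /* 2 */
        return 0;
}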
+ **/ +static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_create_obj_info *info) +{ + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + u32 pd_idx1 = 0, pd_lmt1 = 0; + u32 pd_idx = 0, pd_lmt = 0; + bool pd_error = false; + u32 sd_idx, sd_lmt; + u64 sd_size; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n"); + goto exit; + } + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + /* find pd index */ + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + /* This is to cover for cases where you may not want to have an SD with + * the full 2M memory but something smaller. By not filling out any + * size, the function will default the SD size to be 2M. + */ + if (info->direct_mode_sz == 0) + sd_size = I40E_HMC_DIRECT_BP_SIZE; + else + sd_size = info->direct_mode_sz; + + /* check if all the sds are valid. If not, allocate a page and + * initialize it. + */ + for (j = sd_idx; j < sd_lmt; j++) { + /* update the sd table entry */ + ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j, + info->entry_type, + sd_size); + if (ret_code) + goto exit_sd_error; + sd_entry = &info->hmc_info->sd_table.sd_entry[j]; + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + /* check if all the pds in this sd are valid. If not, + * allocate a page and initialize it. 
+ */ + + /* find pd_idx and pd_lmt in this sd */ + pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, + ((j + 1) * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) { + /* update the pd table entry */ + ret_code = i40e_add_pd_table_entry(hw, + info->hmc_info, + i); + if (ret_code) { + pd_error = true; + break; + } + } + if (pd_error) { + /* remove the backing pages from pd_idx1 to i */ + while (i && (i > pd_idx1)) { + i40e_remove_pd_bp(hw, info->hmc_info, + (i - 1)); + i--; + } + } + } + if (!sd_entry->valid) { + sd_entry->valid = true; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + I40E_SET_PF_SD_ENTRY(hw, + sd_entry->u.pd_table.pd_page_addr.pa, + j, sd_entry->entry_type); + break; + case I40E_SD_TYPE_DIRECT: + I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa, + j, sd_entry->entry_type); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + goto exit; + } + } + } + goto exit; + +exit_sd_error: + /* cleanup for sd entries from j to sd_idx */ + while (j && (j > sd_idx)) { + sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; + switch (sd_entry->entry_type) { + case I40E_SD_TYPE_PAGED: + pd_idx1 = max(pd_idx, + ((j - 1) * I40E_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) { + i40e_remove_pd_bp(hw, info->hmc_info, i); + } + i40e_remove_pd_page(hw, info->hmc_info, (j - 1)); + break; + case I40E_SD_TYPE_DIRECT: + i40e_remove_sd_bp(hw, info->hmc_info, (j - 1)); + break; + default: + ret_code = I40E_ERR_INVALID_SD_TYPE; + break; + } + j--; + } +exit: + return ret_code; +} + +/** + * i40e_configure_lan_hmc - prepare the HMC backing store + * @hw: pointer to the hw structure + * @model: the model for the layout of the SD/PD tables + * + * - This function will be called once per physical function initialization. + * - This function will be called after i40e_init_lan_hmc() and before + * any LAN/FCoE HMC objects can be created. 
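[Editor's illustration, not part of the patch] i40e_create_lan_hmc_object() above uses a classic unwind pattern: if allocation fails part-way through a range, everything set up in this call is torn back down in reverse, first the page descriptors inside the failing SD, then any SDs completed before it, so the caller never sees a half-built range. A generic sketch of that pattern, independent of the i40e types:

#include <stdio.h>
#include <stdlib.h>

/* Allocate 'count' resources; on failure, free the ones already allocated
 * in reverse order and report the error. */
static int create_range(void **slots, int count)
{
        int i;

        for (i = 0; i < count; i++) {
                slots[i] = malloc(64);
                if (!slots[i])
                        goto unwind;
        }
        return 0;

unwind:
        while (i--) {                   /* frees slots[i-1] .. slots[0] */
                free(slots[i]);
                slots[i] = NULL;
        }
        return -1;
}

int main(void)
{
        void *slots[8] = { 0 };
        int i;

        if (create_range(slots, 8) == 0)
                puts("range built");
        for (i = 0; i < 8; i++)
                free(slots[i]);
        return 0;
}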
+ **/ +i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model) +{ + struct i40e_hmc_lan_create_obj_info info; + i40e_status ret_code = 0; + u8 hmc_fn_id = hw->hmc.hmc_fn_id; + struct i40e_hmc_obj_info *obj; + + /* Initialize part of the create object info struct */ + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size; + + /* Build the SD entry for the LAN objects */ + switch (model) { + case I40E_HMC_MODEL_DIRECT_PREFERRED: + case I40E_HMC_MODEL_DIRECT_ONLY: + info.entry_type = I40E_SD_TYPE_DIRECT; + /* Make one big object, a single SD */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) + goto try_type_paged; + else if (ret_code) + goto configure_lan_hmc_out; + /* else clause falls through the break */ + break; + case I40E_HMC_MODEL_PAGED_ONLY: +try_type_paged: + info.entry_type = I40E_SD_TYPE_PAGED; + /* Make one big object in the PD table */ + info.count = 1; + ret_code = i40e_create_lan_hmc_object(hw, &info); + if (ret_code) + goto configure_lan_hmc_out; + break; + default: + /* unsupported type */ + ret_code = I40E_ERR_INVALID_SD_TYPE; + hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n", + ret_code); + goto configure_lan_hmc_out; + } + + /* Configure and program the FPM registers so objects can be created */ + + /* Tx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX]; + wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt); + + /* Rx contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX]; + wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt); + + /* FCoE contexts */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX]; + wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt); + + /* FCoE filters */ + obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT]; + wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id), + (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512)); + wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt); + +configure_lan_hmc_out: + return ret_code; +} + +/** + * i40e_delete_hmc_object - remove hmc objects + * @hw: pointer to the HW structure + * @info: pointer to i40e_hmc_delete_obj_info struct + * + * This will de-populate the SDs and PDs. It frees + * the memory for PDS and backing storage. After this function is returned, + * caller should deallocate memory allocated previously for + * book-keeping information about PDs and backing storage. 
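[Editor's illustration, not part of the patch] The model switch in i40e_configure_lan_hmc() above encodes a simple preference order: try a single direct 2 MB SD first and, for the DIRECT_PREFERRED model only, fall back to a paged SD if that attempt fails. A generic control-flow sketch; the failure condition in try_create() is invented purely to drive the example:

#include <stdio.h>

enum model { DIRECT_PREFERRED, DIRECT_ONLY, PAGED_ONLY };
enum sd_type { SD_DIRECT, SD_PAGED };

/* stand-in for i40e_create_lan_hmc_object(); pretend the direct attempt
 * fails whenever no contiguous 2 MB chunk is available */
static int try_create(enum sd_type type, int have_2mb_chunk)
{
        return (type == SD_DIRECT && !have_2mb_chunk) ? -1 : 0;
}

static int configure(enum model m, int have_2mb_chunk)
{
        int err;

        if (m == DIRECT_PREFERRED || m == DIRECT_ONLY) {
                err = try_create(SD_DIRECT, have_2mb_chunk);
                if (!err || m == DIRECT_ONLY)
                        return err;
        }
        /* PAGED_ONLY, or DIRECT_PREFERRED falling back */
        return try_create(SD_PAGED, have_2mb_chunk);
}

int main(void)
{
        printf("preferred, no 2MB chunk: %d\n", configure(DIRECT_PREFERRED, 0));
        printf("direct-only, no 2MB chunk: %d\n", configure(DIRECT_ONLY, 0));
        return 0;
}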
+ **/ +static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw, + struct i40e_hmc_lan_delete_obj_info *info) +{ + i40e_status ret_code = 0; + struct i40e_hmc_pd_table *pd_table; + u32 pd_idx, pd_lmt, rel_pd_idx; + u32 sd_idx, sd_lmt; + u32 i, j; + + if (NULL == info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n"); + goto exit; + } + if (NULL == info->hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n"); + goto exit; + } + + if (NULL == info->hmc_info->sd_table.sd_entry) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n"); + goto exit; + } + + if (NULL == info->hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n"); + goto exit; + } + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT; + hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n", + ret_code); + goto exit; + } + + I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, + &pd_lmt); + + for (j = pd_idx; j < pd_lmt; j++) { + sd_idx = j / I40E_HMC_PD_CNT_IN_SD; + + if (I40E_SD_TYPE_PAGED != + info->hmc_info->sd_table.sd_entry[sd_idx].entry_type) + continue; + + rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD; + + pd_table = + &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + if (pd_table->pd_entry[rel_pd_idx].valid) { + ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j); + if (ret_code) + goto exit; + } + } + + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + ret_code = I40E_ERR_INVALID_SD_INDEX; + goto exit; + } + + for (i = sd_idx; i < sd_lmt; i++) { + if (!info->hmc_info->sd_table.sd_entry[i].valid) + continue; + switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { + case I40E_SD_TYPE_DIRECT: + ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i); + if (ret_code) + goto exit; + break; + case I40E_SD_TYPE_PAGED: + ret_code = i40e_remove_pd_page(hw, info->hmc_info, i); + if (ret_code) + goto exit; + break; + default: + break; + } + } +exit: + return ret_code; +} + +/** + * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory + * @hw: pointer to the hw structure + * + * This must be called by drivers as they are shutting down and being + * removed from the OS. 
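[Editor's illustration, not part of the patch] i40e_delete_lan_hmc_object() above tears the range down in two passes: first every valid page descriptor inside paged SDs is removed (the flat PD index is mapped back to its SD with a divide by 512), then the SDs themselves are removed according to their type. A compact restatement of that iteration order with toy types in place of the driver structures:

#include <stdint.h>
#include <stdio.h>

#define PD_PER_SD 512u

enum sd_type { SD_INVALID, SD_PAGED, SD_DIRECT };

struct toy_sd { enum sd_type type; int valid; };

static void delete_range(struct toy_sd *sds,
                         uint32_t pd_start, uint32_t pd_end,
                         uint32_t sd_start, uint32_t sd_end)
{
        uint32_t i, j;

        /* pass 1: page descriptors, only meaningful inside paged SDs */
        for (j = pd_start; j < pd_end; j++) {
                uint32_t sd_idx = j / PD_PER_SD;

                if (sds[sd_idx].type != SD_PAGED)
                        continue;
                printf("remove PD %u (SD %u, slot %u)\n",
                       j, sd_idx, j % PD_PER_SD);
        }

        /* pass 2: the segment descriptors themselves */
        for (i = sd_start; i < sd_end; i++) {
                if (!sds[i].valid)
                        continue;
                printf("remove %s SD %u\n",
                       sds[i].type == SD_PAGED ? "paged" : "direct", i);
        }
}

int main(void)
{
        struct toy_sd sds[2] = { { SD_PAGED, 1 }, { SD_DIRECT, 1 } };

        delete_range(sds, 510, 514, 0, 2);
        return 0;
}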
+ **/ +i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw) +{ + struct i40e_hmc_lan_delete_obj_info info; + i40e_status ret_code; + + info.hmc_info = &hw->hmc; + info.rsrc_type = I40E_HMC_LAN_FULL; + info.start_idx = 0; + info.count = 1; + + /* delete the object */ + ret_code = i40e_delete_lan_hmc_object(hw, &info); + + /* free the SD table entry for LAN */ + i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr); + hw->hmc.sd_table.sd_cnt = 0; + hw->hmc.sd_table.sd_entry = NULL; + + /* free memory used for hmc_obj */ + i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem); + hw->hmc.hmc_obj = NULL; + + return ret_code; +} + +#define I40E_HMC_STORE(_struct, _ele) \ + offsetof(struct _struct, _ele), \ + FIELD_SIZEOF(struct _struct, _ele) + +struct i40e_context_ele { + u16 offset; + u16 size_of; + u16 width; + u16 lsb; +}; + +/* LAN Tx Queue Context */ +static struct i40e_context_ele i40e_hmc_txq_ce_info[] = { + /* Field Width LSB */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 }, +/* line 1 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 }, +/* line 7 */ + {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) }, + {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) }, + { 0 } +}; + +/* LAN Rx Queue Context */ +static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = { + /* Field Width LSB */ + { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 }, + { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 }, + { 0 } +}; + +/** + * i40e_write_byte - replace HMC context byte + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * 
@src: the struct to be read from + **/ +static void i40e_write_byte(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u8 src_byte, dest_byte, mask; + u8 *from, *dest; + u16 shift_width; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = ((u8)1 << ce_info->width) - 1; + + src_byte = *from; + src_byte &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_byte <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_byte, dest, sizeof(dest_byte)); + + dest_byte &= ~mask; /* get the bits not changing */ + dest_byte |= src_byte; /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_byte, sizeof(dest_byte)); +} + +/** + * i40e_write_word - replace HMC context word + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_word(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u16 src_word, mask; + u8 *from, *dest; + u16 shift_width; + __le16 dest_word; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + mask = ((u16)1 << ce_info->width) - 1; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_word = *(u16 *)from; + src_word &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_word <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_word, dest, sizeof(dest_word)); + + dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ + dest_word |= cpu_to_le16(src_word); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_word, sizeof(dest_word)); +} + +/** + * i40e_write_dword - replace HMC context dword + * @hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_dword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u32 src_dword, mask; + u8 *from, *dest; + u16 shift_width; + __le32 dest_dword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 32 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 5 bits so the shift will do nothing + */ + if (ce_info->width < 32) + mask = ((u32)1 << ce_info->width) - 1; + else + mask = ~(u32)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_dword = *(u32 *)from; + src_dword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_dword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_dword, dest, sizeof(dest_dword)); + + dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ + dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_dword, sizeof(dest_dword)); +} + +/** + * i40e_write_qword - replace HMC context qword + * 
@hmc_bits: pointer to the HMC memory + * @ce_info: a description of the struct to be read from + * @src: the struct to be read from + **/ +static void i40e_write_qword(u8 *hmc_bits, + struct i40e_context_ele *ce_info, + u8 *src) +{ + u64 src_qword, mask; + u8 *from, *dest; + u16 shift_width; + __le64 dest_qword; + + /* copy from the next struct field */ + from = src + ce_info->offset; + + /* prepare the bits and mask */ + shift_width = ce_info->lsb % 8; + + /* if the field width is exactly 64 on an x86 machine, then the shift + * operation will not work because the SHL instructions count is masked + * to 6 bits so the shift will do nothing + */ + if (ce_info->width < 64) + mask = ((u64)1 << ce_info->width) - 1; + else + mask = ~(u64)0; + + /* don't swizzle the bits until after the mask because the mask bits + * will be in a different bit position on big endian machines + */ + src_qword = *(u64 *)from; + src_qword &= mask; + + /* shift to correct alignment */ + mask <<= shift_width; + src_qword <<= shift_width; + + /* get the current bits from the target bit string */ + dest = hmc_bits + (ce_info->lsb / 8); + + memcpy(&dest_qword, dest, sizeof(dest_qword)); + + dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ + dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ + + /* put it all back */ + memcpy(dest, &dest_qword, sizeof(dest_qword)); +} + +/** + * i40e_clear_hmc_context - zero out the HMC context bits + * @hw: the hardware struct + * @context_bytes: pointer to the context bit array (DMA memory) + * @hmc_type: the type of HMC resource + **/ +static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw, + u8 *context_bytes, + enum i40e_hmc_lan_rsrc_type hmc_type) +{ + /* clean the bit array */ + memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size); + + return 0; +} + +/** + * i40e_set_hmc_context - replace HMC context bits + * @context_bytes: pointer to the context bit array + * @ce_info: a description of the struct to be filled + * @dest: the struct to be filled + **/ +static i40e_status i40e_set_hmc_context(u8 *context_bytes, + struct i40e_context_ele *ce_info, + u8 *dest) +{ + int f; + + for (f = 0; ce_info[f].width != 0; f++) { + + /* we have to deal with each element of the HMC using the + * correct size so that we are correct regardless of the + * endianness of the machine + */ + switch (ce_info[f].size_of) { + case 1: + i40e_write_byte(context_bytes, &ce_info[f], dest); + break; + case 2: + i40e_write_word(context_bytes, &ce_info[f], dest); + break; + case 4: + i40e_write_dword(context_bytes, &ce_info[f], dest); + break; + case 8: + i40e_write_qword(context_bytes, &ce_info[f], dest); + break; + } + } + + return 0; +} + +/** + * i40e_hmc_get_object_va - retrieves an object's virtual address + * @hmc_info: pointer to i40e_hmc_info struct + * @object_base: pointer to u64 to get the va + * @rsrc_type: the hmc resource type + * @obj_idx: hmc object index + * + * This function retrieves the object's virtual address from the object + * base pointer. This function is used for LAN Queue contexts. 
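[Editor's illustration, not part of the patch] All four i40e_write_* helpers above perform the same read-modify-write: mask the source field to its declared width, shift it to its LSB position, and merge it into a little-endian context image without disturbing neighbouring bits. A bit-at-a-time equivalent is sketched below (the driver achieves the same net effect a word/dword/qword at a time using cpu_to_le*()); pack_bits() is an invented helper name:

#include <stdint.h>
#include <stdio.h>

/* Set 'width' bits starting at bit position 'lsb' of a little-endian bit
 * image to 'val', preserving every other bit (read-modify-write). */
static void pack_bits(uint8_t *image, unsigned int lsb, unsigned int width,
                      uint64_t val)
{
        unsigned int i;

        for (i = 0; i < width; i++) {
                unsigned int bit = lsb + i;
                uint8_t mask = (uint8_t)(1u << (bit % 8));

                if ((val >> i) & 1)
                        image[bit / 8] |= mask;
                else
                        image[bit / 8] &= (uint8_t)~mask;
        }
}

int main(void)
{
        uint8_t ctx[4] = { 0 };

        /* e.g. a 13-bit field whose LSB sits at bit 6, crossing byte limits */
        pack_bits(ctx, 6, 13, 0x1abc);
        printf("%02x %02x %02x\n", ctx[0], ctx[1], ctx[2]);
        return 0;
}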
+ **/ +static +i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info, + u8 **object_base, + enum i40e_hmc_lan_rsrc_type rsrc_type, + u32 obj_idx) +{ + u32 obj_offset_in_sd, obj_offset_in_pd; + i40e_status ret_code = 0; + struct i40e_hmc_sd_entry *sd_entry; + struct i40e_hmc_pd_entry *pd_entry; + u32 pd_idx, pd_lmt, rel_pd_idx; + u64 obj_offset_in_fpm; + u32 sd_idx, sd_lmt; + + if (NULL == hmc_info) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n"); + goto exit; + } + if (NULL == hmc_info->hmc_obj) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n"); + goto exit; + } + if (NULL == object_base) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n"); + goto exit; + } + if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) { + ret_code = I40E_ERR_BAD_PTR; + hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n"); + goto exit; + } + if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) { + hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n", + ret_code); + ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX; + goto exit; + } + /* find sd index and limit */ + I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &sd_idx, &sd_lmt); + + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base + + hmc_info->hmc_obj[rsrc_type].size * obj_idx; + + if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) { + I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1, + &pd_idx, &pd_lmt); + rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD; + pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx]; + obj_offset_in_pd = (u32)(obj_offset_in_fpm % + I40E_HMC_PAGED_BP_SIZE); + *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd; + } else { + obj_offset_in_sd = (u32)(obj_offset_in_fpm % + I40E_HMC_DIRECT_BP_SIZE); + *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd; + } +exit: + return ret_code; +} + +/** + * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX); +} + +/** + * i40e_set_lan_tx_queue_context - set the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_TX, queue); + if (err < 0) + return err; + + return i40e_set_hmc_context(context_bytes, + i40e_hmc_txq_ce_info, (u8 *)s); +} + +/** + * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue + * @hw: the hardware struct + * @queue: the queue we care about + **/ +i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX); +} + +/** + * i40e_set_lan_rx_queue_context - set the HMC context for the queue + * @hw: the 
hardware struct + * @queue: the queue we care about + * @s: the struct to be filled + **/ +i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s) +{ + i40e_status err; + u8 *context_bytes; + + err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes, + I40E_HMC_LAN_RX, queue); + if (err < 0) + return err; + + return i40e_set_hmc_context(context_bytes, + i40e_hmc_rxq_ce_info, (u8 *)s); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h new file mode 100644 index 000000000..e74128db5 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -0,0 +1,181 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_LAN_HMC_H_ +#define _I40E_LAN_HMC_H_ + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +/* HMC element context information */ + +/* Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. + */ +struct i40e_hmc_obj_rxq { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ + u64 base; + u16 qlen; +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + u16 dbuff; /* bigger than needed, see above for reason */ +#define I40E_RXQ_CTX_HBUFF_SHIFT 6 + u16 hbuff; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 fc_ena; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ +}; + +/* Tx queue context data +* +* The sizes of the variables may be larger than needed due to crossing byte +* boundaries. If we do not have the width of the variable set to the correct +* size then we could end up shifting bits off the top of the variable when the +* variable is at the top of a byte and crosses over into the next byte. 
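[Editor's illustration, not part of the patch] i40e_hmc_get_object_va() above locates a queue context by pure offset arithmetic: the object's byte offset in FPM space is base + size * index, and that offset is reduced modulo the backing-page size (4 KB for paged SDs, 2 MB for direct SDs) to find the offset inside the mapped page. Standalone, with example numbers that are not the hardware's real context sizes:

#include <stdint.h>
#include <stdio.h>

#define DIRECT_BP_SIZE 0x200000ull   /* 2 MB direct backing page */
#define PAGED_BP_SIZE  4096ull       /* 4 KB paged backing page */

/* Return which backing page an object lands in and its offset inside it. */
static void locate_object(uint64_t base, uint64_t obj_size, uint32_t idx,
                          uint64_t bp_size, uint64_t *page, uint64_t *offset)
{
        uint64_t fpm_off = base + obj_size * idx;

        *page = fpm_off / bp_size;
        *offset = fpm_off % bp_size;
}

int main(void)
{
        uint64_t page, off;

        /* e.g. object #200 of 32 bytes in a region based at FPM offset 0x1000 */
        locate_object(0x1000, 32, 200, PAGED_BP_SIZE, &page, &off);
        printf("paged: page %llu offset %llu\n",
               (unsigned long long)page, (unsigned long long)off);

        locate_object(0x1000, 32, 200, DIRECT_BP_SIZE, &page, &off);
        printf("direct: SD %llu offset %llu\n",
               (unsigned long long)page, (unsigned long long)off);
        return 0;
}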
+*/ +struct i40e_hmc_obj_txq { + u16 head; + u8 new_context; + u64 base; + u8 fc_ena; + u8 timesync_ena; + u8 fd_ena; + u8 alt_vlan_ena; + u16 thead_wb; + u8 cpuid; + u8 head_wb_ena; + u16 qlen; + u8 tphrdesc_ena; + u8 tphrpacket_ena; + u8 tphwdesc_ena; + u64 head_wb_addr; + u32 crc; + u16 rdylist; + u8 rdylist_act; +}; + +/* for hsplit_0 field of Rx HMC context */ +enum i40e_hmc_obj_rx_hsplit_0 { + I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* fcoe_cntx and fcoe_filt are for debugging purpose only */ +struct i40e_hmc_obj_fcoe_cntx { + u32 rsv[32]; +}; + +struct i40e_hmc_obj_fcoe_filt { + u32 rsv[8]; +}; + +/* Context sizes for LAN objects */ +enum i40e_hmc_lan_object_size { + I40E_HMC_LAN_OBJ_SZ_8 = 0x3, + I40E_HMC_LAN_OBJ_SZ_16 = 0x4, + I40E_HMC_LAN_OBJ_SZ_32 = 0x5, + I40E_HMC_LAN_OBJ_SZ_64 = 0x6, + I40E_HMC_LAN_OBJ_SZ_128 = 0x7, + I40E_HMC_LAN_OBJ_SZ_256 = 0x8, + I40E_HMC_LAN_OBJ_SZ_512 = 0x9, +}; + +#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 +#define I40E_HMC_OBJ_SIZE_TXQ 128 +#define I40E_HMC_OBJ_SIZE_RXQ 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 + +enum i40e_hmc_lan_rsrc_type { + I40E_HMC_LAN_FULL = 0, + I40E_HMC_LAN_TX = 1, + I40E_HMC_LAN_RX = 2, + I40E_HMC_FCOE_CTX = 3, + I40E_HMC_FCOE_FILT = 4, + I40E_HMC_LAN_MAX = 5 +}; + +enum i40e_hmc_model { + I40E_HMC_MODEL_DIRECT_PREFERRED = 0, + I40E_HMC_MODEL_DIRECT_ONLY = 1, + I40E_HMC_MODEL_PAGED_ONLY = 2, + I40E_HMC_MODEL_UNKNOWN, +}; + +struct i40e_hmc_lan_create_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; + enum i40e_sd_entry_type entry_type; + u64 direct_mode_sz; +}; + +struct i40e_hmc_lan_delete_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; +}; + +i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); + +#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c new file mode 100644 index 000000000..5b5bea159 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -0,0 +1,10368 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* Local includes */ +#include "i40e.h" +#include "i40e_diag.h" +#ifdef CONFIG_I40E_VXLAN +#include +#endif + +const char i40e_driver_name[] = "i40e"; +static const char i40e_driver_string[] = + "Intel(R) Ethernet Connection XL710 Network Driver"; + +#define DRV_KERN "-k" + +#define DRV_VERSION_MAJOR 1 +#define DRV_VERSION_MINOR 3 +#define DRV_VERSION_BUILD 2 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." \ + __stringify(DRV_VERSION_BUILD) DRV_KERN +const char i40e_driver_version_str[] = DRV_VERSION; +static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; + +/* a bit of forward declarations */ +static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); +static void i40e_handle_reset_warning(struct i40e_pf *pf); +static int i40e_add_vsi(struct i40e_vsi *vsi); +static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); +static int i40e_setup_misc_vector(struct i40e_pf *pf); +static void i40e_determine_queue_usage(struct i40e_pf *pf); +static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_fdir_sb_setup(struct i40e_pf *pf); +static int i40e_veb_get_bw_info(struct i40e_veb *veb); + +/* i40e_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id i40e_pci_tbl[] = { + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, i40e_pci_tbl); + +#define I40E_MAX_VF_COUNT 128 +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/** + * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem, + u64 size, u32 alignment) +{ + struct i40e_pf *pf = (struct i40e_pf *)hw->back; + + mem->size = ALIGN(size, alignment); + mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, + &mem->pa, GFP_KERNEL); + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * i40e_free_dma_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to 
free + **/ +int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +{ + struct i40e_pf *pf = (struct i40e_pf *)hw->back; + + dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); + mem->va = NULL; + mem->pa = 0; + mem->size = 0; + + return 0; +} + +/** + * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + **/ +int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem, + u32 size) +{ + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (!mem->va) + return -ENOMEM; + + return 0; +} + +/** + * i40e_free_virt_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) +{ + /* it's ok to kfree a NULL pointer */ + kfree(mem->va); + mem->va = NULL; + mem->size = 0; + + return 0; +} + +/** + * i40e_get_lump - find a lump of free generic resource + * @pf: board private structure + * @pile: the pile of resource to search + * @needed: the number of items needed + * @id: an owner id to stick on the items assigned + * + * Returns the base item index of the lump, or negative for error + * + * The search_hint trick and lack of advanced fit-finding only work + * because we're highly likely to have all the same size lump requests. + * Linear search time and any fragmentation should be minimal. + **/ +static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, + u16 needed, u16 id) +{ + int ret = -ENOMEM; + int i, j; + + if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) { + dev_info(&pf->pdev->dev, + "param err: pile=%p needed=%d id=0x%04x\n", + pile, needed, id); + return -EINVAL; + } + + /* start the linear search with an imperfect hint */ + i = pile->search_hint; + while (i < pile->num_entries) { + /* skip already allocated entries */ + if (pile->list[i] & I40E_PILE_VALID_BIT) { + i++; + continue; + } + + /* do we have enough in this lump? 
*/ + for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) { + if (pile->list[i+j] & I40E_PILE_VALID_BIT) + break; + } + + if (j == needed) { + /* there was enough, so assign it to the requestor */ + for (j = 0; j < needed; j++) + pile->list[i+j] = id | I40E_PILE_VALID_BIT; + ret = i; + pile->search_hint = i + j; + break; + } else { + /* not enough, so skip over it and continue looking */ + i += j; + } + } + + return ret; +} + +/** + * i40e_put_lump - return a lump of generic resource + * @pile: the pile of resource to search + * @index: the base item index + * @id: the owner id of the items assigned + * + * Returns the count of items in the lump + **/ +static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) +{ + int valid_id = (id | I40E_PILE_VALID_BIT); + int count = 0; + int i; + + if (!pile || index >= pile->num_entries) + return -EINVAL; + + for (i = index; + i < pile->num_entries && pile->list[i] == valid_id; + i++) { + pile->list[i] = 0; + count++; + } + + if (count && index < pile->search_hint) + pile->search_hint = index; + + return count; +} + +/** + * i40e_find_vsi_from_id - searches for the vsi with the given id + * @pf - the pf structure to search for the vsi + * @id - id of the vsi it is searching for + **/ +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->id == id)) + return pf->vsi[i]; + + return NULL; +} + +/** + * i40e_service_event_schedule - Schedule the service task to wake up + * @pf: board private structure + * + * If not already scheduled, this puts the task into the work queue + **/ +static void i40e_service_event_schedule(struct i40e_pf *pf) +{ + if (!test_bit(__I40E_DOWN, &pf->state) && + !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && + !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) + schedule_work(&pf->service_task); +} + +/** + * i40e_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + * + * If any port has noticed a Tx timeout, it is likely that the whole + * device is munged, not just the one netdev port, so go for the full + * reset. 
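[Editor's illustration, not part of the patch] i40e_get_lump() above is a small linear-scan allocator over an array of u16 slots: it looks for 'needed' consecutive entries without the valid bit set and stamps each of them with the owner id plus that bit. A stripped-down version of the same search (the search_hint optimisation is omitted, and the VALID_BIT value here is a stand-in, not the driver's definition):

#include <stdint.h>
#include <stdio.h>

#define VALID_BIT 0x8000u   /* plays the role of I40E_PILE_VALID_BIT */

/* Find 'needed' consecutive free slots, mark them with 'id', and return
 * the base index, or -1 if no run is long enough. */
static int get_lump(uint16_t *list, int num_entries, int needed, uint16_t id)
{
        int i = 0, j;

        while (i < num_entries) {
                if (list[i] & VALID_BIT) {      /* slot taken, keep scanning */
                        i++;
                        continue;
                }
                for (j = 0; j < needed && i + j < num_entries; j++)
                        if (list[i + j] & VALID_BIT)
                                break;
                if (j == needed) {              /* long enough: claim the run */
                        for (j = 0; j < needed; j++)
                                list[i + j] = id | VALID_BIT;
                        return i;
                }
                i += j;                         /* too short, skip past the blocker */
        }
        return -1;
}

int main(void)
{
        uint16_t pile[16] = { 0 };

        pile[2] = 1 | VALID_BIT;                /* pretend slot 2 is owned already */
        printf("lump of 4 starts at %d\n", get_lump(pile, 16, 4, 7)); /* 3 */
        return 0;
}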
+ **/ +#ifdef I40E_FCOE +void i40e_tx_timeout(struct net_device *netdev) +#else +static void i40e_tx_timeout(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + pf->tx_timeout_count++; + + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) + pf->tx_timeout_recovery_level = 1; + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d\n", + pf->tx_timeout_recovery_level); + + switch (pf->tx_timeout_recovery_level) { + case 0: + /* disable and re-enable queues for the VSI */ + if (in_interrupt()) { + set_bit(__I40E_REINIT_REQUESTED, &pf->state); + set_bit(__I40E_REINIT_REQUESTED, &vsi->state); + } else { + i40e_vsi_reinit_locked(vsi); + } + break; + case 1: + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + break; + case 2: + set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + break; + case 3: + set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); + set_bit(__I40E_DOWN_REQUESTED, &pf->state); + set_bit(__I40E_DOWN_REQUESTED, &vsi->state); + break; + } + i40e_service_event_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +/** + * i40e_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * i40e_get_vsi_stats_struct - Get System Network Statistics + * @vsi: the VSI we care about + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. + **/ +struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi) +{ + return &vsi->net_stats; +} + +/** + * i40e_get_netdev_stats_struct - Get statistics for netdev interface + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. 
+ **/ +#ifdef I40E_FCOE +struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( + struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( + struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_ring *tx_ring, *rx_ring; + struct i40e_vsi *vsi = np->vsi; + struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); + int i; + + if (test_bit(__I40E_DOWN, &vsi->state)) + return stats; + + if (!vsi->tx_rings) + return stats; + + rcu_read_lock(); + for (i = 0; i < vsi->num_queue_pairs; i++) { + u64 bytes, packets; + unsigned int start; + + tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + if (!tx_ring) + continue; + + do { + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; + rx_ring = &tx_ring[1]; + + do { + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + rcu_read_unlock(); + + /* following stats updated by i40e_watchdog_subtask() */ + stats->multicast = vsi_stats->multicast; + stats->tx_errors = vsi_stats->tx_errors; + stats->tx_dropped = vsi_stats->tx_dropped; + stats->rx_errors = vsi_stats->rx_errors; + stats->rx_crc_errors = vsi_stats->rx_crc_errors; + stats->rx_length_errors = vsi_stats->rx_length_errors; + + return stats; +} + +/** + * i40e_vsi_reset_stats - Resets all stats of the given vsi + * @vsi: the VSI to have its stats reset + **/ +void i40e_vsi_reset_stats(struct i40e_vsi *vsi) +{ + struct rtnl_link_stats64 *ns; + int i; + + if (!vsi) + return; + + ns = i40e_get_vsi_stats_struct(vsi); + memset(ns, 0, sizeof(*ns)); + memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); + memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); + memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); + if (vsi->rx_rings && vsi->rx_rings[0]) { + for (i = 0; i < vsi->num_queue_pairs; i++) { + memset(&vsi->rx_rings[i]->stats, 0 , + sizeof(vsi->rx_rings[i]->stats)); + memset(&vsi->rx_rings[i]->rx_stats, 0 , + sizeof(vsi->rx_rings[i]->rx_stats)); + memset(&vsi->tx_rings[i]->stats, 0 , + sizeof(vsi->tx_rings[i]->stats)); + memset(&vsi->tx_rings[i]->tx_stats, 0, + sizeof(vsi->tx_rings[i]->tx_stats)); + } + } + vsi->stat_offsets_loaded = false; +} + +/** + * i40e_pf_reset_stats - Reset all of the stats for the given PF + * @pf: the PF to be reset + **/ +void i40e_pf_reset_stats(struct i40e_pf *pf) +{ + int i; + + memset(&pf->stats, 0, sizeof(pf->stats)); + memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); + pf->stat_offsets_loaded = false; + + for (i = 0; i < I40E_MAX_VEB; i++) { + if (pf->veb[i]) { + memset(&pf->veb[i]->stats, 0, + sizeof(pf->veb[i]->stats)); + memset(&pf->veb[i]->stats_offsets, 0, + sizeof(pf->veb[i]->stats_offsets)); + pf->veb[i]->stat_offsets_loaded = false; + } + } +} + +/** + * i40e_stat_update48 - read and update a 48 bit stat from the chip + * @hw: ptr to the hardware info + * @hireg: the high 32 bit reg to read + * @loreg: the low 32 bit reg to read + * @offset_loaded: has the initial offset been loaded yet + * @offset: ptr to current offset value + * @stat: ptr to the stat + * + * Since the 
device stats are not reset at PFReset, they likely will not + * be zeroed when the driver starts. We'll save the first values read + * and use them as offsets to be subtracted from the raw values in order + * to report stats that count from zero. In the process, we also manage + * the potential roll-over. + **/ +static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, + bool offset_loaded, u64 *offset, u64 *stat) +{ + u64 new_data; + + if (hw->device_id == I40E_DEV_ID_QEMU) { + new_data = rd32(hw, loreg); + new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; + } else { + new_data = rd64(hw, loreg); + } + if (!offset_loaded) + *offset = new_data; + if (likely(new_data >= *offset)) + *stat = new_data - *offset; + else + *stat = (new_data + ((u64)1 << 48)) - *offset; + *stat &= 0xFFFFFFFFFFFFULL; +} + +/** + * i40e_stat_update32 - read and update a 32 bit stat from the chip + * @hw: ptr to the hardware info + * @reg: the hw reg to read + * @offset_loaded: has the initial offset been loaded yet + * @offset: ptr to current offset value + * @stat: ptr to the stat + **/ +static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, + bool offset_loaded, u64 *offset, u64 *stat) +{ + u32 new_data; + + new_data = rd32(hw, reg); + if (!offset_loaded) + *offset = new_data; + if (likely(new_data >= *offset)) + *stat = (u32)(new_data - *offset); + else + *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); +} + +/** + * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. + * @vsi: the VSI to be updated + **/ +void i40e_update_eth_stats(struct i40e_vsi *vsi) +{ + int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx); + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_eth_stats *oes; + struct i40e_eth_stats *es; /* device's eth stats */ + + es = &vsi->eth_stats; + oes = &vsi->eth_stats_offsets; + + /* Gather up the stats that the hw collects */ + i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_errors, &es->tx_errors); + i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_discards, &es->rx_discards); + i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_unknown_protocol, &es->rx_unknown_protocol); + i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_errors, &es->tx_errors); + + i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), + I40E_GLV_GORCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_bytes, &es->rx_bytes); + i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), + I40E_GLV_UPRCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_unicast, &es->rx_unicast); + i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), + I40E_GLV_MPRCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_multicast, &es->rx_multicast); + i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), + I40E_GLV_BPRCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->rx_broadcast, &es->rx_broadcast); + + i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), + I40E_GLV_GOTCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_bytes, &es->tx_bytes); + i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), + I40E_GLV_UPTCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_unicast, &es->tx_unicast); + i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), + I40E_GLV_MPTCL(stat_idx), + vsi->stat_offsets_loaded, + &oes->tx_multicast, &es->tx_multicast); + i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), + I40E_GLV_BPTCL(stat_idx), + vsi->stat_offsets_loaded, + 
&oes->tx_broadcast, &es->tx_broadcast); + vsi->stat_offsets_loaded = true; +} + +/** + * i40e_update_veb_stats - Update Switch component statistics + * @veb: the VEB being updated + **/ +static void i40e_update_veb_stats(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_eth_stats *oes; + struct i40e_eth_stats *es; /* device's eth stats */ + int idx = 0; + + idx = veb->stats_idx; + es = &veb->stats; + oes = &veb->stats_offsets; + + /* Gather up the stats that the hw collects */ + i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), + veb->stat_offsets_loaded, + &oes->tx_discards, &es->tx_discards); + if (hw->revision_id > 0) + i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), + veb->stat_offsets_loaded, + &oes->rx_unknown_protocol, + &es->rx_unknown_protocol); + i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), + veb->stat_offsets_loaded, + &oes->rx_bytes, &es->rx_bytes); + i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx), + veb->stat_offsets_loaded, + &oes->rx_unicast, &es->rx_unicast); + i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx), + veb->stat_offsets_loaded, + &oes->rx_multicast, &es->rx_multicast); + i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx), + veb->stat_offsets_loaded, + &oes->rx_broadcast, &es->rx_broadcast); + + i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_bytes, &es->tx_bytes); + i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_unicast, &es->tx_unicast); + i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_multicast, &es->tx_multicast); + i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx), + veb->stat_offsets_loaded, + &oes->tx_broadcast, &es->tx_broadcast); + veb->stat_offsets_loaded = true; +} + +#ifdef I40E_FCOE +/** + * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters. 
+ * @vsi: the VSI that is capable of doing FCoE + **/ +static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_fcoe_stats *ofs; + struct i40e_fcoe_stats *fs; /* device's eth stats */ + int idx; + + if (vsi->type != I40E_VSI_FCOE) + return; + + idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET; + fs = &vsi->fcoe_stats; + ofs = &vsi->fcoe_stats_offsets; + + i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets); + i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords); + i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped); + i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets); + i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords); + i40e_stat_update32(hw, I40E_GL_FCOECRC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc); + i40e_stat_update32(hw, I40E_GL_FCOELAST(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->fcoe_last_error, &fs->fcoe_last_error); + i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx), + vsi->fcoe_stat_offsets_loaded, + &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count); + + vsi->fcoe_stat_offsets_loaded = true; +} + +#endif +/** + * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode + * @pf: the corresponding PF + * + * Update the Rx XOFF counter (PAUSE frames) in link flow control mode + **/ +static void i40e_update_link_xoff_rx(struct i40e_pf *pf) +{ + struct i40e_hw_port_stats *osd = &pf->stats_offsets; + struct i40e_hw_port_stats *nsd = &pf->stats; + struct i40e_hw *hw = &pf->hw; + u64 xoff = 0; + u16 i, v; + + if ((hw->fc.current_mode != I40E_FC_FULL) && + (hw->fc.current_mode != I40E_FC_RX_PAUSE)) + return; + + xoff = nsd->link_xoff_rx; + i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xoff_rx, &nsd->link_xoff_rx); + + /* No new LFC xoff rx */ + if (!(nsd->link_xoff_rx - xoff)) + return; + + /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + + if (!vsi || !vsi->tx_rings[0]) + continue; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *ring = vsi->tx_rings[i]; + clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); + } + } +} + +/** + * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode + * @pf: the corresponding PF + * + * Update the Rx XOFF counter (PAUSE frames) in PFC mode + **/ +static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) +{ + struct i40e_hw_port_stats *osd = &pf->stats_offsets; + struct i40e_hw_port_stats *nsd = &pf->stats; + bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false}; + struct i40e_dcbx_config *dcb_cfg; + struct i40e_hw *hw = &pf->hw; + u16 i, v; + u8 tc; + + dcb_cfg = &hw->local_dcbx_config; + + /* See if DCB enabled with PFC TC */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED) || + !(dcb_cfg->pfc.pfcenable)) { + i40e_update_link_xoff_rx(pf); + return; + } + + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + u64 prio_xoff = nsd->priority_xoff_rx[i]; + i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i), + pf->stat_offsets_loaded, + 
&osd->priority_xoff_rx[i], + &nsd->priority_xoff_rx[i]); + + /* No new PFC xoff rx */ + if (!(nsd->priority_xoff_rx[i] - prio_xoff)) + continue; + /* Get the TC for given priority */ + tc = dcb_cfg->etscfg.prioritytable[i]; + xoff[tc] = true; + } + + /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + + if (!vsi || !vsi->tx_rings[0]) + continue; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *ring = vsi->tx_rings[i]; + + tc = ring->dcb_tc; + if (xoff[tc]) + clear_bit(__I40E_HANG_CHECK_ARMED, + &ring->state); + } + } +} + +/** + * i40e_update_vsi_stats - Update the vsi statistics counters. + * @vsi: the VSI to be updated + * + * There are a few instances where we store the same stat in a + * couple of different structs. This is partly because we have + * the netdev stats that need to be filled out, which is slightly + * different from the "eth_stats" defined by the chip and used in + * VF communications. We sort it out here. + **/ +static void i40e_update_vsi_stats(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct rtnl_link_stats64 *ons; + struct rtnl_link_stats64 *ns; /* netdev stats */ + struct i40e_eth_stats *oes; + struct i40e_eth_stats *es; /* device's eth stats */ + u32 tx_restart, tx_busy; + struct i40e_ring *p; + u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u16 q; + + if (test_bit(__I40E_DOWN, &vsi->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) + return; + + ns = i40e_get_vsi_stats_struct(vsi); + ons = &vsi->net_stats_offsets; + es = &vsi->eth_stats; + oes = &vsi->eth_stats_offsets; + + /* Gather up the netdev and vsi stats that the driver collects + * on the fly during packet processing + */ + rx_b = rx_p = 0; + tx_b = tx_p = 0; + tx_restart = tx_busy = 0; + rx_page = 0; + rx_buf = 0; + rcu_read_lock(); + for (q = 0; q < vsi->num_queue_pairs; q++) { + /* locate Tx ring */ + p = ACCESS_ONCE(vsi->tx_rings[q]); + + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + packets = p->stats.packets; + bytes = p->stats.bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + tx_b += bytes; + tx_p += packets; + tx_restart += p->tx_stats.restart_queue; + tx_busy += p->tx_stats.tx_busy; + + /* Rx queue is part of the same block as Tx queue */ + p = &p[1]; + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + packets = p->stats.packets; + bytes = p->stats.bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + rx_b += bytes; + rx_p += packets; + rx_buf += p->rx_stats.alloc_buff_failed; + rx_page += p->rx_stats.alloc_page_failed; + } + rcu_read_unlock(); + vsi->tx_restart = tx_restart; + vsi->tx_busy = tx_busy; + vsi->rx_page_failed = rx_page; + vsi->rx_buf_failed = rx_buf; + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + + /* update netdev stats from eth stats */ + i40e_update_eth_stats(vsi); + ons->tx_errors = oes->tx_errors; + ns->tx_errors = es->tx_errors; + ons->multicast = oes->rx_multicast; + ns->multicast = es->rx_multicast; + ons->rx_dropped = oes->rx_discards; + ns->rx_dropped = es->rx_discards; + ons->tx_dropped = oes->tx_discards; + ns->tx_dropped = es->tx_discards; + + /* pull in a couple PF stats if this is the main vsi */ + if (vsi == pf->vsi[pf->lan_vsi]) { + ns->rx_crc_errors = pf->stats.crc_errors; + ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; + ns->rx_length_errors = pf->stats.rx_length_errors; 
+ } +} + +/** + * i40e_update_pf_stats - Update the PF statistics counters. + * @pf: the PF to be updated + **/ +static void i40e_update_pf_stats(struct i40e_pf *pf) +{ + struct i40e_hw_port_stats *osd = &pf->stats_offsets; + struct i40e_hw_port_stats *nsd = &pf->stats; + struct i40e_hw *hw = &pf->hw; + u32 val; + int i; + + i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), + I40E_GLPRT_GORCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_bytes, &nsd->eth.rx_bytes); + i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), + I40E_GLPRT_GOTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_bytes, &nsd->eth.tx_bytes); + i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_discards, + &nsd->eth.rx_discards); + i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), + I40E_GLPRT_UPRCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_unicast, + &nsd->eth.rx_unicast); + i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), + I40E_GLPRT_MPRCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_multicast, + &nsd->eth.rx_multicast); + i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), + I40E_GLPRT_BPRCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.rx_broadcast, + &nsd->eth.rx_broadcast); + i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), + I40E_GLPRT_UPTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_unicast, + &nsd->eth.tx_unicast); + i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), + I40E_GLPRT_MPTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_multicast, + &nsd->eth.tx_multicast); + i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), + I40E_GLPRT_BPTCL(hw->port), + pf->stat_offsets_loaded, + &osd->eth.tx_broadcast, + &nsd->eth.tx_broadcast); + + i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), + pf->stat_offsets_loaded, + &osd->tx_dropped_link_down, + &nsd->tx_dropped_link_down); + + i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), + pf->stat_offsets_loaded, + &osd->crc_errors, &nsd->crc_errors); + + i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), + pf->stat_offsets_loaded, + &osd->illegal_bytes, &nsd->illegal_bytes); + + i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), + pf->stat_offsets_loaded, + &osd->mac_local_faults, + &nsd->mac_local_faults); + i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), + pf->stat_offsets_loaded, + &osd->mac_remote_faults, + &nsd->mac_remote_faults); + + i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_length_errors, + &nsd->rx_length_errors); + + i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xon_rx, &nsd->link_xon_rx); + i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xon_tx, &nsd->link_xon_tx); + i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ + i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), + pf->stat_offsets_loaded, + &osd->link_xoff_tx, &nsd->link_xoff_tx); + + for (i = 0; i < 8; i++) { + i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xon_rx[i], + &nsd->priority_xon_rx[i]); + i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xon_tx[i], + &nsd->priority_xon_tx[i]); + i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xoff_tx[i], + &nsd->priority_xoff_tx[i]); + i40e_stat_update32(hw, + I40E_GLPRT_RXON2OFFCNT(hw->port, i), + pf->stat_offsets_loaded, + &osd->priority_xon_2_xoff[i], + 
&nsd->priority_xon_2_xoff[i]); + } + + i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), + I40E_GLPRT_PRC64L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_64, &nsd->rx_size_64); + i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), + I40E_GLPRT_PRC127L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_127, &nsd->rx_size_127); + i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), + I40E_GLPRT_PRC255L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_255, &nsd->rx_size_255); + i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), + I40E_GLPRT_PRC511L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_511, &nsd->rx_size_511); + i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), + I40E_GLPRT_PRC1023L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_1023, &nsd->rx_size_1023); + i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), + I40E_GLPRT_PRC1522L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_1522, &nsd->rx_size_1522); + i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), + I40E_GLPRT_PRC9522L(hw->port), + pf->stat_offsets_loaded, + &osd->rx_size_big, &nsd->rx_size_big); + + i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), + I40E_GLPRT_PTC64L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_64, &nsd->tx_size_64); + i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), + I40E_GLPRT_PTC127L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_127, &nsd->tx_size_127); + i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), + I40E_GLPRT_PTC255L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_255, &nsd->tx_size_255); + i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), + I40E_GLPRT_PTC511L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_511, &nsd->tx_size_511); + i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), + I40E_GLPRT_PTC1023L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_1023, &nsd->tx_size_1023); + i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), + I40E_GLPRT_PTC1522L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_1522, &nsd->tx_size_1522); + i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), + I40E_GLPRT_PTC9522L(hw->port), + pf->stat_offsets_loaded, + &osd->tx_size_big, &nsd->tx_size_big); + + i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_undersize, &nsd->rx_undersize); + i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_fragments, &nsd->rx_fragments); + i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_oversize, &nsd->rx_oversize); + i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), + pf->stat_offsets_loaded, + &osd->rx_jabber, &nsd->rx_jabber); + + /* FDIR stats */ + i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx), + pf->stat_offsets_loaded, + &osd->fd_atr_match, &nsd->fd_atr_match); + i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), + pf->stat_offsets_loaded, + &osd->fd_sb_match, &nsd->fd_sb_match); + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + nsd->tx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + nsd->rx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + i40e_stat_update32(hw, I40E_PRTPM_TLPIC, + pf->stat_offsets_loaded, + &osd->tx_lpi_count, &nsd->tx_lpi_count); + i40e_stat_update32(hw, I40E_PRTPM_RLPIC, + pf->stat_offsets_loaded, + &osd->rx_lpi_count, &nsd->rx_lpi_count); + + pf->stat_offsets_loaded = true; +} + +/** + * i40e_update_stats - 
Update the various statistics counters. + * @vsi: the VSI to be updated + * + * Update the various stats for this VSI and its related entities. + **/ +void i40e_update_stats(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + if (vsi == pf->vsi[pf->lan_vsi]) + i40e_update_pf_stats(pf); + + i40e_update_vsi_stats(vsi); +#ifdef I40E_FCOE + i40e_update_fcoe_stats(vsi); +#endif +} + +/** + * i40e_find_filter - Search VSI filter list for specific mac/vlan filter + * @vsi: the VSI to be searched + * @macaddr: the MAC address + * @vlan: the vlan + * @is_vf: make sure its a VF filter, else doesn't matter + * @is_netdev: make sure its a netdev filter, else doesn't matter + * + * Returns ptr to the filter object or NULL + **/ +static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return NULL; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if ((ether_addr_equal(macaddr, f->macaddr)) && + (vlan == f->vlan) && + (!is_vf || f->is_vf) && + (!is_netdev || f->is_netdev)) + return f; + } + return NULL; +} + +/** + * i40e_find_mac - Find a mac addr in the macvlan filters list + * @vsi: the VSI to be searched + * @macaddr: the MAC address we are searching for + * @is_vf: make sure its a VF filter, else doesn't matter + * @is_netdev: make sure its a netdev filter, else doesn't matter + * + * Returns the first filter with the provided MAC address or NULL if + * MAC address was not found + **/ +struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return NULL; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if ((ether_addr_equal(macaddr, f->macaddr)) && + (!is_vf || f->is_vf) && + (!is_netdev || f->is_netdev)) + return f; + } + return NULL; +} + +/** + * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode + * @vsi: the VSI to be searched + * + * Returns true if VSI is in vlan mode or false otherwise + **/ +bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + + /* Only -1 for all the filters denotes not in vlan mode + * so we have to go through all the list in order to make sure + */ + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->vlan >= 0) + return true; + } + + return false; +} + +/** + * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans + * @vsi: the VSI to be searched + * @macaddr: the mac address to be filtered + * @is_vf: true if it is a VF + * @is_netdev: true if it is a netdev + * + * Goes through all the macvlan filters and adds a + * macvlan filter for each unique vlan that already exists + * + * Returns first filter found on success, else NULL + **/ +struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (!i40e_find_filter(vsi, macaddr, f->vlan, + is_vf, is_netdev)) { + if (!i40e_add_filter(vsi, macaddr, f->vlan, + is_vf, is_netdev)) + return NULL; + } + } + + return list_first_entry_or_null(&vsi->mac_filter_list, + struct i40e_mac_filter, list); +} + +/** + * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM + * @vsi: the PF Main VSI - inappropriate for any other VSI + * @macaddr: the MAC address + * + * Some older firmware configurations set up a default promiscuous VLAN + * filter that needs to be 
removed. + **/ +static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +{ + struct i40e_aqc_remove_macvlan_element_data element; + struct i40e_pf *pf = vsi->back; + i40e_status aq_ret; + + /* Only appropriate for the PF main VSI */ + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.vlan_tag = 0; + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + if (aq_ret) + return -ENOENT; + + return 0; +} + +/** + * i40e_add_filter - Add a mac/vlan filter to the VSI + * @vsi: the VSI to be searched + * @macaddr: the MAC address + * @vlan: the vlan + * @is_vf: make sure its a VF filter, else doesn't matter + * @is_netdev: make sure its a netdev filter, else doesn't matter + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return NULL; + + f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto add_filter_out; + + ether_addr_copy(f->macaddr, macaddr); + f->vlan = vlan; + f->changed = true; + + INIT_LIST_HEAD(&f->list); + list_add(&f->list, &vsi->mac_filter_list); + } + + /* increment counter and add a new flag if needed */ + if (is_vf) { + if (!f->is_vf) { + f->is_vf = true; + f->counter++; + } + } else if (is_netdev) { + if (!f->is_netdev) { + f->is_netdev = true; + f->counter++; + } + } else { + f->counter++; + } + + /* changed tells sync_filters_subtask to + * push the filter down to the firmware + */ + if (f->changed) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } + +add_filter_out: + return f; +} + +/** + * i40e_del_filter - Remove a mac/vlan filter from the VSI + * @vsi: the VSI to be searched + * @macaddr: the MAC address + * @vlan: the vlan + * @is_vf: make sure it's a VF filter, else doesn't matter + * @is_netdev: make sure it's a netdev filter, else doesn't matter + **/ +void i40e_del_filter(struct i40e_vsi *vsi, + u8 *macaddr, s16 vlan, + bool is_vf, bool is_netdev) +{ + struct i40e_mac_filter *f; + + if (!vsi || !macaddr) + return; + + f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); + if (!f || f->counter == 0) + return; + + if (is_vf) { + if (f->is_vf) { + f->is_vf = false; + f->counter--; + } + } else if (is_netdev) { + if (f->is_netdev) { + f->is_netdev = false; + f->counter--; + } + } else { + /* make sure we don't remove a filter in use by VF or netdev */ + int min_f = 0; + min_f += (f->is_vf ? 1 : 0); + min_f += (f->is_netdev ? 
1 : 0); + + if (f->counter > min_f) + f->counter--; + } + + /* counter == 0 tells sync_filters_subtask to + * remove the filter from the firmware's list + */ + if (f->counter == 0) { + f->changed = true; + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } +} + +/** + * i40e_set_mac - NDO callback to set mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +#ifdef I40E_FCOE +int i40e_set_mac(struct net_device *netdev, void *p) +#else +static int i40e_set_mac(struct net_device *netdev, void *p) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct sockaddr *addr = p; + struct i40e_mac_filter *f; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", + addr->sa_data); + return 0; + } + + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(hw->mac.addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", + hw->mac.addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + + if (vsi->type == I40E_VSI_MAIN) { + i40e_status ret; + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + addr->sa_data, NULL); + if (ret) { + netdev_info(netdev, + "Addr change for Main VSI failed: %d\n", + ret); + return -EADDRNOTAVAIL; + } + } + + if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { + struct i40e_aqc_remove_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, netdev->dev_addr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { + i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, + false, false); + } + + if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { + struct i40e_aqc_add_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, hw->mac.addr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { + f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, + false, false); + if (f) + f->is_laa = true; + } + + i40e_sync_vsi_filters(vsi); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + + return 0; +} + +/** + * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc + * @vsi: the VSI being setup + * @ctxt: VSI context structure + * @enabled_tc: Enabled TCs bitmap + * @is_add: True if called before Add VSI + * + * Setup VSI queue mapping for enabled traffic classes. 
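+ * Each enabled TC is given a contiguous range of queues; the tc_mapping
+ * entry for a TC packs the queue offset together with the queue count
+ * expressed as a power of two. For example (illustrative numbers only),
+ * with two TCs and eight queue pairs each, TC0 maps to offset 0 and TC1
+ * to offset 8, both with a count exponent of 3.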
+ **/ +#ifdef I40E_FCOE +void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt, + u8 enabled_tc, + bool is_add) +#else +static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt, + u8 enabled_tc, + bool is_add) +#endif +{ + struct i40e_pf *pf = vsi->back; + u16 sections = 0; + u8 netdev_tc = 0; + u16 numtc = 0; + u16 qcount; + u8 offset; + u16 qmap; + int i; + u16 num_tc_qps = 0; + + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; + offset = 0; + + if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + /* Find numtc from enabled TC bitmap */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) /* TC is enabled */ + numtc++; + } + if (!numtc) { + dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); + numtc = 1; + } + } else { + /* At least TC0 is enabled in case of non-DCB case */ + numtc = 1; + } + + vsi->tc_config.numtc = numtc; + vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; + /* Number of queues per enabled TC */ + /* In MFP case we can have a much lower count of MSIx + * vectors available and so we need to lower the used + * q count. + */ + qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + num_tc_qps = qcount / numtc; + num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); + + /* Setup queue offset/count for all TCs for given VSI */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + /* See if the given TC is enabled for the given VSI */ + if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */ + int pow, num_qps; + + switch (vsi->type) { + case I40E_VSI_MAIN: + qcount = min_t(int, pf->rss_size, num_tc_qps); + break; +#ifdef I40E_FCOE + case I40E_VSI_FCOE: + qcount = num_tc_qps; + break; +#endif + case I40E_VSI_FDIR: + case I40E_VSI_SRIOV: + case I40E_VSI_VMDQ2: + default: + qcount = num_tc_qps; + WARN_ON(i != 0); + break; + } + vsi->tc_config.tc_info[i].qoffset = offset; + vsi->tc_config.tc_info[i].qcount = qcount; + + /* find the next higher power-of-2 of num queue pairs */ + num_qps = qcount; + pow = 0; + while (num_qps && ((1 << pow) < qcount)) { + pow++; + num_qps >>= 1; + } + + vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; + qmap = + (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); + + offset += qcount; + } else { + /* TC is not enabled so set the offset to + * default queue and allocate one queue + * for the given TC. 
+ */ + vsi->tc_config.tc_info[i].qoffset = 0; + vsi->tc_config.tc_info[i].qcount = 1; + vsi->tc_config.tc_info[i].netdev_tc = 0; + + qmap = 0; + } + ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); + } + + /* Set actual Tx/Rx queue pairs */ + vsi->num_queue_pairs = offset; + if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else + vsi->num_queue_pairs = pf->num_lan_msix; + } + + /* Scheduler section valid can only be set for ADD VSI */ + if (is_add) { + sections |= I40E_AQ_VSI_PROP_SCHED_VALID; + + ctxt->info.up_enable_bits = enabled_tc; + } + if (vsi->type == I40E_VSI_SRIOV) { + ctxt->info.mapping_flags |= + cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); + for (i = 0; i < vsi->num_queue_pairs; i++) + ctxt->info.queue_mapping[i] = + cpu_to_le16(vsi->base_queue + i); + } else { + ctxt->info.mapping_flags |= + cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); + ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); + } + ctxt->info.valid_sections |= cpu_to_le16(sections); +} + +/** + * i40e_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +#ifdef I40E_FCOE +void i40e_set_rx_mode(struct net_device *netdev) +#else +static void i40e_set_rx_mode(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_mac_filter *f, *ftmp; + struct i40e_vsi *vsi = np->vsi; + struct netdev_hw_addr *uca; + struct netdev_hw_addr *mca; + struct netdev_hw_addr *ha; + + /* add addr if not already in the filter list */ + netdev_for_each_uc_addr(uca, netdev) { + if (!i40e_find_mac(vsi, uca->addr, false, true)) { + if (i40e_is_vsi_in_vlan(vsi)) + i40e_put_mac_in_vlan(vsi, uca->addr, + false, true); + else + i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY, + false, true); + } + } + + netdev_for_each_mc_addr(mca, netdev) { + if (!i40e_find_mac(vsi, mca->addr, false, true)) { + if (i40e_is_vsi_in_vlan(vsi)) + i40e_put_mac_in_vlan(vsi, mca->addr, + false, true); + else + i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY, + false, true); + } + } + + /* remove filter if not in netdev list */ + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + bool found = false; + + if (!f->is_netdev) + continue; + + if (is_multicast_ether_addr(f->macaddr)) { + netdev_for_each_mc_addr(mca, netdev) { + if (ether_addr_equal(mca->addr, f->macaddr)) { + found = true; + break; + } + } + } else { + netdev_for_each_uc_addr(uca, netdev) { + if (ether_addr_equal(uca->addr, f->macaddr)) { + found = true; + break; + } + } + + for_each_dev_addr(netdev, ha) { + if (ether_addr_equal(ha->addr, f->macaddr)) { + found = true; + break; + } + } + } + if (!found) + i40e_del_filter( + vsi, f->macaddr, I40E_VLAN_ANY, false, true); + } + + /* check for other flag changes */ + if (vsi->current_netdev_flags != vsi->netdev->flags) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } +} + +/** + * i40e_sync_vsi_filters - Update the VSI filter list to the HW + * @vsi: ptr to the VSI + * + * Push any outstanding VSI filter changes through the AdminQ. 
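+ * Deletions and additions are batched into admin queue buffers sized by
+ * the ASQ and flushed whenever a buffer fills; ENOENT on delete is
+ * ignored, and running out of filter space (ENOSPC) forces the VSI into
+ * promiscuous mode. Changes to the netdev promiscuous/allmulti flags are
+ * applied at the end.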
+ * + * Returns 0 or error value + **/ +int i40e_sync_vsi_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + bool promisc_forced_on = false; + bool add_happened = false; + int filter_list_len = 0; + u32 changed_flags = 0; + i40e_status aq_ret = 0; + struct i40e_pf *pf; + int num_add = 0; + int num_del = 0; + u16 cmd_flags; + + /* empty array typed pointers, kcalloc later */ + struct i40e_aqc_add_macvlan_element_data *add_list; + struct i40e_aqc_remove_macvlan_element_data *del_list; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state)) + usleep_range(1000, 2000); + pf = vsi->back; + + if (vsi->netdev) { + changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags; + vsi->current_netdev_flags = vsi->netdev->flags; + } + + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { + vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; + + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_remove_macvlan_element_data); + del_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_remove_macvlan_element_data), + GFP_KERNEL); + if (!del_list) + return -ENOMEM; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter != 0) + continue; + f->changed = false; + cmd_flags = 0; + + /* add to delete list */ + ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); + del_list[num_del].vlan_tag = + cpu_to_le16((u16)(f->vlan == + I40E_VLAN_ANY ? 0 : f->vlan)); + + cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + del_list[num_del].flags = cmd_flags; + num_del++; + + /* unlink from filter list */ + list_del(&f->list); + kfree(f); + + /* flush a full buffer */ + if (num_del == filter_list_len) { + aq_ret = i40e_aq_remove_macvlan(&pf->hw, + vsi->seid, del_list, num_del, + NULL); + num_del = 0; + memset(del_list, 0, sizeof(*del_list)); + + if (aq_ret && + pf->hw.aq.asq_last_status != + I40E_AQ_RC_ENOENT) + dev_info(&pf->pdev->dev, + "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n", + aq_ret, + pf->hw.aq.asq_last_status); + } + } + if (num_del) { + aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, + del_list, num_del, NULL); + num_del = 0; + + if (aq_ret && + pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT) + dev_info(&pf->pdev->dev, + "ignoring delete macvlan error, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + } + + kfree(del_list); + del_list = NULL; + + /* do all the adds now */ + filter_list_len = pf->hw.aq.asq_buf_size / + sizeof(struct i40e_aqc_add_macvlan_element_data), + add_list = kcalloc(filter_list_len, + sizeof(struct i40e_aqc_add_macvlan_element_data), + GFP_KERNEL); + if (!add_list) + return -ENOMEM; + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + if (!f->changed) + continue; + + if (f->counter == 0) + continue; + f->changed = false; + add_happened = true; + cmd_flags = 0; + + /* add to add array */ + ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); + add_list[num_add].vlan_tag = + cpu_to_le16( + (u16)(f->vlan == I40E_VLAN_ANY ? 
0 : f->vlan)); + add_list[num_add].queue_number = 0; + + cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; + add_list[num_add].flags = cpu_to_le16(cmd_flags); + num_add++; + + /* flush a full buffer */ + if (num_add == filter_list_len) { + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, + NULL); + num_add = 0; + + if (aq_ret) + break; + memset(add_list, 0, sizeof(*add_list)); + } + } + if (num_add) { + aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + add_list, num_add, NULL); + num_add = 0; + } + kfree(add_list); + add_list = NULL; + + if (add_happened && aq_ret && + pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { + dev_info(&pf->pdev->dev, + "add filter failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && + !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state)) { + promisc_forced_on = true; + set_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state); + dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); + } + } + } + + /* check for changes in promiscuous modes */ + if (changed_flags & IFF_ALLMULTI) { + bool cur_multipromisc; + cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI); + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_multipromisc, + NULL); + if (aq_ret) + dev_info(&pf->pdev->dev, + "set multi promisc failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + } + if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { + bool cur_promisc; + cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || + test_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state)); + aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) + dev_info(&pf->pdev->dev, + "set uni promisc failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) + dev_info(&pf->pdev->dev, + "set brdcast promisc failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + } + + clear_bit(__I40E_CONFIG_BUSY, &vsi->state); + return 0; +} + +/** + * i40e_sync_filters_subtask - Sync the VSI filter list with HW + * @pf: board private structure + **/ +static void i40e_sync_filters_subtask(struct i40e_pf *pf) +{ + int v; + + if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) + return; + pf->flags &= ~I40E_FLAG_FILTER_SYNC; + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v] && + (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) + i40e_sync_vsi_filters(pf->vsi[v]); + } +} + +/** + * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int i40e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + struct i40e_vsi *vsi = np->vsi; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) + return -EINVAL; + + netdev_info(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + if (netif_running(netdev)) + i40e_vsi_reinit_locked(vsi); + + return 0; +} + +/** + * i40e_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command 
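+ *
+ * Only the hardware timestamping requests (SIOCGHWTSTAMP/SIOCSHWTSTAMP)
+ * are handled here; any other command returns -EOPNOTSUPP.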
+ **/ +int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + switch (cmd) { + case SIOCGHWTSTAMP: + return i40e_ptp_get_ts_config(pf, ifr); + case SIOCSHWTSTAMP: + return i40e_ptp_set_ts_config(pf, ifr); + default: + return -EOPNOTSUPP; + } +} + +/** + * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI + * @vsi: the vsi being adjusted + **/ +void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) +{ + struct i40e_vsi_context ctxt; + i40e_status ret; + + if ((vsi->info.valid_sections & + cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && + ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) + return; /* already enabled */ + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + + ctxt.seid = vsi->seid; + ctxt.info = vsi->info; + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "%s: update vsi failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI + * @vsi: the vsi being adjusted + **/ +void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) +{ + struct i40e_vsi_context ctxt; + i40e_status ret; + + if ((vsi->info.valid_sections & + cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) && + ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == + I40E_AQ_VSI_PVLAN_EMOD_MASK)) + return; /* already disabled */ + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_NOTHING; + + ctxt.seid = vsi->seid; + ctxt.info = vsi->info; + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "%s: update vsi failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_vlan_rx_register - Setup or shutdown vlan offload + * @netdev: network interface to be adjusted + * @features: netdev features to test if VLAN offload is enabled or not + **/ +static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); +} + +/** + * i40e_vsi_add_vlan - Add vsi membership for given vlan + * @vsi: the vsi being configured + * @vid: vlan id to be added (0 = untagged only , -1 = any) + **/ +int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) +{ + struct i40e_mac_filter *f, *add_f; + bool is_netdev, is_vf; + + is_vf = (vsi->type == I40E_VSI_SRIOV); + is_netdev = !!(vsi->netdev); + + if (is_netdev) { + add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, + is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add vlan filter %d for %pM\n", + vid, vsi->netdev->dev_addr); + return -ENOMEM; + } + } + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add vlan filter %d for %pM\n", + vid, f->macaddr); + return -ENOMEM; + } + } + + /* Now if we add a vlan tag, make sure to check if it is the first + * tag (i.e. 
a "tag" -1 does exist) and if so replace the -1 "tag" + * with 0, so we now accept untagged and specified tagged traffic + * (and not any taged and untagged) + */ + if (vid > 0) { + if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, + I40E_VLAN_ANY, + is_vf, is_netdev)) { + i40e_del_filter(vsi, vsi->netdev->dev_addr, + I40E_VLAN_ANY, is_vf, is_netdev); + add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0, + is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + vsi->netdev->dev_addr); + return -ENOMEM; + } + } + } + + /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ + if (vid > 0 && !vsi->info.pvid) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev)) { + i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, + 0, is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter 0 for %pM\n", + f->macaddr); + return -ENOMEM; + } + } + } + } + + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return 0; + + return i40e_sync_vsi_filters(vsi); +} + +/** + * i40e_vsi_kill_vlan - Remove vsi membership for given vlan + * @vsi: the vsi being configured + * @vid: vlan id to be removed (0 = untagged only , -1 = any) + * + * Return: 0 on success or negative otherwise + **/ +int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) +{ + struct net_device *netdev = vsi->netdev; + struct i40e_mac_filter *f, *add_f; + bool is_vf, is_netdev; + int filter_count = 0; + + is_vf = (vsi->type == I40E_VSI_SRIOV); + is_netdev = !!(netdev); + + if (is_netdev) + i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); + + list_for_each_entry(f, &vsi->mac_filter_list, list) + i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); + + /* go through all the filters for this VSI and if there is only + * vid == 0 it means there are no other filters, so vid 0 must + * be replaced with -1. 
This signifies that we should from now + * on accept any traffic (with any tag present, or untagged) + */ + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (is_netdev) { + if (f->vlan && + ether_addr_equal(netdev->dev_addr, f->macaddr)) + filter_count++; + } + + if (f->vlan) + filter_count++; + } + + if (!filter_count && is_netdev) { + i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev); + f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, + is_vf, is_netdev); + if (!f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter %d for %pM\n", + I40E_VLAN_ANY, netdev->dev_addr); + return -ENOMEM; + } + } + + if (!filter_count) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); + add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, + is_vf, is_netdev); + if (!add_f) { + dev_info(&vsi->back->pdev->dev, + "Could not add filter %d for %pM\n", + I40E_VLAN_ANY, f->macaddr); + return -ENOMEM; + } + } + } + + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return 0; + + return i40e_sync_vsi_filters(vsi); +} + +/** + * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload + * @netdev: network interface to be adjusted + * @vid: vlan id to be added + * + * net_device_ops implementation for adding vlan ids + **/ +#ifdef I40E_FCOE +int i40e_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int i40e_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + int ret = 0; + + if (vid > 4095) + return -EINVAL; + + netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); + + /* If the network stack called us with vid = 0 then + * it is asking to receive priority tagged packets with + * vlan id 0. Our HW receives them by default when configured + * to receive untagged packets so there is no need to add an + * extra filter for vlan 0 tagged packets. 
+ */ + if (vid) + ret = i40e_vsi_add_vlan(vsi, vid); + + if (!ret && (vid < VLAN_N_VID)) + set_bit(vid, vsi->active_vlans); + + return ret; +} + +/** + * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload + * @netdev: network interface to be adjusted + * @vid: vlan id to be removed + * + * net_device_ops implementation for removing vlan ids + **/ +#ifdef I40E_FCOE +int i40e_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#else +static int i40e_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); + + /* return code is ignored as there is nothing a user + * can do about failure to remove and a log message was + * already printed from the other function + */ + i40e_vsi_kill_vlan(vsi, vid); + + clear_bit(vid, vsi->active_vlans); + + return 0; +} + +/** + * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up + * @vsi: the vsi being brought back up + **/ +static void i40e_restore_vlan(struct i40e_vsi *vsi) +{ + u16 vid; + + if (!vsi->netdev) + return; + + i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); + + for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) + i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), + vid); +} + +/** + * i40e_vsi_add_pvid - Add pvid for the VSI + * @vsi: the vsi being adjusted + * @vid: the vlan id to set as a PVID + **/ +int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) +{ + struct i40e_vsi_context ctxt; + i40e_status aq_ret; + + vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi->info.pvid = cpu_to_le16(vid); + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | + I40E_AQ_VSI_PVLAN_INSERT_PVID | + I40E_AQ_VSI_PVLAN_EMOD_STR; + + ctxt.seid = vsi->seid; + ctxt.info = vsi->info; + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&vsi->back->pdev->dev, + "%s: update vsi failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + return -ENOENT; + } + + return 0; +} + +/** + * i40e_vsi_remove_pvid - Remove the pvid from the VSI + * @vsi: the vsi being adjusted + * + * Just use the vlan_rx_register() service to put it back to normal + **/ +void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) +{ + i40e_vlan_stripping_disable(vsi); + + vsi->info.pvid = 0; +} + +/** + * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources + * @vsi: ptr to the VSI + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
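+ * i40e_vsi_free_tx_resources() only releases rings that actually have
+ * descriptors allocated, so it can be used as-is for that cleanup.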
+ * + * Return 0 on success, negative on failure + **/ +static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi) +{ + int i, err = 0; + + for (i = 0; i < vsi->num_queue_pairs && !err; i++) + err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); + + return err; +} + +/** + * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues + * @vsi: ptr to the VSI + * + * Free VSI's transmit software resources + **/ +static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi) +{ + int i; + + if (!vsi->tx_rings) + return; + + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + i40e_free_tx_resources(vsi->tx_rings[i]); +} + +/** + * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources + * @vsi: ptr to the VSI + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi) +{ + int i, err = 0; + + for (i = 0; i < vsi->num_queue_pairs && !err; i++) + err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); +#ifdef I40E_FCOE + i40e_fcoe_setup_ddp_resources(vsi); +#endif + return err; +} + +/** + * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues + * @vsi: ptr to the VSI + * + * Free all receive software resources + **/ +static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) +{ + int i; + + if (!vsi->rx_rings) + return; + + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) + i40e_free_rx_resources(vsi->rx_rings[i]); +#ifdef I40E_FCOE + i40e_fcoe_free_ddp_resources(vsi); +#endif +} + +/** + * i40e_config_xps_tx_ring - Configure XPS for a Tx ring + * @ring: The Tx ring to configure + * + * This enables/disables XPS for a given Tx descriptor ring + * based on the TCs enabled for the VSI that ring belongs to. + **/ +static void i40e_config_xps_tx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + cpumask_var_t mask; + + if (!ring->q_vector || !ring->netdev) + return; + + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1) { + if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, ring->queue_index); + free_cpumask_var(mask); + } +} + +/** + * i40e_configure_tx_ring - Configure a transmit ring context and rest + * @ring: The Tx ring to configure + * + * Configure the Tx descriptor ring in the HMC context. 
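+ * The queue context (ring base in 128-byte units, length, head write-back
+ * address and the qset handle for the ring's TC) is cleared and then
+ * written through the HMC, the queue is associated with this PF or VMDQ
+ * VSI via QTX_CTL, and the tail register address is cached for later use.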
+ **/ +static int i40e_configure_tx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + u16 pf_q = vsi->base_queue + ring->queue_index; + struct i40e_hw *hw = &vsi->back->hw; + struct i40e_hmc_obj_txq tx_ctx; + i40e_status err = 0; + u32 qtx_ctl = 0; + + /* some ATR related tx ring init */ + if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { + ring->atr_sample_rate = vsi->back->atr_sample_rate; + ring->atr_count = 0; + } else { + ring->atr_sample_rate = 0; + } + + /* configure XPS */ + i40e_config_xps_tx_ring(ring); + + /* clear the context structure first */ + memset(&tx_ctx, 0, sizeof(tx_ctx)); + + tx_ctx.new_context = 1; + tx_ctx.base = (ring->dma / 128); + tx_ctx.qlen = ring->count; + tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED)); +#ifdef I40E_FCOE + tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); +#endif + tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); + /* FDIR VSI tx ring can still use RS bit and writebacks */ + if (vsi->type != I40E_VSI_FDIR) + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = ring->dma + + (ring->count * sizeof(struct i40e_tx_desc)); + + /* As part of VSI creation/update, FW allocates certain + * Tx arbitration queue sets for each TC enabled for + * the VSI. The FW returns the handles to these queue + * sets as part of the response buffer to Add VSI, + * Update VSI, etc. AQ commands. It is expected that + * these queue set handles be associated with the Tx + * queues by the driver as part of the TX queue context + * initialization. This has to be done regardless of + * DCB as by default everything is mapped to TC0. + */ + tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); + tx_ctx.rdylist_act = 0; + + /* clear the context in the HMC */ + err = i40e_clear_lan_tx_queue_context(hw, pf_q); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* set the context in the HMC */ + err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* Now associate this queue with this PCI function */ + if (vsi->type == I40E_VSI_VMDQ2) { + qtx_ctl = I40E_QTX_CTL_VM_QUEUE; + qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK; + } else { + qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + } + + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & + I40E_QTX_CTL_PF_INDX_MASK); + wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); + i40e_flush(hw); + + clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state); + + /* cache tail off for easier writes later */ + ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); + + return 0; +} + +/** + * i40e_configure_rx_ring - Configure a receive ring context + * @ring: The Rx ring to configure + * + * Configure the Rx descriptor ring in the HMC context. 
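+ *
+ * A worked example with assumed granularities (128-byte data buffers,
+ * 64-byte header buffers, as implied by the DBUFF/HBUFF shifts): a
+ * 2048-byte rx_buf_len is programmed as dbuff = 2048 >> 7 = 16, and
+ * rxmax is capped at min(vsi->max_frame, chain_len * rx_buf_len).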
+ **/ +static int i40e_configure_rx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; + u16 pf_q = vsi->base_queue + ring->queue_index; + struct i40e_hw *hw = &vsi->back->hw; + struct i40e_hmc_obj_rxq rx_ctx; + i40e_status err = 0; + + ring->state = 0; + + /* clear the context structure first */ + memset(&rx_ctx, 0, sizeof(rx_ctx)); + + ring->rx_buf_len = vsi->rx_buf_len; + ring->rx_hdr_len = vsi->rx_hdr_len; + + rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; + rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; + + rx_ctx.base = (ring->dma / 128); + rx_ctx.qlen = ring->count; + + if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { + set_ring_16byte_desc_enabled(ring); + rx_ctx.dsize = 0; + } else { + rx_ctx.dsize = 1; + } + + rx_ctx.dtype = vsi->dtype; + if (vsi->dtype) { + set_ring_ps_enabled(ring); + rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | + I40E_RX_SPLIT_IP | + I40E_RX_SPLIT_TCP_UDP | + I40E_RX_SPLIT_SCTP; + } else { + rx_ctx.hsplit_0 = 0; + } + + rx_ctx.rxmax = min_t(u16, vsi->max_frame, + (chain_len * ring->rx_buf_len)); + if (hw->revision_id == 0) + rx_ctx.lrxqthresh = 0; + else + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = 1; + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 1; +#ifdef I40E_FCOE + rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); +#endif + /* set the prefena field to 1 because the manual says to */ + rx_ctx.prefena = 1; + + /* clear the context in the HMC */ + err = i40e_clear_lan_rx_queue_context(hw, pf_q); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* set the context in the HMC */ + err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx); + if (err) { + dev_info(&vsi->back->pdev->dev, + "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n", + ring->queue_index, pf_q, err); + return -ENOMEM; + } + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); + writel(0, ring->tail); + + if (ring_is_ps_enabled(ring)) { + i40e_alloc_rx_headers(ring); + i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); + } else { + i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); + } + + return 0; +} + +/** + * i40e_vsi_configure_tx - Configure the VSI for Tx + * @vsi: VSI structure describing this set of rings and resources + * + * Configure the Tx VSI for operation. + **/ +static int i40e_vsi_configure_tx(struct i40e_vsi *vsi) +{ + int err = 0; + u16 i; + + for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) + err = i40e_configure_tx_ring(vsi->tx_rings[i]); + + return err; +} + +/** + * i40e_vsi_configure_rx - Configure the VSI for Rx + * @vsi: the VSI being configured + * + * Configure the Rx VSI for operation. 
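+ *
+ * Example of the frame-size math done below: with an MTU of 9000 the
+ * VSI uses max_frame = 9000 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 9022
+ * bytes, while a standard 1500-byte MTU falls back to the default
+ * I40E_RXBUFFER_2048 buffer size.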
+ **/ +static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) +{ + int err = 0; + u16 i; + + if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN)) + vsi->max_frame = vsi->netdev->mtu + ETH_HLEN + + ETH_FCS_LEN + VLAN_HLEN; + else + vsi->max_frame = I40E_RXBUFFER_2048; + + /* figure out correct receive buffer length */ + switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | + I40E_FLAG_RX_PS_ENABLED)) { + case I40E_FLAG_RX_1BUF_ENABLED: + vsi->rx_hdr_len = 0; + vsi->rx_buf_len = vsi->max_frame; + vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; + break; + case I40E_FLAG_RX_PS_ENABLED: + vsi->rx_hdr_len = I40E_RX_HDR_SIZE; + vsi->rx_buf_len = I40E_RXBUFFER_2048; + vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; + break; + default: + vsi->rx_hdr_len = I40E_RX_HDR_SIZE; + vsi->rx_buf_len = I40E_RXBUFFER_2048; + vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; + break; + } + +#ifdef I40E_FCOE + /* setup rx buffer for FCoE */ + if ((vsi->type == I40E_VSI_FCOE) && + (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { + vsi->rx_hdr_len = 0; + vsi->rx_buf_len = I40E_RXBUFFER_3072; + vsi->max_frame = I40E_RXBUFFER_3072; + vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; + } + +#endif /* I40E_FCOE */ + /* round up for the chip's needs */ + vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, + (1 << I40E_RXQ_CTX_HBUFF_SHIFT)); + vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, + (1 << I40E_RXQ_CTX_DBUFF_SHIFT)); + + /* set up individual rings */ + for (i = 0; i < vsi->num_queue_pairs && !err; i++) + err = i40e_configure_rx_ring(vsi->rx_rings[i]); + + return err; +} + +/** + * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC + * @vsi: ptr to the VSI + **/ +static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring, *rx_ring; + u16 qoffset, qcount; + int i, n; + + if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + /* Reset the TC information */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; + rx_ring->dcb_tc = 0; + tx_ring->dcb_tc = 0; + } + } + + for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { + if (!(vsi->tc_config.enabled_tc & (1 << n))) + continue; + + qoffset = vsi->tc_config.tc_info[n].qoffset; + qcount = vsi->tc_config.tc_info[n].qcount; + for (i = qoffset; i < (qoffset + qcount); i++) { + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; + rx_ring->dcb_tc = n; + tx_ring->dcb_tc = n; + } + } +} + +/** + * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI + * @vsi: ptr to the VSI + **/ +static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) +{ + if (vsi->netdev) + i40e_set_rx_mode(vsi->netdev); +} + +/** + * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters + * @vsi: Pointer to the targeted VSI + * + * This function replays the hlist on the hw where all the SB Flow Director + * filters were saved. 
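+ *
+ * This is invoked from i40e_up_complete() later in this file when the
+ * FDIR VSI is brought up, replaying each saved filter with
+ * i40e_add_del_fdir(vsi, filter, true) so the sideband rules are put
+ * back into the hardware.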
+ **/ +static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) +{ + struct i40e_fdir_filter *filter; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + + hlist_for_each_entry_safe(filter, node, + &pf->fdir_filter_list, fdir_node) { + i40e_add_del_fdir(vsi, filter, true); + } +} + +/** + * i40e_vsi_configure - Set up the VSI for action + * @vsi: the VSI being configured + **/ +static int i40e_vsi_configure(struct i40e_vsi *vsi) +{ + int err; + + i40e_set_vsi_rx_mode(vsi); + i40e_restore_vlan(vsi); + i40e_vsi_config_dcb_rings(vsi); + err = i40e_vsi_configure_tx(vsi); + if (!err) + err = i40e_vsi_configure_rx(vsi); + + return err; +} + +/** + * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW + * @vsi: the VSI being configured + **/ +static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_q_vector *q_vector; + struct i40e_hw *hw = &pf->hw; + u16 vector; + int i, q; + u32 val; + u32 qp; + + /* The interrupt indexing is offset by 1 in the PFINT_ITRn + * and PFINT_LNKLSTn registers, e.g.: + * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) + */ + qp = vsi->base_queue; + vector = vsi->base_vector; + for (i = 0; i < vsi->num_q_vectors; i++, vector++) { + q_vector = vsi->q_vectors[i]; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), + q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), + q_vector->tx.itr); + + /* Linked list for the queuepairs assigned to this vector */ + wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); + for (q = 0; q < q_vector->num_ringpairs; q++) { + val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | + (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | + (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | + (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| + (I40E_QUEUE_TYPE_TX + << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); + + wr32(hw, I40E_QINT_RQCTL(qp), val); + + val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | + (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | + (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | + ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| + (I40E_QUEUE_TYPE_RX + << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); + + /* Terminate the linked list */ + if (q == (q_vector->num_ringpairs - 1)) + val |= (I40E_QUEUE_END_OF_LIST + << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); + + wr32(hw, I40E_QINT_TQCTL(qp), val); + qp++; + } + } + + i40e_flush(hw); +} + +/** + * i40e_enable_misc_int_causes - enable the non-queue interrupts + * @hw: ptr to the hardware info + **/ +static void i40e_enable_misc_int_causes(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 val; + + /* clear things first */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ + rd32(hw, I40E_PFINT_ICR0); /* read to clear */ + + val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | + I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | + I40E_PFINT_ICR0_ENA_GRST_MASK | + I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | + I40E_PFINT_ICR0_ENA_GPIO_MASK | + I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | + I40E_PFINT_ICR0_ENA_VFLR_MASK | + I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + + if (pf->flags & I40E_FLAG_PTP) + val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + + wr32(hw, I40E_PFINT_ICR0_ENA, val); + + /* SW_ITR_IDX = 0, but don't change INTENA */ + wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | + 
I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); + + /* OTHER_ITR_IDX = 0 */ + wr32(hw, I40E_PFINT_STAT_CTL0, 0); +} + +/** + * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW + * @vsi: the VSI being configured + **/ +static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) +{ + struct i40e_q_vector *q_vector = vsi->q_vectors[0]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + /* set the ITR configuration */ + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.latency_range = I40E_LOW_LATENCY; + wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); + + i40e_enable_misc_int_causes(pf); + + /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ + wr32(hw, I40E_PFINT_LNKLST0, 0); + + /* Associate the queue pair to the vector and enable the queue int */ + val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | + (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | + (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); + + wr32(hw, I40E_QINT_RQCTL(0), val); + + val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | + (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | + (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); + + wr32(hw, I40E_QINT_TQCTL(0), val); + i40e_flush(hw); +} + +/** + * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 + * @pf: board private structure + **/ +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + + wr32(hw, I40E_PFINT_DYN_CTL0, + I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + i40e_flush(hw); +} + +/** + * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 + * @pf: board private structure + **/ +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); + + wr32(hw, I40E_PFINT_DYN_CTL0, val); + i40e_flush(hw); +} + +/** + * i40e_irq_dynamic_enable - Enable default interrupt generation settings + * @vsi: pointer to a vsi + * @vector: enable a particular Hw Interrupt vector + **/ +void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); + /* skip the flush */ +} + +/** + * i40e_irq_dynamic_disable - Disable default interrupt generation settings + * @vsi: pointer to a vsi + * @vector: disable a particular Hw Interrupt vector + **/ +void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 val; + + val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; + wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); + i40e_flush(hw); +} + +/** + * i40e_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40e_msix_clean_rings(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * i40e_vsi_request_irq_msix - Initialize 
MSI-X interrupts + * @vsi: the VSI being configured + * @basename: name for the vector + * + * Allocates MSI-X vectors and requests interrupts from the kernel. + **/ +static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) +{ + int q_vectors = vsi->num_q_vectors; + struct i40e_pf *pf = vsi->back; + int base = vsi->base_vector; + int rx_int_idx = 0; + int tx_int_idx = 0; + int vector, err; + + for (vector = 0; vector < q_vectors; vector++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", basename, "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(pf->msix_entries[base + vector].vector, + vsi->irq_handler, + 0, + q_vector->name, + q_vector); + if (err) { + dev_info(&pf->pdev->dev, + "%s: request_irq failed, error: %d\n", + __func__, err); + goto free_queue_irqs; + } + /* assign the mask for this irq */ + irq_set_affinity_hint(pf->msix_entries[base + vector].vector, + &q_vector->affinity_mask); + } + + vsi->irqs_ready = true; + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_set_affinity_hint(pf->msix_entries[base + vector].vector, + NULL); + free_irq(pf->msix_entries[base + vector].vector, + &(vsi->q_vectors[vector])); + } + return err; +} + +/** + * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI + * @vsi: the VSI being un-configured + **/ +static void i40e_vsi_disable_irq(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int base = vsi->base_vector; + int i; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); + wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0); + } + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + for (i = vsi->base_vector; + i < (vsi->num_q_vectors + vsi->base_vector); i++) + wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); + + i40e_flush(hw); + for (i = 0; i < vsi->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } else { + /* Legacy and MSI mode - this stops all interrupt handling */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + wr32(hw, I40E_PFINT_DYN_CTL0, 0); + i40e_flush(hw); + synchronize_irq(pf->pdev->irq); + } +} + +/** + * i40e_vsi_enable_irq - Enable IRQ for the given VSI + * @vsi: the VSI being configured + **/ +static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + for (i = vsi->base_vector; + i < (vsi->num_q_vectors + vsi->base_vector); i++) + i40e_irq_dynamic_enable(vsi, i); + } else { + i40e_irq_dynamic_enable_icr0(pf); + } + + i40e_flush(&pf->hw); + return 0; +} + +/** + * i40e_stop_misc_vector - Stop the vector that handles non-queue events + * @pf: board private structure + **/ +static void i40e_stop_misc_vector(struct i40e_pf *pf) +{ + /* Disable ICR 0 */ + wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); + i40e_flush(&pf->hw); +} + +/** + * i40e_intr - MSI/Legacy and non-queue interrupt handler + * @irq: interrupt number + * @data: pointer to a q_vector + * + * This is the handler used for all MSI/Legacy interrupts, and deals + * with both 
queue and non-queue interrupts. This is also used in + * MSIX mode to handle the non-queue interrupts. + **/ +static irqreturn_t i40e_intr(int irq, void *data) +{ + struct i40e_pf *pf = (struct i40e_pf *)data; + struct i40e_hw *hw = &pf->hw; + irqreturn_t ret = IRQ_NONE; + u32 icr0, icr0_remaining; + u32 val, ena_mask; + + icr0 = rd32(hw, I40E_PFINT_ICR0); + ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); + + /* if sharing a legacy IRQ, we might get called w/o an intr pending */ + if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) + goto enable_intr; + + /* if interrupt but no bits showing, must be SWINT */ + if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || + (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) + pf->sw_int_count++; + + /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ + if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { + + /* temporarily disable queue cause for NAPI processing */ + u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); + qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_RQCTL(0), qval); + + qval = rd32(hw, I40E_QINT_TQCTL(0)); + qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_TQCTL(0), qval); + + if (!test_bit(__I40E_DOWN, &pf->state)) + napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); + } + + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + } + + if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; + set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + } + + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; + set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + } + + if (icr0 & I40E_PFINT_ICR0_GRST_MASK) { + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); + ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; + val = rd32(hw, I40E_GLGEN_RSTAT); + val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) + >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; + if (val == I40E_RESET_CORER) { + pf->corer_count++; + } else if (val == I40E_RESET_GLOBR) { + pf->globr_count++; + } else if (val == I40E_RESET_EMPR) { + pf->empr_count++; + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); + } + } + + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { + icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; + dev_info(&pf->pdev->dev, "HMC error interrupt\n"); + dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", + rd32(hw, I40E_PFHMC_ERRORINFO), + rd32(hw, I40E_PFHMC_ERRORDATA)); + } + + if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { + u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); + + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + i40e_ptp_tx_hwtstamp(pf); + } + } + + /* If a critical error is pending we have no choice but to reset the + * device. + * Report and mask out any remaining unexpected interrupts. 
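+ * For example, if only I40E_PFINT_ICR0_PCI_EXCEPTION_MASK is left set in
+ * icr0_remaining, a PF reset is requested and that cause is dropped from
+ * ena_mask, so it stays masked when ICR0_ENA is rewritten on the way out.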
+ */ + icr0_remaining = icr0 & ena_mask; + if (icr0_remaining) { + dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", + icr0_remaining); + if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || + (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || + (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { + dev_info(&pf->pdev->dev, "device will be reset\n"); + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + ena_mask &= ~icr0_remaining; + } + ret = IRQ_HANDLED; + +enable_intr: + /* re-enable interrupt causes */ + wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); + if (!test_bit(__I40E_DOWN, &pf->state)) { + i40e_service_event_schedule(pf); + i40e_irq_dynamic_enable_icr0(pf); + } + + return ret; +} + +/** + * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes + * @tx_ring: tx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. the clean is finished) + **/ +static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) +{ + struct i40e_vsi *vsi = tx_ring->vsi; + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_desc; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->cmd_type_offset_bsz & + cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + tx_desc->buffer_addr = 0; + tx_desc->cmd_type_offset_bsz = 0; + /* move past filter desc */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buf->raw_buf); + + tx_buf->raw_buf = NULL; + tx_buf->tx_flags = 0; + tx_buf->next_to_watch = NULL; + dma_unmap_len_set(tx_buf, len, 0); + tx_desc->buffer_addr = 0; + tx_desc->cmd_type_offset_bsz = 0; + + /* move us past the eop_desc for start of next FD desc */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + i40e_irq_dynamic_enable(vsi, + tx_ring->q_vector->v_idx + vsi->base_vector); + } + return budget > 0; +} + +/** + * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + struct i40e_vsi *vsi; + + if (!q_vector->tx.ring) + return IRQ_HANDLED; + + vsi = q_vector->tx.ring->vsi; + i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); + + return IRQ_HANDLED; +} + +/** + * i40e_map_vector_to_qp - Assigns the queue pair to the vector + * @vsi: the VSI being configured + * @v_idx: vector index + * @qp_idx: queue pair index + **/ +static void map_vector_to_qp(struct 
i40e_vsi *vsi, int v_idx, int qp_idx) +{ + struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; + struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +/** + * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors + * @vsi: the VSI being configured + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per queue pair, but on a constrained vector budget, we + * group the queue pairs as "efficiently" as possible. + **/ +static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi) +{ + int qp_remaining = vsi->num_queue_pairs; + int q_vectors = vsi->num_q_vectors; + int num_ringpairs; + int v_start = 0; + int qp_idx = 0; + + /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to + * group them so there are multiple queues per vector. + * It is also important to go through all the vectors available to be + * sure that if we don't use all the vectors, that the remaining vectors + * are cleared. This is especially important when decreasing the + * number of queues in use. + */ + for (; v_start < q_vectors; v_start++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; + + num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + + q_vector->num_ringpairs = num_ringpairs; + + q_vector->rx.count = 0; + q_vector->tx.count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + + while (num_ringpairs--) { + map_vector_to_qp(vsi, v_start, qp_idx); + qp_idx++; + qp_remaining--; + } + } +} + +/** + * i40e_vsi_request_irq - Request IRQ from the OS + * @vsi: the VSI being configured + * @basename: name for the vector + **/ +static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) +{ + struct i40e_pf *pf = vsi->back; + int err; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + err = i40e_vsi_request_irq_msix(vsi, basename); + else if (pf->flags & I40E_FLAG_MSI_ENABLED) + err = request_irq(pf->pdev->irq, i40e_intr, 0, + pf->int_name, pf); + else + err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, + pf->int_name, pf); + + if (err) + dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); + + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * i40e_netpoll - A Polling 'interrupt'handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. 
+ **/ +#ifdef I40E_FCOE +void i40e_netpoll(struct net_device *netdev) +#else +static void i40e_netpoll(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int i; + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &vsi->state)) + return; + + pf->flags |= I40E_FLAG_IN_NETPOLL; + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + for (i = 0; i < vsi->num_q_vectors; i++) + i40e_msix_clean_rings(0, vsi->q_vectors[i]); + } else { + i40e_intr(pf->pdev->irq, netdev); + } + pf->flags &= ~I40E_FLAG_IN_NETPOLL; +} +#endif + +/** + * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @enable: enable or disable state of the queue + * + * This routine will wait for the given Tx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. + **/ +static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) +{ + int i; + u32 tx_reg; + + for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { + tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); + if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + break; + + usleep_range(10, 20); + } + if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) + return -ETIMEDOUT; + + return 0; +} + +/** + * i40e_vsi_control_tx - Start or stop a VSI's rings + * @vsi: the VSI being configured + * @enable: start or stop the rings + **/ +static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int i, j, pf_q, ret = 0; + u32 tx_reg; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + + /* warn the TX unit of coming changes */ + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); + if (!enable) + usleep_range(10, 20); + + for (j = 0; j < 50; j++) { + tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); + if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == + ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } + /* Skip if the queue is already in the requested state */ + if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + continue; + + /* turn on/off the queue */ + if (enable) { + wr32(hw, I40E_QTX_HEAD(pf_q), 0); + tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; + } else { + tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; + } + + wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); + /* No waiting for the Tx queue to disable */ + if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) + continue; + + /* wait for the change to finish */ + ret = i40e_pf_txq_wait(pf, pf_q, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Tx ring %d %sable timeout\n", + __func__, vsi->seid, pf_q, + (enable ? "en" : "dis")); + break; + } + } + + if (hw->revision_id == 0) + mdelay(50); + return ret; +} + +/** + * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled + * @pf: the PF being configured + * @pf_q: the PF queue + * @enable: enable or disable state of the queue + * + * This routine will wait for the given Rx queue of the PF to reach the + * enabled or disabled state. + * Returns -ETIMEDOUT in case of failing to reach the requested state after + * multiple retries; else will return 0 in case of success. 
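+ *
+ * A minimal usage sketch (mirroring i40e_vsi_control_rx() below):
+ *
+ *	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg | I40E_QRX_ENA_QENA_REQ_MASK);
+ *	ret = i40e_pf_rxq_wait(pf, pf_q, true);
+ *
+ * which polls QENA_STAT until it matches the requested state or the
+ * retry limit expires.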
+ **/ +static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) +{ + int i; + u32 rx_reg; + + for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) { + rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); + if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) + break; + + usleep_range(10, 20); + } + if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) + return -ETIMEDOUT; + + return 0; +} + +/** + * i40e_vsi_control_rx - Start or stop a VSI's rings + * @vsi: the VSI being configured + * @enable: start or stop the rings + **/ +static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int i, j, pf_q, ret = 0; + u32 rx_reg; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + for (j = 0; j < 50; j++) { + rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); + if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == + ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } + + /* Skip if the queue is already in the requested state */ + if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) + continue; + + /* turn on/off the queue */ + if (enable) + rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; + else + rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; + wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); + + /* wait for the change to finish */ + ret = i40e_pf_rxq_wait(pf, pf_q, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Rx ring %d %sable timeout\n", + __func__, vsi->seid, pf_q, + (enable ? "en" : "dis")); + break; + } + } + + return ret; +} + +/** + * i40e_vsi_control_rings - Start or stop a VSI's rings + * @vsi: the VSI being configured + * @enable: start or stop the rings + **/ +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) +{ + int ret = 0; + + /* do rx first for enable and last for disable */ + if (request) { + ret = i40e_vsi_control_rx(vsi, request); + if (ret) + return ret; + ret = i40e_vsi_control_tx(vsi, request); + } else { + /* Ignore return value, we need to shutdown whatever we can */ + i40e_vsi_control_tx(vsi, request); + i40e_vsi_control_rx(vsi, request); + } + + return ret; +} + +/** + * i40e_vsi_free_irq - Free the irq association with the OS + * @vsi: the VSI being configured + **/ +static void i40e_vsi_free_irq(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int base = vsi->base_vector; + u32 val, qp; + int i; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (!vsi->q_vectors) + return; + + if (!vsi->irqs_ready) + return; + + vsi->irqs_ready = false; + for (i = 0; i < vsi->num_q_vectors; i++) { + u16 vector = i + base; + + /* free only the irqs that were actually requested */ + if (!vsi->q_vectors[i] || + !vsi->q_vectors[i]->num_ringpairs) + continue; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(pf->msix_entries[vector].vector, + NULL); + free_irq(pf->msix_entries[vector].vector, + vsi->q_vectors[i]); + + /* Tear down the interrupt queue link list + * + * We know that they come in pairs and always + * the Rx first, then the Tx. To clear the + * link list, stick the EOL value into the + * next_q field of the registers. 
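+ *
+ * For example, a vector that owns queue pairs 4 and 5 was linked by
+ * i40e_vsi_configure_msix() as
+ *
+ *	PFINT_LNKLSTN[v]  -> 4 (Rx)
+ *	QINT_RQCTL[4]     -> 4 (Tx),  QINT_TQCTL[4] -> 5 (Rx)
+ *	QINT_RQCTL[5]     -> 5 (Tx),  QINT_TQCTL[5] -> EOL
+ *
+ * so the loop below follows the TQCTL next_q field from 4 to 5 and
+ * stops when it reads the end-of-list marker.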
+ */ + val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); + qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) + >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + val |= I40E_QUEUE_END_OF_LIST + << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); + + while (qp != I40E_QUEUE_END_OF_LIST) { + u32 next; + + val = rd32(hw, I40E_QINT_RQCTL(qp)); + + val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | + I40E_QINT_RQCTL_MSIX0_INDX_MASK | + I40E_QINT_RQCTL_CAUSE_ENA_MASK | + I40E_QINT_RQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | + I40E_QINT_RQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_RQCTL(qp), val); + + val = rd32(hw, I40E_QINT_TQCTL(qp)); + + next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK) + >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT; + + val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | + I40E_QINT_TQCTL_MSIX0_INDX_MASK | + I40E_QINT_TQCTL_CAUSE_ENA_MASK | + I40E_QINT_TQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | + I40E_QINT_TQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_TQCTL(qp), val); + qp = next; + } + } + } else { + free_irq(pf->pdev->irq, pf); + + val = rd32(hw, I40E_PFINT_LNKLST0); + qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) + >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT; + val |= I40E_QUEUE_END_OF_LIST + << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT; + wr32(hw, I40E_PFINT_LNKLST0, val); + + val = rd32(hw, I40E_QINT_RQCTL(qp)); + val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK | + I40E_QINT_RQCTL_MSIX0_INDX_MASK | + I40E_QINT_RQCTL_CAUSE_ENA_MASK | + I40E_QINT_RQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_RQCTL_ITR_INDX_MASK | + I40E_QINT_RQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_RQCTL(qp), val); + + val = rd32(hw, I40E_QINT_TQCTL(qp)); + + val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK | + I40E_QINT_TQCTL_MSIX0_INDX_MASK | + I40E_QINT_TQCTL_CAUSE_ENA_MASK | + I40E_QINT_TQCTL_INTEVENT_MASK); + + val |= (I40E_QINT_TQCTL_ITR_INDX_MASK | + I40E_QINT_TQCTL_NEXTQ_INDX_MASK); + + wr32(hw, I40E_QINT_TQCTL(qp), val); + } +} + +/** + * i40e_free_q_vector - Free memory allocated for specific interrupt vector + * @vsi: the VSI being configured + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) +{ + struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; + struct i40e_ring *ring; + + if (!q_vector) + return; + + /* disassociate q_vector from rings */ + i40e_for_each_ring(ring, q_vector->tx) + ring->q_vector = NULL; + + i40e_for_each_ring(ring, q_vector->rx) + ring->q_vector = NULL; + + /* only VSI w/ an associated netdev is set up w/ NAPI */ + if (vsi->netdev) + netif_napi_del(&q_vector->napi); + + vsi->q_vectors[v_idx] = NULL; + + kfree_rcu(q_vector, rcu); +} + +/** + * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors + * @vsi: the VSI being un-configured + * + * This frees the memory allocated to the q_vectors and + * deletes references to the NAPI struct. 
+ **/ +static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi) +{ + int v_idx; + + for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) + i40e_free_q_vector(vsi, v_idx); +} + +/** + * i40e_reset_interrupt_capability - Disable interrupt setup in OS + * @pf: board private structure + **/ +static void i40e_reset_interrupt_capability(struct i40e_pf *pf) +{ + /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + pci_disable_msix(pf->pdev); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + kfree(pf->irq_pile); + pf->irq_pile = NULL; + } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { + pci_disable_msi(pf->pdev); + } + pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); +} + +/** + * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @pf: board private structure + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) +{ + int i; + + i40e_stop_misc_vector(pf); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } + + i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i]) + i40e_vsi_free_q_vectors(pf->vsi[i]); + i40e_reset_interrupt_capability(pf); +} + +/** + * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI + * @vsi: the VSI being configured + **/ +static void i40e_napi_enable_all(struct i40e_vsi *vsi) +{ + int q_idx; + + if (!vsi->netdev) + return; + + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) + napi_enable(&vsi->q_vectors[q_idx]->napi); +} + +/** + * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI + * @vsi: the VSI being configured + **/ +static void i40e_napi_disable_all(struct i40e_vsi *vsi) +{ + int q_idx; + + if (!vsi->netdev) + return; + + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) + napi_disable(&vsi->q_vectors[q_idx]->napi); +} + +/** + * i40e_vsi_close - Shut down a VSI + * @vsi: the vsi to be quelled + **/ +static void i40e_vsi_close(struct i40e_vsi *vsi) +{ + if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) + i40e_down(vsi); + i40e_vsi_free_irq(vsi); + i40e_vsi_free_tx_resources(vsi); + i40e_vsi_free_rx_resources(vsi); +} + +/** + * i40e_quiesce_vsi - Pause a given VSI + * @vsi: the VSI being paused + **/ +static void i40e_quiesce_vsi(struct i40e_vsi *vsi) +{ + if (test_bit(__I40E_DOWN, &vsi->state)) + return; + + /* No need to disable FCoE VSI when Tx suspended */ + if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && + vsi->type == I40E_VSI_FCOE) { + dev_dbg(&vsi->back->pdev->dev, + "%s: VSI seid %d skipping FCoE VSI disable\n", + __func__, vsi->seid); + return; + } + + set_bit(__I40E_NEEDS_RESTART, &vsi->state); + if (vsi->netdev && netif_running(vsi->netdev)) { + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + } else { + i40e_vsi_close(vsi); + } +} + +/** + * i40e_unquiesce_vsi - Resume a given VSI + * @vsi: the VSI being resumed + **/ +static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) +{ + if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state)) + return; + + clear_bit(__I40E_NEEDS_RESTART, &vsi->state); + if (vsi->netdev && netif_running(vsi->netdev)) + vsi->netdev->netdev_ops->ndo_open(vsi->netdev); + else + i40e_vsi_open(vsi); /* this clears the DOWN bit */ +} + +/** + * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF + * @pf: the PF + **/ 
+static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) +{ + int v; + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v]) + i40e_quiesce_vsi(pf->vsi[v]); + } +} + +/** + * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF + * @pf: the PF + **/ +static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) +{ + int v; + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v]) + i40e_unquiesce_vsi(pf->vsi[v]); + } +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled + * @vsi: the VSI being configured + * + * This function waits for the given VSI's Tx queues to be disabled. + **/ +static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i, pf_q, ret; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + /* Check and wait for the disable status of the queue */ + ret = i40e_pf_txq_wait(pf, pf_q, false); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Tx ring %d disable timeout\n", + __func__, vsi->seid, pf_q); + return ret; + } + } + + return 0; +} + +/** + * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled + * @pf: the PF + * + * This function waits for the Tx queues to be in disabled state for all the + * VSIs that are managed by this PF. + **/ +static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) +{ + int v, ret = 0; + + for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + /* No need to wait for FCoE VSI queues */ + if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { + ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); + if (ret) + break; + } + } + + return ret; +} + +#endif +/** + * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP + * @pf: pointer to PF + * + * Get TC map for ISCSI PF type that will include iSCSI TC + * and LAN TC. 
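+ *
+ * Worked example (illustrative values): if the iSCSI APP TLV is found at
+ * priority 4 and the ETS priority table maps priority 4 to TC2, the
+ * returned map is 0x1 | (1 << 2) = 0x5, i.e. TC0 for LAN plus TC2 for
+ * iSCSI.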
+ **/ +static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) +{ + struct i40e_dcb_app_priority_table app; + struct i40e_hw *hw = &pf->hw; + u8 enabled_tc = 1; /* TC0 is always enabled */ + u8 tc, i; + /* Get the iSCSI APP TLV */ + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + for (i = 0; i < dcbcfg->numapps; i++) { + app = dcbcfg->app[i]; + if (app.selector == I40E_APP_SEL_TCPIP && + app.protocolid == I40E_APP_PROTOID_ISCSI) { + tc = dcbcfg->etscfg.prioritytable[app.priority]; + enabled_tc |= (1 << tc); + break; + } + } + + return enabled_tc; +} + +/** + * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config + * @dcbcfg: the corresponding DCBx configuration structure + * + * Return the number of TCs from given DCBx configuration + **/ +static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) +{ + u8 num_tc = 0; + int i; + + /* Scan the ETS Config Priority Table to find + * traffic class enabled for a given priority + * and use the traffic class index to get the + * number of traffic classes enabled + */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + if (dcbcfg->etscfg.prioritytable[i] > num_tc) + num_tc = dcbcfg->etscfg.prioritytable[i]; + } + + /* Traffic class index starts from zero so + * increment to return the actual count + */ + return num_tc + 1; +} + +/** + * i40e_dcb_get_enabled_tc - Get enabled traffic classes + * @dcbcfg: the corresponding DCBx configuration structure + * + * Query the current DCB configuration and return the number of + * traffic classes enabled from the given DCBX config + **/ +static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) +{ + u8 num_tc = i40e_dcb_get_num_tc(dcbcfg); + u8 enabled_tc = 1; + u8 i; + + for (i = 0; i < num_tc; i++) + enabled_tc |= 1 << i; + + return enabled_tc; +} + +/** + * i40e_pf_get_num_tc - Get enabled traffic classes for PF + * @pf: PF being queried + * + * Return number of traffic classes enabled for the given PF + **/ +static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u8 i, enabled_tc; + u8 num_tc = 0; + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + /* If DCB is not enabled then always in single TC */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return 1; + + /* SFP mode will be enabled for all TCs on port */ + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + return i40e_dcb_get_num_tc(dcbcfg); + + /* MFP mode return count of enabled TCs for this PF */ + if (pf->hw.func_caps.iscsi) + enabled_tc = i40e_get_iscsi_tc_map(pf); + else + return 1; /* Only TC0 */ + + /* At least have TC0 */ + enabled_tc = (enabled_tc ? enabled_tc : 0x1); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + num_tc++; + } + return num_tc; +} + +/** + * i40e_pf_get_default_tc - Get bitmap for first enabled TC + * @pf: PF being queried + * + * Return a bitmap for first enabled traffic class for this PF. + **/ +static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) +{ + u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; + u8 i = 0; + + if (!enabled_tc) + return 0x1; /* TC0 */ + + /* Find the first enabled TC */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + break; + } + + return 1 << i; +} + +/** + * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes + * @pf: PF being queried + * + * Return a bitmap for enabled traffic classes for this PF. 
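+ *
+ * For example (illustrative table): with DCB enabled in SFP mode and an
+ * ETS priority table of {0, 0, 1, 1, 2, 0, 0, 0}, i40e_dcb_get_num_tc()
+ * above reports 3 TCs, so i40e_dcb_get_enabled_tc() yields 0x7 and that
+ * is what this helper returns.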
+ **/ +static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) +{ + /* If DCB is not enabled for this PF then just return default TC */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return i40e_pf_get_default_tc(pf); + + /* SFP mode we want PF to be enabled for all TCs */ + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); + + /* MFP enabled and iSCSI PF type */ + if (pf->hw.func_caps.iscsi) + return i40e_get_iscsi_tc_map(pf); + else + return i40e_pf_get_default_tc(pf); +} + +/** + * i40e_vsi_get_bw_info - Query VSI BW Information + * @vsi: the VSI being queried + * + * Returns 0 on success, negative value on failure + **/ +static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) +{ + struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0}; + struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0}; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + i40e_status aq_ret; + u32 tc_bw_max; + int i; + + /* Get the VSI level BW configuration */ + aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "couldn't get PF vsi bw config, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + return -EINVAL; + } + + /* Get the VSI level BW configuration per TC */ + aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, + NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); + return -EINVAL; + } + + if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) { + dev_info(&pf->pdev->dev, + "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", + bw_config.tc_valid_bits, + bw_ets_config.tc_valid_bits); + /* Still continuing */ + } + + vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); + vsi->bw_max_quanta = bw_config.max_bw; + tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) | + (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; + vsi->bw_ets_limit_credits[i] = + le16_to_cpu(bw_ets_config.credits[i]); + /* 3 bits out of 4 for each TC */ + vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); + } + + return 0; +} + +/** + * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC + * @vsi: the VSI being configured + * @enabled_tc: TC bitmap + * @bw_credits: BW shared credits per TC + * + * Returns 0 on success, negative value on failure + **/ +static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, + u8 *bw_share) +{ + struct i40e_aqc_configure_vsi_tc_bw_data bw_data; + i40e_status aq_ret; + int i; + + bw_data.tc_valid_bits = enabled_tc; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + bw_data.tc_bw_credits[i] = bw_share[i]; + + aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, + NULL); + if (aq_ret) { + dev_info(&vsi->back->pdev->dev, + "AQ command Config VSI BW allocation per TC failed = %d\n", + vsi->back->hw.aq.asq_last_status); + return -EINVAL; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + vsi->info.qs_handle[i] = bw_data.qs_handles[i]; + + return 0; +} + +/** + * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration + * @vsi: the VSI being configured + * @enabled_tc: TC map to be enabled + * + **/ +static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) +{ + struct net_device *netdev = vsi->netdev; + struct i40e_pf *pf = vsi->back; + 
struct i40e_hw *hw = &pf->hw; + u8 netdev_tc = 0; + int i; + struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; + + if (!netdev) + return; + + if (!enabled_tc) { + netdev_reset_tc(netdev); + return; + } + + /* Set up actual enabled TCs on the VSI */ + if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) + return; + + /* set per TC queues for the VSI */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + /* Only set TC queues for enabled tcs + * + * e.g. For a VSI that has TC0 and TC3 enabled the + * enabled_tc bitmap would be 0x00001001; the driver + * will set the numtc for netdev as 2 that will be + * referenced by the netdev layer as TC 0 and 1. + */ + if (vsi->tc_config.enabled_tc & (1 << i)) + netdev_set_tc_queue(netdev, + vsi->tc_config.tc_info[i].netdev_tc, + vsi->tc_config.tc_info[i].qcount, + vsi->tc_config.tc_info[i].qoffset); + } + + /* Assign UP2TC map for the VSI */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + /* Get the actual TC# for the UP */ + u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; + /* Get the mapped netdev TC# for the UP */ + netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; + netdev_set_prio_tc_map(netdev, i, netdev_tc); + } +} + +/** + * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map + * @vsi: the VSI being configured + * @ctxt: the ctxt buffer returned from AQ VSI update param command + **/ +static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt) +{ + /* copy just the sections touched not the entire info + * since not all sections are valid as returned by + * update vsi params + */ + vsi->info.mapping_flags = ctxt->info.mapping_flags; + memcpy(&vsi->info.queue_mapping, + &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); + memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, + sizeof(vsi->info.tc_mapping)); +} + +/** + * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map + * @vsi: VSI to be configured + * @enabled_tc: TC bitmap + * + * This configures a particular VSI for TCs that are mapped to the + * given TC bitmap. It uses default bandwidth share for TCs across + * VSIs to configure TC for a particular VSI. + * + * NOTE: + * It is expected that the VSI queues have been quisced before calling + * this function. 
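+ *
+ * A minimal usage sketch (values illustrative): enabling TC0 and TC1 at
+ * equal weight is simply
+ *
+ *	ret = i40e_vsi_config_tc(vsi, 0x3);
+ *
+ * which programs bw_share = {1, 1, 0, ...} through
+ * i40e_vsi_configure_bw_alloc() before the queue mapping is updated.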
+ **/ +static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) +{ + u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; + struct i40e_vsi_context ctxt; + int ret = 0; + int i; + + /* Check if enabled_tc is same as existing or new TCs */ + if (vsi->tc_config.enabled_tc == enabled_tc) + return ret; + + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + bw_share[i] = 1; + } + + ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed configuring TC map %d for VSI %d\n", + enabled_tc, vsi->seid); + goto out; + } + + /* Update Queue Pairs Mapping for currently enabled UPs */ + ctxt.seid = vsi->seid; + ctxt.pf_num = vsi->back->hw.pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.info = vsi->info; + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); + + /* Update the VSI after updating the VSI queue-mapping information */ + ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "update vsi failed, aq_err=%d\n", + vsi->back->hw.aq.asq_last_status); + goto out; + } + /* update the local VSI info with updated queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + + /* Update current VSI BW information */ + ret = i40e_vsi_get_bw_info(vsi); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed updating vsi bw info, aq_err=%d\n", + vsi->back->hw.aq.asq_last_status); + goto out; + } + + /* Update the netdev TC setup */ + i40e_vsi_config_netdev_tc(vsi, enabled_tc); +out: + return ret; +} + +/** + * i40e_veb_config_tc - Configure TCs for given VEB + * @veb: given VEB + * @enabled_tc: TC bitmap + * + * Configures given TC bitmap for VEB (switching) element + **/ +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; + struct i40e_pf *pf = veb->pf; + int ret = 0; + int i; + + /* No TCs or already enabled TCs just return */ + if (!enabled_tc || veb->enabled_tc == enabled_tc) + return ret; + + bw_data.tc_valid_bits = enabled_tc; + /* bw_data.absolute_credits is not set (relative) */ + + /* Enable ETS TCs with equal BW Share for now */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + bw_data.tc_bw_share_credits[i] = 1; + } + + ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, + &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "veb bw config failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + goto out; + } + + /* Update the BW information */ + ret = i40e_veb_get_bw_info(veb); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed getting veb bw config, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + +out: + return ret; +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs + * @pf: PF struct + * + * Reconfigure VEB/VSIs on a given PF; it is assumed that + * the caller would've quiesce all the VSIs before calling + * this function + **/ +static void i40e_dcb_reconfigure(struct i40e_pf *pf) +{ + u8 tc_map = 0; + int ret; + u8 v; + + /* Enable the TCs available on PF to all VEBs */ + tc_map = i40e_pf_get_tc_map(pf); + for (v = 0; v < I40E_MAX_VEB; v++) { + if (!pf->veb[v]) + continue; + ret = i40e_veb_config_tc(pf->veb[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VEB seid=%d\n", + pf->veb[v]->seid); + /* Will try to configure as many 
components */ + } + } + + /* Update each VSI */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (!pf->vsi[v]) + continue; + + /* - Enable all TCs for the LAN VSI +#ifdef I40E_FCOE + * - For FCoE VSI only enable the TC configured + * as per the APP TLV +#endif + * - For all others keep them at TC0 for now + */ + if (v == pf->lan_vsi) + tc_map = i40e_pf_get_tc_map(pf); + else + tc_map = i40e_pf_get_default_tc(pf); +#ifdef I40E_FCOE + if (pf->vsi[v]->type == I40E_VSI_FCOE) + tc_map = i40e_get_fcoe_tc_map(pf); +#endif /* #ifdef I40E_FCOE */ + + ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VSI seid=%d\n", + pf->vsi[v]->seid); + /* Will try to configure as many components */ + } else { + /* Re-configure VSI vectors based on updated TC map */ + i40e_vsi_map_rings_to_vectors(pf->vsi[v]); + if (pf->vsi[v]->netdev) + i40e_dcbnl_set_all(pf->vsi[v]); + } + } +} + +/** + * i40e_resume_port_tx - Resume port Tx + * @pf: PF struct + * + * Resume a port's Tx and issue a PF reset in case of failure to + * resume. + **/ +static int i40e_resume_port_tx(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_aq_resume_port_tx(hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "AQ command Resume Port Tx failed = %d\n", + pf->hw.aq.asq_last_status); + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + + return ret; +} + +/** + * i40e_init_pf_dcb - Initialize DCB configuration + * @pf: PF being configured + * + * Query the current DCB configuration and cache it + * in the hardware structure + **/ +static int i40e_init_pf_dcb(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int err = 0; + + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) + goto out; + + /* Get the initial DCB configuration */ + err = i40e_init_dcb(hw); + if (!err) { + /* Device/Function is not DCBX capable */ + if ((!hw->func_caps.dcb) || + (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { + dev_info(&pf->pdev->dev, + "DCBX offload is not supported or is disabled for this PF.\n"); + + if (pf->flags & I40E_FLAG_MFP_ENABLED) + goto out; + + } else { + /* When status is not DISABLED then DCBX in FW */ + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_IEEE; + + pf->flags |= I40E_FLAG_DCB_CAPABLE; + /* Enable DCB tagging only when more than one TC */ + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) + pf->flags |= I40E_FLAG_DCB_ENABLED; + dev_dbg(&pf->pdev->dev, + "DCBX offload is supported for this PF.\n"); + } + } else { + dev_info(&pf->pdev->dev, + "AQ Querying DCB configuration failed: aq_err %d\n", + pf->hw.aq.asq_last_status); + } + +out: + return err; +} +#endif /* CONFIG_I40E_DCB */ +#define SPEED_SIZE 14 +#define FC_SIZE 8 +/** + * i40e_print_link_message - print link up or down + * @vsi: the VSI for which link needs a message + */ +static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) +{ + char speed[SPEED_SIZE] = "Unknown"; + char fc[FC_SIZE] = "RX/TX"; + + if (!isup) { + netdev_info(vsi->netdev, "NIC Link is Down\n"); + return; + } + + /* Warn user if link speed on NPAR enabled partition is not at + * least 10GB + */ + if (vsi->back->hw.func_caps.npar_enable && + (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || + vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) + 
netdev_warn(vsi->netdev, + "The partition detected link speed that is less than 10Gbps\n"); + + switch (vsi->back->hw.phy.link_info.link_speed) { + case I40E_LINK_SPEED_40GB: + strlcpy(speed, "40 Gbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_20GB: + strncpy(speed, "20 Gbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_10GB: + strlcpy(speed, "10 Gbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_1GB: + strlcpy(speed, "1000 Mbps", SPEED_SIZE); + break; + case I40E_LINK_SPEED_100MB: + strncpy(speed, "100 Mbps", SPEED_SIZE); + break; + default: + break; + } + + switch (vsi->back->hw.fc.current_mode) { + case I40E_FC_FULL: + strlcpy(fc, "RX/TX", FC_SIZE); + break; + case I40E_FC_TX_PAUSE: + strlcpy(fc, "TX", FC_SIZE); + break; + case I40E_FC_RX_PAUSE: + strlcpy(fc, "RX", FC_SIZE); + break; + default: + strlcpy(fc, "None", FC_SIZE); + break; + } + + netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n", + speed, fc); +} + +/** + * i40e_up_complete - Finish the last steps of bringing up a connection + * @vsi: the VSI being configured + **/ +static int i40e_up_complete(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int err; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + i40e_vsi_configure_msix(vsi); + else + i40e_configure_msi_and_legacy(vsi); + + /* start rings */ + err = i40e_vsi_control_rings(vsi, true); + if (err) + return err; + + clear_bit(__I40E_DOWN, &vsi->state); + i40e_napi_enable_all(vsi); + i40e_vsi_enable_irq(vsi); + + if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && + (vsi->netdev)) { + i40e_print_link_message(vsi, true); + netif_tx_start_all_queues(vsi->netdev); + netif_carrier_on(vsi->netdev); + } else if (vsi->netdev) { + i40e_print_link_message(vsi, false); + /* need to check for qualified module here*/ + if ((pf->hw.phy.link_info.link_info & + I40E_AQ_MEDIA_AVAILABLE) && + (!(pf->hw.phy.link_info.an_info & + I40E_AQ_QUALIFIED_MODULE))) + netdev_err(vsi->netdev, + "the driver failed to link because an unqualified module was detected."); + } + + /* replay FDIR SB filters */ + if (vsi->type == I40E_VSI_FDIR) { + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = 0; + if (pf->fd_tcp_rule > 0) { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); + pf->fd_tcp_rule = 0; + } + i40e_fdir_filter_restore(vsi); + } + i40e_service_event_schedule(pf); + + return 0; +} + +/** + * i40e_vsi_reinit_locked - Reset the VSI + * @vsi: the VSI being configured + * + * Rebuild the ring structs after some configuration + * has changed, e.g. MTU size. + **/ +static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + WARN_ON(in_interrupt()); + while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) + usleep_range(1000, 2000); + i40e_down(vsi); + + /* Give a VF some time to respond to the reset. The + * two second wait is based upon the watchdog cycle in + * the VF driver. 
+ */ + if (vsi->type == I40E_VSI_SRIOV) + msleep(2000); + i40e_up(vsi); + clear_bit(__I40E_CONFIG_BUSY, &pf->state); +} + +/** + * i40e_up - Bring the connection back up after being down + * @vsi: the VSI being configured + **/ +int i40e_up(struct i40e_vsi *vsi) +{ + int err; + + err = i40e_vsi_configure(vsi); + if (!err) + err = i40e_up_complete(vsi); + + return err; +} + +/** + * i40e_down - Shutdown the connection processing + * @vsi: the VSI being stopped + **/ +void i40e_down(struct i40e_vsi *vsi) +{ + int i; + + /* It is assumed that the caller of this function + * sets the vsi->state __I40E_DOWN bit. + */ + if (vsi->netdev) { + netif_carrier_off(vsi->netdev); + netif_tx_disable(vsi->netdev); + } + i40e_vsi_disable_irq(vsi); + i40e_vsi_control_rings(vsi, false); + i40e_napi_disable_all(vsi); + + for (i = 0; i < vsi->num_queue_pairs; i++) { + i40e_clean_tx_ring(vsi->tx_rings[i]); + i40e_clean_rx_ring(vsi->rx_rings[i]); + } +} + +/** + * i40e_setup_tc - configure multiple traffic classes + * @netdev: net device to configure + * @tc: number of traffic classes to enable + **/ +#ifdef I40E_FCOE +int i40e_setup_tc(struct net_device *netdev, u8 tc) +#else +static int i40e_setup_tc(struct net_device *netdev, u8 tc) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 enabled_tc = 0; + int ret = -EINVAL; + int i; + + /* Check if DCB enabled to continue */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { + netdev_info(netdev, "DCB is not enabled for adapter\n"); + goto exit; + } + + /* Check if MFP enabled */ + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); + goto exit; + } + + /* Check whether tc count is within enabled limit */ + if (tc > i40e_pf_get_num_tc(pf)) { + netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); + goto exit; + } + + /* Generate TC map for number of tc requested */ + for (i = 0; i < tc; i++) + enabled_tc |= (1 << i); + + /* Requesting same TC configuration as already enabled */ + if (enabled_tc == vsi->tc_config.enabled_tc) + return 0; + + /* Quiesce VSI queues */ + i40e_quiesce_vsi(vsi); + + /* Configure VSI for enabled TCs */ + ret = i40e_vsi_config_tc(vsi, enabled_tc); + if (ret) { + netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", + vsi->seid); + goto exit; + } + + /* Unquiesce VSI */ + i40e_unquiesce_vsi(vsi); + +exit: + return ret; +} + +/** + * i40e_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the netdev watchdog subtask is + * enabled, and the stack is notified that the interface is ready. 
+ * + * Returns 0 on success, negative value on failure + **/ +int i40e_open(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int err; + + /* disallow open during test or if eeprom is broken */ + if (test_bit(__I40E_TESTING, &pf->state) || + test_bit(__I40E_BAD_EEPROM, &pf->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + err = i40e_vsi_open(vsi); + if (err) + return err; + + /* configure global TSO hardware offload settings */ + wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN | + TCP_FLAG_CWR) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); + +#ifdef CONFIG_I40E_VXLAN + vxlan_get_rx_port(netdev); +#endif + + return 0; +} + +/** + * i40e_vsi_open - + * @vsi: the VSI to open + * + * Finish initialization of the VSI. + * + * Returns 0 on success, negative value on failure + **/ +int i40e_vsi_open(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + char int_name[I40E_INT_NAME_STR_LEN]; + int err; + + /* allocate descriptors */ + err = i40e_vsi_setup_tx_resources(vsi); + if (err) + goto err_setup_tx; + err = i40e_vsi_setup_rx_resources(vsi); + if (err) + goto err_setup_rx; + + err = i40e_vsi_configure(vsi); + if (err) + goto err_setup_rx; + + if (vsi->netdev) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", + dev_driver_string(&pf->pdev->dev), vsi->netdev->name); + err = i40e_vsi_request_irq(vsi, int_name); + if (err) + goto err_setup_rx; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (err) + goto err_set_queues; + + } else if (vsi->type == I40E_VSI_FDIR) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); + err = i40e_vsi_request_irq(vsi, int_name); + + } else { + err = -EINVAL; + goto err_setup_rx; + } + + err = i40e_up_complete(vsi); + if (err) + goto err_up_complete; + + return 0; + +err_up_complete: + i40e_down(vsi); +err_set_queues: + i40e_vsi_free_irq(vsi); +err_setup_rx: + i40e_vsi_free_rx_resources(vsi); +err_setup_tx: + i40e_vsi_free_tx_resources(vsi); + if (vsi == pf->vsi[pf->lan_vsi]) + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + return err; +} + +/** + * i40e_fdir_filter_exit - Cleans up the Flow Director accounting + * @pf: Pointer to PF + * + * This function destroys the hlist where all the Flow Director + * filters were saved. + **/ +static void i40e_fdir_filter_exit(struct i40e_pf *pf) +{ + struct i40e_fdir_filter *filter; + struct hlist_node *node2; + + hlist_for_each_entry_safe(filter, node2, + &pf->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + pf->fdir_pf_active_filters = 0; +} + +/** + * i40e_close - Disables a network interface + * @netdev: network interface device structure + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * this netdev interface is disabled. 
+ * + * Returns 0, this is not allowed to fail + **/ +#ifdef I40E_FCOE +int i40e_close(struct net_device *netdev) +#else +static int i40e_close(struct net_device *netdev) +#endif +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + i40e_vsi_close(vsi); + + return 0; +} + +/** + * i40e_do_reset - Start a PF or Core Reset sequence + * @pf: board private structure + * @reset_flags: which reset is requested + * + * The essential difference in resets is that the PF Reset + * doesn't clear the packet buffers, doesn't reset the PE + * firmware, and doesn't bother the other PFs on the chip. + **/ +void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) +{ + u32 val; + + WARN_ON(in_interrupt()); + + if (i40e_check_asq_alive(&pf->hw)) + i40e_vc_notify_reset(pf); + + /* do the biggest reset indicated */ + if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { + + /* Request a Global Reset + * + * This will start the chip's countdown to the actual full + * chip reset event, and a warning interrupt to be sent + * to all PFs, including the requestor. Our handler + * for the warning interrupt will deal with the shutdown + * and recovery of the switch setup. + */ + dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); + val = rd32(&pf->hw, I40E_GLGEN_RTRIG); + val |= I40E_GLGEN_RTRIG_GLOBR_MASK; + wr32(&pf->hw, I40E_GLGEN_RTRIG, val); + + } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) { + + /* Request a Core Reset + * + * Same as Global Reset, except does *not* include the MAC/PHY + */ + dev_dbg(&pf->pdev->dev, "CoreR requested\n"); + val = rd32(&pf->hw, I40E_GLGEN_RTRIG); + val |= I40E_GLGEN_RTRIG_CORER_MASK; + wr32(&pf->hw, I40E_GLGEN_RTRIG, val); + i40e_flush(&pf->hw); + + } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { + + /* Request a PF Reset + * + * Resets only the PF-specific registers + * + * This goes directly to the tear-down and rebuild of + * the switch, since we need to do all the recovery as + * for the Core Reset. 
+ */ + dev_dbg(&pf->pdev->dev, "PFR requested\n"); + i40e_handle_reset_warning(pf); + + } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { + int v; + + /* Find the VSI(s) that requested a re-init */ + dev_info(&pf->pdev->dev, + "VSI reinit requested\n"); + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && + test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { + i40e_vsi_reinit_locked(pf->vsi[v]); + clear_bit(__I40E_REINIT_REQUESTED, &vsi->state); + } + } + + /* no further action needed, so return now */ + return; + } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) { + int v; + + /* Find the VSI(s) that needs to be brought down */ + dev_info(&pf->pdev->dev, "VSI down requested\n"); + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + if (vsi != NULL && + test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) { + set_bit(__I40E_DOWN, &vsi->state); + i40e_down(vsi); + clear_bit(__I40E_DOWN_REQUESTED, &vsi->state); + } + } + + /* no further action needed, so return now */ + return; + } else { + dev_info(&pf->pdev->dev, + "bad reset request 0x%08x\n", reset_flags); + return; + } +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_need_reconfig - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + **/ +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg) +{ + bool need_reconfig = false; + + /* Check if ETS configuration has changed */ + if (memcmp(&new_cfg->etscfg, + &old_cfg->etscfg, + sizeof(new_cfg->etscfg))) { + /* If Priority Table has changed reconfig is needed */ + if (memcmp(&new_cfg->etscfg.prioritytable, + &old_cfg->etscfg.prioritytable, + sizeof(new_cfg->etscfg.prioritytable))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + } + + if (memcmp(&new_cfg->etscfg.tcbwtable, + &old_cfg->etscfg.tcbwtable, + sizeof(new_cfg->etscfg.tcbwtable))) + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + + if (memcmp(&new_cfg->etscfg.tsatable, + &old_cfg->etscfg.tsatable, + sizeof(new_cfg->etscfg.tsatable))) + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + } + + /* Check if PFC configuration has changed */ + if (memcmp(&new_cfg->pfc, + &old_cfg->pfc, + sizeof(new_cfg->pfc))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + } + + /* Check if APP Table has changed */ + if (memcmp(&new_cfg->app, + &old_cfg->app, + sizeof(new_cfg->app))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + } + + dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, + need_reconfig); + return need_reconfig; +} + +/** + * i40e_handle_lldp_event - Handle LLDP Change MIB event + * @pf: board private structure + * @e: event info posted on ARQ + **/ +static int i40e_handle_lldp_event(struct i40e_pf *pf, + struct i40e_arq_event_info *e) +{ + struct i40e_aqc_lldp_get_mib *mib = + (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; + struct i40e_hw *hw = &pf->hw; + struct i40e_dcbx_config tmp_dcbx_cfg; + bool need_reconfig = false; + int ret = 0; + u8 type; + + /* Not DCB capable or capability disabled */ + if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + return ret; + + /* Ignore if event is not for Nearest Bridge */ + type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) + & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib bridge type 0x%x\n", __func__, type); + if 
(type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) + return ret; + + /* Check MIB Type and return if event for Remote MIB update */ + type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib type %s\n", __func__, + type ? "remote" : "local"); + if (type == I40E_AQ_LLDP_MIB_REMOTE) { + /* Update the remote cached instance and return */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + goto exit; + } + + /* Store the old configuration */ + tmp_dcbx_cfg = hw->local_dcbx_config; + + /* Reset the old DCBx configuration data */ + memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); + /* Get updated DCBX data from firmware */ + ret = i40e_get_dcb_config(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); + goto exit; + } + + /* No change detected in DCBX configs */ + if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, + sizeof(tmp_dcbx_cfg))) { + dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); + goto exit; + } + + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, + &hw->local_dcbx_config); + + i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); + + if (!need_reconfig) + goto exit; + + /* Enable DCB tagging only when more than one TC */ + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) + pf->flags |= I40E_FLAG_DCB_ENABLED; + else + pf->flags &= ~I40E_FLAG_DCB_ENABLED; + + set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + /* Reconfiguration needed quiesce all VSIs */ + i40e_pf_quiesce_all_vsi(pf); + + /* Changes in configuration update VEB/VSI */ + i40e_dcb_reconfigure(pf); + + ret = i40e_resume_port_tx(pf); + + clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + /* In case of error no point in resuming VSIs */ + if (ret) + goto exit; + + /* Wait for the PF's Tx queues to be disabled */ + ret = i40e_pf_wait_txq_disabled(pf); + if (ret) { + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } else { + i40e_pf_unquiesce_all_vsi(pf); + } + +exit: + return ret; +} +#endif /* CONFIG_I40E_DCB */ + +/** + * i40e_do_reset_safe - Protected reset path for userland calls. 
+ * @pf: board private structure + * @reset_flags: which reset is requested + * + **/ +void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) +{ + rtnl_lock(); + i40e_do_reset(pf, reset_flags); + rtnl_unlock(); +} + +/** + * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event + * @pf: board private structure + * @e: event info posted on ARQ + * + * Handler for LAN Queue Overflow Event generated by the firmware for PF + * and VF queues + **/ +static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, + struct i40e_arq_event_info *e) +{ + struct i40e_aqc_lan_overflow *data = + (struct i40e_aqc_lan_overflow *)&e->desc.params.raw; + u32 queue = le32_to_cpu(data->prtdcb_rupto); + u32 qtx_ctl = le32_to_cpu(data->otx_ctl); + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + u16 vf_id; + + dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", + queue, qtx_ctl); + + /* Queue belongs to VF, find the VF and issue VF reset */ + if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) + >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) { + vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK) + >> I40E_QTX_CTL_VFVM_INDX_SHIFT); + vf_id -= hw->func_caps.vf_base_id; + vf = &pf->vf[vf_id]; + i40e_vc_notify_vf_reset(vf); + /* Allow VF to process pending reset notification */ + msleep(20); + i40e_reset_vf(vf, false); + } +} + +/** + * i40e_service_event_complete - Finish up the service event + * @pf: board private structure + **/ +static void i40e_service_event_complete(struct i40e_pf *pf) +{ + BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); + + /* flush memory to make sure state is correct before next watchog */ + smp_mb__before_atomic(); + clear_bit(__I40E_SERVICE_SCHED, &pf->state); +} + +/** + * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters + * @pf: board private structure + **/ +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) +{ + u32 val, fcnt_prog; + + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); + fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); + return fcnt_prog; +} + +/** + * i40e_get_current_fd_count - Get total FD filters programmed for this PF + * @pf: board private structure + **/ +u32 i40e_get_current_fd_count(struct i40e_pf *pf) +{ + u32 val, fcnt_prog; + + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); + fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + return fcnt_prog; +} + +/** + * i40e_get_global_fd_count - Get total FD filters programmed on device + * @pf: board private structure + **/ +u32 i40e_get_global_fd_count(struct i40e_pf *pf) +{ + u32 val, fcnt_prog; + + val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); + fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + + ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> + I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); + return fcnt_prog; +} + +/** + * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled + * @pf: board private structure + **/ +void i40e_fdir_check_and_reenable(struct i40e_pf *pf) +{ + u32 fcnt_prog, fcnt_avail; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + + /* Check if, FD SB or ATR was auto disabled and if there is enough room + * to re-enable + */ + fcnt_prog = i40e_get_global_fd_count(pf); + fcnt_avail = pf->fdir_pf_filter_count; + if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || + (pf->fd_add_err == 0) || + (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + 
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); + } + } + /* Wait for some more space to be available to turn on ATR */ + if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + } + } +} + +#define I40E_MIN_FD_FLUSH_INTERVAL 10 +#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30 +/** + * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB + * @pf: board private structure + **/ +static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) +{ + unsigned long min_flush_time; + int flush_wait_retry = 50; + bool disable_atr = false; + int fd_room; + int reg; + + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + + if (time_after(jiffies, pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { + /* If the flush is happening too quick and we have mostly + * SB rules we should not re-enable ATR for some time. + */ + min_flush_time = pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); + fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; + + if (!(time_after(jiffies, min_flush_time)) && + (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + disable_atr = true; + } + + pf->fd_flush_timestamp = jiffies; + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + /* flush all filters */ + wr32(&pf->hw, I40E_PFQF_CTL_1, + I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + i40e_flush(&pf->hw); + pf->fd_flush_cnt++; + pf->fd_add_err = 0; + do { + /* Check FD flush status every 5-6msec */ + usleep_range(5000, 6000); + reg = rd32(&pf->hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } while (flush_wait_retry--); + if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { + dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); + } else { + /* replay sideband filters */ + i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + if (!disable_atr) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); + } + } +} + +/** + * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed + * @pf: board private structure + **/ +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) +{ + return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; +} + +/* We can see up to 256 filter programming desc in transit if the filters are + * being applied really fast; before we see the first + * filter miss error on Rx queue 0. Accumulating enough error messages before + * reacting will make sure we don't cause flush too often. 
+ */ +#define I40E_MAX_FD_PROGRAM_ERROR 256 + +/** + * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table + * @pf: board private structure + **/ +static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) +{ + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &pf->state)) + return; + + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + i40e_fdir_flush_and_replay(pf); + + i40e_fdir_check_and_reenable(pf); + +} + +/** + * i40e_vsi_link_event - notify VSI of a link event + * @vsi: vsi to be notified + * @link_up: link up or down + **/ +static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) +{ + if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) + return; + + switch (vsi->type) { + case I40E_VSI_MAIN: +#ifdef I40E_FCOE + case I40E_VSI_FCOE: +#endif + if (!vsi->netdev || !vsi->netdev_registered) + break; + + if (link_up) { + netif_carrier_on(vsi->netdev); + netif_tx_wake_all_queues(vsi->netdev); + } else { + netif_carrier_off(vsi->netdev); + netif_tx_stop_all_queues(vsi->netdev); + } + break; + + case I40E_VSI_SRIOV: + case I40E_VSI_VMDQ2: + case I40E_VSI_CTRL: + case I40E_VSI_MIRROR: + default: + /* there is no notification for other VSIs */ + break; + } +} + +/** + * i40e_veb_link_event - notify elements on the veb of a link event + * @veb: veb to be notified + * @link_up: link up or down + **/ +static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) +{ + struct i40e_pf *pf; + int i; + + if (!veb || !veb->pf) + return; + pf = veb->pf; + + /* depth first... */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) + i40e_veb_link_event(pf->veb[i], link_up); + + /* ... now the local VSIs */ + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) + i40e_vsi_link_event(pf->vsi[i], link_up); +} + +/** + * i40e_link_event - Update netif_carrier status + * @pf: board private structure + **/ +static void i40e_link_event(struct i40e_pf *pf) +{ + bool new_link, old_link; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + u8 new_link_speed, old_link_speed; + + /* set this to force the get_link_status call to refresh state */ + pf->hw.phy.get_link_info = true; + + old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); + new_link = i40e_get_link_status(&pf->hw); + old_link_speed = pf->hw.phy.link_info_old.link_speed; + new_link_speed = pf->hw.phy.link_info.link_speed; + + if (new_link == old_link && + new_link_speed == old_link_speed && + (test_bit(__I40E_DOWN, &vsi->state) || + new_link == netif_carrier_ok(vsi->netdev))) + return; + + if (!test_bit(__I40E_DOWN, &vsi->state)) + i40e_print_link_message(vsi, new_link); + + /* Notify the base of the switch tree connected to + * the link. Floating VEBs are not notified. + */ + if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) + i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); + else + i40e_vsi_link_event(vsi, new_link); + + if (pf->vf) + i40e_vc_notify_link_state(pf); + + if (pf->flags & I40E_FLAG_PTP) + i40e_ptp_set_increment(pf); +} + +/** + * i40e_check_hang_subtask - Check for hung queues and dropped interrupts + * @pf: board private structure + * + * Set the per-queue flags to request a check for stuck queues in the irq + * clean functions, then force interrupts to be sure the irq clean is called. 
+ **/ +static void i40e_check_hang_subtask(struct i40e_pf *pf) +{ + int i, v; + + /* If we're down or resetting, just bail */ + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) + return; + + /* for each VSI/netdev + * for each Tx queue + * set the check flag + * for each q_vector + * force an interrupt + */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + struct i40e_vsi *vsi = pf->vsi[v]; + int armed = 0; + + if (!pf->vsi[v] || + test_bit(__I40E_DOWN, &vsi->state) || + (vsi->netdev && !netif_carrier_ok(vsi->netdev))) + continue; + + for (i = 0; i < vsi->num_queue_pairs; i++) { + set_check_for_tx_hang(vsi->tx_rings[i]); + if (test_bit(__I40E_HANG_CHECK_ARMED, + &vsi->tx_rings[i]->state)) + armed++; + } + + if (armed) { + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, + (I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); + } else { + u16 vec = vsi->base_vector - 1; + u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); + for (i = 0; i < vsi->num_q_vectors; i++, vec++) + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(vec), val); + } + i40e_flush(&vsi->back->hw); + } + } +} + +/** + * i40e_watchdog_subtask - periodic checks not using event driven response + * @pf: board private structure + **/ +static void i40e_watchdog_subtask(struct i40e_pf *pf) +{ + int i; + + /* if interface is down do nothing */ + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) + return; + + /* make sure we don't do these things too often */ + if (time_before(jiffies, (pf->service_timer_previous + + pf->service_timer_period))) + return; + pf->service_timer_previous = jiffies; + + i40e_check_hang_subtask(pf); + i40e_link_event(pf); + + /* Update the stats for active netdevs so the network stack + * can look at updated numbers whenever it cares to + */ + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->netdev) + i40e_update_stats(pf->vsi[i]); + + /* Update the stats for the active switching components */ + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i]) + i40e_update_veb_stats(pf->veb[i]); + + i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); +} + +/** + * i40e_reset_subtask - Set up for resetting the device and driver + * @pf: board private structure + **/ +static void i40e_reset_subtask(struct i40e_pf *pf) +{ + u32 reset_flags = 0; + + rtnl_lock(); + if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_REINIT_REQUESTED); + clear_bit(__I40E_REINIT_REQUESTED, &pf->state); + } + if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_PF_RESET_REQUESTED); + clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + } + if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED); + clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); + } + if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED); + clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); + } + if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { + reset_flags |= (1 << __I40E_DOWN_REQUESTED); + clear_bit(__I40E_DOWN_REQUESTED, &pf->state); + } + + /* If there's a recovery already waiting, 
it takes + * precedence before starting a new reset sequence. + */ + if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { + i40e_handle_reset_warning(pf); + goto unlock; + } + + /* If we're already down or resetting, just bail */ + if (reset_flags && + !test_bit(__I40E_DOWN, &pf->state) && + !test_bit(__I40E_CONFIG_BUSY, &pf->state)) + i40e_do_reset(pf, reset_flags); + +unlock: + rtnl_unlock(); +} + +/** + * i40e_handle_link_event - Handle link event + * @pf: board private structure + * @e: event info posted on ARQ + **/ +static void i40e_handle_link_event(struct i40e_pf *pf, + struct i40e_arq_event_info *e) +{ + struct i40e_hw *hw = &pf->hw; + struct i40e_aqc_get_link_status *status = + (struct i40e_aqc_get_link_status *)&e->desc.params.raw; + + /* save off old link status information */ + hw->phy.link_info_old = hw->phy.link_info; + + /* Do a new status request to re-enable LSE reporting + * and load new status information into the hw struct + * This completely ignores any state information + * in the ARQ event info, instead choosing to always + * issue the AQ update link status command. + */ + i40e_link_event(pf); + + /* check for unqualified module, if link is down */ + if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && + (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && + (!(status->link_info & I40E_AQ_LINK_UP))) + dev_err(&pf->pdev->dev, + "The driver failed to link because an unqualified module was detected.\n"); +} + +/** + * i40e_clean_adminq_subtask - Clean the AdminQ rings + * @pf: board private structure + **/ +static void i40e_clean_adminq_subtask(struct i40e_pf *pf) +{ + struct i40e_arq_event_info event; + struct i40e_hw *hw = &pf->hw; + u16 pending, i = 0; + i40e_status ret; + u16 opcode; + u32 oldval; + u32 val; + + /* Do not run clean AQ when PF reset fails */ + if (test_bit(__I40E_RESET_FAILED, &pf->state)) + return; + + /* check for error indications */ + val = rd32(&pf->hw, pf->hw.aq.arq.len); + oldval = val; + if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { + dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); + val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; + } + if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { + dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); + val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; + } + if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { + dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); + val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; + } + if (oldval != val) + wr32(&pf->hw, pf->hw.aq.arq.len, val); + + val = rd32(&pf->hw, pf->hw.aq.asq.len); + oldval = val; + if (val & I40E_PF_ATQLEN_ATQVFE_MASK) { + dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); + val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; + } + if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { + dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); + val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; + } + if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { + dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); + val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; + } + if (oldval != val) + wr32(&pf->hw, pf->hw.aq.asq.len, val); + + event.buf_len = I40E_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return; + + do { + ret = i40e_clean_arq_element(hw, &event, &pending); + if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) + break; + else if (ret) { + dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); + break; + } + + opcode = le16_to_cpu(event.desc.opcode); + switch (opcode) { + + case i40e_aqc_opc_get_link_status: + i40e_handle_link_event(pf, &event); + break; + case i40e_aqc_opc_send_msg_to_pf: + ret = 
i40e_vc_process_vf_msg(pf, + le16_to_cpu(event.desc.retval), + le32_to_cpu(event.desc.cookie_high), + le32_to_cpu(event.desc.cookie_low), + event.msg_buf, + event.msg_len); + break; + case i40e_aqc_opc_lldp_update_mib: + dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); +#ifdef CONFIG_I40E_DCB + rtnl_lock(); + ret = i40e_handle_lldp_event(pf, &event); + rtnl_unlock(); +#endif /* CONFIG_I40E_DCB */ + break; + case i40e_aqc_opc_event_lan_overflow: + dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); + i40e_handle_lan_overflow_event(pf, &event); + break; + case i40e_aqc_opc_send_msg_to_peer: + dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); + break; + case i40e_aqc_opc_nvm_erase: + case i40e_aqc_opc_nvm_update: + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + break; + default: + dev_info(&pf->pdev->dev, + "ARQ Error: Unknown event 0x%04x received\n", + opcode); + break; + } + } while (pending && (i++ < pf->adminq_work_limit)); + + clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + /* re-enable Admin queue interrupt cause */ + val = rd32(hw, I40E_PFINT_ICR0_ENA); + val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, val); + i40e_flush(hw); + + kfree(event.msg_buf); +} + +/** + * i40e_verify_eeprom - make sure eeprom is good to use + * @pf: board private structure + **/ +static void i40e_verify_eeprom(struct i40e_pf *pf) +{ + int err; + + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + /* retry in case of garbage read */ + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", + err); + set_bit(__I40E_BAD_EEPROM, &pf->state); + } + } + + if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { + dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); + clear_bit(__I40E_BAD_EEPROM, &pf->state); + } +} + +/** + * i40e_enable_pf_switch_lb + * @pf: pointer to the PF structure + * + * enable switch loop back or die - no point in a return value + **/ +static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_disable_pf_switch_lb + * @pf: pointer to the PF structure + * + * disable switch loop back or die - no point in a return value + **/ +static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + 
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_config_bridge_mode - Configure the HW bridge mode + * @veb: pointer to the bridge instance + * + * Configure the loop back mode for the LAN VSI that is downlink to the + * specified HW bridge instance. It is expected this function is called + * when a new HW bridge is instantiated. + **/ +static void i40e_config_bridge_mode(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + if (veb->bridge_mode & BRIDGE_MODE_VEPA) + i40e_disable_pf_switch_lb(pf); + else + i40e_enable_pf_switch_lb(pf); +} + +/** + * i40e_reconstitute_veb - rebuild the VEB and anything connected to it + * @veb: pointer to the VEB instance + * + * This is a recursive function that first builds the attached VSIs then + * recurses in to build the next layer of VEB. We track the connections + * through our own index numbers because the seid's from the HW could + * change across the reset. + **/ +static int i40e_reconstitute_veb(struct i40e_veb *veb) +{ + struct i40e_vsi *ctl_vsi = NULL; + struct i40e_pf *pf = veb->pf; + int v, veb_idx; + int ret; + + /* build VSI that owns this VEB, temporarily attached to base VEB */ + for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { + if (pf->vsi[v] && + pf->vsi[v]->veb_idx == veb->idx && + pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { + ctl_vsi = pf->vsi[v]; + break; + } + } + if (!ctl_vsi) { + dev_info(&pf->pdev->dev, + "missing owner VSI for veb_idx %d\n", veb->idx); + ret = -ENOENT; + goto end_reconstitute; + } + if (ctl_vsi != pf->vsi[pf->lan_vsi]) + ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; + ret = i40e_add_vsi(ctl_vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "rebuild of owner VSI failed: %d\n", ret); + goto end_reconstitute; + } + i40e_vsi_reset_stats(ctl_vsi); + + /* create the VEB in the switch and move the VSI onto the VEB */ + ret = i40e_add_veb(veb, ctl_vsi); + if (ret) + goto end_reconstitute; + + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + veb->bridge_mode = BRIDGE_MODE_VEB; + else + veb->bridge_mode = BRIDGE_MODE_VEPA; + i40e_config_bridge_mode(veb); + + /* create the remaining VSIs attached to this VEB */ + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) + continue; + + if (pf->vsi[v]->veb_idx == veb->idx) { + struct i40e_vsi *vsi = pf->vsi[v]; + vsi->uplink_seid = veb->seid; + ret = i40e_add_vsi(vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "rebuild of vsi_idx %d failed: %d\n", + v, ret); + goto end_reconstitute; + } + i40e_vsi_reset_stats(vsi); + } + } + + /* create any VEBs attached to this VEB - RECURSION */ + for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { + if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { + pf->veb[veb_idx]->uplink_seid = veb->seid; + ret = i40e_reconstitute_veb(pf->veb[veb_idx]); + if (ret) + break; + } + } + +end_reconstitute: + return ret; +} + +/** + * i40e_get_capabilities - get info about the HW + * @pf: the PF struct + **/ +static int i40e_get_capabilities(struct i40e_pf *pf) +{ + struct i40e_aqc_list_capabilities_element_resp *cap_buf; + u16 data_size; + int buf_len; + 
int err; + + buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); + do { + cap_buf = kzalloc(buf_len, GFP_KERNEL); + if (!cap_buf) + return -ENOMEM; + + /* this loads the data into the hw struct for us */ + err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, + &data_size, + i40e_aqc_opc_list_func_capabilities, + NULL); + /* data loaded, buffer no longer needed */ + kfree(cap_buf); + + if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { + /* retry with a larger buffer */ + buf_len = data_size; + } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { + dev_info(&pf->pdev->dev, + "capability discovery failed: aq=%d\n", + pf->hw.aq.asq_last_status); + return -ENODEV; + } + } while (err); + + if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || + (pf->hw.aq.fw_maj_ver < 2)) { + pf->hw.func_caps.num_msix_vectors++; + pf->hw.func_caps.num_msix_vectors_vf++; + } + + if (pf->hw.debug_mask & I40E_DEBUG_USER) + dev_info(&pf->pdev->dev, + "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", + pf->hw.pf_id, pf->hw.func_caps.num_vfs, + pf->hw.func_caps.num_msix_vectors, + pf->hw.func_caps.num_msix_vectors_vf, + pf->hw.func_caps.fd_filters_guaranteed, + pf->hw.func_caps.fd_filters_best_effort, + pf->hw.func_caps.num_tx_qp, + pf->hw.func_caps.num_vsis); + +#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ + + pf->hw.func_caps.num_vfs) + if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { + dev_info(&pf->pdev->dev, + "got num_vsis %d, setting num_vsis to %d\n", + pf->hw.func_caps.num_vsis, DEF_NUM_VSI); + pf->hw.func_caps.num_vsis = DEF_NUM_VSI; + } + + return 0; +} + +static int i40e_vsi_clear(struct i40e_vsi *vsi); + +/** + * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband + * @pf: board private structure + **/ +static void i40e_fdir_sb_setup(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi; + int i; + + /* quick workaround for an NVM issue that leaves a critical register + * uninitialized + */ + if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { + static const u32 hkey[] = { + 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36, + 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, + 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, + 0x95b3a76d}; + + for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) + wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); + } + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + + /* find existing VSI and see if it needs configuring */ + vsi = NULL; + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { + vsi = pf->vsi[i]; + break; + } + } + + /* create a new VSI if none exists */ + if (!vsi) { + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, + pf->vsi[pf->lan_vsi]->seid, 0); + if (!vsi) { + dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + return; + } + } + + i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); +} + +/** + * i40e_fdir_teardown - release the Flow Director resources + * @pf: board private structure + **/ +static void i40e_fdir_teardown(struct i40e_pf *pf) +{ + int i; + + i40e_fdir_filter_exit(pf); + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { + i40e_vsi_release(pf->vsi[i]); + break; + } + } +} + +/** + * i40e_prep_for_reset - prep for the core to reset + * @pf: board private structure + * + * Close up the VFs and other things in prep for PF Reset. 
+ **/ +static void i40e_prep_for_reset(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + i40e_status ret = 0; + u32 v; + + clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); + if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) + return; + + dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); + + /* quiesce the VSIs and their queues that are not already DOWN */ + i40e_pf_quiesce_all_vsi(pf); + + for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v]) + pf->vsi[v]->seid = 0; + } + + i40e_shutdown_adminq(&pf->hw); + + /* call shutdown HMC */ + if (hw->hmc.hmc_obj) { + ret = i40e_shutdown_lan_hmc(hw); + if (ret) + dev_warn(&pf->pdev->dev, + "shutdown_lan_hmc failed: %d\n", ret); + } +} + +/** + * i40e_send_version - update firmware with driver version + * @pf: PF struct + */ +static void i40e_send_version(struct i40e_pf *pf) +{ + struct i40e_driver_version dv; + + dv.major_version = DRV_VERSION_MAJOR; + dv.minor_version = DRV_VERSION_MINOR; + dv.build_version = DRV_VERSION_BUILD; + dv.subbuild_version = 0; + strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); + i40e_aq_send_driver_version(&pf->hw, &dv, NULL); +} + +/** + * i40e_reset_and_rebuild - reset and rebuild using a saved config + * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized. + **/ +static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) +{ + struct i40e_hw *hw = &pf->hw; + u8 set_fc_aq_fail = 0; + i40e_status ret; + u32 v; + + /* Now we wait for GRST to settle out. + * We don't have to delete the VEBs or VSIs from the hw switch + * because the reset will make them disappear. + */ + ret = i40e_pf_reset(hw); + if (ret) { + dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); + set_bit(__I40E_RESET_FAILED, &pf->state); + goto clear_recovery; + } + pf->pfr_count++; + + if (test_bit(__I40E_DOWN, &pf->state)) + goto clear_recovery; + dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); + + /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ + ret = i40e_init_adminq(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); + goto clear_recovery; + } + + /* re-verify the eeprom if we just had an EMP reset */ + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) + i40e_verify_eeprom(pf); + + i40e_clear_pxe_mode(hw); + ret = i40e_get_capabilities(pf); + if (ret) { + dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", + ret); + goto end_core_reset; + } + + ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, + pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); + if (ret) { + dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); + goto end_core_reset; + } + ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); + if (ret) { + dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); + goto end_core_reset; + } + +#ifdef CONFIG_I40E_DCB + ret = i40e_init_pf_dcb(pf); + if (ret) { + dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + /* Continue without DCB enabled */ + } +#endif /* CONFIG_I40E_DCB */ +#ifdef I40E_FCOE + ret = i40e_init_pf_fcoe(pf); + if (ret) + dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret); + +#endif + /* do basic switch setup */ + ret = i40e_setup_pf_switch(pf, reinit); + if (ret) + goto end_core_reset; + + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + ret = 
i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (ret) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + + /* make sure our flow control settings are restored */ + ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); + if (ret) + dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + + /* Rebuild the VSIs and VEBs that existed before reset. + * They are still in our local switch element arrays, so only + * need to rebuild the switch model in the HW. + * + * If there were VEBs but the reconstitution failed, we'll try + * try to recover minimal use by getting the basic PF VSI working. + */ + if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { + dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); + /* find the one VEB connected to the MAC, and find orphans */ + for (v = 0; v < I40E_MAX_VEB; v++) { + if (!pf->veb[v]) + continue; + + if (pf->veb[v]->uplink_seid == pf->mac_seid || + pf->veb[v]->uplink_seid == 0) { + ret = i40e_reconstitute_veb(pf->veb[v]); + + if (!ret) + continue; + + /* If Main VEB failed, we're in deep doodoo, + * so give up rebuilding the switch and set up + * for minimal rebuild of PF VSI. + * If orphan failed, we'll report the error + * but try to keep going. + */ + if (pf->veb[v]->uplink_seid == pf->mac_seid) { + dev_info(&pf->pdev->dev, + "rebuild of switch failed: %d, will try to set up simple PF connection\n", + ret); + pf->vsi[pf->lan_vsi]->uplink_seid + = pf->mac_seid; + break; + } else if (pf->veb[v]->uplink_seid == 0) { + dev_info(&pf->pdev->dev, + "rebuild of orphan VEB failed: %d\n", + ret); + } + } + } + } + + if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { + dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); + /* no VEB, so rebuild only the Main VSI */ + ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); + if (ret) { + dev_info(&pf->pdev->dev, + "rebuild of Main VSI failed: %d\n", ret); + goto end_core_reset; + } + } + + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* reinit the misc interrupt */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + ret = i40e_setup_misc_vector(pf); + + /* restart the VSIs that were rebuilt and running before the reset */ + i40e_pf_unquiesce_all_vsi(pf); + + if (pf->num_alloc_vfs) { + for (v = 0; v < pf->num_alloc_vfs; v++) + i40e_reset_vf(&pf->vf[v], true); + } + + /* tell the firmware that we're starting */ + i40e_send_version(pf); + +end_core_reset: + clear_bit(__I40E_RESET_FAILED, &pf->state); +clear_recovery: + clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); +} + +/** + * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild + * @pf: board private structure + * + * Close up the VFs and other things in prep for a Core Reset, + * then get ready to rebuild the world. 
+ **/ +static void i40e_handle_reset_warning(struct i40e_pf *pf) +{ + i40e_prep_for_reset(pf); + i40e_reset_and_rebuild(pf, false); +} + +/** + * i40e_handle_mdd_event + * @pf: pointer to the PF structure + * + * Called from the MDD irq handler to identify possibly malicious vfs + **/ +static void i40e_handle_mdd_event(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + bool mdd_detected = false; + bool pf_mdd_detected = false; + struct i40e_vf *vf; + u32 reg; + int i; + + if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) + return; + + /* find what triggered the MDD event */ + reg = rd32(hw, I40E_GL_MDET_TX); + if (reg & I40E_GL_MDET_TX_VALID_MASK) { + u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> + I40E_GL_MDET_TX_PF_NUM_SHIFT; + u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + I40E_GL_MDET_TX_VF_NUM_SHIFT; + u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> + I40E_GL_MDET_TX_EVENT_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", + event, queue, pf_num, vf_num); + wr32(hw, I40E_GL_MDET_TX, 0xffffffff); + mdd_detected = true; + } + reg = rd32(hw, I40E_GL_MDET_RX); + if (reg & I40E_GL_MDET_RX_VALID_MASK) { + u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> + I40E_GL_MDET_RX_FUNCTION_SHIFT; + u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> + I40E_GL_MDET_RX_EVENT_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", + event, queue, func); + wr32(hw, I40E_GL_MDET_RX, 0xffffffff); + mdd_detected = true; + } + + if (mdd_detected) { + reg = rd32(hw, I40E_PF_MDET_TX); + if (reg & I40E_PF_MDET_TX_VALID_MASK) { + wr32(hw, I40E_PF_MDET_TX, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + reg = rd32(hw, I40E_PF_MDET_RX); + if (reg & I40E_PF_MDET_RX_VALID_MASK) { + wr32(hw, I40E_PF_MDET_RX, 0xFFFF); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + /* Queue belongs to the PF, initiate a reset */ + if (pf_mdd_detected) { + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + } + + /* see if one of the VFs needs its hand slapped */ + for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { + vf = &(pf->vf[i]); + reg = rd32(hw, I40E_VP_MDET_TX(i)); + if (reg & I40E_VP_MDET_TX_VALID_MASK) { + wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); + } + + reg = rd32(hw, I40E_VP_MDET_RX(i)); + if (reg & I40E_VP_MDET_RX_VALID_MASK) { + wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); + vf->num_mdd_events++; + dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + i); + } + + if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { + dev_info(&pf->pdev->dev, + "Too many MDD events on VF %d, disabled\n", i); + dev_info(&pf->pdev->dev, + "Use PF Control I/F to re-enable the VF\n"); + set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + } + } + + /* re-enable mdd interrupt cause */ + clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; + wr32(hw, 
I40E_PFINT_ICR0_ENA, reg); + i40e_flush(hw); +} + +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW + * @pf: board private structure + **/ +static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + __be16 port; + int i; + + if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) + return; + + pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->pending_vxlan_bitmap & (1 << i)) { + pf->pending_vxlan_bitmap &= ~(1 << i); + port = pf->vxlan_ports[i]; + if (port) + ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), + I40E_AQC_TUNNEL_TYPE_VXLAN, + NULL, NULL); + else + ret = i40e_aq_del_udp_tunnel(hw, i, NULL); + + if (ret) { + dev_info(&pf->pdev->dev, + "%s vxlan port %d, index %d failed, err %d, aq_err %d\n", + port ? "add" : "delete", + ntohs(port), i, ret, + pf->hw.aq.asq_last_status); + pf->vxlan_ports[i] = 0; + } + } + } +} + +#endif +/** + * i40e_service_task - Run the driver's async subtasks + * @work: pointer to work_struct containing our data + **/ +static void i40e_service_task(struct work_struct *work) +{ + struct i40e_pf *pf = container_of(work, + struct i40e_pf, + service_task); + unsigned long start_time = jiffies; + + /* don't bother with service tasks if a reset is in progress */ + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + i40e_service_event_complete(pf); + return; + } + + i40e_reset_subtask(pf); + i40e_handle_mdd_event(pf); + i40e_vc_process_vflr_event(pf); + i40e_watchdog_subtask(pf); + i40e_fdir_reinit_subtask(pf); + i40e_sync_filters_subtask(pf); +#ifdef CONFIG_I40E_VXLAN + i40e_sync_vxlan_filters_subtask(pf); +#endif + i40e_clean_adminq_subtask(pf); + + i40e_service_event_complete(pf); + + /* If the tasks have taken longer than one timer cycle or there + * is more work to be done, reschedule the service task now + * rather than wait for the timer to tick again. 
+ */ + if (time_after(jiffies, (start_time + pf->service_timer_period)) || + test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || + test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || + test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + i40e_service_event_schedule(pf); +} + +/** + * i40e_service_timer - timer callback + * @data: pointer to PF struct + **/ +static void i40e_service_timer(unsigned long data) +{ + struct i40e_pf *pf = (struct i40e_pf *)data; + + mod_timer(&pf->service_timer, + round_jiffies(jiffies + pf->service_timer_period)); + i40e_service_event_schedule(pf); +} + +/** + * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI + * @vsi: the VSI being configured + **/ +static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + switch (vsi->type) { + case I40E_VSI_MAIN: + vsi->alloc_queue_pairs = pf->num_lan_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + vsi->num_q_vectors = pf->num_lan_msix; + else + vsi->num_q_vectors = 1; + + break; + + case I40E_VSI_FDIR: + vsi->alloc_queue_pairs = 1; + vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, + I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_q_vectors = 1; + break; + + case I40E_VSI_VMDQ2: + vsi->alloc_queue_pairs = pf->num_vmdq_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_q_vectors = pf->num_vmdq_msix; + break; + + case I40E_VSI_SRIOV: + vsi->alloc_queue_pairs = pf->num_vf_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + break; + +#ifdef I40E_FCOE + case I40E_VSI_FCOE: + vsi->alloc_queue_pairs = pf->num_fcoe_qps; + vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, + I40E_REQ_DESCRIPTOR_MULTIPLE); + vsi->num_q_vectors = pf->num_fcoe_msix; + break; + +#endif /* I40E_FCOE */ + default: + WARN_ON(1); + return -ENODATA; + } + + return 0; +} + +/** + * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi + * @type: VSI pointer + * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. + * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) +{ + int size; + int ret = 0; + + /* allocate memory for both Tx and Rx ring pointers */ + size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; + vsi->tx_rings = kzalloc(size, GFP_KERNEL); + if (!vsi->tx_rings) + return -ENOMEM; + vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; + + if (alloc_qvectors) { + /* allocate memory for q_vector pointers */ + size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; + vsi->q_vectors = kzalloc(size, GFP_KERNEL); + if (!vsi->q_vectors) { + ret = -ENOMEM; + goto err_vectors; + } + } + return ret; + +err_vectors: + kfree(vsi->tx_rings); + return ret; +} + +/** + * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF + * @pf: board private structure + * @type: type of VSI + * + * On error: returns error code (negative) + * On success: returns vsi index in PF (positive) + **/ +static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) +{ + int ret = -ENODEV; + struct i40e_vsi *vsi; + int vsi_idx; + int i; + + /* Need to protect the allocation of the VSIs at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* VSI list may be fragmented if VSI creation/destruction has + * been happening. 
We can afford to do a quick scan to look + * for any free VSIs in the list. + * + * find next empty vsi slot, looping back around if necessary + */ + i = pf->next_vsi; + while (i < pf->num_alloc_vsi && pf->vsi[i]) + i++; + if (i >= pf->num_alloc_vsi) { + i = 0; + while (i < pf->next_vsi && pf->vsi[i]) + i++; + } + + if (i < pf->num_alloc_vsi && !pf->vsi[i]) { + vsi_idx = i; /* Found one! */ + } else { + ret = -ENODEV; + goto unlock_pf; /* out of VSI slots! */ + } + pf->next_vsi = ++i; + + vsi = kzalloc(sizeof(*vsi), GFP_KERNEL); + if (!vsi) { + ret = -ENOMEM; + goto unlock_pf; + } + vsi->type = type; + vsi->back = pf; + set_bit(__I40E_DOWN, &vsi->state); + vsi->flags = 0; + vsi->idx = vsi_idx; + vsi->rx_itr_setting = pf->rx_itr_default; + vsi->tx_itr_setting = pf->tx_itr_default; + vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? + pf->rss_table_size : 64; + vsi->netdev_registered = false; + vsi->work_limit = I40E_DEFAULT_IRQ_WORK; + INIT_LIST_HEAD(&vsi->mac_filter_list); + vsi->irqs_ready = false; + + ret = i40e_set_num_rings_in_vsi(vsi); + if (ret) + goto err_rings; + + ret = i40e_vsi_alloc_arrays(vsi, true); + if (ret) + goto err_rings; + + /* Setup default MSIX irq handler for VSI */ + i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); + + pf->vsi[vsi_idx] = vsi; + ret = vsi_idx; + goto unlock_pf; + +err_rings: + pf->next_vsi = i - 1; + kfree(vsi); +unlock_pf: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +/** + * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI + * @type: VSI pointer + * @free_qvectors: a bool to specify if q_vectors need to be freed. + * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) +{ + /* free the ring and vector containers */ + if (free_qvectors) { + kfree(vsi->q_vectors); + vsi->q_vectors = NULL; + } + kfree(vsi->tx_rings); + vsi->tx_rings = NULL; + vsi->rx_rings = NULL; +} + +/** + * i40e_vsi_clear - Deallocate the VSI provided + * @vsi: the VSI being un-configured + **/ +static int i40e_vsi_clear(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf; + + if (!vsi) + return 0; + + if (!vsi->back) + goto free_vsi; + pf = vsi->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->vsi[vsi->idx]) { + dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", + vsi->idx, vsi->idx, vsi, vsi->type); + goto unlock_vsi; + } + + if (pf->vsi[vsi->idx] != vsi) { + dev_err(&pf->pdev->dev, + "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", + pf->vsi[vsi->idx]->idx, + pf->vsi[vsi->idx], + pf->vsi[vsi->idx]->type, + vsi->idx, vsi, vsi->type); + goto unlock_vsi; + } + + /* updates the PF for this cleared vsi */ + i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); + i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); + + i40e_vsi_free_arrays(vsi, true); + + pf->vsi[vsi->idx] = NULL; + if (vsi->idx < pf->next_vsi) + pf->next_vsi = vsi->idx; + +unlock_vsi: + mutex_unlock(&pf->switch_mutex); +free_vsi: + kfree(vsi); + + return 0; +} + +/** + * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI + * @vsi: the VSI being cleaned + **/ +static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) +{ + int i; + + if (vsi->tx_rings && vsi->tx_rings[0]) { + for (i = 0; i < vsi->alloc_queue_pairs; i++) { + kfree_rcu(vsi->tx_rings[i], rcu); + vsi->tx_rings[i] = NULL; + vsi->rx_rings[i] = NULL; + } + } +} + +/** + * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI + * @vsi: the 
VSI being configured + **/ +static int i40e_alloc_rings(struct i40e_vsi *vsi) +{ + struct i40e_ring *tx_ring, *rx_ring; + struct i40e_pf *pf = vsi->back; + int i; + + /* Set basic values in the rings to be used later during open() */ + for (i = 0; i < vsi->alloc_queue_pairs; i++) { + /* allocate space for both Tx and Rx in one shot */ + tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); + if (!tx_ring) + goto err_out; + + tx_ring->queue_index = i; + tx_ring->reg_idx = vsi->base_queue + i; + tx_ring->ring_active = false; + tx_ring->vsi = vsi; + tx_ring->netdev = vsi->netdev; + tx_ring->dev = &pf->pdev->dev; + tx_ring->count = vsi->num_desc; + tx_ring->size = 0; + tx_ring->dcb_tc = 0; + vsi->tx_rings[i] = tx_ring; + + rx_ring = &tx_ring[1]; + rx_ring->queue_index = i; + rx_ring->reg_idx = vsi->base_queue + i; + rx_ring->ring_active = false; + rx_ring->vsi = vsi; + rx_ring->netdev = vsi->netdev; + rx_ring->dev = &pf->pdev->dev; + rx_ring->count = vsi->num_desc; + rx_ring->size = 0; + rx_ring->dcb_tc = 0; + if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) + set_ring_16byte_desc_enabled(rx_ring); + else + clear_ring_16byte_desc_enabled(rx_ring); + vsi->rx_rings[i] = rx_ring; + } + + return 0; + +err_out: + i40e_vsi_clear_rings(vsi); + return -ENOMEM; +} + +/** + * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel + * @pf: board private structure + * @vectors: the number of MSI-X vectors to request + * + * Returns the number of vectors reserved, or error + **/ +static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) +{ + vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, + I40E_MIN_MSIX, vectors); + if (vectors < 0) { + dev_info(&pf->pdev->dev, + "MSI-X vector reservation failed: %d\n", vectors); + vectors = 0; + } + + return vectors; +} + +/** + * i40e_init_msix - Setup the MSIX capability + * @pf: board private structure + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns the number of vectors reserved or negative on failure + **/ +static int i40e_init_msix(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int vectors_left; + int v_budget, i; + int v_actual; + + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + return -ENODEV; + + /* The number of vectors we'll request will be comprised of: + * - Add 1 for "other" cause for Admin Queue events, etc. + * - The number of LAN queue pairs + * - Queues being used for RSS. + * We don't need as many as max_rss_size vectors. + * use rss_size instead in the calculation since that + * is governed by number of cpus in the system. + * - assumes symmetric Tx/Rx pairing + * - The number of VMDq pairs +#ifdef I40E_FCOE + * - The number of FCOE qps. +#endif + * Once we count this up, try the request. + * + * If we can't get what we want, we'll simplify to nearly nothing + * and try again. If that still fails, we punt. + */ + vectors_left = hw->func_caps.num_msix_vectors; + v_budget = 0; + + /* reserve one vector for miscellaneous handler */ + if (vectors_left) { + v_budget++; + vectors_left--; + } + + /* reserve vectors for the main PF traffic queues */ + pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); + vectors_left -= pf->num_lan_msix; + v_budget += pf->num_lan_msix; + + /* reserve one vector for sideband flow director */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (vectors_left) { + v_budget++; + vectors_left--; + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + } + } + +#ifdef I40E_FCOE + /* can we reserve enough for FCoE? 
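+ * If so it gets one vector per FCoE queue pair; otherwise it falls back
+ * to a single shared vector, or none when nothing is left.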
*/ + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + if (!vectors_left) + pf->num_fcoe_msix = 0; + else if (vectors_left >= pf->num_fcoe_qps) + pf->num_fcoe_msix = pf->num_fcoe_qps; + else + pf->num_fcoe_msix = 1; + v_budget += pf->num_fcoe_msix; + vectors_left -= pf->num_fcoe_msix; + } + +#endif + /* any vectors left over go for VMDq support */ + if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; + int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); + + /* if we're short on vectors for what's desired, we limit + * the queues per vmdq. If this is still more than are + * available, the user will need to change the number of + * queues/vectors used by the PF later with the ethtool + * channels command + */ + if (vmdq_vecs < vmdq_vecs_wanted) + pf->num_vmdq_qps = 1; + pf->num_vmdq_msix = pf->num_vmdq_qps; + + v_budget += vmdq_vecs; + vectors_left -= vmdq_vecs; + } + + pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), + GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < v_budget; i++) + pf->msix_entries[i].entry = i; + v_actual = i40e_reserve_msix_vectors(pf, v_budget); + + if (v_actual != v_budget) { + /* If we have limited resources, we will start with no vectors + * for the special features and then allocate vectors to some + * of these features based on the policy and at the end disable + * the features that did not get any vectors. + */ +#ifdef I40E_FCOE + pf->num_fcoe_qps = 0; + pf->num_fcoe_msix = 0; +#endif + pf->num_vmdq_msix = 0; + } + + if (v_actual < I40E_MIN_MSIX) { + pf->flags &= ~I40E_FLAG_MSIX_ENABLED; + kfree(pf->msix_entries); + pf->msix_entries = NULL; + return -ENODEV; + + } else if (v_actual == I40E_MIN_MSIX) { + /* Adjust for minimal MSIX use */ + pf->num_vmdq_vsis = 0; + pf->num_vmdq_qps = 0; + pf->num_lan_qps = 1; + pf->num_lan_msix = 1; + + } else if (v_actual != v_budget) { + int vec; + + /* reserve the misc vector */ + vec = v_actual - 1; + + /* Scale vector usage down */ + pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ + pf->num_vmdq_vsis = 1; + pf->num_vmdq_qps = 1; + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + + /* partition out the remaining vectors */ + switch (vec) { + case 2: + pf->num_lan_msix = 1; + break; + case 3: +#ifdef I40E_FCOE + /* give one vector to FCoE */ + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + pf->num_lan_msix = 1; + pf->num_fcoe_msix = 1; + } +#else + pf->num_lan_msix = 2; +#endif + break; + default: +#ifdef I40E_FCOE + /* give one vector to FCoE */ + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + pf->num_fcoe_msix = 1; + vec--; + } +#endif + /* give the rest to the PF */ + pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); + break; + } + } + + if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && + (pf->num_vmdq_msix == 0)) { + dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); + pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; + } +#ifdef I40E_FCOE + + if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { + dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); + pf->flags &= ~I40E_FLAG_FCOE_ENABLED; + } +#endif + return v_actual; +} + +/** + * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the vsi struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
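+ * When the VSI has a netdev, the vector's NAPI context is registered here
+ * as well (i40e_napi_poll with the default NAPI_POLL_WEIGHT).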
+ **/ +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +{ + struct i40e_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->vsi = vsi; + q_vector->v_idx = v_idx; + cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + if (vsi->netdev) + netif_napi_add(vsi->netdev, &q_vector->napi, + i40e_napi_poll, NAPI_POLL_WEIGHT); + + q_vector->rx.latency_range = I40E_LOW_LATENCY; + q_vector->tx.latency_range = I40E_LOW_LATENCY; + + /* tie q_vector and vsi together */ + vsi->q_vectors[v_idx] = q_vector; + + return 0; +} + +/** + * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors + * @vsi: the VSI being configured + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int v_idx, num_q_vectors; + int err; + + /* if not MSIX, give the one vector only to the LAN VSI */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + num_q_vectors = vsi->num_q_vectors; + else if (vsi == pf->vsi[pf->lan_vsi]) + num_q_vectors = 1; + else + return -EINVAL; + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = i40e_vsi_alloc_q_vector(vsi, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + i40e_free_q_vector(vsi, v_idx); + + return err; +} + +/** + * i40e_init_interrupt_scheme - Determine proper interrupt scheme + * @pf: board private structure to initialize + **/ +static int i40e_init_interrupt_scheme(struct i40e_pf *pf) +{ + int vectors = 0; + ssize_t size; + + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + vectors = i40e_init_msix(pf); + if (vectors < 0) { + pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | +#ifdef I40E_FCOE + I40E_FLAG_FCOE_ENABLED | +#endif + I40E_FLAG_RSS_ENABLED | + I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_SRIOV_ENABLED | + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_VMDQ_ENABLED); + + /* rework the queue expectations without MSIX */ + i40e_determine_queue_usage(pf); + } + } + + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && + (pf->flags & I40E_FLAG_MSI_ENABLED)) { + dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); + vectors = pci_enable_msi(pf->pdev); + if (vectors < 0) { + dev_info(&pf->pdev->dev, "MSI init failed - %d\n", + vectors); + pf->flags &= ~I40E_FLAG_MSI_ENABLED; + } + vectors = 1; /* one MSI or Legacy vector */ + } + + if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) + dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); + + /* set up vector assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + return -ENOMEM; + } + pf->irq_pile->num_entries = vectors; + pf->irq_pile->search_hint = 0; + + /* track first vector for misc interrupts, ignore return */ + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); + + return 0; +} + +/** + * i40e_setup_misc_vector - Setup the misc vector to handle non queue events + * @pf: board private structure + * + * This sets up the handler for MSIX 0, which is used to manage the + * non-queue interrupts, e.g. AdminQ and errors. This is not used + * when in MSI or Legacy interrupt mode. 
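+ * The IRQ itself is only requested the first time through; on a rebuild
+ * after a reset the existing handler is kept and only the interrupt
+ * causes are re-enabled.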
+ **/ +static int i40e_setup_misc_vector(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int err = 0; + + /* Only request the irq if this is the first time through, and + * not when we're rebuilding after a Reset + */ + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + err = request_irq(pf->msix_entries[0].vector, + i40e_intr, 0, pf->int_name, pf); + if (err) { + dev_info(&pf->pdev->dev, + "request_irq for %s failed: %d\n", + pf->int_name, err); + return -EFAULT; + } + } + + i40e_enable_misc_int_causes(pf); + + /* associate no queues to the misc vector */ + wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST); + wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K); + + i40e_flush(hw); + + i40e_irq_dynamic_enable_icr0(pf); + + return err; +} + +/** + * i40e_config_rss - Prepare for RSS if used + * @pf: board private structure + **/ +static int i40e_config_rss(struct i40e_pf *pf) +{ + u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_hw *hw = &pf->hw; + u32 lut = 0; + int i, j; + u64 hena; + u32 reg_val; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); + + /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ + hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | + ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + hena |= I40E_DEFAULT_RSS_HENA; + wr32(hw, I40E_PFQF_HENA(0), (u32)hena); + wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + + /* Check capability and Set table size and register per hw expectation*/ + reg_val = rd32(hw, I40E_PFQF_CTL_0); + if (pf->rss_table_size == 512) + reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; + else + reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; + wr32(hw, I40E_PFQF_CTL_0, reg_val); + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) { + + /* The assumption is that lan qp count will be the highest + * qp count for any PF VSI that needs RSS. + * If multiple VSIs need RSS support, all the qp counts + * for those VSIs should be a power of 2 for RSS to work. + * If LAN VSI is the only consumer for RSS then this requirement + * is not necessary. + */ + if (j == vsi->rss_size) + j = 0; + /* lut = 4-byte sliding window of 4 lut entries */ + lut = (lut << 8) | (j & + ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); + /* On i = 3, we have 4 entries in lut; write to the register */ + if ((i & 3) == 3) + wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + i40e_flush(hw); + + return 0; +} + +/** + * i40e_reconfig_rss_queues - change number of queues for rss and rebuild + * @pf: board private structure + * @queue_count: the requested queue count for rss. + * + * returns 0 if rss is not enabled, if enabled returns the final rss queue + * count which may be different from the requested queue count. 
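+ * For example, a request larger than pf->rss_size_max is quietly trimmed
+ * to that maximum before the reset and rebuild are triggered.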
+ **/ +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + int new_rss_size; + + if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) + return 0; + + new_rss_size = min_t(int, queue_count, pf->rss_size_max); + + if (queue_count != vsi->num_queue_pairs) { + vsi->req_queue_pairs = queue_count; + i40e_prep_for_reset(pf); + + pf->rss_size = new_rss_size; + + i40e_reset_and_rebuild(pf, true); + i40e_config_rss(pf); + } + dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); + return pf->rss_size; +} + +/** + * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) +{ + i40e_status status; + bool min_valid, max_valid; + u32 max_bw, min_bw; + + status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, + &min_valid, &max_valid); + + if (!status) { + if (min_valid) + pf->npar_min_bw = min_bw; + if (max_valid) + pf->npar_max_bw = max_bw; + } + + return status; +} + +/** + * i40e_set_npar_bw_setting - Set BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) +{ + struct i40e_aqc_configure_partition_bw_data bw_data; + i40e_status status; + + /* Set the valid bit for this PF */ + bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; + bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; + + /* Set the new bandwidths */ + status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); + + return status; +} + +/** + * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) +{ + /* Commit temporary BW setting to permanent NVM image */ + enum i40e_admin_queue_err last_aq_status; + i40e_status ret; + u16 nvm_word; + + if (pf->hw.partition_id != 1) { + dev_info(&pf->pdev->dev, + "Commit BW only works on partition 1! This is partition %d", + pf->hw.partition_id); + ret = I40E_NOT_SUPPORTED; + goto bw_commit_out; + } + + /* Acquire NVM for read access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for read access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Read word 0x10 of NVM - SW compatibility word 1 */ + ret = i40e_aq_read_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), &nvm_word, + false, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Wait a bit for NVM release to complete */ + msleep(50); + + /* Acquire NVM for write access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for write access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + /* Write it back out unchanged to initiate update NVM, + * which will force a write of the shadow (alt) RAM to + * the NVM - thus storing the bandwidth values permanently. 
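+ * The word written is the SW compatibility word read above, unmodified;
+ * the update request itself is what triggers the commit.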
+ */ + ret = i40e_aq_update_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), + &nvm_word, true, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) + dev_info(&pf->pdev->dev, + "BW settings NOT SAVED, err %d aq_err %d\n", + ret, last_aq_status); +bw_commit_out: + + return ret; +} + +/** + * i40e_sw_init - Initialize general software structures (struct i40e_pf) + * @pf: board private structure to initialize + * + * i40e_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int i40e_sw_init(struct i40e_pf *pf) +{ + int err = 0; + int size; + + pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, + (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); + pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; + if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { + if (I40E_DEBUG_USER & debug) + pf->hw.debug_mask = debug; + pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), + I40E_DEFAULT_MSG_ENABLE); + } + + /* Set default capability flags */ + pf->flags = I40E_FLAG_RX_CSUM_ENABLED | + I40E_FLAG_MSI_ENABLED | + I40E_FLAG_MSIX_ENABLED; + + if (iommu_present(&pci_bus_type)) + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + else + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; + + /* Set default ITR */ + pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; + pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; + + /* Depending on PF configurations, it is possible that the RSS + * maximum might end up larger than the available queues + */ + pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; + pf->rss_size = 1; + pf->rss_table_size = pf->hw.func_caps.rss_table_size; + pf->rss_size_max = min_t(int, pf->rss_size_max, + pf->hw.func_caps.num_tx_qp); + if (pf->hw.func_caps.rss) { + pf->flags |= I40E_FLAG_RSS_ENABLED; + pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); + } + + /* MFP mode enabled */ + if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { + pf->flags |= I40E_FLAG_MFP_ENABLED; + dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); + if (i40e_get_npar_bw_setting(pf)) + dev_warn(&pf->pdev->dev, + "Could not get NPAR bw settings\n"); + else + dev_info(&pf->pdev->dev, + "Min BW = %8.8x, Max BW = %8.8x\n", + pf->npar_min_bw, pf->npar_max_bw); + } + + /* FW/NVM is not yet fixed in this regard */ + if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || + (pf->hw.func_caps.fd_filters_best_effort > 0)) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; + /* Setup a counter for fd_atr per PF */ + pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + /* Setup a counter for fd_sb per PF */ + pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); + } else { + dev_info(&pf->pdev->dev, + "Flow Director Sideband mode Disabled in MFP mode\n"); + } + pf->fdir_pf_filter_count = + pf->hw.func_caps.fd_filters_guaranteed; + pf->hw.fdir_shared_filter_count = + pf->hw.func_caps.fd_filters_best_effort; + } + + if (pf->hw.func_caps.vmdq) { + pf->flags |= I40E_FLAG_VMDQ_ENABLED; + pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; + pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; + } + +#ifdef I40E_FCOE + err = i40e_init_pf_fcoe(pf); + if (err) + dev_info(&pf->pdev->dev, "init_pf_fcoe failed: 
%d\n", err); + +#endif /* I40E_FCOE */ +#ifdef CONFIG_PCI_IOV + if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { + pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; + pf->flags |= I40E_FLAG_SRIOV_ENABLED; + pf->num_req_vfs = min_t(int, + pf->hw.func_caps.num_vfs, + I40E_MAX_VF_COUNT); + } +#endif /* CONFIG_PCI_IOV */ + pf->eeprom_version = 0xDEAD; + pf->lan_veb = I40E_NO_VEB; + pf->lan_vsi = I40E_NO_VSI; + + /* set up queue assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); + pf->qp_pile = kzalloc(size, GFP_KERNEL); + if (!pf->qp_pile) { + err = -ENOMEM; + goto sw_init_done; + } + pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; + pf->qp_pile->search_hint = 0; + + pf->tx_timeout_recovery_level = 1; + + mutex_init(&pf->switch_mutex); + + /* If NPAR is enabled nudge the Tx scheduler */ + if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) + i40e_set_npar_bw_setting(pf); + +sw_init_done: + return err; +} + +/** + * i40e_set_ntuple - set the ntuple feature flag and take action + * @pf: board private structure to initialize + * @features: the feature set that the stack is suggesting + * + * returns a bool to indicate if reset needs to happen + **/ +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) +{ + bool need_reset = false; + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + if (features & NETIF_F_NTUPLE) { + /* Enable filters and mark for reset */ + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + need_reset = true; + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + } else { + /* turn off filters, mark for reset and clear SW filter list */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + need_reset = true; + i40e_fdir_filter_exit(pf); + } + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; + pf->fdir_pf_active_filters = 0; + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); + /* if ATR was auto disabled it can be re-enabled. 
*/ + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + } + return need_reset; +} + +/** + * i40e_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + **/ +static int i40e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + bool need_reset; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); + + need_reset = i40e_set_ntuple(pf, features); + + if (need_reset) + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + return 0; +} + +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port + * @pf: board private structure + * @port: The UDP port to look up + * + * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found + **/ +static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) +{ + u8 i; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->vxlan_ports[i] == port) + return i; + } + + return i; +} + +/** + * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: New UDP port number that VXLAN started listening to + **/ +static void i40e_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 next_idx; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "vxlan port %d already offloaded\n", + ntohs(port)); + return; + } + + /* Now check if there is space to add the new port */ + next_idx = i40e_get_vxlan_port_idx(pf, 0); + + if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", + ntohs(port)); + return; + } + + /* New port: add it and mark its index in the bitmap */ + pf->vxlan_ports[next_idx] = port; + pf->pending_vxlan_bitmap |= (1 << next_idx); + pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); +} + +/** + * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + **/ +static void i40e_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->vxlan_ports[idx] = 0; + pf->pending_vxlan_bitmap |= (1 << idx); + pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", + ntohs(port)); + } else { + 
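/* port was never offloaded on this PF; nothing to mark for deletion */ +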
netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", + ntohs(port)); + } +} + +#endif +static int i40e_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + + if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) + return -EOPNOTSUPP; + + ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); + memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); + + return 0; +} + +/** + * i40e_ndo_fdb_add - add an entry to the hardware database + * @ndm: the input from the stack + * @tb: pointer to array of nladdr (unused) + * @dev: the net device pointer + * @addr: the MAC address entry being added + * @flags: instructions from stack about fdb operation + */ +static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_pf *pf = np->vsi->back; + int err = 0; + + if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + if (vid) { + pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); + return -EINVAL; + } + + /* Hardware does not support aging addresses so if a + * ndm_state is given only allow permanent addresses + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + netdev_info(dev, "FDB only supports static addresses\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + else + err = -EINVAL; + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; +} + +#ifdef HAVE_BRIDGE_ATTRIBS +/** + * i40e_ndo_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * + * Inserts a new hardware bridge if not already created and + * enables the bridging mode requested (VEB or VEPA). If the + * hardware bridge has already been inserted and the request + * is to change the mode then that requires a PF reset to + * allow rebuild of the components with required hardware + * bridge mode enabled. 
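+ * Typically reached from user space via iproute2, e.g.
+ * "bridge link set dev <pf-netdev> hwmode {veb|vepa}" (command shown for
+ * illustration only).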
+ **/ +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + struct nlattr *attr, *br_spec; + int i, rem; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if ((mode != BRIDGE_MODE_VEPA) && + (mode != BRIDGE_MODE_VEB)) + return -EINVAL; + + /* Insert a new HW bridge */ + if (!veb) { + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + veb->bridge_mode = mode; + i40e_config_bridge_mode(veb); + } else { + /* No Bridge HW offload available */ + return -ENOENT; + } + break; + } else if (mode != veb->bridge_mode) { + /* Existing HW bridge but different mode needs reset */ + veb->bridge_mode = mode; + /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ + if (mode == BRIDGE_MODE_VEB) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + break; + } + } + + return 0; +} + +/** + * i40e_ndo_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq # + * @dev: the netdev being configured + * @filter_mask: unused + * + * Return the mode in which the hardware bridge is operating in + * i.e VEB or VEPA. 
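+ * This is what tools such as iproute2's "bridge link show" report as the
+ * hwmode of the PF netdev.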
+ **/ +#ifdef HAVE_BRIDGE_FILTER +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask, int nlflags) +#else +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, int nlflags) +#endif /* HAVE_BRIDGE_FILTER */ +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + int i; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for the PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + if (!veb) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, + nlflags); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ + +static const struct net_device_ops i40e_netdev_ops = { + .ndo_open = i40e_open, + .ndo_stop = i40e_close, + .ndo_start_xmit = i40e_lan_xmit_frame, + .ndo_get_stats64 = i40e_get_netdev_stats_struct, + .ndo_set_rx_mode = i40e_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = i40e_set_mac, + .ndo_change_mtu = i40e_change_mtu, + .ndo_do_ioctl = i40e_ioctl, + .ndo_tx_timeout = i40e_tx_timeout, + .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = i40e_netpoll, +#endif + .ndo_setup_tc = i40e_setup_tc, +#ifdef I40E_FCOE + .ndo_fcoe_enable = i40e_fcoe_enable, + .ndo_fcoe_disable = i40e_fcoe_disable, +#endif + .ndo_set_features = i40e_set_features, + .ndo_set_vf_mac = i40e_ndo_set_vf_mac, + .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, + .ndo_set_vf_rate = i40e_ndo_set_vf_bw, + .ndo_get_vf_config = i40e_ndo_get_vf_config, + .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, + .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, +#ifdef CONFIG_I40E_VXLAN + .ndo_add_vxlan_port = i40e_add_vxlan_port, + .ndo_del_vxlan_port = i40e_del_vxlan_port, +#endif + .ndo_get_phys_port_id = i40e_get_phys_port_id, + .ndo_fdb_add = i40e_ndo_fdb_add, +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_getlink = i40e_ndo_bridge_getlink, + .ndo_bridge_setlink = i40e_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +}; + +/** + * i40e_config_netdev - Setup the netdev flags + * @vsi: the VSI being configured + * + * Returns 0 on success, negative value on failure + **/ +static int i40e_config_netdev(struct i40e_vsi *vsi) +{ + u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_netdev_priv *np; + struct net_device *netdev; + u8 mac_addr[ETH_ALEN]; + int etherdev_size; + + etherdev_size = sizeof(struct i40e_netdev_priv); + netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); + if (!netdev) + return -ENOMEM; + + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO; + + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_RXHASH | + 0; + + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + netdev->features |= NETIF_F_NTUPLE; + + /* copy netdev 
features into list of user selectable features */ + netdev->hw_features |= netdev->features; + + if (vsi->type == I40E_VSI_MAIN) { + SET_NETDEV_DEV(netdev, &pf->pdev->dev); + ether_addr_copy(mac_addr, hw->mac.perm_addr); + /* The following steps are necessary to prevent reception + * of tagged packets - some older NVM configurations load a + * default a MAC-VLAN filter that accepts any tagged packet + * which must be replaced by a normal filter. + */ + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + i40e_add_filter(vsi, mac_addr, + I40E_VLAN_ANY, false, true); + } else { + /* relate the VSI_VMDQ name to the VSI_MAIN name */ + snprintf(netdev->name, IFNAMSIZ, "%sv%%d", + pf->vsi[pf->lan_vsi]->netdev->name); + random_ether_addr(mac_addr); + i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); + } + i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); + + ether_addr_copy(netdev->dev_addr, mac_addr); + ether_addr_copy(netdev->perm_addr, mac_addr); + /* vlan gets same features (except vlan offload) + * after any tweaks for specific VSI types + */ + netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + /* Setup netdev TC information */ + i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); + + netdev->netdev_ops = &i40e_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + i40e_set_ethtool_ops(netdev); +#ifdef I40E_FCOE + i40e_fcoe_config_netdev(netdev, vsi); +#endif + + return 0; +} + +/** + * i40e_vsi_delete - Delete a VSI from the switch + * @vsi: the VSI being removed + * + * Returns 0 on success, negative value on failure + **/ +static void i40e_vsi_delete(struct i40e_vsi *vsi) +{ + /* remove default VSI is not allowed */ + if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) + return; + + i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); +} + +/** + * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB + * @vsi: the VSI being queried + * + * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode + **/ +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + struct i40e_pf *pf = vsi->back; + + /* Uplink is not a bridge so default to VEB */ + if (vsi->veb_idx == I40E_NO_VEB) + return 1; + + veb = pf->veb[vsi->veb_idx]; + /* Uplink is a bridge in VEPA mode */ + if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA)) + return 0; + + /* Uplink is a bridge in VEB mode */ + return 1; +} + +/** + * i40e_add_vsi - Add a VSI to the switch + * @vsi: the VSI being configured + * + * This initializes a VSI context depending on the VSI type to be added and + * passes it down to the add_vsi aq command. + **/ +static int i40e_add_vsi(struct i40e_vsi *vsi) +{ + int ret = -ENODEV; + struct i40e_mac_filter *f, *ftmp; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_vsi_context ctxt; + u8 enabled_tc = 0x1; /* TC0 enabled */ + int f_count = 0; + + memset(&ctxt, 0, sizeof(ctxt)); + switch (vsi->type) { + case I40E_VSI_MAIN: + /* The PF's main VSI is already setup as part of the + * device initialization, so we'll not bother with + * the add_vsi call, but we will retrieve the current + * VSI context. 
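+ * i40e_aq_get_vsi_params() below fills in ctxt.info, seid and vsi_number
+ * for the existing switch element so the local bookkeeping matches what
+ * the firmware already has.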
+ */ + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + if (ret) { + dev_info(&pf->pdev->dev, + "couldn't get PF vsi config, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + return -ENOENT; + } + vsi->info = ctxt.info; + vsi->info.valid_sections = 0; + + vsi->seid = ctxt.seid; + vsi->id = ctxt.vsi_number; + + enabled_tc = i40e_pf_get_tc_map(pf); + + /* MFP mode setup queue map and update VSI */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && + !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "update vsi failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + ret = -ENOENT; + goto err; + } + /* update the local VSI info queue map */ + i40e_vsi_update_queue_map(vsi, &ctxt); + vsi->info.valid_sections = 0; + } else { + /* Default/Main VSI is only enabled for TC0 + * reconfigure it to enable all TCs that are + * available on the port in SFP mode. + * For MFP case the iSCSI PF would use this + * flow to enable LAN+iSCSI TC. + */ + ret = i40e_vsi_config_tc(vsi, enabled_tc); + if (ret) { + dev_info(&pf->pdev->dev, + "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n", + enabled_tc, ret, + pf->hw.aq.asq_last_status); + ret = -ENOENT; + } + } + break; + + case I40E_VSI_FDIR: + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + (i40e_is_vsi_uplink_mode_veb(vsi))) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); + break; + + case I40E_VSI_VMDQ2: + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; + + /* This VSI is connected to VEB so the switch_id + * should be set to zero by default. + */ + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + /* Setup the VSI tx/rx queue map for TC0 only for now */ + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); + break; + + case I40E_VSI_SRIOV: + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + ctxt.flags = I40E_AQ_VSI_TYPE_VF; + + /* This VSI is connected to VEB so the switch_id + * should be set to zero by default. 
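+ * When the uplink really is in VEB mode we additionally set the ALLOW_LB
+ * flag so locally switched (looped back) traffic between VSIs on that
+ * VEB is permitted.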
+ */ + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); + ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; + if (pf->vf[vsi->vf_id].spoofchk) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); + ctxt.info.sec_flags |= + (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); + } + /* Setup the VSI tx/rx queue map for TC0 only for now */ + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); + break; + +#ifdef I40E_FCOE + case I40E_VSI_FCOE: + ret = i40e_fcoe_vsi_init(vsi, &ctxt); + if (ret) { + dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); + return ret; + } + break; + +#endif /* I40E_FCOE */ + default: + return -ENODEV; + } + + if (vsi->type != I40E_VSI_MAIN) { + ret = i40e_aq_add_vsi(hw, &ctxt, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add vsi failed, aq_err=%d\n", + vsi->back->hw.aq.asq_last_status); + ret = -ENOENT; + goto err; + } + vsi->info = ctxt.info; + vsi->info.valid_sections = 0; + vsi->seid = ctxt.seid; + vsi->id = ctxt.vsi_number; + } + + /* If macvlan filters already exist, force them to get loaded */ + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { + f->changed = true; + f_count++; + + if (f->is_laa && vsi->type == I40E_VSI_MAIN) { + struct i40e_aqc_remove_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, f->macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + } + + i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + f->macaddr, NULL); + } + } + if (f_count) { + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + pf->flags |= I40E_FLAG_FILTER_SYNC; + } + + /* Update VSI BW information */ + ret = i40e_vsi_get_bw_info(vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "couldn't get vsi bw info, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + /* VSI is already added so not tearing that up */ + ret = 0; + } + +err: + return ret; +} + +/** + * i40e_vsi_release - Delete a VSI and free its resources + * @vsi: the VSI being removed + * + * Returns 0 on success or < 0 on error + **/ +int i40e_vsi_release(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f, *ftmp; + struct i40e_veb *veb = NULL; + struct i40e_pf *pf; + u16 uplink_seid; + int i, n; + + pf = vsi->back; + + /* release of a VEB-owner or last VSI is not allowed */ + if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { + dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", + vsi->seid, vsi->uplink_seid); + return -ENODEV; + } + if (vsi == pf->vsi[pf->lan_vsi] && + !test_bit(__I40E_DOWN, &pf->state)) { + dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); + return -ENODEV; + } + + uplink_seid = vsi->uplink_seid; + if (vsi->type != I40E_VSI_SRIOV) { + if (vsi->netdev_registered) { + vsi->netdev_registered = false; + if (vsi->netdev) { + /* results in a call to i40e_close() */ + unregister_netdev(vsi->netdev); + } + } else { + i40e_vsi_close(vsi); + } + i40e_vsi_disable_irq(vsi); + } + + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) + i40e_del_filter(vsi, f->macaddr, 
f->vlan, + f->is_vf, f->is_netdev); + i40e_sync_vsi_filters(vsi); + + i40e_vsi_delete(vsi); + i40e_vsi_free_q_vectors(vsi); + if (vsi->netdev) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + i40e_vsi_clear_rings(vsi); + i40e_vsi_clear(vsi); + + /* If this was the last thing on the VEB, except for the + * controlling VSI, remove the VEB, which puts the controlling + * VSI onto the next level down in the switch. + * + * Well, okay, there's one more exception here: don't remove + * the orphan VEBs yet. We'll wait for an explicit remove request + * from up the network stack. + */ + for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && + pf->vsi[i]->uplink_seid == uplink_seid && + (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { + n++; /* count the VSIs */ + } + } + for (i = 0; i < I40E_MAX_VEB; i++) { + if (!pf->veb[i]) + continue; + if (pf->veb[i]->uplink_seid == uplink_seid) + n++; /* count the VEBs */ + if (pf->veb[i]->seid == uplink_seid) + veb = pf->veb[i]; + } + if (n == 0 && veb && veb->uplink_seid != 0) + i40e_veb_release(veb); + + return 0; +} + +/** + * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI + * @vsi: ptr to the VSI + * + * This should only be called after i40e_vsi_mem_alloc() which allocates the + * corresponding SW VSI structure and initializes num_queue_pairs for the + * newly allocated VSI. + * + * Returns 0 on success or negative on failure + **/ +static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) +{ + int ret = -ENOENT; + struct i40e_pf *pf = vsi->back; + + if (vsi->q_vectors[0]) { + dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", + vsi->seid); + return -EEXIST; + } + + if (vsi->base_vector) { + dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", + vsi->seid, vsi->base_vector); + return -EEXIST; + } + + ret = i40e_vsi_alloc_q_vectors(vsi); + if (ret) { + dev_info(&pf->pdev->dev, + "failed to allocate %d q_vector for VSI %d, ret=%d\n", + vsi->num_q_vectors, vsi->seid, ret); + vsi->num_q_vectors = 0; + goto vector_setup_out; + } + + if (vsi->num_q_vectors) + vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, + vsi->num_q_vectors, vsi->idx); + if (vsi->base_vector < 0) { + dev_info(&pf->pdev->dev, + "failed to get tracking for %d vectors for VSI %d, err=%d\n", + vsi->num_q_vectors, vsi->seid, vsi->base_vector); + i40e_vsi_free_q_vectors(vsi); + ret = -ENOENT; + goto vector_setup_out; + } + +vector_setup_out: + return ret; +} + +/** + * i40e_vsi_reinit_setup - return and reallocate resources for a VSI + * @vsi: pointer to the vsi. + * + * This re-allocates a vsi's queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct + * on success, otherwise returns NULL on failure. + **/ +static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + u8 enabled_tc; + int ret; + + i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); + i40e_vsi_clear_rings(vsi); + + i40e_vsi_free_arrays(vsi, false); + i40e_set_num_rings_in_vsi(vsi); + ret = i40e_vsi_alloc_arrays(vsi, false); + if (ret) + goto err_vsi; + + ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); + if (ret < 0) { + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); + goto err_vsi; + } + vsi->base_queue = ret; + + /* Update the FW view of the VSI. Force a reset of TC and queue + * layout configurations. 
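+ * The current TC map is saved off, cleared, and then reapplied through
+ * i40e_vsi_config_tc() on the LAN VSI.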
+ */ + enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; + pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; + pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; + i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + + /* assign it some queues */ + ret = i40e_alloc_rings(vsi); + if (ret) + goto err_rings; + + /* map all of the rings to the q_vectors */ + i40e_vsi_map_rings_to_vectors(vsi); + return vsi; + +err_rings: + i40e_vsi_free_q_vectors(vsi); + if (vsi->netdev_registered) { + vsi->netdev_registered = false; + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); +err_vsi: + i40e_vsi_clear(vsi); + return NULL; +} + +/** + * i40e_vsi_setup - Set up a VSI by a given type + * @pf: board private structure + * @type: VSI type + * @uplink_seid: the switch element to link to + * @param1: usage depends upon VSI type. For VF types, indicates VF id + * + * This allocates the sw VSI structure and its queue resources, then add a VSI + * to the identified VEB. + * + * Returns pointer to the successfully allocated and configure VSI sw struct on + * success, otherwise returns NULL on failure. + **/ +struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, + u16 uplink_seid, u32 param1) +{ + struct i40e_vsi *vsi = NULL; + struct i40e_veb *veb = NULL; + int ret, i; + int v_idx; + + /* The requested uplink_seid must be either + * - the PF's port seid + * no VEB is needed because this is the PF + * or this is a Flow Director special case VSI + * - seid of an existing VEB + * - seid of a VSI that owns an existing VEB + * - seid of a VSI that doesn't own a VEB + * a new VEB is created and the VSI becomes the owner + * - seid of the PF VSI, which is what creates the first VEB + * this is a special case of the previous + * + * Find which uplink_seid we were given and create a new VEB if needed + */ + for (i = 0; i < I40E_MAX_VEB; i++) { + if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { + veb = pf->veb[i]; + break; + } + } + + if (!veb && uplink_seid != pf->mac_seid) { + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { + vsi = pf->vsi[i]; + break; + } + } + if (!vsi) { + dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", + uplink_seid); + return NULL; + } + + if (vsi->uplink_seid == pf->mac_seid) + veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, + vsi->tc_config.enabled_tc); + else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + dev_info(&vsi->back->pdev->dev, + "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", + __func__); + return NULL; + } + /* We come up by default in VEPA mode if SRIOV is not + * already enabled, in which case we can't force VEPA + * mode. 
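+ * The new VEB therefore starts out in VEPA mode unless the user has
+ * already switched the PF to VEB mode (e.g. via ndo_bridge_setlink).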
+ */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + veb->bridge_mode = BRIDGE_MODE_VEPA; + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + } + i40e_config_bridge_mode(veb); + } + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + if (!veb) { + dev_info(&pf->pdev->dev, "couldn't add VEB\n"); + return NULL; + } + + vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + uplink_seid = veb->seid; + } + + /* get vsi sw struct */ + v_idx = i40e_vsi_mem_alloc(pf, type); + if (v_idx < 0) + goto err_alloc; + vsi = pf->vsi[v_idx]; + if (!vsi) + goto err_alloc; + vsi->type = type; + vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); + + if (type == I40E_VSI_MAIN) + pf->lan_vsi = v_idx; + else if (type == I40E_VSI_SRIOV) + vsi->vf_id = param1; + /* assign it some queues */ + ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, + vsi->idx); + if (ret < 0) { + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); + goto err_vsi; + } + vsi->base_queue = ret; + + /* get a VSI from the hardware */ + vsi->uplink_seid = uplink_seid; + ret = i40e_add_vsi(vsi); + if (ret) + goto err_vsi; + + switch (vsi->type) { + /* setup the netdev if needed */ + case I40E_VSI_MAIN: + case I40E_VSI_VMDQ2: + case I40E_VSI_FCOE: + ret = i40e_config_netdev(vsi); + if (ret) + goto err_netdev; + ret = register_netdev(vsi->netdev); + if (ret) + goto err_netdev; + vsi->netdev_registered = true; + netif_carrier_off(vsi->netdev); +#ifdef CONFIG_I40E_DCB + /* Setup DCB netlink interface */ + i40e_dcbnl_setup(vsi); +#endif /* CONFIG_I40E_DCB */ + /* fall through */ + + case I40E_VSI_FDIR: + /* set up vectors and rings if needed */ + ret = i40e_vsi_setup_vectors(vsi); + if (ret) + goto err_msix; + + ret = i40e_alloc_rings(vsi); + if (ret) + goto err_rings; + + /* map all of the rings to the q_vectors */ + i40e_vsi_map_rings_to_vectors(vsi); + + i40e_vsi_reset_stats(vsi); + break; + + default: + /* no netdev or rings for the other VSI types */ + break; + } + + return vsi; + +err_rings: + i40e_vsi_free_q_vectors(vsi); +err_msix: + if (vsi->netdev_registered) { + vsi->netdev_registered = false; + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } +err_netdev: + i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); +err_vsi: + i40e_vsi_clear(vsi); +err_alloc: + return NULL; +} + +/** + * i40e_veb_get_bw_info - Query VEB BW information + * @veb: the veb to query + * + * Query the Tx scheduler BW configuration data for given VEB + **/ +static int i40e_veb_get_bw_info(struct i40e_veb *veb) +{ + struct i40e_aqc_query_switching_comp_ets_config_resp ets_data; + struct i40e_aqc_query_switching_comp_bw_config_resp bw_data; + struct i40e_pf *pf = veb->pf; + struct i40e_hw *hw = &pf->hw; + u32 tc_bw_max; + int ret = 0; + int i; + + ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, + &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "query veb bw config failed, aq_err=%d\n", + hw->aq.asq_last_status); + goto out; + } + + ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, + &ets_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "query veb bw ets config failed, aq_err=%d\n", + hw->aq.asq_last_status); + goto out; + } + + veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); + veb->bw_max_quanta = ets_data.tc_bw_max; + veb->is_abs_credits = bw_data.absolute_credits_enable; + veb->enabled_tc = ets_data.tc_valid_bits; + tc_bw_max = 
le16_to_cpu(bw_data.tc_bw_max[0]) | + (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; + veb->bw_tc_limit_credits[i] = + le16_to_cpu(bw_data.tc_bw_limits[i]); + veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); + } + +out: + return ret; +} + +/** + * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF + * @pf: board private structure + * + * On error: returns error code (negative) + * On success: returns vsi index in PF (positive) + **/ +static int i40e_veb_mem_alloc(struct i40e_pf *pf) +{ + int ret = -ENOENT; + struct i40e_veb *veb; + int i; + + /* Need to protect the allocation of switch elements at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* VEB list may be fragmented if VEB creation/destruction has + * been happening. We can afford to do a quick scan to look + * for any free slots in the list. + * + * find next empty veb slot, looping back around if necessary + */ + i = 0; + while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) + i++; + if (i >= I40E_MAX_VEB) { + ret = -ENOMEM; + goto err_alloc_veb; /* out of VEB slots! */ + } + + veb = kzalloc(sizeof(*veb), GFP_KERNEL); + if (!veb) { + ret = -ENOMEM; + goto err_alloc_veb; + } + veb->pf = pf; + veb->idx = i; + veb->enabled_tc = 1; + + pf->veb[i] = veb; + ret = i; +err_alloc_veb: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +/** + * i40e_switch_branch_release - Delete a branch of the switch tree + * @branch: where to start deleting + * + * This uses recursion to find the tips of the branch to be + * removed, deleting until we get back to and can delete this VEB. + **/ +static void i40e_switch_branch_release(struct i40e_veb *branch) +{ + struct i40e_pf *pf = branch->pf; + u16 branch_seid = branch->seid; + u16 veb_idx = branch->idx; + int i; + + /* release any VEBs on this VEB - RECURSION */ + for (i = 0; i < I40E_MAX_VEB; i++) { + if (!pf->veb[i]) + continue; + if (pf->veb[i]->uplink_seid == branch->seid) + i40e_switch_branch_release(pf->veb[i]); + } + + /* Release the VSIs on this VEB, but not the owner VSI. + * + * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing + * the VEB itself, so don't use (*branch) after this loop. + */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + if (pf->vsi[i]->uplink_seid == branch_seid && + (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { + i40e_vsi_release(pf->vsi[i]); + } + } + + /* There's one corner case where the VEB might not have been + * removed, so double check it here and remove it if needed. + * This case happens if the veb was created from the debugfs + * commands and no VSIs were added to it. 
+ */ + if (pf->veb[veb_idx]) + i40e_veb_release(pf->veb[veb_idx]); +} + +/** + * i40e_veb_clear - remove veb struct + * @veb: the veb to remove + **/ +static void i40e_veb_clear(struct i40e_veb *veb) +{ + if (!veb) + return; + + if (veb->pf) { + struct i40e_pf *pf = veb->pf; + + mutex_lock(&pf->switch_mutex); + if (pf->veb[veb->idx] == veb) + pf->veb[veb->idx] = NULL; + mutex_unlock(&pf->switch_mutex); + } + + kfree(veb); +} + +/** + * i40e_veb_release - Delete a VEB and free its resources + * @veb: the VEB being removed + **/ +void i40e_veb_release(struct i40e_veb *veb) +{ + struct i40e_vsi *vsi = NULL; + struct i40e_pf *pf; + int i, n = 0; + + pf = veb->pf; + + /* find the remaining VSI and check for extras */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { + n++; + vsi = pf->vsi[i]; + } + } + if (n != 1) { + dev_info(&pf->pdev->dev, + "can't remove VEB %d with %d VSIs left\n", + veb->seid, n); + return; + } + + /* move the remaining VSI to uplink veb */ + vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; + if (veb->uplink_seid) { + vsi->uplink_seid = veb->uplink_seid; + if (veb->uplink_seid == pf->mac_seid) + vsi->veb_idx = I40E_NO_VEB; + else + vsi->veb_idx = veb->veb_idx; + } else { + /* floating VEB */ + vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; + vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; + } + + i40e_aq_delete_element(&pf->hw, veb->seid, NULL); + i40e_veb_clear(veb); +} + +/** + * i40e_add_veb - create the VEB in the switch + * @veb: the VEB to be instantiated + * @vsi: the controlling VSI + **/ +static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) +{ + bool is_default = false; + bool is_cloud = false; + int ret; + + /* get a VEB from the hardware */ + ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, + veb->enabled_tc, is_default, + is_cloud, &veb->seid, NULL); + if (ret) { + dev_info(&veb->pf->pdev->dev, + "couldn't add VEB, err %d, aq_err %d\n", + ret, veb->pf->hw.aq.asq_last_status); + return -EPERM; + } + + /* get statistics counter */ + ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, + &veb->stats_idx, NULL, NULL, NULL); + if (ret) { + dev_info(&veb->pf->pdev->dev, + "couldn't get VEB statistics idx, err %d, aq_err %d\n", + ret, veb->pf->hw.aq.asq_last_status); + return -EPERM; + } + ret = i40e_veb_get_bw_info(veb); + if (ret) { + dev_info(&veb->pf->pdev->dev, + "couldn't get VEB bw info, err %d, aq_err %d\n", + ret, veb->pf->hw.aq.asq_last_status); + i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); + return -ENOENT; + } + + vsi->uplink_seid = veb->seid; + vsi->veb_idx = veb->idx; + vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + + return 0; +} + +/** + * i40e_veb_setup - Set up a VEB + * @pf: board private structure + * @flags: VEB setup flags + * @uplink_seid: the switch element to link to + * @vsi_seid: the initial VSI seid + * @enabled_tc: Enabled TC bit-map + * + * This allocates the sw VEB structure and links it into the switch + * It is possible and legal for this to be a duplicate of an already + * existing VEB. It is also possible for both uplink and vsi seids + * to be zero, in order to create a floating VEB. + * + * Returns pointer to the successfully allocated VEB sw struct on + * success, otherwise returns NULL on failure. 
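+ * + * A minimal usage sketch, mirroring the call already made from + * i40e_vsi_setup() above; the caller-side error handling shown here is + * illustrative only: + * + * veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + * vsi->tc_config.enabled_tc); + * if (!veb) + * return NULL;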
+ **/ +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, + u16 uplink_seid, u16 vsi_seid, + u8 enabled_tc) +{ + struct i40e_veb *veb, *uplink_veb = NULL; + int vsi_idx, veb_idx; + int ret; + + /* if one seid is 0, the other must be 0 to create a floating relay */ + if ((uplink_seid == 0 || vsi_seid == 0) && + (uplink_seid + vsi_seid != 0)) { + dev_info(&pf->pdev->dev, + "one, not both seid's are 0: uplink=%d vsi=%d\n", + uplink_seid, vsi_seid); + return NULL; + } + + /* make sure there is such a vsi and uplink */ + for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) + if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) + break; + if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { + dev_info(&pf->pdev->dev, "vsi seid %d not found\n", + vsi_seid); + return NULL; + } + + if (uplink_seid && uplink_seid != pf->mac_seid) { + for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { + if (pf->veb[veb_idx] && + pf->veb[veb_idx]->seid == uplink_seid) { + uplink_veb = pf->veb[veb_idx]; + break; + } + } + if (!uplink_veb) { + dev_info(&pf->pdev->dev, + "uplink seid %d not found\n", uplink_seid); + return NULL; + } + } + + /* get veb sw struct */ + veb_idx = i40e_veb_mem_alloc(pf); + if (veb_idx < 0) + goto err_alloc; + veb = pf->veb[veb_idx]; + veb->flags = flags; + veb->uplink_seid = uplink_seid; + veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); + veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); + + /* create the VEB in the switch */ + ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); + if (ret) + goto err_veb; + if (vsi_idx == pf->lan_vsi) + pf->lan_veb = veb->idx; + + return veb; + +err_veb: + i40e_veb_clear(veb); +err_alloc: + return NULL; +} + +/** + * i40e_setup_pf_switch_element - set PF vars based on switch type + * @pf: board private structure + * @ele: element we are building info from + * @num_reported: total number of elements + * @printconfig: should we print the contents + * + * helper function to assist in extracting a few useful SEID values. + **/ +static void i40e_setup_pf_switch_element(struct i40e_pf *pf, + struct i40e_aqc_switch_config_element_resp *ele, + u16 num_reported, bool printconfig) +{ + u16 downlink_seid = le16_to_cpu(ele->downlink_seid); + u16 uplink_seid = le16_to_cpu(ele->uplink_seid); + u8 element_type = ele->element_type; + u16 seid = le16_to_cpu(ele->seid); + + if (printconfig) + dev_info(&pf->pdev->dev, + "type=%d seid=%d uplink=%d downlink=%d\n", + element_type, seid, uplink_seid, downlink_seid); + + switch (element_type) { + case I40E_SWITCH_ELEMENT_TYPE_MAC: + pf->mac_seid = seid; + break; + case I40E_SWITCH_ELEMENT_TYPE_VEB: + /* Main VEB? 
*/ + if (uplink_seid != pf->mac_seid) + break; + if (pf->lan_veb == I40E_NO_VEB) { + int v; + + /* find existing or else empty VEB */ + for (v = 0; v < I40E_MAX_VEB; v++) { + if (pf->veb[v] && (pf->veb[v]->seid == seid)) { + pf->lan_veb = v; + break; + } + } + if (pf->lan_veb == I40E_NO_VEB) { + v = i40e_veb_mem_alloc(pf); + if (v < 0) + break; + pf->lan_veb = v; + } + } + + pf->veb[pf->lan_veb]->seid = seid; + pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; + pf->veb[pf->lan_veb]->pf = pf; + pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; + break; + case I40E_SWITCH_ELEMENT_TYPE_VSI: + if (num_reported != 1) + break; + /* This is immediately after a reset so we can assume this is + * the PF's VSI + */ + pf->mac_seid = uplink_seid; + pf->pf_seid = downlink_seid; + pf->main_vsi_seid = seid; + if (printconfig) + dev_info(&pf->pdev->dev, + "pf_seid=%d main_vsi_seid=%d\n", + pf->pf_seid, pf->main_vsi_seid); + break; + case I40E_SWITCH_ELEMENT_TYPE_PF: + case I40E_SWITCH_ELEMENT_TYPE_VF: + case I40E_SWITCH_ELEMENT_TYPE_EMP: + case I40E_SWITCH_ELEMENT_TYPE_BMC: + case I40E_SWITCH_ELEMENT_TYPE_PE: + case I40E_SWITCH_ELEMENT_TYPE_PA: + /* ignore these for now */ + break; + default: + dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", + element_type, seid); + break; + } +} + +/** + * i40e_fetch_switch_configuration - Get switch config from firmware + * @pf: board private structure + * @printconfig: should we print the contents + * + * Get the current switch configuration from the device and + * extract a few useful SEID values. + **/ +int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) +{ + struct i40e_aqc_get_switch_config_resp *sw_config; + u16 next_seid = 0; + int ret = 0; + u8 *aq_buf; + int i; + + aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL); + if (!aq_buf) + return -ENOMEM; + + sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; + do { + u16 num_reported, num_total; + + ret = i40e_aq_get_switch_config(&pf->hw, sw_config, + I40E_AQ_LARGE_BUF, + &next_seid, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "get switch config failed %d aq_err=%x\n", + ret, pf->hw.aq.asq_last_status); + kfree(aq_buf); + return -ENOENT; + } + + num_reported = le16_to_cpu(sw_config->header.num_reported); + num_total = le16_to_cpu(sw_config->header.num_total); + + if (printconfig) + dev_info(&pf->pdev->dev, + "header: %d reported %d total\n", + num_reported, num_total); + + for (i = 0; i < num_reported; i++) { + struct i40e_aqc_switch_config_element_resp *ele = + &sw_config->element[i]; + + i40e_setup_pf_switch_element(pf, ele, num_reported, + printconfig); + } + } while (next_seid != 0); + + kfree(aq_buf); + return ret; +} + +/** + * i40e_setup_pf_switch - Setup the HW switch on startup or after reset + * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized. 
+ * + * Returns 0 on success, negative value on failure + **/ +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) +{ + int ret; + + /* find out what's out there already */ + ret = i40e_fetch_switch_configuration(pf, false); + if (ret) { + dev_info(&pf->pdev->dev, + "couldn't fetch switch config, err %d, aq_err %d\n", + ret, pf->hw.aq.asq_last_status); + return ret; + } + i40e_pf_reset_stats(pf); + + /* first time setup */ + if (pf->lan_vsi == I40E_NO_VSI || reinit) { + struct i40e_vsi *vsi = NULL; + u16 uplink_seid; + + /* Set up the PF VSI associated with the PF's main VSI + * that is already in the HW switch + */ + if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) + uplink_seid = pf->veb[pf->lan_veb]->seid; + else + uplink_seid = pf->mac_seid; + if (pf->lan_vsi == I40E_NO_VSI) + vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); + else if (reinit) + vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); + if (!vsi) { + dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); + i40e_fdir_teardown(pf); + return -EAGAIN; + } + } else { + /* force a reset of TC and queue layout configurations */ + u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; + pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; + pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; + i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + } + i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); + + i40e_fdir_sb_setup(pf); + + /* Setup static PF queue filter control settings */ + ret = i40e_setup_pf_filter_control(pf); + if (ret) { + dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", + ret); + /* Failure here should not stop continuing other steps */ + } + + /* enable RSS in the HW, even for only one queue, as the stack can use + * the hash + */ + if ((pf->flags & I40E_FLAG_RSS_ENABLED)) + i40e_config_rss(pf); + + /* fill in link information and enable LSE reporting */ + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + i40e_link_event(pf); + + /* Initialize user-specific link properties */ + pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & + I40E_AQ_AN_COMPLETED) ? true : false); + + i40e_ptp_init(pf); + + return ret; +} + +/** + * i40e_determine_queue_usage - Work out queue distribution + * @pf: board private structure + **/ +static void i40e_determine_queue_usage(struct i40e_pf *pf) +{ + int queues_left; + + pf->num_lan_qps = 0; +#ifdef I40E_FCOE + pf->num_fcoe_qps = 0; +#endif + + /* Find the max queues to be put into basic use. We'll always be + * using TC0, whether or not DCB is running, and TC0 will get the + * big RSS set. 
+ */ + queues_left = pf->hw.func_caps.num_tx_qp; + + if ((queues_left == 1) || + !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { + /* one qp for PF, no queues for anything else */ + queues_left = 0; + pf->rss_size = pf->num_lan_qps = 1; + + /* make sure all the fancies are disabled */ + pf->flags &= ~(I40E_FLAG_RSS_ENABLED | +#ifdef I40E_FCOE + I40E_FLAG_FCOE_ENABLED | +#endif + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_SRIOV_ENABLED | + I40E_FLAG_VMDQ_ENABLED); + } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_DCB_CAPABLE))) { + /* one qp for PF */ + pf->rss_size = pf->num_lan_qps = 1; + queues_left -= pf->num_lan_qps; + + pf->flags &= ~(I40E_FLAG_RSS_ENABLED | +#ifdef I40E_FCOE + I40E_FLAG_FCOE_ENABLED | +#endif + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_DCB_ENABLED | + I40E_FLAG_VMDQ_ENABLED); + } else { + /* Not enough queues for all TCs */ + if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && + (queues_left < I40E_MAX_TRAFFIC_CLASS)) { + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); + } + pf->num_lan_qps = max_t(int, pf->rss_size_max, + num_online_cpus()); + pf->num_lan_qps = min_t(int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp); + + queues_left -= pf->num_lan_qps; + } + +#ifdef I40E_FCOE + if (pf->flags & I40E_FLAG_FCOE_ENABLED) { + if (I40E_DEFAULT_FCOE <= queues_left) { + pf->num_fcoe_qps = I40E_DEFAULT_FCOE; + } else if (I40E_MINIMUM_FCOE <= queues_left) { + pf->num_fcoe_qps = I40E_MINIMUM_FCOE; + } else { + pf->num_fcoe_qps = 0; + pf->flags &= ~I40E_FLAG_FCOE_ENABLED; + dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); + } + + queues_left -= pf->num_fcoe_qps; + } + +#endif + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (queues_left > 1) { + queues_left -= 1; /* save 1 queue for FD */ + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); + } + } + + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + pf->num_vf_qps && pf->num_req_vfs && queues_left) { + pf->num_req_vfs = min_t(int, pf->num_req_vfs, + (queues_left / pf->num_vf_qps)); + queues_left -= (pf->num_req_vfs * pf->num_vf_qps); + } + + if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && + pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { + pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, + (queues_left / pf->num_vmdq_qps)); + queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); + } + + pf->queues_left = queues_left; +#ifdef I40E_FCOE + dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); +#endif +} + +/** + * i40e_setup_pf_filter_control - Setup PF static filter control + * @pf: PF to be setup + * + * i40e_setup_pf_filter_control sets up a PF's initial filter control + * settings. If PE/FCoE are enabled then it will also set the per PF + * based filter sizes required for them. It also enables Flow director, + * ethertype and macvlan type filter settings for the pf. 
+ * + * Returns 0 on success, negative on failure + **/ +static int i40e_setup_pf_filter_control(struct i40e_pf *pf) +{ + struct i40e_filter_control_settings *settings = &pf->filter_settings; + + settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; + + /* Flow Director is enabled */ + if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) + settings->enable_fdir = true; + + /* Ethtype and MACVLAN filters enabled for PF */ + settings->enable_ethtype = true; + settings->enable_macvlan = true; + + if (i40e_set_filter_control(&pf->hw, settings)) + return -ENOENT; + + return 0; +} + +#define INFO_STRING_LEN 255 +static void i40e_print_features(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + char *buf, *string; + + string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!string) { + dev_err(&pf->pdev->dev, "Features string allocation failed\n"); + return; + } + + buf = string; + + buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); +#ifdef CONFIG_PCI_IOV + buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); +#endif + buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); + + if (pf->flags & I40E_FLAG_RSS_ENABLED) + buf += sprintf(buf, "RSS "); + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) + buf += sprintf(buf, "FD_ATR "); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + buf += sprintf(buf, "FD_SB "); + buf += sprintf(buf, "NTUPLE "); + } + if (pf->flags & I40E_FLAG_DCB_CAPABLE) + buf += sprintf(buf, "DCB "); + if (pf->flags & I40E_FLAG_PTP) + buf += sprintf(buf, "PTP "); +#ifdef I40E_FCOE + if (pf->flags & I40E_FLAG_FCOE_ENABLED) + buf += sprintf(buf, "FCOE "); +#endif + + BUG_ON(buf > (string + INFO_STRING_LEN)); + dev_info(&pf->pdev->dev, "%s\n", string); + kfree(string); +} + +/** + * i40e_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in i40e_pci_tbl + * + * i40e_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, + * and a hardware reset occur. + * + * Returns 0 on success, negative on failure + **/ +static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct i40e_aq_get_phy_abilities_resp abilities; + unsigned long ioremap_len; + struct i40e_pf *pf; + struct i40e_hw *hw; + static u16 pfs_found; + u16 link_status; + int err = 0; + u32 len; + u32 i; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + /* set up pci connections */ + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), i40e_driver_name); + if (err) { + dev_info(&pdev->dev, + "pci_request_selected_regions failed %d\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + + /* Now that we have a PCI connection, we need to do the + * low level device setup. This is primarily setting up + * the Admin Queue structures and then querying for the + * device's current profile information. 
+ */ + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_pf_alloc; + } + pf->next_vsi = 0; + pf->pdev = pdev; + set_bit(__I40E_DOWN, &pf->state); + + hw = &pf->hw; + hw->back = pf; + + ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), + I40E_MAX_CSR_SPACE); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len); + if (!hw->hw_addr) { + err = -EIO; + dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 0), + (unsigned int)pci_resource_len(pdev, 0), err); + goto err_ioremap; + } + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + pf->instance = pfs_found; + + if (debug != -1) { + pf->msg_enable = pf->hw.debug_mask; + pf->msg_enable = debug; + } + + /* do a special CORER for clearing PXE mode once at init */ + if (hw->revision_id == 0 && + (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { + wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); + i40e_flush(hw); + msleep(200); + pf->corer_count++; + + i40e_clear_pxe_mode(hw); + } + + /* Reset here to make sure all is clean and to define PF 'n' */ + i40e_clear_hw(hw); + err = i40e_pf_reset(hw); + if (err) { + dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); + goto err_pf_reset; + } + pf->pfr_count++; + + hw->aq.num_arq_entries = I40E_AQ_LEN; + hw->aq.num_asq_entries = I40E_AQ_LEN; + hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; + pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; + + snprintf(pf->int_name, sizeof(pf->int_name) - 1, + "%s-%s:misc", + dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); + + err = i40e_init_shared_code(hw); + if (err) { + dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); + goto err_pf_reset; + } + + /* set up a default setting for link flow control */ + pf->hw.fc.requested_mode = I40E_FC_NONE; + + err = i40e_init_adminq(hw); + dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); + if (err) { + dev_info(&pdev->dev, + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); + goto err_pf_reset; + } + + if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && + hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) + dev_info(&pdev->dev, + "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || + hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) + dev_info(&pdev->dev, + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + + i40e_verify_eeprom(pf); + + /* Rev 0 hardware was never productized */ + if (hw->revision_id < 1) + dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. 
If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); + + i40e_clear_pxe_mode(hw); + err = i40e_get_capabilities(pf); + if (err) + goto err_adminq_setup; + + err = i40e_sw_init(pf); + if (err) { + dev_info(&pdev->dev, "sw_init failed: %d\n", err); + goto err_sw_init; + } + + err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, + pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); + if (err) { + dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); + goto err_init_lan_hmc; + } + + err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); + if (err) { + dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); + err = -ENOENT; + goto err_configure_lan_hmc; + } + + /* Disable LLDP for NICs that have firmware versions lower than v4.3. + * Ignore error return codes because if it was already disabled via + * hardware settings this will fail + */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || + (pf->hw.aq.fw_maj_ver < 4)) { + dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); + i40e_aq_stop_lldp(hw, true, NULL); + } + + i40e_get_mac_addr(hw, hw->mac.addr); + if (!is_valid_ether_addr(hw->mac.addr)) { + dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); + err = -EIO; + goto err_mac_addr; + } + dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); + ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); + i40e_get_port_mac_addr(hw, hw->mac.port_addr); + if (is_valid_ether_addr(hw->mac.port_addr)) + pf->flags |= I40E_FLAG_PORT_ID_VALID; +#ifdef I40E_FCOE + err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); + if (err) + dev_info(&pdev->dev, + "(non-fatal) SAN MAC retrieval failed: %d\n", err); + if (!is_valid_ether_addr(hw->mac.san_addr)) { + dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", + hw->mac.san_addr); + ether_addr_copy(hw->mac.san_addr, hw->mac.addr); + } + dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); +#endif /* I40E_FCOE */ + + pci_set_drvdata(pdev, pf); + pci_save_state(pdev); +#ifdef CONFIG_I40E_DCB + err = i40e_init_pf_dcb(pf); + if (err) { + dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); + pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + /* Continue without DCB enabled */ + } +#endif /* CONFIG_I40E_DCB */ + + /* set up periodic task facility */ + setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); + pf->service_timer_period = HZ; + + INIT_WORK(&pf->service_task, i40e_service_task); + clear_bit(__I40E_SERVICE_SCHED, &pf->state); + pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; + pf->link_check_timeout = jiffies; + + /* WoL defaults to disabled */ + pf->wol_en = false; + device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); + + /* set up the main switch operations */ + i40e_determine_queue_usage(pf); + err = i40e_init_interrupt_scheme(pf); + if (err) + goto err_switch_setup; + + /* The number of VSIs reported by the FW is the minimum guaranteed + * to us; HW supports far more and we share the remaining pool with + * the other PFs. We allocate space for more than the guarantee with + * the understanding that we might not get them all later. + */ + if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) + pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; + else + pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; + + /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. 
*/ + len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; + pf->vsi = kzalloc(len, GFP_KERNEL); + if (!pf->vsi) { + err = -ENOMEM; + goto err_switch_setup; + } + +#ifdef CONFIG_PCI_IOV + /* prep for VF support */ + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + if (pci_num_vf(pdev)) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + } +#endif + err = i40e_setup_pf_switch(pf, false); + if (err) { + dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); + goto err_vsis; + } + /* if FDIR VSI was set up, start it now */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { + i40e_vsi_open(pf->vsi[i]); + break; + } + } + + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + err = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (err) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* The main driver is (mostly) up and happy. We need to set this state + * before setting up the misc vector or we get a race and the vector + * ends up disabled forever. + */ + clear_bit(__I40E_DOWN, &pf->state); + + /* In case of MSIX we are going to setup the misc vector right here + * to handle admin queue events etc. In case of legacy and MSI + * the misc functionality and queue processing is combined in + * the same vector and that gets setup at open. + */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + err = i40e_setup_misc_vector(pf); + if (err) { + dev_info(&pdev->dev, + "setup of misc vector failed: %d\n", err); + goto err_vsis; + } + } + +#ifdef CONFIG_PCI_IOV + /* prep for VF support */ + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + u32 val; + + /* disable link interrupts for VFs */ + val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); + val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; + wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); + i40e_flush(hw); + + if (pci_num_vf(pdev)) { + dev_info(&pdev->dev, + "Active VFs found, allocating resources.\n"); + err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); + if (err) + dev_info(&pdev->dev, + "Error %d allocating resources for existing VFs\n", + err); + } + } +#endif /* CONFIG_PCI_IOV */ + + pfs_found++; + + i40e_dbg_pf_init(pf); + + /* tell the firmware that we're starting */ + i40e_send_version(pf); + + /* since everything's happy, start the service_task timer */ + mod_timer(&pf->service_timer, + round_jiffies(jiffies + pf->service_timer_period)); + +#ifdef I40E_FCOE + /* create FCoE interface */ + i40e_fcoe_vsi_setup(pf); + +#endif + /* Get the negotiated link width and speed from PCI config space */ + pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); + + i40e_set_pci_config_data(hw, link_status); + + dev_info(&pdev->dev, "PCI-Express: %s %s\n", + (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : + hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : + hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : + "Unknown"), + (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : + hw->bus.width == i40e_bus_width_pcie_x4 ? 
"Width x4" : + hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : + hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" : + "Unknown")); + + if (hw->bus.width < i40e_bus_width_pcie_x8 || + hw->bus.speed < i40e_bus_speed_8000) { + dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); + dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + } + + /* get the requested speeds from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); + if (err) + dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", + err); + pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + + /* print a string summarizing features */ + i40e_print_features(pf); + + return 0; + + /* Unwind what we've done if something failed in the setup */ +err_vsis: + set_bit(__I40E_DOWN, &pf->state); + i40e_clear_interrupt_scheme(pf); + kfree(pf->vsi); +err_switch_setup: + i40e_reset_interrupt_capability(pf); + del_timer_sync(&pf->service_timer); +err_mac_addr: +err_configure_lan_hmc: + (void)i40e_shutdown_lan_hmc(hw); +err_init_lan_hmc: + kfree(pf->qp_pile); +err_sw_init: +err_adminq_setup: + (void)i40e_shutdown_adminq(hw); +err_pf_reset: + iounmap(hw->hw_addr); +err_ioremap: + kfree(pf); +err_pf_alloc: + pci_disable_pcie_error_reporting(pdev); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * i40e_remove - Device removal routine + * @pdev: PCI device information struct + * + * i40e_remove is called by the PCI subsystem to alert the driver + * that is should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void i40e_remove(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + i40e_status ret_code; + int i; + + i40e_dbg_pf_exit(pf); + + i40e_ptp_stop(pf); + + /* no more scheduling of any task */ + set_bit(__I40E_DOWN, &pf->state); + del_timer_sync(&pf->service_timer); + cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); + + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + } + + i40e_fdir_teardown(pf); + + /* If there is a switch structure or any orphans, remove them. + * This will leave only the PF's VSI remaining. + */ + for (i = 0; i < I40E_MAX_VEB; i++) { + if (!pf->veb[i]) + continue; + + if (pf->veb[i]->uplink_seid == pf->mac_seid || + pf->veb[i]->uplink_seid == 0) + i40e_switch_branch_release(pf->veb[i]); + } + + /* Now we can shutdown the PF's VSI, just before we kill + * adminq and hmc. 
+ */ + if (pf->vsi[pf->lan_vsi]) + i40e_vsi_release(pf->vsi[pf->lan_vsi]); + + /* shutdown and destroy the HMC */ + if (pf->hw.hmc.hmc_obj) { + ret_code = i40e_shutdown_lan_hmc(&pf->hw); + if (ret_code) + dev_warn(&pdev->dev, + "Failed to destroy the HMC resources: %d\n", + ret_code); + } + + /* shutdown the adminq */ + ret_code = i40e_shutdown_adminq(&pf->hw); + if (ret_code) + dev_warn(&pdev->dev, + "Failed to destroy the Admin Queue resources: %d\n", + ret_code); + + /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ + i40e_clear_interrupt_scheme(pf); + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i]) { + i40e_vsi_clear_rings(pf->vsi[i]); + i40e_vsi_clear(pf->vsi[i]); + pf->vsi[i] = NULL; + } + } + + for (i = 0; i < I40E_MAX_VEB; i++) { + kfree(pf->veb[i]); + pf->veb[i] = NULL; + } + + kfree(pf->qp_pile); + kfree(pf->vsi); + + iounmap(pf->hw.hw_addr); + kfree(pf); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); +} + +/** + * i40e_pci_error_detected - warning that something funky happened in PCI land + * @pdev: PCI device information struct + * + * Called to warn that something happened and the error handling steps + * are in progress. Allows the driver to quiesce things, be ready for + * remediation. + **/ +static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, + enum pci_channel_state error) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "%s: error %d\n", __func__, error); + + /* shutdown all operations */ + if (!test_bit(__I40E_SUSPENDED, &pf->state)) { + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + } + + /* Request a slot reset */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * i40e_pci_error_slot_reset - a PCI slot reset just happened + * @pdev: PCI device information struct + * + * Called to find if the driver can work with the device now that + * the pci slot has been reset. If a basic connection seems good + * (registers are readable and have sane content) then return a + * happy little PCI_ERS_RESULT_xxx. + **/ +static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + pci_ers_result_t result; + int err; + u32 reg; + + dev_info(&pdev->dev, "%s\n", __func__); + if (pci_enable_device_mem(pdev)) { + dev_info(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_wake_from_d3(pdev, false); + + reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); + if (reg == 0) + result = PCI_ERS_RESULT_RECOVERED; + else + result = PCI_ERS_RESULT_DISCONNECT; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_info(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); + /* non-fatal, continue */ + } + + return result; +} + +/** + * i40e_pci_error_resume - restart operations after PCI error recovery + * @pdev: PCI device information struct + * + * Called to allow the driver to bring things back up after PCI error + * and/or reset recovery has finished. 
+ **/ +static void i40e_pci_error_resume(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "%s\n", __func__); + if (test_bit(__I40E_SUSPENDED, &pf->state)) + return; + + rtnl_lock(); + i40e_handle_reset_warning(pf); + rtnl_unlock(); +} + +/** + * i40e_shutdown - PCI callback for shutting down + * @pdev: PCI device information struct + **/ +static void i40e_shutdown(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; + + set_bit(__I40E_SUSPENDED, &pf->state); + set_bit(__I40E_DOWN, &pf->state); + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + + wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + + i40e_clear_interrupt_scheme(pf); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, pf->wol_en); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PM +/** + * i40e_suspend - PCI callback for moving to D3 + * @pdev: PCI device information struct + **/ +static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; + + set_bit(__I40E_SUSPENDED, &pf->state); + set_bit(__I40E_DOWN, &pf->state); + del_timer_sync(&pf->service_timer); + cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); + + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + + wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + + pci_wake_from_d3(pdev, pf->wol_en); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +/** + * i40e_resume - PCI callback for waking up from D3 + * @pdev: PCI device information struct + **/ +static int i40e_resume(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* pci_restore_state() clears dev->state_saved, so + * call pci_save_state() again to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "%s: Cannot enable PCI device from suspend\n", + __func__); + return err; + } + pci_set_master(pdev); + + /* no wakeup events while running */ + pci_wake_from_d3(pdev, false); + + /* handling the reset will rebuild the device state */ + if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { + clear_bit(__I40E_DOWN, &pf->state); + rtnl_lock(); + i40e_reset_and_rebuild(pf, false); + rtnl_unlock(); + } + + return 0; +} + +#endif +static const struct pci_error_handlers i40e_err_handler = { + .error_detected = i40e_pci_error_detected, + .slot_reset = i40e_pci_error_slot_reset, + .resume = i40e_pci_error_resume, +}; + +static struct pci_driver i40e_driver = { + .name = i40e_driver_name, + .id_table = i40e_pci_tbl, + .probe = i40e_probe, + .remove = i40e_remove, +#ifdef CONFIG_PM + .suspend = i40e_suspend, + .resume = i40e_resume, +#endif + .shutdown = i40e_shutdown, + .err_handler = &i40e_err_handler, + .sriov_configure = i40e_pci_sriov_configure, +}; + +/** + * i40e_init_module - Driver registration routine + * + * i40e_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem.
+ **/ +static int __init i40e_init_module(void) +{ + pr_info("%s: %s - version %s\n", i40e_driver_name, + i40e_driver_string, i40e_driver_version_str); + pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + + i40e_dbg_init(); + return pci_register_driver(&i40e_driver); +} +module_init(i40e_init_module); + +/** + * i40e_exit_module - Driver exit cleanup routine + * + * i40e_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit i40e_exit_module(void) +{ + pci_unregister_driver(&i40e_driver); + i40e_dbg_exit(); +} +module_exit(i40e_exit_module); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c new file mode 100644 index 000000000..554e49d02 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -0,0 +1,1006 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_prototype.h" + +/** + * i40e_init_nvm_ops - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setup the function pointers and the NVM info structure. Should be called + * once per NVM initialization, e.g. inside the i40e_init_shared_code(). + * Please notice that the NVM term is used here (& in all methods covered + * in this file) as an equivalent of the FLASH part mapped into the SR. + * We are accessing FLASH always thru the Shadow RAM. + **/ +i40e_status i40e_init_nvm(struct i40e_hw *hw) +{ + struct i40e_nvm_info *nvm = &hw->nvm; + i40e_status ret_code = 0; + u32 fla, gens; + u8 sr_size; + + /* The SR size is stored regardless of the nvm programming mode + * as the blank mode may be used in the factory line. 
+ */ + gens = rd32(hw, I40E_GLNVM_GENS); + sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> + I40E_GLNVM_GENS_SR_SIZE_SHIFT); + /* Switching to words (sr_size contains power of 2KB) */ + nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; + + /* Check if we are in the normal or blank NVM programming mode */ + fla = rd32(hw, I40E_GLNVM_FLA); + if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ + /* Max NVM timeout */ + nvm->timeout = I40E_MAX_NVM_TIMEOUT; + nvm->blank_nvm_mode = false; + } else { /* Blank programming mode */ + nvm->blank_nvm_mode = true; + ret_code = I40E_ERR_NVM_BLANK_MODE; + i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); + } + + return ret_code; +} + +/** + * i40e_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) + * + * This function will request NVM ownership for reading + * via the proper Admin Command. + **/ +i40e_status i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access) +{ + i40e_status ret_code = 0; + u64 gtime, timeout; + u64 time_left = 0; + + if (hw->nvm.blank_nvm_mode) + goto i40e_i40e_acquire_nvm_exit; + + ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, + 0, &time_left, NULL); + /* Reading the Global Device Timer */ + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + + /* Store the timeout */ + hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; + + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", + access, time_left, ret_code, hw->aq.asq_last_status); + + if (ret_code && time_left) { + /* Poll until the current NVM owner timeouts */ + timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; + while ((gtime < timeout) && time_left) { + usleep_range(10000, 20000); + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + ret_code = i40e_aq_request_resource(hw, + I40E_NVM_RESOURCE_ID, + access, 0, &time_left, + NULL); + if (!ret_code) { + hw->nvm.hw_semaphore_timeout = + I40E_MS_TO_GTIME(time_left) + gtime; + break; + } + } + if (ret_code) { + hw->nvm.hw_semaphore_timeout = 0; + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", + time_left, ret_code, hw->aq.asq_last_status); + } + } + +i40e_i40e_acquire_nvm_exit: + return ret_code; +} + +/** + * i40e_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure + * + * This function will release NVM resource via the proper Admin Command. + **/ +void i40e_release_nvm(struct i40e_hw *hw) +{ + if (!hw->nvm.blank_nvm_mode) + i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); +} + +/** + * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit + * @hw: pointer to the HW structure + * + * Polls the SRCTL Shadow RAM register done bit. 
+ **/ +static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) +{ + i40e_status ret_code = I40E_ERR_TIMEOUT; + u32 srctl, wait_cnt; + + /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ + for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { + srctl = rd32(hw, I40E_GLNVM_SRCTL); + if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { + ret_code = 0; + break; + } + udelay(5); + } + if (ret_code == I40E_ERR_TIMEOUT) + i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); + return ret_code; +} + +/** + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + i40e_status ret_code = I40E_ERR_TIMEOUT; + u32 sr_reg; + + if (offset >= hw->nvm.sr_size) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: offset %d beyond Shadow RAM limit %d\n", + offset, hw->nvm.sr_size); + ret_code = I40E_ERR_PARAM; + goto read_nvm_exit; + } + + /* Poll the done bit first */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (!ret_code) { + /* Write the address and start reading */ + sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | + (1 << I40E_GLNVM_SRCTL_START_SHIFT); + wr32(hw, I40E_GLNVM_SRCTL, sr_reg); + + /* Poll I40E_GLNVM_SRCTL until the done bit is set */ + ret_code = i40e_poll_sr_srctl_done_bit(hw); + if (!ret_code) { + sr_reg = rd32(hw, I40E_GLNVM_SRDATA); + *data = (u16)((sr_reg & + I40E_GLNVM_SRDATA_RDDATA_MASK) + >> I40E_GLNVM_SRDATA_RDDATA_SHIFT); + } + } + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", + offset); + +read_nvm_exit: + return ret_code; +} + +/** + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + return i40e_read_nvm_word_srctl(hw, offset, data); +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + i40e_status ret_code = 0; + u16 index, word; + + /* Loop thru the selected region */ + for (word = 0; word < *words; word++) { + index = offset + word; + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); + if (ret_code) + break; + } + + /* Update the number of words read from the Shadow RAM */ + *words = word; + + return ret_code; +} + +/** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). 
+ * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} + +/** + * i40e_write_nvm_aq - Writes Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to write + * @data: buffer with words to write to the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + **/ +static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 words, void *data, + bool last_command) +{ + i40e_status ret_code = I40E_ERR_NVM; + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. + */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can write only up to 4KB (one sector), in one AQ write */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single write cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_update_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, NULL); + + return ret_code; +} + +/** + * i40e_calc_nvm_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @checksum: pointer to the checksum + * + * This function calculates SW Checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD + * is customer specific and unknown. Therefore, this function skips all maximum + * possible size of VPD (1kB). 
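+ * + * As a worked example of the arithmetic below (the 0x1234 word sum is + * purely hypothetical): if the words summed here, after the skipped + * regions, came to 0x1234, the checksum stored at I40E_SR_SW_CHECKSUM_WORD + * would be (u16)(I40E_SR_SW_CHECKSUM_BASE - 0x1234); validation later + * recomputes the same sum and compares it against that stored word.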
+ **/ +static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) +{ + i40e_status ret_code = 0; + struct i40e_virt_mem vmem; + u16 pcie_alt_module = 0; + u16 checksum_local = 0; + u16 vpd_module = 0; + u16 *data; + u16 i = 0; + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; + + /* read pointer to VPD area */ + ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* read pointer to PCIe Alt Auto-load module */ + ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + &pcie_alt_module); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + + /* Calculate SW checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules + */ + for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + + /* Skip Checksum word */ + if (i == I40E_SR_SW_CHECKSUM_WORD) + continue; + /* Skip VPD module (convert byte size to word count) */ + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; + } + /* Skip PCIe ALT module (convert byte size to word count) */ + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; + } + + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; + } + + *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; + +i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); + return ret_code; +} + +/** + * i40e_update_nvm_checksum - Updates the NVM checksum + * @hw: pointer to hardware structure + * + * NVM ownership must be acquired before calling this function and released + * on ARQ completion event reception by caller. + * This function will commit SR to NVM. + **/ +i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u16 checksum; + + ret_code = i40e_calc_nvm_checksum(hw, &checksum); + if (!ret_code) + ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD, + 1, &checksum, true); + + return ret_code; +} + +/** + * i40e_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum: calculated checksum + * + * Performs checksum calculation and validates the NVM SW checksum. If the + * caller does not need checksum, the value can be NULL. + **/ +i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum) +{ + i40e_status ret_code = 0; + u16 checksum_sr = 0; + u16 checksum_local = 0; + + ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + if (ret_code) + goto i40e_validate_nvm_checksum_exit; + + /* Do not use i40e_read_nvm_word() because we do not want to take + * the synchronization semaphores twice here. 
+ */ + i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (checksum_local != checksum_sr) + ret_code = I40E_ERR_NVM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum) + *checksum = checksum_local; + +i40e_validate_nvm_checksum_exit: + return ret_code; +} + +static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno); +static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno); +static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno); +static inline u8 i40e_nvmupd_get_module(u32 val) +{ + return (u8)(val & I40E_NVM_MOD_PNT_MASK); +} +static inline u8 i40e_nvmupd_get_transaction(u32 val) +{ + return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); +} + +static char *i40e_nvm_update_state_str[] = { + "I40E_NVMUPD_INVALID", + "I40E_NVMUPD_READ_CON", + "I40E_NVMUPD_READ_SNT", + "I40E_NVMUPD_READ_LCB", + "I40E_NVMUPD_READ_SA", + "I40E_NVMUPD_WRITE_ERA", + "I40E_NVMUPD_WRITE_CON", + "I40E_NVMUPD_WRITE_SNT", + "I40E_NVMUPD_WRITE_LCB", + "I40E_NVMUPD_WRITE_SA", + "I40E_NVMUPD_CSUM_CON", + "I40E_NVMUPD_CSUM_SA", + "I40E_NVMUPD_CSUM_LCB", +}; + +/** + * i40e_nvmupd_command - Process an NVM update command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * Dispatches command depending on what update state is current + **/ +i40e_status i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + + /* assume success */ + *errno = 0; + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT: + status = i40e_nvmupd_state_init(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_STATE_READING: + status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_STATE_WRITING: + status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno); + break; + + default: + /* invalid state, should never happen */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: no such state %d\n", hw->nvmupd_state); + status = I40E_NOT_SUPPORTED; + *errno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_init - Handle NVM update state Init + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * Process legitimate commands of the Init state and conditionally set next + * state. Reject all other commands. 
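+ *
+ * For example, I40E_NVMUPD_READ_SA and I40E_NVMUPD_WRITE_SA are standalone
+ * transactions completed from this state, while I40E_NVMUPD_READ_SNT and
+ * I40E_NVMUPD_WRITE_SNT acquire the NVM resource and move the state machine
+ * to the Reading or Writing state for the rest of a multi-part transfer.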
+ **/ +static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status = 0; + enum i40e_nvmupd_cmd upd_cmd; + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + i40e_release_nvm(hw); + } + break; + + case I40E_NVMUPD_READ_SNT: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; + } + break; + + case I40E_NVMUPD_WRITE_ERA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_erase(hw, cmd, errno); + if (status) + i40e_release_nvm(hw); + else + hw->aq.nvm_release_on_done = true; + } + break; + + case I40E_NVMUPD_WRITE_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + if (status) + i40e_release_nvm(hw); + else + hw->aq.nvm_release_on_done = true; + } + break; + + case I40E_NVMUPD_WRITE_SNT: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + } + break; + + case I40E_NVMUPD_CSUM_SA: + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); + } else { + status = i40e_update_nvm_checksum(hw); + if (status) { + *errno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + i40e_release_nvm(hw); + } else { + hw->aq.nvm_release_on_done = true; + } + } + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in init state\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_ERR_NVM; + *errno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_reading - Handle NVM update state Reading + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands. 
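+ *
+ * For example, I40E_NVMUPD_READ_CON continues a transfer started by
+ * I40E_NVMUPD_READ_SNT, while I40E_NVMUPD_READ_LCB performs the last read,
+ * releases the NVM resource and returns the state machine to Init.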
+ **/ +static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + enum i40e_nvmupd_cmd upd_cmd; + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + + switch (upd_cmd) { + case I40E_NVMUPD_READ_SA: + case I40E_NVMUPD_READ_CON: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_READ_LCB: + status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); + i40e_release_nvm(hw); + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in reading state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *errno = -ESRCH; + break; + } + return status; +} + +/** + * i40e_nvmupd_state_writing - Handle NVM update state Writing + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * NVM ownership is already held. Process legitimate commands and set any + * change in state; reject all other commands + **/ +static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + enum i40e_nvmupd_cmd upd_cmd; + bool retry_attempt = false; + + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); + +retry: + switch (upd_cmd) { + case I40E_NVMUPD_WRITE_CON: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + break; + + case I40E_NVMUPD_WRITE_LCB: + status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); + if (!status) + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_CSUM_CON: + status = i40e_update_nvm_checksum(hw); + if (status) { + *errno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } + break; + + case I40E_NVMUPD_CSUM_LCB: + status = i40e_update_nvm_checksum(hw); + if (status) + *errno = hw->aq.asq_last_status ? + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : + -EIO; + else + hw->aq.nvm_release_on_done = true; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in writing state.\n", + i40e_nvm_update_state_str[upd_cmd]); + status = I40E_NOT_SUPPORTED; + *errno = -ESRCH; + break; + } + + /* In some circumstances, a multi-write transaction takes longer + * than the default 3 minute timeout on the write semaphore. If + * the write failed with an EBUSY status, this is likely the problem, + * so here we try to reacquire the semaphore then retry the write. + * We only do one retry, then give up. 
+ */ + if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + !retry_attempt) { + i40e_status old_status = status; + u32 old_asq_status = hw->aq.asq_last_status; + u32 gtime; + + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + if (gtime >= hw->nvm.hw_semaphore_timeout) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", + gtime, hw->nvm.hw_semaphore_timeout); + i40e_release_nvm(hw); + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore reacquire failed aq_err = %d\n", + hw->aq.asq_last_status); + status = old_status; + hw->aq.asq_last_status = old_asq_status; + } else { + retry_attempt = true; + goto retry; + } + } + } + + return status; +} + +/** + * i40e_nvmupd_validate_command - Validate given command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @errno: pointer to return error code + * + * Return one of the valid command types or I40E_NVMUPD_INVALID + **/ +static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno) +{ + enum i40e_nvmupd_cmd upd_cmd; + u8 transaction; + + /* anything that doesn't match a recognized case is an error */ + upd_cmd = I40E_NVMUPD_INVALID; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + + /* limits on data size */ + if ((cmd->data_size < 1) || + (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command data_size %d\n", + cmd->data_size); + *errno = -EFAULT; + return I40E_NVMUPD_INVALID; + } + + switch (cmd->command) { + case I40E_NVM_READ: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_READ_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_READ_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_READ_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_READ_SA; + break; + } + break; + + case I40E_NVM_WRITE: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_WRITE_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_WRITE_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_WRITE_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_WRITE_SA; + break; + case I40E_NVM_ERA: + upd_cmd = I40E_NVMUPD_WRITE_ERA; + break; + case I40E_NVM_CSUM: + upd_cmd = I40E_NVMUPD_CSUM_CON; + break; + case (I40E_NVM_CSUM|I40E_NVM_SA): + upd_cmd = I40E_NVMUPD_CSUM_SA; + break; + case (I40E_NVM_CSUM|I40E_NVM_LCB): + upd_cmd = I40E_NVMUPD_CSUM_LCB; + break; + } + break; + } + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done); + + if (upd_cmd == I40E_NVMUPD_INVALID) { + *errno = -EFAULT; + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *errno); + } + return upd_cmd; +} + +/** + * i40e_nvmupd_nvm_read - Read NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction == I40E_NVM_LCB) || 
(transaction == I40E_NVM_SA); + + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + bytes, last, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_erase - Erase an NVM module + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @errno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *errno) +{ + i40e_status status = 0; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + last, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_nvm_write - Write NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @errno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *errno) +{ + i40e_status status = 0; + u8 module, transaction; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + + status = i40e_aq_update_nvm(hw, module, cmd->offset, + (u16)cmd->data_size, bytes, last, NULL); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h new file mode 100644 index 000000000..ad802dd0f --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -0,0 +1,84 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_OSDEP_H_ +#define _I40E_OSDEP_H_ + +#include +#include +#include +#include +#include +#include + +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include + +/* File to be the magic between shared code and + * actual OS primitives + */ + +#define hw_dbg(hw, S, A...) do {} while (0) + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) + +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +#define i40e_allocate_dma_mem(h, m, unused, s, a) \ + i40e_allocate_dma_mem_d(h, m, s, a) +#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m) + +struct i40e_virt_mem { + void *va; + u32 size; +} __packed; + +#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s) +#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m) + +#define i40e_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("i40e %02x.%x " s, \ + (h)->bus.device, (h)->bus.func, \ + ##__VA_ARGS__); \ +} while (0) + +typedef enum i40e_status_code i40e_status; +#ifdef CONFIG_I40E_FCOE +#define I40E_FCOE +#endif +#endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h new file mode 100644 index 000000000..7b34f1e66 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -0,0 +1,311 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include "i40e_virtchnl.h" + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. 
+ */ + +/* adminq functions */ +i40e_status i40e_init_adminq(struct i40e_hw *hw); +i40e_status i40e_shutdown_adminq(struct i40e_hw *hw); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +i40e_status i40e_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +i40e_status i40e_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); + +/* debug function for adminq */ +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct i40e_hw *hw); +bool i40e_check_asq_alive(struct i40e_hw *hw); +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); + +u32 i40e_led_get(struct i40e_hw *hw); +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); + +/* admin send queue commands */ + +i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, + u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, + u16 *api_major_version, u16 *api_minor_version, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, + u32 reg_addr, u64 reg_val, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, + u32 reg_addr, u64 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, + bool qualified_modules, bool report_init, + struct i40e_aq_get_phy_abilities_resp *abilities, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, + struct i40e_aq_set_phy_config *config, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, + bool atomic_reset); +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, + bool enable_link, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, + bool enable_lse, struct i40e_link_status *link, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, + u64 advt_reg, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, + struct i40e_driver_version *dv, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, + u16 vsi_id, bool set_filter, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +i40e_status 
i40e_aq_update_vsi_params(struct i40e_hw *hw, + struct i40e_vsi_context *vsi_ctx, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, + u16 downlink_seid, u8 enabled_tc, + bool default_port, bool enable_l2_filtering, + u16 *pveb_seid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, + u16 veb_seid, u16 *switch_id, bool *floating, + u16 *statistic_index, u16 *vebs_used, + u16 *vebs_free, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_add_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, + struct i40e_aqc_remove_macvlan_element_data *mv_list, + u16 count, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, + u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, + struct i40e_aqc_get_switch_config_resp *buf, + u16 buf_size, u16 *start_seid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_request_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + enum i40e_aq_resource_access_type access, + u8 sdp_number, u64 *timeout, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_release_resource(struct i40e_hw *hw, + enum i40e_aq_resources_ids resource, + u8 sdp_number, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, bool last_command, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, + void *buff, u16 buff_size, u16 *data_size, + enum i40e_admin_queue_opc list_type_opc, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, + u32 offset, u16 length, void *data, + bool last_command, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, + u8 mib_type, void *buff, u16 buff_size, + u16 *local_len, u16 *remote_len, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, + bool enable_update, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 protocol_index, + u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, + u16 flags, u8 *mac_addr, + struct i40e_asq_cmd_details *cmd_details); +i40e_status 
i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_credit, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, + enum i40e_aq_hmc_profile profile, + u8 pe_vf_enabled_count, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, + u16 seid, u16 credit, u8 max_bw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_port_ets_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); +/* i40e_common */ +i40e_status i40e_init_shared_code(struct i40e_hw *hw); +i40e_status i40e_pf_reset(struct i40e_hw *hw); +void i40e_clear_hw(struct i40e_hw *hw); +void i40e_clear_pxe_mode(struct i40e_hw *hw); +bool i40e_get_link_status(struct i40e_hw *hw); +i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, + bool *max_valid); +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, + u32 pba_num_size); +i40e_status i40e_validate_mac_addr(u8 *mac_addr); +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); +#ifdef I40E_FCOE +i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +#endif +/* prototype for functions used for NVM access */ +i40e_status i40e_init_nvm(struct i40e_hw *hw); +i40e_status i40e_acquire_nvm(struct i40e_hw *hw, + enum i40e_aq_resource_access_type access); +void i40e_release_nvm(struct i40e_hw *hw); +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data); 
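+/* Typical ownership pattern for these NVM helpers (a sketch, not enforced by
+ * every helper): take the resource with i40e_acquire_nvm(), perform the reads
+ * or updates, then drop it with i40e_release_nvm().
+ * i40e_update_nvm_checksum() in particular requires the caller to hold the
+ * resource and to release it on ARQ completion.
+ */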
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data); +i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw); +i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, + u16 *checksum); +i40e_status i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *); +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); + +extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40e_ptype_lookup[ptype]; +} + +/* prototype for functions used for SW locks */ + +/* i40e_common for VF drivers*/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg); +i40e_status i40e_vf_reset(struct i40e_hw *hw); +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, + u8 table_id, u32 start_index, u16 buff_size, + void *buff, u16 *ret_buff_size, + u8 *ret_next_table, u32 *ret_next_index, + struct i40e_asq_cmd_details *cmd_details); +#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c new file mode 100644 index 000000000..a92b7725d --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -0,0 +1,728 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e.h" +#include + +/* The XL710 timesync is very much like Intel's 82599 design when it comes to + * the fundamental clock design. However, the clock operations are much simpler + * in the XL710 because the device supports a full 64 bits of nanoseconds. + * Because the field is so wide, we can forgo the cycle counter and just + * operate with the nanosecond field directly without fear of overflow. + * + * Much like the 82599, the update period is dependent upon the link speed: + * At 40Gb link or no link, the period is 1.6ns. + * At 10Gb link, the period is multiplied by 2. 
(3.2ns) + * At 1Gb link, the period is multiplied by 20. (32ns) + * 1588 functionality is not supported at 100Mbps. + */ +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL 0x0333333333ULL +#define I40E_PTP_1GB_INCVAL 0x2000000000ULL + +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) + +/** + * i40e_ptp_read - Read the PHC time from the device + * @pf: Board private structure + * @ts: timespec structure to hold the current time value + * + * This function reads the PRTTSYN_TIME registers and stores them in a + * timespec. However, since the registers are 64 bits of nanoseconds, we must + * convert the result to a timespec before we can return. + **/ +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) +{ + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + /* The timer latches on the lowest register read. */ + lo = rd32(hw, I40E_PRTTSYN_TIME_L); + hi = rd32(hw, I40E_PRTTSYN_TIME_H); + + ns = (((u64)hi) << 32) | lo; + + *ts = ns_to_timespec64(ns); +} + +/** + * i40e_ptp_write - Write the PHC time to the device + * @pf: Board private structure + * @ts: timespec structure that holds the new time value + * + * This function writes the PRTTSYN_TIME registers with the user value. Since + * we receive a timespec from the stack, we must convert that timespec into + * nanoseconds before programming the registers. + **/ +static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) +{ + struct i40e_hw *hw = &pf->hw; + u64 ns = timespec64_to_ns(ts); + + /* The timer will not update until the high register is written, so + * write the low register first. + */ + wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32); +} + +/** + * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time + * @hwtstamps: Timestamp structure to update + * @timestamp: Timestamp from the hardware + * + * We need to convert the NIC clock value into a hwtstamp which can be used by + * the upper level timestamping functions. Since the timestamp is simply a 64- + * bit nanosecond value, we can call ns_to_ktime directly to handle this. + **/ +static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps, + u64 timestamp) +{ + memset(hwtstamps, 0, sizeof(*hwtstamps)); + + hwtstamps->hwtstamp = ns_to_ktime(timestamp); +} + +/** + * i40e_ptp_adjfreq - Adjust the PHC frequency + * @ptp: The PTP clock structure + * @ppb: Parts per billion adjustment from the base + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct i40e_hw *hw = &pf->hw; + u64 adj, freq, diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); /* Force any pending update before accessing. 
*/ + adj = ACCESS_ONCE(pf->ptp_base_adj); + + freq = adj; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + if (neg_adj) + adj -= diff; + else + adj += diff; + + wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); + + return 0; +} + +/** + * i40e_ptp_adjtime - Adjust the PHC time + * @ptp: The PTP clock structure + * @delta: Offset in nanoseconds to adjust the PHC time by + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct timespec64 now, then = ns_to_timespec64(delta); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + + i40e_ptp_read(pf, &now); + now = timespec64_add(now, then); + i40e_ptp_write(pf, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_gettime - Get the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure to hold the current time value + * + * Read the device clock and return the correct value on ns, after converting it + * into a timespec struct. + **/ +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_read(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_settime - Set the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + **/ +static int i40e_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_write(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem + * @ptp: The PTP clock structure + * @rq: The requested feature to change + * @on: Enable/disable flag + * + * The XL710 does not support any of the ancillary features of the PHC + * subsystem, so this function may just return. + **/ +static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung + * @vsi: The VSI with the rings relevant to 1588 + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_ring *rx_ring; + unsigned long rx_event; + u32 prttsyn_stat; + int n; + + /* Since we cannot turn off the Rx timestamp logic if the device is + * configured for Tx timestamping, we check if Rx timestamping is + * configured. We don't want to spuriously warn about Rx timestamp + * hangs if we don't care about the timestamps. 
+ */ + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + return; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + /* Unless all four receive timestamp registers are latched, we are not + * concerned about a possible PTP Rx hang, so just update the timeout + * counter and exit. + */ + if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK << + I40E_PRTTSYN_STAT_1_RXT0_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT1_MASK << + I40E_PRTTSYN_STAT_1_RXT1_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT2_MASK << + I40E_PRTTSYN_STAT_1_RXT2_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT3_MASK << + I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) { + pf->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event. */ + rx_event = pf->last_rx_ptp_check; + for (n = 0; n < vsi->num_queue_pairs; n++) { + rx_ring = vsi->rx_rings[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + pf->last_rx_ptp_check = jiffies; + pf->rx_hwtstamp_cleared++; + dev_warn(&vsi->back->pdev->dev, + "%s: clearing Rx timestamp hang\n", + __func__); + } +} + +/** + * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp + * @pf: Board private structure + * + * Read the value of the Tx timestamp from the registers, convert it into a + * value consumable by the stack, and store that result into the shhwtstamps + * struct before returning it up the stack. + **/ +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) + return; + + /* don't attempt to timestamp if we don't have an skb */ + if (!pf->ptp_tx_skb) + return; + + lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); + hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns); + skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); +} + +/** + * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp + * @pf: Board private structure + * @skb: Particular skb to send timestamp with + * @index: Index into the receive timestamp registers for the timestamp + * + * The XL710 receives a notification in the receive descriptor with an offset + * into the set of RXTIME registers where the timestamp is for that skb. This + * function goes and fetches the receive timestamp from that offset, if a valid + * one exists. The RXTIME registers are in ns, so we must convert the result + * first. + **/ +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) +{ + u32 prttsyn_stat, hi, lo; + struct i40e_hw *hw; + u64 ns; + + /* Since we cannot turn off the Rx timestamp logic if the device is + * doing Tx timestamping, check if Rx timestamping is configured. 
+ */ + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) + return; + + hw = &pf->hw; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + if (!(prttsyn_stat & (1 << index))) + return; + + lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); + hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index)); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns); +} + +/** + * i40e_ptp_set_increment - Utility function to update clock increment rate + * @pf: Board private structure + * + * During a link change, the DMA frequency that drives the 1588 logic will + * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds, + * we must update the increment value per clock tick. + **/ +void i40e_ptp_set_increment(struct i40e_pf *pf) +{ + struct i40e_link_status *hw_link_info; + struct i40e_hw *hw = &pf->hw; + u64 incval; + + hw_link_info = &hw->phy.link_info; + + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + + switch (hw_link_info->link_speed) { + case I40E_LINK_SPEED_10GB: + incval = I40E_PTP_10GB_INCVAL; + break; + case I40E_LINK_SPEED_1GB: + incval = I40E_PTP_1GB_INCVAL; + break; + case I40E_LINK_SPEED_100MB: + { + static int warn_once; + + if (!warn_once) { + dev_warn(&pf->pdev->dev, + "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); + warn_once++; + } + incval = 0; + break; + } + case I40E_LINK_SPEED_40GB: + default: + incval = I40E_PTP_40GB_INCVAL; + break; + } + + /* Write the new increment value into the increment register. The + * hardware will not update the clock until both registers have been + * written. + */ + wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); + + /* Update the base adjustement value. */ + ACCESS_ONCE(pf->ptp_base_adj) = incval; + smp_mb(); /* Force the above update. */ +} + +/** + * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Obtain the current hardware timestamping settigs as requested. To do this, + * keep a shadow copy of the timestamp settings rather than attempting to + * deconstruct it from the registers. + **/ +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &pf->tstamp_config; + + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode + * @pf: Board private structure + * @config: hwtstamp settings requested or saved + * + * Control hardware registers to enter the specific mode requested by the + * user. Also used during reset path to ensure that timestamp settings are + * maintained. + * + * Note: modifies config in place, and may update the requested mode to be + * more broad if the specific filter is not directly supported. + **/ +static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, + struct hwtstamp_config *config) +{ + struct i40e_hw *hw = &pf->hw; + u32 tsyntype, regval; + + /* Reserved for future extensions. */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + pf->ptp_tx = false; + break; + case HWTSTAMP_TX_ON: + pf->ptp_tx = true; + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pf->ptp_rx = false; + /* We set the type to V1, but do not enable UDP packet + * recognition. 
In this way, we should be as close to + * disabling PTP Rx timestamps as possible since V1 packets + * are always UDP, since L2 packets are a V2 feature. + */ + tsyntype = I40E_PRTTSYN_CTL1_TSYNTYPE_V1; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V1 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V2 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_ALL: + default: + return -ERANGE; + } + + /* Clear out all 1588-related registers to clear and unlatch them. */ + rd32(hw, I40E_PRTTSYN_STAT_0); + rd32(hw, I40E_PRTTSYN_TXTIME_H); + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + + /* Enable/disable the Tx timestamp interrupt based on user input. */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + if (pf->ptp_tx) + regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + else + regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + regval = rd32(hw, I40E_PFINT_ICR0_ENA); + if (pf->ptp_tx) + regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + else + regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, regval); + + /* Although there is no simple on/off switch for Rx, we "disable" Rx + * timestamps by setting to V1 only mode and clear the UDP + * recognition. This ought to disable all PTP Rx timestamps as V1 + * packets are always over UDP. Note that software is configured to + * ignore Rx timestamps via the pf->ptp_rx flag. + */ + regval = rd32(hw, I40E_PRTTSYN_CTL1); + /* clear everything but the enable bit */ + regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + /* now enable bits for desired Rx timestamps */ + regval |= tsyntype; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + + return 0; +} + +/** + * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Respond to the user filter requests and make the appropriate hardware + * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping + * logic, so keep track in software of whether to indicate these timestamps + * or not. + * + * It is permissible to "upgrade" the user request to a broader filter, as long + * as the user receives the timestamps they care about and the user is notified + * the filter has been broadened. 
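+ *
+ * For example, a request for HWTSTAMP_FILTER_PTP_V1_L4_SYNC is granted as
+ * HWTSTAMP_FILTER_PTP_V1_L4_EVENT and any of the V2 filters is granted as
+ * HWTSTAMP_FILTER_PTP_V2_EVENT, while HWTSTAMP_FILTER_ALL is rejected with
+ * -ERANGE.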
+ **/ +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = i40e_ptp_set_timestamp_mode(pf, &config); + if (err) + return err; + + /* save these settings for future reference */ + pf->tstamp_config = config; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_create_clock - Create PTP clock device for userspace + * @pf: Board private structure + * + * This function creates a new PTP clock device. It only creates one if we + * don't already have one, so it is safe to call. Will return error if it + * can't create one, but success if we already have a device. Should be used + * by i40e_ptp_init to create clock initially, and prevent global resets from + * creating new clock devices. + **/ +static long i40e_ptp_create_clock(struct i40e_pf *pf) +{ + /* no need to create a clock device if we already have one */ + if (!IS_ERR_OR_NULL(pf->ptp_clock)) + return 0; + + strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); + pf->ptp_caps.owner = THIS_MODULE; + pf->ptp_caps.max_adj = 999999999; + pf->ptp_caps.n_ext_ts = 0; + pf->ptp_caps.pps = 0; + pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; + pf->ptp_caps.adjtime = i40e_ptp_adjtime; + pf->ptp_caps.gettime64 = i40e_ptp_gettime; + pf->ptp_caps.settime64 = i40e_ptp_settime; + pf->ptp_caps.enable = i40e_ptp_feature_enable; + + /* Attempt to register the clock before enabling the hardware. */ + pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); + if (IS_ERR(pf->ptp_clock)) { + return PTR_ERR(pf->ptp_clock); + } + + /* clear the hwtstamp settings here during clock create, instead of + * during regular init, so that we can maintain settings across a + * reset or suspend. + */ + pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * i40e_ptp_init - Initialize the 1588 support after device probe or reset + * @pf: Board private structure + * + * This function sets device up for 1588 support. The first time it is run, it + * will create a PHC clock device. It does not create a clock device if one + * already exists. It also reconfigures the device after a reset. + **/ +void i40e_ptp_init(struct i40e_pf *pf) +{ + struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + struct i40e_hw *hw = &pf->hw; + u32 pf_id; + long err; + + /* Only one PF is assigned to control 1588 logic per port. Do not + * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID + */ + pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> + I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + if (hw->pf_id != pf_id) { + pf->flags &= ~I40E_FLAG_PTP; + dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", + __func__, + netdev->name); + return; + } + + /* we have to initialize the lock first, since we can't control + * when the user will enter the PHC device entry points + */ + spin_lock_init(&pf->tmreg_lock); + + /* ensure we have a clock device */ + err = i40e_ptp_create_clock(pf); + if (err) { + pf->ptp_clock = NULL; + dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", + __func__); + } else { + struct timespec64 ts; + u32 regval; + + dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, + netdev->name); + pf->flags |= I40E_FLAG_PTP; + + /* Ensure the clocks are running. 
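+ * The TSYNENA enable bit is set in both PRTTSYN_CTL0 and PRTTSYN_CTL1 below.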
*/ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + regval = rd32(hw, I40E_PRTTSYN_CTL1); + regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + + /* Set the increment value per clock tick. */ + i40e_ptp_set_increment(pf); + + /* reset timestamping mode */ + i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config); + + /* Set the clock value. */ + ts = ktime_to_timespec64(ktime_get_real()); + i40e_ptp_settime(&pf->ptp_caps, &ts); + } +} + +/** + * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC + * @pf: Board private structure + * + * This function handles the cleanup work required from the initialization by + * clearing out the important information and unregistering the PHC. + **/ +void i40e_ptp_stop(struct i40e_pf *pf) +{ + pf->flags &= ~I40E_FLAG_PTP; + pf->ptp_tx = false; + pf->ptp_rx = false; + + if (pf->ptp_tx_skb) { + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); + } + + if (pf->ptp_clock) { + ptp_clock_unregister(pf->ptp_clock); + pf->ptp_clock = NULL; + dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, + pf->vsi[pf->lan_vsi]->netdev->name); + } +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h new file mode 100644 index 000000000..522d6df51 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -0,0 +1,3369 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + +#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) +#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) +#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ +#define I40E_GL_ARQH_ARQH_SHIFT 0 +#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) +#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ +#define I40E_GL_ARQT_ARQT_SHIFT 0 +#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) +#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ +#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) +#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ +#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) +#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ +#define I40E_GL_ATQH_ATQH_SHIFT 0 +#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) +#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ +#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) +#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) +#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) +#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) +#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) +#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ +#define I40E_GL_ATQT_ATQT_SHIFT 0 +#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) +#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ +#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) +#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ +#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) +#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ +#define I40E_PF_ARQH_ARQH_SHIFT 0 +#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) +#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ +#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) +#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) +#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ +#define I40E_PF_ARQT_ARQT_SHIFT 0 +#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, 
I40E_PF_ARQT_ARQT_SHIFT) +#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ +#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) +#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ +#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) +#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ +#define I40E_PF_ATQH_ATQH_SHIFT 0 +#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) +#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ +#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) +#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) +#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ +#define I40E_PF_ATQT_ATQT_SHIFT 0 +#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) +#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAH_MAX_INDEX 127 +#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAL_MAX_INDEX 127 +#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) +#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQH_MAX_INDEX 127 +#define I40E_VF_ARQH_ARQH_SHIFT 0 +#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) +#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQLEN_MAX_INDEX 127 +#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQT_MAX_INDEX 127 +#define I40E_VF_ARQT_ARQT_SHIFT 0 +#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) +#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAH_MAX_INDEX 127 +#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAL_MAX_INDEX 127 +#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL_ATQBAL_MASK 
I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) +#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQH_MAX_INDEX 127 +#define I40E_VF_ATQH_ATQH_SHIFT 0 +#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) +#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQLEN_MAX_INDEX 127 +#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQT_MAX_INDEX 127 +#define I40E_VF_ATQT_ATQT_SHIFT 0 +#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) +#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ +#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 +#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) +#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 +#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) +#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 +#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) +#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ +#define 
I40E_PFCM_LANCTXDATA_MAX_INDEX 3 +#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 +#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) +#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) +#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) +#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ +#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 +#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) +#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) +#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 +#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) +#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) +#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 +#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 +#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) +#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ +#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 +#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) +#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 +#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) +#define 
I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 +#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 +#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) +#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 +#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) +#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ +#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 +#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) +#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ +#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 +#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) +#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 +#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 +#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) +#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 +#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 +#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) +#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ +#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 +#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) +#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 +#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) +#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 +#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 +#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ +#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 +#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 +#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) +#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ +#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 +#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) +#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ +#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 +#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 +#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 +#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 +#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 +#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 +#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 +#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) +#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ +#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ +#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ +#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 +#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) +#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) +#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) +#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ +#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 +#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 +#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 +#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) +#define 
I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 +#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 +#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 +#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 +#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 +#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 +#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) +#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) +#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ +#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 +#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) +#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 +#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) +#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 +#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) +#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 +#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) +#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ +#define I40E_GL_FWSTS_FWS0B_SHIFT 0 +#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) +#define I40E_GL_FWSTS_FWRI_SHIFT 9 +#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) +#define I40E_GL_FWSTS_FWS1B_SHIFT 16 +#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ +#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 +#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) +#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ +#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK 
I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 +#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 +#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) +#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) +#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) +#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 +#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 +#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) +#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 +#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) +#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 +#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) +#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 +#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) +#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 +#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) +#define I40E_GLGEN_I2CCMD_R_SHIFT 29 +#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) +#define I40E_GLGEN_I2CCMD_E_SHIFT 31 +#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) +#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, 
I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 +#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 +#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 +#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 +#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) +#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) +#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, 
I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) +#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSCA_MAX_INDEX 3 +#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 +#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) +#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 +#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) +#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 +#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) +#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 +#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSRWD_MAX_INDEX 3 +#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 +#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) +#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 +#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) +#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ +#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 +#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) +#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 +#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) +#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 +#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) +#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 +#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 +#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 +#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) +#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ +#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 +#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) +#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ +#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 +#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) +#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 +#define 
I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) +#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 +#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) +#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ +#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 +#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) +#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 +#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) +#define I40E_GLGEN_STAT_VTEN_SHIFT 3 +#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) +#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 +#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) +#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 +#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) +#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 +#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) +#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 +#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 +#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) +#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ +#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 +#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) +#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ +#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 +#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) +#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ +#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 +#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) +#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ +#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ +#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 +#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) +#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 +#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) +#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 +#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) +#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 +#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) +#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ +#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 +#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) +#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ +#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 +#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, 
I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) +#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 +#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) +#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 +#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 +#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) +#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 +#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 +#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) +#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 +#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 +#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) +#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 +#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 +#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) +#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) +#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) +#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) +#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) +#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) +#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) +#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define 
I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 +#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) +#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) +#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) +#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) +#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) +#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) +#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) +#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ +#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 +#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) +#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) +#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) +#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) +#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) +#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 +#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) +#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* 
_i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) +#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) +#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) +#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_SDPART_MAX_INDEX 15 +#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) +#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) +#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) +#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ +#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ +#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 +#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) +#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) 
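Throughout this header every field is described by a paired _SHIFT/_MASK define, and the mask is already shifted into position (I40E_MASK(mask, shift) is assumed to expand to ((mask) << (shift)), as elsewhere in the driver), so a field is read by AND-ing the register value with the mask and shifting the result back down. A minimal sketch of that pattern against the PFHMC error registers defined just above; the helper name is hypothetical, while rd32() and struct i40e_hw are assumed to come from the driver's osdep layer:

/*
 * Sketch only: return the 4-bit HMC error type latched in PFHMC_ERRORINFO,
 * or ~0 if no error has been detected.
 */
static u32 i40e_example_hmc_error_type(struct i40e_hw *hw)
{
	u32 info = rd32(hw, I40E_PFHMC_ERRORINFO);

	if (!(info & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
		return ~0;

	/* Isolate the field with its pre-shifted mask, then move it down to bit 0. */
	return (info & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK) >>
	       I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
}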
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) +#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) +#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 +#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) +#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 +#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) +#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, 
I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) +#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ +#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_PFINT_CEQCTL_MAX_INDEX 511 +#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) +#define 
I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ +#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, 
I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define 
I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ +#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 +#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 +#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 +#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 +#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) +#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) +#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) +#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_SWINT_SHIFT 31 +#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT) +#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */ +#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT) +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) +#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ +#define I40E_PFINT_ITR0_MAX_INDEX 2 +#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) +#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_ITRN_MAX_INDEX 2 +#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) +#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ +#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) +#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ 
+#define I40E_PFINT_RATEN_MAX_INDEX 511 +#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) +#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_RQCTL_MAX_INDEX 1535 +#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) +#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_TQCTL_MAX_INDEX 1535 +#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) +#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 +#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define 
I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_MAX_INDEX 127 +#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_SWINT_SHIFT 31 +#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ +#define 
I40E_VFINT_ITR0_MAX_INDEX 2 +#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN_MAX_INDEX 2 +#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPINT_AEQCTL_MAX_INDEX 127 +#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_VPINT_CEQCTL_MAX_INDEX 511 +#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLST0_MAX_INDEX 127 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, 
I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_RATE0_MAX_INDEX 127 +#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) +#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_RATEN_MAX_INDEX 511 +#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) +#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) +#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 +#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) +#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ +#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 +#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) +#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) +#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) +#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ +#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ +#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 +#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) +#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 +#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QRX_ENA_MAX_INDEX 1535 +#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) +#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) +#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 +#define 
I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) +#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QRX_TAIL_MAX_INDEX 1535 +#define I40E_QRX_TAIL_TAIL_SHIFT 0 +#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) +#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_CTL_MAX_INDEX 1535 +#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 +#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) +#define I40E_QTX_CTL_PF_INDX_SHIFT 2 +#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) +#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 +#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) +#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_ENA_MAX_INDEX 1535 +#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) +#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) +#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) +#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_HEAD_MAX_INDEX 1535 +#define I40E_QTX_HEAD_HEAD_SHIFT 0 +#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) +#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 +#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) +#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_TAIL_MAX_INDEX 1535 +#define I40E_QTX_TAIL_TAIL_SHIFT 0 +#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) +#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_MAPENA_MAX_INDEX 127 +#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 +#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) +#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QTABLE_MAX_INDEX 15 +#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 +#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) +#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QBASE_MAX_INDEX 383 +#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 +#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) +#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QTABLE_MAX_INDEX 7 +#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 +#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) +#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 +#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) +#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ +#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 +#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) +#define I40E_PRTGL_SAH_MFS_SHIFT 16 +#define 
I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) +#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ +#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 +#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) +#define 
I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, 
I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) +#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ +#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 +#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) +#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) +#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) +#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) +#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* 
_i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 +#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 +#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) +#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 +#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 +#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 +#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 +#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) +#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 +#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 +#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) +#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 +#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 +#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) +#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_METF_MAX_INDEX 3 +#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 +#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) +#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 +#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) +#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) +#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 +#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) +#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) +#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 +#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) +#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 +#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) +#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 +#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) +#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 +#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) +#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) +#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) +#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ +#define I40E_MSIX_PBA_MAX_INDEX 5 +#define I40E_MSIX_PBA_PENBIT_SHIFT 0 +#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) +#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TADD_MAX_INDEX 128 +#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) +#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TMSG_MAX_INDEX 128 +#define 
I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TUADD_MAX_INDEX 128 +#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TVCTRL_MAX_INDEX 128 +#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ +#define I40E_VFMSIX_PBA1_MAX_INDEX 19 +#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 +#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 +#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) +#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 +#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) +#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 +#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) +#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 +#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) +#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 +#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) +#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 +#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) +#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 +#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) +#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 +#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) +#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 +#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) +#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ +#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 +#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, 
I40E_GLNVM_FLASHID_FLASHID_SHIFT) +#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 +#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) +#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ +#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 +#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) +#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 +#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) +#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 +#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) +#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 +#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) +#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 +#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) +#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ +#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) +#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ +#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 +#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) +#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 +#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) +#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 +#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) +#define I40E_GLNVM_SRCTL_START_SHIFT 30 +#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_SHIFT 31 +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ +#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 +#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) +#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 +#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) +#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ +#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 +#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) +#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ +#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 +#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) +#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 +#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 +#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 +#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 +#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 +#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 +#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 +#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 +#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) +#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ +#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 +#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) +#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 +#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) +#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ +#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 +#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 +#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 +#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, 
I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) +#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) +#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) +#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) +#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ +#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 +#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) +#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 +#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) +#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 +#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) +#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) +#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ +#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 +#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, 
I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 +#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) +#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 +#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) +#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ +#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 +#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 +#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 +#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 +#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) +#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ +#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 +#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) +#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ +#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 +#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) +#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ +#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 +#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) +#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) +#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ +#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 +#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) +#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ +#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 +#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, 
I40E_GLPCI_VENDORID_VENDORID_SHIFT) +#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ +#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 +#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) +#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 +#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) +#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ +#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 +#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) +#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 +#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) +#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ +#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 +#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) +#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ +#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 +#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) +#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 +#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) +#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 +#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) +#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ +#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 +#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) +#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 +#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) +#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 +#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) +#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 +#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) +#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */ +#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 +#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) +#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 +#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) +#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) +#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ +#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) +#define 
I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) +#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) +#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) +#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ +#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 +#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) +#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ +#define I40E_PFPCI_PM_PME_EN_SHIFT 0 +#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) +#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ +#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 +#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) +#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ +#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 +#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) +#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ +#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 +#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) +#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ +#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 +#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEEC 
0x001E4380 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) +#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 +#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) +#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) +#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ +#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 +#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) +#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 +#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) +#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ +#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 +#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) +#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ +#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 +#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) +#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 +#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) +#define I40E_PRTPM_GC_RATD_SHIFT 2 +#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT) +#define I40E_PRTPM_GC_LCDMP_SHIFT 3 +#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) +#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 +#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) +#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ +#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 +#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) +#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ +#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 +#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ +#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 +#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) +#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ +#define I40E_GLRPB_GHW_GHW_SHIFT 0 +#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) +#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ +#define I40E_GLRPB_GLW_GLW_SHIFT 0 +#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) +#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ +#define I40E_GLRPB_PHW_PHW_SHIFT 0 +#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) +#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ +#define I40E_GLRPB_PLW_PLW_SHIFT 0 +#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) +#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DHW_MAX_INDEX 7 +#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 +#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) +#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DLW_MAX_INDEX 7 +#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 +#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, 
I40E_PRTRPB_DLW_DLW_TCN_SHIFT) +#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DPS_MAX_INDEX 7 +#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 +#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) +#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SHT_MAX_INDEX 7 +#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 +#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) +#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ +#define I40E_PRTRPB_SHW_SHW_SHIFT 0 +#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) +#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SLT_MAX_INDEX 7 +#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 +#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) +#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */ +#define I40E_PRTRPB_SLW_SLW_SHIFT 0 +#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) +#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ +#define I40E_PRTRPB_SPS_SPS_SHIFT 0 +#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) +#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ +#define I40E_GLQF_CTL_HTOEP_SHIFT 1 +#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) +#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 +#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) +#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 +#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) +#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 +#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 +#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 +#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) +#define I40E_GLQF_CTL_FDBEST_SHIFT 17 +#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) +#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 +#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) +#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 +#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) +#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 +#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) +#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) +#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13 +#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) +#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_GLQF_HKEY_MAX_INDEX 12 +#define I40E_GLQF_HKEY_KEY_0_SHIFT 0 +#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) +#define I40E_GLQF_HKEY_KEY_1_SHIFT 8 +#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) 
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16 +#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) +#define I40E_GLQF_HKEY_KEY_3_SHIFT 24 +#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) +#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HSYM_MAX_INDEX 63 +#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 +#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) +#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_GLQF_PCNT_MAX_INDEX 511 +#define I40E_GLQF_PCNT_PCNT_SHIFT 0 +#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) +#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_SWAP_MAX_INDEX 1 +#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 +#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 +#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN0_SHIFT 12 +#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 +#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 +#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 +#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) +#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ +#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10 +#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14 +#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16 +#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) +#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17 +#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT) +#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18 +#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) +#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 +#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) +#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 +#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 +#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ +#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 +#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) +#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ +#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 +#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) +#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 +#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) +#define I40E_PFQF_FDSTAT 0x00246380 
/* Reset: CORER */ +#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 +#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) +#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 +#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) +#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_PFQF_HENA_MAX_INDEX 1 +#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_PFQF_HKEY_MAX_INDEX 12 +#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) +#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) +#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) +#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT) +#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_PFQF_HLUT_MAX_INDEX 127 +#define I40E_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) +#define I40E_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) +#define I40E_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) +#define I40E_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ +#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 +#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) +#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 +#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 +#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) +#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) +#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */ +#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 +#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) +#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HENA1_MAX_INDEX 1 +#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* 
_i=0...12, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY1_MAX_INDEX 12 +#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT) +#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT) +#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT) +#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT) +#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT) +#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT) +#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT) +#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT) +#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION1_MAX_INDEX 7 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, 
I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT) +#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPQF_CTL_MAX_INDEX 127 +#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 +#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT) +#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 +#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT) +#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 +#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT) +#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 +#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT) +#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_CTL_MAX_INDEX 383 +#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 +#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT) +#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 +#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 +#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 +#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 +#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 +#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) +#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_TCREGION_MAX_INDEX 3 +#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 +#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 +#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) +#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 +#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 +#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) +#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOECRC_MAX_INDEX 143 +#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 +#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT) +#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDDPC_MAX_INDEX 143 +#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 +#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) +#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) +#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) +#define 
I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) +#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) +#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) +#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) +#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) +#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) +#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOELAST_MAX_INDEX 143 +#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 +#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT) +#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPRC_MAX_INDEX 143 +#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 +#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT) +#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPTC_MAX_INDEX 143 +#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 +#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT) +#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOERPDC_MAX_INDEX 143 +#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 +#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT) +#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1_L_MAX_INDEX 143 +#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0 +#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT) +#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR2_L_MAX_INDEX 143 +#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0 +#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) +#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCH_MAX_INDEX 3 +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) +#define I40E_GLPRT_BPRCL(_i) 
(0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCL_MAX_INDEX 3 +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) +#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCH_MAX_INDEX 3 +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) +#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCL_MAX_INDEX 3 +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) +#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 +#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 +#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) +#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCH_MAX_INDEX 3 +#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 +#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT) +#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCL_MAX_INDEX 3 +#define I40E_GLPRT_GORCL_GORCL_SHIFT 0 +#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT) +#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCH_MAX_INDEX 3 +#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT) +#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCL_MAX_INDEX 3 +#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT) +#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 +#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 +#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) +#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LDPC_MAX_INDEX 3 +#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 +#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT) +#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) +#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) +#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) +#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 +#define 
I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 +#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) +#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MLFC_MAX_INDEX 3 +#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 +#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT) +#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCH_MAX_INDEX 3 +#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT) +#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCL_MAX_INDEX 3 +#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT) +#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCH_MAX_INDEX 3 +#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT) +#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCL_MAX_INDEX 3 +#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT) +#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MRFC_MAX_INDEX 3 +#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 +#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT) +#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 +#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) +#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 +#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) +#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127H_MAX_INDEX 3 +#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 +#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT) +#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127L_MAX_INDEX 3 +#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 +#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT) +#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255H_MAX_INDEX 3 +#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 +#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, 
I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) +#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255L_MAX_INDEX 3 +#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 +#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT) +#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511H_MAX_INDEX 3 +#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 +#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT) +#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511L_MAX_INDEX 3 +#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 +#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT) +#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64H_MAX_INDEX 3 +#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 +#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT) +#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64L_MAX_INDEX 3 +#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 +#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT) +#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 +#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) +#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 +#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) +#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127H_MAX_INDEX 3 +#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 +#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT) +#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127L_MAX_INDEX 3 +#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 +#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT) +#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 +#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) +#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 +#define I40E_GLPRT_PTC1522L_PTC1522L_MASK 
I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) +#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255H_MAX_INDEX 3 +#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 +#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT) +#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255L_MAX_INDEX 3 +#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 +#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT) +#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511H_MAX_INDEX 3 +#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 +#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT) +#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511L_MAX_INDEX 3 +#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 +#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT) +#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64H_MAX_INDEX 3 +#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 +#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT) +#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64L_MAX_INDEX 3 +#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 +#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) +#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 +#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) +#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 +#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) +#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) +#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) +#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) +#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) +#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* 
Reset: CORER */ +#define I40E_GLPRT_RDPC_MAX_INDEX 3 +#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 +#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) +#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RFC_MAX_INDEX 3 +#define I40E_GLPRT_RFC_RFC_SHIFT 0 +#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) +#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RJC_MAX_INDEX 3 +#define I40E_GLPRT_RJC_RJC_SHIFT 0 +#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) +#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RLEC_MAX_INDEX 3 +#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 +#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) +#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ROC_MAX_INDEX 3 +#define I40E_GLPRT_ROC_ROC_SHIFT 0 +#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) +#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUC_MAX_INDEX 3 +#define I40E_GLPRT_RUC_RUC_SHIFT 0 +#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) +#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUPP_MAX_INDEX 3 +#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 +#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) +#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) +#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_TDOLD_MAX_INDEX 3 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) +#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCH_MAX_INDEX 3 +#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCL_MAX_INDEX 3 +#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) +#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCH_MAX_INDEX 3 +#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) +#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCL_MAX_INDEX 3 +#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) +#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCH_MAX_INDEX 15 +#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) +#define I40E_GLSW_BPRCL(_i) 
(0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCL_MAX_INDEX 15 +#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) +#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCH_MAX_INDEX 15 +#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) +#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCL_MAX_INDEX 15 +#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) +#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCH_MAX_INDEX 15 +#define I40E_GLSW_GORCH_GORCH_SHIFT 0 +#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) +#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCL_MAX_INDEX 15 +#define I40E_GLSW_GORCL_GORCL_SHIFT 0 +#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) +#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCH_MAX_INDEX 15 +#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) +#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCL_MAX_INDEX 15 +#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) +#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCH_MAX_INDEX 15 +#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) +#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCL_MAX_INDEX 15 +#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) +#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCH_MAX_INDEX 15 +#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) +#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCL_MAX_INDEX 15 +#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) +#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_RUPP_MAX_INDEX 15 +#define I40E_GLSW_RUPP_RUPP_SHIFT 0 +#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) +#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_TDPC_MAX_INDEX 15 +#define I40E_GLSW_TDPC_TDPC_SHIFT 0 +#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) +#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCH_MAX_INDEX 15 +#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) +#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 
*/ /* Reset: CORER */ +#define I40E_GLSW_UPRCL_MAX_INDEX 15 +#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) +#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCH_MAX_INDEX 15 +#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) +#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCL_MAX_INDEX 15 +#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) +#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCH_MAX_INDEX 383 +#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) +#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCL_MAX_INDEX 383 +#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) +#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCH_MAX_INDEX 383 +#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) +#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCL_MAX_INDEX 383 +#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT) +#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCH_MAX_INDEX 383 +#define I40E_GLV_GORCH_GORCH_SHIFT 0 +#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) +#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCL_MAX_INDEX 383 +#define I40E_GLV_GORCL_GORCL_SHIFT 0 +#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) +#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCH_MAX_INDEX 383 +#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) +#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCL_MAX_INDEX 383 +#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) +#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCH_MAX_INDEX 383 +#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) +#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCL_MAX_INDEX 383 +#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) +#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCH_MAX_INDEX 383 +#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) +#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCL_MAX_INDEX 383 
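A brief illustration of how these definitions are meant to be consumed (a minimal sketch, not part of the patch): a field is read by AND-ing the 32-bit register word with its *_MASK and shifting right by its *_SHIFT, and the 48-bit statistics counters are split across a low (_L, 32 valid bits) and a high (_H, 16 valid bits) register pair. rd32() and struct i40e_hw are assumed from the driver's osdep layer; the helper names below are hypothetical, and the in-tree driver's own stat-update helpers additionally track a saved base offset to handle counter rollover, which is omitted here.

static inline u32 i40e_get_reg_field(u32 reg_val, u32 mask, u32 shift)
{
	/* mask first, then shift the field down to bit 0 */
	return (reg_val & mask) >> shift;
}

static inline u64 i40e_read_stat48(struct i40e_hw *hw, u32 loreg, u32 hireg)
{
	/* the low register carries bits 31:0 of the counter, the high
	 * register the upper 16 bits (all *_H stat masks are 0xFFFF at
	 * shift 0, I40E_GLV_GORCH is used here as a representative example)
	 */
	u64 lo = rd32(hw, loreg);
	u64 hi = i40e_get_reg_field(rd32(hw, hireg),
				    I40E_GLV_GORCH_GORCH_MASK,
				    I40E_GLV_GORCH_GORCH_SHIFT);

	return (hi << 32) | lo;
}

/* usage sketch: good octets received for VSI statistics index n
 *	u64 gorc = i40e_read_stat48(hw, I40E_GLV_GORCL(n), I40E_GLV_GORCH(n));
 */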
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) +#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RDPC_MAX_INDEX 383 +#define I40E_GLV_RDPC_RDPC_SHIFT 0 +#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) +#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RUPP_MAX_INDEX 383 +#define I40E_GLV_RUPP_RUPP_SHIFT 0 +#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) +#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_TEPC_MAX_INDEX 383 +#define I40E_GLV_TEPC_TEPC_SHIFT 0 +#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) +#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCH_MAX_INDEX 383 +#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) +#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCL_MAX_INDEX 383 +#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) +#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCH_MAX_INDEX 383 +#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 +#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) +#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCL_MAX_INDEX 383 +#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) +#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) +#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLVEBTC_TBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) +#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 +#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) +#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 +#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) +#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 +#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) +#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 +#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) +#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 +#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) +#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 +#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) +#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) +#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) 
/* _i=0...35 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) +#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) +#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ +#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 +#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) +#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 +#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 +#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 +#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) +#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 +#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 +#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) +#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, 
I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 +#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 +#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) +#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 +#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) +#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 +#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) +#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) +#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) +#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 +#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 +#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 +#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) +#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ +#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 +#define I40E_PRTTSYN_STAT_1_RXT2_MASK 
I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 +#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) +#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) +#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) +#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) +#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) +#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) +#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ +#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 +#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) +#define I40E_GL_MDET_RX_EVENT_SHIFT 8 +#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT) +#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 +#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT) +#define I40E_GL_MDET_RX_VALID_SHIFT 31 +#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT) +#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */ +#define I40E_GL_MDET_TX_QUEUE_SHIFT 0 +#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT) +#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12 +#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT) +#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21 +#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT) +#define I40E_GL_MDET_TX_EVENT_SHIFT 25 +#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT) +#define I40E_GL_MDET_TX_VALID_SHIFT 31 +#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT) +#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */ +#define I40E_PF_MDET_RX_VALID_SHIFT 0 +#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT) +#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */ +#define I40E_PF_MDET_TX_VALID_SHIFT 0 +#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */ +#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 +#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 +#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 +#define 
I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_RX_MAX_INDEX 127 +#define I40E_VP_MDET_RX_VALID_SHIFT 0 +#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) +#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_TX_MAX_INDEX 127 +#define I40E_VP_MDET_TX_VALID_SHIFT 0 +#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) +#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ +#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 +#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) +#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 +#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) +#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 +#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) +#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 +#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) +#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 +#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) +#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ +#define I40E_PFPM_APM_APME_SHIFT 0 +#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) +#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) +#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ +#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 +#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) +#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ +#define I40E_PFPM_WUFC_LNKC_SHIFT 0 +#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) +#define I40E_PFPM_WUFC_MAG_SHIFT 1 +#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) +#define I40E_PFPM_WUFC_MNG_SHIFT 3 +#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) +#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 +#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 +#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 +#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 +#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 +#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 +#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 +#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 +#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX0_SHIFT 16 +#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) +#define I40E_PFPM_WUFC_FLX1_SHIFT 17 +#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) +#define I40E_PFPM_WUFC_FLX2_SHIFT 18 +#define 
I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) +#define I40E_PFPM_WUFC_FLX3_SHIFT 19 +#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) +#define I40E_PFPM_WUFC_FLX4_SHIFT 20 +#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) +#define I40E_PFPM_WUFC_FLX5_SHIFT 21 +#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) +#define I40E_PFPM_WUFC_FLX6_SHIFT 22 +#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) +#define I40E_PFPM_WUFC_FLX7_SHIFT 23 +#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) +#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) +#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ +#define I40E_PFPM_WUS_LNKC_SHIFT 0 +#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) +#define I40E_PFPM_WUS_MAG_SHIFT 1 +#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) +#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 +#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) +#define I40E_PFPM_WUS_MNG_SHIFT 3 +#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) +#define I40E_PFPM_WUS_FLX0_SHIFT 16 +#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) +#define I40E_PFPM_WUS_FLX1_SHIFT 17 +#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) +#define I40E_PFPM_WUS_FLX2_SHIFT 18 +#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) +#define I40E_PFPM_WUS_FLX3_SHIFT 19 +#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) +#define I40E_PFPM_WUS_FLX4_SHIFT 20 +#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) +#define I40E_PFPM_WUS_FLX5_SHIFT 21 +#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) +#define I40E_PFPM_WUS_FLX6_SHIFT 22 +#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) +#define I40E_PFPM_WUS_FLX7_SHIFT 23 +#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) +#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) +#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ +#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 +#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) +#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 +#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) +#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAH_MAX_INDEX 3 +#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 +#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) +#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 +#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) +#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 +#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) +#define I40E_PRTPM_SAH_AV_SHIFT 31 +#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) +#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAL_MAX_INDEX 3 +#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 +#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) +#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 
0 +#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) +#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define I40E_VF_ARQT1_ARQT_SHIFT 0 +#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) +#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) +#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define I40E_VF_ATQH1_ATQH_SHIFT 0 +#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) +#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define I40E_VF_ATQT1_ATQT_SHIFT 0 +#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) +#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 +#define 
I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) +#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) +#define I40E_VFINT_ICR01_SWINT_SHIFT 31 +#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) +#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* 
_i=0...2 */ /* Reset: VFR */ +#define I40E_VFINT_ITR01_MAX_INDEX 2 +#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN1_MAX_INDEX 2 +#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_QRX_TAIL1_MAX_INDEX 15 +#define I40E_QRX_TAIL1_TAIL_SHIFT 0 +#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ +#define I40E_QTX_TAIL1_MAX_INDEX 15 +#define I40E_QTX_TAIL1_TAIL_SHIFT 0 +#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) +#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ +#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD_MAX_INDEX 16 +#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG_MAX_INDEX 16 +#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD_MAX_INDEX 16 +#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 +#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, 
I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_VFQF_HENA_MAX_INDEX 1 +#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) +#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) +#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) +#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define I40E_VFQF_HLUT_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) +#define I40E_VFQF_HLUT_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) +#define I40E_VFQF_HLUT_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) +#define I40E_VFQF_HLUT_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) +#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION_MAX_INDEX 7 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define 
I40E_VFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) +#endif diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h new file mode 100644 index 000000000..5f9cac55a --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -0,0 +1,100 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_STATUS_H_ +#define _I40E_STATUS_H_ + +/* Error Codes */ +enum i40e_status_code { + I40E_SUCCESS = 0, + I40E_ERR_NVM = -1, + I40E_ERR_NVM_CHECKSUM = -2, + I40E_ERR_PHY = -3, + I40E_ERR_CONFIG = -4, + I40E_ERR_PARAM = -5, + I40E_ERR_MAC_TYPE = -6, + I40E_ERR_UNKNOWN_PHY = -7, + I40E_ERR_LINK_SETUP = -8, + I40E_ERR_ADAPTER_STOPPED = -9, + I40E_ERR_INVALID_MAC_ADDR = -10, + I40E_ERR_DEVICE_NOT_SUPPORTED = -11, + I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_INVALID_LINK_SETTINGS = -13, + I40E_ERR_AUTONEG_NOT_COMPLETE = -14, + I40E_ERR_RESET_FAILED = -15, + I40E_ERR_SWFW_SYNC = -16, + I40E_ERR_NO_AVAILABLE_VSI = -17, + I40E_ERR_NO_MEMORY = -18, + I40E_ERR_BAD_PTR = -19, + I40E_ERR_RING_FULL = -20, + I40E_ERR_INVALID_PD_ID = -21, + I40E_ERR_INVALID_QP_ID = -22, + I40E_ERR_INVALID_CQ_ID = -23, + I40E_ERR_INVALID_CEQ_ID = -24, + I40E_ERR_INVALID_AEQ_ID = -25, + I40E_ERR_INVALID_SIZE = -26, + I40E_ERR_INVALID_ARP_INDEX = -27, + I40E_ERR_INVALID_FPM_FUNC_ID = -28, + I40E_ERR_QP_INVALID_MSG_SIZE = -29, + I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, + I40E_ERR_INVALID_FRAG_COUNT = -31, + I40E_ERR_QUEUE_EMPTY = -32, + I40E_ERR_INVALID_ALIGNMENT = -33, + I40E_ERR_FLUSHED_QUEUE = -34, + I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, + I40E_ERR_INVALID_IMM_DATA_SIZE = -36, + I40E_ERR_TIMEOUT = -37, + I40E_ERR_OPCODE_MISMATCH = -38, + I40E_ERR_CQP_COMPL_ERROR = -39, + I40E_ERR_INVALID_VF_ID = -40, + I40E_ERR_INVALID_HMCFN_ID = -41, + I40E_ERR_BACKING_PAGE_ERROR = -42, + I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + I40E_ERR_INVALID_PBLE_INDEX = -44, + I40E_ERR_INVALID_SD_INDEX = -45, + I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, + I40E_ERR_INVALID_SD_TYPE = -47, + I40E_ERR_MEMCPY_FAILED = -48, + I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, + I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, + I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, + I40E_ERR_SRQ_ENABLED = -52, + I40E_ERR_ADMIN_QUEUE_ERROR = -53, + I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, + I40E_ERR_BUF_TOO_SHORT = -55, + I40E_ERR_ADMIN_QUEUE_FULL = -56, + I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, + I40E_ERR_BAD_IWARP_CQE = -58, + I40E_ERR_NVM_BLANK_MODE = -59, + I40E_ERR_NOT_IMPLEMENTED = -60, + I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, + I40E_ERR_DIAG_TEST_FAILED = -62, + I40E_ERR_NOT_READY = -63, + I40E_NOT_SUPPORTED = -64, + I40E_ERR_FIRMWARE_API_VERSION = -65, +}; + +#endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c new file mode 100644 index 000000000..9d95042d5 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -0,0 +1,2778 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include +#include +#include "i40e.h" +#include "i40e_prototype.h" + +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) +#define I40E_FD_CLEAN_DELAY 10 +/** + * i40e_program_fdir_filter - Program a Flow Director filter + * @fdir_data: Packet data that will be filter parameters + * @raw_packet: the pre-allocated packet buffer for FDir + * @pf: The PF pointer + * @add: True for add/update, False for remove + **/ +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, + struct i40e_pf *pf, bool add) +{ + struct i40e_filter_program_desc *fdir_desc; + struct i40e_tx_buffer *tx_buf, *first; + struct i40e_tx_desc *tx_desc; + struct i40e_ring *tx_ring; + unsigned int fpt, dcc; + struct i40e_vsi *vsi; + struct device *dev; + dma_addr_t dma; + u32 td_cmd = 0; + u16 delay = 0; + u16 i; + + /* find existing FDIR VSI */ + vsi = NULL; + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) + vsi = pf->vsi[i]; + if (!vsi) + return -ENOENT; + + tx_ring = vsi->tx_rings[0]; + dev = tx_ring->dev; + + /* we need two descriptors to add/del a filter and we can wait */ + do { + if (I40E_DESC_UNUSED(tx_ring) > 1) + break; + msleep_interruptible(1); + delay++; + } while (delay < I40E_FD_CLEAN_DELAY); + + if (!(I40E_DESC_UNUSED(tx_ring) > 1)) + return -EAGAIN; + + dma = dma_map_single(dev, raw_packet, + I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) + goto dma_fail; + + /* grab the next descriptor */ + i = tx_ring->next_to_use; + fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + first = &tx_ring->tx_bi[i]; + memset(first, 0, sizeof(struct i40e_tx_buffer)); + + tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? 
i + 1 : 0; + + fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK; + + fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & + I40E_TXD_FLTR_QW0_FLEXOFF_MASK; + + fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & + I40E_TXD_FLTR_QW0_PCTYPE_MASK; + + /* Use LAN VSI Id if not programmed by user */ + if (fdir_data->dest_vsi == 0) + fpt |= (pf->vsi[pf->lan_vsi]->id) << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; + else + fpt |= ((u32)fdir_data->dest_vsi << + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & + I40E_TXD_FLTR_QW0_DEST_VSI_MASK; + + dcc = I40E_TX_DESC_DTYPE_FILTER_PROG; + + if (add) + dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT; + else + dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT; + + dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) & + I40E_TXD_FLTR_QW1_DEST_MASK; + + dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & + I40E_TXD_FLTR_QW1_FD_STATUS_MASK; + + if (fdir_data->cnt_index != 0) { + dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; + dcc |= ((u32)fdir_data->cnt_index << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + } + + fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt); + fdir_desc->rsvd = cpu_to_le32(0); + fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc); + fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); + + /* Now program a dummy descriptor */ + i = tx_ring->next_to_use; + tx_desc = I40E_TX_DESC(tx_ring, i); + tx_buf = &tx_ring->tx_bi[i]; + + tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; + + memset(tx_buf, 0, sizeof(struct i40e_tx_buffer)); + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE); + dma_unmap_addr_set(tx_buf, dma, dma); + + tx_desc->buffer_addr = cpu_to_le64(dma); + td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; + + tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB; + tx_buf->raw_buf = (void *)raw_packet; + + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); + + /* set the timestamp */ + tx_buf->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. 
+ */ + wmb(); + + /* Mark the data descriptor to be watched */ + first->next_to_watch = tx_desc; + + writel(tx_ring->next_to_use, tx_ring->tail); + return 0; + +dma_fail: + return -1; +} + +#define IP_HEADER_OFFSET 14 +#define I40E_UDPIP_DUMMY_PACKET_LEN 42 +/** + * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + struct i40e_pf *pf = vsi->back; + struct udphdr *udp; + struct iphdr *ip; + bool err = false; + u8 *raw_packet; + int ret; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + udp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + udp->source = fd_data->src_port; + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + if (ret) { + dev_info(&pf->pdev->dev, + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); + err = true; + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + } + return err ? -EOPNOTSUPP : 0; +} + +#define I40E_TCPIP_DUMMY_PACKET_LEN 54 +/** + * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + struct i40e_pf *pf = vsi->back; + struct tcphdr *tcp; + struct iphdr *ip; + bool err = false; + u8 *raw_packet; + int ret; + /* Dummy packet */ + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11, + 0x0, 0x72, 0, 0, 0, 0}; + + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + tcp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + tcp->source = fd_data->src_port; + + if (add) { + pf->fd_tcp_rule++; + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + } + } else { + pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ? 
+ (pf->fd_tcp_rule - 1) : 0; + if (pf->fd_tcp_rule == 0) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); + } + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); + err = true; + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { + if (add) + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + } + + return err ? -EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Always returns -EOPNOTSUPP + **/ +static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + return -EOPNOTSUPP; +} + +#define I40E_IP_DUMMY_PACKET_LEN 34 +/** + * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + bool add) +{ + struct i40e_pf *pf = vsi->back; + struct iphdr *ip; + bool err = false; + u8 *raw_packet; + int ret; + int i; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; + + for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) { + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + + ip->saddr = fd_data->src_ip[0]; + ip->daddr = fd_data->dst_ip[0]; + ip->protocol = 0; + + fd_data->pctype = i; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); + err = true; + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + } + } + + return err ? 
-EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir - Build raw packets to add/del fdir filter + * @vsi: pointer to the targeted VSI + * @cmd: command to get or set RX flow classification rules + * @add: true adds a filter, false removes it + * + **/ +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add) +{ + struct i40e_pf *pf = vsi->back; + int ret; + + switch (input->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ret = i40e_add_del_fdir_tcpv4(vsi, input, add); + break; + case UDP_V4_FLOW: + ret = i40e_add_del_fdir_udpv4(vsi, input, add); + break; + case SCTP_V4_FLOW: + ret = i40e_add_del_fdir_sctpv4(vsi, input, add); + break; + case IPV4_FLOW: + ret = i40e_add_del_fdir_ipv4(vsi, input, add); + break; + case IP_USER_FLOW: + switch (input->ip4_proto) { + case IPPROTO_TCP: + ret = i40e_add_del_fdir_tcpv4(vsi, input, add); + break; + case IPPROTO_UDP: + ret = i40e_add_del_fdir_udpv4(vsi, input, add); + break; + case IPPROTO_SCTP: + ret = i40e_add_del_fdir_sctpv4(vsi, input, add); + break; + default: + ret = i40e_add_del_fdir_ipv4(vsi, input, add); + break; + } + break; + default: + dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", + input->flow_type); + ret = -EINVAL; + } + + /* The buffer allocated here is freed by the i40e_clean_tx_ring() */ + return ret; +} + +/** + * i40e_fd_handle_status - check the Programming Status for FD + * @rx_ring: the Rx ring for this descriptor + * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor. + * @prog_id: the id originally used for programming + * + * This is used to verify if the FD programming or invalidation + * requested by SW to the HW is successful or not and take actions accordingly. + **/ +static void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) +{ + struct i40e_pf *pf = rx_ring->vsi->back; + struct pci_dev *pdev = pf->pdev; + u32 fcnt_prog, fcnt_avail; + u32 error; + u64 qw; + + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; + + if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || + (I40E_DEBUG_FD & pf->hw.debug_mask)) + dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", + rx_desc->wb.qword0.hi_dword.fd_id); + + /* Check if the programming error is for ATR. + * If so, auto disable ATR and set a state for + * flush in progress. Next time we come here if flush is in + * progress do nothing, once flush is complete the state will + * be cleared. + */ + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + + pf->fd_add_err++; + /* store the current atr filter count */ + pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); + + if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + } + + /* filter programming failed most likely due to table full */ + fcnt_prog = i40e_get_global_fd_count(pf); + fcnt_avail = pf->fdir_pf_filter_count; + /* If ATR is running fcnt_prog can quickly change, + * if we are very close to full, it makes sense to disable + * FD ATR/SB and then re-enable it when there is room. 
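+ * Illustration only (sizes here are hypothetical; the real pool size
+ * comes from the HW resource profile): if fdir_pf_filter_count were
+ * 8192 and the margin a few dozen entries, the check below would trip
+ * once the global filter count climbs to within that margin of 8192,
+ * and new sideband (ntuple) rules are refused until entries are freed
+ * and the flag is re-enabled elsewhere.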
+ */ + if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + !(pf->auto_disable_flags & + I40E_FLAG_FD_SB_ENABLED)) { + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); + pf->auto_disable_flags |= + I40E_FLAG_FD_SB_ENABLED; + } + } else { + dev_info(&pdev->dev, + "FD filter programming failed due to incorrect filter parameters\n"); + } + } else if (error == + (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", + rx_desc->wb.qword0.hi_dword.fd_id); + } +} + +/** + * i40e_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, + struct i40e_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + else + dev_kfree_skb_any(tx_buffer->skb); + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +/** + * i40e_clean_tx_ring - Free any empty Tx buffers + * @tx_ring: ring to be cleaned + **/ +void i40e_clean_tx_ring(struct i40e_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)); +} + +/** + * i40e_free_tx_resources - Free Tx resources per queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void i40e_free_tx_resources(struct i40e_ring *tx_ring) +{ + i40e_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** + * i40e_get_tx_pending - how many tx descriptors not processed + * @tx_ring: the ring of descriptors + * + * Since there is no access to the ring head register + * in XL710, we need to use our local copies + **/ +static u32 i40e_get_tx_pending(struct i40e_ring *ring) +{ + u32 head, tail; + + head = i40e_get_head(ring); + tail = 
readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +/** + * i40e_check_tx_hang - Is there a hang in the Tx queue + * @tx_ring: the ring of descriptors + **/ +static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) +{ + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = i40e_get_tx_pending(tx_ring); + struct i40e_pf *pf = tx_ring->vsi->back; + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * PFC clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, + &tx_ring->state); + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { + if (I40E_DEBUG_FLOW & pf->hw.debug_mask) + dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", + tx_pending, tx_ring->queue_index); + pf->tx_sluggish_count++; + } else { + /* update completed stats and disarm the hang check */ + tx_ring->tx_stats.tx_done_old = tx_done; + clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); + } + + return ret; +} + +#define WB_STRIDE 0x3 + +/** + * i40e_clean_tx_irq - Reclaim resources after transmit completes + * @tx_ring: tx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. 
the clean is finished) + **/ +static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +{ + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; + struct i40e_tx_desc *tx_desc; + unsigned int total_packets = 0; + unsigned int total_bytes = 0; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buf->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; + + /* check to see if there are any non-cache aligned descriptors + * waiting to be written back, and kick the hardware to force + * them to be written back in case of napi polling + */ + if (budget && + !((i & WB_STRIDE) == WB_STRIDE) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + else + tx_ring->arm_wb = false; + + if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" + " VSI <%d>\n" + " Tx Queue <%d>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n", + tx_ring->vsi->seid, + tx_ring->queue_index, + tx_ring->next_to_use, i); + dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->tx_bi[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + dev_info(tx_ring->dev, + "tx hang detected on queue %d, reset requested\n", + tx_ring->queue_index); + + /* do not fire the reset immediately, wait for the stack to + * decide we are truly stuck, also prevents every queue from + * 
simultaneously requesting a reset + */ + + /* the adapter is about to reset, no point in enabling polling */ + budget = 1; + } + + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +/** + * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), + val); +} + +/** + * i40e_set_new_dynamic_itr - Find new ITR level + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte counts during + * the last interrupt. The advantage of per interrupt computation + * is faster updates and more accurate ITR for the current traffic + * pattern. Constants in this function were computed based on + * theoretical maximum wire speed and thresholds were set based on + * testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
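+ *
+ * As a worked example of the smoothing step below (register values are
+ * purely illustrative): moving from a current ITR of 100 toward a new
+ * target of 50 gives (10 * 50 * 100) / ((9 * 50) + 100) = 50000 / 550,
+ * i.e. roughly 90, so the interrupt rate converges over several
+ * intervals instead of jumping straight to the new setting.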
+ **/ +static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +{ + enum i40e_latency_range new_latency_range = rc->latency_range; + u32 new_itr = rc->itr; + int bytes_per_int; + + if (rc->total_packets == 0 || !rc->itr) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (8000 ints/s) + */ + bytes_per_int = rc->total_bytes / rc->itr; + switch (rc->itr) { + case I40E_LOWEST_LATENCY: + if (bytes_per_int > 10) + new_latency_range = I40E_LOW_LATENCY; + break; + case I40E_LOW_LATENCY: + if (bytes_per_int > 20) + new_latency_range = I40E_BULK_LATENCY; + else if (bytes_per_int <= 10) + new_latency_range = I40E_LOWEST_LATENCY; + break; + case I40E_BULK_LATENCY: + if (bytes_per_int <= 20) + rc->latency_range = I40E_LOW_LATENCY; + break; + } + + switch (new_latency_range) { + case I40E_LOWEST_LATENCY: + new_itr = I40E_ITR_100K; + break; + case I40E_LOW_LATENCY: + new_itr = I40E_ITR_20K; + break; + case I40E_BULK_LATENCY: + new_itr = I40E_ITR_8K; + break; + default: + break; + } + + if (new_itr != rc->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * rc->itr) / + ((9 * new_itr) + rc->itr); + rc->itr = new_itr & I40E_MAX_ITR; + } + + rc->total_bytes = 0; + rc->total_packets = 0; +} + +/** + * i40e_update_dynamic_itr - Adjust ITR based on bytes per int + * @q_vector: the vector to adjust + **/ +static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) +{ + u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; + struct i40e_hw *hw = &q_vector->vsi->back->hw; + u32 reg_addr; + u16 old_itr; + + reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1); + old_itr = q_vector->rx.itr; + i40e_set_new_dynamic_itr(&q_vector->rx); + if (old_itr != q_vector->rx.itr) + wr32(hw, reg_addr, q_vector->rx.itr); + + reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1); + old_itr = q_vector->tx.itr; + i40e_set_new_dynamic_itr(&q_vector->tx); + if (old_itr != q_vector->tx.itr) + wr32(hw, reg_addr, q_vector->tx.itr); +} + +/** + * i40e_clean_programming_status - clean the programming status descriptor + * @rx_ring: the rx ring that has this descriptor + * @rx_desc: the rx descriptor written back by HW + * + * Flow director should handle FD_FILTER_STATUS to check its filter programming + * status being successful or not and take actions accordingly. FCoE should + * handle its context/filter programming/invalidation status and take actions. 
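+ * A programming status descriptor carries no packet data; the callers
+ * below simply advance next_to_clean past it (I40E_RX_INCREMENT) and
+ * continue the clean loop. For Flow Director the id reported here is
+ * what lets i40e_fd_handle_status() above tell a "table full" failure
+ * apart from a "no such entry" failure on delete.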
+ * + **/ +static void i40e_clean_programming_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) +{ + u64 qw; + u8 id; + + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; + + if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) + i40e_fd_handle_status(rx_ring, rx_desc, id); +#ifdef I40E_FCOE + else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) || + (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS)) + i40e_fcoe_handle_status(rx_ring, rx_desc, id); +#endif +} + +/** + * i40e_setup_tx_descriptors - Allocate the Tx descriptors + * @tx_ring: the tx ring to set up + * + * Return 0 on success, negative on error + **/ +int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_bi) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + tx_ring->size += sizeof(u32); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + return -ENOMEM; +} + +/** + * i40e_clean_rx_ring - Free Rx buffers + * @rx_ring: ring to be cleaned + **/ +void i40e_clean_rx_ring(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_bi) + return; + + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->dma) { + dma_unmap_single(dev, + rx_bi->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + } + if (rx_bi->skb) { + dev_kfree_skb(rx_bi->skb); + rx_bi->skb = NULL; + } + if (rx_bi->page) { + if (rx_bi->page_dma) { + dma_unmap_page(dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + __free_page(rx_bi->page); + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + } + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * i40e_free_rx_resources - Free Rx resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void i40e_free_rx_resources(struct i40e_ring *rx_ring) +{ + i40e_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, 
rx_ring->size, + rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +/** + * i40e_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. */ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** + * i40e_setup_rx_descriptors - Allocate Rx descriptors + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int bi_size; + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_bi) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) + ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) + : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + rx_ring->size); + goto err; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + return -ENOMEM; +} + +/** + * i40e_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
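+ * For example, the pkt_addr/hdr_addr writes done just before this in
+ * i40e_alloc_rx_buffers_ps()/_1buf() must be visible to the device
+ * before the tail bump below, otherwise the NIC could fetch a
+ * descriptor that still holds a stale buffer address.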
+ */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + struct sk_buff *skb; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + skb = bi->skb; + + if (!skb) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + goto no_buffers; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + bi->skb = skb; + } + + if (!bi->dma) { + bi->dma = dma_map_single(rx_ring->dev, + skb->data, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, bi->dma)) { + rx_ring->rx_stats.alloc_buff_failed++; + bi->dma = 0; + goto no_buffers; + } + } + + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; + struct i40e_vsi *vsi = rx_ring->vsi; + u64 flags = vsi->back->flags; + + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + if (flags & I40E_FLAG_IN_NETPOLL) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * i40e_rx_checksum - 
Indicate in skb if hw indicated a good cksum + * @vsi: the VSI we care about + * @skb: skb currently being received and modified + * @rx_status: status value of last descriptor in packet + * @rx_error: error value of last descriptor in packet + * @rx_ptype: ptype value of last descriptor in packet + **/ +static inline void i40e_rx_checksum(struct i40e_vsi *vsi, + struct sk_buff *skb, + u32 rx_status, + u32 rx_error, + u16 rx_ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); + bool ipv4 = false, ipv6 = false; + bool ipv4_tunnel, ipv6_tunnel; + __wsum rx_udp_csum; + struct iphdr *iph; + __sum16 csum; + + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + + skb->ip_summed = CHECKSUM_NONE; + + /* Rx csum enabled and ip headers found? */ + if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + return; + + /* did the hardware decode the packet and checksum? */ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + return; + + /* both known and outer_ip must be set for the below code to work */ + if (!(decoded.known && decoded.outer_ip)) + return; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) + ipv4 = true; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) + ipv6 = true; + + if (ipv4 && + (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | + (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + goto checksum_fail; + + /* likely incorrect csum if alternate IP extension headers found */ + if (ipv6 && + rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + /* don't increment checksum err here, non-fatal err */ + return; + + /* there was some L4 error, count error and punt packet to the stack */ + if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + goto checksum_fail; + + /* handle packets that were not able to be checksummed due + * to arrival speed, in this case the stack can compute + * the csum. + */ + if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) + return; + + /* If VXLAN traffic has an outer UDPv4 checksum we need to check + * it in the driver, hardware does not do it for us. + * Since L3L4P bit was set we assume a valid IHL value (>=5) + * so the total length of IPv4 header is IHL*4 bytes + * The UDP_0 bit *may* bet set if the *inner* header is UDP + */ + if (ipv4_tunnel) { + skb->transport_header = skb->mac_header + + sizeof(struct ethhdr) + + (ip_hdr(skb)->ihl * 4); + + /* Add 4 bytes for VLAN tagged packets */ + skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + ? 
VLAN_HLEN : 0; + + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && + (udp_hdr(skb)->check != 0)) { + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic( + iph->saddr, iph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); + + if (udp_hdr(skb)->check != csum) + goto checksum_fail; + + } /* else its GRE and so no outer UDP header */ + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ipv4_tunnel || ipv6_tunnel; + + return; + +checksum_fail: + vsi->back->hw_csum_rx_error++; +} + +/** + * i40e_rx_hash - returns the hash value from the Rx descriptor + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline u32 i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc) +{ + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if ((ring->netdev->features & NETIF_F_RXHASH) && + (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) + return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + else + return 0; +} + +/** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + + if (!decoded.known) + return PKT_HASH_TYPE_NONE; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + else + return PKT_HASH_TYPE_L2; +} + +/** + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. the clean is finished) + **/ +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + const int current_node = numa_node_id(); + struct i40e_vsi *vsi = rx_ring->vsi; + u16 i = rx_ring->next_to_clean; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u8 rx_ptype; + u64 qword; + + if (budget <= 0) + return 0; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
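+ * Concretely, the length/error/ptype fields pulled out of this same
+ * qword below must not be read (or speculated) before the DD check
+ * above is known to have seen the completed write-back, or we could
+ * act on leftover values from the descriptor's previous use; dma_rmb()
+ * provides that ordering.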
+ */ + dma_rmb(); + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> + I40E_RXD_QW1_LENGTH_HBUF_SHIFT; + rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> + I40E_RXD_QW1_LENGTH_SPH_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); + rx_bi->skb = NULL; + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; + if (rx_hbo) + len = I40E_RX_HDR_SIZE; + else + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; + } + + /* Get the rest of the data if this was a header split */ + if (rx_packet_len) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset, + rx_packet_len); + + skb->len += rx_packet_len; + skb->data_len += rx_packet_len; + skb->truesize += rx_packet_len; + + if ((page_count(rx_bi->page) == 1) && + (page_to_nid(rx_bi->page) == current_node)) + get_page(rx_bi->page); + else + rx_bi->page = NULL; + + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + struct i40e_rx_buffer *next_buffer; + + next_buffer = &rx_ring->rx_bi[i]; + next_buffer->skb = skb; + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? + */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + dma_rmb(); + + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + * + * Returns the amount of work done + **/ +int i40e_napi_poll(struct napi_struct *napi, int budget) +{ + struct i40e_q_vector *q_vector = + container_of(napi, struct i40e_q_vector, napi); + struct i40e_vsi *vsi = q_vector->vsi; + struct i40e_ring *ring; + bool clean_complete = true; + bool arm_wb = false; + int budget_per_ring; + int cleaned; + + if (test_bit(__I40E_DOWN, &vsi->state)) { + napi_complete(napi); + return 0; + } + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + i40e_for_each_ring(ring, q_vector->tx) { + clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + arm_wb |= ring->arm_wb; + } + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. 
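+ * For example (numbers illustrative only): a NAPI budget of 64 split
+ * across 4 ring pairs lets each Rx ring clean up to 16 packets this
+ * pass, while with more ring pairs than budget the max() below still
+ * hands every ring a budget of 1.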
+ */ + budget_per_ring = max(budget/q_vector->num_ringpairs, 1); + + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + if (arm_wb) + i40e_force_wb(vsi, q_vector); + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete(napi); + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || + ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + i40e_update_dynamic_itr(q_vector); + + if (!test_bit(__I40E_DOWN, &vsi->state)) { + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + i40e_irq_dynamic_enable(vsi, + q_vector->v_idx + vsi->base_vector); + } else { + struct i40e_hw *hw = &vsi->back->hw; + /* We re-enable the queue 0 cause, but + * don't worry about dynamic_enable + * because we left it on for the other + * possible interrupts during napi + */ + u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); + qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_RQCTL(0), qval); + + qval = rd32(hw, I40E_QINT_TQCTL(0)); + qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; + wr32(hw, I40E_QINT_TQCTL(0), qval); + + i40e_irq_dynamic_enable_icr0(vsi->back); + } + } + + return 0; +} + +/** + * i40e_atr - Add a Flow Director ATR filter + * @tx_ring: ring to add programming descriptor to + * @skb: send buffer + * @flags: send flags + * @protocol: wire protocol + **/ +static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 flags, __be16 protocol) +{ + struct i40e_filter_program_desc *fdir_desc; + struct i40e_pf *pf = tx_ring->vsi->back; + union { + unsigned char *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + struct tcphdr *th; + unsigned int hlen; + u32 flex_ptype, dtype_cmd; + u16 i; + + /* make sure ATR is enabled */ + if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + /* if sampling is disabled do nothing */ + if (!tx_ring->atr_sample_rate) + return; + + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(skb); + + /* Currently only IPv4/IPv6 with TCP is supported */ + if (protocol == htons(ETH_P_IP)) { + if (hdr.ipv4->protocol != IPPROTO_TCP) + return; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + } else if (protocol == htons(ETH_P_IPV6)) { + if (hdr.ipv6->nexthdr != IPPROTO_TCP) + return; + + hlen = sizeof(struct ipv6hdr); + } else { + return; + } + + th = (struct tcphdr *)(hdr.network + hlen); + + /* Due to lack of space, no more new filters can be programmed */ + if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + tx_ring->atr_count++; + + /* sample on all syn/fin/rst packets or once every atr sample rate */ + if (!th->fin && + !th->syn && + !th->rst && + (tx_ring->atr_count < tx_ring->atr_sample_rate)) + return; + + tx_ring->atr_count = 0; + + /* grab the next descriptor */ + i = tx_ring->next_to_use; + fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & + I40E_TXD_FLTR_QW0_QINDEX_MASK; + flex_ptype |= (protocol == htons(ETH_P_IP)) ? 
+ (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : + (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); + + flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; + + dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; + + dtype_cmd |= (th->fin || th->rst) ? + (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT) : + (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << + I40E_TXD_FLTR_QW1_PCMD_SHIFT); + + dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << + I40E_TXD_FLTR_QW1_DEST_SHIFT; + + dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; + + dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; + dtype_cmd |= + ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + + fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); + fdir_desc->rsvd = cpu_to_le32(0); + fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); + fdir_desc->fd_id = cpu_to_le32(0); +} + +/** + * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * @skb: send buffer + * @tx_ring: ring to send buffer on + * @flags: the tx flags to be set + * + * Checks the skb and set up correspondingly several generic transmit flags + * related to VLAN tagging for the HW, such as VLAN, DCB, etc. + * + * Returns error code indicate the frame should be dropped upon error and the + * otherwise returns 0 to indicate the flags has been set properly. + **/ +#ifdef I40E_FCOE +int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) +#else +static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) +#endif +{ + __be16 protocol = skb->protocol; + u32 tx_flags = 0; + + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. 
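+ * e.g. when the user clears NETIF_F_HW_VLAN_CTAG_TX via ethtool the
+ * stack has already built the 802.1Q header itself, so resetting
+ * skb->protocol to the encapsulated ethertype lets the later TSO and
+ * checksum setup key off the real L3 protocol.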
+ */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + + /* if we have a HW VLAN tag being added, default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN, check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + return -EINVAL; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_SW_VLAN; + } + + if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + goto out; + + /* Insert 802.1p priority into VLAN header */ + if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL)) { + tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK; + tx_flags |= (skb->priority & 0x7) << + I40E_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + int rc; + + rc = skb_cow_head(skb, 0); + if (rc < 0) + return rc; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + I40E_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= I40E_TX_FLAGS_HW_VLAN; + } + } + +out: + *flags = tx_flags; + return 0; +} + +/** + * i40e_tso - set up the tso context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + * @tx_flags: the collected send information + * @protocol: the send protocol + * @hdr_len: ptr to the size of the packet header + * @cd_tunneling: ptr to context descriptor bits + * + * Returns 0 if no TSO can happen, 1 if tso is going, or error + **/ +static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) +{ + u32 cd_cmd, cd_tso_len, cd_mss; + struct ipv6hdr *ipv6h; + struct tcphdr *tcph; + struct iphdr *iph; + u32 l4len; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + } else if (ipv6h->version == 6) { + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + } + + l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = (skb->encapsulation + ? 
(skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb)) + l4len; + + /* find the field values */ + cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_tso_len = skb->len - *hdr_len; + cd_mss = skb_shinfo(skb)->gso_size; + *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((u64)cd_tso_len << + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + return 1; +} + +/** + * i40e_tsyn - set up the tsyn context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + * @tx_flags: the collected send information + * + * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen + **/ +static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u64 *cd_type_cmd_tso_mss) +{ + struct i40e_pf *pf; + + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return 0; + + /* Tx timestamps cannot be sampled when doing TSO */ + if (tx_flags & I40E_TX_FLAGS_TSO) + return 0; + + /* only timestamp the outbound packet if the user has requested it and + * we are not already transmitting a packet to be timestamped + */ + pf = i40e_netdev_to_pf(tx_ring->netdev); + if (!(pf->flags & I40E_FLAG_PTP)) + return 0; + + if (pf->ptp_tx && + !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + pf->ptp_tx_skb = skb_get(skb); + } else { + return 0; + } + + *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << + I40E_TXD_CTX_QW1_CMD_SHIFT; + + return 1; +} + +/** + * i40e_tx_enable_csum - Enable Tx checksum offloads + * @skb: send buffer + * @tx_flags: Tx flags currently set + * @td_cmd: Tx descriptor command bits to set + * @td_offset: Tx descriptor header offsets to set + * @cd_tunneling: ptr to context desc bits + **/ +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) +{ + struct ipv6hdr *this_ipv6_hdr; + unsigned int this_tcp_hdrlen; + struct iphdr *this_ip_hdr; + u32 network_hdr_len; + u8 l4_hdr = 0; + u32 l4_tunnel = 0; + + if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } + network_hdr_len = skb_inner_network_header_len(skb); + this_ip_hdr = inner_ip_hdr(skb); + this_ipv6_hdr = inner_ipv6_hdr(skb); + this_tcp_hdrlen = inner_tcp_hdrlen(skb); + + if (tx_flags & I40E_TX_FLAGS_IPV4) { + + if (tx_flags & I40E_TX_FLAGS_TSO) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + ip_hdr(skb)->check = 0; + } else { + *cd_tunneling |= + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + } + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) + ip_hdr(skb)->check = 0; + } + + /* Now set the ctx descriptor fields */ + *cd_tunneling |= (skb_network_header_len(skb) >> 2) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | + ((skb_inner_network_offset(skb) - + skb_transport_offset(skb)) >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } + } else { + network_hdr_len = skb_network_header_len(skb); + this_ip_hdr = ip_hdr(skb); + this_ipv6_hdr = ipv6_hdr(skb); + this_tcp_hdrlen = tcp_hdrlen(skb); + } + + /* Enable IP checksum offloads */ + if (tx_flags & I40E_TX_FLAGS_IPV4) { + l4_hdr = this_ip_hdr->protocol; + /* the stack computes the IP header already, the only time we + * need the 
hardware to recompute it is in the case of TSO. + */ + if (tx_flags & I40E_TX_FLAGS_TSO) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; + this_ip_hdr->check = 0; + } else { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; + } + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + l4_hdr = this_ipv6_hdr->nexthdr; + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } + /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ + *td_offset |= (skb_network_offset(skb) >> 1) << + I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L4 checksum offloads */ + switch (l4_hdr) { + case IPPROTO_TCP: + /* enable checksum offloads */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (this_tcp_hdrlen >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_SCTP: + /* enable SCTP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_UDP: + /* enable UDP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + default: + break; + } +} + +/** + * i40e_create_tx_ctx Build the Tx context descriptor + * @tx_ring: ring to create the descriptor on + * @cd_type_cmd_tso_mss: Quad Word 1 + * @cd_tunneling: Quad Word 0 - bits 0-31 + * @cd_l2tag2: Quad Word 0 - bits 32-63 + **/ +static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, + const u64 cd_type_cmd_tso_mss, + const u32 cd_tunneling, const u32 cd_l2tag2) +{ + struct i40e_tx_context_desc *context_desc; + int i = tx_ring->next_to_use; + + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) + return; + + /* grab the next descriptor */ + context_desc = I40E_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* cpu_to_le32 and assign to struct fields */ + context_desc->tunneling_params = cpu_to_le32(cd_tunneling); + context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); + context_desc->rsvd = cpu_to_le16(0); + context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); +} + +/** + * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40e_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +#ifdef I40E_FCOE +int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#else +static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#endif +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40e_maybe_stop_tx(tx_ring, size); +} + +/** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. + **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 0; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 
1 : 0; + } + if (j == I40E_MAX_BUFFER_TXD) { + linearize = true; + break; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + +/** + * i40e_tx_map - Build the Tx descriptor + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @first: first buffer info buffer to use + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * @td_cmd: the command field in the descriptor + * @td_offset: offset for checksum or crc + **/ +#ifdef I40E_FCOE +void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) +#else +static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) +#endif +{ + unsigned int data_len = skb->data_len; + unsigned int size = skb_headlen(skb); + struct skb_frag_struct *frag; + struct i40e_tx_buffer *tx_bi; + struct i40e_tx_desc *tx_desc; + u16 i = tx_ring->next_to_use; + u32 td_tag = 0; + dma_addr_t dma; + u16 gso_segs; + + if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { + td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> + I40E_TX_FLAGS_VLAN_SHIFT; + } + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) + gso_segs = skb_shinfo(skb)->gso_segs; + else + gso_segs = 1; + + /* multiply data chunks by size of headers */ + first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); + first->gso_segs = gso_segs; + first->skb = skb; + first->tx_flags = tx_flags; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_desc = I40E_TX_DESC(tx_ring, i); + tx_bi = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + tx_desc->buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, + I40E_MAX_DATA_PER_TXD, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += I40E_MAX_DATA_PER_TXD; + size -= I40E_MAX_DATA_PER_TXD; + + tx_desc->buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, + size, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_bi[i]; + } + + /* Place RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. 
+ */ + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } + + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + /* notify HW of packet */ + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) + writel(i, tx_ring->tail); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_bi[i]; + i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +/** + * i40e_xmit_descriptor_count - calculate number of tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. 
+ **/ +#ifdef I40E_FCOE +int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) +#else +static int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) +#endif +{ + unsigned int f; + int count = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + count += TXD_USE_COUNT(skb_headlen(skb)); + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; + return 0; + } + return count; +} + +/** + * i40e_xmit_frame_ring - Sends buffer on Tx ring + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, + struct i40e_ring *tx_ring) +{ + u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; + u32 cd_tunneling = 0, cd_l2tag2 = 0; + struct i40e_tx_buffer *first; + u32 td_offset = 0; + u32 tx_flags = 0; + __be16 protocol; + u32 td_cmd = 0; + u8 hdr_len = 0; + int tsyn; + int tso; + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + return NETDEV_TX_BUSY; + + /* prepare the xmit flags */ + if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + goto out_drop; + + /* obtain protocol of skb */ + protocol = vlan_get_protocol(skb); + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + + /* setup IPv4/IPv6 offloads */ + if (protocol == htons(ETH_P_IP)) + tx_flags |= I40E_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + tx_flags |= I40E_TX_FLAGS_IPV6; + + tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + &cd_type_cmd_tso_mss, &cd_tunneling); + + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= I40E_TX_FLAGS_TSO; + + tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); + + if (tsyn) + tx_flags |= I40E_TX_FLAGS_TSYN; + + if (i40e_chk_linearize(skb, tx_flags)) + if (skb_linearize(skb)) + goto out_drop; + + skb_tx_timestamp(skb); + + /* always enable CRC insertion offload */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + + /* Always offload the checksum, since it's in the data descriptor */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + tx_flags |= I40E_TX_FLAGS_CSUM; + + i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + } + + i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, + cd_tunneling, cd_l2tag2); + + /* Add Flow Director ATR if it's enabled. + * + * NOTE: this must always be directly before the data descriptor. 
+ */ + i40e_atr(tx_ring, skb, tx_flags, protocol); + + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, I40E_MIN_TX_LEN)) + return NETDEV_TX_OK; + + return i40e_xmit_frame_ring(skb, tx_ring); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h new file mode 100644 index 000000000..4b0b8102c --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -0,0 +1,313 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TXRX_H_ +#define _I40E_TXRX_H_ + +/* Interrupt Throttling and Rate Limiting Goodies */ + +#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ +#define I40E_ITR_100K 0x0005 +#define I40E_ITR_20K 0x0019 +#define I40E_ITR_8K 0x003E +#define I40E_ITR_4K 0x007A +#define I40E_ITR_RX_DEF I40E_ITR_8K +#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ +#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ +#define I40E_DEFAULT_IRQ_WORK 256 +#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) +#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) +#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) + +#define I40E_QUEUE_END_OF_LIST 0x7FF + +/* this enum matches hardware bits and is meant to be used by DYN_CTLN + * registers and QINT registers or more generally anywhere in the manual + * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any + * register but instead is a special value meaning "don't update" ITR0/1/2. 
+ */ +enum i40e_dyn_idx_t { + I40E_IDX_ITR0 = 0, + I40E_IDX_ITR1 = 1, + I40E_IDX_ITR2 = 2, + I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +}; + +/* these are indexes into ITRN registers */ +#define I40E_RX_ITR I40E_IDX_ITR0 +#define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_PE_ITR I40E_IDX_ITR2 + +/* Supported RSS offloads */ +#define I40E_DEFAULT_RSS_HENA ( \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +/* Supported Rx Buffer Sizes */ +#define I40E_RXBUFFER_512 512 /* Used for packet split */ +#define I40E_RXBUFFER_2048 2048 +#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ +#define I40E_RXBUFFER_4096 4096 +#define I40E_RXBUFFER_8192 8192 +#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ + +/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, + * this adds up to 512 bytes of extra data meaning the smallest allocation + * we could have is 1K. + * i.e. RXBUFFER_512 --> size-1024 slab + */ +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + +#define I40E_RX_NEXT_DESC(r, i, n) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + (n) = I40E_RX_DESC((r), (i)); \ + } while (0) + +#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ + do { \ + I40E_RX_NEXT_DESC((r), (i), (n)); \ + prefetch((n)); \ + } while (0) + +#define i40e_rx_desc i40e_32byte_rx_desc + +#define I40E_MAX_BUFFER_TXD 8 +#define I40E_MIN_TX_LEN 17 +#define I40E_MAX_DATA_PER_TXD 8192 + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define I40E_MIN_DESC_PENDING 4 + +#define I40E_TX_FLAGS_CSUM (u32)(1) +#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) +#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) +#define I40E_TX_FLAGS_TSO (u32)(1 << 3) +#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) +#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) +#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) +#define I40E_TX_FLAGS_FSO (u32)(1 << 7) +#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) +#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 +#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define I40E_TX_FLAGS_VLAN_SHIFT 16 + +struct i40e_tx_buffer { + struct i40e_tx_desc *next_to_watch; + unsigned long time_stamp; + union { + struct sk_buff *skb; + void *raw_buf; + }; + unsigned int bytecount; + unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct i40e_rx_buffer { + struct sk_buff *skb; + void *hdr_buf; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; + unsigned int page_offset; +}; + +struct i40e_queue_stats { + u64 packets; + u64 
bytes; +}; + +struct i40e_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct i40e_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buff_failed; +}; + +enum i40e_ring_state_t { + __I40E_TX_FDIR_INIT_DONE, + __I40E_TX_XPS_INIT_DONE, + __I40E_TX_DETECT_HANG, + __I40E_HANG_CHECK_ARMED, + __I40E_RX_PS_ENABLED, + __I40E_RX_16BYTE_DESC_ENABLED, +}; + +#define ring_is_ps_enabled(ring) \ + test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define set_ring_ps_enabled(ring) \ + set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define clear_ring_ps_enabled(ring) \ + clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define ring_is_16byte_desc_enabled(ring) \ + test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define set_ring_16byte_desc_enabled(ring) \ + set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define clear_ring_16byte_desc_enabled(ring) \ + clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) + +/* struct that defines a descriptor ring, associated with a VSI */ +struct i40e_ring { + struct i40e_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + union { + struct i40e_tx_buffer *tx_bi; + struct i40e_rx_buffer *rx_bi; + }; + unsigned long state; + u16 queue_index; /* Queue number of ring */ + u8 dcb_tc; /* Traffic class of ring */ + u8 __iomem *tail; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + u16 rx_hdr_len; + u16 rx_buf_len; + u8 dtype; +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 + u8 hsplit; +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + u8 atr_sample_rate; + u8 atr_count; + + unsigned long last_rx_timestamp; + + bool ring_active; /* is ring online or not */ + bool arm_wb; /* do something to arm write back */ + + /* stats structs */ + struct i40e_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct i40e_tx_queue_stats tx_stats; + struct i40e_rx_queue_stats rx_stats; + }; + + unsigned int size; /* length of descriptor ring in bytes */ + dma_addr_t dma; /* physical address of ring */ + + struct i40e_vsi *vsi; /* Backreference to associated VSI */ + struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + + struct rcu_head rcu; /* to avoid race on free */ +} ____cacheline_internodealigned_in_smp; + +enum i40e_latency_range { + I40E_LOWEST_LATENCY = 0, + I40E_LOW_LATENCY = 1, + I40E_BULK_LATENCY = 2, +}; + +struct i40e_ring_container { + /* array of pointers to rings */ + struct i40e_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; + enum i40e_latency_range latency_range; + u16 itr; +}; + +/* iterator for handling rings in ring container */ +#define i40e_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 
cleaned_count); +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_headers(struct i40e_ring *rxr); +netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void i40e_clean_tx_ring(struct i40e_ring *tx_ring); +void i40e_clean_rx_ring(struct i40e_ring *rx_ring); +int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring); +int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring); +void i40e_free_tx_resources(struct i40e_ring *tx_ring); +void i40e_free_rx_resources(struct i40e_ring *rx_ring); +int i40e_napi_poll(struct napi_struct *napi, int budget); +#ifdef I40E_FCOE +void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset); +int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); +int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring); +int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, u32 *flags); +#endif +#endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h new file mode 100644 index 000000000..568e855da --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -0,0 +1,1421 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TYPE_H_ +#define _I40E_TYPE_H_ + +#include "i40e_status.h" +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) (mask << shift) + +#define I40E_MAX_VSI_QP 16 +#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_CHAINED_RX_BUFFERS 5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 + +/* Max default timeout in ms, */ +#define I40E_MAX_NVM_TIMEOUT 18000 + +/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ +#define I40E_MS_TO_GTIME(time) ((time) * 1000) + +/* forward declaration */ +struct i40e_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); + +/* Data type manipulation macros. */ + +#define I40E_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. 
+ */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_X710, + I40E_MAC_XL710, + I40E_MAC_VF, + I40E_MAC_GENERIC, +}; + +enum i40e_media_type { + I40E_MEDIA_TYPE_UNKNOWN = 0, + I40E_MEDIA_TYPE_FIBER, + I40E_MEDIA_TYPE_BASET, + I40E_MEDIA_TYPE_BACKPLANE, + I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, + I40E_MEDIA_TYPE_VIRTUAL +}; + +enum i40e_fc_mode { + I40E_FC_NONE = 0, + I40E_FC_RX_PAUSE, + I40E_FC_TX_PAUSE, + I40E_FC_FULL, + I40E_FC_PFC, + I40E_FC_DEFAULT +}; + +enum i40e_set_fc_aq_failures { + I40E_SET_FC_AQ_FAIL_NONE = 0, + I40E_SET_FC_AQ_FAIL_GET = 1, + I40E_SET_FC_AQ_FAIL_SET = 2, + I40E_SET_FC_AQ_FAIL_UPDATE = 4, + I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1, + I40E_VSI_VMDQ2, + I40E_VSI_CTRL, + I40E_VSI_FCOE, + I40E_VSI_MIRROR, + I40E_VSI_SRIOV, + I40E_VSI_FDIR, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +struct i40e_link_status { + enum i40e_aq_phy_type phy_type; + enum i40e_aq_link_speed link_speed; + u8 link_info; + u8 an_info; + u8 ext_info; + u8 loopback; + /* is Link Status Event notification to SW enabled */ + bool lse_enable; + u16 max_frame_size; + bool crc_enable; + u8 pacing; + u8 requested_speeds; +}; + +struct i40e_phy_info { + struct i40e_link_status link_info; + struct i40e_link_status link_info_old; + u32 autoneg_advertised; + u32 phy_id; + u32 module_type; + bool get_link_info; + enum i40e_media_type media_type; +}; + +#define I40E_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + u32 switch_mode; +#define I40E_NVM_IMAGE_TYPE_EVB 0x0 +#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 +#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + + u32 management_mode; + u32 npar_enable; + u32 os2bmc; + u32 valid_functions; + bool sr_iov_1_1; + bool vmdq; + bool evb_802_1_qbg; /* Edge Virtual Bridging */ + bool evb_802_1_qbh; /* Bridge Port Extension */ + bool dcb; + bool fcoe; + bool iscsi; /* Indicates iSCSI enabled */ + bool mfp_mode_1; + bool mgmt_cem; + bool ieee_1588; + bool iwarp; + bool fd; + u32 fd_filters_guaranteed; + u32 fd_filters_best_effort; + bool rss; + u32 rss_table_size; + u32 rss_table_entry_width; + bool led[I40E_HW_CAP_MAX_GPIO]; + bool sdp[I40E_HW_CAP_MAX_GPIO]; + u32 nvm_image_type; + u32 num_flow_director_filters; + u32 num_vfs; + u32 vf_base_id; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors; + u32 num_msix_vectors_vf; + u32 led_pin_num; + u32 sdp_pin_num; + u32 mdio_port_num; + u32 mdio_port_mode; + u8 rx_buf_chain_len; + u32 enabled_tcmap; + u32 maxtc; + u64 wr_csr_prot; +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum i40e_aq_resources_ids { + I40E_NVM_RESOURCE_ID = 1 +}; + +enum i40e_aq_resource_access_type { + I40E_RESOURCE_READ = 1, + I40E_RESOURCE_WRITE +}; + +struct i40e_nvm_info { + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ + u32 timeout; /* [ms] */ + u16 sr_size; /* Shadow RAM size in words */ + bool blank_nvm_mode; /* is NVM empty (no FW present)*/ + u16 version; /* NVM package version */ + u32 eetrack; /* NVM data version */ +}; + +/* definitions used in NVM update support */ + +enum i40e_nvmupd_cmd { + I40E_NVMUPD_INVALID, + I40E_NVMUPD_READ_CON, + I40E_NVMUPD_READ_SNT, + I40E_NVMUPD_READ_LCB, + I40E_NVMUPD_READ_SA, + 
I40E_NVMUPD_WRITE_ERA, + I40E_NVMUPD_WRITE_CON, + I40E_NVMUPD_WRITE_SNT, + I40E_NVMUPD_WRITE_LCB, + I40E_NVMUPD_WRITE_SA, + I40E_NVMUPD_CSUM_CON, + I40E_NVMUPD_CSUM_SA, + I40E_NVMUPD_CSUM_LCB, +}; + +enum i40e_nvmupd_state { + I40E_NVMUPD_STATE_INIT, + I40E_NVMUPD_STATE_READING, + I40E_NVMUPD_STATE_WRITING +}; + +/* nvm_access definition and its masks/shifts need to be accessible to + * application, core driver, and shared code. Where is the right file? + */ +#define I40E_NVM_READ 0xB +#define I40E_NVM_WRITE 0xC + +#define I40E_NVM_MOD_PNT_MASK 0xFF + +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 + +#define I40E_NVM_ADAPT_SHIFT 16 +#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) + +#define I40E_NVMUPD_MAX_DATA 4096 +#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ + +struct i40e_nvm_access { + u32 command; + u32 config; + u32 offset; /* in bytes */ + u32 data_size; /* in bytes */ + u8 data[1]; +}; + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths */ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; +}; + +/* Flow control (FC) parameters */ +struct i40e_fc_info { + enum i40e_fc_mode current_mode; /* FC mode in effect */ + enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DCBX_MAX_APPS 32 +#define I40E_LLDPDU_SIZE 1500 +#define I40E_TLV_STATUS_OPER 0x1 +#define I40E_TLV_STATUS_SYNC 0x2 +#define I40E_TLV_STATUS_ERR 0x4 +#define I40E_CEE_OPER_MAX_APPS 3 +#define I40E_APP_PROTOID_FCOE 0x8906 +#define I40E_APP_PROTOID_ISCSI 0x0cbc +#define I40E_APP_PROTOID_FIP 0x8914 +#define I40E_APP_SEL_ETHTYPE 0x1 +#define I40E_APP_SEL_TCPIP 0x2 + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct i40e_dcb_ets_config { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct i40e_dcb_pfc_config { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcenable; +}; + +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct i40e_dcb_app_priority_table { + u8 priority; + u8 selector; + u16 protocolid; +}; + +struct i40e_dcbx_config { + u8 dcbx_mode; +#define I40E_DCBX_MODE_CEE 0x1 +#define I40E_DCBX_MODE_IEEE 0x2 + u32 numapps; + struct i40e_dcb_ets_config etscfg; + struct i40e_dcb_ets_config etsrec; + struct i40e_dcb_pfc_config pfc; + struct 
i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; +}; + +/* Port hardware description */ +struct i40e_hw { + u8 __iomem *hw_addr; + void *back; + + /* subsystem structs */ + struct i40e_phy_info phy; + struct i40e_mac_info mac; + struct i40e_bus_info bus; + struct i40e_nvm_info nvm; + struct i40e_fc_info fc; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 port; + bool adapter_stopped; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + struct i40e_hw_capabilities func_caps; + + /* Flow Director shared filter space */ + u16 fdir_shared_filter_count; + + /* device profile info */ + u8 pf_id; + u16 main_vsi_seid; + + /* for multi-function MACs */ + u16 partition_id; + u16 num_partitions; + u16 num_ports; + + /* Closest numa node to the device */ + u16 numa_node; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* state of nvm update process */ + enum i40e_nvmupd_state nvmupd_state; + + /* HMC info */ + struct i40e_hmc_info hmc; /* HMC info struct */ + + /* LLDP/DCBX Status */ + u16 dcbx_status; + + /* DCBX info */ + struct i40e_dcbx_config local_dcbx_config; + struct i40e_dcbx_config remote_dcbx_config; + + /* debug mask */ + u32 debug_mask; +}; + +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + +struct i40e_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + 
I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ + << I40E_RXD_QW1_STATUS_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ + I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + 
I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 0, + I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ + I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define 
I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + I40E_TX_DESC_CMD_FCOET = 0x0080, + I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define I40E_TXD_QW1_OFFSET_SHIFT 16 +#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + I40E_TXD_QW1_OFFSET_SHIFT) + +enum i40e_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define I40E_TXD_QW1_L2TAG1_SHIFT 48 +#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct i40e_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) + +#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 +#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) + +enum i40e_tx_ctx_desc_cmd_bits { + I40E_TX_CTX_DESC_TSO = 0x01, + I40E_TX_CTX_DESC_TSYN = 0x02, + I40E_TX_CTX_DESC_IL2TAG2 = 0x04, + I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, + I40E_TX_CTX_DESC_SWPE = 0x40 +}; + +#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 +#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + I40E_TXD_CTX_QW1_MSS_SHIFT) + +#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 +#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) + +#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + I40E_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum i40e_tx_ctx_desc_eipt_offload { + I40E_TX_CTX_EXT_IP_NONE = 0x0, + I40E_TX_CTX_EXT_IP_IPV6 = 0x1, + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + I40E_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + 
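+/* Worked example (illustrative only; the header lengths and field usage here
+ * are assumptions for the sake of the example, not taken from the datasheet):
+ * a packet carried in a GRE tunnel over an outer IPv4 header whose checksum
+ * the hardware should compute could describe its outer headers in the
+ * context descriptor's tunneling parameters roughly as
+ *
+ *	cd_tunneling = I40E_TX_CTX_EXT_IP_IPV4 | I40E_TXD_CTX_GRE_TUNNELING;
+ *
+ * (the EIPT field sits at bit 0, and I40E_TXD_CTX_GRE_TUNNELING already
+ * carries its NATT shift), with the outer IP header length and the tunnel
+ * header length then filled into the EXT_IPLEN and NATLEN fields defined
+ * below.
+ */
+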
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 +#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ + I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK + +#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + I40E_TXD_CTX_QW0_NATLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + I40E_TXD_CTX_QW0_DECTTL_SHIFT) + +struct i40e_filter_program_desc { + __le32 qindex_flex_ptype_vsi; + __le32 rsvd; + __le32 dtype_cmd_cntindex; + __le32 fd_id; +}; +#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 +#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) +#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 +#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) +#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 +#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) + +/* Packet Classifier Types for filters */ +enum i40e_filter_pctype { + /* Note: Values 0-30 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-40 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OX = 48, + I40E_FILTER_PCTYPE_FCOE_RX = 49, + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +enum i40e_filter_program_desc_dest { + I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, +}; + +enum i40e_filter_program_desc_fd_status { + I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, +}; + +#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) + +#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 +#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) + +#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) + +enum i40e_filter_program_desc_pcmd { + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, +}; + +#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + 
I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ + I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) + +#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 +#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) + +enum i40e_filter_type { + I40E_FLOW_DIRECTOR_FLTR = 0, + I40E_PE_QUAD_HASH_FLTR = 1, + I40E_ETHERTYPE_FLTR, + I40E_FCOE_CTX_FLTR, + I40E_MAC_VLAN_FLTR, + I40E_HASH_FLTR +}; + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +struct i40e_veb_context { + u16 seid; + u16 uplink_seid; + u16 veb_number; + u16 vebs_allocated; + u16 vebs_unallocated; + u16 flags; + struct i40e_aqc_get_veb_parameters_completion info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +#ifdef I40E_FCOE +/* Statistics collected per function for FCoE */ +struct i40e_fcoe_stats { + u64 rx_fcoe_packets; /* fcoeprc */ + u64 rx_fcoe_dwords; /* focedwrc */ + u64 rx_fcoe_dropped; /* fcoerpdc */ + u64 tx_fcoe_packets; /* fcoeptc */ + u64 tx_fcoe_dwords; /* focedwtc */ + u64 fcoe_bad_fccrc; /* fcoecrc */ + u64 fcoe_last_error; /* fcoelast */ + u64 fcoe_ddp_count; /* fcoeddpc */ +}; + +/* offset to per function FCoE statistics block */ +#define I40E_FCOE_VF_STAT_OFFSET 0 +#define I40E_FCOE_PF_STAT_OFFSET 128 +#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF) + +#endif +/* Statistics collected by the MAC */ +struct i40e_hw_port_stats { + /* eth stats collected by the port */ + struct i40e_eth_stats eth; + + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_length_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_packet_dropped; /* mspdc */ + u64 checksum_error; /* xec */ + /* flow director 
stats */ + u64 fd_atr_match; + u64 fd_sb_match; + /* EEE LPI */ + u32 tx_lpi_status; + u32 rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ +}; + +/* Checksum and Shadow RAM pointers */ +#define I40E_SR_NVM_CONTROL_WORD 0x00 +#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_SR_PBA_FLAGS 0x15 +#define I40E_SR_PBA_BLOCK_PTR 0x16 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 +#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_NVM_EETRACK_LO 0x2D +#define I40E_SR_NVM_EETRACK_HI 0x2E +#define I40E_SR_VPD_PTR 0x2F +#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define I40E_SR_SW_CHECKSUM_WORD 0x3F + +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 +#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 +#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 +#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) + +/* Shadow RAM related */ +#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define I40E_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. + */ +#define I40E_SR_SW_CHECKSUM_BASE 0xBABA + +#define I40E_SRRD_SRCTL_ATTEMPTS 100000 + +#ifdef I40E_FCOE +/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */ + +enum i40E_fcoe_tx_ctx_desc_cmd_bits { + I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10, + I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20, + I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40, + I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80 +}; + +/* FCoE DDP Context descriptor */ +struct i40e_fcoe_ddp_context_desc { + __le64 rsvd; + __le64 type_cmd_foff_lsize; +}; + +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4 +#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) + +enum i40e_fcoe_ddp_ctx_desc_cmd_bits { + I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */ + I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */ +}; + +#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16 +#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32 +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT) + +/* FCoE DDP/DWO Queue Context descriptor */ +struct i40e_fcoe_queue_context_desc { + __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */ + __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */ +}; + +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \ + 
I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12 +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \ + I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13 +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +enum i40e_fcoe_queue_ctx_desc_tph_bits { + I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1, + I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2 +}; + +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30 +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT) + +/* FCoE DDP/DWO Filter Context descriptor */ +struct i40e_fcoe_filter_context_desc { + __le32 param; + __le16 seqn; + + /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */ + __le16 rsvd_dmaindx; + + /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */ + __le64 flags_rsvd_lanq; +}; + +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4 +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \ + I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT) + +enum i40e_fcoe_filter_ctx_desc_flags_bits { + I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00, + I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01, + I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00, + I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04 +}; + +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0 +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \ + I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \ + I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53 +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \ + I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT) + +#endif /* I40E_FCOE */ +enum i40e_switch_element_types { + I40E_SWITCH_ELEMENT_TYPE_MAC = 1, + I40E_SWITCH_ELEMENT_TYPE_PF = 2, + I40E_SWITCH_ELEMENT_TYPE_VF = 3, + I40E_SWITCH_ELEMENT_TYPE_EMP = 4, + I40E_SWITCH_ELEMENT_TYPE_BMC = 6, + I40E_SWITCH_ELEMENT_TYPE_PE = 16, + I40E_SWITCH_ELEMENT_TYPE_VEB = 17, + I40E_SWITCH_ELEMENT_TYPE_PA = 18, + I40E_SWITCH_ELEMENT_TYPE_VSI = 19, +}; + +/* Supported EtherType filters */ +enum i40e_ether_type_index { + I40E_ETHER_TYPE_1588 = 0, + I40E_ETHER_TYPE_FIP = 1, + I40E_ETHER_TYPE_OUI_EXTENDED = 2, + I40E_ETHER_TYPE_MAC_CONTROL = 3, + I40E_ETHER_TYPE_LLDP = 4, + I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, + I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, + I40E_ETHER_TYPE_QCN_CNM = 7, + I40E_ETHER_TYPE_8021X = 8, + I40E_ETHER_TYPE_ARP = 9, + I40E_ETHER_TYPE_RSV1 = 10, + I40E_ETHER_TYPE_RSV2 = 11, +}; + +/* Filter context base size is 1K */ +#define I40E_HASH_FILTER_BASE_SIZE 1024 +/* Supported Hash filter values */ +enum i40e_hash_filter_size { + I40E_HASH_FILTER_SIZE_1K = 0, + I40E_HASH_FILTER_SIZE_2K = 1, + I40E_HASH_FILTER_SIZE_4K = 2, + I40E_HASH_FILTER_SIZE_8K = 3, + I40E_HASH_FILTER_SIZE_16K = 4, + I40E_HASH_FILTER_SIZE_32K = 5, + I40E_HASH_FILTER_SIZE_64K = 6, + I40E_HASH_FILTER_SIZE_128K = 7, + I40E_HASH_FILTER_SIZE_256K = 8, + I40E_HASH_FILTER_SIZE_512K = 9, + I40E_HASH_FILTER_SIZE_1M = 10, +}; + +/* DMA context base size is 0.5K */ +#define I40E_DMA_CNTX_BASE_SIZE 512 +/* Supported DMA context values */ +enum i40e_dma_cntx_size { + I40E_DMA_CNTX_SIZE_512 = 0, + I40E_DMA_CNTX_SIZE_1K = 1, + I40E_DMA_CNTX_SIZE_2K = 2, + 
I40E_DMA_CNTX_SIZE_4K = 3, + I40E_DMA_CNTX_SIZE_8K = 4, + I40E_DMA_CNTX_SIZE_16K = 5, + I40E_DMA_CNTX_SIZE_32K = 6, + I40E_DMA_CNTX_SIZE_64K = 7, + I40E_DMA_CNTX_SIZE_128K = 8, + I40E_DMA_CNTX_SIZE_256K = 9, +}; + +/* Supported Hash look up table (LUT) sizes */ +enum i40e_hash_lut_size { + I40E_HASH_LUT_SIZE_128 = 0, + I40E_HASH_LUT_SIZE_512 = 1, +}; + +/* Structure to hold a per PF filter control settings */ +struct i40e_filter_control_settings { + /* number of PE Quad Hash filter buckets */ + enum i40e_hash_filter_size pe_filt_num; + /* number of PE Quad Hash contexts */ + enum i40e_dma_cntx_size pe_cntx_num; + /* number of FCoE filter buckets */ + enum i40e_hash_filter_size fcoe_filt_num; + /* number of FCoE DDP contexts */ + enum i40e_dma_cntx_size fcoe_cntx_num; + /* size of the Hash LUT */ + enum i40e_hash_lut_size hash_lut_size; + /* enable FDIR filters for PF and its VFs */ + bool enable_fdir; + /* enable Ethertype filters for PF and its VFs */ + bool enable_ethtype; + /* enable MAC/VLAN filters for PF and its VFs */ + bool enable_macvlan; +}; + +/* Structure to hold device level control filter counts */ +struct i40e_control_filter_stats { + u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ + u16 etype_used; /* Used perfect EtherType filters */ + u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ + u16 etype_free; /* Un-used perfect EtherType filters */ +}; + +enum i40e_reset_type { + I40E_RESET_POR = 0, + I40E_RESET_CORER = 1, + I40E_RESET_GLOBR = 2, + I40E_RESET_EMPR = 3, +}; + +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0xD +struct i40e_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + +/* RSS Hash Table Size */ +#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 +#endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h new file mode 100644 index 000000000..2d20af290 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -0,0 +1,362 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_VIRTCHNL_H_ +#define _I40E_VIRTCHNL_H_ + +#include "i40e_type.h" + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the various i40e drivers. + * + * Admin queue buffer usage: + * desc->opcode is always i40e_aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * Firmware copies the cookie fields when sending messages between the PF and + * VF, but uses all other fields internally. Due to this limitation, we + * must send all messages as "indirect", i.e. using an external buffer. + * + * All the vsi indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value is of + * i40e_status_code type, defined in the i40e_type.h. + * + * In general, VF driver initialization should roughly follow the order of these + * opcodes. The VF driver must first validate the API version of the PF driver, + * then request a reset, then get resources, then configure queues and + * interrupts. After these operations are complete, the VF driver may start + * its queues, optionally add MAC and VLAN filters, and process traffic. + */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum i40e_virtchnl_ops { +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + */ + I40E_VIRTCHNL_OP_UNKNOWN = 0, + I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, +}; + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct i40e_virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + i40e_status v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +/* Message descriptions and data structures.*/ + +/* I40E_VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. 
+ * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define I40E_VIRTCHNL_VERSION_MAJOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR 0 +struct i40e_virtchnl_version_info { + u32 major; + u32 minor; +}; + +/* I40E_VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES + * VF sends this request to PF with no parameters + * PF responds with an indirect message containing + * i40e_virtchnl_vf_resource and one or more + * i40e_virtchnl_vsi_resource structures. + */ + +struct i40e_virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum i40e_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; +/* VF offload flags */ +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 + +struct i40e_virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 max_fcoe_contexts; + u32 max_fcoe_filters; + + struct i40e_virtchnl_vsi_resource vsi_res[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of i40e_virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct i40e_virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; + u64 dma_ring_addr; + u64 dma_headwb_addr; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of i40e_virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct i40e_virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; + u32 databuffer_size; + u32 max_pkt_size; + u64 dma_ring_addr; + enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct i40e_virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. 
*/ + struct i40e_virtchnl_txq_info txq; + struct i40e_virtchnl_rxq_info rxq; +}; + +struct i40e_virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + struct i40e_virtchnl_queue_pair_info qpair[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct i40e_virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +struct i40e_virtchnl_irq_map_info { + u16 num_vectors; + struct i40e_virtchnl_vector_map vecmap[1]; +}; + +/* I40E_VIRTCHNL_OP_ENABLE_QUEUES + * I40E_VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct i40e_virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct i40e_virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +struct i40e_virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct i40e_virtchnl_ether_addr list[1]; +}; + +/* I40E_VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* I40E_VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct i40e_virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct i40e_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 +#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* I40E_VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct i40e_eth_stats in an external buffer. + */ + +/* I40E_VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. 
+ */ +enum i40e_virtchnl_event_codes { + I40E_VIRTCHNL_EVENT_UNKNOWN = 0, + I40E_VIRTCHNL_EVENT_LINK_CHANGE, + I40E_VIRTCHNL_EVENT_RESET_IMPENDING, + I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; +#define I40E_PF_EVENT_SEVERITY_INFO 0 +#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct i40e_virtchnl_pf_event { + enum i40e_virtchnl_event_codes event; + union { + struct { + enum i40e_aq_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +/* VF reset states - these are written into the RSTAT register: + * I40E_VFGEN_RSTAT1 on the PF + * I40E_VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum i40e_vfr_states { + I40E_VFR_INPROGRESS = 0, + I40E_VFR_COMPLETED, + I40E_VFR_VFACTIVE, + I40E_VFR_UNKNOWN, +}; + +#endif /* _I40E_VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c new file mode 100644 index 000000000..4e9376da0 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -0,0 +1,2402 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e.h" + +/*********************notification routines***********************/ + +/** + * i40e_vc_vf_broadcast + * @pf: pointer to the PF structure + * @opcode: operation code + * @retval: return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send a message to all VFs on a given PF + **/ +static void i40e_vc_vf_broadcast(struct i40e_pf *pf, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, u8 *msg, + u16 msglen) +{ + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + /* Not all vfs are enabled so skip the ones that are not */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + continue; + + /* Ignore return value on purpose - a given VF may fail, but + * we need to keep going and send to all of them + */ + i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, + msg, msglen, NULL); + } +} + +/** + * i40e_vc_notify_link_state + * @vf: pointer to the VF structure + * + * send a link status message to a single VF + **/ +static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) +{ + struct i40e_virtchnl_pf_event pfe; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *ls = &pf->hw.phy.link_info; + int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); + } else { + pfe.event_data.link_event.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = ls->link_speed; + } + i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); +} + +/** + * i40e_vc_notify_link_state + * @pf: pointer to the PF structure + * + * send a link status message to all VFs on a given PF + **/ +void i40e_vc_notify_link_state(struct i40e_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + i40e_vc_notify_vf_link_state(&pf->vf[i]); +} + +/** + * i40e_vc_notify_reset + * @pf: pointer to the PF structure + * + * indicate a pending reset to all VFs on a given PF + **/ +void i40e_vc_notify_reset(struct i40e_pf *pf) +{ + struct i40e_virtchnl_pf_event pfe; + + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, + (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); +} + +/** + * i40e_vc_notify_vf_reset + * @vf: pointer to the VF structure + * + * indicate a pending reset to the given VF + **/ +void i40e_vc_notify_vf_reset(struct i40e_vf *vf) +{ + struct i40e_virtchnl_pf_event pfe; + int abs_vf_id; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return; + + /* verify if the VF is in either init or active before proceeding */ + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && + !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + return; + + abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; + + pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, + sizeof(struct i40e_virtchnl_pf_event), NULL); +} +/***********************misc routines*****************************/ + +/** + * i40e_vc_disable_vf + * @pf: pointer to the PF info + * @vf: pointer to the VF info + * + * Disable the VF through a SW reset + **/ +static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) +{ + struct i40e_hw *hw = &pf->hw; + u32 reg; + + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + i40e_flush(hw); +} + +/** + * i40e_vc_isvalid_vsi_id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id + * + * check for the valid VSI id + **/ +static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); + + return (vsi && (vsi->vf_id == vf->vf_id)); +} + +/** + * i40e_vc_isvalid_queue_id + * @vf: pointer to the VF info + * @vsi_id: vsi id + * @qid: vsi relative queue id + * + * check for the valid queue id + **/ +static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, + u8 qid) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); + + return (vsi && (qid < vsi->alloc_queue_pairs)); +} + +/** + * i40e_vc_isvalid_vector_id + * @vf: pointer to the VF info + * @vector_id: VF relative vector id + * + * check for the valid vector id + **/ +static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) +{ + struct i40e_pf *pf = vf->pf; + + return vector_id < pf->hw.func_caps.num_msix_vectors_vf; +} + +/***********************vf resource mgmt routines*****************/ + +/** + * i40e_vc_get_pf_queue_id + * @vf: pointer to the VF info + 
* @vsi_id: id of VSI as provided by the FW + * @vsi_queue_id: vsi relative queue id + * + * return PF relative queue id + **/ +static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, + u8 vsi_queue_id) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); + u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; + + if (!vsi) + return pf_queue_id; + + if (le16_to_cpu(vsi->info.mapping_flags) & + I40E_AQ_VSI_QUE_MAP_NONCONTIG) + pf_queue_id = + le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]); + else + pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) + + vsi_queue_id; + + return pf_queue_id; +} + +/** + * i40e_config_irq_link_list + * @vf: pointer to the VF info + * @vsi_id: id of VSI as given by the FW + * @vecmap: irq map info + * + * configure irq link list from the map + **/ +static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, + struct i40e_virtchnl_vector_map *vecmap) +{ + unsigned long linklistmap = 0, tempmap; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u16 vsi_queue_id, pf_queue_id; + enum i40e_queue_type qtype; + u16 next_q, vector_id; + u32 reg, reg_idx; + u16 itr_idx = 0; + + vector_id = vecmap->vector_id; + /* setup the head */ + if (0 == vector_id) + reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); + else + reg_idx = I40E_VPINT_LNKLSTN( + ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + + (vector_id - 1)); + + if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { + /* Special case - No queues mapped on this vector */ + wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK); + goto irq_list_done; + } + tempmap = vecmap->rxq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + linklistmap |= (1 << + (I40E_VIRTCHNL_SUPPORTED_QTYPES * + vsi_queue_id)); + } + + tempmap = vecmap->txq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + linklistmap |= (1 << + (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id + + 1)); + } + + next_q = find_first_bit(&linklistmap, + (I40E_MAX_VSI_QP * + I40E_VIRTCHNL_SUPPORTED_QTYPES)); + vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; + qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); + + wr32(hw, reg_idx, reg); + + while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { + switch (qtype) { + case I40E_QUEUE_TYPE_RX: + reg_idx = I40E_QINT_RQCTL(pf_queue_id); + itr_idx = vecmap->rxitr_idx; + break; + case I40E_QUEUE_TYPE_TX: + reg_idx = I40E_QINT_TQCTL(pf_queue_id); + itr_idx = vecmap->txitr_idx; + break; + default: + break; + } + + next_q = find_next_bit(&linklistmap, + (I40E_MAX_VSI_QP * + I40E_VIRTCHNL_SUPPORTED_QTYPES), + next_q + 1); + if (next_q < + (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { + vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; + qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, + vsi_queue_id); + } else { + pf_queue_id = I40E_QUEUE_END_OF_LIST; + qtype = 0; + } + + /* format for the RQCTL & TQCTL regs is same */ + reg = (vector_id) | + (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | + (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) | + (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT); + wr32(hw, reg_idx, reg); + } + +irq_list_done: + i40e_flush(hw); +} + +/** + * i40e_config_vsi_tx_queue + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW 
+ * @vsi_queue_id: vsi relative queue index + * @info: config. info + * + * configure tx queue + **/ +static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, + u16 vsi_queue_id, + struct i40e_virtchnl_txq_info *info) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_hmc_obj_txq tx_ctx; + struct i40e_vsi *vsi; + u16 pf_queue_id; + u32 qtx_ctl; + int ret = 0; + + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + vsi = i40e_find_vsi_from_id(pf, vsi_id); + + /* clear the context structure first */ + memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); + + /* only set the required fields */ + tx_ctx.base = info->dma_ring_addr / 128; + tx_ctx.qlen = info->ring_len; + tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); + tx_ctx.rdylist_act = 0; + tx_ctx.head_wb_ena = info->headwb_enabled; + tx_ctx.head_wb_addr = info->dma_headwb_addr; + + /* clear the context in the HMC */ + ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to clear VF LAN Tx queue context %d, error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_context; + } + + /* set the context in the HMC */ + ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to set VF LAN Tx queue context %d error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_context; + } + + /* associate this queue with the PCI VF function */ + qtx_ctl = I40E_QTX_CTL_VF_QUEUE; + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) + & I40E_QTX_CTL_PF_INDX_MASK); + qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) + << I40E_QTX_CTL_VFVM_INDX_SHIFT) + & I40E_QTX_CTL_VFVM_INDX_MASK); + wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl); + i40e_flush(hw); + +error_context: + return ret; +} + +/** + * i40e_config_vsi_rx_queue + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW + * @vsi_queue_id: vsi relative queue index + * @info: config. info + * + * configure rx queue + **/ +static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, + u16 vsi_queue_id, + struct i40e_virtchnl_rxq_info *info) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_hmc_obj_rxq rx_ctx; + u16 pf_queue_id; + int ret = 0; + + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + + /* clear the context structure first */ + memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); + + /* only set the required fields */ + rx_ctx.base = info->dma_ring_addr / 128; + rx_ctx.qlen = info->ring_len; + + if (info->splithdr_enabled) { + rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | + I40E_RX_SPLIT_IP | + I40E_RX_SPLIT_TCP_UDP | + I40E_RX_SPLIT_SCTP; + /* header length validation */ + if (info->hdr_size > ((2 * 1024) - 64)) { + ret = -EINVAL; + goto error_param; + } + rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; + + /* set splitalways mode 10b */ + rx_ctx.dtype = 0x2; + } + + /* databuffer length validation */ + if (info->databuffer_size > ((16 * 1024) - 128)) { + ret = -EINVAL; + goto error_param; + } + rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; + + /* max pkt. 
length validation */ + if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) { + ret = -EINVAL; + goto error_param; + } + rx_ctx.rxmax = info->max_pkt_size; + + /* enable 32bytes desc always */ + rx_ctx.dsize = 1; + + /* default values */ + rx_ctx.lrxqthresh = 2; + rx_ctx.crcstrip = 1; + rx_ctx.prefena = 1; + rx_ctx.l2tsel = 1; + + /* clear the context in the HMC */ + ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to clear VF LAN Rx queue context %d, error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_param; + } + + /* set the context in the HMC */ + ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to set VF LAN Rx queue context %d error: %d\n", + pf_queue_id, ret); + ret = -ENOENT; + goto error_param; + } + +error_param: + return ret; +} + +/** + * i40e_alloc_vsi_res + * @vf: pointer to the VF info + * @type: type of VSI to allocate + * + * alloc VF vsi context & resources + **/ +static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) +{ + struct i40e_mac_filter *f = NULL; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi; + int ret = 0; + + vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); + + if (!vsi) { + dev_err(&pf->pdev->dev, + "add vsi failed for VF %d, aq_err %d\n", + vf->vf_id, pf->hw.aq.asq_last_status); + ret = -ENOENT; + goto error_alloc_vsi_res; + } + if (type == I40E_VSI_SRIOV) { + u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + vf->lan_vsi_idx = vsi->idx; + vf->lan_vsi_id = vsi->id; + /* If the port VLAN has been configured and then the + * VF driver was removed then the VSI port VLAN + * configuration was destroyed. Check if there is + * a port VLAN and restore the VSI configuration if + * needed. + */ + if (vf->port_vlan_id) + i40e_vsi_add_pvid(vsi, vf->port_vlan_id); + f = i40e_add_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id, true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not allocate VF MAC addr\n"); + f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, + true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not allocate VF broadcast filter\n"); + } + + /* program mac filter */ + ret = i40e_sync_vsi_filters(vsi); + if (ret) + dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); + + /* Set VF bandwidth if specified */ + if (vf->tx_rate) { + ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, + vf->tx_rate / 50, 0, NULL); + if (ret) + dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", + vf->vf_id, ret); + } + +error_alloc_vsi_res: + return ret; +} + +/** + * i40e_enable_vf_mappings + * @vf: pointer to the VF info + * + * enable VF mappings + **/ +static void i40e_enable_vf_mappings(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg, total_queue_pairs = 0; + int j; + + /* Tell the hardware we're using noncontiguous mapping. 
HW requires + * that VF queues be mapped using this method, even when they are + * contiguous in real life + */ + wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), + I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); + + /* enable VF vplan_qtable mappings */ + reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; + wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); + + /* map PF queues to VF queues */ + for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); + reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); + wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); + total_queue_pairs++; + } + + /* map PF queues to VSI */ + for (j = 0; j < 7; j++) { + if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { + reg = 0x07FF07FF; /* unused */ + } else { + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, + j * 2); + reg = qid; + qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, + (j * 2) + 1); + reg |= qid << 16; + } + wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg); + } + + i40e_flush(hw); +} + +/** + * i40e_disable_vf_mappings + * @vf: pointer to the VF info + * + * disable VF mappings + **/ +static void i40e_disable_vf_mappings(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + int i; + + /* disable qp mappings */ + wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0); + for (i = 0; i < I40E_MAX_VSI_QP; i++) + wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id), + I40E_QUEUE_END_OF_LIST); + i40e_flush(hw); +} + +/** + * i40e_free_vf_res + * @vf: pointer to the VF info + * + * free VF resources + **/ +static void i40e_free_vf_res(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg_idx, reg; + int i, msix_vf; + + /* free vsi & disconnect it from the parent uplink */ + if (vf->lan_vsi_idx) { + i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); + vf->lan_vsi_idx = 0; + vf->lan_vsi_id = 0; + } + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + + /* disable interrupts so the VF starts in a known state */ + for (i = 0; i < msix_vf; i++) { + /* format is same for both registers */ + if (0 == i) + reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); + else + reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * + (vf->vf_id)) + + (i - 1)); + wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + i40e_flush(hw); + } + + /* clear the irq settings */ + for (i = 0; i < msix_vf; i++) { + /* format is same for both registers */ + if (0 == i) + reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); + else + reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * + (vf->vf_id)) + + (i - 1)); + reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); + wr32(hw, reg_idx, reg); + i40e_flush(hw); + } + /* reset some of the state varibles keeping + * track of the resources + */ + vf->num_queue_pairs = 0; + vf->vf_states = 0; +} + +/** + * i40e_alloc_vf_res + * @vf: pointer to the VF info + * + * allocate VF resources + **/ +static int i40e_alloc_vf_res(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + int total_queue_pairs = 0; + int ret; + + /* allocate hw vsi context & associated resources */ + ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); + if (ret) + goto error_alloc; + total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + + /* store the total qps number for the runtime + * VF req validation + */ + vf->num_queue_pairs = total_queue_pairs; + + /* VF is now completely initialized */ + set_bit(I40E_VF_STAT_INIT, &vf->vf_states); + +error_alloc: + 
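/* on failure, release any VF resources that were allocated */ +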
if (ret) + i40e_free_vf_res(vf); + + return ret; +} + +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_MASK 0x20 +/** + * i40e_quiesce_vf_pci + * @vf: pointer to the VF structure + * + * Wait for VF PCI transactions to be cleared after reset. Returns -EIO + * if the transactions never clear. + **/ +static int i40e_quiesce_vf_pci(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + int vf_abs_id, i; + u32 reg; + + vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + + wr32(hw, I40E_PF_PCI_CIAA, + VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); + for (i = 0; i < 100; i++) { + reg = rd32(hw, I40E_PF_PCI_CIAD); + if ((reg & VF_TRANS_PENDING_MASK) == 0) + return 0; + udelay(1); + } + return -EIO; +} + +/** + * i40e_reset_vf + * @vf: pointer to the VF structure + * @flr: VFLR was issued or not + * + * reset the VF + **/ +void i40e_reset_vf(struct i40e_vf *vf, bool flr) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + bool rsd = false; + int i; + u32 reg; + + if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + return; + + /* warn the VF */ + clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + + /* In the case of a VFLR, the HW has already reset the VF and we + * just need to clean up, so don't hit the VFRTRIG register. + */ + if (!flr) { + /* reset VF using VPGEN_VFRTRIG reg */ + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + i40e_flush(hw); + } + + if (i40e_quiesce_vf_pci(vf)) + dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", + vf->vf_id); + + /* poll VPGEN_VFRSTAT reg to make sure + * that reset is complete + */ + for (i = 0; i < 10; i++) { + /* VF reset requires driver to first reset the VF and then + * poll the status register to make sure that the reset + * completed successfully. Due to internal HW FIFO flushes, + * we must wait 10ms before the register will be valid. 
+ */ + usleep_range(10000, 20000); + reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); + if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { + rsd = true; + break; + } + } + + if (flr) + usleep_range(10000, 20000); + + if (!rsd) + dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", + vf->vf_id); + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); + /* clear the reset bit in the VPGEN_VFRTRIG reg */ + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + + /* On initial reset, we won't have any queues */ + if (vf->lan_vsi_idx == 0) + goto complete_reset; + + i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false); +complete_reset: + /* reallocate VF resources to reset the VSI state */ + i40e_free_vf_res(vf); + i40e_alloc_vf_res(vf); + i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + + /* tell the VF the reset is done */ + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + i40e_flush(hw); + clear_bit(__I40E_VF_DISABLE, &pf->state); +} + +/** + * i40e_free_vfs + * @pf: pointer to the PF structure + * + * free VF resources + **/ +void i40e_free_vfs(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + u32 reg_idx, bit_idx; + int i, tmp, vf_id; + + if (!pf->vf) + return; + while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) + usleep_range(1000, 2000); + + for (i = 0; i < pf->num_alloc_vfs; i++) + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], + false); + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + + msleep(20); /* let any messages in transit get finished up */ + + /* free up VF resources */ + tmp = pf->num_alloc_vfs; + pf->num_alloc_vfs = 0; + for (i = 0; i < tmp; i++) { + if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) + i40e_free_vf_res(&pf->vf[i]); + /* disable qp mappings */ + i40e_disable_vf_mappings(&pf->vf[i]); + } + + kfree(pf->vf); + pf->vf = NULL; + + /* This check is for when the driver is unloaded while VFs are + * assigned. Setting the number of VFs to 0 through sysfs is caught + * before this function ever gets called. + */ + if (!pci_vfs_assigned(pf->pdev)) { + /* Acknowledge VFLR for all VFS. Without this, VFs will fail to + * work correctly when SR-IOV gets re-enabled. + */ + for (vf_id = 0; vf_id < tmp; vf_id++) { + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + } + } + clear_bit(__I40E_VF_DISABLE, &pf->state); +} + +#ifdef CONFIG_PCI_IOV +/** + * i40e_alloc_vfs + * @pf: pointer to the PF structure + * @num_alloc_vfs: number of VFs to allocate + * + * allocate VF resources + **/ +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) +{ + struct i40e_vf *vfs; + int i, ret = 0; + + /* Disable interrupt 0 so we don't try to handle the VFLR. 
*/ + i40e_irq_dynamic_disable_icr0(pf); + + /* Check to see if we're just allocating resources for extant VFs */ + if (pci_num_vf(pf->pdev) != num_alloc_vfs) { + ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to enable SR-IOV, error %d.\n", ret); + pf->num_alloc_vfs = 0; + goto err_iov; + } + } + /* allocate memory */ + vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); + if (!vfs) { + ret = -ENOMEM; + goto err_alloc; + } + pf->vf = vfs; + + /* apply default profile */ + for (i = 0; i < num_alloc_vfs; i++) { + vfs[i].pf = pf; + vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB; + vfs[i].vf_id = i; + + /* assign default capabilities */ + set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); + vfs[i].spoofchk = true; + /* VF resources get allocated during reset */ + i40e_reset_vf(&vfs[i], false); + + /* enable VF vplan_qtable mappings */ + i40e_enable_vf_mappings(&vfs[i]); + } + pf->num_alloc_vfs = num_alloc_vfs; + +err_alloc: + if (ret) + i40e_free_vfs(pf); +err_iov: + /* Re-enable interrupt 0. */ + i40e_irq_dynamic_enable_icr0(pf); + return ret; +} + +#endif +/** + * i40e_pci_sriov_enable + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * Enable or change the number of VFs + **/ +static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct i40e_pf *pf = pci_get_drvdata(pdev); + int pre_existing_vfs = pci_num_vf(pdev); + int err = 0; + + dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + i40e_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + if (num_vfs > pf->num_req_vfs) { + err = -EPERM; + goto err_out; + } + + err = i40e_alloc_vfs(pf, num_vfs); + if (err) { + dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err); + goto err_out; + } + +out: + return num_vfs; + +err_out: + return err; +#endif + return 0; +} + +/** + * i40e_pci_sriov_configure + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * Enable or change the number of VFs. Called when the user updates the number + * of VFs in sysfs. 
+ **/ +int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + if (num_vfs) { + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } + return i40e_pci_sriov_enable(pdev, num_vfs); + } + + if (!pci_vfs_assigned(pf->pdev)) { + i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } else { + dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); + return -EINVAL; + } + return 0; +} + +/***********************virtual channel routines******************/ + +/** + * i40e_vc_send_msg_to_vf + * @vf: pointer to the VF info + * @v_opcode: virtual channel opcode + * @v_retval: virtual channel return value + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * send msg to VF + **/ +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + struct i40e_pf *pf; + struct i40e_hw *hw; + int abs_vf_id; + i40e_status aq_ret; + + /* validate the request */ + if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) + return -EINVAL; + + pf = vf->pf; + hw = &pf->hw; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + /* single place to detect unsuccessful return values */ + if (v_retval) { + vf->num_invalid_msgs++; + dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", + v_opcode, v_retval); + if (vf->num_invalid_msgs > + I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { + dev_err(&pf->pdev->dev, + "Number of invalid messages exceeded for VF %d\n", + vf->vf_id); + dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); + set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + } + } else { + vf->num_valid_msgs++; + } + + aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, + msg, msglen, NULL); + if (aq_ret) { + dev_err(&pf->pdev->dev, + "Unable to send the message to VF %d aq_err %d\n", + vf->vf_id, pf->hw.aq.asq_last_status); + return -EIO; + } + + return 0; +} + +/** + * i40e_vc_send_resp_to_vf + * @vf: pointer to the VF info + * @opcode: operation code + * @retval: return value + * + * send resp msg to VF + **/ +static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, + enum i40e_virtchnl_ops opcode, + i40e_status retval) +{ + return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); +} + +/** + * i40e_vc_get_version_msg + * @vf: pointer to the VF info + * + * called from the VF to request the API version used by the PF + **/ +static int i40e_vc_get_version_msg(struct i40e_vf *vf) +{ + struct i40e_virtchnl_version_info info = { + I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR + }; + + return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, + I40E_SUCCESS, (u8 *)&info, + sizeof(struct + i40e_virtchnl_version_info)); +} + +/** + * i40e_vc_get_vf_resources_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to request its resources + **/ +static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) +{ + struct i40e_virtchnl_vf_resource *vfres = NULL; + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + struct i40e_vsi *vsi; + int i = 0, len = 0; + int num_vsis = 1; + int ret; + + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + len = (sizeof(struct i40e_virtchnl_vf_resource) + + sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis); + + vfres = 
kzalloc(len, GFP_KERNEL); + if (!vfres) { + aq_ret = I40E_ERR_NO_MEMORY; + len = 0; + goto err; + } + + vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi->info.pvid) + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + + vfres->num_vsis = num_vsis; + vfres->num_queue_pairs = vf->num_queue_pairs; + vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; + if (vf->lan_vsi_idx) { + vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; + vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; + vfres->vsi_res[i].num_queue_pairs = + pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + memcpy(vfres->vsi_res[i].default_mac_addr, + vf->default_lan_addr.addr, ETH_ALEN); + i++; + } + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + +err: + /* send the response back to the VF */ + ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + aq_ret, (u8 *)vfres, len); + + kfree(vfres); + return ret; +} + +/** + * i40e_vc_reset_vf_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF + **/ +static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) +{ + if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + i40e_reset_vf(vf, false); +} + +/** + * i40e_vc_config_promiscuous_mode_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to configure the promiscuous mode of + * VF vsis + **/ +static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, + u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_promisc_info *info = + (struct i40e_virtchnl_promisc_info *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + struct i40e_vsi *vsi; + bool allmulti = false; + i40e_status aq_ret; + + vsi = i40e_find_vsi_from_id(pf, info->vsi_id); + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || + (vsi->type != I40E_VSI_FCOE)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) + allmulti = true; + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + allmulti, NULL); + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + aq_ret); +} + +/** + * i40e_vc_config_queues_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to configure the rx/tx + * queues + **/ +static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_vsi_queue_config_info *qci = + (struct i40e_virtchnl_vsi_queue_config_info *)msg; + struct i40e_virtchnl_queue_pair_info *qpi; + struct i40e_pf *pf = vf->pf; + u16 vsi_id, vsi_queue_id; + i40e_status aq_ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + vsi_id = qci->vsi_id; + if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + for (i = 0; i < qci->num_queue_pairs; i++) { + qpi = &qci->qpair[i]; + vsi_queue_id = qpi->txq.queue_id; + if ((qpi->txq.vsi_id != vsi_id) || + (qpi->rxq.vsi_id != vsi_id) || + (qpi->rxq.queue_id != vsi_queue_id) || + !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { + aq_ret = I40E_ERR_PARAM; + goto 
error_param; + } + + if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id, + &qpi->rxq) || + i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id, + &qpi->txq)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + /* set vsi num_queue_pairs in use to num configured by VF */ + pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + aq_ret); +} + +/** + * i40e_vc_config_irq_map_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to configure the irq to + * queue map + **/ +static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_irq_map_info *irqmap_info = + (struct i40e_virtchnl_irq_map_info *)msg; + struct i40e_virtchnl_vector_map *map; + u16 vsi_id, vsi_queue_id, vector_id; + i40e_status aq_ret = 0; + unsigned long tempmap; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < irqmap_info->num_vectors; i++) { + map = &irqmap_info->vecmap[i]; + + vector_id = map->vector_id; + vsi_id = map->vsi_id; + /* validate msg params */ + if (!i40e_vc_isvalid_vector_id(vf, vector_id) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + /* lookout for the invalid queue index */ + tempmap = map->rxq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, + vsi_queue_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + + tempmap = map->txq_map; + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { + if (!i40e_vc_isvalid_queue_id(vf, vsi_id, + vsi_queue_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + + i40e_config_irq_link_list(vf, vsi_id, map); + } +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + aq_ret); +} + +/** + * i40e_vc_enable_queues_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to enable all or specific queue(s) + **/ +static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_queue_select *vqs = + (struct i40e_virtchnl_queue_select *)msg; + struct i40e_pf *pf = vf->pf; + u16 vsi_id = vqs->vsi_id; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true)) + aq_ret = I40E_ERR_TIMEOUT; +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + aq_ret); +} + +/** + * i40e_vc_disable_queues_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to disable all or specific + * queue(s) + **/ +static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_queue_select *vqs = + (struct i40e_virtchnl_queue_select *)msg; + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + 
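/* only an active VF may disable its queues */ +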
aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false)) + aq_ret = I40E_ERR_TIMEOUT; + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + aq_ret); +} + +/** + * i40e_vc_get_stats_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * called from the VF to get vsi stats + **/ +static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_queue_select *vqs = + (struct i40e_virtchnl_queue_select *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_eth_stats stats; + i40e_status aq_ret = 0; + struct i40e_vsi *vsi; + + memset(&stats, 0, sizeof(struct i40e_eth_stats)); + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + i40e_update_eth_stats(vsi); + stats = vsi->eth_stats; + +error_param: + /* send the response back to the VF */ + return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, + (u8 *)&stats, sizeof(stats)); +} + +/** + * i40e_check_vf_permission + * @vf: pointer to the VF info + * @macaddr: pointer to the MAC Address being checked + * + * Check if the VF has permission to add or delete unicast MAC address + * filters and return error code -EPERM if not. Then check if the + * address filter requested is broadcast or zero and if so return + * an invalid MAC address error code. + **/ +static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) +{ + struct i40e_pf *pf = vf->pf; + int ret = 0; + + if (is_broadcast_ether_addr(macaddr) || + is_zero_ether_addr(macaddr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); + ret = I40E_ERR_INVALID_MAC_ADDR; + } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && + !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { + /* If the host VMM administrator has set the VF MAC address + * administratively via the ndo_set_vf_mac command then deny + * permission to the VF to add or delete unicast MAC addresses. + * The VF may request to set the MAC address filter already + * assigned to it so do not return an error in that case. 
+ */ + dev_err(&pf->pdev->dev, + "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); + ret = -EPERM; + } + return ret; +} + +/** + * i40e_vc_add_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * add guest mac address filter + **/ +static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_ether_addr_list *al = + (struct i40e_virtchnl_ether_addr_list *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = al->vsi_id; + i40e_status ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < al->num_elements; i++) { + ret = i40e_check_vf_permission(vf, al->list[i].addr); + if (ret) + goto error_param; + } + vsi = pf->vsi[vf->lan_vsi_idx]; + + /* add new addresses to the list */ + for (i = 0; i < al->num_elements; i++) { + struct i40e_mac_filter *f; + + f = i40e_find_mac(vsi, al->list[i].addr, true, false); + if (!f) { + if (i40e_is_vsi_in_vlan(vsi)) + f = i40e_put_mac_in_vlan(vsi, al->list[i].addr, + true, false); + else + f = i40e_add_filter(vsi, al->list[i].addr, -1, + true, false); + } + + if (!f) { + dev_err(&pf->pdev->dev, + "Unable to add VF MAC filter\n"); + ret = I40E_ERR_PARAM; + goto error_param; + } + } + + /* program the updated filter list */ + if (i40e_sync_vsi_filters(vsi)) + dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + ret); +} + +/** + * i40e_vc_del_mac_addr_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * remove guest mac address filter + **/ +static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_ether_addr_list *al = + (struct i40e_virtchnl_ether_addr_list *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = al->vsi_id; + i40e_status ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < al->num_elements; i++) { + if (is_broadcast_ether_addr(al->list[i].addr) || + is_zero_ether_addr(al->list[i].addr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", + al->list[i].addr); + ret = I40E_ERR_INVALID_MAC_ADDR; + goto error_param; + } + } + vsi = pf->vsi[vf->lan_vsi_idx]; + + /* delete addresses from the list */ + for (i = 0; i < al->num_elements; i++) + i40e_del_filter(vsi, al->list[i].addr, + I40E_VLAN_ANY, true, false); + + /* program the updated filter list */ + if (i40e_sync_vsi_filters(vsi)) + dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + ret); +} + +/** + * i40e_vc_add_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * program guest vlan id + **/ +static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_vlan_filter_list *vfl = + (struct i40e_virtchnl_vlan_filter_list *)msg; + struct 
i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vfl->vsi_id; + i40e_status aq_ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + if (vfl->vlan_id[i] > I40E_MAX_VLANID) { + aq_ret = I40E_ERR_PARAM; + dev_err(&pf->pdev->dev, + "invalid VF VLAN id %d\n", vfl->vlan_id[i]); + goto error_param; + } + } + vsi = pf->vsi[vf->lan_vsi_idx]; + if (vsi->info.pvid) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + i40e_vlan_stripping_enable(vsi); + for (i = 0; i < vfl->num_elements; i++) { + /* add new VLAN filter */ + int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); + if (ret) + dev_err(&pf->pdev->dev, + "Unable to add VF vlan filter %d, error %d\n", + vfl->vlan_id[i], ret); + } + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); +} + +/** + * i40e_vc_remove_vlan_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * remove programmed guest vlan id + **/ +static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_vlan_filter_list *vfl = + (struct i40e_virtchnl_vlan_filter_list *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vfl->vsi_id; + i40e_status aq_ret = 0; + int i; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + if (vfl->vlan_id[i] > I40E_MAX_VLANID) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + if (vsi->info.pvid) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + + for (i = 0; i < vfl->num_elements; i++) { + int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); + if (ret) + dev_err(&pf->pdev->dev, + "Unable to delete VF vlan filter %d, error %d\n", + vfl->vlan_id[i], ret); + } + +error_param: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); +} + +/** + * i40e_vc_validate_vf_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * @msghndl: msg handle + * + * validate msg + **/ +static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len; + + /* Check if VF is disabled. */ + if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) + return I40E_ERR_PARAM; + + /* Validate message length. 
*/ + switch (v_opcode) { + case I40E_VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct i40e_virtchnl_version_info); + break; + case I40E_VIRTCHNL_OP_RESET_VF: + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + valid_len = 0; + break; + case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct i40e_virtchnl_txq_info); + break; + case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct i40e_virtchnl_rxq_info); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct i40e_virtchnl_vsi_queue_config_info *vqc = + (struct i40e_virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + i40e_virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct i40e_virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct i40e_virtchnl_irq_map_info *vimi = + (struct i40e_virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct i40e_virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct i40e_virtchnl_queue_select); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct i40e_virtchnl_ether_addr_list *veal = + (struct i40e_virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct i40e_virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + case I40E_VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct i40e_virtchnl_vlan_filter_list *vfl = + (struct i40e_virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct i40e_virtchnl_promisc_info); + break; + case I40E_VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct i40e_virtchnl_queue_select); + break; + /* These are always errors coming from the VF. 
*/ + case I40E_VIRTCHNL_OP_EVENT: + case I40E_VIRTCHNL_OP_UNKNOWN: + default: + return -EPERM; + break; + } + /* few more checks */ + if ((valid_len != msglen) || (err_msg_format)) { + i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); + return -EINVAL; + } else { + return 0; + } +} + +/** + * i40e_vc_process_vf_msg + * @pf: pointer to the PF structure + * @vf_id: source VF id + * @msg: pointer to the msg buffer + * @msglen: msg length + * @msghndl: msg handle + * + * called from the common aeq/arq handler to + * process request from VF + **/ +int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + struct i40e_hw *hw = &pf->hw; + unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; + struct i40e_vf *vf; + int ret; + + pf->vf_aq_requests++; + if (local_vf_id >= pf->num_alloc_vfs) + return -EINVAL; + vf = &(pf->vf[local_vf_id]); + /* perform basic checks on the msg */ + ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); + + if (ret) { + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", + local_vf_id, v_opcode, msglen); + return ret; + } + + switch (v_opcode) { + case I40E_VIRTCHNL_OP_VERSION: + ret = i40e_vc_get_version_msg(vf); + break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + ret = i40e_vc_get_vf_resources_msg(vf); + break; + case I40E_VIRTCHNL_OP_RESET_VF: + i40e_vc_reset_vf_msg(vf); + ret = 0; + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + ret = i40e_vc_config_queues_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + ret = i40e_vc_enable_queues_msg(vf, msg, msglen); + i40e_vc_notify_vf_link_state(vf); + break; + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + ret = i40e_vc_disable_queues_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + ret = i40e_vc_add_vlan_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_GET_STATS: + ret = i40e_vc_get_stats_msg(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_UNKNOWN: + default: + dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", + v_opcode, local_vf_id); + ret = i40e_vc_send_resp_to_vf(vf, v_opcode, + I40E_ERR_NOT_IMPLEMENTED); + break; + } + + return ret; +} + +/** + * i40e_vc_process_vflr_event + * @pf: pointer to the PF structure + * + * called from the vlfr irq handler to + * free up VF resources and state variables + **/ +int i40e_vc_process_vflr_event(struct i40e_pf *pf) +{ + u32 reg, reg_idx, bit_idx, vf_id; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + + if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + return 0; + + /* re-enable vflr interrupt cause */ + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); + i40e_flush(hw); + + clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); + for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + /* read GLGEN_VFLRSTAT register to find out the 
flr VFs */ + vf = &pf->vf[vf_id]; + reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); + if (reg & (1 << bit_idx)) { + /* clear the bit in GLGEN_VFLRSTAT */ + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + + if (!test_bit(__I40E_DOWN, &pf->state)) + i40e_reset_vf(vf, true); + } + } + + return 0; +} + +/** + * i40e_ndo_set_vf_mac + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: mac address + * + * program VF mac address + **/ +int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_mac_filter *f; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, + "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_param; + } + + vf = &(pf->vf[vf_id]); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, + "Uninitialized VF %d\n", vf_id); + ret = -EINVAL; + goto error_param; + } + + if (!is_valid_ether_addr(mac)) { + dev_err(&pf->pdev->dev, + "Invalid VF ethernet address\n"); + ret = -EINVAL; + goto error_param; + } + + /* delete the temporary mac address */ + i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, + true, false); + + /* Delete all the filters for this VSI - we're going to kill it + * anyway. + */ + list_for_each_entry(f, &vsi->mac_filter_list, list) + i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); + + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); + /* program mac filter */ + if (i40e_sync_vsi_filters(vsi)) { + dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); + ret = -EIO; + goto error_param; + } + ether_addr_copy(vf->default_lan_addr.addr, mac); + vf->pf_set_mac = true; + /* Force the VF driver stop so it has to reload with new MAC address */ + i40e_vc_disable_vf(pf, vf); + dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); + +error_param: + return ret; +} + +/** + * i40e_ndo_set_vf_port_vlan + * @netdev: network interface device structure + * @vf_id: VF identifier + * @vlan_id: mac address + * @qos: priority setting + * + * program VF vlan id and/or qos + **/ +int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, + int vf_id, u16 vlan_id, u8 qos) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_pvid; + } + + if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) { + dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); + ret = -EINVAL; + goto error_pvid; + } + + vf = &(pf->vf[vf_id]); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); + ret = -EINVAL; + goto error_pvid; + } + + if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { + dev_err(&pf->pdev->dev, + "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", + vf_id); + /* Administrator Error - knock the VF offline until he does + * the right thing by reconfiguring his network correctly + * and then reloading the VF driver. 
+ */ + i40e_vc_disable_vf(pf, vf); + } + + /* Check for condition where there was already a port VLAN ID + * filter set and now it is being deleted by setting it to zero. + * Additionally check for the condition where there was a port + * VLAN but now there is a new and different port VLAN being set. + * Before deleting all the old VLAN filters we must add new ones + * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our + * MAC addresses deleted. + */ + if ((!(vlan_id || qos) || + (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) && + vsi->info.pvid) + ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); + + if (vsi->info.pvid) { + /* kill old VLAN */ + ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & + VLAN_VID_MASK)); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "remove VLAN failed, ret=%d, aq_err=%d\n", + ret, pf->hw.aq.asq_last_status); + } + } + if (vlan_id || qos) + ret = i40e_vsi_add_pvid(vsi, + vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); + else + i40e_vsi_remove_pvid(vsi); + + if (vlan_id) { + dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", + vlan_id, qos, vf_id); + + /* add new VLAN filter */ + ret = i40e_vsi_add_vlan(vsi, vlan_id); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add VF VLAN failed, ret=%d aq_err=%d\n", ret, + vsi->back->hw.aq.asq_last_status); + goto error_pvid; + } + /* Kill non-vlan MAC filters - ignore error return since + * there might not be any non-vlan MAC filters. + */ + i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); + } + + if (ret) { + dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); + goto error_pvid; + } + /* The Port VLAN needs to be saved across resets the same as the + * default LAN MAC address. + */ + vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); + ret = 0; + +error_pvid: + return ret; +} + +#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ +#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */ +/** + * i40e_ndo_set_vf_bw + * @netdev: network interface device structure + * @vf_id: VF identifier + * @tx_rate: Tx rate + * + * configure VF Tx rate + **/ +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vsi *vsi; + struct i40e_vf *vf; + int speed = 0; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); + ret = -EINVAL; + goto error; + } + + if (min_tx_rate) { + dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", + min_tx_rate, vf_id); + return -EINVAL; + } + + vf = &(pf->vf[vf_id]); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); + ret = -EINVAL; + goto error; + } + + switch (pf->hw.phy.link_info.link_speed) { + case I40E_LINK_SPEED_40GB: + speed = 40000; + break; + case I40E_LINK_SPEED_10GB: + speed = 10000; + break; + case I40E_LINK_SPEED_1GB: + speed = 1000; + break; + default: + break; + } + + if (max_tx_rate > speed) { + dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", + max_tx_rate, vf->vf_id); + ret = -EINVAL; + goto error; + } + + if ((max_tx_rate < 50) && (max_tx_rate > 0)) { + dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = 50; + } + + /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ + ret = 
i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, + max_tx_rate / I40E_BW_CREDIT_DIVISOR, + I40E_MAX_BW_INACTIVE_ACCUM, NULL); + if (ret) { + dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", + ret); + ret = -EIO; + goto error; + } + vf->tx_rate = max_tx_rate; +error: + return ret; +} + +/** + * i40e_ndo_get_vf_config + * @netdev: network interface device structure + * @vf_id: VF identifier + * @ivi: VF configuration structure + * + * return VF configuration + **/ +int i40e_ndo_get_vf_config(struct net_device *netdev, + int vf_id, struct ifla_vf_info *ivi) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_param; + } + + vf = &(pf->vf[vf_id]); + /* first vsi is always the LAN vsi */ + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { + dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); + ret = -EINVAL; + goto error_param; + } + + ivi->vf = vf_id; + + memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); + + ivi->max_tx_rate = vf->tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; + ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >> + I40E_VLAN_PRIORITY_SHIFT; + if (vf->link_forced == false) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up == true) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + ivi->spoofchk = vf->spoofchk; + ret = 0; + +error_param: + return ret; +} + +/** + * i40e_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_virtchnl_pf_event pfe; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + int abs_vf_id; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_out; + } + + vf = &pf->vf[vf_id]; + abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + pfe.event_data.link_event.link_status = + pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = + pf->hw.phy.link_info.link_speed; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + pfe.event_data.link_event.link_status = true; + pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; + break; + default: + ret = -EINVAL; + goto error_out; + } + /* Notify the VF of its new link state */ + i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); + +error_out: + return ret; +} + +/** + * i40e_ndo_set_vf_spoofchk + * @netdev: network 
interface device structure + * @vf_id: VF identifier + * @enable: flag to enable or disable feature + * + * Enable or disable VF spoof checking + **/ +int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_vsi_context ctxt; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto out; + } + + vf = &(pf->vf[vf_id]); + + if (enable == vf->spoofchk) + goto out; + + vf->spoofchk = enable; + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); + if (enable) + ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", + ret); + ret = -EIO; + } +out: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h new file mode 100644 index 000000000..09043c1aa --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -0,0 +1,130 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_VIRTCHNL_PF_H_ +#define _I40E_VIRTCHNL_PF_H_ + +#include "i40e.h" + +#define I40E_MAX_MACVLAN_FILTERS 256 +#define I40E_MAX_VLAN_FILTERS 256 +#define I40E_MAX_VLANID 4095 + +#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 + +#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3 +#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 + +#define I40E_VLAN_PRIORITY_SHIFT 12 +#define I40E_VLAN_MASK 0xFFF +#define I40E_PRIORITY_MASK 0x7000 + +/* Various queue ctrls */ +enum i40e_queue_ctrl { + I40E_QUEUE_CTRL_UNKNOWN = 0, + I40E_QUEUE_CTRL_ENABLE, + I40E_QUEUE_CTRL_ENABLECHECK, + I40E_QUEUE_CTRL_DISABLE, + I40E_QUEUE_CTRL_DISABLECHECK, + I40E_QUEUE_CTRL_FASTDISABLE, + I40E_QUEUE_CTRL_FASTDISABLECHECK, +}; + +/* VF states */ +enum i40e_vf_states { + I40E_VF_STAT_INIT = 0, + I40E_VF_STAT_ACTIVE, + I40E_VF_STAT_FCOEENA, + I40E_VF_STAT_DISABLED, +}; + +/* VF capabilities */ +enum i40e_vf_capabilities { + I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0, + I40E_VIRTCHNL_VF_CAP_L2, +}; + +/* VF information structure */ +struct i40e_vf { + struct i40e_pf *pf; + + /* VF id in the PF space */ + u16 vf_id; + /* all VF vsis connect to the same parent */ + enum i40e_switch_element_types parent_type; + + /* VF Port Extender (PE) stag if used */ + u16 stag; + + struct i40e_virtchnl_ether_addr default_lan_addr; + struct i40e_virtchnl_ether_addr default_fcoe_addr; + u16 port_vlan_id; + bool pf_set_mac; /* The VMM admin set the VF MAC address */ + + /* VSI indices - actual VSI pointers are maintained in the PF structure + * When assigned, these will be non-zero, because VSI 0 is always + * the main LAN VSI for the PF. + */ + u8 lan_vsi_idx; /* index into PF struct */ + u8 lan_vsi_id; /* ID as used by firmware */ + + u8 num_queue_pairs; /* num of qps assigned to VF vsis */ + u64 num_mdd_events; /* num of mdd events detected */ + u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */ + u64 num_valid_msgs; /* num of valid msgs detected */ + + unsigned long vf_caps; /* vf's adv. 
capabilities */ + unsigned long vf_states; /* vf's runtime states */ + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ + bool link_forced; + bool link_up; /* only valid if VF link is forced */ + bool spoofchk; +}; + +void i40e_free_vfs(struct i40e_pf *pf); +int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); +int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen); +int i40e_vc_process_vflr_event(struct i40e_pf *pf); +void i40e_reset_vf(struct i40e_vf *vf, bool flr); +void i40e_vc_notify_vf_reset(struct i40e_vf *vf); + +/* VF configuration related iplink handlers */ +int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, + int vf_id, u16 vlan_id, u8 qos); +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate); +int i40e_ndo_get_vf_config(struct net_device *netdev, + int vf_id, struct ifla_vf_info *ivi); +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); +int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); + +void i40e_vc_notify_link_state(struct i40e_pf *pf); +void i40e_vc_notify_reset(struct i40e_pf *pf); + +#endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile new file mode 100644 index 000000000..3a423836a --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/Makefile @@ -0,0 +1,36 @@ +################################################################################ +# +# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver +# Copyright(c) 2013 - 2014 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +## Makefile for the Intel(R) 40GbE VF driver +# +# + +obj-$(CONFIG_I40EVF) += i40evf.o + +i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ + i40e_txrx.o i40e_common.o i40e_adminq.o + diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c new file mode 100644 index 000000000..c1d25f8c1 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -0,0 +1,979 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_status.h" +#include "i40e_type.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" + +/** + * i40e_is_nvm_update_op - return true if this is an NVM update operation + * @desc: API request descriptor + **/ +static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) +{ + return (desc->opcode == i40e_aqc_opc_nvm_erase) || + (desc->opcode == i40e_aqc_opc_nvm_update); +} + +/** + * i40e_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_asq and alloc_arq functions have already been called + **/ +static void i40e_adminq_init_regs(struct i40e_hw *hw) +{ + /* set head and tail registers in our local struct */ + if (i40e_is_vf(hw)) { + hw->aq.asq.tail = I40E_VF_ATQT1; + hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; + hw->aq.asq.bal = I40E_VF_ATQBAL1; + hw->aq.asq.bah = I40E_VF_ATQBAH1; + hw->aq.arq.tail = I40E_VF_ARQT1; + hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; + hw->aq.arq.bal = I40E_VF_ARQBAL1; + hw->aq.arq.bah = I40E_VF_ARQBAH1; + } else { + hw->aq.asq.tail = I40E_PF_ATQT; + hw->aq.asq.head = I40E_PF_ATQH; + hw->aq.asq.len = I40E_PF_ATQLEN; + hw->aq.asq.bal = I40E_PF_ATQBAL; + hw->aq.asq.bah = I40E_PF_ATQBAH; + hw->aq.arq.tail = I40E_PF_ARQT; + hw->aq.arq.head = I40E_PF_ARQH; + hw->aq.arq.len = I40E_PF_ARQLEN; + hw->aq.arq.bal = I40E_PF_ARQBAL; + hw->aq.arq.bah = I40E_PF_ARQBAH; + } +} + +/** + * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + i40e_mem_atq_ring, + (hw->aq.num_asq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + return ret_code; + + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + (hw->aq.num_asq_entries * + sizeof(struct i40e_asq_cmd_details))); + if (ret_code) { + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + return ret_code; + } + + return ret_code; +} + +/** + * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + i40e_mem_arq_ring, + (hw->aq.num_arq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + + return ret_code; +} + +/** + * i40e_free_adminq_asq - Free Admin Queue send rings + * @hw: 
pointer to the hardware structure + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_asq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); +} + +/** + * i40e_free_adminq_arq - Free Admin Queue receive rings + * @hw: pointer to the hardware structure + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_arq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); +} + +/** + * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + + /* buffer_info structures do not need alignment */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_arq_bufs; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_arq_entries; i++) { + bi = &hw->aq.arq.r.arq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_arq_buf, + hw->aq.arq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_arq_bufs; + + /* now configure the descriptors for use */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = cpu_to_le16((u16)bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.external.addr_high = + cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = + cpu_to_le32(lower_32_bits(bi->pa)); + desc->params.external.param0 = 0; + desc->params.external.param1 = 0; + } + +alloc_arq_bufs: + return ret_code; + +unwind_alloc_arq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + + return ret_code; +} + +/** + * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_dma_mem *bi; + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_asq_bufs; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_asq_entries; i++) { + bi = &hw->aq.asq.r.asq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_asq_buf, + hw->aq.asq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_asq_bufs; + } +alloc_asq_bufs: + return ret_code; + +unwind_alloc_asq_bufs: + /* don't try to free the one that failed... 
*/ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + + return ret_code; +} + +/** + * i40e_free_arq_bufs - Free receive queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_arq_bufs(struct i40e_hw *hw) +{ + int i; + + /* free descriptors */ + for (i = 0; i < hw->aq.num_arq_entries; i++) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); +} + +/** + * i40e_free_asq_bufs - Free send queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_asq_bufs(struct i40e_hw *hw) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < hw->aq.num_asq_entries; i++) + if (hw->aq.asq.r.asq_bi[i].pa) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); +} + +/** + * i40e_config_asq_regs - configure ASQ registers + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the transmit queue + **/ +static i40e_status i40e_config_asq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); + wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa)); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.asq.bal); + if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_config_arq_regs - ARQ register configuration + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the receive (event queue) + **/ +static i40e_status i40e_config_arq_regs(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + u32 reg = 0; + + /* Clear Head and Tail */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + + /* set starting point */ + wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); + wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa)); + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + + /* Check one register to verify that config was applied */ + reg = rd32(hw, hw->aq.arq.bal); + if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + + return ret_code; +} + +/** + * i40e_init_asq - main initialization routine for ASQ + * @hw: pointer to the hardware structure + * + * This is the main initialization routine for the Admin Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static 
i40e_status i40e_init_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_asq_entries == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + hw->aq.asq.count = hw->aq.num_asq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_asq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_asq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_asq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success! */ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_asq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_init_arq - initialize ARQ + * @hw: pointer to the hardware structure + * + * The main initialization routine for the Admin Receive (Event) Queue. + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.arq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + hw->aq.arq.count = hw->aq.num_arq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_arq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_arq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + ret_code = i40e_config_arq_regs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* success! 
*/ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_arq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_asq - shutdown the ASQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Send Queue + **/ +static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); + wr32(hw, hw->aq.asq.bal, 0); + wr32(hw, hw->aq.asq.bah, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.asq_mutex); + + hw->aq.asq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_asq_bufs(hw); + + mutex_unlock(&hw->aq.asq_mutex); + + return ret_code; +} + +/** + * i40e_shutdown_arq - shutdown ARQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Receive Queue + **/ +static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); + wr32(hw, hw->aq.arq.bal, 0); + wr32(hw, hw->aq.arq.bah, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.arq_mutex); + + hw->aq.arq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_arq_bufs(hw); + + mutex_unlock(&hw->aq.arq_mutex); + + return ret_code; +} + +/** + * i40evf_init_adminq - main initialization routine for Admin Queue + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * - hw->aq.asq_buf_size + **/ +i40e_status i40evf_init_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code; + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.num_asq_entries == 0) || + (hw->aq.arq_buf_size == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + /* initialize locks */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + + /* Set up register offsets */ + i40e_adminq_init_regs(hw); + + /* setup ASQ command write back timeout */ + hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT; + + /* allocate the ASQ */ + ret_code = i40e_init_asq(hw); + if (ret_code) + goto init_adminq_destroy_locks; + + /* allocate the ARQ */ + ret_code = i40e_init_arq(hw); + if (ret_code) + goto init_adminq_free_asq; + + /* success! 
*/ + goto init_adminq_exit; + +init_adminq_free_asq: + i40e_shutdown_asq(hw); +init_adminq_destroy_locks: + +init_adminq_exit: + return ret_code; +} + +/** + * i40evf_shutdown_adminq - shutdown routine for the Admin Queue + * @hw: pointer to the hardware structure + **/ +i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (i40evf_check_asq_alive(hw)) + i40evf_aq_queue_shutdown(hw, true); + + i40e_shutdown_asq(hw); + i40e_shutdown_arq(hw); + + /* destroy the locks */ + + return ret_code; +} + +/** + * i40e_clean_asq - cleans Admin send queue + * @hw: pointer to the hardware structure + * + * returns the number of free desc + **/ +static u16 i40e_clean_asq(struct i40e_hw *hw) +{ + struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_asq_cmd_details *details; + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + while (rd32(hw, hw->aq.asq.head) != ntc) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "%s: ntc %d head %d.\n", __func__, ntc, + rd32(hw, hw->aq.asq.head)); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = + (I40E_ADMINQ_CALLBACK)details->callback; + desc_cb = *desc; + cb_func(hw, &desc_cb); + } + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + memset((void *)details, 0, + sizeof(struct i40e_asq_cmd_details)); + ntc++; + if (ntc == asq->count) + ntc = 0; + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + } + + asq->next_to_clean = ntc; + + return I40E_DESC_UNUSED(asq); +} + +/** + * i40evf_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. + **/ +bool i40evf_asq_done(struct i40e_hw *hw) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + +} + +/** + * i40evf_asq_send_command - send command to Admin Queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buff: buffer to use for indirect commands + * @buff_size: size of buffer for indirect commands + * @cmd_details: pointer to command details structure + * + * This is the main send command driver routine for the Admin Queue send + * queue. It runs the queue, cleans the queue, etc + **/ +i40e_status i40evf_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status = 0; + struct i40e_dma_mem *dma_buff = NULL; + struct i40e_asq_cmd_details *details; + struct i40e_aq_desc *desc_on_ring; + bool cmd_completed = false; + u16 retval = 0; + u32 val = 0; + + val = rd32(hw, hw->aq.asq.head); + if (val >= hw->aq.num_asq_entries) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: head overrun at %d\n", val); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_exit; + } + + if (hw->aq.asq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Admin queue not initialized.\n"); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_exit; + } + + details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + if (cmd_details) { + *details = *cmd_details; + + /* If the cmd_details are defined copy the cookie. 
The + * cpu_to_le32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + cpu_to_le32(upper_32_bits(details->cookie)); + desc->cookie_low = + cpu_to_le32(lower_32_bits(details->cookie)); + } + } else { + memset(details, 0, sizeof(struct i40e_asq_cmd_details)); + } + + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~cpu_to_le16(details->flags_dis); + desc->flags |= cpu_to_le16(details->flags_ena); + + mutex_lock(&hw->aq.asq_mutex); + + if (buff_size > hw->aq.asq_buf_size) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Invalid buffer size: %d.\n", + buff_size); + status = I40E_ERR_INVALID_SIZE; + goto asq_send_command_error; + } + + if (details->postpone && !details->async) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Async flag not set along with postpone flag"); + status = I40E_ERR_PARAM; + goto asq_send_command_error; + } + + /* call clean and check queue available function to reclaim the + * descriptors that were processed by FW, the function returns the + * number of desc available + */ + /* the clean function called here could be called in a separate thread + * in case of asynchronous completions + */ + if (i40e_clean_asq(hw) == 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Error queue is full.\n"); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + *desc_on_ring = *desc; + + /* if buff is not NULL assume indirect command */ + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + /* copy the user buff into the respective DMA buff */ + memcpy(dma_buff->va, buff, buff_size); + desc_on_ring->datalen = cpu_to_le16(buff_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.external.addr_high = + cpu_to_le32(upper_32_bits(dma_buff->pa)); + desc_on_ring->params.external.addr_low = + cpu_to_le32(lower_32_bits(dma_buff->pa)); + } + + /* bump the tail */ + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); + (hw->aq.asq.next_to_use)++; + if (hw->aq.asq.next_to_use == hw->aq.asq.count) + hw->aq.asq.next_to_use = 0; + if (!details->postpone) + wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + + /* if cmd_details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + u32 total_delay = 0; + + do { + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + if (i40evf_asq_done(hw)) + break; + usleep_range(1000, 2000); + total_delay++; + } while (total_delay < hw->aq.asq_cmd_timeout); + } + + /* if ready, copy the desc back to temp */ + if (i40evf_asq_done(hw)) { + *desc = *desc_on_ring; + if (buff != NULL) + memcpy(buff, dma_buff->va, buff_size); + retval = le16_to_cpu(desc->retval); + if (retval != 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Command completed with error 0x%X.\n", + retval); + + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + status = 0; + else + status = I40E_ERR_ADMIN_QUEUE_ERROR; + 
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + } + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: desc and buffer writeback:\n"); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, + buff_size); + + /* update the error if time out occurred */ + if ((!cmd_completed) && + (!details->async && !details->postpone)) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } + +asq_send_command_error: + mutex_unlock(&hw->aq.asq_mutex); +asq_send_command_exit: + return status; +} + +/** + * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + **/ +void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode) +{ + /* zero out the desc */ + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); +} + +/** + * i40evf_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. It can also return how many events are + * left to process through 'pending' + **/ +i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) +{ + i40e_status ret_code = 0; + u16 ntc = hw->aq.arq.next_to_clean; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* take the lock before we start messing with the ring */ + mutex_lock(&hw->aq.arq_mutex); + + /* set next_to_use to head */ + ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + goto clean_arq_element_out; + } + + /* now clean the next descriptor */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc_idx = ntc; + + flags = le16_to_cpu(desc->flags); + if (flags & I40E_AQ_FLAG_ERR) { + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.arq_last_status = + (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Event received with error 0x%X.\n", + hw->aq.arq_last_status); + } + + e->desc = *desc; + datalen = le16_to_cpu(desc->datalen); + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) + memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_len); + + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message + * size + */ + bi = &hw->aq.arq.r.arq_bi[ntc]; + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->datalen = cpu_to_le16((u16)bi->size); + desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); + + /* set tail = the last cleaned desc index. 
*/ + wr32(hw, hw->aq.arq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == hw->aq.num_arq_entries) + ntc = 0; + hw->aq.arq.next_to_clean = ntc; + hw->aq.arq.next_to_use = ntu; + +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending != NULL) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + mutex_unlock(&hw->aq.arq_mutex); + + return ret_code; +} + +void i40evf_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h new file mode 100644 index 000000000..ef43d68f6 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -0,0 +1,157 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_H_ +#define _I40E_ADMINQ_H_ + +#include "i40e_osdep.h" +#include "i40e_status.h" +#include "i40e_adminq_cmd.h" + +#define I40E_ADMINQ_DESC(R, i) \ + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + +#define I40E_ADMINQ_DESC_ALIGNMENT 4096 + +struct i40e_adminq_ring { + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ + + union { + struct i40e_dma_mem *asq_bi; + struct i40e_dma_mem *arq_bi; + } r; + + u16 count; /* Number of descriptors */ + u16 rx_buf_len; /* Admin Receive Queue buffer length */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; + u32 bah; + u32 bal; +}; + +/* ASQ transaction details */ +struct i40e_asq_cmd_details { + void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ + u64 cookie; + u16 flags_ena; + u16 flags_dis; + bool async; + bool postpone; +}; + +#define I40E_ADMINQ_DETAILS(R, i) \ + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) + +/* ARQ event information */ +struct i40e_arq_event_info { + struct i40e_aq_desc desc; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* Admin Queue information */ +struct i40e_adminq_info { + struct i40e_adminq_ring arq; /* receive queue */ + struct i40e_adminq_ring asq; /* send queue */ + u32 asq_cmd_timeout; /* send queue cmd write back timeout*/ + u16 num_arq_entries; /* receive queue depth */ + u16 num_asq_entries; /* send queue depth */ + u16 arq_buf_size; /* receive queue buffer size */ + u16 asq_buf_size; /* send queue buffer size */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ + u16 api_maj_ver; /* api major version */ + u16 api_min_ver; /* api minor version */ + bool nvm_release_on_done; + + struct mutex asq_mutex; /* Send queue lock */ + struct mutex arq_mutex; /* Receive queue lock */ + + /* last status values on send and receive queues */ + enum i40e_admin_queue_err asq_last_status; + enum i40e_admin_queue_err arq_last_status; +}; + +/** + * i40e_aq_rc_to_posix - convert errors to user-land codes + * aq_rc: AdminQ error code to convert + **/ +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) +{ + int aq_to_posix[] = { + 0, /* I40E_AQ_RC_OK */ + -EPERM, /* I40E_AQ_RC_EPERM */ + -ENOENT, /* I40E_AQ_RC_ENOENT */ + -ESRCH, /* I40E_AQ_RC_ESRCH */ + -EINTR, /* I40E_AQ_RC_EINTR */ + -EIO, /* I40E_AQ_RC_EIO */ + -ENXIO, /* I40E_AQ_RC_ENXIO */ + -E2BIG, /* I40E_AQ_RC_E2BIG */ + -EAGAIN, /* I40E_AQ_RC_EAGAIN */ + -ENOMEM, /* I40E_AQ_RC_ENOMEM */ + -EACCES, /* I40E_AQ_RC_EACCES */ + -EFAULT, /* I40E_AQ_RC_EFAULT */ + -EBUSY, /* I40E_AQ_RC_EBUSY */ + -EEXIST, /* I40E_AQ_RC_EEXIST */ + -EINVAL, /* I40E_AQ_RC_EINVAL */ + -ENOTTY, /* I40E_AQ_RC_ENOTTY */ + -ENOSPC, /* I40E_AQ_RC_ENOSPC */ + -ENOSYS, /* I40E_AQ_RC_ENOSYS */ + -ERANGE, /* I40E_AQ_RC_ERANGE */ + -EPIPE, /* I40E_AQ_RC_EFLUSHED */ + -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */ + -EROFS, /* I40E_AQ_RC_EMODE */ + -EFBIG, /* I40E_AQ_RC_EFBIG */ + }; + + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; + return aq_to_posix[aq_rc]; +} + +/* general information */ +#define 
I40E_AQ_LARGE_BUF 512 +#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ + +void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode); + +#endif /* _I40E_ADMINQ_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h new file mode 100644 index 000000000..e715bccfb --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -0,0 +1,2269 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. 
+ */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_A0_MINOR 0x0000 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + i40e_aqc_opc_set_cppm_configuration = 0x0103, + 
i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + + /* virtualization 
commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
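+ *
+ * For example, I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) below compiles only
+ * when sizeof(struct i40e_aqc_get_version) is exactly 16 bytes; any other size
+ * turns the generated enum initializer into a divide-by-zero constant
+ * expression and the build fails.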
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { + u8 pf_id; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_ISCSI 0x0022 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; + u8 reserved[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); + +/* Set NS Proxy Table Entry Command (indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +/* Switch configuration commands (0x02xx) */ + +/* Used by many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define 
I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and 
data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define 
I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* 
flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define 
I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_add_remove_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define 
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data); + +/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 
reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, + i40e_aqc_configure_switching_comp_ets_bw_limit_data); + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); + +/* Query Switching Component Bandwidth Allocation per Traffic Type + * (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 
0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ +}; + +I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); + +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_10GBASE_AOC = 0xC, + I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_1000BASE_SX = 0x1B, + I40E_PHY_TYPE_1000BASE_LX = 0x1C, + I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_MAX +}; + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) +}; + +struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + 
__le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; + u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define 
I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { + u8 command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ + I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* NVM Config Read (indirect 0x0704) */ +struct i40e_aqc_nvm_config_read { + __le16 cmd_flags; +#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 +#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + __le16 element_id_msw; /* MSWord of field ID */ + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write (indirect 0x0705) */ +struct i40e_aqc_nvm_config_write { + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +/* Used for 0x0704 as well as for 0x0705 commands */ +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ + (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) +#define 
I40E_AQ_ANVM_FEATURE 0 +#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +struct i40e_aqc_nvm_config_data_feature { + __le16 feature_id; +#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 +#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 +#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 + __le16 feature_options; + __le16 feature_selection; +}; + +I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); + +struct i40e_aqc_nvm_config_data_immediate_field { + __le32 field_id; + __le32 field_value; + __le16 field_options; + __le16 reserved; +}; + +I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +/* Alternate structure */ + +/* Direct write (direct 0x0900) + * Direct read (direct 0x0902) + */ +struct i40e_aqc_alternate_write { + __le32 address0; + __le32 data0; + __le32 address1; + __le32 data1; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); + +/* Indirect write (indirect 0x0901) + * Indirect read (indirect 0x0903) + */ + +struct i40e_aqc_alternate_ind_write { + __le32 address; + __le32 length; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); + +/* Done alternate write (direct 0x0904) + * uses i40e_aq_desc + */ +struct i40e_aqc_alternate_write_done { + __le16 cmd_flags; +#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 +#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 +#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 +#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); + +/* Set OEM mode (direct 0x0905) */ +struct i40e_aqc_alternate_set_mode { + __le32 mode; +#define I40E_AQ_ALTERNATE_MODE_NONE 0 +#define I40E_AQ_ALTERNATE_MODE_OEM 1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); + +/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ + +/* async events 0x10xx */ + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct i40e_aqc_lan_overflow { + __le32 prtdcb_rupto; + __le32 otx_ctl; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); + +/* Get LLDP MIB (indirect 0x0A00) */ +struct i40e_aqc_lldp_get_mib { + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) +/* TX pause flags use I40E_AQ_LINK_TX_* above */ + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); + +/* Configure LLDP MIB Change Event (direct 0x0A01) + * also used for the event (with type in the command field) + */ +struct i40e_aqc_lldp_update_mib { + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); + +/* Add 
LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct i40e_aqc_lldp_add_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); + +/* Update LLDP TLV (indirect 0x0A03) */ +struct i40e_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); + +/* Stop LLDP (direct 0x0A05) */ +struct i40e_aqc_lldp_stop { + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); + +/* Start LLDP (direct 0x0A06) */ + +struct i40e_aqc_lldp_start { + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); + +/* Apply MIB changes (0x0A07) + * uses the generic struc as it contains no data + */ + +/* Add Udp Tunnel command and completion (direct 0x0B00) */ +struct i40e_aqc_add_udp_tunnel { + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 + u8 reserved1[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); + +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + +/* remove UDP Tunnel command (0x0B01) */ +struct i40e_aqc_remove_udp_tunnel { + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); + +struct i40e_aqc_del_udp_tunnel_completion { + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); + +/* tunnel key structure 0x0B10 */ + +struct i40e_aqc_tunnel_key_structure_A0 { + __le16 key1_off; + __le16 key1_len; + __le16 key2_off; + __le16 key2_len; + __le16 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 resreved[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0); + +struct i40e_aqc_tunnel_key_structure { + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); + +/* OEM mode commands (direct 0xFE0x) */ +struct i40e_aqc_oem_param_change { + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + 
__le16 param_value2; + u8 reserved[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); + +struct i40e_aqc_oem_state_change { + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); + +/* Initialize OCSD (0xFE02, direct) */ +struct i40e_aqc_opc_oem_ocsd_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocsd_memory_block_addr_high; + __le32 ocsd_memory_block_addr_low; + __le32 requested_update_interval; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); + +/* Initialize OCBB (0xFE03, direct) */ +struct i40e_aqc_opc_oem_ocbb_initialize { + u8 type_status; + u8 reserved1[3]; + __le32 ocbb_memory_block_addr_high; + __le32 ocbb_memory_block_addr_low; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); + +/* debug commands */ + +/* get device id (0xFF00) uses the generic structure */ + +/* set test more (0xFF01, internal) */ + +struct i40e_acq_set_test_mode { + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h new file mode 100644 index 000000000..8e6a6dd92 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h @@ -0,0 +1,58 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ALLOC_H_ +#define _I40E_ALLOC_H_ + +struct i40e_hw; + +/* Memory allocation types */ +enum i40e_memory_type { + i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ + i40e_mem_asq_buf = 1, + i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ + i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ + i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ + i40e_mem_pd = 5, /* Page Descriptor */ + i40e_mem_bp = 6, /* Backing Page - 4KB */ + i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + i40e_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +i40e_status i40e_free_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem); +i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size); +i40e_status i40e_free_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem); + +#endif /* _I40E_ALLOC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c new file mode 100644 index 000000000..39fcb1dc4 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -0,0 +1,636 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_type.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_virtchnl.h" + +/** + * i40e_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. + **/ +i40e_status i40e_set_mac_type(struct i40e_hw *hw) +{ + i40e_status status = 0; + + if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { + switch (hw->device_id) { + case I40E_DEV_ID_SFP_XL710: + case I40E_DEV_ID_QEMU: + case I40E_DEV_ID_KX_A: + case I40E_DEV_ID_KX_B: + case I40E_DEV_ID_KX_C: + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_20G_KR2: + hw->mac.type = I40E_MAC_XL710; + break; + case I40E_DEV_ID_VF: + case I40E_DEV_ID_VF_HV: + hw->mac.type = I40E_MAC_VF; + break; + default: + hw->mac.type = I40E_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, status); + return status; +} + +/** + * i40evf_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * @buf_len: max length of buffer + * + * Dumps debug log about adminq command with descriptor contents. + **/ +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, + void *buffer, u16 buf_len) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = le16_to_cpu(aq_desc->datalen); + u8 *buf = (u8 *)buffer; + u16 i = 0; + + if ((!(mask & hw->debug_mask)) || (desc == NULL)) + return; + + i40e_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); + i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); + i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); + i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); + + if ((buffer != NULL) && (aq_desc->datalen != 0)) { + i40e_debug(hw, mask, "AQ CMD Buffer:\n"); + if (buf_len < len) + len = buf_len; + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); + } + } +} + +/** + * i40evf_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. 
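+ *
+ * Illustrative sketch only (an assumed caller, not code in this patch):
+ * a driver would typically gate AdminQ teardown on this check, e.g.
+ *
+ *	if (i40evf_check_asq_alive(hw))
+ *		i40evf_aq_queue_shutdown(hw, true);
+ *
+ * so the firmware is only told the queue is going away when the send
+ * queue is actually enabled.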
+ **/ +bool i40evf_check_asq_alive(struct i40e_hw *hw) +{ + if (hw->aq.asq.len) + return !!(rd32(hw, hw->aq.asq.len) & + I40E_PF_ATQLEN_ATQENABLE_MASK); + else + return false; +} + +/** + * i40evf_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. + **/ +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + + +/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT i40evf_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + 
I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, 
IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, 
IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + 
I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + +/** + * i40e_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for + * completion before returning. + **/ +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_asq_cmd_details details; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + } + if (!cmd_details) { + memset(&details, 0, sizeof(details)); + details.async = true; + cmd_details = &details; + } + status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details); + return status; +} + +/** + * i40e_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. 
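+ *
+ * Assumed call pattern, for illustration only (the real caller lives in
+ * the VF virtchnl handling code, and "vf_res" is a placeholder for
+ * wherever the driver keeps the PF's I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * reply taken off the ARQ):
+ *
+ *	i40e_vf_parse_hw_config(hw, vf_res);
+ *	ether_addr_copy(netdev->dev_addr, hw->mac.addr);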
+ **/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg) +{ + struct i40e_virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.fcoe = (msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == I40E_VSI_SRIOV) { + memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr, + ETH_ALEN); + memcpy(hw->mac.addr, vsi_res->default_mac_addr, + ETH_ALEN); + } + vsi_res++; + } +} + +/** + * i40e_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. + **/ +i40e_status i40e_vf_reset(struct i40e_hw *hw) +{ + return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, + 0, NULL, 0, NULL); +} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h new file mode 100644 index 000000000..931c88044 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -0,0 +1,236 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_HMC_H_ +#define _I40E_HMC_H_ + +#define I40E_HMC_MAX_BP_COUNT 512 + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ +#define I40E_HMC_PD_CNT_IN_SD 512 +#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ +#define I40E_HMC_PAGED_BP_SIZE 4096 +#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40E_FIRST_VF_FPM_ID 16 + +struct i40e_hmc_obj_info { + u64 base; /* base addr in FPM */ + u32 max_cnt; /* max count available for this hmc func */ + u32 cnt; /* count of objects driver actually wants to create */ + u64 size; /* size in bytes of one object */ +}; + +enum i40e_sd_entry_type { + I40E_SD_TYPE_INVALID = 0, + I40E_SD_TYPE_PAGED = 1, + I40E_SD_TYPE_DIRECT = 2 +}; + +struct i40e_hmc_bp { + enum i40e_sd_entry_type entry_type; + struct i40e_dma_mem addr; /* populate to be used by hw */ + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40e_hmc_pd_entry { + struct i40e_hmc_bp bp; + u32 sd_index; + bool valid; +}; + +struct i40e_hmc_pd_table { + struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ + struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ + struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ + + u32 ref_cnt; + u32 sd_index; +}; + +struct i40e_hmc_sd_entry { + enum i40e_sd_entry_type entry_type; + bool valid; + + union { + struct i40e_hmc_pd_table pd_table; + struct i40e_hmc_bp bp; + } u; +}; + +struct i40e_hmc_sd_table { + struct i40e_virt_mem addr; /* used to track sd_entry allocations */ + u32 sd_cnt; + u32 ref_cnt; + struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ +}; + +struct i40e_hmc_info { + u32 signature; + /* equals to pci func num for PF and dynamically allocated for VFs */ + u8 hmc_fn_id; + u16 first_sd_index; /* index of the first available SD */ + + /* hmc objects */ + struct i40e_hmc_obj_info *hmc_obj; + struct i40e_virt_mem hmc_obj_virt_mem; + struct i40e_hmc_sd_table sd_table; +}; + +#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware + * @hw: pointer to our hw struct + * @pa: pointer to physical address + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ +{ \ + u32 val1, val2, val3; \ + val1 = (u32)(upper_32_bits(pa)); \ + val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ + (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware + * @hw: pointer to our hw struct + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ +{ \ + u32 val2, val3; \ + val2 = (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ + val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + **/ +#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40e_hmc_rsrc_type. + **/ +#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ +{ \ + u64 fpm_addr, fpm_limit; \ + fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (index); \ + fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ + *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ + *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(sd_limit) += 1; \ +} + +/** + * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40e_hmc_rsrc_type. 
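+ *
+ * Worked example with assumed numbers, for illustration: an object type
+ * with FPM base 0x2000 and a 128-byte object size, creating 64 objects
+ * from idx 0, yields
+ *
+ *	fpm_adr   = 0x2000 + 128 * 0  = 0x2000
+ *	fpm_limit = 0x2000 + 128 * 64 = 0x4000
+ *	*pd_index = 0x2000 / 4096     = 2
+ *	*pd_limit = (0x4000 - 1) / 4096 + 1 = 4
+ *
+ * i.e. page descriptors 2 and 3 back this range, with
+ * I40E_HMC_PAGED_BP_SIZE (4096) as the paged backing-page size.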
+ **/ +#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ +{ \ + u64 fpm_adr, fpm_limit; \ + fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (idx); \ + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ + *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ + *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(pd_limit) += 1; \ +} +i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); + +i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index); +i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); + +#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h new file mode 100644 index 000000000..a5d798773 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h @@ -0,0 +1,181 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_LAN_HMC_H_ +#define _I40E_LAN_HMC_H_ + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +/* HMC element context information */ + +/* Rx queue context data + * + * The sizes of the variables may be larger than needed due to crossing byte + * boundaries. If we do not have the width of the variable set to the correct + * size then we could end up shifting bits off the top of the variable when the + * variable is at the top of a byte and crosses over into the next byte. 
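+ *
+ * Fill-in sketch, illustration only (the PF-side HMC code does the real
+ * programming; the units shown are assumptions taken from the DBUFF and
+ * HBUFF shift defines below and may not match every configuration):
+ *
+ *	struct i40e_hmc_obj_rxq rx_ctx = {0};
+ *
+ *	rx_ctx.base  = ring_dma >> 7;			128-byte units
+ *	rx_ctx.qlen  = ring_count;
+ *	rx_ctx.dbuff = buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ *	rx_ctx.hbuff = hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ *	rx_ctx.dsize = 1;				32-byte descriptors
+ *	rx_ctx.prefena = 1;				see note at prefena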
+ */ +struct i40e_hmc_obj_rxq { + u16 head; + u16 cpuid; /* bigger than needed, see above for reason */ + u64 base; + u16 qlen; +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + u16 dbuff; /* bigger than needed, see above for reason */ +#define I40E_RXQ_CTX_HBUFF_SHIFT 6 + u16 hbuff; /* bigger than needed, see above for reason */ + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 fc_ena; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u32 rxmax; /* bigger than needed, see above for reason */ + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 prefena; /* NOTE: normally must be set to 1 at init */ +}; + +/* Tx queue context data +* +* The sizes of the variables may be larger than needed due to crossing byte +* boundaries. If we do not have the width of the variable set to the correct +* size then we could end up shifting bits off the top of the variable when the +* variable is at the top of a byte and crosses over into the next byte. +*/ +struct i40e_hmc_obj_txq { + u16 head; + u8 new_context; + u64 base; + u8 fc_ena; + u8 timesync_ena; + u8 fd_ena; + u8 alt_vlan_ena; + u16 thead_wb; + u8 cpuid; + u8 head_wb_ena; + u16 qlen; + u8 tphrdesc_ena; + u8 tphrpacket_ena; + u8 tphwdesc_ena; + u64 head_wb_addr; + u32 crc; + u16 rdylist; + u8 rdylist_act; +}; + +/* for hsplit_0 field of Rx HMC context */ +enum i40e_hmc_obj_rx_hsplit_0 { + I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* fcoe_cntx and fcoe_filt are for debugging purpose only */ +struct i40e_hmc_obj_fcoe_cntx { + u32 rsv[32]; +}; + +struct i40e_hmc_obj_fcoe_filt { + u32 rsv[8]; +}; + +/* Context sizes for LAN objects */ +enum i40e_hmc_lan_object_size { + I40E_HMC_LAN_OBJ_SZ_8 = 0x3, + I40E_HMC_LAN_OBJ_SZ_16 = 0x4, + I40E_HMC_LAN_OBJ_SZ_32 = 0x5, + I40E_HMC_LAN_OBJ_SZ_64 = 0x6, + I40E_HMC_LAN_OBJ_SZ_128 = 0x7, + I40E_HMC_LAN_OBJ_SZ_256 = 0x8, + I40E_HMC_LAN_OBJ_SZ_512 = 0x9, +}; + +#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 +#define I40E_HMC_OBJ_SIZE_TXQ 128 +#define I40E_HMC_OBJ_SIZE_RXQ 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 + +enum i40e_hmc_lan_rsrc_type { + I40E_HMC_LAN_FULL = 0, + I40E_HMC_LAN_TX = 1, + I40E_HMC_LAN_RX = 2, + I40E_HMC_FCOE_CTX = 3, + I40E_HMC_FCOE_FILT = 4, + I40E_HMC_LAN_MAX = 5 +}; + +enum i40e_hmc_model { + I40E_HMC_MODEL_DIRECT_PREFERRED = 0, + I40E_HMC_MODEL_DIRECT_ONLY = 1, + I40E_HMC_MODEL_PAGED_ONLY = 2, + I40E_HMC_MODEL_UNKNOWN, +}; + +struct i40e_hmc_lan_create_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; + enum i40e_sd_entry_type entry_type; + u64 direct_mode_sz; +}; + +struct i40e_hmc_lan_delete_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; +}; + +i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_rx_queue_context(struct 
i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); + +#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h new file mode 100644 index 000000000..21a91b14b --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -0,0 +1,75 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_OSDEP_H_ +#define _I40E_OSDEP_H_ + +#include +#include +#include +#include +#include + +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include + +/* File to be the magic between shared code and + * actual OS primitives + */ + +#define hw_dbg(hw, S, A...) do {} while (0) + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT) + +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +#define i40e_allocate_dma_mem(h, m, unused, s, a) \ + i40evf_allocate_dma_mem_d(h, m, s, a) +#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m) + +struct i40e_virt_mem { + void *va; + u32 size; +} __packed; +#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) +#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) + +#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__) +extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) + __attribute__ ((format(gnu_printf, 3, 4))); + +typedef enum i40e_status_code i40e_status; +#endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h new file mode 100644 index 000000000..58e37a44b --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -0,0 +1,91 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include "i40e_virtchnl.h" + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. + */ + +/* adminq functions */ +i40e_status i40evf_init_adminq(struct i40e_hw *hw); +i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +i40e_status i40evf_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +bool i40evf_asq_done(struct i40e_hw *hw); + +/* debug function for adminq */ +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct i40e_hw *hw); +void i40evf_resume_aq(struct i40e_hw *hw); +bool i40evf_check_asq_alive(struct i40e_hw *hw); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); + +i40e_status i40e_set_mac_type(struct i40e_hw *hw); + +extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40evf_ptype_lookup[ptype]; +} + +/* prototype for functions used for SW locks */ + +/* i40e_common for VF drivers*/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg); +i40e_status i40e_vf_reset(struct i40e_hw *hw); +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h new file mode 100644 index 000000000..3cc737629 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -0,0 +1,3369 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + +#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) +#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ +#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) +#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ +#define I40E_GL_ARQH_ARQH_SHIFT 0 +#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) +#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ +#define I40E_GL_ARQT_ARQT_SHIFT 0 +#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) +#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ +#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) +#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ +#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) +#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ +#define I40E_GL_ATQH_ATQH_SHIFT 0 +#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) +#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ +#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) +#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) +#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) +#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) +#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) +#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ +#define I40E_GL_ATQT_ATQT_SHIFT 0 +#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) +#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ +#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) +#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ +#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) +#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ +#define I40E_PF_ARQH_ARQH_SHIFT 0 +#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) +#define I40E_PF_ARQLEN 0x00080280 /* Reset: 
EMPR */ +#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) +#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) +#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ +#define I40E_PF_ARQT_ARQT_SHIFT 0 +#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) +#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ +#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) +#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ +#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) +#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ +#define I40E_PF_ATQH_ATQH_SHIFT 0 +#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) +#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ +#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) +#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) +#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ +#define I40E_PF_ATQT_ATQT_SHIFT 0 +#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) +#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAH_MAX_INDEX 127 +#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQBAL_MAX_INDEX 127 +#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) +#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQH_MAX_INDEX 127 +#define I40E_VF_ARQH_ARQH_SHIFT 0 +#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) +#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQLEN_MAX_INDEX 127 +#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 +#define 
I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ARQT_MAX_INDEX 127 +#define I40E_VF_ARQT_ARQT_SHIFT 0 +#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) +#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAH_MAX_INDEX 127 +#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQBAL_MAX_INDEX 127 +#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) +#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQH_MAX_INDEX 127 +#define I40E_VF_ATQH_ATQH_SHIFT 0 +#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) +#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQLEN_MAX_INDEX 127 +#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_VF_ATQT_MAX_INDEX 127 +#define I40E_VF_ATQT_ATQT_SHIFT 0 +#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) +#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ +#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 +#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, 
I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) +#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 +#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) +#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 +#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) +#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 +#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 +#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) +#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) +#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) +#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ +#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 +#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) +#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) +#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 +#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) +#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, 
I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) +#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 +#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 +#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) +#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ +#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 +#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) +#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 +#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 +#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 +#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) +#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 +#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) +#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ +#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 +#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) +#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ +#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 +#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) +#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 +#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 +#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) +#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 +#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 +#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) +#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ +#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 +#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) +#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 +#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) +#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 +#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 +#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ +#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 +#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 +#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) +#define 
I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) +#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ +#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 +#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) +#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ +#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 +#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 +#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 +#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 +#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 +#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 +#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 +#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 +#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) +#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ +#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ +#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ +#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 +#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) +#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) +#define I40E_PRTDCB_TETSC_TPB 
0x00098060 /* Reset: CORER */ +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) +#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ +#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 +#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 +#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 +#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 +#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 +#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 +#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 +#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 +#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 +#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) +#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ +#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) +#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ +#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 +#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) +#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 +#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) +#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 +#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) +#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 +#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) +#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ +#define I40E_GL_FWSTS_FWS0B_SHIFT 0 +#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) +#define I40E_GL_FWSTS_FWRI_SHIFT 9 +#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) +#define I40E_GL_FWSTS_FWS1B_SHIFT 16 +#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */ +#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 +#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) +#define 
I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) +#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */ +#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 +#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 +#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) +#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) +#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) +#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 +#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 +#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) +#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 +#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) +#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 +#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, 
I40E_GLGEN_I2CCMD_PHYADD_SHIFT) +#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 +#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) +#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 +#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) +#define I40E_GLGEN_I2CCMD_R_SHIFT 29 +#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) +#define I40E_GLGEN_I2CCMD_E_SHIFT 31 +#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) +#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 +#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 +#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 +#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 +#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) +#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */ +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) +#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) +#define 
I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) +#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSCA_MAX_INDEX 3 +#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 +#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) +#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 +#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) +#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 +#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) +#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 +#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_GLGEN_MSRWD_MAX_INDEX 3 +#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 +#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) +#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 +#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) +#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ +#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 +#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) +#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 +#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) +#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 +#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) +#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 +#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 +#define 
I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 +#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) +#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ +#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 +#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) +#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ +#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 +#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) +#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 +#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) +#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 +#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) +#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ +#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 +#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) +#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 +#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) +#define I40E_GLGEN_STAT_VTEN_SHIFT 3 +#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) +#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 +#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) +#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 +#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) +#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 +#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) +#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 +#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 +#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) +#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ +#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 +#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) +#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ +#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 +#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT) +#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ +#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 +#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) +#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ +#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ +#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 +#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) +#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 +#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) +#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 +#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) +#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 +#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) +#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ +#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 +#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) +#define 
I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) +#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ +#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 +#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) +#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 +#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) +#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 +#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 +#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) +#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 +#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 +#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) +#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 +#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 +#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) +#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 +#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 +#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) +#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) +#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) +#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) +#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK 
I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) +#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) +#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) +#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 +#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) +#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) +#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) +#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) +#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) +#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) +#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) +#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ +#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 +#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) +#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) +#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 +#define 
I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) +#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) +#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) +#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 +#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) +#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) +#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) +#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) +#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_SDPART_MAX_INDEX 15 +#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) +#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) +#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) +#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ +#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ +#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) +#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) +#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) +#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 +#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) +#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 +#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) +#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define 
I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) +#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ +#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 
13 +#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_PFINT_CEQCTL_MAX_INDEX 511 +#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ +#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) 
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, 
I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ +#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 +#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 +#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 +#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 +#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT) +#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT) +#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT) +#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK 
I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_SWINT_SHIFT 31 +#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT) +#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */ +#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT) +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT) +#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */ +#define I40E_PFINT_ITR0_MAX_INDEX 2 +#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT) +#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_ITRN_MAX_INDEX 2 +#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT) +#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */ +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 
*/ /* Reset: PFR */ +#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ +#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) +#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_RATEN_MAX_INDEX 511 +#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) +#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_RQCTL_MAX_INDEX 1535 +#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) +#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QINT_TQCTL_MAX_INDEX 1535 +#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) +#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define 
I40E_VFINT_DYN_CTL0_MAX_INDEX 127 +#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_MAX_INDEX 127 +#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define 
I40E_VFINT_ICR0_SWINT_SHIFT 31 +#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_ITR0_MAX_INDEX 2 +#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN_MAX_INDEX 2 +#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VPINT_AEQCTL_MAX_INDEX 127 +#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_VPINT_CEQCTL_MAX_INDEX 511 +#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + 
((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLST0_MAX_INDEX 127 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPINT_RATE0_MAX_INDEX 127 +#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) +#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ +#define I40E_VPINT_RATEN_MAX_INDEX 511 +#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) +#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) +#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 +#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) +#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ +#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 +#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) +#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) +#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) +#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ +#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 +#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ +#define 
I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 +#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) +#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 +#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QRX_ENA_MAX_INDEX 1535 +#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) +#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) +#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) +#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QRX_TAIL_MAX_INDEX 1535 +#define I40E_QRX_TAIL_TAIL_SHIFT 0 +#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) +#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_CTL_MAX_INDEX 1535 +#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 +#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) +#define I40E_QTX_CTL_PF_INDX_SHIFT 2 +#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT) +#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 +#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) +#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_ENA_MAX_INDEX 1535 +#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) +#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) +#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) +#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ +#define I40E_QTX_HEAD_MAX_INDEX 1535 +#define I40E_QTX_HEAD_HEAD_SHIFT 0 +#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) +#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 +#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) +#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ +#define I40E_QTX_TAIL_MAX_INDEX 1535 +#define I40E_QTX_TAIL_TAIL_SHIFT 0 +#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) +#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_MAPENA_MAX_INDEX 127 +#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 +#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) +#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QTABLE_MAX_INDEX 15 +#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 +#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) +#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QBASE_MAX_INDEX 383 +#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 +#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 
11 +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) +#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSILAN_QTABLE_MAX_INDEX 7 +#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 +#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) +#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 +#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) +#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ +#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 +#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) +#define I40E_PRTGL_SAH_MFS_SHIFT 16 +#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT) +#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ +#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 +#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 +#define 
I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, 
I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) +#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ +#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 +#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) +#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) 
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) +#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) +#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) +#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 +#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 +#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) +#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 +#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 +#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 +#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 +#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) +#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 +#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 +#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) +#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 +#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 +#define 
I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 +#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) +#define I40E_PRT_MNG_METF(_i) 
(0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_METF_MAX_INDEX 3 +#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 +#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) +#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 +#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) +#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) +#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 +#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) +#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 +#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) +#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 +#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) +#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ +#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 +#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) +#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 +#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) +#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ +#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 +#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) +#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) +#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, 
I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) +#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ +#define I40E_MSIX_PBA_MAX_INDEX 5 +#define I40E_MSIX_PBA_PENBIT_SHIFT 0 +#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) +#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TADD_MAX_INDEX 128 +#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) +#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TMSG_MAX_INDEX 128 +#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TUADD_MAX_INDEX 128 +#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ +#define I40E_MSIX_TVCTRL_MAX_INDEX 128 +#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ +#define I40E_VFMSIX_PBA1_MAX_INDEX 19 +#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 +#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ +#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 +#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) +#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 +#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) +#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 +#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) +#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 +#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) +#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 +#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, 
I40E_GLNVM_FLA_FL_REQ_SHIFT) +#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 +#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) +#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 +#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) +#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 +#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) +#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 +#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) +#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ +#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 +#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) +#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 +#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) +#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ +#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 +#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) +#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 +#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) +#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 +#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) +#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 +#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) +#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 +#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) +#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ +#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) +#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ +#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 +#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) +#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 +#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) +#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 +#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) +#define I40E_GLNVM_SRCTL_START_SHIFT 30 +#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_SHIFT 31 +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ +#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 +#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) +#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 +#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, 
I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) +#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ +#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 +#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) +#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ +#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 +#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) +#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 +#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 +#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 +#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 +#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 +#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 +#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 +#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 +#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) +#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ 
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 +#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) +#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 +#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) +#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ +#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 +#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 +#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 +#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) +#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) 
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) +#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) +#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ +#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ +#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 +#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) +#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 +#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) +#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 +#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) +#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) +#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 +#define 
I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ +#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 +#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 +#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) +#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 +#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) +#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ +#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 +#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 +#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 +#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 +#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) +#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ +#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 +#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) +#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ +#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 +#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) +#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ +#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 +#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) +#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 +#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) +#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ +#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 +#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) +#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ +#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 +#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT) +#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ +#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 +#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) +#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 +#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) +#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ +#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 +#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) +#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 +#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) +#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ +#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 +#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) +#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ +#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 +#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) +#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 +#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) +#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 +#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) +#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ +#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 +#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) +#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 +#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) +#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 +#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) +#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 +#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) +#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR 
*/ +#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 +#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) +#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 +#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) +#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) +#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ +#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) +#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) +#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) +#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ +#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 +#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) +#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ +#define I40E_PFPCI_PM_PME_EN_SHIFT 0 +#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) +#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ +#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 +#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) +#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 +#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 +#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ +#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 
0 +#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) +#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ +#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 +#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) +#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ +#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 +#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) +#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 +#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) +#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) +#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ +#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 +#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) +#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 +#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) +#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ +#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 +#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) +#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ +#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 +#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) +#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 +#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) +#define I40E_PRTPM_GC_RATD_SHIFT 2 +#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT) +#define I40E_PRTPM_GC_LCDMP_SHIFT 3 +#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) +#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 +#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) +#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ +#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 +#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) +#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ +#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 +#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ +#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 +#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) +#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ +#define I40E_GLRPB_GHW_GHW_SHIFT 0 +#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) +#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ +#define I40E_GLRPB_GLW_GLW_SHIFT 0 +#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) 
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ +#define I40E_GLRPB_PHW_PHW_SHIFT 0 +#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) +#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ +#define I40E_GLRPB_PLW_PLW_SHIFT 0 +#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) +#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DHW_MAX_INDEX 7 +#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 +#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) +#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DLW_MAX_INDEX 7 +#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 +#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT) +#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_DPS_MAX_INDEX 7 +#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 +#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) +#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SHT_MAX_INDEX 7 +#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 +#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) +#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ +#define I40E_PRTRPB_SHW_SHW_SHIFT 0 +#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) +#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTRPB_SLT_MAX_INDEX 7 +#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 +#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) +#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */ +#define I40E_PRTRPB_SLW_SLW_SHIFT 0 +#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) +#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ +#define I40E_PRTRPB_SPS_SPS_SHIFT 0 +#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) +#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ +#define I40E_GLQF_CTL_HTOEP_SHIFT 1 +#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) +#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 +#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) +#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 +#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 +#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) +#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 +#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 +#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 +#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) +#define I40E_GLQF_CTL_FDBEST_SHIFT 17 +#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) +#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 +#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) +#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 +#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) 
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 +#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) +#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) +#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13 +#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) +#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_GLQF_HKEY_MAX_INDEX 12 +#define I40E_GLQF_HKEY_KEY_0_SHIFT 0 +#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) +#define I40E_GLQF_HKEY_KEY_1_SHIFT 8 +#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) +#define I40E_GLQF_HKEY_KEY_2_SHIFT 16 +#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) +#define I40E_GLQF_HKEY_KEY_3_SHIFT 24 +#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) +#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HSYM_MAX_INDEX 63 +#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 +#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) +#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ +#define I40E_GLQF_PCNT_MAX_INDEX 511 +#define I40E_GLQF_PCNT_PCNT_SHIFT 0 +#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) +#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_SWAP_MAX_INDEX 1 +#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 +#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 +#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN0_SHIFT 12 +#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 +#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 +#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 +#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) +#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ +#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10 +#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14 +#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16 +#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) +#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17 +#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT) +#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18 +#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) +#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 +#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, 
I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) +#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 +#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 +#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ +#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 +#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) +#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ +#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 +#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) +#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 +#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) +#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */ +#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 +#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) +#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 +#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) +#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_PFQF_HENA_MAX_INDEX 1 +#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_PFQF_HKEY_MAX_INDEX 12 +#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) +#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) +#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) +#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT) +#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_PFQF_HLUT_MAX_INDEX 127 +#define I40E_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) +#define I40E_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) +#define I40E_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) +#define I40E_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ +#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 +#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) +#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 +#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 +#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) +#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) +#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* 
Reset: CORER */ +#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 +#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) +#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HENA1_MAX_INDEX 1 +#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY1_MAX_INDEX 12 +#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT) +#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT) +#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT) +#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT) +#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT) +#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT) +#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT) +#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT) +#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION1_MAX_INDEX 7 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) +#define 
I40E_VFQF_HREGION1_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT) +#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPQF_CTL_MAX_INDEX 127 +#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 +#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT) +#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 +#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT) +#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 +#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT) +#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 +#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT) +#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_CTL_MAX_INDEX 383 +#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 +#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT) +#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 +#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 +#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 +#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 +#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 +#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) +#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */ +#define I40E_VSIQF_TCREGION_MAX_INDEX 3 +#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 +#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 +#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) +#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 +#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 +#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) +#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOECRC_MAX_INDEX 143 +#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 +#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT) +#define 
I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDDPC_MAX_INDEX 143 +#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 +#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) +#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) +#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) +#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) +#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) +#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) +#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) +#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) +#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) +#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOELAST_MAX_INDEX 143 +#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 +#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT) +#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPRC_MAX_INDEX 143 +#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 +#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT) +#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOEPTC_MAX_INDEX 143 +#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 +#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT) +#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_FCOERPDC_MAX_INDEX 143 +#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 +#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT) +#define 
I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR1_L_MAX_INDEX 143 +#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0 +#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT) +#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ +#define I40E_GL_RXERR2_L_MAX_INDEX 143 +#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0 +#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) +#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCH_MAX_INDEX 3 +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) +#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPRCL_MAX_INDEX 3 +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) +#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCH_MAX_INDEX 3 +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) +#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_BPTCL_MAX_INDEX 3 +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) +#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 +#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 +#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) +#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCH_MAX_INDEX 3 +#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 +#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT) +#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GORCL_MAX_INDEX 3 +#define I40E_GLPRT_GORCL_GORCL_SHIFT 0 +#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT) +#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCH_MAX_INDEX 3 +#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT) +#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_GOTCL_MAX_INDEX 3 +#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT) +#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 +#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 +#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) +#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LDPC_MAX_INDEX 3 +#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 +#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT) +#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) +#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) +#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) +#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 +#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) +#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MLFC_MAX_INDEX 3 +#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 +#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT) +#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCH_MAX_INDEX 3 +#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT) +#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPRCL_MAX_INDEX 3 +#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT) +#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCH_MAX_INDEX 3 +#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT) +#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MPTCL_MAX_INDEX 3 +#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT) +#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_MRFC_MAX_INDEX 3 +#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 +#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT) +#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 +#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) +#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 +#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) +#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127H_MAX_INDEX 3 +#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 +#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT) +#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC127L_MAX_INDEX 3 +#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 +#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPRT_PRC127L_PRC127L_SHIFT) +#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255H_MAX_INDEX 3 +#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 +#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) +#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC255L_MAX_INDEX 3 +#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 +#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT) +#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511H_MAX_INDEX 3 +#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 +#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT) +#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC511L_MAX_INDEX 3 +#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 +#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT) +#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64H_MAX_INDEX 3 +#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 +#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT) +#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC64L_MAX_INDEX 3 +#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 +#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT) +#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 +#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) +#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 +#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) +#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127H_MAX_INDEX 3 +#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 +#define I40E_GLPRT_PTC127H_PTC127H_MASK 
I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT) +#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC127L_MAX_INDEX 3 +#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 +#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT) +#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 +#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) +#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 +#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) +#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255H_MAX_INDEX 3 +#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 +#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT) +#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC255L_MAX_INDEX 3 +#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 +#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT) +#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511H_MAX_INDEX 3 +#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 +#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT) +#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC511L_MAX_INDEX 3 +#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 +#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT) +#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64H_MAX_INDEX 3 +#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 +#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT) +#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC64L_MAX_INDEX 3 +#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 +#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) +#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 +#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) +#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 +#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) +#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) +#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 +#define 
I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) +#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) +#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) +#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RDPC_MAX_INDEX 3 +#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 +#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) +#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RFC_MAX_INDEX 3 +#define I40E_GLPRT_RFC_RFC_SHIFT 0 +#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) +#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RJC_MAX_INDEX 3 +#define I40E_GLPRT_RJC_RJC_SHIFT 0 +#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) +#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RLEC_MAX_INDEX 3 +#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 +#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) +#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_ROC_MAX_INDEX 3 +#define I40E_GLPRT_ROC_ROC_SHIFT 0 +#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) +#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUC_MAX_INDEX 3 +#define I40E_GLPRT_RUC_RUC_SHIFT 0 +#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) +#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_RUPP_MAX_INDEX 3 +#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 +#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) +#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ +#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) +#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_TDOLD_MAX_INDEX 3 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) +#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCH_MAX_INDEX 3 +#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPRCL_MAX_INDEX 3 +#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLPRT_UPRCL_UPRCL_MASK 
I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) +#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCH_MAX_INDEX 3 +#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) +#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GLPRT_UPTCL_MAX_INDEX 3 +#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) +#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCH_MAX_INDEX 15 +#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) +#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPRCL_MAX_INDEX 15 +#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) +#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCH_MAX_INDEX 15 +#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) +#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_BPTCL_MAX_INDEX 15 +#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) +#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCH_MAX_INDEX 15 +#define I40E_GLSW_GORCH_GORCH_SHIFT 0 +#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) +#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GORCL_MAX_INDEX 15 +#define I40E_GLSW_GORCL_GORCL_SHIFT 0 +#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) +#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCH_MAX_INDEX 15 +#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) +#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_GOTCL_MAX_INDEX 15 +#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) +#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCH_MAX_INDEX 15 +#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) +#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPRCL_MAX_INDEX 15 +#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) +#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCH_MAX_INDEX 15 +#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) +#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_MPTCL_MAX_INDEX 15 +#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLSW_MPTCL_MPTCL_MASK 
I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) +#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_RUPP_MAX_INDEX 15 +#define I40E_GLSW_RUPP_RUPP_SHIFT 0 +#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) +#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_TDPC_MAX_INDEX 15 +#define I40E_GLSW_TDPC_TDPC_SHIFT 0 +#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) +#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCH_MAX_INDEX 15 +#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) +#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPRCL_MAX_INDEX 15 +#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) +#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCH_MAX_INDEX 15 +#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) +#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLSW_UPTCL_MAX_INDEX 15 +#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) +#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCH_MAX_INDEX 383 +#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) +#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPRCL_MAX_INDEX 383 +#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) +#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCH_MAX_INDEX 383 +#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) +#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_BPTCL_MAX_INDEX 383 +#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT) +#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCH_MAX_INDEX 383 +#define I40E_GLV_GORCH_GORCH_SHIFT 0 +#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) +#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GORCL_MAX_INDEX 383 +#define I40E_GLV_GORCL_GORCL_SHIFT 0 +#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) +#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCH_MAX_INDEX 383 +#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) +#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_GOTCL_MAX_INDEX 383 +#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) +#define 
I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCH_MAX_INDEX 383 +#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) +#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPRCL_MAX_INDEX 383 +#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) +#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCH_MAX_INDEX 383 +#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) +#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_MPTCL_MAX_INDEX 383 +#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) +#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RDPC_MAX_INDEX 383 +#define I40E_GLV_RDPC_RDPC_SHIFT 0 +#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) +#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_RUPP_MAX_INDEX 383 +#define I40E_GLV_RUPP_RUPP_SHIFT 0 +#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) +#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_TEPC_MAX_INDEX 383 +#define I40E_GLV_TEPC_TEPC_SHIFT 0 +#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) +#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCH_MAX_INDEX 383 +#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) +#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPRCL_MAX_INDEX 383 +#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) +#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCH_MAX_INDEX 383 +#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 +#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) +#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_UPTCL_MAX_INDEX 383 +#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) +#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, 
I40E_GLVEBTC_RPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) +#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ +#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) +#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 +#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) +#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 +#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) +#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 +#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) +#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 +#define 
I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) +#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 +#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) +#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 +#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) +#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) +#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) +#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) +#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ +#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 +#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) +#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 +#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 +#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 +#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) +#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, 
I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 +#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 +#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) +#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 +#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 +#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) +#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 +#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) +#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ +#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 +#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) +#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) +#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) +#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 +#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 +#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 +#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) +#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ +#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 +#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 +#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) +#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) +#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) +#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) +#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) +#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) +#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ +#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 +#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) +#define I40E_GL_MDET_RX_EVENT_SHIFT 8 +#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT) +#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 +#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT) +#define I40E_GL_MDET_RX_VALID_SHIFT 31 +#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT) +#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */ +#define I40E_GL_MDET_TX_QUEUE_SHIFT 0 +#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT) +#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12 +#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT) +#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21 +#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT) +#define I40E_GL_MDET_TX_EVENT_SHIFT 25 +#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, 
I40E_GL_MDET_TX_EVENT_SHIFT) +#define I40E_GL_MDET_TX_VALID_SHIFT 31 +#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT) +#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */ +#define I40E_PF_MDET_RX_VALID_SHIFT 0 +#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT) +#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */ +#define I40E_PF_MDET_TX_VALID_SHIFT 0 +#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */ +#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 +#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 +#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_RX_MAX_INDEX 127 +#define I40E_VP_MDET_RX_VALID_SHIFT 0 +#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) +#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_VP_MDET_TX_MAX_INDEX 127 +#define I40E_VP_MDET_TX_VALID_SHIFT 0 +#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) +#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ +#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 +#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) +#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 +#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) +#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 +#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) +#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 +#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) +#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 +#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) +#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ +#define I40E_PFPM_APM_APME_SHIFT 0 +#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) +#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ +#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) +#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ +#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 +#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) +#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ +#define I40E_PFPM_WUFC_LNKC_SHIFT 0 +#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) +#define I40E_PFPM_WUFC_MAG_SHIFT 1 +#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) +#define I40E_PFPM_WUFC_MNG_SHIFT 3 +#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) +#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 +#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 +#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 +#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) +#define 
I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 +#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 +#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 +#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 +#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 +#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX0_SHIFT 16 +#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) +#define I40E_PFPM_WUFC_FLX1_SHIFT 17 +#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) +#define I40E_PFPM_WUFC_FLX2_SHIFT 18 +#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) +#define I40E_PFPM_WUFC_FLX3_SHIFT 19 +#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) +#define I40E_PFPM_WUFC_FLX4_SHIFT 20 +#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) +#define I40E_PFPM_WUFC_FLX5_SHIFT 21 +#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) +#define I40E_PFPM_WUFC_FLX6_SHIFT 22 +#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) +#define I40E_PFPM_WUFC_FLX7_SHIFT 23 +#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) +#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) +#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ +#define I40E_PFPM_WUS_LNKC_SHIFT 0 +#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) +#define I40E_PFPM_WUS_MAG_SHIFT 1 +#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) +#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 +#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) +#define I40E_PFPM_WUS_MNG_SHIFT 3 +#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) +#define I40E_PFPM_WUS_FLX0_SHIFT 16 +#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) +#define I40E_PFPM_WUS_FLX1_SHIFT 17 +#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) +#define I40E_PFPM_WUS_FLX2_SHIFT 18 +#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) +#define I40E_PFPM_WUS_FLX3_SHIFT 19 +#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) +#define I40E_PFPM_WUS_FLX4_SHIFT 20 +#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) +#define I40E_PFPM_WUS_FLX5_SHIFT 21 +#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) +#define I40E_PFPM_WUS_FLX6_SHIFT 22 +#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) +#define I40E_PFPM_WUS_FLX7_SHIFT 23 +#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) +#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) +#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ +#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 +#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) +#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 +#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) +#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* 
_i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAH_MAX_INDEX 3 +#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 +#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) +#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 +#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) +#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 +#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) +#define I40E_PRTPM_SAH_AV_SHIFT 31 +#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) +#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ +#define I40E_PRTPM_SAL_MAX_INDEX 3 +#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 +#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) +#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) +#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define I40E_VF_ARQT1_ARQT_SHIFT 0 +#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) +#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) +#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define I40E_VF_ATQH1_ATQH_SHIFT 0 +#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) +#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define I40E_VF_ATQT1_ATQT_SHIFT 0 +#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, 
I40E_VF_ATQT1_ATQT_SHIFT) +#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) +#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR01_QUEUE_0_MASK 
I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) +#define I40E_VFINT_ICR01_SWINT_SHIFT 31 +#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) +#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ +#define I40E_VFINT_ITR01_MAX_INDEX 2 +#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_ITRN1_MAX_INDEX 2 +#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_QRX_TAIL1_MAX_INDEX 15 +#define I40E_QRX_TAIL1_TAIL_SHIFT 0 +#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ +#define I40E_QTX_TAIL1_MAX_INDEX 15 +#define I40E_QTX_TAIL1_TAIL_SHIFT 0 +#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) +#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ +#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TADD_MAX_INDEX 16 +#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TMSG_MAX_INDEX 16 +#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TUADD_MAX_INDEX 16 +#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ +#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 +#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ 
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_VFQF_HENA_MAX_INDEX 1 +#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) +#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) +#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) +#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define I40E_VFQF_HLUT_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) +#define I40E_VFQF_HLUT_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) +#define I40E_VFQF_HLUT_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) +#define I40E_VFQF_HLUT_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) +#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_VFQF_HREGION_MAX_INDEX 7 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) 
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) +#endif diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h new file mode 100644 index 000000000..7fa7a4191 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h @@ -0,0 +1,100 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_STATUS_H_ +#define _I40E_STATUS_H_ + +/* Error Codes */ +enum i40e_status_code { + I40E_SUCCESS = 0, + I40E_ERR_NVM = -1, + I40E_ERR_NVM_CHECKSUM = -2, + I40E_ERR_PHY = -3, + I40E_ERR_CONFIG = -4, + I40E_ERR_PARAM = -5, + I40E_ERR_MAC_TYPE = -6, + I40E_ERR_UNKNOWN_PHY = -7, + I40E_ERR_LINK_SETUP = -8, + I40E_ERR_ADAPTER_STOPPED = -9, + I40E_ERR_INVALID_MAC_ADDR = -10, + I40E_ERR_DEVICE_NOT_SUPPORTED = -11, + I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_INVALID_LINK_SETTINGS = -13, + I40E_ERR_AUTONEG_NOT_COMPLETE = -14, + I40E_ERR_RESET_FAILED = -15, + I40E_ERR_SWFW_SYNC = -16, + I40E_ERR_NO_AVAILABLE_VSI = -17, + I40E_ERR_NO_MEMORY = -18, + I40E_ERR_BAD_PTR = -19, + I40E_ERR_RING_FULL = -20, + I40E_ERR_INVALID_PD_ID = -21, + I40E_ERR_INVALID_QP_ID = -22, + I40E_ERR_INVALID_CQ_ID = -23, + I40E_ERR_INVALID_CEQ_ID = -24, + I40E_ERR_INVALID_AEQ_ID = -25, + I40E_ERR_INVALID_SIZE = -26, + I40E_ERR_INVALID_ARP_INDEX = -27, + I40E_ERR_INVALID_FPM_FUNC_ID = -28, + I40E_ERR_QP_INVALID_MSG_SIZE = -29, + I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, + I40E_ERR_INVALID_FRAG_COUNT = -31, + I40E_ERR_QUEUE_EMPTY = -32, + I40E_ERR_INVALID_ALIGNMENT = -33, + I40E_ERR_FLUSHED_QUEUE = -34, + I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, + I40E_ERR_INVALID_IMM_DATA_SIZE = -36, + I40E_ERR_TIMEOUT = -37, + I40E_ERR_OPCODE_MISMATCH = -38, + I40E_ERR_CQP_COMPL_ERROR = -39, + I40E_ERR_INVALID_VF_ID = -40, + I40E_ERR_INVALID_HMCFN_ID = -41, + I40E_ERR_BACKING_PAGE_ERROR = -42, + I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + I40E_ERR_INVALID_PBLE_INDEX = -44, + I40E_ERR_INVALID_SD_INDEX = -45, + I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, + I40E_ERR_INVALID_SD_TYPE = -47, + I40E_ERR_MEMCPY_FAILED = -48, + I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, + I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, + I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, + I40E_ERR_SRQ_ENABLED = -52, + I40E_ERR_ADMIN_QUEUE_ERROR = -53, + I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, + I40E_ERR_BUF_TOO_SHORT = -55, + I40E_ERR_ADMIN_QUEUE_FULL = -56, + I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, + I40E_ERR_BAD_IWARP_CQE = -58, + I40E_ERR_NVM_BLANK_MODE = -59, + I40E_ERR_NOT_IMPLEMENTED = -60, + I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, + I40E_ERR_DIAG_TEST_FAILED = -62, + I40E_ERR_NOT_READY = -63, + I40E_NOT_SUPPORTED = -64, + I40E_ERR_FIRMWARE_API_VERSION = -65, +}; + +#endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c new file mode 100644 index 000000000..458fbb421 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -0,0 +1,2003 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include +#include + +#include "i40evf.h" +#include "i40e_prototype.h" + +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) + +/** + * i40e_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, + struct i40e_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) + kfree(tx_buffer->raw_buf); + else + dev_kfree_skb_any(tx_buffer->skb); + + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +/** + * i40evf_clean_tx_ring - Free any empty Tx buffers + * @tx_ring: ring to be cleaned + **/ +void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)); +} + +/** + * i40evf_free_tx_resources - Free Tx resources per queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void i40evf_free_tx_resources(struct i40e_ring *tx_ring) +{ + i40evf_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** + * i40e_get_tx_pending - how many tx descriptors not processed + * @tx_ring: the ring of descriptors + * + * Since there is no access to the ring head register + * in XL710, we need to use our 
local copies + **/ +static u32 i40e_get_tx_pending(struct i40e_ring *ring) +{ + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +/** + * i40e_check_tx_hang - Is there a hang in the Tx queue + * @tx_ring: the ring of descriptors + **/ +static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) +{ + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = i40e_get_tx_pending(tx_ring); + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * PFC clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, + &tx_ring->state); + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { + /* update completed stats and disarm the hang check */ + tx_ring->tx_stats.tx_done_old = tx_done; + clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); + } + + return ret; +} + +#define WB_STRIDE 0x3 + +/** + * i40e_clean_tx_irq - Reclaim resources after transmit completes + * @tx_ring: tx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. 
the clean is finished) + **/ +static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +{ + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; + struct i40e_tx_desc *tx_desc; + unsigned int total_packets = 0; + unsigned int total_bytes = 0; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb */ + dev_kfree_skb_any(tx_buf->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; + + if (budget && + !((i & WB_STRIDE) == WB_STRIDE) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + else + tx_ring->arm_wb = false; + + if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" + " VSI <%d>\n" + " Tx Queue <%d>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n", + tx_ring->vsi->seid, + tx_ring->queue_index, + tx_ring->next_to_use, i); + dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->tx_bi[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + dev_info(tx_ring->dev, + "tx hang detected on queue %d, resetting adapter\n", + tx_ring->queue_index); + + tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if 
(unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return budget > 0; +} + +/** + * i40e_force_wb -Arm hardware to do a wb on noncache aligned descriptors + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), + val); +} + +/** + * i40e_set_new_dynamic_itr - Find new ITR level + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte counts during + * the last interrupt. The advantage of per interrupt computation + * is faster updates and more accurate ITR for the current traffic + * pattern. Constants in this function were computed based on + * theoretical maximum wire speed and thresholds were set based on + * testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +{ + enum i40e_latency_range new_latency_range = rc->latency_range; + u32 new_itr = rc->itr; + int bytes_per_int; + + if (rc->total_packets == 0 || !rc->itr) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (8000 ints/s) + */ + bytes_per_int = rc->total_bytes / rc->itr; + switch (rc->itr) { + case I40E_LOWEST_LATENCY: + if (bytes_per_int > 10) + new_latency_range = I40E_LOW_LATENCY; + break; + case I40E_LOW_LATENCY: + if (bytes_per_int > 20) + new_latency_range = I40E_BULK_LATENCY; + else if (bytes_per_int <= 10) + new_latency_range = I40E_LOWEST_LATENCY; + break; + case I40E_BULK_LATENCY: + if (bytes_per_int <= 20) + rc->latency_range = I40E_LOW_LATENCY; + break; + } + + switch (new_latency_range) { + case I40E_LOWEST_LATENCY: + new_itr = I40E_ITR_100K; + break; + case I40E_LOW_LATENCY: + new_itr = I40E_ITR_20K; + break; + case I40E_BULK_LATENCY: + new_itr = I40E_ITR_8K; + break; + default: + break; + } + + if (new_itr != rc->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * rc->itr) / + ((9 * new_itr) + rc->itr); + rc->itr = new_itr & I40E_MAX_ITR; + } + + rc->total_bytes = 0; + rc->total_packets = 0; +} + +/** + * i40e_update_dynamic_itr - Adjust ITR based on bytes per int + * @q_vector: the vector to adjust + **/ +static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) +{ + u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; + struct i40e_hw *hw = &q_vector->vsi->back->hw; + u32 reg_addr; + u16 old_itr; + + reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1); + old_itr = q_vector->rx.itr; + i40e_set_new_dynamic_itr(&q_vector->rx); + if (old_itr != q_vector->rx.itr) + wr32(hw, reg_addr, q_vector->rx.itr); + + reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1); + old_itr = 
q_vector->tx.itr; + i40e_set_new_dynamic_itr(&q_vector->tx); + if (old_itr != q_vector->tx.itr) + wr32(hw, reg_addr, q_vector->tx.itr); +} + +/** + * i40evf_setup_tx_descriptors - Allocate the Tx descriptors + * @tx_ring: the tx ring to set up + * + * Return 0 on success, negative on error + **/ +int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_bi) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + tx_ring->size += sizeof(u32); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + return -ENOMEM; +} + +/** + * i40evf_clean_rx_ring - Free Rx buffers + * @rx_ring: ring to be cleaned + **/ +void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_bi) + return; + + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->dma) { + dma_unmap_single(dev, + rx_bi->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + } + if (rx_bi->skb) { + dev_kfree_skb(rx_bi->skb); + rx_bi->skb = NULL; + } + if (rx_bi->page) { + if (rx_bi->page_dma) { + dma_unmap_page(dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + __free_page(rx_bi->page); + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + } + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * i40evf_free_rx_resources - Free Rx resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void i40evf_free_rx_resources(struct i40e_ring *rx_ring) +{ + i40evf_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +/** + * i40evf_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. 
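+ *
+ * Rough sketch of the layout this creates (illustrative numbers only,
+ * assuming rx_hdr_len = 96 and a 512-descriptor ring):
+ *   buf_size = ALIGN(96, 256) = 256 bytes per descriptor
+ *   one dma_alloc_coherent() of 256 * 512 = 128 KiB
+ *   rx_bi[i].hdr_buf = buffer + i * 256, rx_bi[i].dma = dma + i * 256
+ * so each header slot starts on its own 256-byte boundary and no two
+ * descriptors' header buffers share a cache line.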
+ **/ +void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. */ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** + * i40evf_setup_rx_descriptors - Allocate Rx descriptors + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int bi_size; + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_bi) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) + ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) + : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + rx_ring->size); + goto err; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + return -ENOMEM; +} + +/** + * i40e_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
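+ *
+ * (The descriptor is a union: the same bytes hold the "read" format
+ * programmed here, pkt_addr/hdr_addr, and the "wb" write-back format
+ * the hardware fills in on completion, so both addresses must be
+ * re-programmed every time the buffer is re-armed.)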
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + struct sk_buff *skb; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + skb = bi->skb; + + if (!skb) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + goto no_buffers; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + bi->skb = skb; + } + + if (!bi->dma) { + bi->dma = dma_map_single(rx_ring->dev, + skb->data, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, bi->dma)) { + rx_ring->rx_stats.alloc_buff_failed++; + bi->dma = 0; + goto no_buffers; + } + } + + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; + struct i40e_vsi *vsi = rx_ring->vsi; + u64 flags = vsi->back->flags; + + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + if (flags & I40E_FLAG_IN_NETPOLL) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum + * @vsi: the VSI we care about + * @skb: skb currently being received and modified + * @rx_status: status value of last descriptor in packet + * @rx_error: error value of last descriptor in packet + * @rx_ptype: ptype value of last descriptor in packet + **/ +static inline void i40e_rx_checksum(struct i40e_vsi *vsi, + struct sk_buff *skb, + u32 rx_status, + u32 rx_error, + u16 rx_ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); + bool ipv4 = false, ipv6 = false; + bool ipv4_tunnel, ipv6_tunnel; + __wsum rx_udp_csum; + struct iphdr *iph; + __sum16 csum; + + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + + skb->ip_summed = CHECKSUM_NONE; + + /* Rx csum enabled and ip headers found? */ + if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + return; + + /* did the hardware decode the packet and checksum? 
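+ *
+ * The checks below: L3L4P must be set, the ptype must decode to a
+ * known outer IP header, IPv4 header errors (IPE/EIPE) or L4 errors
+ * (L4E) count as checksum failures, while IPv6 extension headers
+ * (IPV6EXADD) and packets the hardware could not keep up with (PPRS)
+ * simply fall back to a software checksum.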
*/ + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + return; + + /* both known and outer_ip must be set for the below code to work */ + if (!(decoded.known && decoded.outer_ip)) + return; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) + ipv4 = true; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) + ipv6 = true; + + if (ipv4 && + (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | + (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) + goto checksum_fail; + + /* likely incorrect csum if alternate IP extension headers found */ + if (ipv6 && + rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + /* don't increment checksum err here, non-fatal err */ + return; + + /* there was some L4 error, count error and punt packet to the stack */ + if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + goto checksum_fail; + + /* handle packets that were not able to be checksummed due + * to arrival speed, in this case the stack can compute + * the csum. + */ + if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) + return; + + /* If VXLAN traffic has an outer UDPv4 checksum we need to check + * it in the driver, hardware does not do it for us. + * Since L3L4P bit was set we assume a valid IHL value (>=5) + * so the total length of IPv4 header is IHL*4 bytes + * The UDP_0 bit *may* bet set if the *inner* header is UDP + */ + if (ipv4_tunnel) { + skb->transport_header = skb->mac_header + + sizeof(struct ethhdr) + + (ip_hdr(skb)->ihl * 4); + + /* Add 4 bytes for VLAN tagged packets */ + skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + ? VLAN_HLEN : 0; + + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && + (udp_hdr(skb)->check != 0)) { + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + (skb->len - + skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); + + if (udp_hdr(skb)->check != csum) + goto checksum_fail; + + } /* else its GRE and so no outer UDP header */ + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ipv4_tunnel || ipv6_tunnel; + + return; + +checksum_fail: + vsi->back->hw_csum_rx_error++; +} + +/** + * i40e_rx_hash - returns the hash value from the Rx descriptor + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline u32 i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc) +{ + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if ((ring->netdev->features & NETIF_F_RXHASH) && + (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) + return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + else + return 0; +} + +/** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + + if (!decoded.known) + return PKT_HASH_TYPE_NONE; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + else + return PKT_HASH_TYPE_L2; +} + +/** + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet 
split + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. the clean is finished) + **/ +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + const int current_node = numa_node_id(); + struct i40e_vsi *vsi = rx_ring->vsi; + u16 i = rx_ring->next_to_clean; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u8 rx_ptype; + u64 qword; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + dma_rmb(); + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> + I40E_RXD_QW1_LENGTH_HBUF_SHIFT; + rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> + I40E_RXD_QW1_LENGTH_SPH_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); + rx_bi->skb = NULL; + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; + if (rx_hbo) + len = I40E_RX_HDR_SIZE; + else + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? 
+ skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; + } + + /* Get the rest of the data if this was a header split */ + if (rx_packet_len) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset, + rx_packet_len); + + skb->len += rx_packet_len; + skb->data_len += rx_packet_len; + skb->truesize += rx_packet_len; + + if ((page_count(rx_bi->page) == 1) && + (page_to_nid(rx_bi->page) == current_node)) + get_page(rx_bi->page); + else + rx_bi->page = NULL; + + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + struct i40e_rx_buffer *next_buffer; + + next_buffer = &rx_ring->rx_bi[i]; + next_buffer->skb = skb; + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? + */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * 
DD bit is set. + */ + dma_rmb(); + + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? + */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + * + * Returns the amount of work done + **/ +int i40evf_napi_poll(struct napi_struct *napi, int budget) +{ + struct i40e_q_vector *q_vector = + container_of(napi, struct i40e_q_vector, napi); + struct i40e_vsi *vsi = q_vector->vsi; + struct i40e_ring *ring; + bool clean_complete = true; + bool arm_wb = false; + int budget_per_ring; + int cleaned; + + if (test_bit(__I40E_DOWN, &vsi->state)) { + napi_complete(napi); + return 0; + } + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + i40e_for_each_ring(ring, q_vector->tx) { + clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + arm_wb |= ring->arm_wb; + } + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. 
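+ *
+ * (Example: with the usual NAPI budget of 64 and a q_vector serving 4
+ * ring pairs, each rx ring may clean max(64 / 4, 1) = 16 descriptors
+ * this pass; the clamp to 1 matters once num_ringpairs exceeds the
+ * budget and the integer division would otherwise return 0.)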
+ */ + budget_per_ring = max(budget/q_vector->num_ringpairs, 1); + + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + if (arm_wb) + i40e_force_wb(vsi, q_vector); + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete(napi); + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || + ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + i40e_update_dynamic_itr(q_vector); + + if (!test_bit(__I40E_DOWN, &vsi->state)) + i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx); + + return 0; +} + +/** + * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * @skb: send buffer + * @tx_ring: ring to send buffer on + * @flags: the tx flags to be set + * + * Checks the skb and set up correspondingly several generic transmit flags + * related to VLAN tagging for the HW, such as VLAN, DCB, etc. + * + * Returns error code indicate the frame should be dropped upon error and the + * otherwise returns 0 to indicate the flags has been set properly. + **/ +static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) +{ + __be16 protocol = skb->protocol; + u32 tx_flags = 0; + + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. + */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + + /* if we have a HW VLAN tag being added, default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN, check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + return -EINVAL; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_SW_VLAN; + } + +out: + *flags = tx_flags; + return 0; +} + +/** + * i40e_tso - set up the tso context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + * @tx_flags: the collected send information + * @protocol: the send protocol + * @hdr_len: ptr to the size of the packet header + * @cd_tunneling: ptr to context descriptor bits + * + * Returns 0 if no TSO can happen, 1 if tso is going, or error + **/ +static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) +{ + u32 cd_cmd, cd_tso_len, cd_mss; + struct ipv6hdr *ipv6h; + struct tcphdr *tcph; + struct iphdr *iph; + u32 l4len; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? 
inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + } else if (ipv6h->version == 6) { + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + } + + l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = (skb->encapsulation + ? (skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb)) + l4len; + + /* find the field values */ + cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_tso_len = skb->len - *hdr_len; + cd_mss = skb_shinfo(skb)->gso_size; + *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((u64)cd_tso_len << + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + return 1; +} + +/** + * i40e_tx_enable_csum - Enable Tx checksum offloads + * @skb: send buffer + * @tx_flags: Tx flags currently set + * @td_cmd: Tx descriptor command bits to set + * @td_offset: Tx descriptor header offsets to set + * @cd_tunneling: ptr to context desc bits + **/ +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) +{ + struct ipv6hdr *this_ipv6_hdr; + unsigned int this_tcp_hdrlen; + struct iphdr *this_ip_hdr; + u32 network_hdr_len; + u8 l4_hdr = 0; + u32 l4_tunnel = 0; + + if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } + network_hdr_len = skb_inner_network_header_len(skb); + this_ip_hdr = inner_ip_hdr(skb); + this_ipv6_hdr = inner_ipv6_hdr(skb); + this_tcp_hdrlen = inner_tcp_hdrlen(skb); + + if (tx_flags & I40E_TX_FLAGS_IPV4) { + + if (tx_flags & I40E_TX_FLAGS_TSO) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + ip_hdr(skb)->check = 0; + } else { + *cd_tunneling |= + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + } + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) + ip_hdr(skb)->check = 0; + } + + /* Now set the ctx descriptor fields */ + *cd_tunneling |= (skb_network_header_len(skb) >> 2) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | + ((skb_inner_network_offset(skb) - + skb_transport_offset(skb)) >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } + + + } else { + network_hdr_len = skb_network_header_len(skb); + this_ip_hdr = ip_hdr(skb); + this_ipv6_hdr = ipv6_hdr(skb); + this_tcp_hdrlen = tcp_hdrlen(skb); + } + + /* Enable IP checksum offloads */ + if (tx_flags & I40E_TX_FLAGS_IPV4) { + l4_hdr = this_ip_hdr->protocol; + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. 
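+ *
+ * (Worked example for the td_offset encoding a few lines below,
+ * assuming a plain Ethernet + IPv4 + TCP frame with no options:
+ * MACLEN = 14 >> 1 = 7 words, IPLEN = 20 >> 2 = 5 dwords,
+ * L4LEN = 20 >> 2 = 5 dwords.)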
+ */ + if (tx_flags & I40E_TX_FLAGS_TSO) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; + this_ip_hdr->check = 0; + } else { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; + } + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + l4_hdr = this_ipv6_hdr->nexthdr; + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } + /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ + *td_offset |= (skb_network_offset(skb) >> 1) << + I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L4 checksum offloads */ + switch (l4_hdr) { + case IPPROTO_TCP: + /* enable checksum offloads */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (this_tcp_hdrlen >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_SCTP: + /* enable SCTP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_UDP: + /* enable UDP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + default: + break; + } +} + +/** + * i40e_create_tx_ctx Build the Tx context descriptor + * @tx_ring: ring to create the descriptor on + * @cd_type_cmd_tso_mss: Quad Word 1 + * @cd_tunneling: Quad Word 0 - bits 0-31 + * @cd_l2tag2: Quad Word 0 - bits 32-63 + **/ +static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, + const u64 cd_type_cmd_tso_mss, + const u32 cd_tunneling, const u32 cd_l2tag2) +{ + struct i40e_tx_context_desc *context_desc; + int i = tx_ring->next_to_use; + + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) + return; + + /* grab the next descriptor */ + context_desc = I40E_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* cpu_to_le32 and assign to struct fields */ + context_desc->tunneling_params = cpu_to_le32(cd_tunneling); + context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); + context_desc->rsvd = cpu_to_le16(0); + context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); +} + + /** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. 
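+ *
+ * Illustration, using I40E_MAX_BUFFER_TXD (8): a non-TSO skb with 8 or
+ * more fragments is always linearized; a TSO skb with 32 fragments and
+ * gso_segs = 4 trips the quick DIV_ROUND_UP(32 + 4, 4) = 9 > 8 test,
+ * while one whose fragments are spread so that no single segment ever
+ * needs more than 8 descriptors is left as-is.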
+ **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 0; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 1 : 0; + } + if (j == I40E_MAX_BUFFER_TXD) { + linearize = true; + break; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + +/** + * i40e_tx_map - Build the Tx descriptor + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @first: first buffer info buffer to use + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * @td_cmd: the command field in the descriptor + * @td_offset: offset for checksum or crc + **/ +static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) +{ + unsigned int data_len = skb->data_len; + unsigned int size = skb_headlen(skb); + struct skb_frag_struct *frag; + struct i40e_tx_buffer *tx_bi; + struct i40e_tx_desc *tx_desc; + u16 i = tx_ring->next_to_use; + u32 td_tag = 0; + dma_addr_t dma; + u16 gso_segs; + + if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { + td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> + I40E_TX_FLAGS_VLAN_SHIFT; + } + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) + gso_segs = skb_shinfo(skb)->gso_segs; + else + gso_segs = 1; + + /* multiply data chunks by size of headers */ + first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); + first->gso_segs = gso_segs; + first->skb = skb; + first->tx_flags = tx_flags; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_desc = I40E_TX_DESC(tx_ring, i); + tx_bi = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + tx_desc->buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, + I40E_MAX_DATA_PER_TXD, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += I40E_MAX_DATA_PER_TXD; + size -= I40E_MAX_DATA_PER_TXD; + + tx_desc->buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, + size, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_bi[i]; + } + + /* Place 
RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. + */ +#define WB_STRIDE 0x3 + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } + + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* notify HW of packet */ + writel(i, tx_ring->tail); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_bi[i]; + i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +/** + * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40e_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40e_maybe_stop_tx(tx_ring, size); +} + +/** + * i40e_xmit_descriptor_count - calculate number of tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. 
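+ *
+ * Illustration with this driver's constants: TXD_USE_COUNT(S) is
+ * DIV_ROUND_UP(S, I40E_MAX_DATA_PER_TXD), i.e. one descriptor per 8192
+ * bytes, so a 1500-byte linear skb counts 1 and a TSO skb with a small
+ * header plus sixteen 4 KiB page fragments counts 1 + 16 = 17; on top
+ * of that the caller asks i40e_maybe_stop_tx() for the 4-descriptor
+ * gap plus 1 context descriptor.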
+ **/ +static int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) +{ + unsigned int f; + int count = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + count += TXD_USE_COUNT(skb_headlen(skb)); + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; + return 0; + } + return count; +} + +/** + * i40e_xmit_frame_ring - Sends buffer on Tx ring + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, + struct i40e_ring *tx_ring) +{ + u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; + u32 cd_tunneling = 0, cd_l2tag2 = 0; + struct i40e_tx_buffer *first; + u32 td_offset = 0; + u32 tx_flags = 0; + __be16 protocol; + u32 td_cmd = 0; + u8 hdr_len = 0; + int tso; + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + return NETDEV_TX_BUSY; + + /* prepare the xmit flags */ + if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + goto out_drop; + + /* obtain protocol of skb */ + protocol = vlan_get_protocol(skb); + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + + /* setup IPv4/IPv6 offloads */ + if (protocol == htons(ETH_P_IP)) + tx_flags |= I40E_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + tx_flags |= I40E_TX_FLAGS_IPV6; + + tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + &cd_type_cmd_tso_mss, &cd_tunneling); + + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= I40E_TX_FLAGS_TSO; + + if (i40e_chk_linearize(skb, tx_flags)) + if (skb_linearize(skb)) + goto out_drop; + + skb_tx_timestamp(skb); + + /* always enable CRC insertion offload */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + + /* Always offload the checksum, since it's in the data descriptor */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + tx_flags |= I40E_TX_FLAGS_CSUM; + + i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + } + + i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, + cd_tunneling, cd_l2tag2); + + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); + + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (unlikely(skb->len < I40E_MIN_TX_LEN)) { + if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) + return NETDEV_TX_OK; + skb->len = I40E_MIN_TX_LEN; + skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); + } + + return i40e_xmit_frame_ring(skb, tx_ring); +} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h new file mode 100644 index 
000000000..1e49bb1fb --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -0,0 +1,301 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TXRX_H_ +#define _I40E_TXRX_H_ + +/* Interrupt Throttling and Rate Limiting Goodies */ + +#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ +#define I40E_ITR_100K 0x0005 +#define I40E_ITR_20K 0x0019 +#define I40E_ITR_8K 0x003E +#define I40E_ITR_4K 0x007A +#define I40E_ITR_RX_DEF I40E_ITR_8K +#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ +#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ +#define I40E_DEFAULT_IRQ_WORK 256 +#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) +#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) +#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) + +#define I40E_QUEUE_END_OF_LIST 0x7FF + +/* this enum matches hardware bits and is meant to be used by DYN_CTLN + * registers and QINT registers or more generally anywhere in the manual + * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any + * register but instead is a special value meaning "don't update" ITR0/1/2. 
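+ *
+ * (For scale: the ITR_* values defined above are written to the ITRN
+ * register selected by one of these indexes in 2-usec units, so
+ * I40E_ITR_8K = 0x3E programs 124 usec between interrupts, roughly
+ * 8000 ints/s, and I40E_ITR_100K = 0x05 programs 10 usec, roughly
+ * 100000 ints/s.)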
+ */ +enum i40e_dyn_idx_t { + I40E_IDX_ITR0 = 0, + I40E_IDX_ITR1 = 1, + I40E_IDX_ITR2 = 2, + I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +}; + +/* these are indexes into ITRN registers */ +#define I40E_RX_ITR I40E_IDX_ITR0 +#define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_PE_ITR I40E_IDX_ITR2 + +/* Supported RSS offloads */ +#define I40E_DEFAULT_RSS_HENA ( \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +/* Supported Rx Buffer Sizes */ +#define I40E_RXBUFFER_512 512 /* Used for packet split */ +#define I40E_RXBUFFER_2048 2048 +#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ +#define I40E_RXBUFFER_4096 4096 +#define I40E_RXBUFFER_8192 8192 +#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ + +/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, + * this adds up to 512 bytes of extra data meaning the smallest allocation + * we could have is 1K. + * i.e. RXBUFFER_512 --> size-1024 slab + */ +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + +#define I40E_RX_NEXT_DESC(r, i, n) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + (n) = I40E_RX_DESC((r), (i)); \ + } while (0) + +#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ + do { \ + I40E_RX_NEXT_DESC((r), (i), (n)); \ + prefetch((n)); \ + } while (0) + +#define i40e_rx_desc i40e_32byte_rx_desc + +#define I40E_MAX_BUFFER_TXD 8 +#define I40E_MIN_TX_LEN 17 +#define I40E_MAX_DATA_PER_TXD 8192 + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define I40E_MIN_DESC_PENDING 4 + +#define I40E_TX_FLAGS_CSUM (u32)(1) +#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) +#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) +#define I40E_TX_FLAGS_TSO (u32)(1 << 3) +#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) +#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) +#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) +#define I40E_TX_FLAGS_FSO (u32)(1 << 7) +#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 +#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define I40E_TX_FLAGS_VLAN_SHIFT 16 + +struct i40e_tx_buffer { + struct i40e_tx_desc *next_to_watch; + unsigned long time_stamp; + union { + struct sk_buff *skb; + void *raw_buf; + }; + unsigned int bytecount; + unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct i40e_rx_buffer { + struct sk_buff *skb; + void *hdr_buf; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; + unsigned int page_offset; +}; + +struct i40e_queue_stats { + u64 packets; + u64 bytes; +}; + +struct i40e_tx_queue_stats { + 
u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct i40e_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buff_failed; +}; + +enum i40e_ring_state_t { + __I40E_TX_FDIR_INIT_DONE, + __I40E_TX_XPS_INIT_DONE, + __I40E_TX_DETECT_HANG, + __I40E_HANG_CHECK_ARMED, + __I40E_RX_PS_ENABLED, + __I40E_RX_16BYTE_DESC_ENABLED, +}; + +#define ring_is_ps_enabled(ring) \ + test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define set_ring_ps_enabled(ring) \ + set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define clear_ring_ps_enabled(ring) \ + clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define ring_is_16byte_desc_enabled(ring) \ + test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define set_ring_16byte_desc_enabled(ring) \ + set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define clear_ring_16byte_desc_enabled(ring) \ + clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) + +/* struct that defines a descriptor ring, associated with a VSI */ +struct i40e_ring { + struct i40e_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + union { + struct i40e_tx_buffer *tx_bi; + struct i40e_rx_buffer *rx_bi; + }; + unsigned long state; + u16 queue_index; /* Queue number of ring */ + u8 dcb_tc; /* Traffic class of ring */ + u8 __iomem *tail; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + u16 rx_hdr_len; + u16 rx_buf_len; + u8 dtype; +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 + u8 hsplit; +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + u8 atr_sample_rate; + u8 atr_count; + + bool ring_active; /* is ring online or not */ + bool arm_wb; /* do something to arm write back */ + + /* stats structs */ + struct i40e_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct i40e_tx_queue_stats tx_stats; + struct i40e_rx_queue_stats rx_stats; + }; + + unsigned int size; /* length of descriptor ring in bytes */ + dma_addr_t dma; /* physical address of ring */ + + struct i40e_vsi *vsi; /* Backreference to associated VSI */ + struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + + struct rcu_head rcu; /* to avoid race on free */ +} ____cacheline_internodealigned_in_smp; + +enum i40e_latency_range { + I40E_LOWEST_LATENCY = 0, + I40E_LOW_LATENCY = 1, + I40E_BULK_LATENCY = 2, +}; + +struct i40e_ring_container { + /* array of pointers to rings */ + struct i40e_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; + enum i40e_latency_range latency_range; + u16 itr; +}; + +/* iterator for handling rings in ring container */ +#define i40e_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 
cleaned_count); +void i40evf_alloc_rx_headers(struct i40e_ring *rxr); +netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); +void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); +int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring); +int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); +void i40evf_free_tx_resources(struct i40e_ring *tx_ring); +void i40evf_free_rx_resources(struct i40e_ring *rx_ring); +int i40evf_napi_poll(struct napi_struct *napi, int budget); +#endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h new file mode 100644 index 000000000..ec9d83a93 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -0,0 +1,1250 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TYPE_H_ +#define _I40E_TYPE_H_ + +#include "i40e_status.h" +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) (mask << shift) + +#define I40E_MAX_VSI_QP 16 +#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_CHAINED_RX_BUFFERS 5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 + +/* Max default timeout in ms, */ +#define I40E_MAX_NVM_TIMEOUT 18000 + +/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ +#define I40E_MS_TO_GTIME(time) ((time) * 1000) + +/* forward declaration */ +struct i40e_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); + +/* Data type manipulation macros. */ + +#define I40E_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. + */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_X710, + I40E_MAC_XL710, + I40E_MAC_VF, + I40E_MAC_GENERIC, +}; + +enum i40e_media_type { + I40E_MEDIA_TYPE_UNKNOWN = 0, + I40E_MEDIA_TYPE_FIBER, + I40E_MEDIA_TYPE_BASET, + I40E_MEDIA_TYPE_BACKPLANE, + I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, + I40E_MEDIA_TYPE_VIRTUAL +}; + +enum i40e_fc_mode { + I40E_FC_NONE = 0, + I40E_FC_RX_PAUSE, + I40E_FC_TX_PAUSE, + I40E_FC_FULL, + I40E_FC_PFC, + I40E_FC_DEFAULT +}; + +enum i40e_set_fc_aq_failures { + I40E_SET_FC_AQ_FAIL_NONE = 0, + I40E_SET_FC_AQ_FAIL_GET = 1, + I40E_SET_FC_AQ_FAIL_SET = 2, + I40E_SET_FC_AQ_FAIL_UPDATE = 4, + I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1, + I40E_VSI_VMDQ2, + I40E_VSI_CTRL, + I40E_VSI_FCOE, + I40E_VSI_MIRROR, + I40E_VSI_SRIOV, + I40E_VSI_FDIR, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +struct i40e_link_status { + enum i40e_aq_phy_type phy_type; + enum i40e_aq_link_speed link_speed; + u8 link_info; + u8 an_info; + u8 ext_info; + u8 loopback; + /* is Link Status Event notification to SW enabled */ + bool lse_enable; + u16 max_frame_size; + bool crc_enable; + u8 pacing; + u8 requested_speeds; +}; + +struct i40e_phy_info { + struct i40e_link_status link_info; + struct i40e_link_status link_info_old; + u32 autoneg_advertised; + u32 phy_id; + u32 module_type; + bool get_link_info; + enum i40e_media_type media_type; +}; + +#define I40E_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + u32 switch_mode; +#define I40E_NVM_IMAGE_TYPE_EVB 0x0 +#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 +#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + + u32 management_mode; + u32 npar_enable; + u32 os2bmc; + u32 valid_functions; + bool sr_iov_1_1; + bool vmdq; + bool evb_802_1_qbg; /* Edge Virtual Bridging */ + bool evb_802_1_qbh; /* Bridge Port Extension */ + bool dcb; + bool fcoe; + bool iscsi; /* Indicates iSCSI 
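
[Editor's note] The I40E_DESC_UNUSED() macro completed just above reports how many descriptors can still be filled, always leaving one slot empty so a full ring can be told apart from an empty one. A worked example against a hypothetical struct of my own that carries only the three fields the macro touches:

struct demo_ring { u16 count, next_to_use, next_to_clean; };

static u16 demo_desc_unused(void)
{
	struct demo_ring r = { .count = 512, .next_to_use = 10, .next_to_clean = 4 };

	/* clean (4) is not ahead of use (10), so count is added back:
	 * 512 + 4 - 10 - 1 = 505 free descriptors.
	 */
	return I40E_DESC_UNUSED(&r);
}
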
enabled */ + bool mfp_mode_1; + bool mgmt_cem; + bool ieee_1588; + bool iwarp; + bool fd; + u32 fd_filters_guaranteed; + u32 fd_filters_best_effort; + bool rss; + u32 rss_table_size; + u32 rss_table_entry_width; + bool led[I40E_HW_CAP_MAX_GPIO]; + bool sdp[I40E_HW_CAP_MAX_GPIO]; + u32 nvm_image_type; + u32 num_flow_director_filters; + u32 num_vfs; + u32 vf_base_id; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors; + u32 num_msix_vectors_vf; + u32 led_pin_num; + u32 sdp_pin_num; + u32 mdio_port_num; + u32 mdio_port_mode; + u8 rx_buf_chain_len; + u32 enabled_tcmap; + u32 maxtc; + u64 wr_csr_prot; +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum i40e_aq_resources_ids { + I40E_NVM_RESOURCE_ID = 1 +}; + +enum i40e_aq_resource_access_type { + I40E_RESOURCE_READ = 1, + I40E_RESOURCE_WRITE +}; + +struct i40e_nvm_info { + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ + u32 timeout; /* [ms] */ + u16 sr_size; /* Shadow RAM size in words */ + bool blank_nvm_mode; /* is NVM empty (no FW present)*/ + u16 version; /* NVM package version */ + u32 eetrack; /* NVM data version */ +}; + +/* definitions used in NVM update support */ + +enum i40e_nvmupd_cmd { + I40E_NVMUPD_INVALID, + I40E_NVMUPD_READ_CON, + I40E_NVMUPD_READ_SNT, + I40E_NVMUPD_READ_LCB, + I40E_NVMUPD_READ_SA, + I40E_NVMUPD_WRITE_ERA, + I40E_NVMUPD_WRITE_CON, + I40E_NVMUPD_WRITE_SNT, + I40E_NVMUPD_WRITE_LCB, + I40E_NVMUPD_WRITE_SA, + I40E_NVMUPD_CSUM_CON, + I40E_NVMUPD_CSUM_SA, + I40E_NVMUPD_CSUM_LCB, +}; + +enum i40e_nvmupd_state { + I40E_NVMUPD_STATE_INIT, + I40E_NVMUPD_STATE_READING, + I40E_NVMUPD_STATE_WRITING +}; + +/* nvm_access definition and its masks/shifts need to be accessible to + * application, core driver, and shared code. Where is the right file? 
+ */ +#define I40E_NVM_READ 0xB +#define I40E_NVM_WRITE 0xC + +#define I40E_NVM_MOD_PNT_MASK 0xFF + +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 + +#define I40E_NVM_ADAPT_SHIFT 16 +#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) + +#define I40E_NVMUPD_MAX_DATA 4096 +#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ + +struct i40e_nvm_access { + u32 command; + u32 config; + u32 offset; /* in bytes */ + u32 data_size; /* in bytes */ + u8 data[1]; +}; + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths */ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; +}; + +/* Flow control (FC) parameters */ +struct i40e_fc_info { + enum i40e_fc_mode current_mode; /* FC mode in effect */ + enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DCBX_MAX_APPS 32 +#define I40E_LLDPDU_SIZE 1500 + +/* IEEE 802.1Qaz ETS Configuration data */ +struct i40e_ieee_ets_config { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* IEEE 802.1Qaz ETS Recommendation data */ +struct i40e_ieee_ets_recommend { + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* IEEE 802.1Qaz PFC Configuration data */ +struct i40e_ieee_pfc_config { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcenable; +}; + +/* IEEE 802.1Qaz Application Priority data */ +struct i40e_ieee_app_priority_table { + u8 priority; + u8 selector; + u16 protocolid; +}; + +struct i40e_dcbx_config { + u32 numapps; + struct i40e_ieee_ets_config etscfg; + struct i40e_ieee_ets_recommend etsrec; + struct i40e_ieee_pfc_config pfc; + struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; +}; + +/* Port hardware description */ +struct i40e_hw { + u8 __iomem *hw_addr; + void *back; + + /* subsystem structs */ + struct i40e_phy_info phy; + struct i40e_mac_info mac; + struct i40e_bus_info bus; + struct i40e_nvm_info nvm; + struct i40e_fc_info fc; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 port; + bool adapter_stopped; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + struct i40e_hw_capabilities func_caps; + + /* Flow Director shared filter space */ + u16 
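
[Editor's note] The masks above imply a layout for the config word of struct i40e_nvm_access: module pointer in bits 7:0 and transaction type in bits 11:8. A hedged sketch of composing a single-transaction ("SA") read request under that assumption; the helper name is hypothetical and the real command handling lives in the shared NVM-update code:

static void demo_fill_nvm_read(struct i40e_nvm_access *cmd, u8 module,
			       u32 offset, u32 len)
{
	cmd->command   = I40E_NVM_READ;
	cmd->config    = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) |
			 (module & I40E_NVM_MOD_PNT_MASK);
	cmd->offset    = offset;	/* in bytes */
	cmd->data_size = len;		/* in bytes, at most I40E_NVMUPD_MAX_DATA */
}
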
fdir_shared_filter_count; + + /* device profile info */ + u8 pf_id; + u16 main_vsi_seid; + + /* for multi-function MACs */ + u16 partition_id; + u16 num_partitions; + u16 num_ports; + + /* Closest numa node to the device */ + u16 numa_node; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* state of nvm update process */ + enum i40e_nvmupd_state nvmupd_state; + + /* HMC info */ + struct i40e_hmc_info hmc; /* HMC info struct */ + + /* LLDP/DCBX Status */ + u16 dcbx_status; + + /* DCBX info */ + struct i40e_dcbx_config local_dcbx_config; + struct i40e_dcbx_config remote_dcbx_config; + + /* debug mask */ + u32 debug_mask; +}; + +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + +struct i40e_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! 
*/ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ + << I40E_RXD_QW1_STATUS_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ + I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 
0, + I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ + I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + 
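
[Editor's note] All of the Rx status, error, packet-type and length fields above share one 64-bit writeback word, so consuming them is always the same mask-then-shift step. A sketch of how a clean-up routine might crack qword1; the helper is illustrative, the shipping Rx path does the equivalent inline:

static bool demo_parse_rx_qword1(union i40e_32byte_rx_desc *rx_desc,
				 u32 *status, u16 *pkt_len, u8 *ptype)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

	*status  = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		   I40E_RXD_QW1_STATUS_SHIFT;
	*pkt_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		   I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
	*ptype   = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		   I40E_RXD_QW1_PTYPE_SHIFT;

	/* DD (descriptor done) is bit 0 of the status field */
	return *status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT);
}
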
I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + I40E_TX_DESC_CMD_FCOET = 0x0080, + I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define I40E_TXD_QW1_OFFSET_SHIFT 16 +#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + I40E_TXD_QW1_OFFSET_SHIFT) + +enum i40e_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define I40E_TXD_QW1_L2TAG1_SHIFT 48 +#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct i40e_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) + +#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 +#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) + +enum i40e_tx_ctx_desc_cmd_bits { + I40E_TX_CTX_DESC_TSO = 0x01, + I40E_TX_CTX_DESC_TSYN = 0x02, + I40E_TX_CTX_DESC_IL2TAG2 = 0x04, + I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, + I40E_TX_CTX_DESC_SWPE = 0x40 +}; + +#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 +#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + I40E_TXD_CTX_QW1_MSS_SHIFT) + +#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 +#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) + +#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + I40E_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum i40e_tx_ctx_desc_eipt_offload { + I40E_TX_CTX_EXT_IP_NONE = 0x0, + I40E_TX_CTX_EXT_IP_IPV6 = 0x1, + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + I40E_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 +#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define 
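
[Editor's note] The Tx data descriptor packs dtype, command bits, header offsets, buffer size and L2 tag into its second quad word using the shift/mask pairs above. A sketch of that packing; the helper name is mine, but the transmit path builds the same value when it fills cmd_type_offset_bsz:

static __le64 demo_build_tx_qw1(u32 td_cmd, u32 td_offset,
				unsigned int size, u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

/* e.g. the last fragment of a frame, with CRC offload and a completion
 * writeback requested:
 *	demo_build_tx_qw1(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP |
 *			  I40E_TX_DESC_CMD_RS, 0, len, 0);
 */
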
I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ + I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK + +#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + I40E_TXD_CTX_QW0_NATLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + I40E_TXD_CTX_QW0_DECTTL_SHIFT) + +struct i40e_filter_program_desc { + __le32 qindex_flex_ptype_vsi; + __le32 rsvd; + __le32 dtype_cmd_cntindex; + __le32 fd_id; +}; +#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 +#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) +#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 +#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) +#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 +#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) + +/* Packet Classifier Types for filters */ +enum i40e_filter_pctype { + /* Note: Values 0-30 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-40 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OX = 48, + I40E_FILTER_PCTYPE_FCOE_RX = 49, + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +enum i40e_filter_program_desc_dest { + I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, +}; + +enum i40e_filter_program_desc_fd_status { + I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, +}; + +#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) + +#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 +#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) + +#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) + +enum i40e_filter_program_desc_pcmd { + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, +}; + +#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ + I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) + +#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) + +enum i40e_filter_type { + I40E_FLOW_DIRECTOR_FLTR = 0, + I40E_PE_QUAD_HASH_FLTR = 1, + I40E_ETHERTYPE_FLTR, + I40E_FCOE_CTX_FLTR, + I40E_MAC_VLAN_FLTR, + I40E_HASH_FLTR +}; + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +struct i40e_veb_context { + u16 seid; + u16 uplink_seid; + u16 veb_number; + u16 vebs_allocated; + u16 vebs_unallocated; + u16 flags; + struct i40e_aqc_get_veb_parameters_completion info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +/* Statistics collected by the MAC */ +struct i40e_hw_port_stats { + /* eth stats collected by the port */ + struct i40e_eth_stats eth; + + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_length_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_packet_dropped; /* mspdc */ + u64 checksum_error; /* xec */ + /* flow director stats */ + u64 fd_atr_match; + u64 fd_sb_match; + /* EEE LPI */ + u32 tx_lpi_status; + u32 rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ +}; + +/* Checksum and Shadow RAM pointers */ +#define I40E_SR_NVM_CONTROL_WORD 0x00 +#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 +#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_NVM_EETRACK_LO 0x2D +#define I40E_SR_NVM_EETRACK_HI 0x2E +#define I40E_SR_VPD_PTR 0x2F +#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define I40E_SR_SW_CHECKSUM_WORD 0x3F + +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 +#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 +#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 +#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) + +/* Shadow RAM related */ +#define 
I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define I40E_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. + */ +#define I40E_SR_SW_CHECKSUM_BASE 0xBABA + +#define I40E_SRRD_SRCTL_ATTEMPTS 100000 + +enum i40e_switch_element_types { + I40E_SWITCH_ELEMENT_TYPE_MAC = 1, + I40E_SWITCH_ELEMENT_TYPE_PF = 2, + I40E_SWITCH_ELEMENT_TYPE_VF = 3, + I40E_SWITCH_ELEMENT_TYPE_EMP = 4, + I40E_SWITCH_ELEMENT_TYPE_BMC = 6, + I40E_SWITCH_ELEMENT_TYPE_PE = 16, + I40E_SWITCH_ELEMENT_TYPE_VEB = 17, + I40E_SWITCH_ELEMENT_TYPE_PA = 18, + I40E_SWITCH_ELEMENT_TYPE_VSI = 19, +}; + +/* Supported EtherType filters */ +enum i40e_ether_type_index { + I40E_ETHER_TYPE_1588 = 0, + I40E_ETHER_TYPE_FIP = 1, + I40E_ETHER_TYPE_OUI_EXTENDED = 2, + I40E_ETHER_TYPE_MAC_CONTROL = 3, + I40E_ETHER_TYPE_LLDP = 4, + I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, + I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, + I40E_ETHER_TYPE_QCN_CNM = 7, + I40E_ETHER_TYPE_8021X = 8, + I40E_ETHER_TYPE_ARP = 9, + I40E_ETHER_TYPE_RSV1 = 10, + I40E_ETHER_TYPE_RSV2 = 11, +}; + +/* Filter context base size is 1K */ +#define I40E_HASH_FILTER_BASE_SIZE 1024 +/* Supported Hash filter values */ +enum i40e_hash_filter_size { + I40E_HASH_FILTER_SIZE_1K = 0, + I40E_HASH_FILTER_SIZE_2K = 1, + I40E_HASH_FILTER_SIZE_4K = 2, + I40E_HASH_FILTER_SIZE_8K = 3, + I40E_HASH_FILTER_SIZE_16K = 4, + I40E_HASH_FILTER_SIZE_32K = 5, + I40E_HASH_FILTER_SIZE_64K = 6, + I40E_HASH_FILTER_SIZE_128K = 7, + I40E_HASH_FILTER_SIZE_256K = 8, + I40E_HASH_FILTER_SIZE_512K = 9, + I40E_HASH_FILTER_SIZE_1M = 10, +}; + +/* DMA context base size is 0.5K */ +#define I40E_DMA_CNTX_BASE_SIZE 512 +/* Supported DMA context values */ +enum i40e_dma_cntx_size { + I40E_DMA_CNTX_SIZE_512 = 0, + I40E_DMA_CNTX_SIZE_1K = 1, + I40E_DMA_CNTX_SIZE_2K = 2, + I40E_DMA_CNTX_SIZE_4K = 3, + I40E_DMA_CNTX_SIZE_8K = 4, + I40E_DMA_CNTX_SIZE_16K = 5, + I40E_DMA_CNTX_SIZE_32K = 6, + I40E_DMA_CNTX_SIZE_64K = 7, + I40E_DMA_CNTX_SIZE_128K = 8, + I40E_DMA_CNTX_SIZE_256K = 9, +}; + +/* Supported Hash look up table (LUT) sizes */ +enum i40e_hash_lut_size { + I40E_HASH_LUT_SIZE_128 = 0, + I40E_HASH_LUT_SIZE_512 = 1, +}; + +/* Structure to hold a per PF filter control settings */ +struct i40e_filter_control_settings { + /* number of PE Quad Hash filter buckets */ + enum i40e_hash_filter_size pe_filt_num; + /* number of PE Quad Hash contexts */ + enum i40e_dma_cntx_size pe_cntx_num; + /* number of FCoE filter buckets */ + enum i40e_hash_filter_size fcoe_filt_num; + /* number of FCoE DDP contexts */ + enum i40e_dma_cntx_size fcoe_cntx_num; + /* size of the Hash LUT */ + enum i40e_hash_lut_size hash_lut_size; + /* enable FDIR filters for PF and its VFs */ + bool enable_fdir; + /* enable Ethertype filters for PF and its VFs */ + bool enable_ethtype; + /* enable MAC/VLAN filters for PF and its VFs */ + bool enable_macvlan; +}; + +/* Structure to hold device level control filter counts */ +struct i40e_control_filter_stats { + u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ + u16 etype_used; /* Used perfect EtherType filters */ + u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ + u16 etype_free; /* Un-used perfect EtherType filters */ +}; + +enum i40e_reset_type { + I40E_RESET_POR = 0, + I40E_RESET_CORER = 1, + I40E_RESET_GLOBR = 2, + I40E_RESET_EMPR = 3, +}; + +/* RSS Hash Table Size */ +#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 +#endif /* _I40E_TYPE_H_ */ diff --git 
a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h new file mode 100644 index 000000000..59f62f0e6 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -0,0 +1,362 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_VIRTCHNL_H_ +#define _I40E_VIRTCHNL_H_ + +#include "i40e_type.h" + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the various i40e drivers. + * + * Admin queue buffer usage: + * desc->opcode is always i40e_aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * Firmware copies the cookie fields when sending messages between the PF and + * VF, but uses all other fields internally. Due to this limitation, we + * must send all messages as "indirect", i.e. using an external buffer. + * + * All the vsi indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value is of + * i40e_status_code type, defined in the i40e_type.h. + * + * In general, VF driver initialization should roughly follow the order of these + * opcodes. The VF driver must first validate the API version of the PF driver, + * then request a reset, then get resources, then configure queues and + * interrupts. After these operations are complete, the VF driver may start + * its queues, optionally add MAC and VLAN filters, and process traffic. + */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum i40e_virtchnl_ops { +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. 
+ */ + I40E_VIRTCHNL_OP_UNKNOWN = 0, + I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, +}; + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct i40e_virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + i40e_status v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +/* Message descriptions and data structures.*/ + +/* I40E_VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define I40E_VIRTCHNL_VERSION_MAJOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR 0 +struct i40e_virtchnl_version_info { + u32 major; + u32 minor; +}; + +/* I40E_VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES + * VF sends this request to PF with no parameters + * PF responds with an indirect message containing + * i40e_virtchnl_vf_resource and one or more + * i40e_virtchnl_vsi_resource structures. + */ + +struct i40e_virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum i40e_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; +/* VF offload flags */ +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 + +struct i40e_virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 max_fcoe_contexts; + u32 max_fcoe_filters; + + struct i40e_virtchnl_vsi_resource vsi_res[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of i40e_virtchnl_txq_info. + * PF configures requested queue and returns a status code. 
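
[Editor's note] The initialization order described in the header comment, spelled out as the opcodes a VF would issue in sequence. Purely illustrative; the real sequencing is driven by the init and watchdog tasks in i40evf_main.c:

static const enum i40e_virtchnl_ops demo_vf_bringup_order[] = {
	I40E_VIRTCHNL_OP_VERSION,	    /* agree on the protocol version */
	I40E_VIRTCHNL_OP_RESET_VF,	    /* request a clean slate */
	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,  /* learn VSIs, queues, vectors */
	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, /* program Tx/Rx queue pairs */
	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,    /* map queues to MSI-X vectors */
	I40E_VIRTCHNL_OP_ENABLE_QUEUES,	    /* start traffic */
	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, /* optional: MAC filters */
	I40E_VIRTCHNL_OP_ADD_VLAN,	    /* optional: VLAN filters */
};
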
+ */ + +/* Tx queue config info */ +struct i40e_virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; + u64 dma_ring_addr; + u64 dma_headwb_addr; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of i40e_virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct i40e_virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; + u32 databuffer_size; + u32 max_pkt_size; + u64 dma_ring_addr; + enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct i40e_virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct i40e_virtchnl_txq_info txq; + struct i40e_virtchnl_rxq_info rxq; +}; + +struct i40e_virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + struct i40e_virtchnl_queue_pair_info qpair[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct i40e_virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +struct i40e_virtchnl_irq_map_info { + u16 num_vectors; + struct i40e_virtchnl_vector_map vecmap[1]; +}; + +/* I40E_VIRTCHNL_OP_ENABLE_QUEUES + * I40E_VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct i40e_virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct i40e_virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +struct i40e_virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct i40e_virtchnl_ether_addr list[1]; +}; + +/* I40E_VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* I40E_VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. 
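
[Editor's note] Because rx_queues and tx_queues are bitmaps, enabling the first N queue pairs of a VSI is just a matter of filling i40e_virtchnl_queue_select and sending it as the external buffer of an I40E_VIRTCHNL_OP_ENABLE_QUEUES message. A sketch with an illustrative helper name:

static void demo_fill_enable_queues(struct i40e_virtchnl_queue_select *vqs,
				    u16 vsi_id, unsigned int num_queues)
{
	vqs->vsi_id    = vsi_id;
	vqs->rx_queues = BIT(num_queues) - 1;	/* e.g. 4 queues -> 0xf */
	vqs->tx_queues = vqs->rx_queues;
}
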
+ * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct i40e_virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. + */ +struct i40e_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 +#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* I40E_VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct i40e_eth_stats in an external buffer. + */ + +/* I40E_VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum i40e_virtchnl_event_codes { + I40E_VIRTCHNL_EVENT_UNKNOWN = 0, + I40E_VIRTCHNL_EVENT_LINK_CHANGE, + I40E_VIRTCHNL_EVENT_RESET_IMPENDING, + I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; +#define I40E_PF_EVENT_SEVERITY_INFO 0 +#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct i40e_virtchnl_pf_event { + enum i40e_virtchnl_event_codes event; + union { + struct { + enum i40e_aq_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +/* VF reset states - these are written into the RSTAT register: + * I40E_VFGEN_RSTAT1 on the PF + * I40E_VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum i40e_vfr_states { + I40E_VFR_INPROGRESS = 0, + I40E_VFR_COMPLETED, + I40E_VFR_VFACTIVE, + I40E_VFR_UNKNOWN, +}; + +#endif /* _I40E_VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h new file mode 100644 index 000000000..1b98c25b3 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -0,0 +1,300 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
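
[Editor's note] A sketch of the polling check that the reset-state comment above describes. It assumes the rd32() accessor from i40e_osdep.h and the VFGEN_RSTAT register and its 2-bit state-field mask from i40e_register.h; note that a mid-reset read of 0xDEADBEEF masks down to 3, i.e. I40E_VFR_UNKNOWN, so the VF simply keeps polling:

static bool demo_vf_reset_complete(struct i40e_hw *hw)
{
	u32 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	return rstat == I40E_VFR_VFACTIVE || rstat == I40E_VFR_COMPLETED;
}
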
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40EVF_H_ +#define _I40EVF_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e_type.h" +#include "i40e_virtchnl.h" +#include "i40e_txrx.h" + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 +#define PFX "i40evf: " +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __func__ , ## args))) + +/* dummy struct to make common code less painful */ +struct i40e_vsi { + struct i40evf_adapter *back; + struct net_device *netdev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 seid; + u16 id; + unsigned long state; + int base_vector; + u16 work_limit; + /* high bit set means dynamic, use accessor routines to read/write. + * hardware only supports 2us resolution for the ITR registers. + * these values always store the USER setting, and must be converted + * before programming to a register. + */ + u16 rx_itr_setting; + u16 tx_itr_setting; +}; + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40EVF_DEFAULT_TXD 512 +#define I40EVF_DEFAULT_RXD 512 +#define I40EVF_MAX_TXD 4096 +#define I40EVF_MIN_TXD 64 +#define I40EVF_MAX_RXD 4096 +#define I40EVF_MIN_RXD 64 +#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 + +/* Supported Rx Buffer Sizes */ +#define I40EVF_RXBUFFER_64 64 /* Used for packet split */ +#define I40EVF_RXBUFFER_128 128 /* Used for packet split */ +#define I40EVF_RXBUFFER_256 256 /* Used for packet split */ +#define I40EVF_RXBUFFER_2048 2048 +#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ +#define I40EVF_MAX_AQ_BUF_SIZE 4096 +#define I40EVF_AQ_LEN 32 +#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */ + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) +#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) +#define I40E_TX_CTXTDESC(R, i) \ + (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES MAX_RX_QUEUES + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct i40e_q_vector { + struct i40evf_adapter *adapter; + struct i40e_vsi *vsi; + struct napi_struct napi; + unsigned long reg_idx; + struct i40e_ring_container rx; + struct i40e_ring_container tx; + u32 ring_mask; + u8 num_ringpairs; /* total number of ring pairs in vector */ + int v_idx; /* vector index in list */ + char name[IFNAMSIZ + 9]; + cpumask_var_t affinity_mask; +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the i40e hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +#define I40EVF_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
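
[Editor's note] Worked example of the EITR conversion macro above, using plain integer arithmetic:

/*
 *   EITR_INTS_PER_SEC_TO_REG(20000)
 *	= 1000000000 / (20000 * 256)
 *	= 1000000000 / 5120000
 *	= 195
 *
 * and pushing 195 back through the same expression yields roughly
 * 20032 interrupts/sec, which is why _TO_REG and _TO_INTS_PER_SEC can
 * share one definition.
 */
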
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define I40EVF_RX_DESC_ADV(R, i) \ + (&(((union i40e_adv_rx_desc *)((R).desc))[i])) +#define I40EVF_TX_DESC_ADV(R, i) \ + (&(((union i40e_adv_tx_desc *)((R).desc))[i])) +#define I40EVF_TX_CTXTDESC_ADV(R, i) \ + (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i])) + +#define OTHER_VECTOR 1 +#define NONQ_VECS (OTHER_VECTOR) + +#define MAX_MSIX_Q_VECTORS 4 +#define MAX_MSIX_COUNT 5 + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) + +#define I40EVF_QUEUE_END_OF_LIST 0x7FF +#define I40EVF_FREE_VECTOR 0x7FFF +struct i40evf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +struct i40evf_vlan_filter { + struct list_head list; + u16 vlan; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +/* Driver state. The order of these is important! */ +enum i40evf_state_t { + __I40EVF_STARTUP, /* driver loaded, probe complete */ + __I40EVF_REMOVE, /* driver is being unloaded */ + __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ + __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __I40EVF_INIT_SW, /* got resources, setting up structs */ + __I40EVF_RESETTING, /* in reset */ + /* Below here, watchdog is running */ + __I40EVF_DOWN, /* ready, can be opened */ + __I40EVF_TESTING, /* in ethtool self-test */ + __I40EVF_RUNNING, /* opened, working */ +}; + +enum i40evf_critical_section_t { + __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ +}; +/* make common code happy */ +#define __I40E_DOWN __I40EVF_DOWN + +/* board specific private data structure */ +struct i40evf_adapter { + struct timer_list watchdog_timer; + struct work_struct reset_task; + struct work_struct adminq_task; + struct delayed_work init_task; + struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + struct list_head vlan_filter_list; + char misc_vector_name[IFNAMSIZ + 9]; + int num_active_queues; + + /* TX */ + struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; + u32 tx_timeout_count; + struct list_head mac_filter_list; + u32 tx_desc_count; + + /* RX */ + struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + struct msix_entry *msix_entries; + + u32 flags; +#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) +#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) +#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) +#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3) +#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4) +#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) +#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) +#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) +#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8) +#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9) +#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10) +/* duplcates for common code */ +#define I40E_FLAG_FDIR_ATR_ENABLED 0 +#define I40E_FLAG_DCB_ENABLED 0 +#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL +#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED + /* flags for admin queue service task */ + u32 aq_required; +#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) +#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) +#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) +#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) +#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) +#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) +#define 
I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) +#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) +#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + + struct i40e_hw hw; /* defined in i40e_type.h */ + + enum i40evf_state_t state; + unsigned long crit_section; + + struct work_struct watchdog_task; + bool netdev_registered; + bool link_up; + enum i40e_virtchnl_ops current_op; + struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ + struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + u16 msg_enable; + struct i40e_eth_stats current_stats; + struct i40e_vsi vsi; + u32 aq_wait_count; +}; + + +/* needed by i40evf_ethtool.c */ +extern char i40evf_driver_name[]; +extern const char i40evf_driver_version[]; + +int i40evf_up(struct i40evf_adapter *adapter); +void i40evf_down(struct i40evf_adapter *adapter); +void i40evf_reinit_locked(struct i40evf_adapter *adapter); +void i40evf_reset(struct i40evf_adapter *adapter); +void i40evf_set_ethtool_ops(struct net_device *netdev); +void i40evf_update_stats(struct i40evf_adapter *adapter); +void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter); +int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter); +void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask); +void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); +void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); + +void i40e_napi_add_all(struct i40evf_adapter *adapter); +void i40e_napi_del_all(struct i40evf_adapter *adapter); + +int i40evf_send_api_ver(struct i40evf_adapter *adapter); +int i40evf_verify_api_ver(struct i40evf_adapter *adapter); +int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter); +int i40evf_get_vf_config(struct i40evf_adapter *adapter); +void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush); +void i40evf_configure_queues(struct i40evf_adapter *adapter); +void i40evf_deconfigure_queues(struct i40evf_adapter *adapter); +void i40evf_enable_queues(struct i40evf_adapter *adapter); +void i40evf_disable_queues(struct i40evf_adapter *adapter); +void i40evf_map_queues(struct i40evf_adapter *adapter); +void i40evf_add_ether_addrs(struct i40evf_adapter *adapter); +void i40evf_del_ether_addrs(struct i40evf_adapter *adapter); +void i40evf_add_vlans(struct i40evf_adapter *adapter); +void i40evf_del_vlans(struct i40evf_adapter *adapter); +void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); +void i40evf_request_stats(struct i40evf_adapter *adapter); +void i40evf_request_reset(struct i40evf_adapter *adapter); +void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, u8 *msg, u16 msglen); +#endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c new file mode 100644 index 000000000..f4e77665b --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -0,0 +1,722 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* ethtool support for i40evf */ +#include "i40evf.h" + +#include + +struct i40evf_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define I40EVF_STAT(_name, _stat) { \ + .stat_string = _name, \ + .stat_offset = offsetof(struct i40evf_adapter, _stat) \ +} + +/* All stats are u64, so we don't need to track the size of the field. */ +static const struct i40evf_stats i40evf_gstrings_stats[] = { + I40EVF_STAT("rx_bytes", current_stats.rx_bytes), + I40EVF_STAT("rx_unicast", current_stats.rx_unicast), + I40EVF_STAT("rx_multicast", current_stats.rx_multicast), + I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), + I40EVF_STAT("rx_discards", current_stats.rx_discards), + I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), + I40EVF_STAT("tx_bytes", current_stats.tx_bytes), + I40EVF_STAT("tx_unicast", current_stats.tx_unicast), + I40EVF_STAT("tx_multicast", current_stats.tx_multicast), + I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast), + I40EVF_STAT("tx_discards", current_stats.tx_discards), + I40EVF_STAT("tx_errors", current_stats.tx_errors), +}; + +#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) +#define I40EVF_QUEUE_STATS_LEN(_dev) \ + (((struct i40evf_adapter *)\ + netdev_priv(_dev))->num_active_queues \ + * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) +#define I40EVF_STATS_LEN(_dev) \ + (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) + +/** + * i40evf_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings. Because this is a VF, we don't know what + * kind of link we really have, so we fake it. + **/ +static int i40evf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + /* In the future the VF will be able to query the PF for + * some information - for now use a dummy value + */ + ecmd->supported = 0; + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_DUMMY1; + ecmd->port = PORT_NONE; + + return 0; +} + +/** + * i40evf_get_sset_count - Get length of string set + * @netdev: network interface device structure + * @sset: id of string set + * + * Reports size of string table. This driver only supports + * strings for statistics. + **/ +static int i40evf_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset == ETH_SS_STATS) + return I40EVF_STATS_LEN(netdev); + else + return -EINVAL; +} + +/** + * i40evf_get_ethtool_stats - report device statistics + * @netdev: network interface device structure + * @stats: ethtool statistics structure + * @data: pointer to data buffer + * + * All statistics are added to the data buffer as an array of u64. 
+ **/ +static void i40evf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int i, j; + char *p; + + for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; + data[i] = *(u64 *)p; + } + for (j = 0; j < adapter->num_active_queues; j++) { + data[i++] = adapter->tx_rings[j]->stats.packets; + data[i++] = adapter->tx_rings[j]->stats.bytes; + } + for (j = 0; j < adapter->num_active_queues; j++) { + data[i++] = adapter->rx_rings[j]->stats.packets; + data[i++] = adapter->rx_rings[j]->stats.bytes; + } +} + +/** + * i40evf_get_strings - Get string set + * @netdev: network interface device structure + * @sset: id of string set + * @data: buffer for string data + * + * Builds stats string table. + **/ +static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + if (sset == ETH_SS_STATS) { + for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, i40evf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_active_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_active_queues; i++) { + snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); + p += ETH_GSTRING_LEN; + } + } +} + +/** + * i40evf_get_msglevel - Get debug message level + * @netdev: network interface device structure + * + * Returns current debug message level. + **/ +static u32 i40evf_get_msglevel(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +/** + * i40evf_set_msglevel - Set debug message level + * @netdev: network interface device structure + * @data: message level + * + * Set current debug message level. Higher values cause the driver to + * be noisier. + **/ +static void i40evf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (I40E_DEBUG_USER & data) + adapter->hw.debug_mask = data; + adapter->msg_enable = data; +} + +/** + * i40evf_get_drvinfo - Get driver info + * @netdev: network interface device structure + * @drvinfo: ethool driver info structure + * + * Returns information about the driver and device for display to the user. + **/ +static void i40evf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, i40evf_driver_name, 32); + strlcpy(drvinfo->version, i40evf_driver_version, 32); + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); +} + +/** + * i40evf_get_ringparam - Get ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Returns current ring parameters. TX and RX rings are reported separately, + * but the number of rings is not reported. 
+ **/ +static void i40evf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = I40EVF_MAX_RXD; + ring->tx_max_pending = I40EVF_MAX_TXD; + ring->rx_pending = adapter->rx_desc_count; + ring->tx_pending = adapter->tx_desc_count; +} + +/** + * i40evf_set_ringparam - Set ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Sets ring parameters. TX and RX rings are controlled separately, but the + * number of rings is not specified, so all rings get the same settings. + **/ +static int i40evf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + I40EVF_MIN_TXD, + I40EVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + I40EVF_MIN_RXD, + I40EVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_desc_count) && + (new_rx_count == adapter->rx_desc_count)) + return 0; + + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + + if (netif_running(netdev)) + i40evf_reinit_locked(adapter); + + return 0; +} + +/** + * i40evf_get_coalesce - Get interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Returns current coalescing settings. This is referred to elsewhere in the + * driver as Interrupt Throttle Rate, as this is how the hardware describes + * this functionality. + **/ +static int i40evf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_vsi *vsi = &adapter->vsi; + + ec->tx_max_coalesced_frames = vsi->work_limit; + ec->rx_max_coalesced_frames = vsi->work_limit; + + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) + ec->use_adaptive_rx_coalesce = 1; + + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + ec->use_adaptive_tx_coalesce = 1; + + ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + + return 0; +} + +/** + * i40evf_set_coalesce - Set interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Change current coalescing settings. 
+ **/ +static int i40evf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + struct i40e_vsi *vsi = &adapter->vsi; + struct i40e_q_vector *q_vector; + int i; + + if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + vsi->work_limit = ec->tx_max_coalesced_frames_irq; + + if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && + (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) + vsi->rx_itr_setting = ec->rx_coalesce_usecs; + + else + return -EINVAL; + + if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && + (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) + vsi->tx_itr_setting = ec->tx_coalesce_usecs; + else if (ec->use_adaptive_tx_coalesce) + vsi->tx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); + else + return -EINVAL; + + if (ec->use_adaptive_rx_coalesce) + vsi->rx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + + if (ec->use_adaptive_tx_coalesce) + vsi->tx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; + + for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) { + q_vector = adapter->q_vector[i]; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr); + i40e_flush(hw); + } + + return 0; +} + +/** + * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type + * @adapter: board private structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow is supported, else Invalid Input. + **/ +static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct i40e_hw *hw = &adapter->hw; + u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | + ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); + + /* We always hash on IP src and dest addresses */ + cmd->data = RXH_IP_SRC | RXH_IP_DST; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + break; + + case TCP_V6_FLOW: + if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + break; + default: + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +/** + * i40evf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the command is supported. 
+ **/ +static int i40evf_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_active_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = i40evf_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +/** + * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash + * @adapter: board private structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the flow input set is supported. + **/ +static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + struct i40e_hw *hw = &adapter->hw; + + u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | + ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + break; + default: + return -EINVAL; + } + break; + case TCP_V6_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + break; + default: + return -EINVAL; + } + break; + case UDP_V4_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + break; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + break; + case IPV4_FLOW: + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4); + break; + case IPV6_FLOW: + hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + break; + default: + return -EINVAL; + } + + wr32(hw, I40E_VFQF_HENA(0), (u32)hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + i40e_flush(hw); + + return 0; +} + +/** + * i40evf_set_rxnfc - command to set 
RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns Success if the command is supported. + **/ +static int i40evf_set_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = i40evf_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +/** + * i40evf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @ch: channel information structure + * + * For the purposes of our device, we only use combined channels, i.e. a tx/rx + * queue pair. Report one extra channel to match our "other" MSI-X vector. + **/ +static void i40evf_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = adapter->num_active_queues; + + ch->max_other = NONQ_VECS; + ch->other_count = NONQ_VECS; + + ch->combined_count = adapter->num_active_queues; +} + +/** + * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) +{ + return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; +} + +/** + * i40evf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * + * Reads the indirection table directly from the hardware. Always returns 0. + **/ +static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + u32 hlut_val; + int i, j; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + + if (indir) { + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); + indir[j++] = hlut_val & 0xff; + indir[j++] = (hlut_val >> 8) & 0xff; + indir[j++] = (hlut_val >> 16) & 0xff; + indir[j++] = (hlut_val >> 24) & 0xff; + } + } + return 0; +} + +/** + * i40evf_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * + * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * returns 0 after programming the table. 
+ **/ +static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + u32 hlut_val; + int i, j; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + hlut_val = indir[j++]; + hlut_val |= indir[j++] << 8; + hlut_val |= indir[j++] << 16; + hlut_val |= indir[j++] << 24; + wr32(hw, I40E_VFQF_HLUT(i), hlut_val); + } + + return 0; +} + +static const struct ethtool_ops i40evf_ethtool_ops = { + .get_settings = i40evf_get_settings, + .get_drvinfo = i40evf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = i40evf_get_ringparam, + .set_ringparam = i40evf_set_ringparam, + .get_strings = i40evf_get_strings, + .get_ethtool_stats = i40evf_get_ethtool_stats, + .get_sset_count = i40evf_get_sset_count, + .get_msglevel = i40evf_get_msglevel, + .set_msglevel = i40evf_set_msglevel, + .get_coalesce = i40evf_get_coalesce, + .set_coalesce = i40evf_set_coalesce, + .get_rxnfc = i40evf_get_rxnfc, + .set_rxnfc = i40evf_set_rxnfc, + .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, + .get_rxfh = i40evf_get_rxfh, + .set_rxfh = i40evf_set_rxfh, + .get_channels = i40evf_get_channels, +}; + +/** + * i40evf_set_ethtool_ops - Initialize ethtool ops struct + * @netdev: network interface device structure + * + * Sets ethtool ops struct in our netdev so that ethtool can call + * our functions. + **/ +void i40evf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &i40evf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c new file mode 100644 index 000000000..7c53aca4b --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -0,0 +1,2561 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40evf.h" +#include "i40e_prototype.h" +static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); +static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); +static int i40evf_close(struct net_device *netdev); + +char i40evf_driver_name[] = "i40evf"; +static const char i40evf_driver_string[] = + "Intel(R) XL710/X710 Virtual Function Network Driver"; + +#define DRV_VERSION "1.2.25" +const char i40evf_driver_version[] = DRV_VERSION; +static const char i40evf_copyright[] = + "Copyright (c) 2013 - 2014 Intel Corporation."; + +/* i40evf_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id i40evf_pci_tbl[] = { + {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/** + * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + u64 size, u32 alignment) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + + if (!mem) + return I40E_ERR_PARAM; + + mem->size = ALIGN(size, alignment); + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, + (dma_addr_t *)&mem->pa, GFP_KERNEL); + if (mem->va) + return 0; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40evf_free_dma_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + + if (!mem || !mem->va) + return I40E_ERR_PARAM; + dma_free_coherent(&adapter->pdev->dev, mem->size, + mem->va, (dma_addr_t)mem->pa); + return 0; +} + +/** + * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + **/ +i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, + struct i40e_virt_mem *mem, u32 size) +{ + if (!mem) + return I40E_ERR_PARAM; + + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (mem->va) + return 0; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40evf_free_virt_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, + struct i40e_virt_mem *mem) +{ + if (!mem) + return I40E_ERR_PARAM; + + /* it's ok to kfree a NULL pointer */ + kfree(mem->va); + + return 0; +} + +/** + * i40evf_debug_d - OS dependent version of debug printing + * @hw: pointer to the HW structure + * @mask: debug level mask + * @fmt_str: printf-type format description + **/ +void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) 
+{ + char buf[512]; + va_list argptr; + + if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) + return; + + va_start(argptr, fmt_str); + vsnprintf(buf, sizeof(buf), fmt_str, argptr); + va_end(argptr); + + /* the debug string is already formatted with a newline */ + pr_info("%s", buf); +} + +/** + * i40evf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void i40evf_tx_timeout(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + adapter->tx_timeout_count++; + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } +} + +/** + * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + + wr32(hw, I40E_VFINT_DYN_CTL01, 0); + + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); + + synchronize_irq(adapter->msix_entries[0].vector); +} + +/** + * i40evf_misc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); + + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void i40evf_irq_disable(struct i40evf_adapter *adapter) +{ + int i; + struct i40e_hw *hw = &adapter->hw; + + if (!adapter->msix_entries) + return; + + for (i = 1; i < adapter->num_msix_vectors; i++) { + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); + synchronize_irq(adapter->msix_entries[i].vector); + } + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_irq_enable_queues - Enable interrupt for specified queues + * @adapter: board private structure + * @mask: bitmap of queues to enable + **/ +void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + + for (i = 1; i < adapter->num_msix_vectors; i++) { + if (mask & (1 << (i - 1))) { + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), + I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + } + } +} + +/** + * i40evf_fire_sw_int - Generate SW interrupt for specified vectors + * @adapter: board private structure + * @mask: bitmap of vectors to trigger + **/ +static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + uint32_t dyn_ctl; + + if (mask & 1) { + dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); + dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; + wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); + } + for (i = 1; i < adapter->num_msix_vectors; i++) { + if (mask & (1 << i)) { + dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); + dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); + } + } +} + +/** + * i40evf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +void i40evf_irq_enable(struct 
i40evf_adapter *adapter, bool flush) +{ + struct i40e_hw *hw = &adapter->hw; + + i40evf_misc_irq_enable(adapter); + i40evf_irq_enable_queues(adapter, ~0); + + if (flush) + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_msix_aq - Interrupt handler for vector 0 + * @irq: interrupt number + * @data: pointer to netdev + **/ +static irqreturn_t i40evf_msix_aq(int irq, void *data) +{ + struct net_device *netdev = data; + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + u32 val; + u32 ena_mask; + + /* handle non-queue interrupts */ + val = rd32(hw, I40E_VFINT_ICR01); + ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + + + val = rd32(hw, I40E_VFINT_DYN_CTL01); + val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; + wr32(hw, I40E_VFINT_DYN_CTL01, val); + + /* schedule work on the private workqueue */ + schedule_work(&adapter->adminq_task); + + return IRQ_HANDLED; +} + +/** + * i40evf_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * i40evf_map_vector_to_rxq - associate irqs with rx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void +i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) +{ + struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; + struct i40e_ring *rx_ring = adapter->rx_rings[r_idx]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + rx_ring->vsi = &adapter->vsi; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; + q_vector->rx.latency_range = I40E_LOW_LATENCY; +} + +/** + * i40evf_map_vector_to_txq - associate irqs with tx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @t_idx: queue number + **/ +static void +i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) +{ + struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; + struct i40e_ring *tx_ring = adapter->tx_rings[t_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + tx_ring->vsi = &adapter->vsi; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + q_vector->tx.latency_range = I40E_LOW_LATENCY; + q_vector->num_ringpairs++; + q_vector->ring_mask |= (1 << t_idx); +} + +/** + * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) +{ + int q_vectors; + int v_start = 0; + int rxr_idx = 0, txr_idx = 0; + int rxr_remaining = adapter->num_active_queues; + int txr_remaining = adapter->num_active_queues; + int i, j; + int rqpv, tqpv; + int err = 0; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + /* The ideal configuration... + * We have enough vectors to map one per queue. 
+ */ + if (q_vectors == (rxr_remaining * 2)) { + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) + i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx); + + for (; txr_idx < txr_remaining; v_start++, txr_idx++) + i40evf_map_vector_to_txq(adapter, v_start, txr_idx); + goto out; + } + + /* If we don't have enough vectors for a 1-to-1 + * mapping, we'll have to group them so there are + * multiple queues per vector. + * Re-adjusting *qpv takes care of the remainder. + */ + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); + for (j = 0; j < rqpv; j++) { + i40evf_map_vector_to_rxq(adapter, i, rxr_idx); + rxr_idx++; + rxr_remaining--; + } + } + for (i = v_start; i < q_vectors; i++) { + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); + for (j = 0; j < tqpv; j++) { + i40evf_map_vector_to_txq(adapter, i, txr_idx); + txr_idx++; + txr_remaining--; + } + } + +out: + adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + + return err; +} + +/** + * i40evf_request_traffic_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * Allocates MSI-X vectors for tx and rx handling, and requests + * interrupts from the kernel. + **/ +static int +i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) +{ + int vector, err, q_vectors; + int rx_int_idx = 0, tx_int_idx = 0; + + i40evf_irq_disable(adapter); + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (vector = 0; vector < q_vectors; vector++) { + struct i40e_q_vector *q_vector = adapter->q_vector[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "i40evf-%s-%s-%d", basename, + "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "i40evf-%s-%s-%d", basename, + "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "i40evf-%s-%s-%d", basename, + "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq( + adapter->msix_entries[vector + NONQ_VECS].vector, + i40evf_msix_clean_rings, + 0, + q_vector->name, + q_vector); + if (err) { + dev_info(&adapter->pdev->dev, + "%s: request_irq failed, error: %d\n", + __func__, err); + goto free_queue_irqs; + } + /* assign the mask for this irq */ + irq_set_affinity_hint( + adapter->msix_entries[vector + NONQ_VECS].vector, + q_vector->affinity_mask); + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_set_affinity_hint( + adapter->msix_entries[vector + NONQ_VECS].vector, + NULL); + free_irq(adapter->msix_entries[vector + NONQ_VECS].vector, + adapter->q_vector[vector]); + } + return err; +} + +/** + * i40evf_request_misc_irq - Initialize MSI-X interrupts + * @adapter: board private structure + * + * Allocates MSI-X vector 0 and requests interrupts from the kernel. This + * vector is only for the admin queue, and stays active even when the netdev + * is closed. 
+ **/ +static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + snprintf(adapter->misc_vector_name, + sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + dev_name(&adapter->pdev->dev)); + err = request_irq(adapter->msix_entries[0].vector, + &i40evf_msix_aq, 0, + adapter->misc_vector_name, netdev); + if (err) { + dev_err(&adapter->pdev->dev, + "request_irq for %s failed: %d\n", + adapter->misc_vector_name, err); + free_irq(adapter->msix_entries[0].vector, netdev); + } + return err; +} + +/** + * i40evf_free_traffic_irqs - Free MSI-X interrupts + * @adapter: board private structure + * + * Frees all MSI-X vectors other than 0. + **/ +static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) +{ + int i; + int q_vectors; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (i = 0; i < q_vectors; i++) { + irq_set_affinity_hint(adapter->msix_entries[i+1].vector, + NULL); + free_irq(adapter->msix_entries[i+1].vector, + adapter->q_vector[i]); + } +} + +/** + * i40evf_free_misc_irq - Free MSI-X miscellaneous vector + * @adapter: board private structure + * + * Frees MSI-X vector 0. + **/ +static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->msix_entries[0].vector, netdev); +} + +/** + * i40evf_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void i40evf_configure_tx(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_active_queues; i++) + adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); +} + +/** + * i40evf_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void i40evf_configure_rx(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i; + int rx_buf_len; + + + adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE; + adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; + + /* Decide whether to use packet split mode or not */ + if (netdev->mtu > ETH_DATA_LEN) { + if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE) + adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; + else + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; + } else { + if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE) + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; + else + adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; + } + + /* Set the RX buffer length according to the mode */ + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + rx_buf_len = I40E_RX_HDR_SIZE; + } else { + if (netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = I40EVF_RXBUFFER_2048; + else + rx_buf_len = ALIGN(max_frame, 1024); + } + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); + adapter->rx_rings[i]->rx_buf_len = rx_buf_len; + } +} + +/** + * i40evf_find_vlan - Search filter list for specific vlan filter + * @adapter: board private structure + * @vlan: vlan tag + * + * Returns ptr to the filter object or NULL + **/ +static struct +i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f; + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (vlan == f->vlan) + return f; + } + return NULL; +} + +/** + * i40evf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. 
+ **/ +static struct +i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f = NULL; + int count = 50; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) + goto out; + } + + f = i40evf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + INIT_LIST_HEAD(&f->list); + list_add(&f->list, &adapter->vlan_filter_list); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + } + +clearout: + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +out: + return f; +} + +/** + * i40evf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f; + int count = 50; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) + return; + } + + f = i40evf_find_vlan(adapter, vlan); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + } + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +} + +/** + * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device + * @netdev: network device struct + * @vid: VLAN tag + **/ +static int i40evf_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (i40evf_add_vlan(adapter, vid) == NULL) + return -ENOMEM; + return 0; +} + +/** + * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device + * @netdev: network device struct + * @vid: VLAN tag + **/ +static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + i40evf_del_vlan(adapter, vid); + return 0; +} + +/** + * i40evf_find_filter - Search filter list for specific mac filter + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL + **/ +static struct +i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, + u8 *macaddr) +{ + struct i40evf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + return NULL; +} + +/** + * i40e_add_filter - Add a mac filter to the filter list + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL when no memory available. 
+ **/ +static struct +i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, + u8 *macaddr) +{ + struct i40evf_mac_filter *f; + int count = 50; + + if (!macaddr) + return NULL; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) + return NULL; + } + + f = i40evf_find_filter(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) { + clear_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section); + return NULL; + } + + ether_addr_copy(f->macaddr, macaddr); + + list_add(&f->list, &adapter->mac_filter_list); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + } + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return f; +} + +/** + * i40evf_set_mac - NDO callback to set port mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_set_mac(struct net_device *netdev, void *p) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + struct i40evf_mac_filter *f; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) + return 0; + + f = i40evf_add_filter(adapter, addr->sa_data); + if (f) { + ether_addr_copy(hw->mac.addr, addr->sa_data); + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + } + + return (f == NULL) ? -ENOMEM : 0; +} + +/** + * i40evf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +static void i40evf_set_rx_mode(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40evf_mac_filter *f, *ftmp; + struct netdev_hw_addr *uca; + struct netdev_hw_addr *mca; + int count = 50; + + /* add addr if not already in the filter list */ + netdev_for_each_uc_addr(uca, netdev) { + i40evf_add_filter(adapter, uca->addr); + } + netdev_for_each_mc_addr(mca, netdev) { + i40evf_add_filter(adapter, mca->addr); + } + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) { + dev_err(&adapter->pdev->dev, + "Failed to get lock in %s\n", __func__); + return; + } + } + /* remove filter if not in netdev list */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + bool found = false; + + if (is_multicast_ether_addr(f->macaddr)) { + netdev_for_each_mc_addr(mca, netdev) { + if (ether_addr_equal(mca->addr, f->macaddr)) { + found = true; + break; + } + } + } else { + netdev_for_each_uc_addr(uca, netdev) { + if (ether_addr_equal(uca->addr, f->macaddr)) { + found = true; + break; + } + } + } + if (found) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + } + } + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +} + +/** + * i40evf_napi_enable_all - enable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) +{ + int q_idx; + struct i40e_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + + q_vector = adapter->q_vector[q_idx]; + napi = &q_vector->napi; + napi_enable(napi); + } +} + +/** + * i40evf_napi_disable_all - disable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void 
i40evf_napi_disable_all(struct i40evf_adapter *adapter) +{ + int q_idx; + struct i40e_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +/** + * i40evf_configure - set up transmit and receive data structures + * @adapter: board private structure + **/ +static void i40evf_configure(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + i40evf_set_rx_mode(netdev); + + i40evf_configure_tx(adapter); + i40evf_configure_rx(adapter); + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct i40e_ring *ring = adapter->rx_rings[i]; + + i40evf_alloc_rx_buffers_1buf(ring, ring->count); + ring->next_to_use = ring->count - 1; + writel(ring->next_to_use, ring->tail); + } +} + +/** + * i40evf_up_complete - Finish the last steps of bringing up a connection + * @adapter: board private structure + **/ +static int i40evf_up_complete(struct i40evf_adapter *adapter) +{ + adapter->state = __I40EVF_RUNNING; + clear_bit(__I40E_DOWN, &adapter->vsi.state); + + i40evf_napi_enable_all(adapter); + + adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + return 0; +} + +/** + * i40e_down - Shutdown the connection processing + * @adapter: board private structure + **/ +void i40evf_down(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct i40evf_mac_filter *f; + + if (adapter->state == __I40EVF_DOWN) + return; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + i40evf_napi_disable_all(adapter); + i40evf_irq_disable(adapter); + + /* remove all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->remove = true; + } + /* remove all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->remove = true; + } + if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __I40EVF_RESETTING) { + /* cancel any current operation */ + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + /* Schedule operations to close down the HW. Don't wait + * here for this to complete. The watchdog is still running + * and it will take care of this. + */ + adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; + } + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +} + +/** + * i40evf_acquire_msix_vectors - Setup the MSIX capability + * @adapter: board private structure + * @vectors: number of vectors to request + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns 0 on success, negative on failure + **/ +static int +i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) +{ + int err, vector_threshold; + + /* We'll want at least 3 (vector_threshold): + * 0) Other (Admin Queue and link, mostly) + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. 
+ */ + err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + if (err < 0) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n"); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; + } + + /* Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NONQ_VECS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = err; + return 0; +} + +/** + * i40evf_free_queues - Free memory for all rings + * @adapter: board private structure to initialize + * + * Free all of the memory associated with queue pairs. + **/ +static void i40evf_free_queues(struct i40evf_adapter *adapter) +{ + int i; + + if (!adapter->vsi_res) + return; + for (i = 0; i < adapter->num_active_queues; i++) { + if (adapter->tx_rings[i]) + kfree_rcu(adapter->tx_rings[i], rcu); + adapter->tx_rings[i] = NULL; + adapter->rx_rings[i] = NULL; + } +} + +/** + * i40evf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ +static int i40evf_alloc_queues(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct i40e_ring *tx_ring; + struct i40e_ring *rx_ring; + + tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); + if (!tx_ring) + goto err_out; + + tx_ring->queue_index = i; + tx_ring->netdev = adapter->netdev; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->count = adapter->tx_desc_count; + adapter->tx_rings[i] = tx_ring; + + rx_ring = &tx_ring[1]; + rx_ring->queue_index = i; + rx_ring->netdev = adapter->netdev; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->count = adapter->rx_desc_count; + adapter->rx_rings[i] = rx_ring; + } + + return 0; + +err_out: + i40evf_free_queues(adapter); + return -ENOMEM; +} + +/** + * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) +{ + int vector, v_budget; + int pairs = 0; + int err = 0; + + if (!adapter->vsi_res) { + err = -EIO; + goto out; + } + pairs = adapter->num_active_queues; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) twice the number of vectors as there are CPU's. + */ + v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; + v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); + + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + i40evf_acquire_msix_vectors(adapter, v_budget); + +out: + adapter->netdev->real_num_tx_queues = pairs; + return err; +} + +/** + * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
+ **/ +static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct i40e_q_vector *q_vector; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->vsi = &adapter->vsi; + q_vector->v_idx = q_idx; + netif_napi_add(adapter->netdev, &q_vector->napi, + i40evf_napi_poll, NAPI_POLL_WEIGHT); + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * i40evf_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + napi_vectors = adapter->num_active_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; + + adapter->q_vector[q_idx] = NULL; + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + +/** + * i40evf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) +{ + int err; + + err = i40evf_set_interrupt_capability(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = i40evf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + err = i40evf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->num_active_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_active_queues); + + return 0; +err_alloc_queues: + i40evf_free_q_vectors(adapter); +err_alloc_q_vectors: + i40evf_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * i40evf_watchdog_timer - Periodic call-back timer + * @data: pointer to adapter disguised as unsigned long + **/ +static void i40evf_watchdog_timer(unsigned long data) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)data; + + schedule_work(&adapter->watchdog_task); + /* timer will be rescheduled in watchdog task */ +} + +/** + * i40evf_watchdog_task - Periodic call-back task + * @work: pointer to work_struct + **/ +static void i40evf_watchdog_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + watchdog_task); + struct i40e_hw *hw = &adapter->hw; + uint32_t rstat_val; + + if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((rstat_val == I40E_VFR_VFACTIVE) || + (rstat_val == I40E_VFR_COMPLETED)) { + /* A chance for redemption! */ + dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); + adapter->state = __I40EVF_STARTUP; + adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + schedule_delayed_work(&adapter->init_task, 10); + clear_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section); + /* Don't reschedule the watchdog, since we've restarted + * the init task. When init_task contacts the PF and + * gets everything set up again, it'll restart the + * watchdog for us. Down, boy. Sit. Stay. Woof. + */ + return; + } + adapter->aq_required = 0; + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + goto watchdog_done; + } + + if ((adapter->state < __I40EVF_DOWN) || + (adapter->flags & I40EVF_FLAG_RESET_PENDING)) + goto watchdog_done; + + /* check for reset */ + rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && + (rstat_val != I40E_VFR_VFACTIVE) && + (rstat_val != I40E_VFR_COMPLETED)) { + adapter->state = __I40EVF_RESETTING; + adapter->flags |= I40EVF_FLAG_RESET_PENDING; + dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); + schedule_work(&adapter->reset_task); + adapter->aq_required = 0; + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + goto watchdog_done; + } + + /* Process admin queue tasks. After init, everything gets done + * here so we don't race on the admin queue. 
+ */ + if (adapter->current_op) { + if (!i40evf_asq_done(hw)) { + dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); + i40evf_send_api_ver(adapter); + } + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { + i40evf_disable_queues(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { + i40evf_map_queues(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) { + i40evf_add_ether_addrs(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) { + i40evf_add_vlans(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) { + i40evf_del_ether_addrs(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) { + i40evf_del_vlans(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { + i40evf_configure_queues(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) { + i40evf_enable_queues(adapter); + goto watchdog_done; + } + + if (adapter->state == __I40EVF_RUNNING) + i40evf_request_stats(adapter); +watchdog_done: + if (adapter->state == __I40EVF_RUNNING) { + i40evf_irq_enable_queues(adapter, ~0); + i40evf_fire_sw_int(adapter, 0xFF); + } else { + i40evf_fire_sw_int(adapter, 0x1); + } + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +restart_watchdog: + if (adapter->state == __I40EVF_REMOVE) + return; + if (adapter->aq_required) + mod_timer(&adapter->watchdog_timer, + jiffies + msecs_to_jiffies(20)); + else + mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); + schedule_work(&adapter->adminq_task); +} + +/** + * i40evf_configure_rss - Prepare for RSS + * @adapter: board private structure + **/ +static void i40evf_configure_rss(struct i40evf_adapter *adapter) +{ + u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; + struct i40e_hw *hw = &adapter->hw; + u32 cqueue = 0; + u32 lut = 0; + int i, j; + u64 hena; + + /* Hash type is configured by the PF - we just supply the key */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + + /* Fill out hash function seed */ + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); + + /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ + hena = I40E_DEFAULT_RSS_HENA; + wr32(hw, I40E_VFQF_HENA(0), (u32)hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + lut = 0; + for (j = 0; j < 4; j++) { + if (cqueue == adapter->vsi_res->num_queue_pairs) + cqueue = 0; + lut |= ((cqueue) << (8 * j)); + cqueue++; + } + wr32(hw, I40E_VFQF_HLUT(i), lut); + } + i40e_flush(hw); +} + +#define I40EVF_RESET_WAIT_MS 100 +#define I40EVF_RESET_WAIT_COUNT 200 +/** + * i40evf_reset_task - Call-back task to handle hardware reset + * @work: pointer to work_struct + * + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. 
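+ *
+ * For illustration, both the "wait for the reset to start" and "wait for
+ * it to finish" loops below poll the VF reset state register:
+ *
+ *        rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ *                    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *
+ * and treat I40E_VFR_VFACTIVE or I40E_VFR_COMPLETED as "the PF is
+ * responding again".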
+ **/ +static void i40evf_reset_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + reset_task); + struct net_device *netdev = adapter->netdev; + struct i40e_hw *hw = &adapter->hw; + struct i40evf_mac_filter *f; + uint32_t rstat_val; + int i = 0, err; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + + if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { + dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); + i40evf_request_reset(adapter); + } + + /* poll until we see the reset actually happen */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((rstat_val != I40E_VFR_VFACTIVE) && + (rstat_val != I40E_VFR_COMPLETED)) + break; + msleep(I40EVF_RESET_WAIT_MS); + } + if (i == I40EVF_RESET_WAIT_COUNT) { + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + goto continue_reset; /* act like the reset happened */ + } + + /* wait until the reset is complete and the PF is responding to us */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((rstat_val == I40E_VFR_VFACTIVE) || + (rstat_val == I40E_VFR_COMPLETED)) + break; + msleep(I40EVF_RESET_WAIT_MS); + } + if (i == I40EVF_RESET_WAIT_COUNT) { + struct i40evf_mac_filter *f, *ftmp; + struct i40evf_vlan_filter *fv, *fvtmp; + + /* reset never finished */ + dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", + rstat_val); + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + + if (netif_running(adapter->netdev)) { + set_bit(__I40E_DOWN, &adapter->vsi.state); + i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + i40evf_free_traffic_irqs(adapter); + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); + } + + /* Delete all of the filters, both MAC and VLAN. */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, + list) { + list_del(&f->list); + kfree(f); + } + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, + list) { + list_del(&fv->list); + kfree(fv); + } + + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + i40evf_free_queues(adapter); + i40evf_free_q_vectors(adapter); + kfree(adapter->vf_res); + i40evf_shutdown_adminq(hw); + adapter->netdev->flags &= ~IFF_UP; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return; /* Do not attempt to reinit. It's dead, Jim. 
*/ + } + +continue_reset: + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + + i40evf_irq_disable(adapter); + + if (netif_running(adapter->netdev)) { + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + + adapter->state = __I40EVF_RESETTING; + + /* kill and reinit the admin queue */ + if (i40evf_shutdown_adminq(hw)) + dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n"); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + err = i40evf_init_adminq(hw); + if (err) + dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", + err); + + i40evf_map_queues(adapter); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->add = true; + } + /* re-add all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->add = true; + } + adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + + mod_timer(&adapter->watchdog_timer, jiffies + 2); + + if (netif_running(adapter->netdev)) { + /* allocate transmit descriptors */ + err = i40evf_setup_all_tx_resources(adapter); + if (err) + goto reset_err; + + /* allocate receive descriptors */ + err = i40evf_setup_all_rx_resources(adapter); + if (err) + goto reset_err; + + i40evf_configure(adapter); + + err = i40evf_up_complete(adapter); + if (err) + goto reset_err; + + i40evf_irq_enable(adapter, true); + } + return; +reset_err: + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); + i40evf_close(adapter->netdev); +} + +/** + * i40evf_adminq_task - worker thread to clean the admin queue + * @work: pointer to work_struct containing our data + **/ +static void i40evf_adminq_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = + container_of(work, struct i40evf_adapter, adminq_task); + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + struct i40e_virtchnl_msg *v_msg; + i40e_status ret; + u32 val, oldval; + u16 pending; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + goto out; + + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + goto out; + + v_msg = (struct i40e_virtchnl_msg *)&event.desc; + do { + ret = i40evf_clean_arq_element(hw, &event, &pending); + if (ret || !v_msg->v_opcode) + break; /* No event to process or error cleaning ARQ */ + + i40evf_virtchnl_completion(adapter, v_msg->v_opcode, + v_msg->v_retval, event.msg_buf, + event.msg_len); + if (pending != 0) + memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); + } while (pending); + + /* check for error indications */ + val = rd32(hw, hw->aq.arq.len); + oldval = val; + if (val & I40E_VF_ARQLEN_ARQVFE_MASK) { + dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); + val &= ~I40E_VF_ARQLEN_ARQVFE_MASK; + } + if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) { + dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n"); + val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK; + } + if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) { + dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n"); + val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK; + } + if (oldval != val) + wr32(hw, hw->aq.arq.len, val); + + val = rd32(hw, hw->aq.asq.len); + oldval = val; + if (val & I40E_VF_ATQLEN_ATQVFE_MASK) { + dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n"); + val &= ~I40E_VF_ATQLEN_ATQVFE_MASK; + } + if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) { + 
dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n"); + val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK; + } + if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) { + dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n"); + val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK; + } + if (oldval != val) + wr32(hw, hw->aq.asq.len, val); + + kfree(event.msg_buf); +out: + /* re-enable Admin queue interrupt cause */ + i40evf_misc_irq_enable(adapter); +} + +/** + * i40evf_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tx_rings[i]->desc) + i40evf_free_tx_resources(adapter->tx_rings[i]); +} + +/** + * i40evf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tx_rings[i]->count = adapter->tx_desc_count; + err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); + if (!err) + continue; + dev_err(&adapter->pdev->dev, + "%s: Allocation for Tx Queue %u failed\n", + __func__, i); + break; + } + + return err; +} + +/** + * i40evf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->rx_rings[i]->count = adapter->rx_desc_count; + err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); + if (!err) + continue; + dev_err(&adapter->pdev->dev, + "%s: Allocation for Rx Queue %u failed\n", + __func__, i); + break; + } + return err; +} + +/** + * i40evf_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->rx_rings[i]->desc) + i40evf_free_rx_resources(adapter->rx_rings[i]); +} + +/** + * i40evf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ **/ +static int i40evf_open(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int err; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); + return -EIO; + } + if (adapter->state != __I40EVF_DOWN || adapter->aq_required) + return -EBUSY; + + /* allocate transmit descriptors */ + err = i40evf_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = i40evf_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + /* clear any pending interrupts, may auto mask */ + err = i40evf_request_traffic_irqs(adapter, netdev->name); + if (err) + goto err_req_irq; + + i40evf_configure(adapter); + + err = i40evf_up_complete(adapter); + if (err) + goto err_req_irq; + + i40evf_irq_enable(adapter, true); + + return 0; + +err_req_irq: + i40evf_down(adapter); + i40evf_free_traffic_irqs(adapter); +err_setup_rx: + i40evf_free_all_rx_resources(adapter); +err_setup_tx: + i40evf_free_all_tx_resources(adapter); + + return err; +} + +/** + * i40evf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) + * are freed, along with all transmit and receive resources. + **/ +static int i40evf_close(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (adapter->state <= __I40EVF_DOWN) + return 0; + + + set_bit(__I40E_DOWN, &adapter->vsi.state); + + i40evf_down(adapter); + adapter->state = __I40EVF_DOWN; + i40evf_free_traffic_irqs(adapter); + + return 0; +} + +/** + * i40evf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *i40evf_get_stats(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * i40evf_reinit_locked - Software reinit + * @adapter: board private structure + * + * Reinititalizes the ring structures in response to a software configuration + * change. Roughly the same as close followed by open, but skips releasing + * and reallocating the interrupts. 
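+ *
+ * A typical caller is i40evf_change_mtu(), which only needs the rings
+ * rebuilt for the new buffer size:
+ *
+ *        netdev->mtu = new_mtu;
+ *        i40evf_reinit_locked(adapter);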
+ **/ +void i40evf_reinit_locked(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + WARN_ON(in_interrupt()); + + i40evf_down(adapter); + + /* allocate transmit descriptors */ + err = i40evf_setup_all_tx_resources(adapter); + if (err) + goto err_reinit; + + /* allocate receive descriptors */ + err = i40evf_setup_all_rx_resources(adapter); + if (err) + goto err_reinit; + + i40evf_configure(adapter); + + err = i40evf_up_complete(adapter); + if (err) + goto err_reinit; + + i40evf_irq_enable(adapter, true); + return; + +err_reinit: + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); + i40evf_close(netdev); +} + +/** + * i40evf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) + return -EINVAL; + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + i40evf_reinit_locked(adapter); + return 0; +} + +static const struct net_device_ops i40evf_netdev_ops = { + .ndo_open = i40evf_open, + .ndo_stop = i40evf_close, + .ndo_start_xmit = i40evf_xmit_frame, + .ndo_get_stats = i40evf_get_stats, + .ndo_set_rx_mode = i40evf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = i40evf_set_mac, + .ndo_change_mtu = i40evf_change_mtu, + .ndo_tx_timeout = i40evf_tx_timeout, + .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, +}; + +/** + * i40evf_check_reset_complete - check that VF reset is complete + * @hw: pointer to hw struct + * + * Returns 0 if device is ready to use, or -EBUSY if it's in reset. + **/ +static int i40evf_check_reset_complete(struct i40e_hw *hw) +{ + u32 rstat; + int i; + + for (i = 0; i < 100; i++) { + rstat = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if ((rstat == I40E_VFR_VFACTIVE) || + (rstat == I40E_VFR_COMPLETED)) + return 0; + usleep_range(10, 20); + } + return -EBUSY; +} + +/** + * i40evf_init_task - worker thread to perform delayed initialization + * @work: pointer to work_struct containing our data + * + * This task completes the work that was begun in probe. Due to the nature + * of VF-PF communications, we may need to wait tens of milliseconds to get + * responses back from the PF. Rather than busy-wait in probe and bog down the + * whole system, we'll do it in a task so we can sleep. + * This task only runs during driver init. Once we've established + * communications with the PF driver and set up our netdev, the watchdog + * takes over. 
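+ *
+ * Roughly, the state machine below advances
+ *
+ *        __I40EVF_STARTUP -> __I40EVF_INIT_VERSION_CHECK ->
+ *        __I40EVF_INIT_GET_RESOURCES -> __I40EVF_INIT_SW -> __I40EVF_DOWN
+ *
+ * rescheduling itself between steps while it waits for the PF to reply.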
+ **/ +static void i40evf_init_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + init_task.work); + struct net_device *netdev = adapter->netdev; + struct i40evf_mac_filter *f; + struct i40e_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int i, err, bufsz; + + switch (adapter->state) { + case __I40EVF_STARTUP: + /* driver loaded, probe complete */ + adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + err = i40e_set_mac_type(hw); + if (err) { + dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", + err); + goto err; + } + err = i40evf_check_reset_complete(hw); + if (err) { + dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", + err); + goto err; + } + hw->aq.num_arq_entries = I40EVF_AQ_LEN; + hw->aq.num_asq_entries = I40EVF_AQ_LEN; + hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; + hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE; + + err = i40evf_init_adminq(hw); + if (err) { + dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", + err); + goto err; + } + err = i40evf_send_api_ver(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err); + i40evf_shutdown_adminq(hw); + goto err; + } + adapter->state = __I40EVF_INIT_VERSION_CHECK; + goto restart; + case __I40EVF_INIT_VERSION_CHECK: + if (!i40evf_asq_done(hw)) { + dev_err(&pdev->dev, "Admin queue command never completed\n"); + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; + goto err; + } + + /* aq msg sent, awaiting reply */ + err = i40evf_verify_api_ver(adapter); + if (err) { + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) + err = i40evf_send_api_ver(adapter); + goto err; + } + err = i40evf_send_vf_config_msg(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", + err); + goto err; + } + adapter->state = __I40EVF_INIT_GET_RESOURCES; + goto restart; + case __I40EVF_INIT_GET_RESOURCES: + /* aq msg sent, awaiting reply */ + if (!adapter->vf_res) { + bufsz = sizeof(struct i40e_virtchnl_vf_resource) + + (I40E_MAX_VF_VSI * + sizeof(struct i40e_virtchnl_vsi_resource)); + adapter->vf_res = kzalloc(bufsz, GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + err = i40evf_get_vf_config(adapter); + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { + err = i40evf_send_vf_config_msg(adapter); + goto err; + } + if (err) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", + err); + goto err_alloc; + } + adapter->state = __I40EVF_INIT_SW; + break; + default: + goto err_alloc; + } + /* got VF config message back from PF, now we can parse it */ + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + if (!adapter->vsi_res) { + dev_err(&pdev->dev, "No LAN VSI found\n"); + goto err_alloc; + } + + adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &i40evf_netdev_ops; + i40evf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netdev->features |= NETIF_F_HIGHDMA | + NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_GRO; + + if (adapter->vf_res->vf_offload_flags + & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { + netdev->vlan_features = netdev->features; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + /* copy netdev features into list of user selectable features */ + 
netdev->hw_features |= netdev->features; + netdev->hw_features &= ~NETIF_F_RXCSUM; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + random_ether_addr(adapter->hw.mac.addr); + } + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto err_sw_init; + + ether_addr_copy(f->macaddr, adapter->hw.mac.addr); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + + list_add(&f->list, &adapter->mac_filter_list); + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &i40evf_watchdog_timer; + adapter->watchdog_timer.data = (unsigned long)adapter; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + adapter->num_active_queues = min_t(int, + adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); + adapter->tx_desc_count = I40EVF_DEFAULT_TXD; + adapter->rx_desc_count = I40EVF_DEFAULT_RXD; + err = i40evf_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + i40evf_map_rings_to_vectors(adapter); + i40evf_configure_rss(adapter); + err = i40evf_request_misc_irq(adapter); + if (err) + goto err_sw_init; + + netif_carrier_off(netdev); + + adapter->vsi.id = adapter->vsi_res->vsi_id; + adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */ + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); + adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | + ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); + adapter->vsi.netdev = adapter->netdev; + + if (!adapter->netdev_registered) { + err = register_netdev(netdev); + if (err) + goto err_register; + } + + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + + dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr); + if (netdev->features & NETIF_F_GRO) + dev_info(&pdev->dev, "GRO is enabled\n"); + + dev_info(&pdev->dev, "%s\n", i40evf_driver_string); + adapter->state = __I40EVF_DOWN; + set_bit(__I40E_DOWN, &adapter->vsi.state); + i40evf_misc_irq_enable(adapter); + return; +restart: + schedule_delayed_work(&adapter->init_task, + msecs_to_jiffies(50)); + return; + +err_register: + i40evf_free_misc_irq(adapter); +err_sw_init: + i40evf_reset_interrupt_capability(adapter); +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + /* Things went into the weeds, so try again later */ + if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { + dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n"); + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + return; /* do not reschedule */ + } + schedule_delayed_work(&adapter->init_task, HZ * 3); +} + +/** + * i40evf_shutdown - Shutdown the device in preparation for a reboot + * @pdev: pci device structure + **/ +static void i40evf_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct i40evf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + i40evf_close(netdev); + + /* Prevent the watchdog from running. 
*/ + adapter->state = __I40EVF_REMOVE; + adapter->aq_required = 0; + +#ifdef CONFIG_PM + pci_save_state(pdev); + +#endif + pci_disable_device(pdev); +} + +/** + * i40evf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in i40evf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * i40evf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct i40evf_adapter *adapter = NULL; + struct i40e_hw *hw = NULL; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_regions(pdev, i40evf_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), + MAX_TX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + + hw = &adapter->hw; + hw->back = adapter; + + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->state = __I40EVF_STARTUP; + + /* Call save state here because it relies on the adapter struct. */ + pci_save_state(pdev); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + + INIT_LIST_HEAD(&adapter->mac_filter_list); + INIT_LIST_HEAD(&adapter->vlan_filter_list); + + INIT_WORK(&adapter->reset_task, i40evf_reset_task); + INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); + INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); + INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); + schedule_delayed_work(&adapter->init_task, 10); + + return 0; + +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PM +/** + * i40evf_suspend - Power management suspend routine + * @pdev: PCI device information struct + * @state: unused + * + * Called when the system (VM) is entering sleep/suspend. 
+ **/ +static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct i40evf_adapter *adapter = netdev_priv(netdev); + int retval = 0; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + rtnl_lock(); + i40evf_down(adapter); + rtnl_unlock(); + } + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + pci_disable_device(pdev); + + return 0; +} + +/** + * i40evf_resume - Power management resume routine + * @pdev: PCI device information struct + * + * Called when the system (VM) is resumed from sleep/suspend. + **/ +static int i40evf_resume(struct pci_dev *pdev) +{ + struct i40evf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n"); + return err; + } + pci_set_master(pdev); + + rtnl_lock(); + err = i40evf_set_interrupt_capability(adapter); + if (err) { + dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n"); + return err; + } + err = i40evf_request_misc_irq(adapter); + rtnl_unlock(); + if (err) { + dev_err(&pdev->dev, "Cannot get interrupt vector.\n"); + return err; + } + + schedule_work(&adapter->reset_task); + + netif_device_attach(netdev); + + return err; +} + +#endif /* CONFIG_PM */ +/** + * i40evf_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * i40evf_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void i40evf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40evf_mac_filter *f, *ftmp; + struct i40e_hw *hw = &adapter->hw; + + cancel_delayed_work_sync(&adapter->init_task); + cancel_work_sync(&adapter->reset_task); + + if (adapter->netdev_registered) { + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + /* Shut down all the garbage mashers on the detention level */ + adapter->state = __I40EVF_REMOVE; + adapter->aq_required = 0; + i40evf_request_reset(adapter); + msleep(20); + /* If the FW isn't responding, kick it once, but only once. */ + if (!i40evf_asq_done(hw)) { + i40evf_request_reset(adapter); + msleep(20); + } + + if (adapter->msix_entries) { + i40evf_misc_irq_disable(adapter); + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + i40evf_free_q_vectors(adapter); + } + + if (adapter->watchdog_timer.function) + del_timer_sync(&adapter->watchdog_timer); + + flush_scheduled_work(); + + if (hw->aq.asq.count) + i40evf_shutdown_adminq(hw); + + iounmap(hw->hw_addr); + pci_release_regions(pdev); + + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); + i40evf_free_queues(adapter); + kfree(adapter->vf_res); + /* If we got removed before an up/down sequence, we've got a filter + * hanging out there that we need to get rid of. 
+ */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +static struct pci_driver i40evf_driver = { + .name = i40evf_driver_name, + .id_table = i40evf_pci_tbl, + .probe = i40evf_probe, + .remove = i40evf_remove, +#ifdef CONFIG_PM + .suspend = i40evf_suspend, + .resume = i40evf_resume, +#endif + .shutdown = i40evf_shutdown, +}; + +/** + * i40e_init_module - Driver Registration Routine + * + * i40e_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init i40evf_init_module(void) +{ + int ret; + + pr_info("i40evf: %s - version %s\n", i40evf_driver_string, + i40evf_driver_version); + + pr_info("%s\n", i40evf_copyright); + + ret = pci_register_driver(&i40evf_driver); + return ret; +} + +module_init(i40evf_init_module); + +/** + * i40e_exit_module - Driver Exit Cleanup Routine + * + * i40e_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit i40evf_exit_module(void) +{ + pci_unregister_driver(&i40evf_driver); +} + +module_exit(i40evf_exit_module); + +/* i40evf_main.c */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c new file mode 100644 index 000000000..61e090558 --- /dev/null +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -0,0 +1,759 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40evf.h" +#include "i40e_prototype.h" + +/* busy wait delay in msec */ +#define I40EVF_BUSY_WAIT_DELAY 10 +#define I40EVF_BUSY_WAIT_COUNT 50 + +/** + * i40evf_send_pf_msg + * @adapter: adapter structure + * @op: virtual channel opcode + * @msg: pointer to message buffer + * @len: message length + * + * Send message to PF and print status if failure. 
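+ *
+ * Callers pass the opcode plus an optional payload, for example:
+ *
+ *        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION,
+ *                           (u8 *)&vvi, sizeof(vvi));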
+ **/ +static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, + enum i40e_virtchnl_ops op, u8 *msg, u16 len) +{ + struct i40e_hw *hw = &adapter->hw; + i40e_status err; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + return 0; /* nothing to see here, move along */ + + err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + if (err) + dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", + op, err, hw->aq.asq_last_status); + return err; +} + +/** + * i40evf_send_api_ver + * @adapter: adapter structure + * + * Send API version admin queue message to the PF. The reply is not checked + * in this function. Returns 0 if the message was successfully + * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. + **/ +int i40evf_send_api_ver(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_version_info vvi; + + vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; + vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; + + return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi, + sizeof(vvi)); +} + +/** + * i40evf_verify_api_ver + * @adapter: adapter structure + * + * Compare API versions with the PF. Must be called after admin queue is + * initialized. Returns 0 if API versions match, -EIO if they do not, + * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors + * from the firmware are propagated. + **/ +int i40evf_verify_api_ver(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_version_info *pf_vvi; + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + enum i40e_virtchnl_ops op; + i40e_status err; + + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + while (1) { + err = i40evf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_VERSION) + break; + } + + + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + if (err) + goto out_alloc; + + if (op != I40E_VIRTCHNL_OP_VERSION) { + dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", + op); + err = -EIO; + goto out_alloc; + } + + pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; + if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) || + (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR)) + err = -EIO; + +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + +/** + * i40evf_send_vf_config_msg + * @adapter: adapter structure + * + * Send VF configuration request admin queue message to the PF. The reply + * is not checked in this function. Returns 0 if the message was + * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. + **/ +int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) +{ + return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); +} + +/** + * i40evf_get_vf_config + * @hw: pointer to the hardware structure + * @len: length of buffer + * + * Get VF configuration from PF and populate hw structure. Must be called after + * admin queue is initialized. Busy waits until response is received from PF, + * with maximum timeout. Response from PF is returned in the buffer for further + * processing by the caller. 
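+ *
+ * The reply buffer must be sized for the resource structure plus one
+ * VSI entry per possible VSI:
+ *
+ *        len = sizeof(struct i40e_virtchnl_vf_resource) +
+ *              I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);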
+ **/ +int i40evf_get_vf_config(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + enum i40e_virtchnl_ops op; + i40e_status err; + u16 len; + + len = sizeof(struct i40e_virtchnl_vf_resource) + + I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); + event.buf_len = len; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + while (1) { + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + err = i40evf_clean_arq_element(hw, &event, NULL); + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES) + break; + } + + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); + + i40e_vf_parse_hw_config(hw, adapter->vf_res); +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + +/** + * i40evf_configure_queues + * @adapter: adapter structure + * + * Request that the PF set up our (previously allocated) queues. + **/ +void i40evf_configure_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_vsi_queue_config_info *vqci; + struct i40e_virtchnl_queue_pair_info *vqpi; + int pairs = adapter->num_active_queues; + int i, len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; + len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); + vqci = kzalloc(len, GFP_ATOMIC); + if (!vqci) + return; + + vqci->vsi_id = adapter->vsi_res->vsi_id; + vqci->num_queue_pairs = pairs; + vqpi = vqci->qpair; + /* Size check is not needed here - HW max is 16 queue pairs, and we + * can fit info for 31 of them into the AQ buffer before it overflows. + */ + for (i = 0; i < pairs; i++) { + vqpi->txq.vsi_id = vqci->vsi_id; + vqpi->txq.queue_id = i; + vqpi->txq.ring_len = adapter->tx_rings[i]->count; + vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; + vqpi->txq.headwb_enabled = 1; + vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr + + (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc)); + + vqpi->rxq.vsi_id = vqci->vsi_id; + vqpi->rxq.queue_id = i; + vqpi->rxq.ring_len = adapter->rx_rings[i]->count; + vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma; + vqpi->rxq.max_pkt_size = adapter->netdev->mtu + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len; + vqpi++; + } + + adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); + kfree(vqci); +} + +/** + * i40evf_enable_queues + * @adapter: adapter structure + * + * Request that the PF enable all of our queues. 
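+ *
+ * The queues are selected with a simple bitmap; with four active queue
+ * pairs, for example, both maps end up as 0xf:
+ *
+ *        vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+ *        vqs.rx_queues = vqs.tx_queues;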
+ **/ +void i40evf_enable_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_queue_select vqs; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; + vqs.vsi_id = adapter->vsi_res->vsi_id; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.rx_queues = vqs.tx_queues; + adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); +} + +/** + * i40evf_disable_queues + * @adapter: adapter structure + * + * Request that the PF disable all of our queues. + **/ +void i40evf_disable_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_queue_select vqs; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; + vqs.vsi_id = adapter->vsi_res->vsi_id; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; + vqs.rx_queues = vqs.tx_queues; + adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); +} + +/** + * i40evf_map_queues + * @adapter: adapter structure + * + * Request that the PF map queues to interrupt vectors. Misc causes, including + * admin queue, are always mapped to vector 0. + **/ +void i40evf_map_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_irq_map_info *vimi; + int v_idx, q_vectors, len; + struct i40e_q_vector *q_vector; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + len = sizeof(struct i40e_virtchnl_irq_map_info) + + (adapter->num_msix_vectors * + sizeof(struct i40e_virtchnl_vector_map)); + vimi = kzalloc(len, GFP_ATOMIC); + if (!vimi) + return; + + vimi->num_vectors = adapter->num_msix_vectors; + /* Queue vectors first */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + q_vector = adapter->q_vector[v_idx]; + vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; + vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS; + vimi->vecmap[v_idx].txq_map = q_vector->ring_mask; + vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask; + } + /* Misc vector last - this is only for AdminQ messages */ + vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; + vimi->vecmap[v_idx].vector_id = 0; + vimi->vecmap[v_idx].txq_map = 0; + vimi->vecmap[v_idx].rxq_map = 0; + + adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vimi, len); + kfree(vimi); +} + +/** + * i40evf_add_ether_addrs + * @adapter: adapter structure + * @addrs: the MAC address filters to add (contiguous) + * @count: number of filters + * + * Request that the PF add one or more addresses to our filters. 
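+ *
+ * Filters are queued on adapter->mac_filter_list with their add flag
+ * set; the watchdog then triggers this request, e.g.:
+ *
+ *        f->add = true;
+ *        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;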
+ **/ +void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_ether_addr_list *veal; + int len, i = 0, count = 0; + struct i40evf_mac_filter *f; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; + + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_ether_addr_list)) / + sizeof(struct i40e_virtchnl_ether_addr); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) + return; + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + f->add = false; + } + } + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + (u8 *)veal, len); + kfree(veal); +} + +/** + * i40evf_del_ether_addrs + * @adapter: adapter structure + * @addrs: the MAC address filters to remove (contiguous) + * @count: number of filtes + * + * Request that the PF remove one or more addresses from our filters. + **/ +void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_ether_addr_list *veal; + struct i40evf_mac_filter *f, *ftmp; + int len, i = 0, count = 0; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->remove) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_ether_addr_list)) / + sizeof(struct i40e_virtchnl_ether_addr); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) + return; + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (f->remove) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + list_del(&f->list); + kfree(f); + } + } + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + (u8 *)veal, len); + kfree(veal); +} + +/** + * i40evf_add_vlans + * @adapter: adapter structure + * @vlans: the VLANs to add + * @count: number of VLANs + * + * Request that the PF add one or more VLAN filters to our VSI. 
+ **/ +void i40evf_add_vlans(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_vlan_filter_list *vvfl; + int len, i = 0, count = 0; + struct i40evf_vlan_filter *f; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_vlan_filter_list)) / + sizeof(u16); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) + return; + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vvfl->vlan_id[i] = f->vlan; + i++; + f->add = false; + } + } + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + kfree(vvfl); +} + +/** + * i40evf_del_vlans + * @adapter: adapter structure + * @vlans: the VLANs to remove + * @count: number of VLANs + * + * Request that the PF remove one or more VLAN filters from our VSI. + **/ +void i40evf_del_vlans(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_vlan_filter_list *vvfl; + struct i40evf_vlan_filter *f, *ftmp; + int len, i = 0, count = 0; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->remove) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; + + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_vlan_filter_list)) / + sizeof(u16); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) + return; + + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vvfl->vlan_id[i] = f->vlan; + i++; + list_del(&f->list); + kfree(f); + } + } + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + kfree(vvfl); +} + +/** + * i40evf_set_promiscuous + * @adapter: adapter structure + * @flags: bitmask to control unicast/multicast promiscuous. + * + * Request that the PF enable promiscuous mode for our VSI. 
+ **/ +void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) +{ + struct i40e_virtchnl_promisc_info vpi; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + vpi.vsi_id = adapter->vsi_res->vsi_id; + vpi.flags = flags; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + (u8 *)&vpi, sizeof(vpi)); +} + +/** + * i40evf_request_stats + * @adapter: adapter structure + * + * Request VSI statistics from PF. + **/ +void i40evf_request_stats(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_queue_select vqs; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* no error message, this isn't crucial */ + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS; + vqs.vsi_id = adapter->vsi_res->vsi_id; + /* queue maps are ignored for this message - only the vsi is used */ + if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS, + (u8 *)&vqs, sizeof(vqs))) + /* if the request failed, don't lock out others */ + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} +/** + * i40evf_request_reset + * @adapter: adapter structure + * + * Request that the PF reset this VF. No response is expected. + **/ +void i40evf_request_reset(struct i40evf_adapter *adapter) +{ + /* Don't check CURRENT_OP - this is always higher priority */ + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} + +/** + * i40evf_virtchnl_completion + * @adapter: adapter structure + * @v_opcode: opcode sent by PF + * @v_retval: retval sent by PF + * @msg: message sent by PF + * @msglen: message length + * + * Asynchronous completion function for admin queue messages. Rather than busy + * wait, we fire off our requests and assume that no errors will be returned. + * This function handles the reply messages. 
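+ *
+ * It is invoked from i40evf_adminq_task() for every event pulled off
+ * the ARQ:
+ *
+ *        i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
+ *                                   v_msg->v_retval, event.msg_buf,
+ *                                   event.msg_len);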
+ **/ +void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen) +{ + struct net_device *netdev = adapter->netdev; + + if (v_opcode == I40E_VIRTCHNL_OP_EVENT) { + struct i40e_virtchnl_pf_event *vpe = + (struct i40e_virtchnl_pf_event *)msg; + switch (vpe->event) { + case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + adapter->link_up = + vpe->event_data.link_event.link_status; + if (adapter->link_up && !netif_carrier_ok(netdev)) { + dev_info(&adapter->pdev->dev, "NIC Link is Up\n"); + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + } else if (!adapter->link_up) { + dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + break; + case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + dev_info(&adapter->pdev->dev, "PF reset warning received\n"); + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + adapter->flags |= I40EVF_FLAG_RESET_PENDING; + dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); + schedule_work(&adapter->reset_task); + } + break; + default: + dev_err(&adapter->pdev->dev, + "%s: Unknown event %d from pf\n", + __func__, vpe->event); + break; + } + return; + } + if (v_retval) { + dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", + __func__, v_retval, v_opcode); + } + switch (v_opcode) { + case I40E_VIRTCHNL_OP_GET_STATS: { + struct i40e_eth_stats *stats = + (struct i40e_eth_stats *)msg; + adapter->net_stats.rx_packets = stats->rx_unicast + + stats->rx_multicast + + stats->rx_broadcast; + adapter->net_stats.tx_packets = stats->tx_unicast + + stats->tx_multicast + + stats->tx_broadcast; + adapter->net_stats.rx_bytes = stats->rx_bytes; + adapter->net_stats.tx_bytes = stats->tx_bytes; + adapter->net_stats.tx_errors = stats->tx_errors; + adapter->net_stats.rx_dropped = stats->rx_discards; + adapter->net_stats.tx_dropped = stats->tx_discards; + adapter->current_stats = *stats; + } + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + /* enable transmits */ + i40evf_irq_enable(adapter, true); + netif_tx_start_all_queues(adapter->netdev); + netif_carrier_on(adapter->netdev); + break; + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); + break; + case I40E_VIRTCHNL_OP_VERSION: + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + /* Don't display an error if we get these out of sequence. + * If the firmware needed to get kicked, we'll get these and + * it's no problem. + */ + if (v_opcode != adapter->current_op) + return; + break; + default: + if (v_opcode != adapter->current_op) + dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); + break; + } /* switch v_opcode */ + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile new file mode 100644 index 000000000..5bcb2de75 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -0,0 +1,36 @@ +################################################################################ +# +# Intel 82575 PCI-Express Ethernet Linux driver +# Copyright(c) 1999 - 2014 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82575 PCI-Express ethernet driver +# + +obj-$(CONFIG_IGB) += igb.o + +igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ + e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ + e1000_i210.o igb_ptp.o igb_hwmon.o diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c new file mode 100644 index 000000000..0f69ef817 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -0,0 +1,2884 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* e1000_82575 + * e1000_82576 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +#include "e1000_mac.h" +#include "e1000_82575.h" +#include "e1000_i210.h" + +static s32 igb_get_invariants_82575(struct e1000_hw *); +static s32 igb_acquire_phy_82575(struct e1000_hw *); +static void igb_release_phy_82575(struct e1000_hw *); +static s32 igb_acquire_nvm_82575(struct e1000_hw *); +static void igb_release_nvm_82575(struct e1000_hw *); +static s32 igb_check_for_link_82575(struct e1000_hw *); +static s32 igb_get_cfg_done_82575(struct e1000_hw *); +static s32 igb_init_hw_82575(struct e1000_hw *); +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); +static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); +static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); +static s32 igb_reset_hw_82575(struct e1000_hw *); +static s32 igb_reset_hw_82580(struct e1000_hw *); +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); +static s32 igb_setup_copper_link_82575(struct e1000_hw *); +static s32 igb_setup_serdes_link_82575(struct e1000_hw *); +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); +static void igb_clear_hw_cntrs_82575(struct e1000_hw *); +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, + u16 *); +static s32 igb_get_phy_id_82575(struct e1000_hw *); +static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); +static bool igb_sgmii_active_82575(struct e1000_hw *); +static s32 igb_reset_init_script_82575(struct e1000_hw *); +static s32 igb_read_mac_addr_82575(struct e1000_hw *); +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; + +/** + * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = rd32(E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = rd32(E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * igb_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + /* Check the copper medium. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check the other medium. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } else { + ret_val = igb_check_for_link_82575(hw); + } + + return 0; +} + +/** + * igb_init_phy_params_82575 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u32 ctrl_ext; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + if (igb_sgmii_active_82575(hw)) { + phy->ops.reset = igb_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = igb_phy_hw_reset; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + wr32(E1000_CTRL_EXT, ctrl_ext); + igb_reset_mdicnfg_82580(hw); + + if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; + phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; + } else { + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i354: + phy->ops.read_reg = igb_read_phy_reg_82580; + phy->ops.write_reg = igb_write_phy_reg_82580; + break; + case e1000_i210: + case e1000_i211: + phy->ops.read_reg = igb_read_phy_reg_gs40g; + phy->ops.write_reg = igb_write_phy_reg_gs40g; + break; + default: + phy->ops.read_reg = igb_read_phy_reg_igp; + phy->ops.write_reg = igb_write_phy_reg_igp; + } + } + + /* set lan id */ + hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> + E1000_STATUS_FUNC_SHIFT; + + /* Set phy->phy_addr and phy->id. */ + ret_val = igb_get_phy_id_82575(hw); + if (ret_val) + return ret_val; + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case M88E1543_E_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + if (phy->id != M88E1111_I_PHY_ID) + phy->ops.get_cable_length = + igb_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = igb_get_cable_length_m88; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + /* Check if this PHY is confgured for media swap. 
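igb_check_for_link_media_swap() above selects a page on the dual-media M88E1112, reads the status register for each medium, and records a swap only when the newly linked medium differs from the one currently in use. Below is a rough user-space sketch of that decision under stated assumptions: page_has_link() is a hypothetical stand-in for the paged E1000_M88E1112_STATUS read, and the page-to-medium mapping simply mirrors the driver's page 0 = copper convention.

#include <stdbool.h>
#include <stdio.h>

enum media_port { MEDIA_PORT_NONE, MEDIA_PORT_COPPER, MEDIA_PORT_OTHER };

/* Stand-in for the paged status read: pretend only the fiber/other
 * page (page 1) currently reports link. */
static bool page_has_link(int page)
{
	return page == 1;
}

/* Pick the linked medium and flag a swap when it differs from the one
 * currently selected, mirroring the logic of the media-swap check. */
static enum media_port check_media_swap(enum media_port current, bool *changed)
{
	enum media_port port = MEDIA_PORT_NONE;

	if (page_has_link(0))		/* page 0: copper medium */
		port = MEDIA_PORT_COPPER;
	if (page_has_link(1))		/* page 1: the other medium */
		port = MEDIA_PORT_OTHER;

	*changed = (port != MEDIA_PORT_NONE && port != current);
	return port;
}

int main(void)
{
	bool changed;
	enum media_port port = check_media_swap(MEDIA_PORT_COPPER, &changed);

	printf("port=%d changed=%d\n", port, changed);
	return 0;
}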
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + igb_check_for_link_media_swap; + } + break; + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.get_phy_info = igb_get_phy_info_igp; + phy->ops.get_cable_length = igb_get_cable_length_igp_2; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.force_speed_duplex = + igb_phy_force_speed_duplex_82580; + phy->ops.get_cable_length = igb_get_cable_length_82580; + phy->ops.get_phy_info = igb_get_phy_info_82580; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = igb_check_polarity_m88; + phy->ops.get_phy_info = igb_get_phy_info_m88; + phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u16 size; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = 1 << size; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + nvm->ops.release = igb_release_nvm_82575; + nvm->ops.write = igb_write_nvm_spi; + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i354: + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + break; + } + + return 0; +} + +/** + * igb_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 igb_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + switch (mac->type) { + case e1000_82576: + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + break; + case e1000_82580: + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + break; + case e1000_i350: + case e1000_i354: + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + break; + default: + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + break; + } + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = igb_reset_hw_82580; + else + mac->ops.reset_hw = igb_reset_hw_82575; + + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; + + } else { + mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; + } + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? true : false; + /* enable EEE on i350 parts and later parts */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? igb_setup_copper_link_82575 + : igb_setup_serdes_link_82575; + + if (mac->type == e1000_82580) { + switch (hw->device_id) { + /* feature not supported on these id's */ + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + break; + default: + hw->dev_spec._82575.mas_capable = true; + break; + } + } + return 0; +} + +/** + * igb_set_sfp_media_type_82575 - derives SFP module media type. + * @hw: pointer to the HW structure + * + * The media type is chosen based on SFP module. + * compatibility flags retrieved from SFP ID EEPROM. 
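The NVM sizing logic above derives the EEPROM word count from the EECD size field: the field value plus a base shift becomes the exponent of a power of two, capped at 2^15 words. A small sketch of that computation is below; the mask, shift and base-shift values are assumptions mirroring the igb register headers rather than authoritative definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed register layout: the EEPROM size field sits in EECD bits
 * 14:11 and the base shift is 6, so a field value of n gives a word
 * size of 1 << (n + 6), capped at 1 << 15. */
#define EECD_SIZE_EX_MASK	 0x00007800u
#define EECD_SIZE_EX_SHIFT	 11
#define NVM_WORD_SIZE_BASE_SHIFT 6

static uint32_t nvm_word_size(uint32_t eecd)
{
	uint32_t size = (eecd & EECD_SIZE_EX_MASK) >> EECD_SIZE_EX_SHIFT;

	size += NVM_WORD_SIZE_BASE_SHIFT;
	if (size > 15)			/* cap at the largest supported part */
		size = 15;
	return 1u << size;
}

int main(void)
{
	/* A size field of 2 would describe a 256-word (512-byte) EEPROM. */
	uint32_t eecd = 2u << EECD_SIZE_EX_SHIFT;

	printf("word_size=%u\n", (unsigned)nvm_word_size(eecd));
	return 0;
}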
+ **/ +static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_ERR_CONFIG; + u32 ctrl_ext = 0; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u8 tranceiver_type = 0; + s32 timeout = 3; + + /* Turn I2C interface ON and power on sfp cage */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); + + wrfl(); + + /* Read SFP module data */ + while (timeout) { + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), + &tranceiver_type); + if (ret_val == 0) + break; + msleep(100); + timeout--; + } + if (ret_val != 0) + goto out; + + ret_val = igb_read_sfp_data_byte(hw, + E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), + (u8 *)eth_flags); + if (ret_val != 0) + goto out; + + /* Check if there is some SFP module plugged and powered */ + if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || + (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { + dev_spec->module_plugged = true; + if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e100_base_fx) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_internal_serdes; + } else if (eth_flags->e1000_base_t) { + dev_spec->sgmii_active = true; + hw->phy.media_type = e1000_media_type_copper; + } else { + hw->phy.media_type = e1000_media_type_unknown; + hw_dbg("PHY module has not been recognized\n"); + goto out; + } + } else { + hw->phy.media_type = e1000_media_type_unknown; + } + ret_val = 0; +out: + /* Restore I2C interface setting */ + wr32(E1000_CTRL_EXT, ctrl_ext); + return ret_val; +} + +static s32 igb_get_invariants_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + s32 ret_val; + u32 ctrl_ext = 0; + u32 link_mode = 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + 
return -E1000_ERR_MAC_INIT; + } + + /* Set media type */ + /* The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to true. + */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = false; + dev_spec->module_plugged = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + + link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; + switch (link_mode) { + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* Get phy control interface type set (MDIO vs. I2C)*/ + if (igb_sgmii_uses_mdio_82575(hw)) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + break; + } + /* fall through for I2C based SGMII */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = igb_set_sfp_media_type_82575(hw); + if ((ret_val != 0) || + (hw->phy.media_type == e1000_media_type_unknown)) { + /* If media type was not identified then return media + * type defined by the CTRL_EXT settings. + */ + hw->phy.media_type = e1000_media_type_internal_serdes; + + if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = true; + } + + break; + } + + /* do not change link mode for 100BaseFX */ + if (dev_spec->eth_flags.e100_base_fx) + break; + + /* change current link mode setting */ + ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + + if (hw->phy.media_type == e1000_media_type_copper) + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; + else + ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + + wr32(E1000_CTRL_EXT, ctrl_ext); + + break; + default: + break; + } + + /* mac initialization and operations */ + ret_val = igb_init_mac_params_82575(hw); + if (ret_val) + goto out; + + /* NVM initialization */ + ret_val = igb_init_nvm_params_82575(hw); + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + ret_val = igb_init_nvm_params_i210(hw); + break; + default: + break; + } + + if (ret_val) + goto out; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: + igb_init_mbx_params_pf(hw); + break; + default: + break; + } + + /* setup PHY parameters */ + ret_val = igb_init_phy_params_82575(hw); + +out: + return ret_val; +} + +/** + * igb_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * igb_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. 
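The PHY acquire helper above (and its release counterpart just below) picks the SW/FW semaphore bit from the PCI function number, so each port only contends for its own PHY lock. A minimal sketch of that mask selection follows; the bit values are illustrative assumptions, the real ones come from the igb register headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative semaphore bit assignments (assumed, not authoritative). */
#define SWFW_PHY0_SM	0x02u
#define SWFW_PHY1_SM	0x04u
#define SWFW_PHY2_SM	0x20u
#define SWFW_PHY3_SM	0x40u

/* Each PCI function owns its own PHY semaphore bit, so the mask passed
 * to the SW/FW sync helpers is chosen from the function number. */
static uint16_t phy_semaphore_mask(unsigned int func)
{
	switch (func) {
	case 1:  return SWFW_PHY1_SM;
	case 2:  return SWFW_PHY2_SM;
	case 3:  return SWFW_PHY3_SM;
	default: return SWFW_PHY0_SM;
	}
}

int main(void)
{
	printf("func 2 -> mask 0x%02x\n", (unsigned)phy_semaphore_mask(2));
	return 0;
}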
+ **/ +static void igb_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + hw->mac.ops.release_swfw_sync(hw, mask); +} + +/** + * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + hw_dbg("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 igb_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + /* Extra read required for some PHY's on i354 */ + if (hw->mac.type == e1000_i354) + igb_get_phy_id(hw); + + /* For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. + */ + if (!(igb_sgmii_active_82575(hw))) { + phy->addr = 1; + ret_val = igb_get_phy_id(hw); + goto out; + } + + if (igb_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = rd32(E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + mdic = rd32(E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + ret_val = igb_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + wrfl(); + msleep(300); + + /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 
+ * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == 0) { + hw_dbg("Vendor ID 0x%08X read at address %u\n", + phy_id, phy->addr); + /* At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + hw_dbg("PHY address %u was unreadable\n", phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + goto out; + } else { + ret_val = igb_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + wr32(E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + /* This isn't a true "hard" reset, but is the only reset + * available to us at this time. + */ + + hw_dbg("Soft resetting SGMII attached PHY...\n"); + + /* SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
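The SGMII probe above walks I2C addresses 1 through 7 (0 is invalid in I2CCMD), reads PHY_ID1 at each, and stops at the first address that answers with the expected Marvell vendor ID. A self-contained sketch of that scan is below; read_phy_id1() is a hypothetical stand-in for the SGMII register read, and the vendor ID value is an assumption.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define M88_VENDOR	0x0141	/* Marvell OUI in PHY_ID1 (assumed here) */

/* Stand-in for the SGMII PHY_ID1 read: pretend only address 3 answers,
 * with a Marvell ID. */
static bool read_phy_id1(uint8_t addr, uint16_t *id)
{
	if (addr != 3)
		return false;
	*id = M88_VENDOR;
	return true;
}

/* Probe addresses 1..7 and return the first one that answers with the
 * expected vendor ID, or 0 when no usable PHY is found. */
static uint8_t probe_sgmii_phy_addr(void)
{
	for (uint8_t addr = 1; addr < 8; addr++) {
		uint16_t id;

		if (read_phy_id1(addr, &id) && id == M88_VENDOR)
			return addr;
	}
	return 0;
}

int main(void)
{
	printf("phy addr = %u\n", (unsigned)probe_sgmii_phy_addr());
	return 0;
}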
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u16 data; + + data = rd32(E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) + data |= E1000_82580_PM_SPD; + else if (phy->smart_speed == e1000_smart_speed_off) + data &= ~E1000_82580_PM_SPD; + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + wr32(E1000_82580_PHY_POWER_MGMT, data); + return 0; +} + +/** + * igb_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + ret_val = igb_acquire_nvm(hw); + + if (ret_val) + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * igb_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_82575(struct e1000_hw *hw) +{ + igb_release_nvm(hw); + hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; + + while (i < timeout) { + if (igb_get_hw_semaphore(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); + +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. 
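igb_acquire_swfw_sync_82575() above implements a claim-or-retry pattern on the shared SW_FW_SYNC register: a resource bit may be taken only while neither the software bit nor the matching firmware bit (the same mask shifted into the high half) is set. The sketch below models that pattern in user space; the hardware-semaphore bracketing and the 5 ms delay between attempts are deliberately elided and only noted in comments.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated SW_FW_SYNC register: the driver owns the low 16 bits and
 * firmware owns the high 16 bits. */
static uint32_t sw_fw_sync;

/* Try to claim a resource bit; succeed only when neither the software
 * bit nor the corresponding firmware bit (mask << 16) is already set.
 * The real driver wraps each attempt in the HW semaphore and retries
 * with a short delay. */
static bool acquire_swfw_sync(uint16_t mask, int attempts)
{
	uint32_t swmask = mask;
	uint32_t fwmask = (uint32_t)mask << 16;

	while (attempts--) {
		if (!(sw_fw_sync & (swmask | fwmask))) {
			sw_fw_sync |= swmask;	/* claim the resource */
			return true;
		}
		/* resource busy: the driver would mdelay(5) and retry */
	}
	return false;
}

static void release_swfw_sync(uint16_t mask)
{
	sw_fw_sync &= ~(uint32_t)mask;
}

int main(void)
{
	bool ok = acquire_swfw_sync(0x02, 200);

	printf("acquired=%d sync=0x%08x\n", ok, (unsigned)sw_fw_sync);
	release_swfw_sync(0x02);
	return 0;
}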
NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * 0. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 igb_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + + while (timeout) { + if (rd32(E1000_EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) + hw_dbg("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + igb_phy_init_script_igp3(hw); + + return 0; +} + +/** + * igb_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = igb_get_speed_and_duplex_copper(hw, speed, + duplex); + + return ret_val; +} + +/** + * igb_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 igb_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + } else { + ret_val = igb_check_for_copper_link(hw); + } + + return ret_val; +} + +/** + * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +void igb_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = rd32(E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); +} + +/** + * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs, status; + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = false; + *speed = 0; + *duplex = 0; + + /* Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = rd32(E1000_PCS_LSTAT); + + /* The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = true; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) + *speed = SPEED_1000; + else if (pcs & E1000_PCS_LSTS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + /* Check if it is an I354 2.5Gb backplane connection. */ + if (mac->type == e1000_i354) { + status = rd32(E1000_STATUS); + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + *speed = SPEED_2500; + *duplex = FULL_DUPLEX; + hw_dbg("2500 Mbs, "); + hw_dbg("Full Duplex\n"); + } + } + + } + + return 0; +} + +/** + * igb_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of fiber serdes, shut down optics and PCS on driver unload + * when management pass thru is not enabled. 
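The PCS speed/duplex helper above only reports link when both the "link up" and "sync ok" bits are set, then decodes speed and duplex from the remaining status bits. Here is a compact sketch of that decode; the bit positions are placeholders, since the real E1000_PCS_LSTS_* values come from the igb register headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions (assumed, not the real register layout). */
#define PCS_LINK_OK	(1u << 0)
#define PCS_SYNC_OK	(1u << 1)
#define PCS_SPEED_100	(1u << 2)
#define PCS_SPEED_1000	(1u << 3)
#define PCS_DUPLEX_FULL	(1u << 4)

struct link_info { bool up; int speed; bool full_duplex; };

/* Decode a PCS link status word the way the 82575 helper does: link is
 * only reported when both "link up" and "sync ok" are set. */
static struct link_info decode_pcs_lstat(uint32_t pcs)
{
	struct link_info li = { 0 };

	if ((pcs & PCS_LINK_OK) && (pcs & PCS_SYNC_OK)) {
		li.up = true;
		li.speed = (pcs & PCS_SPEED_1000) ? 1000 :
			   (pcs & PCS_SPEED_100)  ? 100 : 10;
		li.full_duplex = !!(pcs & PCS_DUPLEX_FULL);
	}
	return li;
}

int main(void)
{
	struct link_info li = decode_pcs_lstat(PCS_LINK_OK | PCS_SYNC_OK |
					       PCS_SPEED_1000 | PCS_DUPLEX_FULL);

	printf("up=%d speed=%d full=%d\n", li.up, li.speed, li.full_duplex);
	return 0;
}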
+ **/ +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + if (hw->phy.media_type != e1000_media_type_internal_serdes && + igb_sgmii_active_82575(hw)) + return; + + if (!igb_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = rd32(E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + wr32(E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = rd32(E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + wr32(E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + wrfl(); + usleep_range(1000, 2000); + } +} + +/** + * igb_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 igb_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + /* set the completion timeout for interface */ + ret_val = igb_set_pcie_completion_timeout(hw); + if (ret_val) + hw_dbg("PCI-E Set completion timeout has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 20000); + + ctrl = rd32(E1000_CTRL); + + hw_dbg("Issuing a global reset to MAC\n"); + wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) + igb_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + return ret_val; +} + +/** + * igb_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
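One detail worth calling out in the reset path above: the wait for EEPROM auto-read completion is polled with a timeout, and a timeout is only logged rather than treated as fatal, because EEPROM-less parts never set the bit and failing hard would leave the port unable to link. The sketch below models that poll-but-do-not-fail pattern; auto_read_done() is a hypothetical stand-in for reading the status bit.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for reading the auto-read-done status bit; here it reports
 * completion on the third poll. */
static bool auto_read_done(void)
{
	static int polls;
	return ++polls >= 3;
}

/* Poll for EEPROM auto-read completion after reset. As in the reset
 * handlers above, a timeout is only logged: on EEPROM-less parts the
 * bit never sets, and returning an error would prevent link. */
static void wait_auto_read_done(int attempts)
{
	while (attempts--) {
		if (auto_read_done())
			return;
		/* the driver sleeps roughly a millisecond between polls */
	}
	printf("Auto Read Done did not complete\n");
}

int main(void)
{
	wait_auto_read_done(10);
	return 0;
}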
+ **/ +static s32 igb_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + + /* Initialize identification LED */ + ret_val = igb_id_led_init(hw); + if (ret_val) { + hw_dbg("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + hw_dbg("Initializing the IEEE VLAN\n"); + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + igb_clear_vfta_i350(hw); + else + igb_clear_vfta(hw); + + /* Setup the receive address */ + igb_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + array_wr32(E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + hw_dbg("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + array_wr32(E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = igb_setup_link(hw); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + igb_clear_hw_cntrs_82575(hw); + return ret_val; +} + +/** + * igb_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u32 phpm_reg; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + wr32(E1000_CTRL, ctrl); + + /* Clear Go Link Disconnect bit on supported devices */ + switch (hw->mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); + phpm_reg &= ~E1000_82580_PM_GO_LINKD; + wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); + break; + default: + break; + } + + ret_val = igb_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msleep(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_i210: + case e1000_phy_m88: + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case I210_I_PHY_ID: + ret_val = igb_copper_link_setup_m88_gen2(hw); + break; + default: + ret_val = igb_copper_link_setup_m88(hw); + break; + } + break; + case e1000_phy_igp_3: + ret_val = igb_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = igb_copper_link_setup_82580(hw); + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = igb_setup_copper_link(hw); +out: + return ret_val; +} + +/** + * igb_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. 
The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. + **/ +static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg, anadv_reg; + bool pcs_autoneg; + s32 ret_val = 0; + u16 data; + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !igb_sgmii_active_82575(hw)) + return ret_val; + + + /* On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. + */ + wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present and turn on I2C */ + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + ctrl_ext |= E1000_CTRL_I2C_ENA; + wr32(E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { + /* set both sw defined pins */ + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + /* Set switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg |= E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + } + + reg = rd32(E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + wr32(E1000_CTRL, ctrl_reg); + + /* New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. 
+ */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = rd32(E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + wr32(E1000_PCS_ANADV, anadv_reg); + + hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + wr32(E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) + igb_force_mac_fc(hw); + + return ret_val; +} + +/** + * igb_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +static bool igb_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * igb_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 igb_reset_init_script_82575(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82575) { + hw_dbg("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23); + igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05); + igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00); + igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00); + } + + return 0; +} + +/** + * igb_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 igb_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. 
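The flow-control advertisement above maps the requested mode onto two PCS autoneg bits: symmetric pause plus asymmetric direction for full and rx-pause, asymmetric direction alone for tx-pause, and neither for none. A short sketch of that mapping follows; the bit values are illustrative stand-ins for E1000_TXCW_PAUSE and E1000_TXCW_ASM_DIR.

#include <stdint.h>
#include <stdio.h>

/* Illustrative advertisement bits (assumed values). */
#define TXCW_PAUSE	(1u << 7)
#define TXCW_ASM_DIR	(1u << 8)

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Map the requested flow-control mode onto the PCS autoneg
 * advertisement, mirroring the switch in the serdes setup above. */
static uint32_t fc_advertisement(uint32_t anadv, enum fc_mode mode)
{
	anadv &= ~(TXCW_PAUSE | TXCW_ASM_DIR);

	switch (mode) {
	case FC_FULL:
	case FC_RX_PAUSE:
		anadv |= TXCW_PAUSE | TXCW_ASM_DIR;
		break;
	case FC_TX_PAUSE:
		anadv |= TXCW_ASM_DIR;
		break;
	default:
		break;
	}
	return anadv;
}

int main(void)
{
	printf("rx_pause -> 0x%08x\n",
	       (unsigned)fc_advertisement(0, FC_RX_PAUSE));
	return 0;
}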
+ */ + ret_val = igb_check_alt_mac_addr(hw); + if (ret_val) + goto out; + + ret_val = igb_read_mac_addr(hw); + +out: + return ret_val; +} + +/** + * igb_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +void igb_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) + igb_power_down_phy_copper(hw); +} + +/** + * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + igb_clear_hw_cntrs_base(hw); + + rd32(E1000_PRC64); + rd32(E1000_PRC127); + rd32(E1000_PRC255); + rd32(E1000_PRC511); + rd32(E1000_PRC1023); + rd32(E1000_PRC1522); + rd32(E1000_PTC64); + rd32(E1000_PTC127); + rd32(E1000_PTC255); + rd32(E1000_PTC511); + rd32(E1000_PTC1023); + rd32(E1000_PTC1522); + + rd32(E1000_ALGNERRC); + rd32(E1000_RXERRC); + rd32(E1000_TNCRS); + rd32(E1000_CEXTERR); + rd32(E1000_TSCTC); + rd32(E1000_TSCTFC); + + rd32(E1000_MGTPRC); + rd32(E1000_MGTPDC); + rd32(E1000_MGTPTC); + + rd32(E1000_IAC); + rd32(E1000_ICRXOC); + + rd32(E1000_ICRXPTC); + rd32(E1000_ICRXATC); + rd32(E1000_ICTXPTC); + rd32(E1000_ICTXATC); + rd32(E1000_ICTXQEC); + rd32(E1000_ICTXQMTC); + rd32(E1000_ICRXDMTC); + + rd32(E1000_CBTMPC); + rd32(E1000_HTDPMC); + rd32(E1000_CBRMPC); + rd32(E1000_RPTHC); + rd32(E1000_HGPTC); + rd32(E1000_HTCBDPC); + rd32(E1000_HGORCL); + rd32(E1000_HGORCH); + rd32(E1000_HGOTCL); + rd32(E1000_HGOTCH); + rd32(E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + igb_sgmii_active_82575(hw)) + rd32(E1000_SCVPC); +} + +/** + * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable + * @hw: pointer to the HW structure + * + * After rx enable if managability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void igb_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + if (hw->mac.type != e1000_82575 || + !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all RX queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = rd32(E1000_RXDCTL(i)); + wr32(E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + usleep_range(1000, 2000); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= rd32(E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + hw_dbg("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + rfctl = rd32(E1000_RFCTL); + wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = rd32(E1000_RLPML); + wr32(E1000_RLPML, 0); + + rctl = rd32(E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + wr32(E1000_RCTL, temp_rctl); + wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN); + wrfl(); + usleep_range(2000, 3000); + + /* Enable RX queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + wr32(E1000_RXDCTL(i), rxdctl[i]); + wr32(E1000_RCTL, rctl); + wrfl(); + + wr32(E1000_RLPML, rlpml); + wr32(E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + rd32(E1000_ROC); + rd32(E1000_RNBC); + rd32(E1000_MPC); +} + +/** + * igb_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = rd32(E1000_GCR); + s32 ret_val = 0; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* if capabilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + wr32(E1000_GCR, gcr); + return ret_val; +} + +/** + * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = rd32(reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + wr32(reg_offset, reg_val); +} + +/** + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. 
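The anti-spoofing helper above sets the MAC and VLAN spoof-check bits for every pool and then uses an XOR to flip the PF's own two bits back off, since the PF must remain able to spoof to support emulated NICs. The sketch below reproduces that bit manipulation, assuming an 8-pool layout (MAC bits 0..7, VLAN bits 8..15); the mask values are assumptions standing in for the E1000_DTXSWC_*_SPOOF_MASK definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed 8-pool layout: bits 0..7 enable per-pool MAC anti-spoofing,
 * bits 8..15 enable per-pool VLAN anti-spoofing. */
#define NUM_POOLS	8
#define MAC_SPOOF_MASK	0x000000FFu
#define VLAN_SPOOF_MASK	0x0000FF00u

/* Enable anti-spoofing for every pool, then flip the PF's own two bits
 * back off, which is what the XOR in the driver does. */
static uint32_t enable_anti_spoofing(uint32_t reg, int pf)
{
	reg |= MAC_SPOOF_MASK | VLAN_SPOOF_MASK;
	reg ^= (1u << pf) | (1u << (pf + NUM_POOLS));
	return reg;
}

int main(void)
{
	/* With pf = 0, all pool bits are set except bit 0 and bit 8. */
	printf("pf=0 -> 0x%08x\n", (unsigned)enable_anti_spoofing(0, 0));
	return 0;
}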
+ **/ +void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = rd32(E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_DTXSWC, dtxswc); + break; + case e1000_i354: + case e1000_i350: + dtxswc = rd32(E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + wr32(E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + +} + +/** + * igb_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = rd32(E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + wr32(E1000_VT_CTL, vt_ctl); +} + +/** + * igb_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 mdicnfg; + u16 nvm_data = 0; + + if (hw->mac.type != e1000_82580) + goto out; + if (!igb_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + mdicnfg = rd32(E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + wr32(E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * igb_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. 
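+ *
+ * In short: a global device reset (CTRL.DEV_RST) is only attempted when the
+ * caller requested it, the part is not an 82580 (a hardware erratum rules it
+ * out there), the SW mailbox bit in SW_FW_SYNC could be acquired, and no
+ * device reset is already flagged in the STATUS register; in every other
+ * case the function falls back to an ordinary port reset (CTRL.RST).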
+ **/ +static s32 igb_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + hw->dev_spec._82575.global_device_reset = false; + + /* due to hw errata, global device reset doesn't always + * work on 82580 + */ + if (hw->mac.type == e1000_82580) + global_device_reset = false; + + /* Get current control state. */ + ctrl = rd32(E1000_CTRL); + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = igb_disable_pcie_master(hw); + if (ret_val) + hw_dbg("PCI-E Master disable polling has failed.\n"); + + hw_dbg("Masking off all interrupts\n"); + wr32(E1000_IMC, 0xffffffff); + wr32(E1000_RCTL, 0); + wr32(E1000_TCTL, E1000_TCTL_PSP); + wrfl(); + + usleep_range(10000, 11000); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) + global_device_reset = false; + + if (global_device_reset && + !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + wr32(E1000_CTRL, ctrl); + wrfl(); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + usleep_range(5000, 6000); + + ret_val = igb_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + hw_dbg("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + wr32(E1000_IMC, 0xffffffff); + rd32(E1000_ICR); + + ret_val = igb_reset_mdicnfg_82580(hw); + if (ret_val) + hw_dbg("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = igb_check_alt_mac_addr(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 igb_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
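+ *
+ * Worked example with made-up numbers: if the words from @offset up to, but
+ * not including, the checksum word sum to 0xB000, then the checksum word at
+ * NVM_CHECKSUM_REG + @offset must hold 0x0ABA so that the 16-bit total of
+ * the whole region equals NVM_SUM (0xBABA).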
+ **/ +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if checksums compatibility bit is set validate checksums + * for all 4 ports. + */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
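+ *
+ * Each of the four LAN functions owns its own checksum-protected region,
+ * located via NVM_82580_LAN_FUNC_OFFSET(port); the compatibility bit in
+ * NVM_COMPATIBILITY_REG_3 is set first so that readers know all four
+ * per-port checksums are being maintained.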
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * __igb_access_emi_reg - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); + + return ret_val; +} + +/** + * igb_read_emi_reg - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __igb_access_emi_reg(hw, addr, data, true); +} + +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
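+ *
+ * Minimal usage sketch (purely illustrative): a caller that wants EEE
+ * advertised for 1000BASE-T but not 100BASE-TX would do
+ *
+ *	hw->dev_spec._82575.eee_disable = false;
+ *	igb_set_eee_i350(hw, true, false);
+ *
+ * while setting eee_disable to true makes the same call strip all EEE
+ * advertisement regardless of the two flags.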
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + u32 ipcnfg, eeer; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + u32 eee_su = rd32(E1000_EEE_SU); + + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & E1000_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); + rd32(E1000_IPCNFG); + rd32(E1000_EEER); +out: + + return 0; +} + +/** + * igb_set_eee_i354 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100m: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE legacy mode based on setting in dev_spec structure. + * + **/ +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + if ((hw->phy.media_type != e1000_media_type_copper) || + (phy->id != M88E1543_E_PHY_ID)) + goto out; + + if (!hw->dev_spec._82575.eee_disable) { + /* Switch to PHY page 18. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, + &phy_data); + if (ret_val) + goto out; + + phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, + phy_data); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + /* Turn on EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } else { + /* Turn off EEE advertisement. */ + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | + E1000_EEE_ADV_1000_SUPPORTED); + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, + E1000_EEE_ADV_DEV_I354, + phy_data); + } + +out: + return ret_val; +} + +/** + * igb_get_eee_status_i354 - Get EEE status + * @hw: pointer to the HW structure + * @status: EEE status + * + * Get EEE status by guessing based on whether Tx or Rx LPI indications have + * been received. + **/ +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data; + + /* Check if EEE is supported on this device. 
*/ + if ((hw->phy.media_type != e1000_media_type_copper) || + (phy->id != M88E1543_E_PHY_ID)) + goto out; + + ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, + E1000_PCS_STATUS_DEV_I354, + &phy_data); + if (ret_val) + goto out; + + *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | + E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; + +out: + return ret_val; +} + +static const u8 e1000_emc_temp_data[4] = { + E1000_EMC_INTERNAL_DATA, + E1000_EMC_DIODE1_DATA, + E1000_EMC_DIODE2_DATA, + E1000_EMC_DIODE3_DATA +}; +static const u8 e1000_emc_therm_limit[4] = { + E1000_EMC_INTERNAL_THERM_LIMIT, + E1000_EMC_DIODE1_THERM_LIMIT, + E1000_EMC_DIODE2_THERM_LIMIT, + E1000_EMC_DIODE3_THERM_LIMIT +}; + +#ifdef CONFIG_IGB_HWMON +/** + * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + if (num_sensors > E1000_MAX_SENSORS) + num_sensors = E1000_MAX_SENSORS; + + for (i = 1; i < num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) + hw->phy.ops.read_i2c_byte(hw, + e1000_emc_temp_data[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + } + return 0; +} + +/** + * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) +{ + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 sensor_index; + u8 sensor_location; + u8 therm_limit; + u8 i; + struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) + return E1000_NOT_IMPLEMENTED; + + memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); + + data->sensor[0].location = 0x1; + data->sensor[0].caution_thresh = + (rd32(E1000_THHIGHTC) & 0xFF); + data->sensor[0].max_op_thresh = + (rd32(E1000_THLOWTC) & 0xFF); + + /* Return the internal sensor only if ETS is unsupported */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return 0; + + hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & 
NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return 0; +} + +#endif +static struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, + .read_mac_addr = igb_read_mac_addr_82575, + .get_speed_and_duplex = igb_get_link_up_info_82575, +#ifdef CONFIG_IGB_HWMON + .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, +#endif +}; + +static struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + .write_i2c_byte = igb_write_i2c_byte, + .read_i2c_byte = igb_read_i2c_byte, +}; + +static struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, + .write = igb_write_nvm_spi, +}; + +const struct e1000_info e1000_82575_info = { + .get_invariants = igb_get_invariants_82575, + .mac_ops = &e1000_mac_ops_82575, + .phy_ops = &e1000_phy_ops_82575, + .nvm_ops = &e1000_nvm_ops_82575, +}; + diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h new file mode 100644 index 000000000..2154aea7a --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -0,0 +1,279 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +void igb_shutdown_serdes_link_82575(struct e1000_hw *hw); +void igb_power_up_serdes_link_82575(struct e1000_hw *hw); +void igb_power_down_phy_copper_82575(struct e1000_hw *hw); +void igb_rx_fifo_flush_82575(struct e1000_hw *hw); +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 + +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 + + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Head, buf len */ + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +/* Adv ctxt IPSec ESP len mask */ + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +#define 
E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +/* Additional DCA related definitions, note change in position of CPUID */ +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_1588 (1 << 30) + +/* FTQF register bit definitions */ +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 
0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +/* RX packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); +void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); +void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); +u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); + +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h new file mode 100644 index 000000000..217f81388 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -0,0 +1,1016 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ + +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +/* Interrupt delay cancellation */ +/* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 +/* Interrupt acknowledge Auto-mask */ +/* Clear Interrupt timers after IMS clear */ +/* packet buffer parity error detection enabled */ +/* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 
+ E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 + +/* FACTPS Definitions */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +/* Defined polarity of Dock/Undock indication in SDP[0] */ +/* Reset both PHY ports, through PHYRST_N pin */ +/* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +/* Initiate an interrupt to manageability engine */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. + */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +/* Change in Dock/Undock state. Clear on write '0'. */ +/* Status of Master requests. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 +/* BMC external code execution disabled */ + +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ +/* Constants used to intrepret the masked PCI-X bus speed. 
*/ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_IVRT 0x00000040 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +/* Extended desc bits for Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ + +/* Timestamp in Rx buffer */ +#define E1000_RXPBS_CFG_TS_EN 0x80000000 + +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 
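+
+/* Illustrative usage only (E1000_SCTL is the SerDes control register defined
+ * in the register header, not here): a loopback self-test would typically do
+ *	wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
+ *	... run traffic ...
+ *	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+ * to put the SerDes into local loopback and then back to normal operation.
+ */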
+ +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* PBA constants */ +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_64K 0x0040 /* 64KB */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +/* LAN connected device generates an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC | \ + E1000_IMS_DOUTSYNC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ + +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ + +/* Extended Interrupt Cause Set */ +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + + +/* Transmit Descriptor Control */ +/* Enable the counting of descriptors still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* Transmit Config Word */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 19 +#define E1000_ERR_I2C 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ +#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ +#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. 
*/ +#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. 
*/ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + +/* PCI Express Control */ +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +/* mPHY Address Control and Data Registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */ + +/* mPHY PCS CLK Register */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */ +/* mPHY Near End Digital Loopback Override Bit */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 
/* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + + +/* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DATA 16 +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ +#define NVM_VERSION 0x0005 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 
0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ + +/* length of string needed to store part num */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - Microwire */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIE_DEVICE_CONTROL2 0x28 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* I = Integrated + * E = External + */ +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define M88_VENDOR 0x0141 +#define I210_I_PHY_ID 0x01410C00 +#define M88E1543_E_PHY_ID 0x01410EA0 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +/* 1 = Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel i347-AT4 Registers */ + +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* i347-AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* i347-AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* Marvell 1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define 
E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +/* DMA Coalescing register fields */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */ + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h new file mode 100644 index 000000000..2003b3756 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -0,0 +1,568 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include +#include +#include +#include + +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 + +#define E1000_REVISION_2 2 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_i210, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 
command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_mbx.h" + +struct e1000_mac_operations { + s32 (*check_for_link)(struct e1000_hw *); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +#ifdef CONFIG_IGB_HWMON + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); +#endif + +}; + +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); + struct e1000_mac_operations *mac_ops; + struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +extern const struct e1000_info e1000_82575_info; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 txcw; + + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool disable_hw_init_bits; + bool get_link_status; + bool ifs_params_forced; + bool in_ifs_mode; + bool report_tx_early; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum 
e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u32 snoop; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* Type of flow control */ + enum e1000_fc_mode requested_mode; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool clear_semaphore_once; + struct e1000_sfp_flags eth_flags; + bool module_plugged; + u8 media_port; + bool media_changed; + bool mas_capable; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +struct net_device *igb_get_hw_dev(struct e1000_hw *hw); +#define hw_dbg(format, arg...) 
\ + netdev_dbg(igb_get_hw_dev(hw), format, ##arg) + +/* These functions must be implemented by drivers */ +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +#endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c new file mode 100644 index 000000000..65d931669 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -0,0 +1,902 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* e1000_i210 + * e1000_i211 + */ + +#include +#include + +#include "e1000_hw.h" +#include "e1000_i210.h" + +static s32 igb_update_flash_i210(struct e1000_hw *hw); + +/** + * igb_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + */ +static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + igb_put_hw_semaphore(hw); + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * igb_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. 
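+ * (In this implementation the acquire is a thin wrapper around + * igb_acquire_swfw_sync_i210() using the EEPROM software/firmware + * semaphore mask E1000_SWFW_EEP_SM; see the function body below.)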
+ * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 igb_acquire_nvm_i210(struct e1000_hw *hw) +{ + return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void igb_release_nvm_i210(struct e1000_hw *hw) +{ + igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = 0; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + while (i < timeout) { + if (igb_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = rd32(E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) */ + igb_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +out: + return ret_val; +} + +/** + * igb_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (igb_get_hw_semaphore_i210(hw)) + ; /* Empty */ + + swfw_sync = rd32(E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + wr32(E1000_SW_FW_SYNC, swfw_sync); + + igb_put_hw_semaphore(hw); +} + +/** + * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? 
+ E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If igb_update_nvm_checksum is not called after this function , the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + wr32(E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + rd32(E1000_SRWR)) { + ret_val = 0; + break; + } + udelay(5); + } + + if (ret_val) { + hw_dbg("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function , the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u16 i, count; + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (!(hw->nvm.ops.acquire(hw))) { + status = igb_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status) + break; + } + + return status; +} + +/** + * igb_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP. 
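+ * Each 32-bit iNVM dword carries its record type in bits 2:0; for word + * autoload records the word address sits in bits 15:9 and the data in + * bits 31:16 (see the INVM_DWORD_TO_* macros in e1000_i210.h). As an + * editor's illustration, a dword of 0x12340A01 decodes to a word-autoload + * record for word address 0x05 carrying the data 0x1234.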
+ **/ +static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + hw_dbg("Read INVM Word 0x%02x = %x\n", + address, *data); + status = 0; + break; + } + } + } + if (status) + hw_dbg("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** + * igb_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @words: number of words to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. + **/ +static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 words __always_unused, u16 *data) +{ + s32 ret_val = 0; + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val) + hw_dbg("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_INIT_CTRL_4: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_1_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_LED_0_2_CFG: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = 0; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = igb_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val) { + *data = ID_LED_RESERVED_FFFF; + ret_val = 0; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + hw_dbg("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * igb_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type. 
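+ * The whole iNVM image is buffered first; the autoload words below the + * ULT area are then scanned for the version fields (E1000_INVM_VER_FIELD_ONE + * and _TWO) and for E1000_INVM_IMGTYPE_FIELD, which are decoded into + * invm_major, invm_minor and invm_img_type respectively.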
+ **/ +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) { + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = rd32(E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = 0; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + /* Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = 0; + break; + } + /* Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = 0; + break; + } + } + + if (!status) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = 0; + break; + } + /* Check if we have image type in first location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = 0; + break; + } + } + return status; +} + +/** + * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = 0; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + if (!(hw->nvm.ops.acquire(hw))) { + + /* Replace the read function with semaphore grabbing with + * the one that skips this for a while. + * We have semaphore taken already here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = igb_read_nvm_eerd; + + status = igb_validate_nvm_checksum(hw); + + /* Revert original read operation. */ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * igb_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. 
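+ * (Words 0x0000 through NVM_CHECKSUM_REG - 1 are summed and the checksum + * word is chosen so that the 16-bit sum of every word, checksum included, + * equals NVM_SUM, 0xBABA.)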
Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next commit EEPROM data onto the Flash. + **/ +static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val) { + hw_dbg("EEPROM read failed\n"); + goto out; + } + + if (!(hw->nvm.ops.acquire(hw))) { + /* Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val) { + hw->nvm.ops.release(hw); + hw_dbg("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = igb_update_flash_i210(hw); + } else { + ret_val = -E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * igb_pool_flash_update_done_i210 - Pool FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = rd32(E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = 0; + break; + } + udelay(5); + } + + return ret_val; +} + +/** + * igb_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool igb_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + eec = rd32(E1000_EECD); + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * igb_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +static s32 igb_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 flup; + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + hw_dbg("Flash update time out\n"); + goto out; + } + + flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210; + wr32(E1000_EECD, flup); + + ret_val = igb_pool_flash_update_done_i210(hw); + if (ret_val) + hw_dbg("Flash update complete\n"); + else + hw_dbg("Flash update time out\n"); + +out: + return ret_val; +} + +/** + * igb_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
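+ * A stored value of ID_LED_RESERVED_0000 or ID_LED_RESERVED_FFFF is treated + * as invalid and replaced with a media-type specific default + * (ID_LED_DEFAULT_I210_SERDES for internal SerDes links, ID_LED_DEFAULT_I210 + * otherwise).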
+ **/ +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __igb_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val = 0; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * igb_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * igb_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * igb_init_nvm_params_i210 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 igb_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val = 0; + struct e1000_nvm_info *nvm = &hw->nvm; + + nvm->ops.acquire = igb_acquire_nvm_i210; + nvm->ops.release = igb_release_nvm_i210; + nvm->ops.valid_led_default = igb_valid_led_default_i210; + + /* NVM Function Pointers */ + if (igb_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = igb_read_nvm_srrd_i210; + nvm->ops.write = igb_write_nvm_srwr_i210; + nvm->ops.validate = igb_validate_nvm_checksum_i210; + nvm->ops.update = igb_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = igb_read_invm_i210; + nvm->ops.write = NULL; + nvm->ops.validate = NULL; + nvm->ops.update = NULL; + } + return ret_val; +} + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
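+ * While the internal PHY still reports the PLL as unconfigured, the routine + * resets the PHY, rewrites the autoload word through E1000_EEARBC_I210 and + * bounces the function through PCI D3, retrying up to E1000_MAX_PLL_TRIES + * times before restoring the saved WUC and MDICNFG values.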
+ **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h new file mode 100644 index 000000000..3442b6357 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -0,0 +1,93 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); +s32 igb_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); +s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); +s32 igb_init_nvm_params_i210(struct e1000_hw *hw); +bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for i211 device */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c new file mode 100644 index 000000000..2a88595f9 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -0,0 +1,1606 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . 
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "e1000_mac.h"
+
+#include "igb.h"
+
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+
+/**
+ * igb_get_bus_info_pcie - Get PCIe bus information
+ * @hw: pointer to the HW structure
+ *
+ * Determines and stores the system bus information for a particular
+ * network interface. The following bus information is determined and stored:
+ * bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 reg;
+	u16 pcie_link_status;
+
+	bus->type = e1000_bus_type_pci_express;
+
+	ret_val = igb_read_pcie_cap_reg(hw,
+					PCI_EXP_LNKSTA,
+					&pcie_link_status);
+	if (ret_val) {
+		bus->width = e1000_bus_width_unknown;
+		bus->speed = e1000_bus_speed_unknown;
+	} else {
+		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+		case PCI_EXP_LNKSTA_CLS_2_5GB:
+			bus->speed = e1000_bus_speed_2500;
+			break;
+		case PCI_EXP_LNKSTA_CLS_5_0GB:
+			bus->speed = e1000_bus_speed_5000;
+			break;
+		default:
+			bus->speed = e1000_bus_speed_unknown;
+			break;
+		}
+
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCI_EXP_LNKSTA_NLW) >>
+						     PCI_EXP_LNKSTA_NLW_SHIFT);
+	}
+
+	reg = rd32(E1000_STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+
+	return 0;
+}
+
+/**
+ * igb_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void igb_clear_vfta(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		array_wr32(E1000_VFTA, offset, 0);
+		wrfl();
+	}
+}
+
+/**
+ * igb_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	array_wr32(E1000_VFTA, offset, value);
+	wrfl();
+}
+
+/* Due to a hw errata, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * igb_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void igb_clear_vfta_i350(struct e1000_hw *hw)
+{
+	u32 offset;
+	int i;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		for (i = 0; i < 10; i++)
+			array_wr32(E1000_VFTA, offset, 0);
+
+		wrfl();
+	}
+}
+
+/**
+ * igb_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
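+ *
+ * As a workaround for the erratum described above, the write is repeated
+ * ten times before the flush; igb_vfta_set() selects this variant for
+ * i350/i354 parts.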
+ **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + int i; + + for (i = 0; i < 10; i++) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); +} + +/** + * igb_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = {0}; + + /* Setup the receive address */ + hw_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * igb_vfta_set - enable or disable vlan in VLAN filter table + * @hw: pointer to the HW structure + * @vid: VLAN id to add or remove + * @add: if true add filter, if false remove + * + * Sets or clears a bit in the VLAN filter table array based on VLAN id + * and if we are adding or removing the filter + **/ +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +{ + u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; + u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + u32 vfta; + struct igb_adapter *adapter = hw->back; + s32 ret_val = 0; + + vfta = adapter->shadow_vfta[index]; + + /* bit was set/cleared before we started */ + if ((!!(vfta & mask)) == add) { + ret_val = -E1000_ERR_CONFIG; + } else { + if (add) + vfta |= mask; + else + vfta &= ~mask; + } + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + igb_write_vfta_i350(hw, index, vfta); + else + igb_write_vfta(hw, index, vfta); + adapter->shadow_vfta[index] = vfta; + + return ret_val; +} + +/** + * igb_check_alt_mac_addr - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is saved in the hw struct and + * programmed into RAR0 and the function returns success, otherwise the + * function returns an error. + **/ +s32 igb_check_alt_mac_addr(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = 0; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required. 
+ */ + if (hw->mac.type >= e1000_82580) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + goto out; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + hw_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * igb_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +/** + * igb_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. 
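+	 *
+	 * For example, with 128 MTA registers (a 4096-bit vector), a
+	 * hash_value of 0x563 selects register (0x563 >> 5) & 0x7F = 0x2B
+	 * and bit 0x563 & 0x1F = 0x03 within that register.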
+ */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = array_rd32(E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + array_wr32(E1000_MTA, hash_reg, mta); + wrfl(); +} + +/** + * igb_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * igb_mta_set() + **/ +static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * igb_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
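+ *
+ * "Packed" means the addresses are laid out back to back, ETH_ALEN bytes
+ * apart, with no padding between entries. A minimal caller sketch
+ * (illustrative only, not taken from this patch):
+ *
+ *	u8 list[2 * ETH_ALEN];
+ *
+ *	memcpy(&list[0], addr_a, ETH_ALEN);
+ *	memcpy(&list[ETH_ALEN], addr_b, ETH_ALEN);
+ *	igb_update_mc_addr_list(hw, list, 2);
+ *
+ * where addr_a and addr_b stand in for real multicast addresses.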
+ **/ +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = igb_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); + wrfl(); +} + +/** + * igb_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void igb_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + rd32(E1000_CRCERRS); + rd32(E1000_SYMERRS); + rd32(E1000_MPC); + rd32(E1000_SCC); + rd32(E1000_ECOL); + rd32(E1000_MCC); + rd32(E1000_LATECOL); + rd32(E1000_COLC); + rd32(E1000_DC); + rd32(E1000_SEC); + rd32(E1000_RLEC); + rd32(E1000_XONRXC); + rd32(E1000_XONTXC); + rd32(E1000_XOFFRXC); + rd32(E1000_XOFFTXC); + rd32(E1000_FCRUC); + rd32(E1000_GPRC); + rd32(E1000_BPRC); + rd32(E1000_MPRC); + rd32(E1000_GPTC); + rd32(E1000_GORCL); + rd32(E1000_GORCH); + rd32(E1000_GOTCL); + rd32(E1000_GOTCH); + rd32(E1000_RNBC); + rd32(E1000_RUC); + rd32(E1000_RFC); + rd32(E1000_ROC); + rd32(E1000_RJC); + rd32(E1000_TORL); + rd32(E1000_TORH); + rd32(E1000_TOTL); + rd32(E1000_TOTH); + rd32(E1000_TPR); + rd32(E1000_TPT); + rd32(E1000_MPTC); + rd32(E1000_BPTC); +} + +/** + * igb_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 igb_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = 0; + goto out; + } + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + igb_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + igb_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = igb_config_fc_after_link_up(hw); + if (ret_val) + hw_dbg("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * igb_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 igb_setup_link(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (igb_check_reset_block(hw)) + goto out; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = igb_set_default_fc(hw); + if (ret_val) + goto out; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg("Initializing the Flow Control address, type and timer regs\n"); + wr32(E1000_FCT, FLOW_CONTROL_TYPE); + wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + wr32(E1000_FCTTV, hw->fc.pause_time); + + ret_val = igb_set_fc_watermarks(hw); + +out: + + return ret_val; +} + +/** + * igb_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void igb_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = rd32(E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + wr32(E1000_TCTL, tctl); + wrfl(); +} + +/** + * igb_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * tansmission as well. + **/ +static s32 igb_set_fc_watermarks(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
+ */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + wr32(E1000_FCRTL, fcrtl); + wr32(E1000_FCRTH, fcrth); + + return ret_val; +} + +/** + * igb_set_default_fc - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 igb_set_default_fc(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 lan_offset; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == e1000_i350) { + lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + + lan_offset, 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * igb_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 igb_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val = 0; + + ctrl = rd32(E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit an + * receive flow control. + * + * The "Case" statement below enables/disable flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. 
+ */ + hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + wr32(E1000_CTRL, ctrl); + +out: + return ret_val; +} + +/** + * igb_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 igb_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = igb_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = igb_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, + &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg("Copper PHY and Auto Neg has not completed.\n"); + goto out; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. 
+ * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. 
+ * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((hw->fc.requested_mode == e1000_fc_none) || + (hw->fc.requested_mode == e1000_fc_tx_pause) || + (hw->fc.strict_ieee)) { + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + hw_dbg("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + goto out; + } + } + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) + && mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = rd32(E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + hw_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = rd32(E1000_PCS_ANADV); + pcs_lp_ability_reg = rd32(E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. 
In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + hw_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + hw_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + hw_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + hw_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = rd32(E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + wr32(E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = igb_force_mac_fc(hw); + if (ret_val) { + hw_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + +out: + return ret_val; +} + +/** + * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg("Half Duplex\n"); + } + + return 0; +} + +/** + * igb_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 igb_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = 0; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = rd32(E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. 
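+	 * This is done by setting SWSM.SWESMBI and reading it back; the bit
+	 * latches only when the semaphore is actually free. If it fails to
+	 * latch within the timeout, SMBI is released again via
+	 * igb_put_hw_semaphore() and -E1000_ERR_NVM is returned.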
*/ + for (i = 0; i < timeout; i++) { + swsm = rd32(E1000_SWSM); + wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + igb_put_hw_semaphore(hw); + hw_dbg("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void igb_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = rd32(E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + wr32(E1000_SWSM, swsm); +} + +/** + * igb_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 igb_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = 0; + + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * igb_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 igb_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + /* i210 and i211 devices have different LED mechanism */ + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) + ret_val = igb_valid_led_default_i210(hw, &data); + else + ret_val = igb_valid_led_default(hw, &data); + + if (ret_val) + goto out; + + mac->ledctl_default = rd32(E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on 
<< (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * igb_cleanup_led - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 igb_cleanup_led(struct e1000_hw *hw) +{ + wr32(E1000_LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * igb_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. + **/ +s32 igb_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + wr32(E1000_LEDCTL, ledctl_blink); + + return 0; +} + +/** + * igb_led_off - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 igb_led_off(struct e1000_hw *hw) +{ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + wr32(E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * igb_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 (0) if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 igb_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = 0; + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + wr32(E1000_CTRL, ctrl); + + while (timeout) { + if (!(rd32(E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_mdi_setting - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotitation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +s32 igb_validate_mdi_setting(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* All MDI settings are supported on 82580 and newer. 
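+	 * On older parts, when auto-negotiation is disabled only an explicit
+	 * MDI/MDI-X selection is accepted: mdix values 0 and 3 are rejected
+	 * below and mdix is forced back to 1 (MDI).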
*/ + if (hw->mac.type >= e1000_82580) + goto out; + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + hw_dbg("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = 0; + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + wr32(reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + regvalue = rd32(reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + hw_dbg("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool igb_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = false; + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = rd32(E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.arc_subsystem_valid) { + fwsm = rd32(E1000_FWSM); + factps = rd32(E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = true; + goto out; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = true; + goto out; + } + } + +out: + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h new file mode 100644 index 000000000..ea24961b0 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -0,0 +1,87 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +#include "e1000_hw.h" + +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_defines.h" +#include "e1000_i210.h" + +/* Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +s32 igb_blink_led(struct e1000_hw *hw); +s32 igb_check_for_copper_link(struct e1000_hw *hw); +s32 igb_cleanup_led(struct e1000_hw *hw); +s32 igb_config_fc_after_link_up(struct e1000_hw *hw); +s32 igb_disable_pcie_master(struct e1000_hw *hw); +s32 igb_force_mac_fc(struct e1000_hw *hw); +s32 igb_get_auto_rd_done(struct e1000_hw *hw); +s32 igb_get_bus_info_pcie(struct e1000_hw *hw); +s32 igb_get_hw_semaphore(struct e1000_hw *hw); +s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 igb_id_led_init(struct e1000_hw *hw); +s32 igb_led_off(struct e1000_hw *hw); +void igb_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 igb_setup_link(struct e1000_hw *hw); +s32 igb_validate_mdi_setting(struct e1000_hw *hw); +s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +void igb_clear_hw_cntrs_base(struct e1000_hw *hw); +void igb_clear_vfta(struct e1000_hw *hw); +void igb_clear_vfta_i350(struct e1000_hw *hw); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +void igb_config_collision_dist(struct e1000_hw *hw); +void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void igb_mta_set(struct e1000_hw *hw, u32 hash_value); +void igb_put_hw_semaphore(struct e1000_hw *hw); +void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +s32 igb_check_alt_mac_addr(struct e1000_hw *hw); + +bool igb_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +void e1000_init_function_pointers_82575(struct e1000_hw *hw); + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c new file mode 100644 index 000000000..162cc4934 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -0,0 +1,443 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000_mbx.h" + +/** + * igb_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * igb_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * igb_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 
0 : -E1000_ERR_MBX; +} + +/** + * igb_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? 0 : -E1000_ERR_MBX; +} + +/** + * igb_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = igb_poll_for_msg(hw, mbx_id); + + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = igb_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = rd32(E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = 0; + wr32(E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * igb_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * igb_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * igb_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + 
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = rd32(E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + if (vflre & (1 << vf_number)) { + ret_val = 0; + wr32(E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * igb_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = 0; + + return ret_val; +} + +/** + * igb_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + igb_check_for_msg_pf(hw, vf_number); + igb_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * igb_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = igb_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 igb_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h new file mode 100644 index 000000000..d20af6b2f --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -0,0 +1,73 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_hw.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 igb_check_for_msg(struct e1000_hw *, u16); +s32 igb_check_for_ack(struct e1000_hw *, u16); +s32 igb_check_for_rst(struct e1000_hw *, u16); +s32 igb_init_mbx_params_pf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c new file mode 100644 index 000000000..e8280d0d7 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -0,0 +1,801 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include + +#include "e1000_mac.h" +#include "e1000_nvm.h" + +/** + * igb_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + wr32(E1000_EECD, *eecd); + wrfl(); + udelay(hw->nvm.delay_usec); +} + +/** + * igb_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + wr32(E1000_EECD, *eecd); + wrfl(); + udelay(hw->nvm.delay_usec); +} + +/** + * igb_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. 
So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + wr32(E1000_EECD, eecd); + wrfl(); + + udelay(nvm->delay_usec); + + igb_raise_eec_clk(hw, &eecd); + igb_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + wr32(E1000_EECD, eecd); +} + +/** + * igb_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = rd32(E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + igb_raise_eec_clk(hw, &eecd); + + eecd = rd32(E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + igb_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = rd32(E1000_EERD); + else + reg = rd32(E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = 0; + break; + } + + udelay(5); + } + + return ret_val; +} + +/** + * igb_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 igb_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = rd32(E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = 0; + + + wr32(E1000_EECD, eecd | E1000_EECD_REQ); + eecd = rd32(E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = rd32(E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); + hw_dbg("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * igb_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. 
+ **/ +static void igb_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + wr32(E1000_EECD, eecd); + wrfl(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = rd32(E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + igb_lower_eec_clk(hw, &eecd); + } +} + +/** + * igb_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void igb_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = rd32(E1000_EECD); + eecd &= ~E1000_EECD_REQ; + wr32(E1000_EECD, eecd); +} + +/** + * igb_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = rd32(E1000_EECD); + s32 ret_val = 0; + u16 timeout = 0; + u8 spi_stat_reg; + + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + wr32(E1000_EECD, eecd); + wrfl(); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + igb_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return ret_val; +} + +/** + * igb_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. 
SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + wr32(E1000_EERD, eerd); + ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (rd32(E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * igb_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. + **/ +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + return ret_val; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + igb_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + igb_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + igb_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + igb_standby_nvm(hw); + break; + } + } + usleep_range(1000, 2000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * igb_read_part_string - Read device part number + * @hw: pointer to the HW structure + * @part_num: pointer to device part number + * @part_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in part_num. + **/ +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pointer; + u16 offset; + u16 length; + + if (part_num == NULL) { + hw_dbg("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pointer is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + hw_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (part_num_size < 11) { + hw_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pointer */ + part_num[0] = (nvm_data >> 12) & 0xF; + part_num[1] = (nvm_data >> 8) & 0xF; + part_num[2] = (nvm_data >> 4) & 0xF; + part_num[3] = nvm_data & 0xF; + part_num[4] = (pointer >> 12) & 0xF; + part_num[5] = (pointer >> 8) & 0xF; + part_num[6] = '-'; + part_num[7] = 0; + part_num[8] = (pointer >> 4) & 0xF; + part_num[9] = pointer & 0xF; + + /* put a null character on the end of our string */ + part_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (part_num[offset] < 0xA) + part_num[offset] += '0'; + else if (part_num[offset] < 0x10) + part_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg("NVM PBA number section invalid 
length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if part_num buffer is big enough */ + if (part_num_size < (((u32)length * 2) - 1)) { + hw_dbg("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pointer++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + part_num[offset * 2] = (u8)(nvm_data >> 8); + part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + part_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * igb_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 igb_read_mac_addr(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = rd32(E1000_RAH(0)); + rar_low = rd32(E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * igb_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 igb_validate_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output structure + * + * unsupported MAC types will return all 0 version structure + **/ +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers and bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
+ */ + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + switch (hw->mac.type) { + case e1000_i211: + igb_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(igb_get_flash_presence_i210(hw))) { + igb_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } +} diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h new file mode 100644 index 000000000..febc9cdb7 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -0,0 +1,56 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +s32 igb_acquire_nvm(struct e1000_hw *hw); +void igb_release_nvm(struct e1000_hw *hw); +s32 igb_read_mac_addr(struct e1000_hw *hw); +s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); +s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, + u32 part_num_size); +s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_validate_nvm_checksum(struct e1000_hw *hw); +s32 igb_update_nvm_checksum(struct e1000_hw *hw); + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; +void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers); + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c new file mode 100644 index 000000000..c1bb64d83 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -0,0 +1,2511 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include +#include + +#include "e1000_mac.h" +#include "e1000_phy.h" + +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl); +static s32 igb_wait_autoneg(struct e1000_hw *hw); +static s32 igb_set_master_slave_mode(struct e1000_hw *hw); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * igb_check_reset_block - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 igb_check_reset_block(struct e1000_hw *hw) +{ + u32 manc; + + manc = rd32(E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; +} + +/** + * igb_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 igb_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * igb_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +static s32 igb_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.write_reg)) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control regsiter in the PHY at offset and stores the + * information read to data. 
+ **/ +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg("PHY Address %d is out of range\n", offset); + ret_val = -E1000_ERR_PARAM; + goto out; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + wr32(E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = rd32(E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return 0; +} + +/** + * igb_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + hw_dbg("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + i2ccmd = rd32(E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * igb_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + hw_dbg("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. 
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + wr32(E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + udelay(50); + data_local = rd32(E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + hw_dbg("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + hw_dbg("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return 0; +} + +/** + * igb_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = 0; + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = igb_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release(hw); + goto out; + } + } + + ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 igb_copper_link_setup_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + if (phy->type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82580_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; + + ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 igb_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. 
*/ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + if (phy->reset_disable) + return 0; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + ret_val = igb_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * igb_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 igb_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + if (phy->reset_disable) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.reset(hw); + if (ret_val) { + hw_dbg("Error resetting the PHY.\n"); + goto out; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* The NVM settings will configure LPLU in D3 for + * non-IGP1 PHYs. 
+ */ + if (phy->type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + if (phy->ops.set_d3_lplu_state) + ret_val = phy->ops.set_d3_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D3\n"); + goto out; + } + } + + /* disable lplu d0 during driver init */ + ret_val = phy->ops.set_d0_lplu_state(hw, false); + if (ret_val) { + hw_dbg("Error Disabling LPLU D0\n"); + goto out; + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 igb_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = igb_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg("Error Setting up Auto-Negotiation\n"); + goto out; + } + hw_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = igb_wait_autoneg(hw); + if (ret_val) { + hw_dbg("Error while waiting for autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = true; + +out: + return ret_val; +} + +/** + * igb_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 igb_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 igb_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. 
+ */ + ret_val = igb_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + hw_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + hw_dbg("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link); + if (ret_val) + goto out; + + if (link) { + hw_dbg("Valid link established!!!\n"); + igb_config_collision_dist(hw); + ret_val = igb_config_fc_after_link_up(hw); + } else { + hw_dbg("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + hw_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* I210 and I211 devices support Auto-Crossover in forced operation. */ + if (phy->type != e1000_phy_i210) { + /* Clear Auto-Crossover to force MDI manually. M88E1000 + * requires MDI forced whenever speed and duplex are forced. 
+ */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + + hw_dbg("M88E1000 PSCR: %X\n", phy_data); + } + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Reset the phy to commit changes. */ + ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + if (!reset_dsp) + hw_dbg("Link taking longer than expected.\n"); + else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = igb_phy_reset_dsp(hw); + if (ret_val) + goto out; + } + } + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == I210_I_PHY_ID) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take effect. + **/ +static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, + u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = rd32(E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? 
*/ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg("Forcing 10mb\n"); + } + + igb_config_collision_dist(hw); + + wr32(E1000_CTRL, ctrl); +} + +/** + * igb_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 data; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * igb_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
+ **/ +s32 igb_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_i210: + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + ret_val = 0; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? true : false; + +out: + return ret_val; +} + +/** + * igb_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 igb_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 igb_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * igb_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, whichever happens first. + **/ +static s32 igb_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. 
+ */ + return ret_val; +} + +/** + * igb_phy_has_link - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val && usec_interval > 0) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations) ? true : false; + + return ret_val; +} + +/** + * igb_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 igb_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, index, default_page, is_cm; + + switch (hw->phy.id) { + case I210_I_PHY_ID: + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + + I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 
100 : 1); + break; + case M88E1543_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + goto out; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + goto out; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + break; + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. 
*/ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * igb_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 igb_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + hw_dbg("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? true : false; + + ret_val = igb_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine the cable length, local and remote receiver. 
+ **/ +s32 igb_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/writing the control register reset bit to the PHY. + **/ +s32 igb_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_ctrl; + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + udelay(1); + +out: + return ret_val; +} + +/** + * igb_phy_hw_reset - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 igb_phy_hw_reset(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = igb_check_reset_block(hw); + if (ret_val) { + ret_val = 0; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + wrfl(); + + udelay(phy->reset_delay_us); + + wr32(E1000_CTRL, ctrl); + wrfl(); + + udelay(150); + + phy->ops.release(hw); + + ret_val = phy->ops.get_cfg_done(hw); + +out: + return ret_val; +} + +/** + * igb_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes an Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +s32 igb_phy_init_script_igp3(struct e1000_hw *hw) +{ + hw_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to TX amplitude in Giga mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * igb_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, restore the link to previous settings. 
+ **/ +void igb_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * igb_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * Power down PHY to save power when interface is down and wake on lan + * is not enabled. + **/ +void igb_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + usleep_range(1000, 2000); +} + +/** + * igb_check_polarity_82580 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 igb_check_polarity_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + igb_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + + ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); + if (ret_val) + goto out; + + hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n"); + + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + + if (!link) + hw_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_get_phy_info_82580 - Retrieve I82580 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine the cable length, local and remote receiver. 
+ **/ +s32 igb_get_phy_info_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = igb_phy_has_link(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + hw_dbg("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = true; + + ret_val = igb_check_polarity_82580(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false; + + if ((data & I82580_PHY_STATUS2_SPEED_MASK) == + I82580_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * igb_get_cable_length_82580 - Determine cable length for 82580 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 igb_get_cable_length_82580(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >> + I82580_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} + +/** + * igb_write_phy_reg_gs40g - Write GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to write to + * upper half is page to use. + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igb_write_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igb_read_phy_reg_gs40g - Read GS40G PHY register + * @hw: pointer to the HW structure + * @offset: lower half is register offset to read to + * upper half is page to use. + * @data: data to read at register offset + * + * Acquires semaphore, if necessary, then reads the data in the PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = offset >> GS40G_PAGE_SHIFT; + + offset = offset & GS40G_OFFSET_MASK; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page); + if (ret_val) + goto release; + ret_val = igb_read_phy_reg_mdic(hw, offset, data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * igb_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 igb_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h new file mode 100644 index 000000000..7af4ffab0 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -0,0 +1,174 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +s32 igb_check_downshift(struct e1000_hw *hw); +s32 igb_check_reset_block(struct e1000_hw *hw); +s32 igb_copper_link_setup_igp(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88(struct e1000_hw *hw); +s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88(struct e1000_hw *hw); +s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); +s32 igb_get_phy_id(struct e1000_hw *hw); +s32 igb_get_phy_info_igp(struct e1000_hw *hw); +s32 igb_get_phy_info_m88(struct e1000_hw *hw); +s32 igb_phy_sw_reset(struct e1000_hw *hw); +s32 igb_phy_hw_reset(struct e1000_hw *hw); +s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 igb_setup_copper_link(struct e1000_hw *hw); +s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +void igb_power_up_phy_copper(struct e1000_hw *hw); +void igb_power_down_phy_copper(struct e1000_hw *hw); +s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 igb_copper_link_setup_82580(struct e1000_hw *hw); +s32 igb_get_phy_info_82580(struct e1000_hw *hw); +s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); +s32 igb_get_cable_length_82580(struct e1000_hw *hw); +s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 igb_check_polarity_m88(struct e1000_hw *hw); + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define I82580_ADDR_REG 16 +#define I82580_CFG_REG 22 +#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82580_CTRL_REG 23 +#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) + +/* 82580 specific PHY registers */ +#define I82580_PHY_CTRL_2 18 +#define I82580_PHY_LBK_CTRL 19 +#define I82580_PHY_STATUS_2 26 +#define I82580_PHY_DIAG_STATUS 31 + +/* I82580 PHY Status 2 */ +#define I82580_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82580_PHY_STATUS2_MDIX 0x0800 
+#define I82580_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82580 PHY Control 2 */ +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82580 PHY Diagnostics Status */ +#define I82580_DSTATUS_CABLE_LENGTH 0x03FC +#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ +#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */ + +/* Enable flexible speed on link-up */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 +#define GS40G_LINE_LB 0x4000 + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct e1000_sfp_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +#endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h new file mode 100644 index 000000000..6f0490d0e --- /dev/null +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -0,0 +1,426 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ + +/* IEEE 1588 TIMESYNCH */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* 
System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) +#define E1000_DAQF(_n) (0x59A0 + 4 * (_n)) +#define E1000_SPQF(_n) (0x59C0 + 4 * (_n)) +#define E1000_FTQF(_n) (0x59E0 + 4 * (_n)) +#define E1000_SAQF0 E1000_SAQF(0) +#define E1000_DAQF0 E1000_DAQF(0) +#define E1000_SPQF0 E1000_SPQF(0) +#define E1000_FTQF0 E1000_FTQF(0) +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + +/* Split and Replication RX Control - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ + +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \ + : (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \ + : (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \ + : (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \ + : (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \ + : (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \ + : (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \ + : (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \ + : (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \ + : (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \ + : (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \ + : (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \ + : (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ + : (0x0E028 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? 
(0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ + : (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ + : (0x0E03C + ((_n) * 0x40))) + +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ + +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 
0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +/* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXPTC 0x04104 +/* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICRXATC 0x04108 +/* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C +/* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXATC 0x04110 +/* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQEC 0x04118 +/* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICTXQMTC 0x0411C +/* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */ +#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ + +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ +/* MSI-X Allocation Register (_i) - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) +/* Redirection Table - RW Array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ + +/* VT Registers */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +/* These act per VF so an array friendly macro is used */ +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) + +struct e1000_hw; + +u32 igb_rd32(struct e1000_hw *hw, u32 reg); + +/* write operations, indexed using DWORDS */ +#define wr32(reg, val) \ +do { \ + u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +#define rd32(reg) (igb_rd32(hw, reg)) + +#define wrfl() ((void)rd32(E1000_STATUS)) + +#define array_wr32(reg, offset, value) \ + wr32((reg) + ((offset) << 2), (value)) + 
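For reference, the wr32()/rd32() accessors above take a register offset such as the queue-indexed macros defined earlier in this header, and they expect a local hw pointer to be in scope. The following is a minimal illustrative sketch only, not driver code: igb_example_bump_rdt is an invented name, and it assumes the full struct e1000_hw definition from e1000_hw.h is visible, as it is in the driver's .c files.

    static void igb_example_bump_rdt(struct e1000_hw *hw, int queue, u32 next_to_use)
    {
    	/* E1000_RDT(n) resolves to 0x02818 + n * 0x100 for queues 0-3
    	 * and to 0x0C018 + n * 0x40 for queue 4 and above.
    	 */
    	wr32(E1000_RDT(queue), next_to_use);

    	/* The write is posted; wrfl() reads E1000_STATUS back so the
    	 * tail update is known to have reached the device.
    	 */
    	wrfl();
    }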
+#define array_rd32(reg, offset) \ + (readl(hw->hw_addr + reg + ((offset) << 2))) + +/* DMA Coalescing registers */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU 0x0E34 /* EEE Setup */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h new file mode 100644 index 000000000..c2bd4f98a --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -0,0 +1,584 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGB_H_ +#define _IGB_H_ + +#include "e1000_mac.h" +#include "e1000_82575.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct igb_adapter; + +#define E1000_PCS_CFG_IGN_SD 1 + +/* Interrupt defines */ +#define IGB_START_ITR 648 /* ~6000 ints/sec */ +#define IGB_4K_ITR 980 +#define IGB_20K_ITR 196 +#define IGB_70K_ITR 56 + +/* TX/RX descriptor defines */ +#define IGB_DEFAULT_TXD 256 +#define IGB_DEFAULT_TX_WORK 128 +#define IGB_MIN_TXD 80 +#define IGB_MAX_TXD 4096 + +#define IGB_DEFAULT_RXD 256 +#define IGB_MIN_RXD 80 +#define IGB_MAX_RXD 4096 + +#define IGB_DEFAULT_ITR 3 /* dynamic */ +#define IGB_MAX_ITR_USECS 10000 +#define IGB_MIN_ITR_USECS 10 +#define NON_Q_VECTORS 1 +#define MAX_Q_VECTORS 8 +#define MAX_MSIX_ENTRIES 10 + +/* Transmit and receive queues */ +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_VF_MC_ENTRIES 30 +#define IGB_MAX_VF_FUNCTIONS 8 +#define IGB_MAX_VFTA_ENTRIES 128 +#define IGB_82576_VF_DEV_ID 0x10CA +#define IGB_I350_VF_DEV_ID 0x1520 + +/* NVM version defines */ +#define IGB_MAJOR_MASK 0xF000 +#define IGB_MINOR_MASK 0x0FF0 +#define IGB_BUILD_MASK 0x000F +#define IGB_COMB_VER_MASK 0x00FF +#define IGB_MAJOR_SHIFT 12 +#define IGB_MINOR_SHIFT 4 +#define IGB_COMB_VER_SHFT 8 +#define IGB_NVM_VER_INVALID 0xFFFF +#define IGB_ETRACK_SHIFT 16 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 vlans_enabled; + u32 flags; + unsigned long last_nack; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 pf_qos; + u16 tx_rate; + bool spoofchk_enabled; +}; + +#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ +#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */ +#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */ +#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8) +#define IGB_TX_HTHRESH 1 +#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4) +#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \ + (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 
1 : 16) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define IGB_EEPROM_APME 0x0400 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. + */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* EEPROM byte offsets */ +#define IGB_SFF_8472_SWAP 0x5C +#define IGB_SFF_8472_COMP 0x5E + +/* Bitmasks */ +#define IGB_SFF_ADDRESSING_MODE 0x4 +#define IGB_SFF_8472_UNSUP 0x00 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; + u64 restart_queue2; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device pointer for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync tx_syncp2; + }; + /* RX */ + struct { + struct sk_buff *skb; + struct igb_rx_queue_stats rx_stats; + struct u64_stats_sync rx_syncp; + }; + }; +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct napi_struct napi; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG +}; + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline int igb_desc_unused(struct igb_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +#ifdef CONFIG_IGB_HWMON + +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4]; + unsigned int n_hwmon; + }; +#endif + +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#define IGB_RETA_SIZE 128 + +/* board specific private data structure */ +struct igb_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct net_device *netdev; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry msix_entries[MAX_MSIX_ENTRIES]; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[16]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[16]; + + u32 max_frame_size; + u32 min_frame_size; + + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + + struct work_struct 
reset_task; + struct work_struct watchdog_task; + bool fc_autoneg; + u8 tx_timeout_factor; + struct timer_list blink_timer; + unsigned long led_status; + + /* OS defined structs */ + struct pci_dev *pdev; + + spinlock_t stats64_lock; + struct rtnl_link_stats64 stats64; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; + + int msg_enable; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u16 tx_ring_count; + u16 rx_ring_count; + unsigned int vfs_allocated_count; + struct vf_data_storage *vf_data; + int vf_rate_link_speed; + u32 rss_queues; + u32 wvbr; + u32 *shadow_vfta; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + + struct ptp_pin_desc sdp_config[IGB_N_SDP]; + struct { + struct timespec start; + struct timespec period; + } perout[IGB_N_PEROUT]; + + char fw_version[32]; +#ifdef CONFIG_IGB_HWMON + struct hwmon_buff *igb_hwmon_buff; + bool ets; +#endif + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; + + unsigned long link_check_timeout; + int copper_tries; + struct e1000_info ei; + u16 eee_advert; +}; + +#define IGB_FLAG_HAS_MSI (1 << 0) +#define IGB_FLAG_DCA_ENABLED (1 << 1) +#define IGB_FLAG_QUAD_PORT_A (1 << 2) +#define IGB_FLAG_QUEUE_PAIRS (1 << 3) +#define IGB_FLAG_DMAC (1 << 4) +#define IGB_FLAG_PTP (1 << 5) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) +#define IGB_FLAG_WOL_SUPPORTED (1 << 8) +#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) +#define IGB_FLAG_MEDIA_RESET (1 << 10) +#define IGB_FLAG_MAS_CAPABLE (1 << 11) +#define IGB_FLAG_MAS_ENABLE (1 << 12) +#define IGB_FLAG_HAS_MSIX (1 << 13) +#define IGB_FLAG_EEE (1 << 14) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +#define IGB_82576_TSYNC_SHIFT 19 +#define IGB_TS_HDR_LEN 16 +enum e1000_state_t { + __IGB_TESTING, + __IGB_RESETTING, + __IGB_DOWN, + __IGB_PTP_TX_IN_PROGRESS, +}; + +enum igb_boards { + board_82575, +}; + +extern char igb_driver_name[]; +extern char igb_driver_version[]; + +int igb_up(struct igb_adapter *); +void igb_down(struct igb_adapter *); +void igb_reinit_locked(struct igb_adapter *); +void igb_reset(struct igb_adapter *); +int igb_reinit_queues(struct igb_adapter *); +void igb_write_rss_indir_tbl(struct igb_adapter *); +int igb_set_spd_dplx(struct igb_adapter *, u32, u8); +int igb_setup_tx_resources(struct igb_ring *); +int igb_setup_rx_resources(struct igb_ring *); +void igb_free_tx_resources(struct igb_ring *); +void igb_free_rx_resources(struct igb_ring *); +void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring 
*); +void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *); +void igb_setup_tctl(struct igb_adapter *); +void igb_setup_rctl(struct igb_adapter *); +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *); +void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *); +void igb_alloc_rx_buffers(struct igb_ring *, u16); +void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); +bool igb_has_link(struct igb_adapter *adapter); +void igb_set_ethtool_ops(struct net_device *); +void igb_power_up_link(struct igb_adapter *); +void igb_set_fw_version(struct igb_adapter *); +void igb_ptp_init(struct igb_adapter *adapter); +void igb_ptp_stop(struct igb_adapter *adapter); +void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_rx_hang(struct igb_adapter *adapter); +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); +void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, + struct sk_buff *skb); +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +#ifdef CONFIG_IGB_HWMON +void igb_sysfs_exit(struct igb_adapter *adapter); +int igb_sysfs_init(struct igb_adapter *adapter); +#endif +static inline s32 igb_reset_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return 0; +} + +static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return 0; +} + +static inline s32 igb_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_phy_info) + return hw->phy.ops.get_phy_info(hw); + + return 0; +} + +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} + +#endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c new file mode 100644 index 000000000..d5673eb90 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -0,0 +1,3064 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* ethtool support for igb */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igb.h" + +struct igb_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IGB_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ + .stat_offset = offsetof(struct igb_adapter, _stat) \ +} +static const struct igb_stats igb_gstrings_stats[] = { + IGB_STAT("rx_packets", stats.gprc), + IGB_STAT("tx_packets", stats.gptc), + IGB_STAT("rx_bytes", stats.gorc), + IGB_STAT("tx_bytes", stats.gotc), + IGB_STAT("rx_broadcast", stats.bprc), + IGB_STAT("tx_broadcast", stats.bptc), + IGB_STAT("rx_multicast", stats.mprc), + IGB_STAT("tx_multicast", stats.mptc), + IGB_STAT("multicast", stats.mprc), + IGB_STAT("collisions", stats.colc), + IGB_STAT("rx_crc_errors", stats.crcerrs), + IGB_STAT("rx_no_buffer_count", stats.rnbc), + IGB_STAT("rx_missed_errors", stats.mpc), + IGB_STAT("tx_aborted_errors", stats.ecol), + IGB_STAT("tx_carrier_errors", stats.tncrs), + IGB_STAT("tx_window_errors", stats.latecol), + IGB_STAT("tx_abort_late_coll", stats.latecol), + IGB_STAT("tx_deferred_ok", stats.dc), + IGB_STAT("tx_single_coll_ok", stats.scc), + IGB_STAT("tx_multi_coll_ok", stats.mcc), + IGB_STAT("tx_timeout_count", tx_timeout_count), + IGB_STAT("rx_long_length_errors", stats.roc), + IGB_STAT("rx_short_length_errors", stats.ruc), + IGB_STAT("rx_align_errors", stats.algnerrc), + IGB_STAT("tx_tcp_seg_good", stats.tsctc), + IGB_STAT("tx_tcp_seg_failed", stats.tsctfc), + IGB_STAT("rx_flow_control_xon", stats.xonrxc), + IGB_STAT("rx_flow_control_xoff", stats.xoffrxc), + IGB_STAT("tx_flow_control_xon", stats.xontxc), + IGB_STAT("tx_flow_control_xoff", stats.xofftxc), + IGB_STAT("rx_long_byte_count", stats.gorc), + IGB_STAT("tx_dma_out_of_sync", stats.doosync), + IGB_STAT("tx_smbus", stats.mgptc), + IGB_STAT("rx_smbus", stats.mgprc), + IGB_STAT("dropped_smbus", stats.mgpdc), + IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), + IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +}; + +#define IGB_NETDEV_STAT(_net_stat) { \ + .stat_string = __stringify(_net_stat), \ + .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ + .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ +} +static const struct igb_stats igb_gstrings_net_stats[] = { + IGB_NETDEV_STAT(rx_errors), + IGB_NETDEV_STAT(tx_errors), + IGB_NETDEV_STAT(tx_dropped), + IGB_NETDEV_STAT(rx_length_errors), + IGB_NETDEV_STAT(rx_over_errors), + IGB_NETDEV_STAT(rx_frame_errors), + IGB_NETDEV_STAT(rx_fifo_errors), + IGB_NETDEV_STAT(tx_fifo_errors), + IGB_NETDEV_STAT(tx_heartbeat_errors) +}; + +#define IGB_GLOBAL_STATS_LEN \ + (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)) +#define IGB_NETDEV_STATS_LEN \ + (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) +#define IGB_RX_QUEUE_STATS_LEN \ + (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) + +#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */ + +#define IGB_QUEUE_STATS_LEN \ + ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ + IGB_RX_QUEUE_STATS_LEN) + \ + (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \ + 
IGB_TX_QUEUE_STATS_LEN)) +#define IGB_STATS_LEN \ + (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN) + +static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN) + +static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; + u32 status; + u32 speed; + + status = rd32(E1000_STATUS); + if (hw->phy.media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause); + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + } else { + ecmd->supported = (SUPPORTED_FIBRE | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause); + ecmd->advertising = (ADVERTISED_FIBRE | + ADVERTISED_1000baseKX_Full); + if (hw->mac.type == e1000_i354) { + if ((hw->device_id == + E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + ecmd->supported |= SUPPORTED_2500baseX_Full; + ecmd->supported &= + ~SUPPORTED_1000baseKX_Full; + ecmd->advertising |= ADVERTISED_2500baseX_Full; + ecmd->advertising &= + ~ADVERTISED_1000baseKX_Full; + } + } + if (eth_flags->e100_base_fx) { + ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_100baseT_Full; + } + if (hw->mac.autoneg == 1) + ecmd->advertising |= ADVERTISED_Autoneg; + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + if (hw->mac.autoneg != 1) + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + ecmd->advertising |= ADVERTISED_Pause; + break; + case e1000_fc_rx_pause: + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + break; + case e1000_fc_tx_pause: + ecmd->advertising |= ADVERTISED_Asym_Pause; + break; + default: + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + if (status & E1000_STATUS_LU) { + if ((status & E1000_STATUS_2P5_SKU) && + !(status & E1000_STATUS_2P5_SKU_OVER)) { + speed = SPEED_2500; + } else if (status & E1000_STATUS_SPEED_1000) { + speed = SPEED_1000; + } else if (status & E1000_STATUS_SPEED_100) { + speed = SPEED_100; + } else { + speed = SPEED_10; + } + if ((status & E1000_STATUS_FD) || + hw->phy.media_type != e1000_media_type_copper) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + ecmd->duplex = DUPLEX_UNKNOWN; + } + ethtool_cmd_speed_set(ecmd, speed); + if ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) + ecmd->autoneg = AUTONEG_ENABLE; + else + ecmd->autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : + ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (igb_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. + */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) { + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + switch (adapter->link_speed) { + case SPEED_2500: + hw->phy.autoneg_advertised = + ADVERTISED_2500baseX_Full; + break; + case SPEED_1000: + hw->phy.autoneg_advertised = + ADVERTISED_1000baseT_Full; + break; + case SPEED_100: + hw->phy.autoneg_advertised = + ADVERTISED_100baseT_Full; + break; + default: + break; + } + } else { + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + } + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ + if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__IGB_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + return 0; +} + +static u32 igb_get_link(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igb_has_link(adapter); +} + +static void igb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + /* 100basefx does not support setting link flow control */ + if (hw->dev_spec._82575.eth_flags.e100_base_fx) + return -EINVAL; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + igb_down(adapter); + igb_up(adapter); + } else { + igb_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == e1000_media_type_copper) ? + igb_force_mac_fc(hw) : igb_setup_link(hw)); + } + + clear_bit(__IGB_RESETTING, &adapter->state); + return retval; +} + +static u32 igb_get_msglevel(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igb_get_regs_len(struct net_device *netdev) +{ +#define IGB_REGS_LEN 739 + return IGB_REGS_LEN * sizeof(u32); +} + +static void igb_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGB_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(E1000_CTRL); + regs_buff[1] = rd32(E1000_STATUS); + regs_buff[2] = rd32(E1000_CTRL_EXT); + regs_buff[3] = rd32(E1000_MDIC); + regs_buff[4] = rd32(E1000_SCTL); + regs_buff[5] = rd32(E1000_CONNSW); + regs_buff[6] = rd32(E1000_VET); + regs_buff[7] = rd32(E1000_LEDCTL); + regs_buff[8] = rd32(E1000_PBA); + regs_buff[9] = rd32(E1000_PBS); + regs_buff[10] = rd32(E1000_FRTIMER); + regs_buff[11] = rd32(E1000_TCPTIMER); + + /* NVM Register */ + regs_buff[12] = rd32(E1000_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[13] = rd32(E1000_EICS); + regs_buff[14] = rd32(E1000_EICS); + regs_buff[15] = rd32(E1000_EIMS); + regs_buff[16] = rd32(E1000_EIMC); + regs_buff[17] = rd32(E1000_EIAC); + regs_buff[18] = rd32(E1000_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[19] = rd32(E1000_ICS); + regs_buff[20] = rd32(E1000_ICS); + regs_buff[21] = rd32(E1000_IMS); + regs_buff[22] = rd32(E1000_IMC); + regs_buff[23] = rd32(E1000_IAC); + regs_buff[24] = rd32(E1000_IAM); + regs_buff[25] = rd32(E1000_IMIRVP); + + /* Flow Control 
*/ + regs_buff[26] = rd32(E1000_FCAL); + regs_buff[27] = rd32(E1000_FCAH); + regs_buff[28] = rd32(E1000_FCTTV); + regs_buff[29] = rd32(E1000_FCRTL); + regs_buff[30] = rd32(E1000_FCRTH); + regs_buff[31] = rd32(E1000_FCRTV); + + /* Receive */ + regs_buff[32] = rd32(E1000_RCTL); + regs_buff[33] = rd32(E1000_RXCSUM); + regs_buff[34] = rd32(E1000_RLPML); + regs_buff[35] = rd32(E1000_RFCTL); + regs_buff[36] = rd32(E1000_MRQC); + regs_buff[37] = rd32(E1000_VT_CTL); + + /* Transmit */ + regs_buff[38] = rd32(E1000_TCTL); + regs_buff[39] = rd32(E1000_TCTL_EXT); + regs_buff[40] = rd32(E1000_TIPG); + regs_buff[41] = rd32(E1000_DTXCTL); + + /* Wake Up */ + regs_buff[42] = rd32(E1000_WUC); + regs_buff[43] = rd32(E1000_WUFC); + regs_buff[44] = rd32(E1000_WUS); + regs_buff[45] = rd32(E1000_IPAV); + regs_buff[46] = rd32(E1000_WUPL); + + /* MAC */ + regs_buff[47] = rd32(E1000_PCS_CFG0); + regs_buff[48] = rd32(E1000_PCS_LCTL); + regs_buff[49] = rd32(E1000_PCS_LSTAT); + regs_buff[50] = rd32(E1000_PCS_ANADV); + regs_buff[51] = rd32(E1000_PCS_LPAB); + regs_buff[52] = rd32(E1000_PCS_NPTX); + regs_buff[53] = rd32(E1000_PCS_LPABNP); + + /* Statistics */ + regs_buff[54] = adapter->stats.crcerrs; + regs_buff[55] = adapter->stats.algnerrc; + regs_buff[56] = adapter->stats.symerrs; + regs_buff[57] = adapter->stats.rxerrc; + regs_buff[58] = adapter->stats.mpc; + regs_buff[59] = adapter->stats.scc; + regs_buff[60] = adapter->stats.ecol; + regs_buff[61] = adapter->stats.mcc; + regs_buff[62] = adapter->stats.latecol; + regs_buff[63] = adapter->stats.colc; + regs_buff[64] = adapter->stats.dc; + regs_buff[65] = adapter->stats.tncrs; + regs_buff[66] = adapter->stats.sec; + regs_buff[67] = adapter->stats.htdpmc; + regs_buff[68] = adapter->stats.rlec; + regs_buff[69] = adapter->stats.xonrxc; + regs_buff[70] = adapter->stats.xontxc; + regs_buff[71] = adapter->stats.xoffrxc; + regs_buff[72] = adapter->stats.xofftxc; + regs_buff[73] = adapter->stats.fcruc; + regs_buff[74] = adapter->stats.prc64; + regs_buff[75] = adapter->stats.prc127; + regs_buff[76] = adapter->stats.prc255; + regs_buff[77] = adapter->stats.prc511; + regs_buff[78] = adapter->stats.prc1023; + regs_buff[79] = adapter->stats.prc1522; + regs_buff[80] = adapter->stats.gprc; + regs_buff[81] = adapter->stats.bprc; + regs_buff[82] = adapter->stats.mprc; + regs_buff[83] = adapter->stats.gptc; + regs_buff[84] = adapter->stats.gorc; + regs_buff[86] = adapter->stats.gotc; + regs_buff[88] = adapter->stats.rnbc; + regs_buff[89] = adapter->stats.ruc; + regs_buff[90] = adapter->stats.rfc; + regs_buff[91] = adapter->stats.roc; + regs_buff[92] = adapter->stats.rjc; + regs_buff[93] = adapter->stats.mgprc; + regs_buff[94] = adapter->stats.mgpdc; + regs_buff[95] = adapter->stats.mgptc; + regs_buff[96] = adapter->stats.tor; + regs_buff[98] = adapter->stats.tot; + regs_buff[100] = adapter->stats.tpr; + regs_buff[101] = adapter->stats.tpt; + regs_buff[102] = adapter->stats.ptc64; + regs_buff[103] = adapter->stats.ptc127; + regs_buff[104] = adapter->stats.ptc255; + regs_buff[105] = adapter->stats.ptc511; + regs_buff[106] = adapter->stats.ptc1023; + regs_buff[107] = adapter->stats.ptc1522; + regs_buff[108] = adapter->stats.mptc; + regs_buff[109] = adapter->stats.bptc; + regs_buff[110] = adapter->stats.tsctc; + regs_buff[111] = adapter->stats.iac; + regs_buff[112] = adapter->stats.rpthc; + regs_buff[113] = adapter->stats.hgptc; + regs_buff[114] = adapter->stats.hgorc; + regs_buff[116] = adapter->stats.hgotc; + regs_buff[118] = adapter->stats.lenerrs; + regs_buff[119] = 
adapter->stats.scvpc; + regs_buff[120] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[125 + i] = rd32(E1000_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[129 + i] = rd32(E1000_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[133 + i] = rd32(E1000_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[137 + i] = rd32(E1000_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[141 + i] = rd32(E1000_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[145 + i] = rd32(E1000_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(E1000_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[153 + i] = rd32(E1000_EITR(i)); + for (i = 0; i < 8; i++) + regs_buff[163 + i] = rd32(E1000_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[171 + i] = rd32(E1000_IMIREXT(i)); + for (i = 0; i < 16; i++) + regs_buff[179 + i] = rd32(E1000_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[195 + i] = rd32(E1000_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[211 + i] = rd32(E1000_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[215 + i] = rd32(E1000_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[219 + i] = rd32(E1000_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[223 + i] = rd32(E1000_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[227 + i] = rd32(E1000_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[231 + i] = rd32(E1000_TXDCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[235 + i] = rd32(E1000_TDWBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[239 + i] = rd32(E1000_TDWBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i)); + + for (i = 0; i < 4; i++) + regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i)); + for (i = 0; i < 32; i++) + regs_buff[255 + i] = rd32(E1000_WUPM_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[287 + i] = rd32(E1000_FFMT_REG(i)); + for (i = 0; i < 128; i++) + regs_buff[415 + i] = rd32(E1000_FFVT_REG(i)); + for (i = 0; i < 4; i++) + regs_buff[543 + i] = rd32(E1000_FFLT_REG(i)); + + regs_buff[547] = rd32(E1000_TDFH); + regs_buff[548] = rd32(E1000_TDFT); + regs_buff[549] = rd32(E1000_TDFHS); + regs_buff[550] = rd32(E1000_TDFPC); + + if (hw->mac.type > e1000_82580) { + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; + } + + if (hw->mac.type != e1000_82576) + return; + for (i = 0; i < 12; i++) + regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4)); + for (i = 0; i < 4; i++) + regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[607 + i] = rd32(E1000_RDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[619 + i] = rd32(E1000_RDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4)); + + for (i = 0; i < 12; i++) + regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[679 + i] = rd32(E1000_TDH(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[691 + i] = rd32(E1000_TDT(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 
4)); + for (i = 0; i < 12; i++) + regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4)); + for (i = 0; i < 12; i++) + regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4)); +} + +static int igb_get_eeprom_len(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int igb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if ((hw->mac.type >= e1000_i210) && + !igb_get_flash_presence_i210(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + igb_set_fw_version(adapter); + kfree(eeprom_buff); + return ret_val; +} + +static void igb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); + 
strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); + + /* EEPROM image version # is reported as firmware version # for + * 82575 controllers + */ + strlcpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = IGB_STATS_LEN; + drvinfo->testinfo_len = IGB_TEST_LEN; + drvinfo->regdump_len = igb_get_regs_len(netdev); + drvinfo->eedump_len = igb_get_eeprom_len(netdev); +} + +static void igb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGB_MAX_RXD; + ring->tx_max_pending = IGB_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int igb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct igb_ring *temp_ring; + int i, err = 0; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(adapter->num_tx_queues * + sizeof(struct igb_ring)); + else + temp_ring = vmalloc(adapter->num_rx_queues * + sizeof(struct igb_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igb_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_tx_count; + err = igb_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igb_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igb_ring)); + + temp_ring[i].count = new_rx_count; + err = igb_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igb_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igb_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igb_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igb_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGB_RESETTING, &adapter->state); + return err; +} + +/* ethtool register test data */ +struct igb_reg_test { + u16 reg; + u16 reg_offset; + u16 array_len; + u16 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x100 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* i210 reg test */ +static struct igb_reg_test reg_test_i210[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i210, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x900FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0, 0 } +}; + +/* i350 reg test */ +static struct igb_reg_test reg_test_i350[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* RDH is read-only for i350, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 16, TABLE64_TEST_HI, + 0xC3FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82580 reg test */ +static struct igb_reg_test reg_test_82580[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* RDH is read-only for 82580, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, + 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, + 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, + 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82576 reg test */ +static struct igb_reg_test reg_test_82576[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + /* Enable all RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82576, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +/* 82575 register test */ +static struct igb_reg_test reg_test_82575[] = { + { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF }, + { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, + E1000_RXDCTL_QUEUE_ENABLE }, + /* RDH is read-only for 82575, only test RDT. 
*/ + { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, + { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, + { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, + { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB }, + { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF }, + { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, + { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF }, + { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { 0, 0, 0, 0 } +}; + +static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pat, val; + static const u32 _test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { + wr32(reg, (_test[pat] & write)); + val = rd32(reg) & mask; + if (val != (_test[pat] & write & mask)) { + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (_test[pat] & write & mask)); + *data = reg; + return true; + } + } + + return false; +} + +static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val; + + wr32(reg, write & mask); + val = rd32(reg); + if ((write & mask) != (val & mask)) { + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return true; + } + + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int igb_reg_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + switch (adapter->hw.mac.type) { + case e1000_i350: + case e1000_i354: + test = reg_test_i350; + toggle = 0x7FEFF3FF; + break; + case e1000_i210: + case e1000_i211: + test = reg_test_i210; + toggle = 0x7FEFF3FF; + break; + case e1000_82580: + test = reg_test_82580; + toggle = 0x7FEFF3FF; + break; + case e1000_82576: + test = reg_test_82576; + toggle = 0x7FFFF3FF; + break; + default: + test = reg_test_82575; + toggle = 0x7FFFF3FF; + break; + } + + /* Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writable on newer MACs. 
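+ * The sequence below saves STATUS, writes the per-MAC toggle mask,
+ * fails if any of the masked bits changed as a result of the write,
+ * and finally restores the saved value.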
+ */ + before = rd32(E1000_STATUS); + value = (rd32(E1000_STATUS) & toggle); + wr32(E1000_STATUS, toggle); + after = rd32(E1000_STATUS) & toggle; + if (value != after) { + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + wr32(E1000_STATUS, before); + + /* Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + REG_PATTERN_TEST(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case SET_READ_TEST: + REG_SET_AND_CHECK(test->reg + + (i * test->reg_offset), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + writel(test->write, + (adapter->hw.hw_addr + test->reg) + + (i * test->reg_offset)); + break; + case TABLE32_TEST: + REG_PATTERN_TEST(test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + REG_PATTERN_TEST(test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + REG_PATTERN_TEST((test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + } + test++; + } + + *data = 0; + return 0; +} + +static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* Validate eeprom on all parts but flashless */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + } + break; + default: + if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0) + *data = 2; + break; + } + + return *data; +} + +static irqreturn_t igb_test_intr(int irq, void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= rd32(E1000_ICR); + + return IRQ_HANDLED; +} + +static int igb_intr_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 mask, ics_mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + if (request_irq(adapter->msix_entries[0].vector, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (adapter->flags & IGB_FLAG_HAS_MSI) { + shared_int = false; + if (request_irq(irq, + igb_test_intr, 0, netdev->name, adapter)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED, + netdev->name, adapter)) { + shared_int = false; + } else if (request_irq(irq, igb_test_intr, IRQF_SHARED, + netdev->name, adapter)) { + *data = 1; + return -1; + } + dev_info(&adapter->pdev->dev, "testing %s interrupt\n", + (shared_int ? 
"shared" : "unshared")); + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Define all writable bits for ICS */ + switch (hw->mac.type) { + case e1000_82575: + ics_mask = 0x37F47EDD; + break; + case e1000_82576: + ics_mask = 0x77D4FBFD; + break; + case e1000_82580: + ics_mask = 0x77DCFED5; + break; + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + ics_mask = 0x77DCFED5; + break; + default: + ics_mask = 0x7FFFFFFF; + break; + } + + /* Test each interrupt */ + for (; i < 31; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!(mask & ics_mask)) + continue; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMS, mask); + wr32(E1000_ICS, mask); + wrfl(); + usleep_range(10000, 11000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + + /* Flush any pending interrupts */ + wr32(E1000_ICR, ~0); + + wr32(E1000_IMC, ~mask); + wr32(E1000_ICS, ~mask); + wrfl(); + usleep_range(10000, 11000); + + if (adapter->test_icr & mask) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + wr32(E1000_IMC, ~0); + wrfl(); + usleep_range(10000, 11000); + + /* Unhook test interrupt handler */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) + free_irq(adapter->msix_entries[0].vector, adapter); + else + free_irq(irq, adapter); + + return *data; +} + +static void igb_free_desc_rings(struct igb_adapter *adapter) +{ + igb_free_tx_resources(&adapter->test_tx_ring); + igb_free_rx_resources(&adapter->test_rx_ring); +} + +static int igb_setup_desc_rings(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + struct e1000_hw *hw = &adapter->hw; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IGB_DEFAULT_TXD; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_tx_resources(tx_ring)) { + ret_val = 1; + goto err_nomem; + } + + igb_setup_tctl(adapter); + igb_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx descriptor ring and Rx buffers */ + rx_ring->count = IGB_DEFAULT_RXD; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->vfs_allocated_count; + + if (igb_setup_rx_resources(rx_ring)) { + ret_val = 3; + goto err_nomem; + } + + /* set the default queue to queue 0 of PF */ + wr32(E1000_MRQC, adapter->vfs_allocated_count << 3); + + /* enable receive ring */ + igb_setup_rctl(adapter); + igb_configure_rx_ring(adapter, rx_ring); + + igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring)); + + return 0; + +err_nomem: + igb_free_desc_rings(adapter); + return ret_val; +} + +static void igb_phy_disable_receiver(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + igb_write_phy_reg(hw, 29, 0x001F); + igb_write_phy_reg(hw, 30, 0x8FFC); + igb_write_phy_reg(hw, 29, 0x001A); + igb_write_phy_reg(hw, 30, 0x8FF0); +} + +static int igb_integrated_phy_loopback(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + + hw->mac.autoneg = false; + + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ + igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } + } else if (hw->phy.type == e1000_phy_82580) { + /* enable MII loopback */ + igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); + } + + /* add small delay to avoid loopback test failure */ + msleep(50); + + /* force 1000, set loopback */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = rd32(E1000_CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD | /* Force Duplex to FULL */ + E1000_CTRL_SLU); /* Set link up enable bit */ + + if (hw->phy.type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + + wr32(E1000_CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy.type == e1000_phy_m88) + igb_phy_disable_receiver(adapter); + + mdelay(500); + return 0; +} + +static int igb_set_phy_loopback(struct igb_adapter *adapter) +{ + return igb_integrated_phy_loopback(adapter); +} + +static int igb_setup_loopback_test(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + reg = rd32(E1000_CTRL_EXT); + + /* use CTRL_EXT to identify link type as SGMII can appear as copper */ + if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) { + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII) || + (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) { + /* Enable DH89xxCC MPHY for near end loopback */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + reg = rd32(E1000_RCTL); + reg |= E1000_RCTL_LBM_TCVR; + wr32(E1000_RCTL, reg); + + wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK); + + reg = rd32(E1000_CTRL); + reg &= ~(E1000_CTRL_RFCE | + E1000_CTRL_TFCE | + E1000_CTRL_LRST); + reg |= E1000_CTRL_SLU | + E1000_CTRL_FD; + wr32(E1000_CTRL, reg); + + /* Unset switch control to serdes energy detect */ + reg = rd32(E1000_CONNSW); + reg &= ~E1000_CONNSW_ENRGSRC; + wr32(E1000_CONNSW, reg); + + /* Unset sigdetect for SERDES loopback on + * 82580 and newer devices. 
+ */ + if (hw->mac.type >= e1000_82580) { + reg = rd32(E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_IGN_SD; + wr32(E1000_PCS_CFG0, reg); + } + + /* Set PCS register for forced speed */ + reg = rd32(E1000_PCS_LCTL); + reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ + reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ + E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ + E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ + E1000_PCS_LCTL_FSD | /* Force Speed */ + E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ + wr32(E1000_PCS_LCTL, reg); + + return 0; + } + + return igb_set_phy_loopback(adapter); +} + +static void igb_loopback_cleanup(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) || + (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) || + (hw->device_id == E1000_DEV_ID_I354_SGMII)) { + u32 reg; + + /* Disable near end loopback on DH89xxCC */ + reg = rd32(E1000_MPHY_ADDR_CTL); + reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) | + E1000_MPHY_PCS_CLK_REG_OFFSET; + wr32(E1000_MPHY_ADDR_CTL, reg); + + reg = rd32(E1000_MPHY_DATA); + reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN; + wr32(E1000_MPHY_DATA, reg); + } + + rctl = rd32(E1000_RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + wr32(E1000_RCTL, rctl); + + hw->mac.autoneg = true; + igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + igb_write_phy_reg(hw, PHY_CONTROL, phy_reg); + igb_phy_sw_reset(hw); + } +} + +static void igb_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size /= 2; + memset(&skb->data[frame_size], 0xAA, frame_size - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap(rx_buffer->page); + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; +} + +static int igb_clean_test_rings(struct igb_ring *rx_ring, + struct igb_ring *tx_ring, + unsigned int size) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer_info; + struct igb_tx_buffer *tx_buffer_info; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + + while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { + /* check Rx buffer */ + rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer_info->dma, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (igb_check_lbtest_frame(rx_buffer_info, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer_info->dma, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* unmap buffer on Tx side */ + tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == 
tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* re-map buffers to ring, store next to clean values */ + igb_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int igb_run_loopback_test(struct igb_adapter *adapter) +{ + struct igb_ring *tx_ring = &adapter->test_tx_ring; + struct igb_ring *rx_ring = &adapter->test_rx_ring; + u16 i, j, lc, good_cnt; + int ret_val = 0; + unsigned int size = IGB_RX_HDR_LEN; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + igb_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { /* loop count loop */ + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = igb_xmit_frame_ring(skb, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } /* end loop count loop */ + + /* free the original skb */ + kfree_skb(skb); + + return ret_val; +} + +static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active + */ + if (igb_check_reset_block(&adapter->hw)) { + dev_err(&adapter->pdev->dev, + "Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + if (adapter->hw.mac.type == e1000_i354) { + dev_info(&adapter->pdev->dev, + "Loopback test not supported on i354.\n"); + *data = 0; + goto out; + } + *data = igb_setup_desc_rings(adapter); + if (*data) + goto out; + *data = igb_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = igb_run_loopback_test(adapter); + igb_loopback_cleanup(adapter); + +err_loopback: + igb_free_desc_rings(adapter); +out: + return *data; +} + +static int igb_link_test(struct igb_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(&adapter->hw); + if (hw->mac.autoneg) + msleep(5000); + + if (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static void igb_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex, autoneg; + bool if_running = netif_running(netdev); + + set_bit(__IGB_TESTING, &adapter->state); + + /* can't do 
offline tests on media switching devices */ + if (adapter->hw.dev_spec._82575.mas_capable) + eth_test->flags &= ~ETH_TEST_FL_OFFLINE; + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + dev_info(&adapter->pdev->dev, "offline testing starting\n"); + + /* power up link for link test */ + igb_power_up_link(adapter); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igb_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + igb_reset(adapter); + + if (igb_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + if (igb_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + igb_reset(adapter); + /* power up link for loopback test */ + igb_power_up_link(adapter); + if (igb_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = true; + igb_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = false; + + clear_bit(__IGB_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + dev_info(&adapter->pdev->dev, "online testing starting\n"); + + /* PHY is powered down when interface is down */ + if (if_running && igb_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + else + data[4] = 0; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__IGB_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + wol->wolopts = 0; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | + WAKE_PHY; + + /* apply any specific unsupported masks here */ + switch (adapter->hw.device_id) { + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +/* bit defines for adapter->led_status */ +#define IGB_LED_ON 0 + +static int igb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + igb_blink_led(hw); + return 2; + case ETHTOOL_ID_ON: + igb_blink_led(hw); + break; + case ETHTOOL_ID_OFF: + igb_led_off(hw); + break; + case ETHTOOL_ID_INACTIVE: + igb_led_off(hw); + clear_bit(IGB_LED_ON, &adapter->led_status); + igb_cleanup_led(hw); + break; + } + + return 0; +} + +static int igb_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 3) && + (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) || + (ec->tx_coalesce_usecs == 2)) + return -EINVAL; + + if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGB_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +static int igb_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igb_nway_reset(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igb_reinit_locked(adapter); + return 0; +} + 
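/* Editor's illustration (not part of the original patch): igb_set_coalesce()
 * and igb_get_coalesce() above store interrupt moderation as an "ITR
 * setting" in which the values 1-3 select dynamic moderation modes and any
 * larger value is the requested microsecond count shifted left by two bits.
 * The self-contained sketch below only demonstrates that round-trip
 * encoding; the helper names are hypothetical and do not exist in the
 * driver.
 */
static inline unsigned int itr_setting_from_usecs(unsigned int usecs)
{
	/* 1-3 are mode selectors and are stored verbatim */
	if (usecs && usecs <= 3)
		return usecs;
	return usecs << 2;	/* e.g. 100 usecs is stored as 400 */
}

static inline unsigned int itr_setting_to_usecs(unsigned int setting)
{
	if (setting <= 3)
		return setting;	/* reported back to ethtool unchanged */
	return setting >> 2;	/* e.g. a stored 400 reads back as 100 usecs */
}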
+static int igb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IGB_STATS_LEN; + case ETH_SS_TEST: + return IGB_TEST_LEN; + default: + return -ENOTSUPP; + } +} + +static void igb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 *net_stats = &adapter->stats64; + unsigned int start; + struct igb_ring *ring; + int i, j; + char *p; + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, net_stats); + + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + igb_gstrings_stats[i].stat_offset; + data[i] = (igb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) { + p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset; + data[i] = (igb_gstrings_net_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + u64 restart2; + + ring = adapter->tx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + data[i] = ring->tx_stats.packets; + data[i+1] = ring->tx_stats.bytes; + data[i+2] = ring->tx_stats.restart_queue; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); + restart2 = ring->tx_stats.restart_queue2; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); + data[i+2] += restart2; + + i += IGB_TX_QUEUE_STATS_LEN; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + data[i] = ring->rx_stats.packets; + data[i+1] = ring->rx_stats.bytes; + data[i+2] = ring->rx_stats.drops; + data[i+3] = ring->rx_stats.csum_err; + data[i+4] = ring->rx_stats.alloc_failed; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + i += IGB_RX_QUEUE_STATS_LEN; + } + spin_unlock(&adapter->stats64_lock); +} + +static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igb_gstrings_test, + IGB_TEST_LEN*ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { + memcpy(p, igb_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) { + memcpy(p, igb_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_restart", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_drops", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_csum_err", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_alloc_failed", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); 
+ else + info->phc_index = -1; + + switch (adapter->hw.mac.type) { + case e1000_82575: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + return 0; + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + + /* 82576 does not support timestamping all packets. */ + if (adapter->hw.mac.type >= e1000_82580) + info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; + else + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int igb_get_rss_hash_opts(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on igb */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V4_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case UDP_V6_FLOW: + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* Fall through */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + ret = igb_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \ + IGB_FLAG_RSS_FIELD_IPV6_UDP) +static int igb_set_rss_hash_opt(struct igb_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags = adapter->flags; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP; + break; + 
default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags != adapter->flags) { + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = rd32(E1000_MRQC); + + if ((flags & UDP_RSS_FLAGS) && + !(adapter->flags & UDP_RSS_FLAGS)) + dev_err(&adapter->pdev->dev, + "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags = flags; + + /* Perform hash on these packet types */ + mrqc |= E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + wr32(E1000_MRQC, mrqc); + } + + return 0; +} + +static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct igb_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = igb_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ret_val; + u16 phy_data; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + edata->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full); + if (!hw->dev_spec._82575.eee_disable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* The IPCNFG and EEER registers are not supported on I354. 
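+ * Active EEE state on I354 is therefore queried through
+ * igb_get_eee_status_i354() below rather than by reading EEER.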
*/ + if (hw->mac.type == e1000_i354) { + igb_get_eee_status_i354(hw, (bool *)&edata->eee_active); + } else { + u32 eeer; + + eeer = rd32(E1000_EEER); + + /* EEE status on negotiated link */ + if (eeer & E1000_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & E1000_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + } + + /* EEE Link Partner Advertised */ + switch (hw->mac.type) { + case e1000_i350: + ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + break; + case e1000_i354: + case e1000_i210: + case e1000_i211: + ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, + E1000_EEE_LP_ADV_DEV_I210, + &phy_data); + if (ret_val) + return -ENODATA; + + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + break; + default: + break; + } + + edata->eee_enabled = !hw->dev_spec._82575.eee_disable; + + if ((hw->mac.type == e1000_i354) && + (edata->eee_enabled)) + edata->tx_lpi_enabled = true; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igb_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + bool adv1g_eee = true, adv100m_eee = true; + s32 ret_val; + + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) + return -EOPNOTSUPP; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igb_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + dev_err(&adapter->pdev->dev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (!edata->advertised || (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { + dev_err(&adapter->pdev->dev, + "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); + return -EINVAL; + } + adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); + adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + + } else if (!edata->eee_enabled) { + dev_err(&adapter->pdev->dev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { + hw->dev_spec._82575.eee_disable = !edata->eee_enabled; + adapter->flags |= IGB_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + } + + if (hw->mac.type == e1000_i354) + ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee); + else + ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee); + + if (ret_val) { + dev_err(&adapter->pdev->dev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + return 0; +} + +static int igb_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = 
&adapter->hw; + u32 status = 0; + u16 sff8472_rev, addr_mode; + bool page_swap = false; + + if ((hw->phy.media_type == e1000_media_type_copper) || + (hw->phy.media_type == e1000_media_type_unknown)) + return -EOPNOTSUPP; + + /* Check whether we support SFF-8472 or not */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev); + if (status) + return -EIO; + + /* addressing mode is not supported */ + status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode); + if (status) + return -EIO; + + /* addressing mode is not supported */ + if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) { + hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) { + /* We have an SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have an SFP which supports a revision of SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int igb_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status = 0; + u16 *dataword; + u16 first_word, last_word; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + first_word = ee->offset >> 1; + last_word = (ee->offset + ee->len - 1) >> 1; + + dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1), + GFP_KERNEL); + if (!dataword) + return -ENOMEM; + + /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ + for (i = 0; i < last_word - first_word + 1; i++) { + status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); + if (status) { + /* Error occurred while reading module */ + kfree(dataword); + return -EIO; + } + + be16_to_cpus(&dataword[i]); + } + + memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len); + kfree(dataword); + + return 0; +} + +static int igb_ethtool_begin(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igb_ethtool_complete(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + pm_runtime_put(&adapter->pdev->dev); +} + +static u32 igb_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGB_RETA_SIZE; +} + +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGB_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +void igb_write_rss_indir_tbl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg = E1000_RETA(0); + u32 shift = 0; + int i = 0; + + switch (hw->mac.type) { + case e1000_82575: + shift = 6; + break; + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + shift = 3; + break; + default: + break; + } + + while (i < IGB_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igb_adapter *adapter = 
netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int i; + u32 num_queues; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_queues = 2; + break; + default: + break; + } + + /* Verify user input. */ + for (i = 0; i < IGB_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + + for (i = 0; i < IGB_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igb_write_rss_indir_tbl(adapter); + + return 0; +} + +static unsigned int igb_max_channels(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned int max_combined = 0; + + switch (hw->mac.type) { + case e1000_i211: + max_combined = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: + case e1000_i210: + max_combined = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: + if (!!adapter->vfs_allocated_count) { + max_combined = 1; + break; + } + /* fall through */ + case e1000_82576: + if (!!adapter->vfs_allocated_count) { + max_combined = 2; + break; + } + /* fall through */ + case e1000_82580: + case e1000_i354: + default: + max_combined = IGB_MAX_RX_QUEUES; + break; + } + + return max_combined; +} + +static void igb_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igb_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igb_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + if (count > igb_max_channels(adapter)) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
+ */ + return igb_reinit_queues(adapter); + } + + return 0; +} + +static const struct ethtool_ops igb_ethtool_ops = { + .get_settings = igb_get_settings, + .set_settings = igb_set_settings, + .get_drvinfo = igb_get_drvinfo, + .get_regs_len = igb_get_regs_len, + .get_regs = igb_get_regs, + .get_wol = igb_get_wol, + .set_wol = igb_set_wol, + .get_msglevel = igb_get_msglevel, + .set_msglevel = igb_set_msglevel, + .nway_reset = igb_nway_reset, + .get_link = igb_get_link, + .get_eeprom_len = igb_get_eeprom_len, + .get_eeprom = igb_get_eeprom, + .set_eeprom = igb_set_eeprom, + .get_ringparam = igb_get_ringparam, + .set_ringparam = igb_set_ringparam, + .get_pauseparam = igb_get_pauseparam, + .set_pauseparam = igb_set_pauseparam, + .self_test = igb_diag_test, + .get_strings = igb_get_strings, + .set_phys_id = igb_set_phys_id, + .get_sset_count = igb_get_sset_count, + .get_ethtool_stats = igb_get_ethtool_stats, + .get_coalesce = igb_get_coalesce, + .set_coalesce = igb_set_coalesce, + .get_ts_info = igb_get_ts_info, + .get_rxnfc = igb_get_rxnfc, + .set_rxnfc = igb_set_rxnfc, + .get_eee = igb_get_eee, + .set_eee = igb_set_eee, + .get_module_info = igb_get_module_info, + .get_module_eeprom = igb_get_module_eeprom, + .get_rxfh_indir_size = igb_get_rxfh_indir_size, + .get_rxfh = igb_get_rxfh, + .set_rxfh = igb_set_rxfh, + .get_channels = igb_get_channels, + .set_channels = igb_set_channels, + .begin = igb_ethtool_begin, + .complete = igb_ethtool_complete, +}; + +void igb_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igb_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c new file mode 100644 index 000000000..44b6a68f1 --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -0,0 +1,249 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "igb.h" +#include "e1000_82575.h" +#include "e1000_hw.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IGB_HWMON +static struct i2c_board_info i350_sensor_info = { + I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), +}; + +/* hwmon callback functions */ +static ssize_t igb_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + igb_attr->sensor->location); +} + +static ssize_t igb_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); + + value = igb_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = igb_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @ adapter: pointer to the adapter structure + * @ offset: offset in the eeprom sensor data table + * @ type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
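+ * Returns 0 on success or -EPERM if an unknown sensor type is requested.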
+ */ +static int igb_add_hwmon_attr(struct igb_adapter *adapter, + unsigned int offset, int type) +{ + int rc; + unsigned int n_attr; + struct hwmon_attr *igb_attr; + + n_attr = adapter->igb_hwmon_buff->n_hwmon; + igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case IGB_HWMON_TYPE_LOC: + igb_attr->dev_attr.show = igb_hwmon_show_location; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_label", offset + 1); + break; + case IGB_HWMON_TYPE_TEMP: + igb_attr->dev_attr.show = igb_hwmon_show_temp; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_input", offset + 1); + break; + case IGB_HWMON_TYPE_CAUTION: + igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_max", offset + 1); + break; + case IGB_HWMON_TYPE_MAX: + igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; + snprintf(igb_attr->name, sizeof(igb_attr->name), + "temp%u_crit", offset + 1); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + igb_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + igb_attr->hw = &adapter->hw; + igb_attr->dev_attr.store = NULL; + igb_attr->dev_attr.attr.mode = S_IRUGO; + igb_attr->dev_attr.attr.name = igb_attr->name; + sysfs_attr_init(&igb_attr->dev_attr.attr); + + adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; + + ++adapter->igb_hwmon_buff->n_hwmon; + + return 0; +} + +static void igb_sysfs_del_adapter(struct igb_adapter *adapter) +{ +} + +/* called from igb_main.c */ +void igb_sysfs_exit(struct igb_adapter *adapter) +{ + igb_sysfs_del_adapter(adapter); +} + +/* called from igb_main.c */ +int igb_sysfs_init(struct igb_adapter *adapter) +{ + struct hwmon_buff *igb_hwmon; + struct i2c_client *client; + struct device *hwmon_dev; + unsigned int i; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) + goto exit; + + /* Don't create thermal hwmon interface if no sensors present */ + rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); + if (rc) + goto exit; + + igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), + GFP_KERNEL); + if (!igb_hwmon) { + rc = -ENOMEM; + goto exit; + } + adapter->igb_hwmon_buff = igb_hwmon; + + for (i = 0; i < E1000_MAX_SENSORS; i++) { + + /* Only create hwmon sysfs entries for sensors that have + * meaningful data. 
+ */ + if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) + continue; + + /* Bail if any hwmon attr struct fails to initialize */ + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); + if (rc) + goto exit; + rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); + if (rc) + goto exit; + } + + /* init i2c_client */ + client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); + if (client == NULL) { + dev_info(&adapter->pdev->dev, + "Failed to create new i2c device.\n"); + rc = -ENODEV; + goto exit; + } + adapter->i2c_client = client; + + igb_hwmon->groups[0] = &igb_hwmon->group; + igb_hwmon->group.attrs = igb_hwmon->attrs; + + hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, + client->name, + igb_hwmon, + igb_hwmon->groups); + if (IS_ERR(hwmon_dev)) { + rc = PTR_ERR(hwmon_dev); + goto err; + } + + goto exit; + +err: + igb_sysfs_del_adapter(adapter); +exit: + return rc; +} +#endif diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c new file mode 100644 index 000000000..a0a9b1fcb --- /dev/null +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -0,0 +1,8140 @@ +/* Intel(R) Gigabit Ethernet Linux driver + * Copyright(c) 2007-2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_IGB_DCA +#include +#endif +#include +#include "igb.h" + +#define MAJ 5 +#define MIN 2 +#define BUILD 15 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ +__stringify(BUILD) "-k" +char igb_driver_name[] = "igb"; +char igb_driver_version[] = DRV_VERSION; +static const char igb_driver_string[] = + "Intel(R) Gigabit Ethernet Network Driver"; +static const char igb_copyright[] = + "Copyright (c) 2007-2014 Intel Corporation."; + +static const struct e1000_info *igb_info_tbl[] = { + [board_82575] = &e1000_82575_info, +}; + +static const struct pci_device_id igb_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, igb_pci_tbl); + +static int igb_setup_all_tx_resources(struct igb_adapter *); +static int igb_setup_all_rx_resources(struct igb_adapter *); +static void igb_free_all_tx_resources(struct igb_adapter *); +static void igb_free_all_rx_resources(struct igb_adapter *); +static void igb_setup_mrqc(struct igb_adapter *); +static int igb_probe(struct pci_dev *, const struct pci_device_id *); +static void igb_remove(struct pci_dev *pdev); +static int igb_sw_init(struct igb_adapter *); +static int igb_open(struct net_device *); +static int igb_close(struct net_device *); +static void igb_configure(struct igb_adapter *); +static void igb_configure_tx(struct igb_adapter *); +static void igb_configure_rx(struct igb_adapter *); 
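The DRV_VERSION definition near the top of this file builds the "5.2.15-k" string at compile time via two-level stringification: macro arguments are expanded first, then turned into string literals, and adjacent literals concatenate. A small standalone sketch of the same trick (local macro names, not the kernel's stringify header):

#include <stdio.h>

/* expand first, then stringify */
#define STR_1(x)  #x
#define STR(x)    STR_1(x)

#define MAJ   5
#define MIN   2
#define BUILD 15
#define VERSION STR(MAJ) "." STR(MIN) "." STR(BUILD) "-k"

int main(void)
{
	puts(VERSION);	/* prints "5.2.15-k" */
	return 0;
}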
+static void igb_clean_all_tx_rings(struct igb_adapter *); +static void igb_clean_all_rx_rings(struct igb_adapter *); +static void igb_clean_tx_ring(struct igb_ring *); +static void igb_clean_rx_ring(struct igb_ring *); +static void igb_set_rx_mode(struct net_device *); +static void igb_update_phy_info(unsigned long); +static void igb_watchdog(unsigned long); +static void igb_watchdog_task(struct work_struct *); +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); +static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static int igb_change_mtu(struct net_device *, int); +static int igb_set_mac(struct net_device *, void *); +static void igb_set_uta(struct igb_adapter *adapter); +static irqreturn_t igb_intr(int irq, void *); +static irqreturn_t igb_intr_msi(int irq, void *); +static irqreturn_t igb_msix_other(int irq, void *); +static irqreturn_t igb_msix_ring(int irq, void *); +#ifdef CONFIG_IGB_DCA +static void igb_update_dca(struct igb_q_vector *); +static void igb_setup_dca(struct igb_adapter *); +#endif /* CONFIG_IGB_DCA */ +static int igb_poll(struct napi_struct *, int); +static bool igb_clean_tx_irq(struct igb_q_vector *); +static bool igb_clean_rx_irq(struct igb_q_vector *, int); +static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); +static void igb_tx_timeout(struct net_device *); +static void igb_reset_task(struct work_struct *); +static void igb_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16); +static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16); +static void igb_restore_vlan(struct igb_adapter *); +static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8); +static void igb_ping_all_vfs(struct igb_adapter *); +static void igb_msg_task(struct igb_adapter *); +static void igb_vmm_control(struct igb_adapter *); +static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *); +static void igb_restore_vf_multicasts(struct igb_adapter *adapter); +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos); +static int igb_ndo_set_vf_bw(struct net_device *, int, int, int); +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting); +static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +static void igb_check_vf_rate_limit(struct igb_adapter *); + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf); +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +#endif + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int igb_suspend(struct device *); +#endif +static int igb_resume(struct device *); +static int igb_runtime_suspend(struct device *dev); +static int igb_runtime_resume(struct device *dev); +static int igb_runtime_idle(struct device *dev); +static const struct dev_pm_ops igb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) + SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle) +}; +#endif +static void igb_shutdown(struct pci_dev *); +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +#ifdef CONFIG_IGB_DCA +static int igb_notify_dca(struct notifier_block *, unsigned long, void *); +static struct notifier_block dca_notifier = { + .notifier_call = igb_notify_dca, + .next = NULL, + .priority = 0 +}; 
+#endif +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void igb_netpoll(struct net_device *); +#endif +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); +#endif /* CONFIG_PCI_IOV */ + +static pci_ers_result_t igb_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); +static void igb_io_resume(struct pci_dev *); + +static const struct pci_error_handlers igb_err_handler = { + .error_detected = igb_io_error_detected, + .slot_reset = igb_io_slot_reset, + .resume = igb_io_resume, +}; + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = igb_remove, +#ifdef CONFIG_PM + .driver.pm = &igb_pm_ops, +#endif + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct igb_reg_info { + u32 ofs; + char *name; +}; + +static const struct igb_reg_info igb_reg_info_tbl[] = { + + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* RX Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {} +}; + +/* igb_regdump - register printout routine */ +static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDLEN(n)); + break; + case E1000_RDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDH(n)); + break; + case E1000_RDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDT(n)); + break; + case E1000_RXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RXDCTL(n)); + break; + case E1000_RDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAL(n)); + break; + case E1000_RDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAH(n)); + break; + case E1000_TDBAL(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_RDBAL(n)); + break; + case E1000_TDBAH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDBAH(n)); + break; + case E1000_TDLEN(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDLEN(n)); + break; + case E1000_TDH(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDH(n)); + break; + case E1000_TDT(0): + for (n = 0; n < 4; n++) + regs[n] = rd32(E1000_TDT(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 4; n++) + regs[n] = 
rd32(E1000_TXDCTL(n)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]"); + pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1], + regs[2], regs[3]); +} + +/* igb_dump - Print registers, Tx-rings and Rx-rings */ +static void igb_dump(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct igb_reg_info *reginfo; + struct igb_ring *tx_ring; + union e1000_adv_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct igb_ring *rx_ring; + union e1000_adv_rx_desc *rx_desc; + u32 staterr; + u16 i, n; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl; + reginfo->name; reginfo++) { + igb_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + goto exit; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + for (n = 0; n < adapter->num_tx_queues; n++) { + struct igb_tx_buffer *buffer_info; + tx_ring = adapter->tx_ring[n]; + buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 15 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + struct igb_tx_buffer *buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, i); + buffer_info = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n", + i, le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, + buffer_info->skb, next_desc); + 
+ if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info(" %5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + goto exit; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | + * | Checksum Ident | | | | Type | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n"); + pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + struct igb_rx_buffer *buffer_info; + buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IGB_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n", + "RWB", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n", + "R ", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)buffer_info->dma, + next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->page) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, + 16, 1, + page_address(buffer_info->page) + + buffer_info->page_offset, + IGB_RX_BUFSZ, true); + } + } + } + } + +exit: + return; +} + +/** + * igb_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static int igb_get_i2c_data(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_DATA_IN); +} + +/** + * igb_set_i2c_data - Sets the I2C data bit + * @data: pointer to hardware structure + * @state: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static void 
igb_set_i2c_data(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) + i2cctl |= E1000_I2C_DATA_OUT; + else + i2cctl &= ~E1000_I2C_DATA_OUT; + + i2cctl &= ~E1000_I2C_DATA_OE_N; + i2cctl |= E1000_I2C_CLK_OE_N; + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); + +} + +/** + * igb_set_i2c_clk - Sets the I2C SCL clock + * @data: pointer to hardware structure + * @state: state to set clock + * + * Sets the I2C clock line to state + **/ +static void igb_set_i2c_clk(void *data, int state) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + if (state) { + i2cctl |= E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } else { + i2cctl &= ~E1000_I2C_CLK_OUT; + i2cctl &= ~E1000_I2C_CLK_OE_N; + } + wr32(E1000_I2CPARAMS, i2cctl); + wrfl(); +} + +/** + * igb_get_i2c_clk - Gets the I2C SCL clock state + * @data: pointer to hardware structure + * + * Gets the I2C clock state + **/ +static int igb_get_i2c_clk(void *data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + struct e1000_hw *hw = &adapter->hw; + s32 i2cctl = rd32(E1000_I2CPARAMS); + + return !!(i2cctl & E1000_I2C_CLK_IN); +} + +static const struct i2c_algo_bit_data igb_i2c_algo = { + .setsda = igb_set_i2c_data, + .setscl = igb_set_i2c_clk, + .getsda = igb_get_i2c_data, + .getscl = igb_get_i2c_clk, + .udelay = 5, + .timeout = 20, +}; + +/** + * igb_get_hw_dev - return device + * @hw: pointer to hardware structure + * + * used by hardware layer to print debugging information + **/ +struct net_device *igb_get_hw_dev(struct e1000_hw *hw) +{ + struct igb_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * igb_init_module - Driver Registration Routine + * + * igb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igb_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", + igb_driver_string, igb_driver_version); + pr_info("%s\n", igb_copyright); + +#ifdef CONFIG_IGB_DCA + dca_register_notify(&dca_notifier); +#endif + ret = pci_register_driver(&igb_driver); + return ret; +} + +module_init(igb_init_module); + +/** + * igb_exit_module - Driver Exit Cleanup Routine + * + * igb_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit igb_exit_module(void) +{ +#ifdef CONFIG_IGB_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&igb_driver); +} + +module_exit(igb_exit_module); + +#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) +/** + * igb_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + **/ +static void igb_cache_ring_register(struct igb_adapter *adapter) +{ + int i = 0, j = 0; + u32 rbase_offset = adapter->vfs_allocated_count; + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* The queues are allocated for virtualization such that VF 0 + * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc. 
+ * In order to avoid collision we start at the first free queue + * and continue consuming queues in the same sequence + */ + if (adapter->vfs_allocated_count) { + for (; i < adapter->rss_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + + Q_IDX_82576(i); + } + /* Fall through */ + case e1000_82575: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Fall through */ + default: + for (; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = rbase_offset + i; + for (; j < adapter->num_tx_queues; j++) + adapter->tx_ring[j]->reg_idx = rbase_offset + j; + break; + } +} + +u32 igb_rd32(struct e1000_hw *hw, u32 reg) +{ + struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); + u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); + u32 value = 0; + + if (E1000_REMOVED(hw_addr)) + return ~value; + + value = readl(&hw_addr[reg]); + + /* reads should not return all F's */ + if (!(~value) && (!reg || !(~readl(hw_addr)))) { + struct net_device *netdev = igb->netdev; + hw->hw_addr = NULL; + netif_device_detach(netdev); + netdev_err(netdev, "PCIe link lost, device now detached\n"); + } + + return value; +} + +/** + * igb_write_ivar - configure ivar for given MSI-X vector + * @hw: pointer to the HW structure + * @msix_vector: vector number we are allocating to a given ring + * @index: row index of IVAR register to write within IVAR table + * @offset: column offset of in IVAR, should be multiple of 8 + * + * This function is intended to handle the writing of the IVAR register + * for adapters 82576 and newer. The IVAR table consists of 2 columns, + * each containing an cause allocation for an Rx and Tx ring, and a + * variable number of rows depending on the number of queues supported. + **/ +static void igb_write_ivar(struct e1000_hw *hw, int msix_vector, + int index, int offset) +{ + u32 ivar = array_rd32(E1000_IVAR0, index); + + /* clear any bits that are currently set */ + ivar &= ~((u32)0xFF << offset); + + /* write vector and valid bit */ + ivar |= (msix_vector | E1000_IVAR_VALID) << offset; + + array_wr32(E1000_IVAR0, index, ivar); +} + +#define IGB_N0_QUEUE -1 +static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + int rx_queue = IGB_N0_QUEUE; + int tx_queue = IGB_N0_QUEUE; + u32 msixbm = 0; + + if (q_vector->rx.ring) + rx_queue = q_vector->rx.ring->reg_idx; + if (q_vector->tx.ring) + tx_queue = q_vector->tx.ring->reg_idx; + + switch (hw->mac.type) { + case e1000_82575: + /* The 82575 assigns vectors using a bitmask, which matches the + * bitmask for the EICR/EIMS/EIMC registers. To assign one + * or more queues to a vector, we write the appropriate bits + * into the MSIXBM register for that vector. + */ + if (rx_queue > IGB_N0_QUEUE) + msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; + if (tx_queue > IGB_N0_QUEUE) + msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) + msixbm |= E1000_EIMS_OTHER; + array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); + q_vector->eims_value = msixbm; + break; + case e1000_82576: + /* 82576 uses a table that essentially consists of 2 columns + * with 8 rows. The ordering is column-major so we use the + * lower 3 bits as the row index, and the 4th bit as the + * column offset. 
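A standalone sketch of the index/offset arithmetic described by the comment above and by the 82580-and-newer comment that follows; per the driver code below, Tx queues additionally add 8 to the offset, and the queue numbers here are illustrative only:

#include <stdio.h>

/* 82576: column-major, low 3 bits pick the row, bit 3 picks the column */
static void ivar_82576(int queue, int *index, int *offset)
{
	*index  = queue & 0x7;
	*offset = (queue & 0x8) << 1;	/* 0 for queues 0-7, 16 for queues 8-15 */
}

/* 82580 and newer: row-major, bit 0 picks the column, the rest pick the row */
static void ivar_82580(int queue, int *index, int *offset)
{
	*index  = queue >> 1;
	*offset = (queue & 0x1) << 4;	/* 0 for even queues, 16 for odd queues */
}

int main(void)
{
	int q, idx, off;

	for (q = 0; q < 4; q++) {
		ivar_82576(q, &idx, &off);
		printf("82576 Rx queue %d -> IVAR0[%d], bits %2d-%2d\n",
		       q, idx, off, off + 7);
		ivar_82580(q, &idx, &off);
		printf("82580 Rx queue %d -> IVAR0[%d], bits %2d-%2d\n",
		       q, idx, off, off + 7);
	}
	return 0;
}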
+ */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue & 0x7, + (rx_queue & 0x8) << 1); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue & 0x7, + ((tx_queue & 0x8) << 1) + 8); + q_vector->eims_value = 1 << msix_vector; + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* On 82580 and newer adapters the scheme is similar to 82576 + * however instead of ordering column-major we have things + * ordered row-major. So we traverse the table by using + * bit 0 as the column offset, and the remaining bits as the + * row index. + */ + if (rx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + rx_queue >> 1, + (rx_queue & 0x1) << 4); + if (tx_queue > IGB_N0_QUEUE) + igb_write_ivar(hw, msix_vector, + tx_queue >> 1, + ((tx_queue & 0x1) << 4) + 8); + q_vector->eims_value = 1 << msix_vector; + break; + default: + BUG(); + break; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= q_vector->eims_value; + + /* configure q_vector to set itr on first interrupt */ + q_vector->set_itr = 1; +} + +/** + * igb_configure_msix - Configure MSI-X hardware + * @adapter: board private structure to initialize + * + * igb_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igb_configure_msix(struct igb_adapter *adapter) +{ + u32 tmp; + int i, vector = 0; + struct e1000_hw *hw = &adapter->hw; + + adapter->eims_enable_mask = 0; + + /* set vector for other causes, i.e. link changes */ + switch (hw->mac.type) { + case e1000_82575: + tmp = rd32(E1000_CTRL_EXT); + /* enable MSI-X PBA support*/ + tmp |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + + wr32(E1000_CTRL_EXT, tmp); + + /* enable msix_other interrupt */ + array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER); + adapter->eims_other = E1000_EIMS_OTHER; + + break; + + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + /* Turn on MSI-X capability first, or our settings + * won't stick. And it will take days to debug. + */ + wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | + E1000_GPIE_PBA | E1000_GPIE_EIAME | + E1000_GPIE_NSICR); + + /* enable msix_other interrupt */ + adapter->eims_other = 1 << vector; + tmp = (vector++ | E1000_IVAR_VALID) << 8; + + wr32(E1000_IVAR_MISC, tmp); + break; + default: + /* do nothing, since nothing else supports MSI-X */ + break; + } /* switch (hw->mac.type) */ + + adapter->eims_enable_mask |= adapter->eims_other; + + for (i = 0; i < adapter->num_q_vectors; i++) + igb_assign_vector(adapter->q_vector[i], vector++); + + wrfl(); +} + +/** + * igb_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure to initialize + * + * igb_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
+ **/ +static int igb_request_msix(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int i, err = 0, vector = 0, free_vector = 0; + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_other, 0, netdev->name, adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igb_q_vector *q_vector = adapter->q_vector[i]; + + vector++; + + q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); + + if (q_vector->rx.ring && q_vector->tx.ring) + sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else if (q_vector->tx.ring) + sprintf(q_vector->name, "%s-tx-%u", netdev->name, + q_vector->tx.ring->queue_index); + else if (q_vector->rx.ring) + sprintf(q_vector->name, "%s-rx-%u", netdev->name, + q_vector->rx.ring->queue_index); + else + sprintf(q_vector->name, "%s-unused", netdev->name); + + err = request_irq(adapter->msix_entries[vector].vector, + igb_msix_ring, 0, q_vector->name, + q_vector); + if (err) + goto err_free; + } + + igb_configure_msix(adapter); + return 0; + +err_free: + /* free already assigned IRQs */ + free_irq(adapter->msix_entries[free_vector++].vector, adapter); + + vector--; + for (i = 0; i < vector; i++) { + free_irq(adapter->msix_entries[free_vector++].vector, + adapter->q_vector[i]); + } +err_out: + return err; +} + +/** + * igb_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. + **/ +static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + adapter->q_vector[v_idx] = NULL; + + /* igb_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + if (q_vector) + kfree_rcu(q_vector, rcu); +} + +/** + * igb_reset_q_vector - Reset config for interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be reset + * + * If NAPI is enabled it will delete any references to the + * NAPI struct. This is preparation for igb_free_q_vector. + **/ +static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) +{ + struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; + + /* Coming from igb_set_interrupt_capability, the vectors are not yet + * allocated. So, q_vector is NULL so we should stop here. + */ + if (!q_vector) + return; + + if (q_vector->tx.ring) + adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; + + if (q_vector->rx.ring) + adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; + + netif_napi_del(&q_vector->napi); + +} + +static void igb_reset_interrupt_capability(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & IGB_FLAG_HAS_MSI) + pci_disable_msi(adapter->pdev); + + while (v_idx--) + igb_reset_q_vector(adapter, v_idx); +} + +/** + * igb_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
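igb_free_q_vector() above defers the actual free with kfree_rcu() because lockless readers such as igb_get_stats64() may still be walking the rings. A schematic kernel-side sketch of that publish/retire pattern (generic, not igb code, and not buildable on its own outside a module):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	int value;
	struct rcu_head rcu;		/* storage kfree_rcu() uses to defer the free */
};

static struct item __rcu *cur_item;

/* reader side: no lock, only an RCU read-side critical section */
static int item_read(void)
{
	struct item *it;
	int v = -1;

	rcu_read_lock();
	it = rcu_dereference(cur_item);
	if (it)
		v = it->value;
	rcu_read_unlock();
	return v;
}

/* writer side: unpublish the old object, then let a grace period pass */
static void item_replace(struct item *new_it)
{
	struct item *old = rcu_dereference_protected(cur_item, 1);

	rcu_assign_pointer(cur_item, new_it);
	if (old)
		kfree_rcu(old, rcu);	/* freed only after all readers are done */
}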
+ **/ +static void igb_free_q_vectors(struct igb_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) { + igb_reset_q_vector(adapter, v_idx); + igb_free_q_vector(adapter, v_idx); + } +} + +/** + * igb_clear_interrupt_scheme - reset the device to a state of no interrupts + * @adapter: board private structure to initialize + * + * This function resets the device so that it has 0 Rx queues, Tx queues, and + * MSI-X interrupts allocated. + */ +static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) +{ + igb_free_q_vectors(adapter); + igb_reset_interrupt_capability(adapter); +} + +/** + * igb_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) +{ + int err; + int numvecs, i; + + if (!msix) + goto msi_only; + adapter->flags |= IGB_FLAG_HAS_MSIX; + + /* Number of supported queues. */ + adapter->num_rx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; + + /* start with one vector for every Rx queue */ + numvecs = adapter->num_rx_queues; + + /* if Tx handler is separate add 1 for every Tx queue */ + if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) + numvecs += adapter->num_tx_queues; + + /* store the number of vectors reserved for queues */ + adapter->num_q_vectors = numvecs; + + /* add 1 vector for link status interrupts */ + numvecs++; + for (i = 0; i < numvecs; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, + numvecs, + numvecs); + if (err > 0) + return; + + igb_reset_interrupt_capability(adapter); + + /* If we can't do MSI-X, try MSI */ +msi_only: + adapter->flags &= ~IGB_FLAG_HAS_MSIX; +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif + adapter->vfs_allocated_count = 0; + adapter->rss_queues = 1; + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_q_vectors = 1; + if (!pci_enable_msi(adapter->pdev)) + adapter->flags |= IGB_FLAG_HAS_MSI; +} + +static void igb_add_ring(struct igb_ring *ring, + struct igb_ring_container *head) +{ + head->ring = ring; + head->count++; +} + +/** + * igb_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
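The vector budget computed in igb_set_interrupt_capability() above boils down to simple arithmetic; this standalone sketch mirrors it (the helper name and sample values are local to the sketch):

#include <stdio.h>
#include <stdbool.h>

static int igb_msix_budget(int rss_queues, int vfs_allocated, bool queue_pairs)
{
	int rx = rss_queues;
	int tx = vfs_allocated ? 1 : rss_queues;
	int numvecs = rx;		/* one vector per Rx queue */

	if (!queue_pairs)		/* separate Tx handlers each need a vector */
		numvecs += tx;

	return numvecs + 1;		/* plus one vector for link/"other" causes */
}

int main(void)
{
	printf("4 RSS queues, paired:   %d vectors\n", igb_msix_budget(4, 0, true));  /* 5 */
	printf("4 RSS queues, unpaired: %d vectors\n", igb_msix_budget(4, 0, false)); /* 9 */
	return 0;
}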
+ **/ +static int igb_alloc_q_vector(struct igb_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct igb_q_vector *q_vector; + struct igb_ring *ring; + int ring_count, size; + + /* igb only supports 1 Tx and/or 1 Rx queue per vector */ + if (txr_count > 1 || rxr_count > 1) + return -ENOMEM; + + ring_count = txr_count + rxr_count; + size = sizeof(struct igb_q_vector) + + (sizeof(struct igb_ring) * ring_count); + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + else + memset(q_vector, 0, size); + if (!q_vector) + return -ENOMEM; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + igb_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize ITR configuration */ + q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); + q_vector->itr_val = IGB_START_ITR; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (rxr_count) { + /* rx or rx/tx vector */ + if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) + q_vector->itr_val = adapter->rx_itr_setting; + } else { + /* tx only vector */ + if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) + q_vector->itr_val = adapter->tx_itr_setting; + } + + if (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + igb_add_ring(ring, &q_vector->tx); + + /* For 82575, context index must be unique per ring. */ + if (adapter->hw.mac.type == e1000_82575) + set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + u64_stats_init(&ring->tx_syncp); + u64_stats_init(&ring->tx_syncp2); + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* push pointer to next ring */ + ring++; + } + + if (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + igb_add_ring(ring, &q_vector->rx); + + /* set flag indicating ring supports SCTP checksum offload */ + if (adapter->hw.mac.type >= e1000_82576) + set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); + + /* On i350, i354, i210, and i211, loopback VLAN packets + * have the tag byte-swapped. + */ + if (adapter->hw.mac.type >= e1000_i350) + set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + u64_stats_init(&ring->rx_syncp); + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + } + + return 0; +} + + +/** + * igb_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
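The size computation in igb_alloc_q_vector() above packs the q_vector and its rings into one allocation, with the rings in a trailing array that the code then walks with ring++. A minimal userspace sketch of that layout (types and names are local to the sketch):

#include <stdio.h>
#include <stdlib.h>

struct ring {
	int count;
	int queue_index;
};

struct q_vector {
	int itr_val;
	struct ring ring[];	/* trailing array, sized at allocation time */
};

int main(void)
{
	int nrings = 2;		/* e.g. one Tx and one Rx ring */
	struct q_vector *qv = calloc(1, sizeof(*qv) + nrings * sizeof(struct ring));
	struct ring *r;

	if (!qv)
		return 1;

	r = qv->ring;		/* first ring */
	r->queue_index = 0;
	r++;			/* second ring, same allocation */
	r->queue_index = 0;

	printf("vector + %d rings in one block of %zu bytes\n",
	       nrings, sizeof(*qv) + nrings * sizeof(struct ring));
	free(qv);
	return 0;
}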
+ **/ +static int igb_alloc_q_vectors(struct igb_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + + err = igb_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + igb_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors + * @adapter: board private structure to initialize + * @msix: boolean value of MSIX capability + * + * This function initializes the interrupts and allocates all of the queues. + **/ +static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err; + + igb_set_interrupt_capability(adapter, msix); + + err = igb_alloc_q_vectors(adapter); + if (err) { + dev_err(&pdev->dev, "Unable to allocate memory for vectors\n"); + goto err_alloc_q_vectors; + } + + igb_cache_ring_register(adapter); + + return 0; + +err_alloc_q_vectors: + igb_reset_interrupt_capability(adapter); + return err; +} + +/** + * igb_request_irq - initialize interrupts + * @adapter: board private structure to initialize + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
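igb itself allows at most one Tx and one Rx ring per vector, but the DIV_ROUND_UP() idiom used above is the general way to spread the remaining rings as evenly as possible over the remaining vectors. A standalone demo with illustrative counts:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	int rings = 10, vectors = 4;
	int remaining = rings;
	int v;

	for (v = 0; v < vectors; v++) {
		int share = DIV_ROUND_UP(remaining, vectors - v);

		printf("vector %d gets %d ring(s)\n", v, share);
		remaining -= share;
	}
	/* prints 3, 3, 2, 2: per-vector counts never differ by more than one */
	return 0;
}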
+ **/ +static int igb_request_irq(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + err = igb_request_msix(adapter); + if (!err) + goto request_done; + /* fall back to MSI */ + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + igb_clear_interrupt_scheme(adapter); + err = igb_init_interrupt_scheme(adapter, false); + if (err) + goto request_done; + + igb_setup_all_tx_resources(adapter); + igb_setup_all_rx_resources(adapter); + igb_configure(adapter); + } + + igb_assign_vector(adapter->q_vector[0], 0); + + if (adapter->flags & IGB_FLAG_HAS_MSI) { + err = request_irq(pdev->irq, igb_intr_msi, 0, + netdev->name, adapter); + if (!err) + goto request_done; + + /* fall back to legacy interrupts */ + igb_reset_interrupt_capability(adapter); + adapter->flags &= ~IGB_FLAG_HAS_MSI; + } + + err = request_irq(pdev->irq, igb_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + dev_err(&pdev->dev, "Error %d getting interrupt\n", + err); + +request_done: + return err; +} + +static void igb_free_irq(struct igb_adapter *adapter) +{ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int vector = 0, i; + + free_irq(adapter->msix_entries[vector++].vector, adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) + free_irq(adapter->msix_entries[vector++].vector, + adapter->q_vector[i]); + } else { + free_irq(adapter->pdev->irq, adapter); + } +} + +/** + * igb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igb_irq_disable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* we need to be careful when disabling interrupts. The VFs are also + * mapped into these registers and so clearing the bits can cause + * issues on the VF drivers so we only need to clear what we set + */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 regval = rd32(E1000_EIAM); + + wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); + wr32(E1000_EIMC, adapter->eims_enable_mask); + regval = rd32(E1000_EIAC); + wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); + } + + wr32(E1000_IAM, 0); + wr32(E1000_IMC, ~0); + wrfl(); + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * igb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igb_irq_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA; + u32 regval = rd32(E1000_EIAC); + + wr32(E1000_EIAC, regval | adapter->eims_enable_mask); + regval = rd32(E1000_EIAM); + wr32(E1000_EIAM, regval | adapter->eims_enable_mask); + wr32(E1000_EIMS, adapter->eims_enable_mask); + if (adapter->vfs_allocated_count) { + wr32(E1000_MBVFIMR, 0xFF); + ims |= E1000_IMS_VMMB; + } + wr32(E1000_IMS, ims); + } else { + wr32(E1000_IMS, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + wr32(E1000_IAM, IMS_ENABLE_MASK | + E1000_IMS_DRSTA); + } +} + +static void igb_update_mng_vlan(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + /* add VID to filter 
table */ + igb_vfta_set(hw, vid, true); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; + } + + if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) { + /* remove VID from filter table */ + igb_vfta_set(hw, old_vid, false); + } +} + +/** + * igb_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + **/ +static void igb_release_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. + **/ +static void igb_get_hw_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = rd32(E1000_CTRL_EXT); + wr32(E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +/** + * igb_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void igb_configure(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + igb_get_hw_control(adapter); + igb_set_rx_mode(netdev); + + igb_restore_vlan(adapter); + + igb_setup_tctl(adapter); + igb_setup_mrqc(adapter); + igb_setup_rctl(adapter); + + igb_configure_tx(adapter); + igb_configure_rx(adapter); + + igb_rx_fifo_flush_82575(&adapter->hw); + + /* call igb_desc_unused which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); + } +} + +/** + * igb_power_up_link - Power up the phy/serdes link + * @adapter: address of board private structure + **/ +void igb_power_up_link(struct igb_adapter *adapter) +{ + igb_reset_phy(&adapter->hw); + + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_up_phy_copper(&adapter->hw); + else + igb_power_up_serdes_link_82575(&adapter->hw); + + igb_setup_link(&adapter->hw); +} + +/** + * igb_power_down_link - Power down the phy/serdes link + * @adapter: address of board private structure + */ +static void igb_power_down_link(struct igb_adapter *adapter) +{ + if (adapter->hw.phy.media_type == e1000_media_type_copper) + igb_power_down_phy_copper_82575(&adapter->hw); + else + igb_shutdown_serdes_link_82575(&adapter->hw); +} + +/** + * Detect and switch function for Media Auto Sense + * @adapter: address of the board private structure + **/ +static void igb_check_swap_media(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext, connsw; + bool swap_now = false; + + ctrl_ext = rd32(E1000_CTRL_EXT); + connsw = rd32(E1000_CONNSW); + + /* need to live swap if current media is copper and we have fiber/serdes + * to go to. 
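Earlier, igb_configure() refills the Rx rings via igb_desc_unused(), which always leaves one descriptor empty so that next_to_use == next_to_clean unambiguously means "empty" rather than "full". A generic sketch of that ring accounting (the formula here is illustrative, not copied from igb.h):

#include <stdio.h>

static unsigned int ring_unused(unsigned int count,
				unsigned int next_to_use,
				unsigned int next_to_clean)
{
	unsigned int used = (next_to_use - next_to_clean + count) % count;

	return count - used - 1;	/* one descriptor is always left unused */
}

int main(void)
{
	printf("%u\n", ring_unused(256, 0, 0));		/* empty ring: 255 usable */
	printf("%u\n", ring_unused(256, 10, 250));	/* wrapped ring: 239 usable */
	return 0;
}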
+ */ + + if ((hw->phy.media_type == e1000_media_type_copper) && + (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) { + swap_now = true; + } else if (!(connsw & E1000_CONNSW_SERDESD)) { + /* copper signal takes time to appear */ + if (adapter->copper_tries < 4) { + adapter->copper_tries++; + connsw |= E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + return; + } else { + adapter->copper_tries = 0; + if ((connsw & E1000_CONNSW_PHYSD) && + (!(connsw & E1000_CONNSW_PHY_PDN))) { + swap_now = true; + connsw &= ~E1000_CONNSW_AUTOSENSE_CONF; + wr32(E1000_CONNSW, connsw); + } + } + } + + if (!swap_now) + return; + + switch (hw->phy.media_type) { + case e1000_media_type_copper: + netdev_info(adapter->netdev, + "MAS: changing media to fiber/serdes\n"); + ctrl_ext |= + E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + adapter->copper_tries = 0; + break; + case e1000_media_type_internal_serdes: + case e1000_media_type_fiber: + netdev_info(adapter->netdev, + "MAS: changing media to copper\n"); + ctrl_ext &= + ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + break; + default: + /* shouldn't get here during regular operation */ + netdev_err(adapter->netdev, + "AMS: Invalid media type found, returning\n"); + break; + } + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +/** + * igb_up - Open the interface and prepare it to handle traffic + * @adapter: board private structure + **/ +int igb_up(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* hardware has been reset, we need to reload some things */ + igb_configure(adapter); + + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + + if (adapter->flags & IGB_FLAG_HAS_MSIX) + igb_configure_msix(adapter); + else + igb_assign_vector(adapter->q_vector[0], 0); + + /* Clear any pending interrupts. */ + rd32(E1000_ICR); + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(adapter->netdev); + + /* start the watchdog. 
*/ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + if ((adapter->flags & IGB_FLAG_EEE) && + (!hw->dev_spec._82575.eee_disable)) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + return 0; +} + +void igb_down(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + int i; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGB_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = rd32(E1000_RCTL); + wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + /* disable transmits in the hardware */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_EN; + wr32(E1000_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 11000); + + igb_irq_disable(adapter); + + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + + for (i = 0; i < adapter->num_q_vectors; i++) { + if (adapter->q_vector[i]) { + napi_synchronize(&adapter->q_vector[i]->napi); + napi_disable(&adapter->q_vector[i]->napi); + } + } + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + /* record the stats before reset*/ + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + spin_unlock(&adapter->stats64_lock); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + igb_reset(adapter); + igb_clean_all_tx_rings(adapter); + igb_clean_all_rx_rings(adapter); +#ifdef CONFIG_IGB_DCA + + /* since we reset the hardware DCA settings were cleared */ + igb_setup_dca(adapter); +#endif +} + +void igb_reinit_locked(struct igb_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igb_down(adapter); + igb_up(adapter); + clear_bit(__IGB_RESETTING, &adapter->state); +} + +/** igb_enable_mas - Media Autosense re-enable after swap + * + * @adapter: adapter struct + **/ +static s32 igb_enable_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 connsw; + s32 ret_val = 0; + + connsw = rd32(E1000_CONNSW); + if (!(hw->phy.media_type == e1000_media_type_copper)) + return ret_val; + + /* configure for SerDes media detect */ + if (!(connsw & E1000_CONNSW_SERDESD)) { + connsw |= E1000_CONNSW_ENRGSRC; + connsw |= E1000_CONNSW_AUTOSENSE_EN; + wr32(E1000_CONNSW, connsw); + wrfl(); + } else if (connsw & E1000_CONNSW_SERDESD) { + /* already SerDes, no need to enable anything */ + return ret_val; + } else { + netdev_info(adapter->netdev, + "MAS: Unable to configure feature, disabling..\n"); + adapter->flags &= ~IGB_FLAG_MAS_ENABLE; + } + return ret_val; +} + +void igb_reset(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_fc_info *fc = &hw->fc; + u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. 
+ */ + switch (mac->type) { + case e1000_i350: + case e1000_i354: + case e1000_82580: + pba = rd32(E1000_RXPBS); + pba = igb_rxpbs_adjust_82580(pba); + break; + case e1000_82576: + pba = rd32(E1000_RXPBS); + pba &= E1000_RXPBS_SIZE_MASK_82576; + break; + case e1000_82575: + case e1000_i210: + case e1000_i211: + default: + pba = E1000_PBA_34K; + break; + } + + if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && + (mac->type < e1000_82576)) { + /* adjust PBA for jumbo frames */ + wr32(E1000_PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = rd32(E1000_PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(union e1000_adv_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + wr32(E1000_PBA, pba); + } + + /* flow control settings */ + /* The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - 2 * adapter->max_frame_size)); + + fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + fc->pause_time = 0xFFFF; + fc->send_xon = 1; + fc->current_mode = fc->requested_mode; + + /* disable receive for all VFs and wait one second */ + if (adapter->vfs_allocated_count) { + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) + adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; + + /* ping all the active vfs to let them know we are going down */ + igb_ping_all_vfs(adapter); + + /* disable transmits and receives */ + wr32(E1000_VFRE, 0); + wr32(E1000_VFTE, 0); + } + + /* Allow time for pending master requests to run */ + hw->mac.ops.reset_hw(hw); + wr32(E1000_WUC, 0); + + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + /* need to resetup here after media swap */ + adapter->ei.get_invariants(hw); + adapter->flags &= ~IGB_FLAG_MEDIA_RESET; + } + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (igb_enable_mas(adapter)) + dev_err(&pdev->dev, + "Error enabling Media Auto Sense\n"); + } + if (hw->mac.ops.init_hw(hw)) + dev_err(&pdev->dev, "Hardware Error\n"); + + /* Flow control settings reset on hardware reset, so guarantee flow + * control is off when forcing speed. 
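+ * (igb_force_mac_fc() programs the MAC flow-control bits directly when + * autonegotiation is disabled.)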
+ */ + if (!hw->mac.autoneg) + igb_force_mac_fc(hw); + + igb_init_dmac(adapter, pba); +#ifdef CONFIG_IGB_HWMON + /* Re-initialize the thermal sensor on i350 devices. */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (mac->type == e1000_i350 && hw->bus.func == 0) { + /* If present, re-initialize the external thermal sensor + * interface. + */ + if (adapter->ets) + mac->ops.init_thermal_sensor_thresh(hw); + } + } +#endif + /* Re-establish EEE setting */ + if (hw->phy.media_type == e1000_media_type_copper) { + switch (mac->type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + igb_set_eee_i350(hw, true, true); + break; + case e1000_i354: + igb_set_eee_i354(hw, true, true); + break; + default: + break; + } + } + if (!netif_running(adapter->netdev)) + igb_power_down_link(adapter); + + igb_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); + + /* Re-enable PTP, where applicable. */ + igb_ptp_reset(adapter); + + igb_get_phy_info(hw); +} + +static netdev_features_t igb_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int igb_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + struct igb_adapter *adapter = netdev_priv(netdev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + igb_vlan_mode(netdev, features); + + if (!(changed & NETIF_F_RXALL)) + return 0; + + netdev->features = features; + + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + + return 0; +} + +static const struct net_device_ops igb_netdev_ops = { + .ndo_open = igb_open, + .ndo_stop = igb_close, + .ndo_start_xmit = igb_xmit_frame, + .ndo_get_stats64 = igb_get_stats64, + .ndo_set_rx_mode = igb_set_rx_mode, + .ndo_set_mac_address = igb_set_mac, + .ndo_change_mtu = igb_change_mtu, + .ndo_do_ioctl = igb_ioctl, + .ndo_tx_timeout = igb_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid, + .ndo_set_vf_mac = igb_ndo_set_vf_mac, + .ndo_set_vf_vlan = igb_ndo_set_vf_vlan, + .ndo_set_vf_rate = igb_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, + .ndo_get_vf_config = igb_ndo_get_vf_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igb_netpoll, +#endif + .ndo_fix_features = igb_fix_features, + .ndo_set_features = igb_set_features, + .ndo_features_check = passthru_features_check, +}; + +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + **/ +void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_fw_version fw; + + igb_get_fw_version(hw, &fw); + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (!(igb_get_flash_presence_i210(hw))) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); + break; + } + /* fall through */ + default: + /* if option is rom valid, display its version too */ + if (fw.or_valid) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + 
fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); + /* no option rom */ + } else if (fw.etrack_id != 0X0000) { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); + } else { + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); + } + break; + } +} + +/** + * igb_init_mas - init Media Autosense feature if enabled in the NVM + * + * @adapter: adapter struct + **/ +static void igb_init_mas(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_data; + + hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data); + switch (hw->bus.func) { + case E1000_FUNC_0: + if (eeprom_data & IGB_MAS_ENABLE_0) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_1: + if (eeprom_data & IGB_MAS_ENABLE_1) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_2: + if (eeprom_data & IGB_MAS_ENABLE_2) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + case E1000_FUNC_3: + if (eeprom_data & IGB_MAS_ENABLE_3) { + adapter->flags |= IGB_FLAG_MAS_ENABLE; + netdev_info(adapter->netdev, + "MAS: Enabling Media Autosense for port %d\n", + hw->bus.func); + } + break; + default: + /* Shouldn't get here */ + netdev_err(adapter->netdev, + "MAS: Invalid port configuration, returning\n"); + break; + } +} + +/** + * igb_init_i2c - Init I2C interface + * @adapter: pointer to adapter structure + **/ +static s32 igb_init_i2c(struct igb_adapter *adapter) +{ + s32 status = 0; + + /* I2C interface supported on i350 devices */ + if (adapter->hw.mac.type != e1000_i350) + return 0; + + /* Initialize the i2c bus which is controlled by the registers. + * This bus will use the i2c_algo_bit structue that implements + * the protocol through toggling of the 4 bits in the register. + */ + adapter->i2c_adap.owner = THIS_MODULE; + adapter->i2c_algo = igb_i2c_algo; + adapter->i2c_algo.data = adapter; + adapter->i2c_adap.algo_data = &adapter->i2c_algo; + adapter->i2c_adap.dev.parent = &adapter->pdev->dev; + strlcpy(adapter->i2c_adap.name, "igb BB", + sizeof(adapter->i2c_adap.name)); + status = i2c_bit_add_bus(&adapter->i2c_adap); + return status; +} + +/** + * igb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igb_adapter *adapter; + struct e1000_hw *hw; + u16 eeprom_data = 0; + s32 ret_val; + static int global_quad_port_a; /* global quad port a indication */ + const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; + int err, pci_using_dac; + u8 part_str[E1000_PBANUM_LENGTH]; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. 
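+ * Such a function is really a VF and must not be claimed by the PF + * driver, so the probe fails with -EINVAL.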
+ */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), + igb_driver_name); + if (err) + goto err_pci_reg; + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), + IGB_MAX_TX_QUEUES); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + err = -EIO; + hw->hw_addr = pci_iomap(pdev, 0, 0); + if (!hw->hw_addr) + goto err_ioremap; + + netdev->netdev_ops = &igb_netdev_ops; + igb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = pci_resource_start(pdev, 0); + netdev->mem_end = pci_resource_end(pdev, 0); + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Copy the default MAC, PHY and NVM function pointers */ + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + /* Initialize skew-specific constants */ + err = ei->get_invariants(hw); + if (err) + goto err_sw_init; + + /* setup the private structure */ + err = igb_sw_init(adapter); + if (err) + goto err_sw_init; + + igb_get_bus_info_pcie(hw); + + hw->phy.autoneg_wait_to_complete = false; + + /* Copper options */ + if (hw->phy.media_type == e1000_media_type_copper) { + hw->phy.mdix = AUTO_ALL_MODES; + hw->phy.disable_polarity_correction = false; + hw->phy.ms_type = e1000_ms_hw_default; + } + + if (igb_check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* features is initialized to 0 in allocation, it might have bits + * set by igb_sw_init so we should use an or instead of an + * assignment. 
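+ * These flags are also copied into hw_features below, which keeps them + * user-toggleable through ethtool.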
+ */ + netdev->features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features; + netdev->hw_features |= NETIF_F_RXALL; + + /* set this bit last since it cannot be part of hw_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (hw->mac.type >= e1000_82576) { + netdev->hw_features |= NETIF_F_SCTP_CSUM; + netdev->features |= NETIF_F_SCTP_CSUM; + } + + netdev->priv_flags |= IFF_UNICAST_FLT; + + adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); + + /* before reading the NVM, reset the controller to put the device in a + * known good starting state + */ + hw->mac.ops.reset_hw(hw); + + /* make sure the NVM is good , i211/i210 parts can have special NVM + * that doesn't contain a checksum + */ + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + if (igb_get_flash_presence_i210(hw)) { + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, + "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + break; + default: + if (hw->nvm.ops.validate(hw) < 0) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + break; + } + + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + /* get firmware version for ethtool -i */ + igb_set_fw_version(adapter); + + /* configure RXPBSIZE and TXPBSIZE */ + if (hw->mac.type == e1000_i210) { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + } + + setup_timer(&adapter->watchdog_timer, igb_watchdog, + (unsigned long) adapter); + setup_timer(&adapter->phy_info_timer, igb_update_phy_info, + (unsigned long) adapter); + + INIT_WORK(&adapter->reset_task, igb_reset_task); + INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); + + /* Initialize link properties that are user-changeable */ + adapter->fc_autoneg = true; + hw->mac.autoneg = true; + hw->phy.autoneg_advertised = 0x2f; + + hw->fc.requested_mode = e1000_fc_default; + hw->fc.current_mode = e1000_fc_default; + + igb_validate_mdi_setting(hw); + + /* By default, support wake on port A */ + if (hw->bus.func == 0) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* Check the NVM for wake support on non-port A ports */ + if (hw->mac.type >= e1000_82580) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &eeprom_data); + else if (hw->bus.func == 1) + hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + + if (eeprom_data & IGB_EEPROM_APME) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + + /* now that we have the eeprom settings, apply the special cases where + * the eeprom may be wrong or the board simply won't support wake on + * lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82575GB_QUAD_COPPER: + adapter->flags &= 
~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + break; + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + else + adapter->flags |= IGB_FLAG_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + /* If the device can't wake, don't set software support */ + if (!device_can_wakeup(&adapter->pdev->dev)) + adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; + } + + /* initialize the wol settings based on the eeprom settings */ + if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) + adapter->wol |= E1000_WUFC_MAG; + + /* Some vendors want WoL disabled by default, but still supported */ + if ((hw->mac.type == e1000_i350) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + + device_set_wakeup_enable(&adapter->pdev->dev, + adapter->flags & IGB_FLAG_WOL_SUPPORTED); + + /* reset the hardware with the new settings */ + igb_reset(adapter); + + /* Init the I2C interface */ + err = igb_init_i2c(adapter); + if (err) { + dev_err(&pdev->dev, "failed to init i2c interface\n"); + goto err_eeprom; + } + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_IGB_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + } + +#endif +#ifdef CONFIG_IGB_HWMON + /* Initialize the thermal sensor on i350 devices. */ + if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { + u16 ets_word; + + /* Read the NVM to determine if this i350 device supports an + * external thermal sensor. + */ + hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); + if (ets_word != 0x0000 && ets_word != 0xFFFF) + adapter->ets = true; + else + adapter->ets = false; + if (igb_sysfs_init(adapter)) + dev_err(&pdev->dev, + "failed to allocate sysfs resources\n"); + } else { + adapter->ets = false; + } +#endif + /* Check if Media Autosense is enabled */ + adapter->ei = *ei; + if (hw->dev_spec._82575.mas_capable) + igb_init_mas(adapter); + + /* do hw tstamp init after resetting */ + igb_ptp_init(adapter); + + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); + /* print bus type/speed/width info, not applicable to i354 */ + if (hw->mac.type != e1000_i354) { + dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", + netdev->name, + ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : + (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : + "unknown"), + ((hw->bus.width == e1000_bus_width_pcie_x4) ? + "Width x4" : + (hw->bus.width == e1000_bus_width_pcie_x2) ? + "Width x2" : + (hw->bus.width == e1000_bus_width_pcie_x1) ? 
+ "Width x1" : "unknown"), netdev->dev_addr); + } + + if ((hw->mac.type >= e1000_i210 || + igb_get_flash_presence_i210(hw))) { + ret_val = igb_read_part_string(hw, part_str, + E1000_PBANUM_LENGTH); + } else { + ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND; + } + + if (ret_val) + strcpy(part_str, "Unknown"); + dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str); + dev_info(&pdev->dev, + "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", + (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : + (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", + adapter->num_rx_queues, adapter->num_tx_queues); + if (hw->phy.media_type == e1000_media_type_copper) { + switch (hw->mac.type) { + case e1000_i350: + case e1000_i210: + case e1000_i211: + /* Enable EEE for internal copper PHY devices */ + err = igb_set_eee_i350(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + break; + case e1000_i354: + if ((rd32(E1000_CTRL_EXT) & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + err = igb_set_eee_i354(hw, true, true); + if ((!err) && + (!hw->dev_spec._82575.eee_disable)) { + adapter->eee_advert = + MDIO_EEE_100TX | MDIO_EEE_1000T; + adapter->flags |= IGB_FLAG_EEE; + } + } + break; + default: + break; + } + } + pm_runtime_put_noidle(&pdev->dev); + return 0; + +err_register: + igb_release_hw_control(adapter); + memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); +err_eeprom: + if (!igb_check_reset_block(hw)) + igb_reset_phy(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); +err_sw_init: + igb_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PCI_IOV +static int igb_disable_sriov(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* reclaim resources allocated to VFs */ + if (adapter->vf_data) { + /* disable iov and allow time for transactions to clear */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n"); + return -EPERM; + } else { + pci_disable_sriov(pdev); + msleep(500); + } + + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); + msleep(100); + dev_info(&pdev->dev, "IOV Disabled\n"); + + /* Re-enable DMA Coalescing flag since IOV is turned off */ + adapter->flags |= IGB_FLAG_DMAC; + } + + return 0; +} + +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + int old_vfs = pci_num_vf(pdev); + int err = 0; + int i; + + if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { + err = -EPERM; + goto out; + } + if (!num_vfs) + goto out; + + if (old_vfs) { + dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n", + old_vfs, max_vfs); + adapter->vfs_allocated_count = old_vfs; + } else + adapter->vfs_allocated_count = num_vfs; + + adapter->vf_data = kcalloc(adapter->vfs_allocated_count, + sizeof(struct vf_data_storage), GFP_KERNEL); + + /* if allocation failed then we do not 
support SR-IOV */ + if (!adapter->vf_data) { + adapter->vfs_allocated_count = 0; + dev_err(&pdev->dev, + "Unable to allocate memory for VF Data Storage\n"); + err = -ENOMEM; + goto out; + } + + /* only call pci_enable_sriov() if no VFs are allocated already */ + if (!old_vfs) { + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + if (err) + goto err_out; + } + dev_info(&pdev->dev, "%d VFs allocated\n", + adapter->vfs_allocated_count); + for (i = 0; i < adapter->vfs_allocated_count; i++) + igb_vf_configure(adapter, i); + + /* DMA Coalescing is not supported in IOV mode. */ + adapter->flags &= ~IGB_FLAG_DMAC; + goto out; + +err_out: + kfree(adapter->vf_data); + adapter->vf_data = NULL; + adapter->vfs_allocated_count = 0; +out: + return err; +} + +#endif +/** + * igb_remove_i2c - Cleanup I2C interface + * @adapter: pointer to adapter structure + **/ +static void igb_remove_i2c(struct igb_adapter *adapter) +{ + /* free the adapter bus structure */ + i2c_del_adapter(&adapter->i2c_adap); +} + +/** + * igb_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igb_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void igb_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pm_runtime_get_noresume(&pdev->dev); +#ifdef CONFIG_IGB_HWMON + igb_sysfs_exit(adapter); +#endif + igb_remove_i2c(adapter); + igb_ptp_stop(adapter); + /* The watchdog timer may be rescheduled, so explicitly + * disable watchdog from being rescheduled. + */ + set_bit(__IGB_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + +#ifdef CONFIG_IGB_DCA + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + dev_info(&pdev->dev, "DCA disabled\n"); + dca_remove_requester(&pdev->dev); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } +#endif + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igb_release_hw_control(adapter); + + unregister_netdev(netdev); + + igb_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif + + pci_iounmap(pdev, hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + kfree(adapter->shadow_vfta); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/** + * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space + * @adapter: board private structure to initialize + * + * This function initializes the vf specific data storage and then attempts to + * allocate the VFs. The reason for ordering it this way is because it is much + * more expensive time-wise to disable SR-IOV than it is to allocate and free + * the memory for the VFs. + **/ +static void igb_probe_vfs(struct igb_adapter *adapter) +{ +#ifdef CONFIG_PCI_IOV + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + + /* Virtualization features not supported on i210 family. 
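+ * For the other parts pci_sriov_set_totalvfs() caps the VF count at + * seven because the PF reserves a queue pool of its own.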
*/ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + return; + + pci_sriov_set_totalvfs(pdev, 7); + igb_pci_enable_sriov(pdev, max_vfs); + +#endif /* CONFIG_PCI_IOV */ +} + +static void igb_init_queue_configuration(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 max_rss_queues; + + /* Determine the maximum number of RSS queues supported. */ + switch (hw->mac.type) { + case e1000_i211: + max_rss_queues = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: + case e1000_i210: + max_rss_queues = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: + /* I350 cannot do RSS and SR-IOV at the same time */ + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 1; + break; + } + /* fall through */ + case e1000_82576: + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 2; + break; + } + /* fall through */ + case e1000_82580: + case e1000_i354: + default: + max_rss_queues = IGB_MAX_RX_QUEUES; + break; + } + + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + /* Determine if we need to pair queues. */ + switch (hw->mac.type) { + case e1000_82575: + case e1000_i211: + /* Device supports enough interrupts without queue pairing. */ + break; + case e1000_82576: + /* If VFs are going to be allocated with RSS queues then we + * should pair the queues in order to conserve interrupts due + * to limited supply. + */ + if ((adapter->rss_queues > 1) && + (adapter->vfs_allocated_count > 6)) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + /* fall through */ + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + default: + /* If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. + */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + break; + } +} + +/** + * igb_sw_init - Initialize general software structures (struct igb_adapter) + * @adapter: board private structure to initialize + * + * igb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
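+ * It also allocates the shadow VLAN filter table and the interrupt/queue + * scheme, so it can fail with -ENOMEM.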
+ **/ +static int igb_sw_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); + + /* set default ring sizes */ + adapter->tx_ring_count = IGB_DEFAULT_TXD; + adapter->rx_ring_count = IGB_DEFAULT_RXD; + + /* set default ITR values */ + adapter->rx_itr_setting = IGB_DEFAULT_ITR; + adapter->tx_itr_setting = IGB_DEFAULT_ITR; + + /* set default work limits */ + adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; + + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + spin_lock_init(&adapter->stats64_lock); +#ifdef CONFIG_PCI_IOV + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + max_vfs = adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + if (adapter->vfs_allocated_count) + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n"); + break; + default: + break; + } +#endif /* CONFIG_PCI_IOV */ + + igb_init_queue_configuration(adapter); + + /* Setup and initialize a copy of the hw vlan table array */ + adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), + GFP_ATOMIC); + + /* This call may decrease the number of queues */ + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_probe_vfs(adapter); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igb_irq_disable(adapter); + + if (hw->mac.type >= e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + + set_bit(__IGB_DOWN, &adapter->state); + return 0; +} + +/** + * igb_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int __igb_open(struct net_device *netdev, bool resuming) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + int i; + + /* disallow open during test */ + if (test_bit(__IGB_TESTING, &adapter->state)) { + WARN_ON(resuming); + return -EBUSY; + } + + if (!resuming) + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = igb_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igb_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + igb_power_up_link(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + igb_configure(adapter); + + err = igb_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. 
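+ * The interrupt scheme setup may have reduced the queue counts, so report + * the numbers that are actually in use.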
*/ + err = netif_set_real_num_tx_queues(adapter->netdev, + adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); + if (err) + goto err_set_queues; + + /* From here on the code is the same as igb_up() */ + clear_bit(__IGB_DOWN, &adapter->state); + + for (i = 0; i < adapter->num_q_vectors; i++) + napi_enable(&(adapter->q_vector[i]->napi)); + + /* Clear any pending interrupts. */ + rd32(E1000_ICR); + + igb_irq_enable(adapter); + + /* notify VFs that reset has been completed */ + if (adapter->vfs_allocated_count) { + u32 reg_data = rd32(E1000_CTRL_EXT); + + reg_data |= E1000_CTRL_EXT_PFRSTD; + wr32(E1000_CTRL_EXT, reg_data); + } + + netif_tx_start_all_queues(netdev); + + if (!resuming) + pm_runtime_put(&pdev->dev); + + /* start the watchdog. */ + hw->mac.get_link_status = 1; + schedule_work(&adapter->watchdog_task); + + return 0; + +err_set_queues: + igb_free_irq(adapter); +err_req_irq: + igb_release_hw_control(adapter); + igb_power_down_link(adapter); + igb_free_all_rx_resources(adapter); +err_setup_rx: + igb_free_all_tx_resources(adapter); +err_setup_tx: + igb_reset(adapter); + if (!resuming) + pm_runtime_put(&pdev->dev); + + return err; +} + +static int igb_open(struct net_device *netdev) +{ + return __igb_open(netdev, false); +} + +/** + * igb_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the driver's control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int __igb_close(struct net_device *netdev, bool suspending) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); + + if (!suspending) + pm_runtime_get_sync(&pdev->dev); + + igb_down(adapter); + igb_free_irq(adapter); + + igb_free_all_tx_resources(adapter); + igb_free_all_rx_resources(adapter); + + if (!suspending) + pm_runtime_put_sync(&pdev->dev); + return 0; +} + +static int igb_close(struct net_device *netdev) +{ + return __igb_close(netdev, false); +} + +/** + * igb_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int igb_setup_tx_resources(struct igb_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int size; + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_tx_resources(struct igb_adapter 
*adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = igb_setup_tx_resources(adapter->tx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Tx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_tx_resources(adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_tctl - configure the transmit control registers + * @adapter: Board private structure + **/ +void igb_setup_tctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tctl; + + /* disable queue 0 which is enabled by default on 82575 and 82576 */ + wr32(E1000_TXDCTL(0), 0); + + /* Program the Transmit Control Register */ + tctl = rd32(E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + igb_config_collision_dist(hw); + + /* Enable transmits */ + tctl |= E1000_TCTL_EN; + + wr32(E1000_TCTL, tctl); +} + +/** + * igb_configure_tx_ring - Configure transmit ring after Reset + * @adapter: board private structure + * @ring: tx ring to configure + * + * Configure a transmit ring after a reset. + **/ +void igb_configure_tx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txdctl = 0; + u64 tdba = ring->dma; + int reg_idx = ring->reg_idx; + + /* disable the queue */ + wr32(E1000_TXDCTL(reg_idx), 0); + wrfl(); + mdelay(10); + + wr32(E1000_TDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_tx_desc)); + wr32(E1000_TDBAL(reg_idx), + tdba & 0x00000000ffffffffULL); + wr32(E1000_TDBAH(reg_idx), tdba >> 32); + + ring->tail = hw->hw_addr + E1000_TDT(reg_idx); + wr32(E1000_TDH(reg_idx), 0); + writel(0, ring->tail); + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + wr32(E1000_TXDCTL(reg_idx), txdctl); +} + +/** + * igb_configure_tx - Configure transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
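+ * Walks all allocated Tx rings and programs each one via + * igb_configure_tx_ring().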
+ **/ +static void igb_configure_tx(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + igb_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +/** + * igb_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int igb_setup_rx_resources(struct igb_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int size; + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; + +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * igb_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int igb_setup_all_rx_resources(struct igb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = igb_setup_rx_resources(adapter->rx_ring[i]); + if (err) { + dev_err(&pdev->dev, + "Allocation for Rx Queue %u failed\n", i); + for (i--; i >= 0; i--) + igb_free_rx_resources(adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * igb_setup_mrqc - configure the multiple receive queue control registers + * @adapter: Board private structure + **/ +static void igb_setup_mrqc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 j, num_rx_queues; + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (j = 0; j < 10; j++) + wr32(E1000_RSSRK(j), rss_key[j]); + + num_rx_queues = adapter->rss_queues; + + switch (hw->mac.type) { + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) + num_rx_queues = 2; + break; + default: + break; + } + + if (adapter->rss_indir_tbl_init != num_rx_queues) { + for (j = 0; j < IGB_RETA_SIZE; j++) + adapter->rss_indir_tbl[j] = + (j * num_rx_queues) / IGB_RETA_SIZE; + adapter->rss_indir_tbl_init = num_rx_queues; + } + igb_write_rss_indir_tbl(adapter); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
No need to enable TCP/UDP/IP checksum + * offloads as they are enabled by default + */ + rxcsum = rd32(E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + if (adapter->hw.mac.type >= e1000_82576) + /* Enable Receive Checksum Offload for SCTP */ + rxcsum |= E1000_RXCSUM_CRCOFL; + + /* Don't need to set TUOFL or IPOFL, they default to 1 */ + wr32(E1000_RXCSUM, rxcsum); + + /* Generate RSS hash based on packet types, TCP/UDP + * port numbers and/or IPv4/v6 src and dst addresses + */ + mrqc = E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + + /* If VMDq is enabled then we set the appropriate mode for that, else + * we default to RSS so that an RSS hash is calculated per packet even + * if we are only using one queue + */ + if (adapter->vfs_allocated_count) { + if (hw->mac.type > e1000_82575) { + /* Set the default pool for the PF's first queue */ + u32 vtctl = rd32(E1000_VT_CTL); + + vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | + E1000_VT_CTL_DISABLE_DEF_POOL); + vtctl |= adapter->vfs_allocated_count << + E1000_VT_CTL_DEFAULT_POOL_SHIFT; + wr32(E1000_VT_CTL, vtctl); + } + if (adapter->rss_queues > 1) + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + else + mrqc |= E1000_MRQC_ENABLE_VMDQ; + } else { + if (hw->mac.type != e1000_i211) + mrqc |= E1000_MRQC_ENABLE_RSS_4Q; + } + igb_vmm_control(adapter); + + wr32(E1000_MRQC, mrqc); +} + +/** + * igb_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +void igb_setup_rctl(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = rd32(E1000_RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* enable stripping of CRC. It's unlikely this will break BMC + * redirection as it did with e1000. Newer features require + * that the HW strips the CRC. + */ + rctl |= E1000_RCTL_SECRC; + + /* disable store bad packets and clear size bits. */ + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); + + /* enable LPE to prevent packets larger than max_frame_size */ + rctl |= E1000_RCTL_LPE; + + /* disable queue 0 to prevent tail write w/o re-config */ + wr32(E1000_RXDCTL(0), 0); + + /* Attention!!! For SR-IOV PF driver operations you must enable + * queue drop for all VF and PF queues to prevent head of line blocking + * if an un-trusted VF does not provide descriptors to hardware. + */ + if (adapter->vfs_allocated_count) { + /* set all queue drop enable bits */ + wr32(E1000_QDE, ALL_QUEUES); + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. 
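+ * VLAN tag stripping is toggled separately via igb_vlan_mode() from + * igb_set_features().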
+ */ + } + + wr32(E1000_RCTL, rctl); +} + +static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, + int vfn) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* if it isn't the PF check to see if VFs are enabled and + * increase the size to support vlan tags + */ + if (vfn < adapter->vfs_allocated_count && + adapter->vf_data[vfn].vlans_enabled) + size += VLAN_TAG_SIZE; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= size | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + + return 0; +} + +/** + * igb_rlpml_set - set maximum receive packet size + * @adapter: board private structure + * + * Configure maximum receivable packet size. + **/ +static void igb_rlpml_set(struct igb_adapter *adapter) +{ + u32 max_frame_size = adapter->max_frame_size; + struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; + + if (pf_id) { + igb_set_vf_rlpml(adapter, max_frame_size, pf_id); + /* If we're in VMDQ or SR-IOV mode, then set global RLPML + * to our max jumbo frame size, in case we need to enable + * jumbo frames on one of the rings later. + * This will not pass over-length frames into the default + * queue because it's gated by the VMOLR.RLPML. + */ + max_frame_size = MAX_JUMBO_FRAME_SIZE; + } + + wr32(E1000_RLPML, max_frame_size); +} + +static inline void igb_set_vmolr(struct igb_adapter *adapter, + int vfn, bool aupe) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr; + + /* This register exists only on 82576 and newer so if we are older then + * we should exit and do nothing + */ + if (hw->mac.type < e1000_82576) + return; + + vmolr = rd32(E1000_VMOLR(vfn)); + vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ + if (hw->mac.type == e1000_i350) { + u32 dvmolr; + + dvmolr = rd32(E1000_DVMOLR(vfn)); + dvmolr |= E1000_DVMOLR_STRVLAN; + wr32(E1000_DVMOLR(vfn), dvmolr); + } + if (aupe) + vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ + else + vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ + + /* clear all bits that might not be set */ + vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE); + + if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ + /* for VMDq only allow the VFs and pool 0 to accept broadcast and + * multicast packets + */ + if (vfn <= adapter->vfs_allocated_count) + vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ + + wr32(E1000_VMOLR(vfn), vmolr); +} + +/** + * igb_configure_rx_ring - Configure a receive ring after Reset + * @adapter: board private structure + * @ring: receive ring to be configured + * + * Configure the Rx unit of the MAC after a reset. 
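+ * Programs the descriptor base and length, resets head and tail, sets + * SRRCTL and the per-pool VMOLR filtering, then re-enables the queue.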
+ **/ +void igb_configure_rx_ring(struct igb_adapter *adapter, + struct igb_ring *ring) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + int reg_idx = ring->reg_idx; + u32 srrctl = 0, rxdctl = 0; + + /* disable the queue */ + wr32(E1000_RXDCTL(reg_idx), 0); + + /* Set DMA base address registers */ + wr32(E1000_RDBAL(reg_idx), + rdba & 0x00000000ffffffffULL); + wr32(E1000_RDBAH(reg_idx), rdba >> 32); + wr32(E1000_RDLEN(reg_idx), + ring->count * sizeof(union e1000_adv_rx_desc)); + + /* initialize head and tail */ + ring->tail = hw->hw_addr + E1000_RDT(reg_idx); + wr32(E1000_RDH(reg_idx), 0); + writel(0, ring->tail); + + /* set descriptor configuration */ + srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + if (hw->mac.type >= e1000_82580) + srrctl |= E1000_SRRCTL_TIMESTAMP; + /* Only set Drop Enable if we are supporting multiple queues */ + if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) + srrctl |= E1000_SRRCTL_DROP_EN; + + wr32(E1000_SRRCTL(reg_idx), srrctl); + + /* set filtering for VMDQ pools */ + igb_set_vmolr(adapter, reg_idx & 0x7, true); + + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + + /* enable receive descriptor fetching */ + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + wr32(E1000_RXDCTL(reg_idx), rxdctl); +} + +/** + * igb_configure_rx - Configure receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void igb_configure_rx(struct igb_adapter *adapter) +{ + int i; + + /* set UTA to appropriate mode */ + igb_set_uta(adapter); + + /* set the correct pool for the PF default MAC address in entry 0 */ + igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, + adapter->vfs_allocated_count); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + igb_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +/** + * igb_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void igb_free_tx_resources(struct igb_ring *tx_ring) +{ + igb_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igb_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void igb_free_all_tx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_free_tx_resources(adapter->tx_ring[i]); +} + +void igb_unmap_and_free_tx_resource(struct igb_ring *ring, + struct igb_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 
0); + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * igb_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igb_clean_tx_ring(struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *buffer_info; + unsigned long size; + u16 i; + + if (!tx_ring->tx_buffer_info) + return; + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->tx_buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, buffer_info); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct igb_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * igb_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_tx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]) + igb_clean_tx_ring(adapter->tx_ring[i]); +} + +/** + * igb_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void igb_free_rx_resources(struct igb_ring *rx_ring) +{ + igb_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * igb_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void igb_free_all_rx_resources(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * igb_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void igb_clean_rx_ring(struct igb_ring *rx_ring) +{ + unsigned long size; + u16 i; + + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; + + if (!buffer_info->page) + continue; + + dma_unmap_page(rx_ring->dev, + buffer_info->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(buffer_info->page); + + buffer_info->page = NULL; + } + + size = sizeof(struct igb_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * igb_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void igb_clean_all_rx_rings(struct igb_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]) + igb_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * igb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igb_set_mac(struct net_device *netdev, void *p) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct 
e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + /* set the correct pool for the new PF MAC address in entry 0 */ + igb_rar_set_qsel(adapter, hw->mac.addr, 0, + adapter->vfs_allocated_count); + + return 0; +} + +/** + * igb_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int igb_write_mc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + igb_update_mc_addr_list(hw, NULL, 0); + igb_restore_vf_multicasts(adapter); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* The shared function expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + igb_update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +/** + * igb_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int igb_write_uc_addr_list(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + igb_rar_set_qsel(adapter, ha->addr, + rar_entries--, + vfn); + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + for (; rar_entries > 0 ; rar_entries--) { + wr32(E1000_RAH(rar_entries), 0); + wr32(E1000_RAL(rar_entries), 0); + } + wrfl(); + + return count; +} + +/** + * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. 
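+ * On VMDq-capable parts the default (PF) pool's VMOLR is updated as well + * so the chosen modes also apply to frames delivered to that pool.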
+ **/ +static void igb_set_rx_mode(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int vfn = adapter->vfs_allocated_count; + u32 rctl, vmolr = 0; + int count; + + /* Check for Promiscuous and All Multicast modes */ + rctl = rd32(E1000_RCTL); + + /* clear the effected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); + + if (netdev->flags & IFF_PROMISC) { + /* retain VLAN HW filtering if in VT mode */ + if (adapter->vfs_allocated_count) + rctl |= E1000_RCTL_VFE; + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = igb_write_mc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_MPME; + } else if (count) { + vmolr |= E1000_VMOLR_ROMPE; + } + } + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = igb_write_uc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; + } + rctl |= E1000_RCTL_VFE; + } + wr32(E1000_RCTL, rctl); + + /* In order to support SR-IOV and eventually VMDq it is necessary to set + * the VMOLR to enable the appropriate modes. Without this workaround + * we will have issues with VLAN tag stripping not being done for frames + * that are only arriving because we are the default pool + */ + if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) + return; + + vmolr |= rd32(E1000_VMOLR(vfn)) & + ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + wr32(E1000_VMOLR(vfn), vmolr); + igb_restore_vf_multicasts(adapter); +} + +static void igb_check_wvbr(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 wvbr = 0; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + wvbr = rd32(E1000_WVBR); + if (!wvbr) + return; + break; + default: + break; + } + + adapter->wvbr |= wvbr; +} + +#define IGB_STAGGERED_QUEUE_OFFSET 8 + +static void igb_spoof_check(struct igb_adapter *adapter) +{ + int j; + + if (!adapter->wvbr) + return; + + for (j = 0; j < adapter->vfs_allocated_count; j++) { + if (adapter->wvbr & (1 << j) || + adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { + dev_warn(&adapter->pdev->dev, + "Spoof event(s) detected on VF %d\n", j); + adapter->wvbr &= + ~((1 << j) | + (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); + } + } +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void igb_update_phy_info(unsigned long data) +{ + struct igb_adapter *adapter = (struct igb_adapter *) data; + igb_get_phy_info(&adapter->hw); +} + +/** + * igb_has_link - check shared code for link and determine up/down + * @adapter: pointer to driver private info + **/ +bool igb_has_link(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or + * rx sequence error interrupt. 
get_link_status will stay + * false until the e1000_check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (!hw->mac.get_link_status) + return true; + case e1000_media_type_internal_serdes: + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + break; + default: + case e1000_media_type_unknown: + break; + } + + if (((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) && + (hw->phy.id == I210_I_PHY_ID)) { + if (!netif_carrier_ok(adapter->netdev)) { + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { + adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + } + } + + return link_active; +} + +static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event) +{ + bool ret = false; + u32 ctrl_ext, thstat; + + /* check for thermal sensor event on i350 copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + + if ((hw->phy.media_type == e1000_media_type_copper) && + !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) + ret = !!(thstat & event); + } + + return ret; +} + +/** + * igb_check_lvmmc - check for malformed packets received + * and indicated in LVMMC register + * @adapter: pointer to adapter + **/ +static void igb_check_lvmmc(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 lvmmc; + + lvmmc = rd32(E1000_LVMMC); + if (lvmmc) { + if (unlikely(net_ratelimit())) { + netdev_warn(adapter->netdev, + "malformed Tx packet detected and dropped, LVMMC:0x%08x\n", + lvmmc); + } + } +} + +/** + * igb_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igb_watchdog(unsigned long data) +{ + struct igb_adapter *adapter = (struct igb_adapter *)data; + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igb_watchdog_task(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, + struct igb_adapter, + watchdog_task); + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_info *phy = &hw->phy; + struct net_device *netdev = adapter->netdev; + u32 link; + int i; + u32 connsw; + + link = igb_has_link(adapter); + + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { + if (time_after(jiffies, (adapter->link_check_timeout + HZ))) + adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; + else + link = false; + } + + /* Force link down if we have fiber to swap to */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + if (hw->phy.media_type == e1000_media_type_copper) { + connsw = rd32(E1000_CONNSW); + if (!(connsw & E1000_CONNSW_AUTOSENSE_EN)) + link = 0; + } + } + if (link) { + /* Perform a reset if the media type changed. */ + if (hw->dev_spec._82575.media_changed) { + hw->dev_spec._82575.media_changed = false; + adapter->flags |= IGB_FLAG_MEDIA_RESET; + igb_reset(adapter); + } + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + + hw->mac.ops.get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = rd32(E1000_CTRL); + /* Links status message must follow this format */ + netdev_info(netdev, + "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? 
+ "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && + (ctrl & E1000_CTRL_RFCE) ? "RX/TX" : + (ctrl & E1000_CTRL_RFCE) ? "RX" : + (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); + + /* disable EEE if enabled */ + if ((adapter->flags & IGB_FLAG_EEE) && + (adapter->link_duplex == HALF_DUPLEX)) { + dev_info(&adapter->pdev->dev, + "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n"); + adapter->hw.dev_spec._82575.eee_disable = true; + adapter->flags &= ~IGB_FLAG_EEE; + } + + /* check if SmartSpeed worked */ + igb_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_LINK_THROTTLE)) + netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n"); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + /* maybe add some timeout factor ? */ + break; + } + + netif_carrier_on(netdev); + + igb_ping_all_vfs(adapter); + igb_check_vf_rate_limit(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* check for thermal sensor event */ + if (igb_thermal_sensor_event(hw, + E1000_THSTAT_PWR_DOWN)) { + netdev_err(netdev, "The network adapter was stopped because it overheated\n"); + } + + /* Links status message must follow this format */ + netdev_info(netdev, "igb: %s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + igb_ping_all_vfs(adapter); + + /* link state has changed, schedule phy info update */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* link is down, time to check for alternate media */ + if (adapter->flags & IGB_FLAG_MAS_ENABLE) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + pm_schedule_suspend(netdev->dev.parent, + MSEC_PER_SEC * 5); + + /* also check for alternate media here */ + } else if (!netif_carrier_ok(netdev) && + (adapter->flags & IGB_FLAG_MAS_ENABLE)) { + igb_check_swap_media(adapter); + if (adapter->flags & IGB_FLAG_MEDIA_RESET) { + schedule_work(&adapter->reset_task); + /* return immediately */ + return; + } + } + } + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + spin_unlock(&adapter->stats64_lock); + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *tx_ring = adapter->tx_ring[i]; + if (!netif_carrier_ok(netdev)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
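+ * The check below treats the ring as still holding stranded work when
+ * igb_desc_unused() + 1 is less than the ring size, i.e. at least one
+ * descriptor has been handed to hardware but not yet reclaimed.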
+ */ + if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->flags & IGB_FLAG_HAS_MSIX) { + u32 eics = 0; + + for (i = 0; i < adapter->num_q_vectors; i++) + eics |= adapter->q_vector[i]->eims_value; + wr32(E1000_EICS, eics); + } else { + wr32(E1000_ICS, E1000_ICS_RXDMT0); + } + + igb_spoof_check(adapter); + igb_ptp_rx_hang(adapter); + + /* Check LVMMC register on i350/i354 only */ + if ((adapter->hw.mac.type == e1000_i350) || + (adapter->hw.mac.type == e1000_i354)) + igb_check_lvmmc(adapter); + + /* Reset the timer */ + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + HZ)); + else + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); + } +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * igb_update_ring_itr - update the dynamic ITR value based on packet size + * @q_vector: pointer to q_vector + * + * Stores a new ITR value based strictly on packet size. This + * algorithm is less sophisticated than that used in igb_update_itr, + * due to the difficulty of synchronizing statistics across multiple + * receive rings. The divisors and thresholds used by this function + * were determined based on theoretical maximum wire speed and testing + * data, in order to minimize response time while increasing bulk + * throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: This function is called only when operating in a multiqueue + * receive environment. + **/ +static void igb_update_ring_itr(struct igb_q_vector *q_vector) +{ + int new_val = q_vector->itr_val; + int avg_wire_size = 0; + struct igb_adapter *adapter = q_vector->adapter; + unsigned int packets; + + /* For non-gigabit speeds, just fix the interrupt rate at 4000 + * ints/sec - ITR timer value of 120 ticks.
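+ * (4000 interrupts/sec works out to roughly a 250 usec minimum gap
+ * between interrupts; the exact tick encoding of IGB_4K_ITR depends on
+ * the EITR register format.)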
+ */ + if (adapter->link_speed != SPEED_1000) { + new_val = IGB_4K_ITR; + goto set_itr_val; + } + + packets = q_vector->rx.total_packets; + if (packets) + avg_wire_size = q_vector->rx.total_bytes / packets; + + packets = q_vector->tx.total_packets; + if (packets) + avg_wire_size = max_t(u32, avg_wire_size, + q_vector->tx.total_bytes / packets); + + /* if avg_wire_size isn't set no work was done */ + if (!avg_wire_size) + goto clear_counts; + + /* Add 24 bytes to size to account for CRC, preamble, and gap */ + avg_wire_size += 24; + + /* Don't starve jumbo frames */ + avg_wire_size = min(avg_wire_size, 3000); + + /* Give a little boost to mid-size frames */ + if ((avg_wire_size > 300) && (avg_wire_size < 1200)) + new_val = avg_wire_size / 3; + else + new_val = avg_wire_size / 2; + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (new_val < IGB_20K_ITR && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + new_val = IGB_20K_ITR; + +set_itr_val: + if (new_val != q_vector->itr_val) { + q_vector->itr_val = new_val; + q_vector->set_itr = 1; + } +clear_counts: + q_vector->rx.total_bytes = 0; + q_vector->rx.total_packets = 0; + q_vector->tx.total_bytes = 0; + q_vector->tx.total_packets = 0; +} + +/** + * igb_update_itr - update the dynamic ITR value based on statistics + * @q_vector: pointer to q_vector + * @ring_container: ring info to update the itr for + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * This functionality is controlled by ethtool's coalescing settings. + * NOTE: These calculations are only valid when operating in a single- + * queue environment. 
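+ * As a worked example of the thresholds below: an interrupt that saw
+ * 40 packets totalling 60000 bytes (1500 bytes/packet) while in the
+ * low_latency range moves to bulk_latency, since bytes > 10000 and
+ * bytes/packets exceeds 1200.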
+ **/ +static void igb_update_itr(struct igb_q_vector *q_vector, + struct igb_ring_container *ring_container) +{ + unsigned int packets = ring_container->total_packets; + unsigned int bytes = ring_container->total_bytes; + u8 itrval = ring_container->itr; + + /* no packets, exit with status unchanged */ + if (packets == 0) + return; + + switch (itrval) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + itrval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + itrval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + itrval = bulk_latency; + else if ((packets > 35)) + itrval = lowest_latency; + } else if (bytes/packets > 2000) { + itrval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + itrval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + itrval = low_latency; + } else if (bytes < 1500) { + itrval = low_latency; + } + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itrval; +} + +static void igb_set_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 new_itr = q_vector->itr_val; + u8 current_itr = 0; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = IGB_4K_ITR; + goto set_itr_now; + } + + igb_update_itr(q_vector, &q_vector->tx); + igb_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (current_itr == lowest_latency && + ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || + (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) + current_itr = low_latency; + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGB_70K_ITR; /* 70,000 ints/sec */ + break; + case low_latency: + new_itr = IGB_20K_ITR; /* 20,000 ints/sec */ + break; + case bulk_latency: + new_itr = IGB_4K_ITR; /* 4,000 ints/sec */ + break; + default: + break; + } + +set_itr_now: + if (new_itr != q_vector->itr_val) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > q_vector->itr_val ? + max((new_itr * q_vector->itr_val) / + (new_itr + (q_vector->itr_val >> 2)), + new_itr) : new_itr; + /* Don't write the value here; it resets the adapter's + * internal timer, and causes us to delay far longer than + * we should between interrupts. Instead, we write the ITR + * value at the beginning of the next interrupt so the timing + * ends up being correct. + */ + q_vector->itr_val = new_itr; + q_vector->set_itr = 1; + } +} + +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, + u32 type_tucmd, u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IGB_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + /* For 82575, context index must be unique per ring. */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + mss_l4len_idx |= tx_ring->reg_idx << 4; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static int igb_tso(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + + if (first->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM | + IGB_TX_FLAGS_IPV4; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= IGB_TX_FLAGS_TSO | + IGB_TX_FLAGS_CSUM; + } + + /* compute header lengths */ + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* MSS L4LEN IDX */ + mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; + + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + + return 1; +} + +static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) + return; + } else { + u8 l4_hdr = 0; + + switch (first->protocol) { + case htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + first->protocol); + } + break; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + E1000_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + E1000_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + E1000_ADVTXD_L4LEN_SHIFT; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but 
l4 proto=%x!\n", + l4_hdr); + } + break; + } + + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + } + + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; + + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); +} + +#define IGB_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = E1000_ADVTXD_DTYP_DATA | + E1000_ADVTXD_DCMD_DEXT | + E1000_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN, + (E1000_ADVTXD_DCMD_VLE)); + + /* set segmentation bits for TSO */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO, + (E1000_ADVTXD_DCMD_TSE)); + + /* set timestamp bit if present */ + cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP, + (E1000_ADVTXD_MAC_TSTAMP)); + + /* insert frame checksum */ + cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void igb_tx_olinfo_status(struct igb_ring *tx_ring, + union e1000_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; + + /* 82575 requires a unique index per ring */ + if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) + olinfo_status |= tx_ring->reg_idx << 4; + + /* insert L4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_CSUM, + (E1000_TXD_POPTS_TXSM << 8)); + + /* insert IPv4 checksum */ + olinfo_status |= IGB_SET_FLAG(tx_flags, + IGB_TX_FLAGS_IPV4, + (E1000_TXD_POPTS_IXSM << 8)); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! 
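+ * The queue was stopped above, but re-reading igb_desc_unused() shows
+ * the clean path freed enough descriptors in the meantime, so wake the
+ * queue now rather than waiting for the next Tx completion.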
*/ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + +static void igb_tx_map(struct igb_ring *tx_ring, + struct igb_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + struct skb_frag_struct *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = igb_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = IGB_TX_DESC(tx_ring, i); + + igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IGB_MAX_DATA_PER_TXD; + size -= IGB_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IGB_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, + size, DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IGB_TXD_DCMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* Make sure there is space in the ring for the next send. 
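+ * Stopping here, before the tail write below, keeps a worst-case
+ * number of descriptors (DESC_NEEDED) in reserve for the next frame;
+ * the budget mirrors the per-fragment/context/gap accounting spelled
+ * out in igb_xmit_frame_ring().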
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } + return; + +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, + struct igb_ring *tx_ring) +{ + struct igb_tx_buffer *first; + int tso; + u32 tx_flags = 0; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = vlan_get_protocol(skb); + u8 hdr_len = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) { + unsigned short f; + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + } else { + count += skb_shinfo(skb)->nr_frags; + } + + if (igb_maybe_stop_tx(tx_ring, count + 3)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); + + if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); + } + } + + skb_tx_timestamp(skb); + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGB_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + + tso = igb_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + igb_tx_csum(tx_ring, first); + + igb_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + igb_unmap_and_free_tx_resource(tx_ring, first); + + return NETDEV_TX_OK; +} + +static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, + struct sk_buff *skb) +{ + unsigned int r_idx = skb->queue_mapping; + + if (r_idx >= adapter->num_tx_queues) + r_idx = r_idx % adapter->num_tx_queues; + + return adapter->tx_ring[r_idx]; +} + +static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size with TCTL.PSP set is 17 so pad the skb + * in order to meet this minimum size requirement. 
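+ * (skb_put_padto() frees the skb itself when padding fails, which is
+ * why the failure path below returns NETDEV_TX_OK without a kfree.)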
+ */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); +} + +/** + * igb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void igb_tx_timeout(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + + if (hw->mac.type >= e1000_82580) + hw->dev_spec._82575.global_device_reset = true; + + schedule_work(&adapter->reset_task); + wr32(E1000_EICS, + (adapter->eims_enable_mask & ~adapter->eims_other)); +} + +static void igb_reset_task(struct work_struct *work) +{ + struct igb_adapter *adapter; + adapter = container_of(work, struct igb_adapter, reset_task); + + igb_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + igb_reinit_locked(adapter); +} + +/** + * igb_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + **/ +static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + spin_lock(&adapter->stats64_lock); + igb_update_stats(adapter, &adapter->stats64); + memcpy(stats, &adapter->stats64, sizeof(*stats)); + spin_unlock(&adapter->stats64_lock); + + return stats; +} + +/** + * igb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_err(&pdev->dev, "Invalid MTU setting\n"); + return -EINVAL; + } + +#define MAX_STD_JUMBO_FRAME_SIZE 9238 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + dev_err(&pdev->dev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; + + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + /* igb_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + + if (netif_running(netdev)) + igb_down(adapter); + + dev_info(&pdev->dev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igb_up(adapter); + else + igb_reset(adapter); + + clear_bit(__IGB_RESETTING, &adapter->state); + + return 0; +} + +/** + * igb_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void igb_update_stats(struct igb_adapter *adapter, + struct rtnl_link_stats64 *net_stats) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 reg, mpc; + int i; + u64 bytes, packets; + unsigned int start; + u64 _bytes, _packets; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
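+ * Per-ring byte and packet totals below are read inside
+ * u64_stats_fetch_begin_irq()/retry loops so the 64-bit counters are
+ * sampled consistently even on 32-bit hosts.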
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + bytes = 0; + packets = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); + + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); + _bytes = ring->rx_stats.bytes; + _packets = ring->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + + net_stats->rx_bytes = bytes; + net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); + _bytes = ring->tx_stats.bytes; + _packets = ring->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); + bytes += _bytes; + packets += _packets; + } + net_stats->tx_bytes = bytes; + net_stats->tx_packets = packets; + rcu_read_unlock(); + + /* read stats registers */ + adapter->stats.crcerrs += rd32(E1000_CRCERRS); + adapter->stats.gprc += rd32(E1000_GPRC); + adapter->stats.gorc += rd32(E1000_GORCL); + rd32(E1000_GORCH); /* clear GORCL */ + adapter->stats.bprc += rd32(E1000_BPRC); + adapter->stats.mprc += rd32(E1000_MPRC); + adapter->stats.roc += rd32(E1000_ROC); + + adapter->stats.prc64 += rd32(E1000_PRC64); + adapter->stats.prc127 += rd32(E1000_PRC127); + adapter->stats.prc255 += rd32(E1000_PRC255); + adapter->stats.prc511 += rd32(E1000_PRC511); + adapter->stats.prc1023 += rd32(E1000_PRC1023); + adapter->stats.prc1522 += rd32(E1000_PRC1522); + adapter->stats.symerrs += rd32(E1000_SYMERRS); + adapter->stats.sec += rd32(E1000_SEC); + + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; + adapter->stats.scc += rd32(E1000_SCC); + adapter->stats.ecol += rd32(E1000_ECOL); + adapter->stats.mcc += rd32(E1000_MCC); + adapter->stats.latecol += rd32(E1000_LATECOL); + adapter->stats.dc += rd32(E1000_DC); + adapter->stats.rlec += rd32(E1000_RLEC); + adapter->stats.xonrxc += rd32(E1000_XONRXC); + adapter->stats.xontxc += rd32(E1000_XONTXC); + adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); + adapter->stats.xofftxc += rd32(E1000_XOFFTXC); + adapter->stats.fcruc += rd32(E1000_FCRUC); + adapter->stats.gptc += rd32(E1000_GPTC); + adapter->stats.gotc += rd32(E1000_GOTCL); + rd32(E1000_GOTCH); /* clear GOTCL */ + adapter->stats.rnbc += rd32(E1000_RNBC); + adapter->stats.ruc += rd32(E1000_RUC); + adapter->stats.rfc += rd32(E1000_RFC); + adapter->stats.rjc += rd32(E1000_RJC); + adapter->stats.tor += rd32(E1000_TORH); + adapter->stats.tot += rd32(E1000_TOTH); + adapter->stats.tpr += rd32(E1000_TPR); + + adapter->stats.ptc64 += rd32(E1000_PTC64); + adapter->stats.ptc127 += rd32(E1000_PTC127); + adapter->stats.ptc255 += rd32(E1000_PTC255); + adapter->stats.ptc511 += rd32(E1000_PTC511); + adapter->stats.ptc1023 += rd32(E1000_PTC1023); + adapter->stats.ptc1522 += rd32(E1000_PTC1522); + + adapter->stats.mptc += rd32(E1000_MPTC); + adapter->stats.bptc += rd32(E1000_BPTC); + + adapter->stats.tpt += rd32(E1000_TPT); + adapter->stats.colc += rd32(E1000_COLC); + + adapter->stats.algnerrc += rd32(E1000_ALGNERRC); + /* read internal phy specific stats */ + reg = rd32(E1000_CTRL_EXT); + if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { + 
adapter->stats.rxerrc += rd32(E1000_RXERRC); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); + } + + adapter->stats.tsctc += rd32(E1000_TSCTC); + adapter->stats.tsctfc += rd32(E1000_TSCTFC); + + adapter->stats.iac += rd32(E1000_IAC); + adapter->stats.icrxoc += rd32(E1000_ICRXOC); + adapter->stats.icrxptc += rd32(E1000_ICRXPTC); + adapter->stats.icrxatc += rd32(E1000_ICRXATC); + adapter->stats.ictxptc += rd32(E1000_ICTXPTC); + adapter->stats.ictxatc += rd32(E1000_ICTXATC); + adapter->stats.ictxqec += rd32(E1000_ICTXQEC); + adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); + adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); + + /* Fill out the OS statistics structure */ + net_stats->multicast = adapter->stats.mprc; + net_stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + net_stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + net_stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + net_stats->rx_crc_errors = adapter->stats.crcerrs; + net_stats->rx_frame_errors = adapter->stats.algnerrc; + net_stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + net_stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + net_stats->tx_aborted_errors = adapter->stats.ecol; + net_stats->tx_window_errors = adapter->stats.latecol; + net_stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += rd32(E1000_MGTPTC); + adapter->stats.mgprc += rd32(E1000_MGTPRC); + adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } +} + +static void igb_tsync_interrupt(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + struct timespec ts; + u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); + + if (tsicr & TSINTR_SYS_WRAP) { + event.type = PTP_CLOCK_PPS; + if (adapter->ptp_caps.pps) + ptp_clock_event(adapter->ptp_clock, &event); + else + dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); + ack |= TSINTR_SYS_WRAP; + } + + if (tsicr & E1000_TSICR_TXTS) { + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + ack |= E1000_TSICR_TXTS; + } + + if (tsicr & TSINTR_TT0) { + spin_lock(&adapter->tmreg_lock); + ts = timespec_add(adapter->perout[0].start, + adapter->perout[0].period); + wr32(E1000_TRGTTIML0, ts.tv_nsec); + wr32(E1000_TRGTTIMH0, ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[0].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack |= TSINTR_TT0; + } + + if (tsicr & TSINTR_TT1) { + spin_lock(&adapter->tmreg_lock); + ts = timespec_add(adapter->perout[1].start, + adapter->perout[1].period); + wr32(E1000_TRGTTIML1, ts.tv_nsec); + wr32(E1000_TRGTTIMH1, ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT1; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[1].start = ts; + spin_unlock(&adapter->tmreg_lock); + ack 
|= TSINTR_TT1; + } + + if (tsicr & TSINTR_AUTT0) { + nsec = rd32(E1000_AUXSTMPL0); + sec = rd32(E1000_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT0; + } + + if (tsicr & TSINTR_AUTT1) { + nsec = rd32(E1000_AUXSTMPL1); + sec = rd32(E1000_AUXSTMPH1); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); + ack |= TSINTR_AUTT1; + } + + /* acknowledge the interrupts */ + wr32(E1000_TSICR, ack); +} + +static irqreturn_t igb_msix_other(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct e1000_hw *hw = &adapter->hw; + u32 icr = rd32(E1000_ICR); + /* reading ICR causes bit 31 of EICR to be cleared */ + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + /* The DMA Out of Sync is also indication of a spoof event + * in IOV mode. Check the Wrong VM Behavior register to + * see if it is really a spoof event. + */ + igb_check_wvbr(adapter); + } + + /* Check for a mailbox event */ + if (icr & E1000_ICR_VMMB) + igb_msg_task(adapter); + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + wr32(E1000_EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static void igb_write_itr(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + u32 itr_val = q_vector->itr_val & 0x7FFC; + + if (!q_vector->set_itr) + return; + + if (!itr_val) + itr_val = 0x4; + + if (adapter->hw.mac.type == e1000_82575) + itr_val |= itr_val << 16; + else + itr_val |= E1000_EITR_CNT_IGNR; + + writel(itr_val, q_vector->itr_register); + q_vector->set_itr = 0; +} + +static irqreturn_t igb_msix_ring(int irq, void *data) +{ + struct igb_q_vector *q_vector = data; + + /* Write the ITR value calculated from the previous interrupt. */ + igb_write_itr(q_vector); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_IGB_DCA +static void igb_update_tx_dca(struct igb_adapter *adapter, + struct igb_ring *tx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + + if (hw->mac.type != e1000_82575) + txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN | + E1000_DCA_TXCTRL_DATA_RRO_EN | + E1000_DCA_TXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl); +} + +static void igb_update_rx_dca(struct igb_adapter *adapter, + struct igb_ring *rx_ring, + int cpu) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); + + if (hw->mac.type != e1000_82575) + rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; + + /* We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
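+ * (Unlike the Tx path above, only descriptor relaxed ordering is
+ * enabled here; no data relaxed-ordering bit is set for Rx, where the
+ * packet data is written to memory rather than read.)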
+ */ + rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN | + E1000_DCA_RXCTRL_DESC_DCA_EN; + + wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl); +} + +static void igb_update_dca(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + if (q_vector->tx.ring) + igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); + + if (q_vector->rx.ring) + igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void igb_setup_dca(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) + return; + + /* Always use CB2 mode, difference is masked in the CB driver. */ + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + igb_update_dca(adapter->q_vector[i]); + } +} + +static int __igb_notify_dca(struct device *dev, void *data) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if already enabled, don't do it again */ + if (adapter->flags & IGB_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; + dev_info(&pdev->dev, "DCA enabled\n"); + igb_setup_dca(adapter); + break; + } + /* Fall Through since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IGB_FLAG_DCA_ENABLED) { + /* without this a class_device is left + * hanging around in the sysfs model + */ + dca_remove_requester(dev); + dev_info(&pdev->dev, "DCA disabled\n"); + adapter->flags &= ~IGB_FLAG_DCA_ENABLED; + wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); + } + break; + } + + return 0; +} + +static int igb_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, + __igb_notify_dca); + + return ret_val ? 
NOTIFY_BAD : NOTIFY_DONE; +} +#endif /* CONFIG_IGB_DCA */ + +#ifdef CONFIG_PCI_IOV +static int igb_vf_configure(struct igb_adapter *adapter, int vf) +{ + unsigned char mac_addr[ETH_ALEN]; + + eth_zero_addr(mac_addr); + igb_set_vf_mac(adapter, vf, mac_addr); + + /* By default spoof check is enabled for all VFs */ + adapter->vf_data[vf].spoofchk_enabled = true; + + return 0; +} + +#endif +static void igb_ping_all_vfs(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->vfs_allocated_count; i++) { + ping = E1000_PF_CONTROL_MSG; + if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) + ping |= E1000_VT_MSGTYPE_CTS; + igb_write_mbx(hw, &ping, 1, i); + } +} + +static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vmolr = rd32(E1000_VMOLR(vf)); + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | + IGB_VF_FLAG_MULTI_PROMISC); + vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { + vmolr |= E1000_VMOLR_MPME; + vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; + *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; + } else { + /* if we have hashes and we are clearing a multicast promisc + * flag we need to write the hashes to the MTA as this step + * was previously skipped + */ + if (vf_data->num_vf_mc_hashes > 30) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + int j; + + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + } + + wr32(E1000_VMOLR(vf), vmolr); + + /* there are flags left unprocessed, likely not supported */ + if (*msgbuf & E1000_VT_MSGINFO_MASK) + return -EINVAL; + + return 0; +} + +static int igb_set_vf_multicasts(struct igb_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + int i; + + /* salt away the number of multicast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vf_data->num_vf_mc_hashes = n; + + /* only up to 30 hash values supported */ + if (n > 30) + n = 30; + + /* store the hashes for later use */ + for (i = 0; i < n; i++) + vf_data->vf_mc_hashes[i] = hash_list[i]; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); + + return 0; +} + +static void igb_restore_vf_multicasts(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data; + int i, j; + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + u32 vmolr = rd32(E1000_VMOLR(i)); + + vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); + + vf_data = &adapter->vf_data[i]; + + if ((vf_data->num_vf_mc_hashes > 30) || + (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) { + vmolr |= E1000_VMOLR_MPME; + } else if (vf_data->num_vf_mc_hashes) { + vmolr |= E1000_VMOLR_ROMPE; + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); + } + wr32(E1000_VMOLR(i), vmolr); + } +} + +static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pool_mask, reg, vid; + int i; + + pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* Find the vlan filter for this id */ + for (i = 0; i < 
E1000_VLVF_ARRAY_SIZE; i++) { + reg = rd32(E1000_VLVF(i)); + + /* remove the vf from the pool */ + reg &= ~pool_mask; + + /* if pool is empty then remove entry from vfta */ + if (!(reg & E1000_VLVF_POOLSEL_MASK) && + (reg & E1000_VLVF_VLANID_ENABLE)) { + reg = 0; + vid = reg & E1000_VLVF_VLANID_MASK; + igb_vfta_set(hw, vid, false); + } + + wr32(E1000_VLVF(i), reg); + } + + adapter->vf_data[vf].vlans_enabled = 0; +} + +static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg, i; + + /* The vlvf table only exists on 82576 hardware and newer */ + if (hw->mac.type < e1000_82576) + return -1; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return -1; + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + reg = rd32(E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; + } + + if (add) { + if (i == E1000_VLVF_ARRAY_SIZE) { + /* Did not find a matching VLAN ID entry that was + * enabled. Search for a free filter entry, i.e. + * one without the enable bit set + */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + reg = rd32(E1000_VLVF(i)); + if (!(reg & E1000_VLVF_VLANID_ENABLE)) + break; + } + } + if (i < E1000_VLVF_ARRAY_SIZE) { + /* Found an enabled/available entry */ + reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + + /* if !enabled we need to set this up in vfta */ + if (!(reg & E1000_VLVF_VLANID_ENABLE)) { + /* add VID to filter table */ + igb_vfta_set(hw, vid, true); + reg |= E1000_VLVF_VLANID_ENABLE; + } + reg &= ~E1000_VLVF_VLANID_MASK; + reg |= vid; + wr32(E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) + return 0; + + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + + reg = rd32(E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size += 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; + wr32(E1000_VMOLR(vf), reg); + } + + adapter->vf_data[vf].vlans_enabled++; + } + } else { + if (i < E1000_VLVF_ARRAY_SIZE) { + /* remove vf from the pool */ + reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); + /* if pool is empty then remove entry from vfta */ + if (!(reg & E1000_VLVF_POOLSEL_MASK)) { + reg = 0; + igb_vfta_set(hw, vid, false); + } + wr32(E1000_VLVF(i), reg); + + /* do not modify RLPML for PF devices */ + if (vf >= adapter->vfs_allocated_count) + return 0; + + adapter->vf_data[vf].vlans_enabled--; + if (!adapter->vf_data[vf].vlans_enabled) { + u32 size; + + reg = rd32(E1000_VMOLR(vf)); + size = reg & E1000_VMOLR_RLPML_MASK; + size -= 4; + reg &= ~E1000_VMOLR_RLPML_MASK; + reg |= size; + wr32(E1000_VMOLR(vf), reg); + } + } + } + return 0; +} + +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + + if (vid) + wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + wr32(E1000_VMVIR(vf), 0); +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos) +{ + int err = 0; + struct igb_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + err = igb_vlvf_set(adapter, vlan, !!vlan, vf); + if (err) + goto out; + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, 
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + } else { + igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + igb_set_vmvir(adapter, vlan, vf); + igb_set_vmolr(adapter, vf, true); + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + } +out: + return err; +} + +static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + u32 reg; + + /* Find the vlan filter for this id */ + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { + reg = rd32(E1000_VLVF(i)); + if ((reg & E1000_VLVF_VLANID_ENABLE) && + vid == (reg & E1000_VLVF_VLANID_MASK)) + break; + } + + if (i >= E1000_VLVF_ARRAY_SIZE) + i = -1; + + return i; +} + +static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int err = 0; + + /* If in promiscuous mode we need to make sure the PF also has + * the VLAN filter set. + */ + if (add && (adapter->netdev->flags & IFF_PROMISC)) + err = igb_vlvf_set(adapter, vid, add, + adapter->vfs_allocated_count); + if (err) + goto out; + + err = igb_vlvf_set(adapter, vid, add, vf); + + if (err) + goto out; + + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && (adapter->netdev->flags & IFF_PROMISC)) { + u32 vlvf, bits; + int regndx = igb_find_vlvf_entry(adapter, vid); + + if (regndx < 0) + goto out; + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + vlvf = bits = rd32(E1000_VLVF(regndx)); + bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. 
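+ * (The igb_vlvf_set() call below targets pool index
+ * adapter->vfs_allocated_count, i.e. the PF's own pool; the requesting
+ * VF's pool bit was already handled by the earlier
+ * igb_vlvf_set(adapter, vid, add, vf) call.)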
+ */ + if ((vlvf & VLAN_VID_MASK) == vid && + !test_bit(vid, adapter->active_vlans) && + !bits) + igb_vlvf_set(adapter, vid, add, + adapter->vfs_allocated_count); + } + +out: + return err; +} + +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +{ + /* clear flags - except flag that indicates PF has set the MAC */ + adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; + adapter->vf_data[vf].last_nack = jiffies; + + /* reset offloads to defaults */ + igb_set_vmolr(adapter, vf, true); + + /* reset vlans for device */ + igb_clear_vf_vfta(adapter, vf); + if (adapter->vf_data[vf].pf_vlan) + igb_ndo_set_vf_vlan(adapter->netdev, vf, + adapter->vf_data[vf].pf_vlan, + adapter->vf_data[vf].pf_qos); + else + igb_clear_vf_vfta(adapter, vf); + + /* reset multicast table array for vf */ + adapter->vf_data[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + igb_set_rx_mode(adapter->netdev); +} + +static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) +{ + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + + /* clear mac address as we were hotplug removed/added */ + if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) + eth_zero_addr(vf_mac); + + /* process remaining reset events */ + igb_vf_reset(adapter, vf); +} + +static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + u32 reg, msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* process all the same items cleared in a function level reset */ + igb_vf_reset(adapter, vf); + + /* set vf mac address */ + igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); + + /* enable transmit and receive for vf */ + reg = rd32(E1000_VFTE); + wr32(E1000_VFTE, reg | (1 << vf)); + reg = rd32(E1000_VFRE); + wr32(E1000_VFRE, reg | (1 << vf)); + + adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; + + /* reply to reset with ack and vf mac address */ + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK; + } + igb_write_mbx(hw, msgbuf, 3, vf); +} + +static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) +{ + /* The VF MAC Address is stored in a packed array of bytes + * starting at the second 32 bit word of the msg array + */ + unsigned char *addr = (char *)&msg[1]; + int err = -1; + + if (is_valid_ether_addr(addr)) + err = igb_set_vf_mac(adapter, vf, addr); + + return err; +} + +static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + u32 msg = E1000_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!(vf_data->flags & IGB_VF_FLAG_CTS) && + time_after(jiffies, vf_data->last_nack + (2 * HZ))) { + igb_write_mbx(hw, &msg, 1, vf); + vf_data->last_nack = jiffies; + } +} + +static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) +{ + struct pci_dev *pdev = adapter->pdev; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + struct e1000_hw *hw = &adapter->hw; + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + s32 retval; + + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + + if (retval) { + /* if receive failed revoke VF CTS stats and restart init */ + dev_err(&pdev->dev, "Error receiving message from VF\n"); 
+ vf_data->flags &= ~IGB_VF_FLAG_CTS; + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + return; + goto out; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) + return; + + /* until the vf completes a reset it should not be + * allowed to start any configuration. + */ + if (msgbuf[0] == E1000_VF_RESET) { + igb_vf_reset_msg(adapter, vf); + return; + } + + if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { + if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) + return; + retval = -1; + goto out; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case E1000_VF_SET_MAC_ADDR: + retval = -EINVAL; + if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC)) + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); + else + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n", + vf); + break; + case E1000_VF_SET_PROMISC: + retval = igb_set_vf_promisc(adapter, msgbuf, vf); + break; + case E1000_VF_SET_MULTICAST: + retval = igb_set_vf_multicasts(adapter, msgbuf, vf); + break; + case E1000_VF_SET_LPE: + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); + break; + case E1000_VF_SET_VLAN: + retval = -1; + if (vf_data->pf_vlan) + dev_warn(&pdev->dev, + "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", + vf); + else + retval = igb_set_vf_vlan(adapter, msgbuf, vf); + break; + default: + dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); + retval = -1; + break; + } + + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; +out: + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; + else + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + + igb_write_mbx(hw, msgbuf, 1, vf); +} + +static void igb_msg_task(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { + /* process any reset requests */ + if (!igb_check_for_rst(hw, vf)) + igb_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!igb_check_for_msg(hw, vf)) + igb_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!igb_check_for_ack(hw, vf)) + igb_rcv_ack_from_vf(adapter, vf); + } +} + +/** + * igb_set_uta - Set unicast filter table address + * @adapter: board private structure + * + * The unicast table address is a register array of 32-bit registers. 
+ * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled + **/ +static void igb_set_uta(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int i; + + /* The UTA table only exists on 82576 hardware and newer */ + if (hw->mac.type < e1000_82576) + return; + + /* we only need to do this if VMDq is enabled */ + if (!adapter->vfs_allocated_count) + return; + + for (i = 0; i < hw->mac.uta_reg_count; i++) + array_wr32(E1000_UTA, i, ~0); +} + +/** + * igb_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr_msi(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* read ICR disables interrupts using IAM */ + u32 icr = rd32(E1000_ICR); + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * igb_intr - Legacy Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t igb_intr(int irq, void *data) +{ + struct igb_adapter *adapter = data; + struct igb_q_vector *q_vector = adapter->q_vector[0]; + struct e1000_hw *hw = &adapter->hw; + /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. 
No + * need for the IMC write + */ + u32 icr = rd32(E1000_ICR); + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + igb_write_itr(q_vector); + + if (icr & E1000_ICR_DRSTA) + schedule_work(&adapter->reset_task); + + if (icr & E1000_ICR_DOUTSYNC) { + /* HW is reporting DMA is out of sync */ + adapter->stats.doosync++; + } + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__IGB_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (icr & E1000_ICR_TS) + igb_tsync_interrupt(adapter); + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + + if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || + (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { + if ((adapter->num_q_vectors == 1) && !adapter->vf_data) + igb_set_itr(q_vector); + else + igb_update_ring_itr(q_vector); + } + + if (!test_bit(__IGB_DOWN, &adapter->state)) { + if (adapter->flags & IGB_FLAG_HAS_MSIX) + wr32(E1000_EIMS, q_vector->eims_value); + else + igb_irq_enable(adapter); + } +} + +/** + * igb_poll - NAPI Rx polling callback + * @napi: napi polling structure + * @budget: count of how many packets we should handle + **/ +static int igb_poll(struct napi_struct *napi, int budget) +{ + struct igb_q_vector *q_vector = container_of(napi, + struct igb_q_vector, + napi); + bool clean_complete = true; + +#ifdef CONFIG_IGB_DCA + if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) + igb_update_dca(q_vector); +#endif + if (q_vector->tx.ring) + clean_complete = igb_clean_tx_irq(q_vector); + + if (q_vector->rx.ring) + clean_complete &= igb_clean_rx_irq(q_vector, budget); + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* If not enough Rx work done, exit the polling mode */ + napi_complete(napi); + igb_ring_irq_enable(q_vector); + + return 0; +} + +/** + * igb_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: pointer to q_vector containing needed info + * + * returns true if ring is completely cleaned + **/ +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct igb_ring *tx_ring = q_vector->tx.ring; + struct igb_tx_buffer *tx_buffer; + union e1000_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IGB_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IGB_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += 
tx_buffer->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* clear last DMA location and unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IGB_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.bytes += total_bytes; + tx_ring->tx_stats.packets += total_packets; + u64_stats_update_end(&tx_ring->tx_syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { + struct e1000_hw *hw = &adapter->hw; + + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); + if (tx_buffer->next_to_watch && + time_after(jiffies, tx_buffer->time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + dev_err(tx_ring->dev, + "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%p>\n" + " jiffies <%lx>\n" + " desc.status <%x>\n", + tx_ring->queue_index, + rd32(E1000_TDH(tx_ring->reg_idx)), + readl(tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_buffer->time_stamp, + tx_buffer->next_to_watch, + jiffies, + tx_buffer->next_to_watch->wb.status); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + /* we are about to reset, no point in enabling stuff */ + return true; + } + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && + netif_carrier_ok(tx_ring->netdev) && + igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !(test_bit(__IGB_DOWN, &adapter->state))) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp); + tx_ring->tx_stats.restart_queue++; + u64_stats_update_end(&tx_ring->tx_syncp); + } + } + + return !!budget; +} + +/** + * igb_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void igb_reuse_rx_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *old_buff) +{ + struct igb_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma, + old_buff->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static inline bool igb_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, + struct page *page, + unsigned int truesize) +{ + /* avoid re-using remote pages */ + if (unlikely(igb_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IGB_RX_BUFSZ; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ)) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +/** + * igb_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
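+ *
+ * Frames of up to IGB_RX_HDR_LEN that start a new skb are copied
+ * straight into the linear area so the page can normally be recycled
+ * immediately; larger frames are attached as a page fragment and the
+ * page is only reused when igb_can_reuse_rx_page() allows it.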
+ **/ +static bool igb_add_rx_frag(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IGB_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!igb_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + return igb_can_reuse_rx_page(rx_buffer, page, truesize); +} + +static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct igb_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IGB_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + igb_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + + return skb; +} + +static inline void igb_rx_checksum(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM)) + return; + + /* Rx checksum disabled via ethtool */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* TCP/UDP checksum error bit is set */ + if (igb_test_staterr(rx_desc, + E1000_RXDEXT_STATERR_TCPE | + E1000_RXDEXT_STATERR_IPE)) { + /* work around errata with sctp packets where the TCPE aka + * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) + * packets, (aka let the stack check the crc32c) + */ + if (!((skb->len == 60) && + test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { + u64_stats_update_begin(&ring->rx_syncp); + ring->rx_stats.csum_err++; + u64_stats_update_end(&ring->rx_syncp); + } + /* let the stack verify checksum errors */ + return; + 
} + /* It must be a TCP or UDP packet with a valid checksum */ + if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS | + E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + dev_dbg(ring->dev, "cksum success: bits %08X\n", + le32_to_cpu(rx_desc->wb.upper.status_error)); +} + +static inline void igb_rx_hash(struct igb_ring *ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +/** + * igb_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool igb_is_non_eop(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IGB_RX_DESC(rx_ring, ntc)); + + if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))) + return false; + + return true; +} + +/** + * igb_pull_tail - igb specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being adjusted + * + * This function is an igb specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void igb_pull_tail(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + /* retrieve timestamp from buffer */ + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + + /* update pointers to remove timestamp header */ + skb_frag_size_sub(frag, IGB_TS_HDR_LEN); + frag->page_offset += IGB_TS_HDR_LEN; + skb->data_len -= IGB_TS_HDR_LEN; + skb->len -= IGB_TS_HDR_LEN; + + /* move va to start of packet data */ + va += IGB_TS_HDR_LEN; + } + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
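+ * eth_get_headlen() works out how much of the frame actually needs
+ * to live in the linear area, capped at IGB_RX_HDR_LEN.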
+ */ + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * igb_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool igb_cleanup_headers(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { + struct net_device *netdev = rx_ring->netdev; + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + igb_pull_tail(rx_ring, rx_desc, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * igb_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. 
+ **/ +static void igb_process_skb_fields(struct igb_ring *rx_ring, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + igb_rx_hash(rx_ring, rx_desc, skb); + + igb_rx_checksum(rx_ring, rx_desc, skb); + + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) && + !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) + igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { + u16 vid; + + if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) && + test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) + vid = be16_to_cpu(rx_desc->wb.upper.vlan); + else + vid = le16_to_cpu(rx_desc->wb.upper.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) +{ + struct igb_ring *rx_ring = q_vector->rx.ring; + struct sk_buff *skb = rx_ring->skb; + unsigned int total_bytes = 0, total_packets = 0; + u16 cleaned_count = igb_desc_unused(rx_ring); + + while (likely(total_packets < budget)) { + union e1000_adv_rx_desc *rx_desc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { + igb_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!rx_desc->wb.upper.status_error) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + /* retrieve a buffer from the ring */ + skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (igb_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (igb_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + igb_process_skb_fields(rx_ring, rx_desc, skb); + + napi_gro_receive(&q_vector->napi, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->rx_syncp); + rx_ring->rx_stats.packets += total_packets; + rx_ring->rx_stats.bytes += total_bytes; + u64_stats_update_end(&rx_ring->rx_syncp); + q_vector->rx.total_packets += total_packets; + q_vector->rx.total_bytes += total_bytes; + + if (cleaned_count) + igb_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_packets < budget; +} + +static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, + struct igb_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding 
memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * igb_alloc_rx_buffers - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) +{ + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IGB_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!igb_alloc_mapped_page(rx_ring, bi)) + break; + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IGB_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } +} + +/** + * igb_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) + return -EIO; + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * igb_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return igb_mii_ioctl(netdev, ifr, cmd); + case SIOCGHWTSTAMP: + return igb_ptp_get_ts_config(netdev, ifr); + case SIOCSHWTSTAMP: + return igb_ptp_set_ts_config(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + +s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_read_word(adapter->pdev, reg, value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + if (pcie_capability_write_word(adapter->pdev, reg, *value)) + return -E1000_ERR_CONFIG; + + return 0; +} + +static void igb_vlan_mode(struct 
net_device *netdev, netdev_features_t features) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + + if (enable) { + /* enable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl |= E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + + /* Disable CFI check */ + rctl = rd32(E1000_RCTL); + rctl &= ~E1000_RCTL_CFIEN; + wr32(E1000_RCTL, rctl); + } else { + /* disable VLAN tag insert/strip */ + ctrl = rd32(E1000_CTRL); + ctrl &= ~E1000_CTRL_VME; + wr32(E1000_CTRL, ctrl); + } + + igb_rlpml_set(adapter); +} + +static int igb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + + /* attempt to add filter to vlvf array */ + igb_vlvf_set(adapter, vid, true, pf_id); + + /* add the filter since PF can receive vlans w/o entry in vlvf */ + igb_vfta_set(hw, vid, true); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int igb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int pf_id = adapter->vfs_allocated_count; + s32 err; + + /* remove vlan from VLVF table array */ + err = igb_vlvf_set(adapter, vid, false, pf_id); + + /* if vid was not present in VLVF just remove it from table */ + if (err) + igb_vfta_set(hw, vid, false); + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +static void igb_restore_vlan(struct igb_adapter *adapter) +{ + u16 vid; + + igb_vlan_mode(adapter->netdev, adapter->netdev->features); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NIC's only allow 1000 gbps Full duplex + * and 100Mbps Full duplex for 100baseFx sfp + */ + if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + case SPEED_10 + DUPLEX_FULL: + case SPEED_100 + DUPLEX_HALF: + goto err_inval; + default: + break; + } + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, + bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = 
netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl, status; + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __igb_close(netdev, true); + + igb_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + status = rd32(E1000_STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + igb_setup_rctl(adapter); + igb_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = rd32(E1000_RCTL); + rctl |= E1000_RCTL_MPE; + wr32(E1000_RCTL, rctl); + } + + ctrl = rd32(E1000_CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC; + wr32(E1000_CTRL, ctrl); + + /* Allow time for pending master requests to run */ + igb_disable_pcie_master(hw); + + wr32(E1000_WUC, E1000_WUC_PME_EN); + wr32(E1000_WUFC, wufc); + } else { + wr32(E1000_WUC, 0); + wr32(E1000_WUFC, 0); + } + + *enable_wake = wufc || adapter->en_mng_pt; + if (!*enable_wake) + igb_power_down_link(adapter); + else + igb_power_up_link(adapter); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + igb_release_hw_control(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int igb_suspend(struct device *dev) +{ + int retval; + bool wake; + struct pci_dev *pdev = to_pci_dev(dev); + + retval = __igb_shutdown(pdev, &wake, 0); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static int igb_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "igb: Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + igb_reset(adapter); + + /* let the f/w know that the h/w is now under the control of the + * driver. 
+ */ + igb_get_hw_control(adapter); + + wr32(E1000_WUS, ~0); + + if (netdev->flags & IFF_UP) { + rtnl_lock(); + err = __igb_open(netdev, true); + rtnl_unlock(); + if (err) + return err; + } + + netif_device_attach(netdev); + return 0; +} + +static int igb_runtime_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (!igb_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC * 5); + + return -EBUSY; +} + +static int igb_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int retval; + bool wake; + + retval = __igb_shutdown(pdev, &wake, 1); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int igb_runtime_resume(struct device *dev) +{ + return igb_resume(dev); +} +#endif /* CONFIG_PM */ + +static void igb_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __igb_shutdown(pdev, &wake, 0); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PCI_IOV +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + else + igb_reset(adapter); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_pci_disable_sriov(struct pci_dev *dev) +{ + int err = igb_disable_sriov(dev); + + if (!err) + err = igb_sriov_reinit(dev); + + return err; +} + +static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + int err = igb_enable_sriov(dev, num_vfs); + + if (err) + goto out; + + err = igb_sriov_reinit(dev); + if (!err) + return num_vfs; + +out: + return err; +} + +#endif +static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + if (num_vfs == 0) + return igb_pci_disable_sriov(dev); + else + return igb_pci_enable_sriov(dev, num_vfs); +#endif + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void igb_netpoll(struct net_device *netdev) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct igb_q_vector *q_vector; + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (adapter->flags & IGB_FLAG_HAS_MSIX) + wr32(E1000_EIMC, q_vector->eims_value); + else + igb_irq_disable(adapter); + napi_schedule(&q_vector->napi); + } +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +/** + * igb_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
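+ *
+ * If recovery is possible, the PCI error handling core follows up by
+ * calling igb_io_slot_reset() and igb_io_resume() below.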
+ **/ +static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igb_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igb_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igb_resume routine. + **/ +static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + pci_ers_result_t result; + int err; + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + igb_reset(adapter); + wr32(E1000_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); + /* non-fatal, continue */ + } + + return result; +} + +/** + * igb_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igb_resume routine. + */ +static void igb_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igb_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igb_up(adapter)) { + dev_err(&pdev->dev, "igb_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* let the f/w know that the h/w is now under the control of the + * driver. + */ + igb_get_hw_control(adapter); +} + +static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, + u8 qsel) +{ + u32 rar_low, rar_high; + struct e1000_hw *hw = &adapter->hw; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Indicate to hardware the Address is Valid. 
*/ + rar_high |= E1000_RAH_AV; + + if (hw->mac.type == e1000_82575) + rar_high |= E1000_RAH_POOL_1 * qsel; + else + rar_high |= E1000_RAH_POOL_1 << qsel; + + wr32(E1000_RAL(index), rar_low); + wrfl(); + wr32(E1000_RAH(index), rar_high); + wrfl(); +} + +static int igb_set_vf_mac(struct igb_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + struct e1000_hw *hw = &adapter->hw; + /* VF MAC addresses start at end of receive addresses and moves + * towards the first, as a result a collision should not be possible + */ + int rar_entry = hw->mac.rar_entry_count - (vf + 1); + + memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + + igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); + + return 0; +} + +static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) + return -EINVAL; + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, + "Reload the VF driver to make this change effective."); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + return igb_set_vf_mac(adapter, vf, mac); +} + +static int igb_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + default: + return 0; + } +} + +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / + tx_rate; + + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & + E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. + */ + wr32(E1000_RTTBCNRM, 0x14); + wr32(E1000_RTTBCNRC, bcnrc_val); +} + +static void igb_check_vf_rate_limit(struct igb_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF TX rate limit was not set or not supported */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. 
VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, + actual_link_speed); + } +} + +static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; + + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + + if (min_tx_rate) + return -EINVAL; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || + (max_tx_rate < 0) || + (max_tx_rate > actual_link_speed)) + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; + igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed); + + return 0; +} + +static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 reg_val, reg_offset; + + if (!adapter->vfs_allocated_count) + return -EOPNOTSUPP; + + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + + reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC; + reg_val = rd32(reg_offset); + if (setting) + reg_val |= ((1 << vf) | + (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + else + reg_val &= ~((1 << vf) | + (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + wr32(reg_offset, reg_val); + + adapter->vf_data[vf].spoofchk_enabled = setting; + return 0; +} + +static int igb_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->vfs_allocated_count) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = adapter->vf_data[vf].pf_vlan; + ivi->qos = adapter->vf_data[vf].pf_qos; + ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; + return 0; +} + +static void igb_vmm_control(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + case e1000_i211: + case e1000_i354: + default: + /* replication is not supported for 82575 */ + return; + case e1000_82576: + /* notify HW that the MAC is adding vlan tags */ + reg = rd32(E1000_DTXCTL); + reg |= E1000_DTXCTL_VLAN_ADDED; + wr32(E1000_DTXCTL, reg); + /* Fall through */ + case e1000_82580: + /* enable replication vlan tag stripping */ + reg = rd32(E1000_RPLOLR); + reg |= E1000_RPLOLR_STRVLAN; + wr32(E1000_RPLOLR, reg); + /* Fall through */ + case e1000_i350: + /* none of the above registers are supported by i350 */ + break; + } + + if (adapter->vfs_allocated_count) { + igb_vmdq_set_loopback_pf(hw, true); + igb_vmdq_set_replication_pf(hw, true); + igb_vmdq_set_anti_spoofing_pf(hw, true, + adapter->vfs_allocated_count); + } else { + igb_vmdq_set_loopback_pf(hw, false); + igb_vmdq_set_replication_pf(hw, false); + } +} + +static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) +{ + struct e1000_hw *hw = &adapter->hw; + u32 dmac_thr; + u16 hwm; + + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + u32 reg; + + /* force threshold to 0. 
*/ + wr32(E1000_DMCTXTH, 0); + + /* DMA Coalescing high water mark needs to be greater + * than the Rx threshold. Set hwm to PBA - max frame + * size in 16B units, capping it at PBA - 6KB. + */ + hwm = 64 * pba - adapter->max_frame_size / 16; + if (hwm < 64 * (pba - 6)) + hwm = 64 * (pba - 6); + reg = rd32(E1000_FCRTC); + reg &= ~E1000_FCRTC_RTH_COAL_MASK; + reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) + & E1000_FCRTC_RTH_COAL_MASK); + wr32(E1000_FCRTC, reg); + + /* Set the DMA Coalescing Rx threshold to PBA - 2 * max + * frame size, capping it at PBA - 10KB. + */ + dmac_thr = pba - adapter->max_frame_size / 512; + if (dmac_thr < pba - 10) + dmac_thr = pba - 10; + reg = rd32(E1000_DMACR); + reg &= ~E1000_DMACR_DMACTHR_MASK; + reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + + /* Disable BMC-to-OS Watchdog Enable */ + if (hw->mac.type != e1000_i354) + reg &= ~E1000_DMACR_DC_BMC2OSW_EN; + + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable + * coalescing(smart fifb)-UTRESH=0 + */ + wr32(E1000_DMCRTRH, 0); + + reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4); + + wr32(E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from + * DMA coal + */ + wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + + /* make low power state decision controlled + * by DMA coal + */ + reg = rd32(E1000_PCIEMISC); + reg &= ~E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* endif adapter->dmac is not disabled */ + } else if (hw->mac.type == e1000_82580) { + u32 reg = rd32(E1000_PCIEMISC); + + wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); + wr32(E1000_DMACR, 0); + } +} + +/** + * igb_read_i2c_byte - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @dev_addr: device address + * @data: value read + * + * Performs byte read operation over I2C interface at + * a specified device address. + **/ +s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); + struct i2c_client *this_client = adapter->i2c_client; + s32 status; + u16 swfw_mask = 0; + + if (!this_client) + return E1000_ERR_I2C; + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return E1000_ERR_SWFW_SYNC; + + status = i2c_smbus_read_byte_data(this_client, byte_offset); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + if (status < 0) + return E1000_ERR_I2C; + else { + *data = status; + return 0; + } +} + +/** + * igb_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
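+ *
+ * The SMBus access is bracketed by the PHY0 software/firmware
+ * semaphore so that the driver and firmware do not use the interface
+ * at the same time.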
+ **/
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = adapter->i2c_client;
+ s32 status;
+ u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+ return E1000_ERR_SWFW_SYNC;
+ status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status)
+ return E1000_ERR_I2C;
+ else
+ return 0;
+
+}
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_reset_interrupt_capability(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igb_open(netdev);
+
+ return err;
+}
+/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
new file mode 100644
index 000000000..c3a9392cb
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -0,0 +1,1182 @@
+/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
+ *
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/ptp_classify.h>
+
+#include "igb.h"
+
+#define INCVALUE_MASK 0x7fffffff
+#define ISGN 0x80000000
+
+/* The 82580 timesync updates the system timer every 8ns by 8ns,
+ * and this update value cannot be reprogrammed.
+ *
+ * Neither the 82576 nor the 82580 offer registers wide enough to hold
+ * nanoseconds time values for very long. For the 82580, SYSTIM always
+ * counts nanoseconds, but the upper 24 bits are not available. The
+ * frequency is adjusted by changing the 32 bit fractional nanoseconds
+ * register, TIMINCA.
+ *
+ * For the 82576, the SYSTIM register time unit is affected by the
+ * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
+ * field are needed to provide the nominal 16 nanosecond period,
+ * leaving 19 bits for fractional nanoseconds.
+ *
+ * We scale the NIC clock cycle by a large factor so that relatively
+ * small clock corrections can be added or subtracted at each clock
+ * tick. The drawbacks of a large factor are a) that the clock
+ * register overflows more quickly (not such a big deal) and b) that
+ * the increment per tick has to fit into 24 bits. As a result we
+ * need to use a shift of 19 so we can fit a value of 16 into the
+ * TIMINCA register.
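+ *
+ * With the nominal increment of 16 << 19 = 2^23 units, a frequency
+ * adjustment of ppb parts per billion corresponds to a change of
+ * 2^23 * ppb / 10^9 units. Because 10^9 = 2^9 * 1953125, this is the
+ * (ppb << 14) / 1953125 value computed in igb_ptp_adjfreq_82576()
+ * below; the 82580 variant uses a shift of 26 for its 32 bit
+ * fractional nanoseconds register.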
+ * + * + * SYSTIMH SYSTIML + * +--------------+ +---+---+------+ + * 82576 | 32 | | 8 | 5 | 19 | + * +--------------+ +---+---+------+ + * \________ 45 bits _______/ fract + * + * +----------+---+ +--------------+ + * 82580 | 24 | 8 | | 32 | + * +----------+---+ +--------------+ + * reserved \______ 40 bits _____/ + * + * + * The 45 bit 82576 SYSTIM overflows every + * 2^45 * 10^-9 / 3600 = 9.77 hours. + * + * The 40 bit 82580 SYSTIM overflows every + * 2^40 * 10^-9 / 60 = 18.3 minutes. + */ + +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define IGB_PTP_TX_TIMEOUT (HZ * 15) +#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) +#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 + +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); + +/* SYSTIM read access for the 82576 */ +static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; + + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for the 82580 */ +static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) +{ + struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); + struct e1000_hw *hw = &igb->hw; + u32 lo, hi; + u64 val; + + /* The timestamp latches on lowest register read. For the 82580 + * the lowest register is SYSTIMR instead of SYSTIML. However we only + * need to provide nanosecond resolution, so we just ignore it. + */ + rd32(E1000_SYSTIMR); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + val = ((u64) hi) << 32; + val |= lo; + + return val; +} + +/* SYSTIM read access for I210/I211 */ +static void igb_ptp_read_i210(struct igb_adapter *adapter, + struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + u32 sec, nsec; + + /* The timestamp latches on lowest register read. For I210/I211, the + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ + rd32(E1000_SYSTIMR); + nsec = rd32(E1000_SYSTIML); + sec = rd32(E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igb_ptp_write_i210(struct igb_adapter *adapter, + const struct timespec64 *ts) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. + */ + wr32(E1000_SYSTIML, ts->tv_nsec); + wr32(E1000_SYSTIMH, ts->tv_sec); +} + +/** + * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. + * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here have extended the system time with an overflow + * counter in software. 
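+ *
+ * On 82576/82580/i350/i354 the raw SYSTIM value is a cycle count that
+ * is converted through the timecounter; on i210/i211 the hardware
+ * already reports seconds in SYSTIMH and nanoseconds in SYSTIML, so
+ * the 64 bit value is simply split below.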
+ **/ +static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + unsigned long flags; + u64 ns; + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i354: + case e1000_i350: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); + break; + case e1000_i210: + case e1000_i211: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/* PTP clock operations */ +static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 incvalue; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 14; + rate = div_u64(rate, 1953125); + + incvalue = 16 << IGB_82576_TSYNC_SHIFT; + + if (neg_adj) + incvalue -= rate; + else + incvalue += rate; + + wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK)); + + return 0; +} + +static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; + u64 rate; + u32 inca; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 26; + rate = div_u64(rate, 1953125); + + inca = rate & INCVALUE_MASK; + if (neg_adj) + inca |= ISGN; + + wr32(E1000_TIMINCA, inca); + + return 0; +} + +static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + timecounter_adjtime(&igb->tc, delta); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + struct timespec64 now, then = ns_to_timespec64(delta); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, &now); + now = timespec64_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec64 *)&now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ns = timecounter_read(&igb->tc); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); 
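+	/* Aside on the 82580-style adjfreq scaling above: (ppb << 26) / 1953125
+	 * equals ppb * 2^35 / 10^9, i.e. the requested offset expressed in
+	 * 2^-32 ns steps per 8 ns SYSTIM update -- assuming that is indeed the
+	 * resolution of the TIMINCA fractional field on those parts.
+	 */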
+ unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + timecounter_init(&igb->tc, &igb->cc, ns); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_write_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) +{ + u32 *ptr = pin < 2 ? ctrl : ctrl_ext; + static const u32 mask[IGB_N_SDP] = { + E1000_CTRL_SDP0_DIR, + E1000_CTRL_SDP1_DIR, + E1000_CTRL_EXT_SDP2_DIR, + E1000_CTRL_EXT_SDP3_DIR, + }; + + if (input) + *ptr &= ~mask[pin]; + else + *ptr |= mask[pin]; +} + +static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 1, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an output. */ + tssdp &= ~ts_sdp_en[pin]; + + if (chan == 1) { + tssdp &= ~AUX1_SEL_SDP3; + tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN; + } else { + tssdp &= ~AUX0_SEL_SDP3; + tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN; + } + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) +{ + static const u32 aux0_sel_sdp[IGB_N_SDP] = { + AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, + }; + static const u32 aux1_sel_sdp[IGB_N_SDP] = { + AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, + }; + static const u32 ts_sdp_en[IGB_N_SDP] = { + TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, + }; + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, + TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, + }; + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, + TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, + }; + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; + struct e1000_hw *hw = &igb->hw; + u32 ctrl, ctrl_ext, tssdp = 0; + + ctrl = rd32(E1000_CTRL); + ctrl_ext = rd32(E1000_CTRL_EXT); + tssdp = rd32(E1000_TSSDP); + + igb_pin_direction(pin, 0, &ctrl, &ctrl_ext); + + /* Make sure this pin is not enabled as an input. 
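+	 * That means detaching it from whichever auxiliary timestamp channel
+	 * (AUX0/AUX1) currently has it selected, before driving it from a
+	 * target time register instead.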
*/ + if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin]) + tssdp &= ~AUX0_TS_SDP_EN; + + if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin]) + tssdp &= ~AUX1_TS_SDP_EN; + + tssdp &= ~ts_sdp_sel_clr[pin]; + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + + tssdp |= ts_sdp_en[pin]; + + wr32(E1000_TSSDP, tssdp); + wr32(E1000_CTRL, ctrl); + wr32(E1000_CTRL_EXT, ctrl_ext); +} + +static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct igb_adapter *igb = + container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh; + unsigned long flags; + struct timespec ts; + int pin = -1; + s64 ns; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + } + if (rq->extts.index == 1) { + tsauxc_mask = TSAUXC_EN_TS1; + tsim_mask = TSINTR_AUTT1; + } else { + tsauxc_mask = TSAUXC_EN_TS0; + tsim_mask = TSINTR_AUTT0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + igb_pin_extts(igb, rq->extts.index, pin); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PEROUT: + if (on) { + pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + } + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec_to_ns(&ts); + ns = ns >> 1; + if (on && ns < 500000LL) { + /* 2k interrupts per second is an awful lot. 
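+			 * Rejecting half periods below 500 us caps the rate
+			 * at 2000 events per second.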
*/ + return -EINVAL; + } + ts = ns_to_timespec(ns); + if (rq->perout.index == 1) { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + trgttiml = E1000_TRGTTIML1; + trgttimh = E1000_TRGTTIMH1; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + trgttiml = E1000_TRGTTIML0; + trgttimh = E1000_TRGTTIMH0; + } + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsauxc = rd32(E1000_TSAUXC); + tsim = rd32(E1000_TSIM); + if (on) { + int i = rq->perout.index; + + igb_pin_perout(igb, i, pin); + igb->perout[i].start.tv_sec = rq->perout.start.sec; + igb->perout[i].start.tv_nsec = rq->perout.start.nsec; + igb->perout[i].period.tv_sec = ts.tv_sec; + igb->perout[i].period.tv_nsec = ts.tv_nsec; + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); + tsauxc |= tsauxc_mask; + tsim |= tsim_mask; + } else { + tsauxc &= ~tsauxc_mask; + tsim &= ~tsim_mask; + } + wr32(E1000_TSAUXC, tsauxc); + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&igb->tmreg_lock, flags); + tsim = rd32(E1000_TSIM); + if (on) + tsim |= TSINTR_SYS_WRAP; + else + tsim &= ~TSINTR_SYS_WRAP; + wr32(E1000_TSIM, tsim); + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + return 0; + } + + return -EOPNOTSUPP; +} + +static int igb_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +/** + * igb_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + **/ +static void igb_ptp_tx_work(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); + struct e1000_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!adapter->ptp_tx_skb) + return; + + if (time_is_before_jiffies(adapter->ptp_tx_start + + IGB_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + adapter->tx_hwtstamp_timeouts++; + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); + return; + } + + tsynctxctl = rd32(E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else + /* reschedule to check later */ + schedule_work(&adapter->ptp_tx_work); +} + +static void igb_ptp_overflow_check(struct work_struct *work) +{ + struct igb_adapter *igb = + container_of(work, struct igb_adapter, ptp_overflow_work.work); + struct timespec64 ts; + + igb->ptp_caps.gettime64(&igb->ptp_caps, &ts); + + pr_debug("igb overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} + +/** + * igb_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. 
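+ * Recovery is cheap: a read of RXSTMPH releases the latched registers,
+ * which is what the body below does once a latched value has gone
+ * unconsumed for five seconds.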
+ **/ +void igb_ptp_rx_hang(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); + unsigned long rx_event; + + if (hw->mac.type != e1000_82576) + return; + + /* If we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + if (time_after(adapter->last_rx_timestamp, rx_event)) + rx_event = adapter->last_rx_timestamp; + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(E1000_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + adapter->rx_hwtstamp_cleared++; + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); + } +} + +/** + * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure. + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. + **/ +static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * @q_vector: Pointer to interrupt specific structure + * @va: Pointer to address containing Rx buffer + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8. + **/ +void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, + unsigned char *va, + struct sk_buff *skb) +{ + __le64 *regval = (__le64 *)va; + + /* The timestamp is recorded in little endian format. + * DWORD: 0 1 2 3 + * Field: Reserved Reserved SYSTIML SYSTIMH + */ + igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), + le64_to_cpu(regval[1])); +} + +/** + * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register + * @q_vector: Pointer to interrupt specific structure + * @skb: Buffer containing timestamp and packet + * + * This function is meant to retrieve a timestamp from the internal registers + * of the adapter and store it in the skb. + **/ +void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, + struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + u64 regval; + + /* If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. 
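+	 * Reading RXSTMPL and then RXSTMPH below also re-arms the latch for
+	 * the next packet, and the last_rx_timestamp update afterwards feeds
+	 * the igb_ptp_rx_hang() watchdog above.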
+ */ + if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + adapter->last_rx_timestamp = jiffies; +} + +/** + * igb_ptp_get_ts_config - get hardware time stamping config + * @netdev: + * @ifreq: + * + * Get the hwtstamp_config settings to return to the user. Rather than attempt + * to deconstruct the settings from the registers, just return a shadow copy + * of the last known settings. + **/ +int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * igb_ptp_set_timestamp_mode - setup hardware for timestamping + * @adapter: networking device structure + * @config: hwtstamp configuration + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't case any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + */ +static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + /* 82576 cannot timestamp all packets, which it needs to do to + * support both V1 Sync and Delay_Req messages + */ + if (hw->mac.type != e1000_82576) { + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + 
break; + } + /* fall through */ + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one Rx filter was configured. + */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + is_l2 = true; + is_l4 = true; + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + regval = rd32(E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; + wr32(E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ + regval = rd32(E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = rd32(E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(3), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(3), 0); + + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), htons(PTP_EV_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), htons(PTP_EV_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } + wrfl(); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = rd32(E1000_TXSTMPL); + regval = rd32(E1000_TXSTMPH); + regval = rd32(E1000_RXSTMPL); + regval = rd32(E1000_RXSTMPH); + + return 0; +} + +/** + * igb_ptp_set_ts_config - set hardware time stamping config + * @netdev: + * @ifreq: + * + **/ +int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = igb_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +void igb_ptp_init(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int i; + + switch (hw->mac.type) { + case e1000_82576: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 999999881; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82576; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + /* Dial the nominal frequency. */ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_feature_enable; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; + /* Enable the timer functions by clearing bit 31. */ + wr32(E1000_TSAUXC, 0x0); + break; + case e1000_i210: + case e1000_i211: + for (i = 0; i < IGB_N_SDP; i++) { + struct ptp_pin_desc *ppd = &adapter->sdp_config[i]; + + snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i); + ppd->index = i; + ppd->func = PTP_PF_NONE; + } + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS; + adapter->ptp_caps.n_per_out = IGB_N_PEROUT; + adapter->ptp_caps.n_pins = IGB_N_SDP; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.pin_config = adapter->sdp_config; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210; + adapter->ptp_caps.settime64 = igb_ptp_settime_i210; + adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; + adapter->ptp_caps.verify = igb_ptp_verify_pin; + /* Enable the timer functions by clearing bit 31. */ + wr32(E1000_TSAUXC, 0x0); + break; + default: + adapter->ptp_clock = NULL; + return; + } + + wrfl(); + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + /* Initialize the clock and overflow work for devices that need it. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + + igb_ptp_settime_i210(&adapter->ptp_caps, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); + } + + /* Initialize the time sync interrupts for devices that support it. 
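+	 * Once ptp_clock_register() further down succeeds, the clock shows up
+	 * in user space as /dev/ptpN; a minimal, purely illustrative reader,
+	 * assuming the usual CLOCKFD convention from the kernel's testptp
+	 * example, would be:
+	 *
+	 *	int fd = open("/dev/ptp0", O_RDWR);
+	 *	clockid_t clkid = (~(clockid_t)fd << 3) | 3;
+	 *	struct timespec ts;
+	 *
+	 *	clock_gettime(clkid, &ts);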
*/ + if (hw->mac.type >= e1000_82580) { + wr32(E1000_TSIM, TSYNC_INTERRUPTS); + wr32(E1000_IMS, E1000_IMS_TS); + } + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); + } else { + dev_info(&adapter->pdev->dev, "added PHC on %s\n", + adapter->netdev->name); + adapter->flags |= IGB_FLAG_PTP; + } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i354: + case e1000_i350: + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + break; + case e1000_i210: + case e1000_i211: + /* No delayed work to cancel. */ + break; + default: + return; + } + + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); + } + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + dev_info(&adapter->pdev->dev, "removed PHC on %s\n", + adapter->netdev->name); + adapter->flags &= ~IGB_FLAG_PTP; + } +} + +/** + * igb_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. + * + * This function handles the reset work required to re-enable the PTP device. + **/ +void igb_ptp_reset(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + + if (!(adapter->flags & IGB_FLAG_PTP)) + return; + + /* reset the tstamp_config */ + igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. */ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: + case e1000_i354: + case e1000_i350: + case e1000_i210: + case e1000_i211: + wr32(E1000_TSAUXC, 0x0); + wr32(E1000_TSSDP, 0x0); + wr32(E1000_TSIM, TSYNC_INTERRUPTS); + wr32(E1000_IMS, E1000_IMS_TS); + break; + default: + /* No work to do. */ + goto out; + } + + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); + + igb_ptp_write_i210(adapter, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } +out: + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile new file mode 100644 index 000000000..044b0ad5f --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -0,0 +1,38 @@ +################################################################################ +# +# Intel(R) 82576 Virtual Function Linux driver +# Copyright(c) 2009 - 2012 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. 
+# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82576 VF ethernet driver +# + +obj-$(CONFIG_IGBVF) += igbvf.o + +igbvf-objs := vf.o \ + mbx.o \ + ethtool.o \ + netdev.o + diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h new file mode 100644 index 000000000..ae3f28332 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -0,0 +1,120 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* IVAR valid bit */ +#define E1000_IVAR_VALID 0x80 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Device Control */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */ + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* Additional Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback 
RO bit */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c new file mode 100644 index 000000000..c6996feb1 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -0,0 +1,476 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for igbvf */ + +#include +#include +#include +#include +#include + +#include "igbvf.h" +#include + +struct igbvf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int base_stat_offset; +}; + +#define IGBVF_STAT(current, base) \ + sizeof(((struct igbvf_adapter *)0)->current), \ + offsetof(struct igbvf_adapter, current), \ + offsetof(struct igbvf_adapter, base) + +static const struct igbvf_stats igbvf_gstrings_stats[] = { + { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) }, + { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) }, + { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) }, + { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) }, + { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) }, + { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) }, + { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) }, + { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) }, + { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) }, + { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) }, + { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) }, +}; + +#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats) + +static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)" +}; + +#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) + +static int igbvf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status; + + ecmd->supported = SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_1000baseT_Full; + + ecmd->port = -1; + ecmd->transceiver = XCVR_DUMMY1; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + ethtool_cmd_speed_set(ecmd, SPEED_1000); + else if (status & E1000_STATUS_SPEED_100) + ethtool_cmd_speed_set(ecmd, SPEED_100); + else + 
ethtool_cmd_speed_set(ecmd, SPEED_10); + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int igbvf_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ +} + +static int igbvf_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return -EOPNOTSUPP; +} + +static u32 igbvf_get_msglevel(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igbvf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igbvf_get_regs_len(struct net_device *netdev) +{ +#define IGBVF_REGS_LEN 8 + return IGBVF_REGS_LEN * sizeof(u32); +} + +static void igbvf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + + memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RDLEN(0)); + regs_buff[3] = er32(RDH(0)); + regs_buff[4] = er32(RDT(0)); + + regs_buff[5] = er32(TDLEN(0)); + regs_buff[6] = er32(TDH(0)); + regs_buff[7] = er32(TDT(0)); +} + +static int igbvf_get_eeprom_len(struct net_device *netdev) +{ + return 0; +} + +static int igbvf_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static int igbvf_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, igbvf_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = igbvf_get_regs_len(netdev); + drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); +} + +static void igbvf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = IGBVF_MAX_RXD; + ring->tx_max_pending = IGBVF_MAX_TXD; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; +} + +static int igbvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *temp_ring; + int err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, 
IGBVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring->count) && + (new_rx_count == adapter->rx_ring->count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + adapter->tx_ring->count = new_tx_count; + adapter->rx_ring->count = new_rx_count; + goto clear_reset; + } + + temp_ring = vmalloc(sizeof(struct igbvf_ring)); + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igbvf_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + if (new_tx_count != adapter->tx_ring->count) { + memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_tx_count; + err = igbvf_setup_tx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_tx_resources(adapter->tx_ring); + + memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring)); + } + + if (new_rx_count != adapter->rx_ring->count) { + memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_rx_count; + err = igbvf_setup_rx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_rx_resources(adapter->rx_ring); + + memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); + } +err_setup: + igbvf_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGBVF_RESETTING, &adapter->state); + return err; +} + +static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + + hw->mac.ops.check_for_link(hw); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + + return *data; +} + +static void igbvf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + set_bit(__IGBVF_TESTING, &adapter->state); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igbvf_link_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__IGBVF_TESTING, &adapter->state); + msleep_interruptible(4 * 1000); +} + +static void igbvf_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; +} + +static int igbvf_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + return -EOPNOTSUPP; +} + +static int igbvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (adapter->requested_itr <= 3) + ec->rx_coalesce_usecs = adapter->requested_itr; + else + ec->rx_coalesce_usecs = adapter->current_itr >> 2; + + return 0; +} + +static int igbvf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && + (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { + adapter->current_itr = ec->rx_coalesce_usecs << 2; + adapter->requested_itr = 1000000000 / + (adapter->current_itr * 256); + } else if ((ec->rx_coalesce_usecs == 3) || + (ec->rx_coalesce_usecs == 2)) { + adapter->current_itr = IGBVF_START_ITR; + adapter->requested_itr = ec->rx_coalesce_usecs; + } else if (ec->rx_coalesce_usecs == 0) { + /* The user's desire is to turn off 
interrupt throttling + * altogether, but due to HW limitations, we can't do that. + * Instead we set a very small value in EITR, which would + * allow ~967k interrupts per second, but allow the adapter's + * internal clocking to still function properly. + */ + adapter->current_itr = 4; + adapter->requested_itr = 1000000000 / + (adapter->current_itr * 256); + } else { + return -EINVAL; + } + + writel(adapter->current_itr, + hw->hw_addr + adapter->rx_ring->itr_register); + + return 0; +} + +static int igbvf_nway_reset(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igbvf_reinit_locked(adapter); + return 0; +} + +static void igbvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int i; + + igbvf_update_stats(adapter); + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter + + igbvf_gstrings_stats[i].stat_offset; + char *b = (char *)adapter + + igbvf_gstrings_stats[i].base_stat_offset; + data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); + } +} + +static int igbvf_get_sset_count(struct net_device *dev, int stringset) +{ + switch (stringset) { + case ETH_SS_TEST: + return IGBVF_TEST_LEN; + case ETH_SS_STATS: + return IGBVF_GLOBAL_STATS_LEN; + default: + return -EINVAL; + } +} + +static void igbvf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, igbvf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static const struct ethtool_ops igbvf_ethtool_ops = { + .get_settings = igbvf_get_settings, + .set_settings = igbvf_set_settings, + .get_drvinfo = igbvf_get_drvinfo, + .get_regs_len = igbvf_get_regs_len, + .get_regs = igbvf_get_regs, + .get_wol = igbvf_get_wol, + .set_wol = igbvf_set_wol, + .get_msglevel = igbvf_get_msglevel, + .set_msglevel = igbvf_set_msglevel, + .nway_reset = igbvf_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = igbvf_get_eeprom_len, + .get_eeprom = igbvf_get_eeprom, + .set_eeprom = igbvf_set_eeprom, + .get_ringparam = igbvf_get_ringparam, + .set_ringparam = igbvf_set_ringparam, + .get_pauseparam = igbvf_get_pauseparam, + .set_pauseparam = igbvf_set_pauseparam, + .self_test = igbvf_diag_test, + .get_sset_count = igbvf_get_sset_count, + .get_strings = igbvf_get_strings, + .get_ethtool_stats = igbvf_get_ethtool_stats, + .get_coalesce = igbvf_get_coalesce, + .set_coalesce = igbvf_set_coalesce, +}; + +void igbvf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igbvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h new file mode 100644 index 000000000..f166baab8 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -0,0 +1,322 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _IGBVF_H_ +#define _IGBVF_H_ + +#include +#include +#include +#include +#include + +#include "vf.h" + +/* Forward declarations */ +struct igbvf_info; +struct igbvf_adapter; + +/* Interrupt defines */ +#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ +#define IGBVF_4K_ITR 980 +#define IGBVF_20K_ITR 196 +#define IGBVF_70K_ITR 56 + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/* Interrupt modes, as used by the IntMode parameter */ +#define IGBVF_INT_MODE_LEGACY 0 +#define IGBVF_INT_MODE_MSI 1 +#define IGBVF_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define IGBVF_DEFAULT_TXD 256 +#define IGBVF_MAX_TXD 4096 +#define IGBVF_MIN_TXD 80 + +#define IGBVF_DEFAULT_RXD 256 +#define IGBVF_MAX_RXD 4096 +#define IGBVF_MIN_RXD 80 + +#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* RX descriptor control thresholds. + * PTHRESH - MAC will consider prefetch if it has fewer than this number of + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. + * HTHRESH - MAC will only prefetch if there are at least this many descriptors + * available in host memory. + * If PTHRESH is 0, this should also be 0. + * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back + * descriptors until either it has this many to write back, or the + * ITR timer expires. + */ +#define IGBVF_RX_PTHRESH 16 +#define IGBVF_RX_HTHRESH 8 +#define IGBVF_RX_WTHRESH 1 + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define IGBVF_TX_QUEUE_WAKE 32 +/* How many Rx Buffers do we bundle into one write to the hardware ? 
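+ * IGBVF_RX_BUFFER_WRITE of them, so the Rx refill path does not have to
+ * bump the tail register for every single descriptor it replenishes.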
*/ +#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define IGBVF_EEPROM_APME 0x0400 + +#define IGBVF_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +enum igbvf_boards { + board_vf, + board_i350_vf, +}; + +struct igbvf_queue_stats { + u64 packets; + u64 bytes; +}; + +/* wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igbvf_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + union e1000_adv_tx_desc *next_to_watch; + u16 length; + u16 mapped_as_page; + }; + /* Rx */ + struct { + struct page *page; + u64 page_dma; + unsigned int page_offset; + }; + }; +}; + +union igbvf_desc { + union e1000_adv_rx_desc rx_desc; + union e1000_adv_tx_desc tx_desc; + struct e1000_adv_tx_context_desc tx_context_desc; +}; + +struct igbvf_ring { + struct igbvf_adapter *adapter; /* backlink */ + union igbvf_desc *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct igbvf_buffer *buffer_info; + struct napi_struct napi; + + char name[IFNAMSIZ + 5]; + u32 eims_value; + u32 itr_val; + enum latency_range itr_range; + u16 itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; + + struct igbvf_queue_stats stats; +}; + +/* board specific private data structure */ +struct igbvf_adapter { + struct timer_list watchdog_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct igbvf_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u32 polling_interval; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + + spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 requested_itr; /* ints/sec or adaptive */ + u32 current_itr; /* Actual ITR register value, not ints/sec */ + + /* Tx */ + struct igbvf_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + unsigned int restart_queue; + u32 txd_cmd; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* Rx */ + struct igbvf_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_hdr_size; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + spinlock_t stats_lock; /* prevent concurrent stats updates */ + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + /* The VF counters don't clear on read so we have to get a base + * count on driver start up and always subtract that base on + * on the first update, thus the flag.. 
+ */ + struct e1000_vf_stats stats; + u64 zero_base; + + struct igbvf_ring test_tx_ring; + struct igbvf_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + struct msix_entry *msix_entries; + int int_mode; + u32 eims_enable_mask; + u32 eims_other; + u32 int_counter0; + u32 int_counter1; + + u32 eeprom_wol; + u32 wol; + u32 pba; + + bool fc_autoneg; + + unsigned long led_status; + + unsigned int flags; + unsigned long last_reset; +}; + +struct igbvf_info { + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); +}; + +/* hardware capability, feature, and workaround flags */ +#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) +#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) +#define IGBVF_RX_DESC_ADV(R, i) \ + (&((((R).desc))[i].rx_desc)) +#define IGBVF_TX_DESC_ADV(R, i) \ + (&((((R).desc))[i].tx_desc)) +#define IGBVF_TX_CTXTDESC_ADV(R, i) \ + (&((((R).desc))[i].tx_context_desc)) + +enum igbvf_state_t { + __IGBVF_TESTING, + __IGBVF_RESETTING, + __IGBVF_DOWN +}; + +extern char igbvf_driver_name[]; +extern const char igbvf_driver_version[]; + +void igbvf_check_options(struct igbvf_adapter *); +void igbvf_set_ethtool_ops(struct net_device *); + +int igbvf_up(struct igbvf_adapter *); +void igbvf_down(struct igbvf_adapter *); +void igbvf_reinit_locked(struct igbvf_adapter *); +int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); +int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); +void igbvf_free_rx_resources(struct igbvf_ring *); +void igbvf_free_tx_resources(struct igbvf_ring *); +void igbvf_update_stats(struct igbvf_adapter *); + +extern unsigned int copybreak; + +#endif /* _IGBVF_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c new file mode 100644 index 000000000..7b6cb4c37 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -0,0 +1,349 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "mbx.h" + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 e1000_poll_for_msg(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message acknowledgment + **/ +static s32 e1000_poll_for_ack(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if we either can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg*/ + ret_val = mbx->ops.write(hw, msg, size); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw); +out: + return ret_val; +} + +/** + * e1000_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. 
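The poll helpers and posted read/write wrappers above are what give the VF mailbox its request/response behaviour: a posted write is an ordinary write followed by polling for the PF's ACK, and a posted read waits for the PF-status bit before copying the message out. The fragment below is a minimal sketch of how a caller strings the two together. The helper name igbvf_post_request() and the "reply overwrites the request buffer" convention are illustrative assumptions, not code from this patch; only the ops themselves and the E1000_VT_MSGTYPE_NACK bit (defined in mbx.h further down) come from the driver.

/* Illustrative only: a synchronous VF->PF exchange built on the posted
 * mailbox ops wired up in e1000_init_mbx_params_vf().  The helper name
 * and the reply-in-place convention are assumptions for this sketch.
 */
static s32 igbvf_post_request(struct e1000_hw *hw, u32 *msgbuf, u16 size)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 err;

	/* write the request, then poll up to timeout * usec_delay for ACK */
	err = mbx->ops.write_posted(hw, msgbuf, size);
	if (err)
		return err;

	/* wait for the PF's reply and copy it back over msgbuf */
	err = mbx->ops.read_posted(hw, msgbuf, size);
	if (err)
		return err;

	/* the PF echoes the message type with ACK or NACK or'd in */
	if (msgbuf[0] & E1000_VT_MSGTYPE_NACK)
		return -E1000_ERR_MBX;

	return E1000_SUCCESS;
}

The VF-side helpers elsewhere in the driver (vf.c) are built on this same write_posted-then-read_posted shape.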
+ **/ +static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) +{ + u32 v2p_mailbox = er32(V2PMAILBOX(0)); + + v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; + hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * e1000_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) +{ + u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); + s32 ret_val = -E1000_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = E1000_SUCCESS; + + hw->dev_spec.vf.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * e1000_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * + * returns true if the PF has set the reset done bit or else false + **/ +static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | + E1000_V2PMAILBOX_RSTI))) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + /* Take ownership of the buffer */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for VF use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = e1000_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush any ack or msg as we are going to overwrite mailbox */ + e1000_check_for_ack_vf(hw); + e1000_check_for_msg_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_ew32(VMBMEM(0), i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * e1000_read_mbx_vf - Reads a message from the inbox intended for VF + * 
@hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully read message from buffer + **/ +static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = e1000_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = array_er32(VMBMEM(0), i); + + /* Acknowledge receipt and release mailbox, then we're done */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return err; +} + +/** + * e1000_init_mbx_params_vf - set initial values for VF mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for VF mailbox + */ +s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to being communications + */ + mbx->timeout = 0; + mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_vf; + mbx->ops.write = e1000_write_mbx_vf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_vf; + mbx->ops.check_for_ack = e1000_check_for_ack_vf; + mbx->ops.check_for_rst = e1000_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return E1000_SUCCESS; +} diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h new file mode 100644 index 000000000..f800bf8ee --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -0,0 +1,74 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "vf.h" + +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 + +/* We have a total wait time of 1s for vf mailbox posted messages */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ + +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_vf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c new file mode 100644 index 000000000..95af14e13 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -0,0 +1,2920 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "igbvf.h" + +#define DRV_VERSION "2.0.2-k" +char igbvf_driver_name[] = "igbvf"; +const char igbvf_driver_version[] = DRV_VERSION; +static const char igbvf_driver_string[] = + "Intel(R) Gigabit Virtual Function Network Driver"; +static const char igbvf_copyright[] = + "Copyright (c) 2009 - 2012 Intel Corporation."; + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static int igbvf_poll(struct napi_struct *napi, int budget); +static void igbvf_reset(struct igbvf_adapter *); +static void igbvf_set_interrupt_capability(struct igbvf_adapter *); +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); + +static struct igbvf_info igbvf_vf_info = { + .mac = e1000_vfadapt, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, +}; + +static struct igbvf_info igbvf_i350_vf_info = { + .mac = e1000_vfadapt_i350, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, +}; + +static const struct igbvf_info *igbvf_info_tbl[] = { + [board_vf] = &igbvf_vf_info, + [board_i350_vf] = &igbvf_i350_vf_info, +}; + +/** + * igbvf_desc_unused - calculate if we have unused descriptors + * @rx_ring: address of receive ring structure + **/ +static int igbvf_desc_unused(struct igbvf_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * igbvf_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void igbvf_receive_skb(struct igbvf_adapter *adapter, + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) +{ + u16 vid; + + if (status & E1000_RXD_STAT_VP) { + if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) && + (status & E1000_RXDEXT_STATERR_LB)) + vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + else + vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + if (test_bit(vid, adapter->active_vlans)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_receive(&adapter->rx_ring->napi, skb); +} + +static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, + u32 status_err, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Ignore Checksum bit is set or checksum is disabled through ethtool */ + if ((status_err & E1000_RXD_STAT_IXSM) || + (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) + return; + + /* TCP/UDP checksum error bit is set */ + if (status_err & + (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + adapter->hw_csum_good++; +} + +/** + * 
igbvf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: address of ring structure to repopulate + * @cleaned_count: number of buffers to repopulate + **/ +static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, + int cleaned_count) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc; + struct igbvf_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + int bufsz; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + if (adapter->rx_ps_hdr_size) + bufsz = adapter->rx_ps_hdr_size; + else + bufsz = adapter->rx_buffer_len; + + while (cleaned_count--) { + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + + if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (!buffer_info->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + buffer_info->page_offset = 0; + } else { + buffer_info->page_offset ^= PAGE_SIZE / 2; + } + buffer_info->page_dma = + dma_map_page(&pdev->dev, buffer_info->page, + buffer_info->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + buffer_info->page_dma)) { + __free_page(buffer_info->page); + buffer_info->page = NULL; + dev_err(&pdev->dev, "RX DMA map failed\n"); + break; + } + } + + if (!buffer_info->skb) { + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + if (!skb) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + bufsz, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + dev_err(&pdev->dev, "RX DMA map failed\n"); + goto no_buffers; + } + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. + */ + if (adapter->rx_ps_hdr_size) { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); + } else { + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); + rx_desc->read.hdr_addr = 0; + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i == 0) + i = (rx_ring->count - 1); + else + i--; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * igbvf_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, + int *work_done, int work_to_do) +{ + struct igbvf_ring *rx_ring = adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc, *next_rxd; + struct igbvf_buffer *buffer_info, *next_buffer; + struct sk_buff *skb; + bool cleaned = false; + int cleaned_count = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i; + u32 length, hlen, staterr; + + i = rx_ring->next_to_clean; + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + buffer_info = &rx_ring->buffer_info[i]; + + /* HW will not DMA in data larger than the given buffer, even + * if it parses the (NFS, of course) header to be larger. In + * that case, it fills the header buffer and spills the rest + * into the page. + */ + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) + & E1000_RXDADV_HDRBUFLEN_MASK) >> + E1000_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > adapter->rx_ps_hdr_size) + hlen = adapter->rx_ps_hdr_size; + + length = le16_to_cpu(rx_desc->wb.upper.length); + cleaned = true; + cleaned_count++; + + skb = buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + buffer_info->skb = NULL; + if (!adapter->rx_ps_hdr_size) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + skb_put(skb, length); + goto send_up; + } + + if (!skb_shinfo(skb)->nr_frags) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_hdr_size, + DMA_FROM_DEVICE); + skb_put(skb, hlen); + } + + if (length) { + dma_unmap_page(&pdev->dev, buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + buffer_info->page_dma = 0; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + buffer_info->page, + buffer_info->page_offset, + length); + + if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || + (page_count(buffer_info->page) != 1)) + buffer_info->page = NULL; + else + get_page(buffer_info->page); + + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE / 2; + } +send_up: + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + next_buffer = &rx_ring->buffer_info[i]; + + if (!(staterr & E1000_RXD_STAT_EOP)) { + buffer_info->skb = next_buffer->skb; + buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + total_bytes += skb->len; + total_packets++; + + igbvf_rx_checksum_adv(adapter, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + + igbvf_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + 
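Both the mid-loop refill above (every IGBVF_RX_BUFFER_WRITE completed descriptors) and the final top-up a few lines further down lean on igbvf_desc_unused(), defined near the top of this file. It is plain ring arithmetic: count how many slots are free while always holding one slot in reserve, so a completely full ring can be told apart from an empty one. A standalone model (plain C, outside the kernel) with a 256-entry ring:

#include <assert.h>

/* Userspace model of igbvf_desc_unused(): free slots in a ring of 'count'
 * descriptors, with one slot always kept back as the empty/full marker. */
static int desc_unused(int count, int next_to_clean, int next_to_use)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;
	return count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	assert(desc_unused(256, 0, 0) == 255);    /* empty ring: count - 1 free */
	assert(desc_unused(256, 10, 26) == 239);  /* 16 buffers outstanding     */
	assert(desc_unused(256, 250, 5) == 244);  /* producer wrapped past end  */
	return 0;
}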
/* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = igbvf_desc_unused(rx_ring); + + if (cleaned_count) + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + + adapter->total_rx_packets += total_packets; + adapter->total_rx_bytes += total_bytes; + adapter->net_stats.rx_bytes += total_bytes; + adapter->net_stats.rx_packets += total_packets; + return cleaned; +} + +static void igbvf_put_txbuf(struct igbvf_adapter *adapter, + struct igbvf_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +/** + * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct igbvf_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->adapter = adapter; + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info) + goto err; + + desc_len = sizeof(union e1000_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + rx_ring->adapter = adapter; + + return 0; + +err: + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct igbvf_buffer *buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + igbvf_put_txbuf(adapter, 
buffer_info); + } + + size = sizeof(struct igbvf_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * igbvf_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: ring to free resources from + * + * Free all transmit software resources + **/ +void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = tx_ring->adapter->pdev; + + igbvf_clean_tx_ring(tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igbvf_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct igbvf_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + if (!rx_ring->buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->rx_ps_hdr_size) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_hdr_size, + DMA_FROM_DEVICE); + } else { + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + } + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + if (buffer_info->page) { + if (buffer_info->page_dma) + dma_unmap_page(&pdev->dev, + buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + put_page(buffer_info->page); + buffer_info->page = NULL; + buffer_info->page_dma = 0; + buffer_info->page_offset = 0; + } + } + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * igbvf_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ + +void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = rx_ring->adapter->pdev; + + igbvf_clean_rx_ring(rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * igbvf_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte counts during the last + * interrupt. The advantage of per interrupt computation is faster updates + * and more accurate ITR for the current traffic pattern. Constants in this + * function were computed based on theoretical maximum wire speed and thresholds + * were set based on testing data as well as attempting to minimize response + * time while increasing bulk throughput. 
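The kernel-doc above belongs to igbvf_update_itr(), which classifies each interrupt interval as lowest_latency, low_latency or bulk_latency from its packet and byte counts; igbvf_range_to_itr() then maps the range onto IGBVF_70K_ITR, IGBVF_20K_ITR or IGBVF_4K_ITR. The snippet below is a standalone restatement of just the lowest_latency branch (the other branches have the same shape with different thresholds), with a few illustrative data points; it is a reading aid, not driver code.

#include <assert.h>

enum latency_range { lowest_latency, low_latency, bulk_latency };

/* Model of the lowest_latency branch of igbvf_update_itr(): large average
 * frames push toward bulk_latency, a trickle of small frames toward
 * low_latency, everything else stays where it is. */
static enum latency_range update_from_lowest(int packets, int bytes)
{
	if (packets == 0)
		return lowest_latency;
	if (bytes / packets > 8000)
		return bulk_latency;
	if (packets < 5 && bytes > 512)
		return low_latency;
	return lowest_latency;
}

int main(void)
{
	assert(update_from_lowest(100, 900000) == bulk_latency);  /* ~9000 B/pkt */
	assert(update_from_lowest(3, 600) == low_latency);        /* small burst */
	assert(update_from_lowest(30, 45000) == lowest_latency);  /* 1500 B/pkt  */
	return 0;
}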
+ **/ +static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, + enum latency_range itr_setting, + int packets, int bytes) +{ + enum latency_range retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + default: + break; + } + +update_itr_done: + return retval; +} + +static int igbvf_range_to_itr(enum latency_range current_range) +{ + int new_itr; + + switch (current_range) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IGBVF_70K_ITR; + break; + case low_latency: + new_itr = IGBVF_20K_ITR; + break; + case bulk_latency: + new_itr = IGBVF_4K_ITR; + break; + default: + new_itr = IGBVF_START_ITR; + break; + } + return new_itr; +} + +static void igbvf_set_itr(struct igbvf_adapter *adapter) +{ + u32 new_itr; + + adapter->tx_ring->itr_range = + igbvf_update_itr(adapter, + adapter->tx_ring->itr_val, + adapter->total_tx_packets, + adapter->total_tx_bytes); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->requested_itr == 3 && + adapter->tx_ring->itr_range == lowest_latency) + adapter->tx_ring->itr_range = low_latency; + + new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); + + if (new_itr != adapter->tx_ring->itr_val) { + u32 current_itr = adapter->tx_ring->itr_val; + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > current_itr ? + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->tx_ring->itr_val = new_itr; + + adapter->tx_ring->set_itr = 1; + } + + adapter->rx_ring->itr_range = + igbvf_update_itr(adapter, adapter->rx_ring->itr_val, + adapter->total_rx_packets, + adapter->total_rx_bytes); + if (adapter->requested_itr == 3 && + adapter->rx_ring->itr_range == lowest_latency) + adapter->rx_ring->itr_range = low_latency; + + new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range); + + if (new_itr != adapter->rx_ring->itr_val) { + u32 current_itr = adapter->rx_ring->itr_val; + + new_itr = new_itr > current_itr ? 
+ min(current_itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->rx_ring->itr_val = new_itr; + + adapter->rx_ring->set_itr = 1; + } +} + +/** + * igbvf_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * + * returns true if ring is completely cleaned + **/ +static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct igbvf_buffer *buffer_info; + struct sk_buff *skb; + union e1000_adv_tx_desc *tx_desc, *eop_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i, count = 0; + bool cleaned = false; + + i = tx_ring->next_to_clean; + buffer_info = &tx_ring->buffer_info[i]; + eop_desc = buffer_info->next_to_watch; + + do { + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + buffer_info->next_to_watch = NULL; + + for (cleaned = false; !cleaned; count++) { + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + cleaned = (tx_desc == eop_desc); + skb = buffer_info->skb; + + if (skb) { + unsigned int segs, bytecount; + + /* gso_segs is currently only valid for tcp */ + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_packets += segs; + total_bytes += bytecount; + } + + igbvf_put_txbuf(adapter, buffer_info); + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + } + + eop_desc = buffer_info->next_to_watch; + } while (count < tx_ring->count); + + tx_ring->next_to_clean = i; + + if (unlikely(count && netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
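The accounting in the cleanup loop above charges a TSO frame as gso_segs packets and adds the replicated protocol headers once per extra segment: bytecount = (segs - 1) * skb_headlen(skb) + skb->len. With illustrative numbers, say gso_segs = 4, 66 bytes of headers and skb->len = 6066, the ring books 4 packets and 3 * 66 + 6066 = 6264 bytes, which approximates what actually goes on the wire once the hardware has copied the headers into every segment.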
+ */ + smp_mb(); + if (netif_queue_stopped(netdev) && + !(test_bit(__IGBVF_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + adapter->net_stats.tx_bytes += total_bytes; + adapter->net_stats.tx_packets += total_packets; + return count < tx_ring->count; +} + +static irqreturn_t igbvf_msix_other(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->int_counter1++; + + netif_carrier_off(netdev); + hw->mac.get_link_status = 1; + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + ew32(EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + + if (tx_ring->set_itr) { + writel(tx_ring->itr_val, + adapter->hw.hw_addr + tx_ring->itr_register); + adapter->tx_ring->set_itr = 0; + } + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + /* auto mask will automatically re-enable the interrupt when we write + * EICS + */ + if (!igbvf_clean_tx_irq(tx_ring)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(EICS, tx_ring->eims_value); + else + ew32(EIMS, tx_ring->eims_value); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + + adapter->int_counter0++; + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (adapter->rx_ring->set_itr) { + writel(adapter->rx_ring->itr_val, + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->rx_ring->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->rx_ring->napi); + } + + return IRQ_HANDLED; +} + +#define IGBVF_NO_QUEUE -1 + +static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, + int tx_queue, int msix_vector) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ivar, index; + + /* 82576 uses a table-based method for assigning vectors. + * Each queue has a single entry in the table to which we write + * a vector number along with a "valid" bit. Sadly, the layout + * of the table is somewhat counterintuitive. 
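Concretely, the layout the comment above refers to is byte lanes within IVAR0: each 32-bit register covers two queues, Rx queues use bytes 0 and 2, Tx queues bytes 1 and 3, and the top bit of each lane is the "valid" flag. A standalone model of the lane selection follows, assuming the usual 0x80 value for E1000_IVAR_VALID:

#include <assert.h>
#include <stdint.h>

#define IVAR_VALID 0x80u	/* assumed value of E1000_IVAR_VALID */

/* Bit shift of the IVAR byte lane for a queue: odd queues use the upper
 * byte pair, and Tx lanes sit one byte above the matching Rx lane. */
static unsigned int ivar_shift(int queue, int is_tx)
{
	return ((queue & 1) ? 16 : 0) + (is_tx ? 8 : 0);
}

static uint32_t ivar_set(uint32_t ivar, int queue, int is_tx, uint8_t vector)
{
	unsigned int shift = ivar_shift(queue, is_tx);

	ivar &= ~(0xFFu << shift);                         /* clear the lane */
	ivar |= (uint32_t)(vector | IVAR_VALID) << shift;  /* vector + valid */
	return ivar;
}

int main(void)
{
	/* Tx queue 0 takes vector 0 (byte 1), Rx queue 0 takes vector 1
	 * (byte 0), matching the EIMS bits set in igbvf_configure_msix(). */
	assert(ivar_set(0, 0, 1, 0) == 0x00008000u);
	assert(ivar_set(0, 0, 0, 1) == 0x00000081u);
	return 0;
}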
+ */ + if (rx_queue > IGBVF_NO_QUEUE) { + index = (rx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (rx_queue & 0x1) { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } else { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } + adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } + if (tx_queue > IGBVF_NO_QUEUE) { + index = (tx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (tx_queue & 0x1) { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } else { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } + adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } +} + +/** + * igbvf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * igbvf_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igbvf_configure_msix(struct igbvf_adapter *adapter) +{ + u32 tmp; + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + int vector = 0; + + adapter->eims_enable_mask = 0; + + igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); + adapter->eims_enable_mask |= tx_ring->eims_value; + writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register); + igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); + adapter->eims_enable_mask |= rx_ring->eims_value; + writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register); + + /* set vector for other causes, i.e. link changes */ + + tmp = (vector++ | E1000_IVAR_VALID); + + ew32(IVAR_MISC, tmp); + + adapter->eims_enable_mask = (1 << (vector)) - 1; + adapter->eims_other = 1 << (vector - 1); + e1e_flush(); +} + +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } +} + +/** + * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) +{ + int err = -ENOMEM; + int i; + + /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ + adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < 3; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(adapter->pdev, + adapter->msix_entries, 3, 3); + } + + if (err < 0) { + /* MSI-X failed */ + dev_err(&adapter->pdev->dev, + "Failed to initialize MSI-X interrupts.\n"); + igbvf_reset_interrupt_capability(adapter); + } +} + +/** + * igbvf_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure + * + * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
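With the vector assignment above (Tx queue = vector 0, Rx queue = vector 1, link/mailbox "other" cause = vector 2), the mask arithmetic at the end of igbvf_configure_msix() collapses to small constants; the snippet below simply replays it in plain C as a reading aid.

#include <assert.h>

int main(void)
{
	int vector = 0;
	unsigned int tx_eims, rx_eims, eims_enable_mask, eims_other;

	tx_eims = 1u << vector++;   /* Tx queue vector 0 -> EIMS bit 0 */
	rx_eims = 1u << vector++;   /* Rx queue vector 1 -> EIMS bit 1 */
	vector++;                   /* misc vector 2 for link/mailbox  */

	eims_enable_mask = (1u << vector) - 1;  /* 0x7: all three causes  */
	eims_other = 1u << (vector - 1);        /* 0x4: just the misc bit */

	assert(tx_eims == 0x1 && rx_eims == 0x2);
	assert(eims_enable_mask == 0x7 && eims_other == 0x4);
	return 0;
}

That 0x7 is the value igbvf_irq_enable() later writes to EIAC/EIAM/EIMS to arm all three causes at once.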
+ **/ +static int igbvf_request_msix(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) { + sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); + sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); + } else { + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + } + + err = request_irq(adapter->msix_entries[vector].vector, + igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + goto out; + + adapter->tx_ring->itr_register = E1000_EITR(vector); + adapter->tx_ring->itr_val = adapter->current_itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + goto out; + + adapter->rx_ring->itr_register = E1000_EITR(vector); + adapter->rx_ring->itr_val = adapter->current_itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + igbvf_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + igbvf_configure_msix(adapter); + return 0; +out: + return err; +} + +/** + * igbvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int igbvf_alloc_queues(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); + + return 0; +} + +/** + * igbvf_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
+ **/ +static int igbvf_request_irq(struct igbvf_adapter *adapter) +{ + int err = -1; + + /* igbvf supports msi-x only */ + if (adapter->msix_entries) + err = igbvf_request_msix(adapter); + + if (!err) + return err; + + dev_err(&adapter->pdev->dev, + "Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void igbvf_free_irq(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector; + + if (adapter->msix_entries) { + for (vector = 0; vector < 3; vector++) + free_irq(adapter->msix_entries[vector].vector, netdev); + } +} + +/** + * igbvf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void igbvf_irq_disable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIMC, ~0); + + if (adapter->msix_entries) + ew32(EIAC, 0); +} + +/** + * igbvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void igbvf_irq_enable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIAC, adapter->eims_enable_mask); + ew32(EIAM, adapter->eims_enable_mask); + ew32(EIMS, adapter->eims_enable_mask); +} + +/** + * igbvf_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @budget: amount of packets driver is allowed to process this poll + **/ +static int igbvf_poll(struct napi_struct *napi, int budget) +{ + struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); + struct igbvf_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + int work_done = 0; + + igbvf_clean_rx_irq(adapter, &work_done, budget); + + /* If not enough Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + + if (adapter->requested_itr & 3) + igbvf_set_itr(adapter); + + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + ew32(EIMS, adapter->rx_ring->eims_value); + } + + return work_done; +} + +/** + * igbvf_set_rlpml - set receive large packet maximum length + * @adapter: board private structure + * + * Configure the maximum size of packets that will be received + */ +static void igbvf_set_rlpml(struct igbvf_adapter *adapter) +{ + int max_frame_size; + struct e1000_hw *hw = &adapter->hw; + + max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; + e1000_rlpml_set_vf(hw, max_frame_size); +} + +static int igbvf_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac.ops.set_vfta(hw, vid, true)) { + dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); + return -EINVAL; + } + set_bit(vid, adapter->active_vlans); + return 0; +} + +static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac.ops.set_vfta(hw, vid, false)) { + dev_err(&adapter->pdev->dev, + "Failed to remove vlan id %d\n", vid); + return -EINVAL; + } + clear_bit(vid, adapter->active_vlans); + return 0; +} + +static void igbvf_restore_vlan(struct igbvf_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + igbvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +/** + * igbvf_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
+ **/ +static void igbvf_configure_tx(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 txdctl, dca_txctrl; + + /* disable transmits */ + txdctl = er32(TXDCTL(0)); + ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + e1e_flush(); + msleep(10); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); + tdba = tx_ring->dma; + ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = E1000_TDH(0); + tx_ring->tail = E1000_TDT(0); + + /* Turn off Relaxed Ordering on head write-backs. The writebacks + * MUST be delivered in order or it will completely screw up + * our bookkeeping. + */ + dca_txctrl = er32(DCA_TXCTRL(0)); + dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; + ew32(DCA_TXCTRL(0), dca_txctrl); + + /* enable transmits */ + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + ew32(TXDCTL(0), txdctl); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; +} + +/** + * igbvf_setup_srrctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 srrctl = 0; + + srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | + E1000_SRRCTL_BSIZEHDR_MASK | + E1000_SRRCTL_BSIZEPKT_MASK); + + /* Enable queue drop to avoid head of line blocking */ + srrctl |= E1000_SRRCTL_DROP_EN; + + /* Setup buffer sizes */ + srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; + + if (adapter->rx_buffer_len < 2048) { + adapter->rx_ps_hdr_size = 0; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + } else { + adapter->rx_ps_hdr_size = 128; + srrctl |= adapter->rx_ps_hdr_size << + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } + + ew32(SRRCTL(0), srrctl); +} + +/** + * igbvf_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
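Working through igbvf_setup_srrctl() above with the driver's default buffer length (rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN = 1514 + 4 + 4 = 1522 bytes, set in igbvf_sw_init() further down): ALIGN(1522, 1024) = 2048, so the packet-buffer-size field is programmed for a 2 kB buffer, and because 1522 < 2048 the queue stays in plain one-buffer mode with rx_ps_hdr_size = 0. Only when a larger MTU raises rx_buffer_len to 2048 or more (handled elsewhere in the driver) does it switch to header-split descriptors: a 128-byte header buffer plus the half-page data buffers that igbvf_alloc_rx_buffers() recycles via page_offset ^= PAGE_SIZE / 2.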
+ **/ +static void igbvf_configure_rx(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rxdctl; + + /* disable receives */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + e1e_flush(); + msleep(10); + + rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); + rx_ring->head = E1000_RDH(0); + rx_ring->tail = E1000_RDT(0); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= IGBVF_RX_PTHRESH; + rxdctl |= IGBVF_RX_HTHRESH << 8; + rxdctl |= IGBVF_RX_WTHRESH << 16; + + igbvf_set_rlpml(adapter); + + /* enable receives */ + ew32(RXDCTL(0), rxdctl); +} + +/** + * igbvf_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igbvf_set_multi(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list = NULL; + int i; + + if (!netdev_mc_empty(netdev)) { + mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN, + GFP_ATOMIC); + if (!mta_list) + return; + } + + /* prepare a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); + kfree(mta_list); +} + +/** + * igbvf_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void igbvf_configure(struct igbvf_adapter *adapter) +{ + igbvf_set_multi(adapter->netdev); + + igbvf_restore_vlan(adapter); + + igbvf_configure_tx(adapter); + igbvf_setup_srrctl(adapter); + igbvf_configure_rx(adapter); + igbvf_alloc_rx_buffers(adapter->rx_ring, + igbvf_desc_unused(adapter->rx_ring)); +} + +/* igbvf_reset - bring the hardware into a known good state + * @adapter: private board structure + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. 
+ */ +static void igbvf_reset(struct igbvf_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + /* Allow time for pending master requests to run */ + if (mac->ops.reset_hw(hw)) + dev_err(&adapter->pdev->dev, "PF still resetting\n"); + + mac->ops.init_hw(hw); + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, + netdev->addr_len); + } + + adapter->last_reset = jiffies; +} + +int igbvf_up(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + igbvf_configure(adapter); + + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + if (adapter->msix_entries) + igbvf_configure_msix(adapter); + + /* Clear any pending interrupts. */ + er32(EICR); + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + return 0; +} + +void igbvf_down(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rxdctl, txdctl; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGBVF_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + txdctl = er32(TXDCTL(0)); + ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + msleep(10); + + napi_disable(&adapter->rx_ring->napi); + + igbvf_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + + /* record the stats before reset*/ + igbvf_update_stats(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + igbvf_reset(adapter); + igbvf_clean_tx_ring(adapter->tx_ring); + igbvf_clean_rx_ring(adapter->rx_ring); +} + +void igbvf_reinit_locked(struct igbvf_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + igbvf_down(adapter); + igbvf_up(adapter); + clear_bit(__IGBVF_RESETTING, &adapter->state); +} + +/** + * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) + * @adapter: board private structure to initialize + * + * igbvf_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
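igbvf_reinit_locked() above serializes full reinitializations with a single state bit rather than a lock: test_and_set_bit() is the try-acquire, the usleep_range() loop is the backoff, and clear_bit() releases it. The snippet below is a userspace model of that convention using C11 atomics; the real driver uses the kernel bit helpers on adapter->state, not <stdatomic.h>.

#include <stdatomic.h>
#include <stdio.h>

/* Model of the __IGBVF_RESETTING convention: one flag, acquired with
 * test-and-set, released with clear; contenders sleep and retry. */
static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void reinit(void)
{
	while (atomic_flag_test_and_set(&resetting))
		;	/* driver: usleep_range(1000, 2000) and retry */

	puts("down");	/* stands in for igbvf_down(): quiesce queues/IRQs */
	puts("up");	/* stands in for igbvf_up(): reprogram and re-enable */

	atomic_flag_clear(&resetting);
}

int main(void)
{
	reinit();
	return 0;
}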
+ **/ +static int igbvf_sw_init(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + s32 rc; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_hdr_size = 0; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->tx_int_delay = 8; + adapter->tx_abs_int_delay = 32; + adapter->rx_int_delay = 0; + adapter->rx_abs_int_delay = 8; + adapter->requested_itr = 3; + adapter->current_itr = IGBVF_START_ITR; + + /* Set various function pointers */ + adapter->ei->init_ops(&adapter->hw); + + rc = adapter->hw.mac.ops.init_params(&adapter->hw); + if (rc) + return rc; + + rc = adapter->hw.mbx.ops.init_params(&adapter->hw); + if (rc) + return rc; + + igbvf_set_interrupt_capability(adapter); + + if (igbvf_alloc_queues(adapter)) + return -ENOMEM; + + spin_lock_init(&adapter->tx_queue_lock); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igbvf_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__IGBVF_DOWN, &adapter->state); + return 0; +} + +static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->stats.last_gprc = er32(VFGPRC); + adapter->stats.last_gorc = er32(VFGORC); + adapter->stats.last_gptc = er32(VFGPTC); + adapter->stats.last_gotc = er32(VFGOTC); + adapter->stats.last_mprc = er32(VFMPRC); + adapter->stats.last_gotlbc = er32(VFGOTLBC); + adapter->stats.last_gptlbc = er32(VFGPTLBC); + adapter->stats.last_gorlbc = er32(VFGORLBC); + adapter->stats.last_gprlbc = er32(VFGPRLBC); + + adapter->stats.base_gprc = er32(VFGPRC); + adapter->stats.base_gorc = er32(VFGORC); + adapter->stats.base_gptc = er32(VFGPTC); + adapter->stats.base_gotc = er32(VFGOTC); + adapter->stats.base_mprc = er32(VFMPRC); + adapter->stats.base_gotlbc = er32(VFGOTLBC); + adapter->stats.base_gptlbc = er32(VFGPTLBC); + adapter->stats.base_gorlbc = er32(VFGORLBC); + adapter->stats.base_gprlbc = er32(VFGPRLBC); +} + +/** + * igbvf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int igbvf_open(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__IGBVF_TESTING, &adapter->state)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); + if (err) + goto err_setup_rx; + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
+ */ + igbvf_configure(adapter); + + err = igbvf_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as igbvf_up() */ + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + + /* clear any pending interrupts */ + er32(EICR); + + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + return 0; + +err_req_irq: + igbvf_free_rx_resources(adapter->rx_ring); +err_setup_rx: + igbvf_free_tx_resources(adapter->tx_ring); +err_setup_tx: + igbvf_reset(adapter); + + return err; +} + +/** + * igbvf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int igbvf_close(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + + igbvf_free_irq(adapter); + + igbvf_free_tx_resources(adapter->tx_ring); + igbvf_free_rx_resources(adapter->rx_ring); + + return 0; +} + +/** + * igbvf_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_set_mac(struct net_device *netdev, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + return 0; +} + +#define UPDATE_VF_COUNTER(reg, name) \ +{ \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ +} + +/** + * igbvf_update_stats - Update the board statistics counters + * @adapter: board private structure +**/ +void igbvf_update_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* Prevent stats update while adapter is being reset, link is down + * or if the pci connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + + if (test_bit(__IGBVF_RESETTING, &adapter->state)) + return; + + if (pci_channel_offline(pdev)) + return; + + UPDATE_VF_COUNTER(VFGPRC, gprc); + UPDATE_VF_COUNTER(VFGORC, gorc); + UPDATE_VF_COUNTER(VFGPTC, gptc); + UPDATE_VF_COUNTER(VFGOTC, gotc); + UPDATE_VF_COUNTER(VFMPRC, mprc); + UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); + UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); + UPDATE_VF_COUNTER(VFGORLBC, gorlbc); + UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); + + /* Fill out the OS statistics structure */ + adapter->net_stats.multicast = adapter->stats.mprc; +} + +static void igbvf_print_link_info(struct igbvf_adapter *adapter) +{ + dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); +} + +static bool igbvf_has_link(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = E1000_SUCCESS; + bool link_active; + + /* If interface is down, stay link down */ + if (test_bit(__IGBVF_DOWN, &adapter->state)) + return false; + + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + /* if check for link returns error we will need to reset */ + if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) + schedule_work(&adapter->reset_task); + + return link_active; +} + +/** + * igbvf_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igbvf_watchdog(unsigned long data) +{ + struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igbvf_watchdog_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter = container_of(work, + struct igbvf_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link; + int tx_pending = 0; + + link = igbvf_has_link(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + igbvf_print_link_info(adapter); + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + dev_info(&adapter->pdev->dev, "Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + + if (netif_carrier_ok(netdev)) { + igbvf_update_stats(adapter); + } else { + tx_pending = (igbvf_desc_unused(tx_ring) + 1 < + tx_ring->count); + if (tx_pending) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + ew32(EICS, adapter->rx_ring->eims_value); + + /* Reset the timer */ + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + (2 * HZ))); +} + +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 + +static int igbvf_tso(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, + __be16 protocol) +{ + struct e1000_adv_tx_context_desc *context_desc; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + u32 mss_l4len_idx, l4len; + unsigned int i; + int err; + + *hdr_len = 0; + + err = skb_cow_head(skb, 0); + if (err < 0) { + dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n"); + return err; + } + + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + /* VLAN MACLEN IPLEN */ + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + info |= (skb_transport_header(skb) - skb_network_header(skb)); + *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); + context_desc->vlan_macip_lens = cpu_to_le32(info); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (protocol == htons(ETH_P_IP)) + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + + /* MSS L4LEN IDX */ + mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->seqnum_seed = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; +} + +static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, + __be16 protocol) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + + if ((skb->ip_summed == CHECKSUM_PARTIAL) || + (tx_flags & IGBVF_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + if (skb->ip_summed == CHECKSUM_PARTIAL) + info |= (skb_transport_header(skb) - + skb_network_header(skb)); + + context_desc->vlan_macip_lens = cpu_to_le32(info); + + tu_cmd |= 
(E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + switch (protocol) { + case htons(ETH_P_IP): + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + case htons(ETH_P_IPV6): + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + default: + break; + } + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + context_desc->seqnum_seed = 0; + context_desc->mss_l4len_idx = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + + return false; +} + +static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* there is enough descriptors then we don't need to worry */ + if (igbvf_desc_unused(adapter->tx_ring) >= size) + return 0; + + netif_stop_queue(netdev); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again just in case room has been made available */ + if (igbvf_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + netif_wake_queue(netdev); + + ++adapter->restart_queue; + return 0; +} + +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) + +static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb) +{ + struct igbvf_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned int len = skb_headlen(skb); + unsigned int count = 0, i; + unsigned int f; + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + const struct skb_frag_struct *frag; + + count++; + i++; + if (i == tx_ring->count) + i = 0; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + } + + tx_ring->buffer_info[i].skb = skb; + + return ++count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + + /* clear timestamp and dma mappings for failed buffer_info mapping */ + buffer_info->dma = 0; + buffer_info->time_stamp = 0; + buffer_info->length = 0; + buffer_info->mapped_as_page = false; + if (count) + count--; + + /* clear timestamp and dma mappings for remaining portion of packet */ + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + igbvf_put_txbuf(adapter, buffer_info); + } + + return 0; +} + +static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + int tx_flags, int count, + unsigned int first, u32 paylen, + u8 hdr_len) +{ + union e1000_adv_tx_desc *tx_desc = 
NULL; + struct igbvf_buffer *buffer_info; + u32 olinfo_status = 0, cmd_type_len; + unsigned int i; + + cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | + E1000_ADVTXD_DCMD_DEXT); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + cmd_type_len |= E1000_ADVTXD_DCMD_VLE; + + if (tx_flags & IGBVF_TX_FLAGS_TSO) { + cmd_type_len |= E1000_ADVTXD_DCMD_TSE; + + /* insert tcp checksum */ + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + + /* insert ip checksum */ + if (tx_flags & IGBVF_TX_FLAGS_IPV4) + olinfo_status |= E1000_TXD_POPTS_IXSM << 8; + + } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + } + + olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->buffer_info[first].next_to_watch = tx_desc; + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +} + +static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, + struct net_device *netdev, + struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + unsigned int first, tx_flags = 0; + u8 hdr_len = 0; + int count = 0; + int tso = 0; + __be16 protocol = vlan_get_protocol(skb); + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* need: count + 4 desc gap to keep tail from touching + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, + * head, otherwise try next time + */ + if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IGBVF_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + IGBVF_TX_FLAGS_VLAN_SHIFT); + } + + if (protocol == htons(ETH_P_IP)) + tx_flags |= IGBVF_TX_FLAGS_IPV4; + + first = tx_ring->next_to_use; + + tso = skb_is_gso(skb) ? + igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; + if (unlikely(tso < 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= IGBVF_TX_FLAGS_TSO; + else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IGBVF_TX_FLAGS_CSUM; + + /* count reflects descriptors mapped, if 0 then mapping error + * has occurred and we need to rewind the descriptor queue + */ + count = igbvf_tx_map_adv(adapter, tx_ring, skb); + + if (count) { + igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, + first, skb->len, hdr_len); + /* Make sure there is space in the ring for the next send. 
*/ + igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring; + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tx_ring = &adapter->tx_ring[0]; + + return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); +} + +/** + * igbvf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void igbvf_tx_timeout(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void igbvf_reset_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter; + + adapter = container_of(work, struct igbvf_adapter, reset_task); + + igbvf_reinit_locked(adapter); +} + +/** + * igbvf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * igbvf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN || + max_frame > MAX_JUMBO_FRAME_SIZE) + return -EINVAL; + +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + /* igbvf_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + if (netif_running(netdev)) + igbvf_down(adapter); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 1024) + adapter->rx_buffer_len = 1024; + else if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else +#if (PAGE_SIZE / 2) > 16384 + adapter->rx_buffer_len = 16384; +#else + adapter->rx_buffer_len = PAGE_SIZE / 2; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN; + + dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igbvf_up(adapter); + else + igbvf_reset(adapter); + + clear_bit(__IGBVF_RESETTING, &adapter->state); + + return 0; +} + +static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + default: + return -EOPNOTSUPP; + } +} + +static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + igbvf_free_irq(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int igbvf_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_restore_state(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + if (netif_running(netdev)) { + err = igbvf_request_irq(adapter); + if (err) + return err; + } + + igbvf_reset(adapter); + + if (netif_running(netdev)) + igbvf_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void igbvf_shutdown(struct pci_dev *pdev) +{ + igbvf_suspend(pdev, PMSG_SUSPEND); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void igbvf_netpoll(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + + igbvf_clean_tx_irq(adapter->tx_ring); + + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * igbvf_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + igbvf_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igbvf_io_slot_reset - called after the pci bus has been reset. 
+ * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igbvf_resume routine. + */ +static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + igbvf_reset(adapter); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * igbvf_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igbvf_resume routine. + */ +static void igbvf_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igbvf_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static void igbvf_print_device_info(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + if (hw->mac.type == e1000_vfadapt_i350) + dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n"); + else + dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); + dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); +} + +static int igbvf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (features & NETIF_F_RXCSUM) + adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; + else + adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; + + return 0; +} + +static const struct net_device_ops igbvf_netdev_ops = { + .ndo_open = igbvf_open, + .ndo_stop = igbvf_close, + .ndo_start_xmit = igbvf_xmit_frame, + .ndo_get_stats = igbvf_get_stats, + .ndo_set_rx_mode = igbvf_set_multi, + .ndo_set_mac_address = igbvf_set_mac, + .ndo_change_mtu = igbvf_change_mtu, + .ndo_do_ioctl = igbvf_ioctl, + .ndo_tx_timeout = igbvf_tx_timeout, + .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igbvf_netpoll, +#endif + .ndo_set_features = igbvf_set_features, +}; + +/** + * igbvf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igbvf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igbvf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
+ **/ +static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igbvf_adapter *adapter; + struct e1000_hw *hw; + const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; + + static int cards_found; + int err, pci_using_dac; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + err = pci_request_regions(pdev, igbvf_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->hw.back = adapter; + adapter->hw.mac.type = ei->mac; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + err = -EIO; + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if (ei->get_variants) { + err = ei->get_variants(adapter); + if (err) + goto err_get_variants; + } + + /* setup adapter struct */ + err = igbvf_sw_init(adapter); + if (err) + goto err_sw_init; + + /* construct the net_device struct */ + netdev->netdev_ops = &igbvf_netdev_ops; + + igbvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found++; + + netdev->hw_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; + + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + /*reset the controller to put the device in a known good state */ + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state. 
Is the PF interface up?\n"); + } else { + err = hw->mac.ops.read_mac_addr(hw); + if (err) + dev_info(&pdev->dev, "Error reading MAC address.\n"); + else if (is_zero_ether_addr(adapter->hw.mac.addr)) + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_info(&pdev->dev, "Assigning random MAC address.\n"); + eth_hw_addr_random(netdev); + memcpy(adapter->hw.mac.addr, netdev->dev_addr, + netdev->addr_len); + } + + setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, + (unsigned long)adapter); + + INIT_WORK(&adapter->reset_task, igbvf_reset_task); + INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); + + /* ring size defaults */ + adapter->rx_ring->count = 1024; + adapter->tx_ring->count = 1024; + + /* reset the hardware with the new settings */ + igbvf_reset(adapter); + + /* set hardware-specific flags */ + if (adapter->hw.mac.type == e1000_vfadapt_i350) + adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP; + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_hw_init; + + /* tell the stack to leave us alone until igbvf_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + igbvf_print_device_info(adapter); + + igbvf_initialize_last_counter_stats(adapter); + + return 0; + +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + igbvf_reset_interrupt_capability(adapter); +err_get_variants: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igbvf_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igbvf_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void igbvf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* The watchdog timer may be rescheduled, so explicitly + * disable it from being rescheduled. 
+ */ + set_bit(__IGBVF_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + + unregister_netdev(netdev); + + igbvf_reset_interrupt_capability(adapter); + + /* it is important to delete the NAPI struct prior to freeing the + * Rx ring so that you do not end up with null pointer refs + */ + netif_napi_del(&adapter->rx_ring->napi); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static const struct pci_error_handlers igbvf_err_handler = { + .error_detected = igbvf_io_error_detected, + .slot_reset = igbvf_io_slot_reset, + .resume = igbvf_io_resume, +}; + +static const struct pci_device_id igbvf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); + +/* PCI Device API Driver */ +static struct pci_driver igbvf_driver = { + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = igbvf_remove, +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = igbvf_suspend, + .resume = igbvf_resume, +#endif + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler +}; + +/** + * igbvf_init_module - Driver Registration Routine + * + * igbvf_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init igbvf_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); + pr_info("%s\n", igbvf_copyright); + + ret = pci_register_driver(&igbvf_driver); + + return ret; +} +module_init(igbvf_init_module); + +/** + * igbvf_exit_module - Driver Exit Cleanup Routine + * + * igbvf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit igbvf_exit_module(void) +{ + pci_unregister_driver(&igbvf_driver); +} +module_exit(igbvf_exit_module); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* netdev.c */ diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h new file mode 100644 index 000000000..86a7c120b --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -0,0 +1,107 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) + +/* Statistics registers */ +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 + +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) + +/* Define macros for handling registers */ +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define array_er32(reg, offset) \ + readl(hw->hw_addr + E1000_##reg + (offset << 2)) +#define array_ew32(reg, offset, val) \ + writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) +#define e1e_flush() er32(STATUS) + +#endif diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c new file mode 100644 index 000000000..a13baa90a --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -0,0 +1,399 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "vf.h" + +static s32 e1000_check_for_link_vf(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_init_hw_vf(struct e1000_hw *hw); +static s32 e1000_reset_hw_vf(struct e1000_hw *hw); + +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, + u32, u32, u32); +static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +static s32 e1000_read_mac_addr_vf(struct e1000_hw *); +static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); + +/** + * e1000_init_mac_params_vf - Inits MAC params + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* VF's have no MTA Registers - PF feature only */ + mac->mta_reg_count = 128; + /* VF's have no access to RAR entries */ + mac->rar_entry_count = 1; + + /* Function pointers */ + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_vf; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_vf; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_vf; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_vf; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; + /* set mac address */ + mac->ops.rar_set = e1000_rar_set_vf; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + /* set vlan filter table array */ + mac->ops.set_vfta = e1000_set_vfta_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_vf - Inits function pointers + * @hw: pointer to the HW structure + **/ +void e1000_init_function_pointers_vf(struct e1000_hw *hw) +{ + hw->mac.ops.init_params = e1000_init_mac_params_vf; + hw->mbx.ops.init_params = e1000_init_mbx_params_vf; +} + +/** + * e1000_get_link_up_info_vf - Gets link info. + * @hw: pointer to the HW structure + * @speed: pointer to 16 bit value to store link speed. + * @duplex: pointer to 16 bit value to store duplex. + * + * Since we cannot read the PHY and get accurate link info, we must rely upon + * the status register's data which is often stale and inaccurate. + **/ +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_vf - Resets the HW + * @hw: pointer to the HW structure + * + * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. + * This is all the reset we can perform on a VF. 
+ **/ +static s32 e1000_reset_hw_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 timeout = E1000_VF_INIT_TIMEOUT; + u32 ret_val = -E1000_ERR_MAC_INIT; + u32 msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 ctrl; + + /* assert VF queue/interrupt reset */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* we cannot initialize while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw) && timeout) { + timeout--; + udelay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; + + /* notify PF of VF reset completion */ + msgbuf[0] = E1000_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1); + + msleep(10); + + /* set our "perm_addr" based on info provided by PF */ + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + if (!ret_val) { + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, ETH_ALEN); + else + ret_val = -E1000_ERR_MAC_INIT; + } + } + + return ret_val; +} + +/** + * e1000_init_hw_vf - Inits the HW + * @hw: pointer to the HW structure + * + * Not much to do here except clear the PF Reset indication if there is one. + **/ +static s32 e1000_init_hw_vf(struct e1000_hw *hw) +{ + /* attempt to set and restore our mac address */ + e1000_rar_set_vf(hw, hw->mac.addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_vf - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* The bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + u16 *hash_list = (u16 *)&msgbuf[1]; + u32 hash_value; + u32 cnt, i; + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. 
+ * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. + */ + + cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; + msgbuf[0] = E1000_VF_SET_MULTICAST; + msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT; + + for (i = 0; i < cnt; i++) { + hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); + hash_list[i] = hash_value & 0x0FFFF; + mc_addr_list += ETH_ALEN; + } + + mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); +} + +/** + * e1000_set_vfta_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vid: determines the vfta register and bit to set/unset + * @set: if true then set bit, else clear bit + **/ +static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to true indicates "add" */ + if (set) + msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; + + mbx->ops.write_posted(hw, msgbuf, 2); + + err = mbx->ops.read_posted(hw, msgbuf, 2); + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + + /* if nacked the vlan was rejected */ + if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) + err = -E1000_ERR_MAC_INIT; + + return err; +} + +/** + * e1000_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_LPE; + msgbuf[1] = max_size; + + mbx->ops.write_posted(hw, msgbuf, 2); +} + +/** + * e1000_rar_set_vf - set device MAC address + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + **/ +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, 12); + msgbuf[0] = E1000_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, ETH_ALEN); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) + e1000_read_mac_addr_vf(hw); +} + +/** + * e1000_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) +{ + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_link_vf - Check for link for a virtual interface + * @hw: pointer to the HW structure + * + * Checks to see if the underlying PF is still talking to the VF and + * if it is then it reports the link state to the hardware, otherwise + * it reports link down and returns an error. + **/ +static s32 e1000_check_for_link_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 in_msg = 0; + + /* We only want to run this if there has been a rst asserted. 
+ * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset or timeout drop the link */ + if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if PF is up */ + if (!(er32(STATUS) & E1000_STATUS_LU)) + goto out; + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1)) + goto out; + + /* if incoming message isn't clear to send we are waiting on response */ + if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & E1000_VT_MSGTYPE_NACK) + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* the PF is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + return ret_val; +} + diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h new file mode 100644 index 000000000..0f1eca639 --- /dev/null +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -0,0 +1,263 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_VF_H_ +#define _E1000_VF_H_ + +#include +#include +#include +#include + +#include "regs.h" +#include "defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 + +/* Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. 
+ */ +#define E1000_RAR_ENTRIES_VF 1 + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + u32 data; + struct { + u16 pkt_info; /* RSS/Packet type */ + /* Split Header, hdr buffer length */ + u16 hdr_info; + } hs_rss; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +#include "mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. 
*/ + s32 (*init_params)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*mta_set)(struct e1000_hw *, u32); + void (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*set_vfta)(struct e1000_hw *, u16, bool); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u16 mta_reg_count; + u16 rar_entry_count; + + bool get_link_status; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16); + s32 (*check_for_msg)(struct e1000_hw *); + s32 (*check_for_ack)(struct e1000_hw *); + s32 (*check_for_rst)(struct e1000_hw *); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_mbx_info mbx; + + union { + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* These functions must be implemented by drivers */ +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +void e1000_init_function_pointers_vf(struct e1000_hw *hw); + +#endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile new file mode 100644 index 000000000..0b20c5e62 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/Makefile @@ -0,0 +1,35 @@ +################################################################################ +# +# Intel PRO/10GbE Linux driver +# Copyright(c) 1999 - 2008 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/10GbE ethernet driver +# + +obj-$(CONFIG_IXGB) += ixgb.o + +ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h new file mode 100644 index 000000000..1180cd59b --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -0,0 +1,206 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_H_ +#define _IXGB_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +struct ixgb_adapter; +#include "ixgb_hw.h" +#include "ixgb_ee.h" +#include "ixgb_ids.h" + +/* TX/RX descriptor defines */ +#define DEFAULT_TXD 256 +#define MAX_TXD 4096 +#define MIN_TXD 64 + +/* hardware cannot reliably support more than 512 descriptors owned by + * hardware descriptor cache otherwise an unreliable ring under heavy + * receive load may result */ +#define DEFAULT_RXD 512 +#define MAX_RXD 512 +#define MIN_RXD 64 + +/* Supported Rx Buffer Sizes */ +#define IXGB_RXBUFFER_2048 2048 +#define IXGB_RXBUFFER_4096 4096 +#define IXGB_RXBUFFER_8192 8192 +#define IXGB_RXBUFFER_16384 16384 + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */ + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgb_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + u16 mapped_as_page; +}; + +struct ixgb_desc_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct ixgb_buffer *buffer_info; +}; + +#define IXGB_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc) +#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc) +#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc) + +/* board specific private data structure */ + +struct ixgb_adapter { + struct timer_list watchdog_timer; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u32 part_num; + u16 link_speed; + u16 link_duplex; + struct work_struct tx_timeout_task; + + /* TX */ + struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; + unsigned int restart_queue; + unsigned long timeo_start; + u32 tx_cmd_type; + u64 hw_csum_tx_good; + u64 hw_csum_tx_error; + u32 tx_int_delay; + u32 tx_timeout_count; + bool tx_int_delay_enable; + bool detect_tx_hung; + + /* RX */ + struct ixgb_desc_ring rx_ring; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u32 rx_int_delay; + bool rx_csum; + + /* OS defined structs */ + struct napi_struct napi; + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in ixgb_hw.h */ + struct ixgb_hw hw; + u16 msg_enable; + struct ixgb_hw_stats stats; + u32 alloc_rx_buff_failed; + bool have_msi; + unsigned long flags; +}; + +enum ixgb_state_t { + /* TBD + __IXGB_TESTING, + __IXGB_RESETTING, + */ + __IXGB_DOWN +}; + +/* Exported from other modules */ +void ixgb_check_options(struct ixgb_adapter *adapter); +void ixgb_set_ethtool_ops(struct net_device *netdev); +extern char ixgb_driver_name[]; +extern const char ixgb_driver_version[]; + +void ixgb_set_speed_duplex(struct net_device *netdev); + +int ixgb_up(struct ixgb_adapter *adapter); +void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog); +void ixgb_reset(struct ixgb_adapter *adapter); +int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); +int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); +void ixgb_free_rx_resources(struct ixgb_adapter *adapter); +void ixgb_free_tx_resources(struct ixgb_adapter *adapter); +void ixgb_update_stats(struct ixgb_adapter *adapter); + + +#endif /* _IXGB_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c new file mode 100644 index 000000000..eca216b9b --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c @@ -0,0 +1,605 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "ixgb_hw.h" +#include "ixgb_ee.h" +/* Local prototypes */ +static u16 ixgb_shift_in_bits(struct ixgb_hw *hw); + +static void ixgb_shift_out_bits(struct ixgb_hw *hw, + u16 data, + u16 count); +static void ixgb_standby_eeprom(struct ixgb_hw *hw); + +static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw); + +static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); + +/****************************************************************************** + * Raises the EEPROM's clock input. + * + * hw - Struct containing variables accessed by shared code + * eecd_reg - EECD's current value + *****************************************************************************/ +static void +ixgb_raise_clock(struct ixgb_hw *hw, + u32 *eecd_reg) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait 50 microseconds. + */ + *eecd_reg = *eecd_reg | IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, *eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Lowers the EEPROM's clock input. + * + * hw - Struct containing variables accessed by shared code + * eecd_reg - EECD's current value + *****************************************************************************/ +static void +ixgb_lower_clock(struct ixgb_hw *hw, + u32 *eecd_reg) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and then + * wait 50 microseconds. + */ + *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, *eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Shift data bits out to the EEPROM. + * + * hw - Struct containing variables accessed by shared code + * data - data to send to the EEPROM + * count - number of bits to shift out + *****************************************************************************/ +static void +ixgb_shift_out_bits(struct ixgb_hw *hw, + u16 data, + u16 count) +{ + u32 eecd_reg; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
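+ *
+ * As a worked example: shifting out EEPROM_READ_OPCODE (0x6) with a
+ * count of 3 starts with mask = 0x4, so the bits leave the part MSB
+ * first as 1, 1, 0, each one framed by a raise and lower of the SK
+ * clock line.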
+ */ + mask = 0x01 << (count - 1); + eecd_reg = IXGB_READ_REG(hw, EECD); + eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1", + * and then raising and then lowering the clock (the SK bit controls + * the clock input to the EEPROM). A "0" is shifted out to the EEPROM + * by setting "DI" to "0" and then raising and then lowering the clock. + */ + eecd_reg &= ~IXGB_EECD_DI; + + if (data & mask) + eecd_reg |= IXGB_EECD_DI; + + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + + udelay(50); + + ixgb_raise_clock(hw, &eecd_reg); + ixgb_lower_clock(hw, &eecd_reg); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd_reg &= ~IXGB_EECD_DI; + IXGB_WRITE_REG(hw, EECD, eecd_reg); +} + +/****************************************************************************** + * Shift data bits in from the EEPROM + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static u16 +ixgb_shift_in_bits(struct ixgb_hw *hw) +{ + u32 eecd_reg; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 16 bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the "DO" + * bit. During this "shifting in" process the "DI" bit should always be + * clear.. + */ + + eecd_reg = IXGB_READ_REG(hw, EECD); + + eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI); + data = 0; + + for (i = 0; i < 16; i++) { + data = data << 1; + ixgb_raise_clock(hw, &eecd_reg); + + eecd_reg = IXGB_READ_REG(hw, EECD); + + eecd_reg &= ~(IXGB_EECD_DI); + if (eecd_reg & IXGB_EECD_DO) + data |= 1; + + ixgb_lower_clock(hw, &eecd_reg); + } + + return data; +} + +/****************************************************************************** + * Prepares EEPROM for access + * + * hw - Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
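+ *
+ * For reference, the callers below (ixgb_read_eeprom() and
+ * ixgb_write_eeprom()) follow the same pattern: ixgb_setup_eeprom(),
+ * ixgb_shift_out_bits() for the opcode and address, the data transfer
+ * itself, and then ixgb_standby_eeprom() or ixgb_cleanup_eeprom() to
+ * end the access.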
+ *****************************************************************************/ +static void +ixgb_setup_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + /* Clear SK and DI */ + eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI); + IXGB_WRITE_REG(hw, EECD, eecd_reg); + + /* Set CS */ + eecd_reg |= IXGB_EECD_CS; + IXGB_WRITE_REG(hw, EECD, eecd_reg); +} + +/****************************************************************************** + * Returns EEPROM to a "standby" state + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_standby_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + /* Deselect EEPROM */ + eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Clock high */ + eecd_reg |= IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Select EEPROM */ + eecd_reg |= IXGB_EECD_CS; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Clock low */ + eecd_reg &= ~IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Raises then lowers the EEPROM's clock pin + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_clock_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + /* Rising edge of clock */ + eecd_reg |= IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); + + /* Falling edge of clock */ + eecd_reg &= ~IXGB_EECD_SK; + IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); + udelay(50); +} + +/****************************************************************************** + * Terminates a command by lowering the EEPROM's chip select pin + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_cleanup_eeprom(struct ixgb_hw *hw) +{ + u32 eecd_reg; + + eecd_reg = IXGB_READ_REG(hw, EECD); + + eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI); + + IXGB_WRITE_REG(hw, EECD, eecd_reg); + + ixgb_clock_eeprom(hw); +} + +/****************************************************************************** + * Waits for the EEPROM to finish the current command. + * + * hw - Struct containing variables accessed by shared code + * + * The command is done when the EEPROM's data out pin goes high. + * + * Returns: + * true: EEPROM data pin is high before timeout. + * false: Time expired. + *****************************************************************************/ +static bool +ixgb_wait_eeprom_command(struct ixgb_hw *hw) +{ + u32 eecd_reg; + u32 i; + + /* Toggle the CS line. This in effect tells to EEPROM to actually execute + * the command in question. + */ + ixgb_standby_eeprom(hw); + + /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will + * signal that the command has been completed by raising the DO signal. + * If DO does not go high in 10 milliseconds, then error out. 
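+ *
+ * The 10 millisecond budget comes from the loop below: 200 passes
+ * with a udelay(50) between reads of the EECD register.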
+ */ + for (i = 0; i < 200; i++) { + eecd_reg = IXGB_READ_REG(hw, EECD); + + if (eecd_reg & IXGB_EECD_DO) + return true; + + udelay(50); + } + ASSERT(0); + return false; +} + +/****************************************************************************** + * Verifies that the EEPROM has a valid checksum + * + * hw - Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + * + * Returns: + * true: Checksum is valid + * false: Checksum is not valid. + *****************************************************************************/ +bool +ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) +{ + u16 checksum = 0; + u16 i; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) + checksum += ixgb_read_eeprom(hw, i); + + if (checksum == (u16) EEPROM_SUM) + return true; + else + return false; +} + +/****************************************************************************** + * Calculates the EEPROM checksum and writes it to the EEPROM + * + * hw - Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + *****************************************************************************/ +void +ixgb_update_eeprom_checksum(struct ixgb_hw *hw) +{ + u16 checksum = 0; + u16 i; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) + checksum += ixgb_read_eeprom(hw, i); + + checksum = (u16) EEPROM_SUM - checksum; + + ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); +} + +/****************************************************************************** + * Writes a 16 bit word to a given offset in the EEPROM. + * + * hw - Struct containing variables accessed by shared code + * reg - offset within the EEPROM to be written to + * data - 16 bit word to be written to the EEPROM + * + * If ixgb_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + * + *****************************************************************************/ +void +ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data) +{ + struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + /* Prepare the EEPROM for writing */ + ixgb_setup_eeprom(hw); + + /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode + * plus 4-bit dummy). This puts the EEPROM into write/erase mode. + */ + ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5); + ixgb_shift_out_bits(hw, 0, 4); + + /* Prepare the EEPROM */ + ixgb_standby_eeprom(hw); + + /* Send the Write command (3-bit opcode + 6-bit addr) */ + ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3); + ixgb_shift_out_bits(hw, offset, 6); + + /* Send the data */ + ixgb_shift_out_bits(hw, data, 16); + + ixgb_wait_eeprom_command(hw); + + /* Recover from write */ + ixgb_standby_eeprom(hw); + + /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit + * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase + * mode. 
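+ *
+ * For reference, the complete transaction issued by this function is:
+ * EWEN (EEPROM_EWEN_OPCODE, 5 opcode bits plus 4 dummy bits), the
+ * write command (EEPROM_WRITE_OPCODE, 3 bits) with a 6-bit word
+ * address and 16 data bits, a completion poll in
+ * ixgb_wait_eeprom_command(), and finally this EWDS sequence.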
+ */ + ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5); + ixgb_shift_out_bits(hw, 0, 4); + + /* Done with writing */ + ixgb_cleanup_eeprom(hw); + + /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ + ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); +} + +/****************************************************************************** + * Reads a 16 bit word from the EEPROM. + * + * hw - Struct containing variables accessed by shared code + * offset - offset of 16 bit word in the EEPROM to read + * + * Returns: + * The 16-bit value read from the eeprom + *****************************************************************************/ +u16 +ixgb_read_eeprom(struct ixgb_hw *hw, + u16 offset) +{ + u16 data; + + /* Prepare the EEPROM for reading */ + ixgb_setup_eeprom(hw); + + /* Send the READ command (opcode + addr) */ + ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3); + /* + * We have a 64 word EEPROM, there are 6 address bits + */ + ixgb_shift_out_bits(hw, offset, 6); + + /* Read the data */ + data = ixgb_shift_in_bits(hw); + + /* End this read operation */ + ixgb_standby_eeprom(hw); + + return data; +} + +/****************************************************************************** + * Reads eeprom and stores data in shared structure. + * Validates eeprom checksum and eeprom signature. + * + * hw - Struct containing variables accessed by shared code + * + * Returns: + * true: if eeprom read is successful + * false: otherwise. + *****************************************************************************/ +bool +ixgb_get_eeprom_data(struct ixgb_hw *hw) +{ + u16 i; + u16 checksum = 0; + struct ixgb_ee_map_type *ee_map; + + ENTER(); + + ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + pr_debug("Reading eeprom data\n"); + for (i = 0; i < IXGB_EEPROM_SIZE ; i++) { + u16 ee_data; + ee_data = ixgb_read_eeprom(hw, i); + checksum += ee_data; + hw->eeprom[i] = cpu_to_le16(ee_data); + } + + if (checksum != (u16) EEPROM_SUM) { + pr_debug("Checksum invalid\n"); + /* clear the init_ctrl_reg_1 to signify that the cache is + * invalidated */ + ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); + return false; + } + + if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) + != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { + pr_debug("Signature invalid\n"); + return false; + } + + return true; +} + +/****************************************************************************** + * Local function to check if the eeprom signature is good + * If the eeprom signature is good, calls ixgb)get_eeprom_data. + * + * hw - Struct containing variables accessed by shared code + * + * Returns: + * true: eeprom signature was good and the eeprom read was successful + * false: otherwise. + ******************************************************************************/ +static bool +ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) +{ + struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) + == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { + return true; + } else { + return ixgb_get_eeprom_data(hw); + } +} + +/****************************************************************************** + * return a word from the eeprom + * + * hw - Struct containing variables accessed by shared code + * index - Offset of eeprom word + * + * Returns: + * Word at indexed offset in eeprom, if valid, 0 otherwise. 
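+ *
+ * Illustrative use (not taken from this driver): a caller wanting the
+ * subsystem ID in CPU byte order would do
+ * le16_to_cpu(ixgb_get_eeprom_word(hw, EEPROM_SUBSYS_ID_REG));
+ * the cached image is re-read first whenever the signature check in
+ * ixgb_check_and_get_eeprom_data() fails.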
+ ******************************************************************************/ +__le16 +ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index) +{ + + if (index < IXGB_EEPROM_SIZE && ixgb_check_and_get_eeprom_data(hw)) + return hw->eeprom[index]; + + return 0; +} + +/****************************************************************************** + * return the mac address from EEPROM + * + * hw - Struct containing variables accessed by shared code + * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise + * + * Returns: None. + ******************************************************************************/ +void +ixgb_get_ee_mac_addr(struct ixgb_hw *hw, + u8 *mac_addr) +{ + int i; + struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + ENTER(); + + if (ixgb_check_and_get_eeprom_data(hw)) { + for (i = 0; i < ETH_ALEN; i++) { + mac_addr[i] = ee_map->mac_addr[i]; + } + pr_debug("eeprom mac address = %pM\n", mac_addr); + } +} + + +/****************************************************************************** + * return the Printed Board Assembly number from EEPROM + * + * hw - Struct containing variables accessed by shared code + * + * Returns: + * PBA number if EEPROM contents are valid, 0 otherwise + ******************************************************************************/ +u32 +ixgb_get_ee_pba_number(struct ixgb_hw *hw) +{ + if (ixgb_check_and_get_eeprom_data(hw)) + return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) + | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16); + + return 0; +} + + +/****************************************************************************** + * return the Device Id from EEPROM + * + * hw - Struct containing variables accessed by shared code + * + * Returns: + * Device Id if EEPROM contents are valid, 0 otherwise + ******************************************************************************/ +u16 +ixgb_get_ee_device_id(struct ixgb_hw *hw) +{ + struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; + + if (ixgb_check_and_get_eeprom_data(hw)) + return le16_to_cpu(ee_map->device_id); + + return 0; +} + diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h new file mode 100644 index 000000000..5680f6431 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h @@ -0,0 +1,104 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_EE_H_ +#define _IXGB_EE_H_ + +#define IXGB_EEPROM_SIZE 64 /* Size in words */ + +/* EEPROM Commands */ +#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */ +#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM MAP (Word Offsets) */ +#define EEPROM_IA_1_2_REG 0x0000 +#define EEPROM_IA_3_4_REG 0x0001 +#define EEPROM_IA_5_6_REG 0x0002 +#define EEPROM_COMPATIBILITY_REG 0x0003 +#define EEPROM_PBA_1_2_REG 0x0008 +#define EEPROM_PBA_3_4_REG 0x0009 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_SUBSYS_ID_REG 0x000B +#define EEPROM_SUBVEND_ID_REG 0x000C +#define EEPROM_DEVICE_ID_REG 0x000D +#define EEPROM_VENDOR_ID_REG 0x000E +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDPINS_REG 0x0020 +#define EEPROM_CIRCUIT_CTRL_REG 0x0021 +#define EEPROM_D0_D3_POWER_REG 0x0022 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +/* Mask bits for fields in Word 0x0a of the EEPROM */ + +#define EEPROM_ICW1_SIGNATURE_MASK 0xC000 +#define EEPROM_ICW1_SIGNATURE_VALID 0x4000 +#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000 + +/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* EEPROM Map defines (WORD OFFSETS)*/ + +/* EEPROM structure */ +struct ixgb_ee_map_type { + u8 mac_addr[ETH_ALEN]; + __le16 compatibility; + __le16 reserved1[4]; + __le32 pba_number; + __le16 init_ctrl_reg_1; + __le16 subsystem_id; + __le16 subvendor_id; + __le16 device_id; + __le16 vendor_id; + __le16 init_ctrl_reg_2; + __le16 oem_reserved[16]; + __le16 swdpins_reg; + __le16 circuit_ctrl_reg; + u8 d3_power; + u8 d0_power; + __le16 reserved2[28]; + __le16 checksum; +}; + +/* EEPROM Functions */ +u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg); + +bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); + +void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); + +void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data); + +#endif /* IXGB_EE_H */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c new file mode 100644 index 000000000..b311e9e71 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -0,0 +1,660 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgb */ + +#include "ixgb.h" + +#include + +#define IXGB_ALL_RAR_ENTRIES 16 + +enum {NETDEV_STATS, IXGB_STATS}; + +struct ixgb_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define IXGB_STAT(m) IXGB_STATS, \ + FIELD_SIZEOF(struct ixgb_adapter, m), \ + offsetof(struct ixgb_adapter, m) +#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \ + FIELD_SIZEOF(struct net_device, m), \ + offsetof(struct net_device, m) + +static struct ixgb_stats ixgb_gstrings_stats[] = { + {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)}, + {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)}, + {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)}, + {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)}, + {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)}, + {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)}, + {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)}, + {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)}, + {"multicast", IXGB_NETDEV_STAT(stats.multicast)}, + {"collisions", IXGB_NETDEV_STAT(stats.collisions)}, + +/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */ + {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)}, + {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)}, + {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)}, + {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)}, + {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)}, + {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)}, + {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)}, + {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)}, + {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)}, + {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)}, + {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)}, + {"tx_deferred_ok", IXGB_STAT(stats.dc)}, + {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, + {"tx_restart_queue", IXGB_STAT(restart_queue) }, + {"rx_long_length_errors", IXGB_STAT(stats.roc)}, + {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, + {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)}, + {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)}, + {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)}, + {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)}, + {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)}, + {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)}, + {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)}, + {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)}, + {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)}, + {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)} +}; + +#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats) + +static int +ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + ethtool_cmd_speed_set(ecmd, SPEED_10000); + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_DISABLE; + return 0; +} + +void 
ixgb_set_speed_duplex(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + /* be optimistic about our link, since we were up before */ + adapter->link_speed = 10000; + adapter->link_duplex = FULL_DUPLEX; + netif_carrier_on(netdev); + netif_wake_queue(netdev); +} + +static int +ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + u32 speed = ethtool_cmd_speed(ecmd); + + if (ecmd->autoneg == AUTONEG_ENABLE || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + + if (netif_running(adapter->netdev)) { + ixgb_down(adapter, true); + ixgb_reset(adapter); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + + return 0; +} + +static void +ixgb_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + + pause->autoneg = AUTONEG_DISABLE; + + if (hw->fc.type == ixgb_fc_rx_pause) + pause->rx_pause = 1; + else if (hw->fc.type == ixgb_fc_tx_pause) + pause->tx_pause = 1; + else if (hw->fc.type == ixgb_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int +ixgb_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + + if (pause->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + if (pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgb_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgb_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgb_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgb_fc_none; + + if (netif_running(adapter->netdev)) { + ixgb_down(adapter, true); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + + return 0; +} + +static u32 +ixgb_get_msglevel(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void +ixgb_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} +#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_ + +static int +ixgb_get_regs_len(struct net_device *netdev) +{ +#define IXGB_REG_DUMP_LEN 136*sizeof(u32) + return IXGB_REG_DUMP_LEN; +} + +static void +ixgb_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + u32 *reg = p; + u32 *reg_start = reg; + u8 i; + + /* the 1 (one) below indicates an attempt at versioning, if the + * interface in ethtool or the driver changes, this 1 should be + * incremented */ + regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */ + *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */ + *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */ + *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */ + *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */ + + /* Interrupt */ + *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */ + *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */ + *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */ + *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */ + + /* Receive */ + *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */ + *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */ + *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */ + *reg++ = 
IXGB_READ_REG(hw, RDBAL); /* 12 */ + *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */ + *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */ + *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */ + *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */ + *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */ + *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */ + *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */ + *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */ + + /* there are 16 RAR entries in hardware, we only use 3 */ + for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) { + *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */ + *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */ + } + + /* Transmit */ + *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */ + *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */ + *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */ + *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */ + *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */ + *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */ + *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */ + *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */ + *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */ + *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */ + + /* Physical */ + *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */ + *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */ + *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */ + *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */ + *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */ + *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */ + *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */ + *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */ + *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */ + *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */ + *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */ + *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */ + *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */ + + /* Statistics */ + *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */ + *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */ + *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */ + *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */ + *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */ + *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */ + *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */ + *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */ + *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */ + *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */ + *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */ + *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */ + *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */ + *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */ + *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */ + *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */ + *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */ + *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */ + *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */ + *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */ + *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */ + *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */ + *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */ + *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */ + *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */ + *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */ + *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */ + *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */ + *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */ + *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */ + *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */ + *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */ + *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */ + *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */ + *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */ + *reg++ = 
IXGB_GET_STAT(adapter, uptch); /* 111 */ + *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */ + *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */ + *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */ + *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */ + *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */ + *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */ + *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */ + *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */ + *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */ + *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */ + *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */ + *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */ + *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */ + *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */ + *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */ + *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */ + *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */ + *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */ + *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */ + *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */ + *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */ + *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */ + *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ + *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ + + regs->len = (reg - reg_start) * sizeof(u32); +} + +static int +ixgb_get_eeprom_len(struct net_device *netdev) +{ + /* return size in bytes */ + return IXGB_EEPROM_SIZE << 1; +} + +static int +ixgb_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + __le16 *eeprom_buff; + int i, max_len, first_word, last_word; + int ret_val = 0; + + if (eeprom->len == 0) { + ret_val = -EINVAL; + goto geeprom_error; + } + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + max_len = ixgb_get_eeprom_len(netdev); + + if (eeprom->offset > eeprom->offset + eeprom->len) { + ret_val = -EINVAL; + goto geeprom_error; + } + + if ((eeprom->offset + eeprom->len) > max_len) + eeprom->len = (max_len - eeprom->offset); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(__le16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + /* note the eeprom was good because the driver loaded */ + for (i = 0; i <= (last_word - first_word); i++) + eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + +geeprom_error: + return ret_val; +} + +static int +ixgb_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = ixgb_get_eeprom_len(netdev); + + if (eeprom->offset > eeprom->offset + eeprom->len) + return -EINVAL; + + if ((eeprom->offset + eeprom->len) > max_len) + eeprom->len = (max_len - eeprom->offset); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word 
*/ + /* only the second byte of the word is being modified */ + eeprom_buff[0] = ixgb_read_eeprom(hw, first_word); + ptr++; + } + if ((eeprom->offset + eeprom->len) & 1) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + eeprom_buff[last_word - first_word] + = ixgb_read_eeprom(hw, last_word); + } + + memcpy(ptr, bytes, eeprom->len); + for (i = 0; i <= (last_word - first_word); i++) + ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]); + + /* Update the checksum over the first part of the EEPROM if needed */ + if (first_word <= EEPROM_CHECKSUM_REG) + ixgb_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return 0; +} + +static void +ixgb_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, ixgb_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgb_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = IXGB_STATS_LEN; + drvinfo->regdump_len = ixgb_get_regs_len(netdev); + drvinfo->eedump_len = ixgb_get_eeprom_len(netdev); +} + +static void +ixgb_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + struct ixgb_desc_ring *rxdr = &adapter->rx_ring; + + ring->rx_max_pending = MAX_RXD; + ring->tx_max_pending = MAX_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int +ixgb_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + struct ixgb_desc_ring *rxdr = &adapter->rx_ring; + struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; + int err; + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(adapter->netdev)) + ixgb_down(adapter, true); + + rxdr->count = max(ring->rx_pending,(u32)MIN_RXD); + rxdr->count = min(rxdr->count,(u32)MAX_RXD); + rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); + + txdr->count = max(ring->tx_pending,(u32)MIN_TXD); + txdr->count = min(txdr->count,(u32)MAX_TXD); + txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + if ((err = ixgb_setup_rx_resources(adapter))) + goto err_setup_rx; + if ((err = ixgb_setup_tx_resources(adapter))) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again */ + + rx_new = adapter->rx_ring; + tx_new = adapter->tx_ring; + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + ixgb_free_rx_resources(adapter); + ixgb_free_tx_resources(adapter); + adapter->rx_ring = rx_new; + adapter->tx_ring = tx_new; + if ((err = ixgb_up(adapter))) + return err; + ixgb_set_speed_duplex(netdev); + } + + return 0; +err_setup_tx: + ixgb_free_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + ixgb_up(adapter); + return err; +} + +static int +ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; + + case 
ETHTOOL_ID_ON: + ixgb_led_on(&adapter->hw); + break; + + case ETHTOOL_ID_OFF: + case ETHTOOL_ID_INACTIVE: + ixgb_led_off(&adapter->hw); + } + + return 0; +} + +static int +ixgb_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IXGB_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +ixgb_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + int i; + char *p = NULL; + + ixgb_update_stats(adapter); + for (i = 0; i < IXGB_STATS_LEN; i++) { + switch (ixgb_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) netdev + + ixgb_gstrings_stats[i].stat_offset; + break; + case IXGB_STATS: + p = (char *) adapter + + ixgb_gstrings_stats[i].stat_offset; + break; + } + + data[i] = (ixgb_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +} + +static void +ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + int i; + + switch(stringset) { + case ETH_SS_STATS: + for (i = 0; i < IXGB_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + ixgb_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static const struct ethtool_ops ixgb_ethtool_ops = { + .get_settings = ixgb_get_settings, + .set_settings = ixgb_set_settings, + .get_drvinfo = ixgb_get_drvinfo, + .get_regs_len = ixgb_get_regs_len, + .get_regs = ixgb_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgb_get_eeprom_len, + .get_eeprom = ixgb_get_eeprom, + .set_eeprom = ixgb_set_eeprom, + .get_ringparam = ixgb_get_ringparam, + .set_ringparam = ixgb_set_ringparam, + .get_pauseparam = ixgb_get_pauseparam, + .set_pauseparam = ixgb_set_pauseparam, + .get_msglevel = ixgb_get_msglevel, + .set_msglevel = ixgb_set_msglevel, + .get_strings = ixgb_get_strings, + .set_phys_id = ixgb_set_phys_id, + .get_sset_count = ixgb_get_sset_count, + .get_ethtool_stats = ixgb_get_ethtool_stats, +}; + +void ixgb_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ixgb_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c new file mode 100644 index 000000000..bf9a220f7 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c @@ -0,0 +1,1263 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ixgb_hw.c + * Shared functions for accessing and configuring the adapter + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include "ixgb_hw.h" +#include "ixgb_ids.h" + +#include + +/* Local function prototypes */ + +static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr); + +static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value); + +static void ixgb_get_bus_info(struct ixgb_hw *hw); + +static bool ixgb_link_reset(struct ixgb_hw *hw); + +static void ixgb_optics_reset(struct ixgb_hw *hw); + +static void ixgb_optics_reset_bcm(struct ixgb_hw *hw); + +static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw); + +static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw); + +static void ixgb_clear_vfta(struct ixgb_hw *hw); + +static void ixgb_init_rx_addrs(struct ixgb_hw *hw); + +static u16 ixgb_read_phy_reg(struct ixgb_hw *hw, + u32 reg_address, + u32 phy_address, + u32 device_type); + +static bool ixgb_setup_fc(struct ixgb_hw *hw); + +static bool mac_addr_valid(u8 *mac_addr); + +static u32 ixgb_mac_reset(struct ixgb_hw *hw) +{ + u32 ctrl_reg; + + ctrl_reg = IXGB_CTRL0_RST | + IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */ + IXGB_CTRL0_SDP2_DIR | + IXGB_CTRL0_SDP1_DIR | + IXGB_CTRL0_SDP0_DIR | + IXGB_CTRL0_SDP3 | /* Initial value 1101 */ + IXGB_CTRL0_SDP2 | + IXGB_CTRL0_SDP0; + +#ifdef HP_ZX1 + /* Workaround for 82597EX reset errata */ + IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg); +#else + IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); +#endif + + /* Delay a few ms just to allow the reset to complete */ + msleep(IXGB_DELAY_AFTER_RESET); + ctrl_reg = IXGB_READ_REG(hw, CTRL0); +#ifdef DBG + /* Make sure the self-clearing global reset bit did self clear */ + ASSERT(!(ctrl_reg & IXGB_CTRL0_RST)); +#endif + + if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) { + ctrl_reg = /* Enable interrupt from XFP and SerDes */ + IXGB_CTRL1_GPI0_EN | + IXGB_CTRL1_SDP6_DIR | + IXGB_CTRL1_SDP7_DIR | + IXGB_CTRL1_SDP6 | + IXGB_CTRL1_SDP7; + IXGB_WRITE_REG(hw, CTRL1, ctrl_reg); + ixgb_optics_reset_bcm(hw); + } + + if (hw->phy_type == ixgb_phy_type_txn17401) + ixgb_optics_reset(hw); + + return ctrl_reg; +} + +/****************************************************************************** + * Reset the transmit and receive units; mask and clear all interrupts. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +bool +ixgb_adapter_stop(struct ixgb_hw *hw) +{ + u32 ctrl_reg; + u32 icr_reg; + + ENTER(); + + /* If we are stopped or resetting exit gracefully and wait to be + * started again before accessing the hardware. + */ + if (hw->adapter_stopped) { + pr_debug("Exiting because the adapter is already stopped!!!\n"); + return false; + } + + /* Set the Adapter Stopped flag so other driver functions stop + * touching the Hardware. + */ + hw->adapter_stopped = true; + + /* Clear interrupt mask to stop board from generating interrupts */ + pr_debug("Masking off all interrupts\n"); + IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. 
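+ *
+ * Concretely: the IXGB_RCTL_RXEN and IXGB_TCTL_TXEN bits are cleared
+ * below, the writes are flushed, and the driver sleeps
+ * IXGB_DELAY_BEFORE_RESET milliseconds before ixgb_mac_reset()
+ * asserts IXGB_CTRL0_RST.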
+ */ + IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN); + IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN); + IXGB_WRITE_FLUSH(hw); + msleep(IXGB_DELAY_BEFORE_RESET); + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + pr_debug("Issuing a global reset to MAC\n"); + + ctrl_reg = ixgb_mac_reset(hw); + + /* Clear interrupt mask to stop board from generating interrupts */ + pr_debug("Masking off all interrupts\n"); + IXGB_WRITE_REG(hw, IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + icr_reg = IXGB_READ_REG(hw, ICR); + + return ctrl_reg & IXGB_CTRL0_RST; +} + + +/****************************************************************************** + * Identifies the vendor of the optics module on the adapter. The SR adapters + * support two different types of XPAK optics, so it is necessary to determine + * which optics are present before applying any optics-specific workarounds. + * + * hw - Struct containing variables accessed by shared code. + * + * Returns: the vendor of the XPAK optics module. + *****************************************************************************/ +static ixgb_xpak_vendor +ixgb_identify_xpak_vendor(struct ixgb_hw *hw) +{ + u32 i; + u16 vendor_name[5]; + ixgb_xpak_vendor xpak_vendor; + + ENTER(); + + /* Read the first few bytes of the vendor string from the XPAK NVR + * registers. These are standard XENPAK/XPAK registers, so all XPAK + * devices should implement them. */ + for (i = 0; i < 5; i++) { + vendor_name[i] = ixgb_read_phy_reg(hw, + MDIO_PMA_PMD_XPAK_VENDOR_NAME + + i, IXGB_PHY_ADDRESS, + MDIO_MMD_PMAPMD); + } + + /* Determine the actual vendor */ + if (vendor_name[0] == 'I' && + vendor_name[1] == 'N' && + vendor_name[2] == 'T' && + vendor_name[3] == 'E' && vendor_name[4] == 'L') { + xpak_vendor = ixgb_xpak_vendor_intel; + } else { + xpak_vendor = ixgb_xpak_vendor_infineon; + } + + return xpak_vendor; +} + +/****************************************************************************** + * Determine the physical layer module on the adapter. + * + * hw - Struct containing variables accessed by shared code. The device_id + * field must be (correctly) populated before calling this routine. + * + * Returns: the phy type of the adapter. + *****************************************************************************/ +static ixgb_phy_type +ixgb_identify_phy(struct ixgb_hw *hw) +{ + ixgb_phy_type phy_type; + ixgb_xpak_vendor xpak_vendor; + + ENTER(); + + /* Infer the transceiver/phy type from the device id */ + switch (hw->device_id) { + case IXGB_DEVICE_ID_82597EX: + pr_debug("Identified TXN17401 optics\n"); + phy_type = ixgb_phy_type_txn17401; + break; + + case IXGB_DEVICE_ID_82597EX_SR: + /* The SR adapters carry two different types of XPAK optics + * modules; read the vendor identifier to determine the exact + * type of optics. 
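+ *
+ * ixgb_identify_xpak_vendor() does this by reading the first five
+ * characters of the XPAK vendor name over MDIO and checking for
+ * "INTEL": Intel modules are reported as TXN17201 optics, anything
+ * else as G6005.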
*/ + xpak_vendor = ixgb_identify_xpak_vendor(hw); + if (xpak_vendor == ixgb_xpak_vendor_intel) { + pr_debug("Identified TXN17201 optics\n"); + phy_type = ixgb_phy_type_txn17201; + } else { + pr_debug("Identified G6005 optics\n"); + phy_type = ixgb_phy_type_g6005; + } + break; + case IXGB_DEVICE_ID_82597EX_LR: + pr_debug("Identified G6104 optics\n"); + phy_type = ixgb_phy_type_g6104; + break; + case IXGB_DEVICE_ID_82597EX_CX4: + pr_debug("Identified CX4\n"); + xpak_vendor = ixgb_identify_xpak_vendor(hw); + if (xpak_vendor == ixgb_xpak_vendor_intel) { + pr_debug("Identified TXN17201 optics\n"); + phy_type = ixgb_phy_type_txn17201; + } else { + pr_debug("Identified G6005 optics\n"); + phy_type = ixgb_phy_type_g6005; + } + break; + default: + pr_debug("Unknown physical layer module\n"); + phy_type = ixgb_phy_type_unknown; + break; + } + + /* update phy type for sun specific board */ + if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) + phy_type = ixgb_phy_type_bcm; + + return phy_type; +} + +/****************************************************************************** + * Performs basic configuration of the adapter. + * + * hw - Struct containing variables accessed by shared code + * + * Resets the controller. + * Reads and validates the EEPROM. + * Initializes the receive address registers. + * Initializes the multicast table. + * Clears all on-chip counters. + * Calls routine to setup flow control settings. + * Leaves the transmit and receive units disabled and uninitialized. + * + * Returns: + * true if successful, + * false if unrecoverable problems were encountered. + *****************************************************************************/ +bool +ixgb_init_hw(struct ixgb_hw *hw) +{ + u32 i; + u32 ctrl_reg; + bool status; + + ENTER(); + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + pr_debug("Issuing a global reset to MAC\n"); + + ctrl_reg = ixgb_mac_reset(hw); + + pr_debug("Issuing an EE reset to MAC\n"); +#ifdef HP_ZX1 + /* Workaround for 82597EX reset errata */ + IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST); +#else + IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST); +#endif + + /* Delay a few ms just to allow the reset to complete */ + msleep(IXGB_DELAY_AFTER_EE_RESET); + + if (!ixgb_get_eeprom_data(hw)) + return false; + + /* Use the device id to determine the type of phy/transceiver. */ + hw->device_id = ixgb_get_ee_device_id(hw); + hw->phy_type = ixgb_identify_phy(hw); + + /* Setup the receive addresses. + * Receive Address Registers (RARs 0 - 15). + */ + ixgb_init_rx_addrs(hw); + + /* + * Check that a valid MAC address has been set. + * If it is not valid, we fail hardware init. + */ + if (!mac_addr_valid(hw->curr_mac_addr)) { + pr_debug("MAC address invalid after ixgb_init_rx_addrs\n"); + return(false); + } + + /* tell the routines in this file they can access hardware again */ + hw->adapter_stopped = false; + + /* Fill in the bus_info structure */ + ixgb_get_bus_info(hw); + + /* Zero out the Multicast HASH table */ + pr_debug("Zeroing the MTA\n"); + for (i = 0; i < IXGB_MC_TBL_SIZE; i++) + IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); + + /* Zero out the VLAN Filter Table Array */ + ixgb_clear_vfta(hw); + + /* Zero all of the hardware counters */ + ixgb_clear_hw_cntrs(hw); + + /* Call a subroutine to setup flow control. 
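+ *
+ * Note: the value returned by ixgb_setup_fc() below is what
+ * ixgb_init_hw() ultimately returns; the ixgb_check_for_link() call
+ * after it is only there for the 82597EX lane-deskew erratum.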
*/ + status = ixgb_setup_fc(hw); + + /* 82597EX errata: Call check-for-link in case lane deskew is locked */ + ixgb_check_for_link(hw); + + return status; +} + +/****************************************************************************** + * Initializes receive address filters. + * + * hw - Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + *****************************************************************************/ +static void +ixgb_init_rx_addrs(struct ixgb_hw *hw) +{ + u32 i; + + ENTER(); + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (!mac_addr_valid(hw->curr_mac_addr)) { + + /* Get the MAC address from the eeprom for later reference */ + ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr); + + pr_debug("Keeping Permanent MAC Addr = %pM\n", + hw->curr_mac_addr); + } else { + + /* Setup the receive address. */ + pr_debug("Overriding MAC Address in RAR[0]\n"); + pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr); + + ixgb_rar_set(hw, hw->curr_mac_addr, 0); + } + + /* Zero out the other 15 receive addresses. */ + pr_debug("Clearing RAR[1-15]\n"); + for (i = 1; i < IXGB_RAR_ENTRIES; i++) { + /* Write high reg first to disable the AV bit first */ + IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + } +} + +/****************************************************************************** + * Updates the MAC's list of multicast addresses. + * + * hw - Struct containing variables accessed by shared code + * mc_addr_list - the list of new multicast addresses + * mc_addr_count - number of addresses + * pad - number of bytes between addresses in the list + * + * The given list replaces any existing list. Clears the last 15 receive + * address registers and the multicast table. Uses receive address registers + * for the first 15 multicast addresses, and hashes the rest into the + * multicast table. + *****************************************************************************/ +void +ixgb_mc_addr_list_update(struct ixgb_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count, + u32 pad) +{ + u32 hash_value; + u32 i; + u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */ + u8 *mca; + + ENTER(); + + /* Set the new number of MC addresses that we are being requested to use. 
*/ + hw->num_mc_addrs = mc_addr_count; + + /* Clear RAR[1-15] */ + pr_debug("Clearing RAR[1-15]\n"); + for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { + IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + } + + /* Clear the MTA */ + pr_debug("Clearing MTA\n"); + for (i = 0; i < IXGB_MC_TBL_SIZE; i++) + IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); + + /* Add the new addresses */ + mca = mc_addr_list; + for (i = 0; i < mc_addr_count; i++) { + pr_debug("Adding the multicast addresses:\n"); + pr_debug("MC Addr #%d = %pM\n", i, mca); + + /* Place this multicast address in the RAR if there is room, * + * else put it in the MTA + */ + if (rar_used_count < IXGB_RAR_ENTRIES) { + ixgb_rar_set(hw, mca, rar_used_count); + pr_debug("Added a multicast address to RAR[%d]\n", i); + rar_used_count++; + } else { + hash_value = ixgb_hash_mc_addr(hw, mca); + + pr_debug("Hash value = 0x%03X\n", hash_value); + + ixgb_mta_set(hw, hash_value); + } + + mca += ETH_ALEN + pad; + } + + pr_debug("MC Update Complete\n"); +} + +/****************************************************************************** + * Hashes an address to determine its location in the multicast table + * + * hw - Struct containing variables accessed by shared code + * mc_addr - the multicast address to hash + * + * Returns: + * The hash value + *****************************************************************************/ +static u32 +ixgb_hash_mc_addr(struct ixgb_hw *hw, + u8 *mc_addr) +{ + u32 hash_value = 0; + + ENTER(); + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB - According to H/W docs */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = + ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + break; + case 1: /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = + ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + break; + case 2: /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = + ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + break; + case 3: /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + break; + default: + /* Invalid mc_filter_type, what should we do? */ + pr_debug("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/****************************************************************************** + * Sets the bit in the multicast table corresponding to the hash value. + * + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + *****************************************************************************/ +static void +ixgb_mta_set(struct ixgb_hw *hw, + u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta_reg; + + /* The MTA is a register array of 128 32-bit registers. + * It is treated like an array of 4096 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
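+ * As a worked example, the 0x563 hash computed for the filter-type-0
+ * case in ixgb_hash_mc_addr() above selects register
+ * (0x563 >> 5) & 0x7F = 0x2B and bit 0x563 & 0x1F = 3, so bit 3 of
+ * MTA[0x2B] is set.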
+ */ + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + + mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg); + + mta_reg |= (1 << hash_bit); + + IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg); +} + +/****************************************************************************** + * Puts an ethernet address into a receive address register. + * + * hw - Struct containing variables accessed by shared code + * addr - Address to put into receive address register + * index - Receive address register to write + *****************************************************************************/ +void +ixgb_rar_set(struct ixgb_hw *hw, + u8 *addr, + u32 index) +{ + u32 rar_low, rar_high; + + ENTER(); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + + rar_high = ((u32) addr[4] | + ((u32)addr[5] << 8) | + IXGB_RAH_AV); + + IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); +} + +/****************************************************************************** + * Writes a value to the specified offset in the VLAN filter table. + * + * hw - Struct containing variables accessed by shared code + * offset - Offset in VLAN filer table to write + * value - Value to write into VLAN filter table + *****************************************************************************/ +void +ixgb_write_vfta(struct ixgb_hw *hw, + u32 offset, + u32 value) +{ + IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); +} + +/****************************************************************************** + * Clears the VLAN filer table + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_clear_vfta(struct ixgb_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) + IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); +} + +/****************************************************************************** + * Configures the flow control settings based on SW configuration. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ + +static bool +ixgb_setup_fc(struct ixgb_hw *hw) +{ + u32 ctrl_reg; + u32 pap_reg = 0; /* by default, assume no pause time */ + bool status = true; + + ENTER(); + + /* Get the current control reg 0 settings */ + ctrl_reg = IXGB_READ_REG(hw, CTRL0); + + /* Clear the Receive Pause Enable and Transmit Pause Enable bits */ + ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE); + + /* The possible values of the "flow_control" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.type) { + case ixgb_fc_none: /* 0 */ + /* Set CMDC bit to disable Rx Flow control */ + ctrl_reg |= (IXGB_CTRL0_CMDC); + break; + case ixgb_fc_rx_pause: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled. 
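+ * Only RPE is set below, so the MAC honors received PAUSE frames
+ * but never transmits any; pap_reg is left at zero.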
+ */ + ctrl_reg |= (IXGB_CTRL0_RPE); + break; + case ixgb_fc_tx_pause: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + ctrl_reg |= (IXGB_CTRL0_TPE); + pap_reg = hw->fc.pause_time; + break; + case ixgb_fc_full: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE); + pap_reg = hw->fc.pause_time; + break; + default: + /* We should never get here. The value should be 0-3. */ + pr_debug("Flow control param set incorrectly\n"); + ASSERT(0); + break; + } + + /* Write the new settings */ + IXGB_WRITE_REG(hw, CTRL0, ctrl_reg); + + if (pap_reg != 0) + IXGB_WRITE_REG(hw, PAP, pap_reg); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames in not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc.type & ixgb_fc_tx_pause)) { + IXGB_WRITE_REG(hw, FCRTL, 0); + IXGB_WRITE_REG(hw, FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of XON + * frames. */ + if (hw->fc.send_xon) { + IXGB_WRITE_REG(hw, FCRTL, + (hw->fc.low_water | IXGB_FCRTL_XONE)); + } else { + IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water); + } + IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water); + } + return status; +} + +/****************************************************************************** + * Reads a word from a device over the Management Data Interface (MDI) bus. + * This interface is used to manage Physical layer devices. + * + * hw - Struct containing variables accessed by hw code + * reg_address - Offset of device register being read. + * phy_address - Address of device on MDI. + * + * Returns: Data word (16 bits) from MDI device. + * + * The 82597EX has support for several MDI access methods. This routine + * uses the new protocol MDI Single Command and Address Operation. + * This requires that first an address cycle command is sent, followed by a + * read command. + *****************************************************************************/ +static u16 +ixgb_read_phy_reg(struct ixgb_hw *hw, + u32 reg_address, + u32 phy_address, + u32 device_type) +{ + u32 i; + u32 data; + u32 command = 0; + + ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); + ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); + ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); + + /* Setup and write the address cycle command */ + command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | + (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | + (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND)); + + IXGB_WRITE_REG(hw, MSCA, command); + + /************************************************************** + ** Check every 10 usec to see if the address cycle completed + ** The COMMAND bit will clear when the operation is complete. + ** This may take as long as 64 usecs (we'll wait 100 usecs max) + ** from the CPU Write to the Ready bit assertion. 
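+ ** (The loop below polls the COMMAND bit 10 times with udelay(10)
+ ** between reads, which is where the 100 usec ceiling comes from.)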
+ **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Address cycle complete, setup and write the read command */ + command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | + (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | + (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND)); + + IXGB_WRITE_REG(hw, MSCA, command); + + /************************************************************** + ** Check every 10 usec to see if the read command completed + ** The COMMAND bit will clear when the operation is complete. + ** The read may take as long as 64 usecs (we'll wait 100 usecs max) + ** from the CPU Write to the Ready bit assertion. + **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Operation is complete, get the data from the MDIO Read/Write Data + * register and return. + */ + data = IXGB_READ_REG(hw, MSRWD); + data >>= IXGB_MSRWD_READ_DATA_SHIFT; + return((u16) data); +} + +/****************************************************************************** + * Writes a word to a device over the Management Data Interface (MDI) bus. + * This interface is used to manage Physical layer devices. + * + * hw - Struct containing variables accessed by hw code + * reg_address - Offset of device register being read. + * phy_address - Address of device on MDI. + * device_type - Also known as the Device ID or DID. + * data - 16-bit value to be written + * + * Returns: void. + * + * The 82597EX has support for several MDI access methods. This routine + * uses the new protocol MDI Single Command and Address Operation. + * This requires that first an address cycle command is sent, followed by a + * write command. + *****************************************************************************/ +static void +ixgb_write_phy_reg(struct ixgb_hw *hw, + u32 reg_address, + u32 phy_address, + u32 device_type, + u16 data) +{ + u32 i; + u32 command = 0; + + ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); + ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); + ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); + + /* Put the data in the MDIO Read/Write Data register */ + IXGB_WRITE_REG(hw, MSRWD, (u32)data); + + /* Setup and write the address cycle command */ + command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | + (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | + (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND)); + + IXGB_WRITE_REG(hw, MSCA, command); + + /************************************************************** + ** Check every 10 usec to see if the address cycle completed + ** The COMMAND bit will clear when the operation is complete. + ** This may take as long as 64 usecs (we'll wait 100 usecs max) + ** from the CPU Write to the Ready bit assertion. 
+ **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Address cycle complete, setup and write the write command */ + command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) | + (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) | + (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND)); + + IXGB_WRITE_REG(hw, MSCA, command); + + /************************************************************** + ** Check every 10 usec to see if the read command completed + ** The COMMAND bit will clear when the operation is complete. + ** The write may take as long as 64 usecs (we'll wait 100 usecs max) + ** from the CPU Write to the Ready bit assertion. + **************************************************************/ + + for (i = 0; i < 10; i++) + { + udelay(10); + + command = IXGB_READ_REG(hw, MSCA); + + if ((command & IXGB_MSCA_MDI_COMMAND) == 0) + break; + } + + ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0); + + /* Operation is complete, return. */ +} + +/****************************************************************************** + * Checks to see if the link status of the hardware has changed. + * + * hw - Struct containing variables accessed by hw code + * + * Called by any function that needs to check the link status of the adapter. + *****************************************************************************/ +void +ixgb_check_for_link(struct ixgb_hw *hw) +{ + u32 status_reg; + u32 xpcss_reg; + + ENTER(); + + xpcss_reg = IXGB_READ_REG(hw, XPCSS); + status_reg = IXGB_READ_REG(hw, STATUS); + + if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && + (status_reg & IXGB_STATUS_LU)) { + hw->link_up = true; + } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && + (status_reg & IXGB_STATUS_LU)) { + pr_debug("XPCSS Not Aligned while Status:LU is set\n"); + hw->link_up = ixgb_link_reset(hw); + } else { + /* + * 82597EX errata. Since the lane deskew problem may prevent + * link, reset the link before reporting link down. + */ + hw->link_up = ixgb_link_reset(hw); + } + /* Anything else for 10 Gig?? */ +} + +/****************************************************************************** + * Check for a bad link condition that may have occurred. + * The indication is that the RFC / LFC registers may be incrementing + * continually. A full adapter reset is required to recover. + * + * hw - Struct containing variables accessed by hw code + * + * Called by any function that needs to check the link status of the adapter. + *****************************************************************************/ +bool ixgb_check_for_bad_link(struct ixgb_hw *hw) +{ + u32 newLFC, newRFC; + bool bad_link_returncode = false; + + if (hw->phy_type == ixgb_phy_type_txn17401) { + newLFC = IXGB_READ_REG(hw, LFC); + newRFC = IXGB_READ_REG(hw, RFC); + if ((hw->lastLFC + 250 < newLFC) + || (hw->lastRFC + 250 < newRFC)) { + pr_debug("BAD LINK! too many LFC/RFC since last check\n"); + bad_link_returncode = true; + } + hw->lastLFC = newLFC; + hw->lastRFC = newRFC; + } + + return bad_link_returncode; +} + +/****************************************************************************** + * Clears all hardware statistics counters. 
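+ * The statistics registers are clear-on-read, so the routine below
+ * simply reads each register and discards the value.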
+ * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_clear_hw_cntrs(struct ixgb_hw *hw) +{ + volatile u32 temp_reg; + + ENTER(); + + /* if we are stopped or resetting exit gracefully */ + if (hw->adapter_stopped) { + pr_debug("Exiting because the adapter is stopped!!!\n"); + return; + } + + temp_reg = IXGB_READ_REG(hw, TPRL); + temp_reg = IXGB_READ_REG(hw, TPRH); + temp_reg = IXGB_READ_REG(hw, GPRCL); + temp_reg = IXGB_READ_REG(hw, GPRCH); + temp_reg = IXGB_READ_REG(hw, BPRCL); + temp_reg = IXGB_READ_REG(hw, BPRCH); + temp_reg = IXGB_READ_REG(hw, MPRCL); + temp_reg = IXGB_READ_REG(hw, MPRCH); + temp_reg = IXGB_READ_REG(hw, UPRCL); + temp_reg = IXGB_READ_REG(hw, UPRCH); + temp_reg = IXGB_READ_REG(hw, VPRCL); + temp_reg = IXGB_READ_REG(hw, VPRCH); + temp_reg = IXGB_READ_REG(hw, JPRCL); + temp_reg = IXGB_READ_REG(hw, JPRCH); + temp_reg = IXGB_READ_REG(hw, GORCL); + temp_reg = IXGB_READ_REG(hw, GORCH); + temp_reg = IXGB_READ_REG(hw, TORL); + temp_reg = IXGB_READ_REG(hw, TORH); + temp_reg = IXGB_READ_REG(hw, RNBC); + temp_reg = IXGB_READ_REG(hw, RUC); + temp_reg = IXGB_READ_REG(hw, ROC); + temp_reg = IXGB_READ_REG(hw, RLEC); + temp_reg = IXGB_READ_REG(hw, CRCERRS); + temp_reg = IXGB_READ_REG(hw, ICBC); + temp_reg = IXGB_READ_REG(hw, ECBC); + temp_reg = IXGB_READ_REG(hw, MPC); + temp_reg = IXGB_READ_REG(hw, TPTL); + temp_reg = IXGB_READ_REG(hw, TPTH); + temp_reg = IXGB_READ_REG(hw, GPTCL); + temp_reg = IXGB_READ_REG(hw, GPTCH); + temp_reg = IXGB_READ_REG(hw, BPTCL); + temp_reg = IXGB_READ_REG(hw, BPTCH); + temp_reg = IXGB_READ_REG(hw, MPTCL); + temp_reg = IXGB_READ_REG(hw, MPTCH); + temp_reg = IXGB_READ_REG(hw, UPTCL); + temp_reg = IXGB_READ_REG(hw, UPTCH); + temp_reg = IXGB_READ_REG(hw, VPTCL); + temp_reg = IXGB_READ_REG(hw, VPTCH); + temp_reg = IXGB_READ_REG(hw, JPTCL); + temp_reg = IXGB_READ_REG(hw, JPTCH); + temp_reg = IXGB_READ_REG(hw, GOTCL); + temp_reg = IXGB_READ_REG(hw, GOTCH); + temp_reg = IXGB_READ_REG(hw, TOTL); + temp_reg = IXGB_READ_REG(hw, TOTH); + temp_reg = IXGB_READ_REG(hw, DC); + temp_reg = IXGB_READ_REG(hw, PLT64C); + temp_reg = IXGB_READ_REG(hw, TSCTC); + temp_reg = IXGB_READ_REG(hw, TSCTFC); + temp_reg = IXGB_READ_REG(hw, IBIC); + temp_reg = IXGB_READ_REG(hw, RFC); + temp_reg = IXGB_READ_REG(hw, LFC); + temp_reg = IXGB_READ_REG(hw, PFRC); + temp_reg = IXGB_READ_REG(hw, PFTC); + temp_reg = IXGB_READ_REG(hw, MCFRC); + temp_reg = IXGB_READ_REG(hw, MCFTC); + temp_reg = IXGB_READ_REG(hw, XONRXC); + temp_reg = IXGB_READ_REG(hw, XONTXC); + temp_reg = IXGB_READ_REG(hw, XOFFRXC); + temp_reg = IXGB_READ_REG(hw, XOFFTXC); + temp_reg = IXGB_READ_REG(hw, RJC); +} + +/****************************************************************************** + * Turns on the software controllable LED + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +void +ixgb_led_on(struct ixgb_hw *hw) +{ + u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); + + /* To turn on the LED, clear software-definable pin 0 (SDP0). 
*/ + ctrl0_reg &= ~IXGB_CTRL0_SDP0; + IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); +} + +/****************************************************************************** + * Turns off the software controllable LED + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +void +ixgb_led_off(struct ixgb_hw *hw) +{ + u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0); + + /* To turn off the LED, set software-definable pin 0 (SDP0). */ + ctrl0_reg |= IXGB_CTRL0_SDP0; + IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); +} + +/****************************************************************************** + * Gets the current PCI bus type, speed, and width of the hardware + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_get_bus_info(struct ixgb_hw *hw) +{ + u32 status_reg; + + status_reg = IXGB_READ_REG(hw, STATUS); + + hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ? + ixgb_bus_type_pcix : ixgb_bus_type_pci; + + if (hw->bus.type == ixgb_bus_type_pci) { + hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ? + ixgb_bus_speed_66 : ixgb_bus_speed_33; + } else { + switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) { + case IXGB_STATUS_PCIX_SPD_66: + hw->bus.speed = ixgb_bus_speed_66; + break; + case IXGB_STATUS_PCIX_SPD_100: + hw->bus.speed = ixgb_bus_speed_100; + break; + case IXGB_STATUS_PCIX_SPD_133: + hw->bus.speed = ixgb_bus_speed_133; + break; + default: + hw->bus.speed = ixgb_bus_speed_reserved; + break; + } + } + + hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ? + ixgb_bus_width_64 : ixgb_bus_width_32; +} + +/****************************************************************************** + * Tests a MAC address to ensure it is a valid Individual Address + * + * mac_addr - pointer to MAC address. + * + *****************************************************************************/ +static bool +mac_addr_valid(u8 *mac_addr) +{ + bool is_valid = true; + ENTER(); + + /* Make sure it is not a multicast address */ + if (is_multicast_ether_addr(mac_addr)) { + pr_debug("MAC address is multicast\n"); + is_valid = false; + } + /* Not a broadcast address */ + else if (is_broadcast_ether_addr(mac_addr)) { + pr_debug("MAC address is broadcast\n"); + is_valid = false; + } + /* Reject the zero address */ + else if (is_zero_ether_addr(mac_addr)) { + pr_debug("MAC address is all zeros\n"); + is_valid = false; + } + return is_valid; +} + +/****************************************************************************** + * Resets the 10GbE link. Waits the settle time and returns the state of + * the link. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static bool +ixgb_link_reset(struct ixgb_hw *hw) +{ + bool link_status = false; + u8 wait_retries = MAX_RESET_ITERATIONS; + u8 lrst_retries = MAX_RESET_ITERATIONS; + + do { + /* Reset the link */ + IXGB_WRITE_REG(hw, CTRL0, + IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST); + + /* Wait for link-up and lane re-alignment */ + do { + udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET); + link_status = + ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) + && (IXGB_READ_REG(hw, XPCSS) & + IXGB_XPCSS_ALIGN_STATUS)) ? 
true : false; + } while (!link_status && --wait_retries); + + } while (!link_status && --lrst_retries); + + return link_status; +} + +/****************************************************************************** + * Resets the 10GbE optics module. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +static void +ixgb_optics_reset(struct ixgb_hw *hw) +{ + if (hw->phy_type == ixgb_phy_type_txn17401) { + u16 mdio_reg; + + ixgb_write_phy_reg(hw, + MDIO_CTRL1, + IXGB_PHY_ADDRESS, + MDIO_MMD_PMAPMD, + MDIO_CTRL1_RESET); + + mdio_reg = ixgb_read_phy_reg(hw, + MDIO_CTRL1, + IXGB_PHY_ADDRESS, + MDIO_MMD_PMAPMD); + } +} + +/****************************************************************************** + * Resets the 10GbE optics module for Sun variant NIC. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ + +#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803 +#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164 +#define IXGB_BCM8704_USER_CTRL_REG 0xC800 +#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF +#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003 +#define IXGB_SUN_PHY_ADDRESS 0x0000 +#define IXGB_SUN_PHY_RESET_DELAY 305 + +static void +ixgb_optics_reset_bcm(struct ixgb_hw *hw) +{ + u32 ctrl = IXGB_READ_REG(hw, CTRL0); + ctrl &= ~IXGB_CTRL0_SDP2; + ctrl |= IXGB_CTRL0_SDP3; + IXGB_WRITE_REG(hw, CTRL0, ctrl); + IXGB_WRITE_FLUSH(hw); + + /* SerDes needs extra delay */ + msleep(IXGB_SUN_PHY_RESET_DELAY); + + /* Broadcom 7408L configuration */ + /* Reference clock config */ + ixgb_write_phy_reg(hw, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL); + /* we must read the registers twice */ + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_PMD_TX_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + + ixgb_write_phy_reg(hw, + IXGB_BCM8704_USER_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR, + IXGB_BCM8704_USER_CTRL_REG_VAL); + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + ixgb_read_phy_reg(hw, + IXGB_BCM8704_USER_CTRL_REG, + IXGB_SUN_PHY_ADDRESS, + IXGB_BCM8704_USER_DEV3_ADDR); + + /* SerDes needs extra delay */ + msleep(IXGB_SUN_PHY_RESET_DELAY); +} diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h new file mode 100644 index 000000000..0bd5d72e1 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -0,0 +1,792 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_HW_H_ +#define _IXGB_HW_H_ + +#include + +#include "ixgb_osdep.h" + +/* Enums */ +typedef enum { + ixgb_mac_unknown = 0, + ixgb_82597, + ixgb_num_macs +} ixgb_mac_type; + +/* Types of physical layer modules */ +typedef enum { + ixgb_phy_type_unknown = 0, + ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */ + ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */ + ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */ + ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */ + ixgb_phy_type_bcm /* SUN specific board */ +} ixgb_phy_type; + +/* XPAK transceiver vendors, for the SR adapters */ +typedef enum { + ixgb_xpak_vendor_intel, + ixgb_xpak_vendor_infineon +} ixgb_xpak_vendor; + +/* Media Types */ +typedef enum { + ixgb_media_type_unknown = 0, + ixgb_media_type_fiber = 1, + ixgb_media_type_copper = 2, + ixgb_num_media_types +} ixgb_media_type; + +/* Flow Control Settings */ +typedef enum { + ixgb_fc_none = 0, + ixgb_fc_rx_pause = 1, + ixgb_fc_tx_pause = 2, + ixgb_fc_full = 3, + ixgb_fc_default = 0xFF +} ixgb_fc_type; + +/* PCI bus types */ +typedef enum { + ixgb_bus_type_unknown = 0, + ixgb_bus_type_pci, + ixgb_bus_type_pcix +} ixgb_bus_type; + +/* PCI bus speeds */ +typedef enum { + ixgb_bus_speed_unknown = 0, + ixgb_bus_speed_33, + ixgb_bus_speed_66, + ixgb_bus_speed_100, + ixgb_bus_speed_133, + ixgb_bus_speed_reserved +} ixgb_bus_speed; + +/* PCI bus widths */ +typedef enum { + ixgb_bus_width_unknown = 0, + ixgb_bus_width_32, + ixgb_bus_width_64 +} ixgb_bus_width; + +#define IXGB_EEPROM_SIZE 64 /* Size in words */ + +#define SPEED_10000 10000 +#define FULL_DUPLEX 2 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */ + +#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */ +#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */ +#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */ + +#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */ + /* NOTE: this is MICROSECONDS */ +#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */ + +/* General Registers */ +#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */ +#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */ +#define IXGB_STATUS 0x00010 /* Device Status Register - RO */ +#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */ +#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */ + +/* Interrupt */ +#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */ +#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */ +#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */ +#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */ + +/* Receive */ +#define IXGB_RCTL 0x00100 /* RX Control - RW */ +#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */ +#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold 
High - RW */ +#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */ +#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */ +#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */ +#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */ +#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */ +#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */ +#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */ +#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */ +#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */ +#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */ +#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */ +#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */ +#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */ +#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */ +#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Transmit */ +#define IXGB_TCTL 0x00600 /* TX Control - RW */ +#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */ +#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */ +#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */ +#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */ +#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */ +#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */ +#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */ +#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */ +#define IXGB_PAP 0x00640 /* Pause and Pace - RW */ +#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8 + +/* Physical */ +#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */ +#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */ +#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */ +#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */ +#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */ +#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */ +#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */ +#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */ +#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */ +#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */ +#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */ +#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */ +#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */ + +/* Wake-up */ +#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */ +#define IXGB_WUS 0x00810 /* Wake Up Status - RO */ +#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */ +#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */ +#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */ + +/* Statistics */ +#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */ +#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */ +#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */ +#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */ +#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */ +#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */ +#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */ +#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */ +#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */ +#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */ +#define IXGB_VPRCL 0x02028 /* VLAN Packets Received 
Count (Low) */ +#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */ +#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */ +#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */ +#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */ +#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */ +#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */ +#define IXGB_TORH 0x02044 /* Total Octets Received (High) */ +#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */ +#define IXGB_RUC 0x02050 /* Receive Undersize Count */ +#define IXGB_ROC 0x02058 /* Receive Oversize Count */ +#define IXGB_RLEC 0x02060 /* Receive Length Error Count */ +#define IXGB_CRCERRS 0x02068 /* CRC Error Count */ +#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */ +#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */ +#define IXGB_MPC 0x02080 /* Missed Packets Count */ +#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */ +#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */ +#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */ +#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */ +#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */ +#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */ +#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */ +#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */ +#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */ +#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */ +#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */ +#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */ +#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */ +#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */ +#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */ +#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */ +#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */ +#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */ +#define IXGB_DC 0x02148 /* Defer Count */ +#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */ +#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */ +#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */ +#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */ +#define IXGB_RFC 0x02188 /* Remote Fault Count */ +#define IXGB_LFC 0x02190 /* Local Fault Count */ +#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */ +#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */ +#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */ +#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */ +#define IXGB_XONRXC 0x021B8 /* XON Received Count */ +#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */ +#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */ +#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */ +#define IXGB_RJC 0x021D8 /* Receive Jabber Count */ + +/* CTRL0 Bit Masks */ +#define IXGB_CTRL0_LRST 0x00000008 +#define IXGB_CTRL0_JFE 0x00000010 +#define IXGB_CTRL0_XLE 0x00000020 +#define IXGB_CTRL0_MDCS 0x00000040 +#define IXGB_CTRL0_CMDC 0x00000080 +#define IXGB_CTRL0_SDP0 0x00040000 +#define IXGB_CTRL0_SDP1 0x00080000 +#define 
IXGB_CTRL0_SDP2 0x00100000 +#define IXGB_CTRL0_SDP3 0x00200000 +#define IXGB_CTRL0_SDP0_DIR 0x00400000 +#define IXGB_CTRL0_SDP1_DIR 0x00800000 +#define IXGB_CTRL0_SDP2_DIR 0x01000000 +#define IXGB_CTRL0_SDP3_DIR 0x02000000 +#define IXGB_CTRL0_RST 0x04000000 +#define IXGB_CTRL0_RPE 0x08000000 +#define IXGB_CTRL0_TPE 0x10000000 +#define IXGB_CTRL0_VME 0x40000000 + +/* CTRL1 Bit Masks */ +#define IXGB_CTRL1_GPI0_EN 0x00000001 +#define IXGB_CTRL1_GPI1_EN 0x00000002 +#define IXGB_CTRL1_GPI2_EN 0x00000004 +#define IXGB_CTRL1_GPI3_EN 0x00000008 +#define IXGB_CTRL1_SDP4 0x00000010 +#define IXGB_CTRL1_SDP5 0x00000020 +#define IXGB_CTRL1_SDP6 0x00000040 +#define IXGB_CTRL1_SDP7 0x00000080 +#define IXGB_CTRL1_SDP4_DIR 0x00000100 +#define IXGB_CTRL1_SDP5_DIR 0x00000200 +#define IXGB_CTRL1_SDP6_DIR 0x00000400 +#define IXGB_CTRL1_SDP7_DIR 0x00000800 +#define IXGB_CTRL1_EE_RST 0x00002000 +#define IXGB_CTRL1_RO_DIS 0x00020000 +#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000 +#define IXGB_CTRL1_PCIXHM_1_2 0x00000000 +#define IXGB_CTRL1_PCIXHM_5_8 0x00400000 +#define IXGB_CTRL1_PCIXHM_3_4 0x00800000 +#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000 + +/* STATUS Bit Masks */ +#define IXGB_STATUS_LU 0x00000002 +#define IXGB_STATUS_AIP 0x00000004 +#define IXGB_STATUS_TXOFF 0x00000010 +#define IXGB_STATUS_XAUIME 0x00000020 +#define IXGB_STATUS_RES 0x00000040 +#define IXGB_STATUS_RIS 0x00000080 +#define IXGB_STATUS_RIE 0x00000100 +#define IXGB_STATUS_RLF 0x00000200 +#define IXGB_STATUS_RRF 0x00000400 +#define IXGB_STATUS_PCI_SPD 0x00000800 +#define IXGB_STATUS_BUS64 0x00001000 +#define IXGB_STATUS_PCIX_MODE 0x00002000 +#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000 +#define IXGB_STATUS_PCIX_SPD_66 0x00000000 +#define IXGB_STATUS_PCIX_SPD_100 0x00004000 +#define IXGB_STATUS_PCIX_SPD_133 0x00008000 +#define IXGB_STATUS_REV_ID_MASK 0x000F0000 +#define IXGB_STATUS_REV_ID_SHIFT 16 + +/* EECD Bit Masks */ +#define IXGB_EECD_SK 0x00000001 +#define IXGB_EECD_CS 0x00000002 +#define IXGB_EECD_DI 0x00000004 +#define IXGB_EECD_DO 0x00000008 +#define IXGB_EECD_FWE_MASK 0x00000030 +#define IXGB_EECD_FWE_DIS 0x00000010 +#define IXGB_EECD_FWE_EN 0x00000020 + +/* MFS */ +#define IXGB_MFS_SHIFT 16 + +/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */ +#define IXGB_INT_TXDW 0x00000001 +#define IXGB_INT_TXQE 0x00000002 +#define IXGB_INT_LSC 0x00000004 +#define IXGB_INT_RXSEQ 0x00000008 +#define IXGB_INT_RXDMT0 0x00000010 +#define IXGB_INT_RXO 0x00000040 +#define IXGB_INT_RXT0 0x00000080 +#define IXGB_INT_AUTOSCAN 0x00000200 +#define IXGB_INT_GPI0 0x00000800 +#define IXGB_INT_GPI1 0x00001000 +#define IXGB_INT_GPI2 0x00002000 +#define IXGB_INT_GPI3 0x00004000 + +/* RCTL Bit Masks */ +#define IXGB_RCTL_RXEN 0x00000002 +#define IXGB_RCTL_SBP 0x00000004 +#define IXGB_RCTL_UPE 0x00000008 +#define IXGB_RCTL_MPE 0x00000010 +#define IXGB_RCTL_RDMTS_MASK 0x00000300 +#define IXGB_RCTL_RDMTS_1_2 0x00000000 +#define IXGB_RCTL_RDMTS_1_4 0x00000100 +#define IXGB_RCTL_RDMTS_1_8 0x00000200 +#define IXGB_RCTL_MO_MASK 0x00003000 +#define IXGB_RCTL_MO_47_36 0x00000000 +#define IXGB_RCTL_MO_46_35 0x00001000 +#define IXGB_RCTL_MO_45_34 0x00002000 +#define IXGB_RCTL_MO_43_32 0x00003000 +#define IXGB_RCTL_MO_SHIFT 12 +#define IXGB_RCTL_BAM 0x00008000 +#define IXGB_RCTL_BSIZE_MASK 0x00030000 +#define IXGB_RCTL_BSIZE_2048 0x00000000 +#define IXGB_RCTL_BSIZE_4096 0x00010000 +#define IXGB_RCTL_BSIZE_8192 0x00020000 +#define IXGB_RCTL_BSIZE_16384 0x00030000 +#define IXGB_RCTL_VFE 0x00040000 +#define IXGB_RCTL_CFIEN 0x00080000 +#define IXGB_RCTL_CFI 
0x00100000 +#define IXGB_RCTL_RPDA_MASK 0x00600000 +#define IXGB_RCTL_RPDA_MC_MAC 0x00000000 +#define IXGB_RCTL_MC_ONLY 0x00400000 +#define IXGB_RCTL_CFF 0x00800000 +#define IXGB_RCTL_SECRC 0x04000000 +#define IXGB_RDT_FPDB 0x80000000 + +#define IXGB_RCTL_IDLE_RX_UNIT 0 + +/* FCRTL Bit Masks */ +#define IXGB_FCRTL_XONE 0x80000000 + +/* RXDCTL Bit Masks */ +#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF +#define IXGB_RXDCTL_PTHRESH_SHIFT 0 +#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00 +#define IXGB_RXDCTL_HTHRESH_SHIFT 9 +#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000 +#define IXGB_RXDCTL_WTHRESH_SHIFT 18 + +/* RAIDC Bit Masks */ +#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F +#define IXGB_RAIDC_DELAY_MASK 0x000FF800 +#define IXGB_RAIDC_DELAY_SHIFT 11 +#define IXGB_RAIDC_POLL_MASK 0x1FF00000 +#define IXGB_RAIDC_POLL_SHIFT 20 +#define IXGB_RAIDC_RXT_GATE 0x40000000 +#define IXGB_RAIDC_EN 0x80000000 + +#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220 +#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244 +#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122 +#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61 + +/* RXCSUM Bit Masks */ +#define IXGB_RXCSUM_IPOFL 0x00000100 +#define IXGB_RXCSUM_TUOFL 0x00000200 + +/* RAH Bit Masks */ +#define IXGB_RAH_ASEL_MASK 0x00030000 +#define IXGB_RAH_ASEL_DEST 0x00000000 +#define IXGB_RAH_ASEL_SRC 0x00010000 +#define IXGB_RAH_AV 0x80000000 + +/* TCTL Bit Masks */ +#define IXGB_TCTL_TCE 0x00000001 +#define IXGB_TCTL_TXEN 0x00000002 +#define IXGB_TCTL_TPDE 0x00000004 + +#define IXGB_TCTL_IDLE_TX_UNIT 0 + +/* TXDCTL Bit Masks */ +#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F +#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00 +#define IXGB_TXDCTL_HTHRESH_SHIFT 8 +#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000 +#define IXGB_TXDCTL_WTHRESH_SHIFT 16 + +/* TSPMT Bit Masks */ +#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF +#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000 +#define IXGB_TSPMT_TSPBP_SHIFT 16 + +/* PAP Bit Masks */ +#define IXGB_PAP_TXPC_MASK 0x0000FFFF +#define IXGB_PAP_TXPV_MASK 0x000F0000 +#define IXGB_PAP_TXPV_10G 0x00000000 +#define IXGB_PAP_TXPV_1G 0x00010000 +#define IXGB_PAP_TXPV_2G 0x00020000 +#define IXGB_PAP_TXPV_3G 0x00030000 +#define IXGB_PAP_TXPV_4G 0x00040000 +#define IXGB_PAP_TXPV_5G 0x00050000 +#define IXGB_PAP_TXPV_6G 0x00060000 +#define IXGB_PAP_TXPV_7G 0x00070000 +#define IXGB_PAP_TXPV_8G 0x00080000 +#define IXGB_PAP_TXPV_9G 0x00090000 +#define IXGB_PAP_TXPV_WAN 0x000F0000 + +/* PCSC1 Bit Masks */ +#define IXGB_PCSC1_LOOPBACK 0x00004000 + +/* PCSC2 Bit Masks */ +#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003 +#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001 + +/* PCSS1 Bit Masks */ +#define IXGB_PCSS1_LOCAL_FAULT 0x00000080 +#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004 + +/* PCSS2 Bit Masks */ +#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000 +#define IXGB_PCSS2_DEV_PRES 0x00004000 +#define IXGB_PCSS2_TX_LF 0x00000800 +#define IXGB_PCSS2_RX_LF 0x00000400 +#define IXGB_PCSS2_10GBW 0x00000004 +#define IXGB_PCSS2_10GBX 0x00000002 +#define IXGB_PCSS2_10GBR 0x00000001 + +/* XPCSS Bit Masks */ +#define IXGB_XPCSS_ALIGN_STATUS 0x00001000 +#define IXGB_XPCSS_PATTERN_TEST 0x00000800 +#define IXGB_XPCSS_LANE_3_SYNC 0x00000008 +#define IXGB_XPCSS_LANE_2_SYNC 0x00000004 +#define IXGB_XPCSS_LANE_1_SYNC 0x00000002 +#define IXGB_XPCSS_LANE_0_SYNC 0x00000001 + +/* XPCSTC Bit Masks */ +#define IXGB_XPCSTC_BERT_TRIG 0x00200000 +#define IXGB_XPCSTC_BERT_SST 0x00100000 +#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000 +#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17 +#define 
IXGB_XPCSTC_BERT_PSZ_INF 0x00000003 +#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001 +#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000 + +/* MSCA bit Masks */ +/* New Protocol Address */ +#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF +#define IXGB_MSCA_NP_ADDR_SHIFT 0 +/* Either Device Type or Register Address,depending on ST_CODE */ +#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000 +#define IXGB_MSCA_DEV_TYPE_SHIFT 16 +#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000 +#define IXGB_MSCA_PHY_ADDR_SHIFT 21 +#define IXGB_MSCA_OP_CODE_MASK 0x0C000000 +/* OP_CODE == 00, Address cycle, New Protocol */ +/* OP_CODE == 01, Write operation */ +/* OP_CODE == 10, Read operation */ +/* OP_CODE == 11, Read, auto increment, New Protocol */ +#define IXGB_MSCA_ADDR_CYCLE 0x00000000 +#define IXGB_MSCA_WRITE 0x04000000 +#define IXGB_MSCA_READ 0x08000000 +#define IXGB_MSCA_READ_AUTOINC 0x0C000000 +#define IXGB_MSCA_OP_CODE_SHIFT 26 +#define IXGB_MSCA_ST_CODE_MASK 0x30000000 +/* ST_CODE == 00, New Protocol */ +/* ST_CODE == 01, Old Protocol */ +#define IXGB_MSCA_NEW_PROTOCOL 0x00000000 +#define IXGB_MSCA_OLD_PROTOCOL 0x10000000 +#define IXGB_MSCA_ST_CODE_SHIFT 28 +/* Initiate command, self-clearing when command completes */ +#define IXGB_MSCA_MDI_COMMAND 0x40000000 +/*MDI In Progress Enable. */ +#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000 + +/* MSRWD bit masks */ +#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGB_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGB_MSRWD_READ_DATA_SHIFT 16 + +/* Definitions for the optics devices on the MDIO bus. */ +#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */ + +#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */ + +/* Vendor-specific MDIO registers */ +#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */ +#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */ + +#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80 +#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00 +#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */ + +/* Layout of a single receive descriptor. The controller assumes that this + * structure is packed into 16 bytes, which is a safe assumption with most + * compilers. However, some compilers may insert padding between the fields, + * in which case the structure must be packed in some compiler-specific + * manner. */ +struct ixgb_rx_desc { + __le64 buff_addr; + __le16 length; + __le16 reserved; + u8 status; + u8 errors; + __le16 special; +}; + +#define IXGB_RX_DESC_STATUS_DD 0x01 +#define IXGB_RX_DESC_STATUS_EOP 0x02 +#define IXGB_RX_DESC_STATUS_IXSM 0x04 +#define IXGB_RX_DESC_STATUS_VP 0x08 +#define IXGB_RX_DESC_STATUS_TCPCS 0x20 +#define IXGB_RX_DESC_STATUS_IPCS 0x40 +#define IXGB_RX_DESC_STATUS_PIF 0x80 + +#define IXGB_RX_DESC_ERRORS_CE 0x01 +#define IXGB_RX_DESC_ERRORS_SE 0x02 +#define IXGB_RX_DESC_ERRORS_P 0x08 +#define IXGB_RX_DESC_ERRORS_TCPE 0x20 +#define IXGB_RX_DESC_ERRORS_IPE 0x40 +#define IXGB_RX_DESC_ERRORS_RXE 0x80 + +#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ + +/* Layout of a single transmit descriptor. The controller assumes that this + * structure is packed into 16 bytes, which is a safe assumption with most + * compilers. 
However, some compilers may insert padding between the fields, + * in which case the structure must be packed in some compiler-specific + * manner. */ +struct ixgb_tx_desc { + __le64 buff_addr; + __le32 cmd_type_len; + u8 status; + u8 popts; + __le16 vlan; +}; + +#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF +#define IXGB_TX_DESC_TYPE_MASK 0x00F00000 +#define IXGB_TX_DESC_TYPE_SHIFT 20 +#define IXGB_TX_DESC_CMD_MASK 0xFF000000 +#define IXGB_TX_DESC_CMD_SHIFT 24 +#define IXGB_TX_DESC_CMD_EOP 0x01000000 +#define IXGB_TX_DESC_CMD_TSE 0x04000000 +#define IXGB_TX_DESC_CMD_RS 0x08000000 +#define IXGB_TX_DESC_CMD_VLE 0x40000000 +#define IXGB_TX_DESC_CMD_IDE 0x80000000 + +#define IXGB_TX_DESC_TYPE 0x00100000 + +#define IXGB_TX_DESC_STATUS_DD 0x01 + +#define IXGB_TX_DESC_POPTS_IXSM 0x01 +#define IXGB_TX_DESC_POPTS_TXSM 0x02 +#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ + +struct ixgb_context_desc { + u8 ipcss; + u8 ipcso; + __le16 ipcse; + u8 tucss; + u8 tucso; + __le16 tucse; + __le32 cmd_type_len; + u8 status; + u8 hdr_len; + __le16 mss; +}; + +#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000 +#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000 +#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000 +#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000 +#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000 + +#define IXGB_CONTEXT_DESC_TYPE 0x00000000 + +#define IXGB_CONTEXT_DESC_STATUS_DD 0x01 + +/* Filters */ +#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ +#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */ + +#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0 +#define ENET_HEADER_SIZE 14 +#define ENET_FCS_LENGTH 4 +#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128 +#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60 +#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514 +#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Phy Addresses */ +#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */ +#define IXGB_XAUII_PHY_ADDR 0x1 /* Xauii transceiver phy address */ +#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */ + +/* This structure takes a 64k flash and maps it for identification commands */ +struct ixgb_flash_buffer { + u8 manufacturer_id; + u8 device_id; + u8 filler1[0x2AA8]; + u8 cmd2; + u8 filler2[0x2AAA]; + u8 cmd1; + u8 filler3[0xAAAA]; +}; + +/* Flow control parameters */ +struct ixgb_fc { + u32 high_water; /* Flow Control High-water */ + u32 low_water; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + ixgb_fc_type type; /* Type of flow control */ +}; + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* Phy definitions */ +#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF +#define IXGB_MAX_PHY_ADDRESS 31 +#define IXGB_MAX_PHY_DEV_TYPE 31 + +/* Bus parameters */ +struct ixgb_bus { + ixgb_bus_speed speed; + ixgb_bus_width width; + ixgb_bus_type type; +}; + +struct ixgb_hw { + u8 __iomem *hw_addr;/* Base Address of the hardware */ + void *back; /* Pointer to OS-dependent struct */ + struct ixgb_fc fc; /* Flow control parameters */ + struct ixgb_bus bus; /* Bus parameters */ + u32 phy_id; /* Phy Identifier */ + u32 phy_addr; /* XGMII address of Phy */ + ixgb_mac_type mac_type; /* Identifier for MAC controller */ + ixgb_phy_type phy_type; /* Transceiver/phy identifier */ + u32 max_frame_size; /* Maximum frame size supported */ + u32 mc_filter_type; /* Multicast filter hash type */ + u32 num_mc_addrs; /* Number of current Multicast addrs */ + u8 curr_mac_addr[ETH_ALEN]; /* Individual address currently programmed in MAC */ + u32 num_tx_desc; /* Number of Transmit descriptors */ + u32 num_rx_desc; /* Number of Receive descriptors */ + u32 rx_buffer_size; /* Size of Receive buffer */ + bool link_up; /* true if link is valid */ + bool adapter_stopped; /* State of adapter */ + u16 device_id; /* device id from PCI configuration space */ + u16 vendor_id; /* vendor id from PCI configuration space */ + u8 revision_id; /* revision id from PCI configuration space */ + u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ + u16 subsystem_id; /* subsystem id from PCI configuration space */ + u32 bar0; /* Base Address registers */ + u32 bar1; + u32 bar2; + u32 bar3; + u16 pci_cmd_word; /* PCI command register id from PCI configuration space */ + __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ + unsigned long io_base; /* Our I/O mapped location */ + u32 lastLFC; + u32 lastRFC; +}; + +/* Statistics reported by the hardware */ +struct ixgb_hw_stats { + u64 tprl; + u64 tprh; + u64 gprcl; + u64 gprch; + u64 bprcl; + u64 bprch; + u64 mprcl; + u64 mprch; + u64 uprcl; + u64 uprch; + u64 vprcl; + u64 vprch; + u64 jprcl; + u64 jprch; + u64 gorcl; + u64 gorch; + u64 torl; + u64 torh; + u64 rnbc; + u64 ruc; + u64 roc; + u64 rlec; + u64 crcerrs; + u64 icbc; + u64 ecbc; + u64 mpc; + u64 tptl; + u64 tpth; + u64 gptcl; + u64 gptch; + u64 bptcl; + u64 bptch; + u64 mptcl; + u64 mptch; + u64 uptcl; + u64 uptch; + u64 vptcl; + u64 vptch; + u64 jptcl; + u64 jptch; + u64 gotcl; + u64 gotch; + u64 totl; + u64 toth; + u64 dc; + u64 plt64c; + u64 tsctc; + u64 tsctfc; + u64 ibic; + u64 rfc; + u64 lfc; + u64 pfrc; + u64 pftc; + u64 mcfrc; + u64 mcftc; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 rjc; +}; + +/* Function Prototypes */ +bool ixgb_adapter_stop(struct ixgb_hw *hw); +bool ixgb_init_hw(struct ixgb_hw *hw); +bool ixgb_adapter_start(struct ixgb_hw *hw); +void ixgb_check_for_link(struct ixgb_hw *hw); +bool ixgb_check_for_bad_link(struct ixgb_hw *hw); + +void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index); + +/* Filters (multicast, vlan, receive) */ +void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 pad); + +/* Vfta functions */ +void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value); + +/* Access functions to eeprom data */ +void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr); +u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw); +u16 
ixgb_get_ee_device_id(struct ixgb_hw *hw); +bool ixgb_get_eeprom_data(struct ixgb_hw *hw); +__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index); + +/* Everything else */ +void ixgb_led_on(struct ixgb_hw *hw); +void ixgb_led_off(struct ixgb_hw *hw); +void ixgb_write_pci_cfg(struct ixgb_hw *hw, + u32 reg, + u16 * value); + + +#endif /* _IXGB_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h new file mode 100644 index 000000000..32c1b302d --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h @@ -0,0 +1,48 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGB_IDS_H_ +#define _IXGB_IDS_H_ + +/********************************************************************** +** The Device and Vendor IDs for 10 Gigabit MACs +**********************************************************************/ + +#define IXGB_DEVICE_ID_82597EX 0x1048 +#define IXGB_DEVICE_ID_82597EX_SR 0x1A48 +#define IXGB_DEVICE_ID_82597EX_LR 0x1B48 +#define IXGB_SUBDEVICE_ID_A11F 0xA11F +#define IXGB_SUBDEVICE_ID_A01F 0xA01F + +#define IXGB_DEVICE_ID_82597EX_CX4 0x109E +#define IXGB_SUBDEVICE_ID_A00C 0xA00C +#define IXGB_SUBDEVICE_ID_A01C 0xA01C +#define IXGB_SUBDEVICE_ID_7036 0x7036 + +#endif /* #ifndef _IXGB_IDS_H_ */ +/* End of File */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c new file mode 100644 index 000000000..31f914593 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -0,0 +1,2368 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/prefetch.h> +#include "ixgb.h" + +char ixgb_driver_name[] = "ixgb"; +static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; + +#define DRIVERNAPI "-NAPI" +#define DRV_VERSION "1.0.135-k2" DRIVERNAPI +const char ixgb_driver_version[] = DRV_VERSION; +static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation."; + +#define IXGB_CB_LENGTH 256 +static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* ixgb_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ixgb_pci_tbl[] = { + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl); + +/* Local Function Prototypes */ +static int ixgb_init_module(void); +static void ixgb_exit_module(void); +static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void ixgb_remove(struct pci_dev *pdev); +static int ixgb_sw_init(struct ixgb_adapter *adapter); +static int ixgb_open(struct net_device *netdev); +static int ixgb_close(struct net_device *netdev); +static void ixgb_configure_tx(struct ixgb_adapter *adapter); +static void ixgb_configure_rx(struct ixgb_adapter *adapter); +static void ixgb_setup_rctl(struct ixgb_adapter *adapter); +static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter); +static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter); +static void ixgb_set_multi(struct net_device *netdev); +static void ixgb_watchdog(unsigned long data); +static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static struct net_device_stats *ixgb_get_stats(struct net_device *netdev); +static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); +static int ixgb_set_mac(struct net_device *netdev, void *p); +static irqreturn_t ixgb_intr(int irq, void *data); +static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter); + +static int ixgb_clean(struct napi_struct *, int); +static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int); +static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int); + +static void ixgb_tx_timeout(struct net_device *dev); +static void ixgb_tx_timeout_task(struct work_struct *work); + +static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); +static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); +static int ixgb_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void ixgb_restore_vlan(struct ixgb_adapter *adapter); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void
ixgb_netpoll(struct net_device *dev); +#endif + +static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, + enum pci_channel_state state); +static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); +static void ixgb_io_resume (struct pci_dev *pdev); + +static const struct pci_error_handlers ixgb_err_handler = { + .error_detected = ixgb_io_error_detected, + .slot_reset = ixgb_io_slot_reset, + .resume = ixgb_io_resume, +}; + +static struct pci_driver ixgb_driver = { + .name = ixgb_driver_name, + .id_table = ixgb_pci_tbl, + .probe = ixgb_probe, + .remove = ixgb_remove, + .err_handler = &ixgb_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * ixgb_init_module - Driver Registration Routine + * + * ixgb_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ + +static int __init +ixgb_init_module(void) +{ + pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version); + pr_info("%s\n", ixgb_copyright); + + return pci_register_driver(&ixgb_driver); +} + +module_init(ixgb_init_module); + +/** + * ixgb_exit_module - Driver Exit Cleanup Routine + * + * ixgb_exit_module is called just before the driver is removed + * from memory. + **/ + +static void __exit +ixgb_exit_module(void) +{ + pci_unregister_driver(&ixgb_driver); +} + +module_exit(ixgb_exit_module); + +/** + * ixgb_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ + +static void +ixgb_irq_disable(struct ixgb_adapter *adapter) +{ + IXGB_WRITE_REG(&adapter->hw, IMC, ~0); + IXGB_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * ixgb_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ + +static void +ixgb_irq_enable(struct ixgb_adapter *adapter) +{ + u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | + IXGB_INT_TXDW | IXGB_INT_LSC; + if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN) + val |= IXGB_INT_GPI0; + IXGB_WRITE_REG(&adapter->hw, IMS, val); + IXGB_WRITE_FLUSH(&adapter->hw); +} + +int +ixgb_up(struct ixgb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err, irq_flags = IRQF_SHARED; + int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + struct ixgb_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + + ixgb_rar_set(hw, netdev->dev_addr, 0); + ixgb_set_multi(netdev); + + ixgb_restore_vlan(adapter); + + ixgb_configure_tx(adapter); + ixgb_setup_rctl(adapter); + ixgb_configure_rx(adapter); + ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring)); + + /* disable interrupts and get the hardware into a known state */ + IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff); + + /* only enable MSI if bus is in PCI-X mode */ + if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) { + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->have_msi = true; + irq_flags = 0; + } + /* proceed to try to request regular interrupt */ + } + + err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags, + netdev->name, netdev); + if (err) { + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + netif_err(adapter, 
probe, adapter->netdev, + "Unable to allocate interrupt Error: %d\n", err); + return err; + } + + if ((hw->max_frame_size != max_frame) || + (hw->max_frame_size != + (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) { + + hw->max_frame_size = max_frame; + + IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); + + if (hw->max_frame_size > + IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { + u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); + + if (!(ctrl0 & IXGB_CTRL0_JFE)) { + ctrl0 |= IXGB_CTRL0_JFE; + IXGB_WRITE_REG(hw, CTRL0, ctrl0); + } + } + } + + clear_bit(__IXGB_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + ixgb_irq_enable(adapter); + + netif_wake_queue(netdev); + + mod_timer(&adapter->watchdog_timer, jiffies); + + return 0; +} + +void +ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) +{ + struct net_device *netdev = adapter->netdev; + + /* prevent the interrupt handler from restarting watchdog */ + set_bit(__IXGB_DOWN, &adapter->flags); + + netif_carrier_off(netdev); + + napi_disable(&adapter->napi); + /* waiting for NAPI to complete can re-enable interrupts */ + ixgb_irq_disable(adapter); + free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + + if (kill_watchdog) + del_timer_sync(&adapter->watchdog_timer); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + netif_stop_queue(netdev); + + ixgb_reset(adapter); + ixgb_clean_tx_ring(adapter); + ixgb_clean_rx_ring(adapter); +} + +void +ixgb_reset(struct ixgb_adapter *adapter) +{ + struct ixgb_hw *hw = &adapter->hw; + + ixgb_adapter_stop(hw); + if (!ixgb_init_hw(hw)) + netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n"); + + /* restore frame size information */ + IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT); + if (hw->max_frame_size > + IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { + u32 ctrl0 = IXGB_READ_REG(hw, CTRL0); + if (!(ctrl0 & IXGB_CTRL0_JFE)) { + ctrl0 |= IXGB_CTRL0_JFE; + IXGB_WRITE_REG(hw, CTRL0, ctrl0); + } + } +} + +static netdev_features_t +ixgb_fix_features(struct net_device *netdev, netdev_features_t features) +{ + /* + * Tx VLAN insertion does not work per HW design when Rx stripping is + * disabled. 
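+ * The NETIF_F_HW_VLAN_CTAG_TX flag is therefore cleared below whenever
+ * NETIF_F_HW_VLAN_CTAG_RX is not requested.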
+ */ + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int +ixgb_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX))) + return 0; + + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) { + ixgb_down(adapter, true); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + + return 0; +} + + +static const struct net_device_ops ixgb_netdev_ops = { + .ndo_open = ixgb_open, + .ndo_stop = ixgb_close, + .ndo_start_xmit = ixgb_xmit_frame, + .ndo_get_stats = ixgb_get_stats, + .ndo_set_rx_mode = ixgb_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgb_set_mac, + .ndo_change_mtu = ixgb_change_mtu, + .ndo_tx_timeout = ixgb_tx_timeout, + .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgb_netpoll, +#endif + .ndo_fix_features = ixgb_fix_features, + .ndo_set_features = ixgb_set_features, +}; + +/** + * ixgb_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgb_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgb_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ + +static int +ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct ixgb_adapter *adapter; + static int cards_found = 0; + int pci_using_dac; + int i; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA configuration, aborting\n"); + goto err_dma_mask; + } + } + + err = pci_request_regions(pdev, ixgb_driver_name); + if (err) + goto err_request_regions; + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct ixgb_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!adapter->hw.hw_addr) { + err = -EIO; + goto err_ioremap; + } + + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + adapter->hw.io_base = pci_resource_start(pdev, i); + break; + } + } + + netdev->netdev_ops = &ixgb_netdev_ops; + ixgb_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* setup the private structure */ + + err = ixgb_sw_init(adapter); + if (err) + goto err_sw_init; + + netdev->hw_features = NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_HW_CSUM | + 
NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_RXCSUM; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + /* make sure the EEPROM is good */ + + if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { + netif_err(adapter, probe, adapter->netdev, + "The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n"); + err = -EIO; + goto err_eeprom; + } + + adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = ixgb_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + netif_info(adapter, probe, adapter->netdev, + "Intel(R) PRO/10GbE Network Connection\n"); + ixgb_check_options(adapter); + /* reset the hardware with the new settings */ + + ixgb_reset(adapter); + + cards_found++; + return 0; + +err_register: +err_sw_init: +err_eeprom: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_request_regions: +err_dma_mask: + pci_disable_device(pdev); + return err; +} + +/** + * ixgb_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgb_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ + +static void +ixgb_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + unregister_netdev(netdev); + + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + + free_netdev(netdev); + pci_disable_device(pdev); +} + +/** + * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter) + * @adapter: board private structure to initialize + * + * ixgb_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
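+ * The adapter is left flagged __IXGB_DOWN here; ixgb_up() clears that bit
+ * when the interface is actually brought up.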
+ **/ + +static int +ixgb_sw_init(struct ixgb_adapter *adapter) +{ + struct ixgb_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + + hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */ + + if ((hw->device_id == IXGB_DEVICE_ID_82597EX) || + (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) || + (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) || + (hw->device_id == IXGB_DEVICE_ID_82597EX_SR)) + hw->mac_type = ixgb_82597; + else { + /* should never have loaded on this device */ + netif_err(adapter, probe, adapter->netdev, "unsupported device id\n"); + } + + /* enable flow control to be programmed */ + hw->fc.send_xon = 1; + + set_bit(__IXGB_DOWN, &adapter->flags); + return 0; +} + +/** + * ixgb_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ + +static int +ixgb_open(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + int err; + + /* allocate transmit descriptors */ + err = ixgb_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + netif_carrier_off(netdev); + + /* allocate receive descriptors */ + + err = ixgb_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + + err = ixgb_up(adapter); + if (err) + goto err_up; + + netif_start_queue(netdev); + + return 0; + +err_up: + ixgb_free_rx_resources(adapter); +err_setup_rx: + ixgb_free_tx_resources(adapter); +err_setup_tx: + ixgb_reset(adapter); + + return err; +} + +/** + * ixgb_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ + +static int +ixgb_close(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + ixgb_down(adapter, true); + + ixgb_free_tx_resources(adapter); + ixgb_free_rx_resources(adapter); + + return 0; +} + +/** + * ixgb_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ + +int +ixgb_setup_tx_resources(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgb_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + vfree(txdr->buffer_info); + return -ENOMEM; + } + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset. + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ + +static void +ixgb_configure_tx(struct ixgb_adapter *adapter) +{ + u64 tdba = adapter->tx_ring.dma; + u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); + u32 tctl; + struct ixgb_hw *hw = &adapter->hw; + + /* Setup the Base and Length of the Tx Descriptor Ring + * tx_ring.dma can be either a 32 or 64 bit value + */ + + IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); + IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32)); + + IXGB_WRITE_REG(hw, TDLEN, tdlen); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + IXGB_WRITE_REG(hw, TDH, 0); + IXGB_WRITE_REG(hw, TDT, 0); + + /* don't set up txdctl, it induces performance problems if configured + * incorrectly */ + /* Set the Tx Interrupt Delay register */ + + IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay); + + /* Program the Transmit Control Register */ + + tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; + IXGB_WRITE_REG(hw, TCTL, tctl); + + /* Setup Transmit Descriptor Settings for this adapter */ + adapter->tx_cmd_type = + IXGB_TX_DESC_TYPE | + (adapter->tx_int_delay_enable ? 
IXGB_TX_DESC_CMD_IDE : 0); +} + +/** + * ixgb_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ + +int +ixgb_setup_rx_resources(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *rxdr = &adapter->rx_ring; + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgb_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + + if (!rxdr->desc) { + vfree(rxdr->buffer_info); + return -ENOMEM; + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + + return 0; +} + +/** + * ixgb_setup_rctl - configure the receive control register + * @adapter: Board private structure + **/ + +static void +ixgb_setup_rctl(struct ixgb_adapter *adapter) +{ + u32 rctl; + + rctl = IXGB_READ_REG(&adapter->hw, RCTL); + + rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); + + rctl |= + IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | + IXGB_RCTL_RXEN | IXGB_RCTL_CFF | + (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); + + rctl |= IXGB_RCTL_SECRC; + + if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048) + rctl |= IXGB_RCTL_BSIZE_2048; + else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096) + rctl |= IXGB_RCTL_BSIZE_4096; + else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192) + rctl |= IXGB_RCTL_BSIZE_8192; + else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384) + rctl |= IXGB_RCTL_BSIZE_16384; + + IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); +} + +/** + * ixgb_configure_rx - Configure 82597 Receive Unit after Reset. + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
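+ * Receives stay disabled (RXEN cleared) while the descriptor ring is
+ * programmed; the final RCTL write restores the value prepared by
+ * ixgb_setup_rctl(), which re-enables the receiver.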
+ **/ + +static void +ixgb_configure_rx(struct ixgb_adapter *adapter) +{ + u64 rdba = adapter->rx_ring.dma; + u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); + struct ixgb_hw *hw = &adapter->hw; + u32 rctl; + u32 rxcsum; + + /* make sure receives are disabled while setting up the descriptors */ + + rctl = IXGB_READ_REG(hw, RCTL); + IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN); + + /* set the Receive Delay Timer Register */ + + IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay); + + /* Setup the Base and Length of the Rx Descriptor Ring */ + + IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); + IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32)); + + IXGB_WRITE_REG(hw, RDLEN, rdlen); + + /* Setup the HW Rx Head and Tail Descriptor Pointers */ + IXGB_WRITE_REG(hw, RDH, 0); + IXGB_WRITE_REG(hw, RDT, 0); + + /* due to the hardware errata with RXDCTL, we are unable to use any of + * the performance enhancing features of it without causing other + * subtle bugs, some of the bugs could include receive length + * corruption at high data rates (WTHRESH > 0) and/or receive + * descriptor ring irregularites (particularly in hardware cache) */ + IXGB_WRITE_REG(hw, RXDCTL, 0); + + /* Enable Receive Checksum Offload for TCP and UDP */ + if (adapter->rx_csum) { + rxcsum = IXGB_READ_REG(hw, RXCSUM); + rxcsum |= IXGB_RXCSUM_TUOFL; + IXGB_WRITE_REG(hw, RXCSUM, rxcsum); + } + + /* Enable Receives */ + + IXGB_WRITE_REG(hw, RCTL, rctl); +} + +/** + * ixgb_free_tx_resources - Free Tx Resources + * @adapter: board private structure + * + * Free all transmit software resources + **/ + +void +ixgb_free_tx_resources(struct ixgb_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgb_clean_tx_ring(adapter); + + vfree(adapter->tx_ring.buffer_info); + adapter->tx_ring.buffer_info = NULL; + + dma_free_coherent(&pdev->dev, adapter->tx_ring.size, + adapter->tx_ring.desc, adapter->tx_ring.dma); + + adapter->tx_ring.desc = NULL; +} + +static void +ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter, + struct ixgb_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* these fields must always be initialized in tx + * buffer_info->length = 0; + * buffer_info->next_to_watch = 0; */ +} + +/** + * ixgb_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + **/ + +static void +ixgb_clean_tx_ring(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + struct ixgb_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + ixgb_unmap_and_free_tx_resource(adapter, buffer_info); + } + + size = sizeof(struct ixgb_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + IXGB_WRITE_REG(&adapter->hw, TDH, 0); + IXGB_WRITE_REG(&adapter->hw, TDT, 0); +} + +/** + * ixgb_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * + * Free all receive software resources 
+ **/ + +void +ixgb_free_rx_resources(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct pci_dev *pdev = adapter->pdev; + + ixgb_clean_rx_ring(adapter); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgb_clean_rx_ring - Free Rx Buffers + * @adapter: board private structure + **/ + +static void +ixgb_clean_rx_ring(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct ixgb_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->length = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + size = sizeof(struct ixgb_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + IXGB_WRITE_REG(&adapter->hw, RDH, 0); + IXGB_WRITE_REG(&adapter->hw, RDT, 0); +} + +/** + * ixgb_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ + +static int +ixgb_set_mac(struct net_device *netdev, void *p) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + ixgb_rar_set(&adapter->hw, addr->sa_data, 0); + + return 0; +} + +/** + * ixgb_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
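+ * It also enables or disables hardware VLAN tag stripping to match the
+ * current NETIF_F_HW_VLAN_CTAG_RX setting.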
+ **/ + +static void +ixgb_set_multi(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 rctl; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = IXGB_READ_REG(hw, RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); + /* disable VLAN filtering */ + rctl &= ~IXGB_RCTL_CFIEN; + rctl &= ~IXGB_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) { + rctl |= IXGB_RCTL_MPE; + rctl &= ~IXGB_RCTL_UPE; + } else { + rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); + } + /* enable VLAN filtering */ + rctl |= IXGB_RCTL_VFE; + rctl &= ~IXGB_RCTL_CFIEN; + } + + if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { + rctl |= IXGB_RCTL_MPE; + IXGB_WRITE_REG(hw, RCTL, rctl); + } else { + u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES * + ETH_ALEN, GFP_ATOMIC); + u8 *addr; + if (!mta) + goto alloc_failed; + + IXGB_WRITE_REG(hw, RCTL, rctl); + + addr = mta; + netdev_for_each_mc_addr(ha, netdev) { + memcpy(addr, ha->addr, ETH_ALEN); + addr += ETH_ALEN; + } + + ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); + kfree(mta); + } + +alloc_failed: + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + ixgb_vlan_strip_enable(adapter); + else + ixgb_vlan_strip_disable(adapter); + +} + +/** + * ixgb_watchdog - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + **/ + +static void +ixgb_watchdog(unsigned long data) +{ + struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; + struct net_device *netdev = adapter->netdev; + struct ixgb_desc_ring *txdr = &adapter->tx_ring; + + ixgb_check_for_link(&adapter->hw); + + if (ixgb_check_for_bad_link(&adapter->hw)) { + /* force the reset path */ + netif_stop_queue(netdev); + } + + if (adapter->hw.link_up) { + if (!netif_carrier_ok(netdev)) { + netdev_info(netdev, + "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n", + (adapter->hw.fc.type == ixgb_fc_full) ? + "RX/TX" : + (adapter->hw.fc.type == ixgb_fc_rx_pause) ? + "RX" : + (adapter->hw.fc.type == ixgb_fc_tx_pause) ? + "TX" : "None"); + adapter->link_speed = 10000; + adapter->link_duplex = FULL_DUPLEX; + netif_carrier_on(netdev); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + netdev_info(netdev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + } + } + + ixgb_update_stats(adapter); + + if (!netif_carrier_ok(netdev)) { + if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
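+ * The timer callback runs in softirq context, so the reset is deferred
+ * to tx_timeout_task, which calls ixgb_down()/ixgb_up() from process
+ * context.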
*/ + schedule_work(&adapter->tx_timeout_task); + /* return immediately since reset is imminent */ + return; + } + } + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* generate an interrupt to force clean up of any stragglers */ + IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW); + + /* Reset the timer */ + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); +} + +#define IXGB_TX_FLAGS_CSUM 0x00000001 +#define IXGB_TX_FLAGS_VLAN 0x00000002 +#define IXGB_TX_FLAGS_TSO 0x00000004 + +static int +ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) +{ + struct ixgb_context_desc *context_desc; + unsigned int i; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + u16 ipcse, tucse, mss; + + if (likely(skb_is_gso(skb))) { + struct ixgb_buffer *buffer_info; + struct iphdr *iph; + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); + ipcss = skb_network_offset(skb); + ipcso = (void *)&(iph->check) - (void *)skb->data; + ipcse = skb_transport_offset(skb) - 1; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + i = adapter->tx_ring.next_to_use; + context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); + buffer_info = &adapter->tx_ring.buffer_info[i]; + WARN_ON(buffer_info->dma != 0); + + context_desc->ipcss = ipcss; + context_desc->ipcso = ipcso; + context_desc->ipcse = cpu_to_le16(ipcse); + context_desc->tucss = tucss; + context_desc->tucso = tucso; + context_desc->tucse = cpu_to_le16(tucse); + context_desc->mss = cpu_to_le16(mss); + context_desc->hdr_len = hdr_len; + context_desc->status = 0; + context_desc->cmd_type_len = cpu_to_le32( + IXGB_CONTEXT_DESC_TYPE + | IXGB_CONTEXT_DESC_CMD_TSE + | IXGB_CONTEXT_DESC_CMD_IP + | IXGB_CONTEXT_DESC_CMD_TCP + | IXGB_CONTEXT_DESC_CMD_IDE + | (skb->len - (hdr_len))); + + + if (++i == adapter->tx_ring.count) i = 0; + adapter->tx_ring.next_to_use = i; + + return 1; + } + + return 0; +} + +static bool +ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) +{ + struct ixgb_context_desc *context_desc; + unsigned int i; + u8 css, cso; + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + struct ixgb_buffer *buffer_info; + css = skb_checksum_start_offset(skb); + cso = css + skb->csum_offset; + + i = adapter->tx_ring.next_to_use; + context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); + buffer_info = &adapter->tx_ring.buffer_info[i]; + WARN_ON(buffer_info->dma != 0); + + context_desc->tucss = css; + context_desc->tucso = cso; + context_desc->tucse = 0; + /* zero out any previously existing data in one instruction */ + *(u32 *)&(context_desc->ipcss) = 0; + context_desc->status = 0; + context_desc->hdr_len = 0; + context_desc->mss = 0; + context_desc->cmd_type_len = + cpu_to_le32(IXGB_CONTEXT_DESC_TYPE + | IXGB_TX_DESC_CMD_IDE); + + if (++i == adapter->tx_ring.count) i = 0; + adapter->tx_ring.next_to_use = i; + + return true; + } + + return false; +} + +#define IXGB_MAX_TXD_PWR 14 +#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR) + +static int +ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, + unsigned int first) +{ + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + struct pci_dev *pdev = adapter->pdev; + struct ixgb_buffer *buffer_info; + int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int mss = skb_shinfo(skb)->gso_size; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned
int f; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, IXGB_MAX_DATA_PER_TXD); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (unlikely(mss && !nr_frags && size == len && size > 8)) + size -= 4; + + buffer_info->length = size; + WARN_ON(buffer_info->dma != 0); + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = 0; + + len -= size; + offset += size; + count++; + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + offset = 0; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, IXGB_MAX_DATA_PER_TXD); + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (unlikely(mss && (f == (nr_frags - 1)) + && size == len && size > 8)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = + skb_frag_dma_map(&pdev->dev, frag, offset, size, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = 0; + + len -= size; + offset += size; + count++; + } + } + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i==0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + ixgb_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void +ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags) +{ + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + struct ixgb_tx_desc *tx_desc = NULL; + struct ixgb_buffer *buffer_info; + u32 cmd_type_len = adapter->tx_cmd_type; + u8 status = 0; + u8 popts = 0; + unsigned int i; + + if (tx_flags & IXGB_TX_FLAGS_TSO) { + cmd_type_len |= IXGB_TX_DESC_CMD_TSE; + popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM); + } + + if (tx_flags & IXGB_TX_FLAGS_CSUM) + popts |= IXGB_TX_DESC_POPTS_TXSM; + + if (tx_flags & IXGB_TX_FLAGS_VLAN) + cmd_type_len |= IXGB_TX_DESC_CMD_VLE; + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = IXGB_TX_DESC(*tx_ring, i); + tx_desc->buff_addr = cpu_to_le64(buffer_info->dma); + tx_desc->cmd_type_len = + cpu_to_le32(cmd_type_len | buffer_info->length); + tx_desc->status = status; + tx_desc->popts = popts; + tx_desc->vlan = cpu_to_le16(vlan_id); + + if (++i == tx_ring->count) i = 0; + } + + tx_desc->cmd_type_len |= + cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
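+ * Without the barrier the MMIO write to TDT below could become visible
+ * before the descriptor writes above, letting the hardware fetch stale
+ * descriptor contents.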
*/ + wmb(); + + tx_ring->next_to_use = i; + IXGB_WRITE_REG(&adapter->hw, TDT, i); +} + +static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (likely(IXGB_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int ixgb_maybe_stop_tx(struct net_device *netdev, + struct ixgb_desc_ring *tx_ring, int size) +{ + if (likely(IXGB_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __ixgb_maybe_stop_tx(netdev, size); +} + + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ + (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) +#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \ + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \ + + 1 /* one more needed for sentinel TSO workaround */ + +static netdev_tx_t +ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + unsigned int first; + unsigned int tx_flags = 0; + int vlan_id = 0; + int count = 0; + int tso; + + if (test_bit(__IXGB_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, + DESC_NEEDED))) + return NETDEV_TX_BUSY; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= IXGB_TX_FLAGS_VLAN; + vlan_id = skb_vlan_tag_get(skb); + } + + first = adapter->tx_ring.next_to_use; + + tso = ixgb_tso(adapter, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) + tx_flags |= IXGB_TX_FLAGS_TSO; + else if (ixgb_tx_csum(adapter, skb)) + tx_flags |= IXGB_TX_FLAGS_CSUM; + + count = ixgb_tx_map(adapter, skb, first); + + if (count) { + ixgb_tx_queue(adapter, count, vlan_id, tx_flags); + /* Make sure there is space in the ring for the next send. */ + ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED); + + } else { + dev_kfree_skb_any(skb); + adapter->tx_ring.buffer_info[first].time_stamp = 0; + adapter->tx_ring.next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * ixgb_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ + +static void +ixgb_tx_timeout(struct net_device *netdev) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->tx_timeout_task); +} + +static void +ixgb_tx_timeout_task(struct work_struct *work) +{ + struct ixgb_adapter *adapter = + container_of(work, struct ixgb_adapter, tx_timeout_task); + + adapter->tx_timeout_count++; + ixgb_down(adapter, true); + ixgb_up(adapter); +} + +/** + * ixgb_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
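+ * (ixgb_update_stats(), invoked from ixgb_watchdog().)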
+ **/ + +static struct net_device_stats * +ixgb_get_stats(struct net_device *netdev) +{ + return &netdev->stats; +} + +/** + * ixgb_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ + +static int +ixgb_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH; + + /* MTU < 68 is an error for IPv4 traffic, just don't allow it */ + if ((new_mtu < 68) || + (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) { + netif_err(adapter, probe, adapter->netdev, + "Invalid MTU setting %d\n", new_mtu); + return -EINVAL; + } + + if (old_max_frame == max_frame) + return 0; + + if (netif_running(netdev)) + ixgb_down(adapter, true); + + adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */ + + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ixgb_up(adapter); + + return 0; +} + +/** + * ixgb_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ + +void +ixgb_update_stats(struct ixgb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + /* Prevent stats update while adapter is being reset */ + if (pci_channel_offline(pdev)) + return; + + if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { + u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); + u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); + u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); + u64 bcast = ((u64)bcast_h << 32) | bcast_l; + + multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); + /* fix up multicast stats by removing broadcasts */ + if (multi >= bcast) + multi -= bcast; + + adapter->stats.mprcl += (multi & 0xFFFFFFFF); + adapter->stats.mprch += (multi >> 32); + adapter->stats.bprcl += bcast_l; + adapter->stats.bprch += bcast_h; + } else { + adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); + adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); + adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); + adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); + } + adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); + adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); + adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); + adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); + adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); + adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); + adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); + adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); + adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL); + adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); + adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); + adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); + adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); + adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); + adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); + adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); + adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); + adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); + adapter->stats.crcerrs += 
IXGB_READ_REG(&adapter->hw, CRCERRS); + adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); + adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); + adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); + adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); + adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); + adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); + adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); + adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); + adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); + adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); + adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); + adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); + adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); + adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); + adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); + adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); + adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); + adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); + adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); + adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); + adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); + adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); + adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); + adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); + adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); + adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); + adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); + adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); + adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); + adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); + adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); + adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC); + adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); + adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); + adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); + adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); + adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); + + /* Fill out the OS statistics structure */ + + netdev->stats.rx_packets = adapter->stats.gprcl; + netdev->stats.tx_packets = adapter->stats.gptcl; + netdev->stats.rx_bytes = adapter->stats.gorcl; + netdev->stats.tx_bytes = adapter->stats.gotcl; + netdev->stats.multicast = adapter->stats.mprcl; + netdev->stats.collisions = 0; + + /* ignore RLEC as it reports errors for padded (<64bytes) frames + * with a length in the type/len field */ + netdev->stats.rx_errors = + /* adapter->stats.rnbc + */ adapter->stats.crcerrs + + adapter->stats.ruc + + adapter->stats.roc /*+ adapter->stats.rlec */ + + adapter->stats.icbc + + adapter->stats.ecbc + adapter->stats.mpc; + + /* see above + * netdev->stats.rx_length_errors = adapter->stats.rlec; + */ + + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_fifo_errors = adapter->stats.mpc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + netdev->stats.rx_over_errors = adapter->stats.mpc; + + netdev->stats.tx_errors = 0; + netdev->stats.rx_frame_errors = 0; + netdev->stats.tx_aborted_errors = 0; + netdev->stats.tx_carrier_errors = 0; + netdev->stats.tx_fifo_errors = 0; + netdev->stats.tx_heartbeat_errors = 0; + netdev->stats.tx_window_errors = 0; +} + +#define 
IXGB_MAX_INTR 10 +/** + * ixgb_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ + +static irqreturn_t +ixgb_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct ixgb_adapter *adapter = netdev_priv(netdev); + struct ixgb_hw *hw = &adapter->hw; + u32 icr = IXGB_READ_REG(hw, ICR); + + if (unlikely(!icr)) + return IRQ_NONE; /* Not our interrupt */ + + if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) + if (!test_bit(__IXGB_DOWN, &adapter->flags)) + mod_timer(&adapter->watchdog_timer, jiffies); + + if (napi_schedule_prep(&adapter->napi)) { + + /* Disable interrupts and register for poll. The flush + of the posted write is intentionally left out. + */ + + IXGB_WRITE_REG(&adapter->hw, IMC, ~0); + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * ixgb_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ + +static int +ixgb_clean(struct napi_struct *napi, int budget) +{ + struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); + int work_done = 0; + + ixgb_clean_tx_irq(adapter); + ixgb_clean_rx_irq(adapter, &work_done, budget); + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + if (!test_bit(__IXGB_DOWN, &adapter->flags)) + ixgb_irq_enable(adapter); + } + + return work_done; +} + +/** + * ixgb_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ + +static bool +ixgb_clean_tx_irq(struct ixgb_adapter *adapter) +{ + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + struct net_device *netdev = adapter->netdev; + struct ixgb_tx_desc *tx_desc, *eop_desc; + struct ixgb_buffer *buffer_info; + unsigned int i, eop; + bool cleaned = false; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IXGB_TX_DESC(*tx_ring, eop); + + while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { + + rmb(); /* read buffer_info after eop_desc */ + for (cleaned = false; !cleaned; ) { + tx_desc = IXGB_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + if (tx_desc->popts & + (IXGB_TX_DESC_POPTS_TXSM | + IXGB_TX_DESC_POPTS_IXSM)) + adapter->hw_csum_tx_good++; + + ixgb_unmap_and_free_tx_resource(adapter, buffer_info); + + *(u32 *)&(tx_desc->status) = 0; + + cleaned = (i == eop); + if (++i == tx_ring->count) i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IXGB_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + if (unlikely(cleaned && netif_carrier_ok(netdev) && + IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
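+ * This barrier pairs with the smp_mb() in __ixgb_maybe_stop_tx() so the
+ * stop and wake paths cannot both miss each other's updates.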
*/ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__IXGB_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) + && !(IXGB_READ_REG(&adapter->hw, STATUS) & + IXGB_STATUS_TXOFF)) { + /* detected Tx unit hang */ + netif_err(adapter, drv, adapter->netdev, + "Detected Tx Unit Hang\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + IXGB_READ_REG(&adapter->hw, TDH), + IXGB_READ_REG(&adapter->hw, TDT), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->status); + netif_stop_queue(netdev); + } + } + + return cleaned; +} + +/** + * ixgb_rx_checksum - Receive Checksum Offload for 82597. + * @adapter: board private structure + * @rx_desc: receive descriptor + * @sk_buff: socket buffer with received data + **/ + +static void +ixgb_rx_checksum(struct ixgb_adapter *adapter, + struct ixgb_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* Ignore Checksum bit is set OR + * TCP Checksum has not been calculated + */ + if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || + (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { + skb_checksum_none_assert(skb); + return; + } + + /* At this point we know the hardware did the TCP checksum */ + /* now look at the TCP checksum error bit */ + if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { + /* let the stack verify checksum errors */ + skb_checksum_none_assert(skb); + adapter->hw_csum_rx_error++; + } else { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_rx_good++; + } +} + +/* + * this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static void ixgb_check_copybreak(struct napi_struct *napi, + struct ixgb_buffer *buffer_info, + u32 length, struct sk_buff **skb) +{ + struct sk_buff *new_skb; + + if (length > copybreak) + return; + + new_skb = napi_alloc_skb(napi, length); + if (!new_skb) + return; + + skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, + (*skb)->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); + /* save the skb in buffer_info as good */ + buffer_info->skb = *skb; + *skb = new_skb; +} + +/** + * ixgb_clean_rx_irq - Send received data up the network stack, + * @adapter: board private structure + **/ + +static bool +ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct ixgb_rx_desc *rx_desc, *next_rxd; + struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; + u32 length; + unsigned int i, j; + int cleaned_count = 0; + bool cleaned = false; + + i = rx_ring->next_to_clean; + rx_desc = IXGB_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + status = 
rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + if (++i == rx_ring->count) + i = 0; + next_rxd = IXGB_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + j = i + 1; + if (j == rx_ring->count) + j = 0; + next2_buffer = &rx_ring->buffer_info[j]; + prefetch(next2_buffer); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + rx_desc->length = 0; + + if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { + + /* All receives must fit into a single buffer */ + + pr_debug("Receive packet consumed multiple buffers length<%x>\n", + length); + + dev_kfree_skb_irq(skb); + goto rxdesc_done; + } + + if (unlikely(rx_desc->errors & + (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | + IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { + dev_kfree_skb_irq(skb); + goto rxdesc_done; + } + + ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb); + + /* Good Receive */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + ixgb_rx_checksum(adapter, rx_desc, skb); + + skb->protocol = eth_type_trans(skb, netdev); + if (status & IXGB_RX_DESC_STATUS_VP) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(rx_desc->special)); + + netif_receive_skb(skb); + +rxdesc_done: + /* clean up descriptor, might be written over by hw */ + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) { + ixgb_alloc_rx_buffers(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + + rx_ring->next_to_clean = i; + + cleaned_count = IXGB_DESC_UNUSED(rx_ring); + if (cleaned_count) + ixgb_alloc_rx_buffers(adapter, cleaned_count); + + return cleaned; +} + +/** + * ixgb_alloc_rx_buffers - Replace used receive buffers + * @adapter: address of board private structure + **/ + +static void +ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count) +{ + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct ixgb_rx_desc *rx_desc; + struct ixgb_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + long cleancount; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + cleancount = IXGB_DESC_UNUSED(rx_ring); + + + /* leave three descriptors unused */ + while (--cleancount > 2 && cleaned_count--) { + /* recycle! its good for you */ + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } + + rx_desc = IXGB_RX_DESC(*rx_ring, i); + rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); + /* guarantee DD bit not set now before h/w gets descriptor + * this is the rest of the workaround for h/w double + * writeback. 
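+ * ixgb_clean_rx_irq() polls the DD bit to decide whether a descriptor has
+ * completed, so a stale status here could hand an unwritten buffer to
+ * the stack.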
*/ + rx_desc->status = 0; + + + if (++i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, such + * as IA-64). */ + wmb(); + IXGB_WRITE_REG(&adapter->hw, RDT, i); + } +} + +static void +ixgb_vlan_strip_enable(struct ixgb_adapter *adapter) +{ + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); + ctrl |= IXGB_CTRL0_VME; + IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); +} + +static void +ixgb_vlan_strip_disable(struct ixgb_adapter *adapter) +{ + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); + ctrl &= ~IXGB_CTRL0_VME; + IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); +} + +static int +ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + u32 vfta, index; + + /* add VID to filter table */ + + index = (vid >> 5) & 0x7F; + vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + ixgb_write_vfta(&adapter->hw, index, vfta); + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int +ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + u32 vfta, index; + + /* remove VID from filter table */ + + index = (vid >> 5) & 0x7F; + vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + ixgb_write_vfta(&adapter->hw, index, vfta); + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +static void +ixgb_restore_vlan(struct ixgb_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ + +static void ixgb_netpoll(struct net_device *dev) +{ + struct ixgb_adapter *adapter = netdev_priv(dev); + + disable_irq(adapter->pdev->irq); + ixgb_intr(adapter->pdev->irq, dev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * ixgb_io_error_detected - called when PCI error is detected + * @pdev: pointer to pci device with error + * @state: pci channel state after error + * + * This callback is called by the PCI subsystem whenever + * a PCI bus error is detected. + */ +static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + ixgb_down(adapter, true); + + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgb_io_slot_reset - called after the pci bus has been reset. + * @pdev pointer to pci device with error + * + * This callback is called after the PCI bus has been reset. + * Basically, this tries to restart the card from scratch. 
+ * This is a shortened version of the device probe/discovery code, + * it resembles the first-half of the ixgb_probe() routine. + */ +static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + netif_err(adapter, probe, adapter->netdev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Perform card reset only on one instance of the card */ + if (0 != PCI_FUNC (pdev->devfn)) + return PCI_ERS_RESULT_RECOVERED; + + pci_set_master(pdev); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); + ixgb_reset(adapter); + + /* Make sure the EEPROM is good */ + if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { + netif_err(adapter, probe, adapter->netdev, + "After reset, the EEPROM checksum is not valid\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + netif_err(adapter, probe, adapter->netdev, + "After reset, invalid MAC address\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ixgb_io_resume - called when its OK to resume normal operations + * @pdev pointer to pci device with error + * + * The error recovery driver tells us that its OK to resume + * normal operation. Implementation resembles the second-half + * of the ixgb_probe() routine. + */ +static void ixgb_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgb_adapter *adapter = netdev_priv(netdev); + + pci_set_master(pdev); + + if (netif_running(netdev)) { + if (ixgb_up(adapter)) { + pr_err("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + mod_timer(&adapter->watchdog_timer, jiffies); +} + +/* ixgb_main.c */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h new file mode 100644 index 000000000..8fc905192 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h @@ -0,0 +1,64 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* glue for the OS independent part of ixgb + * includes register access macros + */ + +#ifndef _IXGB_OSDEP_H_ +#define _IXGB_OSDEP_H_ + +#include +#include +#include +#include +#include +#include + +#undef ASSERT +#define ASSERT(x) BUG_ON(!(x)) + +#define ENTER() pr_debug("%s\n", __func__); + +#define IXGB_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + IXGB_##reg))) + +#define IXGB_READ_REG(a, reg) ( \ + readl((a)->hw_addr + IXGB_##reg)) + +#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2)))) + +#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + IXGB_##reg + ((offset) << 2))) + +#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS) + +#define IXGB_MEMCPY memcpy + +#endif /* _IXGB_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c new file mode 100644 index 000000000..04a60640d --- /dev/null +++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c @@ -0,0 +1,469 @@ +/******************************************************************************* + + Intel PRO/10GbE Linux driver + Copyright(c) 1999 - 2008 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "ixgb.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define IXGB_MAX_NIC 8 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define IXGB_PARAM_INIT { [0 ... 
IXGB_MAX_NIC] = OPTION_UNSET } +#define IXGB_PARAM(X, desc) \ + static int X[IXGB_MAX_NIC+1] \ + = IXGB_PARAM_INIT; \ + static unsigned int num_##X = 0; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 64-4096 + * + * Default Value: 256 + */ + +IXGB_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 64-4096 + * + * Default Value: 1024 + */ + +IXGB_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: 2 - Tx only (silicon bug avoidance) + */ + +IXGB_PARAM(FlowControl, "Flow Control setting"); + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82597 based NICs + * + * Default Value: 1 + */ + +IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 0.8192 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 32 + */ + +IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay"); + +/* Receive Interrupt Delay in units of 0.8192 microseconds + * + * Valid Range: 0-65535 + * + * Default Value: 72 + */ + +IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay"); + +/* Receive Flow control high threshold (when we send a pause frame) + * (FCRTH) + * + * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity) + * + * Default Value: 196,608 (0x30000) + */ + +IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold"); + +/* Receive Flow control low threshold (when we send a resume frame) + * (FCRTL) + * + * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity) + * must be less than high threshold by at least 8 bytes + * + * Default Value: 163,840 (0x28000) + */ + +IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold"); + +/* Flow control request timeout (how long to pause the link partner's tx) + * (PAP 15:0) + * + * Valid Range: 1 - 65535 + * + * Default Value: 65535 (0xffff) (we'll send an xon if we recover) + */ + +IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout"); + +/* Interrupt Delay Enable + * + * Valid Range: 0, 1 + * + * - 0 - disables transmit interrupt delay + * - 1 - enables transmmit interrupt delay + * + * Default Value: 1 + */ + +IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable"); + + +#define DEFAULT_TIDV 32 +#define MAX_TIDV 0xFFFF +#define MIN_TIDV 0 + +#define DEFAULT_RDTR 72 +#define MAX_RDTR 0xFFFF +#define MIN_RDTR 0 + +#define XSUMRX_DEFAULT OPTION_ENABLED + +#define DEFAULT_FCRTL 0x28000 +#define DEFAULT_FCRTH 0x30000 +#define MIN_FCRTL 0 +#define MAX_FCRTL 0x3FFE8 +#define MIN_FCRTH 8 +#define MAX_FCRTH 0x3FFF0 + +#define MIN_FCPAUSE 1 +#define MAX_FCPAUSE 0xffff +#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */ + +struct ixgb_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct ixgb_opt_list { + int i; + const char *str; + } *p; + } l; + } arg; +}; + +static int +ixgb_validate_option(unsigned int *value, const struct 
ixgb_option *opt) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + pr_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + pr_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + pr_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct ixgb_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + pr_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * ixgb_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ + +void +ixgb_check_options(struct ixgb_adapter *adapter) +{ + int bd = adapter->bd_number; + if (bd >= IXGB_MAX_NIC) { + pr_notice("Warning: no configuration for board #%i\n", bd); + pr_notice("Using defaults for all values\n"); + } + + { /* Transmit Descriptor Count */ + static const struct ixgb_option opt = { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " __MODULE_STRING(DEFAULT_TXD), + .def = DEFAULT_TXD, + .arg = { .r = { .min = MIN_TXD, + .max = MAX_TXD}} + }; + struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + ixgb_validate_option(&tx_ring->count, &opt); + } else { + tx_ring->count = opt.def; + } + tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); + } + { /* Receive Descriptor Count */ + static const struct ixgb_option opt = { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " __MODULE_STRING(DEFAULT_RXD), + .def = DEFAULT_RXD, + .arg = { .r = { .min = MIN_RXD, + .max = MAX_RXD}} + }; + struct ixgb_desc_ring *rx_ring = &adapter->rx_ring; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + ixgb_validate_option(&rx_ring->count, &opt); + } else { + rx_ring->count = opt.def; + } + rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); + } + { /* Receive Checksum Offload Enable */ + static const struct ixgb_option opt = { + .type = enable_option, + .name = "Receive Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + ixgb_validate_option(&rx_csum, &opt); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct ixgb_opt_list fc_list[] = { + { ixgb_fc_none, "Flow Control Disabled" }, + { ixgb_fc_rx_pause, "Flow Control Receive Only" }, + { ixgb_fc_tx_pause, "Flow Control Transmit Only" }, + { ixgb_fc_full, "Flow Control Enabled" }, + { ixgb_fc_default, "Flow Control Hardware Default" } + }; + + static const struct ixgb_option opt = { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = ixgb_fc_tx_pause, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list 
}} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + ixgb_validate_option(&fc, &opt); + adapter->hw.fc.type = fc; + } else { + adapter->hw.fc.type = opt.def; + } + } + { /* Receive Flow Control High Threshold */ + static const struct ixgb_option opt = { + .type = range_option, + .name = "Rx Flow Control High Threshold", + .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH), + .def = DEFAULT_FCRTH, + .arg = { .r = { .min = MIN_FCRTH, + .max = MAX_FCRTH}} + }; + + if (num_RxFCHighThresh > bd) { + adapter->hw.fc.high_water = RxFCHighThresh[bd]; + ixgb_validate_option(&adapter->hw.fc.high_water, &opt); + } else { + adapter->hw.fc.high_water = opt.def; + } + if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) + pr_info("Ignoring RxFCHighThresh when no RxFC\n"); + } + { /* Receive Flow Control Low Threshold */ + static const struct ixgb_option opt = { + .type = range_option, + .name = "Rx Flow Control Low Threshold", + .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL), + .def = DEFAULT_FCRTL, + .arg = { .r = { .min = MIN_FCRTL, + .max = MAX_FCRTL}} + }; + + if (num_RxFCLowThresh > bd) { + adapter->hw.fc.low_water = RxFCLowThresh[bd]; + ixgb_validate_option(&adapter->hw.fc.low_water, &opt); + } else { + adapter->hw.fc.low_water = opt.def; + } + if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) + pr_info("Ignoring RxFCLowThresh when no RxFC\n"); + } + { /* Flow Control Pause Time Request*/ + static const struct ixgb_option opt = { + .type = range_option, + .name = "Flow Control Pause Time Request", + .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE), + .def = DEFAULT_FCPAUSE, + .arg = { .r = { .min = MIN_FCPAUSE, + .max = MAX_FCPAUSE}} + }; + + if (num_FCReqTimeout > bd) { + unsigned int pause_time = FCReqTimeout[bd]; + ixgb_validate_option(&pause_time, &opt); + adapter->hw.fc.pause_time = pause_time; + } else { + adapter->hw.fc.pause_time = opt.def; + } + if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) + pr_info("Ignoring FCReqTimeout when no RxFC\n"); + } + /* high low and spacing check for rx flow control thresholds */ + if (adapter->hw.fc.type & ixgb_fc_tx_pause) { + /* high must be greater than low */ + if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { + /* set defaults */ + pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n"); + adapter->hw.fc.high_water = DEFAULT_FCRTH; + adapter->hw.fc.low_water = DEFAULT_FCRTL; + } + } + { /* Receive Interrupt Delay */ + static const struct ixgb_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RDTR, + .max = MAX_RDTR}} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + ixgb_validate_option(&adapter->rx_int_delay, &opt); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Transmit Interrupt Delay */ + static const struct ixgb_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TIDV, + .max = MAX_TIDV}} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + ixgb_validate_option(&adapter->tx_int_delay, &opt); + } else { + adapter->tx_int_delay = opt.def; + } + } + + { /* Transmit Interrupt Delay Enable */ + static const struct ixgb_option opt = { + .type = enable_option, + .name = "Tx Interrupt Delay Enable", + .err = "defaulting to Enabled", + 
.def = OPTION_ENABLED + }; + + if (num_IntDelayEnable > bd) { + unsigned int ide = IntDelayEnable[bd]; + ixgb_validate_option(&ide, &opt); + adapter->tx_int_delay_enable = ide; + } else { + adapter->tx_int_delay_enable = opt.def; + } + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile new file mode 100644 index 000000000..35e6fa643 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -0,0 +1,44 @@ +################################################################################ +# +# Intel 10 Gigabit PCI Express Linux driver +# Copyright(c) 1999 - 2013 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_IXGBE) += ixgbe.o + +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ + ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o + +ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ + ixgbe_dcb_82599.o ixgbe_dcb_nl.o + +ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o +ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o +ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h new file mode 100644 index 000000000..636f9e350 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -0,0 +1,969 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_H_ +#define _IXGBE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_dcb.h" +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#define IXGBE_FCOE +#include "ixgbe_fcoe.h" +#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ +#ifdef CONFIG_IXGBE_DCA +#include +#endif + +#include + +#ifdef CONFIG_NET_RX_BUSY_POLL +#define BP_EXTENDED_STATS +#endif +/* common prefix used by pr_<> macros */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +/* TX/RX descriptor defines */ +#define IXGBE_DEFAULT_TXD 512 +#define IXGBE_DEFAULT_TX_WORK 256 +#define IXGBE_MAX_TXD 4096 +#define IXGBE_MIN_TXD 64 + +#if (PAGE_SIZE < 8192) +#define IXGBE_DEFAULT_RXD 512 +#else +#define IXGBE_DEFAULT_RXD 128 +#endif +#define IXGBE_MAX_RXD 4096 +#define IXGBE_MIN_RXD 64 + +#define IXGBE_ETH_P_LLDP 0x88CC + +/* flow control */ +#define IXGBE_MIN_FCRTL 0x40 +#define IXGBE_MAX_FCRTL 0x7FF80 +#define IXGBE_MIN_FCRTH 0x600 +#define IXGBE_MAX_FCRTH 0x7FFF0 +#define IXGBE_DEFAULT_FCPAUSE 0xFFFF +#define IXGBE_MIN_FCPAUSE 0 +#define IXGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_2K 2048 +#define IXGBE_RXBUFFER_3K 3072 +#define IXGBE_RXBUFFER_4K 4096 +#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +enum ixgbe_tx_flags { + /* cmd_type flags */ + IXGBE_TX_FLAGS_HW_VLAN = 0x01, + IXGBE_TX_FLAGS_TSO = 0x02, + IXGBE_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IXGBE_TX_FLAGS_CC = 0x08, + IXGBE_TX_FLAGS_IPV4 = 0x10, + IXGBE_TX_FLAGS_CSUM = 0x20, + + /* software defined flags */ + IXGBE_TX_FLAGS_SW_VLAN = 0x40, + IXGBE_TX_FLAGS_FCOE = 0x80, +}; + +/* VLAN info */ +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +#define IXGBE_MAX_VF_MC_ENTRIES 30 +#define IXGBE_MAX_VF_FUNCTIONS 64 +#define IXGBE_MAX_VFTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define IXGBE_MAX_PF_MACVLANS 15 +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +#define IXGBE_82599_VF_DEVICE_ID 0x10ED +#define IXGBE_X540_VF_DEVICE_ID 0x1515 + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
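+ * (the PF assigns this port VLAN through its ndo_set_vf_vlan
+ * handler; while it is non-zero, VLAN changes requested by the
+ * guest itself are rejected)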
*/ + u16 pf_qos; + u16 tx_rate; + u16 vlan_count; + u8 spoofchk_enabled; + bool rss_query_enabled; + unsigned int vf_api; +}; + +struct vf_macvlans { + struct list_head l; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgbe_tx_buffer { + union ixgbe_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ixgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + +struct ixgbe_queue_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif /* BP_EXTENDED_STATS */ +}; + +struct ixgbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ixgbe_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +enum ixgbe_ring_state_t { + __IXGBE_TX_FDIR_INIT_DONE, + __IXGBE_TX_XPS_INIT_DONE, + __IXGBE_TX_DETECT_HANG, + __IXGBE_HANG_CHECK_ARMED, + __IXGBE_RX_RSC_ENABLED, + __IXGBE_RX_CSUM_UDP_ZERO_ERR, + __IXGBE_RX_FCOE, +}; + +struct ixgbe_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device *netdev; + struct ixgbe_adapter *real_adapter; + unsigned int tx_base_queue; + unsigned int rx_base_queue; + int pool; +}; + +#define check_for_tx_hang(ring) \ + test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +struct ixgbe_ring { + struct ixgbe_ring *next; /* pointer to next ring in q_vector */ + struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + struct ixgbe_fwd_adapter *l2_accel_priv; + void *desc; /* descriptor ring memory */ + union { + struct ixgbe_tx_buffer *tx_buffer_info; + struct ixgbe_rx_buffer *rx_buffer_info; + }; + unsigned long state; + u8 __iomem *tail; + dma_addr_t dma; /* phys. 
address of descriptor ring */ + unsigned int size; /* length in bytes */ + + u16 count; /* amount of descriptors */ + + u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + u16 next_to_use; + u16 next_to_clean; + + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct ixgbe_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct ixgbe_tx_queue_stats tx_stats; + struct ixgbe_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +enum ixgbe_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, +#ifdef IXGBE_FCOE + RING_F_FCOE, +#endif /* IXGBE_FCOE */ + + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define IXGBE_MAX_RSS_INDICES 16 +#define IXGBE_MAX_RSS_INDICES_X550 64 +#define IXGBE_MAX_VMDQ_INDICES 64 +#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_L2A_QUEUES 4 +#define IXGBE_BAD_L2A_QUEUE 3 +#define IXGBE_MAX_MACVLANS 31 +#define IXGBE_MAX_DCBMACVLANS 8 + +struct ixgbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +} ____cacheline_internodealigned_in_smp; + +#define IXGBE_82599_VMDQ_8Q_MASK 0x78 +#define IXGBE_82599_VMDQ_4Q_MASK 0x7C +#define IXGBE_82599_VMDQ_2Q_MASK 0x7E + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) +{ +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K : + IXGBE_RXBUFFER_3K; +#endif + return IXGBE_RXBUFFER_2K; +} + +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +{ +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? 1 : 0; +#endif + return 0; +} +#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) + +struct ixgbe_ring_container { + struct ixgbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ixgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ + ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
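+ * The zero-length ring[] member at the end of the structure lets a
+ * q_vector be allocated together with its rings in one block; an
+ * illustrative sizing expression (not a quote of the allocation
+ * code) would be:
+ *
+ *	size = sizeof(struct ixgbe_q_vector) +
+ *	       ring_count * sizeof(struct ixgbe_ring);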
+ */ +struct ixgbe_q_vector { + struct ixgbe_adapter *adapter; +#ifdef CONFIG_IXGBE_DCA + int cpu; /* CPU for DCA */ +#endif + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ + struct ixgbe_ring_container rx, tx; + + struct napi_struct napi; + cpumask_t affinity_mask; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; + +#ifdef CONFIG_NET_RX_BUSY_POLL + atomic_t state; +#endif /* CONFIG_NET_RX_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +enum ixgbe_qv_state_t { + IXGBE_QV_STATE_IDLE = 0, + IXGBE_QV_STATE_NAPI, + IXGBE_QV_STATE_POLL, + IXGBE_QV_STATE_DISABLE +}; + +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_NAPI); +#ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + + return rc == IXGBE_QV_STATE_IDLE; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* called from ixgbe_low_latency_poll() */ +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_POLL); +#ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; +#endif + return rc == IXGBE_QV_STATE_IDLE; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) +{ + return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; +} + +/* false if QV is currently owned */ +static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +{ + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_DISABLE); + + return rc == IXGBE_QV_STATE_IDLE; +} + +#else /* CONFIG_NET_RX_BUSY_POLL */ +static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +{ +} + +static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +{ + return true; +} + +static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool 
ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) +{ + return false; +} + +static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +{ + return true; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +#ifdef CONFIG_IXGBE_HWMON + +#define IXGBE_HWMON_TYPE_LOC 0 +#define IXGBE_HWMON_TYPE_TEMP 1 +#define IXGBE_HWMON_TYPE_CAUTION 2 +#define IXGBE_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct ixgbe_hw *hw; + struct ixgbe_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4]; + unsigned int n_hwmon; +}; +#endif /* CONFIG_IXGBE_HWMON */ + +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define IXGBE_MIN_RSC_ITR 24 +#define IXGBE_100K_ITR 40 +#define IXGBE_20K_ITR 200 +#define IXGBE_10K_ITR 400 +#define IXGBE_8K_ITR 500 + +/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +#define IXGBE_RX_DESC(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +#define IXGBE_TX_DESC(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +#define IXGBE_TX_CTXTDESC(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ +#ifdef IXGBE_FCOE +/* Use 3K as the baby jumbo frame size for FCoE */ +#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +#endif /* IXGBE_FCOE */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +#define MAX_MSIX_VECTORS_82599 64 +#define MAX_Q_VECTORS_82599 64 +#define MAX_MSIX_VECTORS_82598 18 +#define MAX_Q_VECTORS_82598 16 + +struct ixgbe_mac_addr { + u8 addr[ETH_ALEN]; + u16 queue; + u16 state; /* bitmask */ +}; +#define IXGBE_MAC_STATE_DEFAULT 0x1 +#define IXGBE_MAC_STATE_MODIFIED 0x2 +#define IXGBE_MAC_STATE_IN_USE 0x4 + +#define MAX_Q_VECTORS MAX_Q_VECTORS_82599 +#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* default to trying for four seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) + +/* board specific private data structure */ +struct ixgbe_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + unsigned long state; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. 
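+ * A feature is therefore in one of three states: unsupported
+ * (_CAPABLE clear), supported but switched off (_CAPABLE set,
+ * _ENABLED clear), or active (both set). Using the FCoE flags
+ * defined below as an example, "supported but disabled" is:
+ *
+ *	(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) &&
+ *	!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)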
+ */ + u32 flags; +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) +#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) +#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11) +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12) +#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13) +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16) +#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20) +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) +#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) +#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) + + u32 flags2; +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) +#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) +#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4) +#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) +#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) +#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + + /* TX */ + struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + + /* RX */ + struct ixgbe_ring *rx_ring[MAX_RX_QUEUES]; + int num_rx_pools; /* == num_rx_queues in 82598 */ + int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ + u64 hw_csum_rx_error; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS]; + + /* DCB parameters */ + struct ieee_pfc *ixgbe_ieee_pfc; + struct ieee_ets *ixgbe_ieee_ets; + struct ixgbe_dcb_config dcb_cfg; + struct ixgbe_dcb_config temp_dcb_cfg; + u8 dcb_set_bitmap; + u8 dcbx_cap; + enum ixgbe_fc_mode last_lfc_mode; + + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* true count of q_vectors for device */ + struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + + u32 test_icr; + struct ixgbe_ring test_tx_ring; + struct ixgbe_ring test_rx_ring; + + /* structs defined in ixgbe_hw.h */ + struct ixgbe_hw hw; + u16 msg_enable; + struct ixgbe_hw_stats stats; + + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; + unsigned long link_check_timeout; + + struct timer_list service_timer; + struct work_struct service_task; + + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union ixgbe_atr_input fdir_mask; + int fdir_filter_count; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + +#ifdef IXGBE_FCOE + struct ixgbe_fcoe fcoe; +#endif /* IXGBE_FCOE 
*/ + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bridge_mode; + + u16 eeprom_verh; + u16 eeprom_verl; + u16 eeprom_cap; + + u32 interrupt_event; + u32 led_reg; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 base_incval; + + /* SR-IOV */ + DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + + u32 timer_event_accumulator; + u32 vferr_refcount; + struct ixgbe_mac_addr *mac_table; + u16 vxlan_port; + struct kobject *info_kobj; +#ifdef CONFIG_IXGBE_HWMON + struct hwmon_buff *ixgbe_hwmon_buff; +#endif /* CONFIG_IXGBE_HWMON */ +#ifdef CONFIG_DEBUG_FS + struct dentry *ixgbe_dbg_adapter; +#endif /*CONFIG_DEBUG_FS*/ + + u8 default_up; + unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ + +/* maximum number of RETA entries among all devices supported by ixgbe + * driver: currently it's x550 device in non-SRIOV mode + */ +#define IXGBE_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; + +#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; +}; + +static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + return IXGBE_MAX_RSS_INDICES; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + return IXGBE_MAX_RSS_INDICES_X550; + default: + return 0; + } +} + +struct ixgbe_fdir_filter { + struct hlist_node fdir_node; + union ixgbe_atr_input filter; + u16 sw_idx; + u16 action; +}; + +enum ixgbe_state_t { + __IXGBE_TESTING, + __IXGBE_RESETTING, + __IXGBE_DOWN, + __IXGBE_DISABLED, + __IXGBE_REMOVING, + __IXGBE_SERVICE_SCHED, + __IXGBE_SERVICE_INITED, + __IXGBE_IN_SFP_INIT, + __IXGBE_PTP_RUNNING, + __IXGBE_PTP_TX_IN_PROGRESS, +}; + +struct ixgbe_cb { + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; + dma_addr_t dma; + u16 append_cnt; + bool page_released; +}; +#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) + +enum ixgbe_boards { + board_82598, + board_82599, + board_X540, + board_X550, + board_X550EM_x, +}; + +extern struct ixgbe_info ixgbe_82598_info; +extern struct ixgbe_info ixgbe_82599_info; +extern struct ixgbe_info ixgbe_X540_info; +extern struct ixgbe_info ixgbe_X550_info; +extern struct ixgbe_info ixgbe_X550EM_x_info; +#ifdef CONFIG_IXGBE_DCB +extern const struct dcbnl_rtnl_ops dcbnl_ops; +#endif + +extern char ixgbe_driver_name[]; +extern const char ixgbe_driver_version[]; +#ifdef IXGBE_FCOE +extern char ixgbe_default_device_descr[]; +#endif /* IXGBE_FCOE */ + +void ixgbe_up(struct ixgbe_adapter *adapter); +void ixgbe_down(struct ixgbe_adapter *adapter); +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); +void ixgbe_reset(struct ixgbe_adapter *adapter); +void ixgbe_set_ethtool_ops(struct net_device *netdev); +int ixgbe_setup_rx_resources(struct ixgbe_ring *); +int ixgbe_setup_tx_resources(struct ixgbe_ring *); +void ixgbe_free_rx_resources(struct ixgbe_ring *); +void ixgbe_free_tx_resources(struct ixgbe_ring *); +void 
ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); +void ixgbe_update_stats(struct ixgbe_adapter *adapter); +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); +int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); +#ifdef CONFIG_PCI_IOV +void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); +#endif +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, + u8 *addr, u16 queue); +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, + u8 *addr, u16 queue); +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, + struct ixgbe_ring *); +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, + struct ixgbe_tx_buffer *); +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); +void ixgbe_write_eitr(struct ixgbe_q_vector *); +int ixgbe_poll(struct napi_struct *napi, int budget); +int ethtool_ioctl(struct ifreq *ifr); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +void ixgbe_set_rx_mode(struct net_device *netdev); +#ifdef CONFIG_IXGBE_DCB +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); +#endif +int ixgbe_setup_tc(struct net_device *dev, u8 tc); +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); +void ixgbe_do_reset(struct net_device *netdev); +#ifdef CONFIG_IXGBE_HWMON +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); +#endif /* CONFIG_IXGBE_HWMON */ +#ifdef IXGBE_FCOE +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); +int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, + u8 *hdr_len); +int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); +int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +int ixgbe_fcoe_enable(struct net_device *netdev); +int ixgbe_fcoe_disable(struct net_device *netdev); +#ifdef CONFIG_IXGBE_DCB +u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); +u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); +#endif /* CONFIG_IXGBE_DCB */ +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + struct netdev_fcoe_hbainfo *info); +u8 
ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); +#endif /* IXGBE_FCOE */ +#ifdef CONFIG_DEBUG_FS +void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); +void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); +void ixgbe_dbg_init(void); +void ixgbe_dbg_exit(void); +#else +static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {} +static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {} +static inline void ixgbe_dbg_init(void) {} +static inline void ixgbe_dbg_exit(void) {} +#endif /* CONFIG_DEBUG_FS */ +static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +void ixgbe_ptp_init(struct ixgbe_adapter *adapter); +void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); +void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); +void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); +void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); +void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb); +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); +int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); +void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); +#endif + +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring); +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); +#endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c new file mode 100644 index 000000000..824a7ab79 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -0,0 +1,1237 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" + +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES 16 +#define IXGBE_82598_MC_TBL_SIZE 128 +#define IXGBE_82598_VFT_TBL_SIZE 128 +#define IXGBE_82598_RX_PB_SIZE 512 + +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); + +/** + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82598 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +{ + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); + u16 pcie_devctl2; + + if (ixgbe_removed(hw->hw_addr)) + return; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 250ms through the GCR register + */ + if (!(gcr & IXGBE_GCR_CAP_VER2)) { + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; + ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); +} + +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* Call PHY identify routine to get the phy type */ + ixgbe_identify_phy_generic(hw); + + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
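+ * For the NL PHY this also identifies the attached SFP+ module;
+ * an unknown module type, or a module with no init sequence
+ * offsets in the EEPROM, makes this return
+ * IXGBE_ERR_SFP_NOT_SUPPORTED.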
+ * + **/ +static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u16 list_offset, data_offset; + + /* Identify the PHY */ + phy->ops.identify(hw); + + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = &ixgbe_reset_phy_nl; + + /* Call SFP+ identify routine to get the SFP+ module type */ + ret_val = phy->ops.identify_sfp(hw); + if (ret_val) + return ret_val; + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* Check to see if SFP+ module is supported */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, + &list_offset, + &data_offset); + if (ret_val) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + break; + default: + break; + } + + return 0; +} + +/** + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function. + * Disables relaxed ordering for archs other than SPARC + * Then set pcie completion timeout + * + **/ +static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +{ +#ifndef CONFIG_SPARC + u32 regval; + u32 i; +#endif + s32 ret_val; + + ret_val = ixgbe_start_hw_generic(hw); + +#ifndef CONFIG_SPARC + /* Disable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } +#endif + if (ret_val) + return ret_val; + + /* set the completion timeout for interface */ + ixgbe_set_pcie_completion_timeout(hw); + + return 0; +} + +/** + * ixgbe_get_link_capabilities_82598 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. + **/ +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + u32 autoc = 0; + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. 
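+ * For example, when the link mode select field decodes as KX4_AN,
+ * the speed mask is assembled from the KX4_SUPP (10 GbE) and
+ * KX_SUPP (1 GbE) bits and autoneg is reported as true.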
+ */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + default: + return IXGBE_ERR_LINK_SETUP; + } + + return 0; +} + +/** + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + return ixgbe_media_type_copper; + + default: + break; + } + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + /* Default device ID is mezzanine card KX/KX4 */ + return ixgbe_media_type_backplane; + + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + return ixgbe_media_type_fiber; + + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + return ixgbe_media_type_cx4; + + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + return ixgbe_media_type_copper; + + default: + return ixgbe_media_type_unknown; + } +} + +/** + * ixgbe_fc_enable_82598 - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) +{ + u32 fctrl_reg; + u32 rmcs_reg; + u32 reg; + u32 fcrtl, fcrth; + u32 link_speed = 0; + int i; + bool link_up; + + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) + return IXGBE_ERR_INVALID_LINK_SETTINGS; + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + hw_dbg(hw, "Invalid water mark configuration\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + } + } + + /* + * On 82598 having Rx FC on causes resets while doing 1G + * so if it's on turn it off once we know link_speed. For + * more details see 82598 Specification update. 
+ */ + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + hw->fc.requested_mode = ixgbe_fc_tx_pause; + break; + case ixgbe_fc_rx_pause: + hw->fc.requested_mode = ixgbe_fc_none; + break; + default: + /* no change */ + break; + } + } + + /* Negotiate the fc mode to use */ + ixgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + /* Set 802.3x based flow control settings. */ + fctrl_reg |= IXGBE_FCTRL_DPF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + } + + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return 0; +} + +/** + * ixgbe_start_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
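The flow-control writes in ixgbe_fc_enable_82598 above pack two traffic classes into each FCTTV register and scale each per-TC water mark by 1024 before OR-ing in the enable bit. A minimal worked sketch of that packing, assuming the kernel integer types; the helper name and sample values are illustrative, not from the driver:

static inline void ixgbe_fc_packing_example(void)
{
        u16 pause_time = 0x0680;                /* hypothetical pause quanta */
        u32 fcttv = pause_time * 0x00010001;    /* 0x06800680: the same 16-bit value
                                                 * replicated for the two TCs that
                                                 * share one FCTTV register */
        u32 low_water = 2, high_water = 8;      /* hypothetical per-TC water marks */
        u32 fcrtl_field = low_water << 10;      /* 0x0800, OR'd with IXGBE_FCRTL_XONE */
        u32 fcrth_field = high_water << 10;     /* 0x2000, OR'd with IXGBE_FCRTH_FCEN */

        /* the refresh threshold written to FCRTV is pause_time / 2 = 0x0340 */
        (void)fcttv; (void)fcrtl_field; (void)fcrth_field;
}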
+ **/ +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = 0; + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, "Autonegotiation did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + + return status; +} + +/** + * ixgbe_validate_link_ready - Function looks for phy link + * @hw: pointer to hardware structure + * + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. + **/ +static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +{ + u32 timeout; + u16 an_reg; + + if (hw->device_id != IXGBE_DEV_ID_82598AT2) + return 0; + + for (timeout = 0; + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { + hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); + + if ((an_reg & MDIO_AN_STAT1_COMPLETE) && + (an_reg & MDIO_STAT1_LSTATUS)) + break; + + msleep(100); + } + + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { + hw_dbg(hw, "Link was indicated but link is down\n"); + return IXGBE_ERR_LINK_SETUP; + } + + return 0; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) +{ + u32 links_reg; + u32 i; + u16 link_reg, adapt_comp_reg; + + /* + * SERDES PHY requires us to read link status from register 0xC79F. + * Bit 0 set indicates link is up/ready; clear indicates link down. + * 0xC00C is read to check that the XAUI lanes are active. Bit 0 + * clear indicates active; set indicates inactive. 
+ */ + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, + &adapt_comp_reg); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if ((link_reg & 1) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + hw->phy.ops.read_reg(hw, 0xC79F, + MDIO_MMD_PMAPMD, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, + MDIO_MMD_PMAPMD, + &adapt_comp_reg); + } + } else { + if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) + *link_up = true; + else + *link_up = false; + } + + if (!*link_up) + return 0; + } + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && + (ixgbe_validate_link_ready(hw) != 0)) + *link_up = false; + + return 0; +} + +/** + * ixgbe_setup_mac_link_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + /* Check to see if speed passed in is supported. */ + ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) + return IXGBE_ERR_LINK_SETUP; + + /* Set KX4/KX support according to speed requested */ + else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + if (autoc != curr_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } + + /* Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); +} + + +/** + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. 
+ **/ +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status; + s32 phy_status = 0; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. + */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { + /* Enable Tx Atlas so packets can be transmitted again */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + analog_val); + } + + /* Reset PHY */ + if (hw->phy.reset_disable == false) { + /* PHY ops must be identified and initialized prior to reset */ + + /* Init PHY and function pointers, perform SFP setup */ + phy_status = hw->phy.ops.init(hw); + if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return phy_status; + if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) + goto mac_reset_top; + + hw->phy.ops.reset(hw); + } + +mac_reset_top: + /* + * Issue global reset to the MAC. This needs to be a SW reset. + * If link reset is used, it might reset the MAC when mng is using it + */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. 
+ */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + + /* + * Store the original AUTOC value if it has not been + * stored off yet. Otherwise restore the stored original + * AUTOC value since the reset operation sets back to deaults. + */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_link_settings_stored = true; + } else if (autoc != hw->mac.orig_autoc) { + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + hw->mac.ops.init_rx_addrs(hw); + + if (phy_status) + status = phy_status; + + return status; +} + +/** + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index + **/ +static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + return 0; +} + +/** + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + if (rar_high & IXGBE_RAH_VIND_MASK) { + rar_high &= ~IXGBE_RAH_VIND_MASK; + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + } + + return 0; +} + +/** + * ixgbe_set_vfta_82598 - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. 
+ **/ +static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + u32 regindex; + u32 bitindex; + u32 bits; + u32 vftabyte; + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* Determine 32-bit word position in array */ + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); + bits &= (~(0x0F << bitindex)); + bits |= (vind << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); + + /* Determine the location of the bit for this VLAN id */ + bitindex = vlan & 0x1F; /* lower five bits */ + + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + if (vlan_on) + /* Turn on this VLAN id */ + bits |= (1 << bitindex); + else + /* Turn off this VLAN id */ + bits &= ~(1 << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + + return 0; +} + +/** + * ixgbe_clear_vfta_82598 - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return 0; +} + +/** + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Atlas analog register specified. + **/ +static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return 0; +} + +/** + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + + return 0; +} + +/** + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @dev_addr: address to read from + * @byte_offset: byte offset to read from dev_addr + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's data over I2C interface. 
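The shift arithmetic in ixgbe_set_vfta_82598 above splits a VLAN ID into a VFTA register index, a bit position within that register, and a 4-bit VMDq nibble inside the VFTAVIND byte arrays. A worked sketch for VLAN 1000 (0x3E8); the helper is illustrative only and assumes the kernel integer types:

static inline void ixgbe_vfta_index_example(void)
{
        u32 vlan = 1000;                     /* 0x3E8 */
        u32 regindex = (vlan >> 5) & 0x7F;   /* 31: the VLAN bit lives in VFTA[31]  */
        u32 bitindex = vlan & 0x1F;          /*  8: bit 8 of that register          */
        u32 vftabyte = (vlan >> 3) & 0x03;   /*  1: second VFTAVIND byte array      */
        u32 nibble = (vlan & 0x7) << 2;      /*  0: the 4-bit VMDq index sits in the
                                              *     lowest nibble of VFTAVIND(1, 31) */

        (void)regindex; (void)bitindex; (void)vftabyte; (void)nibble;
}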
+ **/ +static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, + u8 byte_offset, u8 *eeprom_data) +{ + s32 status = 0; + u16 sfp_addr = 0; + u16 sfp_data = 0; + u16 sfp_stat = 0; + u16 gssr; + u32 i; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) + return IXGBE_ERR_SWFW_SYNC; + + if (hw->phy.type == ixgbe_phy_nl) { + /* + * phy SDA/SCL registers are at addresses 0xC30A to + * 0xC30D. These registers are used to talk to the SFP+ + * module's EEPROM through the SDA/SCL (I2C) interface. + */ + sfp_addr = (dev_addr << 8) + byte_offset; + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); + hw->phy.ops.write_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + MDIO_MMD_PMAPMD, + sfp_addr); + + /* Poll status */ + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + MDIO_MMD_PMAPMD, + &sfp_stat); + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) + break; + usleep_range(10000, 20000); + } + + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { + hw_dbg(hw, "EEPROM read did not pass.\n"); + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* Read data */ + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + MDIO_MMD_PMAPMD, &sfp_data); + + *eeprom_data = (u8)(sfp_data >> 8); + } else { + status = IXGBE_ERR_PHY; + } + +out: + hw->mac.ops.release_swfw_sync(hw, gssr); + return status; +} + +/** + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + **/ +static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, + byte_offset, eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + **/ +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, + byte_offset, sff8472_data); +} + +/** + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + * port devices. + * @hw: pointer to the HW structure + * + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. 
+ **/ +static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u16 pci_gen = 0; + u16 pci_ctrl2 = 0; + + ixgbe_set_lan_id_multi_port_pcie(hw); + + /* check if LAN0 is disabled */ + hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); + if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { + + hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); + + /* if LAN0 is completely disabled force function to 0 */ + if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { + + bus->func = 0; + } + } +} + +/** + * ixgbe_set_rxpba_82598 - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy) +{ + u32 rxpktsize = IXGBE_RXPBSIZE_64KB; + u8 i = 0; + + if (!num_pb) + return; + + /* Setup Rx packet buffer sizes */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* Setup the first four at 80KB */ + rxpktsize = IXGBE_RXPBSIZE_80KB; + for (; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Setup the last four at 48KB...don't re-init i */ + rxpktsize = IXGBE_RXPBSIZE_48KB; + /* Fall Through */ + case PBA_STRATEGY_EQUAL: + default: + /* Divide the remaining Rx packet buffer evenly among the TCs */ + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + } + + /* Setup Tx packet buffer sizes */ + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); +} + +static struct ixgbe_mac_operations mac_ops_82598 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_82598, + .start_hw = &ixgbe_start_hw_82598, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_82598, + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, + .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, + .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, + .setup_link = &ixgbe_setup_mac_link_82598, + .set_rxpba = &ixgbe_set_rxpba_82598, + .check_link = &ixgbe_check_mac_link_82598, + .get_link_capabilities = &ixgbe_get_link_capabilities_82598, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_82598, + .clear_vmdq = &ixgbe_clear_vmdq_82598, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_82598, + .set_vfta = &ixgbe_set_vfta_82598, + .fc_enable = &ixgbe_fc_enable_82598, + .set_fw_drv_ver = NULL, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, + .get_thermal_sensor_data = NULL, + .init_thermal_sensor_thresh = NULL, + .prot_autoc_read = &prot_autoc_read_generic, + .prot_autoc_write = &prot_autoc_write_generic, 
+ .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_82598 = { + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eerd_generic, + .write = &ixgbe_write_eeprom_generic, + .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, + .read_buffer = &ixgbe_read_eerd_buffer_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, +}; + +static struct ixgbe_phy_operations phy_ops_82598 = { + .identify = &ixgbe_identify_phy_generic, + .identify_sfp = &ixgbe_identify_module_generic, + .init = &ixgbe_init_phy_ops_82598, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .read_reg_mdi = &ixgbe_read_phy_reg_mdi, + .write_reg_mdi = &ixgbe_write_phy_reg_mdi, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_82598_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .eeprom_ops = &eeprom_ops_82598, + .phy_ops = &phy_ops_82598, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c new file mode 100644 index 000000000..e0c363948 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -0,0 +1,2381 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" +#include "ixgbe_mbx.h" + +#define IXGBE_82599_MAX_TX_QUEUES 128 +#define IXGBE_82599_MAX_RX_QUEUES 128 +#define IXGBE_82599_RAR_ENTRIES 128 +#define IXGBE_82599_MC_TBL_SIZE 128 +#define IXGBE_82599_VFT_TBL_SIZE 128 +#define IXGBE_82599_RX_PB_SIZE 512 + +static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); +static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); + +bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +{ + u32 fwsm, manc, factps; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) + return false; + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + if (!(manc & IXGBE_MANC_RCV_TCO_EN)) + return false; + + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS); + if (factps & IXGBE_FACTPS_MNGCG) + return false; + + return true; +} + +static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && + !ixgbe_mng_enabled(hw)) { + mac->ops.disable_tx_laser = + &ixgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + &ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; + } else { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + } else { + if ((mac->ops.get_media_type(hw) == + ixgbe_media_type_backplane) && + (hw->phy.smart_speed == ixgbe_smart_speed_auto || + hw->phy.smart_speed == ixgbe_smart_speed_on) && + !ixgbe_verify_lesm_fw_enabled_82599(hw)) + mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; + else + mac->ops.setup_link = &ixgbe_setup_mac_link_82599; + } +} + +static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ + s32 ret_val; + u16 list_offset, data_offset, data_value; + + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { + ixgbe_init_mac_link_ops_82599(hw); + + hw->phy.ops.reset = NULL; + + ret_val = 
ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val) + return ret_val; + + /* PHY config will finish before releasing the semaphore */ + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + while (data_value != 0xffff) { + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); + IXGBE_WRITE_FLUSH(hw); + if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) + goto setup_sfp_err; + } + + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* + * Delay obtaining semaphore again to allow FW access, + * semaphore_delay is in ms usleep_range needs us. + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); + + /* Restart DSP and set SFI mode */ + ret_val = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, + false); + + if (ret_val) { + hw_dbg(hw, " sfp module setup not complete\n"); + return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + } + } + + return 0; + +setup_sfp_err: + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access, + * semaphore_delay is in ms usleep_range needs us. + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); + hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; +} + +/** + * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @locked: Return the if we locked for this read. + * @reg_val: Value we read from AUTOC + * + * For this part (82599) we need to wrap read-modify-writes with a possible + * FW/SW lock. It is assumed this lock will be freed with the next + * prot_autoc_write_82599(). Note, that locked can only be true in cases + * where this function doesn't return an error. + **/ +static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, + u32 *reg_val) +{ + s32 ret_val; + + *locked = false; + /* If LESM is on then we need to hold the SW/FW semaphore. */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + *locked = true; + } + + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return 0; +} + +/** + * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous proc_autoc_read_82599. + * + * This part (82599) may need to hold a the SW/FW lock around all writes to + * AUTOC. Likewise after a write we need to do a pipeline reset. + **/ +static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) +{ + s32 ret_val = 0; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + + /* We only need to get the lock if: + * - We didn't do it already (in the read part of a read-modify-write) + * - LESM is enabled. 
+ */ + if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + return IXGBE_ERR_SWFW_SYNC; + + locked = true; + } + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + ret_val = ixgbe_reset_pipeline_82599(hw); + +out: + /* Free the SW/FW semaphore as we either grabbed it here or + * already had it when this function was called. + */ + if (locked) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + return ret_val; +} + +static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + ixgbe_init_mac_link_ops_82599(hw); + + mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; + mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during get_invariants because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { + /* Store flag indicating I2C bus access control unit. */ + hw->phy.qsfp_shared_i2c_bus = true; + + /* Initialize access to QSFP+ I2C bus */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0_DIR; + esdp &= ~IXGBE_ESDP_SDP1_DIR; + esdp &= ~IXGBE_ESDP_SDP0; + esdp &= ~IXGBE_ESDP_SDP0_NATIVE; + esdp &= ~IXGBE_ESDP_SDP1_NATIVE; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; + phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; + } + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82599 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
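The prot_autoc_read_82599/prot_autoc_write_82599 pair above is built for a locked read-modify-write of AUTOC: the read may take the SW/FW semaphore (reporting that through *locked) and the matching write releases it. A sketch of the intended caller shape using the ops hooks wired up in this driver; the wrapper function and the particular bit modified are illustrative assumptions:

static s32 example_autoc_rmw(struct ixgbe_hw *hw)
{
        bool locked;
        u32 autoc;
        s32 ret;

        /* may acquire the SW/FW semaphore when LESM is enabled */
        ret = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
        if (ret)
                return ret;

        autoc |= IXGBE_AUTOC_AN_RESTART;        /* hypothetical modification */

        /* passing 'locked' back lets the write drop the lock taken by the read */
        return hw->mac.ops.prot_autoc_write(hw, autoc, locked);
}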
+ **/ +static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + u32 autoc = 0; + + /* Determine 1G link capabilities off of SFP+ type */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + return 0; + } + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. + */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_10G_SERIAL: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_SGMII_1G_100M: + *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; + *autoneg = false; + break; + + default: + return IXGBE_ERR_LINK_SETUP; + } + + if (hw->phy.multispeed_fiber) { + *speed |= IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + + /* QSFP must not enable auto-negotiation */ + if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) + *autoneg = false; + else + *autoneg = true; + } + + return 0; +} + +/** + * ixgbe_get_media_type_82599 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +{ + /* Detect if there is a copper PHY attached. 
*/ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + return ixgbe_media_type_copper; + + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_XAUI_LOM: + /* Default device ID is mezzanine card KX/KX4 */ + return ixgbe_media_type_backplane; + + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + return ixgbe_media_type_fiber; + + case IXGBE_DEV_ID_82599_CX4: + return ixgbe_media_type_cx4; + + case IXGBE_DEV_ID_82599_T3_LOM: + return ixgbe_media_type_copper; + + case IXGBE_DEV_ID_82599_LS: + return ixgbe_media_type_fiber_lco; + + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + return ixgbe_media_type_fiber_qsfp; + + default: + return ixgbe_media_type_unknown; + } +} + +/** + * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 + * @hw: pointer to hardware structure + * + * Disables link, should be called during D3 power down sequence. + * + **/ +static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) +{ + u32 autoc2_reg, fwsm; + u16 ee_ctrl_2 = 0; + + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + + /* Check to see if MNG FW could be enabled */ + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + + if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) && + !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + } +} + +/** + * ixgbe_start_mac_link_82599 - Setup MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
+ **/ +static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = 0; + bool got_lock = false; + + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status) + return status; + + got_lock = true; + } + + /* Restart link */ + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, "Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + + return status; +} + +/** + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + + /* Disable tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + udelay(100); +} + +/** + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. + **/ +static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Enable tx laser; allow 100ms to light up */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + msleep(100); +} + +/** + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. 
+ **/ +static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; + + if (hw->mac.autotry_restart) { + ixgbe_disable_tx_laser_multispeed_fiber(hw); + ixgbe_enable_tx_laser_multispeed_fiber(hw); + hw->mac.autotry_restart = false; + } +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status = 0; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + u32 speedcnt = 0; + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + u32 i = 0; + bool link_up = false; + bool autoneg = false; + + /* Mask off requested but non-supported speeds */ + status = hw->mac.ops.get_link_capabilities(hw, &link_speed, + &autoneg); + if (status != 0) + return status; + + speed &= link_speed; + + /* + * Try each speed one by one, highest priority first. We do this in + * software because 10gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (1G->10G) */ + msleep(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. 
+ */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + break; + case ixgbe_media_type_fiber_qsfp: + /* QSFP module automatically detects MAC link speed */ + break; + default: + hw_dbg(hw, "Unexpected media type.\n"); + break; + } + + /* Allow module to change analog characteristics (10G->1G) */ + msleep(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg_wait_to_complete); + if (status != 0) + return status; + + /* Flap the tx laser if it has not already been done */ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msleep(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; + + if (link_up) + goto out; + } + + /* + * We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. + */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, + autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Implements the Intel SmartSpeed algorithm. + **/ +static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status = 0; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 i, j; + bool link_up = false; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* + * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the + * autoneg advertisement if link is unable to be established at the + * highest negotiated rate. 
This can sometimes happen due to integrity + * issues with the physical media connection. + */ + + /* First, try to get link with full advertisement */ + hw->phy.smart_speed_active = false; + for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + if (status != 0) + goto out; + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per + * Table 9 in the AN MAS. + */ + for (i = 0; i < 5; i++) { + mdelay(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + goto out; + + if (link_up) + goto out; + } + } + + /* + * We didn't get link. If we advertised KR plus one of KX4/KX + * (or BX4/BX), then disable KR and try again. + */ + if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || + ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) + goto out; + + /* Turn SmartSpeed on to disable KR support */ + hw->phy.smart_speed_active = true; + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + if (status != 0) + goto out; + + /* + * Wait for the controller to acquire link. 600ms will allow for + * the AN link_fail_inhibit_timer as well for multiple cycles of + * parallel detect, both 10g and 1g. This allows for the maximum + * connect attempts as defined in the AN MAS table 73-7. + */ + for (i = 0; i < 6; i++) { + mdelay(100); + + /* If we have link, just jump out */ + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + goto out; + + if (link_up) + goto out; + } + + /* We didn't get link. Turn SmartSpeed back off. */ + hw->phy.smart_speed_active = false; + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); + +out: + if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) + hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n"); + return status; +} + +/** + * ixgbe_setup_mac_link_82599 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + bool autoneg = false; + s32 status; + u32 pma_pmd_1g, link_mode, links_reg, i; + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + + /* holds the value of AUTOC register at this current point in time */ + u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + /* holds the cached value of AUTOC register */ + u32 orig_autoc = 0; + /* temporary variable used for comparison purposes */ + u32 autoc = current_autoc; + + /* Check to see if speed passed in is supported. 
*/ + status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, + &autoneg); + if (status) + return status; + + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) + return IXGBE_ERR_LINK_SETUP; + + /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ + if (hw->mac.orig_link_settings_stored) + orig_autoc = hw->mac.orig_autoc; + else + orig_autoc = autoc; + + link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + /* Set KX4/KX/KR support according to speed requested */ + autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && + (hw->phy.smart_speed_active == false)) + autoc |= IXGBE_AUTOC_KR_SUPP; + } + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || + link_mode == IXGBE_AUTOC_LMS_1G_AN)) { + /* Switch from 1G SFI to 10G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && + (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; + } + } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { + /* Switch from 10G SFI to 1G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && + (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + if (autoneg) + autoc |= IXGBE_AUTOC_LMS_1G_AN; + else + autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; + } + } + + if (autoc != current_autoc) { + /* Restart link */ + status = hw->mac.ops.prot_autoc_write(hw, autoc, false); + if (status) + return status; + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /*Just in case Autoneg time=0*/ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = + IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = + IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, "Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + } + + return status; +} + +/** + * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. 
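+ * The PHY speed is programmed first via phy.ops.setup_link_speed(); the MAC
+ * link is then restarted with ixgbe_start_mac_link_82599().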
+ **/ +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 status; + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82599 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl, i, autoc, autoc2; + u32 curr_lms; + bool link_up = false; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + return status; + + /* Reset PHY */ + if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) + hw->phy.ops.reset(hw); + + /* remember AUTOC from before we reset */ + curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; + +mac_reset_top: + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. 
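+ * (orig_autoc/orig_autoc2 are captured only on the first reset; later resets
+ * write them back, except that the pre-reset LMS field is preserved when
+ * manageability FW is driving a multispeed fiber part or WoL is enabled, as
+ * handled below.)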
+ */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + + /* Enable link if disabled in NVM */ + if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { + autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + IXGBE_WRITE_FLUSH(hw); + } + + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_autoc2 = autoc2; + hw->mac.orig_link_settings_stored = true; + } else { + + /* If MNG FW is running on a multi-speed device that + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the + * LMS state either. + */ + if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || + hw->wol_enabled) + hw->mac.orig_autoc = + (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | + curr_lms; + + if (autoc != hw->mac.orig_autoc) { + status = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc, + false); + if (status) + return status; + } + + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; + autoc2 |= (hw->mac.orig_autoc2 & + IXGBE_AUTOC2_UPPER_MASK); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + } + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (is_valid_ether_addr(hw->mac.san_addr)) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + + return status; +} + +/** + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +{ + int i; + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. + */ + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + IXGBE_FDIRCMD_CMD_MASK)) + break; + udelay(10); + } + if (i >= IXGBE_FDIRCMD_CMD_POLL) { + hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); + IXGBE_WRITE_FLUSH(hw); + /* + * 82599 adapters flow director init flow cannot be restarted, + * Workaround 82599 silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. 
+ * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll init-done after we write FDIRCTRL register */ + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + usleep_range(1000, 2000); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { + hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + + return 0; +} + +/** + * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + int i; + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. 
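+ * (Each iteration of the poll loop below sleeps 1-2 ms via usleep_range(),
+ * so the total wait is bounded by IXGBE_FDIR_INIT_DONE_POLL iterations.)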
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + usleep_range(1000, 2000); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + hw_dbg(hw, "Flow Director poll time exceeded!\n"); +} + +/** + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return 0; +} + +/** + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + /* + * Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | + IXGBE_FDIRCTRL_REPORT_STATUS | + (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | + (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return 0; +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define IXGBE_ATR_COMMON_HASH_KEY \ + (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) +#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash + * @stream: input bitstream to compute the hash on + * + * This function is almost identical to the function above but contains + * several optomizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the 
keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. + **/ +static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input.dword); + + /* generate common hash dword */ + hi_hash_dword = ntohl(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(1); + IXGBE_COMPUTE_SIG_HASH_ITERATION(2); + IXGBE_COMPUTE_SIG_HASH_ITERATION(3); + IXGBE_COMPUTE_SIG_HASH_ITERATION(4); + IXGBE_COMPUTE_SIG_HASH_ITERATION(5); + IXGBE_COMPUTE_SIG_HASH_ITERATION(6); + IXGBE_COMPUTE_SIG_HASH_ITERATION(7); + IXGBE_COMPUTE_SIG_HASH_ITERATION(8); + IXGBE_COMPUTE_SIG_HASH_ITERATION(9); + IXGBE_COMPUTE_SIG_HASH_ITERATION(10); + IXGBE_COMPUTE_SIG_HASH_ITERATION(11); + IXGBE_COMPUTE_SIG_HASH_ITERATION(12); + IXGBE_COMPUTE_SIG_HASH_ITERATION(13); + IXGBE_COMPUTE_SIG_HASH_ITERATION(14); + IXGBE_COMPUTE_SIG_HASH_ITERATION(15); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= IXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= IXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + return sig_hash ^ bucket_hash; +} + +/** + * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + **/ +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue) +{ + u64 fdirhashcmd; + u32 fdircmd; + + /* + * Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + */ + switch (input.formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TCPV6: + case IXGBE_ATR_FLOW_TYPE_UDPV6: + case IXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + hw_dbg(hw, " Error on flow type input\n"); + return IXGBE_ERR_CONFIG; + } + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits + * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
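+ * In other words fdirhashcmd = ((u64)fdircmd << 32) | hash, where hash is the
+ * value returned by ixgbe_atr_compute_sig_hash_82599(); the single 64-bit
+ * write programs FDIRHASH and FDIRCMD in one access.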
+ */ + fdirhashcmd = (u64)fdircmd << 32; + fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + + hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + return 0; +} + +#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) + +/** + * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applys the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. + **/ +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask) +{ + + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0, hi_dword = 0; + int i; + + /* Apply masks to input data */ + for (i = 0; i <= 10; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 10; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = ntohl(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + for (i = 1; i <= 15; i++) + IXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* + * Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + +/** + * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +{ + u32 mask = ntohs(input_mask->formatted.dst_port); + + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; + mask |= ntohs(input_mask->formatted.src_port); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. 
As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. + */ +#define IXGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define IXGBE_WRITE_REG_BE32(a, reg, value) \ + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) + +#define IXGBE_STORE_AS_BE16(_value) \ + ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask) +{ + /* mask IPv6 since it is currently not supported */ + u32 fdirm = IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. + */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + hw_dbg(hw, " bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= IXGBE_FDIRM_POOL; + case 0x7F: + break; + default: + hw_dbg(hw, " Error on vm pool mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= IXGBE_FDIRM_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + hw_dbg(hw, " Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_L4TYPE_MASK: + break; + default: + hw_dbg(hw, " Error on flow type mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: + /* mask VLAN ID, fall through to mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: + /* mask VLAN ID only, fall through */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0xEFFF: + /* no VLAN fields masked */ + break; + default: + hw_dbg(hw, " Error on VLAN mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes, fall through */ + fdirm |= IXGBE_FDIRM_FLEX; + case 0xFFFF: + break; + default: + hw_dbg(hw, " Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + + /* write both the same so that UDP and TCP use the same mask */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + + /* store source and destination IP masks (big-enian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, + ~input_mask->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, + ~input_mask->formatted.dst_ip[0]); + + return 0; +} + +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + + /* currently IPv6 is not supported, must be programmed with 0 */ + 
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), + input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), + input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), + input->formatted.src_ip[2]); + + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + + /* record the first 32 bits of the destination address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); + + /* record source and destination port (little-endian)*/ + fdirport = ntohs(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= ntohs(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + + /* record vlan (little-endian) and flex_bytes(big-endian) */ + fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= ntohs(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + if (queue == IXGBE_FDIR_DROP_QUEUE) + fdircmd |= IXGBE_FDIRCMD_DROP; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + + return 0; +} + +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash; + u32 fdircmd = 0; + u32 retry_count; + s32 err = 0; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + for (retry_count = 10; retry_count; retry_count--) { + /* allow 10us for query to process */ + udelay(10); + /* verify query completed successfully */ + fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + break; + } + + if (!retry_count) + err = IXGBE_ERR_FDIR_REINIT_FAILED; + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + + return err; +} + +/** + * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Omer analog register specified. 
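+ * The register address is written to CORECTL shifted left by 8 bits together
+ * with the WRITE_CMD trigger; after a short delay the value is read back from
+ * the low byte of CORECTL.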
+ **/ +static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 core_ctl; + + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | + (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); + *val = (u8)core_ctl; + + return 0; +} + +/** + * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Omer analog register specified. + **/ +static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 core_ctl; + + core_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + + return 0; +} + +/** + * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = 0; + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; + + ret_val = ixgbe_start_hw_gen2(hw); + if (ret_val) + return ret_val; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + + if (ret_val) + return ret_val; + + return ixgbe_verify_fw_version_82599(hw); +} + +/** + * ixgbe_identify_phy_82599 - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +{ + s32 status; + + /* Detect PHY if not unknown - returns success if already detected. */ + status = ixgbe_identify_phy_generic(hw); + if (status) { + /* 82599 10GBASE-T requires an external PHY */ + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) + return status; + status = ixgbe_identify_module_generic(hw); + } + + /* Set PHY type none if no PHY detected */ + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.type = ixgbe_phy_none; + status = 0; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == ixgbe_phy_sfp_unsupported) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + return status; +} + +/** + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for 82599 + **/ +static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ + /* + * Workaround for 82599 silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ + hw->mac.ops.disable_rx_buff(hw); + + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); + + hw->mac.ops.enable_rx_buff(hw); + + return 0; +} + +/** + * ixgbe_verify_fw_version_82599 - verify fw version for 82599 + * @hw: pointer to hardware structure + * + * Verifies that installed the firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. 
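+ * The version word is located by following IXGBE_FW_PTR to the Firmware
+ * Module block and then the Pass Through Patch Configuration pointer within
+ * it.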
+ * + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. + **/ +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; + u16 offset; + u16 fw_version = 0; + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) + return 0; + + /* get the offset to the Firmware Module block */ + offset = IXGBE_FW_PTR; + if (hw->eeprom.ops.read(hw, offset, &fw_offset)) + goto fw_version_err; + + if (fw_offset == 0 || fw_offset == 0xFFFF) + return IXGBE_ERR_EEPROM_VERSION; + + /* get the offset to the Pass Through Patch Configuration block */ + offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; + if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) + goto fw_version_err; + + if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF) + return IXGBE_ERR_EEPROM_VERSION; + + /* get the firmware version */ + offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; + if (hw->eeprom.ops.read(hw, offset, &fw_version)) + goto fw_version_err; + + if (fw_version > 0x5) + status = 0; + + return status; + +fw_version_err: + hw_err(hw, "eeprom read at offset %d failed\n", offset); + return IXGBE_ERR_EEPROM_VERSION; +} + +/** + * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. + * @hw: pointer to hardware structure + * + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled. + **/ +static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +{ + u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; + s32 status; + + /* get the offset to the Firmware Module block */ + status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if (status || fw_offset == 0 || fw_offset == 0xFFFF) + return false; + + /* get the offset to the LESM Parameters block */ + status = hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_LESM_PARAMETERS_PTR), + &fw_lesm_param_offset); + + if (status || + fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF) + return false; + + /* get the lesm state word */ + status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + + IXGBE_FW_LESM_STATE_1), + &fw_lesm_state); + + if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + return true; + + return false; +} + +/** + * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Retrieves 16 bit word(s) read from EEPROM + **/ +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + + /* If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if (eeprom->type == ixgbe_eeprom_spi && + offset + (words - 1) <= IXGBE_EERD_MAX_ADDR) + return ixgbe_read_eerd_buffer_generic(hw, offset, words, data); + + return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words, + data); +} + +/** + * ixgbe_read_eeprom_82599 - Read EEPROM word using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM + **/ +static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 
offset, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR) + return ixgbe_read_eerd_generic(hw, offset, data); + + return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); +} + +/** + * ixgbe_reset_pipeline_82599 - perform pipeline reset + * + * @hw: pointer to hardware structure + * + * Reset pipeline by asserting Restart_AN together with LMS change to ensure + * full pipeline reset. Note - We must hold the SW/FW semaphore before writing + * to AUTOC, so this function assumes the semaphore is held. + **/ +static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 anlp1_reg = 0; + u32 i, autoc_reg, autoc2_reg; + + /* Enable link if disabled in NVM */ + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { + autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); + IXGBE_WRITE_FLUSH(hw); + } + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); + + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + usleep_range(4000, 8000); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { + hw_dbg(hw, "auto negotiation not completed\n"); + ret_val = IXGBE_ERR_RESET_FAILED; + goto reset_pipeline_out; + } + + ret_val = 0; + +reset_pipeline_out: + /* Write AUTOC register with original LMS field and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + + return ret_val; +} + +/** + * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Acquire I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + usleep_range(5000, 10000); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + +/** + * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
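+ * On QSFP designs that share the I2C bus, ownership is requested by asserting
+ * SDP0 and treated as granted once SDP1 reads back set; SDP0 is cleared again
+ * when the transfer completes.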
+ **/ +static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + u32 esdp; + s32 status; + s32 timeout = 200; + + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Acquire I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + + while (timeout) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + break; + + usleep_range(5000, 10000); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } + } + + status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); + +release_i2c_access: + if (hw->phy.qsfp_shared_i2c_bus == true) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); + } + + return status; +} + +static struct ixgbe_mac_operations mac_ops_82599 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_82599, + .start_hw = &ixgbe_start_hw_82599, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_82599, + .enable_rx_dma = &ixgbe_enable_rx_dma_82599, + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_device_caps = &ixgbe_get_device_caps_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, + .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, + .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, + .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599, + .setup_link = &ixgbe_setup_mac_link_82599, + .set_rxpba = &ixgbe_set_rxpba_generic, + .check_link = &ixgbe_check_mac_link_generic, + .get_link_capabilities = &ixgbe_get_link_capabilities_82599, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, + .clear_vmdq = &ixgbe_clear_vmdq_generic, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_generic, + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = &ixgbe_setup_sfp_modules_82599, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, + .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, + .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, + .prot_autoc_read = &prot_autoc_read_82599, + .prot_autoc_write = &prot_autoc_write_82599, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, +}; + +static 
struct ixgbe_eeprom_operations eeprom_ops_82599 = { + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eeprom_82599, + .read_buffer = &ixgbe_read_eeprom_buffer_82599, + .write = &ixgbe_write_eeprom_generic, + .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, +}; + +static struct ixgbe_phy_operations phy_ops_82599 = { + .identify = &ixgbe_identify_phy_82599, + .identify_sfp = &ixgbe_identify_module_generic, + .init = &ixgbe_init_phy_ops_82599, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_82599_info = { + .mac = ixgbe_mac_82599EB, + .get_invariants = &ixgbe_get_invariants_82599, + .mac_ops = &mac_ops_82599, + .eeprom_ops = &eeprom_ops_82599, + .phy_ops = &phy_ops_82599, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c new file mode 100644 index 000000000..06d8f3cfa --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -0,0 +1,3900 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/netdevice.h> + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset); +static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +/** + * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function checks the device id to see if the associated phy supports + * autoneg flow control. + **/ +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + bool supported = false; + ixgbe_link_speed speed; + bool link_up; + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + true : false; + else + supported = true; + break; + case ixgbe_media_type_backplane: + supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + supported = true; + break; + default: + break; + } + default: + break; + } + + return supported; +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +{ + s32 ret_val = 0; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + bool locked = false; + + /* + * Validate the requested mode. Strict IEEE mode does not allow + * ixgbe_fc_rx_pause because it will cause us to fail at UNH. + */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa.
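+ * Backplane parts advertise through AUTOC (read below via prot_autoc_read(),
+ * which also reports through 'locked' whether the SW/FW semaphore had to be
+ * taken), fiber parts through PCS1GANA, and copper parts through the MDIO AN
+ * advertisement register.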
+ */ + switch (hw->phy.media_type) { + case ixgbe_media_type_backplane: + /* some MAC's need RMW protection on AUTOC */ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp); + if (ret_val) + return ret_val; + + /* only backplane uses autoc so fall through */ + case ixgbe_media_type_fiber: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + + break; + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, &reg_cu); + break; + default: + break; + } + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= IXGBE_PCS1GANA_ASM_PAUSE; + reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_ASM_PAUSE; + reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= IXGBE_TAF_ASM_PAUSE; + reg_cu &= ~IXGBE_TAF_SYM_PAUSE; + } + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE; + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + if (hw->mac.type != ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. + * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on, likewise reset_pipeline requires the lock as + * it also writes AUTOC.
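+ * The 'locked' flag returned by prot_autoc_read() above is passed back to
+ * prot_autoc_write() below so the semaphore state stays consistent across
+ * the read-modify-write.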
+ */ + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); + if (ret_val) + return ret_val; + + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + ixgbe_device_supports_autoneg_fc(hw)) { + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, reg_cu); + } + + hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); + return ret_val; +} + +/** + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +{ + s32 ret_val; + u32 ctrl_ext; + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* Identify the PHY */ + hw->phy.ops.identify(hw); + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + /* Set No Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ret_val = ixgbe_setup_fc(hw); + if (!ret_val) + return 0; + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return ret_val; +} + +/** + * ixgbe_start_hw_gen2 - Init sequence for common device family + * @hw: pointer to hw structure + * + * Performs the init sequence common to the second generation + * of 10 GbE devices. + * Devices in the second generation: + * 82599 + * X540 + **/ +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +{ + u32 i; + + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); + } + IXGBE_WRITE_FLUSH(hw); + +#ifndef CONFIG_SPARC + /* Disable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + u32 regval; + + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + u32 regval; + + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } +#endif + return 0; +} + +/** + * ixgbe_init_hw_generic - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) +{ + s32 status; + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == 0) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } + + return status; +} + +/** + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
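+ * The return values are intentionally discarded; a single read is enough to
+ * zero each read-to-clear counter.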
+ **/ +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +{ + u16 i = 0; + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + } + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + } + if (hw->mac.type >= ixgbe_mac_82599EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); + if (hw->mac.type == ixgbe_mac_82598EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + } + + if (hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) + hw->phy.ops.identify(hw); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); + hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); + } + + return 0; +} + +/** + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. 
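+ * Two layouts are handled: the legacy format, where the two EEPROM words are
+ * decoded directly into an 11-character hex string, and the pointer format,
+ * where a length-prefixed string block is copied out of the EEPROM.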
+ **/ +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + hw_dbg(hw, "PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { + hw_dbg(hw, "NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg(hw, "NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * ixgbe_get_mac_addr_generic - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return 0; +} + +enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status) +{ + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case IXGBE_PCI_LINK_WIDTH_1: + return ixgbe_bus_width_pcie_x1; + case IXGBE_PCI_LINK_WIDTH_2: + return ixgbe_bus_width_pcie_x2; + case 
IXGBE_PCI_LINK_WIDTH_4: + return ixgbe_bus_width_pcie_x4; + case IXGBE_PCI_LINK_WIDTH_8: + return ixgbe_bus_width_pcie_x8; + default: + return ixgbe_bus_width_unknown; + } +} + +enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) +{ + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: + return ixgbe_bus_speed_2500; + case IXGBE_PCI_LINK_SPEED_5000: + return ixgbe_bus_speed_5000; + case IXGBE_PCI_LINK_SPEED_8000: + return ixgbe_bus_speed_8000; + default: + return ixgbe_bus_speed_unknown; + } +} + +/** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +{ + u16 link_status; + + hw->bus.type = ixgbe_bus_type_pci_express; + + /* Get the negotiated link width and speed from PCI config space */ + link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); + + hw->bus.width = ixgbe_convert_bus_width(link_status); + hw->bus.speed = ixgbe_convert_bus_speed(link_status); + + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u32 reg; + + reg = IXGBE_READ_REG(hw, IXGBE_STATUS); + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; + bus->lan_id = bus->func; + + /* check for a port swap */ + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); + if (reg & IXGBE_FACTPS_LFS) + bus->func ^= 0x1; +} + +/** + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + hw->mac.ops.disable_rx(hw); + + /* Clear interrupt mask to stop interrupts from being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + reg_val |= IXGBE_RXDCTL_SWFLSH; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); + } + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + usleep_range(1000, 2000); + + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ixgbe_disable_pcie_master(hw); +} + +/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. 
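One reasonable way a caller uses ixgbe_stop_adapter_generic() above is as the quiesce step in front of a reset; a minimal sketch, not part of the patch:

static s32 example_quiesce_then_reset(struct ixgbe_hw *hw)
{
	s32 err;

	/* masks interrupts, flushes Tx/Rx queues, blocks bus mastering */
	err = hw->mac.ops.stop_adapter(hw);
	if (err)
		return err;

	/* adapter_stopped is now set, so only the reset path touches HW */
	return hw->mac.ops.reset_hw(hw);
}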
+ * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn on the LED, set mode to ON. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_led_off_generic - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well + * tested value */ + eeprom->semaphore_delay = 10; + /* Clear EEPROM page size, it will be initialized as needed */ + eeprom->word_page_size = 0; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", + eeprom->type, eeprom->word_size, eeprom->address_bits); + } + + return 0; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write + * @words: number of words + * @data: 16 bit word(s) to write to EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 i, count; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset + words > hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + /* + * The EEPROM page size cannot be queried from the chip. We do lazy + * initialization. It is worth to do that when we write large buffer. + */ + if ((hw->eeprom.word_page_size == 0) && + (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) + ixgbe_detect_eeprom_page_size_generic(hw, offset); + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != 0) + break; + } + + return status; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @words: number of word(s) + * @data: 16 bit word(s) to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word; + u16 page_size; + u16 i; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + + /* Prepare the EEPROM for writing */ + status = ixgbe_acquire_eeprom(hw); + if (status) + return status; + + if (ixgbe_ready_eeprom(hw) != 0) { + ixgbe_release_eeprom(hw); + return IXGBE_ERR_EEPROM; + } + + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + + ixgbe_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI */ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + usleep_range(10000, 20000); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); + + return 0; +} + +/** + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit words(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 i, count; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset + words > hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. 
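As the kernel-doc above ixgbe_write_eeprom_buffer_bit_bang() warns, any EEPROM write leaves the image checksum stale until it is recomputed. An illustrative caller (not part of the patch; offset and value are placeholders):

static s32 example_update_eeprom_word(struct ixgbe_hw *hw, u16 offset,
				      u16 value)
{
	s32 err;

	err = hw->eeprom.ops.write(hw, offset, value);
	if (err)
		return err;

	/* recompute and store the checksum word so the next
	 * validate_checksum() call does not reject the image
	 */
	return hw->eeprom.ops.update_checksum(hw);
}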
However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status) + return status; + } + + return 0; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word_in; + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 i; + + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + if (status) + return status; + + if (ixgbe_ready_eeprom(hw) != 0) { + ixgbe_release_eeprom(hw); + return IXGBE_ERR_EEPROM; + } + + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. */ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + /* End this read operation */ + ixgbe_release_eeprom(hw); + + return 0; +} + +/** + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); +} + +/** + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of word(s) + * @data: 16 bit word(s) from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
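A small readability note on the chunking expression used by both buffered bit-bang routines above: it is simply a bounded minimum, so the following would be an equivalent spelling (sketch only):

	count = min_t(u16, words - i, IXGBE_EEPROM_RD_BUFFER_MAX_COUNT);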
+ **/ +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eerd; + s32 status; + u32 i; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + IXGBE_EEPROM_RW_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + + if (status == 0) { + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_RW_REG_DATA); + } else { + hw_dbg(hw, "Eeprom read timed out\n"); + return status; + } + } + + return 0; +} + +/** + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be used as a scratch pad + * + * Discover EEPROM page size by writing marching data at given offset. + * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. + **/ +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset) +{ + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; + s32 status; + u16 i; + + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) + data[i] = i; + + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, + IXGBE_EEPROM_PAGE_SIZE_MAX, data); + hw->eeprom.word_page_size = 0; + if (status) + return status; + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + if (status) + return status; + + /* + * When writing in burst more than the actual page size + * EEPROM address wraps around current page. + */ + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + + hw_dbg(hw, "Detected EEPROM page size = %d words.\n", + hw->eeprom.word_page_size); + return 0; +} + +/** + * ixgbe_read_eerd_generic - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); +} + +/** + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
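A worked example of the page-size probe above, taking IXGBE_EEPROM_PAGE_SIZE_MAX as 128 and assuming the scratch offset is page aligned and the part's real page size is 16 words (both values hypothetical):

	/*
	 * The probe writes data[] = 0, 1, ..., 127 as one long burst.
	 * A 16-word page wraps, so page position 0 is rewritten by
	 * data[16], data[32], ..., data[112]; the last write wins and a
	 * read-back of the first word returns 112.  Hence
	 *
	 *	word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]
	 *		       = 128 - 112 = 16
	 */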
+ **/ +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eewr; + s32 status; + u16 i; + + hw->eeprom.ops.init_params(hw); + + if (words == 0) + return IXGBE_ERR_INVALID_ARGUMENT; + + if (offset >= hw->eeprom.word_size) + return IXGBE_ERR_EEPROM; + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + return status; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + return status; + } + } + + return 0; +} + +/** + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); +} + +/** + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + * @hw: pointer to hardware structure + * @ee_reg: EEPROM flag for polling + * + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. + **/ +static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +{ + u32 i; + u32 reg; + + for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { + if (ee_reg == IXGBE_NVM_POLL_READ) + reg = IXGBE_READ_REG(hw, IXGBE_EERD); + else + reg = IXGBE_READ_REG(hw, IXGBE_EEWR); + + if (reg & IXGBE_EEPROM_RW_REG_DONE) { + return 0; + } + udelay(5); + } + return IXGBE_ERR_EEPROM; +} + +/** + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + * @hw: pointer to hardware structure + * + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. 
+ **/ +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + u32 i; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) + return IXGBE_ERR_SWFW_SYNC; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_GNT) + break; + udelay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + hw_dbg(hw, "Could not acquire EEPROM grant\n"); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); + return 0; +} + +/** + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) + break; + usleep_range(50, 100); + } + + if (i == timeout) { + hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); + /* this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ixgbe_release_eeprom_semaphore(hw); + + usleep_range(50, 100); + /* one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (swsm & IXGBE_SWSM_SMBI) { + hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); + return IXGBE_ERR_EEPROM; + } + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + /* If we set the bit successfully then we got the + * semaphore. + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + usleep_range(50, 100); + } + + /* Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + return IXGBE_ERR_EEPROM; + } + + return 0; +} + +/** + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. 
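The helpers above nest their locks as SWFW EEP semaphore first, then EEC.REQ/GNT (and, inside the SWFW acquire itself, SWSM.SMBI then SWSM.SWESMBI). A sketch of the bracket that every bit-bang transaction follows; this mirrors what the buffered read/write routines earlier in the file do:

	if (ixgbe_acquire_eeprom(hw))		/* SWFW EEP bit + EEC.REQ/GNT */
		return IXGBE_ERR_SWFW_SYNC;
	if (ixgbe_ready_eeprom(hw)) {		/* SPI status register idle? */
		ixgbe_release_eeprom(hw);
		return IXGBE_ERR_EEPROM;
	}
	/* ... shift opcode, address and data bits ... */
	ixgbe_release_eeprom(hw);		/* drops EEC.REQ and the SWFW bit */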
+ **/ +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +{ + u16 i; + u8 spi_stat_reg; + + /* + * Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + ixgbe_standby_eeprom(hw); + } + + /* + * On some parts, SPI write time could vary from 0-20mSec on 3.3V + * devices (and only 0-5mSec on 5V devices) + */ + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { + hw_dbg(hw, "SPI EEPROM Status error\n"); + return IXGBE_ERR_EEPROM; + } + + return 0; +} + +/** + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure + **/ +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Toggle CS to flush commands */ + eec |= IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); + eec &= ~IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + * @hw: pointer to hardware structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + **/ +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count) +{ + u32 eec; + u32 mask; + u32 i; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* + * Mask is used to shift "count" bits of "data" out to the EEPROM + * one bit at a time. Determine the starting bit based on count + */ + mask = 0x01 << (count - 1); + + for (i = 0; i < count; i++) { + /* + * A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK + * bit controls the clock input to the EEPROM). A "0" is + * shifted out to the EEPROM by setting "DI" to "0" and then + * raising and then lowering the clock. + */ + if (data & mask) + eec |= IXGBE_EEC_DI; + else + eec &= ~IXGBE_EEC_DI; + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + udelay(1); + + ixgbe_raise_eeprom_clk(hw, &eec); + ixgbe_lower_eeprom_clk(hw, &eec); + + /* + * Shift mask to signify next bit of data to shift in to the + * EEPROM + */ + mask = mask >> 1; + } + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eec &= ~IXGBE_EEC_DI; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + **/ +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +{ + u32 eec; + u32 i; + u16 data = 0; + + /* + * In order to read a register from the EEPROM, we need to shift + * 'count' bits in from the EEPROM. 
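A concrete trace of the MSB-first shifting done by ixgbe_shift_out_eeprom_bits() above, using the standard SPI write-enable opcode (0x06) as the example value:

	/*
	 * data = 0x06, count = 8: mask starts at 0x80 and walks right,
	 * so DI is driven 0 0 0 0 0 1 1 0, one SK rise/fall per bit.
	 */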
Bits are "shifted in" by raising + * the clock input to the EEPROM (setting the SK bit), and then reading + * the value of the "DO" bit. During this "shifting in" process the + * "DI" bit should always be clear. + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + + for (i = 0; i < count; i++) { + data = data << 1; + ixgbe_raise_eeprom_clk(hw, &eec); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DI); + if (eec & IXGBE_EEC_DO) + data |= 1; + + ixgbe_lower_eeprom_clk(hw, &eec); + } + + return data; +} + +/** + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eec: EEC register's current value + **/ +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + /* + * Raise the clock input to the EEPROM + * (setting the SK bit), then delay + */ + *eec = *eec | IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eecd: EECD's current value + **/ +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + /* + * Lower the clock input to the EEPROM (clearing the SK bit), then + * delay + */ + *eec = *eec & ~IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); +} + +/** + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure + **/ +static void ixgbe_release_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec |= IXGBE_EEC_CS; /* Pull CS high */ + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + udelay(1); + + /* Stop requesting EEPROM access */ + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + /* + * Delay before attempt to obtain semaphore again to allow FW + * access. 
semaphore_delay is in ms we need us for usleep_range + */ + usleep_range(hw->eeprom.semaphore_delay * 1000, + hw->eeprom.semaphore_delay * 2000); +} + +/** + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (hw->eeprom.ops.read(hw, i, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + if (hw->eeprom.ops.read(hw, i, &pointer)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* If the pointer seems invalid */ + if (pointer == 0xFFFF || pointer == 0) + continue; + + if (hw->eeprom.ops.read(hw, pointer, &length)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + if (length == 0xFFFF || length == 0) + continue; + + for (j = pointer + 1; j <= pointer + length; j++) { + if (hw->eeprom.ops.read(hw, j, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + /* + * Read the first word from the EEPROM. 
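The checksum convention used by the calc/validate pair above, as one worked line (the running sum S is hypothetical; IXGBE_EEPROM_SUM is 0xBABA on these parts):

	/*
	 * If the 16-bit sum of every covered word except the checksum
	 * word itself is S = 0x1234, the stored value is
	 *	checksum = IXGBE_EEPROM_SUM - S = 0xBABA - 0x1234 = 0xA886
	 * so that S + checksum == IXGBE_EEPROM_SUM for a good image.
	 */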
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); + + return status; +} + +/** + * ixgbe_set_rar_generic - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* setup VMDq pool selection before this RAR gets enabled */ + hw->mac.ops.set_vmdq(hw, index, vmdq); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + return 0; +} + +/** + * ixgbe_clear_rar_generic - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); + + return 0; +} + +/** + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. 
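A worked example of the RAL/RAH packing in ixgbe_set_rar_generic() above, with a made-up unicast address:

	/*
	 * addr = 00:1b:21:3a:4f:5e (hypothetical):
	 *	RAL       = 0x3a211b00		(bytes 0..3, little endian)
	 *	RAH[15:0] = 0x5e4f		(bytes 4..5)
	 *	RAH      |= IXGBE_RAH_AV	(entry valid)
	 * The upper RAH bits (VMDq selection on some parts) survive the
	 * read-modify-write.
	 */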
+ **/ +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (!is_valid_ether_addr(hw->mac.addr)) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); + } else { + /* Setup the receive address. */ + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); + hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + hw_dbg(hw, " Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + if (hw->mac.ops.init_uta_tables) + hw->mac.ops.init_uta_tables(hw); + + return 0; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. + **/ +static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + hw_dbg(hw, " bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. 
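A worked example of the hash above for the IPv4 all-hosts group, using filter type 0 (bits [47:36] of the address):

	/*
	 * mc_addr = 01:00:5e:00:00:01, mc_filter_type = 0:
	 *	vector     = (mc_addr[4] >> 4) | ((u16)mc_addr[5] << 4)
	 *	           = (0x00 >> 4) | (0x01 << 4) = 0x010
	 *	vector_reg = 0x010 >> 5   = 0
	 *	vector_bit = 0x010 & 0x1f = 16
	 * so such frames are accepted once MTA[0] bit 16 is set.
	 */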
The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u32 i; + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + hw_dbg(hw, " Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* Update mta shadow */ + netdev_for_each_mc_addr(ha, netdev) { + hw_dbg(hw, " Adding the multicast addresses:\n"); + ixgbe_set_mta(hw, ha->addr); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); + return 0; +} + +/** + * ixgbe_enable_mc_generic - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | + hw->mac.mc_filter_type); + + return 0; +} + +/** + * ixgbe_disable_mc_generic - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + return 0; +} + +/** + * ixgbe_fc_enable_generic - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) +{ + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + int i; + + /* Validate the water mark configuration. 
*/ + if (!hw->fc.pause_time) + return IXGBE_ERR_INVALID_LINK_SETTINGS; + + /* Low water mark of zero causes XOFF floods */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + hw_dbg(hw, "Invalid water mark configuration\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + } + } + + /* Negotiate the fc mode to use */ + ixgbe_fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + /* Set 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the maximum FCRTH value. This allows the Tx + * switch to function even under heavy Rx workloads. 
+ */ + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return 0; +} + +/** + * ixgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) + return IXGBE_ERR_FC_NOT_NEGOTIATED; + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + hw_dbg(hw, "Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; + hw_dbg(hw, "Flow Control = NONE.\n"); + } + return 0; +} + +/** + * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure + * + * Enable flow control according on 1 gig fiber. + **/ +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +{ + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + s32 ret_val; + + /* + * On multispeed fiber at 1g, bail out if + * - link is up but AN did not complete, or if + * - link is up and AN completed but timed out + */ + + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) + return IXGBE_ERR_FC_NOT_NEGOTIATED; + + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); + + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. 
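Two notes on the code above. First, the FCTTV packing: pause_time * 0x00010001 simply duplicates the 16-bit pause time into both halves of each register (two traffic classes per register), and FCRTV refreshes XOFF at half the pause time. Second, the resolution done by ixgbe_negotiate_fc() is the usual IEEE 802.3 Annex 28B table, summarized here:

	/*
	 *	local SYM/ASM	LP SYM/ASM	resolved mode
	 *	  1     x	  1    x	full (rx_pause if only Rx
	 *					was requested locally)
	 *	  0     1	  1    1	tx_pause
	 *	  1     1	  0    1	rx_pause
	 *	      anything else		none
	 */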
+ **/ +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +{ + u32 links2, anlp1_reg, autoc_reg, links; + s32 ret_val; + + /* + * On backplane, bail out if + * - backplane autoneg was not completed, or if + * - we are 82599 and link partner is not AN enabled + */ + links = IXGBE_READ_REG(hw, IXGBE_LINKS); + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) + return IXGBE_ERR_FC_NOT_NEGOTIATED; + + if (hw->mac.type == ixgbe_mac_82599EB) { + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) + return IXGBE_ERR_FC_NOT_NEGOTIATED; + } + /* + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + + ret_val = ixgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, + IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); + + return ret_val; +} + +/** + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +{ + u16 technology_ability_reg = 0; + u16 lp_technology_ability_reg = 0; + + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &technology_ability_reg); + hw->phy.ops.read_reg(hw, MDIO_AN_LPA, + MDIO_MMD_AN, + &lp_technology_ability_reg); + + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); +} + +/** + * ixgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +void ixgbe_fc_autoneg(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + * + * Since we're being called from an LSC, link is already known to be up. + * So use link_up_wait_to_complete=false. + */ + if (hw->fc.disable_fc_autoneg) + goto out; + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) + goto out; + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case ixgbe_media_type_backplane: + ret_val = ixgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case ixgbe_media_type_copper: + if (ixgbe_device_supports_autoneg_fc(hw)) + ret_val = ixgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == 0) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ixgbe_pcie_timeout_poll - Return number of times to poll for completion + * @hw: pointer to hardware structure + * + * System-wide timeout range is encoded in PCIe Device Control2 register. + * + * Add 10% to specified maximum and return the number of times to poll for + * completion timeout, in units of 100 microsec. Never return less than + * 800 = 80 millisec. 
+ **/ +static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) +{ + s16 devctl2; + u32 pollcnt; + + devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); + devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; + + switch (devctl2) { + case IXGBE_PCIDEVCTRL2_65_130ms: + pollcnt = 1300; /* 130 millisec */ + break; + case IXGBE_PCIDEVCTRL2_260_520ms: + pollcnt = 5200; /* 520 millisec */ + break; + case IXGBE_PCIDEVCTRL2_1_2s: + pollcnt = 20000; /* 2 sec */ + break; + case IXGBE_PCIDEVCTRL2_4_8s: + pollcnt = 80000; /* 8 sec */ + break; + case IXGBE_PCIDEVCTRL2_17_34s: + pollcnt = 34000; /* 34 sec */ + break; + case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ + case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ + case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ + case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ + default: + pollcnt = 800; /* 80 millisec minimum */ + break; + } + + /* add 10% to spec maximum */ + return (pollcnt * 11) / 10; +} + +/** + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. + **/ +static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ + u32 i, poll; + u16 value; + + /* Always set this bit to ensure any future transactions are blocked */ + IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + + /* Exit if master requests are blocked */ + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || + ixgbe_removed(hw->hw_addr)) + return 0; + + /* Poll for master request bit to clear */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + udelay(100); + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + return 0; + } + + /* + * Two consecutive resets are required via CTRL.RST per datasheet + * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine + * of this need. The first reset prevents new master requests from + * being issued by our device. We then must wait 1usec or more for any + * remaining completions from the PCIe bus to trickle in, and then reset + * again to clear out any effects they may have had on our device. + */ + hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. 
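A worked example of the poll budget computed above (using one of the timeout encodings handled in the switch):

	/*
	 * e.g. a Device Control 2 range of 260-520 ms:
	 *	pollcnt  = 5200			(520 ms in 100 us units)
	 *	returned = 5200 * 11 / 10 = 5720 polls  ~= 572 ms worst case
	 */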
+ */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + udelay(100); + value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); + if (ixgbe_removed(hw->hw_addr)) + return 0; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + return 0; + } + + hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); + return IXGBE_ERR_MASTER_REQUESTS_PENDING; +} + +/** + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) +{ + u32 gssr = 0; + u32 swmask = mask; + u32 fwmask = mask << 5; + u32 timeout = 200; + u32 i; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_eeprom_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + if (!(gssr & (fwmask | swmask))) { + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + ixgbe_release_eeprom_semaphore(hw); + return 0; + } else { + /* Resource is currently in use by FW or SW */ + ixgbe_release_eeprom_semaphore(hw); + usleep_range(5000, 10000); + } + } + + /* If time expired clear the bits holding the lock and retry */ + if (gssr & (fwmask | swmask)) + ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + + usleep_range(5000, 10000); + return IXGBE_ERR_SWFW_SYNC; +} + +/** + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) +{ + u32 gssr; + u32 swmask = mask; + + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read + * @hw: pointer to hardware structure + * @reg_val: Value we read from AUTOC + * @locked: bool to indicate whether the SW/FW lock should be taken. Never + * true in this the generic case. + * + * The default case requires no protection so just to the register read. + **/ +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +{ + *locked = false; + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); + return 0; +} + +/** + * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by + * previous read. + **/ +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +{ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); + return 0; +} + +/** + * ixgbe_disable_rx_buff_generic - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally + * empty the Rx security block. 
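A minimal sketch of how the SWFW semaphore pair above is normally used to bracket a shared resource. Illustrative only; IXGBE_GSSR_PHY0_SM is assumed here to be one of the GSSR resource masks from ixgbe_type.h, and the firmware-side counterpart of each mask is mask << 5, as checked in ixgbe_acquire_swfw_sync().

static s32 example_with_phy0_lock(struct ixgbe_hw *hw)
{
	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM))
		return IXGBE_ERR_SWFW_SYNC;

	/* ... exclusive access to PHY0 registers here ... */

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);
	return 0;
}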
+ **/ +s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECRX_POLL 40 + int i; + int secrxreg; + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + udelay(1000); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) + hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); + + return 0; + +} + +/** + * ixgbe_enable_rx_buff - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path + **/ +s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) +{ + int secrxreg; + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +{ + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); + + return 0; +} + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = false; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + bool locked = false; + s32 ret_val; + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val) + return ret_val; + + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + autoc_reg |= IXGBE_AUTOC_FLU; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val) + return ret_val; + + IXGBE_WRITE_FLUSH(hw); + + usleep_range(10000, 20000); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + bool locked = false; + s32 ret_val; + + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); + if (ret_val) + return ret_val; + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); + if (ret_val) + return ret_val; + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) +{ + s32 ret_val; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); + if (ret_val) + hw_err(hw, "eeprom read at offset %d failed\n", + IXGBE_SAN_MAC_ADDR_PTR); + + return ret_val; +} + +/** + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + s32 ret_val; + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) + + goto san_mac_addr_clr; + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + ret_val = hw->eeprom.ops.read(hw, san_mac_offset, + &san_mac_data); + if (ret_val) { + hw_err(hw, "eeprom read at offset %d failed\n", + san_mac_offset); + goto san_mac_addr_clr; + } + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + return 0; + +san_mac_addr_clr: + /* No addresses available in this EEPROM. It's not necessarily an + * error though, so just wipe the local address and return. + */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + return ret_val; +} + +/** + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. 
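+ *
+ * The count read from the capability register is zero-based, so it is
+ * incremented and then clamped to the per-MAC maximum before being
+ * returned; unrecognised MAC types report a single vector.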
+ **/ +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +{ + u16 msix_count; + u16 max_msix_count; + u16 pcie_offset; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; + default: + return 1; + } + + msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); + if (ixgbe_removed(hw->hw_addr)) + msix_count = 0; + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW */ + msix_count++; + + if (msix_count > max_msix_count) + msix_count = max_msix_count; + + return msix_count; +} + +/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + + if (ixgbe_removed(hw->hw_addr)) + return 0; + + if (!mpsar_lo && !mpsar_hi) + return 0; + + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + mpsar_lo = 0; + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + mpsar_hi = 0; + } + } else if (vmdq < 32) { + mpsar_lo &= ~(1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); + } else { + mpsar_hi &= ~(1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); + } + + /* was that the last pool using this rar? */ + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + hw->mac.ops.clear_rar(hw, rar); + return 0; +} + +/** + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + if (vmdq < 32) { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar |= 1 << vmdq; + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + } else { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + mpsar |= 1 << (vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + } + return 0; +} + +/** + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. 
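+ * The caller is expected to pass that default pool index in @vmdq.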
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + if (vmdq < 32) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + } + + return 0; +} + +/** + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +{ + int i; + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + + return 0; +} + +/** + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* + * Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* + * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. + */ + if (regindex >= IXGBE_VLVF_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else { + hw_dbg(hw, "No space in VLVF.\n"); + regindex = IXGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 bits; + u32 vt; + u32 targetbit; + bool vfta_changed = false; + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
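+ *
+ * As a worked example of the VFTA layout described in Part 1 below,
+ * VLAN ID 100 selects register 100 >> 5 = 3 and bit 100 & 0x1F = 4.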
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + if (vt & IXGBE_VT_CTL_VT_ENABLE) { + s32 vlvf_index; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + bits |= (1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index*2), + bits); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + bits |= (1 << (vind-32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1), + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + bits &= ~(1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index*2), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + bits &= ~(1 << (vind-32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), + (IXGBE_VLVF_VIEN | vlan)); + if (!vlan_on) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. 
*/ + vfta_changed = false; + } + } else { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + } + } + + if (vfta_changed) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); + + return 0; +} + +/** + * ixgbe_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); + } + + return 0; +} + +/** + * ixgbe_check_mac_link_generic - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg, links_orig; + u32 i; + + /* clear the old state */ + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_orig != links_reg) { + hw_dbg(hw, "LINKS changed from %08X to %08X\n", + links_orig, links_reg); + } + + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } + msleep(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + return 0; +} + +/** + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
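+ *
+ * Both prefixes are initialised to 0xFFFF so callers can detect the
+ * unsupported case; EEPROM read failures are logged but the function
+ * still returns 0.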
+ **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; + if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) + goto wwn_prefix_err; + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + return 0; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + if (hw->eeprom.ops.read(hw, offset, &caps)) + goto wwn_prefix_err; + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + return 0; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) + hw_err(hw, "eeprom read at offset %d failed\n", offset); + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) + goto wwn_prefix_err; + + return 0; + +wwn_prefix_err: + hw_err(hw, "eeprom read at offset %d failed\n", offset); + return 0; +} + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +{ + int j; + int pf_target_reg = pf >> 3; + int pf_target_shift = pf % 8; + u32 pfvfspoof = 0; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (enable) + pfvfspoof = IXGBE_SPOOF_MACAS_MASK; + + /* + * PFVFSPOOF register array is size 8 with 8 bits assigned to + * MAC anti-spoof enables in each register array element. + */ + for (j = 0; j < pf_target_reg; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. Do not set the bits assigned to the PF + */ + pfvfspoof &= (1 << pf_target_shift) - 1; + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. + */ + for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); +} + +/** + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_get_device_caps_generic - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps. 
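+ *
+ * The EEPROM read status is not checked here; the function always
+ * returns 0.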
+ **/ +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +{ + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + + return 0; +} + +/** + * ixgbe_set_rxpba_generic - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, + int num_pb, + u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number + * of packet buffers requested using supplied strategy. + */ + switch (strategy) { + case (PBA_STRATEGY_WEIGHTED): + /* pba_80_48 strategy weight first half of packet buffer with + * 5/8 of the packet buffer space. + */ + rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Fall through to configure remaining packet buffers */ + case (PBA_STRATEGY_EQUAL): + /* Divide the remaining Rx packet buffer evenly among the TCs */ + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* + * Setup Tx packet buffer and threshold equally for all TCs + * TXPBTHRESH register is set in K so divide by 1024 and subtract + * 10 since the largest packet we support is just over 9K. + */ + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < IXGBE_MAX_PB; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } +} + +/** + * ixgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + * + * Communicates with the manageability block. On success return 0 + * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
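+ *
+ * The command flow is: copy @buffer into the FLEX_MNG RAM one DWORD at
+ * a time, set HICR.C to hand the command to the management ARC, poll
+ * until HICR.C clears (or @timeout ms elapse), verify HICR.SV, and then
+ * optionally copy the response header and payload back into @buffer.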
+ **/ +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, + bool return_data) +{ + u32 hicr, i, bi, fwsts; + u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + u16 buf_len, dword_len; + + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); + + /* Check that the host interface is enabled. */ + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if ((hicr & IXGBE_HICR_EN) == 0) { + hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + hw_dbg(hw, "Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + dword_len = length >> 2; + + /* + * The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, + i, cpu_to_le32(buffer[i])); + + /* Setting this bit tells the ARC that a new command is pending. */ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); + + for (i = 0; i < timeout; i++) { + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_C)) + break; + usleep_range(1000, 2000); + } + + /* Check command successful completion. */ + if ((timeout != 0 && i == timeout) || + (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { + hw_dbg(hw, "Command has failed with no status valid.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + if (!return_data) + return 0; + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&buffer[bi]); + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + return 0; + + if (length < (buf_len + hdr_size)) { + hw_dbg(hw, "Buffer not large enough for reply message.\n"); + return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + + /* Pull in the rest of the buffer (bi is where we left off)*/ + for (; bi <= dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&buffer[bi]); + } + + return 0; +} + +/** + * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return 0 + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
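+ *
+ * The FW_CEM_CMD_DRIVER_INFO command is retried up to FW_CEM_MAX_RETRIES
+ * additional times while the SW/FW management semaphore is held.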
+ **/ +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ixgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)) + return IXGBE_ERR_SWFW_SYNC; + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (ret_val != 0) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = 0; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + return ret_val; +} + +/** + * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo + * @hw: pointer to the hardware structure + * + * The 82599 and x540 MACs can experience issues if TX work is still pending + * when a reset occurs. This function prevents this by flushing the PCIe + * buffers on the system. + **/ +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) +{ + u32 gcr_ext, hlreg0, i, poll; + u16 value; + + /* + * If double reset is not requested then all transactions should + * already be clear and as such there is no work to do + */ + if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) + return; + + /* + * Set loopback enable to prevent any transmits from being sent + * should the link come up. This assumes that the RXCTRL.RXEN bit + * has already been cleared. + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + + /* wait for a last completion before clearing buffers */ + IXGBE_WRITE_FLUSH(hw); + usleep_range(3000, 6000); + + /* Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usleep_range(100, 200); + value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); + if (ixgbe_removed(hw->hw_addr)) + break; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + break; + } + + /* initiate cleaning flow for buffers in the PCIe transaction layer */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, + gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); + + /* Flush all writes and allow 20usec for all transactions to clear */ + IXGBE_WRITE_FLUSH(hw); + udelay(20); + + /* restore previous register values */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); +} + +static const u8 ixgbe_emc_temp_data[4] = { + IXGBE_EMC_INTERNAL_DATA, + IXGBE_EMC_DIODE1_DATA, + IXGBE_EMC_DIODE2_DATA, + IXGBE_EMC_DIODE3_DATA +}; +static const u8 ixgbe_emc_therm_limit[4] = { + IXGBE_EMC_INTERNAL_THERM_LIMIT, + IXGBE_EMC_DIODE1_THERM_LIMIT, + IXGBE_EMC_DIODE2_THERM_LIMIT, + IXGBE_EMC_DIODE3_THERM_LIMIT +}; + +/** + * ixgbe_get_ets_data - Extracts the ETS bit data + * @hw: pointer to hardware structure + * @ets_cfg: extected ETS data + * @ets_offset: offset of ETS data + * + * Returns error code. 
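+ * IXGBE_NOT_IMPLEMENTED is returned when the ETS pointer is absent
+ * (0x0000 or 0xFFFF) or the stored configuration is not of the EMC
+ * sensor type.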
+ **/ +static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, + u16 *ets_offset) +{ + s32 status; + + status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); + if (status) + return status; + + if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) + return IXGBE_NOT_IMPLEMENTED; + + status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); + if (status) + return status; + + if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) + return IXGBE_NOT_IMPLEMENTED; + + return 0; +} + +/** + * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Returns the thermal sensor data structure + **/ +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 num_sensors; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + /* Only support thermal sensors attached to physical port 0 */ + if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) + return IXGBE_NOT_IMPLEMENTED; + + status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); + if (status) + return status; + + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + if (num_sensors > IXGBE_MAX_SENSORS) + num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { + u8 sensor_index; + u8 sensor_location; + + status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), + &ets_sensor); + if (status) + return status; + + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + + if (sensor_location != 0) { + status = hw->phy.ops.read_i2c_byte(hw, + ixgbe_emc_temp_data[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, + &data->sensor[i].temp); + if (status) + return status; + } + } + + return 0; +} + +/** + * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; + u8 therm_limit; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + + memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); + + /* Only support thermal sensors attached to physical port 0 */ + if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) + return IXGBE_NOT_IMPLEMENTED; + + status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); + if (status) + return status; + + low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> + IXGBE_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + if (num_sensors > IXGBE_MAX_SENSORS) + num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { + u8 sensor_index; + u8 sensor_location; + + if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { + hw_err(hw, "eeprom read at offset %d failed\n", + ets_offset + 1 + i); + continue; + } + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> + IXGBE_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >> + IXGBE_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + ixgbe_emc_therm_limit[sensor_index], + 
IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); + + if (sensor_location == 0) + continue; + + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; + } + + return 0; +} + +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + } + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } +} + +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); + + if (hw->mac.type != ixgbe_mac_82598EB) { + if (hw->mac.set_lben) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = false; + } + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h new file mode 100644 index 000000000..f21f8a165 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -0,0 +1,210 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" +#include "ixgbe.h" + +u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status); +enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + struct net_device *netdev); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg(struct ixgbe_hw *hw); + +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool 
*link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); + +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); +bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A +#define IXGBE_EMC_DIODE3_DATA 0x2A +#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30 + +s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); + +#define IXGBE_FAILED_READ_REG 0xffffffffU +#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +#define IXGBE_FAILED_READ_CFG_WORD 0xffffU + +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); + +static inline bool ixgbe_removed(void __iomem *addr) +{ + return unlikely(!addr); +} + +static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + + if (ixgbe_removed(reg_addr)) + return; + writel(value, reg_addr + reg); +} +#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value)) + +#ifndef writeq +#define writeq writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel((u32)val, addr); + writel((u32)(val >> 32), addr + 4); +} +#endif + +static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + + if (ixgbe_removed(reg_addr)) + return; + writeq(value, reg_addr + reg); +} +#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value)) + +u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg); +#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg)) + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ + ixgbe_write_reg((a), (reg) + ((offset) << 2), (value)) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) \ + ixgbe_read_reg((a), (reg) + ((offset) << 2)) + +#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS) + +#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev) + +#define hw_dbg(hw, format, arg...) \ + netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) +#define hw_err(hw, format, arg...) \ + netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) +#define e_dev_info(format, arg...) 
\ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) +#define e_dev_notice(format, arg...) \ + dev_notice(&adapter->pdev->dev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) +#endif /* IXGBE_COMMON */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c new file mode 100644 index 000000000..a507a6fe3 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -0,0 +1,401 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_ieee_credits - This calculates the ieee traffic class + * credits from the configured bandwidth percentages. Credits + * are the smallest unit programmable into the underlying + * hardware. The IEEE 802.1Qaz specification do not use bandwidth + * groups so this is much simplified from the CEE case. + */ +static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, + __u16 *max, int max_frame) +{ + int min_percent = 100; + int min_credit, multiplier; + int i; + + min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } + + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; + refill[i] = val; + + max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; + } + return 0; +} + +/** + * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits + * @ixgbe_dcb_config: Struct containing DCB settings. + * @direction: Configuring either Tx or Rx. + * + * This function calculates the credits allocated to each traffic class. 
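+ * (With a 9216-byte max frame, for instance, the minimum refill credit
+ * works out to ((9216 / 2) + 64 - 1) / 64 = 72 units of 64 bytes.)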
+ * It should be called only after the rules are checked by + * ixgbe_dcb_check_config(). + */ +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config, + int max_frame, u8 direction) +{ + struct tc_bw_alloc *p; + int min_credit; + int min_multiplier; + int min_percent = 100; + /* Initialization values default for Tx settings */ + u32 credit_refill = 0; + u32 credit_max = 0; + u16 link_percentage = 0; + u8 bw_percent = 0; + u8 i; + + if (!dcb_config) + return DCB_ERR_CONFIG; + + min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* + * The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. It is required that the refill credits must be + * larger than the max frame size so here we find the smallest + * multiplier that will allow all bandwidth percentages to be + * greater than the max frame size. + */ + min_multiplier = (min_credit / min_percent) + 1; + + /* Find out the link percentage for each TC first */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + + link_percentage = p->bwg_percent; + /* Must be careful of integer division for very small nums */ + link_percentage = (link_percentage * bw_percent) / 100; + if (p->bwg_percent > 0 && link_percentage == 0) + link_percentage = 1; + + /* Save link_percentage for reference */ + p->link_percent = (u8)link_percentage; + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + MAX_CREDIT_REFILL); + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ + credit_max = (link_percentage * MAX_CREDIT) / 100; + + /* + * Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ + if (credit_max && (credit_max < min_credit)) + credit_max = min_credit; + + if (direction == DCB_TX_CONFIG) { + /* + * Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. 
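+ * MINIMUM_CREDIT_FOR_TSO corresponds to a full 32 KB TSO packet
+ * (513 credits of 64 bytes).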
+ */ + if ((hw->mac.type == ixgbe_mac_82598EB) && + credit_max && + (credit_max < MINIMUM_CREDIT_FOR_TSO)) + credit_max = MINIMUM_CREDIT_FOR_TSO; + + dcb_config->tc_config[i].desc_credits_max = + (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; + } + + return 0; +} + +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + if (tc_config[tc].dcb_pfc != pfc_disabled) + *pfc_en |= 1 << tc; + } +} + +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; +} + +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; +} + +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; +} + +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, + u8 *ptype) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + ptype[tc] = tc_config[tc].path[direction].prio_type; +} + +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) +{ + struct tc_configuration *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc = cfg->num_tcs.pg_tcs; + + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + return 0; + + /* + * Test from maximum TC to 1 and report the first match we find. If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) + break; + } + + return tc; +} + +void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) +{ + u8 up; + + for (up = 0; up < MAX_USER_PRIORITY; up++) + map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); +} + +/** + * ixgbe_dcb_hw_config - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
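+ *
+ * The CEE configuration is first unpacked into flat pfc/refill/max/
+ * bwgid/priority-type/priority-to-TC arrays and then handed to the
+ * MAC-specific 82598 or 82599-class configuration routine.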
+ */ +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config) +{ + u8 pfc_en; + u8 ptype[MAX_TRAFFIC_CLASS]; + u8 bwgid[MAX_TRAFFIC_CLASS]; + u8 prio_tc[MAX_TRAFFIC_CLASS]; + u16 refill[MAX_TRAFFIC_CLASS]; + u16 max[MAX_TRAFFIC_CLASS]; + + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); + ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(dcb_config, max); + ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); + ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, + bwgid, ptype); + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, + bwgid, ptype, prio_tc); + default: + break; + } + return 0; +} + +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + return ixgbe_dcb_config_pfc_82598(hw, pfc_en); + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); + default: + break; + } + return -EINVAL; +} + +s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) +{ + __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + int i; + + /* naively give each TC a bwg to map onto CEE hardware */ + __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports priority strict or + * ETS transmission selection algorithms if + * we receive some other value from dcbnl + * throw an error + */ + return -EINVAL; + } + } + + ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); + return ixgbe_dcb_hw_ets_config(hw, refill, max, + bwg_id, prio_type, ets->prio_tc); +} + +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *prio_type, u8 *prio_tc) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, + prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + break; + default: + break; + } + return 0; +} + +static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + for (i = 0; i < MAX_USER_PRIORITY; i++) + map[i] = IXGBE_RTRUP2TC_UP_MASK & + (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); +} + +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) +{ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + 
case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + ixgbe_dcb_read_rtrup2tc_82599(hw, map); + break; + default: + break; + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h new file mode 100644 index 000000000..fc0a2dd52 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -0,0 +1,171 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_CONFIG_H_ +#define _DCB_CONFIG_H_ + +#include +#include "ixgbe_type.h" + +/* DCB data structures */ + +#define IXGBE_MAX_PACKET_BUFFERS 8 +#define MAX_USER_PRIORITY 8 +#define MAX_BW_GROUP 8 +#define BW_PERCENT 100 + +#define DCB_TX_CONFIG 0 +#define DCB_RX_CONFIG 1 + +/* DCB error Codes */ +#define DCB_SUCCESS 0 +#define DCB_ERR_CONFIG -1 +#define DCB_ERR_PARAM -2 + +/* Transmit and receive Errors */ +/* Error in bandwidth group allocation */ +#define DCB_ERR_BW_GROUP -3 +/* Error in traffic class bandwidth allocation */ +#define DCB_ERR_TC_BW -4 +/* Traffic class has both link strict and group strict enabled */ +#define DCB_ERR_LS_GS -5 +/* Link strict traffic class has non zero bandwidth */ +#define DCB_ERR_LS_BW_NONZERO -6 +/* Link strict bandwidth group has non zero bandwidth */ +#define DCB_ERR_LS_BWG_NONZERO -7 +/* Traffic class has zero bandwidth */ +#define DCB_ERR_TC_BW_ZERO -8 + +#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF + +struct dcb_pfc_tc_debug { + u8 tc; + u8 pause_status; + u64 pause_quanta; +}; + +enum strict_prio_type { + prio_none = 0, + prio_group, + prio_link +}; + +/* DCB capability definitions */ +#define IXGBE_DCB_PG_SUPPORT 0x00000001 +#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +#define IXGBE_DCB_GSP_SUPPORT 0x00000010 + +#define IXGBE_DCB_8_TC_SUPPORT 0x80 + +struct dcb_support { + /* DCB capabilities */ + u32 capabilities; + + /* Each bit represents a number of TCs configurable in the hw. + * If 8 traffic classes can be configured, the value is 0x80. 
+ */ + u8 traffic_classes; + u8 pfc_traffic_classes; +}; + +/* Traffic class bandwidth allocation per direction */ +struct tc_bw_alloc { + u8 bwg_id; /* Bandwidth Group (BWG) ID */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 link_percent; /* % of link bandwidth */ + u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ + u16 data_credits_refill; /* Credit refill amount in 64B granularity */ + u16 data_credits_max; /* Max credits for a configured packet buffer + * in 64B granularity.*/ + enum strict_prio_type prio_type; /* Link or Group Strict Priority */ +}; + +enum dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx +}; + +/* Traffic class configuration */ +struct tc_configuration { + struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ + enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ +}; + +struct dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct ixgbe_dcb_config { + struct dcb_support support; + struct dcb_num_tcs num_tcs; + struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; + u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ + bool pfc_mode_enable; + + u32 dcb_cfg_version; /* Not used...OS-specific? */ + u32 link_speed; /* For bandwidth allocation validation purpose */ +}; + +/* DCB driver APIs */ +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); + +/* DCB credits calculation */ +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, + struct ixgbe_dcb_config *, int, u8); + +/* DCB hw initialization */ +s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max); +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *prio_type, u8 *tc_prio); +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); +s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); + +void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); + +/* DCB definitions for credit calculation */ +#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ +#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ +#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ +#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ +#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1024 / 64B = 4096 */ + +#endif /* _DCB_CONFIG_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c new file mode 100644 index 000000000..d3ba63f9a --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -0,0 +1,288 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation.
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82598.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + /* Enable Arbiter */ + reg &= ~IXGBE_RMCS_ARBDIS; + /* Enable Receive Recycle within the BWG */ + reg |= IXGBE_RMCS_RRM; + /* Enable Deficit Fixed Priority arbitration*/ + reg |= IXGBE_RMCS_DFP; + + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); + + if (prio_type[i] == prio_link) + reg |= IXGBE_RT2CR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); + } + + reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + reg |= IXGBE_RDRXCTL_RDMTS_1_2; + reg |= IXGBE_RDRXCTL_MPBEN; + reg |= IXGBE_RDRXCTL_MCEN; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); + + reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* Make sure there is enough descriptors before arbitration */ + reg &= ~IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg, max_credits; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); + + /* Enable arbiter */ + reg &= ~IXGBE_DPMCS_ARBDIS; + reg |= IXGBE_DPMCS_TSOEF; + + /* Configure Max TSO packet size 34KB including payload and headers */ + reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); + + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_TDTQ2TCCR_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_TDTQ2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Data Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg; + u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + /* Enable Data Plane Arbiter */ + reg &= ~IXGBE_PDPMCS_ARBDIS; + /* Enable DFP and Transmit Recycle Mode */ + reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM); + + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_TDPT2TCCR_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_TDPT2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); + } + + /* Enable Tx packet buffer division */ + reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + reg |= IXGBE_DTXCTL_ENDBUBD; + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_pfc_82598 - Config priority flow control + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Priority Flow Control for each traffic class. 
+ */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) +{ + u32 fcrtl, reg; + u8 i; + + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE); + + if (pfc_en) + reg |= IXGBE_FCTRL_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & (1 << i))) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); + continue; + } + + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); + } + + /* Configure pause time */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + + return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + u8 j = 0; + + /* Receive Queues stats setting - 8 queues per statistics reg */ + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1)); + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); + } + /* Transmit Queues stats setting - 4 queues per statistics reg */ + for (i = 0; i < 8; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); + reg |= ((0x1010101) * i); + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_hw_config_82598 - Config and enable DCB + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. + */ +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type) +{ + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_pfc_82598(hw, pfc_en); + ixgbe_dcb_config_tc_stats_82598(hw); + + return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h new file mode 100644 index 000000000..3164f5453 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -0,0 +1,97 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82598_CONFIG_H_ +#define _DCB_82598_CONFIG_H_ + +/* DCB register definitions */ + +#define IXGBE_DPMCS_MTSOS_SHIFT 16 +#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ +#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ + +#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ + +#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ + +#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +#define IXGBE_TDTQ2TCCR_LSP 0x80000000 + +#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 +#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +#define IXGBE_TDPT2TCCR_GSP 0x40000000 +#define IXGBE_TDPT2TCCR_LSP 0x80000000 + +#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ +#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ + +#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ + +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ + +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type); + +#endif /* _DCB_82598_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c new file mode 100644 index 000000000..3b932fe64 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -0,0 +1,363 @@ +/******************************************************************************* + + Intel 10 Gigabit 
PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_dcb.h" +#include "ixgbe_dcb_82599.h" + +/** + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Rx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) +{ + u32 reg = 0; + u32 credit_refill = 0; + u32 credit_max = 0; + u8 i = 0; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; WSP) + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + /* Map all traffic classes to their UP */ + reg = 0; + for (i = 0; i < MAX_USER_PRIORITY; i++) + reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTRPT4C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); + } + + /* + * Configure Rx packet plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
+ */ +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg, max_credits; + u8 i; + + /* Clear the per-Tx queue credits; we use per-TC instead */ + for (i = 0; i < 128; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); + } + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_RTTDT2C_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTTDT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); + } + + /* + * Configure Tx descriptor plane (recycle mode; WSP) and + * enable arbiter + */ + reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * + * Configure Tx Packet Arbiter and credits for each traffic class. + */ +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) +{ + u32 reg; + u8 i; + + /* + * Disable the arbiter before changing parameters + * (always enable recycle mode; SP; arb delay) + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | + IXGBE_RTTPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + /* Map all traffic classes to their UP */ + reg = 0; + for (i = 0; i < MAX_USER_PRIORITY; i++) + reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); + + /* Configure traffic class credits and priority */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; + + if (prio_type[i] == prio_group) + reg |= IXGBE_RTTPT2C_GSP; + + if (prio_type[i] == prio_link) + reg |= IXGBE_RTTPT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); + } + + /* + * Configure Tx packet plane (recycle mode; SP; arb delay) and + * enable arbiter + */ + reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + + return 0; +} + +/** + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask + * @prio_tc: priority to tc assignments indexed by priority + * + * Configure Priority Flow Control (PFC) for each traffic class. + */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +{ + u32 i, j, fcrtl, reg; + u8 max_tc = 0; + + /* Enable Transmit Priority Flow Control */ + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg |= IXGBE_MFLCN_DPF; + + /* + * X540 supports per TC Rx priority flow control. So + * clear all TCs and only enable those that should be + * enabled. 
+ */ + reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + + if (hw->mac.type == ixgbe_mac_X540) + reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + + if (pfc_en) + reg |= IXGBE_MFLCN_RPFCE; + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + for (i = 0; i < MAX_USER_PRIORITY; i++) { + if (prio_tc[i] > max_tc) + max_tc = prio_tc[i]; + } + + + /* Configure PFC Tx thresholds per TC */ + for (i = 0; i <= max_tc; i++) { + int enabled = 0; + + for (j = 0; j < MAX_USER_PRIORITY; j++) { + if ((prio_tc[j] == i) && (pfc_en & (1 << j))) { + enabled = 1; + break; + } + } + + if (enabled) { + reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + } else { + reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); + } + + for (; i < MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + return 0; +} + +/** + * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics + * @hw: pointer to hardware structure + * + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) +{ + u32 reg = 0; + u8 i = 0; + + /* + * Receive Queues stats setting + * 32 RQSMR registers, each configuring 4 queues. + * Set all 16 queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + */ + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); + } + /* + * Transmit Queues stats setting + * 32 TQSM registers, each controlling 4 queues. + * Set all queues of each TC to the same stat + * with TC 'n' going to stat 'n'. + * Tx queues are allocated non-uniformly to TCs: + * 32, 32, 16, 16, 8, 8, 8, 8. + */ + for (i = 0; i < 32; i++) { + if (i < 8) + reg = 0x00000000; + else if (i < 16) + reg = 0x01010101; + else if (i < 20) + reg = 0x02020202; + else if (i < 24) + reg = 0x03030303; + else if (i < 26) + reg = 0x04040404; + else if (i < 28) + reg = 0x05050505; + else if (i < 30) + reg = 0x06060606; + else + reg = 0x07070707; + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); + } + + return 0; +} + +/** + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB + * @hw: pointer to hardware structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * @pfc_en: enabled pfc bitmask + * + * Configure dcb settings and enable dcb mode. 
+ */ +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) +{ + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); + ixgbe_dcb_config_tc_stats_82599(hw); + + return 0; +} + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h new file mode 100644 index 000000000..90c370230 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -0,0 +1,125 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _DCB_82599_CONFIG_H_ +#define _DCB_82599_CONFIG_H_ + +/* DCB register definitions */ +#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 WSP - Weighted Strict Priority + */ +#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, + * 1 WRR - Weighted Round Robin + */ +#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must + * clear! 
+ */ +#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ + +/* Receive UP2TC mapping */ +#define IXGBE_RTRUP2TC_UP_SHIFT 3 +#define IXGBE_RTRUP2TC_UP_MASK 7 +/* Transmit UP2TC mapping */ +#define IXGBE_RTTUP2TC_UP_SHIFT 3 + +#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ + +#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable + */ +#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable + */ + +/* RTRPCS Bit Masks */ +#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RTRPCS_RAC 0x00000004 +#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* RTTDT2C Bit Masks */ +#define IXGBE_RTTDT2C_MCL_SHIFT 12 +#define IXGBE_RTTDT2C_BWG_SHIFT 9 +#define IXGBE_RTTDT2C_GSP 0x40000000 +#define IXGBE_RTTDT2C_LSP 0x80000000 + +#define IXGBE_RTTPT2C_MCL_SHIFT 12 +#define IXGBE_RTTPT2C_BWG_SHIFT 9 +#define IXGBE_RTTPT2C_GSP 0x40000000 +#define IXGBE_RTTPT2C_LSP 0x80000000 + +/* RTTPCS Bit Masks */ +#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 SP - Strict Priority + */ +#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +#define IXGBE_RTTPCS_ARBD_SHIFT 22 +#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ + +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ + + +/* DCB hardware-specific driver APIs */ + +/* DCB PFC functions */ +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc); + +/* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + +s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, + u8 *prio_tc); + +#endif /* _DCB_82599_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c new file mode 100644 index 000000000..2707bda37 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -0,0 +1,809 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include <linux/dcbnl.h> +#include "ixgbe_dcb_82598.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" + +/* Callbacks for DCB netlink in the kernel */ +#define BIT_DCB_MODE 0x01 +#define BIT_PFC 0x02 +#define BIT_PG_RX 0x04 +#define BIT_PG_TX 0x08 +#define BIT_APP_UPCHG 0x10 +#define BIT_LINKSPEED 0x80 + +/* Responses for the DCB_C_SET_ALL command */ +#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ + +static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) +{ + struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; + struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; + struct tc_configuration *src = NULL; + struct tc_configuration *dst = NULL; + int i, j; + int tx = DCB_TX_CONFIG; + int rx = DCB_RX_CONFIG; + int changes = 0; +#ifdef IXGBE_FCOE + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = ETH_P_FCOE, + }; + u8 up = dcb_getapp(adapter->netdev, &app); + + if (up && !(up & (1 << adapter->fcoe.up))) + changes |= BIT_APP_UPCHG; +#endif + + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { + src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; + dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; + + if (dst->path[tx].prio_type != src->path[tx].prio_type) { + dst->path[tx].prio_type = src->path[tx].prio_type; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { + dst->path[tx].bwg_id = src->path[tx].bwg_id; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { + dst->path[tx].bwg_percent = src->path[tx].bwg_percent; + changes |= BIT_PG_TX; + } + + if (dst->path[tx].up_to_tc_bitmap != + src->path[tx].up_to_tc_bitmap) { + dst->path[tx].up_to_tc_bitmap = + src->path[tx].up_to_tc_bitmap; + changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); + } + + if (dst->path[rx].prio_type != src->path[rx].prio_type) { + dst->path[rx].prio_type = src->path[rx].prio_type; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_id != src->path[rx].bwg_id) { + dst->path[rx].bwg_id = src->path[rx].bwg_id; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].bwg_percent != src->path[rx].bwg_percent) { + dst->path[rx].bwg_percent = src->path[rx].bwg_percent; + changes |= BIT_PG_RX; + } + + if (dst->path[rx].up_to_tc_bitmap != + src->path[rx].up_to_tc_bitmap) { + dst->path[rx].up_to_tc_bitmap = + src->path[rx].up_to_tc_bitmap; + changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); + } + } + + for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { + j = i - DCB_PG_ATTR_BW_ID_0; + if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { + dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; + changes |= BIT_PG_TX; + } + if (dcfg->bw_percentage[rx][j] != scfg->bw_percentage[rx][j]) { + dcfg->bw_percentage[rx][j] = scfg->bw_percentage[rx][j]; + changes |= BIT_PG_RX; + } + } + + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { + j = i - DCB_PFC_UP_ATTR_0; + if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) { + dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc; + changes |= BIT_PFC;
+ } + } + + if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { + dcfg->pfc_mode_enable = scfg->pfc_mode_enable; + changes |= BIT_PFC; + } + + return changes; +} + +static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); +} + +static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + /* verify there is something to do, if not then exit */ + if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return 0; + + return !!ixgbe_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); +} + +static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, + u8 *perm_addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < netdev->addr_len; i++) + perm_addr[i] = adapter->hw.mac.perm_addr[i]; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = adapter->hw.mac.san_addr[j]; + break; + default: + break; + } +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = + up_map; +} + +static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; +} + +static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 prio, u8 bwg_id, u8 bw_pct, + u8 up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = + bw_pct; + if (up_map != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = + up_map; +} + +static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) 
+{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; +} + +static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bw_pct, + u8 *up_map) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; +} + +static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + u8 *bw_pct) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; +} + +static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, + u8 setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; + if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != + adapter->dcb_cfg.tc_config[priority].dcb_pfc) + adapter->temp_dcb_cfg.pfc_mode_enable = true; +} + +static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, + u8 *setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; +} + +static void ixgbe_dcbnl_devreset(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(dev)) + dev->netdev_ops->ndo_stop(dev); + + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + dev->netdev_ops->ndo_open(dev); + + clear_bit(__IXGBE_RESETTING, &adapter->state); +} + +static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ixgbe_hw *hw = &adapter->hw; + int ret = DCB_NO_HW_CHG; + int i; + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return DCB_NO_HW_CHG; + + adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, + MAX_TRAFFIC_CLASS); + if (!adapter->dcb_set_bitmap) + return DCB_NO_HW_CHG; + + if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { + u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; + u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + /* Priority to TC mapping in CEE case default to 1:1 */ + u8 prio_tc[MAX_USER_PRIORITY]; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#ifdef IXGBE_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, + DCB_RX_CONFIG); + + ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(dcb_cfg, max); + ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); + ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); + ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); + + ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, + prio_type, prio_tc); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(netdev, i, prio_tc[i]); + + ret = DCB_HW_CHG_RST; + } + + if (adapter->dcb_set_bitmap & BIT_PFC) { + if (dcb_cfg->pfc_mode_enable) { + u8 pfc_en; + u8 
prio_tc[MAX_USER_PRIORITY]; + + ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); + ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); + ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); + } else { + hw->mac.ops.fc_enable(hw); + } + + ixgbe_set_rx_drop_en(adapter); + + ret = DCB_HW_CHG; + } + +#ifdef IXGBE_FCOE + /* Reprogam FCoE hardware offloads when the traffic class + * FCoE is using changes. This happens if the APP info + * changes or the up2tc mapping is updated. + */ + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = ETH_P_FCOE, + }; + u8 up = dcb_getapp(netdev, &app); + + adapter->fcoe.up = ffs(up) - 1; + ixgbe_dcbnl_devreset(netdev); + ret = DCB_HW_CHG_RST; + } +#endif + + adapter->dcb_set_bitmap = 0x00; + return ret; +} + +static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcbx_cap; + break; + default: + *cap = false; + break; + } + + return 0; +} + +static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = adapter->dcb_cfg.num_tcs.pg_tcs; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = adapter->dcb_cfg.num_tcs.pfc_tcs; + break; + default: + return -EINVAL; + } + } else { + return -EINVAL; + } + + return 0; +} + +static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + return -EINVAL; +} + +static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return adapter->dcb_cfg.pfc_mode_enable; +} + +static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.pfc_mode_enable = state; +} + +/** + * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority + * @netdev : the corresponding netdev + * @idtype : identifies the id as ether type or TCP/UDP port number + * @id: id is either ether type or TCP/UDP port number + * + * Returns : on success, returns a non-zero 802.1p user priority bitmap + * otherwise returns -EINVAL as the invalid user priority bitmap to indicate an + * error. 
+ */ +static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + return dcb_getapp(netdev, &app); +} + +static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; + + ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; + + /* No IEEE PFC settings available */ + if (!my_ets) + return 0; + + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + return 0; +} + +static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i, err; + __u8 max_tc = 0; + __u8 map_chg = 0; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_ets) { + adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_ets) + return -ENOMEM; + + /* initialize UP2TC mappings to invalid value */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + adapter->ixgbe_ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + /* if possible update UP2TC mappings from HW */ + ixgbe_dcb_read_rtrup2tc(&adapter->hw, + adapter->ixgbe_ieee_ets->prio_tc); + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i]) + map_chg = 1; + } + + memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + + if (max_tc) + max_tc++; + + if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + + if (max_tc != netdev_get_num_tc(dev)) { + err = ixgbe_setup_tc(dev, max_tc); + if (err) + return err; + } else if (map_chg) { + ixgbe_dcbnl_devreset(dev); + } + + return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); +} + +static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; + int i; + + pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; + + /* No IEEE PFC settings available */ + if (!my_pfc) + return 0; + + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = adapter->stats.pxoffrxc[i]; + pfc->indications[i] = adapter->stats.pxofftxc[i]; + } + + return 0; +} + +static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u8 *prio_tc; + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_pfc) { + adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_pfc) + return -ENOMEM; + } + + prio_tc = adapter->ixgbe_ieee_ets->prio_tc; + memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); + + /* Enable link flow control parameters if PFC is disabled */ + if (pfc->pfc_en) + err = 
ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); + else + err = hw->mac.ops.fc_enable(hw); + + ixgbe_set_rx_drop_en(adapter); + + return err; +} + +static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + +#ifdef IXGBE_FCOE + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return 0; + + adapter->fcoe.up = app->priority; + ixgbe_dcbnl_devreset(dev); + } +#endif + + /* VF devices should use default UP when available */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0) { + int vf; + + adapter->default_up = app->priority; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + app->priority, vf); + } + } + + return 0; +} + +static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + err = dcb_ieee_delapp(dev, app); + +#ifdef IXGBE_FCOE + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + + if (app_mask & (1 << adapter->fcoe.up)) + return 0; + + adapter->fcoe.up = app_mask ? + ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; + ixgbe_dcbnl_devreset(dev); + } +#endif + /* IF default priority is being removed clear VF default UP */ + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0 && adapter->default_up == app->priority) { + int vf; + long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); + int qos = app_mask ? find_first_bit(&app_mask, 8) : 0; + + adapter->default_up = qos; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + + if (!vfinfo->pf_qos) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + qos, vf); + } + } + + return err; +} + +static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + return adapter->dcbx_cap; +} + +static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + int err = 0; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + if (mode == adapter->dcbx_cap) + return 0; + + adapter->dcbx_cap = mode; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; + + adapter->dcb_set_bitmap |= mask; + ixgbe_dcbnl_set_all(dev); + } else { + /* Drop into single TC mode strict priority as this + * indicates CEE and IEEE versions are disabled + */ + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + err = ixgbe_setup_tc(dev, 0); + } + + return err ? 
1 : 0; +} + +const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = ixgbe_dcbnl_ieee_getets, + .ieee_setets = ixgbe_dcbnl_ieee_setets, + .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, + .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, + .ieee_setapp = ixgbe_dcbnl_ieee_setapp, + .ieee_delapp = ixgbe_dcbnl_ieee_delapp, + .getstate = ixgbe_dcbnl_get_state, + .setstate = ixgbe_dcbnl_set_state, + .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, + .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, + .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, + .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, + .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, + .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = ixgbe_dcbnl_get_pg_bwg_cfg_tx, + .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, + .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, + .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, + .setall = ixgbe_dcbnl_set_all, + .getcap = ixgbe_dcbnl_getcap, + .getnumtcs = ixgbe_dcbnl_getnumtcs, + .setnumtcs = ixgbe_dcbnl_setnumtcs, + .getpfcstate = ixgbe_dcbnl_getpfcstate, + .setpfcstate = ixgbe_dcbnl_setpfcstate, + .getapp = ixgbe_dcbnl_getapp, + .getdcbx = ixgbe_dcbnl_getdcbx, + .setdcbx = ixgbe_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c new file mode 100644 index 000000000..5e2c1e35e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -0,0 +1,276 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ +#include <linux/debugfs.h> +#include <linux/module.h> + +#include "ixgbe.h" + +static struct dentry *ixgbe_dbg_root; + +static char ixgbe_dbg_reg_ops_buf[256] = ""; + +/** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * ixgbe_dbg_reg_ops_write - write into reg_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf, + sizeof(ixgbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ixgbe_dbg_reg_ops_buf[len] = '\0'; + + if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value); + if (cnt == 2) { + IXGBE_WRITE_REG(&adapter->hw, reg, value); + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write <reg> <value>\n"); + } + } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg); + if (cnt == 1) { + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read <reg>\n"); + } + } else { + e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read <reg>\n"); + e_dev_info(" write <reg> <value>\n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ixgbe_dbg_reg_ops_read, + .write = ixgbe_dbg_reg_ops_write, +}; + +static char ixgbe_dbg_netdev_ops_buf[256] = ""; + +/** + * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len =
simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf, + sizeof(ixgbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ixgbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ixgbe_dbg_netdev_ops_read, + .write = ixgbe_dbg_netdev_ops_write, +}; + +/** + * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); + if (adapter->ixgbe_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } +} + +/** + * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) +{ + debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); + adapter->ixgbe_dbg_adapter = NULL; +} + +/** + * ixgbe_dbg_init - start up debugfs for the driver + **/ +void ixgbe_dbg_init(void) +{ + ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); + if (ixgbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * ixgbe_dbg_exit - clean out the driver's debugfs entries + **/ +void ixgbe_dbg_exit(void) +{ + debugfs_remove_recursive(ixgbe_dbg_root); +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c new file mode 100644 index 000000000..eafa9ec80 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -0,0 +1,3165 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbe */ + +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/vmalloc.h> +#include <linux/highmem.h> +#include <linux/uaccess.h> + +#include "ixgbe.h" +#include "ixgbe_phy.h" + + +#define IXGBE_ALL_RAR_ENTRIES 16 + +enum {NETDEV_STATS, IXGBE_STATS}; + +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define IXGBE_STAT(m) IXGBE_STATS, \ + sizeof(((struct ixgbe_adapter *)0)->m), \ + offsetof(struct ixgbe_adapter, m) +#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct rtnl_link_stats64 *)0)->m), \ + offsetof(struct rtnl_link_stats64, m) + +static const struct ixgbe_stats ixgbe_gstrings_stats[] = { + {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, + {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, + {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, + {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, + {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, + {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, + {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, + {"lsc_int", IXGBE_STAT(lsc_int)}, + {"tx_busy", IXGBE_STAT(tx_busy)}, + {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, + {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, + {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, + {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, + {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, + {"multicast", IXGBE_NETDEV_STAT(multicast)}, + {"broadcast", IXGBE_STAT(stats.bprc)}, + {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, + {"collisions", IXGBE_NETDEV_STAT(collisions)}, + {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, + {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)}, + {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, + {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, + {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, + {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, + {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, + {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, + {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, + {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, + {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, + {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, + {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, + {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, + {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, + {"tx_restart_queue", IXGBE_STAT(restart_queue)}, + {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, + {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, + {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, + {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, + {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, + {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, +
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, + {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, + {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, + {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, + {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, + {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, + {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, + {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, +#ifdef IXGBE_FCOE + {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, + {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, + {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, + {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, + {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, + {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, + {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)}, + {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, +#endif /* IXGBE_FCOE */ +}; + +/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues + +#define IXGBE_QUEUE_STATS_LEN ( \ + (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBE_PB_STATS_LEN ( \ + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ + IXGBE_PB_STATS_LEN + \ + IXGBE_QUEUE_STATS_LEN) + +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN + +static int ixgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ixgbe_link_speed supported_link; + u32 link_speed = 0; + bool autoneg = false; + bool link_up; + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->supported |= SUPPORTED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->supported |= SUPPORTED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + ecmd->supported |= SUPPORTED_100baseT_Full; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } else { + /* default modes in case phy.autoneg_advertised isn't set */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + + if 
(hw->phy.multispeed_fiber && !autoneg) { + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + } + + if (autoneg) { + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + } else + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->transceiver = XCVR_EXTERNAL; + + /* Determine the remaining settings based on the PHY type. */ + switch (adapter->hw.phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_cu_unknown: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ixgbe_phy_qt: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ixgbe_phy_nl: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + /* SFP+ devices, further checking needed */ + switch (adapter->hw.phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case ixgbe_sfp_type_sr: + case ixgbe_sfp_type_lr: + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + case ixgbe_sfp_type_1g_lx_core0: + case ixgbe_sfp_type_1g_lx_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + break; + case ixgbe_sfp_type_not_present: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case ixgbe_sfp_type_1g_cu_core0: + case ixgbe_sfp_type_1g_cu_core1: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + break; + case ixgbe_sfp_type_unknown: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + break; + case ixgbe_phy_xaui: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_NONE; + break; + case ixgbe_phy_unknown: + case ixgbe_phy_generic: + case ixgbe_phy_sfp_unsupported: + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + break; + } + + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) { + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case IXGBE_LINK_SPEED_100_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int ixgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 advertised, old; + s32 err = 0; + + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + /* + * this function does not support duplex forcing, but can + * limit the advertising of the adapter to the specified speed + */ + if (ecmd->advertising & ~ecmd->supported) + return -EINVAL; + + /* only 
allow one speed at a time if no autoneg */ + if (!ecmd->autoneg && hw->phy.multispeed_fiber) { + if (ecmd->advertising == + (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full)) + return -EINVAL; + } + + old = hw->phy.autoneg_advertised; + advertised = 0; + if (ecmd->advertising & ADVERTISED_10000baseT_Full) + advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + hw->mac.autotry_restart = true; + err = hw->mac.ops.setup_link(hw, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->mac.ops.setup_link(hw, old, true); + } + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + } else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = ethtool_cmd_speed(ecmd); + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_10000baseT_Full) || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) + return -EINVAL; + } + + return err; +} + +static void ixgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (ixgbe_device_supports_autoneg_fc(hw) && + !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + + if (hw->fc.current_mode == ixgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == ixgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ixgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fc_info fc = hw->fc; + + /* 82598 does not support link flow control with DCB enabled */ + if ((hw->mac.type == ixgbe_mac_82598EB) && + (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return -EINVAL; + + /* some devices do not support autoneg of link flow control */ + if ((pause->autoneg == AUTONEG_ENABLE) && + !ixgbe_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ixgbe_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + fc.requested_mode = ixgbe_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + fc.requested_mode = ixgbe_fc_tx_pause; + else + fc.requested_mode = ixgbe_fc_none; + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) { + hw->fc = fc; + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 ixgbe_get_msglevel(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ixgbe_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN 1139 + return IXGBE_REGS_LEN * sizeof(u32); +} +
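For reference, here is a minimal userspace sketch (not part of the driver, purely illustrative) of how the register dump sized by ixgbe_get_regs_len() is usually consumed: a tool first asks ETHTOOL_GDRVINFO for regdump_len, then issues ETHTOOL_GREGS through the SIOCETHTOOL ioctl, which lands in ixgbe_get_regs() below. The interface name "eth0" and the choice to print only the first eight words (the General Registers section) are assumptions made for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_regs *regs;
	struct ifreq ifr;
	__u32 *dump;
	int fd, i;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */

	/* regdump_len mirrors what the driver's get_regs_len() callback reports */
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	regs = calloc(1, sizeof(*regs) + drvinfo.regdump_len);
	if (!regs)
		return 1;
	regs->cmd = ETHTOOL_GREGS;
	regs->len = drvinfo.regdump_len;

	ifr.ifr_data = (char *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* the first words correspond to the General Registers (CTRL, STATUS, ...)
	 * that the driver's get_regs() callback stores at the start of the buffer
	 */
	dump = (__u32 *)regs->data;
	for (i = 0; i < 8 && 4u * i < regs->len; i++)
		printf("regs_buff[%d] = 0x%08x\n", i, dump[i]);

	free(regs);
	close(fd);
	return 0;
}

The stock ethtool utility's -d option drives the same pair of ioctls; the sketch only spells out the raw sequence behind it.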
+#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ + +static void ixgbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); + + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + + /* NVM Register */ + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); + regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); + regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); + regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); + + /* Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR */ + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); + regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); + regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); + regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); + regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); + regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); + regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); + regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); + regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + + /* Flow Control */ + regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); + regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); + regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); + regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); + regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 8; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + break; + default: + break; + } + } + regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); + regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + + /* Receive DMA */ + for (i = 0; i < 64; i++) + regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + for (i = 0; i < 64; i++) + regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + for (i = 0; i < 64; i++) + regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + for (i = 0; i < 64; i++) + regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + for (i = 0; i < 64; i++) + regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + for (i = 0; i < 64; i++) + regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + for (i = 0; i < 16; 
i++) + regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + for (i = 0; i < 8; i++) + regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + + /* Receive */ + regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); + for (i = 0; i < 16; i++) + regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); + regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); + regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); + regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); + regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + for (i = 0; i < 8; i++) + regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); + regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + + /* Transmit */ + for (i = 0; i < 32; i++) + regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + for (i = 0; i < 32; i++) + regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + for (i = 0; i < 32; i++) + regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + for (i = 0; i < 32; i++) + regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + for (i = 0; i < 32; i++) + regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + for (i = 0; i < 32; i++) + regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); + regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + for (i = 0; i < 16; i++) + regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); + for (i = 0; i < 8; i++) + regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); + regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + + /* Wake Up */ + regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); + regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); + regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); + regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); + regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); + regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); + regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); + regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); + regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); + + /* DCB */ + regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ + regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = + IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = + IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); + for (i = 0; i < 8; i++) + regs_buff[849 + i] = + IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = + IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + 
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = + IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = + IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); + for (i = 0; i < 8; i++) + regs_buff[849 + i] = + IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = + IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); + break; + default: + break; + } + + for (i = 0; i < 8; i++) + regs_buff[865 + i] = + IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ + for (i = 0; i < 8; i++) + regs_buff[873 + i] = + IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ + + /* Statistics */ + regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); + regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); + regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); + regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); + for (i = 0; i < 8; i++) + regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); + regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); + regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); + regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); + regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); + regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); + regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); + regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); + for (i = 0; i < 8; i++) + regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); + for (i = 0; i < 8; i++) + regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); + for (i = 0; i < 8; i++) + regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); + for (i = 0; i < 8; i++) + regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); + regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); + regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); + regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); + regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); + regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); + regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); + regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); + regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); + regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); + regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); + regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); + regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); + for (i = 0; i < 8; i++) + regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); + regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); + regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); + regs_buff[956] = IXGBE_GET_STAT(adapter, roc); + regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); + regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); + regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); + regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); + regs_buff[961] = IXGBE_GET_STAT(adapter, tor); + regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); + regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); + regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); + regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); + regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); + regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); + regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); + regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); + regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); + regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); + regs_buff[973] = IXGBE_GET_STAT(adapter, xec); + for (i = 0; i < 16; i++) + regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); + for (i = 0; i < 16; i++) + regs_buff[990 + i] = 
IXGBE_GET_STAT(adapter, qptc[i]); + for (i = 0; i < 16; i++) + regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); + for (i = 0; i < 16; i++) + regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); + + /* MAC */ + regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); + regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); + regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); + regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); + regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); + regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); + regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); + regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); + regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); + regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); + regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); + regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); + regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); + regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); + regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); + regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); + regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); + regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); + regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); + regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); + regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); + regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); + regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); + regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); + regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); + regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); + regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); + regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + + /* Diagnostic */ + regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); + regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); + for (i = 0; i < 4; i++) + regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); + regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); + regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); + regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); + for (i = 0; i < 4; i++) + regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); + regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); + regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); + regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); + regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); + regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); + regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); + regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); + regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); + regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); + regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); + regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 8; i++) + regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); + regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); + regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); + regs_buff[1122] = 
IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); + regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); + regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); + regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); + regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); + regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); + + /* 82599 X540 specific registers */ + regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); + + /* 82599 X540 specific DCB registers */ + regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); + for (i = 0; i < 4; i++) + regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); + regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); + /* same as RTTQCNRM */ + regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); + /* same as RTTQCNRR */ + + /* X540 specific DCB registers */ + regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); + regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); +} + +static int ixgbe_get_eeprom_len(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ixgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, + eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int ixgbe_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 1) { + /* + * need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]); + if (ret_val) + goto err; + + ptr++; + } + if ((eeprom->offset + eeprom->len) & 1) { + /* + * need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->eeprom.ops.read(hw, last_word, + &eeprom_buff[last_word - first_word]); + if (ret_val) + goto err; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + 
cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->eeprom.ops.write_buffer(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) + hw->eeprom.ops.update_checksum(hw); + +err: + kfree(eeprom_buff); + return ret_val; +} + +static void ixgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 nvm_track_id; + + strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgbe_driver_version, + sizeof(drvinfo->version)); + + nvm_track_id = (adapter->eeprom_verh << 16) | + adapter->eeprom_verl; + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", + nvm_track_id); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = IXGBE_STATS_LEN; + drvinfo->testinfo_len = IXGBE_TEST_LEN; + drvinfo->regdump_len = ixgbe_get_regs_len(netdev); +} + +static void ixgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; + struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; + + ring->rx_max_pending = IXGBE_MAX_RXD; + ring->tx_max_pending = IXGBE_MAX_TXD; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; +} + +static int ixgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + IXGBE_MIN_TXD, IXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + IXGBE_MIN_RXD, IXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + ixgbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct ixgbe_ring)); + + temp_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct ixgbe_ring)); + + temp_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + ixgbe_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } + +err_setup: + ixgbe_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IXGBE_RESETTING, &adapter->state); + return err; +} + +static int ixgbe_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ixgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + unsigned int start; + struct ixgbe_ring *ring; + int i, j; + char *p = NULL; + + ixgbe_update_stats(adapter); + net_stats = dev_get_stats(netdev, &temp); + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + switch (ixgbe_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) net_stats + + ixgbe_gstrings_stats[i].stat_offset; + break; + case IXGBE_STATS: + p = (char *) adapter + + ixgbe_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = 0; + data[i+1] = 0; + data[i+2] = 0; + i += 3; +#endif + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i+1] = ring->stats.misses; + data[i+2] = ring->stats.cleaned; + i += 3; +#endif + } + + for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } +} + +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + for (i = 0; i < IXGBE_TEST_LEN; i++) { + memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_STATS: + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < netdev->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + + if (ixgbe_removed(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if 
(link_up) + return *data; + else + *data = 1; + return *data; +} + +/* ethtool register test data */ +struct ixgbe_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. + */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default 82599 register test */ +static const struct ixgbe_reg_test reg_test_82599[] = { + { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +/* default 82598 register test */ +static const struct ixgbe_reg_test reg_test_82598[] = { + { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + /* Enable all four RX queues before testing. */ + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + /* RDH is read-only for 82598, only test RDT. 
*/ + { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF }, + { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 }, + { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 }, + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + + if (ixgbe_removed(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = ixgbe_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); + val = ixgbe_read_reg(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + if (ixgbe_removed(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = ixgbe_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, write & mask); + val = ixgbe_read_reg(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + return false; +} + +static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) +{ + const struct ixgbe_reg_test *test; + u32 value, before, after; + u32 i, toggle; + + if (ixgbe_removed(adapter->hw.hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return 1; + } + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + toggle = 0x7FFFF3FF; + test = reg_test_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + toggle = 0x7FFFF30F; + test = reg_test_82599; + break; + default: + *data = 1; + return 1; + } + + /* + * Because the status register is such a special case, + * we handle it separately from the rest of the register + * tests. Some bits are read-only, some toggle, and some + * are writeable on newer MACs. 
+ */ + before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); + value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); + ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); + after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); + + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. + */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + ixgbe_write_reg(&adapter->hw, + test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return 1; + } + test++; + } + + *data = 0; + return 0; +} + +static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + if (hw->eeprom.ops.validate_checksum(hw, NULL)) + *data = 1; + else + *data = 0; + return *data; +} + +static irqreturn_t ixgbe_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); + + return IRQ_HANDLED; +} + +static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + + *data = 0; + + /* Hook up test interrupt handler just for this test */ + if (adapter->msix_entries) { + /* NOTE: we don't test MSI-X interrupts here, yet */ + return 0; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + shared_int = false; + if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } + } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", shared_int ? + "shared" : "unshared"); + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* + * Disable the interrupts to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* + * Enable the interrupt to be reported in the cause + * register and then force the same interrupt and see + * if one gets posted. If an interrupt was not posted + * to the bus, the test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* + * Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, + ~mask & 0x00007FFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, + ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_ctl; + + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ + hw->mac.ops.disable_rx(hw); + ixgbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ + reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); + reg_ctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg_ctl &= ~IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); + break; + default: + break; + } + + ixgbe_reset(adapter); + + ixgbe_free_tx_resources(&adapter->test_tx_ring); + ixgbe_free_rx_resources(&adapter->test_rx_ring); +} + +static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; + u32 rctl, reg_data; + int ret_val; + int err; + + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IXGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + + err = ixgbe_setup_tx_resources(tx_ring); + if (err) + return 1; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); + reg_data |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); + break; + default: + break; + } + + ixgbe_configure_tx_ring(adapter, tx_ring); + + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = 
IXGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; + + err = ixgbe_setup_rx_resources(rx_ring); + if (err) { + ret_val = 4; + goto err_nomem; + } + + hw->mac.ops.disable_rx(hw); + + ixgbe_configure_rx_ring(adapter, rx_ring); + + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + rctl |= IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + + hw->mac.ops.enable_rx(hw); + + return 0; + +err_nomem: + ixgbe_free_desc_rings(adapter); + return ret_val; +} + +static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_data; + + + /* Setup MAC loopback */ + reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0); + reg_data |= IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data); + + reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); + + /* X540 and X550 needs to set the MACC.FLU bit to force link up */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); + reg_data |= IXGBE_MACC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); + break; + default: + if (hw->mac.orig_autoc) { + reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); + } else { + return 10; + } + } + IXGBE_WRITE_FLUSH(hw); + usleep_range(10000, 20000); + + /* Disable Atlas Tx lanes; re-enabled in reset path */ + if (hw->mac.type == ixgbe_mac_82598EB) { + u8 atlas; + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas); + atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas); + } + + return 0; +} + +static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) +{ + u32 reg_data; + + reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0); + reg_data &= ~IXGBE_HLREG0_LPBK; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data); +} + +static void ixgbe_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); +} + +static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap(rx_buffer->page) + rx_buffer->page_offset; + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; +} + +static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, + struct ixgbe_ring *tx_ring, + unsigned int size) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer; + struct 
ixgbe_tx_buffer *tx_buffer; + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); + + while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* verify contents of skb */ + if (ixgbe_check_lbtest_frame(rx_buffer, size)) + count++; + + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + + /* increment Rx/Tx next to clean counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) + rx_ntc = 0; + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + + /* fetch next descriptor */ + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + /* re-map buffers to ring, store next to clean values */ + ixgbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + int i, j, lc, good_cnt, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; + u32 flags_orig = adapter->flags; + + /* DCB can modify the frames on Tx */ + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + + /* allocate test skb */ + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return 11; + + /* place data into test skb */ + ixgbe_create_lbtest_frame(skb, size); + skb_put(skb, size); + + /* + * Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { + /* reset count of good packets */ + good_cnt = 0; + + /* place 64 packets on the transmit queue*/ + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = ixgbe_xmit_frame_ring(skb, + adapter, + tx_ring); + if (tx_ret_val == NETDEV_TX_OK) + good_cnt++; + } + + if (good_cnt != 64) { + ret_val = 12; + break; + } + + /* allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size); + if (good_cnt != 64) { + ret_val = 13; + break; + } + } + + /* free the original skb */ + kfree_skb(skb); + adapter->flags = flags_orig; + + return ret_val; +} + +static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data) +{ + *data = ixgbe_setup_desc_rings(adapter); + if (*data) + goto out; + *data = ixgbe_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = ixgbe_run_loopback_test(adapter); + ixgbe_loopback_cleanup(adapter); + +err_loopback: + ixgbe_free_desc_rings(adapter); +out: + return *data; +} + +static void ixgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if 
(ixgbe_removed(adapter->hw.hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__IXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + data[4] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__IXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + ixgbe_reset(adapter); + + e_info(hw, "register testing starting\n"); + if (ixgbe_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + e_info(hw, "eeprom testing starting\n"); + if (ixgbe_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbe_reset(adapter); + e_info(hw, "interrupt testing starting\n"); + if (ixgbe_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. */ + if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | + IXGBE_FLAG_VMDQ_ENABLED)) { + e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + ixgbe_reset(adapter); + e_info(hw, "loopback testing starting\n"); + if (ixgbe_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + +skip_loopback: + ixgbe_reset(adapter); + + /* clear testing bit and return adapter to previous state */ + clear_bit(__IXGBE_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + else if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); + } else { + e_info(hw, "online testing starting\n"); + + /* Online tests */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__IXGBE_TESTING, &adapter->state); + } + +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_hw *hw = &adapter->hw; + int retval = 0; + + /* WOL not supported for all devices */ + if (!ixgbe_wol_supported(adapter, hw->device_id, + hw->subsystem_device_id)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +static void ixgbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if (ixgbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + if (adapter->wol & IXGBE_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & IXGBE_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & IXGBE_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & 
IXGBE_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (ixgbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? -EOPNOTSUPP : 0; + + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= IXGBE_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= IXGBE_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= IXGBE_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IXGBE_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int ixgbe_nway_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + + return 0; +} + +static int ixgbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + return 2; + + case ETHTOOL_ID_ON: + hw->mac.ops.led_on(hw, IXGBE_LED_ON); + break; + + case ETHTOOL_ID_OFF: + hw->mac.ops.led_off(hw, IXGBE_LED_ON); + break; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg); + break; + } + + return 0; +} + +static int ixgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* nothing to do if LRO or RSC are not enabled */ + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || + !(netdev->features & NETIF_F_LRO)) + return false; + + /* check the feature flag value and enable RSC if necessary */ + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); + return true; + } + /* if interrupt rate is too high then disable RSC */ + } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs set too low, disabling RSC\n"); + return true; + } + return false; +} + +static int ixgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_q_vector *q_vector; + int i; + u16 tx_itr_param, rx_itr_param, tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) 
{ + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EINVAL; + tx_itr_prev = adapter->rx_itr_setting; + } else { + tx_itr_prev = adapter->tx_itr_setting; + } + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_10K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + /* mixed Rx/Tx */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < IXGBE_100K_ITR)) { + if ((tx_itr_prev == 1) || + (tx_itr_prev >= IXGBE_100K_ITR)) + need_reset = true; + } else { + if ((tx_itr_prev != 1) && + (tx_itr_prev < IXGBE_100K_ITR)) + need_reset = true; + } + + /* check the old value and enable RSC if necessary */ + need_reset |= ixgbe_update_rsc(adapter); + + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbe_write_eitr(q_vector); + } + + /* + * do reset here at the end to make sure EITR==0 case is handled + * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings + * also locks in RSC enable/disable which requires reset + */ + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + union ixgbe_atr_input *mask = &adapter->fdir_mask; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* fill out the flow spec entry */ + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case IXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + 
fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id; + fsp->m_ext.vlan_tci = mask->formatted.vlan_id; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == IXGBE_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << adapter->fdir_pballoc) - 2; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on ixgbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case UDP_V4_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case UDP_V6_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fallthrough */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = ixgbe_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node2, + &adapter->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input || (rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash)) { + err = 
ixgbe_fdir_erase_perfect_filter_82599(hw, + &rule->filter, + sw_idx); + } + + hlist_del(&rule->fdir_node); + kfree(rule); + adapter->fdir_filter_count--; + } + + /* + * If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, + &adapter->fdir_filter_list); + + /* update counts */ + adapter->fdir_filter_count++; + + return 0; +} + +static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + default: + return 0; + } + break; + default: + return 0; + } + + return 1; +} + +static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input mask; + int err; + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
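+ * The drop action (RX_CLS_FLOW_DISC) is exempt from this check.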
+ */ + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) + return -EINVAL; + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union ixgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!ixgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci; + mask.formatted.vlan_id = fsp->m_ext.vlan_tci; + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; + } + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = IXGBE_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + if (err) { + e_err(drv, "Error writing mask\n"); + goto err_out_w_lock; + } + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + e_err(drv, "Only one mask supported per port\n"); + goto err_out_w_lock; + } + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + + /* program filters to filter memory */ + err = ixgbe_fdir_write_perfect_filter_82599(hw, + &input->filter, input->sw_idx, + (input->action == IXGBE_FDIR_DROP_QUEUE) ? 
+ IXGBE_FDIR_DROP_QUEUE : + adapter->rx_ring[input->action]->reg_idx); + if (err) + goto err_out_w_lock; + + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + +static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc; + unsigned int pf_pool = adapter->num_vfs; + + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); + else + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | + IXGBE_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + 
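+ /* X550 with SR-IOV keeps the RSS field enables in the per-pool PFVFMRQC */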
IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); + else + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } + + return 0; +} + +static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = ixgbe_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ixgbe_rss_indir_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return ixgbe_rss_indir_tbl_entries(adapter); +} + +static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + ixgbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); + + return 0; +} + +static int ixgbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + break; + default: + return ethtool_op_get_ts_info(dev, info); + } + return 0; +} + +static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) +{ + unsigned int max_combined; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows one queue on the PF */ + max_combined = 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + max_combined = 4; + } else if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + max_combined = 8; + } else { + /* 4 TC w/ 16 queues per TC */ + max_combined = 16; + } + } else if 
(adapter->atr_sample_rate) { + /* support up to 64 queues with ATR */ + max_combined = IXGBE_MAX_FDIR_INDICES; + } else { + /* support up to 16 queues with RSS */ + max_combined = ixgbe_max_rss_indices(adapter); + } + + return max_combined; +} + +static void ixgbe_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = ixgbe_max_channels(adapter); + + /* report info for other vector */ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; + + /* if ATR is disabled we can exit */ + if (!adapter->atr_sample_rate) + return; + + /* report flow director queues as maximum channels */ + ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices; +} + +static int ixgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + u8 max_rss_indices = ixgbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > ixgbe_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + +#ifdef IXGBE_FCOE + /* cap FCoE limit at 8 */ + if (count > IXGBE_FCRETA_SIZE) + count = IXGBE_FCRETA_SIZE; + adapter->ring_feature[RING_F_FCOE].limit = count; + +#endif + /* use setup TC to update any traffic class queue mapping */ + return ixgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + +static int ixgbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 status; + u8 sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n"); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
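+ * Report the larger SFF-8472 length so the 0xA2 diagnostic page can be read as well.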
*/ + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int ixgbe_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u8 databyte = 0xFF; + int i = 0; + + if (ee->len == 0) + return -EINVAL; + + for (i = ee->offset; i < ee->offset + ee->len; i++) { + /* I2C reads can take long time */ + if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return -EBUSY; + + if (i < ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - ee->offset] = databyte; + } + + return 0; +} + +static const struct ethtool_ops ixgbe_ethtool_ops = { + .get_settings = ixgbe_get_settings, + .set_settings = ixgbe_set_settings, + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .set_wol = ixgbe_set_wol, + .nway_reset = ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .set_eeprom = ixgbe_set_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, + .self_test = ixgbe_diag_test, + .get_strings = ixgbe_get_strings, + .set_phys_id = ixgbe_set_phys_id, + .get_sset_count = ixgbe_get_sset_count, + .get_ethtool_stats = ixgbe_get_ethtool_stats, + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, + .get_rxnfc = ixgbe_get_rxnfc, + .set_rxnfc = ixgbe_set_rxnfc, + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = ixgbe_get_rxfh, + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, + .get_ts_info = ixgbe_get_ts_info, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, +}; + +void ixgbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ixgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c new file mode 100644 index 000000000..631c603fc --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -0,0 +1,1075 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * ixgbe_fcoe_clear_ddp - clear the given ddp context + * @ddp: ptr to the ixgbe_fcoe_ddp + * + * Returns : none + * + */ +static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) +{ + ddp->len = 0; + ddp->err = 1; + ddp->udl = NULL; + ddp->udp = 0UL; + ddp->sgl = NULL; + ddp->sgc = 0; +} + +/** + * ixgbe_fcoe_ddp_put - free the ddp context for a given xid + * @netdev: the corresponding net_device + * @xid: the xid that corresponding ddp will be freed + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_done + * and it is expected to be called by ULD, i.e., FCP layer of libfc + * to release the corresponding ddp context when the I/O is done. + * + * Returns : data length already ddp-ed in bytes + */ +int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) +{ + int len; + struct ixgbe_fcoe *fcoe; + struct ixgbe_adapter *adapter; + struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_hw *hw; + u32 fcbuff; + + if (!netdev) + return 0; + + if (xid >= IXGBE_FCOE_DDP_MAX) + return 0; + + adapter = netdev_priv(netdev); + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + return 0; + + hw = &adapter->hw; + len = ddp->len; + /* if no error then skip ddp context invalidation */ + if (!ddp->err) + goto skip_ddpinv; + + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP FCoE lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), + (xid | IXGBE_FCFLTRW_WE)); + + /* program FCBUFF */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); + + /* program FCDMARW */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_WE)); + + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); + } else { + /* other hardware requires DDP FCoE lock */ + spin_lock_bh(&fcoe->lock); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, + (xid | IXGBE_FCFLTRW_WE)); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_WE)); + + /* guaranteed to be invalidated after 100us */ + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); + spin_unlock_bh(&fcoe->lock); + } + + if (fcbuff & IXGBE_FCBUFF_VALID) + usleep_range(100, 150); + +skip_ddpinv: + if (ddp->sgl) + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, + DMA_FROM_DEVICE); + if (ddp->pool) { + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ddp->pool = NULL; + } + + ixgbe_fcoe_clear_ddp(ddp); + + return len; +} + +/** + * ixgbe_fcoe_ddp_setup - called to set up ddp context + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * Returns : 1 for success and 0 for no ddp + */ +static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc, + int target_mode) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + struct ixgbe_fcoe *fcoe; + struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_fcoe_ddp_pool *ddp_pool; + struct 
scatterlist *sg; + unsigned int i, j, dmacount; + unsigned int len; + static const unsigned int bufflen = IXGBE_FCBUFF_MIN; + unsigned int firstoff = 0; + unsigned int lastsize; + unsigned int thisoff = 0; + unsigned int thislen = 0; + u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; + dma_addr_t addr = 0; + + if (!netdev || !sgl) + return 0; + + adapter = netdev_priv(netdev); + if (xid >= IXGBE_FCOE_DDP_MAX) { + e_warn(drv, "xid=0x%x out-of-range\n", xid); + return 0; + } + + /* no DDP if we are already down or resetting */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return 0; + + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", + xid, ddp->sgl, ddp->sgc); + return 0; + } + ixgbe_fcoe_clear_ddp(ddp); + + + if (!fcoe->ddp_pool) { + e_warn(drv, "No ddp_pool resources allocated\n"); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); + if (!ddp_pool->pool) { + e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); + goto out_noddp; + } + + /* setup dma from scsi command sgl */ + dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); + if (dmacount == 0) { + e_err(drv, "xid 0x%x DMA map error\n", xid); + goto out_noddp; + } + + /* alloc the udl from per cpu ddp pool */ + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); + if (!ddp->udl) { + e_err(drv, "failed allocated ddp context\n"); + goto out_noddp_unmap; + } + ddp->pool = ddp_pool->pool; + ddp->sgl = sgl; + ddp->sgc = sgc; + + j = 0; + for_each_sg(sgl, sg, dmacount, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= IXGBE_BUFFCNT_MAX) { + ddp_pool->noddp++; + goto out_noddp_free; + } + + /* get the offset of length of current buffer */ + thisoff = addr & ((dma_addr_t)bufflen - 1); + thislen = min((bufflen - thisoff), len); + /* + * all but the 1st buffer (j == 0) + * must be aligned on bufflen + */ + if ((j != 0) && (thisoff)) + goto out_noddp_free; + /* + * all but the last buffer + * ((i == (dmacount - 1)) && (thislen == len)) + * must end at bufflen + */ + if (((i != (dmacount - 1)) || (thislen != len)) + && ((thislen + thisoff) != bufflen)) + goto out_noddp_free; + + ddp->udl[j] = (u64)(addr - thisoff); + /* only the first buffer may have none-zero offset */ + if (j == 0) + firstoff = thisoff; + len -= thislen; + addr += thislen; + j++; + } + } + /* only the last buffer may have non-full bufflen */ + lastsize = thisoff + thislen; + + /* + * lastsize can not be buffer len. + * If it is then adding another buffer with lastsize = 1. + */ + if (lastsize == bufflen) { + if (j >= IXGBE_BUFFCNT_MAX) { + ddp_pool->noddp_ext_buff++; + goto out_noddp_free; + } + + ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); + j++; + lastsize = 1; + } + put_cpu(); + + fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); + fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); + fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); + /* Set WRCONTX bit to allow DDP for target */ + if (target_mode) + fcbuff |= (IXGBE_FCBUFF_WRCONTX); + fcbuff |= (IXGBE_FCBUFF_VALID); + + fcdmarw = xid; + fcdmarw |= IXGBE_FCDMARW_WE; + fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT); + + fcfltrw = xid; + fcfltrw |= IXGBE_FCFLTRW_WE; + + /* program DMA context */ + hw = &adapter->hw; + + /* turn on last frame indication for target mode as FCP_RSPtarget is + * supposed to send FCP_RSP when it is done. 
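+ * The LASTSEQH bit is set only once, the first time a target-mode DDP is programmed.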
*/ + if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { + set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); + fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); + fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); + } + + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), + ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); + } else { + /* DDP lock for indirect DDP context access */ + spin_lock_bh(&fcoe->lock); + + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + + spin_unlock_bh(&fcoe->lock); + } + + return 1; + +out_noddp_free: + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); + ixgbe_fcoe_clear_ddp(ddp); + +out_noddp_unmap: + dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); +out_noddp: + put_cpu(); + return 0; +} + +/** + * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. + * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +/** + * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +/** + * ixgbe_fcoe_ddp - check ddp status and mark it done + * @adapter: ixgbe adapter + * @rx_desc: advanced rx descriptor + * @skb: the skb holding the received data + * + * This checks ddp status. + * + * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. 
+ */ +int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + int rc = -EINVAL; + struct ixgbe_fcoe *fcoe; + struct ixgbe_fcoe_ddp *ddp; + struct fc_frame_header *fh; + struct fcoe_crc_eof *crc; + __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); + __le32 ddp_err; + int ddp_max; + u32 fctl; + u16 xid; + + if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) + skb->ip_summed = CHECKSUM_NONE; + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) + fh = (struct fc_frame_header *)(skb->data + + sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); + else + fh = (struct fc_frame_header *)(skb->data + + sizeof(struct fcoe_hdr)); + + fctl = ntoh24(fh->fh_f_ctl); + if (fctl & FC_FC_EX_CTX) + xid = be16_to_cpu(fh->fh_ox_id); + else + xid = be16_to_cpu(fh->fh_rx_id); + + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + if (xid >= ddp_max) + return -EINVAL; + + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + return -EINVAL; + + ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | + IXGBE_RXDADV_ERR_FCERR); + if (ddp_err) + return -EINVAL; + + switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { + /* return 0 to bypass going to ULD for DDPed data */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + rc = 0; + break; + /* unmap the sg list when FCPRSP is received */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, + ddp->sgc, DMA_FROM_DEVICE); + ddp->err = ddp_err; + ddp->sgl = NULL; + ddp->sgc = 0; + /* fall through */ + /* if DDP length is present pass it through to ULD */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + if (ddp->len) + rc = ddp->len; + break; + /* no match will return as an error */ + case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): + default: + break; + } + + /* In target mode, check the last data frame of the sequence. + * For DDP in target mode, data is already DDPed but the header + * indication of the last data frame ould allow is to tell if we + * got all the data and the ULP can send FCP_RSP back, as this is + * not a full fcoe frame, we fill the trailer here so it won't be + * dropped by the ULP stack. 
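+ * The trailer appended below carries FC_EOF_T as the end-of-frame marker.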
+ */ + if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && + (fctl & FC_FC_END_SEQ)) { + skb_linearize(skb); + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } + + return rc; +} + +/** + * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) + * @tx_ring: tx desc ring + * @first: first tx_buffer structure containing skb, tx_flags, and protocol + * @hdr_len: hdr_len to be returned + * + * This sets up large send offload for FCoE + * + * Returns : 0 indicates success, < 0 for error + */ +int ixgbe_fso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + struct fc_frame_header *fh; + u32 vlan_macip_lens; + u32 fcoe_sof_eof = 0; + u32 mss_l4len_idx; + u8 sof, eof; + + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { + dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", + skb_shinfo(skb)->gso_type); + return -EINVAL; + } + + /* resets the header to point fcoe/fc */ + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + + sizeof(struct fcoe_hdr)); + + /* sets up SOF and ORIS */ + sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof; + switch (sof) { + case FC_SOF_I2: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS; + break; + case FC_SOF_I3: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF | + IXGBE_ADVTXD_FCOEF_ORIS; + break; + case FC_SOF_N2: + break; + case FC_SOF_N3: + fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF; + break; + default: + dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof); + return -EINVAL; + } + + /* the first byte of the last dword is EOF */ + skb_copy_bits(skb, skb->len - 4, &eof, 1); + /* sets up EOF and ORIE */ + switch (eof) { + case FC_EOF_N: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N; + break; + case FC_EOF_T: + /* lso needs ORIE */ + if (skb_is_gso(skb)) + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N | + IXGBE_ADVTXD_FCOEF_ORIE; + else + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T; + break; + case FC_EOF_NI: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI; + break; + case FC_EOF_A: + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A; + break; + default: + dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof); + return -EINVAL; + } + + /* sets up PARINC indicating data offset */ + fh = (struct fc_frame_header *)skb_transport_header(skb); + if (fh->fh_f_ctl[2] & FC_FC_REL_OFF) + fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC; + + /* include trailer in headlen as it is replicated per frame */ + *hdr_len = sizeof(struct fcoe_crc_eof); + + /* hdr_len includes fc_hdr if FCoE LSO is enabled */ + if (skb_is_gso(skb)) { + *hdr_len += skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + /* update gso_segs and bytecount */ + first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, + skb_shinfo(skb)->gso_size); + first->bytecount += (first->gso_segs - 1) * *hdr_len; + first->tx_flags |= IXGBE_TX_FLAGS_TSO; + } + + /* set flag indicating FCOE to ixgbe_tx_map call */ + first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC; + + /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ + mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + vlan_macip_lens |= (skb_transport_offset(skb) - 4) + << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + /* write context desc */ + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, + IXGBE_ADVTXT_TUCMD_FCOE, 
mss_l4len_idx); + + return 0; +} + +static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) +{ + struct ixgbe_fcoe_ddp_pool *ddp_pool; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + if (ddp_pool->pool) + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; +} + +static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct ixgbe_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; + char pool_name[32]; + + snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); + + pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, + IXGBE_FCPTR_ALIGN, PAGE_SIZE); + if (!pool) + return -ENOMEM; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + ddp_pool->pool = pool; + ddp_pool->noddp = 0; + ddp_pool->noddp_ext_buff = 0; + + return 0; +} + +/** + * ixgbe_configure_fcoe - configures registers for fcoe at start + * @adapter: ptr to ixgbe adapter + * + * This sets up FCoE related registers + * + * Returns : none + */ +void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; + struct ixgbe_hw *hw = &adapter->hw; + int i, fcoe_q, fcoe_i, fcoe_q_h = 0; + int fcreta_size; + u32 etqf; + + /* Minimal functionality for FCoE requires at least CRC offloads */ + if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) + return; + + /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ + etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); + + /* leave registers un-configured if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return; + + /* Use one or more Rx queues for FCoE by redirection table */ + fcreta_size = IXGBE_FCRETA_SIZE; + if (adapter->hw.mac.type == ixgbe_mac_X550) + fcreta_size = IXGBE_FCRETA_SIZE_X550; + + for (i = 0; i < fcreta_size; i++) { + if (adapter->hw.mac.type == ixgbe_mac_X550) { + int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % + fcoe->indices); + fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; + fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & + IXGBE_FCRETA_ENTRY_HIGH_MASK; + } + + fcoe_i = fcoe->offset + (i % fcoe->indices); + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + fcoe_q |= fcoe_q_h; + IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); + + /* Enable L2 EtherType filter for FIP */ + etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); + + /* Send FIP frames to the first FCoE queue */ + fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + + /* Configure FCoE Rx control */ + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, + IXGBE_FCRXCTRL_FCCRCBO | + (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); +} + +/** + * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources + * @adapter : ixgbe adapter + * + * Cleans up outstanding ddp context resources + * + * Returns : none + */ +void ixgbe_free_fcoe_ddp_resources(struct 
ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + int cpu, i, ddp_max; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return; + + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + + for (i = 0; i < ddp_max; i++) + ixgbe_fcoe_ddp_put(adapter->netdev, i); + + for_each_possible_cpu(cpu) + ixgbe_fcoe_dma_pool_free(fcoe, cpu); + + dma_unmap_single(&adapter->pdev->dev, + fcoe->extra_ddp_buffer_dma, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); + kfree(fcoe->extra_ddp_buffer); + + fcoe->extra_ddp_buffer = NULL; + fcoe->extra_ddp_buffer_dma = 0; +} + +/** + * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources + * @adapter: ixgbe adapter + * + * Sets up ddp context resources + * + * Returns : 0 indicates success or -ENOMEM on failure + */ +int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct device *dev = &adapter->pdev->dev; + void *buffer; + dma_addr_t dma; + unsigned int cpu; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return 0; + + /* Extra buffer to be shared by all DDPs for HW work around */ + buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + kfree(buffer); + return -ENOMEM; + } + + fcoe->extra_ddp_buffer = buffer; + fcoe->extra_ddp_buffer_dma = dma; + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); + if (!err) + continue; + + e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); + ixgbe_free_fcoe_ddp_resources(adapter); + return -ENOMEM; + } + + return 0; +} + +static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); + + if (!fcoe->ddp_pool) { + e_err(drv, "failed to allocate percpu DDP resources\n"); + return -ENOMEM; + } + + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; + + return 0; +} + +static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + adapter->netdev->fcoe_ddp_xid = 0; + + if (!fcoe->ddp_pool) + return; + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; +} + +/** + * ixgbe_fcoe_enable - turn on FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns on FCoE offload feature in 82599. 
+ * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_enable(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + atomic_inc(&fcoe->refcnt); + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + return -EINVAL; + + e_info(drv, "Enabling FCoE offload features.\n"); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Allocate per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_enable(adapter); + + /* enable FCoE and notify stack */ + adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; + netdev->features |= NETIF_F_FCOE_MTU; + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +/** + * ixgbe_fcoe_disable - turn off FCoE offload feature + * @netdev: the corresponding netdev + * + * Turns off FCoE offload feature in 82599. + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_fcoe_disable(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) + return -EINVAL; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return -EINVAL; + + e_info(drv, "Disabling FCoE offload features.\n"); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + /* Free per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_disable(adapter); + + /* disable FCoE and notify stack */ + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + netdev->features &= ~NETIF_F_FCOE_MTU; + + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); + + return 0; +} + +/** + * ixgbe_fcoe_get_wwn - get world wide name for the node or the port + * @netdev : ixgbe adapter + * @wwn : the world wide name + * @type: the type of world wide name + * + * Returns the node or port world wide name if both the prefix and the san + * mac address are valid, then the wwn is formed based on the NAA-2 for + * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). 
+ * + * Returns : 0 on success + */ +int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ + u16 prefix = 0xffff; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + switch (type) { + case NETDEV_FCOE_WWNN: + prefix = mac->wwnn_prefix; + break; + case NETDEV_FCOE_WWPN: + prefix = mac->wwpn_prefix; + break; + default: + break; + } + + if ((prefix != 0xffff) && + is_valid_ether_addr(mac->san_addr)) { + *wwn = ((u64) prefix << 48) | + ((u64) mac->san_addr[0] << 40) | + ((u64) mac->san_addr[1] << 32) | + ((u64) mac->san_addr[2] << 24) | + ((u64) mac->san_addr[3] << 16) | + ((u64) mac->san_addr[4] << 8) | + ((u64) mac->san_addr[5]); + return 0; + } + return -EINVAL; +} + +/** + * ixgbe_fcoe_get_hbainfo - get FCoE HBA information + * @netdev : ixgbe adapter + * @info : HBA information + * + * Returns ixgbe HBA information + * + * Returns : 0 on success + */ +int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + struct netdev_fcoe_hbainfo *info) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int i, pos; + u8 buf[8]; + + if (!info) + return -EINVAL; + + /* Don't return information on unsupported devices */ + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540) + return -EINVAL; + + /* Manufacturer */ + snprintf(info->manufacturer, sizeof(info->manufacturer), + "Intel Corporation"); + + /* Serial Number */ + + /* Get the PCI-e Device Serial Number Capability */ + pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); + if (pos) { + pos += 4; + for (i = 0; i < 8; i++) + pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); + + snprintf(info->serial_number, sizeof(info->serial_number), + "%02X%02X%02X%02X%02X%02X%02X%02X", + buf[7], buf[6], buf[5], buf[4], + buf[3], buf[2], buf[1], buf[0]); + } else + snprintf(info->serial_number, sizeof(info->serial_number), + "Unknown"); + + /* Hardware Version */ + snprintf(info->hardware_version, + sizeof(info->hardware_version), + "Rev %d", hw->revision_id); + /* Driver Name/Version */ + snprintf(info->driver_version, + sizeof(info->driver_version), + "%s v%s", + ixgbe_driver_name, + ixgbe_driver_version); + /* Firmware Version */ + snprintf(info->firmware_version, + sizeof(info->firmware_version), + "0x%08x", + (adapter->eeprom_verh << 16) | + adapter->eeprom_verl); + + /* Model */ + if (hw->mac.type == ixgbe_mac_82599EB) { + snprintf(info->model, + sizeof(info->model), + "Intel 82599"); + } else { + snprintf(info->model, + sizeof(info->model), + "Intel X540"); + } + + /* Model Description */ + snprintf(info->model_description, + sizeof(info->model_description), + "%s", + ixgbe_default_device_descr); + + return 0; +} + +/** + * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter - pointer to the device adapter structure + * + * Return : TC that FCoE is mapped to + */ +u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB + return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +#else + return 0; +#endif +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h new file mode 100644 index 000000000..38385876e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -0,0 +1,88 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_FCOE_H +#define _IXGBE_FCOE_H + +#include +#include + +/* shift bits within STAT fo FCSTAT */ +#define IXGBE_RXDADV_FCSTAT_SHIFT 4 + +/* ddp user buffer */ +#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ +#define IXGBE_FCPTR_ALIGN 16 +#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) +#define IXGBE_FCBUFF_4KB 0x0 +#define IXGBE_FCBUFF_8KB 0x1 +#define IXGBE_FCBUFF_16KB 0x2 +#define IXGBE_FCBUFF_64KB 0x3 +#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ +#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ +#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ +#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ + +/* Default traffic class to use for FCoE */ +#define IXGBE_FCOE_DEFTC 3 + +/* fcerr */ +#define IXGBE_FCERR_BADCRC 0x00100000 + +/* FCoE DDP for target mode */ +#define __IXGBE_FCOE_TARGET 1 + +struct ixgbe_fcoe_ddp { + int len; + u32 err; + unsigned int sgc; + struct scatterlist *sgl; + dma_addr_t udp; + u64 *udl; + struct dma_pool *pool; +}; + +/* per cpu variables */ +struct ixgbe_fcoe_ddp_pool { + struct dma_pool *pool; + u64 noddp; + u64 noddp_ext_buff; +}; + +struct ixgbe_fcoe { + struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; + atomic_t refcnt; + spinlock_t lock; + struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550]; + void *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; + unsigned long mode; + u8 up; +}; + +#endif /* _IXGBE_FCOE_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c new file mode 100644 index 000000000..68e1e757e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -0,0 +1,1221 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_sriov.h" + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) +{ +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* nothing to do if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return true; + + /* The work is already done if the FCoE ring is shared */ + if (fcoe->offset < tcs) + return true; + + /* The FCoE rings exist separately, we need to move their reg_idx */ + if (fcoe->indices) { + u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->rx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->tx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + } + +#endif /* IXGBE_FCOE */ + return true; +} + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + /* TxQs/TC: 4 RxQs/TC: 8 */ + *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ + *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (num_tcs > 4) { + /* + * TCs : TC0/1 TC2/3 TC4-7 + * TxQs/TC: 32 16 8 + * RxQs/TC: 16 16 16 + */ + 
*rx = tc << 4; + if (tc < 3) + *tx = tc << 5; /* 0, 32, 64 */ + else if (tc < 5) + *tx = (tc + 2) << 4; /* 80, 96 */ + else + *tx = (tc + 8) << 3; /* 104, 112, 120 */ + } else { + /* + * TCs : TC0 TC1 TC2/3 + * TxQs/TC: 64 32 16 + * RxQs/TC: 32 32 32 + */ + *rx = tc << 5; + if (tc < 2) + *tx = tc << 6; /* 0, 64 */ + else + *tx = (tc + 4) << 4; /* 96, 112 */ + } + default: + break; + } +} + +/** + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + unsigned int tx_idx, rx_idx; + int tc, offset, rss_i, i; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); + for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { + adapter->tx_ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->tx_ring[offset + i]->dcb_tc = tc; + adapter->rx_ring[offset + i]->dcb_tc = tc; + } + } + + return true; +} + +#endif +/** + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) +{ +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_rx_queues; i++, reg_idx++) + adapter->rx_ring[i]->reg_idx = reg_idx; + +#endif + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + +#endif + + return true; +} + +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure 
to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. + * + **/ +static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + +/** + * ixgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) +{ + /* start with default case */ + adapter->rx_ring[0]->reg_idx = 0; + adapter->tx_ring[0]->reg_idx = 0; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb_sriov(adapter)) + return; + + if (ixgbe_cache_ring_dcb(adapter)) + return; + +#endif + if (ixgbe_cache_ring_sriov(adapter)) + return; + + ixgbe_cache_ring_rss(adapter); +} + +#define IXGBE_RSS_16Q_MASK 0xF +#define IXGBE_RSS_8Q_MASK 0x7 +#define IXGBE_RSS_4Q_MASK 0x3 +#define IXGBE_RSS_2Q_MASK 0x1 +#define IXGBE_RSS_DISABLED_MASK 0x0 + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. + * + **/ +static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) +{ + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + +#ifdef IXGBE_FCOE + if (adapter->flags & 
IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * tcs; + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } else if (tcs > 1) { + /* use queue belonging to FCoE TC */ + fcoe->indices = 1; + fcoe->offset = ixgbe_fcoe_get_tc(adapter); + } else { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + + fcoe->indices = 0; + fcoe->offset = 0; + } + } + +#endif /* IXGBE_FCOE */ + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); + + return true; +} + +static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_ring_feature *f; + int rss_i, rss_m, i; + int tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + rss_i = min_t(u16, rss_i, 4); + rss_m = IXGBE_RSS_4Q_MASK; + } else if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + rss_i = min_t(u16, rss_i, 8); + rss_m = IXGBE_RSS_8Q_MASK; + } else { + /* 4 TC w/ 16 queues per TC */ + rss_i = min_t(u16, rss_i, 16); + rss_m = IXGBE_RSS_16Q_MASK; + } + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = min_t(int, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when multiple TCs are enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration indexed + * by feature specific indices and offset. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + u8 tc = ixgbe_fcoe_get_tc(adapter); + + f = &adapter->ring_feature[RING_F_FCOE]; + f->indices = min_t(u16, rss_i, f->limit); + f->offset = rss_i * tc; + } + +#endif /* IXGBE_FCOE */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +#endif +/** + * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
+ * + **/ +static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = IXGBE_RSS_DISABLED_MASK; +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); + + /* only proceed if SR-IOV is enabled */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool */ + if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) { + vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; + rss_m = IXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + rss_m = IXGBE_RSS_4Q_MASK; + rss_i = 4; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; + adapter->num_tx_queues = vmdq_i * rss_i; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* + * FCoE can use rings from adjacent buffers to allow RSS + * like behavior. To account for this we need to add the + * FCoE indices to the total ring count. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (vmdq_i > 1 && fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * rss_i; + } else { + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); + fcoe->offset = fcoe_i - fcoe->indices; + + fcoe_i -= rss_i; + } + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } + +#endif + return true; +} + +/** + * ixgbe_set_rss_queues - Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 
+ * + **/ +static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f; + u16 rss_i; + + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; + + f->indices = rss_i; + f->mask = IXGBE_RSS_16Q_MASK; + + /* disable ATR by default, it will be configured below */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + + /* + * Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. + */ + if (rss_i > 1 && adapter->atr_sample_rate) { + f = &adapter->ring_feature[RING_F_FDIR]; + + rss_i = f->indices = f->limit; + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + +#ifdef IXGBE_FCOE + /* + * FCoE can exist on the same rings as standard network traffic + * however it is preferred to avoid that if possible. In order + * to get the best performance we allocate as many FCoE queues + * as we can and we place them at the end of the ring array to + * avoid sharing queues with standard RSS on systems with 24 or + * more CPUs. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct net_device *dev = adapter->netdev; + u16 fcoe_i; + + f = &adapter->ring_feature[RING_F_FCOE]; + + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); + fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + f->indices = min_t(u16, fcoe_i, f->limit); + f->offset = fcoe_i - f->indices; + rss_i = max_t(u16, fcoe_i, rss_i); + } + +#endif /* IXGBE_FCOE */ + adapter->num_rx_queues = rss_i; + adapter->num_tx_queues = rss_i; + + return true; +} + +/** + * ixgbe_set_num_queues - Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_set_dcb_sriov_queues(adapter)) + return; + + if (ixgbe_set_dcb_queues(adapter)) + return; + +#endif + if (ixgbe_set_sriov_queues(adapter)) + return; + + ixgbe_set_rss_queues(adapter); +} + +/** + * ixgbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); + + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. 
We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. + */ + vectors = min_t(int, vectors, num_online_cpus()); + + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. + * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit + */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + + if (vectors < 0) { + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors + */ + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return vectors; + } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; +} + +static void ixgbe_add_ring(struct ixgbe_ring *ring, + struct ixgbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_count: q_vectors allocated on adapter, used for ring interleaving + * @v_idx: index of vector in adapter struct + * @txr_count: total number of Tx rings to allocate + * @txr_idx: index of first Tx ring to allocate + * @rxr_count: total number of Rx rings to allocate + * @rxr_idx: index of first Rx ring to allocate + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. 
+ **/ +static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + int v_count, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct ixgbe_q_vector *q_vector; + struct ixgbe_ring *ring; + int node = NUMA_NO_NODE; + int cpu = -1; + int ring_count, size; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + ring_count = txr_count + rxr_count; + size = sizeof(struct ixgbe_q_vector) + + (sizeof(struct ixgbe_ring) * ring_count); + + /* customize cpu for Flow Director mapping */ + if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + q_vector->numa_node = node; + +#ifdef CONFIG_IXGBE_DCA + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#endif + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbe_poll, 64); + napi_hash_add(&q_vector->napi); + +#ifdef CONFIG_NET_RX_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); + +#endif + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = IXGBE_10K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = IXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + if (adapter->num_rx_pools > 1) + ring->queue_index = + txr_idx % adapter->num_rx_queues_per_pool; + else + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ixgbe_add_ring(ring, &q_vector->rx); + + /* + * 82599 errata, UDP frames with a 0 checksum + * can be marked as checksum errors. 
+ */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); + +#ifdef IXGBE_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + if ((rxr_idx >= f->offset) && + (rxr_idx < f->offset + f->indices)) + set_bit(__IXGBE_RX_FCOE, &ring->state); + } + +#endif /* IXGBE_FCOE */ + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + if (adapter->num_rx_pools > 1) + ring->queue_index = + rxr_idx % adapter->num_rx_queues_per_pool; + else + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx += v_count; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) +{ + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ixgbe_ring *ring; + + ixgbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + ixgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + napi_hash_del(&q_vector->napi); + netif_napi_del(&q_vector->napi); + + /* + * ixgbe_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); +} + +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_vectors = adapter->num_q_vectors; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + /* only one q_vector if MSI-X is disabled. */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining--; + rxr_idx++; + } + } + + for (; v_idx < q_vectors; v_idx++) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + txr_remaining -= tqpv; + rxr_idx++; + txr_idx++; + } + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ixgbe_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. 
In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + ixgbe_free_q_vector(adapter, v_idx); +} + +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +{ + int err; + + /* We will try to get MSI-X interrupts first */ + if (!ixgbe_acquire_msix_vectors(adapter)) + return; + + /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. + */ + + /* Disable DCB unless we only have a single traffic class */ + if (netdev_get_num_tc(adapter->netdev) > 1) { + e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n"); + netdev_reset_tc(adapter->netdev); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); + ixgbe_disable_sriov(adapter); + + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); + adapter->ring_feature[RING_F_RSS].limit = 1; + + /* recalculate number of queues now that many features have been + * changed or disabled. + */ + ixgbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + + err = pci_enable_msi(adapter->pdev); + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", + err); + else + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; +} + +/** + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + ixgbe_set_num_queues(adapter); + + /* Set interrupt mode */ + ixgbe_set_interrupt_capability(adapter); + + err = ixgbe_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + ixgbe_cache_ring_register(adapter); + + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; + +err_alloc_q_vectors: + ixgbe_reset_interrupt_capability(adapter); + return err; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); +} + +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c new file mode 100644 index 000000000..5be12a00e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -0,0 +1,9123 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_OF +#include +#endif + +#ifdef CONFIG_SPARC +#include +#include +#endif + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_dcb_82599.h" +#include "ixgbe_sriov.h" + +char ixgbe_driver_name[] = "ixgbe"; +static const char ixgbe_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Network Driver"; +#ifdef IXGBE_FCOE +char ixgbe_default_device_descr[] = + "Intel(R) 10 Gigabit Network Connection"; +#else +static char ixgbe_default_device_descr[] = + "Intel(R) 10 Gigabit Network Connection"; +#endif +#define DRV_VERSION "4.0.1-k" +const char ixgbe_driver_version[] = DRV_VERSION; +static const char ixgbe_copyright[] = + "Copyright (c) 1999-2014 Intel Corporation."; + +static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_82598] = &ixgbe_82598_info, + [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, + [board_X550] = &ixgbe_X550_info, + [board_X550EM_x] = &ixgbe_X550EM_x_info, +}; + +/* ixgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, + {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_X540T1), board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, + void *p); +static struct notifier_block dca_notifier = { + .notifier_call = ixgbe_notify_dca, + .next = NULL, + .priority = 0 +}; +#endif + +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, + "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); +#endif /* CONFIG_PCI_IOV */ + +static unsigned int allow_unsupported_sfp; +module_param(allow_unsupported_sfp, uint, 0); +MODULE_PARM_DESC(allow_unsupported_sfp, + "Allow unsupported and untested SFP+ modules on 82599-based adapters"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); + +static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, + u32 reg, u16 *value) +{ + struct pci_dev *parent_dev; + struct pci_bus *parent_bus; + + parent_bus = adapter->pdev->bus->parent; + if (!parent_bus) + return -1; + + parent_dev = parent_bus->self; + if (!parent_dev) + return -1; + + if (!pci_is_pcie(parent_dev)) + return -1; + + pcie_capability_read_word(parent_dev, reg, value); + if (*value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) + return -1; + return 0; +} + +static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 link_status = 0; + int err; + + hw->bus.type = ixgbe_bus_type_pci_express; + + /* Get the negotiated link width and speed from PCI config space of the + * parent, as this device is behind a switch + */ + err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); + + /* assume caller will handle error case */ + if (err) + return err; + + hw->bus.width = ixgbe_convert_bus_width(link_status); + hw->bus.speed = ixgbe_convert_bus_speed(link_status); + + return 0; +} + +/** + * ixgbe_check_from_parent - Determine whether PCIe info should come from parent + * @hw: hw specific details + * + * This function is used by probe to determine whether a device's PCI-Express + * bandwidth details should be gathered from the parent bus instead of from the + * device. Used to ensure that various locations all have the correct device ID + * checks. 
+ */ +static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) +{ + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: + return true; + default: + return false; + } +} + +static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, + int expected_gts) +{ + int max_gts = 0; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + struct pci_dev *pdev; + + /* determine whether to use the the parent device + */ + if (ixgbe_pcie_from_parent(&adapter->hw)) + pdev = adapter->pdev->bus->parent->self; + else + pdev = adapter->pdev; + + if (pcie_get_minimum_link(pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) { + e_dev_warn("Unable to determine PCI Express bandwidth.\n"); + return; + } + + switch (speed) { + case PCIE_SPEED_2_5GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 2 * width; + break; + case PCIE_SPEED_5_0GT: + /* 8b/10b encoding reduces max throughput by 20% */ + max_gts = 4 * width; + break; + case PCIE_SPEED_8_0GT: + /* 128b/130b encoding reduces throughput by less than 2% */ + max_gts = 8 * width; + break; + default: + e_dev_warn("Unable to determine PCI Express bandwidth.\n"); + return; + } + + e_dev_info("PCI Express bandwidth of %dGT/s available\n", + max_gts); + e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n", + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + "Unknown"), + width, + (speed == PCIE_SPEED_2_5GT ? "20%" : + speed == PCIE_SPEED_5_0GT ? "20%" : + speed == PCIE_SPEED_8_0GT ? "<2%" : + "Unknown")); + + if (max_gts < expected_gts) { + e_dev_warn("This is not sufficient for optimal performance of this card.\n"); + e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n", + expected_gts); + e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n"); + } +} + +static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) +{ + if (!test_bit(__IXGBE_DOWN, &adapter->state) && + !test_bit(__IXGBE_REMOVING, &adapter->state) && + !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) + schedule_work(&adapter->service_task); +} + +static void ixgbe_remove_adapter(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + e_dev_err("Adapter removed\n"); + if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) + ixgbe_service_event_schedule(adapter); +} + +static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned IXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. + */ + if (reg == IXGBE_STATUS) { + ixgbe_remove_adapter(hw); + return; + } + value = ixgbe_read_reg(hw, IXGBE_STATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbe_remove_adapter(hw); +} + +/** + * ixgbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or IXGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. 
This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns IXGBE_FAILED_READ_REG (all ones). + */ +u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u32 value; + + if (ixgbe_removed(reg_addr)) + return IXGBE_FAILED_READ_REG; + value = readl(reg_addr + reg); + if (unlikely(value == IXGBE_FAILED_READ_REG)) + ixgbe_check_remove(hw, reg); + return value; +} + +static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) +{ + u16 value; + + pci_read_config_word(pdev, PCI_VENDOR_ID, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD) { + ixgbe_remove_adapter(hw); + return true; + } + return false; +} + +u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u16 value; + + if (ixgbe_removed(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_WORD; + pci_read_config_word(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_WORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_WORD; + return value; +} + +#ifdef CONFIG_PCI_IOV +static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) +{ + struct ixgbe_adapter *adapter = hw->back; + u32 value; + + if (ixgbe_removed(hw->hw_addr)) + return IXGBE_FAILED_READ_CFG_DWORD; + pci_read_config_dword(adapter->pdev, reg, &value); + if (value == IXGBE_FAILED_READ_CFG_DWORD && + ixgbe_check_cfg_remove(hw, adapter->pdev)) + return IXGBE_FAILED_READ_CFG_DWORD; + return value; +} +#endif /* CONFIG_PCI_IOV */ + +void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) +{ + struct ixgbe_adapter *adapter = hw->back; + + if (ixgbe_removed(hw->hw_addr)) + return; + pci_write_config_word(adapter->pdev, reg, value); +} + +static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +{ + BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +} + +struct ixgbe_reg_info { + u32 ofs; + char *name; +}; + +static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { + + /* General Registers */ + {IXGBE_CTRL, "CTRL"}, + {IXGBE_STATUS, "STATUS"}, + {IXGBE_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {IXGBE_EICR, "EICR"}, + + /* RX Registers */ + {IXGBE_SRRCTL(0), "SRRCTL"}, + {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, + {IXGBE_RDLEN(0), "RDLEN"}, + {IXGBE_RDH(0), "RDH"}, + {IXGBE_RDT(0), "RDT"}, + {IXGBE_RXDCTL(0), "RXDCTL"}, + {IXGBE_RDBAL(0), "RDBAL"}, + {IXGBE_RDBAH(0), "RDBAH"}, + + /* TX Registers */ + {IXGBE_TDBAL(0), "TDBAL"}, + {IXGBE_TDBAH(0), "TDBAH"}, + {IXGBE_TDLEN(0), "TDLEN"}, + {IXGBE_TDH(0), "TDH"}, + {IXGBE_TDT(0), "TDT"}, + {IXGBE_TXDCTL(0), "TXDCTL"}, + + /* List Terminator */ + { .name = NULL } +}; + + +/* + * ixgbe_regdump - register printout routine + */ +static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) +{ + int i = 0, j = 0; + char rname[16]; + u32 regs[64]; + + switch (reginfo->ofs) { + case IXGBE_SRRCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + break; + case IXGBE_DCA_RXCTRL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + break; + case IXGBE_RDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + break; + case IXGBE_RDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); + break; + case 
IXGBE_RDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + break; + case IXGBE_RXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + break; + case IXGBE_RDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + break; + case IXGBE_RDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + break; + case IXGBE_TDBAL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + break; + case IXGBE_TDBAH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + break; + case IXGBE_TDLEN(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + break; + case IXGBE_TDH(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + break; + case IXGBE_TDT(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + break; + case IXGBE_TXDCTL(0): + for (i = 0; i < 64; i++) + regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + break; + default: + pr_info("%-15s %08x\n", reginfo->name, + IXGBE_READ_REG(hw, reginfo->ofs)); + return; + } + + for (i = 0; i < 8; i++) { + snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); + pr_err("%-15s", rname); + for (j = 0; j < 8; j++) + pr_cont(" %08x", regs[i*8+j]); + pr_cont("\n"); + } + +} + +/* + * ixgbe_dump - Print registers, tx-rings and rx-rings + */ +static void ixgbe_dump(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_reg_info *reginfo; + int n = 0; + struct ixgbe_ring *tx_ring; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + struct my_u0 { u64 a; u64 b; } *u0; + struct ixgbe_ring *rx_ring; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state " + "trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", + netdev->name, + netdev->state, + netdev->trans_start, + netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; + reginfo->name; reginfo++) { + ixgbe_regdump(hw, reginfo); + } + + /* Print TX Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", + n, tx_ring->next_to_use, tx_ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + /* Print TX Rings */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); + + /* Transmit Descriptor Formats + * + * 82598 Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | + * 
+--------------------------------------------------------------+ + * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 + * + * 82598 Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | NXTSEQ | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + * + * 82599+ Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * 82599+ Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + */ + + for (n = 0; n < adapter->num_tx_queues; n++) { + tx_ring = adapter->tx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + tx_desc = IXGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } + } + } + + /* Print RX Rings Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); + } + + /* Print RX Rings */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); + + /* Receive Descriptor Formats + * + * 82598 Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * 82598 Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 16 15 4 3 0 + * +------------------------------------------------------+ + * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | + * | Packet | IP | | | 
| Type | Type | + * | Checksum | Ident | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + * + * 82599+ Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * 82599+ Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + + for (n = 0; n < adapter->num_rx_queues; n++) { + rx_ring = adapter->rx_ring[n]; + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", + "<-- Adv Rx Read format\n"); + pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", + "<-- Adv Rx Write-Back format\n"); + + for (i = 0; i < rx_ring->count; i++) { + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + rx_desc = IXGBE_RX_DESC(rx_ring, i); + u0 = (struct my_u0 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + if (staterr & IXGBE_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("RWB[0x%03X] %016llX " + "%016llX ---------------- %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + rx_buffer_info->skb); + } else { + pr_info("R [0x%03X] %016llX " + "%016llX %016llX %p", i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)rx_buffer_info->dma, + rx_buffer_info->skb); + + if (netif_msg_pktdata(adapter) && + rx_buffer_info->dma) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + page_address(rx_buffer_info->page) + + rx_buffer_info->page_offset, + ixgbe_rx_bufsz(rx_ring), true); + } + } + + if (i == rx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == rx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + } + } +} + +static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware take over control of h/w */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); +} + +static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) +{ + u32 ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); +} + +/** + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void ixgbe_set_ivar(struct 
ixgbe_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (direction == -1) + direction = 0; + index = (((direction * 64) + queue) >> 2) & 0x1F; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (queue & 0x3))); + ivar |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((queue & 1) * 8); + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); + break; + } else { + /* tx or rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar); + break; + } + default: + break; + } +} + +static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (qmask >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + break; + default: + break; + } +} + +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, + struct ixgbe_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + int i; + u32 data; + + if ((hw->fc.current_mode != ixgbe_fc_full) && + (hw->fc.current_mode != ixgbe_fc_rx_pause)) + return; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + break; + default: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); +} + +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u32 xoff[8] = {0}; + u8 tc; + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { + 
ixgbe_update_xoff_rx_lfc(adapter); + return; + } + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + u32 pxoffrxc; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + break; + default: + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + + tc = tx_ring->dcb_tc; + if (xoff[tc]) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } +} + +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) +{ + return ring->stats.packets; +} + +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; + u32 head, tail; + + if (ring->l2_accel_priv) + adapter = ring->l2_accel_priv->real_adapter; + else + adapter = netdev_priv(ring->netdev); + + hw = &adapter->hw; + head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); + tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +{ + u32 tx_done = ixgbe_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = ixgbe_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. 
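+ * + * Concretely: the first pass with no forward progress merely arms the flag + * (test_and_set_bit() returns the old, clear value), and only a second + * consecutive pass with tx_done unchanged and descriptors still pending + * reports the queue as hung.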
+ */ + if (tx_done_old == tx_done && tx_pending) + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + + return false; +} + +/** + * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) +{ + + /* Do the reset outside of interrupt context */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + ixgbe_service_event_schedule(adapter); + } +} + +/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *tx_ring) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = q_vector->tx.work_limit; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_consume_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + 
q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct ixgbe_hw *hw = &adapter->hw; + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + + /* schedule immediate reset if we believe we hung */ + ixgbe_tx_timeout_reset(adapter); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__IXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +#ifdef CONFIG_IXGBE_DCA +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + u16 reg_offset; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); + txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; + break; + default: + /* for unknown hardware do not write register */ + return; + } + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | + IXGBE_DCA_TXCTRL_DATA_RRO_EN | + IXGBE_DCA_TXCTRL_DESC_DCA_EN; + + IXGBE_WRITE_REG(hw, reg_offset, txctrl); +} + +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int cpu) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); + u8 reg_idx = rx_ring->reg_idx; + + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; + break; + default: + break; + } + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
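+ * + * The DCA tag obtained from dca3_get_tag() above steers descriptor write-backs + * toward the cache of the CPU this queue vector last ran on, which is why + * ixgbe_update_dca() rewrites these registers whenever the vector moves to a + * different CPU.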
+ */ + rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DESC_DCA_EN; + + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); +} + +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int cpu = get_cpu(); + + if (q_vector->cpu == cpu) + goto out_no_update; + + ixgbe_for_each_ring(ring, q_vector->tx) + ixgbe_update_tx_dca(adapter, ring, cpu); + + ixgbe_for_each_ring(ring, q_vector->rx) + ixgbe_update_rx_dca(adapter, ring, cpu); + + q_vector->cpu = cpu; +out_no_update: + put_cpu(); +} + +static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) +{ + int i; + + if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) + return; + + /* always use CB2 mode, difference is masked in the CB driver */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + + for (i = 0; i < adapter->num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[i]); + } +} + +static int __ixgbe_notify_dca(struct device *dev, void *data) +{ + struct ixgbe_adapter *adapter = dev_get_drvdata(dev); + unsigned long event = *(unsigned long *)data; + + if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) + return 0; + + switch (event) { + case DCA_PROVIDER_ADD: + /* if we're already enabled, don't do it again */ + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + break; + if (dca_add_requester(dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + break; + } + /* Fall Through since DCA is disabled. */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + dca_remove_requester(dev); + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + } + break; + } + + return 0; +} + +#endif /* CONFIG_IXGBE_DCA */ +static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (ring->netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + PKT_HASH_TYPE_L3); +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @ring: structure containing ring specific data + * @rx_desc: advanced rx descriptor + * + * Returns : true if it is FCoE pkt + */ +static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + return test_bit(__IXGBE_RX_FCOE, &ring->state) && + ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) == + (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << + IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); +} + +#endif /* IXGBE_FCOE */ +/** + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + __le16 hdr_info = rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; + bool encap_pkt = false; + + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) && + (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) { + encap_pkt = true; + skb->encapsulation = 1; + skb->ip_summed = CHECKSUM_NONE; + } + + /* if IP and 
error */ + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { + ring->rx_stats.csum_err++; + return; + } + + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) + return; + + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as + * checksum errors. + */ + if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && + test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) + return; + + ring->rx_stats.csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (encap_pkt) { + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) + return; + + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { + ring->rx_stats.csum_err++; + return; + } + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; + } +} + +static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * ixgbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = IXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!ixgbe_alloc_mapped_page(rx_ring, bi)) + break; + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
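+ * The writes that must land first are the pkt_addr and the cleared + * status_error words filled in by the loop above; the tail bump below is + * what tells the hardware those descriptors are valid.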
+ */ + wmb(); + writel(i, rx_ring->tail); + } +} + +static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + IXGBE_CB(skb)->append_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; +} + +static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!IXGBE_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + + ixgbe_set_rsc_gso_size(rx_ring, skb); + + /* gso_size is computed using append_cnt so always clear it last */ + IXGBE_CB(skb)->append_cnt = 0; +} + +/** + * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + ixgbe_update_rsc_stats(rx_ring, skb); + + ixgbe_rx_hash(rx_ring, rx_desc, skb); + + ixgbe_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, dev); +} + +static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + if (ixgbe_qv_busy_polling(q_vector)) + netif_receive_skb(skb); + else + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * ixgbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBE_RX_DESC(rx_ring, ntc)); + + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; + IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= IXGBE_RXDADV_NEXTP_MASK; + ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + } + } + + /* if we are the last buffer then there is nothing else to do */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. 
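+ * + * ixgbe_fetch_rx_buffer() stashes the handle in IXGBE_CB(skb)->dma when a + * multi-buffer frame starts; once the EOP descriptor arrives this either + * unmaps the page (when the ring has already released it) or just syncs it + * for CPU access, and then clears the stashed handle.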
+ */ +static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } else { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + IXGBE_CB(skb)->dma = 0; +} + +/** + * ixgbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbe_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbe_pull_tail(rx_ring, skb); + +#ifdef IXGBE_FCOE + /* do not attempt to pad FCoE Frames as this will disrupt DDP */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) + return false; + +#endif + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *old_buff) +{ + struct ixgbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); +} + +static inline bool ixgbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +/** + * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. 
+ * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - + ixgbe_rx_bufsz(rx_ring); +#endif + + if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ixgbe_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ixgbe_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + struct ixgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
Only unmap it when EOP is + * reached + */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + goto dma_sync; + + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + ixgbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; + } + + /* pull page into skb */ + if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed + **/ +static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +#ifdef IXGBE_FCOE + struct ixgbe_adapter *adapter = q_vector->adapter; + int ddp_bytes; + unsigned int mss = 0; +#endif /* IXGBE_FCOE */ + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; + struct sk_buff *skb; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!rx_desc->wb.upper.status_error) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + /* retrieve a buffer from the ring */ + skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + +#ifdef IXGBE_FCOE + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { + ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = rx_ring->netdev->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct 
fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, + mss); + } + if (!ddp_bytes) { + dev_kfree_skb_any(skb); + continue; + } + } + +#endif /* IXGBE_FCOE */ + skb_mark_napi_id(skb, &q_vector->napi); + ixgbe_rx_skb(q_vector, skb); + + /* update budget accounting */ + total_rx_packets++; + } + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbe_low_latency_recv(struct napi_struct *napi) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int found = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbe_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbe_for_each_ring(ring, q_vector->rx) { + found = ixgbe_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ixgbe_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) +{ + struct ixgbe_q_vector *q_vector; + int v_idx; + u32 mask; + + /* Populate MSIX to EITR Select */ + if (adapter->num_vfs > 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); + } + + /* + * Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { + struct ixgbe_ring *ring; + q_vector = adapter->q_vector[v_idx]; + + ixgbe_for_each_ring(ring, q_vector->rx) + ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ixgbe_for_each_ring(ring, q_vector->tx) + ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + ixgbe_write_eitr(q_vector); + } + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, + v_idx); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + ixgbe_set_ivar(adapter, -1, 1, v_idx); + break; + default: + break; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ixgbe_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. 
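+ * For example, roughly 64KB moved during a 50 usec interval works out to a + * bytes_perint of about 1300, well past the bulk threshold below, so the + * ring is classified bulk_latency (the 8000 ints/s setting).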
The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + * this functionality is controlled by the InterruptThrottleRate module + * parameter (see ixgbe_param.c) + **/ +static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (8000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; + + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) + itr_setting = low_latency; + break; + case low_latency: + if (bytes_perint > 20) + itr_setting = bulk_latency; + else if (bytes_perint <= 10) + itr_setting = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= 20) + itr_setting = low_latency; + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +/** + * ixgbe_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
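+ * + * Note that q_vector->itr is kept in register units rather than usecs: + * ixgbe_update_itr() converts it with (itr >> 2) when it needs an elapsed + * time, and ixgbe_set_itr() feeds its smoothed result straight back into + * this routine.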
+ */ +void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + /* must write high and low 16 bits to reset counter */ + itr_reg |= (itr_reg << 16); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* + * set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + break; + default: + break; + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); +} + +static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) +{ + u32 new_itr = q_vector->itr; + u8 current_itr; + + ixgbe_update_itr(q_vector, &q_vector->tx); + ixgbe_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IXGBE_100K_ITR; + break; + case low_latency: + new_itr = IXGBE_20K_ITR; + break; + case bulk_latency: + new_itr = IXGBE_8K_ITR; + break; + default: + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + ixgbe_write_eitr(q_vector); + } +} + +/** + * ixgbe_check_overtemp_subtask - check for over temperature + * @adapter: pointer to adapter + **/ +static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: + /* + * Since the warning interrupt is for both ports + * we don't have to check if: + * - This interrupt wasn't for our port. + * - We may have missed the interrupt so always have to + * check if we got a LSC + */ + if (!(eicr & IXGBE_EICR_GPI_SDP0) && + !(eicr & IXGBE_EICR_LSC)) + return; + + if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) { + u32 speed; + bool link_up = false; + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (link_up) + return; + } + + /* Check if this is not due to overtemp */ + if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP) + return; + + break; + default: + if (!(eicr & IXGBE_EICR_GPI_SDP0)) + return; + break; + } + e_crit(drv, + "Network adapter has been stopped because it has over heated. " + "Restart the computer. 
If the problem persists, " + "power off the system and replace the adapter\n"); + + adapter->interrupt_event = 0; +} + +static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && + (eicr & IXGBE_EICR_GPI_SDP1)) { + e_crit(probe, "Fan has stopped, replace the adapter\n"); + /* write to clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + } +} + +static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + /* + * Need to check link state so complete overtemp check + * on service task + */ + if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) && + (!test_bit(__IXGBE_DOWN, &adapter->state))) { + adapter->interrupt_event = eicr; + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; + ixgbe_service_event_schedule(adapter); + return; + } + return; + case ixgbe_mac_X540: + if (!(eicr & IXGBE_EICR_TS)) + return; + break; + default: + return; + } + + e_crit(drv, + "Network adapter has been stopped because it has over heated. " + "Restart the computer. If the problem persists, " + "power off the system and replace the adapter\n"); +} + +static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (eicr & IXGBE_EICR_GPI_SDP2) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + ixgbe_service_event_schedule(adapter); + } + } + + if (eicr & IXGBE_EICR_GPI_SDP1) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + ixgbe_service_event_schedule(adapter); + } + } +} + +static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->lsc_int++; + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); + IXGBE_WRITE_FLUSH(hw); + ixgbe_service_event_schedule(adapter); + } +} + +static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, + u64 qmask) +{ + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + mask = (IXGBE_EIMS_RTX_QUEUE & qmask); + IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); + mask = (qmask >> 32); + if (mask) + 
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); + break; + default: + break; + } + /* skip the flush */ +} + +/** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + bool flush) +{ + u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + + /* don't reenable LSC while waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + mask &= ~IXGBE_EIMS_LSC; + + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + mask |= IXGBE_EIMS_GPI_SDP0; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + mask |= IXGBE_EIMS_TS; + break; + default: + break; + } + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + mask |= IXGBE_EIMS_GPI_SDP1; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP2; + /* fall through */ + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + mask |= IXGBE_EIMS_ECC; + mask |= IXGBE_EIMS_MAILBOX; + break; + default: + break; + } + + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + mask |= IXGBE_EIMS_FLOW_DIR; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); + if (queues) + ixgbe_irq_enable_queues(adapter, ~0); + if (flush) + IXGBE_WRITE_FLUSH(&adapter->hw); +} + +static irqreturn_t ixgbe_msix_other(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + + /* + * Workaround for Silicon errata. Use clear-by-write instead + * of clear-by-read. Reading with EICS will return the + * interrupt causes without clearing, which later be done + * with the write to EICR. + */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICS); + + /* The lower 16bits of the EICR register are for the queue interrupts + * which should be masked here in order to not accidentally clear them if + * the bits are high when ixgbe_msix_other is called. 
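
/* Illustrative aside (not from this driver): ixgbe_irq_enable_queues and
 * ixgbe_irq_disable_queues above split a 64-bit per-queue mask across two
 * 32-bit EIMS_EX/EIMC_EX registers on 82599 and later parts, skipping the
 * write when a half is empty.  A user-space sketch of that split; fake_write
 * is a made-up stand-in for IXGBE_WRITE_REG.
 */
#include <stdio.h>
#include <stdint.h>

static void fake_write(int reg_half, uint32_t val)
{
        printf("EIMS_EX(%d) <= 0x%08x\n", reg_half, (unsigned)val);
}

static void enable_queues(uint64_t qmask)
{
        uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF);
        uint32_t hi = (uint32_t)(qmask >> 32);

        if (lo)                 /* skip the MMIO write when nothing is set */
                fake_write(0, lo);
        if (hi)
                fake_write(1, hi);
}

int main(void)
{
        enable_queues(((uint64_t)1 << 40) | 0x3);   /* queues 0, 1 and 40 */
        return 0;
}
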
There is a race + * condition otherwise which results in possible performance loss + * especially if the ixgbe_msix_other interrupt is triggering + * consistently (as it would when PPS is turned on for the X540 device) + */ + eicr &= 0xFFFF0000; + + IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + if (eicr & IXGBE_EICR_MAILBOX) + ixgbe_msg_task(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (eicr & IXGBE_EICR_ECC) { + e_info(link, "Received ECC Err, initiating reset\n"); + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } + /* Handle Flow Director Full threshold interrupt */ + if (eicr & IXGBE_EICR_FLOW_DIR) { + int reinit_count = 0; + int i; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; + if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, + &ring->state)) + reinit_count++; + } + if (reinit_count) { + /* no more flow director interrupts until after init */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); + adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + ixgbe_service_event_schedule(adapter); + } + } + ixgbe_check_sfp_event(adapter, eicr); + ixgbe_check_overtemp_event(adapter, eicr); + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, eicr); + + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter, eicr); + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) +{ + struct ixgbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * ixgbe_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +int ixgbe_poll(struct napi_struct *napi, int budget) +{ + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + + ixgbe_for_each_ring(ring, q_vector->tx) + clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); + + if (!ixgbe_qv_lock_napi(q_vector)) + return budget; + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ixgbe_for_each_ring(ring, q_vector->rx) + clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget) < per_ring_budget); + + ixgbe_qv_unlock_napi(q_vector); + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + 
ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); + + return 0; +} + +/** + * ixgbe_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto free_queue_irqs; + } + /* If Flow Director is enabled, set interrupt affinity */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, + &q_vector->affinity_mask); + } + } + + err = request_irq(adapter->msix_entries[vector].vector, + ixgbe_msix_other, 0, netdev->name, adapter); + if (err) { + e_err(probe, "request_irq for msix_other failed: %d\n", err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return err; +} + +/** + * ixgbe_intr - legacy mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t ixgbe_intr(int irq, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; + u32 eicr; + + /* + * Workaround for silicon errata #26 on 82598. Mask the interrupt + * before the read of EICR. + */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read + * therefore no explicit interrupt disable is necessary */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + if (!eicr) { + /* + * shared interrupt alert! + * make sure interrupts are enabled because the read will + * have disabled interrupts due to EIAM + * finish the workaround of silicon errata on 82598. Unmask + * the interrupt that we masked before the EICR read. 
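
/* Illustrative aside (not from this driver): ixgbe_poll above splits the NAPI
 * budget evenly across the vector's Rx rings but never lets a ring's share
 * fall below one, since a zero per-ring budget would end cleaning too early.
 * A user-space sketch of that split; the helper name is made up.
 */
#include <stdio.h>

static int per_ring_budget(int budget, int rx_rings)
{
        if (rx_rings <= 1)
                return budget;
        return budget / rx_rings > 1 ? budget / rx_rings : 1;
}

int main(void)
{
        printf("%d\n", per_ring_budget(64, 3));  /* 21 */
        printf("%d\n", per_ring_budget(64, 1));  /* 64 */
        printf("%d\n", per_ring_budget(2, 5));   /* 1  */
        return 0;
}
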
+ */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); + return IRQ_NONE; /* Not our interrupt */ + } + + if (eicr & IXGBE_EICR_LSC) + ixgbe_check_lsc(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + ixgbe_check_sfp_event(adapter, eicr); + /* Fall through */ + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (eicr & IXGBE_EICR_ECC) { + e_info(link, "Received ECC Err, initiating reset\n"); + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } + ixgbe_check_overtemp_event(adapter, eicr); + break; + default: + break; + } + + ixgbe_check_fan_failure(adapter, eicr); + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter, eicr); + + /* would disable interrupts here but EIAM disabled it */ + napi_schedule(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * ixgbe_poll will re-enable the queue interrupts + */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, false, false); + + return IRQ_HANDLED; +} + +/** + * ixgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int ixgbe_request_irq(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + err = ixgbe_request_msix_irqs(adapter); + else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) + err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, + netdev->name, adapter); + else + err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) + e_err(probe, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ixgbe_free_irq(struct ixgbe_adapter *adapter) +{ + int vector; + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + + free_irq(entry->vector, q_vector); + } + + free_irq(adapter->msix_entries[vector++].vector, adapter); +} + +/** + * ixgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); + break; + default: + break; + } + IXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); + } else { + synchronize_irq(adapter->pdev->irq); + 
} +} + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts + * + **/ +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) +{ + struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; + + ixgbe_write_eitr(q_vector); + + ixgbe_set_ivar(adapter, 0, 0, 0); + ixgbe_set_ivar(adapter, 1, 0, 0); + + e_info(hw, "Legacy interrupt IVAR setup done\n"); +} + +/** + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. + **/ +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = IXGBE_TXDCTL_ENABLE; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), + (tdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_tx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); + + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: + * - ITR is 0 as it could cause false TX hangs + * - ITR is set to > 100k int/sec and BQL is enabled + * + * In order to avoid issues WTHRESH + PTHRESH should always be equal + * to or less than the number of on chip descriptors, which is + * currently 40. + */ + if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) + txdctl |= (1 << 16); /* WTHRESH = 1 */ + else + txdctl |= (8 << 16); /* WTHRESH = 8 */ + + /* + * Setting PTHRESH to 32 both improves performance + * and avoids a TX hang with DFP enabled + */ + txdctl |= (1 << 8) | /* HTHRESH = 1 */ + 32; /* PTHRESH = 32 */ + + /* reinitialize flowdirector state */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + + /* initialize XPS */ + if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) { + struct ixgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(ring->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + + /* enable queue */ + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); + + /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* poll to verify queue is enabled */ + do { + usleep_range(1000, 2000); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} + +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rttdcs, mtqc; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + /* disable the arbiter while setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, 
rttdcs); + + /* set transmit pool layout */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + mtqc = IXGBE_MTQC_VT_ENA; + if (tcs > 4) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + mtqc |= IXGBE_MTQC_32VF; + else + mtqc |= IXGBE_MTQC_64VF; + } else { + if (tcs > 4) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + mtqc = IXGBE_MTQC_64Q_1PB; + } + + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); + + /* Enable Security TX Buffer IFG for multiple pb */ + if (tcs) { + u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + sectx |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); + } + + /* re-enable the arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); +} + +/** + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 dmatxctl; + u32 i; + + ixgbe_setup_mtqc(adapter); + + if (hw->mac.type != ixgbe_mac_82598EB) { + /* DMATXCTL.EN must be before Tx queues are enabled */ + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + } + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); + + srrctl |= IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx)); + + srrctl &= ~IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +#ifdef CONFIG_IXGBE_DCB +void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +#else +static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +#endif +{ + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + /* + * We should set the drop enable bit if: + * SR-IOV is enabled + * or + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. 
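
/* Illustrative aside (not from this driver): a user-space restatement of the
 * SRRCTL.DROP_EN rule described in the comment below -- drop is enabled when
 * SR-IOV is active, or when several Rx queues share the device while neither
 * link flow control nor PFC can apply Tx back-pressure.  Names are made up.
 */
#include <stdbool.h>
#include <stdio.h>

static bool want_rx_drop(int num_vfs, int num_rx_queues,
                         bool tx_pause, bool pfc_en)
{
        return num_vfs || (num_rx_queues > 1 && !tx_pause && !pfc_en);
}

int main(void)
{
        printf("%d\n", want_rx_drop(0, 8, false, false)); /* 1: drop enabled   */
        printf("%d\n", want_rx_drop(0, 8, true,  false)); /* 0: FC handles it  */
        printf("%d\n", want_rx_drop(4, 1, true,  true));  /* 1: SR-IOV forces  */
        return 0;
}
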
+ */ + if (adapter->num_vfs || (adapter->num_rx_queues > 1 && + !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); + } else { + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); + } +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 srrctl; + u8 reg_idx = rx_ring->reg_idx; + + if (hw->mac.type == ixgbe_mac_82598EB) { + u16 mask = adapter->ring_feature[RING_F_RSS].mask; + + /* + * if VMDq is not active we must program one srrctl register + * per RSS queue since we have enabled RDRXCTL.MVMEN + */ + reg_idx &= mask; + } + + /* configure header buffer length, needed for RSC */ + srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ + srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + + /* configure descriptor type */ + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); +} + +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + * - 82598/82599/X540: 128 + * - X550(non-SRIOV mode): 512 + * - X550(SRIOV mode): 64 + */ +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 128; + else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 512; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +static void ixgbe_store_reta(struct ixgbe_adapter *adapter) +{ + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + struct ixgbe_hw *hw = &adapter->hw; + u32 reta = 0; + u32 indices_multi; + u8 *indir_tbl = adapter->rss_indir_tbl; + + /* Fill out the redirection table as follows: + * - 82598: 8 bit wide entries containing pair of 4 bit RSS + * indices. + * - 82599/X540: 8 bit wide entries containing 4 bit RSS index + * - X550: 8 bit wide entries containing 6 bit RSS index + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + indices_multi = 0x11; + else + indices_multi = 0x1; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + if (i < 128) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + else + IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), + reta); + reta = 0; + } + } +} + +/** + * Write the RETA table to HW (for x550 devices in SRIOV mode) + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
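
/* Illustrative aside (not from this driver): ixgbe_store_reta above packs
 * four 8-bit indirection-table entries into each 32-bit RETA register.  On
 * 82598 the 0x11 multiplier copies a 4-bit RSS index into both nibbles of
 * its byte; on newer parts the index is stored once.  A user-space sketch of
 * that packing for one register's worth of entries (the driver shifts by
 * (i & 0x3) * 8 because its loop runs over the whole table).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_reta(const uint8_t idx[4], int is_82598)
{
        uint32_t mult = is_82598 ? 0x11 : 0x1;
        uint32_t reta = 0;
        int i;

        for (i = 0; i < 4; i++)
                reta |= (mult * idx[i]) << (i * 8);
        return reta;
}

int main(void)
{
        uint8_t idx[4] = { 1, 2, 3, 4 };

        printf("0x%08x\n", (unsigned)pack_reta(idx, 0));  /* 0x04030201 */
        printf("0x%08x\n", (unsigned)pack_reta(idx, 1));  /* 0x44332211 */
        return 0;
}
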
+ */ +static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) +{ + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + struct ixgbe_hw *hw = &adapter->hw; + u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); + vfreta = 0; + } + } +} + +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ixgbe_store_reta(adapter); +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + int i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), + adapter->rss_key[i]); + + /* Fill out the redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ixgbe_store_vfreta(adapter); +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc = 0, rss_field = 0, vfmrqc = 0; + u32 rxcsum; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + if (adapter->ring_feature[RING_F_RSS].mask) + mrqc = IXGBE_MRQC_RSSEN; + } else { + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (tcs > 4) + mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ + else if (tcs > 1) + mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + mrqc = IXGBE_MRQC_VMDQRSS32EN; + else + mrqc = IXGBE_MRQC_VMDQRSS64EN; + } else { + if (tcs > 4) + mrqc = IXGBE_MRQC_RTRSS8TCEN; + else if (tcs > 1) + mrqc = IXGBE_MRQC_RTRSS4TCEN; + else + mrqc = IXGBE_MRQC_RSSEN; + } + } + + /* Perform hash on these packet types */ + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + unsigned int pf_pool = adapter->num_vfs; + + /* Enable VF RSS mode */ + 
mrqc |= IXGBE_MRQC_MULTIPLE_RSS; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* Setup RSS through the VF registers */ + ixgbe_setup_vfreta(adapter); + vfmrqc = IXGBE_MRQC_RSSEN; + vfmrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); + } else { + ixgbe_setup_reta(adapter); + mrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } +} + +/** + * ixgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure + * @index: index of ring to set + **/ +static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_rsc_enabled(ring)) + return; + + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); + rscctrl |= IXGBE_RSCCTL_RSCEN; + /* + * we must limit the number of descriptors so that the + * total size of max desc * buf_len is not greater + * than 65536 + */ + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); +} + +#define IXGBE_MAX_RX_DESC_POLL 10 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (ixgbe_removed(hw->hw_addr)) + return; + /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (ixgbe_removed(hw->hw_addr)) + return; + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + ixgbe_disable_rx_queue(adapter, ring); + + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); + + ixgbe_configure_srrctl(adapter, ring); + ixgbe_configure_rscctl(adapter, ring); + 
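
/* Illustrative aside (not from this driver): the MAXDESC_16 choice in
 * ixgbe_configure_rscctl above follows from the 64 KB cap mentioned in its
 * comment -- the descriptors chained into one RSC must not cover more than
 * 65536 bytes.  A sketch of that bound for a few buffer sizes; since the Rx
 * buffers here are at most 4 KB, 16 descriptors is the largest hardware
 * setting that stays within the cap.
 */
#include <stdio.h>

int main(void)
{
        const unsigned bufsz[] = { 2048, 4096, 8192 };
        unsigned i;

        for (i = 0; i < sizeof(bufsz) / sizeof(bufsz[0]); i++)
                printf("buf %5u bytes -> at most %2u RSC descriptors\n",
                       bufsz[i], 65536 / bufsz[i]);
        return 0;
}
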
+ if (hw->mac.type == ixgbe_mac_82598EB) { + /* + * enable cache line friendly hardware writes: + * PTHRESH=32 descriptors (half the internal cache), + * this also removes ugly rx_no_buffer_count increment + * HTHRESH=4 descriptors (to minimize latency on fetch) + * WTHRESH=8 burst writeback up to two cache lines + */ + rxdctl &= ~0x3FFFFF; + rxdctl |= 0x080420; + } + + /* enable receive descriptor ring */ + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + ixgbe_rx_desc_queue_enable(adapter, ring); + ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring)); +} + +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; + u16 pool; + + /* PSRTYPE must be initialized in non 82598 adapters */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + + for_each_set_bit(pool, &adapter->fwd_bitmask, 32) + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); +} + +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + u32 gcr_ext, vmdctl; + int i; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + + /* Enable only the PF's pool for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); + if (adapter->bridge_mode == BRIDGE_MODE_VEB) + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ + hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); + + /* + * Set up VF register offsets for selected VT Mode, + * i.e. 
32 or 64 VFs for SR-IOV + */ + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; + break; + default: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; + break; + } + + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + + + /* Enable MAC Anti-Spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), + adapter->num_vfs); + + /* Ensure LLDP is set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in loop below + */ + if (hw->mac.ops.set_ethertype_anti_spoofing) + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), + (IXGBE_ETQF_FILTER_EN | /* enable filter */ + IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */ + IXGBE_ETH_P_LLDP)); /* LLDP eth type */ + + /* For VFs that have spoof checking turned off */ + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vfinfo[i].spoofchk_enabled) + ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); + + /* enable ethertype anti spoofing if hw supports it */ + if (hw->mac.ops.set_ethertype_anti_spoofing) + hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + + /* Enable/Disable RSS query feature */ + ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, + adapter->vfinfo[i].rss_query_enabled); + } +} + +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + struct ixgbe_ring *rx_ring; + int i; + u32 mhadd, hlreg0; + +#ifdef IXGBE_FCOE + /* adjust max frame to be able to do baby jumbo for FCoE */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; + +#endif /* IXGBE_FCOE */ + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); + else + clear_ring_rsc_enabled(rx_ring); + } +} + +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_82598EB: + /* + * For VMDq support of different descriptor types or + * buffer sizes through the use of multiple SRRCTL + * registers, RDRXCTL.MVMEN must be set to 1 + * + * also, the manual doesn't mention it clearly but DCA hints + * will only use queue 0's tags unless this bit is set. 
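
/* Illustrative aside (not from this driver): ixgbe_set_rx_buffer_len above
 * derives the maximum accepted frame size from the MTU and programs it into
 * the MFS field of MHADD (bits 31:16).  A user-space sketch with the usual
 * Ethernet constants (14-byte header, 4-byte FCS, 1518-byte standard-frame
 * floor); unlike the driver it does not merge with the existing register
 * contents, and the helper name is made up.
 */
#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN      14
#define ETH_FCS_LEN   4
#define ETH_FRAME_LEN 1514
#define MFS_SHIFT     16

static uint32_t mhadd_for_mtu(unsigned mtu)
{
        unsigned max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

        if (max_frame < ETH_FRAME_LEN + ETH_FCS_LEN)
                max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
        return (uint32_t)max_frame << MFS_SHIFT;
}

int main(void)
{
        printf("MTU 1500 -> MHADD 0x%08x (MFS %u)\n",
               (unsigned)mhadd_for_mtu(1500),
               (unsigned)(mhadd_for_mtu(1500) >> MFS_SHIFT));
        printf("MTU 9000 -> MHADD 0x%08x (MFS %u)\n",
               (unsigned)mhadd_for_mtu(9000),
               (unsigned)(mhadd_for_mtu(9000) >> MFS_SHIFT));
        return 0;
}
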
Side + * effects of setting this bit are only that SRRCTL must be + * fully programmed [0..15] + */ + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + /* Disable RSC for ACK packets */ + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + /* hardware requires some bits to be set by default */ + rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + break; + default: + /* We should do nothing since we don't know this hardware */ + return; + } + + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); +} + +/** + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl, rfctl; + + /* disable receives while setting up the descriptors */ + hw->mac.ops.disable_rx(hw); + + ixgbe_setup_psrtype(adapter); + ixgbe_setup_rdrxctl(adapter); + + /* RSC Setup */ + rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); + rfctl &= ~IXGBE_RFCTL_RSC_DIS; + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + rfctl |= IXGBE_RFCTL_RSC_DIS; + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); + + /* Program registers for the distribution of queues */ + ixgbe_setup_mrqc(adapter); + + /* set_rx_buffer_len must be called before ring initialization */ + ixgbe_set_rx_buffer_len(adapter); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* disable drop enable for 82598 parts */ + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + + /* enable all receives */ + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + +static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* add VID to filter table */ + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true); + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + /* remove VID from filter table */ + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false); + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +/** + * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->l2_accel_priv) + continue; + j = ring->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + 
break; + default: + break; + } +} + +/** + * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + int i, j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + if (ring->l2_accel_priv) + continue; + j = ring->reg_idx; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); + vlnctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); + } + break; + default: + break; + } +} + +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +{ + u16 vid; + + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +/** + * ixgbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +static int ixgbe_write_mc_addr_list(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (!netif_running(netdev)) + return 0; + + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, netdev); + else + return -ENOMEM; + +#ifdef CONFIG_PCI_IOV + ixgbe_restore_vf_multicasts(adapter); +#endif + + return netdev_mc_count(netdev); +} + +#ifdef CONFIG_PCI_IOV +void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr, + adapter->mac_table[i].queue, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); + + adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED); + } +} +#endif + +static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].queue, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); + + adapter->mac_table[i].state &= + ~(IXGBE_MAC_STATE_MODIFIED); + } + } +} + +static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) +{ + int i; + struct ixgbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; + eth_zero_addr(adapter->mac_table[i].addr); + adapter->mac_table[i].queue = 0; + } + ixgbe_sync_mac_table(adapter); +} + +static int ixgbe_available_rars(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; +} + +/* this function 
destroys the first RAR entry */ +static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter, + u8 *addr) +{ + struct ixgbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].queue = VMDQ_P(0); + adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | + IXGBE_MAC_STATE_IN_USE); + hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].queue, + IXGBE_RAH_AV); +} + +int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + continue; + adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | + IXGBE_MAC_STATE_IN_USE); + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + ixgbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; +} + +int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) +{ + /* search table for addr, if found, set to 0 and sync */ + int i; + struct ixgbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr) && + adapter->mac_table[i].queue == queue) { + adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; + eth_zero_addr(adapter->mac_table[i].addr); + adapter->mac_table[i].queue = 0; + ixgbe_sync_mac_table(adapter); + return 0; + } + } + return -ENOMEM; +} +/** + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + netdev_for_each_uc_addr(ha, netdev) { + ixgbe_del_mac_filter(adapter, ha->addr, vfn); + ixgbe_add_mac_filter(adapter, ha->addr, vfn); + count++; + } + } + return count; +} + +/** + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. 
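
/* Illustrative aside (not from this driver): ixgbe_add_mac_filter and
 * ixgbe_del_mac_filter above keep a software shadow of the hardware RAR
 * table and only push changed entries to the device.  A stripped-down
 * user-space model of the slot management (state flags reduced to a single
 * in_use bit, no hardware sync); all names are made up.
 */
#include <stdio.h>
#include <string.h>

#define NUM_RAR 16

struct rar { unsigned char addr[6]; int queue; int in_use; };
static struct rar table[NUM_RAR];

static int add_filter(const unsigned char *addr, int queue)
{
        int i;

        for (i = 0; i < NUM_RAR; i++) {
                if (table[i].in_use)
                        continue;
                memcpy(table[i].addr, addr, 6);
                table[i].queue = queue;
                table[i].in_use = 1;
                return i;               /* index of the RAR slot used */
        }
        return -1;                      /* table full */
}

static int del_filter(const unsigned char *addr, int queue)
{
        int i;

        for (i = 0; i < NUM_RAR; i++) {
                if (table[i].in_use && table[i].queue == queue &&
                    !memcmp(table[i].addr, addr, 6)) {
                        memset(&table[i], 0, sizeof(table[i]));
                        return 0;
                }
        }
        return -1;                      /* no such filter */
}

int main(void)
{
        unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        printf("add -> slot %d\n", add_filter(mac, 0));
        printf("del -> %d\n", del_filter(mac, 0));
        return 0;
}
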
+ **/ +void ixgbe_set_rx_mode(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + u32 vlnctrl; + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + + /* set all bits that we expect to always be set */ + fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ + fctrl |= IXGBE_FCTRL_PMCF; + + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= IXGBE_VMOLR_MPE; + /* Only disable hardware filter vlans in promiscuous mode + * if SR-IOV and VMDQ are disabled - otherwise ensure + * that hardware VLAN filters remain enabled. + */ + if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED)) + vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } + vlnctrl |= IXGBE_VLNCTRL_VFE; + hw->addr_ctrl.user_set_promisc = false; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0)); + if (count < 0) { + fctrl |= IXGBE_FCTRL_UPE; + vmolr |= IXGBE_VMOLR_ROPE; + } + + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = ixgbe_write_mc_addr_list(netdev); + if (count < 0) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } else if (count) { + vmolr |= IXGBE_VMOLR_ROMPE; + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & + ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | + IXGBE_VMOLR_ROPE); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); + } + + /* This is useful for sniffing bad packets. 
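
/* Illustrative aside (not from this driver): a compressed restatement of how
 * ixgbe_set_rx_mode below chooses unicast/multicast promiscuous operation.
 * The bit values are illustrative, not the hardware FCTRL encoding; the key
 * point is the fallback -- an address list that cannot be written in full
 * degrades into the corresponding promiscuous bit.  Names are made up.
 */
#include <stdbool.h>
#include <stdio.h>

#define RX_UPE 0x1   /* unicast promiscuous   */
#define RX_MPE 0x2   /* multicast promiscuous */

static unsigned rx_mode(bool promisc, bool allmulti,
                        int uc_written, int mc_written)
{
        unsigned fctrl = 0;

        if (promisc)
                return RX_UPE | RX_MPE;
        if (allmulti || mc_written < 0)
                fctrl |= RX_MPE;
        if (uc_written < 0)             /* RAR table overflow */
                fctrl |= RX_UPE;
        return fctrl;
}

int main(void)
{
        printf("0x%x\n", rx_mode(false, false, -1, 12)); /* 0x1 */
        printf("0x%x\n", rx_mode(false, true, 3, 12));   /* 0x2 */
        return 0;
}
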
*/ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode */ + fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ + IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ + IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ + + fctrl &= ~(IXGBE_FCTRL_DPF); + /* NOTE: VLAN filtering is disabled by setting PROMISC */ + } + + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + ixgbe_vlan_strip_enable(adapter); + else + ixgbe_vlan_strip_disable(adapter); +} + +static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + ixgbe_qv_init_lock(adapter->q_vector[q_idx]); + napi_enable(&adapter->q_vector[q_idx]->napi); + } +} + +static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { + napi_disable(&adapter->q_vector[q_idx]->napi); + while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } + } +} + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_configure_dcb - Configure DCB hardware + * @adapter: ixgbe adapter struct + * + * This is called by the driver on open to configure the DCB hardware. + * This is also called by the gennetlink interface when reconfiguring + * the DCB state. + */ +static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 65536); + return; + } + + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 32768); + +#ifdef IXGBE_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + /* reconfigure the hardware */ + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_RX_CONFIG); + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { + ixgbe_dcb_hw_ets(&adapter->hw, + adapter->ixgbe_ieee_ets, + max_frame); + ixgbe_dcb_hw_pfc_config(&adapter->hw, + adapter->ixgbe_ieee_pfc->pfc_en, + adapter->ixgbe_ieee_ets->prio_tc); + } + + /* Enable RSS Hash per TC */ + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 msb = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; + + while (rss_i) { + msb++; + rss_i >>= 1; + } + + /* write msb to all 8 TCs in one write */ + IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); + } +} +#endif + +/* Additional bittime to account for IXGBE framing */ +#define IXGBE_ETH_FRAMING 20 + +/** + * ixgbe_hpbthresh - calculate high water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb: packet buffer to calculate + */ +static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; + +#ifdef IXGBE_FCOE + /* FCoE traffic class uses FCOE jumbo frames */ + 
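
/* Illustrative aside (not from this driver): the "write msb to all 8 TCs in
 * one write" step in ixgbe_configure_dcb above relies on RQTC holding eight
 * 4-bit fields, so multiplying a nibble by 0x11111111 replicates it into
 * every field.  A user-space sketch of that computation; the helper name is
 * made up.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t rqtc_for_rss(unsigned rss_indices)
{
        unsigned rss_i = rss_indices - 1;
        unsigned msb = 0;

        while (rss_i) {         /* bits needed to address rss_indices queues */
                msb++;
                rss_i >>= 1;
        }
        return msb * 0x11111111u;
}

int main(void)
{
        printf("16 RSS queues -> RQTC 0x%08x\n", (unsigned)rqtc_for_rss(16));
        printf(" 4 RSS queues -> RQTC 0x%08x\n", (unsigned)rqtc_for_rss(4));
        return 0;
}
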
if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == ixgbe_fcoe_get_tc(adapter))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif + + /* Calculate delay value for device */ + switch (hw->mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + dv_id = IXGBE_DV_X540(link, tc); + break; + default: + dv_id = IXGBE_DV(link, tc); + break; + } + + /* Loopback switch introduces additional latency */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + dv_id += IXGBE_B2BT(tc); + + /* Delay value is calculated in bit times; convert to KB */ + kb = IXGBE_BT2KB(dv_id); + rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide the required headroom. In this case, warn the user and + * do the best we can. + */ + if (marker < 0) { + e_warn(drv, "Packet Buffer(%i) can not provide enough " + "headroom to support flow control. " + "Decrease MTU or number of traffic classes\n", pb); + marker = tc + 1; + } + + return marker; +} + +/** + * ixgbe_lpbthresh - calculate low water mark for flow control + * + * @adapter: board private structure to calculate for + * @pb: packet buffer to calculate + */ +static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#ifdef IXGBE_FCOE + /* FCoE traffic class uses FCOE jumbo frames */ + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; +#endif + + /* Calculate delay value for device */ + switch (hw->mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + dv_id = IXGBE_LOW_DV_X540(tc); + break; + default: + dv_id = IXGBE_LOW_DV(tc); + break; + } + + /* Delay value is calculated in bit times; convert to KB */ + return IXGBE_BT2KB(dv_id); +} + +/* + * ixgbe_pbthresh_setup - calculate and set up high and low water marks + */ +static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int num_tc = netdev_get_num_tc(adapter->netdev); + int i; + + if (!num_tc) + num_tc = 1; + + for (i = 0; i < num_tc; i++) { + hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); + + /* Low water marks must not be larger than high water marks */ + if (hw->fc.low_water[i] > hw->fc.high_water[i]) + hw->fc.low_water[i] = 0; + } + + for (; i < MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; +} + +static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int hdrm; + u8 tc = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || + adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + hdrm = 32 << adapter->fdir_pballoc; + else + hdrm = 0; + + hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + ixgbe_pbthresh_setup(adapter); +} + +static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) + ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); + + hlist_for_each_entry_safe(filter, node2, +
&adapter->fdir_filter_list, fdir_node) { + ixgbe_fdir_write_perfect_filter_82599(hw, + &filter->filter, + filter->sw_idx, + (filter->action == IXGBE_FDIR_DROP_QUEUE) ? + IXGBE_FDIR_DROP_QUEUE : + adapter->rx_ring[filter->action]->reg_idx); + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, + struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmolr; + + /* No unicast promiscuous support for VMDQ devices. */ + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); + + /* clear the affected bit */ + vmolr &= ~IXGBE_VMOLR_MPE; + + if (dev->flags & IFF_ALLMULTI) { + vmolr |= IXGBE_VMOLR_MPE; + } else { + vmolr |= IXGBE_VMOLR_ROMPE; + hw->mac.ops.update_mc_addr_list(hw, dev); + } + ixgbe_write_uc_addr_list(adapter->netdev, pool); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); +} + +static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) +{ + struct ixgbe_adapter *adapter = vadapter->real_adapter; + int rss_i = adapter->num_rx_queues_per_pool; + struct ixgbe_hw *hw = &adapter->hw; + u16 pool = vadapter->pool; + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); +} + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + if (IXGBE_CB(skb)->page_released) + dma_unmap_page(dev, + IXGBE_CB(skb)->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + + rx_buffer->page = NULL; + } + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, + struct ixgbe_ring *rx_ring) +{ + struct ixgbe_adapter *adapter = vadapter->real_adapter; + int index = rx_ring->queue_index + vadapter->rx_base_queue; + + /* shutdown specific queue receive and wait for dma to settle */ + ixgbe_disable_rx_queue(adapter, rx_ring); + usleep_range(10000, 20000); + ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); + ixgbe_clean_rx_ring(rx_ring); + rx_ring->l2_accel_priv = NULL; +} + +static int ixgbe_fwd_ring_down(struct net_device *vdev, + struct ixgbe_fwd_adapter *accel) +{ + struct ixgbe_adapter *adapter = accel->real_adapter; + unsigned int rxbase = accel->rx_base_queue; + unsigned int txbase = accel->tx_base_queue; + int i; + + netif_tx_stop_all_queues(vdev); + + for (i 
= 0; i < adapter->num_rx_queues_per_pool; i++) { + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; + } + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; + adapter->tx_ring[txbase + i]->netdev = adapter->netdev; + } + + + return 0; +} + +static int ixgbe_fwd_ring_up(struct net_device *vdev, + struct ixgbe_fwd_adapter *accel) +{ + struct ixgbe_adapter *adapter = accel->real_adapter; + unsigned int rxbase, txbase, queues; + int i, baseq, err = 0; + + if (!test_bit(accel->pool, &adapter->fwd_bitmask)) + return 0; + + baseq = accel->pool * adapter->num_rx_queues_per_pool; + netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + accel->pool, adapter->num_rx_pools, + baseq, baseq + adapter->num_rx_queues_per_pool, + adapter->fwd_bitmask); + + accel->netdev = vdev; + accel->rx_base_queue = rxbase = baseq; + accel->tx_base_queue = txbase = baseq; + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) + ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->rx_ring[rxbase + i]->netdev = vdev; + adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); + } + + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { + adapter->tx_ring[txbase + i]->netdev = vdev; + adapter->tx_ring[txbase + i]->l2_accel_priv = accel; + } + + queues = min_t(unsigned int, + adapter->num_rx_queues_per_pool, vdev->num_tx_queues); + err = netif_set_real_num_tx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + err = netif_set_real_num_rx_queues(vdev, queues); + if (err) + goto fwd_queue_err; + + if (is_valid_ether_addr(vdev->dev_addr)) + ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); + + ixgbe_fwd_psrtype(accel); + ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); + return err; +fwd_queue_err: + ixgbe_fwd_ring_down(vdev, accel); + return err; +} + +static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) +{ + struct net_device *upper; + struct list_head *iter; + int err; + + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + + if (dfwd->fwd_priv) { + err = ixgbe_fwd_ring_up(upper, vadapter); + if (err) + continue; + } + } + } +} + +static void ixgbe_configure(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + ixgbe_configure_pb(adapter); +#ifdef CONFIG_IXGBE_DCB + ixgbe_configure_dcb(adapter); +#endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); + + ixgbe_set_rx_mode(adapter->netdev); + ixgbe_restore_vlan(adapter); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + hw->mac.ops.disable_rx_buff(hw); + break; + default: + break; + } + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + ixgbe_init_fdir_signature_82599(&adapter->hw, + adapter->fdir_pballoc); + } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + ixgbe_init_fdir_perfect_82599(&adapter->hw, + adapter->fdir_pballoc); + ixgbe_fdir_filter_restore(adapter); + } + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + hw->mac.ops.enable_rx_buff(hw); + break; + default: + break; + } + +#ifdef IXGBE_FCOE + /* 
configure FCoE L2 filters, redirection table, and Rx control */ + ixgbe_configure_fcoe(adapter); + +#endif /* IXGBE_FCOE */ + ixgbe_configure_tx(adapter); + ixgbe_configure_rx(adapter); + ixgbe_configure_dfwd(adapter); +} + +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->phy.type) { + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + case ixgbe_phy_sfp_active_unknown: + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_qsfp_passive_unknown: + case ixgbe_phy_qsfp_active_unknown: + case ixgbe_phy_qsfp_intel: + case ixgbe_phy_qsfp_unknown: + /* ixgbe_phy_none is set when no SFP module is present */ + case ixgbe_phy_none: + return true; + case ixgbe_phy_nl: + if (hw->mac.type == ixgbe_mac_82598EB) + return true; + default: + return false; + } +} + +/** + * ixgbe_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) +{ + /* + * We are assuming the worst case scenario here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled. As such the best + * solution is to just start searching as soon as we start + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; +} + +/** + * ixgbe_non_sfp_link_config - set up non-SFP+ link + * @hw: pointer to private hardware struct + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) +{ + u32 speed; + bool autoneg, link_up = false; + u32 ret = IXGBE_ERR_LINK_SETUP; + + if (hw->mac.ops.check_link) + ret = hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (ret) + return ret; + + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) + ret = hw->mac.ops.get_link_capabilities(hw, &speed, + &autoneg); + if (ret) + return ret; + + if (hw->mac.ops.setup_link) + ret = hw->mac.ops.setup_link(hw, speed, link_up); + + return ret; +} + +static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gpie = 0; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD; + gpie |= IXGBE_GPIE_EIAME; + /* + * use EIAM to auto-mask when MSI-X interrupt is asserted + * this saves a register write for every interrupt + */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + default: + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + break; + } + } else { + /* legacy interrupts, use EIAM to auto-mask when reading EICR, + * specifically only auto mask tx and rx interrupts */ + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } + + /* XXX: to interrupt immediately for EICS writes, enable this */ + /* gpie |= IXGBE_GPIE_EIMEN; */ + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_32; + break; + default: + gpie |= 
IXGBE_GPIE_VTMODE_64; + break; + } + } + + /* Enable Thermal over heat sensor interrupt */ + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + gpie |= IXGBE_SDP0_GPIEN; + break; + case ixgbe_mac_X540: + gpie |= IXGBE_EIMS_TS; + break; + default: + break; + } + } + + /* Enable fan failure interrupt */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + gpie |= IXGBE_SDP1_GPIEN; + + if (hw->mac.type == ixgbe_mac_82599EB) { + gpie |= IXGBE_SDP1_GPIEN; + gpie |= IXGBE_SDP2_GPIEN; + } + + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); +} + +static void ixgbe_up_complete(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + u32 ctrl_ext; + + ixgbe_get_hw_control(adapter); + ixgbe_setup_gpie(adapter); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + ixgbe_configure_msix(adapter); + else + ixgbe_configure_msi_and_legacy(adapter); + + /* enable the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + + smp_mb__before_atomic(); + clear_bit(__IXGBE_DOWN, &adapter->state); + ixgbe_napi_enable_all(adapter); + + if (ixgbe_is_sfp(hw)) { + ixgbe_sfp_link_config(adapter); + } else { + err = ixgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + } + + /* clear any pending interrupts, may auto mask */ + IXGBE_READ_REG(hw, IXGBE_EICR); + ixgbe_irq_enable(adapter, true, true); + + /* + * If this adapter has a fan, check to see if we had a failure + * before we enabled the interrupt. + */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + e_crit(drv, "Fan has stopped, replace the adapter\n"); + } + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); +} + +void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + adapter->netdev->trans_start = jiffies; + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); + ixgbe_up(adapter); + clear_bit(__IXGBE_RESETTING, &adapter->state); +} + +void ixgbe_up(struct ixgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + ixgbe_configure(adapter); + + ixgbe_up_complete(adapter); +} + +void ixgbe_reset(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err; + u8 old_addr[ETH_ALEN]; + + if (ixgbe_removed(hw->hw_addr)) + return; + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | + IXGBE_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + + err = hw->mac.ops.init_hw(hw); + switch (err) { + case 0: + case IXGBE_ERR_SFP_NOT_PRESENT: + case IXGBE_ERR_SFP_NOT_SUPPORTED: + break; + case IXGBE_ERR_MASTER_REQUESTS_PENDING: + e_dev_err("master disable timed out\n"); + break; + case IXGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated with " + "your hardware. If you are experiencing problems " + "please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + ixgbe_flush_sw_mac_table(adapter); + ixgbe_mac_set_default_filter(adapter, old_addr); + + /* update SAN MAC vmdq pool selection */ + if (hw->mac.san_mac_rar_index) + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_reset(adapter); +} + +/** + * ixgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned long size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_clean_tx_ring(adapter->tx_ring[i]); +} + +static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) +{ + struct hlist_node *node2; + struct 
ixgbe_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node2, + &adapter->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +void ixgbe_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *upper; + struct list_head *iter; + int i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + /* disable receives */ + hw->mac.ops.disable_rx(hw); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + + usleep_range(10000, 20000); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + /* disable any upper devices */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) { + netif_tx_stop_all_queues(upper); + netif_carrier_off(upper); + netif_tx_disable(upper); + } + } + } + + ixgbe_irq_disable(adapter); + + ixgbe_napi_disable_all(adapter); + + adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | + IXGBE_FLAG2_RESET_REQUESTED); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); + + if (adapter->num_vfs) { + /* Clear EITR Select mapping */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = false; + + /* ping all the active vfs to let them know we are going down */ + ixgbe_ping_all_vfs(adapter); + + /* Disable all VFTE/VFRE TX/RX */ + ixgbe_disable_tx_rx(adapter); + } + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + } + + /* Disable the Tx DMA engine on 82599 and later MAC */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); + break; + default: + break; + } + + if (!pci_channel_offline(adapter->pdev)) + ixgbe_reset(adapter); + + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); + + ixgbe_clean_all_tx_rings(adapter); + ixgbe_clean_all_rx_rings(adapter); + +#ifdef CONFIG_IXGBE_DCA + /* since we reset the hardware DCA settings were cleared */ + ixgbe_setup_dca(adapter); +#endif +} + +/** + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbe_tx_timeout(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + ixgbe_tx_timeout_reset(adapter); +} + +/** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) + * @adapter: board private structure to initialize + * + * ixgbe_sw_init initializes the Adapter private data 
structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int ixgbe_sw_init(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned int rss, fdir; + u32 fwsm; +#ifdef CONFIG_IXGBE_DCB + int j; + struct tc_configuration *tc; +#endif + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Set common capability flags and settings */ + rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); + adapter->ring_feature[RING_F_RSS].limit = rss; + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + adapter->max_q_vectors = MAX_Q_VECTORS_82599; + adapter->atr_sample_rate = 20; + fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +#ifdef CONFIG_IXGBE_DCA + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; +#endif +#ifdef IXGBE_FCOE + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB + /* Default traffic class to use for FCoE */ + adapter->fcoe.up = IXGBE_FCOE_DEFTC; +#endif /* CONFIG_IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + + adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + + /* Set MAC specific capability flags and exceptions */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + + if (hw->device_id == IXGBE_DEV_ID_82598AT) + adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; + + adapter->max_q_vectors = MAX_Q_VECTORS_82598; + adapter->ring_feature[RING_F_FDIR].limit = 0; + adapter->atr_sample_rate = 0; + adapter->fdir_pballoc = 0; +#ifdef IXGBE_FCOE + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +#ifdef CONFIG_IXGBE_DCB + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + break; + case ixgbe_mac_82599EB: + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + case ixgbe_mac_X540: + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + if (fwsm & IXGBE_FWSM_TS_ENABLED) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550: +#ifdef CONFIG_IXGBE_DCA + adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; +#endif + break; + default: + break; + } + +#ifdef IXGBE_FCOE + /* FCoE support exists, always init the FCoE lock */ + spin_lock_init(&adapter->fcoe.lock); + +#endif + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + +#ifdef CONFIG_IXGBE_DCB + switch (hw->mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; + break; + default: + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + break; + } + + /* Configure DCB traffic classes */ + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[DCB_TX_CONFIG].bwg_id = 0; + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); + 
tc->path[DCB_RX_CONFIG].bwg_id = 0; + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); + tc->dcb_pfc = pfc_disabled; + } + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc = &adapter->dcb_cfg.tc_config[0]; + tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_set_bitmap = 0x00; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); + +#endif + + /* default flow control settings */ + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ + ixgbe_pbthresh_setup(adapter); + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; + hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); + +#ifdef CONFIG_PCI_IOV + if (max_vfs > 0) + e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); + + /* assign number of SR-IOV VFs */ + if (hw->mac.type != ixgbe_mac_82598EB) { + if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { + adapter->num_vfs = 0; + e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n"); + } else { + adapter->num_vfs = max_vfs; + } + } +#endif /* CONFIG_PCI_IOV */ + + /* enable itr by default in dynamic mode */ + adapter->rx_itr_setting = 1; + adapter->tx_itr_setting = 1; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBE_DEFAULT_TXD; + adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + + /* set default work limits */ + adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; + + /* initialize eeprom parameters */ + if (ixgbe_init_eeprom_params_generic(hw)) { + e_dev_err("EEPROM initialization failed\n"); + return -EIO; + } + + /* PF holds first pool slot */ + set_bit(0, &adapter->fwd_bitmask); + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int ring_node = -1; + int size; + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + ring_node = tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + u64_stats_init(&tx_ring->syncp); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, ring_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_tx_resources - allocate all queues Tx 
resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int ring_node = -1; + int size; + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + ring_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, ring_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); + if (!err) + continue; + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + +#ifdef IXGBE_FCOE + err = ixgbe_setup_fcoe_ddp_resources(adapter); + if (!err) +#endif + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * ixgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) +{ + ixgbe_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]->desc) + ixgbe_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ixgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) +{ + ixgbe_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i; + +#ifdef IXGBE_FCOE + ixgbe_free_fcoe_ddp_resources(adapter); + +#endif + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]->desc) + ixgbe_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ixgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + + /* + * For 82599EB we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. 
+ */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + (adapter->hw.mac.type == ixgbe_mac_82599EB) && + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + + return 0; +} + +/** + * ixgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbe_open(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err, queues; + + /* disallow open during test */ + if (test_bit(__IXGBE_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbe_configure(adapter); + + err = ixgbe_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + if (adapter->num_rx_pools > 1) + queues = adapter->num_rx_queues_per_pool; + else + queues = adapter->num_tx_queues; + + err = netif_set_real_num_tx_queues(netdev, queues); + if (err) + goto err_set_queues; + + if (adapter->num_rx_pools > 1 && + adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) + queues = IXGBE_MAX_L2A_QUEUES; + else + queues = adapter->num_rx_queues; + err = netif_set_real_num_rx_queues(netdev, queues); + if (err) + goto err_set_queues; + + ixgbe_ptp_init(adapter); + + ixgbe_up_complete(adapter); + +#if IS_ENABLED(CONFIG_IXGBE_VXLAN) + vxlan_get_rx_port(netdev); + +#endif + return 0; + +err_set_queues: + ixgbe_free_irq(adapter); +err_req_irq: + ixgbe_free_all_rx_resources(adapter); +err_setup_rx: + ixgbe_free_all_tx_resources(adapter); +err_setup_tx: + ixgbe_reset(adapter); + + return err; +} + +static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) +{ + ixgbe_ptp_suspend(adapter); + + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); +} + +/** + * ixgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
+ **/ +static int ixgbe_close(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_ptp_stop(adapter); + + ixgbe_close_suspend(adapter); + + ixgbe_fdir_filter_exit(adapter); + + ixgbe_release_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__IXGBE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + ixgbe_reset(adapter); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + + rtnl_lock(); + err = ixgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = ixgbe_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ + +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl, fctrl; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + rtnl_lock(); + if (netif_running(netdev)) + ixgbe_close_suspend(adapter); + rtnl_unlock(); + + ixgbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (hw->mac.ops.stop_link_on_d3) + hw->mac.ops.stop_link_on_d3(hw); + + if (wufc) { + ixgbe_set_rx_mode(netdev); + + /* enable the optics for 82599 SFP+ fiber as we can WoL */ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & IXGBE_WUFC_MC) { + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + ctrl |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + + IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc); + } else { + IXGBE_WRITE_REG(hw, IXGBE_WUC, 0); + IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + pci_wake_from_d3(pdev, false); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + pci_wake_from_d3(pdev, !!wufc); + break; + default: + break; + } + + *enable_wake = !!wufc; + + ixgbe_release_hw_control(adapter); + + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __ixgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __ixgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + 
pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * ixgbe_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void ixgbe_update_stats(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; + u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; + + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + u64 rsc_count = 0; + u64 rsc_flush = 0; + for (i = 0; i < adapter->num_rx_queues; i++) { + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; + } + adapter->rsc_total_count = rsc_count; + adapter->rsc_total_flush = rsc_flush; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; + netdev->stats.rx_bytes = bytes; + netdev->stats.rx_packets = packets; + + bytes = 0; + packets = 0; + /* gather some stats to the adapter struct that are per queue */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; + netdev->stats.tx_bytes = bytes; + netdev->stats.tx_packets = packets; + + hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + + /* 8 register reads */ + for (i = 0; i < 8; i++) { + /* for packet buffers not used, the register should read 0 */ + mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + missed_rx += mpc; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; + hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + break; + default: + break; + } + } + + /*16 register reads */ + for (i = 0; i < 16; i++) { + hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + if ((hw->mac.type == ixgbe_mac_82599EB) || + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x)) { + hwstats->qbtc[i] += IXGBE_READ_REG(hw, 
IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ + hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */ + } + } + + hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + /* work around hardware counting issue */ + hwstats->gprc -= missed_rx; + + ixgbe_update_xoff_received(adapter); + + /* 82598 hardware only has a 32 bit counter in the high register */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* OS2BMC stats are X540 and later */ + hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); + hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); + hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); + hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); + case ixgbe_mac_82599EB: + for (i = 0; i < 16; i++) + adapter->hw_rx_no_dma_resources += + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +#ifdef IXGBE_FCOE + hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + /* Add up per cpu counters for total ddp aloc fail */ + if (adapter->fcoe.ddp_pool) { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct ixgbe_fcoe_ddp_pool *ddp_pool; + unsigned int cpu; + u64 noddp = 0, noddp_ext_buff = 0; + for_each_possible_cpu(cpu) { + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + noddp += ddp_pool->noddp; + noddp_ext_buff += ddp_pool->noddp_ext_buff; + } + hwstats->fcoe_noddp = noddp; + hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; + } +#endif /* IXGBE_FCOE */ + break; + default: + break; + } + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + hwstats->bprc += bprc; + hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + hwstats->mprc -= bprc; + hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + hwstats->lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + hwstats->lxofftxc += lxoff; + hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + /* + * 82598 errata - tx of flow control packets is included in tx counters + */ + xon_off_tot = lxon + lxoff; + hwstats->gptc -= xon_off_tot; + 
hwstats->mptc -= xon_off_tot; + hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hwstats->ptc64 -= xon_off_tot; + hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = hwstats->mprc; + + /* Rx Errors */ + netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; + netdev->stats.rx_dropped = 0; + netdev->stats.rx_length_errors = hwstats->rlec; + netdev->stats.rx_crc_errors = hwstats->crcerrs; + netdev->stats.rx_missed_errors = total_mpc; +} + +/** + * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + /* do nothing if we are not using signature filters */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) + return; + + adapter->fdir_overflow++; + + if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_bit(__IXGBE_TX_FDIR_INIT_DONE, + &(adapter->tx_ring[i]->state)); + /* re-enable flow director interrupts */ + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); + } else { + e_err(probe, "failed to finish FDIR re-initialization, " + "ignored adding FDIR ATR filters\n"); + } +} + +/** + * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
+ */ +static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 eics = 0; + int i; + + /* If we're down, removing or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + /* + * for legacy and MSI interrupts don't set any bits + * that are enabled for EIAM, because this operation + * would set *both* EIMS and EICS for any bit in EIAM + */ + IXGBE_WRITE_REG(hw, IXGBE_EICS, + (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); + } else { + /* get one bit for every active tx/rx interrupt vector */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct ixgbe_q_vector *qv = adapter->q_vector[i]; + if (qv->rx.ring || qv->tx.ring) + eics |= ((u64)1 << i); + } + } + + /* Cause software interrupt to ensure rings are cleaned */ + ixgbe_irq_rearm_queues(adapter, eics); +} + +/** + * ixgbe_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + * @link_speed: pointer to a u32 to store the link_speed + **/ +static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + + if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) + return; + + if (hw->mac.ops.check_link) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + } else { + /* always assume link is up, if no check link function */ + link_speed = IXGBE_LINK_SPEED_10GB_FULL; + link_up = true; + } + + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + + if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { + hw->mac.ops.fc_enable(hw); + ixgbe_set_rx_drop_en(adapter); + } + + if (link_up || + time_after(jiffies, (adapter->link_check_timeout + + IXGBE_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); + IXGBE_WRITE_FLUSH(hw); + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB + struct net_device *netdev = adapter->netdev; + struct dcb_app app = { + .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, + .protocol = 0, + }; + u8 up = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) + up = dcb_ieee_getapp_mask(netdev, &app); + + adapter->default_up = (up > 1) ? 
(ffs(up) - 1) : 0; +#endif +} + +/** + * ixgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *upper; + struct list_head *iter; + u32 link_speed = adapter->link_speed; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: { + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); + flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); + flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); + } + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_82599EB: { + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); + flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); + } + break; + default: + flow_tx = false; + flow_rx = false; + break; + } + + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_start_cyclecounter(adapter); + + e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == IXGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + "unknown speed"))), + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None")))); + + netif_carrier_on(netdev); + ixgbe_check_vf_rate_limit(adapter); + + /* enable transmits */ + netif_tx_wake_all_queues(adapter->netdev); + + /* enable any upper devices */ + rtnl_lock(); + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *vlan = netdev_priv(upper); + + if (vlan->fwd_priv) + netif_tx_wake_all_queues(upper); + } + } + rtnl_unlock(); + + /* update the default user priority for VFs */ + ixgbe_update_default_up(adapter); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); +} + +/** + * ixgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + /* poll for SFP+ cable when link is down */ + if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_start_cyclecounter(adapter); + + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); +} + +static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) +{ 
+ struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + int i, j; + + if (!adapter->num_vfs) + return false; + + /* resetting the PF is only needed for MAC before X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); + t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + +/** + * ixgbe_watchdog_flush_tx - flush queues on link down + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) +{ + if (!netif_carrier_ok(adapter->netdev)) { + if (ixgbe_ring_tx_pending(adapter) || + ixgbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ + e_warn(drv, "initiating reset to clear Tx work after link loss\n"); + adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; + } + } +} + +#ifdef CONFIG_PCI_IOV +static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + if (!pci_wait_for_pending_transaction(vfdev)) + e_dev_warn("Issuing VFLR with pending transactions\n"); + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + + msleep(100); +} + +static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + u32 gpc; + int pos; + unsigned short vf_id; + + if (!(netif_carrier_ok(adapter->netdev))) + return; + + gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); + if (gpc) /* If incrementing then no need for the check below */ + return; + /* Check to see if a bad DMA write target from an errant or + * malicious VF has caused a PCIe error. If so then we can + * issue a VFLR to the offending VF(s) and then resume without + * requesting a full slot reset. + */ + + if (!pdev) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + + /* get the device ID for the VF */ + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + /* check status reg for all VFs owned by this PF */ + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && (vfdev->physfn == pdev)) { + u16 status_reg; + + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg & PCI_STATUS_REC_MASTER_ABORT) + /* issue VFLR */ + ixgbe_issue_vf_flr(adapter, vfdev); + } + + vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + } +} + +static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check for 82598 or if not in IOV mode */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB || + adapter->num_vfs == 0) + return; + + ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
+ */ + if (!ssvpc) + return; + + e_warn(drv, "%u Spoofed packets detected\n", ssvpc); +} +#else +static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) +{ +} + +static void +ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) +{ +} +#endif /* CONFIG_PCI_IOV */ + + +/** + * ixgbe_watchdog_subtask - check and bring link up + * @adapter: pointer to the device adapter structure + **/ +static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) +{ + /* if interface is down, removing or resetting, do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + ixgbe_watchdog_update_link(adapter); + + if (adapter->link_up) + ixgbe_watchdog_link_is_up(adapter); + else + ixgbe_watchdog_link_is_down(adapter); + + ixgbe_check_for_bad_vf(adapter); + ixgbe_spoof_check(adapter); + ixgbe_update_stats(adapter); + + ixgbe_watchdog_flush_tx(adapter); +} + +/** + * ixgbe_sfp_detection_subtask - poll for SFP+ cable + * @adapter: the ixgbe adapter structure + **/ +static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + s32 err; + + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + err = hw->phy.ops.identify_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + if (err == IXGBE_ERR_SFP_NOT_PRESENT) { + /* If no cable is present, then we need to reset + * the next time we find a good cable. */ + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; + } + + /* exit on error */ + if (err) + goto sfp_out; + + /* exit if reset not needed */ + if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + goto sfp_out; + + adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; + + /* + * A module may be identified correctly, but the EEPROM may not have + * support for that module. setup_sfp() will fail in that case, so + * we should not allow that module to load. 
+ */ + if (hw->mac.type == ixgbe_mac_82598EB) + err = hw->phy.ops.reset(hw); + else + err = hw->mac.ops.setup_sfp(hw); + + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; + + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type); + +sfp_out: + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + + if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && + (adapter->netdev->reg_state == NETREG_REGISTERED)) { + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + e_dev_err("Reload the driver after installing a " + "supported module.\n"); + unregister_netdev(adapter->netdev); + } +} + +/** + * ixgbe_sfp_link_config_subtask - set up link SFP after module install + * @adapter: the ixgbe adapter structure + **/ +static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 speed; + bool autoneg = false; + + if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) + return; + + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; + + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) { + hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); + + /* setup the highest link when no autoneg */ + if (!autoneg) { + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + speed = IXGBE_LINK_SPEED_10GB_FULL; + } + } + + if (hw->mac.ops.setup_link) + hw->mac.ops.setup_link(hw, speed, true); + + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); +} + +/** + * ixgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbe_service_timer(unsigned long data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + unsigned long next_event_offset; + + /* poll faster when waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + ixgbe_service_event_schedule(adapter); +} + +static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) +{ + if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; + + /* If we're already down, removing or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_REMOVING, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + + ixgbe_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + + rtnl_lock(); + ixgbe_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * ixgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ixgbe_service_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, + struct ixgbe_adapter, + service_task); + if (ixgbe_removed(adapter->hw.hw_addr)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + ixgbe_down(adapter); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } + ixgbe_reset_subtask(adapter); + ixgbe_sfp_detection_subtask(adapter); + ixgbe_sfp_link_config_subtask(adapter); + ixgbe_check_overtemp_subtask(adapter); + 
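/* The remaining subtasks run in a fixed order: the watchdog refreshes
+ * carrier state and statistics, the Flow Director reinit and hang-check
+ * subtasks follow, and the PTP housekeeping at the end only runs while
+ * the PTP clock is active (__IXGBE_PTP_RUNNING is set).
+ */
+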
ixgbe_watchdog_subtask(adapter); + ixgbe_fdir_reinit_subtask(adapter); + ixgbe_check_hang_subtask(adapter); + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { + ixgbe_ptp_overflow_check(adapter); + ixgbe_ptp_rx_hang(adapter); + } + + ixgbe_service_event_complete(adapter); +} + +static int ixgbe_tso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + if (first->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; + } + + /* compute header lengths */ + l4len = tcp_hdrlen(skb); + *hdr_len = skb_transport_offset(skb) + l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, + mss_l4len_idx); + + return 1; +} + +static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & IXGBE_TX_FLAGS_CC)) + return; + } else { + u8 l4_hdr = 0; + switch (first->protocol) { + case htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + first->protocol); + } + break; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + l4_hdr); + } + break; + } + + /* update TX checksum flag */ + first->tx_flags |= 
IXGBE_TX_FLAGS_CSUM; + } + + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, + type_tucmd, mss_l4len_idx); +} + +#define IXGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_DEXT | + IXGBE_ADVTXD_DCMD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN, + IXGBE_ADVTXD_DCMD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO, + IXGBE_ADVTXD_DCMD_TSE); + + /* set timestamp bit if present */ + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, + IXGBE_ADVTXD_MAC_TSTAMP); + + /* insert frame checksum */ + cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); + + return cmd_type; +} + +static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CSUM, + IXGBE_ADVTXD_POPTS_TXSM); + + /* enble IPv4 checksum for TSO */ + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_IPV4, + IXGBE_ADVTXD_POPTS_IXSM); + + /* + * Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= IXGBE_SET_FLAG(tx_flags, + IXGBE_TX_FLAGS_CC, + IXGBE_ADVTXD_CC); + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ixgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + if (likely(ixgbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ixgbe_maybe_stop_tx(tx_ring, size); +} + +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + +static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + const u8 hdr_len) +{ + struct sk_buff *skb = first->skb; + struct ixgbe_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + struct skb_frag_struct *frag; + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; + u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = IXGBE_TX_DESC(tx_ring, i); + + ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); + + size = skb_headlen(skb); + data_len = skb->data_len; + +#ifdef IXGBE_FCOE + if (tx_flags & IXGBE_TX_FLAGS_FCOE) { + if (data_len < sizeof(struct fcoe_crc_eof)) { + size -= sizeof(struct fcoe_crc_eof) - data_len; + data_len = 0; + } else { + data_len -= sizeof(struct fcoe_crc_eof); + } + } + +#endif + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + + dma += IXGBE_MAX_DATA_PER_TXD; + size -= IXGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + i = 0; + } + tx_desc->read.olinfo_status = 0; + +#ifdef IXGBE_FCOE + size = min_t(unsigned int, data_len, skb_frag_size(frag)); +#else + size = skb_frag_size(frag); +#endif + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= size | IXGBE_TXD_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +static void ixgbe_atr(struct ixgbe_ring *ring, + struct ixgbe_tx_buffer *first) +{ + struct ixgbe_q_vector *q_vector = ring->q_vector; + union ixgbe_atr_hash_dword input = { .dword = 0 }; + union ixgbe_atr_hash_dword common = { .dword = 0 }; + union { + unsigned char *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + struct tcphdr *th; + __be16 vlan_id; + + /* if ring doesn't have a interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + /* do nothing if sampling is disabled */ + if (!ring->atr_sample_rate) + return; + + ring->atr_count++; + + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(first->skb); + + /* Currently only IPv4/IPv6 with TCP is supported */ + if ((first->protocol != htons(ETH_P_IPV6) || + hdr.ipv6->nexthdr != IPPROTO_TCP) && + (first->protocol != htons(ETH_P_IP) || + hdr.ipv4->protocol != IPPROTO_TCP)) + return; + + th = tcp_hdr(first->skb); + + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) + return; + + /* reset sample count */ + ring->atr_count = 0; + + vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); + + /* + * src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. 
+ */ + input.formatted.vlan_id = vlan_id; + + /* + * since src port and flex bytes occupy the same word XOR them together + * and write the value to source port portion of compressed dword + */ + if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) + common.port.src ^= th->dest ^ htons(ETH_P_8021Q); + else + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + + if (first->protocol == htons(ETH_P_IP)) { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } else { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, + input, common, ring->queue_index); +} + +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; +#ifdef IXGBE_FCOE + struct ixgbe_adapter *adapter; + struct ixgbe_ring_feature *f; + int txq; +#endif + + if (fwd_adapter) + return skb->queue_mapping + fwd_adapter->tx_base_queue; + +#ifdef IXGBE_FCOE + + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_FCOE): + case htons(ETH_P_FIP): + adapter = netdev_priv(dev); + + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + break; + default: + return fallback(dev, skb); + } + + f = &adapter->ring_feature[RING_F_FCOE]; + + txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : + smp_processor_id(); + + while (txq >= f->indices) + txq -= f->indices; + + return txq + f->offset; +#else + return fallback(dev, skb); +#endif +} + +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; + } + protocol = vlan_get_protocol(skb); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock && + !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + + /* schedule check for Tx timestamp */ + adapter->ptp_tx_skb = skb_get(skb); + adapter->ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_tx_work); + } + + skb_tx_timestamp(skb); + +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. + */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + tx_flags |= IXGBE_TX_FLAGS_CC; + +#endif + /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. 
*/ + if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL))) { + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; + tx_flags |= (skb->priority & 0x7) << + IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; + + if (skb_cow_head(skb, 0)) + goto out_drop; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> + IXGBE_TX_FLAGS_VLAN_SHIFT); + } else { + tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; + } + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + +#ifdef IXGBE_FCOE + /* setup tx offload for FCoE */ + if ((protocol == htons(ETH_P_FCOE)) && + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { + tso = ixgbe_fso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + + goto xmit_fcoe; + } + +#endif /* IXGBE_FCOE */ + tso = ixgbe_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + ixgbe_tx_csum(tx_ring, first); + + /* add the ATR filter if ATR is on */ + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + ixgbe_atr(tx_ring, first); + +#ifdef IXGBE_FCOE +xmit_fcoe: +#endif /* IXGBE_FCOE */ + ixgbe_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev, + struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; + + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); +} + +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + return __ixgbe_xmit_frame(skb, netdev, NULL); +} + +/** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + int ret; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + return ret > 0 ? 
0 : ret; +} + +static int +ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 value; + int rc; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + rc = hw->phy.ops.read_reg(hw, addr, devad, &value); + if (!rc) + rc = value; + return rc; +} + +static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (prtad != hw->phy.mdio.prtad) + return -EINVAL; + return hw->phy.ops.write_reg(hw, addr, devad, value); +} + +static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return ixgbe_ptp_set_ts_config(adapter, req); + case SIOCGHWTSTAMP: + return ixgbe_ptp_get_ts_config(adapter, req); + default: + return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); + } +} + +/** + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_add_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + + if (is_valid_ether_addr(hw->mac.san_addr)) { + rtnl_lock(); + err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + } + return err; +} + +/** + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding + * netdev->dev_addrs + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ +static int ixgbe_del_sanmac_netdev(struct net_device *dev) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + + if (is_valid_ether_addr(mac->san_addr)) { + rtnl_lock(); + err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + } + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void ixgbe_netpoll(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + + /* loop through and schedule all active queues */ + for (i = 0; i < adapter->num_q_vectors; i++) + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); +} + +#endif +static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by ixgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + return stats; +} + +#ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. + * @adapter: pointer to ixgbe_adapter + * @tc: number of traffic classes currently enabled + * + * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm + * 802.1Q priority maps to a packet buffer that exists. + */ +static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 reg, rsave; + int i; + + /* 82598 have a static priority to TC mapping that can not + * be changed so no validation is needed. 
+ */ + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + rsave = reg; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); + + /* If up2tc is out of bounds default to zero */ + if (up2tc > tc) + reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT); + } + + if (reg != rsave) + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + return; +} + +/** + * ixgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct + * + * Populate the netdev user priority to tc map + */ +static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ieee_ets *ets = adapter->ixgbe_ieee_ets; + u8 prio; + + for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { + u8 tc = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) + tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); + else if (ets) + tc = ets->prio_tc[prio]; + + netdev_set_prio_tc_map(dev, prio, tc); + } +} + +#endif /* CONFIG_IXGBE_DCB */ +/** + * ixgbe_setup_tc - configure net_device for multiple traffic classes + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int ixgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + bool pools; + + /* Hardware supports up to 8 traffic classes */ + if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || + (hw->mac.type == ixgbe_mac_82598EB && + tc < MAX_TRAFFIC_CLASS)) + return -EINVAL; + + pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); + if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) + return -EBUSY; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
+ */ + if (netif_running(dev)) + ixgbe_close(dev); + ixgbe_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_IXGBE_DCB + if (tc) { + netdev_set_num_tc(dev, tc); + ixgbe_set_prio_tc_map(adapter); + + adapter->flags |= IXGBE_FLAG_DCB_ENABLED; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->last_lfc_mode = adapter->hw.fc.requested_mode; + adapter->hw.fc.requested_mode = ixgbe_fc_none; + } + } else { + netdev_reset_tc(dev); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + + ixgbe_validate_rtr(adapter, tc); + +#endif /* CONFIG_IXGBE_DCB */ + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + return ixgbe_open(dev); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); +} + +#endif +void ixgbe_do_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbe_reinit_locked(adapter); + else + ixgbe_reset(adapter); +} + +static netdev_features_t ixgbe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; + + return features; +} + +static int ixgbe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; + bool need_reset = false; + + /* Make sure RSC matches LRO, reset if change */ + if (!(features & NETIF_F_LRO)) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + need_reset = true; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + need_reset = true; + } else if ((changed ^ features) & NETIF_F_LRO) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + } + } + + /* + * Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. 
+ */ + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if SR-IOV is enabled */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + } + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + ixgbe_vlan_strip_enable(adapter); + else + ixgbe_vlan_strip_disable(adapter); + + if (changed & NETIF_F_RXALL) + need_reset = true; + + netdev->features = features; + if (need_reset) + ixgbe_do_reset(netdev); + + return 0; +} + +/** + * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifiying us about + * @port: New UDP port number that VXLAN started listening to + **/ +static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u16 new_port = ntohs(port); + + if (sa_family == AF_INET6) + return; + + if (adapter->vxlan_port == new_port) { + netdev_info(dev, "Port %d already offloaded\n", new_port); + return; + } + + if (adapter->vxlan_port) { + netdev_info(dev, + "Hit Max num of UDP ports, not adding port %d\n", + new_port); + return; + } + + adapter->vxlan_port = new_port; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port); +} + +/** + * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away + * @dev: The port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + **/ +static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, + __be16 port) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u16 new_port = ntohs(port); + + if (sa_family == AF_INET6) + return; + + if (adapter->vxlan_port != new_port) { + netdev_info(dev, "Port %d was not found, not deleting\n", + new_port); + return; + } + + adapter->vxlan_port = 0; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0); +} + +static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + +/** + * ixgbe_configure_bridge_mode - set various bridge modes + * @adapter - the private structure + * @mode - requested bridge mode + * + * Configure some settings require for various bridge modes. 
+ **/ +static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, + __u16 mode) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int p, num_pools; + u32 vmdctl; + + switch (mode) { + case BRIDGE_MODE_VEPA: + /* disable Tx loopback, rely on switch hairpin mode */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); + + /* must enable Rx switching replication to allow multicast + * packet reception on all VFs, and to enable source address + * pruning. + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + true, + p); + } + break; + case BRIDGE_MODE_VEB: + /* enable Tx loopback for internal VF/PF communication */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, + IXGBE_PFDTXGSWC_VT_LBEN); + + /* disable Rx switching replication unless we have SR-IOV + * virtual functions + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + if (!adapter->num_vfs) + vmdctl &= ~IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + false, + p); + } + break; + default: + return -EINVAL; + } + + adapter->bridge_mode = mode; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return 0; +} + +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, u16 flags) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + u32 status; + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + status = ixgbe_configure_bridge_mode(adapter, mode); + if (status) + return status; + + break; + } + + return 0; +} + +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask, int nlflags) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + adapter->bridge_mode, 0, 0, nlflags); +} + +static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) +{ + struct ixgbe_fwd_adapter *fwd_adapter = NULL; + struct ixgbe_adapter *adapter = netdev_priv(pdev); + int used_pools = adapter->num_vfs + adapter->num_rx_pools; + unsigned int limit; + int pool, err; + + /* Hardware has a limited number of available pools. Each VF, and the + * PF require a pool. Check to ensure we don't attempt to use more + * then the available number of pools. 
+ */ + if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_RPS + if (vdev->num_rx_queues != vdev->num_tx_queues) { + netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", + vdev->name); + return ERR_PTR(-EINVAL); + } +#endif + /* Check for hardware restriction on number of rx/tx queues */ + if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || + vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { + netdev_info(pdev, + "%s: Supports RX/TX Queue counts 1,2, and 4\n", + pdev->name); + return ERR_PTR(-EINVAL); + } + + if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || + (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) + return ERR_PTR(-EBUSY); + + fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL); + if (!fwd_adapter) + return ERR_PTR(-ENOMEM); + + pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); + adapter->num_rx_pools++; + set_bit(pool, &adapter->fwd_bitmask); + limit = find_last_bit(&adapter->fwd_bitmask, 32); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; + adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; + + /* Force reinit of ring allocation with VMDQ enabled */ + err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + if (err) + goto fwd_add_err; + fwd_adapter->pool = pool; + fwd_adapter->real_adapter = adapter; + err = ixgbe_fwd_ring_up(vdev, fwd_adapter); + if (err) + goto fwd_add_err; + netif_tx_start_all_queues(vdev); + return fwd_adapter; +fwd_add_err: + /* unwind counter and free adapter struct */ + netdev_info(pdev, + "%s: dfwd hardware acceleration failed\n", vdev->name); + clear_bit(pool, &adapter->fwd_bitmask); + adapter->num_rx_pools--; + kfree(fwd_adapter); + return ERR_PTR(err); +} + +static void ixgbe_fwd_del(struct net_device *pdev, void *priv) +{ + struct ixgbe_fwd_adapter *fwd_adapter = priv; + struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; + unsigned int limit; + + clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); + adapter->num_rx_pools--; + + limit = find_last_bit(&adapter->fwd_bitmask, 32); + adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); + ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); + netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + fwd_adapter->pool, adapter->num_rx_pools, + fwd_adapter->rx_base_queue, + fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, + adapter->fwd_bitmask); + kfree(fwd_adapter); +} + +static const struct net_device_ops ixgbe_netdev_ops = { + .ndo_open = ixgbe_open, + .ndo_stop = ixgbe_close, + .ndo_start_xmit = ixgbe_xmit_frame, + .ndo_select_queue = ixgbe_select_queue, + .ndo_set_rx_mode = ixgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbe_set_mac, + .ndo_change_mtu = ixgbe_change_mtu, + .ndo_tx_timeout = ixgbe_tx_timeout, + .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, + .ndo_do_ioctl = ixgbe_ioctl, + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, + .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, + .ndo_get_vf_config = ixgbe_ndo_get_vf_config, + .ndo_get_stats64 = ixgbe_get_stats64, +#ifdef CONFIG_IXGBE_DCB + .ndo_setup_tc = 
ixgbe_setup_tc, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbe_netpoll, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = ixgbe_low_latency_recv, +#endif +#ifdef IXGBE_FCOE + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, + .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, + .ndo_fcoe_enable = ixgbe_fcoe_enable, + .ndo_fcoe_disable = ixgbe_fcoe_disable, + .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, + .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, +#endif /* IXGBE_FCOE */ + .ndo_set_features = ixgbe_set_features, + .ndo_fix_features = ixgbe_fix_features, + .ndo_fdb_add = ixgbe_ndo_fdb_add, + .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, + .ndo_dfwd_add_station = ixgbe_fwd_add, + .ndo_dfwd_del_station = ixgbe_fwd_del, + .ndo_add_vxlan_port = ixgbe_add_vxlan_port, + .ndo_del_vxlan_port = ixgbe_del_vxlan_port, +}; + +/** + * ixgbe_enumerate_functions - Get the number of ports this device has + * @adapter: adapter structure + * + * This function enumerates the phsyical functions co-located on a single slot, + * in order to determine how many ports a device has. This is most useful in + * determining the required GT/s of PCIe bandwidth necessary for optimal + * performance. + **/ +static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) +{ + struct pci_dev *entry, *pdev = adapter->pdev; + int physfns = 0; + + /* Some cards can not use the generic count PCIe functions method, + * because they are behind a parent switch, so we hardcode these with + * the correct number of functions. + */ + if (ixgbe_pcie_from_parent(&adapter->hw)) + physfns = 4; + + list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { + /* don't count virtual functions */ + if (entry->is_virtfn) + continue; + + /* When the devices on the bus don't all match our device ID, + * we can't reliably determine the correct number of + * functions. This can occur if a function has been direct + * attached to a virtual machine using VT-d, for example. In + * this case, simply return -1 to indicate this. 
+ */ + if ((entry->vendor != pdev->vendor) || + (entry->device != pdev->device)) + return -1; + + physfns++; + } + + return physfns; +} + +/** + * ixgbe_wol_supported - Check whether device supports WoL + * @hw: hw specific details + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id) +{ + struct ixgbe_hw *hw = &adapter->hw; + u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; + int is_wol_supported = 0; + + switch (device_id) { + case IXGBE_DEV_ID_82599_SFP: + /* Only these subdevices could supports WOL */ + switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599_SFP_WOL0: + case IXGBE_SUBDEV_ID_82599_560FLR: + /* only support first port */ + if (hw->bus.func != 0) + break; + case IXGBE_SUBDEV_ID_82599_SP_560FLR: + case IXGBE_SUBDEV_ID_82599_SFP: + case IXGBE_SUBDEV_ID_82599_RNDC: + case IXGBE_SUBDEV_ID_82599_ECNA_DP: + case IXGBE_SUBDEV_ID_82599_LOM_SFP: + is_wol_supported = 1; + break; + } + break; + case IXGBE_DEV_ID_82599EN_SFP: + /* Only this subdevice supports WOL */ + switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: + is_wol_supported = 1; + break; + } + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) + is_wol_supported = 1; + break; + case IXGBE_DEV_ID_82599_KX4: + is_wol_supported = 1; + break; + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: + /* check eeprom to see if enabled wol */ + if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) { + is_wol_supported = 1; + } + break; + } + + return is_wol_supported; +} + +/** + * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_OF + struct device_node *dp = pci_device_to_OF_node(adapter->pdev); + struct ixgbe_hw *hw = &adapter->hw; + const unsigned char *addr; + + addr = of_get_mac_address(dp); + if (addr) { + ether_addr_copy(hw->mac.perm_addr, addr); + return; + } +#endif /* CONFIG_OF */ + +#ifdef CONFIG_SPARC + ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr); +#endif /* CONFIG_SPARC */ +} + +/** + * ixgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbe_adapter *adapter = NULL; + struct ixgbe_hw *hw; + const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; + int i, err, pci_using_dac, expected_gts; + unsigned int indices = MAX_TX_QUEUES; + u8 part_str[IXGBE_PBANUM_LENGTH]; + bool disable_dev = false; +#ifdef IXGBE_FCOE + u16 device_caps; +#endif + u32 eec; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. 
+ */ + if (pdev->is_virtfn) { + WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", + pci_name(pdev), pdev->vendor, pdev->device); + return -EINVAL; + } + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + err = pci_request_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM), ixgbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + pci_save_state(pdev); + + if (ii->mac == ixgbe_mac_82598EB) { +#ifdef CONFIG_IXGBE_DCB + /* 8 TC w/ 4 queues per TC */ + indices = 4 * MAX_TRAFFIC_CLASS; +#else + indices = IXGBE_MAX_RSS_INDICES; +#endif + } + + netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + netdev->netdev_ops = &ixgbe_netdev_ops; + ixgbe_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + /* Setup hw api */ + memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.type = ii->mac; + + /* EEPROM */ + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (ixgbe_removed(hw->hw_addr)) { + err = -EIO; + goto err_ioremap; + } + /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ + if (!(eec & (1 << 8))) + hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; + + /* PHY */ + memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + /* ixgbe_identify_phy_generic will set prtad and mmds properly */ + hw->phy.mdio.prtad = MDIO_PRTAD_NONE; + hw->phy.mdio.mmds = 0; + hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + hw->phy.mdio.dev = netdev; + hw->phy.mdio.mdio_read = ixgbe_mdio_read; + hw->phy.mdio.mdio_write = ixgbe_mdio_write; + + ii->get_invariants(hw); + + /* setup the private structure */ + err = ixgbe_sw_init(adapter); + if (err) + goto err_sw_init; + + /* Make it possible the adapter to be woken up via WOL */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + break; + default: + break; + } + + /* + * If there is a fan on this device and it has failed log the + * failure. 
+ */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + if (esdp & IXGBE_ESDP_SDP1) + e_crit(probe, "Fan has stopped, replace the adapter\n"); + } + + if (allow_unsupported_sfp) + hw->allow_unsupported_sfp = allow_unsupported_sfp; + + /* reset_hw fills in the perm_addr as well */ + hw->phy.reset_if_overtemp = true; + err = hw->mac.ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; + if (err == IXGBE_ERR_SFP_NOT_PRESENT && + hw->mac.type == ixgbe_mac_82598EB) { + err = 0; + } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); + e_dev_err("Reload the driver after installing a supported module.\n"); + goto err_sw_init; + } else if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto err_sw_init; + } + +#ifdef CONFIG_PCI_IOV + /* SR-IOV not supported on the 82598 */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + goto skip_sriov; + /* Mailbox */ + ixgbe_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); + ixgbe_enable_sriov(adapter); +skip_sriov: + +#endif + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM; + + netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + netdev->features |= NETIF_F_SCTP_CSUM; + netdev->hw_features |= NETIF_F_SCTP_CSUM | + NETIF_F_NTUPLE; + break; + default: + break; + } + + netdev->hw_features |= NETIF_F_RXALL; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + netdev->hw_enc_features |= NETIF_F_RXCSUM; + break; + default: + break; + } + +#ifdef CONFIG_IXGBE_DCB + netdev->dcbnl_ops = &dcbnl_ops; +#endif + +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + unsigned int fcoe_l; + + if (hw->mac.ops.get_device_caps) { + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; + } + + + fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); + adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; + + netdev->features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC; + + netdev->vlan_features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC | + NETIF_F_FCOE_MTU; + } +#endif /* IXGBE_FCOE */ + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + + /* make sure the EEPROM is good */ + if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_sw_init; + } + + ixgbe_get_platform_mac_addr(adapter); + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + + if 
(!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + + setup_timer(&adapter->service_timer, &ixgbe_service_timer, + (unsigned long) adapter); + + if (ixgbe_removed(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + INIT_WORK(&adapter->service_task, ixgbe_service_task); + set_bit(__IXGBE_SERVICE_INITED, &adapter->state); + clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); + + err = ixgbe_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + /* WOL not supported for all devices */ + adapter->wol = 0; + hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); + hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, + pdev->subsystem_device); + if (hw->wol_enabled) + adapter->wol = IXGBE_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* save off EEPROM version number */ + hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); + hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); + + /* pick up the PCI bus settings for reporting later */ + hw->mac.ops.get_bus_info(hw); + if (ixgbe_pcie_from_parent(hw)) + ixgbe_get_parent_bus_info(adapter); + + /* calculate the expected PCIe bandwidth required for optimal + * performance. Note that some older parts will never have enough + * bandwidth due to being older generation PCIe parts. We clamp these + * parts to ensure no warning is displayed if it can't be fixed. + */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); + break; + default: + expected_gts = ixgbe_enumerate_functions(adapter) * 10; + break; + } + + /* don't check link if we failed to enumerate functions */ + if (expected_gts > 0) + ixgbe_check_minimum_link(adapter, expected_gts); + + err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str)); + if (err) + strlcpy(part_str, "Unknown", sizeof(part_str)); + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, hw->phy.sfp_type, + part_str); + else + e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); + + e_dev_info("%pM\n", netdev->dev_addr); + + /* reset the hardware with the new settings */ + err = hw->mac.ops.start_hw(hw); + if (err == IXGBE_ERR_EEPROM_VERSION) { + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " + "Please be aware there may be issues associated " + "with your hardware. 
If you are experiencing " + "problems please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + } + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + pci_set_drvdata(pdev, adapter); + + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + +#ifdef CONFIG_IXGBE_DCA + if (dca_add_requester(&pdev->dev) == 0) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; + ixgbe_setup_dca(adapter); + } +#endif + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(pdev, (i | 0x10000000)); + } + + /* firmware requires driver version to be 0xFFFFFFFF + * since os does not support feature + */ + if (hw->mac.ops.set_fw_drv_ver) + hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, + 0xFF); + + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); + + e_dev_info("%s\n", ixgbe_default_device_descr); + +#ifdef CONFIG_IXGBE_HWMON + if (ixgbe_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); +#endif /* CONFIG_IXGBE_HWMON */ + + ixgbe_dbg_adapter_init(adapter); + + /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ + if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) + hw->mac.ops.setup_link(hw, + IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, + true); + + return 0; + +err_register: + ixgbe_release_hw_control(adapter); + ixgbe_clear_interrupt_scheme(adapter); +err_sw_init: + ixgbe_disable_sriov(adapter); + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; + iounmap(adapter->io_addr); + kfree(adapter->mac_table); +err_ioremap: + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * ixgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void ixgbe_remove(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + bool disable_dev; + + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; + ixgbe_dbg_adapter_exit(adapter); + + set_bit(__IXGBE_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + + +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; + dca_remove_requester(&pdev->dev); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); + } + +#endif +#ifdef CONFIG_IXGBE_HWMON + ixgbe_sysfs_exit(adapter); +#endif /* CONFIG_IXGBE_HWMON */ + + /* remove the added san mac */ + ixgbe_del_sanmac_netdev(netdev); + + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + +#ifdef CONFIG_PCI_IOV + /* + * Only disable SR-IOV on unload if the user specified the now + * deprecated max_vfs module parameter. 
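The probe path above unwinds through a chain of goto labels (err_register, err_sw_init, err_ioremap, ...) so that each failure releases exactly what was acquired before it, in reverse order. A minimal standalone sketch of that pattern — userspace C; fake_probe, fake_dev and the malloc stand-ins are hypothetical, not driver code:

#include <stdio.h>
#include <stdlib.h>

struct fake_dev {
    void *bar;      /* stands in for the ioremap'ed BAR        */
    void *rings;    /* stands in for ring/table allocations    */
};

static int fake_probe(struct fake_dev *dev)
{
    dev->bar = malloc(64);
    if (!dev->bar)
        goto err_ioremap;

    dev->rings = malloc(256);
    if (!dev->rings)
        goto err_sw_init;

    return 0;                     /* fully initialised */

err_sw_init:
    free(dev->bar);               /* undo only the steps that succeeded */
err_ioremap:
    return -1;
}

int main(void)
{
    struct fake_dev dev = { 0 };

    if (fake_probe(&dev) == 0) {
        /* mirror image of probe: release in reverse order */
        free(dev.rings);
        free(dev.bar);
    }
    return 0;
}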
+ */ + if (max_vfs) + ixgbe_disable_sriov(adapter); +#endif + ixgbe_clear_interrupt_scheme(adapter); + + ixgbe_release_hw_control(adapter); + +#ifdef CONFIG_DCB + kfree(adapter->ixgbe_ieee_pfc); + kfree(adapter->ixgbe_ieee_ets); + +#endif + iounmap(adapter->io_addr); + pci_release_selected_regions(pdev, pci_select_bars(pdev, + IORESOURCE_MEM)); + + e_dev_info("complete\n"); + + kfree(adapter->mac_table); + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); + + pci_disable_pcie_error_reporting(pdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * ixgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB || + adapter->num_vfs == 0) + goto skip_bad_vf_detection; + + bdev = pdev->bus->self; + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) + bdev = bdev->bus->self; + + if (!bdev) + goto skip_bad_vf_detection; + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) + goto skip_bad_vf_detection; + + dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG); + dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); + dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); + dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); + if (ixgbe_removed(hw->hw_addr)) + goto skip_bad_vf_detection; + + req_id = dw1 >> 16; + /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */ + if (!(req_id & 0x0080)) + goto skip_bad_vf_detection; + + pf_func = req_id & 0x01; + if ((pf_func & 1) == (pdev->devfn & 1)) { + unsigned int device_id; + + vf = (req_id & 0x7F) >> 1; + e_dev_err("VF %d has caused a PCIe error\n", vf); + e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: " + "%8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + device_id = IXGBE_82599_VF_DEVICE_ID; + break; + case ixgbe_mac_X540: + device_id = IXGBE_X540_VF_DEVICE_ID; + break; + case ixgbe_mac_X550: + device_id = IXGBE_DEV_ID_X550_VF; + break; + case ixgbe_mac_X550EM_x: + device_id = IXGBE_DEV_ID_X550EM_X_VF; + break; + default: + device_id = 0; + break; + } + + /* Find the pci device of the offending VF */ + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); + while (vfdev) { + if (vfdev->devfn == (req_id & 0xFF)) + break; + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, + device_id, vfdev); + } + /* + * There's a slim chance the VF could have been hot plugged, + * so if it is no longer present we don't need to issue the + * VFLR. Just clean up the AER in that case. + */ + if (vfdev) { + ixgbe_issue_vf_flr(adapter, vfdev); + /* Free device reference count */ + pci_dev_put(vfdev); + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + } + + /* + * Even though the error may have occurred on the other port + * we still need to increment the vf error reference count for + * both ports because the I/O resume function will be called + * for both of them. 
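The VF lookup above hinges on how the 82599 encodes virtual functions in the PCIe requestor ID taken from AER header-log dword 1: bit 7 marks a VF, the low bit names the owning PF function, and bits 6:1 give the VF index. A small standalone sketch of that decoding — userspace C; the dw1 value is a hypothetical example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t dw1 = 0x00850000;          /* hypothetical header-log dword 1   */
    uint16_t req_id = dw1 >> 16;        /* requestor ID sits in bits 31:16   */

    if (req_id & 0x0080) {              /* bit 7 set => request came from a VF */
        unsigned int pf_func = req_id & 0x01;       /* owning PF function   */
        unsigned int vf = (req_id & 0x7F) >> 1;     /* VF index, bits 6:1   */

        printf("VF %u on PF function %u issued the bad request\n", vf, pf_func);
    } else {
        printf("requestor 0x%04x is not one of this device's VFs\n", req_id);
    }
    return 0;
}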
+ */ + adapter->vferr_refcount++; + + return PCI_ERS_RESULT_RECOVERED; + +skip_bad_vf_detection: +#endif /* CONFIG_PCI_IOV */ + if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + ixgbe_down(adapter); + + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result; + int err; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + smp_mb__before_atomic(); + clear_bit(__IXGBE_DISABLED, &adapter->state); + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + ixgbe_reset(adapter); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + e_dev_err("pci_cleanup_aer_uncorrect_error_status " + "failed 0x%0x\n", err); + /* non-fatal, continue */ + } + + return result; +} + +/** + * ixgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. + */ +static void ixgbe_io_resume(struct pci_dev *pdev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + if (adapter->vferr_refcount) { + e_info(drv, "Resuming after VF err\n"); + adapter->vferr_refcount--; + return; + } + +#endif + if (netif_running(netdev)) + ixgbe_up(adapter); + + netif_device_attach(netdev); +} + +static const struct pci_error_handlers ixgbe_err_handler = { + .error_detected = ixgbe_io_error_detected, + .slot_reset = ixgbe_io_slot_reset, + .resume = ixgbe_io_resume, +}; + +static struct pci_driver ixgbe_driver = { + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = ixgbe_remove, +#ifdef CONFIG_PM + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, +#endif + .shutdown = ixgbe_shutdown, + .sriov_configure = ixgbe_pci_sriov_configure, + .err_handler = &ixgbe_err_handler +}; + +/** + * ixgbe_init_module - Driver Registration Routine + * + * ixgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbe_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); + pr_info("%s\n", ixgbe_copyright); + + ixgbe_dbg_init(); + + ret = pci_register_driver(&ixgbe_driver); + if (ret) { + ixgbe_dbg_exit(); + return ret; + } + +#ifdef CONFIG_IXGBE_DCA + dca_register_notify(&dca_notifier); +#endif + + return 0; +} + +module_init(ixgbe_init_module); + +/** + * ixgbe_exit_module - Driver Exit Cleanup Routine + * + * ixgbe_exit_module is called just before the driver is removed + * from memory. 
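The three callbacks wired into ixgbe_err_handler above follow the usual AER recovery sequence: detect and quiesce, reset the slot, then resume traffic. A compressed standalone sketch of that contract — userspace C; the enum and helper names are hypothetical stand-ins, not the kernel's pci_ers_result_t API:

#include <stdbool.h>
#include <stdio.h>

enum ers_result { ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };

static enum ers_result error_detected(bool perm_failure)
{
    /* detach the netdev and stop traffic before deciding */
    return perm_failure ? ERS_DISCONNECT : ERS_NEED_RESET;
}

static enum ers_result slot_reset(bool reenable_ok)
{
    /* re-enable the device, restore config space, reset the MAC */
    return reenable_ok ? ERS_RECOVERED : ERS_DISCONNECT;
}

static void resume(void)
{
    /* bring the interface back up and re-attach the netdev */
    puts("resume: traffic may flow again");
}

int main(void)
{
    if (error_detected(false) == ERS_NEED_RESET &&
        slot_reset(true) == ERS_RECOVERED)
        resume();
    return 0;
}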
+ **/ +static void __exit ixgbe_exit_module(void) +{ +#ifdef CONFIG_IXGBE_DCA + dca_unregister_notify(&dca_notifier); +#endif + pci_unregister_driver(&ixgbe_driver); + + ixgbe_dbg_exit(); +} + +#ifdef CONFIG_IXGBE_DCA +static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, + __ixgbe_notify_dca); + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; +} + +#endif /* CONFIG_IXGBE_DCA */ + +module_exit(ixgbe_exit_module); + +/* ixgbe_main.c */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c new file mode 100644 index 000000000..9993a471d --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -0,0 +1,458 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include "ixgbe.h" +#include "ixgbe_mbx.h" + +/** + * ixgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (!mbx->ops.read) + return IXGBE_ERR_MBX; + + return mbx->ops.read(hw, msg, size, mbx_id); +} + +/** + * ixgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (size > mbx->size) + return IXGBE_ERR_MBX; + + if (!mbx->ops.write) + return IXGBE_ERR_MBX; + + return mbx->ops.write(hw, msg, size, mbx_id); +} + +/** + * ixgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops.check_for_msg) + return IXGBE_ERR_MBX; + + return mbx->ops.check_for_msg(hw, mbx_id); +} + +/** + * ixgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops.check_for_ack) + return IXGBE_ERR_MBX; + + return mbx->ops.check_for_ack(hw, mbx_id); +} + +/** + * ixgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops.check_for_rst) + return IXGBE_ERR_MBX; + + return mbx->ops.check_for_rst(hw, mbx_id); +} + +/** + * ixgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + return IXGBE_ERR_MBX; + + while (mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + return IXGBE_ERR_MBX; + udelay(mbx->usec_delay); + } + + return 0; +} + +/** + * ixgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || 
!mbx->ops.check_for_ack) + return IXGBE_ERR_MBX; + + while (mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + return IXGBE_ERR_MBX; + udelay(mbx->usec_delay); + } + + return 0; +} + +/** + * ixgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val; + + if (!mbx->ops.read) + return IXGBE_ERR_MBX; + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + if (ret_val) + return ret_val; + + /* if ack received read message */ + return mbx->ops.read(hw, msg, size, mbx_id); +} + +/** + * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + return IXGBE_ERR_MBX; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + if (ret_val) + return ret_val; + + /* if msg sent wait until we receive an ack */ + return ixgbe_poll_for_ack(hw, mbx_id); +} + +static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + + if (mbvficr & mask) { + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + hw->mbx.stats.reqs++; + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + hw->mbx.stats.acks++; + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 reg_offset = (vf_number < 32) ? 
0 : 1; + u32 vf_shift = vf_number % 32; + u32 vflre = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; + default: + break; + } + + if (vflre & (1 << vf_shift)) { + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + return 0; + } + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); + if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) + return 0; + + return IXGBE_ERR_MBX; +} + +/** + * ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + return ret_val; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_pf(hw, vf_number); + ixgbe_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + return 0; +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
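The VFLR check above maps a VF number onto one of two 32-bit VFLRE registers and a bit within that register. A standalone sketch of the indexing — userspace C; the vflre array contents and VF number are hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t vflre[2] = { 0, 1u << 5 };      /* hypothetical register pair */
    unsigned int vf_number = 37;

    unsigned int reg_offset = (vf_number < 32) ? 0 : 1;   /* which register */
    unsigned int vf_shift   = vf_number % 32;             /* which bit      */

    if (vflre[reg_offset] & (1u << vf_shift))
        printf("VF %u was function-level reset\n", vf_number);
    else
        printf("no VFLR pending for VF %u\n", vf_number);
    return 0;
}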
+ **/ +static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + return ret_val; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + + return 0; +} + +#ifdef CONFIG_PCI_IOV +/** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; +} +#endif /* CONFIG_PCI_IOV */ + +struct ixgbe_mbx_operations mbx_ops_generic = { + .read = ixgbe_read_mbx_pf, + .write = ixgbe_write_mbx_pf, + .read_posted = ixgbe_read_posted_mbx, + .write_posted = ixgbe_write_posted_mbx, + .check_for_msg = ixgbe_check_for_msg_pf, + .check_for_ack = ixgbe_check_for_ack_pf, + .check_for_rst = ixgbe_check_for_rst_pf, +}; + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h new file mode 100644 index 000000000..b1e4703ff --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -0,0 +1,126 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "ixgbe_type.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + clear to send requests */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* + * Each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + +/* GET_QUEUES return data indices within the mailbox */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current 
multicast type */ +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +#ifdef CONFIG_PCI_IOV +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); +#endif /* CONFIG_PCI_IOV */ + +extern struct ixgbe_mbx_operations mbx_ops_generic; + +#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c new file mode 100644 index 000000000..8a2be4441 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -0,0 +1,2139 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" + +static void ixgbe_i2c_start(struct ixgbe_hw *hw); +static void ixgbe_i2c_stop(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); + +/** + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send + * + * Returns an error code on error. 
+ **/ +static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +{ + s32 status; + + status = ixgbe_clock_out_i2c_byte(hw, byte); + if (status) + return status; + return ixgbe_get_i2c_ack(hw); +} + +/** + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte + * + * Returns an error code on error. + **/ +static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +{ + s32 status; + + status = ixgbe_clock_in_i2c_byte(hw, byte); + if (status) + return status; + /* ACK */ + return ixgbe_clock_out_i2c_bit(hw, false); +} + +/** + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1: addend 1 + * @add2: addend 2 + * + * Returns one's complement 8-bit sum. + **/ +static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) +{ + u16 sum = add1 + add2; + + sum = (sum & 0xFF) + (sum >> 8); + return sum & 0xFF; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 10; + int retry = 0; + u8 csum_byte; + u8 high_bits; + u8 low_bits; + u8 reg_high; + u8 csum; + + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ~csum; + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + /* Re-start condition */ + ixgbe_i2c_start(hw); + /* Device Address and read indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) + goto fail; + /* Get upper bits */ + if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) + goto fail; + /* Get low bits */ + if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) + goto fail; + /* Get csum */ + if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) + goto fail; + /* NACK */ + if (ixgbe_clock_out_i2c_bit(hw, false)) + goto fail; + ixgbe_i2c_stop(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + *val = (high_bits << 8) | low_bits; + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte read combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte read combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. 
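Both combined I2C transactions above protect the header (and, for writes, the data) with the one's-complement byte checksum computed by ixgbe_ones_comp_byte_add(), transmitted as its complement. A standalone sketch of that arithmetic for a read header — userspace C; the register offset is a hypothetical example:

#include <stdint.h>
#include <stdio.h>

static uint8_t ones_comp_byte_add(uint8_t a, uint8_t b)
{
    uint16_t sum = a + b;

    sum = (sum & 0xFF) + (sum >> 8);    /* fold the carry back in */
    return (uint8_t)sum;
}

int main(void)
{
    uint16_t reg = 0x1234;                         /* hypothetical register  */
    uint8_t reg_high = ((reg >> 7) & 0xFE) | 1;    /* "read combined" flag   */
    uint8_t csum = ones_comp_byte_add(reg_high, reg & 0xFF);

    csum = ~csum;                                  /* sent as the complement */
    printf("reg_high=0x%02x csum=0x%02x\n", reg_high, csum);
    return 0;
}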
+ **/ +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + int max_retry = 1; + int retry = 0; + u8 reg_high; + u8 csum; + + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ixgbe_ones_comp_byte_add(csum, val >> 8); + csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); + csum = ~csum; + do { + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write data 15:8 */ + if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) + goto fail; + /* Write data 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + ixgbe_i2c_stop(hw); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte write combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte write combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_identify_phy_generic - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + **/ +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) +{ + u32 phy_addr; + u16 ext_ability = 0; + + if (!hw->phy.phy_semaphore_mask) { + hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) & + IXGBE_STATUS_LAN_ID_1; + if (hw->phy.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + + if (hw->phy.type == ixgbe_phy_unknown) { + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + hw->phy.mdio.prtad = phy_addr; + if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { + ixgbe_get_phy_id(hw); + hw->phy.type = + ixgbe_get_phy_type_from_id(hw->phy.id); + + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.ops.read_reg(hw, + MDIO_PMA_EXTABLE, + MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & + (MDIO_PMA_EXTABLE_10GBT | + MDIO_PMA_EXTABLE_1000BT)) + hw->phy.type = + ixgbe_phy_cu_unknown; + else + hw->phy.type = + ixgbe_phy_generic; + } + + return 0; + } + } + /* clear value if nothing found */ + hw->phy.mdio.prtad = 0; + return IXGBE_ERR_PHY_ADDR_INVALID; + } + return 0; +} + +/** + * ixgbe_check_reset_blocked - check status of MNG FW veto bit + * @hw: pointer to the hardware structure + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't + * have this bit just return false since the link can not be blocked + * via this method. 
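When the MDIO scan above finds a PHY that answers but whose ID is not recognised, the PMA/PMD extended-ability word decides whether it is treated as an unknown copper PHY or a generic one. A standalone sketch of that classification — userspace C; the two ability masks are hypothetical stand-ins for MDIO_PMA_EXTABLE_10GBT and MDIO_PMA_EXTABLE_1000BT:

#include <stdint.h>
#include <stdio.h>

#define EXTABLE_10GBT  0x0004   /* hypothetical bit values */
#define EXTABLE_1000BT 0x0040

static const char *classify(uint16_t ext_ability)
{
    return (ext_ability & (EXTABLE_10GBT | EXTABLE_1000BT))
        ? "cu_unknown" : "generic";
}

int main(void)
{
    printf("%s\n", classify(0x0044));   /* advertises BASE-T => copper  */
    printf("%s\n", classify(0x0000));   /* no BASE-T ability => generic */
    return 0;
}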
+ **/ +bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) +{ + u32 mmngc; + + /* If we don't have this bit, it can't be blocking */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); + if (mmngc & IXGBE_MMNGC_MNG_VETO) { + hw_dbg(hw, "MNG_VETO bit detected.\n"); + return true; + } + + return false; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, + &phy_id_high); + + if (status == 0) { + hw->phy.id = (u32)(phy_id_high << 16); + status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } + return status; +} + +/** + * ixgbe_get_phy_type_from_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ + enum ixgbe_phy_type phy_type; + + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; + case QT2022_PHY_ID: + phy_type = ixgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = ixgbe_phy_nl; + break; + default: + phy_type = ixgbe_phy_unknown; + break; + } + + return phy_type; +} + +/** + * ixgbe_reset_phy_generic - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) +{ + u32 i; + u16 ctrl = 0; + s32 status = 0; + + if (hw->phy.type == ixgbe_phy_unknown) + status = ixgbe_identify_phy_generic(hw); + + if (status != 0 || hw->phy.type == ixgbe_phy_none) + return status; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) + return 0; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + /* + * Perform soft PHY reset to the PHY_XS. + * This will cause a soft reset to the PHY + */ + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, + MDIO_CTRL1_RESET); + + /* + * Poll for reset bit to self-clear indicating reset is complete. + * Some PHYs could take up to 3 seconds to complete and need about + * 1.7 usec delay after the reset is complete. + */ + for (i = 0; i < 30; i++) { + msleep(100); + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, &ctrl); + if (!(ctrl & MDIO_CTRL1_RESET)) { + udelay(2); + break; + } + } + + if (ctrl & MDIO_CTRL1_RESET) { + hw_dbg(hw, "PHY reset polling failed to complete.\n"); + return IXGBE_ERR_RESET_FAILED; + } + + return 0; +} + +/** + * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without + * the SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + u32 i, data, command; + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle completed. 
+ * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address command did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY read command didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* Read operation is complete. Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + + return 0; +} + +/** + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * using the SWFW lock - this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 gssr = hw->phy.phy_semaphore_mask; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + return IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register + * without SWFW lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 i, command; + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. 
+ * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * using SWFW lock- this function is needed in most cases + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 gssr; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { + return IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** + * ixgbe_setup_phy_link_generic - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. 
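Each MDIO access above is driven by a command word written to MSCA: an address cycle first, then a read or write cycle, each polled until the MDI command bit self-clears. A standalone sketch of how such a command word is composed — userspace C; the shift and opcode values are hypothetical stand-ins for the IXGBE_MSCA_* definitions:

#include <stdint.h>
#include <stdio.h>

#define MSCA_NP_ADDR_SHIFT   0
#define MSCA_DEV_TYPE_SHIFT  16
#define MSCA_PHY_ADDR_SHIFT  21
#define MSCA_ADDR_CYCLE      0x00000000u
#define MSCA_WRITE_CYCLE     (1u << 26)
#define MSCA_MDI_COMMAND     (1u << 30)    /* hardware clears this when done */

static uint32_t msca_cmd(uint16_t reg, uint8_t dev_type, uint8_t prtad,
                         uint32_t cycle)
{
    return ((uint32_t)reg << MSCA_NP_ADDR_SHIFT) |
           ((uint32_t)dev_type << MSCA_DEV_TYPE_SHIFT) |
           ((uint32_t)prtad << MSCA_PHY_ADDR_SHIFT) |
           cycle | MSCA_MDI_COMMAND;
}

int main(void)
{
    /* address cycle first, then the data cycle, as in the driver above */
    printf("addr cycle:  0x%08x\n", (unsigned)msca_cmd(0x0010, 1, 0, MSCA_ADDR_CYCLE));
    printf("write cycle: 0x%08x\n", (unsigned)msca_cmd(0x0010, 1, 0, MSCA_WRITE_CYCLE));
    return 0;
}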
+ **/ +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +{ + s32 status = 0; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + + return status; +} + +/** + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + **/ +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* Setup link based on the new speed settings */ + hw->phy.ops.setup_link(hw); + + return 0; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. 
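The advertisement updates above are plain read-modify-write operations on the autoneg registers: clear the capability bit, then set it again only if that speed is in the advertised mask. A standalone sketch for the 10G bit — userspace C; the pretend register and bit values are hypothetical stand-ins:

#include <stdint.h>
#include <stdio.h>

#define CTRL_ADV10G      0x1000     /* hypothetical advertisement bit */
#define SPEED_10GB_FULL  0x0080     /* hypothetical link-speed flag   */

static uint16_t an_10gbt_ctrl = CTRL_ADV10G;   /* pretend PHY register */

int main(void)
{
    uint32_t autoneg_advertised = 0;           /* 10G not being advertised */
    uint16_t reg = an_10gbt_ctrl;              /* read                     */

    reg &= ~CTRL_ADV10G;                       /* clear the advertisement  */
    if (autoneg_advertised & SPEED_10GB_FULL)
        reg |= CTRL_ADV10G;                    /* re-set only if requested */

    an_10gbt_ctrl = reg;                       /* write back               */
    printf("AN 10GBT ctrl: 0x%04x\n", an_10gbt_ctrl);
    return 0;
}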
+ */ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status; + u16 speed_ability; + + *speed = 0; + *autoneg = true; + + status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, + &speed_ability); + + if (status == 0) { + if (speed_ability & MDIO_SPEED_10G) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_1000) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_100) + *speed |= IXGBE_LINK_SPEED_100_FULL; + } + + /* Internal PHY does not support 100 Mbps */ + if (hw->mac.type == ixgbe_mac_X550EM_x) + *speed &= ~IXGBE_LINK_SPEED_100_FULL; + + return status; +} + +/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + status = hw->phy.ops.read_reg(hw, + MDIO_STAT1, + MDIO_MMD_VEND1, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + * This function always returns success, this is nessary since + * it is called via a function pointer that could call other + * functions that could return an error. 
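The TNX link check above is a bounded poll: re-read a vendor status register a fixed number of times, with a short delay between reads, until the link bit appears. A standalone sketch of that loop — userspace C; read_vendor_status() and the bit mask are hypothetical stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LINK_STATUS_BIT 0x0008      /* hypothetical link-up bit */

static uint16_t read_vendor_status(void)
{
    static int calls;
    return (++calls >= 3) ? LINK_STATUS_BIT : 0;   /* link comes up on 3rd poll */
}

int main(void)
{
    const int max_time_out = 10;
    bool link_up = false;

    for (int t = 0; t < max_time_out; t++) {
        /* the driver waits ~10 usec here between polls */
        if (read_vendor_status() & LINK_STATUS_BIT) {
            link_up = true;
            break;
        }
    }
    printf("link_up=%d\n", link_up);
    return 0;
}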
+ **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } + + /* Blocked by MNG FW so don't reset PHY */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + return 0; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status; + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ + u16 phy_offset, control, eword, edata, block_crc; + bool end_data = false; + u16 list_offset, data_offset; + u16 phy_data = 0; + s32 ret_val; + u32 i; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return 0; + + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); + + /* reset the PHY and poll for completion */ + hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + (phy_data | MDIO_CTRL1_RESET)); + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, + &phy_data); + if ((phy_data & MDIO_CTRL1_RESET) == 0) + break; + usleep_range(10000, 20000); + } + + if ((phy_data & MDIO_CTRL1_RESET) != 0) { + hw_dbg(hw, "PHY reset did not complete.\n"); + 
return IXGBE_ERR_PHY; + } + + /* Get init offsets */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val) + return ret_val; + + ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); + data_offset++; + while (!end_data) { + /* + * Read control word from PHY init contents offset + */ + ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); + if (ret_val) + goto err_eeprom; + control = (eword & IXGBE_CONTROL_MASK_NL) >> + IXGBE_CONTROL_SHIFT_NL; + edata = eword & IXGBE_DATA_MASK_NL; + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; + hw_dbg(hw, "DELAY: %d MS\n", edata); + usleep_range(edata * 1000, edata * 2000); + break; + case IXGBE_DATA_NL: + hw_dbg(hw, "DATA:\n"); + data_offset++; + ret_val = hw->eeprom.ops.read(hw, data_offset++, + &phy_offset); + if (ret_val) + goto err_eeprom; + for (i = 0; i < edata; i++) { + ret_val = hw->eeprom.ops.read(hw, data_offset, + &eword); + if (ret_val) + goto err_eeprom; + hw->phy.ops.write_reg(hw, phy_offset, + MDIO_MMD_PMAPMD, eword); + hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, + phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; + hw_dbg(hw, "CONTROL:\n"); + if (edata == IXGBE_CONTROL_EOL_NL) { + hw_dbg(hw, "EOL\n"); + end_data = true; + } else if (edata == IXGBE_CONTROL_SOL_NL) { + hw_dbg(hw, "SOL\n"); + } else { + hw_dbg(hw, "Bad control value\n"); + return IXGBE_ERR_PHY; + } + break; + default: + hw_dbg(hw, "Bad control type\n"); + return IXGBE_ERR_PHY; + } + } + + return ret_val; + +err_eeprom: + hw_err(hw, "eeprom read at offset %d failed\n", data_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_identify_module_generic - Identifies module type + * @hw: pointer to hardware structure + * + * Determines HW type and calls appropriate function. + **/ +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) +{ + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + return ixgbe_identify_sfp_module_generic(hw); + case ixgbe_media_type_fiber_qsfp: + return ixgbe_identify_qsfp_module_generic(hw); + default: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + return IXGBE_ERR_SFP_NOT_PRESENT; + } + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. 
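The EEPROM-driven reset above walks a list of init words, each split into a control field and a data payload (a delay, a block of register data, or a control marker such as SOL/EOL). A standalone sketch of that decoding — userspace C; masks, shifts and the sample word are hypothetical stand-ins for the IXGBE_*_NL definitions:

#include <stdint.h>
#include <stdio.h>

#define CONTROL_MASK_NL  0xF000
#define CONTROL_SHIFT_NL 12
#define DATA_MASK_NL     0x0FFF

#define DELAY_NL   0x0
#define DATA_NL    0x1

int main(void)
{
    uint16_t eword = 0x0019;                  /* hypothetical list entry */
    uint16_t control = (eword & CONTROL_MASK_NL) >> CONTROL_SHIFT_NL;
    uint16_t edata = eword & DATA_MASK_NL;

    switch (control) {
    case DELAY_NL:
        printf("DELAY %u ms\n", edata);
        break;
    case DATA_NL:
        printf("DATA block of %u words follows\n", edata);
        break;
    default:
        printf("control marker 0x%x\n", control);
        break;
    }
    return 0;
}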
+ **/ +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + s32 status; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u16 enforce_sfp = 0; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + return IXGBE_ERR_SFP_NOT_PRESENT; + } + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status) + goto err_read_i2c_eeprom; + + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + * 11 SFP_1g_sx_CORE0 - 82599-specific + * 12 SFP_1g_sx_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; + else + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } else if (hw->mac.type == ixgbe_mac_82599EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core0; + else + 
hw->phy.sfp_type = + ixgbe_sfp_type_1g_lx_core1; + } else { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the SFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) + return 0; + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) + return 0; + + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) + return 0; + if (hw->allow_unsupported_sfp) { + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + return 0; + } + hw_dbg(hw, "SFP+ module not supported\n"); + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + return 0; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + } + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the QSFP module and assigns appropriate PHY type + **/ +static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + s32 status; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u16 enforce_sfp = 0; + u8 connector = 0; + u8 cable_length = 0; + u8 device_tech = 0; + bool active_cable = false; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + return IXGBE_ERR_SFP_NOT_PRESENT; + } + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + hw->phy.id = identifier; + + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, + &comp_codes_10g); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, + &comp_codes_1g); + + if (status != 0) + goto err_read_i2c_eeprom; + + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { + hw->phy.type = ixgbe_phy_qsfp_passive_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; + } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; + } else { + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) + active_cable = true; + + if (!active_cable) { + /* check for active DA cables that pre-date + * SFF-8436 v3.6 + */ + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CONNECTOR, + &connector); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CABLE_LENGTH, + &cable_length); + + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_DEVICE_TECH, + &device_tech); + + if ((connector == + IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && + (cable_length > 0) && + ((device_tech >> 4) == + IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) + active_cable = true; + } + + if (active_cable) { + hw->phy.type = ixgbe_phy_qsfp_active_unknown; + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + /* unsupported module type */ + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = true; + + /* Determine if the QSFP+ PHY is dual speed or not. 
*/ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status != 0) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status != 0) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + + hw->mac.ops.get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) + return 0; + if (hw->allow_unsupported_sfp) { + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); + return 0; + } + hw_dbg(hw, "QSFP module not supported\n"); + hw->phy.type = ixgbe_phy_sfp_unsupported; + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + return 0; + } + return 0; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence + * @hw: pointer to hardware structure + * @list_offset: offset to the SFP ID list + * @data_offset: offset to the SFP data block + * + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. 
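+ *
+ * The ID list in the EEPROM is a sequence of (module type, data offset) word
+ * pairs terminated by IXGBE_PHY_INIT_END_NL; each data block begins with a
+ * block CRC word followed by the control/data words that are parsed by
+ * ixgbe_reset_phy_nl().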
+ **/ +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset) +{ + u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; + + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return IXGBE_ERR_SFP_NOT_PRESENT; + + if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && + (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* + * Limiting active cables and 1G Phys must be initialized as + * SR modules + */ + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_lx_core0 || + sfp_type == ixgbe_sfp_type_1g_cu_core0 || + sfp_type == ixgbe_sfp_type_1g_sx_core0) + sfp_type = ixgbe_sfp_type_srlr_core0; + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_lx_core1 || + sfp_type == ixgbe_sfp_type_1g_cu_core1 || + sfp_type == ixgbe_sfp_type_1g_sx_core1) + sfp_type = ixgbe_sfp_type_srlr_core1; + + /* Read offset to PHY init contents */ + if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { + hw_err(hw, "eeprom read at %d failed\n", + IXGBE_PHY_INIT_OFFSET_NL); + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + } + + if ((!*list_offset) || (*list_offset == 0xFFFF)) + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + + /* Shift offset to first ID word */ + (*list_offset)++; + + /* + * Find the matching SFP ID in the EEPROM + * and program the init sequence + */ + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + + while (sfp_id != IXGBE_PHY_INIT_END_NL) { + if (sfp_id == sfp_type) { + (*list_offset)++; + if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) + goto err_phy; + if ((!*data_offset) || (*data_offset == 0xFFFF)) { + hw_dbg(hw, "SFP+ module not supported\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + break; + } + } else { + (*list_offset) += 2; + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + goto err_phy; + } + } + + if (sfp_id == IXGBE_PHY_INIT_END_NL) { + hw_dbg(hw, "No matching SFP+ module found\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return 0; + +err_phy: + hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); + return IXGBE_ERR_PHY; +} + +/** + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: byte offset at address 0xA2 + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) +{ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR2, + sff8472_data); +} + +/** + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. 
+ **/ +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) +{ + return hw->phy.ops.write_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status; + u32 max_retry = 10; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + bool nack = true; + *data = 0; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + ixgbe_i2c_start(hw); + + /* Device Address and write indication */ + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + ixgbe_i2c_start(hw); + + /* Device Address and read indication */ + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_in_i2c_byte(hw, data); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_bit(hw, nack); + if (status != 0) + goto fail; + + ixgbe_i2c_stop(hw); + break; + +fail: + ixgbe_i2c_bus_clear(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msleep(100); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte read error - Retrying.\n"); + else + hw_dbg(hw, "I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + return status; +} + +/** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
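+ *
+ * Note that @dev_addr is the full 8-bit I2C address byte (R/W bit included),
+ * so it is clocked out on the bus unmodified; the read variant above sets
+ * bit 0 for its repeated-start read phase.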
+ **/ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status; + u32 max_retry = 1; + u32 retry = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + + do { + ixgbe_i2c_start(hw); + + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, data); + if (status != 0) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != 0) + goto fail; + + ixgbe_i2c_stop(hw); + break; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte write error - Retrying.\n"); + else + hw_dbg(hw, "I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + return status; +} + +/** + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void ixgbe_i2c_start(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + /* Start condition must begin with data and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 1); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + udelay(IXGBE_I2C_T_SU_STA); + + ixgbe_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + udelay(IXGBE_I2C_T_HD_STA); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + +} + +/** + * ixgbe_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void ixgbe_i2c_stop(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + /* Stop condition must begin with data low and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 0); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + udelay(IXGBE_I2C_T_SU_STO); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + udelay(IXGBE_I2C_T_BUF); +} + +/** + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +{ + s32 i; + bool bit = false; + + for (i = 7; i >= 0; i--) { + ixgbe_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return 0; +} + +/** + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +{ + s32 status; + s32 i; + u32 i2cctl; + bool bit = false; + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = ixgbe_clock_out_i2c_bit(hw, bit); + + if (status != 0) + break; + } + + /* Release SDA line (set high) */ + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + + return status; +} + +/** + * 
ixgbe_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 i = 0; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 timeout = 10; + bool ack = true; + + ixgbe_raise_i2c_clk(hw, &i2cctl); + + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + /* Poll for ACK. Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); + + udelay(1); + if (ack == 0) + break; + } + + if (ack == 1) { + hw_dbg(hw, "I2C ack was not received.\n"); + status = IXGBE_ERR_I2C; + } + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + + return status; +} + +/** + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + udelay(IXGBE_I2C_T_LOW); + + return 0; +} + +/** + * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + status = ixgbe_set_i2c_data(hw, &i2cctl, data); + if (status == 0) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
+ */ + udelay(IXGBE_I2C_T_LOW); + } else { + hw_dbg(hw, "I2C data was not set to %X\n", data); + return IXGBE_ERR_I2C; + } + + return 0; +} +/** + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + u32 i = 0; + u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; + + for (i = 0; i < timeout; i++) { + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ + udelay(IXGBE_I2C_T_RISE); + + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) + break; + } +} + +/** + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + + *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + udelay(IXGBE_I2C_T_FALL); +} + +/** + * ixgbe_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +{ + if (data) + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + else + *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw); + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + + /* Verify data was set correctly */ + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { + hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); + return IXGBE_ERR_I2C; + } + + return 0; +} + +/** + * ixgbe_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) +{ + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) + return true; + return false; +} + +/** + * ixgbe_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i; + + ixgbe_i2c_start(hw); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + udelay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + udelay(IXGBE_I2C_T_LOW); + } + + ixgbe_i2c_start(hw); + + /* Put the i2c bus back to default state */ + ixgbe_i2c_stop(hw); +} + +/** + * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
+ * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ + u16 phy_data = 0; + + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + return 0; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + MDIO_MMD_PMAPMD, &phy_data); + + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + return 0; + + return IXGBE_ERR_OVERTEMP; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h new file mode 100644 index 000000000..434643881 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -0,0 +1,168 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 + +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C +#define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_COMP 0x5E +#define IXGBE_SFF_SFF_8472_OSCB 0x6E +#define IXGBE_SFF_SFF_8472_ESCB 0x76 +#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6 +#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7 +#define IXGBE_SFF_QSFP_CONNECTOR 0x82 +#define IXGBE_SFF_QSFP_10GBE_COMP 0x83 +#define IXGBE_SFF_QSFP_1GBE_COMP 0x86 +#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92 +#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 + +/* Bitmasks */ +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_SFF_ADDRESSING_MODE 0x4 +#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 +#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 +#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 + +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 + +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance code */ +#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); + +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data); +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + 
u16 reg, u16 *val); +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val); +#endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c new file mode 100644 index 000000000..e5ba04025 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -0,0 +1,997 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ +#include "ixgbe.h" +#include + +/* + * The 82599 and the X540 do not have true 64bit nanosecond scale + * counter registers. Instead, SYSTIME is defined by a fixed point + * system which allows the user to define the scale counter increment + * value at every level change of the oscillator driving the SYSTIME + * value. For both devices the TIMINCA:IV field defines this + * increment. On the X540 device, 31 bits are provided. However on the + * 82599 only provides 24 bits. The time unit is determined by the + * clock frequency of the oscillator in combination with the TIMINCA + * register. When these devices link at 10Gb the oscillator has a + * period of 6.4ns. In order to convert the scale counter into + * nanoseconds the cyclecounter and timecounter structures are + * used. The SYSTIME registers need to be converted to ns values by use + * of only a right shift (division by power of 2). The following math + * determines the largest incvalue that will fit into the available + * bits in the TIMINCA register. + * + * PeriodWidth: Number of bits to store the clock period + * MaxWidth: The maximum width value of the TIMINCA register + * Period: The clock period for the oscillator + * round(): discard the fractional portion of the calculation + * + * Period * [ 2 ^ ( MaxWidth - PeriodWidth ) ] + * + * For the X540, MaxWidth is 31 bits, and the base period is 6.4 ns + * For the 82599, MaxWidth is 24 bits, and the base period is 6.4 ns + * + * The period also changes based on the link speed: + * At 10Gb link or no link, the period remains the same. + * At 1Gb link, the period is multiplied by 10. (64ns) + * At 100Mb link, the period is multiplied by 100. (640ns) + * + * The calculated value allows us to right shift the SYSTIME register + * value in order to quickly convert it into a nanosecond clock, + * while allowing for the maximum possible adjustment value. 
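+ *
+ * For example, at 10Gb link the 6.4 ns period with a 28 bit fractional
+ * shift gives
+ *
+ *   round(6.4 * 2^28) = 0x66666666 (IXGBE_INCVAL_10GB, IXGBE_INCVAL_SHIFT_10GB)
+ *
+ * which fits the X540's 31 bit field directly. On the 82599 the value and
+ * shift are both reduced by IXGBE_INCVAL_SHIFT_82599 (7) bits, giving
+ * 0x00CCCCCC with a shift of 21 so that the value fits the 24 bit field.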
+ * + * These diagrams are only for the 10Gb link period + * + * SYSTIMEH SYSTIMEL + * +--------------+ +--------------+ + * X540 | 32 | | 1 | 3 | 28 | + * *--------------+ +--------------+ + * \________ 36 bits ______/ fract + * + * +--------------+ +--------------+ + * 82599 | 32 | | 8 | 3 | 21 | + * *--------------+ +--------------+ + * \________ 43 bits ______/ fract + * + * The 36 bit X540 SYSTIME overflows every + * 2^36 * 10^-9 / 60 = 1.14 minutes or 69 seconds + * + * The 43 bit 82599 SYSTIME overflows every + * 2^43 * 10^-9 / 3600 = 2.4 hours + */ +#define IXGBE_INCVAL_10GB 0x66666666 +#define IXGBE_INCVAL_1GB 0x40000000 +#define IXGBE_INCVAL_100 0x50000000 + +#define IXGBE_INCVAL_SHIFT_10GB 28 +#define IXGBE_INCVAL_SHIFT_1GB 24 +#define IXGBE_INCVAL_SHIFT_100 21 + +#define IXGBE_INCVAL_SHIFT_82599 7 +#define IXGBE_INCPER_SHIFT_82599 24 +#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL + +#define IXGBE_OVERFLOW_PERIOD (HZ * 30) +#define IXGBE_PTP_TX_TIMEOUT (HZ * 15) + +/* half of a one second clock period, for use with PPS signal. We have to use + * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in + * order to force at least 64bits of precision for shifting + */ +#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL + +/** + * ixgbe_ptp_setup_sdp + * @hw: the hardware private structure + * + * this function enables or disables the clock out feature on SDP0 for + * the X540 device. It will create a 1second periodic output that can + * be used as the PPS (via an interrupt). + * + * It calculates when the systime will be on an exact second, and then + * aligns the start of the PPS signal to that value. The shift is + * necessary because it can change based on the link speed. + */ +static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int shift = adapter->cc.shift; + u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; + u64 ns = 0, clock_edge = 0; + + if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) && + (hw->mac.type == ixgbe_mac_X540)) { + + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); + + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* + * enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= (IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE); + + /* + * enable the Clock Out feature on SDP0, and allow + * interrupts to occur when the pin changes + */ + tsauxc = (IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT); + + /* clock period (or pulse length) */ + clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); + clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); + + /* + * Account for the cyclecounter wrap-around value by + * using the converted ns value of the current time to + * check for when the next aligned second would occur. 
+ */ + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + ns = timecounter_cyc2time(&adapter->tc, clock_edge); + + div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); + clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); + + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); + + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + } else { + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) + * @cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) +{ + struct ixgbe_adapter *adapter = + container_of(cc, struct ixgbe_adapter, cc); + struct ixgbe_hw *hw = &adapter->hw; + u64 stamp = 0; + + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + + return stamp; +} + +/** + * ixgbe_ptp_adjfreq + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ +static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; + u64 freq; + u32 diff, incval; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); + incval = ACCESS_ONCE(adapter->base_incval); + + freq = incval; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + incval = neg_adj ? (incval - diff) : (incval + diff); + + switch (hw->mac.type) { + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | + incval); + break; + default: + break; + } + + return 0; +} + +/** + * ixgbe_ptp_adjtime + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. + */ +static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_adjtime(&adapter->tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + ixgbe_ptp_setup_sdp(adapter); + + return 0; +} + +/** + * ixgbe_ptp_gettime + * @ptp: the ptp clock structure + * @ts: timespec structure to hold the current time value + * + * read the timecounter and return the correct value on ns, + * after converting it into a struct timespec. 
+ */ +static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * ixgbe_ptp_settime + * @ptp: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. + */ +static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + + ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + ixgbe_ptp_setup_sdp(adapter); + return 0; +} + +/** + * ixgbe_ptp_feature_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + * enable (or disable) ancillary features of the phc subsystem. + * our driver only supports the PPS feature on the X540 + */ +static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + + /** + * When PPS is enabled, unmask the interrupt for the ClockOut + * feature, so that the interrupt handler can send the PPS + * event when the clock SDP triggers. Clear mask when PPS is + * disabled + */ + if (rq->type == PTP_CLK_REQ_PPS) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + if (on) + adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; + else + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + + ixgbe_ptp_setup_sdp(adapter); + return 0; + default: + break; + } + } + + return -ENOTSUPP; +} + +/** + * ixgbe_ptp_check_pps_event + * @adapter: the private adapter structure + * @eicr: the interrupt cause register value + * + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ptp_clock_event event; + + event.type = PTP_CLOCK_PPS; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. + */ + if (!adapter->ptp_clock) + return; + + switch (hw->mac.type) { + case ixgbe_mac_X540: + ptp_clock_event(adapter->ptp_clock, &event); + break; + default: + break; + } +} + +/** + * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @adapter: private adapter struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute. 
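+ * IXGBE_OVERFLOW_PERIOD is 30 * HZ, so the timecounter is refreshed roughly
+ * every 30 seconds, which keeps the reads well inside the ~69 second X540
+ * SYSTIME wrap described in the comment at the top of this file.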
+ */ +void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + + IXGBE_OVERFLOW_PERIOD); + struct timespec64 ts; + + if (timeout) { + ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } +} + +/** + * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @adapter: private network adapter structure + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + */ +void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + unsigned long rx_event; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) { + adapter->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; + if (time_after(adapter->last_rx_timestamp, rx_event)) + rx_event = adapter->last_rx_timestamp; + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5*HZ)) { + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + + e_warn(drv, "clearing RX Timestamp hang\n"); + } +} + +/** + * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: the private adapter struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval = 0, ns; + unsigned long flags; + + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, regval); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +} + +/** + * ixgbe_ptp_tx_hwtstamp_work + * @work: pointer to the work struct + * + * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware + * timestamp has been taken for the current skb. It is necessary, because the + * descriptor's "done" bit does not correlate with the timestamp event. 
+ */ +static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) +{ + struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter, + ptp_tx_work); + struct ixgbe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + + IXGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + + if (timeout) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + e_warn(drv, "clearing Tx Timestamp hang\n"); + return; + } + + tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) + ixgbe_ptp_tx_hwtstamp(adapter); + else + /* reschedule to keep checking if it's not available yet */ + schedule_work(&adapter->ptp_tx_work); +} + +/** + * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * @adapter: pointer to adapter struct + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps *shhwtstamps; + u64 regval = 0, ns; + u32 tsyncrxctl; + unsigned long flags; + + tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) + return; + + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, regval); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + adapter->last_rx_timestamp = jiffies; +} + +int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? -EFAULT : 0; +} + +/** + * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode + * @adapter: the private ixgbe adapter structure + * @config: the hwtstamp configuration requested + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. + * + * Note: this may modify the hwtstamp configuration towards a more general + * mode, if required to support the specifically requested mode. 
+ */ +static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + /* reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_ALL: + default: + /* + * register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages and hardware does not support + * timestamping all packets => return error + */ + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == ixgbe_mac_82598EB) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -ERANGE; + return 0; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), + (IXGBE_ETQF_FILTER_EN | /* enable filter */ + IXGBE_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); + + /* enable/disable TX */ + regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + regval &= ~IXGBE_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl); + + IXGBE_WRITE_FLUSH(hw); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); + regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + + return 0; +} + +/** + * ixgbe_ptp_set_ts_config - user entry point for timestamp mode + * @adapter: pointer to adapter struct + * @ifreq: ioctl data + * + * Set hardware to requested mode. If unsupported, return an error with no + * changes. Otherwise, store the mode for future reference. 
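+ *
+ * Typically reached from the SIOCSHWTSTAMP ioctl path.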
+ */ +int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = ixgbe_ptp_set_timestamp_mode(adapter, &config); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&adapter->tstamp_config, &config, + sizeof(adapter->tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/** + * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw + * @adapter: pointer to the adapter structure + * + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. + */ +void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 incval = 0; + u32 shift = 0; + unsigned long flags; + + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added + * or subtracted. The drawbacks of a large factor include + * (a) the clock register overflows more quickly, (b) the cycle + * counter structure must be able to convert the systime value + * to nanoseconds using only a multiplier and a right-shift, + * and (c) the value must fit within the timinca register space + * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. Set the registers correctly even when link is + * down to preserve the clock setting + */ + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + incval = IXGBE_INCVAL_100; + shift = IXGBE_INCVAL_SHIFT_100; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + incval = IXGBE_INCVAL_1GB; + shift = IXGBE_INCVAL_SHIFT_1GB; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + default: + incval = IXGBE_INCVAL_10GB; + shift = IXGBE_INCVAL_SHIFT_10GB; + break; + } + + /** + * Modify the calculated values to fit within the correct + * number of bits specified by the hardware. The 82599 doesn't + * have the same space as the X540, so bitshift the calculated + * values to fit. + */ + switch (hw->mac.type) { + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + incval >>= IXGBE_INCVAL_SHIFT_82599; + shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | + incval); + break; + default: + /* other devices aren't supported */ + return; + } + + /* update the base incval used to calculate frequency adjustment */ + ACCESS_ONCE(adapter->base_incval) = incval; + smp_mb(); + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + memset(&adapter->cc, 0, sizeof(adapter->cc)); + adapter->cc.read = ixgbe_ptp_read; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.shift = shift; + adapter->cc.mult = 1; + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ixgbe_ptp_reset + * @adapter: the ixgbe private board structure + * + * When the MAC resets, all the hardware bits for timesync are reset. 
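To make the scaling comment above concrete: SYSTIME advances by incval, a nanosecond value pre-shifted left by `shift`, on every internal clock tick, so converting a SYSTIME delta back to nanoseconds only needs mult = 1 and the matching right shift, and frequency tuning is a proportional trim of the base incval. A self-contained sketch of that arithmetic follows; it mirrors the structure of the kernel helpers and the adjfreq callback but is not a copy of them.

#include <stdint.h>

/* Same basic math as the kernel's cyclecounter conversion: with mult = 1 the
 * right shift simply undoes the fixed-point scaling baked into incval. */
static inline uint64_t systime_delta_to_ns(uint64_t delta, uint32_t mult,
					   uint32_t shift)
{
	return (delta * mult) >> shift;
}

/* Proportional frequency trim in parts per billion, illustrating how a new
 * TIMINCA value can be derived from base_incval. */
static inline uint32_t trim_incval(uint32_t base_incval, int32_t ppb)
{
	uint64_t diff = (uint64_t)base_incval * (uint64_t)(ppb < 0 ? -ppb : ppb);

	diff /= 1000000000ULL;
	return ppb < 0 ? base_incval - (uint32_t)diff
		       : base_incval + (uint32_t)diff;
}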
This + * function is used to re-enable the device for PTP based on current settings. + * We do lose the current clock time, so just reset the cyclecounter to the + * system real clock time. + * + * This function will maintain hwtstamp_config settings, and resets the SDP + * output if it was enabled. + */ +void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; + + /* set SYSTIME registers to 0 just in case */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); + IXGBE_WRITE_FLUSH(hw); + + /* reset the hardware timestamping mode */ + ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + + ixgbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + /* reset the ns time counter */ + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + /* + * Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + ixgbe_ptp_setup_sdp(adapter); +} + +/** + * ixgbe_ptp_create_clock + * @adapter: the ixgbe private adapter structure + * + * This function performs setup of the user entry point function table and + * initializes the PTP clock device, which is used to access the clock-like + * features of the PTP core. It will be called by ixgbe_ptp_init, only if + * there isn't already a clock device (such as after a suspend/resume cycle, + * where the clock device wasn't destroyed). + */ +static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(adapter->ptp_clock)) + return 0; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + snprintf(adapter->ptp_caps.name, + sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 1; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + break; + case ixgbe_mac_82599EB: + snprintf(adapter->ptp_caps.name, + sizeof(adapter->ptp_caps.name), + "%s", netdev->name); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 250000000; + adapter->ptp_caps.n_alarm = 0; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; + adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + break; + default: + adapter->ptp_clock = NULL; + return -EOPNOTSUPP; + } + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + return err; + } else + e_dev_info("registered PHC device on %s\n", netdev->name); + + /* set default timestamp mode to disabled here. 
We do this in + * create_clock instead of init, because we don't want to override the + * previous settings during a resume cycle. + */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +/** + * ixgbe_ptp_init + * @adapter: the ixgbe private adapter structure + * + * This function performs the required steps for enabling PTP + * support. If PTP support has already been loaded it simply calls the + * cyclecounter init routine and exits. + */ +void ixgbe_ptp_init(struct ixgbe_adapter *adapter) +{ + /* initialize the spin lock first since we can't control when a user + * will call the entry functions once we have initialized the clock + * device + */ + spin_lock_init(&adapter->tmreg_lock); + + /* obtain a PTP device, or re-use an existing device */ + if (ixgbe_ptp_create_clock(adapter)) + return; + + /* we have a clock so we can initialize work now */ + INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); + + /* reset the PTP related hardware bits */ + ixgbe_ptp_reset(adapter); + + /* enter the IXGBE_PTP_RUNNING state */ + set_bit(__IXGBE_PTP_RUNNING, &adapter->state); + + return; +} + +/** + * ixgbe_ptp_suspend - stop PTP work items + * @ adapter: pointer to adapter struct + * + * this function suspends PTP activity, and prevents more PTP work from being + * generated, but does not destroy the PTP clock device. + */ +void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) +{ + /* Leave the IXGBE_PTP_RUNNING state. */ + if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + return; + + /* since this might be called in suspend, we don't clear the state, + * but simply reset the auxiliary PPS signal control register + */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0); + + /* ensure that we cancel any pending PTP Tx work item in progress */ + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); + } +} + +/** + * ixgbe_ptp_stop - close the PTP device + * @adapter: pointer to adapter struct + * + * completely destroy the PTP device, should only be called when the device is + * being fully closed. + */ +void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) +{ + /* first, suspend PTP activity */ + ixgbe_ptp_suspend(adapter); + + /* disable the PTP clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_info("removed PHC on %s\n", + adapter->netdev->name); + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c new file mode 100644 index 000000000..1d17b5872 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -0,0 +1,1434 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include <linux/string.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <linux/if_bridge.h> +#ifdef NETIF_F_HW_VLAN_CTAG_TX +#include <linux/if_vlan.h> +#endif + +#include "ixgbe.h" +#include "ixgbe_type.h" +#include "ixgbe_sriov.h" + +#ifdef CONFIG_PCI_IOV +static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int num_vf_macvlans, i; + struct vf_macvlans *mv_list; + + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + + num_vf_macvlans = hw->mac.num_rar_entries - + (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); + + adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, + sizeof(struct vf_macvlans), + GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + /* Initialize default switching mode VEB */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + adapter->bridge_mode = BRIDGE_MODE_VEB; + + /* If call to enable VFs succeeded then allocate memory + * for per VF control structures. + */ + adapter->vfinfo = + kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (adapter->vfinfo) { + /* limit traffic classes based on VFs enabled */ + if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && + (adapter->num_vfs < 16)) { + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + } else if (adapter->num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | + IXGBE_FLAG2_RSC_ENABLED); + + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ + adapter->vfinfo[i].spoofchk_enabled = true; + + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default.
+ */ + adapter->vfinfo[i].rss_query_enabled = 0; + } + + return 0; + } + + return -ENOMEM; +} + +/* Note this function is called when the user wants to enable SR-IOV + * VFs using the now deprecated module parameter + */ +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) +{ + int pre_existing_vfs = 0; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) + return; + + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); + } else { + int err; + /* + * The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. If the user requests greater than + * 63 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + adapter->num_vfs = 0; + return; + } + } + + if (!__ixgbe_enable_sriov(adapter)) + return; + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. + */ + e_err(probe, "Unable to allocate memory for VF Data Storage - " + "SRIOV disabled\n"); + ixgbe_disable_sriov(adapter); +} + +#endif /* #ifdef CONFIG_PCI_IOV */ +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gpie; + u32 vmdctl; + int rss; + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(adapter->pdev)) { + e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + + /* turn off device IOV mode */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* set default pool back to 0 */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + IXGBE_WRITE_FLUSH(hw); + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); + } else { + rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); + } + + 
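The "new PCI SR-IOV sysfs interface" mentioned in the comment above is the per-device sriov_numvfs attribute, which ends up in ixgbe_pci_sriov_configure() further down. An illustrative user-space sketch; the PCI address used in the usage comment is an assumed example.

#include <stdio.h>

static int set_num_vfs(const char *pci_addr, int num_vfs)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/sriov_numvfs", pci_addr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", num_vfs);	/* writing 0 disables SR-IOV again */
	return fclose(f);
}

/* e.g. set_num_vfs("0000:03:00.0", 4); */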
adapter->ring_feature[RING_F_VMDQ].offset = 0; + adapter->ring_feature[RING_F_RSS].limit = rss; + + /* take a breather then clean up driver data */ + msleep(100); + return 0; +} + +static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ixgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return num_vfs; + + if (err) + return err; + + /* While the SR-IOV capability structure reports total VFs to be 64, + * we have to limit the actual number allocated based on two factors. + * First, we reserve some transmit/receive resources for the PF. + * Second, VMDQ also uses the same pools that SR-IOV does. We need to + * account for this, so that we don't accidentally allocate more VFs + * than we have available pools. The PCI bus driver already checks for + * other values out of range. + */ + if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS) + return -EPERM; + + adapter->num_vfs = num_vfs; + + err = __ixgbe_enable_sriov(adapter); + if (err) + return err; + + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(dev, (i | 0x10000000)); + + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + return err; + } + ixgbe_sriov_reinit(adapter); + + return num_vfs; +#else + return 0; +#endif +} + +static int ixgbe_pci_sriov_disable(struct pci_dev *dev) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err; +#ifdef CONFIG_PCI_IOV + u32 current_flags = adapter->flags; +#endif + + err = ixgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ +#ifdef CONFIG_PCI_IOV + if (!err && current_flags != adapter->flags) + ixgbe_sriov_reinit(adapter); +#endif + + return err; +} + +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + if (num_vfs == 0) + return ixgbe_pci_sriov_disable(dev); + else + return ixgbe_pci_sriov_enable(dev, num_vfs); +} + +static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + /* only so many hash values supported */ + entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) { + vfinfo->vf_mc_hashes[i] = hash_list[i]; + } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } + vmolr |= IXGBE_VMOLR_ROMPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct 
vf_data_storage *vfinfo; + int i, j; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + for (i = 0; i < adapter->num_vfs; i++) { + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); + vfinfo = &adapter->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + hw->addr_ctrl.mta_in_use++; + vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; + vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } + + if (vfinfo->num_vf_mc_hashes) + vmolr |= IXGBE_VMOLR_ROMPE; + else + vmolr &= ~IXGBE_VMOLR_ROMPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); + } + + /* Restore any VF macvlans */ + ixgbe_full_sync_mac_table(adapter); +} +#endif + +static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, + u32 vf) +{ + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); +} + +static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = msgbuf[1]; + u32 max_frs; + + /* + * For 82599EB we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. In order to guarantee this is handled correctly + * for all cases we have several special exceptions to take into + * account before we can enable the VF for receive + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 reg_offset, vf_shift, vfre; + s32 err = 0; + +#ifdef CONFIG_FCOE + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); + +#endif /* CONFIG_FCOE */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + /* + * Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; + default: + /* + * If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx path as we cannot + * support jumbo frames on legacy VFs + */ + if ((pf_max_frame > ETH_FRAME_LEN) || + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) + err = -EINVAL; + break; + } + + /* determine VF receive enable location */ + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable or disable receive depending on error */ + vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + if (err) + vfre &= ~(1 << vf_shift); + else + vfre |= 1 << vf_shift; + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); + + if (err) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return err; + } + } + + /* MTU < 68 is an error and causes problems on some kernels */ + if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { + e_err(drv, "VF max_frame %d out of range\n", max_frame); + return -EINVAL; + } + + /* pull current max frame size from hardware */ + max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + max_frs &= IXGBE_MHADD_MFS_MASK; + max_frs >>= IXGBE_MHADD_MFS_SHIFT; + + if (max_frs < max_frame) { + max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + + e_info(hw, "VF requests change max MTU to %d\n", max_frame); + + return 0; +} + +static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) +{ + u32 vmolr = IXGBE_READ_REG(hw, 
IXGBE_VMOLR(vf)); + vmolr |= IXGBE_VMOLR_BAM; + if (aupe) + vmolr |= IXGBE_VMOLR_AUPE; + else + vmolr &= ~IXGBE_VMOLR_AUPE; + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); +} + +static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); +} +static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(adapter->netdev); + + /* add PF assigned VLAN or VLAN 0 */ + ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ixgbe_clear_vmvir(adapter, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + } + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + ixgbe_set_rx_mode(adapter->netdev); + + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; +} + +static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, + int vf, unsigned char *mac_addr) +{ + ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); + ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); + + return 0; +} + +static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, + int vf, int index, unsigned char *mac_addr) +{ + struct list_head *pos; + struct vf_macvlans *entry; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + ixgbe_del_mac_filter(adapter, + entry->vf_macvlan, vf); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. 
+ */ + if (!entry || !entry->free) + return -ENOSPC; + + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + + ixgbe_add_mac_filter(adapter, mac_addr, vf); + + return 0; +} + +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) + eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); + + return 0; +} + +static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + int i; + + for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { + u32 reg; + + /* flush previous write */ + IXGBE_WRITE_FLUSH(hw); + + /* indicate to hardware that we want to set drop enable */ + reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; + reg |= i << IXGBE_QDE_IDX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); + } +} + +static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, reg_offset, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + int i; + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + + /* reset the filters for the device */ + ixgbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + ixgbe_set_vf_mac(adapter, vf, vf_mac); + + vf_shift = vf % 32; + reg_offset = vf / 32; + + /* enable transmit for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg |= 1 << vf_shift; + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + + /* force drop enable for all VF Rx queues */ + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); + + /* enable receive for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + reg |= 1 << vf_shift; + /* + * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. 
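The q_per_pool expression used above is terse: __ALIGN_MASK(1, ~mask) reduces to mask & -mask, i.e. the lowest set bit of the VMDq ring-feature mask, which is the queue stride of one pool. A small standalone check; the 0x78/0x7C masks are examples of 8- and 4-queue pool layouts, not values quoted from this patch.

#include <assert.h>
#include <stdint.h>

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))	/* kernel definition */

int main(void)
{
	uint32_t mask8 = 0x78, mask4 = 0x7C;	/* example VMDq pool masks */

	/* (1 + ~m) & m == m & -m: the lowest set bit, i.e. queues per pool */
	assert(__ALIGN_MASK(1, ~mask8) == 8);
	assert(__ALIGN_MASK(1, ~mask4) == 4);
	return 0;
}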
+ * For more info take a look at ixgbe_set_vf_lpe + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +#ifdef CONFIG_FCOE + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); + +#endif /* CONFIG_FCOE */ + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~(1 << vf_shift); + } + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* Enable counting of spoofed packets in the SSVPC register */ + reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); + reg |= (1 << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + + /* + * Reset the VFs TDWBAL and TDWBAH registers + * which are not cleared by an FLR + */ + for (i = 0; i < q_per_pool; i++) { + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); + IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); + } + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + dev_warn(&adapter->pdev->dev, + "VF %d has no MAC address assigned, you may have to assign one manually\n", + vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { + e_warn(drv, + "VF %d attempted to override administratively set MAC address\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= IXGBE_VLVF_ENTRIES) + regindex = -1; + + return regindex; +} + +static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + int err; + s32 reg_ndx; + u32 vlvf; + u32 bits; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. 
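The reset reply built above is a four-word message: word 0 carries IXGBE_VF_RESET plus ACK or NACK, the bytes of words 1-2 carry the permanent MAC, and word 3 carries the multicast filter type. An illustrative sketch of how the VF end can unpack it (structured after what the VF driver does, not copied from it; the struct and helper names are my own):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct vf_permaddr {
	uint8_t  mac[6];
	uint32_t mc_filter_type;
	bool     mac_assigned;
};

/* ack_bit is the mailbox ACK flag; a NACK means the PF assigned no MAC yet. */
static void parse_permaddr_reply(const uint32_t msgbuf[4], uint32_t ack_bit,
				 struct vf_permaddr *out)
{
	out->mac_assigned = (msgbuf[0] & ack_bit) != 0;
	memcpy(out->mac, &msgbuf[1], sizeof(out->mac));
	out->mc_filter_type = msgbuf[3];
}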
+ */ + if (add && adapter->netdev->flags & IFF_PROMISC) + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + + err = ixgbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. + */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { + reg_ndx = ixgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + return err; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx)); + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + if (VMDQ_P(0) < 32) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + bits &= ~(1 << VMDQ_P(0)); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(reg_ndx * 2) + 1); + bits &= ~(1 << (VMDQ_P(0) - 32)); + bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2)); + } + + /* If the filter was removed then ensure PF pool bit + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ + if ((vlvf & VLAN_VID_MASK) == vid && + !test_bit(vid, adapter->active_vlans) && !bits) + ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + + return err; +} + +static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. 
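The VLVFB reads above are easier to follow with the register layout in mind: each VLVF entry owns 64 pool-enable bits split across a pair of 32-bit VLVFB words, low word for pools 0-31 and high word for pools 32-63, which is exactly the VMDQ_P(0) < 32 split handled here. A small illustrative helper (names are my own, not from the driver):

#include <stdint.h>

struct vlvfb_slot {
	uint32_t word;	/* 0 = low word (pools 0-31), 1 = high word (pools 32-63) */
	uint32_t bit;	/* bit within that 32-bit word */
};

static inline struct vlvfb_slot vlvfb_slot_for_pool(uint32_t pool)
{
	struct vlvfb_slot s = { .word = pool / 32, .bit = pool % 32 };

	return s;
}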
+ */ + if (adapter->vfinfo[vf].spoofchk_enabled) + ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); + } + + err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no space for it\n", + vf); + + return err < 0; +} + +static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case ixgbe_mbox_api_10: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: + break; + } + + e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + + return -1; +} + +static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + u8 num_tcs = netdev_get_num_tc(dev); + + /* verify the PF is supporting the correct APIs */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_20: + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + break; + default: + return -1; + } + + /* only allow 1 Tx queue for bandwidth limiting */ + msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + /* if TCs > 1 determine which TC belongs to default user priority */ + if (num_tcs > 1) + default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); + + /* notify VF of need for VLAN tag stripping, and correct queue */ + if (num_tcs) + msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; + else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) + msgbuf[IXGBE_VF_TRANS_VLAN] = 1; + else + msgbuf[IXGBE_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 i, j; + u32 *out_buf = &msgbuf[1]; + const u8 *reta = adapter->rss_indir_tbl; + u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + /* Check if operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + /* This mailbox command is supported (required) only for 82599 and x540 + * VFs which support up to 4 RSS queues. Therefore we will compress the + * RETA by saving only 2 bits from each entry. This way we will be able + * to transfer the whole RETA in a single mailbox operation. 
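The compression described above packs sixteen 2-bit RETA entries into each 32-bit mailbox word, so a 64-entry table travels in four words. For reference, the inverse operation the VF side has to perform looks like this (an illustrative sketch, not the actual VF driver code):

#include <stdint.h>

static void unpack_vf_reta(const uint32_t *words, uint8_t *reta,
			   unsigned int reta_size)
{
	unsigned int i;

	for (i = 0; i < reta_size; i++)
		reta[i] = (words[i / 16] >> (2 * (i % 16))) & 0x3;
}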
+ */ + for (i = 0; i < reta_size / 16; i++) { + out_buf[i] = 0; + for (j = 0; j < 16; j++) + out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); + } + + return 0; +} + +static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *rss_key = &msgbuf[1]; + + /* Check if the operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + + return 0; +} + +static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ + u32 mbx_size = IXGBE_VFMAILBOX_SIZE; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + struct ixgbe_hw *hw = &adapter->hw; + s32 retval; + + retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); + + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) + return 0; + + /* flush the ack before we write any messages back */ + IXGBE_WRITE_FLUSH(hw); + + if (msgbuf[0] == IXGBE_VF_RESET) + return ixgbe_vf_reset_msg(adapter, vf); + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. + */ + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + ixgbe_write_mbx(hw, msgbuf, 1, vf); + return 0; + } + + switch ((msgbuf[0] & 0xFFFF)) { + case IXGBE_VF_SET_MAC_ADDR: + retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_MULTICAST: + retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_VLAN: + retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_LPE: + retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_MACVLAN: + retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_API_NEGOTIATE: + retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_QUEUES: + retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_RETA: + retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_RSS_KEY: + retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = IXGBE_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + else + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + + msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; + + ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 msg = IXGBE_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + ixgbe_write_mbx(hw, &msg, 1, vf); +} + +void ixgbe_msg_task(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vf; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!ixgbe_check_for_rst(hw, vf)) + ixgbe_vf_reset_event(adapter, vf); + + /* process any messages pending */ + if (!ixgbe_check_for_msg(hw, vf)) + ixgbe_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!ixgbe_check_for_ack(hw, vf)) + ixgbe_rcv_ack_from_vf(adapter, vf); + } +} + +void 
ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* disable transmit and receive for all vfs */ + IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); + + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); +} + +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) { + ping = IXGBE_PF_CONTROL_MSG; + if (adapter->vfinfo[i].clear_to_send) + ping |= IXGBE_VT_MSGTYPE_CTS; + ixgbe_write_mbx(hw, &ping, 1, i); + } +} + +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + return ixgbe_set_vf_mac(adapter, vf, mac); +} + +static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + + ixgbe_set_vmvir(adapter, vlan, qos, vf); + ixgbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].vlan_count++; + + /* enable hide vlan on X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | + IXGBE_QDE_HIDE_VLAN); + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + +out: + return err; +} + +static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ixgbe_clear_vmvir(adapter, vf); + ixgbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + /* disable hide VLAN on X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); + + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} + +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) +{ + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + if (vlan || qos) { + /* Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
+ */ + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); + } else { + err = ixgbe_disable_port_vlan(adapter, vf); + } + +out: + return err; +} + +static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) +{ + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + return 100; + case IXGBE_LINK_SPEED_1GB_FULL: + return 1000; + case IXGBE_LINK_SPEED_10GB_FULL: + return 10000; + default: + return 0; + } +} + +static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = 0; + u16 queue, queues_per_pool; + u16 tx_rate = adapter->vfinfo[vf].tx_rate; + + if (tx_rate) { + /* start with base link speed value */ + bcnrc_val = adapter->vf_rate_link_speed; + + /* Calculate the rate factor values to set */ + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= tx_rate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; + } + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported + * and 0x004 otherwise. + */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); + break; + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); + break; + default: + break; + } + + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + } +} + +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) +{ + int i; + + /* VF Tx rate limit was not set */ + if (!adapter->vf_rate_link_speed) + return; + + if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. 
VF Transmit rate is disabled\n"); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (!adapter->vf_rate_link_speed) + adapter->vfinfo[i].tx_rate = 0; + + ixgbe_set_vf_rate_limit(adapter, i); + } +} + +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int link_speed; + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; + + /* verify link is up */ + if (!adapter->link_up) + return -EINVAL; + + /* verify we are linked at 10Gbps */ + link_speed = ixgbe_link_mbps(adapter); + if (link_speed != 10000) + return -EINVAL; + + if (min_tx_rate) + return -EINVAL; + + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; + + /* store values */ + adapter->vf_rate_link_speed = link_speed; + adapter->vfinfo[vf].tx_rate = max_tx_rate; + + /* update hardware configuration */ + ixgbe_set_vf_rate_limit(adapter, vf); + + return 0; +} + +int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8; + struct ixgbe_hw *hw = &adapter->hw; + u32 regval; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + regval &= ~(1 << vf_target_shift); + regval |= (setting << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); + + if (adapter->vfinfo[vf].vlan_count) { + vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT; + regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + regval &= ~(1 << vf_target_shift); + regval |= (setting << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); + } + + return 0; +} + +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* This operation is currently supported only for 82599 and x540 + * devices. + */ + if (adapter->hw.mac.type < ixgbe_mac_82599EB || + adapter->hw.mac.type >= ixgbe_mac_X550) + return -EOPNOTSUPP; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].rss_query_enabled = setting; + + return 0; +} + +int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; + return 0; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h new file mode 100644 index 000000000..2c197e6d1 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -0,0 +1,71 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_SRIOV_H_ +#define _IXGBE_SRIOV_H_ + +/* ixgbe driver limit the max number of VFs could be enabled to + * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) + */ +#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) + +#ifdef CONFIG_PCI_IOV +void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); +#endif +void ixgbe_msg_task(struct ixgbe_adapter *adapter); +int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); +void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); +int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, + u8 qos); +int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); +int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting); +int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi); +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); +#ifdef CONFIG_PCI_IOV +void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); +#endif +int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); + +static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, + u16 vid, u16 qos, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); +} + +#endif /* _IXGBE_SRIOV_H_ */ + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c new file mode 100644 index 000000000..ef6df3d64 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -0,0 +1,230 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_common.h" +#include "ixgbe_type.h" + +#include +#include +#include +#include +#include +#include +#include + +/* hwmon callback functions */ +static ssize_t ixgbe_hwmon_show_location(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + return sprintf(buf, "loc%u\n", + ixgbe_attr->sensor->location); +} + +static ssize_t ixgbe_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value; + + /* reset the temp field */ + ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw); + + value = ixgbe_attr->sensor->temp; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ixgbe_attr->sensor->caution_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr, + dev_attr); + unsigned int value = ixgbe_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return sprintf(buf, "%u\n", value); +} + +/** + * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. + * @adapter: pointer to the adapter structure + * @offset: offset in the eeprom sensor data table + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a device_attribute + * This is included in our hwmon_attr struct that contains the references to + * the data structures we need to get the data to display. 
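The callbacks above export temperatures in millidegrees Celsius through standard hwmon attributes. A hypothetical user-space sketch; "hwmon0" and "temp1_input" are assumed example names, since the hwmon instance number depends on what else is registered on the system.

#include <stdio.h>

static int read_board_temp(void)
{
	long millideg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("sensor: %ld.%03ld C\n", millideg / 1000, millideg % 1000);
	return 0;
}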
+ */ +static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, + unsigned int offset, int type) { + int rc; + unsigned int n_attr; + struct hwmon_attr *ixgbe_attr; + + n_attr = adapter->ixgbe_hwmon_buff->n_hwmon; + ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case IXGBE_HWMON_TYPE_LOC: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_label", offset + 1); + break; + case IXGBE_HWMON_TYPE_TEMP: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_input", offset + 1); + break; + case IXGBE_HWMON_TYPE_CAUTION: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_max", offset + 1); + break; + case IXGBE_HWMON_TYPE_MAX: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), + "temp%u_crit", offset + 1); + break; + default: + rc = -EPERM; + return rc; + } + + /* These always the same regardless of type */ + ixgbe_attr->sensor = + &adapter->hw.mac.thermal_sensor_data.sensor[offset]; + ixgbe_attr->hw = &adapter->hw; + ixgbe_attr->dev_attr.store = NULL; + ixgbe_attr->dev_attr.attr.mode = S_IRUGO; + ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; + sysfs_attr_init(&ixgbe_attr->dev_attr.attr); + + adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr; + + ++adapter->ixgbe_hwmon_buff->n_hwmon; + + return 0; +} + +static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) +{ +} + +/* called from ixgbe_main.c */ +void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) +{ + ixgbe_sysfs_del_adapter(adapter); +} + +/* called from ixgbe_main.c */ +int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) +{ + struct hwmon_buff *ixgbe_hwmon; + struct device *hwmon_dev; + unsigned int i; + int rc = 0; + + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { + goto exit; + } + + /* Don't create thermal hwmon interface if no sensors present */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) + goto exit; + + ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon), + GFP_KERNEL); + if (ixgbe_hwmon == NULL) { + rc = -ENOMEM; + goto exit; + } + adapter->ixgbe_hwmon_buff = ixgbe_hwmon; + + for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + /* + * Only create hwmon sysfs entries for sensors that have + * meaningful data for. 
+                 */
+                if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+                        continue;
+
+                /* Bail if any hwmon attr struct fails to initialize */
+                rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
+                if (rc)
+                        goto exit;
+                rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+                if (rc)
+                        goto exit;
+                rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+                if (rc)
+                        goto exit;
+                rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+                if (rc)
+                        goto exit;
+        }
+
+        ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
+        ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
+
+        hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+                                                            "ixgbe",
+                                                            ixgbe_hwmon,
+                                                            ixgbe_hwmon->groups);
+        if (IS_ERR(hwmon_dev))
+                rc = PTR_ERR(hwmon_dev);
+exit:
+        return rc;
+}
+
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
new file mode 100644
index 000000000..dd6ba5916
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -0,0 +1,3342 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2014 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS
+  e1000-devel Mailing List
+  Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +#include +#include +#include + +/* Device IDs */ +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_LS 0x154F +#define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +#define IXGBE_DEV_ID_X540T1 0x1560 + +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + +/* VF Device IDs */ +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? 
\ + 0x15F5C : 0x00028)) +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 + +/* NVM Registers */ +#define IXGBE_EEC 0x10010 +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 +#define IXGBE_FLA 0x1001C +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C +#define IXGBE_GRC 0x10200 + +/* General Receive Control */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ + +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 + +/* I2CCTL Bit Masks */ +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00004000 : 0x00000001) +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00000200 : 0x00000002) +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00001000 : 0x00000004) +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00000400 : 0x00000008) +#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 + +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define IXGBE_EMC_INTERNAL_DATA 0x00 +#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 +#define IXGBE_EMC_DIODE1_DATA 0x01 +#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19 +#define IXGBE_EMC_DIODE2_DATA 0x23 +#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A + +#define IXGBE_MAX_SENSORS 3 + +struct ixgbe_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct ixgbe_thermal_sensor_data { + struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; +}; + +/* Interrupt Registers */ +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +/* + * 82598 EITR is 16 bits but set the limits based on the max + * supported by all ixgbe hardware. 82599 EITR is only 12 bits, + * with the lower 3 always zero. + */ +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? 
(0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + /* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. 
Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define IXGBE_FDIRCMD 0x0EE2C + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define 
IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 +#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 + +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host + * Filter Table */ + +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible 
Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ +#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define MAX_TRAFFIC_CLASS 8 +#define X540_TRAFFIC_CLASS 4 +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define 
IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTQCNCR 0x08B00 +#define IXGBE_RTTQCNTG 0x04A90 +#define IXGBE_RTTBCNRD 0x0498C +#define IXGBE_RTTQCNRR 0x0498C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 
0x00000100 +#define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define IXGBE_RTTBCNRM 0x04980 +#define IXGBE_RTTQCNRM 0x04980 + +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) +/* FCoE DMA Context Registers */ +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ +#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 + +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) +/* FCoE Filter Context Registers */ +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_PCRC8ECL 0x0E810 +#define IXGBE_PCRC8ECH 0x0E811 +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 +#define IXGBE_LDPCECH 0x0E821 + +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15014 + +/* Management Bit Fields and Masks */ +#define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ + +/* Firmware Semaphore Register */ +#define IXGBE_FWSM_MODE_MASK 0xE +#define IXGBE_FWSM_FW_MODE_PT 0x4 + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define 
IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_FACTPS 0x10150 +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_FWSM 0x10148 +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 +#define IXGBE_SWFW_SYNC IXGBE_GSSR + +/* PCIe registers 82599-specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA_82599 0x11088 +#define IXGBE_CIAD_82599 0x1108C +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + IXGBE_CIAA_X550 : IXGBE_CIAA_82599)) +#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + IXGBE_CIAD_X550 : IXGBE_CIAD_82599)) +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 + +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) + +/* Time Sync Registers */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register 
Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 +#define IXGBE_TDSTATCTL 0x07C20 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 
of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define IXGBE_SGMIIC 0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC 0x041B0 +#define IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL 0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 
when RSC enabled */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) + +/* FACTPS */ +#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define 
IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ +#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */ +#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 + +/* MDIO definitions */ + +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ + +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define 
IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_AUTONEG_REG 0x0 + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 +#define AQ_FW_REV 0x20 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PB 8 + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 
0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define 
IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ +#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER 
IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F +#define IXGBE_FTQF_POOL_SHIFT 8 
+#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT 20 + +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ + +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + */ +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +#define IXGBE_ETQF_FILTER_LLDP 5 +#define IXGBE_ETQF_FILTER_LACP 6 + +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +/* VLAN pool filtering masks */ +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF + +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 
0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ +#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ +#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0x0 << 
IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 +#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 + +#define IXGBE_MACC_FLU 0x00000001 +#define IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* Veto Bit definition */ +#define IXGBE_MMNGC_MNG_VETO 0x00000001 + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 + +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + +/* SW_FW_SYNC/GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 + +/* FW Status register bitmask */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable 
*/ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */ + +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* Part Number String Length */ +#define IXGBE_PBANUM_LENGTH 11 + +/* Checksum and EEPROM pointers */ +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_FREE_SPACE_PTR 0X3E + +/* External Thermal Sensor Config */ +#define IXGBE_ETS_CFG 0x26 +#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +#define IXGBE_ETS_TYPE_MASK 0x0038 +#define IXGBE_ETS_TYPE_SHIFT 3 +#define IXGBE_ETS_TYPE_EMC 0x000 +#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000 +#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +#define IXGBE_ETS_DATA_LOC_SHIFT 10 +#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF + +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_MAX_MSIX_VECTORS_82599 0x40 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 +#define IXGBE_MAX_MSIX_VECTORS_82598 0x13 + +/* MSI-X capability fields masks */ +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* Legacy EEPROM word offsets */ +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 + +/* EEPROM Commands - SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define 
IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ + +#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */ + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif + +#ifndef IXGBE_EERD_EEWR_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define IXGBE_EERD_EEWR_ATTEMPTS 100000 +#endif + +#ifndef IXGBE_FLUDONE_ATTEMPTS +/* # attempts we wait for flush update to complete */ +#define IXGBE_FLUDONE_ATTEMPTS 20000 +#endif + +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. 
WWN base exists */ + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_LINK_SPEED_8000 0x3 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +#define IXGBE_PCIDEVCTRL2_50_100us 0x1 +#define IXGBE_PCIDEVCTRL2_1_2ms 0x2 +#define IXGBE_PCIDEVCTRL2_16_32ms 0x5 +#define IXGBE_PCIDEVCTRL2_65_130ms 0x6 +#define IXGBE_PCIDEVCTRL2_260_520ms 0x9 +#define IXGBE_PCIDEVCTRL2_1_2s 0xa +#define IXGBE_PCIDEVCTRL2_4_8s 0xd +#define IXGBE_PCIDEVCTRL2_17_34s 0xe + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_RSC_DIS 0x00000020 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
write-back flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_TSAUXC_EN_CLK 0x00000004 +#define IXGBE_TSAUXC_SYNCLK 0x00000008 +#define IXGBE_TSAUXC_SDP0_INT 0x00000040 + +#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ + +#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ + +#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF +#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 +#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 +#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 +#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 +#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */ + +#define IXGBE_MFLCN_RPFCE_SHIFT 4 + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define 
IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 + +#define IXGBE_FWSM_TS_ENABLED 0x1 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 +#define IXGBE_QDE_WRITE 0x00010000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +/* Multiple Transmit Queue Command Register */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define 
IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
*/ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ +#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* SR-IOV specific macros */ +#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) +#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) +#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 
0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) +/* Translated register #defines */ +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) +#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) +#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) + +#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) + +#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) + +enum ixgbe_fdir_pballoc_type { + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, +}; +#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 + +/* Flow Director register values */ +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 + +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +#define IXGBE_FDIRM_VLANID 0x00000001 +#define IXGBE_FDIRM_VLANP 0x00000002 +#define IXGBE_FDIRM_POOL 0x00000004 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 + +#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT 0 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT 16 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 +#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 + +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +#define IXGBE_FDIRCMD_IPV6 0x00000080 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +#define IXGBE_FDIRCMD_DROP 0x00000200 +#define IXGBE_FDIRCMD_INT 0x00000400 +#define IXGBE_FDIRCMD_LAST 0x00000800 +#define IXGBE_FDIRCMD_COLLISION 0x00001000 +#define 
IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +#define IXGBE_FDIR_INIT_DONE_POLL 10 +#define IXGBE_FDIRCMD_CMD_POLL 10 + +#define IXGBE_FDIR_DROP_QUEUE 127 + +/* Manageablility Host Interface defines */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0x0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 + +/* Host Interface Command Structures */ +struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + +struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ +}; + +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + __be32 address; + __be16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +#define 
IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define IXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define IXGBE_CABLE_DC 5556 /* Delay Copper */ +#define IXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) + +/* Calculate Interface Delay 82598, 82599 */ +#define IXGBE_PHY_D 12800 +#define IXGBE_MAC_D 4096 +#define IXGBE_XAUI_D (2 * 1024) + +#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define IXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define IXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + 
(IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID_X540) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate 82599, 82598 delay value in bit times */ +#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define IXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * IXGBE_B2BT(_max_frame_tc) + \ + (36 * IXGBE_PCI_DELAY / 25) + 1) +#define IXGBE_LOW_DV(_max_frame_tc) \ + (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, +}; + +/* Flow Director ATR input struct. */ +union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_flash, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_X540, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_ext_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_sfp_passive_tyco, + ixgbe_phy_sfp_passive_unknown, + ixgbe_phy_sfp_active_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_ftl_active, + ixgbe_phy_sfp_unknown, + ixgbe_phy_sfp_intel, + ixgbe_phy_qsfp_passive_unknown, + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, + ixgbe_phy_sfp_unsupported, + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + 
ixgbe_sfp_type_da_cu_core0 = 3, + ixgbe_sfp_type_da_cu_core1 = 4, + ixgbe_sfp_type_srlr_core0 = 5, + ixgbe_sfp_type_srlr_core1 = 6, + ixgbe_sfp_type_da_act_lmt_core0 = 7, + ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_1g_sx_core0 = 11, + ixgbe_sfp_type_1g_sx_core1 = 12, + ixgbe_sfp_type_1g_lx_core0 = 13, + ixgbe_sfp_type_1g_lx_core1 = 14, + ixgbe_sfp_type_not_present = 0xFFFE, + ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_fiber_qsfp, + ixgbe_media_type_fiber_lco, + ixgbe_media_type_copper, + ixgbe_media_type_backplane, + ixgbe_media_type_cx4, + ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES 3 +enum ixgbe_smart_speed { + ixgbe_smart_speed_auto = 0, + ixgbe_smart_speed_on, + ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { + ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, + ixgbe_bus_type_pci_express, + ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, + ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool uc_set_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { + enum ixgbe_bus_speed speed; + enum ixgbe_bus_width width; + enum ixgbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ + u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? 
*/ + enum ixgbe_fc_mode current_mode; /* FC mode in effect */ + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 rqsmr[16]; + u64 tqsmr[8]; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); +}; + +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + void (*set_lan_id)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + s32 (*setup_sfp)(struct ixgbe_hw *); + s32 (*disable_rx_buff)(struct ixgbe_hw *); + s32 (*enable_rx_buff)(struct ixgbe_hw *); + s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); + s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); + + /* Link */ + void (*disable_tx_laser)(struct ixgbe_hw *); + void (*enable_tx_laser)(struct ixgbe_hw *); + void (*flap_tx_laser)(struct ixgbe_hw *); + void (*stop_link_on_d3)(struct ixgbe_hw *); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + + /* Packet Buffer Manipulation */ + void 
(*set_rxpba)(struct ixgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); + void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* DMA Coalescing */ + s32 (*dmac_config)(struct ixgbe_hw *hw); + s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); +}; + +struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*init)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); + s32 (*check_overtemp)(struct ixgbe_hw *); +}; + +struct ixgbe_eeprom_info { + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 word_page_size; +}; + +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; + u16 max_msix_vectors; +#define IXGBE_MAX_MTA 128 + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 
max_tx_queues; + u32 max_rx_queues; + u32 orig_autoc; + u32 orig_autoc2; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; + u8 san_mac_rar_index; + struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool set_lben; +}; + +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + struct mdio_if_info mdio; + enum ixgbe_phy_type type; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + u8 lan_id; + u32 phy_semaphore_mask; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; + bool qsfp_shared_i2c_bus; +}; + +#include "ixgbe_mbx.h" + +struct ixgbe_mbx_operations { + s32 (*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ixgbe_hw *, u16); + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + struct ixgbe_mbx_operations ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u32 v2p_mailbox; + u16 size; +}; + +struct ixgbe_hw { + u8 __iomem *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + bool force_full_reset; + bool allow_unsupported_sfp; + bool wol_enabled; +}; + +struct ixgbe_info { + enum ixgbe_mac_type mac; + s32 (*get_invariants)(struct ixgbe_hw *); + struct ixgbe_mac_operations *mac_ops; + struct ixgbe_eeprom_operations *eeprom_ops; + struct ixgbe_phy_operations *phy_ops; + struct ixgbe_mbx_operations *mbx_ops; +}; + + +/* Error Codes */ +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define IXGBE_ERR_SFP_NOT_PRESENT -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? 
(0x4010) : (0x8010)) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00)) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00)) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_KX4_LINK_CNTL_1 0x4C +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 +#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 + +#endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c new file mode 100644 index 000000000..f5f948d08 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -0,0 +1,863 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 
2014 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/sched.h> + +#include "ixgbe.h" +#include "ixgbe_phy.h" +#include "ixgbe_x540.h" + +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 +#define IXGBE_X540_RX_PB_SIZE 384 + +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + return ixgbe_media_type_copper; +} + +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + return hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); +} + +/** + * ixgbe_reset_hw_X540 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, performs a PHY reset, and performs a link (MAC) + * reset.
+ **/ +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ + s32 status; + u32 ctrl, i; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + +mac_reset_top: + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + msleep(100); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (is_valid_ether_addr(hw->mac.san_addr)) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + + return status; +} + +/** + * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +{ + s32 ret_val; + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val) + return ret_val; + + return ixgbe_start_hw_gen2(hw); +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
+ **/ +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** + * ixgbe_read_eerd_X540- Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_read_eerd_generic(hw, offset, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_write_eewr_generic(hw, offset, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * + * This function does not use synchronization for EERD and EEWR. It can + * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. 
+ * + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; + + /* + * Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores here. Instead use + * ixgbe_read_eerd_generic + */ + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < checksum_last_word; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + break; + } + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (pointer + length) >= hw->eeprom.word_size) + continue; + + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores twice here. 
+ */ + status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + goto out; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + hw_dbg(hw, "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); + + /* Do not use hw->eeprom.ops.write because we do not want to + * take the synchronization semaphores twice here. + */ + status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); + if (status) + goto out; + + status = ixgbe_update_flash_X540(hw); + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. + **/ +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ + u32 flup; + s32 status; + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { + hw_dbg(hw, "Flash update time out\n"); + return status; + } + + flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == 0) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + + if (hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == 0) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + } + + return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. 
+ **/ +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EEC); + if (reg & IXGBE_EEC_FLUDONE) + return 0; + udelay(5); + } + return IXGBE_ERR_EEPROM; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore thought the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 5; + u32 hwmask = 0; + u32 timeout = 200; + u32 i; + + if (swmask == IXGBE_GSSR_EEP_SM) + hwmask = IXGBE_GSSR_FLASH_SM; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + break; + } else { + /* + * Firmware currently using resource (fwmask), + * hardware currently using resource (hwmask), + * or other software thread currently using + * resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 10000); + } + } + + /* + * If the resource is not released by the FW/HW the SW can assume that + * the FW/HW malfunctions. In that case the SW should sets the + * SW bit(s) of the requested resource(s) while ignoring the + * corresponding FW/HW bits in the SW_FW_SYNC register. + */ + if (i >= timeout) { + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (swfw_sync & (fwmask | hwmask)) { + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + } + } + + usleep_range(5000, 10000); + return 0; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, EVM, Flash) + **/ +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + + ixgbe_get_swfw_sync_semaphore(hw); + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); + usleep_range(5000, 10000); +} + +/** + * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so SW/FW can gain control of shared resources + */ +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) + break; + usleep_range(50, 100); + } + + if (i == timeout) { + hw_dbg(hw, + "Software semaphore SMBI between device drivers not granted.\n"); + return IXGBE_ERR_EEPROM; + } + + /* Now get the 
semaphore between SW/FW through the REGSMP bit */ + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swsm & IXGBE_SWFW_REGSMP)) + return 0; + + usleep_range(50, 100); + } + + return IXGBE_ERR_EEPROM; +} + +/** + * ixgbe_release_nvm_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm &= ~IXGBE_SWFW_REGSMP; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + ixgbe_link_speed speed; + bool link_up; + + /* + * Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + } + /* Set the LED to LINK_UP + BLINK. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + ledctl_reg &= ~IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + + /* Unforce link and speed in the MAC. 
*/ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} +static struct ixgbe_mac_operations mac_ops_X540 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_X540, + .start_hw = &ixgbe_start_hw_X540, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_X540, + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_device_caps = &ixgbe_get_device_caps_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, + .read_analog_reg8 = NULL, + .write_analog_reg8 = NULL, + .setup_link = &ixgbe_setup_mac_link_X540, + .set_rxpba = &ixgbe_set_rxpba_generic, + .check_link = &ixgbe_check_mac_link_generic, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_X540, + .blink_led_stop = &ixgbe_blink_led_stop_X540, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, + .clear_vmdq = &ixgbe_clear_vmdq_generic, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_generic, + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = NULL, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, + .get_thermal_sensor_data = NULL, + .init_thermal_sensor_thresh = NULL, + .prot_autoc_read = &prot_autoc_read_generic, + .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_X540 = { + .init_params = &ixgbe_init_eeprom_params_X540, + .read = &ixgbe_read_eerd_X540, + .read_buffer = &ixgbe_read_eerd_buffer_X540, + .write = &ixgbe_write_eewr_X540, + .write_buffer = &ixgbe_write_eewr_buffer_X540, + .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, + .validate_checksum = &ixgbe_validate_eeprom_checksum_X540, + .update_checksum = &ixgbe_update_eeprom_checksum_X540, +}; + +static struct ixgbe_phy_operations phy_ops_X540 = { + .identify = &ixgbe_identify_phy_generic, + .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = NULL, + .reset = NULL, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, + .read_i2c_eeprom = 
&ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, + .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, +}; + +struct ixgbe_info ixgbe_X540_info = { + .mac = ixgbe_mac_X540, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X540, + .eeprom_ops = &eeprom_ops_X540, + .phy_ops = &phy_ops_X540, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h new file mode 100644 index 000000000..a1468b1f4 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -0,0 +1,39 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include "ixgbe_type.h" + +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c new file mode 100644 index 000000000..cf5cf819a --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -0,0 +1,1532 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +/** ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + + return ixgbe_identify_module_generic(hw); + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; + case IXGBE_DEV_ID_X550EM_X_KR: + hw->phy.type = ixgbe_phy_x550em_kr; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); + default: + break; + } + return 0; +} + +static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of the + * IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to read + * @device_type: 3 bit device type + * @data: Pointer to the data read from the register + **/ +static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 i, command, error; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to read, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Read timed out\n"); + return IXGBE_ERR_PHY; + } + + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + + return 0; +} + +/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface + * command assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + struct ixgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + if (status) + return status; + + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + + return 0; +} + +/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + /* Take semaphore for the entire operation. 
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; + } + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32((offset + current_word) * 2); + buffer.length = cpu_to_be16(words_to_read * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, + false); + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * + * Returns error status for any failure + **/ +static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
*/ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return 0; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return 0; +} + +/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, + u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
+ **/ +static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + hw_dbg(hw, "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, + u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + buffer.data = data; + buffer.address = cpu_to_be32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + hw_dbg(hw, "write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. 
+ **/ +static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = 0; + union ixgbe_hic_hdr2 buffer; + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** ixgbe_disable_rx_x550 - Disable RX unit + * + * Enables the Rx DMA unit for x550 + **/ +static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + +/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, + u16 *data) +{ + s32 status = 0; + u32 i = 0; + + /* Take semaphore for the entire operation. 
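+ * Holding IXGBE_GSSR_EEP_SM once across the whole loop avoids re-acquiring it for every word written.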
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); + return status; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + if (status) { + hw_dbg(hw, "Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + **/ +static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } +} + +/** ixgbe_setup_sfp_modules_X550em - Setup SFP module + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + bool setup_linear; + u16 reg_slice, edc_mode; + s32 ret_val; + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_unknown: + return 0; + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + setup_linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + setup_linear = false; + break; + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + /* The CS4227 slice address is the base address + the port-pair reg + * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. + */ + reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); + + if (setup_linear) + edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + + /* Configure CS4227 for connection type. 
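+ * If the write to the IXGBE_CS4227 address fails, the same value is retried at I2C address 0x80.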
*/ + ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + edc_mode); + + if (ret_val) + ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, + edc_mode); + + return ret_val; +} + +/** ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + **/ +static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return 0; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + return 0; +} + +/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the + * IOSF device + * + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 i, command, error; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + /* Check every 10 usec to see if the address cycle completed. + * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to write, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Write timed out\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. + **/ +static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. 
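+ * Only 10G and 1G are supported by the internal KR PHY; any other speed is rejected below.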
*/ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable Flex from training TXFFE. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Enable override for coefficients. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KX4 PHY. + **/ +static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, ®_val); + if (status) + return status; + + reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | + IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); + + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; + + /* Advertise 10G support. 
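+ * (KX4 is advertised for 10G and KX for 1G, based on hw->phy.autoneg_advertised.)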
*/ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, reg_val); + + return status; +} + +/** ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY. + **/ +static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + + /* Advertise 10G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY + * @hw: point to hardware structure + * + * Configures the integrated KR PHY to talk to the external PHY. The base + * driver will call this function when it gets notification via interrupt from + * the external PHY. This function forces the internal PHY into iXFI mode at + * the correct speed. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. 
+ **/ +static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 lasi, autoneg_status, speed; + ixgbe_link_speed force_speed; + + /* Verify that the external link status has changed */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi); + if (status) + return status; + + /* If there was no change in link status, we can just exit */ + if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM)) + return 0; + + /* we read this twice back to back to indicate current status */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + /* If link is not up return an error indicating treat link as down */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + return IXGBE_ERR_INVALID_LINK_SETTINGS; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); +} + +/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
+ **/ +static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set functions pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = ixgbe_setup_kx4_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em; + break; + default: + break; + } + return ret_val; +} + +/** ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + * + */ +static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Detect if there is a copper PHY attached. */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. 
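+ ** Waits for the PHY firmware reset to finish, then re-enables the transmitter and clears the power-up stall bit so the PHY firmware can run.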
+ ** @hw: pointer to hardware structure + **/ +static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + u32 retries = 2; + + do { + /* decrement retries counter and exit if we hit 0 */ + if (retries < 1) { + hw_dbg(hw, "External PHY not yet finished resetting."); + return IXGBE_ERR_PHY; + } + retries--; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); + if (status) + return status; + + /* Verify PHY FW reset has completed */ + } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1); + + /* Set port to low power mode */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + /* Enable the transmitter */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + ®); + if (status) + return status; + + reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + reg); + if (status) + return status; + + /* Un-stall the PHY FW */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + reg &= ~IXGBE_MDIO_POWER_UP_STALL; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} + +/** ixgbe_reset_hw_X550em - Perform hardware reset + ** @hw: pointer to hardware structure + ** + ** Resets the hardware by resetting the transmit and receive units, masks + ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) + ** reset. + **/ +static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + bool link_up = false; + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) + return status; + } + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
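+ * (IXGBE_CTRL_RST selects the SW reset, IXGBE_CTRL_LNK_RST the link reset.)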
+ */ + ctrl = IXGBE_CTRL_LNK_RST; + + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* Double resets are required for recovery from certain error + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + return status; +} + +/** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype + * anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for Ethertype anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing + **/ +static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; + u32 pfvfspoof; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to set source address pruning for + **/ +static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool) +{ + u64 pfflp; + + /* max rx pool is 63 */ + if (pool > 63) + return; + + pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); + pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + + if (enable) + pfflp |= (1ULL << pool); + else + pfflp &= ~(1ULL << pool); + + IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); + IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); +} + +#define X550_COMMON_MAC \ + .init_hw = &ixgbe_init_hw_generic, \ + .start_hw = &ixgbe_start_hw_X540, \ + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ + .get_mac_addr = &ixgbe_get_mac_addr_generic, \ + .get_device_caps = &ixgbe_get_device_caps_generic, \ + .stop_adapter = &ixgbe_stop_adapter_generic, \ + .get_bus_info = &ixgbe_get_bus_info_generic, \ + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ + .read_analog_reg8 = NULL, \ + .write_analog_reg8 = NULL, \ + .set_rxpba = &ixgbe_set_rxpba_generic, \ + .check_link = &ixgbe_check_mac_link_generic, \ + .led_on = &ixgbe_led_on_generic, \ + .led_off = &ixgbe_led_off_generic, \ + .blink_led_start = &ixgbe_blink_led_start_X540, \ + 
.blink_led_stop = &ixgbe_blink_led_stop_X540, \ + .set_rar = &ixgbe_set_rar_generic, \ + .clear_rar = &ixgbe_clear_rar_generic, \ + .set_vmdq = &ixgbe_set_vmdq_generic, \ + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ + .clear_vmdq = &ixgbe_clear_vmdq_generic, \ + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ + .enable_mc = &ixgbe_enable_mc_generic, \ + .disable_mc = &ixgbe_disable_mc_generic, \ + .clear_vfta = &ixgbe_clear_vfta_generic, \ + .set_vfta = &ixgbe_set_vfta_generic, \ + .fc_enable = &ixgbe_fc_enable_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ + .init_uta_tables = &ixgbe_init_uta_tables_generic, \ + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .set_source_address_pruning = \ + &ixgbe_set_source_address_pruning_X550, \ + .set_ethertype_anti_spoofing = \ + &ixgbe_set_ethertype_anti_spoofing_X550, \ + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ + .get_thermal_sensor_data = NULL, \ + .init_thermal_sensor_thresh = NULL, \ + .prot_autoc_read = &prot_autoc_read_generic, \ + .prot_autoc_write = &prot_autoc_write_generic, \ + .enable_rx = &ixgbe_enable_rx_generic, \ + .disable_rx = &ixgbe_disable_rx_x550, \ + +static struct ixgbe_mac_operations mac_ops_X550 = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X540, + .get_media_type = &ixgbe_get_media_type_X540, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .setup_link = &ixgbe_setup_mac_link_X540, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .setup_sfp = NULL, +}; + +static struct ixgbe_mac_operations mac_ops_X550EM_x = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + +}; + +#define X550_COMMON_EEP \ + .read = &ixgbe_read_ee_hostif_X550, \ + .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ + .write = &ixgbe_write_ee_hostif_X550, \ + .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ + .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ + .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ + .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ + +static struct ixgbe_eeprom_operations eeprom_ops_X550 = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X550, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X540, +}; + +#define X550_COMMON_PHY \ + .identify_sfp = &ixgbe_identify_module_generic, \ + .reset = NULL, \ + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .check_overtemp = &ixgbe_tn_check_overtemp, \ + .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + +static struct ixgbe_phy_operations phy_ops_X550 = { + X550_COMMON_PHY + .init = 
NULL, + .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .read_i2c_combined = &ixgbe_read_i2c_combined_generic, + .write_i2c_combined = &ixgbe_write_i2c_combined_generic, +}; + +static struct ixgbe_phy_operations phy_ops_X550EM_x = { + X550_COMMON_PHY + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = NULL, /* defined later */ + .write_reg = NULL, /* defined later */ + .setup_link = NULL, /* defined later */ +}; + +struct ixgbe_info ixgbe_X550_info = { + .mac = ixgbe_mac_X550, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550, + .eeprom_ops = &eeprom_ops_X550, + .phy_ops = &phy_ops_X550, + .mbx_ops = &mbx_ops_generic, +}; + +struct ixgbe_info ixgbe_X550EM_x_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550EM_x, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile new file mode 100644 index 000000000..4ce4c97ef --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -0,0 +1,38 @@ +################################################################################ +# +# Intel 82599 Virtual Function driver +# Copyright(c) 1999 - 2012 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82599 VF ethernet driver +# + +obj-$(CONFIG_IXGBEVF) += ixgbevf.o + +ixgbevf-objs := vf.o \ + mbx.o \ + ethtool.o \ + ixgbevf_main.o + diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h new file mode 100644 index 000000000..770e21a64 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -0,0 +1,299 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_DEFINES_H_ +#define _IXGBEVF_DEFINES_H_ + +/* Device IDs */ +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 + +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 + +/* DCB define */ +#define IXGBE_VF_MAX_TRAFFIC_CLASS 8 + +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 + +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 + +/* DCA Control */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define 
IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. 
FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* 
L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Interrupt register bitmasks */ + +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 + +/* Error Codes */ +#define IXGBE_ERR_INVALID_MAC_ADDR -1 +#define IXGBE_ERR_RESET_FAILED -2 +#define IXGBE_ERR_INVALID_ARGUMENT -3 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ + +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +#endif /* _IXGBEVF_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c new file mode 100644 index 000000000..b2f5b161d --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -0,0 +1,888 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbevf */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbevf.h" + +#define IXGBE_ALL_RAR_ENTRIES 16 + +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + struct { + int sizeof_stat; + int stat_offset; + int base_stat_offset; + int saved_reset_offset; + }; +}; + +#define IXGBEVF_STAT(m, b, r) { \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ + .stat_offset = offsetof(struct ixgbevf_adapter, m), \ + .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \ + .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \ +} + +#define IXGBEVF_ZSTAT(m) { \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ + .stat_offset = offsetof(struct ixgbevf_adapter, m), \ + .base_stat_offset = -1, \ + .saved_reset_offset = -1 \ +} + +static const struct ixgbe_stats ixgbe_gstrings_stats[] = { + {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, + stats.saved_reset_vfgprc)}, + {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, + stats.saved_reset_vfgptc)}, + {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, + stats.saved_reset_vfgorc)}, + {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, + stats.saved_reset_vfgotc)}, + {"tx_busy", IXGBEVF_ZSTAT(tx_busy)}, + {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)}, + {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)}, + {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, + stats.saved_reset_vfmprc)}, + {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)}, +#ifdef BP_EXTENDED_STATS + {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)}, + {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)}, + {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)}, + {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)}, + {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)}, + {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)}, +#endif +}; + +#define IXGBE_QUEUE_STATS_LEN 0 +#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) + +#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) +static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Link test (on/offline)" +}; + +#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) + +static int ixgbevf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = 0; + bool link_up; + + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_DUMMY1; + ecmd->port = -1; + + hw->mac.get_link_status = 1; + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + if (link_up) { + __u32 speed = SPEED_10000; + + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + speed = SPEED_10000; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + speed = SPEED_1000; + break; + case IXGBE_LINK_SPEED_100_FULL: + speed = SPEED_100; + break; + } + + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static u32 ixgbevf_get_msglevel(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static 
void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + +static int ixgbevf_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN 45 + return IXGBE_REGS_LEN * sizeof(u32); +} + +static void ixgbevf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, + void *p) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u32 regs_len = ixgbevf_get_regs_len(netdev); + u8 i; + + memset(p, 0, regs_len); + + regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); + + /* Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR + */ + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0)); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0)); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + + /* Receive DMA */ + for (i = 0; i < 2; i++) + regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i)); + for (i = 0; i < 2; i++) + regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i)); + for (i = 0; i < 2; i++) + regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); + + /* Receive */ + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE); + + /* Transmit */ + for (i = 0; i < 2; i++) + regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i)); + for (i = 0; i < 2; i++) + regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i)); + for (i = 0; i < 2; i++) + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i)); + for (i = 0; i < 2; i++) + regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i)); + for (i = 0; i < 2; i++) + regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + for (i = 0; i < 2; i++) + regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i)); + for (i = 0; i < 2; i++) + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i)); +} + +static void ixgbevf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ixgbevf_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void ixgbevf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct 
ixgbevf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IXGBEVF_MAX_RXD; + ring->tx_max_pending = IXGBEVF_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int ixgbevf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; + u32 new_rx_count, new_tx_count; + int i, err = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) + return 0; + + while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (new_tx_count != adapter->tx_ring_count) { + tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring)); + if (!tx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + /* clone ring and setup updated count */ + tx_ring[i] = *adapter->tx_ring[i]; + tx_ring[i].count = new_tx_count; + err = ixgbevf_setup_tx_resources(&tx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_tx_resources(&tx_ring[i]); + } + + vfree(tx_ring); + tx_ring = NULL; + + goto clear_reset; + } + } + } + + if (new_rx_count != adapter->rx_ring_count) { + rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring)); + if (!rx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + /* clone ring and setup updated count */ + rx_ring[i] = *adapter->rx_ring[i]; + rx_ring[i].count = new_rx_count; + err = ixgbevf_setup_rx_resources(&rx_ring[i]); + if (err) { + while (i) { + i--; + ixgbevf_free_rx_resources(&rx_ring[i]); + } + + vfree(rx_ring); + rx_ring = NULL; + + goto clear_reset; + } + } + } + + /* bring interface down to prepare for update */ + ixgbevf_down(adapter); + + /* Tx */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbevf_free_tx_resources(adapter->tx_ring[i]); + *adapter->tx_ring[i] = tx_ring[i]; + } + adapter->tx_ring_count = new_tx_count; + + vfree(tx_ring); + tx_ring = NULL; + } + + /* Rx */ + if (rx_ring) { + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbevf_free_rx_resources(adapter->rx_ring[i]); + *adapter->rx_ring[i] = rx_ring[i]; + } + adapter->rx_ring_count = new_rx_count; + + vfree(rx_ring); + rx_ring = NULL; + } + + /* restore interface using new values */ + ixgbevf_up(adapter); + +clear_reset: + /* free Tx resources if Rx error is encountered */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_free_tx_resources(&tx_ring[i]); + vfree(tx_ring); + } + + clear_bit(__IXGBEVF_RESETTING, &adapter->state); + return err; +} + +static 
int ixgbevf_get_sset_count(struct net_device *dev, int stringset) +{ + switch (stringset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_GLOBAL_STATS_LEN; + default: + return -EINVAL; + } +} + +static void ixgbevf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + char *base = (char *)adapter; + int i; +#ifdef BP_EXTENDED_STATS + u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, + tx_yields = 0, tx_cleaned = 0, tx_missed = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_yields += adapter->rx_ring[i]->stats.yields; + rx_cleaned += adapter->rx_ring[i]->stats.cleaned; + rx_yields += adapter->rx_ring[i]->stats.yields; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_yields += adapter->tx_ring[i]->stats.yields; + tx_cleaned += adapter->tx_ring[i]->stats.cleaned; + tx_yields += adapter->tx_ring[i]->stats.yields; + } + + adapter->bp_rx_yields = rx_yields; + adapter->bp_rx_cleaned = rx_cleaned; + adapter->bp_rx_missed = rx_missed; + + adapter->bp_tx_yields = tx_yields; + adapter->bp_tx_cleaned = tx_cleaned; + adapter->bp_tx_missed = tx_missed; +#endif + + ixgbevf_update_stats(adapter); + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + char *p = base + ixgbe_gstrings_stats[i].stat_offset; + char *b = base + ixgbe_gstrings_stats[i].base_stat_offset; + char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset; + + if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) { + if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) + data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r; + else + data[i] = *(u64 *)p; + } else { + if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) + data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r; + else + data[i] = *(u32 *)p; + } + } +} + +static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + char *p = (char *)data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *ixgbe_gstrings_test, + IXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data) +{ + struct ixgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + *data = 0; + + hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (!link_up) + *data = 1; + + return *data; +} + +/* ethtool register test data */ +struct ixgbevf_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
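+ * For pattern tests, each value in register_test_patterns is masked with the write mask, written, read back, and compared against (pattern & write & mask).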
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default VF register test */ +static const struct ixgbevf_reg_test reg_test_vf[] = { + { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE }, + { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, + { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { .reg = 0 } +}; + +static const u32 register_test_patterns[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF +}; + +static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 pat, val, before; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { + before = ixgbevf_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, + register_test_patterns[pat] & write); + val = ixgbevf_read_reg(&adapter->hw, reg); + if (val != (register_test_patterns[pat] & write & mask)) { + hw_dbg(&adapter->hw, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, + register_test_patterns[pat] & write & mask); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + } + return false; +} + +static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val, before; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; + return true; + } + before = ixgbevf_read_reg(&adapter->hw, reg); + ixgbe_write_reg(&adapter->hw, reg, write & mask); + val = ixgbevf_read_reg(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), write & mask); + *data = reg; + ixgbe_write_reg(&adapter->hw, reg, before); + return true; + } + ixgbe_write_reg(&adapter->hw, reg, before); + return false; +} + +static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) +{ + const struct ixgbevf_reg_test *test; + u32 i; + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + dev_err(&adapter->pdev->dev, + "Adapter removed - register test blocked\n"); + *data = 1; + return 1; + } + test = reg_test_vf; + + /* Perform the register test, looping through the test table + * until we either fail or reach the null entry. 
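/*
 * reg_pattern_test() above writes each pattern masked by the register's
 * writable bits, expects to read back (pattern & write & mask), and
 * restores the original value afterwards.  A self-contained model of that
 * check against a simulated register; the "stuck bits" failure case is
 * hypothetical and only illustrates what the test would catch.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* The four patterns used by the register self-test. */
static const uint32_t patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

/* Simulated 32-bit register: only bits set in 'writable' latch a write,
 * everything else reads back as zero (a simplification for this sketch).
 */
struct sim_reg {
	uint32_t value;
	uint32_t writable;
};

static void sim_write(struct sim_reg *r, uint32_t v)
{
	r->value = v & r->writable;
}

static uint32_t sim_read(const struct sim_reg *r)
{
	return r->value;
}

/* Same pass/fail rule as reg_pattern_test(): write pattern & write, expect
 * to read back pattern & write & mask, then restore.  Returns true on
 * failure, like the driver helper.
 */
static bool pattern_test(struct sim_reg *r, uint32_t mask, uint32_t write)
{
	uint32_t before = sim_read(r);
	size_t i;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		sim_write(r, patterns[i] & write);
		if (sim_read(r) != (patterns[i] & write & mask)) {
			sim_write(r, before);
			return true;
		}
		sim_write(r, before);
	}
	return false;
}

int main(void)
{
	struct sim_reg good = { 0, 0xFFFFFF80 };  /* behaves as documented */
	struct sim_reg bad  = { 0, 0x0000FF80 };  /* upper bits are stuck  */

	printf("good register: %s\n",
	       pattern_test(&good, 0xFFFFFF80, 0xFFFFFF80) ? "FAIL" : "pass");
	printf("bad register:  %s\n",
	       pattern_test(&bad, 0xFFFFFF80, 0xFFFFFF80) ? "FAIL" : "pass");
	return 0;
}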
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; + case WRITE_NO_TEST: + ixgbe_write_reg(&adapter->hw, + test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, + test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, + test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + test->reg + 4 + (i * 8), + test->mask, + test->write); + break; + } + if (b) + return 1; + } + test++; + } + + *data = 0; + return *data; +} + +static void ixgbevf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + + if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + dev_err(&adapter->pdev->dev, + "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } + set_bit(__IXGBEVF_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + hw_dbg(&adapter->hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + ixgbevf_reset(adapter); + + hw_dbg(&adapter->hw, "register testing starting\n"); + if (ixgbevf_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + ixgbevf_reset(adapter); + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + hw_dbg(&adapter->hw, "online testing starting\n"); + /* Online tests */ + if (ixgbevf_link_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + + clear_bit(__IXGBEVF_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static int ixgbevf_nway_reset(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); + + return 0; +} + +static int ixgbevf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (adapter->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + + return 0; +} + +static int ixgbevf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_q_vector *q_vector; + int num_vectors, i; + u16 tx_itr_param, 
rx_itr_param; + + /* don't accept Tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && + adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) + return -EINVAL; + + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + + if (adapter->rx_itr_setting == 1) + rx_itr_param = IXGBE_20K_ITR; + else + rx_itr_param = adapter->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) + tx_itr_param = IXGBE_10K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + + num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < num_vectors; i++) { + q_vector = adapter->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* Tx only */ + q_vector->itr = tx_itr_param; + else + /* Rx only or mixed */ + q_vector->itr = rx_itr_param; + ixgbevf_write_eitr(q_vector); + } + + return 0; +} + +static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct ixgbevf_adapter *adapter = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adapter->num_rx_queues; + return 0; + default: + hw_dbg(&adapter->hw, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_82599_RETA_SIZE; + + return 0; +} + +static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_RSS_HASH_KEY_SIZE; + + return 0; +} + +static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* If neither indirection table nor hash key was requested - just + * return a success avoiding taking any locks. 
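/*
 * ixgbevf_set_coalesce() above stores the requested microseconds shifted
 * left by two (the granularity the EITR interval field works in), and
 * ixgbevf_get_coalesce() shifts it back when reporting; values 0 and 1 are
 * kept as-is as the "off"/"dynamic" markers.  A worked example of the
 * round trip; the 50 usec value and the ethtool command in the comment are
 * only examples.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t usecs_to_itr(uint32_t usecs)
{
	return (usecs > 1) ? (uint16_t)(usecs << 2) : (uint16_t)usecs;
}

static uint32_t itr_to_usecs(uint16_t itr)
{
	return (itr <= 1) ? itr : (itr >> 2);
}

int main(void)
{
	uint32_t usecs = 50;		/* e.g. "ethtool -C <if> rx-usecs 50" */
	uint16_t itr = usecs_to_itr(usecs);

	printf("stored ITR value:  %u\n", itr);			/* 200, i.e. IXGBE_20K_ITR */
	printf("reported rx-usecs: %u\n", itr_to_usecs(itr));	/* 50 */
	printf("interrupts/sec:    %u\n", 1000000 / usecs);	/* 20000 */
	return 0;
}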
+ */ + if (!indir && !key) + return 0; + + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + + return err; +} + +static const struct ethtool_ops ixgbevf_ethtool_ops = { + .get_settings = ixgbevf_get_settings, + .get_drvinfo = ixgbevf_get_drvinfo, + .get_regs_len = ixgbevf_get_regs_len, + .get_regs = ixgbevf_get_regs, + .nway_reset = ixgbevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = ixgbevf_get_ringparam, + .set_ringparam = ixgbevf_set_ringparam, + .get_msglevel = ixgbevf_get_msglevel, + .set_msglevel = ixgbevf_set_msglevel, + .self_test = ixgbevf_diag_test, + .get_sset_count = ixgbevf_get_sset_count, + .get_strings = ixgbevf_get_strings, + .get_ethtool_stats = ixgbevf_get_ethtool_stats, + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, + .get_rxnfc = ixgbevf_get_rxnfc, + .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, + .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, + .get_rxfh = ixgbevf_get_rxfh, +}; + +void ixgbevf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ixgbevf_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h new file mode 100644 index 000000000..775d08900 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -0,0 +1,504 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_H_ +#define _IXGBEVF_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "vf.h" + +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +#define BP_EXTENDED_STATS +#endif + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct ixgbevf_tx_buffer { + union ixgbe_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct ixgbevf_rx_buffer { + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + +struct ixgbevf_stats { + u64 packets; + u64 bytes; +#ifdef BP_EXTENDED_STATS + u64 yields; + u64 misses; + u64 cleaned; +#endif +}; + +struct ixgbevf_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct ixgbevf_rx_queue_stats { + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +enum ixgbevf_ring_state_t { + __IXGBEVF_TX_DETECT_HANG, + __IXGBEVF_HANG_CHECK_ARMED, +}; + +#define check_for_tx_hang(ring) \ + test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state) + +struct ixgbevf_ring { + struct ixgbevf_ring *next; + struct net_device *netdev; + struct device *dev; + void *desc; /* descriptor ring memory */ + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + u16 count; /* amount of descriptors */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + union { + struct ixgbevf_tx_buffer *tx_buffer_info; + struct ixgbevf_rx_buffer *rx_buffer_info; + }; + unsigned long state; + struct ixgbevf_stats stats; + struct u64_stats_sync syncp; + union { + struct ixgbevf_tx_queue_stats tx_stats; + struct ixgbevf_rx_queue_stats rx_stats; + }; + + u64 hw_csum_rx_error; + u8 __iomem *tail; + struct sk_buff *skb; + + /* holds the special value that gets the hardware register offset + * associated with this ring, which is different for DCB and RSS modes + */ + u16 reg_idx; + int queue_index; /* needed for multiqueue queue management */ +}; + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
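/*
 * TXD_USE_COUNT() above divides a buffer length by the 16 KiB
 * (1 << IXGBE_MAX_TXD_PWR) per-descriptor data limit, rounding up, so
 * DESC_NEEDED can budget MAX_SKB_FRAGS + 4 slots for the worst case.
 * A small arithmetic sketch of that accounting; the 64 KiB fragment size
 * is just an example.
 */
#include <stdio.h>

#define MAX_DATA_PER_TXD (1 << 14)	/* 16384 bytes per Tx descriptor */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(s) DIV_ROUND_UP((s), MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int frag_len = 65536;	/* one large TSO fragment, example */

	/* A 64 KiB fragment needs four descriptors of 16 KiB each. */
	printf("descriptors for %u bytes: %u\n",
	       frag_len, TXD_USE_COUNT(frag_len));	/* 4 */

	/* A small 60-byte frame still consumes one descriptor. */
	printf("descriptors for 60 bytes: %u\n", TXD_USE_COUNT(60)); /* 1 */
	return 0;
}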
*/ +#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES +#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES +#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_82599_RETA_SIZE 128 +#define IXGBEVF_RSS_HASH_KEY_SIZE 40 + +#define IXGBEVF_DEFAULT_TXD 1024 +#define IXGBEVF_DEFAULT_RXD 512 +#define IXGBEVF_MAX_TXD 4096 +#define IXGBEVF_MIN_TXD 64 +#define IXGBEVF_MAX_RXD 4096 +#define IXGBEVF_MIN_RXD 64 + +/* Supported Rx Buffer Sizes */ +#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_2048 2048 + +#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +#define IXGBE_TX_FLAGS_CSUM (u32)(1) +#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) +#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) +#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +struct ixgbevf_ring_container { + struct ixgbevf_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ixgbevf_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct ixgbevf_q_vector { + struct ixgbevf_adapter *adapter; + /* index of q_vector within array, also used for finding the bit in + * EICR and friends that represents the vector for this ring + */ + u16 v_idx; + u16 itr; /* Interrupt throttle rate written to EITR */ + struct napi_struct napi; + struct ixgbevf_ring_container rx, tx; + char name[IFNAMSIZ + 9]; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define IXGBEVF_QV_STATE_IDLE 0 +#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ +#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ +#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ +#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) +#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) +#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ +#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ +#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \ + IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \ + IXGBEVF_QV_STATE_POLL_YIELD) + spinlock_t lock; +#endif /* CONFIG_NET_RX_BUSY_POLL */ +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) +{ + spin_lock_init(&q_vector->lock); + q_vector->state = IXGBEVF_QV_STATE_IDLE; +} + +/* called from the device poll routine to get ownership of a q_vector */ +static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_LOCKED) { + WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); + q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; + rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->tx.ring->stats.yields++; +#endif + } else { + /* we don't care if someone yielded */ + q_vector->state = IXGBEVF_QV_STATE_NAPI; + } + 
spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true is someone tried to get the qv while napi had it */ +static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | + IXGBEVF_QV_STATE_NAPI_YIELD)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* called from ixgbevf_low_latency_poll() */ +static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + + spin_lock_bh(&q_vector->lock); + if ((q_vector->state & IXGBEVF_QV_LOCKED)) { + q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; + rc = false; +#ifdef BP_EXTENDED_STATS + q_vector->rx.ring->stats.yields++; +#endif + } else { + /* preserve yield marks */ + q_vector->state |= IXGBEVF_QV_STATE_POLL; + } + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* returns true if someone tried to get the qv while it was locked */ +static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) +{ + int rc = false; + + spin_lock_bh(&q_vector->lock); + WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); + + if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD) + rc = true; + /* reset state to idle, unless QV is disabled */ + q_vector->state &= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) +{ + WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED)); + return q_vector->state & IXGBEVF_QV_USER_PEND; +} + +/* false if QV is currently owned */ +static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) +{ + int rc = true; + + spin_lock_bh(&q_vector->lock); + if (q_vector->state & IXGBEVF_QV_OWNED) + rc = false; + q_vector->state |= IXGBEVF_QV_STATE_DISABLED; + spin_unlock_bh(&q_vector->lock); + return rc; +} + +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/* microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define IXGBE_MIN_RSC_ITR 24 +#define IXGBE_100K_ITR 40 +#define IXGBE_20K_ITR 200 +#define IXGBE_10K_ITR 400 +#define IXGBE_8K_ITR 500 + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the ixgbe hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) +{ + writel(value, ring->tail); +} + +#define IXGBEVF_RX_DESC(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +#define IXGBEVF_TX_DESC(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +#define IXGBEVF_TX_CTXTDESC(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +#define MAX_MSIX_Q_VECTORS 2 + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + +/* board specific private data structure */ +struct ixgbevf_adapter { + /* this field must be first, see ixgbevf_process_skb_fields */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + + /* Interrupt Throttle Rate */ + u16 rx_itr_setting; + u16 tx_itr_setting; + + /* interrupt masks */ + u32 eims_enable_mask; + u32 eims_other; + + /* TX */ + int num_tx_queues; + struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ + u64 restart_queue; + u32 tx_timeout_count; + + /* RX */ + int num_rx_queues; + struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */ + u64 hw_csum_rx_error; + u64 hw_rx_no_dma_resources; + int num_msix_vectors; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. + */ + u32 flags; +#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1) +#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) + + struct msix_entry *msix_entries; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in ixgbe_vf.h */ + struct ixgbe_hw hw; + u16 msg_enable; + /* Interrupt Throttle Rate */ + u32 eitr_param; + + struct ixgbevf_hw_stats stats; + + unsigned long state; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + +#ifdef BP_EXTENDED_STATS + u64 bp_rx_yields; + u64 bp_rx_cleaned; + u64 bp_rx_missed; + + u64 bp_tx_yields; + u64 bp_tx_cleaned; + u64 bp_tx_missed; +#endif + + u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 link_speed; + bool link_up; + + struct timer_list service_timer; + struct work_struct service_task; + + spinlock_t mbx_lock; + unsigned long last_reset; +}; + +enum ixbgevf_state_t { + __IXGBEVF_TESTING, + __IXGBEVF_RESETTING, + __IXGBEVF_DOWN, + __IXGBEVF_DISABLED, + __IXGBEVF_REMOVING, + __IXGBEVF_SERVICE_SCHED, + __IXGBEVF_SERVICE_INITED, +}; + +enum ixgbevf_boards { + board_82599_vf, + board_X540_vf, + board_X550_vf, + board_X550EM_x_vf, +}; + +extern const struct ixgbevf_info ixgbevf_82599_vf_info; +extern const struct ixgbevf_info ixgbevf_X540_vf_info; +extern const struct ixgbevf_info ixgbevf_X550_vf_info; +extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; +extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; + +/* needed by ethtool.c */ +extern const char ixgbevf_driver_name[]; +extern const char ixgbevf_driver_version[]; + +void ixgbevf_up(struct ixgbevf_adapter *adapter); +void ixgbevf_down(struct ixgbevf_adapter *adapter); +void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); +void ixgbevf_reset(struct ixgbevf_adapter *adapter); +void ixgbevf_set_ethtool_ops(struct net_device *netdev); +int ixgbevf_setup_rx_resources(struct ixgbevf_ring *); +int ixgbevf_setup_tx_resources(struct ixgbevf_ring *); +void 
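/*
 * ixgbevf_desc_unused() above reports how many descriptors can still be
 * posted before next_to_use would run into next_to_clean, keeping the
 * usual one-slot gap so a full ring and an empty ring are not confused.
 * A standalone check of that wrap-around arithmetic with an illustrative
 * 512-entry ring.
 */
#include <stdio.h>
#include <stdint.h>

/* Same expression as ixgbevf_desc_unused(), with the ring count passed in. */
static uint16_t desc_unused(uint16_t ntc, uint16_t ntu, uint16_t count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* empty ring: both indices equal, one slot stays reserved */
	printf("%u\n", desc_unused(0, 0, 512));	   /* 511 */

	/* producer (next_to_use) ahead of consumer, no wrap yet */
	printf("%u\n", desc_unused(10, 200, 512)); /* 321 */

	/* producer wrapped past the end, consumer still behind */
	printf("%u\n", desc_unused(500, 20, 512)); /* 479 */
	return 0;
}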
ixgbevf_free_rx_resources(struct ixgbevf_ring *); +void ixgbevf_free_tx_resources(struct ixgbevf_ring *); +void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); +int ethtool_ioctl(struct ifreq *ifr); + +extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); + +void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); +void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); + +#ifdef DEBUG +char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw); +#define hw_dbg(hw, format, arg...) \ + printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg) +#else +#define hw_dbg(hw, format, arg...) do {} while (0) +#endif + +#endif /* _IXGBEVF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c new file mode 100644 index 000000000..e71cdde9c --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -0,0 +1,4277 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/****************************************************************************** + Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code +******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbevf.h" + +const char ixgbevf_driver_name[] = "ixgbevf"; +static const char ixgbevf_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; + +#define DRV_VERSION "2.12.1-k" +const char ixgbevf_driver_version[] = DRV_VERSION; +static char ixgbevf_copyright[] = + "Copyright (c) 2009 - 2012 Intel Corporation."; + +static const struct ixgbevf_info *ixgbevf_info_tbl[] = { + [board_82599_vf] = &ixgbevf_82599_vf_info, + [board_X540_vf] = &ixgbevf_X540_vf_info, + [board_X550_vf] = &ixgbevf_X550_vf_info, + [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, +}; + +/* ixgbevf_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ixgbevf_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) +{ + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state) && + !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) + schedule_work(&adapter->service_task); +} + +static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) +{ + BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); +} + +/* forward decls */ +static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter); +static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); +static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); + +static void ixgbevf_remove_adapter(struct ixgbe_hw *hw) +{ + struct ixgbevf_adapter *adapter = hw->back; + + if (!hw->hw_addr) + return; + hw->hw_addr = NULL; + dev_err(&adapter->pdev->dev, "Adapter removed\n"); + if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) + ixgbevf_service_event_schedule(adapter); +} + +static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) +{ + u32 value; + + /* The following check not only optimizes a bit by not + * performing a read on the status register when the + * register just read was a status register read that + * returned IXGBE_FAILED_READ_REG. It also blocks any + * potential recursion. 
+ */ + if (reg == IXGBE_VFSTATUS) { + ixgbevf_remove_adapter(hw); + return; + } + value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbevf_remove_adapter(hw); +} + +u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u32 value; + + if (IXGBE_REMOVED(reg_addr)) + return IXGBE_FAILED_READ_REG; + value = readl(reg_addr + reg); + if (unlikely(value == IXGBE_FAILED_READ_REG)) + ixgbevf_check_remove(hw, reg); + return value; +} + +/** + * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + **/ +static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, + u8 queue, u8 msix_vector) +{ + u32 ivar, index; + struct ixgbe_hw *hw = &adapter->hw; + + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + ivar &= ~0xFF; + ivar |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); + } else { + /* Tx or Rx causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = ((16 * (queue & 1)) + (8 * direction)); + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + ivar &= ~(0xFF << index); + ivar |= (msix_vector << index); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); + } +} + +static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) +{ + return ring->stats.packets; +} + +static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) +{ + struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); + struct ixgbe_hw *hw = &adapter->hw; + + u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); + u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring) +{ + u32 tx_done = ixgbevf_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = ixgbevf_get_tx_pending(tx_ring); + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. 
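/*
 * ixgbevf_set_ivar() above packs one 8-bit vector entry per interrupt
 * cause into VTIVAR: each 32-bit register covers two queues, the Rx entry
 * sits in the lower byte of a queue's half and the Tx entry one byte above
 * it, hence the 16 * (queue & 1) + 8 * direction shift.  A worked
 * computation of those slots; the queue numbers are arbitrary examples.
 */
#include <stdio.h>
#include <stdint.h>

/* direction: 0 = Rx cause, 1 = Tx cause, as in ixgbevf_set_ivar(). */
static void ivar_slot(uint8_t queue, int direction,
		      unsigned int *reg_index, unsigned int *bit_shift)
{
	*reg_index = queue >> 1;			/* two queues per register */
	*bit_shift = (16 * (queue & 1)) + (8 * direction);
}

int main(void)
{
	static const char *dir_name[] = { "Rx", "Tx" };
	uint8_t queue;
	int dir;

	for (queue = 0; queue < 2; queue++) {
		for (dir = 0; dir < 2; dir++) {
			unsigned int reg, shift;

			ivar_slot(queue, dir, &reg, &shift);
			printf("queue %u %s -> VTIVAR[%u], bits %u..%u\n",
			       queue, dir_name[dir], reg, shift, shift + 7);
		}
	}
	return 0;
}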
+ */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED, + &tx_ring->state); + } + /* reset the countdown */ + clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); + + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + + return false; +} + +static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { + adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + ixgbevf_service_event_schedule(adapter); + } +} + +/** + * ixgbevf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbevf_tx_timeout(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + ixgbevf_tx_timeout_reset(adapter); +} + +/** + * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: board private structure + * @tx_ring: tx ring to clean + **/ +static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring *tx_ring) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = tx_ring->count / 2; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return true; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBEVF_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if DD is not set pending work has not been completed */ + if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += 
total_packets; + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { + struct ixgbe_hw *hw = &adapter->hw; + union ixgbe_adv_tx_desc *eop_desc; + + eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; + + pr_err("Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " next_to_watch <%p>\n" + " eop_desc->wb.status <%x>\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), + tx_ring->next_to_use, i, + eop_desc, (eop_desc ? eop_desc->wb.status : 0), + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* schedule immediate reset if we believe we hung */ + ixgbevf_tx_timeout_reset(adapter); + + return true; + } + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__IXGBEVF_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return !!budget; +} + +/** + * ixgbevf_rx_skb - Helper function to determine proper Rx method + * @q_vector: structure containing interrupt and ring information + * @skb: packet to send up + **/ +static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, + struct sk_buff *skb) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + skb_mark_napi_id(skb, &q_vector->napi); + + if (ixgbevf_qv_busy_polling(q_vector)) { + netif_receive_skb(skb); + /* exit early if we busy polled */ + return; + } +#endif /* CONFIG_NET_RX_BUSY_POLL */ + + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containig ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* if IP and error */ + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { + ring->rx_stats.csum_err++; + return; + } + + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) + return; + + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +/** + * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the checksum, VLAN, protocol, and other fields within + * the skb. 
+ **/ +static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ixgbevf_rx_checksum(rx_ring, rx_desc, skb); + + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + unsigned long *active_vlans = netdev_priv(rx_ring->netdev); + + if (test_bit(vid & VLAN_VID_MASK, active_vlans)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +/** + * ixgbevf_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); + + if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + return true; +} + +static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma = bi->dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + +/** + * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on + * @cleaned_count: number of buffers to replace + **/ +static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, + u16 cleaned_count) +{ + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbevf_rx_buffer *bi; + unsigned int i = rx_ring->next_to_use; + + /* nothing to do or no valid netdev defined */ + if (!cleaned_count || !rx_ring->netdev) + return; + + rx_desc = IXGBEVF_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; + + do { + if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) + break; + + /* Refresh the desc even if pkt_addr didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->read.hdr_addr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ixgbevf_write_tail(rx_ring, i); + } +} + +/** + * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbevf specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + **/ +static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ixgbevf_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ **/ +static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbevf_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbevf_pull_tail(rx_ring, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/** + * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *old_buff) +{ + struct ixgbevf_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->dma = old_buff->dma; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static inline bool ixgbevf_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +/** + * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
+ **/ +static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IXGBEVF_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as is */ + if (likely(!ixgbevf_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ixgbevf_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; + +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) + return false; + +#endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBEVF_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbevf_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; +} + +static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, + u32 qmask) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); +} + +static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring *rx_ring, + int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = 
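/*
 * With 4 KiB pages, ixgbevf_add_rx_frag() above hands a page out in two
 * 2 KiB halves and toggles between them by XOR-ing the offset with
 * IXGBEVF_RX_BUFSZ, recycling the page for as long as the driver is its
 * only user.  A small demonstration of the offset toggling; the 4 KiB
 * PAGE_SIZE is an assumption of this sketch.
 */
#include <stdio.h>

#define RX_BUFSZ 2048	/* IXGBEVF_RXBUFFER_2048 */
#define PAGE_SZ  4096	/* assumed 4 KiB page */

int main(void)
{
	unsigned int offset = 0;
	int i;

	/* Each received buffer uses one half of the page; flipping the
	 * offset leaves the other half free to be posted to hardware.
	 */
	for (i = 0; i < 4; i++) {
		printf("buffer %d uses bytes %u..%u of the page\n",
		       i, offset, offset + RX_BUFSZ - 1);
		offset ^= RX_BUFSZ;	/* toggles 0 <-> 2048 */
	}

	printf("offsets never leave the %u-byte page\n", PAGE_SZ);
	return 0;
}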
ixgbevf_desc_unused(rx_ring); + struct sk_buff *skb = rx_ring->skb; + + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { + ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); + + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); + + /* retrieve a buffer from the ring */ + skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); + + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; + + cleaned_count++; + + /* fetch next buffer in frame if non-eop */ + if (ixgbevf_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* Workaround hardware that can't do proper VEPA multicast + * source pruning. + */ + if ((skb->pkt_type == PACKET_BROADCAST || + skb->pkt_type == PACKET_MULTICAST) && + ether_addr_equal(rx_ring->netdev->dev_addr, + eth_hdr(skb)->h_source)) { + dev_kfree_skb_irq(skb); + continue; + } + + /* populate checksum, VLAN, and protocol */ + ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); + + ixgbevf_rx_skb(q_vector, skb); + + /* reset skb pointer */ + skb = NULL; + + /* update budget accounting */ + total_rx_packets++; + } + + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * ixgbevf_poll - NAPI polling calback + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean more than one or more rings associated with a + * q_vector. 
+ **/ +static int ixgbevf_poll(struct napi_struct *napi, int budget) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + ixgbevf_for_each_ring(ring, q_vector->tx) + clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); + +#ifdef CONFIG_NET_RX_BUSY_POLL + if (!ixgbevf_qv_lock_napi(q_vector)) + return budget; +#endif + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ixgbevf_for_each_ring(ring, q_vector->rx) + clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget) + < per_ring_budget); + +#ifdef CONFIG_NET_RX_BUSY_POLL + ixgbevf_qv_unlock_napi(q_vector); +#endif + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbevf_set_itr(q_vector); + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) + ixgbevf_irq_enable_queues(adapter, + 1 << q_vector->v_idx); + + return 0; +} + +/** + * ixgbevf_write_eitr - write VTEITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + **/ +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; + + /* set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt + */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int ixgbevf_busy_poll_recv(struct napi_struct *napi) +{ + struct ixgbevf_q_vector *q_vector = + container_of(napi, struct ixgbevf_q_vector, napi); + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbevf_ring *ring; + int found = 0; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return LL_FLUSH_FAILED; + + if (!ixgbevf_qv_lock_poll(q_vector)) + return LL_FLUSH_BUSY; + + ixgbevf_for_each_ring(ring, q_vector->rx) { + found = ixgbevf_clean_rx_irq(q_vector, ring, 4); +#ifdef BP_EXTENDED_STATS + if (found) + ring->stats.cleaned += found; + else + ring->stats.misses++; +#endif + if (found) + break; + } + + ixgbevf_qv_unlock_poll(q_vector); + + return found; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/** + * ixgbevf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_q_vector *q_vector; + int q_vectors, v_idx; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + adapter->eims_enable_mask = 0; + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. 
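/*
 * ixgbevf_poll() above splits the NAPI budget evenly across the Rx rings
 * attached to a vector but never lets a ring's share drop below one, so
 * every ring makes progress even on a tight budget.  A quick check of
 * that split for a few ring counts; the budget of 64 is a typical NAPI
 * weight used here purely as an example.
 */
#include <stdio.h>

static int per_ring_budget(int budget, int rx_ring_count)
{
	if (rx_ring_count > 1) {
		int share = budget / rx_ring_count;

		return share > 1 ? share : 1;
	}
	return budget;
}

int main(void)
{
	int counts[] = { 1, 2, 3, 100 };
	size_t i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("%3d rings -> %d packets per ring\n",
		       counts[i], per_ring_budget(64, counts[i]));
	/* 1 -> 64, 2 -> 32, 3 -> 21, 100 -> 1 */
	return 0;
}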
+ */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + struct ixgbevf_ring *ring; + + q_vector = adapter->q_vector[v_idx]; + + ixgbevf_for_each_ring(ring, q_vector->rx) + ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ixgbevf_for_each_ring(ring, q_vector->tx) + ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); + + if (q_vector->tx.ring && !q_vector->rx.ring) { + /* Tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = IXGBE_10K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* Rx or Rx/Tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = IXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= 1 << v_idx; + + ixgbevf_write_eitr(q_vector); + } + + ixgbevf_set_ivar(adapter, -1, 1, v_idx); + /* setup eims_other and add value to global eims_enable_mask */ + adapter->eims_other = 1 << v_idx; + adapter->eims_enable_mask |= adapter->eims_other; +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * ixgbevf_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring_container *ring_container) +{ + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; + u32 timepassed_us; + u64 bytes_perint; + u8 itr_setting = ring_container->itr; + + if (packets == 0) + return; + + /* simple throttle rate management + * 0-20MB/s lowest (100000 ints/s) + * 20-100MB/s low (20000 ints/s) + * 100-1249MB/s bulk (8000 ints/s) + */ + /* what was last interrupt timeslice? 
*/ + timepassed_us = q_vector->itr >> 2; + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: + if (bytes_perint > 10) + itr_setting = low_latency; + break; + case low_latency: + if (bytes_perint > 20) + itr_setting = bulk_latency; + else if (bytes_perint <= 10) + itr_setting = lowest_latency; + break; + case bulk_latency: + if (bytes_perint <= 20) + itr_setting = low_latency; + break; + } + + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; +} + +static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) +{ + u32 new_itr = q_vector->itr; + u8 current_itr; + + ixgbevf_update_itr(q_vector, &q_vector->tx); + ixgbevf_update_itr(q_vector, &q_vector->rx); + + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = IXGBE_100K_ITR; + break; + case low_latency: + new_itr = IXGBE_20K_ITR; + break; + case bulk_latency: + default: + new_itr = IXGBE_8K_ITR; + break; + } + + if (new_itr != q_vector->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + ixgbevf_write_eitr(q_vector); + } +} + +static irqreturn_t ixgbevf_msix_other(int irq, void *data) +{ + struct ixgbevf_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + + hw->mac.get_link_status = 1; + + ixgbevf_service_event_schedule(adapter); + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +/** + * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) + * @irq: unused + * @data: pointer to our q_vector struct for this interrupt vector + **/ +static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) +{ + struct ixgbevf_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, + int r_idx) +{ + struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; + + a->rx_ring[r_idx]->next = q_vector->rx.ring; + q_vector->rx.ring = a->rx_ring[r_idx]; + q_vector->rx.count++; +} + +static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, + int t_idx) +{ + struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; + + a->tx_ring[t_idx]->next = q_vector->tx.ring; + q_vector->tx.ring = a->tx_ring[t_idx]; + q_vector->tx.count++; +} + +/** + * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. 
+ **/ +static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) +{ + int q_vectors; + int v_start = 0; + int rxr_idx = 0, txr_idx = 0; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int i, j; + int rqpv, tqpv; + int err = 0; + + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + /* The ideal configuration... + * We have enough vectors to map one per queue. + */ + if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) + map_vector_to_rxq(adapter, v_start, rxr_idx); + + for (; txr_idx < txr_remaining; v_start++, txr_idx++) + map_vector_to_txq(adapter, v_start, txr_idx); + goto out; + } + + /* If we don't have enough vectors for a 1-to-1 + * mapping, we'll have to group them so there are + * multiple queues per vector. + */ + /* Re-adjusting *qpv takes care of the remainder. */ + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); + for (j = 0; j < rqpv; j++) { + map_vector_to_rxq(adapter, i, rxr_idx); + rxr_idx++; + rxr_remaining--; + } + } + for (i = v_start; i < q_vectors; i++) { + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); + for (j = 0; j < tqpv; j++) { + map_vector_to_txq(adapter, i, txr_idx); + txr_idx++; + txr_remaining--; + } + } + +out: + return err; +} + +/** + * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int vector, err; + int ri = 0, ti = 0; + + for (vector = 0; vector < q_vectors; vector++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + hw_dbg(&adapter->hw, + "request_irq failed for MSIX interrupt Error: %d\n", + err); + goto free_queue_irqs; + } + } + + err = request_irq(adapter->msix_entries[vector].vector, + &ixgbevf_msix_other, 0, netdev->name, adapter); + if (err) { + hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", + err); + goto free_queue_irqs; + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } + /* This failure is non-recoverable - it indicates the system is + * out of MSIX vector resources and the VF driver cannot run + * without them. Set the number of msix vectors to zero + * indicating that not enough can be allocated. The error + * will be returned to the user indicating device open failed. + * Any further attempts to force the driver to open will also + * fail. The only way to recover is to unload the driver and + * reload it again. 
If the system has recovered some MSIX + * vectors then it may succeed. + */ + adapter->num_msix_vectors = 0; + return err; +} + +static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) +{ + int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (i = 0; i < q_vectors; i++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + q_vector->rx.count = 0; + q_vector->tx.count = 0; + } +} + +/** + * ixgbevf_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) +{ + int err = 0; + + err = ixgbevf_request_msix_irqs(adapter); + + if (err) + hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); + + return err; +} + +static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) +{ + int i, q_vectors; + + q_vectors = adapter->num_msix_vectors; + i = q_vectors - 1; + + free_irq(adapter->msix_entries[i].vector, adapter); + i--; + + for (; i >= 0; i--) { + /* free only the irqs that were actually requested */ + if (!adapter->q_vector[i]->rx.ring && + !adapter->q_vector[i]->tx.ring) + continue; + + free_irq(adapter->msix_entries[i].vector, + adapter->q_vector[i]); + } + + ixgbevf_reset_q_vectors(adapter); +} + +/** + * ixgbevf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); + + IXGBE_WRITE_FLUSH(hw); + + for (i = 0; i < adapter->num_msix_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); +} + +/** + * ixgbevf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); +} + +/** + * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset + * @adapter: board private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset. 
+ **/ +static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl = IXGBE_TXDCTL_ENABLE; + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32); + IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_tx_desc)); + + /* disable head writeback */ + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0); + + /* enable relaxed ordering */ + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx), + (IXGBE_DCA_TXCTRL_DESC_RRO_EN | + IXGBE_DCA_TXCTRL_DATA_RRO_EN)); + + /* reset head and tail pointers */ + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + + /* In order to avoid issues, WTHRESH + PTHRESH should always be equal + * to or less than the number of on-chip descriptors, which is + * currently 40. + */ + txdctl |= (8 << 16); /* WTHRESH = 8 */ + + /* Setting PTHRESH to 32 improves performance */ + txdctl |= (1 << 8) | /* HTHRESH = 1 */ + 32; /* PTHRESH = 32 */ + + clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); + + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); + + /* poll to verify queue is enabled */ + do { + usleep_range(1000, 2000); + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!wait_loop) + pr_err("Could not enable Tx Queue %d\n", reg_idx); +} + +/** + * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset.
+ **/ +static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) +{ + u32 i; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 srrctl; + + srrctl = IXGBE_SRRCTL_DROP_EN; + + srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); +} + +static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + /* PSRTYPE must be initialized in 82599 */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | + IXGBE_PSRTYPE_L2HDR; + + if (adapter->num_rx_queues > 1) + psrtype |= 1 << 29; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); +} + +#define IXGBEVF_MAX_RX_DESC_POLL 10 +static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (IXGBE_REMOVED(hw->hw_addr)) + return; + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); + + /* the hardware may take up to 100us to really disable the Rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) + pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n", + reg_idx); +} + +static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBEVF_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + if (IXGBE_REMOVED(hw->hw_addr)) + return; + do { + usleep_range(1000, 2000); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) + pr_err("RXDCTL.ENABLE queue %d not set while polling\n", + reg_idx); +} + +static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfmrqc = 0, vfreta = 0; + u32 rss_key[10]; + u16 rss_i = adapter->num_rx_queues; + int i, j; + + /* Fill out hash function seeds */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); + + /* Fill out redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + vfreta = (vfreta << 8) | (j * 0x1); + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); + } + + /* Perform hash on these packet types */ + vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 | + IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP | + IXGBE_VFMRQC_RSS_FIELD_IPV6 | + IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP; + + vfmrqc |= IXGBE_VFMRQC_RSSEN; + + IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc); +} + +static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + 
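+ /* Bring-up mirrors the Tx path: quiesce the queue, program the
+  * descriptor base and length, reset head/tail and the driver's
+  * next_to_use/next_to_clean/next_to_alloc indices, set the SRRCTL
+  * buffer sizes, then re-enable RXDCTL and refill the ring.
+  */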
/* disable queue to avoid issues while updating state */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); + ixgbevf_disable_rx_queue(adapter, ring); + + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32); + IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_rx_desc)); + + /* enable relaxed ordering */ + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), + IXGBE_DCA_RXCTRL_DESC_RRO_EN); + + /* reset head and tail pointers */ + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); + + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + + ixgbevf_configure_srrctl(adapter, reg_idx); + + /* allow any size packet since we can handle overflow */ + rxdctl &= ~IXGBE_RXDCTL_RLPML_EN; + + rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); + + ixgbevf_rx_desc_queue_enable(adapter, ring); + ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring)); +} + +/** + * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) +{ + int i; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + ixgbevf_setup_psrtype(adapter); + if (hw->mac.type >= ixgbe_mac_X550_vf) + ixgbevf_setup_vfmrqc(adapter); + + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); +} + +static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err; + + spin_lock_bh(&adapter->mbx_lock); + + /* add VID to filter table */ + err = hw->mac.ops.set_vfta(hw, vid, 0, true); + + spin_unlock_bh(&adapter->mbx_lock); + + /* translate error return types so error makes sense */ + if (err == IXGBE_ERR_MBX) + return -EIO; + + if (err == IXGBE_ERR_INVALID_ARGUMENT) + return -EACCES; + + set_bit(vid, adapter->active_vlans); + + return err; +} + +static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err = -EOPNOTSUPP; + + spin_lock_bh(&adapter->mbx_lock); + + /* remove VID from filter table */ + err = hw->mac.ops.set_vfta(hw, vid, 0, false); + + spin_unlock_bh(&adapter->mbx_lock); + + clear_bit(vid, adapter->active_vlans); + + return err; +} + +static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgbevf_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); +} + +static int ixgbevf_write_uc_addr_list(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int count = 0; + + if ((netdev_uc_count(netdev)) > 10) { + pr_err("Too many unicast filters - No Space\n"); + return -ENOSPC; + } + 
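+ /* Addresses are handed to the PF one at a time; index 0 with a NULL
+  * address is the "clear all" request, so programming starts at index 1
+  * (hence the pre-increment below) and at most 10 entries are accepted.
+  */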
+ if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, netdev) { + hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); + udelay(200); + } + } else { + /* If the list is empty then send message to PF driver to + * clear all MAC VLANs on this VF. + */ + hw->mac.ops.set_uc_addr(hw, 0, NULL); + } + + return count; +} + +/** + * ixgbevf_set_rx_mode - Multicast and unicast set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the multicast address + * list, unicast address list or the network interface flags are updated. + * This routine is responsible for configuring the hardware for proper + * multicast mode and configuring requested unicast filters. + **/ +static void ixgbevf_set_rx_mode(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + spin_lock_bh(&adapter->mbx_lock); + + /* reprogram multicast list */ + hw->mac.ops.update_mc_addr_list(hw, netdev); + + ixgbevf_write_uc_addr_list(netdev); + + spin_unlock_bh(&adapter->mbx_lock); +} + +static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) +{ + int q_idx; + struct ixgbevf_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); +#endif + napi_enable(&q_vector->napi); + } +} + +static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) +{ + int q_idx; + struct ixgbevf_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); +#ifdef CONFIG_NET_RX_BUSY_POLL + while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { + pr_info("QV %d locked\n", q_idx); + usleep_range(1000, 20000); + } +#endif /* CONFIG_NET_RX_BUSY_POLL */ + } +} + +static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int def_q = 0; + unsigned int num_tcs = 0; + unsigned int num_rx_queues = adapter->num_rx_queues; + unsigned int num_tx_queues = adapter->num_tx_queues; + int err; + + spin_lock_bh(&adapter->mbx_lock); + + /* fetch queue configuration from the PF */ + err = ixgbevf_get_queues(hw, &num_tcs, &def_q); + + spin_unlock_bh(&adapter->mbx_lock); + + if (err) + return err; + + if (num_tcs > 1) { + /* we need only one Tx queue */ + num_tx_queues = 1; + + /* update default Tx ring register index */ + adapter->tx_ring[0]->reg_idx = def_q; + + /* we need as many queues as traffic classes */ + num_rx_queues = num_tcs; + } + + /* if we have a bad config abort request queue reset */ + if ((adapter->num_rx_queues != num_rx_queues) || + (adapter->num_tx_queues != num_tx_queues)) { + /* force mailbox timeout to prevent further messages */ + hw->mbx.timeout = 0; + + /* wait for watchdog to come around and bail us out */ + adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; + } + + return 0; +} + +static void ixgbevf_configure(struct ixgbevf_adapter *adapter) +{ + ixgbevf_configure_dcb(adapter); + + ixgbevf_set_rx_mode(adapter->netdev); + + ixgbevf_restore_vlan(adapter); + + ixgbevf_configure_tx(adapter); + ixgbevf_configure_rx(adapter); +} + +static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) +{ + /* Only save pre-reset stats if there are some */ + if (adapter->stats.vfgprc || 
adapter->stats.vfgptc) { + adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - + adapter->stats.base_vfgprc; + adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - + adapter->stats.base_vfgptc; + adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - + adapter->stats.base_vfgorc; + adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - + adapter->stats.base_vfgotc; + adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - + adapter->stats.base_vfmprc; + } +} + +static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); + adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); + adapter->stats.last_vfgorc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); + adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); + adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); + adapter->stats.last_vfgotc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); + adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); + + adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; + adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; + adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; + adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; + adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; +} + +static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int api[] = { ixgbe_mbox_api_12, + ixgbe_mbox_api_11, + ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown }; + int err = 0, idx = 0; + + spin_lock_bh(&adapter->mbx_lock); + + while (api[idx] != ixgbe_mbox_api_unknown) { + err = ixgbevf_negotiate_api_version(hw, api[idx]); + if (!err) + break; + idx++; + } + + spin_unlock_bh(&adapter->mbx_lock); +} + +static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + + ixgbevf_configure_msix(adapter); + + spin_lock_bh(&adapter->mbx_lock); + + if (is_valid_ether_addr(hw->mac.addr)) + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + else + hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); + + spin_unlock_bh(&adapter->mbx_lock); + + smp_mb__before_atomic(); + clear_bit(__IXGBEVF_DOWN, &adapter->state); + ixgbevf_napi_enable_all(adapter); + + /* clear any pending interrupts, may auto mask */ + IXGBE_READ_REG(hw, IXGBE_VTEICR); + ixgbevf_irq_enable(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(netdev); + + ixgbevf_save_reset_stats(adapter); + ixgbevf_init_last_counter_stats(adapter); + + hw->mac.get_link_status = 1; + mod_timer(&adapter->service_timer, jiffies); +} + +void ixgbevf_up(struct ixgbevf_adapter *adapter) +{ + ixgbevf_configure(adapter); + + ixgbevf_up_complete(adapter); +} + +/** + * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + unsigned long size; + unsigned int i; + + /* Free Rx ring sk_buff */ + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + /* Free all the Rx ring pages */ + for (i = 0; i < rx_ring->count; i++) { + struct ixgbevf_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) + dma_unmap_page(dev, 
rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (rx_buffer->page) + __free_page(rx_buffer->page); + rx_buffer->page = NULL; + } + + size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); +} + +/** + * ixgbevf_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring) +{ + struct ixgbevf_tx_buffer *tx_buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->tx_buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + } + + size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); +} + +/** + * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_clean_tx_ring(adapter->tx_ring[i]); +} + +void ixgbevf_down(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + /* signal that we are down to the interrupt handler */ + if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) + return; /* do nothing if already down */ + + /* disable all enabled Rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); + + usleep_range(10000, 20000); + + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + ixgbevf_irq_disable(adapter); + + ixgbevf_napi_disable_all(adapter); + + del_timer_sync(&adapter->service_timer); + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), + IXGBE_TXDCTL_SWFLSH); + } + + if (!pci_channel_offline(adapter->pdev)) + ixgbevf_reset(adapter); + + ixgbevf_clean_all_tx_rings(adapter); + ixgbevf_clean_all_rx_rings(adapter); +} + +void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) +{ + WARN_ON(in_interrupt()); + + while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) + msleep(1); + + ixgbevf_down(adapter); + ixgbevf_up(adapter); + + clear_bit(__IXGBEVF_RESETTING, &adapter->state); +} + +void ixgbevf_reset(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + if (hw->mac.ops.reset_hw(hw)) { + hw_dbg(hw, "PF still resetting\n"); + } else { + hw->mac.ops.init_hw(hw); + ixgbevf_negotiate_api(adapter); + } + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, + netdev->addr_len); + } + + adapter->last_reset = jiffies; +} + 
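The test_and_set_bit()/clear_bit() pair in ixgbevf_reinit_locked() above is a common single-owner pattern: the first caller to atomically set __IXGBEVF_RESETTING performs the down/up cycle while any concurrent caller sleeps until the bit is released. A minimal userspace sketch of the same idea, assuming C11 <stdatomic.h> and hypothetical down()/up() callbacks rather than the driver's own routines:

    #include <stdatomic.h>
    #include <unistd.h>

    static atomic_flag resetting = ATOMIC_FLAG_INIT;

    /* Sketch of the serialization used by ixgbevf_reinit_locked(). */
    static void reinit_locked_sketch(void (*down)(void), void (*up)(void))
    {
            /* atomic_flag_test_and_set() returns the previous value, just as
             * test_and_set_bit() does; sleep until we are the caller that set it.
             */
            while (atomic_flag_test_and_set(&resetting))
                    usleep(1000);                   /* stands in for msleep(1) */

            down();
            up();

            atomic_flag_clear(&resetting);          /* stands in for clear_bit() */
    }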
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, + int vectors) +{ + int vector_threshold; + + /* We'll want at least 2 (vector_threshold): + * 1) TxQ[0] + RxQ[0] handler + * 2) Other (Link Status Change, etc.) + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + + if (vectors < 0) { + dev_err(&adapter->pdev->dev, + "Unable to allocate MSI-X interrupts\n"); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return vectors; + } + + /* Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = vectors; + + return 0; +} + +/** + * ixgbevf_set_num_queues - Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int def_q = 0; + unsigned int num_tcs = 0; + int err; + + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + spin_lock_bh(&adapter->mbx_lock); + + /* fetch queue configuration from the PF */ + err = ixgbevf_get_queues(hw, &num_tcs, &def_q); + + spin_unlock_bh(&adapter->mbx_lock); + + if (err) + return; + + /* we need as many queues as traffic classes */ + if (num_tcs > 1) { + adapter->num_rx_queues = num_tcs; + } else { + u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES); + + switch (hw->api_version) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + adapter->num_rx_queues = rss; + adapter->num_tx_queues = rss; + default: + break; + } + } +} + +/** + * ixgbevf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
+ **/ +static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ring *ring; + int rx = 0, tx = 0; + + for (; tx < adapter->num_tx_queues; tx++) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->count = adapter->tx_ring_count; + ring->queue_index = tx; + ring->reg_idx = tx; + + adapter->tx_ring[tx] = ring; + } + + for (; rx < adapter->num_rx_queues; rx++) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_allocation; + + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + ring->count = adapter->rx_ring_count; + ring->queue_index = rx; + ring->reg_idx = rx; + + adapter->rx_ring[rx] = ring; + } + + return 0; + +err_allocation: + while (tx) { + kfree(adapter->tx_ring[--tx]); + adapter->tx_ring[tx] = NULL; + } + + while (rx) { + kfree(adapter->rx_ring[--rx]); + adapter->rx_ring[rx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + int vector, v_budget; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + * The default is to use pairs of vectors. + */ + v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += NON_Q_VECTORS; + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. + */ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + err = ixgbevf_acquire_msix_vectors(adapter, v_budget); + if (err) + goto out; + + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto out; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + +out: + return err; +} + +/** + * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
+ **/ +static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct ixgbevf_q_vector *q_vector; + + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->v_idx = q_idx; + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbevf_poll, 64); +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_add(&q_vector->napi); +#endif + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) +{ + int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; + + adapter->q_vector[q_idx] = NULL; +#ifdef CONFIG_NET_RX_BUSY_POLL + napi_hash_del(&q_vector->napi); +#endif + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + +/** + * ixgbevf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +/** + * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + ixgbevf_set_num_queues(adapter); + + err = ixgbevf_set_interrupt_capability(adapter); + if (err) { + hw_dbg(&adapter->hw, + "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ixgbevf_alloc_q_vectors(adapter); + if (err) { + hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + err = ixgbevf_alloc_queues(adapter); + if (err) { + pr_err("Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : + "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBEVF_DOWN, &adapter->state); + + return 0; +err_alloc_queues: + ixgbevf_free_q_vectors(adapter); +err_alloc_q_vectors: + ixgbevf_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + kfree(adapter->tx_ring[i]); + adapter->tx_ring[i] = NULL; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + kfree(adapter->rx_ring[i]); + adapter->rx_ring[i] = NULL; + } + + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbevf_free_q_vectors(adapter); + ixgbevf_reset_interrupt_capability(adapter); +} + +/** + * ixgbevf_sw_init - Initialize general software structures + * @adapter: board private structure to initialize + * + * ixgbevf_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int err; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + hw->mbx.ops.init_params(hw); + + /* assume legacy case in which PF would only give VF 2 queues */ + hw->mac.max_tx_queues = 2; + hw->mac.max_rx_queues = 2; + + /* lock to protect mailbox accesses */ + spin_lock_init(&adapter->mbx_lock); + + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state. 
Is the PF interface up?\n"); + } else { + err = hw->mac.ops.init_hw(hw); + if (err) { + pr_err("init_shared_code failed: %d\n", err); + goto out; + } + ixgbevf_negotiate_api(adapter); + err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + if (err) + dev_info(&pdev->dev, "Error reading MAC address\n"); + else if (is_zero_ether_addr(adapter->hw.mac.addr)) + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); + memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_info(&pdev->dev, "Assigning random MAC address\n"); + eth_hw_addr_random(netdev); + memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); + } + + /* Enable dynamic interrupt throttling rates */ + adapter->rx_itr_setting = 1; + adapter->tx_itr_setting = 1; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; + adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; + + set_bit(__IXGBEVF_DOWN, &adapter->state); + return 0; + +out: + return err; +} + +#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ + { \ + u32 current_counter = IXGBE_READ_REG(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ + u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } +/** + * ixgbevf_update_stats - Update the board statistics counters. 
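+ *
+ * The UPDATE_VF_COUNTER macros above extend the hardware's free-running
+ * 32-bit and 36-bit counters to 64 bits in software by detecting
+ * wrap-around against the previously read value.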
+ * @adapter: board private structure + **/ +void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, + adapter->stats.vfgprc); + UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, + adapter->stats.vfgptc); + UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + adapter->stats.last_vfgorc, + adapter->stats.vfgorc); + UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + adapter->stats.last_vfgotc, + adapter->stats.vfgotc); + UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, + adapter->stats.vfmprc); + + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->hw_csum_rx_error += + adapter->rx_ring[i]->hw_csum_rx_error; + adapter->rx_ring[i]->hw_csum_rx_error = 0; + } +} + +/** + * ixgbevf_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbevf_service_timer(unsigned long data) +{ + struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; + + /* Reset the timer */ + mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); + + ixgbevf_service_event_schedule(adapter); +} + +static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) +{ + if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED)) + return; + + adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED; + + /* If we're already down or resetting, just bail */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + adapter->tx_timeout_count++; + + ixgbevf_reinit_locked(adapter); +} + +/** + * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
+ **/ +static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 eics = 0; + int i; + + /* If we're down or resetting, just bail */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + /* get one bit for every active Tx/Rx interrupt vector */ + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { + struct ixgbevf_q_vector *qv = adapter->q_vector[i]; + + if (qv->rx.ring || qv->tx.ring) + eics |= 1 << i; + } + + /* Cause software interrupt to ensure rings are cleaned */ + IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); +} + +/** + * ixgbevf_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + **/ +static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + s32 err; + + spin_lock_bh(&adapter->mbx_lock); + + err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + + spin_unlock_bh(&adapter->mbx_lock); + + /* if check for link returns error we will need to reset */ + if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { + adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + link_up = false; + } + + adapter->link_up = link_up; + adapter->link_speed = link_speed; +} + +/** + * ixgbevf_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", + (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? + "10 Gbps" : + (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? + "1 Gbps" : + (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? 
+ "100 Mbps" : + "unknown speed"); + + netif_carrier_on(netdev); +} + +/** + * ixgbevf_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); + + netif_carrier_off(netdev); +} + +/** + * ixgbevf_watchdog_subtask - worker thread to bring link up + * @work: pointer to work_struct containing our data + **/ +static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) +{ + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + ixgbevf_watchdog_update_link(adapter); + + if (adapter->link_up) + ixgbevf_watchdog_link_is_up(adapter); + else + ixgbevf_watchdog_link_is_down(adapter); + + ixgbevf_update_stats(adapter); +} + +/** + * ixgbevf_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void ixgbevf_service_task(struct work_struct *work) +{ + struct ixgbevf_adapter *adapter = container_of(work, + struct ixgbevf_adapter, + service_task); + struct ixgbe_hw *hw = &adapter->hw; + + if (IXGBE_REMOVED(hw->hw_addr)) { + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { + rtnl_lock(); + ixgbevf_down(adapter); + rtnl_unlock(); + } + return; + } + + ixgbevf_queue_reset_subtask(adapter); + ixgbevf_reset_subtask(adapter); + ixgbevf_watchdog_subtask(adapter); + ixgbevf_check_hang_subtask(adapter); + + ixgbevf_service_event_complete(adapter); +} + +/** + * ixgbevf_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring) +{ + ixgbevf_clean_tx_ring(tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + if (adapter->tx_ring[i]->desc) + ixgbevf_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: Tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) +{ + int size; + + size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + hw_dbg(&adapter->hw, "Unable to allocate memory for 
the transmit descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; + hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) +{ + int size; + + size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); + if (!err) + continue; + hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); + break; + } + return err; +} + +/** + * ixgbevf_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring) +{ + ixgbevf_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->rx_ring[i]->desc) + ixgbevf_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * ixgbevf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbevf_open(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int err; + + /* A previous failure to open the device because of a lack of + * available MSIX vector resources may have reset the number + * of msix vectors variable to zero. The only way to recover + * is to unload/reload the driver and hope that the system has + * been able to recover some MSIX vector resources. + */ + if (!adapter->num_msix_vectors) + return -ENOMEM; + + if (hw->adapter_stopped) { + ixgbevf_reset(adapter); + /* if adapter is still stopped then PF isn't up and + * the VF can't start. + */ + if (hw->adapter_stopped) { + err = IXGBE_ERR_MBX; + pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); + goto err_setup_reset; + } + } + + /* disallow open during test */ + if (test_bit(__IXGBEVF_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = ixgbevf_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ixgbevf_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbevf_configure(adapter); + + /* Map the Tx/Rx rings to the vectors we were allotted. + * if request_irq will be called in this function map_rings + * must be called *before* up_complete + */ + ixgbevf_map_rings_to_vectors(adapter); + + err = ixgbevf_request_irq(adapter); + if (err) + goto err_req_irq; + + ixgbevf_up_complete(adapter); + + return 0; + +err_req_irq: + ixgbevf_down(adapter); +err_setup_rx: + ixgbevf_free_all_rx_resources(adapter); +err_setup_tx: + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_reset(adapter); + +err_setup_reset: + + return err; +} + +/** + * ixgbevf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int ixgbevf_close(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); + + return 0; +} + +static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + + if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) + return; + + adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_RESETTING, &adapter->state)) + return; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
+ */ + if (netif_running(dev)) + ixgbevf_close(dev); + + ixgbevf_clear_interrupt_scheme(adapter); + ixgbevf_init_interrupt_scheme(adapter); + + if (netif_running(dev)) + ixgbevf_open(dev); +} + +static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + u8 *hdr_len) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + if (first->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; + } + + /* compute header lengths */ + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + *hdr_len = skb_transport_offset(skb) + l4len; + + /* update GSO size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 1 as index for TSO */ + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first) +{ + struct sk_buff *skb = first->skb; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4_hdr = 0; + + switch (first->protocol) { + case htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + first->protocol); + } + break; + } + + switch (l4_hdr) { + case IPPROTO_TCP: + 
type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + l4_hdr); + } + break; + } + + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + } + + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + type_tucmd, mss_l4len_idx); +} + +static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_IFCS | + IXGBE_ADVTXD_DCMD_DEXT); + + /* set HW VLAN bit if VLAN is present */ + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); + + /* set segmentation enable bits for TSO/FSO */ + if (tx_flags & IXGBE_TX_FLAGS_TSO) + cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); + + return cmd_type; +} + +static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); + + /* enable L4 checksum for TSO and TX checksum offload */ + if (tx_flags & IXGBE_TX_FLAGS_CSUM) + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); + + /* enble IPv4 checksum for TSO */ + if (tx_flags & IXGBE_TX_FLAGS_IPV4) + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); + + /* use index 1 context for TSO/FSO/FCOE */ + if (tx_flags & IXGBE_TX_FLAGS_TSO) + olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); + + /* Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); + + tx_desc->read.olinfo_status = olinfo_status; +} + +static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + const u8 hdr_len) +{ + dma_addr_t dma; + struct sk_buff *skb = first->skb; + struct ixgbevf_tx_buffer *tx_buffer; + union ixgbe_adv_tx_desc *tx_desc; + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned int data_len = skb->data_len; + unsigned int size = skb_headlen(skb); + unsigned int paylen = skb->len - hdr_len; + u32 tx_flags = first->tx_flags; + __le32 cmd_type; + u16 i = tx_ring->next_to_use; + + tx_desc = IXGBEVF_TX_DESC(tx_ring, i); + + ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen); + cmd_type = ixgbevf_tx_cmd_type(tx_flags); + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(first, len, size); + dma_unmap_addr_set(first, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + for (;;) { + while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { + tx_desc->read.cmd_type_len = + cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += IXGBE_MAX_DATA_PER_TXD; + size -= IXGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = 
cpu_to_le64(dma); + tx_desc->read.olinfo_status = 0; + } + + if (likely(!data_len)) + break; + + tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); + + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBEVF_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.olinfo_status = 0; + + frag++; + } + + /* write last descriptor with RS and EOP bits */ + cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); + tx_desc->read.cmd_type_len = cmd_type; + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier (wmb) to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* notify HW of packet */ + ixgbevf_write_tail(tx_ring, i); + + return; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ixgbevf_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
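In ixgbevf_tx_map() above, a contiguous buffer larger than IXGBE_MAX_DATA_PER_TXD is carved into several descriptors, each pointing a fixed stride further into the same DMA mapping. A standalone sketch of that carving; a 16 KiB (16384-byte) per-descriptor limit is assumed here purely for illustration.

#include <stdio.h>

#define MAX_DATA_PER_TXD 16384u		/* assumed per-descriptor data limit */

/* print the (offset, length) pieces one linear buffer would occupy */
static unsigned int carve(unsigned int len)
{
	unsigned int off = 0, used = 0;

	while (len > MAX_DATA_PER_TXD) {
		printf("desc %u: offset %u, len %u\n", used++, off, MAX_DATA_PER_TXD);
		off += MAX_DATA_PER_TXD;
		len -= MAX_DATA_PER_TXD;
	}
	printf("desc %u: offset %u, len %u\n", used++, off, len);

	return used;			/* descriptors consumed */
}

int main(void)
{
	return carve(60000) == 4 ? 0 : 1;	/* a 60000-byte buffer needs 4 descriptors */
}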
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + + return 0; +} + +static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) +{ + if (likely(ixgbevf_desc_unused(tx_ring) >= size)) + return 0; + return __ixgbevf_maybe_stop_tx(tx_ring, size); +} + +static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_tx_buffer *first; + struct ixgbevf_ring *tx_ring; + int tso; + u32 tx_flags = 0; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + unsigned short f; +#endif + u8 hdr_len = 0; + u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); + + if (!dst_mac || is_link_local_ether_addr(dst_mac)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tx_ring = adapter->tx_ring[skb->queue_mapping]; + + /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb); + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_VLAN; + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = vlan_get_protocol(skb); + + tso = ixgbevf_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; + else if (!tso) + ixgbevf_tx_csum(tx_ring, first); + + ixgbevf_tx_map(tx_ring, first, hdr_len); + + ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; + + return NETDEV_TX_OK; +} + +/** + * ixgbevf_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbevf_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + spin_lock_bh(&adapter->mbx_lock); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + + spin_unlock_bh(&adapter->mbx_lock); + + return 0; +} + +/** + * ixgbevf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + int max_possible_frame = 
MAXIMUM_ETHERNET_VLAN_SIZE; + + switch (adapter->hw.api_version) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; + break; + default: + if (adapter->hw.mac.type != ixgbe_mac_82599_vf) + max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; + break; + } + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > max_possible_frame)) + return -EINVAL; + + hw_dbg(hw, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + /* must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbevf_netpoll(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return; + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + rtnl_lock(); + ixgbevf_down(adapter); + ixgbevf_free_irq(adapter); + ixgbevf_free_all_tx_resources(adapter); + ixgbevf_free_all_rx_resources(adapter); + rtnl_unlock(); + } + + ixgbevf_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbevf_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_restore_state(pdev); + /* pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
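ixgbevf_change_mtu() above turns an MTU into an on-wire frame size by adding the 14-byte Ethernet header and the 4-byte FCS, then compares it with the PF-advertised limit. A standalone sketch of that check; the 9728-byte jumbo limit is an assumption standing in for IXGBE_MAX_JUMBO_FRAME_SIZE.

#include <stdio.h>

#define ETH_HDR_LEN	14
#define ETH_FCS_BYTES	4
#define MAX_JUMBO	9728		/* assumed PF-side jumbo frame limit */

/* return the frame size the MTU implies, or 0 if it must be rejected */
static int mtu_to_frame(int new_mtu)
{
	int max_frame = new_mtu + ETH_HDR_LEN + ETH_FCS_BYTES;

	if (new_mtu < 68 || max_frame > MAX_JUMBO)
		return 0;

	return max_frame;
}

int main(void)
{
	printf("mtu 1500 -> %d-byte frames\n", mtu_to_frame(1500));	/* 1518 */
	printf("mtu 9000 -> %d-byte frames\n", mtu_to_frame(9000));	/* 9018 */
	return 0;
}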
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + smp_mb__before_atomic(); + clear_bit(__IXGBEVF_DISABLED, &adapter->state); + pci_set_master(pdev); + + ixgbevf_reset(adapter); + + rtnl_lock(); + err = ixgbevf_init_interrupt_scheme(adapter); + rtnl_unlock(); + if (err) { + dev_err(&pdev->dev, "Cannot initialize interrupts\n"); + return err; + } + + if (netif_running(netdev)) { + err = ixgbevf_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; +} + +#endif /* CONFIG_PM */ +static void ixgbevf_shutdown(struct pci_dev *pdev) +{ + ixgbevf_suspend(pdev, PMSG_SUSPEND); +} + +static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + unsigned int start; + u64 bytes, packets; + const struct ixgbevf_ring *ring; + int i; + + ixgbevf_update_stats(adapter); + + stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->rx_bytes += bytes; + stats->rx_packets += packets; + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + bytes = ring->stats.bytes; + packets = ring->stats.packets; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + stats->tx_bytes += bytes; + stats->tx_packets += packets; + } + + return stats; +} + +static const struct net_device_ops ixgbevf_netdev_ops = { + .ndo_open = ixgbevf_open, + .ndo_stop = ixgbevf_close, + .ndo_start_xmit = ixgbevf_xmit_frame, + .ndo_set_rx_mode = ixgbevf_set_rx_mode, + .ndo_get_stats64 = ixgbevf_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbevf_set_mac, + .ndo_change_mtu = ixgbevf_change_mtu, + .ndo_tx_timeout = ixgbevf_tx_timeout, + .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = ixgbevf_busy_poll_recv, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbevf_netpoll, +#endif +}; + +static void ixgbevf_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &ixgbevf_netdev_ops; + ixgbevf_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * ixgbevf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbevf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbevf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
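ixgbevf_get_stats() above reads each ring's 64-bit counters inside a u64_stats retry loop, so a reader on a 32-bit CPU never sees a torn value while the datapath is updating the counters. A kernel-context sketch of the reader side; the ring_stats_sketch layout is hypothetical, only the u64_stats_* calls mirror the code above.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* hypothetical per-ring counters, mirroring the syncp + packets/bytes pattern above */
struct ring_stats_sketch {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void read_ring_stats(struct ring_stats_sketch *r, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&r->syncp);
		*packets = r->packets;		/* snapshot both counters together */
		*bytes = r->bytes;
	} while (u64_stats_fetch_retry_irq(&r->syncp, start));	/* writer raced: retry */
}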
+ **/ +static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbevf_adapter *adapter = NULL; + struct ixgbe_hw *hw = NULL; + const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; + int err, pci_using_dac; + bool disable_dev = false; + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + err = pci_request_regions(pdev, ixgbevf_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), + MAX_TX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + /* call save state here in standalone driver because it relies on + * adapter struct to exist, and needs to call netdev_priv + */ + pci_save_state(pdev); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + adapter->io_addr = hw->hw_addr; + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + ixgbevf_assign_netdev_ops(netdev); + + /* Setup HW API */ + memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.type = ii->mac; + + memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, + sizeof(struct ixgbe_mbx_operations)); + + /* setup the private structure */ + err = ixgbevf_sw_init(adapter); + if (err) + goto err_sw_init; + + /* The HW MAC address was set and/or determined in sw_init */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + pr_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + + netdev->hw_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; + + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + if (IXGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } + + setup_timer(&adapter->service_timer, &ixgbevf_service_timer, + (unsigned long)adapter); + + INIT_WORK(&adapter->service_task, ixgbevf_service_task); + set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); + clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); + + err = ixgbevf_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + strcpy(netdev->name, "eth%d"); + + err = register_netdev(netdev); + if (err) + goto err_register; + + pci_set_drvdata(pdev, netdev); + netif_carrier_off(netdev); + + ixgbevf_init_last_counter_stats(adapter); + + /* print the VF info */ + dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); + dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); + + switch (hw->mac.type) { + case ixgbe_mac_X550_vf: + dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); + break; + case ixgbe_mac_X540_vf: + dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); + break; 
+ case ixgbe_mac_82599_vf: + default: + dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); + break; + } + + return 0; + +err_register: + ixgbevf_clear_interrupt_scheme(adapter); +err_sw_init: + ixgbevf_reset_interrupt_capability(adapter); + iounmap(adapter->io_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * ixgbevf_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbevf_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void ixgbevf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter; + bool disable_dev; + + if (!netdev) + return; + + adapter = netdev_priv(netdev); + + set_bit(__IXGBEVF_REMOVING, &adapter->state); + cancel_work_sync(&adapter->service_task); + + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + ixgbevf_clear_interrupt_scheme(adapter); + ixgbevf_reset_interrupt_capability(adapter); + + iounmap(adapter->io_addr); + pci_release_regions(pdev); + + hw_dbg(&adapter->hw, "Remove complete\n"); + + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * ixgbevf_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + **/ +static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + ixgbevf_down(adapter); + + if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbevf_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the ixgbevf_resume routine. + **/ +static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + smp_mb__before_atomic(); + clear_bit(__IXGBEVF_DISABLED, &adapter->state); + pci_set_master(pdev); + + ixgbevf_reset(adapter); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ixgbevf_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
Implementation resembles the + * second-half of the ixgbevf_resume routine. + **/ +static void ixgbevf_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbevf_up(adapter); + + netif_device_attach(netdev); +} + +/* PCI Error Recovery (ERS) */ +static const struct pci_error_handlers ixgbevf_err_handler = { + .error_detected = ixgbevf_io_error_detected, + .slot_reset = ixgbevf_io_slot_reset, + .resume = ixgbevf_io_resume, +}; + +static struct pci_driver ixgbevf_driver = { + .name = ixgbevf_driver_name, + .id_table = ixgbevf_pci_tbl, + .probe = ixgbevf_probe, + .remove = ixgbevf_remove, +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, +#endif + .shutdown = ixgbevf_shutdown, + .err_handler = &ixgbevf_err_handler +}; + +/** + * ixgbevf_init_module - Driver Registration Routine + * + * ixgbevf_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbevf_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", ixgbevf_driver_string, + ixgbevf_driver_version); + + pr_info("%s\n", ixgbevf_copyright); + + ret = pci_register_driver(&ixgbevf_driver); + return ret; +} + +module_init(ixgbevf_init_module); + +/** + * ixgbevf_exit_module - Driver Exit Cleanup Routine + * + * ixgbevf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbevf_exit_module(void) +{ + pci_unregister_driver(&ixgbevf_driver); +} + +#ifdef DEBUG +/** + * ixgbevf_get_hw_dev_name - return device name string + * used by hardware layer to print debugging information + **/ +char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) +{ + struct ixgbevf_adapter *adapter = hw->back; + + return adapter->netdev->name; +} + +#endif +module_exit(ixgbevf_exit_module); + +/* ixgbevf_main.c */ diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c new file mode 100644 index 000000000..dc68fea48 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -0,0 +1,348 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "mbx.h" +#include "ixgbevf.h" + +/** + * ixgbevf_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * returns 0 if it successfully received a message notification + **/ +static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + while (countdown && mbx->ops.check_for_msg(hw)) { + countdown--; + udelay(mbx->udelay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + + return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + * ixgbevf_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * + * returns 0 if it successfully received a message acknowledgment + **/ +static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + while (countdown && mbx->ops.check_for_ack(hw)) { + countdown--; + udelay(mbx->udelay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; + + return countdown ? 0 : IXGBE_ERR_MBX; +} + +/** + * ixgbevf_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = -IXGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = ixgbevf_poll_for_msg(hw); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); +out: + return ret_val; +} + +/** + * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = -IXGBE_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = ixgbevf_poll_for_ack(hw); +out: + return ret_val; +} + +/** + * ixgbevf_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw) +{ + u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ixgbevf_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
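Both pollers above count down from mbx->timeout, delaying between attempts, and on expiry zero the timeout so later posted messages fail immediately until the next reset re-arms it. A standalone sketch of that countdown-and-poison pattern; poll_until() and its callbacks are hypothetical.

/* poll done() at most *budget times, calling delay_once() between attempts;
 * on timeout, zero the budget so later callers fail fast until re-init
 */
static int poll_until(int (*done)(void), unsigned int *budget, void (*delay_once)(void))
{
	unsigned int countdown = *budget;

	while (countdown && !done()) {
		countdown--;
		delay_once();
	}

	if (!countdown)
		*budget = 0;

	return countdown ? 0 : -1;
}

static int attempts;
static int done_after_three(void) { return ++attempts >= 3; }
static void no_delay(void) { }

int main(void)
{
	unsigned int budget = 10;

	return poll_until(done_after_three, &budget, no_delay);	/* 0: succeeded in time */
}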
+ **/ +static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) +{ + u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw); + s32 ret_val = IXGBE_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = 0; + + hw->mbx.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * + * returns 0 if the PF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * + * returns 0 if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { + ret_val = 0; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbevf_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * + * returns true if the PF has set the reset done bit or else false + **/ +static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | + IXGBE_VFMAILBOX_RSTI))) { + ret_val = 0; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return 0 if we obtained the mailbox lock + **/ +static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); + + /* reserve mailbox for VF use */ + if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) + ret_val = 0; + + return ret_val; +} + +/** + * ixgbevf_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully copied message into the buffer + **/ +static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + s32 ret_val; + u16 i; + + /* lock the mailbox to prevent PF/VF race condition */ + ret_val = ixgbevf_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbevf_check_for_msg_vf(hw); + ixgbevf_check_for_ack_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); + +out_no_write: + return ret_val; +} + +/** + * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns 0 if it successfully read message from buffer + **/ +static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) +{ + s32 ret_val = 0; + u16 i; + + /* lock the mailbox to prevent PF/VF race condition */ + ret_val = ixgbevf_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = 
IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for VF mailbox + */ +static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications + */ + mbx->timeout = 0; + mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return 0; +} + +const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { + .init_params = ixgbevf_init_mbx_params_vf, + .read = ixgbevf_read_mbx_vf, + .write = ixgbevf_write_mbx_vf, + .read_posted = ixgbevf_read_posted_mbx, + .write_posted = ixgbevf_write_posted_mbx, + .check_for_msg = ixgbevf_check_for_msg_vf, + .check_for_ack = ixgbevf_check_for_ack_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, +}; + diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h new file mode 100644 index 000000000..82f44e06e --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -0,0 +1,128 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "vf.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. 
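IXGBE_VFMAILBOX_R2C_BITS above is simply the OR of the three read-to-clear status bits (PFSTS 0x10, PFACK 0x20, RSTD 0x80); RSTI is deliberately left out of the mask. A two-line standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	/* PFSTS | PFACK | RSTD == the R2C mask; RSTI (0x40) is excluded */
	assert((0x10 | 0x20 | 0x80) == 0xB0);
	return 0;
}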
+ * Message ACK's are the value or'd with 0xF0000000 + */ +/* Messages below or'd with this are the ACK */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +/* definitions to support mailbox API version negotiation */ + +/* each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */ + +/* GET_QUEUES return data indices within the mailbox */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +/* forward declaration of the HW struct */ +struct ixgbe_hw; + +#endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h new file mode 100644 index 000000000..2764fd162 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -0,0 +1,84 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
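A mailbox word above packs a command in the low bits, per-command extra info in bits 23:16, and ACK/NACK/CTS status in the top bits. A standalone sketch of composing a request and stripping the status from the PF's echo; the constant values are copied from the header above, the VLAN example itself is illustrative.

#include <assert.h>
#include <stdint.h>

#define MSGTYPE_ACK	0x80000000u
#define MSGINFO_SHIFT	16
#define MSGINFO_MASK	(0xFFu << MSGINFO_SHIFT)
#define VF_SET_VLAN	0x04u

int main(void)
{
	uint32_t req = VF_SET_VLAN | (1u << MSGINFO_SHIFT);	/* "add this VLAN" request */
	uint32_t reply = req | MSGTYPE_ACK;			/* PF echoes it with ACK   */

	/* strip status and info bits to recover the original command code */
	assert((reply & ~(MSGTYPE_ACK | MSGINFO_MASK)) == VF_SET_VLAN);
	return 0;
}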
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBEVF_REGS_H_ +#define _IXGBEVF_REGS_H_ + +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) + +/* VFMRQC bits */ +#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) + +#endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c new file mode 100644 index 000000000..d1339b050 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -0,0 +1,752 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "vf.h" +#include "ixgbevf.h" + +/** + * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw) +{ + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return 0; +} + +/** + * ixgbevf_init_hw_vf - virtual function hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware and then starting + * the hardware + **/ +static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) +{ + s32 status = hw->mac.ops.start_hw(hw); + + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + return status; +} + +/** + * ixgbevf_reset_hw_vf - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts. + **/ +static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 timeout = IXGBE_VF_INIT_TIMEOUT; + s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; + u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + /* reset the api version */ + hw->api_version = ixgbe_mbox_api_10; + + IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); + IXGBE_WRITE_FLUSH(hw); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw) && timeout) { + timeout--; + udelay(5); + } + + if (!timeout) + return IXGBE_ERR_RESET_FAILED; + + /* mailbox timeout can now become active */ + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = IXGBE_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1); + + mdelay(10); + + /* set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ + ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); + if (ret_val) + return ret_val; + + /* New versions of the PF may NACK the reset return message + * to indicate that no MAC address has yet been assigned for + * the VF. + */ + if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && + msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_INVALID_MAC_ADDR; + + ether_addr_copy(hw->mac.perm_addr, addr); + hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; + + return 0; +} + +/** + * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. 
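The reset handshake in ixgbevf_reset_hw_vf() above expects a four-word reply: word 0 echoes IXGBE_VF_RESET with an ACK or NACK flag, the six bytes starting at word 1 are the permanent MAC address, and word 3 carries the multicast filter type. A standalone sketch of that unpacking; the MAC value is made up and the PF side is simulated in place.

#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t msg[4] = { 0 };
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };	/* made up */
	uint8_t out[6];

	msg[0] = 0x01u | 0x80000000u;	/* IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK */
	memcpy(&msg[1], mac, 6);	/* PF: MAC occupies words 1-2            */
	msg[3] = 0;			/* PF: word 3 carries mc_filter_type     */

	memcpy(out, &msg[1], 6);	/* VF: addr = (u8 *)&msgbuf[1]           */
	return memcmp(out, mac, 6);	/* 0: the address round-trips            */
}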
The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) +{ + u32 number_of_queues; + u32 reg_val; + u16 i; + + /* Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit by stopped each queue */ + number_of_queues = hw->mac.max_rx_queues; + for (i = 0; i < number_of_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + if (reg_val & IXGBE_RXDCTL_ENABLE) { + reg_val &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); + } + } + + IXGBE_WRITE_FLUSH(hw); + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts */ + IXGBE_READ_REG(hw, IXGBE_VTEICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + number_of_queues = hw->mac.max_tx_queues; + for (i = 0; i < number_of_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + if (reg_val & IXGBE_TXDCTL_ENABLE) { + reg_val &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val); + } + } + + return 0; +} + +/** + * ixgbevf_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming Rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbevf_get_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to storage for retrieved MAC address + **/ +static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) +{ + ether_addr_copy(mac_addr, hw->mac.perm_addr); + + return 0; +} + +static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, sizeof(msgbuf)); + /* If index is one then this is the start of a new list and needs + * indication to the PF so it can do it's own list management. + * If it is zero then that tells the PF to just clear all of + * this VF's macvlans and there is no new list. 
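ixgbevf_mta_vector() above folds a multicast address into a 12-bit table index; which 12 bits are used depends on mc_filter_type. A standalone restatement of filter type 0, where mc_addr[5] supplies eight bits and the upper nibble of mc_addr[4] the remaining four; the example address is arbitrary.

#include <assert.h>
#include <stdint.h>

/* filter type 0: mc_addr[5] gives 8 bits, the upper nibble of mc_addr[4] gives 4 */
static uint16_t mta_vector_type0(const uint8_t mc[6])
{
	return ((uint16_t)mc[5] << 4) | (mc[4] >> 4);
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0xab, 0xcd };

	assert(mta_vector_type0(mc) == 0xcda);	/* 12-bit index into a 4096-entry table */
	return 0;
}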
+ */ + msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; + msgbuf[0] |= IXGBE_VF_SET_MACVLAN; + if (addr) + ether_addr_copy(msg_addr, addr); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + if (!ret_val) + if (msgbuf[0] == + (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK)) + ret_val = -ENOMEM; + + return ret_val; +} + +/** + * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. + * @adapter: pointer to the port handle + * @reta: buffer to fill with RETA contents. + * @num_rx_queues: Number of Rx queues configured for this port + * + * The "reta" buffer should be big enough to contain 32 registers. + * + * Returns: 0 on success. + * if API doesn't support this operation - (-EOPNOTSUPP). + */ +int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) +{ + int err, i, j; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u32 *hw_reta = &msgbuf[1]; + u32 mask = 0; + + /* We have to use a mailbox for 82599 and x540 devices only. + * For these devices RETA has 128 entries. + * Also these VFs support up to 4 RSS queues. Therefore PF will compress + * 16 RETA entries in each DWORD giving 2 bits to each entry. + */ + int dwords = IXGBEVF_82599_RETA_SIZE / 16; + + /* We support the RSS querying for 82599 and x540 devices only. + * Thus return an error if API doesn't support RETA querying or querying + * is not supported for this device type. + */ + if (hw->api_version != ixgbe_mbox_api_12 || + hw->mac.type >= ixgbe_mac_X550_vf) + return -EOPNOTSUPP; + + msgbuf[0] = IXGBE_VF_GET_RETA; + + err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + + if (err) + return err; + + err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such. + */ + if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + /* ixgbevf doesn't support more than 2 queues at the moment */ + if (num_rx_queues > 1) + mask = 0x1; + + for (i = 0; i < dwords; i++) + for (j = 0; j < 16; j++) + reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask; + + return 0; +} + +/** + * ixgbevf_get_rss_key_locked - get the RSS Random Key + * @hw: pointer to the HW structure + * @rss_key: buffer to fill with RSS Hash Key contents. + * + * The "rss_key" buffer should be big enough to contain 10 registers. + * + * Returns: 0 on success. + * if API doesn't support this operation - (-EOPNOTSUPP). + */ +int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) +{ + int err; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + + /* We currently support the RSS Random Key retrieval for 82599 and x540 + * devices only. + * + * Thus return an error if API doesn't support RSS Random Key retrieval + * or if the operation is not supported for this device type. 
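ixgbevf_get_reta_locked() above receives the 128-entry redirection table packed by the PF into eight dwords, sixteen entries per dword at two bits each (the driver then masks each entry down to the queue count it actually supports). A standalone sketch of the unpacking loop; the packed dword value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t packed = 0xE4E4E4E4u;	/* made-up dword: 2-bit entries 0,1,2,3 repeated */
	unsigned int j;

	for (j = 0; j < 16; j++)
		printf("%u ", (packed >> (2 * j)) & 0x3);
	printf("\n");			/* prints: 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 */

	return 0;
}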
+ */ + if (hw->api_version != ixgbe_mbox_api_12 || + hw->mac.type >= ixgbe_mac_X550_vf) + return -EOPNOTSUPP; + + msgbuf[0] = IXGBE_VF_GET_RSS_KEY; + err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + + if (err) + return err; + + err = hw->mbx.ops.read_posted(hw, msgbuf, 11); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such. + */ + if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE); + + return 0; +} + +/** + * ixgbevf_set_rar_vf - set device MAC address + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: Unused in this implementation + **/ +static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, + u32 vmdq) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, sizeof(msgbuf)); + msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; + ether_addr_copy(msg_addr, addr); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) + ixgbevf_get_mac_addr_vf(hw, hw->mac.addr); + + return ret_val; +} + +static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, + u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 retmsg[IXGBE_VFMAILBOX_SIZE]; + s32 retval = mbx->ops.write_posted(hw, msg, size); + + if (!retval) + mbx->ops.read_posted(hw, retmsg, size); +} + +/** + * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @netdev: pointer to net device structure + * + * Updates the Multicast Table Array. + **/ +static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, + struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u16 *vector_list = (u16 *)&msgbuf[1]; + u32 cnt, i; + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. + * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. 
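The 30-address limit described above follows straight from the mailbox geometry: sixteen 32-bit words, one of which is the message header, leave room for thirty 16-bit hash values; likewise the RSS key reply is one header word plus ten key words, i.e. 40 bytes of key material. A standalone check of that arithmetic:

#include <assert.h>

int main(void)
{
	enum { MAILBOX_WORDS = 16, HDR_WORDS = 1 };

	assert((MAILBOX_WORDS - HDR_WORDS) * 2 == 30);	/* 16-bit multicast hash slots */
	assert(10 * 4 == 40);				/* RSS key bytes in the reply  */
	return 0;
}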
+ */ + + cnt = netdev_mc_count(netdev); + if (cnt > 30) + cnt = 30; + msgbuf[0] = IXGBE_VF_SET_MULTICAST; + msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; + + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == cnt) + break; + if (is_link_local_ether_addr(ha->addr)) + continue; + + vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); + } + + ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); + + return 0; +} + +/** + * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address + * @hw: pointer to the HW structure + * @vlan: 12 bit VLAN ID + * @vind: unused by VF drivers + * @vlan_on: if true then set bit, else clear bit + **/ +static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + msgbuf[0] = IXGBE_VF_SET_VLAN; + msgbuf[1] = vlan; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; + + err = mbx->ops.write_posted(hw, msgbuf, 2); + if (err) + goto mbx_err; + + err = mbx->ops.read_posted(hw, msgbuf, 2); + if (err) + goto mbx_err; + + /* remove extra bits from the message */ + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT); + + if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK)) + err = IXGBE_ERR_INVALID_ARGUMENT; + +mbx_err: + return err; +} + +/** + * ixgbevf_setup_mac_link_vf - Setup MAC link settings + * @hw: pointer to hardware structure + * @speed: Unused in this implementation + * @autoneg: Unused in this implementation + * @autoneg_wait_to_complete: Unused in this implementation + * + * Do nothing and return success. VF drivers are not allowed to change + * global settings. Maintained for driver compatibility. + **/ +static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + return 0; +} + +/** + * ixgbevf_check_mac_link_vf - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 links_reg; + u32 in_msg = 0; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + } + + /* if the read failed it 
could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + ret_val = -1; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + +/** + * ixgbevf_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 msgbuf[2]; + + msgbuf[0] = IXGBE_VF_SET_LPE; + msgbuf[1] = max_size; + ixgbevf_write_msg_read_ack(hw, msgbuf, 2); +} + +/** + * ixgbevf_negotiate_api_version - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + **/ +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) +{ + int err; + u32 msg[3]; + + /* Negotiate the mailbox API version */ + msg[0] = IXGBE_VF_API_NEGOTIATE; + msg[1] = api; + msg[2] = 0; + err = hw->mbx.ops.write_posted(hw, msg, 3); + + if (!err) + err = hw->mbx.ops.read_posted(hw, msg, 3); + + if (!err) { + msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* Store value and return 0 on success */ + if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { + hw->api_version = api; + return 0; + } + + err = IXGBE_ERR_INVALID_ARGUMENT; + } + + return err; +} + +int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc) +{ + int err; + u32 msg[5]; + + /* do nothing if API doesn't support ixgbevf_get_queues */ + switch (hw->api_version) { + case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: + break; + default: + return 0; + } + + /* Fetch queue configuration from the PF */ + msg[0] = IXGBE_VF_GET_QUEUE; + msg[1] = msg[2] = msg[3] = msg[4] = 0; + err = hw->mbx.ops.write_posted(hw, msg, 5); + + if (!err) + err = hw->mbx.ops.read_posted(hw, msg, 5); + + if (!err) { + msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* if we we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such + */ + if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + /* record and validate values from message */ + hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; + if (hw->mac.max_tx_queues == 0 || + hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) + hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; + + hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; + if (hw->mac.max_rx_queues == 0 || + hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) + hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; + + *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; + /* in case of unknown state assume we cannot tag frames */ + if (*num_tcs > hw->mac.max_rx_queues) + *num_tcs = 1; + + *default_tc = msg[IXGBE_VF_DEF_QUEUE]; + /* default to queue 0 on out-of-bounds queue number */ + if (*default_tc >= hw->mac.max_tx_queues) + *default_tc = 0; + } + + return err; +} + +static const struct ixgbe_mac_operations ixgbevf_mac_ops = { + .init_hw = ixgbevf_init_hw_vf, + .reset_hw = ixgbevf_reset_hw_vf, + .start_hw = ixgbevf_start_hw_vf, + .get_mac_addr = ixgbevf_get_mac_addr_vf, + .stop_adapter = 
ixgbevf_stop_hw_vf, + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_check_mac_link_vf, + .set_rar = ixgbevf_set_rar_vf, + .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .set_uc_addr = ixgbevf_set_uc_addr_vf, + .set_vfta = ixgbevf_set_vfta_vf, +}; + +const struct ixgbevf_info ixgbevf_82599_vf_info = { + .mac = ixgbe_mac_82599_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X540_vf_info = { + .mac = ixgbe_mac_X540_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X550_vf_info = { + .mac = ixgbe_mac_X550_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .mac_ops = &ixgbevf_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h new file mode 100644 index 000000000..d40f036b6 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -0,0 +1,215 @@ +/******************************************************************************* + + Intel 82599 Virtual Function driver + Copyright(c) 1999 - 2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, see . + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef __IXGBE_VF_H__ +#define __IXGBE_VF_H__ + +#include +#include +#include +#include +#include + +#include "defines.h" +#include "regs.h" +#include "mbx.h" + +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + + /* Link */ + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32); + s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82599_vf, + ixgbe_mac_X540_vf, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, + ixgbe_num_macs +}; + +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum ixgbe_mac_type type; + + s32 mc_filter_type; + + bool get_link_status; + u32 max_tx_queues; + u32 max_rx_queues; + u32 max_msix_vectors; +}; + +struct ixgbe_mbx_operations { + s32 (*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16); + s32 (*check_for_msg)(struct ixgbe_hw *); + s32 (*check_for_ack)(struct ixgbe_hw *); + s32 (*check_for_rst)(struct ixgbe_hw *); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + struct ixgbe_mbx_operations ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 udelay; + u32 v2p_mailbox; + u16 size; +}; + +struct ixgbe_hw { + void *back; + + u8 __iomem *hw_addr; + + struct ixgbe_mac_info mac; + struct ixgbe_mbx_info mbx; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; + bool adapter_stopped; + + int api_version; +}; + +struct ixgbevf_hw_stats { + u64 base_vfgprc; + u64 base_vfgptc; + u64 base_vfgorc; + u64 base_vfgotc; + u64 base_vfmprc; + + u64 last_vfgprc; + u64 last_vfgptc; + u64 last_vfgorc; + u64 last_vfgotc; + u64 last_vfmprc; + + u64 vfgprc; + u64 vfgptc; + u64 vfgorc; + u64 vfgotc; + u64 vfmprc; + + u64 saved_reset_vfgprc; + u64 saved_reset_vfgptc; + u64 saved_reset_vfgorc; + u64 saved_reset_vfgotc; + u64 saved_reset_vfmprc; +}; + +struct ixgbevf_info { + enum ixgbe_mac_type mac; + const struct ixgbe_mac_operations *mac_ops; +}; + +#define IXGBE_FAILED_READ_REG 0xffffffffU + +#define IXGBE_REMOVED(a) unlikely(!(a)) + +static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 
value) +{ + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + + if (IXGBE_REMOVED(reg_addr)) + return; + writel(value, reg_addr + reg); +} + +#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) + +u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); +#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) + +static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, + u32 offset, u32 value) +{ + ixgbe_write_reg(hw, reg + (offset << 2), value); +} + +#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) + +static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, + u32 offset) +{ + return ixgbevf_read_reg(hw, reg + (offset << 2)); +} + +#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) + +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); +int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, + unsigned int *default_tc); +int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); +int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key); +#endif /* __IXGBE_VF_H__ */ diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c new file mode 100644 index 000000000..6e9a79209 --- /dev/null +++ b/drivers/net/ethernet/jme.c @@ -0,0 +1,3372 @@ +/* + * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver + * + * Copyright 2008 JMicron Technology Corporation + * http://www.jmicron.com/ + * Copyright (c) 2009 - 2010 Guo-Fu Tseng + * + * Author: Guo-Fu Tseng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "jme.h" + +static int force_pseudohp = -1; +static int no_pseudohp = -1; +static int no_extplug = -1; +module_param(force_pseudohp, int, 0); +MODULE_PARM_DESC(force_pseudohp, + "Enable pseudo hot-plug feature manually by driver instead of BIOS."); +module_param(no_pseudohp, int, 0); +MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); +module_param(no_extplug, int, 0); +MODULE_PARM_DESC(no_extplug, + "Do not use external plug signal for pseudo hot-plug."); + +static int +jme_mdio_read(struct net_device *netdev, int phy, int reg) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i, val, again = (reg == MII_BMSR) ? 
1 : 0; + +read_again: + jwrite32(jme, JME_SMI, SMI_OP_REQ | + smi_phy_addr(phy) | + smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + val = jread32(jme, JME_SMI); + if ((val & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) { + pr_err("phy(%d) read timeout : %d\n", phy, reg); + return 0; + } + + if (again--) + goto read_again; + + return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; +} + +static void +jme_mdio_write(struct net_device *netdev, + int phy, int reg, int val) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i; + + jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | + ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | + smi_phy_addr(phy) | smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) + pr_err("phy(%d) write timeout : %d\n", phy, reg); +} + +static inline void +jme_reset_phy_processor(struct jme_adapter *jme) +{ + u32 val; + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + val = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_BMCR, val | BMCR_RESET); +} + +static void +jme_setup_wakeup_frame(struct jme_adapter *jme, + const u32 *mask, u32 crc, int fnr) +{ + int i; + + /* + * Setup CRC pattern + */ + jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, crc); + wmb(); + + /* + * Setup Mask + */ + for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { + jwrite32(jme, JME_WFOI, + ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | + (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, mask[i]); + wmb(); + } +} + +static inline void +jme_mac_rxclk_off(struct jme_adapter *jme) +{ + jme->reg_gpreg1 |= GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_mac_rxclk_on(struct jme_adapter *jme) +{ + jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_mac_txclk_off(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_mac_txclk_on(struct jme_adapter *jme) +{ + u32 speed = jme->reg_ghc & GHC_SPEED; + if (speed == GHC_SPEED_1000M) + jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; + else + jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_ghc_speed(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_250A2_workaround(struct jme_adapter *jme) +{ + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_assert_ghc_reset(struct jme_adapter *jme) +{ + jme->reg_ghc |= GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_clear_ghc_reset(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_mac_processor(struct jme_adapter *jme) +{ + static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; + u32 crc = 
0xCDCDCDCD; + u32 gpreg0; + int i; + + jme_reset_ghc_speed(jme); + jme_reset_250A2_workaround(jme); + + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_assert_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + udelay(1); + jme_clear_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + + jwrite32(jme, JME_RXDBA_LO, 0x00000000); + jwrite32(jme, JME_RXDBA_HI, 0x00000000); + jwrite32(jme, JME_RXQDC, 0x00000000); + jwrite32(jme, JME_RXNDA, 0x00000000); + jwrite32(jme, JME_TXDBA_LO, 0x00000000); + jwrite32(jme, JME_TXDBA_HI, 0x00000000); + jwrite32(jme, JME_TXQDC, 0x00000000); + jwrite32(jme, JME_TXNDA, 0x00000000); + + jwrite32(jme, JME_RXMCHT_LO, 0x00000000); + jwrite32(jme, JME_RXMCHT_HI, 0x00000000); + for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) + jme_setup_wakeup_frame(jme, mask, crc, i); + if (jme->fpgaver) + gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; + else + gpreg0 = GPREG0_DEFAULT; + jwrite32(jme, JME_GPREG0, gpreg0); +} + +static inline void +jme_clear_pm(struct jme_adapter *jme) +{ + jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); +} + +static int +jme_reload_eeprom(struct jme_adapter *jme) +{ + u32 val; + int i; + + val = jread32(jme, JME_SMBCSR); + + if (val & SMBCSR_EEPROMD) { + val |= SMBCSR_CNACK; + jwrite32(jme, JME_SMBCSR, val); + val |= SMBCSR_RELOAD; + jwrite32(jme, JME_SMBCSR, val); + mdelay(12); + + for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { + mdelay(1); + if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) + break; + } + + if (i == 0) { + pr_err("eeprom reload timeout\n"); + return -EIO; + } + } + + return 0; +} + +static void +jme_load_macaddr(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + unsigned char macaddr[ETH_ALEN]; + u32 val; + + spin_lock_bh(&jme->macaddr_lock); + val = jread32(jme, JME_RXUMA_LO); + macaddr[0] = (val >> 0) & 0xFF; + macaddr[1] = (val >> 8) & 0xFF; + macaddr[2] = (val >> 16) & 0xFF; + macaddr[3] = (val >> 24) & 0xFF; + val = jread32(jme, JME_RXUMA_HI); + macaddr[4] = (val >> 0) & 0xFF; + macaddr[5] = (val >> 8) & 0xFF; + memcpy(netdev->dev_addr, macaddr, ETH_ALEN); + spin_unlock_bh(&jme->macaddr_lock); +} + +static inline void +jme_set_rx_pcc(struct jme_adapter *jme, int p) +{ + switch (p) { + case PCC_OFF: + jwrite32(jme, JME_PCCRX0, + ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P1: + jwrite32(jme, JME_PCCRX0, + ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P2: + jwrite32(jme, JME_PCCRX0, + ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P3: + jwrite32(jme, JME_PCCRX0, + ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + default: + break; + } + wmb(); + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); +} + +static void +jme_start_irq(struct jme_adapter *jme) +{ + register struct dynpcc_info *dpi = &(jme->dpi); + + jme_set_rx_pcc(jme, PCC_P1); + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + + jwrite32(jme, JME_PCCTX, + ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | + ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | + PCCTXQ0_EN + ); + + /* + * Enable Interrupts + */ + jwrite32(jme, JME_IENS, INTR_ENABLE); +} + +static inline 
void +jme_stop_irq(struct jme_adapter *jme) +{ + /* + * Disable Interrupts + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); +} + +static u32 +jme_linkstat_from_phy(struct jme_adapter *jme) +{ + u32 phylink, bmsr; + + phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); + bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); + if (bmsr & BMSR_ANCOMP) + phylink |= PHY_LINK_AUTONEG_COMPLETE; + + return phylink; +} + +static inline void +jme_set_phyfifo_5level(struct jme_adapter *jme) +{ + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); +} + +static inline void +jme_set_phyfifo_8level(struct jme_adapter *jme) +{ + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); +} + +static int +jme_check_link(struct net_device *netdev, int testonly) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; + char linkmsg[64]; + int rc = 0; + + linkmsg[0] = '\0'; + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + + if (phylink & PHY_LINK_UP) { + if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { + /* + * If we did not enable AN + * Speed/Duplex Info should be obtained from SMI + */ + phylink = PHY_LINK_UP; + + bmcr = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + phylink |= ((bmcr & BMCR_SPEED1000) && + (bmcr & BMCR_SPEED100) == 0) ? + PHY_LINK_SPEED_1000M : + (bmcr & BMCR_SPEED100) ? + PHY_LINK_SPEED_100M : + PHY_LINK_SPEED_10M; + + phylink |= (bmcr & BMCR_FULLDPLX) ? + PHY_LINK_DUPLEX : 0; + + strcat(linkmsg, "Forced: "); + } else { + /* + * Keep polling for speed/duplex resolve complete + */ + while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && + --cnt) { + + udelay(1); + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + } + if (!cnt) + pr_err("Waiting speed resolve timeout\n"); + + strcat(linkmsg, "ANed: "); + } + + if (jme->phylink == phylink) { + rc = 1; + goto out; + } + if (testonly) + goto out; + + jme->phylink = phylink; + + /* + * The speed/duplex setting of jme->reg_ghc already cleared + * by jme_reset_mac_processor() + */ + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme->reg_ghc |= GHC_SPEED_10M; + strcat(linkmsg, "10 Mbps, "); + break; + case PHY_LINK_SPEED_100M: + jme->reg_ghc |= GHC_SPEED_100M; + strcat(linkmsg, "100 Mbps, "); + break; + case PHY_LINK_SPEED_1000M: + jme->reg_ghc |= GHC_SPEED_1000M; + strcat(linkmsg, "1000 Mbps, "); + break; + default: + break; + } + + if (phylink & PHY_LINK_DUPLEX) { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); + jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); + jme->reg_ghc |= GHC_DPX; + } else { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | + TXMCS_BACKOFF | + TXMCS_CARRIERSENSE | + TXMCS_COLLISION); + jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); + } + + jwrite32(jme, JME_GHC, jme->reg_ghc); + + if (is_buggy250(jme->pdev->device, jme->chiprev)) { + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + if (!(phylink & PHY_LINK_DUPLEX)) + jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme_set_phyfifo_8level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_100M: + jme_set_phyfifo_5level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_1000M: + jme_set_phyfifo_8level(jme); + break; + default: + break; + } + } + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); + + strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 
+ "Full-Duplex, " : + "Half-Duplex, "); + strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? + "MDI-X" : + "MDI"); + netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg); + netif_carrier_on(netdev); + } else { + if (testonly) + goto out; + + netif_info(jme, link, jme->dev, "Link is down\n"); + jme->phylink = 0; + netif_carrier_off(netdev); + } + +out: + return rc; +} + +static int +jme_setup_tx_resources(struct jme_adapter *jme) +{ + struct jme_ring *txring = &(jme->txring[0]); + + txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + &(txring->dmaalloc), + GFP_ATOMIC); + + if (!txring->alloc) + goto err_set_null; + + /* + * 16 Bytes align + */ + txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), + RING_DESC_ALIGN); + txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, jme->tx_ring_size); + + txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + jme->tx_ring_size, GFP_ATOMIC); + if (unlikely(!(txring->bufinf))) + goto err_free_txring; + + /* + * Initialize Transmit Descriptors + */ + memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); + memset(txring->bufinf, 0, + sizeof(struct jme_buffer_info) * jme->tx_ring_size); + + return 0; + +err_free_txring: + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + +err_set_null: + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + + return -ENOMEM; +} + +static void +jme_free_tx_resources(struct jme_adapter *jme) +{ + int i; + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi; + + if (txring->alloc) { + if (txring->bufinf) { + for (i = 0 ; i < jme->tx_ring_size ; ++i) { + txbi = txring->bufinf + i; + if (txbi->skb) { + dev_kfree_skb(txbi->skb); + txbi->skb = NULL; + } + txbi->mapping = 0; + txbi->len = 0; + txbi->nr_desc = 0; + txbi->start_xmit = 0; + } + kfree(txring->bufinf); + } + + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + + txring->alloc = NULL; + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + } + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, 0); +} + +static inline void +jme_enable_tx_engine(struct jme_adapter *jme) +{ + /* + * Select Queue 0 + */ + jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); + wmb(); + + /* + * Setup TX Queue 0 DMA Bass Address + */ + jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); + jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + + /* + * Setup TX Descptor Count + */ + jwrite32(jme, JME_TXQDC, jme->tx_ring_size); + + /* + * Enable TX Engine + */ + wmb(); + jwrite32f(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); + + /* + * Start clock for TX MAC Processor + */ + jme_mac_txclk_on(jme); +} + +static inline void +jme_restart_tx_engine(struct jme_adapter *jme) +{ + /* + * Restart TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); +} + +static inline void +jme_disable_tx_engine(struct jme_adapter *jme) +{ + int i; + u32 val; + + /* + * Disable TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); + wmb(); + + val = jread32(jme, JME_TXCS); + 
for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) { + mdelay(1); + val = jread32(jme, JME_TXCS); + rmb(); + } + + if (!i) + pr_err("Disable TX engine timeout\n"); + + /* + * Stop clock for TX MAC Processor + */ + jme_mac_txclk_off(jme); +} + +static void +jme_set_clean_rxdesc(struct jme_adapter *jme, int i) +{ + struct jme_ring *rxring = &(jme->rxring[0]); + register struct rxdesc *rxdesc = rxring->desc; + struct jme_buffer_info *rxbi = rxring->bufinf; + rxdesc += i; + rxbi += i; + + rxdesc->dw[0] = 0; + rxdesc->dw[1] = 0; + rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); + rxdesc->desc1.bufaddrl = cpu_to_le32( + (__u64)rxbi->mapping & 0xFFFFFFFFUL); + rxdesc->desc1.datalen = cpu_to_le16(rxbi->len); + if (jme->dev->features & NETIF_F_HIGHDMA) + rxdesc->desc1.flags = RXFLAG_64BIT; + wmb(); + rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; +} + +static int +jme_make_new_rx_buf(struct jme_adapter *jme, int i) +{ + struct jme_ring *rxring = &(jme->rxring[0]); + struct jme_buffer_info *rxbi = rxring->bufinf + i; + struct sk_buff *skb; + dma_addr_t mapping; + + skb = netdev_alloc_skb(jme->dev, + jme->dev->mtu + RX_EXTRA_LEN); + if (unlikely(!skb)) + return -ENOMEM; + + mapping = pci_map_page(jme->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), skb_tailroom(skb), + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + if (likely(rxbi->mapping)) + pci_unmap_page(jme->pdev, rxbi->mapping, + rxbi->len, PCI_DMA_FROMDEVICE); + + rxbi->skb = skb; + rxbi->len = skb_tailroom(skb); + rxbi->mapping = mapping; + return 0; +} + +static void +jme_free_rx_buf(struct jme_adapter *jme, int i) +{ + struct jme_ring *rxring = &(jme->rxring[0]); + struct jme_buffer_info *rxbi = rxring->bufinf; + rxbi += i; + + if (rxbi->skb) { + pci_unmap_page(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxbi->skb); + rxbi->skb = NULL; + rxbi->mapping = 0; + rxbi->len = 0; + } +} + +static void +jme_free_rx_resources(struct jme_adapter *jme) +{ + int i; + struct jme_ring *rxring = &(jme->rxring[0]); + + if (rxring->alloc) { + if (rxring->bufinf) { + for (i = 0 ; i < jme->rx_ring_size ; ++i) + jme_free_rx_buf(jme, i); + kfree(rxring->bufinf); + } + + dma_free_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + rxring->alloc, + rxring->dmaalloc); + rxring->alloc = NULL; + rxring->desc = NULL; + rxring->dmaalloc = 0; + rxring->dma = 0; + rxring->bufinf = NULL; + } + rxring->next_to_use = 0; + atomic_set(&rxring->next_to_clean, 0); +} + +static int +jme_setup_rx_resources(struct jme_adapter *jme) +{ + int i; + struct jme_ring *rxring = &(jme->rxring[0]); + + rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + &(rxring->dmaalloc), + GFP_ATOMIC); + if (!rxring->alloc) + goto err_set_null; + + /* + * 16 Bytes align + */ + rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc), + RING_DESC_ALIGN); + rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN); + rxring->next_to_use = 0; + atomic_set(&rxring->next_to_clean, 0); + + rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + jme->rx_ring_size, GFP_ATOMIC); + if (unlikely(!(rxring->bufinf))) + goto err_free_rxring; + + /* + * Initiallize Receive Descriptors + */ + memset(rxring->bufinf, 0, + sizeof(struct jme_buffer_info) * jme->rx_ring_size); + for (i = 0 ; i < jme->rx_ring_size ; ++i) { + if (unlikely(jme_make_new_rx_buf(jme, i))) { + 
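/* Buffer allocation failed: release everything set up so far */ +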
jme_free_rx_resources(jme); + return -ENOMEM; + } + + jme_set_clean_rxdesc(jme, i); + } + + return 0; + +err_free_rxring: + dma_free_coherent(&(jme->pdev->dev), + RX_RING_ALLOC_SIZE(jme->rx_ring_size), + rxring->alloc, + rxring->dmaalloc); +err_set_null: + rxring->desc = NULL; + rxring->dmaalloc = 0; + rxring->dma = 0; + rxring->bufinf = NULL; + + return -ENOMEM; +} + +static inline void +jme_enable_rx_engine(struct jme_adapter *jme) +{ + /* + * Select Queue 0 + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0); + wmb(); + + /* + * Setup RX DMA Bass Address + */ + jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); + jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32); + jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL); + + /* + * Setup RX Descriptor Count + */ + jwrite32(jme, JME_RXQDC, jme->rx_ring_size); + + /* + * Setup Unicast Filter + */ + jme_set_unicastaddr(jme->dev); + jme_set_multi(jme->dev); + + /* + * Enable RX Engine + */ + wmb(); + jwrite32f(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); + + /* + * Start clock for RX MAC Processor + */ + jme_mac_rxclk_on(jme); +} + +static inline void +jme_restart_rx_engine(struct jme_adapter *jme) +{ + /* + * Start RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); +} + +static inline void +jme_disable_rx_engine(struct jme_adapter *jme) +{ + int i; + u32 val; + + /* + * Disable RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs); + wmb(); + + val = jread32(jme, JME_RXCS); + for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { + mdelay(1); + val = jread32(jme, JME_RXCS); + rmb(); + } + + if (!i) + pr_err("Disable RX engine timeout\n"); + + /* + * Stop clock for RX MAC Processor + */ + jme_mac_rxclk_off(jme); +} + +static u16 +jme_udpsum(struct sk_buff *skb) +{ + u16 csum = 0xFFFFu; + + if (skb->len < (ETH_HLEN + sizeof(struct iphdr))) + return csum; + if (skb->protocol != htons(ETH_P_IP)) + return csum; + skb_set_network_header(skb, ETH_HLEN); + if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || + (skb->len < (ETH_HLEN + + (ip_hdr(skb)->ihl << 2) + + sizeof(struct udphdr)))) { + skb_reset_network_header(skb); + return csum; + } + skb_set_transport_header(skb, + ETH_HLEN + (ip_hdr(skb)->ihl << 2)); + csum = udp_hdr(skb)->check; + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + return csum; +} + +static int +jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb) +{ + if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) + return false; + + if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS)) + == RXWBFLAG_TCPON)) { + if (flags & RXWBFLAG_IPV4) + netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n"); + return false; + } + + if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) + == RXWBFLAG_UDPON) && jme_udpsum(skb)) { + if (flags & RXWBFLAG_IPV4) + netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); + return false; + } + + if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) + == RXWBFLAG_IPV4)) { + netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n"); + return false; + } + + return true; +} + +static void +jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) +{ + struct jme_ring *rxring = &(jme->rxring[0]); + struct rxdesc *rxdesc = rxring->desc; + struct jme_buffer_info *rxbi = rxring->bufinf; + struct sk_buff *skb; + int framesize; + + rxdesc += idx; + rxbi += idx; + 
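+ /* Refill the slot with a fresh buffer before handing the current one to + * the stack; if the refill fails the frame is dropped and the existing + * buffer stays mapped for reuse. + */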
+ skb = rxbi->skb; + pci_dma_sync_single_for_cpu(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + if (unlikely(jme_make_new_rx_buf(jme, idx))) { + pci_dma_sync_single_for_device(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + ++(NET_STAT(jme).rx_dropped); + } else { + framesize = le16_to_cpu(rxdesc->descwb.framesize) + - RX_PREPAD_SIZE; + + skb_reserve(skb, RX_PREPAD_SIZE); + skb_put(skb, framesize); + skb->protocol = eth_type_trans(skb, jme->dev); + + if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { + u16 vid = le16_to_cpu(rxdesc->descwb.vlan); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + NET_STAT(jme).rx_bytes += 4; + } + jme->jme_rx(skb); + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == + cpu_to_le16(RXWBFLAG_DEST_MUL)) + ++(NET_STAT(jme).multicast); + + NET_STAT(jme).rx_bytes += framesize; + ++(NET_STAT(jme).rx_packets); + } + + jme_set_clean_rxdesc(jme, idx); + +} + +static int +jme_process_receive(struct jme_adapter *jme, int limit) +{ + struct jme_ring *rxring = &(jme->rxring[0]); + struct rxdesc *rxdesc = rxring->desc; + int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; + + if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) + goto out_inc; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out_inc; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out_inc; + + i = atomic_read(&rxring->next_to_clean); + while (limit > 0) { + rxdesc = rxring->desc; + rxdesc += i; + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || + !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) + goto out; + --limit; + + rmb(); + desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; + + if (unlikely(desccnt > 1 || + rxdesc->descwb.errstat & RXWBERR_ALLERR)) { + + if (rxdesc->descwb.errstat & RXWBERR_CRCERR) + ++(NET_STAT(jme).rx_crc_errors); + else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) + ++(NET_STAT(jme).rx_fifo_errors); + else + ++(NET_STAT(jme).rx_errors); + + if (desccnt > 1) + limit -= desccnt - 1; + + for (j = i, ccnt = desccnt ; ccnt-- ; ) { + jme_set_clean_rxdesc(jme, j); + j = (j + 1) & (mask); + } + + } else { + jme_alloc_and_feed_skb(jme, i); + } + + i = (i + desccnt) & (mask); + } + +out: + atomic_set(&rxring->next_to_clean, i); + +out_inc: + atomic_inc(&jme->rx_cleaning); + + return limit > 0 ? 
limit : 0; + +} + +static void +jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) +{ + if (likely(atmp == dpi->cur)) { + dpi->cnt = 0; + return; + } + + if (dpi->attempt == atmp) { + ++(dpi->cnt); + } else { + dpi->attempt = atmp; + dpi->cnt = 0; + } + +} + +static void +jme_dynamic_pcc(struct jme_adapter *jme) +{ + register struct dynpcc_info *dpi = &(jme->dpi); + + if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P3); + else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || + dpi->intr_cnt > PCC_INTR_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P2); + else + jme_attempt_pcc(dpi, PCC_P1); + + if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { + if (dpi->attempt < dpi->cur) + tasklet_schedule(&jme->rxclean_task); + jme_set_rx_pcc(jme, dpi->attempt); + dpi->cur = dpi->attempt; + dpi->cnt = 0; + } +} + +static void +jme_start_pcc_timer(struct jme_adapter *jme) +{ + struct dynpcc_info *dpi = &(jme->dpi); + dpi->last_bytes = NET_STAT(jme).rx_bytes; + dpi->last_pkts = NET_STAT(jme).rx_packets; + dpi->intr_cnt = 0; + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); +} + +static inline void +jme_stop_pcc_timer(struct jme_adapter *jme) +{ + jwrite32(jme, JME_TMCSR, 0); +} + +static void +jme_shutdown_nic(struct jme_adapter *jme) +{ + u32 phylink; + + phylink = jme_linkstat_from_phy(jme); + + if (!(phylink & PHY_LINK_UP)) { + /* + * Disable all interrupt before issue timer + */ + jme_stop_irq(jme); + jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); + } +} + +static void +jme_pcc_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + + if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { + jme_shutdown_nic(jme); + return; + } + + if (unlikely(!netif_carrier_ok(netdev) || + (atomic_read(&jme->link_changing) != 1) + )) { + jme_stop_pcc_timer(jme); + return; + } + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + jme_dynamic_pcc(jme); + + jme_start_pcc_timer(jme); +} + +static inline void +jme_polling_mode(struct jme_adapter *jme) +{ + jme_set_rx_pcc(jme, PCC_OFF); +} + +static inline void +jme_interrupt_mode(struct jme_adapter *jme) +{ + jme_set_rx_pcc(jme, PCC_P1); +} + +static inline int +jme_pseudo_hotplug_enabled(struct jme_adapter *jme) +{ + u32 apmc; + apmc = jread32(jme, JME_APMC); + return apmc & JME_APMC_PSEUDO_HP_EN; +} + +static void +jme_start_shutdown_timer(struct jme_adapter *jme) +{ + u32 apmc; + + apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; + apmc &= ~JME_APMC_EPIEN_CTRL; + if (!no_extplug) { + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); + wmb(); + } + jwrite32f(jme, JME_APMC, apmc); + + jwrite32f(jme, JME_TIMER2, 0); + set_bit(JME_FLAG_SHUTDOWN, &jme->flags); + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); +} + +static void +jme_stop_shutdown_timer(struct jme_adapter *jme) +{ + u32 apmc; + + jwrite32f(jme, JME_TMCSR, 0); + jwrite32f(jme, JME_TIMER2, 0); + clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); + + apmc = jread32(jme, JME_APMC); + apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); + wmb(); + jwrite32f(jme, JME_APMC, apmc); +} + +static void +jme_link_change_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + int rc; + + while (!atomic_dec_and_test(&jme->link_changing)) { + atomic_inc(&jme->link_changing); + 
netif_info(jme, intr, jme->dev, "Get link change lock failed\n"); + while (atomic_read(&jme->link_changing) != 1) + netif_info(jme, intr, jme->dev, "Waiting link change lock\n"); + } + + if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) + goto out; + + jme->old_mtu = netdev->mtu; + netif_stop_queue(netdev); + if (jme_pseudo_hotplug_enabled(jme)) + jme_stop_shutdown_timer(jme); + + jme_stop_pcc_timer(jme); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + netif_carrier_off(netdev); + } + + jme_check_link(netdev, 0); + if (netif_carrier_ok(netdev)) { + rc = jme_setup_rx_resources(jme); + if (rc) { + pr_err("Allocating resources for RX error, Device STOPPED!\n"); + goto out_enable_tasklet; + } + + rc = jme_setup_tx_resources(jme); + if (rc) { + pr_err("Allocating resources for TX error, Device STOPPED!\n"); + goto err_out_free_rx_resources; + } + + jme_enable_rx_engine(jme); + jme_enable_tx_engine(jme); + + netif_start_queue(netdev); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_interrupt_mode(jme); + + jme_start_pcc_timer(jme); + } else if (jme_pseudo_hotplug_enabled(jme)) { + jme_start_shutdown_timer(jme); + } + + goto out_enable_tasklet; + +err_out_free_rx_resources: + jme_free_rx_resources(jme); +out_enable_tasklet: + tasklet_enable(&jme->txclean_task); + tasklet_enable(&jme->rxclean_task); + tasklet_enable(&jme->rxempty_task); +out: + atomic_inc(&jme->link_changing); +} + +static void +jme_rx_clean_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct dynpcc_info *dpi = &(jme->dpi); + + jme_process_receive(jme, jme->rx_ring_size); + ++(dpi->intr_cnt); + +} + +static int +jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) +{ + struct jme_adapter *jme = jme_napi_priv(holder); + int rest; + + rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); + + if (rest) { + JME_RX_COMPLETE(netdev, holder); + jme_interrupt_mode(jme); + } + + JME_NAPI_WEIGHT_SET(budget, rest); + return JME_NAPI_WEIGHT_VAL(budget) - rest; +} + +static void +jme_rx_empty_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + return; + + if (unlikely(!netif_carrier_ok(jme->dev))) + return; + + netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n"); + + jme_rx_clean_tasklet(arg); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); +} + +static void +jme_wake_queue_if_stopped(struct jme_adapter *jme) +{ + struct jme_ring *txring = &(jme->txring[0]); + + smp_wmb(); + if (unlikely(netif_queue_stopped(jme->dev) && + atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { + netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n"); + netif_wake_queue(jme->dev); + } + +} + +static void +jme_tx_clean_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc = 
txring->desc; + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; + int i, j, cnt = 0, max, err, mask; + + tx_dbg(jme, "Into txclean\n"); + + if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) + goto out; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out; + + max = jme->tx_ring_size - atomic_read(&txring->nr_free); + mask = jme->tx_ring_mask; + + for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { + + ctxbi = txbi + i; + + if (likely(ctxbi->skb && + !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { + + tx_dbg(jme, "txclean: %d+%d@%lu\n", + i, ctxbi->nr_desc, jiffies); + + err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; + + for (j = 1 ; j < ctxbi->nr_desc ; ++j) { + ttxbi = txbi + ((i + j) & (mask)); + txdesc[(i + j) & (mask)].dw[0] = 0; + + pci_unmap_page(jme->pdev, + ttxbi->mapping, + ttxbi->len, + PCI_DMA_TODEVICE); + + ttxbi->mapping = 0; + ttxbi->len = 0; + } + + dev_kfree_skb(ctxbi->skb); + + cnt += ctxbi->nr_desc; + + if (unlikely(err)) { + ++(NET_STAT(jme).tx_carrier_errors); + } else { + ++(NET_STAT(jme).tx_packets); + NET_STAT(jme).tx_bytes += ctxbi->len; + } + + ctxbi->skb = NULL; + ctxbi->len = 0; + ctxbi->start_xmit = 0; + + } else { + break; + } + + i = (i + ctxbi->nr_desc) & mask; + + ctxbi->nr_desc = 0; + } + + tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies); + atomic_set(&txring->next_to_clean, i); + atomic_add(cnt, &txring->nr_free); + + jme_wake_queue_if_stopped(jme); + +out: + atomic_inc(&jme->tx_cleaning); +} + +static void +jme_intr_msi(struct jme_adapter *jme, u32 intrstat) +{ + /* + * Disable interrupt + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); + + if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { + /* + * Link change event is critical + * all other events are ignored + */ + jwrite32(jme, JME_IEVE, intrstat); + tasklet_schedule(&jme->linkch_task); + goto out_reenable; + } + + if (intrstat & INTR_TMINTR) { + jwrite32(jme, JME_IEVE, INTR_TMINTR); + tasklet_schedule(&jme->pcc_task); + } + + if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { + jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0); + tasklet_schedule(&jme->txclean_task); + } + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO | + INTR_PCCRX0 | + INTR_RX0EMP)) | + INTR_RX0); + } + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + if (intrstat & INTR_RX0EMP) + atomic_inc(&jme->rx_empty); + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + if (likely(JME_RX_SCHEDULE_PREP(jme))) { + jme_polling_mode(jme); + JME_RX_SCHEDULE(jme); + } + } + } else { + if (intrstat & INTR_RX0EMP) { + atomic_inc(&jme->rx_empty); + tasklet_hi_schedule(&jme->rxempty_task); + } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) { + tasklet_hi_schedule(&jme->rxclean_task); + } + } + +out_reenable: + /* + * Re-enable interrupt + */ + jwrite32f(jme, JME_IENS, INTR_ENABLE); +} + +static irqreturn_t +jme_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + /* + * Check if it's really an interrupt for us + */ + if (unlikely((intrstat & INTR_ENABLE) == 0)) + return IRQ_NONE; + + /* + * Check if the device still exist + */ + if (unlikely(intrstat == ~((typeof(intrstat))0))) + return IRQ_NONE; + + jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; +} + +static irqreturn_t +jme_msi(int irq, void *dev_id) +{ + struct net_device 
*netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; +} + +static void +jme_reset_link(struct jme_adapter *jme) +{ + jwrite32(jme, JME_TMCSR, TMCSR_SWIT); +} + +static void +jme_restart_an(struct jme_adapter *jme) +{ + u32 bmcr; + + spin_lock_bh(&jme->phy_lock); + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + spin_unlock_bh(&jme->phy_lock); +} + +static int +jme_request_irq(struct jme_adapter *jme) +{ + int rc; + struct net_device *netdev = jme->dev; + irq_handler_t handler = jme_intr; + int irq_flags = IRQF_SHARED; + + if (!pci_enable_msi(jme->pdev)) { + set_bit(JME_FLAG_MSI, &jme->flags); + handler = jme_msi; + irq_flags = 0; + } + + rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (rc) { + netdev_err(netdev, + "Unable to request %s interrupt (return: %d)\n", + test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", + rc); + + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + } + } else { + netdev->irq = jme->pdev->irq; + } + + return rc; +} + +static void +jme_free_irq(struct jme_adapter *jme) +{ + free_irq(jme->pdev->irq, jme->dev); + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + jme->dev->irq = jme->pdev->irq; + } +} + +static inline void +jme_new_phy_on(struct jme_adapter *jme) +{ + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL); + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_ENBG; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); +} + +static inline void +jme_new_phy_off(struct jme_adapter *jme) +{ + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL; + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_PDD3COLD; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); +} + +static inline void +jme_phy_on(struct jme_adapter *jme) +{ + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr &= ~BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_on(jme); +} + +static inline void +jme_phy_off(struct jme_adapter *jme) +{ + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_off(jme); +} + +static int +jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg) +{ + u32 phy_addr; + + phy_addr = JM_PHY_SPEC_REG_READ | specreg; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, + phy_addr); + return jme_mdio_read(jme->dev, jme->mii_if.phy_id, + JM_PHY_SPEC_DATA_REG); +} + +static void +jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data) +{ + u32 phy_addr; + + phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG, + phy_data); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 
JM_PHY_SPEC_ADDR_REG, + phy_addr); +} + +static int +jme_phy_calibration(struct jme_adapter *jme) +{ + u32 ctrl1000, phy_data; + + jme_phy_off(jme); + jme_phy_on(jme); + /* Enabel PHY test mode 1 */ + ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); + ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; + ctrl1000 |= PHY_GAD_TEST_MODE_1; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); + + phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); + phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0; + phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH | + JM_PHY_EXT_COMM_2_CALI_ENABLE; + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); + msleep(20); + phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); + phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | + JM_PHY_EXT_COMM_2_CALI_MODE_0 | + JM_PHY_EXT_COMM_2_CALI_LATCH); + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); + + /* Disable PHY test mode */ + ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); + ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); + return 0; +} + +static int +jme_phy_setEA(struct jme_adapter *jme) +{ + u32 phy_comm0 = 0, phy_comm1 = 0; + u8 nic_ctrl; + + pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); + if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) + return 0; + + switch (jme->pdev->device) { + case PCI_DEVICE_ID_JMICRON_JMC250: + if (((jme->chip_main_rev == 5) && + ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || + (jme->chip_sub_rev == 3))) || + (jme->chip_main_rev >= 6)) { + phy_comm0 = 0x008A; + phy_comm1 = 0x4109; + } + if ((jme->chip_main_rev == 3) && + ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) + phy_comm0 = 0xE088; + break; + case PCI_DEVICE_ID_JMICRON_JMC260: + if (((jme->chip_main_rev == 5) && + ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || + (jme->chip_sub_rev == 3))) || + (jme->chip_main_rev >= 6)) { + phy_comm0 = 0x008A; + phy_comm1 = 0x4109; + } + if ((jme->chip_main_rev == 3) && + ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) + phy_comm0 = 0xE088; + if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) + phy_comm0 = 0x608A; + if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) + phy_comm0 = 0x408A; + break; + default: + return -ENODEV; + } + if (phy_comm0) + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0); + if (phy_comm1) + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1); + + return 0; +} + +static int +jme_open(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + jme_clear_pm(jme); + JME_NAPI_ENABLE(jme); + + tasklet_init(&jme->linkch_task, jme_link_change_tasklet, + (unsigned long) jme); + tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet, + (unsigned long) jme); + + rc = jme_request_irq(jme); + if (rc) + goto err_out; + + jme_start_irq(jme); + + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + jme_phy_calibration(jme); + jme_phy_setEA(jme); + jme_reset_link(jme); + + return 0; + +err_out: + netif_stop_queue(netdev); + netif_carrier_off(netdev); + return rc; +} + +static void +jme_set_100m_half(struct jme_adapter *jme) +{ + u32 bmcr, tmp; + + jme_phy_on(jme); + bmcr = 
jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | + BMCR_SPEED1000 | BMCR_FULLDPLX); + tmp |= BMCR_SPEED100; + + if (bmcr != tmp) + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); + + if (jme->fpgaver) + jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); + else + jwrite32(jme, JME_GHC, GHC_SPEED_100M); +} + +#define JME_WAIT_LINK_TIME 2000 /* 2000ms */ +static void +jme_wait_link(struct jme_adapter *jme) +{ + u32 phylink, to = JME_WAIT_LINK_TIME; + + mdelay(1000); + phylink = jme_linkstat_from_phy(jme); + while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { + mdelay(10); + phylink = jme_linkstat_from_phy(jme); + } +} + +static void +jme_powersave_phy(struct jme_adapter *jme) +{ + if (jme->reg_pmcs) { + jme_set_100m_half(jme); + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + jme_wait_link(jme); + jme_clear_pm(jme); + } else { + jme_phy_off(jme); + } +} + +static int +jme_close(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + jme_stop_irq(jme); + jme_free_irq(jme); + + JME_NAPI_DISABLE(jme); + + tasklet_kill(&jme->linkch_task); + tasklet_kill(&jme->txclean_task); + tasklet_kill(&jme->rxclean_task); + tasklet_kill(&jme->rxempty_task); + + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + jme->phylink = 0; + jme_phy_off(jme); + + return 0; +} + +static int +jme_alloc_txdesc(struct jme_adapter *jme, + struct sk_buff *skb) +{ + struct jme_ring *txring = &(jme->txring[0]); + int idx, nr_alloc, mask = jme->tx_ring_mask; + + idx = txring->next_to_use; + nr_alloc = skb_shinfo(skb)->nr_frags + 2; + + if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) + return -1; + + atomic_sub(nr_alloc, &txring->nr_free); + + txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; + + return idx; +} + +static int +jme_fill_tx_map(struct pci_dev *pdev, + struct txdesc *txdesc, + struct jme_buffer_info *txbi, + struct page *page, + u32 page_offset, + u32 len, + bool hidma) +{ + dma_addr_t dmaaddr; + + dmaaddr = pci_map_page(pdev, + page, + page_offset, + len, + PCI_DMA_TODEVICE); + + if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) + return -EINVAL; + + pci_dma_sync_single_for_device(pdev, + dmaaddr, + len, + PCI_DMA_TODEVICE); + + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->desc2.flags = TXFLAG_OWN; + txdesc->desc2.flags |= (hidma) ? 
TXFLAG_64BIT : 0; + txdesc->desc2.datalen = cpu_to_le16(len); + txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32); + txdesc->desc2.bufaddrl = cpu_to_le32( + (__u64)dmaaddr & 0xFFFFFFFFUL); + + txbi->mapping = dmaaddr; + txbi->len = len; + return 0; +} + +static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) +{ + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; + int mask = jme->tx_ring_mask; + int j; + + for (j = 0 ; j < count ; j++) { + ctxbi = txbi + ((startidx + j + 2) & (mask)); + pci_unmap_page(jme->pdev, + ctxbi->mapping, + ctxbi->len, + PCI_DMA_TODEVICE); + + ctxbi->mapping = 0; + ctxbi->len = 0; + } + +} + +static int +jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) +{ + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc = txring->desc, *ctxdesc; + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; + bool hidma = jme->dev->features & NETIF_F_HIGHDMA; + int i, nr_frags = skb_shinfo(skb)->nr_frags; + int mask = jme->tx_ring_mask; + const struct skb_frag_struct *frag; + u32 len; + int ret = 0; + + for (i = 0 ; i < nr_frags ; ++i) { + frag = &skb_shinfo(skb)->frags[i]; + ctxdesc = txdesc + ((idx + i + 2) & (mask)); + ctxbi = txbi + ((idx + i + 2) & (mask)); + + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, + skb_frag_page(frag), + frag->page_offset, skb_frag_size(frag), hidma); + if (ret) { + jme_drop_tx_map(jme, idx, i); + goto out; + } + + } + + len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; + ctxdesc = txdesc + ((idx + 1) & (mask)); + ctxbi = txbi + ((idx + 1) & (mask)); + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), + offset_in_page(skb->data), len, hidma); + if (ret) + jme_drop_tx_map(jme, idx, i); + +out: + return ret; + +} + + +static int +jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) +{ + *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT); + if (*mss) { + *flags |= TXFLAG_LSEN; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, 0, + IPPROTO_TCP, + 0); + } + + return 0; + } + + return 1; +} + +static void +jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_proto; + + switch (skb->protocol) { + case htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + ip_proto = 0; + break; + } + + switch (ip_proto) { + case IPPROTO_TCP: + *flags |= TXFLAG_TCPCS; + break; + case IPPROTO_UDP: + *flags |= TXFLAG_UDPCS; + break; + default: + netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n"); + break; + } + } +} + +static inline void +jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags) +{ + if (skb_vlan_tag_present(skb)) { + *flags |= TXFLAG_TAGON; + *vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + } +} + +static int +jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) +{ + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc *txdesc; + struct jme_buffer_info *txbi; + u8 flags; + int ret = 0; + + txdesc = (struct txdesc *)txring->desc + idx; + txbi = txring->bufinf + idx; + + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->dw[2] = 0; 
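+ /* + * The descriptor words are cleared first, so TXFLAG_OWN stays clear while + * the buffers are mapped; desc1.flags (OWN | INT) is only written after + * jme_map_tx_skb() succeeds, so the NIC never sees a half-built descriptor. + */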
+ txdesc->dw[3] = 0; + txdesc->desc1.pktsize = cpu_to_le16(skb->len); + /* + * Set OWN bit at final. + * When kernel transmit faster than NIC. + * And NIC trying to send this descriptor before we tell + * it to start sending this TX queue. + * Other fields are already filled correctly. + */ + wmb(); + flags = TXFLAG_OWN | TXFLAG_INT; + /* + * Set checksum flags while not tso + */ + if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) + jme_tx_csum(jme, skb, &flags); + jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); + ret = jme_map_tx_skb(jme, skb, idx); + if (ret) + return ret; + + txdesc->desc1.flags = flags; + /* + * Set tx buffer info after telling NIC to send + * For better tx_clean timing + */ + wmb(); + txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2; + txbi->skb = skb; + txbi->len = skb->len; + txbi->start_xmit = jiffies; + if (!txbi->start_xmit) + txbi->start_xmit = (0UL-1); + + return 0; +} + +static void +jme_stop_queue_if_full(struct jme_adapter *jme) +{ + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi = txring->bufinf; + int idx = atomic_read(&txring->next_to_clean); + + txbi += idx; + + smp_wmb(); + if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { + netif_stop_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n"); + smp_wmb(); + if (atomic_read(&txring->nr_free) + >= (jme->tx_wake_threshold)) { + netif_wake_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n"); + } + } + + if (unlikely(txbi->start_xmit && + (jiffies - txbi->start_xmit) >= TX_TIMEOUT && + txbi->skb)) { + netif_stop_queue(jme->dev); + netif_info(jme, tx_queued, jme->dev, + "TX Queue Stopped %d@%lu\n", idx, jiffies); + } +} + +/* + * This function is already protected by netif_tx_lock() + */ + +static netdev_tx_t +jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int idx; + + if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) { + dev_kfree_skb_any(skb); + ++(NET_STAT(jme).tx_dropped); + return NETDEV_TX_OK; + } + + idx = jme_alloc_txdesc(jme, skb); + + if (unlikely(idx < 0)) { + netif_stop_queue(netdev); + netif_err(jme, tx_err, jme->dev, + "BUG! 
Tx ring full when queue awake!\n"); + + return NETDEV_TX_BUSY; + } + + if (jme_fill_tx_desc(jme, skb, idx)) + return NETDEV_TX_OK; + + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_QUEUE0S | + TXCS_ENABLE); + + tx_dbg(jme, "xmit: %d+%d@%lu\n", + idx, skb_shinfo(skb)->nr_frags + 2, jiffies); + jme_stop_queue_if_full(jme); + + return NETDEV_TX_OK; +} + +static void +jme_set_unicastaddr(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + val = (netdev->dev_addr[3] & 0xff) << 24 | + (netdev->dev_addr[2] & 0xff) << 16 | + (netdev->dev_addr[1] & 0xff) << 8 | + (netdev->dev_addr[0] & 0xff); + jwrite32(jme, JME_RXUMA_LO, val); + val = (netdev->dev_addr[5] & 0xff) << 8 | + (netdev->dev_addr[4] & 0xff); + jwrite32(jme, JME_RXUMA_HI, val); +} + +static int +jme_set_macaddr(struct net_device *netdev, void *p) +{ + struct jme_adapter *jme = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + spin_lock_bh(&jme->macaddr_lock); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + jme_set_unicastaddr(netdev); + spin_unlock_bh(&jme->macaddr_lock); + + return 0; +} + +static void +jme_set_multi(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 mc_hash[2] = {}; + + spin_lock_bh(&jme->rxmcs_lock); + + jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; + + if (netdev->flags & IFF_PROMISC) { + jme->reg_rxmcs |= RXMCS_ALLFRAME; + } else if (netdev->flags & IFF_ALLMULTI) { + jme->reg_rxmcs |= RXMCS_ALLMULFRAME; + } else if (netdev->flags & IFF_MULTICAST) { + struct netdev_hw_addr *ha; + int bit_nr; + + jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; + netdev_for_each_mc_addr(ha, netdev) { + bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; + mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); + } + + jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); + jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); + } + + wmb(); + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + + spin_unlock_bh(&jme->rxmcs_lock); +} + +static int +jme_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + if (new_mtu == jme->old_mtu) + return 0; + + if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || + ((new_mtu) < IPV6_MIN_MTU)) + return -EINVAL; + + + netdev->mtu = new_mtu; + netdev_update_features(netdev); + + jme_restart_rx_engine(jme); + jme_reset_link(jme); + + return 0; +} + +static void +jme_tx_timeout(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + jme->phylink = 0; + jme_reset_phy_processor(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + + /* + * Force to Reset the link again + */ + jme_reset_link(jme); +} + +static inline void jme_pause_rx(struct jme_adapter *jme) +{ + atomic_dec(&jme->link_changing); + + jme_set_rx_pcc(jme, PCC_OFF); + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_DISABLE(jme); + } else { + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + } +} + +static inline void jme_resume_rx(struct jme_adapter *jme) +{ + struct dynpcc_info *dpi = &(jme->dpi); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_ENABLE(jme); + } else { + tasklet_enable(&jme->rxclean_task); + tasklet_enable(&jme->rxempty_task); + } + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + + atomic_inc(&jme->link_changing); +} + +static void +jme_get_drvinfo(struct net_device *netdev, + 
struct ethtool_drvinfo *info) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info)); +} + +static int +jme_get_regs_len(struct net_device *netdev) +{ + return JME_REG_LEN; +} + +static void +mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) +{ + int i; + + for (i = 0 ; i < len ; i += 4) + p[i >> 2] = jread32(jme, reg + i); +} + +static void +mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) +{ + int i; + u16 *p16 = (u16 *)p; + + for (i = 0 ; i < reg_nr ; ++i) + p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); +} + +static void +jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 *p32 = (u32 *)p; + + memset(p, 0xFF, JME_REG_LEN); + + regs->version = 1; + mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); + + p32 += 0x100 >> 2; + mdio_memcpy(jme, p32, JME_PHY_REG_NR); +} + +static int +jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + ecmd->tx_coalesce_usecs = PCC_TX_TO; + ecmd->tx_max_coalesced_frames = PCC_TX_CNT; + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + ecmd->use_adaptive_rx_coalesce = false; + ecmd->rx_coalesce_usecs = 0; + ecmd->rx_max_coalesced_frames = 0; + return 0; + } + + ecmd->use_adaptive_rx_coalesce = true; + + switch (jme->dpi.cur) { + case PCC_P1: + ecmd->rx_coalesce_usecs = PCC_P1_TO; + ecmd->rx_max_coalesced_frames = PCC_P1_CNT; + break; + case PCC_P2: + ecmd->rx_coalesce_usecs = PCC_P2_TO; + ecmd->rx_max_coalesced_frames = PCC_P2_CNT; + break; + case PCC_P3: + ecmd->rx_coalesce_usecs = PCC_P3_TO; + ecmd->rx_max_coalesced_frames = PCC_P3_CNT; + break; + default: + break; + } + + return 0; +} + +static int +jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + struct dynpcc_info *dpi = &(jme->dpi); + + if (netif_running(netdev)) + return -EBUSY; + + if (ecmd->use_adaptive_rx_coalesce && + test_bit(JME_FLAG_POLL, &jme->flags)) { + clear_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_rx; + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + jme_interrupt_mode(jme); + } else if (!(ecmd->use_adaptive_rx_coalesce) && + !(test_bit(JME_FLAG_POLL, &jme->flags))) { + set_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_receive_skb; + jme_interrupt_mode(jme); + } + + return 0; +} + +static void +jme_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; + ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + spin_unlock_bh(&jme->phy_lock); + + ecmd->autoneg = + (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; +} + +static int +jme_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + if (((jme->reg_txpfc & TXPFC_PF_EN) 
!= 0) ^ + (ecmd->tx_pause != 0)) { + + if (ecmd->tx_pause) + jme->reg_txpfc |= TXPFC_PF_EN; + else + jme->reg_txpfc &= ~TXPFC_PF_EN; + + jwrite32(jme, JME_TXPFC, jme->reg_txpfc); + } + + spin_lock_bh(&jme->rxmcs_lock); + if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ + (ecmd->rx_pause != 0)) { + + if (ecmd->rx_pause) + jme->reg_rxmcs |= RXMCS_FLOWCTRL; + else + jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; + + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + } + spin_unlock_bh(&jme->rxmcs_lock); + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ + (ecmd->autoneg != 0)) { + + if (ecmd->autoneg) + val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + else + val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + jme_mdio_write(jme->dev, jme->mii_if.phy_id, + MII_ADVERTISE, val); + } + spin_unlock_bh(&jme->phy_lock); + + return 0; +} + +static void +jme_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + + wol->wolopts = 0; + + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + wol->wolopts |= WAKE_PHY; + + if (jme->reg_pmcs & PMCS_MFEN) + wol->wolopts |= WAKE_MAGIC; + +} + +static int +jme_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_MAGICSECURE | + WAKE_UCAST | + WAKE_MCAST | + WAKE_BCAST | + WAKE_ARP)) + return -EOPNOTSUPP; + + jme->reg_pmcs = 0; + + if (wol->wolopts & WAKE_PHY) + jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; + + if (wol->wolopts & WAKE_MAGIC) + jme->reg_pmcs |= PMCS_MFEN; + + jwrite32(jme, JME_PMCS, jme->reg_pmcs); + device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs)); + + return 0; +} + +static int +jme_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + return rc; +} + +static int +jme_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int rc, fdc = 0; + + if (ethtool_cmd_speed(ecmd) == SPEED_1000 + && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + /* + * Check If user changed duplex only while force_media. + * Hardware would not generate link change interrupt. 
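+ * In that case fdc is set and jme_reset_link() is called manually after + * mii_ethtool_sset() succeeds, so that the new duplex setting takes effect.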
+ */ + if (jme->mii_if.force_media && + ecmd->autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != ecmd->duplex)) + fdc = 1; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + + if (!rc) { + if (fdc) + jme_reset_link(jme); + jme->old_ecmd = *ecmd; + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; +} + +static int +jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + int rc; + struct jme_adapter *jme = netdev_priv(netdev); + struct mii_ioctl_data *mii_data = if_mii(rq); + unsigned int duplex_chg; + + if (cmd == SIOCSMIIREG) { + u16 val = mii_data->val_in; + if (!(val & (BMCR_RESET|BMCR_ANENABLE)) && + (val & BMCR_SPEED1000)) + return -EINVAL; + } + + spin_lock_bh(&jme->phy_lock); + rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg); + spin_unlock_bh(&jme->phy_lock); + + if (!rc && (cmd == SIOCSMIIREG)) { + if (duplex_chg) + jme_reset_link(jme); + jme_get_settings(netdev, &jme->old_ecmd); + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; +} + +static u32 +jme_get_link(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; +} + +static u32 +jme_get_msglevel(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + return jme->msg_enable; +} + +static void +jme_set_msglevel(struct net_device *netdev, u32 value) +{ + struct jme_adapter *jme = netdev_priv(netdev); + jme->msg_enable = value; +} + +static netdev_features_t +jme_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (netdev->mtu > 1900) + features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); + return features; +} + +static int +jme_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + spin_lock_bh(&jme->rxmcs_lock); + if (features & NETIF_F_RXCSUM) + jme->reg_rxmcs |= RXMCS_CHECKSUM; + else + jme->reg_rxmcs &= ~RXMCS_CHECKSUM; + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + spin_unlock_bh(&jme->rxmcs_lock); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void jme_netpoll(struct net_device *dev) +{ + unsigned long flags; + + local_irq_save(flags); + jme_intr(dev->irq, dev); + local_irq_restore(flags); +} +#endif + +static int +jme_nway_reset(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + jme_restart_an(jme); + return 0; +} + +static u8 +jme_smb_read(struct jme_adapter *jme, unsigned int addr) +{ + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + jwrite32(jme, JME_SMBINTF, + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_READ | + SMBINTF_HWCMD); + + val = jread32(jme, JME_SMBINTF); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; +} + +static void +jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) +{ + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus 
Busy\n"); + return; + } + + jwrite32(jme, JME_SMBINTF, + ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) | + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_WRITE | + SMBINTF_HWCMD); + + val = jread32(jme, JME_SMBINTF); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return; + } + + mdelay(2); +} + +static int +jme_get_eeprom_len(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + val = jread32(jme, JME_SMBCSR); + return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; +} + +static int +jme_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + /* + * ethtool will check the boundary for us + */ + eeprom->magic = JME_EEPROM_MAGIC; + for (i = 0 ; i < len ; ++i) + data[i] = jme_smb_read(jme, i + offset); + + return 0; +} + +static int +jme_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + if (eeprom->magic != JME_EEPROM_MAGIC) + return -EINVAL; + + /* + * ethtool will check the boundary for us + */ + for (i = 0 ; i < len ; ++i) + jme_smb_write(jme, i + offset, data[i]); + + return 0; +} + +static const struct ethtool_ops jme_ethtool_ops = { + .get_drvinfo = jme_get_drvinfo, + .get_regs_len = jme_get_regs_len, + .get_regs = jme_get_regs, + .get_coalesce = jme_get_coalesce, + .set_coalesce = jme_set_coalesce, + .get_pauseparam = jme_get_pauseparam, + .set_pauseparam = jme_set_pauseparam, + .get_wol = jme_get_wol, + .set_wol = jme_set_wol, + .get_settings = jme_get_settings, + .set_settings = jme_set_settings, + .get_link = jme_get_link, + .get_msglevel = jme_get_msglevel, + .set_msglevel = jme_set_msglevel, + .nway_reset = jme_nway_reset, + .get_eeprom_len = jme_get_eeprom_len, + .get_eeprom = jme_get_eeprom, + .set_eeprom = jme_set_eeprom, +}; + +static int +jme_pci_dma64(struct pci_dev *pdev) +{ + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + return 1; + + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) + return 1; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return 0; + + return -1; +} + +static inline void +jme_phy_init(struct jme_adapter *jme) +{ + u16 reg26; + + reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); +} + +static inline void +jme_check_hw_ver(struct jme_adapter *jme) +{ + u32 chipmode; + + chipmode = jread32(jme, JME_CHIPMODE); + + jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; + jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; + jme->chip_main_rev = jme->chiprev & 0xF; + jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; +} + +static const struct net_device_ops jme_netdev_ops = { + .ndo_open = jme_open, + .ndo_stop = jme_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = jme_ioctl, + .ndo_start_xmit = jme_start_xmit, + .ndo_set_mac_address = jme_set_macaddr, + .ndo_set_rx_mode = jme_set_multi, + .ndo_change_mtu = 
jme_change_mtu, + .ndo_tx_timeout = jme_tx_timeout, + .ndo_fix_features = jme_fix_features, + .ndo_set_features = jme_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = jme_netpoll, +#endif +}; + +static int +jme_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc = 0, using_dac, i; + struct net_device *netdev; + struct jme_adapter *jme; + u16 bmcr, bmsr; + u32 apmc; + + /* + * set up PCI device basics + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + rc = pci_enable_device(pdev); + if (rc) { + pr_err("Cannot enable PCI device\n"); + goto err_out; + } + + using_dac = jme_pci_dma64(pdev); + if (using_dac < 0) { + pr_err("Cannot set PCI DMA Mask\n"); + rc = -EIO; + goto err_out_disable_pdev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + pr_err("No PCI resource region found\n"); + rc = -ENOMEM; + goto err_out_disable_pdev; + } + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) { + pr_err("Cannot obtain PCI resource region\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + /* + * alloc and init net device + */ + netdev = alloc_etherdev(sizeof(*jme)); + if (!netdev) { + rc = -ENOMEM; + goto err_out_release_regions; + } + netdev->netdev_ops = &jme_netdev_ops; + netdev->ethtool_ops = &jme_ethtool_ops; + netdev->watchdog_timeo = TX_TIMEOUT; + netdev->hw_features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; + netdev->features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + if (using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + /* + * init adapter info + */ + jme = netdev_priv(netdev); + jme->pdev = pdev; + jme->dev = netdev; + jme->jme_rx = netif_rx; + jme->old_mtu = netdev->mtu = 1500; + jme->phylink = 0; + jme->tx_ring_size = 1 << 10; + jme->tx_ring_mask = jme->tx_ring_size - 1; + jme->tx_wake_threshold = 1 << 9; + jme->rx_ring_size = 1 << 9; + jme->rx_ring_mask = jme->rx_ring_size - 1; + jme->msg_enable = JME_DEF_MSG_ENABLE; + jme->regs = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!(jme->regs)) { + pr_err("Mapping PCI resource region error\n"); + rc = -ENOMEM; + goto err_out_free_netdev; + } + + if (no_pseudohp) { + apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } else if (force_pseudohp) { + apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } + + NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) + + spin_lock_init(&jme->phy_lock); + spin_lock_init(&jme->macaddr_lock); + spin_lock_init(&jme->rxmcs_lock); + + atomic_set(&jme->link_changing, 1); + atomic_set(&jme->rx_cleaning, 1); + atomic_set(&jme->tx_cleaning, 1); + atomic_set(&jme->rx_empty, 1); + + tasklet_init(&jme->pcc_task, + jme_pcc_tasklet, + (unsigned long) jme); + jme->dpi.cur = PCC_P1; + + jme->reg_ghc = 0; + jme->reg_rxcs = RXCS_DEFAULT; + jme->reg_rxmcs = RXMCS_DEFAULT; + jme->reg_txpfc = 0; + jme->reg_pmcs = PMCS_MFEN; + jme->reg_gpreg1 = GPREG1_DEFAULT; + + if (jme->reg_rxmcs & RXMCS_CHECKSUM) + netdev->features |= NETIF_F_RXCSUM; + + /* + * Get Max Read Req Size from PCI Config Space + */ + pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); + jme->mrrs &= PCI_DCSR_MRRS_MASK; + switch (jme->mrrs) { + case MRRS_128B: + 
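+ /* + * Clamp the TXCS DMA burst size to the Max Read Req Size read above: + * 128B and 256B map directly, anything larger uses the 512B maximum + * that TXCS_DMASIZE supports. + */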
jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; + break; + case MRRS_256B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; + break; + default: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; + break; + } + + /* + * Must check before reset_mac_processor + */ + jme_check_hw_ver(jme); + jme->mii_if.dev = netdev; + if (jme->fpgaver) { + jme->mii_if.phy_id = 0; + for (i = 1 ; i < 32 ; ++i) { + bmcr = jme_mdio_read(netdev, i, MII_BMCR); + bmsr = jme_mdio_read(netdev, i, MII_BMSR); + if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) { + jme->mii_if.phy_id = i; + break; + } + } + + if (!jme->mii_if.phy_id) { + rc = -EIO; + pr_err("Can not find phy_id\n"); + goto err_out_unmap; + } + + jme->reg_ghc |= GHC_LINK_POLL; + } else { + jme->mii_if.phy_id = 1; + } + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme->mii_if.supports_gmii = true; + else + jme->mii_if.supports_gmii = false; + jme->mii_if.phy_id_mask = 0x1F; + jme->mii_if.reg_num_mask = 0x1F; + jme->mii_if.mdio_read = jme_mdio_read; + jme->mii_if.mdio_write = jme_mdio_write; + + jme_clear_pm(jme); + device_set_wakeup_enable(&pdev->dev, true); + + jme_set_phyfifo_5level(jme); + jme->pcirev = pdev->revision; + if (!jme->fpgaver) + jme_phy_init(jme); + jme_phy_off(jme); + + /* + * Reset MAC processor and reload EEPROM for MAC Address + */ + jme_reset_mac_processor(jme); + rc = jme_reload_eeprom(jme); + if (rc) { + pr_err("Reload eeprom for reading MAC Address error\n"); + goto err_out_unmap; + } + jme_load_macaddr(netdev); + + /* + * Tell stack that we are not ready to work until open() + */ + netif_carrier_off(netdev); + + rc = register_netdev(netdev); + if (rc) { + pr_err("Cannot register net device\n"); + goto err_out_unmap; + } + + netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n", + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? + "JMC250 Gigabit Ethernet" : + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? + "JMC260 Fast Ethernet" : "Unknown", + (jme->fpgaver != 0) ? " (FPGA)" : "", + (jme->fpgaver != 0) ? 
jme->fpgaver : jme->chiprev, + jme->pcirev, netdev->dev_addr); + + return 0; + +err_out_unmap: + iounmap(jme->regs); +err_out_free_netdev: + free_netdev(netdev); +err_out_release_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out: + return rc; +} + +static void +jme_remove_one(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + unregister_netdev(netdev); + iounmap(jme->regs); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + +} + +static void +jme_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + jme_powersave_phy(jme); + pci_pme_active(pdev, true); +} + +#ifdef CONFIG_PM_SLEEP +static int +jme_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + if (!netif_running(netdev)) + return 0; + + atomic_dec(&jme->link_changing); + + netif_device_detach(netdev); + netif_stop_queue(netdev); + jme_stop_irq(jme); + + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + jme_stop_pcc_timer(jme); + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + netif_carrier_off(netdev); + jme->phylink = 0; + } + + tasklet_enable(&jme->txclean_task); + tasklet_enable(&jme->rxclean_task); + tasklet_enable(&jme->rxempty_task); + + jme_powersave_phy(jme); + + return 0; +} + +static int +jme_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + if (!netif_running(netdev)) + return 0; + + jme_clear_pm(jme); + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + jme_phy_calibration(jme); + jme_phy_setEA(jme); + jme_start_irq(jme); + netif_device_attach(netdev); + + atomic_inc(&jme->link_changing); + + jme_reset_link(jme); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); +#define JME_PM_OPS (&jme_pm_ops) + +#else + +#define JME_PM_OPS NULL +#endif + +static const struct pci_device_id jme_pci_tbl[] = { + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, + { } +}; + +static struct pci_driver jme_driver = { + .name = DRV_NAME, + .id_table = jme_pci_tbl, + .probe = jme_init_one, + .remove = jme_remove_one, + .shutdown = jme_shutdown, + .driver.pm = JME_PM_OPS, +}; + +static int __init +jme_init_module(void) +{ + pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION); + return pci_register_driver(&jme_driver); +} + +static void __exit +jme_cleanup_module(void) +{ + pci_unregister_driver(&jme_driver); +} + +module_init(jme_init_module); +module_exit(jme_cleanup_module); + +MODULE_AUTHOR("Guo-Fu Tseng "); +MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, jme_pci_tbl); diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h new file mode 100644 index 
000000000..58cd67c0c --- /dev/null +++ b/drivers/net/ethernet/jme.h @@ -0,0 +1,1278 @@ +/* + * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver + * + * Copyright 2008 JMicron Technology Corporation + * http://www.jmicron.com/ + * Copyright (c) 2009 - 2010 Guo-Fu Tseng + * + * Author: Guo-Fu Tseng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __JME_H_INCLUDED__ +#define __JME_H_INCLUDED__ +#include + +#define DRV_NAME "jme" +#define DRV_VERSION "1.0.8" + +#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 +#define PCI_DEVICE_ID_JMICRON_JMC260 0x0260 + +/* + * Message related definitions + */ +#define JME_DEF_MSG_ENABLE \ + (NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | \ + NETIF_MSG_HW) + +#ifdef TX_DEBUG +#define tx_dbg(priv, fmt, args...) \ + printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args) +#else +#define tx_dbg(priv, fmt, args...) \ +do { \ + if (0) \ + printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args); \ +} while (0) +#endif + +/* + * Extra PCI Configuration space interface + */ +#define PCI_DCSR_MRRS 0x59 +#define PCI_DCSR_MRRS_MASK 0x70 + +enum pci_dcsr_mrrs_vals { + MRRS_128B = 0x00, + MRRS_256B = 0x10, + MRRS_512B = 0x20, + MRRS_1024B = 0x30, + MRRS_2048B = 0x40, + MRRS_4096B = 0x50, +}; + +#define PCI_SPI 0xB0 + +enum pci_spi_bits { + SPI_EN = 0x10, + SPI_MISO = 0x08, + SPI_MOSI = 0x04, + SPI_SCLK = 0x02, + SPI_CS = 0x01, +}; + +struct jme_spi_op { + void __user *uwbuf; + void __user *urbuf; + __u8 wn; /* Number of write actions */ + __u8 rn; /* Number of read actions */ + __u8 bitn; /* Number of bits per action */ + __u8 spd; /* The maxim acceptable speed of controller, in MHz.*/ + __u8 mode; /* CPOL, CPHA, and Duplex mode of SPI */ + + /* Internal use only */ + u8 *kwbuf; + u8 *krbuf; + u8 sr; + u16 halfclk; /* Half of clock cycle calculated from spd, in ns */ +}; + +enum jme_spi_op_bits { + SPI_MODE_CPHA = 0x01, + SPI_MODE_CPOL = 0x02, + SPI_MODE_DUP = 0x80, +}; + +#define HALF_US 500 /* 500 ns */ + +#define PCI_PRIV_PE1 0xE4 + +enum pci_priv_pe1_bit_masks { + PE1_ASPMSUPRT = 0x00000003, /* + * RW: + * Aspm_support[1:0] + * (R/W Port of 5C[11:10]) + */ + PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */ + PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */ + PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */ + PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */ + PE1_GPREG0 = 0x0000FF00, /* + * SRW: + * Cfg_gp_reg0 + * [7:6] phy_giga BG control + * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#) + * [4:0] Reserved + */ + PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */ + PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */ + PE1_REVID = 0xFF000000, /* RO: Rev ID */ +}; + +enum pci_priv_pe1_values { + PE1_GPREG0_ENBG = 0x00000000, /* en BG */ + PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */ + PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */ + PE1_GPREG0_PDPCIEIDDQ 
= 0x0000C000, /* giga_PD + pcie_iddq */ +}; + +/* + * Dynamic(adaptive)/Static PCC values + */ +enum dynamic_pcc_values { + PCC_OFF = 0, + PCC_P1 = 1, + PCC_P2 = 2, + PCC_P3 = 3, + + PCC_OFF_TO = 0, + PCC_P1_TO = 1, + PCC_P2_TO = 64, + PCC_P3_TO = 128, + + PCC_OFF_CNT = 0, + PCC_P1_CNT = 1, + PCC_P2_CNT = 16, + PCC_P3_CNT = 32, +}; +struct dynpcc_info { + unsigned long last_bytes; + unsigned long last_pkts; + unsigned long intr_cnt; + unsigned char cur; + unsigned char attempt; + unsigned char cnt; +}; +#define PCC_INTERVAL_US 100000 +#define PCC_INTERVAL (HZ / (1000000 / PCC_INTERVAL_US)) +#define PCC_P3_THRESHOLD (2 * 1024 * 1024) +#define PCC_P2_THRESHOLD 800 +#define PCC_INTR_THRESHOLD 800 +#define PCC_TX_TO 1000 +#define PCC_TX_CNT 8 + +/* + * TX/RX Descriptors + * + * TX/RX Ring DESC Count Must be multiple of 16 and <= 1024 + */ +#define RING_DESC_ALIGN 16 /* Descriptor alignment */ +#define TX_DESC_SIZE 16 +#define TX_RING_NR 8 +#define TX_RING_ALLOC_SIZE(s) ((s * TX_DESC_SIZE) + RING_DESC_ALIGN) + +struct txdesc { + union { + __u8 all[16]; + __le32 dw[4]; + struct { + /* DW0 */ + __le16 vlan; + __u8 rsv1; + __u8 flags; + + /* DW1 */ + __le16 datalen; + __le16 mss; + + /* DW2 */ + __le16 pktsize; + __le16 rsv2; + + /* DW3 */ + __le32 bufaddr; + } desc1; + struct { + /* DW0 */ + __le16 rsv1; + __u8 rsv2; + __u8 flags; + + /* DW1 */ + __le16 datalen; + __le16 rsv3; + + /* DW2 */ + __le32 bufaddrh; + + /* DW3 */ + __le32 bufaddrl; + } desc2; + struct { + /* DW0 */ + __u8 ehdrsz; + __u8 rsv1; + __u8 rsv2; + __u8 flags; + + /* DW1 */ + __le16 trycnt; + __le16 segcnt; + + /* DW2 */ + __le16 pktsz; + __le16 rsv3; + + /* DW3 */ + __le32 bufaddrl; + } descwb; + }; +}; + +enum jme_txdesc_flags_bits { + TXFLAG_OWN = 0x80, + TXFLAG_INT = 0x40, + TXFLAG_64BIT = 0x20, + TXFLAG_TCPCS = 0x10, + TXFLAG_UDPCS = 0x08, + TXFLAG_IPCS = 0x04, + TXFLAG_LSEN = 0x02, + TXFLAG_TAGON = 0x01, +}; + +#define TXDESC_MSS_SHIFT 2 +enum jme_txwbdesc_flags_bits { + TXWBFLAG_OWN = 0x80, + TXWBFLAG_INT = 0x40, + TXWBFLAG_TMOUT = 0x20, + TXWBFLAG_TRYOUT = 0x10, + TXWBFLAG_COL = 0x08, + + TXWBFLAG_ALLERR = TXWBFLAG_TMOUT | + TXWBFLAG_TRYOUT | + TXWBFLAG_COL, +}; + +#define RX_DESC_SIZE 16 +#define RX_RING_NR 4 +#define RX_RING_ALLOC_SIZE(s) ((s * RX_DESC_SIZE) + RING_DESC_ALIGN) +#define RX_BUF_DMA_ALIGN 8 +#define RX_PREPAD_SIZE 10 +#define ETH_CRC_LEN 2 +#define RX_VLANHDR_LEN 2 +#define RX_EXTRA_LEN (RX_PREPAD_SIZE + \ + ETH_HLEN + \ + ETH_CRC_LEN + \ + RX_VLANHDR_LEN + \ + RX_BUF_DMA_ALIGN) + +struct rxdesc { + union { + __u8 all[16]; + __le32 dw[4]; + struct { + /* DW0 */ + __le16 rsv2; + __u8 rsv1; + __u8 flags; + + /* DW1 */ + __le16 datalen; + __le16 wbcpl; + + /* DW2 */ + __le32 bufaddrh; + + /* DW3 */ + __le32 bufaddrl; + } desc1; + struct { + /* DW0 */ + __le16 vlan; + __le16 flags; + + /* DW1 */ + __le16 framesize; + __u8 errstat; + __u8 desccnt; + + /* DW2 */ + __le32 rsshash; + + /* DW3 */ + __u8 hashfun; + __u8 hashtype; + __le16 resrv; + } descwb; + }; +}; + +enum jme_rxdesc_flags_bits { + RXFLAG_OWN = 0x80, + RXFLAG_INT = 0x40, + RXFLAG_64BIT = 0x20, +}; + +enum jme_rxwbdesc_flags_bits { + RXWBFLAG_OWN = 0x8000, + RXWBFLAG_INT = 0x4000, + RXWBFLAG_MF = 0x2000, + RXWBFLAG_64BIT = 0x2000, + RXWBFLAG_TCPON = 0x1000, + RXWBFLAG_UDPON = 0x0800, + RXWBFLAG_IPCS = 0x0400, + RXWBFLAG_TCPCS = 0x0200, + RXWBFLAG_UDPCS = 0x0100, + RXWBFLAG_TAGON = 0x0080, + RXWBFLAG_IPV4 = 0x0040, + RXWBFLAG_IPV6 = 0x0020, + RXWBFLAG_PAUSE = 0x0010, + RXWBFLAG_MAGIC = 0x0008, + RXWBFLAG_WAKEUP = 0x0004, + RXWBFLAG_DEST = 0x0003, + 
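+ /* Possible values of the two-bit RXWBFLAG_DEST destination-type field: */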
RXWBFLAG_DEST_UNI = 0x0001, + RXWBFLAG_DEST_MUL = 0x0002, + RXWBFLAG_DEST_BRO = 0x0003, +}; + +enum jme_rxwbdesc_desccnt_mask { + RXWBDCNT_WBCPL = 0x80, + RXWBDCNT_DCNT = 0x7F, +}; + +enum jme_rxwbdesc_errstat_bits { + RXWBERR_LIMIT = 0x80, + RXWBERR_MIIER = 0x40, + RXWBERR_NIBON = 0x20, + RXWBERR_COLON = 0x10, + RXWBERR_ABORT = 0x08, + RXWBERR_SHORT = 0x04, + RXWBERR_OVERUN = 0x02, + RXWBERR_CRCERR = 0x01, + RXWBERR_ALLERR = 0xFF, +}; + +/* + * Buffer information corresponding to ring descriptors. + */ +struct jme_buffer_info { + struct sk_buff *skb; + dma_addr_t mapping; + int len; + int nr_desc; + unsigned long start_xmit; +}; + +/* + * The structure holding buffer information and ring descriptors all together. + */ +struct jme_ring { + void *alloc; /* pointer to allocated memory */ + void *desc; /* pointer to ring memory */ + dma_addr_t dmaalloc; /* phys address of ring alloc */ + dma_addr_t dma; /* phys address for ring dma */ + + /* Buffer information corresponding to each descriptor */ + struct jme_buffer_info *bufinf; + + int next_to_use; + atomic_t next_to_clean; + atomic_t nr_free; +}; + +#define NET_STAT(priv) (priv->dev->stats) +#define NETDEV_GET_STATS(netdev, fun_ptr) +#define DECLARE_NET_DEVICE_STATS + +#define DECLARE_NAPI_STRUCT struct napi_struct napi; +#define NETIF_NAPI_SET(dev, napis, pollfn, q) \ + netif_napi_add(dev, napis, pollfn, q); +#define JME_NAPI_HOLDER(holder) struct napi_struct *holder +#define JME_NAPI_WEIGHT(w) int w +#define JME_NAPI_WEIGHT_VAL(w) w +#define JME_NAPI_WEIGHT_SET(w, r) +#define JME_RX_COMPLETE(dev, napis) napi_complete(napis) +#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); +#define JME_NAPI_DISABLE(priv) \ + if (!napi_disable_pending(&priv->napi)) \ + napi_disable(&priv->napi); +#define JME_RX_SCHEDULE_PREP(priv) \ + napi_schedule_prep(&priv->napi) +#define JME_RX_SCHEDULE(priv) \ + __napi_schedule(&priv->napi); + +/* + * Jmac Adapter Private data + */ +struct jme_adapter { + struct pci_dev *pdev; + struct net_device *dev; + void __iomem *regs; + struct mii_if_info mii_if; + struct jme_ring rxring[RX_RING_NR]; + struct jme_ring txring[TX_RING_NR]; + spinlock_t phy_lock; + spinlock_t macaddr_lock; + spinlock_t rxmcs_lock; + struct tasklet_struct rxempty_task; + struct tasklet_struct rxclean_task; + struct tasklet_struct txclean_task; + struct tasklet_struct linkch_task; + struct tasklet_struct pcc_task; + unsigned long flags; + u32 reg_txcs; + u32 reg_txpfc; + u32 reg_rxcs; + u32 reg_rxmcs; + u32 reg_ghc; + u32 reg_pmcs; + u32 reg_gpreg1; + u32 phylink; + u32 tx_ring_size; + u32 tx_ring_mask; + u32 tx_wake_threshold; + u32 rx_ring_size; + u32 rx_ring_mask; + u8 mrrs; + unsigned int fpgaver; + u8 chiprev; + u8 chip_main_rev; + u8 chip_sub_rev; + u8 pcirev; + u32 msg_enable; + struct ethtool_cmd old_ecmd; + unsigned int old_mtu; + struct dynpcc_info dpi; + atomic_t intr_sem; + atomic_t link_changing; + atomic_t tx_cleaning; + atomic_t rx_cleaning; + atomic_t rx_empty; + int (*jme_rx)(struct sk_buff *skb); + DECLARE_NAPI_STRUCT + DECLARE_NET_DEVICE_STATS +}; + +enum jme_flags_bits { + JME_FLAG_MSI = 1, + JME_FLAG_SSET = 2, + JME_FLAG_POLL = 5, + JME_FLAG_SHUTDOWN = 6, +}; + +#define TX_TIMEOUT (5 * HZ) +#define JME_REG_LEN 0x500 +#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216 + +static inline struct jme_adapter* +jme_napi_priv(struct napi_struct *napi) +{ + struct jme_adapter *jme; + jme = container_of(napi, struct jme_adapter, napi); + return jme; +} + +/* + * MMaped I/O Resters + */ +enum jme_iomap_offsets { + JME_MAC = 0x0000, + 
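+ /* + * Each block occupies a 0x400-byte window of the BAR 0 mapping. Register + * addresses below are formed by OR-ing the block base with a byte offset, + * e.g. JME_RXMCS = JME_MAC | 0x34 = 0x0034 and JME_SMBCSR = JME_PHY | 0x40 = 0x0440. + */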
JME_PHY = 0x0400, + JME_MISC = 0x0800, + JME_RSS = 0x0C00, +}; + +enum jme_iomap_lens { + JME_MAC_LEN = 0x80, + JME_PHY_LEN = 0x58, + JME_MISC_LEN = 0x98, + JME_RSS_LEN = 0xFF, +}; + +enum jme_iomap_regs { + JME_TXCS = JME_MAC | 0x00, /* Transmit Control and Status */ + JME_TXDBA_LO = JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */ + JME_TXDBA_HI = JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */ + JME_TXQDC = JME_MAC | 0x0C, /* Transmit Queue Desc Count */ + JME_TXNDA = JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */ + JME_TXMCS = JME_MAC | 0x14, /* Transmit MAC Control Status */ + JME_TXPFC = JME_MAC | 0x18, /* Transmit Pause Frame Control */ + JME_TXTRHD = JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */ + + JME_RXCS = JME_MAC | 0x20, /* Receive Control and Status */ + JME_RXDBA_LO = JME_MAC | 0x24, /* Receive Queue Desc Base Addr */ + JME_RXDBA_HI = JME_MAC | 0x28, /* Receive Queue Desc Base Addr */ + JME_RXQDC = JME_MAC | 0x2C, /* Receive Queue Desc Count */ + JME_RXNDA = JME_MAC | 0x30, /* Receive Queue Next Desc Addr */ + JME_RXMCS = JME_MAC | 0x34, /* Receive MAC Control Status */ + JME_RXUMA_LO = JME_MAC | 0x38, /* Receive Unicast MAC Address */ + JME_RXUMA_HI = JME_MAC | 0x3C, /* Receive Unicast MAC Address */ + JME_RXMCHT_LO = JME_MAC | 0x40, /* Recv Multicast Addr HashTable */ + JME_RXMCHT_HI = JME_MAC | 0x44, /* Recv Multicast Addr HashTable */ + JME_WFODP = JME_MAC | 0x48, /* Wakeup Frame Output Data Port */ + JME_WFOI = JME_MAC | 0x4C, /* Wakeup Frame Output Interface */ + + JME_SMI = JME_MAC | 0x50, /* Station Management Interface */ + JME_GHC = JME_MAC | 0x54, /* Global Host Control */ + JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ + + + JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */ + JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ + JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ + JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ + JME_SMBINTF = JME_PHY | 0x44, /* SMB Interface */ + + + JME_TMCSR = JME_MISC | 0x00, /* Timer Control/Status Register */ + JME_GPREG0 = JME_MISC | 0x08, /* General purpose REG-0 */ + JME_GPREG1 = JME_MISC | 0x0C, /* General purpose REG-1 */ + JME_IEVE = JME_MISC | 0x20, /* Interrupt Event Status */ + JME_IREQ = JME_MISC | 0x24, /* Intr Req Status(For Debug) */ + JME_IENS = JME_MISC | 0x28, /* Intr Enable - Setting Port */ + JME_IENC = JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */ + JME_PCCRX0 = JME_MISC | 0x30, /* PCC Control for RX Queue 0 */ + JME_PCCTX = JME_MISC | 0x40, /* PCC Control for TX Queues */ + JME_CHIPMODE = JME_MISC | 0x44, /* Identify FPGA Version */ + JME_SHBA_HI = JME_MISC | 0x48, /* Shadow Register Base HI */ + JME_SHBA_LO = JME_MISC | 0x4C, /* Shadow Register Base LO */ + JME_TIMER1 = JME_MISC | 0x70, /* Timer1 */ + JME_TIMER2 = JME_MISC | 0x74, /* Timer2 */ + JME_APMC = JME_MISC | 0x7C, /* Aggressive Power Mode Control */ + JME_PCCSRX0 = JME_MISC | 0x80, /* PCC Status of RX0 */ +}; + +/* + * TX Control/Status Bits + */ +enum jme_txcs_bits { + TXCS_QUEUE7S = 0x00008000, + TXCS_QUEUE6S = 0x00004000, + TXCS_QUEUE5S = 0x00002000, + TXCS_QUEUE4S = 0x00001000, + TXCS_QUEUE3S = 0x00000800, + TXCS_QUEUE2S = 0x00000400, + TXCS_QUEUE1S = 0x00000200, + TXCS_QUEUE0S = 0x00000100, + TXCS_FIFOTH = 0x000000C0, + TXCS_DMASIZE = 0x00000030, + TXCS_BURST = 0x00000004, + TXCS_ENABLE = 0x00000001, +}; + +enum jme_txcs_value { + TXCS_FIFOTH_16QW = 0x000000C0, + TXCS_FIFOTH_12QW = 0x00000080, + TXCS_FIFOTH_8QW = 0x00000040, + TXCS_FIFOTH_4QW = 
0x00000000, + + TXCS_DMASIZE_64B = 0x00000000, + TXCS_DMASIZE_128B = 0x00000010, + TXCS_DMASIZE_256B = 0x00000020, + TXCS_DMASIZE_512B = 0x00000030, + + TXCS_SELECT_QUEUE0 = 0x00000000, + TXCS_SELECT_QUEUE1 = 0x00010000, + TXCS_SELECT_QUEUE2 = 0x00020000, + TXCS_SELECT_QUEUE3 = 0x00030000, + TXCS_SELECT_QUEUE4 = 0x00040000, + TXCS_SELECT_QUEUE5 = 0x00050000, + TXCS_SELECT_QUEUE6 = 0x00060000, + TXCS_SELECT_QUEUE7 = 0x00070000, + + TXCS_DEFAULT = TXCS_FIFOTH_4QW | + TXCS_BURST, +}; + +#define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */ + +/* + * TX MAC Control/Status Bits + */ +enum jme_txmcs_bit_masks { + TXMCS_IFG2 = 0xC0000000, + TXMCS_IFG1 = 0x30000000, + TXMCS_TTHOLD = 0x00000300, + TXMCS_FBURST = 0x00000080, + TXMCS_CARRIEREXT = 0x00000040, + TXMCS_DEFER = 0x00000020, + TXMCS_BACKOFF = 0x00000010, + TXMCS_CARRIERSENSE = 0x00000008, + TXMCS_COLLISION = 0x00000004, + TXMCS_CRC = 0x00000002, + TXMCS_PADDING = 0x00000001, +}; + +enum jme_txmcs_values { + TXMCS_IFG2_6_4 = 0x00000000, + TXMCS_IFG2_8_5 = 0x40000000, + TXMCS_IFG2_10_6 = 0x80000000, + TXMCS_IFG2_12_7 = 0xC0000000, + + TXMCS_IFG1_8_4 = 0x00000000, + TXMCS_IFG1_12_6 = 0x10000000, + TXMCS_IFG1_16_8 = 0x20000000, + TXMCS_IFG1_20_10 = 0x30000000, + + TXMCS_TTHOLD_1_8 = 0x00000000, + TXMCS_TTHOLD_1_4 = 0x00000100, + TXMCS_TTHOLD_1_2 = 0x00000200, + TXMCS_TTHOLD_FULL = 0x00000300, + + TXMCS_DEFAULT = TXMCS_IFG2_8_5 | + TXMCS_IFG1_16_8 | + TXMCS_TTHOLD_FULL | + TXMCS_DEFER | + TXMCS_CRC | + TXMCS_PADDING, +}; + +enum jme_txpfc_bits_masks { + TXPFC_VLAN_TAG = 0xFFFF0000, + TXPFC_VLAN_EN = 0x00008000, + TXPFC_PF_EN = 0x00000001, +}; + +enum jme_txtrhd_bits_masks { + TXTRHD_TXPEN = 0x80000000, + TXTRHD_TXP = 0x7FFFFF00, + TXTRHD_TXREN = 0x00000080, + TXTRHD_TXRL = 0x0000007F, +}; + +enum jme_txtrhd_shifts { + TXTRHD_TXP_SHIFT = 8, + TXTRHD_TXRL_SHIFT = 0, +}; + +enum jme_txtrhd_values { + TXTRHD_FULLDUPLEX = 0x00000000, + TXTRHD_HALFDUPLEX = TXTRHD_TXPEN | + ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) | + TXTRHD_TXREN | + ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL), +}; + +/* + * RX Control/Status Bits + */ +enum jme_rxcs_bit_masks { + /* FIFO full threshold for transmitting Tx Pause Packet */ + RXCS_FIFOTHTP = 0x30000000, + /* FIFO threshold for processing next packet */ + RXCS_FIFOTHNP = 0x0C000000, + RXCS_DMAREQSZ = 0x03000000, /* DMA Request Size */ + RXCS_QUEUESEL = 0x00030000, /* Queue selection */ + RXCS_RETRYGAP = 0x0000F000, /* RX Desc full retry gap */ + RXCS_RETRYCNT = 0x00000F00, /* RX Desc full retry counter */ + RXCS_WAKEUP = 0x00000040, /* Enable receive wakeup packet */ + RXCS_MAGIC = 0x00000020, /* Enable receive magic packet */ + RXCS_SHORT = 0x00000010, /* Enable receive short packet */ + RXCS_ABORT = 0x00000008, /* Enable receive errorr packet */ + RXCS_QST = 0x00000004, /* Receive queue start */ + RXCS_SUSPEND = 0x00000002, + RXCS_ENABLE = 0x00000001, +}; + +enum jme_rxcs_values { + RXCS_FIFOTHTP_16T = 0x00000000, + RXCS_FIFOTHTP_32T = 0x10000000, + RXCS_FIFOTHTP_64T = 0x20000000, + RXCS_FIFOTHTP_128T = 0x30000000, + + RXCS_FIFOTHNP_16QW = 0x00000000, + RXCS_FIFOTHNP_32QW = 0x04000000, + RXCS_FIFOTHNP_64QW = 0x08000000, + RXCS_FIFOTHNP_128QW = 0x0C000000, + + RXCS_DMAREQSZ_16B = 0x00000000, + RXCS_DMAREQSZ_32B = 0x01000000, + RXCS_DMAREQSZ_64B = 0x02000000, + RXCS_DMAREQSZ_128B = 0x03000000, + + RXCS_QUEUESEL_Q0 = 0x00000000, + RXCS_QUEUESEL_Q1 = 0x00010000, + RXCS_QUEUESEL_Q2 = 0x00020000, + RXCS_QUEUESEL_Q3 = 0x00030000, + + RXCS_RETRYGAP_256ns = 0x00000000, + RXCS_RETRYGAP_512ns = 0x00001000, + RXCS_RETRYGAP_1024ns = 
0x00002000, + RXCS_RETRYGAP_2048ns = 0x00003000, + RXCS_RETRYGAP_4096ns = 0x00004000, + RXCS_RETRYGAP_8192ns = 0x00005000, + RXCS_RETRYGAP_16384ns = 0x00006000, + RXCS_RETRYGAP_32768ns = 0x00007000, + + RXCS_RETRYCNT_0 = 0x00000000, + RXCS_RETRYCNT_4 = 0x00000100, + RXCS_RETRYCNT_8 = 0x00000200, + RXCS_RETRYCNT_12 = 0x00000300, + RXCS_RETRYCNT_16 = 0x00000400, + RXCS_RETRYCNT_20 = 0x00000500, + RXCS_RETRYCNT_24 = 0x00000600, + RXCS_RETRYCNT_28 = 0x00000700, + RXCS_RETRYCNT_32 = 0x00000800, + RXCS_RETRYCNT_36 = 0x00000900, + RXCS_RETRYCNT_40 = 0x00000A00, + RXCS_RETRYCNT_44 = 0x00000B00, + RXCS_RETRYCNT_48 = 0x00000C00, + RXCS_RETRYCNT_52 = 0x00000D00, + RXCS_RETRYCNT_56 = 0x00000E00, + RXCS_RETRYCNT_60 = 0x00000F00, + + RXCS_DEFAULT = RXCS_FIFOTHTP_128T | + RXCS_FIFOTHNP_16QW | + RXCS_DMAREQSZ_128B | + RXCS_RETRYGAP_256ns | + RXCS_RETRYCNT_32, +}; + +#define JME_RX_DISABLE_TIMEOUT 10 /* 10 msec */ + +/* + * RX MAC Control/Status Bits + */ +enum jme_rxmcs_bits { + RXMCS_ALLFRAME = 0x00000800, + RXMCS_BRDFRAME = 0x00000400, + RXMCS_MULFRAME = 0x00000200, + RXMCS_UNIFRAME = 0x00000100, + RXMCS_ALLMULFRAME = 0x00000080, + RXMCS_MULFILTERED = 0x00000040, + RXMCS_RXCOLLDEC = 0x00000020, + RXMCS_FLOWCTRL = 0x00000008, + RXMCS_VTAGRM = 0x00000004, + RXMCS_PREPAD = 0x00000002, + RXMCS_CHECKSUM = 0x00000001, + + RXMCS_DEFAULT = RXMCS_VTAGRM | + RXMCS_PREPAD | + RXMCS_FLOWCTRL | + RXMCS_CHECKSUM, +}; + +/* Extern PHY common register 2 */ + +#define PHY_GAD_TEST_MODE_1 0x00002000 +#define PHY_GAD_TEST_MODE_MSK 0x0000E000 +#define JM_PHY_SPEC_REG_READ 0x00004000 +#define JM_PHY_SPEC_REG_WRITE 0x00008000 +#define PHY_CALIBRATION_DELAY 20 +#define JM_PHY_SPEC_ADDR_REG 0x1E +#define JM_PHY_SPEC_DATA_REG 0x1F + +#define JM_PHY_EXT_COMM_0_REG 0x30 +#define JM_PHY_EXT_COMM_1_REG 0x31 +#define JM_PHY_EXT_COMM_2_REG 0x32 +#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01 +#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02 +#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10 +#define PCI_PRIV_SHARE_NICCTRL 0xF5 +#define JME_FLAG_PHYEA_ENABLE 0x2 + +/* + * Wakeup Frame setup interface registers + */ +#define WAKEUP_FRAME_NR 8 +#define WAKEUP_FRAME_MASK_DWNR 4 + +enum jme_wfoi_bit_masks { + WFOI_MASK_SEL = 0x00000070, + WFOI_CRC_SEL = 0x00000008, + WFOI_FRAME_SEL = 0x00000007, +}; + +enum jme_wfoi_shifts { + WFOI_MASK_SHIFT = 4, +}; + +/* + * SMI Related definitions + */ +enum jme_smi_bit_mask { + SMI_DATA_MASK = 0xFFFF0000, + SMI_REG_ADDR_MASK = 0x0000F800, + SMI_PHY_ADDR_MASK = 0x000007C0, + SMI_OP_WRITE = 0x00000020, + /* Set to 1, after req done it'll be cleared to 0 */ + SMI_OP_REQ = 0x00000010, + SMI_OP_MDIO = 0x00000008, /* Software assess In/Out */ + SMI_OP_MDOE = 0x00000004, /* Software Output Enable */ + SMI_OP_MDC = 0x00000002, /* Software CLK Control */ + SMI_OP_MDEN = 0x00000001, /* Software access Enable */ +}; + +enum jme_smi_bit_shift { + SMI_DATA_SHIFT = 16, + SMI_REG_ADDR_SHIFT = 11, + SMI_PHY_ADDR_SHIFT = 6, +}; + +static inline u32 smi_reg_addr(int x) +{ + return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK; +} + +static inline u32 smi_phy_addr(int x) +{ + return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK; +} + +#define JME_PHY_TIMEOUT 100 /* 100 msec */ +#define JME_PHY_REG_NR 32 + +/* + * Global Host Control + */ +enum jme_ghc_bit_mask { + GHC_SWRST = 0x40000000, + GHC_TO_CLK_SRC = 0x00C00000, + GHC_TXMAC_CLK_SRC = 0x00300000, + GHC_DPX = 0x00000040, + GHC_SPEED = 0x00000030, + GHC_LINK_POLL = 0x00000001, +}; + +enum jme_ghc_speed_val { + GHC_SPEED_10M = 0x00000010, + GHC_SPEED_100M = 0x00000020, + 
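+ /* Encodings of the two-bit GHC_SPEED field (0x00000030) in jme_ghc_bit_mask. */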
GHC_SPEED_1000M = 0x00000030, +}; + +enum jme_ghc_to_clk { + GHC_TO_CLK_OFF = 0x00000000, + GHC_TO_CLK_GPHY = 0x00400000, + GHC_TO_CLK_PCIE = 0x00800000, + GHC_TO_CLK_INVALID = 0x00C00000, +}; + +enum jme_ghc_txmac_clk { + GHC_TXMAC_CLK_OFF = 0x00000000, + GHC_TXMAC_CLK_GPHY = 0x00100000, + GHC_TXMAC_CLK_PCIE = 0x00200000, + GHC_TXMAC_CLK_INVALID = 0x00300000, +}; + +/* + * Power management control and status register + */ +enum jme_pmcs_bit_masks { + PMCS_STMASK = 0xFFFF0000, + PMCS_WF7DET = 0x80000000, + PMCS_WF6DET = 0x40000000, + PMCS_WF5DET = 0x20000000, + PMCS_WF4DET = 0x10000000, + PMCS_WF3DET = 0x08000000, + PMCS_WF2DET = 0x04000000, + PMCS_WF1DET = 0x02000000, + PMCS_WF0DET = 0x01000000, + PMCS_LFDET = 0x00040000, + PMCS_LRDET = 0x00020000, + PMCS_MFDET = 0x00010000, + PMCS_ENMASK = 0x0000FFFF, + PMCS_WF7EN = 0x00008000, + PMCS_WF6EN = 0x00004000, + PMCS_WF5EN = 0x00002000, + PMCS_WF4EN = 0x00001000, + PMCS_WF3EN = 0x00000800, + PMCS_WF2EN = 0x00000400, + PMCS_WF1EN = 0x00000200, + PMCS_WF0EN = 0x00000100, + PMCS_LFEN = 0x00000004, + PMCS_LREN = 0x00000002, + PMCS_MFEN = 0x00000001, +}; + +/* + * New PHY Power Control Register + */ +enum jme_phy_pwr_bit_masks { + PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */ + PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */ + PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */ + PHY_PWR_CLKSEL = 0x08000000, /* + * XTL_OUT Clock select + * (an internal free-running clock) + * 0: xtl_out = phy_giga.A_XTL25_O + * 1: xtl_out = phy_giga.PD_OSC + */ +}; + +/* + * Giga PHY Status Registers + */ +enum jme_phy_link_bit_mask { + PHY_LINK_SPEED_MASK = 0x0000C000, + PHY_LINK_DUPLEX = 0x00002000, + PHY_LINK_SPEEDDPU_RESOLVED = 0x00000800, + PHY_LINK_UP = 0x00000400, + PHY_LINK_AUTONEG_COMPLETE = 0x00000200, + PHY_LINK_MDI_STAT = 0x00000040, +}; + +enum jme_phy_link_speed_val { + PHY_LINK_SPEED_10M = 0x00000000, + PHY_LINK_SPEED_100M = 0x00004000, + PHY_LINK_SPEED_1000M = 0x00008000, +}; + +#define JME_SPDRSV_TIMEOUT 500 /* 500 us */ + +/* + * SMB Control and Status + */ +enum jme_smbcsr_bit_mask { + SMBCSR_CNACK = 0x00020000, + SMBCSR_RELOAD = 0x00010000, + SMBCSR_EEPROMD = 0x00000020, + SMBCSR_INITDONE = 0x00000010, + SMBCSR_BUSY = 0x0000000F, +}; + +enum jme_smbintf_bit_mask { + SMBINTF_HWDATR = 0xFF000000, + SMBINTF_HWDATW = 0x00FF0000, + SMBINTF_HWADDR = 0x0000FF00, + SMBINTF_HWRWN = 0x00000020, + SMBINTF_HWCMD = 0x00000010, + SMBINTF_FASTM = 0x00000008, + SMBINTF_GPIOSCL = 0x00000004, + SMBINTF_GPIOSDA = 0x00000002, + SMBINTF_GPIOEN = 0x00000001, +}; + +enum jme_smbintf_vals { + SMBINTF_HWRWN_READ = 0x00000020, + SMBINTF_HWRWN_WRITE = 0x00000000, +}; + +enum jme_smbintf_shifts { + SMBINTF_HWDATR_SHIFT = 24, + SMBINTF_HWDATW_SHIFT = 16, + SMBINTF_HWADDR_SHIFT = 8, +}; + +#define JME_EEPROM_RELOAD_TIMEOUT 2000 /* 2000 msec */ +#define JME_SMB_BUSY_TIMEOUT 20 /* 20 msec */ +#define JME_SMB_LEN 256 +#define JME_EEPROM_MAGIC 0x250 + +/* + * Timer Control/Status Register + */ +enum jme_tmcsr_bit_masks { + TMCSR_SWIT = 0x80000000, + TMCSR_EN = 0x01000000, + TMCSR_CNT = 0x00FFFFFF, +}; + +/* + * General Purpose REG-0 + */ +enum jme_gpreg0_masks { + GPREG0_DISSH = 0xFF000000, + GPREG0_PCIRLMT = 0x00300000, + GPREG0_PCCNOMUTCLR = 0x00040000, + GPREG0_LNKINTPOLL = 0x00001000, + GPREG0_PCCTMR = 0x00000300, + GPREG0_PHYADDR = 0x0000001F, +}; + +enum jme_gpreg0_vals { + GPREG0_DISSH_DW7 = 0x80000000, + GPREG0_DISSH_DW6 = 0x40000000, + GPREG0_DISSH_DW5 = 0x20000000, + GPREG0_DISSH_DW4 = 0x10000000, + GPREG0_DISSH_DW3 = 0x08000000, + 
GPREG0_DISSH_DW2 = 0x04000000, + GPREG0_DISSH_DW1 = 0x02000000, + GPREG0_DISSH_DW0 = 0x01000000, + GPREG0_DISSH_ALL = 0xFF000000, + + GPREG0_PCIRLMT_8 = 0x00000000, + GPREG0_PCIRLMT_6 = 0x00100000, + GPREG0_PCIRLMT_5 = 0x00200000, + GPREG0_PCIRLMT_4 = 0x00300000, + + GPREG0_PCCTMR_16ns = 0x00000000, + GPREG0_PCCTMR_256ns = 0x00000100, + GPREG0_PCCTMR_1us = 0x00000200, + GPREG0_PCCTMR_1ms = 0x00000300, + + GPREG0_PHYADDR_1 = 0x00000001, + + GPREG0_DEFAULT = GPREG0_PCIRLMT_4 | + GPREG0_PCCTMR_1us | + GPREG0_PHYADDR_1, +}; + +/* + * General Purpose REG-1 + */ +enum jme_gpreg1_bit_masks { + GPREG1_RXCLKOFF = 0x04000000, + GPREG1_PCREQN = 0x00020000, + GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */ + GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */ + GPREG1_INTRDELAYUNIT = 0x00000018, + GPREG1_INTRDELAYENABLE = 0x00000007, +}; + +enum jme_gpreg1_vals { + GPREG1_INTDLYUNIT_16NS = 0x00000000, + GPREG1_INTDLYUNIT_256NS = 0x00000008, + GPREG1_INTDLYUNIT_1US = 0x00000010, + GPREG1_INTDLYUNIT_16US = 0x00000018, + + GPREG1_INTDLYEN_1U = 0x00000001, + GPREG1_INTDLYEN_2U = 0x00000002, + GPREG1_INTDLYEN_3U = 0x00000003, + GPREG1_INTDLYEN_4U = 0x00000004, + GPREG1_INTDLYEN_5U = 0x00000005, + GPREG1_INTDLYEN_6U = 0x00000006, + GPREG1_INTDLYEN_7U = 0x00000007, + + GPREG1_DEFAULT = GPREG1_PCREQN, +}; + +/* + * Interrupt Status Bits + */ +enum jme_interrupt_bits { + INTR_SWINTR = 0x80000000, + INTR_TMINTR = 0x40000000, + INTR_LINKCH = 0x20000000, + INTR_PAUSERCV = 0x10000000, + INTR_MAGICRCV = 0x08000000, + INTR_WAKERCV = 0x04000000, + INTR_PCCRX0TO = 0x02000000, + INTR_PCCRX1TO = 0x01000000, + INTR_PCCRX2TO = 0x00800000, + INTR_PCCRX3TO = 0x00400000, + INTR_PCCTXTO = 0x00200000, + INTR_PCCRX0 = 0x00100000, + INTR_PCCRX1 = 0x00080000, + INTR_PCCRX2 = 0x00040000, + INTR_PCCRX3 = 0x00020000, + INTR_PCCTX = 0x00010000, + INTR_RX3EMP = 0x00008000, + INTR_RX2EMP = 0x00004000, + INTR_RX1EMP = 0x00002000, + INTR_RX0EMP = 0x00001000, + INTR_RX3 = 0x00000800, + INTR_RX2 = 0x00000400, + INTR_RX1 = 0x00000200, + INTR_RX0 = 0x00000100, + INTR_TX7 = 0x00000080, + INTR_TX6 = 0x00000040, + INTR_TX5 = 0x00000020, + INTR_TX4 = 0x00000010, + INTR_TX3 = 0x00000008, + INTR_TX2 = 0x00000004, + INTR_TX1 = 0x00000002, + INTR_TX0 = 0x00000001, +}; + +static const u32 INTR_ENABLE = INTR_SWINTR | + INTR_TMINTR | + INTR_LINKCH | + INTR_PCCRX0TO | + INTR_PCCRX0 | + INTR_PCCTXTO | + INTR_PCCTX | + INTR_RX0EMP; + +/* + * PCC Control Registers + */ +enum jme_pccrx_masks { + PCCRXTO_MASK = 0xFFFF0000, + PCCRX_MASK = 0x0000FF00, +}; + +enum jme_pcctx_masks { + PCCTXTO_MASK = 0xFFFF0000, + PCCTX_MASK = 0x0000FF00, + PCCTX_QS_MASK = 0x000000FF, +}; + +enum jme_pccrx_shifts { + PCCRXTO_SHIFT = 16, + PCCRX_SHIFT = 8, +}; + +enum jme_pcctx_shifts { + PCCTXTO_SHIFT = 16, + PCCTX_SHIFT = 8, +}; + +enum jme_pcctx_bits { + PCCTXQ0_EN = 0x00000001, + PCCTXQ1_EN = 0x00000002, + PCCTXQ2_EN = 0x00000004, + PCCTXQ3_EN = 0x00000008, + PCCTXQ4_EN = 0x00000010, + PCCTXQ5_EN = 0x00000020, + PCCTXQ6_EN = 0x00000040, + PCCTXQ7_EN = 0x00000080, +}; + +/* + * Chip Mode Register + */ +enum jme_chipmode_bit_masks { + CM_FPGAVER_MASK = 0xFFFF0000, + CM_CHIPREV_MASK = 0x0000FF00, + CM_CHIPMODE_MASK = 0x0000000F, +}; + +enum jme_chipmode_shifts { + CM_FPGAVER_SHIFT = 16, + CM_CHIPREV_SHIFT = 8, +}; + +/* + * Aggressive Power Mode Control + */ +enum jme_apmc_bits { + JME_APMC_PCIE_SD_EN = 0x40000000, + JME_APMC_PSEUDO_HP_EN = 0x20000000, + JME_APMC_EPIEN = 0x04000000, + JME_APMC_EPIEN_CTRL = 0x03000000, +}; + +enum jme_apmc_values { 
+ JME_APMC_EPIEN_CTRL_EN = 0x02000000, + JME_APMC_EPIEN_CTRL_DIS = 0x01000000, +}; + +#define APMC_PHP_SHUTDOWN_DELAY (10 * 1000 * 1000) + +#ifdef REG_DEBUG +static char *MAC_REG_NAME[] = { + "JME_TXCS", "JME_TXDBA_LO", "JME_TXDBA_HI", "JME_TXQDC", + "JME_TXNDA", "JME_TXMCS", "JME_TXPFC", "JME_TXTRHD", + "JME_RXCS", "JME_RXDBA_LO", "JME_RXDBA_HI", "JME_RXQDC", + "JME_RXNDA", "JME_RXMCS", "JME_RXUMA_LO", "JME_RXUMA_HI", + "JME_RXMCHT_LO", "JME_RXMCHT_HI", "JME_WFODP", "JME_WFOI", + "JME_SMI", "JME_GHC", "UNKNOWN", "UNKNOWN", + "JME_PMCS"}; + +static char *PE_REG_NAME[] = { + "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "UNKNOWN", "UNKNOWN", "JME_PHY_CS", "UNKNOWN", + "JME_PHY_LINK", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "JME_SMBCSR", "JME_SMBINTF"}; + +static char *MISC_REG_NAME[] = { + "JME_TMCSR", "JME_GPIO", "JME_GPREG0", "JME_GPREG1", + "JME_IEVE", "JME_IREQ", "JME_IENS", "JME_IENC", + "JME_PCCRX0", "JME_PCCRX1", "JME_PCCRX2", "JME_PCCRX3", + "JME_PCCTX0", "JME_CHIPMODE", "JME_SHBA_HI", "JME_SHBA_LO", + "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", + "JME_TIMER1", "JME_TIMER2", "UNKNOWN", "JME_APMC", + "JME_PCCSRX0"}; + +static inline void reg_dbg(const struct jme_adapter *jme, + const char *msg, u32 val, u32 reg) +{ + const char *regname; + switch (reg & 0xF00) { + case 0x000: + regname = MAC_REG_NAME[(reg & 0xFF) >> 2]; + break; + case 0x400: + regname = PE_REG_NAME[(reg & 0xFF) >> 2]; + break; + case 0x800: + regname = MISC_REG_NAME[(reg & 0xFF) >> 2]; + break; + default: + regname = PE_REG_NAME[0]; + } + printk(KERN_DEBUG "%s: %-20s %08x@%s\n", jme->dev->name, + msg, val, regname); +} +#else +static inline void reg_dbg(const struct jme_adapter *jme, + const char *msg, u32 val, u32 reg) {} +#endif + +/* + * Read/Write MMaped I/O Registers + */ +static inline u32 jread32(struct jme_adapter *jme, u32 reg) +{ + return readl(jme->regs + reg); +} + +static inline void jwrite32(struct jme_adapter *jme, u32 reg, u32 val) +{ + reg_dbg(jme, "REG WRITE", val, reg); + writel(val, jme->regs + reg); + reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg); +} + +static inline void jwrite32f(struct jme_adapter *jme, u32 reg, u32 val) +{ + /* + * Read after write should cause flush + */ + reg_dbg(jme, "REG WRITE FLUSH", val, reg); + writel(val, jme->regs + reg); + readl(jme->regs + reg); + reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg); +} + +/* + * PHY Regs + */ +enum jme_phy_reg17_bit_masks { + PREG17_SPEED = 0xC000, + PREG17_DUPLEX = 0x2000, + PREG17_SPDRSV = 0x0800, + PREG17_LNKUP = 0x0400, + PREG17_MDI = 0x0040, +}; + +enum jme_phy_reg17_vals { + PREG17_SPEED_10M = 0x0000, + PREG17_SPEED_100M = 0x4000, + PREG17_SPEED_1000M = 0x8000, +}; + +#define BMSR_ANCOMP 0x0020 + +/* + * Workaround + */ +static inline int is_buggy250(unsigned short device, u8 chiprev) +{ + return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; +} + +static inline int new_phy_power_ctrl(u8 chip_main_rev) +{ + return chip_main_rev >= 5; +} + +/* + * Function prototypes + */ +static int jme_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd); +static void jme_set_unicastaddr(struct net_device *netdev); +static void jme_set_multi(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c new file mode 100644 index 000000000..d74f5f4e5 --- /dev/null +++ b/drivers/net/ethernet/korina.c @@ -0,0 
+1,1230 @@ +/* + * Driver for the IDT RC32434 (Korina) on-chip ethernet controller. + * + * Copyright 2004 IDT Inc. (rischelp@idt.com) + * Copyright 2006 Felix Fietkau + * Copyright 2008 Florian Fainelli + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Writing to a DMA status register: + * + * When writing to the status register, you should mask the bit you have + * been testing the status register with. Both Tx and Rx DMA registers + * should stick to this procedure. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define DRV_NAME "korina" +#define DRV_VERSION "0.10" +#define DRV_RELDATE "04Mar2008" + +#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \ + ((dev)->dev_addr[1])) +#define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \ + ((dev)->dev_addr[3] << 16) | \ + ((dev)->dev_addr[4] << 8) | \ + ((dev)->dev_addr[5])) + +#define MII_CLOCK 1250000 /* no more than 2.5MHz */ + +/* the following must be powers of two */ +#define KORINA_NUM_RDS 64 /* number of receive descriptors */ +#define KORINA_NUM_TDS 64 /* number of transmit descriptors */ + +/* KORINA_RBSIZE is the hardware's default maximum receive + * frame size in bytes. Having this hardcoded means that there + * is no support for MTU sizes greater than 1500. */ +#define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU */ +#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1) +#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1) +#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc)) +#define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc)) + +#define TX_TIMEOUT (6000 * HZ / 1000) + +enum chain_status { desc_filled, desc_empty }; +#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0) +#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0) +#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT) + +/* Information that need to be kept for each board. 
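/*
 * Illustrative sketch (not part of the original korina.c): the "mask the
 * bit you have been testing" rule from the header comment above, as the
 * driver applies it further down (writel(~DMA_STAT_DONE, ...) and similar
 * acknowledgements in the Rx/Tx paths).  The DMA_STAT_* values and the
 * korina_ack_value() helper below are demo assumptions, not the real
 * register layout; the driver open-codes the complement at each call site.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_STAT_FINI 0x01u     /* demo bit values only */
#define DMA_STAT_DONE 0x02u
#define DMA_STAT_HALT 0x04u
#define DMA_STAT_ERR  0x08u

/*
 * The driver acknowledges by writing the complement of the bits it has just
 * handled; per the comment above, the intent is to avoid clearing status
 * bits that have not been looked at yet.
 */
static uint32_t korina_ack_value(uint32_t serviced)
{
        return ~serviced;       /* value that would be written to dmas */
}

int main(void)
{
        printf("ack DONE     -> 0x%08" PRIx32 "\n",
               korina_ack_value(DMA_STAT_DONE));
        printf("ack HALT|ERR -> 0x%08" PRIx32 "\n",
               korina_ack_value(DMA_STAT_HALT | DMA_STAT_ERR));
        return 0;
}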
*/ +struct korina_private { + struct eth_regs *eth_regs; + struct dma_reg *rx_dma_regs; + struct dma_reg *tx_dma_regs; + struct dma_desc *td_ring; /* transmit descriptor ring */ + struct dma_desc *rd_ring; /* receive descriptor ring */ + + struct sk_buff *tx_skb[KORINA_NUM_TDS]; + struct sk_buff *rx_skb[KORINA_NUM_RDS]; + + int rx_next_done; + int rx_chain_head; + int rx_chain_tail; + enum chain_status rx_chain_status; + + int tx_next_done; + int tx_chain_head; + int tx_chain_tail; + enum chain_status tx_chain_status; + int tx_count; + int tx_full; + + int rx_irq; + int tx_irq; + int ovr_irq; + int und_irq; + + spinlock_t lock; /* NIC xmit lock */ + + int dma_halt_cnt; + int dma_run_cnt; + struct napi_struct napi; + struct timer_list media_check_timer; + struct mii_if_info mii_if; + struct work_struct restart_task; + struct net_device *dev; + int phy_addr; +}; + +extern unsigned int idt_cpu_freq; + +static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr) +{ + writel(0, &ch->dmandptr); + writel(dma_addr, &ch->dmadptr); +} + +static inline void korina_abort_dma(struct net_device *dev, + struct dma_reg *ch) +{ + if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) { + writel(0x10, &ch->dmac); + + while (!(readl(&ch->dmas) & DMA_STAT_HALT)) + dev->trans_start = jiffies; + + writel(0, &ch->dmas); + } + + writel(0, &ch->dmadptr); + writel(0, &ch->dmandptr); +} + +static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr) +{ + writel(dma_addr, &ch->dmandptr); +} + +static void korina_abort_tx(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + + korina_abort_dma(dev, lp->tx_dma_regs); +} + +static void korina_abort_rx(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + + korina_abort_dma(dev, lp->rx_dma_regs); +} + +static void korina_start_rx(struct korina_private *lp, + struct dma_desc *rd) +{ + korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd)); +} + +static void korina_chain_rx(struct korina_private *lp, + struct dma_desc *rd) +{ + korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd)); +} + +/* transmit packet */ +static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + unsigned long flags; + u32 length; + u32 chain_prev, chain_next; + struct dma_desc *td; + + spin_lock_irqsave(&lp->lock, flags); + + td = &lp->td_ring[lp->tx_chain_tail]; + + /* stop queue when full, drop pkts if queue already full */ + if (lp->tx_count >= (KORINA_NUM_TDS - 2)) { + lp->tx_full = 1; + + if (lp->tx_count == (KORINA_NUM_TDS - 2)) + netif_stop_queue(dev); + else { + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_BUSY; + } + } + + lp->tx_count++; + + lp->tx_skb[lp->tx_chain_tail] = skb; + + length = skb->len; + dma_cache_wback((u32)skb->data, skb->len); + + /* Setup the transmit descriptor. 
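/*
 * Illustrative sketch (not part of the original korina.c): why
 * KORINA_NUM_TDS/KORINA_NUM_RDS must be powers of two, as noted above.
 * With MASK = NUM - 1, "& MASK" is a cheap modulo, so the chain_prev /
 * chain_next arithmetic used just below in korina_send_packet() wraps
 * cleanly at both ends of the descriptor ring.  The ring_next()/ring_prev()
 * helper names are mine.
 */
#include <stdio.h>

#define NUM_TDS  64                     /* must be a power of two */
#define TDS_MASK (NUM_TDS - 1)

static int ring_next(int i) { return (i + 1) & TDS_MASK; }
static int ring_prev(int i) { return (i - 1) & TDS_MASK; }

int main(void)
{
        printf("next of 62 -> %d\n", ring_next(62));   /* 63 */
        printf("next of 63 -> %d\n", ring_next(63));   /* wraps to 0 */
        printf("prev of 0  -> %d\n", ring_prev(0));    /* wraps to 63 */
        return 0;
}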
*/ + dma_cache_inv((u32) td, sizeof(*td)); + td->ca = CPHYSADDR(skb->data); + chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK; + chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK; + + if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) { + if (lp->tx_chain_status == desc_empty) { + /* Update tail */ + td->control = DMA_COUNT(length) | + DMA_DESC_COF | DMA_DESC_IOF; + /* Move tail */ + lp->tx_chain_tail = chain_next; + /* Write to NDPTR */ + writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), + &lp->tx_dma_regs->dmandptr); + /* Move head to tail */ + lp->tx_chain_head = lp->tx_chain_tail; + } else { + /* Update tail */ + td->control = DMA_COUNT(length) | + DMA_DESC_COF | DMA_DESC_IOF; + /* Link to prev */ + lp->td_ring[chain_prev].control &= + ~DMA_DESC_COF; + /* Link to prev */ + lp->td_ring[chain_prev].link = CPHYSADDR(td); + /* Move tail */ + lp->tx_chain_tail = chain_next; + /* Write to NDPTR */ + writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), + &(lp->tx_dma_regs->dmandptr)); + /* Move head to tail */ + lp->tx_chain_head = lp->tx_chain_tail; + lp->tx_chain_status = desc_empty; + } + } else { + if (lp->tx_chain_status == desc_empty) { + /* Update tail */ + td->control = DMA_COUNT(length) | + DMA_DESC_COF | DMA_DESC_IOF; + /* Move tail */ + lp->tx_chain_tail = chain_next; + lp->tx_chain_status = desc_filled; + } else { + /* Update tail */ + td->control = DMA_COUNT(length) | + DMA_DESC_COF | DMA_DESC_IOF; + lp->td_ring[chain_prev].control &= + ~DMA_DESC_COF; + lp->td_ring[chain_prev].link = CPHYSADDR(td); + lp->tx_chain_tail = chain_next; + } + } + dma_cache_wback((u32) td, sizeof(*td)); + + dev->trans_start = jiffies; + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_OK; +} + +static int mdio_read(struct net_device *dev, int mii_id, int reg) +{ + struct korina_private *lp = netdev_priv(dev); + int ret; + + mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8); + + writel(0, &lp->eth_regs->miimcfg); + writel(0, &lp->eth_regs->miimcmd); + writel(mii_id | reg, &lp->eth_regs->miimaddr); + writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd); + + ret = (int)(readl(&lp->eth_regs->miimrdd)); + return ret; +} + +static void mdio_write(struct net_device *dev, int mii_id, int reg, int val) +{ + struct korina_private *lp = netdev_priv(dev); + + mii_id = ((lp->rx_irq == 0x2c ? 
1 : 0) << 8); + + writel(0, &lp->eth_regs->miimcfg); + writel(1, &lp->eth_regs->miimcmd); + writel(mii_id | reg, &lp->eth_regs->miimaddr); + writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd); + writel(val, &lp->eth_regs->miimwtd); +} + +/* Ethernet Rx DMA interrupt */ +static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct korina_private *lp = netdev_priv(dev); + u32 dmas, dmasm; + irqreturn_t retval; + + dmas = readl(&lp->rx_dma_regs->dmas); + if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) { + dmasm = readl(&lp->rx_dma_regs->dmasm); + writel(dmasm | (DMA_STAT_DONE | + DMA_STAT_HALT | DMA_STAT_ERR), + &lp->rx_dma_regs->dmasm); + + napi_schedule(&lp->napi); + + if (dmas & DMA_STAT_ERR) + printk(KERN_ERR "%s: DMA error\n", dev->name); + + retval = IRQ_HANDLED; + } else + retval = IRQ_NONE; + + return retval; +} + +static int korina_rx(struct net_device *dev, int limit) +{ + struct korina_private *lp = netdev_priv(dev); + struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done]; + struct sk_buff *skb, *skb_new; + u8 *pkt_buf; + u32 devcs, pkt_len, dmas; + int count; + + dma_cache_inv((u32)rd, sizeof(*rd)); + + for (count = 0; count < limit; count++) { + skb = lp->rx_skb[lp->rx_next_done]; + skb_new = NULL; + + devcs = rd->devcs; + + if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0) + break; + + /* Update statistics counters */ + if (devcs & ETH_RX_CRC) + dev->stats.rx_crc_errors++; + if (devcs & ETH_RX_LOR) + dev->stats.rx_length_errors++; + if (devcs & ETH_RX_LE) + dev->stats.rx_length_errors++; + if (devcs & ETH_RX_OVR) + dev->stats.rx_fifo_errors++; + if (devcs & ETH_RX_CV) + dev->stats.rx_frame_errors++; + if (devcs & ETH_RX_CES) + dev->stats.rx_length_errors++; + if (devcs & ETH_RX_MP) + dev->stats.multicast++; + + if ((devcs & ETH_RX_LD) != ETH_RX_LD) { + /* check that this is a whole packet + * WARNING: DMA_FD bit incorrectly set + * in Rc32434 (errata ref #077) */ + dev->stats.rx_errors++; + dev->stats.rx_dropped++; + } else if ((devcs & ETH_RX_ROK)) { + pkt_len = RCVPKT_LENGTH(devcs); + + /* must be the (first and) last + * descriptor then */ + pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; + + /* invalidate the cache */ + dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); + + /* Malloc up new buffer. 
*/ + skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); + + if (!skb_new) + break; + /* Do not count the CRC */ + skb_put(skb, pkt_len - 4); + skb->protocol = eth_type_trans(skb, dev); + + /* Pass the packet to upper layers */ + netif_receive_skb(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + + /* Update the mcast stats */ + if (devcs & ETH_RX_MP) + dev->stats.multicast++; + + lp->rx_skb[lp->rx_next_done] = skb_new; + } + + rd->devcs = 0; + + /* Restore descriptor's curr_addr */ + if (skb_new) + rd->ca = CPHYSADDR(skb_new->data); + else + rd->ca = CPHYSADDR(skb->data); + + rd->control = DMA_COUNT(KORINA_RBSIZE) | + DMA_DESC_COD | DMA_DESC_IOD; + lp->rd_ring[(lp->rx_next_done - 1) & + KORINA_RDS_MASK].control &= + ~DMA_DESC_COD; + + lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK; + dma_cache_wback((u32)rd, sizeof(*rd)); + rd = &lp->rd_ring[lp->rx_next_done]; + writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas); + } + + dmas = readl(&lp->rx_dma_regs->dmas); + + if (dmas & DMA_STAT_HALT) { + writel(~(DMA_STAT_HALT | DMA_STAT_ERR), + &lp->rx_dma_regs->dmas); + + lp->dma_halt_cnt++; + rd->devcs = 0; + skb = lp->rx_skb[lp->rx_next_done]; + rd->ca = CPHYSADDR(skb->data); + dma_cache_wback((u32)rd, sizeof(*rd)); + korina_chain_rx(lp, rd); + } + + return count; +} + +static int korina_poll(struct napi_struct *napi, int budget) +{ + struct korina_private *lp = + container_of(napi, struct korina_private, napi); + struct net_device *dev = lp->dev; + int work_done; + + work_done = korina_rx(dev, budget); + if (work_done < budget) { + napi_complete(napi); + + writel(readl(&lp->rx_dma_regs->dmasm) & + ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), + &lp->rx_dma_regs->dmasm); + } + return work_done; +} + +/* + * Set or clear the multicast filter for this adaptor. 
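/*
 * Illustrative sketch (not part of the original korina.c): how
 * korina_multicast_list() just below derives a hash-table bit from a
 * multicast address.  crc32_le_bitwise() is a plain bitwise stand-in for
 * the kernel's ether_crc_le() (reflected CRC-32, initial value ~0, no
 * final xor); the top 6 bits of the CRC select one of 64 filter bits,
 * split across four 16-bit hash words.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le_bitwise(const unsigned char *p, int len)
{
        uint32_t crc = 0xffffffffu;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;
}

int main(void)
{
        const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t bucket = crc32_le_bitwise(mc, 6) >> 26;  /* 6 bits: 0..63 */
        unsigned int word = bucket >> 4;        /* which 16-bit hash word */
        unsigned int bit = 15 - (bucket & 0xf); /* bit inside that word */

        printf("bucket %u -> hash_table[%u] |= 1 << %u\n",
               (unsigned int)bucket, word, bit);
        return 0;
}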
+ */ +static void korina_multicast_list(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + unsigned long flags; + struct netdev_hw_addr *ha; + u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ + + /* Set promiscuous mode */ + if (dev->flags & IFF_PROMISC) + recognise |= ETH_ARC_PRO; + + else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4)) + /* All multicast and broadcast */ + recognise |= ETH_ARC_AM; + + /* Build the hash table */ + if (netdev_mc_count(dev) > 4) { + u16 hash_table[4] = { 0 }; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc >>= 26; + hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); + } + /* Accept filtered multicast */ + recognise |= ETH_ARC_AFM; + + /* Fill the MAC hash tables with their values */ + writel((u32)(hash_table[1] << 16 | hash_table[0]), + &lp->eth_regs->ethhash0); + writel((u32)(hash_table[3] << 16 | hash_table[2]), + &lp->eth_regs->ethhash1); + } + + spin_lock_irqsave(&lp->lock, flags); + writel(recognise, &lp->eth_regs->etharc); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static void korina_tx(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + struct dma_desc *td = &lp->td_ring[lp->tx_next_done]; + u32 devcs; + u32 dmas; + + spin_lock(&lp->lock); + + /* Process all desc that are done */ + while (IS_DMA_FINISHED(td->control)) { + if (lp->tx_full == 1) { + netif_wake_queue(dev); + lp->tx_full = 0; + } + + devcs = lp->td_ring[lp->tx_next_done].devcs; + if ((devcs & (ETH_TX_FD | ETH_TX_LD)) != + (ETH_TX_FD | ETH_TX_LD)) { + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + + /* Should never happen */ + printk(KERN_ERR "%s: split tx ignored\n", + dev->name); + } else if (devcs & ETH_TX_TOK) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += + lp->tx_skb[lp->tx_next_done]->len; + } else { + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + + /* Underflow */ + if (devcs & ETH_TX_UND) + dev->stats.tx_fifo_errors++; + + /* Oversized frame */ + if (devcs & ETH_TX_OF) + dev->stats.tx_aborted_errors++; + + /* Excessive deferrals */ + if (devcs & ETH_TX_ED) + dev->stats.tx_carrier_errors++; + + /* Collisions: medium busy */ + if (devcs & ETH_TX_EC) + dev->stats.collisions++; + + /* Late collision */ + if (devcs & ETH_TX_LC) + dev->stats.tx_window_errors++; + } + + /* We must always free the original skb */ + if (lp->tx_skb[lp->tx_next_done]) { + dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]); + lp->tx_skb[lp->tx_next_done] = NULL; + } + + lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF; + lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD; + lp->td_ring[lp->tx_next_done].link = 0; + lp->td_ring[lp->tx_next_done].ca = 0; + lp->tx_count--; + + /* Go on to next transmission */ + lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK; + td = &lp->td_ring[lp->tx_next_done]; + + } + + /* Clear the DMA status register */ + dmas = readl(&lp->tx_dma_regs->dmas); + writel(~dmas, &lp->tx_dma_regs->dmas); + + writel(readl(&lp->tx_dma_regs->dmasm) & + ~(DMA_STAT_FINI | DMA_STAT_ERR), + &lp->tx_dma_regs->dmasm); + + spin_unlock(&lp->lock); +} + +static irqreturn_t +korina_tx_dma_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct korina_private *lp = netdev_priv(dev); + u32 dmas, dmasm; + irqreturn_t retval; + + dmas = readl(&lp->tx_dma_regs->dmas); + + if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) { + dmasm = readl(&lp->tx_dma_regs->dmasm); + writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR), + 
&lp->tx_dma_regs->dmasm); + + korina_tx(dev); + + if (lp->tx_chain_status == desc_filled && + (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) { + writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), + &(lp->tx_dma_regs->dmandptr)); + lp->tx_chain_status = desc_empty; + lp->tx_chain_head = lp->tx_chain_tail; + dev->trans_start = jiffies; + } + if (dmas & DMA_STAT_ERR) + printk(KERN_ERR "%s: DMA error\n", dev->name); + + retval = IRQ_HANDLED; + } else + retval = IRQ_NONE; + + return retval; +} + + +static void korina_check_media(struct net_device *dev, unsigned int init_media) +{ + struct korina_private *lp = netdev_priv(dev); + + mii_check_media(&lp->mii_if, 0, init_media); + + if (lp->mii_if.full_duplex) + writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD, + &lp->eth_regs->ethmac2); + else + writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD, + &lp->eth_regs->ethmac2); +} + +static void korina_poll_media(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct korina_private *lp = netdev_priv(dev); + + korina_check_media(dev, 0); + mod_timer(&lp->media_check_timer, jiffies + HZ); +} + +static void korina_set_carrier(struct mii_if_info *mii) +{ + if (mii->force_media) { + /* autoneg is off: Link is always assumed to be up */ + if (!netif_carrier_ok(mii->dev)) + netif_carrier_on(mii->dev); + } else /* Let MMI library update carrier status */ + korina_check_media(mii->dev, 0); +} + +static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct korina_private *lp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(rq); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + spin_lock_irq(&lp->lock); + rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL); + spin_unlock_irq(&lp->lock); + korina_set_carrier(&lp->mii_if); + + return rc; +} + +/* ethtool helpers */ +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct korina_private *lp = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct korina_private *lp = netdev_priv(dev); + int rc; + + spin_lock_irq(&lp->lock); + rc = mii_ethtool_gset(&lp->mii_if, cmd); + spin_unlock_irq(&lp->lock); + + return rc; +} + +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct korina_private *lp = netdev_priv(dev); + int rc; + + spin_lock_irq(&lp->lock); + rc = mii_ethtool_sset(&lp->mii_if, cmd); + spin_unlock_irq(&lp->lock); + korina_set_carrier(&lp->mii_if); + + return rc; +} + +static u32 netdev_get_link(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + + return mii_link_ok(&lp->mii_if); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .get_link = netdev_get_link, +}; + +static int korina_alloc_ring(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + struct sk_buff *skb; + int i; + + /* Initialize the transmit descriptors */ + for (i = 0; i < KORINA_NUM_TDS; i++) { + lp->td_ring[i].control = DMA_DESC_IOF; + lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD; + lp->td_ring[i].ca = 0; + lp->td_ring[i].link = 0; + } + lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail = + lp->tx_full = 
lp->tx_count = 0; + lp->tx_chain_status = desc_empty; + + /* Initialize the receive descriptors */ + for (i = 0; i < KORINA_NUM_RDS; i++) { + skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); + if (!skb) + return -ENOMEM; + lp->rx_skb[i] = skb; + lp->rd_ring[i].control = DMA_DESC_IOD | + DMA_COUNT(KORINA_RBSIZE); + lp->rd_ring[i].devcs = 0; + lp->rd_ring[i].ca = CPHYSADDR(skb->data); + lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]); + } + + /* loop back receive descriptors, so the last + * descriptor points to the first one */ + lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]); + lp->rd_ring[i - 1].control |= DMA_DESC_COD; + + lp->rx_next_done = 0; + lp->rx_chain_head = 0; + lp->rx_chain_tail = 0; + lp->rx_chain_status = desc_empty; + + return 0; +} + +static void korina_free_ring(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + int i; + + for (i = 0; i < KORINA_NUM_RDS; i++) { + lp->rd_ring[i].control = 0; + if (lp->rx_skb[i]) + dev_kfree_skb_any(lp->rx_skb[i]); + lp->rx_skb[i] = NULL; + } + + for (i = 0; i < KORINA_NUM_TDS; i++) { + lp->td_ring[i].control = 0; + if (lp->tx_skb[i]) + dev_kfree_skb_any(lp->tx_skb[i]); + lp->tx_skb[i] = NULL; + } +} + +/* + * Initialize the RC32434 ethernet controller. + */ +static int korina_init(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + + /* Disable DMA */ + korina_abort_tx(dev); + korina_abort_rx(dev); + + /* reset ethernet logic */ + writel(0, &lp->eth_regs->ethintfc); + while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP)) + dev->trans_start = jiffies; + + /* Enable Ethernet Interface */ + writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc); + + /* Allocate rings */ + if (korina_alloc_ring(dev)) { + printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name); + korina_free_ring(dev); + return -ENOMEM; + } + + writel(0, &lp->rx_dma_regs->dmas); + /* Start Rx DMA */ + korina_start_rx(lp, &lp->rd_ring[0]); + + writel(readl(&lp->tx_dma_regs->dmasm) & + ~(DMA_STAT_FINI | DMA_STAT_ERR), + &lp->tx_dma_regs->dmasm); + writel(readl(&lp->rx_dma_regs->dmasm) & + ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), + &lp->rx_dma_regs->dmasm); + + /* Accept only packets destined for this Ethernet device address */ + writel(ETH_ARC_AB, &lp->eth_regs->etharc); + + /* Set all Ether station address registers to their initial values */ + writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0); + writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0); + + writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1); + writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1); + + writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2); + writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2); + + writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3); + writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3); + + + /* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */ + writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD, + &lp->eth_regs->ethmac2); + + /* Back to back inter-packet-gap */ + writel(0x15, &lp->eth_regs->ethipgt); + /* Non - Back to back inter-packet-gap */ + writel(0x12, &lp->eth_regs->ethipgr); + + /* Management Clock Prescaler Divisor + * Clock independent setting */ + writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1, + &lp->eth_regs->ethmcp); + + /* don't transmit until fifo contains 48b */ + writel(48, &lp->eth_regs->ethfifott); + + writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1); + + napi_enable(&lp->napi); + netif_start_queue(dev); + + return 0; +} + +/* + * Restart 
the RC32434 ethernet controller. + */ +static void korina_restart_task(struct work_struct *work) +{ + struct korina_private *lp = container_of(work, + struct korina_private, restart_task); + struct net_device *dev = lp->dev; + + /* + * Disable interrupts + */ + disable_irq(lp->rx_irq); + disable_irq(lp->tx_irq); + disable_irq(lp->ovr_irq); + disable_irq(lp->und_irq); + + writel(readl(&lp->tx_dma_regs->dmasm) | + DMA_STAT_FINI | DMA_STAT_ERR, + &lp->tx_dma_regs->dmasm); + writel(readl(&lp->rx_dma_regs->dmasm) | + DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, + &lp->rx_dma_regs->dmasm); + + korina_free_ring(dev); + + napi_disable(&lp->napi); + + if (korina_init(dev) < 0) { + printk(KERN_ERR "%s: cannot restart device\n", dev->name); + return; + } + korina_multicast_list(dev); + + enable_irq(lp->und_irq); + enable_irq(lp->ovr_irq); + enable_irq(lp->tx_irq); + enable_irq(lp->rx_irq); +} + +static void korina_clear_and_restart(struct net_device *dev, u32 value) +{ + struct korina_private *lp = netdev_priv(dev); + + netif_stop_queue(dev); + writel(value, &lp->eth_regs->ethintfc); + schedule_work(&lp->restart_task); +} + +/* Ethernet Tx Underflow interrupt */ +static irqreturn_t korina_und_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct korina_private *lp = netdev_priv(dev); + unsigned int und; + + spin_lock(&lp->lock); + + und = readl(&lp->eth_regs->ethintfc); + + if (und & ETH_INT_FC_UND) + korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND); + + spin_unlock(&lp->lock); + + return IRQ_HANDLED; +} + +static void korina_tx_timeout(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + + schedule_work(&lp->restart_task); +} + +/* Ethernet Rx Overflow interrupt */ +static irqreturn_t +korina_ovr_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct korina_private *lp = netdev_priv(dev); + unsigned int ovr; + + spin_lock(&lp->lock); + ovr = readl(&lp->eth_regs->ethintfc); + + if (ovr & ETH_INT_FC_OVR) + korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR); + + spin_unlock(&lp->lock); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void korina_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + korina_tx_dma_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static int korina_open(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + int ret; + + /* Initialize */ + ret = korina_init(dev); + if (ret < 0) { + printk(KERN_ERR "%s: cannot open device\n", dev->name); + goto out; + } + + /* Install the interrupt handler + * that handles the Done Finished + * Ovr and Und Events */ + ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt, + 0, "Korina ethernet Rx", dev); + if (ret < 0) { + printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n", + dev->name, lp->rx_irq); + goto err_release; + } + ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt, + 0, "Korina ethernet Tx", dev); + if (ret < 0) { + printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n", + dev->name, lp->tx_irq); + goto err_free_rx_irq; + } + + /* Install handler for overrun error. */ + ret = request_irq(lp->ovr_irq, korina_ovr_interrupt, + 0, "Ethernet Overflow", dev); + if (ret < 0) { + printk(KERN_ERR "%s: unable to get OVR IRQ %d\n", + dev->name, lp->ovr_irq); + goto err_free_tx_irq; + } + + /* Install handler for underflow error. 
*/ + ret = request_irq(lp->und_irq, korina_und_interrupt, + 0, "Ethernet Underflow", dev); + if (ret < 0) { + printk(KERN_ERR "%s: unable to get UND IRQ %d\n", + dev->name, lp->und_irq); + goto err_free_ovr_irq; + } + mod_timer(&lp->media_check_timer, jiffies + 1); +out: + return ret; + +err_free_ovr_irq: + free_irq(lp->ovr_irq, dev); +err_free_tx_irq: + free_irq(lp->tx_irq, dev); +err_free_rx_irq: + free_irq(lp->rx_irq, dev); +err_release: + korina_free_ring(dev); + goto out; +} + +static int korina_close(struct net_device *dev) +{ + struct korina_private *lp = netdev_priv(dev); + u32 tmp; + + del_timer(&lp->media_check_timer); + + /* Disable interrupts */ + disable_irq(lp->rx_irq); + disable_irq(lp->tx_irq); + disable_irq(lp->ovr_irq); + disable_irq(lp->und_irq); + + korina_abort_tx(dev); + tmp = readl(&lp->tx_dma_regs->dmasm); + tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR; + writel(tmp, &lp->tx_dma_regs->dmasm); + + korina_abort_rx(dev); + tmp = readl(&lp->rx_dma_regs->dmasm); + tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; + writel(tmp, &lp->rx_dma_regs->dmasm); + + korina_free_ring(dev); + + napi_disable(&lp->napi); + + cancel_work_sync(&lp->restart_task); + + free_irq(lp->rx_irq, dev); + free_irq(lp->tx_irq, dev); + free_irq(lp->ovr_irq, dev); + free_irq(lp->und_irq, dev); + + return 0; +} + +static const struct net_device_ops korina_netdev_ops = { + .ndo_open = korina_open, + .ndo_stop = korina_close, + .ndo_start_xmit = korina_send_packet, + .ndo_set_rx_mode = korina_multicast_list, + .ndo_tx_timeout = korina_tx_timeout, + .ndo_do_ioctl = korina_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = korina_poll_controller, +#endif +}; + +static int korina_probe(struct platform_device *pdev) +{ + struct korina_device *bif = platform_get_drvdata(pdev); + struct korina_private *lp; + struct net_device *dev; + struct resource *r; + int rc; + + dev = alloc_etherdev(sizeof(struct korina_private)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + lp = netdev_priv(dev); + + bif->dev = dev; + memcpy(dev->dev_addr, bif->mac, ETH_ALEN); + + lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx"); + lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx"); + lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr"); + lp->und_irq = platform_get_irq_byname(pdev, "korina_und"); + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); + dev->base_addr = r->start; + lp->eth_regs = ioremap_nocache(r->start, resource_size(r)); + if (!lp->eth_regs) { + printk(KERN_ERR DRV_NAME ": cannot remap registers\n"); + rc = -ENXIO; + goto probe_err_out; + } + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx"); + lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r)); + if (!lp->rx_dma_regs) { + printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n"); + rc = -ENXIO; + goto probe_err_dma_rx; + } + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx"); + lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r)); + if (!lp->tx_dma_regs) { + printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n"); + rc = -ENXIO; + goto probe_err_dma_tx; + } + + lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL); + if (!lp->td_ring) { + rc = -ENXIO; + goto probe_err_td_ring; + } + + dma_cache_inv((unsigned long)(lp->td_ring), + TD_RING_SIZE + RD_RING_SIZE); + + /* now convert TD_RING 
pointer to KSEG1 */ + lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring); + lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS]; + + spin_lock_init(&lp->lock); + /* just use the rx dma irq */ + dev->irq = lp->rx_irq; + lp->dev = dev; + + dev->netdev_ops = &korina_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + netif_napi_add(dev, &lp->napi, korina_poll, 64); + + lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05); + lp->mii_if.dev = dev; + lp->mii_if.mdio_read = mdio_read; + lp->mii_if.mdio_write = mdio_write; + lp->mii_if.phy_id = lp->phy_addr; + lp->mii_if.phy_id_mask = 0x1f; + lp->mii_if.reg_num_mask = 0x1f; + + rc = register_netdev(dev); + if (rc < 0) { + printk(KERN_ERR DRV_NAME + ": cannot register net device: %d\n", rc); + goto probe_err_register; + } + setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev); + + INIT_WORK(&lp->restart_task, korina_restart_task); + + printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n", + dev->name); +out: + return rc; + +probe_err_register: + kfree(lp->td_ring); +probe_err_td_ring: + iounmap(lp->tx_dma_regs); +probe_err_dma_tx: + iounmap(lp->rx_dma_regs); +probe_err_dma_rx: + iounmap(lp->eth_regs); +probe_err_out: + free_netdev(dev); + goto out; +} + +static int korina_remove(struct platform_device *pdev) +{ + struct korina_device *bif = platform_get_drvdata(pdev); + struct korina_private *lp = netdev_priv(bif->dev); + + iounmap(lp->eth_regs); + iounmap(lp->rx_dma_regs); + iounmap(lp->tx_dma_regs); + + unregister_netdev(bif->dev); + free_netdev(bif->dev); + + return 0; +} + +static struct platform_driver korina_driver = { + .driver.name = "korina", + .probe = korina_probe, + .remove = korina_remove, +}; + +module_platform_driver(korina_driver); + +MODULE_AUTHOR("Philip Rischel "); +MODULE_AUTHOR("Felix Fietkau "); +MODULE_AUTHOR("Florian Fainelli "); +MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c new file mode 100644 index 000000000..581928c06 --- /dev/null +++ b/drivers/net/ethernet/lantiq_etop.c @@ -0,0 +1,816 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ * + * Copyright (C) 2011 John Crispin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#define LTQ_ETOP_MDIO 0x11804 +#define MDIO_REQUEST 0x80000000 +#define MDIO_READ 0x40000000 +#define MDIO_ADDR_MASK 0x1f +#define MDIO_ADDR_OFFSET 0x15 +#define MDIO_REG_MASK 0x1f +#define MDIO_REG_OFFSET 0x10 +#define MDIO_VAL_MASK 0xffff + +#define PPE32_CGEN 0x800 +#define LQ_PPE32_ENET_MAC_CFG 0x1840 + +#define LTQ_ETOP_ENETS0 0x11850 +#define LTQ_ETOP_MAC_DA0 0x1186C +#define LTQ_ETOP_MAC_DA1 0x11870 +#define LTQ_ETOP_CFG 0x16020 +#define LTQ_ETOP_IGPLEN 0x16080 + +#define MAX_DMA_CHAN 0x8 +#define MAX_DMA_CRC_LEN 0x4 +#define MAX_DMA_DATA_LEN 0x600 + +#define ETOP_FTCU BIT(28) +#define ETOP_MII_MASK 0xf +#define ETOP_MII_NORMAL 0xd +#define ETOP_MII_REVERSE 0xe +#define ETOP_PLEN_UNDER 0x40 +#define ETOP_CGEN 0x800 + +/* use 2 static channels for TX/RX */ +#define LTQ_ETOP_TX_CHANNEL 1 +#define LTQ_ETOP_RX_CHANNEL 6 +#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL) +#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL) + +#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x)) +#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y)) +#define ltq_etop_w32_mask(x, y, z) \ + ltq_w32_mask(x, y, ltq_etop_membase + (z)) + +#define DRV_VERSION "1.0" + +static void __iomem *ltq_etop_membase; + +struct ltq_etop_chan { + int idx; + int tx_free; + struct net_device *netdev; + struct napi_struct napi; + struct ltq_dma_channel dma; + struct sk_buff *skb[LTQ_DESC_NUM]; +}; + +struct ltq_etop_priv { + struct net_device *netdev; + struct platform_device *pdev; + struct ltq_eth_data *pldata; + struct resource *res; + + struct mii_bus *mii_bus; + struct phy_device *phydev; + + struct ltq_etop_chan ch[MAX_DMA_CHAN]; + int tx_free[MAX_DMA_CHAN >> 1]; + + spinlock_t lock; +}; + +static int +ltq_etop_alloc_skb(struct ltq_etop_chan *ch) +{ + ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN); + if (!ch->skb[ch->dma.desc]) + return -ENOMEM; + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, + ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, + DMA_FROM_DEVICE); + ch->dma.desc_base[ch->dma.desc].addr = + CPHYSADDR(ch->skb[ch->dma.desc]->data); + ch->dma.desc_base[ch->dma.desc].ctl = + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + MAX_DMA_DATA_LEN; + skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); + return 0; +} + +static void +ltq_etop_hw_receive(struct ltq_etop_chan *ch) +{ + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + struct sk_buff *skb = ch->skb[ch->dma.desc]; + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (ltq_etop_alloc_skb(ch)) { + netdev_err(ch->netdev, + "failed to allocate new rx buffer, stopping DMA\n"); + ltq_dma_close(&ch->dma); + } + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + spin_unlock_irqrestore(&priv->lock, flags); + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ch->netdev); + netif_receive_skb(skb); +} + +static int +ltq_etop_poll_rx(struct napi_struct *napi, int budget) +{ + struct ltq_etop_chan *ch = container_of(napi, + struct ltq_etop_chan, napi); + int rx = 0; + int complete = 0; + + while ((rx < budget) && !complete) { + struct ltq_dma_desc *desc = 
&ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + ltq_etop_hw_receive(ch); + rx++; + } else { + complete = 1; + } + } + if (complete || !rx) { + napi_complete(&ch->napi); + ltq_dma_ack_irq(&ch->dma); + } + return rx; +} + +static int +ltq_etop_poll_tx(struct napi_struct *napi, int budget) +{ + struct ltq_etop_chan *ch = + container_of(napi, struct ltq_etop_chan, napi); + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + struct netdev_queue *txq = + netdev_get_tx_queue(ch->netdev, ch->idx >> 1); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + while ((ch->dma.desc_base[ch->tx_free].ctl & + (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + dev_kfree_skb_any(ch->skb[ch->tx_free]); + ch->skb[ch->tx_free] = NULL; + memset(&ch->dma.desc_base[ch->tx_free], 0, + sizeof(struct ltq_dma_desc)); + ch->tx_free++; + ch->tx_free %= LTQ_DESC_NUM; + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (netif_tx_queue_stopped(txq)) + netif_tx_start_queue(txq); + napi_complete(&ch->napi); + ltq_dma_ack_irq(&ch->dma); + return 1; +} + +static irqreturn_t +ltq_etop_dma_irq(int irq, void *_priv) +{ + struct ltq_etop_priv *priv = _priv; + int ch = irq - LTQ_DMA_CH0_INT; + + napi_schedule(&priv->ch[ch].napi); + return IRQ_HANDLED; +} + +static void +ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + ltq_dma_free(&ch->dma); + if (ch->dma.irq) + free_irq(ch->dma.irq, priv); + if (IS_RX(ch->idx)) { + int desc; + for (desc = 0; desc < LTQ_DESC_NUM; desc++) + dev_kfree_skb_any(ch->skb[ch->dma.desc]); + } +} + +static void +ltq_etop_hw_exit(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + ltq_pmu_disable(PMU_PPE); + for (i = 0; i < MAX_DMA_CHAN; i++) + if (IS_TX(i) || IS_RX(i)) + ltq_etop_free_channel(dev, &priv->ch[i]); +} + +static int +ltq_etop_hw_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + ltq_pmu_enable(PMU_PPE); + + switch (priv->pldata->mii_mode) { + case PHY_INTERFACE_MODE_RMII: + ltq_etop_w32_mask(ETOP_MII_MASK, + ETOP_MII_REVERSE, LTQ_ETOP_CFG); + break; + + case PHY_INTERFACE_MODE_MII: + ltq_etop_w32_mask(ETOP_MII_MASK, + ETOP_MII_NORMAL, LTQ_ETOP_CFG); + break; + + default: + netdev_err(dev, "unknown mii mode %d\n", + priv->pldata->mii_mode); + return -ENOTSUPP; + } + + /* enable crc generation */ + ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); + + ltq_dma_init_port(DMA_PORT_ETOP); + + for (i = 0; i < MAX_DMA_CHAN; i++) { + int irq = LTQ_DMA_CH0_INT + i; + struct ltq_etop_chan *ch = &priv->ch[i]; + + ch->idx = ch->dma.nr = i; + + if (IS_TX(i)) { + ltq_dma_alloc_tx(&ch->dma); + request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv); + } else if (IS_RX(i)) { + ltq_dma_alloc_rx(&ch->dma); + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; + ch->dma.desc++) + if (ltq_etop_alloc_skb(ch)) + return -ENOMEM; + ch->dma.desc = 0; + request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv); + } + ch->dma.irq = irq; + } + return 0; +} + +static void +ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver)); + strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static int +ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return 
phy_ethtool_gset(priv->phydev, cmd); +} + +static int +ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int +ltq_etop_nway_reset(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_start_aneg(priv->phydev); +} + +static const struct ethtool_ops ltq_etop_ethtool_ops = { + .get_drvinfo = ltq_etop_get_drvinfo, + .get_settings = ltq_etop_get_settings, + .set_settings = ltq_etop_set_settings, + .nway_reset = ltq_etop_nway_reset, +}; + +static int +ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data) +{ + u32 val = MDIO_REQUEST | + ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | + ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) | + phy_data; + + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + ltq_etop_w32(val, LTQ_ETOP_MDIO); + return 0; +} + +static int +ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + u32 val = MDIO_REQUEST | MDIO_READ | + ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | + ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET); + + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + ltq_etop_w32(val, LTQ_ETOP_MDIO); + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK; + return val; +} + +static void +ltq_etop_mdio_link(struct net_device *dev) +{ + /* nothing to do */ +} + +static int +ltq_etop_mdio_probe(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (priv->mii_bus->phy_map[phy_addr]) { + phydev = priv->mii_bus->phy_map[phy_addr]; + break; + } + } + + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(dev, dev_name(&phydev->dev), + <q_etop_mdio_link, priv->pldata->mii_mode); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + priv->phydev = phydev; + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", + dev->name, phydev->drv->name, + dev_name(&phydev->dev), phydev->irq); + + return 0; +} + +static int +ltq_etop_mdio_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + int err; + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + netdev_err(dev, "failed to allocate mii bus\n"); + err = -ENOMEM; + goto err_out; + } + + priv->mii_bus->priv = dev; + priv->mii_bus->read = ltq_etop_mdio_rd; + priv->mii_bus->write = ltq_etop_mdio_wr; + priv->mii_bus->name = "ltq_mii"; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + priv->pdev->name, priv->pdev->id); + priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!priv->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; + } + + for (i = 0; i < PHY_MAX_ADDR; ++i) + priv->mii_bus->irq[i] = PHY_POLL; + + if (mdiobus_register(priv->mii_bus)) { + err = -ENXIO; + goto err_out_free_mdio_irq; + } + + if (ltq_etop_mdio_probe(dev)) { + err = -ENXIO; + goto err_out_unregister_bus; + } + return 0; + +err_out_unregister_bus: + mdiobus_unregister(priv->mii_bus); +err_out_free_mdio_irq: + 
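/*
 * Illustrative sketch (not part of the original lantiq_etop.c): the layout
 * of the LTQ_ETOP_MDIO request word built by ltq_etop_mdio_rd() and
 * ltq_etop_mdio_wr() above.  Constants are copied from the #defines near
 * the top of the file; the compose_*() helpers and main() are mine.  The
 * real driver additionally busy-waits until the hardware clears
 * MDIO_REQUEST before issuing a request and before reading the result.
 */
#include <stdint.h>
#include <stdio.h>

#define MDIO_REQUEST     0x80000000u
#define MDIO_READ        0x40000000u
#define MDIO_ADDR_MASK   0x1fu
#define MDIO_ADDR_OFFSET 0x15           /* PHY address at bits 25:21 */
#define MDIO_REG_MASK    0x1fu
#define MDIO_REG_OFFSET  0x10           /* register number at bits 20:16 */
#define MDIO_VAL_MASK    0xffffu        /* write data in the low 16 bits */

static uint32_t compose_mdio_read(int phy, int reg)
{
        return MDIO_REQUEST | MDIO_READ |
               ((phy & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
               ((reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
}

static uint32_t compose_mdio_write(int phy, int reg, uint16_t val)
{
        return MDIO_REQUEST |
               ((phy & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
               ((reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
               (val & MDIO_VAL_MASK);
}

int main(void)
{
        /* demo inputs: PHY at address 0, registers 1 and 0 */
        printf("read : 0x%08x\n", (unsigned int)compose_mdio_read(0, 1));
        printf("write: 0x%08x\n", (unsigned int)compose_mdio_write(0, 0, 0x3100));
        return 0;
}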
kfree(priv->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(priv->mii_bus); +err_out: + return err; +} + +static void +ltq_etop_mdio_cleanup(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); +} + +static int +ltq_etop_open(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + for (i = 0; i < MAX_DMA_CHAN; i++) { + struct ltq_etop_chan *ch = &priv->ch[i]; + + if (!IS_TX(i) && (!IS_RX(i))) + continue; + ltq_dma_open(&ch->dma); + napi_enable(&ch->napi); + } + phy_start(priv->phydev); + netif_tx_start_all_queues(dev); + return 0; +} + +static int +ltq_etop_stop(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + netif_tx_stop_all_queues(dev); + phy_stop(priv->phydev); + for (i = 0; i < MAX_DMA_CHAN; i++) { + struct ltq_etop_chan *ch = &priv->ch[i]; + + if (!IS_RX(i) && !IS_TX(i)) + continue; + napi_disable(&ch->napi); + ltq_dma_close(&ch->dma); + } + return 0; +} + +static int +ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) +{ + int queue = skb_get_queue_mapping(skb); + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); + struct ltq_etop_priv *priv = netdev_priv(dev); + struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + int len; + unsigned long flags; + u32 byte_offset; + + len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { + dev_kfree_skb_any(skb); + netdev_err(dev, "tx ring full\n"); + netif_tx_stop_queue(txq); + return NETDEV_TX_BUSY; + } + + /* dma needs to start on a 16 byte aligned address */ + byte_offset = CPHYSADDR(skb->data) % 16; + ch->skb[ch->dma.desc] = skb; + + dev->trans_start = jiffies; + + spin_lock_irqsave(&priv->lock, flags); + desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, + DMA_TO_DEVICE)) - byte_offset; + wmb(); + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + spin_unlock_irqrestore(&priv->lock, flags); + + if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN) + netif_tx_stop_queue(txq); + + return NETDEV_TX_OK; +} + +static int +ltq_etop_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret = eth_change_mtu(dev, new_mtu); + + if (!ret) { + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, + LTQ_ETOP_IGPLEN); + spin_unlock_irqrestore(&priv->lock, flags); + } + return ret; +} + +static int +ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + /* TODO: mii-toll reports "No MII transceiver present!." 
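/*
 * Illustrative sketch (not part of the original lantiq_etop.c): the
 * 16-byte alignment handling in ltq_etop_tx() above.  Per the driver's own
 * comment, the DMA engine needs a 16-byte-aligned start address, so the
 * buffer address is rounded down and the number of leading bytes to skip
 * is passed separately (via LTQ_DMA_TX_OFFSET() in the descriptor control
 * word).  The demo_tx_desc struct and the example address are made-up demo
 * values, not the hardware descriptor layout.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_tx_desc {
        uint32_t addr;                  /* aligned DMA start address */
        unsigned int byte_offset;       /* leading bytes the engine skips */
};

static struct demo_tx_desc prepare_tx(uint32_t buf_phys)
{
        struct demo_tx_desc d;

        d.byte_offset = buf_phys % 16;
        d.addr = buf_phys - d.byte_offset;      /* now 16-byte aligned */
        return d;
}

int main(void)
{
        struct demo_tx_desc d = prepare_tx(0x10000006u);

        /* prints: addr = 0x10000000, offset = 6 */
        printf("addr = 0x%08x, offset = %u\n",
               (unsigned int)d.addr, d.byte_offset);
        return 0;
}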
?!*/ + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static int +ltq_etop_set_mac_address(struct net_device *dev, void *p) +{ + int ret = eth_mac_addr(dev, p); + + if (!ret) { + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + /* store the mac for the unicast filter */ + spin_lock_irqsave(&priv->lock, flags); + ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0); + ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16, + LTQ_ETOP_MAC_DA1); + spin_unlock_irqrestore(&priv->lock, flags); + } + return ret; +} + +static void +ltq_etop_set_multicast_list(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + /* ensure that the unicast filter is not enabled in promiscious mode */ + spin_lock_irqsave(&priv->lock, flags); + if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) + ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0); + else + ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static u16 +ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + /* we are currently only using the first queue */ + return 0; +} + +static int +ltq_etop_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + struct sockaddr mac; + int err; + bool random_mac = false; + + dev->watchdog_timeo = 10 * HZ; + err = ltq_etop_hw_init(dev); + if (err) + goto err_hw; + ltq_etop_change_mtu(dev, 1500); + + memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); + if (!is_valid_ether_addr(mac.sa_data)) { + pr_warn("etop: invalid MAC, using random\n"); + eth_random_addr(mac.sa_data); + random_mac = true; + } + + err = ltq_etop_set_mac_address(dev, &mac); + if (err) + goto err_netdev; + + /* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. 
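/*
 * Illustrative sketch (not part of the original lantiq_etop.c): how
 * ltq_etop_set_mac_address() above splits a 6-byte MAC across the
 * MAC_DA0/MAC_DA1 registers: bytes 0..3 are loaded as one 32-bit word,
 * bytes 4..5 as a 16-bit word shifted into the upper half of DA1.
 * memcpy() is used here instead of the driver's raw pointer casts to keep
 * the demo alignment-safe; the resulting words are in host byte order,
 * exactly as the driver's casts produce them.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_mac(const unsigned char mac[6], uint32_t *da0, uint32_t *da1)
{
        uint32_t lo;
        uint16_t hi;

        memcpy(&lo, mac, 4);            /* bytes 0..3 -> MAC_DA0 */
        memcpy(&hi, mac + 4, 2);        /* bytes 4..5 -> upper half of MAC_DA1 */
        *da0 = lo;
        *da1 = (uint32_t)hi << 16;
}

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint32_t da0, da1;

        pack_mac(mac, &da0, &da1);
        printf("MAC_DA0 = 0x%08x, MAC_DA1 = 0x%08x\n",
               (unsigned int)da0, (unsigned int)da1);
        return 0;
}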
*/ + if (random_mac) + dev->addr_assign_type = NET_ADDR_RANDOM; + + ltq_etop_set_multicast_list(dev); + err = ltq_etop_mdio_init(dev); + if (err) + goto err_netdev; + return 0; + +err_netdev: + unregister_netdev(dev); + free_netdev(dev); +err_hw: + ltq_etop_hw_exit(dev); + return err; +} + +static void +ltq_etop_tx_timeout(struct net_device *dev) +{ + int err; + + ltq_etop_hw_exit(dev); + err = ltq_etop_hw_init(dev); + if (err) + goto err_hw; + dev->trans_start = jiffies; + netif_wake_queue(dev); + return; + +err_hw: + ltq_etop_hw_exit(dev); + netdev_err(dev, "failed to restart etop after TX timeout\n"); +} + +static const struct net_device_ops ltq_eth_netdev_ops = { + .ndo_open = ltq_etop_open, + .ndo_stop = ltq_etop_stop, + .ndo_start_xmit = ltq_etop_tx, + .ndo_change_mtu = ltq_etop_change_mtu, + .ndo_do_ioctl = ltq_etop_ioctl, + .ndo_set_mac_address = ltq_etop_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = ltq_etop_set_multicast_list, + .ndo_select_queue = ltq_etop_select_queue, + .ndo_init = ltq_etop_init, + .ndo_tx_timeout = ltq_etop_tx_timeout, +}; + +static int __init +ltq_etop_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct ltq_etop_priv *priv; + struct resource *res; + int err; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get etop resource\n"); + err = -ENOENT; + goto err_out; + } + + res = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "failed to request etop resource\n"); + err = -EBUSY; + goto err_out; + } + + ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, + res->start, resource_size(res)); + if (!ltq_etop_membase) { + dev_err(&pdev->dev, "failed to remap etop engine %d\n", + pdev->id); + err = -ENOMEM; + goto err_out; + } + + dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4); + if (!dev) { + err = -ENOMEM; + goto err_out; + } + strcpy(dev->name, "eth%d"); + dev->netdev_ops = <q_eth_netdev_ops; + dev->ethtool_ops = <q_etop_ethtool_ops; + priv = netdev_priv(dev); + priv->res = res; + priv->pdev = pdev; + priv->pldata = dev_get_platdata(&pdev->dev); + priv->netdev = dev; + spin_lock_init(&priv->lock); + + for (i = 0; i < MAX_DMA_CHAN; i++) { + if (IS_TX(i)) + netif_napi_add(dev, &priv->ch[i].napi, + ltq_etop_poll_tx, 8); + else if (IS_RX(i)) + netif_napi_add(dev, &priv->ch[i].napi, + ltq_etop_poll_rx, 32); + priv->ch[i].netdev = dev; + } + + err = register_netdev(dev); + if (err) + goto err_free; + + platform_set_drvdata(pdev, dev); + return 0; + +err_free: + free_netdev(dev); +err_out: + return err; +} + +static int +ltq_etop_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (dev) { + netif_tx_stop_all_queues(dev); + ltq_etop_hw_exit(dev); + ltq_etop_mdio_cleanup(dev); + unregister_netdev(dev); + } + return 0; +} + +static struct platform_driver ltq_mii_driver = { + .remove = ltq_etop_remove, + .driver = { + .name = "ltq_etop", + }, +}; + +int __init +init_ltq_etop(void) +{ + int ret = platform_driver_probe(<q_mii_driver, ltq_etop_probe); + + if (ret) + pr_err("ltq_etop: Error registering platform driver!"); + return ret; +} + +static void __exit +exit_ltq_etop(void) +{ + platform_driver_unregister(<q_mii_driver); +} + +module_init(init_ltq_etop); +module_exit(exit_ltq_etop); + +MODULE_AUTHOR("John Crispin "); +MODULE_DESCRIPTION("Lantiq SoC ETOP"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig new file mode 100644 index 000000000..d323a695d --- /dev/null +++ b/drivers/net/ethernet/marvell/Kconfig @@ -0,0 +1,143 @@ +# +# Marvell device configuration +# + +config NET_VENDOR_MARVELL + bool "Marvell devices" + default y + depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Marvell devices. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_MARVELL + +config MV643XX_ETH + tristate "Marvell Discovery (643XX) and Orion ethernet support" + depends on (MV64X60 || PPC32 || PLAT_ORION) && INET + select PHYLIB + select MVMDIO + ---help--- + This driver supports the gigabit ethernet MACs in the + Marvell Discovery PPC/MIPS chipset family (MV643XX) and + in the Marvell Orion ARM SoC family. + + Some boards that use the Discovery chipset are the Momenco + Ocelot C and Jaguar ATX and Pegasos II. + +config MVMDIO + tristate "Marvell MDIO interface support" + depends on HAS_IOMEM + select PHYLIB + ---help--- + This driver supports the MDIO interface found in the network + interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, + Dove, Armada 370 and Armada XP). + + This driver is used by the MV643XX_ETH and MVNETA drivers. + +config MVNETA + tristate "Marvell Armada 370/38x/XP network interface support" + depends on PLAT_ORION + select MVMDIO + ---help--- + This driver supports the network interface units in the + Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family. + + Note that this driver is distinct from the mv643xx_eth + driver, which should be used for the older Marvell SoCs + (Dove, Orion, Discovery, Kirkwood). + +config MVPP2 + tristate "Marvell Armada 375 network interface support" + depends on MACH_ARMADA_375 + select MVMDIO + ---help--- + This driver supports the network interface units in the + Marvell ARMADA 375 SoC. + +config PXA168_ETH + tristate "Marvell pxa168 ethernet support" + depends on HAS_IOMEM && HAS_DMA + depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST + select PHYLIB + ---help--- + This driver supports the pxa168 Ethernet ports. + + To compile this driver as a module, choose M here. The module + will be called pxa168_eth. + +config SKGE + tristate "Marvell Yukon Gigabit Ethernet support" + depends on PCI + select CRC32 + ---help--- + This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx + and related Gigabit Ethernet adapters. It is a new smaller driver + with better performance and more complete ethtool support. + + It does not support the link failover and network management + features that "portable" vendor supplied sk98lin driver does. + + This driver supports adapters based on the original Yukon chipset: + Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T, + Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872. + + It does not support the newer Yukon2 chipset: a separate driver, + sky2, is provided for these adapters. + + To compile this driver as a module, choose M here: the module + will be called skge. This is recommended. + +config SKGE_DEBUG + bool "Debugging interface" + depends on SKGE && DEBUG_FS + ---help--- + This option adds the ability to dump driver state for debugging. 
+ The file /sys/kernel/debug/skge/ethX displays the state of the internal + transmit and receive rings. + + If unsure, say N. + +config SKGE_GENESIS + bool "Support for older SysKonnect Genesis boards" + depends on SKGE + ---help--- + This enables support for the older and uncommon SysKonnect Genesis + chips, which support MII via an external transceiver, instead of + an internal one. Disabling this option will save some memory + by making code smaller. If unsure say Y. + +config SKY2 + tristate "Marvell Yukon 2 support" + depends on PCI + select CRC32 + ---help--- + This driver supports Gigabit Ethernet adapters based on the + Marvell Yukon 2 chipset: + Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/ + 88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21 + + There is companion driver for the older Marvell Yukon and + SysKonnect Genesis based adapters: skge. + + To compile this driver as a module, choose M here: the module + will be called sky2. This is recommended. + +config SKY2_DEBUG + bool "Debugging interface" + depends on SKY2 && DEBUG_FS + ---help--- + This option adds the ability to dump driver state for debugging. + The file /sys/kernel/debug/sky2/ethX displays the state of the internal + transmit and receive rings. + + If unsure, say N. + +endif # NET_VENDOR_MARVELL diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile new file mode 100644 index 000000000..f6425bd28 --- /dev/null +++ b/drivers/net/ethernet/marvell/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Marvell device drivers. +# + +obj-$(CONFIG_MVMDIO) += mvmdio.o +obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o +obj-$(CONFIG_MVNETA) += mvneta.o +obj-$(CONFIG_MVPP2) += mvpp2.o +obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o +obj-$(CONFIG_SKGE) += skge.o +obj-$(CONFIG_SKY2) += sky2.o diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c new file mode 100644 index 000000000..1c75829eb --- /dev/null +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -0,0 +1,3259 @@ +/* + * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports + * Copyright (C) 2002 Matthew Dharm + * + * Based on the 64360 driver from: + * Copyright (C) 2002 Rabeeh Khoury + * Rabeeh Khoury + * + * Copyright (C) 2003 PMC-Sierra, Inc., + * written by Manish Lachwani + * + * Copyright (C) 2003 Ralf Baechle + * + * Copyright (C) 2004-2006 MontaVista Software, Inc. + * Dale Farnsworth + * + * Copyright (C) 2004 Steven J. Hill + * + * + * Copyright (C) 2007-2008 Marvell Semiconductor + * Lennert Buytenhek + * + * Copyright (C) 2013 Michael Stapelberg + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char mv643xx_eth_driver_name[] = "mv643xx_eth"; +static char mv643xx_eth_driver_version[] = "1.4"; + + +/* + * Registers shared between all ports. + */ +#define PHY_ADDR 0x0000 +#define WINDOW_BASE(w) (0x0200 + ((w) << 3)) +#define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) +#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) +#define WINDOW_BAR_ENABLE 0x0290 +#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) + +/* + * Main per-port registers. These live at offset 0x0400 for + * port #0, 0x0800 for port #1, and 0x0c00 for port #2. + */ +#define PORT_CONFIG 0x0000 +#define UNICAST_PROMISCUOUS_MODE 0x00000001 +#define PORT_CONFIG_EXT 0x0004 +#define MAC_ADDR_LOW 0x0014 +#define MAC_ADDR_HIGH 0x0018 +#define SDMA_CONFIG 0x001c +#define TX_BURST_SIZE_16_64BIT 0x01000000 +#define TX_BURST_SIZE_4_64BIT 0x00800000 +#define BLM_TX_NO_SWAP 0x00000020 +#define BLM_RX_NO_SWAP 0x00000010 +#define RX_BURST_SIZE_16_64BIT 0x00000008 +#define RX_BURST_SIZE_4_64BIT 0x00000004 +#define PORT_SERIAL_CONTROL 0x003c +#define SET_MII_SPEED_TO_100 0x01000000 +#define SET_GMII_SPEED_TO_1000 0x00800000 +#define SET_FULL_DUPLEX_MODE 0x00200000 +#define MAX_RX_PACKET_9700BYTE 0x000a0000 +#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 +#define DO_NOT_FORCE_LINK_FAIL 0x00000400 +#define SERIAL_PORT_CONTROL_RESERVED 0x00000200 +#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 +#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 +#define FORCE_LINK_PASS 0x00000002 +#define SERIAL_PORT_ENABLE 0x00000001 +#define PORT_STATUS 0x0044 +#define TX_FIFO_EMPTY 0x00000400 +#define TX_IN_PROGRESS 0x00000080 +#define PORT_SPEED_MASK 0x00000030 +#define PORT_SPEED_1000 0x00000010 +#define PORT_SPEED_100 0x00000020 +#define PORT_SPEED_10 0x00000000 +#define FLOW_CONTROL_ENABLED 0x00000008 +#define FULL_DUPLEX 0x00000004 +#define LINK_UP 0x00000002 +#define TXQ_COMMAND 0x0048 +#define TXQ_FIX_PRIO_CONF 0x004c +#define PORT_SERIAL_CONTROL1 0x004c +#define CLK125_BYPASS_EN 0x00000010 +#define TX_BW_RATE 0x0050 +#define TX_BW_MTU 0x0058 +#define TX_BW_BURST 0x005c +#define INT_CAUSE 0x0060 +#define INT_TX_END 0x07f80000 +#define INT_TX_END_0 0x00080000 +#define INT_RX 0x000003fc +#define INT_RX_0 0x00000004 +#define INT_EXT 0x00000002 +#define INT_CAUSE_EXT 0x0064 +#define INT_EXT_LINK_PHY 0x00110000 +#define INT_EXT_TX 0x000000ff +#define INT_MASK 0x0068 +#define INT_MASK_EXT 0x006c +#define TX_FIFO_URGENT_THRESHOLD 0x0074 +#define RX_DISCARD_FRAME_CNT 0x0084 +#define RX_OVERRUN_FRAME_CNT 0x0088 +#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc +#define TX_BW_RATE_MOVED 0x00e0 +#define TX_BW_MTU_MOVED 0x00e8 +#define TX_BW_BURST_MOVED 0x00ec +#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) +#define RXQ_COMMAND 0x0280 +#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) +#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) +#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) +#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) + +/* + * Misc per-port registers. + */ +#define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) +#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) +#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) +#define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) + + +/* + * SDMA configuration register default value. 
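A small worked example of the per-port layout described in the comment above: each port's register block sits 0x0400 * (port + 1) bytes into the shared region, consistent with the 0x0400/0x0800/0x0c00 offsets listed there, and offsets such as PORT_STATUS (0x0044) are relative to that block. The base address below is made up for illustration:

#include <stdio.h>

// Prints the block base and PORT_STATUS address for ports 0..2.
int main(void)
{
	const unsigned shared_base = 0xf1070000;	// hypothetical mapping
	const unsigned port_status_off = 0x0044;

	for (int port = 0; port < 3; port++) {
		unsigned port_base = shared_base + 0x0400 * (port + 1);
		printf("port %d: block at 0x%08x, PORT_STATUS at 0x%08x\n",
		       port, port_base, port_base + port_status_off);
	}
	return 0;
}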
+ */ +#if defined(__BIG_ENDIAN) +#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ + (RX_BURST_SIZE_4_64BIT | \ + TX_BURST_SIZE_4_64BIT) +#elif defined(__LITTLE_ENDIAN) +#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ + (RX_BURST_SIZE_4_64BIT | \ + BLM_RX_NO_SWAP | \ + BLM_TX_NO_SWAP | \ + TX_BURST_SIZE_4_64BIT) +#else +#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined +#endif + + +/* + * Misc definitions. + */ +#define DEFAULT_RX_QUEUE_SIZE 128 +#define DEFAULT_TX_QUEUE_SIZE 512 +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) + +#define TSO_HEADER_SIZE 128 + +/* Max number of allowed TCP segments for software TSO */ +#define MV643XX_MAX_TSO_SEGS 100 +#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +#define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_dma) && \ + (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + +#define DESC_DMA_MAP_SINGLE 0 +#define DESC_DMA_MAP_PAGE 1 + +/* + * RX/TX descriptors. + */ +#if defined(__BIG_ENDIAN) +struct rx_desc { + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 cmd_sts; /* Descriptor command status */ + u32 next_desc_ptr; /* Next descriptor pointer */ + u32 buf_ptr; /* Descriptor buffer pointer */ +}; + +struct tx_desc { + u16 byte_cnt; /* buffer byte count */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u32 cmd_sts; /* Command/status field */ + u32 next_desc_ptr; /* Pointer to next descriptor */ + u32 buf_ptr; /* pointer to buffer for this descriptor*/ +}; +#elif defined(__LITTLE_ENDIAN) +struct rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 buf_size; /* Buffer size */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 l4i_chk; /* CPU provided TCP checksum */ + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor*/ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; +#else +#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined +#endif + +/* RX & TX descriptor command */ +#define BUFFER_OWNED_BY_DMA 0x80000000 + +/* RX & TX descriptor status */ +#define ERROR_SUMMARY 0x00000001 + +/* RX descriptor status */ +#define LAYER_4_CHECKSUM_OK 0x40000000 +#define RX_ENABLE_INTERRUPT 0x20000000 +#define RX_FIRST_DESC 0x08000000 +#define RX_LAST_DESC 0x04000000 +#define RX_IP_HDR_OK 0x02000000 +#define RX_PKT_IS_IPV4 0x01000000 +#define RX_PKT_IS_ETHERNETV2 0x00800000 +#define RX_PKT_LAYER4_TYPE_MASK 0x00600000 +#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 +#define RX_PKT_IS_VLAN_TAGGED 0x00080000 + +/* TX descriptor command */ +#define TX_ENABLE_INTERRUPT 0x00800000 +#define GEN_CRC 0x00400000 +#define TX_FIRST_DESC 0x00200000 +#define TX_LAST_DESC 0x00100000 +#define ZERO_PADDING 0x00080000 +#define GEN_IP_V4_CHECKSUM 0x00040000 +#define GEN_TCP_UDP_CHECKSUM 0x00020000 +#define UDP_FRAME 0x00010000 +#define MAC_HDR_EXTRA_4_BYTES 0x00008000 +#define GEN_TCP_UDP_CHK_FULL 0x00000400 +#define MAC_HDR_EXTRA_8_BYTES 0x00000200 + +#define TX_IHL_SHIFT 11 + + +/* global *******************************************************************/ +struct mv643xx_eth_shared_private { + /* + * Ethernet controller base address. + */ + void __iomem *base; + + /* + * Per-port MBUS window access register value. + */ + u32 win_protect; + + /* + * Hardware-specific parameters. 
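The BUFFER_OWNED_BY_DMA bit defined above is the whole ownership protocol for these rings: software fills a descriptor, publishes it by setting the bit last, and only reclaims it once the hardware has cleared the bit again (rxq_refill() and txq_reclaim() later in this file do exactly this, with wmb()/rmb() barriers around the cmd_sts access). A self-contained sketch of the idea, with the hardware side simulated:

#include <stdint.h>
#include <stdio.h>

#define BUFFER_OWNED_BY_DMA 0x80000000u

// Simplified stand-in for the descriptors above; layout is not meaningful here.
struct fake_desc {
	uint32_t cmd_sts;
	uint32_t buf_ptr;
	uint16_t buf_size;
	uint16_t byte_cnt;
};

static void give_to_hw(struct fake_desc *d, uint32_t buf, uint16_t size)
{
	d->buf_ptr = buf;
	d->buf_size = size;
	// In the driver a wmb() goes here so the fields above are visible
	// before ownership is transferred.
	d->cmd_sts = BUFFER_OWNED_BY_DMA;
}

static int hw_done(const struct fake_desc *d)
{
	return !(d->cmd_sts & BUFFER_OWNED_BY_DMA);
}

int main(void)
{
	struct fake_desc d = { 0 };

	give_to_hw(&d, 0x1000, 1536);
	d.cmd_sts &= ~BUFFER_OWNED_BY_DMA;	// pretend the NIC completed it
	printf("descriptor reclaimable: %s\n", hw_done(&d) ? "yes" : "no");
	return 0;
}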
+ */ + int extended_rx_coal_limit; + int tx_bw_control; + int tx_csum_limit; + struct clk *clk; +}; + +#define TX_BW_CONTROL_ABSENT 0 +#define TX_BW_CONTROL_OLD_LAYOUT 1 +#define TX_BW_CONTROL_NEW_LAYOUT 2 + +static int mv643xx_eth_open(struct net_device *dev); +static int mv643xx_eth_stop(struct net_device *dev); + + +/* per-port *****************************************************************/ +struct mib_counters { + u64 good_octets_received; + u32 bad_octets_received; + u32 internal_mac_transmit_err; + u32 good_frames_received; + u32 bad_frames_received; + u32 broadcast_frames_received; + u32 multicast_frames_received; + u32 frames_64_octets; + u32 frames_65_to_127_octets; + u32 frames_128_to_255_octets; + u32 frames_256_to_511_octets; + u32 frames_512_to_1023_octets; + u32 frames_1024_to_max_octets; + u64 good_octets_sent; + u32 good_frames_sent; + u32 excessive_collision; + u32 multicast_frames_sent; + u32 broadcast_frames_sent; + u32 unrec_mac_control_received; + u32 fc_sent; + u32 good_fc_received; + u32 bad_fc_received; + u32 undersize_received; + u32 fragments_received; + u32 oversize_received; + u32 jabber_received; + u32 mac_receive_error; + u32 bad_crc_event; + u32 collision; + u32 late_collision; + /* Non MIB hardware counters */ + u32 rx_discard; + u32 rx_overrun; +}; + +struct rx_queue { + int index; + + int rx_ring_size; + + int rx_desc_count; + int rx_curr_desc; + int rx_used_desc; + + struct rx_desc *rx_desc_area; + dma_addr_t rx_desc_dma; + int rx_desc_area_size; + struct sk_buff **rx_skb; +}; + +struct tx_queue { + int index; + + int tx_ring_size; + + int tx_desc_count; + int tx_curr_desc; + int tx_used_desc; + + int tx_stop_threshold; + int tx_wake_threshold; + + char *tso_hdrs; + dma_addr_t tso_hdrs_dma; + + struct tx_desc *tx_desc_area; + char *tx_desc_mapping; /* array to track the type of the dma mapping */ + dma_addr_t tx_desc_dma; + int tx_desc_area_size; + + struct sk_buff_head tx_skb; + + unsigned long tx_packets; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +struct mv643xx_eth_private { + struct mv643xx_eth_shared_private *shared; + void __iomem *base; + int port_num; + + struct net_device *dev; + + struct phy_device *phy; + + struct timer_list mib_counters_timer; + spinlock_t mib_counters_lock; + struct mib_counters mib_counters; + + struct work_struct tx_timeout_task; + + struct napi_struct napi; + u32 int_mask; + u8 oom; + u8 work_link; + u8 work_tx; + u8 work_tx_end; + u8 work_rx; + u8 work_rx_refill; + + int skb_size; + + /* + * RX state. + */ + int rx_ring_size; + unsigned long rx_desc_sram_addr; + int rx_desc_sram_size; + int rxq_count; + struct timer_list rx_oom; + struct rx_queue rxq[8]; + + /* + * TX state. + */ + int tx_ring_size; + unsigned long tx_desc_sram_addr; + int tx_desc_sram_size; + int txq_count; + struct tx_queue txq[8]; + + /* + * Hardware-specific parameters. 
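The curr/used indices and counters in the rx_queue and tx_queue structures above describe a plain circular ring: the submit path advances the current index, the reclaim path advances the used index, both wrap at the ring size, and the count tracks how many descriptors are in flight between them (the stop/wake thresholds are compared against that count). A small sketch of the bookkeeping, with an arbitrary ring size:

#include <stdio.h>

struct ring {
	int size;
	int curr;	// next descriptor to hand to hardware
	int used;	// next descriptor to reclaim
	int count;	// descriptors currently owned by hardware
};

static int ring_advance(int index, int size)
{
	return (index + 1 == size) ? 0 : index + 1;
}

int main(void)
{
	struct ring r = { .size = 8 };

	for (int i = 0; i < 10; i++) {		// queue 10, reclaim the first 3
		r.curr = ring_advance(r.curr, r.size);
		r.count++;
		if (i < 3) {
			r.used = ring_advance(r.used, r.size);
			r.count--;
		}
	}
	printf("curr=%d used=%d in-flight=%d\n", r.curr, r.used, r.count);
	return 0;	// prints curr=2 used=3 in-flight=7
}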
+ */ + struct clk *clk; + unsigned int t_clk; +}; + + +/* port register accessors **************************************************/ +static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) +{ + return readl(mp->shared->base + offset); +} + +static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) +{ + return readl(mp->base + offset); +} + +static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) +{ + writel(data, mp->shared->base + offset); +} + +static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) +{ + writel(data, mp->base + offset); +} + + +/* rxq/txq helper functions *************************************************/ +static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) +{ + return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); +} + +static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) +{ + return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); +} + +static void rxq_enable(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + wrlp(mp, RXQ_COMMAND, 1 << rxq->index); +} + +static void rxq_disable(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + u8 mask = 1 << rxq->index; + + wrlp(mp, RXQ_COMMAND, mask << 8); + while (rdlp(mp, RXQ_COMMAND) & mask) + udelay(10); +} + +static void txq_reset_hw_ptr(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + u32 addr; + + addr = (u32)txq->tx_desc_dma; + addr += txq->tx_curr_desc * sizeof(struct tx_desc); + wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); +} + +static void txq_enable(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + wrlp(mp, TXQ_COMMAND, 1 << txq->index); +} + +static void txq_disable(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + u8 mask = 1 << txq->index; + + wrlp(mp, TXQ_COMMAND, mask << 8); + while (rdlp(mp, TXQ_COMMAND) & mask) + udelay(10); +} + +static void txq_maybe_wake(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + + if (netif_tx_queue_stopped(nq)) { + __netif_tx_lock(nq, smp_processor_id()); + if (txq->tx_desc_count <= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); + __netif_tx_unlock(nq); + } +} + +static int rxq_process(struct rx_queue *rxq, int budget) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + struct net_device_stats *stats = &mp->dev->stats; + int rx; + + rx = 0; + while (rx < budget && rxq->rx_desc_count) { + struct rx_desc *rx_desc; + unsigned int cmd_sts; + struct sk_buff *skb; + u16 byte_cnt; + + rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; + + cmd_sts = rx_desc->cmd_sts; + if (cmd_sts & BUFFER_OWNED_BY_DMA) + break; + rmb(); + + skb = rxq->rx_skb[rxq->rx_curr_desc]; + rxq->rx_skb[rxq->rx_curr_desc] = NULL; + + rxq->rx_curr_desc++; + if (rxq->rx_curr_desc == rxq->rx_ring_size) + rxq->rx_curr_desc = 0; + + dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, + rx_desc->buf_size, DMA_FROM_DEVICE); + rxq->rx_desc_count--; + rx++; + + mp->work_rx_refill |= 1 << rxq->index; + + byte_cnt = rx_desc->byte_cnt; + + /* + * Update statistics. + * + * Note that the descriptor byte count includes 2 dummy + * bytes automatically inserted by the hardware at the + * start of the packet (which we don't count), and a 4 + * byte CRC at the end of the packet (which we do count). 
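A worked example of the byte accounting described in the comment above, for a full-sized frame at the default 1500-byte MTU: the descriptor byte count covers the 2 prepended dummy bytes and the 4-byte CRC, rx_bytes keeps the CRC, and the length handed to the stack drops it:

#include <stdio.h>

int main(void)
{
	int mtu = 1500;
	int byte_cnt = 2 + 14 + mtu + 4;	// dummy + Ethernet header + payload + CRC

	printf("descriptor byte_cnt     : %d\n", byte_cnt);		// 1520
	printf("counted in rx_bytes     : %d\n", byte_cnt - 2);		// 1518, CRC included
	printf("skb data after skb_put(): %d\n", byte_cnt - 2 - 4);	// 1514, CRC stripped
	return 0;
}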
+ */ + stats->rx_packets++; + stats->rx_bytes += byte_cnt - 2; + + /* + * In case we received a packet without first / last bits + * on, or the error summary bit is set, the packet needs + * to be dropped. + */ + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) + != (RX_FIRST_DESC | RX_LAST_DESC)) + goto err; + + /* + * The -4 is for the CRC in the trailer of the + * received packet + */ + skb_put(skb, byte_cnt - 2 - 4); + + if (cmd_sts & LAYER_4_CHECKSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->protocol = eth_type_trans(skb, mp->dev); + + napi_gro_receive(&mp->napi, skb); + + continue; + +err: + stats->rx_dropped++; + + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) { + if (net_ratelimit()) + netdev_err(mp->dev, + "received packet spanning multiple descriptors\n"); + } + + if (cmd_sts & ERROR_SUMMARY) + stats->rx_errors++; + + dev_kfree_skb(skb); + } + + if (rx < budget) + mp->work_rx &= ~(1 << rxq->index); + + return rx; +} + +static int rxq_refill(struct rx_queue *rxq, int budget) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + int refilled; + + refilled = 0; + while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { + struct sk_buff *skb; + int rx; + struct rx_desc *rx_desc; + int size; + + skb = netdev_alloc_skb(mp->dev, mp->skb_size); + + if (skb == NULL) { + mp->oom = 1; + goto oom; + } + + if (SKB_DMA_REALIGN) + skb_reserve(skb, SKB_DMA_REALIGN); + + refilled++; + rxq->rx_desc_count++; + + rx = rxq->rx_used_desc++; + if (rxq->rx_used_desc == rxq->rx_ring_size) + rxq->rx_used_desc = 0; + + rx_desc = rxq->rx_desc_area + rx; + + size = skb_end_pointer(skb) - skb->data; + rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, + skb->data, size, + DMA_FROM_DEVICE); + rx_desc->buf_size = size; + rxq->rx_skb[rx] = skb; + wmb(); + rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; + wmb(); + + /* + * The hardware automatically prepends 2 bytes of + * dummy data to each received packet, so that the + * IP header ends up 16-byte aligned. + */ + skb_reserve(skb, 2); + } + + if (refilled < budget) + mp->work_rx_refill &= ~(1 << rxq->index); + +oom: + return refilled; +} + + +/* tx ***********************************************************************/ +static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) +{ + int frag; + + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; + + if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) + return 1; + } + + return 0; +} + +static inline __be16 sum16_as_be(__sum16 sum) +{ + return (__force __be16)sum; +} + +static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, + u16 *l4i_chk, u32 *command, int length) +{ + int ret; + u32 cmd = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int hdr_len; + int tag_bytes; + + BUG_ON(skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_8021Q)); + + hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; + tag_bytes = hdr_len - ETH_HLEN; + + if (length - hdr_len > mp->shared->tx_csum_limit || + unlikely(tag_bytes & ~12)) { + ret = skb_checksum_help(skb); + if (!ret) + goto no_csum; + return ret; + } + + if (tag_bytes & 4) + cmd |= MAC_HDR_EXTRA_4_BYTES; + if (tag_bytes & 8) + cmd |= MAC_HDR_EXTRA_8_BYTES; + + cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL | + GEN_IP_V4_CHECKSUM | + ip_hdr(skb)->ihl << TX_IHL_SHIFT; + + /* TODO: Revisit this. 
With the usage of GEN_TCP_UDP_CHK_FULL + * it seems we don't need to pass the initial checksum. */ + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + cmd |= UDP_FRAME; + *l4i_chk = 0; + break; + case IPPROTO_TCP: + *l4i_chk = 0; + break; + default: + WARN(1, "protocol not supported"); + } + } else { +no_csum: + /* Errata BTS #50, IHL must be 5 if no HW checksum */ + cmd |= 5 << TX_IHL_SHIFT; + } + *command = cmd; + return 0; +} + +static inline int +txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, + struct sk_buff *skb, char *data, int length, + bool last_tcp, bool is_last) +{ + int tx_index; + u32 cmd_sts; + struct tx_desc *desc; + + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; + + desc->l4i_chk = 0; + desc->byte_cnt = length; + desc->buf_ptr = dma_map_single(dev->dev.parent, data, + length, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) { + WARN(1, "dma_map_single failed!\n"); + return -ENOMEM; + } + + cmd_sts = BUFFER_OWNED_BY_DMA; + if (last_tcp) { + /* last descriptor in the TCP packet */ + cmd_sts |= ZERO_PADDING | TX_LAST_DESC; + /* last descriptor in SKB */ + if (is_last) + cmd_sts |= TX_ENABLE_INTERRUPT; + } + desc->cmd_sts = cmd_sts; + return 0; +} + +static inline void +txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int tx_index; + struct tx_desc *desc; + int ret; + u32 cmd_csum = 0; + u16 l4i_chk = 0; + + tx_index = txq->tx_curr_desc; + desc = &txq->tx_desc_area[tx_index]; + + ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length); + if (ret) + WARN(1, "failed to prepare checksum!"); + + /* Should we set this? Can't use the value from skb_tx_csum() + * as it's not the correct initial L4 checksum to use. 
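To make the bit twiddling in skb_tx_csum() above concrete, here is the command word it would assemble for a TCP packet carrying a single VLAN tag (so the IP header starts 4 bytes later than usual) and a 20-byte IP header, using the TX descriptor bit definitions from earlier in this file; the inputs are illustrative:

#include <stdio.h>

#define GEN_IP_V4_CHECKSUM	0x00040000u
#define GEN_TCP_UDP_CHECKSUM	0x00020000u
#define MAC_HDR_EXTRA_4_BYTES	0x00008000u
#define MAC_HDR_EXTRA_8_BYTES	0x00000200u
#define GEN_TCP_UDP_CHK_FULL	0x00000400u
#define TX_IHL_SHIFT		11

int main(void)
{
	int tag_bytes = 4;	// one 802.1Q tag between MAC and IP header
	int ihl = 5;		// IP header length in 32-bit words
	unsigned cmd = 0;

	if (tag_bytes & 4)
		cmd |= MAC_HDR_EXTRA_4_BYTES;
	if (tag_bytes & 8)
		cmd |= MAC_HDR_EXTRA_8_BYTES;
	cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
	       GEN_IP_V4_CHECKSUM | (unsigned)ihl << TX_IHL_SHIFT;

	printf("tx csum command = 0x%08x\n", cmd);	// 0x0006ac00 for these inputs
	return 0;
}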
*/ + desc->l4i_chk = 0; + + desc->byte_cnt = hdr_len; + desc->buf_ptr = txq->tso_hdrs_dma + + txq->tx_curr_desc * TSO_HEADER_SIZE; + desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC | + GEN_CRC; + + txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; +} + +static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, + struct net_device *dev) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int total_len, data_left, ret; + int desc_count = 0; + struct tso_t tso; + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + /* Count needed descriptors */ + if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) { + netdev_dbg(dev, "not enough descriptors for TSO!\n"); + return -EBUSY; + } + + /* Initialize the TSO handler, and prepare the first payload */ + tso_start(skb, &tso); + + total_len = skb->len - hdr_len; + while (total_len > 0) { + char *hdr; + + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + desc_count++; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + txq_put_hdr_tso(skb, txq, data_left); + + while (data_left > 0) { + int size; + desc_count++; + + size = min_t(int, tso.size, data_left); + ret = txq_put_data_tso(dev, txq, skb, tso.data, size, + size == data_left, + total_len == 0); + if (ret) + goto err_release; + data_left -= size; + tso_build_data(skb, &tso, size); + } + } + + __skb_queue_tail(&txq->tx_skb, skb); + skb_tx_timestamp(skb); + + /* clear TX_END status */ + mp->work_tx_end &= ~(1 << txq->index); + + /* ensure all descriptors are written before poking hardware */ + wmb(); + txq_enable(txq); + txq->tx_desc_count += desc_count; + return 0; +err_release: + /* TODO: Release all used data descriptors; header descriptors must not + * be DMA-unmapped. + */ + return ret; +} + +static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int nr_frags = skb_shinfo(skb)->nr_frags; + int frag; + + for (frag = 0; frag < nr_frags; frag++) { + skb_frag_t *this_frag; + int tx_index; + struct tx_desc *desc; + + this_frag = &skb_shinfo(skb)->frags[frag]; + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; + + /* + * The last fragment will generate an interrupt + * which will free the skb on TX completion. 
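A rough worked example of descriptor consumption in the software TSO path above: every emitted segment takes one header descriptor plus at least one data descriptor, which is consistent with the MV643XX_MAX_SKB_DESCS bound of MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS defined earlier. The segment size and lengths below are illustrative:

#include <stdio.h>

int main(void)
{
	int gso_size = 1448;		// TCP payload per segment
	int hdr_len = 66;		// Ethernet + IP + TCP headers
	int skb_len = hdr_len + 57920;	// 40 segments worth of payload

	int payload = skb_len - hdr_len;
	int segs = (payload + gso_size - 1) / gso_size;
	int min_descs = segs * 2;	// header + one data buffer per segment

	printf("segments: %d, descriptors needed (minimum): %d\n",
	       segs, min_descs);	// 40 and 80
	return 0;
}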
+ */ + if (frag == nr_frags - 1) { + desc->cmd_sts = BUFFER_OWNED_BY_DMA | + ZERO_PADDING | TX_LAST_DESC | + TX_ENABLE_INTERRUPT; + } else { + desc->cmd_sts = BUFFER_OWNED_BY_DMA; + } + + desc->l4i_chk = 0; + desc->byte_cnt = skb_frag_size(this_frag); + desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, + this_frag, 0, desc->byte_cnt, + DMA_TO_DEVICE); + } +} + +static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, + struct net_device *dev) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int nr_frags = skb_shinfo(skb)->nr_frags; + int tx_index; + struct tx_desc *desc; + u32 cmd_sts; + u16 l4i_chk; + int length, ret; + + cmd_sts = 0; + l4i_chk = 0; + + if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { + if (net_ratelimit()) + netdev_err(dev, "tx queue full?!\n"); + return -EBUSY; + } + + ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len); + if (ret) + return ret; + cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; + + tx_index = txq->tx_curr_desc++; + if (txq->tx_curr_desc == txq->tx_ring_size) + txq->tx_curr_desc = 0; + desc = &txq->tx_desc_area[tx_index]; + txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; + + if (nr_frags) { + txq_submit_frag_skb(txq, skb); + length = skb_headlen(skb); + } else { + cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; + length = skb->len; + } + + desc->l4i_chk = l4i_chk; + desc->byte_cnt = length; + desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, + length, DMA_TO_DEVICE); + + __skb_queue_tail(&txq->tx_skb, skb); + + skb_tx_timestamp(skb); + + /* ensure all other descriptors are written before first cmd_sts */ + wmb(); + desc->cmd_sts = cmd_sts; + + /* clear TX_END status */ + mp->work_tx_end &= ~(1 << txq->index); + + /* ensure all descriptors are written before poking hardware */ + wmb(); + txq_enable(txq); + + txq->tx_desc_count += nr_frags + 1; + + return 0; +} + +static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int length, queue, ret; + struct tx_queue *txq; + struct netdev_queue *nq; + + queue = skb_get_queue_mapping(skb); + txq = mp->txq + queue; + nq = netdev_get_tx_queue(dev, queue); + + if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { + netdev_printk(KERN_DEBUG, dev, + "failed to linearize skb with tiny unaligned fragment\n"); + return NETDEV_TX_BUSY; + } + + length = skb->len; + + if (skb_is_gso(skb)) + ret = txq_submit_tso(txq, skb, dev); + else + ret = txq_submit_skb(txq, skb, dev); + if (!ret) { + txq->tx_bytes += length; + txq->tx_packets++; + + if (txq->tx_desc_count >= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); + } else { + txq->tx_dropped++; + dev_kfree_skb_any(skb); + } + + return NETDEV_TX_OK; +} + + +/* tx napi ******************************************************************/ +static void txq_kick(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + u32 hw_desc_ptr; + u32 expected_ptr; + + __netif_tx_lock(nq, smp_processor_id()); + + if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) + goto out; + + hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); + expected_ptr = (u32)txq->tx_desc_dma + + txq->tx_curr_desc * sizeof(struct tx_desc); + + if (hw_desc_ptr != expected_ptr) + txq_enable(txq); + +out: + __netif_tx_unlock(nq); + + mp->work_tx_end &= ~(1 << txq->index); +} + +static int txq_reclaim(struct tx_queue *txq, int budget, int force) +{ + 
struct mv643xx_eth_private *mp = txq_to_mp(txq); + struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); + int reclaimed; + + __netif_tx_lock_bh(nq); + + reclaimed = 0; + while (reclaimed < budget && txq->tx_desc_count > 0) { + int tx_index; + struct tx_desc *desc; + u32 cmd_sts; + char desc_dma_map; + + tx_index = txq->tx_used_desc; + desc = &txq->tx_desc_area[tx_index]; + desc_dma_map = txq->tx_desc_mapping[tx_index]; + + cmd_sts = desc->cmd_sts; + + if (cmd_sts & BUFFER_OWNED_BY_DMA) { + if (!force) + break; + desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; + } + + txq->tx_used_desc = tx_index + 1; + if (txq->tx_used_desc == txq->tx_ring_size) + txq->tx_used_desc = 0; + + reclaimed++; + txq->tx_desc_count--; + + if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { + + if (desc_dma_map == DESC_DMA_MAP_PAGE) + dma_unmap_page(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + else + dma_unmap_single(mp->dev->dev.parent, + desc->buf_ptr, + desc->byte_cnt, + DMA_TO_DEVICE); + } + + if (cmd_sts & TX_ENABLE_INTERRUPT) { + struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); + + if (!WARN_ON(!skb)) + dev_kfree_skb(skb); + } + + if (cmd_sts & ERROR_SUMMARY) { + netdev_info(mp->dev, "tx error\n"); + mp->dev->stats.tx_errors++; + } + + } + + __netif_tx_unlock_bh(nq); + + if (reclaimed < budget) + mp->work_tx &= ~(1 << txq->index); + + return reclaimed; +} + + +/* tx rate control **********************************************************/ +/* + * Set total maximum TX rate (shared by all TX queues for this port) + * to 'rate' bits per second, with a maximum burst of 'burst' bytes. + */ +static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) +{ + int token_rate; + int mtu; + int bucket_size; + + token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); + if (token_rate > 1023) + token_rate = 1023; + + mtu = (mp->dev->mtu + 255) >> 8; + if (mtu > 63) + mtu = 63; + + bucket_size = (burst + 255) >> 8; + if (bucket_size > 65535) + bucket_size = 65535; + + switch (mp->shared->tx_bw_control) { + case TX_BW_CONTROL_OLD_LAYOUT: + wrlp(mp, TX_BW_RATE, token_rate); + wrlp(mp, TX_BW_MTU, mtu); + wrlp(mp, TX_BW_BURST, bucket_size); + break; + case TX_BW_CONTROL_NEW_LAYOUT: + wrlp(mp, TX_BW_RATE_MOVED, token_rate); + wrlp(mp, TX_BW_MTU_MOVED, mtu); + wrlp(mp, TX_BW_BURST_MOVED, bucket_size); + break; + } +} + +static void txq_set_rate(struct tx_queue *txq, int rate, int burst) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int token_rate; + int bucket_size; + + token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); + if (token_rate > 1023) + token_rate = 1023; + + bucket_size = (burst + 255) >> 8; + if (bucket_size > 65535) + bucket_size = 65535; + + wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); + wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); +} + +static void txq_set_fixed_prio_mode(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + int off; + u32 val; + + /* + * Turn on fixed priority mode. 
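A worked example of the register values tx_set_rate() above would compute for the settings applied at port start later in this file (1 Gb/s rate, 16 MiB burst, 1500-byte MTU), assuming a 200 MHz t_clk; the real clock comes from the platform and may differ:

#include <stdio.h>

int main(void)
{
	long long rate = 1000000000;	// bits per second
	long long burst = 16777216;	// bytes
	long long t_clk = 200000000;	// assumed clock, Hz
	int mtu = 1500;

	long long token_rate = ((rate / 1000) * 64) / (t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	int mtu_field = (mtu + 255) >> 8;
	if (mtu_field > 63)
		mtu_field = 63;

	long long bucket = (burst + 255) >> 8;
	if (bucket > 65535)
		bucket = 65535;		// the 16 MiB burst saturates this field

	printf("token_rate=%lld mtu_field=%d bucket=%lld\n",
	       token_rate, mtu_field, bucket);	// 320, 6, 65535
	return 0;
}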
+ */ + off = 0; + switch (mp->shared->tx_bw_control) { + case TX_BW_CONTROL_OLD_LAYOUT: + off = TXQ_FIX_PRIO_CONF; + break; + case TX_BW_CONTROL_NEW_LAYOUT: + off = TXQ_FIX_PRIO_CONF_MOVED; + break; + } + + if (off) { + val = rdlp(mp, off); + val |= 1 << txq->index; + wrlp(mp, off, val); + } +} + + +/* mii management interface *************************************************/ +static void mv643xx_eth_adjust_link(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); + u32 autoneg_disable = FORCE_LINK_PASS | + DISABLE_AUTO_NEG_SPEED_GMII | + DISABLE_AUTO_NEG_FOR_FLOW_CTRL | + DISABLE_AUTO_NEG_FOR_DUPLEX; + + if (mp->phy->autoneg == AUTONEG_ENABLE) { + /* enable auto negotiation */ + pscr &= ~autoneg_disable; + goto out_write; + } + + pscr |= autoneg_disable; + + if (mp->phy->speed == SPEED_1000) { + /* force gigabit, half duplex not supported */ + pscr |= SET_GMII_SPEED_TO_1000; + pscr |= SET_FULL_DUPLEX_MODE; + goto out_write; + } + + pscr &= ~SET_GMII_SPEED_TO_1000; + + if (mp->phy->speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + else + pscr &= ~SET_MII_SPEED_TO_100; + + if (mp->phy->duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + else + pscr &= ~SET_FULL_DUPLEX_MODE; + +out_write: + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + +/* statistics ***************************************************************/ +static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned long tx_packets = 0; + unsigned long tx_bytes = 0; + unsigned long tx_dropped = 0; + int i; + + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + tx_packets += txq->tx_packets; + tx_bytes += txq->tx_bytes; + tx_dropped += txq->tx_dropped; + } + + stats->tx_packets = tx_packets; + stats->tx_bytes = tx_bytes; + stats->tx_dropped = tx_dropped; + + return stats; +} + +static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) +{ + return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); +} + +static void mib_counters_clear(struct mv643xx_eth_private *mp) +{ + int i; + + for (i = 0; i < 0x80; i += 4) + mib_read(mp, i); + + /* Clear non MIB hw counters also */ + rdlp(mp, RX_DISCARD_FRAME_CNT); + rdlp(mp, RX_OVERRUN_FRAME_CNT); +} + +static void mib_counters_update(struct mv643xx_eth_private *mp) +{ + struct mib_counters *p = &mp->mib_counters; + + spin_lock_bh(&mp->mib_counters_lock); + p->good_octets_received += mib_read(mp, 0x00); + p->bad_octets_received += mib_read(mp, 0x08); + p->internal_mac_transmit_err += mib_read(mp, 0x0c); + p->good_frames_received += mib_read(mp, 0x10); + p->bad_frames_received += mib_read(mp, 0x14); + p->broadcast_frames_received += mib_read(mp, 0x18); + p->multicast_frames_received += mib_read(mp, 0x1c); + p->frames_64_octets += mib_read(mp, 0x20); + p->frames_65_to_127_octets += mib_read(mp, 0x24); + p->frames_128_to_255_octets += mib_read(mp, 0x28); + p->frames_256_to_511_octets += mib_read(mp, 0x2c); + p->frames_512_to_1023_octets += mib_read(mp, 0x30); + p->frames_1024_to_max_octets += mib_read(mp, 0x34); + p->good_octets_sent += mib_read(mp, 0x38); + p->good_frames_sent += mib_read(mp, 0x40); + p->excessive_collision += mib_read(mp, 0x44); + p->multicast_frames_sent += mib_read(mp, 0x48); + p->broadcast_frames_sent += mib_read(mp, 0x4c); + p->unrec_mac_control_received += mib_read(mp, 0x50); + p->fc_sent += mib_read(mp, 0x54); + p->good_fc_received 
+= mib_read(mp, 0x58); + p->bad_fc_received += mib_read(mp, 0x5c); + p->undersize_received += mib_read(mp, 0x60); + p->fragments_received += mib_read(mp, 0x64); + p->oversize_received += mib_read(mp, 0x68); + p->jabber_received += mib_read(mp, 0x6c); + p->mac_receive_error += mib_read(mp, 0x70); + p->bad_crc_event += mib_read(mp, 0x74); + p->collision += mib_read(mp, 0x78); + p->late_collision += mib_read(mp, 0x7c); + /* Non MIB hardware counters */ + p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); + p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); + spin_unlock_bh(&mp->mib_counters_lock); +} + +static void mib_counters_timer_wrapper(unsigned long _mp) +{ + struct mv643xx_eth_private *mp = (void *)_mp; + mib_counters_update(mp); + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); +} + + +/* interrupt coalescing *****************************************************/ +/* + * Hardware coalescing parameters are set in units of 64 t_clk + * cycles. I.e.: + * + * coal_delay_in_usec = 64000000 * register_value / t_clk_rate + * + * register_value = coal_delay_in_usec * t_clk_rate / 64000000 + * + * In the ->set*() methods, we round the computed register value + * to the nearest integer. + */ +static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) +{ + u32 val = rdlp(mp, SDMA_CONFIG); + u64 temp; + + if (mp->shared->extended_rx_coal_limit) + temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); + else + temp = (val & 0x003fff00) >> 8; + + temp *= 64000000; + do_div(temp, mp->t_clk); + + return (unsigned int)temp; +} + +static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) +{ + u64 temp; + u32 val; + + temp = (u64)usec * mp->t_clk; + temp += 31999999; + do_div(temp, 64000000); + + val = rdlp(mp, SDMA_CONFIG); + if (mp->shared->extended_rx_coal_limit) { + if (temp > 0xffff) + temp = 0xffff; + val &= ~0x023fff80; + val |= (temp & 0x8000) << 10; + val |= (temp & 0x7fff) << 7; + } else { + if (temp > 0x3fff) + temp = 0x3fff; + val &= ~0x003fff00; + val |= (temp & 0x3fff) << 8; + } + wrlp(mp, SDMA_CONFIG, val); +} + +static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) +{ + u64 temp; + + temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; + temp *= 64000000; + do_div(temp, mp->t_clk); + + return (unsigned int)temp; +} + +static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) +{ + u64 temp; + + temp = (u64)usec * mp->t_clk; + temp += 31999999; + do_div(temp, 64000000); + + if (temp > 0x3fff) + temp = 0x3fff; + + wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); +} + + +/* ethtool ******************************************************************/ +struct mv643xx_eth_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int netdev_off; + int mp_off; +}; + +#define SSTAT(m) \ + { #m, FIELD_SIZEOF(struct net_device_stats, m), \ + offsetof(struct net_device, stats.m), -1 } + +#define MIBSTAT(m) \ + { #m, FIELD_SIZEOF(struct mib_counters, m), \ + -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } + +static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { + SSTAT(rx_packets), + SSTAT(tx_packets), + SSTAT(rx_bytes), + SSTAT(tx_bytes), + SSTAT(rx_errors), + SSTAT(tx_errors), + SSTAT(rx_dropped), + SSTAT(tx_dropped), + MIBSTAT(good_octets_received), + MIBSTAT(bad_octets_received), + MIBSTAT(internal_mac_transmit_err), + MIBSTAT(good_frames_received), + MIBSTAT(bad_frames_received), + MIBSTAT(broadcast_frames_received), + MIBSTAT(multicast_frames_received), + MIBSTAT(frames_64_octets), + 
MIBSTAT(frames_65_to_127_octets), + MIBSTAT(frames_128_to_255_octets), + MIBSTAT(frames_256_to_511_octets), + MIBSTAT(frames_512_to_1023_octets), + MIBSTAT(frames_1024_to_max_octets), + MIBSTAT(good_octets_sent), + MIBSTAT(good_frames_sent), + MIBSTAT(excessive_collision), + MIBSTAT(multicast_frames_sent), + MIBSTAT(broadcast_frames_sent), + MIBSTAT(unrec_mac_control_received), + MIBSTAT(fc_sent), + MIBSTAT(good_fc_received), + MIBSTAT(bad_fc_received), + MIBSTAT(undersize_received), + MIBSTAT(fragments_received), + MIBSTAT(oversize_received), + MIBSTAT(jabber_received), + MIBSTAT(mac_receive_error), + MIBSTAT(bad_crc_event), + MIBSTAT(collision), + MIBSTAT(late_collision), + MIBSTAT(rx_discard), + MIBSTAT(rx_overrun), +}; + +static int +mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, + struct ethtool_cmd *cmd) +{ + int err; + + err = phy_read_status(mp->phy); + if (err == 0) + err = phy_ethtool_gset(mp->phy, cmd); + + /* + * The MAC does not support 1000baseT_Half. + */ + cmd->supported &= ~SUPPORTED_1000baseT_Half; + cmd->advertising &= ~ADVERTISED_1000baseT_Half; + + return err; +} + +static int +mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, + struct ethtool_cmd *cmd) +{ + u32 port_status; + + port_status = rdlp(mp, PORT_STATUS); + + cmd->supported = SUPPORTED_MII; + cmd->advertising = ADVERTISED_MII; + switch (port_status & PORT_SPEED_MASK) { + case PORT_SPEED_10: + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + case PORT_SPEED_100: + ethtool_cmd_speed_set(cmd, SPEED_100); + break; + case PORT_SPEED_1000: + ethtool_cmd_speed_set(cmd, SPEED_1000); + break; + default: + cmd->speed = -1; + break; + } + cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; + cmd->port = PORT_MII; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + return 0; +} + +static void +mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + wol->supported = 0; + wol->wolopts = 0; + if (mp->phy) + phy_ethtool_get_wol(mp->phy, wol); +} + +static int +mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int err; + + if (mp->phy == NULL) + return -EOPNOTSUPP; + + err = phy_ethtool_set_wol(mp->phy, wol); + /* Given that mv643xx_eth works without the marvell-specific PHY driver, + * this debugging hint is useful to have. + */ + if (err == -EOPNOTSUPP) + netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n"); + return err; +} + +static int +mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (mp->phy != NULL) + return mv643xx_eth_get_settings_phy(mp, cmd); + else + return mv643xx_eth_get_settings_phyless(mp, cmd); +} + +static int +mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int ret; + + if (mp->phy == NULL) + return -EINVAL; + + /* + * The MAC does not support 1000baseT_Half. 
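Referring back to the coalescing helpers just above the ethtool table, here is a worked conversion between microseconds and the register value using the documented formula (register = usec * t_clk / 64000000, rounded), assuming a 200 MHz t_clk; the clamp differs between the basic (0x3fff) and extended (0xffff) layouts handled by set_rx_coal():

#include <stdio.h>

int main(void)
{
	unsigned long long t_clk = 200000000;	// assumed clock, Hz
	unsigned long long usec = 250;		// requested delay

	unsigned long long reg = (usec * t_clk + 31999999) / 64000000;
	unsigned long long back = reg * 64000000 / t_clk;

	printf("register value : %llu\n", reg);		// 781
	printf("effective delay: %llu usec\n", back);	// 249
	return 0;
}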
+ */ + cmd->advertising &= ~ADVERTISED_1000baseT_Half; + + ret = phy_ethtool_sset(mp->phy, cmd); + if (!ret) + mv643xx_eth_adjust_link(dev); + return ret; +} + +static void mv643xx_eth_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, mv643xx_eth_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mv643xx_eth_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); + drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); +} + +static int mv643xx_eth_nway_reset(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (mp->phy == NULL) + return -EINVAL; + + return genphy_restart_aneg(mp->phy); +} + +static int +mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + ec->rx_coalesce_usecs = get_rx_coal(mp); + ec->tx_coalesce_usecs = get_tx_coal(mp); + + return 0; +} + +static int +mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + set_rx_coal(mp, ec->rx_coalesce_usecs); + set_tx_coal(mp, ec->tx_coalesce_usecs); + + return 0; +} + +static void +mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + er->rx_max_pending = 4096; + er->tx_max_pending = 4096; + + er->rx_pending = mp->rx_ring_size; + er->tx_pending = mp->tx_ring_size; +} + +static int +mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (er->rx_mini_pending || er->rx_jumbo_pending) + return -EINVAL; + + mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; + mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, + MV643XX_MAX_SKB_DESCS * 2, 4096); + if (mp->tx_ring_size != er->tx_pending) + netdev_warn(dev, "TX queue size set to %u (requested %u)\n", + mp->tx_ring_size, er->tx_pending); + + if (netif_running(dev)) { + mv643xx_eth_stop(dev); + if (mv643xx_eth_open(dev)) { + netdev_err(dev, + "fatal error on re-opening device after ring param change\n"); + return -ENOMEM; + } + } + + return 0; +} + + +static int +mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + bool rx_csum = features & NETIF_F_RXCSUM; + + wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); + + return 0; +} + +static void mv643xx_eth_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + int i; + + if (stringset == ETH_SS_STATS) { + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { + memcpy(data + i * ETH_GSTRING_LEN, + mv643xx_eth_stats[i].stat_string, + ETH_GSTRING_LEN); + } + } +} + +static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int i; + + mv643xx_eth_get_stats(dev); + mib_counters_update(mp); + + for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { + const struct mv643xx_eth_stats *stat; + void *p; + + stat = mv643xx_eth_stats + i; + + if (stat->netdev_off >= 0) + p = ((void *)mp->dev) + stat->netdev_off; + else + p = ((void *)mp) + stat->mp_off; + + data[i] = (stat->sizeof_stat == 8) ? 
+ *(uint64_t *)p : *(uint32_t *)p; + } +} + +static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mv643xx_eth_stats); + + return -EOPNOTSUPP; +} + +static const struct ethtool_ops mv643xx_eth_ethtool_ops = { + .get_settings = mv643xx_eth_get_settings, + .set_settings = mv643xx_eth_set_settings, + .get_drvinfo = mv643xx_eth_get_drvinfo, + .nway_reset = mv643xx_eth_nway_reset, + .get_link = ethtool_op_get_link, + .get_coalesce = mv643xx_eth_get_coalesce, + .set_coalesce = mv643xx_eth_set_coalesce, + .get_ringparam = mv643xx_eth_get_ringparam, + .set_ringparam = mv643xx_eth_set_ringparam, + .get_strings = mv643xx_eth_get_strings, + .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, + .get_sset_count = mv643xx_eth_get_sset_count, + .get_ts_info = ethtool_op_get_ts_info, + .get_wol = mv643xx_eth_get_wol, + .set_wol = mv643xx_eth_set_wol, +}; + + +/* address handling *********************************************************/ +static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) +{ + unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); + unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); + + addr[0] = (mac_h >> 24) & 0xff; + addr[1] = (mac_h >> 16) & 0xff; + addr[2] = (mac_h >> 8) & 0xff; + addr[3] = mac_h & 0xff; + addr[4] = (mac_l >> 8) & 0xff; + addr[5] = mac_l & 0xff; +} + +static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) +{ + wrlp(mp, MAC_ADDR_HIGH, + (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); + wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); +} + +static u32 uc_addr_filter_mask(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + u32 nibbles; + + if (dev->flags & IFF_PROMISC) + return 0; + + nibbles = 1 << (dev->dev_addr[5] & 0x0f); + netdev_for_each_uc_addr(ha, dev) { + if (memcmp(dev->dev_addr, ha->addr, 5)) + return 0; + if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) + return 0; + + nibbles |= 1 << (ha->addr[5] & 0x0f); + } + + return nibbles; +} + +static void mv643xx_eth_program_unicast_filter(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 port_config; + u32 nibbles; + int i; + + uc_addr_set(mp, dev->dev_addr); + + port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; + + nibbles = uc_addr_filter_mask(dev); + if (!nibbles) { + port_config |= UNICAST_PROMISCUOUS_MODE; + nibbles = 0xffff; + } + + for (i = 0; i < 16; i += 4) { + int off = UNICAST_TABLE(mp->port_num) + i; + u32 v; + + v = 0; + if (nibbles & 1) + v |= 0x00000001; + if (nibbles & 2) + v |= 0x00000100; + if (nibbles & 4) + v |= 0x00010000; + if (nibbles & 8) + v |= 0x01000000; + nibbles >>= 4; + + wrl(mp, off, v); + } + + wrlp(mp, PORT_CONFIG, port_config); +} + +static int addr_crc(unsigned char *addr) +{ + int crc = 0; + int i; + + for (i = 0; i < 6; i++) { + int j; + + crc = (crc ^ addr[i]) << 8; + for (j = 7; j >= 0; j--) { + if (crc & (0x100 << j)) + crc ^= 0x107 << j; + } + } + + return crc; +} + +static void mv643xx_eth_program_multicast_filter(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + u32 *mc_spec; + u32 *mc_other; + struct netdev_hw_addr *ha; + int i; + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + int port_num; + u32 accept; + +oom: + port_num = mp->port_num; + accept = 0x01010101; + for (i = 0; i < 0x100; i += 4) { + wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); + wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); + } + return; + } + + mc_spec = kmalloc(0x200, GFP_ATOMIC); + if 
(mc_spec == NULL) + goto oom; + mc_other = mc_spec + (0x100 >> 2); + + memset(mc_spec, 0, 0x100); + memset(mc_other, 0, 0x100); + + netdev_for_each_mc_addr(ha, dev) { + u8 *a = ha->addr; + u32 *table; + int entry; + + if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { + table = mc_spec; + entry = a[5]; + } else { + table = mc_other; + entry = addr_crc(a); + } + + table[entry >> 2] |= 1 << (8 * (entry & 3)); + } + + for (i = 0; i < 0x100; i += 4) { + wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); + wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); + } + + kfree(mc_spec); +} + +static void mv643xx_eth_set_rx_mode(struct net_device *dev) +{ + mv643xx_eth_program_unicast_filter(dev); + mv643xx_eth_program_multicast_filter(dev); +} + +static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + + netif_addr_lock_bh(dev); + mv643xx_eth_program_unicast_filter(dev); + netif_addr_unlock_bh(dev); + + return 0; +} + + +/* rx/tx queue initialisation ***********************************************/ +static int rxq_init(struct mv643xx_eth_private *mp, int index) +{ + struct rx_queue *rxq = mp->rxq + index; + struct rx_desc *rx_desc; + int size; + int i; + + rxq->index = index; + + rxq->rx_ring_size = mp->rx_ring_size; + + rxq->rx_desc_count = 0; + rxq->rx_curr_desc = 0; + rxq->rx_used_desc = 0; + + size = rxq->rx_ring_size * sizeof(struct rx_desc); + + if (index == 0 && size <= mp->rx_desc_sram_size) { + rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, + mp->rx_desc_sram_size); + rxq->rx_desc_dma = mp->rx_desc_sram_addr; + } else { + rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &rxq->rx_desc_dma, + GFP_KERNEL); + } + + if (rxq->rx_desc_area == NULL) { + netdev_err(mp->dev, + "can't allocate rx ring (%d bytes)\n", size); + goto out; + } + memset(rxq->rx_desc_area, 0, size); + + rxq->rx_desc_area_size = size; + rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), + GFP_KERNEL); + if (rxq->rx_skb == NULL) + goto out_free; + + rx_desc = rxq->rx_desc_area; + for (i = 0; i < rxq->rx_ring_size; i++) { + int nexti; + + nexti = i + 1; + if (nexti == rxq->rx_ring_size) + nexti = 0; + + rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + + nexti * sizeof(struct rx_desc); + } + + return 0; + + +out_free: + if (index == 0 && size <= mp->rx_desc_sram_size) + iounmap(rxq->rx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, size, + rxq->rx_desc_area, + rxq->rx_desc_dma); + +out: + return -ENOMEM; +} + +static void rxq_deinit(struct rx_queue *rxq) +{ + struct mv643xx_eth_private *mp = rxq_to_mp(rxq); + int i; + + rxq_disable(rxq); + + for (i = 0; i < rxq->rx_ring_size; i++) { + if (rxq->rx_skb[i]) { + dev_kfree_skb(rxq->rx_skb[i]); + rxq->rx_desc_count--; + } + } + + if (rxq->rx_desc_count) { + netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", + rxq->rx_desc_count); + } + + if (rxq->index == 0 && + rxq->rx_desc_area_size <= mp->rx_desc_sram_size) + iounmap(rxq->rx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, + rxq->rx_desc_area, rxq->rx_desc_dma); + + kfree(rxq->rx_skb); +} + +static int txq_init(struct mv643xx_eth_private *mp, int index) +{ + struct tx_queue *txq = mp->txq + index; + struct tx_desc *tx_desc; + int size; + int ret; + int i; + + txq->index = index; + + txq->tx_ring_size = mp->tx_ring_size; + + /* A queue must always 
have room for at least one skb. + * Therefore, stop the queue when the free entries reaches + * the maximum number of descriptors per skb. + */ + txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS; + txq->tx_wake_threshold = txq->tx_stop_threshold / 2; + + txq->tx_desc_count = 0; + txq->tx_curr_desc = 0; + txq->tx_used_desc = 0; + + size = txq->tx_ring_size * sizeof(struct tx_desc); + + if (index == 0 && size <= mp->tx_desc_sram_size) { + txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, + mp->tx_desc_sram_size); + txq->tx_desc_dma = mp->tx_desc_sram_addr; + } else { + txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, + size, &txq->tx_desc_dma, + GFP_KERNEL); + } + + if (txq->tx_desc_area == NULL) { + netdev_err(mp->dev, + "can't allocate tx ring (%d bytes)\n", size); + return -ENOMEM; + } + memset(txq->tx_desc_area, 0, size); + + txq->tx_desc_area_size = size; + + tx_desc = txq->tx_desc_area; + for (i = 0; i < txq->tx_ring_size; i++) { + struct tx_desc *txd = tx_desc + i; + int nexti; + + nexti = i + 1; + if (nexti == txq->tx_ring_size) + nexti = 0; + + txd->cmd_sts = 0; + txd->next_desc_ptr = txq->tx_desc_dma + + nexti * sizeof(struct tx_desc); + } + + txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), + GFP_KERNEL); + if (!txq->tx_desc_mapping) { + ret = -ENOMEM; + goto err_free_desc_area; + } + + /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ + txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, + txq->tx_ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, GFP_KERNEL); + if (txq->tso_hdrs == NULL) { + ret = -ENOMEM; + goto err_free_desc_mapping; + } + skb_queue_head_init(&txq->tx_skb); + + return 0; + +err_free_desc_mapping: + kfree(txq->tx_desc_mapping); +err_free_desc_area: + if (index == 0 && size <= mp->tx_desc_sram_size) + iounmap(txq->tx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, + txq->tx_desc_area, txq->tx_desc_dma); + return ret; +} + +static void txq_deinit(struct tx_queue *txq) +{ + struct mv643xx_eth_private *mp = txq_to_mp(txq); + + txq_disable(txq); + txq_reclaim(txq, txq->tx_ring_size, 1); + + BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); + + if (txq->index == 0 && + txq->tx_desc_area_size <= mp->tx_desc_sram_size) + iounmap(txq->tx_desc_area); + else + dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, + txq->tx_desc_area, txq->tx_desc_dma); + kfree(txq->tx_desc_mapping); + + if (txq->tso_hdrs) + dma_free_coherent(mp->dev->dev.parent, + txq->tx_ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, txq->tso_hdrs_dma); +} + + +/* netdev ops and related ***************************************************/ +static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) +{ + u32 int_cause; + u32 int_cause_ext; + + int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; + if (int_cause == 0) + return 0; + + int_cause_ext = 0; + if (int_cause & INT_EXT) { + int_cause &= ~INT_EXT; + int_cause_ext = rdlp(mp, INT_CAUSE_EXT); + } + + if (int_cause) { + wrlp(mp, INT_CAUSE, ~int_cause); + mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & + ~(rdlp(mp, TXQ_COMMAND) & 0xff); + mp->work_rx |= (int_cause & INT_RX) >> 2; + } + + int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; + if (int_cause_ext) { + wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); + if (int_cause_ext & INT_EXT_LINK_PHY) + mp->work_link = 1; + mp->work_tx |= int_cause_ext & INT_EXT_TX; + } + + return 1; +} + +static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + 
struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (unlikely(!mv643xx_eth_collect_events(mp))) + return IRQ_NONE; + + wrlp(mp, INT_MASK, 0); + napi_schedule(&mp->napi); + + return IRQ_HANDLED; +} + +static void handle_link_event(struct mv643xx_eth_private *mp) +{ + struct net_device *dev = mp->dev; + u32 port_status; + int speed; + int duplex; + int fc; + + port_status = rdlp(mp, PORT_STATUS); + if (!(port_status & LINK_UP)) { + if (netif_carrier_ok(dev)) { + int i; + + netdev_info(dev, "link down\n"); + + netif_carrier_off(dev); + + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + txq_reclaim(txq, txq->tx_ring_size, 1); + txq_reset_hw_ptr(txq); + } + } + return; + } + + switch (port_status & PORT_SPEED_MASK) { + case PORT_SPEED_10: + speed = 10; + break; + case PORT_SPEED_100: + speed = 100; + break; + case PORT_SPEED_1000: + speed = 1000; + break; + default: + speed = -1; + break; + } + duplex = (port_status & FULL_DUPLEX) ? 1 : 0; + fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; + + netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", + speed, duplex ? "full" : "half", fc ? "en" : "dis"); + + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); +} + +static int mv643xx_eth_poll(struct napi_struct *napi, int budget) +{ + struct mv643xx_eth_private *mp; + int work_done; + + mp = container_of(napi, struct mv643xx_eth_private, napi); + + if (unlikely(mp->oom)) { + mp->oom = 0; + del_timer(&mp->rx_oom); + } + + work_done = 0; + while (work_done < budget) { + u8 queue_mask; + int queue; + int work_tbd; + + if (mp->work_link) { + mp->work_link = 0; + handle_link_event(mp); + work_done++; + continue; + } + + queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; + if (likely(!mp->oom)) + queue_mask |= mp->work_rx_refill; + + if (!queue_mask) { + if (mv643xx_eth_collect_events(mp)) + continue; + break; + } + + queue = fls(queue_mask) - 1; + queue_mask = 1 << queue; + + work_tbd = budget - work_done; + if (work_tbd > 16) + work_tbd = 16; + + if (mp->work_tx_end & queue_mask) { + txq_kick(mp->txq + queue); + } else if (mp->work_tx & queue_mask) { + work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); + txq_maybe_wake(mp->txq + queue); + } else if (mp->work_rx & queue_mask) { + work_done += rxq_process(mp->rxq + queue, work_tbd); + } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { + work_done += rxq_refill(mp->rxq + queue, work_tbd); + } else { + BUG(); + } + } + + if (work_done < budget) { + if (mp->oom) + mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); + napi_complete(napi); + wrlp(mp, INT_MASK, mp->int_mask); + } + + return work_done; +} + +static inline void oom_timer_wrapper(unsigned long data) +{ + struct mv643xx_eth_private *mp = (void *)data; + + napi_schedule(&mp->napi); +} + +static void port_start(struct mv643xx_eth_private *mp) +{ + u32 pscr; + int i; + + /* + * Perform PHY reset, if there is a PHY. + */ + if (mp->phy != NULL) { + struct ethtool_cmd cmd; + + mv643xx_eth_get_settings(mp->dev, &cmd); + phy_init_hw(mp->phy); + mv643xx_eth_set_settings(mp->dev, &cmd); + phy_start(mp->phy); + } + + /* + * Configure basic link parameters. + */ + pscr = rdlp(mp, PORT_SERIAL_CONTROL); + + pscr |= SERIAL_PORT_ENABLE; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + + pscr |= DO_NOT_FORCE_LINK_FAIL; + if (mp->phy == NULL) + pscr |= FORCE_LINK_PASS; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + + /* + * Configure TX path and queues. 
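mv643xx_eth_poll() above services one queue per loop iteration, picking the highest-numbered queue with pending work via fls(queue_mask) - 1. A minimal sketch of that selection, with highest_bit() as a plain-C stand-in for the kernel's fls():

#include <stdio.h>

/* The poll loop combines the per-queue work bits and services the
 * highest set bit first (fls(mask) - 1 in the driver).
 */
static int highest_bit(unsigned int mask)
{
        int bit = -1;

        while (mask) {
                bit++;
                mask >>= 1;
        }
        return bit;     /* -1 when no work is pending */
}

int main(void)
{
        unsigned int work_rx = 0x05;    /* queues 0 and 2 have RX work */
        unsigned int work_tx = 0x02;    /* queue 1 has TX work */
        unsigned int queue_mask = work_rx | work_tx;

        printf("service queue %d first\n", highest_bit(queue_mask));   /* 2 */
        return 0;
}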
+ */ + tx_set_rate(mp, 1000000000, 16777216); + for (i = 0; i < mp->txq_count; i++) { + struct tx_queue *txq = mp->txq + i; + + txq_reset_hw_ptr(txq); + txq_set_rate(txq, 1000000000, 16777216); + txq_set_fixed_prio_mode(txq); + } + + /* + * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast + * frames to RX queue #0, and include the pseudo-header when + * calculating receive checksums. + */ + mv643xx_eth_set_features(mp->dev, mp->dev->features); + + /* + * Treat BPDUs as normal multicasts, and disable partition mode. + */ + wrlp(mp, PORT_CONFIG_EXT, 0x00000000); + + /* + * Add configured unicast addresses to address filter table. + */ + mv643xx_eth_program_unicast_filter(mp->dev); + + /* + * Enable the receive queues. + */ + for (i = 0; i < mp->rxq_count; i++) { + struct rx_queue *rxq = mp->rxq + i; + u32 addr; + + addr = (u32)rxq->rx_desc_dma; + addr += rxq->rx_curr_desc * sizeof(struct rx_desc); + wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); + + rxq_enable(rxq); + } +} + +static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) +{ + int skb_size; + + /* + * Reserve 2+14 bytes for an ethernet header (the hardware + * automatically prepends 2 bytes of dummy data to each + * received packet), 16 bytes for up to four VLAN tags, and + * 4 bytes for the trailing FCS -- 36 bytes total. + */ + skb_size = mp->dev->mtu + 36; + + /* + * Make sure that the skb size is a multiple of 8 bytes, as + * the lower three bits of the receive descriptor's buffer + * size field are ignored by the hardware. + */ + mp->skb_size = (skb_size + 7) & ~7; + + /* + * If NET_SKB_PAD is smaller than a cache line, + * netdev_alloc_skb() will cause skb->data to be misaligned + * to a cache line boundary. If this is the case, include + * some extra space to allow re-aligning the data area. 
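The buffer sizing just described (MTU plus 36 bytes of overhead, rounded up to a multiple of 8, plus optional realignment slack) can be restated as a small standalone helper. The function name and the zero realignment pad used in the example are assumptions, since SKB_DMA_REALIGN depends on NET_SKB_PAD relative to the cache line size:

#include <stdio.h>

/* Same arithmetic as mv643xx_eth_recalc_skb_size(): 2 bytes of hardware
 * padding + 14 bytes Ethernet header + 16 bytes for up to four VLAN tags
 * + 4 bytes FCS = 36 bytes on top of the MTU, then rounded up to a
 * multiple of 8 because the low three bits of the RX descriptor's buffer
 * size field are ignored by the hardware.
 */
static unsigned int recalc_skb_size(unsigned int mtu, unsigned int realign_pad)
{
        unsigned int skb_size = mtu + 36;

        skb_size = (skb_size + 7) & ~7u;  /* round up to 8-byte multiple */
        return skb_size + realign_pad;    /* optional cache-line realign slack */
}

int main(void)
{
        /* 1500 + 36 = 1536, already a multiple of 8; no realign pad assumed */
        printf("%u\n", recalc_skb_size(1500, 0));
        return 0;
}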
+ */ + mp->skb_size += SKB_DMA_REALIGN; +} + +static int mv643xx_eth_open(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int err; + int i; + + wrlp(mp, INT_CAUSE, 0); + wrlp(mp, INT_CAUSE_EXT, 0); + rdlp(mp, INT_CAUSE_EXT); + + err = request_irq(dev->irq, mv643xx_eth_irq, + IRQF_SHARED, dev->name, dev); + if (err) { + netdev_err(dev, "can't assign irq\n"); + return -EAGAIN; + } + + mv643xx_eth_recalc_skb_size(mp); + + napi_enable(&mp->napi); + + mp->int_mask = INT_EXT; + + for (i = 0; i < mp->rxq_count; i++) { + err = rxq_init(mp, i); + if (err) { + while (--i >= 0) + rxq_deinit(mp->rxq + i); + goto out; + } + + rxq_refill(mp->rxq + i, INT_MAX); + mp->int_mask |= INT_RX_0 << i; + } + + if (mp->oom) { + mp->rx_oom.expires = jiffies + (HZ / 10); + add_timer(&mp->rx_oom); + } + + for (i = 0; i < mp->txq_count; i++) { + err = txq_init(mp, i); + if (err) { + while (--i >= 0) + txq_deinit(mp->txq + i); + goto out_free; + } + mp->int_mask |= INT_TX_END_0 << i; + } + + add_timer(&mp->mib_counters_timer); + port_start(mp); + + wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); + wrlp(mp, INT_MASK, mp->int_mask); + + return 0; + + +out_free: + for (i = 0; i < mp->rxq_count; i++) + rxq_deinit(mp->rxq + i); +out: + free_irq(dev->irq, dev); + + return err; +} + +static void port_reset(struct mv643xx_eth_private *mp) +{ + unsigned int data; + int i; + + for (i = 0; i < mp->rxq_count; i++) + rxq_disable(mp->rxq + i); + for (i = 0; i < mp->txq_count; i++) + txq_disable(mp->txq + i); + + while (1) { + u32 ps = rdlp(mp, PORT_STATUS); + + if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) + break; + udelay(10); + } + + /* Reset the Enable bit in the Configuration Register */ + data = rdlp(mp, PORT_SERIAL_CONTROL); + data &= ~(SERIAL_PORT_ENABLE | + DO_NOT_FORCE_LINK_FAIL | + FORCE_LINK_PASS); + wrlp(mp, PORT_SERIAL_CONTROL, data); +} + +static int mv643xx_eth_stop(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int i; + + wrlp(mp, INT_MASK_EXT, 0x00000000); + wrlp(mp, INT_MASK, 0x00000000); + rdlp(mp, INT_MASK); + + napi_disable(&mp->napi); + + del_timer_sync(&mp->rx_oom); + + netif_carrier_off(dev); + if (mp->phy) + phy_stop(mp->phy); + free_irq(dev->irq, dev); + + port_reset(mp); + mv643xx_eth_get_stats(dev); + mib_counters_update(mp); + del_timer_sync(&mp->mib_counters_timer); + + for (i = 0; i < mp->rxq_count; i++) + rxq_deinit(mp->rxq + i); + for (i = 0; i < mp->txq_count; i++) + txq_deinit(mp->txq + i); + + return 0; +} + +static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + int ret; + + if (mp->phy == NULL) + return -ENOTSUPP; + + ret = phy_mii_ioctl(mp->phy, ifr, cmd); + if (!ret) + mv643xx_eth_adjust_link(dev); + return ret; +} + +static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + if (new_mtu < 64 || new_mtu > 9500) + return -EINVAL; + + dev->mtu = new_mtu; + mv643xx_eth_recalc_skb_size(mp); + tx_set_rate(mp, 1000000000, 16777216); + + if (!netif_running(dev)) + return 0; + + /* + * Stop and then re-open the interface. This will allocate RX + * skbs of the new MTU. + * There is a possible danger that the open will not succeed, + * due to memory being full. 
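port_reset() above waits for the transmitter to drain by polling PORT_STATUS until TX_FIFO_EMPTY is set with TX_IN_PROGRESS clear. A sketch of that condition in isolation; the bit values, the read_status() callback and the iteration cap are assumptions made so the snippet is self-contained (the driver itself polls with udelay(10) and no explicit bound):

#include <stdint.h>
#include <stdio.h>

#define TX_IN_PROGRESS  0x00000080u     /* placeholder bit positions for the sketch */
#define TX_FIFO_EMPTY   0x00000400u     /* the driver defines these earlier in the file */

/* Drained means TX_FIFO_EMPTY set and TX_IN_PROGRESS clear, the same
 * test port_reset() makes; read_status() stands in for rdlp(mp, PORT_STATUS).
 */
static int wait_tx_drained(uint32_t (*read_status)(void), int max_polls)
{
        while (max_polls-- > 0) {
                uint32_t ps = read_status();

                if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
                        return 0;       /* drained */
                /* the driver delays 10us between polls */
        }
        return -1;                      /* still busy after max_polls */
}

static uint32_t fake_status(void) { return TX_FIFO_EMPTY; }

int main(void)
{
        printf("%d\n", wait_tx_drained(fake_status, 100));
        return 0;
}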
+ */ + mv643xx_eth_stop(dev); + if (mv643xx_eth_open(dev)) { + netdev_err(dev, + "fatal error on re-opening device after MTU change\n"); + } + + return 0; +} + +static void tx_timeout_task(struct work_struct *ugly) +{ + struct mv643xx_eth_private *mp; + + mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); + if (netif_running(mp->dev)) { + netif_tx_stop_all_queues(mp->dev); + port_reset(mp); + port_start(mp); + netif_tx_wake_all_queues(mp->dev); + } +} + +static void mv643xx_eth_tx_timeout(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + netdev_info(dev, "tx timeout\n"); + + schedule_work(&mp->tx_timeout_task); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mv643xx_eth_netpoll(struct net_device *dev) +{ + struct mv643xx_eth_private *mp = netdev_priv(dev); + + wrlp(mp, INT_MASK, 0x00000000); + rdlp(mp, INT_MASK); + + mv643xx_eth_irq(dev->irq, dev); + + wrlp(mp, INT_MASK, mp->int_mask); +} +#endif + + +/* platform glue ************************************************************/ +static void +mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, + const struct mbus_dram_target_info *dram) +{ + void __iomem *base = msp->base; + u32 win_enable; + u32 win_protect; + int i; + + for (i = 0; i < 6; i++) { + writel(0, base + WINDOW_BASE(i)); + writel(0, base + WINDOW_SIZE(i)); + if (i < 4) + writel(0, base + WINDOW_REMAP_HIGH(i)); + } + + win_enable = 0x3f; + win_protect = 0; + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel((cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | + dram->mbus_dram_target_id, base + WINDOW_BASE(i)); + writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); + + win_enable &= ~(1 << i); + win_protect |= 3 << (2 * i); + } + + writel(win_enable, base + WINDOW_BAR_ENABLE); + msp->win_protect = win_protect; +} + +static void infer_hw_params(struct mv643xx_eth_shared_private *msp) +{ + /* + * Check whether we have a 14-bit coal limit field in bits + * [21:8], or a 16-bit coal limit in bits [25,21:7] of the + * SDMA config register. + */ + writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); + if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) + msp->extended_rx_coal_limit = 1; + else + msp->extended_rx_coal_limit = 0; + + /* + * Check whether the MAC supports TX rate control, and if + * yes, whether its associated registers are in the old or + * the new place. 
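mv643xx_eth_conf_mbus_windows() above starts from "all six windows disabled" (0x3f) and, per DRAM chip select, clears one enable bit and sets a two-bit field (value 3) in the protect mask. The arithmetic on its own, with an invented helper name and the register writes left out:

#include <stdio.h>
#include <stdint.h>

/* Compute the WINDOW_BAR_ENABLE value and the protect mask the way the
 * driver does: clear one enable bit and set the corresponding two-bit
 * protect field (3) for each DRAM chip select.
 */
static void mbus_window_masks(int num_cs, uint32_t *win_enable,
                              uint32_t *win_protect)
{
        int i;

        *win_enable = 0x3f;
        *win_protect = 0;
        for (i = 0; i < num_cs; i++) {
                *win_enable &= ~(1u << i);
                *win_protect |= 3u << (2 * i);
        }
}

int main(void)
{
        uint32_t en, prot;

        mbus_window_masks(2, &en, &prot);       /* two chip selects */
        printf("enable=0x%02x protect=0x%02x\n",
               (unsigned)en, (unsigned)prot);   /* 0x3c, 0x0f */
        return 0;
}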
+ */ + writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); + if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { + msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; + } else { + writel(7, msp->base + 0x0400 + TX_BW_RATE); + if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) + msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; + else + msp->tx_bw_control = TX_BW_CONTROL_ABSENT; + } +} + +#if defined(CONFIG_OF) +static const struct of_device_id mv643xx_eth_shared_ids[] = { + { .compatible = "marvell,orion-eth", }, + { .compatible = "marvell,kirkwood-eth", }, + { } +}; +MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); +#endif + +#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60) +#define mv643xx_eth_property(_np, _name, _v) \ + do { \ + u32 tmp; \ + if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \ + _v = tmp; \ + } while (0) + +static struct platform_device *port_platdev[3]; + +static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, + struct device_node *pnp) +{ + struct platform_device *ppdev; + struct mv643xx_eth_platform_data ppd; + struct resource res; + const char *mac_addr; + int ret; + int dev_num = 0; + + memset(&ppd, 0, sizeof(ppd)); + ppd.shared = pdev; + + memset(&res, 0, sizeof(res)); + if (!of_irq_to_resource(pnp, 0, &res)) { + dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); + return -EINVAL; + } + + if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { + dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name); + return -EINVAL; + } + + if (ppd.port_number >= 3) { + dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name); + return -EINVAL; + } + + while (dev_num < 3 && port_platdev[dev_num]) + dev_num++; + + if (dev_num == 3) { + dev_err(&pdev->dev, "too many ports registered\n"); + return -EINVAL; + } + + mac_addr = of_get_mac_address(pnp); + if (mac_addr) + memcpy(ppd.mac_addr, mac_addr, ETH_ALEN); + + mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size); + mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr); + mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size); + mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size); + mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr); + mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size); + + ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0); + if (!ppd.phy_node) { + ppd.phy_addr = MV643XX_ETH_PHY_NONE; + of_property_read_u32(pnp, "speed", &ppd.speed); + of_property_read_u32(pnp, "duplex", &ppd.duplex); + } + + ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); + if (!ppdev) + return -ENOMEM; + ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + ppdev->dev.of_node = pnp; + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto port_err; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto port_err; + + ret = platform_device_add(ppdev); + if (ret) + goto port_err; + + port_platdev[dev_num] = ppdev; + + return 0; + +port_err: + platform_device_put(ppdev); + return ret; +} + +static int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + struct mv643xx_eth_shared_platform_data *pd; + struct device_node *pnp, *np = pdev->dev.of_node; + int ret; + + /* bail out if not registered from DT */ + if (!np) + return 0; + + pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + pdev->dev.platform_data = pd; + + mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit); + + for_each_available_child_of_node(np, pnp) { + ret = 
mv643xx_eth_shared_of_add_port(pdev, pnp); + if (ret) + return ret; + } + return 0; +} + +static void mv643xx_eth_shared_of_remove(void) +{ + int n; + + for (n = 0; n < 3; n++) { + platform_device_del(port_platdev[n]); + port_platdev[n] = NULL; + } +} +#else +static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev) +{ + return 0; +} + +static inline void mv643xx_eth_shared_of_remove(void) +{ +} +#endif + +static int mv643xx_eth_shared_probe(struct platform_device *pdev) +{ + static int mv643xx_eth_version_printed; + struct mv643xx_eth_shared_platform_data *pd; + struct mv643xx_eth_shared_private *msp; + const struct mbus_dram_target_info *dram; + struct resource *res; + int ret; + + if (!mv643xx_eth_version_printed++) + pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", + mv643xx_eth_driver_version); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) + return -EINVAL; + + msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); + if (msp == NULL) + return -ENOMEM; + platform_set_drvdata(pdev, msp); + + msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (msp->base == NULL) + return -ENOMEM; + + msp->clk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(msp->clk)) + clk_prepare_enable(msp->clk); + + /* + * (Re-)program MBUS remapping windows if we are asked to. + */ + dram = mv_mbus_dram_info(); + if (dram) + mv643xx_eth_conf_mbus_windows(msp, dram); + + ret = mv643xx_eth_shared_of_probe(pdev); + if (ret) + return ret; + pd = dev_get_platdata(&pdev->dev); + + msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? + pd->tx_csum_limit : 9 * 1024; + infer_hw_params(msp); + + return 0; +} + +static int mv643xx_eth_shared_remove(struct platform_device *pdev) +{ + struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); + + mv643xx_eth_shared_of_remove(); + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return 0; +} + +static struct platform_driver mv643xx_eth_shared_driver = { + .probe = mv643xx_eth_shared_probe, + .remove = mv643xx_eth_shared_remove, + .driver = { + .name = MV643XX_ETH_SHARED_NAME, + .of_match_table = of_match_ptr(mv643xx_eth_shared_ids), + }, +}; + +static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) +{ + int addr_shift = 5 * mp->port_num; + u32 data; + + data = rdl(mp, PHY_ADDR); + data &= ~(0x1f << addr_shift); + data |= (phy_addr & 0x1f) << addr_shift; + wrl(mp, PHY_ADDR, data); +} + +static int phy_addr_get(struct mv643xx_eth_private *mp) +{ + unsigned int data; + + data = rdl(mp, PHY_ADDR); + + return (data >> (5 * mp->port_num)) & 0x1f; +} + +static void set_params(struct mv643xx_eth_private *mp, + struct mv643xx_eth_platform_data *pd) +{ + struct net_device *dev = mp->dev; + unsigned int tx_ring_size; + + if (is_valid_ether_addr(pd->mac_addr)) + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + else + uc_addr_get(mp, dev->dev_addr); + + mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; + if (pd->rx_queue_size) + mp->rx_ring_size = pd->rx_queue_size; + mp->rx_desc_sram_addr = pd->rx_sram_addr; + mp->rx_desc_sram_size = pd->rx_sram_size; + + mp->rxq_count = pd->rx_queue_count ? 
: 1; + + tx_ring_size = DEFAULT_TX_QUEUE_SIZE; + if (pd->tx_queue_size) + tx_ring_size = pd->tx_queue_size; + + mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size, + MV643XX_MAX_SKB_DESCS * 2, 4096); + if (mp->tx_ring_size != tx_ring_size) + netdev_warn(dev, "TX queue size set to %u (requested %u)\n", + mp->tx_ring_size, tx_ring_size); + + mp->tx_desc_sram_addr = pd->tx_sram_addr; + mp->tx_desc_sram_size = pd->tx_sram_size; + + mp->txq_count = pd->tx_queue_count ? : 1; +} + +static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, + int phy_addr) +{ + struct phy_device *phydev; + int start; + int num; + int i; + char phy_id[MII_BUS_ID_SIZE + 3]; + + if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { + start = phy_addr_get(mp) & 0x1f; + num = 32; + } else { + start = phy_addr & 0x1f; + num = 1; + } + + /* Attempt to connect to the PHY using orion-mdio */ + phydev = ERR_PTR(-ENODEV); + for (i = 0; i < num; i++) { + int addr = (start + i) & 0x1f; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + "orion-mdio-mii", addr); + + phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, + PHY_INTERFACE_MODE_GMII); + if (!IS_ERR(phydev)) { + phy_addr_set(mp, addr); + break; + } + } + + return phydev; +} + +static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) +{ + struct phy_device *phy = mp->phy; + + if (speed == 0) { + phy->autoneg = AUTONEG_ENABLE; + phy->speed = 0; + phy->duplex = 0; + phy->advertising = phy->supported | ADVERTISED_Autoneg; + } else { + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->speed = speed; + phy->duplex = duplex; + } + phy_start_aneg(phy); +} + +static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) +{ + u32 pscr; + + pscr = rdlp(mp, PORT_SERIAL_CONTROL); + if (pscr & SERIAL_PORT_ENABLE) { + pscr &= ~SERIAL_PORT_ENABLE; + wrlp(mp, PORT_SERIAL_CONTROL, pscr); + } + + pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; + if (mp->phy == NULL) { + pscr |= DISABLE_AUTO_NEG_SPEED_GMII; + if (speed == SPEED_1000) + pscr |= SET_GMII_SPEED_TO_1000; + else if (speed == SPEED_100) + pscr |= SET_MII_SPEED_TO_100; + + pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; + + pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; + if (duplex == DUPLEX_FULL) + pscr |= SET_FULL_DUPLEX_MODE; + } + + wrlp(mp, PORT_SERIAL_CONTROL, pscr); +} + +static const struct net_device_ops mv643xx_eth_netdev_ops = { + .ndo_open = mv643xx_eth_open, + .ndo_stop = mv643xx_eth_stop, + .ndo_start_xmit = mv643xx_eth_xmit, + .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, + .ndo_set_mac_address = mv643xx_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mv643xx_eth_ioctl, + .ndo_change_mtu = mv643xx_eth_change_mtu, + .ndo_set_features = mv643xx_eth_set_features, + .ndo_tx_timeout = mv643xx_eth_tx_timeout, + .ndo_get_stats = mv643xx_eth_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mv643xx_eth_netpoll, +#endif +}; + +static int mv643xx_eth_probe(struct platform_device *pdev) +{ + struct mv643xx_eth_platform_data *pd; + struct mv643xx_eth_private *mp; + struct net_device *dev; + struct resource *res; + int err; + + pd = dev_get_platdata(&pdev->dev); + if (pd == NULL) { + dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); + return -ENODEV; + } + + if (pd->shared == NULL) { + dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); + return -ENODEV; + } + + dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); + if (!dev) + return -ENOMEM; + + mp = netdev_priv(dev); + 
platform_set_drvdata(pdev, mp); + + mp->shared = platform_get_drvdata(pd->shared); + mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); + mp->port_num = pd->port_number; + + mp->dev = dev; + + /* Kirkwood resets some registers on gated clocks. Especially + * CLK125_BYPASS_EN must be cleared but is not available on + * all other SoCs/System Controllers using this driver. + */ + if (of_device_is_compatible(pdev->dev.of_node, + "marvell,kirkwood-eth-port")) + wrlp(mp, PORT_SERIAL_CONTROL1, + rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN); + + /* + * Start with a default rate, and if there is a clock, allow + * it to override the default. + */ + mp->t_clk = 133000000; + mp->clk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(mp->clk)) { + clk_prepare_enable(mp->clk); + mp->t_clk = clk_get_rate(mp->clk); + } else if (!IS_ERR(mp->shared->clk)) { + mp->t_clk = clk_get_rate(mp->shared->clk); + } + + set_params(mp, pd); + netif_set_real_num_tx_queues(dev, mp->txq_count); + netif_set_real_num_rx_queues(dev, mp->rxq_count); + + err = 0; + if (pd->phy_node) { + mp->phy = of_phy_connect(mp->dev, pd->phy_node, + mv643xx_eth_adjust_link, 0, + PHY_INTERFACE_MODE_GMII); + if (!mp->phy) + err = -ENODEV; + else + phy_addr_set(mp, mp->phy->addr); + } else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) { + mp->phy = phy_scan(mp, pd->phy_addr); + + if (IS_ERR(mp->phy)) + err = PTR_ERR(mp->phy); + else + phy_init(mp, pd->speed, pd->duplex); + } + if (err == -ENODEV) { + err = -EPROBE_DEFER; + goto out; + } + if (err) + goto out; + + dev->ethtool_ops = &mv643xx_eth_ethtool_ops; + + init_pscr(mp, pd->speed, pd->duplex); + + + mib_counters_clear(mp); + + init_timer(&mp->mib_counters_timer); + mp->mib_counters_timer.data = (unsigned long)mp; + mp->mib_counters_timer.function = mib_counters_timer_wrapper; + mp->mib_counters_timer.expires = jiffies + 30 * HZ; + + spin_lock_init(&mp->mib_counters_lock); + + INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); + + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); + + init_timer(&mp->rx_oom); + mp->rx_oom.data = (unsigned long)mp; + mp->rx_oom.function = oom_timer_wrapper; + + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + BUG_ON(!res); + dev->irq = res->start; + + dev->netdev_ops = &mv643xx_eth_netdev_ops; + + dev->watchdog_timeo = 2 * HZ; + dev->base_addr = 0; + + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->vlan_features = dev->features; + + dev->features |= NETIF_F_RXCSUM; + dev->hw_features = dev->features; + + dev->priv_flags |= IFF_UNICAST_FLT; + dev->gso_max_segs = MV643XX_MAX_TSO_SEGS; + + SET_NETDEV_DEV(dev, &pdev->dev); + + if (mp->shared->win_protect) + wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); + + netif_carrier_off(dev); + + wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); + + set_rx_coal(mp, 250); + set_tx_coal(mp, 0); + + err = register_netdev(dev); + if (err) + goto out; + + netdev_notice(dev, "port %d with MAC address %pM\n", + mp->port_num, dev->dev_addr); + + if (mp->tx_desc_sram_size > 0) + netdev_notice(dev, "configured with sram\n"); + + return 0; + +out: + if (!IS_ERR(mp->clk)) + clk_disable_unprepare(mp->clk); + free_netdev(dev); + + return err; +} + +static int mv643xx_eth_remove(struct platform_device *pdev) +{ + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); + + unregister_netdev(mp->dev); + if (mp->phy != NULL) + phy_disconnect(mp->phy); + cancel_work_sync(&mp->tx_timeout_task); + + if (!IS_ERR(mp->clk)) + clk_disable_unprepare(mp->clk); + + 
free_netdev(mp->dev); + + return 0; +} + +static void mv643xx_eth_shutdown(struct platform_device *pdev) +{ + struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); + + /* Mask all interrupts on ethernet port */ + wrlp(mp, INT_MASK, 0); + rdlp(mp, INT_MASK); + + if (netif_running(mp->dev)) + port_reset(mp); +} + +static struct platform_driver mv643xx_eth_driver = { + .probe = mv643xx_eth_probe, + .remove = mv643xx_eth_remove, + .shutdown = mv643xx_eth_shutdown, + .driver = { + .name = MV643XX_ETH_NAME, + }, +}; + +static int __init mv643xx_eth_init_module(void) +{ + int rc; + + rc = platform_driver_register(&mv643xx_eth_shared_driver); + if (!rc) { + rc = platform_driver_register(&mv643xx_eth_driver); + if (rc) + platform_driver_unregister(&mv643xx_eth_shared_driver); + } + + return rc; +} +module_init(mv643xx_eth_init_module); + +static void __exit mv643xx_eth_cleanup_module(void) +{ + platform_driver_unregister(&mv643xx_eth_driver); + platform_driver_unregister(&mv643xx_eth_shared_driver); +} +module_exit(mv643xx_eth_cleanup_module); + +MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " + "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); +MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); +MODULE_ALIAS("platform:" MV643XX_ETH_NAME); diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c new file mode 100644 index 000000000..fc2fb2534 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -0,0 +1,301 @@ +/* + * Driver for the MDIO interface of Marvell network interfaces. + * + * Since the MDIO interface of Marvell network interfaces is shared + * between all network interfaces, having a single driver allows to + * handle concurrent accesses properly (you may have four Ethernet + * ports, but they in fact share the same SMI interface to access + * the MDIO bus). This driver is currently used by the mvneta and + * mv643xx_eth drivers. + * + * Copyright (C) 2012 Marvell + * + * Thomas Petazzoni + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MVMDIO_SMI_DATA_SHIFT 0 +#define MVMDIO_SMI_PHY_ADDR_SHIFT 16 +#define MVMDIO_SMI_PHY_REG_SHIFT 21 +#define MVMDIO_SMI_READ_OPERATION BIT(26) +#define MVMDIO_SMI_WRITE_OPERATION 0 +#define MVMDIO_SMI_READ_VALID BIT(27) +#define MVMDIO_SMI_BUSY BIT(28) +#define MVMDIO_ERR_INT_CAUSE 0x007C +#define MVMDIO_ERR_INT_SMI_DONE 0x00000010 +#define MVMDIO_ERR_INT_MASK 0x0080 + +/* + * SMI Timeout measurements: + * - Kirkwood 88F6281 (Globalscale Dreamplug): 45us to 95us (Interrupt) + * - Armada 370 (Globalscale Mirabox): 41us to 43us (Polled) + */ +#define MVMDIO_SMI_TIMEOUT 1000 /* 1000us = 1ms */ +#define MVMDIO_SMI_POLL_INTERVAL_MIN 45 +#define MVMDIO_SMI_POLL_INTERVAL_MAX 55 + +struct orion_mdio_dev { + struct mutex lock; + void __iomem *regs; + struct clk *clk; + /* + * If we have access to the error interrupt pin (which is + * somewhat misnamed as it not only reflects internal errors + * but also reflects SMI completion), use that to wait for + * SMI access completion instead of polling the SMI busy bit. 
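Using the MVMDIO_SMI_* layout defined above, an SMI read is a single 32-bit command word: PHY address at bit 16, register number at bit 21, plus the read-operation bit, with the result returned in bits 15:0 once READ_VALID is set. A small self-contained restatement (helper names are invented; the locking and wait logic of orion_mdio_read() is omitted):

#include <stdint.h>
#include <stdio.h>

#define SMI_PHY_ADDR_SHIFT      16      /* mirrors MVMDIO_SMI_PHY_ADDR_SHIFT */
#define SMI_PHY_REG_SHIFT       21      /* mirrors MVMDIO_SMI_PHY_REG_SHIFT */
#define SMI_READ_OP             (1u << 26)
#define SMI_READ_VALID          (1u << 27)

/* Pack an SMI read command the way orion_mdio_read() writes it. */
static uint32_t smi_read_cmd(unsigned int mii_id, unsigned int regnum)
{
        return (mii_id << SMI_PHY_ADDR_SHIFT) |
               (regnum << SMI_PHY_REG_SHIFT) |
               SMI_READ_OP;
}

/* A completed read is taken from bits 15:0, but only if READ_VALID is set. */
static int smi_read_result(uint32_t reg, uint16_t *val)
{
        if (!(reg & SMI_READ_VALID))
                return -1;
        *val = reg & 0xffff;
        return 0;
}

int main(void)
{
        uint16_t v = 0;

        printf("cmd = 0x%08x\n", (unsigned)smi_read_cmd(1, 2)); /* 0x04410000 */
        if (smi_read_result(SMI_READ_VALID | 0x1234, &v) == 0)
                printf("val = 0x%04x\n", v);                    /* 0x1234 */
        return 0;
}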
+ */ + int err_interrupt; + wait_queue_head_t smi_busy_wait; +}; + +static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev) +{ + return !(readl(dev->regs) & MVMDIO_SMI_BUSY); +} + +/* Wait for the SMI unit to be ready for another operation + */ +static int orion_mdio_wait_ready(struct mii_bus *bus) +{ + struct orion_mdio_dev *dev = bus->priv; + unsigned long timeout = usecs_to_jiffies(MVMDIO_SMI_TIMEOUT); + unsigned long end = jiffies + timeout; + int timedout = 0; + + while (1) { + if (orion_mdio_smi_is_done(dev)) + return 0; + else if (timedout) + break; + + if (dev->err_interrupt <= 0) { + usleep_range(MVMDIO_SMI_POLL_INTERVAL_MIN, + MVMDIO_SMI_POLL_INTERVAL_MAX); + + if (time_is_before_jiffies(end)) + ++timedout; + } else { + /* wait_event_timeout does not guarantee a delay of at + * least one whole jiffie, so timeout must be no less + * than two. + */ + if (timeout < 2) + timeout = 2; + wait_event_timeout(dev->smi_busy_wait, + orion_mdio_smi_is_done(dev), + timeout); + + ++timedout; + } + } + + dev_err(bus->parent, "Timeout: SMI busy for too long\n"); + return -ETIMEDOUT; +} + +static int orion_mdio_read(struct mii_bus *bus, int mii_id, + int regnum) +{ + struct orion_mdio_dev *dev = bus->priv; + u32 val; + int ret; + + mutex_lock(&dev->lock); + + ret = orion_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | + (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | + MVMDIO_SMI_READ_OPERATION), + dev->regs); + + ret = orion_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + val = readl(dev->regs); + if (!(val & MVMDIO_SMI_READ_VALID)) { + dev_err(bus->parent, "SMI bus read not valid\n"); + ret = -ENODEV; + goto out; + } + + ret = val & 0xFFFF; +out: + mutex_unlock(&dev->lock); + return ret; +} + +static int orion_mdio_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + struct orion_mdio_dev *dev = bus->priv; + int ret; + + mutex_lock(&dev->lock); + + ret = orion_mdio_wait_ready(bus); + if (ret < 0) + goto out; + + writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) | + (regnum << MVMDIO_SMI_PHY_REG_SHIFT) | + MVMDIO_SMI_WRITE_OPERATION | + (value << MVMDIO_SMI_DATA_SHIFT)), + dev->regs); + +out: + mutex_unlock(&dev->lock); + return ret; +} + +static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id) +{ + struct orion_mdio_dev *dev = dev_id; + + if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) & + MVMDIO_ERR_INT_SMI_DONE) { + writel(~MVMDIO_ERR_INT_SMI_DONE, + dev->regs + MVMDIO_ERR_INT_CAUSE); + wake_up(&dev->smi_busy_wait); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int orion_mdio_probe(struct platform_device *pdev) +{ + struct resource *r; + struct mii_bus *bus; + struct orion_mdio_dev *dev; + int i, ret; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "No SMI register address given\n"); + return -ENODEV; + } + + bus = devm_mdiobus_alloc_size(&pdev->dev, + sizeof(struct orion_mdio_dev)); + if (!bus) + return -ENOMEM; + + bus->name = "orion_mdio_bus"; + bus->read = orion_mdio_read; + bus->write = orion_mdio_write; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", + dev_name(&pdev->dev)); + bus->parent = &pdev->dev; + + bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int), + GFP_KERNEL); + if (!bus->irq) + return -ENOMEM; + + for (i = 0; i < PHY_MAX_ADDR; i++) + bus->irq[i] = PHY_POLL; + + dev = bus->priv; + dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!dev->regs) { + dev_err(&pdev->dev, "Unable to remap SMI register\n"); + ret = -ENODEV; 
+ goto out_mdio; + } + + init_waitqueue_head(&dev->smi_busy_wait); + + dev->clk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(dev->clk)) + clk_prepare_enable(dev->clk); + + dev->err_interrupt = platform_get_irq(pdev, 0); + if (dev->err_interrupt > 0) { + ret = devm_request_irq(&pdev->dev, dev->err_interrupt, + orion_mdio_err_irq, + IRQF_SHARED, pdev->name, dev); + if (ret) + goto out_mdio; + + writel(MVMDIO_ERR_INT_SMI_DONE, + dev->regs + MVMDIO_ERR_INT_MASK); + + } else if (dev->err_interrupt == -EPROBE_DEFER) { + return -EPROBE_DEFER; + } + + mutex_init(&dev->lock); + + if (pdev->dev.of_node) + ret = of_mdiobus_register(bus, pdev->dev.of_node); + else + ret = mdiobus_register(bus); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); + goto out_mdio; + } + + platform_set_drvdata(pdev, bus); + + return 0; + +out_mdio: + if (!IS_ERR(dev->clk)) + clk_disable_unprepare(dev->clk); + return ret; +} + +static int orion_mdio_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + struct orion_mdio_dev *dev = bus->priv; + + writel(0, dev->regs + MVMDIO_ERR_INT_MASK); + mdiobus_unregister(bus); + if (!IS_ERR(dev->clk)) + clk_disable_unprepare(dev->clk); + + return 0; +} + +static const struct of_device_id orion_mdio_match[] = { + { .compatible = "marvell,orion-mdio" }, + { } +}; +MODULE_DEVICE_TABLE(of, orion_mdio_match); + +static struct platform_driver orion_mdio_driver = { + .probe = orion_mdio_probe, + .remove = orion_mdio_remove, + .driver = { + .name = "orion-mdio", + .of_match_table = orion_mdio_match, + }, +}; + +module_platform_driver(orion_mdio_driver); + +MODULE_DESCRIPTION("Marvell MDIO interface driver"); +MODULE_AUTHOR("Thomas Petazzoni "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:orion-mdio"); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c new file mode 100644 index 000000000..74d0389bf --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -0,0 +1,3236 @@ +/* + * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. + * + * Copyright (C) 2012 Marvell + * + * Rami Rosen + * Thomas Petazzoni + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Registers */ +#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) +#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) +#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) +#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) +#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) +#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) +#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) +#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) +#define MVNETA_RXQ_BUF_SIZE_SHIFT 19 +#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) +#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) +#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff +#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) +#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 +#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 +#define MVNETA_PORT_RX_RESET 0x1cc0 +#define MVNETA_PORT_RX_DMA_RESET BIT(0) +#define MVNETA_PHY_ADDR 0x2000 +#define MVNETA_PHY_ADDR_MASK 0x1f +#define MVNETA_MBUS_RETRY 0x2010 +#define MVNETA_UNIT_INTR_CAUSE 0x2080 +#define MVNETA_UNIT_CONTROL 0x20B0 +#define MVNETA_PHY_POLLING_ENABLE BIT(1) +#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) +#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) +#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) +#define MVNETA_BASE_ADDR_ENABLE 0x2290 +#define MVNETA_PORT_CONFIG 0x2400 +#define MVNETA_UNI_PROMISC_MODE BIT(0) +#define MVNETA_DEF_RXQ(q) ((q) << 1) +#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) +#define MVNETA_TX_UNSET_ERR_SUM BIT(12) +#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) +#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) +#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) +#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) +#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ + MVNETA_DEF_RXQ_ARP(q) | \ + MVNETA_DEF_RXQ_TCP(q) | \ + MVNETA_DEF_RXQ_UDP(q) | \ + MVNETA_DEF_RXQ_BPDU(q) | \ + MVNETA_TX_UNSET_ERR_SUM | \ + MVNETA_RX_CSUM_WITH_PSEUDO_HDR) +#define MVNETA_PORT_CONFIG_EXTEND 0x2404 +#define MVNETA_MAC_ADDR_LOW 0x2414 +#define MVNETA_MAC_ADDR_HIGH 0x2418 +#define MVNETA_SDMA_CONFIG 0x241c +#define MVNETA_SDMA_BRST_SIZE_16 4 +#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) +#define MVNETA_RX_NO_DATA_SWAP BIT(4) +#define MVNETA_TX_NO_DATA_SWAP BIT(5) +#define MVNETA_DESC_SWAP BIT(6) +#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) +#define MVNETA_PORT_STATUS 0x2444 +#define MVNETA_TX_IN_PRGRS BIT(1) +#define MVNETA_TX_FIFO_EMPTY BIT(8) +#define MVNETA_RX_MIN_FRAME_SIZE 0x247c +#define MVNETA_SERDES_CFG 0x24A0 +#define MVNETA_SGMII_SERDES_PROTO 0x0cc7 +#define MVNETA_QSGMII_SERDES_PROTO 0x0667 +#define MVNETA_TYPE_PRIO 0x24bc +#define MVNETA_FORCE_UNI BIT(21) +#define MVNETA_TXQ_CMD_1 0x24e4 +#define MVNETA_TXQ_CMD 0x2448 +#define MVNETA_TXQ_DISABLE_SHIFT 8 +#define MVNETA_TXQ_ENABLE_MASK 0x000000ff +#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 +#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) +#define MVNETA_ACC_MODE 0x2500 +#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) +#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff +#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 +#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) + +/* Exception Interrupt Port/Queue Cause register */ + +#define MVNETA_INTR_NEW_CAUSE 0x25a0 +#define MVNETA_INTR_NEW_MASK 0x25a4 + +/* bits 0..7 = TXQ SENT, one bit per queue. + * bits 8..15 = RXQ OCCUP, one bit per queue. 
+ * bits 16..23 = RXQ FREE, one bit per queue. + * bit 29 = OLD_REG_SUM, see old reg ? + * bit 30 = TX_ERR_SUM, one bit for 4 ports + * bit 31 = MISC_SUM, one bit for 4 ports + */ +#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) +#define MVNETA_TX_INTR_MASK_ALL (0xff << 0) +#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) +#define MVNETA_RX_INTR_MASK_ALL (0xff << 8) +#define MVNETA_MISCINTR_INTR_MASK BIT(31) + +#define MVNETA_INTR_OLD_CAUSE 0x25a8 +#define MVNETA_INTR_OLD_MASK 0x25ac + +/* Data Path Port/Queue Cause Register */ +#define MVNETA_INTR_MISC_CAUSE 0x25b0 +#define MVNETA_INTR_MISC_MASK 0x25b4 + +#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) +#define MVNETA_CAUSE_LINK_CHANGE BIT(1) +#define MVNETA_CAUSE_PTP BIT(4) + +#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) +#define MVNETA_CAUSE_RX_OVERRUN BIT(8) +#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) +#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) +#define MVNETA_CAUSE_TX_UNDERUN BIT(11) +#define MVNETA_CAUSE_PRBS_ERR BIT(12) +#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) +#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) + +#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 +#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) +#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) + +#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 +#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) +#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) + +#define MVNETA_INTR_ENABLE 0x25b8 +#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 +#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF + +#define MVNETA_RXQ_CMD 0x2680 +#define MVNETA_RXQ_DISABLE_SHIFT 8 +#define MVNETA_RXQ_ENABLE_MASK 0x000000ff +#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) +#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) +#define MVNETA_GMAC_CTRL_0 0x2c00 +#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 +#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVNETA_GMAC0_PORT_ENABLE BIT(0) +#define MVNETA_GMAC_CTRL_2 0x2c08 +#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) +#define MVNETA_GMAC2_PCS_ENABLE BIT(3) +#define MVNETA_GMAC2_PORT_RGMII BIT(4) +#define MVNETA_GMAC2_PORT_RESET BIT(6) +#define MVNETA_GMAC_STATUS 0x2c10 +#define MVNETA_GMAC_LINK_UP BIT(0) +#define MVNETA_GMAC_SPEED_1000 BIT(1) +#define MVNETA_GMAC_SPEED_100 BIT(2) +#define MVNETA_GMAC_FULL_DUPLEX BIT(3) +#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) +#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) +#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) +#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) +#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c +#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) +#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) +#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVNETA_GMAC_AN_SPEED_EN BIT(7) +#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) +#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) +#define MVNETA_MIB_COUNTERS_BASE 0x3080 +#define MVNETA_MIB_LATE_COLLISION 0x7c +#define MVNETA_DA_FILT_SPEC_MCAST 0x3400 +#define MVNETA_DA_FILT_OTH_MCAST 0x3500 +#define MVNETA_DA_FILT_UCAST_BASE 0x3600 +#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2)) +#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2)) +#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000 +#define 
MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) +#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) +#define MVNETA_TXQ_DEC_SENT_SHIFT 16 +#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) +#define MVNETA_TXQ_SENT_DESC_SHIFT 16 +#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 +#define MVNETA_PORT_TX_RESET 0x3cf0 +#define MVNETA_PORT_TX_DMA_RESET BIT(0) +#define MVNETA_TX_MTU 0x3e0c +#define MVNETA_TX_TOKEN_SIZE 0x3e14 +#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff +#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) +#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff + +#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Descriptor ring Macros */ +#define MVNETA_QUEUE_NEXT_DESC(q, index) \ + (((index) < (q)->last_desc) ? ((index) + 1) : 0) + +/* Various constants */ + +/* Coalescing */ +#define MVNETA_TXDONE_COAL_PKTS 1 +#define MVNETA_RX_COAL_PKTS 32 +#define MVNETA_RX_COAL_USEC 100 + +/* The two bytes Marvell header. Either contains a special value used + * by Marvell switches when a specific hardware mode is enabled (not + * supported by this driver) or is filled automatically by zeroes on + * the RX side. Those two bytes being at the front of the Ethernet + * header, they allow to have the IP header aligned on a 4 bytes + * boundary automatically: the hardware skips those two bytes on its + * own. + */ +#define MVNETA_MH_SIZE 2 + +#define MVNETA_VLAN_TAG_LEN 4 + +#define MVNETA_CPU_D_CACHE_LINE_SIZE 32 +#define MVNETA_TX_CSUM_MAX_SIZE 9800 +#define MVNETA_ACC_MODE_EXT 1 + +/* Timeout constants */ +#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 +#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000 +#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000 + +#define MVNETA_TX_MTU_MAX 0x3ffff + +/* TSO header size */ +#define TSO_HEADER_SIZE 128 + +/* Max number of Rx descriptors */ +#define MVNETA_MAX_RXD 128 + +/* Max number of Tx descriptors */ +#define MVNETA_MAX_TXD 532 + +/* Max number of allowed TCP segments for software TSO */ +#define MVNETA_MAX_TSO_SEGS 100 + +#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +/* descriptor aligned size */ +#define MVNETA_DESC_ALIGNED_SIZE 32 + +#define MVNETA_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, \ + MVNETA_CPU_D_CACHE_LINE_SIZE) + +#define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_phys) && \ + (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE)) + +#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) + +struct mvneta_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + +struct mvneta_port { + int pkt_size; + unsigned int frag_size; + void __iomem *base; + struct mvneta_rx_queue *rxqs; + struct mvneta_tx_queue *txqs; + struct net_device *dev; + + u32 cause_rx_tx; + struct napi_struct napi; + + /* Core clock */ + struct clk *clk; + u8 mcast_count[256]; + u16 tx_ring_size; + u16 rx_ring_size; + struct mvneta_pcpu_stats *stats; + + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + phy_interface_t phy_interface; + struct device_node *phy_node; + unsigned int link; + unsigned int duplex; + unsigned int speed; + unsigned int tx_csum_limit; + int use_inband_status:1; +}; + +/* The mvneta_tx_desc and mvneta_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVNETA_TX_L3_OFF_SHIFT 0 +#define MVNETA_TX_IP_HLEN_SHIFT 8 +#define MVNETA_TX_L4_UDP BIT(16) +#define 
MVNETA_TX_L3_IP6 BIT(17) +#define MVNETA_TXD_IP_CSUM BIT(18) +#define MVNETA_TXD_Z_PAD BIT(19) +#define MVNETA_TXD_L_DESC BIT(20) +#define MVNETA_TXD_F_DESC BIT(21) +#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ + MVNETA_TXD_L_DESC | \ + MVNETA_TXD_F_DESC) +#define MVNETA_TX_L4_CSUM_FULL BIT(30) +#define MVNETA_TX_L4_CSUM_NOT BIT(31) + +#define MVNETA_RXD_ERR_CRC 0x0 +#define MVNETA_RXD_ERR_SUMMARY BIT(16) +#define MVNETA_RXD_ERR_OVERRUN BIT(17) +#define MVNETA_RXD_ERR_LEN BIT(18) +#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) +#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) +#define MVNETA_RXD_L3_IP4 BIT(25) +#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27)) +#define MVNETA_RXD_L4_CSUM_OK BIT(30) + +#if defined(__LITTLE_ENDIAN) +struct mvneta_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u16 reserverd1; /* csum_l4 (for future use) */ + u16 data_size; /* Data size of transmitted packet in bytes */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 reserved3[4]; /* Reserved - (for future use) */ +}; + +struct mvneta_rx_desc { + u32 status; /* Info about received packet */ + u16 reserved1; /* pnc_info - (for future use, PnC) */ + u16 data_size; /* Size of received packet in bytes */ + + u32 buf_phys_addr; /* Physical address of the buffer */ + u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + u16 reserved3; /* prefetch_cmd, for future use */ + u16 reserved4; /* csum_l4 - (for future use, PnC) */ + + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ + u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ +}; +#else +struct mvneta_tx_desc { + u16 data_size; /* Data size of transmitted packet in bytes */ + u16 reserverd1; /* csum_l4 (for future use) */ + u32 command; /* Options used by HW for packet transmitting.*/ + u32 reserved2; /* hw_cmd - (for future use, PMT) */ + u32 buf_phys_addr; /* Physical addr of transmitted buffer */ + u32 reserved3[4]; /* Reserved - (for future use) */ +}; + +struct mvneta_rx_desc { + u16 data_size; /* Size of received packet in bytes */ + u16 reserved1; /* pnc_info - (for future use, PnC) */ + u32 status; /* Info about received packet */ + + u32 reserved2; /* pnc_flow_id (for future use, PnC) */ + u32 buf_phys_addr; /* Physical address of the buffer */ + + u16 reserved4; /* csum_l4 - (for future use, PnC) */ + u16 reserved3; /* prefetch_cmd, for future use */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + + u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ + u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ +}; +#endif + +struct mvneta_tx_queue { + /* Number of this TX queue, in the range 0-7 */ + u8 id; + + /* Number of TX DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used TX DMA descriptor in the + * descriptor ring + */ + int count; + int tx_stop_threshold; + int tx_wake_threshold; + + /* Array of transmitted skb */ + struct sk_buff **tx_skb; + + /* Index of last TX DMA descriptor that was inserted */ + int txq_put_index; + + /* Index of the TX DMA descriptor to be cleaned up */ + int txq_get_index; + + u32 done_pkts_coal; + + /* Virtual address of the TX DMA descriptors array */ + struct mvneta_tx_desc *descs; + + /* DMA address of the TX DMA descriptors array */ + dma_addr_t descs_phys; + + /* Index of the last TX DMA descriptor */ + int last_desc; + + /* Index of the next TX DMA 
descriptor to process */ + int next_desc_to_proc; + + /* DMA buffers for TSO headers */ + char *tso_hdrs; + + /* DMA address of TSO headers */ + dma_addr_t tso_hdrs_phys; +}; + +struct mvneta_rx_queue { + /* rx queue number, in the range 0-7 */ + u8 id; + + /* num of rx descriptors in the rx descriptor ring */ + int size; + + /* counter of times when mvneta_refill() failed */ + int missed; + + u32 pkts_coal; + u32 time_coal; + + /* Virtual address of the RX DMA descriptors array */ + struct mvneta_rx_desc *descs; + + /* DMA address of the RX DMA descriptors array */ + dma_addr_t descs_phys; + + /* Index of the last RX DMA descriptor */ + int last_desc; + + /* Index of the next RX DMA descriptor to process */ + int next_desc_to_proc; +}; + +/* The hardware supports eight (8) rx queues, but we are only allowing + * the first one to be used. Therefore, let's just allocate one queue. + */ +static int rxq_number = 1; +static int txq_number = 8; + +static int rxq_def; + +static int rx_copybreak __read_mostly = 256; + +#define MVNETA_DRIVER_NAME "mvneta" +#define MVNETA_DRIVER_VERSION "1.0" + +/* Utility/helper methods */ + +/* Write helper method */ +static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) +{ + writel(data, pp->base + offset); +} + +/* Read helper method */ +static u32 mvreg_read(struct mvneta_port *pp, u32 offset) +{ + return readl(pp->base + offset); +} + +/* Increment txq get counter */ +static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) +{ + txq->txq_get_index++; + if (txq->txq_get_index == txq->size) + txq->txq_get_index = 0; +} + +/* Increment txq put counter */ +static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) +{ + txq->txq_put_index++; + if (txq->txq_put_index == txq->size) + txq->txq_put_index = 0; +} + + +/* Clear all MIB counters */ +static void mvneta_mib_counters_clear(struct mvneta_port *pp) +{ + int i; + u32 dummy; + + /* Perform dummy reads from MIB counters */ + for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) + dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); +} + +/* Get System Network Statistics */ +struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct mvneta_port *pp = netdev_priv(dev); + unsigned int start; + int cpu; + + for_each_possible_cpu(cpu) { + struct mvneta_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + + cpu_stats = per_cpu_ptr(pp->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; + tx_packets = cpu_stats->tx_packets; + tx_bytes = cpu_stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + stats->rx_errors = dev->stats.rx_errors; + stats->rx_dropped = dev->stats.rx_dropped; + + stats->tx_dropped = dev->stats.tx_dropped; + + return stats; +} + +/* Rx descriptors helper methods */ + +/* Checks whether the RX descriptor having this status is both the first + * and the last descriptor for the RX packet. 
Each RX packet is currently + * received through a single RX descriptor, so not having each RX + * descriptor with its first and last bits set is an error + */ +static int mvneta_rxq_desc_is_first_last(u32 status) +{ + return (status & MVNETA_RXD_FIRST_LAST_DESC) == + MVNETA_RXD_FIRST_LAST_DESC; +} + +/* Add number of descriptors ready to receive new packets */ +static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int ndescs) +{ + /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can + * be added at once + */ + while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), + (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX << + MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); + ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; + } + + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), + (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT)); +} + +/* Get number of RX descriptors occupied by received packets */ +static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); + return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; +} + +/* Update num of rx desc called upon return from rx path or + * from mvneta_rxq_drop_pkts(). + */ +static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int rx_done, int rx_filled) +{ + u32 val; + + if ((rx_done <= 0xff) && (rx_filled <= 0xff)) { + val = rx_done | + (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT); + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); + return; + } + + /* Only 255 descriptors can be added at once */ + while ((rx_done > 0) || (rx_filled > 0)) { + if (rx_done <= 0xff) { + val = rx_done; + rx_done = 0; + } else { + val = 0xff; + rx_done -= 0xff; + } + if (rx_filled <= 0xff) { + val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; + rx_filled = 0; + } else { + val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; + rx_filled -= 0xff; + } + mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); + } +} + +/* Get pointer to next RX descriptor to be processed by SW */ +static struct mvneta_rx_desc * +mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) +{ + int rx_desc = rxq->next_desc_to_proc; + + rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); + prefetch(rxq->descs + rxq->next_desc_to_proc); + return rxq->descs + rx_desc; +} + +/* Change maximum receive size of the port. 
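mvneta_rxq_desc_num_update() above can only report up to 255 processed and 255 refilled descriptors per register write, so larger totals are split across several writes. A standalone sketch of that chunking, with an abbreviated function name and the register write replaced by a printf():

#include <stdio.h>
#include <stdint.h>

#define ADD_NON_OCCUPIED_SHIFT  16      /* MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT */

/* Split the processed/refilled counts into chunks of at most 255 each,
 * packing them the way the status-update register expects.
 */
static void rxq_desc_num_update(int rx_done, int rx_filled)
{
        while (rx_done > 0 || rx_filled > 0) {
                int done_chunk = rx_done > 0xff ? 0xff : rx_done;
                int fill_chunk = rx_filled > 0xff ? 0xff : rx_filled;
                uint32_t val;

                if (done_chunk < 0)
                        done_chunk = 0;
                if (fill_chunk < 0)
                        fill_chunk = 0;

                val = (uint32_t)done_chunk |
                      ((uint32_t)fill_chunk << ADD_NON_OCCUPIED_SHIFT);
                printf("write 0x%08x\n", (unsigned)val); /* stands in for mvreg_write() */

                rx_done -= done_chunk;
                rx_filled -= fill_chunk;
        }
}

int main(void)
{
        rxq_desc_num_update(300, 300);  /* two writes: 0x00ff00ff, 0x002d002d */
        return 0;
}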
*/ +static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK; + val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) << + MVNETA_GMAC_MAX_RX_SIZE_SHIFT; + mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); +} + + +/* Set rx queue offset */ +static void mvneta_rxq_offset_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int offset) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK; + + /* Offset is in */ + val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3); + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + + +/* Tx descriptors helper methods */ + +/* Update HW with number of TX descriptors to be sent */ +static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, + int pend_desc) +{ + u32 val; + + /* Only 255 descriptors can be added at once ; Assume caller + * process TX desriptors in quanta less than 256 + */ + val = pend_desc; + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); +} + +/* Get pointer to next TX descriptor to be processed (send) by HW */ +static struct mvneta_tx_desc * +mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq) +{ + int tx_desc = txq->next_desc_to_proc; + + txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); + return txq->descs + tx_desc; +} + +/* Release the last allocated TX descriptor. Useful to handle DMA + * mapping failures in the TX path. + */ +static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) +{ + if (txq->next_desc_to_proc == 0) + txq->next_desc_to_proc = txq->last_desc - 1; + else + txq->next_desc_to_proc--; +} + +/* Set rxq buf size */ +static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, + int buf_size) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); + + val &= ~MVNETA_RXQ_BUF_SIZE_MASK; + val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); +} + +/* Disable buffer management (BM) */ +static void mvneta_rxq_bm_disable(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_HW_BUF_ALLOC; + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Start the Ethernet port RX and TX activity */ +static void mvneta_port_up(struct mvneta_port *pp) +{ + int queue; + u32 q_map; + + /* Enable all initialized TXs. */ + mvneta_mib_counters_clear(pp); + q_map = 0; + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + if (txq->descs != NULL) + q_map |= (1 << queue); + } + mvreg_write(pp, MVNETA_TXQ_CMD, q_map); + + /* Enable all initialized RXQs. */ + q_map = 0; + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + if (rxq->descs != NULL) + q_map |= (1 << queue); + } + + mvreg_write(pp, MVNETA_RXQ_CMD, q_map); +} + +/* Stop the Ethernet port activity */ +static void mvneta_port_down(struct mvneta_port *pp) +{ + u32 val; + int count; + + /* Stop Rx port activity. Check port Rx activity. */ + val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; + + /* Issue stop command for active channels only */ + if (val != 0) + mvreg_write(pp, MVNETA_RXQ_CMD, + val << MVNETA_RXQ_DISABLE_SHIFT); + + /* Wait for all Rx activity to terminate. 
*/ + count = 0; + do { + if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(pp->dev, + "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n", + val); + break; + } + mdelay(1); + + val = mvreg_read(pp, MVNETA_RXQ_CMD); + } while (val & 0xff); + + /* Stop Tx port activity. Check port Tx activity. Issue stop + * command for active channels only + */ + val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; + + if (val != 0) + mvreg_write(pp, MVNETA_TXQ_CMD, + (val << MVNETA_TXQ_DISABLE_SHIFT)); + + /* Wait for all Tx activity to terminate. */ + count = 0; + do { + if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(pp->dev, + "TIMEOUT for TX stopped status=0x%08x\n", + val); + break; + } + mdelay(1); + + /* Check TX Command reg that all Txqs are stopped */ + val = mvreg_read(pp, MVNETA_TXQ_CMD); + + } while (val & 0xff); + + /* Double check to verify that TX FIFO is empty */ + count = 0; + do { + if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { + netdev_warn(pp->dev, + "TX FIFO empty timeout status=0x%08x\n", + val); + break; + } + mdelay(1); + + val = mvreg_read(pp, MVNETA_PORT_STATUS); + } while (!(val & MVNETA_TX_FIFO_EMPTY) && + (val & MVNETA_TX_IN_PRGRS)); + + udelay(200); +} + +/* Enable the port by setting the port enable bit of the MAC control register */ +static void mvneta_port_enable(struct mvneta_port *pp) +{ + u32 val; + + /* Enable port */ + val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + val |= MVNETA_GMAC0_PORT_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); +} + +/* Disable the port and wait for about 200 usec before returning */ +static void mvneta_port_disable(struct mvneta_port *pp) +{ + u32 val; + + /* Reset the Enable bit in the Serial Control Register */ + val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + val &= ~MVNETA_GMAC0_PORT_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); + + udelay(200); +} + +/* Multicast tables methods */ + +/* Set all entries in Unicast MAC Table; queue==-1 means reject all */ +static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) +{ + int offset; + u32 val; + + if (queue == -1) { + val = 0; + } else { + val = 0x1 | (queue << 1); + val |= (val << 24) | (val << 16) | (val << 8); + } + + for (offset = 0; offset <= 0xc; offset += 4) + mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); +} + +/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ +static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) +{ + int offset; + u32 val; + + if (queue == -1) { + val = 0; + } else { + val = 0x1 | (queue << 1); + val |= (val << 24) | (val << 16) | (val << 8); + } + + for (offset = 0; offset <= 0xfc; offset += 4) + mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); + +} + +/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */ +static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) +{ + int offset; + u32 val; + + if (queue == -1) { + memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); + val = 0; + } else { + memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); + val = 0x1 | (queue << 1); + val |= (val << 24) | (val << 16) | (val << 8); + } + + for (offset = 0; offset <= 0xfc; offset += 4) + mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); +} + +/* This method sets defaults to the NETA port: + * Clears interrupt Cause and Mask registers. + * Clears all MAC tables. + * Sets defaults to all registers. + * Resets RX and TX descriptor rings. + * Resets PHY. 
+ * This method can be called after mvneta_port_down() to return the port + * settings to defaults. + */ +static void mvneta_defaults_set(struct mvneta_port *pp) +{ + int cpu; + int queue; + u32 val; + + /* Clear all Cause registers */ + mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + + /* Mask all interrupts */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + mvreg_write(pp, MVNETA_INTR_ENABLE, 0); + + /* Enable MBUS Retry bit16 */ + mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); + + /* Set CPU queue access map - all CPUs have access to all RX + * queues and to all TX queues + */ + for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) + mvreg_write(pp, MVNETA_CPU_MAP(cpu), + (MVNETA_CPU_RXQ_ACCESS_ALL_MASK | + MVNETA_CPU_TXQ_ACCESS_ALL_MASK)); + + /* Reset RX and TX DMAs */ + mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); + mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); + + /* Disable Legacy WRR, Disable EJP, Release from reset */ + mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); + for (queue = 0; queue < txq_number; queue++) { + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); + } + + mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); + mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); + + /* Set Port Acceleration Mode */ + val = MVNETA_ACC_MODE_EXT; + mvreg_write(pp, MVNETA_ACC_MODE, val); + + /* Update val of portCfg register accordingly with all RxQueue types */ + val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def); + mvreg_write(pp, MVNETA_PORT_CONFIG, val); + + val = 0; + mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); + mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); + + /* Build PORT_SDMA_CONFIG_REG */ + val = 0; + + /* Default burst size */ + val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); + val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); + val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; + +#if defined(__BIG_ENDIAN) + val |= MVNETA_DESC_SWAP; +#endif + + /* Assign port SDMA configuration */ + mvreg_write(pp, MVNETA_SDMA_CONFIG, val); + + /* Disable PHY polling in hardware, since we're using the + * kernel phylib to do this. 
+ */ + val = mvreg_read(pp, MVNETA_UNIT_CONTROL); + val &= ~MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + + if (pp->use_inband_status) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | + MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_AN_FLOW_CTRL_EN); + val |= MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); + } else { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } + + mvneta_set_ucast_table(pp, -1); + mvneta_set_special_mcast_table(pp, -1); + mvneta_set_other_mcast_table(pp, -1); + + /* Set port interrupt enable register - default enable all */ + mvreg_write(pp, MVNETA_INTR_ENABLE, + (MVNETA_RXQ_INTR_ENABLE_ALL_MASK + | MVNETA_TXQ_INTR_ENABLE_ALL_MASK)); +} + +/* Set max sizes for tx queues */ +static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) + +{ + u32 val, size, mtu; + int queue; + + mtu = max_tx_size * 8; + if (mtu > MVNETA_TX_MTU_MAX) + mtu = MVNETA_TX_MTU_MAX; + + /* Set MTU */ + val = mvreg_read(pp, MVNETA_TX_MTU); + val &= ~MVNETA_TX_MTU_MAX; + val |= mtu; + mvreg_write(pp, MVNETA_TX_MTU, val); + + /* TX token size and all TXQs token size must be larger than the MTU */ + val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); + + size = val & MVNETA_TX_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVNETA_TX_TOKEN_SIZE_MAX; + val |= size; + mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); + } + for (queue = 0; queue < txq_number; queue++) { + val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); + + size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; + val |= size; + mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); + } + } +} + +/* Set unicast address */ +static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, + int queue) +{ + unsigned int unicast_reg; + unsigned int tbl_offset; + unsigned int reg_offset; + + /* Locate the Unicast table entry */ + last_nibble = (0xf & last_nibble); + + /* offset from unicast tbl base */ + tbl_offset = (last_nibble / 4) * 4; + + /* offset within the above reg */ + reg_offset = last_nibble % 4; + + unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); + + if (queue == -1) { + /* Clear accepts frame bit at specified unicast DA tbl entry */ + unicast_reg &= ~(0xff << (8 * reg_offset)); + } else { + unicast_reg &= ~(0xff << (8 * reg_offset)); + unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); + } + + mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); +} + +/* Set mac address */ +static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, + int queue) +{ + unsigned int mac_h; + unsigned int mac_l; + + if (queue != -1) { + mac_l = (addr[4] << 8) | (addr[5]); + mac_h = (addr[0] << 24) | (addr[1] << 16) | + (addr[2] << 8) | (addr[3] << 0); + + mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); + mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); + } + + /* Accept frames of this address */ + mvneta_set_ucast_addr(pp, addr[5], queue); +} + +/* Set the number of packets that will be received before RX interrupt + * will be generated by HW. 
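+ * The value is also cached in rxq->pkts_coal so it can be re-applied when + * the queue is re-initialized.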
+ */ +static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, u32 value) +{ + mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), + value | MVNETA_RXQ_NON_OCCUPIED(0)); + rxq->pkts_coal = value; +} + +/* Set the time delay in usec before RX interrupt will be generated by + * HW. + */ +static void mvneta_rx_time_coal_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq, u32 value) +{ + u32 val; + unsigned long clk_rate; + + clk_rate = clk_get_rate(pp->clk); + val = (clk_rate / 1000000) * value; + + mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); + rxq->time_coal = value; +} + +/* Set threshold for TX_DONE pkts coalescing */ +static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, u32 value) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); + + val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; + val |= MVNETA_TXQ_SENT_THRESH_MASK(value); + + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); + + txq->done_pkts_coal = value; +} + +/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ +static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, + u32 phys_addr, u32 cookie) +{ + rx_desc->buf_cookie = cookie; + rx_desc->buf_phys_addr = phys_addr; +} + +/* Decrement sent descriptors counter */ +static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, + int sent_desc) +{ + u32 val; + + /* Only 255 TX descriptors can be updated at once */ + while (sent_desc > 0xff) { + val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + sent_desc = sent_desc - 0xff; + } + + val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); +} + +/* Get number of TX descriptors already sent by HW */ +static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + u32 val; + int sent_desc; + + val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); + sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> + MVNETA_TXQ_SENT_DESC_SHIFT; + + return sent_desc; +} + +/* Get number of sent descriptors and decrement counter. + * The number of sent descriptors is returned. 
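+ * Decrementing the HW counter by the amount just read means the next call + * reports only descriptors that completed since this one.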
+ */ +static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + int sent_desc; + + /* Get number of sent descriptors */ + sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); + + /* Decrement sent descriptors counter */ + if (sent_desc) + mvneta_txq_sent_desc_dec(pp, txq, sent_desc); + + return sent_desc; +} + +/* Set TXQ descriptors fields relevant for CSUM calculation */ +static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, + int ip_hdr_len, int l4_proto) +{ + u32 command; + + /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, + * G_L4_chk, L4_type; required only for checksum + * calculation + */ + command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; + command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; + + if (l3_proto == htons(ETH_P_IP)) + command |= MVNETA_TXD_IP_CSUM; + else + command |= MVNETA_TX_L3_IP6; + + if (l4_proto == IPPROTO_TCP) + command |= MVNETA_TX_L4_CSUM_FULL; + else if (l4_proto == IPPROTO_UDP) + command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; + else + command |= MVNETA_TX_L4_CSUM_NOT; + + return command; +} + + +/* Display more error info */ +static void mvneta_rx_error(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc) +{ + u32 status = rx_desc->status; + + if (!mvneta_rxq_desc_is_first_last(status)) { + netdev_err(pp->dev, + "bad rx status %08x (buffer oversize), size=%d\n", + status, rx_desc->data_size); + return; + } + + switch (status & MVNETA_RXD_ERR_CODE_MASK) { + case MVNETA_RXD_ERR_CRC: + netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", + status, rx_desc->data_size); + break; + case MVNETA_RXD_ERR_OVERRUN: + netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", + status, rx_desc->data_size); + break; + case MVNETA_RXD_ERR_LEN: + netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", + status, rx_desc->data_size); + break; + case MVNETA_RXD_ERR_RESOURCE: + netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", + status, rx_desc->data_size); + break; + } +} + +/* Handle RX checksum offload based on the descriptor's status */ +static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, + struct sk_buff *skb) +{ + if ((status & MVNETA_RXD_L3_IP4) && + (status & MVNETA_RXD_L4_CSUM_OK)) { + skb->csum = 0; + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + + skb->ip_summed = CHECKSUM_NONE; +} + +/* Return the tx queue pointer (find last set bit) according to the cause + * value returned from the tx_done register. The cause must not be zero; the + * returned pointer is always a valid queue, matching the first set bit found + * in the cause. 
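+ * For example, if bits 0 and 3 of the cause are set, the TXQ 3 pointer is + * returned first; mvneta_tx_done_gbe() then clears bit 3 and iterates until + * the cause is empty.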
+ */ +static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, + u32 cause) +{ + int queue = fls(cause) - 1; + + return &pp->txqs[queue]; +} + +/* Free tx queue skbuffs */ +static void mvneta_txq_bufs_free(struct mvneta_port *pp, + struct mvneta_tx_queue *txq, int num) +{ + int i; + + for (i = 0; i < num; i++) { + struct mvneta_tx_desc *tx_desc = txq->descs + + txq->txq_get_index; + struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; + + mvneta_txq_inc_get(txq); + + if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) + dma_unmap_single(pp->dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, DMA_TO_DEVICE); + if (!skb) + continue; + dev_kfree_skb_any(skb); + } +} + +/* Handle end of transmission */ +static void mvneta_txq_done(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); + int tx_done; + + tx_done = mvneta_txq_sent_desc_proc(pp, txq); + if (!tx_done) + return; + + mvneta_txq_bufs_free(pp, txq, tx_done); + + txq->count -= tx_done; + + if (netif_tx_queue_stopped(nq)) { + if (txq->count <= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); + } +} + +static void *mvneta_frag_alloc(const struct mvneta_port *pp) +{ + if (likely(pp->frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(pp->frag_size); + else + return kmalloc(pp->frag_size, GFP_ATOMIC); +} + +static void mvneta_frag_free(const struct mvneta_port *pp, void *data) +{ + if (likely(pp->frag_size <= PAGE_SIZE)) + put_page(virt_to_head_page(data)); + else + kfree(data); +} + +/* Refill processing */ +static int mvneta_rx_refill(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc) + +{ + dma_addr_t phys_addr; + void *data; + + data = mvneta_frag_alloc(pp); + if (!data) + return -ENOMEM; + + phys_addr = dma_map_single(pp->dev->dev.parent, data, + MVNETA_RX_BUF_SIZE(pp->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { + mvneta_frag_free(pp, data); + return -ENOMEM; + } + + mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data); + return 0; +} + +/* Handle tx checksum */ +static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int ip_hdr_len = 0; + __be16 l3_proto = vlan_get_protocol(skb); + u8 l4_proto; + + if (l3_proto == htons(ETH_P_IP)) { + struct iphdr *ip4h = ip_hdr(skb); + + /* Calculate IPv4 checksum and L4 checksum */ + ip_hdr_len = ip4h->ihl; + l4_proto = ip4h->protocol; + } else if (l3_proto == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + /* Read l4_protocol from one of IPv6 extra headers */ + if (skb_network_header_len(skb) > 0) + ip_hdr_len = (skb_network_header_len(skb) >> 2); + l4_proto = ip6h->nexthdr; + } else + return MVNETA_TX_L4_CSUM_NOT; + + return mvneta_txq_desc_csum(skb_network_offset(skb), + l3_proto, ip_hdr_len, l4_proto); + } + + return MVNETA_TX_L4_CSUM_NOT; +} + +/* Returns rx queue pointer (find last set bit) according to causeRxTx + * value + */ +static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp, + u32 cause) +{ + int queue = fls(cause >> 8) - 1; + + return (queue < 0 || queue >= rxq_number) ? 
NULL : &pp->rxqs[queue]; +} + +/* Drop packets received by the RXQ and free buffers */ +static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + int rx_done, i; + + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + for (i = 0; i < rxq->size; i++) { + struct mvneta_rx_desc *rx_desc = rxq->descs + i; + void *data = (void *)rx_desc->buf_cookie; + + mvneta_frag_free(pp, data); + dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + } + + if (rx_done) + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); +} + +/* Main rx processing */ +static int mvneta_rx(struct mvneta_port *pp, int rx_todo, + struct mvneta_rx_queue *rxq) +{ + struct net_device *dev = pp->dev; + int rx_done, rx_filled; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; + + /* Get number of received packets */ + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + + if (rx_todo > rx_done) + rx_todo = rx_done; + + rx_done = 0; + rx_filled = 0; + + /* Fairness NAPI loop */ + while (rx_done < rx_todo) { + struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + struct sk_buff *skb; + unsigned char *data; + u32 rx_status; + int rx_bytes, err; + + rx_done++; + rx_filled++; + rx_status = rx_desc->status; + rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); + data = (unsigned char *)rx_desc->buf_cookie; + + if (!mvneta_rxq_desc_is_first_last(rx_status) || + (rx_status & MVNETA_RXD_ERR_SUMMARY)) { + err_drop_frame: + dev->stats.rx_errors++; + mvneta_rx_error(pp, rx_desc); + /* leave the descriptor untouched */ + continue; + } + + if (rx_bytes <= rx_copybreak) { + /* better copy a small frame and not unmap the DMA region */ + skb = netdev_alloc_skb_ip_align(dev, rx_bytes); + if (unlikely(!skb)) + goto err_drop_frame; + + dma_sync_single_range_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes, + DMA_FROM_DEVICE); + memcpy(skb_put(skb, rx_bytes), + data + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + mvneta_rx_csum(pp, rx_status, skb); + napi_gro_receive(&pp->napi, skb); + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* leave the descriptor and buffer untouched */ + continue; + } + + skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 
0 : pp->frag_size); + if (!skb) + goto err_drop_frame; + + dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* Linux processing */ + skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); + skb_put(skb, rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + + mvneta_rx_csum(pp, rx_status, skb); + + napi_gro_receive(&pp->napi, skb); + + /* Refill processing */ + err = mvneta_rx_refill(pp, rx_desc); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; + rx_filled--; + } + } + + if (rcvd_pkts) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += rcvd_pkts; + stats->rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + + /* Update rxq management counters */ + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); + + return rx_done; +} + +static inline void +mvneta_tso_put_hdr(struct sk_buff *skb, + struct mvneta_port *pp, struct mvneta_tx_queue *txq) +{ + struct mvneta_tx_desc *tx_desc; + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + txq->tx_skb[txq->txq_put_index] = NULL; + tx_desc = mvneta_txq_next_desc_get(txq); + tx_desc->data_size = hdr_len; + tx_desc->command = mvneta_skb_tx_csum(pp, skb); + tx_desc->command |= MVNETA_TXD_F_DESC; + tx_desc->buf_phys_addr = txq->tso_hdrs_phys + + txq->txq_put_index * TSO_HEADER_SIZE; + mvneta_txq_inc_put(txq); +} + +static inline int +mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, + struct sk_buff *skb, char *data, int size, + bool last_tcp, bool is_last) +{ + struct mvneta_tx_desc *tx_desc; + + tx_desc = mvneta_txq_next_desc_get(txq); + tx_desc->data_size = size; + tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, + size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, + tx_desc->buf_phys_addr))) { + mvneta_txq_desc_put(txq); + return -ENOMEM; + } + + tx_desc->command = 0; + txq->tx_skb[txq->txq_put_index] = NULL; + + if (last_tcp) { + /* last descriptor in the TCP packet */ + tx_desc->command = MVNETA_TXD_L_DESC; + + /* last descriptor in SKB */ + if (is_last) + txq->tx_skb[txq->txq_put_index] = skb; + } + mvneta_txq_inc_put(txq); + return 0; +} + +static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, + struct mvneta_tx_queue *txq) +{ + int total_len, data_left; + int desc_count = 0; + struct mvneta_port *pp = netdev_priv(dev); + struct tso_t tso; + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int i; + + /* Count needed descriptors */ + if ((txq->count + tso_count_descs(skb)) >= txq->size) + return 0; + + if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { + pr_info("*** Is this even possible???!?!?\n"); + return 0; + } + + /* Initialize the TSO handler, and prepare the first payload */ + tso_start(skb, &tso); + + total_len = skb->len - hdr_len; + while (total_len > 0) { + char *hdr; + + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + desc_count++; + + /* prepare packet headers: MAC + IP + TCP */ + hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + + mvneta_tso_put_hdr(skb, pp, txq); + + while (data_left > 0) { + int size; + desc_count++; + + size = min_t(int, tso.size, data_left); + + if (mvneta_tso_put_data(dev, txq, skb, + tso.data, size, + size == data_left, + total_len == 0)) + goto 
err_release; + data_left -= size; + + tso_build_data(skb, &tso, size); + } + } + + return desc_count; + +err_release: + /* Release all used data descriptors; header descriptors must not + * be DMA-unmapped. + */ + for (i = desc_count - 1; i >= 0; i--) { + struct mvneta_tx_desc *tx_desc = txq->descs + i; + if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr)) + dma_unmap_single(pp->dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, + DMA_TO_DEVICE); + mvneta_txq_desc_put(txq); + } + return 0; +} + +/* Handle tx fragmentation processing */ +static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, + struct mvneta_tx_queue *txq) +{ + struct mvneta_tx_desc *tx_desc; + int i, nr_frags = skb_shinfo(skb)->nr_frags; + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = page_address(frag->page.p) + frag->page_offset; + + tx_desc = mvneta_txq_next_desc_get(txq); + tx_desc->data_size = frag->size; + + tx_desc->buf_phys_addr = + dma_map_single(pp->dev->dev.parent, addr, + tx_desc->data_size, DMA_TO_DEVICE); + + if (dma_mapping_error(pp->dev->dev.parent, + tx_desc->buf_phys_addr)) { + mvneta_txq_desc_put(txq); + goto error; + } + + if (i == nr_frags - 1) { + /* Last descriptor */ + tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; + txq->tx_skb[txq->txq_put_index] = skb; + } else { + /* Descriptor in the middle: Not First, Not Last */ + tx_desc->command = 0; + txq->tx_skb[txq->txq_put_index] = NULL; + } + mvneta_txq_inc_put(txq); + } + + return 0; + +error: + /* Release all descriptors that were used to map fragments of + * this packet, as well as the corresponding DMA mappings + */ + for (i = i - 1; i >= 0; i--) { + tx_desc = txq->descs + i; + dma_unmap_single(pp->dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, + DMA_TO_DEVICE); + mvneta_txq_desc_put(txq); + } + + return -ENOMEM; +} + +/* Main tx processing */ +static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + u16 txq_id = skb_get_queue_mapping(skb); + struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; + struct mvneta_tx_desc *tx_desc; + int len = skb->len; + int frags = 0; + u32 tx_cmd; + + if (!netif_running(dev)) + goto out; + + if (skb_is_gso(skb)) { + frags = mvneta_tx_tso(skb, dev, txq); + goto out; + } + + frags = skb_shinfo(skb)->nr_frags + 1; + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvneta_txq_next_desc_get(txq); + + tx_cmd = mvneta_skb_tx_csum(pp, skb); + + tx_desc->data_size = skb_headlen(skb); + + tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, + tx_desc->data_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, + tx_desc->buf_phys_addr))) { + mvneta_txq_desc_put(txq); + frags = 0; + goto out; + } + + if (frags == 1) { + /* First and Last descriptor */ + tx_cmd |= MVNETA_TXD_FLZ_DESC; + tx_desc->command = tx_cmd; + txq->tx_skb[txq->txq_put_index] = skb; + mvneta_txq_inc_put(txq); + } else { + /* First but not Last */ + tx_cmd |= MVNETA_TXD_F_DESC; + txq->tx_skb[txq->txq_put_index] = NULL; + mvneta_txq_inc_put(txq); + tx_desc->command = tx_cmd; + /* Continue with other skb fragments */ + if (mvneta_tx_frag_process(pp, skb, txq)) { + dma_unmap_single(dev->dev.parent, + tx_desc->buf_phys_addr, + tx_desc->data_size, + DMA_TO_DEVICE); + mvneta_txq_desc_put(txq); + frags = 0; + goto out; + } + } + +out: + if (frags > 0) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + struct netdev_queue *nq = 
netdev_get_tx_queue(dev, txq_id); + + txq->count += frags; + mvneta_txq_pend_desc_add(pp, txq, frags); + + if (txq->count >= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); + + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + return NETDEV_TX_OK; +} + + +/* Free tx resources when resetting a port */ +static void mvneta_txq_done_force(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) + +{ + int tx_done = txq->count; + + mvneta_txq_bufs_free(pp, txq, tx_done); + + /* reset txq */ + txq->count = 0; + txq->txq_put_index = 0; + txq->txq_get_index = 0; +} + +/* Handle tx done - called in softirq context. The cause_tx_done argument + * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. + */ +static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) +{ + struct mvneta_tx_queue *txq; + struct netdev_queue *nq; + + while (cause_tx_done) { + txq = mvneta_tx_done_policy(pp, cause_tx_done); + + nq = netdev_get_tx_queue(pp->dev, txq->id); + __netif_tx_lock(nq, smp_processor_id()); + + if (txq->count) + mvneta_txq_done(pp, txq); + + __netif_tx_unlock(nq); + cause_tx_done &= ~((1 << txq->id)); + } +} + +/* Compute crc8 of the specified address, using a unique algorithm per the + * hw spec, different from the generic crc8 algorithm + */ +static int mvneta_addr_crc(unsigned char *addr) +{ + int crc = 0; + int i; + + for (i = 0; i < ETH_ALEN; i++) { + int j; + + crc = (crc ^ addr[i]) << 8; + for (j = 7; j >= 0; j--) { + if (crc & (0x100 << j)) + crc ^= 0x107 << j; + } + } + + return crc; +} + +/* This method controls the net device special MAC multicast support. + * The Special Multicast Table for MAC addresses supports MAC of the form + * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). + * The MAC DA[7:0] bits are used as a pointer to the Special Multicast + * Table entries in the DA-Filter table. This method sets the appropriate + * Special Multicast Table entry. + */ +static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, + unsigned char last_byte, + int queue) +{ + unsigned int smc_table_reg; + unsigned int tbl_offset; + unsigned int reg_offset; + + /* Register offset from SMC table base */ + tbl_offset = (last_byte / 4); + /* Entry offset within the above reg */ + reg_offset = last_byte % 4; + + smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST + + tbl_offset * 4)); + + if (queue == -1) + smc_table_reg &= ~(0xff << (8 * reg_offset)); + else { + smc_table_reg &= ~(0xff << (8 * reg_offset)); + smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); + } + + mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, + smc_table_reg); +} + +/* This method controls the network device Other MAC multicast support. + * The Other Multicast Table is used for multicast of another type. + * A CRC-8 is used as an index to the Other Multicast Table entries + * in the DA-Filter table. + * The method gets the CRC-8 value from the calling routine and + * sets the appropriate Other Multicast Table entry according to the + * specified CRC-8. 
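+ * For example, a CRC-8 of 0x2a selects the register at byte offset + * (0x2a / 4) * 4 = 0x28 from the OMC table base, byte lane 0x2a % 4 = 2.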
+ */ +static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, + unsigned char crc8, + int queue) +{ + unsigned int omc_table_reg; + unsigned int tbl_offset; + unsigned int reg_offset; + + tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ + reg_offset = crc8 % 4; /* Entry offset within the above reg */ + + omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); + + if (queue == -1) { + /* Clear accepts frame bit at specified Other DA table entry */ + omc_table_reg &= ~(0xff << (8 * reg_offset)); + } else { + omc_table_reg &= ~(0xff << (8 * reg_offset)); + omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); + } + + mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); +} + +/* The network device supports multicast using two tables: + * 1) Special Multicast Table for MAC addresses of the form + * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). + * The MAC DA[7:0] bits are used as a pointer to the Special Multicast + * Table entries in the DA-Filter table. + * 2) Other Multicast Table for multicast of another type. A CRC-8 value + * is used as an index to the Other Multicast Table entries in the + * DA-Filter table. + */ +static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, + int queue) +{ + unsigned char crc_result = 0; + + if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) { + mvneta_set_special_mcast_addr(pp, p_addr[5], queue); + return 0; + } + + crc_result = mvneta_addr_crc(p_addr); + if (queue == -1) { + if (pp->mcast_count[crc_result] == 0) { + netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", + crc_result); + return -EINVAL; + } + + pp->mcast_count[crc_result]--; + if (pp->mcast_count[crc_result] != 0) { + netdev_info(pp->dev, + "After delete there are %d valid Mcast for crc8=0x%02x\n", + pp->mcast_count[crc_result], crc_result); + return -EINVAL; + } + } else + pp->mcast_count[crc_result]++; + + mvneta_set_other_mcast_addr(pp, crc_result, queue); + + return 0; +} + +/* Configure filtering mode of the Ethernet port */ +static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, + int is_promisc) +{ + u32 port_cfg_reg, val; + + port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); + + val = mvreg_read(pp, MVNETA_TYPE_PRIO); + + /* Set / Clear UPM bit in port configuration register */ + if (is_promisc) { + /* Accept all Unicast addresses */ + port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; + val |= MVNETA_FORCE_UNI; + mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); + mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); + } else { + /* Reject all Unicast addresses */ + port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; + val &= ~MVNETA_FORCE_UNI; + } + + mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); + mvreg_write(pp, MVNETA_TYPE_PRIO, val); +} + +/* register unicast and multicast addresses */ +static void mvneta_set_rx_mode(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + struct netdev_hw_addr *ha; + + if (dev->flags & IFF_PROMISC) { + /* Accept all: Multicast + Unicast */ + mvneta_rx_unicast_promisc_set(pp, 1); + mvneta_set_ucast_table(pp, rxq_def); + mvneta_set_special_mcast_table(pp, rxq_def); + mvneta_set_other_mcast_table(pp, rxq_def); + } else { + /* Accept single Unicast */ + mvneta_rx_unicast_promisc_set(pp, 0); + mvneta_set_ucast_table(pp, -1); + mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); + + if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast */ + mvneta_set_special_mcast_table(pp, rxq_def); + mvneta_set_other_mcast_table(pp, rxq_def); + } else 
{ + /* Accept only initialized multicast */ + mvneta_set_special_mcast_table(pp, -1); + mvneta_set_other_mcast_table(pp, -1); + + if (!netdev_mc_empty(dev)) { + netdev_for_each_mc_addr(ha, dev) { + mvneta_mcast_addr_set(pp, ha->addr, + rxq_def); + } + } + } + } +} + +/* Interrupt handling - the callback for request_irq() */ +static irqreturn_t mvneta_isr(int irq, void *dev_id) +{ + struct mvneta_port *pp = (struct mvneta_port *)dev_id; + + /* Mask all interrupts */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + + napi_schedule(&pp->napi); + + return IRQ_HANDLED; +} + +static int mvneta_fixed_link_update(struct mvneta_port *pp, + struct phy_device *phy) +{ + struct fixed_phy_status status; + struct fixed_phy_status changed = {}; + u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); + + status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); + if (gmac_stat & MVNETA_GMAC_SPEED_1000) + status.speed = SPEED_1000; + else if (gmac_stat & MVNETA_GMAC_SPEED_100) + status.speed = SPEED_100; + else + status.speed = SPEED_10; + status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); + changed.link = 1; + changed.speed = 1; + changed.duplex = 1; + fixed_phy_update_state(phy, &status, &changed); + return 0; +} + +/* NAPI handler + * Bits 0 - 7 of the causeRxTx register indicate that are transmitted + * packets on the corresponding TXQ (Bit 0 is for TX queue 1). + * Bits 8 -15 of the cause Rx Tx register indicate that are received + * packets on the corresponding RXQ (Bit 8 is for RX queue 0). + * Each CPU has its own causeRxTx register + */ +static int mvneta_poll(struct napi_struct *napi, int budget) +{ + int rx_done = 0; + u32 cause_rx_tx; + unsigned long flags; + struct mvneta_port *pp = netdev_priv(napi->dev); + + if (!netif_running(pp->dev)) { + napi_complete(napi); + return rx_done; + } + + /* Read cause register */ + cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); + if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { + u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); + + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + if (pp->use_inband_status && (cause_misc & + (MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | + MVNETA_CAUSE_PSC_SYNC_CHANGE))) { + mvneta_fixed_link_update(pp, pp->phy_dev); + } + } + + /* Release Tx descriptors */ + if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { + mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); + cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; + } + + /* For the case where the last mvneta_poll did not process all + * RX packets + */ + cause_rx_tx |= pp->cause_rx_tx; + if (rxq_number > 1) { + while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) { + int count; + struct mvneta_rx_queue *rxq; + /* get rx queue number from cause_rx_tx */ + rxq = mvneta_rx_policy(pp, cause_rx_tx); + if (!rxq) + break; + + /* process the packet in that rx queue */ + count = mvneta_rx(pp, budget, rxq); + rx_done += count; + budget -= count; + if (budget > 0) { + /* set off the rx bit of the + * corresponding bit in the cause rx + * tx register, so that next iteration + * will find the next rx queue where + * packets are received on + */ + cause_rx_tx &= ~((1 << rxq->id) << 8); + } + } + } else { + rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); + budget -= rx_done; + } + + if (budget > 0) { + cause_rx_tx = 0; + napi_complete(napi); + local_irq_save(flags); + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK(rxq_number) | + MVNETA_TX_INTR_MASK(txq_number) | + MVNETA_MISCINTR_INTR_MASK); + local_irq_restore(flags); + } + + pp->cause_rx_tx = 
cause_rx_tx; + return rx_done; +} + +/* Handle rxq fill: allocates rxq skbs; called when initializing a port */ +static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + int num) +{ + int i; + + for (i = 0; i < num; i++) { + memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); + if (mvneta_rx_refill(pp, rxq->descs + i) != 0) { + netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", + __func__, rxq->id, i, num); + break; + } + } + + /* Add this number of RX descriptors as non occupied (ready to + * get packets) + */ + mvneta_rxq_non_occup_desc_add(pp, rxq, i); + + return i; +} + +/* Free all packets pending transmit from all TXQs and reset TX port */ +static void mvneta_tx_reset(struct mvneta_port *pp) +{ + int queue; + + /* free the skb's in the tx ring */ + for (queue = 0; queue < txq_number; queue++) + mvneta_txq_done_force(pp, &pp->txqs[queue]); + + mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); + mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); +} + +static void mvneta_rx_reset(struct mvneta_port *pp) +{ + mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); + mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); +} + +/* Rx/Tx queue initialization/cleanup methods */ + +/* Create a specified RX queue */ +static int mvneta_rxq_init(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) + +{ + rxq->size = pp->rx_ring_size; + + /* Allocate memory for RX descriptors */ + rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, + rxq->size * MVNETA_DESC_ALIGNED_SIZE, + &rxq->descs_phys, GFP_KERNEL); + if (rxq->descs == NULL) + return -ENOMEM; + + BUG_ON(rxq->descs != + PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); + + rxq->last_desc = rxq->size - 1; + + /* Set Rx descriptors queue starting address */ + mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); + mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); + + /* Set Offset */ + mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); + + /* Set coalescing pkts and time */ + mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); + mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); + + /* Fill RXQ with buffers from RX pool */ + mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); + mvneta_rxq_bm_disable(pp, rxq); + mvneta_rxq_fill(pp, rxq, rxq->size); + + return 0; +} + +/* Cleanup Rx queue */ +static void mvneta_rxq_deinit(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + mvneta_rxq_drop_pkts(pp, rxq); + + if (rxq->descs) + dma_free_coherent(pp->dev->dev.parent, + rxq->size * MVNETA_DESC_ALIGNED_SIZE, + rxq->descs, + rxq->descs_phys); + + rxq->descs = NULL; + rxq->last_desc = 0; + rxq->next_desc_to_proc = 0; + rxq->descs_phys = 0; +} + +/* Create and initialize a tx queue */ +static int mvneta_txq_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + txq->size = pp->tx_ring_size; + + /* A queue must always have room for at least one skb. + * Therefore, stop the queue when the free entries reaches + * the maximum number of descriptors per skb. 
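+ * The queue is woken again from mvneta_txq_done() once the number of + * in-flight descriptors drops to tx_wake_threshold (half the stop threshold).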
+ */ + txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; + txq->tx_wake_threshold = txq->tx_stop_threshold / 2; + + + /* Allocate memory for TX descriptors */ + txq->descs = dma_alloc_coherent(pp->dev->dev.parent, + txq->size * MVNETA_DESC_ALIGNED_SIZE, + &txq->descs_phys, GFP_KERNEL); + if (txq->descs == NULL) + return -ENOMEM; + + /* Make sure descriptor address is cache line size aligned */ + BUG_ON(txq->descs != + PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); + + txq->last_desc = txq->size - 1; + + /* Set maximum bandwidth for enabled TXQs */ + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); + + /* Set Tx descriptors queue starting address */ + mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); + + txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL); + if (txq->tx_skb == NULL) { + dma_free_coherent(pp->dev->dev.parent, + txq->size * MVNETA_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + return -ENOMEM; + } + + /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ + txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, + txq->size * TSO_HEADER_SIZE, + &txq->tso_hdrs_phys, GFP_KERNEL); + if (txq->tso_hdrs == NULL) { + kfree(txq->tx_skb); + dma_free_coherent(pp->dev->dev.parent, + txq->size * MVNETA_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + return -ENOMEM; + } + mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); + + return 0; +} + +/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/ +static void mvneta_txq_deinit(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) +{ + kfree(txq->tx_skb); + + if (txq->tso_hdrs) + dma_free_coherent(pp->dev->dev.parent, + txq->size * TSO_HEADER_SIZE, + txq->tso_hdrs, txq->tso_hdrs_phys); + if (txq->descs) + dma_free_coherent(pp->dev->dev.parent, + txq->size * MVNETA_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + + txq->descs = NULL; + txq->last_desc = 0; + txq->next_desc_to_proc = 0; + txq->descs_phys = 0; + + /* Set minimum bandwidth for disabled TXQs */ + mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); + mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); + + /* Set Tx descriptors queue starting address and size */ + mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); + mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); +} + +/* Cleanup all Tx queues */ +static void mvneta_cleanup_txqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < txq_number; queue++) + mvneta_txq_deinit(pp, &pp->txqs[queue]); +} + +/* Cleanup all Rx queues */ +static void mvneta_cleanup_rxqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < rxq_number; queue++) + mvneta_rxq_deinit(pp, &pp->rxqs[queue]); +} + + +/* Init all Rx queues */ +static int mvneta_setup_rxqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < rxq_number; queue++) { + int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); + if (err) { + netdev_err(pp->dev, "%s: can't create rxq=%d\n", + __func__, queue); + mvneta_cleanup_rxqs(pp); + return err; + } + } + + return 0; +} + +/* Init all tx queues */ +static int mvneta_setup_txqs(struct mvneta_port *pp) +{ + int queue; + + for (queue = 0; queue < txq_number; queue++) { + int err = mvneta_txq_init(pp, &pp->txqs[queue]); + if (err) { + netdev_err(pp->dev, "%s: can't create txq=%d\n", + __func__, queue); + mvneta_cleanup_txqs(pp); + return err; + } + } + + 
return 0; +} + +static void mvneta_start_dev(struct mvneta_port *pp) +{ + mvneta_max_rx_size_set(pp, pp->pkt_size); + mvneta_txq_max_tx_size_set(pp, pp->pkt_size); + + /* start the Rx/Tx activity */ + mvneta_port_enable(pp); + + /* Enable polling on the port */ + napi_enable(&pp->napi); + + /* Unmask interrupts */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK(rxq_number) | + MVNETA_TX_INTR_MASK(txq_number) | + MVNETA_MISCINTR_INTR_MASK); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | + MVNETA_CAUSE_PSC_SYNC_CHANGE); + + phy_start(pp->phy_dev); + netif_tx_start_all_queues(pp->dev); +} + +static void mvneta_stop_dev(struct mvneta_port *pp) +{ + phy_stop(pp->phy_dev); + + napi_disable(&pp->napi); + + netif_carrier_off(pp->dev); + + mvneta_port_down(pp); + netif_tx_stop_all_queues(pp->dev); + + /* Stop the port activity */ + mvneta_port_disable(pp); + + /* Clear all ethernet port interrupts */ + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); + mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); + + /* Mask all ethernet port interrupts */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); + mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + + mvneta_tx_reset(pp); + mvneta_rx_reset(pp); +} + +/* Return positive if MTU is valid */ +static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) +{ + if (mtu < 68) { + netdev_err(dev, "cannot change mtu to less than 68\n"); + return -EINVAL; + } + + /* 9676 == 9700 - 20 and rounding to 8 */ + if (mtu > 9676) { + netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu); + mtu = 9676; + } + + if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { + netdev_info(dev, "Illegal MTU value %d, rounding to %d\n", + mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); + mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); + } + + return mtu; +} + +/* Change the device mtu */ +static int mvneta_change_mtu(struct net_device *dev, int mtu) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + mtu = mvneta_check_mtu_valid(dev, mtu); + if (mtu < 0) + return -EINVAL; + + dev->mtu = mtu; + + if (!netif_running(dev)) { + netdev_update_features(dev); + return 0; + } + + /* The interface is running, so we have to force a + * reallocation of the queues + */ + mvneta_stop_dev(pp); + + mvneta_cleanup_txqs(pp); + mvneta_cleanup_rxqs(pp); + + pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); + pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + ret = mvneta_setup_rxqs(pp); + if (ret) { + netdev_err(dev, "unable to setup rxqs after MTU change\n"); + return ret; + } + + ret = mvneta_setup_txqs(pp); + if (ret) { + netdev_err(dev, "unable to setup txqs after MTU change\n"); + return ret; + } + + mvneta_start_dev(pp); + mvneta_port_up(pp); + + netdev_update_features(dev); + + return 0; +} + +static netdev_features_t mvneta_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); + netdev_info(dev, + "Disable IP checksum for MTU greater than %dB\n", + pp->tx_csum_limit); + } + + return features; +} + +/* Get mac address */ +static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_h; + + mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); + mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; 
+ addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = (mac_addr_l >> 8) & 0xFF; + addr[5] = mac_addr_l & 0xFF; +} + +/* Handle setting mac address */ +static int mvneta_set_mac_addr(struct net_device *dev, void *addr) +{ + struct mvneta_port *pp = netdev_priv(dev); + struct sockaddr *sockaddr = addr; + int ret; + + ret = eth_prepare_mac_addr_change(dev, addr); + if (ret < 0) + return ret; + /* Remove previous address table entry */ + mvneta_mac_addr_set(pp, dev->dev_addr, -1); + + /* Set new addr in hw */ + mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); + + eth_commit_mac_addr_change(dev, addr); + return 0; +} + +static void mvneta_adjust_link(struct net_device *ndev) +{ + struct mvneta_port *pp = netdev_priv(ndev); + struct phy_device *phydev = pp->phy_dev; + int status_change = 0; + + if (phydev->link) { + if ((pp->speed != phydev->speed) || + (pp->duplex != phydev->duplex)) { + u32 val; + + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_CONFIG_FULL_DUPLEX); + + if (phydev->duplex) + val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (phydev->speed == SPEED_1000) + val |= MVNETA_GMAC_CONFIG_GMII_SPEED; + else if (phydev->speed == SPEED_100) + val |= MVNETA_GMAC_CONFIG_MII_SPEED; + + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + + pp->duplex = phydev->duplex; + pp->speed = phydev->speed; + } + } + + if (phydev->link != pp->link) { + if (!phydev->link) { + pp->duplex = -1; + pp->speed = 0; + } + + pp->link = phydev->link; + status_change = 1; + } + + if (status_change) { + if (phydev->link) { + if (!pp->use_inband_status) { + u32 val = mvreg_read(pp, + MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; + val |= MVNETA_GMAC_FORCE_LINK_PASS; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + val); + } + mvneta_port_up(pp); + } else { + if (!pp->use_inband_status) { + u32 val = mvreg_read(pp, + MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; + val |= MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + val); + } + mvneta_port_down(pp); + } + phy_print_status(phydev); + } +} + +static int mvneta_mdio_probe(struct mvneta_port *pp) +{ + struct phy_device *phy_dev; + + phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, + pp->phy_interface); + if (!phy_dev) { + netdev_err(pp->dev, "could not find the PHY\n"); + return -ENODEV; + } + + phy_dev->supported &= PHY_GBIT_FEATURES; + phy_dev->advertising = phy_dev->supported; + + pp->phy_dev = phy_dev; + pp->link = 0; + pp->duplex = 0; + pp->speed = 0; + + return 0; +} + +static void mvneta_mdio_remove(struct mvneta_port *pp) +{ + phy_disconnect(pp->phy_dev); + pp->phy_dev = NULL; +} + +static int mvneta_open(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); + pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + ret = mvneta_setup_rxqs(pp); + if (ret) + return ret; + + ret = mvneta_setup_txqs(pp); + if (ret) + goto err_cleanup_rxqs; + + /* Connect to port interrupt line */ + ret = request_irq(pp->dev->irq, mvneta_isr, 0, + MVNETA_DRIVER_NAME, pp); + if (ret) { + netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); + goto err_cleanup_txqs; + } + + /* In default link is down */ + netif_carrier_off(pp->dev); + + ret = mvneta_mdio_probe(pp); + if (ret < 0) { 
+ netdev_err(dev, "cannot probe MDIO bus\n"); + goto err_free_irq; + } + + mvneta_start_dev(pp); + + return 0; + +err_free_irq: + free_irq(pp->dev->irq, pp); +err_cleanup_txqs: + mvneta_cleanup_txqs(pp); +err_cleanup_rxqs: + mvneta_cleanup_rxqs(pp); + return ret; +} + +/* Stop the port, free port interrupt line */ +static int mvneta_stop(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); + + mvneta_stop_dev(pp); + mvneta_mdio_remove(pp); + free_irq(dev->irq, pp); + mvneta_cleanup_rxqs(pp); + mvneta_cleanup_txqs(pp); + + return 0; +} + +static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (!pp->phy_dev) + return -ENOTSUPP; + + return phy_mii_ioctl(pp->phy_dev, ifr, cmd); +} + +/* Ethtool methods */ + +/* Get settings (phy address, speed) for ethtools */ +int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (!pp->phy_dev) + return -ENODEV; + + return phy_ethtool_gset(pp->phy_dev, cmd); +} + +/* Set settings (phy address, speed) for ethtools */ +int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (!pp->phy_dev) + return -ENODEV; + + return phy_ethtool_sset(pp->phy_dev, cmd); +} + +/* Set interrupt coalescing for ethtools */ +static int mvneta_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvneta_port *pp = netdev_priv(dev); + int queue; + + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + rxq->time_coal = c->rx_coalesce_usecs; + rxq->pkts_coal = c->rx_max_coalesced_frames; + mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); + mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + txq->done_pkts_coal = c->tx_max_coalesced_frames; + mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); + } + + return 0; +} + +/* get coalescing for ethtools */ +static int mvneta_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvneta_port *pp = netdev_priv(dev); + + c->rx_coalesce_usecs = pp->rxqs[0].time_coal; + c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; + + c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; + return 0; +} + + +static void mvneta_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(&dev->dev), + sizeof(drvinfo->bus_info)); +} + + +static void mvneta_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct mvneta_port *pp = netdev_priv(netdev); + + ring->rx_max_pending = MVNETA_MAX_RXD; + ring->tx_max_pending = MVNETA_MAX_TXD; + ring->rx_pending = pp->rx_ring_size; + ring->tx_pending = pp->tx_ring_size; +} + +static int mvneta_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) + return -EINVAL; + pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? 
+ ring->rx_pending : MVNETA_MAX_RXD; + + pp->tx_ring_size = clamp_t(u16, ring->tx_pending, + MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); + if (pp->tx_ring_size != ring->tx_pending) + netdev_warn(dev, "TX queue size set to %u (requested %u)\n", + pp->tx_ring_size, ring->tx_pending); + + if (netif_running(dev)) { + mvneta_stop(dev); + if (mvneta_open(dev)) { + netdev_err(dev, + "error on opening device after ring param change\n"); + return -ENOMEM; + } + } + + return 0; +} + +static const struct net_device_ops mvneta_netdev_ops = { + .ndo_open = mvneta_open, + .ndo_stop = mvneta_stop, + .ndo_start_xmit = mvneta_tx, + .ndo_set_rx_mode = mvneta_set_rx_mode, + .ndo_set_mac_address = mvneta_set_mac_addr, + .ndo_change_mtu = mvneta_change_mtu, + .ndo_fix_features = mvneta_fix_features, + .ndo_get_stats64 = mvneta_get_stats64, + .ndo_do_ioctl = mvneta_ioctl, +}; + +const struct ethtool_ops mvneta_eth_tool_ops = { + .get_link = ethtool_op_get_link, + .get_settings = mvneta_ethtool_get_settings, + .set_settings = mvneta_ethtool_set_settings, + .set_coalesce = mvneta_ethtool_set_coalesce, + .get_coalesce = mvneta_ethtool_get_coalesce, + .get_drvinfo = mvneta_ethtool_get_drvinfo, + .get_ringparam = mvneta_ethtool_get_ringparam, + .set_ringparam = mvneta_ethtool_set_ringparam, +}; + +/* Initialize hw */ +static int mvneta_init(struct device *dev, struct mvneta_port *pp) +{ + int queue; + + /* Disable port */ + mvneta_port_disable(pp); + + /* Set port default values */ + mvneta_defaults_set(pp); + + pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue), + GFP_KERNEL); + if (!pp->txqs) + return -ENOMEM; + + /* Initialize TX descriptor rings */ + for (queue = 0; queue < txq_number; queue++) { + struct mvneta_tx_queue *txq = &pp->txqs[queue]; + txq->id = queue; + txq->size = pp->tx_ring_size; + txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; + } + + pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue), + GFP_KERNEL); + if (!pp->rxqs) + return -ENOMEM; + + /* Create Rx descriptor rings */ + for (queue = 0; queue < rxq_number; queue++) { + struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; + rxq->id = queue; + rxq->size = pp->rx_ring_size; + rxq->pkts_coal = MVNETA_RX_COAL_PKTS; + rxq->time_coal = MVNETA_RX_COAL_USEC; + } + + return 0; +} + +/* platform glue : initialize decoding windows */ +static void mvneta_conf_mbus_windows(struct mvneta_port *pp, + const struct mbus_dram_target_info *dram) +{ + u32 win_enable; + u32 win_protect; + int i; + + for (i = 0; i < 6; i++) { + mvreg_write(pp, MVNETA_WIN_BASE(i), 0); + mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); + + if (i < 4) + mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); + } + + win_enable = 0x3f; + win_protect = 0; + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | + (cs->mbus_attr << 8) | dram->mbus_dram_target_id); + + mvreg_write(pp, MVNETA_WIN_SIZE(i), + (cs->size - 1) & 0xffff0000); + + win_enable &= ~(1 << i); + win_protect |= 3 << (2 * i); + } + + mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); +} + +/* Power up the port */ +static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) +{ + u32 ctrl; + + /* MAC Cause register should be cleared */ + mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); + + ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + + /* Even though it might look weird, when we're configured in + * SGMII or QSGMII mode, the RGMII bit needs to be set. 
+ */ + switch(phy_mode) { + case PHY_INTERFACE_MODE_QSGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); + ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; + break; + case PHY_INTERFACE_MODE_SGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); + ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + ctrl |= MVNETA_GMAC2_PORT_RGMII; + break; + default: + return -EINVAL; + } + + if (pp->use_inband_status) + ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE; + + /* Cancel Port Reset */ + ctrl &= ~MVNETA_GMAC2_PORT_RESET; + mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); + + while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & + MVNETA_GMAC2_PORT_RESET) != 0) + continue; + + return 0; +} + +/* Device initialization routine */ +static int mvneta_probe(struct platform_device *pdev) +{ + const struct mbus_dram_target_info *dram_target_info; + struct resource *res; + struct device_node *dn = pdev->dev.of_node; + struct device_node *phy_node; + struct mvneta_port *pp; + struct net_device *dev; + const char *dt_mac_addr; + char hw_mac_addr[ETH_ALEN]; + const char *mac_from; + int phy_mode; + int fixed_phy = 0; + int err; + + /* Our multiqueue support is not complete, so for now, only + * allow the usage of the first RX queue + */ + if (rxq_def != 0) { + dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def); + return -EINVAL; + } + + dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); + if (!dev) + return -ENOMEM; + + dev->irq = irq_of_parse_and_map(dn, 0); + if (dev->irq == 0) { + err = -EINVAL; + goto err_free_netdev; + } + + phy_node = of_parse_phandle(dn, "phy", 0); + if (!phy_node) { + if (!of_phy_is_fixed_link(dn)) { + dev_err(&pdev->dev, "no PHY specified\n"); + err = -ENODEV; + goto err_free_irq; + } + + err = of_phy_register_fixed_link(dn); + if (err < 0) { + dev_err(&pdev->dev, "cannot register fixed PHY\n"); + goto err_free_irq; + } + fixed_phy = 1; + + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. 
+ */ + phy_node = of_node_get(dn); + } + + phy_mode = of_get_phy_mode(dn); + if (phy_mode < 0) { + dev_err(&pdev->dev, "incorrect phy-mode\n"); + err = -EINVAL; + goto err_put_phy_node; + } + + dev->tx_queue_len = MVNETA_MAX_TXD; + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &mvneta_netdev_ops; + + dev->ethtool_ops = &mvneta_eth_tool_ops; + + pp = netdev_priv(dev); + pp->phy_node = phy_node; + pp->phy_interface = phy_mode; + pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) && + fixed_phy; + + pp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pp->clk)) { + err = PTR_ERR(pp->clk); + goto err_put_phy_node; + } + + clk_prepare_enable(pp->clk); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pp->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pp->base)) { + err = PTR_ERR(pp->base); + goto err_clk; + } + + /* Alloc per-cpu stats */ + pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); + if (!pp->stats) { + err = -ENOMEM; + goto err_clk; + } + + dt_mac_addr = of_get_mac_address(dn); + if (dt_mac_addr) { + mac_from = "device tree"; + memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN); + } else { + mvneta_get_mac_addr(pp, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + mac_from = "hardware"; + memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN); + } else { + mac_from = "random"; + eth_hw_addr_random(dev); + } + } + + if (of_device_is_compatible(dn, "marvell,armada-370-neta")) + pp->tx_csum_limit = 1600; + + pp->tx_ring_size = MVNETA_MAX_TXD; + pp->rx_ring_size = MVNETA_MAX_RXD; + + pp->dev = dev; + SET_NETDEV_DEV(dev, &pdev->dev); + + err = mvneta_init(&pdev->dev, pp); + if (err < 0) + goto err_free_stats; + + err = mvneta_port_power_up(pp, phy_mode); + if (err < 0) { + dev_err(&pdev->dev, "can't power up port\n"); + goto err_free_stats; + } + + dram_target_info = mv_mbus_dram_info(); + if (dram_target_info) + mvneta_conf_mbus_windows(pp, dram_target_info); + + netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); + + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; + dev->priv_flags |= IFF_UNICAST_FLT; + dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; + + err = register_netdev(dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register\n"); + goto err_free_stats; + } + + netdev_info(dev, "Using %s mac address %pM\n", mac_from, + dev->dev_addr); + + platform_set_drvdata(pdev, pp->dev); + + if (pp->use_inband_status) { + struct phy_device *phy = of_phy_find_device(dn); + + mvneta_fixed_link_update(pp, phy); + } + + return 0; + +err_free_stats: + free_percpu(pp->stats); +err_clk: + clk_disable_unprepare(pp->clk); +err_put_phy_node: + of_node_put(phy_node); +err_free_irq: + irq_dispose_mapping(dev->irq); +err_free_netdev: + free_netdev(dev); + return err; +} + +/* Device removal routine */ +static int mvneta_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct mvneta_port *pp = netdev_priv(dev); + + unregister_netdev(dev); + clk_disable_unprepare(pp->clk); + free_percpu(pp->stats); + irq_dispose_mapping(dev->irq); + of_node_put(pp->phy_node); + free_netdev(dev); + + return 0; +} + +static const struct of_device_id mvneta_match[] = { + { .compatible = "marvell,armada-370-neta" }, + { .compatible = "marvell,armada-xp-neta" }, + { } +}; +MODULE_DEVICE_TABLE(of, mvneta_match); + +static struct platform_driver mvneta_driver = { + .probe = mvneta_probe, + .remove = mvneta_remove, + .driver = { + .name = 
MVNETA_DRIVER_NAME, + .of_match_table = mvneta_match, + }, +}; + +module_platform_driver(mvneta_driver); + +MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); +MODULE_AUTHOR("Rami Rosen , Thomas Petazzoni "); +MODULE_LICENSE("GPL"); + +module_param(rxq_number, int, S_IRUGO); +module_param(txq_number, int, S_IRUGO); + +module_param(rxq_def, int, S_IRUGO); +module_param(rx_copybreak, int, S_IRUGO | S_IWUSR); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c new file mode 100644 index 000000000..3e8b1bfb1 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -0,0 +1,6435 @@ +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* RX Fifo Registers */ +#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) +#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) +#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 +#define MVPP2_RX_FIFO_INIT_REG 0x64 + +/* RX DMA Top Registers */ +#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) +#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) +#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) +#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) +#define MVPP2_POOL_BUF_SIZE_OFFSET 5 +#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) +#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff +#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) +#define MVPP2_RXQ_POOL_SHORT_OFFS 20 +#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP2_RXQ_POOL_LONG_OFFS 24 +#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 +#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 +#define MVPP2_RXQ_DISABLE_MASK BIT(31) + +/* Parser Registers */ +#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 +#define MVPP2_PRS_PORT_LU_MAX 0xf +#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) +#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) +#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) +#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) +#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) +#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_TCAM_IDX_REG 0x1100 +#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) +#define MVPP2_PRS_TCAM_INV_MASK BIT(31) +#define MVPP2_PRS_SRAM_IDX_REG 0x1200 +#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) +#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 +#define MVPP2_PRS_TCAM_EN_MASK BIT(0) + +/* Classifier Registers */ +#define MVPP2_CLS_MODE_REG 0x1800 +#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) +#define MVPP2_CLS_PORT_WAY_REG 0x1810 +#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) +#define MVPP2_CLS_LKP_INDEX_REG 0x1814 +#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 +#define MVPP2_CLS_LKP_TBL_REG 0x1818 +#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) +#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 +#define 
MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 +#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) +#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 +#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) + +/* Descriptor Manager Top Registers */ +#define MVPP2_RXQ_NUM_REG 0x2040 +#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 +#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 +#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) +#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 +#define MVPP2_RXQ_NUM_NEW_OFFSET 16 +#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) +#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff +#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 +#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 +#define MVPP2_RXQ_THRESH_REG 0x204c +#define MVPP2_OCCUPIED_THRESH_OFFSET 0 +#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff +#define MVPP2_RXQ_INDEX_REG 0x2050 +#define MVPP2_TXQ_NUM_REG 0x2080 +#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 +#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 +#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TRANSMITTED_THRESH_OFFSET 16 +#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000 +#define MVPP2_TXQ_INDEX_REG 0x2098 +#define MVPP2_TXQ_PREF_BUF_REG 0x209c +#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) +#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) +#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) +#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) +#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) +#define MVPP2_TXQ_PENDING_REG 0x20a0 +#define MVPP2_TXQ_PENDING_MASK 0x3fff +#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 +#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) +#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 +#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 +#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 +#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 +#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 +#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff +#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 +#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 +#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff +#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) + +/* MBUS bridge registers */ +#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) +#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) +#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) +#define MVPP2_BASE_ADDR_ENABLE 0x4060 + +/* Interrupt Cause and Mask registers */ +#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) +#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) +#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) +#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) +#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) +#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) +#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) +#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) +#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) +#define 
MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) +#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) +#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc +#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 +#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 + +/* Buffer Manager registers */ +#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) +#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 +#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) +#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 +#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) +#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 +#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) +#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 +#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) +#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) +#define MVPP2_BM_START_MASK BIT(0) +#define MVPP2_BM_STOP_MASK BIT(1) +#define MVPP2_BM_STATE_MASK BIT(4) +#define MVPP2_BM_LOW_THRESH_OFFS 8 +#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 +#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_LOW_THRESH_OFFS) +#define MVPP2_BM_HIGH_THRESH_OFFS 16 +#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 +#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_HIGH_THRESH_OFFS) +#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) +#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) +#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) +#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) +#define MVPP2_BM_BPPE_FULL_MASK BIT(3) +#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) +#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) +#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 +#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) +#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) +#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) +#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) +#define MVPP2_BM_VIRT_RLS_REG 0x64c0 +#define MVPP2_BM_MC_RLS_REG 0x64c4 +#define MVPP2_BM_MC_ID_MASK 0xfff +#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) + +/* TX Scheduler registers */ +#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 +#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 +#define MVPP2_TXP_SCHED_ENQ_MASK 0xff +#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 +#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 +#define MVPP2_TXP_SCHED_MTU_REG 0x801c +#define MVPP2_TXP_MTU_MAX 0x7FFFF +#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 +#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 +#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff +#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) +#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff +#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff + +/* TX general registers */ +#define 
MVPP2_TX_SNOOP_REG 0x8800 +#define MVPP2_TX_PORT_FLUSH_REG 0x8810 +#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) + +/* LMS registers */ +#define MVPP2_SRC_ADDR_MIDDLE 0x24 +#define MVPP2_SRC_ADDR_HIGH 0x28 +#define MVPP2_PHY_AN_CFG0_REG 0x34 +#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) +#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \ + 0x400 + (port) * 0x400) +#define MVPP2_MIB_LATE_COLLISION 0x7c +#define MVPP2_ISR_SUM_MASK_REG 0x220c +#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 + +/* Per-port registers */ +#define MVPP2_GMAC_CTRL_0_REG 0x0 +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_CTRL_1_REG 0x4 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_CTRL_2_REG 0x8 +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_AUTONEG_CONFIG 0xc +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ + MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) + +#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Descriptor ring Macros */ +#define MVPP2_QUEUE_NEXT_DESC(q, index) \ + (((index) < (q)->last_desc) ? ((index) + 1) : 0) + +/* Various constants */ + +/* Coalescing */ +#define MVPP2_TXDONE_COAL_PKTS_THRESH 15 +#define MVPP2_RX_COAL_PKTS 32 +#define MVPP2_RX_COAL_USEC 100 + +/* The two bytes Marvell header. Either contains a special value used + * by Marvell switches when a specific hardware mode is enabled (not + * supported by this driver) or is filled automatically by zeroes on + * the RX side. Those two bytes being at the front of the Ethernet + * header, they allow to have the IP header aligned on a 4 bytes + * boundary automatically: the hardware skips those two bytes on its + * own. 
+ */ +#define MVPP2_MH_SIZE 2 +#define MVPP2_ETH_TYPE_LEN 2 +#define MVPP2_PPPOE_HDR_SIZE 8 +#define MVPP2_VLAN_TAG_LEN 4 + +/* Lbtd 802.3 type */ +#define MVPP2_IP_LBDT_TYPE 0xfffa + +#define MVPP2_CPU_D_CACHE_LINE_SIZE 32 +#define MVPP2_TX_CSUM_MAX_SIZE 9800 + +/* Timeout constants */ +#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 +#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 + +#define MVPP2_TX_MTU_MAX 0x7ffff + +/* Maximum number of T-CONTs of PON port */ +#define MVPP2_MAX_TCONT 16 + +/* Maximum number of supported ports */ +#define MVPP2_MAX_PORTS 4 + +/* Maximum number of TXQs used by single port */ +#define MVPP2_MAX_TXQ 8 + +/* Maximum number of RXQs used by single port */ +#define MVPP2_MAX_RXQ 8 + +/* Dfault number of RXQs in use */ +#define MVPP2_DEFAULT_RXQ 4 + +/* Total number of RXQs available to all ports */ +#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ) + +/* Max number of Rx descriptors */ +#define MVPP2_MAX_RXD 128 + +/* Max number of Tx descriptors */ +#define MVPP2_MAX_TXD 1024 + +/* Amount of Tx descriptors that can be reserved at once by CPU */ +#define MVPP2_CPU_DESC_CHUNK 64 + +/* Max number of Tx descriptors in each aggregated queue */ +#define MVPP2_AGGR_TXQ_SIZE 256 + +/* Descriptor aligned size */ +#define MVPP2_DESC_ALIGNED_SIZE 32 + +/* Descriptor alignment mask */ +#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) + +/* RX FIFO constants */ +#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80 +#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 + +/* RX buffer constants */ +#define MVPP2_SKB_SHINFO_SIZE \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +#define MVPP2_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) + +#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) +#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ + ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) + +#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) + +/* IPv6 max L3 address size */ +#define MVPP2_MAX_L3_ADDR_SIZE 16 + +/* Port flags */ +#define MVPP2_F_LOOPBACK BIT(0) + +/* Marvell tag types */ +enum mvpp2_tag_type { + MVPP2_TAG_TYPE_NONE = 0, + MVPP2_TAG_TYPE_MH = 1, + MVPP2_TAG_TYPE_DSA = 2, + MVPP2_TAG_TYPE_EDSA = 3, + MVPP2_TAG_TYPE_VLAN = 4, + MVPP2_TAG_TYPE_LAST = 5 +}; + +/* Parser constants */ +#define MVPP2_PRS_TCAM_SRAM_SIZE 256 +#define MVPP2_PRS_TCAM_WORDS 6 +#define MVPP2_PRS_SRAM_WORDS 4 +#define MVPP2_PRS_FLOW_ID_SIZE 64 +#define MVPP2_PRS_FLOW_ID_MASK 0x3f +#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 +#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) +#define MVPP2_PRS_IPV4_HEAD 0x40 +#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 +#define MVPP2_PRS_IPV4_MC 0xe0 +#define MVPP2_PRS_IPV4_MC_MASK 0xf0 +#define MVPP2_PRS_IPV4_BC_MASK 0xff +#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MASK 0xf +#define MVPP2_PRS_IPV6_MC 0xff +#define MVPP2_PRS_IPV6_MC_MASK 0xff +#define MVPP2_PRS_IPV6_HOP_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f +#define MVPP2_PRS_DBL_VLANS_MAX 100 + +/* Tcam structure: + * - lookup ID - 4 bits + * - port ID - 1 byte + * - additional information - 1 byte + * - header data - 8 bytes + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
+ */ +#define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_PORT_MASK 0xff +#define MVPP2_PRS_LU_MASK 0xf +#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ + (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) +#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ + (((offs) * 2) - ((offs) % 2) + 2) +#define MVPP2_PRS_TCAM_AI_BYTE 16 +#define MVPP2_PRS_TCAM_PORT_BYTE 17 +#define MVPP2_PRS_TCAM_LU_BYTE 20 +#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) +#define MVPP2_PRS_TCAM_INV_WORD 5 +/* Tcam entries ID */ +#define MVPP2_PE_DROP_ALL 0 +#define MVPP2_PE_FIRST_FREE_TID 1 +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) +#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) +#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) + +/* Sram structure + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). 
+ */ +#define MVPP2_PRS_SRAM_RI_OFFS 0 +#define MVPP2_PRS_SRAM_RI_WORD 0 +#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 +#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 +#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 +#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 +#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_UDF_OFFS 73 +#define MVPP2_PRS_SRAM_UDF_BITS 8 +#define MVPP2_PRS_SRAM_UDF_MASK 0xff +#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 +#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 +#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 +#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 +#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 +#define MVPP2_PRS_SRAM_AI_OFFS 90 +#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 +#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 +#define MVPP2_PRS_SRAM_AI_MASK 0xff +#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 +#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf +#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 +#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 + +/* Sram result info bits assignment */ +#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 +#define MVPP2_PRS_RI_DSA_MASK 0x2 +#define MVPP2_PRS_RI_VLAN_MASK 0xc +#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) +#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) +#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 +#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) +#define MVPP2_PRS_RI_L2_CAST_MASK 0x600 +#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_MCAST BIT(9) +#define MVPP2_PRS_RI_L2_BCAST BIT(10) +#define MVPP2_PRS_RI_PPPOE_MASK 0x800 +#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000 +#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_IP4 BIT(12) +#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) +#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) +#define MVPP2_PRS_RI_L3_IP6 BIT(14) +#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) +#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000 +#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_MCAST BIT(15) +#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 +#define MVPP2_PRS_RI_UDF3_MASK 0x300000 +#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) +#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 +#define MVPP2_PRS_RI_L4_TCP BIT(22) +#define MVPP2_PRS_RI_L4_UDP BIT(23) +#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) +#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 +#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) +#define MVPP2_PRS_RI_DROP_MASK 0x80000000 + +/* Sram additional info bits assignment */ +#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) +#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) +#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) +#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) +#define MVPP2_PRS_SINGLE_VLAN_AI 0 +#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) + +/* DSA/EDSA type */ +#define MVPP2_PRS_TAGGED true +#define MVPP2_PRS_UNTAGGED false +#define MVPP2_PRS_EDSA 
true +#define MVPP2_PRS_DSA false + +/* MAC entries, shadow udf */ +enum mvpp2_prs_udf { + MVPP2_PRS_UDF_MAC_DEF, + MVPP2_PRS_UDF_MAC_RANGE, + MVPP2_PRS_UDF_L2_DEF, + MVPP2_PRS_UDF_L2_DEF_COPY, + MVPP2_PRS_UDF_L2_USER, +}; + +/* Lookup ID */ +enum mvpp2_prs_lookup { + MVPP2_PRS_LU_MH, + MVPP2_PRS_LU_MAC, + MVPP2_PRS_LU_DSA, + MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_L2, + MVPP2_PRS_LU_PPPOE, + MVPP2_PRS_LU_IP4, + MVPP2_PRS_LU_IP6, + MVPP2_PRS_LU_FLOWS, + MVPP2_PRS_LU_LAST, +}; + +/* L3 cast enum */ +enum mvpp2_prs_l3_cast { + MVPP2_PRS_L3_UNI_CAST, + MVPP2_PRS_L3_MULTI_CAST, + MVPP2_PRS_L3_BROAD_CAST +}; + +/* Classifier constants */ +#define MVPP2_CLS_FLOWS_TBL_SIZE 512 +#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 +#define MVPP2_CLS_LKP_TBL_SIZE 64 + +/* BM constants */ +#define MVPP2_BM_POOLS_NUM 8 +#define MVPP2_BM_LONG_BUF_NUM 1024 +#define MVPP2_BM_SHORT_BUF_NUM 2048 +#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) +#define MVPP2_BM_POOL_PTR_ALIGN 128 +#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port) +#define MVPP2_BM_SWF_SHORT_POOL 3 + +/* BM cookie (32 bits) definition */ +#define MVPP2_BM_COOKIE_POOL_OFFS 8 +#define MVPP2_BM_COOKIE_CPU_OFFS 24 + +/* BM short pool packet size + * These value assure that for SWF the total number + * of bytes allocated for each buffer will be 512 + */ +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) + +enum mvpp2_bm_type { + MVPP2_BM_FREE, + MVPP2_BM_SWF_LONG, + MVPP2_BM_SWF_SHORT +}; + +/* Definitions */ + +/* Shared Packet Processor resources */ +struct mvpp2 { + /* Shared registers' base addresses */ + void __iomem *base; + void __iomem *lms_base; + + /* Common clocks */ + struct clk *pp_clk; + struct clk *gop_clk; + + /* List of pointers to port structures */ + struct mvpp2_port **port_list; + + /* Aggregated TXQs */ + struct mvpp2_tx_queue *aggr_txqs; + + /* BM pools */ + struct mvpp2_bm_pool *bm_pools; + + /* PRS shadow table */ + struct mvpp2_prs_shadow *prs_shadow; + /* PRS auxiliary table for double vlan entries control */ + bool *prs_double_vlans; + + /* Tclk value */ + u32 tclk; +}; + +struct mvpp2_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + +struct mvpp2_port { + u8 id; + + int irq; + + struct mvpp2 *priv; + + /* Per-port registers' base address */ + void __iomem *base; + + struct mvpp2_rx_queue **rxqs; + struct mvpp2_tx_queue **txqs; + struct net_device *dev; + + int pkt_size; + + u32 pending_cause_rx; + struct napi_struct napi; + + /* Flags */ + unsigned long flags; + + u16 tx_ring_size; + u16 rx_ring_size; + struct mvpp2_pcpu_stats __percpu *stats; + + struct phy_device *phy_dev; + phy_interface_t phy_interface; + struct device_node *phy_node; + unsigned int link; + unsigned int duplex; + unsigned int speed; + + struct mvpp2_bm_pool *pool_long; + struct mvpp2_bm_pool *pool_short; + + /* Index of first port's physical RXQ */ + u8 first_rxq; +}; + +/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVPP2_TXD_L3_OFF_SHIFT 0 +#define MVPP2_TXD_IP_HLEN_SHIFT 8 +#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) +#define MVPP2_TXD_L4_CSUM_NOT BIT(14) +#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) +#define MVPP2_TXD_PADDING_DISABLE BIT(23) +#define MVPP2_TXD_L4_UDP BIT(24) +#define MVPP2_TXD_L3_IP6 BIT(26) +#define MVPP2_TXD_L_DESC BIT(28) +#define MVPP2_TXD_F_DESC BIT(29) + +#define MVPP2_RXD_ERR_SUMMARY 
BIT(15) +#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) +#define MVPP2_RXD_ERR_CRC 0x0 +#define MVPP2_RXD_ERR_OVERRUN BIT(13) +#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) +#define MVPP2_RXD_BM_POOL_ID_OFFS 16 +#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) +#define MVPP2_RXD_HWF_SYNC BIT(21) +#define MVPP2_RXD_L4_CSUM_OK BIT(22) +#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) +#define MVPP2_RXD_L4_TCP BIT(25) +#define MVPP2_RXD_L4_UDP BIT(26) +#define MVPP2_RXD_L3_IP4 BIT(28) +#define MVPP2_RXD_L3_IP6 BIT(30) +#define MVPP2_RXD_BUF_HDR BIT(31) + +struct mvpp2_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u8 packet_offset; /* the offset from the buffer beginning */ + u8 phys_txq; /* destination queue ID */ + u16 data_size; /* data size of transmitted packet in bytes */ + u32 buf_phys_addr; /* physical addr of transmitted buffer */ + u32 buf_cookie; /* cookie for access to TX buffer in tx path */ + u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + u32 reserved2; /* reserved (for future use) */ +}; + +struct mvpp2_rx_desc { + u32 status; /* info about received packet */ + u16 reserved1; /* parser_info (for future use, PnC) */ + u16 data_size; /* size of received packet in bytes */ + u32 buf_phys_addr; /* physical address of the buffer */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + u16 reserved2; /* gem_port_id (for future use, PON) */ + u16 reserved3; /* csum_l4 (for future use, PnC) */ + u8 reserved4; /* bm_qset (for future use, BM) */ + u8 reserved5; + u16 reserved6; /* classify_info (for future use, PnC) */ + u32 reserved7; /* flow_id (for future use, PnC) */ + u32 reserved8; +}; + +/* Per-CPU Tx queue control */ +struct mvpp2_txq_pcpu { + int cpu; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the + * descriptor ring + */ + int count; + + /* Number of Tx DMA descriptors reserved for each CPU */ + int reserved_num; + + /* Array of transmitted skb */ + struct sk_buff **tx_skb; + + /* Index of last TX DMA descriptor that was inserted */ + int txq_put_index; + + /* Index of the TX DMA descriptor to be cleaned up */ + int txq_get_index; +}; + +struct mvpp2_tx_queue { + /* Physical number of this Tx queue */ + u8 id; + + /* Logical number of this Tx queue */ + u8 log_id; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the descriptor ring */ + int count; + + /* Per-CPU control of physical Tx queues */ + struct mvpp2_txq_pcpu __percpu *pcpu; + + /* Array of transmitted skb */ + struct sk_buff **tx_skb; + + u32 done_pkts_coal; + + /* Virtual address of thex Tx DMA descriptors array */ + struct mvpp2_tx_desc *descs; + + /* DMA address of the Tx DMA descriptors array */ + dma_addr_t descs_phys; + + /* Index of the last Tx DMA descriptor */ + int last_desc; + + /* Index of the next Tx DMA descriptor to process */ + int next_desc_to_proc; +}; + +struct mvpp2_rx_queue { + /* RX queue number, in the range 0-31 for physical RXQs */ + u8 id; + + /* Num of rx descriptors in the rx descriptor ring */ + int size; + + u32 pkts_coal; + u32 time_coal; + + /* Virtual address of the RX DMA descriptors array */ + struct mvpp2_rx_desc *descs; + + /* DMA address of the RX DMA descriptors array */ + dma_addr_t descs_phys; + + /* Index of the last RX DMA descriptor */ + int last_desc; + + /* Index of the next RX DMA descriptor to process */ + int next_desc_to_proc; + + /* 
ID of port to which physical RXQ is mapped */ + int port; + + /* Port's logic RXQ number to which physical RXQ is mapped */ + int logic_rxq; +}; + +union mvpp2_prs_tcam_entry { + u32 word[MVPP2_PRS_TCAM_WORDS]; + u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; +}; + +union mvpp2_prs_sram_entry { + u32 word[MVPP2_PRS_SRAM_WORDS]; + u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; +}; + +struct mvpp2_prs_entry { + u32 index; + union mvpp2_prs_tcam_entry tcam; + union mvpp2_prs_sram_entry sram; +}; + +struct mvpp2_prs_shadow { + bool valid; + bool finish; + + /* Lookup ID */ + int lu; + + /* User defined offset */ + int udf; + + /* Result info */ + u32 ri; + u32 ri_mask; +}; + +struct mvpp2_cls_flow_entry { + u32 index; + u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; +}; + +struct mvpp2_cls_lookup_entry { + u32 lkpid; + u32 way; + u32 data; +}; + +struct mvpp2_bm_pool { + /* Pool number in the range 0-7 */ + int id; + enum mvpp2_bm_type type; + + /* Buffer Pointers Pool External (BPPE) size */ + int size; + /* Number of buffers for this pool */ + int buf_num; + /* Pool buffer size */ + int buf_size; + /* Packet size */ + int pkt_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE physical base address */ + dma_addr_t phys_addr; + + /* Ports using BM pool */ + u32 port_map; + + /* Occupied buffers indicator */ + atomic_t in_use; + int in_use_thresh; + + spinlock_t lock; +}; + +struct mvpp2_buff_hdr { + u32 next_buff_phys_addr; + u32 next_buff_virt_addr; + u16 byte_count; + u16 info; + u8 reserved1; /* bm_qset (for future use, BM) */ +}; + +/* Buffer header info bits */ +#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff +#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK) +#define MVPP2_B_HDR_INFO_LAST_OFFS 12 +#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12) +#define MVPP2_B_HDR_INFO_IS_LAST(info) \ + ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS) + +/* Static declaractions */ + +/* Number of RXQs used by single port */ +static int rxq_number = MVPP2_DEFAULT_RXQ; +/* Number of TXQs used by single port */ +static int txq_number = MVPP2_MAX_TXQ; + +#define MVPP2_DRIVER_NAME "mvpp2" +#define MVPP2_DRIVER_VERSION "1.0" + +/* Utility/helper methods */ + +static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) +{ + writel(data, priv->base + offset); +} + +static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) +{ + return readl(priv->base + offset); +} + +static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) +{ + txq_pcpu->txq_get_index++; + if (txq_pcpu->txq_get_index == txq_pcpu->size) + txq_pcpu->txq_get_index = 0; +} + +static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, + struct sk_buff *skb) +{ + txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; + txq_pcpu->txq_put_index++; + if (txq_pcpu->txq_put_index == txq_pcpu->size) + txq_pcpu->txq_put_index = 0; +} + +/* Get number of physical egress port */ +static inline int mvpp2_egress_port(struct mvpp2_port *port) +{ + return MVPP2_MAX_TCONT + port->id; +} + +/* Get number of physical TXQ */ +static inline int mvpp2_txq_phys(int port, int txq) +{ + return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; +} + +/* Parser configuration routines */ + +/* Update parser tcam and sram hw entries */ +static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +{ + int i; + + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + /* Clear entry invalidation bit */ + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + + /* Write tcam index - indirect access */ + 
mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + + return 0; +} + +/* Read tcam entry from hw */ +static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +{ + int i; + + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); + if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + return MVPP2_PRS_TCAM_ENTRY_INVALID; + + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + + return 0; +} + +/* Invalidate tcam hw entry */ +static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) +{ + /* Write index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), + MVPP2_PRS_TCAM_INV_MASK); +} + +/* Enable shadow table entry and set its lookup ID */ +static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) +{ + priv->prs_shadow[index].valid = true; + priv->prs_shadow[index].lu = lu; +} + +/* Update ri fields in shadow table entry */ +static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, + unsigned int ri, unsigned int ri_mask) +{ + priv->prs_shadow[index].ri_mask = ri_mask; + priv->prs_shadow[index].ri = ri; +} + +/* Update lookup field in tcam sw entry */ +static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; + pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; +} + +/* Update mask for single port in tcam sw entry */ +static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, + unsigned int port, bool add) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + if (add) + pe->tcam.byte[enable_off] &= ~(1 << port); + else + pe->tcam.byte[enable_off] |= 1 << port; +} + +/* Update port map in tcam sw entry */ +static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, + unsigned int ports) +{ + unsigned char port_mask = MVPP2_PRS_PORT_MASK; + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; + pe->tcam.byte[enable_off] &= ~port_mask; + pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; +} + +/* Obtain port map from tcam sw entry */ +static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; +} + +/* Set byte of data and its enable bits in tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char byte, + unsigned char enable) +{ + pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; 
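+	/* The matching enable bits sit two bytes further on, in the
+	 * same TCAM word (see MVPP2_PRS_TCAM_DATA_BYTE_EN above).
+	 */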
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Get byte of data and its enable bits from tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+					 unsigned int offs, unsigned char *byte,
+					 unsigned char *enable)
+{
+	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/* Compare tcam data bytes with a pattern */
+static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
+				    u16 data)
+{
+	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
+	u16 tcam_data;
+
+	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
+	if (tcam_data != data)
+		return false;
+	return true;
+}
+
+/* Update ai bits in tcam sw entry */
+static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int enable)
+{
+	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+
+		if (!(enable & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			pe->tcam.byte[ai_idx] |= 1 << i;
+		else
+			pe->tcam.byte[ai_idx] &= ~(1 << i);
+	}
+
+	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+}
+
+/* Get ai bits from tcam sw entry */
+static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/* Set ethertype in tcam sw entry */
+static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
+				  unsigned short ethertype)
+{
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry */
+static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
+				    int val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear bits in sram sw entry */
+static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
+				      int val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
+
+/* Update ri bits in sram sw entry */
+static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+		else
+			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+	}
+}
+
+/* Obtain ri bits from sram sw entry */
+static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+		else
+			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+	}
+}
+
+/* Read ai bits from sram sw entry */
+static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+	u8 bits;
+	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+	int ai_en_off = ai_off + 1;
+	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+	bits = (pe->sram.byte[ai_off] >> ai_shift) |
+	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+	return bits;
+}
+
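The TCAM sw-entry helpers above never index tcam.byte[] directly with a packet-header offset; they always go through MVPP2_PRS_TCAM_DATA_BYTE() and MVPP2_PRS_TCAM_DATA_BYTE_EN() defined earlier in this file, because each 32-bit TCAM word carries two data bytes in its low half and their enable bytes in its high half. The short sketch below is illustrative only and not part of the patch: it is plain user-space C that simply re-states those two macros so the interleaving is visible.

/* Illustrative sketch -- mirrors the TCAM byte-index macros from mvpp2.c. */
#include <stdio.h>

#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)

int main(void)
{
	int offs;

	/* Prints: offs 0 -> data 0, enable 2; offs 1 -> data 1, enable 3;
	 * offs 2 -> data 4, enable 6; offs 3 -> data 5, enable 7; ...
	 * i.e. bytes 0-1 of every TCAM word hold data, bytes 2-3 the enables.
	 */
	for (offs = 0; offs < 8; offs++)
		printf("offs %d -> data byte %d, enable byte %d\n",
		       offs, MVPP2_PRS_TCAM_DATA_BYTE(offs),
		       MVPP2_PRS_TCAM_DATA_BYTE_EN(offs));
	return 0;
}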
+/* In sram sw entry set lookup ID field of the tcam key to be used in the next + * lookup interation + */ +static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, + unsigned int lu) +{ + int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; + + mvpp2_prs_sram_bits_clear(pe, sram_next_off, + MVPP2_PRS_SRAM_NEXT_LU_MASK); + mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); +} + +/* In the sram sw entry set sign and value of the next lookup offset + * and the offset value generated to the classifier + */ +static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, + unsigned int op) +{ + /* Set sign */ + if (shift < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + shift = 0 - shift; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); + } + + /* Set value */ + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = + (unsigned char)shift; + + /* Reset and set operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* In the sram sw entry set sign and value of the user defined offset + * generated to the classifier + */ +static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, + unsigned int type, int offset, + unsigned int op) +{ + /* Set sign */ + if (offset < 0) { + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + offset = 0 - offset; + } else { + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); + } + + /* Set value */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, + MVPP2_PRS_SRAM_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + + MVPP2_PRS_SRAM_UDF_BITS)] &= + ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + + MVPP2_PRS_SRAM_UDF_BITS)] |= + (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); + + /* Set offset type */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, + MVPP2_PRS_SRAM_UDF_TYPE_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); + + /* Set offset operation */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, + MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); + + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= + ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> + (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + + pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= + (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); + + /* Set base offset as current */ + mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); +} + +/* Find parser flow entry */ +static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) +{ + struct mvpp2_prs_entry *pe; + int tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return NULL; + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); + + /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ + for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { + u8 bits; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) + continue; + + pe->index = tid; + mvpp2_prs_hw_read(priv, pe); 
+ bits = mvpp2_prs_sram_ai_get(pe); + + /* Sram store classification lookup ID in AI bits [5:0] */ + if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) + return pe; + } + kfree(pe); + + return NULL; +} + +/* Return first free tcam index, seeking from start to end */ +static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, + unsigned char end) +{ + int tid; + + if (start > end) + swap(start, end); + + if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) + end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; + + for (tid = start; tid <= end; tid++) { + if (!priv->prs_shadow[tid].valid) + return tid; + } + + return -EINVAL; +} + +/* Enable/disable dropping all mac da's */ +static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) +{ + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { + /* Entry exist - update port only */ + pe.index = MVPP2_PE_DROP_ALL; + mvpp2_prs_hw_read(priv, &pe); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = MVPP2_PE_DROP_ALL; + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set port to promiscuous mode */ +static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) +{ + struct mvpp2_prs_entry pe; + + /* Promiscuous mode - Accept unknown packets */ + + if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { + /* Entry exist - update port only */ + pe.index = MVPP2_PE_MAC_PROMISCUOUS; + mvpp2_prs_hw_read(priv, &pe); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = MVPP2_PE_MAC_PROMISCUOUS; + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set result info bits */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, + MVPP2_PRS_RI_L2_CAST_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Accept multicast */ +static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, + bool add) +{ + struct mvpp2_prs_entry pe; + unsigned char da_mc; + + /* Ethernet multicast address first byte is + * 0x01 for IPv4 and 0x33 for IPv6 + */ + da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 
0x01 : 0x33; + + if (priv->prs_shadow[index].valid) { + /* Entry exist - update port only */ + pe.index = index; + mvpp2_prs_hw_read(priv, &pe); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + pe.index = index; + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set result info bits */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, + MVPP2_PRS_RI_L2_CAST_MASK); + + /* Update tcam entry data first byte */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa packets */ +static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, + bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift; + + if (extend) { + tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; + shift = 8; + } else { + tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + pe.index = tid; + mvpp2_prs_hw_read(priv, &pe); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/ + mvpp2_prs_sram_shift_set(&pe, shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, 0, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + /* If packet is tagged continue check vlans */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + } else { + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set entry for dsa ethertype */ +static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, + bool add, bool tagged, bool extend) +{ + struct mvpp2_prs_entry pe; + int tid, shift, port_mask; + + if (extend) { + tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : + MVPP2_PE_ETYPE_EDSA_UNTAGGED; + port_mask = 0; + shift = 8; + } else { + tid = tagged ? 
MVPP2_PE_ETYPE_DSA_TAGGED : + MVPP2_PE_ETYPE_DSA_UNTAGGED; + port_mask = MVPP2_PRS_PORT_MASK; + shift = 4; + } + + if (priv->prs_shadow[tid].valid) { + /* Entry exist - update port only */ + pe.index = tid; + mvpp2_prs_hw_read(priv, &pe); + } else { + /* Entry doesn't exist - create new */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = tid; + + /* Set ethertype */ + mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); + mvpp2_prs_match_etype(&pe, 2, 0); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, + MVPP2_PRS_RI_DSA_MASK); + /* Shift ethertype + 2 byte reserved + tag*/ + mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); + + if (tagged) { + /* Set tagged bit in DSA tag */ + mvpp2_prs_tcam_data_byte_set(&pe, + MVPP2_ETH_TYPE_LEN + 2 + 3, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT, + MVPP2_PRS_TCAM_DSA_TAGGED_BIT); + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, + MVPP2_PRS_SRAM_AI_MASK); + /* If packet is tagged continue check vlans */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + } else { + /* Set result info bits to 'no vlans' */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + } + /* Mask/unmask all ports, depending on dsa type */ + mvpp2_prs_tcam_port_map_set(&pe, port_mask); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port, add); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Search for existing single/triple vlan entry */ +static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv, + unsigned short tpid, int ai) +{ + struct mvpp2_prs_entry *pe; + int tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return NULL; + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_bits, ai_bits; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + pe->index = tid; + + mvpp2_prs_hw_read(priv, pe); + match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid)); + if (!match) + continue; + + /* Get vlan type */ + ri_bits = mvpp2_prs_sram_ri_get(pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + + /* Get current ai value from tcam */ + ai_bits = mvpp2_prs_tcam_ai_get(pe); + /* Clear double vlan bit */ + ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; + + if (ai != ai_bits) + continue; + + if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + return pe; + } + kfree(pe); + + return NULL; +} + +/* Add/update single/triple vlan entry */ +static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, + unsigned int port_map) +{ + struct mvpp2_prs_entry *pe; + int tid_aux, tid; + int ret = 0; + + pe = mvpp2_prs_vlan_find(priv, tpid, ai); + + if (!pe) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return -ENOMEM; + + /* Get last double vlan tid */ + for (tid_aux = MVPP2_PE_LAST_FREE_TID; + tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + 
continue; + + pe->index = tid_aux; + mvpp2_prs_hw_read(priv, pe); + ri_bits = mvpp2_prs_sram_ri_get(pe); + if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == + MVPP2_PRS_RI_VLAN_DOUBLE) + break; + } + + if (tid <= tid_aux) { + ret = -EINVAL; + goto error; + } + + memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); + pe->index = tid; + + mvpp2_prs_match_etype(pe, 0, tpid); + + mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2); + /* Shift 4 bytes - skip 1 vlan tag */ + mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { + mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE, + MVPP2_PRS_RI_VLAN_MASK); + } else { + ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; + mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE, + MVPP2_PRS_RI_VLAN_MASK); + } + mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); + } + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(pe, port_map); + + mvpp2_prs_hw_write(priv, pe); + +error: + kfree(pe); + + return ret; +} + +/* Get first free double vlan ai number */ +static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) +{ + int i; + + for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { + if (!priv->prs_double_vlans[i]) + return i; + } + + return -EINVAL; +} + +/* Search for existing double vlan entry */ +static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv, + unsigned short tpid1, + unsigned short tpid2) +{ + struct mvpp2_prs_entry *pe; + int tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return NULL; + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); + + /* Go through the all entries with MVPP2_PRS_LU_VLAN */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int ri_mask; + bool match; + + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) + continue; + + pe->index = tid; + mvpp2_prs_hw_read(priv, pe); + + match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) + && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2)); + + if (!match) + continue; + + ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK; + if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) + return pe; + } + kfree(pe); + + return NULL; +} + +/* Add or update double vlan entry */ +static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, + unsigned short tpid2, + unsigned int port_map) +{ + struct mvpp2_prs_entry *pe; + int tid_aux, tid, ai, ret = 0; + + pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); + + if (!pe) { + /* Create new tcam entry */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return -ENOMEM; + + /* Set ai value for new double vlan entry */ + ai = mvpp2_prs_double_vlan_ai_free_get(priv); + if (ai < 0) { + ret = ai; + goto error; + } + + /* Get first single/triple vlan tid */ + for (tid_aux = MVPP2_PE_FIRST_FREE_TID; + tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { + unsigned int ri_bits; + + if (!priv->prs_shadow[tid_aux].valid || + priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) + continue; + + pe->index = tid_aux; + mvpp2_prs_hw_read(priv, pe); + ri_bits = mvpp2_prs_sram_ri_get(pe); + ri_bits &= MVPP2_PRS_RI_VLAN_MASK; + if (ri_bits == 
MVPP2_PRS_RI_VLAN_SINGLE || + ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) + break; + } + + if (tid >= tid_aux) { + ret = -ERANGE; + goto error; + } + + memset(pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); + pe->index = tid; + + priv->prs_double_vlans[ai] = true; + + mvpp2_prs_match_etype(pe, 0, tpid1); + mvpp2_prs_match_etype(pe, 4, tpid2); + + mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN); + /* Shift 8 bytes - skip 2 vlan tags */ + mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN); + } + + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(pe, port_map); + mvpp2_prs_hw_write(priv, pe); + +error: + kfree(pe); + return ret; +} + +/* IPv4 header parsing for fragmentation and L4 offset */ +static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_IGMP)) + return -EINVAL; + + /* Fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, + ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Not fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv4 L3 multicast or broadcast */ +static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int mask, tid; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + switch (l3_cast) { + case MVPP2_PRS_L3_MULTI_CAST: + mvpp2_prs_tcam_data_byte_set(&pe, 0, 
MVPP2_PRS_IPV4_MC, + MVPP2_PRS_IPV4_MC_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + case MVPP2_PRS_L3_BROAD_CAST: + mask = MVPP2_PRS_IPV4_BC_MASK; + mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + default: + return -EINVAL; + } + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for protocols over IPv6 */ +static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 6, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Write HW */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv6 L3 multicast entry */ +static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int tid; + + if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPv6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, + MVPP2_PRS_IPV6_MC_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Parser per-port 
initialization */ +static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, + int lu_max, int offset) +{ + u32 val; + + /* Set lookup ID */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); + val &= ~MVPP2_PRS_PORT_LU_MASK(port); + val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); + mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); + + /* Set maximum number of loops for packet received from port */ + val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); + val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); + val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); + mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); + + /* Set initial offset for packet header extraction for the first + * searching loop + */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); + val &= ~MVPP2_PRS_INIT_OFF_MASK(port); + val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); + mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); +} + +/* Default flow entries initialization for all ports */ +static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_hw_write(priv, &pe); + } +} + +/* Set default entry for Marvell Header field */ +static void mvpp2_prs_mh_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + + pe.index = MVPP2_PE_MH_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); + mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set default entires (place holder) for promiscuous, non-promiscuous and + * multicast MAC addresses + */ +static void mvpp2_prs_mac_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + /* place holders only - no ports */ + mvpp2_prs_mac_drop_all_set(priv, 0, false); + mvpp2_prs_mac_promisc_set(priv, 0, false); + mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); + mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); +} + +/* Set default entries for various types of dsa packets */ +static void mvpp2_prs_dsa_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + /* None tagged EDSA entry - place holder */ + 
mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_EDSA); + + /* Tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_DSA); + + /* Tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* None tagged EDSA ethertype entry - place holder*/ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + + /* Tagged EDSA ethertype entry - place holder*/ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* None tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + + /* Tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* Set default entry, in case DSA or EDSA tag not found */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = MVPP2_PE_DSA_DEFAULT; + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + /* Shift 0 bytes */ + mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Clear all sram ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Match basic ethertypes */ +static int mvpp2_prs_etype_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Ethertype: PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); + + mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: ARP */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, 
MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: LBTD */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Clear tcam data before updating */ + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD, + MVPP2_PRS_IPV4_HEAD_MASK); + + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv6 without options */ + tid 
= mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); + + /* Skip DIP of IPV6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + + MVPP2_MAX_L3_ADDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = MVPP2_PE_ETH_TYPE_UN; + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Generate flow in the next iteration*/ + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset even it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Configure vlan entries and detect up to 2 successive VLAN tags. 
+ * Possible options: + * 0x8100, 0x88A8 + * 0x8100, 0x8100 + * 0x8100 + * 0x88A8 + */ +static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + /* Double VLAN: 0x8100, 0x88A8 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Double VLAN: 0x8100, 0x8100 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x88a8 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x8100 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Set default double vlan entry */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_DBL; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + /* Clear ai for next iterations */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_DBL_VLAN_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vlan none entry */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_NONE; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for PPPoE ethertype */ +static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* IPv4 over PPPoE with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IP); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv4 over PPPoE without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + 
MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv6 over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IPv6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* Non-IP over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + /* Set L3 offset even if it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv4 */ +static int mvpp2_prs_ip4_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + /* Set entries for TCP, UDP and IGMP over IPv4 */ + err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 Broadcast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); + if (err) + return err; + + /* IPv4 Multicast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Default IPv4 entry for unknown protocols */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_PROTO_UN; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + 
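/* Unknown L4 protocol - report 'other' in the result info */ +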
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv4 entry for unicast address */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_ADDR_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv6 */ +static int mvpp2_prs_ip6_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid, err; + + /* Set entries for TCP, UDP and ICMP over IPv6 */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 is the last header. 
This is similar case as 6-TCP or 17-UDP */ + /* Result Info: UDF7=1, DS lite */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, + MVPP2_PRS_RI_UDF7_IP6_LITE, + MVPP2_PRS_RI_UDF7_MASK); + if (err) + return err; + + /* IPv6 multicast */ + err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Entry for checking hop limit */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | + MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_L3_PROTO_MASK | + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown protocols */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + /* Set L4 offset relatively to our current place */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown ext protocols */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, + MVPP2_PRS_IPV6_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unicast address */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_ADDR_UN; + + /* Finished: go to IPv6 again */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPV6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 
MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Parser default initialization */ +static int mvpp2_prs_default_init(struct platform_device *pdev, + struct mvpp2 *priv) +{ + int err, index, i; + + /* Enable tcam table */ + mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); + + /* Clear all tcam and sram entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); + + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); + } + + /* Invalidate all tcam entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) + mvpp2_prs_hw_inv(priv, index); + + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(struct mvpp2_prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + /* Always start from lookup = 0 */ + for (index = 0; index < MVPP2_MAX_PORTS; index++) + mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, + MVPP2_PRS_PORT_LU_MAX, 0); + + mvpp2_prs_def_flow_init(priv); + + mvpp2_prs_mh_init(priv); + + mvpp2_prs_mac_init(priv); + + mvpp2_prs_dsa_init(priv); + + err = mvpp2_prs_etype_init(priv); + if (err) + return err; + + err = mvpp2_prs_vlan_init(pdev, priv); + if (err) + return err; + + err = mvpp2_prs_pppoe_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip6_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip4_init(priv); + if (err) + return err; + + return 0; +} + +/* Compare MAC DA with tcam entry data */ +static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, + const u8 *da, unsigned char *mask) +{ + unsigned char tcam_byte, tcam_mask; + int index; + + for (index = 0; index < ETH_ALEN; index++) { + mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); + if (tcam_mask != mask[index]) + return false; + + if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) + return false; + } + + return true; +} + +/* Find tcam entry with matched pair */ +static struct mvpp2_prs_entry * +mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, + unsigned char *mask, int udf_type) +{ + struct mvpp2_prs_entry *pe; + int tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return NULL; + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); + + /* Go through the all entires with MVPP2_PRS_LU_MAC */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned int entry_pmap; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != udf_type)) + continue; + + pe->index = tid; + mvpp2_prs_hw_read(priv, pe); + entry_pmap = mvpp2_prs_tcam_port_map_get(pe); + + if (mvpp2_prs_mac_range_equals(pe, da, mask) && + entry_pmap == pmap) + return pe; + } + kfree(pe); + + return NULL; +} + +/* Update parser's mac da entry */ +static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, + const u8 *da, bool add) +{ + struct mvpp2_prs_entry *pe; + unsigned int pmap, len, ri; + unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + int tid; + + /* Scan TCAM and see if entry with this already exist */ + pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, + MVPP2_PRS_UDF_MAC_DEF); + + /* No 
such entry */ + if (!pe) { + if (!add) + return 0; + + /* Create new TCAM entry */ + /* Find first range mac entry*/ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) + if (priv->prs_shadow[tid].valid && + (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && + (priv->prs_shadow[tid].udf == + MVPP2_PRS_UDF_MAC_RANGE)) + break; + + /* Go through the all entries from first to last */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + tid - 1); + if (tid < 0) + return tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return -1; + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); + pe->index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(pe, port, add); + + /* Invalidate the entry if no ports are left enabled */ + pmap = mvpp2_prs_tcam_port_map_get(pe); + if (pmap == 0) { + if (add) { + kfree(pe); + return -1; + } + mvpp2_prs_hw_inv(priv, pe->index); + priv->prs_shadow[pe->index].valid = false; + kfree(pe); + return 0; + } + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); + + /* Set match on DA */ + len = ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); + + /* Set result info bits */ + if (is_broadcast_ether_addr(da)) + ri = MVPP2_PRS_RI_L2_BCAST; + else if (is_multicast_ether_addr(da)) + ri = MVPP2_PRS_RI_L2_MCAST; + else + ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; + + mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table and hw entry */ + priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, pe); + + kfree(pe); + + return 0; +} + +static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + /* Remove old parser entry */ + err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr, + false); + if (err) + return err; + + /* Add new parser entry */ + err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); + if (err) + return err; + + /* Set addr in the device */ + ether_addr_copy(dev->dev_addr, da); + + return 0; +} + +/* Delete all port's multicast simple (not range) entries */ +static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port) +{ + struct mvpp2_prs_entry pe; + int index, tid; + + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) + continue; + + /* Only simple mac entries */ + pe.index = tid; + mvpp2_prs_hw_read(priv, &pe); + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da)) + /* Delete this entry */ + mvpp2_prs_mac_da_accept(priv, port, da, false); + } +} + +static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) +{ + switch (type) { + case MVPP2_TAG_TYPE_EDSA: + /* Add port to EDSA entries */ + 
mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + /* Remove port from DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + break; + + case MVPP2_TAG_TYPE_DSA: + /* Add port to DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + /* Remove port from EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + case MVPP2_TAG_TYPE_MH: + case MVPP2_TAG_TYPE_NONE: + /* Remove port form EDSA and DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + default: + if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) + return -EINVAL; + } + + return 0; +} + +/* Set prs flow for the port */ +static int mvpp2_prs_def_flow(struct mvpp2_port *port) +{ + struct mvpp2_prs_entry *pe; + int tid; + + pe = mvpp2_prs_flow_find(port->priv, port->id); + + /* Such entry not exist */ + if (!pe) { + /* Go through the all entires from last to first */ + tid = mvpp2_prs_tcam_first_free(port->priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return -ENOMEM; + + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); + pe->index = tid; + + /* Set flow ID*/ + mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table */ + mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); + } + + mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); + mvpp2_prs_hw_write(port->priv, pe); + kfree(pe); + + return 0; +} + +/* Classifier configuration routines */ + +/* Update classification flow table registers */ +static void mvpp2_cls_flow_write(struct mvpp2 *priv, + struct mvpp2_cls_flow_entry *fe) +{ + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); +} + +/* Update classification lookup table register */ +static void mvpp2_cls_lookup_write(struct mvpp2 *priv, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); +} + +/* Classifier default initialization */ +static void mvpp2_cls_init(struct mvpp2 *priv) +{ + struct mvpp2_cls_lookup_entry le; + struct mvpp2_cls_flow_entry fe; + int index; + + /* Enable classifier */ + mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); + + /* Clear classifier flow table */ + memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); + for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { + fe.index = index; + mvpp2_cls_flow_write(priv, &fe); + } + + /* Clear classifier lookup table */ + le.data = 
0; + for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { + le.lkpid = index; + le.way = 0; + mvpp2_cls_lookup_write(priv, &le); + + le.way = 1; + mvpp2_cls_lookup_write(priv, &le); + } +} + +static void mvpp2_cls_port_config(struct mvpp2_port *port) +{ + struct mvpp2_cls_lookup_entry le; + u32 val; + + /* Set way for the port */ + val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); + val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); + + /* Pick the entry to be accessed in lookup ID decoding table + * according to the way and lkpid. + */ + le.lkpid = port->id; + le.way = 0; + le.data = 0; + + /* Set initial CPU queue for receiving packets */ + le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; + le.data |= port->first_rxq; + + /* Disable classification engines */ + le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + /* Update lookup ID table entry */ + mvpp2_cls_lookup_write(port->priv, &le); +} + +/* Set CPU queue number for oversize packets */ +static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) +{ + u32 val; + + mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), + port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); + + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); + + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); + val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); +} + +/* Buffer Manager configuration routines */ + +/* Create pool */ +static int mvpp2_bm_pool_create(struct platform_device *pdev, + struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, int size) +{ + int size_bytes; + u32 val; + + size_bytes = sizeof(u32) * size; + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, + &bm_pool->phys_addr, + GFP_KERNEL); + if (!bm_pool->virt_addr) + return -ENOMEM; + + if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) { + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, + bm_pool->phys_addr); + dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", + bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); + return -ENOMEM; + } + + mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), + bm_pool->phys_addr); + mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_START_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + bm_pool->type = MVPP2_BM_FREE; + bm_pool->size = size; + bm_pool->pkt_size = 0; + bm_pool->buf_num = 0; + atomic_set(&bm_pool->in_use, 0); + spin_lock_init(&bm_pool->lock); + + return 0; +} + +/* Set pool buffer size */ +static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + int buf_size) +{ + u32 val; + + bm_pool->buf_size = buf_size; + + val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); + mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); +} + +/* Free all buffers from the pool */ +static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) +{ + int i; + + for (i = 0; i < bm_pool->buf_num; i++) { + u32 vaddr; + + /* Get buffer virtual address (indirect access) */ + mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); + vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); + if (!vaddr) + break; + dev_kfree_skb_any((struct sk_buff *)vaddr); + } + + /* Update BM driver with number of buffers removed from pool */ + bm_pool->buf_num -= i; +} + +/* Cleanup pool 
*/ +static int mvpp2_bm_pool_destroy(struct platform_device *pdev, + struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool) +{ + u32 val; + + mvpp2_bm_bufs_free(priv, bm_pool); + if (bm_pool->buf_num) { + WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); + return 0; + } + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_STOP_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size, + bm_pool->virt_addr, + bm_pool->phys_addr); + return 0; +} + +static int mvpp2_bm_pools_init(struct platform_device *pdev, + struct mvpp2 *priv) +{ + int i, err, size; + struct mvpp2_bm_pool *bm_pool; + + /* Create all pools with maximum size */ + size = MVPP2_BM_POOL_SIZE_MAX; + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + bm_pool = &priv->bm_pools[i]; + bm_pool->id = i; + err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); + if (err) + goto err_unroll_pools; + mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); + } + return 0; + +err_unroll_pools: + dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); + for (i = i - 1; i >= 0; i--) + mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); + return err; +} + +static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + int i, err; + + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + /* Mask BM all interrupts */ + mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); + /* Clear BM cause register */ + mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); + } + + /* Allocate and initialize BM pools */ + priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, + sizeof(struct mvpp2_bm_pool), GFP_KERNEL); + if (!priv->bm_pools) + return -ENOMEM; + + err = mvpp2_bm_pools_init(pdev, priv); + if (err < 0) + return err; + return 0; +} + +/* Attach long pool to rxq */ +static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, + int lrxq, int long_pool) +{ + u32 val; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_POOL_LONG_MASK; + val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & + MVPP2_RXQ_POOL_LONG_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Attach short pool to rxq */ +static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, + int lrxq, int short_pool) +{ + u32 val; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_POOL_SHORT_MASK; + val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & + MVPP2_RXQ_POOL_SHORT_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Allocate skb for BM pool */ +static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *buf_phys_addr, + gfp_t gfp_mask) +{ + struct sk_buff *skb; + dma_addr_t phys_addr; + + skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask); + if (!skb) + return NULL; + + phys_addr = dma_map_single(port->dev->dev.parent, skb->head, + MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) { + dev_kfree_skb_any(skb); + return NULL; + } + *buf_phys_addr = phys_addr; + + return skb; +} + +/* Set pool number in a BM cookie */ +static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) +{ + u32 bm; + + bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS); + bm |= ((pool & 0xFF) << 
MVPP2_BM_COOKIE_POOL_OFFS); + + return bm; +} + +/* Get pool number from a BM cookie */ +static inline int mvpp2_bm_cookie_pool_get(u32 cookie) +{ + return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; +} + +/* Release buffer to BM */ +static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, + u32 buf_phys_addr, u32 buf_virt_addr) +{ + mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr); + mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr); +} + +/* Release multicast buffer */ +static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool, + u32 buf_phys_addr, u32 buf_virt_addr, + int mc_id) +{ + u32 val = 0; + + val |= (mc_id & MVPP2_BM_MC_ID_MASK); + mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val); + + mvpp2_bm_pool_put(port, pool, + buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK, + buf_virt_addr); +} + +/* Refill BM pool */ +static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, + u32 phys_addr, u32 cookie) +{ + int pool = mvpp2_bm_cookie_pool_get(bm); + + mvpp2_bm_pool_put(port, pool, phys_addr, cookie); +} + +/* Allocate buffers for the pool */ +static int mvpp2_bm_bufs_add(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, int buf_num) +{ + struct sk_buff *skb; + int i, buf_size, total_size; + u32 bm; + dma_addr_t phys_addr; + + buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); + total_size = MVPP2_RX_TOTAL_SIZE(buf_size); + + if (buf_num < 0 || + (buf_num + bm_pool->buf_num > bm_pool->size)) { + netdev_err(port->dev, + "cannot allocate %d buffers for pool %d\n", + buf_num, bm_pool->id); + return 0; + } + + bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id); + for (i = 0; i < buf_num; i++) { + skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL); + if (!skb) + break; + + mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb); + } + + /* Update BM driver with number of buffers added to pool */ + bm_pool->buf_num += i; + bm_pool->in_use_thresh = bm_pool->buf_num / 4; + + netdev_dbg(port->dev, + "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", + bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long", + bm_pool->id, bm_pool->pkt_size, buf_size, total_size); + + netdev_dbg(port->dev, + "%s pool %d: %d of %d buffers added\n", + bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long", + bm_pool->id, i, buf_num); + return i; +} + +/* Notify the driver that BM pool is being used as specific type and return the + * pool pointer on success + */ +static struct mvpp2_bm_pool * +mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, + int pkt_size) +{ + unsigned long flags = 0; + struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; + int num; + + if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) { + netdev_err(port->dev, "mixing pool types is forbidden\n"); + return NULL; + } + + spin_lock_irqsave(&new_pool->lock, flags); + + if (new_pool->type == MVPP2_BM_FREE) + new_pool->type = type; + + /* Allocate buffers in case BM pool is used as long pool, but packet + * size doesn't match MTU or BM pool hasn't being used yet + */ + if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) || + (new_pool->pkt_size == 0)) { + int pkts_num; + + /* Set default buffer number or free all the buffers in case + * the pool is not empty + */ + pkts_num = new_pool->buf_num; + if (pkts_num == 0) + pkts_num = type == MVPP2_BM_SWF_LONG ? 
+ MVPP2_BM_LONG_BUF_NUM : + MVPP2_BM_SHORT_BUF_NUM; + else + mvpp2_bm_bufs_free(port->priv, new_pool); + + new_pool->pkt_size = pkt_size; + + /* Allocate buffers for this pool */ + num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); + if (num != pkts_num) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, pkts_num); + /* We need to undo the bufs_add() allocations */ + spin_unlock_irqrestore(&new_pool->lock, flags); + return NULL; + } + } + + mvpp2_bm_pool_bufsize_set(port->priv, new_pool, + MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); + + spin_unlock_irqrestore(&new_pool->lock, flags); + + return new_pool; +} + +/* Initialize pools for swf */ +static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) +{ + unsigned long flags = 0; + int rxq; + + if (!port->pool_long) { + port->pool_long = + mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id), + MVPP2_BM_SWF_LONG, + port->pkt_size); + if (!port->pool_long) + return -ENOMEM; + + spin_lock_irqsave(&port->pool_long->lock, flags); + port->pool_long->port_map |= (1 << port->id); + spin_unlock_irqrestore(&port->pool_long->lock, flags); + + for (rxq = 0; rxq < rxq_number; rxq++) + mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); + } + + if (!port->pool_short) { + port->pool_short = + mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL, + MVPP2_BM_SWF_SHORT, + MVPP2_BM_SHORT_PKT_SIZE); + if (!port->pool_short) + return -ENOMEM; + + spin_lock_irqsave(&port->pool_short->lock, flags); + port->pool_short->port_map |= (1 << port->id); + spin_unlock_irqrestore(&port->pool_short->lock, flags); + + for (rxq = 0; rxq < rxq_number; rxq++) + mvpp2_rxq_short_pool_set(port, rxq, + port->pool_short->id); + } + + return 0; +} + +static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_bm_pool *port_pool = port->pool_long; + int num, pkts_num = port_pool->buf_num; + int pkt_size = MVPP2_RX_PKT_SIZE(mtu); + + /* Update BM pool with new buffer size */ + mvpp2_bm_bufs_free(port->priv, port_pool); + if (port_pool->buf_num) { + WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); + return -EIO; + } + + port_pool->pkt_size = pkt_size; + num = mvpp2_bm_bufs_add(port, port_pool, pkts_num); + if (num != pkts_num) { + WARN(1, "pool %d: %d of %d allocated\n", + port_pool->id, num, pkts_num); + return -EIO; + } + + mvpp2_bm_pool_bufsize_set(port->priv, port_pool, + MVPP2_RX_BUF_SIZE(port_pool->pkt_size)); + dev->mtu = mtu; + netdev_update_features(dev); + return 0; +} + +static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) +{ + int cpu, cpu_mask = 0; + + for_each_present_cpu(cpu) + cpu_mask |= 1 << cpu; + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask)); +} + +static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) +{ + int cpu, cpu_mask = 0; + + for_each_present_cpu(cpu) + cpu_mask |= 1 << cpu; + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask)); +} + +/* Mask the current CPU's Rx/Tx interrupts */ +static void mvpp2_interrupts_mask(void *arg) +{ + struct mvpp2_port *port = arg; + + mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); +} + +/* Unmask the current CPU's Rx/Tx interrupts */ +static void mvpp2_interrupts_unmask(void *arg) +{ + struct mvpp2_port *port = arg; + + mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), + (MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); +} + 
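+/* Summary of the interrupt handling above: MVPP2_ISR_ENABLE_REG(port) acts as
+ * a per-port enable/disable switch covering every present CPU (the CPU mask is
+ * built in mvpp2_interrupts_enable/disable), while MVPP2_ISR_RX_TX_MASK_REG(port)
+ * masks or unmasks the Rx/Tx/misc cause bits for the CPU that writes it, which
+ * is why mvpp2_interrupts_mask/unmask are run through on_each_cpu(). The ISR
+ * (mvpp2_isr) disables the port interrupts and schedules NAPI; mvpp2_poll()
+ * re-enables them once the Rx work fits in the budget.
+ */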
+/* Port configuration routines */ + +static void mvpp2_port_mii_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + val |= MVPP2_GMAC_INBAND_AN_MASK; + break; + case PHY_INTERFACE_MODE_RGMII: + val |= MVPP2_GMAC_PORT_RGMII_MASK; + default: + val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; + } + + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); +} + +static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val |= MVPP2_GMAC_FC_ADV_EN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_port_enable(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val |= MVPP2_GMAC_PORT_EN_MASK; + val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); +} + +static void mvpp2_port_disable(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~(MVPP2_GMAC_PORT_EN_MASK); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); +} + +/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ +static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & + ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +/* Configure loopback port */ +static void mvpp2_port_loopback_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + + if (port->speed == 1000) + val |= MVPP2_GMAC_GMII_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; + + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) + val |= MVPP2_GMAC_PCS_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; + + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +static void mvpp2_port_reset(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + ~MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; +} + +/* Change maximum receive size of the port */ +static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; + val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP2_GMAC_MAX_RX_SIZE_OFFS); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); +} + +/* Set defaults to the MVPP2 port */ +static void mvpp2_defaults_set(struct mvpp2_port *port) +{ + int tx_port_num, val, queue, ptxq, lrxq; + + /* Configure port to loopback if needed */ + if (port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port); + + /* Update TX FIFO MIN Threshold */ + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + /* Min. 
TX threshold must be less than minimal packet length */ + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + + /* Disable Legacy WRR, Disable EJP, Release from reset */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, + tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + + /* Close bandwidth for all queues */ + for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { + ptxq = mvpp2_txq_phys(port->id, queue); + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); + } + + /* Set refill period to 1 usec, refill tokens + * and bucket size to maximum + */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, + port->priv->tclk / USEC_PER_SEC); + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); + val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); + val = MVPP2_TXP_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + + /* Set MaximumLowLatencyPacketSize value to 256 */ + mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), + MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | + MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); + + /* Enable Rx cache snoop */ + for (lrxq = 0; lrxq < rxq_number; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_SNOOP_PKT_SIZE_MASK | + MVPP2_SNOOP_BUF_HDR_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } + + /* At default, mask all interrupts to all present cpus */ + mvpp2_interrupts_disable(port); +} + +/* Enable/disable receiving packets */ +static void mvpp2_ingress_enable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < rxq_number; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val &= ~MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +static void mvpp2_ingress_disable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < rxq_number; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +/* Enable transmit via physical egress queue + * - HW starts take descriptors from DRAM + */ +static void mvpp2_egress_enable(struct mvpp2_port *port) +{ + u32 qmap; + int queue; + int tx_port_num = mvpp2_egress_port(port); + + /* Enable all initialized TXs. 
*/ + qmap = 0; + for (queue = 0; queue < txq_number; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + if (txq->descs != NULL) + qmap |= (1 << queue); + } + + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); +} + +/* Disable transmit via physical egress queue + * - HW doesn't take descriptors from DRAM + */ +static void mvpp2_egress_disable(struct mvpp2_port *port) +{ + u32 reg_data; + int delay; + int tx_port_num = mvpp2_egress_port(port); + + /* Issue stop command for active channels only */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & + MVPP2_TXP_SCHED_ENQ_MASK; + if (reg_data != 0) + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, + (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); + + /* Wait for all Tx activity to terminate. */ + delay = 0; + do { + if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "Tx stop timed out, status=0x%08x\n", + reg_data); + break; + } + mdelay(1); + delay++; + + /* Check port TX Command register that all + * Tx queues are stopped + */ + reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); + } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); +} + +/* Rx descriptors helper methods */ + +/* Get number of Rx descriptors occupied by received packets */ +static inline int +mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) +{ + u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); + + return val & MVPP2_RXQ_OCCUPIED_MASK; +} + +/* Update Rx queue status with the number of occupied and available + * Rx descriptor slots. + */ +static inline void +mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, + int used_count, int free_count) +{ + /* Decrement the number of used descriptors and increment count + * increment the number of free descriptors. 
+ */ + u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); + + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); +} + +/* Get pointer to next RX descriptor to be processed by SW */ +static inline struct mvpp2_rx_desc * +mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) +{ + int rx_desc = rxq->next_desc_to_proc; + + rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); + prefetch(rxq->descs + rxq->next_desc_to_proc); + return rxq->descs + rx_desc; +} + +/* Set rx queue offset */ +static void mvpp2_rxq_offset_set(struct mvpp2_port *port, + int prxq, int offset) +{ + u32 val; + + /* Convert offset from bytes to units of 32 bytes */ + offset = offset >> 5; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; + + /* Offset is in */ + val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & + MVPP2_RXQ_PACKET_OFFSET_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Obtain BM cookie information from descriptor */ +static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc) +{ + int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + int cpu = smp_processor_id(); + + return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | + ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); +} + +/* Tx descriptors helper methods */ + +/* Get number of Tx descriptors waiting to be transmitted by HW */ +static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); + + return val & MVPP2_TXQ_PENDING_MASK; +} + +/* Get pointer to next Tx descriptor to be processed (send) by HW */ +static struct mvpp2_tx_desc * +mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) +{ + int tx_desc = txq->next_desc_to_proc; + + txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); + return txq->descs + tx_desc; +} + +/* Update HW with number of aggregated Tx descriptors to be sent */ +static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) +{ + /* aggregated access - relevant TXQ number is written in TX desc */ + mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); +} + + +/* Check if there are enough free descriptors in aggregated txq. + * If not, update the number of occupied descriptors and repeat the check. + */ +static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, + struct mvpp2_tx_queue *aggr_txq, int num) +{ + if ((aggr_txq->count + num) > aggr_txq->size) { + /* Update number of occupied aggregated Tx descriptors */ + int cpu = smp_processor_id(); + u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + + aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; + } + + if ((aggr_txq->count + num) > aggr_txq->size) + return -ENOMEM; + + return 0; +} + +/* Reserved Tx descriptors allocation request */ +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, + struct mvpp2_tx_queue *txq, int num) +{ + u32 val; + + val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; + mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val); + + val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG); + + return val & MVPP2_TXQ_RSVD_RSLT_MASK; +} + +/* Check if there are enough reserved descriptors for transmission. + * If not, request chunk of reserved descriptors and check again. 
+ */ +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int num) +{ + int req, cpu, desc_count; + + if (txq_pcpu->reserved_num >= num) + return 0; + + /* Not enough descriptors reserved! Update the reserved descriptor + * count and check again. + */ + + desc_count = 0; + /* Compute total of used descriptors */ + for_each_present_cpu(cpu) { + struct mvpp2_txq_pcpu *txq_pcpu_aux; + + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + desc_count += txq_pcpu_aux->count; + desc_count += txq_pcpu_aux->reserved_num; + } + + req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); + desc_count += req; + + if (desc_count > + (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + return -ENOMEM; + + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + + /* OK, the descriptor cound has been updated: check again. */ + if (txq_pcpu->reserved_num < num) + return -ENOMEM; + return 0; +} + +/* Release the last allocated Tx descriptor. Useful to handle DMA + * mapping failures in the Tx path. + */ +static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) +{ + if (txq->next_desc_to_proc == 0) + txq->next_desc_to_proc = txq->last_desc - 1; + else + txq->next_desc_to_proc--; +} + +/* Set Tx descriptors fields relevant for CSUM calculation */ +static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, + int ip_hdr_len, int l4_proto) +{ + u32 command; + + /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, + * G_L4_chk, L4_type required only for checksum calculation + */ + command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); + command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); + command |= MVPP2_TXD_IP_CSUM_DISABLE; + + if (l3_proto == swab16(ETH_P_IP)) { + command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ + command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ + } else { + command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ + } + + if (l4_proto == IPPROTO_TCP) { + command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else if (l4_proto == IPPROTO_UDP) { + command |= MVPP2_TXD_L4_UDP; /* enable UDP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else { + command |= MVPP2_TXD_L4_CSUM_NOT; + } + + return command; +} + +/* Get number of sent descriptors and decrement counter. + * The number of sent descriptors is returned. 
+ * Per-CPU access + */ +static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + + /* Reading status reg resets transmitted descriptor counter */ + val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id)); + + return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> + MVPP2_TRANSMITTED_COUNT_OFFSET; +} + +static void mvpp2_txq_sent_counter_clear(void *arg) +{ + struct mvpp2_port *port = arg; + int queue; + + for (queue = 0; queue < txq_number; queue++) { + int id = port->txqs[queue]->id; + + mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); + } +} + +/* Set max sizes for Tx queues */ +static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) +{ + u32 val, size, mtu; + int txq, tx_port_num; + + mtu = port->pkt_size * 8; + if (mtu > MVPP2_TXP_MTU_MAX) + mtu = MVPP2_TXP_MTU_MAX; + + /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ + mtu = 3 * mtu; + + /* Indirect access to registers */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + /* Set MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); + val &= ~MVPP2_TXP_MTU_MAX; + val |= mtu; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); + + /* TXP token size and all TXQs token size must be larger than MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); + size = val & MVPP2_TXP_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + } + + for (txq = 0; txq < txq_number; txq++) { + val = mvpp2_read(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); + size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; + + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), + val); + } + } +} + +/* Set the number of packets that will be received before an Rx interrupt + * is generated by HW. 
+ */ +static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq, u32 pkts) +{ + u32 val; + + val = (pkts & MVPP2_OCCUPIED_THRESH_MASK); + mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); + + rxq->pkts_coal = pkts; +} + +/* Set the time delay in usec before Rx interrupt */ +static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq, u32 usec) +{ + u32 val; + + val = (port->priv->tclk / USEC_PER_SEC) * usec; + mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); + + rxq->time_coal = usec; +} + +/* Set threshold for TX_DONE pkts coalescing */ +static void mvpp2_tx_done_pkts_coal_set(void *arg) +{ + struct mvpp2_port *port = arg; + int queue; + u32 val; + + for (queue = 0; queue < txq_number; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) & + MVPP2_TRANSMITTED_THRESH_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val); + } +} + +/* Free Tx queue skbuffs */ +static void mvpp2_txq_bufs_free(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, int num) +{ + int i; + + for (i = 0; i < num; i++) { + struct mvpp2_tx_desc *tx_desc = txq->descs + + txq_pcpu->txq_get_index; + struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; + + mvpp2_txq_inc_get(txq_pcpu); + + if (!skb) + continue; + + dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, + tx_desc->data_size, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause) - 1; + + return port->rxqs[queue]; +} + +static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause >> 16) - 1; + + return port->txqs[queue]; +} + +/* Handle end of transmission */ +static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); + int tx_done; + + if (txq_pcpu->cpu != smp_processor_id()) + netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); + + tx_done = mvpp2_txq_sent_desc_proc(port, txq); + if (!tx_done) + return; + mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); + + txq_pcpu->count -= tx_done; + + if (netif_tx_queue_stopped(nq)) + if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1) + netif_tx_wake_queue(nq); +} + +/* Rx/Tx queue initialization/cleanup methods */ + +/* Allocate and initialize descriptors for aggr TXQ */ +static int mvpp2_aggr_txq_init(struct platform_device *pdev, + struct mvpp2_tx_queue *aggr_txq, + int desc_num, int cpu, + struct mvpp2 *priv) +{ + /* Allocate memory for TX descriptors */ + aggr_txq->descs = dma_alloc_coherent(&pdev->dev, + desc_num * MVPP2_DESC_ALIGNED_SIZE, + &aggr_txq->descs_phys, GFP_KERNEL); + if (!aggr_txq->descs) + return -ENOMEM; + + /* Make sure descriptor address is cache line size aligned */ + BUG_ON(aggr_txq->descs != + PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); + + aggr_txq->last_desc = aggr_txq->size - 1; + + /* Aggr TXQ no reset WA */ + aggr_txq->next_desc_to_proc = mvpp2_read(priv, + MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + + /* Set Tx descriptors queue starting address */ + /* indirect access */ + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), + 
aggr_txq->descs_phys); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); + + return 0; +} + +/* Create a specified Rx queue */ +static int mvpp2_rxq_init(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) + +{ + rxq->size = port->rx_ring_size; + + /* Allocate memory for RX descriptors */ + rxq->descs = dma_alloc_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + &rxq->descs_phys, GFP_KERNEL); + if (!rxq->descs) + return -ENOMEM; + + BUG_ON(rxq->descs != + PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); + + rxq->last_desc = rxq->size - 1; + + /* Zero occupied and non-occupied counters - direct access */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + + /* Set Rx descriptors queue starting address - indirect access */ + mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys); + mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); + + /* Set Offset */ + mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); + + /* Set coalescing pkts and time */ + mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); + mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); + + /* Add number of descriptors ready for receiving packets */ + mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); + + return 0; +} + +/* Push packets received by the RXQ to BM pool */ +static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int rx_received, i; + + rx_received = mvpp2_rxq_received(port, rxq->id); + if (!rx_received) + return; + + for (i = 0; i < rx_received; i++) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + u32 bm = mvpp2_bm_cookie_build(rx_desc); + + mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, + rx_desc->buf_cookie); + } + mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); +} + +/* Cleanup Rx queue */ +static void mvpp2_rxq_deinit(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + mvpp2_rxq_drop_pkts(port, rxq); + + if (rxq->descs) + dma_free_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + rxq->descs, + rxq->descs_phys); + + rxq->descs = NULL; + rxq->last_desc = 0; + rxq->next_desc_to_proc = 0; + rxq->descs_phys = 0; + + /* Clear Rx descriptors queue starting address and size; + * free descriptor number + */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); +} + +/* Create and initialize a Tx queue */ +static int mvpp2_txq_init(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + int cpu, desc, desc_per_txq, tx_port_num; + struct mvpp2_txq_pcpu *txq_pcpu; + + txq->size = port->tx_ring_size; + + /* Allocate memory for Tx descriptors */ + txq->descs = dma_alloc_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + &txq->descs_phys, GFP_KERNEL); + if (!txq->descs) + return -ENOMEM; + + /* Make sure descriptor address is cache line size aligned */ + BUG_ON(txq->descs != + PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); + + txq->last_desc = txq->size - 1; + + /* Set Tx descriptors queue starting address - indirect access */ + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & + 
MVPP2_TXQ_DESC_SIZE_MASK); + mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, + txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); + val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); + val &= ~MVPP2_TXQ_PENDING_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); + + /* Calculate base address in prefetch buffer. We reserve 16 descriptors + * for each existing TXQ. + * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT + * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS + */ + desc_per_txq = 16; + desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + + (txq->log_id * desc_per_txq); + + mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, + MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | + MVPP2_PREF_BUF_THRESH(desc_per_txq/2)); + + /* WRR / EJP configuration - indirect access */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); + val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); + + val = MVPP2_TXQ_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), + val); + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu->size = txq->size; + txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * + sizeof(*txq_pcpu->tx_skb), + GFP_KERNEL); + if (!txq_pcpu->tx_skb) { + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + return -ENOMEM; + } + + txq_pcpu->count = 0; + txq_pcpu->reserved_num = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + } + + return 0; +} + +/* Free allocated TXQ resources */ +static void mvpp2_txq_deinit(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + int cpu; + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + kfree(txq_pcpu->tx_skb); + } + + if (txq->descs) + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + + txq->descs = NULL; + txq->last_desc = 0; + txq->next_desc_to_proc = 0; + txq->descs_phys = 0; + + /* Set minimum bandwidth for disabled TXQs */ + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); + + /* Set Tx descriptors queue starting address and size */ + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); +} + +/* Cleanup Tx ports */ +static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + int delay, pending, cpu; + u32 val; + + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); + val |= MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); + + /* The napi queue has been stopped so wait for all packets + * to be transmitted. 
+ */ + delay = 0; + do { + if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "port %d: cleaning queue %d timed out\n", + port->id, txq->log_id); + break; + } + mdelay(1); + delay++; + + pending = mvpp2_txq_pend_desc_num_get(port, txq); + } while (pending); + + val &= ~MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + + /* Release all packets */ + mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); + + /* Reset queue */ + txq_pcpu->count = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + } +} + +/* Cleanup all Tx queues */ +static void mvpp2_cleanup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue; + u32 val; + + val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); + + /* Reset Tx ports and delete Tx queues */ + val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); + + for (queue = 0; queue < txq_number; queue++) { + txq = port->txqs[queue]; + mvpp2_txq_clean(port, txq); + mvpp2_txq_deinit(port, txq); + } + + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + + val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); +} + +/* Cleanup all Rx queues */ +static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) +{ + int queue; + + for (queue = 0; queue < rxq_number; queue++) + mvpp2_rxq_deinit(port, port->rxqs[queue]); +} + +/* Init all Rx queues for port */ +static int mvpp2_setup_rxqs(struct mvpp2_port *port) +{ + int queue, err; + + for (queue = 0; queue < rxq_number; queue++) { + err = mvpp2_rxq_init(port, port->rxqs[queue]); + if (err) + goto err_cleanup; + } + return 0; + +err_cleanup: + mvpp2_cleanup_rxqs(port); + return err; +} + +/* Init all tx queues for port */ +static int mvpp2_setup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue, err; + + for (queue = 0; queue < txq_number; queue++) { + txq = port->txqs[queue]; + err = mvpp2_txq_init(port, txq); + if (err) + goto err_cleanup; + } + + on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + return 0; + +err_cleanup: + mvpp2_cleanup_txqs(port); + return err; +} + +/* The callback for per-port interrupt */ +static irqreturn_t mvpp2_isr(int irq, void *dev_id) +{ + struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + + mvpp2_interrupts_disable(port); + + napi_schedule(&port->napi); + + return IRQ_HANDLED; +} + +/* Adjust link */ +static void mvpp2_link_event(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct phy_device *phydev = port->phy_dev; + int status_change = 0; + u32 val; + + if (phydev->link) { + if ((port->speed != phydev->speed) || + (port->duplex != phydev->duplex)) { + u32 val; + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_AN_DUPLEX_EN); + + if (phydev->duplex) + val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + + if (phydev->speed == SPEED_1000) + val |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (phydev->speed == SPEED_100) + val |= MVPP2_GMAC_CONFIG_MII_SPEED; + + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + port->duplex = phydev->duplex; + port->speed = phydev->speed; + } + } + + if (phydev->link != port->link) { + if (!phydev->link) { + port->duplex = -1; + port->speed = 0; + } + + 
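+ /* Record the new link state; the GMAC forced-link bits and the
+ * ingress/egress enable/disable are applied below based on it.
+ */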
port->link = phydev->link; + status_change = 1; + } + + if (status_change) { + if (phydev->link) { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val |= (MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_FORCE_LINK_DOWN); + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + } else { + mvpp2_ingress_disable(port); + mvpp2_egress_disable(port); + } + phy_print_status(phydev); + } +} + +/* Main RX/TX processing routines */ + +/* Display more error info */ +static void mvpp2_rx_error(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + u32 status = rx_desc->status; + + switch (status & MVPP2_RXD_ERR_CODE_MASK) { + case MVPP2_RXD_ERR_CRC: + netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n", + status, rx_desc->data_size); + break; + case MVPP2_RXD_ERR_OVERRUN: + netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n", + status, rx_desc->data_size); + break; + case MVPP2_RXD_ERR_RESOURCE: + netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n", + status, rx_desc->data_size); + break; + } +} + +/* Handle RX checksum offload */ +static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, + struct sk_buff *skb) +{ + if (((status & MVPP2_RXD_L3_IP4) && + !(status & MVPP2_RXD_IP4_HEADER_ERR)) || + (status & MVPP2_RXD_L3_IP6)) + if (((status & MVPP2_RXD_L4_UDP) || + (status & MVPP2_RXD_L4_TCP)) && + (status & MVPP2_RXD_L4_CSUM_OK)) { + skb->csum = 0; + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + + skb->ip_summed = CHECKSUM_NONE; +} + +/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ +static int mvpp2_rx_refill(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + u32 bm, int is_recycle) +{ + struct sk_buff *skb; + dma_addr_t phys_addr; + + if (is_recycle && + (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh)) + return 0; + + /* No recycle or too many buffers are in use, so allocate a new skb */ + skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb); + atomic_dec(&bm_pool->in_use); + return 0; +} + +/* Handle tx checksum */ +static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int ip_hdr_len = 0; + u8 l4_proto; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = ip_hdr(skb); + + /* Calculate IPv4 checksum and L4 checksum */ + ip_hdr_len = ip4h->ihl; + l4_proto = ip4h->protocol; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + /* Read l4_protocol from one of IPv6 extra headers */ + if (skb_network_header_len(skb) > 0) + ip_hdr_len = (skb_network_header_len(skb) >> 2); + l4_proto = ip6h->nexthdr; + } else { + return MVPP2_TXD_L4_CSUM_NOT; + } + + return mvpp2_txq_desc_csum(skb_network_offset(skb), + skb->protocol, ip_hdr_len, l4_proto); + } + + return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; +} + +static void mvpp2_buff_hdr_rx(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + struct mvpp2_buff_hdr *buff_hdr; + struct sk_buff *skb; + u32 rx_status = rx_desc->status; + u32 buff_phys_addr; + u32 buff_virt_addr; + u32 buff_phys_addr_next; + u32 buff_virt_addr_next; + int mc_id; + int pool_id; + + pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + buff_phys_addr = rx_desc->buf_phys_addr; + buff_virt_addr = rx_desc->buf_cookie; + + do { + skb = (struct sk_buff 
*)buff_virt_addr; + buff_hdr = (struct mvpp2_buff_hdr *)skb->head; + + mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info); + + buff_phys_addr_next = buff_hdr->next_buff_phys_addr; + buff_virt_addr_next = buff_hdr->next_buff_virt_addr; + + /* Release buffer */ + mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr, + buff_virt_addr, mc_id); + + buff_phys_addr = buff_phys_addr_next; + buff_virt_addr = buff_virt_addr_next; + + } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info)); +} + +/* Main rx processing */ +static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, + struct mvpp2_rx_queue *rxq) +{ + struct net_device *dev = port->dev; + int rx_received, rx_filled, i; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; + + /* Get number of received packets and clamp the to-do */ + rx_received = mvpp2_rxq_received(port, rxq->id); + if (rx_todo > rx_received) + rx_todo = rx_received; + + rx_filled = 0; + for (i = 0; i < rx_todo; i++) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + struct mvpp2_bm_pool *bm_pool; + struct sk_buff *skb; + u32 bm, rx_status; + int pool, rx_bytes, err; + + rx_filled++; + rx_status = rx_desc->status; + rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; + + bm = mvpp2_bm_cookie_build(rx_desc); + pool = mvpp2_bm_cookie_pool_get(bm); + bm_pool = &port->priv->bm_pools[pool]; + /* Check if buffer header is used */ + if (rx_status & MVPP2_RXD_BUF_HDR) { + mvpp2_buff_hdr_rx(port, rx_desc); + continue; + } + + /* In case of an error, release the requested buffer pointer + * to the Buffer Manager. This request process is controlled + * by the hardware, and the information about the buffer is + * comprised by the RX descriptor. + */ + if (rx_status & MVPP2_RXD_ERR_SUMMARY) { + dev->stats.rx_errors++; + mvpp2_rx_error(port, rx_desc); + mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, + rx_desc->buf_cookie); + continue; + } + + skb = (struct sk_buff *)rx_desc->buf_cookie; + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + atomic_inc(&bm_pool->in_use); + + skb_reserve(skb, MVPP2_MH_SIZE); + skb_put(skb, rx_bytes); + skb->protocol = eth_type_trans(skb, dev); + mvpp2_rx_csum(port, rx_status, skb); + + napi_gro_receive(&port->napi, skb); + + err = mvpp2_rx_refill(port, bm_pool, bm, 0); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + rx_filled--; + } + } + + if (rcvd_pkts) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += rcvd_pkts; + stats->rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + + /* Update Rx queue management counters */ + wmb(); + mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); + + return rx_todo; +} + +static inline void +tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq, + struct mvpp2_tx_desc *desc) +{ + dma_unmap_single(dev, desc->buf_phys_addr, + desc->data_size, DMA_TO_DEVICE); + mvpp2_txq_desc_put(txq); +} + +/* Handle tx fragmentation processing */ +static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + struct mvpp2_tx_desc *tx_desc; + int i; + dma_addr_t buf_phys_addr; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = page_address(frag->page.p) + frag->page_offset; + + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + tx_desc->phys_txq = txq->id; + tx_desc->data_size = frag->size; + + buf_phys_addr = 
dma_map_single(port->dev->dev.parent, addr, + tx_desc->data_size, + DMA_TO_DEVICE); + if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) { + mvpp2_txq_desc_put(txq); + goto error; + } + + tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN; + tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN); + + if (i == (skb_shinfo(skb)->nr_frags - 1)) { + /* Last descriptor */ + tx_desc->command = MVPP2_TXD_L_DESC; + mvpp2_txq_inc_put(txq_pcpu, skb); + } else { + /* Descriptor in the middle: Not First, Not Last */ + tx_desc->command = 0; + mvpp2_txq_inc_put(txq_pcpu, NULL); + } + } + + return 0; + +error: + /* Release all descriptors that were used to map fragments of + * this packet, as well as the corresponding DMA mappings + */ + for (i = i - 1; i >= 0; i--) { + tx_desc = txq->descs + i; + tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc); + } + + return -ENOMEM; +} + +/* Main tx processing */ +static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_queue *txq, *aggr_txq; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_desc *tx_desc; + dma_addr_t buf_phys_addr; + int frags = 0; + u16 txq_id; + u32 tx_cmd; + + txq_id = skb_get_queue_mapping(skb); + txq = port->txqs[txq_id]; + txq_pcpu = this_cpu_ptr(txq->pcpu); + aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + + frags = skb_shinfo(skb)->nr_frags + 1; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port->priv, txq, + txq_pcpu, frags)) { + frags = 0; + goto out; + } + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + tx_desc->phys_txq = txq->id; + tx_desc->data_size = skb_headlen(skb); + + buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, + tx_desc->data_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) { + mvpp2_txq_desc_put(txq); + frags = 0; + goto out; + } + tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN; + tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN; + + tx_cmd = mvpp2_skb_tx_csum(port, skb); + + if (frags == 1) { + /* First and Last descriptor */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; + tx_desc->command = tx_cmd; + mvpp2_txq_inc_put(txq_pcpu, skb); + } else { + /* First but not Last */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; + tx_desc->command = tx_cmd; + mvpp2_txq_inc_put(txq_pcpu, NULL); + + /* Continue with other skb fragments */ + if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { + tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc); + frags = 0; + goto out; + } + } + + txq_pcpu->reserved_num -= frags; + txq_pcpu->count += frags; + aggr_txq->count += frags; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, frags); + + if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) { + struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + + netif_tx_stop_queue(nq); + } +out: + if (frags > 0) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + return NETDEV_TX_OK; +} + +static inline void mvpp2_cause_error(struct net_device *dev, int cause) +{ + if (cause & MVPP2_CAUSE_FCS_ERR_MASK) + netdev_err(dev, "FCS 
error\n"); + if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) + netdev_err(dev, "rx fifo overrun error\n"); + if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) + netdev_err(dev, "tx fifo underrun error\n"); +} + +static void mvpp2_txq_done_percpu(void *arg) +{ + struct mvpp2_port *port = arg; + u32 cause_rx_tx, cause_tx, cause_misc; + + /* Rx/Tx cause register + * + * Bits 0-15: each bit indicates received packets on the Rx queue + * (bit 0 is for Rx queue 0). + * + * Bits 16-23: each bit indicates transmitted packets on the Tx queue + * (bit 16 is for Tx queue 0). + * + * Each CPU has its own Rx/Tx cause register + */ + cause_rx_tx = mvpp2_read(port->priv, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; + + if (cause_misc) { + mvpp2_cause_error(port->dev, cause_misc); + + /* Clear the cause register */ + mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); + mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), + cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); + } + + /* Release TX descriptors */ + if (cause_tx) { + struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx); + struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + + if (txq_pcpu->count) + mvpp2_txq_done(port, txq, txq_pcpu); + } +} + +static int mvpp2_poll(struct napi_struct *napi, int budget) +{ + u32 cause_rx_tx, cause_rx; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); + + on_each_cpu(mvpp2_txq_done_percpu, port, 1); + + cause_rx_tx = mvpp2_read(port->priv, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); + cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + + /* Process RX packets */ + cause_rx |= port->pending_cause_rx; + while (cause_rx && budget > 0) { + int count; + struct mvpp2_rx_queue *rxq; + + rxq = mvpp2_get_rx_queue(port, cause_rx); + if (!rxq) + break; + + count = mvpp2_rx(port, budget, rxq); + rx_done += count; + budget -= count; + if (budget > 0) { + /* Clear the bit associated to this Rx queue + * so that next iteration will continue from + * the next Rx queue. 
+ */ + cause_rx &= ~(1 << rxq->logic_rxq); + } + } + + if (budget > 0) { + cause_rx = 0; + napi_complete(napi); + + mvpp2_interrupts_enable(port); + } + port->pending_cause_rx = cause_rx; + return rx_done; +} + +/* Set hw internals when starting port */ +static void mvpp2_start_dev(struct mvpp2_port *port) +{ + mvpp2_gmac_max_rx_size_set(port); + mvpp2_txp_max_tx_size_set(port); + + napi_enable(&port->napi); + + /* Enable interrupts on all CPUs */ + mvpp2_interrupts_enable(port); + + mvpp2_port_enable(port); + phy_start(port->phy_dev); + netif_tx_start_all_queues(port->dev); +} + +/* Set hw internals when stopping port */ +static void mvpp2_stop_dev(struct mvpp2_port *port) +{ + /* Stop new packets from arriving to RXQs */ + mvpp2_ingress_disable(port); + + mdelay(10); + + /* Disable interrupts on all CPUs */ + mvpp2_interrupts_disable(port); + + napi_disable(&port->napi); + + netif_carrier_off(port->dev); + netif_tx_stop_all_queues(port->dev); + + mvpp2_egress_disable(port); + mvpp2_port_disable(port); + phy_stop(port->phy_dev); +} + +/* Return positive if MTU is valid */ +static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu) +{ + if (mtu < 68) { + netdev_err(dev, "cannot change mtu to less than 68\n"); + return -EINVAL; + } + + /* 9676 == 9700 - 20 and rounding to 8 */ + if (mtu > 9676) { + netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu); + mtu = 9676; + } + + if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { + netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, + ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); + mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); + } + + return mtu; +} + +static int mvpp2_check_ringparam_valid(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + u16 new_rx_pending = ring->rx_pending; + u16 new_tx_pending = ring->tx_pending; + + if (ring->rx_pending == 0 || ring->tx_pending == 0) + return -EINVAL; + + if (ring->rx_pending > MVPP2_MAX_RXD) + new_rx_pending = MVPP2_MAX_RXD; + else if (!IS_ALIGNED(ring->rx_pending, 16)) + new_rx_pending = ALIGN(ring->rx_pending, 16); + + if (ring->tx_pending > MVPP2_MAX_TXD) + new_tx_pending = MVPP2_MAX_TXD; + else if (!IS_ALIGNED(ring->tx_pending, 32)) + new_tx_pending = ALIGN(ring->tx_pending, 32); + + if (ring->rx_pending != new_rx_pending) { + netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", + ring->rx_pending, new_rx_pending); + ring->rx_pending = new_rx_pending; + } + + if (ring->tx_pending != new_tx_pending) { + netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", + ring->tx_pending, new_tx_pending); + ring->tx_pending = new_tx_pending; + } + + return 0; +} + +static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_m, mac_addr_h; + + mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); + mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; + addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = mac_addr_m & 0xFF; + addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; +} + +static int mvpp2_phy_connect(struct mvpp2_port *port) +{ + struct phy_device *phy_dev; + + phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0, + port->phy_interface); + if (!phy_dev) { + netdev_err(port->dev, "cannot connect to phy\n"); + return -ENODEV; + } + phy_dev->supported &= PHY_GBIT_FEATURES; + phy_dev->advertising = 
phy_dev->supported; + + port->phy_dev = phy_dev; + port->link = 0; + port->duplex = 0; + port->speed = 0; + + return 0; +} + +static void mvpp2_phy_disconnect(struct mvpp2_port *port) +{ + phy_disconnect(port->phy_dev); + port->phy_dev = NULL; +} + +static int mvpp2_open(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + unsigned char mac_bcast[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + int err; + + err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); + return err; + } + err = mvpp2_prs_mac_da_accept(port->priv, port->id, + dev->dev_addr, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n"); + return err; + } + err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); + if (err) { + netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); + return err; + } + err = mvpp2_prs_def_flow(port); + if (err) { + netdev_err(dev, "mvpp2_prs_def_flow failed\n"); + return err; + } + + /* Allocate the Rx/Tx queues */ + err = mvpp2_setup_rxqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Rx queues\n"); + return err; + } + + err = mvpp2_setup_txqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Tx queues\n"); + goto err_cleanup_rxqs; + } + + err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port); + if (err) { + netdev_err(port->dev, "cannot request IRQ %d\n", port->irq); + goto err_cleanup_txqs; + } + + /* In default link is down */ + netif_carrier_off(port->dev); + + err = mvpp2_phy_connect(port); + if (err < 0) + goto err_free_irq; + + /* Unmask interrupts on all CPUs */ + on_each_cpu(mvpp2_interrupts_unmask, port, 1); + + mvpp2_start_dev(port); + + return 0; + +err_free_irq: + free_irq(port->irq, port); +err_cleanup_txqs: + mvpp2_cleanup_txqs(port); +err_cleanup_rxqs: + mvpp2_cleanup_rxqs(port); + return err; +} + +static int mvpp2_stop(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + mvpp2_stop_dev(port); + mvpp2_phy_disconnect(port); + + /* Mask interrupts on all CPUs */ + on_each_cpu(mvpp2_interrupts_mask, port, 1); + + free_irq(port->irq, port); + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + return 0; +} + +static void mvpp2_set_rx_mode(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2 *priv = port->priv; + struct netdev_hw_addr *ha; + int id = port->id; + bool allmulti = dev->flags & IFF_ALLMULTI; + + mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); + mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); + mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); + + /* Remove all port->id's mcast enries */ + mvpp2_prs_mcast_del_all(priv, id); + + if (allmulti && !netdev_mc_empty(dev)) { + netdev_for_each_mc_addr(ha, dev) + mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); + } +} + +static int mvpp2_set_mac_address(struct net_device *dev, void *p) +{ + struct mvpp2_port *port = netdev_priv(dev); + const struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) { + err = -EADDRNOTAVAIL; + goto error; + } + + if (!netif_running(dev)) { + err = mvpp2_prs_update_mac_da(dev, addr->sa_data); + if (!err) + return 0; + /* Reconfigure parser to accept the original MAC address */ + err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); + if (err) + goto error; + } + + mvpp2_stop_dev(port); + + err = mvpp2_prs_update_mac_da(dev, addr->sa_data); + if (!err) + goto out_start; + + /* 
Reconfigure parser accept the original MAC address */ + err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); + if (err) + goto error; +out_start: + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + return 0; + +error: + netdev_err(dev, "fail to change MAC address\n"); + return err; +} + +static int mvpp2_change_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + mtu = mvpp2_check_mtu_valid(dev, mtu); + if (mtu < 0) { + err = mtu; + goto error; + } + + if (!netif_running(dev)) { + err = mvpp2_bm_update_mtu(dev, mtu); + if (!err) { + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); + return 0; + } + + /* Reconfigure BM to the original MTU */ + err = mvpp2_bm_update_mtu(dev, dev->mtu); + if (err) + goto error; + } + + mvpp2_stop_dev(port); + + err = mvpp2_bm_update_mtu(dev, mtu); + if (!err) { + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); + goto out_start; + } + + /* Reconfigure BM to the original MTU */ + err = mvpp2_bm_update_mtu(dev, dev->mtu); + if (err) + goto error; + +out_start: + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + + return 0; + +error: + netdev_err(dev, "fail to change MTU\n"); + return err; +} + +static struct rtnl_link_stats64 * +mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mvpp2_port *port = netdev_priv(dev); + unsigned int start; + int cpu; + + for_each_possible_cpu(cpu) { + struct mvpp2_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + + cpu_stats = per_cpu_ptr(port->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; + tx_packets = cpu_stats->tx_packets; + tx_bytes = cpu_stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + stats->rx_errors = dev->stats.rx_errors; + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; + + return stats; +} + +static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret; + + if (!port->phy_dev) + return -ENOTSUPP; + + ret = phy_mii_ioctl(port->phy_dev, ifr, cmd); + if (!ret) + mvpp2_link_event(dev); + + return ret; +} + +/* Ethtool methods */ + +/* Get settings (phy address, speed) for ethtools */ +static int mvpp2_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phy_dev) + return -ENODEV; + return phy_ethtool_gset(port->phy_dev, cmd); +} + +/* Set settings (phy address, speed) for ethtools */ +static int mvpp2_ethtool_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phy_dev) + return -ENODEV; + return phy_ethtool_sset(port->phy_dev, cmd); +} + +/* Set interrupt coalescing for ethtools */ +static int mvpp2_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + int queue; + + for (queue = 0; queue < rxq_number; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->time_coal = c->rx_coalesce_usecs; + rxq->pkts_coal = c->rx_max_coalesced_frames; + mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); + mvpp2_rx_time_coal_set(port, rxq, 
rxq->time_coal); + } + + for (queue = 0; queue < txq_number; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + txq->done_pkts_coal = c->tx_max_coalesced_frames; + } + + on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); + return 0; +} + +/* get coalescing for ethtools */ +static int mvpp2_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + + c->rx_coalesce_usecs = port->rxqs[0]->time_coal; + c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; + c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; + return 0; +} + +static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(&dev->dev), + sizeof(drvinfo->bus_info)); +} + +static void mvpp2_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + + ring->rx_max_pending = MVPP2_MAX_RXD; + ring->tx_max_pending = MVPP2_MAX_TXD; + ring->rx_pending = port->rx_ring_size; + ring->tx_pending = port->tx_ring_size; +} + +static int mvpp2_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + u16 prev_rx_ring_size = port->rx_ring_size; + u16 prev_tx_ring_size = port->tx_ring_size; + int err; + + err = mvpp2_check_ringparam_valid(dev, ring); + if (err) + return err; + + if (!netif_running(dev)) { + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + return 0; + } + + /* The interface is running, so we have to force a + * reallocation of the queues + */ + mvpp2_stop_dev(port); + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + + err = mvpp2_setup_rxqs(port); + if (err) { + /* Reallocate Rx queues with the original ring size */ + port->rx_ring_size = prev_rx_ring_size; + ring->rx_pending = prev_rx_ring_size; + err = mvpp2_setup_rxqs(port); + if (err) + goto err_out; + } + err = mvpp2_setup_txqs(port); + if (err) { + /* Reallocate Tx queues with the original ring size */ + port->tx_ring_size = prev_tx_ring_size; + ring->tx_pending = prev_tx_ring_size; + err = mvpp2_setup_txqs(port); + if (err) + goto err_clean_rxqs; + } + + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + + return 0; + +err_clean_rxqs: + mvpp2_cleanup_rxqs(port); +err_out: + netdev_err(dev, "fail to change ring parameters"); + return err; +} + +/* Device ops */ + +static const struct net_device_ops mvpp2_netdev_ops = { + .ndo_open = mvpp2_open, + .ndo_stop = mvpp2_stop, + .ndo_start_xmit = mvpp2_tx, + .ndo_set_rx_mode = mvpp2_set_rx_mode, + .ndo_set_mac_address = mvpp2_set_mac_address, + .ndo_change_mtu = mvpp2_change_mtu, + .ndo_get_stats64 = mvpp2_get_stats64, + .ndo_do_ioctl = mvpp2_ioctl, +}; + +static const struct ethtool_ops mvpp2_eth_tool_ops = { + .get_link = ethtool_op_get_link, + .get_settings = mvpp2_ethtool_get_settings, + .set_settings = mvpp2_ethtool_set_settings, + .set_coalesce = mvpp2_ethtool_set_coalesce, + .get_coalesce = mvpp2_ethtool_get_coalesce, + .get_drvinfo = mvpp2_ethtool_get_drvinfo, + .get_ringparam = mvpp2_ethtool_get_ringparam, + .set_ringparam = mvpp2_ethtool_set_ringparam, +}; + +/* Driver initialization */ + +static void 
mvpp2_port_power_up(struct mvpp2_port *port) +{ + mvpp2_port_mii_set(port); + mvpp2_port_periodic_xon_disable(port); + mvpp2_port_fc_adv_enable(port); + mvpp2_port_reset(port); +} + +/* Initialize port HW */ +static int mvpp2_port_init(struct mvpp2_port *port) +{ + struct device *dev = port->dev->dev.parent; + struct mvpp2 *priv = port->priv; + struct mvpp2_txq_pcpu *txq_pcpu; + int queue, cpu, err; + + if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM) + return -EINVAL; + + /* Disable port */ + mvpp2_egress_disable(port); + mvpp2_port_disable(port); + + port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), + GFP_KERNEL); + if (!port->txqs) + return -ENOMEM; + + /* Associate physical Tx queues to this port and initialize. + * The mapping is predefined. + */ + for (queue = 0; queue < txq_number; queue++) { + int queue_phy_id = mvpp2_txq_phys(port->id, queue); + struct mvpp2_tx_queue *txq; + + txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); + if (!txq) + return -ENOMEM; + + txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); + if (!txq->pcpu) { + err = -ENOMEM; + goto err_free_percpu; + } + + txq->id = queue_phy_id; + txq->log_id = queue; + txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu->cpu = cpu; + } + + port->txqs[queue] = txq; + } + + port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), + GFP_KERNEL); + if (!port->rxqs) { + err = -ENOMEM; + goto err_free_percpu; + } + + /* Allocate and initialize Rx queue for this port */ + for (queue = 0; queue < rxq_number; queue++) { + struct mvpp2_rx_queue *rxq; + + /* Map physical Rx queue to port's logical Rx queue */ + rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); + if (!rxq) + goto err_free_percpu; + /* Map this Rx queue to a physical queue */ + rxq->id = port->first_rxq + queue; + rxq->port = port->id; + rxq->logic_rxq = queue; + + port->rxqs[queue] = rxq; + } + + /* Configure Rx queue group interrupt for this port */ + mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number); + + /* Create Rx descriptor rings */ + for (queue = 0; queue < rxq_number; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->size = port->rx_ring_size; + rxq->pkts_coal = MVPP2_RX_COAL_PKTS; + rxq->time_coal = MVPP2_RX_COAL_USEC; + } + + mvpp2_ingress_disable(port); + + /* Port default configuration */ + mvpp2_defaults_set(port); + + /* Port's classifier configuration */ + mvpp2_cls_oversize_rxq_set(port); + mvpp2_cls_port_config(port); + + /* Provide an initial Rx packet size */ + port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); + + /* Initialize pools for swf */ + err = mvpp2_swf_bm_pool_init(port); + if (err) + goto err_free_percpu; + + return 0; + +err_free_percpu: + for (queue = 0; queue < txq_number; queue++) { + if (!port->txqs[queue]) + continue; + free_percpu(port->txqs[queue]->pcpu); + } + return err; +} + +/* Ports initialization */ +static int mvpp2_port_probe(struct platform_device *pdev, + struct device_node *port_node, + struct mvpp2 *priv, + int *next_first_rxq) +{ + struct device_node *phy_node; + struct mvpp2_port *port; + struct net_device *dev; + struct resource *res; + const char *dt_mac_addr; + const char *mac_from; + char hw_mac_addr[ETH_ALEN]; + u32 id; + int features; + int phy_mode; + int priv_common_regs_num = 2; + int err, i; + + dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, + rxq_number); + if (!dev) + return -ENOMEM; + + phy_node = of_parse_phandle(port_node, "phy", 0); + if 
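
mvpp2_port_init() above maps each port's logical Rx queues onto a consecutive block of the controller's physical queues (rxq->id = port->first_rxq + queue) and rejects the port if that block would run past MVPP2_RXQ_TOTAL_NUM; the probe path then advances next_first_rxq for the next port. The sketch below walks through that allocation with made-up totals, since the real rxq_number and MVPP2_RXQ_TOTAL_NUM values are not visible in this hunk.

#include <stdio.h>

#define TOTAL_PHYS_RXQS	32	/* hypothetical hardware total */
#define RXQS_PER_PORT	4	/* hypothetical per-port queue count */

static int assign_port_rxqs(int port, int *next_first_rxq)
{
	int first = *next_first_rxq;
	int q;

	/* Same bound check as mvpp2_port_init() performs. */
	if (first + RXQS_PER_PORT > TOTAL_PHYS_RXQS)
		return -1;

	for (q = 0; q < RXQS_PER_PORT; q++)
		printf("port %d: logical rxq %d -> physical rxq %d\n",
		       port, q, first + q);

	*next_first_rxq += RXQS_PER_PORT;	/* as done after port probe */
	return 0;
}

int main(void)
{
	int next = 0;
	int port;

	for (port = 0; port < 3; port++)
		if (assign_port_rxqs(port, &next))
			printf("port %d: out of physical rxqs\n", port);
	return 0;
}
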
(!phy_node) { + dev_err(&pdev->dev, "missing phy\n"); + err = -ENODEV; + goto err_free_netdev; + } + + phy_mode = of_get_phy_mode(port_node); + if (phy_mode < 0) { + dev_err(&pdev->dev, "incorrect phy mode\n"); + err = phy_mode; + goto err_free_netdev; + } + + if (of_property_read_u32(port_node, "port-id", &id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing port-id value\n"); + goto err_free_netdev; + } + + dev->tx_queue_len = MVPP2_MAX_TXD; + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &mvpp2_netdev_ops; + dev->ethtool_ops = &mvpp2_eth_tool_ops; + + port = netdev_priv(dev); + + port->irq = irq_of_parse_and_map(port_node, 0); + if (port->irq <= 0) { + err = -EINVAL; + goto err_free_netdev; + } + + if (of_property_read_bool(port_node, "marvell,loopback")) + port->flags |= MVPP2_F_LOOPBACK; + + port->priv = priv; + port->id = id; + port->first_rxq = *next_first_rxq; + port->phy_node = phy_node; + port->phy_interface = phy_mode; + + res = platform_get_resource(pdev, IORESOURCE_MEM, + priv_common_regs_num + id); + port->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(port->base)) { + err = PTR_ERR(port->base); + goto err_free_irq; + } + + /* Alloc per-cpu stats */ + port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); + if (!port->stats) { + err = -ENOMEM; + goto err_free_irq; + } + + dt_mac_addr = of_get_mac_address(port_node); + if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) { + mac_from = "device tree"; + ether_addr_copy(dev->dev_addr, dt_mac_addr); + } else { + mvpp2_get_mac_address(port, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + mac_from = "hardware"; + ether_addr_copy(dev->dev_addr, hw_mac_addr); + } else { + mac_from = "random"; + eth_hw_addr_random(dev); + } + } + + port->tx_ring_size = MVPP2_MAX_TXD; + port->rx_ring_size = MVPP2_MAX_RXD; + port->dev = dev; + SET_NETDEV_DEV(dev, &pdev->dev); + + err = mvpp2_port_init(port); + if (err < 0) { + dev_err(&pdev->dev, "failed to init port %d\n", id); + goto err_free_stats; + } + mvpp2_port_power_up(port); + + netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); + features = NETIF_F_SG | NETIF_F_IP_CSUM; + dev->features = features | NETIF_F_RXCSUM; + dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; + dev->vlan_features |= features; + + err = register_netdev(dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register netdev\n"); + goto err_free_txq_pcpu; + } + netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); + + /* Increment the first Rx queue number to be used by the next port */ + *next_first_rxq += rxq_number; + priv->port_list[id] = port; + return 0; + +err_free_txq_pcpu: + for (i = 0; i < txq_number; i++) + free_percpu(port->txqs[i]->pcpu); +err_free_stats: + free_percpu(port->stats); +err_free_irq: + irq_dispose_mapping(port->irq); +err_free_netdev: + free_netdev(dev); + return err; +} + +/* Ports removal routine */ +static void mvpp2_port_remove(struct mvpp2_port *port) +{ + int i; + + unregister_netdev(port->dev); + free_percpu(port->stats); + for (i = 0; i < txq_number; i++) + free_percpu(port->txqs[i]->pcpu); + irq_dispose_mapping(port->irq); + free_netdev(port->dev); +} + +/* Initialize decoding windows */ +static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, + struct mvpp2 *priv) +{ + u32 win_enable; + int i; + + for (i = 0; i < 6; i++) { + mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); + mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); + + if (i < 4) + mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); + } + + win_enable = 
0; + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + mvpp2_write(priv, MVPP2_WIN_BASE(i), + (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | + dram->mbus_dram_target_id); + + mvpp2_write(priv, MVPP2_WIN_SIZE(i), + (cs->size - 1) & 0xffff0000); + + win_enable |= (1 << i); + } + + mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); +} + +/* Initialize Rx FIFO's */ +static void mvpp2_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +/* Initialize network controller common part HW */ +static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + const struct mbus_dram_target_info *dram_target_info; + int err, i; + u32 val; + + /* Checks for hardware constraints */ + if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) || + (txq_number > MVPP2_MAX_TXQ)) { + dev_err(&pdev->dev, "invalid queue size parameter\n"); + return -EINVAL; + } + + /* MBUS windows configuration */ + dram_target_info = mv_mbus_dram_info(); + if (dram_target_info) + mvpp2_conf_mbus_windows(dram_target_info, priv); + + /* Disable HW PHY polling */ + val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + val |= MVPP2_PHY_AN_STOP_SMI0_MASK; + writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + + /* Allocate and initialize aggregated TXQs */ + priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), + sizeof(struct mvpp2_tx_queue), + GFP_KERNEL); + if (!priv->aggr_txqs) + return -ENOMEM; + + for_each_present_cpu(i) { + priv->aggr_txqs[i].id = i; + priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; + err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], + MVPP2_AGGR_TXQ_SIZE, i, priv); + if (err < 0) + return err; + } + + /* Rx Fifo Init */ + mvpp2_rx_fifo_init(priv); + + /* Reset Rx queue group interrupt configuration */ + for (i = 0; i < MVPP2_MAX_PORTS; i++) + mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number); + + writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, + priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); + + /* Allow cache snoop when transmiting packets */ + mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); + + /* Buffer Manager initialization */ + err = mvpp2_bm_init(pdev, priv); + if (err < 0) + return err; + + /* Parser default initialization */ + err = mvpp2_prs_default_init(pdev, priv); + if (err < 0) + return err; + + /* Classifier default initialization */ + mvpp2_cls_init(priv); + + return 0; +} + +static int mvpp2_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct device_node *port_node; + struct mvpp2 *priv; + struct resource *res; + int port_count, first_rxq; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->lms_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->lms_base)) + return PTR_ERR(priv->lms_base); + + priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); + if (IS_ERR(priv->pp_clk)) + return PTR_ERR(priv->pp_clk); + err = 
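
mvpp2_conf_mbus_windows() above packs each DRAM chip-select into a base register (the 16 MSBs of the base, the MBUS attribute in bits 15:8 and the target id in bits 7:0), a size register holding the 16 MSBs of size-1, and one enable bit per window. The userspace snippet below reproduces that packing for two example windows; the base, size, attribute and target-id numbers are illustrative only, not taken from any real board.

#include <stdio.h>
#include <stdint.h>

struct dram_window {
	uint32_t base;		/* window base address */
	uint32_t size;		/* window size in bytes */
	uint8_t  mbus_attr;	/* MBUS attribute of this chip select */
};

int main(void)
{
	const uint8_t target_id = 0x0;	/* hypothetical DRAM target id */
	const struct dram_window cs[] = {
		{ .base = 0x00000000, .size = 0x40000000, .mbus_attr = 0x0e },
		{ .base = 0x40000000, .size = 0x40000000, .mbus_attr = 0x0d },
	};
	uint32_t win_enable = 0;
	unsigned int i;

	for (i = 0; i < sizeof(cs) / sizeof(cs[0]); i++) {
		/* Same packing as the MVPP2_WIN_BASE/MVPP2_WIN_SIZE writes */
		uint32_t base = (cs[i].base & 0xffff0000) |
				((uint32_t)cs[i].mbus_attr << 8) | target_id;
		uint32_t size = (cs[i].size - 1) & 0xffff0000;

		win_enable |= 1u << i;
		printf("win %u: WIN_BASE=0x%08x WIN_SIZE=0x%08x\n", i, base, size);
	}
	printf("BASE_ADDR_ENABLE=0x%08x\n", win_enable);
	return 0;
}
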
clk_prepare_enable(priv->pp_clk); + if (err < 0) + return err; + + priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); + if (IS_ERR(priv->gop_clk)) { + err = PTR_ERR(priv->gop_clk); + goto err_pp_clk; + } + err = clk_prepare_enable(priv->gop_clk); + if (err < 0) + goto err_pp_clk; + + /* Get system's tclk rate */ + priv->tclk = clk_get_rate(priv->pp_clk); + + /* Initialize network controller */ + err = mvpp2_init(pdev, priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + goto err_gop_clk; + } + + port_count = of_get_available_child_count(dn); + if (port_count == 0) { + dev_err(&pdev->dev, "no ports enabled\n"); + err = -ENODEV; + goto err_gop_clk; + } + + priv->port_list = devm_kcalloc(&pdev->dev, port_count, + sizeof(struct mvpp2_port *), + GFP_KERNEL); + if (!priv->port_list) { + err = -ENOMEM; + goto err_gop_clk; + } + + /* Initialize ports */ + first_rxq = 0; + for_each_available_child_of_node(dn, port_node) { + err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq); + if (err < 0) + goto err_gop_clk; + } + + platform_set_drvdata(pdev, priv); + return 0; + +err_gop_clk: + clk_disable_unprepare(priv->gop_clk); +err_pp_clk: + clk_disable_unprepare(priv->pp_clk); + return err; +} + +static int mvpp2_remove(struct platform_device *pdev) +{ + struct mvpp2 *priv = platform_get_drvdata(pdev); + struct device_node *dn = pdev->dev.of_node; + struct device_node *port_node; + int i = 0; + + for_each_available_child_of_node(dn, port_node) { + if (priv->port_list[i]) + mvpp2_port_remove(priv->port_list[i]); + i++; + } + + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvpp2_bm_pool_destroy(pdev, priv, bm_pool); + } + + for_each_present_cpu(i) { + struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; + + dma_free_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + aggr_txq->descs, + aggr_txq->descs_phys); + } + + clk_disable_unprepare(priv->pp_clk); + clk_disable_unprepare(priv->gop_clk); + + return 0; +} + +static const struct of_device_id mvpp2_match[] = { + { .compatible = "marvell,armada-375-pp2" }, + { } +}; +MODULE_DEVICE_TABLE(of, mvpp2_match); + +static struct platform_driver mvpp2_driver = { + .probe = mvpp2_probe, + .remove = mvpp2_remove, + .driver = { + .name = MVPP2_DRIVER_NAME, + .of_match_table = mvpp2_match, + }, +}; + +module_platform_driver(mvpp2_driver); + +MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); +MODULE_AUTHOR("Marcin Wojtas "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c new file mode 100644 index 000000000..7ace07dad --- /dev/null +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -0,0 +1,1630 @@ +/* + * PXA168 ethernet driver. + * Most of the code is derived from mv643xx ethernet driver. + * + * Copyright (C) 2010 Marvell International Ltd. + * Sachin Sanap + * Zhangfei Gao + * Philip Rakity + * Mark Brown + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRIVER_NAME "pxa168-eth" +#define DRIVER_VERSION "0.3" + +/* + * Registers + */ + +#define PHY_ADDRESS 0x0000 +#define SMI 0x0010 +#define PORT_CONFIG 0x0400 +#define PORT_CONFIG_EXT 0x0408 +#define PORT_COMMAND 0x0410 +#define PORT_STATUS 0x0418 +#define HTPR 0x0428 +#define MAC_ADDR_LOW 0x0430 +#define MAC_ADDR_HIGH 0x0438 +#define SDMA_CONFIG 0x0440 +#define SDMA_CMD 0x0448 +#define INT_CAUSE 0x0450 +#define INT_W_CLEAR 0x0454 +#define INT_MASK 0x0458 +#define ETH_F_RX_DESC_0 0x0480 +#define ETH_C_RX_DESC_0 0x04A0 +#define ETH_C_TX_DESC_1 0x04E4 + +/* smi register */ +#define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */ +#define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */ +#define SMI_OP_W (0 << 26) /* Write operation */ +#define SMI_OP_R (1 << 26) /* Read operation */ + +#define PHY_WAIT_ITERATIONS 10 + +#define PXA168_ETH_PHY_ADDR_DEFAULT 0 +/* RX & TX descriptor command */ +#define BUF_OWNED_BY_DMA (1 << 31) + +/* RX descriptor status */ +#define RX_EN_INT (1 << 23) +#define RX_FIRST_DESC (1 << 17) +#define RX_LAST_DESC (1 << 16) +#define RX_ERROR (1 << 15) + +/* TX descriptor command */ +#define TX_EN_INT (1 << 23) +#define TX_GEN_CRC (1 << 22) +#define TX_ZERO_PADDING (1 << 18) +#define TX_FIRST_DESC (1 << 17) +#define TX_LAST_DESC (1 << 16) +#define TX_ERROR (1 << 15) + +/* SDMA_CMD */ +#define SDMA_CMD_AT (1 << 31) +#define SDMA_CMD_TXDL (1 << 24) +#define SDMA_CMD_TXDH (1 << 23) +#define SDMA_CMD_AR (1 << 15) +#define SDMA_CMD_ERD (1 << 7) + +/* Bit definitions of the Port Config Reg */ +#define PCR_DUPLEX_FULL (1 << 15) +#define PCR_HS (1 << 12) +#define PCR_EN (1 << 7) +#define PCR_PM (1 << 0) + +/* Bit definitions of the Port Config Extend Reg */ +#define PCXR_2BSM (1 << 28) +#define PCXR_DSCP_EN (1 << 21) +#define PCXR_RMII_EN (1 << 20) +#define PCXR_AN_SPEED_DIS (1 << 19) +#define PCXR_SPEED_100 (1 << 18) +#define PCXR_MFL_1518 (0 << 14) +#define PCXR_MFL_1536 (1 << 14) +#define PCXR_MFL_2048 (2 << 14) +#define PCXR_MFL_64K (3 << 14) +#define PCXR_FLOWCTL_DIS (1 << 12) +#define PCXR_FLP (1 << 11) +#define PCXR_AN_FLOWCTL_DIS (1 << 10) +#define PCXR_AN_DUPLEX_DIS (1 << 9) +#define PCXR_PRIO_TX_OFF 3 +#define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) + +/* Bit definitions of the SDMA Config Reg */ +#define SDCR_BSZ_OFF 12 +#define SDCR_BSZ8 (3 << SDCR_BSZ_OFF) +#define SDCR_BSZ4 (2 << SDCR_BSZ_OFF) +#define SDCR_BSZ2 (1 << SDCR_BSZ_OFF) +#define SDCR_BSZ1 (0 << SDCR_BSZ_OFF) +#define SDCR_BLMR (1 << 6) +#define SDCR_BLMT (1 << 7) +#define SDCR_RIFB (1 << 9) +#define SDCR_RC_OFF 2 +#define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF) + +/* + * Bit definitions of the Interrupt Cause Reg + * and Interrupt MASK Reg is the same + */ +#define ICR_RXBUF (1 << 0) +#define ICR_TXBUF_H (1 << 2) +#define ICR_TXBUF_L (1 << 3) +#define ICR_TXEND_H (1 << 6) +#define ICR_TXEND_L (1 << 7) +#define ICR_RXERR (1 << 8) +#define ICR_TXERR_H (1 << 10) +#define ICR_TXERR_L (1 << 11) +#define ICR_TX_UDR (1 << 13) +#define ICR_MII_CH (1 << 28) + +#define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\ + ICR_TXERR_H | ICR_TXERR_L |\ + ICR_TXEND_H | ICR_TXEND_L |\ + ICR_RXBUF | ICR_RXERR | ICR_MII_CH) + +#define ETH_HW_IP_ALIGN 2 /* hw 
aligns IP header */ + +#define NUM_RX_DESCS 64 +#define NUM_TX_DESCS 64 + +#define HASH_ADD 0 +#define HASH_DELETE 1 +#define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */ +#define HOP_NUMBER 12 + +/* Bit definitions for Port status */ +#define PORT_SPEED_100 (1 << 0) +#define FULL_DUPLEX (1 << 1) +#define FLOW_CONTROL_DISABLED (1 << 2) +#define LINK_UP (1 << 3) + +/* Bit definitions for work to be done */ +#define WORK_TX_DONE (1 << 1) + +/* + * Misc definitions. + */ +#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) + +struct rx_desc { + u32 cmd_sts; /* Descriptor command status */ + u16 byte_cnt; /* Descriptor buffer byte count */ + u16 buf_size; /* Buffer size */ + u32 buf_ptr; /* Descriptor buffer pointer */ + u32 next_desc_ptr; /* Next descriptor pointer */ +}; + +struct tx_desc { + u32 cmd_sts; /* Command/status field */ + u16 reserved; + u16 byte_cnt; /* buffer byte count */ + u32 buf_ptr; /* pointer to buffer for this descriptor */ + u32 next_desc_ptr; /* Pointer to next descriptor */ +}; + +struct pxa168_eth_private { + int port_num; /* User Ethernet port number */ + int phy_addr; + int phy_speed; + int phy_duplex; + phy_interface_t phy_intf; + + int rx_resource_err; /* Rx ring resource error flag */ + + /* Next available and first returning Rx resource */ + int rx_curr_desc_q, rx_used_desc_q; + + /* Next available and first returning Tx resource */ + int tx_curr_desc_q, tx_used_desc_q; + + struct rx_desc *p_rx_desc_area; + dma_addr_t rx_desc_dma; + int rx_desc_area_size; + struct sk_buff **rx_skb; + + struct tx_desc *p_tx_desc_area; + dma_addr_t tx_desc_dma; + int tx_desc_area_size; + struct sk_buff **tx_skb; + + struct work_struct tx_timeout_task; + + struct net_device *dev; + struct napi_struct napi; + u8 work_todo; + int skb_size; + + /* Size of Tx Ring per queue */ + int tx_ring_size; + /* Number of tx descriptors in use */ + int tx_desc_count; + /* Size of Rx Ring per queue */ + int rx_ring_size; + /* Number of rx descriptors in use */ + int rx_desc_count; + + /* + * Used in case RX Ring is empty, which can occur when + * system does not have resources (skb's) + */ + struct timer_list timeout; + struct mii_bus *smi_bus; + struct phy_device *phy; + + /* clock */ + struct clk *clk; + struct pxa168_eth_platform_data *pd; + /* + * Ethernet controller base address. 
+ */ + void __iomem *base; + + /* Pointer to the hardware address filter table */ + void *htpr; + dma_addr_t htpr_dma; +}; + +struct addr_table_entry { + __le32 lo; + __le32 hi; +}; + +/* Bit fields of a Hash Table Entry */ +enum hash_table_entry { + HASH_ENTRY_VALID = 1, + SKIP = 2, + HASH_ENTRY_RECEIVE_DISCARD = 4, + HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 +}; + +static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_init_hw(struct pxa168_eth_private *pep); +static int pxa168_init_phy(struct net_device *dev); +static void eth_port_reset(struct net_device *dev); +static void eth_port_start(struct net_device *dev); +static int pxa168_eth_open(struct net_device *dev); +static int pxa168_eth_stop(struct net_device *dev); + +static inline u32 rdl(struct pxa168_eth_private *pep, int offset) +{ + return readl(pep->base + offset); +} + +static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) +{ + writel(data, pep->base + offset); +} + +static void abort_dma(struct pxa168_eth_private *pep) +{ + int delay; + int max_retries = 40; + + do { + wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT); + udelay(100); + + delay = 10; + while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT)) + && delay-- > 0) { + udelay(10); + } + } while (max_retries-- > 0 && delay <= 0); + + if (max_retries <= 0) + netdev_err(pep->dev, "%s : DMA Stuck\n", __func__); +} + +static void rxq_refill(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct sk_buff *skb; + struct rx_desc *p_used_rx_desc; + int used_rx_desc; + + while (pep->rx_desc_count < pep->rx_ring_size) { + int size; + + skb = netdev_alloc_skb(dev, pep->skb_size); + if (!skb) + break; + if (SKB_DMA_REALIGN) + skb_reserve(skb, SKB_DMA_REALIGN); + pep->rx_desc_count++; + /* Get 'used' Rx descriptor */ + used_rx_desc = pep->rx_used_desc_q; + p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; + size = skb_end_pointer(skb) - skb->data; + p_used_rx_desc->buf_ptr = dma_map_single(NULL, + skb->data, + size, + DMA_FROM_DEVICE); + p_used_rx_desc->buf_size = size; + pep->rx_skb[used_rx_desc] = skb; + + /* Return the descriptor to DMA ownership */ + wmb(); + p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; + wmb(); + + /* Move the used descriptor pointer to the next descriptor */ + pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; + + /* Any Rx return cancels the Rx resource error status */ + pep->rx_resource_err = 0; + + skb_reserve(skb, ETH_HW_IP_ALIGN); + } + + /* + * If RX ring is empty of SKB, set a timer to try allocating + * again at a later time. 
+ */ + if (pep->rx_desc_count == 0) { + pep->timeout.expires = jiffies + (HZ / 10); + add_timer(&pep->timeout); + } +} + +static inline void rxq_refill_timer_wrapper(unsigned long data) +{ + struct pxa168_eth_private *pep = (void *)data; + napi_schedule(&pep->napi); +} + +static inline u8 flip_8_bits(u8 x) +{ + return (((x) & 0x01) << 3) | (((x) & 0x02) << 1) + | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3) + | (((x) & 0x10) << 3) | (((x) & 0x20) << 1) + | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3); +} + +static void nibble_swap_every_byte(unsigned char *mac_addr) +{ + int i; + for (i = 0; i < ETH_ALEN; i++) { + mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) | + ((mac_addr[i] & 0xf0) >> 4); + } +} + +static void inverse_every_nibble(unsigned char *mac_addr) +{ + int i; + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = flip_8_bits(mac_addr[i]); +} + +/* + * ---------------------------------------------------------------------------- + * This function will calculate the hash function of the address. + * Inputs + * mac_addr_orig - MAC address. + * Outputs + * return the calculated entry. + */ +static u32 hash_function(unsigned char *mac_addr_orig) +{ + u32 hash_result; + u32 addr0; + u32 addr1; + u32 addr2; + u32 addr3; + unsigned char mac_addr[ETH_ALEN]; + + /* Make a copy of MAC address since we are going to performe bit + * operations on it + */ + memcpy(mac_addr, mac_addr_orig, ETH_ALEN); + + nibble_swap_every_byte(mac_addr); + inverse_every_nibble(mac_addr); + + addr0 = (mac_addr[5] >> 2) & 0x3f; + addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2); + addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1; + addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8); + + hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3); + hash_result = hash_result & 0x07ff; + return hash_result; +} + +/* + * ---------------------------------------------------------------------------- + * This function will add/del an entry to the address table. + * Inputs + * pep - ETHERNET . + * mac_addr - MAC address. + * skip - if 1, skip this address.Used in case of deleting an entry which is a + * part of chain in the hash table.We can't just delete the entry since + * that will break the chain.We need to defragment the tables time to + * time. + * rd - 0 Discard packet upon match. + * - 1 Receive packet upon match. + * Outputs + * address table entry is added/deleted. + * 0 if success. 
+ * -ENOSPC if table full + */ +static int add_del_hash_entry(struct pxa168_eth_private *pep, + unsigned char *mac_addr, + u32 rd, u32 skip, int del) +{ + struct addr_table_entry *entry, *start; + u32 new_high; + u32 new_low; + u32 i; + + new_low = (((mac_addr[1] >> 4) & 0xf) << 15) + | (((mac_addr[1] >> 0) & 0xf) << 11) + | (((mac_addr[0] >> 4) & 0xf) << 7) + | (((mac_addr[0] >> 0) & 0xf) << 3) + | (((mac_addr[3] >> 4) & 0x1) << 31) + | (((mac_addr[3] >> 0) & 0xf) << 27) + | (((mac_addr[2] >> 4) & 0xf) << 23) + | (((mac_addr[2] >> 0) & 0xf) << 19) + | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT) + | HASH_ENTRY_VALID; + + new_high = (((mac_addr[5] >> 4) & 0xf) << 15) + | (((mac_addr[5] >> 0) & 0xf) << 11) + | (((mac_addr[4] >> 4) & 0xf) << 7) + | (((mac_addr[4] >> 0) & 0xf) << 3) + | (((mac_addr[3] >> 5) & 0x7) << 0); + + /* + * Pick the appropriate table, start scanning for free/reusable + * entries at the index obtained by hashing the specified MAC address + */ + start = pep->htpr; + entry = start + hash_function(mac_addr); + for (i = 0; i < HOP_NUMBER; i++) { + if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { + break; + } else { + /* if same address put in same position */ + if (((le32_to_cpu(entry->lo) & 0xfffffff8) == + (new_low & 0xfffffff8)) && + (le32_to_cpu(entry->hi) == new_high)) { + break; + } + } + if (entry == start + 0x7ff) + entry = start; + else + entry++; + } + + if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) && + (le32_to_cpu(entry->hi) != new_high) && del) + return 0; + + if (i == HOP_NUMBER) { + if (!del) { + netdev_info(pep->dev, + "%s: table section is full, need to " + "move to 16kB implementation?\n", + __FILE__); + return -ENOSPC; + } else + return 0; + } + + /* + * Update the selected entry + */ + if (del) { + entry->hi = 0; + entry->lo = 0; + } else { + entry->hi = cpu_to_le32(new_high); + entry->lo = cpu_to_le32(new_low); + } + + return 0; +} + +/* + * ---------------------------------------------------------------------------- + * Create an addressTable entry from MAC address info + * found in the specifed net_device struct + * + * Input : pointer to ethernet interface network device structure + * Output : N/A + */ +static void update_hash_table_mac_address(struct pxa168_eth_private *pep, + unsigned char *oaddr, + unsigned char *addr) +{ + /* Delete old entry */ + if (oaddr) + add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); + /* Add new entry */ + add_del_hash_entry(pep, addr, 1, 0, HASH_ADD); +} + +static int init_hash_table(struct pxa168_eth_private *pep) +{ + /* + * Hardware expects CPU to build a hash table based on a predefined + * hash function and populate it based on hardware address. The + * location of the hash table is identified by 32-bit pointer stored + * in HTPR internal register. Two possible sizes exists for the hash + * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB + * (16kB of DRAM required (4 x 4 kB banks)).We currently only support + * 1/2kB. + */ + /* TODO: Add support for 8kB hash table and alternative hash + * function.Driver can dynamically switch to them if the 1/2kB hash + * table is full. 
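
The address filter above works by hashing a nibble-swapped, bit-reversed copy of the MAC address into one of 0x800 buckets and then probing up to HOP_NUMBER entries from there. The standalone program below re-implements hash_function() and flip_8_bits() from above so the bucket index for a sample MAC can be checked outside the driver; the example address is arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint8_t flip_8_bits(uint8_t x)
{
	return ((x & 0x01) << 3) | ((x & 0x02) << 1)
	     | ((x & 0x04) >> 1) | ((x & 0x08) >> 3)
	     | ((x & 0x10) << 3) | ((x & 0x20) << 1)
	     | ((x & 0x40) >> 1) | ((x & 0x80) >> 3);
}

static uint32_t pxa168_hash(const uint8_t *mac_orig)
{
	uint8_t mac[6];
	uint32_t addr0, addr1, addr2, addr3;
	int i;

	memcpy(mac, mac_orig, 6);
	for (i = 0; i < 6; i++) {
		/* swap nibbles, then reverse the bits inside each nibble */
		mac[i] = (uint8_t)(((mac[i] & 0x0f) << 4) | ((mac[i] & 0xf0) >> 4));
		mac[i] = flip_8_bits(mac[i]);
	}

	addr0 = (mac[5] >> 2) & 0x3f;
	addr1 = (mac[5] & 0x03) | ((mac[4] & 0x7f) << 2);
	addr2 = ((mac[4] & 0x80) >> 7) | (mac[3] << 1);
	addr3 = (mac[2] & 0xff) | ((mac[1] & 1) << 8);

	return ((addr0 << 9) | (addr1 ^ addr2 ^ addr3)) & 0x07ff;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };

	printf("hash bucket = 0x%03x (0x800-entry table, HOP_NUMBER probes)\n",
	       pxa168_hash(mac));
	return 0;
}
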
+ */ + if (pep->htpr == NULL) { + pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, + HASH_ADDR_TABLE_SIZE, + &pep->htpr_dma, GFP_KERNEL); + if (pep->htpr == NULL) + return -ENOMEM; + } else { + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + } + wrl(pep, HTPR, pep->htpr_dma); + return 0; +} + +static void pxa168_eth_set_rx_mode(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct netdev_hw_addr *ha; + u32 val; + + val = rdl(pep, PORT_CONFIG); + if (dev->flags & IFF_PROMISC) + val |= PCR_PM; + else + val &= ~PCR_PM; + wrl(pep, PORT_CONFIG, val); + + /* + * Remove the old list of MAC address and add dev->addr + * and multicast address. + */ + memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); + update_hash_table_mac_address(pep, NULL, dev->dev_addr); + + netdev_for_each_mc_addr(ha, dev) + update_hash_table_mac_address(pep, NULL, ha->addr); +} + +static void pxa168_eth_get_mac_address(struct net_device *dev, + unsigned char *addr) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH); + unsigned int mac_l = rdl(pep, MAC_ADDR_LOW); + + addr[0] = (mac_h >> 24) & 0xff; + addr[1] = (mac_h >> 16) & 0xff; + addr[2] = (mac_h >> 8) & 0xff; + addr[3] = mac_h & 0xff; + addr[4] = (mac_l >> 8) & 0xff; + addr[5] = mac_l & 0xff; +} + +static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned char oldMac[ETH_ALEN]; + u32 mac_h, mac_l; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + memcpy(oldMac, dev->dev_addr, ETH_ALEN); + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + + mac_h = dev->dev_addr[0] << 24; + mac_h |= dev->dev_addr[1] << 16; + mac_h |= dev->dev_addr[2] << 8; + mac_h |= dev->dev_addr[3]; + mac_l = dev->dev_addr[4] << 8; + mac_l |= dev->dev_addr[5]; + wrl(pep, MAC_ADDR_HIGH, mac_h); + wrl(pep, MAC_ADDR_LOW, mac_l); + + netif_addr_lock_bh(dev); + update_hash_table_mac_address(pep, oldMac, dev->dev_addr); + netif_addr_unlock_bh(dev); + return 0; +} + +static void eth_port_start(struct net_device *dev) +{ + unsigned int val = 0; + struct pxa168_eth_private *pep = netdev_priv(dev); + int tx_curr_desc, rx_curr_desc; + + phy_start(pep->phy); + + /* Assignment of Tx CTRP of given queue */ + tx_curr_desc = pep->tx_curr_desc_q; + wrl(pep, ETH_C_TX_DESC_1, + (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); + + /* Assignment of Rx CRDP of given queue */ + rx_curr_desc = pep->rx_curr_desc_q; + wrl(pep, ETH_C_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + wrl(pep, ETH_F_RX_DESC_0, + (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Enable all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, ALL_INTS); + + val = rdl(pep, PORT_CONFIG); + val |= PCR_EN; + wrl(pep, PORT_CONFIG, val); + + /* Start RX DMA engine */ + val = rdl(pep, SDMA_CMD); + val |= SDMA_CMD_ERD; + wrl(pep, SDMA_CMD, val); +} + +static void eth_port_reset(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + unsigned int val = 0; + + /* Stop all interrupts for receive, transmit and error. */ + wrl(pep, INT_MASK, 0); + + /* Clear all interrupts */ + wrl(pep, INT_CAUSE, 0); + + /* Stop RX DMA */ + val = rdl(pep, SDMA_CMD); + val &= ~SDMA_CMD_ERD; /* abort dma command */ + + /* Abort any transmit and receive operations and put DMA + * in idle state. 
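
pxa168_eth_set_mac_address() above splits the six address bytes across the MAC_ADDR_HIGH and MAC_ADDR_LOW registers (four bytes high, two bytes low), and pxa168_eth_get_mac_address() reverses that. Below is a small round-trip check of the same packing in userspace, using an arbitrary example address.

#include <stdio.h>
#include <stdint.h>

static void mac_to_regs(const uint8_t mac[6], uint32_t *hi, uint32_t *lo)
{
	*hi = ((uint32_t)mac[0] << 24) | ((uint32_t)mac[1] << 16) |
	      ((uint32_t)mac[2] << 8)  |  (uint32_t)mac[3];
	*lo = ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
}

static void regs_to_mac(uint32_t hi, uint32_t lo, uint8_t mac[6])
{
	mac[0] = (hi >> 24) & 0xff;
	mac[1] = (hi >> 16) & 0xff;
	mac[2] = (hi >> 8) & 0xff;
	mac[3] = hi & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = lo & 0xff;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x50, 0x43, 0xaa, 0xbb, 0xcc };
	uint8_t back[6];
	uint32_t hi, lo;

	mac_to_regs(mac, &hi, &lo);
	regs_to_mac(hi, lo, back);
	printf("MAC_ADDR_HIGH=0x%08x MAC_ADDR_LOW=0x%04x round-trips: %s\n",
	       hi, lo, back[0] == mac[0] && back[5] == mac[5] ? "yes" : "no");
	return 0;
}
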
+ */ + abort_dma(pep); + + /* Disable port */ + val = rdl(pep, PORT_CONFIG); + val &= ~PCR_EN; + wrl(pep, PORT_CONFIG, val); + + phy_stop(pep->phy); +} + +/* + * txq_reclaim - Free the tx desc data for completed descriptors + * If force is non-zero, frees uncompleted descriptors as well + */ +static int txq_reclaim(struct net_device *dev, int force) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct tx_desc *desc; + u32 cmd_sts; + struct sk_buff *skb; + int tx_index; + dma_addr_t addr; + int count; + int released = 0; + + netif_tx_lock(dev); + + pep->work_todo &= ~WORK_TX_DONE; + while (pep->tx_desc_count > 0) { + tx_index = pep->tx_used_desc_q; + desc = &pep->p_tx_desc_area[tx_index]; + cmd_sts = desc->cmd_sts; + if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) { + if (released > 0) { + goto txq_reclaim_end; + } else { + released = -1; + goto txq_reclaim_end; + } + } + pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size; + pep->tx_desc_count--; + addr = desc->buf_ptr; + count = desc->byte_cnt; + skb = pep->tx_skb[tx_index]; + if (skb) + pep->tx_skb[tx_index] = NULL; + + if (cmd_sts & TX_ERROR) { + if (net_ratelimit()) + netdev_err(dev, "Error in TX\n"); + dev->stats.tx_errors++; + } + dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); + if (skb) + dev_kfree_skb_irq(skb); + released++; + } +txq_reclaim_end: + netif_tx_unlock(dev); + return released; +} + +static void pxa168_eth_tx_timeout(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count); + + schedule_work(&pep->tx_timeout_task); +} + +static void pxa168_eth_tx_timeout_task(struct work_struct *work) +{ + struct pxa168_eth_private *pep = container_of(work, + struct pxa168_eth_private, + tx_timeout_task); + struct net_device *dev = pep->dev; + pxa168_eth_stop(dev); + pxa168_eth_open(dev); +} + +static int rxq_process(struct net_device *dev, int budget) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + unsigned int received_packets = 0; + struct sk_buff *skb; + + while (budget-- > 0) { + int rx_next_curr_desc, rx_curr_desc, rx_used_desc; + struct rx_desc *rx_desc; + unsigned int cmd_sts; + + /* Do not process Rx ring in case of Rx ring resource error */ + if (pep->rx_resource_err) + break; + rx_curr_desc = pep->rx_curr_desc_q; + rx_used_desc = pep->rx_used_desc_q; + rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; + cmd_sts = rx_desc->cmd_sts; + rmb(); + if (cmd_sts & (BUF_OWNED_BY_DMA)) + break; + skb = pep->rx_skb[rx_curr_desc]; + pep->rx_skb[rx_curr_desc] = NULL; + + rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size; + pep->rx_curr_desc_q = rx_next_curr_desc; + + /* Rx descriptors exhausted. */ + /* Set the Rx ring resource error flag */ + if (rx_next_curr_desc == rx_used_desc) + pep->rx_resource_err = 1; + pep->rx_desc_count--; + dma_unmap_single(NULL, rx_desc->buf_ptr, + rx_desc->buf_size, + DMA_FROM_DEVICE); + received_packets++; + /* + * Update statistics. + * Note byte count includes 4 byte CRC count + */ + stats->rx_packets++; + stats->rx_bytes += rx_desc->byte_cnt; + /* + * In case received a packet without first / last bits on OR + * the error summary bit is on, the packets needs to be droped. 
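
txq_reclaim() above relies on the ownership handshake encoded in cmd_sts: the driver sets BUF_OWNED_BY_DMA when it queues a descriptor, the hardware clears it on completion, and reclaim stops at the first descriptor the DMA engine still owns. The single-threaded model below simulates that loop; the ring size is arbitrary and the "hardware" completing descriptors is faked by clearing the bit by hand.

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE	8
#define OWNED_BY_DMA	(1u << 31)

static uint32_t cmd_sts[RING_SIZE];
static int used_idx, desc_count;

static int reclaim(void)
{
	int released = 0;

	while (desc_count > 0) {
		if (cmd_sts[used_idx] & OWNED_BY_DMA)
			break;			/* hardware still owns it */
		used_idx = (used_idx + 1) % RING_SIZE;
		desc_count--;
		released++;
	}
	return released;
}

int main(void)
{
	int i;

	/* Queue four descriptors, pretend hardware finished the first two. */
	for (i = 0; i < 4; i++) {
		cmd_sts[i] = OWNED_BY_DMA;
		desc_count++;
	}
	cmd_sts[0] &= ~OWNED_BY_DMA;
	cmd_sts[1] &= ~OWNED_BY_DMA;

	printf("reclaimed %d descriptors, %d still in flight\n",
	       reclaim(), desc_count);
	return 0;
}
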
+ */ + if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) + || (cmd_sts & RX_ERROR)) { + + stats->rx_dropped++; + if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != + (RX_FIRST_DESC | RX_LAST_DESC)) { + if (net_ratelimit()) + netdev_err(dev, + "Rx pkt on multiple desc\n"); + } + if (cmd_sts & RX_ERROR) + stats->rx_errors++; + dev_kfree_skb_irq(skb); + } else { + /* + * The -4 is for the CRC in the trailer of the + * received packet + */ + skb_put(skb, rx_desc->byte_cnt - 4); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + } + /* Fill RX ring with skb's */ + rxq_refill(dev); + return received_packets; +} + +static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, + struct net_device *dev) +{ + u32 icr; + int ret = 0; + + icr = rdl(pep, INT_CAUSE); + if (icr == 0) + return IRQ_NONE; + + wrl(pep, INT_CAUSE, ~icr); + if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { + pep->work_todo |= WORK_TX_DONE; + ret = 1; + } + if (icr & ICR_RXBUF) + ret = 1; + return ret; +} + +static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (unlikely(!pxa168_eth_collect_events(pep, dev))) + return IRQ_NONE; + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + napi_schedule(&pep->napi); + return IRQ_HANDLED; +} + +static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) +{ + int skb_size; + + /* + * Reserve 2+14 bytes for an ethernet header (the hardware + * automatically prepends 2 bytes of dummy data to each + * received packet), 16 bytes for up to four VLAN tags, and + * 4 bytes for the trailing FCS -- 36 bytes total. + */ + skb_size = pep->dev->mtu + 36; + + /* + * Make sure that the skb size is a multiple of 8 bytes, as + * the lower three bits of the receive descriptor's buffer + * size field are ignored by the hardware. + */ + pep->skb_size = (skb_size + 7) & ~7; + + /* + * If NET_SKB_PAD is smaller than a cache line, + * netdev_alloc_skb() will cause skb->data to be misaligned + * to a cache line boundary. If this is the case, include + * some extra space to allow re-aligning the data area. 
+ */ + pep->skb_size += SKB_DMA_REALIGN; + +} + +static int set_port_config_ext(struct pxa168_eth_private *pep) +{ + int skb_size; + + pxa168_eth_recalc_skb_size(pep); + if (pep->skb_size <= 1518) + skb_size = PCXR_MFL_1518; + else if (pep->skb_size <= 1536) + skb_size = PCXR_MFL_1536; + else if (pep->skb_size <= 2048) + skb_size = PCXR_MFL_2048; + else + skb_size = PCXR_MFL_64K; + + /* Extended Port Configuration */ + wrl(pep, PORT_CONFIG_EXT, + PCXR_AN_SPEED_DIS | /* Disable HW AN */ + PCXR_AN_DUPLEX_DIS | + PCXR_AN_FLOWCTL_DIS | + PCXR_2BSM | /* Two byte prefix aligns IP hdr */ + PCXR_DSCP_EN | /* Enable DSCP in IP */ + skb_size | PCXR_FLP | /* do not force link pass */ + PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ + + return 0; +} + +static void pxa168_eth_adjust_link(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct phy_device *phy = pep->phy; + u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); + u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); + + cfg = cfg_o & ~PCR_DUPLEX_FULL; + cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN); + + if (phy->interface == PHY_INTERFACE_MODE_RMII) + cfgext |= PCXR_RMII_EN; + if (phy->speed == SPEED_100) + cfgext |= PCXR_SPEED_100; + if (phy->duplex) + cfg |= PCR_DUPLEX_FULL; + if (!phy->pause) + cfgext |= PCXR_FLOWCTL_DIS; + + /* Bail out if there has nothing changed */ + if (cfg == cfg_o && cfgext == cfgext_o) + return; + + wrl(pep, PORT_CONFIG, cfg); + wrl(pep, PORT_CONFIG_EXT, cfgext); + + phy_print_status(phy); +} + +static int pxa168_init_phy(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct ethtool_cmd cmd; + int err; + + if (pep->phy) + return 0; + + pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); + if (!pep->phy) + return -ENODEV; + + err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, + pep->phy_intf); + if (err) + return err; + + err = pxa168_get_settings(dev, &cmd); + if (err) + return err; + + cmd.phy_address = pep->phy_addr; + cmd.speed = pep->phy_speed; + cmd.duplex = pep->phy_duplex; + cmd.advertising = PHY_BASIC_FEATURES; + cmd.autoneg = AUTONEG_ENABLE; + + if (cmd.speed != 0) + cmd.autoneg = AUTONEG_DISABLE; + + return pxa168_set_settings(dev, &cmd); +} + +static int pxa168_init_hw(struct pxa168_eth_private *pep) +{ + int err = 0; + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + /* Abort any transmit and receive operations and put DMA + * in idle state. 
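
pxa168_eth_recalc_skb_size() above sizes receive buffers as MTU + 36 (2-byte hardware prefix, 14-byte Ethernet header, room for four VLAN tags, 4-byte FCS), rounds up to a multiple of 8 because the hardware ignores the low three bits of buf_size, and adds the DMA realign slack; set_port_config_ext() then picks the matching PCXR_MFL_* bucket. The sketch below reproduces that arithmetic, taking the realign padding as a parameter since its real value depends on the target kernel's PAGE_SIZE, NET_SKB_PAD and SMP_CACHE_BYTES.

#include <stdio.h>

static int recalc_skb_size(int mtu, int dma_realign)
{
	/* 2B HW prefix + 14B Ethernet header + 16B VLAN room + 4B FCS = 36 */
	int skb_size = mtu + 36;

	skb_size = (skb_size + 7) & ~7;	/* low 3 bits of buf_size are ignored */
	return skb_size + dma_realign;
}

static const char *mfl_bucket(int skb_size)
{
	if (skb_size <= 1518)
		return "PCXR_MFL_1518";
	if (skb_size <= 1536)
		return "PCXR_MFL_1536";
	if (skb_size <= 2048)
		return "PCXR_MFL_2048";
	return "PCXR_MFL_64K";
}

int main(void)
{
	int mtu;

	for (mtu = 68; mtu <= 9500; mtu += 1432) {
		int sz = recalc_skb_size(mtu, 0);

		printf("mtu %4d -> skb_size %5d -> %s\n", mtu, sz, mfl_bucket(sz));
	}
	return 0;
}
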
+ */ + abort_dma(pep); + /* Initialize address hash table */ + err = init_hash_table(pep); + if (err) + return err; + /* SDMA configuration */ + wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */ + SDCR_RIFB | /* Rx interrupt on frame */ + SDCR_BLMT | /* Little endian transmit */ + SDCR_BLMR | /* Little endian receive */ + SDCR_RC_MAX_RETRANS); /* Max retransmit count */ + /* Port Configuration */ + wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */ + set_port_config_ext(pep); + + return err; +} + +static int rxq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct rx_desc *p_rx_desc; + int size = 0, i = 0; + int rx_desc_num = pep->rx_ring_size; + + /* Allocate RX skb rings */ + pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, + GFP_KERNEL); + if (!pep->rx_skb) + return -ENOMEM; + + /* Allocate RX ring */ + pep->rx_desc_count = 0; + size = pep->rx_ring_size * sizeof(struct rx_desc); + pep->rx_desc_area_size = size; + pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, + &pep->rx_desc_dma, + GFP_KERNEL); + if (!pep->p_rx_desc_area) + goto out; + + /* initialize the next_desc_ptr links in the Rx descriptors ring */ + p_rx_desc = pep->p_rx_desc_area; + for (i = 0; i < rx_desc_num; i++) { + p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + + ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); + } + /* Save Rx desc pointer to driver struct. */ + pep->rx_curr_desc_q = 0; + pep->rx_used_desc_q = 0; + pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); + return 0; +out: + kfree(pep->rx_skb); + return -ENOMEM; +} + +static void rxq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int curr; + + /* Free preallocated skb's on RX rings */ + for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { + if (pep->rx_skb[curr]) { + dev_kfree_skb(pep->rx_skb[curr]); + pep->rx_desc_count--; + } + } + if (pep->rx_desc_count) + netdev_err(dev, "Error in freeing Rx Ring. 
%d skb's still\n", + pep->rx_desc_count); + /* Free RX ring */ + if (pep->p_rx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, + pep->p_rx_desc_area, pep->rx_desc_dma); + kfree(pep->rx_skb); +} + +static int txq_init(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct tx_desc *p_tx_desc; + int size = 0, i = 0; + int tx_desc_num = pep->tx_ring_size; + + pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, + GFP_KERNEL); + if (!pep->tx_skb) + return -ENOMEM; + + /* Allocate TX ring */ + pep->tx_desc_count = 0; + size = pep->tx_ring_size * sizeof(struct tx_desc); + pep->tx_desc_area_size = size; + pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, + &pep->tx_desc_dma, + GFP_KERNEL); + if (!pep->p_tx_desc_area) + goto out; + /* Initialize the next_desc_ptr links in the Tx descriptors ring */ + p_tx_desc = pep->p_tx_desc_area; + for (i = 0; i < tx_desc_num; i++) { + p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + + ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); + } + pep->tx_curr_desc_q = 0; + pep->tx_used_desc_q = 0; + pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); + return 0; +out: + kfree(pep->tx_skb); + return -ENOMEM; +} + +static void txq_deinit(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + /* Free outstanding skb's on TX ring */ + txq_reclaim(dev, 1); + BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); + /* Free TX ring */ + if (pep->p_tx_desc_area) + dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, + pep->p_tx_desc_area, pep->tx_desc_dma); + kfree(pep->tx_skb); +} + +static int pxa168_eth_open(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int err; + + err = pxa168_init_phy(dev); + if (err) + return err; + + err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev); + if (err) { + dev_err(&dev->dev, "can't assign irq\n"); + return -EAGAIN; + } + pep->rx_resource_err = 0; + err = rxq_init(dev); + if (err != 0) + goto out_free_irq; + err = txq_init(dev); + if (err != 0) + goto out_free_rx_skb; + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + + /* Fill RX ring with skb's */ + rxq_refill(dev); + pep->rx_used_desc_q = 0; + pep->rx_curr_desc_q = 0; + netif_carrier_off(dev); + napi_enable(&pep->napi); + eth_port_start(dev); + return 0; +out_free_rx_skb: + rxq_deinit(dev); +out_free_irq: + free_irq(dev->irq, dev); + return err; +} + +static int pxa168_eth_stop(struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + eth_port_reset(dev); + + /* Disable interrupts */ + wrl(pep, INT_MASK, 0); + wrl(pep, INT_CAUSE, 0); + /* Write to ICR to clear interrupts. */ + wrl(pep, INT_W_CLEAR, 0); + napi_disable(&pep->napi); + del_timer_sync(&pep->timeout); + netif_carrier_off(dev); + free_irq(dev->irq, dev); + rxq_deinit(dev); + txq_deinit(dev); + + return 0; +} + +static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) +{ + int retval; + struct pxa168_eth_private *pep = netdev_priv(dev); + + if ((mtu > 9500) || (mtu < 68)) + return -EINVAL; + + dev->mtu = mtu; + retval = set_port_config_ext(pep); + + if (!netif_running(dev)) + return 0; + + /* + * Stop and then re-open the interface. This will allocate RX + * skbs of the new MTU. + * There is a possible danger that the open will not succeed, + * due to memory being full. 
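
rxq_init() and txq_init() above chain the descriptors into a circular list that the DMA engine can follow on its own: each next_desc_ptr holds the bus address of the next descriptor, and the last one wraps back to the first. The snippet below builds the same chain in userspace using the rx_desc layout defined earlier in this file; the DMA base address is a made-up example value.

#include <stdio.h>
#include <stdint.h>

struct rx_desc {
	uint32_t cmd_sts;
	uint16_t byte_cnt;
	uint16_t buf_size;
	uint32_t buf_ptr;
	uint32_t next_desc_ptr;
};

int main(void)
{
	enum { RING = 4 };
	struct rx_desc ring[RING] = { 0 };
	const uint32_t dma_base = 0x1f000000;	/* hypothetical bus address */
	int i;

	for (i = 0; i < RING; i++)
		ring[i].next_desc_ptr =
			dma_base + ((i + 1) % RING) * sizeof(struct rx_desc);

	for (i = 0; i < RING; i++)
		printf("desc %d @ 0x%08x -> next 0x%08x\n", i,
		       dma_base + i * (uint32_t)sizeof(struct rx_desc),
		       ring[i].next_desc_ptr);
	return 0;
}
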
+ */ + pxa168_eth_stop(dev); + if (pxa168_eth_open(dev)) { + dev_err(&dev->dev, + "fatal error on re-opening device after MTU change\n"); + } + + return 0; +} + +static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep) +{ + int tx_desc_curr; + + tx_desc_curr = pep->tx_curr_desc_q; + pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; + BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); + pep->tx_desc_count++; + + return tx_desc_curr; +} + +static int pxa168_rx_poll(struct napi_struct *napi, int budget) +{ + struct pxa168_eth_private *pep = + container_of(napi, struct pxa168_eth_private, napi); + struct net_device *dev = pep->dev; + int work_done = 0; + + /* + * We call txq_reclaim every time since in NAPI interupts are disabled + * and due to this we miss the TX_DONE interrupt,which is not updated in + * interrupt status register. + */ + txq_reclaim(dev, 0); + if (netif_queue_stopped(dev) + && pep->tx_ring_size - pep->tx_desc_count > 1) { + netif_wake_queue(dev); + } + work_done = rxq_process(dev, budget); + if (work_done < budget) { + napi_complete(napi); + wrl(pep, INT_MASK, ALL_INTS); + } + + return work_done; +} + +static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct tx_desc *desc; + int tx_index; + int length; + + tx_index = eth_alloc_tx_desc_index(pep); + desc = &pep->p_tx_desc_area[tx_index]; + length = skb->len; + pep->tx_skb[tx_index] = skb; + desc->byte_cnt = length; + desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); + + skb_tx_timestamp(skb); + + wmb(); + desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | + TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT; + wmb(); + wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); + + stats->tx_bytes += length; + stats->tx_packets++; + dev->trans_start = jiffies; + if (pep->tx_ring_size - pep->tx_desc_count <= 1) { + /* We handled the current skb, but now we are out of space.*/ + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + +static int smi_wait_ready(struct pxa168_eth_private *pep) +{ + int i = 0; + + /* wait for the SMI register to become available */ + for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) { + if (i == PHY_WAIT_ITERATIONS) + return -ETIMEDOUT; + msleep(10); + } + + return 0; +} + +static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct pxa168_eth_private *pep = bus->priv; + int i = 0; + int val; + + if (smi_wait_ready(pep)) { + netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R); + /* now wait for the data to be valid */ + for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) { + if (i == PHY_WAIT_ITERATIONS) { + netdev_warn(pep->dev, + "pxa168_eth: SMI bus read not valid\n"); + return -ENODEV; + } + msleep(10); + } + + return val & 0xffff; +} + +static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) +{ + struct pxa168_eth_private *pep = bus->priv; + + if (smi_wait_ready(pep)) { + netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | + SMI_OP_W | (value & 0xffff)); + + if (smi_wait_ready(pep)) { + netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct 
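
pxa168_smi_read() and pxa168_smi_write() above drive the MDIO bus by writing a single command word to the SMI register: the PHY address in bits 20:16, the register number in bits 25:21, the read/write opcode in bit 26, and, for writes, the 16-bit data in the low half, with SMI_BUSY and SMI_R_VALID polled around the transfer. The snippet below only computes and prints those command words (bit positions taken from the defines earlier in this file); MII registers 0 and 1 are the standard BMCR/BMSR.

#include <stdio.h>
#include <stdint.h>

#define SMI_BUSY	(1u << 28)
#define SMI_R_VALID	(1u << 27)
#define SMI_OP_W	(0u << 26)
#define SMI_OP_R	(1u << 26)

static uint32_t smi_read_cmd(unsigned int phy_addr, unsigned int regnum)
{
	return (phy_addr << 16) | (regnum << 21) | SMI_OP_R;
}

static uint32_t smi_write_cmd(unsigned int phy_addr, unsigned int regnum,
			      uint16_t value)
{
	return (phy_addr << 16) | (regnum << 21) | SMI_OP_W | value;
}

int main(void)
{
	/* PHY at address 0: read register 1 (BMSR), write 0x3100 to 0 (BMCR) */
	printf("read  cmd = 0x%08x\n", smi_read_cmd(0, 1));
	printf("write cmd = 0x%08x\n", smi_write_cmd(0, 0, 0x3100));
	/* A real transfer first polls SMI_BUSY clear, then (for reads)
	 * polls SMI_R_VALID and masks the result with 0xffff. */
	return 0;
}
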
pxa168_eth_private *pep = netdev_priv(dev); + if (pep->phy != NULL) + return phy_mii_ioctl(pep->phy, ifr, cmd); + + return -EOPNOTSUPP; +} + +static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + int err; + + err = phy_read_status(pep->phy); + if (err == 0) + err = phy_ethtool_gset(pep->phy, cmd); + + return err; +} + +static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct pxa168_eth_private *pep = netdev_priv(dev); + + return phy_ethtool_sset(pep->phy, cmd); +} + +static void pxa168_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); +} + +static const struct ethtool_ops pxa168_ethtool_ops = { + .get_settings = pxa168_get_settings, + .set_settings = pxa168_set_settings, + .get_drvinfo = pxa168_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct net_device_ops pxa168_eth_netdev_ops = { + .ndo_open = pxa168_eth_open, + .ndo_stop = pxa168_eth_stop, + .ndo_start_xmit = pxa168_eth_start_xmit, + .ndo_set_rx_mode = pxa168_eth_set_rx_mode, + .ndo_set_mac_address = pxa168_eth_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = pxa168_eth_do_ioctl, + .ndo_change_mtu = pxa168_eth_change_mtu, + .ndo_tx_timeout = pxa168_eth_tx_timeout, +}; + +static int pxa168_eth_probe(struct platform_device *pdev) +{ + struct pxa168_eth_private *pep = NULL; + struct net_device *dev = NULL; + struct resource *res; + struct clk *clk; + struct device_node *np; + const unsigned char *mac_addr = NULL; + int err; + + printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); + return -ENODEV; + } + clk_prepare_enable(clk); + + dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); + if (!dev) { + err = -ENOMEM; + goto err_clk; + } + + platform_set_drvdata(pdev, dev); + pep = netdev_priv(dev); + pep->dev = dev; + pep->clk = clk; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pep->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pep->base)) { + err = -ENOMEM; + goto err_netdev; + } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + BUG_ON(!res); + dev->irq = res->start; + dev->netdev_ops = &pxa168_eth_netdev_ops; + dev->watchdog_timeo = 2 * HZ; + dev->base_addr = 0; + dev->ethtool_ops = &pxa168_ethtool_ops; + + INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task); + + if (pdev->dev.of_node) + mac_addr = of_get_mac_address(pdev->dev.of_node); + + if (mac_addr && is_valid_ether_addr(mac_addr)) { + ether_addr_copy(dev->dev_addr, mac_addr); + } else { + /* try reading the mac address, if set by the bootloader */ + pxa168_eth_get_mac_address(dev, dev->dev_addr); + if (!is_valid_ether_addr(dev->dev_addr)) { + dev_info(&pdev->dev, "Using random mac address\n"); + eth_hw_addr_random(dev); + } + } + + pep->rx_ring_size = NUM_RX_DESCS; + pep->tx_ring_size = NUM_TX_DESCS; + + pep->pd = dev_get_platdata(&pdev->dev); + if (pep->pd) { + if (pep->pd->rx_queue_size) + pep->rx_ring_size = pep->pd->rx_queue_size; + + if (pep->pd->tx_queue_size) + pep->tx_ring_size = pep->pd->tx_queue_size; + + pep->port_num = 
pep->pd->port_number; + pep->phy_addr = pep->pd->phy_addr; + pep->phy_speed = pep->pd->speed; + pep->phy_duplex = pep->pd->duplex; + pep->phy_intf = pep->pd->intf; + + if (pep->pd->init) + pep->pd->init(); + } else if (pdev->dev.of_node) { + of_property_read_u32(pdev->dev.of_node, "port-id", + &pep->port_num); + + np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!np) { + dev_err(&pdev->dev, "missing phy-handle\n"); + err = -EINVAL; + goto err_netdev; + } + of_property_read_u32(np, "reg", &pep->phy_addr); + pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); + } + + /* Hardware supports only 3 ports */ + BUG_ON(pep->port_num > 2); + netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); + + memset(&pep->timeout, 0, sizeof(struct timer_list)); + init_timer(&pep->timeout); + pep->timeout.function = rxq_refill_timer_wrapper; + pep->timeout.data = (unsigned long)pep; + + pep->smi_bus = mdiobus_alloc(); + if (pep->smi_bus == NULL) { + err = -ENOMEM; + goto err_netdev; + } + pep->smi_bus->priv = pep; + pep->smi_bus->name = "pxa168_eth smi"; + pep->smi_bus->read = pxa168_smi_read; + pep->smi_bus->write = pxa168_smi_write; + snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d", + pdev->name, pdev->id); + pep->smi_bus->parent = &pdev->dev; + pep->smi_bus->phy_mask = 0xffffffff; + err = mdiobus_register(pep->smi_bus); + if (err) + goto err_free_mdio; + + SET_NETDEV_DEV(dev, &pdev->dev); + pxa168_init_hw(pep); + err = register_netdev(dev); + if (err) + goto err_mdiobus; + return 0; + +err_mdiobus: + mdiobus_unregister(pep->smi_bus); +err_free_mdio: + mdiobus_free(pep->smi_bus); +err_netdev: + free_netdev(dev); +err_clk: + clk_disable_unprepare(clk); + return err; +} + +static int pxa168_eth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct pxa168_eth_private *pep = netdev_priv(dev); + + if (pep->htpr) { + dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, + pep->htpr, pep->htpr_dma); + pep->htpr = NULL; + } + if (pep->phy) + phy_disconnect(pep->phy); + if (pep->clk) { + clk_disable_unprepare(pep->clk); + } + + mdiobus_unregister(pep->smi_bus); + mdiobus_free(pep->smi_bus); + unregister_netdev(dev); + cancel_work_sync(&pep->tx_timeout_task); + free_netdev(dev); + return 0; +} + +static void pxa168_eth_shutdown(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + eth_port_reset(dev); +} + +#ifdef CONFIG_PM +static int pxa168_eth_resume(struct platform_device *pdev) +{ + return -ENOSYS; +} + +static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state) +{ + return -ENOSYS; +} + +#else +#define pxa168_eth_resume NULL +#define pxa168_eth_suspend NULL +#endif + +static const struct of_device_id pxa168_eth_of_match[] = { + { .compatible = "marvell,pxa168-eth" }, + { }, +}; +MODULE_DEVICE_TABLE(of, pxa168_eth_of_match); + +static struct platform_driver pxa168_eth_driver = { + .probe = pxa168_eth_probe, + .remove = pxa168_eth_remove, + .shutdown = pxa168_eth_shutdown, + .resume = pxa168_eth_resume, + .suspend = pxa168_eth_suspend, + .driver = { + .name = DRIVER_NAME, + .of_match_table = of_match_ptr(pxa168_eth_of_match), + }, +}; + +module_platform_driver(pxa168_eth_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168"); +MODULE_ALIAS("platform:pxa168_eth"); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c new file mode 100644 index 000000000..7173836fe --- /dev/null +++ 
b/drivers/net/ethernet/marvell/skge.c @@ -0,0 +1,4226 @@ +/* + * New driver for Marvell Yukon chipset and SysKonnect Gigabit + * Ethernet adapters. Based on earlier sk98lin, e100 and + * FreeBSD if_sk drivers. + * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. + * + * Copyright (C) 2004, 2005 Stephen Hemminger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "skge.h" + +#define DRV_NAME "skge" +#define DRV_VERSION "1.14" + +#define DEFAULT_TX_RING_SIZE 128 +#define DEFAULT_RX_RING_SIZE 512 +#define MAX_TX_RING_SIZE 1024 +#define TX_LOW_WATER (MAX_SKB_FRAGS + 1) +#define MAX_RX_RING_SIZE 4096 +#define RX_COPY_THRESHOLD 128 +#define RX_BUF_SIZE 1536 +#define PHY_RETRIES 1000 +#define ETH_JUMBO_MTU 9000 +#define TX_WATCHDOG (5 * HZ) +#define NAPI_WEIGHT 64 +#define BLINK_MS 250 +#define LINK_HZ HZ + +#define SKGE_EEPROM_MAGIC 0x9933aabb + + +MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); +MODULE_AUTHOR("Stephen Hemminger "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static const struct pci_device_id skge_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */ + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */ +#ifdef CONFIG_SKGE_GENESIS + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */ +#endif + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ + { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */ + { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */ + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, skge_id_table); + +static int skge_up(struct net_device *dev); +static int skge_down(struct net_device *dev); +static void skge_phy_reset(struct skge_port *skge); +static void skge_tx_clean(struct net_device *dev); +static int xm_phy_write(struct skge_hw *hw, int 
port, u16 reg, u16 val); +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static void genesis_get_stats(struct skge_port *skge, u64 *data); +static void yukon_get_stats(struct skge_port *skge, u64 *data); +static void yukon_init(struct skge_hw *hw, int port); +static void genesis_mac_init(struct skge_hw *hw, int port); +static void genesis_link_up(struct skge_port *skge); +static void skge_set_multicast(struct net_device *dev); +static irqreturn_t skge_intr(int irq, void *dev_id); + +/* Avoid conditionals by using array */ +static const int txqaddr[] = { Q_XA1, Q_XA2 }; +static const int rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; +static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; +static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; +static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 }; + +static inline bool is_genesis(const struct skge_hw *hw) +{ +#ifdef CONFIG_SKGE_GENESIS + return hw->chip_id == CHIP_ID_GENESIS; +#else + return false; +#endif +} + +static int skge_get_regs_len(struct net_device *dev) +{ + return 0x4000; +} + +/* + * Returns copy of whole control register region + * Note: skip RAM address register because accessing it will + * cause bus hangs! + */ +static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct skge_port *skge = netdev_priv(dev); + const void __iomem *io = skge->hw->regs; + + regs->version = 1; + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); + + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); +} + +/* Wake on Lan only supported on Yukon chips with rev 1 or above */ +static u32 wol_supported(const struct skge_hw *hw) +{ + if (is_genesis(hw)) + return 0; + + if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + return 0; + + return WAKE_MAGIC | WAKE_PHY; +} + +static void skge_wol_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + skge_write16(hw, B0_CTST, CS_RST_CLR); + skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + /* Turn on Vaux */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + u32 reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_SET); + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_CLR); + + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100 skge_reset will re-enable on resume */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, + (PHY_AN_100FULL | PHY_AN_100HALF | + PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA)); + /* no 1000 HD/FD */ + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_CTRL, + PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | + PHY_CT_RE_CFG | PHY_CT_DUP_MD); + + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, 
WOL_MAC_ADDR), + skge->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (skge->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (skge->wol & WAKE_MAGIC) + ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); + + /* block receiver */ + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); +} + +static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + + wol->supported = wol_supported(skge->hw); + wol->wolopts = skge->wol; +} + +static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + if ((wol->wolopts & ~wol_supported(hw)) || + !device_can_wakeup(&hw->pdev->dev)) + return -EOPNOTSUPP; + + skge->wol = wol->wolopts; + + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + + return 0; +} + +/* Determine supported/advertised modes based on hardware. + * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx + */ +static u32 skge_supported_modes(const struct skge_hw *hw) +{ + u32 supported; + + if (hw->copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + + if (is_genesis(hw)) + supported &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + + else if (hw->chip_id == CHIP_ID_YUKON) + supported &= ~SUPPORTED_1000baseT_Half; + } else + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + return supported; +} + +static int skge_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = skge_supported_modes(hw); + + if (hw->copper) { + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + } else + ecmd->port = PORT_FIBRE; + + ecmd->advertising = skge->advertising; + ecmd->autoneg = skge->autoneg; + ethtool_cmd_speed_set(ecmd, skge->speed); + ecmd->duplex = skge->duplex; + return 0; +} + +static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + u32 supported = skge_supported_modes(hw); + int err = 0; + + if (ecmd->autoneg == AUTONEG_ENABLE) { + ecmd->advertising = supported; + skge->duplex = -1; + skge->speed = -1; + } else { + u32 setting; + u32 speed = ethtool_cmd_speed(ecmd); + + switch (speed) { + case SPEED_1000: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (ecmd->duplex == DUPLEX_FULL) 
+ setting = SUPPORTED_10baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + skge->speed = speed; + skge->duplex = ecmd->duplex; + } + + skge->autoneg = ecmd->autoneg; + skge->advertising = ecmd->advertising; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +static void skge_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct skge_port *skge = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(skge->hw->pdev), + sizeof(info->bus_info)); +} + +static const struct skge_stat { + char name[ETH_GSTRING_LEN]; + u16 xmac_offset; + u16 gma_offset; +} skge_stats[] = { + { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, + { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, + + { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, + { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, + { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, + { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, + { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, + { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, + { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, + { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, + + { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, + { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, + { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, + { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, + { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, + { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, + + { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, + { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, + { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, +}; + +static int skge_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(skge_stats); + default: + return -EOPNOTSUPP; + } +} + +static void skge_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); +} + +/* Use hardware MIB variables for critical path statistics and + * transmit feedback not reported at interrupt. + * Other errors are accounted for in interrupt handler. 
+ */ +static struct net_device_stats *skge_get_stats(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u64 data[ARRAY_SIZE(skge_stats)]; + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); + + dev->stats.tx_bytes = data[0]; + dev->stats.rx_bytes = data[1]; + dev->stats.tx_packets = data[2] + data[4] + data[6]; + dev->stats.rx_packets = data[3] + data[5] + data[7]; + dev->stats.multicast = data[3] + data[5]; + dev->stats.collisions = data[10]; + dev->stats.tx_aborted_errors = data[12]; + + return &dev->stats; +} + +static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(skge_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + skge_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static void skge_get_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + + p->rx_max_pending = MAX_RX_RING_SIZE; + p->tx_max_pending = MAX_TX_RING_SIZE; + + p->rx_pending = skge->rx_ring.count; + p->tx_pending = skge->tx_ring.count; +} + +static int skge_set_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + int err = 0; + + if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || + p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) + return -EINVAL; + + skge->rx_ring.count = p->rx_pending; + skge->tx_ring.count = p->tx_pending; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) + dev_close(dev); + } + + return err; +} + +static u32 skge_get_msglevel(struct net_device *netdev) +{ + struct skge_port *skge = netdev_priv(netdev); + return skge->msg_enable; +} + +static void skge_set_msglevel(struct net_device *netdev, u32 value) +{ + struct skge_port *skge = netdev_priv(netdev); + skge->msg_enable = value; +} + +static int skge_nway_reset(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) + return -EINVAL; + + skge_phy_reset(skge); + return 0; +} + +static void skge_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + + ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || + (skge->flow_control == FLOW_MODE_SYM_OR_REM)); + ecmd->tx_pause = (ecmd->rx_pause || + (skge->flow_control == FLOW_MODE_LOC_SEND)); + + ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; +} + +static int skge_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct ethtool_pauseparam old; + int err = 0; + + skge_get_pauseparam(dev, &old); + + if (ecmd->autoneg != old.autoneg) + skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; + else { + if (ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYMMETRIC; + else if (ecmd->rx_pause && !ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYM_OR_REM; + else if (!ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_LOC_SEND; + else + skge->flow_control = FLOW_MODE_NONE; + } + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +/* Chip internal frequency for clock calculations */ +static inline u32 hwkhz(const struct skge_hw *hw) +{ + return is_genesis(hw) ? 
53125 : 78125; +} + +/* Chip HZ to microseconds */ +static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) +{ + return (ticks * 1000) / hwkhz(hw); +} + +/* Microseconds to chip HZ */ +static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) +{ + return hwkhz(hw) * usec / 1000; +} + +static int skge_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + ecmd->rx_coalesce_usecs = 0; + ecmd->tx_coalesce_usecs = 0; + + if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { + u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); + u32 msk = skge_read32(hw, B2_IRQM_MSK); + + if (msk & rxirqmask[port]) + ecmd->rx_coalesce_usecs = delay; + if (msk & txirqmask[port]) + ecmd->tx_coalesce_usecs = delay; + } + + return 0; +} + +/* Note: interrupt timer is per board, but can turn on/off per port */ +static int skge_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 msk = skge_read32(hw, B2_IRQM_MSK); + u32 delay = 25; + + if (ecmd->rx_coalesce_usecs == 0) + msk &= ~rxirqmask[port]; + else if (ecmd->rx_coalesce_usecs < 25 || + ecmd->rx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= rxirqmask[port]; + delay = ecmd->rx_coalesce_usecs; + } + + if (ecmd->tx_coalesce_usecs == 0) + msk &= ~txirqmask[port]; + else if (ecmd->tx_coalesce_usecs < 25 || + ecmd->tx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= txirqmask[port]; + delay = min(delay, ecmd->rx_coalesce_usecs); + } + + skge_write32(hw, B2_IRQM_MSK, msk); + if (msk == 0) + skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); + else { + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + } + return 0; +} + +enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; +static void skge_led(struct skge_port *skge, enum led_mode mode) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + switch (mode) { + case LED_MODE_OFF: + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); + else { + skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); + } + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); + break; + + case LED_MODE_ON: + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); + + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + + break; + + case LED_MODE_TST: + skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); + else { + skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + } + + } + } else { + switch (mode) { + case LED_MODE_OFF: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_OFF) | + PHY_M_LED_MO_10(MO_LED_OFF) | + 
PHY_M_LED_MO_100(MO_LED_OFF) | + PHY_M_LED_MO_1000(MO_LED_OFF) | + PHY_M_LED_MO_RX(MO_LED_OFF)); + break; + case LED_MODE_ON: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, + PHY_M_LED_PULS_DUR(PULS_170MS) | + PHY_M_LED_BLINK_RT(BLINK_84MS) | + PHY_M_LEDC_TX_CTRL | + PHY_M_LEDC_DP_CTRL); + + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_RX(MO_LED_OFF) | + (skge->speed == SPEED_100 ? + PHY_M_LED_MO_100(MO_LED_ON) : 0)); + break; + case LED_MODE_TST: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_ON) | + PHY_M_LED_MO_10(MO_LED_ON) | + PHY_M_LED_MO_100(MO_LED_ON) | + PHY_M_LED_MO_1000(MO_LED_ON) | + PHY_M_LED_MO_RX(MO_LED_ON)); + } + } + spin_unlock_bh(&hw->phy_lock); +} + +/* blink LED's for finding board */ +static int skge_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct skge_port *skge = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + skge_led(skge, LED_MODE_TST); + break; + + case ETHTOOL_ID_OFF: + skge_led(skge, LED_MODE_OFF); + break; + + case ETHTOOL_ID_INACTIVE: + /* back to regular LED state */ + skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); + } + + return 0; +} + +static int skge_get_eeprom_len(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u32 reg2; + + pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2); + return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +{ + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; +} + +static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +{ + pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, + offset | PCI_VPD_ADDR_F); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); +} + +static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + eeprom->magic = SKGE_EEPROM_MAGIC; + + while (length > 0) { + u32 val = skge_vpd_read(pdev, cap, offset); + int n = min_t(int, length, sizeof(val)); + + memcpy(data, &val, n); + length -= n; + data += n; + offset += n; + } + return 0; +} + +static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKGE_EEPROM_MAGIC) + return -EINVAL; + + while (length > 0) { + u32 val; + int n = min_t(int, length, sizeof(val)); + + if (n < sizeof(val)) + val = skge_vpd_read(pdev, cap, offset); + memcpy(&val, data, n); + + skge_vpd_write(pdev, cap, offset, val); + + length -= n; + data += n; + offset += n; + } + return 0; +} + +static const struct ethtool_ops skge_ethtool_ops = { + .get_settings = skge_get_settings, + 
.set_settings = skge_set_settings, + .get_drvinfo = skge_get_drvinfo, + .get_regs_len = skge_get_regs_len, + .get_regs = skge_get_regs, + .get_wol = skge_get_wol, + .set_wol = skge_set_wol, + .get_msglevel = skge_get_msglevel, + .set_msglevel = skge_set_msglevel, + .nway_reset = skge_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = skge_get_eeprom_len, + .get_eeprom = skge_get_eeprom, + .set_eeprom = skge_set_eeprom, + .get_ringparam = skge_get_ring_param, + .set_ringparam = skge_set_ring_param, + .get_pauseparam = skge_get_pauseparam, + .set_pauseparam = skge_set_pauseparam, + .get_coalesce = skge_get_coalesce, + .set_coalesce = skge_set_coalesce, + .get_strings = skge_get_strings, + .set_phys_id = skge_set_phys_id, + .get_sset_count = skge_get_sset_count, + .get_ethtool_stats = skge_get_ethtool_stats, +}; + +/* + * Allocate ring elements and chain them together + * One-to-one association of board descriptors with ring elements + */ +static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) +{ + struct skge_tx_desc *d; + struct skge_element *e; + int i; + + ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); + if (!ring->start) + return -ENOMEM; + + for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { + e->desc = d; + if (i == ring->count - 1) { + e->next = ring->start; + d->next_offset = base; + } else { + e->next = e + 1; + d->next_offset = base + (i+1) * sizeof(*d); + } + } + ring->to_use = ring->to_clean = ring->start; + + return 0; +} + +/* Allocate and setup a new buffer for receiving */ +static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) +{ + struct skge_rx_desc *rd = e->desc; + dma_addr_t map; + + map = pci_map_single(skge->hw->pdev, skb->data, bufsize, + PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(skge->hw->pdev, map)) + return -1; + + rd->dma_lo = lower_32_bits(map); + rd->dma_hi = upper_32_bits(map); + e->skb = skb; + rd->csum1_start = ETH_HLEN; + rd->csum2_start = ETH_HLEN; + rd->csum1 = 0; + rd->csum2 = 0; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, bufsize); + return 0; +} + +/* Resume receiving using existing skb, + * Note: DMA address is not changed by chip. + * MTU not changed while receiver active. + */ +static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) +{ + struct skge_rx_desc *rd = e->desc; + + rd->csum2 = 0; + rd->csum2_start = ETH_HLEN; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; +} + + +/* Free all buffers in receive ring, assumes receiver stopped */ +static void skge_rx_clean(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct skge_rx_desc *rd = e->desc; + rd->control = 0; + if (e->skb) { + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(e->skb); + e->skb = NULL; + } + } while ((e = e->next) != ring->start); +} + + +/* Allocate buffers for receive ring + * For receive: to_clean is next received frame. 
+ */ +static int skge_rx_fill(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct sk_buff *skb; + + skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NET_IP_ALIGN); + if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { + dev_kfree_skb(skb); + return -EIO; + } + } while ((e = e->next) != ring->start); + + ring->to_clean = ring->start; + return 0; +} + +static const char *skge_pause(enum pause_status status) +{ + switch (status) { + case FLOW_STAT_NONE: + return "none"; + case FLOW_STAT_REM_SEND: + return "rx only"; + case FLOW_STAT_LOC_SEND: + return "tx_only"; + case FLOW_STAT_SYMMETRIC: /* Both station may send PAUSE */ + return "both"; + default: + return "indeterminated"; + } +} + + +static void skge_link_up(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), + LED_BLK_OFF|LED_SYNC_OFF|LED_ON); + + netif_carrier_on(skge->netdev); + netif_wake_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + skge->speed, + skge->duplex == DUPLEX_FULL ? "full" : "half", + skge_pause(skge->flow_status)); +} + +static void skge_link_down(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + netif_carrier_off(skge->netdev); + netif_stop_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, "Link is down\n"); +} + +static void xm_link_down(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + + if (netif_carrier_ok(dev)) + skge_link_down(skge); +} + +static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + *val = xm_read16(hw, port, XM_PHY_DATA); + + if (hw->phy_type == SK_PHY_XMAC) + goto ready; + + for (i = 0; i < PHY_RETRIES; i++) { + if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) + goto ready; + udelay(1); + } + + return -ETIMEDOUT; + ready: + *val = xm_read16(hw, port, XM_PHY_DATA); + + return 0; +} + +static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__xm_phy_read(hw, port, reg, &v)) + pr_warn("%s: phy read timed out\n", hw->dev[port]->name); + return v; +} + +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + goto ready; + udelay(1); + } + return -EIO; + + ready: + xm_write16(hw, port, XM_PHY_DATA, val); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void genesis_init(struct skge_hw *hw) +{ + /* set blink source counter */ + skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100); + skge_write8(hw, B2_BSC_CTRL, BSC_START); + + /* configure mac arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure mac arbiter timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + 
skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* configure packet arbiter timeout */ + skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); + skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); +} + +static void genesis_reset(struct skge_hw *hw, int port) +{ + static const u8 zero[8] = { 0 }; + u32 reg; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + /* reset the statistics module */ + xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ + xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ + + /* disable Broadcom PHY IRQ */ + if (hw->phy_type == SK_PHY_BCOM) + xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); + + xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); +} + +/* Convert mode to MII values */ +static const u16 phy_pause_map[] = { + [FLOW_MODE_NONE] = 0, + [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, + [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, + [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, +}; + +/* special defines for FIBER (88E1011S only) */ +static const u16 fiber_pause_map[] = { + [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, + [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, + [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, + [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, +}; + + +/* Check status of Broadcom phy link */ +static void bcom_check_link(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_BCOM_STAT); + status = xm_phy_read(hw, port, PHY_BCOM_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, aux; + + if (!(status & PHY_ST_AN_OVER)) + return; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return; + } + + aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); + + /* Check Duplex mismatch */ + switch (aux & PHY_B_AS_AN_RES_MSK) { + case PHY_B_RES_1000FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_B_RES_1000HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (aux & PHY_B_AS_PAUSE_MSK) { + case PHY_B_AS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_B_AS_PRR: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_B_AS_PRT: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); +} + +/* Broadcom 5400 only supports gigabit! 
SysKonnect did not put an additional + * Phy on for 100 or 10Mbit operation + */ +static void bcom_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + u16 id1, r, ext, ctl; + + /* magic workaround patterns for Broadcom */ + static const struct { + u16 reg; + u16 val; + } A1hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, + { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, + { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, + { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, + }, C0hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, + { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, + }; + + /* read Id from external PHY (all have the same address) */ + id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); + + /* Optimize MDIO transfer by suppressing preamble. */ + r = xm_read16(hw, port, XM_MMU_CMD); + r |= XM_MMU_NO_PRE; + xm_write16(hw, port, XM_MMU_CMD, r); + + switch (id1) { + case PHY_BCOM_ID1_C0: + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(C0hack); i++) + xm_phy_write(hw, port, + C0hack[i].reg, C0hack[i].val); + + break; + case PHY_BCOM_ID1_A1: + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(A1hack); i++) + xm_phy_write(hw, port, + A1hack[i].reg, A1hack[i].val); + break; + } + + /* + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. + */ + r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); + r |= PHY_B_AC_DIS_PM; + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); + + /* Dummy read */ + xm_read16(hw, port, XM_ISRC); + + ext = PHY_B_PEC_EN_LTR; /* enable tx led */ + ctl = PHY_CT_SP1000; /* always 1000mbit */ + + if (skge->autoneg == AUTONEG_ENABLE) { + /* + * Workaround BCOM Errata #1 for the C5 type. 
+ * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + u16 adv = PHY_B_1000C_RD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_B_1000C_AHD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_B_1000C_AFD; + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); + + ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + if (skge->duplex == DUPLEX_FULL) + ctl |= PHY_CT_DUP_MD; + /* Force to slave */ + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); + } + + /* Set autonegotiation pause parameters */ + xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, + phy_pause_map[skge->flow_control] | PHY_AN_CSMA); + + /* Handle Jumbo frames */ + if (hw->dev[port]->mtu > ETH_DATA_LEN) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); + + ext |= PHY_B_PEC_HIGH_LA; + + } + + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); + xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); + + /* Use link status change interrupt */ + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); +} + +static void xm_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl = 0; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (skge->advertising & ADVERTISED_1000baseT_Half) + ctrl |= PHY_X_AN_HD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + ctrl |= PHY_X_AN_FD; + + ctrl |= fiber_pause_map[skge->flow_control]; + + xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); + + /* Restart Auto-negotiation */ + ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* Set DuplexMode in Config register */ + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + /* + * Do NOT enable Auto-negotiation here. This would hold + * the link down because no IDLEs are transmitted + */ + } + + xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); + + /* Poll PHY for status changes */ + mod_timer(&skge->link_timer, jiffies + LINK_HZ); +} + +static int xm_check_link(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_XMAC_STAT); + status = xm_phy_read(hw, port, PHY_XMAC_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return 0; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, res; + + if (!(status & PHY_ST_AN_OVER)) + return 0; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return 0; + } + + res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); + + /* Check Duplex mismatch */ + switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { + case PHY_X_RS_FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_X_RS_HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return 0; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((skge->flow_control == FLOW_MODE_SYMMETRIC || + skge->flow_control == FLOW_MODE_SYM_OR_REM) && + (lpa & PHY_X_P_SYM_MD)) + skge->flow_status = FLOW_STAT_SYMMETRIC; + else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) + /* Enable PAUSE receive, disable PAUSE transmit */ + skge->flow_status = FLOW_STAT_REM_SEND; + else if (skge->flow_control == FLOW_MODE_LOC_SEND && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) + /* Disable PAUSE receive, enable PAUSE transmit */ + skge->flow_status = FLOW_STAT_LOC_SEND; + else + 
skge->flow_status = FLOW_STAT_NONE; + + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); + return 1; +} + +/* Poll to check for link coming up. + * + * Since internal PHY is wired to a level triggered pin, can't + * get an interrupt when carrier is detected, need to poll for + * link coming up. + */ +static void xm_link_timer(unsigned long arg) +{ + struct skge_port *skge = (struct skge_port *) arg; + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long flags; + + if (!netif_running(dev)) + return; + + spin_lock_irqsave(&hw->phy_lock, flags); + + /* + * Verify that the link by checking GPIO register three times. + * This pin has the signal from the link_sync pin connected to it. + */ + for (i = 0; i < 3; i++) { + if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) + goto link_down; + } + + /* Re-enable interrupt to detect link down */ + if (xm_check_link(dev)) { + u16 msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_INP_ASS; + xm_write16(hw, port, XM_IMSK, msk); + xm_read16(hw, port, XM_ISRC); + } else { +link_down: + mod_timer(&skge->link_timer, + round_jiffies(jiffies + LINK_HZ)); + } + spin_unlock_irqrestore(&hw->phy_lock, flags); +} + +static void genesis_mac_init(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; + int i; + u32 r; + static const u8 zero[6] = { 0 }; + + for (i = 0; i < 10; i++) { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_SET_MAC_RST); + if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) + goto reset_ok; + udelay(1); + } + + netdev_warn(dev, "genesis reset failed\n"); + + reset_ok: + /* Unreset the XMAC. */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + + /* + * Perform additional initialization for external PHYs, + * namely for the 1000baseTX cards that use the XMAC's + * GMII mode. + */ + if (hw->phy_type != SK_PHY_XMAC) { + /* Take external Phy out of reset */ + r = skge_read32(hw, B2_GP_IO); + if (port == 0) + r |= GP_DIR_0|GP_IO_0; + else + r |= GP_DIR_2|GP_IO_2; + + skge_write32(hw, B2_GP_IO, r); + + /* Enable GMII interface */ + xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); + } + + + switch (hw->phy_type) { + case SK_PHY_XMAC: + xm_phy_init(skge); + break; + case SK_PHY_BCOM: + bcom_phy_init(skge); + bcom_check_link(hw, port); + } + + /* Set Station Address */ + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + + /* We don't use match addresses so clear */ + for (i = 1; i < 16; i++) + xm_outaddr(hw, port, XM_EXM(i), zero); + + /* Clear MIB counters */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + /* configure Rx High Water Mark (XM_RX_HI_WM) */ + xm_write16(hw, port, XM_RX_HI_WM, 1450); + + /* We don't need the FCS appended to the packet. */ + r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; + if (jumbo) + r |= XM_RX_BIG_PK_OK; + + if (skge->duplex == DUPLEX_HALF) { + /* + * If in manual half duplex mode the other side might be in + * full duplex mode, so ignore if a carrier extension is not seen + * on frames received + */ + r |= XM_RX_DIS_CEXT; + } + xm_write16(hw, port, XM_RX_CMD, r); + + /* We want short frames padded to 60 bytes. 
*/ + xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); + + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); + + /* + * Enable the reception of all error frames. This is + * a necessary evil due to the design of the XMAC. The + * XMAC's receive FIFO is only 8K in size, however jumbo + * frames can be up to 9000 bytes in length. When bad + * frame filtering is enabled, the XMAC's RX FIFO operates + * in 'store and forward' mode. For this to work, the + * entire frame has to fit into the FIFO, but that means + * that jumbo frames larger than 8192 bytes will be + * truncated. Disabling all bad frame filtering causes + * the RX FIFO to operate in streaming mode, in which + * case the XMAC will start transferring frames out of the + * RX FIFO as soon as the FIFO threshold is reached. + */ + xm_write32(hw, port, XM_MODE, XM_DEF_MODE); + + + /* + * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) + * - Enable all bits excepting 'Octets Rx OK Low CntOv' + * and 'Octets Rx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); + + /* + * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) + * - Enable all bits excepting 'Octets Tx OK Low CntOv' + * and 'Octets Tx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); + + /* Configure MAC arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, 72); + skge_write8(hw, B3_MA_TOINI_RX2, 72); + skge_write8(hw, B3_MA_TOINI_TX1, 72); + skge_write8(hw, B3_MA_TOINI_TX2, 72); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* Configure Rx MAC FIFO */ + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + + if (jumbo) { + /* Enable frame flushing if jumbo frames used */ + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + } else { + /* enable timeout timers if normal frames */ + skge_write16(hw, B3_PA_CTRL, + (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); + } +} + +static void genesis_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); + + genesis_reset(hw, port); + + /* Clear Tx packet arbiter timeout IRQ */ + skge_write16(hw, B3_PA_CTRL, + port == 0 ? 
PA_CLR_TO_TX1 : PA_CLR_TO_TX2); + + /* Reset the MAC */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); + + /* For external PHYs there must be special handling */ + if (hw->phy_type != SK_PHY_XMAC) { + u32 reg = skge_read32(hw, B2_GP_IO); + if (port == 0) { + reg |= GP_DIR_0; + reg &= ~GP_IO_0; + } else { + reg |= GP_DIR_2; + reg &= ~GP_IO_2; + } + skge_write32(hw, B2_GP_IO, reg); + skge_read32(hw, B2_GP_IO); + } + + xm_write16(hw, port, XM_MMU_CMD, + xm_read16(hw, port, XM_MMU_CMD) + & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + xm_read16(hw, port, XM_MMU_CMD); +} + + +static void genesis_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long timeout = jiffies + HZ; + + xm_write16(hw, port, + XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); + + /* wait for update to complete */ + while (xm_read16(hw, port, XM_STAT_CMD) + & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { + if (time_after(jiffies, timeout)) + break; + udelay(10); + } + + /* special case for 64 bit octet counter */ + data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 + | xm_read32(hw, port, XM_TXO_OK_LO); + data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 + | xm_read32(hw, port, XM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); +} + +static void genesis_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status = xm_read16(hw, port, XM_ISRC); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { + xm_link_down(hw, port); + mod_timer(&skge->link_timer, jiffies + 1); + } + + if (status & XM_IS_TXF_UR) { + xm_write32(hw, port, XM_MODE, XM_MD_FTF); + ++dev->stats.tx_fifo_errors; + } +} + +static void genesis_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 cmd, msk; + u32 mode; + + cmd = xm_read16(hw, port, XM_MMU_CMD); + + /* + * enabling pause frame reception is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + if (skge->flow_status == FLOW_STAT_NONE || + skge->flow_status == FLOW_STAT_LOC_SEND) + /* Disable Pause Frame Reception */ + cmd |= XM_MMU_IGN_PF; + else + /* Enable Pause Frame Reception */ + cmd &= ~XM_MMU_IGN_PF; + + xm_write16(hw, port, XM_MMU_CMD, cmd); + + mode = xm_read32(hw, port, XM_MODE); + if (skge->flow_status == FLOW_STAT_SYMMETRIC || + skge->flow_status == FLOW_STAT_LOC_SEND) { + /* + * Configure Pause Frame Generation + * Use internal and external Pause Frame Generation. + * Sending pause frames is edge triggered. + * Send a Pause frame with the maximum pause time if + * internal or external FIFO full condition occurs. + * Send a zero pause time frame to re-start transmission. + */ + /* XM_PAUSE_DA = '010000C28001' (default) */ + /* XM_MAC_PTIME = 0xffff (maximum) */ + /* remember this value is defined in big endian (!) 
*/ + xm_write16(hw, port, XM_MAC_PTIME, 0xffff); + + mode |= XM_PAUSE_MODE; + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); + } else { + /* + * disable pause frame generation is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + /* Disable Pause Mode in Mode Register */ + mode &= ~XM_PAUSE_MODE; + + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); + } + + xm_write32(hw, port, XM_MODE, mode); + + /* Turn on detection of Tx underrun */ + msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_TXF_UR; + xm_write16(hw, port, XM_IMSK, msk); + + xm_read16(hw, port, XM_ISRC); + + /* get MMU Command Reg. */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) + cmd |= XM_MMU_GMII_FD; + + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + if (hw->phy_type == SK_PHY_BCOM) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) + & ~PHY_B_AC_DIS_PM); + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + } + + /* enable Rx/Tx */ + xm_write16(hw, port, XM_MMU_CMD, + cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); + skge_link_up(skge); +} + + +static inline void bcom_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 isrc; + + isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x\n", isrc); + + if (isrc & PHY_B_IS_PSE) + pr_err("%s: uncorrectable pair swap error\n", + hw->dev[port]->name); + + /* Workaround BCom Errata: + * enable and disable loopback mode if "NO HCD" occurs. + */ + if (isrc & PHY_B_IS_NO_HDCL) { + u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl | PHY_CT_LOOP); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl & ~PHY_CT_LOOP); + } + + if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) + bcom_check_link(hw, port); + +} + +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + + if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) + return 0; + } + + pr_warn("%s: phy write timeout\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) + goto ready; + } + + return -ETIMEDOUT; + ready: + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; +} + +static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__gm_phy_read(hw, port, reg, &v)) + pr_warn("%s: phy read timeout\n", hw->dev[port]->name); + return v; +} + +/* Marvell Phy Initialization */ +static void yukon_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv; + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + gm_phy_write(hw, 
port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + if (skge->autoneg == AUTONEG_DISABLE) + ctrl &= ~PHY_CT_ANE; + + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + ctrl = 0; + ct1000 = 0; + adv = PHY_AN_CSMA; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (hw->copper) { + if (skge->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (skge->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (skge->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (skge->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (skge->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + /* Set Flow-control capabilities */ + adv |= phy_pause_map[skge->flow_control]; + } else { + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + + adv |= fiber_pause_map[skge->flow_control]; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + + switch (skge->speed) { + case SPEED_1000: + ctrl |= PHY_CT_SP1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + break; + } + + ctrl |= PHY_CT_RESET; + } + + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Enable phy interrupt on autonegotiation complete (or link up) */ + if (skge->autoneg == AUTONEG_ENABLE) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); +} + +static void yukon_reset(struct skge_hw *hw, int port) +{ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + gma_write16(hw, port, GM_RX_CTRL, + gma_read16(hw, port, GM_RX_CTRL) + | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); +} + +/* Apparently, early versions of Yukon-Lite had wrong chip_id? 
*/ +static int is_yukon_lite_a0(struct skge_hw *hw) +{ + u32 reg; + int ret; + + if (hw->chip_id != CHIP_ID_YUKON) + return 0; + + reg = skge_read32(hw, B2_FAR); + skge_write8(hw, B2_FAR + 3, 0xff); + ret = (skge_read8(hw, B2_FAR + 3) != 0); + skge_write32(hw, B2_FAR, reg); + return ret; +} + +static void yukon_mac_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + int i; + u32 reg; + const u8 *addr = hw->dev[port]->dev_addr; + + /* WA code for COMA mode -- set PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9 | GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* hard reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* Set hardware config mode */ + reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | + GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; + reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; + + /* Clear GMC reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + + if (skge->autoneg == AUTONEG_DISABLE) { + reg = GM_GPCR_AU_ALL_DIS; + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) | reg); + + switch (skge->speed) { + case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; + reg |= GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; + } + + if (skge->duplex == DUPLEX_FULL) + reg |= GM_GPCR_DUP_FULL; + } else + reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; + + switch (skge->flow_control) { + case FLOW_MODE_NONE: + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_LOC_SEND: + /* disable Rx flow-control */ + reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_SYMMETRIC: + case FLOW_MODE_SYM_OR_REM: + /* enable Tx & Rx flow-control */ + break; + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + yukon_init(hw, port); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = 0; i < GM_MIB_CNT_SIZE; i++) + gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); + + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > 
ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* enable interrupt mask for counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Initialize Mac Fifo */ + + /* Configure Rx MAC FIFO */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); + reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ + if (is_yukon_lite_a0(hw)) + reg &= ~GMF_RX_F_FL_ON; + + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); + /* + * because Pause Packet Truncation in GMAC is not working + * we have to increase the Flush Threshold to 64 bytes + * in order to flush pause packets in Rx FIFO on Yukon-1 + */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); +} + +/* Go into power down mode */ +static void yukon_suspend(struct skge_hw *hw, int port) +{ + u16 ctrl; + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_POL_R_DIS; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* switch IEEE compatible power down mode on */ + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_PDOWN; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); +} + +static void yukon_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + yukon_reset(hw, port); + + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) + & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); + gma_read16(hw, port, GM_GP_CTRL); + + yukon_suspend(hw, port); + + /* set GPHY Control reset */ + skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); +} + +static void yukon_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + + data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 + | gma_read32(hw, port, GM_TXO_OK_LO); + data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 + | gma_read32(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = gma_read32(hw, port, + skge_stats[i].gma_offset); +} + +static void yukon_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_FF_OR) { + ++dev->stats.rx_fifo_errors; + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); + } + + if (status & GM_IS_TX_FF_UR) { + ++dev->stats.tx_fifo_errors; + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); + } + +} + +static u16 yukon_speed(const struct skge_hw *hw, u16 aux) +{ + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + 
return SPEED_10; + } +} + +static void yukon_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 reg; + + /* Enable Transmit FIFO Underrun */ + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + reg = gma_read16(hw, port, GM_GP_CTRL); + if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) + reg |= GM_GPCR_DUP_FULL; + + /* enable Rx/Tx */ + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); + skge_link_up(skge); +} + +static void yukon_link_down(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + if (skge->flow_status == FLOW_STAT_REM_SEND) { + ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + ctrl |= PHY_M_AN_ASP; + /* restore Asymmetric Pause bit */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); + } + + skge_link_down(skge); + + yukon_init(hw, port); +} + +static void yukon_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + const char *reason = NULL; + u16 istatus, phystat; + + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x 0x%x\n", istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) + & PHY_M_AN_RF) { + reason = "remote fault"; + goto failed; + } + + if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { + reason = "master/slave fault"; + goto failed; + } + + if (!(phystat & PHY_M_PS_SPDUP_RES)) { + reason = "speed/duplex"; + goto failed; + } + + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) + ? DUPLEX_FULL : DUPLEX_HALF; + skge->speed = yukon_speed(hw, phystat); + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (phystat & PHY_M_PS_PAUSE_MSK) { + case PHY_M_PS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_M_PS_RX_P_EN: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_M_PS_TX_P_EN: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + + if (skge->flow_status == FLOW_STAT_NONE || + (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + else + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + yukon_link_up(skge); + return; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + skge->speed = yukon_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + yukon_link_up(skge); + else + yukon_link_down(skge); + } + return; + failed: + pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); + + /* XXX restart autonegotiation? 
*/ +} + +static void skge_phy_reset(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct net_device *dev = hw->dev[port]; + + netif_stop_queue(skge->netdev); + netif_carrier_off(skge->netdev); + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + genesis_reset(hw, port); + genesis_mac_init(hw, port); + } else { + yukon_reset(hw, port); + yukon_init(hw, port); + } + spin_unlock_bh(&hw->phy_lock); + + skge_set_multicast(dev); +} + +/* Basic MII support */ +static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u16 val = 0; + spin_lock_bh(&hw->phy_lock); + + if (is_genesis(hw)) + err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + else + err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&hw->phy_lock); + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + else + err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&hw->phy_lock); + break; + } + return err; +} + +static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) +{ + u32 end; + + start /= 8; + len /= 8; + end = start + len - 1; + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + skge_write32(hw, RB_ADDR(q, RB_START), start); + skge_write32(hw, RB_ADDR(q, RB_WP), start); + skge_write32(hw, RB_ADDR(q, RB_RP), start); + skge_write32(hw, RB_ADDR(q, RB_END), end); + + if (q == Q_R1 || q == Q_R2) { + /* Set thresholds on receive queues */ + skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), + start + (2*len)/3); + skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), + start + (len/3)); + } else { + /* Enable store & forward on Tx queues because + * Tx FIFO is only 4K on Genesis and 1K on Yukon + */ + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); +} + +/* Setup Bus Memory Interface */ +static void skge_qset(struct skge_port *skge, u16 q, + const struct skge_element *e) +{ + struct skge_hw *hw = skge->hw; + u32 watermark = 0x600; + u64 base = skge->dma + (e->desc - skge->mem); + + /* optimization to reduce window on 32bit/33mhz */ + if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) + watermark /= 2; + + skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); + skge_write32(hw, Q_ADDR(q, Q_F), watermark); + skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); + skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); +} + +static int skge_up(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 chunk, ram_addr; + size_t rx_size, tx_size; + int err; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EINVAL; + + netif_info(skge, ifup, skge->netdev, "enabling interface\n"); + + if (dev->mtu > RX_BUF_SIZE) + skge->rx_buf_size = dev->mtu + ETH_HLEN; + else + skge->rx_buf_size = RX_BUF_SIZE; + + + rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); + tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); + skge->mem_size = tx_size + rx_size; + skge->mem = 
pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); + if (!skge->mem) + return -ENOMEM; + + BUG_ON(skge->dma & 7); + + if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { + dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); + err = -EINVAL; + goto free_pci_mem; + } + + memset(skge->mem, 0, skge->mem_size); + + err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); + if (err) + goto free_pci_mem; + + err = skge_rx_fill(dev); + if (err) + goto free_rx_ring; + + err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, + skge->dma + rx_size); + if (err) + goto free_rx_ring; + + if (hw->ports == 1) { + err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED, + dev->name, hw); + if (err) { + netdev_err(dev, "Unable to allocate interrupt %d error: %d\n", + hw->pdev->irq, err); + goto free_tx_ring; + } + } + + /* Initialize MAC */ + netif_carrier_off(dev); + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + genesis_mac_init(hw, port); + else + yukon_mac_init(hw, port); + spin_unlock_bh(&hw->phy_lock); + + /* Configure RAMbuffers - equally between ports and tx/rx */ + chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); + ram_addr = hw->ram_offset + 2 * chunk * port; + + skge_ramset(hw, rxqaddr[port], ram_addr, chunk); + skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); + + BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); + skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); + skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); + + /* Start receiver BMU */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); + skge_led(skge, LED_MODE_ON); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= portmask[port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + napi_enable(&skge->napi); + + skge_set_multicast(dev); + + return 0; + + free_tx_ring: + kfree(skge->tx_ring.start); + free_rx_ring: + skge_rx_clean(skge); + kfree(skge->rx_ring.start); + free_pci_mem: + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + + return err; +} + +/* stop receiver */ +static void skge_rx_stop(struct skge_hw *hw, int port) +{ + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); +} + +static int skge_down(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + if (skge->mem == NULL) + return 0; + + netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); + + netif_tx_disable(dev); + + if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) + del_timer_sync(&skge->link_timer); + + napi_disable(&skge->napi); + netif_carrier_off(dev); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask &= ~portmask[port]; + skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 
0 : hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + if (hw->ports == 1) + free_irq(hw->pdev->irq, hw); + + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + if (is_genesis(hw)) + genesis_stop(skge); + else + yukon_stop(skge); + + /* Stop transmitter */ + skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + + + /* Disable Force Sync bit and Enable Alloc bit */ + skge_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset PCI FIFO */ + skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + + /* Reset the RAM Buffer async Tx queue */ + skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); + + skge_rx_stop(hw, port); + + if (is_genesis(hw)) { + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); + } else { + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + } + + skge_led(skge, LED_MODE_OFF); + + netif_tx_lock_bh(dev); + skge_tx_clean(dev); + netif_tx_unlock_bh(dev); + + skge_rx_clean(skge); + + kfree(skge->rx_ring.start); + kfree(skge->tx_ring.start); + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + return 0; +} + +static inline int skge_avail(const struct skge_ring *ring) +{ + smp_mb(); + return ((ring->to_clean > ring->to_use) ? 0 : ring->count) + + (ring->to_clean - ring->to_use) - 1; +} + +static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + struct skge_element *e; + struct skge_tx_desc *td; + int i; + u32 control, len; + dma_addr_t map; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) + return NETDEV_TX_BUSY; + + e = skge->tx_ring.to_use; + td = e->desc; + BUG_ON(td->control & BMU_OWN); + e->skb = skb; + len = skb_headlen(skb); + map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(hw->pdev, map)) + goto mapping_error; + + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, len); + + td->dma_lo = lower_32_bits(map); + td->dma_hi = upper_32_bits(map); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const int offset = skb_checksum_start_offset(skb); + + /* This seems backwards, but it is what the sk98lin + * does. Looks like hardware is wrong? + */ + if (ipip_hdr(skb)->protocol == IPPROTO_UDP && + hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) + control = BMU_TCP_CHECK; + else + control = BMU_UDP_CHECK; + + td->csum_offs = 0; + td->csum_start = offset; + td->csum_write = offset + skb->csum_offset; + } else + control = BMU_CHECK; + + if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. 
no fragments */ + control |= BMU_EOF | BMU_IRQ_EOF; + else { + struct skge_tx_desc *tf = td; + + control |= BMU_STFWD; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_unwind; + + e = e->next; + e->skb = skb; + tf = e->desc; + BUG_ON(tf->control & BMU_OWN); + + tf->dma_lo = lower_32_bits(map); + tf->dma_hi = upper_32_bits(map); + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, skb_frag_size(frag)); + + tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); + } + tf->control |= BMU_EOF | BMU_IRQ_EOF; + } + /* Make sure all the descriptors are written */ + wmb(); + td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; + wmb(); + + netdev_sent_queue(dev, skb->len); + + skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); + + netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, + "tx queued, slot %td, len %d\n", + e - skge->tx_ring.start, skb->len); + + skge->tx_ring.to_use = e->next; + smp_wmb(); + + if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { + netdev_dbg(dev, "transmit queue full\n"); + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; + +mapping_unwind: + e = skge->tx_ring.to_use; + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + while (i-- > 0) { + e = e->next; + pci_unmap_page(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + + +/* Free resources associated with this ring element */ +static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, + u32 control) +{ + /* skb header vs. 
fragment */ + if (control & BMU_STF) + pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + else + pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); +} + +/* Free all buffers in transmit ring */ +static void skge_tx_clean(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_element *e; + + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + struct skge_tx_desc *td = e->desc; + + skge_tx_unmap(skge->hw->pdev, e, td->control); + + if (td->control & BMU_EOF) + dev_kfree_skb(e->skb); + td->control = 0; + } + + netdev_reset_queue(dev); + skge->tx_ring.to_clean = e; +} + +static void skge_tx_timeout(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n"); + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); + skge_tx_clean(dev); + netif_wake_queue(dev); +} + +static int skge_change_mtu(struct net_device *dev, int new_mtu) +{ + int err; + + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + return 0; + } + + skge_down(dev); + + dev->mtu = new_mtu; + + err = skge_up(dev); + if (err) + dev_close(dev); + + return err; +} + +static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + +static void genesis_add_filter(u8 filter[8], const u8 *addr) +{ + u32 crc, bit; + + crc = ether_crc_le(ETH_ALEN, addr); + bit = ~crc & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void genesis_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + u32 mode; + u8 filter[8]; + + mode = xm_read32(hw, port, XM_MODE); + mode |= XM_MD_ENA_HASH; + if (dev->flags & IFF_PROMISC) + mode |= XM_MD_ENA_PROM; + else + mode &= ~XM_MD_ENA_PROM; + + if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else { + memset(filter, 0, sizeof(filter)); + + if (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC) + genesis_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + genesis_add_filter(filter, ha->addr); + } + + xm_write32(hw, port, XM_MODE, mode); + xm_outhash(hw, port, XM_HSM, filter); +} + +static void yukon_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void yukon_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC); + u16 reg; + u8 filter[8]; + + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) /* all multicast */ + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ + reg &= ~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + yukon_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + yukon_add_filter(filter, ha->addr); + } + + + gma_write16(hw, port, GM_MC_ADDR_H1, + (u16)filter[0] | 
((u16)filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, + (u16)filter[2] | ((u16)filter[3] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H3, + (u16)filter[4] | ((u16)filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16)filter[6] | ((u16)filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static inline u16 phy_length(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return status >> XMR_FS_LEN_SHIFT; + else + return status >> GMR_FS_LEN_SHIFT; +} + +static inline int bad_phy_status(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; + else + return (status & GMR_FS_ANY_ERR) || + (status & GMR_FS_RX_OK) == 0; +} + +static void skge_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_set_multicast(dev); + else + yukon_set_multicast(dev); + +} + + +/* Get receive buffer from descriptor. + * Handles copy of small buffers and reallocation failures + */ +static struct sk_buff *skge_rx_get(struct net_device *dev, + struct skge_element *e, + u32 control, u32 status, u16 csum) +{ + struct skge_port *skge = netdev_priv(dev); + struct sk_buff *skb; + u16 len = control & BMU_BBC; + + netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, + "rx slot %td status 0x%x len %d\n", + e - skge->rx_ring.start, status, len); + + if (len > skge->rx_buf_size) + goto error; + + if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) + goto error; + + if (bad_phy_status(skge->hw, status)) + goto error; + + if (phy_length(skge->hw, status) != len) + goto error; + + if (len < RX_COPY_THRESHOLD) { + skb = netdev_alloc_skb_ip_align(dev, len); + if (!skb) + goto resubmit; + + pci_dma_sync_single_for_cpu(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(e->skb, skb->data, len); + pci_dma_sync_single_for_device(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + skge_rx_reuse(e, skge->rx_buf_size); + } else { + struct skge_element ee; + struct sk_buff *nskb; + + nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); + if (!nskb) + goto resubmit; + + ee = *e; + + skb = ee.skb; + prefetch(skb->data); + + if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { + dev_kfree_skb(nskb); + goto resubmit; + } + + pci_unmap_single(skge->hw->pdev, + dma_unmap_addr(&ee, mapaddr), + dma_unmap_len(&ee, maplen), + PCI_DMA_FROMDEVICE); + } + + skb_put(skb, len); + + if (dev->features & NETIF_F_RXCSUM) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } + + skb->protocol = eth_type_trans(skb, dev); + + return skb; +error: + + netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, + "rx err, slot %td control 0x%x status 0x%x\n", + e - skge->rx_ring.start, control, status); + + if (is_genesis(skge->hw)) { + if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) + dev->stats.rx_length_errors++; + if (status & XMR_FS_FRA_ERR) + dev->stats.rx_frame_errors++; + if (status & XMR_FS_FCS_ERR) + dev->stats.rx_crc_errors++; + } else { + if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) + dev->stats.rx_length_errors++; + if (status & GMR_FS_FRAGMENT) + dev->stats.rx_frame_errors++; + if (status & GMR_FS_CRC_ERR) + dev->stats.rx_crc_errors++; + } + +resubmit: + skge_rx_reuse(e, skge->rx_buf_size); + return NULL; +} + +/* Free all buffers in Tx ring which are no longer owned by device */ +static void skge_tx_done(struct net_device *dev) +{ + struct 
skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->tx_ring; + struct skge_element *e; + unsigned int bytes_compl = 0, pkts_compl = 0; + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; e != ring->to_use; e = e->next) { + u32 control = ((const struct skge_tx_desc *) e->desc)->control; + + if (control & BMU_OWN) + break; + + skge_tx_unmap(skge->hw->pdev, e, control); + + if (control & BMU_EOF) { + netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, + "tx done slot %td\n", + e - skge->tx_ring.start); + + pkts_compl++; + bytes_compl += e->skb->len; + + dev_consume_skb_any(e->skb); + } + } + netdev_completed_queue(dev, pkts_compl, bytes_compl); + skge->tx_ring.to_clean = e; + + /* Can run lockless until we need to synchronize to restart queue. */ + smp_mb(); + + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_tx_lock(dev); + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_wake_queue(dev); + + } + netif_tx_unlock(dev); + } +} + +static int skge_poll(struct napi_struct *napi, int to_do) +{ + struct skge_port *skge = container_of(napi, struct skge_port, napi); + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + int work_done = 0; + + skge_tx_done(dev); + + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { + struct skge_rx_desc *rd = e->desc; + struct sk_buff *skb; + u32 control; + + rmb(); + control = rd->control; + if (control & BMU_OWN) + break; + + skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); + if (likely(skb)) { + napi_gro_receive(napi, skb); + ++work_done; + } + } + ring->to_clean = e; + + /* restart receiver */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); + + if (work_done < to_do) { + unsigned long flags; + + napi_gro_flush(napi, false); + spin_lock_irqsave(&hw->hw_lock, flags); + __napi_complete(napi); + hw->intr_mask |= napimask[skge->port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irqrestore(&hw->hw_lock, flags); + } + + return work_done; +} + +/* Parity errors seem to happen when Genesis is connected to a switch + * with no other ports present. Heartbeat error?? + */ +static void skge_mac_parity(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + + ++dev->stats.tx_heartbeat_errors; + + if (is_genesis(hw)) + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_CLR_PERR); + else + /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), + (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); +} + +static void skge_mac_intr(struct skge_hw *hw, int port) +{ + if (is_genesis(hw)) + genesis_mac_intr(hw, port); + else + yukon_mac_intr(hw, port); +} + +/* Handle device specific framing and timeout interrupts */ +static void skge_error_irq(struct skge_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); + + if (is_genesis(hw)) { + /* clear xmac errors */ + if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) + skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); + if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) + skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); + } else { + /* Timestamp (unused) overflow */ + if (hwstatus & IS_IRQ_TIST_OV) + skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + } + + if (hwstatus & IS_RAM_RD_PAR) { + dev_err(&pdev->dev, "Ram read data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); + } + + if (hwstatus & IS_RAM_WR_PAR) { + dev_err(&pdev->dev, "Ram write data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); + } + + if (hwstatus & IS_M1_PAR_ERR) + skge_mac_parity(hw, 0); + + if (hwstatus & IS_M2_PAR_ERR) + skge_mac_parity(hw, 1); + + if (hwstatus & IS_R1_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[0]->name); + skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & IS_R2_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[1]->name); + skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", + pci_cmd, pci_status); + + /* Write the error bits back to clear them. */ + pci_status &= PCI_STATUS_ERROR_BITS; + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_write_config_word(pdev, PCI_COMMAND, + pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + pci_write_config_word(pdev, PCI_STATUS, pci_status); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* if error still set then just ignore it */ + hwstatus = skge_read32(hw, B0_HWE_ISRC); + if (hwstatus & IS_IRQ_STAT) { + dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + } +} + +/* + * Interrupts from the PHY are handled in a tasklet (softirq) + * because accessing phy registers requires a spin wait, which might + * cause excess interrupt latency. 
+ */ +static void skge_extirq(unsigned long arg) +{ + struct skge_hw *hw = (struct skge_hw *) arg; + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *dev = hw->dev[port]; + + if (netif_running(dev)) { + struct skge_port *skge = netdev_priv(dev); + + spin_lock(&hw->phy_lock); + if (!is_genesis(hw)) + yukon_phy_intr(skge); + else if (hw->phy_type == SK_PHY_BCOM) + bcom_phy_intr(skge); + spin_unlock(&hw->phy_lock); + } + } + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= IS_EXT_REG; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); +} + +static irqreturn_t skge_intr(int irq, void *dev_id) +{ + struct skge_hw *hw = dev_id; + u32 status; + int handled = 0; + + spin_lock(&hw->hw_lock); + /* Reading this register masks IRQ */ + status = skge_read32(hw, B0_SP_ISRC); + if (status == 0 || status == ~0) + goto out; + + handled = 1; + status &= hw->intr_mask; + if (status & IS_EXT_REG) { + hw->intr_mask &= ~IS_EXT_REG; + tasklet_schedule(&hw->phy_task); + } + + if (status & (IS_XA1_F|IS_R1_F)) { + struct skge_port *skge = netdev_priv(hw->dev[0]); + hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_TX1) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); + + if (status & IS_PA_TO_RX1) { + ++hw->dev[0]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); + } + + + if (status & IS_MAC1) + skge_mac_intr(hw, 0); + + if (hw->dev[1]) { + struct skge_port *skge = netdev_priv(hw->dev[1]); + + if (status & (IS_XA2_F|IS_R2_F)) { + hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_RX2) { + ++hw->dev[1]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); + } + + if (status & IS_PA_TO_TX2) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); + + if (status & IS_MAC2) + skge_mac_intr(hw, 1); + } + + if (status & IS_HW_ERR) + skge_error_irq(hw); +out: + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock(&hw->hw_lock); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void skge_netpoll(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + disable_irq(dev->irq); + skge_intr(dev->irq, skge->hw); + enable_irq(dev->irq); +} +#endif + +static int skge_set_mac_address(struct net_device *dev, void *p) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + unsigned port = skge->port; + const struct sockaddr *addr = p; + u16 ctrl; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) { + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + } else { + /* disable Rx */ + spin_lock_bh(&hw->phy_lock); + ctrl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); + + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + + if (is_genesis(hw)) + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + else { + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + } + + gma_write16(hw, port, GM_GP_CTRL, ctrl); + spin_unlock_bh(&hw->phy_lock); + } + + return 0; +} + +static const struct { + u8 id; + const char *name; +} skge_chips[] = { + { CHIP_ID_GENESIS, "Genesis" 
}, + { CHIP_ID_YUKON, "Yukon" }, + { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, + { CHIP_ID_YUKON_LP, "Yukon-LP"}, +}; + +static const char *skge_board_name(const struct skge_hw *hw) +{ + int i; + static char buf[16]; + + for (i = 0; i < ARRAY_SIZE(skge_chips); i++) + if (skge_chips[i].id == hw->chip_id) + return skge_chips[i].name; + + snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); + return buf; +} + + +/* + * Setup the board data structure, but don't bring up + * the port(s) + */ +static int skge_reset(struct skge_hw *hw) +{ + u32 reg; + u16 ctst, pci_status; + u8 t8, mac_cfg, pmd_type; + int i; + + ctst = skge_read16(hw, B0_CTST); + + /* do a SW reset */ + skge_write8(hw, B0_CTST, CS_RST_SET); + skge_write8(hw, B0_CTST, CS_RST_CLR); + + /* clear PCI errors, if any */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + skge_write8(hw, B2_TST_CTRL2, 0); + + pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(hw->pdev, PCI_STATUS, + pci_status | PCI_STATUS_ERROR_BITS); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + skge_write8(hw, B0_CTST, CS_MRST_CLR); + + /* restore CLK_RUN bits (for Yukon-Lite) */ + skge_write16(hw, B0_CTST, + ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); + + hw->chip_id = skge_read8(hw, B2_CHIP_ID); + hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; + pmd_type = skge_read8(hw, B2_PMD_TYP); + hw->copper = (pmd_type == 'T' || pmd_type == '1'); + + switch (hw->chip_id) { + case CHIP_ID_GENESIS: +#ifdef CONFIG_SKGE_GENESIS + switch (hw->phy_type) { + case SK_PHY_XMAC: + hw->phy_addr = PHY_ADDR_XMAC; + break; + case SK_PHY_BCOM: + hw->phy_addr = PHY_ADDR_BCOM; + break; + default: + dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", + hw->phy_type); + return -EOPNOTSUPP; + } + break; +#else + dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n"); + return -EOPNOTSUPP; +#endif + + case CHIP_ID_YUKON: + case CHIP_ID_YUKON_LITE: + case CHIP_ID_YUKON_LP: + if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') + hw->copper = 1; + + hw->phy_addr = PHY_ADDR_MARV; + break; + + default: + dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", + hw->chip_id); + return -EOPNOTSUPP; + } + + mac_cfg = skge_read8(hw, B2_MAC_CFG); + hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
1 : 2; + hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; + + /* read the adapters RAM size */ + t8 = skge_read8(hw, B2_E_0); + if (is_genesis(hw)) { + if (t8 == 3) { + /* special case: 4 x 64k x 36, offset = 0x80000 */ + hw->ram_size = 0x100000; + hw->ram_offset = 0x80000; + } else + hw->ram_size = t8 * 512; + } else if (t8 == 0) + hw->ram_size = 0x20000; + else + hw->ram_size = t8 * 4096; + + hw->intr_mask = IS_HW_ERR; + + /* Use PHY IRQ for all but fiber based Genesis board */ + if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)) + hw->intr_mask |= IS_EXT_REG; + + if (is_genesis(hw)) + genesis_init(hw); + else { + /* switch power to VCC (WA for VAUX problem) */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* avoid boards with stuck Hardware error bits */ + if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && + (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { + dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + + /* Clear PHY COMA */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®); + reg &= ~PCI_PHY_COMA; + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + + for (i = 0; i < hw->ports; i++) { + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + } + } + + /* turn off hardware timer (unused) */ + skge_write8(hw, B2_TI_CTRL, TIM_STOP); + skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + skge_write8(hw, B0_LED, LED_STAT_ON); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); + + skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); + + skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); + + /* Set interrupt moderation for Transmit only + * Receive interrupts avoided by NAPI + */ + skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + + /* Leave irq disabled until first port is brought up. 
*/ + skge_write32(hw, B0_IMSK, 0); + + for (i = 0; i < hw->ports; i++) { + if (is_genesis(hw)) + genesis_reset(hw, i); + else + yukon_reset(hw, i); + } + + return 0; +} + + +#ifdef CONFIG_SKGE_DEBUG + +static struct dentry *skge_debug; + +static int skge_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + const struct skge_element *e; + + if (!netif_running(dev)) + return -ENETDOWN; + + seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), + skge_read32(hw, B0_IMSK)); + + seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + const struct skge_tx_desc *t = e->desc; + seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", + t->control, t->dma_hi, t->dma_lo, t->status, + t->csum_offs, t->csum_write, t->csum_start); + } + + seq_printf(seq, "\nRx Ring:\n"); + for (e = skge->rx_ring.to_clean; ; e = e->next) { + const struct skge_rx_desc *r = e->desc; + + if (r->control & BMU_OWN) + break; + + seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", + r->control, r->dma_hi, r->dma_lo, r->status, + r->timestamp, r->csum1, r->csum1_start); + } + + return 0; +} + +static int skge_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, skge_debug_show, inode->i_private); +} + +static const struct file_operations skge_debug_fops = { + .owner = THIS_MODULE, + .open = skge_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int skge_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct skge_port *skge; + struct dentry *d; + + if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) + goto done; + + skge = netdev_priv(dev); + switch (event) { + case NETDEV_CHANGENAME: + if (skge->debugfs) { + d = debugfs_rename(skge_debug, skge->debugfs, + skge_debug, dev->name); + if (d) + skge->debugfs = d; + else { + netdev_info(dev, "rename failed\n"); + debugfs_remove(skge->debugfs); + } + } + break; + + case NETDEV_GOING_DOWN: + if (skge->debugfs) { + debugfs_remove(skge->debugfs); + skge->debugfs = NULL; + } + break; + + case NETDEV_UP: + d = debugfs_create_file(dev->name, S_IRUGO, + skge_debug, dev, + &skge_debug_fops); + if (!d || IS_ERR(d)) + netdev_info(dev, "debugfs create failed\n"); + else + skge->debugfs = d; + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block skge_notifier = { + .notifier_call = skge_device_event, +}; + + +static __init void skge_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("skge", NULL); + if (!ent || IS_ERR(ent)) { + pr_info("debugfs create directory failed\n"); + return; + } + + skge_debug = ent; + register_netdevice_notifier(&skge_notifier); +} + +static __exit void skge_debug_cleanup(void) +{ + if (skge_debug) { + unregister_netdevice_notifier(&skge_notifier); + debugfs_remove(skge_debug); + skge_debug = NULL; + } +} + +#else +#define skge_debug_init() +#define skge_debug_cleanup() +#endif + +static const struct net_device_ops skge_netdev_ops = { + .ndo_open = skge_up, + .ndo_stop = skge_down, + .ndo_start_xmit = skge_xmit_frame, + .ndo_do_ioctl = skge_ioctl, + .ndo_get_stats = skge_get_stats, + .ndo_tx_timeout = skge_tx_timeout, + .ndo_change_mtu = 
skge_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = skge_set_multicast, + .ndo_set_mac_address = skge_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = skge_netpoll, +#endif +}; + + +/* Initialize network device */ +static struct net_device *skge_devinit(struct skge_hw *hw, int port, + int highmem) +{ + struct skge_port *skge; + struct net_device *dev = alloc_etherdev(sizeof(*skge)); + + if (!dev) + return NULL; + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->netdev_ops = &skge_netdev_ops; + dev->ethtool_ops = &skge_ethtool_ops; + dev->watchdog_timeo = TX_WATCHDOG; + dev->irq = hw->pdev->irq; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + skge = netdev_priv(dev); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); + skge->netdev = dev; + skge->hw = hw; + skge->msg_enable = netif_msg_init(debug, default_msg); + + skge->tx_ring.count = DEFAULT_TX_RING_SIZE; + skge->rx_ring.count = DEFAULT_RX_RING_SIZE; + + /* Auto speed and flow control */ + skge->autoneg = AUTONEG_ENABLE; + skge->flow_control = FLOW_MODE_SYM_OR_REM; + skge->duplex = -1; + skge->speed = -1; + skge->advertising = skge_supported_modes(hw); + + if (device_can_wakeup(&hw->pdev->dev)) { + skge->wol = wol_supported(hw) & WAKE_MAGIC; + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + } + + hw->dev[port] = dev; + + skge->port = port; + + /* Only used for Genesis XMAC */ + if (is_genesis(hw)) + setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); + else { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= dev->hw_features; + } + + /* read the mac address */ + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + + return dev; +} + +static void skge_show_addr(struct net_device *dev) +{ + const struct skge_port *skge = netdev_priv(dev); + + netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); +} + +static int only_32bit_dma; + +static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev, *dev1; + struct skge_hw *hw; + int err, using_dac = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_out_free_regions; + } + +#ifdef __BIG_ENDIAN + /* byte swap descriptors in hardware */ + { + u32 reg; + + pci_read_config_dword(pdev, PCI_DEV_REG2, ®); + reg |= PCI_REV_DESC; + pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + } +#endif + + err = -ENOMEM; + /* space for skge@pci:0000:04:00.0 */ + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); + if (!hw) + goto err_out_free_regions; + + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); + + hw->pdev = pdev; + spin_lock_init(&hw->hw_lock); + spin_lock_init(&hw->phy_lock); + tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw); + + hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + if (!hw->regs) { + 
dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_out_free_hw; + } + + err = skge_reset(hw); + if (err) + goto err_out_iounmap; + + pr_info("%s addr 0x%llx irq %d chip %s rev %d\n", + DRV_VERSION, + (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, + skge_board_name(hw), hw->chip_rev); + + dev = skge_devinit(hw, 0, using_dac); + if (!dev) { + err = -ENOMEM; + goto err_out_led_off; + } + + /* Some motherboards are broken and have zero in ROM. */ + if (!is_valid_ether_addr(dev->dev_addr)) + dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n"); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "cannot register net device\n"); + goto err_out_free_netdev; + } + + skge_show_addr(dev); + + if (hw->ports > 1) { + dev1 = skge_devinit(hw, 1, using_dac); + if (!dev1) { + err = -ENOMEM; + goto err_out_unregister; + } + + err = register_netdev(dev1); + if (err) { + dev_err(&pdev->dev, "cannot register second net device\n"); + goto err_out_free_dev1; + } + + err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, + hw->irq_name, hw); + if (err) { + dev_err(&pdev->dev, "cannot assign irq %d\n", + pdev->irq); + goto err_out_unregister_dev1; + } + + skge_show_addr(dev1); + } + pci_set_drvdata(pdev, hw); + + return 0; + +err_out_unregister_dev1: + unregister_netdev(dev1); +err_out_free_dev1: + free_netdev(dev1); +err_out_unregister: + unregister_netdev(dev); +err_out_free_netdev: + free_netdev(dev); +err_out_led_off: + skge_write16(hw, B0_LED, LED_STAT_OFF); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out: + return err; +} + +static void skge_remove(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + struct net_device *dev0, *dev1; + + if (!hw) + return; + + dev1 = hw->dev[1]; + if (dev1) + unregister_netdev(dev1); + dev0 = hw->dev[0]; + unregister_netdev(dev0); + + tasklet_kill(&hw->phy_task); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask = 0; + + if (hw->ports > 1) { + skge_write32(hw, B0_IMSK, 0); + skge_read32(hw, B0_IMSK); + } + spin_unlock_irq(&hw->hw_lock); + + skge_write16(hw, B0_LED, LED_STAT_OFF); + skge_write8(hw, B0_CTST, CS_RST_SET); + + if (hw->ports > 1) + free_irq(pdev->irq, hw); + pci_release_regions(pdev); + pci_disable_device(pdev); + if (dev1) + free_netdev(dev1); + free_netdev(dev0); + + iounmap(hw->regs); + kfree(hw); +} + +#ifdef CONFIG_PM_SLEEP +static int skge_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return 0; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (netif_running(dev)) + skge_down(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + skge_write32(hw, B0_IMSK, 0); + + return 0; +} + +static int skge_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i, err; + + if (!hw) + return 0; + + err = skge_reset(hw); + if (err) + goto out; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + + if (netif_running(dev)) { + err = skge_up(dev); + + if (err) { + netdev_err(dev, "could not up: %d\n", err); + dev_close(dev); + goto out; + } + } + } +out: + return err; +} + +static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); +#define SKGE_PM_OPS (&skge_pm_ops) + +#else + 
+#define SKGE_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +static void skge_shutdown(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +} + +static struct pci_driver skge_driver = { + .name = DRV_NAME, + .id_table = skge_id_table, + .probe = skge_probe, + .remove = skge_remove, + .shutdown = skge_shutdown, + .driver.pm = SKGE_PM_OPS, +}; + +static struct dmi_system_id skge_32bit_dma_boards[] = { + { + .ident = "Gigabyte nForce boards", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), + DMI_MATCH(DMI_BOARD_NAME, "nForce"), + }, + }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, + { + .ident = "FUJITSU SIEMENS A8NE-FM", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM") + }, + }, + {} +}; + +static int __init skge_init_module(void) +{ + if (dmi_check_system(skge_32bit_dma_boards)) + only_32bit_dma = 1; + skge_debug_init(); + return pci_register_driver(&skge_driver); +} + +static void __exit skge_cleanup_module(void) +{ + pci_unregister_driver(&skge_driver); + skge_debug_cleanup(); +} + +module_init(skge_init_module); +module_exit(skge_cleanup_module); diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h new file mode 100644 index 000000000..a2eb34115 --- /dev/null +++ b/drivers/net/ethernet/marvell/skge.h @@ -0,0 +1,2584 @@ +/* + * Definitions for the new Marvell Yukon / SysKonnect driver. + */ +#ifndef _SKGE_H +#define _SKGE_H +#include + +/* PCI config registers */ +#define PCI_DEV_REG1 0x40 +#define PCI_PHY_COMA 0x8000000 +#define PCI_VIO 0x2000000 + +#define PCI_DEV_REG2 0x44 +#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... 
*/ +#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */ + +#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_PARITY) + +enum csr_regs { + B0_RAP = 0x0000, + B0_CTST = 0x0004, + B0_LED = 0x0006, + B0_POWER_CTRL = 0x0007, + B0_ISRC = 0x0008, + B0_IMSK = 0x000c, + B0_HWE_ISRC = 0x0010, + B0_HWE_IMSK = 0x0014, + B0_SP_ISRC = 0x0018, + B0_XM1_IMSK = 0x0020, + B0_XM1_ISRC = 0x0028, + B0_XM1_PHY_ADDR = 0x0030, + B0_XM1_PHY_DATA = 0x0034, + B0_XM2_IMSK = 0x0040, + B0_XM2_ISRC = 0x0048, + B0_XM2_PHY_ADDR = 0x0050, + B0_XM2_PHY_DATA = 0x0054, + B0_R1_CSR = 0x0060, + B0_R2_CSR = 0x0064, + B0_XS1_CSR = 0x0068, + B0_XA1_CSR = 0x006c, + B0_XS2_CSR = 0x0070, + B0_XA2_CSR = 0x0074, + + B2_MAC_1 = 0x0100, + B2_MAC_2 = 0x0108, + B2_MAC_3 = 0x0110, + B2_CONN_TYP = 0x0118, + B2_PMD_TYP = 0x0119, + B2_MAC_CFG = 0x011a, + B2_CHIP_ID = 0x011b, + B2_E_0 = 0x011c, + B2_E_1 = 0x011d, + B2_E_2 = 0x011e, + B2_E_3 = 0x011f, + B2_FAR = 0x0120, + B2_FDP = 0x0124, + B2_LD_CTRL = 0x0128, + B2_LD_TEST = 0x0129, + B2_TI_INI = 0x0130, + B2_TI_VAL = 0x0134, + B2_TI_CTRL = 0x0138, + B2_TI_TEST = 0x0139, + B2_IRQM_INI = 0x0140, + B2_IRQM_VAL = 0x0144, + B2_IRQM_CTRL = 0x0148, + B2_IRQM_TEST = 0x0149, + B2_IRQM_MSK = 0x014c, + B2_IRQM_HWE_MSK = 0x0150, + B2_TST_CTRL1 = 0x0158, + B2_TST_CTRL2 = 0x0159, + B2_GP_IO = 0x015c, + B2_I2C_CTRL = 0x0160, + B2_I2C_DATA = 0x0164, + B2_I2C_IRQ = 0x0168, + B2_I2C_SW = 0x016c, + B2_BSC_INI = 0x0170, + B2_BSC_VAL = 0x0174, + B2_BSC_CTRL = 0x0178, + B2_BSC_STAT = 0x0179, + B2_BSC_TST = 0x017a, + + B3_RAM_ADDR = 0x0180, + B3_RAM_DATA_LO = 0x0184, + B3_RAM_DATA_HI = 0x0188, + B3_RI_WTO_R1 = 0x0190, + B3_RI_WTO_XA1 = 0x0191, + B3_RI_WTO_XS1 = 0x0192, + B3_RI_RTO_R1 = 0x0193, + B3_RI_RTO_XA1 = 0x0194, + B3_RI_RTO_XS1 = 0x0195, + B3_RI_WTO_R2 = 0x0196, + B3_RI_WTO_XA2 = 0x0197, + B3_RI_WTO_XS2 = 0x0198, + B3_RI_RTO_R2 = 0x0199, + B3_RI_RTO_XA2 = 0x019a, + B3_RI_RTO_XS2 = 0x019b, + B3_RI_TO_VAL = 0x019c, + B3_RI_CTRL = 0x01a0, + B3_RI_TEST = 0x01a2, + B3_MA_TOINI_RX1 = 0x01b0, + B3_MA_TOINI_RX2 = 0x01b1, + B3_MA_TOINI_TX1 = 0x01b2, + B3_MA_TOINI_TX2 = 0x01b3, + B3_MA_TOVAL_RX1 = 0x01b4, + B3_MA_TOVAL_RX2 = 0x01b5, + B3_MA_TOVAL_TX1 = 0x01b6, + B3_MA_TOVAL_TX2 = 0x01b7, + B3_MA_TO_CTRL = 0x01b8, + B3_MA_TO_TEST = 0x01ba, + B3_MA_RCINI_RX1 = 0x01c0, + B3_MA_RCINI_RX2 = 0x01c1, + B3_MA_RCINI_TX1 = 0x01c2, + B3_MA_RCINI_TX2 = 0x01c3, + B3_MA_RCVAL_RX1 = 0x01c4, + B3_MA_RCVAL_RX2 = 0x01c5, + B3_MA_RCVAL_TX1 = 0x01c6, + B3_MA_RCVAL_TX2 = 0x01c7, + B3_MA_RC_CTRL = 0x01c8, + B3_MA_RC_TEST = 0x01ca, + B3_PA_TOINI_RX1 = 0x01d0, + B3_PA_TOINI_RX2 = 0x01d4, + B3_PA_TOINI_TX1 = 0x01d8, + B3_PA_TOINI_TX2 = 0x01dc, + B3_PA_TOVAL_RX1 = 0x01e0, + B3_PA_TOVAL_RX2 = 0x01e4, + B3_PA_TOVAL_TX1 = 0x01e8, + B3_PA_TOVAL_TX2 = 0x01ec, + B3_PA_CTRL = 0x01f0, + B3_PA_TEST = 0x01f2, +}; + +/* B0_CTST 16 bit Control/Status register */ +enum { + CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. 
(YUKON-Lite only) */ + CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */ + CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */ + CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */ + CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */ + CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */ + CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */ + CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */ + CS_STOP_DONE = 1<<5, /* Stop Master is finished */ + CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */ + CS_MRST_CLR = 1<<3, /* Clear Master reset */ + CS_MRST_SET = 1<<2, /* Set Master reset */ + CS_RST_CLR = 1<<1, /* Clear Software reset */ + CS_RST_SET = 1, /* Set Software reset */ + +/* B0_LED 8 Bit LED register */ +/* Bit 7.. 2: reserved */ + LED_STAT_ON = 1<<1, /* Status LED on */ + LED_STAT_OFF = 1, /* Status LED off */ + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ + PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ + PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ + PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ + PC_VCC_DIS = 1<<4, /* Switch VCC Disable */ + PC_VAUX_ON = 1<<3, /* Switch VAUX On */ + PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */ + PC_VCC_ON = 1<<1, /* Switch VCC On */ + PC_VCC_OFF = 1<<0, /* Switch VCC Off */ +}; + +/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ +enum { + IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */ + IS_HW_ERR = 1<<31, /* Interrupt HW Error */ + /* Bit 30: reserved */ + IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */ + IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */ + IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */ + IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */ + IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */ + IS_IRQ_SW = 1<<24, /* SW forced IRQ */ + IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */ + /* IRQ from PHY (YUKON only) */ + IS_TIMINT = 1<<22, /* IRQ from Timer */ + IS_MAC1 = 1<<21, /* IRQ from MAC 1 */ + IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */ + IS_MAC2 = 1<<19, /* IRQ from MAC 2 */ + IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */ +/* Receive Queue 1 */ + IS_R1_B = 1<<17, /* Q_R1 End of Buffer */ + IS_R1_F = 1<<16, /* Q_R1 End of Frame */ + IS_R1_C = 1<<15, /* Q_R1 Encoding Error */ +/* Receive Queue 2 */ + IS_R2_B = 1<<14, /* Q_R2 End of Buffer */ + IS_R2_F = 1<<13, /* Q_R2 End of Frame */ + IS_R2_C = 1<<12, /* Q_R2 Encoding Error */ +/* Synchronous Transmit Queue 1 */ + IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */ + IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */ + IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */ +/* Asynchronous Transmit Queue 1 */ + IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */ + IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */ + IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */ +/* Synchronous Transmit Queue 2 */ + IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */ + IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */ + IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */ +/* Asynchronous Transmit Queue 2 */ + IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */ + IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ + IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ + + IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1, + IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2, + + IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1, + IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2, +}; + + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master 
error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ + + IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT + | IS_RAM_RD_PAR | IS_RAM_WR_PAR + | IS_M1_PAR_ERR | IS_M2_PAR_ERR + | IS_R1_PAR_ERR | IS_R2_PAR_ERR, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */ + CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */ + CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */ + CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */ + CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ + CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ + + CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */ + CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. 
for YUKON-Lite A3 */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* B2_GP_IO 32 bit General Purpose I/O Register */ +enum { + GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ + GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */ + GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */ + GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */ + GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */ + GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */ + GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */ + GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */ + GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */ + GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */ + + GP_IO_9 = 1<<9, /* IO_9 pin */ + GP_IO_8 = 1<<8, /* IO_8 pin */ + GP_IO_7 = 1<<7, /* IO_7 pin */ + GP_IO_6 = 1<<6, /* IO_6 pin */ + GP_IO_5 = 1<<5, /* IO_5 pin */ + GP_IO_4 = 1<<4, /* IO_4 pin */ + GP_IO_3 = 1<<3, /* IO_3 pin */ + GP_IO_2 = 1<<2, /* IO_2 pin */ + GP_IO_1 = 1<<1, /* IO_1 pin */ + GP_IO_0 = 1<<0, /* IO_0 pin */ +}; + +/* Descriptor Bit Definition */ +/* TxCtrl Transmit Buffer Control Field */ +/* RxCtrl Receive Buffer Control Field */ +enum { + BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */ + BMU_STF = 1<<30, /* Start of Frame */ + BMU_EOF = 1<<29, /* End of Frame */ + BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */ + BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */ + /* TxCtrl specific bits */ + BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */ + BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */ + BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */ + /* RxCtrl specific bits */ + BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */ + BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */ + BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */ + /* Bit 23..16: BMU Check Opcodes */ + BMU_CHECK = 0x55<<16, /* Default BMU check */ + BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */ + BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */ + BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */ +}; + +/* B2_BSC_CTRL 8 bit Blink Source Counter Control */ +enum { + BSC_START = 1<<1, /* Start Blink Source Counter */ + BSC_STOP = 1<<0, /* Stop Blink Source Counter */ +}; + +/* B2_BSC_STAT 8 bit Blink Source Counter Status */ +enum { + BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */ +}; + +/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ +enum { + BSC_T_ON = 1<<2, /* Test mode on */ + BSC_T_OFF = 1<<1, /* Test mode off */ + BSC_T_STEP = 1<<0, /* Test step */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 
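/*
 * Usage sketch: building the control word of a single-buffer transmit
 * descriptor from the BMU_* bits above.  The descriptor structure itself
 * is defined elsewhere; the length is confined to the buffer byte
 * counter field (BMU_BBC) and BMU_CHECK selects the default check opcode.
 */
static inline u32 example_tx_ctrl_word(unsigned int len, int want_irq)
{
	u32 ctrl = BMU_OWN | BMU_STF | BMU_EOF | BMU_CHECK | (len & BMU_BBC);

	if (want_irq)
		ctrl |= BMU_IRQ_EOF;	/* request an End-of-Frame interrupt */
	return ctrl;
}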
0: RAM Address Range */ +/* RAM Interface Registers */ + +/* B3_RI_CTRL 16 bit RAM Iface Control Register */ +enum { + RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ + RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ + + RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ + RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ +}; + +/* MAC Arbiter Registers */ +/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ +enum { + MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */ + MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */ + MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ + +}; + +/* Timeout values */ +#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ +#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */ +#define SK_PKT_TO_MAX 0xffff /* Maximum value */ +#define SK_RI_TO_53 36 /* RAM interface timeout */ + +/* Packet Arbiter Registers */ +/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ +enum { + PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */ + PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */ + PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */ + PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */ + PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */ + PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */ + PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */ + PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */ + PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */ + PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */ + PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */ + PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */ + PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ +}; + +#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\ + PA_ENA_TO_TX1 | PA_ENA_TO_TX2) + + +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ +/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ +/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ +/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ + +#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 
0: Max TXA Timer/Cnt Val */ + +/* TXA_CTRL 8 bit Tx Arbiter Control Register */ +enum { + TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */ + TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */ + TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */ + TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */ + TXA_START_RC = 1<<3, /* Start sync Rate Control */ + TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */ + TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */ + TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */ +}; + +/* + * Bank 4 - 5 + */ +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +enum { + TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ + TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ + TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */ + TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */ + TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ + TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ + TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ +}; + + +enum { + B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ + B7_CFG_SPC = 0x0380,/* copy of the Configuration register */ + B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */ + B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */ + B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */ + B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */ + B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */ + B8_TA2_REGS = 0x0780,/* Transmit sync queue 2 */ + B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */ +}; + +/* Queue Register Offsets, use Q_ADDR() to access */ +enum { + B8_Q_REGS = 0x0400, /* base of Queue registers */ + Q_D = 0x00, /* 8*32 bit Current Descriptor */ + Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ + Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ + Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ + Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ + Q_BC = 0x30, /* 32 bit Current Byte Counter */ + Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ + Q_F = 0x38, /* 32 bit Flag Register */ + Q_T1 = 0x3c, /* 32 bit Test Register 1 */ + Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */ + Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */ + Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */ + Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */ + Q_T2 = 0x40, /* 32 bit Test Register 2 */ + Q_T3 = 0x44, /* 32 bit Test Register 3 */ + +}; +#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) + +/* RAM Buffer Register Offsets */ +enum { + + RB_START= 0x00,/* 32 bit RAM Buffer Start Address */ + RB_END = 0x04,/* 32 bit RAM Buffer End Address */ + RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */ + RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */ + RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */ + RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */ + RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */ + RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ + RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */ + RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */ + RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */ + RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */ + RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */ +}; + +/* Receive and Transmit Queues */ +enum { + Q_R1 = 0x0000, /* Receive Queue 1 */ + Q_R2 = 0x0080, /* Receive Queue 2 
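/*
 * Usage sketch: composing the Tx Arbiter control byte from the TXA_*
 * bits above.  Per the comments above, the per-port copy is reached via
 * SK_REG(port, TXA_CTRL); the write accessor itself is an assumption and
 * is not shown here.
 */
static inline u8 example_txa_enable(void)
{
	/* enable the arbiter and free-bandwidth allocation, leave rate control stopped */
	return TXA_ENA_ARB | TXA_ENA_ALLOC | TXA_STOP_RC;
}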
*/ + Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */ + Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */ + Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */ + Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */ +}; + +/* Different MAC Types */ +enum { + SK_MAC_XMAC = 0, /* Xaqti XMAC II */ + SK_MAC_GMAC = 1, /* Marvell GMAC */ +}; + +/* Different PHY Types */ +enum { + SK_PHY_XMAC = 0,/* integrated in XMAC II */ + SK_PHY_BCOM = 1,/* Broadcom BCM5400 */ + SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/ + SK_PHY_NAT = 3,/* National DP83891 [not supported] */ + SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */ + SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */ +}; + +/* PHY addresses (bits 12..8 of PHY address reg) */ +enum { + PHY_ADDR_XMAC = 0<<8, + PHY_ADDR_BCOM = 1<<8, + +/* GPHY address (bits 15..11 of SMI control reg) */ + PHY_ADDR_MARV = 0, +}; + +#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs)) + +/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */ +enum { + RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */ + RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */ + + RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */ + RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */ + RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */ + RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/ + RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */ + RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */ + RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/ + RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */ + RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */ + + RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */ + RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */ + RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */ + RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */ + + LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */ + LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */ + LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */ + LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */ + LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */ +}; + +/* Receive and Transmit MAC FIFO Registers (GENESIS only) */ +/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */ +enum { + MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */ + MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */ + MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */ + MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */ + MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */ + MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */ + MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */ + MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */ + MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */ + MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */ + MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */ + MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */ + MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */ + MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */ + MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT, +}; + +/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */ +enum { + MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */ + + MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */ + MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */ + + MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */ + MFF_DIS_W4E = 1<<6, /* 
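/*
 * Usage sketch: composing the 16 bit value for the GENESIS Rx MAC FIFO
 * control register (RX_MFF_CTRL1) from the MFF_* bits above, starting
 * from the MFF_RX_CTRL_DEF default defined in this header.
 */
static inline u16 example_rx_mac_fifo_ctrl(int flush, int timestamps)
{
	u16 ctrl = MFF_RX_CTRL_DEF;

	if (flush)
		ctrl |= MFF_ENA_FLUSH;		/* flush bad frames from the FIFO */
	if (timestamps)
		ctrl |= MFF_ENA_TIST;		/* generate Rx time stamps */
	return ctrl;
}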
Disable Wait for Empty */ + + MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */ + MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */ + MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */ + MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */ + + MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH, +}; + + +/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ +/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ +enum { + MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */ + MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */ + MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */ + MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */ + MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */ + MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */ + MFF_PC_INC = 1<<0, /* Packet Counter Increment */ +}; + +/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ +/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ +enum { + MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */ + MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */ + MFF_WP_INC = 1<<4, /* Write Pointer Increm */ + + MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */ + MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */ + MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */ +}; + +/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */ +/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */ +enum { + MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */ + MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */ +}; + + +/* Link LED Counter Registers (GENESIS only) */ + +/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ +/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ +/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ +enum { + LED_START = 1<<2, /* Start Timer */ + LED_STOP = 1<<1, /* Stop Timer */ + LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */ +}; + +/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ +/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ +/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ +enum { + LED_T_ON = 1<<2, /* LED Counter Test mode On */ + LED_T_OFF = 1<<1, /* LED Counter Test mode Off */ + LED_T_STEP = 1<<0, /* LED Counter Step */ +}; + +/* LNK_LED_REG 8 bit Link LED Register */ +enum { + LED_BLK_ON = 1<<5, /* Link LED Blinking On */ + LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */ + LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */ + LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */ + LED_ON = 1<<1, /* switch LED on */ + LED_OFF = 1<<0, /* switch LED off */ +}; + +/* Receive GMAC FIFO (YUKON) */ +enum { + RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ + RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ + RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ + RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ + RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ + RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ + RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ + RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ + RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ +}; + + +/* TXA_TEST 8 bit Tx Arbiter Test Register */ +enum { + TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */ + TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */ + TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */ + TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */ + TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */ + TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */ +}; + +/* TXA_STAT 8 bit Tx Arbiter Status Register */ +enum { + TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */ +}; + + +/* Q_BC 32 bit Current Byte Counter */ + +/* BMU Control Status Registers */ +/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */ +/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */ +/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ +/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ +/* Q_CSR 32 bit BMU Control/Status Register */ + +enum { + CSR_SV_IDLE = 1<<24, /* BMU SM Idle */ + + CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */ + CSR_DESC_SET = 1<<20, /* Set Reset for Descr */ + CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */ + CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */ + CSR_HPI_RUN = 1<<17, /* Release HPI SM */ + CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */ + CSR_SV_RUN = 1<<15, /* Release Supervisor SM */ + CSR_SV_RST = 1<<14, /* Reset Supervisor SM */ + CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */ + CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */ + CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */ + CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */ + CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */ + CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */ + CSR_ENA_POL = 1<<7, /* Enable Descr Polling */ + CSR_DIS_POL = 1<<6, /* Disable Descr Polling */ + CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */ + CSR_START = 1<<4, /* Start Rx/Tx Queue */ + CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */ + CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */ + CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */ + CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */ +}; + +#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\ + CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\ + CSR_TRANS_RST) +#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\ + CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\ + CSR_TRANS_RUN) + +/* Q_F 32 bit Flag Register */ +enum { + F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */ + F_EMPTY = 1<<27, /* Tx FIFO: empty flag */ + F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */ + F_WM_REACHED = 1<<25, /* Watermark reached */ + + F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */ + F_WATER_MARK = 0x0007ffL, /* Bit 10.. 
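/*
 * Usage sketch: a typical BMU bring-up for one queue -- release the
 * state-machine resets, then start the queue.  Q_ADDR() and the CSR_*
 * bits come from this header; the writel() access on the mapped register
 * window (void __iomem *io) is an assumption.
 */
static inline void example_start_queue(void __iomem *io, int queue)
{
	writel(CSR_CLR_RESET, io + Q_ADDR(queue, Q_CSR));	/* leave reset */
	writel(CSR_START, io + Q_ADDR(queue, Q_CSR));		/* start the Rx/Tx queue */
}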
0: Watermark */ +}; + +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ +/* RB_START 32 bit RAM Buffer Start Address */ +/* RB_END 32 bit RAM Buffer End Address */ +/* RB_WP 32 bit RAM Buffer Write Pointer */ +/* RB_RP 32 bit RAM Buffer Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ +enum { + RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ + RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ + RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ + RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ +}; + +/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */ +enum { + TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */ + TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */ + TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */ + TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */ + TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */ + TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */ + TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */ + TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */ + + TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */ + TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */ + TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */ + + TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */ + TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */ + TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */ + TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */ +}; + +/* Counter and Timer constants, for a host clock of 62.5 MHz */ +#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */ +#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */ + +#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */ + +#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */ + /* 215 ms at 78.12 MHz */ + +#define SK_FACT_62 100 /* is given in percent */ +#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */ +#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */ + + +/* Transmit GMAC FIFO (YUKON only) */ +enum { + TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ + TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ + TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ + + TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ + TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. 
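/*
 * Usage sketch: bringing a queue's RAM buffer out of reset and enabling
 * it, using RB_ADDR() and the RB_* offsets/bits above.  The address unit
 * of start/end and the writel()/writeb() accessors are assumptions.
 */
static inline void example_ram_buffer_enable(void __iomem *io, u16 queue,
					     u32 start, u32 end)
{
	writeb(RB_RST_CLR, io + RB_ADDR(queue, RB_CTRL));	/* leave reset */
	writel(start, io + RB_ADDR(queue, RB_START));
	writel(end, io + RB_ADDR(queue, RB_END));
	writel(start, io + RB_ADDR(queue, RB_WP));		/* empty buffer: WP == RP */
	writel(start, io + RB_ADDR(queue, RB_RP));
	writeb(RB_ENA_OP_MD, io + RB_ADDR(queue, RB_CTRL));	/* enable operation mode */
}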
*/ + TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ + + TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ + TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ + TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ + + /* Descriptor Poll Timer Registers */ + B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ + B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ + B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ + + B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ + + /* Time Stamp Timer Registers (YUKON only) */ + GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ + GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ + GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ +}; + + +enum { + LINKLED_OFF = 0x01, + LINKLED_ON = 0x02, + LINKLED_LINKSYNC_OFF = 0x04, + LINKLED_LINKSYNC_ON = 0x08, + LINKLED_BLINK_OFF = 0x10, + LINKLED_BLINK_ON = 0x20, +}; + +/* GMAC and GPHY Control Registers (YUKON only) */ +enum { + GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ + GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */ + GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */ + GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */ + GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + + WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */ + + WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ + WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ + WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ + WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ + WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ + +/* WOL Pattern Length Registers (YUKON only) */ + + WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ + WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + + WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ + WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ +}; +#define WOL_REGS(port, x) (x + (port)*0x80) + +enum { + WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ + WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ +}; +#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400) + +enum { + BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */ + BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ + BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */ + BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */ +}; + +/* + * Receive Frame Status Encoding + */ +enum { + XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */ + XMR_FS_LEN_SHIFT = 18, + XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/ + XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/ + XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */ + XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */ + XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */ + + XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */ + XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. 
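/*
 * Usage sketch: the WOL block and its pattern RAM are replicated per
 * port; WOL_REGS() and WOL_PATT_RAM_BASE() above resolve the per-port
 * copies.  The readw() accessor on the mapped register window is an
 * assumption.
 */
static inline u16 example_wol_status(void __iomem *io, int port)
{
	return readw(io + WOL_REGS(port, WOL_CTRL_STAT));
}

static inline u32 example_wol_pattern_ram(int port)
{
	return WOL_PATT_RAM_BASE(port);	/* 0x1000 for port 0, 0x1400 for port 1 */
}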
Error */ + XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */ + XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */ + XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */ + XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */ + XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */ + XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */ + XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */ + XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */ + XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */ + XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */ + +/* + * XMR_FS_ERR will be set if + * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT, + * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR + * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue + * XMR_FS_ERR unless the corresponding bit in the Receive Command + * Register is set. + */ +}; + +/* +,* XMAC-PHY Registers, indirect addressed over the XMAC + */ +enum { + PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */ + PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */ + PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + + PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */ + PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */ +}; +/* + * Broadcom-PHY Registers, indirect addressed over XMAC + */ +enum { + PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Broadcom-specific registers */ + PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */ + PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */ + PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */ + PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */ + PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */ + + PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */ + PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */ + PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */ + PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */ +}; + +/* + * Marvel-PHY Registers, indirect addressed over GMAC + */ +enum { + PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. 
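/*
 * Usage sketch: decoding an XMAC receive frame status word with the
 * XMR_FS_* bits above -- extract the frame length and test the summary
 * error bit.  Only the bit and shift definitions come from this header.
 */
static inline unsigned int example_rx_frame_len(u32 status)
{
	return (status & XMR_FS_LEN) >> XMR_FS_LEN_SHIFT;
}

static inline int example_rx_frame_bad(u32 status)
{
	/* XMR_FS_ERR summarises FCS, length, runt, framing and carrier errors */
	return (status & XMR_FS_ERR) != 0;
}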
Advertisement */ + PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Marvel-specific registers */ + PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ + PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ + PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ + PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ + PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ + PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ + PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ + PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ + PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ + PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ + PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ + PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */ + PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ + PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ + PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ + PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ + PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ + PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ + PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ + PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ +}; + +enum { + PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ + PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ + PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ + PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ + PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ + PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ + PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ + PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ + PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ + PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ +}; + +enum { + PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ + PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ + PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ +}; + +enum { + PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ + + PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ + PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ + PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ + PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ + PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ + PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ + PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ +}; + +enum { + PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ + PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ + PHY_I1_REV_MSK = 0xf, /* Bit 3.. 
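/*
 * Usage sketch: composing PHY control register values from the PHY_CT_*
 * bits above, e.g. to restart auto-negotiation or to force 1000/full.
 * How the value reaches the PHY (indirect MDIO access through the XMAC
 * or GMAC) is not shown and left as an assumption.
 */
static inline u16 example_phy_ctrl_restart_aneg(void)
{
	return PHY_CT_ANE | PHY_CT_RE_CFG;	/* enable and restart auto-negotiation */
}

static inline u16 example_phy_ctrl_force_1000fd(void)
{
	return PHY_CT_SP1000 | PHY_CT_DUP_MD;	/* forced 1000 Mbps, full duplex */
}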
0: Revision Number */ +}; + +/* different Broadcom PHY Ids */ +enum { + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ +}; + +/* Advertisement register bits */ +enum { + PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ + + PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ + PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ + PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ + PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ + PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ + PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ + PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ + PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ + PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ + PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, + PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | + PHY_AN_100HALF | PHY_AN_100FULL, +}; + +/* Xmac Specific */ +enum { + PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ + + PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */ + PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */ + PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ +}; + +/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ +enum { + PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */ + PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */ + PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */ + PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */ +}; + + +/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ +enum { + PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */ + PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */ +}; + +/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/ +enum { + PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */ + PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */ + PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */ + PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */ + PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */ +}; + +/* Remote Fault Bits (PHY_X_AN_RFB) encoding */ +enum { + X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */ + X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */ + X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */ + X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */ +}; + +/* Broadcom-Specific */ +/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */ + PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */ + PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */ + PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */ + PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */ +}; + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** 
PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ + PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/ +enum { + PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */ + PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */ + PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */ + PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */ +}; + +/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ +enum { + PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */ + PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */ + PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */ + PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */ + PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */ + PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */ + PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */ + PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */ + PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */ + PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */ + PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */ + PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */ + PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */ + PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */ + PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */ + PHY_B_PEC_HIGH_LA = 1<<0, /* Bit 0: GMII FIFO Elasticy */ +}; + +/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/ +enum { + PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */ + PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */ + PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */ + PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. 
*/ + PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */ + PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */ + PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */ + PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */ + PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */ + PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */ + PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */ + PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */ + PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */ + PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ +}; + +/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +enum { + PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */ + + PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */ + PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */ +}; + + +/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ +enum { + PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ + +/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ + PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */ + PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */ + +/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ + PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */ + PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */ + PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */ + /* Bit 11: reserved */ + PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */ + /* Bit 9.. 8: reserved */ + PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */ + /* Bit 6: reserved */ + PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */ + /* Bit 4: reserved */ + PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */ +}; + +/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ +enum { + PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */ + PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */ + PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */ + PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */ + PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */ + PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */ + PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */ + PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */ + PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */ + PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */ + PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */ + PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */ + PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */ + PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */ +}; +#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) + +/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +enum { + PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */ + PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */ + PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */ + PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */ + PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */ + PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */ + PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */ + PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. 
HCD */ + PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */ + PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */ + PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */ + PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */ + PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */ + PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ + PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ +}; +#define PHY_B_DEF_MSK \ + (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \ + PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE)) + +/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ +enum { + PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */ + PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */ + PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */ + PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */ +}; +/* + * Resolved Duplex mode and Capabilities (Aux Status Summary Reg) + */ +enum { + PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */ + PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ +}; + +/** Marvell-Specific */ +enum { + PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ + PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ + PHY_M_AN_RF = 1<<13, /* Remote Fault */ + + PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ + PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ + PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ + PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ + PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ + PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ + PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ + PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ +}; + +/* special defines for FIBER (88E1011S only) */ +enum { + PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ + PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ + PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 10000Base-X Half Duplex */ + PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 10000Base-X Full Duplex */ +}; + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +enum { + PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ + PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ + PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ + PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */ +}; + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */ + PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ + PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ + PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ + PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ + PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ +}; + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +enum { + PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ + PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ + PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ + PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ + PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ + PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ + PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. 
Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. 
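/*
 * Usage sketch: decoding the Marvell PHY specific status word
 * (PHY_MARV_PHY_STAT) with the PHY_M_PS_* bits above.  Returns the link
 * speed in Mbps, or 0 while the link is down or speed/duplex are not yet
 * resolved.
 */
static inline int example_marv_link_speed(u16 phy_stat)
{
	if (!(phy_stat & PHY_M_PS_LINK_UP) || !(phy_stat & PHY_M_PS_SPDUP_RES))
		return 0;

	switch (phy_stat & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return 1000;
	case PHY_M_PS_SPEED_100:
		return 100;
	default:
		return 10;
	}
}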
Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE | + PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR, + + PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */}; + +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */ + +#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */ + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 
3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +enum { + PULS_NO_STR = 0, /* no pulse stretching */ + PULS_21MS = 1, /* 21 ms to 42 ms */ + PULS_42MS = 2, /* 42 ms to 84 ms */ + PULS_84MS = 3, /* 84 ms to 170 ms */ + PULS_170MS = 4, /* 170 ms to 340 ms */ + PULS_340MS = 5, /* 340 ms to 670 ms */ + PULS_670MS = 6, /* 670 ms to 1.3 s */ + PULS_1300MS = 7, /* 1.3 s to 2.7 s */ +}; + + +enum { + BLINK_42MS = 0, /* 42 ms */ + BLINK_84MS = 1, /* 84 ms */ + BLINK_170MS = 2, /* 170 ms */ + BLINK_340MS = 3, /* 340 ms */ + BLINK_670MS = 4, /* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + /* Bit 13..12: reserved */ +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + /* Bit 9.. 4: reserved (88E1011 only) */ + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ +enum { + PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */ + PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */ + /* (88E1111 only) */ + PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */ + PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */ + /* (88E1111 only) */ + PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */ +}; + +/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ +enum { + CABD_STAT_NORMAL= 0, + CABD_STAT_SHORT = 1, + CABD_STAT_OPEN = 2, + CABD_STAT_FAIL = 3, +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 
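/*
 * Usage sketch: composing a value for the manual LED override register
 * (PHY_MARV_LED_OVER) that forces the link/activity LEDs on or off,
 * using the PHY_M_LED_MO_*() field macros and MO_LED_* codes above.
 */
static inline u16 example_marv_led_force(int on)
{
	u16 mode = on ? MO_LED_ON : MO_LED_OFF;

	return PHY_M_LED_MO_DUP(mode)  | PHY_M_LED_MO_10(mode) |
	       PHY_M_LED_MO_100(mode)  | PHY_M_LED_MO_1000(mode) |
	       PHY_M_LED_MO_RX(mode)   | PHY_M_LED_MO_TX(mode);
}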
4: LED1 Mask (ACT) */ + PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */ +}; + +#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK) +#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK) +#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK) + +enum { + LED_PAR_CTRL_COLX = 0x00, + LED_PAR_CTRL_ERROR = 0x01, + LED_PAR_CTRL_DUPLEX = 0x02, + LED_PAR_CTRL_DP_COL = 0x03, + LED_PAR_CTRL_SPEED = 0x04, + LED_PAR_CTRL_LINK = 0x05, + LED_PAR_CTRL_TX = 0x06, + LED_PAR_CTRL_RX = 0x07, + LED_PAR_CTRL_ACT = 0x08, + LED_PAR_CTRL_LNK_RX = 0x09, + LED_PAR_CTRL_LNK_AC = 0x0a, + LED_PAR_CTRL_ACT_BL = 0x0b, + LED_PAR_CTRL_TX_BL = 0x0c, + LED_PAR_CTRL_RX_BL = 0x0d, + LED_PAR_CTRL_COL_BL = 0x0e, + LED_PAR_CTRL_INACT = 0x0f +}; + +/*****,PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ +enum { + PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */ + PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */ + PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ +}; + + +/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ +enum { + PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */ + PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */ + PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */ + PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */ +}; + +#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK) +#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK) +#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK) +#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK) + +/* GMAC registers */ +/* Port Registers */ +enum { + GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ + GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ + GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ + GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ + GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ + GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ + GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ +/* Source Address Registers */ + GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ + GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ + GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ + GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ + GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ + GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ + GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ + GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ + GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ + GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ + GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ + GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ + GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ + GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ + GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ + GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. 
IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ + GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ + GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ + GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +}; + +/* MIB Counters */ +#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ +#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +enum { + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ + GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ + GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ + GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */ + /* GM_MIB_CNT_BASE + 40: reserved */ + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ + GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ + GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ + GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ + GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ + GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ + GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */ + /* GM_MIB_CNT_BASE + 168: reserved */ + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */ + /* GM_MIB_CNT_BASE + 184: reserved */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. 
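/*
 * Usage sketch: GMAC registers are 16 bits wide, so a 32 bit MIB counter
 * is assumed to be read as a low word at the offset listed above plus a
 * high word at offset + 4 (see the GM_MIB_CNT_BASE comment).  The
 * per-port GMAC bases (BASE_GMAC_1/BASE_GMAC_2) complete the address;
 * the readw() accessor and the access width are assumptions.
 */
static inline u32 example_gmac_mib_read(void __iomem *io, int port, unsigned int reg)
{
	void __iomem *p = io + (port ? BASE_GMAC_2 : BASE_GMAC_1) + reg;

	return (u32)readw(p) | ((u32)readw(p + 4) << 16);
}
/* e.g. example_gmac_mib_read(io, 0, GM_RXF_UC_OK) -- unicast frames received OK */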
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */ +}; + +/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. 
*/ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) +#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 /* late collision after 64 byte */ + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define DATA_BLIND_DEF 0x04 + +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) +#define IPG_DATA_DEF 0x1e + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 
6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ + GMR_FS_LEN_SHIFT = 16, + GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ + GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */ + GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */ + +/* + * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) + */ + GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | + GMR_FS_JABBER, +/* Rx GMAC FIFO Flush Mask (default) */ + RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | + GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */ + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ +}; + + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO 
Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */ + GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */ + GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */ + GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */ + GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */ + GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */ + GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */ + GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */ + GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */ + GPC_ANEG_0 = 1<<19, /* ANEG[0] */ + GPC_ENA_XC = 1<<18, /* Enable MDI crossover */ + GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */ + GPC_ANEG_3 = 1<<16, /* ANEG[3] */ + GPC_ANEG_2 = 1<<15, /* ANEG[2] */ + GPC_ANEG_1 = 1<<14, /* ANEG[1] */ + GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */ + GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */ + GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */ + GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */ + GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */ + GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */ + /* Bits 7..2: reserved */ + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0) + +/* forced speed and duplex mode (don't mix with other ANEG bits) */ +#define GPC_FRC10MBIT_HALF 0 +#define GPC_FRC10MBIT_FULL GPC_ANEG_0 +#define GPC_FRC100MBIT_HALF GPC_ANEG_1 +#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) + +/* auto-negotiation with limited advertised speeds */ +/* mix only with master/slave settings (for copper) */ +#define GPC_ADV_1000_HALF GPC_ANEG_2 +#define GPC_ADV_1000_FULL GPC_ANEG_3 +#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) + +/* master/slave settings */ +/* only for copper with 1000 Mbps */ +#define GPC_FORCE_MASTER 0 +#define GPC_FORCE_SLAVE GPC_ANEG_0 +#define GPC_PREF_MASTER GPC_ANEG_1 +#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR) + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ + /* Bits 15.. 
2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + +#define WOL_CTL_DEFAULT \ + (WOL_CTL_DIS_PME_ON_LINK_CHG | \ + WOL_CTL_DIS_PME_ON_PATTERN | \ + WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ + WOL_CTL_DIS_LINK_CHG_UNIT | \ + WOL_CTL_DIS_PATTERN_UNIT | \ + WOL_CTL_DIS_MAGIC_PKT_UNIT) + +/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ +#define WOL_CTL_PATT_ENA(x) (1 << (x)) + + +/* XMAC II registers */ +enum { + XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */ + XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */ + XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/ + XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */ + XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */ + XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */ + XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */ + XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */ + XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */ + XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */ + XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */ + XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */ + XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */ + XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */ + XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */ + XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */ + XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */ + XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */ + XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */ + XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */ + XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */ + XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */ + XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */ + XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/ + XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */ + + XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */ +#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3)) +}; + +enum { + XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */ + XM_SA = 0x0108, /* NA reg r/w Station Address Register */ + XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */ + XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */ + XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */ + XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */ + XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */ + XM_MODE = 0x0124, /* 32 bit r/w Mode Register */ + XM_LSA = 0x0128, /* NA reg r/o Last Source Register */ + XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */ + XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */ + 
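/*
+ * XMAC statistics command/event registers and counters. The counters
+ * below (XM_TXF_OK .. XM_RXF_MAX_SZ) are 32 bit r/o values; the helpers
+ * at the end of this file access the XMAC 16 bits at a time, so a counter
+ * is normally fetched with xm_read32(), which combines two 16 bit reads.
+ * Rough, illustrative use only:
+ *	rx_ok = xm_read32(hw, port, XM_RXF_OK);
+ */
+ 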
XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */ + XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */ + XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */ + XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */ + XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */ + XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Conuter */ + XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/ + XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */ + XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */ + XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */ + XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */ + XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */ + XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */ + XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */ + XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */ + XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */ + XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */ + XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */ + XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */ + XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */ + XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferall Counter */ + XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */ + XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */ + XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */ + XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */ + XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */ + XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */ + XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */ + XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/ + XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/ + XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */ + XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */ + XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/ + XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */ + XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */ + XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */ + XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */ + XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */ + XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */ + XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. 
MAC Opcode*/ + XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */ + XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */ + XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */ + XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */ + XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */ + XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */ + XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */ + XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */ + XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */ + XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */ + XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */ + XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */ + XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/ + XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */ + XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */ + XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */ + XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */ + XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */ + XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/ + XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/ +}; + +/* XM_MMU_CMD 16 bit r/w MMU Command Register */ +enum { + XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */ + XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */ + XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */ + XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */ + XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */ + XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */ + XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */ + XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */ + XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */ + XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */ + XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */ + XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */ +}; + + +/* XM_TX_CMD 16 bit r/w Transmit Command Register */ +enum { + XM_TX_BK2BK = 1<<6, /* Bit 6: Ignor Carrier Sense (Tx Bk2Bk)*/ + XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */ + XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */ + XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */ + XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */ + XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */ + XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */ +}; + +/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */ +#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ + + +/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */ +#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ + + +/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */ +#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ + + +/* XM_RX_CMD 16 bit r/w Receive Command Register */ +enum { + XM_RX_LENERR_OK = 1<<8, /* Bit 8 don't set Rx Err bit for */ + /* inrange error packets */ + XM_RX_BIG_PK_OK = 1<<7, /* Bit 7 don't set Rx Err bit for */ + /* jumbo packets */ + XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. 
type field with IPG */ + XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */ + XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */ + XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */ + XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */ + XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */ + XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */ +}; + + +/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ +enum { + XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ + XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */ + XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */ + XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */ + XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */ +}; + + +/* XM_IMSK 16 bit r/w Interrupt Mask Register */ +/* XM_ISRC 16 bit r/o Interrupt Status Register */ +enum { + XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */ + XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */ + XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */ + XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */ + XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */ + XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */ + XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */ + XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */ + XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */ + XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */ + XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */ + XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */ + XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ + XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ + XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ + + XM_IMSK_DISABLE = 0xffff, +}; + +/* XM_HW_CFG 16 bit r/w Hardware Config Register */ +enum { + XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */ + XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/ + XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */ +}; + + +/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ +/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ +#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ + +/* XM_TX_THR 16 bit r/w Tx Request Threshold */ +/* XM_HT_THR 16 bit r/w Host Request Threshold */ +/* XM_RX_THR 16 bit r/w Rx Request Threshold */ +#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ + + +/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ +enum { + XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */ + XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */ + XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */ + XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */ + XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */ + XM_ST_BURST = 1<<9, /* Bit 9: p. 
xmitted in burst md*/ + XM_ST_DEFER = 1<<8, /* Bit 8: packet was defered */ + XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */ + XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */ + XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */ + XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */ + XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */ + XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */ + XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */ + XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */ +}; + +/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */ +/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */ +#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */ + + +/* XM_DEV_ID 32 bit r/o Device ID Register */ +#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ +#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ + + +/* XM_MODE 32 bit r/w Mode Register */ +enum { + XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */ + XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */ + /* extern generated */ + XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */ + XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */ + /* intern generated */ + XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */ + XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */ + XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */ + XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */ + XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */ + /* intern generated */ + XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */ + /* intern generated */ + XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */ + XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */ + XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */ + XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */ + XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */ + XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */ + XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */ + XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */ + XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */ + XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */ + XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */ + XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */ + XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */ + XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */ + XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */ + XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */ + XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */ +}; + +#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) +#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ + XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA) + +/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ +enum { + XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */ + XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */ + XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */ + XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */ + XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */ + XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */ +}; + + +/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ +/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ +enum { + XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ + XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/ + XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/ + XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/ + 
XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */ + XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */ + XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */ + XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */ + XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */ + XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */ + XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/ + XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */ + XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/ + XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */ + XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */ + XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */ + XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */ + XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */ + XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */ + XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */ + XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/ + XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */ + XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ + XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ + XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/ + XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */ + XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */ + XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/ + XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/ + XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */ +}; + +#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) + +/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */ +/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */ +enum { + XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/ + XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/ + XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/ + XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/ + XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */ + XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */ + XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */ + XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */ + XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/ + XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */ + XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferall Cnt Ov */ + XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */ + XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */ + XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx abo dueto Ex Col Ov*/ + XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */ + XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */ + XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/ + XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ + XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */ + XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */ + XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */ + XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */ + XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */ + XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/ + XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/ + XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */ +}; + +#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV) + +struct skge_rx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 timestamp; + u16 csum2; + u16 csum1; + u16 csum2_start; + u16 csum1_start; +}; + +struct skge_tx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 csum_offs; + u16 
csum_write; + u16 csum_start; + u32 rsvd; +}; + +struct skge_element { + struct skge_element *next; + void *desc; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct skge_ring { + struct skge_element *to_clean; + struct skge_element *to_use; + struct skge_element *start; + unsigned long count; +}; + + +struct skge_hw { + void __iomem *regs; + struct pci_dev *pdev; + spinlock_t hw_lock; + u32 intr_mask; + struct net_device *dev[2]; + + u8 chip_id; + u8 chip_rev; + u8 copper; + u8 ports; + u8 phy_type; + + u32 ram_size; + u32 ram_offset; + u16 phy_addr; + spinlock_t phy_lock; + struct tasklet_struct phy_task; + + char irq_name[0]; /* skge@pci:000:04:00.0 */ +}; + +enum pause_control { + FLOW_MODE_NONE = 1, /* No Flow-Control */ + FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */ + FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ + FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or + * just the remote station may send PAUSE + */ +}; + +enum pause_status { + FLOW_STAT_INDETERMINATED=0, /* indeterminated */ + FLOW_STAT_NONE, /* No Flow Control */ + FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */ + FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */ + FLOW_STAT_SYMMETRIC, /* Both station may send PAUSE */ +}; + + +struct skge_port { + struct skge_hw *hw; + struct net_device *netdev; + struct napi_struct napi; + int port; + u32 msg_enable; + + struct skge_ring tx_ring; + + struct skge_ring rx_ring ____cacheline_aligned_in_smp; + unsigned int rx_buf_size; + + struct timer_list link_timer; + enum pause_control flow_control; + enum pause_status flow_status; + u8 blink_on; + u8 wol; + u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ + u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ + u16 speed; /* SPEED_1000, SPEED_100, ... */ + u32 advertising; + + void *mem; /* PCI memory for rings */ + dma_addr_t dma; + unsigned long mem_size; +#ifdef CONFIG_SKGE_DEBUG + struct dentry *debugfs; +#endif +}; + + +/* Register accessor for memory mapped device */ +static inline u32 skge_read32(const struct skge_hw *hw, int reg) +{ + return readl(hw->regs + reg); +} + +static inline u16 skge_read16(const struct skge_hw *hw, int reg) +{ + return readw(hw->regs + reg); +} + +static inline u8 skge_read8(const struct skge_hw *hw, int reg) +{ + return readb(hw->regs + reg); +} + +static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val) +{ + writel(val, hw->regs + reg); +} + +static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val) +{ + writew(val, hw->regs + reg); +} + +static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val) +{ + writeb(val, hw->regs + reg); +} + +/* MAC Related Registers inside the device. 
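+ * Each port has its own copy of these registers; the SK_REG() macro below
+ * forms the byte offset for a given port (the (port)<<7 term steps to the
+ * second port's register block), while SK_XMAC_REG() and SK_GMAC_REG() map
+ * XMAC/GMAC register numbers into the matching per-port address window.
+ * Rough, illustrative use only (mirroring how the drivers call them):
+ *	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);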
*/ +#define SK_REG(port,reg) (((port)<<7)+(u16)(reg)) +#define SK_XMAC_REG(port, reg) \ + ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) + +static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg) +{ + u32 v; + v = skge_read16(hw, SK_XMAC_REG(port, reg)); + v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16; + return v; +} + +static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_XMAC_REG(port,reg)); +} + +static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff); + skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16); +} + +static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v); +} + +static inline void xm_outhash(const struct skge_hw *hw, int port, int reg, + const u8 *hash) +{ + xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8)); + xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8)); + xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8)); + xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8)); +} + +static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8)); + xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8)); + xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8)); +} + +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) + +static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg) +{ + return (u32) skge_read16(hw, SK_GMAC_REG(port,reg)) + | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16); +} + +static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +#endif diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c new file mode 100644 index 000000000..d9f449883 --- /dev/null +++ b/drivers/net/ethernet/marvell/sky2.c @@ -0,0 +1,5245 @@ +/* + * New driver for Marvell Yukon 2 chipset. + * Based on earlier sk98lin, and skge driver. + * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. + * + * Copyright (C) 2005 Stephen Hemminger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "sky2.h" + +#define DRV_NAME "sky2" +#define DRV_VERSION "1.30" + +/* + * The Yukon II chipset takes 64 bit command blocks (called list elements) + * that are organized into three (receive, transmit, status) different rings + * similar to Tigon3. + */ + +#define RX_LE_SIZE 1024 +#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) +#define RX_MAX_PENDING (RX_LE_SIZE/6 - 2) +#define RX_DEF_PENDING RX_MAX_PENDING + +/* This is the worst case number of transmit list elements for a single skb: + VLAN:GSO + CKSUM + Data + skb_frags * DMA */ +#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) +#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) +#define TX_MAX_PENDING 1024 +#define TX_DEF_PENDING 63 + +#define TX_WATCHDOG (5 * HZ) +#define NAPI_WEIGHT 64 +#define PHY_RETRIES 1000 + +#define SKY2_EEPROM_MAGIC 0x9955aabb + +#define RING_NEXT(x, s) (((x)+1) & ((s)-1)) + +static const u32 default_msg = + NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK + | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR + | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static int copybreak __read_mostly = 128; +module_param(copybreak, int, 0); +MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +static int disable_msi = 0; +module_param(disable_msi, int, 0); +MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + +static int legacy_pme = 0; +module_param(legacy_pme, int, 0); +MODULE_PARM_DESC(legacy_pme, "Legacy power management"); + +static const struct pci_device_id sky2_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 
88EC033 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */ + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, sky2_id_table); + +/* Avoid conditionals by using array */ +static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; +static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; + +static void sky2_set_multicast(struct net_device *dev); +static irqreturn_t sky2_intr(int irq, void *dev_id); + +/* Access to PHY via serial interconnect */ +static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg)); + + for (i = 0; i < PHY_RETRIES; i++) { + u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); + if (ctrl == 0xffff) + goto io_error; + + if (!(ctrl & GM_SMI_CT_BUSY)) + return 0; + + udelay(10); + } + + dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); + return -ETIMEDOUT; + +io_error: + dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL); + if (ctrl == 0xffff) + goto io_error; + + if (ctrl & GM_SMI_CT_RD_VAL) { + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; + } + + udelay(10); + } + + dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name); + return -ETIMEDOUT; +io_error: + dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); + return -EIO; +} + +static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) +{ + u16 v; + __gm_phy_read(hw, port, reg, &v); + return v; +} + + +static void sky2_power_on(struct sky2_hw *hw) +{ + /* switch power to VCC (WA for VAUX problem) */ + sky2_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* disable Core Clock Division, */ + sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); + + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + /* enable bits are inverted */ + sky2_write8(hw, B2_Y2_CLK_GATE, + 
Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | + Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | + Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); + else + sky2_write8(hw, B2_Y2_CLK_GATE, 0); + + if (hw->flags & SKY2_HW_ADV_POWER_CTL) { + u32 reg; + + sky2_pci_write32(hw, PCI_DEV_REG3, 0); + + reg = sky2_pci_read32(hw, PCI_DEV_REG4); + /* set all bits to 0 except bits 15..12 and 8 */ + reg &= P_ASPM_CONTROL_MSK; + sky2_pci_write32(hw, PCI_DEV_REG4, reg); + + reg = sky2_pci_read32(hw, PCI_DEV_REG5); + /* set all bits to 0 except bits 28 & 27 */ + reg &= P_CTL_TIM_VMAIN_AV_MSK; + sky2_pci_write32(hw, PCI_DEV_REG5, reg); + + sky2_pci_write32(hw, PCI_CFG_REG_1, 0); + + sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); + + /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ + reg = sky2_read32(hw, B2_GP_IO); + reg |= GLB_GPIO_STAT_RACE_DIS; + sky2_write32(hw, B2_GP_IO, reg); + + sky2_read32(hw, B2_GP_IO); + } + + /* Turn on "driver loaded" LED */ + sky2_write16(hw, B0_CTST, Y2_LED_STAT_ON); +} + +static void sky2_power_aux(struct sky2_hw *hw) +{ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + sky2_write8(hw, B2_Y2_CLK_GATE, 0); + else + /* enable bits are inverted */ + sky2_write8(hw, B2_Y2_CLK_GATE, + Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | + Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | + Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS); + + /* switch power to VAUX if supported and PME from D3cold */ + if ( (sky2_read32(hw, B0_CTST) & Y2_VAUX_AVAIL) && + pci_pme_capable(hw->pdev, PCI_D3cold)) + sky2_write8(hw, B0_POWER_CTRL, + (PC_VAUX_ENA | PC_VCC_ENA | + PC_VAUX_ON | PC_VCC_OFF)); + + /* turn off "driver loaded LED" */ + sky2_write16(hw, B0_CTST, Y2_LED_STAT_OFF); +} + +static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) +{ + u16 reg; + + /* disable all GMAC IRQ's */ + sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +/* flow control to advertise bits */ +static const u16 copper_fc_adv[] = { + [FC_NONE] = 0, + [FC_TX] = PHY_M_AN_ASP, + [FC_RX] = PHY_M_AN_PC, + [FC_BOTH] = PHY_M_AN_PC | PHY_M_AN_ASP, +}; + +/* flow control to advertise bits when using 1000BaseX */ +static const u16 fiber_fc_adv[] = { + [FC_NONE] = PHY_M_P_NO_PAUSE_X, + [FC_TX] = PHY_M_P_ASYM_MD_X, + [FC_RX] = PHY_M_P_SYM_MD_X, + [FC_BOTH] = PHY_M_P_BOTH_MD_X, +}; + +/* flow control to GMA disable bits */ +static const u16 gm_fc_disable[] = { + [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS, + [FC_TX] = GM_GPCR_FC_RX_DIS, + [FC_RX] = GM_GPCR_FC_TX_DIS, + [FC_BOTH] = 0, +}; + + +static void sky2_phy_init(struct sky2_hw *hw, unsigned port) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; + + if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && + !(hw->flags & SKY2_HW_NEWER_PHY)) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */ + if (hw->chip_id == CHIP_ID_YUKON_EC) + /* set downshift counter to 3x and enable downshift */ + ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA; + else + /* set master & slave downshift counter to 
1x */ + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + if (sky2_is_copper(hw)) { + if (!(hw->flags & SKY2_HW_GIGABIT)) { + /* enable automatic crossover */ + ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1; + + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + u16 spec; + + /* Enable Class A driver for FE+ A0 */ + spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2); + spec |= PHY_M_FESC_SEL_CL_A; + gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); + } + } else { + /* disable energy detect */ + ctrl &= ~PHY_M_PC_EN_DET_MSK; + + /* enable automatic crossover */ + ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); + + /* downshift on PHY 88E1112 and 88E1149 is changed */ + if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && + (hw->flags & SKY2_HW_NEWER_PHY)) { + /* set downshift counter to 3x and enable downshift */ + ctrl &= ~PHY_M_PC_DSC_MSK; + ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; + } + } + } else { + /* workaround for deviation #4.88 (CRC errors) */ + /* disable Automatic Crossover */ + + ctrl &= ~PHY_M_PC_MDIX_MSK; + } + + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* special setup for PHY 88E1112 Fiber */ + if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl &= ~PHY_M_MAC_MD_MSK; + ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + if (hw->pmd_type == 'P') { + /* select page 1 to access Fiber registers */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1); + + /* for SFP-module set SIGDET polarity to low */ + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_FIB_SIGD_POL; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + } + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + } + + ctrl = PHY_CT_RESET; + ct1000 = 0; + adv = PHY_AN_CSMA; + reg = 0; + + if (sky2->flags & SKY2_FLAG_AUTO_SPEED) { + if (sky2_is_copper(hw)) { + if (sky2->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (sky2->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (sky2->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (sky2->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (sky2->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (sky2->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + } else { /* special defines for FIBER (88E1040S only) */ + if (sky2->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (sky2->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + /* Disable auto update for duplex flow control and duplex */ + reg |= GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_SPD_DIS; + + switch (sky2->speed) { + case SPEED_1000: + ctrl |= PHY_CT_SP1000; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + reg |= GM_GPCR_SPEED_100; + break; + } + + if (sky2->duplex == DUPLEX_FULL) { + reg |= GM_GPCR_DUP_FULL; + ctrl |= PHY_CT_DUP_MD; + } else if (sky2->speed < SPEED_1000) + sky2->flow_mode = FC_NONE; + } + + if (sky2->flags 
& SKY2_FLAG_AUTO_PAUSE) { + if (sky2_is_copper(hw)) + adv |= copper_fc_adv[sky2->flow_mode]; + else + adv |= fiber_fc_adv[sky2->flow_mode]; + } else { + reg |= GM_GPCR_AU_FCT_DIS; + reg |= gm_fc_disable[sky2->flow_mode]; + + /* Forward pause packets to GMAC? */ + if (sky2->flow_mode & FC_RX) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + else + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + + if (hw->flags & SKY2_HW_GIGABIT) + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Setup Phy LED's */ + ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); + ledover = 0; + + switch (hw->chip_id) { + case CHIP_ID_YUKON_FE: + /* on 88E3082 these bits are at 11..9 (shifted left) */ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1; + + ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR); + + /* delete ACT LED control bits */ + ctrl &= ~PHY_M_FELP_LED1_MSK; + /* change ACT LED control to blink mode */ + ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL); + gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); + break; + + case CHIP_ID_YUKON_FE_P: + /* Enable Link Partner Next Page */ + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_ENA_LIP_NP; + + /* disable Energy Detect and enable scrambler */ + ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB); + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ + ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) | + PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) | + PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED); + + gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl); + break; + + case CHIP_ID_YUKON_XL: + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* select page 3 to access LED control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + /* set LED Function Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */ + + /* set Polarity Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_STAT, + (PHY_M_POLC_LS1_P_MIX(4) | + PHY_M_POLC_IS0_P_MIX(4) | + PHY_M_POLC_LOS_CTRL(2) | + PHY_M_POLC_INIT_CTRL(2) | + PHY_M_POLC_STA1_CTRL(2) | + PHY_M_POLC_STA0_CTRL(2))); + + /* restore page register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + break; + + case CHIP_ID_YUKON_EC_U: + case CHIP_ID_YUKON_EX: + case CHIP_ID_YUKON_SUPR: + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + + /* select page 3 to access LED control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + /* set LED Function Control register */ + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */ + PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */ + PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */ + PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */ + + /* set Blink Rate in LED Timer Control Register */ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, + ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS)); + /* restore page register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + break; + + default: + /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; + + /* turn off the Rx LED (LED_RX) */ + ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); + } + + if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == 
CHIP_ID_YUKON_UL_2) { + /* apply fixes in PHY AFE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255); + + /* increase differential signal amplitude in 10BASE-T */ + gm_phy_write(hw, port, 0x18, 0xaa99); + gm_phy_write(hw, port, 0x17, 0x2011); + + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ + gm_phy_write(hw, port, 0x18, 0xa204); + gm_phy_write(hw, port, 0x17, 0x2002); + } + + /* set page register to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } else if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* apply workaround for integrated resistors calibration */ + gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); + gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); + } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { + /* apply fixes in PHY AFE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); + + /* apply RDAC termination workaround */ + gm_phy_write(hw, port, 24, 0x2800); + gm_phy_write(hw, port, 23, 0x2001); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } else if (hw->chip_id != CHIP_ID_YUKON_EX && + hw->chip_id < CHIP_ID_YUKON_SUPR) { + /* no effect on Yukon-XL */ + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); + + if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) || + sky2->speed == SPEED_100) { + /* turn on 100 Mbps LED (LED_LINK100) */ + ledover |= PHY_M_LED_MO_100(MO_LED_ON); + } + + if (ledover) + gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); + + } else if (hw->chip_id == CHIP_ID_YUKON_PRM && + (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) { + int i; + /* This a phy register setup workaround copied from vendor driver. */ + static const struct { + u16 reg, val; + } eee_afe[] = { + { 0x156, 0x58ce }, + { 0x153, 0x99eb }, + { 0x141, 0x8064 }, + /* { 0x155, 0x130b },*/ + { 0x000, 0x0000 }, + { 0x151, 0x8433 }, + { 0x14b, 0x8c44 }, + { 0x14c, 0x0f90 }, + { 0x14f, 0x39aa }, + /* { 0x154, 0x2f39 },*/ + { 0x14d, 0xba33 }, + { 0x144, 0x0048 }, + { 0x152, 0x2010 }, + /* { 0x158, 0x1223 },*/ + { 0x140, 0x4444 }, + { 0x154, 0x2f3b }, + { 0x158, 0xb203 }, + { 0x157, 0x2029 }, + }; + + /* Start Workaround for OptimaEEE Rev.Z0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb); + + gm_phy_write(hw, port, 1, 0x4099); + gm_phy_write(hw, port, 3, 0x1120); + gm_phy_write(hw, port, 11, 0x113c); + gm_phy_write(hw, port, 14, 0x8100); + gm_phy_write(hw, port, 15, 0x112a); + gm_phy_write(hw, port, 17, 0x1008); + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc); + gm_phy_write(hw, port, 1, 0x20b0); + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff); + + for (i = 0; i < ARRAY_SIZE(eee_afe); i++) { + /* apply AFE settings */ + gm_phy_write(hw, port, 17, eee_afe[i].val); + gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13); + } + + /* End Workaround for OptimaEEE */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + + /* Enable 10Base-Te (EEE) */ + if (hw->chip_id >= CHIP_ID_YUKON_PRM) { + reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, + reg | PHY_M_10B_TE_ENABLE); + } + } + + /* Enable phy interrupt on auto-negotiation complete (or link up) */ + if (sky2->flags & SKY2_FLAG_AUTO_SPEED) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); +} + +static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; +static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; + +static void sky2_phy_power_up(struct sky2_hw 
*hw, unsigned port) +{ + u32 reg1; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 &= ~phy_power[port]; + + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) + reg1 |= coma_mode[port]; + + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + sky2_pci_read32(hw, PCI_DEV_REG1); + + if (hw->chip_id == CHIP_ID_YUKON_FE) + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); + else if (hw->flags & SKY2_HW_ADV_POWER_CTL) + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); +} + +static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) +{ + u32 reg1; + u16 ctrl; + + /* release GPHY Control reset */ + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + + /* release GMAC reset */ + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + if (hw->flags & SKY2_HW_NEWER_PHY) { + /* select page 2 to access MAC control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + /* allow GMII Power Down */ + ctrl &= ~PHY_M_MAC_GMIF_PUP; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } + + /* setup General Purpose Control Register */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | + GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS | + GM_GPCR_AU_SPD_DIS); + + if (hw->chip_id != CHIP_ID_YUKON_EC) { + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + /* select page 2 to access MAC control register */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2); + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + /* enable Power Down */ + ctrl |= PHY_M_PC_POW_D_ENA; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + /* set page register back to 0 */ + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0); + } + + /* set IEEE compatible Power Down Mode (dev. 
#4.99) */ + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN); + } + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */ + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); +} + +/* configure IPG according to used link speed */ +static void sky2_set_ipg(struct sky2_port *sky2) +{ + u16 reg; + + reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE); + reg &= ~GM_SMOD_IPG_MSK; + if (sky2->speed > SPEED_100) + reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000); + else + reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); + gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg); +} + +/* Enable Rx/Tx */ +static void sky2_enable_rx_tx(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 reg; + + reg = gma_read16(hw, port, GM_GP_CTRL); + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); +} + +/* Force a renegotiation */ +static void sky2_phy_reinit(struct sky2_port *sky2) +{ + spin_lock_bh(&sky2->phy_lock); + sky2_phy_init(sky2->hw, sky2->port); + sky2_enable_rx_tx(sky2); + spin_unlock_bh(&sky2->phy_lock); +} + +/* Put device in state to listen for Wake On Lan */ +static void sky2_wol_init(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + enum flow_control save_mode; + u16 ctrl; + + /* Bring hardware out of reset */ + sky2_write16(hw, B0_CTST, CS_RST_CLR); + sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100 + * sky2_reset will re-enable on resume + */ + save_mode = sky2->flow_mode; + ctrl = sky2->advertising; + + sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); + sky2->flow_mode = FC_NONE; + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_up(hw, port); + sky2_phy_init(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + sky2->flow_mode = save_mode; + sky2->advertising = ctrl; + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), + sky2->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (sky2->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (sky2->wol & WAKE_MAGIC) + ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); + + /* Disable PiG firmware */ + sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF); + + /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */ + if (legacy_pme) { + u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 |= PCI_Y2_PME_LEGACY; + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + } + + /* block receiver */ + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + sky2_read32(hw, B0_CTST); +} + +static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) +{ + struct net_device 
*dev = hw->dev[port]; + + if ( (hw->chip_id == CHIP_ID_YUKON_EX && + hw->chip_rev != CHIP_REV_YU_EX_A0) || + hw->chip_id >= CHIP_ID_YUKON_FE_P) { + /* Yukon-Extreme B0 and further Extreme devices */ + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); + } else if (dev->mtu > ETH_DATA_LEN) { + /* set Tx GMAC FIFO Almost Empty Threshold */ + sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), + (ECU_JUMBO_WM << 16) | ECU_AE_THR); + + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS); + } else + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA); +} + +static void sky2_mac_init(struct sky2_hw *hw, unsigned port) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[port]); + u16 reg; + u32 rx_reg; + int i; + const u8 *addr = hw->dev[port]->dev_addr; + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + if (hw->chip_id == CHIP_ID_YUKON_XL && + hw->chip_rev == CHIP_REV_YU_XL_A0 && + port == 1) { + /* WA DEV_472 -- looks like crossed wires on port 2 */ + /* clear GMAC 1 Control reset */ + sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); + do { + sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET); + sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR); + } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL || + gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 || + gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0); + } + + sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + /* Enable Transmit FIFO Underrun */ + sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_up(hw, port); + sky2_phy_init(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4) + gma_read16(hw, port, i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | + TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); + + /* serial mode register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) | + GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000); + + if (hw->dev[port]->mtu > ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev == CHIP_REV_YU_EC_U_B1) + reg |= GM_NEW_FLOW_CTRL; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + + /* ignore counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Configure Rx MAC FIFO */ + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_FE_P) + rx_reg |= GMF_RX_OVER_ON; + + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg); + + if 
(hw->chip_id == CHIP_ID_YUKON_XL) { + /* Hardware errata - clear flush mask */ + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0); + } else { + /* Flush Rx MAC FIFO on any flow control or error */ + sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); + } + + /* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug */ + reg = RX_GMF_FL_THR_DEF + 1; + /* Another magic mystery workaround from sk98lin */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + reg = 0x178; + sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg); + + /* Configure Tx MAC FIFO */ + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); + + /* On chips without ram buffer, pause is controlled by MAC level */ + if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { + /* Pause threshold is scaled by 8 in bytes */ + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) + reg = 1568 / 8; + else + reg = 1024 / 8; + sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg); + sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8); + + sky2_set_tx_stfwd(hw, port); + } + + if (hw->chip_id == CHIP_ID_YUKON_FE_P && + hw->chip_rev == CHIP_REV_YU_FE2_A0) { + /* disable dynamic watermark */ + reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA)); + reg &= ~TX_DYN_WM_ENA; + sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg); + } +} + +/* Assign Ram Buffer allocation to queue */ +static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) +{ + u32 end; + + /* convert from K bytes to qwords used for hw register */ + start *= 1024/8; + space *= 1024/8; + end = start + space - 1; + + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + sky2_write32(hw, RB_ADDR(q, RB_START), start); + sky2_write32(hw, RB_ADDR(q, RB_END), end); + sky2_write32(hw, RB_ADDR(q, RB_WP), start); + sky2_write32(hw, RB_ADDR(q, RB_RP), start); + + if (q == Q_R1 || q == Q_R2) { + u32 tp = space - space/4; + + /* On receive queue's set the thresholds + * give receiver priority when > 3/4 full + * send pause when down to 2K + */ + sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); + sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); + + tp = space - 8192/8; + sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); + sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); + } else { + /* Enable store & forward on Tx queue's because + * Tx FIFO is only 1K on Yukon + */ + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); + sky2_read8(hw, RB_ADDR(q, RB_CTRL)); +} + +/* Setup Bus Memory Interface */ +static void sky2_qset(struct sky2_hw *hw, u16 q) +{ + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET); + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT); + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON); + sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT); +} + +/* Setup prefetch unit registers. 
This is the interface between + * hardware and driver list elements + */ +static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr, + dma_addr_t addr, u32 last) +{ + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), upper_32_bits(addr)); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), lower_32_bits(addr)); + sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last); + sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON); + + sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL)); +} + +static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) +{ + struct sky2_tx_le *le = sky2->tx_le + *slot; + + *slot = RING_NEXT(*slot, sky2->tx_ring_size); + le->ctrl = 0; + return le; +} + +static void tx_init(struct sky2_port *sky2) +{ + struct sky2_tx_le *le; + + sky2->tx_prod = sky2->tx_cons = 0; + sky2->tx_tcpsum = 0; + sky2->tx_last_mss = 0; + netdev_reset_queue(sky2->netdev); + + le = get_tx_le(sky2, &sky2->tx_prod); + le->addr = 0; + le->opcode = OP_ADDR64 | HW_OWNER; + sky2->tx_last_upper = 0; +} + +/* Update chip's next pointer */ +static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) +{ + /* Make sure write' to descriptors are complete before we tell hardware */ + wmb(); + sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); + + /* Synchronize I/O on since next processor may write to tail */ + mmiowb(); +} + + +static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) +{ + struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; + sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); + le->ctrl = 0; + return le; +} + +static unsigned sky2_get_rx_threshold(struct sky2_port *sky2) +{ + unsigned size; + + /* Space needed for frame data + headers rounded up */ + size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); + + /* Stopping point for hardware truncation */ + return (size - 8) / sizeof(u32); +} + +static unsigned sky2_get_rx_data_size(struct sky2_port *sky2) +{ + struct rx_ring_info *re; + unsigned size; + + /* Space needed for frame data + headers rounded up */ + size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); + + sky2->rx_nfrags = size >> PAGE_SHIFT; + BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr)); + + /* Compute residue after pages */ + size -= sky2->rx_nfrags << PAGE_SHIFT; + + /* Optimize to handle small packets and headers */ + if (size < copybreak) + size = copybreak; + if (size < ETH_HLEN) + size = ETH_HLEN; + + return size; +} + +/* Build description to hardware for one receive segment */ +static void sky2_rx_add(struct sky2_port *sky2, u8 op, + dma_addr_t map, unsigned len) +{ + struct sky2_rx_le *le; + + if (sizeof(dma_addr_t) > sizeof(u32)) { + le = sky2_next_rx(sky2); + le->addr = cpu_to_le32(upper_32_bits(map)); + le->opcode = OP_ADDR64 | HW_OWNER; + } + + le = sky2_next_rx(sky2); + le->addr = cpu_to_le32(lower_32_bits(map)); + le->length = cpu_to_le16(len); + le->opcode = op | HW_OWNER; +} + +/* Build description to hardware for one possibly fragmented skb */ +static void sky2_rx_submit(struct sky2_port *sky2, + const struct rx_ring_info *re) +{ + int i; + + sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size); + + for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++) + sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE); +} + + +static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, + unsigned size) +{ + 
struct sk_buff *skb = re->skb; + int i; + + re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, re->data_addr)) + goto mapping_error; + + dma_unmap_len_set(re, data_size, size); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_FROM_DEVICE); + + if (dma_mapping_error(&pdev->dev, re->frag_addr[i])) + goto map_page_error; + } + return 0; + +map_page_error: + while (--i >= 0) { + pci_unmap_page(pdev, re->frag_addr[i], + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_FROMDEVICE); + } + + pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), + PCI_DMA_FROMDEVICE); + +mapping_error: + if (net_ratelimit()) + dev_warn(&pdev->dev, "%s: rx mapping error\n", + skb->dev->name); + return -EIO; +} + +static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) +{ + struct sk_buff *skb = re->skb; + int i; + + pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), + PCI_DMA_FROMDEVICE); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + pci_unmap_page(pdev, re->frag_addr[i], + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_FROMDEVICE); +} + +/* Tell chip where to start receive checksum. + * Actually has two checksums, but set both same to avoid possible byte + * order problems. + */ +static void rx_set_checksum(struct sky2_port *sky2) +{ + struct sky2_rx_le *le = sky2_next_rx(sky2); + + le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); + le->ctrl = 0; + le->opcode = OP_TCPSTART | HW_OWNER; + + sky2_write32(sky2->hw, + Q_ADDR(rxqaddr[sky2->port], Q_CSR), + (sky2->netdev->features & NETIF_F_RXCSUM) + ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); +} + +/* Enable/disable receive hash calculation (RSS) */ +static void rx_set_rss(struct net_device *dev, netdev_features_t features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + int i, nkeys = 4; + + /* Supports IPv6 and other modes */ + if (hw->flags & SKY2_HW_NEW_LE) { + nkeys = 10; + sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL); + } + + /* Program RSS initial values */ + if (features & NETIF_F_RXHASH) { + u32 rss_key[10]; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i < nkeys; i++) + sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), + rss_key[i]); + + /* Need to turn on (undocumented) flag to make hashing work */ + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), + RX_STFW_ENA); + + sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_ENA_RX_RSS_HASH); + } else + sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_DIS_RX_RSS_HASH); +} + +/* + * The RX Stop command will not work for Yukon-2 if the BMU does not + * reach the end of packet and since we can't make sure that we have + * incoming data, we must reset the BMU while it is not doing a DMA + * transfer. Since it is possible that the RX path is still active, + * the RX RAM buffer will be stopped first, so any possible incoming + * data will not trigger a DMA. After the RAM buffer is stopped, the + * BMU is polled until any DMA in progress is ended and only then it + * will be reset. 
+ */ +static void sky2_rx_stop(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned rxq = rxqaddr[sky2->port]; + int i; + + /* disable the RAM Buffer receive queue */ + sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD); + + for (i = 0; i < 0xffff; i++) + if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL)) + == sky2_read8(hw, RB_ADDR(rxq, Q_RL))) + goto stopped; + + netdev_warn(sky2->netdev, "receiver stop failed\n"); +stopped: + sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); + + /* reset the Rx prefetch unit */ + sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); + mmiowb(); +} + +/* Clean out receive buffer area, assumes receiver hardware stopped */ +static void sky2_rx_clean(struct sky2_port *sky2) +{ + unsigned i; + + if (sky2->rx_le) + memset(sky2->rx_le, 0, RX_LE_BYTES); + + for (i = 0; i < sky2->rx_pending; i++) { + struct rx_ring_info *re = sky2->rx_ring + i; + + if (re->skb) { + sky2_rx_unmap_skb(sky2->hw->pdev, re); + kfree_skb(re->skb); + re->skb = NULL; + } + } +} + +/* Basic MII support */ +static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = PHY_ADDR_MARV; + + /* fallthru */ + case SIOCGMIIREG: { + u16 val = 0; + + spin_lock_bh(&sky2->phy_lock); + err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&sky2->phy_lock); + + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&sky2->phy_lock); + err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&sky2->phy_lock); + break; + } + return err; +} + +#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO) + +static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 port = sky2->port; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), + RX_VLAN_STRIP_ON); + else + sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), + RX_VLAN_STRIP_OFF); + + if (features & NETIF_F_HW_VLAN_CTAG_TX) { + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_VLAN_TAG_ON); + + dev->vlan_features |= SKY2_VLAN_OFFLOADS; + } else { + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_VLAN_TAG_OFF); + + /* Can't do transmit offload of vlan without hw vlan */ + dev->vlan_features &= ~SKY2_VLAN_OFFLOADS; + } +} + +/* Amount of required worst case padding in rx buffer */ +static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) +{ + return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; +} + +/* + * Allocate an skb for receiving. If the MTU is large enough + * make the skb non-linear with a fragment list of pages. + */ +static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp) +{ + struct sk_buff *skb; + int i; + + skb = __netdev_alloc_skb(sky2->netdev, + sky2->rx_data_size + sky2_rx_pad(sky2->hw), + gfp); + if (!skb) + goto nomem; + + if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { + unsigned char *start; + /* + * Workaround for a bug in FIFO that cause hang + * if the FIFO if the receive buffer is not 64 byte aligned. + * The buffer returned from netdev_alloc_skb is + * aligned except if slab debugging is enabled. 
+ */ + start = PTR_ALIGN(skb->data, 8); + skb_reserve(skb, start - skb->data); + } else + skb_reserve(skb, NET_IP_ALIGN); + + for (i = 0; i < sky2->rx_nfrags; i++) { + struct page *page = alloc_page(gfp); + + if (!page) + goto free_partial; + skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); + } + + return skb; +free_partial: + kfree_skb(skb); +nomem: + return NULL; +} + +static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq) +{ + sky2_put_idx(sky2->hw, rxq, sky2->rx_put); +} + +static int sky2_alloc_rx_skbs(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned i; + + sky2->rx_data_size = sky2_get_rx_data_size(sky2); + + /* Fill Rx ring */ + for (i = 0; i < sky2->rx_pending; i++) { + struct rx_ring_info *re = sky2->rx_ring + i; + + re->skb = sky2_rx_alloc(sky2, GFP_KERNEL); + if (!re->skb) + return -ENOMEM; + + if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) { + dev_kfree_skb(re->skb); + re->skb = NULL; + return -ENOMEM; + } + } + return 0; +} + +/* + * Setup receiver buffer pool. + * Normal case this ends up creating one list element for skb + * in the receive ring. Worst case if using large MTU and each + * allocation falls on a different 64 bit region, that results + * in 6 list elements per ring entry. + * One element is used for checksum enable/disable, and one + * extra to avoid wrap. + */ +static void sky2_rx_start(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + struct rx_ring_info *re; + unsigned rxq = rxqaddr[sky2->port]; + unsigned i, thresh; + + sky2->rx_put = sky2->rx_next = 0; + sky2_qset(hw, rxq); + + /* On PCI express lowering the watermark gives better performance */ + if (pci_is_pcie(hw->pdev)) + sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX); + + /* These chips have no ram buffer? + * MAC Rx RAM Read is controlled by hardware */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev > CHIP_REV_YU_EC_U_A0) + sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); + + sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); + + if (!(hw->flags & SKY2_HW_NEW_LE)) + rx_set_checksum(sky2); + + if (!(hw->flags & SKY2_HW_RSS_BROKEN)) + rx_set_rss(sky2->netdev, sky2->netdev->features); + + /* submit Rx ring */ + for (i = 0; i < sky2->rx_pending; i++) { + re = sky2->rx_ring + i; + sky2_rx_submit(sky2, re); + } + + /* + * The receiver hangs if it receives frames larger than the + * packet buffer. As a workaround, truncate oversize frames, but + * the register is limited to 9 bits, so if you do frames > 2052 + * you better get the MTU right! + */ + thresh = sky2_get_rx_threshold(sky2); + if (thresh > 0x1ff) + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); + else { + sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh); + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); + } + + /* Tell chip about available buffers */ + sky2_rx_update(sky2, rxq); + + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) { + /* + * Disable flushing of non ASF packets; + * must be done after initializing the BMUs; + * drivers without ASF support should do this too, otherwise + * it may happen that they cannot run on ASF devices; + * remember that the MAC FIFO isn't reset during initialization. 
+ */ + sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF); + } + + if (hw->chip_id >= CHIP_ID_YUKON_SUPR) { + /* Enable RX Home Address & Routing Header checksum fix */ + sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL), + RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA); + + /* Enable TX Home Address & Routing Header checksum fix */ + sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST), + TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN); + } +} + +static int sky2_alloc_buffers(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + + /* must be power of 2 */ + sky2->tx_le = pci_alloc_consistent(hw->pdev, + sky2->tx_ring_size * + sizeof(struct sky2_tx_le), + &sky2->tx_le_map); + if (!sky2->tx_le) + goto nomem; + + sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info), + GFP_KERNEL); + if (!sky2->tx_ring) + goto nomem; + + sky2->rx_le = pci_zalloc_consistent(hw->pdev, RX_LE_BYTES, + &sky2->rx_le_map); + if (!sky2->rx_le) + goto nomem; + + sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info), + GFP_KERNEL); + if (!sky2->rx_ring) + goto nomem; + + return sky2_alloc_rx_skbs(sky2); +nomem: + return -ENOMEM; +} + +static void sky2_free_buffers(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + + sky2_rx_clean(sky2); + + if (sky2->rx_le) { + pci_free_consistent(hw->pdev, RX_LE_BYTES, + sky2->rx_le, sky2->rx_le_map); + sky2->rx_le = NULL; + } + if (sky2->tx_le) { + pci_free_consistent(hw->pdev, + sky2->tx_ring_size * sizeof(struct sky2_tx_le), + sky2->tx_le, sky2->tx_le_map); + sky2->tx_le = NULL; + } + kfree(sky2->tx_ring); + kfree(sky2->rx_ring); + + sky2->tx_ring = NULL; + sky2->rx_ring = NULL; +} + +static void sky2_hw_up(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u32 ramsize; + int cap; + struct net_device *otherdev = hw->dev[sky2->port^1]; + + tx_init(sky2); + + /* + * On dual port PCI-X card, there is an problem where status + * can be received out of order due to split transactions + */ + if (otherdev && netif_running(otherdev) && + (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { + u16 cmd; + + cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); + cmd &= ~PCI_X_CMD_MAX_SPLIT; + sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); + } + + sky2_mac_init(hw, port); + + /* Register is number of 4K blocks on internal RAM buffer. */ + ramsize = sky2_read8(hw, B2_E_0) * 4; + if (ramsize > 0) { + u32 rxspace; + + netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize); + if (ramsize < 16) + rxspace = ramsize / 2; + else + rxspace = 8 + (2*(ramsize - 16))/3; + + sky2_ramset(hw, rxqaddr[port], 0, rxspace); + sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); + + /* Make sure SyncQ is disabled */ + sky2_write8(hw, RB_ADDR(port == 0 ? 
Q_XS1 : Q_XS2, RB_CTRL), + RB_RST_SET); + } + + sky2_qset(hw, txqaddr[port]); + + /* This is copied from sk98lin 10.0.5.3; no one tells me about erratta's */ + if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0) + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF); + + /* Set almost empty threshold */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U && + hw->chip_rev == CHIP_REV_YU_EC_U_A0) + sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV); + + sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, + sky2->tx_ring_size - 1); + + sky2_vlan_mode(sky2->netdev, sky2->netdev->features); + netdev_update_features(sky2->netdev); + + sky2_rx_start(sky2); +} + +/* Setup device IRQ and enable napi to process */ +static int sky2_setup_irq(struct sky2_hw *hw, const char *name) +{ + struct pci_dev *pdev = hw->pdev; + int err; + + err = request_irq(pdev->irq, sky2_intr, + (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, + name, hw); + if (err) + dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); + else { + hw->flags |= SKY2_HW_IRQ_SETUP; + + napi_enable(&hw->napi); + sky2_write32(hw, B0_IMSK, Y2_IS_BASE); + sky2_read32(hw, B0_IMSK); + } + + return err; +} + + +/* Bring up network interface. */ +static int sky2_open(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u32 imask; + int err; + + netif_carrier_off(dev); + + err = sky2_alloc_buffers(sky2); + if (err) + goto err_out; + + /* With single port, IRQ is setup when device is brought up */ + if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name))) + goto err_out; + + sky2_hw_up(sky2); + + /* Enable interrupts from phy/mac for port */ + imask = sky2_read32(hw, B0_IMSK); + + if (hw->chip_id == CHIP_ID_YUKON_OPT || + hw->chip_id == CHIP_ID_YUKON_PRM || + hw->chip_id == CHIP_ID_YUKON_OP_2) + imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */ + + imask |= portirq_msk[port]; + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + + netif_info(sky2, ifup, dev, "enabling interface\n"); + + return 0; + +err_out: + sky2_free_buffers(sky2); + return err; +} + +/* Modular subtraction in ring */ +static inline int tx_inuse(const struct sky2_port *sky2) +{ + return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1); +} + +/* Number of list elements available for next tx */ +static inline int tx_avail(const struct sky2_port *sky2) +{ + return sky2->tx_pending - tx_inuse(sky2); +} + +/* Estimate of number of transmit list elements required */ +static unsigned tx_le_req(const struct sk_buff *skb) +{ + unsigned count; + + count = (skb_shinfo(skb)->nr_frags + 1) + * (sizeof(dma_addr_t) / sizeof(u32)); + + if (skb_is_gso(skb)) + ++count; + else if (sizeof(dma_addr_t) == sizeof(u32)) + ++count; /* possible vlan */ + + if (skb->ip_summed == CHECKSUM_PARTIAL) + ++count; + + return count; +} + +static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) +{ + if (re->flags & TX_MAP_SINGLE) + pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), + PCI_DMA_TODEVICE); + else if (re->flags & TX_MAP_PAGE) + pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), + PCI_DMA_TODEVICE); + re->flags = 0; +} + +/* + * Put one packet in ring for transmit. + * A single packet can generate multiple list elements, and + * the number of ring elements will probably be less than the number + * of list elements used. 
+ */ +static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + struct sky2_tx_le *le = NULL; + struct tx_ring_info *re; + unsigned i, len; + dma_addr_t mapping; + u32 upper; + u16 slot; + u16 mss; + u8 ctrl; + + if (unlikely(tx_avail(sky2) < tx_le_req(skb))) + return NETDEV_TX_BUSY; + + len = skb_headlen(skb); + mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + + if (pci_dma_mapping_error(hw->pdev, mapping)) + goto mapping_error; + + slot = sky2->tx_prod; + netif_printk(sky2, tx_queued, KERN_DEBUG, dev, + "tx queued, slot %u, len %d\n", slot, skb->len); + + /* Send high bits if needed */ + upper = upper_32_bits(mapping); + if (upper != sky2->tx_last_upper) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(upper); + sky2->tx_last_upper = upper; + le->opcode = OP_ADDR64 | HW_OWNER; + } + + /* Check for TCP Segmentation Offload */ + mss = skb_shinfo(skb)->gso_size; + if (mss != 0) { + + if (!(hw->flags & SKY2_HW_NEW_LE)) + mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); + + if (mss != sky2->tx_last_mss) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(mss); + + if (hw->flags & SKY2_HW_NEW_LE) + le->opcode = OP_MSS | HW_OWNER; + else + le->opcode = OP_LRGLEN | HW_OWNER; + sky2->tx_last_mss = mss; + } + } + + ctrl = 0; + + /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ + if (skb_vlan_tag_present(skb)) { + if (!le) { + le = get_tx_le(sky2, &slot); + le->addr = 0; + le->opcode = OP_VLAN|HW_OWNER; + } else + le->opcode |= OP_VLAN; + le->length = cpu_to_be16(skb_vlan_tag_get(skb)); + ctrl |= INS_VLAN; + } + + /* Handle TCP checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* On Yukon EX (some versions) encoding change. */ + if (hw->flags & SKY2_HW_AUTO_TX_SUM) + ctrl |= CALSUM; /* auto checksum */ + else { + const unsigned offset = skb_transport_offset(skb); + u32 tcpsum; + + tcpsum = offset << 16; /* sum start */ + tcpsum |= offset + skb->csum_offset; /* sum write */ + + ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; + if (ip_hdr(skb)->protocol == IPPROTO_UDP) + ctrl |= UDPTCP; + + if (tcpsum != sky2->tx_tcpsum) { + sky2->tx_tcpsum = tcpsum; + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(tcpsum); + le->length = 0; /* initial checksum value */ + le->ctrl = 1; /* one packet */ + le->opcode = OP_TCPLISW | HW_OWNER; + } + } + } + + re = sky2->tx_ring + slot; + re->flags = TX_MAP_SINGLE; + dma_unmap_addr_set(re, mapaddr, mapping); + dma_unmap_len_set(re, maplen, len); + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(lower_32_bits(mapping)); + le->length = cpu_to_le16(len); + le->ctrl = ctrl; + le->opcode = mss ? 
(OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER); + + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (dma_mapping_error(&hw->pdev->dev, mapping)) + goto mapping_unwind; + + upper = upper_32_bits(mapping); + if (upper != sky2->tx_last_upper) { + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(upper); + sky2->tx_last_upper = upper; + le->opcode = OP_ADDR64 | HW_OWNER; + } + + re = sky2->tx_ring + slot; + re->flags = TX_MAP_PAGE; + dma_unmap_addr_set(re, mapaddr, mapping); + dma_unmap_len_set(re, maplen, skb_frag_size(frag)); + + le = get_tx_le(sky2, &slot); + le->addr = cpu_to_le32(lower_32_bits(mapping)); + le->length = cpu_to_le16(skb_frag_size(frag)); + le->ctrl = ctrl; + le->opcode = OP_BUFFER | HW_OWNER; + } + + re->skb = skb; + le->ctrl |= EOP; + + sky2->tx_prod = slot; + + if (tx_avail(sky2) <= MAX_SKB_TX_LE) + netif_stop_queue(dev); + + netdev_sent_queue(dev, skb->len); + sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); + + return NETDEV_TX_OK; + +mapping_unwind: + for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) { + re = sky2->tx_ring + i; + + sky2_tx_unmap(hw->pdev, re); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/* + * Free ring elements from starting at tx_cons until "done" + * + * NB: + * 1. The hardware will tell us about partial completion of multi-part + * buffers so make sure not to free skb to early. + * 2. This may run in parallel start_xmit because the it only + * looks at the tail of the queue of FIFO (tx_cons), not + * the head (tx_prod) + */ +static void sky2_tx_complete(struct sky2_port *sky2, u16 done) +{ + struct net_device *dev = sky2->netdev; + u16 idx; + unsigned int bytes_compl = 0, pkts_compl = 0; + + BUG_ON(done >= sky2->tx_ring_size); + + for (idx = sky2->tx_cons; idx != done; + idx = RING_NEXT(idx, sky2->tx_ring_size)) { + struct tx_ring_info *re = sky2->tx_ring + idx; + struct sk_buff *skb = re->skb; + + sky2_tx_unmap(sky2->hw->pdev, re); + + if (skb) { + netif_printk(sky2, tx_done, KERN_DEBUG, dev, + "tx done %u\n", idx); + + pkts_compl++; + bytes_compl += skb->len; + + re->skb = NULL; + dev_kfree_skb_any(skb); + + sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); + } + } + + sky2->tx_cons = idx; + smp_mb(); + + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + u64_stats_update_begin(&sky2->tx_stats.syncp); + sky2->tx_stats.packets += pkts_compl; + sky2->tx_stats.bytes += bytes_compl; + u64_stats_update_end(&sky2->tx_stats.syncp); +} + +static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) +{ + /* Disable Force Sync bit and Enable Alloc bit */ + sky2_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset the PCI FIFO of the async Tx queue */ + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), + BMU_RST_SET | BMU_FIFO_RST); + + /* Reset the Tx prefetch units */ + sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL), + PREF_UNIT_RST_SET); + + sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + + sky2_read32(hw, B0_CTST); +} + +static void 
sky2_hw_down(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 ctrl; + + /* Force flow control off */ + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + + /* Stop transmitter */ + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP); + sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR)); + + sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET | RB_DIS_OP_MD); + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + + /* Workaround shared GMAC reset */ + if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && + port == 0 && hw->dev[1] && netif_running(hw->dev[1]))) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + + /* Force any delayed status interrupt and NAPI */ + sky2_write32(hw, STAT_LEV_TIMER_CNT, 0); + sky2_write32(hw, STAT_TX_TIMER_CNT, 0); + sky2_write32(hw, STAT_ISR_TIMER_CNT, 0); + sky2_read8(hw, STAT_ISR_TIMER_CTRL); + + sky2_rx_stop(sky2); + + spin_lock_bh(&sky2->phy_lock); + sky2_phy_power_down(hw, port); + spin_unlock_bh(&sky2->phy_lock); + + sky2_tx_reset(hw, port); + + /* Free any pending frames stuck in HW queue */ + sky2_tx_complete(sky2, sky2->tx_prod); +} + +/* Network shutdown */ +static int sky2_close(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + /* Never really got started! */ + if (!sky2->tx_le) + return 0; + + netif_info(sky2, ifdown, dev, "disabling interface\n"); + + if (hw->ports == 1) { + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + napi_disable(&hw->napi); + free_irq(hw->pdev->irq, hw); + hw->flags &= ~SKY2_HW_IRQ_SETUP; + } else { + u32 imask; + + /* Disable port IRQ */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~portirq_msk[sky2->port]; + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + + synchronize_irq(hw->pdev->irq); + napi_synchronize(&hw->napi); + } + + sky2_hw_down(sky2); + + sky2_free_buffers(sky2); + + return 0; +} + +static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux) +{ + if (hw->flags & SKY2_HW_FIBRE_PHY) + return SPEED_1000; + + if (!(hw->flags & SKY2_HW_GIGABIT)) { + if (aux & PHY_M_PS_SPEED_100) + return SPEED_100; + else + return SPEED_10; + } + + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + return SPEED_10; + } +} + +static void sky2_link_up(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + static const char *fc_name[] = { + [FC_NONE] = "none", + [FC_TX] = "tx", + [FC_RX] = "rx", + [FC_BOTH] = "both", + }; + + sky2_set_ipg(sky2); + + sky2_enable_rx_tx(sky2); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); + + netif_carrier_on(sky2->netdev); + + mod_timer(&hw->watchdog_timer, jiffies + 1); + + /* Turn on link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), + LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); + + netif_info(sky2, link, sky2->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + sky2->speed, + sky2->duplex == DUPLEX_FULL ? 
"full" : "half", + fc_name[sky2->flow_status]); +} + +static void sky2_link_down(struct sky2_port *sky2) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 reg; + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); + + reg = gma_read16(hw, port, GM_GP_CTRL); + reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, reg); + + netif_carrier_off(sky2->netdev); + + /* Turn off link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + + netif_info(sky2, link, sky2->netdev, "Link is down\n"); + + sky2_phy_init(hw, port); +} + +static enum flow_control sky2_flow(int rx, int tx) +{ + if (rx) + return tx ? FC_BOTH : FC_RX; + else + return tx ? FC_TX : FC_NONE; +} + +static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + u16 advert, lpa; + + advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP); + if (lpa & PHY_M_AN_RF) { + netdev_err(sky2->netdev, "remote fault\n"); + return -1; + } + + if (!(aux & PHY_M_PS_SPDUP_RES)) { + netdev_err(sky2->netdev, "speed/duplex mismatch\n"); + return -1; + } + + sky2->speed = sky2_phy_speed(hw, aux); + sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + + /* Since the pause result bits seem to in different positions on + * different chips. look at registers. + */ + if (hw->flags & SKY2_HW_FIBRE_PHY) { + /* Shift for bits in fiber PHY */ + advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM); + lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM); + + if (advert & ADVERTISE_1000XPAUSE) + advert |= ADVERTISE_PAUSE_CAP; + if (advert & ADVERTISE_1000XPSE_ASYM) + advert |= ADVERTISE_PAUSE_ASYM; + if (lpa & LPA_1000XPAUSE) + lpa |= LPA_PAUSE_CAP; + if (lpa & LPA_1000XPAUSE_ASYM) + lpa |= LPA_PAUSE_ASYM; + } + + sky2->flow_status = FC_NONE; + if (advert & ADVERTISE_PAUSE_CAP) { + if (lpa & LPA_PAUSE_CAP) + sky2->flow_status = FC_BOTH; + else if (advert & ADVERTISE_PAUSE_ASYM) + sky2->flow_status = FC_RX; + } else if (advert & ADVERTISE_PAUSE_ASYM) { + if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM)) + sky2->flow_status = FC_TX; + } + + if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 && + !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)) + sky2->flow_status = FC_NONE; + + if (sky2->flow_status & FC_TX) + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + else + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + + return 0; +} + +/* Interrupt from PHY */ +static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); + u16 istatus, phystat; + + if (!netif_running(dev)) + return; + + spin_lock(&sky2->phy_lock); + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n", + istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (sky2_autoneg_done(sky2, phystat) == 0 && + !netif_carrier_ok(dev)) + sky2_link_up(sky2); + goto out; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + sky2->speed = sky2_phy_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + sky2->duplex = + (phystat & PHY_M_PS_FULL_DUP) ? 
DUPLEX_FULL : DUPLEX_HALF; + + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + sky2_link_up(sky2); + else + sky2_link_down(sky2); + } +out: + spin_unlock(&sky2->phy_lock); +} + +/* Special quick link interrupt (Yukon-2 Optima only) */ +static void sky2_qlink_intr(struct sky2_hw *hw) +{ + struct sky2_port *sky2 = netdev_priv(hw->dev[0]); + u32 imask; + u16 phy; + + /* disable irq */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~Y2_IS_PHY_QLNK; + sky2_write32(hw, B0_IMSK, imask); + + /* reset PHY Link Detect */ + phy = sky2_pci_read16(hw, PSM_CONFIG_REG4); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + sky2_link_up(sky2); +} + +/* Transmit timeout is only called if we are running, carrier is up + * and tx queue is full (stopped). + */ +static void sky2_tx_timeout(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + netif_err(sky2, timer, dev, "tx timeout\n"); + + netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n", + sky2->tx_cons, sky2->tx_prod, + sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), + sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE))); + + /* can't restart safely under softirq */ + schedule_work(&hw->restart_work); +} + +static int sky2_change_mtu(struct net_device *dev, int new_mtu) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + int err; + u16 ctl, mode; + u32 imask; + + /* MTU size outside the spec */ + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + /* MTU > 1500 on yukon FE and FE+ not allowed */ + if (new_mtu > ETH_DATA_LEN && + (hw->chip_id == CHIP_ID_YUKON_FE || + hw->chip_id == CHIP_ID_YUKON_FE_P)) + return -EINVAL; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + netdev_update_features(dev); + return 0; + } + + imask = sky2_read32(hw, B0_IMSK); + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + dev->trans_start = jiffies; /* prevent tx timeout */ + napi_disable(&hw->napi); + netif_tx_disable(dev); + + synchronize_irq(hw->pdev->irq); + + if (!(hw->flags & SKY2_HW_RAM_BUFFER)) + sky2_set_tx_stfwd(hw, port); + + ctl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); + sky2_rx_stop(sky2); + sky2_rx_clean(sky2); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA; + if (sky2->speed > SPEED_100) + mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000); + else + mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100); + + if (dev->mtu > ETH_DATA_LEN) + mode |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, mode); + + sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); + + err = sky2_alloc_rx_skbs(sky2); + if (!err) + sky2_rx_start(sky2); + else + sky2_rx_clean(sky2); + sky2_write32(hw, B0_IMSK, imask); + + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + + if (err) + dev_close(dev); + else { + gma_write16(hw, port, GM_GP_CTRL, ctl); + + netif_wake_queue(dev); + } + + return err; +} + +static inline bool needs_copy(const struct rx_ring_info *re, + unsigned length) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* Some architectures need the IP header to be aligned */ + if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32))) + return true; +#endif + return length < copybreak; +} + +/* For small just reuse existing 
skb for next receive */ +static struct sk_buff *receive_copy(struct sky2_port *sky2, + const struct rx_ring_info *re, + unsigned length) +{ + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(sky2->netdev, length); + if (likely(skb)) { + pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, + length, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(re->skb, skb->data, length); + skb->ip_summed = re->skb->ip_summed; + skb->csum = re->skb->csum; + skb_copy_hash(skb, re->skb); + skb->vlan_proto = re->skb->vlan_proto; + skb->vlan_tci = re->skb->vlan_tci; + + pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, + length, PCI_DMA_FROMDEVICE); + re->skb->vlan_proto = 0; + re->skb->vlan_tci = 0; + skb_clear_hash(re->skb); + re->skb->ip_summed = CHECKSUM_NONE; + skb_put(skb, length); + } + return skb; +} + +/* Adjust length of skb with fragments to match received data */ +static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, + unsigned int length) +{ + int i, num_frags; + unsigned int size; + + /* put header into skb */ + size = min(length, hdr_space); + skb->tail += size; + skb->len += size; + length -= size; + + num_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + if (length == 0) { + /* don't need this page */ + __skb_frag_unref(frag); + --skb_shinfo(skb)->nr_frags; + } else { + size = min(length, (unsigned) PAGE_SIZE); + + skb_frag_size_set(frag, size); + skb->data_len += size; + skb->truesize += PAGE_SIZE; + skb->len += size; + length -= size; + } + } +} + +/* Normal packet - take skb from ring element and put in a new one */ +static struct sk_buff *receive_new(struct sky2_port *sky2, + struct rx_ring_info *re, + unsigned int length) +{ + struct sk_buff *skb; + struct rx_ring_info nre; + unsigned hdr_space = sky2->rx_data_size; + + nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC); + if (unlikely(!nre.skb)) + goto nobuf; + + if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space)) + goto nomap; + + skb = re->skb; + sky2_rx_unmap_skb(sky2->hw->pdev, re); + prefetch(skb->data); + *re = nre; + + if (skb_shinfo(skb)->nr_frags) + skb_put_frags(skb, hdr_space, length); + else + skb_put(skb, length); + return skb; + +nomap: + dev_kfree_skb(nre.skb); +nobuf: + return NULL; +} + +/* + * Receive one packet. + * For larger packets, get new buffer. + */ +static struct sk_buff *sky2_receive(struct net_device *dev, + u16 length, u32 status) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; + struct sk_buff *skb = NULL; + u16 count = (status & GMR_FS_LEN) >> 16; + + netif_printk(sky2, rx_status, KERN_DEBUG, dev, + "rx slot %u status 0x%x len %d\n", + sky2->rx_next, status, length); + + sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; + prefetch(sky2->rx_ring + sky2->rx_next); + + if (skb_vlan_tag_present(re->skb)) + count -= VLAN_HLEN; /* Account for vlan tag */ + + /* This chip has hardware problems that generates bogus status. + * So do only marginal checking and expect higher level protocols + * to handle crap frames. 
+ */ + if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && + sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && + length != count) + goto okay; + + if (status & GMR_FS_ANY_ERR) + goto error; + + if (!(status & GMR_FS_RX_OK)) + goto resubmit; + + /* if length reported by DMA does not match PHY, packet was truncated */ + if (length != count) + goto error; + +okay: + if (needs_copy(re, length)) + skb = receive_copy(sky2, re, length); + else + skb = receive_new(sky2, re, length); + + dev->stats.rx_dropped += (skb == NULL); + +resubmit: + sky2_rx_submit(sky2, re); + + return skb; + +error: + ++dev->stats.rx_errors; + + if (net_ratelimit()) + netif_info(sky2, rx_err, dev, + "rx error, status 0x%x length %d\n", status, length); + + goto resubmit; +} + +/* Transmit complete */ +static inline void sky2_tx_done(struct net_device *dev, u16 last) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (netif_running(dev)) { + sky2_tx_complete(sky2, last); + + /* Wake unless it's detached, and called e.g. from sky2_close() */ + if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) + netif_wake_queue(dev); + } +} + +static inline void sky2_skb_rx(const struct sky2_port *sky2, + struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_NONE) + netif_receive_skb(skb); + else + napi_gro_receive(&sky2->hw->napi, skb); +} + +static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, + unsigned packets, unsigned bytes) +{ + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (packets == 0) + return; + + u64_stats_update_begin(&sky2->rx_stats.syncp); + sky2->rx_stats.packets += packets; + sky2->rx_stats.bytes += bytes; + u64_stats_update_end(&sky2->rx_stats.syncp); + + dev->last_rx = jiffies; + sky2_rx_update(netdev_priv(dev), rxqaddr[port]); +} + +static void sky2_rx_checksum(struct sky2_port *sky2, u32 status) +{ + /* If this happens then driver assuming wrong format for chip type */ + BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE); + + /* Both checksum counters are programmed to start at + * the same offset, so unless there is a problem they + * should match. This failure is an early indication that + * hardware receive checksumming won't work. 
+ */ + if (likely((u16)(status >> 16) == (u16)status)) { + struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb; + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = le16_to_cpu(status); + } else { + dev_notice(&sky2->hw->pdev->dev, + "%s: receive checksum problem (status = %#x)\n", + sky2->netdev->name, status); + + /* Disable checksum offload + * It will be reenabled on next ndo_set_features, but if it's + * really broken, will get disabled again + */ + sky2->netdev->features &= ~NETIF_F_RXCSUM; + sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), + BMU_DIS_RX_CHKSUM); + } +} + +static void sky2_rx_tag(struct sky2_port *sky2, u16 length) +{ + struct sk_buff *skb; + + skb = sky2->rx_ring[sky2->rx_next].skb; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(length)); +} + +static void sky2_rx_hash(struct sky2_port *sky2, u32 status) +{ + struct sk_buff *skb; + + skb = sky2->rx_ring[sky2->rx_next].skb; + skb_set_hash(skb, le32_to_cpu(status), PKT_HASH_TYPE_L3); +} + +/* Process status response ring */ +static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) +{ + int work_done = 0; + unsigned int total_bytes[2] = { 0 }; + unsigned int total_packets[2] = { 0 }; + + if (to_do <= 0) + return work_done; + + rmb(); + do { + struct sky2_port *sky2; + struct sky2_status_le *le = hw->st_le + hw->st_idx; + unsigned port; + struct net_device *dev; + struct sk_buff *skb; + u32 status; + u16 length; + u8 opcode = le->opcode; + + if (!(opcode & HW_OWNER)) + break; + + hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size); + + port = le->css & CSS_LINK_BIT; + dev = hw->dev[port]; + sky2 = netdev_priv(dev); + length = le16_to_cpu(le->length); + status = le32_to_cpu(le->status); + + le->opcode = 0; + switch (opcode & ~HW_OWNER) { + case OP_RXSTAT: + total_packets[port]++; + total_bytes[port] += length; + + skb = sky2_receive(dev, length, status); + if (!skb) + break; + + /* This chip reports checksum status differently */ + if (hw->flags & SKY2_HW_NEW_LE) { + if ((dev->features & NETIF_F_RXCSUM) && + (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && + (le->css & CSS_TCPUDPCSOK)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } + + skb->protocol = eth_type_trans(skb, dev); + sky2_skb_rx(sky2, skb); + + /* Stop after net poll weight */ + if (++work_done >= to_do) + goto exit_loop; + break; + + case OP_RXVLAN: + sky2_rx_tag(sky2, length); + break; + + case OP_RXCHKSVLAN: + sky2_rx_tag(sky2, length); + /* fall through */ + case OP_RXCHKS: + if (likely(dev->features & NETIF_F_RXCSUM)) + sky2_rx_checksum(sky2, status); + break; + + case OP_RSS_HASH: + sky2_rx_hash(sky2, status); + break; + + case OP_TXINDEXLE: + /* TX index reports status for both ports */ + sky2_tx_done(hw->dev[0], status & 0xfff); + if (hw->dev[1]) + sky2_tx_done(hw->dev[1], + ((status >> 24) & 0xff) + | (u16)(length & 0xf) << 8); + break; + + default: + if (net_ratelimit()) + pr_warn("unknown status opcode 0x%x\n", opcode); + } + } while (hw->st_idx != idx); + + /* Fully processed status ring so clear irq */ + sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); + +exit_loop: + sky2_rx_done(hw, 0, total_packets[0], total_bytes[0]); + sky2_rx_done(hw, 1, total_packets[1], total_bytes[1]); + + return work_done; +} + +static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) +{ + struct net_device *dev = hw->dev[port]; + + if (net_ratelimit()) + netdev_info(dev, "hw error interrupt status 0x%x\n", status); + + if (status & Y2_IS_PAR_RD1) { + if (net_ratelimit()) + netdev_err(dev, "ram 
data read parity error\n"); + /* Clear IRQ */ + sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); + } + + if (status & Y2_IS_PAR_WR1) { + if (net_ratelimit()) + netdev_err(dev, "ram data write parity error\n"); + + sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); + } + + if (status & Y2_IS_PAR_MAC1) { + if (net_ratelimit()) + netdev_err(dev, "MAC parity error\n"); + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); + } + + if (status & Y2_IS_PAR_RX1) { + if (net_ratelimit()) + netdev_err(dev, "RX parity error\n"); + sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); + } + + if (status & Y2_IS_TCP_TXA1) { + if (net_ratelimit()) + netdev_err(dev, "TCP segmentation error\n"); + sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); + } +} + +static void sky2_hw_intr(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u32 status = sky2_read32(hw, B0_HWE_ISRC); + u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); + + status &= hwmsk; + + if (status & Y2_IS_TIST_OV) + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + + if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { + u16 pci_err; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_err = sky2_pci_read16(hw, PCI_STATUS); + if (net_ratelimit()) + dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", + pci_err); + + sky2_pci_write16(hw, PCI_STATUS, + pci_err | PCI_STATUS_ERROR_BITS); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + } + + if (status & Y2_IS_PCI_EXP) { + /* PCI-Express uncorrectable Error occurred */ + u32 err; + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); + if (net_ratelimit()) + dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); + + sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + } + + if (status & Y2_HWE_L1_MASK) + sky2_hw_error(hw, 0, status); + status >>= 8; + if (status & Y2_HWE_L1_MASK) + sky2_hw_error(hw, 1, status); +} + +static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) +{ + struct net_device *dev = hw->dev[port]; + struct sky2_port *sky2 = netdev_priv(dev); + u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_CO_OV) + gma_read16(hw, port, GM_RX_IRQ_SRC); + + if (status & GM_IS_TX_CO_OV) + gma_read16(hw, port, GM_TX_IRQ_SRC); + + if (status & GM_IS_RX_FF_OR) { + ++dev->stats.rx_fifo_errors; + sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); + } + + if (status & GM_IS_TX_FF_UR) { + ++dev->stats.tx_fifo_errors; + sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); + } +} + +/* This should never happen it is a bug. 
*/ +static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q) +{ + struct net_device *dev = hw->dev[port]; + u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); + + dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n", + dev->name, (unsigned) q, (unsigned) idx, + (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX))); + + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); +} + +static int sky2_rx_hung(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned rxq = rxqaddr[port]; + u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP)); + u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV)); + u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP)); + u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); + + /* If idle and MAC or PCI is stuck */ + if (sky2->check.last == dev->last_rx && + ((mac_rp == sky2->check.mac_rp && + mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || + /* Check if the PCI RX hang */ + (fifo_rp == sky2->check.fifo_rp && + fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { + netdev_printk(KERN_DEBUG, dev, + "hung mac %d:%d fifo %d (%d:%d)\n", + mac_lev, mac_rp, fifo_lev, + fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); + return 1; + } else { + sky2->check.last = dev->last_rx; + sky2->check.mac_rp = mac_rp; + sky2->check.mac_lev = mac_lev; + sky2->check.fifo_rp = fifo_rp; + sky2->check.fifo_lev = fifo_lev; + return 0; + } +} + +static void sky2_watchdog(unsigned long arg) +{ + struct sky2_hw *hw = (struct sky2_hw *) arg; + + /* Check for lost IRQ once a second */ + if (sky2_read32(hw, B0_ISRC)) { + napi_schedule(&hw->napi); + } else { + int i, active = 0; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + if (!netif_running(dev)) + continue; + ++active; + + /* For chips with Rx FIFO, check if stuck */ + if ((hw->flags & SKY2_HW_RAM_BUFFER) && + sky2_rx_hung(dev)) { + netdev_info(dev, "receiver hang detected\n"); + schedule_work(&hw->restart_work); + return; + } + } + + if (active == 0) + return; + } + + mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); +} + +/* Hardware/software error handling */ +static void sky2_err_intr(struct sky2_hw *hw, u32 status) +{ + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status); + + if (status & Y2_IS_HW_ERR) + sky2_hw_intr(hw); + + if (status & Y2_IS_IRQ_MAC1) + sky2_mac_intr(hw, 0); + + if (status & Y2_IS_IRQ_MAC2) + sky2_mac_intr(hw, 1); + + if (status & Y2_IS_CHK_RX1) + sky2_le_error(hw, 0, Q_R1); + + if (status & Y2_IS_CHK_RX2) + sky2_le_error(hw, 1, Q_R2); + + if (status & Y2_IS_CHK_TXA1) + sky2_le_error(hw, 0, Q_XA1); + + if (status & Y2_IS_CHK_TXA2) + sky2_le_error(hw, 1, Q_XA2); +} + +static int sky2_poll(struct napi_struct *napi, int work_limit) +{ + struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi); + u32 status = sky2_read32(hw, B0_Y2_SP_EISR); + int work_done = 0; + u16 idx; + + if (unlikely(status & Y2_IS_ERROR)) + sky2_err_intr(hw, status); + + if (status & Y2_IS_IRQ_PHY1) + sky2_phy_intr(hw, 0); + + if (status & Y2_IS_IRQ_PHY2) + sky2_phy_intr(hw, 1); + + if (status & Y2_IS_PHY_QLNK) + sky2_qlink_intr(hw); + + while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { + work_done += sky2_status_intr(hw, work_limit - work_done, idx); + + if (work_done >= work_limit) + goto done; + } + + napi_complete(napi); + sky2_read32(hw, B0_Y2_SP_LISR); +done: + + return work_done; +} + +static irqreturn_t sky2_intr(int irq, void 
*dev_id) +{ + struct sky2_hw *hw = dev_id; + u32 status; + + /* Reading this mask interrupts as side effect */ + status = sky2_read32(hw, B0_Y2_SP_ISRC2); + if (status == 0 || status == ~0) { + sky2_write32(hw, B0_Y2_SP_ICR, 2); + return IRQ_NONE; + } + + prefetch(&hw->st_le[hw->st_idx]); + + napi_schedule(&hw->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sky2_netpoll(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + napi_schedule(&sky2->hw->napi); +} +#endif + +/* Chip internal frequency for clock calculations */ +static u32 sky2_mhz(const struct sky2_hw *hw) +{ + switch (hw->chip_id) { + case CHIP_ID_YUKON_EC: + case CHIP_ID_YUKON_EC_U: + case CHIP_ID_YUKON_EX: + case CHIP_ID_YUKON_SUPR: + case CHIP_ID_YUKON_UL_2: + case CHIP_ID_YUKON_OPT: + case CHIP_ID_YUKON_PRM: + case CHIP_ID_YUKON_OP_2: + return 125; + + case CHIP_ID_YUKON_FE: + return 100; + + case CHIP_ID_YUKON_FE_P: + return 50; + + case CHIP_ID_YUKON_XL: + return 156; + + default: + BUG(); + } +} + +static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us) +{ + return sky2_mhz(hw) * us; +} + +static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) +{ + return clk / sky2_mhz(hw); +} + + +static int sky2_init(struct sky2_hw *hw) +{ + u8 t8; + + /* Enable all clocks and check for bad PCI access */ + sky2_pci_write32(hw, PCI_DEV_REG3, 0); + + sky2_write8(hw, B0_CTST, CS_RST_CLR); + + hw->chip_id = sky2_read8(hw, B2_CHIP_ID); + hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; + + switch (hw->chip_id) { + case CHIP_ID_YUKON_XL: + hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; + if (hw->chip_rev < CHIP_REV_YU_XL_A2) + hw->flags |= SKY2_HW_RSS_BROKEN; + break; + + case CHIP_ID_YUKON_EC_U: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_ADV_POWER_CTL; + break; + + case CHIP_ID_YUKON_EX: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_ADV_POWER_CTL + | SKY2_HW_RSS_CHKSUM; + + /* New transmit checksum */ + if (hw->chip_rev != CHIP_REV_YU_EX_B0) + hw->flags |= SKY2_HW_AUTO_TX_SUM; + break; + + case CHIP_ID_YUKON_EC: + /* This rev is really old, and requires untested workarounds */ + if (hw->chip_rev == CHIP_REV_YU_EC_A1) { + dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); + return -EOPNOTSUPP; + } + hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN; + break; + + case CHIP_ID_YUKON_FE: + hw->flags = SKY2_HW_RSS_BROKEN; + break; + + case CHIP_ID_YUKON_FE_P: + hw->flags = SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_AUTO_TX_SUM + | SKY2_HW_ADV_POWER_CTL; + + /* The workaround for status conflicts VLAN tag detection. 
*/ + if (hw->chip_rev == CHIP_REV_YU_FE2_A0) + hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM; + break; + + case CHIP_ID_YUKON_SUPR: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEWER_PHY + | SKY2_HW_NEW_LE + | SKY2_HW_AUTO_TX_SUM + | SKY2_HW_ADV_POWER_CTL; + + if (hw->chip_rev == CHIP_REV_YU_SU_A0) + hw->flags |= SKY2_HW_RSS_CHKSUM; + break; + + case CHIP_ID_YUKON_UL_2: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_ADV_POWER_CTL; + break; + + case CHIP_ID_YUKON_OPT: + case CHIP_ID_YUKON_PRM: + case CHIP_ID_YUKON_OP_2: + hw->flags = SKY2_HW_GIGABIT + | SKY2_HW_NEW_LE + | SKY2_HW_ADV_POWER_CTL; + break; + + default: + dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", + hw->chip_id); + return -EOPNOTSUPP; + } + + hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); + if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') + hw->flags |= SKY2_HW_FIBRE_PHY; + + hw->ports = 1; + t8 = sky2_read8(hw, B2_Y2_HW_RES); + if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { + if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) + ++hw->ports; + } + + if (sky2_read8(hw, B2_E_0)) + hw->flags |= SKY2_HW_RAM_BUFFER; + + return 0; +} + +static void sky2_reset(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u16 status; + int i; + u32 hwe_mask = Y2_HWE_ALL_MASK; + + /* disable ASF */ + if (hw->chip_id == CHIP_ID_YUKON_EX + || hw->chip_id == CHIP_ID_YUKON_SUPR) { + sky2_write32(hw, CPU_WDOG, 0); + status = sky2_read16(hw, HCU_CCSR); + status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE | + HCU_CCSR_UC_STATE_MSK); + /* + * CPU clock divider shouldn't be used because + * - ASF firmware may malfunction + * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks + */ + status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK; + sky2_write16(hw, HCU_CCSR, status); + sky2_write32(hw, CPU_WDOG, 0); + } else + sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); + sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); + + /* do a SW reset */ + sky2_write8(hw, B0_CTST, CS_RST_SET); + sky2_write8(hw, B0_CTST, CS_RST_CLR); + + /* allow writes to PCI config */ + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + + /* clear PCI errors, if any */ + status = sky2_pci_read16(hw, PCI_STATUS); + status |= PCI_STATUS_ERROR_BITS; + sky2_pci_write16(hw, PCI_STATUS, status); + + sky2_write8(hw, B0_CTST, CS_MRST_CLR); + + if (pci_is_pcie(pdev)) { + sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, + 0xfffffffful); + + /* If error bit is stuck on ignore it */ + if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP) + dev_info(&pdev->dev, "ignoring stuck error report bit\n"); + else + hwe_mask |= Y2_IS_PCI_EXP; + } + + sky2_power_on(hw); + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + for (i = 0; i < hw->ports; i++) { + sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + + if (hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) + sky2_write16(hw, SK_REG(i, GMAC_CTRL), + GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON + | GMC_BYP_RETR_ON); + + } + + if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) { + /* enable MACSec clock gating */ + sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS); + } + + if (hw->chip_id == CHIP_ID_YUKON_OPT || + hw->chip_id == CHIP_ID_YUKON_PRM || + hw->chip_id == CHIP_ID_YUKON_OP_2) { + u16 reg; + + if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { + /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */ + sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7)); + + /* 
set PHY Link Detect Timer to 1.1 second (11x 100ms) */ + reg = 10; + + /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ + sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); + } else { + /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */ + reg = 3; + } + + reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE; + reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT; + + /* reset PHY Link Detect */ + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); + + /* check if PSMv2 was running before */ + reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); + if (reg & PCI_EXP_LNKCTL_ASPMC) + /* restore the PCIe Link Control register */ + sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL, + reg); + + if (hw->chip_id == CHIP_ID_YUKON_PRM && + hw->chip_rev == CHIP_REV_YU_PRM_A0) { + /* change PHY Interrupt polarity to low active */ + reg = sky2_read16(hw, GPHY_CTRL); + sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL); + + /* adapt HW for low active PHY Interrupt */ + reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL); + sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1); + } + + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ + sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16)); + } + + /* Clear I2C IRQ noise */ + sky2_write32(hw, B2_I2C_IRQ, 1); + + /* turn off hardware timer (unused) */ + sky2_write8(hw, B2_TI_CTRL, TIM_STOP); + sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + + /* Turn off descriptor polling */ + sky2_write32(hw, B28_DPT_CTRL, DPT_STOP); + + /* Turn off receive timestamp */ + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP); + sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + for (i = 0; i < hw->ports; i++) { + sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); + + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53); + sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); + } + + sky2_write32(hw, B0_HWE_IMSK, hwe_mask); + + for (i = 0; i < hw->ports; i++) + sky2_gmac_reset(hw, i); + + memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le)); + hw->st_idx = 0; + + sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET); + sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR); + + sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma); + sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); + + /* Set the list last index */ + sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1); + + sky2_write16(hw, STAT_TX_IDX_TH, 10); + sky2_write8(hw, STAT_FIFO_WM, 16); + + /* set Status-FIFO ISR watermark */ + if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) + sky2_write8(hw, STAT_FIFO_ISR_WM, 4); + else + sky2_write8(hw, STAT_FIFO_ISR_WM, 16); + + sky2_write32(hw, STAT_TX_TIMER_INI, 
sky2_us2clk(hw, 1000)); + sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); + sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); + + /* enable status unit */ + sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); + + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); + sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); +} + +/* Take device down (offline). + * Equivalent to doing dev_stop() but this does not + * inform upper layers of the transition. + */ +static void sky2_detach(struct net_device *dev) +{ + if (netif_running(dev)) { + netif_tx_lock(dev); + netif_device_detach(dev); /* stop txq */ + netif_tx_unlock(dev); + sky2_close(dev); + } +} + +/* Bring device back after doing sky2_detach */ +static int sky2_reattach(struct net_device *dev) +{ + int err = 0; + + if (netif_running(dev)) { + err = sky2_open(dev); + if (err) { + netdev_info(dev, "could not restart %d\n", err); + dev_close(dev); + } else { + netif_device_attach(dev); + sky2_set_multicast(dev); + } + } + + return err; +} + +static void sky2_all_down(struct sky2_hw *hw) +{ + int i; + + if (hw->flags & SKY2_HW_IRQ_SETUP) { + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + synchronize_irq(hw->pdev->irq); + napi_disable(&hw->napi); + } + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev)) + continue; + + netif_carrier_off(dev); + netif_tx_disable(dev); + sky2_hw_down(sky2); + } +} + +static void sky2_all_up(struct sky2_hw *hw) +{ + u32 imask = Y2_IS_BASE; + int i; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev)) + continue; + + sky2_hw_up(sky2); + sky2_set_multicast(dev); + imask |= portirq_msk[i]; + netif_wake_queue(dev); + } + + if (hw->flags & SKY2_HW_IRQ_SETUP) { + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + } +} + +static void sky2_restart(struct work_struct *work) +{ + struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work); + + rtnl_lock(); + + sky2_all_down(hw); + sky2_reset(hw); + sky2_all_up(hw); + + rtnl_unlock(); +} + +static inline u8 sky2_wol_supported(const struct sky2_hw *hw) +{ + return sky2_is_copper(hw) ? 
(WAKE_PHY | WAKE_MAGIC) : 0; +} + +static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + + wol->supported = sky2_wol_supported(sky2->hw); + wol->wolopts = sky2->wol; +} + +static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + bool enable_wakeup = false; + int i; + + if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || + !device_can_wakeup(&hw->pdev->dev)) + return -EOPNOTSUPP; + + sky2->wol = wol->wolopts; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (sky2->wol) + enable_wakeup = true; + } + device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup); + + return 0; +} + +static u32 sky2_supported_modes(const struct sky2_hw *hw) +{ + if (sky2_is_copper(hw)) { + u32 modes = SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full; + + if (hw->flags & SKY2_HW_GIGABIT) + modes |= SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full; + return modes; + } else + return SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full; +} + +static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = sky2_supported_modes(hw); + ecmd->phy_address = PHY_ADDR_MARV; + if (sky2_is_copper(hw)) { + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, sky2->speed); + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_1000); + ecmd->port = PORT_FIBRE; + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; + } + + ecmd->advertising = sky2->advertising; + ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) + ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + ecmd->duplex = sky2->duplex; + return 0; +} + +static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + const struct sky2_hw *hw = sky2->hw; + u32 supported = sky2_supported_modes(hw); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ecmd->advertising & ~supported) + return -EINVAL; + + if (sky2_is_copper(hw)) + sky2->advertising = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + else + sky2->advertising = ecmd->advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + + sky2->flags |= SKY2_FLAG_AUTO_SPEED; + sky2->duplex = -1; + sky2->speed = -1; + } else { + u32 setting; + u32 speed = ethtool_cmd_speed(ecmd); + + switch (speed) { + case SPEED_1000: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_10baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + sky2->speed = speed; + sky2->duplex = ecmd->duplex; + sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; + } + + if (netif_running(dev)) { + sky2_phy_reinit(sky2); + sky2_set_multicast(dev); + } + + return 0; +} + +static void sky2_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(sky2->hw->pdev), + sizeof(info->bus_info)); +} + +static const struct sky2_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} sky2_stats[] = { + { "tx_bytes", GM_TXO_OK_HI }, + { "rx_bytes", GM_RXO_OK_HI }, + { "tx_broadcast", GM_TXF_BC_OK }, + { "rx_broadcast", GM_RXF_BC_OK }, + { "tx_multicast", GM_TXF_MC_OK }, + { "rx_multicast", GM_RXF_MC_OK }, + { "tx_unicast", GM_TXF_UC_OK }, + { "rx_unicast", GM_RXF_UC_OK }, + { "tx_mac_pause", GM_TXF_MPAUSE }, + { "rx_mac_pause", GM_RXF_MPAUSE }, + { "collisions", GM_TXF_COL }, + { "late_collision",GM_TXF_LAT_COL }, + { "aborted", GM_TXF_ABO_COL }, + { "single_collisions", GM_TXF_SNG_COL }, + { "multi_collisions", GM_TXF_MUL_COL }, + + { "rx_short", GM_RXF_SHT }, + { "rx_runt", GM_RXE_FRAG }, + { "rx_64_byte_packets", GM_RXF_64B }, + { "rx_65_to_127_byte_packets", GM_RXF_127B }, + { "rx_128_to_255_byte_packets", GM_RXF_255B }, + { "rx_256_to_511_byte_packets", GM_RXF_511B }, + { "rx_512_to_1023_byte_packets", GM_RXF_1023B }, + { "rx_1024_to_1518_byte_packets", GM_RXF_1518B }, + { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ }, + { "rx_too_long", GM_RXF_LNG_ERR }, + { "rx_fifo_overflow", GM_RXE_FIFO_OV }, + { "rx_jabber", GM_RXF_JAB_PKT }, + { "rx_fcs_error", GM_RXF_FCS_ERR }, + + { "tx_64_byte_packets", GM_TXF_64B }, + { "tx_65_to_127_byte_packets", GM_TXF_127B }, + { "tx_128_to_255_byte_packets", GM_TXF_255B }, + { "tx_256_to_511_byte_packets", GM_TXF_511B }, + { "tx_512_to_1023_byte_packets", GM_TXF_1023B }, + { "tx_1024_to_1518_byte_packets", GM_TXF_1518B }, + { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ }, + { 
"tx_fifo_underrun", GM_TXE_FIFO_UR }, +}; + +static u32 sky2_get_msglevel(struct net_device *netdev) +{ + struct sky2_port *sky2 = netdev_priv(netdev); + return sky2->msg_enable; +} + +static int sky2_nway_reset(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED)) + return -EINVAL; + + sky2_phy_reinit(sky2); + sky2_set_multicast(dev); + + return 0; +} + +static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + int i; + + data[0] = get_stats64(hw, port, GM_TXO_OK_LO); + data[1] = get_stats64(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < count; i++) + data[i] = get_stats32(hw, port, sky2_stats[i].offset); +} + +static void sky2_set_msglevel(struct net_device *netdev, u32 value) +{ + struct sky2_port *sky2 = netdev_priv(netdev); + sky2->msg_enable = value; +} + +static int sky2_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(sky2_stats); + default: + return -EOPNOTSUPP; + } +} + +static void sky2_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats)); +} + +static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(sky2_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + sky2_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static int sky2_set_mac_address(struct net_device *dev, void *p) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + const struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_1 + port * 8, + dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port * 8, + dev->dev_addr, ETH_ALEN); + + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + + return 0; +} + +static inline void sky2_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit; + + bit = ether_crc(ETH_ALEN, addr) & 63; + filter[bit >> 3] |= 1 << (bit & 7); +} + +static void sky2_set_multicast(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + struct netdev_hw_addr *ha; + u16 reg; + u8 filter[8]; + int rx_pause; + static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + + rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH); + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause) + reg &= ~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + sky2_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + sky2_add_filter(filter, ha->addr); + } + + gma_write16(hw, port, GM_MC_ADDR_H1, + (u16) filter[0] | ((u16) filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, + (u16) 
filter[2] | ((u16) filter[3] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H3, + (u16) filter[4] | ((u16) filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16) filter[6] | ((u16) filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned int start; + u64 _bytes, _packets; + + do { + start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); + _bytes = sky2->rx_stats.bytes; + _packets = sky2->rx_stats.packets; + } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start)); + + stats->rx_packets = _packets; + stats->rx_bytes = _bytes; + + do { + start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); + _bytes = sky2->tx_stats.bytes; + _packets = sky2->tx_stats.packets; + } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); + + stats->tx_packets = _packets; + stats->tx_bytes = _bytes; + + stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK) + + get_stats32(hw, port, GM_RXF_BC_OK); + + stats->collisions = get_stats32(hw, port, GM_TXF_COL); + + stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR); + stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR); + stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT) + + get_stats32(hw, port, GM_RXE_FRAG); + stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->tx_fifo_errors = dev->stats.tx_fifo_errors; + + return stats; +} + +/* Can have one global because blinking is controlled by + * ethtool and that is always under RTNL mutex + */ +static void sky2_led(struct sky2_port *sky2, enum led_mode mode) +{ + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + + spin_lock_bh(&sky2->phy_lock); + if (hw->chip_id == CHIP_ID_YUKON_EC_U || + hw->chip_id == CHIP_ID_YUKON_EX || + hw->chip_id == CHIP_ID_YUKON_SUPR) { + u16 pg; + pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); + + switch (mode) { + case MO_LED_OFF: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(8) | + PHY_M_LEDC_INIT_CTRL(8) | + PHY_M_LEDC_STA1_CTRL(8) | + PHY_M_LEDC_STA0_CTRL(8)); + break; + case MO_LED_ON: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(9) | + PHY_M_LEDC_INIT_CTRL(9) | + PHY_M_LEDC_STA1_CTRL(9) | + PHY_M_LEDC_STA0_CTRL(9)); + break; + case MO_LED_BLINK: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(0xa) | + PHY_M_LEDC_INIT_CTRL(0xa) | + PHY_M_LEDC_STA1_CTRL(0xa) | + PHY_M_LEDC_STA0_CTRL(0xa)); + break; + case MO_LED_NORM: + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, + PHY_M_LEDC_LOS_CTRL(1) | + PHY_M_LEDC_INIT_CTRL(8) | + PHY_M_LEDC_STA1_CTRL(7) | + PHY_M_LEDC_STA0_CTRL(7)); + } + + gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); + } else + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(mode) | + PHY_M_LED_MO_10(mode) | + PHY_M_LED_MO_100(mode) | + PHY_M_LED_MO_1000(mode) | + PHY_M_LED_MO_RX(mode) | + PHY_M_LED_MO_TX(mode)); + + spin_unlock_bh(&sky2->phy_lock); +} + +/* blink LED's for finding board */ +static int sky2_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + case ETHTOOL_ID_INACTIVE: + 
sky2_led(sky2, MO_LED_NORM); + break; + case ETHTOOL_ID_ON: + sky2_led(sky2, MO_LED_ON); + break; + case ETHTOOL_ID_OFF: + sky2_led(sky2, MO_LED_OFF); + break; + } + + return 0; +} + +static void sky2_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + switch (sky2->flow_mode) { + case FC_NONE: + ecmd->tx_pause = ecmd->rx_pause = 0; + break; + case FC_TX: + ecmd->tx_pause = 1, ecmd->rx_pause = 0; + break; + case FC_RX: + ecmd->tx_pause = 0, ecmd->rx_pause = 1; + break; + case FC_BOTH: + ecmd->tx_pause = ecmd->rx_pause = 1; + } + + ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE) + ? AUTONEG_ENABLE : AUTONEG_DISABLE; +} + +static int sky2_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + + if (ecmd->autoneg == AUTONEG_ENABLE) + sky2->flags |= SKY2_FLAG_AUTO_PAUSE; + else + sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE; + + sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause); + + if (netif_running(dev)) + sky2_phy_reinit(sky2); + + return 0; +} + +static int sky2_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + + if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP) + ecmd->tx_coalesce_usecs = 0; + else { + u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI); + ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks); + } + ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH); + + if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP) + ecmd->rx_coalesce_usecs = 0; + else { + u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI); + ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks); + } + ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM); + + if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP) + ecmd->rx_coalesce_usecs_irq = 0; + else { + u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI); + ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks); + } + + ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM); + + return 0; +} + +/* Note: this affect both ports */ +static int sky2_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + const u32 tmax = sky2_clk2us(hw, 0x0ffffff); + + if (ecmd->tx_coalesce_usecs > tmax || + ecmd->rx_coalesce_usecs > tmax || + ecmd->rx_coalesce_usecs_irq > tmax) + return -EINVAL; + + if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1) + return -EINVAL; + if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) + return -EINVAL; + if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) + return -EINVAL; + + if (ecmd->tx_coalesce_usecs == 0) + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); + else { + sky2_write32(hw, STAT_TX_TIMER_INI, + sky2_us2clk(hw, ecmd->tx_coalesce_usecs)); + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); + } + sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames); + + if (ecmd->rx_coalesce_usecs == 0) + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); + else { + sky2_write32(hw, STAT_LEV_TIMER_INI, + sky2_us2clk(hw, ecmd->rx_coalesce_usecs)); + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); + } + sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames); + + if (ecmd->rx_coalesce_usecs_irq == 0) + sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP); + else { + sky2_write32(hw, STAT_ISR_TIMER_INI, + sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq)); + sky2_write8(hw, 
STAT_ISR_TIMER_CTRL, TIM_START);
+        }
+        sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
+        return 0;
+}
+
+/*
+ * Hardware is limited to min of 128 and max of 2048 for ring size
+ * and rounded up to next power of two
+ * to avoid division in modulus calculation
+ */
+static unsigned long roundup_ring_size(unsigned long pending)
+{
+        return max(128ul, roundup_pow_of_two(pending+1));
+}
+
+static void sky2_get_ringparam(struct net_device *dev,
+                               struct ethtool_ringparam *ering)
+{
+        struct sky2_port *sky2 = netdev_priv(dev);
+
+        ering->rx_max_pending = RX_MAX_PENDING;
+        ering->tx_max_pending = TX_MAX_PENDING;
+
+        ering->rx_pending = sky2->rx_pending;
+        ering->tx_pending = sky2->tx_pending;
+}
+
+static int sky2_set_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *ering)
+{
+        struct sky2_port *sky2 = netdev_priv(dev);
+
+        if (ering->rx_pending > RX_MAX_PENDING ||
+            ering->rx_pending < 8 ||
+            ering->tx_pending < TX_MIN_PENDING ||
+            ering->tx_pending > TX_MAX_PENDING)
+                return -EINVAL;
+
+        sky2_detach(dev);
+
+        sky2->rx_pending = ering->rx_pending;
+        sky2->tx_pending = ering->tx_pending;
+        sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending);
+
+        return sky2_reattach(dev);
+}
+
+static int sky2_get_regs_len(struct net_device *dev)
+{
+        return 0x4000;
+}
+
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+        /* This complicated switch statement is to make sure we
+         * only access regions that are unreserved.
+         * Some blocks are only valid on dual port cards.
+         */
+        switch (b) {
+        /* second port */
+        case 5: /* Tx Arbiter 2 */
+        case 9: /* RX2 */
+        case 14 ... 15: /* TX2 */
+        case 17: case 19: /* Ram Buffer 2 */
+        case 22 ... 23: /* Tx Ram Buffer 2 */
+        case 25: /* Rx MAC Fifo 1 */
+        case 27: /* Tx MAC Fifo 2 */
+        case 31: /* GPHY 2 */
+        case 40 ... 47: /* Pattern Ram 2 */
+        case 52: case 54: /* TCP Segmentation 2 */
+        case 112 ... 116: /* GMAC 2 */
+                return hw->ports > 1;
+
+        case 0: /* Control */
+        case 2: /* Mac address */
+        case 4: /* Tx Arbiter 1 */
+        case 7: /* PCI express reg */
+        case 8: /* RX1 */
+        case 12 ... 13: /* TX1 */
+        case 16: case 18:/* Rx Ram Buffer 1 */
+        case 20 ... 21: /* Tx Ram Buffer 1 */
+        case 24: /* Rx MAC Fifo 1 */
+        case 26: /* Tx MAC Fifo 1 */
+        case 28 ... 29: /* Descriptor and status unit */
+        case 30: /* GPHY 1*/
+        case 32 ... 39: /* Pattern Ram 1 */
+        case 48: case 50: /* TCP Segmentation 1 */
+        case 56 ... 60: /* PCI space */
+        case 80 ...
84: /* GMAC 1 */ + return 1; + + default: + return 0; + } +} + +/* + * Returns copy of control register region + * Note: ethtool_get_regs always provides full size (16k) buffer + */ +static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + const void __iomem *io = sky2->hw->regs; + unsigned int b; + + regs->version = 1; + + for (b = 0; b < 128; b++) { + /* skip poisonous diagnostic ram region in block 3 */ + if (b == 3) + memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); + else if (sky2_reg_access_ok(sky2->hw, b)) + memcpy_fromio(p, io, 128); + else + memset(p, 0, 128); + + p += 128; + io += 128; + } +} + +static int sky2_get_eeprom_len(struct net_device *dev) +{ + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 reg2; + + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); + return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) +{ + unsigned long start = jiffies; + + while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) { + /* Can take up to 10.6 ms for write */ + if (time_after(jiffies, start + HZ/4)) { + dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); + return -ETIMEDOUT; + } + mdelay(1); + } + + return 0; +} + +static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data, + u16 offset, size_t length) +{ + int rc = 0; + + while (length > 0) { + u32 val; + + sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); + rc = sky2_vpd_wait(hw, cap, 0); + if (rc) + break; + + val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); + + memcpy(data, &val, min(sizeof(val), length)); + offset += sizeof(u32); + data += sizeof(u32); + length -= sizeof(u32); + } + + return rc; +} + +static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data, + u16 offset, unsigned int length) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < length; i += sizeof(u32)) { + u32 val = *(u32 *)(data + i); + + sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); + sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); + + rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F); + if (rc) + break; + } + return rc; +} + +static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); + + if (!cap) + return -EINVAL; + + eeprom->magic = SKY2_EEPROM_MAGIC; + + return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len); +} + +static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct sky2_port *sky2 = netdev_priv(dev); + int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKY2_EEPROM_MAGIC) + return -EINVAL; + + /* Partial writes not supported */ + if ((eeprom->offset & 3) || (eeprom->len & 3)) + return -EINVAL; + + return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); +} + +static netdev_features_t sky2_fix_features(struct net_device *dev, + netdev_features_t features) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + const struct sky2_hw *hw = sky2->hw; + + /* In order to do Jumbo packets on these chips, need to turn off the + * transmit store/forward. Therefore checksum offload won't work. 
+ */ + if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) { + netdev_info(dev, "checksum offload not possible with jumbo frames\n"); + features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM); + } + + /* Some hardware requires receive checksum for RSS to work. */ + if ( (features & NETIF_F_RXHASH) && + !(features & NETIF_F_RXCSUM) && + (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) { + netdev_info(dev, "receive hashing forces receive checksum\n"); + features |= NETIF_F_RXCSUM; + } + + return features; +} + +static int sky2_set_features(struct net_device *dev, netdev_features_t features) +{ + struct sky2_port *sky2 = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if ((changed & NETIF_F_RXCSUM) && + !(sky2->hw->flags & SKY2_HW_NEW_LE)) { + sky2_write32(sky2->hw, + Q_ADDR(rxqaddr[sky2->port], Q_CSR), + (features & NETIF_F_RXCSUM) + ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); + } + + if (changed & NETIF_F_RXHASH) + rx_set_rss(dev, features); + + if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) + sky2_vlan_mode(dev, features); + + return 0; +} + +static const struct ethtool_ops sky2_ethtool_ops = { + .get_settings = sky2_get_settings, + .set_settings = sky2_set_settings, + .get_drvinfo = sky2_get_drvinfo, + .get_wol = sky2_get_wol, + .set_wol = sky2_set_wol, + .get_msglevel = sky2_get_msglevel, + .set_msglevel = sky2_set_msglevel, + .nway_reset = sky2_nway_reset, + .get_regs_len = sky2_get_regs_len, + .get_regs = sky2_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = sky2_get_eeprom_len, + .get_eeprom = sky2_get_eeprom, + .set_eeprom = sky2_set_eeprom, + .get_strings = sky2_get_strings, + .get_coalesce = sky2_get_coalesce, + .set_coalesce = sky2_set_coalesce, + .get_ringparam = sky2_get_ringparam, + .set_ringparam = sky2_set_ringparam, + .get_pauseparam = sky2_get_pauseparam, + .set_pauseparam = sky2_set_pauseparam, + .set_phys_id = sky2_set_phys_id, + .get_sset_count = sky2_get_sset_count, + .get_ethtool_stats = sky2_get_ethtool_stats, +}; + +#ifdef CONFIG_SKY2_DEBUG + +static struct dentry *sky2_debug; + + +/* + * Read and parse the first part of Vital Product Data + */ +#define VPD_SIZE 128 +#define VPD_MAGIC 0x82 + +static const struct vpd_tag { + char tag[2]; + char *label; +} vpd_tags[] = { + { "PN", "Part Number" }, + { "EC", "Engineering Level" }, + { "MN", "Manufacturer" }, + { "SN", "Serial Number" }, + { "YA", "Asset Tag" }, + { "VL", "First Error Log Message" }, + { "VF", "Second Error Log Message" }, + { "VB", "Boot Agent ROM Configuration" }, + { "VE", "EFI UNDI Configuration" }, +}; + +static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw) +{ + size_t vpd_size; + loff_t offs; + u8 len; + unsigned char *buf; + u16 reg2; + + reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); + vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); + + seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev)); + buf = kmalloc(vpd_size, GFP_KERNEL); + if (!buf) { + seq_puts(seq, "no memory!\n"); + return; + } + + if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) { + seq_puts(seq, "VPD read failed\n"); + goto out; + } + + if (buf[0] != VPD_MAGIC) { + seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]); + goto out; + } + len = buf[1]; + if (len == 0 || len > vpd_size - 4) { + seq_printf(seq, "Invalid id length: %d\n", len); + goto out; + } + + seq_printf(seq, "%.*s\n", len, buf + 3); + offs = len + 3; + + while (offs < vpd_size - 4) { + int i; + + if (!memcmp("RW", buf + offs, 2)) /* end marker */ + break; + len = buf[offs + 2]; + if 
(offs + len + 3 >= vpd_size) + break; + + for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) { + if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) { + seq_printf(seq, " %s: %.*s\n", + vpd_tags[i].label, len, buf + offs + 3); + break; + } + } + offs += len + 3; + } +out: + kfree(buf); +} + +static int sky2_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + unsigned port = sky2->port; + unsigned idx, last; + int sop; + + sky2_show_vpd(seq, hw); + + seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n", + sky2_read32(hw, B0_ISRC), + sky2_read32(hw, B0_IMSK), + sky2_read32(hw, B0_Y2_SP_ICR)); + + if (!netif_running(dev)) { + seq_printf(seq, "network not running\n"); + return 0; + } + + napi_disable(&hw->napi); + last = sky2_read16(hw, STAT_PUT_IDX); + + seq_printf(seq, "Status ring %u\n", hw->st_size); + if (hw->st_idx == last) + seq_puts(seq, "Status ring (empty)\n"); + else { + seq_puts(seq, "Status ring\n"); + for (idx = hw->st_idx; idx != last && idx < hw->st_size; + idx = RING_NEXT(idx, hw->st_size)) { + const struct sky2_status_le *le = hw->st_le + idx; + seq_printf(seq, "[%d] %#x %d %#x\n", + idx, le->opcode, le->length, le->status); + } + seq_puts(seq, "\n"); + } + + seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n", + sky2->tx_cons, sky2->tx_prod, + sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), + sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE))); + + /* Dump contents of tx ring */ + sop = 1; + for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size; + idx = RING_NEXT(idx, sky2->tx_ring_size)) { + const struct sky2_tx_le *le = sky2->tx_le + idx; + u32 a = le32_to_cpu(le->addr); + + if (sop) + seq_printf(seq, "%u:", idx); + sop = 0; + + switch (le->opcode & ~HW_OWNER) { + case OP_ADDR64: + seq_printf(seq, " %#x:", a); + break; + case OP_LRGLEN: + seq_printf(seq, " mtu=%d", a); + break; + case OP_VLAN: + seq_printf(seq, " vlan=%d", be16_to_cpu(le->length)); + break; + case OP_TCPLISW: + seq_printf(seq, " csum=%#x", a); + break; + case OP_LARGESEND: + seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length)); + break; + case OP_PACKET: + seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length)); + break; + case OP_BUFFER: + seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length)); + break; + default: + seq_printf(seq, " op=%#x,%#x(%d)", le->opcode, + a, le16_to_cpu(le->length)); + } + + if (le->ctrl & EOP) { + seq_putc(seq, '\n'); + sop = 1; + } + } + + seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n", + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)), + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), + sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); + + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + return 0; +} + +static int sky2_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, sky2_debug_show, inode->i_private); +} + +static const struct file_operations sky2_debug_fops = { + .owner = THIS_MODULE, + .open = sky2_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int sky2_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct sky2_port *sky2 = netdev_priv(dev); + + if (dev->netdev_ops->ndo_open != sky2_open || 
!sky2_debug) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGENAME: + if (sky2->debugfs) { + sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, + sky2_debug, dev->name); + } + break; + + case NETDEV_GOING_DOWN: + if (sky2->debugfs) { + netdev_printk(KERN_DEBUG, dev, "remove debugfs\n"); + debugfs_remove(sky2->debugfs); + sky2->debugfs = NULL; + } + break; + + case NETDEV_UP: + sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO, + sky2_debug, dev, + &sky2_debug_fops); + if (IS_ERR(sky2->debugfs)) + sky2->debugfs = NULL; + } + + return NOTIFY_DONE; +} + +static struct notifier_block sky2_notifier = { + .notifier_call = sky2_device_event, +}; + + +static __init void sky2_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("sky2", NULL); + if (!ent || IS_ERR(ent)) + return; + + sky2_debug = ent; + register_netdevice_notifier(&sky2_notifier); +} + +static __exit void sky2_debug_cleanup(void) +{ + if (sky2_debug) { + unregister_netdevice_notifier(&sky2_notifier); + debugfs_remove(sky2_debug); + sky2_debug = NULL; + } +} + +#else +#define sky2_debug_init() +#define sky2_debug_cleanup() +#endif + +/* Two copies of network device operations to handle special case of + not allowing netpoll on second port */ +static const struct net_device_ops sky2_netdev_ops[2] = { + { + .ndo_open = sky2_open, + .ndo_stop = sky2_close, + .ndo_start_xmit = sky2_xmit_frame, + .ndo_do_ioctl = sky2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sky2_set_mac_address, + .ndo_set_rx_mode = sky2_set_multicast, + .ndo_change_mtu = sky2_change_mtu, + .ndo_fix_features = sky2_fix_features, + .ndo_set_features = sky2_set_features, + .ndo_tx_timeout = sky2_tx_timeout, + .ndo_get_stats64 = sky2_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sky2_netpoll, +#endif + }, + { + .ndo_open = sky2_open, + .ndo_stop = sky2_close, + .ndo_start_xmit = sky2_xmit_frame, + .ndo_do_ioctl = sky2_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sky2_set_mac_address, + .ndo_set_rx_mode = sky2_set_multicast, + .ndo_change_mtu = sky2_change_mtu, + .ndo_fix_features = sky2_fix_features, + .ndo_set_features = sky2_set_features, + .ndo_tx_timeout = sky2_tx_timeout, + .ndo_get_stats64 = sky2_get_stats, + }, +}; + +/* Initialize network device */ +static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port, + int highmem, int wol) +{ + struct sky2_port *sky2; + struct net_device *dev = alloc_etherdev(sizeof(*sky2)); + const void *iap; + + if (!dev) + return NULL; + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->irq = hw->pdev->irq; + dev->ethtool_ops = &sky2_ethtool_ops; + dev->watchdog_timeo = TX_WATCHDOG; + dev->netdev_ops = &sky2_netdev_ops[port]; + + sky2 = netdev_priv(dev); + sky2->netdev = dev; + sky2->hw = hw; + sky2->msg_enable = netif_msg_init(debug, default_msg); + + u64_stats_init(&sky2->tx_stats.syncp); + u64_stats_init(&sky2->rx_stats.syncp); + + /* Auto speed and flow control */ + sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; + if (hw->chip_id != CHIP_ID_YUKON_XL) + dev->hw_features |= NETIF_F_RXCSUM; + + sky2->flow_mode = FC_BOTH; + + sky2->duplex = -1; + sky2->speed = -1; + sky2->advertising = sky2_supported_modes(hw); + sky2->wol = wol; + + spin_lock_init(&sky2->phy_lock); + + sky2->tx_pending = TX_DEF_PENDING; + sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING); + sky2->rx_pending = RX_DEF_PENDING; + + hw->dev[port] = dev; + + sky2->port = port; + + dev->hw_features |= NETIF_F_IP_CSUM 
| NETIF_F_SG | NETIF_F_TSO; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + /* Enable receive hashing unless hardware is known broken */ + if (!(hw->flags & SKY2_HW_RSS_BROKEN)) + dev->hw_features |= NETIF_F_RXHASH; + + if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features |= SKY2_VLAN_OFFLOADS; + } + + dev->features |= dev->hw_features; + + /* try to get mac address in the following order: + * 1) from device tree data + * 2) from internal registers set by bootloader + */ + iap = of_get_mac_address(hw->pdev->dev.of_node); + if (iap) + memcpy(dev->dev_addr, iap, ETH_ALEN); + else + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, + ETH_ALEN); + + return dev; +} + +static void sky2_show_addr(struct net_device *dev) +{ + const struct sky2_port *sky2 = netdev_priv(dev); + + netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr); +} + +/* Handle software interrupt used during MSI test */ +static irqreturn_t sky2_test_intr(int irq, void *dev_id) +{ + struct sky2_hw *hw = dev_id; + u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); + + if (status == 0) + return IRQ_NONE; + + if (status & Y2_IS_IRQ_SW) { + hw->flags |= SKY2_HW_USE_MSI; + wake_up(&hw->msi_wait); + sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); + } + sky2_write32(hw, B0_Y2_SP_ICR, 2); + + return IRQ_HANDLED; +} + +/* Test interrupt path by forcing a a software IRQ */ +static int sky2_test_msi(struct sky2_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + int err; + + init_waitqueue_head(&hw->msi_wait); + + err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); + if (err) { + dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); + return err; + } + + sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); + + sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); + sky2_read8(hw, B0_CTST); + + wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10); + + if (!(hw->flags & SKY2_HW_USE_MSI)) { + /* MSI test failed, go back to INTx mode */ + dev_info(&pdev->dev, "No interrupt generated using MSI, " + "switching to INTx mode.\n"); + + err = -EOPNOTSUPP; + sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); + } + + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + free_irq(pdev->irq, hw); + + return err; +} + +/* This driver supports yukon2 chipset only */ +static const char *sky2_name(u8 chipid, char *buf, int sz) +{ + const char *name[] = { + "XL", /* 0xb3 */ + "EC Ultra", /* 0xb4 */ + "Extreme", /* 0xb5 */ + "EC", /* 0xb6 */ + "FE", /* 0xb7 */ + "FE+", /* 0xb8 */ + "Supreme", /* 0xb9 */ + "UL 2", /* 0xba */ + "Unknown", /* 0xbb */ + "Optima", /* 0xbc */ + "OptimaEEE", /* 0xbd */ + "Optima 2", /* 0xbe */ + }; + + if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2) + strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); + else + snprintf(buf, sz, "(chip %#x)", chipid); + return buf; +} + +static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev, *dev1; + struct sky2_hw *hw; + int err, using_dac = 0, wol_default; + u32 reg; + char buf1[16]; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_out; + } + + /* Get configuration information + * Note: only regular PCI config access once to test for HW issues + * other PCI access through shared memory for speed and to + * avoid MMCONFIG problems. 
+ */ + err = pci_read_config_dword(pdev, PCI_DEV_REG2, ®); + if (err) { + dev_err(&pdev->dev, "PCI read config failed\n"); + goto err_out_disable; + } + + if (~reg == 0) { + dev_err(&pdev->dev, "PCI configuration read error\n"); + err = -EIO; + goto err_out_disable; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_out_disable; + } + + pci_set_master(pdev); + + if (sizeof(dma_addr_t) > sizeof(u32) && + !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) { + using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err < 0) { + dev_err(&pdev->dev, "unable to obtain 64 bit DMA " + "for consistent allocations\n"); + goto err_out_free_regions; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_out_free_regions; + } + } + + +#ifdef __BIG_ENDIAN + /* The sk98lin vendor driver uses hardware byte swapping but + * this driver uses software swapping. + */ + reg &= ~PCI_REV_DESC; + err = pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + if (err) { + dev_err(&pdev->dev, "PCI write config failed\n"); + goto err_out_free_regions; + } +#endif + + wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; + + err = -ENOMEM; + + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); + if (!hw) + goto err_out_free_regions; + + hw->pdev = pdev; + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); + + hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + if (!hw->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_out_free_hw; + } + + err = sky2_init(hw); + if (err) + goto err_out_iounmap; + + /* ring for status responses */ + hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); + hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), + &hw->st_dma); + if (!hw->st_le) { + err = -ENOMEM; + goto err_out_reset; + } + + dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", + sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); + + sky2_reset(hw); + + dev = sky2_init_netdev(hw, 0, using_dac, wol_default); + if (!dev) { + err = -ENOMEM; + goto err_out_free_pci; + } + + if (!disable_msi && pci_enable_msi(pdev) == 0) { + err = sky2_test_msi(hw); + if (err) { + pci_disable_msi(pdev); + if (err != -EOPNOTSUPP) + goto err_out_free_netdev; + } + } + + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "cannot register net device\n"); + goto err_out_free_netdev; + } + + netif_carrier_off(dev); + + sky2_show_addr(dev); + + if (hw->ports > 1) { + dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default); + if (!dev1) { + err = -ENOMEM; + goto err_out_unregister; + } + + err = register_netdev(dev1); + if (err) { + dev_err(&pdev->dev, "cannot register second net device\n"); + goto err_out_free_dev1; + } + + err = sky2_setup_irq(hw, hw->irq_name); + if (err) + goto err_out_unregister_dev1; + + sky2_show_addr(dev1); + } + + setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw); + INIT_WORK(&hw->restart_work, sky2_restart); + + pci_set_drvdata(pdev, hw); + pdev->d3_delay = 150; + + return 0; + +err_out_unregister_dev1: + unregister_netdev(dev1); +err_out_free_dev1: + free_netdev(dev1); +err_out_unregister: + unregister_netdev(dev); +err_out_free_netdev: + if (hw->flags & SKY2_HW_USE_MSI) + 
pci_disable_msi(pdev); + free_netdev(dev); +err_out_free_pci: + pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); +err_out_reset: + sky2_write8(hw, B0_CTST, CS_RST_SET); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out: + return err; +} + +static void sky2_remove(struct pci_dev *pdev) +{ + struct sky2_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return; + + del_timer_sync(&hw->watchdog_timer); + cancel_work_sync(&hw->restart_work); + + for (i = hw->ports-1; i >= 0; --i) + unregister_netdev(hw->dev[i]); + + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + + sky2_power_aux(hw); + + sky2_write8(hw, B0_CTST, CS_RST_SET); + sky2_read8(hw, B0_CTST); + + if (hw->ports > 1) { + napi_disable(&hw->napi); + free_irq(pdev->irq, hw); + } + + if (hw->flags & SKY2_HW_USE_MSI) + pci_disable_msi(pdev); + pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); + pci_release_regions(pdev); + pci_disable_device(pdev); + + for (i = hw->ports-1; i >= 0; --i) + free_netdev(hw->dev[i]); + + iounmap(hw->regs); + kfree(hw); +} + +static int sky2_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sky2_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return 0; + + del_timer_sync(&hw->watchdog_timer); + cancel_work_sync(&hw->restart_work); + + rtnl_lock(); + + sky2_all_down(hw); + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct sky2_port *sky2 = netdev_priv(dev); + + if (sky2->wol) + sky2_wol_init(sky2); + } + + sky2_power_aux(hw); + rtnl_unlock(); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sky2_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sky2_hw *hw = pci_get_drvdata(pdev); + int err; + + if (!hw) + return 0; + + /* Re-enable all clocks */ + err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0); + if (err) { + dev_err(&pdev->dev, "PCI write config failed\n"); + goto out; + } + + rtnl_lock(); + sky2_reset(hw); + sky2_all_up(hw); + rtnl_unlock(); + + return 0; +out: + + dev_err(&pdev->dev, "resume failed (%d)\n", err); + pci_disable_device(pdev); + return err; +} + +static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); +#define SKY2_PM_OPS (&sky2_pm_ops) + +#else + +#define SKY2_PM_OPS NULL +#endif + +static void sky2_shutdown(struct pci_dev *pdev) +{ + sky2_suspend(&pdev->dev); + pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +} + +static struct pci_driver sky2_driver = { + .name = DRV_NAME, + .id_table = sky2_id_table, + .probe = sky2_probe, + .remove = sky2_remove, + .shutdown = sky2_shutdown, + .driver.pm = SKY2_PM_OPS, +}; + +static int __init sky2_init_module(void) +{ + pr_info("driver version " DRV_VERSION "\n"); + + sky2_debug_init(); + return pci_register_driver(&sky2_driver); +} + +static void __exit sky2_cleanup_module(void) +{ + pci_unregister_driver(&sky2_driver); + sky2_debug_cleanup(); +} + +module_init(sky2_init_module); +module_exit(sky2_cleanup_module); + +MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver"); +MODULE_AUTHOR("Stephen Hemminger "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h new file mode 100644 index 000000000..ec6dcd801 --- /dev/null +++ 
b/drivers/net/ethernet/marvell/sky2.h @@ -0,0 +1,2432 @@ +/* + * Definitions for the new Marvell Yukon 2 driver. + */ +#ifndef _SKY2_H +#define _SKY2_H + +#define ETH_JUMBO_MTU 9000 /* Maximum MTU supported */ + +/* PCI config registers */ +enum { + PCI_DEV_REG1 = 0x40, + PCI_DEV_REG2 = 0x44, + PCI_DEV_STATUS = 0x7c, + PCI_DEV_REG3 = 0x80, + PCI_DEV_REG4 = 0x84, + PCI_DEV_REG5 = 0x88, + PCI_CFG_REG_0 = 0x90, + PCI_CFG_REG_1 = 0x94, + + PSM_CONFIG_REG0 = 0x98, + PSM_CONFIG_REG1 = 0x9C, + PSM_CONFIG_REG2 = 0x160, + PSM_CONFIG_REG3 = 0x164, + PSM_CONFIG_REG4 = 0x168, + + PCI_LDO_CTRL = 0xbc, +}; + +/* Yukon-2 */ +enum pci_dev_reg_1 { + PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ + PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */ + PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */ + PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */ + PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ + PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ + PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ + PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */ + + PCI_PHY_LNK_TIM_MSK= 3L<<8,/* Bit 9.. 8: GPHY Link Trigger Timer */ + PCI_ENA_L1_EVENT = 1<<7, /* Enable PEX L1 Event */ + PCI_ENA_GPHY_LNK = 1<<6, /* Enable PEX L1 on GPHY Link down */ + PCI_FORCE_PEX_L1 = 1<<5, /* Force to PEX L1 */ +}; + +enum pci_dev_reg_2 { + PCI_VPD_WR_THR = 0xffL<<24, /* Bit 31..24: VPD Write Threshold */ + PCI_DEV_SEL = 0x7fL<<17, /* Bit 23..17: EEPROM Device Select */ + PCI_VPD_ROM_SZ = 7L<<14, /* Bit 16..14: VPD ROM Size */ + + PCI_PATCH_DIR = 0xfL<<8, /* Bit 11.. 8: Ext Patches dir 3..0 */ + PCI_EXT_PATCHS = 0xfL<<4, /* Bit 7.. 4: Extended Patches 3..0 */ + PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */ + PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */ + + PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ +}; + +/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */ +enum pci_dev_reg_3 { + P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */ + P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */ + P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */ + P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */ + P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */ + P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */ + P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */ + P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */ + P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */ + P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */ + P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */ + P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */ + P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. 
Link1 GMAC */ + P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */ + P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */ + P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */ + P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */ + P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */ + P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */ + P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */ + PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS | + P_CLK_COR_REGS_D0_DIS | + P_CLK_COR_LNK1_D0_DIS | + P_CLK_MAC_LNK1_D0_DIS | + P_CLK_PCI_MST_ARB_DIS | + P_CLK_COR_COMMON_DIS | + P_CLK_COR_LNK1_BMU_DIS, +}; + +/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ +enum pci_dev_reg_4 { + /* (Link Training & Status State Machine) */ + P_PEX_LTSSM_STAT_MSK = 0x7fL<<25, /* Bit 31..25: PEX LTSSM Mask */ +#define P_PEX_LTSSM_STAT(x) ((x << 25) & P_PEX_LTSSM_STAT_MSK) + P_PEX_LTSSM_L1_STAT = 0x34, + P_PEX_LTSSM_DET_STAT = 0x01, + P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ + /* (Active State Power Management) */ + P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ + P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */ + P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */ + P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */ + + P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */ + P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */ + P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */ + P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */ + P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */ + P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN + | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY, +}; + +/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */ +enum pci_dev_reg_5 { + /* Bit 31..27: for A3 & later */ + P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */ + P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */ + P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */ + P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */ + /* Bit 26..16: Release Clock on Event */ + P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */ + P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */ + P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */ + P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */ + P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */ + P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */ + P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */ + P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */ + P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */ + P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */ + P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */ + + /* Bit 10.. 
0: Mask for Gate Clock */ + P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */ + P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */ + P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */ + P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */ + P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */ + P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */ + P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */ + P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */ + P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */ + P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */ + P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */ + + PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET | + P_REL_INT_FIFO_N_EMPTY | + P_REL_PCIE_EXIT_L1_ST | + P_REL_PCIE_RX_EX_IDLE | + P_GAT_GPHY_N_REC_PACKET | + P_GAT_INT_FIFO_EMPTY | + P_GAT_PCIE_ENTER_L1_ST | + P_GAT_PCIE_RX_EL_IDLE, +}; + +/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */ +enum pci_cfg_reg1 { + P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */ + /* Bit 23..21: Release Clock on Event */ + P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */ + P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */ + P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */ + /* Bit 20..18: Gate Clock on Event */ + P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Finished */ + P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */ + P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */ + P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */ + P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */ + + P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */ + + P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */ + P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */ + + PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST | + P_CF1_REL_LDR_NOT_FIN | + P_CF1_REL_VMAIN_AVLBL | + P_CF1_REL_PCIE_RESET | + P_CF1_GAT_LDR_NOT_FIN | + P_CF1_GAT_PCIE_RESET | + P_CF1_PRST_PHY_CLKREQ | + P_CF1_ENA_CFG_LDR_DONE | + P_CF1_ENA_TXBMU_RD_IDLE | + P_CF1_ENA_TXBMU_WR_IDLE, +}; + +/* Yukon-Optima */ +enum { + PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */ + + PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */ + PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */ + + PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */ + + PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */ + PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */ + PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */ + PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */ + + PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */ + + PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */ + PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */ +}; + +/* Yukon-Supreme */ +enum { + PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */ + + PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */ + PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */ + PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */ + PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */ + PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */ + PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */ + PSM_CONFIG_REG1_UART_FC_DSR_VAL = 
1<<23, /* Default value for UART_DSR_n */ + PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */ + PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */ + PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */ + PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */ + PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */ + PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */ + PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */ + PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */ + PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */ + PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */ + PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */ + PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */ + + PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */ + PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */ + PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */ + PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */ + PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */ + PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */ + PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */ +}; + +/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */ +enum { + /* PHY Link Detect Timer */ + PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4, + PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4, + + PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */ + PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */ +}; + + +#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_PARITY) + +enum csr_regs { + B0_RAP = 0x0000, + B0_CTST = 0x0004, + + B0_POWER_CTRL = 0x0007, + B0_ISRC = 0x0008, + B0_IMSK = 0x000c, + B0_HWE_ISRC = 0x0010, + B0_HWE_IMSK = 0x0014, + + /* Special ISR registers (Yukon-2 only) */ + B0_Y2_SP_ISRC2 = 0x001c, + B0_Y2_SP_ISRC3 = 0x0020, + B0_Y2_SP_EISR = 0x0024, + B0_Y2_SP_LISR = 0x0028, + B0_Y2_SP_ICR = 0x002c, + + B2_MAC_1 = 0x0100, + B2_MAC_2 = 0x0108, + B2_MAC_3 = 0x0110, + B2_CONN_TYP = 0x0118, + B2_PMD_TYP = 0x0119, + B2_MAC_CFG = 0x011a, + B2_CHIP_ID = 0x011b, + B2_E_0 = 0x011c, + + B2_Y2_CLK_GATE = 0x011d, + B2_Y2_HW_RES = 0x011e, + B2_E_3 = 0x011f, + B2_Y2_CLK_CTRL = 0x0120, + + B2_TI_INI = 0x0130, + B2_TI_VAL = 0x0134, + B2_TI_CTRL = 0x0138, + B2_TI_TEST = 0x0139, + + B2_TST_CTRL1 = 0x0158, + B2_TST_CTRL2 = 0x0159, + B2_GP_IO = 0x015c, + + B2_I2C_CTRL = 0x0160, + B2_I2C_DATA = 0x0164, + B2_I2C_IRQ = 0x0168, + B2_I2C_SW = 0x016c, + + Y2_PEX_PHY_DATA = 0x0170, + Y2_PEX_PHY_ADDR = 0x0172, + + B3_RAM_ADDR = 0x0180, + B3_RAM_DATA_LO = 0x0184, + B3_RAM_DATA_HI = 0x0188, + +/* RAM Interface Registers */ +/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */ +/* + * The HW-Spec. calls this registers Timeout Value 0..11. But this names are + * not usable in SW. 
Please notice these are NOT real timeouts, these are + * the number of qWords transferred continuously. + */ +#define RAM_BUFFER(port, reg) (reg | (port <<6)) + + B3_RI_WTO_R1 = 0x0190, + B3_RI_WTO_XA1 = 0x0191, + B3_RI_WTO_XS1 = 0x0192, + B3_RI_RTO_R1 = 0x0193, + B3_RI_RTO_XA1 = 0x0194, + B3_RI_RTO_XS1 = 0x0195, + B3_RI_WTO_R2 = 0x0196, + B3_RI_WTO_XA2 = 0x0197, + B3_RI_WTO_XS2 = 0x0198, + B3_RI_RTO_R2 = 0x0199, + B3_RI_RTO_XA2 = 0x019a, + B3_RI_RTO_XS2 = 0x019b, + B3_RI_TO_VAL = 0x019c, + B3_RI_CTRL = 0x01a0, + B3_RI_TEST = 0x01a2, + B3_MA_TOINI_RX1 = 0x01b0, + B3_MA_TOINI_RX2 = 0x01b1, + B3_MA_TOINI_TX1 = 0x01b2, + B3_MA_TOINI_TX2 = 0x01b3, + B3_MA_TOVAL_RX1 = 0x01b4, + B3_MA_TOVAL_RX2 = 0x01b5, + B3_MA_TOVAL_TX1 = 0x01b6, + B3_MA_TOVAL_TX2 = 0x01b7, + B3_MA_TO_CTRL = 0x01b8, + B3_MA_TO_TEST = 0x01ba, + B3_MA_RCINI_RX1 = 0x01c0, + B3_MA_RCINI_RX2 = 0x01c1, + B3_MA_RCINI_TX1 = 0x01c2, + B3_MA_RCINI_TX2 = 0x01c3, + B3_MA_RCVAL_RX1 = 0x01c4, + B3_MA_RCVAL_RX2 = 0x01c5, + B3_MA_RCVAL_TX1 = 0x01c6, + B3_MA_RCVAL_TX2 = 0x01c7, + B3_MA_RC_CTRL = 0x01c8, + B3_MA_RC_TEST = 0x01ca, + B3_PA_TOINI_RX1 = 0x01d0, + B3_PA_TOINI_RX2 = 0x01d4, + B3_PA_TOINI_TX1 = 0x01d8, + B3_PA_TOINI_TX2 = 0x01dc, + B3_PA_TOVAL_RX1 = 0x01e0, + B3_PA_TOVAL_RX2 = 0x01e4, + B3_PA_TOVAL_TX1 = 0x01e8, + B3_PA_TOVAL_TX2 = 0x01ec, + B3_PA_CTRL = 0x01f0, + B3_PA_TEST = 0x01f2, + + Y2_CFG_SPC = 0x1c00, /* PCI config space region */ + Y2_CFG_AER = 0x1d00, /* PCI Advanced Error Report region */ +}; + +/* B0_CTST 24 bit Control/Status register */ +enum { + Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */ + Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ + Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */ + Y2_HW_WOL_OFF = 1<<14,/* HW WOL On (Yukon-EC Ultra A1 only) */ + Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ + Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ + Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */ + Y2_CLK_RUN_DIS = 1<<10,/* CLK_RUN Disable (YUKON-2 only) */ + Y2_LED_STAT_ON = 1<<9, /* Status LED On (YUKON-2 only) */ + Y2_LED_STAT_OFF = 1<<8, /* Status LED Off (YUKON-2 only) */ + + CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */ + CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */ + CS_STOP_DONE = 1<<5, /* Stop Master is finished */ + CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */ + CS_MRST_CLR = 1<<3, /* Clear Master reset */ + CS_MRST_SET = 1<<2, /* Set Master reset */ + CS_RST_CLR = 1<<1, /* Clear Software reset */ + CS_RST_SET = 1, /* Set Software reset */ +}; + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ +enum { + PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ + PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ + PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ + PC_VCC_DIS = 1<<4, /* Switch VCC Disable */ + PC_VAUX_ON = 1<<3, /* Switch VAUX On */ + PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */ + PC_VCC_ON = 1<<1, /* Switch VCC On */ + PC_VCC_OFF = 1<<0, /* Switch VCC Off */ +}; + +/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ + +/* B0_Y2_SP_ISRC2 32 bit Special Interrupt Source Reg 2 */ +/* B0_Y2_SP_ISRC3 32 bit Special Interrupt Source Reg 3 */ +/* B0_Y2_SP_EISR 32 bit Enter ISR Reg */ +/* B0_Y2_SP_LISR 32 bit Leave ISR Reg */ +enum { + Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */ + Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */ + Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */ + Y2_IS_CPU_TO = 1<<28, /* CPU Timeout */ + Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */ + Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx 
*/ + Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */ + Y2_IS_TIMINT = 1<<24, /* IRQ from Timer */ + + Y2_IS_IRQ_PHY2 = 1<<12, /* Interrupt from PHY 2 */ + Y2_IS_IRQ_MAC2 = 1<<11, /* Interrupt from MAC 2 */ + Y2_IS_CHK_RX2 = 1<<10, /* Descriptor error Rx 2 */ + Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */ + Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */ + + Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */ + Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */ + Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */ + + Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */ + Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */ + Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */ + Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ + Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ + + Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU, + Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 + | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, + Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 + | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, + Y2_IS_ERROR = Y2_IS_HW_ERR | + Y2_IS_IRQ_MAC1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1 | + Y2_IS_IRQ_MAC2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, +}; + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_ERR_MSK = 0x00003fff,/* All Error bits */ + + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ +}; + +/* Hardware error interrupt mask for Yukon 2 */ +enum { + Y2_IS_TIST_OV = 1<<29,/* Time Stamp Timer overflow interrupt */ + Y2_IS_SENSOR = 1<<28, /* Sensor interrupt */ + Y2_IS_MST_ERR = 1<<27, /* Master error interrupt */ + Y2_IS_IRQ_STAT = 1<<26, /* Status exception interrupt */ + Y2_IS_PCI_EXP = 1<<25, /* PCI-Express interrupt */ + Y2_IS_PCI_NEXP = 1<<24, /* PCI-Express error similar to PCI error */ + /* Link 2 */ + Y2_IS_PAR_RD2 = 1<<13, /* Read RAM parity error interrupt */ + Y2_IS_PAR_WR2 = 1<<12, /* Write RAM parity error interrupt */ + Y2_IS_PAR_MAC2 = 1<<11, /* MAC hardware fault interrupt */ + Y2_IS_PAR_RX2 = 1<<10, /* Parity Error Rx Queue 2 */ + Y2_IS_TCP_TXS2 = 1<<9, /* TCP length mismatch sync Tx queue IRQ */ + Y2_IS_TCP_TXA2 = 1<<8, /* TCP length mismatch async Tx queue IRQ */ + /* Link 1 */ + Y2_IS_PAR_RD1 = 1<<5, /* Read RAM parity error interrupt */ + Y2_IS_PAR_WR1 = 1<<4, /* Write RAM parity error interrupt */ + Y2_IS_PAR_MAC1 = 1<<3, /* MAC hardware fault interrupt */ + Y2_IS_PAR_RX1 = 1<<2, /* Parity Error Rx Queue 1 */ + Y2_IS_TCP_TXS1 = 1<<1, /* TCP length mismatch sync Tx queue IRQ */ + Y2_IS_TCP_TXA1 = 1<<0, /* TCP length mismatch async Tx queue IRQ */ + + Y2_HWE_L1_MASK = Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 | + Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1, + Y2_HWE_L2_MASK = Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 | + Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2, + + Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | 
Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | + Y2_HWE_L1_MASK | Y2_HWE_L2_MASK, +}; + +/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ +enum { + DPT_START = 1<<1, + DPT_STOP = 1<<0, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_GPIO */ +enum { + GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */ + GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */ + + GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */ + GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */ + GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */ + GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */ + GLB_GPIO_TEST_SEL_BASE = 1<<11, + GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */ + GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_YUKON_XL = 0xb3, /* YUKON-2 XL */ + CHIP_ID_YUKON_EC_U = 0xb4, /* YUKON-2 EC Ultra */ + CHIP_ID_YUKON_EX = 0xb5, /* YUKON-2 Extreme */ + CHIP_ID_YUKON_EC = 0xb6, /* YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* YUKON-2 FE */ + CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */ + CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */ + CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ + CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */ + CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */ + CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */ +}; + +enum yukon_xl_rev { + CHIP_REV_YU_XL_A0 = 0, + CHIP_REV_YU_XL_A1 = 1, + CHIP_REV_YU_XL_A2 = 2, + CHIP_REV_YU_XL_A3 = 3, +}; + +enum yukon_ec_rev { + CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ + CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ + CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. 
for Yukon-EC A3 */ +}; +enum yukon_ec_u_rev { + CHIP_REV_YU_EC_U_A0 = 1, + CHIP_REV_YU_EC_U_A1 = 2, + CHIP_REV_YU_EC_U_B0 = 3, + CHIP_REV_YU_EC_U_B1 = 5, +}; +enum yukon_fe_rev { + CHIP_REV_YU_FE_A1 = 1, + CHIP_REV_YU_FE_A2 = 2, +}; +enum yukon_fe_p_rev { + CHIP_REV_YU_FE2_A0 = 0, +}; +enum yukon_ex_rev { + CHIP_REV_YU_EX_A0 = 1, + CHIP_REV_YU_EX_B0 = 2, +}; +enum yukon_supr_rev { + CHIP_REV_YU_SU_A0 = 0, + CHIP_REV_YU_SU_B0 = 1, + CHIP_REV_YU_SU_B1 = 3, +}; + +enum yukon_prm_rev { + CHIP_REV_YU_PRM_Z1 = 1, + CHIP_REV_YU_PRM_A0 = 2, +}; + +/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ +enum { + Y2_STATUS_LNK2_INAC = 1<<7, /* Status Link 2 inactive (0 = active) */ + Y2_CLK_GAT_LNK2_DIS = 1<<6, /* Disable clock gating Link 2 */ + Y2_COR_CLK_LNK2_DIS = 1<<5, /* Disable Core clock Link 2 */ + Y2_PCI_CLK_LNK2_DIS = 1<<4, /* Disable PCI clock Link 2 */ + Y2_STATUS_LNK1_INAC = 1<<3, /* Status Link 1 inactive (0 = active) */ + Y2_CLK_GAT_LNK1_DIS = 1<<2, /* Disable clock gating Link 1 */ + Y2_COR_CLK_LNK1_DIS = 1<<1, /* Disable Core clock Link 1 */ + Y2_PCI_CLK_LNK1_DIS = 1<<0, /* Disable PCI clock Link 1 */ +}; + +/* B2_Y2_HW_RES 8 bit HW Resources (Yukon-2 only) */ +enum { + CFG_LED_MODE_MSK = 7<<2, /* Bit 4.. 2: LED Mode Mask */ + CFG_LINK_2_AVAIL = 1<<1, /* Link 2 available */ + CFG_LINK_1_AVAIL = 1<<0, /* Link 1 available */ +}; +#define CFG_LED_MODE(x) (((x) & CFG_LED_MODE_MSK) >> 2) +#define CFG_DUAL_MAC_MSK (CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL) + + +/* B2_Y2_CLK_CTRL 32 bit Clock Frequency Control Register (Yukon-2/EC) */ +enum { + Y2_CLK_DIV_VAL_MSK = 0xff<<16,/* Bit 23..16: Clock Divisor Value */ +#define Y2_CLK_DIV_VAL(x) (((x)<<16) & Y2_CLK_DIV_VAL_MSK) + Y2_CLK_DIV_VAL2_MSK = 7<<21, /* Bit 23..21: Clock Divisor Value */ + Y2_CLK_SELECT2_MSK = 0x1f<<16,/* Bit 20..16: Clock Select */ +#define Y2_CLK_DIV_VAL_2(x) (((x)<<21) & Y2_CLK_DIV_VAL2_MSK) +#define Y2_CLK_SEL_VAL_2(x) (((x)<<16) & Y2_CLK_SELECT2_MSK) + Y2_CLK_DIV_ENA = 1<<1, /* Enable Core Clock Division */ + Y2_CLK_DIV_DIS = 1<<0, /* Disable Core Clock Division */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */ +enum { + PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */ + PEX_DB_ACCESS = 1<<30, /* Access to debug register */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 
0: RAM Address Range */ +/* RAM Interface Registers */ + +/* B3_RI_CTRL 16 bit RAM Interface Control Register */ +enum { + RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ + RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ + + RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ + RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ +}; + +#define SK_RI_TO_53 36 /* RAM interface timeout */ + + +/* Port related registers FIFO, and Arbiter */ +#define SK_REG(port,reg) (((port)<<7)+(reg)) + +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ +/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ +/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ +/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ + +#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */ + +/* TXA_CTRL 8 bit Tx Arbiter Control Register */ +enum { + TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */ + TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */ + TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */ + TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */ + TXA_START_RC = 1<<3, /* Start sync Rate Control */ + TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */ + TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */ + TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */ +}; + +/* + * Bank 4 - 5 + */ +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +enum { + TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ + TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ + TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */ + TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */ + TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ + TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ + TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ + + RSS_KEY = 0x0220, /* RSS Key setup */ + RSS_CFG = 0x0248, /* RSS Configuration */ +}; + +enum { + HASH_TCP_IPV6_EX_CTRL = 1<<5, + HASH_IPV6_EX_CTRL = 1<<4, + HASH_TCP_IPV6_CTRL = 1<<3, + HASH_IPV6_CTRL = 1<<2, + HASH_TCP_IPV4_CTRL = 1<<1, + HASH_IPV4_CTRL = 1<<0, + + HASH_ALL = 0x3f, +}; + +enum { + B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ + B7_CFG_SPC = 0x0380,/* copy of the Configuration register */ + B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */ + B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */ + B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */ + B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */ + B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */ + B8_TA2_REGS = 0x0780,/* Transmit sync queue 2 */ + B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */ +}; + +/* Queue Register Offsets, use Q_ADDR() to access */ +enum { + B8_Q_REGS = 0x0400, /* base of Queue registers */ + Q_D = 0x00, /* 8*32 bit Current Descriptor */ + Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */ + Q_DONE = 0x24, /* 16 bit Done Index */ + Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ + Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ + Q_BC = 0x30, /* 32 bit Current Byte Counter */ + Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ + Q_TEST = 0x38, /* 32 bit Test/Control Register */ + +/* Yukon-2 */ + Q_WM = 0x40, /* 16 bit FIFO Watermark */ + Q_AL = 0x42, /* 8 bit FIFO Alignment */ + Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */ + Q_RSL = 0x46, /* 8 bit FIFO Read Shadow Level */ + Q_RP = 0x48, /* 8 bit FIFO Read Pointer */ + Q_RL = 0x4a, /* 8 bit FIFO Read Level */ + Q_WP = 0x4c, /* 8 bit 
FIFO Write Pointer */ + Q_WSP = 0x4d, /* 8 bit FIFO Write Shadow Pointer */ + Q_WL = 0x4e, /* 8 bit FIFO Write Level */ + Q_WSL = 0x4f, /* 8 bit FIFO Write Shadow Level */ +}; +#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) + +/* Q_TEST 32 bit Test Register */ +enum { + /* Transmit */ + F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */ + F_TX_CHK_AUTO_ON = 1<<30, /* Tx checksum auto calc off (Yukon EX) */ + + /* Receive */ + F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */ + + /* Hardware testbits not used */ +}; + +/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ +enum { + Y2_B8_PREF_REGS = 0x0450, + + PREF_UNIT_CTRL = 0x00, /* 32 bit Control register */ + PREF_UNIT_LAST_IDX = 0x04, /* 16 bit Last Index */ + PREF_UNIT_ADDR_LO = 0x08, /* 32 bit List start addr, low part */ + PREF_UNIT_ADDR_HI = 0x0c, /* 32 bit List start addr, high part*/ + PREF_UNIT_GET_IDX = 0x10, /* 16 bit Get Index */ + PREF_UNIT_PUT_IDX = 0x14, /* 16 bit Put Index */ + PREF_UNIT_FIFO_WP = 0x20, /* 8 bit FIFO write pointer */ + PREF_UNIT_FIFO_RP = 0x24, /* 8 bit FIFO read pointer */ + PREF_UNIT_FIFO_WM = 0x28, /* 8 bit FIFO watermark */ + PREF_UNIT_FIFO_LEV = 0x2c, /* 8 bit FIFO level */ + + PREF_UNIT_MASK_IDX = 0x0fff, +}; +#define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg)) + +/* RAM Buffer Register Offsets */ +enum { + + RB_START = 0x00,/* 32 bit RAM Buffer Start Address */ + RB_END = 0x04,/* 32 bit RAM Buffer End Address */ + RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */ + RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */ + RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */ + RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */ + RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */ + RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ + RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */ + RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */ + RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */ + RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */ + RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */ +}; + +/* Receive and Transmit Queues */ +enum { + Q_R1 = 0x0000, /* Receive Queue 1 */ + Q_R2 = 0x0080, /* Receive Queue 2 */ + Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */ + Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */ + Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */ + Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */ +}; + +/* Different PHY Types */ +enum { + PHY_ADDR_MARV = 0, +}; + +#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs)) + + +enum { + LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */ + LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */ + LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */ + LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */ + + LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */ + +/* Receive GMAC FIFO (YUKON and Yukon-2) */ + + RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ + RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ + RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ + RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ + RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */ + RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */ + RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */ + RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */ + RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */ + RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */ + RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ + + RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ + + RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ + + RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ +}; + + +/* Q_BC 32 bit Current Byte Counter */ + +/* BMU Control Status Registers */ +/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */ +/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */ +/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ +/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ +/* Q_CSR 32 bit BMU Control/Status Register */ + +/* Rx BMU Control / Status Registers (Yukon-2) */ +enum { + BMU_IDLE = 1<<31, /* BMU Idle State */ + BMU_RX_TCP_PKT = 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */ + BMU_RX_IP_PKT = 1<<29, /* Rx IP Packet (when RSS Hash enabled) */ + + BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable Rx RSS Hash */ + BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */ + BMU_ENA_RX_CHKSUM = 1<<13, /* Enable Rx TCP/IP Checksum Check */ + BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */ + BMU_CLR_IRQ_PAR = 1<<11, /* Clear IRQ on Parity errors (Rx) */ + BMU_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment. 
error (Tx) */ + BMU_CLR_IRQ_CHK = 1<<10, /* Clear IRQ Check */ + BMU_STOP = 1<<9, /* Stop Rx/Tx Queue */ + BMU_START = 1<<8, /* Start Rx/Tx Queue */ + BMU_FIFO_OP_ON = 1<<7, /* FIFO Operational On */ + BMU_FIFO_OP_OFF = 1<<6, /* FIFO Operational Off */ + BMU_FIFO_ENA = 1<<5, /* Enable FIFO */ + BMU_FIFO_RST = 1<<4, /* Reset FIFO */ + BMU_OP_ON = 1<<3, /* BMU Operational On */ + BMU_OP_OFF = 1<<2, /* BMU Operational Off */ + BMU_RST_CLR = 1<<1, /* Clear BMU Reset (Enable) */ + BMU_RST_SET = 1<<0, /* Set BMU Reset */ + + BMU_CLR_RESET = BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR, + BMU_OPER_INIT = BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START | + BMU_FIFO_ENA | BMU_OP_ON, + + BMU_WM_DEFAULT = 0x600, + BMU_WM_PEX = 0x80, +}; + +/* Tx BMU Control / Status Registers (Yukon-2) */ + /* Bit 31: same as for Rx */ +enum { + BMU_TX_IPIDINCR_ON = 1<<13, /* Enable IP ID Increment */ + BMU_TX_IPIDINCR_OFF = 1<<12, /* Disable IP ID Increment */ + BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */ +}; + +/* TBMU_TEST 0x06B8 Transmit BMU Test Register */ +enum { + TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */ + TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */ + TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Paddiing FIX1 Enable */ + TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Paddiing FIX1 Disable */ + TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */ + TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */ + TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */ + TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */ + + TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */ + TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */ + TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */ + + TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */ + TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */ + TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */ + + TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */ + TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */ + TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */ + + TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */ + TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */ + TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */ + + TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */ + TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */ + TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */ + + TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */ + TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */ + TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */ +}; + +/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ +/* PREF_UNIT_CTRL 32 bit Prefetch Control register */ +enum { + PREF_UNIT_OP_ON = 1<<3, /* prefetch unit operational */ + PREF_UNIT_OP_OFF = 1<<2, /* prefetch unit not operational */ + PREF_UNIT_RST_CLR = 1<<1, /* Clear Prefetch Unit Reset */ + PREF_UNIT_RST_SET = 1<<0, /* Set Prefetch Unit Reset */ +}; + +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ +/* RB_START 32 bit RAM Buffer Start Address */ +/* RB_END 32 bit RAM Buffer End Address */ +/* RB_WP 32 bit RAM Buffer Write Pointer */ +/* RB_RP 32 bit RAM Buffer 
Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ +enum { + RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ + RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ + RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ + RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ +}; + + +/* Transmit GMAC FIFO (YUKON only) */ +enum { + TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ + TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ + TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ + + TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ + TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */ + TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ + + TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ + TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ + TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ + + /* Threshold values for Yukon-EC Ultra and Extreme */ + ECU_AE_THR = 0x0070, /* Almost Empty Threshold */ + ECU_TXFF_LEV = 0x01a0, /* Tx BMU FIFO Level */ + ECU_JUMBO_WM = 0x0080, /* Jumbo Mode Watermark */ +}; + +/* Descriptor Poll Timer Registers */ +enum { + B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ + B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ + B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ + + B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ +}; + +/* Time Stamp Timer Registers (YUKON only) */ +enum { + GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ + GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ + GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ +}; + +/* Polling Unit Registers (Yukon-2 only) */ +enum { + POLL_CTRL = 0x0e20, /* 32 bit Polling Unit Control Reg */ + POLL_LAST_IDX = 0x0e24,/* 16 bit Polling Unit List Last Index */ + + POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit Poll. List Start Addr (low) */ + POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. 
List Start Addr (high) */ +}; + +enum { + SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */ + SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */ +}; + +enum { + CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */ + CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */ + CPU_TIM = 0x0e50,/* 32 bit Timer Compare Register */ + CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */ + CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */ + CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */ + HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */ + CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */ + HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */ + HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */ +}; + +/* ASF Subsystem Registers (Yukon-2 only) */ +enum { + B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */ + B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit ASF SMB Control/Status/Data */ + B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit ASF IRQ Vector Base */ + + B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit ASF Status and Command Reg */ + B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit ASF Host Communication Reg */ + B28_Y2_DATA_REG_1 = 0x0e70,/* 32 bit ASF/Host Data Register 1 */ + B28_Y2_DATA_REG_2 = 0x0e74,/* 32 bit ASF/Host Data Register 2 */ + B28_Y2_DATA_REG_3 = 0x0e78,/* 32 bit ASF/Host Data Register 3 */ + B28_Y2_DATA_REG_4 = 0x0e7c,/* 32 bit ASF/Host Data Register 4 */ +}; + +/* Status BMU Registers (Yukon-2 only)*/ +enum { + STAT_CTRL = 0x0e80,/* 32 bit Status BMU Control Reg */ + STAT_LAST_IDX = 0x0e84,/* 16 bit Status BMU Last Index */ + + STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit Status List Start Addr (low) */ + STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit Status List Start Addr (high) */ + STAT_TXA1_RIDX = 0x0e90,/* 16 bit Status TxA1 Report Index Reg */ + STAT_TXS1_RIDX = 0x0e92,/* 16 bit Status TxS1 Report Index Reg */ + STAT_TXA2_RIDX = 0x0e94,/* 16 bit Status TxA2 Report Index Reg */ + STAT_TXS2_RIDX = 0x0e96,/* 16 bit Status TxS2 Report Index Reg */ + STAT_TX_IDX_TH = 0x0e98,/* 16 bit Status Tx Index Threshold Reg */ + STAT_PUT_IDX = 0x0e9c,/* 16 bit Status Put Index Reg */ + +/* FIFO Control/Status Registers (Yukon-2 only)*/ + STAT_FIFO_WP = 0x0ea0,/* 8 bit Status FIFO Write Pointer Reg */ + STAT_FIFO_RP = 0x0ea4,/* 8 bit Status FIFO Read Pointer Reg */ + STAT_FIFO_RSP = 0x0ea6,/* 8 bit Status FIFO Read Shadow Ptr */ + STAT_FIFO_LEVEL = 0x0ea8,/* 8 bit Status FIFO Level Reg */ + STAT_FIFO_SHLVL = 0x0eaa,/* 8 bit Status FIFO Shadow Level Reg */ + STAT_FIFO_WM = 0x0eac,/* 8 bit Status FIFO Watermark Reg */ + STAT_FIFO_ISR_WM= 0x0ead,/* 8 bit Status FIFO ISR Watermark Reg */ + +/* Level and ISR Timer Registers (Yukon-2 only)*/ + STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit Level Timer Init. Value Reg */ + STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit Level Timer Counter Reg */ + STAT_LEV_TIMER_CTRL= 0x0eb8,/* 8 bit Level Timer Control Reg */ + STAT_LEV_TIMER_TEST= 0x0eb9,/* 8 bit Level Timer Test Reg */ + STAT_TX_TIMER_INI = 0x0ec0,/* 32 bit Tx Timer Init. Value Reg */ + STAT_TX_TIMER_CNT = 0x0ec4,/* 32 bit Tx Timer Counter Reg */ + STAT_TX_TIMER_CTRL = 0x0ec8,/* 8 bit Tx Timer Control Reg */ + STAT_TX_TIMER_TEST = 0x0ec9,/* 8 bit Tx Timer Test Reg */ + STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit ISR Timer Init. 
Value Reg */ + STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit ISR Timer Counter Reg */ + STAT_ISR_TIMER_CTRL= 0x0ed8,/* 8 bit ISR Timer Control Reg */ + STAT_ISR_TIMER_TEST= 0x0ed9,/* 8 bit ISR Timer Test Reg */ +}; + +enum { + LINKLED_OFF = 0x01, + LINKLED_ON = 0x02, + LINKLED_LINKSYNC_OFF = 0x04, + LINKLED_LINKSYNC_ON = 0x08, + LINKLED_BLINK_OFF = 0x10, + LINKLED_BLINK_ON = 0x20, +}; + +/* GMAC and GPHY Control Registers (YUKON only) */ +enum { + GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ + GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */ + GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */ + GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */ + GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ + WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ + WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ + WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ + WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ + +/* WOL Pattern Length Registers (YUKON only) */ + WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ + WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ + WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ +}; +#define WOL_REGS(port, x) (x + (port)*0x80) + +enum { + WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ + WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ +}; +#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400) + +enum { + BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ + BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */ +}; + +/* + * Marvel-PHY Registers, indirect addressed over GMAC + */ +enum { + PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Marvel-specific registers */ + PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ + PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ + PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ + PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ + PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ + PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ + PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ + PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ + PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ + PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ + PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ + PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. 
Stat Reg */ + PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ + PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ + PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ + PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ + PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ + PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ + PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ + PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ +}; + +enum { + PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ + PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ + PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ + PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ + PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ + PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ + PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ + PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ + PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ + PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ +}; + +enum { + PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ + PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ + PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ +}; + +enum { + PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ + + PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ + PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ + PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ + PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ + PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ + PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ + PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ +}; + +enum { + PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ + PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ + PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */ +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, + + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ + PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */ + PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) 
*/ +}; + +/* Advertisement register bits */ +enum { + PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ + + PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ + PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ + PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ + PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ + PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ + PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ + PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ + PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ + PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ + PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, + PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | + PHY_AN_100HALF | PHY_AN_100FULL, +}; + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ + PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/** Marvell-Specific */ +enum { + PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ + PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ + PHY_M_AN_RF = 1<<13, /* Remote Fault */ + + PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ + PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ + PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ + PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ + PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ + PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ + PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ + PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ +}; + +/* special defines for FIBER (88E1011S only) */ +enum { + PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ + PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ + PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 10000Base-X Half Duplex */ + PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 10000Base-X Full Duplex */ +}; + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +enum { + PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ + PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ + PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ + PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 
7: both Pause Mode */ +}; + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ + PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ + PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ + PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ + PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ +}; + +/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +enum { + PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ + PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ + PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ + PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ + PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ + PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ + PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +#define PHY_M_PC_MDI_XMODE(x) (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK) + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for Yukon-EC Ultra Gigabit Ethernet PHY (88E1149 only) */ +enum { + PHY_M_PC_COP_TX_DIS = 1<<3, /* Copper Transmitter Disable */ + PHY_M_PC_POW_D_ENA = 1<<2, /* Power Down Enable */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) 
*/ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_DEF_MSK = PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE + | PHY_M_IS_DUP_CHANGE, + PHY_M_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */ + + PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */ +}; +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK) + /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK) + /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_DSC_2(x) ((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2) + /* 000=1x; 001=2x; 010=3x; 011=4x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK) + /* 01X=0; 110=2.5; 111=25 (MHz) */ + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +enum { + PHY_M_PC_DIS_LINK_Pa = 1<<15,/* Disable Link Pulses */ + PHY_M_PC_DSC_MSK = 7<<12,/* Bit 14..12: Downshift Counter */ + PHY_M_PC_DOWN_S_ENA = 1<<11,/* Downshift Enable */ +}; +/* !!! Errata in spec. 
(1 = disable) */ + +#define PHY_M_PC_DSC(x) (((u16)(x)<<12) & PHY_M_PC_DSC_MSK) + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) + +/***** PHY_MARV_PHY_STAT (page 3)16 bit r/w Polarity Control Reg. *****/ +enum { + PHY_M_POLC_LS1M_MSK = 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */ + PHY_M_POLC_IS0M_MSK = 0xf<<8, /* Bit 11.. 8: INIT,STAT0 Mix % Mask */ + PHY_M_POLC_LOS_MSK = 0x3<<6, /* Bit 7.. 6: LOS Pol. Ctrl. Mask */ + PHY_M_POLC_INIT_MSK = 0x3<<4, /* Bit 5.. 4: INIT Pol. Ctrl. Mask */ + PHY_M_POLC_STA1_MSK = 0x3<<2, /* Bit 3.. 2: STAT1 Pol. Ctrl. Mask */ + PHY_M_POLC_STA0_MSK = 0x3, /* Bit 1.. 0: STAT0 Pol. Ctrl. Mask */ +}; + +#define PHY_M_POLC_LS1_P_MIX(x) (((x)<<12) & PHY_M_POLC_LS1M_MSK) +#define PHY_M_POLC_IS0_P_MIX(x) (((x)<<8) & PHY_M_POLC_IS0M_MSK) +#define PHY_M_POLC_LOS_CTRL(x) (((x)<<6) & PHY_M_POLC_LOS_MSK) +#define PHY_M_POLC_INIT_CTRL(x) (((x)<<4) & PHY_M_POLC_INIT_MSK) +#define PHY_M_POLC_STA1_CTRL(x) (((x)<<2) & PHY_M_POLC_STA1_MSK) +#define PHY_M_POLC_STA0_CTRL(x) (((x)<<0) & PHY_M_POLC_STA0_MSK) + +enum { + PULS_NO_STR = 0,/* no pulse stretching */ + PULS_21MS = 1,/* 21 ms to 42 ms */ + PULS_42MS = 2,/* 42 ms to 84 ms */ + PULS_84MS = 3,/* 84 ms to 170 ms */ + PULS_170MS = 4,/* 170 ms to 340 ms */ + PULS_340MS = 5,/* 340 ms to 670 ms */ + PULS_670MS = 6,/* 670 ms to 1.3 s */ + PULS_1300MS = 7,/* 1.3 s to 2.7 s */ +}; + +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + BLINK_42MS = 0,/* 42 ms */ + BLINK_84MS = 1,/* 84 ms */ + BLINK_170MS = 2,/* 170 ms */ + BLINK_340MS = 3,/* 340 ms */ + BLINK_670MS = 4,/* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum led_mode { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. 
PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */ + PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */ +}; + +#define PHY_M_FELP_LED2_CTRL(x) (((u16)(x)<<8) & PHY_M_FELP_LED2_MSK) +#define PHY_M_FELP_LED1_CTRL(x) (((u16)(x)<<4) & PHY_M_FELP_LED1_MSK) +#define PHY_M_FELP_LED0_CTRL(x) (((u16)(x)<<0) & PHY_M_FELP_LED0_MSK) + +enum { + LED_PAR_CTRL_COLX = 0x00, + LED_PAR_CTRL_ERROR = 0x01, + LED_PAR_CTRL_DUPLEX = 0x02, + LED_PAR_CTRL_DP_COL = 0x03, + LED_PAR_CTRL_SPEED = 0x04, + LED_PAR_CTRL_LINK = 0x05, + LED_PAR_CTRL_TX = 0x06, + LED_PAR_CTRL_RX = 0x07, + LED_PAR_CTRL_ACT = 0x08, + LED_PAR_CTRL_LNK_RX = 0x09, + LED_PAR_CTRL_LNK_AC = 0x0a, + LED_PAR_CTRL_ACT_BL = 0x0b, + LED_PAR_CTRL_TX_BL = 0x0c, + LED_PAR_CTRL_RX_BL = 0x0d, + LED_PAR_CTRL_COL_BL = 0x0e, + LED_PAR_CTRL_INACT = 0x0f +}; + +/*****,PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ +enum { + PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */ + PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */ + PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ +}; + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +/***** PHY_MARV_PHY_CTRL (page 1) 16 bit r/w Fiber Specific Ctrl *****/ +enum { + PHY_M_FIB_FORCE_LNK = 1<<10,/* Force Link Good */ + PHY_M_FIB_SIGD_POL = 1<<9, /* SIGDET Polarity */ + PHY_M_FIB_TX_DIS = 1<<3, /* Transmitter Disable */ +}; + +/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */ +/***** PHY_MARV_PHY_CTRL (page 2) 16 bit r/w MAC Specific Ctrl *****/ +enum { + PHY_M_MAC_MD_MSK = 7<<7, /* Bit 9.. 7: Mode Select Mask */ + PHY_M_MAC_GMIF_PUP = 1<<3, /* GMII Power Up (88E1149 only) */ + PHY_M_MAC_MD_AUTO = 3,/* Auto Copper/1000Base-X */ + PHY_M_MAC_MD_COPPER = 5,/* Copper only */ + PHY_M_MAC_MD_1000BX = 7,/* 1000Base-X only */ +}; +#define PHY_M_MAC_MODE_SEL(x) (((x)<<7) & PHY_M_MAC_MD_MSK) + +/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ +enum { + PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */ + PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */ + PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */ + PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. 
Mask */ +}; + +#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK) +#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK) +#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK) +#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK) + +/* GMAC registers */ +/* Port Registers */ +enum { + GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ + GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ + GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ + GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ + GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ + GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ + GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ +/* Source Address Registers */ + GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ + GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ + GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ + GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ + GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ + GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ + GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ + GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ + GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ + GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ + GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ + GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ + GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ + GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ + GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ + GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ + GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ + GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ + GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +/* MIB Counters */ + GM_MIB_CNT_BASE = 0x0100, /* Base Address of MIB Counters */ + GM_MIB_CNT_END = 0x025C, /* Last MIB counter */ +}; + + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +enum { + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ + GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ + GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ + GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. 
Error */ + + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ + GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ + GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ + GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ + GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ + GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ + GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104,/* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112,/* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120,/* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128,/* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136,/* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144,/* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152,/* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160,/* Rx Jabber Packet Frame */ + + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176,/* Rx FIFO overflow Event */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192,/* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200,/* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208,/* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216,/* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224,/* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232,/* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240,/* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248,/* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256,/* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264,/* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272,/* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280,/* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288,/* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304,/* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312,/* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320,/* Tx aborted due to Exces. Col. 
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328,/* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336,/* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344,/* Tx FIFO Underrun Event */ +}; + +/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + GM_TXPA_BO_LIM_MSK = 0x0f, /* Bit 3.. 
0: Backoff Limit Mask */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, + TX_BOF_LIM_DEF = 0x04, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) +#define TX_BACK_OFF_LIM(x) ((x) & GM_TXPA_BO_LIM_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */ + + GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */ + + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) + +#define DATA_BLIND_DEF 0x04 +#define IPG_DATA_DEF_1000 0x1e +#define IPG_DATA_DEF_10_100 0x18 + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK) + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0x7fff<<16, /* Bit 30..16: Rx Frame Length */ + GMR_FS_VLAN = 1<<13, /* VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Undersize Packet */ + GMR_FS_MC = 1<<10, /* Multicast Packet */ + GMR_FS_BC = 1<<9, /* Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Rx FIFO Overflow */ + + GMR_FS_ANY_ERR = GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR | + GMR_FS_FRAGMENT | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | + GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */ + RX_GCLKMAC_OFF = 1<<30, + + RX_STFW_DIS = 1<<29, /* RX Store and Forward Enable */ + RX_STFW_ENA = 1<<28, + + RX_TRUNC_ON = 1<<27, /* enable packet truncation */ + RX_TRUNC_OFF = 1<<26, /* disable packet truncation */ + RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */ + RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */ + + RX_MACSEC_FLUSH_ON = 1<<23, + RX_MACSEC_FLUSH_OFF = 1<<22, + RX_MACSEC_ASF_FLUSH_ON = 1<<21, + RX_MACSEC_ASF_FLUSH_OFF = 1<<20, + + GMF_RX_OVER_ON = 1<<19, /* enable flushing on receive overrun */ + GMF_RX_OVER_OFF = 1<<18, /* disable flushing on receive overrun */ + GMF_ASF_RX_OVER_ON = 1<<17, /* enable flushing of ASF when overrun */ + GMF_ASF_RX_OVER_OFF = 1<<16, /* disable flushing of ASF when overrun */ + + GMF_WP_TST_ON = 1<<14, /* Write 
Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_C = 1<<4, /* Clear IRQ Rx Frame Complete */ + + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ + + GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, +}; + +/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */ +enum { + RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */ + RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */ + RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */ + RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */ + RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */ + RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */ + RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */ + RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */ + RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */ + RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */ +}; + +/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ +enum { + TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ +}; + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + TX_STFW_DIS = 1<<31,/* Disable Store & Forward */ + TX_STFW_ENA = 1<<30,/* Enable Store & Forward */ + + TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */ + TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */ + + TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */ + TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode enable */ + + GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* B28_Y2_ASF_STAT_CMD 32 bit ASF Status and Command Reg */ +enum { + Y2_ASF_OS_PRES = 1<<4, /* ASF operation system present */ + Y2_ASF_RESET = 1<<3, /* ASF system in reset state */ + Y2_ASF_RUNNING = 1<<2, /* ASF system operational */ + Y2_ASF_CLR_HSTI = 1<<1, /* Clear ASF IRQ */ + Y2_ASF_IRQ = 1<<0, /* Issue an IRQ to ASF system */ + + Y2_ASF_UC_STATE = 3<<2, /* ASF uC State */ + Y2_ASF_CLK_HALT = 0, /* ASF system clock stopped */ +}; + +/* B28_Y2_ASF_HOST_COM 32 bit ASF Host Communication Reg */ +enum { + Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */ + Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */ +}; +/* HCU_CCSR CPU Control and Status Register */ +enum { + HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */ + HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */ + /* Clock Stretching Timeout */ + HCU_CCSR_CS_TO = 1<<25, + HCU_CCSR_WDOG = 1<<24, 
/* Watchdog Reset */ + + HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */ + HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */ + + HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */ + HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */ + + HCU_CCSR_SET_SYNC_CPU = 1<<5, + HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */ + HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3, + HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */ +/* Microcontroller State */ + HCU_CCSR_UC_STATE_MSK = 3, + HCU_CCSR_UC_STATE_BASE = 1<<0, + HCU_CCSR_ASF_RESET = 0, + HCU_CCSR_ASF_HALTED = 1<<1, + HCU_CCSR_ASF_RUNNING = 1<<0, +}; + +/* HCU_HCSR Host Control and Status Register */ +enum { + HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */ + + HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */ + HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */ +}; + +/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */ +enum { + SC_STAT_CLR_IRQ = 1<<4, /* Status Burst IRQ clear */ + SC_STAT_OP_ON = 1<<3, /* Operational Mode On */ + SC_STAT_OP_OFF = 1<<2, /* Operational Mode Off */ + SC_STAT_RST_CLR = 1<<1, /* Clear Status Unit Reset (Enable) */ + SC_STAT_RST_SET = 1<<0, /* Set Status Unit Reset */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_SET_RST = 1<<15,/* MAC SEC RST */ + GMC_SEC_RST_OFF = 1<<14,/* MAC SEC RSt OFF */ + GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */ + GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */ + GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */ + GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off*/ + GMC_BYP_RETR_ON = 1<<9, /* Bypass retransmit FIFO On */ + GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */ + + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */ + GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */ + GPC_SPEED = 3<<27, /* PHY speed (ro) */ + GPC_LINK = 1<<26, /* Link up (ro) */ + GPC_DUPLEX = 1<<25, /* Duplex (ro) */ + GPC_CLOCK = 1<<24, /* 125Mhz clock stable (ro) */ + + GPC_PDOWN = 1<<23, /* Internal regulator 2.5 power down */ + GPC_TSTMODE = 1<<22, /* Test mode */ + GPC_REG18 = 1<<21, /* Reg18 Power down */ + GPC_REG12SEL = 3<<19, /* Reg12 power setting */ + GPC_REG18SEL = 3<<17, /* Reg18 power setting */ + GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */ + + GPC_LEDMUX = 3<<14, /* LED Mux */ + GPC_INTPOL = 1<<13, /* Interrupt polarity */ + GPC_DETECT = 1<<12, /* Energy detect */ + GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */ + GPC_SLAVE = 1<<10, /* Slave mode */ + GPC_PAUSE = 1<<9, /* Pause enable */ + GPC_LEDCTL = 3<<6, /* GPHY Leds */ + + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define 
GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) +}; + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ +enum { /* Bits 15.. 2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ +}; + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ +enum { + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + + +/* Control flags */ +enum { + UDPTCP = 1<<0, + CALSUM = 1<<1, + WR_SUM = 1<<2, + INIT_SUM= 1<<3, + LOCK_SUM= 1<<4, + INS_VLAN= 1<<5, + EOP = 1<<7, +}; + +enum { + HW_OWNER = 1<<7, + OP_TCPWRITE = 0x11, + OP_TCPSTART = 0x12, + OP_TCPINIT = 0x14, + OP_TCPLCK = 0x18, + OP_TCPCHKSUM = OP_TCPSTART, + OP_TCPIS = OP_TCPINIT | OP_TCPSTART, + OP_TCPLW = OP_TCPLCK | OP_TCPWRITE, + OP_TCPLSW = OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE, + OP_TCPLISW = OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE, + + OP_ADDR64 = 0x21, + OP_VLAN = 0x22, + OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN, + OP_LRGLEN = 0x24, + OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN, + OP_MSS = 0x28, + OP_MSSVLAN = OP_MSS | OP_VLAN, + + OP_BUFFER = 0x40, + OP_PACKET = 0x41, + OP_LARGESEND = 0x43, + OP_LSOV2 = 0x45, + +/* YUKON-2 STATUS opcodes defines */ + OP_RXSTAT = 0x60, + OP_RXTIMESTAMP = 0x61, + OP_RXVLAN = 0x62, + OP_RXCHKS = 0x64, + OP_RXCHKSVLAN = OP_RXCHKS | OP_RXVLAN, + OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN, + OP_RSS_HASH = 0x65, + OP_TXINDEXLE = 0x68, + OP_MACSEC = 0x6c, + OP_PUTIDX = 0x70, +}; + +enum status_css { + CSS_TCPUDPCSOK = 1<<7, /* TCP / UDP checksum is ok */ + CSS_ISUDP = 1<<6, /* packet is a UDP packet */ + CSS_ISTCP = 1<<5, /* packet is a TCP packet */ + CSS_ISIPFRAG = 1<<4, /* packet is a TCP/UDP frag, CS calc not done */ + CSS_ISIPV6 = 1<<3, /* packet is a IPv6 packet */ + CSS_IPV4CSUMOK = 1<<2, /* IP v4: TCP header checksum is ok */ + CSS_ISIPV4 = 1<<1, /* packet is a IPv4 packet */ + CSS_LINK_BIT = 1<<0, /* port number (legacy) */ +}; + +/* Yukon 2 hardware interface */ +struct sky2_tx_le { + __le32 addr; + __le16 length; /* also vlan tag or checksum start */ + u8 ctrl; + u8 opcode; +} __packed; + +struct sky2_rx_le { + __le32 addr; + __le16 length; + u8 ctrl; + u8 opcode; +} __packed; + +struct sky2_status_le { + __le32 status; /* also checksum */ + __le16 length; /* also vlan tag */ + u8 css; + u8 opcode; +} __packed; + +struct tx_ring_info { + struct sk_buff *skb; + unsigned long flags; +#define TX_MAP_SINGLE 0x0001 +#define TX_MAP_PAGE 0x0002 + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct rx_ring_info { + struct sk_buff *skb; + dma_addr_t data_addr; + DEFINE_DMA_UNMAP_LEN(data_size); + dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; +}; + +enum flow_control { + FC_NONE = 0, + FC_TX = 1, + FC_RX = 2, + FC_BOTH = 3, +}; + +struct sky2_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; +}; + +struct sky2_port { + struct sky2_hw *hw; + struct net_device *netdev; + unsigned port; + u32 msg_enable; + spinlock_t phy_lock; + + struct tx_ring_info *tx_ring; + struct sky2_tx_le *tx_le; + 
struct sky2_stats tx_stats; + + u16 tx_ring_size; + u16 tx_cons; /* next le to check */ + u16 tx_prod; /* next le to use */ + u16 tx_next; /* debug only */ + + u16 tx_pending; + u16 tx_last_mss; + u32 tx_last_upper; + u32 tx_tcpsum; + + struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp; + struct sky2_rx_le *rx_le; + struct sky2_stats rx_stats; + + u16 rx_next; /* next re to check */ + u16 rx_put; /* next le index to use */ + u16 rx_pending; + u16 rx_data_size; + u16 rx_nfrags; + + struct { + unsigned long last; + u32 mac_rp; + u8 mac_lev; + u8 fifo_rp; + u8 fifo_lev; + } check; + + dma_addr_t rx_le_map; + dma_addr_t tx_le_map; + + u16 advertising; /* ADVERTISED_ bits */ + u16 speed; /* SPEED_1000, SPEED_100, ... */ + u8 wol; /* WAKE_ bits */ + u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ + u16 flags; +#define SKY2_FLAG_AUTO_SPEED 0x0002 +#define SKY2_FLAG_AUTO_PAUSE 0x0004 + + enum flow_control flow_mode; + enum flow_control flow_status; + +#ifdef CONFIG_SKY2_DEBUG + struct dentry *debugfs; +#endif +}; + +struct sky2_hw { + void __iomem *regs; + struct pci_dev *pdev; + struct napi_struct napi; + struct net_device *dev[2]; + unsigned long flags; +#define SKY2_HW_USE_MSI 0x00000001 +#define SKY2_HW_FIBRE_PHY 0x00000002 +#define SKY2_HW_GIGABIT 0x00000004 +#define SKY2_HW_NEWER_PHY 0x00000008 +#define SKY2_HW_RAM_BUFFER 0x00000010 +#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ +#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ +#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ +#define SKY2_HW_RSS_BROKEN 0x00000100 +#define SKY2_HW_VLAN_BROKEN 0x00000200 +#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ +#define SKY2_HW_IRQ_SETUP 0x00000800 + + u8 chip_id; + u8 chip_rev; + u8 pmd_type; + u8 ports; + + struct sky2_status_le *st_le; + u32 st_size; + u32 st_idx; + dma_addr_t st_dma; + + struct timer_list watchdog_timer; + struct work_struct restart_work; + wait_queue_head_t msi_wait; + + char irq_name[0]; +}; + +static inline int sky2_is_copper(const struct sky2_hw *hw) +{ + return !(hw->flags & SKY2_HW_FIBRE_PHY); +} + +/* Register accessor for memory mapped device */ +static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg) +{ + return readl(hw->regs + reg); +} + +static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg) +{ + return readw(hw->regs + reg); +} + +static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg) +{ + return readb(hw->regs + reg); +} + +static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val) +{ + writel(val, hw->regs + reg); +} + +static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val) +{ + writew(val, hw->regs + reg); +} + +static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val) +{ + writeb(val, hw->regs + reg); +} + +/* Yukon PHY related registers */ +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) +#define GM_PHY_RETRIES 100 + +static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg) +{ + return sky2_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + unsigned base = SK_GMAC_REG(port, reg); + return (u32) sky2_read16(hw, base) + | (u32) sky2_read16(hw, base+4) << 16; +} + +static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + unsigned base = SK_GMAC_REG(port, reg); + + return (u64) sky2_read16(hw, base) + | (u64) 
sky2_read16(hw, base+4) << 16 + | (u64) sky2_read16(hw, base+8) << 32 + | (u64) sky2_read16(hw, base+12) << 48; +} + +/* There is no way to atomically read32 bit values from PHY, so retry */ +static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + u32 val; + + do { + val = gma_read32(hw, port, reg); + } while (gma_read32(hw, port, reg) != val); + + return val; +} + +static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg) +{ + u64 val; + + do { + val = gma_read64(hw, port, reg); + } while (gma_read64(hw, port, reg) != val); + + return val; +} + +static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v) +{ + sky2_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +/* PCI config space access */ +static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read32(hw, Y2_CFG_SPC + reg); +} + +static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read16(hw, Y2_CFG_SPC + reg); +} + +static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) +{ + sky2_write32(hw, Y2_CFG_SPC + reg, val); +} + +static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) +{ + sky2_write16(hw, Y2_CFG_SPC + reg, val); +} +#endif diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig new file mode 100644 index 000000000..8cf7563a8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -0,0 +1,24 @@ +# +# Mellanox driver configuration +# + +config NET_VENDOR_MELLANOX + bool "Mellanox devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Mellanox cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MELLANOX + +source "drivers/net/ethernet/mellanox/mlx4/Kconfig" +source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig" + +endif # NET_VENDOR_MELLANOX diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile new file mode 100644 index 000000000..38fe32ef5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Mellanox device drivers. +# + +obj-$(CONFIG_MLX4_CORE) += mlx4/ +obj-$(CONFIG_MLX5_CORE) += mlx5/core/ diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig new file mode 100644 index 000000000..1486ce902 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -0,0 +1,46 @@ +# +# Mellanox driver configuration +# + +config MLX4_EN + tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" + depends on PCI + select MLX4_CORE + select PTP_1588_CLOCK + ---help--- + This driver supports Mellanox Technologies ConnectX Ethernet + devices. + +config MLX4_EN_DCB + bool "Data Center Bridging (DCB) Support" + default y + depends on MLX4_EN && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. 
+ If set to N, will not be able to configure QoS and ratelimit attributes. + This flag is depended on the kernel's DCB support. + + If unsure, set to Y + +config MLX4_EN_VXLAN + bool "VXLAN offloads Support" + default y + depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m) + ---help--- + Say Y here if you want to use VXLAN offloads in the driver. + +config MLX4_CORE + tristate + depends on PCI + default n + +config MLX4_DEBUG + bool "Verbose debugging output" if (MLX4_CORE && EXPERT) + depends on MLX4_CORE + default y + ---help--- + This option causes debugging code to be compiled into the + mlx4_core driver. The output can be turned on via the + debug_level module parameter (which can also be set after + the driver is loaded through sysfs). diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile new file mode 100644 index 000000000..c82217e0d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/Makefile @@ -0,0 +1,11 @@ +obj-$(CONFIG_MLX4_CORE) += mlx4_core.o + +mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \ + main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \ + srq.o resource_tracker.o + +obj-$(CONFIG_MLX4_EN) += mlx4_en.o + +mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ + en_resources.o en_netdev.o en_selftest.o en_clock.o +mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c new file mode 100644 index 000000000..0c51c69f8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -0,0 +1,835 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4.h" + +u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap) +{ + u32 obj; + + spin_lock(&bitmap->lock); + + obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); + if (obj >= bitmap->max) { + bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) + & bitmap->mask; + obj = find_first_zero_bit(bitmap->table, bitmap->max); + } + + if (obj < bitmap->max) { + set_bit(obj, bitmap->table); + bitmap->last = (obj + 1); + if (bitmap->last == bitmap->max) + bitmap->last = 0; + obj |= bitmap->top; + } else + obj = -1; + + if (obj != -1) + --bitmap->avail; + + spin_unlock(&bitmap->lock); + + return obj; +} + +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr) +{ + mlx4_bitmap_free_range(bitmap, obj, 1, use_rr); +} + +static unsigned long find_aligned_range(unsigned long *bitmap, + u32 start, u32 nbits, + int len, int align, u32 skip_mask) +{ + unsigned long end, i; + +again: + start = ALIGN(start, align); + + while ((start < nbits) && (test_bit(start, bitmap) || + (start & skip_mask))) + start += align; + + if (start >= nbits) + return -1; + + end = start+len; + if (end > nbits) + return -1; + + for (i = start + 1; i < end; i++) { + if (test_bit(i, bitmap) || ((u32)i & skip_mask)) { + start = i + 1; + goto again; + } + } + + return start; +} + +u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, + int align, u32 skip_mask) +{ + u32 obj; + + if (likely(cnt == 1 && align == 1 && !skip_mask)) + return mlx4_bitmap_alloc(bitmap); + + spin_lock(&bitmap->lock); + + obj = find_aligned_range(bitmap->table, bitmap->last, + bitmap->max, cnt, align, skip_mask); + if (obj >= bitmap->max) { + bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) + & bitmap->mask; + obj = find_aligned_range(bitmap->table, 0, bitmap->max, + cnt, align, skip_mask); + } + + if (obj < bitmap->max) { + bitmap_set(bitmap->table, obj, cnt); + if (obj == bitmap->last) { + bitmap->last = (obj + cnt); + if (bitmap->last >= bitmap->max) + bitmap->last = 0; + } + obj |= bitmap->top; + } else + obj = -1; + + if (obj != -1) + bitmap->avail -= cnt; + + spin_unlock(&bitmap->lock); + + return obj; +} + +u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap) +{ + return bitmap->avail; +} + +static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj) +{ + return obj & (bitmap->max + bitmap->reserved_top - 1); +} + +void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, + int use_rr) +{ + obj &= bitmap->max + bitmap->reserved_top - 1; + + spin_lock(&bitmap->lock); + if (!use_rr) { + bitmap->last = min(bitmap->last, obj); + bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) + & bitmap->mask; + } + bitmap_clear(bitmap->table, obj, cnt); + bitmap->avail += cnt; + spin_unlock(&bitmap->lock); +} + +int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, + u32 reserved_bot, u32 reserved_top) +{ + /* num must be a power of 2 */ + if (num != roundup_pow_of_two(num)) + return -EINVAL; + + bitmap->last = 0; + bitmap->top = 0; + bitmap->max = num - reserved_top; + bitmap->mask = mask; + bitmap->reserved_top = reserved_top; + bitmap->avail = num - reserved_top - reserved_bot; + bitmap->effective_len = bitmap->avail; + spin_lock_init(&bitmap->lock); + bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * + sizeof (long), GFP_KERNEL); + if (!bitmap->table) + return -ENOMEM; + + bitmap_set(bitmap->table, 0, reserved_bot); + + return 0; +} + +void 
mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) +{ + kfree(bitmap->table); +} + +struct mlx4_zone_allocator { + struct list_head entries; + struct list_head prios; + u32 last_uid; + u32 mask; + /* protect the zone_allocator from concurrent accesses */ + spinlock_t lock; + enum mlx4_zone_alloc_flags flags; +}; + +struct mlx4_zone_entry { + struct list_head list; + struct list_head prio_list; + u32 uid; + struct mlx4_zone_allocator *allocator; + struct mlx4_bitmap *bitmap; + int use_rr; + int priority; + int offset; + enum mlx4_zone_flags flags; +}; + +struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags) +{ + struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL); + + if (NULL == zones) + return NULL; + + INIT_LIST_HEAD(&zones->entries); + INIT_LIST_HEAD(&zones->prios); + spin_lock_init(&zones->lock); + zones->last_uid = 0; + zones->mask = 0; + zones->flags = flags; + + return zones; +} + +int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, + struct mlx4_bitmap *bitmap, + u32 flags, + int priority, + int offset, + u32 *puid) +{ + u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1); + struct mlx4_zone_entry *it; + struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL); + + if (NULL == zone) + return -ENOMEM; + + zone->flags = flags; + zone->bitmap = bitmap; + zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0; + zone->priority = priority; + zone->offset = offset; + + spin_lock(&zone_alloc->lock); + + zone->uid = zone_alloc->last_uid++; + zone->allocator = zone_alloc; + + if (zone_alloc->mask < mask) + zone_alloc->mask = mask; + + list_for_each_entry(it, &zone_alloc->prios, prio_list) + if (it->priority >= priority) + break; + + if (&it->prio_list == &zone_alloc->prios || it->priority > priority) + list_add_tail(&zone->prio_list, &it->prio_list); + list_add_tail(&zone->list, &it->list); + + spin_unlock(&zone_alloc->lock); + + *puid = zone->uid; + + return 0; +} + +/* Should be called under a lock */ +static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) +{ + struct mlx4_zone_allocator *zone_alloc = entry->allocator; + + if (!list_empty(&entry->prio_list)) { + /* Check if we need to add an alternative node to the prio list */ + if (!list_is_last(&entry->list, &zone_alloc->entries)) { + struct mlx4_zone_entry *next = list_first_entry(&entry->list, + typeof(*next), + list); + + if (next->priority == entry->priority) + list_add_tail(&next->prio_list, &entry->prio_list); + } + + list_del(&entry->prio_list); + } + + list_del(&entry->list); + + if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) { + u32 mask = 0; + struct mlx4_zone_entry *it; + + list_for_each_entry(it, &zone_alloc->prios, prio_list) { + u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1); + + if (mask < cur_mask) + mask = cur_mask; + } + zone_alloc->mask = mask; + } + + return 0; +} + +void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) +{ + struct mlx4_zone_entry *zone, *tmp; + + spin_lock(&zone_alloc->lock); + + list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) { + list_del(&zone->list); + list_del(&zone->prio_list); + kfree(zone); + } + + spin_unlock(&zone_alloc->lock); + kfree(zone_alloc); +} + +/* Should be called under a lock */ +static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count, + int align, u32 skip_mask, u32 *puid) +{ + u32 uid; + u32 res; + struct mlx4_zone_allocator *zone_alloc = zone->allocator; + struct mlx4_zone_entry *curr_node; + + res = 
mlx4_bitmap_alloc_range(zone->bitmap, count, + align, skip_mask); + + if (res != (u32)-1) { + res += zone->offset; + uid = zone->uid; + goto out; + } + + list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) { + if (unlikely(curr_node->priority == zone->priority)) + break; + } + + if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) { + struct mlx4_zone_entry *it = curr_node; + + list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) { + res = mlx4_bitmap_alloc_range(it->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += it->offset; + uid = it->uid; + goto out; + } + } + } + + if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) { + struct mlx4_zone_entry *it = curr_node; + + list_for_each_entry_from(it, &zone_alloc->entries, list) { + if (unlikely(it == zone)) + continue; + + if (unlikely(it->priority != curr_node->priority)) + break; + + res = mlx4_bitmap_alloc_range(it->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += it->offset; + uid = it->uid; + goto out; + } + } + } + + if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) { + if (list_is_last(&curr_node->prio_list, &zone_alloc->prios)) + goto out; + + curr_node = list_first_entry(&curr_node->prio_list, + typeof(*curr_node), + prio_list); + + list_for_each_entry_from(curr_node, &zone_alloc->entries, list) { + res = mlx4_bitmap_alloc_range(curr_node->bitmap, count, + align, skip_mask); + if (res != (u32)-1) { + res += curr_node->offset; + uid = curr_node->uid; + goto out; + } + } + } + +out: + if (NULL != puid && res != (u32)-1) + *puid = uid; + return res; +} + +/* Should be called under a lock */ +static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj, + u32 count) +{ + mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr); +} + +/* Should be called under a lock */ +static struct mlx4_zone_entry *__mlx4_find_zone_by_uid( + struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + + list_for_each_entry(zone, &zones->entries, list) { + if (zone->uid == uid) + return zone; + } + + return NULL; +} + +struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + struct mlx4_bitmap *bitmap; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + bitmap = zone == NULL ? NULL : zone->bitmap; + + spin_unlock(&zones->lock); + + return bitmap; +} + +int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) +{ + struct mlx4_zone_entry *zone; + int res; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) { + res = -1; + goto out; + } + + res = __mlx4_zone_remove_one_entry(zone); + +out: + spin_unlock(&zones->lock); + kfree(zone); + + return res; +} + +/* Should be called under a lock */ +static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique( + struct mlx4_zone_allocator *zones, u32 obj) +{ + struct mlx4_zone_entry *zone, *zone_candidate = NULL; + u32 dist = (u32)-1; + + /* Search for the smallest zone that this obj could be + * allocated from. This is done in order to handle + * situations when small bitmaps are allocated from bigger + * bitmaps (and the allocated space is marked as reserved in + * the bigger bitmap. 
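
/*
 * Illustrative, stand-alone model of the fallback order implemented by
 * __mlx4_alloc_from_zone() above: the requested zone is tried first, then,
 * depending on that zone's flags, zones earlier in the priority list, other
 * zones of the same priority, and finally later ones.  Nothing below is
 * driver API; the flag names (roughly MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO,
 * ..._EQ_PRIO and MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) and the array-based
 * "priority list" are simplifications made for this sketch.
 */
#include <stddef.h>
#include <stdio.h>

enum {
	FALLBACK_EARLIER = 1 << 0,	/* roughly MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO */
	FALLBACK_SAME	 = 1 << 1,	/* roughly MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO */
	FALLBACK_LATER	 = 1 << 2,	/* roughly MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO */
};

struct zone {
	const char *name;
	int priority;			/* zones are kept sorted by this value */
	int free;			/* entries this zone can still hand out */
	int flags;
};

static struct zone *take(struct zone *z)
{
	if (z->free) {
		z->free--;
		return z;
	}
	return NULL;
}

/* 'zones' plays the role of the driver's priority-sorted zone list. */
static struct zone *zone_alloc(struct zone *zones, int n, int from)
{
	struct zone *z = &zones[from], *got = NULL;
	int i;

	if ((got = take(z)))
		return got;

	if (z->flags & FALLBACK_EARLIER) {
		for (i = from - 1; i >= 0; i--) {
			if ((got = take(&zones[i])))
				return got;
		}
	}

	if (z->flags & FALLBACK_SAME) {
		for (i = 0; i < n; i++) {
			if (i != from && zones[i].priority == z->priority &&
			    (got = take(&zones[i])))
				return got;
		}
	}

	if (z->flags & FALLBACK_LATER) {
		for (i = from + 1; i < n; i++) {
			if (zones[i].priority > z->priority &&
			    (got = take(&zones[i])))
				return got;
		}
	}

	return NULL;			/* nowhere left that we are allowed to use */
}

int main(void)
{
	struct zone zones[] = {
		{ "qp-low",  0, 0, 0 },
		{ "qp-mid",  1, 0, FALLBACK_EARLIER | FALLBACK_SAME | FALLBACK_LATER },
		{ "qp-mid2", 1, 1, 0 },
		{ "qp-high", 2, 4, 0 },
	};
	struct zone *got = zone_alloc(zones, 4, 1);

	printf("allocated from: %s\n", got ? got->name : "(nothing)");
	return 0;
}
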
+ */ + list_for_each_entry(zone, &zones->entries, list) { + if (obj >= zone->offset) { + u32 mobj = (obj - zone->offset) & zones->mask; + + if (mobj < zone->bitmap->max) { + u32 curr_dist = zone->bitmap->effective_len; + + if (curr_dist < dist) { + dist = curr_dist; + zone_candidate = zone; + } + } + } + } + + return zone_candidate; +} + +u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, + int align, u32 skip_mask, u32 *puid) +{ + struct mlx4_zone_entry *zone; + int res = -1; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) + goto out; + + res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid); + +out: + spin_unlock(&zones->lock); + + return res; +} + +u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count) +{ + struct mlx4_zone_entry *zone; + int res = 0; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid(zones, uid); + + if (NULL == zone) { + res = -1; + goto out; + } + + __mlx4_free_from_zone(zone, obj, count); + +out: + spin_unlock(&zones->lock); + + return res; +} + +u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count) +{ + struct mlx4_zone_entry *zone; + int res; + + if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP)) + return -EFAULT; + + spin_lock(&zones->lock); + + zone = __mlx4_find_zone_by_uid_unique(zones, obj); + + if (NULL == zone) { + res = -1; + goto out; + } + + __mlx4_free_from_zone(zone, obj, count); + res = 0; + +out: + spin_unlock(&zones->lock); + + return res; +} +/* + * Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. 
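
/*
 * Stand-alone model of the sizing decision described in the comment above
 * and implemented by mlx4_buf_alloc() below: a buffer no larger than
 * 'max_direct' is one contiguous allocation, anything bigger becomes a list
 * of page-sized chunks so that no large contiguous region is required.
 * Only the bookkeeping is mimicked here -- there is no DMA and none of
 * these names exist in the driver.
 */
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_PAGE_SIZE  (1 << MODEL_PAGE_SHIFT)

struct layout {
	int nbufs;		/* number of separate allocations */
	int npages;		/* pages reported for address translation */
	int page_shift;		/* log2 of the "page" size described */
};

static struct layout plan_buf(int size, int max_direct)
{
	struct layout l;

	if (size <= max_direct) {
		/* one physically contiguous buffer, described as one big page */
		l.nbufs = 1;
		l.npages = 1;
		l.page_shift = MODEL_PAGE_SHIFT;
		while ((1 << l.page_shift) < size)
			l.page_shift++;	/* round up, like get_order() + PAGE_SHIFT */
	} else {
		/* page-sized pieces that do not need to be contiguous */
		l.nbufs = (size + MODEL_PAGE_SIZE - 1) / MODEL_PAGE_SIZE;
		l.npages = l.nbufs;
		l.page_shift = MODEL_PAGE_SHIFT;
	}
	return l;
}

int main(void)
{
	int sizes[] = { 2048, 16384, 262144 };
	int i;

	for (i = 0; i < 3; i++) {
		struct layout l = plan_buf(sizes[i], 16384);

		printf("size %6d -> nbufs %3d, npages %3d, page_shift %d\n",
		       sizes[i], l.nbufs, l.npages, l.page_shift);
	}
	return 0;
}
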
+ */ + +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf, gfp_t gfp) +{ + dma_addr_t t; + + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev, + size, &t, gfp); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + + memset(buf->direct.buf, 0, size); + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + gfp); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; ++i) { + buf->page_list[i].buf = + dma_alloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, + &t, gfp); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + + memset(buf->page_list[i].buf, 0, PAGE_SIZE); + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + pages = kmalloc(sizeof *pages * buf->nbufs, gfp); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; ++i) + pages[i] = virt_to_page(buf->page_list[i].buf); + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + mlx4_buf_free(dev, size, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(mlx4_buf_alloc); + +void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) +{ + int i; + + if (buf->nbufs == 1) + dma_free_coherent(&dev->persist->pdev->dev, size, + buf->direct.buf, + buf->direct.map); + else { + if (BITS_PER_LONG == 64) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; ++i) + if (buf->page_list[i].buf) + dma_free_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(mlx4_buf_free); + +static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device, + gfp_t gfp) +{ + struct mlx4_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof *pgdir, gfp); + if (!pgdir) + return NULL; + + bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); + pgdir->bits[0] = pgdir->order0; + pgdir->bits[1] = pgdir->order1; + pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, + &pgdir->db_dma, gfp); + if (!pgdir->db_page) { + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, + struct mlx4_db *db, int order) +{ + int o; + int i; + + for (o = order; o <= 1; ++o) { + i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); + if (i < MLX4_DB_PER_PAGE >> o) + goto found; + } + + return -ENOMEM; + +found: + clear_bit(i, pgdir->bits[o]); + + i <<= o; + + if (o > order) + set_bit(i ^ 1, pgdir->bits[order]); + + db->u.pgdir = pgdir; + db->index = i; + db->db = pgdir->db_page + db->index; + db->dma = pgdir->db_dma + db->index * 4; + db->order = order; + + return 0; +} + +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&priv->pgdir_mutex); + + list_for_each_entry(pgdir, &priv->pgdir_list, list) + if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) + goto out; + + pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + 
list_add(&pgdir->list, &priv->pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); + +out: + mutex_unlock(&priv->pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_db_alloc); + +void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int o; + int i; + + mutex_lock(&priv->pgdir_mutex); + + o = db->order; + i = db->index; + + if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { + clear_bit(i ^ 1, db->u.pgdir->order0); + ++o; + } + i >>= o; + set_bit(i, db->u.pgdir->bits[o]); + + if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + kfree(db->u.pgdir); + } + + mutex_unlock(&priv->pgdir_mutex); +} +EXPORT_SYMBOL_GPL(mlx4_db_free); + +int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, + int size, int max_direct) +{ + int err; + + err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL); + if (err) + return err; + + *wqres->db.db = 0; + + err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL); + if (err) + goto err_db; + + err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, + &wqres->mtt); + if (err) + goto err_buf; + + err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL); + if (err) + goto err_mtt; + + return 0; + +err_mtt: + mlx4_mtt_cleanup(dev, &wqres->mtt); +err_buf: + mlx4_buf_free(dev, size, &wqres->buf); +err_db: + mlx4_db_free(dev, &wqres->db); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); + +void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, + int size) +{ + mlx4_mtt_cleanup(dev, &wqres->mtt); + mlx4_buf_free(dev, size, &wqres->buf); + mlx4_db_free(dev, &wqres->db); +} +EXPORT_SYMBOL_GPL(mlx4_free_hwq_res); diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c new file mode 100644 index 000000000..715de8aff --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include "mlx4.h" + +enum { + MLX4_CATAS_POLL_INTERVAL = 5 * HZ, +}; + + + +int mlx4_internal_err_reset = 1; +module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644); +MODULE_PARM_DESC(internal_err_reset, + "Reset device on internal errors if non-zero (default 1)"); + +static int read_vendor_id(struct mlx4_dev *dev) +{ + u16 vendor_id = 0; + int ret; + + ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id); + if (ret) { + mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret); + return ret; + } + + if (vendor_id == 0xffff) { + mlx4_err(dev, "PCI can't be accessed to read vendor id\n"); + return -EINVAL; + } + + return 0; +} + +static int mlx4_reset_master(struct mlx4_dev *dev) +{ + int err = 0; + + if (mlx4_is_master(dev)) + mlx4_report_internal_err_comm_event(dev); + + if (!pci_channel_offline(dev->persist->pdev)) { + err = read_vendor_id(dev); + /* If PCI can't be accessed to read vendor ID we assume that its + * link was disabled and chip was already reset. + */ + if (err) + return 0; + + err = mlx4_reset(dev); + if (err) + mlx4_err(dev, "Fail to reset HCA\n"); + } + + return err; +} + +static int mlx4_reset_slave(struct mlx4_dev *dev) +{ +#define COM_CHAN_RST_REQ_OFFSET 0x10 +#define COM_CHAN_RST_ACK_OFFSET 0x08 + + u32 comm_flags; + u32 rst_req; + u32 rst_ack; + unsigned long end; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (pci_channel_offline(dev->persist->pdev)) + return 0; + + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + if (comm_flags == 0xffffffff) { + mlx4_err(dev, "VF reset is not needed\n"); + return 0; + } + + if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) { + mlx4_err(dev, "VF reset is not supported\n"); + return -EOPNOTSUPP; + } + + rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >> + COM_CHAN_RST_REQ_OFFSET; + rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >> + COM_CHAN_RST_ACK_OFFSET; + if (rst_req != rst_ack) { + mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n"); + return -EIO; + } + + rst_req ^= 1; + mlx4_warn(dev, "VF is sending reset request to Firmware\n"); + comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET; + __raw_writel((__force u32)cpu_to_be32(comm_flags), + (__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS); + /* Make sure that our comm channel write doesn't + * get mixed in with writes from another CPU. + */ + mmiowb(); + + end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies; + while (time_before(jiffies, end)) { + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >> + COM_CHAN_RST_ACK_OFFSET; + + /* Reading rst_req again since the communication channel can + * be reset at any time by the PF and all its bits will be + * set to zero. 
+ */ + rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >> + COM_CHAN_RST_REQ_OFFSET; + + if (rst_ack == rst_req) { + mlx4_warn(dev, "VF Reset succeed\n"); + return 0; + } + cond_resched(); + } + mlx4_err(dev, "Fail to send reset over the communication channel\n"); + return -ETIMEDOUT; +} + +static int mlx4_comm_internal_err(u32 slave_read) +{ + return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == + (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; +} + +void mlx4_enter_error_state(struct mlx4_dev_persistent *persist) +{ + int err; + struct mlx4_dev *dev; + + if (!mlx4_internal_err_reset) + return; + + mutex_lock(&persist->device_state_mutex); + if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + goto out; + + dev = persist->dev; + mlx4_err(dev, "device is going to be reset\n"); + if (mlx4_is_slave(dev)) + err = mlx4_reset_slave(dev); + else + err = mlx4_reset_master(dev); + BUG_ON(err != 0); + + dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; + mlx4_err(dev, "device was reset successfully\n"); + mutex_unlock(&persist->device_state_mutex); + + /* At that step HW was already reset, now notify clients */ + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0); + mlx4_cmd_wake_completions(dev); + return; + +out: + mutex_unlock(&persist->device_state_mutex); +} + +static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist) +{ + int err = 0; + + mlx4_enter_error_state(persist); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP && + !(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) { + err = mlx4_restart_one(persist->pdev); + mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n", + err); + } + mutex_unlock(&persist->interface_state_mutex); +} + +static void dump_err_buf(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + int i; + + mlx4_err(dev, "Internal error detected:\n"); + for (i = 0; i < priv->fw.catas_size; ++i) + mlx4_err(dev, " buf[%02x]: %08x\n", + i, swab32(readl(priv->catas_err.map + i))); +} + +static void poll_catas(unsigned long dev_ptr) +{ + struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr; + struct mlx4_priv *priv = mlx4_priv(dev); + u32 slave_read; + + if (mlx4_is_slave(dev)) { + slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); + if (mlx4_comm_internal_err(slave_read)) { + mlx4_warn(dev, "Internal error detected on the communication channel\n"); + goto internal_err; + } + } else if (readl(priv->catas_err.map)) { + dump_err_buf(dev); + goto internal_err; + } + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + mlx4_warn(dev, "Internal error mark was detected on device\n"); + goto internal_err; + } + + mod_timer(&priv->catas_err.timer, + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL)); + return; + +internal_err: + if (mlx4_internal_err_reset) + queue_work(dev->persist->catas_wq, &dev->persist->catas_work); +} + +static void catas_reset(struct work_struct *work) +{ + struct mlx4_dev_persistent *persist = + container_of(work, struct mlx4_dev_persistent, + catas_work); + + mlx4_handle_error_state(persist); +} + +void mlx4_start_catas_poll(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + phys_addr_t addr; + + INIT_LIST_HEAD(&priv->catas_err.list); + init_timer(&priv->catas_err.timer); + priv->catas_err.map = NULL; + + if (!mlx4_is_slave(dev)) { + addr = pci_resource_start(dev->persist->pdev, + priv->fw.catas_bar) + + priv->fw.catas_offset; + + priv->catas_err.map = ioremap(addr, 
priv->fw.catas_size * 4); + if (!priv->catas_err.map) { + mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n", + (unsigned long long)addr); + return; + } + } + + priv->catas_err.timer.data = (unsigned long) dev; + priv->catas_err.timer.function = poll_catas; + priv->catas_err.timer.expires = + round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); + add_timer(&priv->catas_err.timer); +} + +void mlx4_stop_catas_poll(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + del_timer_sync(&priv->catas_err.timer); + + if (priv->catas_err.map) { + iounmap(priv->catas_err.map); + priv->catas_err.map = NULL; + } + + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION) + flush_workqueue(dev->persist->catas_wq); +} + +int mlx4_catas_init(struct mlx4_dev *dev) +{ + INIT_WORK(&dev->persist->catas_work, catas_reset); + dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health"); + if (!dev->persist->catas_wq) + return -ENOMEM; + + return 0; +} + +void mlx4_catas_end(struct mlx4_dev *dev) +{ + if (dev->persist->catas_wq) { + destroy_workqueue(dev->persist->catas_wq); + dev->persist->catas_wq = NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c new file mode 100644 index 000000000..529ef0594 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -0,0 +1,3212 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "mlx4.h" +#include "fw.h" +#include "fw_qos.h" + +#define CMD_POLL_TOKEN 0xffff +#define INBOX_MASK 0xffffffffffffff00ULL + +#define CMD_CHAN_VER 1 +#define CMD_CHAN_IF_REV 1 + +enum { + /* command completed successfully: */ + CMD_STAT_OK = 0x00, + /* Internal error (such as a bus error) occurred while processing command: */ + CMD_STAT_INTERNAL_ERR = 0x01, + /* Operation/command not supported or opcode modifier not supported: */ + CMD_STAT_BAD_OP = 0x02, + /* Parameter not supported or parameter out of range: */ + CMD_STAT_BAD_PARAM = 0x03, + /* System not enabled or bad system state: */ + CMD_STAT_BAD_SYS_STATE = 0x04, + /* Attempt to access reserved or unallocaterd resource: */ + CMD_STAT_BAD_RESOURCE = 0x05, + /* Requested resource is currently executing a command, or is otherwise busy: */ + CMD_STAT_RESOURCE_BUSY = 0x06, + /* Required capability exceeds device limits: */ + CMD_STAT_EXCEED_LIM = 0x08, + /* Resource is not in the appropriate state or ownership: */ + CMD_STAT_BAD_RES_STATE = 0x09, + /* Index out of range: */ + CMD_STAT_BAD_INDEX = 0x0a, + /* FW image corrupted: */ + CMD_STAT_BAD_NVMEM = 0x0b, + /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */ + CMD_STAT_ICM_ERROR = 0x0c, + /* Attempt to modify a QP/EE which is not in the presumed state: */ + CMD_STAT_BAD_QP_STATE = 0x10, + /* Bad segment parameters (Address/Size): */ + CMD_STAT_BAD_SEG_PARAM = 0x20, + /* Memory Region has Memory Windows bound to: */ + CMD_STAT_REG_BOUND = 0x21, + /* HCA local attached memory not present: */ + CMD_STAT_LAM_NOT_PRE = 0x22, + /* Bad management packet (silently discarded): */ + CMD_STAT_BAD_PKT = 0x30, + /* More outstanding CQEs in CQ than new CQ size: */ + CMD_STAT_BAD_SIZE = 0x40, + /* Multi Function device support required: */ + CMD_STAT_MULTI_FUNC_REQ = 0x50, +}; + +enum { + HCR_IN_PARAM_OFFSET = 0x00, + HCR_IN_MODIFIER_OFFSET = 0x08, + HCR_OUT_PARAM_OFFSET = 0x0c, + HCR_TOKEN_OFFSET = 0x14, + HCR_STATUS_OFFSET = 0x18, + + HCR_OPMOD_SHIFT = 12, + HCR_T_BIT = 21, + HCR_E_BIT = 22, + HCR_GO_BIT = 23 +}; + +enum { + GO_BIT_TIMEOUT_MSECS = 10000 +}; + +enum mlx4_vlan_transition { + MLX4_VLAN_TRANSITION_VST_VST = 0, + MLX4_VLAN_TRANSITION_VST_VGT = 1, + MLX4_VLAN_TRANSITION_VGT_VST = 2, + MLX4_VLAN_TRANSITION_VGT_VGT = 3, +}; + + +struct mlx4_cmd_context { + struct completion done; + int result; + int next; + u64 out_param; + u16 token; + u8 fw_status; +}; + +static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr_cmd *in_vhcr); + +static int mlx4_status_to_errno(u8 status) +{ + static const int trans_table[] = { + [CMD_STAT_INTERNAL_ERR] = -EIO, + [CMD_STAT_BAD_OP] = -EPERM, + [CMD_STAT_BAD_PARAM] = -EINVAL, + [CMD_STAT_BAD_SYS_STATE] = -ENXIO, + [CMD_STAT_BAD_RESOURCE] = -EBADF, + [CMD_STAT_RESOURCE_BUSY] = -EBUSY, + [CMD_STAT_EXCEED_LIM] = -ENOMEM, + [CMD_STAT_BAD_RES_STATE] = -EBADF, + [CMD_STAT_BAD_INDEX] = -EBADF, + [CMD_STAT_BAD_NVMEM] = -EFAULT, + [CMD_STAT_ICM_ERROR] = -ENFILE, + [CMD_STAT_BAD_QP_STATE] = -EINVAL, + [CMD_STAT_BAD_SEG_PARAM] = -EFAULT, + [CMD_STAT_REG_BOUND] = -EBUSY, + [CMD_STAT_LAM_NOT_PRE] = -EAGAIN, + [CMD_STAT_BAD_PKT] = -EINVAL, + [CMD_STAT_BAD_SIZE] = -ENOMEM, + [CMD_STAT_MULTI_FUNC_REQ] = -EACCES, + }; + + if (status >= ARRAY_SIZE(trans_table) || + (status != CMD_STAT_OK && trans_table[status] == 0)) + return -EIO; + + return trans_table[status]; +} + +static u8 
mlx4_errno_to_status(int errno) +{ + switch (errno) { + case -EPERM: + return CMD_STAT_BAD_OP; + case -EINVAL: + return CMD_STAT_BAD_PARAM; + case -ENXIO: + return CMD_STAT_BAD_SYS_STATE; + case -EBUSY: + return CMD_STAT_RESOURCE_BUSY; + case -ENOMEM: + return CMD_STAT_EXCEED_LIM; + case -ENFILE: + return CMD_STAT_ICM_ERROR; + default: + return CMD_STAT_INTERNAL_ERR; + } +} + +static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op, + u8 op_modifier) +{ + switch (op) { + case MLX4_CMD_UNMAP_ICM: + case MLX4_CMD_UNMAP_ICM_AUX: + case MLX4_CMD_UNMAP_FA: + case MLX4_CMD_2RST_QP: + case MLX4_CMD_HW2SW_EQ: + case MLX4_CMD_HW2SW_CQ: + case MLX4_CMD_HW2SW_SRQ: + case MLX4_CMD_HW2SW_MPT: + case MLX4_CMD_CLOSE_HCA: + case MLX4_QP_FLOW_STEERING_DETACH: + case MLX4_CMD_FREE_RES: + case MLX4_CMD_CLOSE_PORT: + return CMD_STAT_OK; + + case MLX4_CMD_QP_ATTACH: + /* On Detach case return success */ + if (op_modifier == 0) + return CMD_STAT_OK; + return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + + default: + return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + } +} + +static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status) +{ + /* Any error during the closing commands below is considered fatal */ + if (op == MLX4_CMD_CLOSE_HCA || + op == MLX4_CMD_HW2SW_EQ || + op == MLX4_CMD_HW2SW_CQ || + op == MLX4_CMD_2RST_QP || + op == MLX4_CMD_HW2SW_SRQ || + op == MLX4_CMD_SYNC_TPT || + op == MLX4_CMD_UNMAP_ICM || + op == MLX4_CMD_UNMAP_ICM_AUX || + op == MLX4_CMD_UNMAP_FA) + return 1; + /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals + * CMD_STAT_REG_BOUND. + * This status indicates that memory region has memory windows bound to it + * which may result from invalid user space usage and is not fatal. + */ + if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND) + return 1; + return 0; +} + +static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier, + int err) +{ + /* Only if reset flow is really active return code is based on + * command, otherwise current error code is returned. + */ + if (mlx4_internal_err_reset) { + mlx4_enter_error_state(dev->persist); + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + } + + return err; +} + +static int comm_pending(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 status = readl(&priv->mfunc.comm->slave_read); + + return (swab32(status) >> 31) != priv->cmd.comm_toggle; +} + +static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 val; + + /* To avoid writing to unknown addresses after the device state was + * changed to internal error and the function was rest, + * check the INTERNAL_ERROR flag which is updated under + * device_state_mutex lock. 
+ */ + mutex_lock(&dev->persist->device_state_mutex); + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + mutex_unlock(&dev->persist->device_state_mutex); + return -EIO; + } + + priv->cmd.comm_toggle ^= 1; + val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31); + __raw_writel((__force u32) cpu_to_be32(val), + &priv->mfunc.comm->slave_write); + mmiowb(); + mutex_unlock(&dev->persist->device_state_mutex); + return 0; +} + +static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, + unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + unsigned long end; + int err = 0; + int ret_from_pending = 0; + + /* First, verify that the master reports correct status */ + if (comm_pending(dev)) { + mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n", + priv->cmd.comm_toggle, cmd); + return -EAGAIN; + } + + /* Write command */ + down(&priv->cmd.poll_sem); + if (mlx4_comm_cmd_post(dev, cmd, param)) { + /* Only in case the device state is INTERNAL_ERROR, + * mlx4_comm_cmd_post returns with an error + */ + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + goto out; + } + + end = msecs_to_jiffies(timeout) + jiffies; + while (comm_pending(dev) && time_before(jiffies, end)) + cond_resched(); + ret_from_pending = comm_pending(dev); + if (ret_from_pending) { + /* check if the slave is trying to boot in the middle of + * FLR process. The only non-zero result in the RESET command + * is MLX4_DELAY_RESET_SLAVE*/ + if ((MLX4_COMM_CMD_RESET == cmd)) { + err = MLX4_DELAY_RESET_SLAVE; + goto out; + } else { + mlx4_warn(dev, "Communication channel command 0x%x timed out\n", + cmd); + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + } + } + + if (err) + mlx4_enter_error_state(dev->persist); +out: + up(&priv->cmd.poll_sem); + return err; +} + +static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd, + u16 param, u16 op, unsigned long timeout) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_context *context; + unsigned long end; + int err = 0; + + down(&cmd->event_sem); + + spin_lock(&cmd->context_lock); + BUG_ON(cmd->free_head < 0); + context = &cmd->context[cmd->free_head]; + context->token += cmd->token_mask + 1; + cmd->free_head = context->next; + spin_unlock(&cmd->context_lock); + + reinit_completion(&context->done); + + if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) { + /* Only in case the device state is INTERNAL_ERROR, + * mlx4_comm_cmd_post returns with an error + */ + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + goto out; + } + + if (!wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n", + vhcr_cmd, op); + goto out_reset; + } + + err = context->result; + if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) { + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + vhcr_cmd, context->fw_status); + if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) + goto out_reset; + } + + /* wait for comm channel ready + * this is necessary for prevention the race + * when switching between event to polling mode + * Skipping this section in case the device is in FATAL_ERROR state, + * In this state, no commands are sent via the comm channel until + * the device has returned from reset. 
+ */ + if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { + end = msecs_to_jiffies(timeout) + jiffies; + while (comm_pending(dev) && time_before(jiffies, end)) + cond_resched(); + } + goto out; + +out_reset: + err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + mlx4_enter_error_state(dev->persist); +out: + spin_lock(&cmd->context_lock); + context->next = cmd->free_head; + cmd->free_head = context - cmd->context; + spin_unlock(&cmd->context_lock); + + up(&cmd->event_sem); + return err; +} + +int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, + u16 op, unsigned long timeout) +{ + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout); + return mlx4_comm_cmd_poll(dev, cmd, param, timeout); +} + +static int cmd_pending(struct mlx4_dev *dev) +{ + u32 status; + + if (pci_channel_offline(dev->persist->pdev)) + return -EIO; + + status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET); + + return (status & swab32(1 << HCR_GO_BIT)) || + (mlx4_priv(dev)->cmd.toggle == + !!(status & swab32(1 << HCR_T_BIT))); +} + +static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param, + u32 in_modifier, u8 op_modifier, u16 op, u16 token, + int event) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + u32 __iomem *hcr = cmd->hcr; + int ret = -EIO; + unsigned long end; + + mutex_lock(&dev->persist->device_state_mutex); + /* To avoid writing to unknown addresses after the device state was + * changed to internal error and the chip was reset, + * check the INTERNAL_ERROR flag which is updated under + * device_state_mutex lock. + */ + if (pci_channel_offline(dev->persist->pdev) || + (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { + /* + * Device is going through error recovery + * and cannot accept commands. + */ + goto out; + } + + end = jiffies; + if (event) + end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS); + + while (cmd_pending(dev)) { + if (pci_channel_offline(dev->persist->pdev)) { + /* + * Device is going through error recovery + * and cannot accept commands. + */ + goto out; + } + + if (time_after_eq(jiffies, end)) { + mlx4_err(dev, "%s:cmd_pending failed\n", __func__); + goto out; + } + cond_resched(); + } + + /* + * We use writel (instead of something like memcpy_toio) + * because writes of less than 32 bits to the HCR don't work + * (and some architectures such as ia64 implement memcpy_toio + * in terms of writeb). + */ + __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0); + __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1); + __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2); + __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3); + __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4); + __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5); + + /* __raw_writel may not order writes. */ + wmb(); + + __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) | + (cmd->toggle << HCR_T_BIT) | + (event ? (1 << HCR_E_BIT) : 0) | + (op_modifier << HCR_OPMOD_SHIFT) | + op), hcr + 6); + + /* + * Make sure that our HCR writes don't get mixed in with + * writes from another CPU starting a FW command. 
+ */ + mmiowb(); + + cmd->toggle = cmd->toggle ^ 1; + + ret = 0; + +out: + if (ret) + mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n", + op, ret, in_param, in_modifier, op_modifier); + mutex_unlock(&dev->persist->device_state_mutex); + + return ret; +} + +static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr; + int ret; + + mutex_lock(&priv->cmd.slave_cmd_mutex); + + vhcr->in_param = cpu_to_be64(in_param); + vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0; + vhcr->in_modifier = cpu_to_be32(in_modifier); + vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff)); + vhcr->token = cpu_to_be16(CMD_POLL_TOKEN); + vhcr->status = 0; + vhcr->flags = !!(priv->cmd.use_events) << 6; + + if (mlx4_is_master(dev)) { + ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr); + if (!ret) { + if (out_is_imm) { + if (out_param) + *out_param = + be64_to_cpu(vhcr->out_param); + else { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + vhcr->status = CMD_STAT_BAD_PARAM; + } + } + ret = mlx4_status_to_errno(vhcr->status); + } + if (ret && + dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + ret = mlx4_internal_err_ret_value(dev, op, op_modifier); + } else { + ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op, + MLX4_COMM_TIME + timeout); + if (!ret) { + if (out_is_imm) { + if (out_param) + *out_param = + be64_to_cpu(vhcr->out_param); + else { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + vhcr->status = CMD_STAT_BAD_PARAM; + } + } + ret = mlx4_status_to_errno(vhcr->status); + } else { + if (dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR) + ret = mlx4_internal_err_ret_value(dev, op, + op_modifier); + else + mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op); + } + } + + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return ret; +} + +static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + void __iomem *hcr = priv->cmd.hcr; + int err = 0; + unsigned long end; + u32 stat; + + down(&priv->cmd.poll_sem); + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + /* + * Device is going through error recovery + * and cannot accept commands. + */ + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + goto out; + } + + if (out_is_imm && !out_param) { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + err = -EINVAL; + goto out; + } + + err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, + in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); + if (err) + goto out_reset; + + end = msecs_to_jiffies(timeout) + jiffies; + while (cmd_pending(dev) && time_before(jiffies, end)) { + if (pci_channel_offline(dev->persist->pdev)) { + /* + * Device is going through error recovery + * and cannot accept commands. 
+ */ + err = -EIO; + goto out_reset; + } + + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) { + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + goto out; + } + + cond_resched(); + } + + if (cmd_pending(dev)) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); + err = -EIO; + goto out_reset; + } + + if (out_is_imm) + *out_param = + (u64) be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 | + (u64) be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4)); + stat = be32_to_cpu((__force __be32) + __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24; + err = mlx4_status_to_errno(stat); + if (err) { + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, stat); + if (mlx4_closing_cmd_fatal_error(op, stat)) + goto out_reset; + goto out; + } + +out_reset: + if (err) + err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); +out: + up(&priv->cmd.poll_sem); + return err; +} + +void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_context *context = + &priv->cmd.context[token & priv->cmd.token_mask]; + + /* previously timed out command completing at long last */ + if (token != context->token) + return; + + context->fw_status = status; + context->result = mlx4_status_to_errno(status); + context->out_param = out_param; + + complete(&context->done); +} + +static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout) +{ + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_context *context; + int err = 0; + + down(&cmd->event_sem); + + spin_lock(&cmd->context_lock); + BUG_ON(cmd->free_head < 0); + context = &cmd->context[cmd->free_head]; + context->token += cmd->token_mask + 1; + cmd->free_head = context->next; + spin_unlock(&cmd->context_lock); + + if (out_is_imm && !out_param) { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + err = -EINVAL; + goto out; + } + + reinit_completion(&context->done); + + err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, + in_modifier, op_modifier, op, context->token, 1); + if (err) + goto out_reset; + + if (!wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout))) { + mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", + op); + if (op == MLX4_CMD_NOP) { + err = -EBUSY; + goto out; + } else { + err = -EIO; + goto out_reset; + } + } + + err = context->result; + if (err) { + /* Since we do not want to have this error message always + * displayed at driver start when there are ConnectX2 HCAs + * on the host, we deprecate the error message for this + * specific command/input_mod/opcode_mod/fw-status to be debug. 
+ */ + if (op == MLX4_CMD_SET_PORT && + (in_modifier == 1 || in_modifier == 2) && + op_modifier == MLX4_SET_PORT_IB_OPCODE && + context->fw_status == CMD_STAT_BAD_SIZE) + mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); + else + mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n", + op, context->fw_status); + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + err = mlx4_internal_err_ret_value(dev, op, op_modifier); + else if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) + goto out_reset; + + goto out; + } + + if (out_is_imm) + *out_param = context->out_param; + +out_reset: + if (err) + err = mlx4_cmd_reset_flow(dev, op, op_modifier, err); +out: + spin_lock(&cmd->context_lock); + context->next = cmd->free_head; + cmd->free_head = context - cmd->context; + spin_unlock(&cmd->context_lock); + + up(&cmd->event_sem); + return err; +} + +int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, + int out_is_imm, u32 in_modifier, u8 op_modifier, + u16 op, unsigned long timeout, int native) +{ + if (pci_channel_offline(dev->persist->pdev)) + return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO); + + if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) { + if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + return mlx4_internal_err_ret_value(dev, op, + op_modifier); + if (mlx4_priv(dev)->cmd.use_events) + return mlx4_cmd_wait(dev, in_param, out_param, + out_is_imm, in_modifier, + op_modifier, op, timeout); + else + return mlx4_cmd_poll(dev, in_param, out_param, + out_is_imm, in_modifier, + op_modifier, op, timeout); + } + return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm, + in_modifier, op_modifier, op, timeout); +} +EXPORT_SYMBOL_GPL(__mlx4_cmd); + + +int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr, + int slave, u64 slave_addr, + int size, int is_read) +{ + u64 in_param; + u64 out_param; + + if ((slave_addr & 0xfff) | (master_addr & 0xfff) | + (slave & ~0x7f) | (size & 0xff)) { + mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n", + slave_addr, master_addr, slave, size); + return -EINVAL; + } + + if (is_read) { + in_param = (u64) slave | slave_addr; + out_param = (u64) dev->caps.function | master_addr; + } else { + in_param = (u64) dev->caps.function | master_addr; + out_param = (u64) slave | slave_addr; + } + + return mlx4_cmd_imm(dev, in_param, &out_param, size, 0, + MLX4_CMD_ACCESS_MEM, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox) +{ + struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf); + struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf); + int err; + int i; + + if (index & 0x1f) + return -EINVAL; + + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) + return err; + + for (i = 0; i < 32; ++i) + pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]); + + return err; +} + +static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox) +{ + int i; + int err; + + for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) { + 
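+		/* MAD_IFC returns the PKey table 32 entries per query, so walk
+		 * the table in blocks of 32.
+		 */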
err = query_pkey_block(dev, port, i, table + i, inbox, outbox); + if (err) + return err; + } + + return 0; +} +#define PORT_CAPABILITY_LOCATION_IN_SMP 20 +#define PORT_STATE_OFFSET 32 + +static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf) +{ + if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP) + return IB_PORT_ACTIVE; + else + return IB_PORT_DOWN; +} + +static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct ib_smp *smp = inbox->buf; + u32 index; + u8 port; + u8 opcode_modifier; + u16 *table; + int err; + int vidx, pidx; + int network_view; + struct mlx4_priv *priv = mlx4_priv(dev); + struct ib_smp *outsmp = outbox->buf; + __be16 *outtab = (__be16 *)(outsmp->data); + __be32 slave_cap_mask; + __be64 slave_node_guid; + + port = vhcr->in_modifier; + + /* network-view bit is for driver use only, and should not be passed to FW */ + opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */ + network_view = !!(vhcr->op_modifier & 0x8); + + if (smp->base_version == 1 && + smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && + smp->class_version == 1) { + /* host view is paravirtualized */ + if (!network_view && smp->method == IB_MGMT_METHOD_GET) { + if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) { + index = be32_to_cpu(smp->attr_mod); + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1, + sizeof(*table) * 32, GFP_KERNEL); + + if (!table) + return -ENOMEM; + /* need to get the full pkey table because the paravirtualized + * pkeys may be scattered among several pkey blocks. + */ + err = get_full_pkey_table(dev, port, table, inbox, outbox); + if (!err) { + for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) { + pidx = priv->virt2phys_pkey[slave][port - 1][vidx]; + outtab[vidx % 32] = cpu_to_be16(table[pidx]); + } + } + kfree(table); + return err; + } + if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) { + /*get the slave specific caps:*/ + /*do the command */ + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, + vhcr->in_modifier, opcode_modifier, + vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + /* modify the response for slaves */ + if (!err && slave != mlx4_master_func_num(dev)) { + u8 *state = outsmp->data + PORT_STATE_OFFSET; + + *state = (*state & 0xf0) | vf_port_state(dev, port, slave); + slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; + memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4); + } + return err; + } + if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) { + __be64 guid = mlx4_get_admin_guid(dev, slave, + port); + + /* set the PF admin guid to the FW/HW burned + * GUID, if it wasn't yet set + */ + if (slave == 0 && guid == 0) { + smp->attr_mod = 0; + err = mlx4_cmd_box(dev, + inbox->dma, + outbox->dma, + vhcr->in_modifier, + opcode_modifier, + vhcr->op, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) + return err; + mlx4_set_admin_guid(dev, + *(__be64 *)outsmp-> + data, slave, port); + } else { + memcpy(outsmp->data, &guid, 8); + } + + /* clean all other gids */ + memset(outsmp->data + 8, 0, 56); + return 0; + } + if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) { + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, + vhcr->in_modifier, opcode_modifier, + vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + if (!err) { + slave_node_guid = mlx4_get_slave_node_guid(dev, slave); + 
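+					/* report this slave's paravirtualized GUID in the
+					 * NodeInfo node_guid field (byte offset 12)
+					 */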
memcpy(outsmp->data + 12, &slave_node_guid, 8); + } + return err; + } + } + } + + /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs. + * These are the MADs used by ib verbs (such as ib_query_gids). + */ + if (slave != mlx4_master_func_num(dev) && + !mlx4_vf_smi_enabled(dev, slave, port)) { + if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && + smp->method == IB_MGMT_METHOD_GET) || network_view) { + mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n", + slave, smp->method, smp->mgmt_class, + network_view ? "Network" : "Host", + be16_to_cpu(smp->attr_id)); + return -EPERM; + } + } + + return mlx4_cmd_box(dev, inbox->dma, outbox->dma, + vhcr->in_modifier, opcode_modifier, + vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); +} + +static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + return -EPERM; +} + +int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u64 in_param; + u64 out_param; + int err; + + in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param; + out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param; + if (cmd->encode_slave_id) { + in_param &= 0xffffffffffffff00ll; + in_param |= slave; + } + + err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm, + vhcr->in_modifier, vhcr->op_modifier, vhcr->op, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + if (cmd->out_is_imm) + vhcr->out_param = out_param; + + return err; +} + +static struct mlx4_cmd_info cmd_info[] = { + { + .opcode = MLX4_CMD_QUERY_FW, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_FW_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_HCA, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_QUERY_DEV_CAP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_DEV_CAP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_FUNC_CAP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_FUNC_CAP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_ADAPTER, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_INIT_PORT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT_PORT_wrapper + }, + { + .opcode = MLX4_CMD_CLOSE_PORT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CLOSE_PORT_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_PORT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_PORT_wrapper + }, + { + .opcode = MLX4_CMD_SET_PORT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_PORT_wrapper + }, + { + .opcode = MLX4_CMD_MAP_EQ, + .has_inbox = false, 
+ .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MAP_EQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_EQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_EQ_wrapper + }, + { + .opcode = MLX4_CMD_HW_HEALTH_CHECK, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_NOP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_CONFIG_DEV, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CONFIG_DEV_wrapper + }, + { + .opcode = MLX4_CMD_ALLOC_RES, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ALLOC_RES_wrapper + }, + { + .opcode = MLX4_CMD_FREE_RES, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_FREE_RES_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_MPT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_MPT_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_MPT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_MPT_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_MPT, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_MPT_wrapper + }, + { + .opcode = MLX4_CMD_READ_MTT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_WRITE_MTT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_WRITE_MTT_wrapper + }, + { + .opcode = MLX4_CMD_SYNC_TPT, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_HW2SW_EQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_HW2SW_EQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_EQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_QUERY_EQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_CQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_CQ_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_CQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_CQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_CQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_CQ_wrapper + }, + { + .opcode = MLX4_CMD_MODIFY_CQ, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MODIFY_CQ_wrapper + }, + { + .opcode = MLX4_CMD_SW2HW_SRQ, + .has_inbox = true, + .has_outbox = false, + 
.out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_SW2HW_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_HW2SW_SRQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_HW2SW_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_SRQ, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_ARM_SRQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ARM_SRQ_wrapper + }, + { + .opcode = MLX4_CMD_RST2INIT_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = true, + .verify = NULL, + .wrapper = mlx4_RST2INIT_QP_wrapper + }, + { + .opcode = MLX4_CMD_INIT2INIT_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT2INIT_QP_wrapper + }, + { + .opcode = MLX4_CMD_INIT2RTR_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_INIT2RTR_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTR2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_RTR2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTS2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_RTS2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQERR2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SQERR2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_2ERR_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_RTS2SQD_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQD2SQD_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SQD2SQD_QP_wrapper + }, + { + .opcode = MLX4_CMD_SQD2RTS_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SQD2RTS_QP_wrapper + }, + { + .opcode = MLX4_CMD_2RST_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_2RST_QP_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_QP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_SUSPEND_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_UNSUSPEND_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_GEN_QP_wrapper + }, + { + .opcode = MLX4_CMD_UPDATE_QP, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_UPDATE_QP_wrapper + }, + { + .opcode 
= MLX4_CMD_GET_OP_REQ, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { + .opcode = MLX4_CMD_ALLOCATE_VPP, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { + .opcode = MLX4_CMD_SET_VPORT_QOS, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + { + .opcode = MLX4_CMD_CONF_SPECIAL_QP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, /* XXX verify: only demux can do this */ + .wrapper = NULL + }, + { + .opcode = MLX4_CMD_MAD_IFC, + .has_inbox = true, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_MAD_IFC_wrapper + }, + { + .opcode = MLX4_CMD_MAD_DEMUX, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, + { + .opcode = MLX4_CMD_QUERY_IF_STAT, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QUERY_IF_STAT_wrapper + }, + { + .opcode = MLX4_CMD_ACCESS_REG, + .has_inbox = true, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ACCESS_REG_wrapper, + }, + { + .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper, + }, + /* Native multicast commands are not available for guests */ + { + .opcode = MLX4_CMD_QP_ATTACH, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_ATTACH_wrapper + }, + { + .opcode = MLX4_CMD_PROMISC, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_PROMISC_wrapper + }, + /* Ethernet specific commands */ + { + .opcode = MLX4_CMD_SET_VLAN_FLTR, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_VLAN_FLTR_wrapper + }, + { + .opcode = MLX4_CMD_SET_MCAST_FLTR, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_SET_MCAST_FLTR_wrapper + }, + { + .opcode = MLX4_CMD_DUMP_ETH_STATS, + .has_inbox = false, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_DUMP_ETH_STATS_wrapper + }, + { + .opcode = MLX4_CMD_INFORM_FLR_DONE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = NULL + }, + /* flow steering commands */ + { + .opcode = MLX4_QP_FLOW_STEERING_ATTACH, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper + }, + { + .opcode = MLX4_QP_FLOW_STEERING_DETACH, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper + }, + { + .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + .has_inbox = false, + 
.has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, + { + .opcode = MLX4_CMD_VIRT_PORT_MAP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, +}; + +static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr_cmd *in_vhcr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_info *cmd = NULL; + struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr; + struct mlx4_vhcr *vhcr; + struct mlx4_cmd_mailbox *inbox = NULL; + struct mlx4_cmd_mailbox *outbox = NULL; + u64 in_param; + u64 out_param; + int ret = 0; + int i; + int err = 0; + + /* Create sw representation of Virtual HCR */ + vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL); + if (!vhcr) + return -ENOMEM; + + /* DMA in the vHCR */ + if (!in_vhcr) { + ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, + priv->mfunc.master.slave_state[slave].vhcr_dma, + ALIGN(sizeof(struct mlx4_vhcr_cmd), + MLX4_ACCESS_MEM_ALIGN), 1); + if (ret) { + if (!(dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n", + __func__, ret); + kfree(vhcr); + return ret; + } + } + + /* Fill SW VHCR fields */ + vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param); + vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param); + vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier); + vhcr->token = be16_to_cpu(vhcr_cmd->token); + vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff; + vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12); + vhcr->e_bit = vhcr_cmd->flags & (1 << 6); + + /* Lookup command */ + for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) { + if (vhcr->op == cmd_info[i].opcode) { + cmd = &cmd_info[i]; + break; + } + } + if (!cmd) { + mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n", + vhcr->op, slave); + vhcr_cmd->status = CMD_STAT_BAD_PARAM; + goto out_status; + } + + /* Read inbox */ + if (cmd->has_inbox) { + vhcr->in_param &= INBOX_MASK; + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) { + vhcr_cmd->status = CMD_STAT_BAD_SIZE; + inbox = NULL; + goto out_status; + } + + ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave, + vhcr->in_param, + MLX4_MAILBOX_SIZE, 1); + if (ret) { + if (!(dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n", + __func__, cmd->opcode); + vhcr_cmd->status = CMD_STAT_INTERNAL_ERR; + goto out_status; + } + } + + /* Apply permission and bound checks if applicable */ + if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { + mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n", + vhcr->op, slave, vhcr->in_modifier); + vhcr_cmd->status = CMD_STAT_BAD_OP; + goto out_status; + } + + /* Allocate outbox */ + if (cmd->has_outbox) { + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + vhcr_cmd->status = CMD_STAT_BAD_SIZE; + outbox = NULL; + goto out_status; + } + } + + /* Execute the command! */ + if (cmd->wrapper) { + err = cmd->wrapper(dev, slave, vhcr, inbox, outbox, + cmd); + if (cmd->out_is_imm) + vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); + } else { + in_param = cmd->has_inbox ? (u64) inbox->dma : + vhcr->in_param; + out_param = cmd->has_outbox ? 
(u64) outbox->dma : + vhcr->out_param; + err = __mlx4_cmd(dev, in_param, &out_param, + cmd->out_is_imm, vhcr->in_modifier, + vhcr->op_modifier, vhcr->op, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (cmd->out_is_imm) { + vhcr->out_param = out_param; + vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param); + } + } + + if (err) { + if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", + vhcr->op, slave, vhcr->errno, err); + vhcr_cmd->status = mlx4_errno_to_status(err); + goto out_status; + } + + + /* Write outbox if command completed successfully */ + if (cmd->has_outbox && !vhcr_cmd->status) { + ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave, + vhcr->out_param, + MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED); + if (ret) { + /* If we failed to write back the outbox after the + *command was successfully executed, we must fail this + * slave, as it is now in undefined state */ + if (!(dev->persist->state & + MLX4_DEVICE_STATE_INTERNAL_ERROR)) + mlx4_err(dev, "%s:Failed writing outbox\n", __func__); + goto out; + } + } + +out_status: + /* DMA back vhcr result */ + if (!in_vhcr) { + ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave, + priv->mfunc.master.slave_state[slave].vhcr_dma, + ALIGN(sizeof(struct mlx4_vhcr), + MLX4_ACCESS_MEM_ALIGN), + MLX4_CMD_WRAPPED); + if (ret) + mlx4_err(dev, "%s:Failed writing vhcr result\n", + __func__); + else if (vhcr->e_bit && + mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) + mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n", + slave); + } + +out: + kfree(vhcr); + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return ret; +} + +static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, + int slave, int port) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_vport_state *vp_admin; + struct mlx4_vf_immed_vlan_work *work; + struct mlx4_dev *dev = &(priv->dev); + int err; + int admin_vlan_ix = NO_INDX; + + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + if (vp_oper->state.default_vlan == vp_admin->default_vlan && + vp_oper->state.default_qos == vp_admin->default_qos && + vp_oper->state.link_state == vp_admin->link_state && + vp_oper->state.qos_vport == vp_admin->qos_vport) + return 0; + + if (!(priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) { + /* even if the UPDATE_QP command isn't supported, we still want + * to set this VF link according to the admin directive + */ + vp_oper->state.link_state = vp_admin->link_state; + return -1; + } + + mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", + slave, port); + mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", + vp_admin->default_vlan, vp_admin->default_qos, + vp_admin->link_state); + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + if (vp_oper->state.default_vlan != vp_admin->default_vlan) { + if (MLX4_VGT != vp_admin->default_vlan) { + err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, + &admin_vlan_ix); + if (err) { + kfree(work); + mlx4_warn(&priv->dev, + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + } else { + admin_vlan_ix = NO_INDX; + } + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; + mlx4_dbg(&priv->dev, + "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_admin->default_vlan), + admin_vlan_ix, slave, port); + } + + 
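+	/* the rest of the new state is applied by a work item that walks this
+	 * slave's QPs with UPDATE_QP; it is queued at the end of this function
+	 */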
/* save original vlan ix and vlan id */ + work->orig_vlan_id = vp_oper->state.default_vlan; + work->orig_vlan_ix = vp_oper->vlan_idx; + + /* handle new qos */ + if (vp_oper->state.default_qos != vp_admin->default_qos) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS; + + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN) + vp_oper->vlan_idx = admin_vlan_ix; + + vp_oper->state.default_vlan = vp_admin->default_vlan; + vp_oper->state.default_qos = vp_admin->default_qos; + vp_oper->state.link_state = vp_admin->link_state; + vp_oper->state.qos_vport = vp_admin->qos_vport; + + if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE) + work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE; + + /* iterate over QPs owned by this slave, using UPDATE_QP */ + work->port = port; + work->slave = slave; + work->qos = vp_oper->state.default_qos; + work->qos_vport = vp_oper->state.qos_vport; + work->vlan_id = vp_oper->state.default_vlan; + work->vlan_ix = vp_oper->vlan_idx; + work->priv = priv; + INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler); + queue_work(priv->mfunc.master.comm_wq, &work->work); + + return 0; +} + +static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port) +{ + struct mlx4_qos_manager *port_qos_ctl; + struct mlx4_priv *priv = mlx4_priv(dev); + + port_qos_ctl = &priv->mfunc.master.qos_ctl[port]; + bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP); + + /* Enable only default prio at PF init routine */ + set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm); +} + +static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) +{ + int i; + int err; + int num_vfs; + u16 availible_vpp; + u8 vpp_param[MLX4_NUM_UP]; + struct mlx4_qos_manager *port_qos; + struct mlx4_priv *priv = mlx4_priv(dev); + + err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + if (err) { + mlx4_info(dev, "Failed query availible VPPs\n"); + return; + } + + port_qos = &priv->mfunc.master.qos_ctl[port]; + num_vfs = (availible_vpp / + bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP)); + + for (i = 0; i < MLX4_NUM_UP; i++) { + if (test_bit(i, port_qos->priority_bm)) + vpp_param[i] = num_vfs; + } + + err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param); + if (err) { + mlx4_info(dev, "Failed allocating VPPs\n"); + return; + } + + /* Query actual allocated VPP, just to make sure */ + err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + if (err) { + mlx4_info(dev, "Failed query availible VPPs\n"); + return; + } + + port_qos->num_of_qos_vfs = num_vfs; + mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp); + + for (i = 0; i < MLX4_NUM_UP; i++) + mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i, + vpp_param[i]); +} + +static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) +{ + int port, err; + struct mlx4_vport_state *vp_admin; + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; + priv->mfunc.master.vf_oper[slave].smi_enabled[port] = + priv->mfunc.master.vf_admin[slave].enable_smi[port]; + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + vp_oper->state = *vp_admin; + if (MLX4_VGT != vp_admin->default_vlan) { + 
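+ /* VST mode: register the admin default VLAN on this port and keep
+  * the returned index in the operational state.
+  */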
err = __mlx4_register_vlan(&priv->dev, port, + vp_admin->default_vlan, &(vp_oper->vlan_idx)); + if (err) { + vp_oper->vlan_idx = NO_INDX; + mlx4_warn(&priv->dev, + "No vlan resources slave %d, port %d\n", + slave, port); + return err; + } + mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n", + (int)(vp_oper->state.default_vlan), + vp_oper->vlan_idx, slave, port); + } + if (vp_admin->spoofchk) { + vp_oper->mac_idx = __mlx4_register_mac(&priv->dev, + port, + vp_admin->mac); + if (0 > vp_oper->mac_idx) { + err = vp_oper->mac_idx; + vp_oper->mac_idx = NO_INDX; + mlx4_warn(&priv->dev, + "No mac resources slave %d, port %d\n", + slave, port); + return err; + } + mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n", + vp_oper->state.mac, vp_oper->mac_idx, slave, port); + } + } + return 0; +} + +static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave) +{ + int port; + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + + + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; + priv->mfunc.master.vf_oper[slave].smi_enabled[port] = + MLX4_VF_SMI_DISABLED; + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + if (NO_INDX != vp_oper->vlan_idx) { + __mlx4_unregister_vlan(&priv->dev, + port, vp_oper->state.default_vlan); + vp_oper->vlan_idx = NO_INDX; + } + if (NO_INDX != vp_oper->mac_idx) { + __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac); + vp_oper->mac_idx = NO_INDX; + } + } + return; +} + +static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, + u16 param, u8 toggle) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + u32 reply; + u8 is_going_down = 0; + int i; + unsigned long flags; + + slave_state[slave].comm_toggle ^= 1; + reply = (u32) slave_state[slave].comm_toggle << 31; + if (toggle != slave_state[slave].comm_toggle) { + mlx4_warn(dev, "Incorrect toggle %d from slave %d. 
*** MASTER STATE COMPROMISED ***\n", + toggle, slave); + goto reset_slave; + } + if (cmd == MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "Received reset from slave:%d\n", slave); + slave_state[slave].active = false; + slave_state[slave].old_vlan_api = false; + mlx4_master_deactivate_admin_state(priv, slave); + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { + slave_state[slave].event_eq[i].eqn = -1; + slave_state[slave].event_eq[i].token = 0; + } + /*check if we are in the middle of FLR process, + if so return "retry" status to the slave*/ + if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) + goto inform_slave_state; + + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave); + + /* write the version in the event field */ + reply |= mlx4_comm_get_version(); + + goto reset_slave; + } + /*command from slave in the middle of FLR*/ + if (cmd != MLX4_COMM_CMD_RESET && + MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { + mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n", + slave, cmd); + return; + } + + switch (cmd) { + case MLX4_COMM_CMD_VHCR0: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET) + goto reset_slave; + slave_state[slave].vhcr_dma = ((u64) param) << 48; + priv->mfunc.master.slave_state[slave].cookie = 0; + break; + case MLX4_COMM_CMD_VHCR1: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) + goto reset_slave; + slave_state[slave].vhcr_dma |= ((u64) param) << 32; + break; + case MLX4_COMM_CMD_VHCR2: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1) + goto reset_slave; + slave_state[slave].vhcr_dma |= ((u64) param) << 16; + break; + case MLX4_COMM_CMD_VHCR_EN: + if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) + goto reset_slave; + slave_state[slave].vhcr_dma |= param; + if (mlx4_master_activate_admin_state(priv, slave)) + goto reset_slave; + slave_state[slave].active = true; + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave); + break; + case MLX4_COMM_CMD_VHCR_POST: + if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) && + (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) { + mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n", + slave, cmd, slave_state[slave].last_cmd); + goto reset_slave; + } + + mutex_lock(&priv->cmd.slave_cmd_mutex); + if (mlx4_master_process_vhcr(dev, slave, NULL)) { + mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n", + slave); + mutex_unlock(&priv->cmd.slave_cmd_mutex); + goto reset_slave; + } + mutex_unlock(&priv->cmd.slave_cmd_mutex); + break; + default: + mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave); + goto reset_slave; + } + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + if (!slave_state[slave].is_slave_going_down) + slave_state[slave].last_cmd = cmd; + else + is_going_down = 1; + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + if (is_going_down) { + mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n", + cmd, slave); + return; + } + __raw_writel((__force u32) cpu_to_be32(reply), + &priv->mfunc.comm[slave].slave_read); + mmiowb(); + + return; + +reset_slave: + /* cleanup any slave resources */ + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_delete_all_resources_for_slave(dev, slave); + + if (cmd != MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n", + slave, cmd); + /* Turn on internal error letting slave reset itself immeditaly, + * otherwise it 
might take till timeout on command is passed + */ + reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR); + } + + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + if (!slave_state[slave].is_slave_going_down) + slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET; + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + /*with slave in the middle of flr, no need to clean resources again.*/ +inform_slave_state: + memset(&slave_state[slave].event_eq, 0, + sizeof(struct mlx4_slave_event_eq_info)); + __raw_writel((__force u32) cpu_to_be32(reply), + &priv->mfunc.comm[slave].slave_read); + wmb(); +} + +/* master command processing */ +void mlx4_master_comm_channel(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, + struct mlx4_mfunc_master_ctx, + comm_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = + container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + __be32 *bit_vec; + u32 comm_cmd; + u32 vec; + int i, j, slave; + int toggle; + int served = 0; + int reported = 0; + u32 slt; + + bit_vec = master->comm_arm_bit_vector; + for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) { + vec = be32_to_cpu(bit_vec[i]); + for (j = 0; j < 32; j++) { + if (!(vec & (1 << j))) + continue; + ++reported; + slave = (i * 32) + j; + comm_cmd = swab32(readl( + &mfunc->comm[slave].slave_write)); + slt = swab32(readl(&mfunc->comm[slave].slave_read)) + >> 31; + toggle = comm_cmd >> 31; + if (toggle != slt) { + if (master->slave_state[slave].comm_toggle + != slt) { + pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n", + slave, slt, + master->slave_state[slave].comm_toggle); + master->slave_state[slave].comm_toggle = + slt; + } + mlx4_master_do_cmd(dev, slave, + comm_cmd >> 16 & 0xff, + comm_cmd & 0xffff, toggle); + ++served; + } + } + } + + if (reported && reported != served) + mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n", + reported, served); + + if (mlx4_ARM_COMM_CHANNEL(dev)) + mlx4_warn(dev, "Failed to arm comm channel events\n"); +} + +static int sync_toggles(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u32 wr_toggle; + u32 rd_toggle; + unsigned long end; + + wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)); + if (wr_toggle == 0xffffffff) + end = jiffies + msecs_to_jiffies(30000); + else + end = jiffies + msecs_to_jiffies(5000); + + while (time_before(jiffies, end)) { + rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); + if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { + /* PCI might be offline */ + msleep(100); + wr_toggle = swab32(readl(&priv->mfunc.comm-> + slave_write)); + continue; + } + + if (rd_toggle >> 31 == wr_toggle >> 31) { + priv->cmd.comm_toggle = rd_toggle >> 31; + return 0; + } + + cond_resched(); + } + + /* + * we could reach here if for example the previous VM using this + * function misbehaved and left the channel with unsynced state. 
We + * should fix this here and give this VM a chance to use a properly + * synced channel + */ + mlx4_warn(dev, "recovering from previously mis-behaved VM\n"); + __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read); + __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write); + priv->cmd.comm_toggle = 0; + + return 0; +} + +int mlx4_multi_func_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state; + int i, j, err, port; + + if (mlx4_is_master(dev)) + priv->mfunc.comm = + ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.comm_bar) + + priv->fw.comm_base, MLX4_COMM_PAGESIZE); + else + priv->mfunc.comm = + ioremap(pci_resource_start(dev->persist->pdev, 2) + + MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); + if (!priv->mfunc.comm) { + mlx4_err(dev, "Couldn't map communication vector\n"); + goto err_vhcr; + } + + if (mlx4_is_master(dev)) { + struct mlx4_vf_oper_state *vf_oper; + struct mlx4_vf_admin_state *vf_admin; + + priv->mfunc.master.slave_state = + kzalloc(dev->num_slaves * + sizeof(struct mlx4_slave_state), GFP_KERNEL); + if (!priv->mfunc.master.slave_state) + goto err_comm; + + priv->mfunc.master.vf_admin = + kzalloc(dev->num_slaves * + sizeof(struct mlx4_vf_admin_state), GFP_KERNEL); + if (!priv->mfunc.master.vf_admin) + goto err_comm_admin; + + priv->mfunc.master.vf_oper = + kzalloc(dev->num_slaves * + sizeof(struct mlx4_vf_oper_state), GFP_KERNEL); + if (!priv->mfunc.master.vf_oper) + goto err_comm_oper; + + for (i = 0; i < dev->num_slaves; ++i) { + vf_admin = &priv->mfunc.master.vf_admin[i]; + vf_oper = &priv->mfunc.master.vf_oper[i]; + s_state = &priv->mfunc.master.slave_state[i]; + s_state->last_cmd = MLX4_COMM_CMD_RESET; + mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); + for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) + s_state->event_eq[j].eqn = -1; + __raw_writel((__force u32) 0, + &priv->mfunc.comm[i].slave_write); + __raw_writel((__force u32) 0, + &priv->mfunc.comm[i].slave_read); + mmiowb(); + for (port = 1; port <= MLX4_MAX_PORTS; port++) { + struct mlx4_vport_state *admin_vport; + struct mlx4_vport_state *oper_vport; + + s_state->vlan_filter[port] = + kzalloc(sizeof(struct mlx4_vlan_fltr), + GFP_KERNEL); + if (!s_state->vlan_filter[port]) { + if (--port) + kfree(s_state->vlan_filter[port]); + goto err_slaves; + } + + admin_vport = &vf_admin->vport[port]; + oper_vport = &vf_oper->vport[port].state; + INIT_LIST_HEAD(&s_state->mcast_filters[port]); + admin_vport->default_vlan = MLX4_VGT; + oper_vport->default_vlan = MLX4_VGT; + admin_vport->qos_vport = + MLX4_VPP_DEFAULT_VPORT; + oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT; + vf_oper->vport[port].vlan_idx = NO_INDX; + vf_oper->vport[port].mac_idx = NO_INDX; + mlx4_set_random_admin_guid(dev, i, port); + } + spin_lock_init(&s_state->lock); + } + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) { + for (port = 1; port <= dev->caps.num_ports; port++) { + if (mlx4_is_eth(dev, port)) { + mlx4_set_default_port_qos(dev, port); + mlx4_allocate_port_vpps(dev, port); + } + } + } + + memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size); + priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD; + INIT_WORK(&priv->mfunc.master.comm_work, + mlx4_master_comm_channel); + INIT_WORK(&priv->mfunc.master.slave_event_work, + mlx4_gen_slave_eqe); + INIT_WORK(&priv->mfunc.master.slave_flr_event_work, + mlx4_master_handle_slave_flr); + spin_lock_init(&priv->mfunc.master.slave_state_lock); + spin_lock_init(&priv->mfunc.master.slave_eq.event_lock); + 
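+ /* The master's comm-channel work is handled on a dedicated
+  * single-threaded workqueue created here.
+  */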
priv->mfunc.master.comm_wq = + create_singlethread_workqueue("mlx4_comm"); + if (!priv->mfunc.master.comm_wq) + goto err_slaves; + + if (mlx4_init_resource_tracker(dev)) + goto err_thread; + + } else { + err = sync_toggles(dev); + if (err) { + mlx4_err(dev, "Couldn't sync toggles\n"); + goto err_comm; + } + } + return 0; + +err_thread: + flush_workqueue(priv->mfunc.master.comm_wq); + destroy_workqueue(priv->mfunc.master.comm_wq); +err_slaves: + while (--i) { + for (port = 1; port <= MLX4_MAX_PORTS; port++) + kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); + } + kfree(priv->mfunc.master.vf_oper); +err_comm_oper: + kfree(priv->mfunc.master.vf_admin); +err_comm_admin: + kfree(priv->mfunc.master.slave_state); +err_comm: + iounmap(priv->mfunc.comm); +err_vhcr: + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + priv->mfunc.vhcr, + priv->mfunc.vhcr_dma); + priv->mfunc.vhcr = NULL; + return -ENOMEM; +} + +int mlx4_cmd_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int flags = 0; + + if (!priv->cmd.initialized) { + mutex_init(&priv->cmd.slave_cmd_mutex); + sema_init(&priv->cmd.poll_sem, 1); + priv->cmd.use_events = 0; + priv->cmd.toggle = 1; + priv->cmd.initialized = 1; + flags |= MLX4_CMD_CLEANUP_STRUCT; + } + + if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { + priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev, + 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); + if (!priv->cmd.hcr) { + mlx4_err(dev, "Couldn't map command register\n"); + goto err; + } + flags |= MLX4_CMD_CLEANUP_HCR; + } + + if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { + priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, + &priv->mfunc.vhcr_dma, + GFP_KERNEL); + if (!priv->mfunc.vhcr) + goto err; + + flags |= MLX4_CMD_CLEANUP_VHCR; + } + + if (!priv->cmd.pool) { + priv->cmd.pool = pci_pool_create("mlx4_cmd", + dev->persist->pdev, + MLX4_MAILBOX_SIZE, + MLX4_MAILBOX_SIZE, 0); + if (!priv->cmd.pool) + goto err; + + flags |= MLX4_CMD_CLEANUP_POOL; + } + + return 0; + +err: + mlx4_cmd_cleanup(dev, flags); + return -ENOMEM; +} + +void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int slave; + u32 slave_read; + + /* Report an internal error event to all + * communication channels. + */ + for (slave = 0; slave < dev->num_slaves; slave++) { + slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read)); + slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR; + __raw_writel((__force u32)cpu_to_be32(slave_read), + &priv->mfunc.comm[slave].slave_read); + /* Make sure that our comm channel write doesn't + * get mixed in with writes from another CPU. 
+ */ + mmiowb(); + } +} + +void mlx4_multi_func_cleanup(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, port; + + if (mlx4_is_master(dev)) { + flush_workqueue(priv->mfunc.master.comm_wq); + destroy_workqueue(priv->mfunc.master.comm_wq); + for (i = 0; i < dev->num_slaves; i++) { + for (port = 1; port <= MLX4_MAX_PORTS; port++) + kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); + } + kfree(priv->mfunc.master.slave_state); + kfree(priv->mfunc.master.vf_admin); + kfree(priv->mfunc.master.vf_oper); + dev->num_slaves = 0; + } + + iounmap(priv->mfunc.comm); +} + +void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) { + pci_pool_destroy(priv->cmd.pool); + priv->cmd.pool = NULL; + } + + if (!mlx4_is_slave(dev) && priv->cmd.hcr && + (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) { + iounmap(priv->cmd.hcr); + priv->cmd.hcr = NULL; + } + if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && + (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + priv->mfunc.vhcr, priv->mfunc.vhcr_dma); + priv->mfunc.vhcr = NULL; + } + if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT)) + priv->cmd.initialized = 0; +} + +/* + * Switch to using events to issue FW commands (can only be called + * after event queue for command events has been initialized). + */ +int mlx4_cmd_use_events(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + int err = 0; + + priv->cmd.context = kmalloc(priv->cmd.max_cmds * + sizeof (struct mlx4_cmd_context), + GFP_KERNEL); + if (!priv->cmd.context) + return -ENOMEM; + + for (i = 0; i < priv->cmd.max_cmds; ++i) { + priv->cmd.context[i].token = i; + priv->cmd.context[i].next = i + 1; + /* To support fatal error flow, initialize all + * cmd contexts to allow simulating completions + * with complete() at any time. 
+ */ + init_completion(&priv->cmd.context[i].done); + } + + priv->cmd.context[priv->cmd.max_cmds - 1].next = -1; + priv->cmd.free_head = 0; + + sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds); + spin_lock_init(&priv->cmd.context_lock); + + for (priv->cmd.token_mask = 1; + priv->cmd.token_mask < priv->cmd.max_cmds; + priv->cmd.token_mask <<= 1) + ; /* nothing */ + --priv->cmd.token_mask; + + down(&priv->cmd.poll_sem); + priv->cmd.use_events = 1; + + return err; +} + +/* + * Switch back to polling (used when shutting down the device) + */ +void mlx4_cmd_use_polling(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + priv->cmd.use_events = 0; + + for (i = 0; i < priv->cmd.max_cmds; ++i) + down(&priv->cmd.event_sem); + + kfree(priv->cmd.context); + + up(&priv->cmd.poll_sem); +} + +struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL, + &mailbox->dma); + if (!mailbox->buf) { + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + + return mailbox; +} +EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox); + +void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox) +{ + if (!mailbox) + return; + + pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} +EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox); + +u32 mlx4_comm_get_version(void) +{ + return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER; +} + +static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf) +{ + if ((vf < 0) || (vf >= dev->persist->num_vfs)) { + mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", + vf, dev->persist->num_vfs); + return -EINVAL; + } + + return vf+1; +} + +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) +{ + if (slave < 1 || slave > dev->persist->num_vfs) { + mlx4_err(dev, + "Bad slave number:%d (number of activated slaves: %lu)\n", + slave, dev->num_slaves); + return -EINVAL; + } + return slave - 1; +} + +void mlx4_cmd_wake_completions(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_context *context; + int i; + + spin_lock(&priv->cmd.context_lock); + if (priv->cmd.context) { + for (i = 0; i < priv->cmd.max_cmds; ++i) { + context = &priv->cmd.context[i]; + context->fw_status = CMD_STAT_INTERNAL_ERR; + context->result = + mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); + complete(&context->done); + } + } + spin_unlock(&priv->cmd.context_lock); +} + +struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + int vf; + + bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS); + + if (slave == 0) { + bitmap_fill(actv_ports.ports, dev->caps.num_ports); + return actv_ports; + } + + vf = mlx4_get_vf_indx(dev, slave); + if (vf < 0) + return actv_ports; + + bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1, + min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports, + dev->caps.num_ports)); + + return actv_ports; +} +EXPORT_SYMBOL_GPL(mlx4_get_active_ports); + +int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port) +{ + unsigned n; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port <= 0 || port > m) + return -EINVAL; + + n = 
find_first_bit(actv_ports.ports, dev->caps.num_ports); + if (port <= n) + port = n + 1; + + return port; +} +EXPORT_SYMBOL_GPL(mlx4_slave_convert_port); + +int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + if (test_bit(port - 1, actv_ports.ports)) + return port - + find_first_bit(actv_ports.ports, dev->caps.num_ports); + + return -1; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev, + int port) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + if (port <= 0 || port > dev->caps.num_ports) + return slaves_pport; + + for (i = 0; i < dev->persist->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if (test_bit(port - 1, actv_ports.ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( + struct mlx4_dev *dev, + const struct mlx4_active_ports *crit_ports) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + for (i = 0; i < dev->persist->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if (bitmap_equal(crit_ports->ports, actv_ports.ports, + dev->caps.num_ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); + +static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + 1; + int max_port = min_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port < min_port) + port = min_port; + else if (port >= max_port) + port = max_port - 1; + + return port; +} + +static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port, + int max_tx_rate) +{ + int i; + int err; + struct mlx4_qos_manager *port_qos; + struct mlx4_dev *dev = &priv->dev; + struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP]; + + port_qos = &priv->mfunc.master.qos_ctl[port]; + memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP); + + if (slave > port_qos->num_of_qos_vfs) { + mlx4_info(dev, "No availible VPP resources for this VF\n"); + return -EINVAL; + } + + /* Query for default QoS values from Vport 0 is needed */ + err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos); + if (err) { + mlx4_info(dev, "Failed to query Vport 0 QoS values\n"); + return err; + } + + for (i = 0; i < MLX4_NUM_UP; i++) { + if (test_bit(i, port_qos->priority_bm) && max_tx_rate) { + vpp_qos[i].max_avg_bw = max_tx_rate; + vpp_qos[i].enable = 1; + } else { + /* if user supplied tx_rate == 0, meaning no rate limit + * configuration is required. so we are leaving the + * value of max_avg_bw as queried from Vport 0. 
+ */ + vpp_qos[i].enable = 0; + } + } + + err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos); + if (err) { + mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave); + return err; + } + + return 0; +} + +static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port, + struct mlx4_vport_state *vf_admin) +{ + struct mlx4_qos_manager *info; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) + return false; + + info = &priv->mfunc.master.qos_ctl[port]; + + if (vf_admin->default_vlan != MLX4_VGT && + test_bit(vf_admin->default_qos, info->priority_bm)) + return true; + + return false; +} + +static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port, + struct mlx4_vport_state *vf_admin, + int vlan, int qos) +{ + struct mlx4_vport_state dummy_admin = {0}; + + if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) || + !vf_admin->tx_rate) + return true; + + dummy_admin.default_qos = qos; + dummy_admin.default_vlan = vlan; + + /* VF wants to move to other VST state which is valid with current + * rate limit. Either differnt default vlan in VST or other + * supported QoS priority. Otherwise we don't allow this change when + * the TX rate is still configured. + */ + if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin)) + return true; + + mlx4_info(dev, "Cannot change VF state to %s while rate is set\n", + (vlan == MLX4_VGT) ? "VGT" : "VST"); + + if (vlan != MLX4_VGT) + mlx4_info(dev, "VST priority %d not supported for QoS\n", qos); + + mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n"); + + return false; +} + +int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if (!mlx4_is_master(dev)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->mac = mac; + mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", + vf, port, s_info->mac); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_mac); + + +int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *vf_admin; + int slave; + + if ((!mlx4_is_master(dev)) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL)) + return -EPROTONOSUPPORT; + + if ((vlan > 4095) || (qos > 7)) + return -EINVAL; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos)) + return -EPERM; + + if ((0 == vlan) && (0 == qos)) + vf_admin->default_vlan = MLX4_VGT; + else + vf_admin->default_vlan = vlan; + vf_admin->default_qos = qos; + + /* If rate was configured prior to VST, we saved the configured rate + * in vf_admin->rate and now, if priority supported we enforce the QoS + */ + if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) && + vf_admin->tx_rate) + vf_admin->qos_vport = slave; + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_info(dev, + "updating vf %d port %d config will take effect on next VF restart\n", + vf, port); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); + +int mlx4_set_vf_rate(struct 
mlx4_dev *dev, int port, int vf, int min_tx_rate, + int max_tx_rate) +{ + int err; + int slave; + struct mlx4_vport_state *vf_admin; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) + return -EPROTONOSUPPORT; + + if (min_tx_rate) { + mlx4_info(dev, "Minimum BW share not supported\n"); + return -EPROTONOSUPPORT; + } + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; + + err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate); + if (err) { + mlx4_info(dev, "vf %d failed to set rate %d\n", vf, + max_tx_rate); + return err; + } + + vf_admin->tx_rate = max_tx_rate; + /* if VF is not in supported mode (VST with supported prio), + * we do not change vport configuration for its QPs, but save + * the rate, so it will be enforced when it moves to supported + * mode next time. + */ + if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) { + mlx4_info(dev, + "rate set for VF %d when not in valid state\n", vf); + + if (vf_admin->default_vlan != MLX4_VGT) + mlx4_info(dev, "VST priority not supported by QoS\n"); + else + mlx4_info(dev, "VF in VGT mode (needed VST)\n"); + + mlx4_info(dev, + "rate %d take affect when VF moves to valid state\n", + max_tx_rate); + return 0; + } + + /* If user sets rate 0 assigning default vport for its QPs */ + vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT; + + if (priv->mfunc.master.slave_state[slave].active && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) + mlx4_master_immediate_activate_vlan_qos(priv, slave, port); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_rate); + + /* mlx4_get_slave_default_vlan - + * return true if VST ( default vlan) + * if VST, will return vlan & qos (if not NULL) + */ +bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, + u16 *vlan, u8 *qos) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_priv *priv; + + priv = mlx4_priv(dev); + port = mlx4_slaves_closest_port(dev, slave, port); + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + + if (MLX4_VGT != vp_oper->state.default_vlan) { + if (vlan) + *vlan = vp_oper->state.default_vlan; + if (qos) + *qos = vp_oper->state.default_qos; + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan); + +int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if ((!mlx4_is_master(dev)) || + !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->spoofchk = setting; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk); + +int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + + if (!mlx4_is_master(dev)) + return -EPROTONOSUPPORT; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + ivf->vf = vf; + + /* need to convert it to a func */ + ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff); + ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff); + ivf->mac[2] = 
((s_info->mac >> (3*8)) & 0xff); + ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff); + ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff); + ivf->mac[5] = ((s_info->mac) & 0xff); + + ivf->vlan = s_info->default_vlan; + ivf->qos = s_info->default_qos; + + if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info)) + ivf->max_tx_rate = s_info->tx_rate; + else + ivf->max_tx_rate = 0; + + ivf->min_tx_rate = 0; + ivf->spoofchk = s_info->spoofchk; + ivf->linkstate = s_info->link_state; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_get_vf_config); + +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_vport_state *s_info; + int slave; + u8 link_stat_event; + + slave = mlx4_get_slave_indx(dev, vf); + if (slave < 0) + return -EINVAL; + + port = mlx4_slaves_closest_port(dev, slave, port); + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + /* get current link state */ + if (!priv->sense.do_sense_port[port]) + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + else + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + case IFLA_VF_LINK_STATE_ENABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE; + break; + + case IFLA_VF_LINK_STATE_DISABLE: + link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN; + break; + + default: + mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n", + link_state, slave, port); + return -EINVAL; + }; + s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; + s_info->link_state = link_state; + + /* send event */ + mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event); + + if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port)) + mlx4_dbg(dev, + "updating vf %d port %d no link state HW enforcment\n", + vf, port); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state); + +int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave < 1 || slave >= dev->num_slaves || + port < 1 || port > MLX4_MAX_PORTS) + return 0; + + return priv->mfunc.master.vf_oper[slave].smi_enabled[port] == + MLX4_VF_SMI_ENABLED; +} +EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled); + +int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave == mlx4_master_func_num(dev)) + return 1; + + if (slave < 1 || slave >= dev->num_slaves || + port < 1 || port > MLX4_MAX_PORTS) + return 0; + + return priv->mfunc.master.vf_admin[slave].enable_smi[port] == + MLX4_VF_SMI_ENABLED; +} +EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin); + +int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, + int enabled) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave == mlx4_master_func_num(dev)) + return 0; + + if (slave < 1 || slave >= dev->num_slaves || + port < 1 || port > MLX4_MAX_PORTS || + enabled < 0 || enabled > 1) + return -EINVAL; + + priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled; + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin); diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c new file mode 100644 index 000000000..e71f31387 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include +#include + +#include "mlx4.h" +#include "icm.h" + +#define MLX4_CQ_STATUS_OK ( 0 << 28) +#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28) +#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28) +#define MLX4_CQ_FLAG_CC ( 1 << 18) +#define MLX4_CQ_FLAG_OI ( 1 << 17) +#define MLX4_CQ_STATE_ARMED ( 9 << 8) +#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8) +#define MLX4_EQ_STATE_FIRED (10 << 8) + +#define TASKLET_MAX_TIME 2 +#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME) + +void mlx4_cq_tasklet_cb(unsigned long data) +{ + unsigned long flags; + unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES; + struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data; + struct mlx4_cq *mcq, *temp; + + spin_lock_irqsave(&ctx->lock, flags); + list_splice_tail_init(&ctx->list, &ctx->process_list); + spin_unlock_irqrestore(&ctx->lock, flags); + + list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) { + list_del_init(&mcq->tasklet_ctx.list); + mcq->tasklet_ctx.comp(mcq); + if (atomic_dec_and_test(&mcq->refcount)) + complete(&mcq->free); + if (time_after(jiffies, end)) + break; + } + + if (!list_empty(&ctx->process_list)) + tasklet_schedule(&ctx->task); +} + +static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) +{ + unsigned long flags; + struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; + + spin_lock_irqsave(&tasklet_ctx->lock, flags); + /* When migrating CQs between EQs will be implemented, please note + * that you need to sync this point. It is possible that + * while migrating a CQ, completions on the old EQs could + * still arrive. 
+ */ + if (list_empty_careful(&cq->tasklet_ctx.list)) { + atomic_inc(&cq->refcount); + list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); + } + spin_unlock_irqrestore(&tasklet_ctx->lock, flags); +} + +void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) +{ + struct mlx4_cq *cq; + + cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, + cqn & (dev->caps.num_cqs - 1)); + if (!cq) { + mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); +} + +void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) +{ + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + struct mlx4_cq *cq; + + spin_lock(&cq_table->lock); + + cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&cq_table->lock); + + if (!cq) { + mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num) +{ + return mlx4_cmd(dev, mailbox->dma, cq_num, 0, + MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num, u32 opmod) +{ + return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int cq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, + cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, + u16 count, u16 period) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_cq_context *cq_context; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cq_context = mailbox->buf; + cq_context->cq_max_count = cpu_to_be16(count); + cq_context->cq_period = cpu_to_be16(period); + + err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_cq_modify); + +int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, + int entries, struct mlx4_mtt *mtt) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_cq_context *cq_context; + u64 mtt_addr; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cq_context = mailbox->buf; + cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24); + cq_context->log_page_size = mtt->page_shift - 12; + mtt_addr = mlx4_mtt_addr(dev, mtt); + cq_context->mtt_base_addr_h = mtt_addr >> 32; + cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + + err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_cq_resize); + +int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + int err; + + *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); + if (*cqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL); + if (err) + goto err_put; + return 0; + +err_put: + mlx4_table_put(dev, &cq_table->table, *cqn); + +err_out: 
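+ /* Release the CQN back to the bitmap on any ICM setup failure */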
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR); + return err; +} + +static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + else { + *cqn = get_param_l(&out_param); + return 0; + } + } + return __mlx4_cq_alloc_icm(dev, cqn); +} + +void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + + mlx4_table_put(dev, &cq_table->cmpt_table, cqn); + mlx4_table_put(dev, &cq_table->table, cqn); + mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR); +} + +static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, cqn); + err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed freeing cq:%d\n", cqn); + } else + __mlx4_cq_free_icm(dev, cqn); +} + +int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, + struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, + struct mlx4_cq *cq, unsigned vector, int collapsed, + int timestamp_en) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_cq_context *cq_context; + u64 mtt_addr; + int err; + + if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) + return -EINVAL; + + cq->vector = vector; + + err = mlx4_cq_alloc_icm(dev, &cq->cqn); + if (err) + return err; + + spin_lock_irq(&cq_table->lock); + err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); + spin_unlock_irq(&cq_table->lock); + if (err) + goto err_icm; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_radix; + } + + cq_context = mailbox->buf; + cq_context->flags = cpu_to_be32(!!collapsed << 18); + if (timestamp_en) + cq_context->flags |= cpu_to_be32(1 << 19); + + cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); + cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; + cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, mtt); + cq_context->mtt_base_addr_h = mtt_addr >> 32; + cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + cq_context->db_rec_addr = cpu_to_be64(db_rec); + + err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + goto err_radix; + + cq->cons_index = 0; + cq->arm_sn = 1; + cq->uar = uar; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + cq->comp = mlx4_add_cq_to_tasklet; + cq->tasklet_ctx.priv = + &priv->eq_table.eq[cq->vector].tasklet_ctx; + INIT_LIST_HEAD(&cq->tasklet_ctx.list); + + + cq->irq = priv->eq_table.eq[cq->vector].irq; + return 0; + +err_radix: + spin_lock_irq(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock_irq(&cq_table->lock); + +err_icm: + mlx4_cq_free_icm(dev, cq->cqn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_cq_alloc); + +void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cq_table *cq_table = &priv->cq_table; + int err; + + err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn); + if (err) + mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); + + 
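+ /* Ensure no completion handler is still running on this CQ's EQ
+  * interrupt before removing the CQ from the radix tree and waiting
+  * for the last reference to be dropped.
+  */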
synchronize_irq(priv->eq_table.eq[cq->vector].irq); + + spin_lock_irq(&cq_table->lock); + radix_tree_delete(&cq_table->tree, cq->cqn); + spin_unlock_irq(&cq_table->lock); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + mlx4_cq_free_icm(dev, cq->cqn); +} +EXPORT_SYMBOL_GPL(mlx4_cq_free); + +int mlx4_init_cq_table(struct mlx4_dev *dev) +{ + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; + int err; + + spin_lock_init(&cq_table->lock); + INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; + + err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, + dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); + if (err) + return err; + + return 0; +} + +void mlx4_cleanup_cq_table(struct mlx4_dev *dev) +{ + if (mlx4_is_slave(dev)) + return; + /* Nothing to do to clean up radix_tree */ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c new file mode 100644 index 000000000..8a083d73e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2012 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include + +#include "mlx4_en.h" + +/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) + */ +static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) +{ + struct mlx4_en_dev *mdev = + container_of(tc, struct mlx4_en_dev, cycles); + struct mlx4_dev *dev = mdev->dev; + + return mlx4_read_clock(dev) & tc->mask; +} + +u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe) +{ + u64 hi, lo; + struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe; + + lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo); + hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16; + + return hi | lo; +} + +void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, + struct skb_shared_hwtstamps *hwts, + u64 timestamp) +{ + unsigned long flags; + u64 nsec; + + read_lock_irqsave(&mdev->clock_lock, flags); + nsec = timecounter_cyc2time(&mdev->clock, timestamp); + read_unlock_irqrestore(&mdev->clock_lock, flags); + + memset(hwts, 0, sizeof(struct skb_shared_hwtstamps)); + hwts->hwtstamp = ns_to_ktime(nsec); +} + +/** + * mlx4_en_remove_timestamp - disable PTP device + * @mdev: board private structure + * + * Stop the PTP support. + **/ +void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev) +{ + if (mdev->ptp_clock) { + ptp_clock_unregister(mdev->ptp_clock); + mdev->ptp_clock = NULL; + mlx4_info(mdev, "removed PHC\n"); + } +} + +void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) +{ + bool timeout = time_is_before_jiffies(mdev->last_overflow_check + + mdev->overflow_period); + unsigned long flags; + + if (timeout) { + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_read(&mdev->clock); + write_unlock_irqrestore(&mdev->clock_lock, flags); + mdev->last_overflow_check = jiffies; + } +} + +/** + * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated delta from + * the base frequency. + **/ +static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + u64 adj; + u32 diff, mult; + int neg_adj = 0; + unsigned long flags; + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + mult = mdev->nominal_c_mult; + adj = mult; + adj *= delta; + diff = div_u64(adj, 1000000000ULL); + + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_read(&mdev->clock); + mdev->cycles.mult = neg_adj ? mult - diff : mult + diff; + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. + **/ +static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + unsigned long flags; + + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_adjtime(&mdev->clock, delta); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. 
+ **/ +static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + unsigned long flags; + u64 ns; + + write_lock_irqsave(&mdev->clock_lock, flags); + ns = timecounter_read(&mdev->clock); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * mlx4_en_phc_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset the timecounter to use a new base value instead of the kernel + * wall timer value. + **/ +static int mlx4_en_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + u64 ns = timespec64_to_ns(ts); + unsigned long flags; + + /* reset the timecounter */ + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_init(&mdev->clock, &mdev->cycles, ns); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + **/ +static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info mlx4_en_ptp_clock_info = { + .owner = THIS_MODULE, + .max_adj = 100000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = mlx4_en_phc_adjfreq, + .adjtime = mlx4_en_phc_adjtime, + .gettime64 = mlx4_en_phc_gettime, + .settime64 = mlx4_en_phc_settime, + .enable = mlx4_en_phc_enable, +}; + +void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) +{ + struct mlx4_dev *dev = mdev->dev; + unsigned long flags; + u64 ns, zero = 0; + + rwlock_init(&mdev->clock_lock); + + memset(&mdev->cycles, 0, sizeof(mdev->cycles)); + mdev->cycles.read = mlx4_en_read_clock; + mdev->cycles.mask = CLOCKSOURCE_MASK(48); + /* Using shift to make calculation more accurate. Since current HW + * clock frequency is 427 MHz, and cycles are given using a 48 bits + * register, the biggest shift when calculating using u64, is 14 + * (max_cycles * multiplier < 2^64) + */ + mdev->cycles.shift = 14; + mdev->cycles.mult = + clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); + mdev->nominal_c_mult = mdev->cycles.mult; + + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_init(&mdev->clock, &mdev->cycles, + ktime_to_ns(ktime_get_real())); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + /* Calculate period in seconds to call the overflow watchdog - to make + * sure counter is checked at least once every wrap around. 
+ */ + ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero); + do_div(ns, NSEC_PER_SEC / 2 / HZ); + mdev->overflow_period = ns; + + /* Configure the PHC */ + mdev->ptp_clock_info = mlx4_en_ptp_clock_info; + snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp"); + + mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, + &mdev->pdev->dev); + if (IS_ERR(mdev->ptp_clock)) { + mdev->ptp_clock = NULL; + mlx4_err(mdev, "ptp_clock_register failed\n"); + } else { + mlx4_info(mdev, "registered PHC clock\n"); + } + +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c new file mode 100644 index 000000000..22da4d0d0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include + +#include "mlx4_en.h" + +static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) +{ + return; +} + + +int mlx4_en_create_cq(struct mlx4_en_priv *priv, + struct mlx4_en_cq **pcq, + int entries, int ring, enum cq_type mode, + int node) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq; + int err; + + cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); + if (!cq) { + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) { + en_err(priv, "Failed to allocate CQ structure\n"); + return -ENOMEM; + } + } + + cq->size = entries; + cq->buf_size = cq->size * mdev->dev->caps.cqe_size; + + cq->ring = ring; + cq->is_tx = mode; + + /* Allocate HW buffers on provided NUMA node. + * dev->numa_node is used in mtt range allocation flow. 
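[Editor's note] The overflow watchdog configured above exists because the hardware counter is only 48 bits wide: the timecounter must be sampled at least once per wrap to stay monotonic. A quick userspace sketch of the wrap interval, assuming the ~427 MHz core clock mentioned in the comment above (the frequency is illustrative):

/* Sketch: wrap time of a 48-bit free-running counter. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = ((uint64_t)1 << 48) - 1;        /* CLOCKSOURCE_MASK(48) */
        uint64_t hz = 427000000ULL;                     /* ~427 MHz core clock */
        uint64_t wrap_seconds = (mask + 1) / hz;

        printf("48-bit counter wraps every %llu s (~%.1f days)\n",
               (unsigned long long)wrap_seconds,
               (double)wrap_seconds / 86400.0);
        return 0;
}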
+ */ + set_dev_node(&mdev->dev->persist->pdev->dev, node); + err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, + cq->buf_size, 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); + if (err) + goto err_cq; + + err = mlx4_en_map_buffer(&cq->wqres.buf); + if (err) + goto err_res; + + cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf; + *pcq = cq; + + return 0; + +err_res: + mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); +err_cq: + kfree(cq); + *pcq = NULL; + return err; +} + +int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + int cq_idx) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + char name[25]; + int timestamp_en = 0; + struct cpu_rmap *rmap = +#ifdef CONFIG_RFS_ACCEL + priv->dev->rx_cpu_rmap; +#else + NULL; +#endif + + cq->dev = mdev->pndev[priv->port]; + cq->mcq.set_ci_db = cq->wqres.db.db; + cq->mcq.arm_db = cq->wqres.db.db + 1; + *cq->mcq.set_ci_db = 0; + *cq->mcq.arm_db = 0; + memset(cq->buf, 0, cq->buf_size); + + if (cq->is_tx == RX) { + if (mdev->dev->caps.comp_pool) { + if (!cq->vector) { + sprintf(name, "%s-%d", priv->dev->name, + cq->ring); + /* Set IRQ for specific name (per ring) */ + if (mlx4_assign_eq(mdev->dev, name, rmap, + &cq->vector)) { + cq->vector = (cq->ring + 1 + priv->port) + % mdev->dev->caps.num_comp_vectors; + mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", + name); + } + + } + } else { + cq->vector = (cq->ring + 1 + priv->port) % + mdev->dev->caps.num_comp_vectors; + } + + cq->irq_desc = + irq_to_desc(mlx4_eq_get_irq(mdev->dev, + cq->vector)); + } else { + /* For TX we use the same irq per + ring we assigned for the RX */ + struct mlx4_en_cq *rx_cq; + + cq_idx = cq_idx % priv->rx_ring_num; + rx_cq = priv->rx_cq[cq_idx]; + cq->vector = rx_cq->vector; + } + + if (!cq->is_tx) + cq->size = priv->rx_ring[cq->ring]->actual_size; + + if ((cq->is_tx && priv->hwtstamp_config.tx_type) || + (!cq->is_tx && priv->hwtstamp_config.rx_filter)) + timestamp_en = 1; + + err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, + &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, + cq->vector, 0, timestamp_en); + if (err) + return err; + + cq->mcq.comp = cq->is_tx ? 
mlx4_en_tx_irq : mlx4_en_rx_irq; + cq->mcq.event = mlx4_en_cq_event; + + if (cq->is_tx) { + netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, + NAPI_POLL_WEIGHT); + } else { + struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; + + err = irq_set_affinity_hint(cq->mcq.irq, + ring->affinity_mask); + if (err) + mlx4_warn(mdev, "Failed setting affinity hint\n"); + + netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); + napi_hash_add(&cq->napi); + } + + napi_enable(&cq->napi); + + return 0; +} + +void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq = *pcq; + + mlx4_en_unmap_buffer(&cq->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); + if (priv->mdev->dev->caps.comp_pool && cq->vector) { + mlx4_release_eq(priv->mdev->dev, cq->vector); + } + cq->vector = 0; + cq->buf_size = 0; + cq->buf = NULL; + kfree(cq); + *pcq = NULL; +} + +void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +{ + napi_disable(&cq->napi); + if (!cq->is_tx) { + napi_hash_del(&cq->napi); + synchronize_rcu(); + irq_set_affinity_hint(cq->mcq.irq, NULL); + } + netif_napi_del(&cq->napi); + + mlx4_cq_free(priv->mdev->dev, &cq->mcq); +} + +/* Set rx cq moderation parameters */ +int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +{ + return mlx4_cq_modify(priv->mdev->dev, &cq->mcq, + cq->moder_cnt, cq->moder_time); +} + +int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +{ + mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, + &priv->mdev->uar_lock); + + return 0; +} + + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c new file mode 100644 index 000000000..f01918c63 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2011 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
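[Editor's note] In the CQ activation path above, when no dedicated EQ can be assigned the RX completion vector falls back to (ring + 1 + port) % num_comp_vectors, and TX CQs reuse the vector of the RX CQ with the same (modulo) index. A tiny userspace sketch of that mapping; the ring and vector counts are illustrative.

/* Sketch: legacy completion-vector fallback for RX rings. */
#include <stdio.h>

int main(void)
{
        int num_comp_vectors = 4;       /* dev->caps.num_comp_vectors in the driver */
        int port = 1;
        int rx_rings = 8;
        int ring;

        for (ring = 0; ring < rx_rings; ring++)
                printf("rx ring %d -> comp vector %d\n",
                       ring, (ring + 1 + port) % num_comp_vectors);
        return 0;
}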
+ * + */ + +#include +#include + +#include "mlx4_en.h" +#include "fw_qos.h" + +/* Definitions for QCN + */ + +struct mlx4_congestion_control_mb_prio_802_1_qau_params { + __be32 modify_enable_high; + __be32 modify_enable_low; + __be32 reserved1; + __be32 extended_enable; + __be32 rppp_max_rps; + __be32 rpg_time_reset; + __be32 rpg_byte_reset; + __be32 rpg_threshold; + __be32 rpg_max_rate; + __be32 rpg_ai_rate; + __be32 rpg_hai_rate; + __be32 rpg_gd; + __be32 rpg_min_dec_fac; + __be32 rpg_min_rate; + __be32 max_time_rise; + __be32 max_byte_rise; + __be32 max_qdelta; + __be32 min_qoffset; + __be32 gd_coefficient; + __be32 reserved2[5]; + __be32 cp_sample_base; + __be32 reserved3[39]; +}; + +struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { + __be64 rppp_rp_centiseconds; + __be32 reserved1; + __be32 ignored_cnm; + __be32 rppp_created_rps; + __be32 estimated_total_rate; + __be32 max_active_rate_limiter_index; + __be32 dropped_cnms_busy_fw; + __be32 reserved2; + __be32 cnms_handled_successfully; + __be32 min_total_limiters_rate; + __be32 max_total_limiters_rate; + __be32 reserved3[4]; +}; + +static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ieee_ets *my_ets = &priv->ets; + + /* No IEEE PFC settings available */ + if (!my_ets) + return -EINVAL; + + ets->ets_cap = IEEE_8021QAZ_MAX_TCS; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + + return 0; +} + +static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) +{ + int i; + int total_ets_bw = 0; + int has_ets_tc = 0; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) { + en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n", + i, ets->prio_tc[i]); + return -EINVAL; + } + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + has_ets_tc = 1; + total_ets_bw += ets->tc_tx_bw[i]; + break; + default: + en_err(priv, "TC[%d]: Not supported TSA: %d\n", + i, ets->tc_tsa[i]); + return -ENOTSUPP; + } + } + + if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) { + en_err(priv, "Bad ETS BW sum: %d. 
Should be exactly 100%%\n", + total_ets_bw); + return -EINVAL; + } + + return 0; +} + +static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, + struct ieee_ets *ets, u16 *ratelimit) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int num_strict = 0; + int i; + __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 }; + __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 }; + + ets = ets ?: &priv->ets; + ratelimit = ratelimit ?: priv->maxrate; + + /* higher TC means higher priority => lower pg */ + for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + pg[i] = num_strict++; + tc_tx_bw[i] = MLX4_EN_BW_MAX; + break; + case IEEE_8021QAZ_TSA_ETS: + pg[i] = MLX4_EN_TC_ETS; + tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN; + break; + } + } + + return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg, + ratelimit); +} + +static int +mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + err = mlx4_en_ets_validate(priv, ets); + if (err) + return err; + + err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc); + if (err) + return err; + + err = mlx4_en_config_port_scheduler(priv, ets, NULL); + if (err) + return err; + + memcpy(&priv->ets, ets, sizeof(priv->ets)); + + return 0; +} + +static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + pfc->pfc_en = priv->prof->tx_ppp; + + return 0; +} + +static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_port_profile *prof = priv->prof; + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", + pfc->pfc_cap, + pfc->pfc_en, + pfc->mbc, + pfc->delay); + + prof->rx_pause = !pfc->pfc_en; + prof->tx_pause = !pfc->pfc_en; + prof->rx_ppp = pfc->pfc_en; + prof->tx_ppp = pfc->pfc_en; + + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + prof->tx_pause, + prof->tx_ppp, + prof->rx_pause, + prof->rx_ppp); + if (err) + en_err(priv, "Failed setting pause params\n"); + else + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + prof->rx_ppp, prof->rx_pause, + prof->tx_ppp, prof->tx_pause); + + return err; +} + +static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + return 0; +} + +#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ +static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + maxrate->tc_maxrate[i] = + priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB; + + return 0; +} + +static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 tmp[IEEE_8021QAZ_MAX_TCS]; + int i, err; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + /* Convert from Kbps into HW units, rounding result up. 
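[Editor's note] The maxrate conversion referenced in the comment above works in hardware units of 100000 Kbps (100 Mbps) and rounds the requested rate up, so a small non-zero request never collapses to 0 (0 means "unlimited"). A userspace sketch of that ceiling division; the sample rates are illustrative.

/* Sketch: Kbps -> rate-limiter hardware units, rounding up. */
#include <stdint.h>
#include <stdio.h>

#define RATELIMIT_UNITS_IN_KB 100000ULL /* matches MLX4_RATELIMIT_UNITS_IN_KB */

static uint16_t kbps_to_hw_units(uint64_t kbps)
{
        /* ceiling division, as done with div_u64() in the driver */
        return (uint16_t)((kbps + RATELIMIT_UNITS_IN_KB - 1) /
                          RATELIMIT_UNITS_IN_KB);
}

int main(void)
{
        printf("1 Gbps   (1000000 Kbps) -> %u units\n",
               (unsigned)kbps_to_hw_units(1000000));
        printf("150 Mbps (150000 Kbps)  -> %u units\n",
               (unsigned)kbps_to_hw_units(150000));
        printf("unlimited (0 Kbps)      -> %u units\n",
               (unsigned)kbps_to_hw_units(0));
        return 0;
}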
+ * Setting to 0, means unlimited BW. + */ + tmp[i] = div_u64(maxrate->tc_maxrate[i] + + MLX4_RATELIMIT_UNITS_IN_KB - 1, + MLX4_RATELIMIT_UNITS_IN_KB); + } + + err = mlx4_en_config_port_scheduler(priv, NULL, tmp); + if (err) + return err; + + memcpy(priv->maxrate, tmp, sizeof(priv->maxrate)); + + return 0; +} + +#define RPG_ENABLE_BIT 31 +#define CN_TAG_BIT 30 + +static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, + inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return err; + } + + qcn->rpg_enable[i] = + be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT; + qcn->rppp_max_rps[i] = + be32_to_cpu(hw_qcn->rppp_max_rps); + qcn->rpg_time_reset[i] = + be32_to_cpu(hw_qcn->rpg_time_reset); + qcn->rpg_byte_reset[i] = + be32_to_cpu(hw_qcn->rpg_byte_reset); + qcn->rpg_threshold[i] = + be32_to_cpu(hw_qcn->rpg_threshold); + qcn->rpg_max_rate[i] = + be32_to_cpu(hw_qcn->rpg_max_rate); + qcn->rpg_ai_rate[i] = + be32_to_cpu(hw_qcn->rpg_ai_rate); + qcn->rpg_hai_rate[i] = + be32_to_cpu(hw_qcn->rpg_hai_rate); + qcn->rpg_gd[i] = + be32_to_cpu(hw_qcn->rpg_gd); + qcn->rpg_min_dec_fac[i] = + be32_to_cpu(hw_qcn->rpg_min_dec_fac); + qcn->rpg_min_rate[i] = + be32_to_cpu(hw_qcn->rpg_min_rate); + qcn->cndd_state_machine[i] = + priv->cndd_state[i]; + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + +static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev, + struct ieee_qcn *qcn) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn; + struct mlx4_cmd_mailbox *mailbox_in = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; +#define MODIFY_ENABLE_HIGH_MASK 0xc0000000 +#define MODIFY_ENABLE_LOW_MASK 0xffc00000 + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_in)) + return -ENOMEM; + + mailbox_in_dma = mailbox_in->dma; + hw_qcn = + (struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + + /* Before updating QCN parameter, + * need to set it's modify enable bit to 1 + */ + + hw_qcn->modify_enable_high = cpu_to_be32( + MODIFY_ENABLE_HIGH_MASK); + hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK); + + hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT); + hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]); + hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]); + hw_qcn->rpg_byte_reset = 
cpu_to_be32(qcn->rpg_byte_reset[i]); + hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]); + hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]); + hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]); + hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]); + hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]); + hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]); + hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]); + priv->cndd_state[i] = qcn->cndd_state_machine[i]; + if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY) + hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT); + + err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod, + MLX4_CONGESTION_CONTROL_SET_PARAMS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return err; + } + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in); + return 0; +} + +static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, + struct ieee_qcn_stats *qcn_stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats; + struct mlx4_cmd_mailbox *mailbox_out = NULL; + u64 mailbox_in_dma = 0; + u32 inmod = 0; + int i, err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN)) + return -EOPNOTSUPP; + + mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev); + if (IS_ERR(mailbox_out)) + return -ENOMEM; + + hw_qcn_stats = + (struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *) + mailbox_out->buf; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + inmod = priv->port | ((1 << i) << 8) | + (MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16); + err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma, + mailbox_out->dma, inmod, + MLX4_CONGESTION_CONTROL_GET_STATISTICS, + MLX4_CMD_CONGESTION_CTRL_OPCODE, + MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (err) { + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return err; + } + qcn_stats->rppp_rp_centiseconds[i] = + be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds); + qcn_stats->rppp_created_rps[i] = + be32_to_cpu(hw_qcn_stats->rppp_created_rps); + } + mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out); + return 0; +} + +const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { + .ieee_getets = mlx4_en_dcbnl_ieee_getets, + .ieee_setets = mlx4_en_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getdcbx = mlx4_en_dcbnl_getdcbx, + .setdcbx = mlx4_en_dcbnl_setdcbx, + .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, + .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, + .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, +}; + +const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getdcbx = mlx4_en_dcbnl_getdcbx, + .setdcbx = mlx4_en_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c new file mode 100644 index 000000000..a2ddf3d75 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -0,0 +1,2011 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
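[Editor's note] The QCN get/set/statistics commands above all build their input modifier the same way: port number in bits 0-7, a one-hot priority mask in bits 8-15, and the algorithm selector from bit 16 up. A userspace sketch of that packing; the algorithm value below is a placeholder, not the real MLX4_CTRL_ALGO_* constant.

/* Sketch: input-modifier packing for the per-priority congestion-control cmds. */
#include <stdint.h>
#include <stdio.h>

static uint32_t qcn_inmod(uint32_t port, int prio, uint32_t algo)
{
        return port | ((1u << prio) << 8) | (algo << 16);
}

int main(void)
{
        uint32_t algo = 2;      /* placeholder for the 802.1Qau reaction-point algo */
        int prio;

        for (prio = 0; prio < 8; prio++)
                printf("port 1, prio %d -> inmod 0x%08x\n",
                       prio, qcn_inmod(1, prio, algo));
        return 0;
}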
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4_en.h" +#include "en_port.h" + +#define EN_ETHTOOL_QP_ATTACH (1ull << 63) +#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) +#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) + +static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) +{ + int i; + int err = 0; + + for (i = 0; i < priv->tx_ring_num; i++) { + priv->tx_cq[i]->moder_cnt = priv->tx_frames; + priv->tx_cq[i]->moder_time = priv->tx_usecs; + if (priv->port_up) { + err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]); + if (err) + return err; + } + } + + if (priv->adaptive_rx_coal) + return 0; + + for (i = 0; i < priv->rx_ring_num; i++) { + priv->rx_cq[i]->moder_cnt = priv->rx_frames; + priv->rx_cq[i]->moder_time = priv->rx_usecs; + priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; + if (priv->port_up) { + err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]); + if (err) + return err; + } + } + + return err; +} + +static void +mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + (u16) (mdev->dev->caps.fw_ver >> 32), + (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff), + (u16) (mdev->dev->caps.fw_ver & 0xffff)); + strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = { + "blueflame", +}; + +static const char main_strings[][ETH_GSTRING_LEN] = { + /* main statistics */ + "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + + /* port statistics */ + "tso_packets", + "xmit_more", + 
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", + "rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload", + + /* priority flow control statistics rx */ + "rx_pause_prio_0", "rx_pause_duration_prio_0", + "rx_pause_transition_prio_0", + "rx_pause_prio_1", "rx_pause_duration_prio_1", + "rx_pause_transition_prio_1", + "rx_pause_prio_2", "rx_pause_duration_prio_2", + "rx_pause_transition_prio_2", + "rx_pause_prio_3", "rx_pause_duration_prio_3", + "rx_pause_transition_prio_3", + "rx_pause_prio_4", "rx_pause_duration_prio_4", + "rx_pause_transition_prio_4", + "rx_pause_prio_5", "rx_pause_duration_prio_5", + "rx_pause_transition_prio_5", + "rx_pause_prio_6", "rx_pause_duration_prio_6", + "rx_pause_transition_prio_6", + "rx_pause_prio_7", "rx_pause_duration_prio_7", + "rx_pause_transition_prio_7", + + /* flow control statistics rx */ + "rx_pause", "rx_pause_duration", "rx_pause_transition", + + /* priority flow control statistics tx */ + "tx_pause_prio_0", "tx_pause_duration_prio_0", + "tx_pause_transition_prio_0", + "tx_pause_prio_1", "tx_pause_duration_prio_1", + "tx_pause_transition_prio_1", + "tx_pause_prio_2", "tx_pause_duration_prio_2", + "tx_pause_transition_prio_2", + "tx_pause_prio_3", "tx_pause_duration_prio_3", + "tx_pause_transition_prio_3", + "tx_pause_prio_4", "tx_pause_duration_prio_4", + "tx_pause_transition_prio_4", + "tx_pause_prio_5", "tx_pause_duration_prio_5", + "tx_pause_transition_prio_5", + "tx_pause_prio_6", "tx_pause_duration_prio_6", + "tx_pause_transition_prio_6", + "tx_pause_prio_7", "tx_pause_duration_prio_7", + "tx_pause_transition_prio_7", + + /* flow control statistics tx */ + "tx_pause", "tx_pause_duration", "tx_pause_transition", + + /* packet statistics */ + "rx_multicast_packets", + "rx_broadcast_packets", + "rx_jabbers", + "rx_in_range_length_error", + "rx_out_range_length_error", + "tx_multicast_packets", + "tx_broadcast_packets", + "rx_prio_0_packets", "rx_prio_0_bytes", + "rx_prio_1_packets", "rx_prio_1_bytes", + "rx_prio_2_packets", "rx_prio_2_bytes", + "rx_prio_3_packets", "rx_prio_3_bytes", + "rx_prio_4_packets", "rx_prio_4_bytes", + "rx_prio_5_packets", "rx_prio_5_bytes", + "rx_prio_6_packets", "rx_prio_6_bytes", + "rx_prio_7_packets", "rx_prio_7_bytes", + "rx_novlan_packets", "rx_novlan_bytes", + "tx_prio_0_packets", "tx_prio_0_bytes", + "tx_prio_1_packets", "tx_prio_1_bytes", + "tx_prio_2_packets", "tx_prio_2_bytes", + "tx_prio_3_packets", "tx_prio_3_bytes", + "tx_prio_4_packets", "tx_prio_4_bytes", + "tx_prio_5_packets", "tx_prio_5_bytes", + "tx_prio_6_packets", "tx_prio_6_bytes", + "tx_prio_7_packets", "tx_prio_7_bytes", + "tx_novlan_packets", "tx_novlan_bytes", + +}; + +static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { + "Interrupt Test", + "Link Test", + "Speed Test", + "Register Test", + "Loopback Test", +}; + +static u32 mlx4_en_get_msglevel(struct net_device *dev) +{ + return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; +} + +static void mlx4_en_set_msglevel(struct net_device *dev, u32 val) +{ + ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val; +} + +static void mlx4_en_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + int err = 0; + u64 config = 0; + u64 mask; + + if ((priv->port < 1) || (priv->port > 2)) { + en_err(priv, "Failed to get WoL information\n"); + return; + } + + mask = (priv->port == 1) ? 
MLX4_DEV_CAP_FLAG_WOL_PORT1 : + MLX4_DEV_CAP_FLAG_WOL_PORT2; + + if (!(priv->mdev->dev->caps.flags & mask)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } + + err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); + if (err) { + en_err(priv, "Failed to get WoL information\n"); + return; + } + + if (config & MLX4_EN_WOL_MAGIC) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + + if (config & MLX4_EN_WOL_ENABLED) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; +} + +static int mlx4_en_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + u64 config = 0; + int err = 0; + u64 mask; + + if ((priv->port < 1) || (priv->port > 2)) + return -EOPNOTSUPP; + + mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : + MLX4_DEV_CAP_FLAG_WOL_PORT2; + + if (!(priv->mdev->dev->caps.flags & mask)) + return -EOPNOTSUPP; + + if (wol->supported & ~WAKE_MAGIC) + return -EINVAL; + + err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); + if (err) { + en_err(priv, "Failed to get WoL info, unable to modify\n"); + return err; + } + + if (wol->wolopts & WAKE_MAGIC) { + config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED | + MLX4_EN_WOL_MAGIC; + } else { + config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC); + config |= MLX4_EN_WOL_DO_MODIFY; + } + + err = mlx4_wol_write(priv->mdev->dev, config, priv->port); + if (err) + en_err(priv, "Failed to set WoL information\n"); + + return err; +} + +struct bitmap_iterator { + unsigned long *stats_bitmap; + unsigned int count; + unsigned int iterator; + bool advance_array; /* if set, force no increments */ +}; + +static inline void bitmap_iterator_init(struct bitmap_iterator *h, + unsigned long *stats_bitmap, + int count) +{ + h->iterator = 0; + h->advance_array = !bitmap_empty(stats_bitmap, count); + h->count = h->advance_array ? bitmap_weight(stats_bitmap, count) + : count; + h->stats_bitmap = stats_bitmap; +} + +static inline int bitmap_iterator_test(struct bitmap_iterator *h) +{ + return !h->advance_array ? 
1 : test_bit(h->iterator, h->stats_bitmap); +} + +static inline int bitmap_iterator_inc(struct bitmap_iterator *h) +{ + return h->iterator++; +} + +static inline unsigned int +bitmap_iterator_count(struct bitmap_iterator *h) +{ + return h->count; +} + +static int mlx4_en_get_sset_count(struct net_device *dev, int sset) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); + + switch (sset) { + case ETH_SS_STATS: + return bitmap_iterator_count(&it) + + (priv->tx_ring_num * 2) + +#ifdef CONFIG_NET_RX_BUSY_POLL + (priv->rx_ring_num * 5); +#else + (priv->rx_ring_num * 2); +#endif + case ETH_SS_TEST: + return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags + & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(mlx4_en_priv_flags); + default: + return -EOPNOTSUPP; + } +} + +static void mlx4_en_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int index = 0; + int i; + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); + + spin_lock_bh(&priv->stats_lock); + + for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->stats)[i]; + + for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->port_stats)[i]; + + for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX; + i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = + ((u64 *)&priv->rx_priority_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((u64 *)&priv->rx_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX; + i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = + ((u64 *)&priv->tx_priority_flowstats)[i]; + + for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((u64 *)&priv->tx_flowstats)[i]; + + for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + data[index++] = ((unsigned long *)&priv->pkstats)[i]; + + for (i = 0; i < priv->tx_ring_num; i++) { + data[index++] = priv->tx_ring[i]->packets; + data[index++] = priv->tx_ring[i]->bytes; + } + for (i = 0; i < priv->rx_ring_num; i++) { + data[index++] = priv->rx_ring[i]->packets; + data[index++] = priv->rx_ring[i]->bytes; +#ifdef CONFIG_NET_RX_BUSY_POLL + data[index++] = priv->rx_ring[i]->yields; + data[index++] = priv->rx_ring[i]->misses; + data[index++] = priv->rx_ring[i]->cleaned; +#endif + } + spin_unlock_bh(&priv->stats_lock); + +} + +static void mlx4_en_self_test(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + mlx4_en_ex_selftest(dev, &etest->flags, buf); +} + +static void mlx4_en_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int index = 0; + int i, strings = 0; + struct bitmap_iterator it; + + bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS); + + switch (stringset) { + case ETH_SS_TEST: + for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) + for (; i < MLX4_EN_NUM_SELF_TEST; 
i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + break; + + case ETH_SS_STATS: + /* Add main counters */ + for (i = 0; i < NUM_MAIN_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < NUM_PORT_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < NUM_FLOW_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < NUM_PKT_STATS; i++, strings++, + bitmap_iterator_inc(&it)) + if (bitmap_iterator_test(&it)) + strcpy(data + (index++) * ETH_GSTRING_LEN, + main_strings[strings]); + + for (i = 0; i < priv->tx_ring_num; i++) { + sprintf(data + (index++) * ETH_GSTRING_LEN, + "tx%d_packets", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "tx%d_bytes", i); + } + for (i = 0; i < priv->rx_ring_num; i++) { + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_packets", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_bytes", i); +#ifdef CONFIG_NET_RX_BUSY_POLL + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_napi_yield", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_misses", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_cleaned", i); +#endif + } + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, + mlx4_en_priv_flags[i]); + break; + + } +} + +static u32 mlx4_en_autoneg_get(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + u32 autoneg = AUTONEG_DISABLE; + + if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) && + (priv->port_state.flags & MLX4_EN_PORT_ANE)) + autoneg = AUTONEG_ENABLE; + + return autoneg; +} + +static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return SUPPORTED_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper); + + if (!eth_proto) /* link down */ + eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return PORT_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_56GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) { + return 
PORT_DA; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return PORT_NONE; + } + return PORT_OTHER; +} + +#define MLX4_LINK_MODES_SZ \ + (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) + +enum ethtool_report { + SUPPORTED = 0, + ADVERTISED = 1, + SPEED = 2 +}; + +/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ +static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { + [MLX4_100BASE_TX] = { + SUPPORTED_100baseT_Full, + ADVERTISED_100baseT_Full, + SPEED_100 + }, + + [MLX4_1000BASE_T] = { + SUPPORTED_1000baseT_Full, + ADVERTISED_1000baseT_Full, + SPEED_1000 + }, + [MLX4_1000BASE_CX_SGMII] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + [MLX4_1000BASE_KX] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + + [MLX4_10GBASE_T] = { + SUPPORTED_10000baseT_Full, + ADVERTISED_10000baseT_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_SR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + + [MLX4_20GBASE_KR2] = { + SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, + ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, + SPEED_20000 + }, + + [MLX4_40GBASE_CR4] = { + SUPPORTED_40000baseCR4_Full, + ADVERTISED_40000baseCR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_KR4] = { + SUPPORTED_40000baseKR4_Full, + ADVERTISED_40000baseKR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_SR4] = { + SUPPORTED_40000baseSR4_Full, + ADVERTISED_40000baseSR4_Full, + SPEED_40000 + }, + + [MLX4_56GBASE_KR4] = { + SUPPORTED_56000baseKR4_Full, + ADVERTISED_56000baseKR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_CR4] = { + SUPPORTED_56000baseCR4_Full, + ADVERTISED_56000baseCR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_SR4] = { + SUPPORTED_56000baseSR4_Full, + ADVERTISED_56000baseSR4_Full, + SPEED_56000 + }, +}; + +static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) +{ + int i; + u32 link_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (eth_proto & MLX4_PROT_MASK(i)) + link_modes |= ptys2ethtool_map[i][report]; + } + return link_modes; +} + +static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][report] & link_modes) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +/* Convert actual speed (SPEED_XXX) to ptys link modes */ +static u32 speed2ptys_link_modes(u32 speed) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][SPEED] == speed) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +static int ethtool_get_ptys_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + u32 eth_proto; + int ret; + + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = 
priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)", + ret); + return ret; + } + en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n", + ptys_reg.proto_mask); + en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n", + be32_to_cpu(ptys_reg.eth_proto_cap)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n", + be32_to_cpu(ptys_reg.eth_proto_admin)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n", + be32_to_cpu(ptys_reg.eth_proto_oper)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", + be32_to_cpu(ptys_reg.eth_proto_lp_adv)); + + cmd->supported = 0; + cmd->advertising = 0; + + cmd->supported |= ptys_get_supported_port(&ptys_reg); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); + cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); + cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0; + + cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? + ADVERTISED_Asym_Pause : 0; + + cmd->port = ptys_get_active_port(&ptys_reg); + cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? + XCVR_EXTERNAL : XCVR_INTERNAL; + + if (mlx4_en_autoneg_get(dev)) { + cmd->supported |= SUPPORTED_Autoneg; + cmd->advertising |= ADVERTISED_Autoneg; + } + + cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); + cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? 
+ ADVERTISED_Autoneg : 0; + + cmd->phy_address = 0; + cmd->mdio_support = 0; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + + return ret; +} + +static void ethtool_get_default_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int trans_type; + + cmd->autoneg = AUTONEG_DISABLE; + cmd->supported = SUPPORTED_10000baseT_Full; + cmd->advertising = ADVERTISED_10000baseT_Full; + trans_type = priv->port_state.transceiver; + + if (trans_type > 0 && trans_type <= 0xC) { + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_EXTERNAL; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + } else if (trans_type == 0x80 || trans_type == 0) { + cmd->port = PORT_TP; + cmd->transceiver = XCVR_INTERNAL; + cmd->supported |= SUPPORTED_TP; + cmd->advertising |= ADVERTISED_TP; + } else { + cmd->port = -1; + cmd->transceiver = -1; + } +} + +static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = -EINVAL; + + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + + en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n", + priv->port_state.flags & MLX4_EN_PORT_ANC, + priv->port_state.flags & MLX4_EN_PORT_ANE); + + if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) + ret = ethtool_get_ptys_settings(dev, cmd); + if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */ + ethtool_get_default_settings(dev, cmd); + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); + cmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } + return 0; +} + +/* Calculate PTYS admin according ethtool speed (SPEED_XXX) */ +static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, + __be32 proto_cap) +{ + __be32 proto_admin = 0; + + if (!speed) { /* Speed = 0 ==> Reset Link modes */ + proto_admin = proto_cap; + en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n", + be32_to_cpu(proto_cap)); + } else { + u32 ptys_link_modes = speed2ptys_link_modes(speed); + + proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap; + en_info(priv, "Setting Speed to %d\n", speed); + } + return proto_admin; +} + +static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + __be32 proto_admin; + int ret; + + u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); + int speed = ethtool_cmd_speed(cmd); + + en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", + speed, cmd->advertising, cmd->autoneg, cmd->duplex); + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || + (cmd->duplex == DUPLEX_HALF)) + return -EINVAL; + + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n", + ret); + return 0; + } + + proto_admin = cmd->autoneg == AUTONEG_ENABLE ? 
+ cpu_to_be32(ptys_adv) : + speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + + proto_admin &= ptys_reg.eth_proto_cap; + if (!proto_admin) { + en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); + return -EINVAL; /* nothing to change due to bad input */ + } + + if (proto_admin == ptys_reg.eth_proto_admin) + return 0; /* Nothing to change */ + + en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", + be32_to_cpu(proto_admin)); + + ptys_reg.eth_proto_admin = proto_admin; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE, + &ptys_reg); + if (ret) { + en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)", + be32_to_cpu(ptys_reg.eth_proto_admin), ret); + return ret; + } + + mutex_lock(&priv->mdev->state_lock); + if (priv->port_up) { + en_warn(priv, "Port link mode changed, restarting port...\n"); + mlx4_en_stop_port(dev, 1); + if (mlx4_en_start_port(dev)) + en_err(priv, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&priv->mdev->state_lock); + return 0; +} + +static int mlx4_en_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + coal->tx_coalesce_usecs = priv->tx_usecs; + coal->tx_max_coalesced_frames = priv->tx_frames; + coal->tx_max_coalesced_frames_irq = priv->tx_work_limit; + + coal->rx_coalesce_usecs = priv->rx_usecs; + coal->rx_max_coalesced_frames = priv->rx_frames; + + coal->pkt_rate_low = priv->pkt_rate_low; + coal->rx_coalesce_usecs_low = priv->rx_usecs_low; + coal->pkt_rate_high = priv->pkt_rate_high; + coal->rx_coalesce_usecs_high = priv->rx_usecs_high; + coal->rate_sample_interval = priv->sample_interval; + coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; + + return 0; +} + +static int mlx4_en_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (!coal->tx_max_coalesced_frames_irq) + return -EINVAL; + + priv->rx_frames = (coal->rx_max_coalesced_frames == + MLX4_EN_AUTO_CONF) ? + MLX4_EN_RX_COAL_TARGET : + coal->rx_max_coalesced_frames; + priv->rx_usecs = (coal->rx_coalesce_usecs == + MLX4_EN_AUTO_CONF) ? 
+ MLX4_EN_RX_COAL_TIME : + coal->rx_coalesce_usecs; + + /* Setting TX coalescing parameters */ + if (coal->tx_coalesce_usecs != priv->tx_usecs || + coal->tx_max_coalesced_frames != priv->tx_frames) { + priv->tx_usecs = coal->tx_coalesce_usecs; + priv->tx_frames = coal->tx_max_coalesced_frames; + } + + /* Set adaptive coalescing params */ + priv->pkt_rate_low = coal->pkt_rate_low; + priv->rx_usecs_low = coal->rx_coalesce_usecs_low; + priv->pkt_rate_high = coal->pkt_rate_high; + priv->rx_usecs_high = coal->rx_coalesce_usecs_high; + priv->sample_interval = coal->rate_sample_interval; + priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + priv->tx_work_limit = coal->tx_max_coalesced_frames_irq; + + return mlx4_en_moderation_update(priv); +} + +static int mlx4_en_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + if (pause->autoneg) + return -EINVAL; + + priv->prof->tx_pause = pause->tx_pause != 0; + priv->prof->rx_pause = pause->rx_pause != 0; + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp); + if (err) + en_err(priv, "Failed setting pause params\n"); + else + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, + priv->prof->rx_ppp, + priv->prof->rx_pause, + priv->prof->tx_ppp, + priv->prof->tx_pause); + + return err; +} + +static void mlx4_en_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + pause->tx_pause = priv->prof->tx_pause; + pause->rx_pause = priv->prof->rx_pause; +} + +static int mlx4_en_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + u32 rx_size, tx_size; + int port_up = 0; + int err = 0; + + if (param->rx_jumbo_pending || param->rx_mini_pending) + return -EINVAL; + + rx_size = roundup_pow_of_two(param->rx_pending); + rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); + rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); + tx_size = roundup_pow_of_two(param->tx_pending); + tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); + tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); + + if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size : + priv->rx_ring[0]->size) && + tx_size == priv->tx_ring[0]->size) + return 0; + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + priv->prof->tx_ring_size = tx_size; + priv->prof->rx_ring_size = rx_size; + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + + err = mlx4_en_moderation_update(priv); + +out: + mutex_unlock(&mdev->state_lock); + return err; +} + +static void mlx4_en_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + memset(param, 0, sizeof(*param)); + param->rx_max_pending = MLX4_EN_MAX_RX_SIZE; + param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; + param->rx_pending = priv->port_up ? 
+ priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size; + param->tx_pending = priv->tx_ring[0]->size; +} + +static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->rx_ring_num; +} + +static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) +{ + return MLX4_EN_RSS_KEY_SIZE; +} + +static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + /* check if requested function is supported by the device */ + if (hfunc == ETH_RSS_HASH_TOP) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) + return -EINVAL; + if (!(dev->features & NETIF_F_RXHASH)) + en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); + return 0; + } else if (hfunc == ETH_RSS_HASH_XOR) { + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) + return -EINVAL; + if (dev->features & NETIF_F_RXHASH) + en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); + return 0; + } + + return -EINVAL; +} + +static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, + u8 *hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rss_map *rss_map = &priv->rss_map; + int rss_rings; + size_t n = priv->rx_ring_num; + int err = 0; + + rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; + rss_rings = 1 << ilog2(rss_rings); + + while (n--) { + if (!ring_index) + break; + ring_index[n] = rss_map->qps[n % rss_rings].qpn - + rss_map->base_qpn; + } + if (key) + memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc) + *hfunc = priv->rss_hash_fn; + return err; +} + +static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, + const u8 *key, const u8 hfunc) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + int i; + int rss_rings = 0; + + /* Calculate RSS table size and make sure flows are spread evenly + * between rings + */ + for (i = 0; i < priv->rx_ring_num; i++) { + if (!ring_index) + continue; + if (i > 0 && !ring_index[i] && !rss_rings) + rss_rings = i; + + if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) + return -EINVAL; + } + + if (!rss_rings) + rss_rings = priv->rx_ring_num; + + /* RSS table size must be an order of 2 */ + if (!is_power_of_2(rss_rings)) + return -EINVAL; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + err = mlx4_en_check_rxfh_func(dev, hfunc); + if (err) + return err; + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + if (ring_index) + priv->prof->rss_rings = rss_rings; + if (key) + memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); + if (hfunc != ETH_RSS_HASH_NO_CHANGE) + priv->rss_hash_fn = hfunc; + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + + mutex_unlock(&mdev->state_lock); + return err; +} + +#define all_zeros_or_all_ones(field) \ + ((field) == 0 || (field) == (__force typeof(field))-1) + +static int mlx4_en_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l3_mask; + struct ethtool_tcpip4_spec *l4_mask; + struct ethhdr *eth_mask; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + if (cmd->fs.flow_type & FLOW_MAC_EXT) { + /* dest mac mask must be ff:ff:ff:ff:ff:ff */ + if 
(!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest)) + return -EINVAL; + } + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + if (cmd->fs.m_u.tcp_ip4_spec.tos) + return -EINVAL; + l4_mask = &cmd->fs.m_u.tcp_ip4_spec; + /* don't allow mask which isn't all 0 or 1 */ + if (!all_zeros_or_all_ones(l4_mask->ip4src) || + !all_zeros_or_all_ones(l4_mask->ip4dst) || + !all_zeros_or_all_ones(l4_mask->psrc) || + !all_zeros_or_all_ones(l4_mask->pdst)) + return -EINVAL; + break; + case IP_USER_FLOW: + l3_mask = &cmd->fs.m_u.usr_ip4_spec; + if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || + cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 || + (!l3_mask->ip4src && !l3_mask->ip4dst) || + !all_zeros_or_all_ones(l3_mask->ip4src) || + !all_zeros_or_all_ones(l3_mask->ip4dst)) + return -EINVAL; + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* source mac mask must not be set */ + if (!is_zero_ether_addr(eth_mask->h_source)) + return -EINVAL; + + /* dest mac mask must be ff:ff:ff:ff:ff:ff */ + if (!is_broadcast_ether_addr(eth_mask->h_dest)) + return -EINVAL; + + if (!all_zeros_or_all_ones(eth_mask->h_proto)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + if (cmd->fs.m_ext.vlan_etype || + !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) == + 0 || + (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) == + cpu_to_be16(VLAN_VID_MASK))) + return -EINVAL; + + if (cmd->fs.m_ext.vlan_tci) { + if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + + } + } + + return 0; +} + +static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h, + struct mlx4_spec_list *spec_l2, + unsigned char *mac) +{ + int err = 0; + __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + + spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN); + memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN); + + if ((cmd->fs.flow_type & FLOW_EXT) && + (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { + spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci; + spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK); + } + + list_add_tail(&spec_l2->list, rule_list_h); + + return err; +} + +static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h, + struct mlx4_spec_list *spec_l2, + __be32 ipv4_dst) +{ +#ifdef CONFIG_INET + unsigned char mac[ETH_ALEN]; + + if (!ipv4_is_multicast(ipv4_dst)) { + if (cmd->fs.flow_type & FLOW_MAC_EXT) + memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN); + else + memcpy(&mac, priv->dev->dev_addr, ETH_ALEN); + } else { + ip_eth_mc_map(ipv4_dst, mac); + } + + return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]); +#else + return -EINVAL; +#endif +} + +static int add_ip_rule(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *list_h) +{ + int err; + struct mlx4_spec_list *spec_l2 = NULL; + struct mlx4_spec_list *spec_l3 = NULL; + struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec; + + spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + if (!spec_l2 || !spec_l3) { + err = -ENOMEM; + goto free_spec; + } + + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2, + cmd->fs.h_u. 
+ usr_ip4_spec.ip4dst); + if (err) + goto free_spec; + spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; + spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src; + if (l3_mask->ip4src) + spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst; + if (l3_mask->ip4dst) + spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; + list_add_tail(&spec_l3->list, list_h); + + return 0; + +free_spec: + kfree(spec_l2); + kfree(spec_l3); + return err; +} + +static int add_tcp_udp_rule(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *list_h, int proto) +{ + int err; + struct mlx4_spec_list *spec_l2 = NULL; + struct mlx4_spec_list *spec_l3 = NULL; + struct mlx4_spec_list *spec_l4 = NULL; + struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec; + + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL); + spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL); + if (!spec_l2 || !spec_l3 || !spec_l4) { + err = -ENOMEM; + goto free_spec; + } + + spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; + + if (proto == TCP_V4_FLOW) { + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, + spec_l2, + cmd->fs.h_u. + tcp_ip4_spec.ip4dst); + if (err) + goto free_spec; + spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP; + spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst; + spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc; + spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst; + } else { + err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, + spec_l2, + cmd->fs.h_u. + udp_ip4_spec.ip4dst); + if (err) + goto free_spec; + spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP; + spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst; + spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc; + spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst; + } + + if (l4_mask->ip4src) + spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; + if (l4_mask->ip4dst) + spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; + + if (l4_mask->psrc) + spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK; + if (l4_mask->pdst) + spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK; + + list_add_tail(&spec_l3->list, list_h); + list_add_tail(&spec_l4->list, list_h); + + return 0; + +free_spec: + kfree(spec_l2); + kfree(spec_l3); + kfree(spec_l4); + return err; +} + +static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev, + struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h) +{ + int err; + struct ethhdr *eth_spec; + struct mlx4_spec_list *spec_l2; + struct mlx4_en_priv *priv = netdev_priv(dev); + + err = mlx4_en_validate_flow(dev, cmd); + if (err) + return err; + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL); + if (!spec_l2) + return -ENOMEM; + + eth_spec = &cmd->fs.h_u.ether_spec; + mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, + &eth_spec->h_dest[0]); + spec_l2->eth.ether_type = eth_spec->h_proto; + if (eth_spec->h_proto) + spec_l2->eth.ether_type_enable = 1; + break; + case IP_USER_FLOW: + err = add_ip_rule(priv, cmd, rule_list_h); + break; + case TCP_V4_FLOW: + err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW); + break; + case UDP_V4_FLOW: + err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW); + break; + } + + return err; +} + +static int mlx4_en_flow_replace(struct net_device
*dev, + struct ethtool_rxnfc *cmd) +{ + int err; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ethtool_flow_id *loc_rule; + struct mlx4_spec_list *spec, *tmp_spec; + u32 qpn; + u64 reg_id; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + }; + + rule.port = priv->port; + rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location; + INIT_LIST_HEAD(&rule.list); + + /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */ + if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC) + qpn = priv->drop_qp.qpn; + else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { + qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); + } else { + if (cmd->fs.ring_cookie >= priv->rx_ring_num) { + en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; + if (!qpn) { + en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + } + rule.qpn = qpn; + err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list); + if (err) + goto out_free_list; + + loc_rule = &priv->ethtool_rules[cmd->fs.location]; + if (loc_rule->id) { + err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id); + if (err) { + en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n", + cmd->fs.location, loc_rule->id); + goto out_free_list; + } + loc_rule->id = 0; + memset(&loc_rule->flow_spec, 0, + sizeof(struct ethtool_rx_flow_spec)); + list_del(&loc_rule->list); + } + err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); + if (err) { + en_err(priv, "Fail to attach network rule at location %d\n", + cmd->fs.location); + goto out_free_list; + } + loc_rule->id = reg_id; + memcpy(&loc_rule->flow_spec, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + list_add_tail(&loc_rule->list, &priv->ethtool_list); + +out_free_list: + list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) { + list_del(&spec->list); + kfree(spec); + } + return err; +} + +static int mlx4_en_flow_detach(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct ethtool_flow_id *rule; + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->ethtool_rules[cmd->fs.location]; + if (!rule->id) { + err = -ENOENT; + goto out; + } + + err = mlx4_flow_detach(priv->mdev->dev, rule->id); + if (err) { + en_err(priv, "Fail to detach network rule at location %d. 
registration id = 0x%llx\n", + cmd->fs.location, rule->id); + goto out; + } + rule->id = 0; + memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec)); + list_del(&rule->list); +out: + return err; + +} + +static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + int err = 0; + struct ethtool_flow_id *rule; + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->ethtool_rules[loc]; + if (rule->id) + memcpy(&cmd->fs, &rule->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + else + err = -ENOENT; + + return err; +} + +static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv) +{ + + int i, res = 0; + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + if (priv->ethtool_rules[i].id) + res++; + } + return res; + +} + +static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + int i = 0, priority = 0; + + if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT || + cmd->cmd == ETHTOOL_GRXCLSRULE || + cmd->cmd == ETHTOOL_GRXCLSRLALL) && + (mdev->dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)) + return -EINVAL; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->rx_ring_num; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = mlx4_en_get_num_flows(priv); + break; + case ETHTOOL_GRXCLSRULE: + err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { + err = mlx4_en_get_flow(dev, cmd, i); + if (!err) + rule_locs[priority++] = i; + i++; + } + err = 0; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (mdev->dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up) + return -EINVAL; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = mlx4_en_flow_replace(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = mlx4_en_flow_detach(dev, cmd); + break; + default: + en_warn(priv, "Unsupported ethtool command. 
(%d)\n", cmd->cmd); + return -EINVAL; + } + + return err; +} + +static void mlx4_en_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + memset(channel, 0, sizeof(*channel)); + + channel->max_rx = MAX_RX_RINGS; + channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; + + channel->rx_count = priv->rx_ring_num; + channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP; +} + +static int mlx4_en_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + if (channel->other_count || channel->combined_count || + channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP || + channel->rx_count > MAX_RX_RINGS || + !channel->tx_count || !channel->rx_count) + return -EINVAL; + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + priv->num_tx_rings_p_up = channel->tx_count; + priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP; + priv->rx_ring_num = channel->rx_count; + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + + netif_set_real_num_tx_queues(dev, priv->tx_ring_num); + netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + + if (dev->num_tc) + mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP); + + en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num); + en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num); + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + + err = mlx4_en_moderation_update(priv); + +out: + mutex_unlock(&mdev->state_lock); + return err; +} + +static int mlx4_en_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + + ret = ethtool_op_get_ts_info(dev, info); + if (ret) + return ret; + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { + info->so_timestamping |= + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + + if (mdev->ptp_clock) + info->phc_index = ptp_clock_index(mdev->ptp_clock); + } + + return ret; +} + +static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + int i; + + if (bf_enabled_new == bf_enabled_old) + return 0; /* Nothing to do */ + + if (bf_enabled_new) { + bool bf_supported = true; + + for (i = 0; i < priv->tx_ring_num; i++) + bf_supported &= priv->tx_ring[i]->bf_alloced; + + if (!bf_supported) { + en_err(priv, "BlueFlame is not supported\n"); + return -EINVAL; + } + + priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } + + for (i = 0; i < priv->tx_ring_num; i++) + priv->tx_ring[i]->bf_enabled = bf_enabled_new; + + en_info(priv, "BlueFlame %s\n", + bf_enabled_new ? 
"Enabled" : "Disabled"); + + return 0; +} + +static u32 mlx4_en_get_priv_flags(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->pflags; +} + +static int mlx4_en_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + void *data) +{ + const struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + *(u32 *)data = priv->prof->inline_thold; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mlx4_en_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int val, ret = 0; + + switch (tuna->id) { + case ETHTOOL_TX_COPYBREAK: + val = *(u32 *)data; + if (val < MIN_PKT_LEN || val > MAX_INLINE) + ret = -EINVAL; + else + priv->prof->inline_thold = val; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int mlx4_en_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + u8 data[4]; + + /* Read first 2 bytes to get Module & REV ID */ + ret = mlx4_get_module_info(mdev->dev, priv->port, + 0/*offset*/, 2/*size*/, data); + if (ret < 2) + return -EIO; + + switch (data[0] /* identifier */) { + case MLX4_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLX4_MODULE_ID_QSFP_PLUS: + if (data[1] >= 0x3) { /* revision id */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLX4_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + case MLX4_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + return -ENOSYS; + } + + return 0; +} + +static int mlx4_en_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int offset = ee->offset; + int i = 0, ret; + + if (ee->len == 0) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + en_dbg(DRV, priv, + "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", + i, offset, ee->len - i); + + ret = mlx4_get_module_info(mdev->dev, priv->port, + offset, ee->len - i, data + i); + + if (!ret) /* Done reading */ + return 0; + + if (ret < 0) { + en_err(priv, + "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", + i, offset, ee->len - i, ret); + return 0; + } + + i += ret; + offset += ret; + } + return 0; +} + +static int mlx4_en_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + int err; + u16 beacon_duration; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON)) + return -EOPNOTSUPP; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + beacon_duration = PORT_BEACON_MAX_LIMIT; + break; + case ETHTOOL_ID_INACTIVE: + beacon_duration = 0; + break; + default: + return -EOPNOTSUPP; + } + + err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration); + return err; +} + +const struct ethtool_ops mlx4_en_ethtool_ops = { + .get_drvinfo = mlx4_en_get_drvinfo, + 
.get_settings = mlx4_en_get_settings, + .set_settings = mlx4_en_set_settings, + .get_link = ethtool_op_get_link, + .get_strings = mlx4_en_get_strings, + .get_sset_count = mlx4_en_get_sset_count, + .get_ethtool_stats = mlx4_en_get_ethtool_stats, + .self_test = mlx4_en_self_test, + .set_phys_id = mlx4_en_set_phys_id, + .get_wol = mlx4_en_get_wol, + .set_wol = mlx4_en_set_wol, + .get_msglevel = mlx4_en_get_msglevel, + .set_msglevel = mlx4_en_set_msglevel, + .get_coalesce = mlx4_en_get_coalesce, + .set_coalesce = mlx4_en_set_coalesce, + .get_pauseparam = mlx4_en_get_pauseparam, + .set_pauseparam = mlx4_en_set_pauseparam, + .get_ringparam = mlx4_en_get_ringparam, + .set_ringparam = mlx4_en_set_ringparam, + .get_rxnfc = mlx4_en_get_rxnfc, + .set_rxnfc = mlx4_en_set_rxnfc, + .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, + .get_rxfh_key_size = mlx4_en_get_rxfh_key_size, + .get_rxfh = mlx4_en_get_rxfh, + .set_rxfh = mlx4_en_set_rxfh, + .get_channels = mlx4_en_get_channels, + .set_channels = mlx4_en_set_channels, + .get_ts_info = mlx4_en_get_ts_info, + .set_priv_flags = mlx4_en_set_priv_flags, + .get_priv_flags = mlx4_en_get_priv_flags, + .get_tunable = mlx4_en_get_tunable, + .set_tunable = mlx4_en_set_tunable, + .get_module_info = mlx4_en_get_module_info, + .get_module_eeprom = mlx4_en_get_module_eeprom +}; + + + + + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c new file mode 100644 index 000000000..913b716ed --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mlx4_en.h" + +MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin"); +MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")"); + +static const char mlx4_en_version[] = + DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +#define MLX4_EN_PARM_INT(X, def_val, desc) \ + static unsigned int X = def_val;\ + module_param(X , uint, 0444); \ + MODULE_PARM_DESC(X, desc); + + +/* + * Device scope module parameters + */ + +/* Enable RSS UDP traffic */ +MLX4_EN_PARM_INT(udp_rss, 1, + "Enable RSS for incoming UDP traffic or disabled (0)"); + +/* Priority pausing */ +MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." + " Per priority bit mask"); +MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." + " Per priority bit mask"); + +MLX4_EN_PARM_INT(inline_thold, MAX_INLINE, + "Threshold for using inline data (range: 17-104, default: 104)"); + +#define MAX_PFC_TX 0xff +#define MAX_PFC_RX 0xff + +void en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...) +{ + va_list args; + struct va_format vaf; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + if (priv->registered) + printk("%s%s: %s: %pV", + level, DRV_NAME, priv->dev->name, &vaf); + else + printk("%s%s: %s: Port %d: %pV", + level, DRV_NAME, dev_name(&priv->mdev->pdev->dev), + priv->port, &vaf); + va_end(args); +} + +void mlx4_en_update_loopback_state(struct net_device *dev, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) + priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); + else + priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); + + priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED| + MLX4_EN_FLAG_ENABLE_HW_LOOPBACK); + + /* Drop the packet if SRIOV is not enabled + * and not performing the selftest or flb disabled + */ + if (mlx4_is_mfunc(priv->mdev->dev) && + !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback) + priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED; + + /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest + * is requested + */ + if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback) + priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK; +} + +static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) +{ + struct mlx4_en_profile *params = &mdev->profile; + int i; + + params->udp_rss = udp_rss; + params->num_tx_rings_p_up = mlx4_low_memory_profile() ? 
+ MLX4_EN_MIN_TX_RING_P_UP : + min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP); + + if (params->udp_rss && !(mdev->dev->caps.flags + & MLX4_DEV_CAP_FLAG_UDP_RSS)) { + mlx4_warn(mdev, "UDP RSS is not supported on this device\n"); + params->udp_rss = 0; + } + for (i = 1; i <= MLX4_MAX_PORTS; i++) { + params->prof[i].rx_pause = 1; + params->prof[i].rx_ppp = pfcrx; + params->prof[i].tx_pause = 1; + params->prof[i].tx_ppp = pfctx; + params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; + params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; + params->prof[i].tx_ring_num = params->num_tx_rings_p_up * + MLX4_EN_NUM_UP; + params->prof[i].rss_rings = 0; + params->prof[i].inline_thold = inline_thold; + } + + return 0; +} + +static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) +{ + struct mlx4_en_dev *endev = ctx; + + return endev->pndev[port]; +} + +static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, + enum mlx4_dev_event event, unsigned long port) +{ + struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; + struct mlx4_en_priv *priv; + + switch (event) { + case MLX4_DEV_EVENT_PORT_UP: + case MLX4_DEV_EVENT_PORT_DOWN: + if (!mdev->pndev[port]) + return; + priv = netdev_priv(mdev->pndev[port]); + /* To prevent races, we poll the link state in a separate + task rather than changing it here */ + priv->link_state = event; + queue_work(mdev->workqueue, &priv->linkstate_task); + break; + + case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: + mlx4_err(mdev, "Internal error detected, restarting device\n"); + break; + + case MLX4_DEV_EVENT_SLAVE_INIT: + case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: + break; + default: + if (port < 1 || port > dev->caps.num_ports || + !mdev->pndev[port]) + return; + mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, + (int) port); + } +} + +static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) +{ + struct mlx4_en_dev *mdev = endev_ptr; + int i; + + mutex_lock(&mdev->state_lock); + mdev->device_up = false; + mutex_unlock(&mdev->state_lock); + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) + if (mdev->pndev[i]) + mlx4_en_destroy_netdev(mdev->pndev[i]); + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_remove_timestamp(mdev); + + flush_workqueue(mdev->workqueue); + destroy_workqueue(mdev->workqueue); + (void) mlx4_mr_free(dev, &mdev->mr); + iounmap(mdev->uar_map); + mlx4_uar_free(dev, &mdev->priv_uar); + mlx4_pd_free(dev, mdev->priv_pdn); + if (mdev->nb.notifier_call) + unregister_netdevice_notifier(&mdev->nb); + kfree(mdev); +} + +static void *mlx4_en_add(struct mlx4_dev *dev) +{ + struct mlx4_en_dev *mdev; + int i; + + printk_once(KERN_INFO "%s", mlx4_en_version); + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + goto err_free_res; + + if (mlx4_pd_alloc(dev, &mdev->priv_pdn)) + goto err_free_dev; + + if (mlx4_uar_alloc(dev, &mdev->priv_uar)) + goto err_pd; + + mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT, + PAGE_SIZE); + if (!mdev->uar_map) + goto err_uar; + spin_lock_init(&mdev->uar_lock); + + mdev->dev = dev; + mdev->dma_device = &dev->persist->pdev->dev; + mdev->pdev = dev->persist->pdev; + mdev->device_up = false; + + mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); + if (!mdev->LSO_support) + mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n"); + + if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, + MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, + 0, 0, &mdev->mr)) { + mlx4_err(mdev, "Failed allocating memory region\n"); 
+ goto err_map; + } + if (mlx4_mr_enable(mdev->dev, &mdev->mr)) { + mlx4_err(mdev, "Failed enabling memory region\n"); + goto err_mr; + } + + /* Build device profile according to supplied module parameters */ + if (mlx4_en_get_profile(mdev)) { + mlx4_err(mdev, "Bad module parameters, aborting\n"); + goto err_mr; + } + + /* Configure which ports to start according to module parameters */ + mdev->port_cnt = 0; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) + mdev->port_cnt++; + + /* Initialize time stamp mechanism */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_init_timestamp(mdev); + + /* Set default number of RX rings*/ + mlx4_en_set_num_rx_rings(mdev); + + /* Create our own workqueue for reset/multicast tasks + * Note: we cannot use the shared workqueue because of deadlocks caused + * by the rtnl lock */ + mdev->workqueue = create_singlethread_workqueue("mlx4_en"); + if (!mdev->workqueue) + goto err_mr; + + /* At this stage all non-port specific tasks are complete: + * mark the card state as up */ + mutex_init(&mdev->state_lock); + mdev->device_up = true; + + /* Setup ports */ + + /* Create a netdev for each port */ + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + mlx4_info(mdev, "Activating port:%d\n", i); + if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) + mdev->pndev[i] = NULL; + } + /* register notifier */ + mdev->nb.notifier_call = mlx4_en_netdev_event; + if (register_netdevice_notifier(&mdev->nb)) { + mdev->nb.notifier_call = NULL; + mlx4_err(mdev, "Failed to create notifier\n"); + } + + return mdev; + +err_mr: + (void) mlx4_mr_free(dev, &mdev->mr); +err_map: + if (mdev->uar_map) + iounmap(mdev->uar_map); +err_uar: + mlx4_uar_free(dev, &mdev->priv_uar); +err_pd: + mlx4_pd_free(dev, mdev->priv_pdn); +err_free_dev: + kfree(mdev); +err_free_res: + return NULL; +} + +static struct mlx4_interface mlx4_en_interface = { + .add = mlx4_en_add, + .remove = mlx4_en_remove, + .event = mlx4_en_event, + .get_dev = mlx4_en_get_netdev, + .protocol = MLX4_PROT_ETH, +}; + +static void mlx4_en_verify_params(void) +{ + if (pfctx > MAX_PFC_TX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfctx, MAX_PFC_TX); + pfctx = 0; + } + + if (pfcrx > MAX_PFC_RX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfcrx, MAX_PFC_RX); + pfcrx = 0; + } + + if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) { + pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n", + inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE); + inline_thold = MAX_INLINE; + } +} + +static int __init mlx4_en_init(void) +{ + mlx4_en_verify_params(); + + return mlx4_register_interface(&mlx4_en_interface); +} + +static void __exit mlx4_en_cleanup(void) +{ + mlx4_unregister_interface(&mlx4_en_interface); +} + +module_init(mlx4_en_init); +module_exit(mlx4_en_cleanup); + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c new file mode 100644 index 000000000..a5a0b8420 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -0,0 +1,3092 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mlx4_en.h" +#include "en_port.h" + +int mlx4_en_setup_tc(struct net_device *dev, u8 up) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int i; + unsigned int offset = 0; + + if (up && up != MLX4_EN_NUM_UP) + return -EINVAL; + + netdev_set_num_tc(dev, up); + + /* Partition Tx queues evenly amongst UP's */ + for (i = 0; i < up; i++) { + netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset); + offset += priv->num_tx_rings_p_up; + } + + return 0; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int mlx4_en_low_latency_recv(struct napi_struct *napi) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; + int done; + + if (!priv->port_up) + return LL_FLUSH_FAILED; + + if (!mlx4_en_cq_lock_poll(cq)) + return LL_FLUSH_BUSY; + + done = mlx4_en_process_rx_cq(dev, cq, 4); + if (likely(done)) + rx_ring->cleaned += done; + else + rx_ring->misses++; + + mlx4_en_cq_unlock_poll(cq); + + return done; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +#ifdef CONFIG_RFS_ACCEL + +struct mlx4_en_filter { + struct list_head next; + struct work_struct work; + + u8 ip_proto; + __be32 src_ip; + __be32 dst_ip; + __be16 src_port; + __be16 dst_port; + + int rxq_index; + struct mlx4_en_priv *priv; + u32 flow_id; /* RFS infrastructure id */ + int id; /* mlx4_en driver id */ + u64 reg_id; /* Flow steering API id */ + u8 activated; /* Used to prevent expiry before filter + * is attached + */ + struct hlist_node filter_chain; +}; + +static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); + +static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) +{ + switch (ip_proto) { + case IPPROTO_UDP: + return MLX4_NET_TRANS_RULE_ID_UDP; + case IPPROTO_TCP: + return MLX4_NET_TRANS_RULE_ID_TCP; + default: + return MLX4_NET_TRANS_RULE_NUM; + } +}; + +static void mlx4_en_filter_work(struct work_struct *work) +{ + struct mlx4_en_filter *filter = container_of(work, + struct mlx4_en_filter, + work); + struct mlx4_en_priv 
*priv = filter->priv; + struct mlx4_spec_list spec_tcp_udp = { + .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), + { + .tcp_udp = { + .dst_port = filter->dst_port, + .dst_port_msk = (__force __be16)-1, + .src_port = filter->src_port, + .src_port_msk = (__force __be16)-1, + }, + }, + }; + struct mlx4_spec_list spec_ip = { + .id = MLX4_NET_TRANS_RULE_ID_IPV4, + { + .ipv4 = { + .dst_ip = filter->dst_ip, + .dst_ip_msk = (__force __be32)-1, + .src_ip = filter->src_ip, + .src_ip_msk = (__force __be32)-1, + }, + }, + }; + struct mlx4_spec_list spec_eth = { + .id = MLX4_NET_TRANS_RULE_ID_ETH, + }; + struct mlx4_net_trans_rule rule = { + .list = LIST_HEAD_INIT(rule.list), + .queue_mode = MLX4_NET_TRANS_Q_LIFO, + .exclusive = 1, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + .port = priv->port, + .priority = MLX4_DOMAIN_RFS, + }; + int rc; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) { + en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", + filter->ip_proto); + goto ignore; + } + list_add_tail(&spec_eth.list, &rule.list); + list_add_tail(&spec_ip.list, &rule.list); + list_add_tail(&spec_tcp_udp.list, &rule.list); + + rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; + memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); + memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + + filter->activated = 0; + + if (filter->reg_id) { + rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); + if (rc && rc != -ENOENT) + en_err(priv, "Error detaching flow. rc = %d\n", rc); + } + + rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); + if (rc) + en_err(priv, "Error attaching flow. err = %d\n", rc); + +ignore: + mlx4_en_filter_rfs_expire(priv); + + filter->activated = 1; +} + +static inline struct hlist_head * +filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, + __be16 src_port, __be16 dst_port) +{ + unsigned long l; + int bucket_idx; + + l = (__force unsigned long)src_port | + ((__force unsigned long)dst_port << 2); + l ^= (__force unsigned long)(src_ip ^ dst_ip); + + bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); + + return &priv->filter_hash[bucket_idx]; +} + +static struct mlx4_en_filter * +mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, + __be32 dst_ip, u8 ip_proto, __be16 src_port, + __be16 dst_port, u32 flow_id) +{ + struct mlx4_en_filter *filter = NULL; + + filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); + if (!filter) + return NULL; + + filter->priv = priv; + filter->rxq_index = rxq_index; + INIT_WORK(&filter->work, mlx4_en_filter_work); + + filter->src_ip = src_ip; + filter->dst_ip = dst_ip; + filter->ip_proto = ip_proto; + filter->src_port = src_port; + filter->dst_port = dst_port; + + filter->flow_id = flow_id; + + filter->id = priv->last_filter_id++ % RPS_NO_FILTER; + + list_add_tail(&filter->next, &priv->filters); + hlist_add_head(&filter->filter_chain, + filter_hash_bucket(priv, src_ip, dst_ip, src_port, + dst_port)); + + return filter; +} + +static void mlx4_en_filter_free(struct mlx4_en_filter *filter) +{ + struct mlx4_en_priv *priv = filter->priv; + int rc; + + list_del(&filter->next); + + rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); + if (rc && rc != -ENOENT) + en_err(priv, "Error detaching flow. 
rc = %d\n", rc); + + kfree(filter); +} + +static inline struct mlx4_en_filter * +mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, + u8 ip_proto, __be16 src_port, __be16 dst_port) +{ + struct mlx4_en_filter *filter; + struct mlx4_en_filter *ret = NULL; + + hlist_for_each_entry(filter, + filter_hash_bucket(priv, src_ip, dst_ip, + src_port, dst_port), + filter_chain) { + if (filter->src_ip == src_ip && + filter->dst_ip == dst_ip && + filter->ip_proto == ip_proto && + filter->src_port == src_port && + filter->dst_port == dst_port) { + ret = filter; + break; + } + } + + return ret; +} + +static int +mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct mlx4_en_priv *priv = netdev_priv(net_dev); + struct mlx4_en_filter *filter; + const struct iphdr *ip; + const __be16 *ports; + u8 ip_proto; + __be32 src_ip; + __be32 dst_ip; + __be16 src_port; + __be16 dst_port; + int nhoff = skb_network_offset(skb); + int ret = 0; + + if (skb->protocol != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + + ip = (const struct iphdr *)(skb->data + nhoff); + if (ip_is_fragment(ip)) + return -EPROTONOSUPPORT; + + if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) + return -EPROTONOSUPPORT; + ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + + ip_proto = ip->protocol; + src_ip = ip->saddr; + dst_ip = ip->daddr; + src_port = ports[0]; + dst_port = ports[1]; + + spin_lock_bh(&priv->filters_lock); + filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, + src_port, dst_port); + if (filter) { + if (filter->rxq_index == rxq_index) + goto out; + + filter->rxq_index = rxq_index; + } else { + filter = mlx4_en_filter_alloc(priv, rxq_index, + src_ip, dst_ip, ip_proto, + src_port, dst_port, flow_id); + if (!filter) { + ret = -ENOMEM; + goto err; + } + } + + queue_work(priv->mdev->workqueue, &filter->work); + +out: + ret = filter->id; +err: + spin_unlock_bh(&priv->filters_lock); + + return ret; +} + +void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv) +{ + struct mlx4_en_filter *filter, *tmp; + LIST_HEAD(del_list); + + spin_lock_bh(&priv->filters_lock); + list_for_each_entry_safe(filter, tmp, &priv->filters, next) { + list_move(&filter->next, &del_list); + hlist_del(&filter->filter_chain); + } + spin_unlock_bh(&priv->filters_lock); + + list_for_each_entry_safe(filter, tmp, &del_list, next) { + cancel_work_sync(&filter->work); + mlx4_en_filter_free(filter); + } +} + +static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) +{ + struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; + LIST_HEAD(del_list); + int i = 0; + + spin_lock_bh(&priv->filters_lock); + list_for_each_entry_safe(filter, tmp, &priv->filters, next) { + if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) + break; + + if (filter->activated && + !work_pending(&filter->work) && + rps_may_expire_flow(priv->dev, + filter->rxq_index, filter->flow_id, + filter->id)) { + list_move(&filter->next, &del_list); + hlist_del(&filter->filter_chain); + } else + last_filter = filter; + + i++; + } + + if (last_filter && (&last_filter->next != priv->filters.next)) + list_move(&priv->filters, &last_filter->next); + + spin_unlock_bh(&priv->filters_lock); + + list_for_each_entry_safe(filter, tmp, &del_list, next) + mlx4_en_filter_free(filter); +} +#endif + +static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + int idx; + + 
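+ /* Record the VID locally, then (if the device and port are up) push the refreshed VLAN filter to the HW and register the VID; failures below are only logged and the function still returns 0. */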
en_dbg(HW, priv, "adding VLAN:%d\n", vid); + + set_bit(vid, priv->active_vlans); + + /* Add VID to port VLAN filter */ + mutex_lock(&mdev->state_lock); + if (mdev->device_up && priv->port_up) { + err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); + if (err) + en_err(priv, "Failed configuring VLAN filter\n"); + } + if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) + en_dbg(HW, priv, "failed adding vlan %d\n", vid); + mutex_unlock(&mdev->state_lock); + + return 0; +} + +static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + en_dbg(HW, priv, "Killing VID:%d\n", vid); + + clear_bit(vid, priv->active_vlans); + + /* Remove VID from port VLAN filter */ + mutex_lock(&mdev->state_lock); + mlx4_unregister_vlan(mdev->dev, priv->port, vid); + + if (mdev->device_up && priv->port_up) { + err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); + if (err) + en_err(priv, "Failed configuring VLAN filter\n"); + } + mutex_unlock(&mdev->state_lock); + + return 0; +} + +static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) +{ + int i; + for (i = ETH_ALEN - 1; i >= 0; --i) { + dst_mac[i] = src_mac & 0xff; + src_mac >>= 8; + } + memset(&dst_mac[ETH_ALEN], 0, 2); +} + + +static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, + int qpn, u64 *reg_id) +{ + int err; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || + priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) + return 0; /* do nothing */ + + err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, + MLX4_DOMAIN_NIC, reg_id); + if (err) { + en_err(priv, "failed to add vxlan steering rule, err %d\n", err); + return err; + } + en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); + return 0; +} + + +static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, + unsigned char *mac, int *qpn, u64 *reg_id) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int err; + + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = *qpn; + memcpy(&gid[10], mac, ETH_ALEN); + gid[5] = priv->port; + + err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + struct mlx4_spec_list spec_eth = { {NULL} }; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + .priority = MLX4_DOMAIN_NIC, + }; + + rule.port = priv->port; + rule.qpn = *qpn; + INIT_LIST_HEAD(&rule.list); + + spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); + memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + list_add_tail(&spec_eth.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + break; + } + default: + return -EINVAL; + } + if (err) + en_warn(priv, "Failed Attaching Unicast\n"); + + return err; +} + +static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, + unsigned char *mac, int qpn, u64 reg_id) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = qpn; + memcpy(&gid[10], mac, ETH_ALEN); + gid[5] = 
priv->port; + + mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + mlx4_flow_detach(dev, reg_id); + break; + } + default: + en_err(priv, "Invalid steering mode.\n"); + } +} + +static int mlx4_en_get_qp(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + struct mlx4_mac_entry *entry; + int index = 0; + int err = 0; + u64 reg_id = 0; + int *qpn = &priv->base_qpn; + u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); + + en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", + priv->dev->dev_addr); + index = mlx4_register_mac(dev, priv->port, mac); + if (index < 0) { + err = index; + en_err(priv, "Failed adding MAC: %pM\n", + priv->dev->dev_addr); + return err; + } + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + int base_qpn = mlx4_get_base_qpn(dev, priv->port); + *qpn = base_qpn + index; + return 0; + } + + err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP); + en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); + if (err) { + en_err(priv, "Failed to reserve qp for mac registration\n"); + goto qp_err; + } + + err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id); + if (err) + goto steer_err; + + err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, + &priv->tunnel_reg_id); + if (err) + goto tunnel_err; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto alloc_err; + } + memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); + memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); + entry->reg_id = reg_id; + + hlist_add_head_rcu(&entry->hlist, + &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); + + return 0; + +alloc_err: + if (priv->tunnel_reg_id) + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); +tunnel_err: + mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); + +steer_err: + mlx4_qp_release_range(dev, *qpn, 1); + +qp_err: + mlx4_unregister_mac(dev, priv->port, mac); + return err; +} + +static void mlx4_en_put_qp(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int qpn = priv->base_qpn; + u64 mac; + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { + mac = mlx4_mac_to_u64(priv->dev->dev_addr); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + priv->dev->dev_addr); + mlx4_unregister_mac(dev, priv->port, mac); + } else { + struct mlx4_mac_entry *entry; + struct hlist_node *tmp; + struct hlist_head *bucket; + unsigned int i; + + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + mac = mlx4_mac_to_u64(entry->mac); + en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", + entry->mac); + mlx4_en_uc_steer_release(priv, entry->mac, + qpn, entry->reg_id); + + mlx4_unregister_mac(dev, priv->port, mac); + hlist_del_rcu(&entry->hlist); + kfree_rcu(entry, rcu); + } + } + + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + + en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", + priv->port, qpn); + mlx4_qp_release_range(dev, qpn, 1); + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; + } +} + +static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, + unsigned char *new_mac, unsigned char *prev_mac) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_dev *dev = mdev->dev; + int err = 0; + u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); + + 
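+ /* In B0/device-managed steering, find prev_mac in the MAC hash, release its unicast steering rule and MAC registration, then register new_mac and re-attach unicast and tunnel steering on the same QP; in A0 steering the MAC index is simply remapped via __mlx4_replace_mac(). */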
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { + struct hlist_head *bucket; + unsigned int mac_hash; + struct mlx4_mac_entry *entry; + struct hlist_node *tmp; + u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); + + bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, prev_mac)) { + mlx4_en_uc_steer_release(priv, entry->mac, + qpn, entry->reg_id); + mlx4_unregister_mac(dev, priv->port, + prev_mac_u64); + hlist_del_rcu(&entry->hlist); + synchronize_rcu(); + memcpy(entry->mac, new_mac, ETH_ALEN); + entry->reg_id = 0; + mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX]; + hlist_add_head_rcu(&entry->hlist, + &priv->mac_hash[mac_hash]); + mlx4_register_mac(dev, priv->port, new_mac_u64); + err = mlx4_en_uc_steer_add(priv, new_mac, + &qpn, + &entry->reg_id); + if (err) + return err; + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, + &priv->tunnel_reg_id); + return err; + } + } + return -EINVAL; + } + + return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); +} + +static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, + unsigned char new_mac[ETH_ALEN + 2]) +{ + int err = 0; + + if (priv->port_up) { + /* Remove old MAC and insert the new one */ + err = mlx4_en_replace_mac(priv, priv->base_qpn, + new_mac, priv->current_mac); + if (err) + en_err(priv, "Failed changing HW MAC address\n"); + } else + en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); + + if (!err) + memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); + + return err; +} + +static int mlx4_en_set_mac(struct net_device *dev, void *addr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct sockaddr *saddr = addr; + unsigned char new_mac[ETH_ALEN + 2]; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + mutex_lock(&mdev->state_lock); + memcpy(new_mac, saddr->sa_data, ETH_ALEN); + err = mlx4_en_do_set_mac(priv, new_mac); + if (!err) + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); + mutex_unlock(&mdev->state_lock); + + return err; +} + +static void mlx4_en_clear_list(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_mc_list *tmp, *mc_to_del; + + list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { + list_del(&mc_to_del->list); + kfree(mc_to_del); + } +} + +static void mlx4_en_cache_mclist(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct netdev_hw_addr *ha; + struct mlx4_en_mc_list *tmp; + + mlx4_en_clear_list(dev); + netdev_for_each_mc_addr(ha, dev) { + tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); + if (!tmp) { + mlx4_en_clear_list(dev); + return; + } + memcpy(tmp->addr, ha->addr, ETH_ALEN); + list_add_tail(&tmp->list, &priv->mc_list); + } +} + +static void update_mclist_flags(struct mlx4_en_priv *priv, + struct list_head *dst, + struct list_head *src) +{ + struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; + bool found; + + /* Find all the entries that should be removed from dst, + * These are the entries that are not found in src + */ + list_for_each_entry(dst_tmp, dst, list) { + found = false; + list_for_each_entry(src_tmp, src, list) { + if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { + found = true; + break; + } + } + if (!found) + dst_tmp->action = MCLIST_REM; + } + + /* Add entries that 
exist in src but not in dst + * mark them as need to add + */ + list_for_each_entry(src_tmp, src, list) { + found = false; + list_for_each_entry(dst_tmp, dst, list) { + if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { + dst_tmp->action = MCLIST_NONE; + found = true; + break; + } + } + if (!found) { + new_mc = kmemdup(src_tmp, + sizeof(struct mlx4_en_mc_list), + GFP_KERNEL); + if (!new_mc) + return; + + new_mc->action = MCLIST_ADD; + list_add_tail(&new_mc->list, dst); + } + } +} + +static void mlx4_en_set_rx_mode(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (!priv->port_up) + return; + + queue_work(priv->mdev->workqueue, &priv->rx_mode_task); +} + +static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, + struct mlx4_en_dev *mdev) +{ + int err = 0; + + if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { + if (netif_msg_rx_status(priv)) + en_warn(priv, "Entering promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_PROMISC; + + /* Enable promiscouos mode */ + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_add(mdev->dev, + priv->port, + priv->base_qpn, + MLX4_FS_ALL_DEFAULT); + if (err) + en_err(priv, "Failed enabling promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_unicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed enabling unicast promiscuous mode\n"); + + /* Add the default qp number as multicast + * promisc + */ + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { + err = mlx4_multicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed enabling multicast promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + } + break; + + case MLX4_STEERING_MODE_A0: + err = mlx4_SET_PORT_qpn_calc(mdev->dev, + priv->port, + priv->base_qpn, + 1); + if (err) + en_err(priv, "Failed enabling promiscuous mode\n"); + break; + } + + /* Disable port multicast filter (unconditionally) */ + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_DISABLE); + if (err) + en_err(priv, "Failed disabling multicast filter\n"); + } +} + +static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, + struct mlx4_en_dev *mdev) +{ + int err = 0; + + if (netif_msg_rx_status(priv)) + en_warn(priv, "Leaving promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_PROMISC; + + /* Disable promiscouos mode */ + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_ALL_DEFAULT); + if (err) + en_err(priv, "Failed disabling promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_unicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed disabling unicast promiscuous mode\n"); + /* Disable Multicast promisc */ + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { + err = mlx4_multicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed disabling multicast promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + break; + + case MLX4_STEERING_MODE_A0: + err = mlx4_SET_PORT_qpn_calc(mdev->dev, + priv->port, + priv->base_qpn, 0); + if (err) + en_err(priv, "Failed disabling promiscuous mode\n"); + break; + } +} + +static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, + struct net_device *dev, + struct 
mlx4_en_dev *mdev) +{ + struct mlx4_en_mc_list *mclist, *tmp; + u64 mcast_addr = 0; + u8 mc_list[16] = {0}; + int err = 0; + + /* Enable/disable the multicast filter according to IFF_ALLMULTI */ + if (dev->flags & IFF_ALLMULTI) { + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_DISABLE); + if (err) + en_err(priv, "Failed disabling multicast filter\n"); + + /* Add the default qp number as multicast promisc */ + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_add(mdev->dev, + priv->port, + priv->base_qpn, + MLX4_FS_MC_DEFAULT); + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_multicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + break; + + case MLX4_STEERING_MODE_A0: + break; + } + if (err) + en_err(priv, "Failed entering multicast promisc mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + } + } else { + /* Disable Multicast promisc */ + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_MC_DEFAULT); + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_multicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + break; + + case MLX4_STEERING_MODE_A0: + break; + } + if (err) + en_err(priv, "Failed disabling multicast promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_DISABLE); + if (err) + en_err(priv, "Failed disabling multicast filter\n"); + + /* Flush mcast filter and init it with broadcast address */ + mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, + 1, MLX4_MCAST_CONFIG); + + /* Update multicast list - we cache all addresses so they won't + * change while HW is updated holding the command semaphor */ + netif_addr_lock_bh(dev); + mlx4_en_cache_mclist(dev); + netif_addr_unlock_bh(dev); + list_for_each_entry(mclist, &priv->mc_list, list) { + mcast_addr = mlx4_mac_to_u64(mclist->addr); + mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, + mcast_addr, 0, MLX4_MCAST_CONFIG); + } + err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, + 0, MLX4_MCAST_ENABLE); + if (err) + en_err(priv, "Failed enabling multicast filter\n"); + + update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); + list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { + if (mclist->action == MCLIST_REM) { + /* detach this address and delete from list */ + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + mc_list[5] = priv->port; + err = mlx4_multicast_detach(mdev->dev, + &priv->rss_map.indir_qp, + mc_list, + MLX4_PROT_ETH, + mclist->reg_id); + if (err) + en_err(priv, "Fail to detach multicast address\n"); + + if (mclist->tunnel_reg_id) { + err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); + if (err) + en_err(priv, "Failed to detach multicast address\n"); + } + + /* remove from list */ + list_del(&mclist->list); + kfree(mclist); + } else if (mclist->action == MCLIST_ADD) { + /* attach the address */ + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + /* needed for B0 steering support */ + mc_list[5] = priv->port; + err = mlx4_multicast_attach(mdev->dev, + &priv->rss_map.indir_qp, + mc_list, + priv->port, 0, + MLX4_PROT_ETH, + &mclist->reg_id); + if (err) + en_err(priv, "Fail to attach multicast address\n"); + + err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, + 
&mclist->tunnel_reg_id); + if (err) + en_err(priv, "Failed to attach multicast address\n"); + } + } + } +} + +static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv, + struct net_device *dev, + struct mlx4_en_dev *mdev) +{ + struct netdev_hw_addr *ha; + struct mlx4_mac_entry *entry; + struct hlist_node *tmp; + bool found; + u64 mac; + int err = 0; + struct hlist_head *bucket; + unsigned int i; + int removed = 0; + u32 prev_flags; + + /* Note that we do not need to protect our mac_hash traversal with rcu, + * since all modification code is protected by mdev->state_lock + */ + + /* find what to remove */ + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { + bucket = &priv->mac_hash[i]; + hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { + found = false; + netdev_for_each_uc_addr(ha, dev) { + if (ether_addr_equal_64bits(entry->mac, + ha->addr)) { + found = true; + break; + } + } + + /* MAC address of the port is not in uc list */ + if (ether_addr_equal_64bits(entry->mac, + priv->current_mac)) + found = true; + + if (!found) { + mac = mlx4_mac_to_u64(entry->mac); + mlx4_en_uc_steer_release(priv, entry->mac, + priv->base_qpn, + entry->reg_id); + mlx4_unregister_mac(mdev->dev, priv->port, mac); + + hlist_del_rcu(&entry->hlist); + kfree_rcu(entry, rcu); + en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n", + entry->mac, priv->port); + ++removed; + } + } + } + + /* if we didn't remove anything, there is no use in trying to add + * again once we are in a forced promisc mode state + */ + if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed) + return; + + prev_flags = priv->flags; + priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; + + /* find what to add */ + netdev_for_each_uc_addr(ha, dev) { + found = false; + bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; + hlist_for_each_entry(entry, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, ha->addr)) { + found = true; + break; + } + } + + if (!found) { + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n", + ha->addr, priv->port); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } + mac = mlx4_mac_to_u64(ha->addr); + memcpy(entry->mac, ha->addr, ETH_ALEN); + err = mlx4_register_mac(mdev->dev, priv->port, mac); + if (err < 0) { + en_err(priv, "Failed registering MAC %pM on port %d: %d\n", + ha->addr, priv->port, err); + kfree(entry); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } + err = mlx4_en_uc_steer_add(priv, ha->addr, + &priv->base_qpn, + &entry->reg_id); + if (err) { + en_err(priv, "Failed adding MAC %pM on port %d: %d\n", + ha->addr, priv->port, err); + mlx4_unregister_mac(mdev->dev, priv->port, mac); + kfree(entry); + priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; + break; + } else { + unsigned int mac_hash; + en_dbg(DRV, priv, "Added MAC %pM on port:%d\n", + ha->addr, priv->port); + mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + hlist_add_head_rcu(&entry->hlist, bucket); + } + } + } + + if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) { + en_warn(priv, "Forcing promiscuous mode on port:%d\n", + priv->port); + } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) { + en_warn(priv, "Stop forcing promiscuous mode on port:%d\n", + priv->port); + } +} + +static void mlx4_en_do_set_rx_mode(struct work_struct *work) +{ + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + rx_mode_task); + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + + 
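+ /* Deferred rx-mode work: under the state lock, bail out if the device or port is down, refresh carrier state from a port query, then apply unicast filtering, promiscuous on/off and the multicast configuration in that order. */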
mutex_lock(&mdev->state_lock); + if (!mdev->device_up) { + en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); + goto out; + } + if (!priv->port_up) { + en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); + goto out; + } + + if (!netif_carrier_ok(dev)) { + if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { + if (priv->port_state.link_state) { + priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; + netif_carrier_on(dev); + en_dbg(LINK, priv, "Link Up\n"); + } + } + } + + if (dev->priv_flags & IFF_UNICAST_FLT) + mlx4_en_do_uc_filter(priv, dev, mdev); + + /* Promiscuous mode: disable all filters */ + if ((dev->flags & IFF_PROMISC) || + (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { + mlx4_en_set_promisc_mode(priv, mdev); + goto out; + } + + /* Not in promiscuous mode */ + if (priv->flags & MLX4_EN_FLAG_PROMISC) + mlx4_en_clear_promisc_mode(priv, mdev); + + mlx4_en_do_multicast(priv, dev, mdev); +out: + mutex_unlock(&mdev->state_lock); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mlx4_en_netpoll(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_cq *cq; + int i; + + for (i = 0; i < priv->rx_ring_num; i++) { + cq = priv->rx_cq[i]; + napi_schedule(&cq->napi); + } +} +#endif + +static void mlx4_en_tx_timeout(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int i; + + if (netif_msg_timer(priv)) + en_warn(priv, "Tx timeout called on port:%d\n", priv->port); + + for (i = 0; i < priv->tx_ring_num; i++) { + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) + continue; + en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n", + i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn, + priv->tx_ring[i]->cons, priv->tx_ring[i]->prod); + } + + priv->port_stats.tx_timeout++; + en_dbg(DRV, priv, "Scheduling watchdog\n"); + queue_work(mdev->workqueue, &priv->watchdog_task); +} + + +static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + spin_lock_bh(&priv->stats_lock); + memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); + spin_unlock_bh(&priv->stats_lock); + + return &priv->ret_stats; +} + +static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) +{ + struct mlx4_en_cq *cq; + int i; + + /* If we haven't received a specific coalescing setting + * (module param), we set the moderation parameters as follows: + * - moder_cnt is set to the number of mtu sized packets to + * satisfy our coalescing target. + * - moder_time is set to a fixed value.
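+ * These values are copied into each RX/TX cq's moder_cnt/moder_time + * below; mlx4_en_start_port() then applies them via mlx4_en_set_cq_moder(), + * and mlx4_en_auto_moderation() may re-tune the RX side at runtime.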
+ */ + priv->rx_frames = MLX4_EN_RX_COAL_TARGET; + priv->rx_usecs = MLX4_EN_RX_COAL_TIME; + priv->tx_frames = MLX4_EN_TX_COAL_PKTS; + priv->tx_usecs = MLX4_EN_TX_COAL_TIME; + en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n", + priv->dev->mtu, priv->rx_frames, priv->rx_usecs); + + /* Setup cq moderation params */ + for (i = 0; i < priv->rx_ring_num; i++) { + cq = priv->rx_cq[i]; + cq->moder_cnt = priv->rx_frames; + cq->moder_time = priv->rx_usecs; + priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; + priv->last_moder_packets[i] = 0; + priv->last_moder_bytes[i] = 0; + } + + for (i = 0; i < priv->tx_ring_num; i++) { + cq = priv->tx_cq[i]; + cq->moder_cnt = priv->tx_frames; + cq->moder_time = priv->tx_usecs; + } + + /* Reset auto-moderation params */ + priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; + priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; + priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; + priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; + priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; + priv->adaptive_rx_coal = 1; + priv->last_moder_jiffies = 0; + priv->last_moder_tx_packets = 0; +} + +static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) +{ + unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); + struct mlx4_en_cq *cq; + unsigned long packets; + unsigned long rate; + unsigned long avg_pkt_size; + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long rx_pkt_diff; + int moder_time; + int ring, err; + + if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + spin_lock_bh(&priv->stats_lock); + rx_packets = priv->rx_ring[ring]->packets; + rx_bytes = priv->rx_ring[ring]->bytes; + spin_unlock_bh(&priv->stats_lock); + + rx_pkt_diff = ((unsigned long) (rx_packets - + priv->last_moder_packets[ring])); + packets = rx_pkt_diff; + rate = packets * HZ / period; + avg_pkt_size = packets ?
((unsigned long) (rx_bytes - + priv->last_moder_bytes[ring])) / packets : 0; + + /* Apply auto-moderation only when packet rate + * exceeds a rate that it matters */ + if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && + avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { + if (rate < priv->pkt_rate_low) + moder_time = priv->rx_usecs_low; + else if (rate > priv->pkt_rate_high) + moder_time = priv->rx_usecs_high; + else + moder_time = (rate - priv->pkt_rate_low) * + (priv->rx_usecs_high - priv->rx_usecs_low) / + (priv->pkt_rate_high - priv->pkt_rate_low) + + priv->rx_usecs_low; + } else { + moder_time = priv->rx_usecs_low; + } + + if (moder_time != priv->last_moder_time[ring]) { + priv->last_moder_time[ring] = moder_time; + cq = priv->rx_cq[ring]; + cq->moder_time = moder_time; + cq->moder_cnt = priv->rx_frames; + err = mlx4_en_set_cq_moder(priv, cq); + if (err) + en_err(priv, "Failed modifying moderation for cq:%d\n", + ring); + } + priv->last_moder_packets[ring] = rx_packets; + priv->last_moder_bytes[ring] = rx_bytes; + } + + priv->last_moder_jiffies = jiffies; +} + +static void mlx4_en_do_get_stats(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, + stats_task); + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + mutex_lock(&mdev->state_lock); + if (mdev->device_up) { + if (priv->port_up) { + err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); + if (err) + en_dbg(HW, priv, "Could not update stats\n"); + + mlx4_en_auto_moderation(priv); + } + + queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); + } + if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { + mlx4_en_do_set_mac(priv, priv->current_mac); + mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; + } + mutex_unlock(&mdev->state_lock); +} + +/* mlx4_en_service_task - Run service task for tasks that needed to be done + * periodically + */ +static void mlx4_en_service_task(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, + service_task); + struct mlx4_en_dev *mdev = priv->mdev; + + mutex_lock(&mdev->state_lock); + if (mdev->device_up) { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_ptp_overflow_check(mdev); + + mlx4_en_recover_from_oom(priv); + queue_delayed_work(mdev->workqueue, &priv->service_task, + SERVICE_TASK_DELAY); + } + mutex_unlock(&mdev->state_lock); +} + +static void mlx4_en_linkstate(struct work_struct *work) +{ + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + linkstate_task); + struct mlx4_en_dev *mdev = priv->mdev; + int linkstate = priv->link_state; + + mutex_lock(&mdev->state_lock); + /* If observable port state changed set carrier state and + * report to system log */ + if (priv->last_link_state != linkstate) { + if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { + en_info(priv, "Link Down\n"); + netif_carrier_off(priv->dev); + } else { + en_info(priv, "Link Up\n"); + netif_carrier_on(priv->dev); + } + } + priv->last_link_state = linkstate; + mutex_unlock(&mdev->state_lock); +} + +static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) +{ + struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; + int numa_node = priv->mdev->dev->numa_node; + + if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), + ring->affinity_mask); + return 0; +} + +static 
void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) +{ + free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); +} + +int mlx4_en_start_port(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_cq *cq; + struct mlx4_en_tx_ring *tx_ring; + int rx_index = 0; + int tx_index = 0; + int err = 0; + int i; + int j; + u8 mc_list[16] = {0}; + + if (priv->port_up) { + en_dbg(DRV, priv, "start port called while port already up\n"); + return 0; + } + + INIT_LIST_HEAD(&priv->mc_list); + INIT_LIST_HEAD(&priv->curr_list); + INIT_LIST_HEAD(&priv->ethtool_list); + memset(&priv->ethtool_rules[0], 0, + sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); + + /* Calculate Rx buf size */ + dev->mtu = min(dev->mtu, priv->max_mtu); + mlx4_en_calc_rx_buf(dev); + en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); + + /* Configure rx cq's and rings */ + err = mlx4_en_activate_rx_rings(priv); + if (err) { + en_err(priv, "Failed to activate RX rings\n"); + return err; + } + for (i = 0; i < priv->rx_ring_num; i++) { + cq = priv->rx_cq[i]; + + mlx4_en_cq_init_lock(cq); + + err = mlx4_en_init_affinity_hint(priv, i); + if (err) { + en_err(priv, "Failed preparing IRQ affinity hint\n"); + goto cq_err; + } + + err = mlx4_en_activate_cq(priv, cq, i); + if (err) { + en_err(priv, "Failed activating Rx CQ\n"); + mlx4_en_free_affinity_hint(priv, i); + goto cq_err; + } + + for (j = 0; j < cq->size; j++) { + struct mlx4_cqe *cqe = NULL; + + cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + + priv->cqe_factor; + cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; + } + + err = mlx4_en_set_cq_moder(priv, cq); + if (err) { + en_err(priv, "Failed setting cq moderation parameters\n"); + mlx4_en_deactivate_cq(priv, cq); + mlx4_en_free_affinity_hint(priv, i); + goto cq_err; + } + mlx4_en_arm_cq(priv, cq); + priv->rx_ring[i]->cqn = cq->mcq.cqn; + ++rx_index; + } + + /* Set qp number */ + en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); + err = mlx4_en_get_qp(priv); + if (err) { + en_err(priv, "Failed getting eth qp\n"); + goto cq_err; + } + mdev->mac_removed[priv->port] = 0; + + err = mlx4_en_config_rss_steer(priv); + if (err) { + en_err(priv, "Failed configuring rss steering\n"); + goto mac_err; + } + + err = mlx4_en_create_drop_qp(priv); + if (err) + goto rss_err; + + /* Configure tx cq's and rings */ + for (i = 0; i < priv->tx_ring_num; i++) { + /* Configure cq */ + cq = priv->tx_cq[i]; + err = mlx4_en_activate_cq(priv, cq, i); + if (err) { + en_err(priv, "Failed allocating Tx CQ\n"); + goto tx_err; + } + err = mlx4_en_set_cq_moder(priv, cq); + if (err) { + en_err(priv, "Failed setting cq moderation parameters\n"); + mlx4_en_deactivate_cq(priv, cq); + goto tx_err; + } + en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); + cq->buf->wqe_index = cpu_to_be16(0xffff); + + /* Configure ring */ + tx_ring = priv->tx_ring[i]; + err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, + i / priv->num_tx_rings_p_up); + if (err) { + en_err(priv, "Failed allocating Tx ring\n"); + mlx4_en_deactivate_cq(priv, cq); + goto tx_err; + } + tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + + /* Arm CQ for TX completions */ + mlx4_en_arm_cq(priv, cq); + + /* Set initial ownership of all Tx TXBBs to SW (1) */ + for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) + *((u32 *) (tx_ring->buf + j)) = 0xffffffff; + ++tx_index; + } + + /* Configure port */ + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + 
priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp); + if (err) { + en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", + priv->port, err); + goto tx_err; + } + /* Set default qp number */ + err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); + if (err) { + en_err(priv, "Failed setting default qp numbers\n"); + goto tx_err; + } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); + if (err) { + en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", + err); + goto tx_err; + } + } + + /* Init port */ + en_dbg(HW, priv, "Initializing port\n"); + err = mlx4_INIT_PORT(mdev->dev, priv->port); + if (err) { + en_err(priv, "Failed Initializing port\n"); + goto tx_err; + } + + /* Attach rx QP to broadcast address */ + eth_broadcast_addr(&mc_list[10]); + mc_list[5] = priv->port; /* needed for B0 steering support */ + if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, + priv->port, 0, MLX4_PROT_ETH, + &priv->broadcast_id)) + mlx4_warn(mdev, "Failed Attaching Broadcast\n"); + + /* Must redo promiscuous mode setup. */ + priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); + + /* Schedule multicast task to populate multicast list */ + queue_work(mdev->workqueue, &priv->rx_mode_task); + +#ifdef CONFIG_MLX4_EN_VXLAN + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + vxlan_get_rx_port(dev); +#endif + priv->port_up = true; + netif_tx_start_all_queues(dev); + netif_device_attach(dev); + + return 0; + +tx_err: + while (tx_index--) { + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); + } + mlx4_en_destroy_drop_qp(priv); +rss_err: + mlx4_en_release_rss_steer(priv); +mac_err: + mlx4_en_put_qp(priv); +cq_err: + while (rx_index--) { + mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); + mlx4_en_free_affinity_hint(priv, rx_index); + } + for (i = 0; i < priv->rx_ring_num; i++) + mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); + + return err; /* need to close devices */ +} + + +void mlx4_en_stop_port(struct net_device *dev, int detach) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_mc_list *mclist, *tmp; + struct ethtool_flow_id *flow, *tmp_flow; + int i; + u8 mc_list[16] = {0}; + + if (!priv->port_up) { + en_dbg(DRV, priv, "stop port called while port already down\n"); + return; + } + + /* close port */ + mlx4_CLOSE_PORT(mdev->dev, priv->port); + + /* Synchronize with tx routine */ + netif_tx_lock_bh(dev); + if (detach) + netif_device_detach(dev); + netif_tx_stop_all_queues(dev); + netif_tx_unlock_bh(dev); + + netif_tx_disable(dev); + + /* Set port as not active */ + priv->port_up = false; + + /* Promiscuous mode */ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + priv->flags &= ~(MLX4_EN_FLAG_PROMISC | + MLX4_EN_FLAG_MC_PROMISC); + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_ALL_DEFAULT); + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_MC_DEFAULT); + } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { + priv->flags &= ~MLX4_EN_FLAG_PROMISC; + + /* Disable promiscuous mode */ + mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, + priv->port); + + /* Disable Multicast promisc */ + if (priv->flags &
MLX4_EN_FLAG_MC_PROMISC) { + mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, + priv->port); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + } + + /* Detach All multicasts */ + eth_broadcast_addr(&mc_list[10]); + mc_list[5] = priv->port; /* needed for B0 steering support */ + mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, + MLX4_PROT_ETH, priv->broadcast_id); + list_for_each_entry(mclist, &priv->curr_list, list) { + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + mc_list[5] = priv->port; + mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, + mc_list, MLX4_PROT_ETH, mclist->reg_id); + if (mclist->tunnel_reg_id) + mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); + } + mlx4_en_clear_list(dev); + list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { + list_del(&mclist->list); + kfree(mclist); + } + + /* Flush multicast filter */ + mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); + + /* Remove flow steering rules for the port*/ + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + ASSERT_RTNL(); + list_for_each_entry_safe(flow, tmp_flow, + &priv->ethtool_list, list) { + mlx4_flow_detach(mdev->dev, flow->id); + list_del(&flow->list); + } + } + + mlx4_en_destroy_drop_qp(priv); + + /* Free TX Rings */ + for (i = 0; i < priv->tx_ring_num; i++) { + mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); + mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); + } + msleep(10); + + for (i = 0; i < priv->tx_ring_num; i++) + mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); + + /* Free RSS qps */ + mlx4_en_release_rss_steer(priv); + + /* Unregister Mac address for the port */ + mlx4_en_put_qp(priv); + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) + mdev->mac_removed[priv->port] = 1; + + /* Free RX Rings */ + for (i = 0; i < priv->rx_ring_num; i++) { + struct mlx4_en_cq *cq = priv->rx_cq[i]; + + local_bh_disable(); + while (!mlx4_en_cq_lock_napi(cq)) { + pr_info("CQ %d locked\n", i); + mdelay(1); + } + local_bh_enable(); + + napi_synchronize(&cq->napi); + mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); + mlx4_en_deactivate_cq(priv, cq); + + mlx4_en_free_affinity_hint(priv, i); + } +} + +static void mlx4_en_restart(struct work_struct *work) +{ + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + watchdog_task); + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + + en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + mlx4_en_stop_port(dev, 1); + if (mlx4_en_start_port(dev)) + en_err(priv, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&mdev->state_lock); +} + +static void mlx4_en_clear_stats(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int i; + + if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) + en_dbg(HW, priv, "Failed dumping statistics\n"); + + memset(&priv->stats, 0, sizeof(priv->stats)); + memset(&priv->pstats, 0, sizeof(priv->pstats)); + memset(&priv->pkstats, 0, sizeof(priv->pkstats)); + memset(&priv->port_stats, 0, sizeof(priv->port_stats)); + memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); + memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats)); + memset(&priv->rx_priority_flowstats, 0, + sizeof(priv->rx_priority_flowstats)); + memset(&priv->tx_priority_flowstats, 0, + sizeof(priv->tx_priority_flowstats)); + + for (i = 0; i < priv->tx_ring_num; i++) { + 
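+ /* Zero the per-ring SW counters so that the sums rebuilt by + * mlx4_en_DUMP_ETH_STATS() start from a clean slate. */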
priv->tx_ring[i]->bytes = 0; + priv->tx_ring[i]->packets = 0; + priv->tx_ring[i]->tx_csum = 0; + } + for (i = 0; i < priv->rx_ring_num; i++) { + priv->rx_ring[i]->bytes = 0; + priv->rx_ring[i]->packets = 0; + priv->rx_ring[i]->csum_ok = 0; + priv->rx_ring[i]->csum_none = 0; + priv->rx_ring[i]->csum_complete = 0; + } +} + +static int mlx4_en_open(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + + mutex_lock(&mdev->state_lock); + + if (!mdev->device_up) { + en_err(priv, "Cannot open - device down/disabled\n"); + err = -EBUSY; + goto out; + } + + /* Reset HW statistics and SW counters */ + mlx4_en_clear_stats(dev); + + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port:%d\n", priv->port); + +out: + mutex_unlock(&mdev->state_lock); + return err; +} + + +static int mlx4_en_close(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + en_dbg(IFDOWN, priv, "Close port called\n"); + + mutex_lock(&mdev->state_lock); + + mlx4_en_stop_port(dev, 0); + netif_carrier_off(dev); + + mutex_unlock(&mdev->state_lock); + return 0; +} + +void mlx4_en_free_resources(struct mlx4_en_priv *priv) +{ + int i; + +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); + priv->dev->rx_cpu_rmap = NULL; +#endif + + for (i = 0; i < priv->tx_ring_num; i++) { + if (priv->tx_ring && priv->tx_ring[i]) + mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); + if (priv->tx_cq && priv->tx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); + } + + for (i = 0; i < priv->rx_ring_num; i++) { + if (priv->rx_ring[i]) + mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], + priv->prof->rx_ring_size, priv->stride); + if (priv->rx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); + } + +} + +int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) +{ + struct mlx4_en_port_profile *prof = priv->prof; + int i; + int node; + + /* Create tx Rings */ + for (i = 0; i < priv->tx_ring_num; i++) { + node = cpu_to_node(i % num_online_cpus()); + if (mlx4_en_create_cq(priv, &priv->tx_cq[i], + prof->tx_ring_size, i, TX, node)) + goto err; + + if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], + prof->tx_ring_size, TXBB_SIZE, + node, i)) + goto err; + } + + /* Create rx Rings */ + for (i = 0; i < priv->rx_ring_num; i++) { + node = cpu_to_node(i % num_online_cpus()); + if (mlx4_en_create_cq(priv, &priv->rx_cq[i], + prof->rx_ring_size, i, RX, node)) + goto err; + + if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], + prof->rx_ring_size, priv->stride, + node)) + goto err; + } + +#ifdef CONFIG_RFS_ACCEL + if (priv->mdev->dev->caps.comp_pool) { + priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); + if (!priv->dev->rx_cpu_rmap) + goto err; + } +#endif + + return 0; + +err: + en_err(priv, "Failed to allocate NIC resources\n"); + for (i = 0; i < priv->rx_ring_num; i++) { + if (priv->rx_ring[i]) + mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], + prof->rx_ring_size, + priv->stride); + if (priv->rx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); + } + for (i = 0; i < priv->tx_ring_num; i++) { + if (priv->tx_ring[i]) + mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); + if (priv->tx_cq[i]) + mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); + } + return -ENOMEM; +} + + +void mlx4_en_destroy_netdev(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + en_dbg(DRV, priv, "Destroying netdev on 
port:%d\n", priv->port); + + /* Unregister device - this will close the port if it was up */ + if (priv->registered) + unregister_netdev(dev); + + if (priv->allocated) + mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); + + cancel_delayed_work(&priv->stats_task); + cancel_delayed_work(&priv->service_task); + /* flush any pending task for this netdev */ + flush_workqueue(mdev->workqueue); + + /* Detach the netdev so tasks would not attempt to access it */ + mutex_lock(&mdev->state_lock); + mdev->pndev[priv->port] = NULL; + mdev->upper[priv->port] = NULL; + mutex_unlock(&mdev->state_lock); + + mlx4_en_free_resources(priv); + + kfree(priv->tx_ring); + kfree(priv->tx_cq); + + free_netdev(dev); +} + +static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int err = 0; + + en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", + dev->mtu, new_mtu); + + if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { + en_err(priv, "Bad MTU size:%d.\n", new_mtu); + return -EPERM; + } + dev->mtu = new_mtu; + + if (netif_running(dev)) { + mutex_lock(&mdev->state_lock); + if (!mdev->device_up) { + /* NIC is probably restarting - let watchdog task reset + * the port */ + en_dbg(DRV, priv, "Change MTU called with card down!?\n"); + } else { + mlx4_en_stop_port(dev, 1); + err = mlx4_en_start_port(dev); + if (err) { + en_err(priv, "Failed restarting port:%d\n", + priv->port); + queue_work(mdev->workqueue, &priv->watchdog_task); + } + } + mutex_unlock(&mdev->state_lock); + } + return 0; +} + +static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + /* device doesn't support time stamping */ + if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) + return -EINVAL; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (mlx4_en_reset_config(dev, config, dev->features)) { + config.tx_type = HWTSTAMP_TX_OFF; + config.rx_filter = HWTSTAMP_FILTER_NONE; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config, + sizeof(priv->hwtstamp_config)) ? 
-EFAULT : 0; +} + +static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return mlx4_en_hwtstamp_set(dev, ifr); + case SIOCGHWTSTAMP: + return mlx4_en_hwtstamp_get(dev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int mlx4_en_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + bool reset = false; + int ret = 0; + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) { + en_info(priv, "Turn %s RX-FCS\n", + (features & NETIF_F_RXFCS) ? "ON" : "OFF"); + reset = true; + } + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) { + u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0; + + en_info(priv, "Turn %s RX-ALL\n", + ignore_fcs_value ? "ON" : "OFF"); + ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, + priv->port, ignore_fcs_value); + if (ret) + return ret; + } + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + en_info(priv, "Turn %s RX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); + reset = true; + } + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) + en_info(priv, "Turn %s TX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF"); + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) { + en_info(priv, "Turn %s loopback\n", + (features & NETIF_F_LOOPBACK) ? "ON" : "OFF"); + mlx4_en_update_loopback_state(netdev, features); + } + + if (reset) { + ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + features); + if (ret) + return ret; + } + + return 0; +} + +static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + u64 mac_u64 = mlx4_mac_to_u64(mac); + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); +} + +static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); +} + +static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, + max_tx_rate); +} + +static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); +} + +static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); +} + +static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state) +{ + struct mlx4_en_priv *en_priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = en_priv->mdev; + + return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); +} + +#define PORT_ID_BYTE_LEN 8 +static int mlx4_en_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_dev *mdev = priv->mdev->dev; 
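+ /* The 64-bit phys_port_id is exported most-significant byte first: + * the loop below peels off the low byte of phys_port_id on every + * iteration and stores it towards the end of ppid->id, leaving the + * most significant byte in ppid->id[0]. */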
+ int i; + u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; + + if (!phys_port_id) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(phys_port_id); + for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { + ppid->id[i] = phys_port_id & 0xff; + phys_port_id >>= 8; + } + return 0; +} + +#ifdef CONFIG_MLX4_EN_VXLAN +static void mlx4_en_add_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_add_task); + + ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); + if (ret) + goto out; + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 1); +out: + if (ret) { + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + return; + } + + /* set offloads */ + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; +} + +static void mlx4_en_del_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_del_task); + /* unset offloads */ + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); + priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; + priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 0); + if (ret) + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + + priv->vxlan_port = 0; +} + +static void mlx4_en_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port && current_port != port) { + en_warn(priv, "vxlan port %d configured, can't add port %d\n", + ntohs(current_port), ntohs(port)); + return; + } + + priv->vxlan_port = port; + queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); +} + +static void mlx4_en_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port != port) { + en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); + return; + } + + queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); +} + +static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + features = vlan_features_check(skb, features); + return vxlan_features_check(skb, features); +} +#endif + +static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index]; + struct mlx4_update_qp_params params; + int err; + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) + return -EOPNOTSUPP; + + /* rate provided to us in Mbs, check if it fits into 12 bits, if not use Gbs */ + if (maxrate >> 12) { + params.rate_unit = MLX4_QP_RATE_LIMIT_GBS; + params.rate_val = maxrate / 1000; + } else if (maxrate) { + 
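+ /* The rate fits in the 12-bit field, so it is programmed directly in + * Mb/s; e.g. maxrate = 3000 stays in Mb/s units, while 5000 (> 4095) + * takes the branch above and is programmed as 5 Gb/s. */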
params.rate_unit = MLX4_QP_RATE_LIMIT_MBS; + params.rate_val = maxrate; + } else { /* zero serves to revoke the QP rate-limitation */ + params.rate_unit = 0; + params.rate_val = 0; + } + + err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, + &params); + return err; +} + +static const struct net_device_ops mlx4_netdev_ops = { + .ndo_open = mlx4_en_open, + .ndo_stop = mlx4_en_close, + .ndo_start_xmit = mlx4_en_xmit, + .ndo_select_queue = mlx4_en_select_queue, + .ndo_get_stats = mlx4_en_get_stats, + .ndo_set_rx_mode = mlx4_en_set_rx_mode, + .ndo_set_mac_address = mlx4_en_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_do_ioctl = mlx4_en_ioctl, + .ndo_tx_timeout = mlx4_en_tx_timeout, + .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, +#endif + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx4_en_filter_rfs, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = mlx4_en_low_latency_recv, +#endif + .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_features_check = mlx4_en_features_check, +#endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, +}; + +static const struct net_device_ops mlx4_netdev_ops_master = { + .ndo_open = mlx4_en_open, + .ndo_stop = mlx4_en_close, + .ndo_start_xmit = mlx4_en_xmit, + .ndo_select_queue = mlx4_en_select_queue, + .ndo_get_stats = mlx4_en_get_stats, + .ndo_set_rx_mode = mlx4_en_set_rx_mode, + .ndo_set_mac_address = mlx4_en_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = mlx4_en_change_mtu, + .ndo_tx_timeout = mlx4_en_tx_timeout, + .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, + .ndo_set_vf_mac = mlx4_en_set_vf_mac, + .ndo_set_vf_vlan = mlx4_en_set_vf_vlan, + .ndo_set_vf_rate = mlx4_en_set_vf_rate, + .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk, + .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, + .ndo_get_vf_config = mlx4_en_get_vf_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx4_en_netpoll, +#endif + .ndo_set_features = mlx4_en_set_features, + .ndo_setup_tc = mlx4_en_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx4_en_filter_rfs, +#endif + .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_features_check = mlx4_en_features_check, +#endif + .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, +}; + +struct mlx4_en_bond { + struct work_struct work; + struct mlx4_en_priv *priv; + int is_bonded; + struct mlx4_port_map port_map; +}; + +static void mlx4_en_bond_work(struct work_struct *work) +{ + struct mlx4_en_bond *bond = container_of(work, + struct mlx4_en_bond, + work); + int err = 0; + struct mlx4_dev *dev = bond->priv->mdev->dev; + + if (bond->is_bonded) { + if (!mlx4_is_bonded(dev)) { + err = mlx4_bond(dev); + if (err) + en_err(bond->priv, "Failed to bond device\n"); + } + if (!err) { + err = mlx4_port_map_set(dev, &bond->port_map); + if (err) + en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n", + bond->port_map.port1, + bond->port_map.port2, + err); + } + } else if (mlx4_is_bonded(dev)) { + err =
mlx4_unbond(dev); + if (err) + en_err(bond->priv, "Failed to unbond device\n"); + } + dev_put(bond->priv->dev); + kfree(bond); +} + +static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, + u8 v2p_p1, u8 v2p_p2) +{ + struct mlx4_en_bond *bond = NULL; + + bond = kzalloc(sizeof(*bond), GFP_ATOMIC); + if (!bond) + return -ENOMEM; + + INIT_WORK(&bond->work, mlx4_en_bond_work); + bond->priv = priv; + bond->is_bonded = is_bonded; + bond->port_map.port1 = v2p_p1; + bond->port_map.port2 = v2p_p2; + dev_hold(priv->dev); + queue_work(priv->mdev->workqueue, &bond->work); + return 0; +} + +int mlx4_en_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + u8 port = 0; + struct mlx4_en_dev *mdev; + struct mlx4_dev *dev; + int i, num_eth_ports = 0; + bool do_bond = true; + struct mlx4_en_priv *priv; + u8 v2p_port1 = 0; + u8 v2p_port2 = 0; + + if (!net_eq(dev_net(ndev), &init_net)) + return NOTIFY_DONE; + + mdev = container_of(this, struct mlx4_en_dev, nb); + dev = mdev->dev; + + /* Go into this mode only when two network devices set on two ports + * of the same mlx4 device are slaves of the same bonding master + */ + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + ++num_eth_ports; + if (!port && (mdev->pndev[i] == ndev)) + port = i; + mdev->upper[i] = mdev->pndev[i] ? + netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; + /* condition not met: network device is a slave */ + if (!mdev->upper[i]) + do_bond = false; + if (num_eth_ports < 2) + continue; + /* condition not met: same master */ + if (mdev->upper[i] != mdev->upper[i-1]) + do_bond = false; + } + /* condition not met: 2 slaves */ + do_bond = (num_eth_ports == 2) ? do_bond : false; + + /* handle only events that come with enough info */ + if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) + return NOTIFY_DONE; + + priv = netdev_priv(ndev); + if (do_bond) { + struct netdev_notifier_bonding_info *notifier_info = ptr; + struct netdev_bonding_info *bonding_info = + &notifier_info->bonding_info; + + /* required mode 1, 2 or 4 */ + if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && + (bonding_info->master.bond_mode != BOND_MODE_XOR) && + (bonding_info->master.bond_mode != BOND_MODE_8023AD)) + do_bond = false; + + /* require exactly 2 slaves */ + if (bonding_info->master.num_slaves != 2) + do_bond = false; + + /* calc v2p */ + if (do_bond) { + if (bonding_info->master.bond_mode == + BOND_MODE_ACTIVEBACKUP) { + /* in active-backup mode virtual ports are + * mapped to the physical port of the active + * slave */ + if (bonding_info->slave.state == + BOND_STATE_BACKUP) { + if (port == 1) { + v2p_port1 = 2; + v2p_port2 = 2; + } else { + v2p_port1 = 1; + v2p_port2 = 1; + } + } else { /* BOND_STATE_ACTIVE */ + if (port == 1) { + v2p_port1 = 1; + v2p_port2 = 1; + } else { + v2p_port1 = 2; + v2p_port2 = 2; + } + } + } else { /* Active-Active */ + /* in active-active mode a virtual port is + * mapped to the native physical port if and only + * if the physical port is up */ + __s8 link = bonding_info->slave.link; + + if (port == 1) + v2p_port2 = 2; + else + v2p_port1 = 1; + if ((link == BOND_LINK_UP) || + (link == BOND_LINK_FAIL)) { + if (port == 1) + v2p_port1 = 1; + else + v2p_port2 = 2; + } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ + if (port == 1) + v2p_port1 = 2; + else + v2p_port2 = 1; + } + } + } + } + + mlx4_en_queue_bond_work(priv, do_bond, + v2p_port1, v2p_port2); + + return NOTIFY_DONE; +} + +void
mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause) +{ + int last_i = NUM_MAIN_STATS + NUM_PORT_STATS; + + if (!mlx4_is_slave(dev) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) { + mutex_lock(&stats_bitmap->mutex); + bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS); + + if (rx_ppp) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_PRIORITY_STATS_RX); + last_i += NUM_FLOW_PRIORITY_STATS_RX; + + if (rx_pause && !(rx_ppp)) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_STATS_RX); + last_i += NUM_FLOW_STATS_RX; + + if (tx_ppp) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_PRIORITY_STATS_TX); + last_i += NUM_FLOW_PRIORITY_STATS_TX; + + if (tx_pause && !(tx_ppp)) + bitmap_set(stats_bitmap->bitmap, last_i, + NUM_FLOW_STATS_TX); + last_i += NUM_FLOW_STATS_TX; + + mutex_unlock(&stats_bitmap->mutex); + } +} + +void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause) +{ + int last_i = 0; + + mutex_init(&stats_bitmap->mutex); + bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS); + + if (mlx4_is_slave(dev)) { + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_packets), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_packets), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_bytes), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_bytes), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(rx_dropped), 1); + bitmap_set(stats_bitmap->bitmap, last_i + + MLX4_FIND_NETDEV_STAT(tx_dropped), 1); + } else { + bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS); + } + last_i += NUM_MAIN_STATS; + + bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS); + last_i += NUM_PORT_STATS; + + mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap, + rx_ppp, rx_pause, + tx_ppp, tx_pause); + last_i += NUM_FLOW_STATS; + + if (!mlx4_is_slave(dev)) + bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS); +} + +int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + struct mlx4_en_port_profile *prof) +{ + struct net_device *dev; + struct mlx4_en_priv *priv; + int i; + int err; + u64 mac_u64; + + dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), + MAX_TX_RINGS, MAX_RX_RINGS); + if (dev == NULL) + return -ENOMEM; + + netif_set_real_num_tx_queues(dev, prof->tx_ring_num); + netif_set_real_num_rx_queues(dev, prof->rx_ring_num); + + SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); + dev->dev_port = port - 1; + + /* + * Initialize driver private data + */ + + priv = netdev_priv(dev); + memset(priv, 0, sizeof(struct mlx4_en_priv)); + spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); + INIT_WORK(&priv->watchdog_task, mlx4_en_restart); + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +#ifdef CONFIG_MLX4_EN_VXLAN + INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); + INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); +#endif +#ifdef CONFIG_RFS_ACCEL + INIT_LIST_HEAD(&priv->filters); + spin_lock_init(&priv->filters_lock); +#endif + + priv->dev = dev; + priv->mdev = mdev; + priv->ddev = &mdev->pdev->dev; + priv->prof = prof; + priv->port = port; + priv->port_up = false; + priv->flags 
= prof->flags; + priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; + priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | + MLX4_WQE_CTRL_SOLICITED); + priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; + priv->tx_ring_num = prof->tx_ring_num; + priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; + netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); + + priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, + GFP_KERNEL); + if (!priv->tx_ring) { + err = -ENOMEM; + goto out; + } + priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, + GFP_KERNEL); + if (!priv->tx_cq) { + err = -ENOMEM; + goto out; + } + priv->rx_ring_num = prof->rx_ring_num; + priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; + priv->cqe_size = mdev->dev->caps.cqe_size; + priv->mac_index = -1; + priv->msg_enable = MLX4_EN_MSG_LEVEL; +#ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { + dev->dcbnl_ops = &mlx4_en_dcbnl_ops; + } else { + en_info(priv, "enabling only PFC DCB ops\n"); + dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; + } + } +#endif + + for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) + INIT_HLIST_HEAD(&priv->mac_hash[i]); + + /* Query for default mac and max mtu */ + priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; + + if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP) + priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP; + + /* Set default MAC */ + dev->addr_len = ETH_ALEN; + mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); + if (!is_valid_ether_addr(dev->dev_addr)) { + if (mlx4_is_slave(priv->mdev->dev)) { + eth_hw_addr_random(dev); + en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); + mac_u64 = mlx4_mac_to_u64(dev->dev_addr); + mdev->dev->caps.def_mac[priv->port] = mac_u64; + } else { + en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + priv->port, dev->dev_addr); + err = -EINVAL; + goto out; + } + } + + memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); + + priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + + DS_SIZE * MLX4_EN_MAX_RX_FRAGS); + err = mlx4_en_alloc_resources(priv); + if (err) + goto out; + + /* Initialize time stamping config */ + priv->hwtstamp_config.flags = 0; + priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + + /* Allocate page for receive rings */ + err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, + MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); + if (err) { + en_err(priv, "Failed to allocate page for rx qps\n"); + goto out; + } + priv->allocated = 1; + + /* + * Initialize netdev entry points + */ + if (mlx4_is_master(priv->mdev->dev)) + dev->netdev_ops = &mlx4_netdev_ops_master; + else + dev->netdev_ops = &mlx4_netdev_ops; + dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; + netif_set_real_num_tx_queues(dev, priv->tx_ring_num); + netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + + dev->ethtool_ops = &mlx4_en_ethtool_ops; + + /* + * Set driver features + */ + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + if (mdev->LSO_support) + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + + dev->vlan_features = dev->hw_features; + + dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH; + dev->features = dev->hw_features | NETIF_F_HIGHDMA | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + dev->hw_features |= NETIF_F_LOOPBACK | + 
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) + dev->hw_features |= NETIF_F_RXFCS; + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) + dev->hw_features |= NETIF_F_RXALL; + + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED && + mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) + dev->hw_features |= NETIF_F_NTUPLE; + + if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) + dev->priv_flags |= IFF_UNICAST_FLT; + + /* Setting a default hash function value */ + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { + priv->rss_hash_fn = ETH_RSS_HASH_TOP; + } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { + priv->rss_hash_fn = ETH_RSS_HASH_XOR; + } else { + en_warn(priv, + "No RSS hash capabilities exposed, using Toeplitz\n"); + priv->rss_hash_fn = ETH_RSS_HASH_TOP; + } + + mdev->pndev[port] = dev; + mdev->upper[port] = NULL; + + netif_carrier_off(dev); + mlx4_en_set_default_moderation(priv); + + en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); + en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); + + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + + /* Configure port */ + mlx4_en_calc_rx_buf(dev); + err = mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + prof->tx_pause, prof->tx_ppp, + prof->rx_pause, prof->rx_ppp); + if (err) { + en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", + priv->port, err); + goto out; + } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); + if (err) { + en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", + err); + goto out; + } + } + + /* Init port */ + en_warn(priv, "Initializing port\n"); + err = mlx4_INIT_PORT(mdev->dev, priv->port); + if (err) { + en_err(priv, "Failed Initializing port\n"); + goto out; + } + queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); + + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + queue_delayed_work(mdev->workqueue, &priv->service_task, + SERVICE_TASK_DELAY); + + mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, + mdev->profile.prof[priv->port].rx_ppp, + mdev->profile.prof[priv->port].rx_pause, + mdev->profile.prof[priv->port].tx_ppp, + mdev->profile.prof[priv->port].tx_pause); + + err = register_netdev(dev); + if (err) { + en_err(priv, "Netdev registration failed for port %d\n", port); + goto out; + } + + priv->registered = 1; + + return 0; + +out: + mlx4_en_destroy_netdev(dev); + return err; +} + +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + if (priv->hwtstamp_config.tx_type == ts_config.tx_type && + priv->hwtstamp_config.rx_filter == ts_config.rx_filter && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) + return 0; /* Nothing to change */ + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_CTAG_RX) && + (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { + en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); + return -EINVAL; + } + + mutex_lock(&mdev->state_lock); + if 
(priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", + ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + priv->hwtstamp_config.tx_type = ts_config.tx_type; + priv->hwtstamp_config.rx_filter = ts_config.rx_filter; + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* RX time-stamping is OFF, update the RX vlan offload + * to the latest wanted state + */ + if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) { + if (features & NETIF_F_RXFCS) + dev->features |= NETIF_F_RXFCS; + else + dev->features &= ~NETIF_F_RXFCS; + } + + /* RX vlan offload and RX time-stamping can't co-exist ! + * Regardless of the caller's choice, + * Turn Off RX vlan offload in case of time-stamping is ON + */ + if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + +out: + mutex_unlock(&mdev->state_lock); + netdev_features_change(dev); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c new file mode 100644 index 000000000..0a56f010c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + + +#include <linux/if_vlan.h> + +#include <linux/mlx4/device.h> +#include <linux/mlx4/cmd.h> + +#include "en_port.h" +#include "mlx4_en.h" + + +int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vlan_fltr_mbox *filter; + int i; + int j; + int index = 0; + u32 entry; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + filter = mailbox->buf; + for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) { + entry = 0; + for (j = 0; j < 32; j++) + if (test_bit(index++, priv->active_vlans)) + entry |= 1 << j; + filter->entry[i] = cpu_to_be32(entry); + } + err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port) +{ + struct mlx4_en_query_port_context *qport_context; + struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); + struct mlx4_en_port_state *state = &priv->port_state; + struct mlx4_cmd_mailbox *mailbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; + qport_context = mailbox->buf; + + /* This command is always accessed from Ethtool context + * already synchronized, no need for locking */ + state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); + switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) { + case MLX4_EN_100M_SPEED: + state->link_speed = SPEED_100; + break; + case MLX4_EN_1G_SPEED: + state->link_speed = SPEED_1000; + break; + case MLX4_EN_10G_SPEED_XAUI: + case MLX4_EN_10G_SPEED_XFI: + state->link_speed = SPEED_10000; + break; + case MLX4_EN_20G_SPEED: + state->link_speed = SPEED_20000; + break; + case MLX4_EN_40G_SPEED: + state->link_speed = SPEED_40000; + break; + case MLX4_EN_56G_SPEED: + state->link_speed = SPEED_56000; + break; + default: + state->link_speed = -1; + break; + } + + state->transceiver = qport_context->transceiver; + + state->flags = 0; /* Reset and recalculate the port flags */ + state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ? + MLX4_EN_PORT_ANC : 0; + state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ? + MLX4_EN_PORT_ANE : 0; + +out: + mlx4_free_cmd_mailbox(mdev->dev, mailbox); + return err; +} + +/* Each counter set is located in struct mlx4_en_stat_out_mbox + * with a const offset between its prio components. + * This function runs over a counter set and sums all of its prio components.
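+ * e.g. en_stats_adder(&mlx4_en_stats->MCAST_prio_0, + * &mlx4_en_stats->MCAST_prio_1, NUM_PRIORITIES) sums the MCAST + * counters of all priorities, using the distance between prio_0 and + * prio_1 as the stride between the summed components.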
+ */ +static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num) +{ + __be64 *curr = start; + unsigned long ret = 0; + int i; + int offset = next - start; + + for (i = 0; i < num; i++) { + ret += be64_to_cpu(*curr); + curr += offset; + } + + return ret; +} + +int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) +{ + struct mlx4_en_stat_out_mbox *mlx4_en_stats; + struct mlx4_en_stat_out_flow_control_mbox *flowstats; + struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); + struct net_device_stats *stats = &priv->stats; + struct mlx4_cmd_mailbox *mailbox; + u64 in_mod = reset << 8 | port; + int err; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, + MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + mlx4_en_stats = mailbox->buf; + + spin_lock_bh(&priv->stats_lock); + + stats->rx_packets = 0; + stats->rx_bytes = 0; + priv->port_stats.rx_chksum_good = 0; + priv->port_stats.rx_chksum_none = 0; + priv->port_stats.rx_chksum_complete = 0; + for (i = 0; i < priv->rx_ring_num; i++) { + stats->rx_packets += priv->rx_ring[i]->packets; + stats->rx_bytes += priv->rx_ring[i]->bytes; + priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; + priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; + priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; + } + stats->tx_packets = 0; + stats->tx_bytes = 0; + priv->port_stats.tx_chksum_offload = 0; + priv->port_stats.queue_stopped = 0; + priv->port_stats.wake_queue = 0; + priv->port_stats.tso_packets = 0; + priv->port_stats.xmit_more = 0; + + for (i = 0; i < priv->tx_ring_num; i++) { + const struct mlx4_en_tx_ring *ring = priv->tx_ring[i]; + + stats->tx_packets += ring->packets; + stats->tx_bytes += ring->bytes; + priv->port_stats.tx_chksum_offload += ring->tx_csum; + priv->port_stats.queue_stopped += ring->queue_stopped; + priv->port_stats.wake_queue += ring->wake_queue; + priv->port_stats.tso_packets += ring->tso_packets; + priv->port_stats.xmit_more += ring->xmit_more; + } + + /* net device stats */ + stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + + be32_to_cpu(mlx4_en_stats->RJBBR) + + be32_to_cpu(mlx4_en_stats->RCRC) + + be32_to_cpu(mlx4_en_stats->RRUNT) + + be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) + + be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) + + be32_to_cpu(mlx4_en_stats->RSHORT) + + en_stats_adder(&mlx4_en_stats->RGIANT_prio_0, + &mlx4_en_stats->RGIANT_prio_1, + NUM_PRIORITIES); + stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0, + &mlx4_en_stats->TGIANT_prio_1, + NUM_PRIORITIES); + stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, + &mlx4_en_stats->MCAST_prio_1, + NUM_PRIORITIES); + stats->collisions = 0; + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); + stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); + stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); + stats->rx_frame_errors = 0; + stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); + stats->tx_aborted_errors = 0; + stats->tx_carrier_errors = 0; + stats->tx_fifo_errors = 0; + stats->tx_heartbeat_errors = 0; + stats->tx_window_errors = 0; + stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); + + /* RX stats */ + 
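+ /* The per-priority packet/octet counters are copied below into + * pkstats.rx_prio[][] and pkstats.tx_prio[][]; row index 8 holds the + * no-vlan bucket (the *_novlan counters). */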
priv->pkstats.rx_multicast_packets = stats->multicast; + priv->pkstats.rx_broadcast_packets = + en_stats_adder(&mlx4_en_stats->RBCAST_prio_0, + &mlx4_en_stats->RBCAST_prio_1, + NUM_PRIORITIES); + priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR); + priv->pkstats.rx_in_range_length_error = + be64_to_cpu(mlx4_en_stats->RInRangeLengthErr); + priv->pkstats.rx_out_range_length_error = + be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr); + + /* Tx stats */ + priv->pkstats.tx_multicast_packets = + en_stats_adder(&mlx4_en_stats->TMCAST_prio_0, + &mlx4_en_stats->TMCAST_prio_1, + NUM_PRIORITIES); + priv->pkstats.tx_broadcast_packets = + en_stats_adder(&mlx4_en_stats->TBCAST_prio_0, + &mlx4_en_stats->TBCAST_prio_1, + NUM_PRIORITIES); + + priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0); + priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0); + priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1); + priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1); + priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2); + priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2); + priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3); + priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3); + priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4); + priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4); + priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5); + priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5); + priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6); + priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6); + priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7); + priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7); + priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan); + priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan); + priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0); + priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0); + priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1); + priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1); + priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2); + priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2); + priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3); + priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3); + priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4); + priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4); + priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5); + priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5); + priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6); + priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6); + priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7); + priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7); + priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); + priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); + + spin_unlock_bh(&priv->stats_lock); + + /* 0xffs indicates invalid value */ + memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES); + + if (mdev->dev->caps.flags2 & 
MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { + memset(mailbox->buf, 0, + sizeof(*flowstats) * MLX4_NUM_PRIORITIES); + err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, + in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, + 0, MLX4_CMD_DUMP_ETH_STATS, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + if (err) + goto out; + } + + flowstats = mailbox->buf; + + spin_lock_bh(&priv->stats_lock); + + for (i = 0; i < MLX4_NUM_PRIORITIES; i++) { + priv->rx_priority_flowstats[i].rx_pause = + be64_to_cpu(flowstats[i].rx_pause); + priv->rx_priority_flowstats[i].rx_pause_duration = + be64_to_cpu(flowstats[i].rx_pause_duration); + priv->rx_priority_flowstats[i].rx_pause_transition = + be64_to_cpu(flowstats[i].rx_pause_transition); + priv->tx_priority_flowstats[i].tx_pause = + be64_to_cpu(flowstats[i].tx_pause); + priv->tx_priority_flowstats[i].tx_pause_duration = + be64_to_cpu(flowstats[i].tx_pause_duration); + priv->tx_priority_flowstats[i].tx_pause_transition = + be64_to_cpu(flowstats[i].tx_pause_transition); + } + + /* if pfc is not in use, all priorities counters have the same value */ + priv->rx_flowstats.rx_pause = + be64_to_cpu(flowstats[0].rx_pause); + priv->rx_flowstats.rx_pause_duration = + be64_to_cpu(flowstats[0].rx_pause_duration); + priv->rx_flowstats.rx_pause_transition = + be64_to_cpu(flowstats[0].rx_pause_transition); + priv->tx_flowstats.tx_pause = + be64_to_cpu(flowstats[0].tx_pause); + priv->tx_flowstats.tx_pause_duration = + be64_to_cpu(flowstats[0].tx_pause_duration); + priv->tx_flowstats.tx_pause_transition = + be64_to_cpu(flowstats[0].tx_pause_transition); + + spin_unlock_bh(&priv->stats_lock); + +out: + mlx4_free_cmd_mailbox(mdev->dev, mailbox); + return err; +} + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h new file mode 100644 index 000000000..040da4b16 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -0,0 +1,587 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef _MLX4_EN_PORT_H_ +#define _MLX4_EN_PORT_H_ + + +#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_PROMISC_SHIFT 31 +#define SET_PORT_MC_PROMISC_SHIFT 30 + +#define MLX4_EN_NUM_TC 8 + +#define VLAN_FLTR_SIZE 128 +struct mlx4_set_vlan_fltr_mbox { + __be32 entry[VLAN_FLTR_SIZE]; +}; + + +enum { + MLX4_MCAST_CONFIG = 0, + MLX4_MCAST_DISABLE = 1, + MLX4_MCAST_ENABLE = 2, +}; + +enum mlx4_link_mode { + MLX4_1000BASE_CX_SGMII = 0, + MLX4_1000BASE_KX = 1, + MLX4_10GBASE_CX4 = 2, + MLX4_10GBASE_KX4 = 3, + MLX4_10GBASE_KR = 4, + MLX4_20GBASE_KR2 = 5, + MLX4_40GBASE_CR4 = 6, + MLX4_40GBASE_KR4 = 7, + MLX4_56GBASE_KR4 = 8, + MLX4_10GBASE_CR = 12, + MLX4_10GBASE_SR = 13, + MLX4_40GBASE_SR4 = 15, + MLX4_56GBASE_CR4 = 17, + MLX4_56GBASE_SR4 = 18, + MLX4_100BASE_TX = 24, + MLX4_1000BASE_T = 25, + MLX4_10GBASE_T = 26, +}; + +#define MLX4_PROT_MASK(link_mode) (1< +#include +#include + +#include "mlx4_en.h" + +void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, + int is_tx, int rss, int qpn, int cqn, + int user_prio, struct mlx4_qp_context *context) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct net_device *dev = priv->dev; + + memset(context, 0, sizeof *context); + context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); + context->pd = cpu_to_be32(mdev->priv_pdn); + context->mtu_msgmax = 0xff; + if (!is_tx && !rss) + context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); + if (is_tx) { + context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP) + context->params2 |= MLX4_QP_BIT_FPP; + + } else { + context->sq_size_stride = ilog2(TXBB_SIZE) - 4; + } + context->usr_page = cpu_to_be32(mdev->priv_uar.index); + context->local_qpn = cpu_to_be32(qpn); + context->pri_path.ackto = 1 & 0x07; + context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; + if (user_prio >= 0) { + context->pri_path.sched_queue |= user_prio << 3; + context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; + } + context->pri_path.counter_index = 0xff; + context->cqn_send = cpu_to_be32(cqn); + context->cqn_recv = cpu_to_be32(cqn); + context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); + if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) + context->param3 |= cpu_to_be32(1 << 30); + + if (!is_tx && !rss && + (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) { + en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); + context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */ + } +} + + +int mlx4_en_map_buffer(struct mlx4_buf *buf) +{ + struct page **pages; + int i; + + if (BITS_PER_LONG == 64 || buf->nbufs == 1) + return 0; + + pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); + if (!pages) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; ++i) + pages[i] = virt_to_page(buf->page_list[i].buf); + + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + return -ENOMEM; + + return 0; +} + +void mlx4_en_unmap_buffer(struct mlx4_buf *buf) +{ + if (BITS_PER_LONG == 64 || buf->nbufs == 1) + return; + + vunmap(buf->direct.buf); +} + +void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) +{ + return; +} + diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c new file mode 100644 index 000000000..eab4e080e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -0,0 +1,1307 @@ +/* + * Copyright (c) 2007 Mellanox 
Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if IS_ENABLED(CONFIG_IPV6) +#include +#endif + +#include "mlx4_en.h" + +static int mlx4_alloc_pages(struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *page_alloc, + const struct mlx4_en_frag_info *frag_info, + gfp_t _gfp) +{ + int order; + struct page *page; + dma_addr_t dma; + + for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { + gfp_t gfp = _gfp; + + if (order) + gfp |= __GFP_COMP | __GFP_NOWARN; + page = alloc_pages(gfp, order); + if (likely(page)) + break; + if (--order < 0 || + ((PAGE_SIZE << order) < frag_info->frag_size)) + return -ENOMEM; + } + dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(priv->ddev, dma)) { + put_page(page); + return -ENOMEM; + } + page_alloc->page_size = PAGE_SIZE << order; + page_alloc->page = page; + page_alloc->dma = dma; + page_alloc->page_offset = 0; + /* Not doing get_page() for each frag is a big win + * on asymetric workloads. Note we can not use atomic_set(). 
+ */ + atomic_add(page_alloc->page_size / frag_info->frag_stride - 1, + &page->_count); + return 0; +} + +static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, + struct mlx4_en_rx_desc *rx_desc, + struct mlx4_en_rx_alloc *frags, + struct mlx4_en_rx_alloc *ring_alloc, + gfp_t gfp) +{ + struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; + const struct mlx4_en_frag_info *frag_info; + struct page *page; + dma_addr_t dma; + int i; + + for (i = 0; i < priv->num_frags; i++) { + frag_info = &priv->frag_info[i]; + page_alloc[i] = ring_alloc[i]; + page_alloc[i].page_offset += frag_info->frag_stride; + + if (page_alloc[i].page_offset + frag_info->frag_stride <= + ring_alloc[i].page_size) + continue; + + if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp)) + goto out; + } + + for (i = 0; i < priv->num_frags; i++) { + frags[i] = ring_alloc[i]; + dma = ring_alloc[i].dma + ring_alloc[i].page_offset; + ring_alloc[i] = page_alloc[i]; + rx_desc->data[i].addr = cpu_to_be64(dma); + } + + return 0; + +out: + while (i--) { + if (page_alloc[i].page != ring_alloc[i].page) { + dma_unmap_page(priv->ddev, page_alloc[i].dma, + page_alloc[i].page_size, PCI_DMA_FROMDEVICE); + page = page_alloc[i].page; + atomic_set(&page->_count, 1); + put_page(page); + } + } + return -ENOMEM; +} + +static void mlx4_en_free_frag(struct mlx4_en_priv *priv, + struct mlx4_en_rx_alloc *frags, + int i) +{ + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride; + + + if (next_frag_end > frags[i].page_size) + dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, + PCI_DMA_FROMDEVICE); + + if (frags[i].page) + put_page(frags[i].page); +} + +static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + int i; + struct mlx4_en_rx_alloc *page_alloc; + + for (i = 0; i < priv->num_frags; i++) { + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + + if (mlx4_alloc_pages(priv, &ring->page_alloc[i], + frag_info, GFP_KERNEL | __GFP_COLD)) + goto out; + + en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n", + i, ring->page_alloc[i].page_size, + atomic_read(&ring->page_alloc[i].page->_count)); + } + return 0; + +out: + while (i--) { + struct page *page; + + page_alloc = &ring->page_alloc[i]; + dma_unmap_page(priv->ddev, page_alloc->dma, + page_alloc->page_size, PCI_DMA_FROMDEVICE); + page = page_alloc->page; + atomic_set(&page->_count, 1); + put_page(page); + page_alloc->page = NULL; + } + return -ENOMEM; +} + +static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + struct mlx4_en_rx_alloc *page_alloc; + int i; + + for (i = 0; i < priv->num_frags; i++) { + const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i]; + + page_alloc = &ring->page_alloc[i]; + en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n", + i, page_count(page_alloc->page)); + + dma_unmap_page(priv->ddev, page_alloc->dma, + page_alloc->page_size, PCI_DMA_FROMDEVICE); + while (page_alloc->page_offset + frag_info->frag_stride < + page_alloc->page_size) { + put_page(page_alloc->page); + page_alloc->page_offset += frag_info->frag_stride; + } + page_alloc->page = NULL; + } +} + +static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, int index) +{ + struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index; + int possible_frags; + int i; + + /* Set size and memtype fields */ + for (i = 0; i < priv->num_frags; i++) { + 
rx_desc->data[i].byte_count = + cpu_to_be32(priv->frag_info[i].frag_size); + rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key); + } + + /* If the number of used fragments does not fill up the ring stride, + * remaining (unused) fragments must be padded with null address/size + * and a special memory key */ + possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE; + for (i = priv->num_frags; i < possible_frags; i++) { + rx_desc->data[i].byte_count = 0; + rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD); + rx_desc->data[i].addr = 0; + } +} + +static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, int index, + gfp_t gfp) +{ + struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride); + struct mlx4_en_rx_alloc *frags = ring->rx_info + + (index << priv->log_rx_info); + + return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); +} + +static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) +{ + BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); + return ring->prod == ring->cons; +} + +static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) +{ + *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); +} + +static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, + int index) +{ + struct mlx4_en_rx_alloc *frags; + int nr; + + frags = ring->rx_info + (index << priv->log_rx_info); + for (nr = 0; nr < priv->num_frags; nr++) { + en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); + mlx4_en_free_frag(priv, frags, nr); + } +} + +static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) +{ + struct mlx4_en_rx_ring *ring; + int ring_ind; + int buf_ind; + int new_size; + + for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + + if (mlx4_en_prepare_rx_desc(priv, ring, + ring->actual_size, + GFP_KERNEL | __GFP_COLD)) { + if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { + en_err(priv, "Failed to allocate enough rx buffers\n"); + return -ENOMEM; + } else { + new_size = rounddown_pow_of_two(ring->actual_size); + en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n", + ring->actual_size, new_size); + goto reduce_rings; + } + } + ring->actual_size++; + ring->prod++; + } + } + return 0; + +reduce_rings: + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + while (ring->actual_size > new_size) { + ring->actual_size--; + ring->prod--; + mlx4_en_free_rx_desc(priv, ring, ring->actual_size); + } + } + + return 0; +} + +static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + int index; + + en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n", + ring->cons, ring->prod); + + /* Unmap and free Rx buffers */ + while (!mlx4_en_is_ring_empty(ring)) { + index = ring->cons & ring->size_mask; + en_dbg(DRV, priv, "Processing descriptor:%d\n", index); + mlx4_en_free_rx_desc(priv, ring, index); + ++ring->cons; + } +} + +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) +{ + int i; + int num_of_eqs; + int num_rx_rings; + struct mlx4_dev *dev = mdev->dev; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + if (!dev->caps.comp_pool) + num_of_eqs = max_t(int, MIN_RX_RINGS, + min_t(int, + dev->caps.num_comp_vectors, + DEF_RX_RINGS)); + else + num_of_eqs = min_t(int, MAX_MSIX_P_PORT, + dev->caps.comp_pool/ + dev->caps.num_ports) - 1; + + 
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : + min_t(int, num_of_eqs, + netif_get_num_default_rss_queues()); + mdev->profile.prof[i].rx_ring_num = + rounddown_pow_of_two(num_rx_rings); + } +} + +int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride, int node) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rx_ring *ring; + int err = -ENOMEM; + int tmp; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); + if (!ring) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + en_err(priv, "Failed to allocate RX ring structure\n"); + return -ENOMEM; + } + } + + ring->prod = 0; + ring->cons = 0; + ring->size = size; + ring->size_mask = size - 1; + ring->stride = stride; + ring->log_stride = ffs(ring->stride) - 1; + ring->buf_size = ring->size * ring->stride + TXBB_SIZE; + + tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * + sizeof(struct mlx4_en_rx_alloc)); + ring->rx_info = vmalloc_node(tmp, node); + if (!ring->rx_info) { + ring->rx_info = vmalloc(tmp); + if (!ring->rx_info) { + err = -ENOMEM; + goto err_ring; + } + } + + en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", + ring->rx_info, tmp); + + /* Allocate HW buffers on provided NUMA node */ + set_dev_node(&mdev->dev->persist->pdev->dev, node); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, + ring->buf_size, 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); + if (err) + goto err_info; + + err = mlx4_en_map_buffer(&ring->wqres.buf); + if (err) { + en_err(priv, "Failed to map RX buffer\n"); + goto err_hwq; + } + ring->buf = ring->wqres.buf.direct.buf; + + ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter; + + *pring = ring; + return 0; + +err_hwq: + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); +err_info: + vfree(ring->rx_info); + ring->rx_info = NULL; +err_ring: + kfree(ring); + *pring = NULL; + + return err; +} + +int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) +{ + struct mlx4_en_rx_ring *ring; + int i; + int ring_ind; + int err; + int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + + DS_SIZE * priv->num_frags); + + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + + ring->prod = 0; + ring->cons = 0; + ring->actual_size = 0; + ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; + + ring->stride = stride; + if (ring->stride <= TXBB_SIZE) + ring->buf += TXBB_SIZE; + + ring->log_stride = ffs(ring->stride) - 1; + ring->buf_size = ring->size * ring->stride; + + memset(ring->buf, 0, ring->buf_size); + mlx4_en_update_rx_prod_db(ring); + + /* Initialize all descriptors */ + for (i = 0; i < ring->size; i++) + mlx4_en_init_rx_desc(priv, ring, i); + + /* Initialize page allocators */ + err = mlx4_en_init_allocator(priv, ring); + if (err) { + en_err(priv, "Failed initializing ring allocator\n"); + if (ring->stride <= TXBB_SIZE) + ring->buf -= TXBB_SIZE; + ring_ind--; + goto err_allocator; + } + } + err = mlx4_en_fill_rx_buffers(priv); + if (err) + goto err_buffers; + + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { + ring = priv->rx_ring[ring_ind]; + + ring->size_mask = ring->actual_size - 1; + mlx4_en_update_rx_prod_db(ring); + } + + return 0; + +err_buffers: + for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) + mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]); + + ring_ind = priv->rx_ring_num - 1; +err_allocator: + while (ring_ind >= 0) { + if (priv->rx_ring[ring_ind]->stride <= 
TXBB_SIZE) + priv->rx_ring[ring_ind]->buf -= TXBB_SIZE; + mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]); + ring_ind--; + } + return err; +} + +/* We recover from out of memory by scheduling our napi poll + * function (mlx4_en_process_cq), which tries to allocate + * all missing RX buffers (call to mlx4_en_refill_rx_buffers). + */ +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) +{ + int ring; + + if (!priv->port_up) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) + napi_reschedule(&priv->rx_cq[ring]->napi); + } +} + +void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rx_ring *ring = *pring; + + mlx4_en_unmap_buffer(&ring->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); + vfree(ring->rx_info); + ring->rx_info = NULL; + kfree(ring); + *pring = NULL; +#ifdef CONFIG_RFS_ACCEL + mlx4_en_cleanup_filters(priv); +#endif +} + +void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + mlx4_en_free_rx_buf(priv, ring); + if (ring->stride <= TXBB_SIZE) + ring->buf -= TXBB_SIZE; + mlx4_en_destroy_allocator(priv, ring); +} + + +static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_rx_desc *rx_desc, + struct mlx4_en_rx_alloc *frags, + struct sk_buff *skb, + int length) +{ + struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags; + struct mlx4_en_frag_info *frag_info; + int nr; + dma_addr_t dma; + + /* Collect used fragments while replacing them in the HW descriptors */ + for (nr = 0; nr < priv->num_frags; nr++) { + frag_info = &priv->frag_info[nr]; + if (length <= frag_info->frag_prefix_size) + break; + if (!frags[nr].page) + goto fail; + + dma = be64_to_cpu(rx_desc->data[nr].addr); + dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size, + DMA_FROM_DEVICE); + + /* Save page reference in skb */ + __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page); + skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size); + skb_frags_rx[nr].page_offset = frags[nr].page_offset; + skb->truesize += frag_info->frag_stride; + frags[nr].page = NULL; + } + /* Adjust size of last fragment to match actual length */ + if (nr > 0) + skb_frag_size_set(&skb_frags_rx[nr - 1], + length - priv->frag_info[nr - 1].frag_prefix_size); + return nr; + +fail: + while (nr > 0) { + nr--; + __skb_frag_unref(&skb_frags_rx[nr]); + } + return 0; +} + + +static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, + struct mlx4_en_rx_desc *rx_desc, + struct mlx4_en_rx_alloc *frags, + unsigned int length) +{ + struct sk_buff *skb; + void *va; + int used_frags; + dma_addr_t dma; + + skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN); + if (!skb) { + en_dbg(RX_ERR, priv, "Failed allocating skb\n"); + return NULL; + } + skb_reserve(skb, NET_IP_ALIGN); + skb->len = length; + + /* Get pointer to first fragment so we could copy the headers into the + * (linear part of the) skb */ + va = page_address(frags[0].page) + frags[0].page_offset; + + if (length <= SMALL_PACKET_SIZE) { + /* We are copying all relevant data to the skb - temporarily + * sync buffers for the copy */ + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, length, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, va, length); + skb->tail += length; + } else { + unsigned int pull_len; + + /* Move relevant 
fragments to skb */ + used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, + skb, length); + if (unlikely(!used_frags)) { + kfree_skb(skb); + return NULL; + } + skb_shinfo(skb)->nr_frags = used_frags; + + pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE); + /* Copy headers into the skb linear buffer */ + memcpy(skb->data, va, pull_len); + skb->tail += pull_len; + + /* Skip headers in first fragment */ + skb_shinfo(skb)->frags[0].page_offset += pull_len; + + /* Adjust size of first fragment */ + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len); + skb->data_len = length - pull_len; + } + return skb; +} + +static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb) +{ + int i; + int offset = ETH_HLEN; + + for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) { + if (*(skb->data + offset) != (unsigned char) (i & 0xff)) + goto out_loopback; + } + /* Loopback found */ + priv->loopback_ok = 1; + +out_loopback: + dev_kfree_skb_any(skb); +} + +static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring) +{ + int index = ring->prod & ring->size_mask; + + while ((u32) (ring->prod - ring->cons) < ring->actual_size) { + if (mlx4_en_prepare_rx_desc(priv, ring, index, + GFP_ATOMIC | __GFP_COLD)) + break; + ring->prod++; + index = ring->prod & ring->size_mask; + } +} + +/* When hardware doesn't strip the vlan, we need to calculate the checksum + * over it and add it to the hardware's checksum calculation + */ +static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, + struct vlan_hdr *vlanh) +{ + return csum_add(hw_checksum, *(__wsum *)vlanh); +} + +/* Although the stack expects checksum which doesn't include the pseudo + * header, the HW adds it. To address that, we are subtracting the pseudo + * header checksum from the checksum value provided by the HW. + */ +static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, + struct iphdr *iph) +{ + __u16 length_for_csum = 0; + __wsum csum_pseudo_header = 0; + + length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); + csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, + length_for_csum, iph->protocol, 0); + skb->csum = csum_sub(hw_checksum, csum_pseudo_header); +} + +#if IS_ENABLED(CONFIG_IPV6) +/* In IPv6 packets, besides subtracting the pseudo header checksum, + * we also compute/add the IP header checksum which + * is not added by the HW. 
+ */ +static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, + struct ipv6hdr *ipv6h) +{ + __wsum csum_pseudo_hdr = 0; + + if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) + return -1; + hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); + + csum_pseudo_hdr = csum_partial(&ipv6h->saddr, + sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); + + skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); + skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); + return 0; +} +#endif +static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, + netdev_features_t dev_features) +{ + __wsum hw_checksum = 0; + + void *hdr = (u8 *)va + sizeof(struct ethhdr); + + hw_checksum = csum_unfold((__force __sum16)cqe->checksum); + + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) && + !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) { + hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); + hdr += sizeof(struct vlan_hdr); + } + + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) + get_fixed_ipv4_csum(hw_checksum, skb, hdr); +#if IS_ENABLED(CONFIG_IPV6) + else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) + if (get_fixed_ipv6_csum(hw_checksum, skb, hdr)) + return -1; +#endif + return 0; +} + +int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_cqe *cqe; + struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; + struct mlx4_en_rx_alloc *frags; + struct mlx4_en_rx_desc *rx_desc; + struct sk_buff *skb; + int index; + int nr; + unsigned int length; + int polled = 0; + int ip_summed; + int factor = priv->cqe_factor; + u64 timestamp; + bool l2_tunnel; + + if (!priv->port_up) + return 0; + + if (budget <= 0) + return polled; + + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx + * descriptor offset can be deduced from the CQE index instead of + * reading 'cqe->index' */ + index = cq->mcq.cons_index & ring->size_mask; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; + + /* Process all completed CQEs */ + while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, + cq->mcq.cons_index & cq->size)) { + + frags = ring->rx_info + (index << priv->log_rx_info); + rx_desc = ring->buf + (index << ring->log_stride); + + /* + * make sure we read the CQE after we read the ownership bit + */ + dma_rmb(); + + /* Drop packet on bad receive or bad checksum */ + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == + MLX4_CQE_OPCODE_ERROR)) { + en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n", + ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome, + ((struct mlx4_err_cqe *)cqe)->syndrome); + goto next; + } + if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { + en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n"); + goto next; + } + + /* Check if we need to drop the packet if SRIOV is not enabled + * and not performing the selftest or flb disabled + */ + if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) { + struct ethhdr *ethh; + dma_addr_t dma; + /* Get pointer to first fragment since we haven't + * skb yet and cast it to ethhdr struct + */ + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), + 
DMA_FROM_DEVICE); + ethh = (struct ethhdr *)(page_address(frags[0].page) + + frags[0].page_offset); + + if (is_multicast_ether_addr(ethh->h_dest)) { + struct mlx4_mac_entry *entry; + struct hlist_head *bucket; + unsigned int mac_hash; + + /* Drop the packet, since HW loopback-ed it */ + mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX]; + bucket = &priv->mac_hash[mac_hash]; + rcu_read_lock(); + hlist_for_each_entry_rcu(entry, bucket, hlist) { + if (ether_addr_equal_64bits(entry->mac, + ethh->h_source)) { + rcu_read_unlock(); + goto next; + } + } + rcu_read_unlock(); + } + } + + /* + * Packet is OK - process it. + */ + length = be32_to_cpu(cqe->byte_cnt); + length -= ring->fcs_del; + ring->bytes += length; + ring->packets++; + l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + + if (likely(dev->features & NETIF_F_RXCSUM)) { + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | + MLX4_CQE_STATUS_UDP)) { + if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff)) { + ip_summed = CHECKSUM_UNNECESSARY; + ring->csum_ok++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } else { + if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | + MLX4_CQE_STATUS_IPV6))) { + ip_summed = CHECKSUM_COMPLETE; + ring->csum_complete++; + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + } + } else { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + } + + /* This packet is eligible for GRO if it is: + * - DIX Ethernet (type interpretation) + * - TCP/IP (v4) + * - without IP options + * - not an IP fragment + * - no LLS polling in progress + */ + if (!mlx4_en_cq_busy_polling(cq) && + (dev->features & NETIF_F_GRO)) { + struct sk_buff *gro_skb = napi_get_frags(&cq->napi); + if (!gro_skb) + goto next; + + nr = mlx4_en_complete_rx_desc(priv, + rx_desc, frags, gro_skb, + length); + if (!nr) + goto next; + + if (ip_summed == CHECKSUM_COMPLETE) { + void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); + if (check_csum(cqe, gro_skb, va, + dev->features)) { + ip_summed = CHECKSUM_NONE; + ring->csum_none++; + ring->csum_complete--; + } + } + + skb_shinfo(gro_skb)->nr_frags = nr; + gro_skb->len = length; + gro_skb->data_len = length; + gro_skb->ip_summed = ip_summed; + + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + gro_skb->csum_level = 1; + + if ((cqe->vlan_my_qpn & + cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + u16 vid = be16_to_cpu(cqe->sl_vid); + + __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid); + } + + if (dev->features & NETIF_F_RXHASH) + skb_set_hash(gro_skb, + be32_to_cpu(cqe->immed_rss_invalid), + PKT_HASH_TYPE_L3); + + skb_record_rx_queue(gro_skb, cq->ring); + skb_mark_napi_id(gro_skb, &cq->napi); + + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, + skb_hwtstamps(gro_skb), + timestamp); + } + + napi_gro_frags(&cq->napi); + goto next; + } + + /* GRO not possible, complete processing here */ + skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); + if (!skb) { + priv->stats.rx_dropped++; + goto next; + } + + if (unlikely(priv->validate_loopback)) { + validate_loopback(priv, skb); + goto next; + } + + if (ip_summed == CHECKSUM_COMPLETE) { + if (check_csum(cqe, skb, skb->data, dev->features)) { + ip_summed = CHECKSUM_NONE; + ring->csum_complete--; + ring->csum_none++; + } + } + + skb->ip_summed 
= ip_summed; + skb->protocol = eth_type_trans(skb, dev); + skb_record_rx_queue(skb, cq->ring); + + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_level = 1; + + if (dev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + be32_to_cpu(cqe->immed_rss_invalid), + PKT_HASH_TYPE_L3); + + if ((be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_VLAN_PRESENT_MASK) && + (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid)); + + if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { + timestamp = mlx4_en_get_cqe_ts(cqe); + mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb), + timestamp); + } + + skb_mark_napi_id(skb, &cq->napi); + + if (!mlx4_en_cq_busy_polling(cq)) + napi_gro_receive(&cq->napi, skb); + else + netif_receive_skb(skb); + +next: + for (nr = 0; nr < priv->num_frags; nr++) + mlx4_en_free_frag(priv, frags, nr); + + ++cq->mcq.cons_index; + index = (cq->mcq.cons_index) & ring->size_mask; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; + if (++polled == budget) + goto out; + } + +out: + AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); + mlx4_cq_set_ci(&cq->mcq); + wmb(); /* ensure HW sees CQ consumer before we post new buffers */ + ring->cons = cq->mcq.cons_index; + mlx4_en_refill_rx_buffers(priv, ring); + mlx4_en_update_rx_prod_db(ring); + return polled; +} + + +void mlx4_en_rx_irq(struct mlx4_cq *mcq) +{ + struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); + else + mlx4_en_arm_cq(priv, cq); +} + +/* Rx CQ polling - called by NAPI */ +int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + int done; + + if (!mlx4_en_cq_lock_napi(cq)) + return budget; + + done = mlx4_en_process_rx_cq(dev, cq, budget); + + mlx4_en_cq_unlock_napi(cq); + + /* If we used up all the quota - we're probably not done yet... */ + if (done == budget) { + int cpu_curr; + const struct cpumask *aff; + + INC_PERF_COUNTER(priv->pstats.napi_quota); + + cpu_curr = smp_processor_id(); + aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; + + if (likely(cpumask_test_cpu(cpu_curr, aff))) + return budget; + + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. need to stop this NAPI + * poll, and restart it on the right CPU + */ + done = 0; + } + /* Done for now */ + napi_complete_done(napi, done); + mlx4_en_arm_cq(priv, cq); + return done; +} + +static const int frag_sizes[] = { + FRAG_SZ0, + FRAG_SZ1, + FRAG_SZ2, + FRAG_SZ3 +}; + +void mlx4_en_calc_rx_buf(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN; + int buf_size = 0; + int i = 0; + + while (buf_size < eff_mtu) { + priv->frag_info[i].frag_size = + (eff_mtu > buf_size + frag_sizes[i]) ? 
+ frag_sizes[i] : eff_mtu - buf_size; + priv->frag_info[i].frag_prefix_size = buf_size; + priv->frag_info[i].frag_stride = + ALIGN(priv->frag_info[i].frag_size, + SMP_CACHE_BYTES); + buf_size += priv->frag_info[i].frag_size; + i++; + } + + priv->num_frags = i; + priv->rx_skb_size = eff_mtu; + priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); + + en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n", + eff_mtu, priv->num_frags); + for (i = 0; i < priv->num_frags; i++) { + en_err(priv, + " frag:%d - size:%d prefix:%d stride:%d\n", + i, + priv->frag_info[i].frag_size, + priv->frag_info[i].frag_prefix_size, + priv->frag_info[i].frag_stride); + } +} + +/* RSS related functions */ + +static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, + struct mlx4_en_rx_ring *ring, + enum mlx4_qp_state *state, + struct mlx4_qp *qp) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_qp_context *context; + int err = 0; + + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed to allocate qp #%x\n", qpn); + goto out; + } + qp->event = mlx4_en_sqp_event; + + memset(context, 0, sizeof *context); + mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, + qpn, ring->cqn, -1, context); + context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); + + /* Cancel FCS removal if FW allows */ + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) { + context->param3 |= cpu_to_be32(1 << 29); + if (priv->dev->features & NETIF_F_RXFCS) + ring->fcs_del = 0; + else + ring->fcs_del = ETH_FCS_LEN; + } else + ring->fcs_del = 0; + + err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); + if (err) { + mlx4_qp_remove(mdev->dev, qp); + mlx4_qp_free(mdev->dev, qp); + } + mlx4_en_update_rx_prod_db(ring); +out: + kfree(context); + return err; +} + +int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) +{ + int err; + u32 qpn; + + err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, + MLX4_RESERVE_A0_QP); + if (err) { + en_err(priv, "Failed reserving drop qpn\n"); + return err; + } + err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed allocating drop qp\n"); + mlx4_qp_release_range(priv->mdev->dev, qpn, 1); + return err; + } + + return 0; +} + +void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv) +{ + u32 qpn; + + qpn = priv->drop_qp.qpn; + mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp); + mlx4_qp_free(priv->mdev->dev, &priv->drop_qp); + mlx4_qp_release_range(priv->mdev->dev, qpn, 1); +} + +/* Allocate rx qp's and configure them according to rss map */ +int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rss_map *rss_map = &priv->rss_map; + struct mlx4_qp_context context; + struct mlx4_rss_context *rss_context; + int rss_rings; + void *ptr; + u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 | + MLX4_RSS_TCP_IPV6); + int i, qpn; + int err = 0; + int good_qps = 0; + + en_dbg(DRV, priv, "Configuring rss steering\n"); + err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num, + priv->rx_ring_num, + &rss_map->base_qpn, 0); + if (err) { + en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num); + return err; + } + + for (i = 0; i < priv->rx_ring_num; i++) { + qpn = rss_map->base_qpn + i; + err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i], + &rss_map->state[i], + 
&rss_map->qps[i]); + if (err) + goto rss_err; + + ++good_qps; + } + + /* Configure RSS indirection qp */ + err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed to allocate RSS indirection QP\n"); + goto rss_err; + } + rss_map->indir_qp.event = mlx4_en_sqp_event; + mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, + priv->rx_ring[0]->cqn, -1, &context); + + if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) + rss_rings = priv->rx_ring_num; + else + rss_rings = priv->prof->rss_rings; + + ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path) + + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; + rss_context = ptr; + rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 | + (rss_map->base_qpn)); + rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); + if (priv->mdev->profile.udp_rss) { + rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6; + rss_context->base_qpn_udp = rss_context->default_qpn; + } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n"); + rss_mask |= MLX4_RSS_BY_INNER_HEADERS; + } + + rss_context->flags = rss_mask; + rss_context->hash_fn = MLX4_RSS_HASH_TOP; + if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) { + rss_context->hash_fn = MLX4_RSS_HASH_XOR; + } else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) { + rss_context->hash_fn = MLX4_RSS_HASH_TOP; + memcpy(rss_context->rss_key, priv->rss_key, + MLX4_EN_RSS_KEY_SIZE); + netdev_rss_key_fill(rss_context->rss_key, + MLX4_EN_RSS_KEY_SIZE); + } else { + en_err(priv, "Unknown RSS hash function requested\n"); + err = -EINVAL; + goto indir_err; + } + err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, + &rss_map->indir_qp, &rss_map->indir_state); + if (err) + goto indir_err; + + return 0; + +indir_err: + mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); + mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); + mlx4_qp_free(mdev->dev, &rss_map->indir_qp); +rss_err: + for (i = 0; i < good_qps; i++) { + mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); + mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); + mlx4_qp_free(mdev->dev, &rss_map->qps[i]); + } + mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); + return err; +} + +void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_rss_map *rss_map = &priv->rss_map; + int i; + + mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp); + mlx4_qp_remove(mdev->dev, &rss_map->indir_qp); + mlx4_qp_free(mdev->dev, &rss_map->indir_qp); + + for (i = 0; i < priv->rx_ring_num; i++) { + mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i], + MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]); + mlx4_qp_remove(mdev->dev, &rss_map->qps[i]); + mlx4_qp_free(mdev->dev, &rss_map->qps[i]); + } + mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c new file mode 100644 index 000000000..b66e03d97 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include + +#include "mlx4_en.h" + + +static int mlx4_en_test_registers(struct mlx4_en_priv *priv) +{ + return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv) +{ + struct sk_buff *skb; + struct ethhdr *ethh; + unsigned char *packet; + unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD; + unsigned int i; + int err; + + + /* build the pkt before xmit */ + skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NET_IP_ALIGN); + + ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr)); + packet = (unsigned char *)skb_put(skb, packet_size); + memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN); + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_ARP); + skb_set_mac_header(skb, 0); + for (i = 0; i < packet_size; ++i) /* fill our packet */ + packet[i] = (unsigned char)(i & 0xff); + + /* xmit the pkt */ + err = mlx4_en_xmit(skb, priv->dev); + return err; +} + +static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) +{ + u32 loopback_ok = 0; + int i; + bool gro_enabled; + + priv->loopback_ok = 0; + priv->validate_loopback = 1; + gro_enabled = priv->dev->features & NETIF_F_GRO; + + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + priv->dev->features &= ~NETIF_F_GRO; + + /* xmit */ + if (mlx4_en_test_loopback_xmit(priv)) { + en_err(priv, "Transmitting loopback packet failed\n"); + goto mlx4_en_test_loopback_exit; + } + + /* polling for result */ + for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) { + msleep(MLX4_EN_LOOPBACK_TIMEOUT); + if (priv->loopback_ok) { + loopback_ok = 1; + break; + } + } + if (!loopback_ok) + en_err(priv, "Loopback packet didn't arrive\n"); + +mlx4_en_test_loopback_exit: + + priv->validate_loopback = 0; + + if (gro_enabled) + priv->dev->features |= NETIF_F_GRO; + + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + return !loopback_ok; +} + + +static int mlx4_en_test_link(struct mlx4_en_priv *priv) +{ + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + if (priv->port_state.link_state == 1) + return 0; + else + return 1; +} + 
+static int mlx4_en_test_speed(struct mlx4_en_priv *priv) +{ + + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + + /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */ + if (priv->port_state.link_speed != SPEED_100 && + priv->port_state.link_speed != SPEED_1000 && + priv->port_state.link_speed != SPEED_10000 && + priv->port_state.link_speed != SPEED_20000 && + priv->port_state.link_speed != SPEED_40000 && + priv->port_state.link_speed != SPEED_56000) + return priv->port_state.link_speed; + + return 0; +} + + +void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int i, carrier_ok; + + memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST); + + if (*flags & ETH_TEST_FL_OFFLINE) { + /* disable the interface */ + carrier_ok = netif_carrier_ok(dev); + + netif_carrier_off(dev); + /* Wait until all tx queues are empty. + * there should not be any additional incoming traffic + * since we turned the carrier off */ + msleep(200); + + if (priv->mdev->dev->caps.flags & + MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { + buf[3] = mlx4_en_test_registers(priv); + if (priv->port_up) + buf[4] = mlx4_en_test_loopback(priv); + } + + if (carrier_ok) + netif_carrier_on(dev); + + } + buf[0] = mlx4_test_interrupts(mdev->dev); + buf[1] = mlx4_en_test_link(priv); + buf[2] = mlx4_en_test_speed(priv); + + for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) { + if (buf[i]) + *flags |= ETH_TEST_FL_FAILED; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c new file mode 100644 index 000000000..c10d98f6a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4_en.h" + +int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring, u32 size, + u16 stride, int node, int queue_index) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring; + int tmp; + int err; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); + if (!ring) { + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + en_err(priv, "Failed allocating TX ring\n"); + return -ENOMEM; + } + } + + ring->size = size; + ring->size_mask = size - 1; + ring->stride = stride; + ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; + + tmp = size * sizeof(struct mlx4_en_tx_info); + ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); + if (!ring->tx_info) { + ring->tx_info = vmalloc(tmp); + if (!ring->tx_info) { + err = -ENOMEM; + goto err_ring; + } + } + + en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", + ring->tx_info, tmp); + + ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node); + if (!ring->bounce_buf) { + ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); + if (!ring->bounce_buf) { + err = -ENOMEM; + goto err_info; + } + } + ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); + + /* Allocate HW buffers on provided NUMA node */ + set_dev_node(&mdev->dev->persist->pdev->dev, node); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, + 2 * PAGE_SIZE); + set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); + if (err) { + en_err(priv, "Failed allocating hwq resources\n"); + goto err_bounce; + } + + err = mlx4_en_map_buffer(&ring->wqres.buf); + if (err) { + en_err(priv, "Failed to map TX buffer\n"); + goto err_hwq_res; + } + + ring->buf = ring->wqres.buf.direct.buf; + + en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n", + ring, ring->buf, ring->size, ring->buf_size, + (unsigned long long) ring->wqres.buf.direct.map); + + err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, + MLX4_RESERVE_ETH_BF_QP); + if (err) { + en_err(priv, "failed reserving qp for TX ring\n"); + goto err_map; + } + + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); + if (err) { + en_err(priv, "Failed allocating qp %d\n", ring->qpn); + goto err_reserve; + } + ring->qp.event = mlx4_en_sqp_event; + + err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); + if (err) { + en_dbg(DRV, priv, "working without blueflame (%d)\n", err); + ring->bf.uar = &mdev->priv_uar; + ring->bf.uar->map = mdev->uar_map; + ring->bf_enabled = false; + ring->bf_alloced = false; + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + ring->bf_alloced = true; + ring->bf_enabled = !!(priv->pflags & + MLX4_EN_PRIV_FLAGS_BLUEFLAME); + } + + ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; + ring->queue_index = queue_index; + + if (queue_index < priv->num_tx_rings_p_up) + cpumask_set_cpu(cpumask_local_spread(queue_index, + priv->mdev->dev->numa_node), + &ring->affinity_mask); + + *pring = ring; + return 0; + +err_reserve: + mlx4_qp_release_range(mdev->dev, ring->qpn, 1); +err_map: + mlx4_en_unmap_buffer(&ring->wqres.buf); +err_hwq_res: + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); +err_bounce: + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; +err_info: + kvfree(ring->tx_info); + ring->tx_info = NULL; +err_ring: + kfree(ring); + *pring = NULL; + return err; +} + +void 
mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring = *pring; + en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); + + if (ring->bf_alloced) + mlx4_bf_free(mdev->dev, &ring->bf); + mlx4_qp_remove(mdev->dev, &ring->qp); + mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); + mlx4_en_unmap_buffer(&ring->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; + kvfree(ring->tx_info); + ring->tx_info = NULL; + kfree(ring); + *pring = NULL; +} + +int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int cq, int user_prio) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + ring->cqn = cq; + ring->prod = 0; + ring->cons = 0xffffffff; + ring->last_nr_txbb = 1; + memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); + memset(ring->buf, 0, ring->buf_size); + + ring->qp_state = MLX4_QP_STATE_RST; + ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); + ring->mr_key = cpu_to_be32(mdev->mr.key); + + mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, + ring->cqn, user_prio, &ring->context); + if (ring->bf_alloced) + ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + + err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, + &ring->qp, &ring->qp_state); + if (!cpumask_empty(&ring->affinity_mask)) + netif_set_xps_queue(priv->dev, &ring->affinity_mask, + ring->queue_index); + + return err; +} + +void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + + mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); +} + +static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring) +{ + return ring->prod - ring->cons > ring->full_size; +} + +static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, int index, + u8 owner) +{ + __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + void *end = ring->buf + ring->buf_size; + __be32 *ptr = (__be32 *)tx_desc; + int i; + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + } + } else { + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; + i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + if ((void *)ptr >= end) { + ptr = ring->buf; + stamp ^= cpu_to_be32(0x80000000); + } + } + } +} + + +static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp) +{ + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; + void *end = ring->buf + ring->buf_size; + struct sk_buff *skb = tx_info->skb; + int nr_maps = tx_info->nr_maps; + int i; + + /* We do not touch skb here, so prefetch skb->users location + * to speedup consume_skb() + */ + prefetchw(&skb->users); + + if 
(unlikely(timestamp)) { + struct skb_shared_hwtstamps hwts; + + mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp); + skb_tstamp_tx(skb, &hwts); + } + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + if (!tx_info->inl) { + if (tx_info->linear) + dma_unmap_single(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else + dma_unmap_page(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; + dma_unmap_page(priv->ddev, + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + } + } + } else { + if (!tx_info->inl) { + if ((void *) data >= end) { + data = ring->buf + ((void *)data - end); + } + + if (tx_info->linear) + dma_unmap_single(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + else + dma_unmap_page(priv->ddev, + tx_info->map0_dma, + tx_info->map0_byte_count, + PCI_DMA_TODEVICE); + for (i = 1; i < nr_maps; i++) { + data++; + /* Check for wraparound before unmapping */ + if ((void *) data >= end) + data = ring->buf; + dma_unmap_page(priv->ddev, + (dma_addr_t)be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + } + } + } + dev_consume_skb_any(skb); + return tx_info->nr_txbb; +} + + +int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int cnt = 0; + + /* Skip last polled descriptor */ + ring->cons += ring->last_nr_txbb; + en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", + ring->cons, ring->prod); + + if ((u32) (ring->prod - ring->cons) > ring->size) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Tx consumer passed producer!\n"); + return 0; + } + + while (ring->cons != ring->prod) { + ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, + ring->cons & ring->size_mask, + !!(ring->cons & ring->size), 0); + ring->cons += ring->last_nr_txbb; + cnt++; + } + + netdev_tx_reset_queue(ring->tx_queue); + + if (cnt) + en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); + + return cnt; +} + +static bool mlx4_en_process_tx_cq(struct net_device *dev, + struct mlx4_en_cq *cq) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_cq *mcq = &cq->mcq; + struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; + struct mlx4_cqe *cqe; + u16 index; + u16 new_index, ring_index, stamp_index; + u32 txbbs_skipped = 0; + u32 txbbs_stamp = 0; + u32 cons_index = mcq->cons_index; + int size = cq->size; + u32 size_mask = ring->size_mask; + struct mlx4_cqe *buf = cq->buf; + u32 packets = 0; + u32 bytes = 0; + int factor = priv->cqe_factor; + u64 timestamp = 0; + int done = 0; + int budget = priv->tx_work_limit; + u32 last_nr_txbb; + u32 ring_cons; + + if (!priv->port_up) + return true; + + netdev_txq_bql_complete_prefetchw(ring->tx_queue); + + index = cons_index & size_mask; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; + last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); + ring_cons = ACCESS_ONCE(ring->cons); + ring_index = ring_cons & size_mask; + stamp_index = ring_index; + + /* Process all completed CQEs */ + while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, + cons_index & size) && (done < budget)) { + /* + * make sure we read the CQE after we read the + * ownership bit + */ + dma_rmb(); + + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == + MLX4_CQE_OPCODE_ERROR)) { + struct 
mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; + + en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", + cqe_err->vendor_err_syndrome, + cqe_err->syndrome); + } + + /* Skip over last polled CQE */ + new_index = be16_to_cpu(cqe->wqe_index) & size_mask; + + do { + txbbs_skipped += last_nr_txbb; + ring_index = (ring_index + last_nr_txbb) & size_mask; + if (ring->tx_info[ring_index].ts_requested) + timestamp = mlx4_en_get_cqe_ts(cqe); + + /* free next descriptor */ + last_nr_txbb = mlx4_en_free_tx_desc( + priv, ring, ring_index, + !!((ring_cons + txbbs_skipped) & + ring->size), timestamp); + + mlx4_en_stamp_wqe(priv, ring, stamp_index, + !!((ring_cons + txbbs_stamp) & + ring->size)); + stamp_index = ring_index; + txbbs_stamp = txbbs_skipped; + packets++; + bytes += ring->tx_info[ring_index].nr_bytes; + } while ((++done < budget) && (ring_index != new_index)); + + ++cons_index; + index = cons_index & size_mask; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; + } + + + /* + * To prevent CQ overflow we first update CQ consumer and only then + * the ring consumer. + */ + mcq->cons_index = cons_index; + mlx4_cq_set_ci(mcq); + wmb(); + + /* we want to dirty this cache line once */ + ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; + ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + + netdev_tx_completed_queue(ring->tx_queue, packets, bytes); + + /* Wakeup Tx queue if this stopped, and ring is not full. + */ + if (netif_tx_queue_stopped(ring->tx_queue) && + !mlx4_en_is_tx_ring_full(ring)) { + netif_tx_wake_queue(ring->tx_queue); + ring->wake_queue++; + } + return done < budget; +} + +void mlx4_en_tx_irq(struct mlx4_cq *mcq) +{ + struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + + if (likely(priv->port_up)) + napi_schedule_irqoff(&cq->napi); + else + mlx4_en_arm_cq(priv, cq); +} + +/* TX CQ polling - called by NAPI */ +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + int clean_complete; + + clean_complete = mlx4_en_process_tx_cq(dev, cq); + if (!clean_complete) + return budget; + + napi_complete(napi); + mlx4_en_arm_cq(priv, cq); + + return 0; +} + +static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + u32 index, + unsigned int desc_size) +{ + u32 copy = (ring->size - index) * TXBB_SIZE; + int i; + + for (i = desc_size - copy - 4; i >= 0; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + i)) = + *((u32 *) (ring->bounce_buf + copy + i)); + } + + for (i = copy - 4; i >= 4 ; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + index * TXBB_SIZE + i)) = + *((u32 *) (ring->bounce_buf + i)); + } + + /* Return real descriptor location */ + return ring->buf + index * TXBB_SIZE; +} + +/* Decide if skb can be inlined in tx descriptor to avoid dma mapping + * + * It seems strange we do not simply use skb_copy_bits(). 
+ * This would allow to inline all skbs iff skb->len <= inline_thold + * + * Note that caller already checked skb was not a gso packet + */ +static bool is_inline(int inline_thold, const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + void **pfrag) +{ + void *ptr; + + if (skb->len > inline_thold || !inline_thold) + return false; + + if (shinfo->nr_frags == 1) { + ptr = skb_frag_address_safe(&shinfo->frags[0]); + if (unlikely(!ptr)) + return false; + *pfrag = ptr; + return true; + } + if (shinfo->nr_frags) + return false; + return true; +} + +static int inline_size(const struct sk_buff *skb) +{ + if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) + <= MLX4_INLINE_ALIGN) + return ALIGN(skb->len + CTRL_SIZE + + sizeof(struct mlx4_wqe_inline_seg), 16); + else + return ALIGN(skb->len + CTRL_SIZE + 2 * + sizeof(struct mlx4_wqe_inline_seg), 16); +} + +static int get_real_size(const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + struct net_device *dev, + int *lso_header_size, + bool *inline_ok, + void **pfrag) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int real_size; + + if (shinfo->gso_size) { + *inline_ok = false; + if (skb->encapsulation) + *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb); + else + *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); + real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE + + ALIGN(*lso_header_size + 4, DS_SIZE); + if (unlikely(*lso_header_size != skb_headlen(skb))) { + /* We add a segment for the skb linear buffer only if + * it contains data */ + if (*lso_header_size < skb_headlen(skb)) + real_size += DS_SIZE; + else { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Non-linear headers\n"); + return 0; + } + } + } else { + *lso_header_size = 0; + *inline_ok = is_inline(priv->prof->inline_thold, skb, + shinfo, pfrag); + + if (*inline_ok) + real_size = inline_size(skb); + else + real_size = CTRL_SIZE + + (shinfo->nr_frags + 1) * DS_SIZE; + } + + return real_size; +} + +static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, + const struct sk_buff *skb, + const struct skb_shared_info *shinfo, + int real_size, u16 *vlan_tag, + int tx_ind, void *fragptr) +{ + struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; + int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + unsigned int hlen = skb_headlen(skb); + + if (skb->len <= spc) { + if (likely(skb->len >= MIN_PKT_LEN)) { + inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + } else { + inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN); + memset(((void *)(inl + 1)) + skb->len, 0, + MIN_PKT_LEN - skb->len); + } + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen, fragptr, + skb_frag_size(&shinfo->frags[0])); + + } else { + inl->byte_count = cpu_to_be32(1 << 31 | spc); + if (hlen <= spc) { + skb_copy_from_linear_data(skb, inl + 1, hlen); + if (hlen < spc) { + memcpy(((void *)(inl + 1)) + hlen, + fragptr, spc - hlen); + fragptr += spc - hlen; + } + inl = (void *) (inl + 1) + spc; + memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); + } else { + skb_copy_from_linear_data(skb, inl + 1, spc); + inl = (void *) (inl + 1) + spc; + skb_copy_from_linear_data_offset(skb, spc, inl + 1, + hlen - spc); + if (shinfo->nr_frags) + memcpy(((void *)(inl + 1)) + hlen - spc, + fragptr, + skb_frag_size(&shinfo->frags[0])); + } + + dma_wmb(); + inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); + } +} + +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff 
*skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 rings_p_up = priv->num_tx_rings_p_up; + u8 up = 0; + + if (dev->num_tc) + return skb_tx_hash(dev, skb); + + if (skb_vlan_tag_present(skb)) + up = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; + + return fallback(dev, skb) % rings_p_up + up * rings_p_up; +} + +static void mlx4_bf_copy(void __iomem *dst, const void *src, + unsigned int bytecnt) +{ + __iowrite64_copy(dst, src, bytecnt / 8); +} + +netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct mlx4_en_priv *priv = netdev_priv(dev); + struct device *ddev = priv->ddev; + struct mlx4_en_tx_ring *ring; + struct mlx4_en_tx_desc *tx_desc; + struct mlx4_wqe_data_seg *data; + struct mlx4_en_tx_info *tx_info; + int tx_ind = 0; + int nr_txbb; + int desc_size; + int real_size; + u32 index, bf_index; + __be32 op_own; + u16 vlan_tag = 0; + int i_frag; + int lso_header_size; + void *fragptr = NULL; + bool bounce = false; + bool send_doorbell; + bool stop_queue; + bool inline_ok; + u32 ring_cons; + + if (!priv->port_up) + goto tx_drop; + + tx_ind = skb_get_queue_mapping(skb); + ring = priv->tx_ring[tx_ind]; + + /* fetch ring->cons far ahead before needing it to avoid stall */ + ring_cons = ACCESS_ONCE(ring->cons); + + real_size = get_real_size(skb, shinfo, dev, &lso_header_size, + &inline_ok, &fragptr); + if (unlikely(!real_size)) + goto tx_drop; + + /* Align descriptor to TXBB size */ + desc_size = ALIGN(real_size, TXBB_SIZE); + nr_txbb = desc_size / TXBB_SIZE; + if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Oversized header or SG list\n"); + goto tx_drop; + } + + if (skb_vlan_tag_present(skb)) + vlan_tag = skb_vlan_tag_get(skb); + + + netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); + + /* Track current inflight packets for performance analysis */ + AVG_PERF_COUNTER(priv->pstats.inflight_avg, + (u32)(ring->prod - ring_cons - 1)); + + /* Packet is good - grab an index and transmit it */ + index = ring->prod & ring->size_mask; + bf_index = ring->prod; + + /* See if we have enough space for whole descriptor TXBB for setting + * SW ownership on next descriptor; if not, use a bounce buffer. */ + if (likely(index + nr_txbb <= ring->size)) + tx_desc = ring->buf + index * TXBB_SIZE; + else { + tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; + bounce = true; + } + + /* Save skb in tx_info ring */ + tx_info = &ring->tx_info[index]; + tx_info->skb = skb; + tx_info->nr_txbb = nr_txbb; + + data = &tx_desc->data; + if (lso_header_size) + data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4, + DS_SIZE)); + + /* valid only for none inline segments */ + tx_info->data_offset = (void *)data - (void *)tx_desc; + + tx_info->inl = inline_ok; + + tx_info->linear = (lso_header_size < skb_headlen(skb) && + !inline_ok) ? 
1 : 0; + + tx_info->nr_maps = shinfo->nr_frags + tx_info->linear; + data += tx_info->nr_maps - 1; + + if (!tx_info->inl) { + dma_addr_t dma = 0; + u32 byte_count = 0; + + /* Map fragments if any */ + for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) { + const struct skb_frag_struct *frag; + + frag = &shinfo->frags[i_frag]; + byte_count = skb_frag_size(frag); + dma = skb_frag_dma_map(ddev, frag, + 0, byte_count, + DMA_TO_DEVICE); + if (dma_mapping_error(ddev, dma)) + goto tx_drop_unmap; + + data->addr = cpu_to_be64(dma); + data->lkey = ring->mr_key; + dma_wmb(); + data->byte_count = cpu_to_be32(byte_count); + --data; + } + + /* Map linear part if needed */ + if (tx_info->linear) { + byte_count = skb_headlen(skb) - lso_header_size; + + dma = dma_map_single(ddev, skb->data + + lso_header_size, byte_count, + PCI_DMA_TODEVICE); + if (dma_mapping_error(ddev, dma)) + goto tx_drop_unmap; + + data->addr = cpu_to_be64(dma); + data->lkey = ring->mr_key; + dma_wmb(); + data->byte_count = cpu_to_be32(byte_count); + } + /* tx completion can avoid cache line miss for common cases */ + tx_info->map0_dma = dma; + tx_info->map0_byte_count = byte_count; + } + + /* + * For timestamping add flag to skb_shinfo and + * set flag for further reference + */ + tx_info->ts_requested = 0; + if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON && + shinfo->tx_flags & SKBTX_HW_TSTAMP)) { + shinfo->tx_flags |= SKBTX_IN_PROGRESS; + tx_info->ts_requested = 1; + } + + /* Prepare ctrl segement apart opcode+ownership, which depends on + * whether LSO is used */ + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + if (!skb->encapsulation) + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + else + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM); + ring->tx_csum++; + } + + if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) { + struct ethhdr *ethh; + + /* Copy dst mac address to wqe. This allows loopback in eSwitch, + * so that VFs and PF can communicate with each other + */ + ethh = (struct ethhdr *)skb->data; + tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest); + tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2)); + } + + /* Handle LSO (TSO) packets */ + if (lso_header_size) { + int i; + + /* Mark opcode as LSO */ + op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | + ((ring->prod & ring->size) ? + cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + + /* Fill in the LSO prefix */ + tx_desc->lso.mss_hdr_size = cpu_to_be32( + shinfo->gso_size << 16 | lso_header_size); + + /* Copy headers; + * note that we already verified that it is linear */ + memcpy(tx_desc->lso.header, skb->data, lso_header_size); + + ring->tso_packets++; + + i = ((skb->len - lso_header_size) / shinfo->gso_size) + + !!((skb->len - lso_header_size) % shinfo->gso_size); + tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size; + ring->packets += i; + } else { + /* Normal (Non LSO) packet */ + op_own = cpu_to_be32(MLX4_OPCODE_SEND) | + ((ring->prod & ring->size) ? 
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); + ring->packets++; + } + ring->bytes += tx_info->nr_bytes; + netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes); + AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); + + if (tx_info->inl) + build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag, + tx_ind, fragptr); + + if (skb->encapsulation) { + struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb); + if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP) + op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP); + else + op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP); + } + + ring->prod += nr_txbb; + + /* If we used a bounce buffer then copy descriptor back into place */ + if (unlikely(bounce)) + tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); + + skb_tx_timestamp(skb); + + /* Check available TXBBs And 2K spare for prefetch */ + stop_queue = mlx4_en_is_tx_ring_full(ring); + if (unlikely(stop_queue)) { + netif_tx_stop_queue(ring->tx_queue); + ring->queue_stopped++; + } + send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue); + + real_size = (real_size / 16) & 0x3f; + + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && + !skb_vlan_tag_present(skb) && send_doorbell) { + tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | + cpu_to_be32(real_size); + + op_own |= htonl((bf_index & 0xffff) << 8); + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + + wmb(); + + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, + desc_size); + + wmb(); + + ring->bf.offset ^= ring->bf.buf_size; + } else { + tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * + !!skb_vlan_tag_present(skb); + tx_desc->ctrl.fence_size = real_size; + + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + if (send_doorbell) { + wmb(); + /* Since there is no iowrite*_native() that writes the + * value as is, without byteswapping - using the one + * the doesn't do byteswapping in the relevant arch + * endianness. + */ +#if defined(__LITTLE_ENDIAN) + iowrite32( +#else + iowrite32be( +#endif + ring->doorbell_qpn, + ring->bf.uar->map + MLX4_SEND_DOORBELL); + } else { + ring->xmit_more++; + } + } + + if (unlikely(stop_queue)) { + /* If queue was emptied after the if (stop_queue) , and before + * the netif_tx_stop_queue() - need to wake the queue, + * or else it will remain stopped forever. + * Need a memory barrier to make sure ring->cons was not + * updated before queue was stopped. 
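+		 * The wake-up normally comes from mlx4_en_process_tx_cq(), which
+		 * only wakes a queue it already sees stopped, so re-reading
+		 * ring->cons and re-checking the occupancy below closes the
+		 * window in which that completion ran between our fullness test
+		 * and the netif_tx_stop_queue() call above.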
+ */ + smp_rmb(); + + ring_cons = ACCESS_ONCE(ring->cons); + if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { + netif_tx_wake_queue(ring->tx_queue); + ring->wake_queue++; + } + } + return NETDEV_TX_OK; + +tx_drop_unmap: + en_err(priv, "DMA mapping error\n"); + + while (++i_frag < shinfo->nr_frags) { + ++data; + dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + } + +tx_drop: + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; +} + diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c new file mode 100644 index 000000000..2619c9fbf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -0,0 +1,1441 @@ +/* + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "mlx4.h" +#include "fw.h" + +enum { + MLX4_IRQNAME_SIZE = 32 +}; + +enum { + MLX4_NUM_ASYNC_EQE = 0x100, + MLX4_NUM_SPARE_EQE = 0x80, + MLX4_EQ_ENTRY_SIZE = 0x20 +}; + +#define MLX4_EQ_STATUS_OK ( 0 << 28) +#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) +#define MLX4_EQ_OWNER_SW ( 0 << 24) +#define MLX4_EQ_OWNER_HW ( 1 << 24) +#define MLX4_EQ_FLAG_EC ( 1 << 18) +#define MLX4_EQ_FLAG_OI ( 1 << 17) +#define MLX4_EQ_STATE_ARMED ( 9 << 8) +#define MLX4_EQ_STATE_FIRED (10 << 8) +#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) + +#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ + (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ + (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ + (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ + (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ + (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ + (1ull << MLX4_EVENT_TYPE_CMD) | \ + (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ + (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ + (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ + (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) + +static u64 get_async_ev_mask(struct mlx4_dev *dev) +{ + u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) + async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) + async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT); + + return async_ev_mask; +} + +static void eq_set_ci(struct mlx4_eq *eq, int req_not) +{ + __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | + req_not << 31), + eq->doorbell); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, + u8 eqe_size) +{ + /* (entry & (eq->nent - 1)) gives us a cyclic array */ + unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; + /* CX3 is capable of extending the EQE from 32 to 64 bytes with + * strides of 64B,128B and 256B. + * When 64B EQE is used, the first (in the lower addresses) + * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes + * contain the legacy EQE information. + * In all other cases, the first 32B contains the legacy EQE info. + */ + return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; +} + +static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) +{ + struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); + return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; +} + +static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) +{ + struct mlx4_eqe *eqe = + &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; + return (!!(eqe->owner & 0x80) ^ + !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? 
+ eqe : NULL; +} + +void mlx4_gen_slave_eqe(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, struct mlx4_mfunc_master_ctx, + slave_event_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; + struct mlx4_eqe *eqe; + u8 slave; + int i; + + for (eqe = next_slave_event_eqe(slave_eq); eqe; + eqe = next_slave_event_eqe(slave_eq)) { + slave = eqe->slave_id; + + /* All active slaves need to receive the event */ + if (slave == ALL_SLAVES) { + for (i = 0; i <= dev->persist->num_vfs; i++) { + if (mlx4_GEN_EQE(dev, i, eqe)) + mlx4_warn(dev, "Failed to generate event for slave %d\n", + i); + } + } else { + if (mlx4_GEN_EQE(dev, slave, eqe)) + mlx4_warn(dev, "Failed to generate event for slave %d\n", + slave); + } + ++slave_eq->cons; + } +} + + +static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; + struct mlx4_eqe *s_eqe; + unsigned long flags; + + spin_lock_irqsave(&slave_eq->event_lock, flags); + s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; + if ((!!(s_eqe->owner & 0x80)) ^ + (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { + mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n", + slave); + spin_unlock_irqrestore(&slave_eq->event_lock, flags); + return; + } + + memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); + s_eqe->slave_id = slave; + /* ensure all information is written before setting the ownersip bit */ + dma_wmb(); + s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 
0x0 : 0x80; + ++slave_eq->prod; + + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.slave_event_work); + spin_unlock_irqrestore(&slave_eq->event_lock, flags); +} + +static void mlx4_slave_event(struct mlx4_dev *dev, int slave, + struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (slave < 0 || slave > dev->persist->num_vfs || + slave == dev->caps.function || + !priv->mfunc.master.slave_state[slave].active) + return; + + slave_event(dev, slave, eqe); +} + +int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) +{ + struct mlx4_eqe eqe; + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave]; + + if (!s_slave->active) + return 0; + + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; + eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; + eqe.event.port_mgmt_change.port = port; + + return mlx4_GEN_EQE(dev, slave, &eqe); +} +EXPORT_SYMBOL(mlx4_gen_pkey_eqe); + +int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) +{ + struct mlx4_eqe eqe; + + /*don't send if we don't have the that slave */ + if (dev->persist->num_vfs < slave) + return 0; + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; + eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; + eqe.event.port_mgmt_change.port = port; + + return mlx4_GEN_EQE(dev, slave, &eqe); +} +EXPORT_SYMBOL(mlx4_gen_guid_change_eqe); + +int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, + u8 port_subtype_change) +{ + struct mlx4_eqe eqe; + + /*don't send if we don't have the that slave */ + if (dev->persist->num_vfs < slave) + return 0; + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; + eqe.subtype = port_subtype_change; + eqe.event.port_change.port = cpu_to_be32(port << 28); + + mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__, + port_subtype_change, slave, port); + return mlx4_GEN_EQE(dev, slave, &eqe); +} +EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe); + +enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { + pr_err("%s: Error: asking for slave:%d, port:%d\n", + __func__, slave, port); + return SLAVE_PORT_DOWN; + } + return s_state[slave].port_state[port]; +} +EXPORT_SYMBOL(mlx4_get_slave_port_state); + +static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, + enum slave_port_state state) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { + pr_err("%s: Error: asking for slave:%d, port:%d\n", + __func__, slave, port); + return -1; + } + s_state[slave].port_state[port] = state; + + return 0; +} + +static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) +{ + int i; + enum slave_port_gen_event gen_event; + struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, + port); + + for (i = 0; i < dev->persist->num_vfs + 1; i++) + if (test_bit(i, slaves_pport.slaves)) 
+ set_and_calc_slave_port_state(dev, i, port, + event, &gen_event); +} +/************************************************************************** + The function get as input the new event to that port, + and according to the prev state change the slave's port state. + The events are: + MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, + MLX4_PORT_STATE_DEV_EVENT_PORT_UP + MLX4_PORT_STATE_IB_EVENT_GID_VALID + MLX4_PORT_STATE_IB_EVENT_GID_INVALID +***************************************************************************/ +int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, + u8 port, int event, + enum slave_port_gen_event *gen_event) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *ctx = NULL; + unsigned long flags; + int ret = -1; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + enum slave_port_state cur_state = + mlx4_get_slave_port_state(dev, slave, port); + + *gen_event = SLAVE_PORT_GEN_EVENT_NONE; + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { + pr_err("%s: Error: asking for slave:%d, port:%d\n", + __func__, slave, port); + return ret; + } + + ctx = &priv->mfunc.master.slave_state[slave]; + spin_lock_irqsave(&ctx->lock, flags); + + switch (cur_state) { + case SLAVE_PORT_DOWN: + if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PENDING_UP); + break; + case SLAVE_PENDING_UP: + if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PORT_DOWN); + else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) { + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PORT_UP); + *gen_event = SLAVE_PORT_GEN_EVENT_UP; + } + break; + case SLAVE_PORT_UP: + if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) { + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PORT_DOWN); + *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; + } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID == + event) { + mlx4_set_slave_port_state(dev, slave, port, + SLAVE_PENDING_UP); + *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; + } + break; + default: + pr_err("%s: BUG!!! 
UNKNOWN state: slave:%d, port:%d\n", + __func__, slave, port); + goto out; + } + ret = mlx4_get_slave_port_state(dev, slave, port); + +out: + spin_unlock_irqrestore(&ctx->lock, flags); + return ret; +} + +EXPORT_SYMBOL(set_and_calc_slave_port_state); + +int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) +{ + struct mlx4_eqe eqe; + + memset(&eqe, 0, sizeof eqe); + + eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; + eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; + eqe.event.port_mgmt_change.port = port; + eqe.event.port_mgmt_change.params.port_info.changed_attr = + cpu_to_be32((u32) attr); + + slave_event(dev, ALL_SLAVES, &eqe); + return 0; +} +EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev); + +void mlx4_master_handle_slave_flr(struct work_struct *work) +{ + struct mlx4_mfunc_master_ctx *master = + container_of(work, struct mlx4_mfunc_master_ctx, + slave_flr_event_work); + struct mlx4_mfunc *mfunc = + container_of(master, struct mlx4_mfunc, master); + struct mlx4_priv *priv = + container_of(mfunc, struct mlx4_priv, mfunc); + struct mlx4_dev *dev = &priv->dev; + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int i; + int err; + unsigned long flags; + + mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); + + for (i = 0 ; i < dev->num_slaves; i++) { + + if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { + mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", + i); + /* In case of 'Reset flow' FLR can be generated for + * a slave before mlx4_load_one is done. + * make sure interface is up before trying to delete + * slave resources which weren't allocated yet. + */ + if (dev->persist->interface_state & + MLX4_INTERFACE_STATE_UP) + mlx4_delete_all_resources_for_slave(dev, i); + /*return the slave to running mode*/ + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; + slave_state[i].is_slave_going_down = 0; + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + /*notify the FW:*/ + err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n", + i); + } + } +} + +static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_eqe *eqe; + int cqn = -1; + int eqes_found = 0; + int set_ci = 0; + int port; + int slave = 0; + int ret; + u32 flr_slave; + u8 update_slave_state; + int i; + enum slave_port_gen_event gen_event; + unsigned long flags; + struct mlx4_vport_state *s_info; + int eqe_size = dev->caps.eqe_size; + + while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
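+		 * (next_eqe_sw() only handed us this EQE because its owner bit
+		 * matched the current software pass: the bit is compared with
+		 * cons_index & nent, so the expected polarity flips on every
+		 * wrap of the EQ and stale entries are never picked up.)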
+ */ + dma_rmb(); + + switch (eqe->type) { + case MLX4_EVENT_TYPE_COMP: + cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; + mlx4_cq_completion(dev, cqn); + break; + + case MLX4_EVENT_TYPE_PATH_MIG: + case MLX4_EVENT_TYPE_COMM_EST: + case MLX4_EVENT_TYPE_SQ_DRAINED: + case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: + case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX4_EVENT_TYPE_PATH_MIG_FAILED: + case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: + mlx4_dbg(dev, "event %d arrived\n", eqe->type); + if (mlx4_is_master(dev)) { + /* forward only to slave owning the QP */ + ret = mlx4_get_slave_from_resource_id(dev, + RES_QP, + be32_to_cpu(eqe->event.qp.qpn) + & 0xffffff, &slave); + if (ret && ret != -ENOENT) { + mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + + if (!ret && slave != dev->caps.function) { + mlx4_slave_event(dev, slave, eqe); + break; + } + + } + mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & + 0xffffff, eqe->type); + break; + + case MLX4_EVENT_TYPE_SRQ_LIMIT: + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", + __func__); + case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: + if (mlx4_is_master(dev)) { + /* forward only to slave owning the SRQ */ + ret = mlx4_get_slave_from_resource_id(dev, + RES_SRQ, + be32_to_cpu(eqe->event.srq.srqn) + & 0xffffff, + &slave); + if (ret && ret != -ENOENT) { + mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); + + if (!ret && slave != dev->caps.function) { + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); + mlx4_slave_event(dev, slave, eqe); + break; + } + } + mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & + 0xffffff, eqe->type); + break; + + case MLX4_EVENT_TYPE_CMD: + mlx4_cmd_event(dev, + be16_to_cpu(eqe->event.cmd.token), + eqe->event.cmd.status, + be64_to_cpu(eqe->event.cmd.out_param)); + break; + + case MLX4_EVENT_TYPE_PORT_CHANGE: { + struct mlx4_slaves_pport slaves_port; + port = be32_to_cpu(eqe->event.port_change.port) >> 28; + slaves_port = mlx4_phys_to_slaves_pport(dev, port); + if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, + port); + mlx4_priv(dev)->sense.do_sense_port[port] = 1; + if (!mlx4_is_master(dev)) + break; + for (i = 0; i < dev->persist->num_vfs + 1; + i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { + if (i == mlx4_master_func_num(dev)) + continue; + mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", + __func__, i, port); + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + mlx4_slave_event(dev, i, eqe); + } + } else { /* IB port */ + set_and_calc_slave_port_state(dev, i, port, + MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, + &gen_event); + /*we can be in pending state, then do not send port_down event*/ + if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) { + if (i == mlx4_master_func_num(dev)) + continue; + 
mlx4_slave_event(dev, i, eqe); + } + } + } + } else { + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); + + mlx4_priv(dev)->sense.do_sense_port[port] = 0; + + if (!mlx4_is_master(dev)) + break; + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + for (i = 0; + i < dev->persist->num_vfs + 1; + i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; + if (i == mlx4_master_func_num(dev)) + continue; + s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); + mlx4_slave_event(dev, i, eqe); + } + } + else /* IB port */ + /* port-up event will be sent to a slave when the + * slave's alias-guid is set. This is done in alias_GUID.c + */ + set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); + } + break; + } + + case MLX4_EVENT_TYPE_CQ_ERROR: + mlx4_warn(dev, "CQ %s on CQN %06x\n", + eqe->event.cq_err.syndrome == 1 ? + "overrun" : "access violation", + be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); + if (mlx4_is_master(dev)) { + ret = mlx4_get_slave_from_resource_id(dev, + RES_CQ, + be32_to_cpu(eqe->event.cq_err.cqn) + & 0xffffff, &slave); + if (ret && ret != -ENOENT) { + mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", + eqe->type, eqe->subtype, + eq->eqn, eq->cons_index, ret); + break; + } + + if (!ret && slave != dev->caps.function) { + mlx4_slave_event(dev, slave, eqe); + break; + } + } + mlx4_cq_event(dev, + be32_to_cpu(eqe->event.cq_err.cqn) + & 0xffffff, + eqe->type); + break; + + case MLX4_EVENT_TYPE_EQ_OVERFLOW: + mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); + break; + + case MLX4_EVENT_TYPE_OP_REQUIRED: + atomic_inc(&priv->opreq_count); + /* FW commands can't be executed from interrupt context + * working in deferred task + */ + queue_work(mlx4_wq, &priv->opreq_task); + break; + + case MLX4_EVENT_TYPE_COMM_CHANNEL: + if (!mlx4_is_master(dev)) { + mlx4_warn(dev, "Received comm channel event for non master device\n"); + break; + } + memcpy(&priv->mfunc.master.comm_arm_bit_vector, + eqe->event.comm_channel_arm.bit_vec, + sizeof eqe->event.comm_channel_arm.bit_vec); + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.comm_work); + break; + + case MLX4_EVENT_TYPE_FLR_EVENT: + flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); + if (!mlx4_is_master(dev)) { + mlx4_warn(dev, "Non-master function received FLR event\n"); + break; + } + + mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); + + if (flr_slave >= dev->num_slaves) { + mlx4_warn(dev, + "Got FLR for unknown function: %d\n", + flr_slave); + update_slave_state = 0; + } else + update_slave_state = 1; + + spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); + if (update_slave_state) { + priv->mfunc.master.slave_state[flr_slave].active = false; + priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; + priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; + } + spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, + flr_slave); + queue_work(priv->mfunc.master.comm_wq, + &priv->mfunc.master.slave_flr_event_work); + break; + + case MLX4_EVENT_TYPE_FATAL_WARNING: + if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { + if (mlx4_is_master(dev)) + for (i = 0; i < dev->num_slaves; i++) { + mlx4_dbg(dev, "%s: Sending 
MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n", + __func__, i); + if (i == dev->caps.function) + continue; + mlx4_slave_event(dev, i, eqe); + } + mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n", + be16_to_cpu(eqe->event.warming.warning_threshold), + be16_to_cpu(eqe->event.warming.current_temperature)); + } else + mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + eqe->slave_id, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + + break; + + case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: + mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, + (unsigned long) eqe); + break; + + case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: + switch (eqe->subtype) { + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE: + mlx4_warn(dev, "Bad cable detected on port %u\n", + eqe->event.bad_cable.port); + break; + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE: + mlx4_warn(dev, "Unsupported cable detected\n"); + break; + default: + mlx4_dbg(dev, + "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + break; + } + break; + + case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: + case MLX4_EVENT_TYPE_ECC_DETECT: + default: + mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", + eqe->type, eqe->subtype, eq->eqn, + eq->cons_index, eqe->owner, eq->nent, + eqe->slave_id, + !!(eqe->owner & 0x80) ^ + !!(eq->cons_index & eq->nent) ? "HW" : "SW"); + break; + }; + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* + * The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX4_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. + */ + if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { + eq_set_ci(eq, 0); + set_ci = 0; + } + } + + eq_set_ci(eq, 1); + + /* cqn is 24bit wide but is initialized such that its higher bits + * are ones too. Thus, if we got any event, cqn's high bits should be off + * and we need to schedule the tasklet. 
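+	 * (cqn starts out as -1 above, so bits 24-31 stay set unless at
+	 * least one MLX4_EVENT_TYPE_COMP entry was handled in the loop and
+	 * stored a genuine 24-bit CQ number in it.)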
+ */ + if (!(cqn & ~0xffffff)) + tasklet_schedule(&eq->tasklet_ctx.task); + + return eqes_found; +} + +static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) +{ + struct mlx4_dev *dev = dev_ptr; + struct mlx4_priv *priv = mlx4_priv(dev); + int work = 0; + int i; + + writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) + work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); + + return IRQ_RETVAL(work); +} + +static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) +{ + struct mlx4_eq *eq = eq_ptr; + struct mlx4_dev *dev = eq->dev; + + mlx4_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq_info *event_eq = + priv->mfunc.master.slave_state[slave].event_eq; + u32 in_modifier = vhcr->in_modifier; + u32 eqn = in_modifier & 0x3FF; + u64 in_param = vhcr->in_param; + int err = 0; + int i; + + if (slave == dev->caps.function) + err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + if (!err) + for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) + if (in_param & (1LL << i)) + event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn; + + return err; +} + +static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, + int eq_num) +{ + return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, + 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} + +static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int eq_num) +{ + return mlx4_cmd(dev, mailbox->dma, eq_num, 0, + MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num) +{ + return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_num_eq_uar(struct mlx4_dev *dev) +{ + /* + * Each UAR holds 4 EQ doorbells. To figure out how many UARs + * we need to map, take the difference of highest index and + * the lowest index we'll use and add 1. 
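+	 * Illustrative numbers only: with 8 reserved EQs, 4 completion
+	 * vectors, the async EQ and an empty comp_pool, EQNs 8..12 are in
+	 * use, their doorbells live in UARs 8/4 = 2 and 12/4 = 3, and the
+	 * expression below gives (4 + 1 + 8 + 0)/4 - 8/4 + 1 = 2 UARs.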
+ */ + return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + + dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; +} + +static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int index; + + index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; + + if (!priv->eq_table.uar_map[index]) { + priv->eq_table.uar_map[index] = + ioremap(pci_resource_start(dev->persist->pdev, 2) + + ((eq->eqn / 4) << PAGE_SHIFT), + PAGE_SIZE); + if (!priv->eq_table.uar_map[index]) { + mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", + eq->eqn); + return NULL; + } + } + + return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); +} + +static void mlx4_unmap_uar(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) + if (priv->eq_table.uar_map[i]) { + iounmap(priv->eq_table.uar_map[i]); + priv->eq_table.uar_map[i] = NULL; + } +} + +static int mlx4_create_eq(struct mlx4_dev *dev, int nent, + u8 intr, struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_eq_context *eq_context; + int npages; + u64 *dma_list = NULL; + dma_addr_t t; + u64 mtt_addr; + int err = -ENOMEM; + int i; + + eq->dev = dev; + eq->nent = roundup_pow_of_two(max(nent, 2)); + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B. + */ + npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; + + eq->page_list = kmalloc(npages * sizeof *eq->page_list, + GFP_KERNEL); + if (!eq->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + eq->page_list[i].buf = NULL; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_out_free; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto err_out_free; + eq_context = mailbox->buf; + + for (i = 0; i < npages; ++i) { + eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> + pdev->dev, + PAGE_SIZE, &t, + GFP_KERNEL); + if (!eq->page_list[i].buf) + goto err_out_free_pages; + + dma_list[i] = t; + eq->page_list[i].map = t; + + memset(eq->page_list[i].buf, 0, PAGE_SIZE); + } + + eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); + if (eq->eqn == -1) + goto err_out_free_pages; + + eq->doorbell = mlx4_get_eq_uar(dev, eq); + if (!eq->doorbell) { + err = -ENOMEM; + goto err_out_free_eq; + } + + err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); + if (err) + goto err_out_free_eq; + + err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); + if (err) + goto err_out_free_mtt; + + eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | + MLX4_EQ_STATE_ARMED); + eq_context->log_eq_size = ilog2(eq->nent); + eq_context->intr = intr; + eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); + eq_context->mtt_base_addr_h = mtt_addr >> 32; + eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + + err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); + if (err) { + mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); + goto err_out_free_mtt; + } + + kfree(dma_list); + mlx4_free_cmd_mailbox(dev, mailbox); + + eq->cons_index = 0; + + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb, + (unsigned long)&eq->tasklet_ctx); + + return err; + +err_out_free_mtt: + mlx4_mtt_cleanup(dev, &eq->mtt); + 
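+/* Remaining unwind, mirroring the setup order above in reverse: give the EQN
+ * back to the bitmap, free the coherent EQE pages, then drop the command
+ * mailbox and the page_list/dma_list scratch allocations. */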
+err_out_free_eq: + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); + +err_out_free_pages: + for (i = 0; i < npages; ++i) + if (eq->page_list[i].buf) + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].map); + + mlx4_free_cmd_mailbox(dev, mailbox); + +err_out_free: + kfree(eq->page_list); + kfree(dma_list); + +err_out: + return err; +} + +static void mlx4_free_eq(struct mlx4_dev *dev, + struct mlx4_eq *eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int i; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B + */ + int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; + + err = mlx4_HW2SW_EQ(dev, eq->eqn); + if (err) + mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); + + synchronize_irq(eq->irq); + tasklet_disable(&eq->tasklet_ctx.task); + + mlx4_mtt_cleanup(dev, &eq->mtt); + for (i = 0; i < npages; ++i) + dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, + eq->page_list[i].buf, + eq->page_list[i].map); + + kfree(eq->page_list); + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); +} + +static void mlx4_free_irqs(struct mlx4_dev *dev) +{ + struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; + struct mlx4_priv *priv = mlx4_priv(dev); + int i, vec; + + if (eq_table->have_irq) + free_irq(dev->persist->pdev->irq, dev); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) + if (eq_table->eq[i].have_irq) { + free_irq(eq_table->eq[i].irq, eq_table->eq + i); + eq_table->eq[i].have_irq = 0; + } + + for (i = 0; i < dev->caps.comp_pool; i++) { + /* + * Freeing the assigned irq's + * all bits should be 0, but we need to validate + */ + if (priv->msix_ctl.pool_bm & 1ULL << i) { + /* NO need protecting*/ + vec = dev->caps.num_comp_vectors + 1 + i; + free_irq(priv->eq_table.eq[vec].irq, + &priv->eq_table.eq[vec]); + } + } + + + kfree(eq_table->irq_names); +} + +static int mlx4_map_clr_int(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.clr_int_bar) + + priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); + if (!priv->clr_base) { + mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); + return -ENOMEM; + } + + return 0; +} + +static void mlx4_unmap_clr_int(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + iounmap(priv->clr_base); +} + +int mlx4_alloc_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, + sizeof *priv->eq_table.eq, GFP_KERNEL); + if (!priv->eq_table.eq) + return -ENOMEM; + + return 0; +} + +void mlx4_free_eq_table(struct mlx4_dev *dev) +{ + kfree(mlx4_priv(dev)->eq_table.eq); +} + +int mlx4_init_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int i; + + priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), + sizeof *priv->eq_table.uar_map, + GFP_KERNEL); + if (!priv->eq_table.uar_map) { + err = -ENOMEM; + goto err_out_free; + } + + err = mlx4_bitmap_init(&priv->eq_table.bitmap, + roundup_pow_of_two(dev->caps.num_eqs), + dev->caps.num_eqs - 1, + dev->caps.reserved_eqs, + roundup_pow_of_two(dev->caps.num_eqs) - + dev->caps.num_eqs); + if (err) + goto err_out_free; + + for (i = 0; i < mlx4_num_eq_uar(dev); ++i) + priv->eq_table.uar_map[i] = NULL; + + if (!mlx4_is_slave(dev)) { + err = mlx4_map_clr_int(dev); + if (err) + goto err_out_bitmap; + + 
priv->eq_table.clr_mask = + swab32(1 << (priv->eq_table.inta_pin & 31)); + priv->eq_table.clr_int = priv->clr_base + + (priv->eq_table.inta_pin < 32 ? 4 : 0); + } + + priv->eq_table.irq_names = + kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + + dev->caps.comp_pool), + GFP_KERNEL); + if (!priv->eq_table.irq_names) { + err = -ENOMEM; + goto err_out_bitmap; + } + + for (i = 0; i < dev->caps.num_comp_vectors; ++i) { + err = mlx4_create_eq(dev, dev->caps.num_cqs - + dev->caps.reserved_cqs + + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, + &priv->eq_table.eq[i]); + if (err) { + --i; + goto err_out_unmap; + } + } + + err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0, + &priv->eq_table.eq[dev->caps.num_comp_vectors]); + if (err) + goto err_out_comp; + + /*if additional completion vectors poolsize is 0 this loop will not run*/ + for (i = dev->caps.num_comp_vectors + 1; + i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { + + err = mlx4_create_eq(dev, dev->caps.num_cqs - + dev->caps.reserved_cqs + + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, + &priv->eq_table.eq[i]); + if (err) { + --i; + goto err_out_unmap; + } + } + + + if (dev->flags & MLX4_FLAG_MSI_X) { + const char *eq_name; + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { + if (i < dev->caps.num_comp_vectors) { + snprintf(priv->eq_table.irq_names + + i * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, + "mlx4-comp-%d@pci:%s", i, + pci_name(dev->persist->pdev)); + } else { + snprintf(priv->eq_table.irq_names + + i * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, + "mlx4-async@pci:%s", + pci_name(dev->persist->pdev)); + } + + eq_name = priv->eq_table.irq_names + + i * MLX4_IRQNAME_SIZE; + err = request_irq(priv->eq_table.eq[i].irq, + mlx4_msi_x_interrupt, 0, eq_name, + priv->eq_table.eq + i); + if (err) + goto err_out_async; + + priv->eq_table.eq[i].have_irq = 1; + } + } else { + snprintf(priv->eq_table.irq_names, + MLX4_IRQNAME_SIZE, + DRV_NAME "@pci:%s", + pci_name(dev->persist->pdev)); + err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, + IRQF_SHARED, priv->eq_table.irq_names, dev); + if (err) + goto err_out_async; + + priv->eq_table.have_irq = 1; + } + + err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + if (err) + mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) + eq_set_ci(&priv->eq_table.eq[i], 1); + + return 0; + +err_out_async: + mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); + +err_out_comp: + i = dev->caps.num_comp_vectors - 1; + +err_out_unmap: + while (i >= 0) { + mlx4_free_eq(dev, &priv->eq_table.eq[i]); + --i; + } + if (!mlx4_is_slave(dev)) + mlx4_unmap_clr_int(dev); + mlx4_free_irqs(dev); + +err_out_bitmap: + mlx4_unmap_uar(dev); + mlx4_bitmap_cleanup(&priv->eq_table.bitmap); + +err_out_free: + kfree(priv->eq_table.uar_map); + + return err; +} + +void mlx4_cleanup_eq_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + + mlx4_free_irqs(dev); + + for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) + mlx4_free_eq(dev, &priv->eq_table.eq[i]); + + if (!mlx4_is_slave(dev)) + mlx4_unmap_clr_int(dev); + + mlx4_unmap_uar(dev); + 
mlx4_bitmap_cleanup(&priv->eq_table.bitmap); + + kfree(priv->eq_table.uar_map); +} + +/* A test that verifies that we can accept interrupts on all + * the irq vectors of the device. + * Interrupts are checked using the NOP command. + */ +int mlx4_test_interrupts(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + int err; + + err = mlx4_NOP(dev); + /* When not in MSI_X, there is only one irq to check */ + if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) + return err; + + /* A loop over all completion vectors, for each vector we will check + * whether it works by mapping command completions to that vector + * and performing a NOP command + */ + for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { + /* Temporary use polling for command completions */ + mlx4_cmd_use_polling(dev); + + /* Map the new eq to handle all asynchronous events */ + err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + priv->eq_table.eq[i].eqn); + if (err) { + mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); + mlx4_cmd_use_events(dev); + break; + } + + /* Go back to using events */ + mlx4_cmd_use_events(dev); + err = mlx4_NOP(dev); + } + + /* Return to default */ + mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, + priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + return err; +} +EXPORT_SYMBOL(mlx4_test_interrupts); + +int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, + int *vector) +{ + + struct mlx4_priv *priv = mlx4_priv(dev); + int vec = 0, err = 0, i; + + mutex_lock(&priv->msix_ctl.pool_lock); + for (i = 0; !vec && i < dev->caps.comp_pool; i++) { + if (~priv->msix_ctl.pool_bm & 1ULL << i) { + priv->msix_ctl.pool_bm |= 1ULL << i; + vec = dev->caps.num_comp_vectors + 1 + i; + snprintf(priv->eq_table.irq_names + + vec * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, "%s", name); +#ifdef CONFIG_RFS_ACCEL + if (rmap) { + err = irq_cpu_rmap_add(rmap, + priv->eq_table.eq[vec].irq); + if (err) + mlx4_warn(dev, "Failed adding irq rmap\n"); + } +#endif + err = request_irq(priv->eq_table.eq[vec].irq, + mlx4_msi_x_interrupt, 0, + &priv->eq_table.irq_names[vec<<5], + priv->eq_table.eq + vec); + if (err) { + /*zero out bit by fliping it*/ + priv->msix_ctl.pool_bm ^= 1 << i; + vec = 0; + continue; + /*we dont want to break here*/ + } + + eq_set_ci(&priv->eq_table.eq[vec], 1); + } + } + mutex_unlock(&priv->msix_ctl.pool_lock); + + if (vec) { + *vector = vec; + } else { + *vector = 0; + err = (i == dev->caps.comp_pool) ? 
-ENOSPC : err;
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlx4_assign_eq);
+
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
+void mlx4_release_eq(struct mlx4_dev *dev, int vec)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	/* index into the msix_ctl pool bitmap */
+	int i = vec - dev->caps.num_comp_vectors - 1;
+
+	if (likely(i >= 0)) {
+		/* sanity check, making sure we're not trying to free IRQs
+		 * belonging to a legacy EQ
+		 */
+		mutex_lock(&priv->msix_ctl.pool_lock);
+		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			free_irq(priv->eq_table.eq[vec].irq,
+				 &priv->eq_table.eq[vec]);
+			priv->msix_ctl.pool_bm &= ~(1ULL << i);
+		}
+		mutex_unlock(&priv->msix_ctl.pool_lock);
+	}
+
+}
+EXPORT_SYMBOL(mlx4_release_eq);
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
new file mode 100644
index 000000000..e30bf57ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -0,0 +1,2760 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#include +#include +#include +#include + +#include "fw.h" +#include "icm.h" + +enum { + MLX4_COMMAND_INTERFACE_MIN_REV = 2, + MLX4_COMMAND_INTERFACE_MAX_REV = 3, + MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3, +}; + +extern void __buggy_use_of_MLX4_GET(void); +extern void __buggy_use_of_MLX4_PUT(void); + +static bool enable_qos = true; +module_param(enable_qos, bool, 0444); +MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); + +#define MLX4_GET(dest, source, offset) \ + do { \ + void *__p = (char *) (source) + (offset); \ + u64 val; \ + switch (sizeof (dest)) { \ + case 1: (dest) = *(u8 *) __p; break; \ + case 2: (dest) = be16_to_cpup(__p); break; \ + case 4: (dest) = be32_to_cpup(__p); break; \ + case 8: val = get_unaligned((u64 *)__p); \ + (dest) = be64_to_cpu(val); break; \ + default: __buggy_use_of_MLX4_GET(); \ + } \ + } while (0) + +#define MLX4_PUT(dest, source, offset) \ + do { \ + void *__d = ((char *) (dest) + (offset)); \ + switch (sizeof(source)) { \ + case 1: *(u8 *) __d = (source); break; \ + case 2: *(__be16 *) __d = cpu_to_be16(source); break; \ + case 4: *(__be32 *) __d = cpu_to_be32(source); break; \ + case 8: *(__be64 *) __d = cpu_to_be64(source); break; \ + default: __buggy_use_of_MLX4_PUT(); \ + } \ + } while (0) + +static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags) +{ + static const char *fname[] = { + [ 0] = "RC transport", + [ 1] = "UC transport", + [ 2] = "UD transport", + [ 3] = "XRC transport", + [ 6] = "SRQ support", + [ 7] = "IPoIB checksum offload", + [ 8] = "P_Key violation counter", + [ 9] = "Q_Key violation counter", + [12] = "Dual Port Different Protocol (DPDP) support", + [15] = "Big LSO headers", + [16] = "MW support", + [17] = "APM support", + [18] = "Atomic ops support", + [19] = "Raw multicast support", + [20] = "Address vector port checking support", + [21] = "UD multicast support", + [30] = "IBoE support", + [32] = "Unicast loopback support", + [34] = "FCS header control", + [37] = "Wake On LAN (port1) support", + [38] = "Wake On LAN (port2) support", + [40] = "UDP RSS support", + [41] = "Unicast VEP steering support", + [42] = "Multicast VEP steering support", + [48] = "Counters support", + [52] = "RSS IP fragments support", + [53] = "Port ETS Scheduler support", + [55] = "Port link type sensing support", + [59] = "Port management change event support", + [61] = "64 byte EQE support", + [62] = "64 byte CQE support", + }; + int i; + + mlx4_dbg(dev, "DEV_CAP flags:\n"); + for (i = 0; i < ARRAY_SIZE(fname); ++i) + if (fname[i] && (flags & (1LL << i))) + mlx4_dbg(dev, " %s\n", fname[i]); +} + +static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) +{ + static const char * const fname[] = { + [0] = "RSS support", + [1] = "RSS Toeplitz Hash Function support", + [2] = "RSS XOR Hash Function support", + [3] = "Device managed flow steering support", + [4] = "Automatic MAC reassignment support", + [5] = "Time stamping support", + [6] = "VST (control vlan insertion/stripping) support", + [7] = "FSM (MAC anti-spoofing) support", + [8] = "Dynamic QP updates support", + [9] = "Device managed flow steering IPoIB support", + [10] = "TCP/IP offloads/flow-steering for VXLAN support", + [11] = "MAD DEMUX (Secure-Host) support", + [12] = "Large cache line (>64B) CQE stride support", + [13] = "Large cache line (>64B) EQE stride support", + [14] = "Ethernet protocol control support", + [15] = "Ethernet Backplane autoneg support", + [16] = "CONFIG DEV support", + [17] = "Asymmetric EQs support", + [18] = "More than 80 
VFs support", + [19] = "Performance optimized for limited rule configuration flow steering support", + [20] = "Recoverable error events support", + [21] = "Port Remap support", + [22] = "QCN support", + [23] = "QP rate limiting support", + [24] = "Ethernet Flow control statistics support", + [25] = "Granular QoS per VF support", + [26] = "Port ETS Scheduler support", + [27] = "Port beacon support", + [28] = "RX-ALL support", + }; + int i; + + for (i = 0; i < ARRAY_SIZE(fname); ++i) + if (fname[i] && (flags & (1LL << i))) + mlx4_dbg(dev, " %s\n", fname[i]); +} + +int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err = 0; + +#define MOD_STAT_CFG_IN_SIZE 0x100 + +#define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002 +#define MOD_STAT_CFG_PG_SZ_OFFSET 0x003 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET); + MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 in_modifier; + u8 field; + u16 field16; + int err; + +#define QUERY_FUNC_BUS_OFFSET 0x00 +#define QUERY_FUNC_DEVICE_OFFSET 0x01 +#define QUERY_FUNC_FUNCTION_OFFSET 0x01 +#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03 +#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04 +#define QUERY_FUNC_MAX_EQ_OFFSET 0x06 +#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + in_modifier = slave; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0, + MLX4_CMD_QUERY_FUNC, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET); + func->bus = field & 0xf; + MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET); + func->device = field & 0xf1; + MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET); + func->function = field & 0x7; + MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET); + func->physical_function = field & 0xf; + MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET); + func->rsvd_eqs = field16 & 0xffff; + MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET); + func->max_eq = field16 & 0xffff; + MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET); + func->rsvd_uars = field & 0x0f; + + mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n", + func->bus, func->device, func->function, func->physical_function, + func->max_eq, func->rsvd_eqs, func->rsvd_uars); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u8 field, port; + u32 size, proxy_qp, qkey; + int err = 0; + struct mlx4_func func; + +#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 +#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 +#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 +#define QUERY_FUNC_CAP_FMR_OFFSET 0x8 +#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10 +#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 
0x14 +#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18 +#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20 +#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24 +#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 +#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c +#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 +#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48 + +#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 +#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 +#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58 +#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60 +#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 +#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 + +#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c + +#define QUERY_FUNC_CAP_FMR_FLAG 0x80 +#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 +#define QUERY_FUNC_CAP_FLAG_ETH 0x80 +#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 +#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08 +#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 + +#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) +#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30) + +/* when opcode modifier = 1 */ +#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 +#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4 +#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 +#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc + +#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10 +#define QUERY_FUNC_CAP_QP0_PROXY 0x14 +#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18 +#define QUERY_FUNC_CAP_QP1_PROXY 0x1c +#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28 + +#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40 +#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80 +#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10 +#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 + +#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 +#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) + + if (vhcr->op_modifier == 1) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); + int converted_port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier); + + if (converted_port < 0) + return -EINVAL; + + vhcr->in_modifier = converted_port; + /* phys-port = logical-port */ + field = vhcr->in_modifier - + find_first_bit(actv_ports.ports, dev->caps.num_ports); + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + + port = vhcr->in_modifier; + proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1; + + /* Set nic_info bit to mark new fields support */ + field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; + + if (mlx4_vf_smi_enabled(dev, slave, port) && + !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) { + field |= QUERY_FUNC_CAP_VF_ENABLE_QP0; + MLX4_PUT(outbox->buf, qkey, + QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); + } + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); + + /* size is now the QP number */ + size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); + + size += 2; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL); + + MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY); + proxy_qp += 2; + MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY); + + MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], + QUERY_FUNC_CAP_PHYS_PORT_ID); + + } else if (vhcr->op_modifier == 0) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); + /* enable rdma and ethernet interfaces, new quota locations, + * and reserved lkey + */ + field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | + QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX | + QUERY_FUNC_CAP_FLAG_RESD_LKEY); + 
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); + + field = min( + bitmap_weight(actv_ports.ports, dev->caps.num_ports), + dev->caps.num_ports); + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); + + size = dev->caps.function_caps; /* set PF behaviours */ + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET); + + field = 0; /* protected FMR support not available as yet */ + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + size = dev->caps.num_qps; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + size = dev->caps.num_srqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + size = dev->caps.num_cqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || + mlx4_QUERY_FUNC(dev, &func, slave)) { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + dev->caps.num_eqs : + rounddown_pow_of_two(dev->caps.num_eqs); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = dev->caps.reserved_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } else { + size = vhcr->in_modifier & + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? + func.max_eq : + rounddown_pow_of_two(func.max_eq); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + size = func.rsvd_eqs; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + } + + size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + size = dev->caps.num_mpts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); + + size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + size = dev->caps.num_mtts; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); + + size = dev->caps.num_mgms + dev->caps.num_amgms; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); + + size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | + QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); + + size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00); + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); + } else + err = -EINVAL; + + return err; +} + +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, + struct mlx4_func_cap *func_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field, op_modifier; + u32 size, qkey; + int err = 0, quotas = 0; + u32 in_modifier; + + op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ + in_modifier = op_modifier ? 
gen_or_port : + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, + MLX4_CMD_QUERY_FUNC_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + goto out; + + outbox = mailbox->buf; + + if (!op_modifier) { + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET); + if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) { + mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n"); + err = -EPROTONOSUPPORT; + goto out; + } + func_cap->flags = field; + quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS); + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); + func_cap->num_ports = field; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); + func_cap->pf_context_behaviour = size; + + if (quotas) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); + func_cap->qp_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); + func_cap->srq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); + func_cap->cq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); + func_cap->mpt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); + func_cap->mtt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); + func_cap->mcg_quota = size & 0xFFFFFF; + + } else { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); + func_cap->qp_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); + func_cap->srq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); + func_cap->cq_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); + func_cap->mpt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); + func_cap->mtt_quota = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); + func_cap->mcg_quota = size & 0xFFFFFF; + } + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); + func_cap->max_eq = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); + func_cap->reserved_eq = size & 0xFFFFFF; + + if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); + func_cap->reserved_lkey = size; + } else { + func_cap->reserved_lkey = 0; + } + + func_cap->extra_flags = 0; + + /* Mailbox data from 0x6c and onward should only be treated if + * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags + */ + if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) { + MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); + if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG) + func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP; + if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG) + func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP; + } + + goto out; + } + + /* logical port query */ + if (gen_or_port > dev->caps.num_ports) { + err = -EINVAL; + goto out; + } + + MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); + if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { + if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) { + mlx4_err(dev, "VLAN is enforced on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + + if (func_cap->flags1 & 
QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) { + mlx4_err(dev, "Force mac is enabled on this port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); + if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { + mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n"); + err = -EPROTONOSUPPORT; + goto out; + } + } + + MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + func_cap->physical_port = field; + if (func_cap->physical_port != gen_or_port) { + err = -ENOSYS; + goto out; + } + + if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) { + MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET); + func_cap->qp0_qkey = qkey; + } else { + func_cap->qp0_qkey = 0; + } + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL); + func_cap->qp0_tunnel_qpn = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY); + func_cap->qp0_proxy_qpn = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL); + func_cap->qp1_tunnel_qpn = size & 0xFFFFFF; + + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); + func_cap->qp1_proxy_qpn = size & 0xFFFFFF; + + if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO) + MLX4_GET(func_cap->phys_port_id, outbox, + QUERY_FUNC_CAP_PHYS_PORT_ID); + + /* All other resources are allocated by the master, but we still report + * 'num' and 'reserved' capabilities as follows: + * - num remains the maximum resource index + * - 'num - reserved' is the total available objects of a resource, but + * resource indices may be less than 'reserved' + * TODO: set per-resource quotas */ + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 field32, flags, ext_flags; + u16 size; + u16 stat_rate; + int err; + int i; + +#define QUERY_DEV_CAP_OUT_SIZE 0x100 +#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10 +#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11 +#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12 +#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13 +#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14 +#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15 +#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16 +#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17 +#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19 +#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a +#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b +#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d +#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e +#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f +#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20 +#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 +#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 +#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 +#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 +#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 +#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b +#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d +#define QUERY_DEV_CAP_RSS_OFFSET 0x2e +#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f +#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33 +#define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34 +#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35 +#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36 +#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37 +#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38 +#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b +#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c +#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e +#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f +#define 
QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 +#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 +#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 +#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 +#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b +#define QUERY_DEV_CAP_BF_OFFSET 0x4c +#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d +#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e +#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f +#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51 +#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52 +#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55 +#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56 +#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61 +#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62 +#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63 +#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64 +#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65 +#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 +#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 +#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 +#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70 +#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 +#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 +#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 +#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 +#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a +#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b +#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 +#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 +#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 +#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86 +#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88 +#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a +#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c +#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e +#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90 +#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92 +#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 +#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94 +#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 +#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 +#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c +#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d +#define QUERY_DEV_CAP_VXLAN 0x9e +#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 +#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8 +#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac +#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc +#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0 +#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2 + + + dev_cap->flags2 = 0; + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET); + dev_cap->reserved_qps = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET); + dev_cap->max_qps = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET); + dev_cap->reserved_srqs = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET); + dev_cap->max_srqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET); + dev_cap->max_cq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET); + dev_cap->reserved_cqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET); + dev_cap->max_cqs = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); + dev_cap->max_mpts = 1 << (field & 0x3f); + MLX4_GET(field, outbox, 
QUERY_DEV_CAP_RSVD_EQ_OFFSET); + dev_cap->reserved_eqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); + dev_cap->max_eqs = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); + dev_cap->reserved_mtts = 1 << (field >> 4); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET); + dev_cap->max_mrw_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET); + dev_cap->reserved_mrws = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); + dev_cap->max_mtt_seg = 1 << (field & 0x3f); + MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); + dev_cap->num_sys_eqs = size & 0xfff; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); + dev_cap->max_requester_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); + dev_cap->max_responder_per_qp = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET); + field &= 0x1f; + if (!field) + dev_cap->max_gso_sz = 0; + else + dev_cap->max_gso_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET); + if (field & 0x20) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR; + if (field & 0x10) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP; + field &= 0xf; + if (field) { + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS; + dev_cap->max_rss_tbl_sz = 1 << field; + } else + dev_cap->max_rss_tbl_sz = 0; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET); + dev_cap->max_rdma_global = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET); + dev_cap->local_ca_ack_delay = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + dev_cap->num_ports = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); + dev_cap->max_msg_sz = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET); + if (field & 0x10) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; + dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); + dev_cap->fs_max_num_qp_per_entry = field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + if (field & 0x1) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN; + MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); + dev_cap->stat_rate_support = stat_rate; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS; + MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); + dev_cap->flags = flags | (u64)ext_flags << 32; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); + dev_cap->reserved_uars = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); + dev_cap->uar_size = 1 << ((field & 0x3f) + 20); + MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET); + dev_cap->min_page_sz = 1 << field; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET); + if (field & 0x80) { + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); + dev_cap->bf_reg_size = 1 
<< (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); + if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) + field = 3; + dev_cap->bf_regs_per_page = 1 << (field & 0x3f); + } else { + dev_cap->bf_reg_size = 0; + } + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET); + dev_cap->max_sq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET); + dev_cap->max_sq_desc_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET); + dev_cap->max_qp_per_mcg = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET); + dev_cap->reserved_mgms = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET); + dev_cap->max_mcgs = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET); + dev_cap->reserved_pds = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET); + dev_cap->max_pds = 1 << (field & 0x3f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET); + dev_cap->reserved_xrcds = field >> 4; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET); + dev_cap->max_xrcds = 1 << (field & 0x1f); + + MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET); + dev_cap->rdmarc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET); + dev_cap->qpc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET); + dev_cap->aux_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET); + dev_cap->altc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET); + dev_cap->eqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET); + dev_cap->cqc_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET); + dev_cap->srq_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET); + dev_cap->cmpt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET); + dev_cap->mtt_entry_sz = size; + MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET); + dev_cap->dmpt_entry_sz = size; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET); + dev_cap->max_srq_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET); + dev_cap->max_qp_sz = 1 << field; + MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET); + dev_cap->resize_srq = field & 1; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET); + dev_cap->max_rq_sg = field; + MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); + dev_cap->max_rq_desc_sz = size; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 4)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP; + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; + if (field & (1 << 6)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + if (field & (1 << 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + MLX4_GET(dev_cap->bmme_flags, outbox, + QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + if (field & 0x20) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; + if (field & (1 << 2)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + MLX4_GET(dev_cap->reserved_lkey, outbox, + QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; + if (field32 & (1 
<< 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); + if (field & 1<<6) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); + if (field & 1<<3) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS; + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; + MLX4_GET(dev_cap->max_icm_sz, outbox, + QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); + if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) + MLX4_GET(dev_cap->max_counters, outbox, + QUERY_DEV_CAP_MAX_COUNTERS_OFFSET); + + MLX4_GET(field32, outbox, + QUERY_DEV_CAP_MAD_DEMUX_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX; + + MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox, + QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET); + dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK; + MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox, + QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET); + dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK; + + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + dev_cap->rl_caps.num_rates = size; + if (dev_cap->rl_caps.num_rates) { + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET); + dev_cap->rl_caps.max_val = size & 0xfff; + dev_cap->rl_caps.max_unit = size >> 14; + MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET); + dev_cap->rl_caps.min_val = size & 0xfff; + dev_cap->rl_caps.min_unit = size >> 14; + } + + MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + if (field32 & (1 << 16)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP; + if (field32 & (1 << 26)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; + if (field32 & (1 << 20)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; + if (field32 & (1 << 21)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS; + + for (i = 1; i <= dev_cap->num_ports; i++) { + err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i); + if (err) + goto out; + } + + /* + * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then + * we can't use any EQs whose doorbell falls on that page, + * even if the EQ itself isn't reserved. 
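+	 * For example, with 8 reserved UARs the first 8 * 4 = 32 EQ numbers
+	 * are unusable, which is why reserved_eqs is raised to at least
+	 * reserved_uars * 4 below.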
+ */ + if (dev_cap->num_sys_eqs == 0) + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, + dev_cap->reserved_eqs); + else + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + if (dev_cap->bf_reg_size > 0) + mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", + dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); + else + mlx4_dbg(dev, "BlueFlame not available\n"); + + mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n", + dev_cap->bmme_flags, dev_cap->reserved_lkey); + mlx4_dbg(dev, "Max ICM size %lld MB\n", + (unsigned long long) dev_cap->max_icm_sz >> 20); + mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz); + mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); + mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); + mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs, + dev_cap->eqc_entry_sz); + mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", + dev_cap->reserved_mrws, dev_cap->reserved_mtts); + mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars); + mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n", + dev_cap->max_pds, dev_cap->reserved_mgms); + mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", + dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); + mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", + dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu, + dev_cap->port_cap[1].max_port_width); + mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", + dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); + mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n", + dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg); + mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz); + mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters); + mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz); + mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n", + dev_cap->dmfs_high_rate_qpn_base); + mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n", + dev_cap->dmfs_high_rate_qpn_range); + + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) { + struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps; + + mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n", + rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val, + rl_caps->min_unit, rl_caps->min_val); + } + + dump_dev_cap_flags(dev, dev_cap->flags); + dump_dev_cap_flags2(dev, dev_cap->flags2); +} + +int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u8 field; + u32 field32; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); + port_cap->max_vl = field >> 4; + 
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); + port_cap->ib_mtu = field >> 4; + port_cap->max_port_width = field & 0xf; + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); + port_cap->max_gids = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET); + port_cap->max_pkeys = 1 << (field & 0xf); + } else { +#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00 +#define QUERY_PORT_MTU_OFFSET 0x01 +#define QUERY_PORT_ETH_MTU_OFFSET 0x02 +#define QUERY_PORT_WIDTH_OFFSET 0x06 +#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 +#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a +#define QUERY_PORT_MAX_VL_OFFSET 0x0b +#define QUERY_PORT_MAC_OFFSET 0x10 +#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18 +#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c +#define QUERY_PORT_TRANS_CODE_OFFSET 0x20 + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET); + port_cap->supported_port_types = field & 3; + port_cap->suggested_type = (field >> 3) & 1; + port_cap->default_sense = (field >> 4) & 1; + port_cap->dmfs_optimized_state = (field >> 5) & 1; + MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); + port_cap->ib_mtu = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); + port_cap->max_port_width = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); + port_cap->max_gids = 1 << (field >> 4); + port_cap->max_pkeys = 1 << (field & 0xf); + MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); + port_cap->max_vl = field & 0xf; + MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); + port_cap->log_max_macs = field & 0xf; + port_cap->log_max_vlans = field >> 4; + MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET); + MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET); + MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET); + port_cap->trans_type = field32 >> 24; + port_cap->vendor_oui = field32 & 0xffffff; + MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET); + MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28) +#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26) +#define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21) +#define DEV_CAP_EXT_2_FLAG_FSM (1 << 20) + +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u64 flags; + int err = 0; + u8 field; + u16 field16; + u32 bmme_flags, field32; + int real_port; + int slave_port; + int first_port; + struct mlx4_active_ports actv_ports; + + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* add port mng change event capability and disable mw type 1 + * unconditionally to slaves + */ + MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; + flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; + actv_ports = mlx4_get_active_ports(dev, slave); + first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); + for (slave_port = 0, real_port = first_port; + real_port < first_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + ++real_port, ++slave_port) { + if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port)) + flags |= 
MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port; + else + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); + } + for (; slave_port < dev->caps.num_ports; ++slave_port) + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); + + /* Not exposing RSS IP fragments to guests */ + flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG; + MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); + field &= ~0x0F; + field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET); + + /* For guests, disable timestamp */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + + /* For guests, disable vxlan tunneling and QoS support */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); + field &= 0xd7; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); + + /* For guests, disable port BEACON */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET); + + /* For guests, report Blueflame disabled */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); + + /* For guests, disable mw type 2 and port remap*/ + MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; + bmme_flags &= ~MLX4_FLAG_PORT_REMAP; + MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + + /* turn off device-managed steering capability if not enabled */ + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { + MLX4_GET(field, outbox->buf, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, + QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + } + + /* turn off ipoib managed steering for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + field &= ~0x80; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + + /* turn off host side virt features (VST, FSM, etc) for guests */ + MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS | + DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS); + MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET); + + /* turn off QCN for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + field &= 0xfe; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET); + + /* turn off QP max-rate limiting for guests */ + field16 = 0; + MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET); + + /* turn off QoS per VF support for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + field &= 0xef; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + + /* turn off ignore FCS feature for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + field &= 0xfb; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); + + return 0; +} + +int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 def_mac; + u8 port_type; + u16 short_field; + int err; + int 
admin_link_state; + int port = mlx4_slave_convert_port(dev, slave, + vhcr->in_modifier & 0xFF); + +#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 +#define MLX4_PORT_LINK_UP_MASK 0x80 +#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c +#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e + + if (port < 0) + return -EINVAL; + + /* Protect against untrusted guests: enforce that this is the + * QUERY_PORT general query. + */ + if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF) + return -EINVAL; + + vhcr->in_modifier = port; + + err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + if (!err && dev->caps.function != slave) { + def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac; + MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET); + + /* get port type - currently only eth is enabled */ + MLX4_GET(port_type, outbox->buf, + QUERY_PORT_SUPPORTED_TYPE_OFFSET); + + /* No link sensing allowed */ + port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK; + /* set port type to currently operating port type */ + port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3); + + admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state; + if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state) + port_type |= MLX4_PORT_LINK_UP_MASK; + else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state) + port_type &= ~MLX4_PORT_LINK_UP_MASK; + + MLX4_PUT(outbox->buf, port_type, + QUERY_PORT_SUPPORTED_TYPE_OFFSET); + + if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) + short_field = mlx4_get_slave_num_gids(dev, slave, port); + else + short_field = 1; /* slave max gids */ + MLX4_PUT(outbox->buf, short_field, + QUERY_PORT_CUR_MAX_GID_OFFSET); + + short_field = dev->caps.pkey_table_len[vhcr->in_modifier]; + MLX4_PUT(outbox->buf, short_field, + QUERY_PORT_CUR_MAX_PKEY_OFFSET); + } + + return err; +} + +int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, + int *gid_tbl_len, int *pkey_tbl_len) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + u16 field; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, + MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + outbox = mailbox->buf; + + MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET); + *gid_tbl_len = field; + + MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET); + *pkey_tbl_len = field; + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len); + +int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_icm_iter iter; + __be64 *pages; + int lg; + int nent = 0; + int i; + int err = 0; + int ts = 0, tc = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + pages = mailbox->buf; + + for (mlx4_icm_first(icm, &iter); + !mlx4_icm_last(&iter); + mlx4_icm_next(&iter)) { + /* + * We have to pass pages that are aligned to their + * size, so find the least significant 1 in the + * address or size and use that as our log2 size. 
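+		 * For example, a chunk at DMA address 0x12000 spanning 0x4000
+		 * bytes gives lg = 13, so it is passed to the firmware as two
+		 * 8 KB pages.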
+ */ + lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; + if (lg < MLX4_ICM_PAGE_SHIFT) { + mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n", + MLX4_ICM_PAGE_SIZE, + (unsigned long long) mlx4_icm_addr(&iter), + mlx4_icm_size(&iter)); + err = -EINVAL; + goto out; + } + + for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) { + if (virt != -1) { + pages[nent * 2] = cpu_to_be64(virt); + virt += 1 << lg; + } + + pages[nent * 2 + 1] = + cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) | + (lg - MLX4_ICM_PAGE_SHIFT)); + ts += 1 << (lg - 10); + ++tc; + + if (++nent == MLX4_MAILBOX_SIZE / 16) { + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + if (err) + goto out; + nent = 0; + } + } + } + + if (nent) + err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + goto out; + + switch (op) { + case MLX4_CMD_MAP_FA: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM_AUX: + mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts); + break; + case MLX4_CMD_MAP_ICM: + mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n", + tc, ts, (unsigned long long) virt - (ts << 10)); + break; + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1); +} + +int mlx4_UNMAP_FA(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + + +int mlx4_RUN_FW(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +int mlx4_QUERY_FW(struct mlx4_dev *dev) +{ + struct mlx4_fw *fw = &mlx4_priv(dev)->fw; + struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err = 0; + u64 fw_ver; + u16 cmd_if_rev; + u8 lg; + +#define QUERY_FW_OUT_SIZE 0x100 +#define QUERY_FW_VER_OFFSET 0x00 +#define QUERY_FW_PPF_ID 0x09 +#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a +#define QUERY_FW_MAX_CMD_OFFSET 0x0f +#define QUERY_FW_ERR_START_OFFSET 0x30 +#define QUERY_FW_ERR_SIZE_OFFSET 0x38 +#define QUERY_FW_ERR_BAR_OFFSET 0x3c + +#define QUERY_FW_SIZE_OFFSET 0x00 +#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 +#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28 + +#define QUERY_FW_COMM_BASE_OFFSET 0x40 +#define QUERY_FW_COMM_BAR_OFFSET 0x48 + +#define QUERY_FW_CLOCK_OFFSET 0x50 +#define QUERY_FW_CLOCK_BAR 0x58 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET); + /* + * FW subminor version is at more significant bits than minor + * version, so swap here. 
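+	 * The raw value is laid out as <major:16><subminor:16><minor:16>;
+	 * after the swap it becomes <major:16><minor:16><subminor:16>, which
+	 * matches the major.minor.subminor print order used below.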
+ */ + dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) | + ((fw_ver & 0xffff0000ull) >> 16) | + ((fw_ver & 0x0000ffffull) << 16); + + MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); + dev->caps.function = lg; + + if (mlx4_is_slave(dev)) + goto out; + + + MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || + cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { + mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n", + cmd_if_rev); + mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff); + mlx4_err(dev, "This driver version supports only revisions %d to %d\n", + MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); + err = -ENODEV; + goto out; + } + + if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS) + dev->flags |= MLX4_FLAG_OLD_PORT_CMDS; + + MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); + cmd->max_cmds = 1 << lg; + + mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n", + (int) (dev->caps.fw_ver >> 32), + (int) (dev->caps.fw_ver >> 16) & 0xffff, + (int) dev->caps.fw_ver & 0xffff, + cmd_if_rev, cmd->max_cmds); + + MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET); + MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET); + MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET); + fw->catas_bar = (fw->catas_bar >> 6) * 2; + + mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n", + (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar); + + MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET); + MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); + MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET); + fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2; + + MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET); + MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET); + fw->comm_bar = (fw->comm_bar >> 6) * 2; + mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n", + fw->comm_bar, fw->comm_base); + mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2); + + MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET); + MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR); + fw->clock_bar = (fw->clock_bar >> 6) * 2; + mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n", + fw->clock_bar, fw->clock_offset); + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. 
+ */ + fw->fw_pages = + ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n", + (unsigned long long) fw->clr_int_base, fw->clr_int_bar); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u8 *outbuf; + int err; + + outbuf = outbox->buf; + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* for slaves, set pci PPF ID to invalid and zero out everything + * else except FW version */ + outbuf[0] = outbuf[1] = 0; + memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); + outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID; + + return 0; +} + +static void get_board_id(void *vsd, char *board_id) +{ + int i; + +#define VSD_OFFSET_SIG1 0x00 +#define VSD_OFFSET_SIG2 0xde +#define VSD_OFFSET_MLX_BOARD_ID 0xd0 +#define VSD_OFFSET_TS_BOARD_ID 0x20 + +#define VSD_SIGNATURE_TOPSPIN 0x5ad + + memset(board_id, 0, MLX4_BOARD_ID_LEN); + + if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN && + be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. + */ + u32 *bid_u32 = (u32 *)board_id; + + for (i = 0; i < 4; ++i) { + u32 *addr; + u32 val; + + addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); + val = get_unaligned(addr); + val = swab32(val); + put_unaligned(val, &bid_u32[i]); + } + } +} + +int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + int err; + +#define QUERY_ADAPTER_OUT_SIZE 0x100 +#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 +#define QUERY_ADAPTER_VSD_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + goto out; + + MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + + get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, + adapter->board_id); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *inbox; + int err; + static const u8 a0_dmfs_hw_steering[] = { + [MLX4_STEERING_DMFS_A0_DEFAULT] = 0, + [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1, + [MLX4_STEERING_DMFS_A0_STATIC] = 2, + [MLX4_STEERING_DMFS_A0_DISABLE] = 3 + }; + +#define INIT_HCA_IN_SIZE 0x200 +#define INIT_HCA_VERSION_OFFSET 0x000 +#define INIT_HCA_VERSION 2 +#define INIT_HCA_VXLAN_OFFSET 0x0c +#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e +#define INIT_HCA_FLAGS_OFFSET 0x014 +#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 +#define INIT_HCA_QPC_OFFSET 0x020 +#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) +#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) +#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) +#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) +#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) +#define 
INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) +#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) +#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) +#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) +#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) +#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) +#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) +#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) +#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) +#define INIT_HCA_MCAST_OFFSET 0x0c0 +#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) +#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) +#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) +#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) +#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) +#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 +#define INIT_HCA_FS_PARAM_OFFSET 0x1d0 +#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) +#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) +#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18) +#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) +#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) +#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22) +#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) +#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) +#define INIT_HCA_TPT_OFFSET 0x0f0 +#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) +#define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08) +#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) +#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) +#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18) +#define INIT_HCA_UAR_OFFSET 0x120 +#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) +#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; + + *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) = + (ilog2(cache_line_size()) - 4) << 5; + +#if defined(__LITTLE_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); +#elif defined(__BIG_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1); +#else +#error Host endianness not defined +#endif + /* Check port for UD address vector: */ + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1); + + /* Enable IPoIB checksumming if we can: */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3); + + /* Enable QoS support if module parameter set */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2); + + /* enable counters */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4); + + /* Enable RSS spread to fragmented IP packets when supported */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13); + + /* CX3 is capable of extending CQEs/EQEs from 32 
to 64 bytes */ + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) { + *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29); + dev->caps.eqe_size = 64; + dev->caps.eqe_factor = 1; + } else { + dev->caps.eqe_size = 32; + dev->caps.eqe_factor = 0; + } + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { + *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); + dev->caps.cqe_size = 64; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } else { + dev->caps.cqe_size = 32; + } + + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { + dev->caps.eqe_size = cache_line_size(); + dev->caps.cqe_size = cache_line_size(); + dev->caps.eqe_factor = 0; + MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | + (ilog2(dev->caps.eqe_size) - 5)), + INIT_HCA_EQE_CQE_STRIDE_OFFSET); + + /* User still need to know to support CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) + *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); + MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); + MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); + MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); + + /* steering attributes */ + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= + cpu_to_be32(1 << + INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); + + MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, + INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_table_sz, + INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + /* Enable Ethernet flow steering + * with udp unicast and tcp unicast + */ + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_STATIC) + MLX4_PUT(inbox, + (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), + INIT_HCA_FS_ETH_BITS_OFFSET); + MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, + INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); + /* Enable IPoIB flow steering + * with udp unicast and tcp unicast + */ + MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN), + INIT_HCA_FS_IB_BITS_OFFSET); + MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, + INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); + + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + MLX4_PUT(inbox, + ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode] + << 6)), + INIT_HCA_FS_A0_OFFSET); + } else { + MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, + INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_hash_sz, + INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_PUT(inbox, 
param->log_mc_table_sz, + INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) + MLX4_PUT(inbox, (u8) (1 << 3), + INIT_HCA_UC_STEERING_OFFSET); + } + + /* TPT attributes */ + + MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET); + MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); + MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); + + /* set parser VXLAN attributes */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { + u8 parser_params = 0; + MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); + } + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + + if (err) + mlx4_err(dev, "INIT_HCA returns %d\n", err); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_QUERY_HCA(struct mlx4_dev *dev, + struct mlx4_init_hca_param *param) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *outbox; + u32 dword_field; + int err; + u8 byte_field; + static const u8 a0_dmfs_query_hw_steering[] = { + [0] = MLX4_STEERING_DMFS_A0_DEFAULT, + [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, + [2] = MLX4_STEERING_DMFS_A0_STATIC, + [3] = MLX4_STEERING_DMFS_A0_DISABLE + }; + +#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04 +#define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_QUERY_HCA, + MLX4_CMD_TIME_CLASS_B, + !mlx4_is_slave(dev)); + if (err) + goto out; + + MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET); + MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); + + /* QPC/EEC/CQC/EQC/RDMARC attributes */ + + MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); + MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); + MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); + MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); + MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); + MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); + MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); + MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); + MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); + MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); + MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); + MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); + MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); + + MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); + if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { + param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; + } else { + MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); + if (byte_field & 0x8) + param->steering_mode = MLX4_STEERING_MODE_B0; + else + param->steering_mode = MLX4_STEERING_MODE_A0; + } + + if (dword_field & (1 << 13)) + param->rss_ip_frags = 1; + + /* steering attributes */ + if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); + MLX4_GET(param->log_mc_entry_sz, outbox, + 
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + MLX4_GET(param->log_mc_table_sz, outbox, + INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + MLX4_GET(byte_field, outbox, + INIT_HCA_FS_A0_OFFSET); + param->dmfs_high_steer_mode = + a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; + } else { + MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); + MLX4_GET(param->log_mc_entry_sz, outbox, + INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_GET(param->log_mc_hash_sz, outbox, + INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_GET(param->log_mc_table_sz, outbox, + INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + } + + /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS); + if (byte_field & 0x20) /* 64-bytes eqe enabled */ + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; + if (byte_field & 0x40) /* 64-bytes cqe enabled */ + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); + if (byte_field) { + param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED; + param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED; + param->cqe_size = 1 << ((byte_field & + MLX4_CQE_SIZE_MASK_STRIDE) + 5); + param->eqe_size = 1 << (((byte_field & + MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); + } + + /* TPT attributes */ + + MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); + MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); + MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); + MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); + + /* UAR attributes */ + + MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); + MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +static int mlx4_hca_core_clock_update(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + __be32 *outbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); + return PTR_ERR(mailbox); + } + outbox = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_QUERY_HCA, + MLX4_CMD_TIME_CLASS_B, + !mlx4_is_slave(dev)); + if (err) { + mlx4_warn(dev, "hca_core_clock update failed\n"); + goto out; + } + + MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +/* for IB-type ports only in SRIOV mode. 
Checks that both proxy QP0 + * and real QP0 are active, so that the paravirtualized QP0 is ready + * to operate */ +static int check_qp0_state(struct mlx4_dev *dev, int function, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + /* irrelevant if not infiniband */ + if (priv->mfunc.master.qp0_state[port].proxy_qp0_active && + priv->mfunc.master.qp0_state[port].qp0_active) + return 1; + return 0; +} + +int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); + int err; + + if (port < 0) + return -EINVAL; + + if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) + return 0; + + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { + /* Enable port only if it was previously disabled */ + if (!priv->mfunc.master.init_port_ref[port]) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + } + priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); + } else { + if (slave == mlx4_master_func_num(dev)) { + if (check_qp0_state(dev, slave, port) && + !priv->mfunc.master.qp0_state[port].port_active) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + priv->mfunc.master.qp0_state[port].port_active = 1; + priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); + } + } else + priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port); + } + ++priv->mfunc.master.init_port_ref[port]; + return 0; +} + +int mlx4_INIT_PORT(struct mlx4_dev *dev, int port) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags; + u16 field; + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { +#define INIT_PORT_IN_SIZE 256 +#define INIT_PORT_FLAGS_OFFSET 0x00 +#define INIT_PORT_FLAG_SIG (1 << 18) +#define INIT_PORT_FLAG_NG (1 << 17) +#define INIT_PORT_FLAG_G0 (1 << 16) +#define INIT_PORT_VL_SHIFT 4 +#define INIT_PORT_PORT_WIDTH_SHIFT 8 +#define INIT_PORT_MTU_OFFSET 0x04 +#define INIT_PORT_MAX_GID_OFFSET 0x06 +#define INIT_PORT_MAX_PKEY_OFFSET 0x0a +#define INIT_PORT_GUID0_OFFSET 0x10 +#define INIT_PORT_NODE_GUID_OFFSET 0x18 +#define INIT_PORT_SI_GUID_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + flags = 0; + flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT; + flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; + MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); + + field = 128 << dev->caps.ib_mtu_cap[port]; + MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); + field = dev->caps.gid_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); + field = dev->caps.pkey_table_len[port]; + MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET); + + err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + } else + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + + if (!err) + mlx4_hca_core_clock_update(dev); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); + +int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + 
struct mlx4_cmd_info *cmd) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); + int err; + + if (port < 0) + return -EINVAL; + + if (!(priv->mfunc.master.slave_state[slave].init_port_mask & + (1 << port))) + return 0; + + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { + if (priv->mfunc.master.init_port_ref[port] == 1) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + } + priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); + } else { + /* infiniband port */ + if (slave == mlx4_master_func_num(dev)) { + if (!priv->mfunc.master.qp0_state[port].qp0_active && + priv->mfunc.master.qp0_state[port].port_active) { + err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); + priv->mfunc.master.qp0_state[port].port_active = 0; + } + } else + priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); + } + --priv->mfunc.master.init_port_ref[port]; + return 0; +} + +int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) +{ + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} +EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); + +int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) +{ + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); +} + +struct mlx4_config_dev { + __be32 update_flags; + __be32 rsvd1[3]; + __be16 vxlan_udp_dport; + __be16 rsvd2; + __be32 rsvd3; + __be32 roce_flags; + __be32 rsvd4[25]; + __be16 rsvd5; + u8 rsvd6; + u8 rx_checksum_val; +}; + +#define MLX4_VXLAN_UDP_DPORT (1 << 0) +#define MLX4_DISABLE_RX_PORT BIT(18) + +static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + if (!err) + memcpy(config_dev, mailbox->buf, sizeof(*config_dev)); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +/* Conversion between the HW values and the actual functionality. + * The value represented by the array index, + * and the functionality determined by the flags. 
+ */ +static const u8 config_dev_csum_flags[] = { + [0] = 0, + [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP, + [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_L4, + [3] = MLX4_RX_CSUM_MODE_L4 | + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP | + MLX4_RX_CSUM_MODE_MULTI_VLAN +}; + +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params) +{ + struct mlx4_config_dev config_dev = {0}; + int err; + u8 csum_mask; + +#define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7 +#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0 +#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4 + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV)) + return -ENOTSUPP; + + err = mlx4_CONFIG_DEV_get(dev, &config_dev); + if (err) + return err; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask]; + + csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) & + CONFIG_DEV_RX_CSUM_MODE_MASK; + + if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0])) + return -EINVAL; + params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask]; + + params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval); + +int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); + config_dev.vxlan_udp_dport = udp_port; + + return mlx4_CONFIG_DEV_set(dev, &config_dev); +} +EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); + +#define CONFIG_DISABLE_RX_PORT BIT(15) +int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); + if (dis) + config_dev.roce_flags = + cpu_to_be32(CONFIG_DISABLE_RX_PORT); + + return mlx4_CONFIG_DEV_set(dev, &config_dev); +} + +int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) +{ + struct mlx4_cmd_mailbox *mailbox; + struct { + __be32 v_port1; + __be32 v_port2; + } *v2p; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + + v2p = mailbox->buf; + v2p->v_port1 = cpu_to_be32(port1); + v2p->v_port2 = cpu_to_be32(port2); + + err = mlx4_cmd(dev, mailbox->dma, 0, + MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + + +int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) +{ + int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, + MLX4_CMD_SET_ICM_SIZE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (ret) + return ret; + + /* + * Round up number of system pages needed in case + * MLX4_ICM_PAGE_SIZE < PAGE_SIZE. + */ + *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >> + (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT); + + return 0; +} + +int mlx4_NOP(struct mlx4_dev *dev) +{ + /* Input modifier of 0x1f means "finish as soon as possible." 
*/ + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); +} + +int mlx4_get_phys_port_id(struct mlx4_dev *dev) +{ + u8 port; + u32 *outbox; + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + u32 guid_hi, guid_lo; + int err, ret = 0; +#define MOD_STAT_CFG_PORT_OFFSET 8 +#define MOD_STAT_CFG_GUID_H 0X14 +#define MOD_STAT_CFG_GUID_L 0X1c + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + for (port = 1; port <= dev->caps.num_ports; port++) { + in_mod = port << MOD_STAT_CFG_PORT_OFFSET; + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Fail to get port %d uplink guid\n", + port); + ret = err; + } else { + MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); + MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L); + dev->caps.phys_port_id[port] = (u64)guid_lo | + (u64)guid_hi << 32; + } + } + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} + +#define MLX4_WOL_SETUP_MODE (5 << 28) +int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port) +{ + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3, + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); +} +EXPORT_SYMBOL_GPL(mlx4_wol_read); + +int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port) +{ + u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8; + + return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} +EXPORT_SYMBOL_GPL(mlx4_wol_write); + +enum { + ADD_TO_MCG = 0x26, +}; + + +void mlx4_opreq_action(struct work_struct *work) +{ + struct mlx4_priv *priv = container_of(work, struct mlx4_priv, + opreq_task); + struct mlx4_dev *dev = &priv->dev; + int num_tasks = atomic_read(&priv->opreq_count); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 *outbox; + u32 modifier; + u16 token; + u16 type; + int err; + u32 num_qps; + struct mlx4_qp qp; + int i; + u8 rem_mcg; + u8 prot; + +#define GET_OP_REQ_MODIFIER_OFFSET 0x08 +#define GET_OP_REQ_TOKEN_OFFSET 0x14 +#define GET_OP_REQ_TYPE_OFFSET 0x1a +#define GET_OP_REQ_DATA_OFFSET 0x20 + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n"); + return; + } + outbox = mailbox->buf; + + while (num_tasks) { + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, + MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to retrieve required operation: %d\n", + err); + return; + } + MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); + MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET); + MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET); + type &= 0xfff; + + switch (type) { + case ADD_TO_MCG: + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n"); + err = EPERM; + break; + } + mgm = (struct mlx4_mgm *)((u8 *)(outbox) + + GET_OP_REQ_DATA_OFFSET); + num_qps = be32_to_cpu(mgm->members_count) & + MGM_QPN_MASK; + rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1; + prot = ((u8 *)(&mgm->members_count))[0] >> 6; + + for (i = 0; i < num_qps; i++) { + qp.qpn = be32_to_cpu(mgm->qp[i]); + if (rem_mcg) + err = mlx4_multicast_detach(dev, &qp, + mgm->gid, + prot, 0); + else + err = mlx4_multicast_attach(dev, &qp, + mgm->gid, + mgm->gid[5] + , 0, prot, + NULL); + if (err) + 
break; + } + break; + default: + mlx4_warn(dev, "Bad type for required operation\n"); + err = EINVAL; + break; + } + err = mlx4_cmd(dev, 0, ((u32) err | + (__force u32)cpu_to_be32(token) << 16), + 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to acknowledge required request: %d\n", + err); + goto out; + } + memset(outbox, 0, 0xffc); + num_tasks = atomic_dec_return(&priv->opreq_count); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); +} + +static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox) +{ +#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10 +#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20 +#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40 +#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70 + + u32 set_attr_mask, getresp_attr_mask; + u32 trap_attr_mask, traprepress_attr_mask; + + MLX4_GET(set_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n", + set_attr_mask); + + MLX4_GET(getresp_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n", + getresp_attr_mask); + + MLX4_GET(trap_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n", + trap_attr_mask); + + MLX4_GET(traprepress_attr_mask, mailbox->buf, + MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET); + mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n", + traprepress_attr_mask); + + if (set_attr_mask && getresp_attr_mask && trap_attr_mask && + traprepress_attr_mask) + return 1; + + return 0; +} + +int mlx4_config_mad_demux(struct mlx4_dev *dev) +{ + struct mlx4_cmd_mailbox *mailbox; + int secure_host_active; + int err; + + /* Check if mad_demux is supported */ + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX)) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX"); + return -ENOMEM; + } + + /* Query mad_demux to find out which MADs are handled by internal sma */ + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */, + MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) { + mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n", + err); + goto out; + } + + secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox); + + /* Config mad_demux to handle all MADs returned by the query above */ + err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */, + MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) { + mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err); + goto out; + } + + if (secure_host_active) + mlx4_warn(dev, "HCA operating in secure-host mode. 
SMP firewall activated.\n"); +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +/* Access Reg commands */ +enum mlx4_access_reg_masks { + MLX4_ACCESS_REG_STATUS_MASK = 0x7f, + MLX4_ACCESS_REG_METHOD_MASK = 0x7f, + MLX4_ACCESS_REG_LEN_MASK = 0x7ff +}; + +struct mlx4_access_reg { + __be16 constant1; + u8 status; + u8 resrvd1; + __be16 reg_id; + u8 method; + u8 constant2; + __be32 resrvd2[2]; + __be16 len_const; + __be16 resrvd3; +#define MLX4_ACCESS_REG_HEADER_SIZE (20) + u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; +} __attribute__((__packed__)); + +/** + * mlx4_ACCESS_REG - Generic access reg command. + * @dev: mlx4_dev. + * @reg_id: register ID to access. + * @method: Access method Read/Write. + * @reg_len: register length to Read/Write in bytes. + * @reg_data: reg_data pointer to Read/Write From/To. + * + * Access ConnectX registers FW command. + * Returns 0 on success and copies outbox mlx4_access_reg data + * field into reg_data or a negative error code. + */ +static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, + enum mlx4_access_reg_method method, + u16 reg_len, void *reg_data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_access_reg *inbuf, *outbuf; + int err; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inbuf = inbox->buf; + outbuf = outbox->buf; + + inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); + inbuf->constant2 = 0x1; + inbuf->reg_id = cpu_to_be16(reg_id); + inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; + + reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); + inbuf->len_const = + cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | + ((0x3) << 12)); + + memcpy(inbuf->reg_data, reg_data, reg_len); + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, + MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { + err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; + mlx4_err(dev, + "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", + reg_id, err); + goto out; + } + + memcpy(reg_data, outbuf->reg_data, reg_len); +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return err; +} + +/* ConnectX registers IDs */ +enum mlx4_reg_id { + MLX4_REG_ID_PTYS = 0x5004, +}; + +/** + * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) + * register + * @dev: mlx4_dev. + * @method: Access method Read/Write. + * @ptys_reg: PTYS register data pointer. + * + * Access ConnectX PTYS register, to Read/Write Port Type/Speed + * configuration + * Returns 0 on success or a negative error code. 
+ */ +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg) +{ + return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, + method, sizeof(*ptys_reg), ptys_reg); +} +EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); + +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_access_reg *inbuf = inbox->buf; + u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; + u16 reg_id = be16_to_cpu(inbuf->reg_id); + + if (slave != mlx4_master_func_num(dev) && + method == MLX4_ACCESS_REG_WRITE) + return -EPERM; + + if (reg_id == MLX4_REG_ID_PTYS) { + struct mlx4_ptys_reg *ptys_reg = + (struct mlx4_ptys_reg *)inbuf->reg_data; + + ptys_reg->local_port = + mlx4_slave_convert_port(dev, slave, + ptys_reg->local_port); + } + + return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, + 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h new file mode 100644 index 000000000..07cb7c246 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX4_FW_H +#define MLX4_FW_H + +#include "mlx4.h" +#include "icm.h" + +struct mlx4_mod_stat_cfg { + u8 log_pg_sz; + u8 log_pg_sz_m; +}; + +struct mlx4_port_cap { + u8 supported_port_types; + u8 suggested_type; + u8 default_sense; + u8 log_max_macs; + u8 log_max_vlans; + int ib_mtu; + int max_port_width; + int max_vl; + int max_gids; + int max_pkeys; + u64 def_mac; + u16 eth_mtu; + int trans_type; + int vendor_oui; + u16 wavelength; + u64 trans_code; + u8 dmfs_optimized_state; +}; + +struct mlx4_dev_cap { + int max_srq_sz; + int max_qp_sz; + int reserved_qps; + int max_qps; + int reserved_srqs; + int max_srqs; + int max_cq_sz; + int reserved_cqs; + int max_cqs; + int max_mpts; + int reserved_eqs; + int max_eqs; + int num_sys_eqs; + int reserved_mtts; + int max_mrw_sz; + int reserved_mrws; + int max_mtt_seg; + int max_requester_per_qp; + int max_responder_per_qp; + int max_rdma_global; + int local_ca_ack_delay; + int num_ports; + u32 max_msg_sz; + u16 stat_rate_support; + int fs_log_max_ucast_qp_range_size; + int fs_max_num_qp_per_entry; + u64 flags; + u64 flags2; + int reserved_uars; + int uar_size; + int min_page_sz; + int bf_reg_size; + int bf_regs_per_page; + int max_sq_sg; + int max_sq_desc_sz; + int max_rq_sg; + int max_rq_desc_sz; + int max_qp_per_mcg; + int reserved_mgms; + int max_mcgs; + int reserved_pds; + int max_pds; + int reserved_xrcds; + int max_xrcds; + int qpc_entry_sz; + int rdmarc_entry_sz; + int altc_entry_sz; + int aux_entry_sz; + int srq_entry_sz; + int cqc_entry_sz; + int eqc_entry_sz; + int dmpt_entry_sz; + int cmpt_entry_sz; + int mtt_entry_sz; + int resize_srq; + u32 bmme_flags; + u32 reserved_lkey; + u64 max_icm_sz; + int max_gso_sz; + int max_rss_tbl_sz; + u32 max_counters; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; + struct mlx4_rate_limit_caps rl_caps; + struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_func_cap { + u8 num_ports; + u8 flags; + u32 pf_context_behaviour; + int qp_quota; + int cq_quota; + int srq_quota; + int mpt_quota; + int mtt_quota; + int max_eq; + int reserved_eq; + int mcg_quota; + u32 qp0_qkey; + u32 qp0_tunnel_qpn; + u32 qp0_proxy_qpn; + u32 qp1_tunnel_qpn; + u32 qp1_proxy_qpn; + u32 reserved_lkey; + u8 physical_port; + u8 port_flags; + u8 flags1; + u64 phys_port_id; + u32 extra_flags; +}; + +struct mlx4_func { + int bus; + int device; + int function; + int physical_function; + int rsvd_eqs; + int max_eq; + int rsvd_uars; +}; + +struct mlx4_adapter { + char board_id[MLX4_BOARD_ID_LEN]; + u8 inta_pin; +}; + +struct mlx4_init_hca_param { + u64 qpc_base; + u64 rdmarc_base; + u64 auxc_base; + u64 altc_base; + u64 srqc_base; + u64 cqc_base; + u64 eqc_base; + u64 mc_base; + u64 dmpt_base; + u64 cmpt_base; + u64 mtt_base; + u64 global_caps; + u16 log_mc_entry_sz; + u16 log_mc_hash_sz; + u16 hca_core_clock; /* Internal Clock Frequency (in MHz) */ + u8 log_num_qps; + u8 log_num_srqs; + u8 log_num_cqs; + u8 log_num_eqs; + u16 num_sys_eqs; + u8 log_rd_per_qp; + u8 log_mc_table_sz; + u8 log_mpt_sz; + u8 log_uar_sz; + u8 mw_enabled; /* Enable memory windows */ + u8 uar_page_sz; /* log pg sz in 4k chunks */ + u8 steering_mode; /* for QUERY_HCA */ + u8 dmfs_high_steer_mode; /* for QUERY_HCA */ + u64 dev_cap_enabled; + u16 cqe_size; /* For use only when CQE stride feature enabled */ + u16 eqe_size; /* For use only when EQE stride feature enabled */ + u8 rss_ip_frags; +}; + +struct mlx4_init_ib_param { + int port_width; + int vl_cap; + int mtu_cap; + u16 gid_cap; + u16 pkey_cap; + int set_guid0; + u64 
guid0; + int set_node_guid; + u64 node_guid; + int set_si_guid; + u64 si_guid; +}; + +struct mlx4_set_ib_param { + int set_si_guid; + int reset_qkey_viol; + u64 si_guid; + u32 cap_mask; +}; + +void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); +int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); +int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap); +int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, + struct mlx4_func_cap *func_cap); +int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave); +int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_FA(struct mlx4_dev *dev); +int mlx4_RUN_FW(struct mlx4_dev *dev); +int mlx4_QUERY_FW(struct mlx4_dev *dev); +int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter); +int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); +int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param); +int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic); +int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt); +int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages); +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); +int mlx4_NOP(struct mlx4_dev *dev); +int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg); +void mlx4_opreq_action(struct work_struct *work); + +#endif /* MLX4_FW_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c new file mode 100644 index 000000000..8f2fde048 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include "fw_qos.h" +#include "fw.h" + +enum { + /* allocate vpp opcode modifiers */ + MLX4_ALLOCATE_VPP_ALLOCATE = 0x0, + MLX4_ALLOCATE_VPP_QUERY = 0x1 +}; + +enum { + /* set vport qos opcode modifiers */ + MLX4_SET_VPORT_QOS_SET = 0x0, + MLX4_SET_VPORT_QOS_QUERY = 0x1 +}; + +struct mlx4_set_port_prio2tc_context { + u8 prio2tc[4]; +}; + +struct mlx4_port_scheduler_tc_cfg_be { + __be16 pg; + __be16 bw_precentage; + __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */ + __be16 max_bw_value; +}; + +struct mlx4_set_port_scheduler_context { + struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC]; +}; + +/* Granular Qos (per VF) section */ +struct mlx4_alloc_vpp_param { + __be32 availible_vpp; + __be32 vpp_p_up[MLX4_NUM_UP]; +}; + +struct mlx4_prio_qos_param { + __be32 bw_share; + __be32 max_avg_bw; + __be32 reserved; + __be32 enable; + __be32 reserved1[4]; +}; + +struct mlx4_set_vport_context { + __be32 reserved[8]; + struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP]; +}; + +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_prio2tc_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i += 2) + context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1]; + + in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC); + +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_scheduler_context *context; + int err; + u32 in_mod; + int i; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + + for (i = 0; i < MLX4_NUM_TC; i++) { + struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i]; + u16 r; + + if (ratelimit && ratelimit[i]) { + if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) { + r = ratelimit[i]; + tc->max_bw_units = + htons(MLX4_RATELIMIT_100M_UNITS); + } else { + r = ratelimit[i] / 10; + tc->max_bw_units = + htons(MLX4_RATELIMIT_1G_UNITS); + } + tc->max_bw_value = htons(r); + } else { + tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT); + tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS); + } + + tc->pg = htons(pg[i]); + tc->bw_precentage = htons(tc_tx_bw[i]); + } + + in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); + +int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, + u16 *availible_vpp, u8 *vpp_p_up) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_alloc_vpp_param *out_param; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + out_param = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, port, + MLX4_ALLOCATE_VPP_QUERY, + MLX4_CMD_ALLOCATE_VPP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + /* Total number of supported VPPs */ + *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp); + + for (i = 0; i < MLX4_NUM_UP; i++) + vpp_p_up[i] = 
(u8)be32_to_cpu(out_param->vpp_p_up[i]); + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get); + +int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_alloc_vpp_param *in_param; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + in_param = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i++) + in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]); + + err = mlx4_cmd(dev, mailbox->dma, port, + MLX4_ALLOCATE_VPP_ALLOCATE, + MLX4_CMD_ALLOCATE_VPP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set); + +int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *out_param) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vport_context *ctx; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ctx = mailbox->buf; + + err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port, + MLX4_SET_VPORT_QOS_QUERY, + MLX4_CMD_SET_VPORT_QOS, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + goto out; + + for (i = 0; i < MLX4_NUM_UP; i++) { + out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share); + out_param[i].max_avg_bw = + be32_to_cpu(ctx->qos_p_up[i].max_avg_bw); + out_param[i].enable = + !!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} +EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get); + +int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *in_param) +{ + int i; + int err; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_vport_context *ctx; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ctx = mailbox->buf; + + for (i = 0; i < MLX4_NUM_UP; i++) { + ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share); + ctx->qos_p_up[i].max_avg_bw = + cpu_to_be32(in_param[i].max_avg_bw); + ctx->qos_p_up[i].enable = + cpu_to_be32(in_param[i].enable << 31); + } + + err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port, + MLX4_SET_VPORT_QOS_SET, + MLX4_CMD_SET_VPORT_QOS, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h new file mode 100644 index 000000000..ac1f33187 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_FW_QOS_H +#define MLX4_FW_QOS_H + +#include +#include + +#define MLX4_NUM_UP 8 +#define MLX4_NUM_TC 8 + +/* Default supported priorities for VPP allocation */ +#define MLX4_DEFAULT_QOS_PRIO (0) + +/* Derived from FW feature definition, 0 is the default vport fo all QPs */ +#define MLX4_VPP_DEFAULT_VPORT (0) + +struct mlx4_vport_qos_param { + u32 bw_share; + u32 max_avg_bw; + u8 enable; +}; + +/** + * mlx4_SET_PORT_PRIO2TC - This routine maps user priorities to traffic + * classes of a given port and device. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @prio2tc: Array of TC associated with each priorities. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); + +/** + * mlx4_SET_PORT_SCHEDULER - This routine configures the arbitration between + * traffic classes (ETS) and configured rate limit for traffic classes. + * tc_tx_bw, pg and ratelimit are arrays where each index represents a TC. + * The description for those parameters below refers to a single TC. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @tc_tx_bw: The percentage of the bandwidth allocated for traffic class + * within a TC group. The sum of the bw_percentage of all the traffic + * classes within a TC group must equal 100% for correct operation. + * @pg: The TC group the traffic class is associated with. + * @ratelimit: The maximal bandwidth allowed for the use by this traffic class. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, + u8 *pg, u16 *ratelimit); +/** + * mlx4_ALLOCATE_VPP_get - Query port VPP availible resources and allocation. + * Before distribution of VPPs to priorities, only availible_vpp is returned. + * After initialization it returns the distribution of VPPs among priorities. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @availible_vpp: Pointer to variable where number of availible VPPs is stored + * @vpp_p_up: Distribution of VPPs to priorities is stored in this array + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, + u16 *availible_vpp, u8 *vpp_p_up); +/** + * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities. + * The total number of VPPs assigned to all for a port must not exceed + * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get. + * VPP allocation is allowed only after the port type has been set, + * and while no QPs are open for this port. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vpp_p_up: Allocation of VPPs to different priorities. + * + * Returns 0 on success or a negative mlx4_core errno code. 
+ **/ +int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up); + +/** + * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport. + * Each priority allowed for the Vport is assigned a share of the BW, + * and a BW limitation. This command queries the current QoS values. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vport: Vport id. + * @out_param: Array of mlx4_vport_qos_param that will contain the values. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *out_param); + +/** + * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport. + * QoS parameters can be modified at any time, but must be initialized + * before any QP is associated with the VPort. + * + * @dev: mlx4_dev. + * @port: Physical port number. + * @vport: Vport id. + * @in_param: Array of mlx4_vport_qos_param which holds the requested values. + * + * Returns 0 on success or a negative mlx4_core errno code. + **/ +int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport, + struct mlx4_vport_qos_param *in_param); + +#endif /* MLX4_FW_QOS_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c new file mode 100644 index 000000000..2a9dd460a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include + +#include + +#include "mlx4.h" +#include "icm.h" +#include "fw.h" + +/* + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk.
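+ *
+ * Editor's note (not part of the original patch): with 4 KB pages,
+ * MLX4_ICM_ALLOC_SIZE below (1 << 18 = 256 KB) corresponds to an
+ * allocation order of get_order(256 KB) = 6, i.e. 64 pages per attempt.
+ * mlx4_alloc_icm() starts at that order and keeps halving it on
+ * allocation failure (or when fewer pages remain) until the request
+ * fits or the order drops below zero.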
+ */ +enum { + MLX4_ICM_ALLOC_SIZE = 1 << 18, + MLX4_TABLE_CHUNK_SIZE = 1 << 18 +}; + +static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) +{ + int i; + + if (chunk->nsg > 0) + pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + for (i = 0; i < chunk->npages; ++i) + __free_pages(sg_page(&chunk->mem[i]), + get_order(chunk->mem[i].length)); +} + +static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) +{ + int i; + + for (i = 0; i < chunk->npages; ++i) + dma_free_coherent(&dev->persist->pdev->dev, + chunk->mem[i].length, + lowmem_page_address(sg_page(&chunk->mem[i])), + sg_dma_address(&chunk->mem[i])); +} + +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) +{ + struct mlx4_icm_chunk *chunk, *tmp; + + if (!icm) + return; + + list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) { + if (coherent) + mlx4_free_icm_coherent(dev, chunk); + else + mlx4_free_icm_pages(dev, chunk); + + kfree(chunk); + } + + kfree(icm); +} + +static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, + gfp_t gfp_mask, int node) +{ + struct page *page; + + page = alloc_pages_node(node, gfp_mask, order); + if (!page) { + page = alloc_pages(gfp_mask, order); + if (!page) + return -ENOMEM; + } + + sg_set_page(mem, page, PAGE_SIZE << order, 0); + return 0; +} + +static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, + int order, gfp_t gfp_mask) +{ + void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, + &sg_dma_address(mem), gfp_mask); + if (!buf) + return -ENOMEM; + + sg_set_buf(mem, buf, PAGE_SIZE << order); + BUG_ON(mem->offset); + sg_dma_len(mem) = PAGE_SIZE << order; + return 0; +} + +struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, + gfp_t gfp_mask, int coherent) +{ + struct mlx4_icm *icm; + struct mlx4_icm_chunk *chunk = NULL; + int cur_order; + int ret; + + /* We use sg_set_buf for coherent allocs, which assumes low memory */ + BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); + + icm = kmalloc_node(sizeof(*icm), + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), + dev->numa_node); + if (!icm) { + icm = kmalloc(sizeof(*icm), + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); + if (!icm) + return NULL; + } + + icm->refcount = 0; + INIT_LIST_HEAD(&icm->chunk_list); + + cur_order = get_order(MLX4_ICM_ALLOC_SIZE); + + while (npages > 0) { + if (!chunk) { + chunk = kmalloc_node(sizeof(*chunk), + gfp_mask & ~(__GFP_HIGHMEM | + __GFP_NOWARN), + dev->numa_node); + if (!chunk) { + chunk = kmalloc(sizeof(*chunk), + gfp_mask & ~(__GFP_HIGHMEM | + __GFP_NOWARN)); + if (!chunk) + goto fail; + } + + sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); + chunk->npages = 0; + chunk->nsg = 0; + list_add_tail(&chunk->list, &icm->chunk_list); + } + + while (1 << cur_order > npages) + --cur_order; + + if (coherent) + ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, + &chunk->mem[chunk->npages], + cur_order, gfp_mask); + else + ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], + cur_order, gfp_mask, + dev->numa_node); + + if (ret) { + if (--cur_order < 0) + goto fail; + else + continue; + } + + ++chunk->npages; + + if (coherent) + ++chunk->nsg; + else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + if (chunk->nsg <= 0) + goto fail; + } + + if (chunk->npages == MLX4_ICM_CHUNK_LEN) + chunk = NULL; + + npages -= 1 << cur_order; + } + + if (!coherent && chunk) 
{ + chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + if (chunk->nsg <= 0) + goto fail; + } + + return icm; + +fail: + mlx4_free_icm(dev, icm, coherent); + return NULL; +} + +static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt); +} + +static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) +{ + return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) +{ + return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1); +} + +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, + gfp_t gfp) +{ + u32 i = (obj & (table->num_obj - 1)) / + (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + int ret = 0; + + mutex_lock(&table->mutex); + + if (table->icm[i]) { + ++table->icm[i]->refcount; + goto out; + } + + table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, + (table->lowmem ? gfp : GFP_HIGHUSER) | + __GFP_NOWARN, table->coherent); + if (!table->icm[i]) { + ret = -ENOMEM; + goto out; + } + + if (mlx4_MAP_ICM(dev, table->icm[i], table->virt + + (u64) i * MLX4_TABLE_CHUNK_SIZE)) { + mlx4_free_icm(dev, table->icm[i], table->coherent); + table->icm[i] = NULL; + ret = -ENOMEM; + goto out; + } + + ++table->icm[i]->refcount; + +out: + mutex_unlock(&table->mutex); + return ret; +} + +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) +{ + u32 i; + u64 offset; + + i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + + mutex_lock(&table->mutex); + + if (--table->icm[i]->refcount == 0) { + offset = (u64) i * MLX4_TABLE_CHUNK_SIZE; + mlx4_UNMAP_ICM(dev, table->virt + offset, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i], table->coherent); + table->icm[i] = NULL; + } + + mutex_unlock(&table->mutex); +} + +void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, + dma_addr_t *dma_handle) +{ + int offset, dma_offset, i; + u64 idx; + struct mlx4_icm_chunk *chunk; + struct mlx4_icm *icm; + struct page *page = NULL; + + if (!table->lowmem) + return NULL; + + mutex_lock(&table->mutex); + + idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size; + icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE]; + dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE; + + if (!icm) + goto out; + + list_for_each_entry(chunk, &icm->chunk_list, list) { + for (i = 0; i < chunk->npages; ++i) { + if (dma_handle && dma_offset >= 0) { + if (sg_dma_len(&chunk->mem[i]) > dma_offset) + *dma_handle = sg_dma_address(&chunk->mem[i]) + + dma_offset; + dma_offset -= sg_dma_len(&chunk->mem[i]); + } + /* + * DMA mapping can merge pages but not split them, + * so if we found the page, dma_handle has already + * been assigned to. + */ + if (chunk->mem[i].length > offset) { + page = sg_page(&chunk->mem[i]); + goto out; + } + offset -= chunk->mem[i].length; + } + } + +out: + mutex_unlock(&table->mutex); + return page ? 
lowmem_page_address(page) + offset : NULL; +} + +int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end) +{ + int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size; + int err; + u32 i; + + for (i = start; i <= end; i += inc) { + err = mlx4_table_get(dev, table, i, GFP_KERNEL); + if (err) + goto fail; + } + + return 0; + +fail: + while (i > start) { + i -= inc; + mlx4_table_put(dev, table, i); + } + + return err; +} + +void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end) +{ + u32 i; + + for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size) + mlx4_table_put(dev, table, i); +} + +int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u64 virt, int obj_size, u32 nobj, int reserved, + int use_lowmem, int use_coherent) +{ + int obj_per_chunk; + int num_icm; + unsigned chunk_size; + int i; + u64 size; + + obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; + num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; + + table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL); + if (!table->icm) + return -ENOMEM; + table->virt = virt; + table->num_icm = num_icm; + table->num_obj = nobj; + table->obj_size = obj_size; + table->lowmem = use_lowmem; + table->coherent = use_coherent; + mutex_init(&table->mutex); + + size = (u64) nobj * obj_size; + for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { + chunk_size = MLX4_TABLE_CHUNK_SIZE; + if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size) + chunk_size = PAGE_ALIGN(size - + i * MLX4_TABLE_CHUNK_SIZE); + + table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, + (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN, use_coherent); + if (!table->icm[i]) + goto err; + if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) { + mlx4_free_icm(dev, table->icm[i], use_coherent); + table->icm[i] = NULL; + goto err; + } + + /* + * Add a reference to this ICM chunk so that it never + * gets freed (since it contains reserved firmware objects). + */ + ++table->icm[i]->refcount; + } + + return 0; + +err: + for (i = 0; i < num_icm; ++i) + if (table->icm[i]) { + mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i], use_coherent); + } + + kfree(table->icm); + + return -ENOMEM; +} + +void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) +{ + int i; + + for (i = 0; i < table->num_icm; ++i) + if (table->icm[i]) { + mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, + MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); + mlx4_free_icm(dev, table->icm[i], table->coherent); + } + + kfree(table->icm); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h new file mode 100644 index 000000000..0c7364550 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_ICM_H +#define MLX4_ICM_H + +#include +#include +#include + +#define MLX4_ICM_CHUNK_LEN \ + ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ + (sizeof (struct scatterlist))) + +enum { + MLX4_ICM_PAGE_SHIFT = 12, + MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, +}; + +struct mlx4_icm_chunk { + struct list_head list; + int npages; + int nsg; + struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; +}; + +struct mlx4_icm { + struct list_head chunk_list; + int refcount; +}; + +struct mlx4_icm_iter { + struct mlx4_icm *icm; + struct mlx4_icm_chunk *chunk; + int page_idx; +}; + +struct mlx4_dev; + +struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, + gfp_t gfp_mask, int coherent); +void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); + +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, + gfp_t gfp); +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); +int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end); +void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u32 start, u32 end); +int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, + u64 virt, int obj_size, u32 nobj, int reserved, + int use_lowmem, int use_coherent); +void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); +void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle); + +static inline void mlx4_icm_first(struct mlx4_icm *icm, + struct mlx4_icm_iter *iter) +{ + iter->icm = icm; + iter->chunk = list_empty(&icm->chunk_list) ? 
+ NULL : list_entry(icm->chunk_list.next, + struct mlx4_icm_chunk, list); + iter->page_idx = 0; +} + +static inline int mlx4_icm_last(struct mlx4_icm_iter *iter) +{ + return !iter->chunk; +} + +static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) +{ + if (++iter->page_idx >= iter->chunk->nsg) { + if (iter->chunk->list.next == &iter->icm->chunk_list) { + iter->chunk = NULL; + return; + } + + iter->chunk = list_entry(iter->chunk->list.next, + struct mlx4_icm_chunk, list); + iter->page_idx = 0; + } +} + +static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) +{ + return sg_dma_address(&iter->chunk->mem[iter->page_idx]); +} + +static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) +{ + return sg_dma_len(&iter->chunk->mem[iter->page_idx]); +} + +int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); +int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); + +#endif /* MLX4_ICM_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c new file mode 100644 index 000000000..0d80aed59 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
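+ *
+ * Editor's sketch (not part of the original patch): mlx4_register_interface()
+ * below is how protocol drivers such as mlx4_en and mlx4_ib attach to the
+ * core; the core calls ->add() once per existing device, keeps the returned
+ * context, and hands it back to ->remove() on teardown (a NULL context means
+ * the device is skipped). A minimal, hypothetical client could look roughly
+ * like this, with struct mlx4_interface and MLX4_PROT_ETH coming from the
+ * mlx4 driver headers and struct my_ctx being a made-up private type:
+ *
+ *	struct my_ctx { struct mlx4_dev *dev; };
+ *
+ *	static void *my_add(struct mlx4_dev *dev)
+ *	{
+ *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ *
+ *		if (ctx)
+ *			ctx->dev = dev;
+ *		return ctx;
+ *	}
+ *
+ *	static void my_remove(struct mlx4_dev *dev, void *context)
+ *	{
+ *		kfree(context);
+ *	}
+ *
+ *	static struct mlx4_interface my_intf = {
+ *		.add		= my_add,
+ *		.remove		= my_remove,
+ *		.protocol	= MLX4_PROT_ETH,
+ *	};
+ *
+ *	err = mlx4_register_interface(&my_intf);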
+ */ + +#include +#include +#include + +#include "mlx4.h" + +struct mlx4_device_context { + struct list_head list; + struct list_head bond_list; + struct mlx4_interface *intf; + void *context; +}; + +static LIST_HEAD(intf_list); +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(intf_mutex); + +static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +{ + struct mlx4_device_context *dev_ctx; + + dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL); + if (!dev_ctx) + return; + + dev_ctx->intf = intf; + dev_ctx->context = intf->add(&priv->dev); + + if (dev_ctx->context) { + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else + kfree(dev_ctx); +} + +static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv) +{ + struct mlx4_device_context *dev_ctx; + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) { + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + intf->remove(&priv->dev, dev_ctx->context); + kfree(dev_ctx); + return; + } +} + +int mlx4_register_interface(struct mlx4_interface *intf) +{ + struct mlx4_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&intf_mutex); + + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &dev_list, dev_list) { + if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) { + mlx4_dbg(&priv->dev, + "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol); + intf->flags &= ~MLX4_INTFF_BONDING; + } + mlx4_add_device(intf, priv); + } + + mutex_unlock(&intf_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_register_interface); + +void mlx4_unregister_interface(struct mlx4_interface *intf) +{ + struct mlx4_priv *priv; + + mutex_lock(&intf_mutex); + + list_for_each_entry(priv, &dev_list, dev_list) + mlx4_remove_device(intf, priv); + + list_del(&intf->list); + + mutex_unlock(&intf_mutex); +} +EXPORT_SYMBOL_GPL(mlx4_unregister_interface); + +int mlx4_do_bond(struct mlx4_dev *dev, bool enable) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx; + unsigned long flags; + int ret; + LIST_HEAD(bond_list); + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + ret = mlx4_disable_rx_port_check(dev, enable); + if (ret) { + mlx4_err(dev, "Fail to %s rx port check\n", + enable ? "enable" : "disable"); + return ret; + } + if (enable) { + dev->flags |= MLX4_FLAG_BONDED; + } else { + ret = mlx4_virt2phy_port_map(dev, 1, 2); + if (ret) { + mlx4_err(dev, "Fail to reset port map\n"); + return ret; + } + dev->flags &= ~MLX4_FLAG_BONDED; + } + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) { + if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) { + list_add_tail(&dev_ctx->bond_list, &bond_list); + list_del(&dev_ctx->list); + } + } + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &bond_list, bond_list) { + dev_ctx->intf->remove(dev, dev_ctx->context); + dev_ctx->context = dev_ctx->intf->add(dev); + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", + dev_ctx->intf->protocol, enable ? 
+ "enabled" : "disabled"); + } + return 0; +} + +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, + unsigned long param) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx; + unsigned long flags; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf->event) + dev_ctx->intf->event(dev, dev_ctx->context, type, param); + + spin_unlock_irqrestore(&priv->ctx_lock, flags); +} + +int mlx4_register_device(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_interface *intf; + + mutex_lock(&intf_mutex); + + dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP; + list_add_tail(&priv->dev_list, &dev_list); + list_for_each_entry(intf, &intf_list, list) + mlx4_add_device(intf, priv); + + mutex_unlock(&intf_mutex); + mlx4_start_catas_poll(dev); + + return 0; +} + +void mlx4_unregister_device(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_interface *intf; + + mlx4_stop_catas_poll(dev); + mutex_lock(&intf_mutex); + + list_for_each_entry(intf, &intf_list, list) + mlx4_remove_device(intf, priv); + + list_del(&priv->dev_list); + dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP; + + mutex_unlock(&intf_mutex); +} + +void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx; + unsigned long flags; + void *result = NULL; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) { + result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port); + break; + } + + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + return result; +} +EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c new file mode 100644 index 000000000..ced5ecab5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -0,0 +1,3748 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mlx4.h" +#include "fw.h" +#include "icm.h" + +MODULE_AUTHOR("Roland Dreier"); +MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); + +struct workqueue_struct *mlx4_wq; + +#ifdef CONFIG_MLX4_DEBUG + +int mlx4_debug_level = 0; +module_param_named(debug_level, mlx4_debug_level, int, 0644); +MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); + +#endif /* CONFIG_MLX4_DEBUG */ + +#ifdef CONFIG_PCI_MSI + +static int msi_x = 1; +module_param(msi_x, int, 0444); +MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); + +#else /* CONFIG_PCI_MSI */ + +#define msi_x (0) + +#endif /* CONFIG_PCI_MSI */ + +static uint8_t num_vfs[3] = {0, 0, 0}; +static int num_vfs_argc; +module_param_array(num_vfs, byte , &num_vfs_argc, 0444); +MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" + "num_vfs=port1,port2,port1+2"); + +static uint8_t probe_vf[3] = {0, 0, 0}; +static int probe_vfs_argc; +module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); +MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" + "probe_vf=port1,port2,port1+2"); + +int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; +module_param_named(log_num_mgm_entry_size, + mlx4_log_num_mgm_entry_size, int, 0444); +MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" + " of qp per mcg, for example:" + " 10 gives 248.range: 7 <=" + " log_num_mgm_entry_size <= 12." 
+ " To activate device managed" + " flow steering when available, set to -1"); + +static bool enable_64b_cqe_eqe = true; +module_param(enable_64b_cqe_eqe, bool, 0444); +MODULE_PARM_DESC(enable_64b_cqe_eqe, + "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); + +#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ + MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ + MLX4_FUNC_CAP_DMFS_A0_STATIC) + +#define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) + +static char mlx4_version[] = + DRV_NAME ": Mellanox ConnectX core driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +static struct mlx4_profile default_profile = { + .num_qp = 1 << 18, + .num_srq = 1 << 16, + .rdmarc_per_qp = 1 << 4, + .num_cq = 1 << 16, + .num_mcg = 1 << 13, + .num_mpt = 1 << 19, + .num_mtt = 1 << 20, /* It is really num mtt segements */ +}; + +static struct mlx4_profile low_mem_profile = { + .num_qp = 1 << 17, + .num_srq = 1 << 6, + .rdmarc_per_qp = 1 << 4, + .num_cq = 1 << 8, + .num_mcg = 1 << 8, + .num_mpt = 1 << 9, + .num_mtt = 1 << 7, +}; + +static int log_num_mac = 7; +module_param_named(log_num_mac, log_num_mac, int, 0444); +MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); + +static int log_num_vlan; +module_param_named(log_num_vlan, log_num_vlan, int, 0444); +MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); +/* Log2 max number of VLANs per ETH port (0-7) */ +#define MLX4_LOG_NUM_VLANS 7 +#define MLX4_MIN_LOG_NUM_VLANS 0 +#define MLX4_MIN_LOG_NUM_MAC 1 + +static bool use_prio; +module_param_named(use_prio, use_prio, bool, 0444); +MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); + +int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); +module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); +MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); + +static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; +static int arr_argc = 2; +module_param_array(port_type_array, int, &arr_argc, 0444); +MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " + "1 for IB, 2 for Ethernet"); + +struct mlx4_port_config { + struct list_head list; + enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; + struct pci_dev *pdev; +}; + +static atomic_t pf_loading = ATOMIC_INIT(0); + +int mlx4_check_port_params(struct mlx4_dev *dev, + enum mlx4_port_type *port_type) +{ + int i; + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { + for (i = 0; i < dev->caps.num_ports - 1; i++) { + if (port_type[i] != port_type[i + 1]) { + mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); + return -EINVAL; + } + } + } + + for (i = 0; i < dev->caps.num_ports; i++) { + if (!(port_type[i] & dev->caps.supported_type[i+1])) { + mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", + i + 1); + return -EINVAL; + } + } + return 0; +} + +static void mlx4_set_port_mask(struct mlx4_dev *dev) +{ + int i; + + for (i = 1; i <= dev->caps.num_ports; ++i) + dev->caps.port_mask[i] = dev->caps.port_type[i]; +} + +enum { + MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, +}; + +static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err = 0; + struct mlx4_func func; + + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_QUERY_FUNC(dev, &func, 0); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + return err; + } + dev_cap->max_eqs = func.max_eq; + dev_cap->reserved_eqs = 
func.rsvd_eqs; + dev_cap->reserved_uars = func.rsvd_uars; + err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; + } + return err; +} + +static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) +{ + struct mlx4_caps *dev_cap = &dev->caps; + + /* FW not supporting or cancelled by user */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || + !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) + return; + + /* Must have 64B CQE_EQE enabled by FW to use bigger stride + * When FW has NCSI it may decide not to report 64B CQE/EQEs + */ + if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || + !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + return; + } + + if (cache_line_size() == 128 || cache_line_size() == 256) { + mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); + /* Changing the real data inside CQE size to 32B */ + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + + if (mlx4_is_master(dev)) + dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; + } else { + if (cache_line_size() != 32 && cache_line_size() != 64) + mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } +} + +static int _mlx4_dev_port(struct mlx4_dev *dev, int port, + struct mlx4_port_cap *port_cap) +{ + dev->caps.vl_cap[port] = port_cap->max_vl; + dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; + dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; + dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; + /* set gid and pkey table operating lengths by default + * to non-sriov values + */ + dev->caps.gid_table_len[port] = port_cap->max_gids; + dev->caps.pkey_table_len[port] = port_cap->max_pkeys; + dev->caps.port_width_cap[port] = port_cap->max_port_width; + dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.def_mac[port] = port_cap->def_mac; + dev->caps.supported_type[port] = port_cap->supported_port_types; + dev->caps.suggested_type[port] = port_cap->suggested_type; + dev->caps.default_sense[port] = port_cap->default_sense; + dev->caps.trans_type[port] = port_cap->trans_type; + dev->caps.vendor_oui[port] = port_cap->vendor_oui; + dev->caps.wavelength[port] = port_cap->wavelength; + dev->caps.trans_code[port] = port_cap->trans_code; + + return 0; +} + +static int mlx4_dev_port(struct mlx4_dev *dev, int port, + struct mlx4_port_cap *port_cap) +{ + int err = 0; + + err = mlx4_QUERY_PORT(dev, port, port_cap); + + if (err) + mlx4_err(dev, "QUERY_PORT command failed.\n"); + + return err; +} + +static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) +{ + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) + return; + + if (mlx4_is_mfunc(dev)) { + mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + return; + } + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { + mlx4_dbg(dev, + "Keep FCS is not supported - Disabling Ignore FCS"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; + return; + } +} + +#define MLX4_A0_STEERING_TABLE_SIZE 256 +static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) +{ + int err; + int i; + + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); + return err; + } + mlx4_dev_cap_dump(dev, dev_cap); + + if (dev_cap->min_page_sz > PAGE_SIZE) 
{ + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", + dev_cap->min_page_sz, PAGE_SIZE); + return -ENODEV; + } + if (dev_cap->num_ports > MLX4_MAX_PORTS) { + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", + dev_cap->num_ports, MLX4_MAX_PORTS); + return -ENODEV; + } + + if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { + mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", + dev_cap->uar_size, + (unsigned long long) + pci_resource_len(dev->persist->pdev, 2)); + return -ENODEV; + } + + dev->caps.num_ports = dev_cap->num_ports; + dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; + dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? + dev->caps.num_sys_eqs : + MLX4_MAX_EQ_NUM; + for (i = 1; i <= dev->caps.num_ports; ++i) { + err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); + if (err) { + mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); + return err; + } + } + + dev->caps.uar_page_size = PAGE_SIZE; + dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; + dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; + dev->caps.bf_reg_size = dev_cap->bf_reg_size; + dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; + dev->caps.max_sq_sg = dev_cap->max_sq_sg; + dev->caps.max_rq_sg = dev_cap->max_rq_sg; + dev->caps.max_wqes = dev_cap->max_qp_sz; + dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; + dev->caps.max_srq_wqes = dev_cap->max_srq_sz; + dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; + dev->caps.reserved_srqs = dev_cap->reserved_srqs; + dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; + dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; + /* + * Subtract 1 from the limit because we need to allocate a + * spare CQE so the HCA HW can tell the difference between an + * empty CQ and a full CQ. + */ + dev->caps.max_cqes = dev_cap->max_cq_sz - 1; + dev->caps.reserved_cqs = dev_cap->reserved_cqs; + dev->caps.reserved_eqs = dev_cap->reserved_eqs; + dev->caps.reserved_mtts = dev_cap->reserved_mtts; + dev->caps.reserved_mrws = dev_cap->reserved_mrws; + + /* The first 128 UARs are used for EQ doorbells */ + dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); + dev->caps.reserved_pds = dev_cap->reserved_pds; + dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? + dev_cap->reserved_xrcds : 0; + dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 
+ dev_cap->max_xrcds : 0; + dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; + + dev->caps.max_msg_sz = dev_cap->max_msg_sz; + dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); + dev->caps.flags = dev_cap->flags; + dev->caps.flags2 = dev_cap->flags2; + dev->caps.bmme_flags = dev_cap->bmme_flags; + dev->caps.reserved_lkey = dev_cap->reserved_lkey; + dev->caps.stat_rate_support = dev_cap->stat_rate_support; + dev->caps.max_gso_sz = dev_cap->max_gso_sz; + dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + + /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ + if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) + dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; + /* Don't do sense port on multifunction devices (for now at least) */ + if (mlx4_is_mfunc(dev)) + dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; + + if (mlx4_low_memory_profile()) { + dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; + dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; + } else { + dev->caps.log_num_macs = log_num_mac; + dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; + } + + for (i = 1; i <= dev->caps.num_ports; ++i) { + dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; + if (dev->caps.supported_type[i]) { + /* if only ETH is supported - assign ETH */ + if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) + dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; + /* if only IB is supported, assign IB */ + else if (dev->caps.supported_type[i] == + MLX4_PORT_TYPE_IB) + dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; + else { + /* if IB and ETH are supported, we set the port + * type according to user selection of port type; + * if user selected none, take the FW hint */ + if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) + dev->caps.port_type[i] = dev->caps.suggested_type[i] ? + MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; + else + dev->caps.port_type[i] = port_type_array[i - 1]; + } + } + /* + * Link sensing is allowed on the port if 3 conditions are true: + * 1. Both protocols are supported on the port. + * 2. Different types are supported on the port + * 3. 
FW declared that it supports link sensing + */ + mlx4_priv(dev)->sense.sense_allowed[i] = + ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && + (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && + (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); + + /* + * If "default_sense" bit is set, we move the port to "AUTO" mode + * and perform sense_port FW command to try and set the correct + * port type from beginning + */ + if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { + enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; + dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; + mlx4_SENSE_PORT(dev, i, &sensed_port); + if (sensed_port != MLX4_PORT_TYPE_NONE) + dev->caps.port_type[i] = sensed_port; + } else { + dev->caps.possible_type[i] = dev->caps.port_type[i]; + } + + if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { + dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; + mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", + i, 1 << dev->caps.log_num_macs); + } + if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { + dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; + mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", + i, 1 << dev->caps.log_num_vlans); + } + } + + dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = + (1 << dev->caps.log_num_macs) * + (1 << dev->caps.log_num_vlans) * + dev->caps.num_ports; + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; + + if (dev_cap->dmfs_high_rate_qpn_base > 0 && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) + dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; + else + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + + if (dev_cap->dmfs_high_rate_qpn_range > 0 && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { + dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; + dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; + } else { + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; + } + + dev->caps.rl_caps = dev_cap->rl_caps; + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = + dev->caps.dmfs_high_rate_qpn_range; + + dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; + + dev->caps.sqp_demux = (mlx4_is_master(dev)) ? 
MLX4_MAX_NUM_SLAVES : 0; + + if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { + if (dev_cap->flags & + (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { + mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); + dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + } + + if (dev_cap->flags2 & + (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | + MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { + mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } + } + + if ((dev->caps.flags & + (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && + mlx4_is_master(dev)) + dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; + + if (!mlx4_is_slave(dev)) { + mlx4_enable_cqe_eqe_stride(dev); + dev->caps.alloc_res_qp_mask = + (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | + MLX4_RESERVE_A0_QP; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && + dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { + mlx4_warn(dev, "Old device ETS support detected\n"); + mlx4_warn(dev, "Consider upgrading device FW.\n"); + dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; + } + + } else { + dev->caps.alloc_res_qp_mask = 0; + } + + mlx4_enable_ignore_fcs(dev); + + return 0; +} + +static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u32 lnkcap1, lnkcap2; + int err1, err2; + +#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, + &lnkcap1); + err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, + &lnkcap2); + if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + *speed = PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + if (!err1) { + *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; + if (!lnkcap2) { /* pre-r3.0 */ + if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + } + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { + return err1 ? err1 : + err2 ? err2 : -EINVAL; + } + return 0; +} + +static void mlx4_check_pcie_caps(struct mlx4_dev *dev) +{ + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + int err; + +#define PCIE_SPEED_STR(speed) \ + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ + speed == PCIE_SPEED_2_5GT ? 
"2.5GT/s" : \ + "Unknown") + + err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); + if (err) { + mlx4_warn(dev, + "Unable to determine PCIe device BW capabilities\n"); + return; + } + + err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); + if (err || speed == PCI_SPEED_UNKNOWN || + width == PCIE_LNK_WIDTH_UNKNOWN) { + mlx4_warn(dev, + "Unable to determine PCI device chain minimum BW\n"); + return; + } + + if (width != width_cap || speed != speed_cap) + mlx4_warn(dev, + "PCIe BW is different than device's capability\n"); + + mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", + PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); + mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", + width, width_cap); + return; +} + +/*The function checks if there are live vf, return the num of them*/ +static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_state; + int i; + int ret = 0; + + for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { + s_state = &priv->mfunc.master.slave_state[i]; + if (s_state->active && s_state->last_cmd != + MLX4_COMM_CMD_RESET) { + mlx4_warn(dev, "%s: slave: %d is still active\n", + __func__, i); + ret++; + } + } + return ret; +} + +int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) +{ + u32 qk = MLX4_RESERVED_QKEY_BASE; + + if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || + qpn < dev->phys_caps.base_proxy_sqpn) + return -EINVAL; + + if (qpn >= dev->phys_caps.base_tunnel_sqpn) + /* tunnel qp */ + qk += qpn - dev->phys_caps.base_tunnel_sqpn; + else + qk += qpn - dev->phys_caps.base_proxy_sqpn; + *qkey = qk; + return 0; +} +EXPORT_SYMBOL(mlx4_get_parav_qkey); + +void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) +{ + struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); + + if (!mlx4_is_master(dev)) + return; + + priv->virt2phys_pkey[slave][port - 1][i] = val; +} +EXPORT_SYMBOL(mlx4_sync_pkey_table); + +void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) +{ + struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); + + if (!mlx4_is_master(dev)) + return; + + priv->slave_node_guids[slave] = guid; +} +EXPORT_SYMBOL(mlx4_put_slave_node_guid); + +__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); + + if (!mlx4_is_master(dev)) + return 0; + + return priv->slave_node_guids[slave]; +} +EXPORT_SYMBOL(mlx4_get_slave_node_guid); + +int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *s_slave; + + if (!mlx4_is_master(dev)) + return 0; + + s_slave = &priv->mfunc.master.slave_state[slave]; + return !!s_slave->active; +} +EXPORT_SYMBOL(mlx4_is_slave_active); + +static void slave_adjust_steering_mode(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *hca_param) +{ + dev->caps.steering_mode = hca_param->steering_mode; + if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; + dev->caps.fs_log_max_ucast_qp_range_size = + dev_cap->fs_log_max_ucast_qp_range_size; + } else + dev->caps.num_qp_per_mgm = + 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); + + mlx4_dbg(dev, "Steering mode is: %s\n", + mlx4_steering_mode_str(dev->caps.steering_mode)); +} + +static int mlx4_slave_cap(struct mlx4_dev 
*dev) +{ + int err; + u32 page_size; + struct mlx4_dev_cap dev_cap; + struct mlx4_func_cap func_cap; + struct mlx4_init_hca_param hca_param; + u8 i; + + memset(&hca_param, 0, sizeof(hca_param)); + err = mlx4_QUERY_HCA(dev, &hca_param); + if (err) { + mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); + return err; + } + + /* fail if the hca has an unknown global capability + * at this time global_caps should be always zeroed + */ + if (hca_param.global_caps) { + mlx4_err(dev, "Unknown hca global capabilities\n"); + return -ENOSYS; + } + + mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; + + dev->caps.hca_core_clock = hca_param.hca_core_clock; + + memset(&dev_cap, 0, sizeof(dev_cap)); + dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); + return err; + } + + err = mlx4_QUERY_FW(dev); + if (err) + mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); + + page_size = ~dev->caps.page_size_cap + 1; + mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); + if (page_size > PAGE_SIZE) { + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", + page_size, PAGE_SIZE); + return -ENODEV; + } + + /* slave gets uar page size from QUERY_HCA fw command */ + dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); + + /* TODO: relax this assumption */ + if (dev->caps.uar_page_size != PAGE_SIZE) { + mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", + dev->caps.uar_page_size, PAGE_SIZE); + return -ENODEV; + } + + memset(&func_cap, 0, sizeof(func_cap)); + err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); + if (err) { + mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", + err); + return err; + } + + if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != + PF_CONTEXT_BEHAVIOUR_MASK) { + mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", + func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); + return -ENOSYS; + } + + dev->caps.num_ports = func_cap.num_ports; + dev->quotas.qp = func_cap.qp_quota; + dev->quotas.srq = func_cap.srq_quota; + dev->quotas.cq = func_cap.cq_quota; + dev->quotas.mpt = func_cap.mpt_quota; + dev->quotas.mtt = func_cap.mtt_quota; + dev->caps.num_qps = 1 << hca_param.log_num_qps; + dev->caps.num_srqs = 1 << hca_param.log_num_srqs; + dev->caps.num_cqs = 1 << hca_param.log_num_cqs; + dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; + dev->caps.num_eqs = func_cap.max_eq; + dev->caps.reserved_eqs = func_cap.reserved_eq; + dev->caps.reserved_lkey = func_cap.reserved_lkey; + dev->caps.num_pds = MLX4_NUM_PDS; + dev->caps.num_mgms = 0; + dev->caps.num_amgms = 0; + + if (dev->caps.num_ports > MLX4_MAX_PORTS) { + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", + dev->caps.num_ports, MLX4_MAX_PORTS); + return -ENODEV; + } + + dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + + if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || + !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || + !dev->caps.qp0_qkey) { + err = -ENOMEM; + goto err_mem; + } + + for (i = 1; i <= dev->caps.num_ports; ++i) { + err 
= mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); + if (err) { + mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", + i, err); + goto err_mem; + } + dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; + dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; + dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; + dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; + dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; + dev->caps.port_mask[i] = dev->caps.port_type[i]; + dev->caps.phys_port_id[i] = func_cap.phys_port_id; + if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, + &dev->caps.gid_table_len[i], + &dev->caps.pkey_table_len[i])) + goto err_mem; + } + + if (dev->caps.uar_page_size * (dev->caps.num_uars - + dev->caps.reserved_uars) > + pci_resource_len(dev->persist->pdev, + 2)) { + mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", + dev->caps.uar_page_size * dev->caps.num_uars, + (unsigned long long) + pci_resource_len(dev->persist->pdev, 2)); + goto err_mem; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { + dev->caps.eqe_size = 64; + dev->caps.eqe_factor = 1; + } else { + dev->caps.eqe_size = 32; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { + dev->caps.cqe_size = 64; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } else { + dev->caps.cqe_size = 32; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { + dev->caps.eqe_size = hca_param.eqe_size; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { + dev->caps.cqe_size = hca_param.cqe_size; + /* User still need to know when CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); + + slave_adjust_steering_mode(dev, &dev_cap, &hca_param); + mlx4_dbg(dev, "RSS support for IP fragments is %s\n", + hca_param.rss_ip_frags ? "on" : "off"); + + if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && + dev->caps.bf_reg_size) + dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; + + if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) + dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; + + return 0; + +err_mem: + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + dev->caps.qp0_qkey = NULL; + dev->caps.qp0_tunnel = NULL; + dev->caps.qp0_proxy = NULL; + dev->caps.qp1_tunnel = NULL; + dev->caps.qp1_proxy = NULL; + + return err; +} + +static void mlx4_request_modules(struct mlx4_dev *dev) +{ + int port; + int has_ib_port = false; + int has_eth_port = false; +#define EN_DRV_NAME "mlx4_en" +#define IB_DRV_NAME "mlx4_ib" + + for (port = 1; port <= dev->caps.num_ports; port++) { + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) + has_ib_port = true; + else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + has_eth_port = true; + } + + if (has_eth_port) + request_module_nowait(EN_DRV_NAME); + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) + request_module_nowait(IB_DRV_NAME); +} + +/* + * Change the port configuration of the device. + * Every user of this function must hold the port mutex. 
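+ *
+ * Editor's sketch (not part of the original patch): a caller is expected to
+ * follow the same pattern set_port_type() uses further down, i.e. stop port
+ * sensing and hold priv->port_mutex around the reconfiguration; priv, mdev
+ * and new_types are the caller's own variables here:
+ *
+ *	mlx4_stop_sense(mdev);
+ *	mutex_lock(&priv->port_mutex);
+ *	err = mlx4_change_port_types(mdev, new_types);
+ *	mlx4_start_sense(mdev);
+ *	mutex_unlock(&priv->port_mutex);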
+ */ +int mlx4_change_port_types(struct mlx4_dev *dev, + enum mlx4_port_type *port_types) +{ + int err = 0; + int change = 0; + int port; + + for (port = 0; port < dev->caps.num_ports; port++) { + /* Change the port type only if the new type is different + * from the current, and not set to Auto */ + if (port_types[port] != dev->caps.port_type[port + 1]) + change = 1; + } + if (change) { + mlx4_unregister_device(dev); + for (port = 1; port <= dev->caps.num_ports; port++) { + mlx4_CLOSE_PORT(dev, port); + dev->caps.port_type[port] = port_types[port - 1]; + err = mlx4_SET_PORT(dev, port, -1); + if (err) { + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); + goto out; + } + } + mlx4_set_port_mask(dev); + err = mlx4_register_device(dev); + if (err) { + mlx4_err(dev, "Failed to register device\n"); + goto out; + } + mlx4_request_modules(dev); + } + +out: + return err; +} + +static ssize_t show_port_type(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_attr); + struct mlx4_dev *mdev = info->dev; + char type[8]; + + sprintf(type, "%s", + (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? + "ib" : "eth"); + if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) + sprintf(buf, "auto (%s)\n", type); + else + sprintf(buf, "%s\n", type); + + return strlen(buf); +} + +static ssize_t set_port_type(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_attr); + struct mlx4_dev *mdev = info->dev; + struct mlx4_priv *priv = mlx4_priv(mdev); + enum mlx4_port_type types[MLX4_MAX_PORTS]; + enum mlx4_port_type new_types[MLX4_MAX_PORTS]; + static DEFINE_MUTEX(set_port_type_mutex); + int i; + int err = 0; + + mutex_lock(&set_port_type_mutex); + + if (!strcmp(buf, "ib\n")) + info->tmp_type = MLX4_PORT_TYPE_IB; + else if (!strcmp(buf, "eth\n")) + info->tmp_type = MLX4_PORT_TYPE_ETH; + else if (!strcmp(buf, "auto\n")) + info->tmp_type = MLX4_PORT_TYPE_AUTO; + else { + mlx4_err(mdev, "%s is not supported port type\n", buf); + err = -EINVAL; + goto err_out; + } + + mlx4_stop_sense(mdev); + mutex_lock(&priv->port_mutex); + /* Possible type is always the one that was delivered */ + mdev->caps.possible_type[info->port] = info->tmp_type; + + for (i = 0; i < mdev->caps.num_ports; i++) { + types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : + mdev->caps.possible_type[i+1]; + if (types[i] == MLX4_PORT_TYPE_AUTO) + types[i] = mdev->caps.port_type[i+1]; + } + + if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && + !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { + for (i = 1; i <= mdev->caps.num_ports; i++) { + if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { + mdev->caps.possible_type[i] = mdev->caps.port_type[i]; + err = -EINVAL; + } + } + } + if (err) { + mlx4_err(mdev, "Auto sensing is not supported on this HCA. 
Set only 'eth' or 'ib' for both ports (should be the same)\n"); + goto out; + } + + mlx4_do_sense_ports(mdev, new_types, types); + + err = mlx4_check_port_params(mdev, new_types); + if (err) + goto out; + + /* We are about to apply the changes after the configuration + * was verified, no need to remember the temporary types + * any more */ + for (i = 0; i < mdev->caps.num_ports; i++) + priv->port[i + 1].tmp_type = 0; + + err = mlx4_change_port_types(mdev, new_types); + +out: + mlx4_start_sense(mdev); + mutex_unlock(&priv->port_mutex); +err_out: + mutex_unlock(&set_port_type_mutex); + + return err ? err : count; +} + +enum ibta_mtu { + IB_MTU_256 = 1, + IB_MTU_512 = 2, + IB_MTU_1024 = 3, + IB_MTU_2048 = 4, + IB_MTU_4096 = 5 +}; + +static inline int int_to_ibta_mtu(int mtu) +{ + switch (mtu) { + case 256: return IB_MTU_256; + case 512: return IB_MTU_512; + case 1024: return IB_MTU_1024; + case 2048: return IB_MTU_2048; + case 4096: return IB_MTU_4096; + default: return -1; + } +} + +static inline int ibta_mtu_to_int(enum ibta_mtu mtu) +{ + switch (mtu) { + case IB_MTU_256: return 256; + case IB_MTU_512: return 512; + case IB_MTU_1024: return 1024; + case IB_MTU_2048: return 2048; + case IB_MTU_4096: return 4096; + default: return -1; + } +} + +static ssize_t show_port_ib_mtu(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_mtu_attr); + struct mlx4_dev *mdev = info->dev; + + if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) + mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); + + sprintf(buf, "%d\n", + ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); + return strlen(buf); +} + +static ssize_t set_port_ib_mtu(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_mtu_attr); + struct mlx4_dev *mdev = info->dev; + struct mlx4_priv *priv = mlx4_priv(mdev); + int err, port, mtu, ibta_mtu = -1; + + if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { + mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); + return -EINVAL; + } + + err = kstrtoint(buf, 0, &mtu); + if (!err) + ibta_mtu = int_to_ibta_mtu(mtu); + + if (err || ibta_mtu < 0) { + mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); + return -EINVAL; + } + + mdev->caps.port_ib_mtu[info->port] = ibta_mtu; + + mlx4_stop_sense(mdev); + mutex_lock(&priv->port_mutex); + mlx4_unregister_device(mdev); + for (port = 1; port <= mdev->caps.num_ports; port++) { + mlx4_CLOSE_PORT(mdev, port); + err = mlx4_SET_PORT(mdev, port, -1); + if (err) { + mlx4_err(mdev, "Failed to set port %d, aborting\n", + port); + goto err_set_port; + } + } + err = mlx4_register_device(mdev); +err_set_port: + mutex_unlock(&priv->port_mutex); + mlx4_start_sense(mdev); + return err ? 
err : count; +} + +int mlx4_bond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (!mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, true); + else + ret = 0; + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to bond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is bonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_bond); + +int mlx4_unbond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, false); + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to unbond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is unbonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_unbond); + + +int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) +{ + u8 port1 = v2p->port1; + u8 port2 = v2p->port2; + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + mutex_lock(&priv->bond_mutex); + + /* zero means keep current mapping for this port */ + if (port1 == 0) + port1 = priv->v2p.port1; + if (port2 == 0) + port2 = priv->v2p.port2; + + if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || + (port2 < 1) || (port2 > MLX4_MAX_PORTS) || + (port1 == 2 && port2 == 1)) { + /* besides boundary checks cross mapping makes + * no sense and therefore not allowed */ + err = -EINVAL; + } else if ((port1 == priv->v2p.port1) && + (port2 == priv->v2p.port2)) { + err = 0; + } else { + err = mlx4_virt2phy_port_map(dev, port1, port2); + if (!err) { + mlx4_dbg(dev, "port map changed: [%d][%d]\n", + port1, port2); + priv->v2p.port1 = port1; + priv->v2p.port2 = port2; + } else { + mlx4_err(dev, "Failed to change port map: %d\n", err); + } + } + + mutex_unlock(&priv->bond_mutex); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_port_map_set); + +static int mlx4_load_fw(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, + GFP_HIGHUSER | __GFP_NOWARN, 0); + if (!priv->fw.fw_icm) { + mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); + return -ENOMEM; + } + + err = mlx4_MAP_FA(dev, priv->fw.fw_icm); + if (err) { + mlx4_err(dev, "MAP_FA command failed, aborting\n"); + goto err_free; + } + + err = mlx4_RUN_FW(dev); + if (err) { + mlx4_err(dev, "RUN_FW command failed, aborting\n"); + goto err_unmap_fa; + } + + return 0; + +err_unmap_fa: + mlx4_UNMAP_FA(dev); + +err_free: + mlx4_free_icm(dev, priv->fw.fw_icm, 0); + return err; +} + +static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, + int cmpt_entry_sz) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int num_eqs; + + err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_QP * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) + goto err; + + err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_SRQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_srqs, + dev->caps.reserved_srqs, 0, 0); + if (err) + goto err_qp; + + err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_CQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, dev->caps.num_cqs, + dev->caps.reserved_cqs, 0, 0); + if (err) + goto
err_srq; + + num_eqs = dev->phys_caps.num_phys_eqs; + err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, + cmpt_base + + ((u64) (MLX4_CMPT_TYPE_EQ * + cmpt_entry_sz) << MLX4_CMPT_SHIFT), + cmpt_entry_sz, num_eqs, num_eqs, 0, 0); + if (err) + goto err_cq; + + return 0; + +err_cq: + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + +err_srq: + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + +err_qp: + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + +err: + return err; +} + +static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca, u64 icm_size) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 aux_pages; + int num_eqs; + int err; + + err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); + if (err) { + mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); + return err; + } + + mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", + (unsigned long long) icm_size >> 10, + (unsigned long long) aux_pages << 2); + + priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, + GFP_HIGHUSER | __GFP_NOWARN, 0); + if (!priv->fw.aux_icm) { + mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); + return -ENOMEM; + } + + err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); + if (err) { + mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); + goto err_free_aux; + } + + err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); + if (err) { + mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); + goto err_unmap_aux; + } + + + num_eqs = dev->phys_caps.num_phys_eqs; + err = mlx4_init_icm_table(dev, &priv->eq_table.table, + init_hca->eqc_base, dev_cap->eqc_entry_sz, + num_eqs, num_eqs, 0, 0); + if (err) { + mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); + goto err_unmap_cmpt; + } + + /* + * Reserved MTT entries must be aligned up to a cacheline + * boundary, since the FW will write to them, while the driver + * writes to all other MTT entries. 
(The variable + * dev->caps.mtt_entry_sz below is really the MTT segment + * size, not the raw entry size) + */ + dev->caps.reserved_mtts = + ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, + dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; + + err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, + init_hca->mtt_base, + dev->caps.mtt_entry_sz, + dev->caps.num_mtts, + dev->caps.reserved_mtts, 1, 0); + if (err) { + mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); + goto err_unmap_eq; + } + + err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, + init_hca->dmpt_base, + dev_cap->dmpt_entry_sz, + dev->caps.num_mpts, + dev->caps.reserved_mrws, 1, 1); + if (err) { + mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); + goto err_unmap_mtt; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, + init_hca->qpc_base, + dev_cap->qpc_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map QP context memory, aborting\n"); + goto err_unmap_dmpt; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, + init_hca->auxc_base, + dev_cap->aux_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); + goto err_unmap_qp; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, + init_hca->altc_base, + dev_cap->altc_entry_sz, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); + goto err_unmap_auxc; + } + + err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, + init_hca->rdmarc_base, + dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, + dev->caps.num_qps, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); + goto err_unmap_altc; + } + + err = mlx4_init_icm_table(dev, &priv->cq_table.table, + init_hca->cqc_base, + dev_cap->cqc_entry_sz, + dev->caps.num_cqs, + dev->caps.reserved_cqs, 0, 0); + if (err) { + mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); + goto err_unmap_rdmarc; + } + + err = mlx4_init_icm_table(dev, &priv->srq_table.table, + init_hca->srqc_base, + dev_cap->srq_entry_sz, + dev->caps.num_srqs, + dev->caps.reserved_srqs, 0, 0); + if (err) { + mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); + goto err_unmap_cq; + } + + /* + * For flow steering device managed mode it is required to use + * mlx4_init_icm_table. For B0 steering mode it's not strictly + * required, but for simplicity just map the whole multicast + * group table now. The table isn't very big and it's a lot + * easier than trying to track ref counts. 
+ */ + err = mlx4_init_icm_table(dev, &priv->mcg_table.table, + init_hca->mc_base, + mlx4_get_mgm_entry_size(dev), + dev->caps.num_mgms + dev->caps.num_amgms, + dev->caps.num_mgms + dev->caps.num_amgms, + 0, 0); + if (err) { + mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); + goto err_unmap_srq; + } + + return 0; + +err_unmap_srq: + mlx4_cleanup_icm_table(dev, &priv->srq_table.table); + +err_unmap_cq: + mlx4_cleanup_icm_table(dev, &priv->cq_table.table); + +err_unmap_rdmarc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); + +err_unmap_altc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); + +err_unmap_auxc: + mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); + +err_unmap_qp: + mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); + +err_unmap_dmpt: + mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); + +err_unmap_mtt: + mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); + +err_unmap_eq: + mlx4_cleanup_icm_table(dev, &priv->eq_table.table); + +err_unmap_cmpt: + mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + +err_unmap_aux: + mlx4_UNMAP_ICM_AUX(dev); + +err_free_aux: + mlx4_free_icm(dev, priv->fw.aux_icm, 0); + + return err; +} + +static void mlx4_free_icms(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); + mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); + mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); + mlx4_cleanup_icm_table(dev, &priv->eq_table.table); + mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); + mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); + + mlx4_UNMAP_ICM_AUX(dev); + mlx4_free_icm(dev, priv->fw.aux_icm, 0); +} + +static void mlx4_slave_exit(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->cmd.slave_cmd_mutex); + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, + MLX4_COMM_TIME)) + mlx4_warn(dev, "Failed to close slave function\n"); + mutex_unlock(&priv->cmd.slave_cmd_mutex); +} + +static int map_bf_area(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + resource_size_t bf_start; + resource_size_t bf_len; + int err = 0; + + if (!dev->caps.bf_reg_size) + return -ENXIO; + + bf_start = pci_resource_start(dev->persist->pdev, 2) + + (dev->caps.num_uars << PAGE_SHIFT); + bf_len = pci_resource_len(dev->persist->pdev, 2) - + (dev->caps.num_uars << PAGE_SHIFT); + priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); + if (!priv->bf_mapping) + err = -ENOMEM; + + return err; +} + +static void unmap_bf_area(struct mlx4_dev *dev) +{ + if (mlx4_priv(dev)->bf_mapping) + io_mapping_free(mlx4_priv(dev)->bf_mapping); +} + +cycle_t mlx4_read_clock(struct mlx4_dev *dev) +{ + u32 clockhi, clocklo, clockhi1; + cycle_t cycles; + int i; + struct mlx4_priv *priv = mlx4_priv(dev); + + for (i = 0; i < 10; i++) { 
+ clockhi = swab32(readl(priv->clock_mapping)); + clocklo = swab32(readl(priv->clock_mapping + 4)); + clockhi1 = swab32(readl(priv->clock_mapping)); + if (clockhi == clockhi1) + break; + } + + cycles = (u64) clockhi << 32 | (u64) clocklo; + + return cycles; +} +EXPORT_SYMBOL_GPL(mlx4_read_clock); + + +static int map_internal_clock(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->clock_mapping = + ioremap(pci_resource_start(dev->persist->pdev, + priv->fw.clock_bar) + + priv->fw.clock_offset, MLX4_CLOCK_SIZE); + + if (!priv->clock_mapping) + return -ENOMEM; + + return 0; +} + +static void unmap_internal_clock(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (priv->clock_mapping) + iounmap(priv->clock_mapping); +} + +static void mlx4_close_hca(struct mlx4_dev *dev) +{ + unmap_internal_clock(dev); + unmap_bf_area(dev); + if (mlx4_is_slave(dev)) + mlx4_slave_exit(dev); + else { + mlx4_CLOSE_HCA(dev, 0); + mlx4_free_icms(dev); + } +} + +static void mlx4_close_fw(struct mlx4_dev *dev) +{ + if (!mlx4_is_slave(dev)) { + mlx4_UNMAP_FA(dev); + mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); + } +} + +static int mlx4_comm_check_offline(struct mlx4_dev *dev) +{ +#define COMM_CHAN_OFFLINE_OFFSET 0x09 + + u32 comm_flags; + u32 offline_bit; + unsigned long end; + struct mlx4_priv *priv = mlx4_priv(dev); + + end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; + while (time_before(jiffies, end)) { + comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_FLAGS)); + offline_bit = (comm_flags & + (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); + if (!offline_bit) + return 0; + /* There are cases as part of AER/Reset flow that PF needs + * around 100 msec to load. We therefore sleep for 100 msec + * to allow other tasks to make use of that CPU during this + * time interval. 
+ */ + msleep(100); + } + mlx4_err(dev, "Communication channel is offline.\n"); + return -EIO; +} + +static void mlx4_reset_vf_support(struct mlx4_dev *dev) +{ +#define COMM_CHAN_RST_OFFSET 0x1e + + struct mlx4_priv *priv = mlx4_priv(dev); + u32 comm_rst; + u32 comm_caps; + + comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + + MLX4_COMM_CHAN_CAPS)); + comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); + + if (comm_rst) + dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; +} + +static int mlx4_init_slave(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + u64 dma = (u64) priv->mfunc.vhcr_dma; + int ret_from_reset = 0; + u32 slave_read; + u32 cmd_channel_ver; + + if (atomic_read(&pf_loading)) { + mlx4_warn(dev, "PF is not ready - Deferring probe\n"); + return -EPROBE_DEFER; + } + + mutex_lock(&priv->cmd.slave_cmd_mutex); + priv->cmd.max_cmds = 1; + if (mlx4_comm_check_offline(dev)) { + mlx4_err(dev, "PF is not responsive, skipping initialization\n"); + goto err_offline; + } + + mlx4_reset_vf_support(dev); + mlx4_warn(dev, "Sending reset\n"); + ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); + /* if we are in the middle of flr the slave will try + * NUM_OF_RESET_RETRIES times before leaving.*/ + if (ret_from_reset) { + if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { + mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return -EPROBE_DEFER; + } else + goto err; + } + + /* check the driver version - the slave I/F revision + * must match the master's */ + slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); + cmd_channel_ver = mlx4_comm_get_version(); + + if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != + MLX4_COMM_GET_IF_REV(slave_read)) { + mlx4_err(dev, "slave driver version is not supported by the master\n"); + goto err; + } + + mlx4_warn(dev, "Sending vhcr0\n"); + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, + MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) + goto err; + + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return 0; + +err: + mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); +err_offline: + mutex_unlock(&priv->cmd.slave_cmd_mutex); + return -EIO; +} + +static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) +{ + int i; + + for (i = 1; i <= dev->caps.num_ports; i++) { + if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) + dev->caps.gid_table_len[i] = + mlx4_get_slave_num_gids(dev, 0, i); + else + dev->caps.gid_table_len[i] = 1; + dev->caps.pkey_table_len[i] = + dev->phys_caps.pkey_phys_table_len[i] - 1; + } +} + +static int choose_log_fs_mgm_entry_size(int qp_per_entry) +{ + int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; + + for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; + i++) { + if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) + break; + } + + return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? 
i : -1; +} + +static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) +{ + switch (dmfs_high_steer_mode) { + case MLX4_STEERING_DMFS_A0_DEFAULT: + return "default performance"; + + case MLX4_STEERING_DMFS_A0_DYNAMIC: + return "dynamic hybrid mode"; + + case MLX4_STEERING_DMFS_A0_STATIC: + return "performance optimized for limited rule configuration (static)"; + + case MLX4_STEERING_DMFS_A0_DISABLE: + return "disabled performance optimized steering"; + + case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: + return "performance optimized steering not supported"; + + default: + return "Unrecognized mode"; + } +} + +#define MLX4_DMFS_A0_STEERING (1UL << 2) + +static void choose_steering_mode(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap) +{ + if (mlx4_log_num_mgm_entry_size <= 0) { + if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { + if (dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + mlx4_err(dev, "DMFS high rate mode not supported\n"); + else + dev->caps.dmfs_high_steer_mode = + MLX4_STEERING_DMFS_A0_STATIC; + } + } + + if (mlx4_log_num_mgm_entry_size <= 0 && + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && + (!mlx4_is_mfunc(dev) || + (dev_cap->fs_max_num_qp_per_entry >= + (dev->persist->num_vfs + 1))) && + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= + MLX4_MIN_MGM_LOG_ENTRY_SIZE) { + dev->oper_log_mgm_entry_size = + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); + dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; + dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; + dev->caps.fs_log_max_ucast_qp_range_size = + dev_cap->fs_log_max_ucast_qp_range_size; + } else { + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) + dev->caps.steering_mode = MLX4_STEERING_MODE_B0; + else { + dev->caps.steering_mode = MLX4_STEERING_MODE_A0; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) + mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); + } + dev->oper_log_mgm_entry_size = + mlx4_log_num_mgm_entry_size > 0 ? + mlx4_log_num_mgm_entry_size : + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; + dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); + } + mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", + mlx4_steering_mode_str(dev->caps.steering_mode), + dev->oper_log_mgm_entry_size, + mlx4_log_num_mgm_entry_size); +} + +static void choose_tunnel_offload_mode(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap) +{ + if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; + else + dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; + + mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode + == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? 
"vxlan" : "none"); +} + +static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) +{ + int i; + struct mlx4_port_cap port_cap; + + if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) + return -EINVAL; + + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_dev_port(dev, i, &port_cap)) { + mlx4_err(dev, + "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); + } else if ((dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_DEFAULT) && + (port_cap.dmfs_optimized_state == + !!(dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_DISABLE))) { + mlx4_err(dev, + "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", + dmfs_high_rate_steering_mode_str( + dev->caps.dmfs_high_steer_mode), + (port_cap.dmfs_optimized_state ? + "enabled" : "disabled")); + } + } + + return 0; +} + +static int mlx4_init_fw(struct mlx4_dev *dev) +{ + struct mlx4_mod_stat_cfg mlx4_cfg; + int err = 0; + + if (!mlx4_is_slave(dev)) { + err = mlx4_QUERY_FW(dev); + if (err) { + if (err == -EACCES) + mlx4_info(dev, "non-primary physical function, skipping\n"); + else + mlx4_err(dev, "QUERY_FW command failed, aborting\n"); + return err; + } + + err = mlx4_load_fw(dev); + if (err) { + mlx4_err(dev, "Failed to start FW, aborting\n"); + return err; + } + + mlx4_cfg.log_pg_sz_m = 1; + mlx4_cfg.log_pg_sz = 0; + err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); + if (err) + mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); + } + + return err; +} + +static int mlx4_init_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_adapter adapter; + struct mlx4_dev_cap dev_cap; + struct mlx4_profile profile; + struct mlx4_init_hca_param init_hca; + u64 icm_size; + struct mlx4_config_dev_params params; + int err; + + if (!mlx4_is_slave(dev)) { + err = mlx4_dev_cap(dev, &dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); + return err; + } + + choose_steering_mode(dev, &dev_cap); + choose_tunnel_offload_mode(dev, &dev_cap); + + if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && + mlx4_is_master(dev)) + dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; + + err = mlx4_get_phys_port_id(dev); + if (err) + mlx4_err(dev, "Fail to get physical port id\n"); + + if (mlx4_is_master(dev)) + mlx4_parav_master_pf_caps(dev); + + if (mlx4_low_memory_profile()) { + mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); + profile = low_mem_profile; + } else { + profile = default_profile; + } + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) + profile.num_mcg = MLX4_FS_NUM_MCG; + + icm_size = mlx4_make_profile(dev, &profile, &dev_cap, + &init_hca); + if ((long long) icm_size < 0) { + err = icm_size; + return err; + } + + dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; + + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + init_hca.uar_page_sz = PAGE_SHIFT - 12; + init_hca.mw_enabled = 0; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || + dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) + init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; + + err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); + if (err) + return err; + + err = mlx4_INIT_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "INIT_HCA command failed, aborting\n"); + goto err_free_icm; + } + + if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + err = mlx4_query_func(dev, &dev_cap); + if (err < 0) { + mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); + goto err_close; + } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { + dev->caps.num_eqs = dev_cap.max_eqs; + dev->caps.reserved_eqs = dev_cap.reserved_eqs; + dev->caps.reserved_uars = dev_cap.reserved_uars; + } + } + + /* + * If TS is supported by FW + * read HCA frequency by QUERY_HCA command + */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { + memset(&init_hca, 0, sizeof(init_hca)); + err = mlx4_QUERY_HCA(dev, &init_hca); + if (err) { + mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + } else { + dev->caps.hca_core_clock = + init_hca.hca_core_clock; + } + + /* In case we got HCA frequency 0 - disable timestamping + * to avoid dividing by zero + */ + if (!dev->caps.hca_core_clock) { + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_err(dev, + "HCA frequency is 0 - timestamping is not supported\n"); + } else if (map_internal_clock(dev)) { + /* + * Map internal clock, + * in case of failure disable timestamping + */ + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; + mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); + } + } + + if (dev->caps.dmfs_high_steer_mode != + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { + if (mlx4_validate_optimized_steering(dev)) + mlx4_warn(dev, "Optimized steering validation failed\n"); + + if (dev->caps.dmfs_high_steer_mode == + MLX4_STEERING_DMFS_A0_DISABLE) { + dev->caps.dmfs_high_rate_qpn_base = + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + dev->caps.dmfs_high_rate_qpn_range = + MLX4_A0_STEERING_TABLE_SIZE; + } + + mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", + dmfs_high_rate_steering_mode_str( + dev->caps.dmfs_high_steer_mode)); + } + } else { + err = mlx4_init_slave(dev); + if (err) { + if (err != -EPROBE_DEFER) + mlx4_err(dev, "Failed to initialize slave\n"); + return err; + } + + err = mlx4_slave_cap(dev); + if (err) { + mlx4_err(dev, "Failed to obtain slave caps\n"); + goto err_close; + } + } + + if (map_bf_area(dev)) + mlx4_dbg(dev, "Failed to map blue flame area\n"); + + /*Only the master set the ports, all the rest got it from it.*/ + if (!mlx4_is_slave(dev)) + mlx4_set_port_mask(dev); + + err = mlx4_QUERY_ADAPTER(dev, &adapter); + if (err) { + mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); + goto unmap_bf; + } + + /* Query CONFIG_DEV parameters */ + err = mlx4_config_dev_retrieval(dev, ¶ms); + if (err && err != -ENOTSUPP) { + mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); + } else if (!err) { + dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; + dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; + } + priv->eq_table.inta_pin = adapter.inta_pin; + memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); + + return 0; + +unmap_bf: + unmap_internal_clock(dev); + unmap_bf_area(dev); + + if (mlx4_is_slave(dev)) { + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + } + +err_close: + if (mlx4_is_slave(dev)) + mlx4_slave_exit(dev); + else + mlx4_CLOSE_HCA(dev, 0); + +err_free_icm: + if (!mlx4_is_slave(dev)) + mlx4_free_icms(dev); + + return err; +} + +static int mlx4_init_counters_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int nent; + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) + return -ENOENT; + + nent = dev->caps.max_counters; + return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); +} + +static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); +} + +int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) + return -ENOENT; + + *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); + if (*idx == -1) + return -ENOMEM; + + return 0; +} + +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, + RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *idx = get_param_l(&out_param); + + return err; + } + return __mlx4_counter_alloc(dev, idx); +} +EXPORT_SYMBOL_GPL(mlx4_counter_alloc); + +void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); + return; +} + +void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, idx); + mlx4_cmd(dev, in_param, 
RES_COUNTER, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + return; + } + __mlx4_counter_free(dev, idx); +} +EXPORT_SYMBOL_GPL(mlx4_counter_free); + +void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} +EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); + +__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->mfunc.master.vf_admin[entry].vport[port].guid; +} +EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); + +void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + __be64 guid; + + /* hw GUID */ + if (entry == 0) + return; + + get_random_bytes((char *)&guid, sizeof(guid)); + guid &= ~(cpu_to_be64(1ULL << 56)); + guid |= cpu_to_be64(1ULL << 57); + priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; +} + +static int mlx4_setup_hca(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + int port; + __be32 ib_port_default_caps; + + err = mlx4_init_uar_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); + return err; + } + + err = mlx4_uar_alloc(dev, &priv->driver_uar); + if (err) { + mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); + goto err_uar_table_free; + } + + priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); + if (!priv->kar) { + mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); + err = -ENOMEM; + goto err_uar_free; + } + + err = mlx4_init_pd_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); + goto err_kar_unmap; + } + + err = mlx4_init_xrcd_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); + goto err_pd_table_free; + } + + err = mlx4_init_mr_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); + goto err_xrcd_table_free; + } + + if (!mlx4_is_slave(dev)) { + err = mlx4_init_mcg_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); + goto err_mr_table_free; + } + err = mlx4_config_mad_demux(dev); + if (err) { + mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); + goto err_mcg_table_free; + } + } + + err = mlx4_init_eq_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); + goto err_mcg_table_free; + } + + err = mlx4_cmd_use_events(dev); + if (err) { + mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); + goto err_eq_table_free; + } + + err = mlx4_NOP(dev); + if (err) { + if (dev->flags & MLX4_FLAG_MSI_X) { + mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", + priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + mlx4_warn(dev, "Trying again without MSI-X\n"); + } else { + mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", + priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); + } + + goto err_cmd_poll; + } + + mlx4_dbg(dev, "NOP command IRQ test passed\n"); + + err = mlx4_init_cq_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); + goto err_cmd_poll; + } + + err = mlx4_init_srq_table(dev); + if (err) { + 
mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); + goto err_cq_table_free; + } + + err = mlx4_init_qp_table(dev); + if (err) { + mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); + goto err_srq_table_free; + } + + err = mlx4_init_counters_table(dev); + if (err && err != -ENOENT) { + mlx4_err(dev, "Failed to initialize counters table, aborting\n"); + goto err_qp_table_free; + } + + if (!mlx4_is_slave(dev)) { + for (port = 1; port <= dev->caps.num_ports; port++) { + ib_port_default_caps = 0; + err = mlx4_get_port_ib_caps(dev, port, + &ib_port_default_caps); + if (err) + mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", + port, err); + dev->caps.ib_port_def_cap[port] = ib_port_default_caps; + + /* initialize per-slave default ib port capabilities */ + if (mlx4_is_master(dev)) { + int i; + for (i = 0; i < dev->num_slaves; i++) { + if (i == mlx4_master_func_num(dev)) + continue; + priv->mfunc.master.slave_state[i].ib_cap_mask[port] = + ib_port_default_caps; + } + } + + if (mlx4_is_mfunc(dev)) + dev->caps.port_ib_mtu[port] = IB_MTU_2048; + else + dev->caps.port_ib_mtu[port] = IB_MTU_4096; + + err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? + dev->caps.pkey_table_len[port] : -1); + if (err) { + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); + goto err_counters_table_free; + } + } + } + + return 0; + +err_counters_table_free: + mlx4_cleanup_counters_table(dev); + +err_qp_table_free: + mlx4_cleanup_qp_table(dev); + +err_srq_table_free: + mlx4_cleanup_srq_table(dev); + +err_cq_table_free: + mlx4_cleanup_cq_table(dev); + +err_cmd_poll: + mlx4_cmd_use_polling(dev); + +err_eq_table_free: + mlx4_cleanup_eq_table(dev); + +err_mcg_table_free: + if (!mlx4_is_slave(dev)) + mlx4_cleanup_mcg_table(dev); + +err_mr_table_free: + mlx4_cleanup_mr_table(dev); + +err_xrcd_table_free: + mlx4_cleanup_xrcd_table(dev); + +err_pd_table_free: + mlx4_cleanup_pd_table(dev); + +err_kar_unmap: + iounmap(priv->kar); + +err_uar_free: + mlx4_uar_free(dev, &priv->driver_uar); + +err_uar_table_free: + mlx4_cleanup_uar_table(dev); + return err; +} + +static void mlx4_enable_msi_x(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct msix_entry *entries; + int i; + + if (msi_x) { + int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; + + nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, + nreq); + + entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); + if (!entries) + goto no_msi; + + for (i = 0; i < nreq; ++i) + entries[i].entry = i; + + nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, + nreq); + + if (nreq < 0) { + kfree(entries); + goto no_msi; + } else if (nreq < MSIX_LEGACY_SZ + + dev->caps.num_ports * MIN_MSIX_P_PORT) { + /*Working in legacy mode , all EQ's shared*/ + dev->caps.comp_pool = 0; + dev->caps.num_comp_vectors = nreq - 1; + } else { + dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; + dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; + } + for (i = 0; i < nreq; ++i) + priv->eq_table.eq[i].irq = entries[i].vector; + + dev->flags |= MLX4_FLAG_MSI_X; + + kfree(entries); + return; + } + +no_msi: + dev->caps.num_comp_vectors = 1; + dev->caps.comp_pool = 0; + + for (i = 0; i < 2; ++i) + priv->eq_table.eq[i].irq = dev->persist->pdev->irq; +} + +static int mlx4_init_port_info(struct mlx4_dev *dev, int port) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + int err = 0; + + info->dev = dev; + info->port = port; + if (!mlx4_is_slave(dev)) { 
+ mlx4_init_mac_table(dev, &info->mac_table); + mlx4_init_vlan_table(dev, &info->vlan_table); + mlx4_init_roce_gid_table(dev, &info->gid_table); + info->base_qpn = mlx4_get_base_qpn(dev, port); + } + + sprintf(info->dev_name, "mlx4_port%d", port); + info->port_attr.attr.name = info->dev_name; + if (mlx4_is_mfunc(dev)) + info->port_attr.attr.mode = S_IRUGO; + else { + info->port_attr.attr.mode = S_IRUGO | S_IWUSR; + info->port_attr.store = set_port_type; + } + info->port_attr.show = show_port_type; + sysfs_attr_init(&info->port_attr.attr); + + err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); + if (err) { + mlx4_err(dev, "Failed to create file for port %d\n", port); + info->port = -1; + } + + sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); + info->port_mtu_attr.attr.name = info->dev_mtu_name; + if (mlx4_is_mfunc(dev)) + info->port_mtu_attr.attr.mode = S_IRUGO; + else { + info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; + info->port_mtu_attr.store = set_port_ib_mtu; + } + info->port_mtu_attr.show = show_port_ib_mtu; + sysfs_attr_init(&info->port_mtu_attr.attr); + + err = device_create_file(&dev->persist->pdev->dev, + &info->port_mtu_attr); + if (err) { + mlx4_err(dev, "Failed to create mtu file for port %d\n", port); + device_remove_file(&info->dev->persist->pdev->dev, + &info->port_attr); + info->port = -1; + } + + return err; +} + +static void mlx4_cleanup_port_info(struct mlx4_port_info *info) +{ + if (info->port < 0) + return; + + device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); + device_remove_file(&info->dev->persist->pdev->dev, + &info->port_mtu_attr); +} + +static int mlx4_init_steering(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int num_entries = dev->caps.num_ports; + int i, j; + + priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); + if (!priv->steer) + return -ENOMEM; + + for (i = 0; i < num_entries; i++) + for (j = 0; j < MLX4_NUM_STEERS; j++) { + INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); + INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); + } + return 0; +} + +static void mlx4_clear_steering(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_steer_index *entry, *tmp_entry; + struct mlx4_promisc_qp *pqp, *tmp_pqp; + int num_entries = dev->caps.num_ports; + int i, j; + + for (i = 0; i < num_entries; i++) { + for (j = 0; j < MLX4_NUM_STEERS; j++) { + list_for_each_entry_safe(pqp, tmp_pqp, + &priv->steer[i].promisc_qps[j], + list) { + list_del(&pqp->list); + kfree(pqp); + } + list_for_each_entry_safe(entry, tmp_entry, + &priv->steer[i].steer_entries[j], + list) { + list_del(&entry->list); + list_for_each_entry_safe(pqp, tmp_pqp, + &entry->duplicates, + list) { + list_del(&pqp->list); + kfree(pqp); + } + kfree(entry); + } + } + } + kfree(priv->steer); +} + +static int extended_func_num(struct pci_dev *pdev) +{ + return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); +} + +#define MLX4_OWNER_BASE 0x8069c +#define MLX4_OWNER_SIZE 4 + +static int mlx4_get_ownership(struct mlx4_dev *dev) +{ + void __iomem *owner; + u32 ret; + + if (pci_channel_offline(dev->persist->pdev)) + return -EIO; + + owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_OWNER_BASE, + MLX4_OWNER_SIZE); + if (!owner) { + mlx4_err(dev, "Failed to obtain ownership bit\n"); + return -ENOMEM; + } + + ret = readl(owner); + iounmap(owner); + return (int) !!ret; +} + +static void mlx4_free_ownership(struct mlx4_dev *dev) +{ + void __iomem *owner; + + if 
(pci_channel_offline(dev->persist->pdev)) + return; + + owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_OWNER_BASE, + MLX4_OWNER_SIZE); + if (!owner) { + mlx4_err(dev, "Failed to obtain ownership bit\n"); + return; + } + writel(0, owner); + msleep(1000); + iounmap(owner); +} + +#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ + !!((flags) & MLX4_FLAG_MASTER)) + +static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, + u8 total_vfs, int existing_vfs, int reset_flow) +{ + u64 dev_flags = dev->flags; + int err = 0; + + if (reset_flow) { + dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), + GFP_KERNEL); + if (!dev->dev_vfs) + goto free_mem; + return dev_flags; + } + + atomic_inc(&pf_loading); + if (dev->flags & MLX4_FLAG_SRIOV) { + if (existing_vfs != total_vfs) { + mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", + existing_vfs, total_vfs); + total_vfs = existing_vfs; + } + } + + dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); + if (NULL == dev->dev_vfs) { + mlx4_err(dev, "Failed to allocate memory for VFs\n"); + goto disable_sriov; + } + + if (!(dev->flags & MLX4_FLAG_SRIOV)) { + mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); + err = pci_enable_sriov(pdev, total_vfs); + } + if (err) { + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", + err); + goto disable_sriov; + } else { + mlx4_warn(dev, "Running in master mode\n"); + dev_flags |= MLX4_FLAG_SRIOV | + MLX4_FLAG_MASTER; + dev_flags &= ~MLX4_FLAG_SLAVE; + dev->persist->num_vfs = total_vfs; + } + return dev_flags; + +disable_sriov: + atomic_dec(&pf_loading); +free_mem: + dev->persist->num_vfs = 0; + kfree(dev->dev_vfs); + return dev_flags & ~MLX4_FLAG_MASTER; +} + +enum { + MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, +}; + +static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, + int *nvfs) +{ + int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; + /* Checking for 64 VFs as a limitation of CX2 */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && + requested_vfs >= 64) { + mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", + requested_vfs); + return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; + } + return 0; +} + +static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, + int total_vfs, int *nvfs, struct mlx4_priv *priv, + int reset_flow) +{ + struct mlx4_dev *dev; + unsigned sum = 0; + int err; + int port; + int i; + struct mlx4_dev_cap *dev_cap = NULL; + int existing_vfs = 0; + + dev = &priv->dev; + + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); + + mutex_init(&priv->port_mutex); + mutex_init(&priv->bond_mutex); + + INIT_LIST_HEAD(&priv->pgdir_list); + mutex_init(&priv->pgdir_mutex); + + INIT_LIST_HEAD(&priv->bf_list); + mutex_init(&priv->bf_mutex); + + dev->rev_id = pdev->revision; + dev->numa_node = dev_to_node(&pdev->dev); + + /* Detect if this device is a virtual function */ + if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { + mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); + dev->flags |= MLX4_FLAG_SLAVE; + } else { + /* We reset the device and enable SRIOV only for physical + * devices. 
Try to claim ownership on the device; + * if already taken, skip -- do not allow multiple PFs */ + err = mlx4_get_ownership(dev); + if (err) { + if (err < 0) + return err; + else { + mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); + return -EINVAL; + } + } + + atomic_set(&priv->opreq_count, 0); + INIT_WORK(&priv->opreq_task, mlx4_opreq_action); + + /* + * Now reset the HCA before we touch the PCI capabilities or + * attempt a firmware command, since a boot ROM may have left + * the HCA in an undefined state. + */ + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting\n"); + goto err_sriov; + } + + if (total_vfs) { + dev->flags = MLX4_FLAG_MASTER; + existing_vfs = pci_num_vf(pdev); + if (existing_vfs) + dev->flags |= MLX4_FLAG_SRIOV; + dev->persist->num_vfs = total_vfs; + } + } + + /* on load remove any previous indication of internal error, + * device is up. + */ + dev->persist->state = MLX4_DEVICE_STATE_UP; + +slave_start: + err = mlx4_cmd_init(dev); + if (err) { + mlx4_err(dev, "Failed to init command interface, aborting\n"); + goto err_sriov; + } + + /* In slave functions, the communication channel must be initialized + * before posting commands. Also, init num_slaves before calling + * mlx4_init_hca */ + if (mlx4_is_mfunc(dev)) { + if (mlx4_is_master(dev)) { + dev->num_slaves = MLX4_MAX_NUM_SLAVES; + + } else { + dev->num_slaves = 0; + err = mlx4_multi_func_init(dev); + if (err) { + mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); + goto err_cmd; + } + } + } + + err = mlx4_init_fw(dev); + if (err) { + mlx4_err(dev, "Failed to init fw, aborting.\n"); + goto err_mfunc; + } + + if (mlx4_is_master(dev)) { + /* when we hit the goto slave_start below, dev_cap already initialized */ + if (!dev_cap) { + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); + + if (!dev_cap) { + err = -ENOMEM; + goto err_fw; + } + + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, + total_vfs, + existing_vfs, + reset_flow); + + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + dev->flags = dev_flags; + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_sriov; + } + err = mlx4_reset(dev); + if (err) { + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + goto err_sriov; + } + goto slave_start; + } + } else { + /* Legacy mode FW requires SRIOV to be enabled before + * doing QUERY_DEV_CAP, since max_eq's value is different if + * SRIOV is enabled. 
+ */ + memset(dev_cap, 0, sizeof(*dev_cap)); + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); + if (err) { + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + goto err_fw; + } + + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) + goto err_fw; + } + } + + err = mlx4_init_hca(dev); + if (err) { + if (err == -EACCES) { + /* Not primary Physical function + * Running in slave mode */ + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + /* We're not a PF */ + if (dev->flags & MLX4_FLAG_SRIOV) { + if (!existing_vfs) + pci_disable_sriov(pdev); + if (mlx4_is_master(dev) && !reset_flow) + atomic_dec(&pf_loading); + dev->flags &= ~MLX4_FLAG_SRIOV; + } + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + dev->flags |= MLX4_FLAG_SLAVE; + dev->flags &= ~MLX4_FLAG_MASTER; + goto slave_start; + } else + goto err_fw; + } + + if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, + existing_vfs, reset_flow); + + if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); + dev->flags = dev_flags; + err = mlx4_cmd_init(dev); + if (err) { + /* Only VHCR is cleaned up, so could still + * send FW commands + */ + mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); + goto err_close; + } + } else { + dev->flags = dev_flags; + } + + if (!SRIOV_VALID_STATE(dev->flags)) { + mlx4_err(dev, "Invalid SRIOV state\n"); + goto err_close; + } + } + + /* check if the device is functioning at its maximum possible speed. + * No return code for this call, just warn the user in case of PCI + * express device capabilities are under-satisfied by the bus. + */ + if (!mlx4_is_slave(dev)) + mlx4_check_pcie_caps(dev); + + /* In master functions, the communication channel must be initialized + * after obtaining its address from fw */ + if (mlx4_is_master(dev)) { + int ib_ports = 0; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_ports++; + + if (ib_ports && + (num_vfs_argc > 1 || probe_vfs_argc > 1)) { + mlx4_err(dev, + "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); + err = -EINVAL; + goto err_close; + } + if (dev->caps.num_ports < 2 && + num_vfs_argc > 1) { + err = -EINVAL; + mlx4_err(dev, + "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", + dev->caps.num_ports); + goto err_close; + } + memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); + + for (i = 0; + i < sizeof(dev->persist->nvfs)/ + sizeof(dev->persist->nvfs[0]); i++) { + unsigned j; + + for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { + dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; + dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : + dev->caps.num_ports; + } + } + + /* In master functions, the communication channel + * must be initialized after obtaining its address from fw + */ + err = mlx4_multi_func_init(dev); + if (err) { + mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); + goto err_close; + } + } + + err = mlx4_alloc_eq_table(dev); + if (err) + goto err_master_mfunc; + + priv->msix_ctl.pool_bm = 0; + mutex_init(&priv->msix_ctl.pool_lock); + + mlx4_enable_msi_x(dev); + if ((mlx4_is_mfunc(dev)) && + !(dev->flags & MLX4_FLAG_MSI_X)) { + err = -ENOSYS; + mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); + goto err_free_eq; + } + + if (!mlx4_is_slave(dev)) { + err = mlx4_init_steering(dev); + if (err) + goto err_disable_msix; + } + + err = mlx4_setup_hca(dev); + if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && + !mlx4_is_mfunc(dev)) { + dev->flags &= ~MLX4_FLAG_MSI_X; + dev->caps.num_comp_vectors = 1; + dev->caps.comp_pool = 0; + pci_disable_msix(pdev); + err = mlx4_setup_hca(dev); + } + + if (err) + goto err_steer; + + mlx4_init_quotas(dev); + /* When PF resources are ready arm its comm channel to enable + * getting commands + */ + if (mlx4_is_master(dev)) { + err = mlx4_ARM_COMM_CHANNEL(dev); + if (err) { + mlx4_err(dev, " Failed to arm comm channel eq: %x\n", + err); + goto err_steer; + } + } + + for (port = 1; port <= dev->caps.num_ports; port++) { + err = mlx4_init_port_info(dev, port); + if (err) + goto err_port; + } + + priv->v2p.port1 = 1; + priv->v2p.port2 = 2; + + err = mlx4_register_device(dev); + if (err) + goto err_port; + + mlx4_request_modules(dev); + + mlx4_sense_init(dev); + mlx4_start_sense(dev); + + priv->removed = 0; + + if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) + atomic_dec(&pf_loading); + + kfree(dev_cap); + return 0; + +err_port: + for (--port; port >= 1; --port) + mlx4_cleanup_port_info(&priv->port[port]); + + mlx4_cleanup_counters_table(dev); + mlx4_cleanup_qp_table(dev); + mlx4_cleanup_srq_table(dev); + mlx4_cleanup_cq_table(dev); + mlx4_cmd_use_polling(dev); + mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); + mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); + mlx4_cleanup_pd_table(dev); + mlx4_cleanup_uar_table(dev); + +err_steer: + if (!mlx4_is_slave(dev)) + mlx4_clear_steering(dev); + +err_disable_msix: + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + +err_free_eq: + mlx4_free_eq_table(dev); + +err_master_mfunc: + if (mlx4_is_master(dev)) { + mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); + mlx4_multi_func_cleanup(dev); + } + + if (mlx4_is_slave(dev)) { + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + } + +err_close: + mlx4_close_hca(dev); + +err_fw: + mlx4_close_fw(dev); + +err_mfunc: + if (mlx4_is_slave(dev)) + mlx4_multi_func_cleanup(dev); + +err_cmd: + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + +err_sriov: + if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { + pci_disable_sriov(pdev); + dev->flags &= ~MLX4_FLAG_SRIOV; + } + + if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) + atomic_dec(&pf_loading); + + kfree(priv->dev.dev_vfs); + + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + + kfree(dev_cap); + return err; +} + +static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, + struct mlx4_priv *priv) +{ + int err; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + const int 
param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { + {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; + unsigned total_vfs = 0; + unsigned int i; + + pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS + * per port, we must limit the number of VFs to 63 (since there are + * 128 MACs) + */ + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; + total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { + nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; + if (nvfs[i] < 0) { + dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); + err = -EINVAL; + goto err_disable_pdev; + } + } + for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; + i++) { + prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; + if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { + dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); + err = -EINVAL; + goto err_disable_pdev; + } + } + if (total_vfs >= MLX4_MAX_NUM_VF) { + dev_err(&pdev->dev, + "Requested more VF's (%d) than allowed (%d)\n", + total_vfs, MLX4_MAX_NUM_VF - 1); + err = -EINVAL; + goto err_disable_pdev; + } + + for (i = 0; i < MLX4_MAX_PORTS; i++) { + if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + dev_err(&pdev->dev, + "Requested more VF's (%d) for port (%d) than allowed (%d)\n", + nvfs[i] + nvfs[2], i + 1, + MLX4_MAX_NUM_VF_P_PORT - 1); + err = -EINVAL; + goto err_disable_pdev; + } + } + + /* Check for BARs. */ + if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && + !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", + pci_dev_data, pci_resource_flags(pdev, 0)); + err = -ENODEV; + goto err_disable_pdev; + } + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing UAR, aborting\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); + goto err_release_regions; + } + } + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); + goto err_release_regions; + } + } + + /* Allow large DMA segments, up to the firmware limit of 1 GB */ + dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); + /* Detect if this device is a virtual function */ + if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { + /* When acting as pf, we normally skip vfs unless explicitly + * requested to probe them.
+ */ + if (total_vfs) { + unsigned vfs_offset = 0; + + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && + vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset += nvfs[i], i++) + ; + if (i == sizeof(nvfs)/sizeof(nvfs[0])) { + err = -ENODEV; + goto err_release_regions; + } + if ((extended_func_num(pdev) - vfs_offset) + > prb_vf[i]) { + dev_warn(&pdev->dev, "Skipping virtual function:%d\n", + extended_func_num(pdev)); + err = -ENODEV; + goto err_release_regions; + } + } + } + + err = mlx4_catas_init(&priv->dev); + if (err) + goto err_release_regions; + + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); + if (err) + goto err_catas; + + return 0; + +err_catas: + mlx4_catas_end(&priv->dev); + +err_release_regions: + pci_release_regions(pdev); + +err_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mlx4_priv *priv; + struct mlx4_dev *dev; + int ret; + + printk_once(KERN_INFO "%s", mlx4_version); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev = &priv->dev; + dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); + if (!dev->persist) { + kfree(priv); + return -ENOMEM; + } + dev->persist->pdev = pdev; + dev->persist->dev = dev; + pci_set_drvdata(pdev, dev->persist); + priv->pci_dev_data = id->driver_data; + mutex_init(&dev->persist->device_state_mutex); + mutex_init(&dev->persist->interface_state_mutex); + + ret = __mlx4_init_one(pdev, id->driver_data, priv); + if (ret) { + kfree(dev->persist); + kfree(priv); + } else { + pci_save_state(pdev); + } + + return ret; +} + +static void mlx4_clean_dev(struct mlx4_dev *dev) +{ + struct mlx4_dev_persistent *persist = dev->persist; + struct mlx4_priv *priv = mlx4_priv(dev); + unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); + + memset(priv, 0, sizeof(*priv)); + priv->dev.persist = persist; + priv->dev.flags = flags; +} + +static void mlx4_unload_one(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int pci_dev_data; + int p, i; + + if (priv->removed) + return; + + /* saving current ports type for further use */ + for (i = 0; i < dev->caps.num_ports; i++) { + dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; + dev->persist->curr_port_poss_type[i] = dev->caps. 
+ possible_type[i + 1]; + } + + pci_dev_data = priv->pci_dev_data; + + mlx4_stop_sense(dev); + mlx4_unregister_device(dev); + + for (p = 1; p <= dev->caps.num_ports; p++) { + mlx4_cleanup_port_info(&priv->port[p]); + mlx4_CLOSE_PORT(dev, p); + } + + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev, + RES_TR_FREE_SLAVES_ONLY); + + mlx4_cleanup_counters_table(dev); + mlx4_cleanup_qp_table(dev); + mlx4_cleanup_srq_table(dev); + mlx4_cleanup_cq_table(dev); + mlx4_cmd_use_polling(dev); + mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); + mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); + mlx4_cleanup_pd_table(dev); + + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev, + RES_TR_FREE_STRUCTS_ONLY); + + iounmap(priv->kar); + mlx4_uar_free(dev, &priv->driver_uar); + mlx4_cleanup_uar_table(dev); + if (!mlx4_is_slave(dev)) + mlx4_clear_steering(dev); + mlx4_free_eq_table(dev); + if (mlx4_is_master(dev)) + mlx4_multi_func_cleanup(dev); + mlx4_close_hca(dev); + mlx4_close_fw(dev); + if (mlx4_is_slave(dev)) + mlx4_multi_func_cleanup(dev); + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); + + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + + kfree(dev->caps.qp0_qkey); + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + kfree(dev->dev_vfs); + + mlx4_clean_dev(dev); + priv->pci_dev_data = pci_dev_data; + priv->removed = 1; +} + +static void mlx4_remove_one(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int active_vfs = 0; + + mutex_lock(&persist->interface_state_mutex); + persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; + mutex_unlock(&persist->interface_state_mutex); + + /* Disabling SR-IOV is not allowed while there are active vf's */ + if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { + active_vfs = mlx4_how_many_lives_vf(dev); + if (active_vfs) { + pr_warn("Removing PF when there are active VF's !!\n"); + pr_warn("Will not disable SR-IOV.\n"); + } + } + + /* device marked to be under deletion running now without the lock + * letting other tasks to be terminated + */ + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + else + mlx4_info(dev, "%s: interface is down\n", __func__); + mlx4_catas_end(dev); + if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { + mlx4_warn(dev, "Disabling SR-IOV\n"); + pci_disable_sriov(pdev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(dev->persist); + kfree(priv); + pci_set_drvdata(pdev, NULL); +} + +static int restore_current_port_types(struct mlx4_dev *dev, + enum mlx4_port_type *types, + enum mlx4_port_type *poss_types) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err, i; + + mlx4_stop_sense(dev); + + mutex_lock(&priv->port_mutex); + for (i = 0; i < dev->caps.num_ports; i++) + dev->caps.possible_type[i + 1] = poss_types[i]; + err = mlx4_change_port_types(dev, types); + mlx4_start_sense(dev); + mutex_unlock(&priv->port_mutex); + + return err; +} + +int mlx4_restart_one(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int pci_dev_data, err, total_vfs; + + pci_dev_data = priv->pci_dev_data; + total_vfs = dev->persist->num_vfs; + 
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); + + mlx4_unload_one(pdev); + err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); + if (err) { + mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", + __func__, pci_name(pdev), err); + return err; + } + + err = restore_current_port_types(dev, dev->persist->curr_port_type, + dev->persist->curr_port_poss_type); + if (err) + mlx4_err(dev, "could not restore original port types (%d)\n", + err); + + return err; +} + +static const struct pci_device_id mlx4_pci_table[] = { + /* MT25408 "Hermon" SDR */ + { PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" DDR */ + { PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" QDR */ + { PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" DDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" QDR PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" EN 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25458 ConnectX EN 10GBASE-T 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ + { PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT26468 ConnectX EN 10GigE PCIe gen2*/ + { PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ + { PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT26478 ConnectX2 40GigE PCIe gen2 */ + { PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT }, + /* MT25400 Family [ConnectX-2 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF }, + /* MT27500 Family [ConnectX-3] */ + { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, + /* MT27500 Family [ConnectX-3 Virtual Function] */ + { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF }, + { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ + { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ + { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ + { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ + { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ + { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ + { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ + { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ + { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ + { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ + { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ + { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, mlx4_pci_table); + +static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + + mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); + mlx4_enter_error_state(persist); + + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + + mutex_unlock(&persist->interface_state_mutex); + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; +} + 
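[Editorial note, not part of the imported driver code.] The two handlers on either side of this point follow the standard PCI error-recovery contract: the PCI core first calls error_detected(), and only if that returns PCI_ERS_RESULT_NEED_RESET does it reset the slot and then call slot_reset(). The fragment below is a minimal sketch of that calling order; the helper name aer_recovery_sketch is hypothetical, and only the callback members, constants, and types come from the surrounding patch.

#include <linux/pci.h>

/* Hypothetical helper, for illustration only: rough order in which the
 * PCI error-recovery core exercises the callbacks defined around this
 * point, assuming a recoverable (non-permanent) channel failure.
 */
static void aer_recovery_sketch(struct pci_dev *pdev,
				const struct pci_error_handlers *eh)
{
	pci_ers_result_t res;

	/* step 1: driver quiesces/unloads the device */
	res = eh->error_detected(pdev, pci_channel_io_frozen);
	if (res == PCI_ERS_RESULT_NEED_RESET) {
		/* step 2: the platform resets the slot before this call,
		 * then the driver re-enables and reloads the device
		 */
		res = eh->slot_reset(pdev);
	}
	/* mlx4 registers no .resume handler, so recovery ends here;
	 * slot_reset() has already reloaded the device state.
	 */
}

Because error_detected() fully unloads the device (mlx4_unload_one) and slot_reset() reloads it (mlx4_load_one plus port-type restore), no separate .resume step is needed in the handler table declared further below.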
+static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int ret; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int total_vfs; + + mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); + ret = pci_enable_device(pdev); + if (ret) { + mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + total_vfs = dev->persist->num_vfs; + memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); + + mutex_lock(&persist->interface_state_mutex); + if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { + ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, + priv, 1); + if (ret) { + mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", + __func__, ret); + goto end; + } + + ret = restore_current_port_types(dev, dev->persist-> + curr_port_type, dev->persist-> + curr_port_poss_type); + if (ret) + mlx4_err(dev, "could not restore original port types (%d)\n", ret); + } +end: + mutex_unlock(&persist->interface_state_mutex); + + return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +static void mlx4_shutdown(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + + mlx4_info(persist->dev, "mlx4_shutdown was called\n"); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + mutex_unlock(&persist->interface_state_mutex); +} + +static const struct pci_error_handlers mlx4_err_handler = { + .error_detected = mlx4_pci_err_detected, + .slot_reset = mlx4_pci_slot_reset, +}; + +static struct pci_driver mlx4_driver = { + .name = DRV_NAME, + .id_table = mlx4_pci_table, + .probe = mlx4_init_one, + .shutdown = mlx4_shutdown, + .remove = mlx4_remove_one, + .err_handler = &mlx4_err_handler, +}; + +static int __init mlx4_verify_params(void) +{ + if ((log_num_mac < 0) || (log_num_mac > 7)) { + pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); + return -1; + } + + if (log_num_vlan != 0) + pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", + MLX4_LOG_NUM_VLANS); + + if (use_prio != 0) + pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); + + if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { + pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", + log_mtts_per_seg); + return -1; + } + + /* Check if module param for ports type has legal combination */ + if (port_type_array[0] == false && port_type_array[1] == true) { + pr_warn("Module parameter configuration ETH/IB is not supported. 
Switching to default configuration IB/IB\n"); + port_type_array[0] = true; + } + + if (mlx4_log_num_mgm_entry_size < -7 || + (mlx4_log_num_mgm_entry_size > 0 && + (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || + mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { + pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", + mlx4_log_num_mgm_entry_size, + MLX4_MIN_MGM_LOG_ENTRY_SIZE, + MLX4_MAX_MGM_LOG_ENTRY_SIZE); + return -1; + } + + return 0; +} + +static int __init mlx4_init(void) +{ + int ret; + + if (mlx4_verify_params()) + return -EINVAL; + + + mlx4_wq = create_singlethread_workqueue("mlx4"); + if (!mlx4_wq) + return -ENOMEM; + + ret = pci_register_driver(&mlx4_driver); + if (ret < 0) + destroy_workqueue(mlx4_wq); + return ret < 0 ? ret : 0; +} + +static void __exit mlx4_cleanup(void) +{ + pci_unregister_driver(&mlx4_driver); + destroy_workqueue(mlx4_wq); +} + +module_init(mlx4_init); +module_exit(mlx4_cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c new file mode 100644 index 000000000..bd9ea0d01 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -0,0 +1,1634 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include + +#include +#include + +#include "mlx4.h" + +static const u8 zero_gid[16]; /* automatically initialized to 0 */ + +int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) +{ + return 1 << dev->oper_log_mgm_entry_size; +} + +int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) +{ + return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); +} + +static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox, + u32 size, + u64 *reg_id) +{ + u64 imm; + int err = 0; + + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + return err; + *reg_id = imm; + + return err; +} + +static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) +{ + int err = 0; + + err = mlx4_cmd(dev, regid, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + return err; +} + +static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, + struct mlx4_cmd_mailbox *mailbox) +{ + return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index, + struct mlx4_cmd_mailbox *mailbox) +{ + return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} + +static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer, + struct mlx4_cmd_mailbox *mailbox) +{ + u32 in_mod; + + in_mod = (u32) port << 16 | steer << 1; + return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1, + MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); +} + +static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + u16 *hash, u8 op_mod) +{ + u64 imm; + int err; + + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod, + MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + if (!err) + *hash = imm; + + return err; +} + +static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_promisc_qp *pqp; + + if (port < 1 || port > dev->caps.num_ports) + return NULL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { + if (pqp->qpn == qpn) + return pqp; + } + /* not found */ + return NULL; +} + +/* + * Add new entry to steering data structure. 
+ * All promisc QPs should be added as well + */ +static int new_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + struct mlx4_steer_index *new_entry; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp = NULL; + u32 prot; + int err; + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + + INIT_LIST_HEAD(&new_entry->duplicates); + new_entry->index = index; + list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]); + + /* If the given qpn is also a promisc qp, + * it should be inserted to duplicates list + */ + pqp = get_promisc_qp(dev, port, steer, qpn); + if (pqp) { + dqp = kmalloc(sizeof *dqp, GFP_KERNEL); + if (!dqp) { + err = -ENOMEM; + goto out_alloc; + } + dqp->qpn = qpn; + list_add_tail(&dqp->list, &new_entry->duplicates); + } + + /* if no promisc qps for this vep, we are done */ + if (list_empty(&s_steer->promisc_qps[steer])) + return 0; + + /* now need to add all the promisc qps to the new + * steering entry, as they should also receive the packets + * destined to this address */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = -ENOMEM; + goto out_alloc; + } + mgm = mailbox->buf; + + err = mlx4_READ_ENTRY(dev, index, mailbox); + if (err) + goto out_mailbox; + + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; + prot = be32_to_cpu(mgm->members_count) >> 30; + list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { + /* don't add already existing qpn */ + if (pqp->qpn == qpn) + continue; + if (members_count == dev->caps.num_qp_per_mgm) { + /* out of space */ + err = -ENOMEM; + goto out_mailbox; + } + + /* add the qpn */ + mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); + } + /* update the qps count and update the entry with all the promisc qps*/ + mgm->members_count = cpu_to_be32(members_count | (prot << 30)); + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + +out_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); + if (!err) + return 0; +out_alloc: + if (dqp) { + list_del(&dqp->list); + kfree(dqp); + } + list_del(&new_entry->list); + kfree(new_entry); + return err; +} + +/* update the data structures with existing steering entry */ +static int existing_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_steer_index *tmp_entry, *entry = NULL; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp; + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + pqp = get_promisc_qp(dev, port, steer, qpn); + if (!pqp) + return 0; /* nothing to do */ + + list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { + if (tmp_entry->index == index) { + entry = tmp_entry; + break; + } + } + if (unlikely(!entry)) { + mlx4_warn(dev, "Steering entry at index %x is not registered\n", index); + return -EINVAL; + } + + /* the given qpn is listed as a promisc qpn + * we need to add it as a duplicate to this entry + * for future references */ + list_for_each_entry(dqp, &entry->duplicates, list) { + if (qpn == dqp->qpn) + return 0; /* qp is already duplicated */ + } + + /* add the qp as a duplicate on this index */ + dqp = kmalloc(sizeof 
*dqp, GFP_KERNEL); + if (!dqp) + return -ENOMEM; + dqp->qpn = qpn; + list_add_tail(&dqp->list, &entry->duplicates); + + return 0; +} + +/* Check whether a qpn is a duplicate on steering entry + * If so, it should not be removed from mgm */ +static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_steer_index *tmp_entry, *entry = NULL; + struct mlx4_promisc_qp *dqp, *tmp_dqp; + + if (port < 1 || port > dev->caps.num_ports) + return NULL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + /* if qp is not promisc, it cannot be duplicated */ + if (!get_promisc_qp(dev, port, steer, qpn)) + return false; + + /* The qp is promisc qp so it is a duplicate on this index + * Find the index entry, and remove the duplicate */ + list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) { + if (tmp_entry->index == index) { + entry = tmp_entry; + break; + } + } + if (unlikely(!entry)) { + mlx4_warn(dev, "Steering entry for index %x is not registered\n", index); + return false; + } + list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) { + if (dqp->qpn == qpn) { + list_del(&dqp->list); + kfree(dqp); + } + } + return true; +} + +/* Returns true if all the QPs != tqpn contained in this entry + * are Promisc QPs. Returns false otherwise. + */ +static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 tqpn, + u32 *members_count) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 m_count; + bool ret = false; + int i; + + if (port < 1 || port > dev->caps.num_ports) + return false; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return false; + mgm = mailbox->buf; + + if (mlx4_READ_ENTRY(dev, index, mailbox)) + goto out; + m_count = be32_to_cpu(mgm->members_count) & 0xffffff; + if (members_count) + *members_count = m_count; + + for (i = 0; i < m_count; i++) { + u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; + if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { + /* the qp is not promisc, the entry can't be removed */ + goto out; + } + } + ret = true; +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} + +/* IF a steering entry contains only promisc QPs, it can be removed. */ +static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, + unsigned int index, u32 tqpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_steer_index *entry = NULL, *tmp_entry; + u32 members_count; + bool ret = false; + + if (port < 1 || port > dev->caps.num_ports) + return NULL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + if (!promisc_steering_entry(dev, port, steer, index, + tqpn, &members_count)) + goto out; + + /* All the qps currently registered for this entry are promiscuous, + * Checking for duplicates */ + ret = true; + list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) { + if (entry->index == index) { + if (list_empty(&entry->duplicates) || + members_count == 1) { + struct mlx4_promisc_qp *pqp, *tmp_pqp; + /* If there is only 1 entry in duplicates then + * this is the QP we want to delete, going over + * the list and deleting the entry. 
+ */ + list_del(&entry->list); + list_for_each_entry_safe(pqp, tmp_pqp, + &entry->duplicates, + list) { + list_del(&pqp->list); + kfree(pqp); + } + kfree(entry); + } else { + /* This entry contains duplicates so it shouldn't be removed */ + ret = false; + goto out; + } + } + } + +out: + return ret; +} + +static int add_promisc_qp(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, u32 qpn) +{ + struct mlx4_steer *s_steer; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + struct mlx4_steer_index *entry; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp; + u32 members_count; + u32 prot; + int i; + bool found; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + + mutex_lock(&priv->mcg_table.mutex); + + if (get_promisc_qp(dev, port, steer, qpn)) { + err = 0; /* Noting to do, already exists */ + goto out_mutex; + } + + pqp = kmalloc(sizeof *pqp, GFP_KERNEL); + if (!pqp) { + err = -ENOMEM; + goto out_mutex; + } + pqp->qpn = qpn; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = -ENOMEM; + goto out_alloc; + } + mgm = mailbox->buf; + + if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { + /* The promisc QP needs to be added for each one of the steering + * entries. If it already exists, needs to be added as + * a duplicate for this entry. + */ + list_for_each_entry(entry, + &s_steer->steer_entries[steer], + list) { + err = mlx4_READ_ENTRY(dev, entry->index, mailbox); + if (err) + goto out_mailbox; + + members_count = be32_to_cpu(mgm->members_count) & + 0xffffff; + prot = be32_to_cpu(mgm->members_count) >> 30; + found = false; + for (i = 0; i < members_count; i++) { + if ((be32_to_cpu(mgm->qp[i]) & + MGM_QPN_MASK) == qpn) { + /* Entry already exists. + * Add to duplicates. 
+ */ + dqp = kmalloc(sizeof(*dqp), GFP_KERNEL); + if (!dqp) { + err = -ENOMEM; + goto out_mailbox; + } + dqp->qpn = qpn; + list_add_tail(&dqp->list, + &entry->duplicates); + found = true; + } + } + if (!found) { + /* Need to add the qpn to mgm */ + if (members_count == + dev->caps.num_qp_per_mgm) { + /* entry is full */ + err = -ENOMEM; + goto out_mailbox; + } + mgm->qp[members_count++] = + cpu_to_be32(qpn & MGM_QPN_MASK); + mgm->members_count = + cpu_to_be32(members_count | + (prot << 30)); + err = mlx4_WRITE_ENTRY(dev, entry->index, + mailbox); + if (err) + goto out_mailbox; + } + } + } + + /* add the new qpn to list of promisc qps */ + list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); + /* now need to add all the promisc qps to default entry */ + memset(mgm, 0, sizeof *mgm); + members_count = 0; + list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { + if (members_count == dev->caps.num_qp_per_mgm) { + /* entry is full */ + err = -ENOMEM; + goto out_list; + } + mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); + } + mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); + + err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); + if (err) + goto out_list; + + mlx4_free_cmd_mailbox(dev, mailbox); + mutex_unlock(&priv->mcg_table.mutex); + return 0; + +out_list: + list_del(&pqp->list); +out_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); +out_alloc: + kfree(pqp); +out_mutex: + mutex_unlock(&priv->mcg_table.mutex); + return err; +} + +static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, + enum mlx4_steer_type steer, u32 qpn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_steer *s_steer; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + struct mlx4_steer_index *entry, *tmp_entry; + struct mlx4_promisc_qp *pqp; + struct mlx4_promisc_qp *dqp; + u32 members_count; + bool found; + bool back_to_list = false; + int i; + int err; + + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + mutex_lock(&priv->mcg_table.mutex); + + pqp = get_promisc_qp(dev, port, steer, qpn); + if (unlikely(!pqp)) { + mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); + /* nothing to do */ + err = 0; + goto out_mutex; + } + + /*remove from list of promisc qps */ + list_del(&pqp->list); + + /* set the default entry not to include the removed one */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = -ENOMEM; + back_to_list = true; + goto out_list; + } + mgm = mailbox->buf; + members_count = 0; + list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) + mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); + mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30); + + err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox); + if (err) + goto out_mailbox; + + if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) { + /* Remove the QP from all the steering entries */ + list_for_each_entry_safe(entry, tmp_entry, + &s_steer->steer_entries[steer], + list) { + found = false; + list_for_each_entry(dqp, &entry->duplicates, list) { + if (dqp->qpn == qpn) { + found = true; + break; + } + } + if (found) { + /* A duplicate, no need to change the MGM, + * only update the duplicates list + */ + list_del(&dqp->list); + kfree(dqp); + } else { + int loc = -1; + + err = mlx4_READ_ENTRY(dev, + entry->index, + mailbox); + if (err) + goto out_mailbox; + members_count = + be32_to_cpu(mgm->members_count) & + 0xffffff; + if (!members_count) { + mlx4_warn(dev, "QP 
%06x wasn't found in entry %x mcount=0. deleting entry...\n", + qpn, entry->index); + list_del(&entry->list); + kfree(entry); + continue; + } + + for (i = 0; i < members_count; ++i) + if ((be32_to_cpu(mgm->qp[i]) & + MGM_QPN_MASK) == qpn) { + loc = i; + break; + } + + if (loc < 0) { + mlx4_err(dev, "QP %06x wasn't found in entry %d\n", + qpn, entry->index); + err = -EINVAL; + goto out_mailbox; + } + + /* Copy the last QP in this MGM + * over removed QP + */ + mgm->qp[loc] = mgm->qp[members_count - 1]; + mgm->qp[members_count - 1] = 0; + mgm->members_count = + cpu_to_be32(--members_count | + (MLX4_PROT_ETH << 30)); + + err = mlx4_WRITE_ENTRY(dev, + entry->index, + mailbox); + if (err) + goto out_mailbox; + } + } + } + +out_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); +out_list: + if (back_to_list) + list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); + else + kfree(pqp); +out_mutex: + mutex_unlock(&priv->mcg_table.mutex); + return err; +} + +/* + * Caller must hold MCG table semaphore. gid and mgm parameters must + * be properly aligned for command interface. + * + * Returns 0 unless a firmware command error occurs. + * + * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 + * and *mgm holds MGM entry. + * + * if GID is found in AMGM, *index = index in AMGM, *prev = index of + * previous entry in hash chain and *mgm holds AMGM entry. + * + * If no AMGM exists for given gid, *index = -1, *prev = index of last + * entry in hash chain and *mgm holds end of hash chain. + */ +static int find_entry(struct mlx4_dev *dev, u8 port, + u8 *gid, enum mlx4_protocol prot, + struct mlx4_cmd_mailbox *mgm_mailbox, + int *prev, int *index) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm = mgm_mailbox->buf; + u8 *mgid; + int err; + u16 hash; + u8 op_mod = (prot == MLX4_PROT_ETH) ? + !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + mgid = mailbox->buf; + + memcpy(mgid, gid, 16); + + err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + return err; + + if (0) + mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash); + + *index = hash; + *prev = -1; + + do { + err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox); + if (err) + return err; + + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { + if (*index != hash) { + mlx4_err(dev, "Found zero MGID in AMGM\n"); + err = -EINVAL; + } + return err; + } + + if (!memcmp(mgm->gid, gid, 16) && + be32_to_cpu(mgm->members_count) >> 30 == prot) + return err; + + *prev = *index; + *index = be32_to_cpu(mgm->next_gid_index) >> 6; + } while (*index); + + *index = -1; + return err; +} + +static const u8 __promisc_mode[] = { + [MLX4_FS_REGULAR] = 0x0, + [MLX4_FS_ALL_DEFAULT] = 0x1, + [MLX4_FS_MC_DEFAULT] = 0x3, + [MLX4_FS_UC_SNIFFER] = 0x4, + [MLX4_FS_MC_SNIFFER] = 0x5, +}; + +int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, + enum mlx4_net_trans_promisc_mode flow_type) +{ + if (flow_type >= MLX4_FS_MODE_NUM) { + mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type); + return -EINVAL; + } + return __promisc_mode[flow_type]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); + +static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, + struct mlx4_net_trans_rule_hw_ctrl *hw) +{ + u8 flags = 0; + + flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; + flags |= ctrl->exclusive ? (1 << 2) : 0; + flags |= ctrl->allow_loopback ? 
(1 << 3) : 0; + + hw->flags = flags; + hw->type = __promisc_mode[ctrl->promisc_mode]; + hw->prio = cpu_to_be16(ctrl->priority); + hw->port = ctrl->port; + hw->qpn = cpu_to_be32(ctrl->qpn); +} + +const u16 __sw_id_hw[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, + [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, + [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, + [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, + [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006, + [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008 +}; + +int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + return __sw_id_hw[id]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); + +static const int __rule_hw_sz[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = + sizeof(struct mlx4_net_trans_rule_hw_eth), + [MLX4_NET_TRANS_RULE_ID_IB] = + sizeof(struct mlx4_net_trans_rule_hw_ib), + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, + [MLX4_NET_TRANS_RULE_ID_IPV4] = + sizeof(struct mlx4_net_trans_rule_hw_ipv4), + [MLX4_NET_TRANS_RULE_ID_TCP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_UDP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_VXLAN] = + sizeof(struct mlx4_net_trans_rule_hw_vxlan) +}; + +int mlx4_hw_rule_sz(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + + return __rule_hw_sz[id]; +} +EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); + +static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, + struct _rule_hw *rule_hw) +{ + if (mlx4_hw_rule_sz(dev, spec->id) < 0) + return -EINVAL; + memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); + rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); + rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; + + switch (spec->id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); + memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, + ETH_ALEN); + memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); + memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, + ETH_ALEN); + if (spec->eth.ether_type_enable) { + rule_hw->eth.ether_type_enable = 1; + rule_hw->eth.ether_type = spec->eth.ether_type; + } + rule_hw->eth.vlan_tag = spec->eth.vlan_id; + rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_IB: + rule_hw->ib.l3_qpn = spec->ib.l3_qpn; + rule_hw->ib.qpn_mask = spec->ib.qpn_msk; + memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); + memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); + break; + + case MLX4_NET_TRANS_RULE_ID_IPV6: + return -EOPNOTSUPP; + + case MLX4_NET_TRANS_RULE_ID_IPV4: + rule_hw->ipv4.src_ip = spec->ipv4.src_ip; + rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; + rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; + rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; + rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; + rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; + rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_VXLAN: + rule_hw->vxlan.vni = + cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8); + rule_hw->vxlan.vni_mask = + 
cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8); + break; + + default: + return -EINVAL; + } + + return __rule_hw_sz[spec->id]; +} + +static void mlx4_err_rule(struct mlx4_dev *dev, char *str, + struct mlx4_net_trans_rule *rule) +{ +#define BUF_SIZE 256 + struct mlx4_spec_list *cur; + char buf[BUF_SIZE]; + int len = 0; + + mlx4_err(dev, "%s", str); + len += snprintf(buf + len, BUF_SIZE - len, + "port = %d prio = 0x%x qp = 0x%x ", + rule->port, rule->priority, rule->qpn); + + list_for_each_entry(cur, &rule->list, list) { + switch (cur->id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + len += snprintf(buf + len, BUF_SIZE - len, + "dmac = %pM ", &cur->eth.dst_mac); + if (cur->eth.ether_type) + len += snprintf(buf + len, BUF_SIZE - len, + "ethertype = 0x%x ", + be16_to_cpu(cur->eth.ether_type)); + if (cur->eth.vlan_id) + len += snprintf(buf + len, BUF_SIZE - len, + "vlan-id = %d ", + be16_to_cpu(cur->eth.vlan_id)); + break; + + case MLX4_NET_TRANS_RULE_ID_IPV4: + if (cur->ipv4.src_ip) + len += snprintf(buf + len, BUF_SIZE - len, + "src-ip = %pI4 ", + &cur->ipv4.src_ip); + if (cur->ipv4.dst_ip) + len += snprintf(buf + len, BUF_SIZE - len, + "dst-ip = %pI4 ", + &cur->ipv4.dst_ip); + break; + + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + if (cur->tcp_udp.src_port) + len += snprintf(buf + len, BUF_SIZE - len, + "src-port = %d ", + be16_to_cpu(cur->tcp_udp.src_port)); + if (cur->tcp_udp.dst_port) + len += snprintf(buf + len, BUF_SIZE - len, + "dst-port = %d ", + be16_to_cpu(cur->tcp_udp.dst_port)); + break; + + case MLX4_NET_TRANS_RULE_ID_IB: + len += snprintf(buf + len, BUF_SIZE - len, + "dst-gid = %pI6\n", cur->ib.dst_gid); + len += snprintf(buf + len, BUF_SIZE - len, + "dst-gid-mask = %pI6\n", + cur->ib.dst_gid_msk); + break; + + case MLX4_NET_TRANS_RULE_ID_VXLAN: + len += snprintf(buf + len, BUF_SIZE - len, + "VNID = %d ", be32_to_cpu(cur->vxlan.vni)); + break; + case MLX4_NET_TRANS_RULE_ID_IPV6: + break; + + default: + break; + } + } + len += snprintf(buf + len, BUF_SIZE - len, "\n"); + mlx4_err(dev, "%s", buf); + + if (len >= BUF_SIZE) + mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n"); +} + +int mlx4_flow_attach(struct mlx4_dev *dev, + struct mlx4_net_trans_rule *rule, u64 *reg_id) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_spec_list *cur; + u32 size = 0; + int ret; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + trans_rule_ctrl_to_hw(rule, mailbox->buf); + + size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); + + list_for_each_entry(cur, &rule->list, list) { + ret = parse_trans_rule(dev, cur, mailbox->buf + size); + if (ret < 0) { + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; + } + size += ret; + } + + ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); + if (ret == -ENOMEM) { + mlx4_err_rule(dev, + "mcg table is full. 
Fail to register network rule\n", + rule); + } else if (ret) { + if (ret == -ENXIO) { + if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) + mlx4_err_rule(dev, + "DMFS is not enabled, " + "failed to register network rule.\n", + rule); + else + mlx4_err_rule(dev, + "Rule exceeds the dmfs_high_rate_mode limitations, " + "failed to register network rule.\n", + rule); + + } else { + mlx4_err_rule(dev, "Fail to register network rule.\n", rule); + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_flow_attach); + +int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) +{ + int err; + + err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); + if (err) + mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", + reg_id); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_flow_detach); + +int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, + int port, int qpn, u16 prio, u64 *reg_id) +{ + int err; + struct mlx4_spec_list spec_eth_outer = { {NULL} }; + struct mlx4_spec_list spec_vxlan = { {NULL} }; + struct mlx4_spec_list spec_eth_inner = { {NULL} }; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + }; + + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + rule.port = port; + rule.qpn = qpn; + rule.priority = prio; + INIT_LIST_HEAD(&rule.list); + + spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); + memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + + spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ + spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ + + list_add_tail(&spec_eth_outer.list, &rule.list); + list_add_tail(&spec_vxlan.list, &rule.list); + list_add_tail(&spec_eth_inner.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + return err; +} +EXPORT_SYMBOL(mlx4_tunnel_steer_add); + +int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, + u32 max_range_qpn) +{ + int err; + u64 in_param; + + in_param = ((u64) min_range_qpn) << 32; + in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; + + err = mlx4_cmd(dev, in_param, 0, 0, + MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); + +int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot, + enum mlx4_steer_type steer) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + int index, prev; + int link = 0; + int i; + int err; + u8 port = gid[5]; + u8 new_entry = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + mutex_lock(&priv->mcg_table.mutex); + err = find_entry(dev, port, gid, prot, + mailbox, &prev, &index); + if (err) + goto out; + + if (index != -1) { + if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { + new_entry = 1; + memcpy(mgm->gid, gid, 16); + } + } else { + link = 1; + + index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap); + if (index == -1) { + mlx4_err(dev, "No AMGM entries left\n"); + err = -ENOMEM; + goto out; + } + index += dev->caps.num_mgms; + + new_entry = 1; + memset(mgm, 0, sizeof *mgm); + memcpy(mgm->gid, gid, 16); + } + + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; + if 
(members_count == dev->caps.num_qp_per_mgm) { + mlx4_err(dev, "MGM at index %x is full\n", index); + err = -ENOMEM; + goto out; + } + + for (i = 0; i < members_count; ++i) + if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { + mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); + err = 0; + goto out; + } + + if (block_mcast_loopback) + mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | + (1U << MGM_BLCK_LB_BIT)); + else + mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); + + mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30); + + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + if (err) + goto out; + + if (!link) + goto out; + + err = mlx4_READ_ENTRY(dev, prev, mailbox); + if (err) + goto out; + + mgm->next_gid_index = cpu_to_be32(index << 6); + + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); + if (err) + goto out; + +out: + if (prot == MLX4_PROT_ETH) { + /* manage the steering entry for promisc mode */ + if (new_entry) + new_steering_entry(dev, port, steer, index, qp->qpn); + else + existing_steering_entry(dev, port, steer, + index, qp->qpn); + } + if (err && link && index != -1) { + if (index < dev->caps.num_mgms) + mlx4_warn(dev, "Got AMGM index %d < %d\n", + index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + index - dev->caps.num_mgms, MLX4_USE_RR); + } + mutex_unlock(&priv->mcg_table.mutex); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot, enum mlx4_steer_type steer) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mgm *mgm; + u32 members_count; + int prev, index; + int i, loc = -1; + int err; + u8 port = gid[5]; + bool removed_entry = false; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + mutex_lock(&priv->mcg_table.mutex); + + err = find_entry(dev, port, gid, prot, + mailbox, &prev, &index); + if (err) + goto out; + + if (index == -1) { + mlx4_err(dev, "MGID %pI6 not found\n", gid); + err = -EINVAL; + goto out; + } + + /* If this QP is also a promisc QP, it shouldn't be removed only if + * at least one none promisc QP is also attached to this MCG + */ + if (prot == MLX4_PROT_ETH && + check_duplicate_entry(dev, port, steer, index, qp->qpn) && + !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL)) + goto out; + + members_count = be32_to_cpu(mgm->members_count) & 0xffffff; + for (i = 0; i < members_count; ++i) + if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { + loc = i; + break; + } + + if (loc == -1) { + mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); + err = -EINVAL; + goto out; + } + + /* copy the last QP in this MGM over removed QP */ + mgm->qp[loc] = mgm->qp[members_count - 1]; + mgm->qp[members_count - 1] = 0; + mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30); + + if (prot == MLX4_PROT_ETH) + removed_entry = can_remove_steering_entry(dev, port, steer, + index, qp->qpn); + if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) { + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + goto out; + } + + /* We are going to delete the entry, members count should be 0 */ + mgm->members_count = cpu_to_be32((u32) prot << 30); + + if (prev == -1) { + /* Remove entry from MGM */ + int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; + if (amgm_index) { + err = mlx4_READ_ENTRY(dev, amgm_index, mailbox); + if (err) + 
goto out; + } else + memset(mgm->gid, 0, 16); + + err = mlx4_WRITE_ENTRY(dev, index, mailbox); + if (err) + goto out; + + if (amgm_index) { + if (amgm_index < dev->caps.num_mgms) + mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n", + index, amgm_index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + amgm_index - dev->caps.num_mgms, MLX4_USE_RR); + } + } else { + /* Remove entry from AMGM */ + int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; + err = mlx4_READ_ENTRY(dev, prev, mailbox); + if (err) + goto out; + + mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); + + err = mlx4_WRITE_ENTRY(dev, prev, mailbox); + if (err) + goto out; + + if (index < dev->caps.num_mgms) + mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n", + prev, index, dev->caps.num_mgms); + else + mlx4_bitmap_free(&priv->mcg_table.bitmap, + index - dev->caps.num_mgms, MLX4_USE_RR); + } + +out: + mutex_unlock(&priv->mcg_table.mutex); + + mlx4_free_cmd_mailbox(dev, mailbox); + if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + /* In case device is under an error, return success as a closing command */ + err = 0; + return err; +} + +static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 attach, u8 block_loopback, + enum mlx4_protocol prot) +{ + struct mlx4_cmd_mailbox *mailbox; + int err = 0; + int qpn; + + if (!mlx4_is_mfunc(dev)) + return -EBADF; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, gid, 16); + qpn = qp->qpn; + qpn |= (prot << 28); + if (attach && block_loopback) + qpn |= (1 << 31); + + err = mlx4_cmd(dev, mailbox->dma, qpn, attach, + MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + if (err && !attach && + dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) + err = 0; + return err; +} + +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 port, + int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id) +{ + struct mlx4_spec_list spec = { {NULL} }; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .promisc_mode = MLX4_FS_REGULAR, + .priority = MLX4_DOMAIN_NIC, + }; + + rule.allow_loopback = !block_mcast_loopback; + rule.port = port; + rule.qpn = qp->qpn; + INIT_LIST_HEAD(&rule.list); + + switch (prot) { + case MLX4_PROT_ETH: + spec.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); + memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + break; + + case MLX4_PROT_IB_IPV6: + spec.id = MLX4_NET_TRANS_RULE_ID_IB; + memcpy(spec.ib.dst_gid, gid, 16); + memset(&spec.ib.dst_gid_msk, 0xff, 16); + break; + default: + return -EINVAL; + } + list_add_tail(&spec.list, &rule.list); + + return mlx4_flow_attach(dev, &rule, reg_id); +} + +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + u8 port, int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_A0: + if (prot == MLX4_PROT_ETH) + return 0; + + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_MC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + return mlx4_qp_attach_common(dev, qp, gid, + block_mcast_loopback, prot, + MLX4_MC_STEER); + + case MLX4_STEERING_MODE_DEVICE_MANAGED: + 
return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, + block_mcast_loopback, + prot, reg_id); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(mlx4_multicast_attach); + +int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot, u64 reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_A0: + if (prot == MLX4_PROT_ETH) + return 0; + + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_MC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + + return mlx4_qp_detach_common(dev, qp, gid, prot, + MLX4_MC_STEER); + + case MLX4_STEERING_MODE_DEVICE_MANAGED: + return mlx4_flow_detach(dev, reg_id); + + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(mlx4_multicast_detach); + +int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, + u32 qpn, enum mlx4_net_trans_promisc_mode mode) +{ + struct mlx4_net_trans_rule rule; + u64 *regid_p; + + switch (mode) { + case MLX4_FS_ALL_DEFAULT: + regid_p = &dev->regid_promisc_array[port]; + break; + case MLX4_FS_MC_DEFAULT: + regid_p = &dev->regid_allmulti_array[port]; + break; + default: + return -1; + } + + if (*regid_p != 0) + return -1; + + rule.promisc_mode = mode; + rule.port = port; + rule.qpn = qpn; + INIT_LIST_HEAD(&rule.list); + mlx4_err(dev, "going promisc on %x\n", port); + + return mlx4_flow_attach(dev, &rule, regid_p); +} +EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); + +int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, + enum mlx4_net_trans_promisc_mode mode) +{ + int ret; + u64 *regid_p; + + switch (mode) { + case MLX4_FS_ALL_DEFAULT: + regid_p = &dev->regid_promisc_array[port]; + break; + case MLX4_FS_MC_DEFAULT: + regid_p = &dev->regid_allmulti_array[port]; + break; + default: + return -1; + } + + if (*regid_p == 0) + return -1; + + ret = mlx4_flow_detach(dev, *regid_p); + if (ret == 0) + *regid_p = 0; + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); + +int mlx4_unicast_attach(struct mlx4_dev *dev, + struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot) +{ + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_UC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + + return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, + prot, MLX4_UC_STEER); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_attach); + +int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot) +{ + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_UC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + + return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_detach); + +int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u32 qpn = (u32) vhcr->in_param & 0xffffffff; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62); + enum mlx4_steer_type steer = vhcr->in_modifier; + + if (port < 0) + return -EINVAL; + + /* Promiscuous unicast is not allowed in mfunc */ + if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) + return 0; + + if (vhcr->op_modifier) + return add_promisc_qp(dev, port, steer, qpn); + else + return remove_promisc_qp(dev, port, steer, qpn); +} + +static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, + enum mlx4_steer_type 
steer, u8 add, u8 port) +{ + return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add, + MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); + + return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); + +int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); + + return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); + +int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); + + return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); + +int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) +{ + if (mlx4_is_mfunc(dev)) + return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); + + return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn); +} +EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove); + +int mlx4_init_mcg_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + /* No need for mcg_table when fw managed the mcg table*/ + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) + return 0; + err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, + dev->caps.num_amgms - 1, 0, 0); + if (err) + return err; + + mutex_init(&priv->mcg_table.mutex); + + return 0; +} + +void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) +{ + if (dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED) + mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h new file mode 100644 index 000000000..502d3dd2c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -0,0 +1,1437 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX4_H +#define MLX4_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "fw_qos.h" + +#define DRV_NAME "mlx4_core" +#define PFX DRV_NAME ": " +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb, 2014" + +#define MLX4_FS_UDP_UC_EN (1 << 1) +#define MLX4_FS_TCP_UC_EN (1 << 2) +#define MLX4_FS_NUM_OF_L2_ADDR 8 +#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7 +#define MLX4_FS_NUM_MCG (1 << 17) + +#define INIT_HCA_TPT_MW_ENABLE (1 << 7) + +enum { + MLX4_HCR_BASE = 0x80680, + MLX4_HCR_SIZE = 0x0001c, + MLX4_CLR_INT_SIZE = 0x00008, + MLX4_SLAVE_COMM_BASE = 0x0, + MLX4_COMM_PAGESIZE = 0x1000, + MLX4_CLOCK_SIZE = 0x00008, + MLX4_COMM_CHAN_CAPS = 0x8, + MLX4_COMM_CHAN_FLAGS = 0xc +}; + +enum { + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10, + MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7, + MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12, + MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2), + MLX4_MTT_ENTRY_PER_SEG = 8, +}; + +enum { + MLX4_NUM_PDS = 1 << 15 +}; + +enum { + MLX4_CMPT_TYPE_QP = 0, + MLX4_CMPT_TYPE_SRQ = 1, + MLX4_CMPT_TYPE_CQ = 2, + MLX4_CMPT_TYPE_EQ = 3, + MLX4_CMPT_NUM_TYPE +}; + +enum { + MLX4_CMPT_SHIFT = 24, + MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT +}; + +enum mlx4_mpt_state { + MLX4_MPT_DISABLED = 0, + MLX4_MPT_EN_HW, + MLX4_MPT_EN_SW +}; + +#define MLX4_COMM_TIME 10000 +#define MLX4_COMM_OFFLINE_TIME_OUT 30000 +#define MLX4_COMM_CMD_NA_OP 0x0 + + +enum { + MLX4_COMM_CMD_RESET, + MLX4_COMM_CMD_VHCR0, + MLX4_COMM_CMD_VHCR1, + MLX4_COMM_CMD_VHCR2, + MLX4_COMM_CMD_VHCR_EN, + MLX4_COMM_CMD_VHCR_POST, + MLX4_COMM_CMD_FLR = 254 +}; + +enum { + MLX4_VF_SMI_DISABLED, + MLX4_VF_SMI_ENABLED +}; + +/*The flag indicates that the slave should delay the RESET cmd*/ +#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb +/*indicates how many retries will be done if we are in the middle of FLR*/ +#define NUM_OF_RESET_RETRIES 10 +#define SLEEP_TIME_IN_RESET (2 * 1000) +enum mlx4_resource { + RES_QP, + RES_CQ, + RES_SRQ, + RES_XRCD, + RES_MPT, + RES_MTT, + RES_MAC, + RES_VLAN, + RES_EQ, + RES_COUNTER, + RES_FS_RULE, + MLX4_NUM_OF_RESOURCE_TYPE +}; + +enum mlx4_alloc_mode { + RES_OP_RESERVE, + RES_OP_RESERVE_AND_MAP, + RES_OP_MAP_ICM, +}; + +enum mlx4_res_tracker_free_type { + RES_TR_FREE_ALL, + RES_TR_FREE_SLAVES_ONLY, + RES_TR_FREE_STRUCTS_ONLY, +}; + +/* + *Virtual HCR structures. + * mlx4_vhcr is the sw representation, in machine endianness + * + * mlx4_vhcr_cmd is the formalized structure, the one that is passed + * to FW to go through communication channel. 
+ * It is big endian, and has the same structure as the physical HCR + * used by command interface + */ +struct mlx4_vhcr { + u64 in_param; + u64 out_param; + u32 in_modifier; + u32 errno; + u16 op; + u16 token; + u8 op_modifier; + u8 e_bit; +}; + +struct mlx4_vhcr_cmd { + __be64 in_param; + __be32 in_modifier; + u32 reserved1; + __be64 out_param; + __be16 token; + u16 reserved; + u8 status; + u8 flags; + __be16 opcode; +}; + +struct mlx4_cmd_info { + u16 opcode; + bool has_inbox; + bool has_outbox; + bool out_is_imm; + bool encode_slave_id; + int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox); + int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +}; + +#ifdef CONFIG_MLX4_DEBUG +extern int mlx4_debug_level; +#else /* CONFIG_MLX4_DEBUG */ +#define mlx4_debug_level (0) +#endif /* CONFIG_MLX4_DEBUG */ + +#define mlx4_dbg(mdev, format, ...) \ +do { \ + if (mlx4_debug_level) \ + dev_printk(KERN_DEBUG, \ + &(mdev)->persist->pdev->dev, format, \ + ##__VA_ARGS__); \ +} while (0) + +#define mlx4_err(mdev, format, ...) \ + dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) +#define mlx4_info(mdev, format, ...) \ + dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) +#define mlx4_warn(mdev, format, ...) \ + dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) + +extern int mlx4_log_num_mgm_entry_size; +extern int log_mtts_per_seg; +extern int mlx4_internal_err_reset; + +#define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ + MLX4_MFUNC_MAX)) +#define ALL_SLAVES 0xff + +struct mlx4_bitmap { + u32 last; + u32 top; + u32 max; + u32 reserved_top; + u32 mask; + u32 avail; + u32 effective_len; + spinlock_t lock; + unsigned long *table; +}; + +struct mlx4_buddy { + unsigned long **bits; + unsigned int *num_free; + u32 max_order; + spinlock_t lock; +}; + +struct mlx4_icm; + +struct mlx4_icm_table { + u64 virt; + int num_icm; + u32 num_obj; + int obj_size; + int lowmem; + int coherent; + struct mutex mutex; + struct mlx4_icm **icm; +}; + +#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) +#define MLX4_MPT_FLAG_FREE (0x3UL << 28) +#define MLX4_MPT_FLAG_MIO (1 << 17) +#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) +#define MLX4_MPT_FLAG_PHYSICAL (1 << 9) +#define MLX4_MPT_FLAG_REGION (1 << 8) + +#define MLX4_MPT_PD_MASK (0x1FFFFUL) +#define MLX4_MPT_PD_VF_MASK (0xFE0000UL) +#define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) +#define MLX4_MPT_PD_FLAG_RAE (1 << 28) +#define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) + +#define MLX4_MPT_QP_FLAG_BOUND_QP (1 << 7) + +#define MLX4_MPT_STATUS_SW 0xF0 +#define MLX4_MPT_STATUS_HW 0x00 + +#define MLX4_CQE_SIZE_MASK_STRIDE 0x3 +#define MLX4_EQE_SIZE_MASK_STRIDE 0x30 + +/* + * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. + */ +struct mlx4_mpt_entry { + __be32 flags; + __be32 qpn; + __be32 key; + __be32 pd_flags; + __be64 start; + __be64 length; + __be32 lkey; + __be32 win_cnt; + u8 reserved1[3]; + u8 mtt_rep; + __be64 mtt_addr; + __be32 mtt_sz; + __be32 entity_size; + __be32 first_byte_offset; +} __packed; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. 
+ */ +struct mlx4_eq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + u8 log_eq_size; + u8 reserved2[4]; + u8 eq_period; + u8 reserved3; + u8 eq_max_count; + u8 reserved4[3]; + u8 intr; + u8 log_page_size; + u8 reserved5[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + u32 reserved6[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved7[4]; +}; + +struct mlx4_cq_context { + __be32 flags; + u16 reserved1[3]; + __be16 page_offset; + __be32 logsize_usrpage; + __be16 cq_period; + __be16 cq_max_count; + u8 reserved2[3]; + u8 comp_eqn; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + u32 reserved4[2]; + __be64 db_rec_addr; +}; + +struct mlx4_srq_context { + __be32 state_logsize_srqn; + u8 logstride; + u8 reserved1; + __be16 xrcd; + __be32 pg_offset_cqn; + u32 reserved2; + u8 log_page_size; + u8 reserved3[2]; + u8 mtt_base_addr_h; + __be32 mtt_base_addr_l; + __be32 pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved4; + __be16 wqe_counter; + u32 reserved5; + __be64 db_rec_addr; +}; + +struct mlx4_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + /* lock on completion tasklet list */ + spinlock_t lock; +}; + +struct mlx4_eq { + struct mlx4_dev *dev; + void __iomem *doorbell; + int eqn; + u32 cons_index; + u16 irq; + u16 have_irq; + int nent; + struct mlx4_buf_list *page_list; + struct mlx4_mtt mtt; + struct mlx4_eq_tasklet tasklet_ctx; +}; + +struct mlx4_slave_eqe { + u8 type; + u8 port; + u32 param; +}; + +struct mlx4_slave_event_eq_info { + int eqn; + u16 token; +}; + +struct mlx4_profile { + int num_qp; + int rdmarc_per_qp; + int num_srq; + int num_cq; + int num_mcg; + int num_mpt; + unsigned num_mtt; +}; + +struct mlx4_fw { + u64 clr_int_base; + u64 catas_offset; + u64 comm_base; + u64 clock_offset; + struct mlx4_icm *fw_icm; + struct mlx4_icm *aux_icm; + u32 catas_size; + u16 fw_pages; + u8 clr_int_bar; + u8 catas_bar; + u8 comm_bar; + u8 clock_bar; +}; + +struct mlx4_comm { + u32 slave_write; + u32 slave_read; +}; + +enum { + MLX4_MCAST_CONFIG = 0, + MLX4_MCAST_DISABLE = 1, + MLX4_MCAST_ENABLE = 2, +}; + +#define VLAN_FLTR_SIZE 128 + +struct mlx4_vlan_fltr { + __be32 entry[VLAN_FLTR_SIZE]; +}; + +struct mlx4_mcast_entry { + struct list_head list; + u64 addr; +}; + +struct mlx4_promisc_qp { + struct list_head list; + u32 qpn; +}; + +struct mlx4_steer_index { + struct list_head list; + unsigned int index; + struct list_head duplicates; +}; + +#define MLX4_EVENT_TYPES_NUM 64 + +struct mlx4_slave_state { + u8 comm_toggle; + u8 last_cmd; + u8 init_port_mask; + bool active; + bool old_vlan_api; + u8 function; + dma_addr_t vhcr_dma; + u16 mtu[MLX4_MAX_PORTS + 1]; + __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; + struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; + struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; + struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; + /* event type to eq number lookup */ + struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM]; + u16 eq_pi; + u16 eq_ci; + spinlock_t lock; + /*initialized via the kzalloc*/ + u8 is_slave_going_down; + u32 cookie; + enum slave_port_state port_state[MLX4_MAX_PORTS + 1]; +}; + +#define MLX4_VGT 4095 +#define NO_INDX (-1) + +struct mlx4_vport_state { + u64 mac; + u16 default_vlan; + u8 default_qos; + u32 tx_rate; + bool spoofchk; + u32 link_state; + u8 qos_vport; + __be64 guid; 
+}; + +struct mlx4_vf_admin_state { + struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1]; + u8 enable_smi[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_vport_oper_state { + struct mlx4_vport_state state; + int mac_idx; + int vlan_idx; +}; + +struct mlx4_vf_oper_state { + struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1]; + u8 smi_enabled[MLX4_MAX_PORTS + 1]; +}; + +struct slave_list { + struct mutex mutex; + struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; +}; + +struct resource_allocator { + spinlock_t alloc_lock; /* protect quotas */ + union { + int res_reserved; + int res_port_rsvd[MLX4_MAX_PORTS]; + }; + union { + int res_free; + int res_port_free[MLX4_MAX_PORTS]; + }; + int *quota; + int *allocated; + int *guaranteed; +}; + +struct mlx4_resource_tracker { + spinlock_t lock; + /* tree for each resources */ + struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; + /* num_of_slave's lists, one per slave */ + struct slave_list *slave_list; + struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE]; +}; + +#define SLAVE_EVENT_EQ_SIZE 128 +struct mlx4_slave_event_eq { + u32 eqn; + u32 cons; + u32 prod; + spinlock_t event_lock; + struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; +}; + +struct mlx4_qos_manager { + int num_of_qos_vfs; + DECLARE_BITMAP(priority_bm, MLX4_NUM_UP); +}; + +struct mlx4_master_qp0_state { + int proxy_qp0_active; + int qp0_active; + int port_active; +}; + +struct mlx4_mfunc_master_ctx { + struct mlx4_slave_state *slave_state; + struct mlx4_vf_admin_state *vf_admin; + struct mlx4_vf_oper_state *vf_oper; + struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; + int init_port_ref[MLX4_MAX_PORTS + 1]; + u16 max_mtu[MLX4_MAX_PORTS + 1]; + int disable_mcast_ref[MLX4_MAX_PORTS + 1]; + struct mlx4_resource_tracker res_tracker; + struct workqueue_struct *comm_wq; + struct work_struct comm_work; + struct work_struct slave_event_work; + struct work_struct slave_flr_event_work; + spinlock_t slave_state_lock; + __be32 comm_arm_bit_vector[4]; + struct mlx4_eqe cmd_eqe; + struct mlx4_slave_event_eq slave_eq; + struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; + struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_mfunc { + struct mlx4_comm __iomem *comm; + struct mlx4_vhcr_cmd *vhcr; + dma_addr_t vhcr_dma; + + struct mlx4_mfunc_master_ctx master; +}; + +#define MGM_QPN_MASK 0x00FFFFFF +#define MGM_BLCK_LB_BIT 30 + +struct mlx4_mgm { + __be32 next_gid_index; + __be32 members_count; + u32 reserved[2]; + u8 gid[16]; + __be32 qp[MLX4_MAX_QP_PER_MGM]; +}; + +struct mlx4_cmd { + struct pci_pool *pool; + void __iomem *hcr; + struct mutex slave_cmd_mutex; + struct semaphore poll_sem; + struct semaphore event_sem; + int max_cmds; + spinlock_t context_lock; + int free_head; + struct mlx4_cmd_context *context; + u16 token_mask; + u8 use_events; + u8 toggle; + u8 comm_toggle; + u8 initialized; +}; + +enum { + MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, + MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, + MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2, +}; +struct mlx4_vf_immed_vlan_work { + struct work_struct work; + struct mlx4_priv *priv; + int flags; + int slave; + int vlan_ix; + int orig_vlan_ix; + u8 port; + u8 qos; + u8 qos_vport; + u16 vlan_id; + u16 orig_vlan_id; +}; + + +struct mlx4_uar_table { + struct mlx4_bitmap bitmap; +}; + +struct mlx4_mr_table { + struct mlx4_bitmap mpt_bitmap; + struct mlx4_buddy mtt_buddy; + u64 mtt_base; + u64 mpt_base; + struct mlx4_icm_table mtt_table; + struct mlx4_icm_table dmpt_table; +}; + +struct mlx4_cq_table { + struct mlx4_bitmap bitmap; + 
spinlock_t lock; + struct radix_tree_root tree; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; +}; + +struct mlx4_eq_table { + struct mlx4_bitmap bitmap; + char *irq_names; + void __iomem *clr_int; + void __iomem **uar_map; + u32 clr_mask; + struct mlx4_eq *eq; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; + int have_irq; + u8 inta_pin; +}; + +struct mlx4_srq_table { + struct mlx4_bitmap bitmap; + spinlock_t lock; + struct radix_tree_root tree; + struct mlx4_icm_table table; + struct mlx4_icm_table cmpt_table; +}; + +enum mlx4_qp_table_zones { + MLX4_QP_TABLE_ZONE_GENERAL, + MLX4_QP_TABLE_ZONE_RSS, + MLX4_QP_TABLE_ZONE_RAW_ETH, + MLX4_QP_TABLE_ZONE_NUM +}; + +struct mlx4_qp_table { + struct mlx4_bitmap *bitmap_gen; + struct mlx4_zone_allocator *zones; + u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM]; + u32 rdmarc_base; + int rdmarc_shift; + spinlock_t lock; + struct mlx4_icm_table qp_table; + struct mlx4_icm_table auxc_table; + struct mlx4_icm_table altc_table; + struct mlx4_icm_table rdmarc_table; + struct mlx4_icm_table cmpt_table; +}; + +struct mlx4_mcg_table { + struct mutex mutex; + struct mlx4_bitmap bitmap; + struct mlx4_icm_table table; +}; + +struct mlx4_catas_err { + u32 __iomem *map; + struct timer_list timer; + struct list_head list; +}; + +#define MLX4_MAX_MAC_NUM 128 +#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3) + +struct mlx4_mac_table { + __be64 entries[MLX4_MAX_MAC_NUM]; + int refs[MLX4_MAX_MAC_NUM]; + struct mutex mutex; + int total; + int max; +}; + +#define MLX4_ROCE_GID_ENTRY_SIZE 16 + +struct mlx4_roce_gid_entry { + u8 raw[MLX4_ROCE_GID_ENTRY_SIZE]; +}; + +struct mlx4_roce_gid_table { + struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS]; + struct mutex mutex; +}; + +#define MLX4_MAX_VLAN_NUM 128 +#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) + +struct mlx4_vlan_table { + __be32 entries[MLX4_MAX_VLAN_NUM]; + int refs[MLX4_MAX_VLAN_NUM]; + struct mutex mutex; + int total; + int max; +}; + +#define SET_PORT_GEN_ALL_VALID 0x7 +#define SET_PORT_PROMISC_SHIFT 31 +#define SET_PORT_MC_PROMISC_SHIFT 30 + +enum { + MCAST_DIRECT_ONLY = 0, + MCAST_DIRECT = 1, + MCAST_DEFAULT = 2 +}; + + +struct mlx4_set_port_general_context { + u16 reserved1; + u8 v_ignore_fcs; + u8 flags; + u8 ignore_fcs; + u8 reserved2; + __be16 mtu; + u8 pptx; + u8 pfctx; + u16 reserved3; + u8 pprx; + u8 pfcrx; + u16 reserved4; +}; + +struct mlx4_set_port_rqp_calc_context { + __be32 base_qpn; + u8 rererved; + u8 n_mac; + u8 n_vlan; + u8 n_prio; + u8 reserved2[3]; + u8 mac_miss; + u8 intra_no_vlan; + u8 no_vlan; + u8 intra_vlan_miss; + u8 vlan_miss; + u8 reserved3[3]; + u8 no_vlan_prio; + __be32 promisc; + __be32 mcast; +}; + +struct mlx4_port_info { + struct mlx4_dev *dev; + int port; + char dev_name[16]; + struct device_attribute port_attr; + enum mlx4_port_type tmp_type; + char dev_mtu_name[16]; + struct device_attribute port_mtu_attr; + struct mlx4_mac_table mac_table; + struct mlx4_vlan_table vlan_table; + struct mlx4_roce_gid_table gid_table; + int base_qpn; +}; + +struct mlx4_sense { + struct mlx4_dev *dev; + u8 do_sense_port[MLX4_MAX_PORTS + 1]; + u8 sense_allowed[MLX4_MAX_PORTS + 1]; + struct delayed_work sense_poll; +}; + +struct mlx4_msix_ctl { + u64 pool_bm; + struct mutex pool_lock; +}; + +struct mlx4_steer { + struct list_head promisc_qps[MLX4_NUM_STEERS]; + struct list_head steer_entries[MLX4_NUM_STEERS]; +}; + +enum { + MLX4_PCI_DEV_IS_VF = 1 << 0, + MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, +}; + +enum { + MLX4_NO_RR = 0, + MLX4_USE_RR = 1, 
+}; + +struct mlx4_priv { + struct mlx4_dev dev; + + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; + + int pci_dev_data; + int removed; + + struct list_head pgdir_list; + struct mutex pgdir_mutex; + + struct mlx4_fw fw; + struct mlx4_cmd cmd; + struct mlx4_mfunc mfunc; + + struct mlx4_bitmap pd_bitmap; + struct mlx4_bitmap xrcd_bitmap; + struct mlx4_uar_table uar_table; + struct mlx4_mr_table mr_table; + struct mlx4_cq_table cq_table; + struct mlx4_eq_table eq_table; + struct mlx4_srq_table srq_table; + struct mlx4_qp_table qp_table; + struct mlx4_mcg_table mcg_table; + struct mlx4_bitmap counters_bitmap; + + struct mlx4_catas_err catas_err; + + void __iomem *clr_base; + + struct mlx4_uar driver_uar; + void __iomem *kar; + struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; + struct mlx4_sense sense; + struct mutex port_mutex; + struct mlx4_msix_ctl msix_ctl; + struct mlx4_steer *steer; + struct list_head bf_list; + struct mutex bf_mutex; + struct io_mapping *bf_mapping; + void __iomem *clock_mapping; + int reserved_mtts; + int fs_hash_mode; + u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; + struct mlx4_port_map v2p; /* cached port mapping configuration */ + struct mutex bond_mutex; /* for bond mode */ + __be64 slave_node_guids[MLX4_MFUNC_MAX]; + + atomic_t opreq_count; + struct work_struct opreq_task; +}; + +static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) +{ + return container_of(dev, struct mlx4_priv, dev); +} + +#define MLX4_SENSE_RANGE (HZ * 3) + +extern struct workqueue_struct *mlx4_wq; + +u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr); +u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, + int align, u32 skip_mask); +void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, + int use_rr); +u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); +int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, + u32 reserved_bot, u32 resetrved_top); +void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); + +int mlx4_reset(struct mlx4_dev *dev); + +int mlx4_alloc_eq_table(struct mlx4_dev *dev); +void mlx4_free_eq_table(struct mlx4_dev *dev); + +int mlx4_init_pd_table(struct mlx4_dev *dev); +int mlx4_init_xrcd_table(struct mlx4_dev *dev); +int mlx4_init_uar_table(struct mlx4_dev *dev); +int mlx4_init_mr_table(struct mlx4_dev *dev); +int mlx4_init_eq_table(struct mlx4_dev *dev); +int mlx4_init_cq_table(struct mlx4_dev *dev); +int mlx4_init_qp_table(struct mlx4_dev *dev); +int mlx4_init_srq_table(struct mlx4_dev *dev); +int mlx4_init_mcg_table(struct mlx4_dev *dev); + +void mlx4_cleanup_pd_table(struct mlx4_dev *dev); +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev); +void mlx4_cleanup_uar_table(struct mlx4_dev *dev); +void mlx4_cleanup_mr_table(struct mlx4_dev *dev); +void mlx4_cleanup_eq_table(struct mlx4_dev *dev); +void mlx4_cleanup_cq_table(struct mlx4_dev *dev); +void mlx4_cleanup_qp_table(struct mlx4_dev *dev); +void mlx4_cleanup_srq_table(struct mlx4_dev *dev); +void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp); +void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); +int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); +void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); +int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); +void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); +int __mlx4_mpt_reserve(struct mlx4_dev 
*dev); +void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp); +void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); +u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); +void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); + +int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags); +void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); +int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); +void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); +int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list); +int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); +void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx); +int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); +void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); + +void mlx4_start_catas_poll(struct mlx4_dev *dev); +void mlx4_stop_catas_poll(struct mlx4_dev *dev); +int mlx4_catas_init(struct mlx4_dev *dev); +void mlx4_catas_end(struct mlx4_dev *dev); +int mlx4_restart_one(struct pci_dev *pdev); +int mlx4_register_device(struct mlx4_dev *dev); +void mlx4_unregister_device(struct mlx4_dev *dev); +void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, + unsigned long param); + +struct mlx4_dev_cap; +struct mlx4_init_hca_param; + +u64 mlx4_make_profile(struct mlx4_dev *dev, + struct mlx4_profile *request, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca); +void mlx4_master_comm_channel(struct work_struct *work); +void mlx4_gen_slave_eqe(struct work_struct *work); +void mlx4_master_handle_slave_flr(struct work_struct *work); + +int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr 
*vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct 
mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); + +enum { + MLX4_CMD_CLEANUP_STRUCT = 1UL << 0, + MLX4_CMD_CLEANUP_POOL = 1UL << 1, + MLX4_CMD_CLEANUP_HCR = 1UL << 2, + MLX4_CMD_CLEANUP_VHCR = 1UL << 3, + MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1 +}; + +int mlx4_cmd_init(struct mlx4_dev *dev); +void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); +int mlx4_multi_func_init(struct mlx4_dev *dev); +int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev); +void mlx4_multi_func_cleanup(struct mlx4_dev *dev); +void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); +int mlx4_cmd_use_events(struct mlx4_dev *dev); +void mlx4_cmd_use_polling(struct mlx4_dev *dev); + +int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, + u16 op, unsigned long timeout); + +void mlx4_cq_tasklet_cb(unsigned long data); +void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); +void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type); + +void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); + +void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); + +void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); + +int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, + enum mlx4_port_type *type); +void mlx4_do_sense_ports(struct mlx4_dev *dev, + enum mlx4_port_type *stype, + enum mlx4_port_type *defaults); +void mlx4_start_sense(struct mlx4_dev *dev); +void mlx4_stop_sense(struct mlx4_dev *dev); +void mlx4_sense_init(struct mlx4_dev *dev); +int mlx4_check_port_params(struct mlx4_dev *dev, + enum mlx4_port_type *port_type); +int mlx4_change_port_types(struct mlx4_dev *dev, + enum mlx4_port_type *port_types); + +void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); +void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table); +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); + +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); +/* resource tracker functions*/ +int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, + enum mlx4_resource resource_type, + u64 resource_id, int *slave); 
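+/* Note: the resource tracker above keeps one rb-tree per resource type + * (see enum mlx4_resource and struct mlx4_resource_tracker) plus a per-slave + * list, so that every QP/CQ/SRQ/MPT/MTT/MAC/VLAN/EQ/counter/flow-rule owned by + * a slave can be looked up by its id and released in bulk, e.g. via + * mlx4_delete_all_resources_for_slave() when a VF undergoes FLR. + */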
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave); +int mlx4_init_resource_tracker(struct mlx4_dev *dev); + +void mlx4_free_resource_tracker(struct mlx4_dev *dev, + enum mlx4_res_tracker_free_type type); + +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); + +int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, + int *gid_tbl_len, int *pkey_tbl_len); + +int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + enum mlx4_protocol prot, enum mlx4_steer_type steer); +int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], + int block_mcast_loopback, enum mlx4_protocol prot, + enum mlx4_steer_type steer); +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], u8 port, + int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id); +int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, + int port, void *buf); +int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod, + struct mlx4_cmd_mailbox *outbox); +int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox 
*outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + +int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); +int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); + +static inline void set_param_l(u64 *arg, u32 val) +{ + *arg = (*arg & 0xffffffff00000000ULL) | (u64) val; +} + +static inline void set_param_h(u64 *arg, u32 val) +{ + *arg = (*arg & 0xffffffff) | ((u64) val << 32); +} + +static inline u32 get_param_l(u64 *arg) +{ + return (u32) (*arg & 0xffffffff); +} + +static inline u32 get_param_h(u64 *arg) +{ + return (u32)(*arg >> 32); +} + +static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) +{ + return &mlx4_priv(dev)->mfunc.master.res_tracker.lock; +} + +#define NOT_MASKED_PD_BITS 17 + +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); + +void mlx4_init_quotas(struct mlx4_dev *dev); + +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); +/* Returns the VF index of slave */ +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); +int mlx4_config_mad_demux(struct mlx4_dev *dev); +int mlx4_do_bond(struct mlx4_dev *dev, bool enable); + +enum mlx4_zone_flags { + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO = 1UL << 1, + MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO = 1UL << 2, + MLX4_ZONE_USE_RR = 1UL << 3, +}; + +enum mlx4_zone_alloc_flags { + /* No two objects could overlap between zones. UID + * could be left unused. If this flag is given and + * two overlapped zones are used, an object will be free'd + * from the smallest possible matching zone. + */ + MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP = 1UL << 0, +}; + +struct mlx4_zone_allocator; + +/* Create a new zone allocator */ +struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags); + +/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator + * <zone_alloc>. Allocating an object from this zone adds an offset <offset>. + * Similarly, when searching for an object to free, this offset is taken into + * account. The use_rr mlx4_ib parameter for allocating objects from this <bitmap> + * is given through the MLX4_ZONE_USE_RR flag in <flags>. + * When an allocation fails, <zone_alloc> tries to allocate from other zones + * according to the policy set by <flags>. <puid> is the unique identifier + * received to this zone. + */ +int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, + struct mlx4_bitmap *bitmap, + u32 flags, + int priority, + int offset, + u32 *puid); + +/* Remove bitmap indicated by <uid> from <zone_alloc> */ +int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid); + +/* Delete the zone allocator <zone_alloc>. This function doesn't destroy + * the underlying bitmaps. + */ +void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc); + +/* Allocate <count> objects with align <align> and skip_mask <skip_mask> + * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually + * allocated from is returned in <puid>. If the allocation fails, a negative + * number is returned. Otherwise, the offset of the first object is returned.
+ */ +u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, + int align, u32 skip_mask, u32 *puid); + +/* Free objects, start from of the uid from zone_allocator + * . + */ +u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, + u32 uid, u32 obj, u32 count); + +/* If was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of + * specifying the uid when freeing an object, zone allocator could figure it by + * itself. Other parameters are similar to mlx4_zone_free. + */ +u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count); + +/* Returns a pointer to mlx4_bitmap that was attached to with */ +struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid); + +#endif /* MLX4_H */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h new file mode 100644 index 000000000..909fcf803 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -0,0 +1,887 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _MLX4_EN_H_ +#define _MLX4_EN_H_ + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MLX4_EN_DCB +#include +#endif +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "en_port.h" +#include "mlx4_stats.h" + +#define DRV_NAME "mlx4_en" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb 2014" + +#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) + +/* + * Device constants + */ + + +#define MLX4_EN_PAGE_SHIFT 12 +#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) +#define DEF_RX_RINGS 16 +#define MAX_RX_RINGS 128 +#define MIN_RX_RINGS 4 +#define TXBB_SIZE 64 +#define HEADROOM (2048 / TXBB_SIZE + 1) +#define STAMP_STRIDE 64 +#define STAMP_DWORDS (STAMP_STRIDE / 4) +#define STAMP_SHIFT 31 +#define STAMP_VAL 0x7fffffff +#define STATS_DELAY (HZ / 4) +#define SERVICE_TASK_DELAY (HZ / 4) +#define MAX_NUM_OF_FS_RULES 256 + +#define MLX4_EN_FILTER_HASH_SHIFT 4 +#define MLX4_EN_FILTER_EXPIRY_QUOTA 60 + +/* Typical TSO descriptor with 16 gather entries is 352 bytes... 
*/ +#define MAX_DESC_SIZE 512 +#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE) + +/* + * OS related constants and tunables + */ + +#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1 + +#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) + +/* Use the maximum between 16384 and a single page */ +#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) + +#define MLX4_EN_ALLOC_PREFER_ORDER PAGE_ALLOC_COSTLY_ORDER + +/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU + * and 4K allocations) */ +enum { + FRAG_SZ0 = 1536 - NET_IP_ALIGN, + FRAG_SZ1 = 4096, + FRAG_SZ2 = 4096, + FRAG_SZ3 = MLX4_EN_ALLOC_SIZE +}; +#define MLX4_EN_MAX_RX_FRAGS 4 + +/* Maximum ring sizes */ +#define MLX4_EN_MAX_TX_SIZE 8192 +#define MLX4_EN_MAX_RX_SIZE 8192 + +/* Minimum ring size for our page-allocation scheme to work */ +#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES) +#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) + +#define MLX4_EN_SMALL_PKT_SIZE 64 +#define MLX4_EN_MIN_TX_RING_P_UP 1 +#define MLX4_EN_MAX_TX_RING_P_UP 32 +#define MLX4_EN_NUM_UP 8 +#define MLX4_EN_DEF_TX_RING_SIZE 512 +#define MLX4_EN_DEF_RX_RING_SIZE 1024 +#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ + MLX4_EN_NUM_UP) + +#define MLX4_EN_DEFAULT_TX_WORK 256 + +/* Target number of packets to coalesce with interrupt moderation */ +#define MLX4_EN_RX_COAL_TARGET 44 +#define MLX4_EN_RX_COAL_TIME 0x10 + +#define MLX4_EN_TX_COAL_PKTS 16 +#define MLX4_EN_TX_COAL_TIME 0x10 + +#define MLX4_EN_RX_RATE_LOW 400000 +#define MLX4_EN_RX_COAL_TIME_LOW 0 +#define MLX4_EN_RX_RATE_HIGH 450000 +#define MLX4_EN_RX_COAL_TIME_HIGH 128 +#define MLX4_EN_RX_SIZE_THRESH 1024 +#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) +#define MLX4_EN_SAMPLE_INTERVAL 0 +#define MLX4_EN_AVG_PKT_SMALL 256 + +#define MLX4_EN_AUTO_CONF 0xffff + +#define MLX4_EN_DEF_RX_PAUSE 1 +#define MLX4_EN_DEF_TX_PAUSE 1 + +/* Interval between successive polls in the Tx routine when polling is used + instead of interrupts (in per-core Tx rings) - should be power of 2 */ +#define MLX4_EN_TX_POLL_MODER 16 +#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4) + +#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) +#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) +#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) + +#define MLX4_EN_MIN_MTU 46 +#define ETH_BCAST 0xffffffffffffULL + +#define MLX4_EN_LOOPBACK_RETRIES 5 +#define MLX4_EN_LOOPBACK_TIMEOUT 100 + +#ifdef MLX4_EN_PERF_STAT +/* Number of samples to 'average' */ +#define AVG_SIZE 128 +#define AVG_FACTOR 1024 + +#define INC_PERF_COUNTER(cnt) (++(cnt)) +#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add)) +#define AVG_PERF_COUNTER(cnt, sample) \ + ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE) +#define GET_PERF_COUNTER(cnt) (cnt) +#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR) + +#else + +#define INC_PERF_COUNTER(cnt) do {} while (0) +#define ADD_PERF_COUNTER(cnt, add) do {} while (0) +#define AVG_PERF_COUNTER(cnt, sample) do {} while (0) +#define GET_PERF_COUNTER(cnt) (0) +#define GET_AVG_PERF_COUNTER(cnt) (0) +#endif /* MLX4_EN_PERF_STAT */ + +/* Constants for TX flow */ +enum { + MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ + MAX_BF = 256, + MIN_PKT_LEN = 17, +}; + +/* + * Configurables + */ + +enum cq_type { + RX = 0, + TX = 1, +}; + + +/* + * Useful macros + */ +#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x)) +#define XNOR(x, y) (!(x) == !(y)) + + +struct mlx4_en_tx_info { + struct sk_buff *skb; + dma_addr_t map0_dma; + u32 map0_byte_count; + u32 nr_txbb; + u32 nr_bytes; + u8 linear; + 
u8 data_offset; + u8 inl; + u8 ts_requested; + u8 nr_maps; +} ____cacheline_aligned_in_smp; + + +#define MLX4_EN_BIT_DESC_OWN 0x80000000 +#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg) +#define MLX4_EN_MEMTYPE_PAD 0x100 +#define DS_SIZE sizeof(struct mlx4_wqe_data_seg) + + +struct mlx4_en_tx_desc { + struct mlx4_wqe_ctrl_seg ctrl; + union { + struct mlx4_wqe_data_seg data; /* at least one data segment */ + struct mlx4_wqe_lso_seg lso; + struct mlx4_wqe_inline_seg inl; + }; +}; + +#define MLX4_EN_USE_SRQ 0x01000000 + +#define MLX4_EN_CX3_LOW_ID 0x1000 +#define MLX4_EN_CX3_HIGH_ID 0x1005 + +struct mlx4_en_rx_alloc { + struct page *page; + dma_addr_t dma; + u32 page_offset; + u32 page_size; +}; + +struct mlx4_en_tx_ring { + /* cache line used and dirtied in tx completion + * (mlx4_en_free_tx_buf()) + */ + u32 last_nr_txbb; + u32 cons; + unsigned long wake_queue; + + /* cache line used and dirtied in mlx4_en_xmit() */ + u32 prod ____cacheline_aligned_in_smp; + unsigned long bytes; + unsigned long packets; + unsigned long tx_csum; + unsigned long tso_packets; + unsigned long xmit_more; + struct mlx4_bf bf; + unsigned long queue_stopped; + + /* Following part should be mostly read */ + cpumask_t affinity_mask; + struct mlx4_qp qp; + struct mlx4_hwq_resources wqres; + u32 size; /* number of TXBBs */ + u32 size_mask; + u16 stride; + u32 full_size; + u16 cqn; /* index of port CQ associated with this ring */ + u32 buf_size; + __be32 doorbell_qpn; + __be32 mr_key; + void *buf; + struct mlx4_en_tx_info *tx_info; + u8 *bounce_buf; + struct mlx4_qp_context context; + int qpn; + enum mlx4_qp_state qp_state; + u8 queue_index; + bool bf_enabled; + bool bf_alloced; + struct netdev_queue *tx_queue; + int hwtstamp_tx_type; +} ____cacheline_aligned_in_smp; + +struct mlx4_en_rx_desc { + /* actual number of entries depends on rx ring stride */ + struct mlx4_wqe_data_seg data[0]; +}; + +struct mlx4_en_rx_ring { + struct mlx4_hwq_resources wqres; + struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; + u32 size ; /* number of Rx descs*/ + u32 actual_size; + u32 size_mask; + u16 stride; + u16 log_stride; + u16 cqn; /* index of port CQ associated with this ring */ + u32 prod; + u32 cons; + u32 buf_size; + u8 fcs_del; + void *buf; + void *rx_info; + unsigned long bytes; + unsigned long packets; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned long yields; + unsigned long misses; + unsigned long cleaned; +#endif + unsigned long csum_ok; + unsigned long csum_none; + unsigned long csum_complete; + int hwtstamp_rx_filter; + cpumask_var_t affinity_mask; +}; + +struct mlx4_en_cq { + struct mlx4_cq mcq; + struct mlx4_hwq_resources wqres; + int ring; + struct net_device *dev; + struct napi_struct napi; + int size; + int buf_size; + unsigned vector; + enum cq_type is_tx; + u16 moder_time; + u16 moder_cnt; + struct mlx4_cqe *buf; +#define MLX4_EN_OPCODE_ERROR 0x1e + +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define MLX4_EN_CQ_STATE_IDLE 0 +#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ +#define MLX4_EN_CQ_STATE_POLL 2 /* poll owns this CQ */ +#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL) +#define MLX4_EN_CQ_STATE_NAPI_YIELD 4 /* NAPI yielded this CQ */ +#define MLX4_EN_CQ_STATE_POLL_YIELD 8 /* poll yielded this CQ */ +#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) +#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) + spinlock_t poll_lock; /* protects from LLS/napi conflicts */ +#endif /* CONFIG_NET_RX_BUSY_POLL */ + struct 
irq_desc *irq_desc; +}; + +struct mlx4_en_port_profile { + u32 flags; + u32 tx_ring_num; + u32 rx_ring_num; + u32 tx_ring_size; + u32 rx_ring_size; + u8 rx_pause; + u8 rx_ppp; + u8 tx_pause; + u8 tx_ppp; + int rss_rings; + int inline_thold; +}; + +struct mlx4_en_profile { + int udp_rss; + u8 rss_mask; + u32 active_ports; + u32 small_pkt_int; + u8 no_reset; + u8 num_tx_rings_p_up; + struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; +}; + +struct mlx4_en_dev { + struct mlx4_dev *dev; + struct pci_dev *pdev; + struct mutex state_lock; + struct net_device *pndev[MLX4_MAX_PORTS + 1]; + struct net_device *upper[MLX4_MAX_PORTS + 1]; + u32 port_cnt; + bool device_up; + struct mlx4_en_profile profile; + u32 LSO_support; + struct workqueue_struct *workqueue; + struct device *dma_device; + void __iomem *uar_map; + struct mlx4_uar priv_uar; + struct mlx4_mr mr; + u32 priv_pdn; + spinlock_t uar_lock; + u8 mac_removed[MLX4_MAX_PORTS + 1]; + rwlock_t clock_lock; + u32 nominal_c_mult; + struct cyclecounter cycles; + struct timecounter clock; + unsigned long last_overflow_check; + unsigned long overflow_period; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct notifier_block nb; +}; + + +struct mlx4_en_rss_map { + int base_qpn; + struct mlx4_qp qps[MAX_RX_RINGS]; + enum mlx4_qp_state state[MAX_RX_RINGS]; + struct mlx4_qp indir_qp; + enum mlx4_qp_state indir_state; +}; + +enum mlx4_en_port_flag { + MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */ + MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */ +}; + +struct mlx4_en_port_state { + int link_state; + int link_speed; + int transceiver; + u32 flags; +}; + +enum mlx4_en_mclist_act { + MCLIST_NONE, + MCLIST_REM, + MCLIST_ADD, +}; + +struct mlx4_en_mc_list { + struct list_head list; + enum mlx4_en_mclist_act action; + u8 addr[ETH_ALEN]; + u64 reg_id; + u64 tunnel_reg_id; +}; + +struct mlx4_en_frag_info { + u16 frag_size; + u16 frag_prefix_size; + u16 frag_stride; +}; + +#ifdef CONFIG_MLX4_EN_DCB +/* Minimal TC BW - setting to 0 will block traffic */ +#define MLX4_EN_BW_MIN 1 +#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */ + +#define MLX4_EN_TC_ETS 7 + +#endif + +struct ethtool_flow_id { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; + u64 id; +}; + +enum { + MLX4_EN_FLAG_PROMISC = (1 << 0), + MLX4_EN_FLAG_MC_PROMISC = (1 << 1), + /* whether we need to enable hardware loopback by putting dmac + * in Tx WQE + */ + MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2), + /* whether we need to drop packets that hardware loopback-ed */ + MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), + MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), + MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), +}; + +#define PORT_BEACON_MAX_LIMIT (65535) +#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE) +#define MLX4_EN_MAC_HASH_IDX 5 + +struct mlx4_en_stats_bitmap { + DECLARE_BITMAP(bitmap, NUM_ALL_STATS); + struct mutex mutex; /* for mutual access to stats bitmap */ +}; + +struct mlx4_en_priv { + struct mlx4_en_dev *mdev; + struct mlx4_en_port_profile *prof; + struct net_device *dev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct net_device_stats stats; + struct net_device_stats ret_stats; + struct mlx4_en_port_state port_state; + spinlock_t stats_lock; + struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; + /* To allow rules removal while port is going down */ + struct list_head ethtool_list; + + unsigned long last_moder_packets[MAX_RX_RINGS]; + unsigned long last_moder_tx_packets; + unsigned long 
last_moder_bytes[MAX_RX_RINGS]; + unsigned long last_moder_jiffies; + int last_moder_time[MAX_RX_RINGS]; + u16 rx_usecs; + u16 rx_frames; + u16 tx_usecs; + u16 tx_frames; + u32 pkt_rate_low; + u16 rx_usecs_low; + u32 pkt_rate_high; + u16 rx_usecs_high; + u16 sample_interval; + u16 adaptive_rx_coal; + u32 msg_enable; + u32 loopback_ok; + u32 validate_loopback; + + struct mlx4_hwq_resources res; + int link_state; + int last_link_state; + bool port_up; + int port; + int registered; + int allocated; + int stride; + unsigned char current_mac[ETH_ALEN + 2]; + int mac_index; + unsigned max_mtu; + int base_qpn; + int cqe_factor; + int cqe_size; + + struct mlx4_en_rss_map rss_map; + __be32 ctrl_flags; + u32 flags; + u8 num_tx_rings_p_up; + u32 tx_work_limit; + u32 tx_ring_num; + u32 rx_ring_num; + u32 rx_skb_size; + struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; + u16 num_frags; + u16 log_rx_info; + + struct mlx4_en_tx_ring **tx_ring; + struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; + struct mlx4_en_cq **tx_cq; + struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; + struct mlx4_qp drop_qp; + struct work_struct rx_mode_task; + struct work_struct watchdog_task; + struct work_struct linkstate_task; + struct delayed_work stats_task; + struct delayed_work service_task; +#ifdef CONFIG_MLX4_EN_VXLAN + struct work_struct vxlan_add_task; + struct work_struct vxlan_del_task; +#endif + struct mlx4_en_perf_stats pstats; + struct mlx4_en_pkt_stats pkstats; + struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES]; + struct mlx4_en_flow_stats_tx tx_priority_flowstats[MLX4_NUM_PRIORITIES]; + struct mlx4_en_flow_stats_rx rx_flowstats; + struct mlx4_en_flow_stats_tx tx_flowstats; + struct mlx4_en_port_stats port_stats; + struct mlx4_en_stats_bitmap stats_bitmap; + struct list_head mc_list; + struct list_head curr_list; + u64 broadcast_id; + struct mlx4_en_stat_out_mbox hw_stats; + int vids[128]; + bool wol; + struct device *ddev; + struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; + struct hwtstamp_config hwtstamp_config; + +#ifdef CONFIG_MLX4_EN_DCB + struct ieee_ets ets; + u16 maxrate[IEEE_8021QAZ_MAX_TCS]; + enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; +#endif +#ifdef CONFIG_RFS_ACCEL + spinlock_t filters_lock; + int last_filter_id; + struct list_head filters; + struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT]; +#endif + u64 tunnel_reg_id; + __be16 vxlan_port; + + u32 pflags; + u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; + u8 rss_hash_fn; +}; + +enum mlx4_en_wol { + MLX4_EN_WOL_MAGIC = (1ULL << 61), + MLX4_EN_WOL_ENABLED = (1ULL << 62), +}; + +struct mlx4_mac_entry { + struct hlist_node hlist; + unsigned char mac[ETH_ALEN + 2]; + u64 reg_id; + struct rcu_head rcu; +}; + +static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) +{ + return buf + idx * cqe_sz; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ + spin_lock_init(&cq->poll_lock); + cq->state = MLX4_EN_CQ_STATE_IDLE; +} + +/* called from the device poll rutine to get ownership of a cq */ +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock(&cq->poll_lock); + if (cq->state & MLX4_CQ_LOCKED) { + WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); + cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; + rc = false; + } else + /* we don't care if someone yielded */ + cq->state = MLX4_EN_CQ_STATE_NAPI; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* returns true is someone tried to get the cq while napi had it */ +static 
inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL | + MLX4_EN_CQ_STATE_NAPI_YIELD)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock(&cq->poll_lock); + return rc; +} + +/* called from mlx4_en_low_latency_poll() */ +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + int rc = true; + spin_lock_bh(&cq->poll_lock); + if ((cq->state & MLX4_CQ_LOCKED)) { + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; + + cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD; + rc = false; + rx_ring->yields++; + } else + /* preserve yield marks */ + cq->state |= MLX4_EN_CQ_STATE_POLL; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* returns true if someone tried to get the cq while it was locked */ +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + int rc = false; + spin_lock_bh(&cq->poll_lock); + WARN_ON(cq->state & (MLX4_EN_CQ_STATE_NAPI)); + + if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD) + rc = true; + cq->state = MLX4_EN_CQ_STATE_IDLE; + spin_unlock_bh(&cq->poll_lock); + return rc; +} + +/* true if a socket is polling, even if it did not get the lock */ +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) +{ + WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); + return cq->state & CQ_USER_PEND; +} +#else +static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) +{ +} + +static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) +{ + return true; +} + +static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) +{ + return false; +} + +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) +{ + return false; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) + +void mlx4_en_update_loopback_state(struct net_device *dev, + netdev_features_t features); + +void mlx4_en_destroy_netdev(struct net_device *dev); +int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, + struct mlx4_en_port_profile *prof); + +int mlx4_en_start_port(struct net_device *dev); +void mlx4_en_stop_port(struct net_device *dev, int detach); + +void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause); + +void mlx4_en_free_resources(struct mlx4_en_priv *priv); +int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); + +int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq, + int entries, int ring, enum cq_type mode, int node); +void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq); +int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, + int cq_idx); +void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); +int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); +int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); + +void mlx4_en_tx_irq(struct mlx4_cq *mcq); +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); +netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); + +int mlx4_en_create_tx_ring(struct mlx4_en_priv 
*priv, + struct mlx4_en_tx_ring **pring, + u32 size, u16 stride, + int node, int queue_index); +void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring **pring); +int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int cq, int user_prio); +void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring); +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); +int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride, int node); +void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring **pring, + u32 size, u16 stride); +int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv); +void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring); +int mlx4_en_process_rx_cq(struct net_device *dev, + struct mlx4_en_cq *cq, + int budget); +int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); +void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, + int is_tx, int rss, int qpn, int cqn, int user_prio, + struct mlx4_qp_context *context); +void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); +int mlx4_en_map_buffer(struct mlx4_buf *buf); +void mlx4_en_unmap_buffer(struct mlx4_buf *buf); + +void mlx4_en_calc_rx_buf(struct net_device *dev); +int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); +void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); +int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv); +void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv); +int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); +void mlx4_en_rx_irq(struct mlx4_cq *mcq); + +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode); +int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv); + +int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); +int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); + +#ifdef CONFIG_MLX4_EN_DCB +extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; +extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops; +#endif + +int mlx4_en_setup_tc(struct net_device *dev, u8 up); + +#ifdef CONFIG_RFS_ACCEL +void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv); +#endif + +#define MLX4_EN_NUM_SELF_TEST 5 +void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); +void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); + +#define DEV_FEATURE_CHANGED(dev, new_features, feature) \ + ((dev->features & feature) ^ (new_features & feature)) + +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t new_features); +void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev, + struct mlx4_en_stats_bitmap *stats_bitmap, + u8 rx_ppp, u8 rx_pause, + u8 tx_ppp, u8 tx_pause); +int mlx4_en_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr); + +/* + * Functions for time stamping + */ +u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe); +void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev, + struct skb_shared_hwtstamps *hwts, + u64 timestamp); +void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); +void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); + +/* Globals + */ +extern const struct ethtool_ops mlx4_en_ethtool_ops; + + + +/* + * printk / logging functions + */ + 
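+/* Usage sketch (illustrative): en_dbg(DRV, priv, "%d tx rings\n", n) prints at + * KERN_DEBUG only when NETIF_MSG_DRV is set in priv->msg_enable (typically adjusted + * through ethtool's msglvl), en_warn/en_err/en_info always print, and the mlx4_* + * variants below prefix messages with DRV_NAME and the PCI device name. + */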
+__printf(3, 4) +void en_print(const char *level, const struct mlx4_en_priv *priv, + const char *format, ...); + +#define en_dbg(mlevel, priv, format, ...) \ +do { \ + if (NETIF_MSG_##mlevel & (priv)->msg_enable) \ + en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \ +} while (0) +#define en_warn(priv, format, ...) \ + en_print(KERN_WARNING, priv, format, ##__VA_ARGS__) +#define en_err(priv, format, ...) \ + en_print(KERN_ERR, priv, format, ##__VA_ARGS__) +#define en_info(priv, format, ...) \ + en_print(KERN_INFO, priv, format, ##__VA_ARGS__) + +#define mlx4_err(mdev, format, ...) \ + pr_err(DRV_NAME " %s: " format, \ + dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__) +#define mlx4_info(mdev, format, ...) \ + pr_info(DRV_NAME " %s: " format, \ + dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__) +#define mlx4_warn(mdev, format, ...) \ + pr_warn(DRV_NAME " %s: " format, \ + dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h new file mode 100644 index 000000000..00555832a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -0,0 +1,107 @@ +#ifndef _MLX4_STATS_ +#define _MLX4_STATS_ + +#ifdef MLX4_EN_PERF_STAT +#define NUM_PERF_STATS NUM_PERF_COUNTERS +#else +#define NUM_PERF_STATS 0 +#endif + +#define NUM_PRIORITIES 9 +#define NUM_PRIORITY_STATS 2 + +struct mlx4_en_pkt_stats { + unsigned long rx_multicast_packets; + unsigned long rx_broadcast_packets; + unsigned long rx_jabbers; + unsigned long rx_in_range_length_error; + unsigned long rx_out_range_length_error; + unsigned long tx_multicast_packets; + unsigned long tx_broadcast_packets; + unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; + unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; +#define NUM_PKT_STATS 43 +}; + +struct mlx4_en_port_stats { + unsigned long tso_packets; + unsigned long xmit_more; + unsigned long queue_stopped; + unsigned long wake_queue; + unsigned long tx_timeout; + unsigned long rx_alloc_failed; + unsigned long rx_chksum_good; + unsigned long rx_chksum_none; + unsigned long rx_chksum_complete; + unsigned long tx_chksum_offload; +#define NUM_PORT_STATS 10 +}; + +struct mlx4_en_perf_stats { + u32 tx_poll; + u64 tx_pktsz_avg; + u32 inflight_avg; + u16 tx_coal_avg; + u16 rx_coal_avg; + u32 napi_quota; +#define NUM_PERF_COUNTERS 6 +}; + +#define NUM_MAIN_STATS 21 + +#define MLX4_NUM_PRIORITIES 8 + +struct mlx4_en_flow_stats_rx { + u64 rx_pause; + u64 rx_pause_duration; + u64 rx_pause_transition; +#define NUM_FLOW_STATS_RX 3 +#define NUM_FLOW_PRIORITY_STATS_RX (NUM_FLOW_STATS_RX * \ + MLX4_NUM_PRIORITIES) +}; + +struct mlx4_en_flow_stats_tx { + u64 tx_pause; + u64 tx_pause_duration; + u64 tx_pause_transition; +#define NUM_FLOW_STATS_TX 3 +#define NUM_FLOW_PRIORITY_STATS_TX (NUM_FLOW_STATS_TX * \ + MLX4_NUM_PRIORITIES) +}; + +#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \ + NUM_FLOW_PRIORITY_STATS_TX + \ + NUM_FLOW_PRIORITY_STATS_RX) + +struct mlx4_en_stat_out_flow_control_mbox { + /* Total number of PAUSE frames received from the far-end port */ + __be64 rx_pause; + /* Total number of microseconds that far-end port requested to pause + * transmission of packets + */ + __be64 rx_pause_duration; + /* Number of received transmission from XOFF state to XON state */ + __be64 rx_pause_transition; + /* Total number of PAUSE frames sent from the far-end port */ + __be64 tx_pause; + /* Total time in microseconds that transmission of packets has been + * paused + */ + 
__be64 tx_pause_duration; + /* Number of transmitter transitions from XOFF state to XON state */ + __be64 tx_pause_transition; + /* Reserverd */ + __be64 reserved[2]; +}; + +enum { + MLX4_DUMP_ETH_STATS_FLOW_CONTROL = 1 << 12 +}; + +#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \ + NUM_FLOW_STATS + NUM_PERF_STATS) + +#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ + sizeof(((struct net_device_stats *)0)->n)) + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c new file mode 100644 index 000000000..78f51e103 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -0,0 +1,1158 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include "mlx4.h" +#include "icm.h" + +static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) +{ + int o; + int m; + u32 seg; + + spin_lock(&buddy->lock); + + for (o = order; o <= buddy->max_order; ++o) + if (buddy->num_free[o]) { + m = 1 << (buddy->max_order - o); + seg = find_first_bit(buddy->bits[o], m); + if (seg < m) + goto found; + } + + spin_unlock(&buddy->lock); + return -1; + + found: + clear_bit(seg, buddy->bits[o]); + --buddy->num_free[o]; + + while (o > order) { + --o; + seg <<= 1; + set_bit(seg ^ 1, buddy->bits[o]); + ++buddy->num_free[o]; + } + + spin_unlock(&buddy->lock); + + seg <<= order; + + return seg; +} + +static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) +{ + seg >>= order; + + spin_lock(&buddy->lock); + + while (test_bit(seg ^ 1, buddy->bits[order])) { + clear_bit(seg ^ 1, buddy->bits[order]); + --buddy->num_free[order]; + seg >>= 1; + ++order; + } + + set_bit(seg, buddy->bits[order]); + ++buddy->num_free[order]; + + spin_unlock(&buddy->lock); +} + +static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) +{ + int i, s; + + buddy->max_order = max_order; + spin_lock_init(&buddy->lock); + + buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), + GFP_KERNEL); + buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, + GFP_KERNEL); + if (!buddy->bits || !buddy->num_free) + goto err_out; + + for (i = 0; i <= buddy->max_order; ++i) { + s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); + if (!buddy->bits[i]) { + buddy->bits[i] = vzalloc(s * sizeof(long)); + if (!buddy->bits[i]) + goto err_out_free; + } + } + + set_bit(0, buddy->bits[buddy->max_order]); + buddy->num_free[buddy->max_order] = 1; + + return 0; + +err_out_free: + for (i = 0; i <= buddy->max_order; ++i) + kvfree(buddy->bits[i]); + +err_out: + kfree(buddy->bits); + kfree(buddy->num_free); + + return -ENOMEM; +} + +static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) +{ + int i; + + for (i = 0; i <= buddy->max_order; ++i) + kvfree(buddy->bits[i]); + + kfree(buddy->bits); + kfree(buddy->num_free); +} + +u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + u32 seg; + int seg_order; + u32 offset; + + seg_order = max_t(int, order - log_mtts_per_seg, 0); + + seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); + if (seg == -1) + return -1; + + offset = seg * (1 << log_mtts_per_seg); + + if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset, + offset + (1 << order) - 1)) { + mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); + return -1; + } + + return offset; +} + +static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) +{ + u64 in_param = 0; + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, order); + err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (err) + return -1; + return get_param_l(&out_param); + } + return __mlx4_alloc_mtt_range(dev, order); +} + +int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, + struct mlx4_mtt *mtt) +{ + int i; + + if (!npages) { + mtt->order = -1; + mtt->page_shift = MLX4_ICM_PAGE_SHIFT; + return 0; + } else + mtt->page_shift = page_shift; + + for (mtt->order = 0, i = 1; i < npages; i <<= 1) + ++mtt->order; + + mtt->offset = 
mlx4_alloc_mtt_range(dev, mtt->order); + if (mtt->offset == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mtt_init); + +void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) +{ + u32 first_seg; + int seg_order; + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + seg_order = max_t(int, order - log_mtts_per_seg, 0); + first_seg = offset / (1 << log_mtts_per_seg); + + mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order); + mlx4_table_put_range(dev, &mr_table->mtt_table, offset, + offset + (1 << order) - 1); +} + +static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, offset); + set_param_h(&in_param, order); + err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n", + offset, order); + return; + } + __mlx4_free_mtt_range(dev, offset, order); +} + +void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +{ + if (mtt->order < 0) + return; + + mlx4_free_mtt_range(dev, mtt->offset, mtt->order); +} +EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); + +u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt) +{ + return (u64) mtt->offset * dev->caps.mtt_entry_sz; +} +EXPORT_SYMBOL_GPL(mlx4_mtt_addr); + +static u32 hw_index_to_key(u32 ind) +{ + return (ind >> 24) | (ind << 8); +} + +static u32 key_to_hw_index(u32 key) +{ + return (key << 24) | (key >> 8); +} + +static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int mpt_index) +{ + return mlx4_cmd(dev, mailbox->dma, mpt_index, + 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int mpt_index) +{ + return mlx4_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, mpt_index, + !mailbox, MLX4_CMD_HW2SW_MPT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); +} + +/* Must protect against concurrent access */ +int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry ***mpt_entry) +{ + int err; + int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); + struct mlx4_cmd_mailbox *mailbox = NULL; + + if (mmr->enabled != MLX4_MPT_EN_HW) + return -EINVAL; + + err = mlx4_HW2SW_MPT(dev, NULL, key); + if (err) { + mlx4_warn(dev, "HW2SW_MPT failed (%d).", err); + mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n"); + return err; + } + + mmr->enabled = MLX4_MPT_EN_SW; + + if (!mlx4_is_mfunc(dev)) { + **mpt_entry = mlx4_table_find( + &mlx4_priv(dev)->mr_table.dmpt_table, + key, NULL); + } else { + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR_OR_NULL(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, key, + 0, MLX4_CMD_QUERY_MPT, + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + goto free_mailbox; + + *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf; + } + + if (!(*mpt_entry) || !(**mpt_entry)) { + err = -ENOMEM; + goto free_mailbox; + } + + return 0; + +free_mailbox: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt); + +int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, + struct mlx4_mpt_entry **mpt_entry) +{ + int err; + + if (!mlx4_is_mfunc(dev)) { + /* Make sure any changes to this entry are flushed */ + wmb(); + + *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW; + + /* Make sure the new status is written */ + wmb(); + + err = mlx4_SYNC_TPT(dev); + } else { + int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1); + + struct mlx4_cmd_mailbox *mailbox = + container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, + buf); + + err = mlx4_SW2HW_MPT(dev, mailbox, key); + } + + if (!err) { + mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK; + mmr->enabled = MLX4_MPT_EN_HW; + } + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt); + +void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev, + struct mlx4_mpt_entry **mpt_entry) +{ + if (mlx4_is_mfunc(dev)) { + struct mlx4_cmd_mailbox *mailbox = + container_of((void *)mpt_entry, struct mlx4_cmd_mailbox, + buf); + mlx4_free_cmd_mailbox(dev, mailbox); + } +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt); + +int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry, + u32 pdn) +{ + u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK; + /* The wrapper function will put the slave's id here */ + if (mlx4_is_mfunc(dev)) + pd_flags &= ~MLX4_MPT_PD_VF_MASK; + + mpt_entry->pd_flags = cpu_to_be32(pd_flags | + (pdn & MLX4_MPT_PD_MASK) + | MLX4_MPT_PD_FLAG_EN_INV); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd); + +int mlx4_mr_hw_change_access(struct mlx4_dev *dev, + struct mlx4_mpt_entry *mpt_entry, + u32 access) +{ + u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) | + (access & MLX4_PERM_MASK); + + mpt_entry->flags = cpu_to_be32(flags); + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access); + +static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, + u64 iova, u64 size, u32 access, int npages, + int page_shift, struct mlx4_mr *mr) +{ + mr->iova = iova; + mr->size = size; + mr->pd = pd; + mr->access = access; + mr->enabled = MLX4_MPT_DISABLED; + mr->key = hw_index_to_key(mridx); + + return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); +} + +static int mlx4_WRITE_MTT(struct 
mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox, + int num_entries) +{ + return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +int __mlx4_mpt_reserve(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); +} + +static int mlx4_mpt_reserve(struct mlx4_dev *dev) +{ + u64 out_param; + + if (mlx4_is_mfunc(dev)) { + if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + return -1; + return get_param_l(&out_param); + } + return __mlx4_mpt_reserve(dev); +} + +void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR); +} + +static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, index); + if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to release mr index:%d\n", + index); + return; + } + __mlx4_mpt_release(dev, index); +} + +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp); +} + +static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) +{ + u64 param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(¶m, index); + return mlx4_cmd_imm(dev, param, ¶m, RES_MPT, RES_OP_MAP_ICM, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + } + return __mlx4_mpt_alloc_icm(dev, index, gfp); +} + +void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) +{ + struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; + + mlx4_table_put(dev, &mr_table->dmpt_table, index); +} + +static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, index); + if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to free icm of mr index:%d\n", + index); + return; + } + return __mlx4_mpt_free_icm(dev, index); +} + +int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, + int npages, int page_shift, struct mlx4_mr *mr) +{ + u32 index; + int err; + + index = mlx4_mpt_reserve(dev); + if (index == -1) + return -ENOMEM; + + err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, + access, npages, page_shift, mr); + if (err) + mlx4_mpt_release(dev, index); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_alloc); + +static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + int err; + + if (mr->enabled == MLX4_MPT_EN_HW) { + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(mr->key) & + (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n", + err); + return err; + } + + mr->enabled = MLX4_MPT_EN_SW; + } + mlx4_mtt_cleanup(dev, &mr->mtt); + + return 0; +} + +int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + int ret; + + ret = mlx4_mr_free_reserved(dev, mr); + if (ret) + return ret; + if (mr->enabled) + mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); + mlx4_mpt_release(dev, key_to_hw_index(mr->key)); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_free); + +void 
mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + mlx4_mtt_cleanup(dev, &mr->mtt); + mr->mtt.order = -1; +} +EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); + +int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, + u64 iova, u64 size, int npages, + int page_shift, struct mlx4_mpt_entry *mpt_entry) +{ + int err; + + err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); + if (err) + return err; + + mpt_entry->start = cpu_to_be64(iova); + mpt_entry->length = cpu_to_be64(size); + mpt_entry->entity_size = cpu_to_be32(page_shift); + mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE | + MLX4_MPT_FLAG_SW_OWNS)); + if (mr->mtt.order < 0) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); + mpt_entry->mtt_addr = 0; + } else { + mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, + &mr->mtt)); + if (mr->mtt.page_shift == 0) + mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); + } + if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { + /* fast register MR in free state */ + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | + MLX4_MPT_PD_FLAG_RAE); + } else { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); + } + mr->enabled = MLX4_MPT_EN_SW; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write); + +int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mpt_entry *mpt_entry; + int err; + + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL); + if (err) + return err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_table; + } + mpt_entry = mailbox->buf; + mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO | + MLX4_MPT_FLAG_REGION | + mr->access); + + mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key)); + mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV); + mpt_entry->start = cpu_to_be64(mr->iova); + mpt_entry->length = cpu_to_be64(mr->size); + mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); + + if (mr->mtt.order < 0) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); + mpt_entry->mtt_addr = 0; + } else { + mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev, + &mr->mtt)); + } + + if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { + /* fast register MR in free state */ + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG | + MLX4_MPT_PD_FLAG_RAE); + mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order); + } else { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS); + } + + err = mlx4_SW2HW_MPT(dev, mailbox, + key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_cmd; + } + mr->enabled = MLX4_MPT_EN_HW; + + mlx4_free_cmd_mailbox(dev, mailbox); + + return 0; + +err_cmd: + mlx4_free_cmd_mailbox(dev, mailbox); + +err_table: + mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mr_enable); + +static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + __be64 *mtts; + dma_addr_t dma_handle; + int i; + + mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset + + start_index, &dma_handle); + + if (!mtts) + return -ENOMEM; + + dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle, + npages * sizeof (u64), 
DMA_TO_DEVICE); + + for (i = 0; i < npages; ++i) + mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); + + dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle, + npages * sizeof (u64), DMA_TO_DEVICE); + + return 0; +} + +int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + int err = 0; + int chunk; + int mtts_per_page; + int max_mtts_first_page; + + /* compute how may mtts fit in the first page */ + mtts_per_page = PAGE_SIZE / sizeof(u64); + max_mtts_first_page = mtts_per_page - (mtt->offset + start_index) + % mtts_per_page; + + chunk = min_t(int, max_mtts_first_page, npages); + + while (npages > 0) { + err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); + if (err) + return err; + npages -= chunk; + start_index += chunk; + page_list += chunk; + + chunk = min_t(int, mtts_per_page, npages); + } + return err; +} + +int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + int start_index, int npages, u64 *page_list) +{ + struct mlx4_cmd_mailbox *mailbox = NULL; + __be64 *inbox = NULL; + int chunk; + int err = 0; + int i; + + if (mtt->order < 0) + return -EINVAL; + + if (mlx4_is_mfunc(dev)) { + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + while (npages > 0) { + chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2, + npages); + inbox[0] = cpu_to_be64(mtt->offset + start_index); + inbox[1] = 0; + for (i = 0; i < chunk; ++i) + inbox[i + 2] = cpu_to_be64(page_list[i] | + MLX4_MTT_FLAG_PRESENT); + err = mlx4_WRITE_MTT(dev, mailbox, chunk); + if (err) { + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + npages -= chunk; + start_index += chunk; + page_list += chunk; + } + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + } + + return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); +} +EXPORT_SYMBOL_GPL(mlx4_write_mtt); + +int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_buf *buf, gfp_t gfp) +{ + u64 *page_list; + int err; + int i; + + page_list = kmalloc(buf->npages * sizeof *page_list, + gfp); + if (!page_list) + return -ENOMEM; + + for (i = 0; i < buf->npages; ++i) + if (buf->nbufs == 1) + page_list[i] = buf->direct.map + (i << buf->page_shift); + else + page_list[i] = buf->page_list[i].map; + + err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); + + kfree(page_list); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt); + +int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type, + struct mlx4_mw *mw) +{ + u32 index; + + if ((type == MLX4_MW_TYPE_1 && + !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) || + (type == MLX4_MW_TYPE_2 && + !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN))) + return -ENOTSUPP; + + index = mlx4_mpt_reserve(dev); + if (index == -1) + return -ENOMEM; + + mw->key = hw_index_to_key(index); + mw->pd = pd; + mw->type = type; + mw->enabled = MLX4_MPT_DISABLED; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_mw_alloc); + +int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_mpt_entry *mpt_entry; + int err; + + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL); + if (err) + return err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_table; + } + mpt_entry = mailbox->buf; + + /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned + * off, thus creating a memory window and 
not a memory region. + */ + mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key)); + mpt_entry->pd_flags = cpu_to_be32(mw->pd); + if (mw->type == MLX4_MW_TYPE_2) { + mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE); + mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP); + mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV); + } + + err = mlx4_SW2HW_MPT(dev, mailbox, + key_to_hw_index(mw->key) & + (dev->caps.num_mpts - 1)); + if (err) { + mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); + goto err_cmd; + } + mw->enabled = MLX4_MPT_EN_HW; + + mlx4_free_cmd_mailbox(dev, mailbox); + + return 0; + +err_cmd: + mlx4_free_cmd_mailbox(dev, mailbox); + +err_table: + mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_mw_enable); + +void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw) +{ + int err; + + if (mw->enabled == MLX4_MPT_EN_HW) { + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(mw->key) & + (dev->caps.num_mpts - 1)); + if (err) + mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); + + mw->enabled = MLX4_MPT_EN_SW; + } + if (mw->enabled) + mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key)); + mlx4_mpt_release(dev, key_to_hw_index(mw->key)); +} +EXPORT_SYMBOL_GPL(mlx4_mw_free); + +int mlx4_init_mr_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mr_table *mr_table = &priv->mr_table; + int err; + + /* Nothing to do for slaves - all MR handling is forwarded + * to the master */ + if (mlx4_is_slave(dev)) + return 0; + + if (!is_power_of_2(dev->caps.num_mpts)) + return -EINVAL; + + err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, + ~0, dev->caps.reserved_mrws, 0); + if (err) + return err; + + err = mlx4_buddy_init(&mr_table->mtt_buddy, + ilog2((u32)dev->caps.num_mtts / + (1 << log_mtts_per_seg))); + if (err) + goto err_buddy; + + if (dev->caps.reserved_mtts) { + priv->reserved_mtts = + mlx4_alloc_mtt_range(dev, + fls(dev->caps.reserved_mtts - 1)); + if (priv->reserved_mtts < 0) { + mlx4_warn(dev, "MTT table of order %u is too small\n", + mr_table->mtt_buddy.max_order); + err = -ENOMEM; + goto err_reserve_mtts; + } + } + + return 0; + +err_reserve_mtts: + mlx4_buddy_cleanup(&mr_table->mtt_buddy); + +err_buddy: + mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); + + return err; +} + +void mlx4_cleanup_mr_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_mr_table *mr_table = &priv->mr_table; + + if (mlx4_is_slave(dev)) + return; + if (priv->reserved_mtts >= 0) + mlx4_free_mtt_range(dev, priv->reserved_mtts, + fls(dev->caps.reserved_mtts - 1)); + mlx4_buddy_cleanup(&mr_table->mtt_buddy); + mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); +} + +static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova) +{ + int i, page_mask; + + if (npages > fmr->max_pages) + return -EINVAL; + + page_mask = (1 << fmr->page_shift) - 1; + + /* We are getting page lists, so va must be page aligned. 
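For example, with page_shift == 12 (4 KB pages), page_mask is 0xfff, so an iova with any of its low 12 bits set is rejected.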
*/ + if (iova & page_mask) + return -EINVAL; + + /* Trust the user not to pass misaligned data in page_list */ + if (0) + for (i = 0; i < npages; ++i) { + if (page_list[i] & ~page_mask) + return -EINVAL; + } + + if (fmr->maps >= fmr->max_maps) + return -EINVAL; + + return 0; +} + +int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, + int npages, u64 iova, u32 *lkey, u32 *rkey) +{ + u32 key; + int i, err; + + err = mlx4_check_fmr(fmr, page_list, npages, iova); + if (err) + return err; + + ++fmr->maps; + + key = key_to_hw_index(fmr->mr.key); + key += dev->caps.num_mpts; + *lkey = *rkey = fmr->mr.key = hw_index_to_key(key); + + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; + + /* Make sure MPT status is visible before writing MTT entries */ + wmb(); + + dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle, + npages * sizeof(u64), DMA_TO_DEVICE); + + for (i = 0; i < npages; ++i) + fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); + + dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle, + npages * sizeof(u64), DMA_TO_DEVICE); + + fmr->mpt->key = cpu_to_be32(key); + fmr->mpt->lkey = cpu_to_be32(key); + fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); + fmr->mpt->start = cpu_to_be64(iova); + + /* Make MTT entries are visible before setting MPT status */ + wmb(); + + *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW; + + /* Make sure MPT status is visible before consumer can use FMR */ + wmb(); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr); + +int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, + int max_maps, u8 page_shift, struct mlx4_fmr *fmr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err = -ENOMEM; + + if (max_maps > dev->caps.max_fmr_maps) + return -EINVAL; + + if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32) + return -EINVAL; + + /* All MTTs must fit in the same page */ + if (max_pages * sizeof *fmr->mtts > PAGE_SIZE) + return -EINVAL; + + fmr->page_shift = page_shift; + fmr->max_pages = max_pages; + fmr->max_maps = max_maps; + fmr->maps = 0; + + err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, + page_shift, &fmr->mr); + if (err) + return err; + + fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table, + fmr->mr.mtt.offset, + &fmr->dma_handle); + + if (!fmr->mtts) { + err = -ENOMEM; + goto err_free; + } + + return 0; + +err_free: + (void) mlx4_mr_free(dev, &fmr->mr); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); + +int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + err = mlx4_mr_enable(dev, &fmr->mr); + if (err) + return err; + + fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table, + key_to_hw_index(fmr->mr.key), NULL); + if (!fmr->mpt) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_enable); + +void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, + u32 *lkey, u32 *rkey) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + + if (!fmr->maps) + return; + + fmr->maps = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err); + return; + } + + err = mlx4_HW2SW_MPT(dev, NULL, + key_to_hw_index(fmr->mr.key) & + (dev->caps.num_mpts - 1)); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) { + pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err); + return; + } + fmr->mr.enabled = MLX4_MPT_EN_SW; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); + +int 
mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) +{ + int ret; + + if (fmr->maps) + return -EBUSY; + + ret = mlx4_mr_free(dev, &fmr->mr); + if (ret) + return ret; + fmr->mr.enabled = MLX4_MPT_DISABLED; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_fmr_free); + +int mlx4_SYNC_TPT(struct mlx4_dev *dev) +{ + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); +} +EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c new file mode 100644 index 000000000..609c59dc8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include + +#include + +#include "mlx4.h" +#include "icm.h" + +enum { + MLX4_NUM_RESERVED_UARS = 8 +}; + +int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap); + if (*pdn == -1) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_pd_alloc); + +void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR); +} +EXPORT_SYMBOL_GPL(mlx4_pd_free); + +int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + *xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap); + if (*xrcdn == -1) + return -ENOMEM; + + return 0; +} + +int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, + RES_XRCD, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + + *xrcdn = get_param_l(&out_param); + return 0; + } + return __mlx4_xrcd_alloc(dev, xrcdn); +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc); + +void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR); +} + +void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, xrcdn); + err = mlx4_cmd(dev, in_param, RES_XRCD, + RES_OP_RESERVE, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn); + } else + __mlx4_xrcd_free(dev, xrcdn); +} +EXPORT_SYMBOL_GPL(mlx4_xrcd_free); + +int mlx4_init_pd_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, + (1 << NOT_MASKED_PD_BITS) - 1, + dev->caps.reserved_pds, 0); +} + +void mlx4_cleanup_pd_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap); +} + +int mlx4_init_xrcd_table(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16), + (1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0); +} + +void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap); +} + +int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar) +{ + int offset; + + uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap); + if (uar->index == -1) + return -ENOMEM; + + if (mlx4_is_slave(dev)) + offset = uar->index % ((int)pci_resource_len(dev->persist->pdev, + 2) / + dev->caps.uar_page_size); + else + offset = uar->index; + uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT) + + offset; + uar->map = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_uar_alloc); + +void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) +{ + mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR); +} +EXPORT_SYMBOL_GPL(mlx4_uar_free); + +int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_uar *uar; + int err = 0; + int idx; + + if (!priv->bf_mapping) + return -ENOMEM; + + mutex_lock(&priv->bf_mutex); + if (!list_empty(&priv->bf_list)) + uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list); + else { + if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) { + err = -ENOMEM; + goto out; + } + uar = 
kmalloc_node(sizeof(*uar), GFP_KERNEL, node); + if (!uar) { + uar = kmalloc(sizeof(*uar), GFP_KERNEL); + if (!uar) { + err = -ENOMEM; + goto out; + } + } + err = mlx4_uar_alloc(dev, uar); + if (err) + goto free_kmalloc; + + uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) { + err = -ENOMEM; + goto free_uar; + } + + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + if (!uar->bf_map) { + err = -ENOMEM; + goto unamp_uar; + } + uar->free_bf_bmap = 0; + list_add(&uar->bf_list, &priv->bf_list); + } + + idx = ffz(uar->free_bf_bmap); + uar->free_bf_bmap |= 1 << idx; + bf->uar = uar; + bf->offset = 0; + bf->buf_size = dev->caps.bf_reg_size / 2; + bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size; + if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1) + list_del_init(&uar->bf_list); + + goto out; + +unamp_uar: + bf->uar = NULL; + iounmap(uar->map); + +free_uar: + mlx4_uar_free(dev, uar); + +free_kmalloc: + kfree(uar); + +out: + mutex_unlock(&priv->bf_mutex); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_bf_alloc); + +void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int idx; + + if (!bf->uar || !bf->uar->bf_map) + return; + + mutex_lock(&priv->bf_mutex); + idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size; + bf->uar->free_bf_bmap &= ~(1 << idx); + if (!bf->uar->free_bf_bmap) { + if (!list_empty(&bf->uar->bf_list)) + list_del(&bf->uar->bf_list); + + io_mapping_unmap(bf->uar->bf_map); + iounmap(bf->uar->map); + mlx4_uar_free(dev, bf->uar); + kfree(bf->uar); + } else if (list_empty(&bf->uar->bf_list)) + list_add(&bf->uar->bf_list, &priv->bf_list); + + mutex_unlock(&priv->bf_mutex); +} +EXPORT_SYMBOL_GPL(mlx4_bf_free); + +int mlx4_init_uar_table(struct mlx4_dev *dev) +{ + if (dev->caps.num_uars <= 128) { + mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", + dev->caps.num_uars); + mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); + return -ENODEV; + } + + return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, + dev->caps.num_uars, dev->caps.num_uars - 1, + dev->caps.reserved_uars, 0); +} + +void mlx4_cleanup_uar_table(struct mlx4_dev *dev) +{ + mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c new file mode 100644 index 000000000..c2b21313d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -0,0 +1,1445 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include + +#include + +#include "mlx4.h" +#include "mlx4_stats.h" + +#define MLX4_MAC_VALID (1ull << 63) + +#define MLX4_VLAN_VALID (1u << 31) +#define MLX4_VLAN_MASK 0xfff + +#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL +#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL +#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL +#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL + +#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 +#define MLX4_IGNORE_FCS_MASK 0x1 + +void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = 1 << dev->caps.log_num_macs; + table->total = 0; +} + +void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { + table->entries[i] = 0; + table->refs[i] = 0; + } + table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR; + table->total = 0; +} + +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) + memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE); +} + +static int validate_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, int index) +{ + int err = 0; + + if (index < 0 || index >= table->max || !table->entries[index]) { + mlx4_warn(dev, "No valid Mac entry for the given index\n"); + err = -EINVAL; + } + return err; +} + +static int find_index(struct mlx4_dev *dev, + struct mlx4_mac_table *table, u64 mac) +{ + int i; + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (table->refs[i] && + (MLX4_MAC_MASK & mac) == + (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) + return i; + } + /* Mac not found */ + return -EINVAL; +} + +static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, + __be64 *entries) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); + + in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port; + + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int i; + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (!table->refs[i]) + continue; + + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + *idx = i; + return 0; + } + } + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(mlx4_find_cached_mac); + +int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct 
mlx4_mac_table *table = &info->mac_table; + int i, err = 0; + int free = -1; + + mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n", + (unsigned long long) mac, port); + + mutex_lock(&table->mutex); + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (!table->refs[i]) { + if (free < 0) + free = i; + continue; + } + + if ((MLX4_MAC_MASK & mac) == + (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + /* MAC already registered, increment ref count */ + err = i; + ++table->refs[i]; + goto out; + } + } + + mlx4_dbg(dev, "Free MAC index is %d\n", free); + + if (table->total == table->max) { + /* No free mac entries */ + err = -ENOSPC; + goto out; + } + + /* Register new MAC */ + table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", + (unsigned long long) mac); + table->entries[free] = 0; + goto out; + } + table->refs[free] = 1; + err = free; + ++table->total; +out: + mutex_unlock(&table->mutex); + return err; +} +EXPORT_SYMBOL_GPL(__mlx4_register_mac); + +int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + u64 out_param = 0; + int err = -EINVAL; + + if (mlx4_is_mfunc(dev)) { + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { + err = mlx4_cmd_imm(dev, mac, &out_param, + ((u32) port) << 8 | (u32) RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } + if (err && err == -EINVAL && mlx4_is_slave(dev)) { + /* retry using old REG_MAC format */ + set_param_l(&out_param, port); + err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + dev->flags |= MLX4_FLAG_OLD_REG_MAC; + } + if (err) + return err; + + return get_param_l(&out_param); + } + return __mlx4_register_mac(dev, port, mac); +} +EXPORT_SYMBOL_GPL(mlx4_register_mac); + +int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port) +{ + return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + + (port - 1) * (1 << dev->caps.log_num_macs); +} +EXPORT_SYMBOL_GPL(mlx4_get_base_qpn); + +void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + struct mlx4_port_info *info; + struct mlx4_mac_table *table; + int index; + + if (port < 1 || port > dev->caps.num_ports) { + mlx4_warn(dev, "invalid port number (%d), aborting...\n", port); + return; + } + info = &mlx4_priv(dev)->port[port]; + table = &info->mac_table; + mutex_lock(&table->mutex); + index = find_index(dev, table, mac); + + if (validate_index(dev, table, index)) + goto out; + if (--table->refs[index]) { + mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n", + index); + goto out; + } + + table->entries[index] = 0; + mlx4_set_port_mac_table(dev, port, table->entries); + --table->total; +out: + mutex_unlock(&table->mutex); +} +EXPORT_SYMBOL_GPL(__mlx4_unregister_mac); + +void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) +{ + u64 out_param = 0; + + if (mlx4_is_mfunc(dev)) { + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { + (void) mlx4_cmd_imm(dev, mac, &out_param, + ((u32) port) << 8 | (u32) RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } else { + /* use old unregister mac format */ + set_param_l(&out_param, port); + (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + } + return; + } + __mlx4_unregister_mac(dev, 
port, mac); + return; +} +EXPORT_SYMBOL_GPL(mlx4_unregister_mac); + +int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int index = qpn - info->base_qpn; + int err = 0; + + /* CX1 doesn't support multi-functions */ + mutex_lock(&table->mutex); + + err = validate_index(dev, table, index); + if (err) + goto out; + + table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); + + err = mlx4_set_port_mac_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_err(dev, "Failed adding MAC: 0x%llx\n", + (unsigned long long) new_mac); + table->entries[index] = 0; + } +out: + mutex_unlock(&table->mutex); + return err; +} +EXPORT_SYMBOL_GPL(__mlx4_replace_mac); + +static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, + __be32 *entries) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); + in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + + return err; +} + +int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) +{ + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i; + + for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { + if (table->refs[i] && + (vid == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* VLAN already registered, increase reference count */ + *idx = i; + return 0; + } + } + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); + +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, + int *index) +{ + struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int i, err = 0; + int free = -1; + + mutex_lock(&table->mutex); + + if (table->total == table->max) { + /* No free vlan entries */ + err = -ENOSPC; + goto out; + } + + for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { + if (free < 0 && (table->refs[i] == 0)) { + free = i; + continue; + } + + if (table->refs[i] && + (vlan == (MLX4_VLAN_MASK & + be32_to_cpu(table->entries[i])))) { + /* Vlan already registered, increase references count */ + *index = i; + ++table->refs[i]; + goto out; + } + } + + if (free < 0) { + err = -ENOMEM; + goto out; + } + + /* Register new VLAN */ + table->refs[free] = 1; + table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); + + err = mlx4_set_port_vlan_table(dev, port, table->entries); + if (unlikely(err)) { + mlx4_warn(dev, "Failed adding vlan: %u\n", vlan); + table->refs[free] = 0; + table->entries[free] = 0; + goto out; + } + + *index = free; + ++table->total; +out: + mutex_unlock(&table->mutex); + return err; +} + +int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) +{ + u64 out_param = 0; + int err; + + if (vlan > 4095) + return -EINVAL; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, vlan, &out_param, + ((u32) port) << 8 | (u32) RES_VLAN, + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *index = get_param_l(&out_param); + + return err; + } + return __mlx4_register_vlan(dev, port, vlan, index); +} +EXPORT_SYMBOL_GPL(mlx4_register_vlan); + +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) +{ + struct 
mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; + int index; + + mutex_lock(&table->mutex); + if (mlx4_find_cached_vlan(dev, port, vlan, &index)) { + mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); + goto out; + } + + if (index < MLX4_VLAN_REGULAR) { + mlx4_warn(dev, "Trying to free special vlan index %d\n", index); + goto out; + } + + if (--table->refs[index]) { + mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n", + table->refs[index], index); + goto out; + } + table->entries[index] = 0; + mlx4_set_port_vlan_table(dev, port, table->entries); + --table->total; +out: + mutex_unlock(&table->mutex); +} + +void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) +{ + u64 out_param = 0; + + if (mlx4_is_mfunc(dev)) { + (void) mlx4_cmd_imm(dev, vlan, &out_param, + ((u32) port) << 8 | (u32) RES_VLAN, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + return; + } + __mlx4_unregister_vlan(dev, port, vlan); +} +EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); + +int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps) +{ + struct mlx4_cmd_mailbox *inmailbox, *outmailbox; + u8 *inbuf, *outbuf; + int err; + + inmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + + outmailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outmailbox)) { + mlx4_free_cmd_mailbox(dev, inmailbox); + return PTR_ERR(outmailbox); + } + + inbuf = inmailbox->buf; + outbuf = outmailbox->buf; + inbuf[0] = 1; + inbuf[1] = 1; + inbuf[2] = 1; + inbuf[3] = 1; + *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015); + *(__be32 *) (&inbuf[20]) = cpu_to_be32(port); + + err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (!err) + *caps = *(__be32 *) (outbuf + 84); + mlx4_free_cmd_mailbox(dev, inmailbox); + mlx4_free_cmd_mailbox(dev, outmailbox); + return err; +} +static struct mlx4_roce_gid_entry zgid_entry; + +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port) +{ + int vfs; + int slave_gid = slave; + unsigned i; + struct mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return MLX4_ROCE_PF_GIDS; + + /* Slave is a VF */ + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; + if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) + return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; + return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; +} + +int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port) +{ + int gids; + unsigned i; + int slave_gid = slave; + int vfs; + + struct mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return 0; + + 
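/* Translate the global slave number into this VF's ordinal on the port, then + * return where its GID block starts: the first MLX4_ROCE_PF_GIDS entries belong + * to the PF, and the remaining GIDs are divided among the port's VFs with the + * first (gids % vfs) VFs getting one extra entry each. + */ +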
slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1; + if (slave_gid <= gids % vfs) + return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); + + return MLX4_ROCE_PF_GIDS + (gids % vfs) + + ((gids / vfs) * (slave_gid - 1)); +} +EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); + +static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, + int port, struct mlx4_cmd_mailbox *mailbox) +{ + struct mlx4_roce_gid_entry *gid_entry_mbox; + struct mlx4_priv *priv = mlx4_priv(dev); + int num_gids, base, offset; + int i, err; + + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + + mutex_lock(&(priv->port[port].gid_table.mutex)); + /* Zero-out gids belonging to that slave in the port GID table */ + for (i = 0, offset = base; i < num_gids; offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, mailbox->dma, + ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), + MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; +} + + +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + struct mlx4_cmd_mailbox *mailbox; + int num_eth_ports, err; + int i; + + if (slave < 0 || slave > dev->persist->num_vfs) + return; + + actv_ports = mlx4_get_active_ports(dev, slave); + + for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + num_eth_ports++; + } + } + + if (!num_eth_ports) + return; + + /* have ETH ports. 
Alloc mailbox for SET_PORT command */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return; + + for (i = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); + if (err) + mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n", + slave, i + 1, err); + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return; +} + +static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, + u8 op_mod, struct mlx4_cmd_mailbox *inbox) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_port_info *port_info; + struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master; + struct mlx4_slave_state *slave_st = &master->slave_state[slave]; + struct mlx4_set_port_rqp_calc_context *qpn_context; + struct mlx4_set_port_general_context *gen_context; + struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; + int reset_qkey_viols; + int port; + int is_eth; + int num_gids; + int base; + u32 in_modifier; + u32 promisc; + u16 mtu, prev_mtu; + int err; + int i, j; + int offset; + __be32 agg_cap_mask; + __be32 slave_cap_mask; + __be32 new_cap_mask; + + port = in_mod & 0xff; + in_modifier = in_mod >> 8; + is_eth = op_mod; + port_info = &priv->port[port]; + + /* Slaves cannot perform SET_PORT operations except changing MTU */ + if (is_eth) { + if (slave != dev->caps.function && + in_modifier != MLX4_SET_PORT_GENERAL && + in_modifier != MLX4_SET_PORT_GID_TABLE) { + mlx4_warn(dev, "denying SET_PORT for slave:%d\n", + slave); + return -EINVAL; + } + switch (in_modifier) { + case MLX4_SET_PORT_RQP_CALC: + qpn_context = inbox->buf; + qpn_context->base_qpn = + cpu_to_be32(port_info->base_qpn); + qpn_context->n_mac = 0x7; + promisc = be32_to_cpu(qpn_context->promisc) >> + SET_PORT_PROMISC_SHIFT; + qpn_context->promisc = cpu_to_be32( + promisc << SET_PORT_PROMISC_SHIFT | + port_info->base_qpn); + promisc = be32_to_cpu(qpn_context->mcast) >> + SET_PORT_MC_PROMISC_SHIFT; + qpn_context->mcast = cpu_to_be32( + promisc << SET_PORT_MC_PROMISC_SHIFT | + port_info->base_qpn); + break; + case MLX4_SET_PORT_GENERAL: + gen_context = inbox->buf; + /* Mtu is configured as the max MTU among all the + * the functions on the port. */ + mtu = be16_to_cpu(gen_context->mtu); + mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + prev_mtu = slave_st->mtu[port]; + slave_st->mtu[port] = mtu; + if (mtu > master->max_mtu[port]) + master->max_mtu[port] = mtu; + if (mtu < prev_mtu && prev_mtu == + master->max_mtu[port]) { + slave_st->mtu[port] = mtu; + master->max_mtu[port] = mtu; + for (i = 0; i < dev->num_slaves; i++) { + master->max_mtu[port] = + max(master->max_mtu[port], + master->slave_state[i].mtu[port]); + } + } + + gen_context->mtu = cpu_to_be16(master->max_mtu[port]); + break; + case MLX4_SET_PORT_GID_TABLE: + /* change to MULTIPLE entries: number of guest's gids + * need a FOR-loop here over number of gids the guest has. + * 1. 
Check no duplicates in gids passed by slave + */ + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < num_gids; gid_entry_mbox++, i++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + gid_entry_mb1 = gid_entry_mbox + 1; + for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) { + if (!memcmp(gid_entry_mb1->raw, + zgid_entry.raw, sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw, + sizeof(gid_entry_mbox->raw))) { + /* found duplicate */ + return -EINVAL; + } + } + } + + /* 2. Check that do not have duplicates in OTHER + * entries in the port GID table + */ + + mutex_lock(&(priv->port[port].gid_table.mutex)); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (i >= base && i < base + num_gids) + continue; /* don't compare to slave's current gids */ + gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; + if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) + continue; + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (j = 0; j < num_gids; gid_entry_mbox++, j++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, + sizeof(gid_entry_tbl->raw))) { + /* found duplicate */ + mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n", + slave, i); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return -EINVAL; + } + } + } + + /* insert slave GIDs with memcpy, starting at slave's base index */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to current mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; + } + + return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + } + + /* Slaves are not allowed to SET_PORT beacon (LED) blink */ + if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) { + mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave); + return -EPERM; + } + + /* For IB, we only consider: + * - The capability mask, which is set to the aggregate of all + * slave function capabilities + * - The QKey violatin counter - reset according to each request. 
+ */ + + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40; + new_cap_mask = ((__be32 *) inbox->buf)[2]; + } else { + reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1; + new_cap_mask = ((__be32 *) inbox->buf)[1]; + } + + /* slave may not set the IS_SM capability for the port */ + if (slave != mlx4_master_func_num(dev) && + (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM)) + return -EINVAL; + + /* No DEV_MGMT in multifunc mode */ + if (mlx4_is_mfunc(dev) && + (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP)) + return -EINVAL; + + agg_cap_mask = 0; + slave_cap_mask = + priv->mfunc.master.slave_state[slave].ib_cap_mask[port]; + priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; + for (i = 0; i < dev->num_slaves; i++) + agg_cap_mask |= + priv->mfunc.master.slave_state[i].ib_cap_mask[port]; + + /* only clear mailbox for guests. Master may be setting + * MTU or PKEY table size + */ + if (slave != dev->caps.function) + memset(inbox->buf, 0, 256); + if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { + *(u8 *) inbox->buf |= !!reset_qkey_viols << 6; + ((__be32 *) inbox->buf)[2] = agg_cap_mask; + } else { + ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols; + ((__be32 *) inbox->buf)[1] = agg_cap_mask; + } + + err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + if (err) + priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = + slave_cap_mask; + return err; +} + +int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier & 0xFF); + + if (port < 0) + return -EINVAL; + + vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | + (port & 0xFF); + + return mlx4_common_set_port(dev, slave, vhcr->in_modifier, + vhcr->op_modifier, inbox); +} + +/* bit locations for set port command with zero op modifier */ +enum { + MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */ + MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */ + MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, + MLX4_CHANGE_PORT_VL_CAP = 21, + MLX4_CHANGE_PORT_MTU_CAP = 22, +}; + +int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz) +{ + struct mlx4_cmd_mailbox *mailbox; + int err, vl_cap, pkey_tbl_flag = 0; + + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; + + if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) { + pkey_tbl_flag = 1; + ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz); + } + + /* IB VL CAP enum isn't used by the firmware, just numerical values */ + for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) { + ((__be32 *) mailbox->buf)[0] = cpu_to_be32( + (1 << MLX4_CHANGE_PORT_MTU_CAP) | + (1 << MLX4_CHANGE_PORT_VL_CAP) | + (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) | + (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) | + (vl_cap << MLX4_SET_PORT_VL_CAP)); + err = mlx4_cmd(dev, mailbox->dma, port, + MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); + if (err != -ENOMEM) + break; + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + +int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, + u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; 
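As a side note on the bit-location enum and the vl_cap retry loop in mlx4_SET_PORT() above, here is a standalone, host-endian sketch of how the first mailbox dword is packed. The shortened constant names and the example IB MTU code are illustrative stand-ins; the driver itself converts the word with cpu_to_be32() before issuing the command and retries with a halved VL cap whenever the firmware answers -ENOMEM.

/* Standalone sketch: packing the first dword of the IB SET_PORT mailbox
 * from the bit positions in the enum above (host-endian here).
 */
#include <stdint.h>
#include <stdio.h>

enum {
	SET_PORT_VL_CAP         = 4,	/* bits 7:4  */
	SET_PORT_MTU_CAP        = 12,	/* bits 15:12 */
	CHANGE_PORT_PKEY_TBL_SZ = 20,
	CHANGE_PORT_VL_CAP      = 21,
	CHANGE_PORT_MTU_CAP     = 22,
};

static uint32_t set_port_ib_word(int ib_mtu, int vl_cap, int pkey_tbl_flag)
{
	return (1u << CHANGE_PORT_MTU_CAP) |
	       (1u << CHANGE_PORT_VL_CAP) |
	       ((uint32_t)pkey_tbl_flag << CHANGE_PORT_PKEY_TBL_SZ) |
	       ((uint32_t)ib_mtu << SET_PORT_MTU_CAP) |
	       ((uint32_t)vl_cap << SET_PORT_VL_CAP);
}

int main(void)
{
	int vl_cap;

	/* Same fallback order as the loop above: try VL cap 8, 4, 2, 1. */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1)
		printf("vl_cap %d -> control word 0x%08x\n", vl_cap,
		       (unsigned int)set_port_ib_word(5 /* example IB MTU code */,
						      vl_cap, 1));
	return 0;
}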
+ int err; + u32 in_mod; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->flags = SET_PORT_GEN_ALL_VALID; + context->mtu = cpu_to_be16(mtu); + context->pptx = (pptx * (!pfctx)) << 7; + context->pfctx = pfctx; + context->pprx = (pprx * (!pfcrx)) << 7; + context->pfcrx = pfcrx; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_general); + +int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, + u8 promisc) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_rqp_calc_context *context; + int err; + u32 in_mod; + u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? + MCAST_DIRECT : MCAST_DEFAULT; + + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) + return 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->base_qpn = cpu_to_be32(base_qpn); + context->n_mac = dev->caps.log_num_macs; + context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | + base_qpn); + context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | + base_qpn); + context->intra_no_vlan = 0; + context->no_vlan = MLX4_NO_VLAN_IDX; + context->intra_vlan_miss = 0; + context->vlan_miss = MLX4_VLAN_MISS_IDX; + + in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); + +int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_general_context *context; + u32 in_mod; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK; + if (ignore_fcs_value) + context->ignore_fcs |= MLX4_IGNORE_FCS_MASK; + else + context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK; + + in_mod = MLX4_SET_PORT_GENERAL << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check); + +enum { + VXLAN_ENABLE_MODIFY = 1 << 7, + VXLAN_STEERING_MODIFY = 1 << 6, + + VXLAN_ENABLE = 1 << 7, +}; + +struct mlx4_set_port_vxlan_context { + u32 reserved1; + u8 modify_flags; + u8 reserved2; + u8 enable_flags; + u8 steering; +}; + +int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable) +{ + int err; + u32 in_mod; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_vxlan_context *context; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof(*context)); + + context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY; + if (enable) + context->enable_flags = VXLAN_ENABLE; + context->steering = steering; + + in_mod = MLX4_SET_PORT_VXLAN << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} 
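A small standalone sketch of the pause encoding used by mlx4_SET_PORT_general() above: the global-pause enable lands in bit 7 of its byte and is forced off whenever the corresponding per-priority flow-control byte is non-zero. Treating pfctx/pfcrx as PFC bitmaps is inferred from the parameter names and the arithmetic, not stated in the code itself.

/* Standalone check of the (pause * !pfc) << 7 encoding above. */
#include <stdint.h>
#include <stdio.h>

static uint8_t pause_byte(uint8_t global_pause, uint8_t pfc_bitmap)
{
	/* global pause only survives when no per-priority bits are set */
	return (uint8_t)((global_pause * !pfc_bitmap) << 7);
}

int main(void)
{
	printf("pause on,  no PFC : 0x%02x\n", (unsigned int)pause_byte(1, 0x00)); /* 0x80 */
	printf("pause on,  PFC 0-3: 0x%02x\n", (unsigned int)pause_byte(1, 0x0f)); /* 0x00 */
	printf("pause off, no PFC : 0x%02x\n", (unsigned int)pause_byte(0, 0x00)); /* 0x00 */
	return 0;
}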
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN); + +int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + *((__be32 *)mailbox->buf) = cpu_to_be32(time); + + err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_BEACON); + +int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + + return err; +} + +int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, + u64 mac, u64 clear, u8 mode) +{ + return mlx4_cmd(dev, (mac | (clear << 63)), port, mode, + MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); +} +EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR); + +int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + + return err; +} + +int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, + u32 in_mod, struct mlx4_cmd_mailbox *outbox) +{ + return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0, + MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); +} + +int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + if (slave != dev->caps.function) + return 0; + return mlx4_common_dump_eth_stats(dev, slave, + vhcr->in_modifier, outbox); +} + +int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, + int *slave_id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, found_ix = -1; + int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + struct mlx4_slaves_pport slaves_pport; + unsigned num_vfs; + int slave_gid; + + if (!mlx4_is_mfunc(dev)) + return -EINVAL; + + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + num_vfs = bitmap_weight(slaves_pport.slaves, + dev->persist->num_vfs + 1) - 1; + + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, + MLX4_ROCE_GID_ENTRY_SIZE)) { + found_ix = i; + break; + } + } + + if (found_ix >= 0) { + /* Calculate a slave_gid which is the slave number in the gid + * table and not a globally unique slave number. 
+ */ + if (found_ix < MLX4_ROCE_PF_GIDS) + slave_gid = 0; + else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * + (vf_gids / num_vfs + 1)) + slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) / + (vf_gids / num_vfs + 1)) + 1; + else + slave_gid = + ((found_ix - MLX4_ROCE_PF_GIDS - + ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / + (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; + + /* Calculate the globally unique slave id */ + if (slave_gid) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_active_ports actv_ports; + struct mlx4_slaves_pport slaves_pport_actv; + unsigned max_port_p_one; + int num_vfs_before = 0; + int candidate_slave_gid; + + /* Calculate how many VFs are on the previous port, if exists */ + for (i = 1; i < port; i++) { + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + num_vfs_before += bitmap_weight( + slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + + /* candidate_slave_gid isn't necessarily the correct slave, but + * it has the same number of ports and is assigned to the same + * ports as the real slave we're looking for. On dual port VF, + * slave_gid = [single port VFs on port ] + + * [offset of the current slave from the first dual port VF] + + * 1 (for the PF). + */ + candidate_slave_gid = slave_gid + num_vfs_before; + + actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid); + max_port_p_one = find_first_bit( + actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, + dev->caps.num_ports) + 1; + + /* Calculate the real slave number */ + for (i = 1; i < max_port_p_one; i++) { + if (i == port) + continue; + bitmap_zero(exclusive_ports.ports, + dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid += bitmap_weight( + slaves_pport_actv.slaves, + dev->persist->num_vfs + 1); + } + } + *slave_id = slave_gid; + } + + return (found_ix >= 0) ? 
0 : -EINVAL; +} +EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid); + +int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, + u8 *gid) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev)) + return -EINVAL; + + memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + return 0; +} +EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); + +/* Cable Module Info */ +#define MODULE_INFO_MAX_READ 48 + +#define I2C_ADDR_LOW 0x50 +#define I2C_ADDR_HIGH 0x51 +#define I2C_PAGE_SIZE 256 + +/* Module Info Data */ +struct mlx4_cable_info { + u8 i2c_addr; + u8 page_num; + __be16 dev_mem_address; + __be16 reserved1; + __be16 size; + __be32 reserved2[2]; + u8 data[MODULE_INFO_MAX_READ]; +}; + +enum cable_info_err { + CABLE_INF_INV_PORT = 0x1, + CABLE_INF_OP_NOSUP = 0x2, + CABLE_INF_NOT_CONN = 0x3, + CABLE_INF_NO_EEPRM = 0x4, + CABLE_INF_PAGE_ERR = 0x5, + CABLE_INF_INV_ADDR = 0x6, + CABLE_INF_I2C_ADDR = 0x7, + CABLE_INF_QSFP_VIO = 0x8, + CABLE_INF_I2C_BUSY = 0x9, +}; + +#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF) + +static inline const char *cable_info_mad_err_str(u16 mad_status) +{ + u8 err = MAD_STATUS_2_CABLE_ERR(mad_status); + + switch (err) { + case CABLE_INF_INV_PORT: + return "invalid port selected"; + case CABLE_INF_OP_NOSUP: + return "operation not supported for this port (the port is of type CX4 or internal)"; + case CABLE_INF_NOT_CONN: + return "cable is not connected"; + case CABLE_INF_NO_EEPRM: + return "the connected cable has no EPROM (passive copper cable)"; + case CABLE_INF_PAGE_ERR: + return "page number is greater than 15"; + case CABLE_INF_INV_ADDR: + return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; + case CABLE_INF_I2C_ADDR: + return "invalid I2C slave address"; + case CABLE_INF_QSFP_VIO: + return "at least one cable violates the QSFP specification and ignores the modsel signal"; + case CABLE_INF_I2C_BUSY: + return "I2C bus is constantly busy"; + } + return "Unknown Error"; +} + +/** + * mlx4_get_module_info - Read cable module eeprom data + * @dev: mlx4_dev. + * @port: port number. + * @offset: byte offset in eeprom to start reading data from. + * @size: num of bytes to read. + * @data: output buffer to put the requested data into. + * + * Reads cable module eeprom data, puts the outcome data into + * data pointer paramer. + * Returns num of read bytes on success or a negative error + * code. 
+ */ +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_mad_ifc *inmad, *outmad; + struct mlx4_cable_info *cable_info; + u16 i2c_addr; + int ret; + + if (size > MODULE_INFO_MAX_READ) + size = MODULE_INFO_MAX_READ; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inmad = (struct mlx4_mad_ifc *)(inbox->buf); + outmad = (struct mlx4_mad_ifc *)(outbox->buf); + + inmad->method = 0x1; /* Get */ + inmad->class_version = 0x1; + inmad->mgmt_class = 0x1; + inmad->base_version = 0x1; + inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */ + + if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE) + /* Cross pages reads are not allowed + * read until offset 256 in low page + */ + size -= offset + size - I2C_PAGE_SIZE; + + i2c_addr = I2C_ADDR_LOW; + if (offset >= I2C_PAGE_SIZE) { + /* Reset offset to high page */ + i2c_addr = I2C_ADDR_HIGH; + offset -= I2C_PAGE_SIZE; + } + + cable_info = (struct mlx4_cable_info *)inmad->data; + cable_info->dev_mem_address = cpu_to_be16(offset); + cable_info->page_num = 0; + cable_info->i2c_addr = i2c_addr; + cable_info->size = cpu_to_be16(size); + + ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (ret) + goto out; + + if (be16_to_cpu(outmad->status)) { + /* Mad returned with bad status */ + ret = be16_to_cpu(outmad->status); + mlx4_warn(dev, + "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n", + 0xFF60, port, i2c_addr, offset, size, + ret, cable_info_mad_err_str(ret)); + + if (i2c_addr == I2C_ADDR_HIGH && + MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR) + /* Some SFP cables do not support i2c slave + * address 0x51 (high page), abort silently. + */ + ret = 0; + else + ret = -ret; + goto out; + } + cable_info = (struct mlx4_cable_info *)outmad->data; + memcpy(data, cable_info->data, size); + ret = size; +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return ret; +} +EXPORT_SYMBOL(mlx4_get_module_info); diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c new file mode 100644 index 000000000..2bf437aaf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include + +#include "mlx4.h" +#include "fw.h" + +enum { + MLX4_RES_QP, + MLX4_RES_RDMARC, + MLX4_RES_ALTC, + MLX4_RES_AUXC, + MLX4_RES_SRQ, + MLX4_RES_CQ, + MLX4_RES_EQ, + MLX4_RES_DMPT, + MLX4_RES_CMPT, + MLX4_RES_MTT, + MLX4_RES_MCG, + MLX4_RES_NUM +}; + +static const char *res_name[] = { + [MLX4_RES_QP] = "QP", + [MLX4_RES_RDMARC] = "RDMARC", + [MLX4_RES_ALTC] = "ALTC", + [MLX4_RES_AUXC] = "AUXC", + [MLX4_RES_SRQ] = "SRQ", + [MLX4_RES_CQ] = "CQ", + [MLX4_RES_EQ] = "EQ", + [MLX4_RES_DMPT] = "DMPT", + [MLX4_RES_CMPT] = "CMPT", + [MLX4_RES_MTT] = "MTT", + [MLX4_RES_MCG] = "MCG", +}; + +u64 mlx4_make_profile(struct mlx4_dev *dev, + struct mlx4_profile *request, + struct mlx4_dev_cap *dev_cap, + struct mlx4_init_hca_param *init_hca) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource { + u64 size; + u64 start; + int type; + u32 num; + int log_num; + }; + + u64 total_size = 0; + struct mlx4_resource *profile; + struct mlx4_resource tmp; + struct sysinfo si; + int i, j; + + profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL); + if (!profile) + return -ENOMEM; + + /* + * We want to scale the number of MTTs with the size of the + * system memory, since it makes sense to register a lot of + * memory on a system with a lot of memory. As a heuristic, + * make sure we have enough MTTs to cover twice the system + * memory (with PAGE_SIZE entries). + * + * This number has to be a power of two and fit into 32 bits + * due to device limitations, so cap this at 2^31 as well. + * That limits us to 8TB of memory registration per HCA with + * 4KB pages, which is probably OK for the next few months. + */ + si_meminfo(&si); + request->num_mtt = + roundup_pow_of_two(max_t(unsigned, request->num_mtt, + min(1UL << (31 - log_mtts_per_seg), + si.totalram >> (log_mtts_per_seg - 1)))); + + profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; + profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz; + profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz; + profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz; + profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz; + profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz; + profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz; + profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz; + profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz; + profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz; + profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev); + + profile[MLX4_RES_QP].num = request->num_qp; + profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp; + profile[MLX4_RES_ALTC].num = request->num_qp; + profile[MLX4_RES_AUXC].num = request->num_qp; + profile[MLX4_RES_SRQ].num = request->num_srq; + profile[MLX4_RES_CQ].num = request->num_cq; + profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? 
dev->phys_caps.num_phys_eqs : + min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); + profile[MLX4_RES_DMPT].num = request->num_mpt; + profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; + profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); + profile[MLX4_RES_MCG].num = request->num_mcg; + + for (i = 0; i < MLX4_RES_NUM; ++i) { + profile[i].type = i; + profile[i].num = roundup_pow_of_two(profile[i].num); + profile[i].log_num = ilog2(profile[i].num); + profile[i].size *= profile[i].num; + profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); + } + + /* + * Sort the resources in decreasing order of size. Since they + * all have sizes that are powers of 2, we'll be able to keep + * resources aligned to their size and pack them without gaps + * using the sorted order. + */ + for (i = MLX4_RES_NUM; i > 0; --i) + for (j = 1; j < i; ++j) { + if (profile[j].size > profile[j - 1].size) { + tmp = profile[j]; + profile[j] = profile[j - 1]; + profile[j - 1] = tmp; + } + } + + for (i = 0; i < MLX4_RES_NUM; ++i) { + if (profile[i].size) { + profile[i].start = total_size; + total_size += profile[i].size; + } + + if (total_size > dev_cap->max_icm_sz) { + mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n", + (unsigned long long) total_size, + (unsigned long long) dev_cap->max_icm_sz); + kfree(profile); + return -ENOMEM; + } + + if (profile[i].size) + mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n", + i, res_name[profile[i].type], + profile[i].log_num, + (unsigned long long) profile[i].start, + (unsigned long long) profile[i].size); + } + + mlx4_dbg(dev, "HCA context memory: reserving %d KB\n", + (int) (total_size >> 10)); + + for (i = 0; i < MLX4_RES_NUM; ++i) { + switch (profile[i].type) { + case MLX4_RES_QP: + dev->caps.num_qps = profile[i].num; + init_hca->qpc_base = profile[i].start; + init_hca->log_num_qps = profile[i].log_num; + break; + case MLX4_RES_RDMARC: + for (priv->qp_table.rdmarc_shift = 0; + request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; + ++priv->qp_table.rdmarc_shift) + ; /* nothing */ + dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; + priv->qp_table.rdmarc_base = (u32) profile[i].start; + init_hca->rdmarc_base = profile[i].start; + init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; + break; + case MLX4_RES_ALTC: + init_hca->altc_base = profile[i].start; + break; + case MLX4_RES_AUXC: + init_hca->auxc_base = profile[i].start; + break; + case MLX4_RES_SRQ: + dev->caps.num_srqs = profile[i].num; + init_hca->srqc_base = profile[i].start; + init_hca->log_num_srqs = profile[i].log_num; + break; + case MLX4_RES_CQ: + dev->caps.num_cqs = profile[i].num; + init_hca->cqc_base = profile[i].start; + init_hca->log_num_cqs = profile[i].log_num; + break; + case MLX4_RES_EQ: + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { + init_hca->log_num_eqs = 0x1f; + init_hca->eqc_base = profile[i].start; + init_hca->num_sys_eqs = dev_cap->num_sys_eqs; + } else { + dev->caps.num_eqs = roundup_pow_of_two( + min_t(unsigned, + dev_cap->max_eqs, + MAX_MSIX)); + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); + } + break; + case MLX4_RES_DMPT: + dev->caps.num_mpts = profile[i].num; + priv->mr_table.mpt_base = profile[i].start; + init_hca->dmpt_base = profile[i].start; + init_hca->log_mpt_sz = profile[i].log_num; + break; + case MLX4_RES_CMPT: + init_hca->cmpt_base = profile[i].start; + break; + case MLX4_RES_MTT: + dev->caps.num_mtts = profile[i].num; + 
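For reference, the sizing-and-packing strategy earlier in mlx4_make_profile() reduces to a few lines. The sketch below uses made-up entry sizes and counts, but mirrors the driver's descending bubble sort and the fact that packing power-of-two sizes back to back keeps every area naturally aligned to its own size.

/* Standalone sketch of the ICM layout: round counts to powers of two,
 * sort areas by size in decreasing order, then pack them contiguously.
 */
#include <stdint.h>
#include <stdio.h>

struct res {
	const char *name;
	uint64_t    size;	/* entry_size * roundup_pow_of_two(num) */
	uint64_t    start;
};

int main(void)
{
	/* made-up entry sizes and counts, for illustration only */
	struct res r[] = {
		{ "EQ", 128 *  1024 },
		{ "QP", 256 * 65536 },
		{ "CQ", 128 *  8192 },
	};
	int n = (int)(sizeof(r) / sizeof(r[0]));
	uint64_t total = 0;
	int i, j;

	/* descending bubble sort by size, as in mlx4_make_profile() */
	for (i = n; i > 0; --i)
		for (j = 1; j < i; ++j)
			if (r[j].size > r[j - 1].size) {
				struct res tmp = r[j];
				r[j] = r[j - 1];
				r[j - 1] = tmp;
			}

	/* pack back to back; power-of-two sizes stay self-aligned */
	for (i = 0; i < n; ++i) {
		r[i].start = total;
		total += r[i].size;
		printf("%-3s start 0x%08llx size 0x%08llx\n", r[i].name,
		       (unsigned long long)r[i].start,
		       (unsigned long long)r[i].size);
	}
	printf("total ICM: 0x%llx bytes\n", (unsigned long long)total);
	return 0;
}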
priv->mr_table.mtt_base = profile[i].start; + init_hca->mtt_base = profile[i].start; + break; + case MLX4_RES_MCG: + init_hca->mc_base = profile[i].start; + init_hca->log_mc_entry_sz = + ilog2(mlx4_get_mgm_entry_size(dev)); + init_hca->log_mc_table_sz = profile[i].log_num; + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + dev->caps.num_mgms = profile[i].num; + } else { + init_hca->log_mc_hash_sz = + profile[i].log_num - 1; + dev->caps.num_mgms = profile[i].num >> 1; + dev->caps.num_amgms = profile[i].num >> 1; + } + break; + default: + break; + } + } + + /* + * PDs don't take any HCA memory, but we assign them as part + * of the HCA profile anyway. + */ + dev->caps.num_pds = MLX4_NUM_PDS; + + kfree(profile); + return total_size; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c new file mode 100644 index 000000000..b75214a80 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -0,0 +1,909 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include + +#include +#include + +#include "mlx4.h" +#include "icm.h" + +/* QP to support BF should have bits 6,7 cleared */ +#define MLX4_BF_QP_SKIP_MASK 0xc0 +#define MLX4_MAX_BF_QP_RANGE 0x40 + +void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_qp *qp; + + spin_lock(&qp_table->lock); + + qp = __mlx4_qp_lookup(dev, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&qp_table->lock); + + if (!qp) { + mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +/* used for INIT/CLOSE port logic */ +static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) +{ + /* this procedure is called after we already know we are on the master */ + /* qp0 is either the proxy qp0, or the real qp0 */ + u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev); + *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; + + *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && + qp->qpn <= dev->phys_caps.base_sqpn + 1; + + return *real_qp0 || *proxy_qp0; +} + +static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, + enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp, int native) +{ + static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = { + [MLX4_QP_STATE_RST] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP, + }, + [MLX4_QP_STATE_INIT] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP, + [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP, + }, + [MLX4_QP_STATE_RTR] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP, + }, + [MLX4_QP_STATE_RTS] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP, + [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP, + }, + [MLX4_QP_STATE_SQD] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP, + [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP, + }, + [MLX4_QP_STATE_SQER] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP, + }, + [MLX4_QP_STATE_ERR] = { + [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP, + [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP, + } + }; + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_cmd_mailbox *mailbox; + int ret = 0; + int real_qp0 = 0; + int proxy_qp0 = 0; + u8 port; + + if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE || + !op[cur_state][new_state]) + return -EINVAL; + + if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) { + ret = mlx4_cmd(dev, 0, qp->qpn, 2, + MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native); + if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR && + cur_state != MLX4_QP_STATE_RST && + is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { + port = (qp->qpn & 1) + 1; + if (proxy_qp0) + priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0; + else + priv->mfunc.master.qp0_state[port].qp0_active = 0; + } 
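A standalone miniature of the op[][] transition table above, with command names as strings instead of MLX4_CMD_* opcodes: a missing entry means the transition is refused with -EINVAL, and the only way from reset to a sending state is the RST->INIT->RTR->RTS chain that mlx4_qp_to_ready() walks later in this file.

/* Miniature of the QP state-transition gate: NULL entry == rejected. */
#include <stdio.h>

enum qp_state { RST, INIT, RTR, RTS, NUM_STATE };

static const char *op[NUM_STATE][NUM_STATE] = {
	[RST]  = { [INIT] = "RST2INIT_QP" },
	[INIT] = { [RTR]  = "INIT2RTR_QP" },
	[RTR]  = { [RTS]  = "RTR2RTS_QP"  },
	[RTS]  = { [RTS]  = "RTS2RTS_QP"  },
};

static int modify(enum qp_state cur, enum qp_state new)
{
	if (!op[cur][new]) {
		printf("%d -> %d: rejected (-EINVAL)\n", cur, new);
		return -1;
	}
	printf("%d -> %d: firmware command %s\n", cur, new, op[cur][new]);
	return 0;
}

int main(void)
{
	/* the ready sequence used by mlx4_qp_to_ready() */
	modify(RST, INIT);
	modify(INIT, RTR);
	modify(RTR, RTS);
	/* skipping a state is not allowed by the table */
	modify(RST, RTS);
	return 0;
}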
+ return ret; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) { + u64 mtt_addr = mlx4_mtt_addr(dev, mtt); + context->mtt_base_addr_h = mtt_addr >> 32; + context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + } + + *(__be32 *) mailbox->buf = cpu_to_be32(optpar); + memcpy(mailbox->buf + 8, context, sizeof *context); + + ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = + cpu_to_be32(qp->qpn); + + ret = mlx4_cmd(dev, mailbox->dma, + qp->qpn | (!!sqd_event << 31), + new_state == MLX4_QP_STATE_RST ? 2 : 0, + op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native); + + if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { + port = (qp->qpn & 1) + 1; + if (cur_state != MLX4_QP_STATE_ERR && + cur_state != MLX4_QP_STATE_RST && + new_state == MLX4_QP_STATE_ERR) { + if (proxy_qp0) + priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0; + else + priv->mfunc.master.qp0_state[port].qp0_active = 0; + } else if (new_state == MLX4_QP_STATE_RTR) { + if (proxy_qp0) + priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1; + else + priv->mfunc.master.qp0_state[port].qp0_active = 1; + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} + +int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, + struct mlx4_qp_context *context, + enum mlx4_qp_optpar optpar, + int sqd_event, struct mlx4_qp *qp) +{ + return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, + optpar, sqd_event, qp, 0); +} +EXPORT_SYMBOL_GPL(mlx4_qp_modify); + +int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags) +{ + u32 uid; + int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP); + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + + if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp) + return -ENOMEM; + + uid = MLX4_QP_TABLE_ZONE_GENERAL; + if (flags & (u8)MLX4_RESERVE_A0_QP) { + if (bf_qp) + uid = MLX4_QP_TABLE_ZONE_RAW_ETH; + else + uid = MLX4_QP_TABLE_ZONE_RSS; + } + + *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align, + bf_qp ? 
MLX4_BF_QP_SKIP_MASK : 0, NULL); + if (*base == -1) + return -ENOMEM; + + return 0; +} + +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags) +{ + u64 in_param = 0; + u64 out_param; + int err; + + /* Turn off all unsupported QP allocation flags */ + flags &= dev->caps.alloc_res_qp_mask; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt); + set_param_h(&in_param, align); + err = mlx4_cmd_imm(dev, in_param, &out_param, + RES_QP, RES_OP_RESERVE, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) + return err; + + *base = get_param_l(&out_param); + return 0; + } + return __mlx4_qp_reserve_range(dev, cnt, align, base, flags); +} +EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); + +void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + + if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) + return; + mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt); +} + +void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) +{ + u64 in_param = 0; + int err; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, base_qpn); + set_param_h(&in_param, cnt); + err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (err) { + mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n", + base_qpn, cnt); + } + } else + __mlx4_qp_release_range(dev, base_qpn, cnt); +} +EXPORT_SYMBOL_GPL(mlx4_qp_release_range); + +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + int err; + + err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp); + if (err) + goto err_put_qp; + + err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp); + if (err) + goto err_put_auxc; + + err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp); + if (err) + goto err_put_altc; + + err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp); + if (err) + goto err_put_rdmarc; + + return 0; + +err_put_rdmarc: + mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); + +err_put_altc: + mlx4_table_put(dev, &qp_table->altc_table, qpn); + +err_put_auxc: + mlx4_table_put(dev, &qp_table->auxc_table, qpn); + +err_put_qp: + mlx4_table_put(dev, &qp_table->qp_table, qpn); + +err_out: + return err; +} + +static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) +{ + u64 param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(¶m, qpn); + return mlx4_cmd_imm(dev, param, ¶m, RES_QP, RES_OP_MAP_ICM, + MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + } + return __mlx4_qp_alloc_icm(dev, qpn, gfp); +} + +void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + + mlx4_table_put(dev, &qp_table->cmpt_table, qpn); + mlx4_table_put(dev, &qp_table->rdmarc_table, qpn); + mlx4_table_put(dev, &qp_table->altc_table, qpn); + mlx4_table_put(dev, &qp_table->auxc_table, qpn); + mlx4_table_put(dev, &qp_table->qp_table, qpn); +} + +static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, qpn); + if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM, + MLX4_CMD_FREE_RES, 
MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); + } else + __mlx4_qp_free_icm(dev, qpn); +} + +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_qp_table *qp_table = &priv->qp_table; + int err; + + if (!qpn) + return -EINVAL; + + qp->qpn = qpn; + + err = mlx4_qp_alloc_icm(dev, qpn, gfp); + if (err) + return err; + + spin_lock_irq(&qp_table->lock); + err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & + (dev->caps.num_qps - 1), qp); + spin_unlock_irq(&qp_table->lock); + if (err) + goto err_icm; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + + return 0; + +err_icm: + mlx4_qp_free_icm(dev, qpn); + return err; +} + +EXPORT_SYMBOL_GPL(mlx4_qp_alloc); + +int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *cmd; + u64 pri_addr_path_mask = 0; + u64 qp_mask = 0; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cmd = (struct mlx4_update_qp_context *)mailbox->buf; + + if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) + return -EINVAL; + + if (attr & MLX4_UPDATE_QP_SMAC) { + pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; + cmd->qp_context.pri_path.grh_mylmc = params->smac_index; + } + + if (attr & MLX4_UPDATE_QP_VSD) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD; + if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE) + cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); + } + + if (attr & MLX4_UPDATE_QP_RATE_LIMIT) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT; + cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val); + } + + if (attr & MLX4_UPDATE_QP_QOS_VPORT) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; + cmd->qp_context.qos_vport = params->qos_vport; + } + + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + cmd->qp_mask = cpu_to_be64(qp_mask); + + err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_update_qp); + +void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + unsigned long flags; + + spin_lock_irqsave(&qp_table->lock, flags); + radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); + spin_unlock_irqrestore(&qp_table->lock, flags); +} +EXPORT_SYMBOL_GPL(mlx4_qp_remove); + +void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) +{ + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); + + mlx4_qp_free_icm(dev, qp->qpn); +} +EXPORT_SYMBOL_GPL(mlx4_qp_free); + +static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn) +{ + return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); +} + +#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2 +#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1 +#define MLX4_QP_TABLE_RAW_ETH_SIZE 256 + +static int mlx4_create_zones(struct mlx4_dev *dev, + u32 reserved_bottom_general, + u32 reserved_top_general, + u32 reserved_bottom_rss, + u32 start_offset_rss, + u32 max_table_offset) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_bitmap 
(*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL; + int bitmap_initialized = 0; + u32 last_offset; + int k; + int err; + + qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP); + + if (NULL == qp_table->zones) + return -ENOMEM; + + bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL); + + if (NULL == bitmap) { + err = -ENOMEM; + goto free_zone; + } + + err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps, + (1 << 23) - 1, reserved_bottom_general, + reserved_top_general); + + if (err) + goto free_bitmap; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL, + MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO | + MLX4_ZONE_USE_RR, 0, + 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL); + + if (err) + goto free_bitmap; + + err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS, + reserved_bottom_rss, + reserved_bottom_rss - 1, + dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], + reserved_bottom_rss - start_offset_rss); + + if (err) + goto free_bitmap; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS, + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO | + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO | + MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY, + 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS); + + if (err) + goto free_bitmap; + + last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; + /* We have a single zone for the A0 steering QPs area of the FW. This area + * needs to be split into subareas. One set of subareas is for RSS QPs + * (in which qp number bits 6 and/or 7 are set); the other set of subareas + * is for RAW_ETH QPs, which require that both bits 6 and 7 are zero. + * Currently, the values returned by the FW (A0 steering area starting qp number + * and A0 steering area size) are such that there are only two subareas -- one + * for RSS and one for RAW_ETH. + */ + for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]); + k++) { + int size; + u32 offset = start_offset_rss; + u32 bf_mask; + u32 requested_size; + + /* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates + * a mask of all LSB bits set until (and not including) the first + * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK + * is 0xc0, bf_mask will be 0x3f. 
+ */ + bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1; + requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1); + + if (((last_offset & MLX4_BF_QP_SKIP_MASK) && + ((int)(max_table_offset - last_offset)) >= + roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) || + (!(last_offset & MLX4_BF_QP_SKIP_MASK) && + !((last_offset + requested_size - 1) & + MLX4_BF_QP_SKIP_MASK))) + size = requested_size; + else { + u32 candidate_offset = + (last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1; + + if (last_offset & MLX4_BF_QP_SKIP_MASK) + last_offset = candidate_offset; + + /* From this point, the BF bits are 0 */ + + if (last_offset > max_table_offset) { + /* need to skip */ + size = -1; + } else { + size = min3(max_table_offset - last_offset, + bf_mask - (last_offset & bf_mask), + requested_size); + if (size < requested_size) { + int candidate_size; + + candidate_size = min3( + max_table_offset - candidate_offset, + bf_mask - (last_offset & bf_mask), + requested_size); + + /* We will not take this path if last_offset was + * already set above to candidate_offset + */ + if (candidate_size > size) { + last_offset = candidate_offset; + size = candidate_size; + } + } + } + } + + if (size > 0) { + /* mlx4_bitmap_alloc_range will find a contiguous range of "size" + * QPs in which both bits 6 and 7 are zero, because we pass it the + * MLX4_BF_SKIP_MASK). + */ + offset = mlx4_bitmap_alloc_range( + *bitmap + MLX4_QP_TABLE_ZONE_RSS, + size, 1, + MLX4_BF_QP_SKIP_MASK); + + if (offset == (u32)-1) { + err = -ENOMEM; + break; + } + + last_offset = offset + size; + + err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size), + roundup_pow_of_two(size) - 1, 0, + roundup_pow_of_two(size) - size); + } else { + /* Add an empty bitmap, we'll allocate from different zones (since + * at least one is reserved) + */ + err = mlx4_bitmap_init(*bitmap + k, 1, + MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0, + 0); + mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0); + } + + if (err) + break; + + ++bitmap_initialized; + + err = mlx4_zone_add_one(qp_table->zones, *bitmap + k, + MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO | + MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO | + MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY, + offset, qp_table->zones_uids + k); + + if (err) + break; + } + + if (err) + goto free_bitmap; + + qp_table->bitmap_gen = *bitmap; + + return err; + +free_bitmap: + for (k = 0; k < bitmap_initialized; k++) + mlx4_bitmap_cleanup(*bitmap + k); + kfree(bitmap); +free_zone: + mlx4_zone_allocator_destroy(qp_table->zones); + return err; +} + +static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + + if (qp_table->zones) { + int i; + + for (i = 0; + i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]); + i++) { + struct mlx4_bitmap *bitmap = + mlx4_zone_get_bitmap(qp_table->zones, + qp_table->zones_uids[i]); + + mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]); + if (NULL == bitmap) + continue; + + mlx4_bitmap_cleanup(bitmap); + } + mlx4_zone_allocator_destroy(qp_table->zones); + kfree(qp_table->bitmap_gen); + qp_table->bitmap_gen = NULL; + qp_table->zones = NULL; + } +} + +int mlx4_init_qp_table(struct mlx4_dev *dev) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + int err; + int reserved_from_top = 0; + int reserved_from_bot; + int k; + int fixed_reserved_from_bot_rv = 0; + int bottom_reserved_for_rss_bitmap; + u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base + + dev->caps.dmfs_high_rate_qpn_range; + + 
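The bf_mask expression in mlx4_create_zones() above is a standard lowest-set-bit trick; the short check below uses the same 0xc0 skip mask and confirms the 0x3f result quoted in the comment, along with the "bits 6 and 7 clear" test a BlueFlame-capable QP number must pass.

/* x & ~(x - 1) isolates the lowest set bit of x; subtracting 1 then
 * yields a mask of every bit below it.
 */
#include <stdio.h>

#define BF_QP_SKIP_MASK 0xc0u	/* same value as MLX4_BF_QP_SKIP_MASK */

int main(void)
{
	unsigned int bf_mask = (BF_QP_SKIP_MASK & ~(BF_QP_SKIP_MASK - 1)) - 1;

	printf("bf_mask = 0x%02x\n", bf_mask);				/* 0x3f */
	printf("BF ok for QP 0x25: %d\n", !(0x25 & BF_QP_SKIP_MASK));	/* 1 */
	printf("BF ok for QP 0x40: %d\n", !(0x40 & BF_QP_SKIP_MASK));	/* 0 */
	return 0;
}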
spin_lock_init(&qp_table->lock); + INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; + + /* We reserve 2 extra QPs per port for the special QPs. The + * block of special QPs must be aligned to a multiple of 8, so + * round up. + * + * We also reserve the MSB of the 24-bit QP number to indicate + * that a QP is an XRC QP. + */ + for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++) + fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k]; + + if (fixed_reserved_from_bot_rv < max_table_offset) + fixed_reserved_from_bot_rv = max_table_offset; + + /* We reserve at least 1 extra for bitmaps that we don't have enough space for*/ + bottom_reserved_for_rss_bitmap = + roundup_pow_of_two(fixed_reserved_from_bot_rv + 1); + dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8); + + { + int sort[MLX4_NUM_QP_REGION]; + int i, j, tmp; + int last_base = dev->caps.num_qps; + + for (i = 1; i < MLX4_NUM_QP_REGION; ++i) + sort[i] = i; + + for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) { + for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) { + if (dev->caps.reserved_qps_cnt[sort[j]] > + dev->caps.reserved_qps_cnt[sort[j - 1]]) { + tmp = sort[j]; + sort[j] = sort[j - 1]; + sort[j - 1] = tmp; + } + } + } + + for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) { + last_base -= dev->caps.reserved_qps_cnt[sort[i]]; + dev->caps.reserved_qps_base[sort[i]] = last_base; + reserved_from_top += + dev->caps.reserved_qps_cnt[sort[i]]; + } + } + + /* Reserve 8 real SQPs in both native and SRIOV modes. + * In addition, in SRIOV mode, reserve 8 proxy SQPs per function + * (for all PFs and VFs), and 8 corresponding tunnel QPs. + * Each proxy SQP works opposite its own tunnel QP. + * + * The QPs are arranged as follows: + * a. 8 real SQPs + * b. All the proxy SQPs (8 per function) + * c. 
All the tunnel QPs (8 per function) + */ + reserved_from_bot = mlx4_num_reserved_sqps(dev); + if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) { + mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n"); + return -EINVAL; + } + + err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot, + bottom_reserved_for_rss_bitmap, + fixed_reserved_from_bot_rv, + max_table_offset); + + if (err) + return err; + + if (mlx4_is_mfunc(dev)) { + /* for PPF use */ + dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8; + dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX; + + /* In mfunc, calculate proxy and tunnel qp offsets for the PF here, + * since the PF does not call mlx4_slave_caps */ + dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + + if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || + !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { + err = -ENOMEM; + goto err_mem; + } + + for (k = 0; k < dev->caps.num_ports; k++) { + dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn + + 8 * mlx4_master_func_num(dev) + k; + dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX; + dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn + + 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k; + dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX; + } + } + + + err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn); + if (err) + goto err_mem; + + return err; + +err_mem: + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + dev->caps.qp0_tunnel = dev->caps.qp0_proxy = + dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL; + mlx4_cleanup_qp_zones(dev); + return err; +} + +void mlx4_cleanup_qp_table(struct mlx4_dev *dev) +{ + if (mlx4_is_slave(dev)) + return; + + mlx4_CONF_SPECIAL_QP(dev, 0); + + mlx4_cleanup_qp_zones(dev); +} + +int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, + struct mlx4_qp_context *context) +{ + struct mlx4_cmd_mailbox *mailbox; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, + MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); + if (!err) + memcpy(context, mailbox->buf + 8, sizeof *context); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_qp_query); + +int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_qp_context *context, + struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) +{ + int err; + int i; + enum mlx4_qp_state states[] = { + MLX4_QP_STATE_RST, + MLX4_QP_STATE_INIT, + MLX4_QP_STATE_RTR, + MLX4_QP_STATE_RTS + }; + + for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { + context->flags &= cpu_to_be32(~(0xf << 28)); + context->flags |= cpu_to_be32(states[i + 1] << 28); + if (states[i + 1] != MLX4_QP_STATE_RTR) + context->params2 &= ~MLX4_QP_BIT_FPP; + err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], + context, 0, 0, qp); + if (err) { + mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n", + states[i + 1], err); + return err; + } + + *qp_state = states[i + 1]; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); diff --git 
a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c new file mode 100644 index 000000000..0076d8858 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/reset.c @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "mlx4.h" + +int mlx4_reset(struct mlx4_dev *dev) +{ + void __iomem *reset; + u32 *hca_header = NULL; + int pcie_cap; + u16 devctl; + u16 linkctl; + u16 vendor; + unsigned long end; + u32 sem; + int i; + int err = 0; + +#define MLX4_RESET_BASE 0xf0000 +#define MLX4_RESET_SIZE 0x400 +#define MLX4_SEM_OFFSET 0x3fc +#define MLX4_RESET_OFFSET 0x10 +#define MLX4_RESET_VALUE swab32(1) + +#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ) +#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ) + + /* + * Reset the chip. This is somewhat ugly because we have to + * save off the PCI header before reset and then restore it + * after the chip reboots. We skip config space offsets 22 + * and 23 since those have a special meaning. + */ + + /* Do we need to save off the full 4K PCI Express header?? 
*/ + hca_header = kmalloc(256, GFP_KERNEL); + if (!hca_header) { + err = -ENOMEM; + mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n"); + goto out; + } + + pcie_cap = pci_pcie_cap(dev->persist->pdev); + + for (i = 0; i < 64; ++i) { + if (i == 22 || i == 23) + continue; + if (pci_read_config_dword(dev->persist->pdev, i * 4, + hca_header + i)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n"); + goto out; + } + } + + reset = ioremap(pci_resource_start(dev->persist->pdev, 0) + + MLX4_RESET_BASE, + MLX4_RESET_SIZE); + if (!reset) { + err = -ENOMEM; + mlx4_err(dev, "Couldn't map HCA reset register, aborting\n"); + goto out; + } + + /* grab HW semaphore to lock out flash updates */ + end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES; + do { + sem = readl(reset + MLX4_SEM_OFFSET); + if (!sem) + break; + + msleep(1); + } while (time_before(jiffies, end)); + + if (sem) { + mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n"); + err = -EAGAIN; + iounmap(reset); + goto out; + } + + /* actually hit reset */ + writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET); + iounmap(reset); + + /* Docs say to wait one second before accessing device */ + msleep(1000); + + end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES; + do { + if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID, + &vendor) && vendor != 0xffff) + break; + + msleep(1); + } while (time_before(jiffies, end)); + + if (vendor == 0xffff) { + err = -ENODEV; + mlx4_err(dev, "PCI device did not come back after reset, aborting\n"); + goto out; + } + + /* Now restore the PCI headers */ + if (pcie_cap) { + devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; + if (pcie_capability_write_word(dev->persist->pdev, + PCI_EXP_DEVCTL, + devctl)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n"); + goto out; + } + linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; + if (pcie_capability_write_word(dev->persist->pdev, + PCI_EXP_LNKCTL, + linkctl)) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n"); + goto out; + } + } + + for (i = 0; i < 16; ++i) { + if (i * 4 == PCI_COMMAND) + continue; + + if (pci_write_config_dword(dev->persist->pdev, i * 4, + hca_header[i])) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n", + i); + goto out; + } + } + + if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND, + hca_header[PCI_COMMAND / 4])) { + err = -ENODEV; + mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n"); + goto out; + } + +out: + kfree(hca_header); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c new file mode 100644 index 000000000..bafe2180c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -0,0 +1,4957 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. + * All rights reserved. + * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4.h" +#include "fw.h" + +#define MLX4_MAC_VALID (1ull << 63) + +struct mac_res { + struct list_head list; + u64 mac; + int ref_count; + u8 smac_index; + u8 port; +}; + +struct vlan_res { + struct list_head list; + u16 vlan; + int ref_count; + int vlan_index; + u8 port; +}; + +struct res_common { + struct list_head list; + struct rb_node node; + u64 res_id; + int owner; + int state; + int from_state; + int to_state; + int removing; +}; + +enum { + RES_ANY_BUSY = 1 +}; + +struct res_gid { + struct list_head list; + u8 gid[16]; + enum mlx4_protocol prot; + enum mlx4_steer_type steer; + u64 reg_id; +}; + +enum res_qp_states { + RES_QP_BUSY = RES_ANY_BUSY, + + /* QP number was allocated */ + RES_QP_RESERVED, + + /* ICM memory for QP context was mapped */ + RES_QP_MAPPED, + + /* QP is in hw ownership */ + RES_QP_HW +}; + +struct res_qp { + struct res_common com; + struct res_mtt *mtt; + struct res_cq *rcq; + struct res_cq *scq; + struct res_srq *srq; + struct list_head mcg_list; + spinlock_t mcg_spl; + int local_qpn; + atomic_t ref_count; + u32 qpc_flags; + /* saved qp params before VST enforcement in order to restore on VGT */ + u8 sched_queue; + __be32 param3; + u8 vlan_control; + u8 fvl_rx; + u8 pri_path_fl; + u8 vlan_index; + u8 feup; +}; + +enum res_mtt_states { + RES_MTT_BUSY = RES_ANY_BUSY, + RES_MTT_ALLOCATED, +}; + +static inline const char *mtt_states_str(enum res_mtt_states state) +{ + switch (state) { + case RES_MTT_BUSY: return "RES_MTT_BUSY"; + case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED"; + default: return "Unknown"; + } +} + +struct res_mtt { + struct res_common com; + int order; + atomic_t ref_count; +}; + +enum res_mpt_states { + RES_MPT_BUSY = RES_ANY_BUSY, + RES_MPT_RESERVED, + RES_MPT_MAPPED, + RES_MPT_HW, +}; + +struct res_mpt { + struct res_common com; + struct res_mtt *mtt; + int key; +}; + +enum res_eq_states { + RES_EQ_BUSY = RES_ANY_BUSY, + RES_EQ_RESERVED, + RES_EQ_HW, +}; + +struct res_eq { + struct res_common com; + struct res_mtt *mtt; +}; + +enum res_cq_states { + RES_CQ_BUSY = RES_ANY_BUSY, + RES_CQ_ALLOCATED, + RES_CQ_HW, +}; + +struct res_cq { + struct res_common com; + struct res_mtt *mtt; + atomic_t ref_count; 
+}; + +enum res_srq_states { + RES_SRQ_BUSY = RES_ANY_BUSY, + RES_SRQ_ALLOCATED, + RES_SRQ_HW, +}; + +struct res_srq { + struct res_common com; + struct res_mtt *mtt; + struct res_cq *cq; + atomic_t ref_count; +}; + +enum res_counter_states { + RES_COUNTER_BUSY = RES_ANY_BUSY, + RES_COUNTER_ALLOCATED, +}; + +struct res_counter { + struct res_common com; + int port; +}; + +enum res_xrcdn_states { + RES_XRCD_BUSY = RES_ANY_BUSY, + RES_XRCD_ALLOCATED, +}; + +struct res_xrcdn { + struct res_common com; + int port; +}; + +enum res_fs_rule_states { + RES_FS_RULE_BUSY = RES_ANY_BUSY, + RES_FS_RULE_ALLOCATED, +}; + +struct res_fs_rule { + struct res_common com; + int qpn; +}; + +static void *res_tracker_lookup(struct rb_root *root, u64 res_id) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct res_common *res = container_of(node, struct res_common, + node); + + if (res_id < res->res_id) + node = node->rb_left; + else if (res_id > res->res_id) + node = node->rb_right; + else + return res; + } + return NULL; +} + +static int res_tracker_insert(struct rb_root *root, struct res_common *res) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + struct res_common *this = container_of(*new, struct res_common, + node); + + parent = *new; + if (res->res_id < this->res_id) + new = &((*new)->rb_left); + else if (res->res_id > this->res_id) + new = &((*new)->rb_right); + else + return -EEXIST; + } + + /* Add new node and rebalance tree. */ + rb_link_node(&res->node, parent, new); + rb_insert_color(&res->node, root); + + return 0; +} + +enum qp_transition { + QP_TRANS_INIT2RTR, + QP_TRANS_RTR2RTS, + QP_TRANS_RTS2RTS, + QP_TRANS_SQERR2RTS, + QP_TRANS_SQD2SQD, + QP_TRANS_SQD2RTS +}; + +/* For Debug uses */ +static const char *resource_str(enum mlx4_resource rt) +{ + switch (rt) { + case RES_QP: return "RES_QP"; + case RES_CQ: return "RES_CQ"; + case RES_SRQ: return "RES_SRQ"; + case RES_MPT: return "RES_MPT"; + case RES_MTT: return "RES_MTT"; + case RES_MAC: return "RES_MAC"; + case RES_VLAN: return "RES_VLAN"; + case RES_EQ: return "RES_EQ"; + case RES_COUNTER: return "RES_COUNTER"; + case RES_FS_RULE: return "RES_FS_RULE"; + case RES_XRCD: return "RES_XRCD"; + default: return "Unknown resource type !!!"; + }; +} + +static void rem_slave_vlans(struct mlx4_dev *dev, int slave); +static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, + enum mlx4_resource res_type, int count, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[res_type]; + int err = -EINVAL; + int allocated, free, reserved, guaranteed, from_free; + int from_rsvd; + + if (slave > dev->persist->num_vfs) + return -EINVAL; + + spin_lock(&res_alloc->alloc_lock); + allocated = (port > 0) ? + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] : + res_alloc->allocated[slave]; + free = (port > 0) ? res_alloc->res_port_free[port - 1] : + res_alloc->res_free; + reserved = (port > 0) ? 
res_alloc->res_port_rsvd[port - 1] : + res_alloc->res_reserved; + guaranteed = res_alloc->guaranteed[slave]; + + if (allocated + count > res_alloc->quota[slave]) { + mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n", + slave, port, resource_str(res_type), count, + allocated, res_alloc->quota[slave]); + goto out; + } + + if (allocated + count <= guaranteed) { + err = 0; + from_rsvd = count; + } else { + /* portion may need to be obtained from free area */ + if (guaranteed - allocated > 0) + from_free = count - (guaranteed - allocated); + else + from_free = count; + + from_rsvd = count - from_free; + + if (free - from_free >= reserved) + err = 0; + else + mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n", + slave, port, resource_str(res_type), free, + from_free, reserved); + } + + if (!err) { + /* grant the request */ + if (port > 0) { + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] += count; + res_alloc->res_port_free[port - 1] -= count; + res_alloc->res_port_rsvd[port - 1] -= from_rsvd; + } else { + res_alloc->allocated[slave] += count; + res_alloc->res_free -= count; + res_alloc->res_reserved -= from_rsvd; + } + } + +out: + spin_unlock(&res_alloc->alloc_lock); + return err; +} + +static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, + enum mlx4_resource res_type, int count, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[res_type]; + int allocated, guaranteed, from_rsvd; + + if (slave > dev->persist->num_vfs) + return; + + spin_lock(&res_alloc->alloc_lock); + + allocated = (port > 0) ? + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] : + res_alloc->allocated[slave]; + guaranteed = res_alloc->guaranteed[slave]; + + if (allocated - count >= guaranteed) { + from_rsvd = 0; + } else { + /* portion may need to be returned to reserved area */ + if (allocated - guaranteed > 0) + from_rsvd = count - (allocated - guaranteed); + else + from_rsvd = count; + } + + if (port > 0) { + res_alloc->allocated[(port - 1) * + (dev->persist->num_vfs + 1) + slave] -= count; + res_alloc->res_port_free[port - 1] += count; + res_alloc->res_port_rsvd[port - 1] += from_rsvd; + } else { + res_alloc->allocated[slave] -= count; + res_alloc->res_free += count; + res_alloc->res_reserved += from_rsvd; + } + + spin_unlock(&res_alloc->alloc_lock); + return; +} + +static inline void initialize_res_quotas(struct mlx4_dev *dev, + struct resource_allocator *res_alloc, + enum mlx4_resource res_type, + int vf, int num_instances) +{ + res_alloc->guaranteed[vf] = num_instances / + (2 * (dev->persist->num_vfs + 1)); + res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf]; + if (vf == mlx4_master_func_num(dev)) { + res_alloc->res_free = num_instances; + if (res_type == RES_MTT) { + /* reserved mtts will be taken out of the PF allocation */ + res_alloc->res_free += dev->caps.reserved_mtts; + res_alloc->guaranteed[vf] += dev->caps.reserved_mtts; + res_alloc->quota[vf] += dev->caps.reserved_mtts; + } + } +} + +void mlx4_init_quotas(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int pf; + + /* quotas for VFs are initialized in mlx4_slave_cap */ + if (mlx4_is_slave(dev)) + return; + + if (!mlx4_is_mfunc(dev)) { + dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - + mlx4_num_reserved_sqps(dev); + dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; 
+ dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; + dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; + dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws; + return; + } + + pf = mlx4_master_func_num(dev); + dev->quotas.qp = + priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf]; + dev->quotas.cq = + priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf]; + dev->quotas.srq = + priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf]; + dev->quotas.mtt = + priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf]; + dev->quotas.mpt = + priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; +} +int mlx4_init_resource_tracker(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, j; + int t; + + priv->mfunc.master.res_tracker.slave_list = + kzalloc(dev->num_slaves * sizeof(struct slave_list), + GFP_KERNEL); + if (!priv->mfunc.master.res_tracker.slave_list) + return -ENOMEM; + + for (i = 0 ; i < dev->num_slaves; i++) { + for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) + INIT_LIST_HEAD(&priv->mfunc.master.res_tracker. + slave_list[i].res_list[t]); + mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + } + + mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", + dev->num_slaves); + for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) + priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; + + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + struct resource_allocator *res_alloc = + &priv->mfunc.master.res_tracker.res_alloc[i]; + res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) * + sizeof(int), GFP_KERNEL); + res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) * + sizeof(int), GFP_KERNEL); + if (i == RES_MAC || i == RES_VLAN) + res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * + (dev->persist->num_vfs + + 1) * + sizeof(int), GFP_KERNEL); + else + res_alloc->allocated = kzalloc((dev->persist-> + num_vfs + 1) * + sizeof(int), GFP_KERNEL); + + if (!res_alloc->quota || !res_alloc->guaranteed || + !res_alloc->allocated) + goto no_mem_err; + + spin_lock_init(&res_alloc->alloc_lock); + for (t = 0; t < dev->persist->num_vfs + 1; t++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, t); + switch (i) { + case RES_QP: + initialize_res_quotas(dev, res_alloc, RES_QP, + t, dev->caps.num_qps - + dev->caps.reserved_qps - + mlx4_num_reserved_sqps(dev)); + break; + case RES_CQ: + initialize_res_quotas(dev, res_alloc, RES_CQ, + t, dev->caps.num_cqs - + dev->caps.reserved_cqs); + break; + case RES_SRQ: + initialize_res_quotas(dev, res_alloc, RES_SRQ, + t, dev->caps.num_srqs - + dev->caps.reserved_srqs); + break; + case RES_MPT: + initialize_res_quotas(dev, res_alloc, RES_MPT, + t, dev->caps.num_mpts - + dev->caps.reserved_mrws); + break; + case RES_MTT: + initialize_res_quotas(dev, res_alloc, RES_MTT, + t, dev->caps.num_mtts - + dev->caps.reserved_mtts); + break; + case RES_MAC: + if (t == mlx4_master_func_num(dev)) { + int max_vfs_pport = 0; + /* Calculate the max vfs per port for */ + /* both ports. 
*/ + for (j = 0; j < dev->caps.num_ports; + j++) { + struct mlx4_slaves_pport slaves_pport = + mlx4_phys_to_slaves_pport(dev, j + 1); + unsigned current_slaves = + bitmap_weight(slaves_pport.slaves, + dev->caps.num_ports) - 1; + if (max_vfs_pport < current_slaves) + max_vfs_pport = + current_slaves; + } + res_alloc->quota[t] = + MLX4_MAX_MAC_NUM - + 2 * max_vfs_pport; + res_alloc->guaranteed[t] = 2; + for (j = 0; j < MLX4_MAX_PORTS; j++) + res_alloc->res_port_free[j] = + MLX4_MAX_MAC_NUM; + } else { + res_alloc->quota[t] = MLX4_MAX_MAC_NUM; + res_alloc->guaranteed[t] = 2; + } + break; + case RES_VLAN: + if (t == mlx4_master_func_num(dev)) { + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM; + res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2; + for (j = 0; j < MLX4_MAX_PORTS; j++) + res_alloc->res_port_free[j] = + res_alloc->quota[t]; + } else { + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2; + res_alloc->guaranteed[t] = 0; + } + break; + case RES_COUNTER: + res_alloc->quota[t] = dev->caps.max_counters; + res_alloc->guaranteed[t] = 0; + if (t == mlx4_master_func_num(dev)) + res_alloc->res_free = res_alloc->quota[t]; + break; + default: + break; + } + if (i == RES_MAC || i == RES_VLAN) { + for (j = 0; j < dev->caps.num_ports; j++) + if (test_bit(j, actv_ports.ports)) + res_alloc->res_port_rsvd[j] += + res_alloc->guaranteed[t]; + } else { + res_alloc->res_reserved += res_alloc->guaranteed[t]; + } + } + } + spin_lock_init(&priv->mfunc.master.res_tracker.lock); + return 0; + +no_mem_err: + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; + } + return -ENOMEM; +} + +void mlx4_free_resource_tracker(struct mlx4_dev *dev, + enum mlx4_res_tracker_free_type type) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i; + + if (priv->mfunc.master.res_tracker.slave_list) { + if (type != RES_TR_FREE_STRUCTS_ONLY) { + for (i = 0; i < dev->num_slaves; i++) { + if (type == RES_TR_FREE_ALL || + dev->caps.function != i) + mlx4_delete_all_resources_for_slave(dev, i); + } + /* free master's vlans */ + i = dev->caps.function; + mlx4_reset_roce_gids(dev, i); + mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + rem_slave_vlans(dev, i); + mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); + } + + if (type != RES_TR_FREE_SLAVES_ONLY) { + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; + } + kfree(priv->mfunc.master.res_tracker.slave_list); + priv->mfunc.master.res_tracker.slave_list = NULL; + } + } +} + +static void update_pkey_index(struct mlx4_dev *dev, int slave, + struct mlx4_cmd_mailbox *inbox) +{ + u8 sched = *(u8 *)(inbox->buf + 64); + u8 orig_index = *(u8 *)(inbox->buf + 35); + u8 new_index; + struct mlx4_priv *priv = mlx4_priv(dev); + int port; + + port = (sched >> 6 & 1) + 1; + + new_index = priv->virt2phys_pkey[slave][port - 
1][orig_index]; + *(u8 *)(inbox->buf + 35) = new_index; +} + +static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox, + u8 slave) +{ + struct mlx4_qp_context *qp_ctx = inbox->buf + 8; + enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); + u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + int port; + + if (MLX4_QP_ST_UD == ts) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) + qp_ctx->pri_path.mgid_index = + mlx4_get_base_gid_ix(dev, slave, port) | 0x80; + else + qp_ctx->pri_path.mgid_index = slave | 0x80; + + } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) { + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->pri_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->pri_path.mgid_index &= 0x7f; + } else { + qp_ctx->pri_path.mgid_index = slave & 0x7F; + } + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->alt_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->alt_path.mgid_index &= 0x7f; + } else { + qp_ctx->alt_path.mgid_index = slave & 0x7F; + } + } + } +} + +static int update_vport_qp_param(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *inbox, + u8 slave, u32 qpn) +{ + struct mlx4_qp_context *qpc = inbox->buf + 8; + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_priv *priv; + u32 qp_type; + int port, err = 0; + + port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; + priv = mlx4_priv(dev); + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + + if (MLX4_VGT != vp_oper->state.default_vlan) { + /* the reserved QPs (special, proxy, tunnel) + * do not operate over vlans + */ + if (mlx4_is_qp_reserved(dev, qpn)) + return 0; + + /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */ + if (qp_type == MLX4_QP_ST_UD || + (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { + if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { + *(__be32 *)inbox->buf = + cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | + MLX4_QP_OPTPAR_VLAN_STRIPPING); + qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + } else { + struct mlx4_update_qp_params params = {.flags = 0}; + + err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); + if (err) + goto out; + } + } + + if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && + dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } else if (0 != vp_oper->state.default_vlan) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + } else { /* priority tagged */ + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } + + qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; + qpc->pri_path.vlan_index = vp_oper->vlan_idx; + qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; + qpc->pri_path.sched_queue &= 0xC7; + qpc->pri_path.sched_queue |= 
(vp_oper->state.default_qos) << 3; + qpc->qos_vport = vp_oper->state.qos_vport; + } + if (vp_oper->state.spoofchk) { + qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; + qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; + } +out: + return err; +} + +static int mpt_mask(struct mlx4_dev *dev) +{ + return dev->caps.num_mpts - 1; +} + +static void *find_res(struct mlx4_dev *dev, u64 res_id, + enum mlx4_resource type) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type], + res_id); +} + +static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type, + void *res) +{ + struct res_common *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = find_res(dev, res_id, type); + if (!r) { + err = -ENONET; + goto exit; + } + + if (r->state == RES_ANY_BUSY) { + err = -EBUSY; + goto exit; + } + + if (r->owner != slave) { + err = -EPERM; + goto exit; + } + + r->from_state = r->state; + r->state = RES_ANY_BUSY; + + if (res) + *((struct res_common **)res) = r; + +exit: + spin_unlock_irq(mlx4_tlock(dev)); + return err; +} + +int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, + enum mlx4_resource type, + u64 res_id, int *slave) +{ + + struct res_common *r; + int err = -ENOENT; + int id = res_id; + + if (type == RES_QP) + id &= 0x7fffff; + spin_lock(mlx4_tlock(dev)); + + r = find_res(dev, id, type); + if (r) { + *slave = r->owner; + err = 0; + } + spin_unlock(mlx4_tlock(dev)); + + return err; +} + +static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, + enum mlx4_resource type) +{ + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = find_res(dev, res_id, type); + if (r) + r->state = r->from_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static struct res_common *alloc_qp_tr(int id) +{ + struct res_qp *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_QP_RESERVED; + ret->local_qpn = id; + INIT_LIST_HEAD(&ret->mcg_list); + spin_lock_init(&ret->mcg_spl); + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_mtt_tr(int id, int order) +{ + struct res_mtt *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->order = order; + ret->com.state = RES_MTT_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_mpt_tr(int id, int key) +{ + struct res_mpt *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_MPT_RESERVED; + ret->key = key; + + return &ret->com; +} + +static struct res_common *alloc_eq_tr(int id) +{ + struct res_eq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_EQ_RESERVED; + + return &ret->com; +} + +static struct res_common *alloc_cq_tr(int id) +{ + struct res_cq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_CQ_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_srq_tr(int id) +{ + struct res_srq *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_SRQ_ALLOCATED; + atomic_set(&ret->ref_count, 0); + + return &ret->com; +} + +static struct res_common *alloc_counter_tr(int id) +{ + 
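+ /* Tracker entry only; the HW counter index itself is allocated
+ * separately via __mlx4_counter_alloc() (see counter_alloc_res below).
+ */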
struct res_counter *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_COUNTER_ALLOCATED; + + return &ret->com; +} + +static struct res_common *alloc_xrcdn_tr(int id) +{ + struct res_xrcdn *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_XRCD_ALLOCATED; + + return &ret->com; +} + +static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) +{ + struct res_fs_rule *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_FS_RULE_ALLOCATED; + ret->qpn = qpn; + return &ret->com; +} + +static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, + int extra) +{ + struct res_common *ret; + + switch (type) { + case RES_QP: + ret = alloc_qp_tr(id); + break; + case RES_MPT: + ret = alloc_mpt_tr(id, extra); + break; + case RES_MTT: + ret = alloc_mtt_tr(id, extra); + break; + case RES_EQ: + ret = alloc_eq_tr(id); + break; + case RES_CQ: + ret = alloc_cq_tr(id); + break; + case RES_SRQ: + ret = alloc_srq_tr(id); + break; + case RES_MAC: + pr_err("implementation missing\n"); + return NULL; + case RES_COUNTER: + ret = alloc_counter_tr(id); + break; + case RES_XRCD: + ret = alloc_xrcdn_tr(id); + break; + case RES_FS_RULE: + ret = alloc_fs_rule_tr(id, extra); + break; + default: + return NULL; + } + if (ret) + ret->owner = slave; + + return ret; +} + +static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, + enum mlx4_resource type, int extra) +{ + int i; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + struct res_common **res_arr; + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct rb_root *root = &tracker->res_tree[type]; + + res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); + if (!res_arr) + return -ENOMEM; + + for (i = 0; i < count; ++i) { + res_arr[i] = alloc_tr(base + i, type, slave, extra); + if (!res_arr[i]) { + for (--i; i >= 0; --i) + kfree(res_arr[i]); + + kfree(res_arr); + return -ENOMEM; + } + } + + spin_lock_irq(mlx4_tlock(dev)); + for (i = 0; i < count; ++i) { + if (find_res(dev, base + i, type)) { + err = -EEXIST; + goto undo; + } + err = res_tracker_insert(root, res_arr[i]); + if (err) + goto undo; + list_add_tail(&res_arr[i]->list, + &tracker->slave_list[slave].res_list[type]); + } + spin_unlock_irq(mlx4_tlock(dev)); + kfree(res_arr); + + return 0; + +undo: + for (--i; i >= base; --i) + rb_erase(&res_arr[i]->node, root); + + spin_unlock_irq(mlx4_tlock(dev)); + + for (i = 0; i < count; ++i) + kfree(res_arr[i]); + + kfree(res_arr); + + return err; +} + +static int remove_qp_ok(struct res_qp *res) +{ + if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) || + !list_empty(&res->mcg_list)) { + pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", + res->com.state, atomic_read(&res->ref_count)); + return -EBUSY; + } else if (res->com.state != RES_QP_RESERVED) { + return -EPERM; + } + + return 0; +} + +static int remove_mtt_ok(struct res_mtt *res, int order) +{ + if (res->com.state == RES_MTT_BUSY || + atomic_read(&res->ref_count)) { + pr_devel("%s-%d: state %s, ref_count %d\n", + __func__, __LINE__, + mtt_states_str(res->com.state), + atomic_read(&res->ref_count)); + return -EBUSY; + } else if (res->com.state != RES_MTT_ALLOCATED) + return -EPERM; + else if (res->order != order) + return -EINVAL; + + return 0; +} + +static int remove_mpt_ok(struct res_mpt *res) 
+{ + if (res->com.state == RES_MPT_BUSY) + return -EBUSY; + else if (res->com.state != RES_MPT_RESERVED) + return -EPERM; + + return 0; +} + +static int remove_eq_ok(struct res_eq *res) +{ + if (res->com.state == RES_MPT_BUSY) + return -EBUSY; + else if (res->com.state != RES_MPT_RESERVED) + return -EPERM; + + return 0; +} + +static int remove_counter_ok(struct res_counter *res) +{ + if (res->com.state == RES_COUNTER_BUSY) + return -EBUSY; + else if (res->com.state != RES_COUNTER_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_xrcdn_ok(struct res_xrcdn *res) +{ + if (res->com.state == RES_XRCD_BUSY) + return -EBUSY; + else if (res->com.state != RES_XRCD_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_fs_rule_ok(struct res_fs_rule *res) +{ + if (res->com.state == RES_FS_RULE_BUSY) + return -EBUSY; + else if (res->com.state != RES_FS_RULE_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_cq_ok(struct res_cq *res) +{ + if (res->com.state == RES_CQ_BUSY) + return -EBUSY; + else if (res->com.state != RES_CQ_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_srq_ok(struct res_srq *res) +{ + if (res->com.state == RES_SRQ_BUSY) + return -EBUSY; + else if (res->com.state != RES_SRQ_ALLOCATED) + return -EPERM; + + return 0; +} + +static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) +{ + switch (type) { + case RES_QP: + return remove_qp_ok((struct res_qp *)res); + case RES_CQ: + return remove_cq_ok((struct res_cq *)res); + case RES_SRQ: + return remove_srq_ok((struct res_srq *)res); + case RES_MPT: + return remove_mpt_ok((struct res_mpt *)res); + case RES_MTT: + return remove_mtt_ok((struct res_mtt *)res, extra); + case RES_MAC: + return -ENOSYS; + case RES_EQ: + return remove_eq_ok((struct res_eq *)res); + case RES_COUNTER: + return remove_counter_ok((struct res_counter *)res); + case RES_XRCD: + return remove_xrcdn_ok((struct res_xrcdn *)res); + case RES_FS_RULE: + return remove_fs_rule_ok((struct res_fs_rule *)res); + default: + return -EINVAL; + } +} + +static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, + enum mlx4_resource type, int extra) +{ + u64 i; + int err; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + for (i = base; i < base + count; ++i) { + r = res_tracker_lookup(&tracker->res_tree[type], i); + if (!r) { + err = -ENOENT; + goto out; + } + if (r->owner != slave) { + err = -EPERM; + goto out; + } + err = remove_ok(r, type, extra); + if (err) + goto out; + } + + for (i = base; i < base + count; ++i) { + r = res_tracker_lookup(&tracker->res_tree[type], i); + rb_erase(&r->node, &tracker->res_tree[type]); + list_del(&r->list); + kfree(r); + } + err = 0; + +out: + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, + enum res_qp_states state, struct res_qp **qp, + int alloc) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_qp *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_QP_BUSY: + mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n", + __func__, r->com.res_id); + err = -EBUSY; + break; + + case 
RES_QP_RESERVED: + if (r->com.state == RES_QP_MAPPED && !alloc) + break; + + mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id); + err = -EINVAL; + break; + + case RES_QP_MAPPED: + if ((r->com.state == RES_QP_RESERVED && alloc) || + r->com.state == RES_QP_HW) + break; + else { + mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", + r->com.res_id); + err = -EINVAL; + } + + break; + + case RES_QP_HW: + if (r->com.state != RES_QP_MAPPED) + err = -EINVAL; + break; + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_QP_BUSY; + if (qp) + *qp = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_mpt_states state, struct res_mpt **mpt) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_mpt *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_MPT_BUSY: + err = -EINVAL; + break; + + case RES_MPT_RESERVED: + if (r->com.state != RES_MPT_MAPPED) + err = -EINVAL; + break; + + case RES_MPT_MAPPED: + if (r->com.state != RES_MPT_RESERVED && + r->com.state != RES_MPT_HW) + err = -EINVAL; + break; + + case RES_MPT_HW: + if (r->com.state != RES_MPT_MAPPED) + err = -EINVAL; + break; + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_MPT_BUSY; + if (mpt) + *mpt = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_eq_states state, struct res_eq **eq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_eq *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index); + if (!r) + err = -ENOENT; + else if (r->com.owner != slave) + err = -EPERM; + else { + switch (state) { + case RES_EQ_BUSY: + err = -EINVAL; + break; + + case RES_EQ_RESERVED: + if (r->com.state != RES_EQ_HW) + err = -EINVAL; + break; + + case RES_EQ_HW: + if (r->com.state != RES_EQ_RESERVED) + err = -EINVAL; + break; + + default: + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_EQ_BUSY; + if (eq) + *eq = r; + } + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, + enum res_cq_states state, struct res_cq **cq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_cq *r; + int err; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn); + if (!r) { + err = -ENOENT; + } else if (r->com.owner != slave) { + err = -EPERM; + } else if (state == RES_CQ_ALLOCATED) { + if (r->com.state != RES_CQ_HW) + err = -EINVAL; + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + else + err = 0; + } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) { + err = -EINVAL; + } else { + err = 0; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_CQ_BUSY; + if (cq) + *cq = r; + } + + 
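+ /* On success the CQ is parked in RES_CQ_BUSY; the caller later settles
+ * it with res_end_move() or res_abort_move().
+ */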
spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, + enum res_srq_states state, struct res_srq **srq) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_srq *r; + int err = 0; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); + if (!r) { + err = -ENOENT; + } else if (r->com.owner != slave) { + err = -EPERM; + } else if (state == RES_SRQ_ALLOCATED) { + if (r->com.state != RES_SRQ_HW) + err = -EINVAL; + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { + err = -EINVAL; + } + + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_SRQ_BUSY; + if (srq) + *srq = r; + } + + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static void res_abort_move(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[type], id); + if (r && (r->owner == slave)) + r->state = r->from_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void res_end_move(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_common *r; + + spin_lock_irq(mlx4_tlock(dev)); + r = res_tracker_lookup(&tracker->res_tree[type], id); + if (r && (r->owner == slave)) + r->state = r->to_state; + spin_unlock_irq(mlx4_tlock(dev)); +} + +static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) +{ + return mlx4_is_qp_reserved(dev, qpn) && + (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn)); +} + +static int fw_reserved(struct mlx4_dev *dev, int qpn) +{ + return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; +} + +static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err; + int count; + int align; + int base; + int qpn; + u8 flags; + + switch (op) { + case RES_OP_RESERVE: + count = get_param_l(&in_param) & 0xffffff; + /* Turn off all unsupported QP allocation flags that the + * slave tries to set. 
+ */ + flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask; + align = get_param_h(&in_param); + err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); + if (err) + return err; + + err = __mlx4_qp_reserve_range(dev, count, align, &base, flags); + if (err) { + mlx4_release_resource(dev, slave, RES_QP, count, 0); + return err; + } + + err = add_res_range(dev, slave, base, count, RES_QP, 0); + if (err) { + mlx4_release_resource(dev, slave, RES_QP, count, 0); + __mlx4_qp_release_range(dev, base, count); + return err; + } + set_param_l(out_param, base); + break; + case RES_OP_MAP_ICM: + qpn = get_param_l(&in_param) & 0x7fffff; + if (valid_reserved(dev, slave, qpn)) { + err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); + if (err) + return err; + } + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, + NULL, 1); + if (err) + return err; + + if (!fw_reserved(dev, qpn)) { + err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); + if (err) { + res_abort_move(dev, slave, RES_QP, qpn); + return err; + } + } + + res_end_move(dev, slave, RES_QP, qpn); + break; + + default: + err = -EINVAL; + break; + } + return err; +} + +static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int base; + int order; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + order = get_param_l(&in_param); + + err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0); + if (err) + return err; + + base = __mlx4_alloc_mtt_range(dev, order); + if (base == -1) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); + return -ENOMEM; + } + + err = add_res_range(dev, slave, base, 1, RES_MTT, order); + if (err) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); + __mlx4_free_mtt_range(dev, base, order); + } else { + set_param_l(out_param, base); + } + + return err; +} + +static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int index; + int id; + struct res_mpt *mpt; + + switch (op) { + case RES_OP_RESERVE: + err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0); + if (err) + break; + + index = __mlx4_mpt_reserve(dev); + if (index == -1) { + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); + break; + } + id = index & mpt_mask(dev); + + err = add_res_range(dev, slave, id, 1, RES_MPT, index); + if (err) { + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); + __mlx4_mpt_release(dev, index); + break; + } + set_param_l(out_param, index); + break; + case RES_OP_MAP_ICM: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_MAPPED, &mpt); + if (err) + return err; + + err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL); + if (err) { + res_abort_move(dev, slave, RES_MPT, id); + return err; + } + + res_end_move(dev, slave, RES_MPT, id); + break; + } + return err; +} + +static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int cqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0); + if (err) + break; + + err = __mlx4_cq_alloc_icm(dev, &cqn); + if (err) { + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); + break; + } + + err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); + if (err) { + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); + __mlx4_cq_free_icm(dev, cqn); + break; + } + + set_param_l(out_param, cqn); + break; + + default: + err = -EINVAL; + } + + 
return err; +} + +static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int srqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0); + if (err) + break; + + err = __mlx4_srq_alloc_icm(dev, &srqn); + if (err) { + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); + break; + } + + err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); + if (err) { + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); + __mlx4_srq_free_icm(dev, srqn); + break; + } + + set_param_l(out_param, srqn); + break; + + default: + err = -EINVAL; + } + + return err; +} + +static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, + u8 smac_index, u64 *mac) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->smac_index == smac_index && res->port == (u8) port) { + *mac = res->mac; + return 0; + } + } + return -ENOENT; +} + +static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + /* mac found. update ref count */ + ++res->ref_count; + return 0; + } + } + + if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) + return -EINVAL; + res = kzalloc(sizeof *res, GFP_KERNEL); + if (!res) { + mlx4_release_resource(dev, slave, RES_MAC, 1, port); + return -ENOMEM; + } + res->mac = mac; + res->port = (u8) port; + res->smac_index = smac_index; + res->ref_count = 1; + list_add_tail(&res->list, + &tracker->slave_list[slave].res_list[RES_MAC]); + return 0; +} + +static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_MAC, 1, port); + kfree(res); + } + break; + } + } +} + +static void rem_slave_macs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + int i; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + list_del(&res->list); + /* dereference the mac the num times the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_mac(dev, res->port, res->mac); + mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); + kfree(res); + } +} + +static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int in_port) +{ + int err = -EINVAL; + int port; + u64 mac; + u8 smac_index; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + port = !in_port ? 
get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + mac = in_param; + + err = __mlx4_register_mac(dev, port, mac); + if (err >= 0) { + smac_index = err; + set_param_l(out_param, err); + err = 0; + } + + if (!err) { + err = mac_add_to_slave(dev, slave, mac, port, smac_index); + if (err) + __mlx4_unregister_mac(dev, port, mac); + } + return err; +} + +static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan, + int port, int vlan_index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + if (res->vlan == vlan && res->port == (u8) port) { + /* vlan found. update ref count */ + ++res->ref_count; + return 0; + } + } + + if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port)) + return -EINVAL; + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + mlx4_release_resource(dev, slave, RES_VLAN, 1, port); + return -ENOMEM; + } + res->vlan = vlan; + res->port = (u8) port; + res->vlan_index = vlan_index; + res->ref_count = 1; + list_add_tail(&res->list, + &tracker->slave_list[slave].res_list[RES_VLAN]); + return 0; +} + + +static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan, + int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + if (res->vlan == vlan && res->port == (u8) port) { + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_VLAN, + 1, port); + kfree(res); + } + break; + } + } +} + +static void rem_slave_vlans(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *vlan_list = + &tracker->slave_list[slave].res_list[RES_VLAN]; + struct vlan_res *res, *tmp; + int i; + + list_for_each_entry_safe(res, tmp, vlan_list, list) { + list_del(&res->list); + /* dereference the vlan the num times the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_vlan(dev, res->port, res->vlan); + mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port); + kfree(res); + } +} + +static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int in_port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int err; + u16 vlan; + int vlan_index; + int port; + + port = !in_port ? get_param_l(out_param) : in_port; + + if (!port || op != RES_OP_RESERVE_AND_MAP) + return -EINVAL; + + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + /* upstream kernels had NOP for reg/unreg vlan. Continue this. 
*/ + if (!in_port && port > 0 && port <= dev->caps.num_ports) { + slave_state[slave].old_vlan_api = true; + return 0; + } + + vlan = (u16) in_param; + + err = __mlx4_register_vlan(dev, port, vlan, &vlan_index); + if (!err) { + set_param_l(out_param, (u32) vlan_index); + err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index); + if (err) + __mlx4_unregister_vlan(dev, port, vlan); + } + return err; +} + +static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + u32 index; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0); + if (err) + return err; + + err = __mlx4_counter_alloc(dev, &index); + if (err) { + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + return err; + } + + err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); + if (err) { + __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + } else { + set_param_l(out_param, index); + } + + return err; +} + +static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + u32 xrcdn; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + err = __mlx4_xrcd_alloc(dev, &xrcdn); + if (err) + return err; + + err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); + if (err) + __mlx4_xrcd_free(dev, xrcdn); + else + set_param_l(out_param, xrcdn); + + return err; +} + +int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int alop = vhcr->op_modifier; + + switch (vhcr->in_modifier & 0xFF) { + case RES_QP: + err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MTT: + err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MPT: + err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_CQ: + err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_SRQ: + err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MAC: + err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_VLAN: + err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_COUNTER: + err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_XRCD: + err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param) +{ + int err; + int count; + int base; + int qpn; + + switch (op) { + case RES_OP_RESERVE: + base = get_param_l(&in_param) & 0x7fffff; + count = get_param_h(&in_param); + err = rem_res_range(dev, slave, base, count, RES_QP, 0); + if (err) + break; + mlx4_release_resource(dev, slave, RES_QP, count, 0); + __mlx4_qp_release_range(dev, base, count); + break; + case RES_OP_MAP_ICM: + qpn = get_param_l(&in_param) & 0x7fffff; + err = qp_res_start_move_to(dev, slave, qpn, 
RES_QP_RESERVED, + NULL, 0); + if (err) + return err; + + if (!fw_reserved(dev, qpn)) + __mlx4_qp_free_icm(dev, qpn); + + res_end_move(dev, slave, RES_QP, qpn); + + if (valid_reserved(dev, slave, qpn)) + err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int err = -EINVAL; + int base; + int order; + + if (op != RES_OP_RESERVE_AND_MAP) + return err; + + base = get_param_l(&in_param); + order = get_param_h(&in_param); + err = rem_res_range(dev, slave, base, 1, RES_MTT, order); + if (!err) { + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); + __mlx4_free_mtt_range(dev, base, order); + } + return err; +} + +static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param) +{ + int err = -EINVAL; + int index; + int id; + struct res_mpt *mpt; + + switch (op) { + case RES_OP_RESERVE: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = get_res(dev, slave, id, RES_MPT, &mpt); + if (err) + break; + index = mpt->key; + put_res(dev, slave, id, RES_MPT); + + err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); + if (err) + break; + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); + __mlx4_mpt_release(dev, index); + break; + case RES_OP_MAP_ICM: + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_RESERVED, &mpt); + if (err) + return err; + + __mlx4_mpt_free_icm(dev, mpt->key); + res_end_move(dev, slave, RES_MPT, id); + return err; + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int cqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + cqn = get_param_l(&in_param); + err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); + if (err) + break; + + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); + __mlx4_cq_free_icm(dev, cqn); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int srqn; + int err; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + srqn = get_param_l(&in_param); + err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); + if (err) + break; + + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); + __mlx4_srq_free_icm(dev, srqn); + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int in_port) +{ + int port; + int err = 0; + + switch (op) { + case RES_OP_RESERVE_AND_MAP: + port = !in_port ? 
get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + mac_del_from_slave(dev, slave, in_param, port); + __mlx4_unregister_mac(dev, port, in_param); + break; + default: + err = -EINVAL; + break; + } + + return err; + +} + +static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param, int port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; + int err = 0; + + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; + switch (op) { + case RES_OP_RESERVE_AND_MAP: + if (slave_state[slave].old_vlan_api) + return 0; + if (!port) + return -EINVAL; + vlan_del_from_slave(dev, slave, in_param, port); + __mlx4_unregister_vlan(dev, port, in_param); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int index; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + index = get_param_l(&in_param); + err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0); + if (err) + return err; + + __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + + return err; +} + +static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, + u64 in_param, u64 *out_param) +{ + int xrcdn; + int err; + + if (op != RES_OP_RESERVE) + return -EINVAL; + + xrcdn = get_param_l(&in_param); + err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0); + if (err) + return err; + + __mlx4_xrcd_free(dev, xrcdn); + + return err; +} + +int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = -EINVAL; + int alop = vhcr->op_modifier; + + switch (vhcr->in_modifier & 0xFF) { + case RES_QP: + err = qp_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param); + break; + + case RES_MTT: + err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MPT: + err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param); + break; + + case RES_CQ: + err = cq_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_SRQ: + err = srq_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_MAC: + err = mac_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_VLAN: + err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param, + (vhcr->in_modifier >> 8) & 0xFF); + break; + + case RES_COUNTER: + err = counter_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + break; + + case RES_XRCD: + err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop, + vhcr->in_param, &vhcr->out_param); + + default: + break; + } + return err; +} + +/* ugly but other choices are uglier */ +static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) +{ + return (be32_to_cpu(mpt->flags) >> 9) & 1; +} + +static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) +{ + return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; +} + +static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) +{ + return be32_to_cpu(mpt->mtt_sz); +} + +static u32 
mr_get_pd(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
+}
+
+static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
+}
+
+static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
+}
+
+static int mr_is_region(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+ int page_shift = (qpc->log_page_size & 0x3f) + 12;
+ int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+ int log_sq_stride = qpc->sq_size_stride & 7;
+ int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+ int log_rq_stride = qpc->rq_size_stride & 7;
+ int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+ int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+ u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
+ int sq_size;
+ int rq_size;
+ int total_pages;
+ int total_mem;
+ int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+ sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+ rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+ total_mem = sq_size + rq_size;
+ total_pages =
+ roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+ page_shift);
+
+ return total_pages;
+}
+
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+ int size, struct res_mtt *mtt)
+{
+ int res_start = mtt->com.res_id;
+ int res_size = (1 << mtt->order);
+
+ if (start < res_start || start + size > res_start + res_size)
+ return -EPERM;
+ return 0;
+}
+
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_mpt *mpt;
+ int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+ int phys;
+ int id;
+ u32 pd;
+ int pd_slave;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+ if (err)
+ return err;
+
+ /* Disable memory windows for VFs. */
+ if (!mr_is_region(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+
+ /* Make sure that the PD bits related to the slave id are zeros. */
+ pd = mr_get_pd(inbox->buf);
+ pd_slave = (pd >> 17) & 0x7f;
+ if (pd_slave != 0 && --pd_slave != slave) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+
+ if (mr_is_fmr(inbox->buf)) {
+ /* FMR and Bind Enable are forbidden in slave devices. */
+ if (mr_is_bind_enabled(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+ /* FMR and Memory Windows are also forbidden. */
+ if (!mr_is_region(inbox->buf)) {
+ err = -EPERM;
+ goto ex_abort;
+ }
+ }
+
+ phys = mr_phys_mpt(inbox->buf);
+ if (!phys) {
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base,
+ mr_get_mtt_size(inbox->buf), mtt);
+ if (err)
+ goto ex_put;
+
+ mpt->mtt = mtt;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ if (!phys) {
+ atomic_inc(&mtt->ref_count);
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_put:
+ if (!phys)
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ return err;
+
+ if (mpt->com.from_state == RES_MPT_MAPPED) {
+ /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
+ * that, the VF must read the MPT. But since the MPT entry memory is not
+ * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
+ * entry contents. To guarantee that the MPT cannot be changed, the driver
+ * must perform HW2SW_MPT before this query and return the MPT entry to HW
+ * ownership following the change. The change here allows the VF to
+ * perform QUERY_MPT also when the entry is in SW ownership.
+ */ + struct mlx4_mpt_entry *mpt_entry = mlx4_table_find( + &mlx4_priv(dev)->mr_table.dmpt_table, + mpt->key, NULL); + + if (NULL == mpt_entry || NULL == outbox->buf) { + err = -EINVAL; + goto out; + } + + memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry)); + + err = 0; + } else if (mpt->com.from_state == RES_MPT_HW) { + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + } else { + err = -EBUSY; + goto out; + } + + +out: + put_res(dev, slave, id, RES_MPT); + return err; +} + +static int qp_get_rcqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->cqn_recv) & 0xffffff; +} + +static int qp_get_scqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->cqn_send) & 0xffffff; +} + +static u32 qp_get_srqn(struct mlx4_qp_context *qpc) +{ + return be32_to_cpu(qpc->srqn) & 0x1ffffff; +} + +static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr, + struct mlx4_qp_context *context) +{ + u32 qpn = vhcr->in_modifier & 0xffffff; + u32 qkey = 0; + + if (mlx4_get_parav_qkey(dev, qpn, &qkey)) + return; + + /* adjust qkey in qp context */ + context->qkey = cpu_to_be32(qkey); +} + +int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_mtt *mtt; + struct res_qp *qp; + struct mlx4_qp_context *qpc = inbox->buf + 8; + int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; + int mtt_size = qp_get_mtt_size(qpc); + struct res_cq *rcq; + struct res_cq *scq; + int rcqn = qp_get_rcqn(qpc); + int scqn = qp_get_scqn(qpc); + u32 srqn = qp_get_srqn(qpc) & 0xffffff; + int use_srq = (qp_get_srqn(qpc) >> 24) & 1; + struct res_srq *srq; + int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); + if (err) + return err; + qp->local_qpn = local_qpn; + qp->sched_queue = 0; + qp->param3 = 0; + qp->vlan_control = 0; + qp->fvl_rx = 0; + qp->pri_path_fl = 0; + qp->vlan_index = 0; + qp->feup = 0; + qp->qpc_flags = be32_to_cpu(qpc->flags); + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + + err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); + if (err) + goto ex_put_mtt; + + err = get_res(dev, slave, rcqn, RES_CQ, &rcq); + if (err) + goto ex_put_mtt; + + if (scqn != rcqn) { + err = get_res(dev, slave, scqn, RES_CQ, &scq); + if (err) + goto ex_put_rcq; + } else + scq = rcq; + + if (use_srq) { + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + goto ex_put_scq; + } + + adjust_proxy_tun_qkey(dev, vhcr, qpc); + update_pkey_index(dev, slave, inbox); + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put_srq; + atomic_inc(&mtt->ref_count); + qp->mtt = mtt; + atomic_inc(&rcq->ref_count); + qp->rcq = rcq; + atomic_inc(&scq->ref_count); + qp->scq = scq; + + if (scqn != rcqn) + put_res(dev, slave, scqn, RES_CQ); + + if (use_srq) { + atomic_inc(&srq->ref_count); + put_res(dev, slave, srqn, RES_SRQ); + qp->srq = srq; + } + put_res(dev, slave, rcqn, RES_CQ); + put_res(dev, slave, mtt_base, RES_MTT); + res_end_move(dev, slave, RES_QP, qpn); + + return 0; + +ex_put_srq: + if (use_srq) + put_res(dev, slave, srqn, RES_SRQ); +ex_put_scq: + if (scqn != rcqn) + put_res(dev, slave, scqn, RES_CQ); +ex_put_rcq: + put_res(dev, slave, rcqn, RES_CQ); +ex_put_mtt: + put_res(dev, slave, mtt_base, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_QP, 
qpn); + + return err; +} + +static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) +{ + return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int eq_get_mtt_size(struct mlx4_eq_context *eqc) +{ + int log_eq_size = eqc->log_eq_size & 0x1f; + int page_shift = (eqc->log_page_size & 0x3f) + 12; + + if (log_eq_size + 5 < page_shift) + return 1; + + return 1 << (log_eq_size + 5 - page_shift); +} + +static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) +{ + return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; +} + +static int cq_get_mtt_size(struct mlx4_cq_context *cqc) +{ + int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; + int page_shift = (cqc->log_page_size & 0x3f) + 12; + + if (log_cq_size + 5 < page_shift) + return 1; + + return 1 << (log_cq_size + 5 - page_shift); +} + +int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int eqn = vhcr->in_modifier; + int res_id = (slave << 10) | eqn; + struct mlx4_eq_context *eqc = inbox->buf; + int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; + int mtt_size = eq_get_mtt_size(eqc); + struct res_eq *eq; + struct res_mtt *mtt; + + err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); + if (err) + return err; + err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); + if (err) + goto out_add; + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto out_move; + + err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); + if (err) + goto out_put; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_put; + + atomic_inc(&mtt->ref_count); + eq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_EQ, res_id); + return 0; + +out_put: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +out_move: + res_abort_move(dev, slave, RES_EQ, res_id); +out_add: + rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); + return err; +} + +int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + u8 get = vhcr->op_modifier; + + if (get != 1) + return -EPERM; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + + return err; +} + +static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, + int len, struct res_mtt **res) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct res_mtt *mtt; + int err = -EINVAL; + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], + com.list) { + if (!check_mtt_range(dev, slave, start, len, mtt)) { + *res = mtt; + mtt->com.from_state = mtt->com.state; + mtt->com.state = RES_MTT_BUSY; + err = 0; + break; + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + return err; +} + +static int verify_qp_parameters(struct mlx4_dev *dev, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + enum qp_transition transition, u8 slave) +{ + u32 qp_type; + u32 qpn; + struct mlx4_qp_context *qp_ctx; + enum mlx4_qp_optpar optpar; + int port; + int num_gids; + + qp_ctx = inbox->buf + 8; + qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + optpar = be32_to_cpu(*(__be32 *) inbox->buf); + + if (slave != mlx4_master_func_num(dev)) { + qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; + /* 
setting QP rate-limit is disallowed for VFs */ + if (qp_ctx->rate_limit_params) + return -EPERM; + } + + switch (qp_type) { + case MLX4_QP_ST_RC: + case MLX4_QP_ST_XRC: + case MLX4_QP_ST_UC: + switch (transition) { + case QP_TRANS_INIT2RTR: + case QP_TRANS_RTR2RTS: + case QP_TRANS_RTS2RTS: + case QP_TRANS_SQD2SQD: + case QP_TRANS_SQD2RTS: + if (slave != mlx4_master_func_num(dev)) + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->pri_path.mgid_index >= num_gids) + return -EINVAL; + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->alt_path.mgid_index >= num_gids) + return -EINVAL; + } + break; + default: + break; + } + break; + + case MLX4_QP_ST_MLX: + qpn = vhcr->in_modifier & 0x7fffff; + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (transition == QP_TRANS_INIT2RTR && + slave != mlx4_master_func_num(dev) && + mlx4_is_qp_reserved(dev, qpn) && + !mlx4_vf_smi_enabled(dev, slave, port)) { + /* only enabled VFs may create MLX proxy QPs */ + mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n", + __func__, slave, port); + return -EPERM; + } + break; + + default: + break; + } + + return 0; +} + +int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_mtt mtt; + __be64 *page_list = inbox->buf; + u64 *pg_list = (u64 *)page_list; + int i; + struct res_mtt *rmtt = NULL; + int start = be64_to_cpu(page_list[0]); + int npages = vhcr->in_modifier; + int err; + + err = get_containing_mtt(dev, slave, start, npages, &rmtt); + if (err) + return err; + + /* Call the SW implementation of write_mtt: + * - Prepare a dummy mtt struct + * - Translate inbox contents to simple addresses in host endianness */ + mtt.offset = 0; /* TBD this is broken but I don't handle it since + we don't really use it */ + mtt.order = 0; + mtt.page_shift = 0; + for (i = 0; i < npages; ++i) + pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); + + err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, + ((u64 *)page_list + 2)); + + if (rmtt) + put_res(dev, slave, rmtt->com.res_id, RES_MTT); + + return err; +} + +int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int eqn = vhcr->in_modifier; + int res_id = eqn | (slave << 10); + struct res_eq *eq; + int err; + + err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); + if (err) + return err; + + err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); + if (err) + goto ex_abort; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put; + + atomic_dec(&eq->mtt->ref_count); + put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_EQ, res_id); + rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); + + return 0; + +ex_put: + put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_EQ, res_id); + + return err; +} + +int 
mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_slave_event_eq_info *event_eq; + struct mlx4_cmd_mailbox *mailbox; + u32 in_modifier = 0; + int err; + int res_id; + struct res_eq *req; + + if (!priv->mfunc.master.slave_state) + return -EINVAL; + + /* check for slave valid, slave not PF, and slave active */ + if (slave < 0 || slave > dev->persist->num_vfs || + slave == dev->caps.function || + !priv->mfunc.master.slave_state[slave].active) + return 0; + + event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; + + /* Create the event only if the slave is registered */ + if (event_eq->eqn < 0) + return 0; + + mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); + res_id = (slave << 10) | event_eq->eqn; + err = get_res(dev, slave, res_id, RES_EQ, &req); + if (err) + goto unlock; + + if (req->com.from_state != RES_EQ_HW) { + err = -EINVAL; + goto put; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto put; + } + + if (eqe->type == MLX4_EVENT_TYPE_CMD) { + ++event_eq->token; + eqe->event.cmd.token = cpu_to_be16(event_eq->token); + } + + memcpy(mailbox->buf, (u8 *) eqe, 28); + + in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); + + err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, + MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + + put_res(dev, slave, res_id, RES_EQ); + mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); + mlx4_free_cmd_mailbox(dev, mailbox); + return err; + +put: + put_res(dev, slave, res_id, RES_EQ); + +unlock: + mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); + return err; +} + +int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int eqn = vhcr->in_modifier; + int res_id = eqn | (slave << 10); + struct res_eq *eq; + int err; + + err = get_res(dev, slave, res_id, RES_EQ, &eq); + if (err) + return err; + + if (eq->com.from_state != RES_EQ_HW) { + err = -EINVAL; + goto ex_put; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + +ex_put: + put_res(dev, slave, res_id, RES_EQ); + return err; +} + +int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int cqn = vhcr->in_modifier; + struct mlx4_cq_context *cqc = inbox->buf; + int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; + struct res_cq *cq = NULL; + struct res_mtt *mtt; + + err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); + if (err) + return err; + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto out_move; + err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); + if (err) + goto out_put; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_put; + atomic_inc(&mtt->ref_count); + cq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_CQ, cqn); + return 0; + +out_put: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +out_move: + res_abort_move(dev, slave, RES_CQ, cqn); + return err; +} + +int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int cqn = vhcr->in_modifier; + 
struct res_cq *cq = NULL; + + err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto out_move; + atomic_dec(&cq->mtt->ref_count); + res_end_move(dev, slave, RES_CQ, cqn); + return 0; + +out_move: + res_abort_move(dev, slave, RES_CQ, cqn); + return err; +} + +int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int cqn = vhcr->in_modifier; + struct res_cq *cq; + int err; + + err = get_res(dev, slave, cqn, RES_CQ, &cq); + if (err) + return err; + + if (cq->com.from_state != RES_CQ_HW) + goto ex_put; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +ex_put: + put_res(dev, slave, cqn, RES_CQ); + + return err; +} + +static int handle_resize(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd, + struct res_cq *cq) +{ + int err; + struct res_mtt *orig_mtt; + struct res_mtt *mtt; + struct mlx4_cq_context *cqc = inbox->buf; + int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; + + err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); + if (err) + return err; + + if (orig_mtt != cq->mtt) { + err = -EINVAL; + goto ex_put; + } + + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_put; + + err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); + if (err) + goto ex_put1; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put1; + atomic_dec(&orig_mtt->ref_count); + put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); + atomic_inc(&mtt->ref_count); + cq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + return 0; + +ex_put1: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_put: + put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); + + return err; + +} + +int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int cqn = vhcr->in_modifier; + struct res_cq *cq; + int err; + + err = get_res(dev, slave, cqn, RES_CQ, &cq); + if (err) + return err; + + if (cq->com.from_state != RES_CQ_HW) + goto ex_put; + + if (vhcr->op_modifier == 0) { + err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); + goto ex_put; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +ex_put: + put_res(dev, slave, cqn, RES_CQ); + + return err; +} + +static int srq_get_mtt_size(struct mlx4_srq_context *srqc) +{ + int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; + int log_rq_stride = srqc->logstride & 7; + int page_shift = (srqc->log_page_size & 0x3f) + 12; + + if (log_srq_size + log_rq_stride + 4 < page_shift) + return 1; + + return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); +} + +int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_mtt *mtt; + struct res_srq *srq = NULL; + struct mlx4_srq_context *srqc = inbox->buf; + int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; + + if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) + return -EINVAL; + + err = 
srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); + if (err) + return err; + err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); + if (err) + goto ex_abort; + err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), + mtt); + if (err) + goto ex_put_mtt; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_put_mtt; + + atomic_inc(&mtt->ref_count); + srq->mtt = mtt; + put_res(dev, slave, mtt->com.res_id, RES_MTT); + res_end_move(dev, slave, RES_SRQ, srqn); + return 0; + +ex_put_mtt: + put_res(dev, slave, mtt->com.res_id, RES_MTT); +ex_abort: + res_abort_move(dev, slave, RES_SRQ, srqn); + + return err; +} + +int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq = NULL; + + err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + atomic_dec(&srq->mtt->ref_count); + if (srq->cq) + atomic_dec(&srq->cq->ref_count); + res_end_move(dev, slave, RES_SRQ, srqn); + + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_SRQ, srqn); + + return err; +} + +int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq; + + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + return err; + if (srq->com.from_state != RES_SRQ_HW) { + err = -EBUSY; + goto out; + } + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, srqn, RES_SRQ); + return err; +} + +int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int srqn = vhcr->in_modifier; + struct res_srq *srq; + + err = get_res(dev, slave, srqn, RES_SRQ, &srq); + if (err) + return err; + + if (srq->com.from_state != RES_SRQ_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, srqn, RES_SRQ); + return err; +} + +int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + put_res(dev, slave, qpn, RES_QP); + return err; +} + +int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp_context *context = inbox->buf + 8; + adjust_proxy_tun_qkey(dev, vhcr, context); + update_pkey_index(dev, slave, inbox); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); + u8 
pri_sched_queue; + int port = mlx4_slave_convert_port( + dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; + + if (port < 0) + return -EINVAL; + + pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | + ((port & 1) << 6); + + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH || + mlx4_is_eth(dev, port + 1)) { + qpc->pri_path.sched_queue = pri_sched_queue; + } + + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = mlx4_slave_convert_port( + dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) + + 1) - 1; + if (port < 0) + return -EINVAL; + qpc->alt_path.sched_queue = + (qpc->alt_path.sched_queue & ~(1 << 6)) | + (port & 1) << 6; + } + return 0; +} + +static int roce_verify_mac(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + u64 mac; + int port; + u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + u8 sched = *(u8 *)(inbox->buf + 64); + u8 smac_ix; + + port = (sched >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { + smac_ix = qpc->pri_path.grh_mylmc & 0x7f; + if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) + return -ENOENT; + } + return 0; +} + +int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *qpc = inbox->buf + 8; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + u8 orig_sched_queue; + __be32 orig_param3 = qpc->param3; + u8 orig_vlan_control = qpc->pri_path.vlan_control; + u8 orig_fvl_rx = qpc->pri_path.fvl_rx; + u8 orig_pri_path_fl = qpc->pri_path.fl; + u8 orig_vlan_index = qpc->pri_path.vlan_index; + u8 orig_feup = qpc->pri_path.feup; + + err = adjust_qp_sched_queue(dev, slave, qpc, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave); + if (err) + return err; + + if (roce_verify_mac(dev, slave, qpc, inbox)) + return -EINVAL; + + update_pkey_index(dev, slave, inbox); + update_gid(dev, inbox, (u8)slave); + adjust_proxy_tun_qkey(dev, vhcr, qpc); + orig_sched_queue = qpc->pri_path.sched_queue; + err = update_vport_qp_param(dev, inbox, slave, qpn); + if (err) + return err; + + err = get_res(dev, slave, qpn, RES_QP, &qp); + if (err) + return err; + if (qp->com.from_state != RES_QP_HW) { + err = -EBUSY; + goto out; + } + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +out: + /* if no error, save sched queue value passed in by VF. This is + * essentially the QOS value provided by the VF. 
This will be useful + * if we allow dynamic changes from VST back to VGT + */ + if (!err) { + qp->sched_queue = orig_sched_queue; + qp->param3 = orig_param3; + qp->vlan_control = orig_vlan_control; + qp->fvl_rx = orig_fvl_rx; + qp->pri_path_fl = orig_pri_path_fl; + qp->vlan_index = orig_vlan_index; + qp->feup = orig_feup; + } + put_res(dev, slave, qpn, RES_QP); + return err; +} + +int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave); + if (err) + return err; + + update_pkey_index(dev, slave, inbox); + update_gid(dev, inbox, (u8)slave); + adjust_proxy_tun_qkey(dev, vhcr, context); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave); + if (err) + return err; + + update_pkey_index(dev, slave, inbox); + update_gid(dev, inbox, (u8)slave); + adjust_proxy_tun_qkey(dev, vhcr, context); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + + +int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp_context *context = inbox->buf + 8; + int err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + adjust_proxy_tun_qkey(dev, vhcr, context); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave); + if (err) + return err; + + adjust_proxy_tun_qkey(dev, vhcr, context); + update_gid(dev, inbox, (u8)slave); + update_pkey_index(dev, slave, inbox); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + struct mlx4_qp_context *context = inbox->buf + 8; + + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; + err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave); + if (err) + return err; + + adjust_proxy_tun_qkey(dev, vhcr, context); + update_gid(dev, inbox, (u8)slave); + update_pkey_index(dev, slave, inbox); + return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); +} + +int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + 
int err; + int qpn = vhcr->in_modifier & 0x7fffff; + struct res_qp *qp; + + err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); + if (err) + return err; + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + if (err) + goto ex_abort; + + atomic_dec(&qp->mtt->ref_count); + atomic_dec(&qp->rcq->ref_count); + atomic_dec(&qp->scq->ref_count); + if (qp->srq) + atomic_dec(&qp->srq->ref_count); + res_end_move(dev, slave, RES_QP, qpn); + return 0; + +ex_abort: + res_abort_move(dev, slave, RES_QP, qpn); + + return err; +} + +static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, + struct res_qp *rqp, u8 *gid) +{ + struct res_gid *res; + + list_for_each_entry(res, &rqp->mcg_list, list) { + if (!memcmp(res->gid, gid, 16)) + return res; + } + return NULL; +} + +static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, + u8 *gid, enum mlx4_protocol prot, + enum mlx4_steer_type steer, u64 reg_id) +{ + struct res_gid *res; + int err; + + res = kzalloc(sizeof *res, GFP_KERNEL); + if (!res) + return -ENOMEM; + + spin_lock_irq(&rqp->mcg_spl); + if (find_gid(dev, slave, rqp, gid)) { + kfree(res); + err = -EEXIST; + } else { + memcpy(res->gid, gid, 16); + res->prot = prot; + res->steer = steer; + res->reg_id = reg_id; + list_add_tail(&res->list, &rqp->mcg_list); + err = 0; + } + spin_unlock_irq(&rqp->mcg_spl); + + return err; +} + +static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, + u8 *gid, enum mlx4_protocol prot, + enum mlx4_steer_type steer, u64 *reg_id) +{ + struct res_gid *res; + int err; + + spin_lock_irq(&rqp->mcg_spl); + res = find_gid(dev, slave, rqp, gid); + if (!res || res->prot != prot || res->steer != steer) + err = -EINVAL; + else { + *reg_id = res->reg_id; + list_del(&res->list); + kfree(res); + err = 0; + } + spin_unlock_irq(&rqp->mcg_spl); + + return err; +} + +static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, + u8 gid[16], int block_loopback, enum mlx4_protocol prot, + enum mlx4_steer_type type, u64 *reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, + block_loopback, prot, + reg_id); + } + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + gid[5] = port; + } + return mlx4_qp_attach_common(dev, qp, gid, + block_loopback, prot, type); + default: + return -EINVAL; + } +} + +static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot, + enum mlx4_steer_type type, u64 reg_id) +{ + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + return mlx4_flow_detach(dev, reg_id); + case MLX4_STEERING_MODE_B0: + return mlx4_qp_detach_common(dev, qp, gid, prot, type); + default: + return -EINVAL; + } +} + +static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, + u8 *gid, enum mlx4_protocol prot) +{ + int real_port; + + if (prot != MLX4_PROT_ETH) + return 0; + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || + dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + real_port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (real_port < 0) + return -EINVAL; + gid[5] = real_port; + } + + return 0; +} + +int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct 
mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_qp qp; /* dummy for calling attach/detach */ + u8 *gid = inbox->buf; + enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; + int err; + int qpn; + struct res_qp *rqp; + u64 reg_id = 0; + int attach = vhcr->op_modifier; + int block_loopback = vhcr->in_modifier >> 31; + u8 steer_type_mask = 2; + enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; + + qpn = vhcr->in_modifier & 0xffffff; + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) + return err; + + qp.qpn = qpn; + if (attach) { + err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, + type, ®_id); + if (err) { + pr_err("Fail to attach rule to qp 0x%x\n", qpn); + goto ex_put; + } + err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id); + if (err) + goto ex_detach; + } else { + err = mlx4_adjust_port(dev, slave, gid, prot); + if (err) + goto ex_put; + + err = rem_mcg_res(dev, slave, rqp, gid, prot, type, ®_id); + if (err) + goto ex_put; + + err = qp_detach(dev, &qp, gid, prot, type, reg_id); + if (err) + pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n", + qpn, reg_id); + } + put_res(dev, slave, qpn, RES_QP); + return err; + +ex_detach: + qp_detach(dev, &qp, gid, prot, type, reg_id); +ex_put: + put_res(dev, slave, qpn, RES_QP); + return err; +} + +/* + * MAC validation for Flow Steering rules. + * VF can attach rules only with a mac address which is assigned to it. + */ +static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, + struct list_head *rlist) +{ + struct mac_res *res, *tmp; + __be64 be_mac; + + /* make sure it isn't multicast or broadcast mac*/ + if (!is_multicast_ether_addr(eth_header->eth.dst_mac) && + !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { + list_for_each_entry_safe(res, tmp, rlist, list) { + be_mac = cpu_to_be64(res->mac << 16); + if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac)) + return 0; + } + pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n", + eth_header->eth.dst_mac, slave); + return -EINVAL; + } + return 0; +} + +/* + * In case of missing eth header, append eth header with a MAC address + * assigned to the VF. 
+ */ +static int add_eth_header(struct mlx4_dev *dev, int slave, + struct mlx4_cmd_mailbox *inbox, + struct list_head *rlist, int header_id) +{ + struct mac_res *res, *tmp; + u8 port; + struct mlx4_net_trans_rule_hw_ctrl *ctrl; + struct mlx4_net_trans_rule_hw_eth *eth_header; + struct mlx4_net_trans_rule_hw_ipv4 *ip_header; + struct mlx4_net_trans_rule_hw_tcp_udp *l4_header; + __be64 be_mac = 0; + __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + port = ctrl->port; + eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); + + /* Clear a space in the inbox for eth header */ + switch (header_id) { + case MLX4_NET_TRANS_RULE_ID_IPV4: + ip_header = + (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1); + memmove(ip_header, eth_header, + sizeof(*ip_header) + sizeof(*l4_header)); + break; + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *) + (eth_header + 1); + memmove(l4_header, eth_header, sizeof(*l4_header)); + break; + default: + return -EINVAL; + } + list_for_each_entry_safe(res, tmp, rlist, list) { + if (port == res->port) { + be_mac = cpu_to_be64(res->mac << 16); + break; + } + } + if (!be_mac) { + pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n", + port); + return -EINVAL; + } + + memset(eth_header, 0, sizeof(*eth_header)); + eth_header->size = sizeof(*eth_header) >> 2; + eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); + memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); + memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); + + return 0; + +} + +#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd_info) +{ + int err; + u32 qpn = vhcr->in_modifier & 0xffffff; + struct res_qp *rqp; + u64 mac; + unsigned port; + u64 pri_addr_path_mask; + struct mlx4_update_qp_context *cmd; + int smac_index; + + cmd = (struct mlx4_update_qp_context *)inbox->buf; + + pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); + if (cmd->qp_mask || cmd->secondary_addr_path_mask || + (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) + return -EPERM; + + /* Just change the smac for the QP */ + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); + return err; + } + + port = (rqp->sched_queue >> 6 & 1) + 1; + + if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. 
smac_ix: %d\n",
+ qpn, smac_index);
+ goto err_mac;
+ }
+ }
+
+ err = mlx4_cmd(dev, inbox->dma,
+ vhcr->in_modifier, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+ goto err_mac;
+ }
+
+err_mac:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
+ int err;
+ int qpn;
+ struct res_qp *rqp;
+ struct mlx4_net_trans_rule_hw_ctrl *ctrl;
+ struct _rule_hw *rule_header;
+ int header_id;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+ ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
+ if (ctrl->port <= 0)
+ return -EINVAL;
+ qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err) {
+ pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
+ return err;
+ }
+ rule_header = (struct _rule_hw *)(ctrl + 1);
+ header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+
+ switch (header_id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ if (validate_eth_header_mac(slave, rule_header, rlist)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+ break;
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ break;
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
+ if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+ vhcr->in_modifier +=
+ sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
+ break;
+ default:
+ pr_err("Corrupted mailbox\n");
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
+ vhcr->in_modifier, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ goto err_put;
+
+ err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
+ if (err) {
+ mlx4_err(dev, "Fail to add flow steering resources\n");
+ /* detach rule */
+ mlx4_cmd(dev, vhcr->out_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ goto err_put;
+ }
+ atomic_inc(&rqp->ref_count);
+err_put:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ struct res_qp *rqp;
+ struct res_fs_rule *rrule;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
+ if (err)
+ return err;
+ /* Release the rule from busy state before removal */
+ put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+ err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+ if (err)
+ return err;
+
+ err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
+ goto out;
+ }
+
+ err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH,
MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (!err) + atomic_dec(&rqp->ref_count); +out: + put_res(dev, slave, rrule->qpn, RES_QP); + return err; +} + +enum { + BUSY_MAX_RETRIES = 10 +}; + +int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + int index = vhcr->in_modifier & 0xffff; + + err = get_res(dev, slave, index, RES_COUNTER, NULL); + if (err) + return err; + + err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); + put_res(dev, slave, index, RES_COUNTER); + return err; +} + +static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) +{ + struct res_gid *rgid; + struct res_gid *tmp; + struct mlx4_qp qp; /* dummy for calling attach/detach */ + + list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + mlx4_flow_detach(dev, rgid->reg_id); + break; + case MLX4_STEERING_MODE_B0: + qp.qpn = rqp->local_qpn; + (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, + rgid->prot, rgid->steer); + break; + } + list_del(&rgid->list); + kfree(rgid); + } +} + +static int _move_all_busy(struct mlx4_dev *dev, int slave, + enum mlx4_resource type, int print) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; + struct res_common *r; + struct res_common *tmp; + int busy; + + busy = 0; + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(r, tmp, rlist, list) { + if (r->owner == slave) { + if (!r->removing) { + if (r->state == RES_ANY_BUSY) { + if (print) + mlx4_dbg(dev, + "%s id 0x%llx is busy\n", + resource_str(type), + r->res_id); + ++busy; + } else { + r->from_state = r->state; + r->state = RES_ANY_BUSY; + r->removing = 1; + } + } + } + } + spin_unlock_irq(mlx4_tlock(dev)); + + return busy; +} + +static int move_all_busy(struct mlx4_dev *dev, int slave, + enum mlx4_resource type) +{ + unsigned long begin; + int busy; + + begin = jiffies; + do { + busy = _move_all_busy(dev, slave, type, 0); + if (time_after(jiffies, begin + 5 * HZ)) + break; + if (busy) + cond_resched(); + } while (busy); + + if (busy) + busy = _move_all_busy(dev, slave, type, 1); + + return busy; +} +static void rem_slave_qps(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + int state; + u64 in_param; + int qpn; + int err; + + err = move_all_busy(dev, slave, RES_QP); + if (err) + mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (qp->com.owner == slave) { + qpn = qp->com.res_id; + detach_qp(dev, slave, qp); + state = qp->com.from_state; + while (state != 0) { + switch (state) { + case RES_QP_RESERVED: + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&qp->com.node, + &tracker->res_tree[RES_QP]); + list_del(&qp->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + if (!valid_reserved(dev, slave, qpn)) { + __mlx4_qp_release_range(dev, qpn, 1); + mlx4_release_resource(dev, slave, + RES_QP, 1, 0); + } + kfree(qp); + state = 0; + break; + case 
RES_QP_MAPPED: + if (!valid_reserved(dev, slave, qpn)) + __mlx4_qp_free_icm(dev, qpn); + state = RES_QP_RESERVED; + break; + case RES_QP_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, + qp->local_qpn, 2, + MLX4_CMD_2RST_QP, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n", + slave, qp->local_qpn); + atomic_dec(&qp->rcq->ref_count); + atomic_dec(&qp->scq->ref_count); + atomic_dec(&qp->mtt->ref_count); + if (qp->srq) + atomic_dec(&qp->srq->ref_count); + state = RES_QP_MAPPED; + break; + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_srqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *srq_list = + &tracker->slave_list[slave].res_list[RES_SRQ]; + struct res_srq *srq; + struct res_srq *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int srqn; + int err; + + err = move_all_busy(dev, slave, RES_SRQ); + if (err) + mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(srq, tmp, srq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (srq->com.owner == slave) { + srqn = srq->com.res_id; + state = srq->com.from_state; + while (state != 0) { + switch (state) { + case RES_SRQ_ALLOCATED: + __mlx4_srq_free_icm(dev, srqn); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&srq->com.node, + &tracker->res_tree[RES_SRQ]); + list_del(&srq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_SRQ, 1, 0); + kfree(srq); + state = 0; + break; + + case RES_SRQ_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, srqn, 1, + MLX4_CMD_HW2SW_SRQ, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n", + slave, srqn); + + atomic_dec(&srq->mtt->ref_count); + if (srq->cq) + atomic_dec(&srq->cq->ref_count); + state = RES_SRQ_ALLOCATED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_cqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *cq_list = + &tracker->slave_list[slave].res_list[RES_CQ]; + struct res_cq *cq; + struct res_cq *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int cqn; + int err; + + err = move_all_busy(dev, slave, RES_CQ); + if (err) + mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(cq, tmp, cq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { + cqn = cq->com.res_id; + state = cq->com.from_state; + while (state != 0) { + switch (state) { + case RES_CQ_ALLOCATED: + __mlx4_cq_free_icm(dev, cqn); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&cq->com.node, + &tracker->res_tree[RES_CQ]); + list_del(&cq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_CQ, 1, 0); + kfree(cq); + state = 0; + break; + + case RES_CQ_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, cqn, 1, + MLX4_CMD_HW2SW_CQ, + MLX4_CMD_TIME_CLASS_A, + 
MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n", + slave, cqn); + atomic_dec(&cq->mtt->ref_count); + state = RES_CQ_ALLOCATED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_mrs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mpt_list = + &tracker->slave_list[slave].res_list[RES_MPT]; + struct res_mpt *mpt; + struct res_mpt *tmp; + int state; + u64 in_param; + LIST_HEAD(tlist); + int mptn; + int err; + + err = move_all_busy(dev, slave, RES_MPT); + if (err) + mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (mpt->com.owner == slave) { + mptn = mpt->com.res_id; + state = mpt->com.from_state; + while (state != 0) { + switch (state) { + case RES_MPT_RESERVED: + __mlx4_mpt_release(dev, mpt->key); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&mpt->com.node, + &tracker->res_tree[RES_MPT]); + list_del(&mpt->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, + RES_MPT, 1, 0); + kfree(mpt); + state = 0; + break; + + case RES_MPT_MAPPED: + __mlx4_mpt_free_icm(dev, mpt->key); + state = RES_MPT_RESERVED; + break; + + case RES_MPT_HW: + in_param = slave; + err = mlx4_cmd(dev, in_param, mptn, 0, + MLX4_CMD_HW2SW_MPT, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n", + slave, mptn); + if (mpt->mtt) + atomic_dec(&mpt->mtt->ref_count); + state = RES_MPT_MAPPED; + break; + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_mtts(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *mtt_list = + &tracker->slave_list[slave].res_list[RES_MTT]; + struct res_mtt *mtt; + struct res_mtt *tmp; + int state; + LIST_HEAD(tlist); + int base; + int err; + + err = move_all_busy(dev, slave, RES_MTT); + if (err) + mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (mtt->com.owner == slave) { + base = mtt->com.res_id; + state = mtt->com.from_state; + while (state != 0) { + switch (state) { + case RES_MTT_ALLOCATED: + __mlx4_free_mtt_range(dev, base, + mtt->order); + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&mtt->com.node, + &tracker->res_tree[RES_MTT]); + list_del(&mtt->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_release_resource(dev, slave, RES_MTT, + 1 << mtt->order, 0); + kfree(mtt); + state = 0; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *fs_rule_list = + &tracker->slave_list[slave].res_list[RES_FS_RULE]; + struct res_fs_rule *fs_rule; + struct res_fs_rule *tmp; + int 
state; + u64 base; + int err; + + err = move_all_busy(dev, slave, RES_FS_RULE); + if (err) + mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (fs_rule->com.owner == slave) { + base = fs_rule->com.res_id; + state = fs_rule->com.from_state; + while (state != 0) { + switch (state) { + case RES_FS_RULE_ALLOCATED: + /* detach rule */ + err = mlx4_cmd(dev, base, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&fs_rule->com.node, + &tracker->res_tree[RES_FS_RULE]); + list_del(&fs_rule->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(fs_rule); + state = 0; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_eqs(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *eq_list = + &tracker->slave_list[slave].res_list[RES_EQ]; + struct res_eq *eq; + struct res_eq *tmp; + int err; + int state; + LIST_HEAD(tlist); + int eqn; + + err = move_all_busy(dev, slave, RES_EQ); + if (err) + mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(eq, tmp, eq_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (eq->com.owner == slave) { + eqn = eq->com.res_id; + state = eq->com.from_state; + while (state != 0) { + switch (state) { + case RES_EQ_RESERVED: + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&eq->com.node, + &tracker->res_tree[RES_EQ]); + list_del(&eq->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(eq); + state = 0; + break; + + case RES_EQ_HW: + err = mlx4_cmd(dev, slave, eqn & 0x3ff, + 1, MLX4_CMD_HW2SW_EQ, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", + slave, eqn & 0x3ff); + atomic_dec(&eq->mtt->ref_count); + state = RES_EQ_RESERVED; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_counters(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *counter_list = + &tracker->slave_list[slave].res_list[RES_COUNTER]; + struct res_counter *counter; + struct res_counter *tmp; + int err; + int index; + + err = move_all_busy(dev, slave, RES_COUNTER); + if (err) + mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(counter, tmp, counter_list, com.list) { + if (counter->com.owner == slave) { + index = counter->com.res_id; + rb_erase(&counter->com.node, + &tracker->res_tree[RES_COUNTER]); + list_del(&counter->com.list); + kfree(counter); + __mlx4_counter_free(dev, index); + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); + } + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *xrcdn_list = +
&tracker->slave_list[slave].res_list[RES_XRCD]; + struct res_xrcdn *xrcd; + struct res_xrcdn *tmp; + int err; + int xrcdn; + + err = move_all_busy(dev, slave, RES_XRCD); + if (err) + mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { + if (xrcd->com.owner == slave) { + xrcdn = xrcd->com.res_id; + rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]); + list_del(&xrcd->com.list); + kfree(xrcd); + __mlx4_xrcd_free(dev, xrcdn); + } + } + spin_unlock_irq(mlx4_tlock(dev)); +} + +void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + mlx4_reset_roce_gids(dev, slave); + mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); + rem_slave_vlans(dev, slave); + rem_slave_macs(dev, slave); + rem_slave_fs_rule(dev, slave); + rem_slave_qps(dev, slave); + rem_slave_srqs(dev, slave); + rem_slave_cqs(dev, slave); + rem_slave_mrs(dev, slave); + rem_slave_eqs(dev, slave); + rem_slave_mtts(dev, slave); + rem_slave_counters(dev, slave); + rem_slave_xrcdns(dev, slave); + mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); +} + +void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) +{ + struct mlx4_vf_immed_vlan_work *work = + container_of(_work, struct mlx4_vf_immed_vlan_work, work); + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *upd_context; + struct mlx4_dev *dev = &work->priv->dev; + struct mlx4_resource_tracker *tracker = + &work->priv->mfunc.master.res_tracker; + struct list_head *qp_list = + &tracker->slave_list[work->slave].res_list[RES_QP]; + struct res_qp *qp; + struct res_qp *tmp; + u64 qp_path_mask_vlan_ctrl = + ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED)); + + u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) | + (1ULL << MLX4_UPD_QP_PATH_MASK_CV) | + (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) | + (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) | + (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE)); + + int err; + int port, errors = 0; + u8 vlan_control; + + if (mlx4_is_slave(dev)) { + mlx4_warn(dev, "Trying to update-qp in slave %d\n", + work->slave); + goto out; + } + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + goto out; + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */ + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else if (!work->vlan_id) + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + else + vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + + upd_context = mailbox->buf; + upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(qp, tmp, qp_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if 
(qp->com.owner == work->slave) { + if (qp->com.from_state != RES_QP_HW || + !qp->sched_queue || /* no INIT2RTR trans yet */ + mlx4_is_qp_reserved(dev, qp->local_qpn) || + qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + port = (qp->sched_queue >> 6 & 1) + 1; + if (port != work->port) { + spin_lock_irq(mlx4_tlock(dev)); + continue; + } + if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff)) + upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask); + else + upd_context->primary_addr_path_mask = + cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl); + if (work->vlan_id == MLX4_VGT) { + upd_context->qp_context.param3 = qp->param3; + upd_context->qp_context.pri_path.vlan_control = qp->vlan_control; + upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx; + upd_context->qp_context.pri_path.vlan_index = qp->vlan_index; + upd_context->qp_context.pri_path.fl = qp->pri_path_fl; + upd_context->qp_context.pri_path.feup = qp->feup; + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue; + } else { + upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN); + upd_context->qp_context.pri_path.vlan_control = vlan_control; + upd_context->qp_context.pri_path.vlan_index = work->vlan_ix; + upd_context->qp_context.pri_path.fvl_rx = + qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN; + upd_context->qp_context.pri_path.fl = + qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + upd_context->qp_context.pri_path.feup = + qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; + upd_context->qp_context.pri_path.sched_queue = + qp->sched_queue & 0xC7; + upd_context->qp_context.pri_path.sched_queue |= + ((work->qos & 0x7) << 3); + upd_context->qp_mask |= + cpu_to_be64(1ULL << + MLX4_UPD_QP_MASK_QOS_VPP); + upd_context->qp_context.qos_vport = + work->qos_vport; + } + + err = mlx4_cmd(dev, mailbox->dma, + qp->local_qpn & 0xffffff, + 0, MLX4_CMD_UPDATE_QP, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); + if (err) { + mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, err); + errors++; + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); + mlx4_free_cmd_mailbox(dev, mailbox); + + if (errors) + mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n", + errors, work->slave, work->port); + + /* unregister previous vlan_id if needed and we had no errors + * while updating the QPs + */ + if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && + NO_INDX != work->orig_vlan_ix) + __mlx4_unregister_vlan(&work->priv->dev, work->port, + work->orig_vlan_id); +out: + kfree(work); + return; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c new file mode 100644 index 000000000..094773d88 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include + +#include + +#include "mlx4.h" + +int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, + enum mlx4_port_type *type) +{ + u64 out_param; + int err = 0; + + err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, + MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) { + mlx4_err(dev, "Sense command failed for port: %d\n", port); + return err; + } + + if (out_param > 2) { + mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param); + return -EINVAL; + } + + *type = out_param; + return 0; +} + +void mlx4_do_sense_ports(struct mlx4_dev *dev, + enum mlx4_port_type *stype, + enum mlx4_port_type *defaults) +{ + struct mlx4_sense *sense = &mlx4_priv(dev)->sense; + int err; + int i; + + for (i = 1; i <= dev->caps.num_ports; i++) { + stype[i - 1] = 0; + if (sense->do_sense_port[i] && sense->sense_allowed[i] && + dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { + err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]); + if (err) + stype[i - 1] = defaults[i - 1]; + } else + stype[i - 1] = defaults[i - 1]; + } + + /* + * If sensed nothing, remain in current configuration. + */ + for (i = 0; i < dev->caps.num_ports; i++) + stype[i] = stype[i] ? 
stype[i] : defaults[i]; + +} + +static void mlx4_sense_port(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct mlx4_sense *sense = container_of(delay, struct mlx4_sense, + sense_poll); + struct mlx4_dev *dev = sense->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + enum mlx4_port_type stype[MLX4_MAX_PORTS]; + + mutex_lock(&priv->port_mutex); + mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]); + + if (mlx4_check_port_params(dev, stype)) + goto sense_again; + + if (mlx4_change_port_types(dev, stype)) + mlx4_err(dev, "Failed to change port_types\n"); + +sense_again: + mutex_unlock(&priv->port_mutex); + queue_delayed_work(mlx4_wq , &sense->sense_poll, + round_jiffies_relative(MLX4_SENSE_RANGE)); +} + +void mlx4_start_sense(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_sense *sense = &priv->sense; + + if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) + return; + + queue_delayed_work(mlx4_wq , &sense->sense_poll, + round_jiffies_relative(MLX4_SENSE_RANGE)); +} + +void mlx4_stop_sense(struct mlx4_dev *dev) +{ + cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll); +} + +void mlx4_sense_init(struct mlx4_dev *dev) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_sense *sense = &priv->sense; + int port; + + sense->dev = dev; + for (port = 1; port <= dev->caps.num_ports; port++) + sense->do_sense_port[port] = 1; + + INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port); +} diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c new file mode 100644 index 000000000..67146624e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + + +#include +#include +#include +#include + +#include "mlx4.h" +#include "icm.h" + +void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_srq *srq; + + spin_lock(&srq_table->lock); + + srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&srq_table->lock); + + if (!srq) { + mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn); + return; + } + + srq->event(srq, event_type); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); +} + +static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd(dev, mailbox->dma, srq_num, 0, + MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_WRAPPED); +} + +static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num, + mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark) +{ + return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); +} + +static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, + int srq_num) +{ + return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); +} + +int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + + *srqn = mlx4_bitmap_alloc(&srq_table->bitmap); + if (*srqn == -1) + return -ENOMEM; + + err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL); + if (err) + goto err_out; + + err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL); + if (err) + goto err_put; + return 0; + +err_put: + mlx4_table_put(dev, &srq_table->table, *srqn); + +err_out: + mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR); + return err; +} + +static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn) +{ + u64 out_param; + int err; + + if (mlx4_is_mfunc(dev)) { + err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ, + RES_OP_RESERVE_AND_MAP, + MLX4_CMD_ALLOC_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); + if (!err) + *srqn = get_param_l(&out_param); + + return err; + } + return __mlx4_srq_alloc_icm(dev, srqn); +} + +void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + + mlx4_table_put(dev, &srq_table->cmpt_table, srqn); + mlx4_table_put(dev, &srq_table->table, srqn); + mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR); +} + +static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) +{ + u64 in_param = 0; + + if (mlx4_is_mfunc(dev)) { + set_param_l(&in_param, srqn); + if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP, + MLX4_CMD_FREE_RES, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED)) + mlx4_warn(dev, "Failed freeing srq:%d\n", srqn); + return; + } + __mlx4_srq_free_icm(dev, srqn); +} + +int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, + struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + u64 mtt_addr; + int err; + + err = mlx4_srq_alloc_icm(dev, &srq->srqn); + if (err) + return err;
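+ /* The SRQ number and its ICM backing are now reserved.  Publish the SRQ + * in the radix tree so mlx4_srq_event() can look it up, then hand + * ownership to firmware with SW2HW_SRQ below; the error paths unwind in + * reverse order (radix tree removal, then ICM free). + */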
+ + spin_lock_irq(&srq_table->lock); + err = radix_tree_insert(&srq_table->tree, srq->srqn, srq); + spin_unlock_irq(&srq_table->lock); + if (err) + goto err_icm; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_radix; + } + + srq_context = mailbox->buf; + srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) | + srq->srqn); + srq_context->logstride = srq->wqe_shift - 4; + srq_context->xrcd = cpu_to_be16(xrcd); + srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff); + srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; + + mtt_addr = mlx4_mtt_addr(dev, mtt); + srq_context->mtt_base_addr_h = mtt_addr >> 32; + srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); + srq_context->pd = cpu_to_be32(pdn); + srq_context->db_rec_addr = cpu_to_be64(db_rec); + + err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn); + mlx4_free_cmd_mailbox(dev, mailbox); + if (err) + goto err_radix; + + atomic_set(&srq->refcount, 1); + init_completion(&srq->free); + + return 0; + +err_radix: + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + +err_icm: + mlx4_srq_free_icm(dev, srq->srqn); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_srq_alloc); + +void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn); + if (err) + mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn); + + spin_lock_irq(&srq_table->lock); + radix_tree_delete(&srq_table->tree, srq->srqn); + spin_unlock_irq(&srq_table->lock); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + mlx4_srq_free_icm(dev, srq->srqn); +} +EXPORT_SYMBOL_GPL(mlx4_srq_free); + +int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark) +{ + return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark); +} +EXPORT_SYMBOL_GPL(mlx4_srq_arm); + +int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_srq_context *srq_context; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + srq_context = mailbox->buf; + + err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn); + if (err) + goto err_out; + *limit_watermark = be16_to_cpu(srq_context->limit_watermark); + +err_out: + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_srq_query); + +int mlx4_init_srq_table(struct mlx4_dev *dev) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + int err; + + spin_lock_init(&srq_table->lock); + INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); + if (mlx4_is_slave(dev)) + return 0; + + err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, + dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); + if (err) + return err; + + return 0; +} + +void mlx4_cleanup_srq_table(struct mlx4_dev *dev) +{ + if (mlx4_is_slave(dev)) + return; + mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); +} + +struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_srq *srq; + unsigned long flags; + + spin_lock_irqsave(&srq_table->lock, flags); + srq = radix_tree_lookup(&srq_table->tree, + srqn & (dev->caps.num_srqs - 1)); + spin_unlock_irqrestore(&srq_table->lock, flags); + + 
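+ /* Note: the lookup itself runs under srq_table->lock, but no reference + * is taken on the returned SRQ; the caller is expected to keep the SRQ + * valid for as long as it uses the result. + */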
return srq; +} +EXPORT_SYMBOL_GPL(mlx4_srq_lookup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig new file mode 100644 index 000000000..8ff57e8e3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -0,0 +1,8 @@ +# +# Mellanox driver configuration +# + +config MLX5_CORE + tristate + depends on PCI + default n diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile new file mode 100644 index 000000000..105780bb9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_MLX5_CORE) += mlx5_core.o + +mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ + health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + mad.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c new file mode 100644 index 000000000..ac0f7bf4b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx5_core.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. 
+ */ + +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, + struct mlx5_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) + pages[i] = virt_to_page(buf->page_list[i].buf); + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + mlx5_buf_free(dev, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(mlx5_buf_alloc); + +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) +{ + int i; + + if (buf->nbufs == 1) + dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + else { + if (BITS_PER_LONG == 64) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(mlx5_buf_free); + +static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device) +{ + struct mlx5_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + if (!pgdir) + return NULL; + + bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE); + pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, + &pgdir->db_dma, GFP_KERNEL); + if (!pgdir->db_page) { + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir, + struct mlx5_db *db) +{ + int offset; + int i; + + i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE); + if (i >= MLX5_DB_PER_PAGE) + return -ENOMEM; + + __clear_bit(i, pgdir->bitmap); + + db->u.pgdir = pgdir; + db->index = i; + offset = db->index * L1_CACHE_BYTES; + db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page); + db->dma = pgdir->db_dma + offset; + + db->db[0] = 0; + db->db[1] = 0; + + return 0; +} + +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) +{ + struct mlx5_db_pgdir *pgdir; + int ret = 0; + + mutex_lock(&dev->priv.pgdir_mutex); + + list_for_each_entry(pgdir, &dev->priv.pgdir_list, list) + if (!mlx5_alloc_db_from_pgdir(pgdir, db)) + goto out; + + pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev)); + if (!pgdir) { + ret = -ENOMEM; + goto out; + } + + list_add(&pgdir->list, &dev->priv.pgdir_list); + + /* This should never fail -- we just allocated an empty page: */ + WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db)); + +out: + mutex_unlock(&dev->priv.pgdir_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx5_db_alloc); + +void mlx5_db_free(struct mlx5_core_dev *dev, 
struct mlx5_db *db) +{ + mutex_lock(&dev->priv.pgdir_mutex); + + __set_bit(db->index, db->u.pgdir->bitmap); + + if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) { + dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, + db->u.pgdir->db_page, db->u.pgdir->db_dma); + list_del(&db->u.pgdir->list); + kfree(db->u.pgdir); + } + + mutex_unlock(&dev->priv.pgdir_mutex); +} +EXPORT_SYMBOL_GPL(mlx5_db_free); + + +void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) +{ + u64 addr; + int i; + + for (i = 0; i < buf->npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << buf->page_shift); + else + addr = buf->page_list[i].map; + + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(mlx5_fill_page_array); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c new file mode 100644 index 000000000..e3273faf4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -0,0 +1,1587 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx5_core.h" + +enum { + CMD_IF_REV = 5, +}; + +enum { + CMD_MODE_POLLING, + CMD_MODE_EVENTS +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + MLX5_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, +}; + +enum { + MLX5_CMD_DELIVERY_STAT_OK = 0x0, + MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, + MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, + MLX5_CMD_STAT_BAD_OP_ERR = 0x2, + MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, + MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + MLX5_CMD_STAT_BAD_RES_ERR = 0x5, + MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_LIM_ERR = 0x8, + MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + MLX5_CMD_STAT_IX_ERR = 0xa, + MLX5_CMD_STAT_NO_RES_ERR = 0xf, + MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, + MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, + struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, + void *uout, int uout_size, + mlx5_cmd_cbk_t cbk, + void *context, int page_queue) +{ + gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; + struct mlx5_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), alloc_flags); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->uout = uout; + ent->uout_size = uout_size; + ent->callback = cbk; + ent->context = context; + ent->cmd = cmd; + ent->page_queue = page_queue; + + return ent; +} + +static u8 alloc_token(struct mlx5_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + cmd->token++; + if (cmd->token == 0) + cmd->token++; + token = cmd->token; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int alloc_ent(struct mlx5_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return ret < cmd->max_reg_cmds ? 
ret : -ENOMEM; +} + +static void free_ent(struct mlx5_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static u8 xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int verify_block_sig(struct mlx5_cmd_prot_block *block) +{ + if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, + int csum) +{ + block->token = token; + if (csum) { + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - + sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); + } +} + +static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) +{ + struct mlx5_cmd_mailbox *next = msg->next; + + while (next) { + calc_block_sig(next->buf, token, csum); + next = next->next; + } +} + +static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) +{ + ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); + calc_chain_sig(ent->in, ent->token, csum); + calc_chain_sig(ent->out, ent->token, csum); +} + +static void poll_timeout(struct mlx5_cmd_work_ent *ent) +{ + unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); + u8 own; + + do { + own = ent->lay->status_own; + if (!(own & CMD_OWNER_HW)) { + ent->ret = 0; + return; + } + usleep_range(5000, 10000); + } while (time_before(jiffies, poll_end)); + + ent->ret = -ETIMEDOUT; +} + +static void free_cmd(struct mlx5_cmd_work_ent *ent) +{ + kfree(ent); +} + + +static int verify_signature(struct mlx5_cmd_work_ent *ent) +{ + struct mlx5_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xor8_buf(ent->lay, sizeof(*ent->lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; +} + +static void dump_buf(void *buf, int size, int data_only, int offset) +{ + __be32 *p = buf; + int i; + + for (i = 0; i < size; i += 16) { + pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), + be32_to_cpu(p[3])); + p += 4; + offset += 16; + } + if (!data_only) + pr_debug("\n"); +} + +const char *mlx5_command_str(int command) +{ + switch (command) { + case MLX5_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case MLX5_CMD_OP_SET_HCA_CAP: + return "SET_HCA_CAP"; + + case MLX5_CMD_OP_QUERY_ADAPTER: + return "QUERY_ADAPTER"; + + case MLX5_CMD_OP_INIT_HCA: + return "INIT_HCA"; + + case MLX5_CMD_OP_TEARDOWN_HCA: + return "TEARDOWN_HCA"; + + case MLX5_CMD_OP_ENABLE_HCA: + return "MLX5_CMD_OP_ENABLE_HCA"; + + case MLX5_CMD_OP_DISABLE_HCA: + return "MLX5_CMD_OP_DISABLE_HCA"; + + case MLX5_CMD_OP_QUERY_PAGES: + return "QUERY_PAGES"; + + case MLX5_CMD_OP_MANAGE_PAGES: + return "MANAGE_PAGES"; + + case MLX5_CMD_OP_CREATE_MKEY: + return "CREATE_MKEY"; + + case MLX5_CMD_OP_QUERY_MKEY: + return "QUERY_MKEY"; + + case MLX5_CMD_OP_DESTROY_MKEY: + return "DESTROY_MKEY"; + + case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: + return "QUERY_SPECIAL_CONTEXTS"; + + case MLX5_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case 
MLX5_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case MLX5_CMD_OP_QUERY_EQ: + return "QUERY_EQ"; + + case MLX5_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case MLX5_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case MLX5_CMD_OP_QUERY_CQ: + return "QUERY_CQ"; + + case MLX5_CMD_OP_MODIFY_CQ: + return "MODIFY_CQ"; + + case MLX5_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case MLX5_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case MLX5_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case MLX5_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + case MLX5_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case MLX5_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case MLX5_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case MLX5_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case MLX5_CMD_OP_2RST_QP: + return "2RST_QP"; + + case MLX5_CMD_OP_QUERY_QP: + return "QUERY_QP"; + + case MLX5_CMD_OP_MAD_IFC: + return "MAD_IFC"; + + case MLX5_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case MLX5_CMD_OP_CREATE_PSV: + return "CREATE_PSV"; + + case MLX5_CMD_OP_DESTROY_PSV: + return "DESTROY_PSV"; + + case MLX5_CMD_OP_CREATE_SRQ: + return "CREATE_SRQ"; + + case MLX5_CMD_OP_DESTROY_SRQ: + return "DESTROY_SRQ"; + + case MLX5_CMD_OP_QUERY_SRQ: + return "QUERY_SRQ"; + + case MLX5_CMD_OP_ARM_RQ: + return "ARM_RQ"; + + case MLX5_CMD_OP_RESIZE_SRQ: + return "RESIZE_SRQ"; + + case MLX5_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; + + case MLX5_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; + + case MLX5_CMD_OP_ALLOC_UAR: + return "ALLOC_UAR"; + + case MLX5_CMD_OP_DEALLOC_UAR: + return "DEALLOC_UAR"; + + case MLX5_CMD_OP_ATTACH_TO_MCG: + return "ATTACH_TO_MCG"; + + case MLX5_CMD_OP_DETACH_FROM_MCG: + return "DETACH_FROM_MCG"; + + case MLX5_CMD_OP_ALLOC_XRCD: + return "ALLOC_XRCD"; + + case MLX5_CMD_OP_DEALLOC_XRCD: + return "DEALLOC_XRCD"; + + case MLX5_CMD_OP_ACCESS_REG: + return "MLX5_CMD_OP_ACCESS_REG"; + + default: return "unknown command opcode"; + } +} + +static void dump_command(struct mlx5_core_dev *dev, + struct mlx5_cmd_work_ent *ent, int input) +{ + u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); + struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; + struct mlx5_cmd_mailbox *next = msg->next; + int data_only; + u32 offset = 0; + int dump_len; + + data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); + + if (data_only) + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, + "dump command data %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? "INPUT" : "OUTPUT"); + else + mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", + mlx5_command_str(op), op, + input ? 
"INPUT" : "OUTPUT"); + + if (data_only) { + if (input) { + dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); + offset += sizeof(ent->lay->in); + } else { + dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); + offset += sizeof(ent->lay->out); + } + } else { + dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); + offset += sizeof(*ent->lay); + } + + while (next && offset < msg->len) { + if (data_only) { + dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); + dump_buf(next->buf, dump_len, 1, offset); + offset += MLX5_CMD_DATA_BLOCK_SIZE; + } else { + mlx5_core_dbg(dev, "command block:\n"); + dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset); + offset += sizeof(struct mlx5_cmd_prot_block); + } + next = next->next; + } + + if (data_only) + pr_debug("\n"); +} + +static void cmd_work_handler(struct work_struct *work) +{ + struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); + struct mlx5_cmd *cmd = ent->cmd; + struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); + struct mlx5_cmd_layout *lay; + struct semaphore *sem; + + sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + down(sem); + if (!ent->page_queue) { + ent->idx = alloc_ent(cmd); + if (ent->idx < 0) { + mlx5_core_err(dev, "failed to allocate command entry\n"); + up(sem); + return; + } + } else { + ent->idx = cmd->max_reg_cmds; + } + + ent->token = alloc_token(cmd); + cmd->ent_arr[ent->idx] = ent; + lay = get_inst(cmd, ent->idx); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + ent->op = be32_to_cpu(lay->in[0]) >> 16; + if (ent->in->next) + lay->in_ptr = cpu_to_be64(ent->in->next->dma); + lay->inlen = cpu_to_be32(ent->in->len); + if (ent->out->next) + lay->out_ptr = cpu_to_be64(ent->out->next->dma); + lay->outlen = cpu_to_be32(ent->out->len); + lay->type = MLX5_PCI_CMD_XPORT; + lay->token = ent->token; + lay->status_own = CMD_OWNER_HW; + set_signature(ent, !cmd->checksum_disabled); + dump_command(dev, ent, 1); + ent->ts1 = ktime_get_ns(); + + /* ring doorbell after the descriptor is valid */ + mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); + wmb(); + iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); + mmiowb(); + /* if not in polling don't use ent after this point */ + if (cmd->mode == CMD_MODE_POLLING) { + poll_timeout(ent); + /* make sure we read the descriptor after ownership is SW */ + rmb(); + mlx5_cmd_comp_handler(dev, 1UL << ent->idx); + } +} + +static const char *deliv_status_to_str(u8 status) +{ + switch (status) { + case MLX5_CMD_DELIVERY_STAT_OK: + return "no errors"; + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: + return "signature error"; + case MLX5_CMD_DELIVERY_STAT_TOK_ERR: + return "token error"; + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + return "bad block number"; + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + return "output pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return "input pointer not aligned to block size"; + case MLX5_CMD_DELIVERY_STAT_FW_ERR: + return "firmware internal error"; + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + return "command input length error"; + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + return "command ouput length error"; + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return "reserved fields not cleared"; + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + return "bad command descriptor type"; + default: + return "unknown status code"; + } +} + +static u16 
msg_to_opcode(struct mlx5_cmd_msg *in) +{ + struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); + struct mlx5_cmd *cmd = &dev->cmd; + int err; + + if (cmd->mode == CMD_MODE_POLLING) { + wait_for_completion(&ent->done); + err = ent->ret; + } else { + if (!wait_for_completion_timeout(&ent->done, timeout)) + err = -ETIMEDOUT; + else + err = 0; + } + if (err == -ETIMEDOUT) { + mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", + mlx5_command_str(msg_to_opcode(ent->in)), + msg_to_opcode(ent->in)); + } + mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", + err, deliv_status_to_str(ent->status), ent->status); + + return err; +} + +/* Notes: + * 1. Callback functions may not sleep + * 2. page queue commands do not support asynchronous completion + */ +static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, + struct mlx5_cmd_msg *out, void *uout, int uout_size, + mlx5_cmd_cbk_t callback, + void *context, int page_queue, u8 *status) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + struct mlx5_cmd_stats *stats; + int err = 0; + s64 ds; + u16 op; + + if (callback && page_queue) + return -EINVAL; + + ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, + page_queue); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + if (!callback) + init_completion(&ent->done); + + INIT_WORK(&ent->work, cmd_work_handler); + if (page_queue) { + cmd_work_handler(&ent->work); + } else if (!queue_work(cmd->wq, &ent->work)) { + mlx5_core_warn(dev, "failed to queue work\n"); + err = -ENOMEM; + goto out_free; + } + + if (!callback) { + err = wait_func(dev, ent); + if (err == -ETIMEDOUT) + goto out; + + ds = ent->ts2 - ent->ts1; + op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); + if (op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[op]; + spin_lock_irq(&stats->lock); + stats->sum += ds; + ++stats->n; + spin_unlock_irq(&stats->lock); + } + mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, + "fw exec time for %s is %lld nsec\n", + mlx5_command_str(op), ds); + *status = ent->status; + free_cmd(ent); + } + + return err; + +out_free: + free_cmd(ent); +out: + return err; +} + +static ssize_t dbg_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char lbuf[3]; + int err; + + if (!dbg->in_msg || !dbg->out_msg) + return -ENOMEM; + + if (copy_from_user(lbuf, buf, sizeof(lbuf))) + return -EFAULT; + + lbuf[sizeof(lbuf) - 1] = 0; + + if (strcmp(lbuf, "go")) + return -EINVAL; + + err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); + + return err ?
err : count; +} + + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dbg_write, +}; + +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(to->first.data)); + memcpy(to->first.data, from, copy); + size -= copy; + from += copy; + + next = to->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + memcpy(block->data, from, copy); + from += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(from->first.data)); + memcpy(to, from->first.data, copy); + size -= copy; + to += copy; + + next = from->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); + block = next->buf; + + memcpy(to, block->data, copy); + to += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, + gfp_t flags) +{ + struct mlx5_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof(*mailbox), flags); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags, + &mailbox->dma); + if (!mailbox->buf) { + mlx5_core_dbg(dev, "failed allocation\n"); + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block)); + mailbox->next = NULL; + + return mailbox; +} + +static void free_cmd_box(struct mlx5_core_dev *dev, + struct mlx5_cmd_mailbox *mailbox) +{ + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, + gfp_t flags, int size) +{ + struct mlx5_cmd_mailbox *tmp, *head = NULL; + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), flags); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = alloc_cmd_box(dev, flags); + if (IS_ERR(tmp)) { + mlx5_core_warn(dev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? 
tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(dev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg) +{ + struct mlx5_cmd_mailbox *head = msg->next; + struct mlx5_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(dev, head); + head = next; + } + kfree(msg); +} + +static ssize_t data_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + void *ptr; + int err; + + if (*pos != 0) + return -EINVAL; + + kfree(dbg->in_msg); + dbg->in_msg = NULL; + dbg->inlen = 0; + + ptr = kzalloc(count, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (copy_from_user(ptr, buf, count)) { + err = -EFAULT; + goto out; + } + dbg->in_msg = ptr; + dbg->inlen = count; + + *pos = count; + + return count; + +out: + kfree(ptr); + return err; +} + +static ssize_t data_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int copy; + + if (*pos) + return 0; + + if (!dbg->out_msg) + return -ENOMEM; + + copy = min_t(int, count, dbg->outlen); + if (copy_to_user(buf, dbg->out_msg, copy)) + return -EFAULT; + + *pos += copy; + + return copy; +} + +static const struct file_operations dfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, +}; + +static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen[8]; + int err; + + if (*pos) + return 0; + + err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); + if (err < 0) + return err; + + if (copy_to_user(buf, &outlen, err)) + return -EFAULT; + + *pos += err; + + return err; +} + +static ssize_t outlen_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_core_dev *dev = filp->private_data; + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + char outlen_str[8]; + int outlen; + void *ptr; + int err; + + if (*pos != 0 || count > 6) + return -EINVAL; + + kfree(dbg->out_msg); + dbg->out_msg = NULL; + dbg->outlen = 0; + + if (copy_from_user(outlen_str, buf, count)) + return -EFAULT; + + outlen_str[7] = 0; + + err = sscanf(outlen_str, "%d", &outlen); + if (err < 0) + return err; + + ptr = kzalloc(outlen, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + dbg->out_msg = ptr; + dbg->outlen = outlen; + + *pos = count; + + return count; +} + +static const struct file_operations olfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = outlen_write, + .read = outlen_read, +}; + +static void set_wqname(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", + dev_name(&dev->pdev->dev)); +} + +static void clean_debug_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + + if (!mlx5_debugfs_root) + return; + + mlx5_cmdif_debugfs_cleanup(dev); + debugfs_remove_recursive(dbg->dbg_root); +} + +static int create_debugfs_files(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; + int err = -ENOMEM; + + if 
(!mlx5_debugfs_root) + return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root); + if (!dbg->dbg_root) + return err; + + dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_in) + goto err_dbg; + + dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, + dev, &dfops); + if (!dbg->dbg_out) + goto err_dbg; + + dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, + dev, &olfops); + if (!dbg->dbg_outlen) + goto err_dbg; + + dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root, + &dbg->status); + if (!dbg->dbg_status) + goto err_dbg; + + dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); + if (!dbg->dbg_run) + goto err_dbg; + + mlx5_cmdif_debugfs_init(dev); + + return 0; + +err_dbg: + clean_debug_files(dev); + return err; +} + +void mlx5_cmd_use_events(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + + cmd->mode = CMD_MODE_EVENTS; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + down(&cmd->pages_sem); + + flush_workqueue(cmd->wq); + cmd->mode = CMD_MODE_POLLING; + + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) +{ + unsigned long flags; + + if (msg->cache) { + spin_lock_irqsave(&msg->cache->lock, flags); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock_irqrestore(&msg->cache->lock, flags); + } else { + mlx5_free_cmd_msg(dev, msg); + } +} + +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_work_ent *ent; + mlx5_cmd_cbk_t callback; + void *context; + int err; + int i; + s64 ds; + struct mlx5_cmd_stats *stats; + unsigned long flags; + + for (i = 0; i < (1 << cmd->log_sz); i++) { + if (test_bit(i, &vector)) { + struct semaphore *sem; + + ent = cmd->ent_arr[i]; + if (ent->page_queue) + sem = &cmd->pages_sem; + else + sem = &cmd->sem; + ent->ts2 = ktime_get_ns(); + memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); + dump_command(dev, ent, 0); + if (!ent->ret) { + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); + else + ent->ret = 0; + ent->status = ent->lay->status_own >> 1; + mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", + ent->ret, deliv_status_to_str(ent->status), ent->status); + } + free_ent(cmd, ent->idx); + if (ent->callback) { + ds = ent->ts2 - ent->ts1; + if (ent->op < ARRAY_SIZE(cmd->stats)) { + stats = &cmd->stats[ent->op]; + spin_lock_irqsave(&stats->lock, flags); + stats->sum += ds; + ++stats->n; + spin_unlock_irqrestore(&stats->lock, flags); + } + + callback = ent->callback; + context = ent->context; + err = ent->ret; + if (!err) + err = mlx5_copy_from_msg(ent->uout, + ent->out, + ent->uout_size); + + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + + free_cmd(ent); + callback(err, context); + } else { + complete(&ent->done); + } + up(sem); + } + } +} +EXPORT_SYMBOL(mlx5_cmd_comp_handler); + +static int status_to_err(u8 status) +{ + return status ? 
-1 : 0; /* TBD more meaningful codes */ +} + +static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, + gfp_t gfp) +{ + struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct mlx5_cmd *cmd = &dev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock_irq(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock_irq(&ent->lock); + } + + if (IS_ERR(msg)) + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); + + return msg; +} + +static int is_manage_pages(struct mlx5_inbox_hdr *in) +{ + return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; +} + +static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size, mlx5_cmd_cbk_t callback, void *context) +{ + struct mlx5_cmd_msg *inb; + struct mlx5_cmd_msg *outb; + int pages_queue; + gfp_t gfp; + int err; + u8 status = 0; + + pages_queue = is_manage_pages(in); + gfp = callback ? GFP_ATOMIC : GFP_KERNEL; + + inb = alloc_msg(dev, in_size, gfp); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = mlx5_copy_to_msg(inb, in, in_size); + if (err) { + mlx5_core_warn(dev, "err %d\n", err); + goto out_in; + } + + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, + pages_queue, &status); + if (err) + goto out_out; + + mlx5_core_dbg(dev, "err %d, status %d\n", err, status); + if (status) { + err = status_to_err(status); + goto out_out; + } + + if (!callback) + err = mlx5_copy_from_msg(out, outb, out_size); + +out_out: + if (!callback) + mlx5_free_cmd_msg(dev, outb); + +out_in: + if (!callback) + free_msg(dev, inb); + return err; +} + +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size) +{ + return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL); +} +EXPORT_SYMBOL(mlx5_cmd_exec); + +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context) +{ + return cmd_exec(dev, in, in_size, out, out_size, callback, context); +} +EXPORT_SYMBOL(mlx5_cmd_exec_cb); + +static void destroy_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + struct mlx5_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + mlx5_free_cmd_msg(dev, msg); + } +} + +static int create_msg_cache(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + struct mlx5_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); + INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = 
mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +ex_err: + destroy_msg_cache(dev); + return err; +} + +static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +{ + struct device *ddev = &dev->pdev->dev; + + cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, + &cmd->alloc_dma, GFP_KERNEL); + if (!cmd->cmd_alloc_buf) + return -ENOMEM; + + /* make sure it is aligned to 4K */ + if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { + cmd->cmd_buf = cmd->cmd_alloc_buf; + cmd->dma = cmd->alloc_dma; + cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; + return 0; + } + + dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, + cmd->alloc_dma); + cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, + 2 * MLX5_ADAPTER_PAGE_SIZE - 1, + &cmd->alloc_dma, GFP_KERNEL); + if (!cmd->cmd_alloc_buf) + return -ENOMEM; + + cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); + cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); + cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; + return 0; +} + +static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) +{ + struct device *ddev = &dev->pdev->dev; + + dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf, + cmd->alloc_dma); +} + +int mlx5_cmd_init(struct mlx5_core_dev *dev) +{ + int size = sizeof(struct mlx5_cmd_prot_block); + int align = roundup_pow_of_two(size); + struct mlx5_cmd *cmd = &dev->cmd; + u32 cmd_h, cmd_l; + u16 cmd_if_rev; + int err; + int i; + + cmd_if_rev = cmdif_rev(dev); + if (cmd_if_rev != CMD_IF_REV) { + dev_err(&dev->pdev->dev, + "Driver cmdif rev(%d) differs from firmware's(%d)\n", + CMD_IF_REV, cmd_if_rev); + return -EINVAL; + } + + cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0); + if (!cmd->pool) + return -ENOMEM; + + err = alloc_cmd_page(dev, cmd); + if (err) + goto err_free_pool; + + cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; + cmd->log_sz = cmd_l >> 4 & 0xf; + cmd->log_stride = cmd_l & 0xf; + if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { + dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n", + 1 << cmd->log_sz); + err = -EINVAL; + goto err_free_page; + } + + if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { + dev_err(&dev->pdev->dev, "command queue size overflow\n"); + err = -EINVAL; + goto err_free_page; + } + + cmd->checksum_disabled = 1; + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; + cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + + cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + if (cmd->cmdif_rev > CMD_IF_REV) { + dev_err(&dev->pdev->dev, "driver does not support command interface version. 
driver %d, firmware %d\n", + CMD_IF_REV, cmd->cmdif_rev); + err = -ENOTSUPP; + goto err_free_page; + } + + spin_lock_init(&cmd->alloc_lock); + spin_lock_init(&cmd->token_lock); + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) + spin_lock_init(&cmd->stats[i].lock); + + sema_init(&cmd->sem, cmd->max_reg_cmds); + sema_init(&cmd->pages_sem, 1); + + cmd_h = (u32)((u64)(cmd->dma) >> 32); + cmd_l = (u32)(cmd->dma); + if (cmd_l & 0xfff) { + dev_err(&dev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_free_page; + } + + iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); + iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); + + /* Make sure firmware sees the complete address before we proceed */ + wmb(); + + mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); + + cmd->mode = CMD_MODE_POLLING; + + err = create_msg_cache(dev); + if (err) { + dev_err(&dev->pdev->dev, "failed to create command cache\n"); + goto err_free_page; + } + + set_wqname(dev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + dev_err(&dev->pdev->dev, "failed to create command workqueue\n"); + err = -ENOMEM; + goto err_cache; + } + + err = create_debugfs_files(dev); + if (err) { + err = -ENOMEM; + goto err_wq; + } + + return 0; + +err_wq: + destroy_workqueue(cmd->wq); + +err_cache: + destroy_msg_cache(dev); + +err_free_page: + free_cmd_page(dev, cmd); + +err_free_pool: + pci_pool_destroy(cmd->pool); + + return err; +} +EXPORT_SYMBOL(mlx5_cmd_init); + +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + + clean_debug_files(dev); + destroy_workqueue(cmd->wq); + destroy_msg_cache(dev); + free_cmd_page(dev, cmd); + pci_pool_destroy(cmd->pool); +} +EXPORT_SYMBOL(mlx5_cmd_cleanup); + +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: + return "OK"; + case MLX5_CMD_STAT_INT_ERR: + return "internal error"; + case MLX5_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case MLX5_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case MLX5_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case MLX5_CMD_STAT_RES_BUSY: + return "resource busy"; + case MLX5_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case MLX5_CMD_STAT_IX_ERR: + return "bad index"; + case MLX5_CMD_STAT_NO_RES_ERR: + return "no resources"; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case MLX5_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +static int cmd_status_to_err(u8 status) +{ + switch (status) { + case MLX5_CMD_STAT_OK: return 0; + case MLX5_CMD_STAT_INT_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; + case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; + case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_IX_ERR: return -EINVAL; + case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case 
MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} + +/* this will be available till all the commands use set/get macros */ +int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(hdr->status), hdr->status, + be32_to_cpu(hdr->syndrome)); + + return cmd_status_to_err(hdr->status); +} + +int mlx5_cmd_status_to_err_v2(void *ptr) +{ + u32 syndrome; + u8 status; + + status = be32_to_cpu(*(__be32 *)ptr) >> 24; + if (!status) + return 0; + + syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); + + pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n", + cmd_status_str(status), status, syndrome); + + return cmd_status_to_err(status); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c new file mode 100644 index 000000000..eb0cf81f5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) +{ + struct mlx5_core_cq *cq; + struct mlx5_cq_table *table = &dev->priv.cq_table; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); + return; + } + + ++cq->arm_sn; + + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_core_cq *cq; + + spin_lock(&table->lock); + + cq = radix_tree_lookup(&table->tree, cqn); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&table->lock); + + if (!cq) { + mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + + +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_create_cq_mbox_in *in, int inlen) +{ + int err; + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_create_cq_mbox_out out; + struct mlx5_destroy_cq_mbox_in din; + struct mlx5_destroy_cq_mbox_out dout; + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); + memset(&out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + cq->cons_index = 0; + cq->arm_sn = 0; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + if (err) + goto err_cmd; + + cq->pid = current->pid; + err = mlx5_debug_cq_add(dev, cq); + if (err) + mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", + cq->cqn); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_cq); + +int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + struct mlx5_destroy_cq_mbox_in in; + struct mlx5_destroy_cq_mbox_out out; + struct mlx5_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); + return -EINVAL; + } + if (tmp != cq) { + mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + synchronize_irq(cq->irqn); + + mlx5_debug_cq_remove(dev, cq); + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_destroy_cq); + +int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct 
mlx5_query_cq_mbox_out *out) +{ + struct mlx5_query_cq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_cq); + + +int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_modify_cq_mbox_in *in, int in_sz) +{ + struct mlx5_modify_cq_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); + err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_modify_cq); + +int mlx5_init_cq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_cq_table *table = &dev->priv.cq_table; + int err; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + err = mlx5_cq_debugfs_init(dev); + + return err; +} + +void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) +{ + mlx5_cq_debugfs_cleanup(dev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c new file mode 100644 index 000000000..5210d92e6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -0,0 +1,610 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + QP_PID, + QP_STATE, + QP_XPORT, + QP_MTU, + QP_N_RECV, + QP_RECV_SZ, + QP_N_SEND, + QP_LOG_PG_SZ, + QP_RQPN, +}; + +static char *qp_fields[] = { + [QP_PID] = "pid", + [QP_STATE] = "state", + [QP_XPORT] = "transport", + [QP_MTU] = "mtu", + [QP_N_RECV] = "num_recv", + [QP_RECV_SZ] = "rcv_wqe_sz", + [QP_N_SEND] = "num_send", + [QP_LOG_PG_SZ] = "log2_page_sz", + [QP_RQPN] = "remote_qpn", +}; + +enum { + EQ_NUM_EQES, + EQ_INTR, + EQ_LOG_PG_SZ, +}; + +static char *eq_fields[] = { + [EQ_NUM_EQES] = "num_eqes", + [EQ_INTR] = "intr", + [EQ_LOG_PG_SZ] = "log_page_size", +}; + +enum { + CQ_PID, + CQ_NUM_CQES, + CQ_LOG_PG_SZ, +}; + +static char *cq_fields[] = { + [CQ_PID] = "pid", + [CQ_NUM_CQES] = "num_cqes", + [CQ_LOG_PG_SZ] = "log_page_size", +}; + +struct dentry *mlx5_debugfs_root; +EXPORT_SYMBOL(mlx5_debugfs_root); + +void mlx5_register_debugfs(void) +{ + mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL); + if (IS_ERR_OR_NULL(mlx5_debugfs_root)) + mlx5_debugfs_root = NULL; +} + +void mlx5_unregister_debugfs(void) +{ + debugfs_remove(mlx5_debugfs_root); +} + +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + atomic_set(&dev->num_qps, 0); + + dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root); + if (!dev->priv.qp_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.qp_debugfs); +} + +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root); + if (!dev->priv.eq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.eq_debugfs); +} + +static ssize_t average_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + u64 field = 0; + int ret; + char tbuf[22]; + + if (*pos) + return 0; + + stats = filp->private_data; + spin_lock_irq(&stats->lock); + if (stats->n) + field = div64_u64(stats->sum, stats->n); + spin_unlock_irq(&stats->lock); + ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); + if (ret > 0) { + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; + } + + *pos += ret; + return ret; +} + + +static ssize_t average_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct mlx5_cmd_stats *stats; + + stats = filp->private_data; + spin_lock_irq(&stats->lock); + stats->sum = 0; + stats->n = 0; + spin_unlock_irq(&stats->lock); + + *pos += count; + + return count; +} + +static const struct file_operations stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = average_read, + .write = average_write, +}; + +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_stats *stats; + struct dentry **cmd; + const char *namep; + int err; + int i; + + if (!mlx5_debugfs_root) + return 0; + + cmd = &dev->priv.cmdif_debugfs; + *cmd = debugfs_create_dir("commands", dev->priv.dbg_root); + if (!*cmd) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) { + stats = &dev->cmd.stats[i]; + namep = mlx5_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root = debugfs_create_dir(namep, *cmd); + if (!stats->root) { + mlx5_core_warn(dev, "failed adding 
command %d\n", + i); + err = -ENOMEM; + goto out; + } + + stats->avg = debugfs_create_file("average", 0400, + stats->root, stats, + &stats_fops); + if (!stats->avg) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + + stats->count = debugfs_create_u64("n", 0400, + stats->root, + &stats->n); + if (!stats->count) { + mlx5_core_warn(dev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + } + } + + return 0; +out: + debugfs_remove_recursive(dev->priv.cmdif_debugfs); + return err; +} + +void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cmdif_debugfs); +} + +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return 0; + + dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root); + if (!dev->priv.cq_debugfs) + return -ENOMEM; + + return 0; +} + +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->priv.cq_debugfs); +} + +static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + int index, int *is_str) +{ + struct mlx5_query_qp_mbox_out *out; + struct mlx5_qp_context *ctx; + u64 param = 0; + int err; + int no_sq; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query qp\n"); + goto out; + } + + *is_str = 0; + ctx = &out->ctx; + switch (index) { + case QP_PID: + param = qp->pid; + break; + case QP_STATE: + param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28); + *is_str = 1; + break; + case QP_XPORT: + param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff); + *is_str = 1; + break; + case QP_MTU: + switch (ctx->mtu_msgmax >> 5) { + case IB_MTU_256: + param = 256; + break; + case IB_MTU_512: + param = 512; + break; + case IB_MTU_1024: + param = 1024; + break; + case IB_MTU_2048: + param = 2048; + break; + case IB_MTU_4096: + param = 4096; + break; + default: + param = 0; + } + break; + case QP_N_RECV: + param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); + break; + case QP_RECV_SZ: + param = 1 << ((ctx->rq_size_stride & 7) + 4); + break; + case QP_N_SEND: + no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15; + if (!no_sq) + param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11); + else + param = 0; + break; + case QP_LOG_PG_SZ: + param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f; + param += 12; + break; + case QP_RQPN: + param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff; + break; + } + +out: + kfree(out); + return param; +} + +static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + int index) +{ + struct mlx5_query_eq_mbox_out *out; + struct mlx5_eq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_eq_query(dev, eq, out, sizeof(*out)); + if (err) { + mlx5_core_warn(dev, "failed to query eq\n"); + goto out; + } + + switch (index) { + case EQ_NUM_EQES: + param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case EQ_INTR: + param = ctx->intr; + break; + case EQ_LOG_PG_SZ: + param = (ctx->log_page_size & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int index) +{ + struct mlx5_query_cq_mbox_out *out; + 
struct mlx5_cq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = mlx5_core_query_cq(dev, cq, out); + if (err) { + mlx5_core_warn(dev, "failed to query cq\n"); + goto out; + } + + switch (index) { + case CQ_PID: + param = cq->pid; + break; + case CQ_NUM_CQES: + param = 1 << ((be32_to_cpu(ctx->log_sz_usr_page) >> 24) & 0x1f); + break; + case CQ_LOG_PG_SZ: + param = (ctx->log_pg_sz & 0x1f) + 12; + break; + } + +out: + kfree(out); + return param; +} + +static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct mlx5_field_desc *desc; + struct mlx5_rsc_debug *d; + char tbuf[18]; + int is_str = 0; + u64 field; + int ret; + + if (*pos) + return 0; + + desc = filp->private_data; + d = (void *)(desc - desc->i) - sizeof(*d); + switch (d->type) { + case MLX5_DBG_RSC_QP: + field = qp_read_field(d->dev, d->object, desc->i, &is_str); + break; + + case MLX5_DBG_RSC_EQ: + field = eq_read_field(d->dev, d->object, desc->i); + break; + + case MLX5_DBG_RSC_CQ: + field = cq_read_field(d->dev, d->object, desc->i); + break; + + default: + mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type); + return -EINVAL; + } + + + if (is_str) + ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field); + else + ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + + if (ret > 0) { + if (copy_to_user(buf, tbuf, ret)) + return -EFAULT; + } + + *pos += ret; + return ret; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dbg_read, +}; + +static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type, + struct dentry *root, struct mlx5_rsc_debug **dbg, + int rsn, char **field, int nfile, void *data) +{ + struct mlx5_rsc_debug *d; + char resn[32]; + int err; + int i; + + d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->dev = dev; + d->object = data; + d->type = type; + sprintf(resn, "0x%x", rsn); + d->root = debugfs_create_dir(resn, root); + if (!d->root) { + err = -ENOMEM; + goto out_free; + } + + for (i = 0; i < nfile; i++) { + d->fields[i].i = i; + d->fields[i].dent = debugfs_create_file(field[i], 0400, + d->root, &d->fields[i], + &fops); + if (!d->fields[i].dent) { + err = -ENOMEM; + goto out_rem; + } + } + *dbg = d; + + return 0; +out_rem: + debugfs_remove_recursive(d->root); + +out_free: + kfree(d); + return err; +} + +static void rem_res_tree(struct mlx5_rsc_debug *d) +{ + debugfs_remove_recursive(d->root); + kfree(d); +} + +int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs, + &qp->dbg, qp->qpn, qp_fields, + ARRAY_SIZE(qp_fields), qp); + if (err) + qp->dbg = NULL; + + return err; +} + +void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) +{ + if (!mlx5_debugfs_root) + return; + + if (qp->dbg) + rem_res_tree(qp->dbg); +} + + +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs, + &eq->dbg, eq->eqn, eq_fields, + ARRAY_SIZE(eq_fields), eq); + if (err) + eq->dbg = NULL; + + return err; +} + +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + if (!mlx5_debugfs_root) + return; + + if (eq->dbg) + 
rem_res_tree(eq->dbg); +} + +int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + int err; + + if (!mlx5_debugfs_root) + return 0; + + err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs, + &cq->dbg, cq->cqn, cq_fields, + ARRAY_SIZE(cq_fields), cq); + if (err) + cq->dbg = NULL; + + return err; +} + +void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) +{ + if (!mlx5_debugfs_root) + return; + + if (cq->dbg) + rem_res_tree(cq->dbg); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c new file mode 100644 index 000000000..58800e4f3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -0,0 +1,539 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), + MLX5_EQE_OWNER_INIT_VAL = 0x1, +}; + +enum { + MLX5_EQ_STATE_ARMED = 0x9, + MLX5_EQ_STATE_FIRED = 0xa, + MLX5_EQ_STATE_ALWAYS_ARMED = 0xb, +}; + +enum { + MLX5_NUM_SPARE_EQE = 0x80, + MLX5_NUM_ASYNC_EQE = 0x100, + MLX5_NUM_CMD_EQE = 32, +}; + +enum { + MLX5_EQ_DOORBEL_OFFSET = 0x40, +}; + +#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ + (1ull << MLX5_EVENT_TYPE_COMM_EST) | \ + (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \ + (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \ + (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)) + +struct map_eq_in { + u64 mask; + u32 reserved; + u32 unmap_eqn; +}; + +struct cre_des_eq { + u8 reserved[15]; + u8 eqn; +}; + +static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) +{ + struct mlx5_destroy_eq_mbox_in in; + struct mlx5_destroy_eq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ); + in.eqn = eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (!err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} + +static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) +{ + return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); +} + +static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? 
NULL : eqe; +} + +static const char *eqe_type_str(u8 type) +{ + switch (type) { + case MLX5_EVENT_TYPE_COMP: + return "MLX5_EVENT_TYPE_COMP"; + case MLX5_EVENT_TYPE_PATH_MIG: + return "MLX5_EVENT_TYPE_PATH_MIG"; + case MLX5_EVENT_TYPE_COMM_EST: + return "MLX5_EVENT_TYPE_COMM_EST"; + case MLX5_EVENT_TYPE_SQ_DRAINED: + return "MLX5_EVENT_TYPE_SQ_DRAINED"; + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; + case MLX5_EVENT_TYPE_CQ_ERROR: + return "MLX5_EVENT_TYPE_CQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_INTERNAL_ERROR: + return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; + case MLX5_EVENT_TYPE_PORT_CHANGE: + return "MLX5_EVENT_TYPE_PORT_CHANGE"; + case MLX5_EVENT_TYPE_GPIO_EVENT: + return "MLX5_EVENT_TYPE_GPIO_EVENT"; + case MLX5_EVENT_TYPE_REMOTE_CONFIG: + return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; + case MLX5_EVENT_TYPE_DB_BF_CONGESTION: + return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; + case MLX5_EVENT_TYPE_STALL_EVENT: + return "MLX5_EVENT_TYPE_STALL_EVENT"; + case MLX5_EVENT_TYPE_CMD: + return "MLX5_EVENT_TYPE_CMD"; + case MLX5_EVENT_TYPE_PAGE_REQUEST: + return "MLX5_EVENT_TYPE_PAGE_REQUEST"; + case MLX5_EVENT_TYPE_PAGE_FAULT: + return "MLX5_EVENT_TYPE_PAGE_FAULT"; + default: + return "Unrecognized event"; + } +} + +static enum mlx5_dev_event port_subtype_event(u8 subtype) +{ + switch (subtype) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + return MLX5_DEV_EVENT_PORT_DOWN; + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + return MLX5_DEV_EVENT_PORT_UP; + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + return MLX5_DEV_EVENT_PORT_INITIALIZED; + case MLX5_PORT_CHANGE_SUBTYPE_LID: + return MLX5_DEV_EVENT_LID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + return MLX5_DEV_EVENT_PKEY_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + return MLX5_DEV_EVENT_GUID_CHANGE; + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + return MLX5_DEV_EVENT_CLIENT_REREG; + } + return -1; +} + +static void eq_update_ci(struct mlx5_eq *eq, int arm) +{ + __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); + u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + __raw_writel((__force u32) cpu_to_be32(val), addr); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + u32 cqn; + u32 rsn; + u8 port; + + while ((eqe = next_eqe_sw(eq))) { + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
+ */ + dma_rmb(); + + mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", + eq->eqn, eqe_type_str(eqe->type)); + switch (eqe->type) { + case MLX5_EVENT_TYPE_COMP: + cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; + mlx5_cq_completion(dev, cqn); + break; + + case MLX5_EVENT_TYPE_PATH_MIG: + case MLX5_EVENT_TYPE_COMM_EST: + case MLX5_EVENT_TYPE_SQ_DRAINED: + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", + eqe_type_str(eqe->type), eqe->type, rsn); + mlx5_rsc_event(dev, rsn, eqe->type); + break; + + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", + eqe_type_str(eqe->type), eqe->type, rsn); + mlx5_srq_event(dev, rsn, eqe->type); + break; + + case MLX5_EVENT_TYPE_CMD: + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector)); + break; + + case MLX5_EVENT_TYPE_PORT_CHANGE: + port = (eqe->data.port.port >> 4) & 0xf; + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + case MLX5_PORT_CHANGE_SUBTYPE_LID: + case MLX5_PORT_CHANGE_SUBTYPE_PKEY: + case MLX5_PORT_CHANGE_SUBTYPE_GUID: + case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: + case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: + if (dev->event) + dev->event(dev, port_subtype_event(eqe->sub_type), + (unsigned long)port); + break; + default: + mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", + port, eqe->sub_type); + } + break; + case MLX5_EVENT_TYPE_CQ_ERROR: + cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; + mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n", + cqn, eqe->data.cq_err.syndrome); + mlx5_cq_event(dev, cqn, eqe->type); + break; + + case MLX5_EVENT_TYPE_PAGE_REQUEST: + { + u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); + s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); + + mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", + func_id, npages); + mlx5_core_req_pages_handler(dev, func_id, npages); + } + break; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + case MLX5_EVENT_TYPE_PAGE_FAULT: + mlx5_eq_pagefault(dev, eqe); + break; +#endif + + default: + mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", + eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX5_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. 
+ */ + if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr) +{ + struct mlx5_eq *eq = eq_ptr; + struct mlx5_core_dev *dev = eq->dev; + + mlx5_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner = MLX5_EQE_OWNER_INIT_VAL; + } +} + +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, struct mlx5_uar *uar) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_create_eq_mbox_in *in; + struct mlx5_create_eq_mbox_out out; + int err; + int inlen; + + eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); + err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, + &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_buf; + } + memset(&out, 0, sizeof(out)); + + mlx5_fill_page_array(&eq->buf, in->pas); + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ); + in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index); + in->ctx.intr = vecidx; + in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; + in->events_mask = cpu_to_be64(mask); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + goto err_in; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto err_in; + } + + snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", + name, pci_name(dev->pdev)); + eq->eqn = out.eq_number; + eq->irqn = vecidx; + eq->dev = dev; + eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; + err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, + eq->name, eq); + if (err) + goto err_eq; + + err = mlx5_debug_eq_add(dev, eq); + if (err) + goto err_irq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + + kvfree(in); + return 0; + +err_irq: + free_irq(table->msix_arr[vecidx].vector, eq); + +err_eq: + mlx5_cmd_destroy_eq(dev, eq->eqn); + +err_in: + kvfree(in); + +err_buf: + mlx5_buf_free(dev, &eq->buf); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_create_map_eq); + +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + mlx5_debug_eq_remove(dev, eq); + free_irq(table->msix_arr[eq->irqn].vector, eq); + err = mlx5_cmd_destroy_eq(dev, eq->eqn); + if (err) + mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + synchronize_irq(table->msix_arr[eq->irqn].vector); + mlx5_buf_free(dev, &eq->buf); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); + +int mlx5_eq_init(struct mlx5_core_dev *dev) +{ + int err; + + spin_lock_init(&dev->priv.eq_table.lock); + + err = mlx5_eq_debugfs_init(dev); + + return err; +} + + +void mlx5_eq_cleanup(struct mlx5_core_dev *dev) +{ + mlx5_eq_debugfs_cleanup(dev); +} + +int mlx5_start_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + u32 async_event_mask = MLX5_ASYNC_EVENT_MASK; + int err; + + if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); + + err = mlx5_create_map_eq(dev, &table->cmd_eq, 
MLX5_EQ_VEC_CMD, + MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, + "mlx5_cmd_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); + return err; + } + + mlx5_cmd_use_events(dev); + + err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, + MLX5_NUM_ASYNC_EQE, async_event_mask, + "mlx5_async_eq", &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create async EQ %d\n", err); + goto err1; + } + + err = mlx5_create_map_eq(dev, &table->pages_eq, + MLX5_EQ_VEC_PAGES, + dev->caps.gen.max_vf + 1, + 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", + &dev->priv.uuari.uars[0]); + if (err) { + mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); + goto err2; + } + + return err; + +err2: + mlx5_destroy_unmap_eq(dev, &table->async_eq); + +err1: + mlx5_cmd_use_polling(dev); + mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + return err; +} + +int mlx5_stop_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int err; + + err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); + if (err) + return err; + + mlx5_destroy_unmap_eq(dev, &table->async_eq); + mlx5_cmd_use_polling(dev); + + err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + if (err) + mlx5_cmd_use_events(dev); + + return err; +} + +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct mlx5_query_eq_mbox_out *out, int outlen) +{ + struct mlx5_query_eq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ); + in.eqn = eq->eqn; + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_eq_query); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c new file mode 100644 index 000000000..4b4cda3bc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include "mlx5_core.h" + +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_query_adapter_mbox_out *out; + struct mlx5_cmd_query_adapter_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_out; + } + + memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); + +out_out: + kfree(out); + + return err; +} + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps) +{ + return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); +} + +int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps) +{ + u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; + int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *out; + int err; + + if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) + return -ENOTSUPP; + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) { + mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err); + goto out; + } + + memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct), + sizeof(*caps)); + + mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n", + be32_to_cpu(caps->per_transport_caps.rc_odp_caps), + be32_to_cpu(caps->per_transport_caps.uc_odp_caps), + be32_to_cpu(caps->per_transport_caps.ud_odp_caps)); + +out: + kfree(out); + return err; +} +EXPORT_SYMBOL(mlx5_query_odp_caps); + +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_init_hca_mbox_in in; + struct mlx5_cmd_init_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_INIT_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} + +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd_teardown_hca_mbox_in in; + struct mlx5_cmd_teardown_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_TEARDOWN_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c new file mode 100644 index 000000000..292d76f2a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, + MAX_MISSES = 3, +}; + +enum { + MLX5_HEALTH_SYNDR_FW_ERR = 0x1, + MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7, + MLX5_HEALTH_SYNDR_CRC_ERR = 0x9, + MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa, + MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb, + MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc, + MLX5_HEALTH_SYNDR_EQ_ERR = 0xd, + MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf, +}; + +static DEFINE_SPINLOCK(health_lock); +static LIST_HEAD(health_list); +static struct work_struct health_work; + +static void health_care(struct work_struct *work) +{ + struct mlx5_core_health *health, *n; + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + LIST_HEAD(tlist); + + spin_lock_irq(&health_lock); + list_splice_init(&health_list, &tlist); + + spin_unlock_irq(&health_lock); + + list_for_each_entry_safe(health, n, &tlist, list) { + priv = container_of(health, struct mlx5_priv, health); + dev = container_of(priv, struct mlx5_core_dev, priv); + mlx5_core_warn(dev, "handling bad device here\n"); + /* nothing yet */ + spin_lock_irq(&health_lock); + list_del_init(&health->list); + spin_unlock_irq(&health_lock); + } +} + +static const char *hsynd_str(u8 synd) +{ + switch (synd) { + case MLX5_HEALTH_SYNDR_FW_ERR: + return "firmware internal error"; + case MLX5_HEALTH_SYNDR_IRISC_ERR: + return "irisc not responding"; + case MLX5_HEALTH_SYNDR_CRC_ERR: + return "firmware CRC error"; + case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR: + return "ICM fetch PCI error"; + case MLX5_HEALTH_SYNDR_HW_FTL_ERR: + return "HW fatal error\n"; + case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR: + return "async EQ buffer overrun"; + case MLX5_HEALTH_SYNDR_EQ_ERR: + return "EQ error"; + case MLX5_HEALTH_SYNDR_FFSER_ERR: + return "FFSER error"; + default: + return "unrecognized error"; + } +} + +static u16 read_be16(__be16 __iomem *p) +{ + return swab16(readl((__force u16 __iomem *) p)); +} + +static u32 read_be32(__be32 __iomem *p) +{ + return swab32(readl((__force u32 __iomem *) p)); +} + +static void print_health_info(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + struct health_buffer __iomem *h = health->health; + int i; + + for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) + 
pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i)); + + pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr)); + pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra)); + pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver)); + pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id)); + pr_info("irisc_index %d\n", readb(&h->irisc_index)); + pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd))); + pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_sync)); +} + +static void poll_health(unsigned long data) +{ + struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; + struct mlx5_core_health *health = &dev->priv.health; + unsigned long next; + u32 count; + + count = ioread32be(health->health_counter); + if (count == health->prev) + ++health->miss_counter; + else + health->miss_counter = 0; + + health->prev = count; + if (health->miss_counter == MAX_MISSES) { + mlx5_core_err(dev, "device's health compromised\n"); + print_health_info(dev); + spin_lock_irq(&health_lock); + list_add_tail(&health->list, &health_list); + spin_unlock_irq(&health_lock); + + queue_work(mlx5_core_wq, &health_work); + } else { + get_random_bytes(&next, sizeof(next)); + next %= HZ; + next += jiffies + MLX5_HEALTH_POLL_INTERVAL; + mod_timer(&health->timer, next); + } +} + +void mlx5_start_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + INIT_LIST_HEAD(&health->list); + init_timer(&health->timer); + health->health = &dev->iseg->health; + health->health_counter = &dev->iseg->health_counter; + + health->timer.data = (unsigned long)dev; + health->timer.function = poll_health; + health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); + add_timer(&health->timer); +} + +void mlx5_stop_health_poll(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + del_timer_sync(&health->timer); + + spin_lock_irq(&health_lock); + if (!list_empty(&health->list)) + list_del_init(&health->list); + spin_unlock_irq(&health_lock); +} + +void mlx5_health_cleanup(void) +{ +} + +void __init mlx5_health_init(void) +{ + INIT_WORK(&health_work, health_care); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c new file mode 100644 index 000000000..ee1b0b965 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, + u16 opmod, u8 port) +{ + struct mlx5_mad_ifc_mbox_in *in = NULL; + struct mlx5_mad_ifc_mbox_out *out = NULL; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC); + in->hdr.opmod = cpu_to_be16(opmod); + in->port = port; + + memcpy(in->data, inb, sizeof(in->data)); + + err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out)); + if (err) + goto out; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out; + } + + memcpy(outb, out->data, sizeof(out->data)); + +out: + kfree(out); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c new file mode 100644 index 000000000..28425e5ea --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -0,0 +1,1070 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +#define DRIVER_NAME "mlx5_core" +#define DRIVER_VERSION "3.0" +#define DRIVER_RELDATE "January 2015" + +MODULE_AUTHOR("Eli Cohen "); +MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +int mlx5_core_debug_mask; +module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); +MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. 
Default=0"); + +#define MLX5_DEFAULT_PROF 2 +static int prof_sel = MLX5_DEFAULT_PROF; +module_param_named(prof_sel, prof_sel, int, 0444); +MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); + +struct workqueue_struct *mlx5_core_wq; +static LIST_HEAD(intf_list); +static LIST_HEAD(dev_list); +static DEFINE_MUTEX(intf_mutex); + +struct mlx5_device_context { + struct list_head list; + struct mlx5_interface *intf; + void *context; +}; + +static struct mlx5_profile profile[] = { + [0] = { + .mask = 0, + }, + [1] = { + .mask = MLX5_PROF_MASK_QP_SIZE, + .log_max_qp = 12, + }, + [2] = { + .mask = MLX5_PROF_MASK_QP_SIZE | + MLX5_PROF_MASK_MR_CACHE, + .log_max_qp = 17, + .mr_cache[0] = { + .size = 500, + .limit = 250 + }, + .mr_cache[1] = { + .size = 500, + .limit = 250 + }, + .mr_cache[2] = { + .size = 500, + .limit = 250 + }, + .mr_cache[3] = { + .size = 500, + .limit = 250 + }, + .mr_cache[4] = { + .size = 500, + .limit = 250 + }, + .mr_cache[5] = { + .size = 500, + .limit = 250 + }, + .mr_cache[6] = { + .size = 500, + .limit = 250 + }, + .mr_cache[7] = { + .size = 500, + .limit = 250 + }, + .mr_cache[8] = { + .size = 500, + .limit = 250 + }, + .mr_cache[9] = { + .size = 500, + .limit = 250 + }, + .mr_cache[10] = { + .size = 500, + .limit = 250 + }, + .mr_cache[11] = { + .size = 500, + .limit = 250 + }, + .mr_cache[12] = { + .size = 64, + .limit = 32 + }, + .mr_cache[13] = { + .size = 32, + .limit = 16 + }, + .mr_cache[14] = { + .size = 16, + .limit = 8 + }, + .mr_cache[15] = { + .size = 8, + .limit = 4 + }, + }, +}; + +static int set_dma_caps(struct pci_dev *pdev) +{ + int err; + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); + return err; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, + "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "Can't set consistent PCI DMA mask, aborting\n"); + return err; + } + } + + dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); + return err; +} + +static int request_bar(struct pci_dev *pdev) +{ + int err = 0; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Missing registers BAR, aborting\n"); + return -ENODEV; + } + + err = pci_request_regions(pdev, DRIVER_NAME); + if (err) + dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); + + return err; +} + +static void release_bar(struct pci_dev *pdev) +{ + pci_release_regions(pdev); +} + +static int mlx5_enable_msix(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + int num_eqs = 1 << dev->caps.gen.log_max_eq; + int nvec; + int i; + + nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = min_t(int, nvec, num_eqs); + if (nvec <= MLX5_EQ_VEC_COMP_BASE) + return -ENOMEM; + + table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); + if (!table->msix_arr) + return -ENOMEM; + + for (i = 0; i < nvec; i++) + table->msix_arr[i].entry = i; + + nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, + MLX5_EQ_VEC_COMP_BASE + 1, nvec); + if (nvec < 0) + return nvec; + + table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; + + return 0; +} + +static void mlx5_disable_msix(struct mlx5_core_dev *dev) +{ + struct 
mlx5_eq_table *table = &dev->priv.eq_table; + + pci_disable_msix(dev->pdev); + kfree(table->msix_arr); +} + +struct mlx5_reg_host_endianess { + u8 he; + u8 rsvd[15]; +}; + + +#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) + +enum { + MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | + MLX5_DEV_CAP_FLAG_DCT, +}; + +static u16 to_fw_pkey_sz(u32 size) +{ + switch (size) { + case 128: + return 0; + case 256: + return 1; + case 512: + return 2; + case 1024: + return 3; + case 2048: + return 4; + case 4096: + return 5; + default: + pr_warn("invalid pkey table size %d\n", size); + return 0; + } +} + +/* selectively copy writable fields clearing any reserved area + */ +static void copy_rw_fields(void *to, struct mlx5_caps *from) +{ + __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22); + u64 v64; + + MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp); + MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp); + MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); + MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); + MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); + MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); + v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; + *flags_off = cpu_to_be64(v64); +} + +static u16 get_pkey_table_size(int pkey) +{ + if (pkey > MLX5_MAX_LOG_PKEY_TABLE) + return 0; + + return MLX5_MIN_PKEY_TABLE_SIZE << pkey; +} + +static void fw2drv_caps(struct mlx5_caps *caps, void *out) +{ + struct mlx5_general_caps *gen = &caps->gen; + + gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz); + gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz); + gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp); + gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz); + gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs); + gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz); + gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq); + gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz); + gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey); + gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq); + gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection); + gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz); + gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size); + gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size); + gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc); + gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc); + gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp); + gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp); + gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt); + gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size)); + gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay); + gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports); + gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg); + gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support); + gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22)); + pr_debug("flags = 0x%llx\n", gen->flags); + gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, 
uar_sz); + gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz); + gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf); + gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size); + gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq); + gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq); + gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc); + gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg); + gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd); + gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd); + gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz); +} + +static const char *caps_opmod_str(u16 opmod) +{ + switch (opmod) { + case HCA_CAP_OPMOD_GET_MAX: + return "GET_MAX"; + case HCA_CAP_OPMOD_GET_CUR: + return "GET_CUR"; + default: + return "Invalid"; + } +} + +int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, + u16 opmod) +{ + u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; + int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *out; + int err; + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, opmod); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto query_ex; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) { + mlx5_core_warn(dev, "query max hca cap failed, %d\n", err); + goto query_ex; + } + mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); + fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct)); + +query_ex: + kfree(out); + return err; +} + +static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) +{ + u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)]; + int err; + + memset(out, 0, sizeof(out)); + + MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); + err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); + if (err) + return err; + + err = mlx5_cmd_status_to_err_v2(out); + + return err; +} + +static int handle_hca_cap(struct mlx5_core_dev *dev) +{ + void *set_ctx = NULL; + struct mlx5_profile *prof = dev->profile; + struct mlx5_caps *cur_caps = NULL; + struct mlx5_caps *max_caps = NULL; + int err = -ENOMEM; + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + + set_ctx = kzalloc(set_sz, GFP_KERNEL); + if (!set_ctx) + goto query_ex; + + max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL); + if (!max_caps) + goto query_ex; + + cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL); + if (!cur_caps) + goto query_ex; + + err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX); + if (err) + goto query_ex; + + err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR); + if (err) + goto query_ex; + + /* we limit the size of the pkey table to 128 entries for now */ + cur_caps->gen.pkey_table_size = 128; + + if (prof->mask & MLX5_PROF_MASK_QP_SIZE) + cur_caps->gen.log_max_qp = prof->log_max_qp; + + /* disable checksum */ + cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; + + copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct), + cur_caps); + err = set_caps(dev, set_ctx, set_sz); + +query_ex: + kfree(cur_caps); + kfree(max_caps); + kfree(set_ctx); + + return err; +} + +static int set_hca_ctrl(struct mlx5_core_dev *dev) +{ + struct mlx5_reg_host_endianess he_in; + struct mlx5_reg_host_endianess he_out; + int err; + + memset(&he_in, 0, sizeof(he_in)); + he_in.he = MLX5_SET_HOST_ENDIANNESS; + err = 
mlx5_core_access_reg(dev, &he_in, sizeof(he_in), + &he_out, sizeof(he_out), + MLX5_REG_HOST_ENDIANNESS, 0, 1); + return err; +} + +static int mlx5_core_enable_hca(struct mlx5_core_dev *dev) +{ + int err; + struct mlx5_enable_hca_mbox_in in; + struct mlx5_enable_hca_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; +} + +static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) +{ + int err; + struct mlx5_disable_hca_mbox_in in; + struct mlx5_disable_hca_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; +} + +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq *eq, *n; + int err = -ENOENT; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} +EXPORT_SYMBOL(mlx5_vector2eqn); + +static void free_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (mlx5_destroy_unmap_eq(dev, eq)) + mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", + eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int alloc_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = &dev->priv.eq_table; + char name[MLX5_MAX_EQ_NAME]; + struct mlx5_eq *eq; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = MLX5_COMP_EQ_SIZE; + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + err = mlx5_create_map_eq(dev, eq, + i + MLX5_EQ_VEC_COMP_BASE, nent, 0, + name, &dev->priv.uuari.uars[0]); + if (err) { + kfree(eq); + goto clean; + } + mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &table->comp_eqs_list); + spin_unlock(&table->lock); + } + + return 0; + +clean: + free_comp_eqs(dev); + return err; +} + +static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) +{ + struct mlx5_priv *priv = &dev->priv; + int err; + + dev->pdev = pdev; + pci_set_drvdata(dev->pdev, dev); + strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); + priv->name[MLX5_MAX_NAME_LEN - 1] = 0; + + mutex_init(&priv->pgdir_mutex); + INIT_LIST_HEAD(&priv->pgdir_list); + spin_lock_init(&priv->mkey_lock); + + priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); + if (!priv->dbg_root) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + goto err_dbg; + } + + err = request_bar(pdev); + if (err) { + 
dev_err(&pdev->dev, "error requesting BARs, aborting\n"); + goto err_disable; + } + + pci_set_master(pdev); + + err = set_dma_caps(pdev); + if (err) { + dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n"); + goto err_clr_master; + } + + dev->iseg_base = pci_resource_start(dev->pdev, 0); + dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); + if (!dev->iseg) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n"); + goto err_clr_master; + } + dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev), + fw_rev_min(dev), fw_rev_sub(dev)); + + err = mlx5_cmd_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); + goto err_unmap; + } + + mlx5_pagealloc_init(dev); + + err = mlx5_core_enable_hca(dev); + if (err) { + dev_err(&pdev->dev, "enable hca failed\n"); + goto err_pagealloc_cleanup; + } + + err = mlx5_satisfy_startup_pages(dev, 1); + if (err) { + dev_err(&pdev->dev, "failed to allocate boot pages\n"); + goto err_disable_hca; + } + + err = set_hca_ctrl(dev); + if (err) { + dev_err(&pdev->dev, "set_hca_ctrl failed\n"); + goto reclaim_boot_pages; + } + + err = handle_hca_cap(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap failed\n"); + goto reclaim_boot_pages; + } + + err = mlx5_satisfy_startup_pages(dev, 0); + if (err) { + dev_err(&pdev->dev, "failed to allocate init pages\n"); + goto reclaim_boot_pages; + } + + err = mlx5_pagealloc_start(dev); + if (err) { + dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); + goto reclaim_boot_pages; + } + + err = mlx5_cmd_init_hca(dev); + if (err) { + dev_err(&pdev->dev, "init hca failed\n"); + goto err_pagealloc_stop; + } + + mlx5_start_health_poll(dev); + + err = mlx5_cmd_query_hca_cap(dev, &dev->caps); + if (err) { + dev_err(&pdev->dev, "query hca failed\n"); + goto err_stop_poll; + } + + err = mlx5_cmd_query_adapter(dev); + if (err) { + dev_err(&pdev->dev, "query adapter failed\n"); + goto err_stop_poll; + } + + err = mlx5_enable_msix(dev); + if (err) { + dev_err(&pdev->dev, "enable msix failed\n"); + goto err_stop_poll; + } + + err = mlx5_eq_init(dev); + if (err) { + dev_err(&pdev->dev, "failed to initialize eq\n"); + goto disable_msix; + } + + err = mlx5_alloc_uuars(dev, &priv->uuari); + if (err) { + dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); + goto err_eq_cleanup; + } + + err = mlx5_start_eqs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); + goto err_free_uar; + } + + err = alloc_comp_eqs(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc completion EQs\n"); + goto err_stop_eqs; + } + + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); + + mlx5_init_cq_table(dev); + mlx5_init_qp_table(dev); + mlx5_init_srq_table(dev); + mlx5_init_mr_table(dev); + + return 0; + +err_stop_eqs: + mlx5_stop_eqs(dev); + +err_free_uar: + mlx5_free_uuars(dev, &priv->uuari); + +err_eq_cleanup: + mlx5_eq_cleanup(dev); + +disable_msix: + mlx5_disable_msix(dev); + +err_stop_poll: + mlx5_stop_health_poll(dev); + if (mlx5_cmd_teardown_hca(dev)) { + dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); + return err; + } + +err_pagealloc_stop: + mlx5_pagealloc_stop(dev); + +reclaim_boot_pages: + mlx5_reclaim_startup_pages(dev); + +err_disable_hca: + mlx5_core_disable_hca(dev); + +err_pagealloc_cleanup: + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + +err_unmap: + iounmap(dev->iseg); + +err_clr_master: + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + +err_disable: + 
pci_disable_device(dev->pdev); + +err_dbg: + debugfs_remove(priv->dbg_root); + return err; +} + +static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + + mlx5_cleanup_srq_table(dev); + mlx5_cleanup_qp_table(dev); + mlx5_cleanup_cq_table(dev); + free_comp_eqs(dev); + mlx5_stop_eqs(dev); + mlx5_free_uuars(dev, &priv->uuari); + mlx5_eq_cleanup(dev); + mlx5_disable_msix(dev); + mlx5_stop_health_poll(dev); + if (mlx5_cmd_teardown_hca(dev)) { + dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); + return; + } + mlx5_pagealloc_stop(dev); + mlx5_reclaim_startup_pages(dev); + mlx5_core_disable_hca(dev); + mlx5_pagealloc_cleanup(dev); + mlx5_cmd_cleanup(dev); + iounmap(dev->iseg); + pci_clear_master(dev->pdev); + release_bar(dev->pdev); + pci_disable_device(dev->pdev); + debugfs_remove(priv->dbg_root); +} + +static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + + dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); + if (!dev_ctx) { + pr_warn("mlx5_add_device: alloc context failed\n"); + return; + } + + dev_ctx->intf = intf; + dev_ctx->context = intf->add(dev); + + if (dev_ctx->context) { + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else { + kfree(dev_ctx); + } +} + +static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) +{ + struct mlx5_device_context *dev_ctx; + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) { + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + intf->remove(dev, dev_ctx->context); + kfree(dev_ctx); + return; + } +} +static int mlx5_register_device(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + struct mlx5_interface *intf; + + mutex_lock(&intf_mutex); + list_add_tail(&priv->dev_list, &dev_list); + list_for_each_entry(intf, &intf_list, list) + mlx5_add_device(intf, priv); + mutex_unlock(&intf_mutex); + + return 0; +} +static void mlx5_unregister_device(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + struct mlx5_interface *intf; + + mutex_lock(&intf_mutex); + list_for_each_entry(intf, &intf_list, list) + mlx5_remove_device(intf, priv); + list_del(&priv->dev_list); + mutex_unlock(&intf_mutex); +} + +int mlx5_register_interface(struct mlx5_interface *intf) +{ + struct mlx5_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&intf_mutex); + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &dev_list, dev_list) + mlx5_add_device(intf, priv); + mutex_unlock(&intf_mutex); + + return 0; +} +EXPORT_SYMBOL(mlx5_register_interface); + +void mlx5_unregister_interface(struct mlx5_interface *intf) +{ + struct mlx5_priv *priv; + + mutex_lock(&intf_mutex); + list_for_each_entry(priv, &dev_list, dev_list) + mlx5_remove_device(intf, priv); + list_del(&intf->list); + mutex_unlock(&intf_mutex); +} +EXPORT_SYMBOL(mlx5_unregister_interface); + +void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) +{ + struct mlx5_priv *priv = &mdev->priv; + struct mlx5_device_context *dev_ctx; + unsigned long flags; + void *result = NULL; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) 
+ if ((dev_ctx->intf->protocol == protocol) && + dev_ctx->intf->get_dev) { + result = dev_ctx->intf->get_dev(dev_ctx->context); + break; + } + + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + return result; +} +EXPORT_SYMBOL(mlx5_get_protocol_dev); + +static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, + unsigned long param) +{ + struct mlx5_priv *priv = &dev->priv; + struct mlx5_device_context *dev_ctx; + unsigned long flags; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf->event) + dev_ctx->intf->event(dev, dev_ctx->context, event, param); + + spin_unlock_irqrestore(&priv->ctx_lock, flags); +} + +struct mlx5_core_event_handler { + void (*event)(struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + void *data); +}; + +#define MLX5_IB_MOD "mlx5_ib" + +static int init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + int err; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + dev_err(&pdev->dev, "kzalloc failed\n"); + return -ENOMEM; + } + priv = &dev->priv; + + pci_set_drvdata(pdev, dev); + + if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { + pr_warn("selected profile out of range, selecting default (%d)\n", + MLX5_DEFAULT_PROF); + prof_sel = MLX5_DEFAULT_PROF; + } + dev->profile = &profile[prof_sel]; + dev->event = mlx5_core_event; + + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); + err = mlx5_dev_init(dev, pdev); + if (err) { + dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); + goto out; + } + + err = mlx5_register_device(dev); + if (err) { + dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); + goto out_init; + } + + err = request_module_nowait(MLX5_IB_MOD); + if (err) + pr_info("failed request module on %s\n", MLX5_IB_MOD); + + return 0; + +out_init: + mlx5_dev_cleanup(dev); +out: + kfree(dev); + return err; +} +static void remove_one(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + + mlx5_unregister_device(dev); + mlx5_dev_cleanup(dev); + kfree(dev); +} + +static const struct pci_device_id mlx5_core_pci_table[] = { + { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ + { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */ + { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */ + { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */ + { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ + { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */ + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); + +static struct pci_driver mlx5_core_driver = { + .name = DRIVER_NAME, + .id_table = mlx5_core_pci_table, + .probe = init_one, + .remove = remove_one +}; + +static int __init init(void) +{ + int err; + + mlx5_register_debugfs(); + mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq"); + if (!mlx5_core_wq) { + err = -ENOMEM; + goto err_debug; + } + mlx5_health_init(); + + err = pci_register_driver(&mlx5_core_driver); + if (err) + goto err_health; + + return 0; + +err_health: + mlx5_health_cleanup(); + destroy_workqueue(mlx5_core_wq); +err_debug: + mlx5_unregister_debugfs(); + return err; +} + +static void __exit cleanup(void) +{ + pci_unregister_driver(&mlx5_core_driver); + mlx5_health_cleanup(); + destroy_workqueue(mlx5_core_wq); + mlx5_unregister_debugfs(); +} + +module_init(init); +module_exit(cleanup); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c 
new file mode 100644 index 000000000..d79fd85d1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +struct mlx5_attach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_attach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +struct mlx5_detach_mcg_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + __be32 rsvd; + u8 gid[16]; +}; + +struct mlx5_detach_mcg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvf[8]; +}; + +int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_attach_mcg_mbox_in in; + struct mlx5_attach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ATTACH_TO_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_attach_mcg); + +int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) +{ + struct mlx5_detach_mcg_mbox_in in; + struct mlx5_detach_mcg_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); + memcpy(in.gid, mgid, sizeof(*mgid)); + in.qpn = cpu_to_be32(qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_detach_mcg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h new file mode 100644 index 000000000..a051b906a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_CORE_H__ +#define __MLX5_CORE_H__ + +#include +#include +#include + +extern int mlx5_core_debug_mask; + +#define mlx5_core_dbg(dev, format, ...) \ + pr_debug("%s:%s:%d:(pid %d): " format, \ + (dev)->priv.name, __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define mlx5_core_dbg_mask(dev, mask, format, ...) \ +do { \ + if ((mask) & mlx5_core_debug_mask) \ + mlx5_core_dbg(dev, format, ##__VA_ARGS__); \ +} while (0) + +#define mlx5_core_err(dev, format, ...) \ + pr_err("%s:%s:%d:(pid %d): " format, \ + (dev)->priv.name, __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define mlx5_core_warn(dev, format, ...) \ + pr_warn("%s:%s:%d:(pid %d): " format, \ + (dev)->priv.name, __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +enum { + MLX5_CMD_DATA, /* print command payload only */ + MLX5_CMD_TIME, /* print command execution time */ +}; + + +int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, + struct mlx5_caps *caps); +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); + +#endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c new file mode 100644 index 000000000..1adb300dd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +void mlx5_init_mr_table(struct mlx5_core_dev *dev) +{ + struct mlx5_mr_table *table = &dev->priv.mr_table; + + rwlock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev) +{ +} + +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_create_mkey_mbox_in *in, int inlen, + mlx5_cmd_cbk_t callback, void *context, + struct mlx5_create_mkey_mbox_out *out) +{ + struct mlx5_mr_table *table = &dev->priv.mr_table; + struct mlx5_create_mkey_mbox_out lout; + int err; + u8 key; + + memset(&lout, 0, sizeof(lout)); + spin_lock_irq(&dev->priv.mkey_lock); + key = dev->priv.mkey_key++; + spin_unlock_irq(&dev->priv.mkey_lock); + in->seg.qpn_mkey7_0 |= cpu_to_be32(key); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY); + if (callback) { + err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out), + callback, context); + return err; + } else { + err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout)); + } + + if (err) { + mlx5_core_dbg(dev, "cmd exec failed %d\n", err); + return err; + } + + if (lout.hdr.status) { + mlx5_core_dbg(dev, "status %d\n", lout.hdr.status); + return mlx5_cmd_status_to_err(&lout.hdr); + } + + mr->iova = be64_to_cpu(in->seg.start_addr); + mr->size = be64_to_cpu(in->seg.len); + mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key; + mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff; + + mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", + be32_to_cpu(lout.mkey), key, mr->key); + + /* connect to MR tree */ + write_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); + write_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n", + mlx5_base_mkey(mr->key), err); + mlx5_core_destroy_mkey(dev, mr); + } + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_mkey); + +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) +{ + struct mlx5_mr_table *table = &dev->priv.mr_table; + struct mlx5_destroy_mkey_mbox_in in; + struct mlx5_destroy_mkey_mbox_out out; + struct mlx5_core_mr *deleted_mr; + unsigned long flags; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + write_lock_irqsave(&table->lock, flags); + deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); + write_unlock_irqrestore(&table->lock, flags); + if (!deleted_mr) { + mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", + mlx5_base_mkey(mr->key)); + return -ENOENT; + } + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + 
return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_destroy_mkey); + +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_query_mkey_mbox_out *out, int outlen) +{ + struct mlx5_query_mkey_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY); + in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_mkey); + +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + u32 *mkey) +{ + struct mlx5_query_special_ctxs_mbox_in in; + struct mlx5_query_special_ctxs_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *mkey = be32_to_cpu(out.dump_fill_mkey); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); + +int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, + int npsvs, u32 *sig_index) +{ + struct mlx5_allocate_psv_in in; + struct mlx5_allocate_psv_out out; + int i, err; + + if (npsvs > MLX5_MAX_PSVS) + return -EINVAL; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV); + in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + mlx5_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + + if (out.hdr.status) { + mlx5_core_err(dev, "create_psv bad status %d\n", + out.hdr.status); + return mlx5_cmd_status_to_err(&out.hdr); + } + + for (i = 0; i < npsvs; i++) + sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff; + + return err; +} +EXPORT_SYMBOL(mlx5_core_create_psv); + +int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) +{ + struct mlx5_destroy_psv_in in; + struct mlx5_destroy_psv_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.psv_number = cpu_to_be32(psv_num); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err); + goto out; + } + + if (out.hdr.status) { + mlx5_core_err(dev, "destroy_psv bad status %d\n", + out.hdr.status); + err = mlx5_cmd_status_to_err(&out.hdr); + goto out; + } + +out: + return err; +} +EXPORT_SYMBOL(mlx5_core_destroy_psv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c new file mode 100644 index 000000000..8a64542ab --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -0,0 +1,534 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + MLX5_PAGES_CANT_GIVE = 0, + MLX5_PAGES_GIVE = 1, + MLX5_PAGES_TAKE = 2 +}; + +enum { + MLX5_BOOT_PAGES = 1, + MLX5_INIT_PAGES = 2, + MLX5_POST_INIT_PAGES = 3 +}; + +struct mlx5_pages_req { + struct mlx5_core_dev *dev; + u16 func_id; + s32 npages; + struct work_struct work; +}; + +struct fw_page { + struct rb_node rb_node; + u64 addr; + struct page *page; + u16 func_id; + unsigned long bitmask; + struct list_head list; + unsigned free_count; +}; + +struct mlx5_query_pages_inbox { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_pages_outbox { + struct mlx5_outbox_hdr hdr; + __be16 rsvd; + __be16 func_id; + __be32 num_pages; +}; + +struct mlx5_manage_pages_inbox { + struct mlx5_inbox_hdr hdr; + __be16 rsvd; + __be16 func_id; + __be32 num_entries; + __be64 pas[0]; +}; + +struct mlx5_manage_pages_outbox { + struct mlx5_outbox_hdr hdr; + __be32 num_entries; + u8 rsvd[4]; + __be64 pas[0]; +}; + +enum { + MAX_RECLAIM_TIME_MSECS = 5000, +}; + +enum { + MLX5_MAX_RECLAIM_TIME_MILI = 5000, + MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, +}; + +static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node **new = &root->rb_node; + struct rb_node *parent = NULL; + struct fw_page *nfp; + struct fw_page *tfp; + int i; + + while (*new) { + parent = *new; + tfp = rb_entry(parent, struct fw_page, rb_node); + if (tfp->addr < addr) + new = &parent->rb_left; + else if (tfp->addr > addr) + new = &parent->rb_right; + else + return -EEXIST; + } + + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); + if (!nfp) + return -ENOMEM; + + nfp->addr = addr; + nfp->page = page; + nfp->func_id = func_id; + nfp->free_count = MLX5_NUM_4K_IN_PAGE; + for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) + set_bit(i, &nfp->bitmask); + + rb_link_node(&nfp->rb_node, parent, new); + rb_insert_color(&nfp->rb_node, root); + list_add(&nfp->list, &dev->priv.free_list); + + return 0; +} + +static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) +{ + struct rb_root *root = &dev->priv.page_root; + struct rb_node *tmp = root->rb_node; + struct fw_page *result = NULL; + struct fw_page *tfp; + + while (tmp) 
{ + tfp = rb_entry(tmp, struct fw_page, rb_node); + if (tfp->addr < addr) { + tmp = tmp->rb_left; + } else if (tfp->addr > addr) { + tmp = tmp->rb_right; + } else { + result = tfp; + break; + } + } + + return result; +} + +static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, + s32 *npages, int boot) +{ + struct mlx5_query_pages_inbox in; + struct mlx5_query_pages_outbox out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); + in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); + + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *npages = be32_to_cpu(out.num_pages); + *func_id = be16_to_cpu(out.func_id); + + return err; +} + +static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) +{ + struct fw_page *fp; + unsigned n; + + if (list_empty(&dev->priv.free_list)) + return -ENOMEM; + + fp = list_entry(dev->priv.free_list.next, struct fw_page, list); + n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); + if (n >= MLX5_NUM_4K_IN_PAGE) { + mlx5_core_warn(dev, "alloc 4k bug\n"); + return -ENOENT; + } + clear_bit(n, &fp->bitmask); + fp->free_count--; + if (!fp->free_count) + list_del(&fp->list); + + *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE; + + return 0; +} + +#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT) + +static void free_4k(struct mlx5_core_dev *dev, u64 addr) +{ + struct fw_page *fwp; + int n; + + fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); + if (!fwp) { + mlx5_core_warn(dev, "page not found\n"); + return; + } + + n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; + fwp->free_count++; + set_bit(n, &fwp->bitmask); + if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { + rb_erase(&fwp->rb_node, &dev->priv.page_root); + if (fwp->free_count != 1) + list_del(&fwp->list); + dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, + PAGE_SIZE, DMA_BIDIRECTIONAL); + __free_page(fwp->page); + kfree(fwp); + } else if (fwp->free_count == 1) { + list_add(&fwp->list, &dev->priv.free_list); + } +} + +static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) +{ + struct page *page; + u64 addr; + int err; + int nid = dev_to_node(&dev->pdev->dev); + + page = alloc_pages_node(nid, GFP_HIGHUSER, 0); + if (!page) { + mlx5_core_warn(dev, "failed to allocate page\n"); + return -ENOMEM; + } + addr = dma_map_page(&dev->pdev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&dev->pdev->dev, addr)) { + mlx5_core_warn(dev, "failed dma mapping page\n"); + err = -ENOMEM; + goto out_alloc; + } + err = insert_page(dev, addr, page, func_id); + if (err) { + mlx5_core_err(dev, "failed to track allocated page\n"); + goto out_mapping; + } + + return 0; + +out_mapping: + dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + +out_alloc: + __free_page(page); + + return err; +} +static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, + int notify_fail) +{ + struct mlx5_manage_pages_inbox *in; + struct mlx5_manage_pages_outbox out; + struct mlx5_manage_pages_inbox *nin; + int inlen; + u64 addr; + int err; + int i; + + inlen = sizeof(*in) + npages * sizeof(in->pas[0]); + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); + return -ENOMEM; + } + memset(&out, 0, sizeof(out)); + + for (i = 0; i < npages; i++) { +retry: + err = alloc_4k(dev, 
&addr); + if (err) { + if (err == -ENOMEM) + err = alloc_system_page(dev, func_id); + if (err) + goto out_4k; + + goto retry; + } + in->pas[i] = cpu_to_be64(addr); + } + + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); + in->func_id = cpu_to_be16(func_id); + in->num_entries = cpu_to_be32(npages); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", + func_id, npages, err); + goto out_alloc; + } + dev->priv.fw_pages += npages; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + if (err) { + mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", + func_id, npages, out.hdr.status); + goto out_alloc; + } + } + + mlx5_core_dbg(dev, "err %d\n", err); + + goto out_free; + +out_alloc: + if (notify_fail) { + nin = kzalloc(sizeof(*nin), GFP_KERNEL); + if (!nin) { + mlx5_core_warn(dev, "allocation failed\n"); + goto out_4k; + } + memset(&out, 0, sizeof(out)); + nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); + if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) + mlx5_core_warn(dev, "page notify failed\n"); + kfree(nin); + } + +out_4k: + for (i--; i >= 0; i--) + free_4k(dev, be64_to_cpu(in->pas[i])); +out_free: + kvfree(in); + return err; +} + +static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, + int *nclaimed) +{ + struct mlx5_manage_pages_inbox in; + struct mlx5_manage_pages_outbox *out; + int num_claimed; + int outlen; + u64 addr; + int err; + int i; + + if (nclaimed) + *nclaimed = 0; + + memset(&in, 0, sizeof(in)); + outlen = sizeof(*out) + npages * sizeof(out->pas[0]); + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); + in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); + in.func_id = cpu_to_be16(func_id); + in.num_entries = cpu_to_be32(npages); + mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) { + mlx5_core_err(dev, "failed reclaiming pages\n"); + goto out_free; + } + dev->priv.fw_pages -= npages; + + if (out->hdr.status) { + err = mlx5_cmd_status_to_err(&out->hdr); + goto out_free; + } + + num_claimed = be32_to_cpu(out->num_entries); + if (nclaimed) + *nclaimed = num_claimed; + + for (i = 0; i < num_claimed; i++) { + addr = be64_to_cpu(out->pas[i]); + free_4k(dev, addr); + } + +out_free: + kvfree(out); + return err; +} + +static void pages_work_handler(struct work_struct *work) +{ + struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); + struct mlx5_core_dev *dev = req->dev; + int err = 0; + + if (req->npages < 0) + err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); + else if (req->npages > 0) + err = give_pages(dev, req->func_id, req->npages, 1); + + if (err) + mlx5_core_warn(dev, "%s fail %d\n", + req->npages < 0 ? 
"reclaim" : "give", err); + + kfree(req); +} + +void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, + s32 npages) +{ + struct mlx5_pages_req *req; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) { + mlx5_core_warn(dev, "failed to allocate pages request\n"); + return; + } + + req->dev = dev; + req->func_id = func_id; + req->npages = npages; + INIT_WORK(&req->work, pages_work_handler); + queue_work(dev->priv.pg_wq, &req->work); +} + +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) +{ + u16 uninitialized_var(func_id); + s32 uninitialized_var(npages); + int err; + + err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); + if (err) + return err; + + mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", + npages, boot ? "boot" : "init", func_id); + + return give_pages(dev, func_id, npages, 0); +} + +enum { + MLX5_BLKS_FOR_RECLAIM_PAGES = 12 +}; + +static int optimal_reclaimed_pages(void) +{ + struct mlx5_cmd_prot_block *block; + struct mlx5_cmd_layout *lay; + int ret; + + ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - + sizeof(struct mlx5_manage_pages_outbox)) / + FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); + + return ret; +} + +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +{ + unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + struct fw_page *fwp; + struct rb_node *p; + int nclaimed = 0; + int err; + + do { + p = rb_first(&dev->priv.page_root); + if (p) { + fwp = rb_entry(p, struct fw_page, rb_node); + err = reclaim_pages(dev, fwp->func_id, + optimal_reclaimed_pages(), + &nclaimed); + if (err) { + mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", + err); + return err; + } + if (nclaimed) + end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + } + if (time_after(jiffies, end)) { + mlx5_core_warn(dev, "FW did not return all pages. giving up...\n"); + break; + } + } while (p); + + return 0; +} + +void mlx5_pagealloc_init(struct mlx5_core_dev *dev) +{ + dev->priv.page_root = RB_ROOT; + INIT_LIST_HEAD(&dev->priv.free_list); +} + +void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) +{ + /* nothing */ +} + +int mlx5_pagealloc_start(struct mlx5_core_dev *dev) +{ + dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); + if (!dev->priv.pg_wq) + return -ENOMEM; + + return 0; +} + +void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) +{ + destroy_workqueue(dev->priv.pg_wq); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c new file mode 100644 index 000000000..f2d3aee90 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/pd.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +struct mlx5_alloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_pd_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn) +{ + struct mlx5_alloc_pd_mbox_in in; + struct mlx5_alloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_PD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + *pdn = be32_to_cpu(out.pdn) & 0xffffff; + return err; +} +EXPORT_SYMBOL(mlx5_core_alloc_pd); + +int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn) +{ + struct mlx5_dealloc_pd_mbox_in in; + struct mlx5_dealloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_dealloc_pd); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c new file mode 100644 index 000000000..49e90f261 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include "mlx5_core.h" + +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write) +{ + struct mlx5_access_reg_mbox_in *in = NULL; + struct mlx5_access_reg_mbox_out *out = NULL; + int err = -ENOMEM; + + in = mlx5_vzalloc(sizeof(*in) + size_in); + if (!in) + return -ENOMEM; + + out = mlx5_vzalloc(sizeof(*out) + size_out); + if (!out) + goto ex1; + + memcpy(in->data, data_in, size_in); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG); + in->hdr.opmod = cpu_to_be16(!write); + in->arg = cpu_to_be32(arg); + in->register_id = cpu_to_be16(reg_num); + err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, + sizeof(*out) + size_out); + if (err) + goto ex2; + + if (out->hdr.status) + err = mlx5_cmd_status_to_err(&out->hdr); + + if (!err) + memcpy(data_out, out->data, size_out); + +ex2: + kvfree(out); +ex1: + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_access_reg); + + +struct mlx5_reg_pcap { + u8 rsvd0; + u8 port_num; + u8 rsvd1[2]; + __be32 caps_127_96; + __be32 caps_95_64; + __be32 caps_63_32; + __be32 caps_31_0; +}; + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) +{ + struct mlx5_reg_pcap in; + struct mlx5_reg_pcap out; + int err; + + memset(&in, 0, sizeof(in)); + in.caps_127_96 = cpu_to_be32(caps); + in.port_num = port_num; + + err = mlx5_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), MLX5_REG_PCAP, 0, 1); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_caps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c new file mode 100644 index 000000000..dc7dbf7e9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#include +#include +#include +#include +#include + +#include "mlx5_core.h" + +static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev, + u32 rsn) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_core_rsc_common *common; + + spin_lock(&table->lock); + + common = radix_tree_lookup(&table->tree, rsn); + if (common) + atomic_inc(&common->refcount); + + spin_unlock(&table->lock); + + if (!common) { + mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", + rsn); + return NULL; + } + return common; +} + +void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common) +{ + if (atomic_dec_and_test(&common->refcount)) + complete(&common->free); +} + +void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) +{ + struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn); + struct mlx5_core_qp *qp; + + if (!common) + return; + + switch (common->res) { + case MLX5_RES_QP: + qp = (struct mlx5_core_qp *)common; + qp->event(qp, event_type); + break; + + default: + mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn); + } + + mlx5_core_put_rsc(common); +} + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) +{ + struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault; + int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; + struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); + struct mlx5_core_qp *qp = + container_of(common, struct mlx5_core_qp, common); + struct mlx5_pagefault pfault; + + if (!qp) { + mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n", + qpn); + return; + } + + pfault.event_subtype = eqe->sub_type; + pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) & + (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA); + pfault.bytes_committed = be32_to_cpu( + pf_eqe->bytes_committed); + + mlx5_core_dbg(dev, + "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n", + eqe->sub_type, pfault.flags); + + switch (eqe->sub_type) { + case MLX5_PFAULT_SUBTYPE_RDMA: + /* RDMA based event */ + pfault.rdma.r_key = + be32_to_cpu(pf_eqe->rdma.r_key); + pfault.rdma.packet_size = + be16_to_cpu(pf_eqe->rdma.packet_length); + pfault.rdma.rdma_op_len = + be32_to_cpu(pf_eqe->rdma.rdma_op_len); + pfault.rdma.rdma_va = + be64_to_cpu(pf_eqe->rdma.rdma_va); + mlx5_core_dbg(dev, + "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n", + qpn, pfault.rdma.r_key); + mlx5_core_dbg(dev, + "PAGE_FAULT: rdma_op_len: 0x%08x,\n", + pfault.rdma.rdma_op_len); + mlx5_core_dbg(dev, + "PAGE_FAULT: rdma_va: 0x%016llx,\n", + pfault.rdma.rdma_va); + mlx5_core_dbg(dev, + "PAGE_FAULT: bytes_committed: 0x%06x\n", + pfault.bytes_committed); + break; + + case MLX5_PFAULT_SUBTYPE_WQE: + /* WQE based event */ + pfault.wqe.wqe_index = + be16_to_cpu(pf_eqe->wqe.wqe_index); + pfault.wqe.packet_size = + be16_to_cpu(pf_eqe->wqe.packet_length); + mlx5_core_dbg(dev, + "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n", + qpn, pfault.wqe.wqe_index); + mlx5_core_dbg(dev, + "PAGE_FAULT: bytes_committed: 0x%06x\n", + pfault.bytes_committed); + break; + + default: + mlx5_core_warn(dev, + "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n", + eqe->sub_type, qpn); + /* Unsupported page faults should still be 
resolved by the + * page fault handler + */ + } + + if (qp->pfault_handler) { + qp->pfault_handler(qp, &pfault); + } else { + mlx5_core_err(dev, + "ODP event for QP %08x, without a fault handler in QP\n", + qpn); + /* Page fault will remain unresolved. QP will hang until it is + * destroyed + */ + } + + mlx5_core_put_rsc(common); +} +#endif + +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + struct mlx5_create_qp_mbox_in *in, + int inlen) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + struct mlx5_create_qp_mbox_out out; + struct mlx5_destroy_qp_mbox_in din; + struct mlx5_destroy_qp_mbox_out dout; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) { + mlx5_core_warn(dev, "ret %d\n", err); + return err; + } + + if (out.hdr.status) { + mlx5_core_warn(dev, "current num of QPs 0x%x\n", + atomic_read(&dev->num_qps)); + return mlx5_cmd_status_to_err(&out.hdr); + } + + qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; + mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); + + qp->common.res = MLX5_RES_QP; + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, qp->qpn, qp); + spin_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "err %d\n", err); + goto err_cmd; + } + + err = mlx5_debug_qp_add(dev, qp); + if (err) + mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n", + qp->qpn); + + qp->pid = current->pid; + atomic_set(&qp->common.refcount, 1); + atomic_inc(&dev->num_qps); + init_completion(&qp->common.free); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + din.qpn = cpu_to_be32(qp->qpn); + mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout)); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_create_qp); + +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp) +{ + struct mlx5_destroy_qp_mbox_in in; + struct mlx5_destroy_qp_mbox_out out; + struct mlx5_qp_table *table = &dev->priv.qp_table; + unsigned long flags; + int err; + + mlx5_debug_qp_remove(dev, qp); + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, qp->qpn); + spin_unlock_irqrestore(&table->lock, flags); + + mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); + wait_for_completion(&qp->common.free); + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + atomic_dec(&dev->num_qps); + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp); + +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, + enum mlx5_qp_state new_state, + struct mlx5_modify_qp_mbox_in *in, int sqd_event, + struct mlx5_core_qp *qp) +{ + static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { + [MLX5_QP_STATE_RST] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, + }, + [MLX5_QP_STATE_INIT] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, + [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, + }, + [MLX5_QP_STATE_RTR] = { + [MLX5_QP_STATE_RST] = 
MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, + }, + [MLX5_QP_STATE_RTS] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, + }, + [MLX5_QP_STATE_SQD] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + }, + [MLX5_QP_STATE_SQER] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, + }, + [MLX5_QP_STATE_ERR] = { + [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, + [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, + } + }; + + struct mlx5_modify_qp_mbox_out out; + int err = 0; + u16 op; + + if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE || + !optab[cur_state][new_state]) + return -EINVAL; + + memset(&out, 0, sizeof(out)); + op = optab[cur_state][new_state]; + in->hdr.opcode = cpu_to_be16(op); + in->qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err) + return err; + + return mlx5_cmd_status_to_err(&out.hdr); +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_modify); + +void mlx5_init_qp_table(struct mlx5_core_dev *dev) +{ + struct mlx5_qp_table *table = &dev->priv.qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + mlx5_qp_debugfs_init(dev); +} + +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) +{ + mlx5_qp_debugfs_cleanup(dev); +} + +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + struct mlx5_query_qp_mbox_out *out, int outlen) +{ + struct mlx5_query_qp_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_qp_query); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) +{ + struct mlx5_alloc_xrcd_mbox_in in; + struct mlx5_alloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + else + *xrcdn = be32_to_cpu(out.xrcdn); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc); + +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn) +{ + struct mlx5_dealloc_xrcd_mbox_in in; + struct mlx5_dealloc_xrcd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD); + in.xrcdn = cpu_to_be32(xrcdn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc); + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, + u8 flags, int error) +{ + struct mlx5_page_fault_resume_mbox_in in; + struct mlx5_page_fault_resume_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME); + in.hdr.opmod = 0; + flags &= 
(MLX5_PAGE_FAULT_RESUME_REQUESTOR | + MLX5_PAGE_FAULT_RESUME_WRITE | + MLX5_PAGE_FAULT_RESUME_RDMA); + flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0); + in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) | + (flags << MLX5_QPN_BITS)); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c new file mode 100644 index 000000000..f9d25dcd0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "mlx5_core.h" + +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + if (!srq) { + mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn); + return; + } + + srq->event(srq, event_type); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); +} + +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *srq; + + spin_lock(&table->lock); + + srq = radix_tree_lookup(&table->tree, srqn); + if (srq) + atomic_inc(&srq->refcount); + + spin_unlock(&table->lock); + + return srq; +} +EXPORT_SYMBOL(mlx5_core_get_srq); + +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen) +{ + struct mlx5_create_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_destroy_srq_mbox_in din; + struct mlx5_destroy_srq_mbox_out dout; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + + atomic_set(&srq->refcount, 1); + init_completion(&srq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, srq->srqn, srq); + spin_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); + goto err_cmd; + } + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.srqn = cpu_to_be32(srq->srqn); + din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(mlx5_core_create_srq); + +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) +{ + struct mlx5_destroy_srq_mbox_in in; + struct mlx5_destroy_srq_mbox_out out; + struct mlx5_srq_table *table = &dev->priv.srq_table; + struct mlx5_core_srq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, srq->srqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn); + return -EINVAL; + } + if (tmp != srq) { + mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + if (atomic_dec_and_test(&srq->refcount)) + complete(&srq->free); + wait_for_completion(&srq->free); + + return 0; +} +EXPORT_SYMBOL(mlx5_core_destroy_srq); + +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + struct mlx5_query_srq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); 
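+	/* Execute the QUERY_SRQ mailbox command; the reply is copied into *out. */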
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return mlx5_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_query_srq); + +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq) +{ + struct mlx5_arm_srq_mbox_in in; + struct mlx5_arm_srq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); + in.hdr.opmod = cpu_to_be16(!!is_srq); + in.srqn = cpu_to_be32(srq->srqn); + in.lwm = cpu_to_be16(lwm); + + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(mlx5_core_arm_srq); + +void mlx5_init_srq_table(struct mlx5_core_dev *dev) +{ + struct mlx5_srq_table *table = &dev->priv.srq_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev) +{ + /* nothing */ +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c new file mode 100644 index 000000000..5a89bb1d6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "mlx5_core.h" + +enum { + NUM_DRIVER_UARS = 4, + NUM_LOW_LAT_UUARS = 4, +}; + + +struct mlx5_alloc_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 uarn; + u8 rsvd[4]; +}; + +struct mlx5_free_uar_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn) +{ + struct mlx5_alloc_uar_mbox_in in; + struct mlx5_alloc_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_UAR); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) { + err = mlx5_cmd_status_to_err(&out.hdr); + goto ex; + } + + *uarn = be32_to_cpu(out.uarn) & 0xffffff; + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_alloc_uar); + +int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn) +{ + struct mlx5_free_uar_mbox_in in; + struct mlx5_free_uar_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_UAR); + in.uarn = cpu_to_be32(uarn); + err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto ex; + + if (out.hdr.status) + err = mlx5_cmd_status_to_err(&out.hdr); + +ex: + return err; +} +EXPORT_SYMBOL(mlx5_cmd_free_uar); + +static int need_uuar_lock(int uuarn) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + + if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS) + return 0; + + return 1; +} + +int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +{ + int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE; + struct mlx5_bf *bf; + phys_addr_t addr; + int err; + int i; + + uuari->num_uars = NUM_DRIVER_UARS; + uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS; + + mutex_init(&uuari->lock); + uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL); + if (!uuari->uars) + return -ENOMEM; + + uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL); + if (!uuari->bfs) { + err = -ENOMEM; + goto out_uars; + } + + uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap), + GFP_KERNEL); + if (!uuari->bitmap) { + err = -ENOMEM; + goto out_bfs; + } + + uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL); + if (!uuari->count) { + err = -ENOMEM; + goto out_bitmap; + } + + for (i = 0; i < uuari->num_uars; i++) { + err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index); + if (err) + goto out_count; + + addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT); + uuari->uars[i].map = ioremap(addr, PAGE_SIZE); + if (!uuari->uars[i].map) { + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + err = -ENOMEM; + goto out_count; + } + mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n", + uuari->uars[i].index, uuari->uars[i].map); + } + + for (i = 0; i < tot_uuars; i++) { + bf = &uuari->bfs[i]; + + bf->buf_size = dev->caps.gen.bf_reg_size / 2; + bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; + bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; + bf->reg = NULL; /* Add WC support */ + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size + + MLX5_BF_OFFSET; + bf->need_lock = need_uuar_lock(i); + spin_lock_init(&bf->lock); + spin_lock_init(&bf->lock32); + bf->uuarn = i; + } 
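+	/* All UARs are mapped and the per-UAR bf register bookkeeping is initialised. */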
+ + return 0; + +out_count: + for (i--; i >= 0; i--) { + iounmap(uuari->uars[i].map); + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + } + kfree(uuari->count); + +out_bitmap: + kfree(uuari->bitmap); + +out_bfs: + kfree(uuari->bfs); + +out_uars: + kfree(uuari->uars); + return err; +} + +int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) +{ + int i = uuari->num_uars; + + for (i--; i >= 0; i--) { + iounmap(uuari->uars[i].map); + mlx5_cmd_free_uar(dev, uuari->uars[i].index); + } + + kfree(uuari->count); + kfree(uuari->bitmap); + kfree(uuari->bfs); + kfree(uuari->uars); + + return 0; +} diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig new file mode 100644 index 000000000..d16b11ed2 --- /dev/null +++ b/drivers/net/ethernet/micrel/Kconfig @@ -0,0 +1,66 @@ +# +# Micrel device configuration +# + +config NET_VENDOR_MICREL + bool "Micrel devices" + default y + depends on (HAS_IOMEM && DMA_ENGINE) || SPI || PCI || HAS_IOMEM || \ + (ARM && ARCH_KS8695) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Micrel devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MICREL + +config ARM_KS8695_ETHER + tristate "KS8695 Ethernet support" + depends on ARM && ARCH_KS8695 + select MII + ---help--- + If you wish to compile a kernel for the KS8695 and want to + use the internal ethernet then you should answer Y to this. + +config KS8842 + tristate "Micrel KSZ8841/42 with generic bus interface" + depends on HAS_IOMEM && DMA_ENGINE + ---help--- + This platform driver is for KSZ8841(1-port) / KS8842(2-port) + ethernet switch chip (managed, VLAN, QoS) from Micrel or + Timberdale(FPGA). + +config KS8851 + tristate "Micrel KS8851 SPI" + depends on SPI + select MII + select CRC32 + select EEPROM_93CX6 + ---help--- + SPI driver for Micrel KS8851 SPI attached network chip. + +config KS8851_MLL + tristate "Micrel KS8851 MLL" + depends on HAS_IOMEM + select MII + ---help--- + This platform driver is for Micrel KS8851 Address/data bus + multiplexed network chip. + +config KSZ884X_PCI + tristate "Micrel KSZ8841/2 PCI" + depends on PCI + select MII + select CRC32 + ---help--- + This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip. + + To compile this driver as a module, choose M here. The module + will be called ksz884x. + +endif # NET_VENDOR_MICREL diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile new file mode 100644 index 000000000..c83e4bc50 --- /dev/null +++ b/drivers/net/ethernet/micrel/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the Micrel network device drivers. +# + +obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o +obj-$(CONFIG_KS8842) += ks8842.o +obj-$(CONFIG_KS8851) += ks8851.o +obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o +obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c new file mode 100644 index 000000000..a8522d8af --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -0,0 +1,1630 @@ +/* + * Micrel KS8695 (Centaur) Ethernet. 
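+ * Driver for the WAN, LAN and HPNA Ethernet interfaces of the KS8695 SoC.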
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Copyright 2008 Simtec Electronics + * Daniel Silverstone + * Vincent Sanders + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "ks8695net.h" + +#define MODULENAME "ks8695_ether" +#define MODULEVERSION "1.02" + +/* + * Transmit and device reset timeout, default 5 seconds. + */ +static int watchdog = 5000; + +/* Hardware structures */ + +/** + * struct rx_ring_desc - Receive descriptor ring element + * @status: The status of the descriptor element (E.g. who owns it) + * @length: The number of bytes in the block pointed to by data_ptr + * @data_ptr: The physical address of the data block to receive into + * @next_desc: The physical address of the next descriptor element. + */ +struct rx_ring_desc { + __le32 status; + __le32 length; + __le32 data_ptr; + __le32 next_desc; +}; + +/** + * struct tx_ring_desc - Transmit descriptor ring element + * @owner: Who owns the descriptor + * @status: The number of bytes in the block pointed to by data_ptr + * @data_ptr: The physical address of the data block to receive into + * @next_desc: The physical address of the next descriptor element. + */ +struct tx_ring_desc { + __le32 owner; + __le32 status; + __le32 data_ptr; + __le32 next_desc; +}; + +/** + * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings. 
+ * @skb: The buffer in the ring + * @dma_ptr: The mapped DMA pointer of the buffer + * @length: The number of bytes mapped to dma_ptr + */ +struct ks8695_skbuff { + struct sk_buff *skb; + dma_addr_t dma_ptr; + u32 length; +}; + +/* Private device structure */ + +#define MAX_TX_DESC 8 +#define MAX_TX_DESC_MASK 0x7 +#define MAX_RX_DESC 16 +#define MAX_RX_DESC_MASK 0xf + +/*napi_weight have better more than rx DMA buffers*/ +#define NAPI_WEIGHT 64 + +#define MAX_RXBUF_SIZE 0x700 + +#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC) +#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC) +#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE) + +/** + * enum ks8695_dtype - Device type + * @KS8695_DTYPE_WAN: This device is a WAN interface + * @KS8695_DTYPE_LAN: This device is a LAN interface + * @KS8695_DTYPE_HPNA: This device is an HPNA interface + */ +enum ks8695_dtype { + KS8695_DTYPE_WAN, + KS8695_DTYPE_LAN, + KS8695_DTYPE_HPNA, +}; + +/** + * struct ks8695_priv - Private data for the KS8695 Ethernet + * @in_suspend: Flag to indicate if we're suspending/resuming + * @ndev: The net_device for this interface + * @dev: The platform device object for this interface + * @dtype: The type of this device + * @io_regs: The ioremapped registers for this interface + * @napi : Add support NAPI for Rx + * @rx_irq_name: The textual name of the RX IRQ from the platform data + * @tx_irq_name: The textual name of the TX IRQ from the platform data + * @link_irq_name: The textual name of the link IRQ from the + * platform data if available + * @rx_irq: The IRQ number for the RX IRQ + * @tx_irq: The IRQ number for the TX IRQ + * @link_irq: The IRQ number for the link IRQ if available + * @regs_req: The resource request for the registers region + * @phyiface_req: The resource request for the phy/switch region + * if available + * @phyiface_regs: The ioremapped registers for the phy/switch if available + * @ring_base: The base pointer of the dma coherent memory for the rings + * @ring_base_dma: The DMA mapped equivalent of ring_base + * @tx_ring: The pointer in ring_base of the TX ring + * @tx_ring_used: The number of slots in the TX ring which are occupied + * @tx_ring_next_slot: The next slot to fill in the TX ring + * @tx_ring_dma: The DMA mapped equivalent of tx_ring + * @tx_buffers: The sk_buff mappings for the TX ring + * @txq_lock: A lock to protect the tx_buffers tx_ring_used etc variables + * @rx_ring: The pointer in ring_base of the RX ring + * @rx_ring_dma: The DMA mapped equivalent of rx_ring + * @rx_buffers: The sk_buff mappings for the RX ring + * @next_rx_desc_read: The next RX descriptor to read from on IRQ + * @rx_lock: A lock to protect Rx irq function + * @msg_enable: The flags for which messages to emit + */ +struct ks8695_priv { + int in_suspend; + struct net_device *ndev; + struct device *dev; + enum ks8695_dtype dtype; + void __iomem *io_regs; + + struct napi_struct napi; + + const char *rx_irq_name, *tx_irq_name, *link_irq_name; + int rx_irq, tx_irq, link_irq; + + struct resource *regs_req, *phyiface_req; + void __iomem *phyiface_regs; + + void *ring_base; + dma_addr_t ring_base_dma; + + struct tx_ring_desc *tx_ring; + int tx_ring_used; + int tx_ring_next_slot; + dma_addr_t tx_ring_dma; + struct ks8695_skbuff tx_buffers[MAX_TX_DESC]; + spinlock_t txq_lock; + + struct rx_ring_desc *rx_ring; + dma_addr_t rx_ring_dma; + struct ks8695_skbuff rx_buffers[MAX_RX_DESC]; + int next_rx_desc_read; + spinlock_t rx_lock; + + int msg_enable; +}; + +/* Register 
access */ + +/** + * ks8695_readreg - Read from a KS8695 ethernet register + * @ksp: The device to read from + * @reg: The register to read + */ +static inline u32 +ks8695_readreg(struct ks8695_priv *ksp, int reg) +{ + return readl(ksp->io_regs + reg); +} + +/** + * ks8695_writereg - Write to a KS8695 ethernet register + * @ksp: The device to write to + * @reg: The register to write + * @value: The value to write to the register + */ +static inline void +ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value) +{ + writel(value, ksp->io_regs + reg); +} + +/* Utility functions */ + +/** + * ks8695_port_type - Retrieve port-type as user-friendly string + * @ksp: The device to return the type for + * + * Returns a string indicating which of the WAN, LAN or HPNA + * ports this device is likely to represent. + */ +static const char * +ks8695_port_type(struct ks8695_priv *ksp) +{ + switch (ksp->dtype) { + case KS8695_DTYPE_LAN: + return "LAN"; + case KS8695_DTYPE_WAN: + return "WAN"; + case KS8695_DTYPE_HPNA: + return "HPNA"; + } + + return "UNKNOWN"; +} + +/** + * ks8695_update_mac - Update the MAC registers in the device + * @ksp: The device to update + * + * Updates the MAC registers in the KS8695 device from the address in the + * net_device structure associated with this interface. + */ +static void +ks8695_update_mac(struct ks8695_priv *ksp) +{ + /* Update the HW with the MAC from the net_device */ + struct net_device *ndev = ksp->ndev; + u32 machigh, maclow; + + maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0)); + machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0)); + + ks8695_writereg(ksp, KS8695_MAL, maclow); + ks8695_writereg(ksp, KS8695_MAH, machigh); + +} + +/** + * ks8695_refill_rxbuffers - Re-fill the RX buffer ring + * @ksp: The device to refill + * + * Iterates the RX ring of the device looking for empty slots. + * For each empty slot, we allocate and map a new SKB and give it + * to the hardware. + * This can be called from interrupt context safely. + */ +static void +ks8695_refill_rxbuffers(struct ks8695_priv *ksp) +{ + /* Run around the RX ring, filling in any missing sk_buff's */ + int buff_n; + + for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) { + if (!ksp->rx_buffers[buff_n].skb) { + struct sk_buff *skb = + netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE); + dma_addr_t mapping; + + ksp->rx_buffers[buff_n].skb = skb; + if (skb == NULL) { + /* Failed to allocate one, perhaps + * we'll try again later. 
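+				 * The refill is retried from ks8695_rx() on the next NAPI poll.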
+ */ + break; + } + + mapping = dma_map_single(ksp->dev, skb->data, + MAX_RXBUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ksp->dev, mapping))) { + /* Failed to DMA map this SKB, try later */ + dev_kfree_skb_irq(skb); + ksp->rx_buffers[buff_n].skb = NULL; + break; + } + ksp->rx_buffers[buff_n].dma_ptr = mapping; + ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE; + + /* Record this into the DMA ring */ + ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping); + ksp->rx_ring[buff_n].length = + cpu_to_le32(MAX_RXBUF_SIZE); + + wmb(); + + /* And give ownership over to the hardware */ + ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); + } + } +} + +/* Maximum number of multicast addresses which the KS8695 HW supports */ +#define KS8695_NR_ADDRESSES 16 + +/** + * ks8695_init_partial_multicast - Init the mcast addr registers + * @ksp: The device to initialise + * @addr: The multicast address list to use + * @nr_addr: The number of addresses in the list + * + * This routine is a helper for ks8695_set_multicast - it writes + * the additional-address registers in the KS8695 ethernet device + * and cleans up any others left behind. + */ +static void +ks8695_init_partial_multicast(struct ks8695_priv *ksp, + struct net_device *ndev) +{ + u32 low, high; + int i; + struct netdev_hw_addr *ha; + + i = 0; + netdev_for_each_mc_addr(ha, ndev) { + /* Ran out of space in chip? */ + BUG_ON(i == KS8695_NR_ADDRESSES); + + low = (ha->addr[2] << 24) | (ha->addr[3] << 16) | + (ha->addr[4] << 8) | (ha->addr[5]); + high = (ha->addr[0] << 8) | (ha->addr[1]); + + ks8695_writereg(ksp, KS8695_AAL_(i), low); + ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high); + i++; + } + + /* Clear the remaining Additional Station Addresses */ + for (; i < KS8695_NR_ADDRESSES; i++) { + ks8695_writereg(ksp, KS8695_AAL_(i), 0); + ks8695_writereg(ksp, KS8695_AAH_(i), 0); + } +} + +/* Interrupt handling */ + +/** + * ks8695_tx_irq - Transmit IRQ handler + * @irq: The IRQ which went off (ignored) + * @dev_id: The net_device for the interrupt + * + * Process the TX ring, clearing out any transmitted slots. + * Allows the net_device to pass us new packets once slots are + * freed. 
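+ * The queue is woken unconditionally once the ring has been scanned.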
+ */ +static irqreturn_t +ks8695_tx_irq(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ks8695_priv *ksp = netdev_priv(ndev); + int buff_n; + + for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) { + if (ksp->tx_buffers[buff_n].skb && + !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) { + rmb(); + /* An SKB which is not owned by HW is present */ + /* Update the stats for the net_device */ + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length; + + /* Free the packet from the ring */ + ksp->tx_ring[buff_n].data_ptr = 0; + + /* Free the sk_buff */ + dma_unmap_single(ksp->dev, + ksp->tx_buffers[buff_n].dma_ptr, + ksp->tx_buffers[buff_n].length, + DMA_TO_DEVICE); + dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb); + ksp->tx_buffers[buff_n].skb = NULL; + ksp->tx_ring_used--; + } + } + + netif_wake_queue(ndev); + + return IRQ_HANDLED; +} + +/** + * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit + * @ksp: Private data for the KS8695 Ethernet + * + * For KS8695 document: + * Interrupt Enable Register (offset 0xE204) + * Bit29 : WAN MAC Receive Interrupt Enable + * Bit16 : LAN MAC Receive Interrupt Enable + * Interrupt Status Register (Offset 0xF208) + * Bit29: WAN MAC Receive Status + * Bit16: LAN MAC Receive Status + * So, this Rx interrupt enable/status bit number is equal + * as Rx IRQ number. + */ +static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp) +{ + return ksp->rx_irq; +} + +/** + * ks8695_rx_irq - Receive IRQ handler + * @irq: The IRQ which went off (ignored) + * @dev_id: The net_device for the interrupt + * + * Inform NAPI that packet reception needs to be scheduled + */ + +static irqreturn_t +ks8695_rx_irq(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ks8695_priv *ksp = netdev_priv(ndev); + + spin_lock(&ksp->rx_lock); + + if (napi_schedule_prep(&ksp->napi)) { + unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN); + unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); + /*disable rx interrupt*/ + status &= ~mask_bit; + writel(status , KS8695_IRQ_VA + KS8695_INTEN); + __napi_schedule(&ksp->napi); + } + + spin_unlock(&ksp->rx_lock); + return IRQ_HANDLED; +} + +/** + * ks8695_rx - Receive packets called by NAPI poll method + * @ksp: Private data for the KS8695 Ethernet + * @budget: Number of packets allowed to process + */ +static int ks8695_rx(struct ks8695_priv *ksp, int budget) +{ + struct net_device *ndev = ksp->ndev; + struct sk_buff *skb; + int buff_n; + u32 flags; + int pktlen; + int received = 0; + + buff_n = ksp->next_rx_desc_read; + while (received < budget + && ksp->rx_buffers[buff_n].skb + && (!(ksp->rx_ring[buff_n].status & + cpu_to_le32(RDES_OWN)))) { + rmb(); + flags = le32_to_cpu(ksp->rx_ring[buff_n].status); + + /* Found an SKB which we own, this means we + * received a packet + */ + if ((flags & (RDES_FS | RDES_LS)) != + (RDES_FS | RDES_LS)) { + /* This packet is not the first and + * the last segment. 
Therefore it is + * a "spanning" packet and we can't + * handle it + */ + goto rx_failure; + } + + if (flags & (RDES_ES | RDES_RE)) { + /* It's an error packet */ + ndev->stats.rx_errors++; + if (flags & RDES_TL) + ndev->stats.rx_length_errors++; + if (flags & RDES_RF) + ndev->stats.rx_length_errors++; + if (flags & RDES_CE) + ndev->stats.rx_crc_errors++; + if (flags & RDES_RE) + ndev->stats.rx_missed_errors++; + + goto rx_failure; + } + + pktlen = flags & RDES_FLEN; + pktlen -= 4; /* Drop the CRC */ + + /* Retrieve the sk_buff */ + skb = ksp->rx_buffers[buff_n].skb; + + /* Clear it from the ring */ + ksp->rx_buffers[buff_n].skb = NULL; + ksp->rx_ring[buff_n].data_ptr = 0; + + /* Unmap the SKB */ + dma_unmap_single(ksp->dev, + ksp->rx_buffers[buff_n].dma_ptr, + ksp->rx_buffers[buff_n].length, + DMA_FROM_DEVICE); + + /* Relinquish the SKB to the network layer */ + skb_put(skb, pktlen); + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + + /* Record stats */ + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += pktlen; + goto rx_finished; + +rx_failure: + /* This ring entry is an error, but we can + * re-use the skb + */ + /* Give the ring entry back to the hardware */ + ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); +rx_finished: + received++; + buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; + } + + /* And note which RX descriptor we last did */ + ksp->next_rx_desc_read = buff_n; + + /* And refill the buffers */ + ks8695_refill_rxbuffers(ksp); + + /* Kick the RX DMA engine, in case it became suspended */ + ks8695_writereg(ksp, KS8695_DRSC, 0); + + return received; +} + + +/** + * ks8695_poll - Receive packet by NAPI poll method + * @ksp: Private data for the KS8695 Ethernet + * @budget: The remaining number packets for network subsystem + * + * Invoked by the network core when it requests for new + * packets from the driver + */ +static int ks8695_poll(struct napi_struct *napi, int budget) +{ + struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi); + unsigned long work_done; + + unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN); + unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp); + + work_done = ks8695_rx(ksp, budget); + + if (work_done < budget) { + unsigned long flags; + spin_lock_irqsave(&ksp->rx_lock, flags); + __napi_complete(napi); + /*enable rx interrupt*/ + writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN); + spin_unlock_irqrestore(&ksp->rx_lock, flags); + } + return work_done; +} + +/** + * ks8695_link_irq - Link change IRQ handler + * @irq: The IRQ which went off (ignored) + * @dev_id: The net_device for the interrupt + * + * The WAN interface can generate an IRQ when the link changes, + * report this to the net layer and the user. + */ +static irqreturn_t +ks8695_link_irq(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ks8695_priv *ksp = netdev_priv(ndev); + u32 ctrl; + + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + if (ctrl & WMC_WLS) { + netif_carrier_on(ndev); + if (netif_msg_link(ksp)) + dev_info(ksp->dev, + "%s: Link is now up (10%sMbps/%s-duplex)\n", + ndev->name, + (ctrl & WMC_WSS) ? "0" : "", + (ctrl & WMC_WDS) ? 
"Full" : "Half"); + } else { + netif_carrier_off(ndev); + if (netif_msg_link(ksp)) + dev_info(ksp->dev, "%s: Link is now down.\n", + ndev->name); + } + + return IRQ_HANDLED; +} + + +/* KS8695 Device functions */ + +/** + * ks8695_reset - Reset a KS8695 ethernet interface + * @ksp: The interface to reset + * + * Perform an engine reset of the interface and re-program it + * with sensible defaults. + */ +static void +ks8695_reset(struct ks8695_priv *ksp) +{ + int reset_timeout = watchdog; + /* Issue the reset via the TX DMA control register */ + ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST); + while (reset_timeout--) { + if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST)) + break; + msleep(1); + } + + if (reset_timeout < 0) { + dev_crit(ksp->dev, + "Timeout waiting for DMA engines to reset\n"); + /* And blithely carry on */ + } + + /* Definitely wait long enough before attempting to program + * the engines + */ + msleep(10); + + /* RX: unicast and broadcast */ + ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB); + /* TX: pad and add CRC */ + ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC); +} + +/** + * ks8695_shutdown - Shut down a KS8695 ethernet interface + * @ksp: The interface to shut down + * + * This disables packet RX/TX, cleans up IRQs, drains the rings, + * and basically places the interface into a clean shutdown + * state. + */ +static void +ks8695_shutdown(struct ks8695_priv *ksp) +{ + u32 ctrl; + int buff_n; + + /* Disable packet transmission */ + ctrl = ks8695_readreg(ksp, KS8695_DTXC); + ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE); + + /* Disable packet reception */ + ctrl = ks8695_readreg(ksp, KS8695_DRXC); + ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE); + + /* Release the IRQs */ + free_irq(ksp->rx_irq, ksp->ndev); + free_irq(ksp->tx_irq, ksp->ndev); + if (ksp->link_irq != -1) + free_irq(ksp->link_irq, ksp->ndev); + + /* Throw away any pending TX packets */ + for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) { + if (ksp->tx_buffers[buff_n].skb) { + /* Remove this SKB from the TX ring */ + ksp->tx_ring[buff_n].owner = 0; + ksp->tx_ring[buff_n].status = 0; + ksp->tx_ring[buff_n].data_ptr = 0; + + /* Unmap and bin this SKB */ + dma_unmap_single(ksp->dev, + ksp->tx_buffers[buff_n].dma_ptr, + ksp->tx_buffers[buff_n].length, + DMA_TO_DEVICE); + dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb); + ksp->tx_buffers[buff_n].skb = NULL; + } + } + + /* Purge the RX buffers */ + for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) { + if (ksp->rx_buffers[buff_n].skb) { + /* Remove the SKB from the RX ring */ + ksp->rx_ring[buff_n].status = 0; + ksp->rx_ring[buff_n].data_ptr = 0; + + /* Unmap and bin the SKB */ + dma_unmap_single(ksp->dev, + ksp->rx_buffers[buff_n].dma_ptr, + ksp->rx_buffers[buff_n].length, + DMA_FROM_DEVICE); + dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb); + ksp->rx_buffers[buff_n].skb = NULL; + } + } +} + + +/** + * ks8695_setup_irq - IRQ setup helper function + * @irq: The IRQ number to claim + * @irq_name: The name to give the IRQ claimant + * @handler: The function to call to handle the IRQ + * @ndev: The net_device to pass in as the dev_id argument to the handler + * + * Return 0 on success. 
+ */ +static int +ks8695_setup_irq(int irq, const char *irq_name, + irq_handler_t handler, struct net_device *ndev) +{ + int ret; + + ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev); + + if (ret) { + dev_err(&ndev->dev, "failure to request IRQ %d\n", irq); + return ret; + } + + return 0; +} + +/** + * ks8695_init_net - Initialise a KS8695 ethernet interface + * @ksp: The interface to initialise + * + * This routine fills the RX ring, initialises the DMA engines, + * allocates the IRQs and then starts the packet TX and RX + * engines. + */ +static int +ks8695_init_net(struct ks8695_priv *ksp) +{ + int ret; + u32 ctrl; + + ks8695_refill_rxbuffers(ksp); + + /* Initialise the DMA engines */ + ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma); + ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma); + + /* Request the IRQs */ + ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name, + ks8695_rx_irq, ksp->ndev); + if (ret) + return ret; + ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name, + ks8695_tx_irq, ksp->ndev); + if (ret) + return ret; + if (ksp->link_irq != -1) { + ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name, + ks8695_link_irq, ksp->ndev); + if (ret) + return ret; + } + + /* Set up the ring indices */ + ksp->next_rx_desc_read = 0; + ksp->tx_ring_next_slot = 0; + ksp->tx_ring_used = 0; + + /* Bring up transmission */ + ctrl = ks8695_readreg(ksp, KS8695_DTXC); + /* Enable packet transmission */ + ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE); + + /* Bring up the reception */ + ctrl = ks8695_readreg(ksp, KS8695_DRXC); + /* Enable packet reception */ + ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE); + /* And start the DMA engine */ + ks8695_writereg(ksp, KS8695_DRSC, 0); + + /* All done */ + return 0; +} + +/** + * ks8695_release_device - HW resource release for KS8695 e-net + * @ksp: The device to be freed + * + * This unallocates io memory regions, dma-coherent regions etc + * which were allocated in ks8695_probe. + */ +static void +ks8695_release_device(struct ks8695_priv *ksp) +{ + /* Unmap the registers */ + iounmap(ksp->io_regs); + if (ksp->phyiface_regs) + iounmap(ksp->phyiface_regs); + + /* And release the request */ + release_resource(ksp->regs_req); + kfree(ksp->regs_req); + if (ksp->phyiface_req) { + release_resource(ksp->phyiface_req); + kfree(ksp->phyiface_req); + } + + /* Free the ring buffers */ + dma_free_coherent(ksp->dev, RING_DMA_SIZE, + ksp->ring_base, ksp->ring_base_dma); +} + +/* Ethtool support */ + +/** + * ks8695_get_msglevel - Get the messages enabled for emission + * @ndev: The network device to read from + */ +static u32 +ks8695_get_msglevel(struct net_device *ndev) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + + return ksp->msg_enable; +} + +/** + * ks8695_set_msglevel - Set the messages enabled for emission + * @ndev: The network device to configure + * @value: The messages to set for emission + */ +static void +ks8695_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + + ksp->msg_enable = value; +} + +/** + * ks8695_wan_get_settings - Get device-specific settings. + * @ndev: The network device to read settings from + * @cmd: The ethtool structure to read into + */ +static int +ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + u32 ctrl; + + /* All ports on the KS8695 support these... 
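+	 * (autonegotiation and pause are added below)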
*/ + cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_TP | SUPPORTED_MII); + cmd->transceiver = XCVR_INTERNAL; + + cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + cmd->port = PORT_MII; + cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause); + cmd->phy_address = 0; + + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + if ((ctrl & WMC_WAND) == 0) { + /* auto-negotiation is enabled */ + cmd->advertising |= ADVERTISED_Autoneg; + if (ctrl & WMC_WANA100F) + cmd->advertising |= ADVERTISED_100baseT_Full; + if (ctrl & WMC_WANA100H) + cmd->advertising |= ADVERTISED_100baseT_Half; + if (ctrl & WMC_WANA10F) + cmd->advertising |= ADVERTISED_10baseT_Full; + if (ctrl & WMC_WANA10H) + cmd->advertising |= ADVERTISED_10baseT_Half; + if (ctrl & WMC_WANAP) + cmd->advertising |= ADVERTISED_Pause; + cmd->autoneg = AUTONEG_ENABLE; + + ethtool_cmd_speed_set(cmd, + (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10); + cmd->duplex = (ctrl & WMC_WDS) ? + DUPLEX_FULL : DUPLEX_HALF; + } else { + /* auto-negotiation is disabled */ + cmd->autoneg = AUTONEG_DISABLE; + + ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ? + SPEED_100 : SPEED_10)); + cmd->duplex = (ctrl & WMC_WANFF) ? + DUPLEX_FULL : DUPLEX_HALF; + } + + return 0; +} + +/** + * ks8695_wan_set_settings - Set device-specific settings. + * @ndev: The network device to configure + * @cmd: The settings to configure + */ +static int +ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + u32 ctrl; + + if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100)) + return -EINVAL; + if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL)) + return -EINVAL; + if (cmd->port != PORT_MII) + return -EINVAL; + if (cmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + if ((cmd->autoneg != AUTONEG_DISABLE) && + (cmd->autoneg != AUTONEG_ENABLE)) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + if ((cmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full)) == 0) + return -EINVAL; + + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + + ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H | + WMC_WANA10F | WMC_WANA10H); + if (cmd->advertising & ADVERTISED_100baseT_Full) + ctrl |= WMC_WANA100F; + if (cmd->advertising & ADVERTISED_100baseT_Half) + ctrl |= WMC_WANA100H; + if (cmd->advertising & ADVERTISED_10baseT_Full) + ctrl |= WMC_WANA10F; + if (cmd->advertising & ADVERTISED_10baseT_Half) + ctrl |= WMC_WANA10H; + + /* force a re-negotiation */ + ctrl |= WMC_WANR; + writel(ctrl, ksp->phyiface_regs + KS8695_WMC); + } else { + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + + /* disable auto-negotiation */ + ctrl |= WMC_WAND; + ctrl &= ~(WMC_WANF100 | WMC_WANFF); + + if (cmd->speed == SPEED_100) + ctrl |= WMC_WANF100; + if (cmd->duplex == DUPLEX_FULL) + ctrl |= WMC_WANFF; + + writel(ctrl, ksp->phyiface_regs + KS8695_WMC); + } + + return 0; +} + +/** + * ks8695_wan_nwayreset - Restart the autonegotiation on the port. 
+ * @ndev: The network device to restart autoneotiation on + */ +static int +ks8695_wan_nwayreset(struct net_device *ndev) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + u32 ctrl; + + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + + if ((ctrl & WMC_WAND) == 0) + writel(ctrl | WMC_WANR, + ksp->phyiface_regs + KS8695_WMC); + else + /* auto-negotiation not enabled */ + return -EINVAL; + + return 0; +} + +/** + * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising + * @ndev: The device to retrieve settings from + * @param: The structure to fill out with the information + */ +static void +ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + u32 ctrl; + + ctrl = readl(ksp->phyiface_regs + KS8695_WMC); + + /* advertise Pause */ + param->autoneg = (ctrl & WMC_WANAP); + + /* current Rx Flow-control */ + ctrl = ks8695_readreg(ksp, KS8695_DRXC); + param->rx_pause = (ctrl & DRXC_RFCE); + + /* current Tx Flow-control */ + ctrl = ks8695_readreg(ksp, KS8695_DTXC); + param->tx_pause = (ctrl & DTXC_TFCE); +} + +/** + * ks8695_get_drvinfo - Retrieve driver information + * @ndev: The network device to retrieve info about + * @info: The info structure to fill out. + */ +static void +ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, MODULEVERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(ndev->dev.parent), + sizeof(info->bus_info)); +} + +static const struct ethtool_ops ks8695_ethtool_ops = { + .get_msglevel = ks8695_get_msglevel, + .set_msglevel = ks8695_set_msglevel, + .get_drvinfo = ks8695_get_drvinfo, +}; + +static const struct ethtool_ops ks8695_wan_ethtool_ops = { + .get_msglevel = ks8695_get_msglevel, + .set_msglevel = ks8695_set_msglevel, + .get_settings = ks8695_wan_get_settings, + .set_settings = ks8695_wan_set_settings, + .nway_reset = ks8695_wan_nwayreset, + .get_link = ethtool_op_get_link, + .get_pauseparam = ks8695_wan_get_pause, + .get_drvinfo = ks8695_get_drvinfo, +}; + +/* Network device interface functions */ + +/** + * ks8695_set_mac - Update MAC in net dev and HW + * @ndev: The network device to update + * @addr: The new MAC address to set + */ +static int +ks8695_set_mac(struct net_device *ndev, void *addr) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + struct sockaddr *address = addr; + + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len); + + ks8695_update_mac(ksp); + + dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n", + ndev->name, ndev->dev_addr); + + return 0; +} + +/** + * ks8695_set_multicast - Set up the multicast behaviour of the interface + * @ndev: The net_device to configure + * + * This routine, called by the net layer, configures promiscuity + * and multicast reception behaviour for the interface. 
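+ * Up to KS8695_NR_ADDRESSES specific multicast addresses are programmed
+ * into the additional-address registers; beyond that the receive-all-multicast
+ * bit (DRXC_RM) is set instead.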
+ */ +static void +ks8695_set_multicast(struct net_device *ndev) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + u32 ctrl; + + ctrl = ks8695_readreg(ksp, KS8695_DRXC); + + if (ndev->flags & IFF_PROMISC) { + /* enable promiscuous mode */ + ctrl |= DRXC_RA; + } else if (ndev->flags & ~IFF_PROMISC) { + /* disable promiscuous mode */ + ctrl &= ~DRXC_RA; + } + + if (ndev->flags & IFF_ALLMULTI) { + /* enable all multicast mode */ + ctrl |= DRXC_RM; + } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) { + /* more specific multicast addresses than can be + * handled in hardware + */ + ctrl |= DRXC_RM; + } else { + /* enable specific multicasts */ + ctrl &= ~DRXC_RM; + ks8695_init_partial_multicast(ksp, ndev); + } + + ks8695_writereg(ksp, KS8695_DRXC, ctrl); +} + +/** + * ks8695_timeout - Handle a network tx/rx timeout. + * @ndev: The net_device which timed out. + * + * A network transaction timed out, reset the device. + */ +static void +ks8695_timeout(struct net_device *ndev) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + + netif_stop_queue(ndev); + ks8695_shutdown(ksp); + + ks8695_reset(ksp); + + ks8695_update_mac(ksp); + + /* We ignore the return from this since it managed to init + * before it probably will be okay to init again. + */ + ks8695_init_net(ksp); + + /* Reconfigure promiscuity etc */ + ks8695_set_multicast(ndev); + + /* And start the TX queue once more */ + netif_start_queue(ndev); +} + +/** + * ks8695_start_xmit - Start a packet transmission + * @skb: The packet to transmit + * @ndev: The network device to send the packet on + * + * This routine, called by the net layer, takes ownership of the + * sk_buff and adds it to the TX ring. It then kicks the TX DMA + * engine to ensure transmission begins. + */ +static int +ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + int buff_n; + dma_addr_t dmap; + + spin_lock_irq(&ksp->txq_lock); + + if (ksp->tx_ring_used == MAX_TX_DESC) { + /* Somehow we got entered when we have no room */ + spin_unlock_irq(&ksp->txq_lock); + return NETDEV_TX_BUSY; + } + + buff_n = ksp->tx_ring_next_slot; + + BUG_ON(ksp->tx_buffers[buff_n].skb); + + dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(ksp->dev, dmap))) { + /* Failed to DMA map this SKB, give it back for now */ + spin_unlock_irq(&ksp->txq_lock); + dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\ + "transmission, trying later\n", ndev->name); + return NETDEV_TX_BUSY; + } + + ksp->tx_buffers[buff_n].dma_ptr = dmap; + /* Mapped okay, store the buffer pointer and length for later */ + ksp->tx_buffers[buff_n].skb = skb; + ksp->tx_buffers[buff_n].length = skb->len; + + /* Fill out the TX descriptor */ + ksp->tx_ring[buff_n].data_ptr = + cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr); + ksp->tx_ring[buff_n].status = + cpu_to_le32(TDES_IC | TDES_FS | TDES_LS | + (skb->len & TDES_TBS)); + + wmb(); + + /* Hand it over to the hardware */ + ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN); + + if (++ksp->tx_ring_used == MAX_TX_DESC) + netif_stop_queue(ndev); + + /* Kick the TX DMA in case it decided to go IDLE */ + ks8695_writereg(ksp, KS8695_DTSC, 0); + + /* And update the next ring slot */ + ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK; + + spin_unlock_irq(&ksp->txq_lock); + return NETDEV_TX_OK; +} + +/** + * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface + * @ndev: The net_device to stop + * + * This disables the TX queue and cleans up a KS8695 
ethernet + * device. + */ +static int +ks8695_stop(struct net_device *ndev) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + + netif_stop_queue(ndev); + napi_disable(&ksp->napi); + + ks8695_shutdown(ksp); + + return 0; +} + +/** + * ks8695_open - Open (bring up) a KS8695 ethernet interface + * @ndev: The net_device to open + * + * This resets, configures the MAC, initialises the RX ring and + * DMA engines and starts the TX queue for a KS8695 ethernet + * device. + */ +static int +ks8695_open(struct net_device *ndev) +{ + struct ks8695_priv *ksp = netdev_priv(ndev); + int ret; + + ks8695_reset(ksp); + + ks8695_update_mac(ksp); + + ret = ks8695_init_net(ksp); + if (ret) { + ks8695_shutdown(ksp); + return ret; + } + + napi_enable(&ksp->napi); + netif_start_queue(ndev); + + return 0; +} + +/* Platform device driver */ + +/** + * ks8695_init_switch - Init LAN switch to known good defaults. + * @ksp: The device to initialise + * + * This initialises the LAN switch in the KS8695 to a known-good + * set of defaults. + */ +static void +ks8695_init_switch(struct ks8695_priv *ksp) +{ + u32 ctrl; + + /* Default value for SEC0 according to datasheet */ + ctrl = 0x40819e00; + + /* LED0 = Speed LED1 = Link/Activity */ + ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S); + ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY); + + /* Enable Switch */ + ctrl |= SEC0_ENABLE; + + writel(ctrl, ksp->phyiface_regs + KS8695_SEC0); + + /* Defaults for SEC1 */ + writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1); +} + +/** + * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults + * @ksp: The device to initialise + * + * This initialises a KS8695's WAN phy to sensible values for + * autonegotiation etc. + */ +static void +ks8695_init_wan_phy(struct ks8695_priv *ksp) +{ + u32 ctrl; + + /* Support auto-negotiation */ + ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H | + WMC_WANA10F | WMC_WANA10H); + + /* LED0 = Activity , LED1 = Link */ + ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK); + + /* Restart Auto-negotiation */ + ctrl |= WMC_WANR; + + writel(ctrl, ksp->phyiface_regs + KS8695_WMC); + + writel(0, ksp->phyiface_regs + KS8695_WPPM); + writel(0, ksp->phyiface_regs + KS8695_PPS); +} + +static const struct net_device_ops ks8695_netdev_ops = { + .ndo_open = ks8695_open, + .ndo_stop = ks8695_stop, + .ndo_start_xmit = ks8695_start_xmit, + .ndo_tx_timeout = ks8695_timeout, + .ndo_set_mac_address = ks8695_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = ks8695_set_multicast, +}; + +/** + * ks8695_probe - Probe and initialise a KS8695 ethernet interface + * @pdev: The platform device to probe + * + * Initialise a KS8695 ethernet device from platform data. + * + * This driver requires at least one IORESOURCE_MEM for the + * registers and two IORESOURCE_IRQ for the RX and TX IRQs + * respectively. It can optionally take an additional + * IORESOURCE_MEM for the switch or phy in the case of the lan or + * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan + * port. 
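+ *
+ * The port type is inferred from the resources provided: switch/phy
+ * registers without a link IRQ select the LAN switch, switch/phy
+ * registers together with a link IRQ select the WAN port, and the
+ * absence of both selects the HPNA port, which has no PHY to
+ * initialise.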
+ */ +static int +ks8695_probe(struct platform_device *pdev) +{ + struct ks8695_priv *ksp; + struct net_device *ndev; + struct resource *regs_res, *phyiface_res; + struct resource *rxirq_res, *txirq_res, *linkirq_res; + int ret = 0; + int buff_n; + u32 machigh, maclow; + + /* Initialise a net_device */ + ndev = alloc_etherdev(sizeof(struct ks8695_priv)); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + dev_dbg(&pdev->dev, "ks8695_probe() called\n"); + + /* Configure our private structure a little */ + ksp = netdev_priv(ndev); + + ksp->dev = &pdev->dev; + ksp->ndev = ndev; + ksp->msg_enable = NETIF_MSG_LINK; + + /* Retrieve resources */ + regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + + rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2); + + if (!(regs_res && rxirq_res && txirq_res)) { + dev_err(ksp->dev, "insufficient resources\n"); + ret = -ENOENT; + goto failure; + } + + ksp->regs_req = request_mem_region(regs_res->start, + resource_size(regs_res), + pdev->name); + + if (!ksp->regs_req) { + dev_err(ksp->dev, "cannot claim register space\n"); + ret = -EIO; + goto failure; + } + + ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res)); + + if (!ksp->io_regs) { + dev_err(ksp->dev, "failed to ioremap registers\n"); + ret = -EINVAL; + goto failure; + } + + if (phyiface_res) { + ksp->phyiface_req = + request_mem_region(phyiface_res->start, + resource_size(phyiface_res), + phyiface_res->name); + + if (!ksp->phyiface_req) { + dev_err(ksp->dev, + "cannot claim switch register space\n"); + ret = -EIO; + goto failure; + } + + ksp->phyiface_regs = ioremap(phyiface_res->start, + resource_size(phyiface_res)); + + if (!ksp->phyiface_regs) { + dev_err(ksp->dev, + "failed to ioremap switch registers\n"); + ret = -EINVAL; + goto failure; + } + } + + ksp->rx_irq = rxirq_res->start; + ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX"; + ksp->tx_irq = txirq_res->start; + ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX"; + ksp->link_irq = (linkirq_res ? linkirq_res->start : -1); + ksp->link_irq_name = (linkirq_res && linkirq_res->name) ? + linkirq_res->name : "Ethernet Link"; + + /* driver system setup */ + ndev->netdev_ops = &ks8695_netdev_ops; + ndev->watchdog_timeo = msecs_to_jiffies(watchdog); + + netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT); + + /* Retrieve the default MAC addr from the chip. */ + /* The bootloader should have left it in there for us. */ + + machigh = ks8695_readreg(ksp, KS8695_MAH); + maclow = ks8695_readreg(ksp, KS8695_MAL); + + ndev->dev_addr[0] = (machigh >> 8) & 0xFF; + ndev->dev_addr[1] = machigh & 0xFF; + ndev->dev_addr[2] = (maclow >> 24) & 0xFF; + ndev->dev_addr[3] = (maclow >> 16) & 0xFF; + ndev->dev_addr[4] = (maclow >> 8) & 0xFF; + ndev->dev_addr[5] = maclow & 0xFF; + + if (!is_valid_ether_addr(ndev->dev_addr)) + dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please " + "set using ifconfig\n", ndev->name); + + /* In order to be efficient memory-wise, we allocate both + * rings in one go. 
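+	 * The TX ring occupies the first TX_RING_DMA_SIZE bytes of the
+	 * coherent allocation and the RX ring follows immediately after
+	 * it; the corresponding bus addresses are derived from
+	 * ring_base_dma in the same way.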
+ */ + ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE, + &ksp->ring_base_dma, GFP_KERNEL); + if (!ksp->ring_base) { + ret = -ENOMEM; + goto failure; + } + + /* Specify the TX DMA ring buffer */ + ksp->tx_ring = ksp->ring_base; + ksp->tx_ring_dma = ksp->ring_base_dma; + + /* And initialise the queue's lock */ + spin_lock_init(&ksp->txq_lock); + spin_lock_init(&ksp->rx_lock); + + /* Specify the RX DMA ring buffer */ + ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE; + ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE; + + /* Zero the descriptor rings */ + memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE); + memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE); + + /* Build the rings */ + for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) { + ksp->tx_ring[buff_n].next_desc = + cpu_to_le32(ksp->tx_ring_dma + + (sizeof(struct tx_ring_desc) * + ((buff_n + 1) & MAX_TX_DESC_MASK))); + } + + for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) { + ksp->rx_ring[buff_n].next_desc = + cpu_to_le32(ksp->rx_ring_dma + + (sizeof(struct rx_ring_desc) * + ((buff_n + 1) & MAX_RX_DESC_MASK))); + } + + /* Initialise the port (physically) */ + if (ksp->phyiface_regs && ksp->link_irq == -1) { + ks8695_init_switch(ksp); + ksp->dtype = KS8695_DTYPE_LAN; + ndev->ethtool_ops = &ks8695_ethtool_ops; + } else if (ksp->phyiface_regs && ksp->link_irq != -1) { + ks8695_init_wan_phy(ksp); + ksp->dtype = KS8695_DTYPE_WAN; + ndev->ethtool_ops = &ks8695_wan_ethtool_ops; + } else { + /* No initialisation since HPNA does not have a PHY */ + ksp->dtype = KS8695_DTYPE_HPNA; + ndev->ethtool_ops = &ks8695_ethtool_ops; + } + + /* And bring up the net_device with the net core */ + platform_set_drvdata(pdev, ndev); + ret = register_netdev(ndev); + + if (ret == 0) { + dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n", + ks8695_port_type(ksp), ndev->dev_addr); + } else { + /* Report the failure to register the net_device */ + dev_err(ksp->dev, "ks8695net: failed to register netdev.\n"); + goto failure; + } + + /* All is well */ + return 0; + + /* Error exit path */ +failure: + ks8695_release_device(ksp); + free_netdev(ndev); + + return ret; +} + +/** + * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device. + * @pdev: The device to suspend + * @state: The suspend state + * + * This routine detaches and shuts down a KS8695 ethernet device. + */ +static int +ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ks8695_priv *ksp = netdev_priv(ndev); + + ksp->in_suspend = 1; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + ks8695_shutdown(ksp); + } + + return 0; +} + +/** + * ks8695_drv_resume - Resume a KS8695 ethernet platform device. + * @pdev: The device to resume + * + * This routine re-initialises and re-attaches a KS8695 ethernet + * device. + */ +static int +ks8695_drv_resume(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ks8695_priv *ksp = netdev_priv(ndev); + + if (netif_running(ndev)) { + ks8695_reset(ksp); + ks8695_init_net(ksp); + ks8695_set_multicast(ndev); + netif_device_attach(ndev); + } + + ksp->in_suspend = 0; + + return 0; +} + +/** + * ks8695_drv_remove - Remove a KS8695 net device on driver unload. + * @pdev: The platform device to remove + * + * This unregisters and releases a KS8695 ethernet device. 
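+ *
+ * The NAPI context is deleted and the net_device unregistered before
+ * the device's resources are released and the net_device itself is
+ * freed.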
+ */ +static int +ks8695_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ks8695_priv *ksp = netdev_priv(ndev); + + netif_napi_del(&ksp->napi); + + unregister_netdev(ndev); + ks8695_release_device(ksp); + free_netdev(ndev); + + dev_dbg(&pdev->dev, "released and freed device\n"); + return 0; +} + +static struct platform_driver ks8695_driver = { + .driver = { + .name = MODULENAME, + }, + .probe = ks8695_probe, + .remove = ks8695_drv_remove, + .suspend = ks8695_drv_suspend, + .resume = ks8695_drv_resume, +}; + +module_platform_driver(ks8695_driver); + +MODULE_AUTHOR("Simtec Electronics"); +MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" MODULENAME); + +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); diff --git a/drivers/net/ethernet/micrel/ks8695net.h b/drivers/net/ethernet/micrel/ks8695net.h new file mode 100644 index 000000000..80eff6ea5 --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8695net.h @@ -0,0 +1,107 @@ +/* + * Micrel KS8695 (Centaur) Ethernet. + * + * Copyright 2008 Simtec Electronics + * Daniel Silverstone + * Vincent Sanders + */ + +#ifndef KS8695NET_H +#define KS8695NET_H + +/* Receive descriptor flags */ +#define RDES_OWN (1 << 31) /* Ownership */ +#define RDES_FS (1 << 30) /* First Descriptor */ +#define RDES_LS (1 << 29) /* Last Descriptor */ +#define RDES_IPE (1 << 28) /* IP Checksum error */ +#define RDES_TCPE (1 << 27) /* TCP Checksum error */ +#define RDES_UDPE (1 << 26) /* UDP Checksum error */ +#define RDES_ES (1 << 25) /* Error summary */ +#define RDES_MF (1 << 24) /* Multicast Frame */ +#define RDES_RE (1 << 19) /* MII Error reported */ +#define RDES_TL (1 << 18) /* Frame too Long */ +#define RDES_RF (1 << 17) /* Runt Frame */ +#define RDES_CE (1 << 16) /* CRC error */ +#define RDES_FT (1 << 15) /* Frame Type */ +#define RDES_FLEN (0x7ff) /* Frame Length */ + +#define RDES_RER (1 << 25) /* Receive End of Ring */ +#define RDES_RBS (0x7ff) /* Receive Buffer Size */ + +/* Transmit descriptor flags */ + +#define TDES_OWN (1 << 31) /* Ownership */ + +#define TDES_IC (1 << 31) /* Interrupt on Completion */ +#define TDES_FS (1 << 30) /* First Segment */ +#define TDES_LS (1 << 29) /* Last Segment */ +#define TDES_IPCKG (1 << 28) /* IP Checksum generate */ +#define TDES_TCPCKG (1 << 27) /* TCP Checksum generate */ +#define TDES_UDPCKG (1 << 26) /* UDP Checksum generate */ +#define TDES_TER (1 << 25) /* Transmit End of Ring */ +#define TDES_TBS (0x7ff) /* Transmit Buffer Size */ + +/* + * Network controller register offsets + */ +#define KS8695_DTXC (0x00) /* DMA Transmit Control */ +#define KS8695_DRXC (0x04) /* DMA Receive Control */ +#define KS8695_DTSC (0x08) /* DMA Transmit Start Command */ +#define KS8695_DRSC (0x0c) /* DMA Receive Start Command */ +#define KS8695_TDLB (0x10) /* Transmit Descriptor List + * Base Address + */ +#define KS8695_RDLB (0x14) /* Receive Descriptor List + * Base Address + */ +#define KS8695_MAL (0x18) /* MAC Station Address Low */ +#define KS8695_MAH (0x1c) /* MAC Station Address High */ +#define KS8695_AAL_(n) (0x80 + ((n)*8)) /* MAC Additional + * Station Address + * (0..15) Low + */ +#define KS8695_AAH_(n) (0x84 + ((n)*8)) /* MAC Additional + * Station Address + * (0..15) High + */ + + +/* DMA Transmit Control Register */ +#define DTXC_TRST (1 << 31) /* Soft Reset */ +#define DTXC_TBS (0x3f << 24) /* Transmit Burst Size */ +#define DTXC_TUCG (1 << 18) /* Transmit UDP 
+ * Checksum Generate + */ +#define DTXC_TTCG (1 << 17) /* Transmit TCP + * Checksum Generate + */ +#define DTXC_TICG (1 << 16) /* Transmit IP + * Checksum Generate + */ +#define DTXC_TFCE (1 << 9) /* Transmit Flow + * Control Enable + */ +#define DTXC_TLB (1 << 8) /* Loopback mode */ +#define DTXC_TEP (1 << 2) /* Transmit Enable Padding */ +#define DTXC_TAC (1 << 1) /* Transmit Add CRC */ +#define DTXC_TE (1 << 0) /* TX Enable */ + +/* DMA Receive Control Register */ +#define DRXC_RBS (0x3f << 24) /* Receive Burst Size */ +#define DRXC_RUCC (1 << 18) /* Receive UDP Checksum check */ +#define DRXC_RTCG (1 << 17) /* Receive TCP Checksum check */ +#define DRXC_RICG (1 << 16) /* Receive IP Checksum check */ +#define DRXC_RFCE (1 << 9) /* Receive Flow Control + * Enable + */ +#define DRXC_RB (1 << 6) /* Receive Broadcast */ +#define DRXC_RM (1 << 5) /* Receive Multicast */ +#define DRXC_RU (1 << 4) /* Receive Unicast */ +#define DRXC_RERR (1 << 3) /* Receive Error Frame */ +#define DRXC_RA (1 << 2) /* Receive All */ +#define DRXC_RE (1 << 0) /* RX Enable */ + +/* Additional Station Address High */ +#define AAH_E (1 << 31) /* Address Enabled */ + +#endif /* KS8695NET_H */ diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c new file mode 100644 index 000000000..f78909a00 --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -0,0 +1,1269 @@ +/* + * ks8842.c timberdale KS8842 ethernet driver + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +/* Supports: + * The Micrel KS8842 behind the timberdale FPGA + * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "ks8842" + +/* Timberdale specific Registers */ +#define REG_TIMB_RST 0x1c +#define REG_TIMB_FIFO 0x20 +#define REG_TIMB_ISR 0x24 +#define REG_TIMB_IER 0x28 +#define REG_TIMB_IAR 0x2C +#define REQ_TIMB_DMA_RESUME 0x30 + +/* KS8842 registers */ + +#define REG_SELECT_BANK 0x0e + +/* bank 0 registers */ +#define REG_QRFCR 0x04 + +/* bank 2 registers */ +#define REG_MARL 0x00 +#define REG_MARM 0x02 +#define REG_MARH 0x04 + +/* bank 3 registers */ +#define REG_GRR 0x06 + +/* bank 16 registers */ +#define REG_TXCR 0x00 +#define REG_TXSR 0x02 +#define REG_RXCR 0x04 +#define REG_TXMIR 0x08 +#define REG_RXMIR 0x0A + +/* bank 17 registers */ +#define REG_TXQCR 0x00 +#define REG_RXQCR 0x02 +#define REG_TXFDPR 0x04 +#define REG_RXFDPR 0x06 +#define REG_QMU_DATA_LO 0x08 +#define REG_QMU_DATA_HI 0x0A + +/* bank 18 registers */ +#define REG_IER 0x00 +#define IRQ_LINK_CHANGE 0x8000 +#define IRQ_TX 0x4000 +#define IRQ_RX 0x2000 +#define IRQ_RX_OVERRUN 0x0800 +#define IRQ_TX_STOPPED 0x0200 +#define IRQ_RX_STOPPED 0x0100 +#define IRQ_RX_ERROR 0x0080 +#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ + IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) +/* When running via timberdale in DMA mode, the RX interrupt should be + enabled in the KS8842, but not in the FPGA IP, since the IP handles + RX DMA internally. + TX interrupts are not needed it is handled by the FPGA the driver is + notified via DMA callbacks. +*/ +#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ + IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) +#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) +#define REG_ISR 0x02 +#define REG_RXSR 0x04 +#define RXSR_VALID 0x8000 +#define RXSR_BROADCAST 0x80 +#define RXSR_MULTICAST 0x40 +#define RXSR_UNICAST 0x20 +#define RXSR_FRAMETYPE 0x08 +#define RXSR_TOO_LONG 0x04 +#define RXSR_RUNT 0x02 +#define RXSR_CRC_ERROR 0x01 +#define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR) + +/* bank 32 registers */ +#define REG_SW_ID_AND_ENABLE 0x00 +#define REG_SGCR1 0x02 +#define REG_SGCR2 0x04 +#define REG_SGCR3 0x06 + +/* bank 39 registers */ +#define REG_MACAR1 0x00 +#define REG_MACAR2 0x02 +#define REG_MACAR3 0x04 + +/* bank 45 registers */ +#define REG_P1MBCR 0x00 +#define REG_P1MBSR 0x02 + +/* bank 46 registers */ +#define REG_P2MBCR 0x00 +#define REG_P2MBSR 0x02 + +/* bank 48 registers */ +#define REG_P1CR2 0x02 + +/* bank 49 registers */ +#define REG_P1CR4 0x02 +#define REG_P1SR 0x04 + +/* flags passed by platform_device for configuration */ +#define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ +#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ + +#define DMA_BUFFER_SIZE 2048 + +struct ks8842_tx_dma_ctl { + struct dma_chan *chan; + struct dma_async_tx_descriptor *adesc; + void *buf; + struct scatterlist sg; + int channel; +}; + +struct ks8842_rx_dma_ctl { + struct dma_chan *chan; + struct dma_async_tx_descriptor *adesc; + struct sk_buff *skb; + struct scatterlist sg; + struct tasklet_struct tasklet; + int channel; +}; + +#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ + ((adapter)->dma_rx.channel != -1)) + +struct ks8842_adapter { + void __iomem *hw_addr; + int irq; + unsigned long conf_flags; /* copy 
of platform_device config */ + struct tasklet_struct tasklet; + spinlock_t lock; /* spinlock to be interrupt safe */ + struct work_struct timeout_work; + struct net_device *netdev; + struct device *dev; + struct ks8842_tx_dma_ctl dma_tx; + struct ks8842_rx_dma_ctl dma_rx; +}; + +static void ks8842_dma_rx_cb(void *data); +static void ks8842_dma_tx_cb(void *data); + +static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) +{ + iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); +} + +static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) +{ + iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); +} + +static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank, + u8 value, int offset) +{ + ks8842_select_bank(adapter, bank); + iowrite8(value, adapter->hw_addr + offset); +} + +static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank, + u16 value, int offset) +{ + ks8842_select_bank(adapter, bank); + iowrite16(value, adapter->hw_addr + offset); +} + +static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank, + u16 bits, int offset) +{ + u16 reg; + ks8842_select_bank(adapter, bank); + reg = ioread16(adapter->hw_addr + offset); + reg |= bits; + iowrite16(reg, adapter->hw_addr + offset); +} + +static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank, + u16 bits, int offset) +{ + u16 reg; + ks8842_select_bank(adapter, bank); + reg = ioread16(adapter->hw_addr + offset); + reg &= ~bits; + iowrite16(reg, adapter->hw_addr + offset); +} + +static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank, + u32 value, int offset) +{ + ks8842_select_bank(adapter, bank); + iowrite32(value, adapter->hw_addr + offset); +} + +static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank, + int offset) +{ + ks8842_select_bank(adapter, bank); + return ioread8(adapter->hw_addr + offset); +} + +static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank, + int offset) +{ + ks8842_select_bank(adapter, bank); + return ioread16(adapter->hw_addr + offset); +} + +static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank, + int offset) +{ + ks8842_select_bank(adapter, bank); + return ioread32(adapter->hw_addr + offset); +} + +static void ks8842_reset(struct ks8842_adapter *adapter) +{ + if (adapter->conf_flags & MICREL_KS884X) { + ks8842_write16(adapter, 3, 1, REG_GRR); + msleep(10); + iowrite16(0, adapter->hw_addr + REG_GRR); + } else { + /* The KS8842 goes haywire when doing softare reset + * a work around in the timberdale IP is implemented to + * do a hardware reset instead + ks8842_write16(adapter, 3, 1, REG_GRR); + msleep(10); + iowrite16(0, adapter->hw_addr + REG_GRR); + */ + iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); + msleep(20); + } +} + +static void ks8842_update_link_status(struct net_device *netdev, + struct ks8842_adapter *adapter) +{ + /* check the status of the link */ + if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } else { + netif_stop_queue(netdev); + netif_carrier_off(netdev); + } +} + +static void ks8842_enable_tx(struct ks8842_adapter *adapter) +{ + ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR); +} + +static void ks8842_disable_tx(struct ks8842_adapter *adapter) +{ + ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR); +} + +static void ks8842_enable_rx(struct ks8842_adapter *adapter) +{ + ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR); +} + +static void 
ks8842_disable_rx(struct ks8842_adapter *adapter) +{ + ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR); +} + +static void ks8842_reset_hw(struct ks8842_adapter *adapter) +{ + /* reset the HW */ + ks8842_reset(adapter); + + /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */ + ks8842_write16(adapter, 16, 0x000E, REG_TXCR); + + /* enable the receiver, uni + multi + broadcast + flow ctrl + + crc strip */ + ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400, + REG_RXCR); + + /* TX frame pointer autoincrement */ + ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR); + + /* RX frame pointer autoincrement */ + ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR); + + /* RX 2 kb high watermark */ + ks8842_write16(adapter, 0, 0x1000, REG_QRFCR); + + /* aggressive back off in half duplex */ + ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1); + + /* enable no excessive collison drop */ + ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2); + + /* Enable port 1 force flow control / back pressure / transmit / recv */ + ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2); + + /* restart port auto-negotiation */ + ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); + + /* Enable the transmitter */ + ks8842_enable_tx(adapter); + + /* Enable the receiver */ + ks8842_enable_rx(adapter); + + /* clear all interrupts */ + ks8842_write16(adapter, 18, 0xffff, REG_ISR); + + /* enable interrupts */ + if (KS8842_USE_DMA(adapter)) { + /* When running in DMA Mode the RX interrupt is not enabled in + timberdale because RX data is received by DMA callbacks + it must still be enabled in the KS8842 because it indicates + to timberdale when there is RX data for it's DMA FIFOs */ + iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); + ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); + } else { + if (!(adapter->conf_flags & MICREL_KS884X)) + iowrite16(ENABLED_IRQS, + adapter->hw_addr + REG_TIMB_IER); + ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); + } + /* enable the switch */ + ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); +} + +static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) +{ + int i; + u16 mac; + + for (i = 0; i < ETH_ALEN; i++) + dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); + + if (adapter->conf_flags & MICREL_KS884X) { + /* + the sequence of saving mac addr between MAC and Switch is + different. 
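+		   On the genuine KS884X parts the word order is swapped:
+		   MARL is mirrored into MACAR3 and MARH into MACAR1,
+		   whereas on the timberdale variant the mapping is direct
+		   (MARL to MACAR1, MARH to MACAR3).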
+ */ + + mac = ks8842_read16(adapter, 2, REG_MARL); + ks8842_write16(adapter, 39, mac, REG_MACAR3); + mac = ks8842_read16(adapter, 2, REG_MARM); + ks8842_write16(adapter, 39, mac, REG_MACAR2); + mac = ks8842_read16(adapter, 2, REG_MARH); + ks8842_write16(adapter, 39, mac, REG_MACAR1); + } else { + + /* make sure the switch port uses the same MAC as the QMU */ + mac = ks8842_read16(adapter, 2, REG_MARL); + ks8842_write16(adapter, 39, mac, REG_MACAR1); + mac = ks8842_read16(adapter, 2, REG_MARM); + ks8842_write16(adapter, 39, mac, REG_MACAR2); + mac = ks8842_read16(adapter, 2, REG_MARH); + ks8842_write16(adapter, 39, mac, REG_MACAR3); + } +} + +static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) +{ + unsigned long flags; + unsigned i; + + spin_lock_irqsave(&adapter->lock, flags); + for (i = 0; i < ETH_ALEN; i++) { + ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); + if (!(adapter->conf_flags & MICREL_KS884X)) + ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], + REG_MACAR1 + i); + } + + if (adapter->conf_flags & MICREL_KS884X) { + /* + the sequence of saving mac addr between MAC and Switch is + different. + */ + + u16 mac; + + mac = ks8842_read16(adapter, 2, REG_MARL); + ks8842_write16(adapter, 39, mac, REG_MACAR3); + mac = ks8842_read16(adapter, 2, REG_MARM); + ks8842_write16(adapter, 39, mac, REG_MACAR2); + mac = ks8842_read16(adapter, 2, REG_MARH); + ks8842_write16(adapter, 39, mac, REG_MACAR1); + } + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) +{ + return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; +} + +static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; + u8 *buf = ctl->buf; + + if (ctl->adesc) { + netdev_dbg(netdev, "%s: TX ongoing\n", __func__); + /* transfer ongoing */ + return NETDEV_TX_BUSY; + } + + sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); + + /* copy data to the TX buffer */ + /* the control word, enable IRQ, port 1 and the length */ + *buf++ = 0x00; + *buf++ = 0x01; /* Port 1 */ + *buf++ = skb->len & 0xff; + *buf++ = (skb->len >> 8) & 0xff; + skb_copy_from_linear_data(skb, buf, skb->len); + + dma_sync_single_range_for_device(adapter->dev, + sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), + DMA_TO_DEVICE); + + /* make sure the length is a multiple of 4 */ + if (sg_dma_len(&ctl->sg) % 4) + sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; + + ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); + if (!ctl->adesc) + return NETDEV_TX_BUSY; + + ctl->adesc->callback_param = netdev; + ctl->adesc->callback = ks8842_dma_tx_cb; + ctl->adesc->tx_submit(ctl->adesc); + + netdev->stats.tx_bytes += skb->len; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + int len = skb->len; + + netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n", + __func__, skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb)); + + /* check FIFO buffer space, we need space for CRC and command bits */ + if (ks8842_tx_fifo_space(adapter) < len + 8) + return NETDEV_TX_BUSY; + + if (adapter->conf_flags & KS884X_16BIT) { + u16 *ptr16 = (u16 *)skb->data; + ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO); + 
ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI); + netdev->stats.tx_bytes += len; + + /* copy buffer */ + while (len > 0) { + iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO); + iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI); + len -= sizeof(u32); + } + } else { + + u32 *ptr = (u32 *)skb->data; + u32 ctrl; + /* the control word, enable IRQ, port 1 and the length */ + ctrl = 0x8000 | 0x100 | (len << 16); + ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); + + netdev->stats.tx_bytes += len; + + /* copy buffer */ + while (len > 0) { + iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); + len -= sizeof(u32); + ptr++; + } + } + + /* enqueue packet */ + ks8842_write16(adapter, 17, 1, REG_TXQCR); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) +{ + netdev_dbg(netdev, "RX error, status: %x\n", status); + + netdev->stats.rx_errors++; + if (status & RXSR_TOO_LONG) + netdev->stats.rx_length_errors++; + if (status & RXSR_CRC_ERROR) + netdev->stats.rx_crc_errors++; + if (status & RXSR_RUNT) + netdev->stats.rx_frame_errors++; +} + +static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, + int len) +{ + netdev_dbg(netdev, "RX packet, len: %d\n", len); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + if (status & RXSR_MULTICAST) + netdev->stats.multicast++; +} + +static int __ks8842_start_new_rx_dma(struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; + struct scatterlist *sg = &ctl->sg; + int err; + + ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); + if (ctl->skb) { + sg_init_table(sg, 1); + sg_dma_address(sg) = dma_map_single(adapter->dev, + ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); + if (unlikely(err)) { + sg_dma_address(sg) = 0; + goto out; + } + + sg_dma_len(sg) = DMA_BUFFER_SIZE; + + ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); + + if (!ctl->adesc) + goto out; + + ctl->adesc->callback_param = netdev; + ctl->adesc->callback = ks8842_dma_rx_cb; + ctl->adesc->tx_submit(ctl->adesc); + } else { + err = -ENOMEM; + sg_dma_address(sg) = 0; + goto out; + } + + return err; +out: + if (sg_dma_address(sg)) + dma_unmap_single(adapter->dev, sg_dma_address(sg), + DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + sg_dma_address(sg) = 0; + if (ctl->skb) + dev_kfree_skb(ctl->skb); + + ctl->skb = NULL; + + printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); + return err; +} + +static void ks8842_rx_frame_dma_tasklet(unsigned long arg) +{ + struct net_device *netdev = (struct net_device *)arg; + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; + struct sk_buff *skb = ctl->skb; + dma_addr_t addr = sg_dma_address(&ctl->sg); + u32 status; + + ctl->adesc = NULL; + + /* kick next transfer going */ + __ks8842_start_new_rx_dma(netdev); + + /* now handle the data we got */ + dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + + status = *((u32 *)skb->data); + + netdev_dbg(netdev, "%s - rx_data: status: %x\n", + __func__, status & 0xffff); + + /* check the status */ + if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { + int len = (status >> 16) & 0x7ff; + + ks8842_update_rx_counters(netdev, status, len); + + /* reserve 4 bytes which is the status word */ + skb_reserve(skb, 4); + skb_put(skb, 
len); + + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); + } else { + ks8842_update_rx_err_counters(netdev, status); + dev_kfree_skb(skb); + } +} + +static void ks8842_rx_frame(struct net_device *netdev, + struct ks8842_adapter *adapter) +{ + u32 status; + int len; + + if (adapter->conf_flags & KS884X_16BIT) { + status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO); + len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI); + netdev_dbg(netdev, "%s - rx_data: status: %x\n", + __func__, status); + } else { + status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); + len = (status >> 16) & 0x7ff; + status &= 0xffff; + netdev_dbg(netdev, "%s - rx_data: status: %x\n", + __func__, status); + } + + /* check the status */ + if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { + struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3); + + if (skb) { + + ks8842_update_rx_counters(netdev, status, len); + + if (adapter->conf_flags & KS884X_16BIT) { + u16 *data16 = (u16 *)skb_put(skb, len); + ks8842_select_bank(adapter, 17); + while (len > 0) { + *data16++ = ioread16(adapter->hw_addr + + REG_QMU_DATA_LO); + *data16++ = ioread16(adapter->hw_addr + + REG_QMU_DATA_HI); + len -= sizeof(u32); + } + } else { + u32 *data = (u32 *)skb_put(skb, len); + + ks8842_select_bank(adapter, 17); + while (len > 0) { + *data++ = ioread32(adapter->hw_addr + + REG_QMU_DATA_LO); + len -= sizeof(u32); + } + } + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); + } else + netdev->stats.rx_dropped++; + } else + ks8842_update_rx_err_counters(netdev, status); + + /* set high watermark to 3K */ + ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); + + /* release the frame */ + ks8842_write16(adapter, 17, 0x01, REG_RXQCR); + + /* set high watermark to 2K */ + ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); +} + +static void ks8842_handle_rx(struct net_device *netdev, + struct ks8842_adapter *adapter) +{ + u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; + netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); + while (rx_data) { + ks8842_rx_frame(netdev, adapter); + rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; + } +} + +static void ks8842_handle_tx(struct net_device *netdev, + struct ks8842_adapter *adapter) +{ + u16 sr = ks8842_read16(adapter, 16, REG_TXSR); + netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); + netdev->stats.tx_packets++; + if (netif_queue_stopped(netdev)) + netif_wake_queue(netdev); +} + +static void ks8842_handle_rx_overrun(struct net_device *netdev, + struct ks8842_adapter *adapter) +{ + netdev_dbg(netdev, "%s: entry\n", __func__); + netdev->stats.rx_errors++; + netdev->stats.rx_fifo_errors++; +} + +static void ks8842_tasklet(unsigned long arg) +{ + struct net_device *netdev = (struct net_device *)arg; + struct ks8842_adapter *adapter = netdev_priv(netdev); + u16 isr; + unsigned long flags; + u16 entry_bank; + + /* read current bank to be able to set it back */ + spin_lock_irqsave(&adapter->lock, flags); + entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); + spin_unlock_irqrestore(&adapter->lock, flags); + + isr = ks8842_read16(adapter, 18, REG_ISR); + netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); + + /* when running in DMA mode, do not ack RX interrupts, it is handled + internally by timberdale, otherwise it's DMA FIFO:s would stop + */ + if (KS8842_USE_DMA(adapter)) + isr &= ~IRQ_RX; + + /* Ack */ + ks8842_write16(adapter, 18, isr, REG_ISR); + + if (!(adapter->conf_flags & MICREL_KS884X)) + /* Ack in the timberdale 
IP as well */ + iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); + + if (!netif_running(netdev)) + return; + + if (isr & IRQ_LINK_CHANGE) + ks8842_update_link_status(netdev, adapter); + + /* should not get IRQ_RX when running DMA mode */ + if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) + ks8842_handle_rx(netdev, adapter); + + /* should only happen when in PIO mode */ + if (isr & IRQ_TX) + ks8842_handle_tx(netdev, adapter); + + if (isr & IRQ_RX_OVERRUN) + ks8842_handle_rx_overrun(netdev, adapter); + + if (isr & IRQ_TX_STOPPED) { + ks8842_disable_tx(adapter); + ks8842_enable_tx(adapter); + } + + if (isr & IRQ_RX_STOPPED) { + ks8842_disable_rx(adapter); + ks8842_enable_rx(adapter); + } + + /* re-enable interrupts, put back the bank selection register */ + spin_lock_irqsave(&adapter->lock, flags); + if (KS8842_USE_DMA(adapter)) + ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); + else + ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); + iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); + + /* Make sure timberdale continues DMA operations, they are stopped while + we are handling the ks8842 because we might change bank */ + if (KS8842_USE_DMA(adapter)) + ks8842_resume_dma(adapter); + + spin_unlock_irqrestore(&adapter->lock, flags); +} + +static irqreturn_t ks8842_irq(int irq, void *devid) +{ + struct net_device *netdev = devid; + struct ks8842_adapter *adapter = netdev_priv(netdev); + u16 isr; + u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); + irqreturn_t ret = IRQ_NONE; + + isr = ks8842_read16(adapter, 18, REG_ISR); + netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); + + if (isr) { + if (KS8842_USE_DMA(adapter)) + /* disable all but RX IRQ, since the FPGA relies on it*/ + ks8842_write16(adapter, 18, IRQ_RX, REG_IER); + else + /* disable IRQ */ + ks8842_write16(adapter, 18, 0x00, REG_IER); + + /* schedule tasklet */ + tasklet_schedule(&adapter->tasklet); + + ret = IRQ_HANDLED; + } + + iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); + + /* After an interrupt, tell timberdale to continue DMA operations. 
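+	   ks8842_resume_dma() does this by writing the REQ_TIMB_DMA_RESUME
+	   register in the FPGA.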
+ DMA is disabled while we are handling the ks8842 because we might + change bank */ + ks8842_resume_dma(adapter); + + return ret; +} + +static void ks8842_dma_rx_cb(void *data) +{ + struct net_device *netdev = data; + struct ks8842_adapter *adapter = netdev_priv(netdev); + + netdev_dbg(netdev, "RX DMA finished\n"); + /* schedule tasklet */ + if (adapter->dma_rx.adesc) + tasklet_schedule(&adapter->dma_rx.tasklet); +} + +static void ks8842_dma_tx_cb(void *data) +{ + struct net_device *netdev = data; + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; + + netdev_dbg(netdev, "TX DMA finished\n"); + + if (!ctl->adesc) + return; + + netdev->stats.tx_packets++; + ctl->adesc = NULL; + + if (netif_queue_stopped(netdev)) + netif_wake_queue(netdev); +} + +static void ks8842_stop_dma(struct ks8842_adapter *adapter) +{ + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; + + tx_ctl->adesc = NULL; + if (tx_ctl->chan) + dmaengine_terminate_all(tx_ctl->chan); + + rx_ctl->adesc = NULL; + if (rx_ctl->chan) + dmaengine_terminate_all(rx_ctl->chan); + + if (sg_dma_address(&rx_ctl->sg)) + dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), + DMA_BUFFER_SIZE, DMA_FROM_DEVICE); + sg_dma_address(&rx_ctl->sg) = 0; + + dev_kfree_skb(rx_ctl->skb); + rx_ctl->skb = NULL; +} + +static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) +{ + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; + + ks8842_stop_dma(adapter); + + if (tx_ctl->chan) + dma_release_channel(tx_ctl->chan); + tx_ctl->chan = NULL; + + if (rx_ctl->chan) + dma_release_channel(rx_ctl->chan); + rx_ctl->chan = NULL; + + tasklet_kill(&rx_ctl->tasklet); + + if (sg_dma_address(&tx_ctl->sg)) + dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), + DMA_BUFFER_SIZE, DMA_TO_DEVICE); + sg_dma_address(&tx_ctl->sg) = 0; + + kfree(tx_ctl->buf); + tx_ctl->buf = NULL; +} + +static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) +{ + return chan->chan_id == (long)filter_param; +} + +static int ks8842_alloc_dma_bufs(struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; + struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; + int err; + + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + dma_cap_set(DMA_PRIVATE, mask); + + sg_init_table(&tx_ctl->sg, 1); + + tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, + (void *)(long)tx_ctl->channel); + if (!tx_ctl->chan) { + err = -ENODEV; + goto err; + } + + /* allocate DMA buffer */ + tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); + if (!tx_ctl->buf) { + err = -ENOMEM; + goto err; + } + + sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, + tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); + err = dma_mapping_error(adapter->dev, + sg_dma_address(&tx_ctl->sg)); + if (err) { + sg_dma_address(&tx_ctl->sg) = 0; + goto err; + } + + rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, + (void *)(long)rx_ctl->channel); + if (!rx_ctl->chan) { + err = -ENODEV; + goto err; + } + + tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet, + (unsigned long)netdev); + + return 0; +err: + ks8842_dealloc_dma_bufs(adapter); + return err; +} + +/* Netdevice operations */ + +static int ks8842_open(struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + int 
err; + + netdev_dbg(netdev, "%s - entry\n", __func__); + + if (KS8842_USE_DMA(adapter)) { + err = ks8842_alloc_dma_bufs(netdev); + + if (!err) { + /* start RX dma */ + err = __ks8842_start_new_rx_dma(netdev); + if (err) + ks8842_dealloc_dma_bufs(adapter); + } + + if (err) { + printk(KERN_WARNING DRV_NAME + ": Failed to initiate DMA, running PIO\n"); + ks8842_dealloc_dma_bufs(adapter); + adapter->dma_rx.channel = -1; + adapter->dma_tx.channel = -1; + } + } + + /* reset the HW */ + ks8842_reset_hw(adapter); + + ks8842_write_mac_addr(adapter, netdev->dev_addr); + + ks8842_update_link_status(netdev, adapter); + + err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, + netdev); + if (err) { + pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); + return err; + } + + return 0; +} + +static int ks8842_close(struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + + netdev_dbg(netdev, "%s - entry\n", __func__); + + cancel_work_sync(&adapter->timeout_work); + + if (KS8842_USE_DMA(adapter)) + ks8842_dealloc_dma_bufs(adapter); + + /* free the irq */ + free_irq(adapter->irq, netdev); + + /* disable the switch */ + ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); + + return 0; +} + +static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + int ret; + struct ks8842_adapter *adapter = netdev_priv(netdev); + + netdev_dbg(netdev, "%s: entry\n", __func__); + + if (KS8842_USE_DMA(adapter)) { + unsigned long flags; + ret = ks8842_tx_frame_dma(skb, netdev); + /* for now only allow one transfer at the time */ + spin_lock_irqsave(&adapter->lock, flags); + if (adapter->dma_tx.adesc) + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->lock, flags); + return ret; + } + + ret = ks8842_tx_frame(skb, netdev); + + if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) + netif_stop_queue(netdev); + + return ret; +} + +static int ks8842_set_mac(struct net_device *netdev, void *p) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + char *mac = (u8 *)addr->sa_data; + + netdev_dbg(netdev, "%s: entry\n", __func__); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, mac, netdev->addr_len); + + ks8842_write_mac_addr(adapter, mac); + return 0; +} + +static void ks8842_tx_timeout_work(struct work_struct *work) +{ + struct ks8842_adapter *adapter = + container_of(work, struct ks8842_adapter, timeout_work); + struct net_device *netdev = adapter->netdev; + unsigned long flags; + + netdev_dbg(netdev, "%s: entry\n", __func__); + + spin_lock_irqsave(&adapter->lock, flags); + + if (KS8842_USE_DMA(adapter)) + ks8842_stop_dma(adapter); + + /* disable interrupts */ + ks8842_write16(adapter, 18, 0, REG_IER); + ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); + + netif_stop_queue(netdev); + + spin_unlock_irqrestore(&adapter->lock, flags); + + ks8842_reset_hw(adapter); + + ks8842_write_mac_addr(adapter, netdev->dev_addr); + + ks8842_update_link_status(netdev, adapter); + + if (KS8842_USE_DMA(adapter)) + __ks8842_start_new_rx_dma(netdev); +} + +static void ks8842_tx_timeout(struct net_device *netdev) +{ + struct ks8842_adapter *adapter = netdev_priv(netdev); + + netdev_dbg(netdev, "%s: entry\n", __func__); + + schedule_work(&adapter->timeout_work); +} + +static const struct net_device_ops ks8842_netdev_ops = { + .ndo_open = ks8842_open, + .ndo_stop = ks8842_close, + .ndo_start_xmit = ks8842_xmit_frame, + .ndo_set_mac_address = ks8842_set_mac, + .ndo_tx_timeout 
= ks8842_tx_timeout, + .ndo_validate_addr = eth_validate_addr +}; + +static const struct ethtool_ops ks8842_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + +static int ks8842_probe(struct platform_device *pdev) +{ + int err = -ENOMEM; + struct resource *iomem; + struct net_device *netdev; + struct ks8842_adapter *adapter; + struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev); + u16 id; + unsigned i; + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) + goto err_mem_region; + + netdev = alloc_etherdev(sizeof(struct ks8842_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work); + adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); + adapter->conf_flags = iomem->flags; + + if (!adapter->hw_addr) + goto err_ioremap; + + adapter->irq = platform_get_irq(pdev, 0); + if (adapter->irq < 0) { + err = adapter->irq; + goto err_get_irq; + } + + adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; + + /* DMA is only supported when accessed via timberdale */ + if (!(adapter->conf_flags & MICREL_KS884X) && pdata && + (pdata->tx_dma_channel != -1) && + (pdata->rx_dma_channel != -1)) { + adapter->dma_rx.channel = pdata->rx_dma_channel; + adapter->dma_tx.channel = pdata->tx_dma_channel; + } else { + adapter->dma_rx.channel = -1; + adapter->dma_tx.channel = -1; + } + + tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); + spin_lock_init(&adapter->lock); + + netdev->netdev_ops = &ks8842_netdev_ops; + netdev->ethtool_ops = &ks8842_ethtool_ops; + + /* Check if a mac address was given */ + i = netdev->addr_len; + if (pdata) { + for (i = 0; i < netdev->addr_len; i++) + if (pdata->macaddr[i] != 0) + break; + + if (i < netdev->addr_len) + /* an address was passed, use it */ + memcpy(netdev->dev_addr, pdata->macaddr, + netdev->addr_len); + } + + if (i == netdev->addr_len) { + ks8842_read_mac_addr(adapter, netdev->dev_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) + eth_hw_addr_random(netdev); + } + + id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + platform_set_drvdata(pdev, netdev); + + pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", + (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); + + return 0; + +err_register: +err_get_irq: + iounmap(adapter->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + release_mem_region(iomem->start, resource_size(iomem)); +err_mem_region: + return err; +} + +static int ks8842_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + struct ks8842_adapter *adapter = netdev_priv(netdev); + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + unregister_netdev(netdev); + tasklet_kill(&adapter->tasklet); + iounmap(adapter->hw_addr); + free_netdev(netdev); + release_mem_region(iomem->start, resource_size(iomem)); + return 0; +} + + +static struct platform_driver ks8842_platform_driver = { + .driver = { + .name = DRV_NAME, + }, + .probe = ks8842_probe, + .remove = ks8842_remove, +}; + +module_platform_driver(ks8842_platform_driver); + +MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); +MODULE_AUTHOR("Mocean Laboratories "); +MODULE_LICENSE("GPL v2"); 
+MODULE_ALIAS("platform:ks8842"); + diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c new file mode 100644 index 000000000..66d4ab703 --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -0,0 +1,1623 @@ +/* drivers/net/ethernet/micrel/ks8851.c + * + * Copyright 2009 Simtec Electronics + * http://www.simtec.co.uk/ + * Ben Dooks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ks8851.h" + +/** + * struct ks8851_rxctrl - KS8851 driver rx control + * @mchash: Multicast hash-table data. + * @rxcr1: KS_RXCR1 register setting + * @rxcr2: KS_RXCR2 register setting + * + * Representation of the settings needs to control the receive filtering + * such as the multicast hash-filter and the receive register settings. This + * is used to make the job of working out if the receive settings change and + * then issuing the new settings to the worker that will send the necessary + * commands. + */ +struct ks8851_rxctrl { + u16 mchash[4]; + u16 rxcr1; + u16 rxcr2; +}; + +/** + * union ks8851_tx_hdr - tx header data + * @txb: The header as bytes + * @txw: The header as 16bit, little-endian words + * + * A dual representation of the tx header data to allow + * access to individual bytes, and to allow 16bit accesses + * with 16bit alignment. + */ +union ks8851_tx_hdr { + u8 txb[6]; + __le16 txw[3]; +}; + +/** + * struct ks8851_net - KS8851 driver private data + * @netdev: The network device we're bound to + * @spidev: The spi device we're bound to. + * @lock: Lock to ensure that the device is not accessed when busy. + * @statelock: Lock on this structure for tx list. + * @mii: The MII state information for the mii calls. + * @rxctrl: RX settings for @rxctrl_work. + * @tx_work: Work queue for tx packets + * @rxctrl_work: Work queue for updating RX mode and multicast lists + * @txq: Queue of packets for transmission. + * @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1. + * @spi_msg2: pre-setup SPI transfer with two messages, @spi_xfer2. + * @txh: Space for generating packet TX header in DMA-able data + * @rxd: Space for receiving SPI data, in DMA-able space. + * @txd: Space for transmitting SPI data, in DMA-able space. + * @msg_enable: The message flags controlling driver output (see ethtool). + * @fid: Incrementing frame id tag. + * @rc_ier: Cached copy of KS_IER. + * @rc_ccr: Cached copy of KS_CCR. + * @rc_rxqcr: Cached copy of KS_RXQCR. + * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom + * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. + * @vdd_reg: Optional regulator supplying the chip + * @vdd_io: Optional digital power supply for IO + * @gpio: Optional reset_n gpio + * + * The @lock ensures that the chip is protected when certain operations are + * in progress. When the read or write packet transfer is in progress, most + * of the chip registers are not ccessible until the transfer is finished and + * the DMA has been de-asserted. + * + * The @statelock is used to protect information in the structure which may + * need to be accessed via several sources, such as the network driver layer + * or one of the work queues. 
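+ *
+ * Note that @lock is a mutex rather than a spinlock because register
+ * accesses are carried out with spi_sync(), which can sleep.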
+ * + * We align the buffers we may use for rx/tx to ensure that if the SPI driver + * wants to DMA map them, it will not have any problems with data the driver + * modifies. + */ +struct ks8851_net { + struct net_device *netdev; + struct spi_device *spidev; + struct mutex lock; + spinlock_t statelock; + + union ks8851_tx_hdr txh ____cacheline_aligned; + u8 rxd[8]; + u8 txd[8]; + + u32 msg_enable ____cacheline_aligned; + u16 tx_space; + u8 fid; + + u16 rc_ier; + u16 rc_rxqcr; + u16 rc_ccr; + u16 eeprom_size; + + struct mii_if_info mii; + struct ks8851_rxctrl rxctrl; + + struct work_struct tx_work; + struct work_struct rxctrl_work; + + struct sk_buff_head txq; + + struct spi_message spi_msg1; + struct spi_message spi_msg2; + struct spi_transfer spi_xfer1; + struct spi_transfer spi_xfer2[2]; + + struct eeprom_93cx6 eeprom; + struct regulator *vdd_reg; + struct regulator *vdd_io; + int gpio; +}; + +static int msg_enable; + +/* shift for byte-enable data */ +#define BYTE_EN(_x) ((_x) << 2) + +/* turn register number and byte-enable mask into data for start of packet */ +#define MK_OP(_byteen, _reg) (BYTE_EN(_byteen) | (_reg) << (8+2) | (_reg) >> 6) + +/* SPI register read/write calls. + * + * All these calls issue SPI transactions to access the chip's registers. They + * all require that the necessary lock is held to prevent accesses when the + * chip is busy transferring packet data (RX/TX FIFO accesses). + */ + +/** + * ks8851_wrreg16 - write 16bit register value to chip + * @ks: The chip state + * @reg: The register address + * @val: The value to write + * + * Issue a write to put the value @val into the register specified in @reg. + */ +static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val) +{ + struct spi_transfer *xfer = &ks->spi_xfer1; + struct spi_message *msg = &ks->spi_msg1; + __le16 txb[2]; + int ret; + + txb[0] = cpu_to_le16(MK_OP(reg & 2 ? 0xC : 0x03, reg) | KS_SPIOP_WR); + txb[1] = cpu_to_le16(val); + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 4; + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "spi_sync() failed\n"); +} + +/** + * ks8851_wrreg8 - write 8bit register value to chip + * @ks: The chip state + * @reg: The register address + * @val: The value to write + * + * Issue a write to put the value @val into the register specified in @reg. + */ +static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val) +{ + struct spi_transfer *xfer = &ks->spi_xfer1; + struct spi_message *msg = &ks->spi_msg1; + __le16 txb[2]; + int ret; + int bit; + + bit = 1 << (reg & 3); + + txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR); + txb[1] = val; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 3; + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "spi_sync() failed\n"); +} + +/** + * ks8851_rx_1msg - select whether to use one or two messages for spi read + * @ks: The device structure + * + * Return whether to generate a single message with a tx and rx buffer + * supplied to spi_sync(), or alternatively send the tx and rx buffers + * as separate messages. + * + * Depending on the hardware in use, a single message may be more efficient + * on interrupts or work done by the driver. + * + * This currently always returns true until we add some per-device data passed + * from the platform code to specify which mode is better. 
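+ *
+ * When a single message is used, the receive buffer also captures the
+ * bytes clocked in while the command word is being sent, so
+ * ks8851_rdreg() skips the first two bytes of the result in that case.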
+ */ +static inline bool ks8851_rx_1msg(struct ks8851_net *ks) +{ + return true; +} + +/** + * ks8851_rdreg - issue read register command and return the data + * @ks: The device state + * @op: The register address and byte enables in message format. + * @rxb: The RX buffer to return the result into + * @rxl: The length of data expected. + * + * This is the low level read call that issues the necessary spi message(s) + * to read data from the register specified in @op. + */ +static void ks8851_rdreg(struct ks8851_net *ks, unsigned op, + u8 *rxb, unsigned rxl) +{ + struct spi_transfer *xfer; + struct spi_message *msg; + __le16 *txb = (__le16 *)ks->txd; + u8 *trx = ks->rxd; + int ret; + + txb[0] = cpu_to_le16(op | KS_SPIOP_RD); + + if (ks8851_rx_1msg(ks)) { + msg = &ks->spi_msg1; + xfer = &ks->spi_xfer1; + + xfer->tx_buf = txb; + xfer->rx_buf = trx; + xfer->len = rxl + 2; + } else { + msg = &ks->spi_msg2; + xfer = ks->spi_xfer2; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 2; + + xfer++; + xfer->tx_buf = NULL; + xfer->rx_buf = trx; + xfer->len = rxl; + } + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "read: spi_sync() failed\n"); + else if (ks8851_rx_1msg(ks)) + memcpy(rxb, trx + 2, rxl); + else + memcpy(rxb, trx, rxl); +} + +/** + * ks8851_rdreg8 - read 8 bit register from device + * @ks: The chip information + * @reg: The register address + * + * Read a 8bit register from the chip, returning the result +*/ +static unsigned ks8851_rdreg8(struct ks8851_net *ks, unsigned reg) +{ + u8 rxb[1]; + + ks8851_rdreg(ks, MK_OP(1 << (reg & 3), reg), rxb, 1); + return rxb[0]; +} + +/** + * ks8851_rdreg16 - read 16 bit register from device + * @ks: The chip information + * @reg: The register address + * + * Read a 16bit register from the chip, returning the result +*/ +static unsigned ks8851_rdreg16(struct ks8851_net *ks, unsigned reg) +{ + __le16 rx = 0; + + ks8851_rdreg(ks, MK_OP(reg & 2 ? 0xC : 0x3, reg), (u8 *)&rx, 2); + return le16_to_cpu(rx); +} + +/** + * ks8851_rdreg32 - read 32 bit register from device + * @ks: The chip information + * @reg: The register address + * + * Read a 32bit register from the chip. + * + * Note, this read requires the address be aligned to 4 bytes. +*/ +static unsigned ks8851_rdreg32(struct ks8851_net *ks, unsigned reg) +{ + __le32 rx = 0; + + WARN_ON(reg & 3); + + ks8851_rdreg(ks, MK_OP(0xf, reg), (u8 *)&rx, 4); + return le32_to_cpu(rx); +} + +/** + * ks8851_soft_reset - issue one of the soft reset to the device + * @ks: The device state. + * @op: The bit(s) to set in the GRR + * + * Issue the relevant soft-reset command to the device's GRR register + * specified by @op. + * + * Note, the delays are in there as a caution to ensure that the reset + * has time to take effect and then complete. Since the datasheet does + * not currently specify the exact sequence, we have chosen something + * that seems to work with our device. + */ +static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op) +{ + ks8851_wrreg16(ks, KS_GRR, op); + mdelay(1); /* wait a short time to effect reset */ + ks8851_wrreg16(ks, KS_GRR, 0); + mdelay(1); /* wait for condition to clear */ +} + +/** + * ks8851_set_powermode - set power mode of the device + * @ks: The device state + * @pwrmode: The power mode value to write to KS_PMECR. + * + * Change the power mode of the chip. 
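+ * All callers in this driver hold @lock around this call, since it is a
+ * read-modify-write of KS_PMECR.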
+ */ +static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode) +{ + unsigned pmecr; + + netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); + + pmecr = ks8851_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_PM_MASK; + pmecr |= pwrmode; + + ks8851_wrreg16(ks, KS_PMECR, pmecr); +} + +/** + * ks8851_write_mac_addr - write mac address to device registers + * @dev: The network device + * + * Update the KS8851 MAC address registers from the address in @dev. + * + * This call assumes that the chip is not running, so there is no need to + * shutdown the RXQ process whilst setting this. +*/ +static int ks8851_write_mac_addr(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + int i; + + mutex_lock(&ks->lock); + + /* + * Wake up chip in case it was powered off when stopped; otherwise, + * the first write to the MAC address does not take effect. + */ + ks8851_set_powermode(ks, PMECR_PM_NORMAL); + for (i = 0; i < ETH_ALEN; i++) + ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]); + if (!netif_running(dev)) + ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); + + mutex_unlock(&ks->lock); + + return 0; +} + +/** + * ks8851_read_mac_addr - read mac address from device registers + * @dev: The network device + * + * Update our copy of the KS8851 MAC address from the registers of @dev. +*/ +static void ks8851_read_mac_addr(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + int i; + + mutex_lock(&ks->lock); + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i)); + + mutex_unlock(&ks->lock); +} + +/** + * ks8851_init_mac - initialise the mac address + * @ks: The device structure + * + * Get or create the initial mac address for the device and then set that + * into the station address register. If there is an EEPROM present, then + * we try that. If no valid mac address is found we use eth_random_addr() + * to create a new one. + */ +static void ks8851_init_mac(struct ks8851_net *ks) +{ + struct net_device *dev = ks->netdev; + + /* first, try reading what we've got already */ + if (ks->rc_ccr & CCR_EEPROM) { + ks8851_read_mac_addr(dev); + if (is_valid_ether_addr(dev->dev_addr)) + return; + + netdev_err(ks->netdev, "invalid mac address read %pM\n", + dev->dev_addr); + } + + eth_hw_addr_random(dev); + ks8851_write_mac_addr(dev); +} + +/** + * ks8851_rdfifo - read data from the receive fifo + * @ks: The device state. + * @buff: The buffer address + * @len: The length of the data to read + * + * Issue an RXQ FIFO read command and read the @len amount of data from + * the FIFO into the buffer specified by @buff. + */ +static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len) +{ + struct spi_transfer *xfer = ks->spi_xfer2; + struct spi_message *msg = &ks->spi_msg2; + u8 txb[1]; + int ret; + + netif_dbg(ks, rx_status, ks->netdev, + "%s: %d@%p\n", __func__, len, buff); + + /* set the operation we're issuing */ + txb[0] = KS_SPIOP_RXFIFO; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 1; + + xfer++; + xfer->rx_buf = buff; + xfer->tx_buf = NULL; + xfer->len = len; + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); +} + +/** + * ks8851_dbg_dumpkkt - dump initial packet contents to debug + * @ks: The device state + * @rxpkt: The data for the received packet + * + * Dump the initial data from the packet to dev_dbg(). 
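+ * The dump starts at rxpkt[4] because the buffer handed over from
+ * ks8851_rx_pkts() still carries the FIFO status/dummy bytes in front of
+ * the Ethernet header.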
+*/ +static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) +{ + netdev_dbg(ks->netdev, + "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", + rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], + rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], + rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); +} + +/** + * ks8851_rx_pkts - receive packets from the host + * @ks: The device information. + * + * This is called from the IRQ work queue when the system detects that there + * are packets in the receive queue. Find out how many packets there are and + * read them from the FIFO. + */ +static void ks8851_rx_pkts(struct ks8851_net *ks) +{ + struct sk_buff *skb; + unsigned rxfc; + unsigned rxlen; + unsigned rxstat; + u32 rxh; + u8 *rxpkt; + + rxfc = ks8851_rdreg8(ks, KS_RXFC); + + netif_dbg(ks, rx_status, ks->netdev, + "%s: %d packets\n", __func__, rxfc); + + /* Currently we're issuing a read per packet, but we could possibly + * improve the code by issuing a single read, getting the receive + * header, allocating the packet and then reading the packet data + * out in one go. + * + * This form of operation would require us to hold the SPI bus' + * chipselect low during the entie transaction to avoid any + * reset to the data stream coming from the chip. + */ + + for (; rxfc != 0; rxfc--) { + rxh = ks8851_rdreg32(ks, KS_RXFHSR); + rxstat = rxh & 0xffff; + rxlen = (rxh >> 16) & 0xfff; + + netif_dbg(ks, rx_status, ks->netdev, + "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); + + /* the length of the packet includes the 32bit CRC */ + + /* set dma read address */ + ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); + + /* start the packet dma process, and set auto-dequeue rx */ + ks8851_wrreg16(ks, KS_RXQCR, + ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); + + if (rxlen > 4) { + unsigned int rxalign; + + rxlen -= 4; + rxalign = ALIGN(rxlen, 4); + skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign); + if (skb) { + + /* 4 bytes of status header + 4 bytes of + * garbage: we put them before ethernet + * header, so that they are copied, + * but ignored. + */ + + rxpkt = skb_put(skb, rxlen) - 8; + + ks8851_rdfifo(ks, rxpkt, rxalign + 8); + + if (netif_msg_pktdata(ks)) + ks8851_dbg_dumpkkt(ks, rxpkt); + + skb->protocol = eth_type_trans(skb, ks->netdev); + netif_rx_ni(skb); + + ks->netdev->stats.rx_packets++; + ks->netdev->stats.rx_bytes += rxlen; + } + } + + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + } +} + +/** + * ks8851_irq - IRQ handler for dealing with interrupt requests + * @irq: IRQ number + * @_ks: cookie + * + * This handler is invoked when the IRQ line asserts to find out what happened. + * As we cannot allow ourselves to sleep in HARDIRQ context, this handler runs + * in thread context. + * + * Read the interrupt status, work out what needs to be done and then clear + * any of the interrupts that are not needed. 
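+ * The handler takes @lock for the duration of the register accesses and
+ * may sleep in spi_sync(), which is why it is registered below via
+ * request_threaded_irq() with IRQF_ONESHOT rather than as a hard-IRQ
+ * handler.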
+ */ +static irqreturn_t ks8851_irq(int irq, void *_ks) +{ + struct ks8851_net *ks = _ks; + unsigned status; + unsigned handled = 0; + + mutex_lock(&ks->lock); + + status = ks8851_rdreg16(ks, KS_ISR); + + netif_dbg(ks, intr, ks->netdev, + "%s: status 0x%04x\n", __func__, status); + + if (status & IRQ_LCI) + handled |= IRQ_LCI; + + if (status & IRQ_LDI) { + u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_WKEVT_MASK; + ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); + + handled |= IRQ_LDI; + } + + if (status & IRQ_RXPSI) + handled |= IRQ_RXPSI; + + if (status & IRQ_TXI) { + handled |= IRQ_TXI; + + /* no lock here, tx queue should have been stopped */ + + /* update our idea of how much tx space is available to the + * system */ + ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); + + netif_dbg(ks, intr, ks->netdev, + "%s: txspace %d\n", __func__, ks->tx_space); + } + + if (status & IRQ_RXI) + handled |= IRQ_RXI; + + if (status & IRQ_SPIBEI) { + dev_err(&ks->spidev->dev, "%s: spi bus error\n", __func__); + handled |= IRQ_SPIBEI; + } + + ks8851_wrreg16(ks, KS_ISR, handled); + + if (status & IRQ_RXI) { + /* the datasheet says to disable the rx interrupt during + * packet read-out, however we're masking the interrupt + * from the device so do not bother masking just the RX + * from the device. */ + + ks8851_rx_pkts(ks); + } + + /* if something stopped the rx process, probably due to wanting + * to change the rx settings, then do something about restarting + * it. */ + if (status & IRQ_RXPSI) { + struct ks8851_rxctrl *rxc = &ks->rxctrl; + + /* update the multicast hash table */ + ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]); + ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]); + ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]); + ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]); + + ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2); + ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1); + } + + mutex_unlock(&ks->lock); + + if (status & IRQ_LCI) + mii_check_link(&ks->mii); + + if (status & IRQ_TXI) + netif_wake_queue(ks->netdev); + + return IRQ_HANDLED; +} + +/** + * calc_txlen - calculate size of message to send packet + * @len: Length of data + * + * Returns the size of the TXFIFO message needed to send + * this packet. + */ +static inline unsigned calc_txlen(unsigned len) +{ + return ALIGN(len + 4, 4); +} + +/** + * ks8851_wrpkt - write packet to TX FIFO + * @ks: The device state. + * @txp: The sk_buff to transmit. + * @irq: IRQ on completion of the packet. + * + * Send the @txp to the chip. This means creating the relevant packet header + * specifying the length of the packet and the other information the chip + * needs, such as IRQ on completion. Send the header and the packet data to + * the device. 
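+ * The 5 header bytes sent from @txh are the TXFIFO write opcode followed
+ * by the 16-bit frame-id/control word and the 16-bit byte count; for
+ * example, a 60-byte frame queued with @irq set carries TXFR_TXIC | fid
+ * and 0x003c. The packet data itself goes out in a second transfer whose
+ * length is rounded up to a 32-bit boundary.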
+ */ +static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq) +{ + struct spi_transfer *xfer = ks->spi_xfer2; + struct spi_message *msg = &ks->spi_msg2; + unsigned fid = 0; + int ret; + + netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n", + __func__, txp, txp->len, txp->data, irq); + + fid = ks->fid++; + fid &= TXFR_TXFID_MASK; + + if (irq) + fid |= TXFR_TXIC; /* irq on completion */ + + /* start header at txb[1] to align txw entries */ + ks->txh.txb[1] = KS_SPIOP_TXFIFO; + ks->txh.txw[1] = cpu_to_le16(fid); + ks->txh.txw[2] = cpu_to_le16(txp->len); + + xfer->tx_buf = &ks->txh.txb[1]; + xfer->rx_buf = NULL; + xfer->len = 5; + + xfer++; + xfer->tx_buf = txp->data; + xfer->rx_buf = NULL; + xfer->len = ALIGN(txp->len, 4); + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); +} + +/** + * ks8851_done_tx - update and then free skbuff after transmitting + * @ks: The device state + * @txb: The buffer transmitted + */ +static void ks8851_done_tx(struct ks8851_net *ks, struct sk_buff *txb) +{ + struct net_device *dev = ks->netdev; + + dev->stats.tx_bytes += txb->len; + dev->stats.tx_packets++; + + dev_kfree_skb(txb); +} + +/** + * ks8851_tx_work - process tx packet(s) + * @work: The work strucutre what was scheduled. + * + * This is called when a number of packets have been scheduled for + * transmission and need to be sent to the device. + */ +static void ks8851_tx_work(struct work_struct *work) +{ + struct ks8851_net *ks = container_of(work, struct ks8851_net, tx_work); + struct sk_buff *txb; + bool last = skb_queue_empty(&ks->txq); + + mutex_lock(&ks->lock); + + while (!last) { + txb = skb_dequeue(&ks->txq); + last = skb_queue_empty(&ks->txq); + + if (txb != NULL) { + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); + ks8851_wrpkt(ks, txb, last); + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); + + ks8851_done_tx(ks, txb); + } + } + + mutex_unlock(&ks->lock); +} + +/** + * ks8851_net_open - open network device + * @dev: The network device being opened. + * + * Called when the network device is marked active, such as a user executing + * 'ifconfig up' on the device. + */ +static int ks8851_net_open(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + + /* lock the card, even if we may not actually be doing anything + * else at the moment */ + mutex_lock(&ks->lock); + + netif_dbg(ks, ifup, ks->netdev, "opening\n"); + + /* bring chip out of any power saving mode it was in */ + ks8851_set_powermode(ks, PMECR_PM_NORMAL); + + /* issue a soft reset to the RX/TX QMU to put it into a known + * state. 
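+ * (GRR_QMU resets only the TX/RX queue management unit; the full-chip
+ * reset, GRR_GSR, is issued once at probe time.)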
*/ + ks8851_soft_reset(ks, GRR_QMU); + + /* setup transmission parameters */ + + ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE | /* enable transmit process */ + TXCR_TXPE | /* pad to min length */ + TXCR_TXCRC | /* add CRC */ + TXCR_TXFCE)); /* enable flow control */ + + /* auto-increment tx data, reset tx pointer */ + ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI); + + /* setup receiver control */ + + ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /* from mac filter */ + RXCR1_RXFCE | /* enable flow control */ + RXCR1_RXBE | /* broadcast enable */ + RXCR1_RXUE | /* unicast enable */ + RXCR1_RXE)); /* enable rx block */ + + /* transfer entire frames out in one go */ + ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME); + + /* set receive counter timeouts */ + ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */ + ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */ + ks8851_wrreg16(ks, KS_RXFCTR, 10); /* 10 frames to IRQ */ + + ks->rc_rxqcr = (RXQCR_RXFCTE | /* IRQ on frame count exceeded */ + RXQCR_RXDBCTE | /* IRQ on byte count exceeded */ + RXQCR_RXDTTE); /* IRQ on time exceeded */ + + ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + + /* clear then enable interrupts */ + +#define STD_IRQ (IRQ_LCI | /* Link Change */ \ + IRQ_TXI | /* TX done */ \ + IRQ_RXI | /* RX done */ \ + IRQ_SPIBEI | /* SPI bus error */ \ + IRQ_TXPSI | /* TX process stop */ \ + IRQ_RXPSI) /* RX process stop */ + + ks->rc_ier = STD_IRQ; + ks8851_wrreg16(ks, KS_ISR, STD_IRQ); + ks8851_wrreg16(ks, KS_IER, STD_IRQ); + + netif_start_queue(ks->netdev); + + netif_dbg(ks, ifup, ks->netdev, "network device up\n"); + + mutex_unlock(&ks->lock); + return 0; +} + +/** + * ks8851_net_stop - close network device + * @dev: The device being closed. + * + * Called to close down a network device which has been active. Cancell any + * work, shutdown the RX and TX process and then place the chip into a low + * power state whilst it is not being used. + */ +static int ks8851_net_stop(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + + netif_info(ks, ifdown, dev, "shutting down\n"); + + netif_stop_queue(dev); + + mutex_lock(&ks->lock); + /* turn off the IRQs and ack any outstanding */ + ks8851_wrreg16(ks, KS_IER, 0x0000); + ks8851_wrreg16(ks, KS_ISR, 0xffff); + mutex_unlock(&ks->lock); + + /* stop any outstanding work */ + flush_work(&ks->tx_work); + flush_work(&ks->rxctrl_work); + + mutex_lock(&ks->lock); + /* shutdown RX process */ + ks8851_wrreg16(ks, KS_RXCR1, 0x0000); + + /* shutdown TX process */ + ks8851_wrreg16(ks, KS_TXCR, 0x0000); + + /* set powermode to soft power down to save power */ + ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); + mutex_unlock(&ks->lock); + + /* ensure any queued tx buffers are dumped */ + while (!skb_queue_empty(&ks->txq)) { + struct sk_buff *txb = skb_dequeue(&ks->txq); + + netif_dbg(ks, ifdown, ks->netdev, + "%s: freeing txb %p\n", __func__, txb); + + dev_kfree_skb(txb); + } + + return 0; +} + +/** + * ks8851_start_xmit - transmit packet + * @skb: The buffer to transmit + * @dev: The device used to transmit the packet. + * + * Called by the network layer to transmit the @skb. Queue the packet for + * the device and schedule the necessary work to transmit the packet when + * it is free. + * + * We do this to firstly avoid sleeping with the network device locked, + * and secondly so we can round up more than one packet to transmit which + * means we can try and avoid generating too many transmit done interrupts. 
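+ * If the FIFO does not have room for the frame the queue is stopped and
+ * NETDEV_TX_BUSY is returned; the queue is only woken again from the IRQ
+ * handler once a TX-done interrupt has refreshed @tx_space.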
+ */ +static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + unsigned needed = calc_txlen(skb->len); + netdev_tx_t ret = NETDEV_TX_OK; + + netif_dbg(ks, tx_queued, ks->netdev, + "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data); + + spin_lock(&ks->statelock); + + if (needed > ks->tx_space) { + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + } else { + ks->tx_space -= needed; + skb_queue_tail(&ks->txq, skb); + } + + spin_unlock(&ks->statelock); + schedule_work(&ks->tx_work); + + return ret; +} + +/** + * ks8851_rxctrl_work - work handler to change rx mode + * @work: The work structure this belongs to. + * + * Lock the device and issue the necessary changes to the receive mode from + * the network device layer. This is done so that we can do this without + * having to sleep whilst holding the network device lock. + * + * Since the recommendation from Micrel is that the RXQ is shutdown whilst the + * receive parameters are programmed, we issue a write to disable the RXQ and + * then wait for the interrupt handler to be triggered once the RXQ shutdown is + * complete. The interrupt handler then writes the new values into the chip. + */ +static void ks8851_rxctrl_work(struct work_struct *work) +{ + struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work); + + mutex_lock(&ks->lock); + + /* need to shutdown RXQ before modifying filter parameters */ + ks8851_wrreg16(ks, KS_RXCR1, 0x00); + + mutex_unlock(&ks->lock); +} + +static void ks8851_set_rx_mode(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + struct ks8851_rxctrl rxctrl; + + memset(&rxctrl, 0, sizeof(rxctrl)); + + if (dev->flags & IFF_PROMISC) { + /* interface to receive everything */ + + rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF; + } else if (dev->flags & IFF_ALLMULTI) { + /* accept all multicast packets */ + + rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | + RXCR1_RXPAFMA | RXCR1_RXMAFMA); + } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + u32 crc; + + /* accept some multicast */ + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); + crc >>= (32 - 6); /* get top six bits */ + + rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); + } + + rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA; + } else { + /* just accept broadcast / unicast */ + rxctrl.rxcr1 = RXCR1_RXPAFMA; + } + + rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */ + RXCR1_RXBE | /* broadcast enable */ + RXCR1_RXE | /* RX process enable */ + RXCR1_RXFCE); /* enable flow control */ + + rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME; + + /* schedule work to do the actual set of the data if needed */ + + spin_lock(&ks->statelock); + + if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) { + memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl)); + schedule_work(&ks->rxctrl_work); + } + + spin_unlock(&ks->statelock); +} + +static int ks8851_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + + if (netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + return ks8851_write_mac_addr(dev); +} + +static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct ks8851_net *ks = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL); +} + +static const struct net_device_ops ks8851_netdev_ops 
= { + .ndo_open = ks8851_net_open, + .ndo_stop = ks8851_net_stop, + .ndo_do_ioctl = ks8851_net_ioctl, + .ndo_start_xmit = ks8851_start_xmit, + .ndo_set_mac_address = ks8851_set_mac_address, + .ndo_set_rx_mode = ks8851_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +/* ethtool support */ + +static void ks8851_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *di) +{ + strlcpy(di->driver, "KS8851", sizeof(di->driver)); + strlcpy(di->version, "1.00", sizeof(di->version)); + strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info)); +} + +static u32 ks8851_get_msglevel(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + return ks->msg_enable; +} + +static void ks8851_set_msglevel(struct net_device *dev, u32 to) +{ + struct ks8851_net *ks = netdev_priv(dev); + ks->msg_enable = to; +} + +static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_ethtool_gset(&ks->mii, cmd); +} + +static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_ethtool_sset(&ks->mii, cmd); +} + +static u32 ks8851_get_link(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_link_ok(&ks->mii); +} + +static int ks8851_nway_reset(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + return mii_nway_restart(&ks->mii); +} + +/* EEPROM support */ + +static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee) +{ + struct ks8851_net *ks = ee->data; + unsigned val; + + val = ks8851_rdreg16(ks, KS_EEPCR); + + ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0; + ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0; + ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0; +} + +static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee) +{ + struct ks8851_net *ks = ee->data; + unsigned val = EEPCR_EESA; /* default - eeprom access on */ + + if (ee->drive_data) + val |= EEPCR_EESRWA; + if (ee->reg_data_in) + val |= EEPCR_EEDO; + if (ee->reg_data_clock) + val |= EEPCR_EESCK; + if (ee->reg_chip_select) + val |= EEPCR_EECS; + + ks8851_wrreg16(ks, KS_EEPCR, val); +} + +/** + * ks8851_eeprom_claim - claim device EEPROM and activate the interface + * @ks: The network device state. + * + * Check for the presence of an EEPROM, and then activate software access + * to the device. 
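+ * Returns 0 on success or -ENOENT if no EEPROM is fitted. On success the
+ * chip @lock is left held and must be dropped via ks8851_eeprom_release().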
+ */ +static int ks8851_eeprom_claim(struct ks8851_net *ks) +{ + if (!(ks->rc_ccr & CCR_EEPROM)) + return -ENOENT; + + mutex_lock(&ks->lock); + + /* start with clock low, cs high */ + ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS); + return 0; +} + +/** + * ks8851_eeprom_release - release the EEPROM interface + * @ks: The device state + * + * Release the software access to the device EEPROM + */ +static void ks8851_eeprom_release(struct ks8851_net *ks) +{ + unsigned val = ks8851_rdreg16(ks, KS_EEPCR); + + ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA); + mutex_unlock(&ks->lock); +} + +#define KS_EEPROM_MAGIC (0x00008851) + +static int ks8851_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct ks8851_net *ks = netdev_priv(dev); + int offset = ee->offset; + int len = ee->len; + u16 tmp; + + /* currently only support byte writing */ + if (len != 1) + return -EINVAL; + + if (ee->magic != KS_EEPROM_MAGIC) + return -EINVAL; + + if (ks8851_eeprom_claim(ks)) + return -ENOENT; + + eeprom_93cx6_wren(&ks->eeprom, true); + + /* ethtool currently only supports writing bytes, which means + * we have to read/modify/write our 16bit EEPROMs */ + + eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp); + + if (offset & 1) { + tmp &= 0xff; + tmp |= *data << 8; + } else { + tmp &= 0xff00; + tmp |= *data; + } + + eeprom_93cx6_write(&ks->eeprom, offset/2, tmp); + eeprom_93cx6_wren(&ks->eeprom, false); + + ks8851_eeprom_release(ks); + + return 0; +} + +static int ks8851_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct ks8851_net *ks = netdev_priv(dev); + int offset = ee->offset; + int len = ee->len; + + /* must be 2 byte aligned */ + if (len & 1 || offset & 1) + return -EINVAL; + + if (ks8851_eeprom_claim(ks)) + return -ENOENT; + + ee->magic = KS_EEPROM_MAGIC; + + eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2); + ks8851_eeprom_release(ks); + + return 0; +} + +static int ks8851_get_eeprom_len(struct net_device *dev) +{ + struct ks8851_net *ks = netdev_priv(dev); + + /* currently, we assume it is an 93C46 attached, so return 128 */ + return ks->rc_ccr & CCR_EEPROM ? 128 : 0; +} + +static const struct ethtool_ops ks8851_ethtool_ops = { + .get_drvinfo = ks8851_get_drvinfo, + .get_msglevel = ks8851_get_msglevel, + .set_msglevel = ks8851_set_msglevel, + .get_settings = ks8851_get_settings, + .set_settings = ks8851_set_settings, + .get_link = ks8851_get_link, + .nway_reset = ks8851_nway_reset, + .get_eeprom_len = ks8851_get_eeprom_len, + .get_eeprom = ks8851_get_eeprom, + .set_eeprom = ks8851_set_eeprom, +}; + +/* MII interface controls */ + +/** + * ks8851_phy_reg - convert MII register into a KS8851 register + * @reg: MII register number. + * + * Return the KS8851 register number for the corresponding MII PHY register + * if possible. Return zero if the MII register has no direct mapping to the + * KS8851 register set. + */ +static int ks8851_phy_reg(int reg) +{ + switch (reg) { + case MII_BMCR: + return KS_P1MBCR; + case MII_BMSR: + return KS_P1MBSR; + case MII_PHYSID1: + return KS_PHY1ILR; + case MII_PHYSID2: + return KS_PHY1IHR; + case MII_ADVERTISE: + return KS_P1ANAR; + case MII_LPA: + return KS_P1ANLPR; + } + + return 0x0; +} + +/** + * ks8851_phy_read - MII interface PHY register read. + * @dev: The network device the PHY is on. + * @phy_addr: Address of PHY (ignored as we only have one) + * @reg: The register to read. + * + * This call reads data from the PHY register specified in @reg. 
Since the + * device does not support all the MII registers, the non-existent values + * are always returned as zero. + * + * We return zero for unsupported registers as the MII code does not check + * the value returned for any error status, and simply returns it to the + * caller. The mii-tool that the driver was tested with takes any -ve error + * as real PHY capabilities, thus displaying incorrect data to the user. + */ +static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg) +{ + struct ks8851_net *ks = netdev_priv(dev); + int ksreg; + int result; + + ksreg = ks8851_phy_reg(reg); + if (!ksreg) + return 0x0; /* no error return allowed, so use zero */ + + mutex_lock(&ks->lock); + result = ks8851_rdreg16(ks, ksreg); + mutex_unlock(&ks->lock); + + return result; +} + +static void ks8851_phy_write(struct net_device *dev, + int phy, int reg, int value) +{ + struct ks8851_net *ks = netdev_priv(dev); + int ksreg; + + ksreg = ks8851_phy_reg(reg); + if (ksreg) { + mutex_lock(&ks->lock); + ks8851_wrreg16(ks, ksreg, value); + mutex_unlock(&ks->lock); + } +} + +/** + * ks8851_read_selftest - read the selftest memory info. + * @ks: The device state + * + * Read and check the TX/RX memory selftest information. + */ +static int ks8851_read_selftest(struct ks8851_net *ks) +{ + unsigned both_done = MBIR_TXMBF | MBIR_RXMBF; + int ret = 0; + unsigned rd; + + rd = ks8851_rdreg16(ks, KS_MBIR); + + if ((rd & both_done) != both_done) { + netdev_warn(ks->netdev, "Memory selftest not finished\n"); + return 0; + } + + if (rd & MBIR_TXMBFA) { + netdev_err(ks->netdev, "TX memory selftest fail\n"); + ret |= 1; + } + + if (rd & MBIR_RXMBFA) { + netdev_err(ks->netdev, "RX memory selftest fail\n"); + ret |= 2; + } + + return 0; +} + +/* driver bus management functions */ + +#ifdef CONFIG_PM_SLEEP + +static int ks8851_suspend(struct device *dev) +{ + struct ks8851_net *ks = dev_get_drvdata(dev); + struct net_device *netdev = ks->netdev; + + if (netif_running(netdev)) { + netif_device_detach(netdev); + ks8851_net_stop(netdev); + } + + return 0; +} + +static int ks8851_resume(struct device *dev) +{ + struct ks8851_net *ks = dev_get_drvdata(dev); + struct net_device *netdev = ks->netdev; + + if (netif_running(netdev)) { + ks8851_net_open(netdev); + netif_device_attach(netdev); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend, ks8851_resume); + +static int ks8851_probe(struct spi_device *spi) +{ + struct net_device *ndev; + struct ks8851_net *ks; + int ret; + unsigned cider; + int gpio; + + ndev = alloc_etherdev(sizeof(struct ks8851_net)); + if (!ndev) + return -ENOMEM; + + spi->bits_per_word = 8; + + ks = netdev_priv(ndev); + + ks->netdev = ndev; + ks->spidev = spi; + ks->tx_space = 6144; + + gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios", + 0, NULL); + if (gpio == -EPROBE_DEFER) { + ret = gpio; + goto err_gpio; + } + + ks->gpio = gpio; + if (gpio_is_valid(gpio)) { + ret = devm_gpio_request_one(&spi->dev, gpio, + GPIOF_OUT_INIT_LOW, "ks8851_rst_n"); + if (ret) { + dev_err(&spi->dev, "reset gpio request failed\n"); + goto err_gpio; + } + } + + ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io"); + if (IS_ERR(ks->vdd_io)) { + ret = PTR_ERR(ks->vdd_io); + goto err_reg_io; + } + + ret = regulator_enable(ks->vdd_io); + if (ret) { + dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n", + ret); + goto err_reg_io; + } + + ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd"); + if (IS_ERR(ks->vdd_reg)) { + ret = PTR_ERR(ks->vdd_reg); + goto err_reg; + } + + 
ret = regulator_enable(ks->vdd_reg); + if (ret) { + dev_err(&spi->dev, "regulator vdd enable fail: %d\n", + ret); + goto err_reg; + } + + if (gpio_is_valid(gpio)) { + usleep_range(10000, 11000); + gpio_set_value(gpio, 1); + } + + mutex_init(&ks->lock); + spin_lock_init(&ks->statelock); + + INIT_WORK(&ks->tx_work, ks8851_tx_work); + INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work); + + /* initialise pre-made spi transfer messages */ + + spi_message_init(&ks->spi_msg1); + spi_message_add_tail(&ks->spi_xfer1, &ks->spi_msg1); + + spi_message_init(&ks->spi_msg2); + spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2); + spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2); + + /* setup EEPROM state */ + + ks->eeprom.data = ks; + ks->eeprom.width = PCI_EEPROM_WIDTH_93C46; + ks->eeprom.register_read = ks8851_eeprom_regread; + ks->eeprom.register_write = ks8851_eeprom_regwrite; + + /* setup mii state */ + ks->mii.dev = ndev; + ks->mii.phy_id = 1, + ks->mii.phy_id_mask = 1; + ks->mii.reg_num_mask = 0xf; + ks->mii.mdio_read = ks8851_phy_read; + ks->mii.mdio_write = ks8851_phy_write; + + dev_info(&spi->dev, "message enable is %d\n", msg_enable); + + /* set the default message enable */ + ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK)); + + skb_queue_head_init(&ks->txq); + + ndev->ethtool_ops = &ks8851_ethtool_ops; + SET_NETDEV_DEV(ndev, &spi->dev); + + spi_set_drvdata(spi, ks); + + ndev->if_port = IF_PORT_100BASET; + ndev->netdev_ops = &ks8851_netdev_ops; + ndev->irq = spi->irq; + + /* issue a global soft reset to reset the device. */ + ks8851_soft_reset(ks, GRR_GSR); + + /* simple check for a valid chip being connected to the bus */ + cider = ks8851_rdreg16(ks, KS_CIDER); + if ((cider & ~CIDER_REV_MASK) != CIDER_ID) { + dev_err(&spi->dev, "failed to read device ID\n"); + ret = -ENODEV; + goto err_id; + } + + /* cache the contents of the CCR register for EEPROM, etc. */ + ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR); + + if (ks->rc_ccr & CCR_EEPROM) + ks->eeprom_size = 128; + else + ks->eeprom_size = 0; + + ks8851_read_selftest(ks); + ks8851_init_mac(ks); + + ret = request_threaded_irq(spi->irq, NULL, ks8851_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + ndev->name, ks); + if (ret < 0) { + dev_err(&spi->dev, "failed to get irq\n"); + goto err_irq; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(&spi->dev, "failed to register network device\n"); + goto err_netdev; + } + + netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", + CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq, + ks->rc_ccr & CCR_EEPROM ? 
"has" : "no"); + + return 0; + + +err_netdev: + free_irq(ndev->irq, ks); + +err_irq: + if (gpio_is_valid(gpio)) + gpio_set_value(gpio, 0); +err_id: + regulator_disable(ks->vdd_reg); +err_reg: + regulator_disable(ks->vdd_io); +err_reg_io: +err_gpio: + free_netdev(ndev); + return ret; +} + +static int ks8851_remove(struct spi_device *spi) +{ + struct ks8851_net *priv = spi_get_drvdata(spi); + + if (netif_msg_drv(priv)) + dev_info(&spi->dev, "remove\n"); + + unregister_netdev(priv->netdev); + free_irq(spi->irq, priv); + if (gpio_is_valid(priv->gpio)) + gpio_set_value(priv->gpio, 0); + regulator_disable(priv->vdd_reg); + regulator_disable(priv->vdd_io); + free_netdev(priv->netdev); + + return 0; +} + +static const struct of_device_id ks8851_match_table[] = { + { .compatible = "micrel,ks8851" }, + { } +}; + +static struct spi_driver ks8851_driver = { + .driver = { + .name = "ks8851", + .of_match_table = ks8851_match_table, + .owner = THIS_MODULE, + .pm = &ks8851_pm_ops, + }, + .probe = ks8851_probe, + .remove = ks8851_remove, +}; +module_spi_driver(ks8851_driver); + +MODULE_DESCRIPTION("KS8851 Network driver"); +MODULE_AUTHOR("Ben Dooks "); +MODULE_LICENSE("GPL"); + +module_param_named(message, msg_enable, int, 0); +MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); +MODULE_ALIAS("spi:ks8851"); diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h new file mode 100644 index 000000000..852256ef1 --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -0,0 +1,298 @@ +/* drivers/net/ethernet/micrel/ks8851.h + * + * Copyright 2009 Simtec Electronics + * Ben Dooks + * + * KS8851 register definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#define KS_CCR 0x08 +#define CCR_EEPROM (1 << 9) +#define CCR_SPI (1 << 8) +#define CCR_32PIN (1 << 0) + +/* MAC address registers */ +#define KS_MAR(_m) (0x15 - (_m)) +#define KS_MARL 0x10 +#define KS_MARM 0x12 +#define KS_MARH 0x14 + +#define KS_OBCR 0x20 +#define OBCR_ODS_16mA (1 << 6) + +#define KS_EEPCR 0x22 +#define EEPCR_EESRWA (1 << 5) +#define EEPCR_EESA (1 << 4) +#define EEPCR_EESB (1 << 3) +#define EEPCR_EEDO (1 << 2) +#define EEPCR_EESCK (1 << 1) +#define EEPCR_EECS (1 << 0) + +#define KS_MBIR 0x24 +#define MBIR_TXMBF (1 << 12) +#define MBIR_TXMBFA (1 << 11) +#define MBIR_RXMBF (1 << 4) +#define MBIR_RXMBFA (1 << 3) + +#define KS_GRR 0x26 +#define GRR_QMU (1 << 1) +#define GRR_GSR (1 << 0) + +#define KS_WFCR 0x2A +#define WFCR_MPRXE (1 << 7) +#define WFCR_WF3E (1 << 3) +#define WFCR_WF2E (1 << 2) +#define WFCR_WF1E (1 << 1) +#define WFCR_WF0E (1 << 0) + +#define KS_WF0CRC0 0x30 +#define KS_WF0CRC1 0x32 +#define KS_WF0BM0 0x34 +#define KS_WF0BM1 0x36 +#define KS_WF0BM2 0x38 +#define KS_WF0BM3 0x3A + +#define KS_WF1CRC0 0x40 +#define KS_WF1CRC1 0x42 +#define KS_WF1BM0 0x44 +#define KS_WF1BM1 0x46 +#define KS_WF1BM2 0x48 +#define KS_WF1BM3 0x4A + +#define KS_WF2CRC0 0x50 +#define KS_WF2CRC1 0x52 +#define KS_WF2BM0 0x54 +#define KS_WF2BM1 0x56 +#define KS_WF2BM2 0x58 +#define KS_WF2BM3 0x5A + +#define KS_WF3CRC0 0x60 +#define KS_WF3CRC1 0x62 +#define KS_WF3BM0 0x64 +#define KS_WF3BM1 0x66 +#define KS_WF3BM2 0x68 +#define KS_WF3BM3 0x6A + +#define KS_TXCR 0x70 +#define TXCR_TCGICMP (1 << 8) +#define TXCR_TCGUDP (1 << 7) +#define TXCR_TCGTCP (1 << 6) +#define TXCR_TCGIP (1 << 5) +#define TXCR_FTXQ (1 << 4) +#define TXCR_TXFCE (1 << 3) +#define TXCR_TXPE (1 << 2) +#define TXCR_TXCRC (1 << 1) +#define TXCR_TXE (1 << 0) + +#define KS_TXSR 0x72 +#define TXSR_TXLC (1 << 13) +#define TXSR_TXMC (1 << 12) +#define TXSR_TXFID_MASK (0x3f << 0) +#define TXSR_TXFID_SHIFT (0) +#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) + +#define KS_RXCR1 0x74 +#define RXCR1_FRXQ (1 << 15) +#define RXCR1_RXUDPFCC (1 << 14) +#define RXCR1_RXTCPFCC (1 << 13) +#define RXCR1_RXIPFCC (1 << 12) +#define RXCR1_RXPAFMA (1 << 11) +#define RXCR1_RXFCE (1 << 10) +#define RXCR1_RXEFE (1 << 9) +#define RXCR1_RXMAFMA (1 << 8) +#define RXCR1_RXBE (1 << 7) +#define RXCR1_RXME (1 << 6) +#define RXCR1_RXUE (1 << 5) +#define RXCR1_RXAE (1 << 4) +#define RXCR1_RXINVF (1 << 1) +#define RXCR1_RXE (1 << 0) + +#define KS_RXCR2 0x76 +#define RXCR2_SRDBL_MASK (0x7 << 5) +#define RXCR2_SRDBL_SHIFT (5) +#define RXCR2_SRDBL_4B (0x0 << 5) +#define RXCR2_SRDBL_8B (0x1 << 5) +#define RXCR2_SRDBL_16B (0x2 << 5) +#define RXCR2_SRDBL_32B (0x3 << 5) +#define RXCR2_SRDBL_FRAME (0x4 << 5) +#define RXCR2_IUFFP (1 << 4) +#define RXCR2_RXIUFCEZ (1 << 3) +#define RXCR2_UDPLFE (1 << 2) +#define RXCR2_RXICMPFCC (1 << 1) +#define RXCR2_RXSAF (1 << 0) + +#define KS_TXMIR 0x78 + +#define KS_RXFHSR 0x7C +#define RXFSHR_RXFV (1 << 15) +#define RXFSHR_RXICMPFCS (1 << 13) +#define RXFSHR_RXIPFCS (1 << 12) +#define RXFSHR_RXTCPFCS (1 << 11) +#define RXFSHR_RXUDPFCS (1 << 10) +#define RXFSHR_RXBF (1 << 7) +#define RXFSHR_RXMF (1 << 6) +#define RXFSHR_RXUF (1 << 5) +#define RXFSHR_RXMR (1 << 4) +#define RXFSHR_RXFT (1 << 3) +#define RXFSHR_RXFTL (1 << 2) +#define RXFSHR_RXRF (1 << 1) +#define RXFSHR_RXCE (1 << 0) + +#define KS_RXFHBCR 0x7E +#define KS_TXQCR 0x80 +#define TXQCR_AETFE (1 << 2) +#define TXQCR_TXQMAM (1 << 1) +#define TXQCR_METFE (1 << 0) + +#define KS_RXQCR 0x82 +#define RXQCR_RXDTTS (1 << 12) +#define RXQCR_RXDBCTS (1 << 11) +#define 
RXQCR_RXFCTS (1 << 10) +#define RXQCR_RXIPHTOE (1 << 9) +#define RXQCR_RXDTTE (1 << 7) +#define RXQCR_RXDBCTE (1 << 6) +#define RXQCR_RXFCTE (1 << 5) +#define RXQCR_ADRFE (1 << 4) +#define RXQCR_SDA (1 << 3) +#define RXQCR_RRXEF (1 << 0) + +#define KS_TXFDPR 0x84 +#define TXFDPR_TXFPAI (1 << 14) +#define TXFDPR_TXFP_MASK (0x7ff << 0) +#define TXFDPR_TXFP_SHIFT (0) + +#define KS_RXFDPR 0x86 +#define RXFDPR_RXFPAI (1 << 14) + +#define KS_RXDTTR 0x8C +#define KS_RXDBCTR 0x8E + +#define KS_IER 0x90 +#define KS_ISR 0x92 +#define IRQ_LCI (1 << 15) +#define IRQ_TXI (1 << 14) +#define IRQ_RXI (1 << 13) +#define IRQ_RXOI (1 << 11) +#define IRQ_TXPSI (1 << 9) +#define IRQ_RXPSI (1 << 8) +#define IRQ_TXSAI (1 << 6) +#define IRQ_RXWFDI (1 << 5) +#define IRQ_RXMPDI (1 << 4) +#define IRQ_LDI (1 << 3) +#define IRQ_EDI (1 << 2) +#define IRQ_SPIBEI (1 << 1) +#define IRQ_DEDI (1 << 0) + +#define KS_RXFCTR 0x9C +#define KS_RXFC 0x9D +#define RXFCTR_RXFC_MASK (0xff << 8) +#define RXFCTR_RXFC_SHIFT (8) +#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) +#define RXFCTR_RXFCT_MASK (0xff << 0) +#define RXFCTR_RXFCT_SHIFT (0) + +#define KS_TXNTFSR 0x9E + +#define KS_MAHTR0 0xA0 +#define KS_MAHTR1 0xA2 +#define KS_MAHTR2 0xA4 +#define KS_MAHTR3 0xA6 + +#define KS_FCLWR 0xB0 +#define KS_FCHWR 0xB2 +#define KS_FCOWR 0xB4 + +#define KS_CIDER 0xC0 +#define CIDER_ID 0x8870 +#define CIDER_REV_MASK (0x7 << 1) +#define CIDER_REV_SHIFT (1) +#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) + +#define KS_CGCR 0xC6 + +#define KS_IACR 0xC8 +#define IACR_RDEN (1 << 12) +#define IACR_TSEL_MASK (0x3 << 10) +#define IACR_TSEL_SHIFT (10) +#define IACR_TSEL_MIB (0x3 << 10) +#define IACR_ADDR_MASK (0x1f << 0) +#define IACR_ADDR_SHIFT (0) + +#define KS_IADLR 0xD0 +#define KS_IAHDR 0xD2 + +#define KS_PMECR 0xD4 +#define PMECR_PME_DELAY (1 << 14) +#define PMECR_PME_POL (1 << 12) +#define PMECR_WOL_WAKEUP (1 << 11) +#define PMECR_WOL_MAGICPKT (1 << 10) +#define PMECR_WOL_LINKUP (1 << 9) +#define PMECR_WOL_ENERGY (1 << 8) +#define PMECR_AUTO_WAKE_EN (1 << 7) +#define PMECR_WAKEUP_NORMAL (1 << 6) +#define PMECR_WKEVT_MASK (0xf << 2) +#define PMECR_WKEVT_SHIFT (2) +#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) +#define PMECR_WKEVT_ENERGY (0x1 << 2) +#define PMECR_WKEVT_LINK (0x2 << 2) +#define PMECR_WKEVT_MAGICPKT (0x4 << 2) +#define PMECR_WKEVT_FRAME (0x8 << 2) +#define PMECR_PM_MASK (0x3 << 0) +#define PMECR_PM_SHIFT (0) +#define PMECR_PM_NORMAL (0x0 << 0) +#define PMECR_PM_ENERGY (0x1 << 0) +#define PMECR_PM_SOFTDOWN (0x2 << 0) +#define PMECR_PM_POWERSAVE (0x3 << 0) + +/* Standard MII PHY data */ +#define KS_P1MBCR 0xE4 +#define KS_P1MBSR 0xE6 +#define KS_PHY1ILR 0xE8 +#define KS_PHY1IHR 0xEA +#define KS_P1ANAR 0xEC +#define KS_P1ANLPR 0xEE + +#define KS_P1SCLMD 0xF4 +#define P1SCLMD_LEDOFF (1 << 15) +#define P1SCLMD_TXIDS (1 << 14) +#define P1SCLMD_RESTARTAN (1 << 13) +#define P1SCLMD_DISAUTOMDIX (1 << 10) +#define P1SCLMD_FORCEMDIX (1 << 9) +#define P1SCLMD_AUTONEGEN (1 << 7) +#define P1SCLMD_FORCE100 (1 << 6) +#define P1SCLMD_FORCEFDX (1 << 5) +#define P1SCLMD_ADV_FLOW (1 << 4) +#define P1SCLMD_ADV_100BT_FDX (1 << 3) +#define P1SCLMD_ADV_100BT_HDX (1 << 2) +#define P1SCLMD_ADV_10BT_FDX (1 << 1) +#define P1SCLMD_ADV_10BT_HDX (1 << 0) + +#define KS_P1CR 0xF6 +#define P1CR_HP_MDIX (1 << 15) +#define P1CR_REV_POL (1 << 13) +#define P1CR_OP_100M (1 << 10) +#define P1CR_OP_FDX (1 << 9) +#define P1CR_OP_MDI (1 << 7) +#define P1CR_AN_DONE (1 << 6) +#define P1CR_LINK_GOOD (1 << 5) +#define P1CR_PNTR_FLOW (1 << 4) +#define P1CR_PNTR_100BT_FDX (1 
<< 3) +#define P1CR_PNTR_100BT_HDX (1 << 2) +#define P1CR_PNTR_10BT_FDX (1 << 1) +#define P1CR_PNTR_10BT_HDX (1 << 0) + +/* TX Frame control */ + +#define TXFR_TXIC (1 << 15) +#define TXFR_TXFID_MASK (0x3f << 0) +#define TXFR_TXFID_SHIFT (0) + +/* SPI frame opcodes */ +#define KS_SPIOP_RD (0x00) +#define KS_SPIOP_WR (0x40) +#define KS_SPIOP_RXFIFO (0x80) +#define KS_SPIOP_TXFIFO (0xC0) diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c new file mode 100644 index 000000000..2fc5cd56c --- /dev/null +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -0,0 +1,1695 @@ +/** + * drivers/net/ethernet/micrel/ks8851_mll.c + * Copyright (c) 2009 Micrel Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* Supports: + * KS8851 16bit MLL chip from Micrel Inc. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "ks8851_mll" + +static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; +#define MAX_RECV_FRAMES 255 +#define MAX_BUF_SIZE 2048 +#define TX_BUF_SIZE 2000 +#define RX_BUF_SIZE 2000 + +#define KS_CCR 0x08 +#define CCR_EEPROM (1 << 9) +#define CCR_SPI (1 << 8) +#define CCR_8BIT (1 << 7) +#define CCR_16BIT (1 << 6) +#define CCR_32BIT (1 << 5) +#define CCR_SHARED (1 << 4) +#define CCR_32PIN (1 << 0) + +/* MAC address registers */ +#define KS_MARL 0x10 +#define KS_MARM 0x12 +#define KS_MARH 0x14 + +#define KS_OBCR 0x20 +#define OBCR_ODS_16MA (1 << 6) + +#define KS_EEPCR 0x22 +#define EEPCR_EESA (1 << 4) +#define EEPCR_EESB (1 << 3) +#define EEPCR_EEDO (1 << 2) +#define EEPCR_EESCK (1 << 1) +#define EEPCR_EECS (1 << 0) + +#define KS_MBIR 0x24 +#define MBIR_TXMBF (1 << 12) +#define MBIR_TXMBFA (1 << 11) +#define MBIR_RXMBF (1 << 4) +#define MBIR_RXMBFA (1 << 3) + +#define KS_GRR 0x26 +#define GRR_QMU (1 << 1) +#define GRR_GSR (1 << 0) + +#define KS_WFCR 0x2A +#define WFCR_MPRXE (1 << 7) +#define WFCR_WF3E (1 << 3) +#define WFCR_WF2E (1 << 2) +#define WFCR_WF1E (1 << 1) +#define WFCR_WF0E (1 << 0) + +#define KS_WF0CRC0 0x30 +#define KS_WF0CRC1 0x32 +#define KS_WF0BM0 0x34 +#define KS_WF0BM1 0x36 +#define KS_WF0BM2 0x38 +#define KS_WF0BM3 0x3A + +#define KS_WF1CRC0 0x40 +#define KS_WF1CRC1 0x42 +#define KS_WF1BM0 0x44 +#define KS_WF1BM1 0x46 +#define KS_WF1BM2 0x48 +#define KS_WF1BM3 0x4A + +#define KS_WF2CRC0 0x50 +#define KS_WF2CRC1 0x52 +#define KS_WF2BM0 0x54 +#define KS_WF2BM1 0x56 +#define KS_WF2BM2 0x58 +#define KS_WF2BM3 0x5A + +#define KS_WF3CRC0 0x60 +#define KS_WF3CRC1 0x62 +#define KS_WF3BM0 0x64 +#define KS_WF3BM1 0x66 +#define KS_WF3BM2 0x68 +#define KS_WF3BM3 0x6A + +#define KS_TXCR 0x70 +#define TXCR_TCGICMP (1 << 8) +#define TXCR_TCGUDP (1 << 7) +#define TXCR_TCGTCP (1 << 6) +#define TXCR_TCGIP (1 << 5) +#define TXCR_FTXQ (1 << 4) +#define TXCR_TXFCE 
(1 << 3) +#define TXCR_TXPE (1 << 2) +#define TXCR_TXCRC (1 << 1) +#define TXCR_TXE (1 << 0) + +#define KS_TXSR 0x72 +#define TXSR_TXLC (1 << 13) +#define TXSR_TXMC (1 << 12) +#define TXSR_TXFID_MASK (0x3f << 0) +#define TXSR_TXFID_SHIFT (0) +#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f) + + +#define KS_RXCR1 0x74 +#define RXCR1_FRXQ (1 << 15) +#define RXCR1_RXUDPFCC (1 << 14) +#define RXCR1_RXTCPFCC (1 << 13) +#define RXCR1_RXIPFCC (1 << 12) +#define RXCR1_RXPAFMA (1 << 11) +#define RXCR1_RXFCE (1 << 10) +#define RXCR1_RXEFE (1 << 9) +#define RXCR1_RXMAFMA (1 << 8) +#define RXCR1_RXBE (1 << 7) +#define RXCR1_RXME (1 << 6) +#define RXCR1_RXUE (1 << 5) +#define RXCR1_RXAE (1 << 4) +#define RXCR1_RXINVF (1 << 1) +#define RXCR1_RXE (1 << 0) +#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ + RXCR1_RXMAFMA | RXCR1_RXPAFMA) + +#define KS_RXCR2 0x76 +#define RXCR2_SRDBL_MASK (0x7 << 5) +#define RXCR2_SRDBL_SHIFT (5) +#define RXCR2_SRDBL_4B (0x0 << 5) +#define RXCR2_SRDBL_8B (0x1 << 5) +#define RXCR2_SRDBL_16B (0x2 << 5) +#define RXCR2_SRDBL_32B (0x3 << 5) +/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */ +#define RXCR2_IUFFP (1 << 4) +#define RXCR2_RXIUFCEZ (1 << 3) +#define RXCR2_UDPLFE (1 << 2) +#define RXCR2_RXICMPFCC (1 << 1) +#define RXCR2_RXSAF (1 << 0) + +#define KS_TXMIR 0x78 + +#define KS_RXFHSR 0x7C +#define RXFSHR_RXFV (1 << 15) +#define RXFSHR_RXICMPFCS (1 << 13) +#define RXFSHR_RXIPFCS (1 << 12) +#define RXFSHR_RXTCPFCS (1 << 11) +#define RXFSHR_RXUDPFCS (1 << 10) +#define RXFSHR_RXBF (1 << 7) +#define RXFSHR_RXMF (1 << 6) +#define RXFSHR_RXUF (1 << 5) +#define RXFSHR_RXMR (1 << 4) +#define RXFSHR_RXFT (1 << 3) +#define RXFSHR_RXFTL (1 << 2) +#define RXFSHR_RXRF (1 << 1) +#define RXFSHR_RXCE (1 << 0) +#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\ + RXFSHR_RXFTL | RXFSHR_RXMR |\ + RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\ + RXFSHR_RXTCPFCS) +#define KS_RXFHBCR 0x7E +#define RXFHBCR_CNT_MASK 0x0FFF + +#define KS_TXQCR 0x80 +#define TXQCR_AETFE (1 << 2) +#define TXQCR_TXQMAM (1 << 1) +#define TXQCR_METFE (1 << 0) + +#define KS_RXQCR 0x82 +#define RXQCR_RXDTTS (1 << 12) +#define RXQCR_RXDBCTS (1 << 11) +#define RXQCR_RXFCTS (1 << 10) +#define RXQCR_RXIPHTOE (1 << 9) +#define RXQCR_RXDTTE (1 << 7) +#define RXQCR_RXDBCTE (1 << 6) +#define RXQCR_RXFCTE (1 << 5) +#define RXQCR_ADRFE (1 << 4) +#define RXQCR_SDA (1 << 3) +#define RXQCR_RRXEF (1 << 0) +#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) + +#define KS_TXFDPR 0x84 +#define TXFDPR_TXFPAI (1 << 14) +#define TXFDPR_TXFP_MASK (0x7ff << 0) +#define TXFDPR_TXFP_SHIFT (0) + +#define KS_RXFDPR 0x86 +#define RXFDPR_RXFPAI (1 << 14) + +#define KS_RXDTTR 0x8C +#define KS_RXDBCTR 0x8E + +#define KS_IER 0x90 +#define KS_ISR 0x92 +#define IRQ_LCI (1 << 15) +#define IRQ_TXI (1 << 14) +#define IRQ_RXI (1 << 13) +#define IRQ_RXOI (1 << 11) +#define IRQ_TXPSI (1 << 9) +#define IRQ_RXPSI (1 << 8) +#define IRQ_TXSAI (1 << 6) +#define IRQ_RXWFDI (1 << 5) +#define IRQ_RXMPDI (1 << 4) +#define IRQ_LDI (1 << 3) +#define IRQ_EDI (1 << 2) +#define IRQ_SPIBEI (1 << 1) +#define IRQ_DEDI (1 << 0) + +#define KS_RXFCTR 0x9C +#define RXFCTR_THRESHOLD_MASK 0x00FF + +#define KS_RXFC 0x9D +#define RXFCTR_RXFC_MASK (0xff << 8) +#define RXFCTR_RXFC_SHIFT (8) +#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff) +#define RXFCTR_RXFCT_MASK (0xff << 0) +#define RXFCTR_RXFCT_SHIFT (0) + +#define KS_TXNTFSR 0x9E + +#define KS_MAHTR0 0xA0 +#define KS_MAHTR1 0xA2 +#define KS_MAHTR2 0xA4 +#define KS_MAHTR3 0xA6 + +#define KS_FCLWR 0xB0 +#define KS_FCHWR 0xB2 +#define KS_FCOWR 
0xB4 + +#define KS_CIDER 0xC0 +#define CIDER_ID 0x8870 +#define CIDER_REV_MASK (0x7 << 1) +#define CIDER_REV_SHIFT (1) +#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7) + +#define KS_CGCR 0xC6 +#define KS_IACR 0xC8 +#define IACR_RDEN (1 << 12) +#define IACR_TSEL_MASK (0x3 << 10) +#define IACR_TSEL_SHIFT (10) +#define IACR_TSEL_MIB (0x3 << 10) +#define IACR_ADDR_MASK (0x1f << 0) +#define IACR_ADDR_SHIFT (0) + +#define KS_IADLR 0xD0 +#define KS_IAHDR 0xD2 + +#define KS_PMECR 0xD4 +#define PMECR_PME_DELAY (1 << 14) +#define PMECR_PME_POL (1 << 12) +#define PMECR_WOL_WAKEUP (1 << 11) +#define PMECR_WOL_MAGICPKT (1 << 10) +#define PMECR_WOL_LINKUP (1 << 9) +#define PMECR_WOL_ENERGY (1 << 8) +#define PMECR_AUTO_WAKE_EN (1 << 7) +#define PMECR_WAKEUP_NORMAL (1 << 6) +#define PMECR_WKEVT_MASK (0xf << 2) +#define PMECR_WKEVT_SHIFT (2) +#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf) +#define PMECR_WKEVT_ENERGY (0x1 << 2) +#define PMECR_WKEVT_LINK (0x2 << 2) +#define PMECR_WKEVT_MAGICPKT (0x4 << 2) +#define PMECR_WKEVT_FRAME (0x8 << 2) +#define PMECR_PM_MASK (0x3 << 0) +#define PMECR_PM_SHIFT (0) +#define PMECR_PM_NORMAL (0x0 << 0) +#define PMECR_PM_ENERGY (0x1 << 0) +#define PMECR_PM_SOFTDOWN (0x2 << 0) +#define PMECR_PM_POWERSAVE (0x3 << 0) + +/* Standard MII PHY data */ +#define KS_P1MBCR 0xE4 +#define P1MBCR_FORCE_FDX (1 << 8) + +#define KS_P1MBSR 0xE6 +#define P1MBSR_AN_COMPLETE (1 << 5) +#define P1MBSR_AN_CAPABLE (1 << 3) +#define P1MBSR_LINK_UP (1 << 2) + +#define KS_PHY1ILR 0xE8 +#define KS_PHY1IHR 0xEA +#define KS_P1ANAR 0xEC +#define KS_P1ANLPR 0xEE + +#define KS_P1SCLMD 0xF4 +#define P1SCLMD_LEDOFF (1 << 15) +#define P1SCLMD_TXIDS (1 << 14) +#define P1SCLMD_RESTARTAN (1 << 13) +#define P1SCLMD_DISAUTOMDIX (1 << 10) +#define P1SCLMD_FORCEMDIX (1 << 9) +#define P1SCLMD_AUTONEGEN (1 << 7) +#define P1SCLMD_FORCE100 (1 << 6) +#define P1SCLMD_FORCEFDX (1 << 5) +#define P1SCLMD_ADV_FLOW (1 << 4) +#define P1SCLMD_ADV_100BT_FDX (1 << 3) +#define P1SCLMD_ADV_100BT_HDX (1 << 2) +#define P1SCLMD_ADV_10BT_FDX (1 << 1) +#define P1SCLMD_ADV_10BT_HDX (1 << 0) + +#define KS_P1CR 0xF6 +#define P1CR_HP_MDIX (1 << 15) +#define P1CR_REV_POL (1 << 13) +#define P1CR_OP_100M (1 << 10) +#define P1CR_OP_FDX (1 << 9) +#define P1CR_OP_MDI (1 << 7) +#define P1CR_AN_DONE (1 << 6) +#define P1CR_LINK_GOOD (1 << 5) +#define P1CR_PNTR_FLOW (1 << 4) +#define P1CR_PNTR_100BT_FDX (1 << 3) +#define P1CR_PNTR_100BT_HDX (1 << 2) +#define P1CR_PNTR_10BT_FDX (1 << 1) +#define P1CR_PNTR_10BT_HDX (1 << 0) + +/* TX Frame control */ + +#define TXFR_TXIC (1 << 15) +#define TXFR_TXFID_MASK (0x3f << 0) +#define TXFR_TXFID_SHIFT (0) + +#define KS_P1SR 0xF8 +#define P1SR_HP_MDIX (1 << 15) +#define P1SR_REV_POL (1 << 13) +#define P1SR_OP_100M (1 << 10) +#define P1SR_OP_FDX (1 << 9) +#define P1SR_OP_MDI (1 << 7) +#define P1SR_AN_DONE (1 << 6) +#define P1SR_LINK_GOOD (1 << 5) +#define P1SR_PNTR_FLOW (1 << 4) +#define P1SR_PNTR_100BT_FDX (1 << 3) +#define P1SR_PNTR_100BT_HDX (1 << 2) +#define P1SR_PNTR_10BT_FDX (1 << 1) +#define P1SR_PNTR_10BT_HDX (1 << 0) + +#define ENUM_BUS_NONE 0 +#define ENUM_BUS_8BIT 1 +#define ENUM_BUS_16BIT 2 +#define ENUM_BUS_32BIT 3 + +#define MAX_MCAST_LST 32 +#define HW_MCAST_SIZE 8 + +/** + * union ks_tx_hdr - tx header data + * @txb: The header as bytes + * @txw: The header as 16bit, little-endian words + * + * A dual representation of the tx header data to allow + * access to individual bytes, and to allow 16bit accesses + * with 16bit alignment. 
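+ * Unlike the SPI variant above no bus opcode byte is needed here, so the
+ * header is just two 16-bit words: the control/frame-id word and the
+ * byte count.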
+ */ +union ks_tx_hdr { + u8 txb[4]; + __le16 txw[2]; +}; + +/** + * struct ks_net - KS8851 driver private data + * @net_device : The network device we're bound to + * @hw_addr : start address of data register. + * @hw_addr_cmd : start address of command register. + * @txh : temporaly buffer to save status/length. + * @lock : Lock to ensure that the device is not accessed when busy. + * @pdev : Pointer to platform device. + * @mii : The MII state information for the mii calls. + * @frame_head_info : frame header information for multi-pkt rx. + * @statelock : Lock on this structure for tx list. + * @msg_enable : The message flags controlling driver output (see ethtool). + * @frame_cnt : number of frames received. + * @bus_width : i/o bus width. + * @rc_rxqcr : Cached copy of KS_RXQCR. + * @rc_txcr : Cached copy of KS_TXCR. + * @rc_ier : Cached copy of KS_IER. + * @sharedbus : Multipex(addr and data bus) mode indicator. + * @cmd_reg_cache : command register cached. + * @cmd_reg_cache_int : command register cached. Used in the irq handler. + * @promiscuous : promiscuous mode indicator. + * @all_mcast : mutlicast indicator. + * @mcast_lst_size : size of multicast list. + * @mcast_lst : multicast list. + * @mcast_bits : multicast enabed. + * @mac_addr : MAC address assigned to this device. + * @fid : frame id. + * @extra_byte : number of extra byte prepended rx pkt. + * @enabled : indicator this device works. + * + * The @lock ensures that the chip is protected when certain operations are + * in progress. When the read or write packet transfer is in progress, most + * of the chip registers are not accessible until the transfer is finished and + * the DMA has been de-asserted. + * + * The @statelock is used to protect information in the structure which may + * need to be accessed via several sources, such as the network driver layer + * or one of the work queues. + * + */ + +/* Receive multiplex framer header info */ +struct type_frame_head { + u16 sts; /* Frame status */ + u16 len; /* Byte count */ +}; + +struct ks_net { + struct net_device *netdev; + void __iomem *hw_addr; + void __iomem *hw_addr_cmd; + union ks_tx_hdr txh ____cacheline_aligned; + struct mutex lock; /* spinlock to be interrupt safe */ + struct platform_device *pdev; + struct mii_if_info mii; + struct type_frame_head *frame_head_info; + spinlock_t statelock; + u32 msg_enable; + u32 frame_cnt; + int bus_width; + + u16 rc_rxqcr; + u16 rc_txcr; + u16 rc_ier; + u16 sharedbus; + u16 cmd_reg_cache; + u16 cmd_reg_cache_int; + u16 promiscuous; + u16 all_mcast; + u16 mcast_lst_size; + u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN]; + u8 mcast_bits[HW_MCAST_SIZE]; + u8 mac_addr[6]; + u8 fid; + u8 extra_byte; + u8 enabled; +}; + +static int msg_enable; + +#define BE3 0x8000 /* Byte Enable 3 */ +#define BE2 0x4000 /* Byte Enable 2 */ +#define BE1 0x2000 /* Byte Enable 1 */ +#define BE0 0x1000 /* Byte Enable 0 */ + +/* register read/write calls. + * + * All these calls issue transactions to access the chip's registers. They + * all require that the necessary lock is held to prevent accesses when the + * chip is busy transferring packet data (RX/TX FIFO accesses). 
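+ * On this parallel-bus part every access is a two-step sequence: the
+ * register address plus byte-enable bits are first written to the command
+ * window at @hw_addr_cmd, then the data word is read from or written to
+ * the data window at @hw_addr. For example, reading KS_CIDER (0xC0) first
+ * writes 0x30c0 (the address ORed with BE1 | BE0) to the command window
+ * and then reads the data window.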
+ */ + +/** + * ks_rdreg8 - read 8 bit register from device + * @ks : The chip information + * @offset: The register address + * + * Read a 8bit register from the chip, returning the result + */ +static u8 ks_rdreg8(struct ks_net *ks, int offset) +{ + u16 data; + u8 shift_bit = offset & 0x03; + u8 shift_data = (offset & 1) << 3; + ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + data = ioread16(ks->hw_addr); + return (u8)(data >> shift_data); +} + +/** + * ks_rdreg16 - read 16 bit register from device + * @ks : The chip information + * @offset: The register address + * + * Read a 16bit register from the chip, returning the result + */ + +static u16 ks_rdreg16(struct ks_net *ks, int offset) +{ + ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + return ioread16(ks->hw_addr); +} + +/** + * ks_wrreg8 - write 8bit register value to chip + * @ks: The chip information + * @offset: The register address + * @value: The value to write + * + */ +static void ks_wrreg8(struct ks_net *ks, int offset, u8 value) +{ + u8 shift_bit = (offset & 0x03); + u16 value_write = (u16)(value << ((offset & 1) << 3)); + ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + iowrite16(value_write, ks->hw_addr); +} + +/** + * ks_wrreg16 - write 16bit register value to chip + * @ks: The chip information + * @offset: The register address + * @value: The value to write + * + */ + +static void ks_wrreg16(struct ks_net *ks, int offset, u16 value) +{ + ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02)); + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); + iowrite16(value, ks->hw_addr); +} + +/** + * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode enabled. + * @ks: The chip state + * @wptr: buffer address to save data + * @len: length in byte to read + * + */ +static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len) +{ + len >>= 1; + while (len--) + *wptr++ = (u16)ioread16(ks->hw_addr); +} + +/** + * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled. + * @ks: The chip information + * @wptr: buffer address + * @len: length in byte to write + * + */ +static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len) +{ + len >>= 1; + while (len--) + iowrite16(*wptr++, ks->hw_addr); +} + +static void ks_disable_int(struct ks_net *ks) +{ + ks_wrreg16(ks, KS_IER, 0x0000); +} /* ks_disable_int */ + +static void ks_enable_int(struct ks_net *ks) +{ + ks_wrreg16(ks, KS_IER, ks->rc_ier); +} /* ks_enable_int */ + +/** + * ks_tx_fifo_space - return the available hardware buffer size. + * @ks: The chip information + * + */ +static inline u16 ks_tx_fifo_space(struct ks_net *ks) +{ + return ks_rdreg16(ks, KS_TXMIR) & 0x1fff; +} + +/** + * ks_save_cmd_reg - save the command register from the cache. + * @ks: The chip information + * + */ +static inline void ks_save_cmd_reg(struct ks_net *ks) +{ + /*ks8851 MLL has a bug to read back the command register. + * So rely on software to save the content of command register. + */ + ks->cmd_reg_cache_int = ks->cmd_reg_cache; +} + +/** + * ks_restore_cmd_reg - restore the command register from the cache and + * write to hardware register. 
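+ *
+ * Paired with ks_save_cmd_reg() in the interrupt handler: the command
+ * word that was in force when the interrupt arrived is written back to
+ * the chip before the handler returns.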
+ * @ks: The chip information + * + */ +static inline void ks_restore_cmd_reg(struct ks_net *ks) +{ + ks->cmd_reg_cache = ks->cmd_reg_cache_int; + iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd); +} + +/** + * ks_set_powermode - set power mode of the device + * @ks: The chip information + * @pwrmode: The power mode value to write to KS_PMECR. + * + * Change the power mode of the chip. + */ +static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode) +{ + unsigned pmecr; + + netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode); + + ks_rdreg16(ks, KS_GRR); + pmecr = ks_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_PM_MASK; + pmecr |= pwrmode; + + ks_wrreg16(ks, KS_PMECR, pmecr); +} + +/** + * ks_read_config - read chip configuration of bus width. + * @ks: The chip information + * + */ +static void ks_read_config(struct ks_net *ks) +{ + u16 reg_data = 0; + + /* Regardless of bus width, 8 bit read should always work.*/ + reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF; + reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8; + + /* addr/data bus are multiplexed */ + ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED; + + /* There are garbage data when reading data from QMU, + depending on bus-width. + */ + + if (reg_data & CCR_8BIT) { + ks->bus_width = ENUM_BUS_8BIT; + ks->extra_byte = 1; + } else if (reg_data & CCR_16BIT) { + ks->bus_width = ENUM_BUS_16BIT; + ks->extra_byte = 2; + } else { + ks->bus_width = ENUM_BUS_32BIT; + ks->extra_byte = 4; + } +} + +/** + * ks_soft_reset - issue one of the soft reset to the device + * @ks: The device state. + * @op: The bit(s) to set in the GRR + * + * Issue the relevant soft-reset command to the device's GRR register + * specified by @op. + * + * Note, the delays are in there as a caution to ensure that the reset + * has time to take effect and then complete. Since the datasheet does + * not currently specify the exact sequence, we have chosen something + * that seems to work with our device. + */ +static void ks_soft_reset(struct ks_net *ks, unsigned op) +{ + /* Disable interrupt first */ + ks_wrreg16(ks, KS_IER, 0x0000); + ks_wrreg16(ks, KS_GRR, op); + mdelay(10); /* wait a short time to effect reset */ + ks_wrreg16(ks, KS_GRR, 0); + mdelay(1); /* wait for condition to clear */ +} + + +static void ks_enable_qmu(struct ks_net *ks) +{ + u16 w; + + w = ks_rdreg16(ks, KS_TXCR); + /* Enables QMU Transmit (TXCR). */ + ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE); + + /* + * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame + * Enable + */ + + w = ks_rdreg16(ks, KS_RXQCR); + ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE); + + /* Enables QMU Receive (RXCR1). */ + w = ks_rdreg16(ks, KS_RXCR1); + ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE); + ks->enabled = true; +} /* ks_enable_qmu */ + +static void ks_disable_qmu(struct ks_net *ks) +{ + u16 w; + + w = ks_rdreg16(ks, KS_TXCR); + + /* Disables QMU Transmit (TXCR). */ + w &= ~TXCR_TXE; + ks_wrreg16(ks, KS_TXCR, w); + + /* Disables QMU Receive (RXCR1). */ + w = ks_rdreg16(ks, KS_RXCR1); + w &= ~RXCR1_RXE ; + ks_wrreg16(ks, KS_RXCR1, w); + + ks->enabled = false; + +} /* ks_disable_qmu */ + +/** + * ks_read_qmu - read 1 pkt data from the QMU. + * @ks: The chip information + * @buf: buffer address to save 1 pkt + * @len: Pkt length + * Here is the sequence to read 1 pkt: + * 1. set sudo DMA mode + * 2. read prepend data + * 3. read pkt data + * 4. reset sudo DMA Mode + */ +static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len) +{ + u32 r = ks->extra_byte & 0x1 ; + u32 w = ks->extra_byte - r; + + /* 1. 
set sudo DMA mode */ + ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); + ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + + /* 2. read prepend data */ + /** + * read 4 + extra bytes and discard them. + * extra bytes for dummy, 2 for status, 2 for len + */ + + /* use likely(r) for 8 bit access for performance */ + if (unlikely(r)) + ioread8(ks->hw_addr); + ks_inblk(ks, buf, w + 2 + 2); + + /* 3. read pkt data */ + ks_inblk(ks, buf, ALIGN(len, 4)); + + /* 4. reset sudo DMA Mode */ + ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); +} + +/** + * ks_rcv - read multiple pkts data from the QMU. + * @ks: The chip information + * @netdev: The network device being opened. + * + * Read all of header information before reading pkt content. + * It is not allowed only port of pkts in QMU after issuing + * interrupt ack. + */ +static void ks_rcv(struct ks_net *ks, struct net_device *netdev) +{ + u32 i; + struct type_frame_head *frame_hdr = ks->frame_head_info; + struct sk_buff *skb; + + ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8; + + /* read all header information */ + for (i = 0; i < ks->frame_cnt; i++) { + /* Checking Received packet status */ + frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR); + /* Get packet len from hardware */ + frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR); + frame_hdr++; + } + + frame_hdr = ks->frame_head_info; + while (ks->frame_cnt--) { + if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) || + frame_hdr->len >= RX_BUF_SIZE || + frame_hdr->len <= 0)) { + + /* discard an invalid packet */ + ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); + netdev->stats.rx_dropped++; + if (!(frame_hdr->sts & RXFSHR_RXFV)) + netdev->stats.rx_frame_errors++; + else + netdev->stats.rx_length_errors++; + frame_hdr++; + continue; + } + + skb = netdev_alloc_skb(netdev, frame_hdr->len + 16); + if (likely(skb)) { + skb_reserve(skb, 2); + /* read data block including CRC 4 bytes */ + ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len); + skb_put(skb, frame_hdr->len - 4); + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); + /* exclude CRC size */ + netdev->stats.rx_bytes += frame_hdr->len - 4; + netdev->stats.rx_packets++; + } else { + ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); + netdev->stats.rx_dropped++; + } + frame_hdr++; + } +} + +/** + * ks_update_link_status - link status update. + * @netdev: The network device being opened. + * @ks: The chip information + * + */ + +static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks) +{ + /* check the status of the link */ + u32 link_up_status; + if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) { + netif_carrier_on(netdev); + link_up_status = true; + } else { + netif_carrier_off(netdev); + link_up_status = false; + } + netif_dbg(ks, link, ks->netdev, + "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN"); +} + +/** + * ks_irq - device interrupt handler + * @irq: Interrupt number passed from the IRQ handler. + * @pw: The private word passed to register_irq(), our struct ks_net. + * + * This is the handler invoked to find out what happened + * + * Read the interrupt status, work out what needs to be done and then clear + * any of the interrupts that are not needed. 
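+ *
+ * Because the KS8851 MLL cannot read its command register back, the
+ * cached command word is saved with ks_save_cmd_reg() on entry and
+ * re-issued with ks_restore_cmd_reg() on exit, so a register access
+ * interrupted by this handler resumes with the selection it had set.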
+ */ + +static irqreturn_t ks_irq(int irq, void *pw) +{ + struct net_device *netdev = pw; + struct ks_net *ks = netdev_priv(netdev); + u16 status; + + /*this should be the first in IRQ handler */ + ks_save_cmd_reg(ks); + + status = ks_rdreg16(ks, KS_ISR); + if (unlikely(!status)) { + ks_restore_cmd_reg(ks); + return IRQ_NONE; + } + + ks_wrreg16(ks, KS_ISR, status); + + if (likely(status & IRQ_RXI)) + ks_rcv(ks, netdev); + + if (unlikely(status & IRQ_LCI)) + ks_update_link_status(netdev, ks); + + if (unlikely(status & IRQ_TXI)) + netif_wake_queue(netdev); + + if (unlikely(status & IRQ_LDI)) { + + u16 pmecr = ks_rdreg16(ks, KS_PMECR); + pmecr &= ~PMECR_WKEVT_MASK; + ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); + } + + if (unlikely(status & IRQ_RXOI)) + ks->netdev->stats.rx_over_errors++; + /* this should be the last in IRQ handler*/ + ks_restore_cmd_reg(ks); + return IRQ_HANDLED; +} + + +/** + * ks_net_open - open network device + * @netdev: The network device being opened. + * + * Called when the network device is marked active, such as a user executing + * 'ifconfig up' on the device. + */ +static int ks_net_open(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + int err; + +#define KS_INT_FLAGS IRQF_TRIGGER_LOW + /* lock the card, even if we may not actually do anything + * else at the moment. + */ + + netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__); + + /* reset the HW */ + err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev); + + if (err) { + pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err); + return err; + } + + /* wake up powermode to normal mode */ + ks_set_powermode(ks, PMECR_PM_NORMAL); + mdelay(1); /* wait for normal mode to take effect */ + + ks_wrreg16(ks, KS_ISR, 0xffff); + ks_enable_int(ks); + ks_enable_qmu(ks); + netif_start_queue(ks->netdev); + + netif_dbg(ks, ifup, ks->netdev, "network device up\n"); + + return 0; +} + +/** + * ks_net_stop - close network device + * @netdev: The device being closed. + * + * Called to close down a network device which has been active. Cancell any + * work, shutdown the RX and TX process and then place the chip into a low + * power state whilst it is not being used. + */ +static int ks_net_stop(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + + netif_info(ks, ifdown, netdev, "shutting down\n"); + + netif_stop_queue(netdev); + + mutex_lock(&ks->lock); + + /* turn off the IRQs and ack any outstanding */ + ks_wrreg16(ks, KS_IER, 0x0000); + ks_wrreg16(ks, KS_ISR, 0xffff); + + /* shutdown RX/TX QMU */ + ks_disable_qmu(ks); + + /* set powermode to soft power down to save power */ + ks_set_powermode(ks, PMECR_PM_SOFTDOWN); + free_irq(netdev->irq, netdev); + mutex_unlock(&ks->lock); + return 0; +} + + +/** + * ks_write_qmu - write 1 pkt data to the QMU. + * @ks: The chip information + * @pdata: buffer address to save 1 pkt + * @len: Pkt length in byte + * Here is the sequence to write 1 pkt: + * 1. set sudo DMA mode + * 2. write status/length + * 3. write pkt data + * 4. reset sudo DMA Mode + * 5. reset sudo DMA mode + * 6. Wait until pkt is out + */ +static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) +{ + /* start header at txb[0] to align txw entries */ + ks->txh.txw[0] = 0; + ks->txh.txw[1] = cpu_to_le16(len); + + /* 1. set sudo-DMA mode */ + ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); + /* 2. write status/lenth info */ + ks_outblk(ks, ks->txh.txw, 4); + /* 3. 
write pkt data */ + ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4)); + /* 4. reset sudo-DMA mode */ + ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr); + /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */ + ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE); + /* 6. wait until TXQCR_METFE is auto-cleared */ + while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE) + ; +} + +/** + * ks_start_xmit - transmit packet + * @skb : The buffer to transmit + * @netdev : The device used to transmit the packet. + * + * Called by the network layer to transmit the @skb. + * spin_lock_irqsave is required because tx and rx should be mutual exclusive. + * So while tx is in-progress, prevent IRQ interrupt from happenning. + */ +static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + int retv = NETDEV_TX_OK; + struct ks_net *ks = netdev_priv(netdev); + + disable_irq(netdev->irq); + ks_disable_int(ks); + spin_lock(&ks->statelock); + + /* Extra space are required: + * 4 byte for alignment, 4 for status/length, 4 for CRC + */ + + if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) { + ks_write_qmu(ks, skb->data, skb->len); + /* add tx statistics */ + netdev->stats.tx_bytes += skb->len; + netdev->stats.tx_packets++; + dev_kfree_skb(skb); + } else + retv = NETDEV_TX_BUSY; + spin_unlock(&ks->statelock); + ks_enable_int(ks); + enable_irq(netdev->irq); + return retv; +} + +/** + * ks_start_rx - ready to serve pkts + * @ks : The chip information + * + */ +static void ks_start_rx(struct ks_net *ks) +{ + u16 cntl; + + /* Enables QMU Receive (RXCR1). */ + cntl = ks_rdreg16(ks, KS_RXCR1); + cntl |= RXCR1_RXE ; + ks_wrreg16(ks, KS_RXCR1, cntl); +} /* ks_start_rx */ + +/** + * ks_stop_rx - stop to serve pkts + * @ks : The chip information + * + */ +static void ks_stop_rx(struct ks_net *ks) +{ + u16 cntl; + + /* Disables QMU Receive (RXCR1). */ + cntl = ks_rdreg16(ks, KS_RXCR1); + cntl &= ~RXCR1_RXE ; + ks_wrreg16(ks, KS_RXCR1, cntl); + +} /* ks_stop_rx */ + +static unsigned long const ethernet_polynomial = 0x04c11db7U; + +static unsigned long ether_gen_crc(int length, u8 *data) +{ + long crc = -1; + while (--length >= 0) { + u8 current_octet = *data++; + int bit; + + for (bit = 0; bit < 8; bit++, current_octet >>= 1) { + crc = (crc << 1) ^ + ((crc < 0) ^ (current_octet & 1) ? + ethernet_polynomial : 0); + } + } + return (unsigned long)crc; +} /* ether_gen_crc */ + +/** +* ks_set_grpaddr - set multicast information +* @ks : The chip information +*/ + +static void ks_set_grpaddr(struct ks_net *ks) +{ + u8 i; + u32 index, position, value; + + memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE); + + for (i = 0; i < ks->mcast_lst_size; i++) { + position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f; + index = position >> 3; + value = 1 << (position & 7); + ks->mcast_bits[index] |= (u8)value; + } + + for (i = 0; i < HW_MCAST_SIZE; i++) { + if (i & 1) { + ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1), + (ks->mcast_bits[i] << 8) | + ks->mcast_bits[i - 1]); + } + } +} /* ks_set_grpaddr */ + +/** +* ks_clear_mcast - clear multicast information +* +* @ks : The chip information +* This routine removes all mcast addresses set in the hardware. 
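+* The cached @mcast_bits bitmap is zeroed first, and zeros are then
+* written out to the chip's multicast hash table registers starting at
+* KS_MAHTR0.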
+*/ + +static void ks_clear_mcast(struct ks_net *ks) +{ + u16 i, mcast_size; + for (i = 0; i < HW_MCAST_SIZE; i++) + ks->mcast_bits[i] = 0; + + mcast_size = HW_MCAST_SIZE >> 2; + for (i = 0; i < mcast_size; i++) + ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0); +} + +static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode) +{ + u16 cntl; + ks->promiscuous = promiscuous_mode; + ks_stop_rx(ks); /* Stop receiving for reconfiguration */ + cntl = ks_rdreg16(ks, KS_RXCR1); + + cntl &= ~RXCR1_FILTER_MASK; + if (promiscuous_mode) + /* Enable Promiscuous mode */ + cntl |= RXCR1_RXAE | RXCR1_RXINVF; + else + /* Disable Promiscuous mode (default normal mode) */ + cntl |= RXCR1_RXPAFMA; + + ks_wrreg16(ks, KS_RXCR1, cntl); + + if (ks->enabled) + ks_start_rx(ks); + +} /* ks_set_promis */ + +static void ks_set_mcast(struct ks_net *ks, u16 mcast) +{ + u16 cntl; + + ks->all_mcast = mcast; + ks_stop_rx(ks); /* Stop receiving for reconfiguration */ + cntl = ks_rdreg16(ks, KS_RXCR1); + cntl &= ~RXCR1_FILTER_MASK; + if (mcast) + /* Enable "Perfect with Multicast address passed mode" */ + cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA); + else + /** + * Disable "Perfect with Multicast address passed + * mode" (normal mode). + */ + cntl |= RXCR1_RXPAFMA; + + ks_wrreg16(ks, KS_RXCR1, cntl); + + if (ks->enabled) + ks_start_rx(ks); +} /* ks_set_mcast */ + +static void ks_set_rx_mode(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + struct netdev_hw_addr *ha; + + /* Turn on/off promiscuous mode. */ + if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC) + ks_set_promis(ks, + (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC)); + /* Turn on/off all mcast mode. */ + else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI) + ks_set_mcast(ks, + (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)); + else + ks_set_promis(ks, false); + + if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) { + if (netdev_mc_count(netdev) <= MAX_MCAST_LST) { + int i = 0; + + netdev_for_each_mc_addr(ha, netdev) { + if (i >= MAX_MCAST_LST) + break; + memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN); + } + ks->mcast_lst_size = (u8)i; + ks_set_grpaddr(ks); + } else { + /** + * List too big to support so + * turn on all mcast mode. 
+ */ + ks->mcast_lst_size = MAX_MCAST_LST; + ks_set_mcast(ks, true); + } + } else { + ks->mcast_lst_size = 0; + ks_clear_mcast(ks); + } +} /* ks_set_rx_mode */ + +static void ks_set_mac(struct ks_net *ks, u8 *data) +{ + u16 *pw = (u16 *)data; + u16 w, u; + + ks_stop_rx(ks); /* Stop receiving for reconfiguration */ + + u = *pw++; + w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); + ks_wrreg16(ks, KS_MARH, w); + + u = *pw++; + w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); + ks_wrreg16(ks, KS_MARM, w); + + u = *pw; + w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF); + ks_wrreg16(ks, KS_MARL, w); + + memcpy(ks->mac_addr, data, ETH_ALEN); + + if (ks->enabled) + ks_start_rx(ks); +} + +static int ks_set_mac_address(struct net_device *netdev, void *paddr) +{ + struct ks_net *ks = netdev_priv(netdev); + struct sockaddr *addr = paddr; + u8 *da; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + da = (u8 *)netdev->dev_addr; + + ks_set_mac(ks, da); + return 0; +} + +static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct ks_net *ks = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EINVAL; + + return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL); +} + +static const struct net_device_ops ks_netdev_ops = { + .ndo_open = ks_net_open, + .ndo_stop = ks_net_stop, + .ndo_do_ioctl = ks_net_ioctl, + .ndo_start_xmit = ks_start_xmit, + .ndo_set_mac_address = ks_set_mac_address, + .ndo_set_rx_mode = ks_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +/* ethtool support */ + +static void ks_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *di) +{ + strlcpy(di->driver, DRV_NAME, sizeof(di->driver)); + strlcpy(di->version, "1.00", sizeof(di->version)); + strlcpy(di->bus_info, dev_name(netdev->dev.parent), + sizeof(di->bus_info)); +} + +static u32 ks_get_msglevel(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + return ks->msg_enable; +} + +static void ks_set_msglevel(struct net_device *netdev, u32 to) +{ + struct ks_net *ks = netdev_priv(netdev); + ks->msg_enable = to; +} + +static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_ethtool_gset(&ks->mii, cmd); +} + +static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_ethtool_sset(&ks->mii, cmd); +} + +static u32 ks_get_link(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_link_ok(&ks->mii); +} + +static int ks_nway_reset(struct net_device *netdev) +{ + struct ks_net *ks = netdev_priv(netdev); + return mii_nway_restart(&ks->mii); +} + +static const struct ethtool_ops ks_ethtool_ops = { + .get_drvinfo = ks_get_drvinfo, + .get_msglevel = ks_get_msglevel, + .set_msglevel = ks_set_msglevel, + .get_settings = ks_get_settings, + .set_settings = ks_set_settings, + .get_link = ks_get_link, + .nway_reset = ks_nway_reset, +}; + +/* MII interface controls */ + +/** + * ks_phy_reg - convert MII register into a KS8851 register + * @reg: MII register number. + * + * Return the KS8851 register number for the corresponding MII PHY register + * if possible. Return zero if the MII register has no direct mapping to the + * KS8851 register set. 
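+ *
+ * For example, the generic MII_BMCR and MII_BMSR registers map onto the
+ * chip's port 1 KS_P1MBCR and KS_P1MBSR registers; registers outside the
+ * handled set simply yield zero.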
+ */ +static int ks_phy_reg(int reg) +{ + switch (reg) { + case MII_BMCR: + return KS_P1MBCR; + case MII_BMSR: + return KS_P1MBSR; + case MII_PHYSID1: + return KS_PHY1ILR; + case MII_PHYSID2: + return KS_PHY1IHR; + case MII_ADVERTISE: + return KS_P1ANAR; + case MII_LPA: + return KS_P1ANLPR; + } + + return 0x0; +} + +/** + * ks_phy_read - MII interface PHY register read. + * @netdev: The network device the PHY is on. + * @phy_addr: Address of PHY (ignored as we only have one) + * @reg: The register to read. + * + * This call reads data from the PHY register specified in @reg. Since the + * device does not support all the MII registers, the non-existent values + * are always returned as zero. + * + * We return zero for unsupported registers as the MII code does not check + * the value returned for any error status, and simply returns it to the + * caller. The mii-tool that the driver was tested with takes any -ve error + * as real PHY capabilities, thus displaying incorrect data to the user. + */ +static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg) +{ + struct ks_net *ks = netdev_priv(netdev); + int ksreg; + int result; + + ksreg = ks_phy_reg(reg); + if (!ksreg) + return 0x0; /* no error return allowed, so use zero */ + + mutex_lock(&ks->lock); + result = ks_rdreg16(ks, ksreg); + mutex_unlock(&ks->lock); + + return result; +} + +static void ks_phy_write(struct net_device *netdev, + int phy, int reg, int value) +{ + struct ks_net *ks = netdev_priv(netdev); + int ksreg; + + ksreg = ks_phy_reg(reg); + if (ksreg) { + mutex_lock(&ks->lock); + ks_wrreg16(ks, ksreg, value); + mutex_unlock(&ks->lock); + } +} + +/** + * ks_read_selftest - read the selftest memory info. + * @ks: The device state + * + * Read and check the TX/RX memory selftest information. + */ +static int ks_read_selftest(struct ks_net *ks) +{ + unsigned both_done = MBIR_TXMBF | MBIR_RXMBF; + int ret = 0; + unsigned rd; + + rd = ks_rdreg16(ks, KS_MBIR); + + if ((rd & both_done) != both_done) { + netdev_warn(ks->netdev, "Memory selftest not finished\n"); + return 0; + } + + if (rd & MBIR_TXMBFA) { + netdev_err(ks->netdev, "TX memory selftest fails\n"); + ret |= 1; + } + + if (rd & MBIR_RXMBFA) { + netdev_err(ks->netdev, "RX memory selftest fails\n"); + ret |= 2; + } + + netdev_info(ks->netdev, "the selftest passes\n"); + return ret; +} + +static void ks_setup(struct ks_net *ks) +{ + u16 w; + + /** + * Configure QMU Transmit + */ + + /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */ + ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI); + + /* Setup Receive Frame Data Pointer Auto-Increment */ + ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); + + /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ + ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK); + + /* Setup RxQ Command Control (RXQCR) */ + ks->rc_rxqcr = RXQCR_CMD_CNTL; + ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); + + /** + * set the force mode to half duplex, default is full duplex + * because if the auto-negotiation fails, most switch uses + * half-duplex. 
+ */ + + w = ks_rdreg16(ks, KS_P1MBCR); + w &= ~P1MBCR_FORCE_FDX; + ks_wrreg16(ks, KS_P1MBCR, w); + + w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; + ks_wrreg16(ks, KS_TXCR, w); + + w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC; + + if (ks->promiscuous) /* bPromiscuous */ + w |= (RXCR1_RXAE | RXCR1_RXINVF); + else if (ks->all_mcast) /* Multicast address passed mode */ + w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA); + else /* Normal mode */ + w |= RXCR1_RXPAFMA; + + ks_wrreg16(ks, KS_RXCR1, w); +} /*ks_setup */ + + +static void ks_setup_int(struct ks_net *ks) +{ + ks->rc_ier = 0x00; + /* Clear the interrupts status of the hardware. */ + ks_wrreg16(ks, KS_ISR, 0xffff); + + /* Enables the interrupts of the hardware. */ + ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI); +} /* ks_setup_int */ + +static int ks_hw_init(struct ks_net *ks) +{ +#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES) + ks->promiscuous = 0; + ks->all_mcast = 0; + ks->mcast_lst_size = 0; + + ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE, + GFP_KERNEL); + if (!ks->frame_head_info) + return false; + + ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS); + return true; +} + +#if defined(CONFIG_OF) +static const struct of_device_id ks8851_ml_dt_ids[] = { + { .compatible = "micrel,ks8851-mll" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids); +#endif + +static int ks8851_probe(struct platform_device *pdev) +{ + int err; + struct resource *io_d, *io_c; + struct net_device *netdev; + struct ks_net *ks; + u16 id, data; + const char *mac; + + netdev = alloc_etherdev(sizeof(struct ks_net)); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + ks = netdev_priv(netdev); + ks->netdev = netdev; + + io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d); + if (IS_ERR(ks->hw_addr)) { + err = PTR_ERR(ks->hw_addr); + goto err_free; + } + + io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c); + if (IS_ERR(ks->hw_addr_cmd)) { + err = PTR_ERR(ks->hw_addr_cmd); + goto err_free; + } + + netdev->irq = platform_get_irq(pdev, 0); + + if ((int)netdev->irq < 0) { + err = netdev->irq; + goto err_free; + } + + ks->pdev = pdev; + + mutex_init(&ks->lock); + spin_lock_init(&ks->statelock); + + netdev->netdev_ops = &ks_netdev_ops; + netdev->ethtool_ops = &ks_ethtool_ops; + + /* setup mii state */ + ks->mii.dev = netdev; + ks->mii.phy_id = 1, + ks->mii.phy_id_mask = 1; + ks->mii.reg_num_mask = 0xf; + ks->mii.mdio_read = ks_phy_read; + ks->mii.mdio_write = ks_phy_write; + + netdev_info(netdev, "message enable is %d\n", msg_enable); + /* set the default message enable */ + ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK)); + ks_read_config(ks); + + /* simple check for a valid chip being connected to the bus */ + if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { + netdev_err(netdev, "failed to read device ID\n"); + err = -ENODEV; + goto err_free; + } + + if (ks_read_selftest(ks)) { + netdev_err(netdev, "failed to read device ID\n"); + err = -ENODEV; + goto err_free; + } + + err = register_netdev(netdev); + if (err) + goto err_free; + + platform_set_drvdata(pdev, netdev); + + ks_soft_reset(ks, GRR_GSR); + ks_hw_init(ks); + ks_disable_qmu(ks); + ks_setup(ks); + ks_setup_int(ks); + + data = ks_rdreg16(ks, KS_OBCR); + ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); + + /* overwriting 
the default MAC address */ + if (pdev->dev.of_node) { + mac = of_get_mac_address(pdev->dev.of_node); + if (mac) + memcpy(ks->mac_addr, mac, ETH_ALEN); + } else { + struct ks8851_mll_platform_data *pdata; + + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + netdev_err(netdev, "No platform data\n"); + err = -ENODEV; + goto err_pdata; + } + memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN); + } + if (!is_valid_ether_addr(ks->mac_addr)) { + /* Use random MAC address if none passed */ + eth_random_addr(ks->mac_addr); + netdev_info(netdev, "Using random mac address\n"); + } + netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr); + + memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN); + + ks_set_mac(ks, netdev->dev_addr); + + id = ks_rdreg16(ks, KS_CIDER); + + netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", + (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); + return 0; + +err_pdata: + unregister_netdev(netdev); +err_free: + free_netdev(netdev); + return err; +} + +static int ks8851_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + + unregister_netdev(netdev); + free_netdev(netdev); + return 0; + +} + +static struct platform_driver ks8851_platform_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = of_match_ptr(ks8851_ml_dt_ids), + }, + .probe = ks8851_probe, + .remove = ks8851_remove, +}; + +module_platform_driver(ks8851_platform_driver); + +MODULE_DESCRIPTION("KS8851 MLL Network driver"); +MODULE_AUTHOR("David Choi "); +MODULE_LICENSE("GPL"); +module_param_named(message, msg_enable, int, 0); +MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); + diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c new file mode 100644 index 000000000..6f332ebdf --- /dev/null +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -0,0 +1,7255 @@ +/** + * drivers/net/ethernet/micrel/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver + * + * Copyright (c) 2009-2010 Micrel, Inc. + * Tristram Ha + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* DMA Registers */ + +#define KS_DMA_TX_CTRL 0x0000 +#define DMA_TX_ENABLE 0x00000001 +#define DMA_TX_CRC_ENABLE 0x00000002 +#define DMA_TX_PAD_ENABLE 0x00000004 +#define DMA_TX_LOOPBACK 0x00000100 +#define DMA_TX_FLOW_ENABLE 0x00000200 +#define DMA_TX_CSUM_IP 0x00010000 +#define DMA_TX_CSUM_TCP 0x00020000 +#define DMA_TX_CSUM_UDP 0x00040000 +#define DMA_TX_BURST_SIZE 0x3F000000 + +#define KS_DMA_RX_CTRL 0x0004 +#define DMA_RX_ENABLE 0x00000001 +#define KS884X_DMA_RX_MULTICAST 0x00000002 +#define DMA_RX_PROMISCUOUS 0x00000004 +#define DMA_RX_ERROR 0x00000008 +#define DMA_RX_UNICAST 0x00000010 +#define DMA_RX_ALL_MULTICAST 0x00000020 +#define DMA_RX_BROADCAST 0x00000040 +#define DMA_RX_FLOW_ENABLE 0x00000200 +#define DMA_RX_CSUM_IP 0x00010000 +#define DMA_RX_CSUM_TCP 0x00020000 +#define DMA_RX_CSUM_UDP 0x00040000 +#define DMA_RX_BURST_SIZE 0x3F000000 + +#define DMA_BURST_SHIFT 24 +#define DMA_BURST_DEFAULT 8 + +#define KS_DMA_TX_START 0x0008 +#define KS_DMA_RX_START 0x000C +#define DMA_START 0x00000001 + +#define KS_DMA_TX_ADDR 0x0010 +#define KS_DMA_RX_ADDR 0x0014 + +#define DMA_ADDR_LIST_MASK 0xFFFFFFFC +#define DMA_ADDR_LIST_SHIFT 2 + +/* MTR0 */ +#define KS884X_MULTICAST_0_OFFSET 0x0020 +#define KS884X_MULTICAST_1_OFFSET 0x0021 +#define KS884X_MULTICAST_2_OFFSET 0x0022 +#define KS884x_MULTICAST_3_OFFSET 0x0023 +/* MTR1 */ +#define KS884X_MULTICAST_4_OFFSET 0x0024 +#define KS884X_MULTICAST_5_OFFSET 0x0025 +#define KS884X_MULTICAST_6_OFFSET 0x0026 +#define KS884X_MULTICAST_7_OFFSET 0x0027 + +/* Interrupt Registers */ + +/* INTEN */ +#define KS884X_INTERRUPTS_ENABLE 0x0028 +/* INTST */ +#define KS884X_INTERRUPTS_STATUS 0x002C + +#define KS884X_INT_RX_STOPPED 0x02000000 +#define KS884X_INT_TX_STOPPED 0x04000000 +#define KS884X_INT_RX_OVERRUN 0x08000000 +#define KS884X_INT_TX_EMPTY 0x10000000 +#define KS884X_INT_RX 0x20000000 +#define KS884X_INT_TX 0x40000000 +#define KS884X_INT_PHY 0x80000000 + +#define KS884X_INT_RX_MASK \ + (KS884X_INT_RX | KS884X_INT_RX_OVERRUN) +#define KS884X_INT_TX_MASK \ + (KS884X_INT_TX | KS884X_INT_TX_EMPTY) +#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY) + +/* MAC Additional Station Address */ + +/* MAAL0 */ +#define KS_ADD_ADDR_0_LO 0x0080 +/* MAAH0 */ +#define KS_ADD_ADDR_0_HI 0x0084 +/* MAAL1 */ +#define KS_ADD_ADDR_1_LO 0x0088 +/* MAAH1 */ +#define KS_ADD_ADDR_1_HI 0x008C +/* MAAL2 */ +#define KS_ADD_ADDR_2_LO 0x0090 +/* MAAH2 */ +#define KS_ADD_ADDR_2_HI 0x0094 +/* MAAL3 */ +#define KS_ADD_ADDR_3_LO 0x0098 +/* MAAH3 */ +#define KS_ADD_ADDR_3_HI 0x009C +/* MAAL4 */ +#define KS_ADD_ADDR_4_LO 0x00A0 +/* MAAH4 */ +#define KS_ADD_ADDR_4_HI 0x00A4 +/* MAAL5 */ +#define KS_ADD_ADDR_5_LO 0x00A8 +/* MAAH5 */ +#define KS_ADD_ADDR_5_HI 0x00AC +/* MAAL6 */ +#define KS_ADD_ADDR_6_LO 0x00B0 +/* MAAH6 */ +#define KS_ADD_ADDR_6_HI 0x00B4 +/* MAAL7 */ +#define KS_ADD_ADDR_7_LO 0x00B8 +/* MAAH7 */ +#define KS_ADD_ADDR_7_HI 0x00BC +/* MAAL8 */ +#define KS_ADD_ADDR_8_LO 0x00C0 +/* MAAH8 */ +#define KS_ADD_ADDR_8_HI 0x00C4 +/* MAAL9 */ +#define KS_ADD_ADDR_9_LO 0x00C8 +/* MAAH9 */ +#define KS_ADD_ADDR_9_HI 0x00CC +/* MAAL10 */ +#define KS_ADD_ADDR_A_LO 0x00D0 +/* MAAH10 */ +#define KS_ADD_ADDR_A_HI 0x00D4 +/* MAAL11 */ +#define KS_ADD_ADDR_B_LO 0x00D8 +/* MAAH11 */ +#define KS_ADD_ADDR_B_HI 0x00DC +/* MAAL12 */ +#define 
KS_ADD_ADDR_C_LO 0x00E0 +/* MAAH12 */ +#define KS_ADD_ADDR_C_HI 0x00E4 +/* MAAL13 */ +#define KS_ADD_ADDR_D_LO 0x00E8 +/* MAAH13 */ +#define KS_ADD_ADDR_D_HI 0x00EC +/* MAAL14 */ +#define KS_ADD_ADDR_E_LO 0x00F0 +/* MAAH14 */ +#define KS_ADD_ADDR_E_HI 0x00F4 +/* MAAL15 */ +#define KS_ADD_ADDR_F_LO 0x00F8 +/* MAAH15 */ +#define KS_ADD_ADDR_F_HI 0x00FC + +#define ADD_ADDR_HI_MASK 0x0000FFFF +#define ADD_ADDR_ENABLE 0x80000000 +#define ADD_ADDR_INCR 8 + +/* Miscellaneous Registers */ + +/* MARL */ +#define KS884X_ADDR_0_OFFSET 0x0200 +#define KS884X_ADDR_1_OFFSET 0x0201 +/* MARM */ +#define KS884X_ADDR_2_OFFSET 0x0202 +#define KS884X_ADDR_3_OFFSET 0x0203 +/* MARH */ +#define KS884X_ADDR_4_OFFSET 0x0204 +#define KS884X_ADDR_5_OFFSET 0x0205 + +/* OBCR */ +#define KS884X_BUS_CTRL_OFFSET 0x0210 + +#define BUS_SPEED_125_MHZ 0x0000 +#define BUS_SPEED_62_5_MHZ 0x0001 +#define BUS_SPEED_41_66_MHZ 0x0002 +#define BUS_SPEED_25_MHZ 0x0003 + +/* EEPCR */ +#define KS884X_EEPROM_CTRL_OFFSET 0x0212 + +#define EEPROM_CHIP_SELECT 0x0001 +#define EEPROM_SERIAL_CLOCK 0x0002 +#define EEPROM_DATA_OUT 0x0004 +#define EEPROM_DATA_IN 0x0008 +#define EEPROM_ACCESS_ENABLE 0x0010 + +/* MBIR */ +#define KS884X_MEM_INFO_OFFSET 0x0214 + +#define RX_MEM_TEST_FAILED 0x0008 +#define RX_MEM_TEST_FINISHED 0x0010 +#define TX_MEM_TEST_FAILED 0x0800 +#define TX_MEM_TEST_FINISHED 0x1000 + +/* GCR */ +#define KS884X_GLOBAL_CTRL_OFFSET 0x0216 +#define GLOBAL_SOFTWARE_RESET 0x0001 + +#define KS8841_POWER_MANAGE_OFFSET 0x0218 + +/* WFCR */ +#define KS8841_WOL_CTRL_OFFSET 0x021A +#define KS8841_WOL_MAGIC_ENABLE 0x0080 +#define KS8841_WOL_FRAME3_ENABLE 0x0008 +#define KS8841_WOL_FRAME2_ENABLE 0x0004 +#define KS8841_WOL_FRAME1_ENABLE 0x0002 +#define KS8841_WOL_FRAME0_ENABLE 0x0001 + +/* WF0 */ +#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220 +#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224 +#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228 + +/* IACR */ +#define KS884X_IACR_P 0x04A0 +#define KS884X_IACR_OFFSET KS884X_IACR_P + +/* IADR1 */ +#define KS884X_IADR1_P 0x04A2 +#define KS884X_IADR2_P 0x04A4 +#define KS884X_IADR3_P 0x04A6 +#define KS884X_IADR4_P 0x04A8 +#define KS884X_IADR5_P 0x04AA + +#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P +#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1) + +#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P +#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1) +#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P +#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1) +#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P +#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1) +#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P +#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1) +#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P + +/* P1MBCR */ +#define KS884X_P1MBCR_P 0x04D0 +#define KS884X_P1MBSR_P 0x04D2 +#define KS884X_PHY1ILR_P 0x04D4 +#define KS884X_PHY1IHR_P 0x04D6 +#define KS884X_P1ANAR_P 0x04D8 +#define KS884X_P1ANLPR_P 0x04DA + +/* P2MBCR */ +#define KS884X_P2MBCR_P 0x04E0 +#define KS884X_P2MBSR_P 0x04E2 +#define KS884X_PHY2ILR_P 0x04E4 +#define KS884X_PHY2IHR_P 0x04E6 +#define KS884X_P2ANAR_P 0x04E8 +#define KS884X_P2ANLPR_P 0x04EA + +#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P +#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P) + +#define KS884X_PHY_CTRL_OFFSET 0x00 + +/* Mode Control Register */ +#define PHY_REG_CTRL 0 + +#define PHY_RESET 0x8000 +#define PHY_LOOPBACK 0x4000 +#define PHY_SPEED_100MBIT 0x2000 +#define PHY_AUTO_NEG_ENABLE 
0x1000 +#define PHY_POWER_DOWN 0x0800 +#define PHY_MII_DISABLE 0x0400 +#define PHY_AUTO_NEG_RESTART 0x0200 +#define PHY_FULL_DUPLEX 0x0100 +#define PHY_COLLISION_TEST 0x0080 +#define PHY_HP_MDIX 0x0020 +#define PHY_FORCE_MDIX 0x0010 +#define PHY_AUTO_MDIX_DISABLE 0x0008 +#define PHY_REMOTE_FAULT_DISABLE 0x0004 +#define PHY_TRANSMIT_DISABLE 0x0002 +#define PHY_LED_DISABLE 0x0001 + +#define KS884X_PHY_STATUS_OFFSET 0x02 + +/* Mode Status Register */ +#define PHY_REG_STATUS 1 + +#define PHY_100BT4_CAPABLE 0x8000 +#define PHY_100BTX_FD_CAPABLE 0x4000 +#define PHY_100BTX_CAPABLE 0x2000 +#define PHY_10BT_FD_CAPABLE 0x1000 +#define PHY_10BT_CAPABLE 0x0800 +#define PHY_MII_SUPPRESS_CAPABLE 0x0040 +#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020 +#define PHY_REMOTE_FAULT 0x0010 +#define PHY_AUTO_NEG_CAPABLE 0x0008 +#define PHY_LINK_STATUS 0x0004 +#define PHY_JABBER_DETECT 0x0002 +#define PHY_EXTENDED_CAPABILITY 0x0001 + +#define KS884X_PHY_ID_1_OFFSET 0x04 +#define KS884X_PHY_ID_2_OFFSET 0x06 + +/* PHY Identifier Registers */ +#define PHY_REG_ID_1 2 +#define PHY_REG_ID_2 3 + +#define KS884X_PHY_AUTO_NEG_OFFSET 0x08 + +/* Auto-Negotiation Advertisement Register */ +#define PHY_REG_AUTO_NEGOTIATION 4 + +#define PHY_AUTO_NEG_NEXT_PAGE 0x8000 +#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000 +/* Not supported. */ +#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800 +#define PHY_AUTO_NEG_SYM_PAUSE 0x0400 +#define PHY_AUTO_NEG_100BT4 0x0200 +#define PHY_AUTO_NEG_100BTX_FD 0x0100 +#define PHY_AUTO_NEG_100BTX 0x0080 +#define PHY_AUTO_NEG_10BT_FD 0x0040 +#define PHY_AUTO_NEG_10BT 0x0020 +#define PHY_AUTO_NEG_SELECTOR 0x001F +#define PHY_AUTO_NEG_802_3 0x0001 + +#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE) + +#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A + +/* Auto-Negotiation Link Partner Ability Register */ +#define PHY_REG_REMOTE_CAPABILITY 5 + +#define PHY_REMOTE_NEXT_PAGE 0x8000 +#define PHY_REMOTE_ACKNOWLEDGE 0x4000 +#define PHY_REMOTE_REMOTE_FAULT 0x2000 +#define PHY_REMOTE_SYM_PAUSE 0x0400 +#define PHY_REMOTE_100BTX_FD 0x0100 +#define PHY_REMOTE_100BTX 0x0080 +#define PHY_REMOTE_10BT_FD 0x0040 +#define PHY_REMOTE_10BT 0x0020 + +/* P1VCT */ +#define KS884X_P1VCT_P 0x04F0 +#define KS884X_P1PHYCTRL_P 0x04F2 + +/* P2VCT */ +#define KS884X_P2VCT_P 0x04F4 +#define KS884X_P2PHYCTRL_P 0x04F6 + +#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P +#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P) + +#define KS884X_PHY_LINK_MD_OFFSET 0x00 + +#define PHY_START_CABLE_DIAG 0x8000 +#define PHY_CABLE_DIAG_RESULT 0x6000 +#define PHY_CABLE_STAT_NORMAL 0x0000 +#define PHY_CABLE_STAT_OPEN 0x2000 +#define PHY_CABLE_STAT_SHORT 0x4000 +#define PHY_CABLE_STAT_FAILED 0x6000 +#define PHY_CABLE_10M_SHORT 0x1000 +#define PHY_CABLE_FAULT_COUNTER 0x01FF + +#define KS884X_PHY_PHY_CTRL_OFFSET 0x02 + +#define PHY_STAT_REVERSED_POLARITY 0x0020 +#define PHY_STAT_MDIX 0x0010 +#define PHY_FORCE_LINK 0x0008 +#define PHY_POWER_SAVING_DISABLE 0x0004 +#define PHY_REMOTE_LOOPBACK 0x0002 + +/* SIDER */ +#define KS884X_SIDER_P 0x0400 +#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P +#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1) + +#define REG_FAMILY_ID 0x88 + +#define REG_CHIP_ID_41 0x8810 +#define REG_CHIP_ID_42 0x8800 + +#define KS884X_CHIP_ID_MASK_41 0xFF10 +#define KS884X_CHIP_ID_MASK 0xFFF0 +#define KS884X_CHIP_ID_SHIFT 4 +#define KS884X_REVISION_MASK 0x000E +#define KS884X_REVISION_SHIFT 1 +#define KS8842_START 0x0001 + +#define CHIP_IP_41_M 0x8810 +#define CHIP_IP_42_M 0x8800 +#define CHIP_IP_61_M 0x8890 +#define 
CHIP_IP_62_M 0x8880 + +#define CHIP_IP_41_P 0x8850 +#define CHIP_IP_42_P 0x8840 +#define CHIP_IP_61_P 0x88D0 +#define CHIP_IP_62_P 0x88C0 + +/* SGCR1 */ +#define KS8842_SGCR1_P 0x0402 +#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P + +#define SWITCH_PASS_ALL 0x8000 +#define SWITCH_TX_FLOW_CTRL 0x2000 +#define SWITCH_RX_FLOW_CTRL 0x1000 +#define SWITCH_CHECK_LENGTH 0x0800 +#define SWITCH_AGING_ENABLE 0x0400 +#define SWITCH_FAST_AGING 0x0200 +#define SWITCH_AGGR_BACKOFF 0x0100 +#define SWITCH_PASS_PAUSE 0x0008 +#define SWITCH_LINK_AUTO_AGING 0x0001 + +/* SGCR2 */ +#define KS8842_SGCR2_P 0x0404 +#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P + +#define SWITCH_VLAN_ENABLE 0x8000 +#define SWITCH_IGMP_SNOOP 0x4000 +#define IPV6_MLD_SNOOP_ENABLE 0x2000 +#define IPV6_MLD_SNOOP_OPTION 0x1000 +#define PRIORITY_SCHEME_SELECT 0x0800 +#define SWITCH_MIRROR_RX_TX 0x0100 +#define UNICAST_VLAN_BOUNDARY 0x0080 +#define MULTICAST_STORM_DISABLE 0x0040 +#define SWITCH_BACK_PRESSURE 0x0020 +#define FAIR_FLOW_CTRL 0x0010 +#define NO_EXC_COLLISION_DROP 0x0008 +#define SWITCH_HUGE_PACKET 0x0004 +#define SWITCH_LEGAL_PACKET 0x0002 +#define SWITCH_BUF_RESERVE 0x0001 + +/* SGCR3 */ +#define KS8842_SGCR3_P 0x0406 +#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P + +#define BROADCAST_STORM_RATE_LO 0xFF00 +#define SWITCH_REPEATER 0x0080 +#define SWITCH_HALF_DUPLEX 0x0040 +#define SWITCH_FLOW_CTRL 0x0020 +#define SWITCH_10_MBIT 0x0010 +#define SWITCH_REPLACE_NULL_VID 0x0008 +#define BROADCAST_STORM_RATE_HI 0x0007 + +#define BROADCAST_STORM_RATE 0x07FF + +/* SGCR4 */ +#define KS8842_SGCR4_P 0x0408 + +/* SGCR5 */ +#define KS8842_SGCR5_P 0x040A +#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P + +#define LED_MODE 0x8200 +#define LED_SPEED_DUPLEX_ACT 0x0000 +#define LED_SPEED_DUPLEX_LINK_ACT 0x8000 +#define LED_DUPLEX_10_100 0x0200 + +/* SGCR6 */ +#define KS8842_SGCR6_P 0x0410 +#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P + +#define KS8842_PRIORITY_MASK 3 +#define KS8842_PRIORITY_SHIFT 2 + +/* SGCR7 */ +#define KS8842_SGCR7_P 0x0412 +#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P + +#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008 +#define SWITCH_UNK_DEF_PORT_3 0x0004 +#define SWITCH_UNK_DEF_PORT_2 0x0002 +#define SWITCH_UNK_DEF_PORT_1 0x0001 + +/* MACAR1 */ +#define KS8842_MACAR1_P 0x0470 +#define KS8842_MACAR2_P 0x0472 +#define KS8842_MACAR3_P 0x0474 +#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P +#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1) +#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P +#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1) +#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P +#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1) + +/* TOSR1 */ +#define KS8842_TOSR1_P 0x0480 +#define KS8842_TOSR2_P 0x0482 +#define KS8842_TOSR3_P 0x0484 +#define KS8842_TOSR4_P 0x0486 +#define KS8842_TOSR5_P 0x0488 +#define KS8842_TOSR6_P 0x048A +#define KS8842_TOSR7_P 0x0490 +#define KS8842_TOSR8_P 0x0492 +#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P +#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P +#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P +#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P +#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P +#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P + +#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P +#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P + +/* P1CR1 */ +#define KS8842_P1CR1_P 0x0500 +#define KS8842_P1CR2_P 0x0502 +#define KS8842_P1VIDR_P 0x0504 +#define KS8842_P1CR3_P 0x0506 +#define KS8842_P1IRCR_P 0x0508 +#define KS8842_P1ERCR_P 0x050A +#define 
KS884X_P1SCSLMD_P 0x0510 +#define KS884X_P1CR4_P 0x0512 +#define KS884X_P1SR_P 0x0514 + +/* P2CR1 */ +#define KS8842_P2CR1_P 0x0520 +#define KS8842_P2CR2_P 0x0522 +#define KS8842_P2VIDR_P 0x0524 +#define KS8842_P2CR3_P 0x0526 +#define KS8842_P2IRCR_P 0x0528 +#define KS8842_P2ERCR_P 0x052A +#define KS884X_P2SCSLMD_P 0x0530 +#define KS884X_P2CR4_P 0x0532 +#define KS884X_P2SR_P 0x0534 + +/* P3CR1 */ +#define KS8842_P3CR1_P 0x0540 +#define KS8842_P3CR2_P 0x0542 +#define KS8842_P3VIDR_P 0x0544 +#define KS8842_P3CR3_P 0x0546 +#define KS8842_P3IRCR_P 0x0548 +#define KS8842_P3ERCR_P 0x054A + +#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P +#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P +#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P + +#define PORT_CTRL_ADDR(port, addr) \ + (addr = KS8842_PORT_1_CTRL_1 + (port) * \ + (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1)) + +#define KS8842_PORT_CTRL_1_OFFSET 0x00 + +#define PORT_BROADCAST_STORM 0x0080 +#define PORT_DIFFSERV_ENABLE 0x0040 +#define PORT_802_1P_ENABLE 0x0020 +#define PORT_BASED_PRIORITY_MASK 0x0018 +#define PORT_BASED_PRIORITY_BASE 0x0003 +#define PORT_BASED_PRIORITY_SHIFT 3 +#define PORT_BASED_PRIORITY_0 0x0000 +#define PORT_BASED_PRIORITY_1 0x0008 +#define PORT_BASED_PRIORITY_2 0x0010 +#define PORT_BASED_PRIORITY_3 0x0018 +#define PORT_INSERT_TAG 0x0004 +#define PORT_REMOVE_TAG 0x0002 +#define PORT_PRIO_QUEUE_ENABLE 0x0001 + +#define KS8842_PORT_CTRL_2_OFFSET 0x02 + +#define PORT_INGRESS_VLAN_FILTER 0x4000 +#define PORT_DISCARD_NON_VID 0x2000 +#define PORT_FORCE_FLOW_CTRL 0x1000 +#define PORT_BACK_PRESSURE 0x0800 +#define PORT_TX_ENABLE 0x0400 +#define PORT_RX_ENABLE 0x0200 +#define PORT_LEARN_DISABLE 0x0100 +#define PORT_MIRROR_SNIFFER 0x0080 +#define PORT_MIRROR_RX 0x0040 +#define PORT_MIRROR_TX 0x0020 +#define PORT_USER_PRIORITY_CEILING 0x0008 +#define PORT_VLAN_MEMBERSHIP 0x0007 + +#define KS8842_PORT_CTRL_VID_OFFSET 0x04 + +#define PORT_DEFAULT_VID 0x0001 + +#define KS8842_PORT_CTRL_3_OFFSET 0x06 + +#define PORT_INGRESS_LIMIT_MODE 0x000C +#define PORT_INGRESS_ALL 0x0000 +#define PORT_INGRESS_UNICAST 0x0004 +#define PORT_INGRESS_MULTICAST 0x0008 +#define PORT_INGRESS_BROADCAST 0x000C +#define PORT_COUNT_IFG 0x0002 +#define PORT_COUNT_PREAMBLE 0x0001 + +#define KS8842_PORT_IN_RATE_OFFSET 0x08 +#define KS8842_PORT_OUT_RATE_OFFSET 0x0A + +#define PORT_PRIORITY_RATE 0x0F +#define PORT_PRIORITY_RATE_SHIFT 4 + +#define KS884X_PORT_LINK_MD 0x10 + +#define PORT_CABLE_10M_SHORT 0x8000 +#define PORT_CABLE_DIAG_RESULT 0x6000 +#define PORT_CABLE_STAT_NORMAL 0x0000 +#define PORT_CABLE_STAT_OPEN 0x2000 +#define PORT_CABLE_STAT_SHORT 0x4000 +#define PORT_CABLE_STAT_FAILED 0x6000 +#define PORT_START_CABLE_DIAG 0x1000 +#define PORT_FORCE_LINK 0x0800 +#define PORT_POWER_SAVING_DISABLE 0x0400 +#define PORT_PHY_REMOTE_LOOPBACK 0x0200 +#define PORT_CABLE_FAULT_COUNTER 0x01FF + +#define KS884X_PORT_CTRL_4_OFFSET 0x12 + +#define PORT_LED_OFF 0x8000 +#define PORT_TX_DISABLE 0x4000 +#define PORT_AUTO_NEG_RESTART 0x2000 +#define PORT_REMOTE_FAULT_DISABLE 0x1000 +#define PORT_POWER_DOWN 0x0800 +#define PORT_AUTO_MDIX_DISABLE 0x0400 +#define PORT_FORCE_MDIX 0x0200 +#define PORT_LOOPBACK 0x0100 +#define PORT_AUTO_NEG_ENABLE 0x0080 +#define PORT_FORCE_100_MBIT 0x0040 +#define PORT_FORCE_FULL_DUPLEX 0x0020 +#define PORT_AUTO_NEG_SYM_PAUSE 0x0010 +#define PORT_AUTO_NEG_100BTX_FD 0x0008 +#define PORT_AUTO_NEG_100BTX 0x0004 +#define PORT_AUTO_NEG_10BT_FD 0x0002 +#define PORT_AUTO_NEG_10BT 0x0001 + +#define KS884X_PORT_STATUS_OFFSET 0x14 + +#define PORT_HP_MDIX 0x8000 +#define 
PORT_REVERSED_POLARITY 0x2000 +#define PORT_RX_FLOW_CTRL 0x0800 +#define PORT_TX_FLOW_CTRL 0x1000 +#define PORT_STATUS_SPEED_100MBIT 0x0400 +#define PORT_STATUS_FULL_DUPLEX 0x0200 +#define PORT_REMOTE_FAULT 0x0100 +#define PORT_MDIX_STATUS 0x0080 +#define PORT_AUTO_NEG_COMPLETE 0x0040 +#define PORT_STATUS_LINK_GOOD 0x0020 +#define PORT_REMOTE_SYM_PAUSE 0x0010 +#define PORT_REMOTE_100BTX_FD 0x0008 +#define PORT_REMOTE_100BTX 0x0004 +#define PORT_REMOTE_10BT_FD 0x0002 +#define PORT_REMOTE_10BT 0x0001 + +/* +#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF +#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000 +#define STATIC_MAC_TABLE_VALID 00-00080000-00000000 +#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000 +#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000 +#define STATIC_MAC_TABLE_FID 00-03C00000-00000000 +*/ + +#define STATIC_MAC_TABLE_ADDR 0x0000FFFF +#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000 +#define STATIC_MAC_TABLE_VALID 0x00080000 +#define STATIC_MAC_TABLE_OVERRIDE 0x00100000 +#define STATIC_MAC_TABLE_USE_FID 0x00200000 +#define STATIC_MAC_TABLE_FID 0x03C00000 + +#define STATIC_MAC_FWD_PORTS_SHIFT 16 +#define STATIC_MAC_FID_SHIFT 22 + +/* +#define VLAN_TABLE_VID 00-00000000-00000FFF +#define VLAN_TABLE_FID 00-00000000-0000F000 +#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000 +#define VLAN_TABLE_VALID 00-00000000-00080000 +*/ + +#define VLAN_TABLE_VID 0x00000FFF +#define VLAN_TABLE_FID 0x0000F000 +#define VLAN_TABLE_MEMBERSHIP 0x00070000 +#define VLAN_TABLE_VALID 0x00080000 + +#define VLAN_TABLE_FID_SHIFT 12 +#define VLAN_TABLE_MEMBERSHIP_SHIFT 16 + +/* +#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF +#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000 +#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000 +#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000 +#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000 +#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000 +#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000 +#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000 +*/ + +#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF +#define DYNAMIC_MAC_TABLE_FID 0x000F0000 +#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000 +#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000 +#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000 + +#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03 +#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04 +#define DYNAMIC_MAC_TABLE_RESERVED 0x78 +#define DYNAMIC_MAC_TABLE_NOT_READY 0x80 + +#define DYNAMIC_MAC_FID_SHIFT 16 +#define DYNAMIC_MAC_SRC_PORT_SHIFT 20 +#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22 +#define DYNAMIC_MAC_ENTRIES_SHIFT 24 +#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8 + +/* +#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF +#define MIB_COUNTER_VALID 00-00000000-40000000 +#define MIB_COUNTER_OVERFLOW 00-00000000-80000000 +*/ + +#define MIB_COUNTER_VALUE 0x3FFFFFFF +#define MIB_COUNTER_VALID 0x40000000 +#define MIB_COUNTER_OVERFLOW 0x80000000 + +#define MIB_PACKET_DROPPED 0x0000FFFF + +#define KS_MIB_PACKET_DROPPED_TX_0 0x100 +#define KS_MIB_PACKET_DROPPED_TX_1 0x101 +#define KS_MIB_PACKET_DROPPED_TX 0x102 +#define KS_MIB_PACKET_DROPPED_RX_0 0x103 +#define KS_MIB_PACKET_DROPPED_RX_1 0x104 +#define KS_MIB_PACKET_DROPPED_RX 0x105 + +/* Change default LED mode. 
*/ +#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT + +#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i)) + +#define MAX_ETHERNET_BODY_SIZE 1500 +#define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN) + +#define MAX_ETHERNET_PACKET_SIZE \ + (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE) + +#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4) +#define MAX_RX_BUF_SIZE (1912 + 4) + +#define ADDITIONAL_ENTRIES 16 +#define MAX_MULTICAST_LIST 32 + +#define HW_MULTICAST_SIZE 8 + +#define HW_TO_DEV_PORT(port) (port - 1) + +enum { + media_connected, + media_disconnected +}; + +enum { + OID_COUNTER_UNKOWN, + + OID_COUNTER_FIRST, + + /* total transmit errors */ + OID_COUNTER_XMIT_ERROR, + + /* total receive errors */ + OID_COUNTER_RCV_ERROR, + + OID_COUNTER_LAST +}; + +/* + * Hardware descriptor definitions + */ + +#define DESC_ALIGNMENT 16 +#define BUFFER_ALIGNMENT 8 + +#define NUM_OF_RX_DESC 64 +#define NUM_OF_TX_DESC 64 + +#define KS_DESC_RX_FRAME_LEN 0x000007FF +#define KS_DESC_RX_FRAME_TYPE 0x00008000 +#define KS_DESC_RX_ERROR_CRC 0x00010000 +#define KS_DESC_RX_ERROR_RUNT 0x00020000 +#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000 +#define KS_DESC_RX_ERROR_PHY 0x00080000 +#define KS884X_DESC_RX_PORT_MASK 0x00300000 +#define KS_DESC_RX_MULTICAST 0x01000000 +#define KS_DESC_RX_ERROR 0x02000000 +#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000 +#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000 +#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000 +#define KS_DESC_RX_LAST 0x20000000 +#define KS_DESC_RX_FIRST 0x40000000 +#define KS_DESC_RX_ERROR_COND \ + (KS_DESC_RX_ERROR_CRC | \ + KS_DESC_RX_ERROR_RUNT | \ + KS_DESC_RX_ERROR_PHY | \ + KS_DESC_RX_ERROR_TOO_LONG) + +#define KS_DESC_HW_OWNED 0x80000000 + +#define KS_DESC_BUF_SIZE 0x000007FF +#define KS884X_DESC_TX_PORT_MASK 0x00300000 +#define KS_DESC_END_OF_RING 0x02000000 +#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000 +#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000 +#define KS_DESC_TX_CSUM_GEN_IP 0x10000000 +#define KS_DESC_TX_LAST 0x20000000 +#define KS_DESC_TX_FIRST 0x40000000 +#define KS_DESC_TX_INTERRUPT 0x80000000 + +#define KS_DESC_PORT_SHIFT 20 + +#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE) + +#define KS_DESC_TX_MASK \ + (KS_DESC_TX_INTERRUPT | \ + KS_DESC_TX_FIRST | \ + KS_DESC_TX_LAST | \ + KS_DESC_TX_CSUM_GEN_IP | \ + KS_DESC_TX_CSUM_GEN_TCP | \ + KS_DESC_TX_CSUM_GEN_UDP | \ + KS_DESC_BUF_SIZE) + +struct ksz_desc_rx_stat { +#ifdef __BIG_ENDIAN_BITFIELD + u32 hw_owned:1; + u32 first_desc:1; + u32 last_desc:1; + u32 csum_err_ip:1; + u32 csum_err_tcp:1; + u32 csum_err_udp:1; + u32 error:1; + u32 multicast:1; + u32 src_port:4; + u32 err_phy:1; + u32 err_too_long:1; + u32 err_runt:1; + u32 err_crc:1; + u32 frame_type:1; + u32 reserved1:4; + u32 frame_len:11; +#else + u32 frame_len:11; + u32 reserved1:4; + u32 frame_type:1; + u32 err_crc:1; + u32 err_runt:1; + u32 err_too_long:1; + u32 err_phy:1; + u32 src_port:4; + u32 multicast:1; + u32 error:1; + u32 csum_err_udp:1; + u32 csum_err_tcp:1; + u32 csum_err_ip:1; + u32 last_desc:1; + u32 first_desc:1; + u32 hw_owned:1; +#endif +}; + +struct ksz_desc_tx_stat { +#ifdef __BIG_ENDIAN_BITFIELD + u32 hw_owned:1; + u32 reserved1:31; +#else + u32 reserved1:31; + u32 hw_owned:1; +#endif +}; + +struct ksz_desc_rx_buf { +#ifdef __BIG_ENDIAN_BITFIELD + u32 reserved4:6; + u32 end_of_ring:1; + u32 reserved3:14; + u32 buf_size:11; +#else + u32 buf_size:11; + u32 reserved3:14; + u32 end_of_ring:1; + u32 reserved4:6; +#endif +}; + +struct ksz_desc_tx_buf { +#ifdef __BIG_ENDIAN_BITFIELD + u32 intr:1; + u32 first_seg:1; + u32 last_seg:1; + u32 
csum_gen_ip:1; + u32 csum_gen_tcp:1; + u32 csum_gen_udp:1; + u32 end_of_ring:1; + u32 reserved4:1; + u32 dest_port:4; + u32 reserved3:9; + u32 buf_size:11; +#else + u32 buf_size:11; + u32 reserved3:9; + u32 dest_port:4; + u32 reserved4:1; + u32 end_of_ring:1; + u32 csum_gen_udp:1; + u32 csum_gen_tcp:1; + u32 csum_gen_ip:1; + u32 last_seg:1; + u32 first_seg:1; + u32 intr:1; +#endif +}; + +union desc_stat { + struct ksz_desc_rx_stat rx; + struct ksz_desc_tx_stat tx; + u32 data; +}; + +union desc_buf { + struct ksz_desc_rx_buf rx; + struct ksz_desc_tx_buf tx; + u32 data; +}; + +/** + * struct ksz_hw_desc - Hardware descriptor data structure + * @ctrl: Descriptor control value. + * @buf: Descriptor buffer value. + * @addr: Physical address of memory buffer. + * @next: Pointer to next hardware descriptor. + */ +struct ksz_hw_desc { + union desc_stat ctrl; + union desc_buf buf; + u32 addr; + u32 next; +}; + +/** + * struct ksz_sw_desc - Software descriptor data structure + * @ctrl: Descriptor control value. + * @buf: Descriptor buffer value. + * @buf_size: Current buffers size value in hardware descriptor. + */ +struct ksz_sw_desc { + union desc_stat ctrl; + union desc_buf buf; + u32 buf_size; +}; + +/** + * struct ksz_dma_buf - OS dependent DMA buffer data structure + * @skb: Associated socket buffer. + * @dma: Associated physical DMA address. + * len: Actual len used. + */ +struct ksz_dma_buf { + struct sk_buff *skb; + dma_addr_t dma; + int len; +}; + +/** + * struct ksz_desc - Descriptor structure + * @phw: Hardware descriptor pointer to uncached physical memory. + * @sw: Cached memory to hold hardware descriptor values for + * manipulation. + * @dma_buf: Operating system dependent data structure to hold physical + * memory buffer allocation information. + */ +struct ksz_desc { + struct ksz_hw_desc *phw; + struct ksz_sw_desc sw; + struct ksz_dma_buf dma_buf; +}; + +#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf)) + +/** + * struct ksz_desc_info - Descriptor information data structure + * @ring: First descriptor in the ring. + * @cur: Current descriptor being manipulated. + * @ring_virt: First hardware descriptor in the ring. + * @ring_phys: The physical address of the first descriptor of the ring. + * @size: Size of hardware descriptor. + * @alloc: Number of descriptors allocated. + * @avail: Number of descriptors available for use. + * @last: Index for last descriptor released to hardware. + * @next: Index for next descriptor available for use. + * @mask: Mask for index wrapping. + */ +struct ksz_desc_info { + struct ksz_desc *ring; + struct ksz_desc *cur; + struct ksz_hw_desc *ring_virt; + u32 ring_phys; + int size; + int alloc; + int avail; + int last; + int next; + int mask; +}; + +/* + * KSZ8842 switch definitions + */ + +enum { + TABLE_STATIC_MAC = 0, + TABLE_VLAN, + TABLE_DYNAMIC_MAC, + TABLE_MIB +}; + +#define LEARNED_MAC_TABLE_ENTRIES 1024 +#define STATIC_MAC_TABLE_ENTRIES 8 + +/** + * struct ksz_mac_table - Static MAC table data structure + * @mac_addr: MAC address to filter. + * @vid: VID value. + * @fid: FID value. + * @ports: Port membership. + * @override: Override setting. + * @use_fid: FID use setting. + * @valid: Valid setting indicating the entry is being used. + */ +struct ksz_mac_table { + u8 mac_addr[ETH_ALEN]; + u16 vid; + u8 fid; + u8 ports; + u8 override:1; + u8 use_fid:1; + u8 valid:1; +}; + +#define VLAN_TABLE_ENTRIES 16 + +/** + * struct ksz_vlan_table - VLAN table data structure + * @vid: VID value. + * @fid: FID value. 
+ * @member: Port membership. + */ +struct ksz_vlan_table { + u16 vid; + u8 fid; + u8 member; +}; + +#define DIFFSERV_ENTRIES 64 +#define PRIO_802_1P_ENTRIES 8 +#define PRIO_QUEUES 4 + +#define SWITCH_PORT_NUM 2 +#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1) +#define HOST_MASK (1 << SWITCH_PORT_NUM) +#define PORT_MASK 7 + +#define MAIN_PORT 0 +#define OTHER_PORT 1 +#define HOST_PORT SWITCH_PORT_NUM + +#define PORT_COUNTER_NUM 0x20 +#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2) + +#define MIB_COUNTER_RX_LO_PRIORITY 0x00 +#define MIB_COUNTER_RX_HI_PRIORITY 0x01 +#define MIB_COUNTER_RX_UNDERSIZE 0x02 +#define MIB_COUNTER_RX_FRAGMENT 0x03 +#define MIB_COUNTER_RX_OVERSIZE 0x04 +#define MIB_COUNTER_RX_JABBER 0x05 +#define MIB_COUNTER_RX_SYMBOL_ERR 0x06 +#define MIB_COUNTER_RX_CRC_ERR 0x07 +#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08 +#define MIB_COUNTER_RX_CTRL_8808 0x09 +#define MIB_COUNTER_RX_PAUSE 0x0A +#define MIB_COUNTER_RX_BROADCAST 0x0B +#define MIB_COUNTER_RX_MULTICAST 0x0C +#define MIB_COUNTER_RX_UNICAST 0x0D +#define MIB_COUNTER_RX_OCTET_64 0x0E +#define MIB_COUNTER_RX_OCTET_65_127 0x0F +#define MIB_COUNTER_RX_OCTET_128_255 0x10 +#define MIB_COUNTER_RX_OCTET_256_511 0x11 +#define MIB_COUNTER_RX_OCTET_512_1023 0x12 +#define MIB_COUNTER_RX_OCTET_1024_1522 0x13 +#define MIB_COUNTER_TX_LO_PRIORITY 0x14 +#define MIB_COUNTER_TX_HI_PRIORITY 0x15 +#define MIB_COUNTER_TX_LATE_COLLISION 0x16 +#define MIB_COUNTER_TX_PAUSE 0x17 +#define MIB_COUNTER_TX_BROADCAST 0x18 +#define MIB_COUNTER_TX_MULTICAST 0x19 +#define MIB_COUNTER_TX_UNICAST 0x1A +#define MIB_COUNTER_TX_DEFERRED 0x1B +#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C +#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D +#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E +#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F + +#define MIB_COUNTER_RX_DROPPED_PACKET 0x20 +#define MIB_COUNTER_TX_DROPPED_PACKET 0x21 + +/** + * struct ksz_port_mib - Port MIB data structure + * @cnt_ptr: Current pointer to MIB counter index. + * @link_down: Indication the link has just gone down. + * @state: Connection status of the port. + * @mib_start: The starting counter index. Some ports do not start at 0. + * @counter: 64-bit MIB counter value. + * @dropped: Temporary buffer to remember last read packet dropped values. + * + * MIB counters needs to be read periodically so that counters do not get + * overflowed and give incorrect values. A right balance is needed to + * satisfy this condition and not waste too much CPU time. + * + * It is pointless to read MIB counters when the port is disconnected. The + * @state provides the connection status so that MIB counters are read only + * when the port is connected. The @link_down indicates the port is just + * disconnected so that all MIB counters are read one last time to update the + * information. + */ +struct ksz_port_mib { + u8 cnt_ptr; + u8 link_down; + u8 state; + u8 mib_start; + + u64 counter[TOTAL_PORT_COUNTER_NUM]; + u32 dropped[2]; +}; + +/** + * struct ksz_port_cfg - Port configuration data structure + * @vid: VID value. + * @member: Port membership. + * @port_prio: Port priority. + * @rx_rate: Receive priority rate. + * @tx_rate: Transmit priority rate. + * @stp_state: Current Spanning Tree Protocol state. + */ +struct ksz_port_cfg { + u16 vid; + u8 member; + u8 port_prio; + u32 rx_rate[PRIO_QUEUES]; + u32 tx_rate[PRIO_QUEUES]; + int stp_state; +}; + +/** + * struct ksz_switch - KSZ8842 switch data structure + * @mac_table: MAC table entries information. + * @vlan_table: VLAN table entries information. 
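To illustrate how the MIB_COUNTER_* indices address the cached counter[] array, a hypothetical helper totalling the received frame counters of one port (the name and its use are assumptions, not taken from the driver):

static u64 port_rx_frames(const struct ksz_port_mib *mib)
{
        return mib->counter[MIB_COUNTER_RX_UNICAST] +
               mib->counter[MIB_COUNTER_RX_MULTICAST] +
               mib->counter[MIB_COUNTER_RX_BROADCAST];
}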
+ * @port_cfg: Port configuration information. + * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS + * (bit7 ~ bit2) field. + * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p + * Tag priority field. + * @br_addr: Bridge address. Used for STP. + * @other_addr: Other MAC address. Used for multiple network device mode. + * @broad_per: Broadcast storm percentage. + * @member: Current port membership. Used for STP. + */ +struct ksz_switch { + struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES]; + struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES]; + struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM]; + + u8 diffserv[DIFFSERV_ENTRIES]; + u8 p_802_1p[PRIO_802_1P_ENTRIES]; + + u8 br_addr[ETH_ALEN]; + u8 other_addr[ETH_ALEN]; + + u8 broad_per; + u8 member; +}; + +#define TX_RATE_UNIT 10000 + +/** + * struct ksz_port_info - Port information data structure + * @state: Connection status of the port. + * @tx_rate: Transmit rate divided by 10000 to get Mbit. + * @duplex: Duplex mode. + * @advertised: Advertised auto-negotiation setting. Used to determine link. + * @partner: Auto-negotiation partner setting. Used to determine link. + * @port_id: Port index to access actual hardware register. + * @pdev: Pointer to OS dependent network device. + */ +struct ksz_port_info { + uint state; + uint tx_rate; + u8 duplex; + u8 advertised; + u8 partner; + u8 port_id; + void *pdev; +}; + +#define MAX_TX_HELD_SIZE 52000 + +/* Hardware features and bug fixes. */ +#define LINK_INT_WORKING (1 << 0) +#define SMALL_PACKET_TX_BUG (1 << 1) +#define HALF_DUPLEX_SIGNAL_BUG (1 << 2) +#define RX_HUGE_FRAME (1 << 4) +#define STP_SUPPORT (1 << 8) + +/* Software overrides. */ +#define PAUSE_FLOW_CTRL (1 << 0) +#define FAST_AGING (1 << 1) + +/** + * struct ksz_hw - KSZ884X hardware data structure + * @io: Virtual address assigned. + * @ksz_switch: Pointer to KSZ8842 switch. + * @port_info: Port information. + * @port_mib: Port MIB information. + * @dev_count: Number of network devices this hardware supports. + * @dst_ports: Destination ports in switch for transmission. + * @id: Hardware ID. Used for display only. + * @mib_cnt: Number of MIB counters this hardware has. + * @mib_port_cnt: Number of ports with MIB counters. + * @tx_cfg: Cached transmit control settings. + * @rx_cfg: Cached receive control settings. + * @intr_mask: Current interrupt mask. + * @intr_set: Current interrup set. + * @intr_blocked: Interrupt blocked. + * @rx_desc_info: Receive descriptor information. + * @tx_desc_info: Transmit descriptor information. + * @tx_int_cnt: Transmit interrupt count. Used for TX optimization. + * @tx_int_mask: Transmit interrupt mask. Used for TX optimization. + * @tx_size: Transmit data size. Used for TX optimization. + * The maximum is defined by MAX_TX_HELD_SIZE. + * @perm_addr: Permanent MAC address. + * @override_addr: Overrided MAC address. + * @address: Additional MAC address entries. + * @addr_list_size: Additional MAC address list size. + * @mac_override: Indication of MAC address overrided. + * @promiscuous: Counter to keep track of promiscuous mode set. + * @all_multi: Counter to keep track of all multicast mode set. + * @multi_list: Multicast address entries. + * @multi_bits: Cached multicast hash table settings. + * @multi_list_size: Multicast address list size. + * @enabled: Indication of hardware enabled. + * @rx_stop: Indication of receive process stop. + * @features: Hardware features to enable. + * @overrides: Hardware features to override. 
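Since @tx_rate is kept in units of TX_RATE_UNIT (a 100 Mbit link is stored as 100 * TX_RATE_UNIT), converting back to Mbit is one division; a hypothetical helper as a sketch:

static uint port_speed_mbit(const struct ksz_port_info *info)
{
        return info->tx_rate / TX_RATE_UNIT;
}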
+ * @parent: Pointer to parent, network device private structure. + */ +struct ksz_hw { + void __iomem *io; + + struct ksz_switch *ksz_switch; + struct ksz_port_info port_info[SWITCH_PORT_NUM]; + struct ksz_port_mib port_mib[TOTAL_PORT_NUM]; + int dev_count; + int dst_ports; + int id; + int mib_cnt; + int mib_port_cnt; + + u32 tx_cfg; + u32 rx_cfg; + u32 intr_mask; + u32 intr_set; + uint intr_blocked; + + struct ksz_desc_info rx_desc_info; + struct ksz_desc_info tx_desc_info; + + int tx_int_cnt; + int tx_int_mask; + int tx_size; + + u8 perm_addr[ETH_ALEN]; + u8 override_addr[ETH_ALEN]; + u8 address[ADDITIONAL_ENTRIES][ETH_ALEN]; + u8 addr_list_size; + u8 mac_override; + u8 promiscuous; + u8 all_multi; + u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN]; + u8 multi_bits[HW_MULTICAST_SIZE]; + u8 multi_list_size; + + u8 enabled; + u8 rx_stop; + u8 reserved2[1]; + + uint features; + uint overrides; + + void *parent; +}; + +enum { + PHY_NO_FLOW_CTRL, + PHY_FLOW_CTRL, + PHY_TX_ONLY, + PHY_RX_ONLY +}; + +/** + * struct ksz_port - Virtual port data structure + * @duplex: Duplex mode setting. 1 for half duplex, 2 for full + * duplex, and 0 for auto, which normally results in full + * duplex. + * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and + * 0 for auto, which normally results in 100 Mbit. + * @force_link: Force link setting. 0 for auto-negotiation, and 1 for + * force. + * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow + * control, and PHY_FLOW_CTRL for flow control. + * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100 + * Mbit PHY. + * @first_port: Index of first port this port supports. + * @mib_port_cnt: Number of ports with MIB counters. + * @port_cnt: Number of ports this port supports. + * @counter: Port statistics counter. + * @hw: Pointer to hardware structure. + * @linked: Pointer to port information linked to this port. + */ +struct ksz_port { + u8 duplex; + u8 speed; + u8 force_link; + u8 flow_ctrl; + + int first_port; + int mib_port_cnt; + int port_cnt; + u64 counter[OID_COUNTER_LAST]; + + struct ksz_hw *hw; + struct ksz_port_info *linked; +}; + +/** + * struct ksz_timer_info - Timer information data structure + * @timer: Kernel timer. + * @cnt: Running timer counter. + * @max: Number of times to run timer; -1 for infinity. + * @period: Timer period in jiffies. + */ +struct ksz_timer_info { + struct timer_list timer; + int cnt; + int max; + int period; +}; + +/** + * struct ksz_shared_mem - OS dependent shared memory data structure + * @dma_addr: Physical DMA address allocated. + * @alloc_size: Allocation size. + * @phys: Actual physical address used. + * @alloc_virt: Virtual address allocated. + * @virt: Actual virtual address used. + */ +struct ksz_shared_mem { + dma_addr_t dma_addr; + uint alloc_size; + uint phys; + u8 *alloc_virt; + u8 *virt; +}; + +/** + * struct ksz_counter_info - OS dependent counter information data structure + * @counter: Wait queue to wakeup after counters are read. + * @time: Next time in jiffies to read counter. + * @read: Indication of counters read in full or not. + */ +struct ksz_counter_info { + wait_queue_head_t counter; + unsigned long time; + int read; +}; + +/** + * struct dev_info - Network device information data structure + * @dev: Pointer to network device. + * @pdev: Pointer to PCI device. + * @hw: Hardware structure. + * @desc_pool: Physical memory used for descriptor pool. + * @hwlock: Spinlock to prevent hardware from accessing. + * @lock: Mutex lock to prevent device from accessing. 
+ * @dev_rcv: Receive process function used. + * @last_skb: Socket buffer allocated for descriptor rx fragments. + * @skb_index: Buffer index for receiving fragments. + * @skb_len: Buffer length for receiving fragments. + * @mib_read: Workqueue to read MIB counters. + * @mib_timer_info: Timer to read MIB counters. + * @counter: Used for MIB reading. + * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE; + * the maximum is MAX_RX_BUF_SIZE. + * @opened: Counter to keep track of device open. + * @rx_tasklet: Receive processing tasklet. + * @tx_tasklet: Transmit processing tasklet. + * @wol_enable: Wake-on-LAN enable set by ethtool. + * @wol_support: Wake-on-LAN support used by ethtool. + * @pme_wait: Used for KSZ8841 power management. + */ +struct dev_info { + struct net_device *dev; + struct pci_dev *pdev; + + struct ksz_hw hw; + struct ksz_shared_mem desc_pool; + + spinlock_t hwlock; + struct mutex lock; + + int (*dev_rcv)(struct dev_info *); + + struct sk_buff *last_skb; + int skb_index; + int skb_len; + + struct work_struct mib_read; + struct ksz_timer_info mib_timer_info; + struct ksz_counter_info counter[TOTAL_PORT_NUM]; + + int mtu; + int opened; + + struct tasklet_struct rx_tasklet; + struct tasklet_struct tx_tasklet; + + int wol_enable; + int wol_support; + unsigned long pme_wait; +}; + +/** + * struct dev_priv - Network device private data structure + * @adapter: Adapter device information. + * @port: Port information. + * @monitor_time_info: Timer to monitor ports. + * @proc_sem: Semaphore for proc accessing. + * @id: Device ID. + * @mii_if: MII interface information. + * @advertising: Temporary variable to store advertised settings. + * @msg_enable: The message flags controlling driver output. + * @media_state: The connection status of the device. + * @multicast: The all multicast state of the device. + * @promiscuous: The promiscuous state of the device. 
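A sketch of the locking split the field descriptions above suggest (an assumption, not a rule stated by the driver): @hwlock protects register access that can race with interrupt processing, while @lock serializes slower process-context configuration.

static void example_hw_access(struct dev_info *hw_priv)
{
        unsigned long flags;

        spin_lock_irqsave(&hw_priv->hwlock, flags);
        /* ... access hw_priv->hw registers that the IRQ path also touches ... */
        spin_unlock_irqrestore(&hw_priv->hwlock, flags);
}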
+ */ +struct dev_priv { + struct dev_info *adapter; + struct ksz_port port; + struct ksz_timer_info monitor_timer_info; + + struct semaphore proc_sem; + int id; + + struct mii_if_info mii_if; + u32 advertising; + + u32 msg_enable; + int media_state; + int multicast; + int promiscuous; +}; + +#define DRV_NAME "KSZ884X PCI" +#define DEVICE_NAME "KSZ884x PCI" +#define DRV_VERSION "1.0.0" +#define DRV_RELDATE "Feb 8, 2010" + +static char version[] = + "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")"; + +static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 }; + +/* + * Interrupt processing primary routines + */ + +static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt) +{ + writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS); +} + +static inline void hw_dis_intr(struct ksz_hw *hw) +{ + hw->intr_blocked = hw->intr_mask; + writel(0, hw->io + KS884X_INTERRUPTS_ENABLE); + hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE); +} + +static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt) +{ + hw->intr_set = interrupt; + writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE); +} + +static inline void hw_ena_intr(struct ksz_hw *hw) +{ + hw->intr_blocked = 0; + hw_set_intr(hw, hw->intr_mask); +} + +static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit) +{ + hw->intr_mask &= ~(bit); +} + +static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt) +{ + u32 read_intr; + + read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); + hw->intr_set = read_intr & ~interrupt; + writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); + hw_dis_intr_bit(hw, interrupt); +} + +/** + * hw_turn_on_intr - turn on specified interrupts + * @hw: The hardware instance. + * @bit: The interrupt bits to be on. + * + * This routine turns on the specified interrupts in the interrupt mask so that + * those interrupts will be enabled. + */ +static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit) +{ + hw->intr_mask |= bit; + + if (!hw->intr_blocked) + hw_set_intr(hw, hw->intr_mask); +} + +static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt) +{ + u32 read_intr; + + read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE); + hw->intr_set = read_intr | interrupt; + writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE); +} + +static inline void hw_read_intr(struct ksz_hw *hw, uint *status) +{ + *status = readl(hw->io + KS884X_INTERRUPTS_STATUS); + *status = *status & hw->intr_set; +} + +static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt) +{ + if (interrupt) + hw_ena_intr(hw); +} + +/** + * hw_block_intr - block hardware interrupts + * + * This function blocks all interrupts of the hardware and returns the current + * interrupt enable mask so that interrupts can be restored later. + * + * Return the current interrupt enable mask. 
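To show how the private structures chain together, a hypothetical helper walking from a net_device to the shared hardware state; netdev_priv() is the standard kernel accessor, the helper name is made up.

static struct ksz_hw *example_get_hw(struct net_device *dev)
{
        struct dev_priv *priv = netdev_priv(dev);

        return &priv->adapter->hw;
}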
+ */ +static uint hw_block_intr(struct ksz_hw *hw) +{ + uint interrupt = 0; + + if (!hw->intr_blocked) { + hw_dis_intr(hw); + interrupt = hw->intr_blocked; + } + return interrupt; +} + +/* + * Hardware descriptor routines + */ + +static inline void reset_desc(struct ksz_desc *desc, union desc_stat status) +{ + status.rx.hw_owned = 0; + desc->phw->ctrl.data = cpu_to_le32(status.data); +} + +static inline void release_desc(struct ksz_desc *desc) +{ + desc->sw.ctrl.tx.hw_owned = 1; + if (desc->sw.buf_size != desc->sw.buf.data) { + desc->sw.buf_size = desc->sw.buf.data; + desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data); + } + desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data); +} + +static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc) +{ + *desc = &info->ring[info->last]; + info->last++; + info->last &= info->mask; + info->avail--; + (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK; +} + +static inline void set_rx_buf(struct ksz_desc *desc, u32 addr) +{ + desc->phw->addr = cpu_to_le32(addr); +} + +static inline void set_rx_len(struct ksz_desc *desc, u32 len) +{ + desc->sw.buf.rx.buf_size = len; +} + +static inline void get_tx_pkt(struct ksz_desc_info *info, + struct ksz_desc **desc) +{ + *desc = &info->ring[info->next]; + info->next++; + info->next &= info->mask; + info->avail--; + (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK; +} + +static inline void set_tx_buf(struct ksz_desc *desc, u32 addr) +{ + desc->phw->addr = cpu_to_le32(addr); +} + +static inline void set_tx_len(struct ksz_desc *desc, u32 len) +{ + desc->sw.buf.tx.buf_size = len; +} + +/* Switch functions */ + +#define TABLE_READ 0x10 +#define TABLE_SEL_SHIFT 2 + +#define HW_DELAY(hw, reg) \ + do { \ + u16 dummy; \ + dummy = readw(hw->io + reg); \ + } while (0) + +/** + * sw_r_table - read 4 bytes of data from switch table + * @hw: The hardware instance. + * @table: The table selector. + * @addr: The address of the table entry. + * @data: Buffer to store the read data. + * + * This routine reads 4 bytes of data from the table of the switch. + * Hardware interrupts are disabled to minimize corruption of read data. + */ +static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data) +{ + u16 ctrl_addr; + uint interrupt; + + ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr; + + interrupt = hw_block_intr(hw); + + writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); + HW_DELAY(hw, KS884X_IACR_OFFSET); + *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); + + hw_restore_intr(hw, interrupt); +} + +/** + * sw_w_table_64 - write 8 bytes of data to the switch table + * @hw: The hardware instance. + * @table: The table selector. + * @addr: The address of the table entry. + * @data_hi: The high part of data to be written (bit63 ~ bit32). + * @data_lo: The low part of data to be written (bit31 ~ bit0). + * + * This routine writes 8 bytes of data to the table of the switch. + * Hardware interrupts are disabled to minimize corruption of written data. + */ +static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi, + u32 data_lo) +{ + u16 ctrl_addr; + uint interrupt; + + ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr; + + interrupt = hw_block_intr(hw); + + writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET); + writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET); + + writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); + HW_DELAY(hw, KS884X_IACR_OFFSET); + + hw_restore_intr(hw, interrupt); +} + +/** + * sw_w_sta_mac_table - write to the static MAC table + * @hw: The hardware instance. 
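A hypothetical sketch of how the descriptor helpers above combine when queueing one transmit buffer: fetch the next free descriptor, point it at a DMA address, set the length and segment flags, then hand it to the hardware. Availability checks and DMA mapping are omitted; the function name is an assumption.

static void example_queue_tx(struct ksz_desc_info *info, u32 dma, int len)
{
        struct ksz_desc *desc;

        get_tx_pkt(info, &desc);
        set_tx_buf(desc, dma);
        set_tx_len(desc, len);
        desc->sw.buf.tx.first_seg = 1;          /* single-fragment packet */
        desc->sw.buf.tx.last_seg = 1;
        release_desc(desc);                     /* sets hw_owned and writes the words out */
}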
+ * @addr: The address of the table entry. + * @mac_addr: The MAC address. + * @ports: The port members. + * @override: The flag to override the port receive/transmit settings. + * @valid: The flag to indicate entry is valid. + * @use_fid: The flag to indicate the FID is valid. + * @fid: The FID value. + * + * This routine writes an entry of the static MAC table of the switch. It + * calls sw_w_table_64() to write the data. + */ +static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr, + u8 ports, int override, int valid, int use_fid, u8 fid) +{ + u32 data_hi; + u32 data_lo; + + data_lo = ((u32) mac_addr[2] << 24) | + ((u32) mac_addr[3] << 16) | + ((u32) mac_addr[4] << 8) | mac_addr[5]; + data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1]; + data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT; + + if (override) + data_hi |= STATIC_MAC_TABLE_OVERRIDE; + if (use_fid) { + data_hi |= STATIC_MAC_TABLE_USE_FID; + data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT; + } + if (valid) + data_hi |= STATIC_MAC_TABLE_VALID; + + sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo); +} + +/** + * sw_r_vlan_table - read from the VLAN table + * @hw: The hardware instance. + * @addr: The address of the table entry. + * @vid: Buffer to store the VID. + * @fid: Buffer to store the VID. + * @member: Buffer to store the port membership. + * + * This function reads an entry of the VLAN table of the switch. It calls + * sw_r_table() to get the data. + * + * Return 0 if the entry is valid; otherwise -1. + */ +static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid, + u8 *member) +{ + u32 data; + + sw_r_table(hw, TABLE_VLAN, addr, &data); + if (data & VLAN_TABLE_VALID) { + *vid = (u16)(data & VLAN_TABLE_VID); + *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT); + *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >> + VLAN_TABLE_MEMBERSHIP_SHIFT); + return 0; + } + return -1; +} + +/** + * port_r_mib_cnt - read MIB counter + * @hw: The hardware instance. + * @port: The port index. + * @addr: The address of the counter. + * @cnt: Buffer to store the counter. + * + * This routine reads a MIB counter of the port. + * Hardware interrupts are disabled to minimize corruption of read data. + */ +static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt) +{ + u32 data; + u16 ctrl_addr; + uint interrupt; + int timeout; + + ctrl_addr = addr + PORT_COUNTER_NUM * port; + + interrupt = hw_block_intr(hw); + + ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8); + writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); + HW_DELAY(hw, KS884X_IACR_OFFSET); + + for (timeout = 100; timeout > 0; timeout--) { + data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); + + if (data & MIB_COUNTER_VALID) { + if (data & MIB_COUNTER_OVERFLOW) + *cnt += MIB_COUNTER_VALUE + 1; + *cnt += data & MIB_COUNTER_VALUE; + break; + } + } + + hw_restore_intr(hw, interrupt); +} + +/** + * port_r_mib_pkt - read dropped packet counts + * @hw: The hardware instance. + * @port: The port index. + * @cnt: Buffer to store the receive and transmit dropped packet counts. + * + * This routine reads the dropped packet counts of the port. + * Hardware interrupts are disabled to minimize corruption of read data. 
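As a usage sketch for sw_w_sta_mac_table() (the entry index and forwarding choice are assumptions): adding a static entry that forwards one address only to the host port, with no FID matching.

static void example_add_host_mac(struct ksz_hw *hw, u16 entry, u8 *addr)
{
        sw_w_sta_mac_table(hw, entry, addr, HOST_MASK,
                           0 /* override */, 1 /* valid */,
                           0 /* use_fid */, 0 /* fid */);
}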
+ */ +static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt) +{ + u32 cur; + u32 data; + u16 ctrl_addr; + uint interrupt; + int index; + + index = KS_MIB_PACKET_DROPPED_RX_0 + port; + do { + interrupt = hw_block_intr(hw); + + ctrl_addr = (u16) index; + ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) + << 8); + writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET); + HW_DELAY(hw, KS884X_IACR_OFFSET); + data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET); + + hw_restore_intr(hw, interrupt); + + data &= MIB_PACKET_DROPPED; + cur = *last; + if (data != cur) { + *last = data; + if (data < cur) + data += MIB_PACKET_DROPPED + 1; + data -= cur; + *cnt += data; + } + ++last; + ++cnt; + index -= KS_MIB_PACKET_DROPPED_TX - + KS_MIB_PACKET_DROPPED_TX_0 + 1; + } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port); +} + +/** + * port_r_cnt - read MIB counters periodically + * @hw: The hardware instance. + * @port: The port index. + * + * This routine is used to read the counters of the port periodically to avoid + * counter overflow. The hardware should be acquired first before calling this + * routine. + * + * Return non-zero when not all counters not read. + */ +static int port_r_cnt(struct ksz_hw *hw, int port) +{ + struct ksz_port_mib *mib = &hw->port_mib[port]; + + if (mib->mib_start < PORT_COUNTER_NUM) + while (mib->cnt_ptr < PORT_COUNTER_NUM) { + port_r_mib_cnt(hw, port, mib->cnt_ptr, + &mib->counter[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + if (hw->mib_cnt > PORT_COUNTER_NUM) + port_r_mib_pkt(hw, port, mib->dropped, + &mib->counter[PORT_COUNTER_NUM]); + mib->cnt_ptr = 0; + return 0; +} + +/** + * port_init_cnt - initialize MIB counter values + * @hw: The hardware instance. + * @port: The port index. + * + * This routine is used to initialize all counters to zero if the hardware + * cannot do it after reset. + */ +static void port_init_cnt(struct ksz_hw *hw, int port) +{ + struct ksz_port_mib *mib = &hw->port_mib[port]; + + mib->cnt_ptr = 0; + if (mib->mib_start < PORT_COUNTER_NUM) + do { + port_r_mib_cnt(hw, port, mib->cnt_ptr, + &mib->counter[mib->cnt_ptr]); + ++mib->cnt_ptr; + } while (mib->cnt_ptr < PORT_COUNTER_NUM); + if (hw->mib_cnt > PORT_COUNTER_NUM) + port_r_mib_pkt(hw, port, mib->dropped, + &mib->counter[PORT_COUNTER_NUM]); + memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM); + mib->cnt_ptr = 0; +} + +/* + * Port functions + */ + +/** + * port_chk - check port register bits + * @hw: The hardware instance. + * @port: The port index. + * @offset: The offset of the port register. + * @bits: The data bits to check. + * + * This function checks whether the specified bits of the port register are set + * or not. + * + * Return 0 if the bits are not set. + */ +static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits) +{ + u32 addr; + u16 data; + + PORT_CTRL_ADDR(port, addr); + addr += offset; + data = readw(hw->io + addr); + return (data & bits) == bits; +} + +/** + * port_cfg - set port register bits + * @hw: The hardware instance. + * @port: The port index. + * @offset: The offset of the port register. + * @bits: The data bits to set. + * @set: The flag indicating whether the bits are to be set or not. + * + * This routine sets or resets the specified bits of the port register. 
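The wrap handling in port_r_mib_pkt() above can be read in isolation as follows (a sketch, not driver code): the hardware counter is only MIB_PACKET_DROPPED wide, so a reading smaller than the previous one means it wrapped, and one full period is added back before taking the delta.

static u32 example_dropped_delta(u32 prev, u32 now)
{
        u32 delta = now;

        if (now < prev)
                delta += MIB_PACKET_DROPPED + 1;
        return delta - prev;
}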
+ */ +static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits, + int set) +{ + u32 addr; + u16 data; + + PORT_CTRL_ADDR(port, addr); + addr += offset; + data = readw(hw->io + addr); + if (set) + data |= bits; + else + data &= ~bits; + writew(data, hw->io + addr); +} + +/** + * port_chk_shift - check port bit + * @hw: The hardware instance. + * @port: The port index. + * @offset: The offset of the register. + * @shift: Number of bits to shift. + * + * This function checks whether the specified port is set in the register or + * not. + * + * Return 0 if the port is not set. + */ +static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift) +{ + u16 data; + u16 bit = 1 << port; + + data = readw(hw->io + addr); + data >>= shift; + return (data & bit) == bit; +} + +/** + * port_cfg_shift - set port bit + * @hw: The hardware instance. + * @port: The port index. + * @offset: The offset of the register. + * @shift: Number of bits to shift. + * @set: The flag indicating whether the port is to be set or not. + * + * This routine sets or resets the specified port in the register. + */ +static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift, + int set) +{ + u16 data; + u16 bits = 1 << port; + + data = readw(hw->io + addr); + bits <<= shift; + if (set) + data |= bits; + else + data &= ~bits; + writew(data, hw->io + addr); +} + +/** + * port_r8 - read byte from port register + * @hw: The hardware instance. + * @port: The port index. + * @offset: The offset of the port register. + * @data: Buffer to store the data. + * + * This routine reads a byte from the port register. + */ +static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data) +{ + u32 addr; + + PORT_CTRL_ADDR(port, addr); + addr += offset; + *data = readb(hw->io + addr); +} + +/** + * port_r16 - read word from port register. + * @hw: The hardware instance. + * @port: The port index. + * @offset: The offset of the port register. + * @data: Buffer to store the data. + * + * This routine reads a word from the port register. + */ +static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data) +{ + u32 addr; + + PORT_CTRL_ADDR(port, addr); + addr += offset; + *data = readw(hw->io + addr); +} + +/** + * port_w16 - write word to port register. + * @hw: The hardware instance. + * @port: The port index. + * @offset: The offset of the port register. + * @data: Data to write. + * + * This routine writes a word to the port register. + */ +static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data) +{ + u32 addr; + + PORT_CTRL_ADDR(port, addr); + addr += offset; + writew(data, hw->io + addr); +} + +/** + * sw_chk - check switch register bits + * @hw: The hardware instance. + * @addr: The address of the switch register. + * @bits: The data bits to check. + * + * This function checks whether the specified bits of the switch register are + * set or not. + * + * Return 0 if the bits are not set. + */ +static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits) +{ + u16 data; + + data = readw(hw->io + addr); + return (data & bits) == bits; +} + +/** + * sw_cfg - set switch register bits + * @hw: The hardware instance. + * @addr: The address of the switch register. + * @bits: The data bits to set. + * @set: The flag indicating whether the bits are to be set or not. + * + * This function sets or resets the specified bits of the switch register. 
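Note that port_chk() above returns true only when every requested bit is set, since it tests (data & bits) == bits; a hypothetical multi-bit query:

static int example_port_rx_tx_enabled(struct ksz_hw *hw, int port)
{
        return port_chk(hw, port, KS8842_PORT_CTRL_2_OFFSET,
                        PORT_RX_ENABLE | PORT_TX_ENABLE);
}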
+ */ +static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set) +{ + u16 data; + + data = readw(hw->io + addr); + if (set) + data |= bits; + else + data &= ~bits; + writew(data, hw->io + addr); +} + +/* Bandwidth */ + +static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set); +} + +static inline int port_chk_broad_storm(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM); +} + +/* Driver set switch broadcast storm protection at 10% rate. */ +#define BROADCAST_STORM_PROTECTION_RATE 10 + +/* 148,800 frames * 67 ms / 100 */ +#define BROADCAST_STORM_VALUE 9969 + +/** + * sw_cfg_broad_storm - configure broadcast storm threshold + * @hw: The hardware instance. + * @percent: Broadcast storm threshold in percent of transmit rate. + * + * This routine configures the broadcast storm threshold of the switch. + */ +static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent) +{ + u16 data; + u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100); + + if (value > BROADCAST_STORM_RATE) + value = BROADCAST_STORM_RATE; + + data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); + data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI); + data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8); + writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); +} + +/** + * sw_get_board_storm - get broadcast storm threshold + * @hw: The hardware instance. + * @percent: Buffer to store the broadcast storm threshold percentage. + * + * This routine retrieves the broadcast storm threshold of the switch. + */ +static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent) +{ + int num; + u16 data; + + data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); + num = (data & BROADCAST_STORM_RATE_HI); + num <<= 8; + num |= (data & BROADCAST_STORM_RATE_LO) >> 8; + num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE; + *percent = (u8) num; +} + +/** + * sw_dis_broad_storm - disable broadstorm + * @hw: The hardware instance. + * @port: The port index. + * + * This routine disables the broadcast storm limit function of the switch. + */ +static void sw_dis_broad_storm(struct ksz_hw *hw, int port) +{ + port_cfg_broad_storm(hw, port, 0); +} + +/** + * sw_ena_broad_storm - enable broadcast storm + * @hw: The hardware instance. + * @port: The port index. + * + * This routine enables the broadcast storm limit function of the switch. + */ +static void sw_ena_broad_storm(struct ksz_hw *hw, int port) +{ + sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); + port_cfg_broad_storm(hw, port, 1); +} + +/** + * sw_init_broad_storm - initialize broadcast storm + * @hw: The hardware instance. + * + * This routine initializes the broadcast storm limit function of the switch. + */ +static void sw_init_broad_storm(struct ksz_hw *hw) +{ + int port; + + hw->ksz_switch->broad_per = 1; + sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per); + for (port = 0; port < TOTAL_PORT_NUM; port++) + sw_dis_broad_storm(hw, port); + sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1); +} + +/** + * hw_cfg_broad_storm - configure broadcast storm + * @hw: The hardware instance. + * @percent: Broadcast storm threshold in percent of transmit rate. + * + * This routine configures the broadcast storm threshold of the switch. + * It is called by user functions. The hardware should be acquired first. 
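Worked numbers (no new behaviour) for the conversion in sw_cfg_broad_storm() above: the rate field counts frames per 67 ms interval, and 148800 frames/s * 67 ms is roughly 9969, hence BROADCAST_STORM_VALUE; the default 10 % threshold therefore programs 9969 * 10 / 100 = 996 frames per interval, written into SWITCH_CTRL_3 with the low and high bytes swapped.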
+ */ +static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent) +{ + if (percent > 100) + percent = 100; + + sw_cfg_broad_storm(hw, percent); + sw_get_broad_storm(hw, &percent); + hw->ksz_switch->broad_per = percent; +} + +/** + * sw_dis_prio_rate - disable switch priority rate + * @hw: The hardware instance. + * @port: The port index. + * + * This routine disables the priority rate function of the switch. + */ +static void sw_dis_prio_rate(struct ksz_hw *hw, int port) +{ + u32 addr; + + PORT_CTRL_ADDR(port, addr); + addr += KS8842_PORT_IN_RATE_OFFSET; + writel(0, hw->io + addr); +} + +/** + * sw_init_prio_rate - initialize switch prioirty rate + * @hw: The hardware instance. + * + * This routine initializes the priority rate function of the switch. + */ +static void sw_init_prio_rate(struct ksz_hw *hw) +{ + int port; + int prio; + struct ksz_switch *sw = hw->ksz_switch; + + for (port = 0; port < TOTAL_PORT_NUM; port++) { + for (prio = 0; prio < PRIO_QUEUES; prio++) { + sw->port_cfg[port].rx_rate[prio] = + sw->port_cfg[port].tx_rate[prio] = 0; + } + sw_dis_prio_rate(hw, port); + } +} + +/* Communication */ + +static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set); +} + +static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set); +} + +static inline int port_chk_back_pressure(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE); +} + +static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL); +} + +/* Spanning Tree */ + +static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set); +} + +static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set); +} + +static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set) +{ + sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set); +} + +static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw) +{ + if (!(hw->overrides & FAST_AGING)) { + sw_cfg_fast_aging(hw, 1); + mdelay(1); + sw_cfg_fast_aging(hw, 0); + } +} + +/* VLAN */ + +static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert); +} + +static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove); +} + +static inline int port_chk_ins_tag(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG); +} + +static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG); +} + +static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set); +} + +static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set); +} + +static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID); +} + +static inline int port_chk_in_filter(struct ksz_hw *hw, int p) +{ + return 
port_chk(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER); +} + +/* Mirroring */ + +static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set); +} + +static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set); +} + +static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set); +} + +static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set) +{ + sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set); +} + +static void sw_init_mirror(struct ksz_hw *hw) +{ + int port; + + for (port = 0; port < TOTAL_PORT_NUM; port++) { + port_cfg_mirror_sniffer(hw, port, 0); + port_cfg_mirror_rx(hw, port, 0); + port_cfg_mirror_tx(hw, port, 0); + } + sw_cfg_mirror_rx_tx(hw, 0); +} + +static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set) +{ + sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET, + SWITCH_UNK_DEF_PORT_ENABLE, set); +} + +static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw) +{ + return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET, + SWITCH_UNK_DEF_PORT_ENABLE); +} + +static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set) +{ + port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set); +} + +static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port) +{ + return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0); +} + +/* Priority */ + +static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set); +} + +static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set); +} + +static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set); +} + +static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set) +{ + port_cfg(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set); +} + +static inline int port_chk_diffserv(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE); +} + +static inline int port_chk_802_1p(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE); +} + +static inline int port_chk_replace_vid(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING); +} + +static inline int port_chk_prio(struct ksz_hw *hw, int p) +{ + return port_chk(hw, p, + KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE); +} + +/** + * sw_dis_diffserv - disable switch DiffServ priority + * @hw: The hardware instance. + * @port: The port index. + * + * This routine disables the DiffServ priority function of the switch. + */ +static void sw_dis_diffserv(struct ksz_hw *hw, int port) +{ + port_cfg_diffserv(hw, port, 0); +} + +/** + * sw_dis_802_1p - disable switch 802.1p priority + * @hw: The hardware instance. + * @port: The port index. + * + * This routine disables the 802.1p priority function of the switch. + */ +static void sw_dis_802_1p(struct ksz_hw *hw, int port) +{ + port_cfg_802_1p(hw, port, 0); +} + +/** + * sw_cfg_replace_null_vid - + * @hw: The hardware instance. + * @set: The flag to disable or enable. 
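A hypothetical sketch of the mirroring knobs above: copy everything received on MAIN_PORT to OTHER_PORT. Treating sw_cfg_mirror_rx_tx() as "require both RX and TX to match" is an assumption, so it is left cleared here.

static void example_mirror_rx(struct ksz_hw *hw)
{
        sw_init_mirror(hw);                             /* clear any previous setup */
        port_cfg_mirror_sniffer(hw, OTHER_PORT, 1);     /* mirror destination */
        port_cfg_mirror_rx(hw, MAIN_PORT, 1);           /* mirror source (ingress) */
        sw_cfg_mirror_rx_tx(hw, 0);                     /* assumed: mirror on RX match alone */
}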
+ * + */ +static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set) +{ + sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set); +} + +/** + * sw_cfg_replace_vid - enable switch 802.10 priority re-mapping + * @hw: The hardware instance. + * @port: The port index. + * @set: The flag to disable or enable. + * + * This routine enables the 802.1p priority re-mapping function of the switch. + * That allows 802.1p priority field to be replaced with the port's default + * tag's priority value if the ingress packet's 802.1p priority has a higher + * priority than port's default tag's priority. + */ +static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set) +{ + port_cfg_replace_vid(hw, port, set); +} + +/** + * sw_cfg_port_based - configure switch port based priority + * @hw: The hardware instance. + * @port: The port index. + * @prio: The priority to set. + * + * This routine configures the port based priority of the switch. + */ +static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio) +{ + u16 data; + + if (prio > PORT_BASED_PRIORITY_BASE) + prio = PORT_BASED_PRIORITY_BASE; + + hw->ksz_switch->port_cfg[port].port_prio = prio; + + port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data); + data &= ~PORT_BASED_PRIORITY_MASK; + data |= prio << PORT_BASED_PRIORITY_SHIFT; + port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data); +} + +/** + * sw_dis_multi_queue - disable transmit multiple queues + * @hw: The hardware instance. + * @port: The port index. + * + * This routine disables the transmit multiple queues selection of the switch + * port. Only single transmit queue on the port. + */ +static void sw_dis_multi_queue(struct ksz_hw *hw, int port) +{ + port_cfg_prio(hw, port, 0); +} + +/** + * sw_init_prio - initialize switch priority + * @hw: The hardware instance. + * + * This routine initializes the switch QoS priority functions. + */ +static void sw_init_prio(struct ksz_hw *hw) +{ + int port; + int tos; + struct ksz_switch *sw = hw->ksz_switch; + + /* + * Init all the 802.1p tag priority value to be assigned to different + * priority queue. + */ + sw->p_802_1p[0] = 0; + sw->p_802_1p[1] = 0; + sw->p_802_1p[2] = 1; + sw->p_802_1p[3] = 1; + sw->p_802_1p[4] = 2; + sw->p_802_1p[5] = 2; + sw->p_802_1p[6] = 3; + sw->p_802_1p[7] = 3; + + /* + * Init all the DiffServ priority value to be assigned to priority + * queue 0. + */ + for (tos = 0; tos < DIFFSERV_ENTRIES; tos++) + sw->diffserv[tos] = 0; + + /* All QoS functions disabled. */ + for (port = 0; port < TOTAL_PORT_NUM; port++) { + sw_dis_multi_queue(hw, port); + sw_dis_diffserv(hw, port); + sw_dis_802_1p(hw, port); + sw_cfg_replace_vid(hw, port, 0); + + sw->port_cfg[port].port_prio = 0; + sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio); + } + sw_cfg_replace_null_vid(hw, 0); +} + +/** + * port_get_def_vid - get port default VID. + * @hw: The hardware instance. + * @port: The port index. + * @vid: Buffer to store the VID. + * + * This routine retrieves the default VID of the port. + */ +static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid) +{ + u32 addr; + + PORT_CTRL_ADDR(port, addr); + addr += KS8842_PORT_CTRL_VID_OFFSET; + *vid = readw(hw->io + addr); +} + +/** + * sw_init_vlan - initialize switch VLAN + * @hw: The hardware instance. + * + * This routine initializes the VLAN function of the switch. + */ +static void sw_init_vlan(struct ksz_hw *hw) +{ + int port; + int entry; + struct ksz_switch *sw = hw->ksz_switch; + + /* Read 16 VLAN entries from device's VLAN table. 
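With the mapping installed by sw_init_prio() above (p_802_1p[] ends up as 0,0,1,1,2,2,3,3), an 802.1p tag priority selects a priority queue as this hypothetical helper shows:

static u8 example_queue_for_tag(const struct ksz_switch *sw, u8 tag_prio)
{
        return sw->p_802_1p[tag_prio & (PRIO_802_1P_ENTRIES - 1)];
}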
*/ + for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) { + sw_r_vlan_table(hw, entry, + &sw->vlan_table[entry].vid, + &sw->vlan_table[entry].fid, + &sw->vlan_table[entry].member); + } + + for (port = 0; port < TOTAL_PORT_NUM; port++) { + port_get_def_vid(hw, port, &sw->port_cfg[port].vid); + sw->port_cfg[port].member = PORT_MASK; + } +} + +/** + * sw_cfg_port_base_vlan - configure port-based VLAN membership + * @hw: The hardware instance. + * @port: The port index. + * @member: The port-based VLAN membership. + * + * This routine configures the port-based VLAN membership of the port. + */ +static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member) +{ + u32 addr; + u8 data; + + PORT_CTRL_ADDR(port, addr); + addr += KS8842_PORT_CTRL_2_OFFSET; + + data = readb(hw->io + addr); + data &= ~PORT_VLAN_MEMBERSHIP; + data |= (member & PORT_MASK); + writeb(data, hw->io + addr); + + hw->ksz_switch->port_cfg[port].member = member; +} + +/** + * sw_get_addr - get the switch MAC address. + * @hw: The hardware instance. + * @mac_addr: Buffer to store the MAC address. + * + * This function retrieves the MAC address of the switch. + */ +static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr) +{ + int i; + + for (i = 0; i < 6; i += 2) { + mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i); + mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i); + } +} + +/** + * sw_set_addr - configure switch MAC address + * @hw: The hardware instance. + * @mac_addr: The MAC address. + * + * This function configures the MAC address of the switch. + */ +static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr) +{ + int i; + + for (i = 0; i < 6; i += 2) { + writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i); + writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i); + } +} + +/** + * sw_set_global_ctrl - set switch global control + * @hw: The hardware instance. + * + * This routine sets the global control of the switch function. + */ +static void sw_set_global_ctrl(struct ksz_hw *hw) +{ + u16 data; + + /* Enable switch MII flow control. */ + data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET); + data |= SWITCH_FLOW_CTRL; + writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET); + + data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET); + + /* Enable aggressive back off algorithm in half duplex mode. */ + data |= SWITCH_AGGR_BACKOFF; + + /* Enable automatic fast aging when link changed detected. */ + data |= SWITCH_AGING_ENABLE; + data |= SWITCH_LINK_AUTO_AGING; + + if (hw->overrides & FAST_AGING) + data |= SWITCH_FAST_AGING; + else + data &= ~SWITCH_FAST_AGING; + writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET); + + data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); + + /* Enable no excessive collision drop. */ + data |= NO_EXC_COLLISION_DROP; + writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); +} + +enum { + STP_STATE_DISABLED = 0, + STP_STATE_LISTENING, + STP_STATE_LEARNING, + STP_STATE_FORWARDING, + STP_STATE_BLOCKED, + STP_STATE_SIMPLE +}; + +/** + * port_set_stp_state - configure port spanning tree state + * @hw: The hardware instance. + * @port: The port index. + * @state: The spanning tree state. + * + * This routine configures the spanning tree state of the port. 
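A hypothetical port-based VLAN sketch using sw_cfg_port_base_vlan() above: each external port keeps only itself and the host port in its membership mask, so the two external ports can reach the CPU but not each other.

static void example_isolate_ports(struct ksz_hw *hw)
{
        sw_cfg_port_base_vlan(hw, MAIN_PORT, (1 << MAIN_PORT) | HOST_MASK);
        sw_cfg_port_base_vlan(hw, OTHER_PORT, (1 << OTHER_PORT) | HOST_MASK);
}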
+ */ +static void port_set_stp_state(struct ksz_hw *hw, int port, int state) +{ + u16 data; + + port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data); + switch (state) { + case STP_STATE_DISABLED: + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE); + data |= PORT_LEARN_DISABLE; + break; + case STP_STATE_LISTENING: +/* + * No need to turn on transmit because of port direct mode. + * Turning on receive is required if static MAC table is not setup. + */ + data &= ~PORT_TX_ENABLE; + data |= PORT_RX_ENABLE; + data |= PORT_LEARN_DISABLE; + break; + case STP_STATE_LEARNING: + data &= ~PORT_TX_ENABLE; + data |= PORT_RX_ENABLE; + data &= ~PORT_LEARN_DISABLE; + break; + case STP_STATE_FORWARDING: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + data &= ~PORT_LEARN_DISABLE; + break; + case STP_STATE_BLOCKED: +/* + * Need to setup static MAC table with override to keep receiving BPDU + * messages. See sw_init_stp routine. + */ + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE); + data |= PORT_LEARN_DISABLE; + break; + case STP_STATE_SIMPLE: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + data |= PORT_LEARN_DISABLE; + break; + } + port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data); + hw->ksz_switch->port_cfg[port].stp_state = state; +} + +#define STP_ENTRY 0 +#define BROADCAST_ENTRY 1 +#define BRIDGE_ADDR_ENTRY 2 +#define IPV6_ADDR_ENTRY 3 + +/** + * sw_clr_sta_mac_table - clear static MAC table + * @hw: The hardware instance. + * + * This routine clears the static MAC table. + */ +static void sw_clr_sta_mac_table(struct ksz_hw *hw) +{ + struct ksz_mac_table *entry; + int i; + + for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) { + entry = &hw->ksz_switch->mac_table[i]; + sw_w_sta_mac_table(hw, i, + entry->mac_addr, entry->ports, + entry->override, 0, + entry->use_fid, entry->fid); + } +} + +/** + * sw_init_stp - initialize switch spanning tree support + * @hw: The hardware instance. + * + * This routine initializes the spanning tree support of the switch. + */ +static void sw_init_stp(struct ksz_hw *hw) +{ + struct ksz_mac_table *entry; + + entry = &hw->ksz_switch->mac_table[STP_ENTRY]; + entry->mac_addr[0] = 0x01; + entry->mac_addr[1] = 0x80; + entry->mac_addr[2] = 0xC2; + entry->mac_addr[3] = 0x00; + entry->mac_addr[4] = 0x00; + entry->mac_addr[5] = 0x00; + entry->ports = HOST_MASK; + entry->override = 1; + entry->valid = 1; + sw_w_sta_mac_table(hw, STP_ENTRY, + entry->mac_addr, entry->ports, + entry->override, entry->valid, + entry->use_fid, entry->fid); +} + +/** + * sw_block_addr - block certain packets from the host port + * @hw: The hardware instance. + * + * This routine blocks certain packets from reaching to the host port. 
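A sketch of a typical spanning-tree transition (an assumption about how a caller would drive the helpers above): move a port to forwarding and age out the dynamic entries learned while it was blocked.

static void example_port_forward(struct ksz_hw *hw, int port)
{
        port_set_stp_state(hw, port, STP_STATE_FORWARDING);
        sw_flush_dyn_mac_table(hw);
}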
+ */ +static void sw_block_addr(struct ksz_hw *hw) +{ + struct ksz_mac_table *entry; + int i; + + for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) { + entry = &hw->ksz_switch->mac_table[i]; + entry->valid = 0; + sw_w_sta_mac_table(hw, i, + entry->mac_addr, entry->ports, + entry->override, entry->valid, + entry->use_fid, entry->fid); + } +} + +#define PHY_LINK_SUPPORT \ + (PHY_AUTO_NEG_ASYM_PAUSE | \ + PHY_AUTO_NEG_SYM_PAUSE | \ + PHY_AUTO_NEG_100BT4 | \ + PHY_AUTO_NEG_100BTX_FD | \ + PHY_AUTO_NEG_100BTX | \ + PHY_AUTO_NEG_10BT_FD | \ + PHY_AUTO_NEG_10BT) + +static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data) +{ + *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET); +} + +static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data) +{ + writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET); +} + +static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data) +{ + *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET); +} + +static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data) +{ + *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET); +} + +static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data) +{ + writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET); +} + +static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data) +{ + *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET); +} + +static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data) +{ + *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET); +} + +static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data) +{ + writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET); +} + +static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data) +{ + *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET); +} + +static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data) +{ + writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET); +} + +static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data) +{ + *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET); +} + +static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data) +{ + writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET); +} + +/** + * hw_r_phy - read data from PHY register + * @hw: The hardware instance. + * @port: Port to read. + * @reg: PHY register to read. + * @val: Buffer to store the read data. + * + * This routine reads data from the PHY register. + */ +static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val) +{ + int phy; + + phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg; + *val = readw(hw->io + phy); +} + +/** + * port_w_phy - write data to PHY register + * @hw: The hardware instance. + * @port: Port to write. + * @reg: PHY register to write. + * @val: Word data to write. + * + * This routine writes data to the PHY register. 
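Reading a PHY register through the memory-mapped window is just hw_r_phy() with one of the KS884X_PHY_*_OFFSET constants; a hypothetical wrapper fetching the auto-negotiation advertisement of a port:

static u16 example_read_phy_adv(struct ksz_hw *hw, int port)
{
        u16 adv;

        hw_r_phy(hw, port, KS884X_PHY_AUTO_NEG_OFFSET, &adv);
        return adv;
}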
+ */ +static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val) +{ + int phy; + + phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg; + writew(val, hw->io + phy); +} + +/* + * EEPROM access functions + */ + +#define AT93C_CODE 0 +#define AT93C_WR_OFF 0x00 +#define AT93C_WR_ALL 0x10 +#define AT93C_ER_ALL 0x20 +#define AT93C_WR_ON 0x30 + +#define AT93C_WRITE 1 +#define AT93C_READ 2 +#define AT93C_ERASE 3 + +#define EEPROM_DELAY 4 + +static inline void drop_gpio(struct ksz_hw *hw, u8 gpio) +{ + u16 data; + + data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); + data &= ~gpio; + writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); +} + +static inline void raise_gpio(struct ksz_hw *hw, u8 gpio) +{ + u16 data; + + data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); + data |= gpio; + writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET); +} + +static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio) +{ + u16 data; + + data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET); + return (u8)(data & gpio); +} + +static void eeprom_clk(struct ksz_hw *hw) +{ + raise_gpio(hw, EEPROM_SERIAL_CLOCK); + udelay(EEPROM_DELAY); + drop_gpio(hw, EEPROM_SERIAL_CLOCK); + udelay(EEPROM_DELAY); +} + +static u16 spi_r(struct ksz_hw *hw) +{ + int i; + u16 temp = 0; + + for (i = 15; i >= 0; i--) { + raise_gpio(hw, EEPROM_SERIAL_CLOCK); + udelay(EEPROM_DELAY); + + temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0; + + drop_gpio(hw, EEPROM_SERIAL_CLOCK); + udelay(EEPROM_DELAY); + } + return temp; +} + +static void spi_w(struct ksz_hw *hw, u16 data) +{ + int i; + + for (i = 15; i >= 0; i--) { + (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) : + drop_gpio(hw, EEPROM_DATA_OUT); + eeprom_clk(hw); + } +} + +static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg) +{ + int i; + + /* Initial start bit */ + raise_gpio(hw, EEPROM_DATA_OUT); + eeprom_clk(hw); + + /* AT93C operation */ + for (i = 1; i >= 0; i--) { + (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) : + drop_gpio(hw, EEPROM_DATA_OUT); + eeprom_clk(hw); + } + + /* Address location */ + for (i = 5; i >= 0; i--) { + (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) : + drop_gpio(hw, EEPROM_DATA_OUT); + eeprom_clk(hw); + } +} + +#define EEPROM_DATA_RESERVED 0 +#define EEPROM_DATA_MAC_ADDR_0 1 +#define EEPROM_DATA_MAC_ADDR_1 2 +#define EEPROM_DATA_MAC_ADDR_2 3 +#define EEPROM_DATA_SUBSYS_ID 4 +#define EEPROM_DATA_SUBSYS_VEN_ID 5 +#define EEPROM_DATA_PM_CAP 6 + +/* User defined EEPROM data */ +#define EEPROM_DATA_OTHER_MAC_ADDR 9 + +/** + * eeprom_read - read from AT93C46 EEPROM + * @hw: The hardware instance. + * @reg: The register offset. + * + * This function reads a word from the AT93C46 EEPROM. + * + * Return the data value. + */ +static u16 eeprom_read(struct ksz_hw *hw, u8 reg) +{ + u16 data; + + raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT); + + spi_reg(hw, AT93C_READ, reg); + data = spi_r(hw); + + drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT); + + return data; +} + +/** + * eeprom_write - write to AT93C46 EEPROM + * @hw: The hardware instance. + * @reg: The register offset. + * @data: The data value. + * + * This procedure writes a word to the AT93C46 EEPROM. + */ +static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data) +{ + int timeout; + + raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT); + + /* Enable write. */ + spi_reg(hw, AT93C_CODE, AT93C_WR_ON); + drop_gpio(hw, EEPROM_CHIP_SELECT); + udelay(1); + + /* Erase the register. 
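A hypothetical sketch of pulling the permanent MAC address out of the EEPROM words defined above (EEPROM_DATA_MAC_ADDR_0..2, two bytes per word); the byte order within each word is an assumption.

static void example_read_eeprom_mac(struct ksz_hw *hw, u8 *addr)
{
        int i;

        for (i = 0; i < 3; i++) {
                u16 word = eeprom_read(hw, EEPROM_DATA_MAC_ADDR_0 + i);

                addr[i * 2] = (u8)(word >> 8);  /* assumed: high byte first */
                addr[i * 2 + 1] = (u8)word;
        }
}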
*/ + raise_gpio(hw, EEPROM_CHIP_SELECT); + spi_reg(hw, AT93C_ERASE, reg); + drop_gpio(hw, EEPROM_CHIP_SELECT); + udelay(1); + + /* Check operation complete. */ + raise_gpio(hw, EEPROM_CHIP_SELECT); + timeout = 8; + mdelay(2); + do { + mdelay(1); + } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); + drop_gpio(hw, EEPROM_CHIP_SELECT); + udelay(1); + + /* Write the register. */ + raise_gpio(hw, EEPROM_CHIP_SELECT); + spi_reg(hw, AT93C_WRITE, reg); + spi_w(hw, data); + drop_gpio(hw, EEPROM_CHIP_SELECT); + udelay(1); + + /* Check operation complete. */ + raise_gpio(hw, EEPROM_CHIP_SELECT); + timeout = 8; + mdelay(2); + do { + mdelay(1); + } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout); + drop_gpio(hw, EEPROM_CHIP_SELECT); + udelay(1); + + /* Disable write. */ + raise_gpio(hw, EEPROM_CHIP_SELECT); + spi_reg(hw, AT93C_CODE, AT93C_WR_OFF); + + drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT); +} + +/* + * Link detection routines + */ + +static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl) +{ + ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE; + switch (port->flow_ctrl) { + case PHY_FLOW_CTRL: + ctrl |= PORT_AUTO_NEG_SYM_PAUSE; + break; + /* Not supported. */ + case PHY_TX_ONLY: + case PHY_RX_ONLY: + default: + break; + } + return ctrl; +} + +static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx) +{ + u32 rx_cfg; + u32 tx_cfg; + + rx_cfg = hw->rx_cfg; + tx_cfg = hw->tx_cfg; + if (rx) + hw->rx_cfg |= DMA_RX_FLOW_ENABLE; + else + hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE; + if (tx) + hw->tx_cfg |= DMA_TX_FLOW_ENABLE; + else + hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; + if (hw->enabled) { + if (rx_cfg != hw->rx_cfg) + writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); + if (tx_cfg != hw->tx_cfg) + writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); + } +} + +static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port, + u16 local, u16 remote) +{ + int rx; + int tx; + + if (hw->overrides & PAUSE_FLOW_CTRL) + return; + + rx = tx = 0; + if (port->force_link) + rx = tx = 1; + if (remote & PHY_AUTO_NEG_SYM_PAUSE) { + if (local & PHY_AUTO_NEG_SYM_PAUSE) { + rx = tx = 1; + } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) && + (local & PHY_AUTO_NEG_PAUSE) == + PHY_AUTO_NEG_ASYM_PAUSE) { + tx = 1; + } + } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) { + if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE) + rx = 1; + } + if (!hw->ksz_switch) + set_flow_ctrl(hw, rx, tx); +} + +static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port, + struct ksz_port_info *info, u16 link_status) +{ + if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) && + !(hw->overrides & PAUSE_FLOW_CTRL)) { + u32 cfg = hw->tx_cfg; + + /* Disable flow control in the half duplex mode. */ + if (1 == info->duplex) + hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE; + if (hw->enabled && cfg != hw->tx_cfg) + writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); + } +} + +/** + * port_get_link_speed - get current link status + * @port: The port instance. + * + * This routine reads PHY registers to determine the current link status of the + * switch ports. 
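Selecting symmetric pause for a port amounts to setting @flow_ctrl before the advertisement is rebuilt, since port_set_link_speed() folds it in through advertised_flow_ctrl(); a hypothetical caller:

static void example_enable_pause(struct ksz_port *port)
{
        port->flow_ctrl = PHY_FLOW_CTRL;
        port_set_link_speed(port);      /* rebuilds the advertisement; restarts
                                         * auto-negotiation if it changed */
}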
+ */ +static void port_get_link_speed(struct ksz_port *port) +{ + uint interrupt; + struct ksz_port_info *info; + struct ksz_port_info *linked = NULL; + struct ksz_hw *hw = port->hw; + u16 data; + u16 status; + u8 local; + u8 remote; + int i; + int p; + int change = 0; + + interrupt = hw_block_intr(hw); + + for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { + info = &hw->port_info[p]; + port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data); + port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status); + + /* + * Link status is changing all the time even when there is no + * cable connection! + */ + remote = status & (PORT_AUTO_NEG_COMPLETE | + PORT_STATUS_LINK_GOOD); + local = (u8) data; + + /* No change to status. */ + if (local == info->advertised && remote == info->partner) + continue; + + info->advertised = local; + info->partner = remote; + if (status & PORT_STATUS_LINK_GOOD) { + + /* Remember the first linked port. */ + if (!linked) + linked = info; + + info->tx_rate = 10 * TX_RATE_UNIT; + if (status & PORT_STATUS_SPEED_100MBIT) + info->tx_rate = 100 * TX_RATE_UNIT; + + info->duplex = 1; + if (status & PORT_STATUS_FULL_DUPLEX) + info->duplex = 2; + + if (media_connected != info->state) { + hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET, + &data); + hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET, + &status); + determine_flow_ctrl(hw, port, data, status); + if (hw->ksz_switch) { + port_cfg_back_pressure(hw, p, + (1 == info->duplex)); + } + change |= 1 << i; + port_cfg_change(hw, port, info, status); + } + info->state = media_connected; + } else { + if (media_disconnected != info->state) { + change |= 1 << i; + + /* Indicate the link just goes down. */ + hw->port_mib[p].link_down = 1; + } + info->state = media_disconnected; + } + hw->port_mib[p].state = (u8) info->state; + } + + if (linked && media_disconnected == port->linked->state) + port->linked = linked; + + hw_restore_intr(hw, interrupt); +} + +#define PHY_RESET_TIMEOUT 10 + +/** + * port_set_link_speed - set port speed + * @port: The port instance. + * + * This routine sets the link speed of the switch ports. + */ +static void port_set_link_speed(struct ksz_port *port) +{ + struct ksz_port_info *info; + struct ksz_hw *hw = port->hw; + u16 data; + u16 cfg; + u8 status; + int i; + int p; + + for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { + info = &hw->port_info[p]; + + port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data); + port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status); + + cfg = 0; + if (status & PORT_STATUS_LINK_GOOD) + cfg = data; + + data |= PORT_AUTO_NEG_ENABLE; + data = advertised_flow_ctrl(port, data); + + data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX | + PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT; + + /* Check if manual configuration is specified by the user. */ + if (port->speed || port->duplex) { + if (10 == port->speed) + data &= ~(PORT_AUTO_NEG_100BTX_FD | + PORT_AUTO_NEG_100BTX); + else if (100 == port->speed) + data &= ~(PORT_AUTO_NEG_10BT_FD | + PORT_AUTO_NEG_10BT); + if (1 == port->duplex) + data &= ~(PORT_AUTO_NEG_100BTX_FD | + PORT_AUTO_NEG_10BT_FD); + else if (2 == port->duplex) + data &= ~(PORT_AUTO_NEG_100BTX | + PORT_AUTO_NEG_10BT); + } + if (data != cfg) { + data |= PORT_AUTO_NEG_RESTART; + port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data); + } + } +} + +/** + * port_force_link_speed - force port speed + * @port: The port instance. + * + * This routine forces the link speed of the switch ports. 
+ */ +static void port_force_link_speed(struct ksz_port *port) +{ + struct ksz_hw *hw = port->hw; + u16 data; + int i; + int phy; + int p; + + for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { + phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL; + hw_r_phy_ctrl(hw, phy, &data); + + data &= ~PHY_AUTO_NEG_ENABLE; + + if (10 == port->speed) + data &= ~PHY_SPEED_100MBIT; + else if (100 == port->speed) + data |= PHY_SPEED_100MBIT; + if (1 == port->duplex) + data &= ~PHY_FULL_DUPLEX; + else if (2 == port->duplex) + data |= PHY_FULL_DUPLEX; + hw_w_phy_ctrl(hw, phy, data); + } +} + +static void port_set_power_saving(struct ksz_port *port, int enable) +{ + struct ksz_hw *hw = port->hw; + int i; + int p; + + for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) + port_cfg(hw, p, + KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable); +} + +/* + * KSZ8841 power management functions + */ + +/** + * hw_chk_wol_pme_status - check PMEN pin + * @hw: The hardware instance. + * + * This function is used to check PMEN pin is asserted. + * + * Return 1 if PMEN pin is asserted; otherwise, 0. + */ +static int hw_chk_wol_pme_status(struct ksz_hw *hw) +{ + struct dev_info *hw_priv = container_of(hw, struct dev_info, hw); + struct pci_dev *pdev = hw_priv->pdev; + u16 data; + + if (!pdev->pm_cap) + return 0; + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); + return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS; +} + +/** + * hw_clr_wol_pme_status - clear PMEN pin + * @hw: The hardware instance. + * + * This routine is used to clear PME_Status to deassert PMEN pin. + */ +static void hw_clr_wol_pme_status(struct ksz_hw *hw) +{ + struct dev_info *hw_priv = container_of(hw, struct dev_info, hw); + struct pci_dev *pdev = hw_priv->pdev; + u16 data; + + if (!pdev->pm_cap) + return; + + /* Clear PME_Status to deassert PMEN pin. */ + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); + data |= PCI_PM_CTRL_PME_STATUS; + pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); +} + +/** + * hw_cfg_wol_pme - enable or disable Wake-on-LAN + * @hw: The hardware instance. + * @set: The flag indicating whether to enable or disable. + * + * This routine is used to enable or disable Wake-on-LAN. + */ +static void hw_cfg_wol_pme(struct ksz_hw *hw, int set) +{ + struct dev_info *hw_priv = container_of(hw, struct dev_info, hw); + struct pci_dev *pdev = hw_priv->pdev; + u16 data; + + if (!pdev->pm_cap) + return; + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data); + data &= ~PCI_PM_CTRL_STATE_MASK; + if (set) + data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot; + else + data &= ~PCI_PM_CTRL_PME_ENABLE; + pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data); +} + +/** + * hw_cfg_wol - configure Wake-on-LAN features + * @hw: The hardware instance. + * @frame: The pattern frame bit. + * @set: The flag indicating whether to enable or disable. + * + * This routine is used to enable or disable certain Wake-on-LAN features. + */ +static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set) +{ + u16 data; + + data = readw(hw->io + KS8841_WOL_CTRL_OFFSET); + if (set) + data |= frame; + else + data &= ~frame; + writew(data, hw->io + KS8841_WOL_CTRL_OFFSET); +} + +/** + * hw_set_wol_frame - program Wake-on-LAN pattern + * @hw: The hardware instance. + * @i: The frame index. + * @mask_size: The size of the mask. + * @mask: Mask to ignore certain bytes in the pattern. + * @frame_size: The size of the frame. + * @pattern: The frame data. 
+ * + * This routine is used to program Wake-on-LAN pattern. + */ +static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size, + const u8 *mask, uint frame_size, const u8 *pattern) +{ + int bits; + int from; + int len; + int to; + u32 crc; + u8 data[64]; + u8 val = 0; + + if (frame_size > mask_size * 8) + frame_size = mask_size * 8; + if (frame_size > 64) + frame_size = 64; + + i *= 0x10; + writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i); + writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i); + + bits = len = from = to = 0; + do { + if (bits) { + if ((val & 1)) + data[to++] = pattern[from]; + val >>= 1; + ++from; + --bits; + } else { + val = mask[len]; + writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + + len); + ++len; + if (val) + bits = 8; + else + from += 8; + } + } while (from < (int) frame_size); + if (val) { + bits = mask[len - 1]; + val <<= (from % 8); + bits &= ~val; + writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len - + 1); + } + crc = ether_crc(to, data); + writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i); +} + +/** + * hw_add_wol_arp - add ARP pattern + * @hw: The hardware instance. + * @ip_addr: The IPv4 address assigned to the device. + * + * This routine is used to add ARP pattern for waking up the host. + */ +static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr) +{ + static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 }; + u8 pattern[42] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x06, + 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 }; + + memcpy(&pattern[38], ip_addr, 4); + hw_set_wol_frame(hw, 3, 6, mask, 42, pattern); +} + +/** + * hw_add_wol_bcast - add broadcast pattern + * @hw: The hardware instance. + * + * This routine is used to add broadcast pattern for waking up the host. + */ +static void hw_add_wol_bcast(struct ksz_hw *hw) +{ + static const u8 mask[] = { 0x3F }; + static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + + hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern); +} + +/** + * hw_add_wol_mcast - add multicast pattern + * @hw: The hardware instance. + * + * This routine is used to add multicast pattern for waking up the host. + * + * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used + * by IPv6 ping command. Note that multicast packets are filtred through the + * multicast hash table, so not all multicast packets can wake up the host. + */ +static void hw_add_wol_mcast(struct ksz_hw *hw) +{ + static const u8 mask[] = { 0x3F }; + u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 }; + + memcpy(&pattern[3], &hw->override_addr[3], 3); + hw_set_wol_frame(hw, 1, 1, mask, 6, pattern); +} + +/** + * hw_add_wol_ucast - add unicast pattern + * @hw: The hardware instance. + * + * This routine is used to add unicast pattern to wakeup the host. + * + * It is assumed the unicast packet is directed to the device, as the hardware + * can only receive them in normal case. + */ +static void hw_add_wol_ucast(struct ksz_hw *hw) +{ + static const u8 mask[] = { 0x3F }; + + hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr); +} + +/** + * hw_enable_wol - enable Wake-on-LAN + * @hw: The hardware instance. + * @wol_enable: The Wake-on-LAN settings. + * @net_addr: The IPv4 address assigned to the device. + * + * This routine is used to enable Wake-on-LAN depending on driver settings. 
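hw_set_wol_frame() programs a byte mask and a CRC computed over only the masked-in bytes; each set bit in a mask byte, least-significant bit first, selects one byte of the wake pattern. The short userspace sketch below simply enumerates which frame offsets the ARP mask used by hw_add_wol_arp() selects, which works out to the destination MAC (offsets 0-5), part of the Ethernet/ARP header (12-21), and the target IP address (38-41) that the driver patches into the pattern.

#include <stdio.h>

/* Mask used by hw_add_wol_arp(); each set bit selects one pattern byte,
 * least-significant bit first within each mask byte. */
static const unsigned char arp_mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };

int main(void)
{
        int byte, bit;

        /* Print the frame byte offsets that take part in the match/CRC. */
        for (byte = 0; byte < 6; byte++)
                for (bit = 0; bit < 8; bit++)
                        if (arp_mask[byte] & (1 << bit))
                                printf("%d ", byte * 8 + bit);
        printf("\n");
        return 0;
}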
+ */ +static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr) +{ + hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC)); + hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST)); + hw_add_wol_ucast(hw); + hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST)); + hw_add_wol_mcast(hw); + hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST)); + hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP)); + hw_add_wol_arp(hw, net_addr); +} + +/** + * hw_init - check driver is correct for the hardware + * @hw: The hardware instance. + * + * This function checks the hardware is correct for this driver and sets the + * hardware up for proper initialization. + * + * Return number of ports or 0 if not right. + */ +static int hw_init(struct ksz_hw *hw) +{ + int rc = 0; + u16 data; + u16 revision; + + /* Set bus speed to 125MHz. */ + writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET); + + /* Check KSZ884x chip ID. */ + data = readw(hw->io + KS884X_CHIP_ID_OFFSET); + + revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT; + data &= KS884X_CHIP_ID_MASK_41; + if (REG_CHIP_ID_41 == data) + rc = 1; + else if (REG_CHIP_ID_42 == data) + rc = 2; + else + return 0; + + /* Setup hardware features or bug workarounds. */ + if (revision <= 1) { + hw->features |= SMALL_PACKET_TX_BUG; + if (1 == rc) + hw->features |= HALF_DUPLEX_SIGNAL_BUG; + } + return rc; +} + +/** + * hw_reset - reset the hardware + * @hw: The hardware instance. + * + * This routine resets the hardware. + */ +static void hw_reset(struct ksz_hw *hw) +{ + writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET); + + /* Wait for device to reset. */ + mdelay(10); + + /* Write 0 to clear device reset. */ + writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET); +} + +/** + * hw_setup - setup the hardware + * @hw: The hardware instance. + * + * This routine setup the hardware for proper operation. + */ +static void hw_setup(struct ksz_hw *hw) +{ +#if SET_DEFAULT_LED + u16 data; + + /* Change default LED mode. */ + data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET); + data &= ~LED_MODE; + data |= SET_DEFAULT_LED; + writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET); +#endif + + /* Setup transmit control. */ + hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE | + (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE); + + /* Setup receive control. */ + hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST | + (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE); + hw->rx_cfg |= KS884X_DMA_RX_MULTICAST; + + /* Hardware cannot handle UDP packet in IP fragments. */ + hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP); + + if (hw->all_multi) + hw->rx_cfg |= DMA_RX_ALL_MULTICAST; + if (hw->promiscuous) + hw->rx_cfg |= DMA_RX_PROMISCUOUS; +} + +/** + * hw_setup_intr - setup interrupt mask + * @hw: The hardware instance. + * + * This routine setup the interrupt mask for proper operation. 
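hw_init() identifies the chip from the masked ID register, reports 1 port for the KSZ8841 and 2 for the KSZ8842, and enables software workarounds on early silicon revisions. A small mirror of that decision flow is below; the ID_*, REV_*, feature flag and sample register values are placeholders for illustration, not the real KS884X_* definitions.

#include <stdint.h>
#include <stdio.h>

#define ID_MASK         0xFFF0          /* placeholder field layout */
#define ID_KSZ8841      0x8810
#define ID_KSZ8842      0x8820
#define REV_MASK        0x000E
#define REV_SHIFT       1

#define F_SMALL_PKT_BUG 0x1
#define F_HALF_DUP_BUG  0x2

/* Return the port count for a recognised chip ID and collect workaround
 * flags for early revisions, following the flow of hw_init(). */
static int chip_ports(uint16_t reg, unsigned int *features)
{
        uint16_t rev = (reg & REV_MASK) >> REV_SHIFT;
        uint16_t id = reg & ID_MASK;
        int ports;

        if (id == ID_KSZ8841)
                ports = 1;
        else if (id == ID_KSZ8842)
                ports = 2;
        else
                return 0;

        *features = 0;
        if (rev <= 1) {
                *features |= F_SMALL_PKT_BUG;
                if (ports == 1)
                        *features |= F_HALF_DUP_BUG;
        }
        return ports;
}

int main(void)
{
        unsigned int features;
        int ports = chip_ports(0x8822, &features);      /* sample value */

        printf("ports=%d features=0x%x\n", ports, features);
        return 0;
}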
+ */ +static void hw_setup_intr(struct ksz_hw *hw) +{ + hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN; +} + +static void ksz_check_desc_num(struct ksz_desc_info *info) +{ +#define MIN_DESC_SHIFT 2 + + int alloc = info->alloc; + int shift; + + shift = 0; + while (!(alloc & 1)) { + shift++; + alloc >>= 1; + } + if (alloc != 1 || shift < MIN_DESC_SHIFT) { + pr_alert("Hardware descriptor numbers not right!\n"); + while (alloc) { + shift++; + alloc >>= 1; + } + if (shift < MIN_DESC_SHIFT) + shift = MIN_DESC_SHIFT; + alloc = 1 << shift; + info->alloc = alloc; + } + info->mask = info->alloc - 1; +} + +static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit) +{ + int i; + u32 phys = desc_info->ring_phys; + struct ksz_hw_desc *desc = desc_info->ring_virt; + struct ksz_desc *cur = desc_info->ring; + struct ksz_desc *previous = NULL; + + for (i = 0; i < desc_info->alloc; i++) { + cur->phw = desc++; + phys += desc_info->size; + previous = cur++; + previous->phw->next = cpu_to_le32(phys); + } + previous->phw->next = cpu_to_le32(desc_info->ring_phys); + previous->sw.buf.rx.end_of_ring = 1; + previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data); + + desc_info->avail = desc_info->alloc; + desc_info->last = desc_info->next = 0; + + desc_info->cur = desc_info->ring; +} + +/** + * hw_set_desc_base - set descriptor base addresses + * @hw: The hardware instance. + * @tx_addr: The transmit descriptor base. + * @rx_addr: The receive descriptor base. + * + * This routine programs the descriptor base addresses after reset. + */ +static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr) +{ + /* Set base address of Tx/Rx descriptors. */ + writel(tx_addr, hw->io + KS_DMA_TX_ADDR); + writel(rx_addr, hw->io + KS_DMA_RX_ADDR); +} + +static void hw_reset_pkts(struct ksz_desc_info *info) +{ + info->cur = info->ring; + info->avail = info->alloc; + info->last = info->next = 0; +} + +static inline void hw_resume_rx(struct ksz_hw *hw) +{ + writel(DMA_START, hw->io + KS_DMA_RX_START); +} + +/** + * hw_start_rx - start receiving + * @hw: The hardware instance. + * + * This routine starts the receive function of the hardware. + */ +static void hw_start_rx(struct ksz_hw *hw) +{ + writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); + + /* Notify when the receive stops. */ + hw->intr_mask |= KS884X_INT_RX_STOPPED; + + writel(DMA_START, hw->io + KS_DMA_RX_START); + hw_ack_intr(hw, KS884X_INT_RX_STOPPED); + hw->rx_stop++; + + /* Variable overflows. */ + if (0 == hw->rx_stop) + hw->rx_stop = 2; +} + +/** + * hw_stop_rx - stop receiving + * @hw: The hardware instance. + * + * This routine stops the receive function of the hardware. + */ +static void hw_stop_rx(struct ksz_hw *hw) +{ + hw->rx_stop = 0; + hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED); + writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL); +} + +/** + * hw_start_tx - start transmitting + * @hw: The hardware instance. + * + * This routine starts the transmit function of the hardware. + */ +static void hw_start_tx(struct ksz_hw *hw) +{ + writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL); +} + +/** + * hw_stop_tx - stop transmitting + * @hw: The hardware instance. + * + * This routine stops the transmit function of the hardware. + */ +static void hw_stop_tx(struct ksz_hw *hw) +{ + writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL); +} + +/** + * hw_disable - disable hardware + * @hw: The hardware instance. + * + * This routine disables the hardware. 
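ksz_check_desc_num() above enforces that the descriptor count is a power of two of at least 1 << MIN_DESC_SHIFT, so that ring indices can be wrapped with a cheap "index & (count - 1)" mask rather than a modulo. The standalone sketch below reproduces that fix-up, rounding a bad count up to the next power of two.

#include <stdio.h>

#define MIN_DESC_SHIFT 2

/* Mirror of ksz_check_desc_num(): return a usable descriptor count. */
static int fixup_desc_count(int alloc)
{
        int shift = 0;
        int n = alloc;

        while (!(n & 1)) {
                shift++;
                n >>= 1;
        }
        if (n != 1 || shift < MIN_DESC_SHIFT) {
                while (n) {             /* round up to the next power of two */
                        shift++;
                        n >>= 1;
                }
                if (shift < MIN_DESC_SHIFT)
                        shift = MIN_DESC_SHIFT;
                alloc = 1 << shift;
        }
        return alloc;
}

int main(void)
{
        int n;

        for (n = 3; n <= 66; n += 21)
                printf("%d -> %d (mask 0x%x)\n", n, fixup_desc_count(n),
                       fixup_desc_count(n) - 1);
        return 0;
}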
+ */ +static void hw_disable(struct ksz_hw *hw) +{ + hw_stop_rx(hw); + hw_stop_tx(hw); + hw->enabled = 0; +} + +/** + * hw_enable - enable hardware + * @hw: The hardware instance. + * + * This routine enables the hardware. + */ +static void hw_enable(struct ksz_hw *hw) +{ + hw_start_tx(hw); + hw_start_rx(hw); + hw->enabled = 1; +} + +/** + * hw_alloc_pkt - allocate enough descriptors for transmission + * @hw: The hardware instance. + * @length: The length of the packet. + * @physical: Number of descriptors required. + * + * This function allocates descriptors for transmission. + * + * Return 0 if not successful; 1 for buffer copy; or number of descriptors. + */ +static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical) +{ + /* Always leave one descriptor free. */ + if (hw->tx_desc_info.avail <= 1) + return 0; + + /* Allocate a descriptor for transmission and mark it current. */ + get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur); + hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1; + + /* Keep track of number of transmit descriptors used so far. */ + ++hw->tx_int_cnt; + hw->tx_size += length; + + /* Cannot hold on too much data. */ + if (hw->tx_size >= MAX_TX_HELD_SIZE) + hw->tx_int_cnt = hw->tx_int_mask + 1; + + if (physical > hw->tx_desc_info.avail) + return 1; + + return hw->tx_desc_info.avail; +} + +/** + * hw_send_pkt - mark packet for transmission + * @hw: The hardware instance. + * + * This routine marks the packet for transmission in PCI version. + */ +static void hw_send_pkt(struct ksz_hw *hw) +{ + struct ksz_desc *cur = hw->tx_desc_info.cur; + + cur->sw.buf.tx.last_seg = 1; + + /* Interrupt only after specified number of descriptors used. */ + if (hw->tx_int_cnt > hw->tx_int_mask) { + cur->sw.buf.tx.intr = 1; + hw->tx_int_cnt = 0; + hw->tx_size = 0; + } + + /* KSZ8842 supports port directed transmission. */ + cur->sw.buf.tx.dest_port = hw->dst_ports; + + release_desc(cur); + + writel(0, hw->io + KS_DMA_TX_START); +} + +static int empty_addr(u8 *addr) +{ + u32 *addr1 = (u32 *) addr; + u16 *addr2 = (u16 *) &addr[4]; + + return 0 == *addr1 && 0 == *addr2; +} + +/** + * hw_set_addr - set MAC address + * @hw: The hardware instance. + * + * This routine programs the MAC address of the hardware when the address is + * overrided. + */ +static void hw_set_addr(struct ksz_hw *hw) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + writeb(hw->override_addr[MAC_ADDR_ORDER(i)], + hw->io + KS884X_ADDR_0_OFFSET + i); + + sw_set_addr(hw, hw->override_addr); +} + +/** + * hw_read_addr - read MAC address + * @hw: The hardware instance. + * + * This routine retrieves the MAC address of the hardware. 
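hw_alloc_pkt() above makes a three-way decision for each outgoing packet: reject it when the ring is effectively full (one descriptor is always kept free), accept it but signal the caller to linearize when there are fewer free descriptors than fragments, or accept it as-is. The sketch below is a simplified model of just that decision, with the descriptor bookkeeping and interrupt-coalescing counters omitted.

#include <stdio.h>

/*
 * Model of the decision in hw_alloc_pkt(): with "avail" free descriptors
 * in the transmit ring, a packet needing "frags" descriptors is either
 * rejected (0), accepted but flagged for a linearizing copy (1), or
 * accepted as-is (the free count is returned).
 */
static int tx_alloc_decision(int avail, int frags)
{
        if (avail <= 1)
                return 0;       /* ring full: caller stops the queue */
        if (frags > avail)
                return 1;       /* not enough slots: copy into one buffer */
        return avail;
}

int main(void)
{
        printf("%d %d %d\n",
               tx_alloc_decision(1, 1),
               tx_alloc_decision(3, 5),
               tx_alloc_decision(8, 3));
        return 0;
}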
+ */ +static void hw_read_addr(struct ksz_hw *hw) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io + + KS884X_ADDR_0_OFFSET + i); + + if (!hw->mac_override) { + memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN); + if (empty_addr(hw->override_addr)) { + memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN); + memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS, + ETH_ALEN); + hw->override_addr[5] += hw->id; + hw_set_addr(hw); + } + } +} + +static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr) +{ + int i; + u32 mac_addr_lo; + u32 mac_addr_hi; + + mac_addr_hi = 0; + for (i = 0; i < 2; i++) { + mac_addr_hi <<= 8; + mac_addr_hi |= mac_addr[i]; + } + mac_addr_hi |= ADD_ADDR_ENABLE; + mac_addr_lo = 0; + for (i = 2; i < 6; i++) { + mac_addr_lo <<= 8; + mac_addr_lo |= mac_addr[i]; + } + index *= ADD_ADDR_INCR; + + writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO); + writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI); +} + +static void hw_set_add_addr(struct ksz_hw *hw) +{ + int i; + + for (i = 0; i < ADDITIONAL_ENTRIES; i++) { + if (empty_addr(hw->address[i])) + writel(0, hw->io + ADD_ADDR_INCR * i + + KS_ADD_ADDR_0_HI); + else + hw_ena_add_addr(hw, i, hw->address[i]); + } +} + +static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr) +{ + int i; + int j = ADDITIONAL_ENTRIES; + + if (ether_addr_equal(hw->override_addr, mac_addr)) + return 0; + for (i = 0; i < hw->addr_list_size; i++) { + if (ether_addr_equal(hw->address[i], mac_addr)) + return 0; + if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i])) + j = i; + } + if (j < ADDITIONAL_ENTRIES) { + memcpy(hw->address[j], mac_addr, ETH_ALEN); + hw_ena_add_addr(hw, j, hw->address[j]); + return 0; + } + return -1; +} + +static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr) +{ + int i; + + for (i = 0; i < hw->addr_list_size; i++) { + if (ether_addr_equal(hw->address[i], mac_addr)) { + eth_zero_addr(hw->address[i]); + writel(0, hw->io + ADD_ADDR_INCR * i + + KS_ADD_ADDR_0_HI); + return 0; + } + } + return -1; +} + +/** + * hw_clr_multicast - clear multicast addresses + * @hw: The hardware instance. + * + * This routine removes all multicast addresses set in the hardware. + */ +static void hw_clr_multicast(struct ksz_hw *hw) +{ + int i; + + for (i = 0; i < HW_MULTICAST_SIZE; i++) { + hw->multi_bits[i] = 0; + + writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i); + } +} + +/** + * hw_set_grp_addr - set multicast addresses + * @hw: The hardware instance. + * + * This routine programs multicast addresses for the hardware to accept those + * addresses. + */ +static void hw_set_grp_addr(struct ksz_hw *hw) +{ + int i; + int index; + int position; + int value; + + memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE); + + for (i = 0; i < hw->multi_list_size; i++) { + position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f; + index = position >> 3; + value = 1 << (position & 7); + hw->multi_bits[index] |= (u8) value; + } + + for (i = 0; i < HW_MULTICAST_SIZE; i++) + writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET + + i); +} + +/** + * hw_set_multicast - enable or disable all multicast receiving + * @hw: The hardware instance. + * @multicast: To turn on or off the all multicast feature. + * + * This routine enables/disables the hardware to accept all multicast packets. + */ +static void hw_set_multicast(struct ksz_hw *hw, u8 multicast) +{ + /* Stop receiving for reconfiguration. 
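hw_set_grp_addr() hashes each multicast address with ether_crc() and uses the top 6 bits of the CRC to pick one of 64 filter bits, split into a byte index and a bit within that byte of the hardware hash table. The userspace sketch below reproduces that bucket selection; the bitwise MSB-first CRC-32 here is a stand-in for the kernel's ether_crc() and is believed, not guaranteed, to match its output.

#include <stdint.h>
#include <stdio.h>

/* Bitwise MSB-first CRC-32 over the address bytes, feeding the LSB of
 * each byte first, polynomial 0x04C11DB7 (the classic ether_crc form). */
static uint32_t ether_crc_model(const uint8_t *data, int len)
{
        uint32_t crc = 0xFFFFFFFFu;
        int i, bit;

        for (i = 0; i < len; i++) {
                uint8_t byte = data[i];

                for (bit = 0; bit < 8; bit++, byte >>= 1) {
                        uint32_t msb = crc >> 31;

                        crc <<= 1;
                        if (msb ^ (byte & 1))
                                crc ^= 0x04C11DB7u;
                }
        }
        return crc;
}

int main(void)
{
        const uint8_t mcast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
        int position = (ether_crc_model(mcast, 6) >> 26) & 0x3F;

        /* Same index/bit split as hw_set_grp_addr(). */
        printf("hash %d -> table byte %d, bit %d\n",
               position, position >> 3, position & 7);
        return 0;
}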
*/ + hw_stop_rx(hw); + + if (multicast) + hw->rx_cfg |= DMA_RX_ALL_MULTICAST; + else + hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST; + + if (hw->enabled) + hw_start_rx(hw); +} + +/** + * hw_set_promiscuous - enable or disable promiscuous receiving + * @hw: The hardware instance. + * @prom: To turn on or off the promiscuous feature. + * + * This routine enables/disables the hardware to accept all packets. + */ +static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom) +{ + /* Stop receiving for reconfiguration. */ + hw_stop_rx(hw); + + if (prom) + hw->rx_cfg |= DMA_RX_PROMISCUOUS; + else + hw->rx_cfg &= ~DMA_RX_PROMISCUOUS; + + if (hw->enabled) + hw_start_rx(hw); +} + +/** + * sw_enable - enable the switch + * @hw: The hardware instance. + * @enable: The flag to enable or disable the switch + * + * This routine is used to enable/disable the switch in KSZ8842. + */ +static void sw_enable(struct ksz_hw *hw, int enable) +{ + int port; + + for (port = 0; port < SWITCH_PORT_NUM; port++) { + if (hw->dev_count > 1) { + /* Set port-base vlan membership with host port. */ + sw_cfg_port_base_vlan(hw, port, + HOST_MASK | (1 << port)); + port_set_stp_state(hw, port, STP_STATE_DISABLED); + } else { + sw_cfg_port_base_vlan(hw, port, PORT_MASK); + port_set_stp_state(hw, port, STP_STATE_FORWARDING); + } + } + if (hw->dev_count > 1) + port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE); + else + port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING); + + if (enable) + enable = KS8842_START; + writew(enable, hw->io + KS884X_CHIP_ID_OFFSET); +} + +/** + * sw_setup - setup the switch + * @hw: The hardware instance. + * + * This routine setup the hardware switch engine for default operation. + */ +static void sw_setup(struct ksz_hw *hw) +{ + int port; + + sw_set_global_ctrl(hw); + + /* Enable switch broadcast storm protection at 10% percent rate. */ + sw_init_broad_storm(hw); + hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE); + for (port = 0; port < SWITCH_PORT_NUM; port++) + sw_ena_broad_storm(hw, port); + + sw_init_prio(hw); + + sw_init_mirror(hw); + + sw_init_prio_rate(hw); + + sw_init_vlan(hw); + + if (hw->features & STP_SUPPORT) + sw_init_stp(hw); + if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET, + SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL)) + hw->overrides |= PAUSE_FLOW_CTRL; + sw_enable(hw, 1); +} + +/** + * ksz_start_timer - start kernel timer + * @info: Kernel timer information. + * @time: The time tick. + * + * This routine starts the kernel timer after the specified time tick. + */ +static void ksz_start_timer(struct ksz_timer_info *info, int time) +{ + info->cnt = 0; + info->timer.expires = jiffies + time; + add_timer(&info->timer); + + /* infinity */ + info->max = -1; +} + +/** + * ksz_stop_timer - stop kernel timer + * @info: Kernel timer information. + * + * This routine stops the kernel timer. 
+ */ +static void ksz_stop_timer(struct ksz_timer_info *info) +{ + if (info->max) { + info->max = 0; + del_timer_sync(&info->timer); + } +} + +static void ksz_init_timer(struct ksz_timer_info *info, int period, + void (*function)(unsigned long), void *data) +{ + info->max = 0; + info->period = period; + setup_timer(&info->timer, function, (unsigned long)data); +} + +static void ksz_update_timer(struct ksz_timer_info *info) +{ + ++info->cnt; + if (info->max > 0) { + if (info->cnt < info->max) { + info->timer.expires = jiffies + info->period; + add_timer(&info->timer); + } else + info->max = 0; + } else if (info->max < 0) { + info->timer.expires = jiffies + info->period; + add_timer(&info->timer); + } +} + +/** + * ksz_alloc_soft_desc - allocate software descriptors + * @desc_info: Descriptor information structure. + * @transmit: Indication that descriptors are for transmit. + * + * This local function allocates software descriptors for manipulation in + * memory. + * + * Return 0 if successful. + */ +static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit) +{ + desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc, + GFP_KERNEL); + if (!desc_info->ring) + return 1; + hw_init_desc(desc_info, transmit); + return 0; +} + +/** + * ksz_alloc_desc - allocate hardware descriptors + * @adapter: Adapter information structure. + * + * This local function allocates hardware descriptors for receiving and + * transmitting. + * + * Return 0 if successful. + */ +static int ksz_alloc_desc(struct dev_info *adapter) +{ + struct ksz_hw *hw = &adapter->hw; + int offset; + + /* Allocate memory for RX & TX descriptors. */ + adapter->desc_pool.alloc_size = + hw->rx_desc_info.size * hw->rx_desc_info.alloc + + hw->tx_desc_info.size * hw->tx_desc_info.alloc + + DESC_ALIGNMENT; + + adapter->desc_pool.alloc_virt = + pci_zalloc_consistent(adapter->pdev, + adapter->desc_pool.alloc_size, + &adapter->desc_pool.dma_addr); + if (adapter->desc_pool.alloc_virt == NULL) { + adapter->desc_pool.alloc_size = 0; + return 1; + } + + /* Align to the next cache line boundary. */ + offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ? + (DESC_ALIGNMENT - + ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0); + adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset; + adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset; + + /* Allocate receive/transmit descriptors. */ + hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *) + adapter->desc_pool.virt; + hw->rx_desc_info.ring_phys = adapter->desc_pool.phys; + offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size; + hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *) + (adapter->desc_pool.virt + offset); + hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset; + + if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0)) + return 1; + if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1)) + return 1; + + return 0; +} + +/** + * free_dma_buf - release DMA buffer resources + * @adapter: Adapter information structure. + * + * This routine is just a helper function to release the DMA buffer resources. + */ +static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf, + int direction) +{ + pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction); + dev_kfree_skb(dma_buf->skb); + dma_buf->skb = NULL; + dma_buf->dma = 0; +} + +/** + * ksz_init_rx_buffers - initialize receive descriptors + * @adapter: Adapter information structure. + * + * This routine initializes DMA buffers for receiving. 
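ksz_alloc_desc() above over-allocates the descriptor pool by one alignment unit and then advances both the virtual and physical base addresses to the next aligned boundary, so the hardware descriptors start on a cache-line boundary. The sketch below shows the same alignment arithmetic; the DESC_ALIGNMENT value is a placeholder for illustration.

#include <stdint.h>
#include <stdio.h>

#define DESC_ALIGNMENT 16       /* placeholder; the driver defines its own */

/* Same alignment step as ksz_alloc_desc(): advance an address to the
 * next DESC_ALIGNMENT boundary (or leave it alone if already aligned). */
static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
        uintptr_t rem = addr % align;

        return rem ? addr + (align - rem) : addr;
}

int main(void)
{
        uintptr_t raw = 0x1009;         /* sample unaligned base address */

        printf("raw %#lx -> aligned %#lx\n",
               (unsigned long)raw,
               (unsigned long)align_up(raw, DESC_ALIGNMENT));
        return 0;
}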
+ */ +static void ksz_init_rx_buffers(struct dev_info *adapter) +{ + int i; + struct ksz_desc *desc; + struct ksz_dma_buf *dma_buf; + struct ksz_hw *hw = &adapter->hw; + struct ksz_desc_info *info = &hw->rx_desc_info; + + for (i = 0; i < hw->rx_desc_info.alloc; i++) { + get_rx_pkt(info, &desc); + + dma_buf = DMA_BUFFER(desc); + if (dma_buf->skb && dma_buf->len != adapter->mtu) + free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE); + dma_buf->len = adapter->mtu; + if (!dma_buf->skb) + dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); + if (dma_buf->skb && !dma_buf->dma) + dma_buf->dma = pci_map_single( + adapter->pdev, + skb_tail_pointer(dma_buf->skb), + dma_buf->len, + PCI_DMA_FROMDEVICE); + + /* Set descriptor. */ + set_rx_buf(desc, dma_buf->dma); + set_rx_len(desc, dma_buf->len); + release_desc(desc); + } +} + +/** + * ksz_alloc_mem - allocate memory for hardware descriptors + * @adapter: Adapter information structure. + * + * This function allocates memory for use by hardware descriptors for receiving + * and transmitting. + * + * Return 0 if successful. + */ +static int ksz_alloc_mem(struct dev_info *adapter) +{ + struct ksz_hw *hw = &adapter->hw; + + /* Determine the number of receive and transmit descriptors. */ + hw->rx_desc_info.alloc = NUM_OF_RX_DESC; + hw->tx_desc_info.alloc = NUM_OF_TX_DESC; + + /* Determine how many descriptors to skip transmit interrupt. */ + hw->tx_int_cnt = 0; + hw->tx_int_mask = NUM_OF_TX_DESC / 4; + if (hw->tx_int_mask > 8) + hw->tx_int_mask = 8; + while (hw->tx_int_mask) { + hw->tx_int_cnt++; + hw->tx_int_mask >>= 1; + } + if (hw->tx_int_cnt) { + hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1; + hw->tx_int_cnt = 0; + } + + /* Determine the descriptor size. */ + hw->rx_desc_info.size = + (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / + DESC_ALIGNMENT) * DESC_ALIGNMENT); + hw->tx_desc_info.size = + (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / + DESC_ALIGNMENT) * DESC_ALIGNMENT); + if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) + pr_alert("Hardware descriptor size not right!\n"); + ksz_check_desc_num(&hw->rx_desc_info); + ksz_check_desc_num(&hw->tx_desc_info); + + /* Allocate descriptors. */ + if (ksz_alloc_desc(adapter)) + return 1; + + return 0; +} + +/** + * ksz_free_desc - free software and hardware descriptors + * @adapter: Adapter information structure. + * + * This local routine frees the software and hardware descriptors allocated by + * ksz_alloc_desc(). + */ +static void ksz_free_desc(struct dev_info *adapter) +{ + struct ksz_hw *hw = &adapter->hw; + + /* Reset descriptor. */ + hw->rx_desc_info.ring_virt = NULL; + hw->tx_desc_info.ring_virt = NULL; + hw->rx_desc_info.ring_phys = 0; + hw->tx_desc_info.ring_phys = 0; + + /* Free memory. */ + if (adapter->desc_pool.alloc_virt) + pci_free_consistent( + adapter->pdev, + adapter->desc_pool.alloc_size, + adapter->desc_pool.alloc_virt, + adapter->desc_pool.dma_addr); + + /* Reset resource pool. */ + adapter->desc_pool.alloc_size = 0; + adapter->desc_pool.alloc_virt = NULL; + + kfree(hw->rx_desc_info.ring); + hw->rx_desc_info.ring = NULL; + kfree(hw->tx_desc_info.ring); + hw->tx_desc_info.ring = NULL; +} + +/** + * ksz_free_buffers - free buffers used in the descriptors + * @adapter: Adapter information structure. + * @desc_info: Descriptor information structure. + * + * This local routine frees buffers used in the DMA buffers. 
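ksz_alloc_mem() above also derives the transmit interrupt-coalescing threshold: roughly one interrupt per quarter of the transmit ring, capped at 8, and forced to a power of two minus one so it can be compared against a running descriptor counter. The small mirror below reproduces that computation.

#include <stdio.h>

/* Mirror of the tx interrupt mask setup in ksz_alloc_mem(). */
static int tx_int_mask_for(int num_tx_desc)
{
        int mask = num_tx_desc / 4;
        int cnt = 0;

        if (mask > 8)
                mask = 8;
        while (mask) {
                cnt++;
                mask >>= 1;
        }
        return cnt ? (1 << (cnt - 1)) - 1 : 0;
}

int main(void)
{
        printf("64 tx descriptors -> interrupt mask %d\n", tx_int_mask_for(64));
        printf("16 tx descriptors -> interrupt mask %d\n", tx_int_mask_for(16));
        return 0;
}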
+ */ +static void ksz_free_buffers(struct dev_info *adapter, + struct ksz_desc_info *desc_info, int direction) +{ + int i; + struct ksz_dma_buf *dma_buf; + struct ksz_desc *desc = desc_info->ring; + + for (i = 0; i < desc_info->alloc; i++) { + dma_buf = DMA_BUFFER(desc); + if (dma_buf->skb) + free_dma_buf(adapter, dma_buf, direction); + desc++; + } +} + +/** + * ksz_free_mem - free all resources used by descriptors + * @adapter: Adapter information structure. + * + * This local routine frees all the resources allocated by ksz_alloc_mem(). + */ +static void ksz_free_mem(struct dev_info *adapter) +{ + /* Free transmit buffers. */ + ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, + PCI_DMA_TODEVICE); + + /* Free receive buffers. */ + ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, + PCI_DMA_FROMDEVICE); + + /* Free descriptors. */ + ksz_free_desc(adapter); +} + +static void get_mib_counters(struct ksz_hw *hw, int first, int cnt, + u64 *counter) +{ + int i; + int mib; + int port; + struct ksz_port_mib *port_mib; + + memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM); + for (i = 0, port = first; i < cnt; i++, port++) { + port_mib = &hw->port_mib[port]; + for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++) + counter[mib] += port_mib->counter[mib]; + } +} + +/** + * send_packet - send packet + * @skb: Socket buffer. + * @dev: Network device. + * + * This routine is used to send a packet out to the network. + */ +static void send_packet(struct sk_buff *skb, struct net_device *dev) +{ + struct ksz_desc *desc; + struct ksz_desc *first; + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_desc_info *info = &hw->tx_desc_info; + struct ksz_dma_buf *dma_buf; + int len; + int last_frag = skb_shinfo(skb)->nr_frags; + + /* + * KSZ8842 with multiple device interfaces needs to be told which port + * to send. + */ + if (hw->dev_count > 1) + hw->dst_ports = 1 << priv->port.first_port; + + /* Hardware will pad the length to 60. */ + len = skb->len; + + /* Remember the very first descriptor. */ + first = info->cur; + desc = first; + + dma_buf = DMA_BUFFER(desc); + if (last_frag) { + int frag; + skb_frag_t *this_frag; + + dma_buf->len = skb_headlen(skb); + + dma_buf->dma = pci_map_single( + hw_priv->pdev, skb->data, dma_buf->len, + PCI_DMA_TODEVICE); + set_tx_buf(desc, dma_buf->dma); + set_tx_len(desc, dma_buf->len); + + frag = 0; + do { + this_frag = &skb_shinfo(skb)->frags[frag]; + + /* Get a new descriptor. */ + get_tx_pkt(info, &desc); + + /* Keep track of descriptors used so far. */ + ++hw->tx_int_cnt; + + dma_buf = DMA_BUFFER(desc); + dma_buf->len = skb_frag_size(this_frag); + + dma_buf->dma = pci_map_single( + hw_priv->pdev, + skb_frag_address(this_frag), + dma_buf->len, + PCI_DMA_TODEVICE); + set_tx_buf(desc, dma_buf->dma); + set_tx_len(desc, dma_buf->len); + + frag++; + if (frag == last_frag) + break; + + /* Do not release the last descriptor here. */ + release_desc(desc); + } while (1); + + /* current points to the last descriptor. */ + info->cur = desc; + + /* Release the first descriptor. 
*/ + release_desc(first); + } else { + dma_buf->len = len; + + dma_buf->dma = pci_map_single( + hw_priv->pdev, skb->data, dma_buf->len, + PCI_DMA_TODEVICE); + set_tx_buf(desc, dma_buf->dma); + set_tx_len(desc, dma_buf->len); + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + (desc)->sw.buf.tx.csum_gen_tcp = 1; + (desc)->sw.buf.tx.csum_gen_udp = 1; + } + + /* + * The last descriptor holds the packet so that it can be returned to + * network subsystem after all descriptors are transmitted. + */ + dma_buf->skb = skb; + + hw_send_pkt(hw); + + /* Update transmit statistics. */ + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; +} + +/** + * transmit_cleanup - clean up transmit descriptors + * @dev: Network device. + * + * This routine is called to clean up the transmitted buffers. + */ +static void transmit_cleanup(struct dev_info *hw_priv, int normal) +{ + int last; + union desc_stat status; + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_desc_info *info = &hw->tx_desc_info; + struct ksz_desc *desc; + struct ksz_dma_buf *dma_buf; + struct net_device *dev = NULL; + + spin_lock_irq(&hw_priv->hwlock); + last = info->last; + + while (info->avail < info->alloc) { + /* Get next descriptor which is not hardware owned. */ + desc = &info->ring[last]; + status.data = le32_to_cpu(desc->phw->ctrl.data); + if (status.tx.hw_owned) { + if (normal) + break; + else + reset_desc(desc, status); + } + + dma_buf = DMA_BUFFER(desc); + pci_unmap_single( + hw_priv->pdev, dma_buf->dma, dma_buf->len, + PCI_DMA_TODEVICE); + + /* This descriptor contains the last buffer in the packet. */ + if (dma_buf->skb) { + dev = dma_buf->skb->dev; + + /* Release the packet back to network subsystem. */ + dev_kfree_skb_irq(dma_buf->skb); + dma_buf->skb = NULL; + } + + /* Free the transmitted descriptor. */ + last++; + last &= info->mask; + info->avail++; + } + info->last = last; + spin_unlock_irq(&hw_priv->hwlock); + + /* Notify the network subsystem that the packet has been sent. */ + if (dev) + dev->trans_start = jiffies; +} + +/** + * transmit_done - transmit done processing + * @dev: Network device. + * + * This routine is called when the transmit interrupt is triggered, indicating + * either a packet is sent successfully or there are transmit errors. + */ +static void tx_done(struct dev_info *hw_priv) +{ + struct ksz_hw *hw = &hw_priv->hw; + int port; + + transmit_cleanup(hw_priv, 1); + + for (port = 0; port < hw->dev_count; port++) { + struct net_device *dev = hw->port_info[port].pdev; + + if (netif_running(dev) && netif_queue_stopped(dev)) + netif_wake_queue(dev); + } +} + +static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb) +{ + skb->dev = old->dev; + skb->protocol = old->protocol; + skb->ip_summed = old->ip_summed; + skb->csum = old->csum; + skb_set_network_header(skb, ETH_HLEN); + + dev_consume_skb_any(old); +} + +/** + * netdev_tx - send out packet + * @skb: Socket buffer. + * @dev: Network device. + * + * This function is used by the upper network layer to send out a packet. + * + * Return 0 if successful; otherwise an error code indicating failure. 
+ */ +static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + int left; + int num = 1; + int rc = 0; + + if (hw->features & SMALL_PACKET_TX_BUG) { + struct sk_buff *org_skb = skb; + + if (skb->len <= 48) { + if (skb_end_pointer(skb) - skb->data >= 50) { + memset(&skb->data[skb->len], 0, 50 - skb->len); + skb->len = 50; + } else { + skb = netdev_alloc_skb(dev, 50); + if (!skb) + return NETDEV_TX_BUSY; + memcpy(skb->data, org_skb->data, org_skb->len); + memset(&skb->data[org_skb->len], 0, + 50 - org_skb->len); + skb->len = 50; + copy_old_skb(org_skb, skb); + } + } + } + + spin_lock_irq(&hw_priv->hwlock); + + num = skb_shinfo(skb)->nr_frags + 1; + left = hw_alloc_pkt(hw, skb->len, num); + if (left) { + if (left < num || + (CHECKSUM_PARTIAL == skb->ip_summed && + skb->protocol == htons(ETH_P_IPV6))) { + struct sk_buff *org_skb = skb; + + skb = netdev_alloc_skb(dev, org_skb->len); + if (!skb) { + rc = NETDEV_TX_BUSY; + goto unlock; + } + skb_copy_and_csum_dev(org_skb, skb->data); + org_skb->ip_summed = CHECKSUM_NONE; + skb->len = org_skb->len; + copy_old_skb(org_skb, skb); + } + send_packet(skb, dev); + if (left <= num) + netif_stop_queue(dev); + } else { + /* Stop the transmit queue until packet is allocated. */ + netif_stop_queue(dev); + rc = NETDEV_TX_BUSY; + } +unlock: + spin_unlock_irq(&hw_priv->hwlock); + + return rc; +} + +/** + * netdev_tx_timeout - transmit timeout processing + * @dev: Network device. + * + * This routine is called when the transmit timer expires. That indicates the + * hardware is not running correctly because transmit interrupts are not + * triggered to free up resources so that the transmit routine can continue + * sending out packets. The hardware is reset to correct the problem. + */ +static void netdev_tx_timeout(struct net_device *dev) +{ + static unsigned long last_reset; + + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + int port; + + if (hw->dev_count > 1) { + /* + * Only reset the hardware if time between calls is long + * enough. 
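netdev_tx() above works around SMALL_PACKET_TX_BUG by zero-padding frames of 48 bytes or less to 50 bytes, padding in place when the socket buffer has room and otherwise copying into a freshly allocated buffer. The userspace model below captures just that padding rule, with a plain byte buffer and a capacity check standing in for the skb tailroom test.

#include <stdio.h>
#include <string.h>

#define MIN_TX_LEN 50   /* frames of 48 bytes or less are padded to 50 */

/*
 * Model of the short-frame workaround: returns the (possibly padded)
 * length, or -1 when the buffer is too small and a larger one would be
 * needed, as in the skb-reallocation path of netdev_tx().
 */
static int pad_short_frame(unsigned char *buf, int len, int cap)
{
        if (len > 48)
                return len;     /* long enough, leave it alone */
        if (cap < MIN_TX_LEN)
                return -1;      /* would need a fresh, larger buffer */
        memset(buf + len, 0, MIN_TX_LEN - len);
        return MIN_TX_LEN;
}

int main(void)
{
        unsigned char frame[64] = { 0xAA, 0xBB, 0xCC };

        printf("padded length: %d\n",
               pad_short_frame(frame, 3, (int)sizeof(frame)));
        return 0;
}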
+ */ + if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo)) + hw_priv = NULL; + } + + last_reset = jiffies; + if (hw_priv) { + hw_dis_intr(hw); + hw_disable(hw); + + transmit_cleanup(hw_priv, 0); + hw_reset_pkts(&hw->rx_desc_info); + hw_reset_pkts(&hw->tx_desc_info); + ksz_init_rx_buffers(hw_priv); + + hw_reset(hw); + + hw_set_desc_base(hw, + hw->tx_desc_info.ring_phys, + hw->rx_desc_info.ring_phys); + hw_set_addr(hw); + if (hw->all_multi) + hw_set_multicast(hw, hw->all_multi); + else if (hw->multi_list_size) + hw_set_grp_addr(hw); + + if (hw->dev_count > 1) { + hw_set_add_addr(hw); + for (port = 0; port < SWITCH_PORT_NUM; port++) { + struct net_device *port_dev; + + port_set_stp_state(hw, port, + STP_STATE_DISABLED); + + port_dev = hw->port_info[port].pdev; + if (netif_running(port_dev)) + port_set_stp_state(hw, port, + STP_STATE_SIMPLE); + } + } + + hw_enable(hw); + hw_ena_intr(hw); + } + + dev->trans_start = jiffies; + netif_wake_queue(dev); +} + +static inline void csum_verified(struct sk_buff *skb) +{ + unsigned short protocol; + struct iphdr *iph; + + protocol = skb->protocol; + skb_reset_network_header(skb); + iph = (struct iphdr *) skb_network_header(skb); + if (protocol == htons(ETH_P_8021Q)) { + protocol = iph->tot_len; + skb_set_network_header(skb, VLAN_HLEN); + iph = (struct iphdr *) skb_network_header(skb); + } + if (protocol == htons(ETH_P_IP)) { + if (iph->protocol == IPPROTO_TCP) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw, + struct ksz_desc *desc, union desc_stat status) +{ + int packet_len; + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_dma_buf *dma_buf; + struct sk_buff *skb; + int rx_status; + + /* Received length includes 4-byte CRC. */ + packet_len = status.rx.frame_len - 4; + + dma_buf = DMA_BUFFER(desc); + pci_dma_sync_single_for_cpu( + hw_priv->pdev, dma_buf->dma, packet_len + 4, + PCI_DMA_FROMDEVICE); + + do { + /* skb->data != skb->head */ + skb = netdev_alloc_skb(dev, packet_len + 2); + if (!skb) { + dev->stats.rx_dropped++; + return -ENOMEM; + } + + /* + * Align socket buffer in 4-byte boundary for better + * performance. + */ + skb_reserve(skb, 2); + + memcpy(skb_put(skb, packet_len), + dma_buf->skb->data, packet_len); + } while (0); + + skb->protocol = eth_type_trans(skb, dev); + + if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP)) + csum_verified(skb); + + /* Update receive statistics. */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_len; + + /* Notify upper layer for received packet. */ + rx_status = netif_rx(skb); + + return 0; +} + +static int dev_rcv_packets(struct dev_info *hw_priv) +{ + int next; + union desc_stat status; + struct ksz_hw *hw = &hw_priv->hw; + struct net_device *dev = hw->port_info[0].pdev; + struct ksz_desc_info *info = &hw->rx_desc_info; + int left = info->alloc; + struct ksz_desc *desc; + int received = 0; + + next = info->next; + while (left--) { + /* Get next descriptor which is not hardware owned. */ + desc = &info->ring[next]; + status.data = le32_to_cpu(desc->phw->ctrl.data); + if (status.rx.hw_owned) + break; + + /* Status valid only when last descriptor bit is set. 
*/ + if (status.rx.last_desc && status.rx.first_desc) { + if (rx_proc(dev, hw, desc, status)) + goto release_packet; + received++; + } + +release_packet: + release_desc(desc); + next++; + next &= info->mask; + } + info->next = next; + + return received; +} + +static int port_rcv_packets(struct dev_info *hw_priv) +{ + int next; + union desc_stat status; + struct ksz_hw *hw = &hw_priv->hw; + struct net_device *dev = hw->port_info[0].pdev; + struct ksz_desc_info *info = &hw->rx_desc_info; + int left = info->alloc; + struct ksz_desc *desc; + int received = 0; + + next = info->next; + while (left--) { + /* Get next descriptor which is not hardware owned. */ + desc = &info->ring[next]; + status.data = le32_to_cpu(desc->phw->ctrl.data); + if (status.rx.hw_owned) + break; + + if (hw->dev_count > 1) { + /* Get received port number. */ + int p = HW_TO_DEV_PORT(status.rx.src_port); + + dev = hw->port_info[p].pdev; + if (!netif_running(dev)) + goto release_packet; + } + + /* Status valid only when last descriptor bit is set. */ + if (status.rx.last_desc && status.rx.first_desc) { + if (rx_proc(dev, hw, desc, status)) + goto release_packet; + received++; + } + +release_packet: + release_desc(desc); + next++; + next &= info->mask; + } + info->next = next; + + return received; +} + +static int dev_rcv_special(struct dev_info *hw_priv) +{ + int next; + union desc_stat status; + struct ksz_hw *hw = &hw_priv->hw; + struct net_device *dev = hw->port_info[0].pdev; + struct ksz_desc_info *info = &hw->rx_desc_info; + int left = info->alloc; + struct ksz_desc *desc; + int received = 0; + + next = info->next; + while (left--) { + /* Get next descriptor which is not hardware owned. */ + desc = &info->ring[next]; + status.data = le32_to_cpu(desc->phw->ctrl.data); + if (status.rx.hw_owned) + break; + + if (hw->dev_count > 1) { + /* Get received port number. */ + int p = HW_TO_DEV_PORT(status.rx.src_port); + + dev = hw->port_info[p].pdev; + if (!netif_running(dev)) + goto release_packet; + } + + /* Status valid only when last descriptor bit is set. */ + if (status.rx.last_desc && status.rx.first_desc) { + /* + * Receive without error. With receive errors + * disabled, packets with receive errors will be + * dropped, so no need to check the error bit. + */ + if (!status.rx.error || (status.data & + KS_DESC_RX_ERROR_COND) == + KS_DESC_RX_ERROR_TOO_LONG) { + if (rx_proc(dev, hw, desc, status)) + goto release_packet; + received++; + } else { + struct dev_priv *priv = netdev_priv(dev); + + /* Update receive error statistics. */ + priv->port.counter[OID_COUNTER_RCV_ERROR]++; + } + } + +release_packet: + release_desc(desc); + next++; + next &= info->mask; + } + info->next = next; + + return received; +} + +static void rx_proc_task(unsigned long data) +{ + struct dev_info *hw_priv = (struct dev_info *) data; + struct ksz_hw *hw = &hw_priv->hw; + + if (!hw->enabled) + return; + if (unlikely(!hw_priv->dev_rcv(hw_priv))) { + + /* In case receive process is suspended because of overrun. */ + hw_resume_rx(hw); + + /* tasklets are interruptible. */ + spin_lock_irq(&hw_priv->hwlock); + hw_turn_on_intr(hw, KS884X_INT_RX_MASK); + spin_unlock_irq(&hw_priv->hwlock); + } else { + hw_ack_intr(hw, KS884X_INT_RX); + tasklet_schedule(&hw_priv->rx_tasklet); + } +} + +static void tx_proc_task(unsigned long data) +{ + struct dev_info *hw_priv = (struct dev_info *) data; + struct ksz_hw *hw = &hw_priv->hw; + + hw_ack_intr(hw, KS884X_INT_TX_MASK); + + tx_done(hw_priv); + + /* tasklets are interruptible. 
*/ + spin_lock_irq(&hw_priv->hwlock); + hw_turn_on_intr(hw, KS884X_INT_TX); + spin_unlock_irq(&hw_priv->hwlock); +} + +static inline void handle_rx_stop(struct ksz_hw *hw) +{ + /* Receive just has been stopped. */ + if (0 == hw->rx_stop) + hw->intr_mask &= ~KS884X_INT_RX_STOPPED; + else if (hw->rx_stop > 1) { + if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) { + hw_start_rx(hw); + } else { + hw->intr_mask &= ~KS884X_INT_RX_STOPPED; + hw->rx_stop = 0; + } + } else + /* Receive just has been started. */ + hw->rx_stop++; +} + +/** + * netdev_intr - interrupt handling + * @irq: Interrupt number. + * @dev_id: Network device. + * + * This function is called by upper network layer to signal interrupt. + * + * Return IRQ_HANDLED if interrupt is handled. + */ +static irqreturn_t netdev_intr(int irq, void *dev_id) +{ + uint int_enable = 0; + struct net_device *dev = (struct net_device *) dev_id; + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + + spin_lock(&hw_priv->hwlock); + + hw_read_intr(hw, &int_enable); + + /* Not our interrupt! */ + if (!int_enable) { + spin_unlock(&hw_priv->hwlock); + return IRQ_NONE; + } + + do { + hw_ack_intr(hw, int_enable); + int_enable &= hw->intr_mask; + + if (unlikely(int_enable & KS884X_INT_TX_MASK)) { + hw_dis_intr_bit(hw, KS884X_INT_TX_MASK); + tasklet_schedule(&hw_priv->tx_tasklet); + } + + if (likely(int_enable & KS884X_INT_RX)) { + hw_dis_intr_bit(hw, KS884X_INT_RX); + tasklet_schedule(&hw_priv->rx_tasklet); + } + + if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) { + dev->stats.rx_fifo_errors++; + hw_resume_rx(hw); + } + + if (unlikely(int_enable & KS884X_INT_PHY)) { + struct ksz_port *port = &priv->port; + + hw->features |= LINK_INT_WORKING; + port_get_link_speed(port); + } + + if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) { + handle_rx_stop(hw); + break; + } + + if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) { + u32 data; + + hw->intr_mask &= ~KS884X_INT_TX_STOPPED; + pr_info("Tx stopped\n"); + data = readl(hw->io + KS_DMA_TX_CTRL); + if (!(data & DMA_TX_ENABLE)) + pr_info("Tx disabled\n"); + break; + } + } while (0); + + hw_ena_intr(hw); + + spin_unlock(&hw_priv->hwlock); + + return IRQ_HANDLED; +} + +/* + * Linux network device functions + */ + +static unsigned long next_jiffies; + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void netdev_netpoll(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + + hw_dis_intr(&hw_priv->hw); + netdev_intr(dev->irq, dev); +} +#endif + +static void bridge_change(struct ksz_hw *hw) +{ + int port; + u8 member; + struct ksz_switch *sw = hw->ksz_switch; + + /* No ports in forwarding state. */ + if (!sw->member) { + port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE); + sw_block_addr(hw); + } + for (port = 0; port < SWITCH_PORT_NUM; port++) { + if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state) + member = HOST_MASK | sw->member; + else + member = HOST_MASK | (1 << port); + if (member != sw->port_cfg[port].member) + sw_cfg_port_base_vlan(hw, port, member); + } +} + +/** + * netdev_close - close network device + * @dev: Network device. + * + * This function process the close operation of network device. This is caused + * by the user command "ifconfig ethX down." + * + * Return 0 if successful; otherwise an error code indicating failure. 
+ */ +static int netdev_close(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_port *port = &priv->port; + struct ksz_hw *hw = &hw_priv->hw; + int pi; + + netif_stop_queue(dev); + + ksz_stop_timer(&priv->monitor_timer_info); + + /* Need to shut the port manually in multiple device interfaces mode. */ + if (hw->dev_count > 1) { + port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED); + + /* Port is closed. Need to change bridge setting. */ + if (hw->features & STP_SUPPORT) { + pi = 1 << port->first_port; + if (hw->ksz_switch->member & pi) { + hw->ksz_switch->member &= ~pi; + bridge_change(hw); + } + } + } + if (port->first_port > 0) + hw_del_addr(hw, dev->dev_addr); + if (!hw_priv->wol_enable) + port_set_power_saving(port, true); + + if (priv->multicast) + --hw->all_multi; + if (priv->promiscuous) + --hw->promiscuous; + + hw_priv->opened--; + if (!(hw_priv->opened)) { + ksz_stop_timer(&hw_priv->mib_timer_info); + flush_work(&hw_priv->mib_read); + + hw_dis_intr(hw); + hw_disable(hw); + hw_clr_multicast(hw); + + /* Delay for receive task to stop scheduling itself. */ + msleep(2000 / HZ); + + tasklet_kill(&hw_priv->rx_tasklet); + tasklet_kill(&hw_priv->tx_tasklet); + free_irq(dev->irq, hw_priv->dev); + + transmit_cleanup(hw_priv, 0); + hw_reset_pkts(&hw->rx_desc_info); + hw_reset_pkts(&hw->tx_desc_info); + + /* Clean out static MAC table when the switch is shutdown. */ + if (hw->features & STP_SUPPORT) + sw_clr_sta_mac_table(hw); + } + + return 0; +} + +static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw) +{ + if (hw->ksz_switch) { + u32 data; + + data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET); + if (hw->features & RX_HUGE_FRAME) + data |= SWITCH_HUGE_PACKET; + else + data &= ~SWITCH_HUGE_PACKET; + writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET); + } + if (hw->features & RX_HUGE_FRAME) { + hw->rx_cfg |= DMA_RX_ERROR; + hw_priv->dev_rcv = dev_rcv_special; + } else { + hw->rx_cfg &= ~DMA_RX_ERROR; + if (hw->dev_count > 1) + hw_priv->dev_rcv = port_rcv_packets; + else + hw_priv->dev_rcv = dev_rcv_packets; + } +} + +static int prepare_hardware(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + int rc = 0; + + /* Remember the network device that requests interrupts. */ + hw_priv->dev = dev; + rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev); + if (rc) + return rc; + tasklet_init(&hw_priv->rx_tasklet, rx_proc_task, + (unsigned long) hw_priv); + tasklet_init(&hw_priv->tx_tasklet, tx_proc_task, + (unsigned long) hw_priv); + + hw->promiscuous = 0; + hw->all_multi = 0; + hw->multi_list_size = 0; + + hw_reset(hw); + + hw_set_desc_base(hw, + hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys); + hw_set_addr(hw); + hw_cfg_huge_frame(hw_priv, hw); + ksz_init_rx_buffers(hw_priv); + return 0; +} + +static void set_media_state(struct net_device *dev, int media_state) +{ + struct dev_priv *priv = netdev_priv(dev); + + if (media_state == priv->media_state) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + netif_info(priv, link, dev, "link %s\n", + media_state == priv->media_state ? "on" : "off"); +} + +/** + * netdev_open - open network device + * @dev: Network device. + * + * This function process the open operation of network device. This is caused + * by the user command "ifconfig ethX up." 
+ * + * Return 0 if successful; otherwise an error code indicating failure. + */ +static int netdev_open(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_port *port = &priv->port; + int i; + int p; + int rc = 0; + + priv->multicast = 0; + priv->promiscuous = 0; + + /* Reset device statistics. */ + memset(&dev->stats, 0, sizeof(struct net_device_stats)); + memset((void *) port->counter, 0, + (sizeof(u64) * OID_COUNTER_LAST)); + + if (!(hw_priv->opened)) { + rc = prepare_hardware(dev); + if (rc) + return rc; + for (i = 0; i < hw->mib_port_cnt; i++) { + if (next_jiffies < jiffies) + next_jiffies = jiffies + HZ * 2; + else + next_jiffies += HZ * 1; + hw_priv->counter[i].time = next_jiffies; + hw->port_mib[i].state = media_disconnected; + port_init_cnt(hw, i); + } + if (hw->ksz_switch) + hw->port_mib[HOST_PORT].state = media_connected; + else { + hw_add_wol_bcast(hw); + hw_cfg_wol_pme(hw, 0); + hw_clr_wol_pme_status(&hw_priv->hw); + } + } + port_set_power_saving(port, false); + + for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) { + /* + * Initialize to invalid value so that link detection + * is done. + */ + hw->port_info[p].partner = 0xFF; + hw->port_info[p].state = media_disconnected; + } + + /* Need to open the port in multiple device interfaces mode. */ + if (hw->dev_count > 1) { + port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE); + if (port->first_port > 0) + hw_add_addr(hw, dev->dev_addr); + } + + port_get_link_speed(port); + if (port->force_link) + port_force_link_speed(port); + else + port_set_link_speed(port); + + if (!(hw_priv->opened)) { + hw_setup_intr(hw); + hw_enable(hw); + hw_ena_intr(hw); + + if (hw->mib_port_cnt) + ksz_start_timer(&hw_priv->mib_timer_info, + hw_priv->mib_timer_info.period); + } + + hw_priv->opened++; + + ksz_start_timer(&priv->monitor_timer_info, + priv->monitor_timer_info.period); + + priv->media_state = port->linked->state; + + set_media_state(dev, media_connected); + netif_start_queue(dev); + + return 0; +} + +/* RX errors = rx_errors */ +/* RX dropped = rx_dropped */ +/* RX overruns = rx_fifo_errors */ +/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */ +/* TX errors = tx_errors */ +/* TX dropped = tx_dropped */ +/* TX overruns = tx_fifo_errors */ +/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */ +/* collisions = collisions */ + +/** + * netdev_query_statistics - query network device statistics + * @dev: Network device. + * + * This function returns the statistics of the network device. The device + * needs not be opened. + * + * Return network device statistics. + */ +static struct net_device_stats *netdev_query_statistics(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct ksz_port *port = &priv->port; + struct ksz_hw *hw = &priv->adapter->hw; + struct ksz_port_mib *mib; + int i; + int p; + + dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR]; + dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR]; + + /* Reset to zero to add count later. 
*/ + dev->stats.multicast = 0; + dev->stats.collisions = 0; + dev->stats.rx_length_errors = 0; + dev->stats.rx_crc_errors = 0; + dev->stats.rx_frame_errors = 0; + dev->stats.tx_window_errors = 0; + + for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { + mib = &hw->port_mib[p]; + + dev->stats.multicast += (unsigned long) + mib->counter[MIB_COUNTER_RX_MULTICAST]; + + dev->stats.collisions += (unsigned long) + mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION]; + + dev->stats.rx_length_errors += (unsigned long)( + mib->counter[MIB_COUNTER_RX_UNDERSIZE] + + mib->counter[MIB_COUNTER_RX_FRAGMENT] + + mib->counter[MIB_COUNTER_RX_OVERSIZE] + + mib->counter[MIB_COUNTER_RX_JABBER]); + dev->stats.rx_crc_errors += (unsigned long) + mib->counter[MIB_COUNTER_RX_CRC_ERR]; + dev->stats.rx_frame_errors += (unsigned long)( + mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] + + mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]); + + dev->stats.tx_window_errors += (unsigned long) + mib->counter[MIB_COUNTER_TX_LATE_COLLISION]; + } + + return &dev->stats; +} + +/** + * netdev_set_mac_address - set network device MAC address + * @dev: Network device. + * @addr: Buffer of MAC address. + * + * This function is used to set the MAC address of the network device. + * + * Return 0 to indicate success. + */ +static int netdev_set_mac_address(struct net_device *dev, void *addr) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct sockaddr *mac = addr; + uint interrupt; + + if (priv->port.first_port > 0) + hw_del_addr(hw, dev->dev_addr); + else { + hw->mac_override = 1; + memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); + } + + memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); + + interrupt = hw_block_intr(hw); + + if (priv->port.first_port > 0) + hw_add_addr(hw, dev->dev_addr); + else + hw_set_addr(hw); + hw_restore_intr(hw, interrupt); + + return 0; +} + +static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv, + struct ksz_hw *hw, int promiscuous) +{ + if (promiscuous != priv->promiscuous) { + u8 prev_state = hw->promiscuous; + + if (promiscuous) + ++hw->promiscuous; + else + --hw->promiscuous; + priv->promiscuous = promiscuous; + + /* Turn on/off promiscuous mode. */ + if (hw->promiscuous <= 1 && prev_state <= 1) + hw_set_promiscuous(hw, hw->promiscuous); + + /* + * Port is not in promiscuous mode, meaning it is released + * from the bridge. + */ + if ((hw->features & STP_SUPPORT) && !promiscuous && + (dev->priv_flags & IFF_BRIDGE_PORT)) { + struct ksz_switch *sw = hw->ksz_switch; + int port = priv->port.first_port; + + port_set_stp_state(hw, port, STP_STATE_DISABLED); + port = 1 << port; + if (sw->member & port) { + sw->member &= ~port; + bridge_change(hw); + } + } + } +} + +static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw, + int multicast) +{ + if (multicast != priv->multicast) { + u8 all_multi = hw->all_multi; + + if (multicast) + ++hw->all_multi; + else + --hw->all_multi; + priv->multicast = multicast; + + /* Turn on/off all multicast mode. */ + if (hw->all_multi <= 1 && all_multi <= 1) + hw_set_multicast(hw, hw->all_multi); + } +} + +/** + * netdev_set_rx_mode + * @dev: Network device. + * + * This routine is used to set multicast addresses or put the network device + * into promiscuous mode. 
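/*
 * Illustrative sketch, not part of the driver: dev_set_promiscuous() above
 * keeps a per-adapter count of interfaces that want promiscuous mode and only
 * programs the hardware when that count crosses the 0 <-> 1 boundary (the
 * per-interface priv->promiscuous flag guards against double counting).
 * The core of the idea, with a hypothetical hw_write_promisc() helper:
 */
static unsigned int example_promisc_users;

static void hw_write_promisc(int on) { (void)on; /* program the RX filter */ }

static void example_set_promisc(int want)
{
	unsigned int prev = example_promisc_users;

	if (want)
		example_promisc_users++;
	else
		example_promisc_users--;

	/* only the first "on" and the last "off" touch the hardware */
	if (example_promisc_users <= 1 && prev <= 1)
		hw_write_promisc(example_promisc_users != 0);
}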
+ */ +static void netdev_set_rx_mode(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct netdev_hw_addr *ha; + int multicast = (dev->flags & IFF_ALLMULTI); + + dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC)); + + if (hw_priv->hw.dev_count > 1) + multicast |= (dev->flags & IFF_MULTICAST); + dev_set_multicast(priv, hw, multicast); + + /* Cannot use different hashes in multiple device interfaces mode. */ + if (hw_priv->hw.dev_count > 1) + return; + + if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { + int i = 0; + + /* List too big to support so turn on all multicast mode. */ + if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) { + if (MAX_MULTICAST_LIST != hw->multi_list_size) { + hw->multi_list_size = MAX_MULTICAST_LIST; + ++hw->all_multi; + hw_set_multicast(hw, hw->all_multi); + } + return; + } + + netdev_for_each_mc_addr(ha, dev) { + if (i >= MAX_MULTICAST_LIST) + break; + memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN); + } + hw->multi_list_size = (u8) i; + hw_set_grp_addr(hw); + } else { + if (MAX_MULTICAST_LIST == hw->multi_list_size) { + --hw->all_multi; + hw_set_multicast(hw, hw->all_multi); + } + hw->multi_list_size = 0; + hw_clr_multicast(hw); + } +} + +static int netdev_change_mtu(struct net_device *dev, int new_mtu) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + int hw_mtu; + + if (netif_running(dev)) + return -EBUSY; + + /* Cannot use different MTU in multiple device interfaces mode. */ + if (hw->dev_count > 1) + if (dev != hw_priv->dev) + return 0; + if (new_mtu < 60) + return -EINVAL; + + if (dev->mtu != new_mtu) { + hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4; + if (hw_mtu > MAX_RX_BUF_SIZE) + return -EINVAL; + if (hw_mtu > REGULAR_RX_BUF_SIZE) { + hw->features |= RX_HUGE_FRAME; + hw_mtu = MAX_RX_BUF_SIZE; + } else { + hw->features &= ~RX_HUGE_FRAME; + hw_mtu = REGULAR_RX_BUF_SIZE; + } + hw_mtu = (hw_mtu + 3) & ~3; + hw_priv->mtu = hw_mtu; + dev->mtu = new_mtu; + } + return 0; +} + +/** + * netdev_ioctl - I/O control processing + * @dev: Network device. + * @ifr: Interface request structure. + * @cmd: I/O control code. + * + * This function is used to process I/O control calls. + * + * Return 0 to indicate success. + */ +static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_port *port = &priv->port; + int result = 0; + struct mii_ioctl_data *data = if_mii(ifr); + + if (down_interruptible(&priv->proc_sem)) + return -ERESTARTSYS; + + switch (cmd) { + /* Get address of MII PHY in use. */ + case SIOCGMIIPHY: + data->phy_id = priv->id; + + /* Fallthrough... */ + + /* Read MII PHY register. */ + case SIOCGMIIREG: + if (data->phy_id != priv->id || data->reg_num >= 6) + result = -EIO; + else + hw_r_phy(hw, port->linked->port_id, data->reg_num, + &data->val_out); + break; + + /* Write MII PHY register. */ + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + result = -EPERM; + else if (data->phy_id != priv->id || data->reg_num >= 6) + result = -EIO; + else + hw_w_phy(hw, port->linked->port_id, data->reg_num, + data->val_in); + break; + + default: + result = -EOPNOTSUPP; + } + + up(&priv->proc_sem); + + return result; +} + +/* + * MII support + */ + +/** + * mdio_read - read PHY register + * @dev: Network device. 
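/*
 * Illustrative sketch, not part of the driver: netdev_change_mtu() above
 * turns the requested MTU into a hardware buffer length by adding the
 * Ethernet header plus 4 bytes and rounding up to a multiple of 4; the real
 * code then snaps the result to either REGULAR_RX_BUF_SIZE or
 * MAX_RX_BUF_SIZE and sets RX_HUGE_FRAME accordingly.  Only the arithmetic,
 * with a stand-in value for ETHERNET_HEADER_SIZE (assumed 14 here):
 */
static int example_hw_frame_len(int new_mtu)
{
	int hw_mtu = new_mtu + 14 + 4;	/* header plus checksum overhead */

	return (hw_mtu + 3) & ~3;	/* e.g. 1500 -> 1518 -> 1520 */
}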
+ * @phy_id: The PHY id. + * @reg_num: The register number. + * + * This function returns the PHY register value. + * + * Return the register value. + */ +static int mdio_read(struct net_device *dev, int phy_id, int reg_num) +{ + struct dev_priv *priv = netdev_priv(dev); + struct ksz_port *port = &priv->port; + struct ksz_hw *hw = port->hw; + u16 val_out; + + hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out); + return val_out; +} + +/** + * mdio_write - set PHY register + * @dev: Network device. + * @phy_id: The PHY id. + * @reg_num: The register number. + * @val: The register value. + * + * This procedure sets the PHY register value. + */ +static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) +{ + struct dev_priv *priv = netdev_priv(dev); + struct ksz_port *port = &priv->port; + struct ksz_hw *hw = port->hw; + int i; + int pi; + + for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++) + hw_w_phy(hw, pi, reg_num << 1, val); +} + +/* + * ethtool support + */ + +#define EEPROM_SIZE 0x40 + +static u16 eeprom_data[EEPROM_SIZE] = { 0 }; + +#define ADVERTISED_ALL \ + (ADVERTISED_10baseT_Half | \ + ADVERTISED_10baseT_Full | \ + ADVERTISED_100baseT_Half | \ + ADVERTISED_100baseT_Full) + +/* These functions use the MII functions in mii.c. */ + +/** + * netdev_get_settings - get network device settings + * @dev: Network device. + * @cmd: Ethtool command. + * + * This function queries the PHY and returns its state in the ethtool command. + * + * Return 0 if successful; otherwise an error code. + */ +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + + mutex_lock(&hw_priv->lock); + mii_ethtool_gset(&priv->mii_if, cmd); + cmd->advertising |= SUPPORTED_TP; + mutex_unlock(&hw_priv->lock); + + /* Save advertised settings for workaround in next function. */ + priv->advertising = cmd->advertising; + return 0; +} + +/** + * netdev_set_settings - set network device settings + * @dev: Network device. + * @cmd: Ethtool command. + * + * This function sets the PHY according to the ethtool command. + * + * Return 0 if successful; otherwise an error code. + */ +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_port *port = &priv->port; + u32 speed = ethtool_cmd_speed(cmd); + int rc; + + /* + * ethtool utility does not change advertised setting if auto + * negotiation is not specified explicitly. 
+ */ + if (cmd->autoneg && priv->advertising == cmd->advertising) { + cmd->advertising |= ADVERTISED_ALL; + if (10 == speed) + cmd->advertising &= + ~(ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half); + else if (100 == speed) + cmd->advertising &= + ~(ADVERTISED_10baseT_Full | + ADVERTISED_10baseT_Half); + if (0 == cmd->duplex) + cmd->advertising &= + ~(ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + else if (1 == cmd->duplex) + cmd->advertising &= + ~(ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + } + mutex_lock(&hw_priv->lock); + if (cmd->autoneg && + (cmd->advertising & ADVERTISED_ALL) == + ADVERTISED_ALL) { + port->duplex = 0; + port->speed = 0; + port->force_link = 0; + } else { + port->duplex = cmd->duplex + 1; + if (1000 != speed) + port->speed = speed; + if (cmd->autoneg) + port->force_link = 0; + else + port->force_link = 1; + } + rc = mii_ethtool_sset(&priv->mii_if, cmd); + mutex_unlock(&hw_priv->lock); + return rc; +} + +/** + * netdev_nway_reset - restart auto-negotiation + * @dev: Network device. + * + * This function restarts the PHY for auto-negotiation. + * + * Return 0 if successful; otherwise an error code. + */ +static int netdev_nway_reset(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + int rc; + + mutex_lock(&hw_priv->lock); + rc = mii_nway_restart(&priv->mii_if); + mutex_unlock(&hw_priv->lock); + return rc; +} + +/** + * netdev_get_link - get network device link status + * @dev: Network device. + * + * This function gets the link status from the PHY. + * + * Return true if PHY is linked and false otherwise. + */ +static u32 netdev_get_link(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + int rc; + + rc = mii_link_ok(&priv->mii_if); + return rc; +} + +/** + * netdev_get_drvinfo - get network driver information + * @dev: Network device. + * @info: Ethtool driver info data structure. + * + * This procedure returns the driver information. + */ +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(hw_priv->pdev), + sizeof(info->bus_info)); +} + +/** + * netdev_get_regs_len - get length of register dump + * @dev: Network device. + * + * This function returns the length of the register dump. + * + * Return length of the register dump. + */ +static struct hw_regs { + int start; + int end; +} hw_regs_range[] = { + { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS }, + { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI }, + { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET }, + { KS884X_SIDER_P, KS8842_SGCR7_P }, + { KS8842_MACAR1_P, KS8842_TOSR8_P }, + { KS884X_P1MBCR_P, KS8842_P3ERCR_P }, + { 0, 0 } +}; + +static int netdev_get_regs_len(struct net_device *dev) +{ + struct hw_regs *range = hw_regs_range; + int regs_len = 0x10 * sizeof(u32); + + while (range->end > range->start) { + regs_len += (range->end - range->start + 3) / 4 * 4; + range++; + } + return regs_len; +} + +/** + * netdev_get_regs - get register dump + * @dev: Network device. + * @regs: Ethtool registers data structure. + * @ptr: Buffer to store the register values. + * + * This procedure dumps the register values in the provided buffer. 
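/*
 * Illustrative sketch, not part of the driver: netdev_get_regs_len() above
 * sizes the register dump as sixteen u32s of PCI configuration space plus
 * every {start, end} range in hw_regs_range rounded up to whole 32-bit
 * words.  The per-range rounding in isolation:
 */
static int example_range_bytes(int start, int end)
{
	return (end - start + 3) / 4 * 4;	/* e.g. 0x00..0x1A -> 28 bytes */
}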
+ */ +static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *ptr) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + int *buf = (int *) ptr; + struct hw_regs *range = hw_regs_range; + int len; + + mutex_lock(&hw_priv->lock); + regs->version = 0; + for (len = 0; len < 0x40; len += 4) { + pci_read_config_dword(hw_priv->pdev, len, buf); + buf++; + } + while (range->end > range->start) { + for (len = range->start; len < range->end; len += 4) { + *buf = readl(hw->io + len); + buf++; + } + range++; + } + mutex_unlock(&hw_priv->lock); +} + +#define WOL_SUPPORT \ + (WAKE_PHY | WAKE_MAGIC | \ + WAKE_UCAST | WAKE_MCAST | \ + WAKE_BCAST | WAKE_ARP) + +/** + * netdev_get_wol - get Wake-on-LAN support + * @dev: Network device. + * @wol: Ethtool Wake-on-LAN data structure. + * + * This procedure returns Wake-on-LAN support. + */ +static void netdev_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + + wol->supported = hw_priv->wol_support; + wol->wolopts = hw_priv->wol_enable; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +/** + * netdev_set_wol - set Wake-on-LAN support + * @dev: Network device. + * @wol: Ethtool Wake-on-LAN data structure. + * + * This function sets Wake-on-LAN support. + * + * Return 0 if successful; otherwise an error code. + */ +static int netdev_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + + /* Need to find a way to retrieve the device IP address. */ + static const u8 net_addr[] = { 192, 168, 1, 1 }; + + if (wol->wolopts & ~hw_priv->wol_support) + return -EINVAL; + + hw_priv->wol_enable = wol->wolopts; + + /* Link wakeup cannot really be disabled. */ + if (wol->wolopts) + hw_priv->wol_enable |= WAKE_PHY; + hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr); + return 0; +} + +/** + * netdev_get_msglevel - get debug message level + * @dev: Network device. + * + * This function returns current debug message level. + * + * Return current debug message flags. + */ +static u32 netdev_get_msglevel(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +/** + * netdev_set_msglevel - set debug message level + * @dev: Network device. + * @value: Debug message flags. + * + * This procedure sets debug message level. + */ +static void netdev_set_msglevel(struct net_device *dev, u32 value) +{ + struct dev_priv *priv = netdev_priv(dev); + + priv->msg_enable = value; +} + +/** + * netdev_get_eeprom_len - get EEPROM length + * @dev: Network device. + * + * This function returns the length of the EEPROM. + * + * Return length of the EEPROM. + */ +static int netdev_get_eeprom_len(struct net_device *dev) +{ + return EEPROM_SIZE * 2; +} + +/** + * netdev_get_eeprom - get EEPROM data + * @dev: Network device. + * @eeprom: Ethtool EEPROM data structure. + * @data: Buffer to store the EEPROM data. + * + * This function dumps the EEPROM data in the provided buffer. + * + * Return 0 if successful; otherwise an error code. 
+ */ +#define EEPROM_MAGIC 0x10A18842 + +static int netdev_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + u8 *eeprom_byte = (u8 *) eeprom_data; + int i; + int len; + + len = (eeprom->offset + eeprom->len + 1) / 2; + for (i = eeprom->offset / 2; i < len; i++) + eeprom_data[i] = eeprom_read(&hw_priv->hw, i); + eeprom->magic = EEPROM_MAGIC; + memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len); + + return 0; +} + +/** + * netdev_set_eeprom - write EEPROM data + * @dev: Network device. + * @eeprom: Ethtool EEPROM data structure. + * @data: Data buffer. + * + * This function modifies the EEPROM data one byte at a time. + * + * Return 0 if successful; otherwise an error code. + */ +static int netdev_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + u16 eeprom_word[EEPROM_SIZE]; + u8 *eeprom_byte = (u8 *) eeprom_word; + int i; + int len; + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + len = (eeprom->offset + eeprom->len + 1) / 2; + for (i = eeprom->offset / 2; i < len; i++) + eeprom_data[i] = eeprom_read(&hw_priv->hw, i); + memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2); + memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len); + for (i = 0; i < EEPROM_SIZE; i++) + if (eeprom_word[i] != eeprom_data[i]) { + eeprom_data[i] = eeprom_word[i]; + eeprom_write(&hw_priv->hw, i, eeprom_data[i]); + } + + return 0; +} + +/** + * netdev_get_pauseparam - get flow control parameters + * @dev: Network device. + * @pause: Ethtool PAUSE settings data structure. + * + * This procedure returns the PAUSE control flow settings. + */ +static void netdev_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + + pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1; + if (!hw->ksz_switch) { + pause->rx_pause = + (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0; + pause->tx_pause = + (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0; + } else { + pause->rx_pause = + (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET, + SWITCH_RX_FLOW_CTRL)) ? 1 : 0; + pause->tx_pause = + (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET, + SWITCH_TX_FLOW_CTRL)) ? 1 : 0; + } +} + +/** + * netdev_set_pauseparam - set flow control parameters + * @dev: Network device. + * @pause: Ethtool PAUSE settings data structure. + * + * This function sets the PAUSE control flow settings. + * Not implemented yet. + * + * Return 0 if successful; otherwise an error code. 
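/*
 * Illustrative sketch, not part of the driver: the EEPROM is addressed in
 * 16-bit words while the ethtool request is in bytes, so netdev_get_eeprom()
 * and netdev_set_eeprom() above read every word overlapping the byte range
 * [offset, offset + len) and then copy bytes out of (or into) the word
 * buffer.  The index arithmetic on its own:
 */
static void example_eeprom_word_span(int offset, int len,
				     int *first, int *last)
{
	*first = offset / 2;			/* word holding the first byte */
	*last = (offset + len + 1) / 2 - 1;	/* word holding the last byte  */
	/* e.g. offset=3, len=4 (bytes 3..6) -> words 1..3 (bytes 2..7) */
}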
 */ +static int netdev_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_port *port = &priv->port; + + mutex_lock(&hw_priv->lock); + if (pause->autoneg) { + if (!pause->rx_pause && !pause->tx_pause) + port->flow_ctrl = PHY_NO_FLOW_CTRL; + else + port->flow_ctrl = PHY_FLOW_CTRL; + hw->overrides &= ~PAUSE_FLOW_CTRL; + port->force_link = 0; + if (hw->ksz_switch) { + sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, + SWITCH_RX_FLOW_CTRL, 1); + sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, + SWITCH_TX_FLOW_CTRL, 1); + } + port_set_link_speed(port); + } else { + hw->overrides |= PAUSE_FLOW_CTRL; + if (hw->ksz_switch) { + sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, + SWITCH_RX_FLOW_CTRL, pause->rx_pause); + sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, + SWITCH_TX_FLOW_CTRL, pause->tx_pause); + } else + set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause); + } + mutex_unlock(&hw_priv->lock); + + return 0; +} + +/** + * netdev_get_ringparam - get tx/rx ring parameters + * @dev: Network device. + * @ring: Ethtool RING settings data structure. + * + * This procedure returns the TX/RX ring settings. + */ +static void netdev_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + + ring->tx_max_pending = (1 << 9); + ring->tx_pending = hw->tx_desc_info.alloc; + ring->rx_max_pending = (1 << 9); + ring->rx_pending = hw->rx_desc_info.alloc; +} + +#define STATS_LEN (TOTAL_PORT_COUNTER_NUM) + +static struct { + char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[STATS_LEN] = { + { "rx_lo_priority_octets" }, + { "rx_hi_priority_octets" }, + { "rx_undersize_packets" }, + { "rx_fragments" }, + { "rx_oversize_packets" }, + { "rx_jabbers" }, + { "rx_symbol_errors" }, + { "rx_crc_errors" }, + { "rx_align_errors" }, + { "rx_mac_ctrl_packets" }, + { "rx_pause_packets" }, + { "rx_bcast_packets" }, + { "rx_mcast_packets" }, + { "rx_ucast_packets" }, + { "rx_64_or_less_octet_packets" }, + { "rx_65_to_127_octet_packets" }, + { "rx_128_to_255_octet_packets" }, + { "rx_256_to_511_octet_packets" }, + { "rx_512_to_1023_octet_packets" }, + { "rx_1024_to_1522_octet_packets" }, + + { "tx_lo_priority_octets" }, + { "tx_hi_priority_octets" }, + { "tx_late_collisions" }, + { "tx_pause_packets" }, + { "tx_bcast_packets" }, + { "tx_mcast_packets" }, + { "tx_ucast_packets" }, + { "tx_deferred" }, + { "tx_total_collisions" }, + { "tx_excessive_collisions" }, + { "tx_single_collisions" }, + { "tx_mult_collisions" }, + + { "rx_discards" }, + { "tx_discards" }, +}; + +/** + * netdev_get_strings - get statistics identity strings + * @dev: Network device. + * @stringset: String set identifier. + * @buf: Buffer to store the strings. + * + * This procedure returns the strings used to identify the statistics. + */ +static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + + if (ETH_SS_STATS == stringset) + memcpy(buf, &ethtool_stats_keys, + ETH_GSTRING_LEN * hw->mib_cnt); +} + +/** + * netdev_get_sset_count - get statistics size + * @dev: Network device. + * @sset: The statistics set number. + * + * This function returns the size of the statistics to be reported. + * + * Return size of the statistics to be reported.
+ */ +static int netdev_get_sset_count(struct net_device *dev, int sset) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + + switch (sset) { + case ETH_SS_STATS: + return hw->mib_cnt; + default: + return -EOPNOTSUPP; + } +} + +/** + * netdev_get_ethtool_stats - get network device statistics + * @dev: Network device. + * @stats: Ethtool statistics data structure. + * @data: Buffer to store the statistics. + * + * This procedure returns the statistics. + */ +static void netdev_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_port *port = &priv->port; + int n_stats = stats->n_stats; + int i; + int n; + int p; + int rc; + u64 counter[TOTAL_PORT_COUNTER_NUM]; + + mutex_lock(&hw_priv->lock); + n = SWITCH_PORT_NUM; + for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) { + if (media_connected == hw->port_mib[p].state) { + hw_priv->counter[p].read = 1; + + /* Remember first port that requests read. */ + if (n == SWITCH_PORT_NUM) + n = p; + } + } + mutex_unlock(&hw_priv->lock); + + if (n < SWITCH_PORT_NUM) + schedule_work(&hw_priv->mib_read); + + if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) { + p = n; + rc = wait_event_interruptible_timeout( + hw_priv->counter[p].counter, + 2 == hw_priv->counter[p].read, + HZ * 1); + } else + for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) { + if (0 == i) { + rc = wait_event_interruptible_timeout( + hw_priv->counter[p].counter, + 2 == hw_priv->counter[p].read, + HZ * 2); + } else if (hw->port_mib[p].cnt_ptr) { + rc = wait_event_interruptible_timeout( + hw_priv->counter[p].counter, + 2 == hw_priv->counter[p].read, + HZ * 1); + } + } + + get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter); + n = hw->mib_cnt; + if (n > n_stats) + n = n_stats; + n_stats -= n; + for (i = 0; i < n; i++) + *data++ = counter[i]; +} + +/** + * netdev_set_features - set receive checksum support + * @dev: Network device. + * @features: New device features (offloads). + * + * This function sets receive checksum support setting. + * + * Return 0 if successful; otherwise an error code. 
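/*
 * Illustrative sketch, not part of the driver: netdev_get_ethtool_stats()
 * above defers the slow MIB register reads to the mib_read work item and
 * waits for completion through a small flag protocol -- the requester sets
 * counter[p].read to 1, the worker sets it to 2 and wakes the wait queue.
 * The handshake reduced to one flag and one wait queue (kernel wait-queue
 * API from <linux/wait.h> assumed):
 */
static DECLARE_WAIT_QUEUE_HEAD(example_wait);
static int example_read_state;		/* 0 idle, 1 requested, 2 done */

static void example_worker(void)
{
	/* ... read the hardware counters here ... */
	example_read_state = 2;
	wake_up_interruptible(&example_wait);
}

static void example_requester(void)
{
	example_read_state = 1;
	/* schedule_work() in the real driver */
	wait_event_interruptible_timeout(example_wait,
					 example_read_state == 2, HZ);
}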
+ */ +static int netdev_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + + mutex_lock(&hw_priv->lock); + + /* see note in hw_setup() */ + if (features & NETIF_F_RXCSUM) + hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP; + else + hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP); + + if (hw->enabled) + writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL); + + mutex_unlock(&hw_priv->lock); + + return 0; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .nway_reset = netdev_nway_reset, + .get_link = netdev_get_link, + .get_drvinfo = netdev_get_drvinfo, + .get_regs_len = netdev_get_regs_len, + .get_regs = netdev_get_regs, + .get_wol = netdev_get_wol, + .set_wol = netdev_set_wol, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, + .get_eeprom_len = netdev_get_eeprom_len, + .get_eeprom = netdev_get_eeprom, + .set_eeprom = netdev_set_eeprom, + .get_pauseparam = netdev_get_pauseparam, + .set_pauseparam = netdev_set_pauseparam, + .get_ringparam = netdev_get_ringparam, + .get_strings = netdev_get_strings, + .get_sset_count = netdev_get_sset_count, + .get_ethtool_stats = netdev_get_ethtool_stats, +}; + +/* + * Hardware monitoring + */ + +static void update_link(struct net_device *dev, struct dev_priv *priv, + struct ksz_port *port) +{ + if (priv->media_state != port->linked->state) { + priv->media_state = port->linked->state; + if (netif_running(dev)) + set_media_state(dev, media_connected); + } +} + +static void mib_read_work(struct work_struct *work) +{ + struct dev_info *hw_priv = + container_of(work, struct dev_info, mib_read); + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_port_mib *mib; + int i; + + next_jiffies = jiffies; + for (i = 0; i < hw->mib_port_cnt; i++) { + mib = &hw->port_mib[i]; + + /* Reading MIB counters or requested to read. */ + if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) { + + /* Need to process receive interrupt. */ + if (port_r_cnt(hw, i)) + break; + hw_priv->counter[i].read = 0; + + /* Finish reading counters. */ + if (0 == mib->cnt_ptr) { + hw_priv->counter[i].read = 2; + wake_up_interruptible( + &hw_priv->counter[i].counter); + } + } else if (jiffies >= hw_priv->counter[i].time) { + /* Only read MIB counters when the port is connected. */ + if (media_connected == mib->state) + hw_priv->counter[i].read = 1; + next_jiffies += HZ * 1 * hw->mib_port_cnt; + hw_priv->counter[i].time = next_jiffies; + + /* Port is just disconnected. */ + } else if (mib->link_down) { + mib->link_down = 0; + + /* Read counters one last time after link is lost. */ + hw_priv->counter[i].read = 1; + } + } +} + +static void mib_monitor(unsigned long ptr) +{ + struct dev_info *hw_priv = (struct dev_info *) ptr; + + mib_read_work(&hw_priv->mib_read); + + /* This is used to verify Wake-on-LAN is working. */ + if (hw_priv->pme_wait) { + if (hw_priv->pme_wait <= jiffies) { + hw_clr_wol_pme_status(&hw_priv->hw); + hw_priv->pme_wait = 0; + } + } else if (hw_chk_wol_pme_status(&hw_priv->hw)) { + + /* PME is asserted. Wait 2 seconds to clear it. */ + hw_priv->pme_wait = jiffies + HZ * 2; + } + + ksz_update_timer(&hw_priv->mib_timer_info); +} + +/** + * dev_monitor - periodic monitoring + * @ptr: Network device pointer. + * + * This routine is run in a kernel timer to monitor the network device. 
+ */ +static void dev_monitor(unsigned long ptr) +{ + struct net_device *dev = (struct net_device *) ptr; + struct dev_priv *priv = netdev_priv(dev); + struct dev_info *hw_priv = priv->adapter; + struct ksz_hw *hw = &hw_priv->hw; + struct ksz_port *port = &priv->port; + + if (!(hw->features & LINK_INT_WORKING)) + port_get_link_speed(port); + update_link(dev, priv, port); + + ksz_update_timer(&priv->monitor_timer_info); +} + +/* + * Linux network device interface functions + */ + +/* Driver exported variables */ + +static int msg_enable; + +static char *macaddr = ":"; +static char *mac1addr = ":"; + +/* + * This enables multiple network device mode for KSZ8842, which contains a + * switch with two physical ports. Some users like to take control of the + * ports for running Spanning Tree Protocol. The driver will create an + * additional eth? device for the other port. + * + * Some limitations are the network devices cannot have different MTU and + * multicast hash tables. + */ +static int multi_dev; + +/* + * As most users select multiple network device mode to use Spanning Tree + * Protocol, this enables a feature in which most unicast and multicast packets + * are forwarded inside the switch and not passed to the host. Only packets + * that need the host's attention are passed to it. This prevents the host + * wasting CPU time to examine each and every incoming packets and do the + * forwarding itself. + * + * As the hack requires the private bridge header, the driver cannot compile + * with just the kernel headers. + * + * Enabling STP support also turns on multiple network device mode. + */ +static int stp; + +/* + * This enables fast aging in the KSZ8842 switch. Not sure what situation + * needs that. However, fast aging is used to flush the dynamic MAC table when + * STP support is enabled. + */ +static int fast_aging; + +/** + * netdev_init - initialize network device. + * @dev: Network device. + * + * This function initializes the network device. + * + * Return 0 if successful; otherwise an error code indicating failure. + */ +static int __init netdev_init(struct net_device *dev) +{ + struct dev_priv *priv = netdev_priv(dev); + + /* 500 ms timeout */ + ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000, + dev_monitor, dev); + + /* 500 ms timeout */ + dev->watchdog_timeo = HZ / 2; + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM; + + /* + * Hardware does not really support IPv6 checksum generation, but + * driver actually runs faster with this on. 
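/*
 * Illustrative sketch, not part of the driver: as the comments above state,
 * STP support implies multiple-device mode, and fast aging is what flushes
 * the dynamic MAC table while STP is active.  A plausible way the three
 * module parameters could fold into the existing feature/override flags
 * (STP_SUPPORT and FAST_AGING are used elsewhere in this file; the helper
 * itself is hypothetical):
 */
static void example_apply_params(int *multi_dev, int stp, int fast_aging,
				 uint *features, uint *overrides)
{
	if (stp) {
		*features |= STP_SUPPORT;
		*multi_dev = 1;		/* one netdev per switch port */
	}
	if (fast_aging)
		*overrides |= FAST_AGING;
}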
+ */ + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + sema_init(&priv->proc_sem, 1); + + priv->mii_if.phy_id_mask = 0x1; + priv->mii_if.reg_num_mask = 0x7; + priv->mii_if.dev = dev; + priv->mii_if.mdio_read = mdio_read; + priv->mii_if.mdio_write = mdio_write; + priv->mii_if.phy_id = priv->port.first_port + 1; + + priv->msg_enable = netif_msg_init(msg_enable, + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)); + + return 0; +} + +static const struct net_device_ops netdev_ops = { + .ndo_init = netdev_init, + .ndo_open = netdev_open, + .ndo_stop = netdev_close, + .ndo_get_stats = netdev_query_statistics, + .ndo_start_xmit = netdev_tx, + .ndo_tx_timeout = netdev_tx_timeout, + .ndo_change_mtu = netdev_change_mtu, + .ndo_set_features = netdev_set_features, + .ndo_set_mac_address = netdev_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = netdev_ioctl, + .ndo_set_rx_mode = netdev_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = netdev_netpoll, +#endif +}; + +static void netdev_free(struct net_device *dev) +{ + if (dev->watchdog_timeo) + unregister_netdev(dev); + + free_netdev(dev); +} + +struct platform_info { + struct dev_info dev_info; + struct net_device *netdev[SWITCH_PORT_NUM]; +}; + +static int net_device_present; + +static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port) +{ + int i; + int j; + int got_num; + int num; + + i = j = num = got_num = 0; + while (j < ETH_ALEN) { + if (macaddr[i]) { + int digit; + + got_num = 1; + digit = hex_to_bin(macaddr[i]); + if (digit >= 0) + num = num * 16 + digit; + else if (':' == macaddr[i]) + got_num = 2; + else + break; + } else if (got_num) + got_num = 2; + else + break; + if (2 == got_num) { + if (MAIN_PORT == port) { + hw_priv->hw.override_addr[j++] = (u8) num; + hw_priv->hw.override_addr[5] += + hw_priv->hw.id; + } else { + hw_priv->hw.ksz_switch->other_addr[j++] = + (u8) num; + hw_priv->hw.ksz_switch->other_addr[5] += + hw_priv->hw.id; + } + num = got_num = 0; + } + i++; + } + if (ETH_ALEN == j) { + if (MAIN_PORT == port) + hw_priv->hw.mac_override = 1; + } +} + +#define KS884X_DMA_MASK (~0x0UL) + +static void read_other_addr(struct ksz_hw *hw) +{ + int i; + u16 data[3]; + struct ksz_switch *sw = hw->ksz_switch; + + for (i = 0; i < 3; i++) + data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR); + if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) { + sw->other_addr[5] = (u8) data[0]; + sw->other_addr[4] = (u8)(data[0] >> 8); + sw->other_addr[3] = (u8) data[1]; + sw->other_addr[2] = (u8)(data[1] >> 8); + sw->other_addr[1] = (u8) data[2]; + sw->other_addr[0] = (u8)(data[2] >> 8); + } +} + +#ifndef PCI_VENDOR_ID_MICREL_KS +#define PCI_VENDOR_ID_MICREL_KS 0x16c6 +#endif + +static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct net_device *dev; + struct dev_priv *priv; + struct dev_info *hw_priv; + struct ksz_hw *hw; + struct platform_info *info; + struct ksz_port *port; + unsigned long reg_base; + unsigned long reg_len; + int cnt; + int i; + int mib_port_count; + int pi; + int port_count; + int result; + char banner[sizeof(version)]; + struct ksz_switch *sw = NULL; + + result = pci_enable_device(pdev); + if (result) + return result; + + result = -ENODEV; + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return result; + + reg_base = pci_resource_start(pdev, 0); + reg_len = pci_resource_len(pdev, 0); + if ((pci_resource_flags(pdev, 0) & 
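/*
 * Illustrative sketch, not part of the driver: get_mac_addr() above parses a
 * "xx:xx:xx:xx:xx:xx" module-parameter string with hex_to_bin(), two hex
 * digits per byte and ':' closing each byte.  A compact variant for a
 * well-formed string (hex_to_bin() from <linux/kernel.h> returns -1 on a
 * non-hex character):
 */
static int example_parse_mac(const char *s, u8 mac[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int hi = hex_to_bin(s[0]);
		int lo = hex_to_bin(s[1]);

		if (hi < 0 || lo < 0)
			return -EINVAL;
		mac[i] = (hi << 4) | lo;
		s += 3;			/* two digits plus the ':' separator */
	}
	return 0;			/* "00:10:a1:88:42:01" -> 00 10 a1 88 42 01 */
}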
IORESOURCE_IO) != 0) + return result; + + if (!request_mem_region(reg_base, reg_len, DRV_NAME)) + return result; + pci_set_master(pdev); + + result = -ENOMEM; + + info = kzalloc(sizeof(struct platform_info), GFP_KERNEL); + if (!info) + goto pcidev_init_dev_err; + + hw_priv = &info->dev_info; + hw_priv->pdev = pdev; + + hw = &hw_priv->hw; + + hw->io = ioremap(reg_base, reg_len); + if (!hw->io) + goto pcidev_init_io_err; + + cnt = hw_init(hw); + if (!cnt) { + if (msg_enable & NETIF_MSG_PROBE) + pr_alert("chip not detected\n"); + result = -ENODEV; + goto pcidev_init_alloc_err; + } + + snprintf(banner, sizeof(banner), "%s", version); + banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */ + dev_info(&hw_priv->pdev->dev, "%s\n", banner); + dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); + + /* Assume device is KSZ8841. */ + hw->dev_count = 1; + port_count = 1; + mib_port_count = 1; + hw->addr_list_size = 0; + hw->mib_cnt = PORT_COUNTER_NUM; + hw->mib_port_cnt = 1; + + /* KSZ8842 has a switch with multiple ports. */ + if (2 == cnt) { + if (fast_aging) + hw->overrides |= FAST_AGING; + + hw->mib_cnt = TOTAL_PORT_COUNTER_NUM; + + /* Multiple network device interfaces are required. */ + if (multi_dev) { + hw->dev_count = SWITCH_PORT_NUM; + hw->addr_list_size = SWITCH_PORT_NUM - 1; + } + + /* Single network device has multiple ports. */ + if (1 == hw->dev_count) { + port_count = SWITCH_PORT_NUM; + mib_port_count = SWITCH_PORT_NUM; + } + hw->mib_port_cnt = TOTAL_PORT_NUM; + hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL); + if (!hw->ksz_switch) + goto pcidev_init_alloc_err; + + sw = hw->ksz_switch; + } + for (i = 0; i < hw->mib_port_cnt; i++) + hw->port_mib[i].mib_start = 0; + + hw->parent = hw_priv; + + /* Default MTU is 1500. */ + hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3; + + if (ksz_alloc_mem(hw_priv)) + goto pcidev_init_mem_err; + + hw_priv->hw.id = net_device_present; + + spin_lock_init(&hw_priv->hwlock); + mutex_init(&hw_priv->lock); + + for (i = 0; i < TOTAL_PORT_NUM; i++) + init_waitqueue_head(&hw_priv->counter[i].counter); + + if (macaddr[0] != ':') + get_mac_addr(hw_priv, macaddr, MAIN_PORT); + + /* Read MAC address and initialize override address if not overrided. */ + hw_read_addr(hw); + + /* Multiple device interfaces mode requires a second MAC address. 
*/ + if (hw->dev_count > 1) { + memcpy(sw->other_addr, hw->override_addr, ETH_ALEN); + read_other_addr(hw); + if (mac1addr[0] != ':') + get_mac_addr(hw_priv, mac1addr, OTHER_PORT); + } + + hw_setup(hw); + if (hw->ksz_switch) + sw_setup(hw); + else { + hw_priv->wol_support = WOL_SUPPORT; + hw_priv->wol_enable = 0; + } + + INIT_WORK(&hw_priv->mib_read, mib_read_work); + + /* 500 ms timeout */ + ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000, + mib_monitor, hw_priv); + + for (i = 0; i < hw->dev_count; i++) { + dev = alloc_etherdev(sizeof(struct dev_priv)); + if (!dev) + goto pcidev_init_reg_err; + SET_NETDEV_DEV(dev, &pdev->dev); + info->netdev[i] = dev; + + priv = netdev_priv(dev); + priv->adapter = hw_priv; + priv->id = net_device_present++; + + port = &priv->port; + port->port_cnt = port_count; + port->mib_port_cnt = mib_port_count; + port->first_port = i; + port->flow_ctrl = PHY_FLOW_CTRL; + + port->hw = hw; + port->linked = &hw->port_info[port->first_port]; + + for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) { + hw->port_info[pi].port_id = pi; + hw->port_info[pi].pdev = dev; + hw->port_info[pi].state = media_disconnected; + } + + dev->mem_start = (unsigned long) hw->io; + dev->mem_end = dev->mem_start + reg_len - 1; + dev->irq = pdev->irq; + if (MAIN_PORT == i) + memcpy(dev->dev_addr, hw_priv->hw.override_addr, + ETH_ALEN); + else { + memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN); + if (ether_addr_equal(sw->other_addr, hw->override_addr)) + dev->dev_addr[5] += port->first_port; + } + + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + if (register_netdev(dev)) + goto pcidev_init_reg_err; + port_set_power_saving(port, true); + } + + pci_dev_get(hw_priv->pdev); + pci_set_drvdata(pdev, info); + return 0; + +pcidev_init_reg_err: + for (i = 0; i < hw->dev_count; i++) { + if (info->netdev[i]) { + netdev_free(info->netdev[i]); + info->netdev[i] = NULL; + } + } + +pcidev_init_mem_err: + ksz_free_mem(hw_priv); + kfree(hw->ksz_switch); + +pcidev_init_alloc_err: + iounmap(hw->io); + +pcidev_init_io_err: + kfree(info); + +pcidev_init_dev_err: + release_mem_region(reg_base, reg_len); + + return result; +} + +static void pcidev_exit(struct pci_dev *pdev) +{ + int i; + struct platform_info *info = pci_get_drvdata(pdev); + struct dev_info *hw_priv = &info->dev_info; + + release_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + for (i = 0; i < hw_priv->hw.dev_count; i++) { + if (info->netdev[i]) + netdev_free(info->netdev[i]); + } + if (hw_priv->hw.io) + iounmap(hw_priv->hw.io); + ksz_free_mem(hw_priv); + kfree(hw_priv->hw.ksz_switch); + pci_dev_put(hw_priv->pdev); + kfree(info); +} + +#ifdef CONFIG_PM +static int pcidev_resume(struct pci_dev *pdev) +{ + int i; + struct platform_info *info = pci_get_drvdata(pdev); + struct dev_info *hw_priv = &info->dev_info; + struct ksz_hw *hw = &hw_priv->hw; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_wake(pdev, PCI_D0, 0); + + if (hw_priv->wol_enable) + hw_cfg_wol_pme(hw, 0); + for (i = 0; i < hw->dev_count; i++) { + if (info->netdev[i]) { + struct net_device *dev = info->netdev[i]; + + if (netif_running(dev)) { + netdev_open(dev); + netif_device_attach(dev); + } + } + } + return 0; +} + +static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int i; + struct platform_info *info = pci_get_drvdata(pdev); + struct dev_info *hw_priv = &info->dev_info; + struct ksz_hw *hw = &hw_priv->hw; + + /* Need to find a way to retrieve the device IP address. 
*/ + static const u8 net_addr[] = { 192, 168, 1, 1 }; + + for (i = 0; i < hw->dev_count; i++) { + if (info->netdev[i]) { + struct net_device *dev = info->netdev[i]; + + if (netif_running(dev)) { + netif_device_detach(dev); + netdev_close(dev); + } + } + } + if (hw_priv->wol_enable) { + hw_enable_wol(hw, hw_priv->wol_enable, net_addr); + hw_cfg_wol_pme(hw, 1); + } + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} +#endif + +static char pcidev_name[] = "ksz884xp"; + +static const struct pci_device_id pcidev_table[] = { + { PCI_VENDOR_ID_MICREL_KS, 0x8841, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_MICREL_KS, 0x8842, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, pcidev_table); + +static struct pci_driver pci_device_driver = { +#ifdef CONFIG_PM + .suspend = pcidev_suspend, + .resume = pcidev_resume, +#endif + .name = pcidev_name, + .id_table = pcidev_table, + .probe = pcidev_init, + .remove = pcidev_exit +}; + +module_pci_driver(pci_device_driver); + +MODULE_DESCRIPTION("KSZ8841/2 PCI network driver"); +MODULE_AUTHOR("Tristram Ha "); +MODULE_LICENSE("GPL"); + +module_param_named(message, msg_enable, int, 0); +MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)"); + +module_param(macaddr, charp, 0); +module_param(mac1addr, charp, 0); +module_param(fast_aging, int, 0); +module_param(multi_dev, int, 0); +module_param(stp, int, 0); +MODULE_PARM_DESC(macaddr, "MAC address"); +MODULE_PARM_DESC(mac1addr, "Second MAC address"); +MODULE_PARM_DESC(fast_aging, "Fast aging"); +MODULE_PARM_DESC(multi_dev, "Multiple device interfaces"); +MODULE_PARM_DESC(stp, "STP support"); diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig new file mode 100644 index 000000000..afaf0c07f --- /dev/null +++ b/drivers/net/ethernet/microchip/Kconfig @@ -0,0 +1,38 @@ +# +# Microchip network device configuration +# + +config NET_VENDOR_MICROCHIP + bool "Microchip devices" + default y + depends on SPI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Microchip cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MICROCHIP + +config ENC28J60 + tristate "ENC28J60 support" + depends on SPI + select CRC32 + ---help--- + Support for the Microchip EN28J60 ethernet chip. + + To compile this driver as a module, choose M here. The module will be + called enc28j60. + +config ENC28J60_WRITEVERIFY + bool "Enable write verify" + depends on ENC28J60 + ---help--- + Enable the verify after the buffer write useful for debugging purpose. + If unsure, say N. + +endif # NET_VENDOR_MICROCHIP diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile new file mode 100644 index 000000000..573d4292b --- /dev/null +++ b/drivers/net/ethernet/microchip/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Microchip network device drivers. 
+# + +obj-$(CONFIG_ENC28J60) += enc28j60.o diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c new file mode 100644 index 000000000..b1b5f66b8 --- /dev/null +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -0,0 +1,1663 @@ +/* + * Microchip ENC28J60 ethernet driver (MAC + PHY) + * + * Copyright (C) 2007 Eurek srl + * Author: Claudio Lanconelli + * based on enc28j60.c written by David Anders for 2.4 kernel version + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "enc28j60_hw.h" + +#define DRV_NAME "enc28j60" +#define DRV_VERSION "1.01" + +#define SPI_OPLEN 1 + +#define ENC28J60_MSG_DEFAULT \ + (NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK) + +/* Buffer size required for the largest SPI transfer (i.e., reading a + * frame). */ +#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN) + +#define TX_TIMEOUT (4 * HZ) + +/* Max TX retries in case of collision as suggested by errata datasheet */ +#define MAX_TX_RETRYCOUNT 16 + +enum { + RXFILTER_NORMAL, + RXFILTER_MULTI, + RXFILTER_PROMISC +}; + +/* Driver local data */ +struct enc28j60_net { + struct net_device *netdev; + struct spi_device *spi; + struct mutex lock; + struct sk_buff *tx_skb; + struct work_struct tx_work; + struct work_struct irq_work; + struct work_struct setrx_work; + struct work_struct restart_work; + u8 bank; /* current register bank selected */ + u16 next_pk_ptr; /* next packet pointer within FIFO */ + u16 max_pk_counter; /* statistics: max packet counter */ + u16 tx_retry_count; + bool hw_enable; + bool full_duplex; + int rxfilter; + u32 msg_enable; + u8 spi_transfer_buf[SPI_TRANSFER_BUF_LEN]; +}; + +/* use ethtool to change the level for any given device */ +static struct { + u32 msg_enable; +} debug = { -1 }; + +/* + * SPI read buffer + * wait for the SPI transfer and copy received data to destination + */ +static int +spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) +{ + u8 *rx_buf = priv->spi_transfer_buf + 4; + u8 *tx_buf = priv->spi_transfer_buf; + struct spi_transfer t = { + .tx_buf = tx_buf, + .rx_buf = rx_buf, + .len = SPI_OPLEN + len, + }; + struct spi_message msg; + int ret; + + tx_buf[0] = ENC28J60_READ_BUF_MEM; + tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */ + + spi_message_init(&msg); + spi_message_add_tail(&t, &msg); + ret = spi_sync(priv->spi, &msg); + if (ret == 0) { + memcpy(data, &rx_buf[SPI_OPLEN], len); + ret = msg.status; + } + if (ret && netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", + __func__, ret); + + return ret; +} + +/* + * SPI write buffer + */ +static int spi_write_buf(struct enc28j60_net *priv, int len, + const u8 *data) +{ + int ret; + + if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0) + ret = -EINVAL; + else { + priv->spi_transfer_buf[0] = ENC28J60_WRITE_BUF_MEM; + memcpy(&priv->spi_transfer_buf[1], data, len); + ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1); + if (ret && netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", + __func__, ret); + } + return ret; +} + +/* + * basic SPI read operation + */ 
+static u8 spi_read_op(struct enc28j60_net *priv, u8 op, + u8 addr) +{ + u8 tx_buf[2]; + u8 rx_buf[4]; + u8 val = 0; + int ret; + int slen = SPI_OPLEN; + + /* do dummy read if needed */ + if (addr & SPRD_MASK) + slen++; + + tx_buf[0] = op | (addr & ADDR_MASK); + ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen); + if (ret) + printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", + __func__, ret); + else + val = rx_buf[slen - 1]; + + return val; +} + +/* + * basic SPI write operation + */ +static int spi_write_op(struct enc28j60_net *priv, u8 op, + u8 addr, u8 val) +{ + int ret; + + priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK); + priv->spi_transfer_buf[1] = val; + ret = spi_write(priv->spi, priv->spi_transfer_buf, 2); + if (ret && netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", + __func__, ret); + return ret; +} + +static void enc28j60_soft_reset(struct enc28j60_net *priv) +{ + if (netif_msg_hw(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); + + spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET); + /* Errata workaround #1, CLKRDY check is unreliable, + * delay at least 1 mS instead */ + udelay(2000); +} + +/* + * select the current register bank if necessary + */ +static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr) +{ + u8 b = (addr & BANK_MASK) >> 5; + + /* These registers (EIE, EIR, ESTAT, ECON2, ECON1) + * are present in all banks, no need to switch bank + */ + if (addr >= EIE && addr <= ECON1) + return; + + /* Clear or set each bank selection bit as needed */ + if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) { + if (b & ECON1_BSEL0) + spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1, + ECON1_BSEL0); + else + spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1, + ECON1_BSEL0); + } + if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) { + if (b & ECON1_BSEL1) + spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1, + ECON1_BSEL1); + else + spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1, + ECON1_BSEL1); + } + priv->bank = b; +} + +/* + * Register access routines through the SPI bus. + * Every register access comes in two flavours: + * - nolock_xxx: caller needs to invoke mutex_lock, usually to access + * atomically more than one register + * - locked_xxx: caller doesn't need to invoke mutex_lock, single access + * + * Some registers can be accessed through the bit field clear and + * bit field set to avoid a read modify write cycle. 
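/*
 * Illustrative sketch, not part of the driver: the register constants used by
 * spi_read_op() and enc28j60_set_bank() above pack three fields into one
 * byte -- the 5-bit register offset (ADDR_MASK), the bank number in bits 5..6
 * (BANK_MASK, hence the ">> 5"), and a flag (SPRD_MASK) marking MAC/MII
 * registers whose SPI reads return a dummy byte first.  The mask values below
 * are assumed to match enc28j60_hw.h:
 */
static void example_decode_reg(u8 reg, u8 *offset, u8 *bank, int *has_dummy)
{
	*offset = reg & 0x1f;		/* ADDR_MASK */
	*bank = (reg & 0x60) >> 5;	/* BANK_MASK */
	*has_dummy = (reg & 0x80) != 0;	/* SPRD_MASK */
}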
+ */ + +/* + * Register bit field Set + */ +static void nolock_reg_bfset(struct enc28j60_net *priv, + u8 addr, u8 mask) +{ + enc28j60_set_bank(priv, addr); + spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask); +} + +static void locked_reg_bfset(struct enc28j60_net *priv, + u8 addr, u8 mask) +{ + mutex_lock(&priv->lock); + nolock_reg_bfset(priv, addr, mask); + mutex_unlock(&priv->lock); +} + +/* + * Register bit field Clear + */ +static void nolock_reg_bfclr(struct enc28j60_net *priv, + u8 addr, u8 mask) +{ + enc28j60_set_bank(priv, addr); + spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask); +} + +static void locked_reg_bfclr(struct enc28j60_net *priv, + u8 addr, u8 mask) +{ + mutex_lock(&priv->lock); + nolock_reg_bfclr(priv, addr, mask); + mutex_unlock(&priv->lock); +} + +/* + * Register byte read + */ +static int nolock_regb_read(struct enc28j60_net *priv, + u8 address) +{ + enc28j60_set_bank(priv, address); + return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); +} + +static int locked_regb_read(struct enc28j60_net *priv, + u8 address) +{ + int ret; + + mutex_lock(&priv->lock); + ret = nolock_regb_read(priv, address); + mutex_unlock(&priv->lock); + + return ret; +} + +/* + * Register word read + */ +static int nolock_regw_read(struct enc28j60_net *priv, + u8 address) +{ + int rl, rh; + + enc28j60_set_bank(priv, address); + rl = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address); + rh = spi_read_op(priv, ENC28J60_READ_CTRL_REG, address + 1); + + return (rh << 8) | rl; +} + +static int locked_regw_read(struct enc28j60_net *priv, + u8 address) +{ + int ret; + + mutex_lock(&priv->lock); + ret = nolock_regw_read(priv, address); + mutex_unlock(&priv->lock); + + return ret; +} + +/* + * Register byte write + */ +static void nolock_regb_write(struct enc28j60_net *priv, + u8 address, u8 data) +{ + enc28j60_set_bank(priv, address); + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data); +} + +static void locked_regb_write(struct enc28j60_net *priv, + u8 address, u8 data) +{ + mutex_lock(&priv->lock); + nolock_regb_write(priv, address, data); + mutex_unlock(&priv->lock); +} + +/* + * Register word write + */ +static void nolock_regw_write(struct enc28j60_net *priv, + u8 address, u16 data) +{ + enc28j60_set_bank(priv, address); + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data); + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address + 1, + (u8) (data >> 8)); +} + +static void locked_regw_write(struct enc28j60_net *priv, + u8 address, u16 data) +{ + mutex_lock(&priv->lock); + nolock_regw_write(priv, address, data); + mutex_unlock(&priv->lock); +} + +/* + * Buffer memory read + * Select the starting address and execute a SPI buffer read + */ +static void enc28j60_mem_read(struct enc28j60_net *priv, + u16 addr, int len, u8 *data) +{ + mutex_lock(&priv->lock); + nolock_regw_write(priv, ERDPTL, addr); +#ifdef CONFIG_ENC28J60_WRITEVERIFY + if (netif_msg_drv(priv)) { + u16 reg; + reg = nolock_regw_read(priv, ERDPTL); + if (reg != addr) + printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT " + "(0x%04x - 0x%04x)\n", __func__, reg, addr); + } +#endif + spi_read_buf(priv, len, data); + mutex_unlock(&priv->lock); +} + +/* + * Write packet to enc28j60 TX buffer memory + */ +static void +enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data) +{ + mutex_lock(&priv->lock); + /* Set the write pointer to start of transmit buffer area */ + nolock_regw_write(priv, EWRPTL, TXSTART_INIT); +#ifdef CONFIG_ENC28J60_WRITEVERIFY + if (netif_msg_drv(priv)) { + u16 reg; + 
reg = nolock_regw_read(priv, EWRPTL); + if (reg != TXSTART_INIT) + printk(KERN_DEBUG DRV_NAME + ": %s() ERWPT:0x%04x != 0x%04x\n", + __func__, reg, TXSTART_INIT); + } +#endif + /* Set the TXND pointer to correspond to the packet size given */ + nolock_regw_write(priv, ETXNDL, TXSTART_INIT + len); + /* write per-packet control byte */ + spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00); + if (netif_msg_hw(priv)) + printk(KERN_DEBUG DRV_NAME + ": %s() after control byte ERWPT:0x%04x\n", + __func__, nolock_regw_read(priv, EWRPTL)); + /* copy the packet into the transmit buffer */ + spi_write_buf(priv, len, data); + if (netif_msg_hw(priv)) + printk(KERN_DEBUG DRV_NAME + ": %s() after write packet ERWPT:0x%04x, len=%d\n", + __func__, nolock_regw_read(priv, EWRPTL), len); + mutex_unlock(&priv->lock); +} + +static unsigned long msec20_to_jiffies; + +static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val) +{ + unsigned long timeout = jiffies + msec20_to_jiffies; + + /* 20 msec timeout read */ + while ((nolock_regb_read(priv, reg) & mask) != val) { + if (time_after(jiffies, timeout)) { + if (netif_msg_drv(priv)) + dev_dbg(&priv->spi->dev, + "reg %02x ready timeout!\n", reg); + return -ETIMEDOUT; + } + cpu_relax(); + } + return 0; +} + +/* + * Wait until the PHY operation is complete. + */ +static int wait_phy_ready(struct enc28j60_net *priv) +{ + return poll_ready(priv, MISTAT, MISTAT_BUSY, 0) ? 0 : 1; +} + +/* + * PHY register read + * PHY registers are not accessed directly, but through the MII + */ +static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address) +{ + u16 ret; + + mutex_lock(&priv->lock); + /* set the PHY register address */ + nolock_regb_write(priv, MIREGADR, address); + /* start the register read operation */ + nolock_regb_write(priv, MICMD, MICMD_MIIRD); + /* wait until the PHY read completes */ + wait_phy_ready(priv); + /* quit reading */ + nolock_regb_write(priv, MICMD, 0x00); + /* return the data */ + ret = nolock_regw_read(priv, MIRDL); + mutex_unlock(&priv->lock); + + return ret; +} + +static int enc28j60_phy_write(struct enc28j60_net *priv, u8 address, u16 data) +{ + int ret; + + mutex_lock(&priv->lock); + /* set the PHY register address */ + nolock_regb_write(priv, MIREGADR, address); + /* write the PHY data */ + nolock_regw_write(priv, MIWRL, data); + /* wait until the PHY write completes and return */ + ret = wait_phy_ready(priv); + mutex_unlock(&priv->lock); + + return ret; +} + +/* + * Program the hardware MAC address from dev->dev_addr. + */ +static int enc28j60_set_hw_macaddr(struct net_device *ndev) +{ + int ret; + struct enc28j60_net *priv = netdev_priv(ndev); + + mutex_lock(&priv->lock); + if (!priv->hw_enable) { + if (netif_msg_drv(priv)) + printk(KERN_INFO DRV_NAME + ": %s: Setting MAC address to %pM\n", + ndev->name, ndev->dev_addr); + /* NOTE: MAC address in ENC28J60 is byte-backward */ + nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]); + nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]); + nolock_regb_write(priv, MAADR3, ndev->dev_addr[2]); + nolock_regb_write(priv, MAADR2, ndev->dev_addr[3]); + nolock_regb_write(priv, MAADR1, ndev->dev_addr[4]); + nolock_regb_write(priv, MAADR0, ndev->dev_addr[5]); + ret = 0; + } else { + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME + ": %s() Hardware must be disabled to set " + "Mac address\n", __func__); + ret = -EBUSY; + } + mutex_unlock(&priv->lock); + return ret; +} + +/* + * Store the new hardware address in dev->dev_addr, and update the MAC. 
+ */ +static int enc28j60_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *address = addr; + + if (netif_running(dev)) + return -EBUSY; + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + return enc28j60_set_hw_macaddr(dev); +} + +/* + * Debug routine to dump useful register contents + */ +static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg) +{ + mutex_lock(&priv->lock); + printk(KERN_DEBUG DRV_NAME " %s\n" + "HwRevID: 0x%02x\n" + "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n" + " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n" + "MAC : MACON1 MACON3 MACON4\n" + " 0x%02x 0x%02x 0x%02x\n" + "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n" + " 0x%04x 0x%04x 0x%04x 0x%04x " + "0x%02x 0x%02x 0x%04x\n" + "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n" + " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n", + msg, nolock_regb_read(priv, EREVID), + nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2), + nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR), + nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1), + nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4), + nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL), + nolock_regw_read(priv, ERXWRPTL), + nolock_regw_read(priv, ERXRDPTL), + nolock_regb_read(priv, ERXFCON), + nolock_regb_read(priv, EPKTCNT), + nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL), + nolock_regw_read(priv, ETXNDL), + nolock_regb_read(priv, MACLCON1), + nolock_regb_read(priv, MACLCON2), + nolock_regb_read(priv, MAPHSUP)); + mutex_unlock(&priv->lock); +} + +/* + * ERXRDPT need to be set always at odd addresses, refer to errata datasheet + */ +static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end) +{ + u16 erxrdpt; + + if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end)) + erxrdpt = end; + else + erxrdpt = next_packet_ptr - 1; + + return erxrdpt; +} + +/* + * Calculate wrap around when reading beyond the end of the RX buffer + */ +static u16 rx_packet_start(u16 ptr) +{ + if (ptr + RSV_SIZE > RXEND_INIT) + return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1); + else + return ptr + RSV_SIZE; +} + +static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end) +{ + u16 erxrdpt; + + if (start > 0x1FFF || end > 0x1FFF || start > end) { + if (netif_msg_drv(priv)) + printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO " + "bad parameters!\n", __func__, start, end); + return; + } + /* set receive buffer start + end */ + priv->next_pk_ptr = start; + nolock_regw_write(priv, ERXSTL, start); + erxrdpt = erxrdpt_workaround(priv->next_pk_ptr, start, end); + nolock_regw_write(priv, ERXRDPTL, erxrdpt); + nolock_regw_write(priv, ERXNDL, end); +} + +static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end) +{ + if (start > 0x1FFF || end > 0x1FFF || start > end) { + if (netif_msg_drv(priv)) + printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO " + "bad parameters!\n", __func__, start, end); + return; + } + /* set transmit buffer start + end */ + nolock_regw_write(priv, ETXSTL, start); + nolock_regw_write(priv, ETXNDL, end); +} + +/* + * Low power mode shrinks power consumption about 100x, so we'd like + * the chip to be in that mode whenever it's inactive. (However, we + * can't stay in lowpower mode during suspend with WOL active.) 
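/*
 * Illustrative sketch, not part of the driver: per the errata note above,
 * ERXRDPT is always written as "next packet pointer - 1", falling back to the
 * FIFO end when that value would land outside [start, end].  Two concrete
 * cases for a FIFO spanning [start, end]:
 *
 *   next == start      ->  next - 1 < start  ->  ERXRDPT = end
 *   next == start + 6  ->                        ERXRDPT = start + 5
 */
static u16 example_erxrdpt(u16 next, u16 start, u16 end)
{
	return (next - 1 < start || next - 1 > end) ? end : next - 1;
}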
+ */ +static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low) +{ + if (netif_msg_drv(priv)) + dev_dbg(&priv->spi->dev, "%s power...\n", + is_low ? "low" : "high"); + + mutex_lock(&priv->lock); + if (is_low) { + nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); + poll_ready(priv, ESTAT, ESTAT_RXBUSY, 0); + poll_ready(priv, ECON1, ECON1_TXRTS, 0); + /* ECON2_VRPS was set during initialization */ + nolock_reg_bfset(priv, ECON2, ECON2_PWRSV); + } else { + nolock_reg_bfclr(priv, ECON2, ECON2_PWRSV); + poll_ready(priv, ESTAT, ESTAT_CLKRDY, ESTAT_CLKRDY); + /* caller sets ECON1_RXEN */ + } + mutex_unlock(&priv->lock); +} + +static int enc28j60_hw_init(struct enc28j60_net *priv) +{ + u8 reg; + + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__, + priv->full_duplex ? "FullDuplex" : "HalfDuplex"); + + mutex_lock(&priv->lock); + /* first reset the chip */ + enc28j60_soft_reset(priv); + /* Clear ECON1 */ + spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, ECON1, 0x00); + priv->bank = 0; + priv->hw_enable = false; + priv->tx_retry_count = 0; + priv->max_pk_counter = 0; + priv->rxfilter = RXFILTER_NORMAL; + /* enable address auto increment and voltage regulator powersave */ + nolock_regb_write(priv, ECON2, ECON2_AUTOINC | ECON2_VRPS); + + nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); + nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); + mutex_unlock(&priv->lock); + + /* + * Check the RevID. + * If it's 0x00 or 0xFF probably the enc28j60 is not mounted or + * damaged + */ + reg = locked_regb_read(priv, EREVID); + if (netif_msg_drv(priv)) + printk(KERN_INFO DRV_NAME ": chip RevID: 0x%02x\n", reg); + if (reg == 0x00 || reg == 0xff) { + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n", + __func__, reg); + return 0; + } + + /* default filter mode: (unicast OR broadcast) AND crc valid */ + locked_regb_write(priv, ERXFCON, + ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_BCEN); + + /* enable MAC receive */ + locked_regb_write(priv, MACON1, + MACON1_MARXEN | MACON1_TXPAUS | MACON1_RXPAUS); + /* enable automatic padding and CRC operations */ + if (priv->full_duplex) { + locked_regb_write(priv, MACON3, + MACON3_PADCFG0 | MACON3_TXCRCEN | + MACON3_FRMLNEN | MACON3_FULDPX); + /* set inter-frame gap (non-back-to-back) */ + locked_regb_write(priv, MAIPGL, 0x12); + /* set inter-frame gap (back-to-back) */ + locked_regb_write(priv, MABBIPG, 0x15); + } else { + locked_regb_write(priv, MACON3, + MACON3_PADCFG0 | MACON3_TXCRCEN | + MACON3_FRMLNEN); + locked_regb_write(priv, MACON4, 1 << 6); /* DEFER bit */ + /* set inter-frame gap (non-back-to-back) */ + locked_regw_write(priv, MAIPGL, 0x0C12); + /* set inter-frame gap (back-to-back) */ + locked_regb_write(priv, MABBIPG, 0x12); + } + /* + * MACLCON1 (default) + * MACLCON2 (default) + * Set the maximum packet size which the controller will accept + */ + locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN); + + /* Configure LEDs */ + if (!enc28j60_phy_write(priv, PHLCON, ENC28J60_LAMPS_MODE)) + return 0; + + if (priv->full_duplex) { + if (!enc28j60_phy_write(priv, PHCON1, PHCON1_PDPXMD)) + return 0; + if (!enc28j60_phy_write(priv, PHCON2, 0x00)) + return 0; + } else { + if (!enc28j60_phy_write(priv, PHCON1, 0x00)) + return 0; + if (!enc28j60_phy_write(priv, PHCON2, PHCON2_HDLDIS)) + return 0; + } + if (netif_msg_hw(priv)) + enc28j60_dump_regs(priv, "Hw initialized."); + + return 1; +} + +static void enc28j60_hw_enable(struct enc28j60_net *priv) +{ + /* enable interrupts */ + if (netif_msg_hw(priv)) + 
printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", + __func__); + + enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE); + + mutex_lock(&priv->lock); + nolock_reg_bfclr(priv, EIR, EIR_DMAIF | EIR_LINKIF | + EIR_TXIF | EIR_TXERIF | EIR_RXERIF | EIR_PKTIF); + nolock_regb_write(priv, EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE | + EIE_TXIE | EIE_TXERIE | EIE_RXERIE); + + /* enable receive logic */ + nolock_reg_bfset(priv, ECON1, ECON1_RXEN); + priv->hw_enable = true; + mutex_unlock(&priv->lock); +} + +static void enc28j60_hw_disable(struct enc28j60_net *priv) +{ + mutex_lock(&priv->lock); + /* disable interrutps and packet reception */ + nolock_regb_write(priv, EIE, 0x00); + nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); + priv->hw_enable = false; + mutex_unlock(&priv->lock); +} + +static int +enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + int ret = 0; + + if (!priv->hw_enable) { + /* link is in low power mode now; duplex setting + * will take effect on next enc28j60_hw_init(). + */ + if (autoneg == AUTONEG_DISABLE && speed == SPEED_10) + priv->full_duplex = (duplex == DUPLEX_FULL); + else { + if (netif_msg_link(priv)) + dev_warn(&ndev->dev, + "unsupported link setting\n"); + ret = -EOPNOTSUPP; + } + } else { + if (netif_msg_link(priv)) + dev_warn(&ndev->dev, "Warning: hw must be disabled " + "to set link mode\n"); + ret = -EBUSY; + } + return ret; +} + +/* + * Read the Transmit Status Vector + */ +static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE]) +{ + int endptr; + + endptr = locked_regw_read(priv, ETXNDL); + if (netif_msg_hw(priv)) + printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n", + endptr + 1); + enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv); +} + +static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg, + u8 tsv[TSV_SIZE]) +{ + u16 tmp1, tmp2; + + printk(KERN_DEBUG DRV_NAME ": %s - TSV:\n", msg); + tmp1 = tsv[1]; + tmp1 <<= 8; + tmp1 |= tsv[0]; + + tmp2 = tsv[5]; + tmp2 <<= 8; + tmp2 |= tsv[4]; + + printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, CollisionCount: %d," + " TotByteOnWire: %d\n", tmp1, tsv[2] & 0x0f, tmp2); + printk(KERN_DEBUG DRV_NAME ": TxDone: %d, CRCErr:%d, LenChkErr: %d," + " LenOutOfRange: %d\n", TSV_GETBIT(tsv, TSV_TXDONE), + TSV_GETBIT(tsv, TSV_TXCRCERROR), + TSV_GETBIT(tsv, TSV_TXLENCHKERROR), + TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE)); + printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, " + "PacketDefer: %d, ExDefer: %d\n", + TSV_GETBIT(tsv, TSV_TXMULTICAST), + TSV_GETBIT(tsv, TSV_TXBROADCAST), + TSV_GETBIT(tsv, TSV_TXPACKETDEFER), + TSV_GETBIT(tsv, TSV_TXEXDEFER)); + printk(KERN_DEBUG DRV_NAME ": ExCollision: %d, LateCollision: %d, " + "Giant: %d, Underrun: %d\n", + TSV_GETBIT(tsv, TSV_TXEXCOLLISION), + TSV_GETBIT(tsv, TSV_TXLATECOLLISION), + TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN)); + printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d, " + "BackPressApp: %d, VLanTagFrame: %d\n", + TSV_GETBIT(tsv, TSV_TXCONTROLFRAME), + TSV_GETBIT(tsv, TSV_TXPAUSEFRAME), + TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP), + TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME)); +} + +/* + * Receive Status vector + */ +static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg, + u16 pk_ptr, int len, u16 sts) +{ + printk(KERN_DEBUG DRV_NAME ": %s - NextPk: 0x%04x - RSV:\n", + msg, pk_ptr); + printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, DribbleNibble: %d\n", len, + RSV_GETBIT(sts, RSV_DRIBBLENIBBLE)); + 
printk(KERN_DEBUG DRV_NAME ": RxOK: %d, CRCErr:%d, LenChkErr: %d," + " LenOutOfRange: %d\n", RSV_GETBIT(sts, RSV_RXOK), + RSV_GETBIT(sts, RSV_CRCERROR), + RSV_GETBIT(sts, RSV_LENCHECKERR), + RSV_GETBIT(sts, RSV_LENOUTOFRANGE)); + printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, " + "LongDropEvent: %d, CarrierEvent: %d\n", + RSV_GETBIT(sts, RSV_RXMULTICAST), + RSV_GETBIT(sts, RSV_RXBROADCAST), + RSV_GETBIT(sts, RSV_RXLONGEVDROPEV), + RSV_GETBIT(sts, RSV_CARRIEREV)); + printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d," + " UnknownOp: %d, VLanTagFrame: %d\n", + RSV_GETBIT(sts, RSV_RXCONTROLFRAME), + RSV_GETBIT(sts, RSV_RXPAUSEFRAME), + RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE), + RSV_GETBIT(sts, RSV_RXTYPEVLAN)); +} + +static void dump_packet(const char *msg, int len, const char *data) +{ + printk(KERN_DEBUG DRV_NAME ": %s - packet len:%d\n", msg, len); + print_hex_dump(KERN_DEBUG, "pk data: ", DUMP_PREFIX_OFFSET, 16, 1, + data, len, true); +} + +/* + * Hardware receive function. + * Read the buffer memory, update the FIFO pointer to free the buffer, + * check the status vector and decrement the packet counter. + */ +static void enc28j60_hw_rx(struct net_device *ndev) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + struct sk_buff *skb = NULL; + u16 erxrdpt, next_packet, rxstat; + u8 rsv[RSV_SIZE]; + int len; + + if (netif_msg_rx_status(priv)) + printk(KERN_DEBUG DRV_NAME ": RX pk_addr:0x%04x\n", + priv->next_pk_ptr); + + if (unlikely(priv->next_pk_ptr > RXEND_INIT)) { + if (netif_msg_rx_err(priv)) + dev_err(&ndev->dev, + "%s() Invalid packet address!! 0x%04x\n", + __func__, priv->next_pk_ptr); + /* packet address corrupted: reset RX logic */ + mutex_lock(&priv->lock); + nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); + nolock_reg_bfset(priv, ECON1, ECON1_RXRST); + nolock_reg_bfclr(priv, ECON1, ECON1_RXRST); + nolock_rxfifo_init(priv, RXSTART_INIT, RXEND_INIT); + nolock_reg_bfclr(priv, EIR, EIR_RXERIF); + nolock_reg_bfset(priv, ECON1, ECON1_RXEN); + mutex_unlock(&priv->lock); + ndev->stats.rx_errors++; + return; + } + /* Read next packet pointer and rx status vector */ + enc28j60_mem_read(priv, priv->next_pk_ptr, sizeof(rsv), rsv); + + next_packet = rsv[1]; + next_packet <<= 8; + next_packet |= rsv[0]; + + len = rsv[3]; + len <<= 8; + len |= rsv[2]; + + rxstat = rsv[5]; + rxstat <<= 8; + rxstat |= rsv[4]; + + if (netif_msg_rx_status(priv)) + enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat); + + if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) { + if (netif_msg_rx_err(priv)) + dev_err(&ndev->dev, "Rx Error (%04x)\n", rxstat); + ndev->stats.rx_errors++; + if (RSV_GETBIT(rxstat, RSV_CRCERROR)) + ndev->stats.rx_crc_errors++; + if (RSV_GETBIT(rxstat, RSV_LENCHECKERR)) + ndev->stats.rx_frame_errors++; + if (len > MAX_FRAMELEN) + ndev->stats.rx_over_errors++; + } else { + skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN); + if (!skb) { + if (netif_msg_rx_err(priv)) + dev_err(&ndev->dev, + "out of memory for Rx'd frame\n"); + ndev->stats.rx_dropped++; + } else { + skb_reserve(skb, NET_IP_ALIGN); + /* copy the packet from the receive buffer */ + enc28j60_mem_read(priv, + rx_packet_start(priv->next_pk_ptr), + len, skb_put(skb, len)); + if (netif_msg_pktdata(priv)) + dump_packet(__func__, skb->len, skb->data); + skb->protocol = eth_type_trans(skb, ndev); + /* update statistics */ + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + netif_rx_ni(skb); + } + } + /* + * Move the RX read pointer to the start of the next + * received packet. 
+ * This frees the memory we just read out + */ + erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT); + if (netif_msg_hw(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n", + __func__, erxrdpt); + + mutex_lock(&priv->lock); + nolock_regw_write(priv, ERXRDPTL, erxrdpt); +#ifdef CONFIG_ENC28J60_WRITEVERIFY + if (netif_msg_drv(priv)) { + u16 reg; + reg = nolock_regw_read(priv, ERXRDPTL); + if (reg != erxrdpt) + printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify " + "error (0x%04x - 0x%04x)\n", __func__, + reg, erxrdpt); + } +#endif + priv->next_pk_ptr = next_packet; + /* we are done with this packet, decrement the packet counter */ + nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC); + mutex_unlock(&priv->lock); +} + +/* + * Calculate free space in RxFIFO + */ +static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv) +{ + int epkcnt, erxst, erxnd, erxwr, erxrd; + int free_space; + + mutex_lock(&priv->lock); + epkcnt = nolock_regb_read(priv, EPKTCNT); + if (epkcnt >= 255) + free_space = -1; + else { + erxst = nolock_regw_read(priv, ERXSTL); + erxnd = nolock_regw_read(priv, ERXNDL); + erxwr = nolock_regw_read(priv, ERXWRPTL); + erxrd = nolock_regw_read(priv, ERXRDPTL); + + if (erxwr > erxrd) + free_space = (erxnd - erxst) - (erxwr - erxrd); + else if (erxwr == erxrd) + free_space = (erxnd - erxst); + else + free_space = erxrd - erxwr - 1; + } + mutex_unlock(&priv->lock); + if (netif_msg_rx_status(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n", + __func__, free_space); + return free_space; +} + +/* + * Access the PHY to determine link status + */ +static void enc28j60_check_link_status(struct net_device *ndev) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + u16 reg; + int duplex; + + reg = enc28j60_phy_read(priv, PHSTAT2); + if (netif_msg_hw(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, " + "PHSTAT2: %04x\n", __func__, + enc28j60_phy_read(priv, PHSTAT1), reg); + duplex = reg & PHSTAT2_DPXSTAT; + + if (reg & PHSTAT2_LSTAT) { + netif_carrier_on(ndev); + if (netif_msg_ifup(priv)) + dev_info(&ndev->dev, "link up - %s\n", + duplex ? "Full duplex" : "Half duplex"); + } else { + if (netif_msg_ifdown(priv)) + dev_info(&ndev->dev, "link down\n"); + netif_carrier_off(ndev); + } +} + +static void enc28j60_tx_clear(struct net_device *ndev, bool err) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + + if (err) + ndev->stats.tx_errors++; + else + ndev->stats.tx_packets++; + + if (priv->tx_skb) { + if (!err) + ndev->stats.tx_bytes += priv->tx_skb->len; + dev_kfree_skb(priv->tx_skb); + priv->tx_skb = NULL; + } + locked_reg_bfclr(priv, ECON1, ECON1_TXRTS); + netif_wake_queue(ndev); +} + +/* + * RX handler + * ignore PKTIF because is unreliable! (look at the errata datasheet) + * check EPKTCNT is the suggested workaround. + * We don't need to clear interrupt flag, automatically done when + * enc28j60_hw_rx() decrements the packet counter. + * Returns how many packet processed. 
+ */ +static int enc28j60_rx_interrupt(struct net_device *ndev) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + int pk_counter, ret; + + pk_counter = locked_regb_read(priv, EPKTCNT); + if (pk_counter && netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME ": intRX, pk_cnt: %d\n", pk_counter); + if (pk_counter > priv->max_pk_counter) { + /* update statistics */ + priv->max_pk_counter = pk_counter; + if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1) + printk(KERN_DEBUG DRV_NAME ": RX max_pk_cnt: %d\n", + priv->max_pk_counter); + } + ret = pk_counter; + while (pk_counter-- > 0) + enc28j60_hw_rx(ndev); + + return ret; +} + +static void enc28j60_irq_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, irq_work); + struct net_device *ndev = priv->netdev; + int intflags, loop; + + if (netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); + /* disable further interrupts */ + locked_reg_bfclr(priv, EIE, EIE_INTIE); + + do { + loop = 0; + intflags = locked_regb_read(priv, EIR); + /* DMA interrupt handler (not currently used) */ + if ((intflags & EIR_DMAIF) != 0) { + loop++; + if (netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME + ": intDMA(%d)\n", loop); + locked_reg_bfclr(priv, EIR, EIR_DMAIF); + } + /* LINK changed handler */ + if ((intflags & EIR_LINKIF) != 0) { + loop++; + if (netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME + ": intLINK(%d)\n", loop); + enc28j60_check_link_status(ndev); + /* read PHIR to clear the flag */ + enc28j60_phy_read(priv, PHIR); + } + /* TX complete handler */ + if ((intflags & EIR_TXIF) != 0) { + bool err = false; + loop++; + if (netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME + ": intTX(%d)\n", loop); + priv->tx_retry_count = 0; + if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) { + if (netif_msg_tx_err(priv)) + dev_err(&ndev->dev, + "Tx Error (aborted)\n"); + err = true; + } + if (netif_msg_tx_done(priv)) { + u8 tsv[TSV_SIZE]; + enc28j60_read_tsv(priv, tsv); + enc28j60_dump_tsv(priv, "Tx Done", tsv); + } + enc28j60_tx_clear(ndev, err); + locked_reg_bfclr(priv, EIR, EIR_TXIF); + } + /* TX Error handler */ + if ((intflags & EIR_TXERIF) != 0) { + u8 tsv[TSV_SIZE]; + + loop++; + if (netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME + ": intTXErr(%d)\n", loop); + locked_reg_bfclr(priv, ECON1, ECON1_TXRTS); + enc28j60_read_tsv(priv, tsv); + if (netif_msg_tx_err(priv)) + enc28j60_dump_tsv(priv, "Tx Error", tsv); + /* Reset TX logic */ + mutex_lock(&priv->lock); + nolock_reg_bfset(priv, ECON1, ECON1_TXRST); + nolock_reg_bfclr(priv, ECON1, ECON1_TXRST); + nolock_txfifo_init(priv, TXSTART_INIT, TXEND_INIT); + mutex_unlock(&priv->lock); + /* Transmit Late collision check for retransmit */ + if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) { + if (netif_msg_tx_err(priv)) + printk(KERN_DEBUG DRV_NAME + ": LateCollision TXErr (%d)\n", + priv->tx_retry_count); + if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT) + locked_reg_bfset(priv, ECON1, + ECON1_TXRTS); + else + enc28j60_tx_clear(ndev, true); + } else + enc28j60_tx_clear(ndev, true); + locked_reg_bfclr(priv, EIR, EIR_TXERIF); + } + /* RX Error handler */ + if ((intflags & EIR_RXERIF) != 0) { + loop++; + if (netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME + ": intRXErr(%d)\n", loop); + /* Check free FIFO space to flag RX overrun */ + if (enc28j60_get_free_rxfifo(priv) <= 0) { + if (netif_msg_rx_err(priv)) + printk(KERN_DEBUG DRV_NAME + ": RX Overrun\n"); + ndev->stats.rx_dropped++; + } + locked_reg_bfclr(priv, EIR, 
EIR_RXERIF); + } + /* RX handler */ + if (enc28j60_rx_interrupt(ndev)) + loop++; + } while (loop); + + /* re-enable interrupts */ + locked_reg_bfset(priv, EIE, EIE_INTIE); + if (netif_msg_intr(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__); +} + +/* + * Hardware transmit function. + * Fill the buffer memory and send the contents of the transmit buffer + * onto the network + */ +static void enc28j60_hw_tx(struct enc28j60_net *priv) +{ + if (netif_msg_tx_queued(priv)) + printk(KERN_DEBUG DRV_NAME + ": Tx Packet Len:%d\n", priv->tx_skb->len); + + if (netif_msg_pktdata(priv)) + dump_packet(__func__, + priv->tx_skb->len, priv->tx_skb->data); + enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data); + +#ifdef CONFIG_ENC28J60_WRITEVERIFY + /* readback and verify written data */ + if (netif_msg_drv(priv)) { + int test_len, k; + u8 test_buf[64]; /* limit the test to the first 64 bytes */ + int okflag; + + test_len = priv->tx_skb->len; + if (test_len > sizeof(test_buf)) + test_len = sizeof(test_buf); + + /* + 1 to skip control byte */ + enc28j60_mem_read(priv, TXSTART_INIT + 1, test_len, test_buf); + okflag = 1; + for (k = 0; k < test_len; k++) { + if (priv->tx_skb->data[k] != test_buf[k]) { + printk(KERN_DEBUG DRV_NAME + ": Error, %d location differ: " + "0x%02x-0x%02x\n", k, + priv->tx_skb->data[k], test_buf[k]); + okflag = 0; + } + } + if (!okflag) + printk(KERN_DEBUG DRV_NAME ": Tx write buffer, " + "verify ERROR!\n"); + } +#endif + /* set TX request flag */ + locked_reg_bfset(priv, ECON1, ECON1_TXRTS); +} + +static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + if (netif_msg_tx_queued(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); + + /* If some error occurs while trying to transmit this + * packet, you should return '1' from this function. + * In such a case you _may not_ do anything to the + * SKB, it is still owned by the network queueing + * layer when an error is returned. This means you + * may not modify any SKB fields, you may not free + * the SKB, etc. + */ + netif_stop_queue(dev); + + /* Remember the skb for deferred processing */ + priv->tx_skb = skb; + schedule_work(&priv->tx_work); + + return NETDEV_TX_OK; +} + +static void enc28j60_tx_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, tx_work); + + /* actual delivery of data */ + enc28j60_hw_tx(priv); +} + +static irqreturn_t enc28j60_irq(int irq, void *dev_id) +{ + struct enc28j60_net *priv = dev_id; + + /* + * Can't do anything in interrupt context because we need to + * block (spi_sync() is blocking) so fire of the interrupt + * handling workqueue. + * Remember that we access enc28j60 registers through SPI bus + * via spi_sync() call. + */ + schedule_work(&priv->irq_work); + + return IRQ_HANDLED; +} + +static void enc28j60_tx_timeout(struct net_device *ndev) +{ + struct enc28j60_net *priv = netdev_priv(ndev); + + if (netif_msg_timer(priv)) + dev_err(&ndev->dev, DRV_NAME " tx timeout\n"); + + ndev->stats.tx_errors++; + /* can't restart safely under softirq */ + schedule_work(&priv->restart_work); +} + +/* + * Open/initialize the board. This is called (in the current kernel) + * sometime after booting when the 'ifconfig' program is run. + * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. 
+ */ +static int enc28j60_net_open(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); + + if (!is_valid_ether_addr(dev->dev_addr)) { + if (netif_msg_ifup(priv)) + dev_err(&dev->dev, "invalid MAC address %pM\n", + dev->dev_addr); + return -EADDRNOTAVAIL; + } + /* Reset the hardware here (and take it out of low power mode) */ + enc28j60_lowpower(priv, false); + enc28j60_hw_disable(priv); + if (!enc28j60_hw_init(priv)) { + if (netif_msg_ifup(priv)) + dev_err(&dev->dev, "hw_reset() failed\n"); + return -EINVAL; + } + /* Update the MAC address (in case user has changed it) */ + enc28j60_set_hw_macaddr(dev); + /* Enable interrupts */ + enc28j60_hw_enable(priv); + /* check link status */ + enc28j60_check_link_status(dev); + /* We are now ready to accept transmit requests from + * the queueing layer of the networking. + */ + netif_start_queue(dev); + + return 0; +} + +/* The inverse routine to net_open(). */ +static int enc28j60_net_close(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); + + enc28j60_hw_disable(priv); + enc28j60_lowpower(priv, true); + netif_stop_queue(dev); + + return 0; +} + +/* + * Set or clear the multicast filter for this adapter + * num_addrs == -1 Promiscuous mode, receive all packets + * num_addrs == 0 Normal mode, filter out multicast packets + * num_addrs > 0 Multicast mode, receive normal and MC packets + */ +static void enc28j60_set_multicast_list(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + int oldfilter = priv->rxfilter; + + if (dev->flags & IFF_PROMISC) { + if (netif_msg_link(priv)) + dev_info(&dev->dev, "promiscuous mode\n"); + priv->rxfilter = RXFILTER_PROMISC; + } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) { + if (netif_msg_link(priv)) + dev_info(&dev->dev, "%smulticast mode\n", + (dev->flags & IFF_ALLMULTI) ? "all-" : ""); + priv->rxfilter = RXFILTER_MULTI; + } else { + if (netif_msg_link(priv)) + dev_info(&dev->dev, "normal mode\n"); + priv->rxfilter = RXFILTER_NORMAL; + } + + if (oldfilter != priv->rxfilter) + schedule_work(&priv->setrx_work); +} + +static void enc28j60_setrx_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, setrx_work); + + if (priv->rxfilter == RXFILTER_PROMISC) { + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": promiscuous mode\n"); + locked_regb_write(priv, ERXFCON, 0x00); + } else if (priv->rxfilter == RXFILTER_MULTI) { + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": multicast mode\n"); + locked_regb_write(priv, ERXFCON, + ERXFCON_UCEN | ERXFCON_CRCEN | + ERXFCON_BCEN | ERXFCON_MCEN); + } else { + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": normal mode\n"); + locked_regb_write(priv, ERXFCON, + ERXFCON_UCEN | ERXFCON_CRCEN | + ERXFCON_BCEN); + } +} + +static void enc28j60_restart_work_handler(struct work_struct *work) +{ + struct enc28j60_net *priv = + container_of(work, struct enc28j60_net, restart_work); + struct net_device *ndev = priv->netdev; + int ret; + + rtnl_lock(); + if (netif_running(ndev)) { + enc28j60_net_close(ndev); + ret = enc28j60_net_open(ndev); + if (unlikely(ret)) { + dev_info(&ndev->dev, " could not restart %d\n", ret); + dev_close(ndev); + } + } + rtnl_unlock(); +} + +/* ......................... ETHTOOL SUPPORT ........................... 
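+ *
+ * The PHY is fixed at 10 Mbit/s, so only the duplex mode is really
+ * selectable; from user space that would be something like (assuming
+ * the interface is named eth0):
+ *
+ *   ethtool -s eth0 speed 10 duplex full autoneg off
+ *
+ * which reaches enc28j60_set_settings() -> enc28j60_setlink() and is
+ * only accepted while the interface is down.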
*/ + +static void +enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, + dev_name(dev->dev.parent), sizeof(info->bus_info)); +} + +static int +enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + cmd->transceiver = XCVR_INTERNAL; + cmd->supported = SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_TP; + ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + cmd->port = PORT_TP; + cmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static int +enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + return enc28j60_setlink(dev, cmd->autoneg, + ethtool_cmd_speed(cmd), cmd->duplex); +} + +static u32 enc28j60_get_msglevel(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void enc28j60_set_msglevel(struct net_device *dev, u32 val) +{ + struct enc28j60_net *priv = netdev_priv(dev); + priv->msg_enable = val; +} + +static const struct ethtool_ops enc28j60_ethtool_ops = { + .get_settings = enc28j60_get_settings, + .set_settings = enc28j60_set_settings, + .get_drvinfo = enc28j60_get_drvinfo, + .get_msglevel = enc28j60_get_msglevel, + .set_msglevel = enc28j60_set_msglevel, +}; + +static int enc28j60_chipset_init(struct net_device *dev) +{ + struct enc28j60_net *priv = netdev_priv(dev); + + return enc28j60_hw_init(priv); +} + +static const struct net_device_ops enc28j60_netdev_ops = { + .ndo_open = enc28j60_net_open, + .ndo_stop = enc28j60_net_close, + .ndo_start_xmit = enc28j60_send_packet, + .ndo_set_rx_mode = enc28j60_set_multicast_list, + .ndo_set_mac_address = enc28j60_set_mac_address, + .ndo_tx_timeout = enc28j60_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int enc28j60_probe(struct spi_device *spi) +{ + struct net_device *dev; + struct enc28j60_net *priv; + int ret = 0; + + if (netif_msg_drv(&debug)) + dev_info(&spi->dev, DRV_NAME " Ethernet driver %s loaded\n", + DRV_VERSION); + + dev = alloc_etherdev(sizeof(struct enc28j60_net)); + if (!dev) { + ret = -ENOMEM; + goto error_alloc; + } + priv = netdev_priv(dev); + + priv->netdev = dev; /* priv to netdev reference */ + priv->spi = spi; /* priv to spi reference */ + priv->msg_enable = netif_msg_init(debug.msg_enable, + ENC28J60_MSG_DEFAULT); + mutex_init(&priv->lock); + INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler); + INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler); + INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler); + INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler); + spi_set_drvdata(spi, priv); /* spi to priv reference */ + SET_NETDEV_DEV(dev, &spi->dev); + + if (!enc28j60_chipset_init(dev)) { + if (netif_msg_probe(priv)) + dev_info(&spi->dev, DRV_NAME " chip not found\n"); + ret = -EIO; + goto error_irq; + } + eth_hw_addr_random(dev); + enc28j60_set_hw_macaddr(dev); + + /* Board setup must set the relevant edge trigger type; + * level triggers won't currently work. 
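+ * A typical board setup would pass the IRQ in spi_board_info
+ * (e.g. .irq = gpio_to_irq(<int gpio>)) and configure that line
+ * for falling-edge triggering, for instance with
+ * irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING), since the INT
+ * pin is driven low on an interrupt.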
+ */ + ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv); + if (ret < 0) { + if (netif_msg_probe(priv)) + dev_err(&spi->dev, DRV_NAME ": request irq %d failed " + "(ret = %d)\n", spi->irq, ret); + goto error_irq; + } + + dev->if_port = IF_PORT_10BASET; + dev->irq = spi->irq; + dev->netdev_ops = &enc28j60_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &enc28j60_ethtool_ops; + + enc28j60_lowpower(priv, true); + + ret = register_netdev(dev); + if (ret) { + if (netif_msg_probe(priv)) + dev_err(&spi->dev, "register netdev " DRV_NAME + " failed (ret = %d)\n", ret); + goto error_register; + } + dev_info(&dev->dev, DRV_NAME " driver registered\n"); + + return 0; + +error_register: + free_irq(spi->irq, priv); +error_irq: + free_netdev(dev); +error_alloc: + return ret; +} + +static int enc28j60_remove(struct spi_device *spi) +{ + struct enc28j60_net *priv = spi_get_drvdata(spi); + + if (netif_msg_drv(priv)) + printk(KERN_DEBUG DRV_NAME ": remove\n"); + + unregister_netdev(priv->netdev); + free_irq(spi->irq, priv); + free_netdev(priv->netdev); + + return 0; +} + +static struct spi_driver enc28j60_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = enc28j60_probe, + .remove = enc28j60_remove, +}; + +static int __init enc28j60_init(void) +{ + msec20_to_jiffies = msecs_to_jiffies(20); + + return spi_register_driver(&enc28j60_driver); +} + +module_init(enc28j60_init); + +static void __exit enc28j60_exit(void) +{ + spi_unregister_driver(&enc28j60_driver); +} + +module_exit(enc28j60_exit); + +MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); +MODULE_AUTHOR("Claudio Lanconelli "); +MODULE_LICENSE("GPL"); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)"); +MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/microchip/enc28j60_hw.h b/drivers/net/ethernet/microchip/enc28j60_hw.h new file mode 100644 index 000000000..25b41de49 --- /dev/null +++ b/drivers/net/ethernet/microchip/enc28j60_hw.h @@ -0,0 +1,309 @@ +/* + * enc28j60_hw.h: EDTP FrameThrower style enc28j60 registers + * + * $Id: enc28j60_hw.h,v 1.9 2007/12/14 11:59:16 claudio Exp $ + */ + +#ifndef _ENC28J60_HW_H +#define _ENC28J60_HW_H + +/* + * ENC28J60 Control Registers + * Control register definitions are a combination of address, + * bank number, and Ethernet/MAC/PHY indicator bits. 
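+ * For example, MAADR5 below is defined as (0x04|0x60|SPRD_MASK):
+ * register address 0x04 in bank 3, flagged as a MAC register. The
+ * individual fields are: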
+ * - Register address (bits 0-4) + * - Bank number (bits 5-6) + * - MAC/MII indicator (bit 7) + */ +#define ADDR_MASK 0x1F +#define BANK_MASK 0x60 +#define SPRD_MASK 0x80 +/* All-bank registers */ +#define EIE 0x1B +#define EIR 0x1C +#define ESTAT 0x1D +#define ECON2 0x1E +#define ECON1 0x1F +/* Bank 0 registers */ +#define ERDPTL (0x00|0x00) +#define ERDPTH (0x01|0x00) +#define EWRPTL (0x02|0x00) +#define EWRPTH (0x03|0x00) +#define ETXSTL (0x04|0x00) +#define ETXSTH (0x05|0x00) +#define ETXNDL (0x06|0x00) +#define ETXNDH (0x07|0x00) +#define ERXSTL (0x08|0x00) +#define ERXSTH (0x09|0x00) +#define ERXNDL (0x0A|0x00) +#define ERXNDH (0x0B|0x00) +#define ERXRDPTL (0x0C|0x00) +#define ERXRDPTH (0x0D|0x00) +#define ERXWRPTL (0x0E|0x00) +#define ERXWRPTH (0x0F|0x00) +#define EDMASTL (0x10|0x00) +#define EDMASTH (0x11|0x00) +#define EDMANDL (0x12|0x00) +#define EDMANDH (0x13|0x00) +#define EDMADSTL (0x14|0x00) +#define EDMADSTH (0x15|0x00) +#define EDMACSL (0x16|0x00) +#define EDMACSH (0x17|0x00) +/* Bank 1 registers */ +#define EHT0 (0x00|0x20) +#define EHT1 (0x01|0x20) +#define EHT2 (0x02|0x20) +#define EHT3 (0x03|0x20) +#define EHT4 (0x04|0x20) +#define EHT5 (0x05|0x20) +#define EHT6 (0x06|0x20) +#define EHT7 (0x07|0x20) +#define EPMM0 (0x08|0x20) +#define EPMM1 (0x09|0x20) +#define EPMM2 (0x0A|0x20) +#define EPMM3 (0x0B|0x20) +#define EPMM4 (0x0C|0x20) +#define EPMM5 (0x0D|0x20) +#define EPMM6 (0x0E|0x20) +#define EPMM7 (0x0F|0x20) +#define EPMCSL (0x10|0x20) +#define EPMCSH (0x11|0x20) +#define EPMOL (0x14|0x20) +#define EPMOH (0x15|0x20) +#define EWOLIE (0x16|0x20) +#define EWOLIR (0x17|0x20) +#define ERXFCON (0x18|0x20) +#define EPKTCNT (0x19|0x20) +/* Bank 2 registers */ +#define MACON1 (0x00|0x40|SPRD_MASK) +/* #define MACON2 (0x01|0x40|SPRD_MASK) */ +#define MACON3 (0x02|0x40|SPRD_MASK) +#define MACON4 (0x03|0x40|SPRD_MASK) +#define MABBIPG (0x04|0x40|SPRD_MASK) +#define MAIPGL (0x06|0x40|SPRD_MASK) +#define MAIPGH (0x07|0x40|SPRD_MASK) +#define MACLCON1 (0x08|0x40|SPRD_MASK) +#define MACLCON2 (0x09|0x40|SPRD_MASK) +#define MAMXFLL (0x0A|0x40|SPRD_MASK) +#define MAMXFLH (0x0B|0x40|SPRD_MASK) +#define MAPHSUP (0x0D|0x40|SPRD_MASK) +#define MICON (0x11|0x40|SPRD_MASK) +#define MICMD (0x12|0x40|SPRD_MASK) +#define MIREGADR (0x14|0x40|SPRD_MASK) +#define MIWRL (0x16|0x40|SPRD_MASK) +#define MIWRH (0x17|0x40|SPRD_MASK) +#define MIRDL (0x18|0x40|SPRD_MASK) +#define MIRDH (0x19|0x40|SPRD_MASK) +/* Bank 3 registers */ +#define MAADR1 (0x00|0x60|SPRD_MASK) +#define MAADR0 (0x01|0x60|SPRD_MASK) +#define MAADR3 (0x02|0x60|SPRD_MASK) +#define MAADR2 (0x03|0x60|SPRD_MASK) +#define MAADR5 (0x04|0x60|SPRD_MASK) +#define MAADR4 (0x05|0x60|SPRD_MASK) +#define EBSTSD (0x06|0x60) +#define EBSTCON (0x07|0x60) +#define EBSTCSL (0x08|0x60) +#define EBSTCSH (0x09|0x60) +#define MISTAT (0x0A|0x60|SPRD_MASK) +#define EREVID (0x12|0x60) +#define ECOCON (0x15|0x60) +#define EFLOCON (0x17|0x60) +#define EPAUSL (0x18|0x60) +#define EPAUSH (0x19|0x60) +/* PHY registers */ +#define PHCON1 0x00 +#define PHSTAT1 0x01 +#define PHHID1 0x02 +#define PHHID2 0x03 +#define PHCON2 0x10 +#define PHSTAT2 0x11 +#define PHIE 0x12 +#define PHIR 0x13 +#define PHLCON 0x14 + +/* ENC28J60 EIE Register Bit Definitions */ +#define EIE_INTIE 0x80 +#define EIE_PKTIE 0x40 +#define EIE_DMAIE 0x20 +#define EIE_LINKIE 0x10 +#define EIE_TXIE 0x08 +/* #define EIE_WOLIE 0x04 (reserved) */ +#define EIE_TXERIE 0x02 +#define EIE_RXERIE 0x01 +/* ENC28J60 EIR Register Bit Definitions */ +#define EIR_PKTIF 0x40 +#define EIR_DMAIF 0x20 +#define 
EIR_LINKIF 0x10 +#define EIR_TXIF 0x08 +/* #define EIR_WOLIF 0x04 (reserved) */ +#define EIR_TXERIF 0x02 +#define EIR_RXERIF 0x01 +/* ENC28J60 ESTAT Register Bit Definitions */ +#define ESTAT_INT 0x80 +#define ESTAT_LATECOL 0x10 +#define ESTAT_RXBUSY 0x04 +#define ESTAT_TXABRT 0x02 +#define ESTAT_CLKRDY 0x01 +/* ENC28J60 ECON2 Register Bit Definitions */ +#define ECON2_AUTOINC 0x80 +#define ECON2_PKTDEC 0x40 +#define ECON2_PWRSV 0x20 +#define ECON2_VRPS 0x08 +/* ENC28J60 ECON1 Register Bit Definitions */ +#define ECON1_TXRST 0x80 +#define ECON1_RXRST 0x40 +#define ECON1_DMAST 0x20 +#define ECON1_CSUMEN 0x10 +#define ECON1_TXRTS 0x08 +#define ECON1_RXEN 0x04 +#define ECON1_BSEL1 0x02 +#define ECON1_BSEL0 0x01 +/* ENC28J60 MACON1 Register Bit Definitions */ +#define MACON1_LOOPBK 0x10 +#define MACON1_TXPAUS 0x08 +#define MACON1_RXPAUS 0x04 +#define MACON1_PASSALL 0x02 +#define MACON1_MARXEN 0x01 +/* ENC28J60 MACON2 Register Bit Definitions */ +#define MACON2_MARST 0x80 +#define MACON2_RNDRST 0x40 +#define MACON2_MARXRST 0x08 +#define MACON2_RFUNRST 0x04 +#define MACON2_MATXRST 0x02 +#define MACON2_TFUNRST 0x01 +/* ENC28J60 MACON3 Register Bit Definitions */ +#define MACON3_PADCFG2 0x80 +#define MACON3_PADCFG1 0x40 +#define MACON3_PADCFG0 0x20 +#define MACON3_TXCRCEN 0x10 +#define MACON3_PHDRLEN 0x08 +#define MACON3_HFRMLEN 0x04 +#define MACON3_FRMLNEN 0x02 +#define MACON3_FULDPX 0x01 +/* ENC28J60 MICMD Register Bit Definitions */ +#define MICMD_MIISCAN 0x02 +#define MICMD_MIIRD 0x01 +/* ENC28J60 MISTAT Register Bit Definitions */ +#define MISTAT_NVALID 0x04 +#define MISTAT_SCAN 0x02 +#define MISTAT_BUSY 0x01 +/* ENC28J60 ERXFCON Register Bit Definitions */ +#define ERXFCON_UCEN 0x80 +#define ERXFCON_ANDOR 0x40 +#define ERXFCON_CRCEN 0x20 +#define ERXFCON_PMEN 0x10 +#define ERXFCON_MPEN 0x08 +#define ERXFCON_HTEN 0x04 +#define ERXFCON_MCEN 0x02 +#define ERXFCON_BCEN 0x01 + +/* ENC28J60 PHY PHCON1 Register Bit Definitions */ +#define PHCON1_PRST 0x8000 +#define PHCON1_PLOOPBK 0x4000 +#define PHCON1_PPWRSV 0x0800 +#define PHCON1_PDPXMD 0x0100 +/* ENC28J60 PHY PHSTAT1 Register Bit Definitions */ +#define PHSTAT1_PFDPX 0x1000 +#define PHSTAT1_PHDPX 0x0800 +#define PHSTAT1_LLSTAT 0x0004 +#define PHSTAT1_JBSTAT 0x0002 +/* ENC28J60 PHY PHSTAT2 Register Bit Definitions */ +#define PHSTAT2_TXSTAT (1 << 13) +#define PHSTAT2_RXSTAT (1 << 12) +#define PHSTAT2_COLSTAT (1 << 11) +#define PHSTAT2_LSTAT (1 << 10) +#define PHSTAT2_DPXSTAT (1 << 9) +#define PHSTAT2_PLRITY (1 << 5) +/* ENC28J60 PHY PHCON2 Register Bit Definitions */ +#define PHCON2_FRCLINK 0x4000 +#define PHCON2_TXDIS 0x2000 +#define PHCON2_JABBER 0x0400 +#define PHCON2_HDLDIS 0x0100 +/* ENC28J60 PHY PHIE Register Bit Definitions */ +#define PHIE_PLNKIE (1 << 4) +#define PHIE_PGEIE (1 << 1) +/* ENC28J60 PHY PHIR Register Bit Definitions */ +#define PHIR_PLNKIF (1 << 4) +#define PHIR_PGEIF (1 << 1) + +/* ENC28J60 Packet Control Byte Bit Definitions */ +#define PKTCTRL_PHUGEEN 0x08 +#define PKTCTRL_PPADEN 0x04 +#define PKTCTRL_PCRCEN 0x02 +#define PKTCTRL_POVERRIDE 0x01 + +/* ENC28J60 Transmit Status Vector */ +#define TSV_TXBYTECNT 0 +#define TSV_TXCOLLISIONCNT 16 +#define TSV_TXCRCERROR 20 +#define TSV_TXLENCHKERROR 21 +#define TSV_TXLENOUTOFRANGE 22 +#define TSV_TXDONE 23 +#define TSV_TXMULTICAST 24 +#define TSV_TXBROADCAST 25 +#define TSV_TXPACKETDEFER 26 +#define TSV_TXEXDEFER 27 +#define TSV_TXEXCOLLISION 28 +#define TSV_TXLATECOLLISION 29 +#define TSV_TXGIANT 30 +#define TSV_TXUNDERRUN 31 +#define TSV_TOTBYTETXONWIRE 32 +#define 
TSV_TXCONTROLFRAME 48 +#define TSV_TXPAUSEFRAME 49 +#define TSV_BACKPRESSUREAPP 50 +#define TSV_TXVLANTAGFRAME 51 + +#define TSV_SIZE 7 +#define TSV_BYTEOF(x) ((x) / 8) +#define TSV_BITMASK(x) (1 << ((x) % 8)) +#define TSV_GETBIT(x, y) (((x)[TSV_BYTEOF(y)] & TSV_BITMASK(y)) ? 1 : 0) + +/* ENC28J60 Receive Status Vector */ +#define RSV_RXLONGEVDROPEV 16 +#define RSV_CARRIEREV 18 +#define RSV_CRCERROR 20 +#define RSV_LENCHECKERR 21 +#define RSV_LENOUTOFRANGE 22 +#define RSV_RXOK 23 +#define RSV_RXMULTICAST 24 +#define RSV_RXBROADCAST 25 +#define RSV_DRIBBLENIBBLE 26 +#define RSV_RXCONTROLFRAME 27 +#define RSV_RXPAUSEFRAME 28 +#define RSV_RXUNKNOWNOPCODE 29 +#define RSV_RXTYPEVLAN 30 + +#define RSV_SIZE 6 +#define RSV_BITMASK(x) (1 << ((x) - 16)) +#define RSV_GETBIT(x, y) (((x) & RSV_BITMASK(y)) ? 1 : 0) + + +/* SPI operation codes */ +#define ENC28J60_READ_CTRL_REG 0x00 +#define ENC28J60_READ_BUF_MEM 0x3A +#define ENC28J60_WRITE_CTRL_REG 0x40 +#define ENC28J60_WRITE_BUF_MEM 0x7A +#define ENC28J60_BIT_FIELD_SET 0x80 +#define ENC28J60_BIT_FIELD_CLR 0xA0 +#define ENC28J60_SOFT_RESET 0xFF + + +/* buffer boundaries applied to internal 8K ram + * entire available packet buffer space is allocated. + * Give TX buffer space for one full ethernet frame (~1500 bytes) + * receive buffer gets the rest */ +#define TXSTART_INIT 0x1A00 +#define TXEND_INIT 0x1FFF + +/* Put RX buffer at 0 as suggested by the Errata datasheet */ +#define RXSTART_INIT 0x0000 +#define RXEND_INIT 0x19FF + +/* maximum ethernet frame length */ +#define MAX_FRAMELEN 1518 + +/* Preferred half duplex: LEDA: Link status LEDB: Rx/Tx activity */ +#define ENC28J60_LAMPS_MODE 0x3476 + +#endif diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig new file mode 100644 index 000000000..1731e050f --- /dev/null +++ b/drivers/net/ethernet/moxa/Kconfig @@ -0,0 +1,30 @@ +# +# MOXART device configuration +# + +config NET_VENDOR_MOXART + bool "MOXA ART devices" + default y + depends on (ARM && ARCH_MOXART) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about MOXA ART devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MOXART + +config ARM_MOXART_ETHER + tristate "MOXART Ethernet support" + depends on ARM && ARCH_MOXART + select NET_CORE + ---help--- + If you wish to compile a kernel for a hardware with MOXA ART SoC and + want to use the internal ethernet then you should answer Y to this. + + +endif # NET_VENDOR_MOXART diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile new file mode 100644 index 000000000..aa3c73e9e --- /dev/null +++ b/drivers/net/ethernet/moxa/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the MOXART network device drivers. +# + +obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c new file mode 100644 index 000000000..81d0f1c86 --- /dev/null +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -0,0 +1,569 @@ +/* MOXA ART Ethernet (RTL8201CP) driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen + * + * Based on code from + * Moxa Technology Co., Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "moxart_ether.h" + +static inline void moxart_emac_write(struct net_device *ndev, + unsigned int reg, unsigned long value) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + writel(value, priv->base + reg); +} + +static void moxart_update_mac_address(struct net_device *ndev) +{ + moxart_emac_write(ndev, REG_MAC_MS_ADDRESS, + ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]))); + moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4, + ((ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | + (ndev->dev_addr[5]))); +} + +static int moxart_set_mac_address(struct net_device *ndev, void *addr) +{ + struct sockaddr *address = addr; + + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len); + moxart_update_mac_address(ndev); + + return 0; +} + +static void moxart_mac_free_memory(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < RX_DESC_NUM; i++) + dma_unmap_single(&ndev->dev, priv->rx_mapping[i], + priv->rx_buf_size, DMA_FROM_DEVICE); + + if (priv->tx_desc_base) + dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, + priv->tx_desc_base, priv->tx_base); + + if (priv->rx_desc_base) + dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, + priv->rx_desc_base, priv->rx_base); + + kfree(priv->tx_buf_base); + kfree(priv->rx_buf_base); +} + +static void moxart_mac_reset(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + writel(SW_RST, priv->base + REG_MAC_CTRL); + while (readl(priv->base + REG_MAC_CTRL) & SW_RST) + mdelay(10); + + writel(0, priv->base + REG_INTERRUPT_MASK); + + priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL; +} + +static void moxart_mac_enable(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + writel(0x00001010, priv->base + REG_INT_TIMER_CTRL); + writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL); + writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL); + + priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M); + writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); + + priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN); + writel(priv->reg_maccr, priv->base + REG_MAC_CTRL); +} + +static void moxart_mac_setup_desc_ring(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + void __iomem *desc; + int i; + + for (i = 0; i < TX_DESC_NUM; i++) { + desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE; + memset(desc, 0, TX_REG_DESC_SIZE); + + priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; + } + writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); + + priv->tx_head = 0; + priv->tx_tail = 0; + + for (i = 0; i < RX_DESC_NUM; i++) { + desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; + memset(desc, 0, RX_REG_DESC_SIZE); + writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, + desc + RX_REG_OFFSET_DESC1); + + priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; + priv->rx_mapping[i] = dma_map_single(&ndev->dev, + priv->rx_buf[i], + priv->rx_buf_size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) + netdev_err(ndev, "DMA mapping error\n"); + + writel(priv->rx_mapping[i], + desc 
+ RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); + writel(priv->rx_buf[i], + desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); + } + writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); + + priv->rx_head = 0; + + /* reset the MAC controller TX/RX desciptor base address */ + writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS); + writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS); +} + +static int moxart_mac_open(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + if (!is_valid_ether_addr(ndev->dev_addr)) + return -EADDRNOTAVAIL; + + napi_enable(&priv->napi); + + moxart_mac_reset(ndev); + moxart_update_mac_address(ndev); + moxart_mac_setup_desc_ring(ndev); + moxart_mac_enable(ndev); + netif_start_queue(ndev); + + netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n", + __func__, readl(priv->base + REG_INTERRUPT_MASK), + readl(priv->base + REG_MAC_CTRL)); + + return 0; +} + +static int moxart_mac_stop(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + napi_disable(&priv->napi); + + netif_stop_queue(ndev); + + /* disable all interrupts */ + writel(0, priv->base + REG_INTERRUPT_MASK); + + /* disable all functions */ + writel(0, priv->base + REG_MAC_CTRL); + + return 0; +} + +static int moxart_rx_poll(struct napi_struct *napi, int budget) +{ + struct moxart_mac_priv_t *priv = container_of(napi, + struct moxart_mac_priv_t, + napi); + struct net_device *ndev = priv->ndev; + struct sk_buff *skb; + void __iomem *desc; + unsigned int desc0, len; + int rx_head = priv->rx_head; + int rx = 0; + + while (rx < budget) { + desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); + desc0 = readl(desc + RX_REG_OFFSET_DESC0); + + if (desc0 & RX_DESC0_DMA_OWN) + break; + + if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL | + RX_DESC0_RUNT | RX_DESC0_ODD_NB)) { + net_dbg_ratelimited("packet error\n"); + priv->stats.rx_dropped++; + priv->stats.rx_errors++; + goto rx_next; + } + + len = desc0 & RX_DESC0_FRAME_LEN_MASK; + + if (len > RX_BUF_SIZE) + len = RX_BUF_SIZE; + + dma_sync_single_for_cpu(&ndev->dev, + priv->rx_mapping[rx_head], + priv->rx_buf_size, DMA_FROM_DEVICE); + skb = netdev_alloc_skb_ip_align(ndev, len); + + if (unlikely(!skb)) { + net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n"); + priv->stats.rx_dropped++; + priv->stats.rx_errors++; + goto rx_next; + } + + memcpy(skb->data, priv->rx_buf[rx_head], len); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi, skb); + rx++; + + ndev->last_rx = jiffies; + priv->stats.rx_packets++; + priv->stats.rx_bytes += len; + if (desc0 & RX_DESC0_MULTICAST) + priv->stats.multicast++; + +rx_next: + writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + + rx_head = RX_NEXT(rx_head); + priv->rx_head = rx_head; + } + + if (rx < budget) { + napi_complete(napi); + } + + priv->reg_imr |= RPKT_FINISH_M; + writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); + + return rx; +} + +static void moxart_tx_finished(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + unsigned tx_head = priv->tx_head; + unsigned tx_tail = priv->tx_tail; + + while (tx_tail != tx_head) { + dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail], + priv->tx_len[tx_tail], DMA_TO_DEVICE); + + priv->stats.tx_packets++; + priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len; + + dev_kfree_skb_irq(priv->tx_skb[tx_tail]); + priv->tx_skb[tx_tail] = NULL; + + tx_tail = TX_NEXT(tx_tail); + } + priv->tx_tail = tx_tail; +} + +static irqreturn_t 
moxart_mac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *) dev_id; + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS); + + if (ists & XPKT_OK_INT_STS) + moxart_tx_finished(ndev); + + if (ists & RPKT_FINISH) { + if (napi_schedule_prep(&priv->napi)) { + priv->reg_imr &= ~RPKT_FINISH_M; + writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); + __napi_schedule(&priv->napi); + } + } + + return IRQ_HANDLED; +} + +static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + void __iomem *desc; + unsigned int len; + unsigned int tx_head = priv->tx_head; + u32 txdes1; + int ret = NETDEV_TX_BUSY; + + desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); + + spin_lock_irq(&priv->txlock); + if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { + net_dbg_ratelimited("no TX space for packet\n"); + priv->stats.tx_dropped++; + goto out_unlock; + } + + len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len; + + priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) { + netdev_err(ndev, "DMA mapping error\n"); + goto out_unlock; + } + + priv->tx_len[tx_head] = len; + priv->tx_skb[tx_head] = skb; + + writel(priv->tx_mapping[tx_head], + desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); + writel(skb->data, + desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); + + if (skb->len < ETH_ZLEN) { + memset(&skb->data[skb->len], + 0, ETH_ZLEN - skb->len); + len = ETH_ZLEN; + } + + dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head], + priv->tx_buf_size, DMA_TO_DEVICE); + + txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); + if (tx_head == TX_DESC_NUM_MASK) + txdes1 |= TX_DESC1_END; + writel(txdes1, desc + TX_REG_OFFSET_DESC1); + writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); + + /* start to send packet */ + writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); + + priv->tx_head = TX_NEXT(tx_head); + + ndev->trans_start = jiffies; + ret = NETDEV_TX_OK; +out_unlock: + spin_unlock_irq(&priv->txlock); + + return ret; +} + +static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + return &priv->stats; +} + +static void moxart_mac_setmulticast(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + struct netdev_hw_addr *ha; + int crc_val; + + netdev_for_each_mc_addr(ha, ndev) { + crc_val = crc32_le(~0, ha->addr, ETH_ALEN); + crc_val = (crc_val >> 26) & 0x3f; + if (crc_val >= 32) { + writel(readl(priv->base + REG_MCAST_HASH_TABLE1) | + (1UL << (crc_val - 32)), + priv->base + REG_MCAST_HASH_TABLE1); + } else { + writel(readl(priv->base + REG_MCAST_HASH_TABLE0) | + (1UL << crc_val), + priv->base + REG_MCAST_HASH_TABLE0); + } + } +} + +static void moxart_mac_set_rx_mode(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + spin_lock_irq(&priv->txlock); + + (ndev->flags & IFF_PROMISC) ? (priv->reg_maccr |= RCV_ALL) : + (priv->reg_maccr &= ~RCV_ALL); + + (ndev->flags & IFF_ALLMULTI) ? 
(priv->reg_maccr |= RX_MULTIPKT) : + (priv->reg_maccr &= ~RX_MULTIPKT); + + if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) { + priv->reg_maccr |= HT_MULTI_EN; + moxart_mac_setmulticast(ndev); + } else { + priv->reg_maccr &= ~HT_MULTI_EN; + } + + writel(priv->reg_maccr, priv->base + REG_MAC_CTRL); + + spin_unlock_irq(&priv->txlock); +} + +static struct net_device_ops moxart_netdev_ops = { + .ndo_open = moxart_mac_open, + .ndo_stop = moxart_mac_stop, + .ndo_start_xmit = moxart_mac_start_xmit, + .ndo_get_stats = moxart_mac_get_stats, + .ndo_set_rx_mode = moxart_mac_set_rx_mode, + .ndo_set_mac_address = moxart_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int moxart_mac_probe(struct platform_device *pdev) +{ + struct device *p_dev = &pdev->dev; + struct device_node *node = p_dev->of_node; + struct net_device *ndev; + struct moxart_mac_priv_t *priv; + struct resource *res; + unsigned int irq; + int ret; + + ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t)); + if (!ndev) + return -ENOMEM; + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) { + netdev_err(ndev, "irq_of_parse_and_map failed\n"); + ret = -EINVAL; + goto irq_map_fail; + } + + priv = netdev_priv(ndev); + priv->ndev = ndev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ndev->base_addr = res->start; + priv->base = devm_ioremap_resource(p_dev, res); + ret = IS_ERR(priv->base); + if (ret) { + dev_err(p_dev, "devm_ioremap_resource failed\n"); + goto init_fail; + } + + spin_lock_init(&priv->txlock); + + priv->tx_buf_size = TX_BUF_SIZE; + priv->rx_buf_size = RX_BUF_SIZE; + + priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * + TX_DESC_NUM, &priv->tx_base, + GFP_DMA | GFP_KERNEL); + if (priv->tx_desc_base == NULL) { + ret = -ENOMEM; + goto init_fail; + } + + priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * + RX_DESC_NUM, &priv->rx_base, + GFP_DMA | GFP_KERNEL); + if (priv->rx_desc_base == NULL) { + ret = -ENOMEM; + goto init_fail; + } + + priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM, + GFP_ATOMIC); + if (!priv->tx_buf_base) { + ret = -ENOMEM; + goto init_fail; + } + + priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM, + GFP_ATOMIC); + if (!priv->rx_buf_base) { + ret = -ENOMEM; + goto init_fail; + } + + platform_set_drvdata(pdev, ndev); + + ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0, + pdev->name, ndev); + if (ret) { + netdev_err(ndev, "devm_request_irq failed\n"); + goto init_fail; + } + + ndev->netdev_ops = &moxart_netdev_ops; + netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM); + ndev->priv_flags |= IFF_UNICAST_FLT; + ndev->irq = irq; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ret = register_netdev(ndev); + if (ret) { + free_netdev(ndev); + goto init_fail; + } + + netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n", + __func__, ndev->irq, ndev->dev_addr); + + return 0; + +init_fail: + netdev_err(ndev, "init failed\n"); + moxart_mac_free_memory(ndev); +irq_map_fail: + free_netdev(ndev); + return ret; +} + +static int moxart_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + unregister_netdev(ndev); + free_irq(ndev->irq, ndev); + moxart_mac_free_memory(ndev); + free_netdev(ndev); + + return 0; +} + +static const struct of_device_id moxart_mac_match[] = { + { .compatible = "moxa,moxart-mac" }, + { } +}; + +static struct platform_driver moxart_mac_driver = { + .probe = moxart_mac_probe, + .remove = moxart_remove, + .driver 
= { + .name = "moxart-ethernet", + .of_match_table = moxart_mac_match, + }, +}; +module_platform_driver(moxart_mac_driver); + +MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jonas Jensen "); diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h new file mode 100644 index 000000000..2be9280d6 --- /dev/null +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -0,0 +1,330 @@ +/* MOXA ART Ethernet (RTL8201CP) driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen + * + * Based on code from + * Moxa Technology Co., Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MOXART_ETHERNET_H +#define _MOXART_ETHERNET_H + +#define TX_REG_OFFSET_DESC0 0 +#define TX_REG_OFFSET_DESC1 4 +#define TX_REG_OFFSET_DESC2 8 +#define TX_REG_DESC_SIZE 16 + +#define RX_REG_OFFSET_DESC0 0 +#define RX_REG_OFFSET_DESC1 4 +#define RX_REG_OFFSET_DESC2 8 +#define RX_REG_DESC_SIZE 16 + +#define TX_DESC0_PKT_LATE_COL 0x1 /* abort, late collision */ +#define TX_DESC0_RX_PKT_EXS_COL 0x2 /* abort, >16 collisions */ +#define TX_DESC0_DMA_OWN 0x80000000 /* owned by controller */ +#define TX_DESC1_BUF_SIZE_MASK 0x7ff +#define TX_DESC1_LTS 0x8000000 /* last TX packet */ +#define TX_DESC1_FTS 0x10000000 /* first TX packet */ +#define TX_DESC1_FIFO_COMPLETE 0x20000000 +#define TX_DESC1_INTR_COMPLETE 0x40000000 +#define TX_DESC1_END 0x80000000 +#define TX_DESC2_ADDRESS_PHYS 0 +#define TX_DESC2_ADDRESS_VIRT 4 + +#define RX_DESC0_FRAME_LEN 0 +#define RX_DESC0_FRAME_LEN_MASK 0x7FF +#define RX_DESC0_MULTICAST 0x10000 +#define RX_DESC0_BROADCAST 0x20000 +#define RX_DESC0_ERR 0x40000 +#define RX_DESC0_CRC_ERR 0x80000 +#define RX_DESC0_FTL 0x100000 +#define RX_DESC0_RUNT 0x200000 /* packet less than 64 bytes */ +#define RX_DESC0_ODD_NB 0x400000 /* receive odd nibbles */ +#define RX_DESC0_LRS 0x10000000 /* last receive segment */ +#define RX_DESC0_FRS 0x20000000 /* first receive segment */ +#define RX_DESC0_DMA_OWN 0x80000000 +#define RX_DESC1_BUF_SIZE_MASK 0x7FF +#define RX_DESC1_END 0x80000000 +#define RX_DESC2_ADDRESS_PHYS 0 +#define RX_DESC2_ADDRESS_VIRT 4 + +#define TX_DESC_NUM 64 +#define TX_DESC_NUM_MASK (TX_DESC_NUM-1) +#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) +#define TX_BUF_SIZE 1600 +#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) + +#define RX_DESC_NUM 64 +#define RX_DESC_NUM_MASK (RX_DESC_NUM-1) +#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK)) +#define RX_BUF_SIZE 1600 +#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1) + +#define REG_INTERRUPT_STATUS 0 +#define REG_INTERRUPT_MASK 4 +#define REG_MAC_MS_ADDRESS 8 +#define REG_MAC_LS_ADDRESS 12 +#define REG_MCAST_HASH_TABLE0 16 +#define REG_MCAST_HASH_TABLE1 20 +#define REG_TX_POLL_DEMAND 24 +#define REG_RX_POLL_DEMAND 28 +#define REG_TXR_BASE_ADDRESS 32 +#define REG_RXR_BASE_ADDRESS 36 +#define REG_INT_TIMER_CTRL 40 +#define REG_APOLL_TIMER_CTRL 44 +#define REG_DMA_BLEN_CTRL 48 +#define REG_RESERVED1 52 +#define REG_MAC_CTRL 136 +#define REG_MAC_STATUS 140 +#define REG_PHY_CTRL 144 +#define REG_PHY_WRITE_DATA 148 +#define REG_FLOW_CTRL 152 +#define REG_BACK_PRESSURE 156 +#define REG_RESERVED2 160 +#define REG_TEST_SEED 196 +#define REG_DMA_FIFO_STATE 200 +#define REG_TEST_MODE 204 +#define REG_RESERVED3 208 +#define REG_TX_COL_COUNTER 212 +#define REG_RPF_AEP_COUNTER 216 +#define REG_XM_PG_COUNTER 
220 +#define REG_RUNT_TLC_COUNTER 224 +#define REG_CRC_FTL_COUNTER 228 +#define REG_RLC_RCC_COUNTER 232 +#define REG_BROC_COUNTER 236 +#define REG_MULCA_COUNTER 240 +#define REG_RP_COUNTER 244 +#define REG_XP_COUNTER 248 + +#define REG_PHY_CTRL_OFFSET 0x0 +#define REG_PHY_STATUS 0x1 +#define REG_PHY_ID1 0x2 +#define REG_PHY_ID2 0x3 +#define REG_PHY_ANA 0x4 +#define REG_PHY_ANLPAR 0x5 +#define REG_PHY_ANE 0x6 +#define REG_PHY_ECTRL1 0x10 +#define REG_PHY_QPDS 0x11 +#define REG_PHY_10BOP 0x12 +#define REG_PHY_ECTRL2 0x13 +#define REG_PHY_FTMAC100_WRITE 0x8000000 +#define REG_PHY_FTMAC100_READ 0x4000000 + +/* REG_INTERRUPT_STATUS */ +#define RPKT_FINISH BIT(0) /* DMA data received */ +#define NORXBUF BIT(1) /* receive buffer unavailable */ +#define XPKT_FINISH BIT(2) /* DMA moved data to TX FIFO */ +#define NOTXBUF BIT(3) /* transmit buffer unavailable */ +#define XPKT_OK_INT_STS BIT(4) /* transmit to ethernet success */ +#define XPKT_LOST_INT_STS BIT(5) /* transmit ethernet lost (collision) */ +#define RPKT_SAV BIT(6) /* FIFO receive success */ +#define RPKT_LOST_INT_STS BIT(7) /* FIFO full, receive failed */ +#define AHB_ERR BIT(8) /* AHB error */ +#define PHYSTS_CHG BIT(9) /* PHY link status change */ + +/* REG_INTERRUPT_MASK */ +#define RPKT_FINISH_M BIT(0) +#define NORXBUF_M BIT(1) +#define XPKT_FINISH_M BIT(2) +#define NOTXBUF_M BIT(3) +#define XPKT_OK_M BIT(4) +#define XPKT_LOST_M BIT(5) +#define RPKT_SAV_M BIT(6) +#define RPKT_LOST_M BIT(7) +#define AHB_ERR_M BIT(8) +#define PHYSTS_CHG_M BIT(9) + +/* REG_MAC_MS_ADDRESS */ +#define MAC_MADR_MASK 0xffff /* 2 MSB MAC address */ + +/* REG_INT_TIMER_CTRL */ +#define TXINT_TIME_SEL BIT(15) /* TX cycle time period */ +#define TXINT_THR_MASK 0x7000 +#define TXINT_CNT_MASK 0xf00 +#define RXINT_TIME_SEL BIT(7) /* RX cycle time period */ +#define RXINT_THR_MASK 0x70 +#define RXINT_CNT_MASK 0xF + +/* REG_APOLL_TIMER_CTRL */ +#define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */ +#define TXPOLL_CNT_MASK 0xf00 +#define TXPOLL_CNT_SHIFT_BIT 8 +#define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */ +#define RXPOLL_CNT_MASK 0xF +#define RXPOLL_CNT_SHIFT_BIT 0 + +/* REG_DMA_BLEN_CTRL */ +#define RX_THR_EN BIT(9) /* RX FIFO threshold arbitration */ +#define RXFIFO_HTHR_MASK 0x1c0 +#define RXFIFO_LTHR_MASK 0x38 +#define INCR16_EN BIT(2) /* AHB bus INCR16 burst command */ +#define INCR8_EN BIT(1) /* AHB bus INCR8 burst command */ +#define INCR4_EN BIT(0) /* AHB bus INCR4 burst command */ + +/* REG_MAC_CTRL */ +#define RX_BROADPKT BIT(17) /* receive broadcast packets */ +#define RX_MULTIPKT BIT(16) /* receive all multicast packets */ +#define FULLDUP BIT(15) /* full duplex */ +#define CRC_APD BIT(14) /* append CRC to transmitted packet */ +#define RCV_ALL BIT(12) /* ignore incoming packet destination */ +#define RX_FTL BIT(11) /* accept packets larger than 1518 B */ +#define RX_RUNT BIT(10) /* accept packets smaller than 64 B */ +#define HT_MULTI_EN BIT(9) /* accept on hash and mcast pass */ +#define RCV_EN BIT(8) /* receiver enable */ +#define ENRX_IN_HALFTX BIT(6) /* enable receive in half duplex mode */ +#define XMT_EN BIT(5) /* transmit enable */ +#define CRC_DIS BIT(4) /* disable CRC check when receiving */ +#define LOOP_EN BIT(3) /* internal loop-back */ +#define SW_RST BIT(2) /* software reset, last 64 AHB clocks */ +#define RDMA_EN BIT(1) /* enable receive DMA chan */ +#define XDMA_EN BIT(0) /* enable transmit DMA chan */ + +/* REG_MAC_STATUS */ +#define COL_EXCEED BIT(11) /* more than 16 collisions */ +#define LATE_COL BIT(10) /* transmit 
late collision detected */ +#define XPKT_LOST BIT(9) /* transmit to ethernet lost */ +#define XPKT_OK BIT(8) /* transmit to ethernet success */ +#define RUNT_MAC_STS BIT(7) /* receive runt detected */ +#define FTL_MAC_STS BIT(6) /* receive frame too long detected */ +#define CRC_ERR_MAC_STS BIT(5) +#define RPKT_LOST BIT(4) /* RX FIFO full, receive failed */ +#define RPKT_SAVE BIT(3) /* RX FIFO receive success */ +#define COL BIT(2) /* collision, incoming packet dropped */ +#define MCPU_BROADCAST BIT(1) +#define MCPU_MULTICAST BIT(0) + +/* REG_PHY_CTRL */ +#define MIIWR BIT(27) /* init write sequence (auto cleared)*/ +#define MIIRD BIT(26) +#define REGAD_MASK 0x3e00000 +#define PHYAD_MASK 0x1f0000 +#define MIIRDATA_MASK 0xffff + +/* REG_PHY_WRITE_DATA */ +#define MIIWDATA_MASK 0xffff + +/* REG_FLOW_CTRL */ +#define PAUSE_TIME_MASK 0xffff0000 +#define FC_HIGH_MASK 0xf000 +#define FC_LOW_MASK 0xf00 +#define RX_PAUSE BIT(4) /* receive pause frame */ +#define TX_PAUSED BIT(3) /* transmit pause due to receive */ +#define FCTHR_EN BIT(2) /* enable threshold mode. */ +#define TX_PAUSE BIT(1) /* transmit pause frame */ +#define FC_EN BIT(0) /* flow control mode enable */ + +/* REG_BACK_PRESSURE */ +#define BACKP_LOW_MASK 0xf00 +#define BACKP_JAM_LEN_MASK 0xf0 +#define BACKP_MODE BIT(1) /* address mode */ +#define BACKP_ENABLE BIT(0) + +/* REG_TEST_SEED */ +#define TEST_SEED_MASK 0x3fff + +/* REG_DMA_FIFO_STATE */ +#define TX_DMA_REQUEST BIT(31) +#define RX_DMA_REQUEST BIT(30) +#define TX_DMA_GRANT BIT(29) +#define RX_DMA_GRANT BIT(28) +#define TX_FIFO_EMPTY BIT(27) +#define RX_FIFO_EMPTY BIT(26) +#define TX_DMA2_SM_MASK 0x7000 +#define TX_DMA1_SM_MASK 0xf00 +#define RX_DMA2_SM_MASK 0x70 +#define RX_DMA1_SM_MASK 0xF + +/* REG_TEST_MODE */ +#define SINGLE_PKT BIT(26) /* single packet mode */ +#define PTIMER_TEST BIT(25) /* automatic polling timer test mode */ +#define ITIMER_TEST BIT(24) /* interrupt timer test mode */ +#define TEST_SEED_SELECT BIT(22) +#define SEED_SELECT BIT(21) +#define TEST_MODE BIT(20) +#define TEST_TIME_MASK 0xffc00 +#define TEST_EXCEL_MASK 0x3e0 + +/* REG_TX_COL_COUNTER */ +#define TX_MCOL_MASK 0xffff0000 +#define TX_MCOL_SHIFT_BIT 16 +#define TX_SCOL_MASK 0xffff +#define TX_SCOL_SHIFT_BIT 0 + +/* REG_RPF_AEP_COUNTER */ +#define RPF_MASK 0xffff0000 +#define RPF_SHIFT_BIT 16 +#define AEP_MASK 0xffff +#define AEP_SHIFT_BIT 0 + +/* REG_XM_PG_COUNTER */ +#define XM_MASK 0xffff0000 +#define XM_SHIFT_BIT 16 +#define PG_MASK 0xffff +#define PG_SHIFT_BIT 0 + +/* REG_RUNT_TLC_COUNTER */ +#define RUNT_CNT_MASK 0xffff0000 +#define RUNT_CNT_SHIFT_BIT 16 +#define TLCC_MASK 0xffff +#define TLCC_SHIFT_BIT 0 + +/* REG_CRC_FTL_COUNTER */ +#define CRCER_CNT_MASK 0xffff0000 +#define CRCER_CNT_SHIFT_BIT 16 +#define FTL_CNT_MASK 0xffff +#define FTL_CNT_SHIFT_BIT 0 + +/* REG_RLC_RCC_COUNTER */ +#define RLC_MASK 0xffff0000 +#define RLC_SHIFT_BIT 16 +#define RCC_MASK 0xffff +#define RCC_SHIFT_BIT 0 + +/* REG_PHY_STATUS */ +#define AN_COMPLETE 0x20 +#define LINK_STATUS 0x4 + +struct moxart_mac_priv_t { + void __iomem *base; + struct net_device_stats stats; + unsigned int reg_maccr; + unsigned int reg_imr; + struct napi_struct napi; + struct net_device *ndev; + + dma_addr_t rx_base; + dma_addr_t rx_mapping[RX_DESC_NUM]; + void __iomem *rx_desc_base; + unsigned char *rx_buf_base; + unsigned char *rx_buf[RX_DESC_NUM]; + unsigned int rx_head; + unsigned int rx_buf_size; + + dma_addr_t tx_base; + dma_addr_t tx_mapping[TX_DESC_NUM]; + void __iomem *tx_desc_base; + unsigned char *tx_buf_base; + unsigned char 
*tx_buf[RX_DESC_NUM];
+	unsigned int tx_head;
+	unsigned int tx_buf_size;
+
+	spinlock_t txlock;
+	unsigned int tx_len[TX_DESC_NUM];
+	struct sk_buff *tx_skb[TX_DESC_NUM];
+	unsigned int tx_tail;
+};
+
+#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver TX buffer is too large!
+#endif
+#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver RX buffer is too large!
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/myricom/Kconfig b/drivers/net/ethernet/myricom/Kconfig
new file mode 100644
index 000000000..3932d081f
--- /dev/null
+++ b/drivers/net/ethernet/myricom/Kconfig
@@ -0,0 +1,46 @@
+#
+# Myricom device configuration
+#
+
+config NET_VENDOR_MYRI
+	bool "Myricom devices"
+	default y
+	depends on PCI && INET
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say
+	  Y and read the Ethernet-HOWTO, available from
+	  .
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Myricom cards. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if NET_VENDOR_MYRI
+
+config MYRI10GE
+	tristate "Myricom Myri-10G Ethernet support"
+	depends on PCI && INET
+	select FW_LOADER
+	select CRC32
+	---help---
+	  This driver supports Myricom Myri-10G Dual Protocol interface in
+	  Ethernet mode. If the eeprom on your board is not recent enough,
+	  you will need a newer firmware image.
+	  You may get this image or more information, at:
+
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called myri10ge.
+
+config MYRI10GE_DCA
+	bool "Direct Cache Access (DCA) Support"
+	default y
+	depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
+	---help---
+	  Say Y here if you want to use Direct Cache Access (DCA) in the
+	  driver. DCA is a method for warming the CPU cache before data
+	  is used, with the intent of lessening the impact of cache misses.
+
+endif # NET_VENDOR_MYRI
diff --git a/drivers/net/ethernet/myricom/Makefile b/drivers/net/ethernet/myricom/Makefile
new file mode 100644
index 000000000..296c0a100
--- /dev/null
+++ b/drivers/net/ethernet/myricom/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom network device drivers.
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge/
diff --git a/drivers/net/ethernet/myricom/myri10ge/Makefile b/drivers/net/ethernet/myricom/myri10ge/Makefile
new file mode 100644
index 000000000..5df891647
--- /dev/null
+++ b/drivers/net/ethernet/myricom/myri10ge/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom Myri-10G ethernet driver
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge.o
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
new file mode 100644
index 000000000..6478fcc4a
--- /dev/null
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -0,0 +1,4279 @@
+/*************************************************************************
+ * myri10ge.c: Myricom Myri-10G Ethernet driver.
+ *
+ * Copyright (C) 2005 - 2011 Myricom, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2.
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Myricom, Inc. nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * + * If the eeprom on your board is not recent enough, you will need to get a + * newer firmware image at: + * http://www.myri.com/scs/download-Myri10GE.html + * + * Contact Information: + * + * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006 + *************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "myri10ge_mcp.h" +#include "myri10ge_mcp_gen_header.h" + +#define MYRI10GE_VERSION_STR "1.5.3-1.534" + +MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); +MODULE_AUTHOR("Maintainer: help@myri.com"); +MODULE_VERSION(MYRI10GE_VERSION_STR); +MODULE_LICENSE("Dual BSD/GPL"); + +#define MYRI10GE_MAX_ETHER_MTU 9014 + +#define MYRI10GE_ETH_STOPPED 0 +#define MYRI10GE_ETH_STOPPING 1 +#define MYRI10GE_ETH_STARTING 2 +#define MYRI10GE_ETH_RUNNING 3 +#define MYRI10GE_ETH_OPEN_FAILED 4 + +#define MYRI10GE_EEPROM_STRINGS_SIZE 256 +#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2) + +#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff) +#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff + +#define MYRI10GE_ALLOC_ORDER 0 +#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE) +#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1) + +#define MYRI10GE_MAX_SLICES 32 + +struct myri10ge_rx_buffer_state { + struct page *page; + int page_offset; + DEFINE_DMA_UNMAP_ADDR(bus); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct myri10ge_tx_buffer_state { + struct sk_buff *skb; + int last; + DEFINE_DMA_UNMAP_ADDR(bus); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct myri10ge_cmd { + u32 data0; + u32 data1; + u32 data2; +}; + +struct myri10ge_rx_buf { + struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */ + struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */ + struct myri10ge_rx_buffer_state *info; + struct page *page; + dma_addr_t bus; + int page_offset; + int cnt; + int fill_cnt; + int alloc_fail; + int mask; /* number of rx slots -1 */ + int watchdog_needed; +}; + 
+struct myri10ge_tx_buf { + struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */ + __be32 __iomem *send_go; /* "go" doorbell ptr */ + __be32 __iomem *send_stop; /* "stop" doorbell ptr */ + struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */ + char *req_bytes; + struct myri10ge_tx_buffer_state *info; + int mask; /* number of transmit slots -1 */ + int req ____cacheline_aligned; /* transmit slots submitted */ + int pkt_start; /* packets started */ + int stop_queue; + int linearized; + int done ____cacheline_aligned; /* transmit slots completed */ + int pkt_done; /* packets completed */ + int wake_queue; + int queue_active; +}; + +struct myri10ge_rx_done { + struct mcp_slot *entry; + dma_addr_t bus; + int cnt; + int idx; +}; + +struct myri10ge_slice_netstats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long rx_dropped; + unsigned long tx_dropped; +}; + +struct myri10ge_slice_state { + struct myri10ge_tx_buf tx; /* transmit ring */ + struct myri10ge_rx_buf rx_small; + struct myri10ge_rx_buf rx_big; + struct myri10ge_rx_done rx_done; + struct net_device *dev; + struct napi_struct napi; + struct myri10ge_priv *mgp; + struct myri10ge_slice_netstats stats; + __be32 __iomem *irq_claim; + struct mcp_irq_data *fw_stats; + dma_addr_t fw_stats_bus; + int watchdog_tx_done; + int watchdog_tx_req; + int watchdog_rx_done; + int stuck; +#ifdef CONFIG_MYRI10GE_DCA + int cached_dca_tag; + int cpu; + __be32 __iomem *dca_tag; +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; +#define SLICE_STATE_IDLE 0 +#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */ +#define SLICE_STATE_POLL 2 /* poll owns this slice */ +#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL) +#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */ +#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */ +#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD) + spinlock_t lock; + unsigned long lock_napi_yield; + unsigned long lock_poll_yield; + unsigned long busy_poll_miss; + unsigned long busy_poll_cnt; +#endif /* CONFIG_NET_RX_BUSY_POLL */ + char irq_desc[32]; +}; + +struct myri10ge_priv { + struct myri10ge_slice_state *ss; + int tx_boundary; /* boundary transmits cannot cross */ + int num_slices; + int running; /* running? 
*/ + int small_bytes; + int big_bytes; + int max_intr_slots; + struct net_device *dev; + u8 __iomem *sram; + int sram_size; + unsigned long board_span; + unsigned long iomem_base; + __be32 __iomem *irq_deassert; + char *mac_addr_string; + struct mcp_cmd_response *cmd; + dma_addr_t cmd_bus; + struct pci_dev *pdev; + int msi_enabled; + int msix_enabled; + struct msix_entry *msix_vectors; +#ifdef CONFIG_MYRI10GE_DCA + int dca_enabled; + int relaxed_order; +#endif + u32 link_state; + unsigned int rdma_tags_available; + int intr_coal_delay; + __be32 __iomem *intr_coal_delay_ptr; + int wc_cookie; + int down_cnt; + wait_queue_head_t down_wq; + struct work_struct watchdog_work; + struct timer_list watchdog_timer; + int watchdog_resets; + int watchdog_pause; + int pause; + bool fw_name_allocated; + char *fw_name; + char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; + char *product_code_string; + char fw_version[128]; + int fw_ver_major; + int fw_ver_minor; + int fw_ver_tiny; + int adopted_rx_filter_bug; + u8 mac_addr[ETH_ALEN]; /* eeprom mac address */ + unsigned long serial_number; + int vendor_specific_offset; + int fw_multicast_support; + u32 features; + u32 max_tso6; + u32 read_dma; + u32 write_dma; + u32 read_write_dma; + u32 link_changes; + u32 msg_enable; + unsigned int board_number; + int rebooted; +}; + +static char *myri10ge_fw_unaligned = "/*(DEBLOBBED)*/"; +static char *myri10ge_fw_aligned = "/*(DEBLOBBED)*/"; +static char *myri10ge_fw_rss_unaligned = "/*(DEBLOBBED)*/"; +static char *myri10ge_fw_rss_aligned = "/*(DEBLOBBED)*/"; +/*(DEBLOBBED)*/ + +/* Careful: must be accessed under kparam_block_sysfs_write */ +static char *myri10ge_fw_name = NULL; +module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name"); + +#define MYRI10GE_MAX_BOARDS 8 +static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] = + {[0 ... 
(MYRI10GE_MAX_BOARDS - 1)] = NULL }; +module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL, + 0444); +MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image names per board"); + +static int myri10ge_ecrc_enable = 1; +module_param(myri10ge_ecrc_enable, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E"); + +static int myri10ge_small_bytes = -1; /* -1 == auto */ +module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets"); + +static int myri10ge_msi = 1; /* enable msi by default */ +module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts"); + +static int myri10ge_intr_coal_delay = 75; +module_param(myri10ge_intr_coal_delay, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay"); + +static int myri10ge_flow_control = 1; +module_param(myri10ge_flow_control, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter"); + +static int myri10ge_deassert_wait = 1; +module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(myri10ge_deassert_wait, + "Wait when deasserting legacy interrupts"); + +static int myri10ge_force_firmware = 0; +module_param(myri10ge_force_firmware, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_force_firmware, + "Force firmware to assume aligned completions"); + +static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; +module_param(myri10ge_initial_mtu, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU"); + +static int myri10ge_napi_weight = 64; +module_param(myri10ge_napi_weight, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight"); + +static int myri10ge_watchdog_timeout = 1; +module_param(myri10ge_watchdog_timeout, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout"); + +static int myri10ge_max_irq_loops = 1048576; +module_param(myri10ge_max_irq_loops, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_max_irq_loops, + "Set stuck legacy IRQ detection threshold"); + +#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK + +static int myri10ge_debug = -1; /* defaults above */ +module_param(myri10ge_debug, int, 0); +MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); + +static int myri10ge_fill_thresh = 256; +module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); + +static int myri10ge_reset_recover = 1; + +static int myri10ge_max_slices = 1; +module_param(myri10ge_max_slices, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues"); + +static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT; +module_param(myri10ge_rss_hash, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do"); + +static int myri10ge_dca = 1; +module_param(myri10ge_dca, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible"); + +#define MYRI10GE_FW_OFFSET 1024*1024 +#define MYRI10GE_HIGHPART_TO_U32(X) \ +(sizeof (X) == 8) ? 
((u32)((u64)(X) >> 32)) : (0) +#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X)) + +#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8) + +static void myri10ge_set_multicast_list(struct net_device *dev); +static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, + struct net_device *dev); + +static inline void put_be32(__be32 val, __be32 __iomem * p) +{ + __raw_writel((__force __u32) val, (__force void __iomem *)p); +} + +static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats); + +static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated) +{ + if (mgp->fw_name_allocated) + kfree(mgp->fw_name); + mgp->fw_name = name; + mgp->fw_name_allocated = allocated; +} + +static int +myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd, + struct myri10ge_cmd *data, int atomic) +{ + struct mcp_cmd *buf; + char buf_bytes[sizeof(*buf) + 8]; + struct mcp_cmd_response *response = mgp->cmd; + char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD; + u32 dma_low, dma_high, result, value; + int sleep_total = 0; + + /* ensure buf is aligned to 8 bytes */ + buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8); + + buf->data0 = htonl(data->data0); + buf->data1 = htonl(data->data1); + buf->data2 = htonl(data->data2); + buf->cmd = htonl(cmd); + dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus); + dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus); + + buf->response_addr.low = htonl(dma_low); + buf->response_addr.high = htonl(dma_high); + response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT); + mb(); + myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf)); + + /* wait up to 15ms. Longest command is the DMA benchmark, + * which is capped at 5ms, but runs from a timeout handler + * that runs every 7.8ms. 
So a 15ms timeout leaves us with + * a 2.2ms margin + */ + if (atomic) { + /* if atomic is set, do not sleep, + * and try to get the completion quickly + * (1ms will be enough for those commands) */ + for (sleep_total = 0; + sleep_total < 1000 && + response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); + sleep_total += 10) { + udelay(10); + mb(); + } + } else { + /* use msleep for most command */ + for (sleep_total = 0; + sleep_total < 15 && + response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); + sleep_total++) + msleep(1); + } + + result = ntohl(response->result); + value = ntohl(response->data); + if (result != MYRI10GE_NO_RESPONSE_RESULT) { + if (result == 0) { + data->data0 = value; + return 0; + } else if (result == MXGEFW_CMD_UNKNOWN) { + return -ENOSYS; + } else if (result == MXGEFW_CMD_ERROR_UNALIGNED) { + return -E2BIG; + } else if (result == MXGEFW_CMD_ERROR_RANGE && + cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES && + (data-> + data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) != + 0) { + return -ERANGE; + } else { + dev_err(&mgp->pdev->dev, + "command %d failed, result = %d\n", + cmd, result); + return -ENXIO; + } + } + + dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n", + cmd, result); + return -EAGAIN; +} + +/* + * The eeprom strings on the lanaiX have the format + * SN=x\0 + * MAC=x:x:x:x:x:x\0 + * PT:ddd mmm xx xx:xx:xx xx\0 + * PV:ddd mmm xx xx:xx:xx xx\0 + */ +static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp) +{ + char *ptr, *limit; + int i; + + ptr = mgp->eeprom_strings; + limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE; + + while (*ptr != '\0' && ptr < limit) { + if (memcmp(ptr, "MAC=", 4) == 0) { + ptr += 4; + mgp->mac_addr_string = ptr; + for (i = 0; i < 6; i++) { + if ((ptr + 2) > limit) + goto abort; + mgp->mac_addr[i] = + simple_strtoul(ptr, &ptr, 16); + ptr += 1; + } + } + if (memcmp(ptr, "PC=", 3) == 0) { + ptr += 3; + mgp->product_code_string = ptr; + } + if (memcmp((const void *)ptr, "SN=", 3) == 0) { + ptr += 3; + mgp->serial_number = simple_strtoul(ptr, &ptr, 10); + } + while (ptr < limit && *ptr++) ; + } + + return 0; + +abort: + dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n"); + return -ENXIO; +} + +/* + * Enable or disable periodic RDMAs from the host to make certain + * chipsets resend dropped PCIe messages + */ + +static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) +{ + char __iomem *submit; + __be32 buf[16] __attribute__ ((__aligned__(8))); + u32 dma_low, dma_high; + int i; + + /* clear confirmation addr */ + mgp->cmd->data = 0; + mb(); + + /* send a rdma command to the PCIe engine, and wait for the + * response in the confirmation address. The firmware should + * write a -1 there to indicate it is alive and well + */ + dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus); + dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus); + + buf[0] = htonl(dma_high); /* confirm addr MSW */ + buf[1] = htonl(dma_low); /* confirm addr LSW */ + buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */ + buf[3] = htonl(dma_high); /* dummy addr MSW */ + buf[4] = htonl(dma_low); /* dummy addr LSW */ + buf[5] = htonl(enable); /* enable? */ + + submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA; + + myri10ge_pio_copy(submit, &buf, sizeof(buf)); + for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++) + msleep(1); + if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) + dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n", + (enable ? 
"enable" : "disable")); +} + +static int +myri10ge_validate_firmware(struct myri10ge_priv *mgp, + struct mcp_gen_header *hdr) +{ + struct device *dev = &mgp->pdev->dev; + + /* check firmware type */ + if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) { + dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type)); + return -EINVAL; + } + + /* save firmware version for ethtool */ + strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version)); + mgp->fw_version[sizeof(mgp->fw_version) - 1] = '\0'; + + sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major, + &mgp->fw_ver_minor, &mgp->fw_ver_tiny); + + if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR && + mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) { + dev_err(dev, "Found firmware version %s\n", mgp->fw_version); + dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR, + MXGEFW_VERSION_MINOR); + return -EINVAL; + } + return 0; +} + +static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) +{ + unsigned crc, reread_crc; + const struct firmware *fw; + struct device *dev = &mgp->pdev->dev; + unsigned char *fw_readback; + struct mcp_gen_header *hdr; + size_t hdr_offset; + int status; + unsigned i; + + if ((status = reject_firmware(&fw, mgp->fw_name, dev)) < 0) { + dev_err(dev, "Unable to load %s firmware image via hotplug\n", + mgp->fw_name); + status = -EINVAL; + goto abort_with_nothing; + } + + /* check size */ + + if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET || + fw->size < MCP_HEADER_PTR_OFFSET + 4) { + dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size); + status = -EINVAL; + goto abort_with_fw; + } + + /* check id */ + hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET)); + if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) { + dev_err(dev, "Bad firmware file\n"); + status = -EINVAL; + goto abort_with_fw; + } + hdr = (void *)(fw->data + hdr_offset); + + status = myri10ge_validate_firmware(mgp, hdr); + if (status != 0) + goto abort_with_fw; + + crc = crc32(~0, fw->data, fw->size); + for (i = 0; i < fw->size; i += 256) { + myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i, + fw->data + i, + min(256U, (unsigned)(fw->size - i))); + mb(); + readb(mgp->sram); + } + fw_readback = vmalloc(fw->size); + if (!fw_readback) { + status = -ENOMEM; + goto abort_with_fw; + } + /* corruption checking is good for parity recovery and buggy chipset */ + memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); + reread_crc = crc32(~0, fw_readback, fw->size); + vfree(fw_readback); + if (crc != reread_crc) { + dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n", + (unsigned)fw->size, reread_crc, crc); + status = -EIO; + goto abort_with_fw; + } + *size = (u32) fw->size; + +abort_with_fw: + release_firmware(fw); + +abort_with_nothing: + return status; +} + +static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp) +{ + struct mcp_gen_header *hdr; + struct device *dev = &mgp->pdev->dev; + const size_t bytes = sizeof(struct mcp_gen_header); + size_t hdr_offset; + int status; + + /* find running firmware header */ + hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)); + + if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) { + dev_err(dev, "Running firmware has bad header offset (%d)\n", + (int)hdr_offset); + return -EIO; + } + + /* copy header of running firmware from SRAM to host memory to + * validate firmware */ + hdr = kmalloc(bytes, GFP_KERNEL); + if (hdr == NULL) + return -ENOMEM; + + memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes); 
+ status = myri10ge_validate_firmware(mgp, hdr); + kfree(hdr); + + /* check to see if adopted firmware has bug where adopting + * it will cause broadcasts to be filtered unless the NIC + * is kept in ALLMULTI mode */ + if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 && + mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) { + mgp->adopted_rx_filter_bug = 1; + dev_warn(dev, "Adopting fw %d.%d.%d: " + "working around rx filter bug\n", + mgp->fw_ver_major, mgp->fw_ver_minor, + mgp->fw_ver_tiny); + } + return status; +} + +static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) +{ + struct myri10ge_cmd cmd; + int status; + + /* probe for IPv6 TSO support */ + mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, + &cmd, 0); + if (status == 0) { + mgp->max_tso6 = cmd.data0; + mgp->features |= NETIF_F_TSO6; + } + + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); + if (status != 0) { + dev_err(&mgp->pdev->dev, + "failed MXGEFW_CMD_GET_RX_RING_SIZE\n"); + return -ENXIO; + } + + mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr)); + + return 0; +} + +static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt) +{ + char __iomem *submit; + __be32 buf[16] __attribute__ ((__aligned__(8))); + u32 dma_low, dma_high, size; + int status, i; + + size = 0; + status = myri10ge_load_hotplug_firmware(mgp, &size); + if (status) { + if (!adopt) + return status; + dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n"); + + /* Do not attempt to adopt firmware if there + * was a bad crc */ + if (status == -EIO) + return status; + + status = myri10ge_adopt_running_firmware(mgp); + if (status != 0) { + dev_err(&mgp->pdev->dev, + "failed to adopt running firmware\n"); + return status; + } + dev_info(&mgp->pdev->dev, + "Successfully adopted running firmware\n"); + if (mgp->tx_boundary == 4096) { + dev_warn(&mgp->pdev->dev, + "Using firmware currently running on NIC" + ". For optimal\n"); + dev_warn(&mgp->pdev->dev, + "performance consider loading optimized " + "firmware\n"); + dev_warn(&mgp->pdev->dev, "via hotplug\n"); + } + + set_fw_name(mgp, "adopted", false); + mgp->tx_boundary = 2048; + myri10ge_dummy_rdma(mgp, 1); + status = myri10ge_get_firmware_capabilities(mgp); + return status; + } + + /* clear confirmation addr */ + mgp->cmd->data = 0; + mb(); + + /* send a reload command to the bootstrap MCP, and wait for the + * response in the confirmation address. The firmware should + * write a -1 there to indicate it is alive and well + */ + dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus); + dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus); + + buf[0] = htonl(dma_high); /* confirm addr MSW */ + buf[1] = htonl(dma_low); /* confirm addr LSW */ + buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */ + + /* FIX: All newest firmware should un-protect the bottom of + * the sram before handoff. However, the very first interfaces + * do not. 
Therefore the handoff copy must skip the first 8 bytes + */ + buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */ + buf[4] = htonl(size - 8); /* length of code */ + buf[5] = htonl(8); /* where to copy to */ + buf[6] = htonl(0); /* where to jump to */ + + submit = mgp->sram + MXGEFW_BOOT_HANDOFF; + + myri10ge_pio_copy(submit, &buf, sizeof(buf)); + mb(); + msleep(1); + mb(); + i = 0; + while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) { + msleep(1 << i); + i++; + } + if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { + dev_err(&mgp->pdev->dev, "handoff failed\n"); + return -ENXIO; + } + myri10ge_dummy_rdma(mgp, 1); + status = myri10ge_get_firmware_capabilities(mgp); + + return status; +} + +static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) +{ + struct myri10ge_cmd cmd; + int status; + + cmd.data0 = ((addr[0] << 24) | (addr[1] << 16) + | (addr[2] << 8) | addr[3]); + + cmd.data1 = ((addr[4] << 8) | (addr[5])); + + status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0); + return status; +} + +static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause) +{ + struct myri10ge_cmd cmd; + int status, ctl; + + ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL; + status = myri10ge_send_cmd(mgp, ctl, &cmd, 0); + + if (status) { + netdev_err(mgp->dev, "Failed to set flow control mode\n"); + return status; + } + mgp->pause = pause; + return 0; +} + +static void +myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic) +{ + struct myri10ge_cmd cmd; + int status, ctl; + + ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC; + status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic); + if (status) + netdev_err(mgp->dev, "Failed to set promisc mode\n"); +} + +static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) +{ + struct myri10ge_cmd cmd; + int status; + u32 len; + struct page *dmatest_page; + dma_addr_t dmatest_bus; + char *test = " "; + + dmatest_page = alloc_page(GFP_KERNEL); + if (!dmatest_page) + return -ENOMEM; + dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { + __free_page(dmatest_page); + return -ENOMEM; + } + + /* Run a small DMA test. + * The magic multipliers to the length tell the firmware + * to do DMA read, write, or read+write tests. The + * results are returned in cmd.data0. The upper 16 + * bits or the return is the number of transfers completed. + * The lower 16 bits is the time in 0.5us ticks that the + * transfers took to complete. 
+ */ + + len = mgp->tx_boundary; + + cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); + cmd.data2 = len * 0x10000; + status = myri10ge_send_cmd(mgp, test_type, &cmd, 0); + if (status != 0) { + test = "read"; + goto abort; + } + mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff); + cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); + cmd.data2 = len * 0x1; + status = myri10ge_send_cmd(mgp, test_type, &cmd, 0); + if (status != 0) { + test = "write"; + goto abort; + } + mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff); + + cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); + cmd.data2 = len * 0x10001; + status = myri10ge_send_cmd(mgp, test_type, &cmd, 0); + if (status != 0) { + test = "read/write"; + goto abort; + } + mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) / + (cmd.data0 & 0xffff); + +abort: + pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL); + put_page(dmatest_page); + + if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST) + dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n", + test, status); + + return status; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) +{ + spin_lock_init(&ss->lock); + ss->state = SLICE_STATE_IDLE; +} + +static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) +{ + bool rc = true; + spin_lock(&ss->lock); + if ((ss->state & SLICE_LOCKED)) { + WARN_ON((ss->state & SLICE_STATE_NAPI)); + ss->state |= SLICE_STATE_NAPI_YIELD; + rc = false; + ss->lock_napi_yield++; + } else + ss->state = SLICE_STATE_NAPI; + spin_unlock(&ss->lock); + return rc; +} + +static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) +{ + spin_lock(&ss->lock); + WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD))); + ss->state = SLICE_STATE_IDLE; + spin_unlock(&ss->lock); +} + +static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) +{ + bool rc = true; + spin_lock_bh(&ss->lock); + if ((ss->state & SLICE_LOCKED)) { + ss->state |= SLICE_STATE_POLL_YIELD; + rc = false; + ss->lock_poll_yield++; + } else + ss->state |= SLICE_STATE_POLL; + spin_unlock_bh(&ss->lock); + return rc; +} + +static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) +{ + spin_lock_bh(&ss->lock); + WARN_ON((ss->state & SLICE_STATE_NAPI)); + ss->state = SLICE_STATE_IDLE; + spin_unlock_bh(&ss->lock); +} + +static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) +{ + WARN_ON(!(ss->state & SLICE_LOCKED)); + return (ss->state & SLICE_USER_PEND); +} +#else /* CONFIG_NET_RX_BUSY_POLL */ +static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) +{ +} + +static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) +{ + return false; +} + +static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) +{ +} + +static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) +{ + return false; +} + +static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) +{ +} + +static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) +{ + return false; +} +#endif + +static int myri10ge_reset(struct myri10ge_priv *mgp) +{ + struct myri10ge_cmd cmd; + struct myri10ge_slice_state *ss; + int i, status; + size_t bytes; +#ifdef CONFIG_MYRI10GE_DCA + unsigned long 
dca_tag_off; +#endif + + /* try to send a reset command to the card to see if it + * is alive */ + memset(&cmd, 0, sizeof(cmd)); + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0); + if (status != 0) { + dev_err(&mgp->pdev->dev, "failed reset\n"); + return -ENXIO; + } + + (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST); + /* + * Use non-ndis mcp_slot (eg, 4 bytes total, + * no toeplitz hash value returned. Older firmware will + * not understand this command, but will use the correct + * sized mcp_slot, so we ignore error returns + */ + cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN; + (void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0); + + /* Now exchange information about interrupts */ + + bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry); + cmd.data0 = (u32) bytes; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); + + /* + * Even though we already know how many slices are supported + * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES + * has magic side effects, and must be called after a reset. + * It must be called prior to calling any RSS related cmds, + * including assigning an interrupt queue for anything but + * slice 0. It must also be called *after* + * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by + * the firmware to compute offsets. + */ + + if (mgp->num_slices > 1) { + + /* ask the maximum number of slices it supports */ + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, + &cmd, 0); + if (status != 0) { + dev_err(&mgp->pdev->dev, + "failed to get number of slices\n"); + } + + /* + * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior + * to setting up the interrupt queue DMA + */ + + cmd.data0 = mgp->num_slices; + cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; + if (mgp->dev->real_num_tx_queues > 1) + cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, + &cmd, 0); + + /* Firmware older than 1.4.32 only supports multiple + * RX queues, so if we get an error, first retry using a + * single TX queue before giving up */ + if (status != 0 && mgp->dev->real_num_tx_queues > 1) { + netif_set_real_num_tx_queues(mgp->dev, 1); + cmd.data0 = mgp->num_slices; + cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; + status = myri10ge_send_cmd(mgp, + MXGEFW_CMD_ENABLE_RSS_QUEUES, + &cmd, 0); + } + + if (status != 0) { + dev_err(&mgp->pdev->dev, + "failed to set number of slices\n"); + + return status; + } + } + for (i = 0; i < mgp->num_slices; i++) { + ss = &mgp->ss[i]; + cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus); + cmd.data2 = i; + status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, + &cmd, 0); + } + + status |= + myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); + for (i = 0; i < mgp->num_slices; i++) { + ss = &mgp->ss[i]; + ss->irq_claim = + (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i); + } + status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, + &cmd, 0); + mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); + + status |= myri10ge_send_cmd + (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0); + mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0); + if (status != 0) { + dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n"); + return status; + } + put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); + +#ifdef CONFIG_MYRI10GE_DCA + status = myri10ge_send_cmd(mgp, 
MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0); + dca_tag_off = cmd.data0; + for (i = 0; i < mgp->num_slices; i++) { + ss = &mgp->ss[i]; + if (status == 0) { + ss->dca_tag = (__iomem __be32 *) + (mgp->sram + dca_tag_off + 4 * i); + } else { + ss->dca_tag = NULL; + } + } +#endif /* CONFIG_MYRI10GE_DCA */ + + /* reset mcp/driver shared state back to 0 */ + + mgp->link_changes = 0; + for (i = 0; i < mgp->num_slices; i++) { + ss = &mgp->ss[i]; + + memset(ss->rx_done.entry, 0, bytes); + ss->tx.req = 0; + ss->tx.done = 0; + ss->tx.pkt_start = 0; + ss->tx.pkt_done = 0; + ss->rx_big.cnt = 0; + ss->rx_small.cnt = 0; + ss->rx_done.idx = 0; + ss->rx_done.cnt = 0; + ss->tx.wake_queue = 0; + ss->tx.stop_queue = 0; + } + + status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); + myri10ge_change_pause(mgp, mgp->pause); + myri10ge_set_multicast_list(mgp->dev); + return status; +} + +#ifdef CONFIG_MYRI10GE_DCA +static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on) +{ + int ret; + u16 ctl; + + pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl); + + ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4; + if (ret != on) { + ctl &= ~PCI_EXP_DEVCTL_RELAX_EN; + ctl |= (on << 4); + pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl); + } + return ret; +} + +static void +myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) +{ + ss->cached_dca_tag = tag; + put_be32(htonl(tag), ss->dca_tag); +} + +static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss) +{ + int cpu = get_cpu(); + int tag; + + if (cpu != ss->cpu) { + tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu); + if (ss->cached_dca_tag != tag) + myri10ge_write_dca(ss, cpu, tag); + ss->cpu = cpu; + } + put_cpu(); +} + +static void myri10ge_setup_dca(struct myri10ge_priv *mgp) +{ + int err, i; + struct pci_dev *pdev = mgp->pdev; + + if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled) + return; + if (!myri10ge_dca) { + dev_err(&pdev->dev, "dca disabled by administrator\n"); + return; + } + err = dca_add_requester(&pdev->dev); + if (err) { + if (err != -ENODEV) + dev_err(&pdev->dev, + "dca_add_requester() failed, err=%d\n", err); + return; + } + mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0); + mgp->dca_enabled = 1; + for (i = 0; i < mgp->num_slices; i++) { + mgp->ss[i].cpu = -1; + mgp->ss[i].cached_dca_tag = -1; + myri10ge_update_dca(&mgp->ss[i]); + } +} + +static void myri10ge_teardown_dca(struct myri10ge_priv *mgp) +{ + struct pci_dev *pdev = mgp->pdev; + + if (!mgp->dca_enabled) + return; + mgp->dca_enabled = 0; + if (mgp->relaxed_order) + myri10ge_toggle_relaxed(pdev, 1); + dca_remove_requester(&pdev->dev); +} + +static int myri10ge_notify_dca_device(struct device *dev, void *data) +{ + struct myri10ge_priv *mgp; + unsigned long event; + + mgp = dev_get_drvdata(dev); + event = *(unsigned long *)data; + + if (event == DCA_PROVIDER_ADD) + myri10ge_setup_dca(mgp); + else if (event == DCA_PROVIDER_REMOVE) + myri10ge_teardown_dca(mgp); + return 0; +} +#endif /* CONFIG_MYRI10GE_DCA */ + +static inline void +myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst, + struct mcp_kreq_ether_recv *src) +{ + __be32 low; + + low = src->addr_low; + src->addr_low = htonl(DMA_BIT_MASK(32)); + myri10ge_pio_copy(dst, src, 4 * sizeof(*src)); + mb(); + myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src)); + mb(); + src->addr_low = low; + put_be32(low, &dst->addr_low); + mb(); +} + +static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum) +{ + struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data); + + if 
((skb->protocol == htons(ETH_P_8021Q)) && + (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) || + vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) { + skb->csum = hw_csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } +} + +static void +myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, + int bytes, int watchdog) +{ + struct page *page; + dma_addr_t bus; + int idx; +#if MYRI10GE_ALLOC_SIZE > 4096 + int end_offset; +#endif + + if (unlikely(rx->watchdog_needed && !watchdog)) + return; + + /* try to refill entire ring */ + while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) { + idx = rx->fill_cnt & rx->mask; + if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) { + /* we can use part of previous page */ + get_page(rx->page); + } else { + /* we need a new page */ + page = + alloc_pages(GFP_ATOMIC | __GFP_COMP, + MYRI10GE_ALLOC_ORDER); + if (unlikely(page == NULL)) { + if (rx->fill_cnt - rx->cnt < 16) + rx->watchdog_needed = 1; + return; + } + + bus = pci_map_page(mgp->pdev, page, 0, + MYRI10GE_ALLOC_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + __free_pages(page, MYRI10GE_ALLOC_ORDER); + if (rx->fill_cnt - rx->cnt < 16) + rx->watchdog_needed = 1; + return; + } + + rx->page = page; + rx->page_offset = 0; + rx->bus = bus; + + } + rx->info[idx].page = rx->page; + rx->info[idx].page_offset = rx->page_offset; + /* note that this is the address of the start of the + * page */ + dma_unmap_addr_set(&rx->info[idx], bus, rx->bus); + rx->shadow[idx].addr_low = + htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset); + rx->shadow[idx].addr_high = + htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus)); + + /* start next packet on a cacheline boundary */ + rx->page_offset += SKB_DATA_ALIGN(bytes); + +#if MYRI10GE_ALLOC_SIZE > 4096 + /* don't cross a 4KB boundary */ + end_offset = rx->page_offset + bytes - 1; + if ((unsigned)(rx->page_offset ^ end_offset) > 4095) + rx->page_offset = end_offset & ~4095; +#endif + rx->fill_cnt++; + + /* copy 8 descriptors to the firmware at a time */ + if ((idx & 7) == 7) { + myri10ge_submit_8rx(&rx->lanai[idx - 7], + &rx->shadow[idx - 7]); + } + } +} + +static inline void +myri10ge_unmap_rx_page(struct pci_dev *pdev, + struct myri10ge_rx_buffer_state *info, int bytes) +{ + /* unmap the recvd page if we're the only or last user of it */ + if (bytes >= MYRI10GE_ALLOC_SIZE / 2 || + (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) { + pci_unmap_page(pdev, (dma_unmap_addr(info, bus) + & ~(MYRI10GE_ALLOC_SIZE - 1)), + MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE); + } +} + +/* + * GRO does not support acceleration of tagged vlan frames, and + * this NIC does not support vlan tag offload, so we must pop + * the tag ourselves to be able to achieve GRO performance that + * is comparable to LRO. 
+ */ + +static inline void +myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb) +{ + u8 *va; + struct vlan_ethhdr *veh; + struct skb_frag_struct *frag; + __wsum vsum; + + va = addr; + va += MXGEFW_PAD; + veh = (struct vlan_ethhdr *)va; + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == + NETIF_F_HW_VLAN_CTAG_RX && + veh->h_vlan_proto == htons(ETH_P_8021Q)) { + /* fixup csum if needed */ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0); + skb->csum = csum_sub(skb->csum, vsum); + } + /* pop tag */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI)); + memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN); + skb->len -= VLAN_HLEN; + skb->data_len -= VLAN_HLEN; + frag = skb_shinfo(skb)->frags; + frag->page_offset += VLAN_HLEN; + skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN); + } +} + +#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */ + +static inline int +myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) +{ + struct myri10ge_priv *mgp = ss->mgp; + struct sk_buff *skb; + struct skb_frag_struct *rx_frags; + struct myri10ge_rx_buf *rx; + int i, idx, remainder, bytes; + struct pci_dev *pdev = mgp->pdev; + struct net_device *dev = mgp->dev; + u8 *va; + bool polling; + + if (len <= mgp->small_bytes) { + rx = &ss->rx_small; + bytes = mgp->small_bytes; + } else { + rx = &ss->rx_big; + bytes = mgp->big_bytes; + } + + len += MXGEFW_PAD; + idx = rx->cnt & rx->mask; + va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; + prefetch(va); + + /* When busy polling in user context, allocate skb and copy headers to + * skb's linear memory ourselves. When not busy polling, use the napi + * gro api. + */ + polling = myri10ge_ss_busy_polling(ss); + if (polling) + skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); + else + skb = napi_get_frags(&ss->napi); + if (unlikely(skb == NULL)) { + ss->stats.rx_dropped++; + for (i = 0, remainder = len; remainder > 0; i++) { + myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes); + put_page(rx->info[idx].page); + rx->cnt++; + idx = rx->cnt & rx->mask; + remainder -= MYRI10GE_ALLOC_SIZE; + } + return 0; + } + rx_frags = skb_shinfo(skb)->frags; + /* Fill skb_frag_struct(s) with data from our receive */ + for (i = 0, remainder = len; remainder > 0; i++) { + myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes); + skb_fill_page_desc(skb, i, rx->info[idx].page, + rx->info[idx].page_offset, + remainder < MYRI10GE_ALLOC_SIZE ? + remainder : MYRI10GE_ALLOC_SIZE); + rx->cnt++; + idx = rx->cnt & rx->mask; + remainder -= MYRI10GE_ALLOC_SIZE; + } + + /* remove padding */ + rx_frags[0].page_offset += MXGEFW_PAD; + rx_frags[0].size -= MXGEFW_PAD; + len -= MXGEFW_PAD; + + skb->len = len; + skb->data_len = len; + skb->truesize += len; + if (dev->features & NETIF_F_RXCSUM) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum; + } + myri10ge_vlan_rx(mgp->dev, va, skb); + skb_record_rx_queue(skb, ss - &mgp->ss[0]); + skb_mark_napi_id(skb, &ss->napi); + + if (polling) { + int hlen; + + /* myri10ge_vlan_rx might have moved the header, so compute + * length and address again. + */ + hlen = MYRI10GE_HLEN > skb->len ? 
skb->len : MYRI10GE_HLEN; + va = page_address(skb_frag_page(&rx_frags[0])) + + rx_frags[0].page_offset; + /* Copy header into the skb linear memory */ + skb_copy_to_linear_data(skb, va, hlen); + rx_frags[0].page_offset += hlen; + rx_frags[0].size -= hlen; + skb->data_len -= hlen; + skb->tail += hlen; + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + else + napi_gro_frags(&ss->napi); + + return 1; +} + +static inline void +myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) +{ + struct pci_dev *pdev = ss->mgp->pdev; + struct myri10ge_tx_buf *tx = &ss->tx; + struct netdev_queue *dev_queue; + struct sk_buff *skb; + int idx, len; + + while (tx->pkt_done != mcp_index) { + idx = tx->done & tx->mask; + skb = tx->info[idx].skb; + + /* Mark as free */ + tx->info[idx].skb = NULL; + if (tx->info[idx].last) { + tx->pkt_done++; + tx->info[idx].last = 0; + } + tx->done++; + len = dma_unmap_len(&tx->info[idx], len); + dma_unmap_len_set(&tx->info[idx], len, 0); + if (skb) { + ss->stats.tx_bytes += skb->len; + ss->stats.tx_packets++; + dev_kfree_skb_irq(skb); + if (len) + pci_unmap_single(pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + } else { + if (len) + pci_unmap_page(pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + } + } + + dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss); + /* + * Make a minimal effort to prevent the NIC from polling an + * idle tx queue. If we can't get the lock we leave the queue + * active. In this case, either a thread was about to start + * using the queue anyway, or we lost a race and the NIC will + * waste some of its resources polling an inactive queue for a + * while. + */ + + if ((ss->mgp->dev->real_num_tx_queues > 1) && + __netif_tx_trylock(dev_queue)) { + if (tx->req == tx->done) { + tx->queue_active = 0; + put_be32(htonl(1), tx->send_stop); + mb(); + mmiowb(); + } + __netif_tx_unlock(dev_queue); + } + + /* start the queue if we've stopped it */ + if (netif_tx_queue_stopped(dev_queue) && + tx->req - tx->done < (tx->mask >> 1) && + ss->mgp->running == MYRI10GE_ETH_RUNNING) { + tx->wake_queue++; + netif_tx_wake_queue(dev_queue); + } +} + +static inline int +myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) +{ + struct myri10ge_rx_done *rx_done = &ss->rx_done; + struct myri10ge_priv *mgp = ss->mgp; + unsigned long rx_bytes = 0; + unsigned long rx_packets = 0; + unsigned long rx_ok; + int idx = rx_done->idx; + int cnt = rx_done->cnt; + int work_done = 0; + u16 length; + __wsum checksum; + + while (rx_done->entry[idx].length != 0 && work_done < budget) { + length = ntohs(rx_done->entry[idx].length); + rx_done->entry[idx].length = 0; + checksum = csum_unfold(rx_done->entry[idx].checksum); + rx_ok = myri10ge_rx_done(ss, length, checksum); + rx_packets += rx_ok; + rx_bytes += rx_ok * (unsigned long)length; + cnt++; + idx = cnt & (mgp->max_intr_slots - 1); + work_done++; + } + rx_done->idx = idx; + rx_done->cnt = cnt; + ss->stats.rx_packets += rx_packets; + ss->stats.rx_bytes += rx_bytes; + + /* restock receive rings if needed */ + if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, + mgp->small_bytes + MXGEFW_PAD, 0); + if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); + + return work_done; +} + +static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) +{ + struct mcp_irq_data *stats = mgp->ss[0].fw_stats; + 
+ if (unlikely(stats->stats_updated)) { + unsigned link_up = ntohl(stats->link_up); + if (mgp->link_state != link_up) { + mgp->link_state = link_up; + + if (mgp->link_state == MXGEFW_LINK_UP) { + netif_info(mgp, link, mgp->dev, "link up\n"); + netif_carrier_on(mgp->dev); + mgp->link_changes++; + } else { + netif_info(mgp, link, mgp->dev, "link %s\n", + (link_up == MXGEFW_LINK_MYRINET ? + "mismatch (Myrinet detected)" : + "down")); + netif_carrier_off(mgp->dev); + mgp->link_changes++; + } + } + if (mgp->rdma_tags_available != + ntohl(stats->rdma_tags_available)) { + mgp->rdma_tags_available = + ntohl(stats->rdma_tags_available); + netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n", + mgp->rdma_tags_available); + } + mgp->down_cnt += stats->link_down; + if (stats->link_down) + wake_up(&mgp->down_wq); + } +} + +static int myri10ge_poll(struct napi_struct *napi, int budget) +{ + struct myri10ge_slice_state *ss = + container_of(napi, struct myri10ge_slice_state, napi); + int work_done; + +#ifdef CONFIG_MYRI10GE_DCA + if (ss->mgp->dca_enabled) + myri10ge_update_dca(ss); +#endif + /* Try later if the busy_poll handler is running. */ + if (!myri10ge_ss_lock_napi(ss)) + return budget; + + /* process as many rx events as NAPI will allow */ + work_done = myri10ge_clean_rx_done(ss, budget); + + myri10ge_ss_unlock_napi(ss); + if (work_done < budget) { + napi_complete(napi); + put_be32(htonl(3), ss->irq_claim); + } + return work_done; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +static int myri10ge_busy_poll(struct napi_struct *napi) +{ + struct myri10ge_slice_state *ss = + container_of(napi, struct myri10ge_slice_state, napi); + struct myri10ge_priv *mgp = ss->mgp; + int work_done; + + /* Poll only when the link is up */ + if (mgp->link_state != MXGEFW_LINK_UP) + return LL_FLUSH_FAILED; + + if (!myri10ge_ss_lock_poll(ss)) + return LL_FLUSH_BUSY; + + /* Process a small number of packets */ + work_done = myri10ge_clean_rx_done(ss, 4); + if (work_done) + ss->busy_poll_cnt += work_done; + else + ss->busy_poll_miss++; + + myri10ge_ss_unlock_poll(ss); + + return work_done; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +static irqreturn_t myri10ge_intr(int irq, void *arg) +{ + struct myri10ge_slice_state *ss = arg; + struct myri10ge_priv *mgp = ss->mgp; + struct mcp_irq_data *stats = ss->fw_stats; + struct myri10ge_tx_buf *tx = &ss->tx; + u32 send_done_count; + int i; + + /* an interrupt on a non-zero receive-only slice is implicitly + * valid since MSI-X irqs are not shared */ + if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { + napi_schedule(&ss->napi); + return IRQ_HANDLED; + } + + /* make sure it is our IRQ, and that the DMA has finished */ + if (unlikely(!stats->valid)) + return IRQ_NONE; + + /* low bit indicates receives are present, so schedule + * napi poll handler */ + if (stats->valid & 1) + napi_schedule(&ss->napi); + + if (!mgp->msi_enabled && !mgp->msix_enabled) { + put_be32(0, mgp->irq_deassert); + if (!myri10ge_deassert_wait) + stats->valid = 0; + mb(); + } else + stats->valid = 0; + + /* Wait for IRQ line to go low, if using INTx */ + i = 0; + while (1) { + i++; + /* check for transmit completes and receives */ + send_done_count = ntohl(stats->send_done_count); + if (send_done_count != tx->pkt_done) + myri10ge_tx_done(ss, (int)send_done_count); + if (unlikely(i > myri10ge_max_irq_loops)) { + netdev_warn(mgp->dev, "irq stuck?\n"); + stats->valid = 0; + schedule_work(&mgp->watchdog_work); + } + if (likely(stats->valid == 0)) + break; + cpu_relax(); + barrier(); + } + + /* Only slice 0 
updates stats */ + if (ss == mgp->ss) + myri10ge_check_statblock(mgp); + + put_be32(htonl(3), ss->irq_claim + 1); + return IRQ_HANDLED; +} + +static int +myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + char *ptr; + int i; + + cmd->autoneg = AUTONEG_DISABLE; + ethtool_cmd_speed_set(cmd, SPEED_10000); + cmd->duplex = DUPLEX_FULL; + + /* + * parse the product code to deterimine the interface type + * (CX4, XFP, Quad Ribbon Fiber) by looking at the character + * after the 3rd dash in the driver's cached copy of the + * EEPROM's product code string. + */ + ptr = mgp->product_code_string; + if (ptr == NULL) { + netdev_err(netdev, "Missing product code\n"); + return 0; + } + for (i = 0; i < 3; i++, ptr++) { + ptr = strchr(ptr, '-'); + if (ptr == NULL) { + netdev_err(netdev, "Invalid product code %s\n", + mgp->product_code_string); + return 0; + } + } + if (*ptr == '2') + ptr++; + if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') { + /* We've found either an XFP, quad ribbon fiber, or SFP+ */ + cmd->port = PORT_FIBRE; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + } else { + cmd->port = PORT_OTHER; + } + if (*ptr == 'R' || *ptr == 'S') + cmd->transceiver = XCVR_EXTERNAL; + else + cmd->transceiver = XCVR_INTERNAL; + + return 0; +} + +static void +myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + + strlcpy(info->driver, "myri10ge", sizeof(info->driver)); + strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version)); + strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info)); +} + +static int +myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + + coal->rx_coalesce_usecs = mgp->intr_coal_delay; + return 0; +} + +static int +myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + + mgp->intr_coal_delay = coal->rx_coalesce_usecs; + put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); + return 0; +} + +static void +myri10ge_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + + pause->autoneg = 0; + pause->rx_pause = mgp->pause; + pause->tx_pause = mgp->pause; +} + +static int +myri10ge_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + + if (pause->tx_pause != mgp->pause) + return myri10ge_change_pause(mgp, pause->tx_pause); + if (pause->rx_pause != mgp->pause) + return myri10ge_change_pause(mgp, pause->rx_pause); + if (pause->autoneg != 0) + return -EINVAL; + return 0; +} + +static void +myri10ge_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + + ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; + ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1; + ring->rx_jumbo_max_pending = 0; + ring->tx_max_pending = mgp->ss[0].tx.mask + 1; + ring->rx_mini_pending = ring->rx_mini_max_pending; + ring->rx_pending = ring->rx_max_pending; + ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; + ring->tx_pending = ring->tx_max_pending; +} + +static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { + 
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", + "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", + "rx_length_errors", "rx_over_errors", "rx_crc_errors", + "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors", + "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", + "tx_heartbeat_errors", "tx_window_errors", + /* device-specific stats */ + "tx_boundary", "irq", "MSI", "MSIX", + "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", + "serial_number", "watchdog_resets", +#ifdef CONFIG_MYRI10GE_DCA + "dca_capable_firmware", "dca_device_present", +#endif + "link_changes", "link_up", "dropped_link_overflow", + "dropped_link_error_or_filtered", + "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", + "dropped_unicast_filtered", "dropped_multicast_filtered", + "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", + "dropped_no_big_buffer" +}; + +static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { + "----------- slice ---------", + "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", + "rx_small_cnt", "rx_big_cnt", + "wake_queue", "stop_queue", "tx_linearized", +#ifdef CONFIG_NET_RX_BUSY_POLL + "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss", + "rx_busy_poll_cnt", +#endif +}; + +#define MYRI10GE_NET_STATS_LEN 21 +#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) +#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats) + +static void +myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + int i; + + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *myri10ge_gstrings_main_stats, + sizeof(myri10ge_gstrings_main_stats)); + data += sizeof(myri10ge_gstrings_main_stats); + for (i = 0; i < mgp->num_slices; i++) { + memcpy(data, *myri10ge_gstrings_slice_stats, + sizeof(myri10ge_gstrings_slice_stats)); + data += sizeof(myri10ge_gstrings_slice_stats); + } + break; + } +} + +static int myri10ge_get_sset_count(struct net_device *netdev, int sset) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_STATS: + return MYRI10GE_MAIN_STATS_LEN + + mgp->num_slices * MYRI10GE_SLICE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +myri10ge_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 * data) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + struct myri10ge_slice_state *ss; + struct rtnl_link_stats64 link_stats; + int slice; + int i; + + /* force stats update */ + memset(&link_stats, 0, sizeof(link_stats)); + (void)myri10ge_get_stats(netdev, &link_stats); + for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) + data[i] = ((u64 *)&link_stats)[i]; + + data[i++] = (unsigned int)mgp->tx_boundary; + data[i++] = (unsigned int)mgp->pdev->irq; + data[i++] = (unsigned int)mgp->msi_enabled; + data[i++] = (unsigned int)mgp->msix_enabled; + data[i++] = (unsigned int)mgp->read_dma; + data[i++] = (unsigned int)mgp->write_dma; + data[i++] = (unsigned int)mgp->read_write_dma; + data[i++] = (unsigned int)mgp->serial_number; + data[i++] = (unsigned int)mgp->watchdog_resets; +#ifdef CONFIG_MYRI10GE_DCA + data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); + data[i++] = (unsigned int)(mgp->dca_enabled); +#endif + data[i++] = (unsigned int)mgp->link_changes; + + /* firmware stats are useful only in the first slice */ + ss = &mgp->ss[0]; + data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); + data[i++] = (unsigned 
int)ntohl(ss->fw_stats->dropped_link_overflow); + data[i++] = + (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); + data[i++] = + (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); + + for (slice = 0; slice < mgp->num_slices; slice++) { + ss = &mgp->ss[slice]; + data[i++] = slice; + data[i++] = (unsigned int)ss->tx.pkt_start; + data[i++] = (unsigned int)ss->tx.pkt_done; + data[i++] = (unsigned int)ss->tx.req; + data[i++] = (unsigned int)ss->tx.done; + data[i++] = (unsigned int)ss->rx_small.cnt; + data[i++] = (unsigned int)ss->rx_big.cnt; + data[i++] = (unsigned int)ss->tx.wake_queue; + data[i++] = (unsigned int)ss->tx.stop_queue; + data[i++] = (unsigned int)ss->tx.linearized; +#ifdef CONFIG_NET_RX_BUSY_POLL + data[i++] = ss->lock_napi_yield; + data[i++] = ss->lock_poll_yield; + data[i++] = ss->busy_poll_miss; + data[i++] = ss->busy_poll_cnt; +#endif + } +} + +static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + mgp->msg_enable = value; +} + +static u32 myri10ge_get_msglevel(struct net_device *netdev) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + return mgp->msg_enable; +} + +/* + * Use a low-level command to change the LED behavior. Rather than + * blinking (which is the normal case), when identify is used, the + * yellow LED turns solid. 
+ */ +static int myri10ge_led(struct myri10ge_priv *mgp, int on) +{ + struct mcp_gen_header *hdr; + struct device *dev = &mgp->pdev->dev; + size_t hdr_off, pattern_off, hdr_len; + u32 pattern = 0xfffffffe; + + /* find running firmware header */ + hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)); + if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) { + dev_err(dev, "Running firmware has bad header offset (%d)\n", + (int)hdr_off); + return -EIO; + } + hdr_len = swab32(readl(mgp->sram + hdr_off + + offsetof(struct mcp_gen_header, header_length))); + pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern); + if (pattern_off >= (hdr_len + hdr_off)) { + dev_info(dev, "Firmware does not support LED identification\n"); + return -EINVAL; + } + if (!on) + pattern = swab32(readl(mgp->sram + pattern_off + 4)); + writel(swab32(pattern), mgp->sram + pattern_off); + return 0; +} + +static int +myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + int rc; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + rc = myri10ge_led(mgp, 1); + break; + + case ETHTOOL_ID_INACTIVE: + rc = myri10ge_led(mgp, 0); + break; + + default: + rc = -EINVAL; + } + + return rc; +} + +static const struct ethtool_ops myri10ge_ethtool_ops = { + .get_settings = myri10ge_get_settings, + .get_drvinfo = myri10ge_get_drvinfo, + .get_coalesce = myri10ge_get_coalesce, + .set_coalesce = myri10ge_set_coalesce, + .get_pauseparam = myri10ge_get_pauseparam, + .set_pauseparam = myri10ge_set_pauseparam, + .get_ringparam = myri10ge_get_ringparam, + .get_link = ethtool_op_get_link, + .get_strings = myri10ge_get_strings, + .get_sset_count = myri10ge_get_sset_count, + .get_ethtool_stats = myri10ge_get_ethtool_stats, + .set_msglevel = myri10ge_set_msglevel, + .get_msglevel = myri10ge_get_msglevel, + .set_phys_id = myri10ge_phys_id, +}; + +static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) +{ + struct myri10ge_priv *mgp = ss->mgp; + struct myri10ge_cmd cmd; + struct net_device *dev = mgp->dev; + int tx_ring_size, rx_ring_size; + int tx_ring_entries, rx_ring_entries; + int i, slice, status; + size_t bytes; + + /* get ring sizes */ + slice = ss - mgp->ss; + cmd.data0 = slice; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); + tx_ring_size = cmd.data0; + cmd.data0 = slice; + status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); + if (status != 0) + return status; + rx_ring_size = cmd.data0; + + tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); + rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); + ss->tx.mask = tx_ring_entries - 1; + ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; + + status = -ENOMEM; + + /* allocate the host shadow rings */ + + bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) + * sizeof(*ss->tx.req_list); + ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); + if (ss->tx.req_bytes == NULL) + goto abort_with_nothing; + + /* ensure req_list entries are aligned to 8 bytes */ + ss->tx.req_list = (struct mcp_kreq_ether_send *) + ALIGN((unsigned long)ss->tx.req_bytes, 8); + ss->tx.queue_active = 0; + + bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); + ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_small.shadow == NULL) + goto abort_with_tx_req_bytes; + + bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); + ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_big.shadow == NULL) + goto 
abort_with_rx_small_shadow; + + /* allocate the host info rings */ + + bytes = tx_ring_entries * sizeof(*ss->tx.info); + ss->tx.info = kzalloc(bytes, GFP_KERNEL); + if (ss->tx.info == NULL) + goto abort_with_rx_big_shadow; + + bytes = rx_ring_entries * sizeof(*ss->rx_small.info); + ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_small.info == NULL) + goto abort_with_tx_info; + + bytes = rx_ring_entries * sizeof(*ss->rx_big.info); + ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); + if (ss->rx_big.info == NULL) + goto abort_with_rx_small_info; + + /* Fill the receive rings */ + ss->rx_big.cnt = 0; + ss->rx_small.cnt = 0; + ss->rx_big.fill_cnt = 0; + ss->rx_small.fill_cnt = 0; + ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; + ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; + ss->rx_small.watchdog_needed = 0; + ss->rx_big.watchdog_needed = 0; + if (mgp->small_bytes == 0) { + ss->rx_small.fill_cnt = ss->rx_small.mask + 1; + } else { + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, + mgp->small_bytes + MXGEFW_PAD, 0); + } + + if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { + netdev_err(dev, "slice-%d: alloced only %d small bufs\n", + slice, ss->rx_small.fill_cnt); + goto abort_with_rx_small_ring; + } + + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); + if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { + netdev_err(dev, "slice-%d: alloced only %d big bufs\n", + slice, ss->rx_big.fill_cnt); + goto abort_with_rx_big_ring; + } + + return 0; + +abort_with_rx_big_ring: + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { + int idx = i & ss->rx_big.mask; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], + mgp->big_bytes); + put_page(ss->rx_big.info[idx].page); + } + +abort_with_rx_small_ring: + if (mgp->small_bytes == 0) + ss->rx_small.fill_cnt = ss->rx_small.cnt; + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { + int idx = i & ss->rx_small.mask; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], + mgp->small_bytes + MXGEFW_PAD); + put_page(ss->rx_small.info[idx].page); + } + + kfree(ss->rx_big.info); + +abort_with_rx_small_info: + kfree(ss->rx_small.info); + +abort_with_tx_info: + kfree(ss->tx.info); + +abort_with_rx_big_shadow: + kfree(ss->rx_big.shadow); + +abort_with_rx_small_shadow: + kfree(ss->rx_small.shadow); + +abort_with_tx_req_bytes: + kfree(ss->tx.req_bytes); + ss->tx.req_bytes = NULL; + ss->tx.req_list = NULL; + +abort_with_nothing: + return status; +} + +static void myri10ge_free_rings(struct myri10ge_slice_state *ss) +{ + struct myri10ge_priv *mgp = ss->mgp; + struct sk_buff *skb; + struct myri10ge_tx_buf *tx; + int i, len, idx; + + /* If not allocated, skip it */ + if (ss->tx.req_list == NULL) + return; + + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { + idx = i & ss->rx_big.mask; + if (i == ss->rx_big.fill_cnt - 1) + ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], + mgp->big_bytes); + put_page(ss->rx_big.info[idx].page); + } + + if (mgp->small_bytes == 0) + ss->rx_small.fill_cnt = ss->rx_small.cnt; + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { + idx = i & ss->rx_small.mask; + if (i == ss->rx_small.fill_cnt - 1) + ss->rx_small.info[idx].page_offset = + MYRI10GE_ALLOC_SIZE; + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], + mgp->small_bytes + MXGEFW_PAD); + put_page(ss->rx_small.info[idx].page); + } + tx = &ss->tx; + while (tx->done != tx->req) { + idx = tx->done & tx->mask; + skb = tx->info[idx].skb; + + /* Mark as 
free */ + tx->info[idx].skb = NULL; + tx->done++; + len = dma_unmap_len(&tx->info[idx], len); + dma_unmap_len_set(&tx->info[idx], len, 0); + if (skb) { + ss->stats.tx_dropped++; + dev_kfree_skb_any(skb); + if (len) + pci_unmap_single(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + } else { + if (len) + pci_unmap_page(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + } + } + kfree(ss->rx_big.info); + + kfree(ss->rx_small.info); + + kfree(ss->tx.info); + + kfree(ss->rx_big.shadow); + + kfree(ss->rx_small.shadow); + + kfree(ss->tx.req_bytes); + ss->tx.req_bytes = NULL; + ss->tx.req_list = NULL; +} + +static int myri10ge_request_irq(struct myri10ge_priv *mgp) +{ + struct pci_dev *pdev = mgp->pdev; + struct myri10ge_slice_state *ss; + struct net_device *netdev = mgp->dev; + int i; + int status; + + mgp->msi_enabled = 0; + mgp->msix_enabled = 0; + status = 0; + if (myri10ge_msi) { + if (mgp->num_slices > 1) { + status = pci_enable_msix_range(pdev, mgp->msix_vectors, + mgp->num_slices, mgp->num_slices); + if (status < 0) { + dev_err(&pdev->dev, + "Error %d setting up MSI-X\n", status); + return status; + } + mgp->msix_enabled = 1; + } + if (mgp->msix_enabled == 0) { + status = pci_enable_msi(pdev); + if (status != 0) { + dev_err(&pdev->dev, + "Error %d setting up MSI; falling back to xPIC\n", + status); + } else { + mgp->msi_enabled = 1; + } + } + } + if (mgp->msix_enabled) { + for (i = 0; i < mgp->num_slices; i++) { + ss = &mgp->ss[i]; + snprintf(ss->irq_desc, sizeof(ss->irq_desc), + "%s:slice-%d", netdev->name, i); + status = request_irq(mgp->msix_vectors[i].vector, + myri10ge_intr, 0, ss->irq_desc, + ss); + if (status != 0) { + dev_err(&pdev->dev, + "slice %d failed to allocate IRQ\n", i); + i--; + while (i >= 0) { + free_irq(mgp->msix_vectors[i].vector, + &mgp->ss[i]); + i--; + } + pci_disable_msix(pdev); + return status; + } + } + } else { + status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, + mgp->dev->name, &mgp->ss[0]); + if (status != 0) { + dev_err(&pdev->dev, "failed to allocate IRQ\n"); + if (mgp->msi_enabled) + pci_disable_msi(pdev); + } + } + return status; +} + +static void myri10ge_free_irq(struct myri10ge_priv *mgp) +{ + struct pci_dev *pdev = mgp->pdev; + int i; + + if (mgp->msix_enabled) { + for (i = 0; i < mgp->num_slices; i++) + free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]); + } else { + free_irq(pdev->irq, &mgp->ss[0]); + } + if (mgp->msi_enabled) + pci_disable_msi(pdev); + if (mgp->msix_enabled) + pci_disable_msix(pdev); +} + +static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice) +{ + struct myri10ge_cmd cmd; + struct myri10ge_slice_state *ss; + int status; + + ss = &mgp->ss[slice]; + status = 0; + if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) { + cmd.data0 = slice; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, + &cmd, 0); + ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) + (mgp->sram + cmd.data0); + } + cmd.data0 = slice; + status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, + &cmd, 0); + ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) + (mgp->sram + cmd.data0); + + cmd.data0 = slice; + status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); + ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) + (mgp->sram + cmd.data0); + + ss->tx.send_go = (__iomem __be32 *) + (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice); + ss->tx.send_stop = (__iomem __be32 *) + (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice); + return 
status; + +} + +static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice) +{ + struct myri10ge_cmd cmd; + struct myri10ge_slice_state *ss; + int status; + + ss = &mgp->ss[slice]; + cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus); + cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16); + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); + if (status == -ENOSYS) { + dma_addr_t bus = ss->fw_stats_bus; + if (slice != 0) + return -EINVAL; + bus += offsetof(struct mcp_irq_data, send_done_count); + cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); + status = myri10ge_send_cmd(mgp, + MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, + &cmd, 0); + /* Firmware cannot support multicast without STATS_DMA_V2 */ + mgp->fw_multicast_support = 0; + } else { + mgp->fw_multicast_support = 1; + } + return 0; +} + +static int myri10ge_open(struct net_device *dev) +{ + struct myri10ge_slice_state *ss; + struct myri10ge_priv *mgp = netdev_priv(dev); + struct myri10ge_cmd cmd; + int i, status, big_pow2, slice; + u8 __iomem *itable; + + if (mgp->running != MYRI10GE_ETH_STOPPED) + return -EBUSY; + + mgp->running = MYRI10GE_ETH_STARTING; + status = myri10ge_reset(mgp); + if (status != 0) { + netdev_err(dev, "failed reset\n"); + goto abort_with_nothing; + } + + if (mgp->num_slices > 1) { + cmd.data0 = mgp->num_slices; + cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; + if (mgp->dev->real_num_tx_queues > 1) + cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES, + &cmd, 0); + if (status != 0) { + netdev_err(dev, "failed to set number of slices\n"); + goto abort_with_nothing; + } + /* setup the indirection table */ + cmd.data0 = mgp->num_slices; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE, + &cmd, 0); + + status |= myri10ge_send_cmd(mgp, + MXGEFW_CMD_GET_RSS_TABLE_OFFSET, + &cmd, 0); + if (status != 0) { + netdev_err(dev, "failed to setup rss tables\n"); + goto abort_with_nothing; + } + + /* just enable an identity mapping */ + itable = mgp->sram + cmd.data0; + for (i = 0; i < mgp->num_slices; i++) + __raw_writeb(i, &itable[i]); + + cmd.data0 = 1; + cmd.data1 = myri10ge_rss_hash; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE, + &cmd, 0); + if (status != 0) { + netdev_err(dev, "failed to enable slices\n"); + goto abort_with_nothing; + } + } + + status = myri10ge_request_irq(mgp); + if (status != 0) + goto abort_with_nothing; + + /* decide what small buffer size to use. For good TCP rx + * performance, it is important to not receive 1514 byte + * frames into jumbo buffers, as it confuses the socket buffer + * accounting code, leading to drops and erratic performance. + */ + + if (dev->mtu <= ETH_DATA_LEN) + /* enough for a TCP header */ + mgp->small_bytes = (128 > SMP_CACHE_BYTES) + ? (128 - MXGEFW_PAD) + : (SMP_CACHE_BYTES - MXGEFW_PAD); + else + /* enough for a vlan encapsulated ETH_DATA_LEN frame */ + mgp->small_bytes = VLAN_ETH_FRAME_LEN; + + /* Override the small buffer size? */ + if (myri10ge_small_bytes >= 0) + mgp->small_bytes = myri10ge_small_bytes; + + /* Firmware needs the big buff size as a power of 2. Lie and + * tell him the buffer is larger, because we only use 1 + * buffer/pkt, and the mtu will prevent overruns. 
+ */ + big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD; + if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) { + while (!is_power_of_2(big_pow2)) + big_pow2++; + mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD; + } else { + big_pow2 = MYRI10GE_ALLOC_SIZE; + mgp->big_bytes = big_pow2; + } + + /* setup the per-slice data structures */ + for (slice = 0; slice < mgp->num_slices; slice++) { + ss = &mgp->ss[slice]; + + status = myri10ge_get_txrx(mgp, slice); + if (status != 0) { + netdev_err(dev, "failed to get ring sizes or locations\n"); + goto abort_with_rings; + } + status = myri10ge_allocate_rings(ss); + if (status != 0) + goto abort_with_rings; + + /* only firmware which supports multiple TX queues + * supports setting up the tx stats on non-zero + * slices */ + if (slice == 0 || mgp->dev->real_num_tx_queues > 1) + status = myri10ge_set_stats(mgp, slice); + if (status) { + netdev_err(dev, "Couldn't set stats DMA\n"); + goto abort_with_rings; + } + + /* Initialize the slice spinlock and state used for polling */ + myri10ge_ss_init_lock(ss); + + /* must happen prior to any irq */ + napi_enable(&(ss)->napi); + } + + /* now give firmware buffers sizes, and MTU */ + cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0); + cmd.data0 = mgp->small_bytes; + status |= + myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0); + cmd.data0 = big_pow2; + status |= + myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0); + if (status) { + netdev_err(dev, "Couldn't set buffer sizes\n"); + goto abort_with_rings; + } + + /* + * Set Linux style TSO mode; this is needed only on newer + * firmware versions. Older versions default to Linux + * style TSO + */ + cmd.data0 = 0; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0); + if (status && status != -ENOSYS) { + netdev_err(dev, "Couldn't set TSO mode\n"); + goto abort_with_rings; + } + + mgp->link_state = ~0U; + mgp->rdma_tags_available = 15; + + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); + if (status) { + netdev_err(dev, "Couldn't bring up link\n"); + goto abort_with_rings; + } + + mgp->running = MYRI10GE_ETH_RUNNING; + mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; + add_timer(&mgp->watchdog_timer); + netif_tx_wake_all_queues(dev); + + return 0; + +abort_with_rings: + while (slice) { + slice--; + napi_disable(&mgp->ss[slice].napi); + } + for (i = 0; i < mgp->num_slices; i++) + myri10ge_free_rings(&mgp->ss[i]); + + myri10ge_free_irq(mgp); + +abort_with_nothing: + mgp->running = MYRI10GE_ETH_STOPPED; + return -ENOMEM; +} + +static int myri10ge_close(struct net_device *dev) +{ + struct myri10ge_priv *mgp = netdev_priv(dev); + struct myri10ge_cmd cmd; + int status, old_down_cnt; + int i; + + if (mgp->running != MYRI10GE_ETH_RUNNING) + return 0; + + if (mgp->ss[0].tx.req_bytes == NULL) + return 0; + + del_timer_sync(&mgp->watchdog_timer); + mgp->running = MYRI10GE_ETH_STOPPING; + local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */ + for (i = 0; i < mgp->num_slices; i++) { + napi_disable(&mgp->ss[i].napi); + /* Lock the slice to prevent the busy_poll handler from + * accessing it. Later when we bring the NIC up, myri10ge_open + * resets the slice including this lock. 
+ */ + while (!myri10ge_ss_lock_napi(&mgp->ss[i])) { + pr_info("Slice %d locked\n", i); + mdelay(1); + } + } + local_bh_enable(); + netif_carrier_off(dev); + + netif_tx_stop_all_queues(dev); + if (mgp->rebooted == 0) { + old_down_cnt = mgp->down_cnt; + mb(); + status = + myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0); + if (status) + netdev_err(dev, "Couldn't bring down link\n"); + + wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, + HZ); + if (old_down_cnt == mgp->down_cnt) + netdev_err(dev, "never got down irq\n"); + } + netif_tx_disable(dev); + myri10ge_free_irq(mgp); + for (i = 0; i < mgp->num_slices; i++) + myri10ge_free_rings(&mgp->ss[i]); + + mgp->running = MYRI10GE_ETH_STOPPED; + return 0; +} + +/* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy + * backwards one at a time and handle ring wraps */ + +static inline void +myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx, + struct mcp_kreq_ether_send *src, int cnt) +{ + int idx, starting_slot; + starting_slot = tx->req; + while (cnt > 1) { + cnt--; + idx = (starting_slot + cnt) & tx->mask; + myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); + mb(); + } +} + +/* + * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy + * at most 32 bytes at a time, so as to avoid involving the software + * pio handler in the nic. We re-write the first segment's flags + * to mark them valid only after writing the entire chain. + */ + +static inline void +myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, + int cnt) +{ + int idx, i; + struct mcp_kreq_ether_send __iomem *dstp, *dst; + struct mcp_kreq_ether_send *srcp; + u8 last_flags; + + idx = tx->req & tx->mask; + + last_flags = src->flags; + src->flags = 0; + mb(); + dst = dstp = &tx->lanai[idx]; + srcp = src; + + if ((idx + cnt) < tx->mask) { + for (i = 0; i < (cnt - 1); i += 2) { + myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src)); + mb(); /* force write every 32 bytes */ + srcp += 2; + dstp += 2; + } + } else { + /* submit all but the first request, and ensure + * that it is submitted below */ + myri10ge_submit_req_backwards(tx, src, cnt); + i = 0; + } + if (i < cnt) { + /* submit the first request */ + myri10ge_pio_copy(dstp, srcp, sizeof(*src)); + mb(); /* barrier before setting valid flag */ + } + + /* re-write the last 32-bits with the valid flags */ + src->flags = last_flags; + put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3); + tx->req += cnt; + mb(); +} + +static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, + struct myri10ge_tx_buf *tx, int idx) +{ + unsigned int len; + int last_idx; + + /* Free any DMA resources we've alloced and clear out the skb slot */ + last_idx = (idx + 1) & tx->mask; + idx = tx->req & tx->mask; + do { + len = dma_unmap_len(&tx->info[idx], len); + if (len) { + if (tx->info[idx].skb != NULL) + pci_unmap_single(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + dma_unmap_len_set(&tx->info[idx], len, 0); + tx->info[idx].skb = NULL; + } + idx = (idx + 1) & tx->mask; + } while (idx != last_idx); +} + +/* + * Transmit a packet. We need to split the packet so that a single + * segment does not cross myri10ge->tx_boundary, so this makes segment + * counting tricky. So rather than try to count segments up front, we + * just give up if there are too few segments to hold a reasonably + * fragmented packet currently available. 
If we run + * out of segments while preparing a packet for DMA, we just linearize + * it and try again. + */ + +static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct myri10ge_priv *mgp = netdev_priv(dev); + struct myri10ge_slice_state *ss; + struct mcp_kreq_ether_send *req; + struct myri10ge_tx_buf *tx; + struct skb_frag_struct *frag; + struct netdev_queue *netdev_queue; + dma_addr_t bus; + u32 low; + __be32 high_swapped; + unsigned int len; + int idx, avail, frag_cnt, frag_idx, count, mss, max_segments; + u16 pseudo_hdr_offset, cksum_offset, queue; + int cum_len, seglen, boundary, rdma_count; + u8 flags, odd_flag; + + queue = skb_get_queue_mapping(skb); + ss = &mgp->ss[queue]; + netdev_queue = netdev_get_tx_queue(mgp->dev, queue); + tx = &ss->tx; + +again: + req = tx->req_list; + avail = tx->mask - 1 - (tx->req - tx->done); + + mss = 0; + max_segments = MXGEFW_MAX_SEND_DESC; + + if (skb_is_gso(skb)) { + mss = skb_shinfo(skb)->gso_size; + max_segments = MYRI10GE_MAX_SEND_DESC_TSO; + } + + if ((unlikely(avail < max_segments))) { + /* we are out of transmit resources */ + tx->stop_queue++; + netif_tx_stop_queue(netdev_queue); + return NETDEV_TX_BUSY; + } + + /* Setup checksum offloading, if needed */ + cksum_offset = 0; + pseudo_hdr_offset = 0; + odd_flag = 0; + flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + cksum_offset = skb_checksum_start_offset(skb); + pseudo_hdr_offset = cksum_offset + skb->csum_offset; + /* If the headers are excessively large, then we must + * fall back to a software checksum */ + if (unlikely(!mss && (cksum_offset > 255 || + pseudo_hdr_offset > 127))) { + if (skb_checksum_help(skb)) + goto drop; + cksum_offset = 0; + pseudo_hdr_offset = 0; + } else { + odd_flag = MXGEFW_FLAGS_ALIGN_ODD; + flags |= MXGEFW_FLAGS_CKSUM; + } + } + + cum_len = 0; + + if (mss) { /* TSO */ + /* this removes any CKSUM flag from before */ + flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST); + + /* negative cum_len signifies to the + * send loop that we are still in the + * header portion of the TSO packet. + * TSO header can be at most 1KB long */ + cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); + + /* for IPv6 TSO, the checksum offset stores the + * TCP header length, to save the firmware from + * the need to parse the headers */ + if (skb_is_gso_v6(skb)) { + cksum_offset = tcp_hdrlen(skb); + /* Can only handle headers <= max_tso6 long */ + if (unlikely(-cum_len > mgp->max_tso6)) + return myri10ge_sw_tso(skb, dev); + } + /* for TSO, pseudo_hdr_offset holds mss. + * The firmware figures out where to put + * the checksum by parsing the header. 
*/ + pseudo_hdr_offset = mss; + } else + /* Mark small packets, and pad out tiny packets */ + if (skb->len <= MXGEFW_SEND_SMALL_SIZE) { + flags |= MXGEFW_FLAGS_SMALL; + + /* pad frames to at least ETH_ZLEN bytes */ + if (eth_skb_pad(skb)) { + /* The packet is gone, so we must + * return 0 */ + ss->stats.tx_dropped += 1; + return NETDEV_TX_OK; + } + } + + /* map the skb for DMA */ + len = skb_headlen(skb); + bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) + goto drop; + + idx = tx->req & tx->mask; + tx->info[idx].skb = skb; + dma_unmap_addr_set(&tx->info[idx], bus, bus); + dma_unmap_len_set(&tx->info[idx], len, len); + + frag_cnt = skb_shinfo(skb)->nr_frags; + frag_idx = 0; + count = 0; + rdma_count = 0; + + /* "rdma_count" is the number of RDMAs belonging to the + * current packet BEFORE the current send request. For + * non-TSO packets, this is equal to "count". + * For TSO packets, rdma_count needs to be reset + * to 0 after a segment cut. + * + * The rdma_count field of the send request is + * the number of RDMAs of the packet starting at + * that request. For TSO send requests with one ore more cuts + * in the middle, this is the number of RDMAs starting + * after the last cut in the request. All previous + * segments before the last cut implicitly have 1 RDMA. + * + * Since the number of RDMAs is not known beforehand, + * it must be filled-in retroactively - after each + * segmentation cut or at the end of the entire packet. + */ + + while (1) { + /* Break the SKB or Fragment up into pieces which + * do not cross mgp->tx_boundary */ + low = MYRI10GE_LOWPART_TO_U32(bus); + high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); + while (len) { + u8 flags_next; + int cum_len_next; + + if (unlikely(count == max_segments)) + goto abort_linearize; + + boundary = + (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); + seglen = boundary - low; + if (seglen > len) + seglen = len; + flags_next = flags & ~MXGEFW_FLAGS_FIRST; + cum_len_next = cum_len + seglen; + if (mss) { /* TSO */ + (req - rdma_count)->rdma_count = rdma_count + 1; + + if (likely(cum_len >= 0)) { /* payload */ + int next_is_first, chop; + + chop = (cum_len_next > mss); + cum_len_next = cum_len_next % mss; + next_is_first = (cum_len_next == 0); + flags |= chop * MXGEFW_FLAGS_TSO_CHOP; + flags_next |= next_is_first * + MXGEFW_FLAGS_FIRST; + rdma_count |= -(chop | next_is_first); + rdma_count += chop & ~next_is_first; + } else if (likely(cum_len_next >= 0)) { /* header ends */ + int small; + + rdma_count = -1; + cum_len_next = 0; + seglen = -cum_len; + small = (mss <= MXGEFW_SEND_SMALL_SIZE); + flags_next = MXGEFW_FLAGS_TSO_PLD | + MXGEFW_FLAGS_FIRST | + (small * MXGEFW_FLAGS_SMALL); + } + } + req->addr_high = high_swapped; + req->addr_low = htonl(low); + req->pseudo_hdr_offset = htons(pseudo_hdr_offset); + req->pad = 0; /* complete solid 16-byte block; does this matter? 
*/ + req->rdma_count = 1; + req->length = htons(seglen); + req->cksum_offset = cksum_offset; + req->flags = flags | ((cum_len & 1) * odd_flag); + + low += seglen; + len -= seglen; + cum_len = cum_len_next; + flags = flags_next; + req++; + count++; + rdma_count++; + if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) { + if (unlikely(cksum_offset > seglen)) + cksum_offset -= seglen; + else + cksum_offset = 0; + } + } + if (frag_idx == frag_cnt) + break; + + /* map next fragment for DMA */ + frag = &skb_shinfo(skb)->frags[frag_idx]; + frag_idx++; + len = skb_frag_size(frag); + bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, + DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + myri10ge_unmap_tx_dma(mgp, tx, idx); + goto drop; + } + idx = (count + tx->req) & tx->mask; + dma_unmap_addr_set(&tx->info[idx], bus, bus); + dma_unmap_len_set(&tx->info[idx], len, len); + } + + (req - rdma_count)->rdma_count = rdma_count; + if (mss) + do { + req--; + req->flags |= MXGEFW_FLAGS_TSO_LAST; + } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | + MXGEFW_FLAGS_FIRST))); + idx = ((count - 1) + tx->req) & tx->mask; + tx->info[idx].last = 1; + myri10ge_submit_req(tx, tx->req_list, count); + /* if using multiple tx queues, make sure NIC polls the + * current slice */ + if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) { + tx->queue_active = 1; + put_be32(htonl(1), tx->send_go); + mb(); + mmiowb(); + } + tx->pkt_start++; + if ((avail - count) < MXGEFW_MAX_SEND_DESC) { + tx->stop_queue++; + netif_tx_stop_queue(netdev_queue); + } + return NETDEV_TX_OK; + +abort_linearize: + myri10ge_unmap_tx_dma(mgp, tx, idx); + + if (skb_is_gso(skb)) { + netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); + goto drop; + } + + if (skb_linearize(skb)) + goto drop; + + tx->linearized++; + goto again; + +drop: + dev_kfree_skb_any(skb); + ss->stats.tx_dropped += 1; + return NETDEV_TX_OK; + +} + +static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, + struct net_device *dev) +{ + struct sk_buff *segs, *curr; + struct myri10ge_priv *mgp = netdev_priv(dev); + struct myri10ge_slice_state *ss; + netdev_tx_t status; + + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); + if (IS_ERR(segs)) + goto drop; + + while (segs) { + curr = segs; + segs = segs->next; + curr->next = NULL; + status = myri10ge_xmit(curr, dev); + if (status != 0) { + dev_kfree_skb_any(curr); + if (segs != NULL) { + curr = segs; + segs = segs->next; + curr->next = NULL; + dev_kfree_skb_any(segs); + } + goto drop; + } + } + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + +drop: + ss = &mgp->ss[skb_get_queue_mapping(skb)]; + dev_kfree_skb_any(skb); + ss->stats.tx_dropped += 1; + return NETDEV_TX_OK; +} + +static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + const struct myri10ge_priv *mgp = netdev_priv(dev); + const struct myri10ge_slice_netstats *slice_stats; + int i; + + for (i = 0; i < mgp->num_slices; i++) { + slice_stats = &mgp->ss[i].stats; + stats->rx_packets += slice_stats->rx_packets; + stats->tx_packets += slice_stats->tx_packets; + stats->rx_bytes += slice_stats->rx_bytes; + stats->tx_bytes += slice_stats->tx_bytes; + stats->rx_dropped += slice_stats->rx_dropped; + stats->tx_dropped += slice_stats->tx_dropped; + } + return stats; +} + +static void myri10ge_set_multicast_list(struct net_device *dev) +{ + struct myri10ge_priv *mgp = netdev_priv(dev); + struct myri10ge_cmd cmd; + struct netdev_hw_addr *ha; + __be32 data[2] = { 0, 0 }; + int 
err; + + /* can be called from atomic contexts, + * pass 1 to force atomicity in myri10ge_send_cmd() */ + myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); + + /* This firmware is known to not support multicast */ + if (!mgp->fw_multicast_support) + return; + + /* Disable multicast filtering */ + + err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1); + if (err != 0) { + netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n", + err); + goto abort; + } + + if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) { + /* request to disable multicast filtering, so quit here */ + return; + } + + /* Flush the filters */ + + err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, + &cmd, 1); + if (err != 0) { + netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n", + err); + goto abort; + } + + /* Walk the multicast list, and add each address */ + netdev_for_each_mc_addr(ha, dev) { + memcpy(data, &ha->addr, ETH_ALEN); + cmd.data0 = ntohl(data[0]); + cmd.data1 = ntohl(data[1]); + err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, + &cmd, 1); + + if (err != 0) { + netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n", + err, ha->addr); + goto abort; + } + } + /* Enable multicast filtering */ + err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1); + if (err != 0) { + netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n", + err); + goto abort; + } + + return; + +abort: + return; +} + +static int myri10ge_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + struct myri10ge_priv *mgp = netdev_priv(dev); + int status; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + status = myri10ge_update_mac_address(mgp, sa->sa_data); + if (status != 0) { + netdev_err(dev, "changing mac address failed with %d\n", + status); + return status; + } + + /* change the dev structure */ + memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); + return 0; +} + +static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) +{ + struct myri10ge_priv *mgp = netdev_priv(dev); + int error = 0; + + if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) { + netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu); + return -EINVAL; + } + netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu); + if (mgp->running) { + /* if we change the mtu on an active device, we must + * reset the device so the firmware sees the change */ + myri10ge_close(dev); + dev->mtu = new_mtu; + myri10ge_open(dev); + } else + dev->mtu = new_mtu; + + return error; +} + +/* + * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary. + * Only do it if the bridge is a root port since we don't want to disturb + * any other device, except if forced with myri10ge_ecrc_enable > 1. 
+ */ + +static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) +{ + struct pci_dev *bridge = mgp->pdev->bus->self; + struct device *dev = &mgp->pdev->dev; + int cap; + unsigned err_cap; + int ret; + + if (!myri10ge_ecrc_enable || !bridge) + return; + + /* check that the bridge is a root port */ + if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) { + if (myri10ge_ecrc_enable > 1) { + struct pci_dev *prev_bridge, *old_bridge = bridge; + + /* Walk the hierarchy up to the root port + * where ECRC has to be enabled */ + do { + prev_bridge = bridge; + bridge = bridge->bus->self; + if (!bridge || prev_bridge == bridge) { + dev_err(dev, + "Failed to find root port" + " to force ECRC\n"); + return; + } + } while (pci_pcie_type(bridge) != + PCI_EXP_TYPE_ROOT_PORT); + + dev_info(dev, + "Forcing ECRC on non-root port %s" + " (enabling on root port %s)\n", + pci_name(old_bridge), pci_name(bridge)); + } else { + dev_err(dev, + "Not enabling ECRC on non-root port %s\n", + pci_name(bridge)); + return; + } + } + + cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR); + if (!cap) + return; + + ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap); + if (ret) { + dev_err(dev, "failed reading ext-conf-space of %s\n", + pci_name(bridge)); + dev_err(dev, "\t pci=nommconf in use? " + "or buggy/incomplete/absent ACPI MCFG attr?\n"); + return; + } + if (!(err_cap & PCI_ERR_CAP_ECRC_GENC)) + return; + + err_cap |= PCI_ERR_CAP_ECRC_GENE; + pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap); + dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge)); +} + +/* + * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput + * when the PCI-E Completion packets are aligned on an 8-byte + * boundary. Some PCI-E chip sets always align Completion packets; on + * the ones that do not, the alignment can be enforced by enabling + * ECRC generation (if supported). + * + * When PCI-E Completion packets are not aligned, it is actually more + * efficient to limit Read-DMA transactions to 2KB, rather than 4KB. + * + * /*(DEBLOBBED)*/ + +static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) +{ + struct pci_dev *pdev = mgp->pdev; + struct device *dev = &pdev->dev; + int status; + + mgp->tx_boundary = 4096; + /* + * Verify the max read request size was set to 4KB + * before trying the test with 4KB. + */ + status = pcie_get_readrq(pdev); + if (status < 0) { + dev_err(dev, "Couldn't read max read req size: %d\n", status); + goto abort; + } + if (status != 4096) { + dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); + mgp->tx_boundary = 2048; + } + /* + * load the optimized firmware (which assumes aligned PCIe + * completions) in order to see if it works on this host. + */ + set_fw_name(mgp, myri10ge_fw_aligned, false); + status = myri10ge_load_firmware(mgp, 1); + if (status != 0) { + goto abort; + } + + /* + * Enable ECRC if possible + */ + myri10ge_enable_ecrc(mgp); + + /* + * Run a DMA test which watches for unaligned completions and + * aborts on the first one seen. + */ + + status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST); + if (status == 0) + return; /* keep the aligned firmware */ + + if (status != -E2BIG) + dev_warn(dev, "DMA test failed: %d\n", status); + if (status == -ENOSYS) + dev_warn(dev, "Falling back to ethp! 
" + "Please install up to date fw\n"); +abort: + /* fall back to using the unaligned firmware */ + mgp->tx_boundary = 2048; + set_fw_name(mgp, myri10ge_fw_unaligned, false); +} + +static void myri10ge_select_firmware(struct myri10ge_priv *mgp) +{ + int overridden = 0; + + if (myri10ge_force_firmware == 0) { + int link_width; + u16 lnk; + + pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk); + link_width = (lnk >> 4) & 0x3f; + + /* Check to see if Link is less than 8 or if the + * upstream bridge is known to provide aligned + * completions */ + if (link_width < 8) { + dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", + link_width); + mgp->tx_boundary = 4096; + set_fw_name(mgp, myri10ge_fw_aligned, false); + } else { + myri10ge_firmware_probe(mgp); + } + } else { + if (myri10ge_force_firmware == 1) { + dev_info(&mgp->pdev->dev, + "Assuming aligned completions (forced)\n"); + mgp->tx_boundary = 4096; + set_fw_name(mgp, myri10ge_fw_aligned, false); + } else { + dev_info(&mgp->pdev->dev, + "Assuming unaligned completions (forced)\n"); + mgp->tx_boundary = 2048; + set_fw_name(mgp, myri10ge_fw_unaligned, false); + } + } + + kparam_block_sysfs_write(myri10ge_fw_name); + if (myri10ge_fw_name != NULL) { + char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL); + if (fw_name) { + overridden = 1; + set_fw_name(mgp, fw_name, true); + } + } + kparam_unblock_sysfs_write(myri10ge_fw_name); + + if (mgp->board_number < MYRI10GE_MAX_BOARDS && + myri10ge_fw_names[mgp->board_number] != NULL && + strlen(myri10ge_fw_names[mgp->board_number])) { + set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false); + overridden = 1; + } + if (overridden) + dev_info(&mgp->pdev->dev, "overriding firmware to %s\n", + mgp->fw_name); +} + +static void myri10ge_mask_surprise_down(struct pci_dev *pdev) +{ + struct pci_dev *bridge = pdev->bus->self; + int cap; + u32 mask; + + if (bridge == NULL) + return; + + cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR); + if (cap) { + /* a sram parity error can cause a surprise link + * down; since we expect and can recover from sram + * parity errors, mask surprise link down events */ + pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask); + mask |= 0x20; + pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask); + } +} + +#ifdef CONFIG_PM +static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct myri10ge_priv *mgp; + struct net_device *netdev; + + mgp = pci_get_drvdata(pdev); + if (mgp == NULL) + return -EINVAL; + netdev = mgp->dev; + + netif_device_detach(netdev); + if (netif_running(netdev)) { + netdev_info(netdev, "closing\n"); + rtnl_lock(); + myri10ge_close(netdev); + rtnl_unlock(); + } + myri10ge_dummy_rdma(mgp, 0); + pci_save_state(pdev); + pci_disable_device(pdev); + + return pci_set_power_state(pdev, pci_choose_state(pdev, state)); +} + +static int myri10ge_resume(struct pci_dev *pdev) +{ + struct myri10ge_priv *mgp; + struct net_device *netdev; + int status; + u16 vendor; + + mgp = pci_get_drvdata(pdev); + if (mgp == NULL) + return -EINVAL; + netdev = mgp->dev; + pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */ + msleep(5); /* give card time to respond */ + pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); + if (vendor == 0xffff) { + netdev_err(mgp->dev, "device disappeared!\n"); + return -EIO; + } + + pci_restore_state(pdev); + + status = pci_enable_device(pdev); + if (status) { + dev_err(&pdev->dev, "failed to enable device\n"); + return status; + } + + pci_set_master(pdev); + + 
myri10ge_reset(mgp); + myri10ge_dummy_rdma(mgp, 1); + + /* Save configuration space to be restored if the + * nic resets due to a parity error */ + pci_save_state(pdev); + + if (netif_running(netdev)) { + rtnl_lock(); + status = myri10ge_open(netdev); + rtnl_unlock(); + if (status != 0) + goto abort_with_enabled; + + } + netif_device_attach(netdev); + + return 0; + +abort_with_enabled: + pci_disable_device(pdev); + return -EIO; + +} +#endif /* CONFIG_PM */ + +static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) +{ + struct pci_dev *pdev = mgp->pdev; + int vs = mgp->vendor_specific_offset; + u32 reboot; + + /*enter read32 mode */ + pci_write_config_byte(pdev, vs + 0x10, 0x3); + + /*read REBOOT_STATUS (0xfffffff0) */ + pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0); + pci_read_config_dword(pdev, vs + 0x14, &reboot); + return reboot; +} + +static void +myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed, + int *busy_slice_cnt, u32 rx_pause_cnt) +{ + struct myri10ge_priv *mgp = ss->mgp; + int slice = ss - mgp->ss; + + if (ss->tx.req != ss->tx.done && + ss->tx.done == ss->watchdog_tx_done && + ss->watchdog_tx_req != ss->watchdog_tx_done) { + /* nic seems like it might be stuck.. */ + if (rx_pause_cnt != mgp->watchdog_pause) { + if (net_ratelimit()) + netdev_warn(mgp->dev, "slice %d: TX paused, " + "check link partner\n", slice); + } else { + netdev_warn(mgp->dev, + "slice %d: TX stuck %d %d %d %d %d %d\n", + slice, ss->tx.queue_active, ss->tx.req, + ss->tx.done, ss->tx.pkt_start, + ss->tx.pkt_done, + (int)ntohl(mgp->ss[slice].fw_stats-> + send_done_count)); + *reset_needed = 1; + ss->stuck = 1; + } + } + if (ss->watchdog_tx_done != ss->tx.done || + ss->watchdog_rx_done != ss->rx_done.cnt) { + *busy_slice_cnt += 1; + } + ss->watchdog_tx_done = ss->tx.done; + ss->watchdog_tx_req = ss->tx.req; + ss->watchdog_rx_done = ss->rx_done.cnt; +} + +/* + * This watchdog is used to check whether the board has suffered + * from a parity error and needs to be recovered. + */ +static void myri10ge_watchdog(struct work_struct *work) +{ + struct myri10ge_priv *mgp = + container_of(work, struct myri10ge_priv, watchdog_work); + struct myri10ge_slice_state *ss; + u32 reboot, rx_pause_cnt; + int status, rebooted; + int i; + int reset_needed = 0; + int busy_slice_cnt = 0; + u16 cmd, vendor; + + mgp->watchdog_resets++; + pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd); + rebooted = 0; + if ((cmd & PCI_COMMAND_MASTER) == 0) { + /* Bus master DMA disabled? Check to see + * if the card rebooted due to a parity error + * For now, just report it */ + reboot = myri10ge_read_reboot(mgp); + netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n", + reboot, myri10ge_reset_recover ? "" : " not"); + if (myri10ge_reset_recover == 0) + return; + rtnl_lock(); + mgp->rebooted = 1; + rebooted = 1; + myri10ge_close(mgp->dev); + myri10ge_reset_recover--; + mgp->rebooted = 0; + /* + * A rebooted nic will come back with config space as + * it was after power was applied to PCIe bus. + * Attempt to restore config space which was saved + * when the driver was loaded, or the last time the + * nic was resumed from power saving mode. + */ + pci_restore_state(mgp->pdev); + + /* save state again for accounting reasons */ + pci_save_state(mgp->pdev); + + } else { + /* if we get back -1's from our slot, perhaps somebody + * powered off our card. 
Don't try to reset it in + * this case */ + if (cmd == 0xffff) { + pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); + if (vendor == 0xffff) { + netdev_err(mgp->dev, "device disappeared!\n"); + return; + } + } + /* Perhaps it is a software error. See if stuck slice + * has recovered, reset if not */ + rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); + for (i = 0; i < mgp->num_slices; i++) { + ss = mgp->ss; + if (ss->stuck) { + myri10ge_check_slice(ss, &reset_needed, + &busy_slice_cnt, + rx_pause_cnt); + ss->stuck = 0; + } + } + if (!reset_needed) { + netdev_dbg(mgp->dev, "not resetting\n"); + return; + } + + netdev_err(mgp->dev, "device timeout, resetting\n"); + } + + if (!rebooted) { + rtnl_lock(); + myri10ge_close(mgp->dev); + } + status = myri10ge_load_firmware(mgp, 1); + if (status != 0) + netdev_err(mgp->dev, "failed to load firmware\n"); + else + myri10ge_open(mgp->dev); + rtnl_unlock(); +} + +/* + * We use our own timer routine rather than relying upon + * netdev->tx_timeout because we have a very large hardware transmit + * queue. Due to the large queue, the netdev->tx_timeout function + * cannot detect a NIC with a parity error in a timely fashion if the + * NIC is lightly loaded. + */ +static void myri10ge_watchdog_timer(unsigned long arg) +{ + struct myri10ge_priv *mgp; + struct myri10ge_slice_state *ss; + int i, reset_needed, busy_slice_cnt; + u32 rx_pause_cnt; + u16 cmd; + + mgp = (struct myri10ge_priv *)arg; + + rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); + busy_slice_cnt = 0; + for (i = 0, reset_needed = 0; + i < mgp->num_slices && reset_needed == 0; ++i) { + + ss = &mgp->ss[i]; + if (ss->rx_small.watchdog_needed) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, + mgp->small_bytes + MXGEFW_PAD, + 1); + if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= + myri10ge_fill_thresh) + ss->rx_small.watchdog_needed = 0; + } + if (ss->rx_big.watchdog_needed) { + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, + mgp->big_bytes, 1); + if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= + myri10ge_fill_thresh) + ss->rx_big.watchdog_needed = 0; + } + myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt, + rx_pause_cnt); + } + /* if we've sent or received no traffic, poll the NIC to + * ensure it is still there. Otherwise, we risk not noticing + * an error in a timely fashion */ + if (busy_slice_cnt == 0) { + pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd); + if ((cmd & PCI_COMMAND_MASTER) == 0) { + reset_needed = 1; + } + } + mgp->watchdog_pause = rx_pause_cnt; + + if (reset_needed) { + schedule_work(&mgp->watchdog_work); + } else { + /* rearm timer */ + mod_timer(&mgp->watchdog_timer, + jiffies + myri10ge_watchdog_timeout * HZ); + } +} + +static void myri10ge_free_slices(struct myri10ge_priv *mgp) +{ + struct myri10ge_slice_state *ss; + struct pci_dev *pdev = mgp->pdev; + size_t bytes; + int i; + + if (mgp->ss == NULL) + return; + + for (i = 0; i < mgp->num_slices; i++) { + ss = &mgp->ss[i]; + if (ss->rx_done.entry != NULL) { + bytes = mgp->max_intr_slots * + sizeof(*ss->rx_done.entry); + dma_free_coherent(&pdev->dev, bytes, + ss->rx_done.entry, ss->rx_done.bus); + ss->rx_done.entry = NULL; + } + if (ss->fw_stats != NULL) { + bytes = sizeof(*ss->fw_stats); + dma_free_coherent(&pdev->dev, bytes, + ss->fw_stats, ss->fw_stats_bus); + ss->fw_stats = NULL; + } + napi_hash_del(&ss->napi); + netif_napi_del(&ss->napi); + } + /* Wait till napi structs are no longer used, and then free ss. 
*/ + synchronize_rcu(); + kfree(mgp->ss); + mgp->ss = NULL; +} + +static int myri10ge_alloc_slices(struct myri10ge_priv *mgp) +{ + struct myri10ge_slice_state *ss; + struct pci_dev *pdev = mgp->pdev; + size_t bytes; + int i; + + bytes = sizeof(*mgp->ss) * mgp->num_slices; + mgp->ss = kzalloc(bytes, GFP_KERNEL); + if (mgp->ss == NULL) { + return -ENOMEM; + } + + for (i = 0; i < mgp->num_slices; i++) { + ss = &mgp->ss[i]; + bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); + ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, + &ss->rx_done.bus, + GFP_KERNEL); + if (ss->rx_done.entry == NULL) + goto abort; + bytes = sizeof(*ss->fw_stats); + ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, + &ss->fw_stats_bus, + GFP_KERNEL); + if (ss->fw_stats == NULL) + goto abort; + ss->mgp = mgp; + ss->dev = mgp->dev; + netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, + myri10ge_napi_weight); + napi_hash_add(&ss->napi); + } + return 0; +abort: + myri10ge_free_slices(mgp); + return -ENOMEM; +} + +/* + * This function determines the number of slices supported. + * The number slices is the minimum of the number of CPUS, + * the number of MSI-X irqs supported, the number of slices + * supported by the firmware + */ +static void myri10ge_probe_slices(struct myri10ge_priv *mgp) +{ + struct myri10ge_cmd cmd; + struct pci_dev *pdev = mgp->pdev; + char *old_fw; + bool old_allocated; + int i, status, ncpus; + + mgp->num_slices = 1; + ncpus = netif_get_num_default_rss_queues(); + + if (myri10ge_max_slices == 1 || !pdev->msix_cap || + (myri10ge_max_slices == -1 && ncpus < 2)) + return; + + /* try to load the slice aware rss firmware */ + old_fw = mgp->fw_name; + old_allocated = mgp->fw_name_allocated; + /* don't free old_fw if we override it. */ + mgp->fw_name_allocated = false; + + if (myri10ge_fw_name != NULL) { + dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n", + myri10ge_fw_name); + set_fw_name(mgp, myri10ge_fw_name, false); + } else if (old_fw == myri10ge_fw_aligned) + set_fw_name(mgp, myri10ge_fw_rss_aligned, false); + else + set_fw_name(mgp, myri10ge_fw_rss_unaligned, false); + status = myri10ge_load_firmware(mgp, 0); + if (status != 0) { + dev_info(&pdev->dev, "Rss firmware not found\n"); + if (old_allocated) + kfree(old_fw); + return; + } + + /* hit the board with a reset to ensure it is alive */ + memset(&cmd, 0, sizeof(cmd)); + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0); + if (status != 0) { + dev_err(&mgp->pdev->dev, "failed reset\n"); + goto abort_with_fw; + } + + mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot); + + /* tell it the size of the interrupt queues */ + cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot); + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); + if (status != 0) { + dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n"); + goto abort_with_fw; + } + + /* ask the maximum number of slices it supports */ + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0); + if (status != 0) + goto abort_with_fw; + else + mgp->num_slices = cmd.data0; + + /* Only allow multiple slices if MSI-X is usable */ + if (!myri10ge_msi) { + goto abort_with_fw; + } + + /* if the admin did not specify a limit to how many + * slices we should use, cap it automatically to the + * number of CPUs currently online */ + if (myri10ge_max_slices == -1) + myri10ge_max_slices = ncpus; + + if (mgp->num_slices > myri10ge_max_slices) + mgp->num_slices = myri10ge_max_slices; + + /* Now try to allocate as many MSI-X 
vectors as we have + * slices. We give up on MSI-X if we can only get a single + * vector. */ + + mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors), + GFP_KERNEL); + if (mgp->msix_vectors == NULL) + goto no_msix; + for (i = 0; i < mgp->num_slices; i++) { + mgp->msix_vectors[i].entry = i; + } + + while (mgp->num_slices > 1) { + mgp->num_slices = rounddown_pow_of_two(mgp->num_slices); + if (mgp->num_slices == 1) + goto no_msix; + status = pci_enable_msix_range(pdev, + mgp->msix_vectors, + mgp->num_slices, + mgp->num_slices); + if (status < 0) + goto no_msix; + + pci_disable_msix(pdev); + + if (status == mgp->num_slices) { + if (old_allocated) + kfree(old_fw); + return; + } else { + mgp->num_slices = status; + } + } + +no_msix: + if (mgp->msix_vectors != NULL) { + kfree(mgp->msix_vectors); + mgp->msix_vectors = NULL; + } + +abort_with_fw: + mgp->num_slices = 1; + set_fw_name(mgp, old_fw, old_allocated); + myri10ge_load_firmware(mgp, 0); +} + +static const struct net_device_ops myri10ge_netdev_ops = { + .ndo_open = myri10ge_open, + .ndo_stop = myri10ge_close, + .ndo_start_xmit = myri10ge_xmit, + .ndo_get_stats64 = myri10ge_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = myri10ge_change_mtu, + .ndo_set_rx_mode = myri10ge_set_multicast_list, + .ndo_set_mac_address = myri10ge_set_mac_address, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = myri10ge_busy_poll, +#endif +}; + +static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct myri10ge_priv *mgp; + struct device *dev = &pdev->dev; + int i; + int status = -ENXIO; + int dac_enabled; + unsigned hdr_offset, ss_offset; + static int board_number; + + netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES); + if (netdev == NULL) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + mgp = netdev_priv(netdev); + mgp->dev = netdev; + mgp->pdev = pdev; + mgp->pause = myri10ge_flow_control; + mgp->intr_coal_delay = myri10ge_intr_coal_delay; + mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT); + mgp->board_number = board_number; + init_waitqueue_head(&mgp->down_wq); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "pci_enable_device call failed\n"); + status = -ENODEV; + goto abort_with_netdev; + } + + /* Find the vendor-specific cap so we can check + * the reboot register later on */ + mgp->vendor_specific_offset + = pci_find_capability(pdev, PCI_CAP_ID_VNDR); + + /* Set our max read request to 4KB */ + status = pcie_set_readrq(pdev, 4096); + if (status != 0) { + dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", + status); + goto abort_with_enabled; + } + + myri10ge_mask_surprise_down(pdev); + pci_set_master(pdev); + dac_enabled = 1; + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (status != 0) { + dac_enabled = 0; + dev_err(&pdev->dev, + "64-bit pci address mask was refused, " + "trying 32-bit\n"); + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + } + if (status != 0) { + dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); + goto abort_with_enabled; + } + (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), + &mgp->cmd_bus, GFP_KERNEL); + if (!mgp->cmd) { + status = -ENOMEM; + goto abort_with_enabled; + } + + mgp->board_span = pci_resource_len(pdev, 0); + mgp->iomem_base = pci_resource_start(pdev, 0); + mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span); + mgp->sram = 
ioremap_wc(mgp->iomem_base, mgp->board_span); + if (mgp->sram == NULL) { + dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", + mgp->board_span, mgp->iomem_base); + status = -ENXIO; + goto abort_with_mtrr; + } + hdr_offset = + swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc; + ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs); + mgp->sram_size = swab32(readl(mgp->sram + ss_offset)); + if (mgp->sram_size > mgp->board_span || + mgp->sram_size <= MYRI10GE_FW_OFFSET) { + dev_err(&pdev->dev, + "invalid sram_size %dB or board span %ldB\n", + mgp->sram_size, mgp->board_span); + goto abort_with_ioremap; + } + memcpy_fromio(mgp->eeprom_strings, + mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE); + memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2); + status = myri10ge_read_mac_addr(mgp); + if (status) + goto abort_with_ioremap; + + for (i = 0; i < ETH_ALEN; i++) + netdev->dev_addr[i] = mgp->mac_addr[i]; + + myri10ge_select_firmware(mgp); + + status = myri10ge_load_firmware(mgp, 1); + if (status != 0) { + dev_err(&pdev->dev, "failed to load firmware\n"); + goto abort_with_ioremap; + } + myri10ge_probe_slices(mgp); + status = myri10ge_alloc_slices(mgp); + if (status != 0) { + dev_err(&pdev->dev, "failed to alloc slice state\n"); + goto abort_with_firmware; + } + netif_set_real_num_tx_queues(netdev, mgp->num_slices); + netif_set_real_num_rx_queues(netdev, mgp->num_slices); + status = myri10ge_reset(mgp); + if (status != 0) { + dev_err(&pdev->dev, "failed reset\n"); + goto abort_with_slices; + } +#ifdef CONFIG_MYRI10GE_DCA + myri10ge_setup_dca(mgp); +#endif + pci_set_drvdata(pdev, mgp); + if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU) + myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; + if ((myri10ge_initial_mtu + ETH_HLEN) < 68) + myri10ge_initial_mtu = 68; + + netdev->netdev_ops = &myri10ge_netdev_ops; + netdev->mtu = myri10ge_initial_mtu; + netdev->hw_features = mgp->features | NETIF_F_RXCSUM; + + /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */ + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + + netdev->features = netdev->hw_features; + + if (dac_enabled) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= mgp->features; + if (mgp->fw_ver_tiny < 37) + netdev->vlan_features &= ~NETIF_F_TSO6; + if (mgp->fw_ver_tiny < 32) + netdev->vlan_features &= ~NETIF_F_TSO; + + /* make sure we can get an irq, and that MSI can be + * setup (if available). */ + status = myri10ge_request_irq(mgp); + if (status != 0) + goto abort_with_firmware; + myri10ge_free_irq(mgp); + + /* Save configuration space to be restored if the + * nic resets due to a parity error */ + pci_save_state(pdev); + + /* Setup the watchdog timer */ + setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, + (unsigned long)mgp); + + netdev->ethtool_ops = &myri10ge_ethtool_ops; + INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); + status = register_netdev(netdev); + if (status != 0) { + dev_err(&pdev->dev, "register_netdev failed: %d\n", status); + goto abort_with_state; + } + if (mgp->msix_enabled) + dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", + mgp->num_slices, mgp->tx_boundary, mgp->fw_name, + (mgp->wc_cookie > 0 ? "Enabled" : "Disabled")); + else + dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", + mgp->msi_enabled ? "MSI" : "xPIC", + pdev->irq, mgp->tx_boundary, mgp->fw_name, + (mgp->wc_cookie > 0 ? 
"Enabled" : "Disabled")); + + board_number++; + return 0; + +abort_with_state: + pci_restore_state(pdev); + +abort_with_slices: + myri10ge_free_slices(mgp); + +abort_with_firmware: + myri10ge_dummy_rdma(mgp, 0); + +abort_with_ioremap: + if (mgp->mac_addr_string != NULL) + dev_err(&pdev->dev, + "myri10ge_probe() failed: MAC=%s, SN=%ld\n", + mgp->mac_addr_string, mgp->serial_number); + iounmap(mgp->sram); + +abort_with_mtrr: + arch_phys_wc_del(mgp->wc_cookie); + dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), + mgp->cmd, mgp->cmd_bus); + +abort_with_enabled: + pci_disable_device(pdev); + +abort_with_netdev: + set_fw_name(mgp, NULL, false); + free_netdev(netdev); + return status; +} + +/* + * myri10ge_remove + * + * Does what is necessary to shutdown one Myrinet device. Called + * once for each Myrinet card by the kernel when a module is + * unloaded. + */ +static void myri10ge_remove(struct pci_dev *pdev) +{ + struct myri10ge_priv *mgp; + struct net_device *netdev; + + mgp = pci_get_drvdata(pdev); + if (mgp == NULL) + return; + + cancel_work_sync(&mgp->watchdog_work); + netdev = mgp->dev; + unregister_netdev(netdev); + +#ifdef CONFIG_MYRI10GE_DCA + myri10ge_teardown_dca(mgp); +#endif + myri10ge_dummy_rdma(mgp, 0); + + /* avoid a memory leak */ + pci_restore_state(pdev); + + iounmap(mgp->sram); + arch_phys_wc_del(mgp->wc_cookie); + myri10ge_free_slices(mgp); + kfree(mgp->msix_vectors); + dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), + mgp->cmd, mgp->cmd_bus); + + set_fw_name(mgp, NULL, false); + free_netdev(netdev); + pci_disable_device(pdev); +} + +#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008 +#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009 + +static const struct pci_device_id myri10ge_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)}, + {PCI_DEVICE + (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl); + +static struct pci_driver myri10ge_driver = { + .name = "myri10ge", + .probe = myri10ge_probe, + .remove = myri10ge_remove, + .id_table = myri10ge_pci_tbl, +#ifdef CONFIG_PM + .suspend = myri10ge_suspend, + .resume = myri10ge_resume, +#endif +}; + +#ifdef CONFIG_MYRI10GE_DCA +static int +myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p) +{ + int err = driver_for_each_device(&myri10ge_driver.driver, + NULL, &event, + myri10ge_notify_dca_device); + + if (err) + return NOTIFY_BAD; + return NOTIFY_DONE; +} + +static struct notifier_block myri10ge_dca_notifier = { + .notifier_call = myri10ge_notify_dca, + .next = NULL, + .priority = 0, +}; +#endif /* CONFIG_MYRI10GE_DCA */ + +static __init int myri10ge_init_module(void) +{ + pr_info("Version %s\n", MYRI10GE_VERSION_STR); + + if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) { + pr_err("Illegal rssh hash type %d, defaulting to source port\n", + myri10ge_rss_hash); + myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT; + } +#ifdef CONFIG_MYRI10GE_DCA + dca_register_notify(&myri10ge_dca_notifier); +#endif + if (myri10ge_max_slices > MYRI10GE_MAX_SLICES) + myri10ge_max_slices = MYRI10GE_MAX_SLICES; + + return pci_register_driver(&myri10ge_driver); +} + +module_init(myri10ge_init_module); + +static __exit void myri10ge_cleanup_module(void) +{ +#ifdef CONFIG_MYRI10GE_DCA + dca_unregister_notify(&myri10ge_dca_notifier); +#endif + pci_unregister_driver(&myri10ge_driver); +} + +module_exit(myri10ge_cleanup_module); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h 
b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h new file mode 100644 index 000000000..b7fc26c4f --- /dev/null +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h @@ -0,0 +1,435 @@ +#ifndef __MYRI10GE_MCP_H__ +#define __MYRI10GE_MCP_H__ + +#define MXGEFW_VERSION_MAJOR 1 +#define MXGEFW_VERSION_MINOR 4 + +/* 8 Bytes */ +struct mcp_dma_addr { + __be32 high; + __be32 low; +}; + +/* 4 Bytes */ +struct mcp_slot { + __sum16 checksum; + __be16 length; +}; + +/* 64 Bytes */ +struct mcp_cmd { + __be32 cmd; + __be32 data0; /* will be low portion if data > 32 bits */ + /* 8 */ + __be32 data1; /* will be high portion if data > 32 bits */ + __be32 data2; /* currently unused.. */ + /* 16 */ + struct mcp_dma_addr response_addr; + /* 24 */ + u8 pad[40]; +}; + +/* 8 Bytes */ +struct mcp_cmd_response { + __be32 data; + __be32 result; +}; + +/* + * flags used in mcp_kreq_ether_send_t: + * + * The SMALL flag is only needed in the first segment. It is raised + * for packets that are total less or equal 512 bytes. + * + * The CKSUM flag must be set in all segments. + * + * The PADDED flags is set if the packet needs to be padded, and it + * must be set for all segments. + * + * The MXGEFW_FLAGS_ALIGN_ODD must be set if the cumulative + * length of all previous segments was odd. + */ + +#define MXGEFW_FLAGS_SMALL 0x1 +#define MXGEFW_FLAGS_TSO_HDR 0x1 +#define MXGEFW_FLAGS_FIRST 0x2 +#define MXGEFW_FLAGS_ALIGN_ODD 0x4 +#define MXGEFW_FLAGS_CKSUM 0x8 +#define MXGEFW_FLAGS_TSO_LAST 0x8 +#define MXGEFW_FLAGS_NO_TSO 0x10 +#define MXGEFW_FLAGS_TSO_CHOP 0x10 +#define MXGEFW_FLAGS_TSO_PLD 0x20 + +#define MXGEFW_SEND_SMALL_SIZE 1520 +#define MXGEFW_MAX_MTU 9400 + +union mcp_pso_or_cumlen { + u16 pseudo_hdr_offset; + u16 cum_len; +}; + +#define MXGEFW_MAX_SEND_DESC 12 +#define MXGEFW_PAD 2 + +/* 16 Bytes */ +struct mcp_kreq_ether_send { + __be32 addr_high; + __be32 addr_low; + __be16 pseudo_hdr_offset; + __be16 length; + u8 pad; + u8 rdma_count; + u8 cksum_offset; /* where to start computing cksum */ + u8 flags; /* as defined above */ +}; + +/* 8 Bytes */ +struct mcp_kreq_ether_recv { + __be32 addr_high; + __be32 addr_low; +}; + +/* Commands */ + +#define MXGEFW_BOOT_HANDOFF 0xfc0000 +#define MXGEFW_BOOT_DUMMY_RDMA 0xfc01c0 + +#define MXGEFW_ETH_CMD 0xf80000 +#define MXGEFW_ETH_SEND_4 0x200000 +#define MXGEFW_ETH_SEND_1 0x240000 +#define MXGEFW_ETH_SEND_2 0x280000 +#define MXGEFW_ETH_SEND_3 0x2c0000 +#define MXGEFW_ETH_RECV_SMALL 0x300000 +#define MXGEFW_ETH_RECV_BIG 0x340000 +#define MXGEFW_ETH_SEND_GO 0x380000 +#define MXGEFW_ETH_SEND_STOP 0x3C0000 + +#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) +#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) + +enum myri10ge_mcp_cmd_type { + MXGEFW_CMD_NONE = 0, + /* Reset the mcp, it is left in a safe state, waiting + * for the driver to set all its parameters */ + MXGEFW_CMD_RESET = 1, + + /* get the version number of the current firmware.. + * (may be available in the eeprom strings..? */ + MXGEFW_GET_MCP_VERSION = 2, + + /* Parameters which must be set by the driver before it can + * issue MXGEFW_CMD_ETHERNET_UP. 
They persist until the next + * MXGEFW_CMD_RESET is issued */ + + MXGEFW_CMD_SET_INTRQ_DMA = 3, + /* data0 = LSW of the host address + * data1 = MSW of the host address + * data2 = slice number if multiple slices are used + */ + + MXGEFW_CMD_SET_BIG_BUFFER_SIZE = 4, /* in bytes, power of 2 */ + MXGEFW_CMD_SET_SMALL_BUFFER_SIZE = 5, /* in bytes */ + + /* Parameters which refer to lanai SRAM addresses where the + * driver must issue PIO writes for various things */ + + MXGEFW_CMD_GET_SEND_OFFSET = 6, + MXGEFW_CMD_GET_SMALL_RX_OFFSET = 7, + MXGEFW_CMD_GET_BIG_RX_OFFSET = 8, + /* data0 = slice number if multiple slices are used */ + + MXGEFW_CMD_GET_IRQ_ACK_OFFSET = 9, + MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET = 10, + + /* Parameters which refer to rings stored on the MCP, + * and whose size is controlled by the mcp */ + + MXGEFW_CMD_GET_SEND_RING_SIZE = 11, /* in bytes */ + MXGEFW_CMD_GET_RX_RING_SIZE = 12, /* in bytes */ + + /* Parameters which refer to rings stored in the host, + * and whose size is controlled by the host. Note that + * all must be physically contiguous and must contain + * a power of 2 number of entries. */ + + MXGEFW_CMD_SET_INTRQ_SIZE = 13, /* in bytes */ +#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) + + /* command to bring ethernet interface up. Above parameters + * (plus mtu & mac address) must have been exchanged prior + * to issuing this command */ + MXGEFW_CMD_ETHERNET_UP = 14, + + /* command to bring ethernet interface down. No further sends + * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP + * is issued, and all interrupt queues must be flushed prior + * to ack'ing this command */ + + MXGEFW_CMD_ETHERNET_DOWN = 15, + + /* commands the driver may issue live, without resetting + * the nic. Note that increasing the mtu "live" should + * only be done if the driver has already supplied buffers + * sufficiently large to handle the new mtu. Decreasing + * the mtu live is safe */ + + MXGEFW_CMD_SET_MTU = 16, + MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET = 17, /* in microseconds */ + MXGEFW_CMD_SET_STATS_INTERVAL = 18, /* in microseconds */ + MXGEFW_CMD_SET_STATS_DMA_OBSOLETE = 19, /* replaced by SET_STATS_DMA_V2 */ + + MXGEFW_ENABLE_PROMISC = 20, + MXGEFW_DISABLE_PROMISC = 21, + MXGEFW_SET_MAC_ADDRESS = 22, + + MXGEFW_ENABLE_FLOW_CONTROL = 23, + MXGEFW_DISABLE_FLOW_CONTROL = 24, + + /* do a DMA test + * data0,data1 = DMA address + * data2 = RDMA length (MSH), WDMA length (LSH) + * command return data = repetitions (MSH), 0.5-ms ticks (LSH) + */ + MXGEFW_DMA_TEST = 25, + + MXGEFW_ENABLE_ALLMULTI = 26, + MXGEFW_DISABLE_ALLMULTI = 27, + + /* returns MXGEFW_CMD_ERROR_MULTICAST + * if there is no room in the cache + * data0,MSH(data1) = multicast group address */ + MXGEFW_JOIN_MULTICAST_GROUP = 28, + /* returns MXGEFW_CMD_ERROR_MULTICAST + * if the address is not in the cache, + * or is equal to FF-FF-FF-FF-FF-FF + * data0,MSH(data1) = multicast group address */ + MXGEFW_LEAVE_MULTICAST_GROUP = 29, + MXGEFW_LEAVE_ALL_MULTICAST_GROUPS = 30, + + MXGEFW_CMD_SET_STATS_DMA_V2 = 31, + /* data0, data1 = bus addr, + * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows + * adding new stuff to mcp_irq_data without changing the ABI + * + * If multiple slices are used, data2 contains both the size of the + * structure (in the lower 16 bits) and the slice number + * (in the upper 16 bits). 
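+ * (Illustrative only, not additional ABI: with the packing described above, a slice-aware driver would pass data2 = (slice << 16) | sizeof(struct mcp_irq_data).)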
+ */ + + MXGEFW_CMD_UNALIGNED_TEST = 32, + /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned + * chipset */ + + MXGEFW_CMD_UNALIGNED_STATUS = 33, + /* return data = boolean, true if the chipset is known to be unaligned */ + + MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS = 34, + /* data0 = number of big buffers to use. It must be 0 or a power of 2. + * 0 indicates that the NIC consumes as many buffers as they are required + * for packet. This is the default behavior. + * A power of 2 number indicates that the NIC always uses the specified + * number of buffers for each big receive packet. + * It is up to the driver to ensure that this value is big enough for + * the NIC to be able to receive maximum-sized packets. + */ + + MXGEFW_CMD_GET_MAX_RSS_QUEUES = 35, + MXGEFW_CMD_ENABLE_RSS_QUEUES = 36, + /* data0 = number of slices n (0, 1, ..., n-1) to enable + * data1 = interrupt mode | use of multiple transmit queues. + * 0=share one INTx/MSI. + * 1=use one MSI-X per queue. + * If all queues share one interrupt, the driver must have set + * RSS_SHARED_INTERRUPT_DMA before enabling queues. + * 2=enable both receive and send queues. + * Without this bit set, only one send queue (slice 0's send queue) + * is enabled. The receive queues are always enabled. + */ +#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0 +#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1 +#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2 + + MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET = 37, + MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA = 38, + /* data0, data1 = bus address lsw, msw */ + MXGEFW_CMD_GET_RSS_TABLE_OFFSET = 39, + /* get the offset of the indirection table */ + MXGEFW_CMD_SET_RSS_TABLE_SIZE = 40, + /* set the size of the indirection table */ + MXGEFW_CMD_GET_RSS_KEY_OFFSET = 41, + /* get the offset of the secret key */ + MXGEFW_CMD_RSS_KEY_UPDATED = 42, + /* tell nic that the secret key's been updated */ + MXGEFW_CMD_SET_RSS_ENABLE = 43, + /* data0 = enable/disable rss + * 0: disable rss. nic does not distribute receive packets. + * 1: enable rss. nic distributes receive packets among queues. + * data1 = hash type + * 1: IPV4 (required by RSS) + * 2: TCP_IPV4 (required by RSS) + * 3: IPV4 | TCP_IPV4 (required by RSS) + * 4: source port + * 5: source port + destination port + */ +#define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 +#define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 +#define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 +#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5 +#define MXGEFW_RSS_HASH_TYPE_MAX 0x5 + + MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE = 44, + /* Return data = the max. size of the entire headers of a IPv6 TSO packet. + * If the header size of a IPv6 TSO packet is larger than the specified + * value, then the driver must not use TSO. + * This size restriction only applies to IPv6 TSO. + * For IPv4 TSO, the maximum size of the headers is fixed, and the NIC + * always has enough header buffer to store maximum-sized headers. + */ + + MXGEFW_CMD_SET_TSO_MODE = 45, + /* data0 = TSO mode. 
+ * 0: Linux/FreeBSD style (NIC default) + * 1: NDIS/NetBSD style + */ +#define MXGEFW_TSO_MODE_LINUX 0 +#define MXGEFW_TSO_MODE_NDIS 1 + + MXGEFW_CMD_MDIO_READ = 46, + /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ + MXGEFW_CMD_MDIO_WRITE = 47, + /* data0 = dev_addr, data1 = register/addr, data2 = value */ + + MXGEFW_CMD_I2C_READ = 48, + /* Starts to get a fresh copy of one byte or of the module i2c table, the + * obtained data is cached inside the xaui-xfi chip : + * data0 : 0 => get one byte, 1=> get 256 bytes + * data1 : If data0 == 0: location to refresh + * bit 7:0 register location + * bit 8:15 is the i2c slave addr (0 is interpreted as 0xA1) + * bit 23:16 is the i2c bus number (for multi-port NICs) + * If data0 == 1: unused + * The operation might take ~1ms for a single byte or ~65ms when refreshing all 256 bytes + * During the i2c operation, MXGEFW_CMD_I2C_READ or MXGEFW_CMD_I2C_BYTE attempts + * will return MXGEFW_CMD_ERROR_BUSY + */ + MXGEFW_CMD_I2C_BYTE = 49, + /* Return the last obtained copy of a given byte in the xfp i2c table + * (copy cached during the last relevant MXGEFW_CMD_I2C_READ) + * data0 : index of the desired table entry + * Return data = the byte stored at the requested index in the table + */ + + MXGEFW_CMD_GET_VPUMP_OFFSET = 50, + /* Return data = NIC memory offset of mcp_vpump_public_global */ + MXGEFW_CMD_RESET_VPUMP = 51, + /* Resets the VPUMP state */ + + MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE = 52, + /* data0 = mcp_slot type to use. + * 0 = the default 4B mcp_slot + * 1 = 8B mcp_slot_8 + */ +#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 +#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 + + MXGEFW_CMD_SET_THROTTLE_FACTOR = 53, + /* set the throttle factor for ethp_z8e + * data0 = throttle_factor + * throttle_factor = 256 * pcie-raw-speed / tx_speed + * tx_speed = 256 * pcie-raw-speed / throttle_factor + * + * For PCI-E x8: pcie-raw-speed == 16Gb/s + * For PCI-E x4: pcie-raw-speed == 8Gb/s + * + * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s + * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s + * + * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s + * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s + */ + + MXGEFW_CMD_VPUMP_UP = 54, + /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ + MXGEFW_CMD_GET_VPUMP_CLK = 55, + /* Get the lanai clock */ + + MXGEFW_CMD_GET_DCA_OFFSET = 56, + /* offset of dca control for WDMAs */ + + /* VMware NetQueue commands */ + MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57, + MXGEFW_CMD_NETQ_ADD_FILTER = 58, + /* data0 = filter_id << 16 | queue << 8 | type */ + /* data1 = MS4 of MAC Addr */ + /* data2 = LS2_MAC << 16 | VLAN_tag */ + MXGEFW_CMD_NETQ_DEL_FILTER = 59, + /* data0 = filter_id */ + MXGEFW_CMD_NETQ_QUERY1 = 60, + MXGEFW_CMD_NETQ_QUERY2 = 61, + MXGEFW_CMD_NETQ_QUERY3 = 62, + MXGEFW_CMD_NETQ_QUERY4 = 63, + + MXGEFW_CMD_RELAX_RXBUFFER_ALIGNMENT = 64, + /* When set, small receive buffers can cross page boundaries. + * Both small and big receive buffers may start at any address. + * This option has performance implications, so use with caution. 
+ */ +}; + +enum myri10ge_mcp_cmd_status { + MXGEFW_CMD_OK = 0, + MXGEFW_CMD_UNKNOWN = 1, + MXGEFW_CMD_ERROR_RANGE = 2, + MXGEFW_CMD_ERROR_BUSY = 3, + MXGEFW_CMD_ERROR_EMPTY = 4, + MXGEFW_CMD_ERROR_CLOSED = 5, + MXGEFW_CMD_ERROR_HASH_ERROR = 6, + MXGEFW_CMD_ERROR_BAD_PORT = 7, + MXGEFW_CMD_ERROR_RESOURCES = 8, + MXGEFW_CMD_ERROR_MULTICAST = 9, + MXGEFW_CMD_ERROR_UNALIGNED = 10, + MXGEFW_CMD_ERROR_NO_MDIO = 11, + MXGEFW_CMD_ERROR_I2C_FAILURE = 12, + MXGEFW_CMD_ERROR_I2C_ABSENT = 13, + MXGEFW_CMD_ERROR_BAD_PCIE_LINK = 14 +}; + +#define MXGEFW_OLD_IRQ_DATA_LEN 40 + +struct mcp_irq_data { + /* add new counters at the beginning */ + __be32 future_use[1]; + __be32 dropped_pause; + __be32 dropped_unicast_filtered; + __be32 dropped_bad_crc32; + __be32 dropped_bad_phy; + __be32 dropped_multicast_filtered; + /* 40 Bytes */ + __be32 send_done_count; + +#define MXGEFW_LINK_DOWN 0 +#define MXGEFW_LINK_UP 1 +#define MXGEFW_LINK_MYRINET 2 +#define MXGEFW_LINK_UNKNOWN 3 + __be32 link_up; + __be32 dropped_link_overflow; + __be32 dropped_link_error_or_filtered; + __be32 dropped_runt; + __be32 dropped_overrun; + __be32 dropped_no_small_buffer; + __be32 dropped_no_big_buffer; + __be32 rdma_tags_available; + + u8 tx_stopped; + u8 link_down; + u8 stats_updated; + u8 valid; +}; + +/* definitions for NETQ filter type */ +#define MXGEFW_NETQ_FILTERTYPE_NONE 0 +#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1 +#define MXGEFW_NETQ_FILTERTYPE_VLAN 2 +#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3 + +#endif /* __MYRI10GE_MCP_H__ */ diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h new file mode 100644 index 000000000..75ec5e7cf --- /dev/null +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h @@ -0,0 +1,60 @@ +#ifndef __MYRI10GE_MCP_GEN_HEADER_H__ +#define __MYRI10GE_MCP_GEN_HEADER_H__ + + +#define MCP_HEADER_PTR_OFFSET 0x3c + +#define MCP_TYPE_MX 0x4d582020 /* "MX " */ +#define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ +#define MCP_TYPE_ETH 0x45544820 /* "ETH " */ +#define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ +#define MCP_TYPE_DFLT 0x20202020 /* " " */ +#define MCP_TYPE_ETHZ 0x4554485a /* "ETHZ" */ + +struct mcp_gen_header { + /* the first 4 fields are filled at compile time */ + unsigned header_length; + __be32 mcp_type; + char version[128]; + unsigned mcp_private; /* pointer to mcp-type specific structure */ + + /* filled by the MCP at run-time */ + unsigned sram_size; + unsigned string_specs; /* either the original STRING_SPECS or a superset */ + unsigned string_specs_len; + + /* Fields above this comment are guaranteed to be present. + * + * Fields below this comment are extensions added in later versions + * of this struct, drivers should compare the header_length against + * offsetof(field) to check whether a given MCP implements them. + * + * Never remove any field. Keep everything naturally align. + */ + + /* Specifies if the running mcp is mcp0, 1, or 2. 
*/ + unsigned char mcp_index; + unsigned char disable_rabbit; + unsigned char unaligned_tlp; + unsigned char pcie_link_algo; + unsigned counters_addr; + unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ + unsigned short handoff_id_major; /* must be equal */ + unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */ + unsigned msix_table_addr; /* start address of msix table in firmware */ + unsigned bss_addr; /* start of bss */ + unsigned features; + unsigned ee_hdr_addr; + unsigned led_pattern; + unsigned led_pattern_dflt; + /* 8 */ +}; + +struct zmcp_info { + unsigned info_len; + unsigned zmcp_addr; + unsigned zmcp_len; + unsigned mcp_edata; +}; + +#endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */ diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig new file mode 100644 index 000000000..a100860d4 --- /dev/null +++ b/drivers/net/ethernet/natsemi/Kconfig @@ -0,0 +1,67 @@ +# +# National Semi-conductor device configuration +# + +config NET_VENDOR_NATSEMI + bool "National Semi-conductor devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about National Semi-conductor devices. If you say Y, + you will be asked for your specific card in the following questions. + +if NET_VENDOR_NATSEMI + +config MACSONIC + tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)" + depends on MAC + ---help--- + Support for NatSemi SONIC based Ethernet devices. This includes + the onboard Ethernet in many Quadras as well as some LC-PDS, + a few Nubus and all known Comm Slot Ethernet cards. If you have + one of these say Y and read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. This module will + be called macsonic. + +config MIPS_JAZZ_SONIC + tristate "MIPS JAZZ onboard SONIC Ethernet support" + depends on MACH_JAZZ + ---help--- + This is the driver for the onboard card of MIPS Magnum 4000, + Acer PICA, Olivetti M700-10 and a few other identical OEM systems. + +config NATSEMI + tristate "National Semiconductor DP8381x series PCI Ethernet support" + depends on PCI + select CRC32 + ---help--- + This driver is for the National Semiconductor DP83810 series, + which is used in cards from PureData, NetGear, Linksys + and others, including the 83815 chip. + More specific information and updates are available from + . + +config NS83820 + tristate "National Semiconductor DP83820 support" + depends on PCI + ---help--- + This is a driver for the National Semiconductor DP83820 series + of gigabit ethernet MACs. Cards using this chipset include + the D-Link DGE-500T, PureData's PDP8023Z-TG, SMC's SMC9462TX, + SOHO-GA2000T, SOHO-GA2500T. The driver supports the use of + zero copy. + +config XTENSA_XT2000_SONIC + tristate "Xtensa XT2000 onboard SONIC Ethernet support" + depends on XTENSA_PLATFORM_XT2000 + ---help--- + This is the driver for the onboard card of the Xtensa XT2000 board. + +endif # NET_VENDOR_NATSEMI diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile new file mode 100644 index 000000000..764c532a9 --- /dev/null +++ b/drivers/net/ethernet/natsemi/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the National Semi-conductor Sonic devices. 
+# + +obj-$(CONFIG_MACSONIC) += macsonic.o +obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o +obj-$(CONFIG_NATSEMI) += natsemi.o +obj-$(CONFIG_NS83820) += ns83820.o +obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c new file mode 100644 index 000000000..acf3f11e3 --- /dev/null +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -0,0 +1,293 @@ +/* + * jazzsonic.c + * + * (C) 2005 Finn Thain + * + * Converted to DMA API, and (from the mac68k project) introduced + * dhd's support for 16-bit cards. + * + * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) + * + * This driver is based on work from Andreas Busse, but most of + * the code is rewritten. + * + * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) + * + * A driver for the onboard Sonic ethernet controller on Mips Jazz + * systems (Acer Pica-61, Mips Magnum 4000, Olivetti M700 and + * perhaps others, too) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static char jazz_sonic_string[] = "jazzsonic"; + +#define SONIC_MEM_SIZE 0x100 + +#include "sonic.h" + +/* + * Macros to access SONIC registers + */ +#define SONIC_READ(reg) (*((volatile unsigned int *)dev->base_addr+reg)) + +#define SONIC_WRITE(reg,val) \ +do { \ + *((volatile unsigned int *)dev->base_addr+(reg)) = (val); \ +} while (0) + + +/* use 0 for production, 1 for verification, >1 for debug */ +#ifdef SONIC_DEBUG +static unsigned int sonic_debug = SONIC_DEBUG; +#else +static unsigned int sonic_debug = 1; +#endif + +/* + * We cannot use station (ethernet) address prefixes to detect the + * sonic controller since these are board manufacturer depended. + * So we check for known Silicon Revision IDs instead. + */ +static unsigned short known_revisions[] = +{ + 0x04, /* Mips Magnum 4000 */ + 0xffff /* end of list */ +}; + +static int jazzsonic_open(struct net_device* dev) +{ + int retval; + + retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); + if (retval) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", + dev->name, dev->irq); + return retval; + } + + retval = sonic_open(dev); + if (retval) + free_irq(dev->irq, dev); + return retval; +} + +static int jazzsonic_close(struct net_device* dev) +{ + int err; + err = sonic_close(dev); + free_irq(dev->irq, dev); + return err; +} + +static const struct net_device_ops sonic_netdev_ops = { + .ndo_open = jazzsonic_open, + .ndo_stop = jazzsonic_close, + .ndo_start_xmit = sonic_send_packet, + .ndo_get_stats = sonic_get_stats, + .ndo_set_rx_mode = sonic_multicast_list, + .ndo_tx_timeout = sonic_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int sonic_probe1(struct net_device *dev) +{ + static unsigned version_printed; + unsigned int silicon_revision; + unsigned int val; + struct sonic_local *lp = netdev_priv(dev); + int err = -ENODEV; + int i; + + if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string)) + return -EBUSY; + + /* + * get the Silicon Revision ID. If this is one of the known + * one assume that we found a SONIC ethernet controller at + * the expected location. 
+ */ + silicon_revision = SONIC_READ(SONIC_SR); + if (sonic_debug > 1) + printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision); + + i = 0; + while (known_revisions[i] != 0xffff && + known_revisions[i] != silicon_revision) + i++; + + if (known_revisions[i] == 0xffff) { + printk("SONIC ethernet controller not found (0x%4x)\n", + silicon_revision); + goto out; + } + + if (sonic_debug && version_printed++ == 0) + printk(version); + + printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ", + dev_name(lp->device), dev->base_addr); + + /* + * Put the sonic into software reset, then + * retrieve and print the ethernet address. + */ + SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); + SONIC_WRITE(SONIC_CEP,0); + for (i=0; i<3; i++) { + val = SONIC_READ(SONIC_CAP0-i); + dev->dev_addr[i*2] = val; + dev->dev_addr[i*2+1] = val >> 8; + } + + err = -ENOMEM; + + /* Initialize the device structure. */ + + lp->dma_bitmode = SONIC_BITMODE32; + + /* Allocate the entire chunk of memory for the descriptors. + Note that this cannot cross a 64K boundary. */ + lp->descriptors = dma_alloc_coherent(lp->device, + SIZEOF_SONIC_DESC * + SONIC_BUS_SCALE(lp->dma_bitmode), + &lp->descriptors_laddr, + GFP_KERNEL); + if (lp->descriptors == NULL) + goto out; + + /* Now set up the pointers to point to the appropriate places */ + lp->cda = lp->descriptors; + lp->tda = lp->cda + (SIZEOF_SONIC_CDA + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + + lp->cda_laddr = lp->descriptors_laddr; + lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + + dev->netdev_ops = &sonic_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + /* + * clear tally counter + */ + SONIC_WRITE(SONIC_CRCT,0xffff); + SONIC_WRITE(SONIC_FAET,0xffff); + SONIC_WRITE(SONIC_MPT,0xffff); + + return 0; +out: + release_mem_region(dev->base_addr, SONIC_MEM_SIZE); + return err; +} + +/* + * Probe for a SONIC ethernet controller on a Mips Jazz board. + * Actually probing is superfluous but we're paranoid. 
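+ * (The platform device supplies the MMIO region and IRQ; sonic_probe1() confirms a known silicon revision before allocating descriptor memory.)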
+ */ +static int jazz_sonic_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct sonic_local *lp; + struct resource *res; + int err = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + dev = alloc_etherdev(sizeof(struct sonic_local)); + if (!dev) + return -ENOMEM; + + lp = netdev_priv(dev); + lp->device = &pdev->dev; + SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); + + netdev_boot_setup_check(dev); + + dev->base_addr = res->start; + dev->irq = platform_get_irq(pdev, 0); + err = sonic_probe1(dev); + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + + printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq); + + return 0; + +out1: + release_mem_region(dev->base_addr, SONIC_MEM_SIZE); +out: + free_netdev(dev); + + return err; +} + +MODULE_DESCRIPTION("Jazz SONIC ethernet driver"); +module_param(sonic_debug, int, 0); +MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)"); +MODULE_ALIAS("platform:jazzsonic"); + +#include "sonic.c" + +static int jazz_sonic_device_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct sonic_local* lp = netdev_priv(dev); + + unregister_netdev(dev); + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); + release_mem_region(dev->base_addr, SONIC_MEM_SIZE); + free_netdev(dev); + + return 0; +} + +static struct platform_driver jazz_sonic_driver = { + .probe = jazz_sonic_probe, + .remove = jazz_sonic_device_remove, + .driver = { + .name = jazz_sonic_string, + }, +}; + +module_platform_driver(jazz_sonic_driver); diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c new file mode 100644 index 000000000..d98f5b8a1 --- /dev/null +++ b/drivers/net/ethernet/natsemi/macsonic.c @@ -0,0 +1,636 @@ +/* + * macsonic.c + * + * (C) 2005 Finn Thain + * + * Converted to DMA API, converted to unified driver model, made it work as + * a module again, and from the mac68k project, introduced more 32-bit cards + * and dhd's support for 16-bit cards. + * + * (C) 1998 Alan Cox + * + * Debugging Andreas Ehliar, Michael Schmitz + * + * Based on code + * (C) 1996 by Thomas Bogendoerfer (tsbogend@bigbug.franken.de) + * + * This driver is based on work from Andreas Busse, but most of + * the code is rewritten. + * + * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) + * + * A driver for the Mac onboard Sonic ethernet chip. + * + * 98/12/21 MSch: judged from tests on Q800, it's basically working, + * but eating up both receive and transmit resources + * and duplicating packets. Needs more testing. + * + * 99/01/03 MSch: upgraded to version 0.92 of the core driver, fixed. + * + * 00/10/31 sammy@oh.verio.com: Updated driver for 2.4 kernels, fixed problems + * on centris. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +static char mac_sonic_string[] = "macsonic"; + +#include "sonic.h" + +/* These should basically be bus-size and endian independent (since + the SONIC is at least smart enough that it uses the same endianness + as the host, unlike certain less enlightened Macintosh NICs) */ +#define SONIC_READ(reg) (nubus_readw(dev->base_addr + (reg * 4) \ + + lp->reg_offset)) +#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \ + + lp->reg_offset)) + +/* use 0 for production, 1 for verification, >1 for debug */ +#ifdef SONIC_DEBUG +static unsigned int sonic_debug = SONIC_DEBUG; +#else +static unsigned int sonic_debug = 1; +#endif + +static int sonic_version_printed; + +/* For onboard SONIC */ +#define ONBOARD_SONIC_REGISTERS 0x50F0A000 +#define ONBOARD_SONIC_PROM_BASE 0x50f08000 + +enum macsonic_type { + MACSONIC_DUODOCK, + MACSONIC_APPLE, + MACSONIC_APPLE16, + MACSONIC_DAYNA, + MACSONIC_DAYNALINK +}; + +/* For the built-in SONIC in the Duo Dock */ +#define DUODOCK_SONIC_REGISTERS 0xe10000 +#define DUODOCK_SONIC_PROM_BASE 0xe12000 + +/* For Apple-style NuBus SONIC */ +#define APPLE_SONIC_REGISTERS 0 +#define APPLE_SONIC_PROM_BASE 0x40000 + +/* Daynalink LC SONIC */ +#define DAYNALINK_PROM_BASE 0x400000 + +/* For Dayna-style NuBus SONIC (haven't seen one yet) */ +#define DAYNA_SONIC_REGISTERS 0x180000 +/* This is what OpenBSD says. However, this is definitely in NuBus + ROM space so we should be able to get it by walking the NuBus + resource directories */ +#define DAYNA_SONIC_MAC_ADDR 0xffe004 + +#define SONIC_READ_PROM(addr) nubus_readb(prom_addr+addr) + +/* + * For reversing the PROM address + */ + +static inline void bit_reverse_addr(unsigned char addr[6]) +{ + int i; + + for(i = 0; i < 6; i++) + addr[i] = bitrev8(addr[i]); +} + +static irqreturn_t macsonic_interrupt(int irq, void *dev_id) +{ + irqreturn_t result; + unsigned long flags; + + local_irq_save(flags); + result = sonic_interrupt(irq, dev_id); + local_irq_restore(flags); + return result; +} + +static int macsonic_open(struct net_device* dev) +{ + int retval; + + retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); + if (retval) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", + dev->name, dev->irq); + goto err; + } + /* Under the A/UX interrupt scheme, the onboard SONIC interrupt comes + * in at priority level 3. However, we sometimes get the level 2 inter- + * rupt as well, which must prevent re-entrance of the sonic handler. 
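+ * (This is why the extra IRQ_NUBUS_9 handler registered below is macsonic_interrupt(), which masks local interrupts around sonic_interrupt() instead of calling it directly.)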
+ */ + if (dev->irq == IRQ_AUTO_3) { + retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0, + "sonic", dev); + if (retval) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", + dev->name, IRQ_NUBUS_9); + goto err_irq; + } + } + retval = sonic_open(dev); + if (retval) + goto err_irq_nubus; + return 0; + +err_irq_nubus: + if (dev->irq == IRQ_AUTO_3) + free_irq(IRQ_NUBUS_9, dev); +err_irq: + free_irq(dev->irq, dev); +err: + return retval; +} + +static int macsonic_close(struct net_device* dev) +{ + int err; + err = sonic_close(dev); + free_irq(dev->irq, dev); + if (dev->irq == IRQ_AUTO_3) + free_irq(IRQ_NUBUS_9, dev); + return err; +} + +static const struct net_device_ops macsonic_netdev_ops = { + .ndo_open = macsonic_open, + .ndo_stop = macsonic_close, + .ndo_start_xmit = sonic_send_packet, + .ndo_set_rx_mode = sonic_multicast_list, + .ndo_tx_timeout = sonic_tx_timeout, + .ndo_get_stats = sonic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int macsonic_init(struct net_device *dev) +{ + struct sonic_local* lp = netdev_priv(dev); + + /* Allocate the entire chunk of memory for the descriptors. + Note that this cannot cross a 64K boundary. */ + lp->descriptors = dma_alloc_coherent(lp->device, + SIZEOF_SONIC_DESC * + SONIC_BUS_SCALE(lp->dma_bitmode), + &lp->descriptors_laddr, + GFP_KERNEL); + if (lp->descriptors == NULL) + return -ENOMEM; + + /* Now set up the pointers to point to the appropriate places */ + lp->cda = lp->descriptors; + lp->tda = lp->cda + (SIZEOF_SONIC_CDA + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + + lp->cda_laddr = lp->descriptors_laddr; + lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + + dev->netdev_ops = &macsonic_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + /* + * clear tally counter + */ + SONIC_WRITE(SONIC_CRCT, 0xffff); + SONIC_WRITE(SONIC_FAET, 0xffff); + SONIC_WRITE(SONIC_MPT, 0xffff); + + return 0; +} + +#define INVALID_MAC(mac) (memcmp(mac, "\x08\x00\x07", 3) && \ + memcmp(mac, "\x00\xA0\x40", 3) && \ + memcmp(mac, "\x00\x80\x19", 3) && \ + memcmp(mac, "\x00\x05\x02", 3)) + +static void mac_onboard_sonic_ethernet_addr(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + const int prom_addr = ONBOARD_SONIC_PROM_BASE; + unsigned short val; + + /* + * On NuBus boards we can sometimes look in the ROM resources. + * No such luck for comm-slot/onboard. + * On the PowerBook 520, the PROM base address is a mystery. + */ + if (hwreg_present((void *)prom_addr)) { + int i; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = SONIC_READ_PROM(i); + if (!INVALID_MAC(dev->dev_addr)) + return; + + /* + * Most of the time, the address is bit-reversed. The NetBSD + * source has a rather long and detailed historical account of + * why this is so. + */ + bit_reverse_addr(dev->dev_addr); + if (!INVALID_MAC(dev->dev_addr)) + return; + + /* + * If we still have what seems to be a bogus address, we'll + * look in the CAM. The top entry should be ours. 
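+ * (The code below selects CAM entry 15 through SONIC_CEP and reads the address back one 16-bit word at a time from SONIC_CAP2..SONIC_CAP0.)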
+ */ + printk(KERN_WARNING "macsonic: MAC address in PROM seems " + "to be invalid, trying CAM\n"); + } else { + printk(KERN_WARNING "macsonic: cannot read MAC address from " + "PROM, trying CAM\n"); + } + + /* This only works if MacOS has already initialized the card. */ + + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + SONIC_WRITE(SONIC_CEP, 15); + + val = SONIC_READ(SONIC_CAP2); + dev->dev_addr[5] = val >> 8; + dev->dev_addr[4] = val & 0xff; + val = SONIC_READ(SONIC_CAP1); + dev->dev_addr[3] = val >> 8; + dev->dev_addr[2] = val & 0xff; + val = SONIC_READ(SONIC_CAP0); + dev->dev_addr[1] = val >> 8; + dev->dev_addr[0] = val & 0xff; + + if (!INVALID_MAC(dev->dev_addr)) + return; + + /* Still nonsense ... messed up someplace! */ + + printk(KERN_WARNING "macsonic: MAC address in CAM entry 15 " + "seems invalid, will use a random MAC\n"); + eth_hw_addr_random(dev); +} + +static int mac_onboard_sonic_probe(struct net_device *dev) +{ + struct sonic_local* lp = netdev_priv(dev); + int sr; + int commslot = 0; + + if (!MACH_IS_MAC) + return -ENODEV; + + printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. "); + + /* Bogus probing, on the models which may or may not have + Ethernet (BTW, the Ethernet *is* always at the same + address, and nothing else lives there, at least if Apple's + documentation is to be believed) */ + if (macintosh_config->ident == MAC_MODEL_Q630 || + macintosh_config->ident == MAC_MODEL_P588 || + macintosh_config->ident == MAC_MODEL_P575 || + macintosh_config->ident == MAC_MODEL_C610) { + int card_present; + + card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS); + if (!card_present) { + printk("none.\n"); + return -ENODEV; + } + commslot = 1; + } + + printk("yes\n"); + + /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset + * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */ + dev->base_addr = ONBOARD_SONIC_REGISTERS; + if (via_alt_mapping) + dev->irq = IRQ_AUTO_3; + else + dev->irq = IRQ_NUBUS_9; + + if (!sonic_version_printed) { + printk(KERN_INFO "%s", version); + sonic_version_printed = 1; + } + printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n", + dev_name(lp->device), dev->base_addr); + + /* The PowerBook's SONIC is 16 bit always. */ + if (macintosh_config->ident == MAC_MODEL_PB520) { + lp->reg_offset = 0; + lp->dma_bitmode = SONIC_BITMODE16; + sr = SONIC_READ(SONIC_SR); + } else if (commslot) { + /* Some of the comm-slot cards are 16 bit. But some + of them are not. The 32-bit cards use offset 2 and + have known revisions, we try reading the revision + register at offset 2, if we don't get a known revision + we assume 16 bit at offset 0. */ + lp->reg_offset = 2; + lp->dma_bitmode = SONIC_BITMODE16; + + sr = SONIC_READ(SONIC_SR); + if (sr == 0x0004 || sr == 0x0006 || sr == 0x0100 || sr == 0x0101) + /* 83932 is 0x0004 or 0x0006, 83934 is 0x0100 or 0x0101 */ + lp->dma_bitmode = SONIC_BITMODE32; + else { + lp->dma_bitmode = SONIC_BITMODE16; + lp->reg_offset = 0; + sr = SONIC_READ(SONIC_SR); + } + } else { + /* All onboard cards are at offset 2 with 32 bit DMA. */ + lp->reg_offset = 2; + lp->dma_bitmode = SONIC_BITMODE32; + sr = SONIC_READ(SONIC_SR); + } + printk(KERN_INFO + "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", + dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset); + +#if 0 /* This is sometimes useful to find out how MacOS configured the card. 
*/ + printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), + SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); +#endif + + /* Software reset, then initialize control registers. */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + + SONIC_WRITE(SONIC_DCR, SONIC_DCR_EXBUS | SONIC_DCR_BMS | + SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | + (lp->dma_bitmode ? SONIC_DCR_DW : 0)); + + /* This *must* be written back to in order to restore the + * extended programmable output bits, as it may not have been + * initialised since the hardware reset. */ + SONIC_WRITE(SONIC_DCR2, 0); + + /* Clear *and* disable interrupts to be on the safe side */ + SONIC_WRITE(SONIC_IMR, 0); + SONIC_WRITE(SONIC_ISR, 0x7fff); + + /* Now look for the MAC address. */ + mac_onboard_sonic_ethernet_addr(dev); + + /* Shared init code */ + return macsonic_init(dev); +} + +static int mac_nubus_sonic_ethernet_addr(struct net_device *dev, + unsigned long prom_addr, int id) +{ + int i; + for(i = 0; i < 6; i++) + dev->dev_addr[i] = SONIC_READ_PROM(i); + + /* Some of the addresses are bit-reversed */ + if (id != MACSONIC_DAYNA) + bit_reverse_addr(dev->dev_addr); + + return 0; +} + +static int macsonic_ident(struct nubus_dev *ndev) +{ + if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC && + ndev->dr_sw == NUBUS_DRSW_SONIC_LC) + return MACSONIC_DAYNALINK; + if (ndev->dr_hw == NUBUS_DRHW_SONIC && + ndev->dr_sw == NUBUS_DRSW_APPLE) { + /* There has to be a better way to do this... */ + if (strstr(ndev->board->name, "DuoDock")) + return MACSONIC_DUODOCK; + else + return MACSONIC_APPLE; + } + + if (ndev->dr_hw == NUBUS_DRHW_SMC9194 && + ndev->dr_sw == NUBUS_DRSW_DAYNA) + return MACSONIC_DAYNA; + + if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC && + ndev->dr_sw == 0) { /* huh? */ + return MACSONIC_APPLE16; + } + return -1; +} + +static int mac_nubus_sonic_probe(struct net_device *dev) +{ + static int slots; + struct nubus_dev* ndev = NULL; + struct sonic_local* lp = netdev_priv(dev); + unsigned long base_addr, prom_addr; + u16 sonic_dcr; + int id = -1; + int reg_offset, dma_bitmode; + + /* Find the first SONIC that hasn't been initialized already */ + while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, + NUBUS_TYPE_ETHERNET, ndev)) != NULL) + { + /* Have we seen it already? */ + if (slots & (1<board->slot)) + continue; + slots |= 1<board->slot; + + /* Is it one of ours? 
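+ * (macsonic_ident() maps the card's NuBus dr_hw/dr_sw IDs onto one of the MACSONIC_* variants; -1 means an Ethernet card this driver does not handle.)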
*/ + if ((id = macsonic_ident(ndev)) != -1) + break; + } + + if (ndev == NULL) + return -ENODEV; + + switch (id) { + case MACSONIC_DUODOCK: + base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS; + prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE; + sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 | + SONIC_DCR_TFT0; + reg_offset = 2; + dma_bitmode = SONIC_BITMODE32; + break; + case MACSONIC_APPLE: + base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; + prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; + sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0; + reg_offset = 0; + dma_bitmode = SONIC_BITMODE32; + break; + case MACSONIC_APPLE16: + base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; + prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE; + sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | + SONIC_DCR_PO1 | SONIC_DCR_BMS; + reg_offset = 0; + dma_bitmode = SONIC_BITMODE16; + break; + case MACSONIC_DAYNALINK: + base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS; + prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE; + sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | + SONIC_DCR_PO1 | SONIC_DCR_BMS; + reg_offset = 0; + dma_bitmode = SONIC_BITMODE16; + break; + case MACSONIC_DAYNA: + base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS; + prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR; + sonic_dcr = SONIC_DCR_BMS | + SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1; + reg_offset = 0; + dma_bitmode = SONIC_BITMODE16; + break; + default: + printk(KERN_ERR "macsonic: WTF, id is %d\n", id); + return -ENODEV; + } + + /* Danger! My arms are flailing wildly! You *must* set lp->reg_offset + * and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */ + dev->base_addr = base_addr; + lp->reg_offset = reg_offset; + lp->dma_bitmode = dma_bitmode; + dev->irq = SLOT2IRQ(ndev->board->slot); + + if (!sonic_version_printed) { + printk(KERN_INFO "%s", version); + sonic_version_printed = 1; + } + printk(KERN_INFO "%s: %s in slot %X\n", + dev_name(lp->device), ndev->board->name, ndev->board->slot); + printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n", + dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset); + +#if 0 /* This is sometimes useful to find out how MacOS configured the card. */ + printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device), + SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff); +#endif + + /* Software reset, then initialize control registers. */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + SONIC_WRITE(SONIC_DCR, sonic_dcr | (dma_bitmode ? SONIC_DCR_DW : 0)); + /* This *must* be written back to in order to restore the + * extended programmable output bits, since it may not have been + * initialised since the hardware reset. */ + SONIC_WRITE(SONIC_DCR2, 0); + + /* Clear *and* disable interrupts to be on the safe side */ + SONIC_WRITE(SONIC_IMR, 0); + SONIC_WRITE(SONIC_ISR, 0x7fff); + + /* Now look for the MAC address. 
*/ + if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0) + return -ENODEV; + + /* Shared init code */ + return macsonic_init(dev); +} + +static int mac_sonic_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct sonic_local *lp; + int err; + + dev = alloc_etherdev(sizeof(struct sonic_local)); + if (!dev) + return -ENOMEM; + + lp = netdev_priv(dev); + lp->device = &pdev->dev; + SET_NETDEV_DEV(dev, &pdev->dev); + platform_set_drvdata(pdev, dev); + + /* This will catch fatal stuff like -ENOMEM as well as success */ + err = mac_onboard_sonic_probe(dev); + if (err == 0) + goto found; + if (err != -ENODEV) + goto out; + err = mac_nubus_sonic_probe(dev); + if (err) + goto out; +found: + err = register_netdev(dev); + if (err) + goto out; + + printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq); + + return 0; + +out: + free_netdev(dev); + + return err; +} + +MODULE_DESCRIPTION("Macintosh SONIC ethernet driver"); +module_param(sonic_debug, int, 0); +MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)"); +MODULE_ALIAS("platform:macsonic"); + +#include "sonic.c" + +static int mac_sonic_device_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct sonic_local* lp = netdev_priv(dev); + + unregister_netdev(dev); + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); + free_netdev(dev); + + return 0; +} + +static struct platform_driver mac_sonic_driver = { + .probe = mac_sonic_probe, + .remove = mac_sonic_device_remove, + .driver = { + .name = mac_sonic_string, + }, +}; + +module_platform_driver(mac_sonic_driver); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c new file mode 100644 index 000000000..b83f7c0fc --- /dev/null +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -0,0 +1,3374 @@ +/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */ +/* + Written/copyright 1999-2001 by Donald Becker. + Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) + Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com) + Portions copyright 2004 Harald Welte + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. License for under other terms may be + available. Contact the original author for details. + + The original author may be reached as becker@scyld.com, or at + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support information and updates available at + http://www.scyld.com/network/netsemi.html + [link no longer provides useful info -jgarzik] + + + TODO: + * big endian support with CFG:BEM instead of cpu_to_le32 +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Processor type for cache alignment. */ +#include +#include +#include + +#define DRV_NAME "natsemi" +#define DRV_VERSION "2.1" +#define DRV_RELDATE "Sept 11, 2006" + +#define RX_OFFSET 2 + +/* Updated to recommendations in pci-skeleton v2.03. 
*/ + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \ + NETIF_MSG_LINK | \ + NETIF_MSG_WOL | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) +static int debug = -1; + +static int mtu; + +/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). + This chip uses a 512 element hash table based on the Ethernet CRC. */ +static const int multicast_filter_limit = 100; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1518 effectively disables this feature. */ +static int rx_copybreak; + +static int dspcfg_workaround = 1; + +/* Used to pass the media type, etc. + Both 'options[]' and 'full_duplex[]' should exist for driver + interoperability. + The media type is usually passed in 'options[]'. +*/ +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS]; +static int full_duplex[MAX_UNITS]; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + The compiler will convert '%'<2^N> into a bit mask. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings. */ +#define TX_RING_SIZE 16 +#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */ +#define RX_RING_SIZE 32 + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +#define NATSEMI_HW_TIMEOUT 400 +#define NATSEMI_TIMER_FREQ 5*HZ +#define NATSEMI_PG0_NREGS 64 +#define NATSEMI_RFDR_NREGS 8 +#define NATSEMI_PG1_NREGS 4 +#define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \ + NATSEMI_PG1_NREGS) +#define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */ +#define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32)) + +/* Buffer sizes: + * The nic writes 32-bit values, even if the upper bytes of + * a 32-bit value are beyond the end of the buffer. + */ +#define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */ +#define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */ +#define NATSEMI_LONGPKT 1518 /* limit for normal packets */ +#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */ + +/* These identify the driver base version and may not be removed. */ +static const char version[] = + KERN_INFO DRV_NAME " dp8381x driver, version " + DRV_VERSION ", " DRV_RELDATE "\n" + " originally by Donald Becker \n" + " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(mtu, int, 0); +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param(dspcfg_workaround, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)"); +MODULE_PARM_DESC(debug, "DP8381x default debug level"); +MODULE_PARM_DESC(rx_copybreak, + "DP8381x copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround"); +MODULE_PARM_DESC(options, + "DP8381x: Bits 0-3: media type, bit 17: full duplex"); +MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)"); + +/* + Theory of Operation + +I. Board Compatibility + +This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC. 
+It also works with other chips in in the DP83810 series. + +II. Board-specific settings + +This driver requires the PCI interrupt line to be valid. +It honors the EEPROM-set values. + +III. Driver operation + +IIIa. Ring buffers + +This driver uses two statically allocated fixed-size descriptor lists +formed into rings by a branch from the final descriptor to the beginning of +the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. +The NatSemi design uses a 'next descriptor' pointer that the driver forms +into a list. + +IIIb/c. Transmit/Receive Structure + +This driver uses a zero-copy receive and transmit scheme. +The driver allocates full frame size skbuffs for the Rx ring buffers at +open() time and passes the skb->data field to the chip as receive data +buffers. When an incoming frame is less than RX_COPYBREAK bytes long, +a fresh skbuff is allocated and the frame is copied to the new skbuff. +When the incoming frame is larger, the skbuff is passed directly up the +protocol stack. Buffers consumed this way are replaced by newly allocated +skbuffs in a later phase of receives. + +The RX_COPYBREAK value is chosen to trade-off the memory wasted by +using a full-sized skbuff for small frames vs. the copying costs of larger +frames. New boards are typically used in generously configured machines +and the underfilled buffers have negligible impact compared to the benefit of +a single allocation size, so the default value of zero results in never +copying packets. When copying is done, the cost is usually mitigated by using +a combined copy/checksum routine. Copying also preloads the cache, which is +most useful with small frames. + +A subtle aspect of the operation is that unaligned buffers are not permitted +by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't +longword aligned for further processing. On copies frames are put into the +skbuff at an offset of "+2", 16-byte aligning the IP header. + +IIId. Synchronization + +Most operations are synchronized on the np->lock irq spinlock, except the +receive and transmit paths which are synchronised using a combination of +hardware descriptor ownership, disabling interrupts and NAPI poll scheduling. + +IVb. References + +http://www.scyld.com/expert/100mbps.html +http://www.scyld.com/expert/NWay.html +Datasheet is available from: +http://www.national.com/pf/DP/DP83815.html + +IVc. Errata + +None characterised. +*/ + + + +/* + * Support for fibre connections on Am79C874: + * This phy needs a special setup when connected to a fibre cable. + * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf + */ +#define PHYID_AM79C874 0x0022561b + +enum { + MII_MCTRL = 0x15, /* mode control register */ + MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */ + MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */ +}; + +enum { + NATSEMI_FLAG_IGNORE_PHY = 0x1, +}; + +/* array of board data directly indexed by pci_tbl[x].driver_data */ +static struct { + const char *name; + unsigned long flags; + unsigned int eeprom_size; +} natsemi_pci_info[] = { + { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 }, + { "NatSemi DP8381[56]", 0, 24 }, +}; + +static const struct pci_device_id natsemi_pci_tbl[] = { + { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 }, + { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl); + +/* Offsets to the device registers. 
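The copy-only-tiny-frames scheme described in section IIIb/c above comes down to a single per-frame size test against rx_copybreak. Below is a minimal sketch of that decision with a hypothetical helper name; the real code in netdev_rx() further below also handles the DMA syncing, the RX_OFFSET alignment of the ring buffer and the ring refill:

/* Sketch of the rx_copybreak trade-off described above: frames shorter
 * than the breakpoint are copied into a small, freshly allocated skb so
 * the full-sized ring buffer can stay in the ring; larger frames hand
 * the ring buffer itself up the stack.  DMA mapping is omitted here. */
static struct sk_buff *copybreak_rx_sketch(struct net_device *dev,
					   struct sk_buff *ring_skb,
					   int pkt_len, int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len < copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);		/* 16-byte align the IP header */
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		return skb;			/* ring_skb is reused in place */
	}
	skb_put(ring_skb, pkt_len);		/* caller must refill this ring slot */
	return ring_skb;
}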
+ Unlike software-only systems, device drivers interact with complex hardware. + It's not useful to define symbolic names for every register bit in the + device. +*/ +enum register_offsets { + ChipCmd = 0x00, + ChipConfig = 0x04, + EECtrl = 0x08, + PCIBusCfg = 0x0C, + IntrStatus = 0x10, + IntrMask = 0x14, + IntrEnable = 0x18, + IntrHoldoff = 0x1C, /* DP83816 only */ + TxRingPtr = 0x20, + TxConfig = 0x24, + RxRingPtr = 0x30, + RxConfig = 0x34, + ClkRun = 0x3C, + WOLCmd = 0x40, + PauseCmd = 0x44, + RxFilterAddr = 0x48, + RxFilterData = 0x4C, + BootRomAddr = 0x50, + BootRomData = 0x54, + SiliconRev = 0x58, + StatsCtrl = 0x5C, + StatsData = 0x60, + RxPktErrs = 0x60, + RxMissed = 0x68, + RxCRCErrs = 0x64, + BasicControl = 0x80, + BasicStatus = 0x84, + AnegAdv = 0x90, + AnegPeer = 0x94, + PhyStatus = 0xC0, + MIntrCtrl = 0xC4, + MIntrStatus = 0xC8, + PhyCtrl = 0xE4, + + /* These are from the spec, around page 78... on a separate table. + * The meaning of these registers depend on the value of PGSEL. */ + PGSEL = 0xCC, + PMDCSR = 0xE4, + TSTDAT = 0xFC, + DSPCFG = 0xF4, + SDCFG = 0xF8 +}; +/* the values for the 'magic' registers above (PGSEL=1) */ +#define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */ +#define TSTDAT_VAL 0x0 +#define DSPCFG_VAL 0x5040 +#define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */ +#define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */ +#define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */ +#define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */ + +/* misc PCI space registers */ +enum pci_register_offsets { + PCIPM = 0x44, +}; + +enum ChipCmd_bits { + ChipReset = 0x100, + RxReset = 0x20, + TxReset = 0x10, + RxOff = 0x08, + RxOn = 0x04, + TxOff = 0x02, + TxOn = 0x01, +}; + +enum ChipConfig_bits { + CfgPhyDis = 0x200, + CfgPhyRst = 0x400, + CfgExtPhy = 0x1000, + CfgAnegEnable = 0x2000, + CfgAneg100 = 0x4000, + CfgAnegFull = 0x8000, + CfgAnegDone = 0x8000000, + CfgFullDuplex = 0x20000000, + CfgSpeed100 = 0x40000000, + CfgLink = 0x80000000, +}; + +enum EECtrl_bits { + EE_ShiftClk = 0x04, + EE_DataIn = 0x01, + EE_ChipSelect = 0x08, + EE_DataOut = 0x02, + MII_Data = 0x10, + MII_Write = 0x20, + MII_ShiftClk = 0x40, +}; + +enum PCIBusCfg_bits { + EepromReload = 0x4, +}; + +/* Bits in the interrupt status/mask registers. 
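The PMDCSR, TSTDAT, DSPCFG and SDCFG offsets listed above only carry their documented meaning while PGSEL is set, which is why every access to them in this driver is bracketed by a pair of PGSEL writes (see init_phy_fixup(), do_cable_magic() and netdev_timer() below). A condensed sketch of the pattern, using a hypothetical helper name:

/* Hypothetical helper illustrating the paged-register access pattern
 * used throughout this driver for the "magic" registers above: select
 * page 1, touch the register, deselect again.  The real driver does
 * this inline, under np->lock, in init_phy_fixup() and friends. */
static u16 paged_readw(void __iomem *ioaddr, int reg)
{
	u16 val;

	writew(1, ioaddr + PGSEL);	/* expose PMDCSR/TSTDAT/DSPCFG/SDCFG */
	val = readw(ioaddr + reg);
	writew(0, ioaddr + PGSEL);	/* restore the normal register map */
	return val;
}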
*/ +enum IntrStatus_bits { + IntrRxDone = 0x0001, + IntrRxIntr = 0x0002, + IntrRxErr = 0x0004, + IntrRxEarly = 0x0008, + IntrRxIdle = 0x0010, + IntrRxOverrun = 0x0020, + IntrTxDone = 0x0040, + IntrTxIntr = 0x0080, + IntrTxErr = 0x0100, + IntrTxIdle = 0x0200, + IntrTxUnderrun = 0x0400, + StatsMax = 0x0800, + SWInt = 0x1000, + WOLPkt = 0x2000, + LinkChange = 0x4000, + IntrHighBits = 0x8000, + RxStatusFIFOOver = 0x10000, + IntrPCIErr = 0xf00000, + RxResetDone = 0x1000000, + TxResetDone = 0x2000000, + IntrAbnormalSummary = 0xCD20, +}; + +/* + * Default Interrupts: + * Rx OK, Rx Packet Error, Rx Overrun, + * Tx OK, Tx Packet Error, Tx Underrun, + * MIB Service, Phy Interrupt, High Bits, + * Rx Status FIFO overrun, + * Received Target Abort, Received Master Abort, + * Signalled System Error, Received Parity Error + */ +#define DEFAULT_INTR 0x00f1cd65 + +enum TxConfig_bits { + TxDrthMask = 0x3f, + TxFlthMask = 0x3f00, + TxMxdmaMask = 0x700000, + TxMxdma_512 = 0x0, + TxMxdma_4 = 0x100000, + TxMxdma_8 = 0x200000, + TxMxdma_16 = 0x300000, + TxMxdma_32 = 0x400000, + TxMxdma_64 = 0x500000, + TxMxdma_128 = 0x600000, + TxMxdma_256 = 0x700000, + TxCollRetry = 0x800000, + TxAutoPad = 0x10000000, + TxMacLoop = 0x20000000, + TxHeartIgn = 0x40000000, + TxCarrierIgn = 0x80000000 +}; + +/* + * Tx Configuration: + * - 256 byte DMA burst length + * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free) + * - 64 bytes initial drain threshold (i.e. begin actual transmission + * when 64 byte are in the fifo) + * - on tx underruns, increase drain threshold by 64. + * - at most use a drain threshold of 1472 bytes: The sum of the fill + * threshold and the drain threshold must be less than 2016 bytes. + * + */ +#define TX_FLTH_VAL ((512/32) << 8) +#define TX_DRTH_VAL_START (64/32) +#define TX_DRTH_VAL_INC 2 +#define TX_DRTH_VAL_LIMIT (1472/32) + +enum RxConfig_bits { + RxDrthMask = 0x3e, + RxMxdmaMask = 0x700000, + RxMxdma_512 = 0x0, + RxMxdma_4 = 0x100000, + RxMxdma_8 = 0x200000, + RxMxdma_16 = 0x300000, + RxMxdma_32 = 0x400000, + RxMxdma_64 = 0x500000, + RxMxdma_128 = 0x600000, + RxMxdma_256 = 0x700000, + RxAcceptLong = 0x8000000, + RxAcceptTx = 0x10000000, + RxAcceptRunt = 0x40000000, + RxAcceptErr = 0x80000000 +}; +#define RX_DRTH_VAL (128/8) + +enum ClkRun_bits { + PMEEnable = 0x100, + PMEStatus = 0x8000, +}; + +enum WolCmd_bits { + WakePhy = 0x1, + WakeUnicast = 0x2, + WakeMulticast = 0x4, + WakeBroadcast = 0x8, + WakeArp = 0x10, + WakePMatch0 = 0x20, + WakePMatch1 = 0x40, + WakePMatch2 = 0x80, + WakePMatch3 = 0x100, + WakeMagic = 0x200, + WakeMagicSecure = 0x400, + SecureHack = 0x100000, + WokePhy = 0x400000, + WokeUnicast = 0x800000, + WokeMulticast = 0x1000000, + WokeBroadcast = 0x2000000, + WokeArp = 0x4000000, + WokePMatch0 = 0x8000000, + WokePMatch1 = 0x10000000, + WokePMatch2 = 0x20000000, + WokePMatch3 = 0x40000000, + WokeMagic = 0x80000000, + WakeOptsSummary = 0x7ff +}; + +enum RxFilterAddr_bits { + RFCRAddressMask = 0x3ff, + AcceptMulticast = 0x00200000, + AcceptMyPhys = 0x08000000, + AcceptAllPhys = 0x10000000, + AcceptAllMulticast = 0x20000000, + AcceptBroadcast = 0x40000000, + RxFilterEnable = 0x80000000 +}; + +enum StatsCtrl_bits { + StatsWarn = 0x1, + StatsFreeze = 0x2, + StatsClear = 0x4, + StatsStrobe = 0x8, +}; + +enum MIntrCtrl_bits { + MICRIntEn = 0x2, +}; + +enum PhyCtrl_bits { + PhyAddrMask = 0x1f, +}; + +#define PHY_ADDR_NONE 32 +#define PHY_ADDR_INTERNAL 1 + +/* values we might find in the silicon revision register */ +#define SRR_DP83815_C 0x0302 +#define SRR_DP83815_D 
0x0403 +#define SRR_DP83816_A4 0x0504 +#define SRR_DP83816_A5 0x0505 + +/* The Rx and Tx buffer descriptors. */ +/* Note that using only 32 bit fields simplifies conversion to big-endian + architectures. */ +struct netdev_desc { + __le32 next_desc; + __le32 cmd_status; + __le32 addr; + __le32 software_use; +}; + +/* Bits in network_desc.status */ +enum desc_status_bits { + DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000, + DescNoCRC=0x10000000, DescPktOK=0x08000000, + DescSizeMask=0xfff, + + DescTxAbort=0x04000000, DescTxFIFO=0x02000000, + DescTxCarrier=0x01000000, DescTxDefer=0x00800000, + DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000, + DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000, + + DescRxAbort=0x04000000, DescRxOver=0x02000000, + DescRxDest=0x01800000, DescRxLong=0x00400000, + DescRxRunt=0x00200000, DescRxInvalid=0x00100000, + DescRxCRC=0x00080000, DescRxAlign=0x00040000, + DescRxLoop=0x00020000, DesRxColl=0x00010000, +}; + +struct netdev_private { + /* Descriptor rings first for alignment */ + dma_addr_t ring_dma; + struct netdev_desc *rx_ring; + struct netdev_desc *tx_ring; + /* The addresses of receive-in-place skbuffs */ + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + dma_addr_t rx_dma[RX_RING_SIZE]; + /* address of a sent-in-place packet/buffer, for later free() */ + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + dma_addr_t tx_dma[TX_RING_SIZE]; + struct net_device *dev; + void __iomem *ioaddr; + struct napi_struct napi; + /* Media monitoring timer */ + struct timer_list timer; + /* Frequently used values: keep some adjacent for cache effect */ + struct pci_dev *pci_dev; + struct netdev_desc *rx_head_desc; + /* Producer/consumer ring indices */ + unsigned int cur_rx, dirty_rx; + unsigned int cur_tx, dirty_tx; + /* Based on MTU+slack. */ + unsigned int rx_buf_sz; + int oom; + /* Interrupt status */ + u32 intr_status; + /* Do not touch the nic registers */ + int hands_off; + /* Don't pay attention to the reported link state. 
*/ + int ignore_phy; + /* external phy that is used: only valid if dev->if_port != PORT_TP */ + int mii; + int phy_addr_external; + unsigned int full_duplex; + /* Rx filter */ + u32 cur_rx_mode; + u32 rx_filter[16]; + /* FIFO and PCI burst thresholds */ + u32 tx_config, rx_config; + /* original contents of ClkRun register */ + u32 SavedClkRun; + /* silicon revision */ + u32 srr; + /* expected DSPCFG value */ + u16 dspcfg; + int dspcfg_workaround; + /* parms saved in ethtool format */ + u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */ + u8 duplex; /* Duplex, half or full */ + u8 autoneg; /* Autonegotiation enabled */ + /* MII transceiver section */ + u16 advertising; + unsigned int iosize; + spinlock_t lock; + u32 msg_enable; + /* EEPROM data */ + int eeprom_size; +}; + +static void move_int_phy(struct net_device *dev, int addr); +static int eeprom_read(void __iomem *ioaddr, int location); +static int mdio_read(struct net_device *dev, int reg); +static void mdio_write(struct net_device *dev, int reg, u16 data); +static void init_phy_fixup(struct net_device *dev); +static int miiport_read(struct net_device *dev, int phy_id, int reg); +static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data); +static int find_mii(struct net_device *dev); +static void natsemi_reset(struct net_device *dev); +static void natsemi_reload_eeprom(struct net_device *dev); +static void natsemi_stop_rxtx(struct net_device *dev); +static int netdev_open(struct net_device *dev); +static void do_cable_magic(struct net_device *dev); +static void undo_cable_magic(struct net_device *dev); +static void check_link(struct net_device *dev); +static void netdev_timer(unsigned long data); +static void dump_ring(struct net_device *dev); +static void ns_tx_timeout(struct net_device *dev); +static int alloc_ring(struct net_device *dev); +static void refill_rx(struct net_device *dev); +static void init_ring(struct net_device *dev); +static void drain_tx(struct net_device *dev); +static void drain_ring(struct net_device *dev); +static void free_ring(struct net_device *dev); +static void reinit_ring(struct net_device *dev); +static void init_registers(struct net_device *dev); +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t intr_handler(int irq, void *dev_instance); +static void netdev_error(struct net_device *dev, int intr_status); +static int natsemi_poll(struct napi_struct *napi, int budget); +static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); +static void netdev_tx_done(struct net_device *dev); +static int natsemi_change_mtu(struct net_device *dev, int new_mtu); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void natsemi_poll_controller(struct net_device *dev); +#endif +static void __set_rx_mode(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static void __get_stats(struct net_device *dev); +static struct net_device_stats *get_stats(struct net_device *dev); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int netdev_set_wol(struct net_device *dev, u32 newval); +static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur); +static int netdev_set_sopass(struct net_device *dev, u8 *newval); +static int netdev_get_sopass(struct net_device *dev, u8 *data); +static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd); +static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd); +static void enable_wol_mode(struct net_device 
*dev, int enable_intr); +static int netdev_close(struct net_device *dev); +static int netdev_get_regs(struct net_device *dev, u8 *buf); +static int netdev_get_eeprom(struct net_device *dev, u8 *buf); +static const struct ethtool_ops ethtool_ops; + +#define NATSEMI_ATTR(_name) \ +static ssize_t natsemi_show_##_name(struct device *dev, \ + struct device_attribute *attr, char *buf); \ + static ssize_t natsemi_set_##_name(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count); \ + static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name) + +#define NATSEMI_CREATE_FILE(_dev, _name) \ + device_create_file(&_dev->dev, &dev_attr_##_name) +#define NATSEMI_REMOVE_FILE(_dev, _name) \ + device_remove_file(&_dev->dev, &dev_attr_##_name) + +NATSEMI_ATTR(dspcfg_workaround); + +static ssize_t natsemi_show_dspcfg_workaround(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct netdev_private *np = netdev_priv(to_net_dev(dev)); + + return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off"); +} + +static ssize_t natsemi_set_dspcfg_workaround(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netdev_private *np = netdev_priv(to_net_dev(dev)); + int new_setting; + unsigned long flags; + + /* Find out the new setting */ + if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) + new_setting = 1; + else if (!strncmp("off", buf, count - 1) || + !strncmp("0", buf, count - 1)) + new_setting = 0; + else + return count; + + spin_lock_irqsave(&np->lock, flags); + + np->dspcfg_workaround = new_setting; + + spin_unlock_irqrestore(&np->lock, flags); + + return count; +} + +static inline void __iomem *ns_ioaddr(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + return np->ioaddr; +} + +static inline void natsemi_irq_enable(struct net_device *dev) +{ + writel(1, ns_ioaddr(dev) + IntrEnable); + readl(ns_ioaddr(dev) + IntrEnable); +} + +static inline void natsemi_irq_disable(struct net_device *dev) +{ + writel(0, ns_ioaddr(dev) + IntrEnable); + readl(ns_ioaddr(dev) + IntrEnable); +} + +static void move_int_phy(struct net_device *dev, int addr) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = ns_ioaddr(dev); + int target = 31; + + /* + * The internal phy is visible on the external mii bus. Therefore we must + * move it away before we can send commands to an external phy. + * There are two addresses we must avoid: + * - the address on the external phy that is used for transmission. + * - the address that we want to access. User space can access phys + * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the + * phy that is used for transmission. + */ + + if (target == addr) + target--; + if (target == np->phy_addr_external) + target--; + writew(target, ioaddr + PhyCtrl); + readw(ioaddr + PhyCtrl); + udelay(1); +} + +static void natsemi_init_media(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + u32 tmp; + + if (np->ignore_phy) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + /* get the initial settings from hardware */ + tmp = mdio_read(dev, MII_BMCR); + np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10; + np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF; + np->autoneg = (tmp & BMCR_ANENABLE)? 
AUTONEG_ENABLE: AUTONEG_DISABLE;
+	np->advertising= mdio_read(dev, MII_ADVERTISE);
+
+	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
+	    netif_msg_probe(np)) {
+		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
+			"10%s %s duplex.\n",
+			pci_name(np->pci_dev),
+			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
+			  "enabled, advertise" : "disabled, force",
+			(np->advertising &
+			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
+			  "0" : "",
+			(np->advertising &
+			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
+			  "full" : "half");
+	}
+	if (netif_msg_probe(np))
+		printk(KERN_INFO
+			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
+			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
+			np->advertising);
+
+}
+
+static const struct net_device_ops natsemi_netdev_ops = {
+	.ndo_open		= netdev_open,
+	.ndo_stop		= netdev_close,
+	.ndo_start_xmit		= start_tx,
+	.ndo_get_stats		= get_stats,
+	.ndo_set_rx_mode	= set_rx_mode,
+	.ndo_change_mtu		= natsemi_change_mtu,
+	.ndo_do_ioctl		= netdev_ioctl,
+	.ndo_tx_timeout 	= ns_tx_timeout,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= natsemi_poll_controller,
+#endif
+};
+
+static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *dev;
+	struct netdev_private *np;
+	int i, option, irq, chip_idx = ent->driver_data;
+	static int find_cnt = -1;
+	resource_size_t iostart;
+	unsigned long iosize;
+	void __iomem *ioaddr;
+	const int pcibar = 1; /* PCI base address register */
+	int prev_eedata;
+	u32 tmp;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	static int printed_version;
+	if (!printed_version++)
+		printk(version);
+#endif
+
+	i = pci_enable_device(pdev);
+	if (i) return i;
+
+	/* natsemi has a non-standard PM control register
+	 * in PCI config space.  Some boards apparently need
+	 * to be brought to D0 in this manner.
+	 */
+	pci_read_config_dword(pdev, PCIPM, &tmp);
+	if (tmp & PCI_PM_CTRL_STATE_MASK) {
+		/* D0 state, disable PME assertion */
+		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
+		pci_write_config_dword(pdev, PCIPM, newtmp);
+	}
+
+	find_cnt++;
+	iostart = pci_resource_start(pdev, pcibar);
+	iosize = pci_resource_len(pdev, pcibar);
+	irq = pdev->irq;
+
+	pci_set_master(pdev);
+
+	dev = alloc_etherdev(sizeof (struct netdev_private));
+	if (!dev)
+		return -ENOMEM;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	i = pci_request_regions(pdev, DRV_NAME);
+	if (i)
+		goto err_pci_request_regions;
+
+	ioaddr = ioremap(iostart, iosize);
+	if (!ioaddr) {
+		i = -ENOMEM;
+		goto err_ioremap;
+	}
+
+	/* Work around the dropped serial bit. */
+	prev_eedata = eeprom_read(ioaddr, 6);
+	for (i = 0; i < 3; i++) {
+		int eedata = eeprom_read(ioaddr, i + 7);
+		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+		dev->dev_addr[i*2+1] = eedata >> 7;
+		prev_eedata = eedata;
+	}
+
+	np = netdev_priv(dev);
+	np->ioaddr = ioaddr;
+
+	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
+	np->dev = dev;
+
+	np->pci_dev = pdev;
+	pci_set_drvdata(pdev, dev);
+	np->iosize = iosize;
+	spin_lock_init(&np->lock);
+	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
+	np->hands_off = 0;
+	np->intr_status = 0;
+	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
+	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
+		np->ignore_phy = 1;
+	else
+		np->ignore_phy = 0;
+	np->dspcfg_workaround = dspcfg_workaround;
+
+	/* Initial port:
+	 * - If configured to ignore the PHY set up for external.
+	 * - If the nic was configured to use an external phy and if find_mii
+	 *   finds a phy: use external port, first phy that replies.
+	 * - Otherwise: internal port.
+	 * Note that the phy address for the internal phy doesn't matter:
+	 * The address would be used to access a phy over the mii bus, but
+	 * the internal phy is accessed through mapped registers.
+	 */
+	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
+		dev->if_port = PORT_MII;
+	else
+		dev->if_port = PORT_TP;
+	/* Reset the chip to erase previous misconfiguration. */
+	natsemi_reload_eeprom(dev);
+	natsemi_reset(dev);
+
+	if (dev->if_port != PORT_TP) {
+		np->phy_addr_external = find_mii(dev);
+		/* If we're ignoring the PHY it doesn't matter if we can't
+		 * find one. */
+		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
+			dev->if_port = PORT_TP;
+			np->phy_addr_external = PHY_ADDR_INTERNAL;
+		}
+	} else {
+		np->phy_addr_external = PHY_ADDR_INTERNAL;
+	}
+
+	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+	/* The lower four bits are the media type. */
+	if (option) {
+		if (option & 0x200)
+			np->full_duplex = 1;
+		if (option & 15)
+			printk(KERN_INFO
+				"natsemi %s: ignoring user supplied media type %d",
+				pci_name(np->pci_dev), option & 15);
+	}
+	if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
+		np->full_duplex = 1;
+
+	dev->netdev_ops = &natsemi_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	dev->ethtool_ops = &ethtool_ops;
+
+	if (mtu)
+		dev->mtu = mtu;
+
+	natsemi_init_media(dev);
+
+	/* save the silicon revision for later querying */
+	np->srr = readl(ioaddr + SiliconRev);
+	if (netif_msg_hw(np))
+		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
+				pci_name(np->pci_dev), np->srr);
+
+	i = register_netdev(dev);
+	if (i)
+		goto err_register_netdev;
+	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
+	if (i)
+		goto err_create_file;
+
+	if (netif_msg_drv(np)) {
+		printk(KERN_INFO "natsemi %s: %s at %#08llx "
+		       "(%s), %pM, IRQ %d",
+		       dev->name, natsemi_pci_info[chip_idx].name,
+		       (unsigned long long)iostart, pci_name(np->pci_dev),
+		       dev->dev_addr, irq);
+		if (dev->if_port == PORT_TP)
+			printk(", port TP.\n");
+		else if (np->ignore_phy)
+			printk(", port MII, ignoring PHY\n");
+		else
+			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
+	}
+	return 0;
+
+ err_create_file:
+ 	unregister_netdev(dev);
+
+ err_register_netdev:
+	iounmap(ioaddr);
+
+ err_ioremap:
+	pci_release_regions(pdev);
+
+ err_pci_request_regions:
+	free_netdev(dev);
+	return i;
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
+   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
+   made udelay() unreliable.
+   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
+   deprecated.
+*/
+#define eeprom_delay(ee_addr)	readl(ee_addr)
+
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the alway-set leading bit. */
+enum EEPROM_Cmds {
+	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
+static int eeprom_read(void __iomem *addr, int location)
+{
+	int i;
+	int retval = 0;
+	void __iomem *ee_addr = addr + EECtrl;
+	int read_cmd = location | EE_ReadCmd;
+
+	writel(EE_Write0, ee_addr);
+
+	/* Shift the read command bits out.
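+	   For example, natsemi_probe1() above reads word 6 first, so
+	   read_cmd = 6 | EE_ReadCmd = 0x186 and the loop below clocks out
+	   the bits 0 0 1 1 0 0 0 0 1 1 0 for i = 10 down to 0: the 110
+	   start/opcode pattern followed by the six-bit word address.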
*/ + for (i = 10; i >= 0; i--) { + short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0; + writel(dataval, ee_addr); + eeprom_delay(ee_addr); + writel(dataval | EE_ShiftClk, ee_addr); + eeprom_delay(ee_addr); + } + writel(EE_ChipSelect, ee_addr); + eeprom_delay(ee_addr); + + for (i = 0; i < 16; i++) { + writel(EE_ChipSelect | EE_ShiftClk, ee_addr); + eeprom_delay(ee_addr); + retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0; + writel(EE_ChipSelect, ee_addr); + eeprom_delay(ee_addr); + } + + /* Terminate the EEPROM access. */ + writel(EE_Write0, ee_addr); + writel(0, ee_addr); + return retval; +} + +/* MII transceiver control section. + * The 83815 series has an internal transceiver, and we present the + * internal management registers as if they were MII connected. + * External Phy registers are referenced through the MII interface. + */ + +/* clock transitions >= 20ns (25MHz) + * One readl should be good to PCI @ 100MHz + */ +#define mii_delay(ioaddr) readl(ioaddr + EECtrl) + +static int mii_getbit (struct net_device *dev) +{ + int data; + void __iomem *ioaddr = ns_ioaddr(dev); + + writel(MII_ShiftClk, ioaddr + EECtrl); + data = readl(ioaddr + EECtrl); + writel(0, ioaddr + EECtrl); + mii_delay(ioaddr); + return (data & MII_Data)? 1 : 0; +} + +static void mii_send_bits (struct net_device *dev, u32 data, int len) +{ + u32 i; + void __iomem *ioaddr = ns_ioaddr(dev); + + for (i = (1 << (len-1)); i; i >>= 1) + { + u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0); + writel(mdio_val, ioaddr + EECtrl); + mii_delay(ioaddr); + writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl); + mii_delay(ioaddr); + } + writel(0, ioaddr + EECtrl); + mii_delay(ioaddr); +} + +static int miiport_read(struct net_device *dev, int phy_id, int reg) +{ + u32 cmd; + int i; + u32 retval = 0; + + /* Ensure sync */ + mii_send_bits (dev, 0xffffffff, 32); + /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ + /* ST,OP = 0110'b for read operation */ + cmd = (0x06 << 10) | (phy_id << 5) | reg; + mii_send_bits (dev, cmd, 14); + /* Turnaround */ + if (mii_getbit (dev)) + return 0; + /* Read data */ + for (i = 0; i < 16; i++) { + retval <<= 1; + retval |= mii_getbit (dev); + } + /* End cycle */ + mii_getbit (dev); + return retval; +} + +static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data) +{ + u32 cmd; + + /* Ensure sync */ + mii_send_bits (dev, 0xffffffff, 32); + /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */ + /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */ + cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data; + mii_send_bits (dev, cmd, 32); + /* End cycle */ + mii_getbit (dev); +} + +static int mdio_read(struct net_device *dev, int reg) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = ns_ioaddr(dev); + + /* The 83815 series has two ports: + * - an internal transceiver + * - an external mii bus + */ + if (dev->if_port == PORT_TP) + return readw(ioaddr+BasicControl+(reg<<2)); + else + return miiport_read(dev, np->phy_addr_external, reg); +} + +static void mdio_write(struct net_device *dev, int reg, u16 data) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = ns_ioaddr(dev); + + /* The 83815 series has an internal transceiver; handle separately */ + if (dev->if_port == PORT_TP) + writew(data, ioaddr+BasicControl+(reg<<2)); + else + miiport_write(dev, np->phy_addr_external, reg, data); +} + +static void init_phy_fixup(struct net_device *dev) +{ + struct netdev_private *np = 
netdev_priv(dev);
+	void __iomem *ioaddr = ns_ioaddr(dev);
+	int i;
+	u32 cfg;
+	u16 tmp;
+
+	/* restore stuff lost when power was out */
+	tmp = mdio_read(dev, MII_BMCR);
+	if (np->autoneg == AUTONEG_ENABLE) {
+		/* renegotiate if something changed */
+		if ((tmp & BMCR_ANENABLE) == 0 ||
+		    np->advertising != mdio_read(dev, MII_ADVERTISE))
+		{
+			/* turn on autonegotiation and force negotiation */
+			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
+			mdio_write(dev, MII_ADVERTISE, np->advertising);
+		}
+	} else {
+		/* turn off auto negotiation, set speed and duplexity */
+		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+		if (np->speed == SPEED_100)
+			tmp |= BMCR_SPEED100;
+		if (np->duplex == DUPLEX_FULL)
+			tmp |= BMCR_FULLDPLX;
+		/*
+		 * Note: there is no good way to inform the link partner
+		 * that our capabilities changed. The user has to unplug
+		 * and replug the network cable after some changes, e.g.
+		 * after switching from 10HD, autoneg off to 100 HD,
+		 * autoneg off.
+		 */
+	}
+	mdio_write(dev, MII_BMCR, tmp);
+	readl(ioaddr + ChipConfig);
+	udelay(1);
+
+	/* find out what phy this is */
+	np->mii = (mdio_read(dev, MII_PHYSID1) << 16) +
+			mdio_read(dev, MII_PHYSID2);
+
+	/* handle external phys here */
+	switch (np->mii) {
+	case PHYID_AM79C874:
+		/* phy specific configuration for fibre/tp operation */
+		tmp = mdio_read(dev, MII_MCTRL);
+		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
+		if (dev->if_port == PORT_FIBRE)
+			tmp |= MII_FX_SEL;
+		else
+			tmp |= MII_EN_SCRM;
+		mdio_write(dev, MII_MCTRL, tmp);
+		break;
+	default:
+		break;
+	}
+	cfg = readl(ioaddr + ChipConfig);
+	if (cfg & CfgExtPhy)
+		return;
+
+	/* On page 78 of the spec, they recommend some settings for "optimum
+	   performance" to be done in sequence.  These settings optimize some
+	   of the 100Mbit autodetection circuitry.  They say we only want to
+	   do this for rev C of the chip, but engineers at NSC (Bradley
+	   Kennedy) recommends always setting them.  If you don't, you get
+	   errors on some autonegotiations that make the device unusable.
+
+	   It seems that the DSP needs a few usec to reinitialize after
+	   the start of the phy. Just retry writing these values until they
+	   stick.
+	*/
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		int dspcfg;
+		writew(1, ioaddr + PGSEL);
+		writew(PMDCSR_VAL, ioaddr + PMDCSR);
+		writew(TSTDAT_VAL, ioaddr + TSTDAT);
+		np->dspcfg = (np->srr <= SRR_DP83815_C)?
+			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
+		writew(np->dspcfg, ioaddr + DSPCFG);
+		writew(SDCFG_VAL, ioaddr + SDCFG);
+		writew(0, ioaddr + PGSEL);
+		readl(ioaddr + ChipConfig);
+		udelay(10);
+
+		writew(1, ioaddr + PGSEL);
+		dspcfg = readw(ioaddr + DSPCFG);
+		writew(0, ioaddr + PGSEL);
+		if (np->dspcfg == dspcfg)
+			break;
+	}
+
+	if (netif_msg_link(np)) {
+		if (i==NATSEMI_HW_TIMEOUT) {
+			printk(KERN_INFO
+				"%s: DSPCFG mismatch after retrying for %d usec.\n",
+				dev->name, i*10);
+		} else {
+			printk(KERN_INFO
+				"%s: DSPCFG accepted after %d usec.\n",
+				dev->name, i*10);
+		}
+	}
+	/*
+	 * Enable PHY Specific event based interrupts.  Link state change
+	 * and Auto-Negotiation Completion are among the affected.
+	 * Read the intr status to clear it (needed for wake events).
+	 */
+	readw(ioaddr + MIntrStatus);
+	writew(MICRIntEn, ioaddr + MIntrCtrl);
+}
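init_phy_fixup() above (and find_mii() below) identify the attached transceiver by gluing the two MII ID registers into one 32-bit value and comparing it with constants such as PHYID_AM79C874. A minimal restatement with a hypothetical helper name:

/* Hypothetical helper: build the 32-bit PHY identifier the same way
 * init_phy_fixup() and find_mii() do, and check for the Am79C874 that
 * needs the fibre/TP mode-control fixup above. */
static bool phy_is_am79c874(struct net_device *dev)
{
	u32 id = (mdio_read(dev, MII_PHYSID1) << 16) +
		  mdio_read(dev, MII_PHYSID2);

	return id == PHYID_AM79C874;
}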
+ */ + readw(ioaddr + MIntrStatus); + writew(MICRIntEn, ioaddr + MIntrCtrl); +} + +static int switch_port_external(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = ns_ioaddr(dev); + u32 cfg; + + cfg = readl(ioaddr + ChipConfig); + if (cfg & CfgExtPhy) + return 0; + + if (netif_msg_link(np)) { + printk(KERN_INFO "%s: switching to external transceiver.\n", + dev->name); + } + + /* 1) switch back to external phy */ + writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig); + readl(ioaddr + ChipConfig); + udelay(1); + + /* 2) reset the external phy: */ + /* resetting the external PHY has been known to cause a hub supplying + * power over Ethernet to kill the power. We don't want to kill + * power to this computer, so we avoid resetting the phy. + */ + + /* 3) reinit the phy fixup, it got lost during power down. */ + move_int_phy(dev, np->phy_addr_external); + init_phy_fixup(dev); + + return 1; +} + +static int switch_port_internal(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = ns_ioaddr(dev); + int i; + u32 cfg; + u16 bmcr; + + cfg = readl(ioaddr + ChipConfig); + if (!(cfg &CfgExtPhy)) + return 0; + + if (netif_msg_link(np)) { + printk(KERN_INFO "%s: switching to internal transceiver.\n", + dev->name); + } + /* 1) switch back to internal phy: */ + cfg = cfg & ~(CfgExtPhy | CfgPhyDis); + writel(cfg, ioaddr + ChipConfig); + readl(ioaddr + ChipConfig); + udelay(1); + + /* 2) reset the internal phy: */ + bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2)); + writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2)); + readl(ioaddr + ChipConfig); + udelay(10); + for (i=0;iname, i*10); + } + /* 3) reinit the phy fixup, it got lost during power down. */ + init_phy_fixup(dev); + + return 1; +} + +/* Scan for a PHY on the external mii bus. + * There are two tricky points: + * - Do not scan while the internal phy is enabled. The internal phy will + * crash: e.g. reads from the DSPCFG register will return odd values and + * the nasty random phy reset code will reset the nic every few seconds. + * - The internal phy must be moved around, an external phy could + * have the same address as the internal phy. + */ +static int find_mii(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int tmp; + int i; + int did_switch; + + /* Switch to external phy */ + did_switch = switch_port_external(dev); + + /* Scan the possible phy addresses: + * + * PHY address 0 means that the phy is in isolate mode. Not yet + * supported due to lack of test hardware. User space should + * handle it through ethtool. + */ + for (i = 1; i <= 31; i++) { + move_int_phy(dev, i); + tmp = miiport_read(dev, i, MII_BMSR); + if (tmp != 0xffff && tmp != 0x0000) { + /* found something! 
+			np->mii = (mdio_read(dev, MII_PHYSID1) << 16) +
+					mdio_read(dev, MII_PHYSID2);
+			if (netif_msg_probe(np)) {
+				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
+						pci_name(np->pci_dev), np->mii, i);
+			}
+			break;
+		}
+	}
+	/* And switch back to internal phy: */
+	if (did_switch)
+		switch_port_internal(dev);
+	return i;
+}
+
+/* CFG bits [13:16] [18:23] */
+#define CFG_RESET_SAVE 0xfde000
+/* WCSR bits [0:4] [9:10] */
+#define WCSR_RESET_SAVE 0x61f
+/* RFCR bits [20] [22] [27:31] */
+#define RFCR_RESET_SAVE 0xf8500000
+
+static void natsemi_reset(struct net_device *dev)
+{
+	int i;
+	u32 cfg;
+	u32 wcsr;
+	u32 rfcr;
+	u16 pmatch[3];
+	u16 sopass[3];
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = ns_ioaddr(dev);
+
+	/*
+	 * Resetting the chip causes some registers to be lost.
+	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
+	 * we save the state that would have been loaded from EEPROM
+	 * on a normal power-up (see the spec EEPROM map).  This assumes
+	 * whoever calls this will follow up with init_registers() eventually.
+	 */
+
+	/* CFG */
+	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
+	/* WCSR */
+	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
+	/* RFCR */
+	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
+	/* PMATCH */
+	for (i = 0; i < 3; i++) {
+		writel(i*2, ioaddr + RxFilterAddr);
+		pmatch[i] = readw(ioaddr + RxFilterData);
+	}
+	/* SOPAS */
+	for (i = 0; i < 3; i++) {
+		writel(0xa+(i*2), ioaddr + RxFilterAddr);
+		sopass[i] = readw(ioaddr + RxFilterData);
+	}
+
+	/* now whack the chip */
+	writel(ChipReset, ioaddr + ChipCmd);
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		if (!(readl(ioaddr + ChipCmd) & ChipReset))
+			break;
+		udelay(5);
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
+			dev->name, i*5);
+	} else if (netif_msg_hw(np)) {
+		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
+			dev->name, i*5);
+	}
+
+	/* restore CFG */
+	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
+	/* turn on external phy if it was selected */
+	if (dev->if_port == PORT_TP)
+		cfg &= ~(CfgExtPhy | CfgPhyDis);
+	else
+		cfg |= (CfgExtPhy | CfgPhyDis);
+	writel(cfg, ioaddr + ChipConfig);
+	/* restore WCSR */
+	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
+	writel(wcsr, ioaddr + WOLCmd);
+	/* read RFCR */
+	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
+	/* restore PMATCH */
+	for (i = 0; i < 3; i++) {
+		writel(i*2, ioaddr + RxFilterAddr);
+		writew(pmatch[i], ioaddr + RxFilterData);
+	}
+	for (i = 0; i < 3; i++) {
+		writel(0xa+(i*2), ioaddr + RxFilterAddr);
+		writew(sopass[i], ioaddr + RxFilterData);
+	}
+	/* restore RFCR */
+	writel(rfcr, ioaddr + RxFilterAddr);
+}
+
+static void reset_rx(struct net_device *dev)
+{
+	int i;
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = ns_ioaddr(dev);
+
+	np->intr_status &= ~RxResetDone;
+
+	writel(RxReset, ioaddr + ChipCmd);
+
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		np->intr_status |= readl(ioaddr + IntrStatus);
+		if (np->intr_status & RxResetDone)
+			break;
+		udelay(15);
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
+		       dev->name, i*15);
+	} else if (netif_msg_hw(np)) {
+		printk(KERN_WARNING "%s: RX reset took %d usec.\n",
+		       dev->name, i*15);
+	}
+}
+
+static void natsemi_reload_eeprom(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = ns_ioaddr(dev);
+	int i;
+
+	writel(EepromReload, ioaddr + PCIBusCfg);
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		udelay(50);
+		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
+			break;
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
+			pci_name(np->pci_dev), i*50);
+	} else if (netif_msg_hw(np)) {
+		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
+			pci_name(np->pci_dev), i*50);
+	}
+}
+
+static void natsemi_stop_rxtx(struct net_device
*dev) +{ + void __iomem * ioaddr = ns_ioaddr(dev); + struct netdev_private *np = netdev_priv(dev); + int i; + + writel(RxOff | TxOff, ioaddr + ChipCmd); + for(i=0;i< NATSEMI_HW_TIMEOUT;i++) { + if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0) + break; + udelay(5); + } + if (i==NATSEMI_HW_TIMEOUT) { + printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n", + dev->name, i*5); + } else if (netif_msg_hw(np)) { + printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n", + dev->name, i*5); + } +} + +static int netdev_open(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + const int irq = np->pci_dev->irq; + int i; + + /* Reset the chip, just in case. */ + natsemi_reset(dev); + + i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); + if (i) return i; + + if (netif_msg_ifup(np)) + printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", + dev->name, irq); + i = alloc_ring(dev); + if (i < 0) { + free_irq(irq, dev); + return i; + } + napi_enable(&np->napi); + + init_ring(dev); + spin_lock_irq(&np->lock); + init_registers(dev); + /* now set the MAC address according to dev->dev_addr */ + for (i = 0; i < 3; i++) { + u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i]; + + writel(i*2, ioaddr + RxFilterAddr); + writew(mac, ioaddr + RxFilterData); + } + writel(np->cur_rx_mode, ioaddr + RxFilterAddr); + spin_unlock_irq(&np->lock); + + netif_start_queue(dev); + + if (netif_msg_ifup(np)) + printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n", + dev->name, (int)readl(ioaddr + ChipCmd)); + + /* Set the timer to check for link beat. */ + init_timer(&np->timer); + np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); + np->timer.data = (unsigned long)dev; + np->timer.function = netdev_timer; /* timer handler */ + add_timer(&np->timer); + + return 0; +} + +static void do_cable_magic(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem *ioaddr = ns_ioaddr(dev); + + if (dev->if_port != PORT_TP) + return; + + if (np->srr >= SRR_DP83816_A5) + return; + + /* + * 100 MBit links with short cables can trip an issue with the chip. + * The problem manifests as lots of CRC errors and/or flickering + * activity LED while idle. This process is based on instructions + * from engineers at National. 
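+	 * (Concretely, the check below treats the low byte of TSTDAT as a
+	 * signed coefficient and flags the bad case when it is either not
+	 * negative or lies in the 0xd8-0xff range, i.e. negative but close
+	 * to zero; the fixed coefficient is then written to TSTDAT and the
+	 * value is locked by setting DSPCFG_LOCK in DSPCFG.)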
+ */ + if (readl(ioaddr + ChipConfig) & CfgSpeed100) { + u16 data; + + writew(1, ioaddr + PGSEL); + /* + * coefficient visibility should already be enabled via + * DSPCFG | 0x1000 + */ + data = readw(ioaddr + TSTDAT) & 0xff; + /* + * the value must be negative, and within certain values + * (these values all come from National) + */ + if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) { + np = netdev_priv(dev); + + /* the bug has been triggered - fix the coefficient */ + writew(TSTDAT_FIXED, ioaddr + TSTDAT); + /* lock the value */ + data = readw(ioaddr + DSPCFG); + np->dspcfg = data | DSPCFG_LOCK; + writew(np->dspcfg, ioaddr + DSPCFG); + } + writew(0, ioaddr + PGSEL); + } +} + +static void undo_cable_magic(struct net_device *dev) +{ + u16 data; + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + + if (dev->if_port != PORT_TP) + return; + + if (np->srr >= SRR_DP83816_A5) + return; + + writew(1, ioaddr + PGSEL); + /* make sure the lock bit is clear */ + data = readw(ioaddr + DSPCFG); + np->dspcfg = data & ~DSPCFG_LOCK; + writew(np->dspcfg, ioaddr + DSPCFG); + writew(0, ioaddr + PGSEL); +} + +static void check_link(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + int duplex = np->duplex; + u16 bmsr; + + /* If we are ignoring the PHY then don't try reading it. */ + if (np->ignore_phy) + goto propagate_state; + + /* The link status field is latched: it remains low after a temporary + * link failure until it's read. We need the current link status, + * thus read twice. + */ + mdio_read(dev, MII_BMSR); + bmsr = mdio_read(dev, MII_BMSR); + + if (!(bmsr & BMSR_LSTATUS)) { + if (netif_carrier_ok(dev)) { + if (netif_msg_link(np)) + printk(KERN_NOTICE "%s: link down.\n", + dev->name); + netif_carrier_off(dev); + undo_cable_magic(dev); + } + return; + } + if (!netif_carrier_ok(dev)) { + if (netif_msg_link(np)) + printk(KERN_NOTICE "%s: link up.\n", dev->name); + netif_carrier_on(dev); + do_cable_magic(dev); + } + + duplex = np->full_duplex; + if (!duplex) { + if (bmsr & BMSR_ANEGCOMPLETE) { + int tmp = mii_nway_result( + np->advertising & mdio_read(dev, MII_LPA)); + if (tmp == LPA_100FULL || tmp == LPA_10FULL) + duplex = 1; + } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX) + duplex = 1; + } + +propagate_state: + /* if duplex is set then bit 28 must be set, too */ + if (duplex ^ !!(np->rx_config & RxAcceptTx)) { + if (netif_msg_link(np)) + printk(KERN_INFO + "%s: Setting %s-duplex based on negotiated " + "link capability.\n", dev->name, + duplex ? "full" : "half"); + if (duplex) { + np->rx_config |= RxAcceptTx; + np->tx_config |= TxCarrierIgn | TxHeartIgn; + } else { + np->rx_config &= ~RxAcceptTx; + np->tx_config &= ~(TxCarrierIgn | TxHeartIgn); + } + writel(np->tx_config, ioaddr + TxConfig); + writel(np->rx_config, ioaddr + RxConfig); + } +} + +static void init_registers(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + + init_phy_fixup(dev); + + /* clear any interrupts that are pending, such as wake events */ + readl(ioaddr + IntrStatus); + + writel(np->ring_dma, ioaddr + RxRingPtr); + writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), + ioaddr + TxRingPtr); + + /* Initialize other registers. + * Configure the PCI bus bursts and FIFO thresholds. + * Configure for standard, in-spec Ethernet. + * Start with half-duplex. check_link will update + * to the correct settings. 
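+	 * With the macros defined earlier this works out to a TxConfig fill
+	 * threshold field of 512/32 = 0x10 (placed in bits 8-13 by
+	 * TX_FLTH_VAL), a drain threshold of 64/32 = 2 (TX_DRTH_VAL_START)
+	 * and an RxConfig drain threshold of 128/8 = 0x10 (RX_DRTH_VAL),
+	 * matching the DRTH/FLTH numbers quoted below.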
+ */ + + /* DRTH: 2: start tx if 64 bytes are in the fifo + * FLTH: 0x10: refill with next packet if 512 bytes are free + * MXDMA: 0: up to 256 byte bursts. + * MXDMA must be <= FLTH + * ECRETRY=1 + * ATP=1 + */ + np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | + TX_FLTH_VAL | TX_DRTH_VAL_START; + writel(np->tx_config, ioaddr + TxConfig); + + /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo + * MXDMA 0: up to 256 byte bursts + */ + np->rx_config = RxMxdma_256 | RX_DRTH_VAL; + /* if receive ring now has bigger buffers than normal, enable jumbo */ + if (np->rx_buf_sz > NATSEMI_LONGPKT) + np->rx_config |= RxAcceptLong; + + writel(np->rx_config, ioaddr + RxConfig); + + /* Disable PME: + * The PME bit is initialized from the EEPROM contents. + * PCI cards probably have PME disabled, but motherboard + * implementations may have PME set to enable WakeOnLan. + * With PME set the chip will scan incoming packets but + * nothing will be written to memory. */ + np->SavedClkRun = readl(ioaddr + ClkRun); + writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun); + if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) { + printk(KERN_NOTICE "%s: Wake-up event %#08x\n", + dev->name, readl(ioaddr + WOLCmd)); + } + + check_link(dev); + __set_rx_mode(dev); + + /* Enable interrupts by setting the interrupt mask. */ + writel(DEFAULT_INTR, ioaddr + IntrMask); + natsemi_irq_enable(dev); + + writel(RxOn | TxOn, ioaddr + ChipCmd); + writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */ +} + +/* + * netdev_timer: + * Purpose: + * 1) check for link changes. Usually they are handled by the MII interrupt + * but it doesn't hurt to check twice. + * 2) check for sudden death of the NIC: + * It seems that a reference set for this chip went out with incorrect info, + * and there exist boards that aren't quite right. An unexpected voltage + * drop can cause the PHY to get itself in a weird state (basically reset). + * NOTE: this only seems to affect revC chips. The user can disable + * this check via dspcfg_workaround sysfs option. + * 3) check of death of the RX path due to OOM + */ +static void netdev_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + int next_tick = NATSEMI_TIMER_FREQ; + const int irq = np->pci_dev->irq; + + if (netif_msg_timer(np)) { + /* DO NOT read the IntrStatus register, + * a read clears any pending interrupts. 
+ */ + printk(KERN_DEBUG "%s: Media selection timer tick.\n", + dev->name); + } + + if (dev->if_port == PORT_TP) { + u16 dspcfg; + + spin_lock_irq(&np->lock); + /* check for a nasty random phy-reset - use dspcfg as a flag */ + writew(1, ioaddr+PGSEL); + dspcfg = readw(ioaddr+DSPCFG); + writew(0, ioaddr+PGSEL); + if (np->dspcfg_workaround && dspcfg != np->dspcfg) { + if (!netif_queue_stopped(dev)) { + spin_unlock_irq(&np->lock); + if (netif_msg_drv(np)) + printk(KERN_NOTICE "%s: possible phy reset: " + "re-initializing\n", dev->name); + disable_irq(irq); + spin_lock_irq(&np->lock); + natsemi_stop_rxtx(dev); + dump_ring(dev); + reinit_ring(dev); + init_registers(dev); + spin_unlock_irq(&np->lock); + enable_irq(irq); + } else { + /* hurry back */ + next_tick = HZ; + spin_unlock_irq(&np->lock); + } + } else { + /* init_registers() calls check_link() for the above case */ + check_link(dev); + spin_unlock_irq(&np->lock); + } + } else { + spin_lock_irq(&np->lock); + check_link(dev); + spin_unlock_irq(&np->lock); + } + if (np->oom) { + disable_irq(irq); + np->oom = 0; + refill_rx(dev); + enable_irq(irq); + if (!np->oom) { + writel(RxOn, ioaddr + ChipCmd); + } else { + next_tick = 1; + } + } + + if (next_tick > 1) + mod_timer(&np->timer, round_jiffies(jiffies + next_tick)); + else + mod_timer(&np->timer, jiffies + next_tick); +} + +static void dump_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + if (netif_msg_pktdata(np)) { + int i; + printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) { + printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n", + i, np->tx_ring[i].next_desc, + np->tx_ring[i].cmd_status, + np->tx_ring[i].addr); + } + printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) { + printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n", + i, np->rx_ring[i].next_desc, + np->rx_ring[i].cmd_status, + np->rx_ring[i].addr); + } + } +} + +static void ns_tx_timeout(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + const int irq = np->pci_dev->irq; + + disable_irq(irq); + spin_lock_irq(&np->lock); + if (!np->hands_off) { + if (netif_msg_tx_err(np)) + printk(KERN_WARNING + "%s: Transmit timed out, status %#08x," + " resetting...\n", + dev->name, readl(ioaddr + IntrStatus)); + dump_ring(dev); + + natsemi_reset(dev); + reinit_ring(dev); + init_registers(dev); + } else { + printk(KERN_WARNING + "%s: tx_timeout while in hands_off state?\n", + dev->name); + } + spin_unlock_irq(&np->lock); + enable_irq(irq); + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + netif_wake_queue(dev); +} + +static int alloc_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + np->rx_ring = pci_alloc_consistent(np->pci_dev, + sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE), + &np->ring_dma); + if (!np->rx_ring) + return -ENOMEM; + np->tx_ring = &np->rx_ring[RX_RING_SIZE]; + return 0; +} + +static void refill_rx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + /* Refill the Rx ring buffers. */ + for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { + struct sk_buff *skb; + int entry = np->dirty_rx % RX_RING_SIZE; + if (np->rx_skbuff[entry] == NULL) { + unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING; + skb = netdev_alloc_skb(dev, buflen); + np->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. 
*/ + np->rx_dma[entry] = pci_map_single(np->pci_dev, + skb->data, buflen, PCI_DMA_FROMDEVICE); + np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]); + } + np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz); + } + if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) { + if (netif_msg_rx_err(np)) + printk(KERN_WARNING "%s: going OOM.\n", dev->name); + np->oom = 1; + } +} + +static void set_bufsize(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + if (dev->mtu <= ETH_DATA_LEN) + np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS; + else + np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS; +} + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void init_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + /* 1) TX ring */ + np->dirty_tx = np->cur_tx = 0; + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = NULL; + np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma + +sizeof(struct netdev_desc) + *((i+1)%TX_RING_SIZE+RX_RING_SIZE)); + np->tx_ring[i].cmd_status = 0; + } + + /* 2) RX ring */ + np->dirty_rx = 0; + np->cur_rx = RX_RING_SIZE; + np->oom = 0; + set_bufsize(dev); + + np->rx_head_desc = &np->rx_ring[0]; + + /* Please be careful before changing this loop - at least gcc-2.95.1 + * miscompiles it otherwise. + */ + /* Initialize all Rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma + +sizeof(struct netdev_desc) + *((i+1)%RX_RING_SIZE)); + np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); + np->rx_skbuff[i] = NULL; + } + refill_rx(dev); + dump_ring(dev); +} + +static void drain_tx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + for (i = 0; i < TX_RING_SIZE; i++) { + if (np->tx_skbuff[i]) { + pci_unmap_single(np->pci_dev, + np->tx_dma[i], np->tx_skbuff[i]->len, + PCI_DMA_TODEVICE); + dev_kfree_skb(np->tx_skbuff[i]); + dev->stats.tx_dropped++; + } + np->tx_skbuff[i] = NULL; + } +} + +static void drain_rx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + unsigned int buflen = np->rx_buf_sz; + int i; + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].cmd_status = 0; + np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ + if (np->rx_skbuff[i]) { + pci_unmap_single(np->pci_dev, np->rx_dma[i], + buflen + NATSEMI_PADDING, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(np->rx_skbuff[i]); + } + np->rx_skbuff[i] = NULL; + } +} + +static void drain_ring(struct net_device *dev) +{ + drain_rx(dev); + drain_tx(dev); +} + +static void free_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + pci_free_consistent(np->pci_dev, + sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE), + np->rx_ring, np->ring_dma); +} + +static void reinit_rx(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + /* RX Ring */ + np->dirty_rx = 0; + np->cur_rx = RX_RING_SIZE; + np->rx_head_desc = &np->rx_ring[0]; + /* Initialize all Rx descriptors. 
*/ + for (i = 0; i < RX_RING_SIZE; i++) + np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn); + + refill_rx(dev); +} + +static void reinit_ring(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + int i; + + /* drain TX ring */ + drain_tx(dev); + np->dirty_tx = np->cur_tx = 0; + for (i=0;itx_ring[i].cmd_status = 0; + + reinit_rx(dev); +} + +static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + unsigned entry; + unsigned long flags; + + /* Note: Ordering is important here, set the field with the + "ownership" bit last, and only then increment cur_tx. */ + + /* Calculate the next Tx descriptor entry. */ + entry = np->cur_tx % TX_RING_SIZE; + + np->tx_skbuff[entry] = skb; + np->tx_dma[entry] = pci_map_single(np->pci_dev, + skb->data,skb->len, PCI_DMA_TODEVICE); + + np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]); + + spin_lock_irqsave(&np->lock, flags); + + if (!np->hands_off) { + np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len); + /* StrongARM: Explicitly cache flush np->tx_ring and + * skb->data,skb->len. */ + wmb(); + np->cur_tx++; + if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) { + netdev_tx_done(dev); + if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) + netif_stop_queue(dev); + } + /* Wake the potentially-idle transmit channel. */ + writel(TxOn, ioaddr + ChipCmd); + } else { + dev_kfree_skb_irq(skb); + dev->stats.tx_dropped++; + } + spin_unlock_irqrestore(&np->lock, flags); + + if (netif_msg_tx_queued(np)) { + printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", + dev->name, np->cur_tx, entry); + } + return NETDEV_TX_OK; +} + +static void netdev_tx_done(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { + int entry = np->dirty_tx % TX_RING_SIZE; + if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn)) + break; + if (netif_msg_tx_done(np)) + printk(KERN_DEBUG + "%s: tx frame #%d finished, status %#08x.\n", + dev->name, np->dirty_tx, + le32_to_cpu(np->tx_ring[entry].cmd_status)); + if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->tx_skbuff[entry]->len; + } else { /* Various Tx errors */ + int tx_status = + le32_to_cpu(np->tx_ring[entry].cmd_status); + if (tx_status & (DescTxAbort|DescTxExcColl)) + dev->stats.tx_aborted_errors++; + if (tx_status & DescTxFIFO) + dev->stats.tx_fifo_errors++; + if (tx_status & DescTxCarrier) + dev->stats.tx_carrier_errors++; + if (tx_status & DescTxOOWCol) + dev->stats.tx_window_errors++; + dev->stats.tx_errors++; + } + pci_unmap_single(np->pci_dev,np->tx_dma[entry], + np->tx_skbuff[entry]->len, + PCI_DMA_TODEVICE); + /* Free the original skb. */ + dev_kfree_skb_irq(np->tx_skbuff[entry]); + np->tx_skbuff[entry] = NULL; + } + if (netif_queue_stopped(dev) && + np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { + /* The ring is no longer full, wake queue. */ + netif_wake_queue(dev); + } +} + +/* The interrupt handler doesn't actually handle interrupts itself, it + * schedules a NAPI poll if there is anything to do. 
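+ * Reading IntrStatus acknowledges pending interrupts, so the handler only
+ * reads it while interrupts are enabled; it then disables them and lets
+ * natsemi_poll() re-read the register in a loop and re-enable interrupts
+ * once no events remain.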
*/ +static irqreturn_t intr_handler(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + + /* Reading IntrStatus automatically acknowledges so don't do + * that while interrupts are disabled, (for example, while a + * poll is scheduled). */ + if (np->hands_off || !readl(ioaddr + IntrEnable)) + return IRQ_NONE; + + np->intr_status = readl(ioaddr + IntrStatus); + + if (!np->intr_status) + return IRQ_NONE; + + if (netif_msg_intr(np)) + printk(KERN_DEBUG + "%s: Interrupt, status %#08x, mask %#08x.\n", + dev->name, np->intr_status, + readl(ioaddr + IntrMask)); + + prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); + + if (napi_schedule_prep(&np->napi)) { + /* Disable interrupts and register for poll */ + natsemi_irq_disable(dev); + __napi_schedule(&np->napi); + } else + printk(KERN_WARNING + "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", + dev->name, np->intr_status, + readl(ioaddr + IntrMask)); + + return IRQ_HANDLED; +} + +/* This is the NAPI poll routine. As well as the standard RX handling + * it also handles all other interrupts that the chip might raise. + */ +static int natsemi_poll(struct napi_struct *napi, int budget) +{ + struct netdev_private *np = container_of(napi, struct netdev_private, napi); + struct net_device *dev = np->dev; + void __iomem * ioaddr = ns_ioaddr(dev); + int work_done = 0; + + do { + if (netif_msg_intr(np)) + printk(KERN_DEBUG + "%s: Poll, status %#08x, mask %#08x.\n", + dev->name, np->intr_status, + readl(ioaddr + IntrMask)); + + /* netdev_rx() may read IntrStatus again if the RX state + * machine falls over so do it first. */ + if (np->intr_status & + (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | + IntrRxErr | IntrRxOverrun)) { + netdev_rx(dev, &work_done, budget); + } + + if (np->intr_status & + (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) { + spin_lock(&np->lock); + netdev_tx_done(dev); + spin_unlock(&np->lock); + } + + /* Abnormal error summary/uncommon events handlers. */ + if (np->intr_status & IntrAbnormalSummary) + netdev_error(dev, np->intr_status); + + if (work_done >= budget) + return work_done; + + np->intr_status = readl(ioaddr + IntrStatus); + } while (np->intr_status); + + napi_complete(napi); + + /* Reenable interrupts providing nothing is trying to shut + * the chip down. */ + spin_lock(&np->lock); + if (!np->hands_off) + natsemi_irq_enable(dev); + spin_unlock(&np->lock); + + return work_done; +} + +/* This routine is logically part of the interrupt handler, but separated + for clarity and better register allocation. */ +static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do) +{ + struct netdev_private *np = netdev_priv(dev); + int entry = np->cur_rx % RX_RING_SIZE; + int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx; + s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); + unsigned int buflen = np->rx_buf_sz; + void __iomem * ioaddr = ns_ioaddr(dev); + + /* If the driver owns the next entry it's a new packet. Send it up. */ + while (desc_status < 0) { /* e.g. 
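intr_handler() only latches IntrStatus, masks the chip's interrupts and schedules the poll; natsemi_poll() then drains RX/TX work in budget-sized chunks and re-enables interrupts only once IntrStatus reads back zero. A toy model of that control flow, not the driver: a plain event counter plays the part of the hardware status register and budget handling is the point being shown.

#include <stdio.h>

static unsigned pending_events = 23;   /* pretend hardware event backlog */
static int irq_enabled = 1;

static unsigned read_intr_status(void) /* destructive read, like IntrStatus */
{
    return pending_events ? 1 : 0;
}

static int poll(int budget)
{
    int work_done = 0;

    while (read_intr_status()) {
        while (pending_events && work_done < budget) {
            pending_events--;          /* handle one rx/tx event */
            work_done++;
        }
        if (work_done >= budget)
            return work_done;          /* budget spent: stay masked, re-poll */
    }
    irq_enabled = 1;                   /* napi_complete + unmask equivalent */
    return work_done;
}

int main(void)
{
    irq_enabled = 0;                   /* "intr_handler" masked and scheduled */
    while (!irq_enabled)
        printf("poll handled %d units\n", poll(16));
    return 0;
}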
& DescOwn */ + int pkt_len; + if (netif_msg_rx_status(np)) + printk(KERN_DEBUG + " netdev_rx() entry %d status was %#08x.\n", + entry, desc_status); + if (--boguscnt < 0) + break; + + if (*work_done >= work_to_do) + break; + + (*work_done)++; + + pkt_len = (desc_status & DescSizeMask) - 4; + if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){ + if (desc_status & DescMore) { + unsigned long flags; + + if (netif_msg_rx_err(np)) + printk(KERN_WARNING + "%s: Oversized(?) Ethernet " + "frame spanned multiple " + "buffers, entry %#08x " + "status %#08x.\n", dev->name, + np->cur_rx, desc_status); + dev->stats.rx_length_errors++; + + /* The RX state machine has probably + * locked up beneath us. Follow the + * reset procedure documented in + * AN-1287. */ + + spin_lock_irqsave(&np->lock, flags); + reset_rx(dev); + reinit_rx(dev); + writel(np->ring_dma, ioaddr + RxRingPtr); + check_link(dev); + spin_unlock_irqrestore(&np->lock, flags); + + /* We'll enable RX on exit from this + * function. */ + break; + + } else { + /* There was an error. */ + dev->stats.rx_errors++; + if (desc_status & (DescRxAbort|DescRxOver)) + dev->stats.rx_over_errors++; + if (desc_status & (DescRxLong|DescRxRunt)) + dev->stats.rx_length_errors++; + if (desc_status & (DescRxInvalid|DescRxAlign)) + dev->stats.rx_frame_errors++; + if (desc_status & DescRxCRC) + dev->stats.rx_crc_errors++; + } + } else if (pkt_len > np->rx_buf_sz) { + /* if this is the tail of a double buffer + * packet, we've already counted the error + * on the first part. Ignore the second half. + */ + } else { + struct sk_buff *skb; + /* Omit CRC size. */ + /* Check if the packet is long enough to accept + * without copying to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) { + /* 16 byte align the IP header */ + skb_reserve(skb, RX_OFFSET); + pci_dma_sync_single_for_cpu(np->pci_dev, + np->rx_dma[entry], + buflen, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, + np->rx_skbuff[entry]->data, pkt_len); + skb_put(skb, pkt_len); + pci_dma_sync_single_for_device(np->pci_dev, + np->rx_dma[entry], + buflen, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(np->pci_dev, np->rx_dma[entry], + buflen + NATSEMI_PADDING, + PCI_DMA_FROMDEVICE); + skb_put(skb = np->rx_skbuff[entry], pkt_len); + np->rx_skbuff[entry] = NULL; + } + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + entry = (++np->cur_rx) % RX_RING_SIZE; + np->rx_head_desc = &np->rx_ring[entry]; + desc_status = le32_to_cpu(np->rx_head_desc->cmd_status); + } + refill_rx(dev); + + /* Restart Rx engine if stopped. 
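The rx_copybreak test in netdev_rx() copies short packets into a right-sized buffer so the large DMA-mapped receive buffer stays in the ring, while long packets hand the original buffer to the stack and leave the slot to be refilled. A standalone sketch of just that decision, with malloc/memcpy standing in for skb allocation and the threshold value chosen arbitrarily:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define RX_COPYBREAK 256                    /* illustrative threshold */

struct buf { unsigned char *data; size_t len; int is_ring_buffer; };

static struct buf deliver(unsigned char *ring_buf, size_t pkt_len)
{
    struct buf b = { NULL, pkt_len, 0 };

    if (pkt_len < RX_COPYBREAK) {
        b.data = malloc(pkt_len);           /* small frame: copy out */
        if (b.data)
            memcpy(b.data, ring_buf, pkt_len);
    } else {
        b.data = ring_buf;                  /* large frame: pass the buffer */
        b.is_ring_buffer = 1;
    }
    return b;
}

int main(void)
{
    unsigned char ring_buf[1536] = { 0 };
    struct buf small = deliver(ring_buf, 64);
    struct buf large = deliver(ring_buf, 1500);

    printf("64-byte frame copied: %s\n", small.is_ring_buffer ? "no" : "yes");
    printf("1500-byte frame copied: %s\n", large.is_ring_buffer ? "no" : "yes");
    if (!small.is_ring_buffer)
        free(small.data);
    return 0;
}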
*/ + if (np->oom) + mod_timer(&np->timer, jiffies + 1); + else + writel(RxOn, ioaddr + ChipCmd); +} + +static void netdev_error(struct net_device *dev, int intr_status) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + + spin_lock(&np->lock); + if (intr_status & LinkChange) { + u16 lpa = mdio_read(dev, MII_LPA); + if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE && + netif_msg_link(np)) { + printk(KERN_INFO + "%s: Autonegotiation advertising" + " %#04x partner %#04x.\n", dev->name, + np->advertising, lpa); + } + + /* read MII int status to clear the flag */ + readw(ioaddr + MIntrStatus); + check_link(dev); + } + if (intr_status & StatsMax) { + __get_stats(dev); + } + if (intr_status & IntrTxUnderrun) { + if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) { + np->tx_config += TX_DRTH_VAL_INC; + if (netif_msg_tx_err(np)) + printk(KERN_NOTICE + "%s: increased tx threshold, txcfg %#08x.\n", + dev->name, np->tx_config); + } else { + if (netif_msg_tx_err(np)) + printk(KERN_NOTICE + "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n", + dev->name, np->tx_config); + } + writel(np->tx_config, ioaddr + TxConfig); + } + if (intr_status & WOLPkt && netif_msg_wol(np)) { + int wol_status = readl(ioaddr + WOLCmd); + printk(KERN_NOTICE "%s: Link wake-up event %#08x\n", + dev->name, wol_status); + } + if (intr_status & RxStatusFIFOOver) { + if (netif_msg_rx_err(np) && netif_msg_intr(np)) { + printk(KERN_NOTICE "%s: Rx status FIFO overrun\n", + dev->name); + } + dev->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + } + /* Hmmmmm, it's not clear how to recover from PCI faults. */ + if (intr_status & IntrPCIErr) { + printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, + intr_status & IntrPCIErr); + dev->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + dev->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + } + spin_unlock(&np->lock); +} + +static void __get_stats(struct net_device *dev) +{ + void __iomem * ioaddr = ns_ioaddr(dev); + + /* The chip only need report frame silently dropped. */ + dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs); + dev->stats.rx_missed_errors += readl(ioaddr + RxMissed); +} + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + + /* The chip only need report frame silently dropped. */ + spin_lock_irq(&np->lock); + if (netif_running(dev) && !np->hands_off) + __get_stats(dev); + spin_unlock_irq(&np->lock); + + return &dev->stats; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void natsemi_poll_controller(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + const int irq = np->pci_dev->irq; + + disable_irq(irq); + intr_handler(irq, dev); + enable_irq(irq); +} +#endif + +#define HASH_TABLE 0x200 +static void __set_rx_mode(struct net_device *dev) +{ + void __iomem * ioaddr = ns_ioaddr(dev); + struct netdev_private *np = netdev_priv(dev); + u8 mc_filter[64]; /* Multicast hash filter */ + u32 rx_mode; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
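On IntrTxUnderrun, netdev_error() nudges the TX drain-threshold field inside tx_config upward until a hard limit is reached, then writes the word back to TxConfig. A small sketch of that saturating field update; the mask, increment and ceiling below are placeholders, not the chip's real TxDrthMask/TX_DRTH_VAL_* values:

#include <stdint.h>
#include <stdio.h>

#define TX_DRTH_MASK   0x0000003fu   /* placeholder field mask */
#define TX_DRTH_INC    0x00000001u   /* placeholder increment  */
#define TX_DRTH_LIMIT  0x0000003cu   /* placeholder ceiling    */

/* Bump the drain-threshold field, saturating at the limit. */
static uint32_t bump_tx_threshold(uint32_t tx_config)
{
    if ((tx_config & TX_DRTH_MASK) < TX_DRTH_LIMIT)
        tx_config += TX_DRTH_INC;
    return tx_config;
}

int main(void)
{
    uint32_t txcfg = 0x10000000u;    /* unrelated bits stay untouched */
    int i;

    for (i = 0; i < 70; i++)         /* repeated underruns */
        txcfg = bump_tx_threshold(txcfg);
    printf("final txcfg %#010x (threshold field %#x)\n",
           txcfg, txcfg & TX_DRTH_MASK);
    return 0;
}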
*/ + rx_mode = RxFilterEnable | AcceptBroadcast + | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + rx_mode = RxFilterEnable | AcceptBroadcast + | AcceptAllMulticast | AcceptMyPhys; + } else { + struct netdev_hw_addr *ha; + int i; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff; + mc_filter[b/8] |= (1 << (b & 0x07)); + } + rx_mode = RxFilterEnable | AcceptBroadcast + | AcceptMulticast | AcceptMyPhys; + for (i = 0; i < 64; i += 2) { + writel(HASH_TABLE + i, ioaddr + RxFilterAddr); + writel((mc_filter[i + 1] << 8) + mc_filter[i], + ioaddr + RxFilterData); + } + } + writel(rx_mode, ioaddr + RxFilterAddr); + np->cur_rx_mode = rx_mode; +} + +static int natsemi_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS) + return -EINVAL; + + dev->mtu = new_mtu; + + /* synchronized against open : rtnl_lock() held by caller */ + if (netif_running(dev)) { + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + const int irq = np->pci_dev->irq; + + disable_irq(irq); + spin_lock(&np->lock); + /* stop engines */ + natsemi_stop_rxtx(dev); + /* drain rx queue */ + drain_rx(dev); + /* change buffers */ + set_bufsize(dev); + reinit_rx(dev); + writel(np->ring_dma, ioaddr + RxRingPtr); + /* restart engines */ + writel(RxOn | TxOn, ioaddr + ChipCmd); + spin_unlock(&np->lock); + enable_irq(irq); + } + return 0; +} + +static void set_rx_mode(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + spin_lock_irq(&np->lock); + if (!np->hands_off) + __set_rx_mode(dev); + spin_unlock_irq(&np->lock); +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct netdev_private *np = netdev_priv(dev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static int get_regs_len(struct net_device *dev) +{ + return NATSEMI_REGS_SIZE; +} + +static int get_eeprom_len(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return np->eeprom_size; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + spin_lock_irq(&np->lock); + netdev_get_ecmd(dev, ecmd); + spin_unlock_irq(&np->lock); + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + int res; + spin_lock_irq(&np->lock); + res = netdev_set_ecmd(dev, ecmd); + spin_unlock_irq(&np->lock); + return res; +} + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netdev_private *np = netdev_priv(dev); + spin_lock_irq(&np->lock); + netdev_get_wol(dev, &wol->supported, &wol->wolopts); + netdev_get_sopass(dev, wol->sopass); + spin_unlock_irq(&np->lock); +} + +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netdev_private *np = netdev_priv(dev); + int res; + spin_lock_irq(&np->lock); + netdev_set_wol(dev, wol->wolopts); + res = netdev_set_sopass(dev, wol->sopass); + spin_unlock_irq(&np->lock); + return res; +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) +{ + struct netdev_private *np = netdev_priv(dev); + 
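__set_rx_mode() hashes each multicast address with the big-endian Ethernet CRC and uses bits 23..31 of the result as an index into the 512-bit filter table. A standalone sketch of that bit selection; the CRC routine is a local reimplementation of the usual big-endian Ethernet CRC-32 (intended to match ether_crc()) so the example compiles outside the kernel:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Big-endian Ethernet CRC-32 (poly 0x04c11db7), bytes fed LSB first. */
static uint32_t ether_crc_be(int len, const unsigned char *data)
{
    uint32_t crc = 0xffffffffu;
    int bit;

    while (len-- > 0) {
        unsigned char octet = *data++;

        for (bit = 0; bit < 8; bit++, octet >>= 1)
            crc = (crc << 1) ^
                  (((crc >> 31) ^ (octet & 1)) ? 0x04c11db7u : 0);
    }
    return crc;
}

int main(void)
{
    unsigned char mc_filter[64];     /* 512-bit multicast hash table */
    const unsigned char addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    int b;

    memset(mc_filter, 0, sizeof(mc_filter));
    b = (ether_crc_be(ETH_ALEN, addr) >> 23) & 0x1ff;   /* 9-bit index */
    mc_filter[b / 8] |= 1 << (b & 0x07);

    printf("address hashes to bit %d (byte %d, bit %d)\n", b, b / 8, b & 7);
    return 0;
}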
regs->version = NATSEMI_REGS_VER; + spin_lock_irq(&np->lock); + netdev_get_regs(dev, buf); + spin_unlock_irq(&np->lock); +} + +static u32 get_msglevel(struct net_device *dev) +{ + struct netdev_private *np = netdev_priv(dev); + return np->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + struct netdev_private *np = netdev_priv(dev); + np->msg_enable = val; +} + +static int nway_reset(struct net_device *dev) +{ + int tmp; + int r = -EINVAL; + /* if autoneg is off, it's an error */ + tmp = mdio_read(dev, MII_BMCR); + if (tmp & BMCR_ANENABLE) { + tmp |= (BMCR_ANRESTART); + mdio_write(dev, MII_BMCR, tmp); + r = 0; + } + return r; +} + +static u32 get_link(struct net_device *dev) +{ + /* LSTATUS is latched low until a read - so read twice */ + mdio_read(dev, MII_BMSR); + return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0; +} + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct netdev_private *np = netdev_priv(dev); + u8 *eebuf; + int res; + + eebuf = kmalloc(np->eeprom_size, GFP_KERNEL); + if (!eebuf) + return -ENOMEM; + + eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16); + spin_lock_irq(&np->lock); + res = netdev_get_eeprom(dev, eebuf); + spin_unlock_irq(&np->lock); + if (!res) + memcpy(data, eebuf+eeprom->offset, eeprom->len); + kfree(eebuf); + return res; +} + +static const struct ethtool_ops ethtool_ops = { + .get_drvinfo = get_drvinfo, + .get_regs_len = get_regs_len, + .get_eeprom_len = get_eeprom_len, + .get_settings = get_settings, + .set_settings = set_settings, + .get_wol = get_wol, + .set_wol = set_wol, + .get_regs = get_regs, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .nway_reset = nway_reset, + .get_link = get_link, + .get_eeprom = get_eeprom, +}; + +static int netdev_set_wol(struct net_device *dev, u32 newval) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary; + + /* translate to bitmasks this chip understands */ + if (newval & WAKE_PHY) + data |= WakePhy; + if (newval & WAKE_UCAST) + data |= WakeUnicast; + if (newval & WAKE_MCAST) + data |= WakeMulticast; + if (newval & WAKE_BCAST) + data |= WakeBroadcast; + if (newval & WAKE_ARP) + data |= WakeArp; + if (newval & WAKE_MAGIC) + data |= WakeMagic; + if (np->srr >= SRR_DP83815_D) { + if (newval & WAKE_MAGICSECURE) { + data |= WakeMagicSecure; + } + } + + writel(data, ioaddr + WOLCmd); + + return 0; +} + +static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + u32 regval = readl(ioaddr + WOLCmd); + + *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST + | WAKE_ARP | WAKE_MAGIC); + + if (np->srr >= SRR_DP83815_D) { + /* SOPASS works on revD and higher */ + *supported |= WAKE_MAGICSECURE; + } + *cur = 0; + + /* translate from chip bitmasks */ + if (regval & WakePhy) + *cur |= WAKE_PHY; + if (regval & WakeUnicast) + *cur |= WAKE_UCAST; + if (regval & WakeMulticast) + *cur |= WAKE_MCAST; + if (regval & WakeBroadcast) + *cur |= WAKE_BCAST; + if (regval & WakeArp) + *cur |= WAKE_ARP; + if (regval & WakeMagic) + *cur |= WAKE_MAGIC; + if (regval & WakeMagicSecure) { + /* this can be on in revC, but it's broken */ + *cur |= WAKE_MAGICSECURE; + } + + return 0; +} + +static int netdev_set_sopass(struct net_device *dev, u8 *newval) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = 
ns_ioaddr(dev); + u16 *sval = (u16 *)newval; + u32 addr; + + if (np->srr < SRR_DP83815_D) { + return 0; + } + + /* enable writing to these registers by disabling the RX filter */ + addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask; + addr &= ~RxFilterEnable; + writel(addr, ioaddr + RxFilterAddr); + + /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */ + writel(addr | 0xa, ioaddr + RxFilterAddr); + writew(sval[0], ioaddr + RxFilterData); + + writel(addr | 0xc, ioaddr + RxFilterAddr); + writew(sval[1], ioaddr + RxFilterData); + + writel(addr | 0xe, ioaddr + RxFilterAddr); + writew(sval[2], ioaddr + RxFilterData); + + /* re-enable the RX filter */ + writel(addr | RxFilterEnable, ioaddr + RxFilterAddr); + + return 0; +} + +static int netdev_get_sopass(struct net_device *dev, u8 *data) +{ + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + u16 *sval = (u16 *)data; + u32 addr; + + if (np->srr < SRR_DP83815_D) { + sval[0] = sval[1] = sval[2] = 0; + return 0; + } + + /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */ + addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask; + + writel(addr | 0xa, ioaddr + RxFilterAddr); + sval[0] = readw(ioaddr + RxFilterData); + + writel(addr | 0xc, ioaddr + RxFilterAddr); + sval[1] = readw(ioaddr + RxFilterData); + + writel(addr | 0xe, ioaddr + RxFilterAddr); + sval[2] = readw(ioaddr + RxFilterData); + + writel(addr, ioaddr + RxFilterAddr); + + return 0; +} + +static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + u32 tmp; + + ecmd->port = dev->if_port; + ethtool_cmd_speed_set(ecmd, np->speed); + ecmd->duplex = np->duplex; + ecmd->autoneg = np->autoneg; + ecmd->advertising = 0; + if (np->advertising & ADVERTISE_10HALF) + ecmd->advertising |= ADVERTISED_10baseT_Half; + if (np->advertising & ADVERTISE_10FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + if (np->advertising & ADVERTISE_100HALF) + ecmd->advertising |= ADVERTISED_100baseT_Half; + if (np->advertising & ADVERTISE_100FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + ecmd->supported = (SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE); + ecmd->phy_address = np->phy_addr_external; + /* + * We intentionally report the phy address of the external + * phy, even if the internal phy is used. This is necessary + * to work around a deficiency of the ethtool interface: + * It's only possible to query the settings of the active + * port. Therefore + * # ethtool -s ethX port mii + * actually sends an ioctl to switch to port mii with the + * settings that are used for the current active port. + * If we would report a different phy address in this + * command, then + * # ethtool -s ethX port tp;ethtool -s ethX port mii + * would unintentionally change the phy address. + * + * Fortunately the phy address doesn't matter with the + * internal phy... 
+ */ + + /* set information based on active port type */ + switch (ecmd->port) { + default: + case PORT_TP: + ecmd->advertising |= ADVERTISED_TP; + ecmd->transceiver = XCVR_INTERNAL; + break; + case PORT_MII: + ecmd->advertising |= ADVERTISED_MII; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case PORT_FIBRE: + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + break; + } + + /* if autonegotiation is on, try to return the active speed/duplex */ + if (ecmd->autoneg == AUTONEG_ENABLE) { + ecmd->advertising |= ADVERTISED_Autoneg; + tmp = mii_nway_result( + np->advertising & mdio_read(dev, MII_LPA)); + if (tmp == LPA_100FULL || tmp == LPA_100HALF) + ethtool_cmd_speed_set(ecmd, SPEED_100); + else + ethtool_cmd_speed_set(ecmd, SPEED_10); + if (tmp == LPA_100FULL || tmp == LPA_10FULL) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } + + /* ignore maxtxpkt, maxrxpkt for now */ + + return 0; +} + +static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netdev_private *np = netdev_priv(dev); + + if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL) + return -EINVAL; + if (ecmd->autoneg == AUTONEG_ENABLE) { + if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full)) == 0) { + return -EINVAL; + } + } else if (ecmd->autoneg == AUTONEG_DISABLE) { + u32 speed = ethtool_cmd_speed(ecmd); + if (speed != SPEED_10 && speed != SPEED_100) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + return -EINVAL; + } + + /* + * If we're ignoring the PHY then autoneg and the internal + * transceiver are really not going to work so don't let the + * user select them. + */ + if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE || + ecmd->port == PORT_TP)) + return -EINVAL; + + /* + * maxtxpkt, maxrxpkt: ignored for now. + * + * transceiver: + * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always + * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and + * selects based on ecmd->port. + * + * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre + * phys that are connected to the mii bus. It's used to apply fibre + * specific updates. + */ + + /* WHEW! now lets bang some bits */ + + /* save the parms */ + dev->if_port = ecmd->port; + np->autoneg = ecmd->autoneg; + np->phy_addr_external = ecmd->phy_address & PhyAddrMask; + if (np->autoneg == AUTONEG_ENABLE) { + /* advertise only what has been requested */ + np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (ecmd->advertising & ADVERTISED_10baseT_Half) + np->advertising |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + np->advertising |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + np->advertising |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + np->advertising |= ADVERTISE_100FULL; + } else { + np->speed = ethtool_cmd_speed(ecmd); + np->duplex = ecmd->duplex; + /* user overriding the initial full duplex parm? 
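With autonegotiation enabled, netdev_get_ecmd() derives the active speed and duplex from the intersection of the local advertisement and the link partner ability word, the same idea as mii_nway_result(). A small standalone sketch of that resolution using the standard MII LPA bit values (100BASE4 left out for brevity):

#include <stdio.h>

#define LPA_10HALF   0x0020
#define LPA_10FULL   0x0040
#define LPA_100HALF  0x0080
#define LPA_100FULL  0x0100

/* Highest common denominator of the negotiated ability bits. */
static int nway_result(int negotiated)
{
    if (negotiated & LPA_100FULL)
        return LPA_100FULL;
    if (negotiated & LPA_100HALF)
        return LPA_100HALF;
    if (negotiated & LPA_10FULL)
        return LPA_10FULL;
    return LPA_10HALF;
}

int main(void)
{
    int advertising = LPA_100FULL | LPA_100HALF | LPA_10FULL | LPA_10HALF;
    int lpa = LPA_100HALF | LPA_10FULL | LPA_10HALF;   /* partner: no 100FD */
    int best = nway_result(advertising & lpa);

    printf("speed %d, %s duplex\n",
           (best == LPA_100FULL || best == LPA_100HALF) ? 100 : 10,
           (best == LPA_100FULL || best == LPA_10FULL) ? "full" : "half");
    return 0;
}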
*/ + if (np->duplex == DUPLEX_HALF) + np->full_duplex = 0; + } + + /* get the right phy enabled */ + if (ecmd->port == PORT_TP) + switch_port_internal(dev); + else + switch_port_external(dev); + + /* set parms and see how this affected our link status */ + init_phy_fixup(dev); + check_link(dev); + return 0; +} + +static int netdev_get_regs(struct net_device *dev, u8 *buf) +{ + int i; + int j; + u32 rfcr; + u32 *rbuf = (u32 *)buf; + void __iomem * ioaddr = ns_ioaddr(dev); + + /* read non-mii page 0 of registers */ + for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) { + rbuf[i] = readl(ioaddr + i*4); + } + + /* read current mii registers */ + for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++) + rbuf[i] = mdio_read(dev, i & 0x1f); + + /* read only the 'magic' registers from page 1 */ + writew(1, ioaddr + PGSEL); + rbuf[i++] = readw(ioaddr + PMDCSR); + rbuf[i++] = readw(ioaddr + TSTDAT); + rbuf[i++] = readw(ioaddr + DSPCFG); + rbuf[i++] = readw(ioaddr + SDCFG); + writew(0, ioaddr + PGSEL); + + /* read RFCR indexed registers */ + rfcr = readl(ioaddr + RxFilterAddr); + for (j = 0; j < NATSEMI_RFDR_NREGS; j++) { + writel(j*2, ioaddr + RxFilterAddr); + rbuf[i++] = readw(ioaddr + RxFilterData); + } + writel(rfcr, ioaddr + RxFilterAddr); + + /* the interrupt status is clear-on-read - see if we missed any */ + if (rbuf[4] & rbuf[5]) { + printk(KERN_WARNING + "%s: shoot, we dropped an interrupt (%#08x)\n", + dev->name, rbuf[4] & rbuf[5]); + } + + return 0; +} + +#define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \ + | (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \ + | (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \ + | (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \ + | (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \ + | (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \ + | (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \ + | (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) ) + +static int netdev_get_eeprom(struct net_device *dev, u8 *buf) +{ + int i; + u16 *ebuf = (u16 *)buf; + void __iomem * ioaddr = ns_ioaddr(dev); + struct netdev_private *np = netdev_priv(dev); + + /* eeprom_read reads 16 bits, and indexes by 16 bits */ + for (i = 0; i < np->eeprom_size/2; i++) { + ebuf[i] = eeprom_read(ioaddr, i); + /* The EEPROM itself stores data bit-swapped, but eeprom_read + * reads it back "sanely". So we swap it back here in order to + * present it to userland as it is stored. */ + ebuf[i] = SWAP_BITS(ebuf[i]); + } + return 0; +} + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct mii_ioctl_data *data = if_mii(rq); + struct netdev_private *np = netdev_priv(dev); + + switch(cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = np->phy_addr_external; + /* Fall Through */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + /* The phy_id is not enough to uniquely identify + * the intended target. Therefore the command is sent to + * the given mii on the current port. + */ + if (dev->if_port == PORT_TP) { + if ((data->phy_id & 0x1f) == np->phy_addr_external) + data->val_out = mdio_read(dev, + data->reg_num & 0x1f); + else + data->val_out = 0; + } else { + move_int_phy(dev, data->phy_id & 0x1f); + data->val_out = miiport_read(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f); + } + return 0; + + case SIOCSMIIREG: /* Write MII PHY register. 
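netdev_get_eeprom() runs each word read by eeprom_read() through SWAP_BITS so userland sees the words exactly as the EEPROM stores them (bit-swapped). A standalone check of that 16-bit bit reversal; the loop below performs the same transform as the driver's SWAP_BITS() macro, and reversing twice gives back the original word:

#include <stdint.h>
#include <stdio.h>

/* Reverse the 16 bits of x: bit 0 <-> bit 15, bit 1 <-> bit 14, ... */
static uint16_t swap_bits16(uint16_t x)
{
    uint16_t r = 0;
    int i;

    for (i = 0; i < 16; i++)
        if (x & (1u << i))
            r |= 1u << (15 - i);
    return r;
}

int main(void)
{
    uint16_t v = 0x1234;

    printf("%#06x -> %#06x -> %#06x\n",
           v, swap_bits16(v), swap_bits16(swap_bits16(v)));
    return 0;
}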
*/ + if (dev->if_port == PORT_TP) { + if ((data->phy_id & 0x1f) == np->phy_addr_external) { + if ((data->reg_num & 0x1f) == MII_ADVERTISE) + np->advertising = data->val_in; + mdio_write(dev, data->reg_num & 0x1f, + data->val_in); + } + } else { + if ((data->phy_id & 0x1f) == np->phy_addr_external) { + if ((data->reg_num & 0x1f) == MII_ADVERTISE) + np->advertising = data->val_in; + } + move_int_phy(dev, data->phy_id & 0x1f); + miiport_write(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, + data->val_in); + } + return 0; + default: + return -EOPNOTSUPP; + } +} + +static void enable_wol_mode(struct net_device *dev, int enable_intr) +{ + void __iomem * ioaddr = ns_ioaddr(dev); + struct netdev_private *np = netdev_priv(dev); + + if (netif_msg_wol(np)) + printk(KERN_INFO "%s: remaining active for wake-on-lan\n", + dev->name); + + /* For WOL we must restart the rx process in silent mode. + * Write NULL to the RxRingPtr. Only possible if + * rx process is stopped + */ + writel(0, ioaddr + RxRingPtr); + + /* read WoL status to clear */ + readl(ioaddr + WOLCmd); + + /* PME on, clear status */ + writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun); + + /* and restart the rx process */ + writel(RxOn, ioaddr + ChipCmd); + + if (enable_intr) { + /* enable the WOL interrupt. + * Could be used to send a netlink message. + */ + writel(WOLPkt | LinkChange, ioaddr + IntrMask); + natsemi_irq_enable(dev); + } +} + +static int netdev_close(struct net_device *dev) +{ + void __iomem * ioaddr = ns_ioaddr(dev); + struct netdev_private *np = netdev_priv(dev); + const int irq = np->pci_dev->irq; + + if (netif_msg_ifdown(np)) + printk(KERN_DEBUG + "%s: Shutting down ethercard, status was %#04x.\n", + dev->name, (int)readl(ioaddr + ChipCmd)); + if (netif_msg_pktdata(np)) + printk(KERN_DEBUG + "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", + dev->name, np->cur_tx, np->dirty_tx, + np->cur_rx, np->dirty_rx); + + napi_disable(&np->napi); + + /* + * FIXME: what if someone tries to close a device + * that is suspended? + * Should we reenable the nic to switch to + * the final WOL settings? + */ + + del_timer_sync(&np->timer); + disable_irq(irq); + spin_lock_irq(&np->lock); + natsemi_irq_disable(dev); + np->hands_off = 1; + spin_unlock_irq(&np->lock); + enable_irq(irq); + + free_irq(irq, dev); + + /* Interrupt disabled, interrupt handler released, + * queue stopped, timer deleted, rtnl_lock held + * All async codepaths that access the driver are disabled. + */ + spin_lock_irq(&np->lock); + np->hands_off = 0; + readl(ioaddr + IntrMask); + readw(ioaddr + MIntrStatus); + + /* Freeze Stats */ + writel(StatsFreeze, ioaddr + StatsCtrl); + + /* Stop the chip's Tx and Rx processes. */ + natsemi_stop_rxtx(dev); + + __get_stats(dev); + spin_unlock_irq(&np->lock); + + /* clear the carrier last - an interrupt could reenable it otherwise */ + netif_carrier_off(dev); + netif_stop_queue(dev); + + dump_ring(dev); + drain_ring(dev); + free_ring(dev); + + { + u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary; + if (wol) { + /* restart the NIC in WOL mode. + * The nic must be stopped for this. 
+ */ + enable_wol_mode(dev, 0); + } else { + /* Restore PME enable bit unmolested */ + writel(np->SavedClkRun, ioaddr + ClkRun); + } + } + return 0; +} + + +static void natsemi_remove1(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + void __iomem * ioaddr = ns_ioaddr(dev); + + NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround); + unregister_netdev (dev); + pci_release_regions (pdev); + iounmap(ioaddr); + free_netdev (dev); +} + +#ifdef CONFIG_PM + +/* + * The ns83815 chip doesn't have explicit RxStop bits. + * Kicking the Rx or Tx process for a new packet reenables the Rx process + * of the nic, thus this function must be very careful: + * + * suspend/resume synchronization: + * entry points: + * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler, + * start_tx, ns_tx_timeout + * + * No function accesses the hardware without checking np->hands_off. + * the check occurs under spin_lock_irq(&np->lock); + * exceptions: + * * netdev_ioctl: noncritical access. + * * netdev_open: cannot happen due to the device_detach + * * netdev_close: doesn't hurt. + * * netdev_timer: timer stopped by natsemi_suspend. + * * intr_handler: doesn't acquire the spinlock. suspend calls + * disable_irq() to enforce synchronization. + * * natsemi_poll: checks before reenabling interrupts. suspend + * sets hands_off, disables interrupts and then waits with + * napi_disable(). + * + * Interrupts must be disabled, otherwise hands_off can cause irq storms. + */ + +static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct netdev_private *np = netdev_priv(dev); + void __iomem * ioaddr = ns_ioaddr(dev); + + rtnl_lock(); + if (netif_running (dev)) { + const int irq = np->pci_dev->irq; + + del_timer_sync(&np->timer); + + disable_irq(irq); + spin_lock_irq(&np->lock); + + natsemi_irq_disable(dev); + np->hands_off = 1; + natsemi_stop_rxtx(dev); + netif_stop_queue(dev); + + spin_unlock_irq(&np->lock); + enable_irq(irq); + + napi_disable(&np->napi); + + /* Update the error counts. */ + __get_stats(dev); + + /* pci_power_off(pdev, -1); */ + drain_ring(dev); + { + u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary; + /* Restore PME enable bit */ + if (wol) { + /* restart the NIC in WOL mode. + * The nic must be stopped for this. 
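The suspend/resume comment block describes the np->hands_off convention: every path that could touch the hardware checks the flag under np->lock and backs off while suspend owns the device. A minimal pthread-based sketch of that guard pattern, not driver code; the function names and the fake register access are illustrative only:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int hands_off;              /* set while suspend owns the hardware */

static void maybe_touch_hardware(const char *who)
{
    pthread_mutex_lock(&lock);
    if (!hands_off)
        printf("%s: touching registers\n", who);   /* fake MMIO access */
    else
        printf("%s: hands_off set, skipping\n", who);
    pthread_mutex_unlock(&lock);
}

static void suspend_dev(void)
{
    pthread_mutex_lock(&lock);
    hands_off = 1;                 /* from here on, everyone backs off */
    pthread_mutex_unlock(&lock);
}

static void resume_dev(void)
{
    pthread_mutex_lock(&lock);
    hands_off = 0;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    maybe_touch_hardware("set_rx_mode");   /* allowed */
    suspend_dev();
    maybe_touch_hardware("get_stats");     /* skipped */
    resume_dev();
    maybe_touch_hardware("set_rx_mode");   /* allowed again */
    return 0;
}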
+ * FIXME: use the WOL interrupt + */ + enable_wol_mode(dev, 0); + } else { + /* Restore PME enable bit unmolested */ + writel(np->SavedClkRun, ioaddr + ClkRun); + } + } + } + netif_device_detach(dev); + rtnl_unlock(); + return 0; +} + + +static int natsemi_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct netdev_private *np = netdev_priv(dev); + int ret = 0; + + rtnl_lock(); + if (netif_device_present(dev)) + goto out; + if (netif_running(dev)) { + const int irq = np->pci_dev->irq; + + BUG_ON(!np->hands_off); + ret = pci_enable_device(pdev); + if (ret < 0) { + dev_err(&pdev->dev, + "pci_enable_device() failed: %d\n", ret); + goto out; + } + /* pci_power_on(pdev); */ + + napi_enable(&np->napi); + + natsemi_reset(dev); + init_ring(dev); + disable_irq(irq); + spin_lock_irq(&np->lock); + np->hands_off = 0; + init_registers(dev); + netif_device_attach(dev); + spin_unlock_irq(&np->lock); + enable_irq(irq); + + mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); + } + netif_device_attach(dev); +out: + rtnl_unlock(); + return ret; +} + +#endif /* CONFIG_PM */ + +static struct pci_driver natsemi_driver = { + .name = DRV_NAME, + .id_table = natsemi_pci_tbl, + .probe = natsemi_probe1, + .remove = natsemi_remove1, +#ifdef CONFIG_PM + .suspend = natsemi_suspend, + .resume = natsemi_resume, +#endif +}; + +static int __init natsemi_init_mod (void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + + return pci_register_driver(&natsemi_driver); +} + +static void __exit natsemi_exit_mod (void) +{ + pci_unregister_driver (&natsemi_driver); +} + +module_init(natsemi_init_mod); +module_exit(natsemi_exit_mod); + diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c new file mode 100644 index 000000000..eb807b0dc --- /dev/null +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -0,0 +1,2307 @@ +#define VERSION "0.23" +/* ns83820.c by Benjamin LaHaise with contributions. + * + * Questions/comments/discussion to linux-ns83820@kvack.org. + * + * $Revision: 1.34.2.23 $ + * + * Copyright 2001 Benjamin LaHaise. + * Copyright 2001, 2002 Red Hat. + * + * Mmmm, chocolate vanilla mocha... + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * + * ChangeLog + * ========= + * 20010414 0.1 - created + * 20010622 0.2 - basic rx and tx. + * 20010711 0.3 - added duplex and link state detection support. + * 20010713 0.4 - zero copy, no hangs. + * 0.5 - 64 bit dma support (davem will hate me for this) + * - disable jumbo frames to avoid tx hangs + * - work around tx deadlocks on my 1.02 card via + * fiddling with TXCFG + * 20010810 0.6 - use pci dma api for ringbuffers, work on ia64 + * 20010816 0.7 - misc cleanups + * 20010826 0.8 - fix critical zero copy bugs + * 0.9 - internal experiment + * 20010827 0.10 - fix ia64 unaligned access. 
+ * 20010906 0.11 - accept all packets with checksum errors as + * otherwise fragments get lost + * - fix >> 32 bugs + * 0.12 - add statistics counters + * - add allmulti/promisc support + * 20011009 0.13 - hotplug support, other smaller pci api cleanups + * 20011204 0.13a - optical transceiver support added + * by Michael Clark + * 20011205 0.13b - call register_netdev earlier in initialization + * suppress duplicate link status messages + * 20011117 0.14 - ethtool GDRVINFO, GLINK support from jgarzik + * 20011204 0.15 get ppc (big endian) working + * 20011218 0.16 various cleanups + * 20020310 0.17 speedups + * 20020610 0.18 - actually use the pci dma api for highmem + * - remove pci latency register fiddling + * 0.19 - better bist support + * - add ihr and reset_phy parameters + * - gmii bus probing + * - fix missed txok introduced during performance + * tuning + * 0.20 - fix stupid RFEN thinko. i am such a smurf. + * 20040828 0.21 - add hardware vlan accleration + * by Neil Horman + * 20050406 0.22 - improved DAC ifdefs from Andi Kleen + * - removal of dead code from Adrian Bunk + * - fix half duplex collision behaviour + * Driver Overview + * =============== + * + * This driver was originally written for the National Semiconductor + * 83820 chip, a 10/100/1000 Mbps 64 bit PCI ethernet NIC. Hopefully + * this code will turn out to be a) clean, b) correct, and c) fast. + * With that in mind, I'm aiming to split the code up as much as + * reasonably possible. At present there are X major sections that + * break down into a) packet receive, b) packet transmit, c) link + * management, d) initialization and configuration. Where possible, + * these code paths are designed to run in parallel. + * + * This driver has been tested and found to work with the following + * cards (in no particular order): + * + * Cameo SOHO-GA2000T SOHO-GA2500T + * D-Link DGE-500T + * PureData PDP8023Z-TG + * SMC SMC9452TX SMC9462TX + * Netgear GA621 + * + * Special thanks to SMC for providing hardware to test this driver on. + * + * Reports of success or failure would be greatly appreciated. + */ +//#define dprintk printk +#define dprintk(x...) do { } while (0) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for iph */ +#include /* for IPPROTO_... */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DRV_NAME "ns83820" + +/* Global parameters. See module_param near the bottom. */ +static int ihr = 2; +static int reset_phy = 0; +static int lnksts = 0; /* CFG_LNKSTS bit polarity */ + +/* Dprintk is used for more interesting debug events */ +#undef Dprintk +#define Dprintk dprintk + +/* tunables */ +#define RX_BUF_SIZE 1500 /* 8192 */ +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define NS83820_VLAN_ACCEL_SUPPORT +#endif + +/* Must not exceed ~65000. */ +#define NR_RX_DESC 64 +#define NR_TX_DESC 128 + +/* not tunable */ +#define REAL_RX_BUF_SIZE (RX_BUF_SIZE + 14) /* rx/tx mac addr + type */ + +#define MIN_TX_DESC_FREE 8 + +/* register defines */ +#define CFGCS 0x04 + +#define CR_TXE 0x00000001 +#define CR_TXD 0x00000002 +/* Ramit : Here's a tip, don't do a RXD immediately followed by an RXE + * The Receive engine skips one descriptor and moves + * onto the next one!! 
*/ +#define CR_RXE 0x00000004 +#define CR_RXD 0x00000008 +#define CR_TXR 0x00000010 +#define CR_RXR 0x00000020 +#define CR_SWI 0x00000080 +#define CR_RST 0x00000100 + +#define PTSCR_EEBIST_FAIL 0x00000001 +#define PTSCR_EEBIST_EN 0x00000002 +#define PTSCR_EELOAD_EN 0x00000004 +#define PTSCR_RBIST_FAIL 0x000001b8 +#define PTSCR_RBIST_DONE 0x00000200 +#define PTSCR_RBIST_EN 0x00000400 +#define PTSCR_RBIST_RST 0x00002000 + +#define MEAR_EEDI 0x00000001 +#define MEAR_EEDO 0x00000002 +#define MEAR_EECLK 0x00000004 +#define MEAR_EESEL 0x00000008 +#define MEAR_MDIO 0x00000010 +#define MEAR_MDDIR 0x00000020 +#define MEAR_MDC 0x00000040 + +#define ISR_TXDESC3 0x40000000 +#define ISR_TXDESC2 0x20000000 +#define ISR_TXDESC1 0x10000000 +#define ISR_TXDESC0 0x08000000 +#define ISR_RXDESC3 0x04000000 +#define ISR_RXDESC2 0x02000000 +#define ISR_RXDESC1 0x01000000 +#define ISR_RXDESC0 0x00800000 +#define ISR_TXRCMP 0x00400000 +#define ISR_RXRCMP 0x00200000 +#define ISR_DPERR 0x00100000 +#define ISR_SSERR 0x00080000 +#define ISR_RMABT 0x00040000 +#define ISR_RTABT 0x00020000 +#define ISR_RXSOVR 0x00010000 +#define ISR_HIBINT 0x00008000 +#define ISR_PHY 0x00004000 +#define ISR_PME 0x00002000 +#define ISR_SWI 0x00001000 +#define ISR_MIB 0x00000800 +#define ISR_TXURN 0x00000400 +#define ISR_TXIDLE 0x00000200 +#define ISR_TXERR 0x00000100 +#define ISR_TXDESC 0x00000080 +#define ISR_TXOK 0x00000040 +#define ISR_RXORN 0x00000020 +#define ISR_RXIDLE 0x00000010 +#define ISR_RXEARLY 0x00000008 +#define ISR_RXERR 0x00000004 +#define ISR_RXDESC 0x00000002 +#define ISR_RXOK 0x00000001 + +#define TXCFG_CSI 0x80000000 +#define TXCFG_HBI 0x40000000 +#define TXCFG_MLB 0x20000000 +#define TXCFG_ATP 0x10000000 +#define TXCFG_ECRETRY 0x00800000 +#define TXCFG_BRST_DIS 0x00080000 +#define TXCFG_MXDMA1024 0x00000000 +#define TXCFG_MXDMA512 0x00700000 +#define TXCFG_MXDMA256 0x00600000 +#define TXCFG_MXDMA128 0x00500000 +#define TXCFG_MXDMA64 0x00400000 +#define TXCFG_MXDMA32 0x00300000 +#define TXCFG_MXDMA16 0x00200000 +#define TXCFG_MXDMA8 0x00100000 + +#define CFG_LNKSTS 0x80000000 +#define CFG_SPDSTS 0x60000000 +#define CFG_SPDSTS1 0x40000000 +#define CFG_SPDSTS0 0x20000000 +#define CFG_DUPSTS 0x10000000 +#define CFG_TBI_EN 0x01000000 +#define CFG_MODE_1000 0x00400000 +/* Ramit : Dont' ever use AUTO_1000, it never works and is buggy. + * Read the Phy response and then configure the MAC accordingly */ +#define CFG_AUTO_1000 0x00200000 +#define CFG_PINT_CTL 0x001c0000 +#define CFG_PINT_DUPSTS 0x00100000 +#define CFG_PINT_LNKSTS 0x00080000 +#define CFG_PINT_SPDSTS 0x00040000 +#define CFG_TMRTEST 0x00020000 +#define CFG_MRM_DIS 0x00010000 +#define CFG_MWI_DIS 0x00008000 +#define CFG_T64ADDR 0x00004000 +#define CFG_PCI64_DET 0x00002000 +#define CFG_DATA64_EN 0x00001000 +#define CFG_M64ADDR 0x00000800 +#define CFG_PHY_RST 0x00000400 +#define CFG_PHY_DIS 0x00000200 +#define CFG_EXTSTS_EN 0x00000100 +#define CFG_REQALG 0x00000080 +#define CFG_SB 0x00000040 +#define CFG_POW 0x00000020 +#define CFG_EXD 0x00000010 +#define CFG_PESEL 0x00000008 +#define CFG_BROM_DIS 0x00000004 +#define CFG_EXT_125 0x00000002 +#define CFG_BEM 0x00000001 + +#define EXTSTS_UDPPKT 0x00200000 +#define EXTSTS_TCPPKT 0x00080000 +#define EXTSTS_IPPKT 0x00020000 +#define EXTSTS_VPKT 0x00010000 +#define EXTSTS_VTG_MASK 0x0000ffff + +#define SPDSTS_POLARITY (CFG_SPDSTS1 | CFG_SPDSTS0 | CFG_DUPSTS | (lnksts ? 
CFG_LNKSTS : 0)) + +#define MIBC_MIBS 0x00000008 +#define MIBC_ACLR 0x00000004 +#define MIBC_FRZ 0x00000002 +#define MIBC_WRN 0x00000001 + +#define PCR_PSEN (1 << 31) +#define PCR_PS_MCAST (1 << 30) +#define PCR_PS_DA (1 << 29) +#define PCR_STHI_8 (3 << 23) +#define PCR_STLO_4 (1 << 23) +#define PCR_FFHI_8K (3 << 21) +#define PCR_FFLO_4K (1 << 21) +#define PCR_PAUSE_CNT 0xFFFE + +#define RXCFG_AEP 0x80000000 +#define RXCFG_ARP 0x40000000 +#define RXCFG_STRIPCRC 0x20000000 +#define RXCFG_RX_FD 0x10000000 +#define RXCFG_ALP 0x08000000 +#define RXCFG_AIRL 0x04000000 +#define RXCFG_MXDMA512 0x00700000 +#define RXCFG_DRTH 0x0000003e +#define RXCFG_DRTH0 0x00000002 + +#define RFCR_RFEN 0x80000000 +#define RFCR_AAB 0x40000000 +#define RFCR_AAM 0x20000000 +#define RFCR_AAU 0x10000000 +#define RFCR_APM 0x08000000 +#define RFCR_APAT 0x07800000 +#define RFCR_APAT3 0x04000000 +#define RFCR_APAT2 0x02000000 +#define RFCR_APAT1 0x01000000 +#define RFCR_APAT0 0x00800000 +#define RFCR_AARP 0x00400000 +#define RFCR_MHEN 0x00200000 +#define RFCR_UHEN 0x00100000 +#define RFCR_ULM 0x00080000 + +#define VRCR_RUDPE 0x00000080 +#define VRCR_RTCPE 0x00000040 +#define VRCR_RIPE 0x00000020 +#define VRCR_IPEN 0x00000010 +#define VRCR_DUTF 0x00000008 +#define VRCR_DVTF 0x00000004 +#define VRCR_VTREN 0x00000002 +#define VRCR_VTDEN 0x00000001 + +#define VTCR_PPCHK 0x00000008 +#define VTCR_GCHK 0x00000004 +#define VTCR_VPPTI 0x00000002 +#define VTCR_VGTI 0x00000001 + +#define CR 0x00 +#define CFG 0x04 +#define MEAR 0x08 +#define PTSCR 0x0c +#define ISR 0x10 +#define IMR 0x14 +#define IER 0x18 +#define IHR 0x1c +#define TXDP 0x20 +#define TXDP_HI 0x24 +#define TXCFG 0x28 +#define GPIOR 0x2c +#define RXDP 0x30 +#define RXDP_HI 0x34 +#define RXCFG 0x38 +#define PQCR 0x3c +#define WCSR 0x40 +#define PCR 0x44 +#define RFCR 0x48 +#define RFDR 0x4c + +#define SRR 0x58 + +#define VRCR 0xbc +#define VTCR 0xc0 +#define VDR 0xc4 +#define CCSR 0xcc + +#define TBICR 0xe0 +#define TBISR 0xe4 +#define TANAR 0xe8 +#define TANLPAR 0xec +#define TANER 0xf0 +#define TESR 0xf4 + +#define TBICR_MR_AN_ENABLE 0x00001000 +#define TBICR_MR_RESTART_AN 0x00000200 + +#define TBISR_MR_LINK_STATUS 0x00000020 +#define TBISR_MR_AN_COMPLETE 0x00000004 + +#define TANAR_PS2 0x00000100 +#define TANAR_PS1 0x00000080 +#define TANAR_HALF_DUP 0x00000040 +#define TANAR_FULL_DUP 0x00000020 + +#define GPIOR_GP5_OE 0x00000200 +#define GPIOR_GP4_OE 0x00000100 +#define GPIOR_GP3_OE 0x00000080 +#define GPIOR_GP2_OE 0x00000040 +#define GPIOR_GP1_OE 0x00000020 +#define GPIOR_GP3_OUT 0x00000004 +#define GPIOR_GP1_OUT 0x00000001 + +#define LINK_AUTONEGOTIATE 0x01 +#define LINK_DOWN 0x02 +#define LINK_UP 0x04 + +#define HW_ADDR_LEN sizeof(dma_addr_t) +#define desc_addr_set(desc, addr) \ + do { \ + ((desc)[0] = cpu_to_le32(addr)); \ + if (HW_ADDR_LEN == 8) \ + (desc)[1] = cpu_to_le32(((u64)addr) >> 32); \ + } while(0) +#define desc_addr_get(desc) \ + (le32_to_cpu((desc)[0]) | \ + (HW_ADDR_LEN == 8 ? 
((dma_addr_t)le32_to_cpu((desc)[1]))<<32 : 0)) + +#define DESC_LINK 0 +#define DESC_BUFPTR (DESC_LINK + HW_ADDR_LEN/4) +#define DESC_CMDSTS (DESC_BUFPTR + HW_ADDR_LEN/4) +#define DESC_EXTSTS (DESC_CMDSTS + 4/4) + +#define CMDSTS_OWN 0x80000000 +#define CMDSTS_MORE 0x40000000 +#define CMDSTS_INTR 0x20000000 +#define CMDSTS_ERR 0x10000000 +#define CMDSTS_OK 0x08000000 +#define CMDSTS_RUNT 0x00200000 +#define CMDSTS_LEN_MASK 0x0000ffff + +#define CMDSTS_DEST_MASK 0x01800000 +#define CMDSTS_DEST_SELF 0x00800000 +#define CMDSTS_DEST_MULTI 0x01000000 + +#define DESC_SIZE 8 /* Should be cache line sized */ + +struct rx_info { + spinlock_t lock; + int up; + unsigned long idle; + + struct sk_buff *skbs[NR_RX_DESC]; + + __le32 *next_rx_desc; + u16 next_rx, next_empty; + + __le32 *descs; + dma_addr_t phy_descs; +}; + + +struct ns83820 { + u8 __iomem *base; + + struct pci_dev *pci_dev; + struct net_device *ndev; + + struct rx_info rx_info; + struct tasklet_struct rx_tasklet; + + unsigned ihr; + struct work_struct tq_refill; + + /* protects everything below. irqsave when using. */ + spinlock_t misc_lock; + + u32 CFG_cache; + + u32 MEAR_cache; + u32 IMR_cache; + + unsigned linkstate; + + spinlock_t tx_lock; + + u16 tx_done_idx; + u16 tx_idx; + volatile u16 tx_free_idx; /* idx of free desc chain */ + u16 tx_intr_idx; + + atomic_t nr_tx_skbs; + struct sk_buff *tx_skbs[NR_TX_DESC]; + + char pad[16] __attribute__((aligned(16))); + __le32 *tx_descs; + dma_addr_t tx_phy_descs; + + struct timer_list tx_watchdog; +}; + +static inline struct ns83820 *PRIV(struct net_device *dev) +{ + return netdev_priv(dev); +} + +#define __kick_rx(dev) writel(CR_RXE, dev->base + CR) + +static inline void kick_rx(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + dprintk("kick_rx: maybe kicking\n"); + if (test_and_clear_bit(0, &dev->rx_info.idle)) { + dprintk("actually kicking\n"); + writel(dev->rx_info.phy_descs + + (4 * DESC_SIZE * dev->rx_info.next_rx), + dev->base + RXDP); + if (dev->rx_info.next_rx == dev->rx_info.next_empty) + printk(KERN_DEBUG "%s: uh-oh: next_rx == next_empty???\n", + ndev->name); + __kick_rx(dev); + } +} + +//free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC +#define start_tx_okay(dev) \ + (((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE) + +/* Packet Receiver + * + * The hardware supports linked lists of receive descriptors for + * which ownership is transferred back and forth by means of an + * ownership bit. While the hardware does support the use of a + * ring for receive descriptors, we only make use of a chain in + * an attempt to reduce bus traffic under heavy load scenarios. + * This will also make bugs a bit more obvious. The current code + * only makes use of a single rx chain; I hope to implement + * priority based rx for version 1.0. Goal: even under overload + * conditions, still route realtime traffic with as low jitter as + * possible. 
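The desc_addr_set()/desc_addr_get() macros store a dma_addr_t as one or two 32-bit descriptor words depending on HW_ADDR_LEN, so the same descriptor layout serves 32-bit and 64-bit DMA. A standalone sketch of the 64-bit case (low word first); the cpu_to_le32 byte swapping is deliberately left out here:

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit bus address across two 32-bit descriptor words. */
static void addr_set(uint32_t desc[2], uint64_t addr)
{
    desc[0] = (uint32_t)addr;
    desc[1] = (uint32_t)(addr >> 32);
}

static uint64_t addr_get(const uint32_t desc[2])
{
    return (uint64_t)desc[0] | ((uint64_t)desc[1] << 32);
}

int main(void)
{
    uint32_t desc[2];
    uint64_t addr = 0x0000000123456789ull;

    addr_set(desc, addr);
    printf("low %#010x high %#010x roundtrip %#llx\n",
           desc[0], desc[1], (unsigned long long)addr_get(desc));
    return 0;
}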
+ */ +static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts) +{ + desc_addr_set(desc + DESC_LINK, link); + desc_addr_set(desc + DESC_BUFPTR, buf); + desc[DESC_EXTSTS] = cpu_to_le32(extsts); + mb(); + desc[DESC_CMDSTS] = cpu_to_le32(cmdsts); +} + +#define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_DESC) +static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb) +{ + unsigned next_empty; + u32 cmdsts; + __le32 *sg; + dma_addr_t buf; + + next_empty = dev->rx_info.next_empty; + + /* don't overrun last rx marker */ + if (unlikely(nr_rx_empty(dev) <= 2)) { + kfree_skb(skb); + return 1; + } + +#if 0 + dprintk("next_empty[%d] nr_used[%d] next_rx[%d]\n", + dev->rx_info.next_empty, + dev->rx_info.nr_used, + dev->rx_info.next_rx + ); +#endif + + sg = dev->rx_info.descs + (next_empty * DESC_SIZE); + BUG_ON(NULL != dev->rx_info.skbs[next_empty]); + dev->rx_info.skbs[next_empty] = skb; + + dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC; + cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR; + buf = pci_map_single(dev->pci_dev, skb->data, + REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + build_rx_desc(dev, sg, 0, buf, cmdsts, 0); + /* update link of previous rx */ + if (likely(next_empty != dev->rx_info.next_rx)) + dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx_info.phy_descs + (next_empty * DESC_SIZE * 4)); + + return 0; +} + +static inline int rx_refill(struct net_device *ndev, gfp_t gfp) +{ + struct ns83820 *dev = PRIV(ndev); + unsigned i; + unsigned long flags = 0; + + if (unlikely(nr_rx_empty(dev) <= 2)) + return 0; + + dprintk("rx_refill(%p)\n", ndev); + if (gfp == GFP_ATOMIC) + spin_lock_irqsave(&dev->rx_info.lock, flags); + for (i=0; idata - PTR_ALIGN(skb->data, 16)); + if (gfp != GFP_ATOMIC) + spin_lock_irqsave(&dev->rx_info.lock, flags); + res = ns83820_add_rx_skb(dev, skb); + if (gfp != GFP_ATOMIC) + spin_unlock_irqrestore(&dev->rx_info.lock, flags); + if (res) { + i = 1; + break; + } + } + if (gfp == GFP_ATOMIC) + spin_unlock_irqrestore(&dev->rx_info.lock, flags); + + return i ? 
0 : -ENOMEM; +} + +static void rx_refill_atomic(struct net_device *ndev) +{ + rx_refill(ndev, GFP_ATOMIC); +} + +/* REFILL */ +static inline void queue_refill(struct work_struct *work) +{ + struct ns83820 *dev = container_of(work, struct ns83820, tq_refill); + struct net_device *ndev = dev->ndev; + + rx_refill(ndev, GFP_KERNEL); + if (dev->rx_info.up) + kick_rx(ndev); +} + +static inline void clear_rx_desc(struct ns83820 *dev, unsigned i) +{ + build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0); +} + +static void phy_intr(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" }; + u32 cfg, new_cfg; + u32 tbisr, tanar, tanlpar; + int speed, fullduplex, newlinkstate; + + cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + + if (dev->CFG_cache & CFG_TBI_EN) { + /* we have an optical transceiver */ + tbisr = readl(dev->base + TBISR); + tanar = readl(dev->base + TANAR); + tanlpar = readl(dev->base + TANLPAR); + dprintk("phy_intr: tbisr=%08x, tanar=%08x, tanlpar=%08x\n", + tbisr, tanar, tanlpar); + + if ( (fullduplex = (tanlpar & TANAR_FULL_DUP) && + (tanar & TANAR_FULL_DUP)) ) { + + /* both of us are full duplex */ + writel(readl(dev->base + TXCFG) + | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, + dev->base + TXCFG); + writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, + dev->base + RXCFG); + /* Light up full duplex LED */ + writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT, + dev->base + GPIOR); + + } else if (((tanlpar & TANAR_HALF_DUP) && + (tanar & TANAR_HALF_DUP)) || + ((tanlpar & TANAR_FULL_DUP) && + (tanar & TANAR_HALF_DUP)) || + ((tanlpar & TANAR_HALF_DUP) && + (tanar & TANAR_FULL_DUP))) { + + /* one or both of us are half duplex */ + writel((readl(dev->base + TXCFG) + & ~(TXCFG_CSI | TXCFG_HBI)) | TXCFG_ATP, + dev->base + TXCFG); + writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD, + dev->base + RXCFG); + /* Turn off full duplex LED */ + writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT, + dev->base + GPIOR); + } + + speed = 4; /* 1000F */ + + } else { + /* we have a copper transceiver */ + new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS); + + if (cfg & CFG_SPDSTS1) + new_cfg |= CFG_MODE_1000; + else + new_cfg &= ~CFG_MODE_1000; + + speed = ((cfg / CFG_SPDSTS0) & 3); + fullduplex = (cfg & CFG_DUPSTS); + + if (fullduplex) { + new_cfg |= CFG_SB; + writel(readl(dev->base + TXCFG) + | TXCFG_CSI | TXCFG_HBI, + dev->base + TXCFG); + writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, + dev->base + RXCFG); + } else { + writel(readl(dev->base + TXCFG) + & ~(TXCFG_CSI | TXCFG_HBI), + dev->base + TXCFG); + writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD), + dev->base + RXCFG); + } + + if ((cfg & CFG_LNKSTS) && + ((new_cfg ^ dev->CFG_cache) != 0)) { + writel(new_cfg, dev->base + CFG); + dev->CFG_cache = new_cfg; + } + + dev->CFG_cache &= ~CFG_SPDSTS; + dev->CFG_cache |= cfg & CFG_SPDSTS; + } + + newlinkstate = (cfg & CFG_LNKSTS) ? LINK_UP : LINK_DOWN; + + if (newlinkstate & LINK_UP && + dev->linkstate != newlinkstate) { + netif_start_queue(ndev); + netif_wake_queue(ndev); + printk(KERN_INFO "%s: link now %s mbps, %s duplex and up.\n", + ndev->name, + speeds[speed], + fullduplex ? 
"full" : "half"); + } else if (newlinkstate & LINK_DOWN && + dev->linkstate != newlinkstate) { + netif_stop_queue(ndev); + printk(KERN_INFO "%s: link now down.\n", ndev->name); + } + + dev->linkstate = newlinkstate; +} + +static int ns83820_setup_rx(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + unsigned i; + int ret; + + dprintk("ns83820_setup_rx(%p)\n", ndev); + + dev->rx_info.idle = 1; + dev->rx_info.next_rx = 0; + dev->rx_info.next_rx_desc = dev->rx_info.descs; + dev->rx_info.next_empty = 0; + + for (i=0; ibase + RXDP_HI); + writel(dev->rx_info.phy_descs, dev->base + RXDP); + + ret = rx_refill(ndev, GFP_KERNEL); + if (!ret) { + dprintk("starting receiver\n"); + /* prevent the interrupt handler from stomping on us */ + spin_lock_irq(&dev->rx_info.lock); + + writel(0x0001, dev->base + CCSR); + writel(0, dev->base + RFCR); + writel(0x7fc00000, dev->base + RFCR); + writel(0xffc00000, dev->base + RFCR); + + dev->rx_info.up = 1; + + phy_intr(ndev); + + /* Okay, let it rip */ + spin_lock(&dev->misc_lock); + dev->IMR_cache |= ISR_PHY; + dev->IMR_cache |= ISR_RXRCMP; + //dev->IMR_cache |= ISR_RXERR; + //dev->IMR_cache |= ISR_RXOK; + dev->IMR_cache |= ISR_RXORN; + dev->IMR_cache |= ISR_RXSOVR; + dev->IMR_cache |= ISR_RXDESC; + dev->IMR_cache |= ISR_RXIDLE; + dev->IMR_cache |= ISR_TXDESC; + dev->IMR_cache |= ISR_TXIDLE; + + writel(dev->IMR_cache, dev->base + IMR); + writel(1, dev->base + IER); + spin_unlock(&dev->misc_lock); + + kick_rx(ndev); + + spin_unlock_irq(&dev->rx_info.lock); + } + return ret; +} + +static void ns83820_cleanup_rx(struct ns83820 *dev) +{ + unsigned i; + unsigned long flags; + + dprintk("ns83820_cleanup_rx(%p)\n", dev); + + /* disable receive interrupts */ + spin_lock_irqsave(&dev->misc_lock, flags); + dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE); + writel(dev->IMR_cache, dev->base + IMR); + spin_unlock_irqrestore(&dev->misc_lock, flags); + + /* synchronize with the interrupt handler and kill it */ + dev->rx_info.up = 0; + synchronize_irq(dev->pci_dev->irq); + + /* touch the pci bus... 
*/ + readl(dev->base + IMR); + + /* assumes the transmitter is already disabled and reset */ + writel(0, dev->base + RXDP_HI); + writel(0, dev->base + RXDP); + + for (i=0; irx_info.skbs[i]; + dev->rx_info.skbs[i] = NULL; + clear_rx_desc(dev, i); + kfree_skb(skb); + } +} + +static void ns83820_rx_kick(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + /*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ { + if (dev->rx_info.up) { + rx_refill_atomic(ndev); + kick_rx(ndev); + } + } + + if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4) + schedule_work(&dev->tq_refill); + else + kick_rx(ndev); + if (dev->rx_info.idle) + printk(KERN_DEBUG "%s: BAD\n", ndev->name); +} + +/* rx_irq + * + */ +static void rx_irq(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + struct rx_info *info = &dev->rx_info; + unsigned next_rx; + int rx_rc, len; + u32 cmdsts; + __le32 *desc; + unsigned long flags; + int nr = 0; + + dprintk("rx_irq(%p)\n", ndev); + dprintk("rxdp: %08x, descs: %08lx next_rx[%d]: %p next_empty[%d]: %p\n", + readl(dev->base + RXDP), + (long)(dev->rx_info.phy_descs), + (int)dev->rx_info.next_rx, + (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)), + (int)dev->rx_info.next_empty, + (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty)) + ); + + spin_lock_irqsave(&info->lock, flags); + if (!info->up) + goto out; + + dprintk("walking descs\n"); + next_rx = info->next_rx; + desc = info->next_rx_desc; + while ((CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) && + (cmdsts != CMDSTS_OWN)) { + struct sk_buff *skb; + u32 extsts = le32_to_cpu(desc[DESC_EXTSTS]); + dma_addr_t bufptr = desc_addr_get(desc + DESC_BUFPTR); + + dprintk("cmdsts: %08x\n", cmdsts); + dprintk("link: %08x\n", cpu_to_le32(desc[DESC_LINK])); + dprintk("extsts: %08x\n", extsts); + + skb = info->skbs[next_rx]; + info->skbs[next_rx] = NULL; + info->next_rx = (next_rx + 1) % NR_RX_DESC; + + mb(); + clear_rx_desc(dev, next_rx); + + pci_unmap_single(dev->pci_dev, bufptr, + RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + len = cmdsts & CMDSTS_LEN_MASK; +#ifdef NS83820_VLAN_ACCEL_SUPPORT + /* NH: As was mentioned below, this chip is kinda + * brain dead about vlan tag stripping. Frames + * that are 64 bytes with a vlan header appended + * like arp frames, or pings, are flagged as Runts + * when the tag is stripped and hardware. This + * also means that the OK bit in the descriptor + * is cleared when the frame comes in so we have + * to do a specific length check here to make sure + * the frame would have been ok, had we not stripped + * the tag. 
+ */ + if (likely((CMDSTS_OK & cmdsts) || + ((cmdsts & CMDSTS_RUNT) && len >= 56))) { +#else + if (likely(CMDSTS_OK & cmdsts)) { +#endif + skb_put(skb, len); + if (unlikely(!skb)) + goto netdev_mangle_me_harder_failed; + if (cmdsts & CMDSTS_DEST_MULTI) + ndev->stats.multicast++; + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + skb->protocol = eth_type_trans(skb, ndev); +#ifdef NS83820_VLAN_ACCEL_SUPPORT + if(extsts & EXTSTS_VPKT) { + unsigned short tag; + + tag = ntohs(extsts & EXTSTS_VTG_MASK); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag); + } +#endif + rx_rc = netif_rx(skb); + if (NET_RX_DROP == rx_rc) { +netdev_mangle_me_harder_failed: + ndev->stats.rx_dropped++; + } + } else { + kfree_skb(skb); + } + + nr++; + next_rx = info->next_rx; + desc = info->descs + (DESC_SIZE * next_rx); + } + info->next_rx = next_rx; + info->next_rx_desc = info->descs + (DESC_SIZE * next_rx); + +out: + if (0 && !nr) { + Dprintk("dazed: cmdsts_f: %08x\n", cmdsts); + } + + spin_unlock_irqrestore(&info->lock, flags); +} + +static void rx_action(unsigned long _dev) +{ + struct net_device *ndev = (void *)_dev; + struct ns83820 *dev = PRIV(ndev); + rx_irq(ndev); + writel(ihr, dev->base + IHR); + + spin_lock_irq(&dev->misc_lock); + dev->IMR_cache |= ISR_RXDESC; + writel(dev->IMR_cache, dev->base + IMR); + spin_unlock_irq(&dev->misc_lock); + + rx_irq(ndev); + ns83820_rx_kick(ndev); +} + +/* Packet Transmit code + */ +static inline void kick_tx(struct ns83820 *dev) +{ + dprintk("kick_tx(%p): tx_idx=%d free_idx=%d\n", + dev, dev->tx_idx, dev->tx_free_idx); + writel(CR_TXE, dev->base + CR); +} + +/* No spinlock needed on the transmit irq path as the interrupt handler is + * serialized. + */ +static void do_tx_done(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cmdsts, tx_done_idx; + __le32 *desc; + + dprintk("do_tx_done(%p)\n", ndev); + tx_done_idx = dev->tx_done_idx; + desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); + + dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", + tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); + while ((tx_done_idx != dev->tx_free_idx) && + !(CMDSTS_OWN & (cmdsts = le32_to_cpu(desc[DESC_CMDSTS]))) ) { + struct sk_buff *skb; + unsigned len; + dma_addr_t addr; + + if (cmdsts & CMDSTS_ERR) + ndev->stats.tx_errors++; + if (cmdsts & CMDSTS_OK) + ndev->stats.tx_packets++; + if (cmdsts & CMDSTS_OK) + ndev->stats.tx_bytes += cmdsts & 0xffff; + + dprintk("tx_done_idx=%d free_idx=%d cmdsts=%08x\n", + tx_done_idx, dev->tx_free_idx, cmdsts); + skb = dev->tx_skbs[tx_done_idx]; + dev->tx_skbs[tx_done_idx] = NULL; + dprintk("done(%p)\n", skb); + + len = cmdsts & CMDSTS_LEN_MASK; + addr = desc_addr_get(desc + DESC_BUFPTR); + if (skb) { + pci_unmap_single(dev->pci_dev, + addr, + len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + atomic_dec(&dev->nr_tx_skbs); + } else + pci_unmap_page(dev->pci_dev, + addr, + len, + PCI_DMA_TODEVICE); + + tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC; + dev->tx_done_idx = tx_done_idx; + desc[DESC_CMDSTS] = cpu_to_le32(0); + mb(); + desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); + } + + /* Allow network stack to resume queueing packets after we've + * finished transmitting at least 1/4 of the packets in the queue. 
+ */ + if (netif_queue_stopped(ndev) && start_tx_okay(dev)) { + dprintk("start_queue(%p)\n", ndev); + netif_start_queue(ndev); + netif_wake_queue(ndev); + } +} + +static void ns83820_cleanup_tx(struct ns83820 *dev) +{ + unsigned i; + + for (i=0; itx_skbs[i]; + dev->tx_skbs[i] = NULL; + if (skb) { + __le32 *desc = dev->tx_descs + (i * DESC_SIZE); + pci_unmap_single(dev->pci_dev, + desc_addr_get(desc + DESC_BUFPTR), + le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + atomic_dec(&dev->nr_tx_skbs); + } + } + + memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4); +} + +/* transmit routine. This code relies on the network layer serializing + * its calls in, but will run happily in parallel with the interrupt + * handler. This code currently has provisions for fragmenting tx buffers + * while trying to track down a bug in either the zero copy code or + * the tx fifo (hence the MAX_FRAG_LEN). + */ +static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + u32 free_idx, cmdsts, extsts; + int nr_free, nr_frags; + unsigned tx_done_idx, last_idx; + dma_addr_t buf; + unsigned len; + skb_frag_t *frag; + int stopped = 0; + int do_intr = 0; + volatile __le32 *first_desc; + + dprintk("ns83820_hard_start_xmit\n"); + + nr_frags = skb_shinfo(skb)->nr_frags; +again: + if (unlikely(dev->CFG_cache & CFG_LNKSTS)) { + netif_stop_queue(ndev); + if (unlikely(dev->CFG_cache & CFG_LNKSTS)) + return NETDEV_TX_BUSY; + netif_start_queue(ndev); + } + + last_idx = free_idx = dev->tx_free_idx; + tx_done_idx = dev->tx_done_idx; + nr_free = (tx_done_idx + NR_TX_DESC-2 - free_idx) % NR_TX_DESC; + nr_free -= 1; + if (nr_free <= nr_frags) { + dprintk("stop_queue - not enough(%p)\n", ndev); + netif_stop_queue(ndev); + + /* Check again: we may have raced with a tx done irq */ + if (dev->tx_done_idx != tx_done_idx) { + dprintk("restart queue(%p)\n", ndev); + netif_start_queue(ndev); + goto again; + } + return NETDEV_TX_BUSY; + } + + if (free_idx == dev->tx_intr_idx) { + do_intr = 1; + dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC; + } + + nr_free -= nr_frags; + if (nr_free < MIN_TX_DESC_FREE) { + dprintk("stop_queue - last entry(%p)\n", ndev); + netif_stop_queue(ndev); + stopped = 1; + } + + frag = skb_shinfo(skb)->frags; + if (!nr_frags) + frag = NULL; + extsts = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + extsts |= EXTSTS_IPPKT; + if (IPPROTO_TCP == ip_hdr(skb)->protocol) + extsts |= EXTSTS_TCPPKT; + else if (IPPROTO_UDP == ip_hdr(skb)->protocol) + extsts |= EXTSTS_UDPPKT; + } + +#ifdef NS83820_VLAN_ACCEL_SUPPORT + if (skb_vlan_tag_present(skb)) { + /* fetch the vlan tag info out of the + * ancillary data if the vlan code + * is using hw vlan acceleration + */ + short tag = skb_vlan_tag_get(skb); + extsts |= (EXTSTS_VPKT | htons(tag)); + } +#endif + + len = skb->len; + if (nr_frags) + len -= skb->data_len; + buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE); + + first_desc = dev->tx_descs + (free_idx * DESC_SIZE); + + for (;;) { + volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE); + + dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len, + (unsigned long long)buf); + last_idx = free_idx; + free_idx = (free_idx + 1) % NR_TX_DESC; + desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4)); + desc_addr_set(desc + DESC_BUFPTR, buf); + desc[DESC_EXTSTS] = cpu_to_le32(extsts); + + cmdsts = ((nr_frags) ? CMDSTS_MORE : do_intr ? 
CMDSTS_INTR : 0); + cmdsts |= (desc == first_desc) ? 0 : CMDSTS_OWN; + cmdsts |= len; + desc[DESC_CMDSTS] = cpu_to_le32(cmdsts); + + if (!nr_frags) + break; + + buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n", + (long long)buf, (long) page_to_pfn(frag->page), + frag->page_offset); + len = skb_frag_size(frag); + frag++; + nr_frags--; + } + dprintk("done pkt\n"); + + spin_lock_irq(&dev->tx_lock); + dev->tx_skbs[last_idx] = skb; + first_desc[DESC_CMDSTS] |= cpu_to_le32(CMDSTS_OWN); + dev->tx_free_idx = free_idx; + atomic_inc(&dev->nr_tx_skbs); + spin_unlock_irq(&dev->tx_lock); + + kick_tx(dev); + + /* Check again: we may have raced with a tx done irq */ + if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev)) + netif_start_queue(ndev); + + return NETDEV_TX_OK; +} + +static void ns83820_update_stats(struct ns83820 *dev) +{ + struct net_device *ndev = dev->ndev; + u8 __iomem *base = dev->base; + + /* the DP83820 will freeze counters, so we need to read all of them */ + ndev->stats.rx_errors += readl(base + 0x60) & 0xffff; + ndev->stats.rx_crc_errors += readl(base + 0x64) & 0xffff; + ndev->stats.rx_missed_errors += readl(base + 0x68) & 0xffff; + ndev->stats.rx_frame_errors += readl(base + 0x6c) & 0xffff; + /*ndev->stats.rx_symbol_errors +=*/ readl(base + 0x70); + ndev->stats.rx_length_errors += readl(base + 0x74) & 0xffff; + ndev->stats.rx_length_errors += readl(base + 0x78) & 0xffff; + /*ndev->stats.rx_badopcode_errors += */ readl(base + 0x7c); + /*ndev->stats.rx_pause_count += */ readl(base + 0x80); + /*ndev->stats.tx_pause_count += */ readl(base + 0x84); + ndev->stats.tx_carrier_errors += readl(base + 0x88) & 0xff; +} + +static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + + /* somewhat overkill */ + spin_lock_irq(&dev->misc_lock); + ns83820_update_stats(dev); + spin_unlock_irq(&dev->misc_lock); + + return &ndev->stats; +} + +/* Let ethtool retrieve info */ +static int ns83820_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cfg, tanar, tbicr; + int fullduplex = 0; + + /* + * Here's the list of available ethtool commands from other drivers: + * cmd->advertising = + * ethtool_cmd_speed_set(cmd, ...) + * cmd->duplex = + * cmd->port = 0; + * cmd->phy_address = + * cmd->transceiver = 0; + * cmd->autoneg = + * cmd->maxtxpkt = 0; + * cmd->maxrxpkt = 0; + */ + + /* read current configuration */ + cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + tanar = readl(dev->base + TANAR); + tbicr = readl(dev->base + TBICR); + + fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0; + + cmd->supported = SUPPORTED_Autoneg; + + if (dev->CFG_cache & CFG_TBI_EN) { + /* we have optical interface */ + cmd->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } else { + /* we have copper */ + cmd->supported |= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_MII; + cmd->port = PORT_MII; + } + + cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; + switch (cfg / CFG_SPDSTS0 & 3) { + case 2: + ethtool_cmd_speed_set(cmd, SPEED_1000); + break; + case 1: + ethtool_cmd_speed_set(cmd, SPEED_100); + break; + default: + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + } + cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) + ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + +/* Let ethool change settings*/ +static int ns83820_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cfg, tanar; + int have_optical = 0; + int fullduplex = 0; + + /* read current configuration */ + cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + tanar = readl(dev->base + TANAR); + + if (dev->CFG_cache & CFG_TBI_EN) { + /* we have optical */ + have_optical = 1; + fullduplex = (tanar & TANAR_FULL_DUP); + + } else { + /* we have copper */ + fullduplex = cfg & CFG_DUPSTS; + } + + spin_lock_irq(&dev->misc_lock); + spin_lock(&dev->tx_lock); + + /* Set duplex */ + if (cmd->duplex != fullduplex) { + if (have_optical) { + /*set full duplex*/ + if (cmd->duplex == DUPLEX_FULL) { + /* force full duplex */ + writel(readl(dev->base + TXCFG) + | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, + dev->base + TXCFG); + writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, + dev->base + RXCFG); + /* Light up full duplex LED */ + writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT, + dev->base + GPIOR); + } else { + /*TODO: set half duplex */ + } + + } else { + /*we have copper*/ + /* TODO: Set duplex for copper cards */ + } + printk(KERN_INFO "%s: Duplex set via ethtool\n", + ndev->name); + } + + /* Set autonegotiation */ + if (1) { + if (cmd->autoneg == AUTONEG_ENABLE) { + /* restart auto negotiation */ + writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, + dev->base + TBICR); + writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); + dev->linkstate = LINK_AUTONEGOTIATE; + + printk(KERN_INFO "%s: autoneg enabled via ethtool\n", + ndev->name); + } else { + /* disable auto negotiation */ + writel(0x00000000, dev->base + TBICR); + } + + printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name, + cmd->autoneg ? "ENABLED" : "DISABLED"); + } + + phy_intr(ndev); + spin_unlock(&dev->tx_lock); + spin_unlock_irq(&dev->misc_lock); + + return 0; +} +/* end ethtool get/set support -df */ + +static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) +{ + struct ns83820 *dev = PRIV(ndev); + strlcpy(info->driver, "ns83820", sizeof(info->driver)); + strlcpy(info->version, VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info)); +} + +static u32 ns83820_get_link(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + return cfg & CFG_LNKSTS ? 
1 : 0; +} + +static const struct ethtool_ops ops = { + .get_settings = ns83820_get_settings, + .set_settings = ns83820_set_settings, + .get_drvinfo = ns83820_get_drvinfo, + .get_link = ns83820_get_link +}; + +static inline void ns83820_disable_interrupts(struct ns83820 *dev) +{ + writel(0, dev->base + IMR); + writel(0, dev->base + IER); + readl(dev->base + IER); +} + +/* this function is called in irq context from the ISR */ +static void ns83820_mib_isr(struct ns83820 *dev) +{ + unsigned long flags; + spin_lock_irqsave(&dev->misc_lock, flags); + ns83820_update_stats(dev); + spin_unlock_irqrestore(&dev->misc_lock, flags); +} + +static void ns83820_do_isr(struct net_device *ndev, u32 isr); +static irqreturn_t ns83820_irq(int foo, void *data) +{ + struct net_device *ndev = data; + struct ns83820 *dev = PRIV(ndev); + u32 isr; + dprintk("ns83820_irq(%p)\n", ndev); + + dev->ihr = 0; + + isr = readl(dev->base + ISR); + dprintk("irq: %08x\n", isr); + ns83820_do_isr(ndev, isr); + return IRQ_HANDLED; +} + +static void ns83820_do_isr(struct net_device *ndev, u32 isr) +{ + struct ns83820 *dev = PRIV(ndev); + unsigned long flags; + +#ifdef DEBUG + if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC)) + Dprintk("odd isr? 0x%08x\n", isr); +#endif + + if (ISR_RXIDLE & isr) { + dev->rx_info.idle = 1; + Dprintk("oh dear, we are idle\n"); + ns83820_rx_kick(ndev); + } + + if ((ISR_RXDESC | ISR_RXOK) & isr) { + prefetch(dev->rx_info.next_rx_desc); + + spin_lock_irqsave(&dev->misc_lock, flags); + dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK); + writel(dev->IMR_cache, dev->base + IMR); + spin_unlock_irqrestore(&dev->misc_lock, flags); + + tasklet_schedule(&dev->rx_tasklet); + //rx_irq(ndev); + //writel(4, dev->base + IHR); + } + + if ((ISR_RXIDLE | ISR_RXORN | ISR_RXDESC | ISR_RXOK | ISR_RXERR) & isr) + ns83820_rx_kick(ndev); + + if (unlikely(ISR_RXSOVR & isr)) { + //printk("overrun: rxsovr\n"); + ndev->stats.rx_fifo_errors++; + } + + if (unlikely(ISR_RXORN & isr)) { + //printk("overrun: rxorn\n"); + ndev->stats.rx_fifo_errors++; + } + + if ((ISR_RXRCMP & isr) && dev->rx_info.up) + writel(CR_RXE, dev->base + CR); + + if (ISR_TXIDLE & isr) { + u32 txdp; + txdp = readl(dev->base + TXDP); + dprintk("txdp: %08x\n", txdp); + txdp -= dev->tx_phy_descs; + dev->tx_idx = txdp / (DESC_SIZE * 4); + if (dev->tx_idx >= NR_TX_DESC) { + printk(KERN_ALERT "%s: BUG -- txdp out of range\n", ndev->name); + dev->tx_idx = 0; + } + /* The may have been a race between a pci originated read + * and the descriptor update from the cpu. Just in case, + * kick the transmitter if the hardware thinks it is on a + * different descriptor than we are. + */ + if (dev->tx_idx != dev->tx_free_idx) + kick_tx(dev); + } + + /* Defer tx ring processing until more than a minimum amount of + * work has accumulated + */ + if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) { + spin_lock_irqsave(&dev->tx_lock, flags); + do_tx_done(ndev); + spin_unlock_irqrestore(&dev->tx_lock, flags); + + /* Disable TxOk if there are no outstanding tx packets. + */ + if ((dev->tx_done_idx == dev->tx_free_idx) && + (dev->IMR_cache & ISR_TXOK)) { + spin_lock_irqsave(&dev->misc_lock, flags); + dev->IMR_cache &= ~ISR_TXOK; + writel(dev->IMR_cache, dev->base + IMR); + spin_unlock_irqrestore(&dev->misc_lock, flags); + } + } + + /* The TxIdle interrupt can come in before the transmit has + * completed. 
Normally we reap packets off of the combination + * of TxDesc and TxIdle and leave TxOk disabled (since it + * occurs on every packet), but when no further irqs of this + * nature are expected, we must enable TxOk. + */ + if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) { + spin_lock_irqsave(&dev->misc_lock, flags); + dev->IMR_cache |= ISR_TXOK; + writel(dev->IMR_cache, dev->base + IMR); + spin_unlock_irqrestore(&dev->misc_lock, flags); + } + + /* MIB interrupt: one of the statistics counters is about to overflow */ + if (unlikely(ISR_MIB & isr)) + ns83820_mib_isr(dev); + + /* PHY: Link up/down/negotiation state change */ + if (unlikely(ISR_PHY & isr)) + phy_intr(ndev); + +#if 0 /* Still working on the interrupt mitigation strategy */ + if (dev->ihr) + writel(dev->ihr, dev->base + IHR); +#endif +} + +static void ns83820_do_reset(struct ns83820 *dev, u32 which) +{ + Dprintk("resetting chip...\n"); + writel(which, dev->base + CR); + do { + schedule(); + } while (readl(dev->base + CR) & which); + Dprintk("okay!\n"); +} + +static int ns83820_stop(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + + /* FIXME: protect against interrupt handler? */ + del_timer_sync(&dev->tx_watchdog); + + ns83820_disable_interrupts(dev); + + dev->rx_info.up = 0; + synchronize_irq(dev->pci_dev->irq); + + ns83820_do_reset(dev, CR_RST); + + synchronize_irq(dev->pci_dev->irq); + + spin_lock_irq(&dev->misc_lock); + dev->IMR_cache &= ~(ISR_TXURN | ISR_TXIDLE | ISR_TXERR | ISR_TXDESC | ISR_TXOK); + spin_unlock_irq(&dev->misc_lock); + + ns83820_cleanup_rx(dev); + ns83820_cleanup_tx(dev); + + return 0; +} + +static void ns83820_tx_timeout(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + u32 tx_done_idx; + __le32 *desc; + unsigned long flags; + + spin_lock_irqsave(&dev->tx_lock, flags); + + tx_done_idx = dev->tx_done_idx; + desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); + + printk(KERN_INFO "%s: tx_timeout: tx_done_idx=%d free_idx=%d cmdsts=%08x\n", + ndev->name, + tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); + +#if defined(DEBUG) + { + u32 isr; + isr = readl(dev->base + ISR); + printk("irq: %08x imr: %08x\n", isr, dev->IMR_cache); + ns83820_do_isr(ndev, isr); + } +#endif + + do_tx_done(ndev); + + tx_done_idx = dev->tx_done_idx; + desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); + + printk(KERN_INFO "%s: after: tx_done_idx=%d free_idx=%d cmdsts=%08x\n", + ndev->name, + tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); + + spin_unlock_irqrestore(&dev->tx_lock, flags); +} + +static void ns83820_tx_watch(unsigned long data) +{ + struct net_device *ndev = (void *)data; + struct ns83820 *dev = PRIV(ndev); + +#if defined(DEBUG) + printk("ns83820_tx_watch: %u %u %d\n", + dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs) + ); +#endif + + if (time_after(jiffies, dev_trans_start(ndev) + 1*HZ) && + dev->tx_done_idx != dev->tx_free_idx) { + printk(KERN_DEBUG "%s: ns83820_tx_watch: %u %u %d\n", + ndev->name, + dev->tx_done_idx, dev->tx_free_idx, + atomic_read(&dev->nr_tx_skbs)); + ns83820_tx_timeout(ndev); + } + + mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); +} + +static int ns83820_open(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + unsigned i; + u32 desc; + int ret; + + dprintk("ns83820_open\n"); + + writel(0, dev->base + PQCR); + + ret = ns83820_setup_rx(ndev); + if (ret) + goto failed; + + memset(dev->tx_descs, 0, 4 * NR_TX_DESC * DESC_SIZE); + for (i=0; itx_descs[(i * DESC_SIZE) + DESC_LINK] + = cpu_to_le32( + 
dev->tx_phy_descs + + ((i+1) % NR_TX_DESC) * DESC_SIZE * 4); + } + + dev->tx_idx = 0; + dev->tx_done_idx = 0; + desc = dev->tx_phy_descs; + writel(0, dev->base + TXDP_HI); + writel(desc, dev->base + TXDP); + + init_timer(&dev->tx_watchdog); + dev->tx_watchdog.data = (unsigned long)ndev; + dev->tx_watchdog.function = ns83820_tx_watch; + mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); + + netif_start_queue(ndev); /* FIXME: wait for phy to come up */ + + return 0; + +failed: + ns83820_stop(ndev); + return ret; +} + +static void ns83820_getmac(struct ns83820 *dev, u8 *mac) +{ + unsigned i; + for (i=0; i<3; i++) { + u32 data; + + /* Read from the perfect match memory: this is loaded by + * the chip from the EEPROM via the EELOAD self test. + */ + writel(i*2, dev->base + RFCR); + data = readl(dev->base + RFDR); + + *mac++ = data; + *mac++ = data >> 8; + } +} + +static int ns83820_change_mtu(struct net_device *ndev, int new_mtu) +{ + if (new_mtu > RX_BUF_SIZE) + return -EINVAL; + ndev->mtu = new_mtu; + return 0; +} + +static void ns83820_set_multicast(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + u8 __iomem *rfcr = dev->base + RFCR; + u32 and_mask = 0xffffffff; + u32 or_mask = 0; + u32 val; + + if (ndev->flags & IFF_PROMISC) + or_mask |= RFCR_AAU | RFCR_AAM; + else + and_mask &= ~(RFCR_AAU | RFCR_AAM); + + if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev)) + or_mask |= RFCR_AAM; + else + and_mask &= ~RFCR_AAM; + + spin_lock_irq(&dev->misc_lock); + val = (readl(rfcr) & and_mask) | or_mask; + /* Ramit : RFCR Write Fix doc says RFEN must be 0 modify other bits */ + writel(val & ~RFCR_RFEN, rfcr); + writel(val, rfcr); + spin_unlock_irq(&dev->misc_lock); +} + +static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enable, u32 done, u32 fail) +{ + struct ns83820 *dev = PRIV(ndev); + int timed_out = 0; + unsigned long start; + u32 status; + int loops = 0; + + dprintk("%s: start %s\n", ndev->name, name); + + start = jiffies; + + writel(enable, dev->base + PTSCR); + for (;;) { + loops++; + status = readl(dev->base + PTSCR); + if (!(status & enable)) + break; + if (status & done) + break; + if (status & fail) + break; + if (time_after_eq(jiffies, start + HZ)) { + timed_out = 1; + break; + } + schedule_timeout_uninterruptible(1); + } + + if (status & fail) + printk(KERN_INFO "%s: %s failed! (0x%08x & 0x%08x)\n", + ndev->name, name, status, fail); + else if (timed_out) + printk(KERN_INFO "%s: run_bist %s timed out! (%08x)\n", + ndev->name, name, status); + + dprintk("%s: done %s in %d loops\n", ndev->name, name, loops); +} + +#ifdef PHY_CODE_IS_FINISHED +static void ns83820_mii_write_bit(struct ns83820 *dev, int bit) +{ + /* drive MDC low */ + dev->MEAR_cache &= ~MEAR_MDC; + writel(dev->MEAR_cache, dev->base + MEAR); + readl(dev->base + MEAR); + + /* enable output, set bit */ + dev->MEAR_cache |= MEAR_MDDIR; + if (bit) + dev->MEAR_cache |= MEAR_MDIO; + else + dev->MEAR_cache &= ~MEAR_MDIO; + + /* set the output bit */ + writel(dev->MEAR_cache, dev->base + MEAR); + readl(dev->base + MEAR); + + /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */ + udelay(1); + + /* drive MDC high causing the data bit to be latched */ + dev->MEAR_cache |= MEAR_MDC; + writel(dev->MEAR_cache, dev->base + MEAR); + readl(dev->base + MEAR); + + /* Wait again... 
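+	 * (the second 1 us half-period keeps MDC comfortably below the
+	 *  2.5 MHz limit mentioned above)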
*/ + udelay(1); +} + +static int ns83820_mii_read_bit(struct ns83820 *dev) +{ + int bit; + + /* drive MDC low, disable output */ + dev->MEAR_cache &= ~MEAR_MDC; + dev->MEAR_cache &= ~MEAR_MDDIR; + writel(dev->MEAR_cache, dev->base + MEAR); + readl(dev->base + MEAR); + + /* Wait. Max clock rate is 2.5MHz, this way we come in under 1MHz */ + udelay(1); + + /* drive MDC high causing the data bit to be latched */ + bit = (readl(dev->base + MEAR) & MEAR_MDIO) ? 1 : 0; + dev->MEAR_cache |= MEAR_MDC; + writel(dev->MEAR_cache, dev->base + MEAR); + + /* Wait again... */ + udelay(1); + + return bit; +} + +static unsigned ns83820_mii_read_reg(struct ns83820 *dev, unsigned phy, unsigned reg) +{ + unsigned data = 0; + int i; + + /* read some garbage so that we eventually sync up */ + for (i=0; i<64; i++) + ns83820_mii_read_bit(dev); + + ns83820_mii_write_bit(dev, 0); /* start */ + ns83820_mii_write_bit(dev, 1); + ns83820_mii_write_bit(dev, 1); /* opcode read */ + ns83820_mii_write_bit(dev, 0); + + /* write out the phy address: 5 bits, msb first */ + for (i=0; i<5; i++) + ns83820_mii_write_bit(dev, phy & (0x10 >> i)); + + /* write out the register address, 5 bits, msb first */ + for (i=0; i<5; i++) + ns83820_mii_write_bit(dev, reg & (0x10 >> i)); + + ns83820_mii_read_bit(dev); /* turn around cycles */ + ns83820_mii_read_bit(dev); + + /* read in the register data, 16 bits msb first */ + for (i=0; i<16; i++) { + data <<= 1; + data |= ns83820_mii_read_bit(dev); + } + + return data; +} + +static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigned reg, unsigned data) +{ + int i; + + /* read some garbage so that we eventually sync up */ + for (i=0; i<64; i++) + ns83820_mii_read_bit(dev); + + ns83820_mii_write_bit(dev, 0); /* start */ + ns83820_mii_write_bit(dev, 1); + ns83820_mii_write_bit(dev, 0); /* opcode read */ + ns83820_mii_write_bit(dev, 1); + + /* write out the phy address: 5 bits, msb first */ + for (i=0; i<5; i++) + ns83820_mii_write_bit(dev, phy & (0x10 >> i)); + + /* write out the register address, 5 bits, msb first */ + for (i=0; i<5; i++) + ns83820_mii_write_bit(dev, reg & (0x10 >> i)); + + ns83820_mii_read_bit(dev); /* turn around cycles */ + ns83820_mii_read_bit(dev); + + /* read in the register data, 16 bits msb first */ + for (i=0; i<16; i++) + ns83820_mii_write_bit(dev, (data >> (15 - i)) & 1); + + return data; +} + +static void ns83820_probe_phy(struct net_device *ndev) +{ + struct ns83820 *dev = PRIV(ndev); + static int first; + int i; +#define MII_PHYIDR1 0x02 +#define MII_PHYIDR2 0x03 + +#if 0 + if (!first) { + unsigned tmp; + ns83820_mii_read_reg(dev, 1, 0x09); + ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e); + + tmp = ns83820_mii_read_reg(dev, 1, 0x00); + ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000); + udelay(1300); + ns83820_mii_read_reg(dev, 1, 0x09); + } +#endif + first = 1; + + for (i=1; i<2; i++) { + int j; + unsigned a, b; + a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1); + b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2); + + //printk("%s: phy %d: 0x%04x 0x%04x\n", + // ndev->name, i, a, b); + + for (j=0; j<0x16; j+=4) { + dprintk("%s: [0x%02x] %04x %04x %04x %04x\n", + ndev->name, j, + ns83820_mii_read_reg(dev, i, 0 + j), + ns83820_mii_read_reg(dev, i, 1 + j), + ns83820_mii_read_reg(dev, i, 2 + j), + ns83820_mii_read_reg(dev, i, 3 + j) + ); + } + } + { + unsigned a, b; + /* read firmware version: memory addr is 0x8402 and 0x8403 */ + ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); + ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); + a = ns83820_mii_read_reg(dev, 
1, 0x1d); + + ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); + ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); + b = ns83820_mii_read_reg(dev, 1, 0x1d); + dprintk("version: 0x%04x 0x%04x\n", a, b); + } +} +#endif + +static const struct net_device_ops netdev_ops = { + .ndo_open = ns83820_open, + .ndo_stop = ns83820_stop, + .ndo_start_xmit = ns83820_hard_start_xmit, + .ndo_get_stats = ns83820_get_stats, + .ndo_change_mtu = ns83820_change_mtu, + .ndo_set_rx_mode = ns83820_set_multicast, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_tx_timeout = ns83820_tx_timeout, +}; + +static int ns83820_init_one(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct net_device *ndev; + struct ns83820 *dev; + long addr; + int err; + int using_dac = 0; + + /* See if we can set the dma mask early on; failure is fatal. */ + if (sizeof(dma_addr_t) == 8 && + !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { + using_dac = 1; + } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + using_dac = 0; + } else { + dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n"); + return -ENODEV; + } + + ndev = alloc_etherdev(sizeof(struct ns83820)); + err = -ENOMEM; + if (!ndev) + goto out; + + dev = PRIV(ndev); + dev->ndev = ndev; + + spin_lock_init(&dev->rx_info.lock); + spin_lock_init(&dev->tx_lock); + spin_lock_init(&dev->misc_lock); + dev->pci_dev = pci_dev; + + SET_NETDEV_DEV(ndev, &pci_dev->dev); + + INIT_WORK(&dev->tq_refill, queue_refill); + tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); + + err = pci_enable_device(pci_dev); + if (err) { + dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err); + goto out_free; + } + + pci_set_master(pci_dev); + addr = pci_resource_start(pci_dev, 1); + dev->base = ioremap_nocache(addr, PAGE_SIZE); + dev->tx_descs = pci_alloc_consistent(pci_dev, + 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs); + dev->rx_info.descs = pci_alloc_consistent(pci_dev, + 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs); + err = -ENOMEM; + if (!dev->base || !dev->tx_descs || !dev->rx_info.descs) + goto out_disable; + + dprintk("%p: %08lx %p: %08lx\n", + dev->tx_descs, (long)dev->tx_phy_descs, + dev->rx_info.descs, (long)dev->rx_info.phy_descs); + + ns83820_disable_interrupts(dev); + + dev->IMR_cache = 0; + + err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED, + DRV_NAME, ndev); + if (err) { + dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n", + pci_dev->irq, err); + goto out_disable; + } + + /* + * FIXME: we are holding rtnl_lock() over obscenely long area only + * because some of the setup code uses dev->name. It's Wrong(tm) - + * we should be using driver-specific names for all that stuff. + * For now that will do, but we really need to come back and kill + * most of the dev_alloc_name() users later. 
+ */ + rtnl_lock(); + err = dev_alloc_name(ndev, ndev->name); + if (err < 0) { + dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err); + goto out_free_irq; + } + + printk("%s: ns83820.c: 0x22c: %08x, subsystem: %04x:%04x\n", + ndev->name, le32_to_cpu(readl(dev->base + 0x22c)), + pci_dev->subsystem_vendor, pci_dev->subsystem_device); + + ndev->netdev_ops = &netdev_ops; + ndev->ethtool_ops = &ops; + ndev->watchdog_timeo = 5 * HZ; + pci_set_drvdata(pci_dev, ndev); + + ns83820_do_reset(dev, CR_RST); + + /* Must reset the ram bist before running it */ + writel(PTSCR_RBIST_RST, dev->base + PTSCR); + ns83820_run_bist(ndev, "sram bist", PTSCR_RBIST_EN, + PTSCR_RBIST_DONE, PTSCR_RBIST_FAIL); + ns83820_run_bist(ndev, "eeprom bist", PTSCR_EEBIST_EN, 0, + PTSCR_EEBIST_FAIL); + ns83820_run_bist(ndev, "eeprom load", PTSCR_EELOAD_EN, 0, 0); + + /* I love config registers */ + dev->CFG_cache = readl(dev->base + CFG); + + if ((dev->CFG_cache & CFG_PCI64_DET)) { + printk(KERN_INFO "%s: detected 64 bit PCI data bus.\n", + ndev->name); + /*dev->CFG_cache |= CFG_DATA64_EN;*/ + if (!(dev->CFG_cache & CFG_DATA64_EN)) + printk(KERN_INFO "%s: EEPROM did not enable 64 bit bus. Disabled.\n", + ndev->name); + } else + dev->CFG_cache &= ~(CFG_DATA64_EN); + + dev->CFG_cache &= (CFG_TBI_EN | CFG_MRM_DIS | CFG_MWI_DIS | + CFG_T64ADDR | CFG_DATA64_EN | CFG_EXT_125 | + CFG_M64ADDR); + dev->CFG_cache |= CFG_PINT_DUPSTS | CFG_PINT_LNKSTS | CFG_PINT_SPDSTS | + CFG_EXTSTS_EN | CFG_EXD | CFG_PESEL; + dev->CFG_cache |= CFG_REQALG; + dev->CFG_cache |= CFG_POW; + dev->CFG_cache |= CFG_TMRTEST; + + /* When compiled with 64 bit addressing, we must always enable + * the 64 bit descriptor format. + */ + if (sizeof(dma_addr_t) == 8) + dev->CFG_cache |= CFG_M64ADDR; + if (using_dac) + dev->CFG_cache |= CFG_T64ADDR; + + /* Big endian mode does not seem to do what the docs suggest */ + dev->CFG_cache &= ~CFG_BEM; + + /* setup optical transceiver if we have one */ + if (dev->CFG_cache & CFG_TBI_EN) { + printk(KERN_INFO "%s: enabling optical transceiver\n", + ndev->name); + writel(readl(dev->base + GPIOR) | 0x3e8, dev->base + GPIOR); + + /* setup auto negotiation feature advertisement */ + writel(readl(dev->base + TANAR) + | TANAR_HALF_DUP | TANAR_FULL_DUP, + dev->base + TANAR); + + /* start auto negotiation */ + writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, + dev->base + TBICR); + writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); + dev->linkstate = LINK_AUTONEGOTIATE; + + dev->CFG_cache |= CFG_MODE_1000; + } + + writel(dev->CFG_cache, dev->base + CFG); + dprintk("CFG: %08x\n", dev->CFG_cache); + + if (reset_phy) { + printk(KERN_INFO "%s: resetting phy\n", ndev->name); + writel(dev->CFG_cache | CFG_PHY_RST, dev->base + CFG); + msleep(10); + writel(dev->CFG_cache, dev->base + CFG); + } + +#if 0 /* Huh? This sets the PCI latency register. Should be done via + * the PCI layer. FIXME. + */ + if (readl(dev->base + SRR)) + writel(readl(dev->base+0x20c) | 0xfe00, dev->base + 0x20c); +#endif + + /* Note! The DMA burst size interacts with packet + * transmission, such that the largest packet that + * can be transmitted is 8192 - FLTH - burst size. + * If only the transmit fifo was larger... 
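+	 * (with the 512 byte bursts and the 1600 byte fill threshold
+	 *  programmed just below, that works out to roughly
+	 *  8192 - 1600 - 512 = 6080 bytes)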
+ */ + /* Ramit : 1024 DMA is not a good idea, it ends up banging + * some DELL and COMPAQ SMP systems */ + writel(TXCFG_CSI | TXCFG_HBI | TXCFG_ATP | TXCFG_MXDMA512 + | ((1600 / 32) * 0x100), + dev->base + TXCFG); + + /* Flush the interrupt holdoff timer */ + writel(0x000, dev->base + IHR); + writel(0x100, dev->base + IHR); + writel(0x000, dev->base + IHR); + + /* Set Rx to full duplex, don't accept runt, errored, long or length + * range errored packets. Use 512 byte DMA. + */ + /* Ramit : 1024 DMA is not a good idea, it ends up banging + * some DELL and COMPAQ SMP systems + * Turn on ALP, only we are accpeting Jumbo Packets */ + writel(RXCFG_AEP | RXCFG_ARP | RXCFG_AIRL | RXCFG_RX_FD + | RXCFG_STRIPCRC + //| RXCFG_ALP + | (RXCFG_MXDMA512) | 0, dev->base + RXCFG); + + /* Disable priority queueing */ + writel(0, dev->base + PQCR); + + /* Enable IP checksum validation and detetion of VLAN headers. + * Note: do not set the reject options as at least the 0x102 + * revision of the chip does not properly accept IP fragments + * at least for UDP. + */ + /* Ramit : Be sure to turn on RXCFG_ARP if VLAN's are enabled, since + * the MAC it calculates the packetsize AFTER stripping the VLAN + * header, and if a VLAN Tagged packet of 64 bytes is received (like + * a ping with a VLAN header) then the card, strips the 4 byte VLAN + * tag and then checks the packet size, so if RXCFG_ARP is not enabled, + * it discrards it!. These guys...... + * also turn on tag stripping if hardware acceleration is enabled + */ +#ifdef NS83820_VLAN_ACCEL_SUPPORT +#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN|VRCR_VTREN) +#else +#define VRCR_INIT_VALUE (VRCR_IPEN|VRCR_VTDEN) +#endif + writel(VRCR_INIT_VALUE, dev->base + VRCR); + + /* Enable per-packet TCP/UDP/IP checksumming + * and per packet vlan tag insertion if + * vlan hardware acceleration is enabled + */ +#ifdef NS83820_VLAN_ACCEL_SUPPORT +#define VTCR_INIT_VALUE (VTCR_PPCHK|VTCR_VPPTI) +#else +#define VTCR_INIT_VALUE VTCR_PPCHK +#endif + writel(VTCR_INIT_VALUE, dev->base + VTCR); + + /* Ramit : Enable async and sync pause frames */ + /* writel(0, dev->base + PCR); */ + writel((PCR_PS_MCAST | PCR_PS_DA | PCR_PSEN | PCR_FFLO_4K | + PCR_FFHI_8K | PCR_STLO_4 | PCR_STHI_8 | PCR_PAUSE_CNT), + dev->base + PCR); + + /* Disable Wake On Lan */ + writel(0, dev->base + WCSR); + + ns83820_getmac(dev, ndev->dev_addr); + + /* Yes, we support dumb IP checksum on transmit */ + ndev->features |= NETIF_F_SG; + ndev->features |= NETIF_F_IP_CSUM; + +#ifdef NS83820_VLAN_ACCEL_SUPPORT + /* We also support hardware vlan acceleration */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; +#endif + + if (using_dac) { + printk(KERN_INFO "%s: using 64 bit addressing.\n", + ndev->name); + ndev->features |= NETIF_F_HIGHDMA; + } + + printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %pM io=0x%08lx irq=%d f=%s\n", + ndev->name, + (unsigned)readl(dev->base + SRR) >> 8, + (unsigned)readl(dev->base + SRR) & 0xff, + ndev->dev_addr, addr, pci_dev->irq, + (ndev->features & NETIF_F_HIGHDMA) ? 
"h,sg" : "sg" + ); + +#ifdef PHY_CODE_IS_FINISHED + ns83820_probe_phy(ndev); +#endif + + err = register_netdevice(ndev); + if (err) { + printk(KERN_INFO "ns83820: unable to register netdev: %d\n", err); + goto out_cleanup; + } + rtnl_unlock(); + + return 0; + +out_cleanup: + ns83820_disable_interrupts(dev); /* paranoia */ +out_free_irq: + rtnl_unlock(); + free_irq(pci_dev->irq, ndev); +out_disable: + if (dev->base) + iounmap(dev->base); + pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs); + pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs); + pci_disable_device(pci_dev); +out_free: + free_netdev(ndev); +out: + return err; +} + +static void ns83820_remove_one(struct pci_dev *pci_dev) +{ + struct net_device *ndev = pci_get_drvdata(pci_dev); + struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */ + + if (!ndev) /* paranoia */ + return; + + ns83820_disable_interrupts(dev); /* paranoia */ + + unregister_netdev(ndev); + free_irq(dev->pci_dev->irq, ndev); + iounmap(dev->base); + pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC, + dev->tx_descs, dev->tx_phy_descs); + pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC, + dev->rx_info.descs, dev->rx_info.phy_descs); + pci_disable_device(dev->pci_dev); + free_netdev(ndev); +} + +static const struct pci_device_id ns83820_pci_tbl[] = { + { 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, }, + { 0, }, +}; + +static struct pci_driver driver = { + .name = "ns83820", + .id_table = ns83820_pci_tbl, + .probe = ns83820_init_one, + .remove = ns83820_remove_one, +#if 0 /* FIXME: implement */ + .suspend = , + .resume = , +#endif +}; + + +static int __init ns83820_init(void) +{ + printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n"); + return pci_register_driver(&driver); +} + +static void __exit ns83820_exit(void) +{ + pci_unregister_driver(&driver); +} + +MODULE_AUTHOR("Benjamin LaHaise "); +MODULE_DESCRIPTION("National Semiconductor DP83820 10/100/1000 driver"); +MODULE_LICENSE("GPL"); + +MODULE_DEVICE_TABLE(pci, ns83820_pci_tbl); + +module_param(lnksts, int, 0); +MODULE_PARM_DESC(lnksts, "Polarity of LNKSTS bit"); + +module_param(ihr, int, 0); +MODULE_PARM_DESC(ihr, "Time in 100 us increments to delay interrupts (range 0-127)"); + +module_param(reset_phy, int, 0); +MODULE_PARM_DESC(reset_phy, "Set to 1 to reset the PHY on startup"); + +module_init(ns83820_init); +module_exit(ns83820_exit); diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c new file mode 100644 index 000000000..1bd419dbd --- /dev/null +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -0,0 +1,741 @@ +/* + * sonic.c + * + * (C) 2005 Finn Thain + * + * Converted to DMA API, added zero-copy buffer handling, and + * (from the mac68k project) introduced dhd's support for 16-bit cards. + * + * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) + * + * This driver is based on work from Andreas Busse, but most of + * the code is rewritten. + * + * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) + * + * Core code included by system sonic drivers + * + * And... partially rewritten again by David Huggins-Daines in order + * to cope with screwed up Macintosh NICs that may or may not use + * 16-bit DMA. 
+ * + * (C) 1999 David Huggins-Daines + * + */ + +/* + * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook, + * National Semiconductors data sheet for the DP83932B Sonic Ethernet + * controller, and the files "8390.c" and "skeleton.c" in this directory. + * + * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi + * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also + * the NetBSD file "sys/arch/mac68k/dev/if_sn.c". + */ + + + +/* + * Open/initialize the SONIC controller. + * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. + */ +static int sonic_open(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + int i; + + if (sonic_debug > 2) + printk("sonic_open: initializing sonic driver.\n"); + + for (i = 0; i < SONIC_NUM_RRS; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); + if (skb == NULL) { + while(i > 0) { /* free any that were allocated successfully */ + i--; + dev_kfree_skb(lp->rx_skb[i]); + lp->rx_skb[i] = NULL; + } + printk(KERN_ERR "%s: couldn't allocate receive buffers\n", + dev->name); + return -ENOMEM; + } + /* align IP header unless DMA requires otherwise */ + if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) + skb_reserve(skb, 2); + lp->rx_skb[i] = skb; + } + + for (i = 0; i < SONIC_NUM_RRS; i++) { + dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), + SONIC_RBSIZE, DMA_FROM_DEVICE); + if (!laddr) { + while(i > 0) { /* free any that were mapped successfully */ + i--; + dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); + lp->rx_laddr[i] = (dma_addr_t)0; + } + for (i = 0; i < SONIC_NUM_RRS; i++) { + dev_kfree_skb(lp->rx_skb[i]); + lp->rx_skb[i] = NULL; + } + printk(KERN_ERR "%s: couldn't map rx DMA buffers\n", + dev->name); + return -ENOMEM; + } + lp->rx_laddr[i] = laddr; + } + + /* + * Initialize the SONIC + */ + sonic_init(dev); + + netif_start_queue(dev); + + if (sonic_debug > 2) + printk("sonic_open: Initialization done.\n"); + + return 0; +} + + +/* + * Close the SONIC device + */ +static int sonic_close(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + int i; + + if (sonic_debug > 2) + printk("sonic_close\n"); + + netif_stop_queue(dev); + + /* + * stop the SONIC, disable interrupts + */ + SONIC_WRITE(SONIC_IMR, 0); + SONIC_WRITE(SONIC_ISR, 0x7fff); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + + /* unmap and free skbs that haven't been transmitted */ + for (i = 0; i < SONIC_NUM_TDS; i++) { + if(lp->tx_laddr[i]) { + dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE); + lp->tx_laddr[i] = (dma_addr_t)0; + } + if(lp->tx_skb[i]) { + dev_kfree_skb(lp->tx_skb[i]); + lp->tx_skb[i] = NULL; + } + } + + /* unmap and free the receive buffers */ + for (i = 0; i < SONIC_NUM_RRS; i++) { + if(lp->rx_laddr[i]) { + dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); + lp->rx_laddr[i] = (dma_addr_t)0; + } + if(lp->rx_skb[i]) { + dev_kfree_skb(lp->rx_skb[i]); + lp->rx_skb[i] = NULL; + } + } + + return 0; +} + +static void sonic_tx_timeout(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + int i; + /* + * put the Sonic into software-reset mode and + * disable all interrupts before releasing DMA buffers + */ + SONIC_WRITE(SONIC_IMR, 0); + SONIC_WRITE(SONIC_ISR, 0x7fff); + SONIC_WRITE(SONIC_CMD, 
SONIC_CR_RST); + /* We could resend the original skbs. Easier to re-initialise. */ + for (i = 0; i < SONIC_NUM_TDS; i++) { + if(lp->tx_laddr[i]) { + dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE); + lp->tx_laddr[i] = (dma_addr_t)0; + } + if(lp->tx_skb[i]) { + dev_kfree_skb(lp->tx_skb[i]); + lp->tx_skb[i] = NULL; + } + } + /* Try to restart the adaptor. */ + sonic_init(dev); + lp->stats.tx_errors++; + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * transmit packet + * + * Appends new TD during transmission thus avoiding any TX interrupts + * until we run out of TDs. + * This routine interacts closely with the ISR in that it may, + * set tx_skb[i] + * reset the status flags of the new TD + * set and reset EOL flags + * stop the tx queue + * The ISR interacts with this routine in various ways. It may, + * reset tx_skb[i] + * test the EOL and status flags of the TDs + * wake the tx queue + * Concurrently with all of this, the SONIC is potentially writing to + * the status flags of the TDs. + * Until some mutual exclusion is added, this code will not work with SMP. However, + * MIPS Jazz machines and m68k Macs were all uni-processor machines. + */ + +static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + dma_addr_t laddr; + int length; + int entry = lp->next_tx; + + if (sonic_debug > 2) + printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev); + + length = skb->len; + if (length < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + length = ETH_ZLEN; + } + + /* + * Map the packet data into the logical DMA address space + */ + + laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); + if (!laddr) { + printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); + dev_kfree_skb(skb); + return NETDEV_TX_BUSY; + } + + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ + sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ + sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ + sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff); + sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16); + sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length); + sonic_tda_put(dev, entry, SONIC_TD_LINK, + sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); + + /* + * Must set tx_skb[entry] only after clearing status, and + * before clearing EOL and before stopping queue + */ + wmb(); + lp->tx_len[entry] = length; + lp->tx_laddr[entry] = laddr; + lp->tx_skb[entry] = skb; + + wmb(); + sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, + sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL); + lp->eol_tx = entry; + + lp->next_tx = (entry + 1) & SONIC_TDS_MASK; + if (lp->tx_skb[lp->next_tx] != NULL) { + /* The ring is full, the ISR has yet to process the next TD. */ + if (sonic_debug > 3) + printk("%s: stopping queue\n", dev->name); + netif_stop_queue(dev); + /* after this packet, wait for ISR to free up some TDAs */ + } else netif_start_queue(dev); + + if (sonic_debug > 2) + printk("sonic_send_packet: issuing Tx command\n"); + + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + + return NETDEV_TX_OK; +} + +/* + * The typical workload of the driver: + * Handle the network interface interrupts. 
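+ * The handler below keeps re-reading ISR and servicing receive,
+ * transmit completion, error counter and bus retry events until no
+ * unmasked status bits remain set.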
+ */ +static irqreturn_t sonic_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct sonic_local *lp = netdev_priv(dev); + int status; + + if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) + return IRQ_NONE; + + do { + if (status & SONIC_INT_PKTRX) { + if (sonic_debug > 2) + printk("%s: packet rx\n", dev->name); + sonic_rx(dev); /* got packet(s) */ + SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ + } + + if (status & SONIC_INT_TXDN) { + int entry = lp->cur_tx; + int td_status; + int freed_some = 0; + + /* At this point, cur_tx is the index of a TD that is one of: + * unallocated/freed (status set & tx_skb[entry] clear) + * allocated and sent (status set & tx_skb[entry] set ) + * allocated and not yet sent (status clear & tx_skb[entry] set ) + * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) + */ + + if (sonic_debug > 2) + printk("%s: tx done\n", dev->name); + + while (lp->tx_skb[entry] != NULL) { + if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) + break; + + if (td_status & 0x0001) { + lp->stats.tx_packets++; + lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); + } else { + lp->stats.tx_errors++; + if (td_status & 0x0642) + lp->stats.tx_aborted_errors++; + if (td_status & 0x0180) + lp->stats.tx_carrier_errors++; + if (td_status & 0x0020) + lp->stats.tx_window_errors++; + if (td_status & 0x0004) + lp->stats.tx_fifo_errors++; + } + + /* We must free the original skb */ + dev_kfree_skb_irq(lp->tx_skb[entry]); + lp->tx_skb[entry] = NULL; + /* and unmap DMA buffer */ + dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); + lp->tx_laddr[entry] = (dma_addr_t)0; + freed_some = 1; + + if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) { + entry = (entry + 1) & SONIC_TDS_MASK; + break; + } + entry = (entry + 1) & SONIC_TDS_MASK; + } + + if (freed_some || lp->tx_skb[entry] == NULL) + netif_wake_queue(dev); /* The ring is no longer full */ + lp->cur_tx = entry; + SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ + } + + /* + * check error conditions + */ + if (status & SONIC_INT_RFO) { + if (sonic_debug > 1) + printk("%s: rx fifo overrun\n", dev->name); + lp->stats.rx_fifo_errors++; + SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ + } + if (status & SONIC_INT_RDE) { + if (sonic_debug > 1) + printk("%s: rx descriptors exhausted\n", dev->name); + lp->stats.rx_dropped++; + SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ + } + if (status & SONIC_INT_RBAE) { + if (sonic_debug > 1) + printk("%s: rx buffer area exceeded\n", dev->name); + lp->stats.rx_dropped++; + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ + } + + /* counter overruns; all counters are 16bit wide */ + if (status & SONIC_INT_FAE) { + lp->stats.rx_frame_errors += 65536; + SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ + } + if (status & SONIC_INT_CRC) { + lp->stats.rx_crc_errors += 65536; + SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ + } + if (status & SONIC_INT_MP) { + lp->stats.rx_missed_errors += 65536; + SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */ + } + + /* transmit error */ + if (status & SONIC_INT_TXER) { + if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2)) + printk(KERN_ERR "%s: tx fifo underrun\n", dev->name); + SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ + } + + /* bus retry */ + if (status & SONIC_INT_BR) { + 
printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n", + dev->name); + /* ... to help debug DMA problems causing endless interrupts. */ + /* Bounce the eth interface to turn on the interrupt again. */ + SONIC_WRITE(SONIC_IMR, 0); + SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ + } + + /* load CAM done */ + if (status & SONIC_INT_LCD) + SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ + } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); + return IRQ_HANDLED; +} + +/* + * We have a good packet(s), pass it/them up the network stack. + */ +static void sonic_rx(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + int status; + int entry = lp->cur_rx; + + while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { + struct sk_buff *used_skb; + struct sk_buff *new_skb; + dma_addr_t new_laddr; + u16 bufadr_l; + u16 bufadr_h; + int pkt_len; + + status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); + if (status & SONIC_RCR_PRX) { + /* Malloc up new buffer. */ + new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); + if (new_skb == NULL) { + lp->stats.rx_dropped++; + break; + } + /* provide 16 byte IP header alignment unless DMA requires otherwise */ + if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) + skb_reserve(new_skb, 2); + + new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), + SONIC_RBSIZE, DMA_FROM_DEVICE); + if (!new_laddr) { + dev_kfree_skb(new_skb); + printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); + lp->stats.rx_dropped++; + break; + } + + /* now we have a new skb to replace it, pass the used one up the stack */ + dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); + used_skb = lp->rx_skb[entry]; + pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); + skb_trim(used_skb, pkt_len); + used_skb->protocol = eth_type_trans(used_skb, dev); + netif_rx(used_skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pkt_len; + + /* and insert the new skb */ + lp->rx_laddr[entry] = new_laddr; + lp->rx_skb[entry] = new_skb; + + bufadr_l = (unsigned long)new_laddr & 0xffff; + bufadr_h = (unsigned long)new_laddr >> 16; + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); + } else { + /* This should only happen, if we enable accepting broken packets. */ + lp->stats.rx_errors++; + if (status & SONIC_RCR_FAER) + lp->stats.rx_frame_errors++; + if (status & SONIC_RCR_CRCR) + lp->stats.rx_crc_errors++; + } + if (status & SONIC_RCR_LPKT) { + /* + * this was the last packet out of the current receive buffer + * give the buffer back to the SONIC + */ + lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); + if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; + SONIC_WRITE(SONIC_RWP, lp->cur_rwp); + if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { + if (sonic_debug > 2) + printk("%s: rx buffer exhausted\n", dev->name); + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ + } + } else + printk(KERN_ERR "%s: rx desc without RCR_LPKT. 
Shouldn't happen !?\n", + dev->name); + /* + * give back the descriptor + */ + sonic_rda_put(dev, entry, SONIC_RD_LINK, + sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); + sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); + sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, + sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); + lp->eol_rx = entry; + lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; + } + /* + * If any worth-while packets have been received, netif_rx() + * has done a mark_bh(NET_BH) for us and will work on them + * when we get to the bottom-half routine. + */ +} + + +/* + * Get the current statistics. + * This may be called with the device open or closed. + */ +static struct net_device_stats *sonic_get_stats(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + + /* read the tally counter from the SONIC and reset them */ + lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT); + SONIC_WRITE(SONIC_CRCT, 0xffff); + lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET); + SONIC_WRITE(SONIC_FAET, 0xffff); + lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT); + SONIC_WRITE(SONIC_MPT, 0xffff); + + return &lp->stats; +} + + +/* + * Set or clear the multicast filter for this adaptor. + */ +static void sonic_multicast_list(struct net_device *dev) +{ + struct sonic_local *lp = netdev_priv(dev); + unsigned int rcr; + struct netdev_hw_addr *ha; + unsigned char *addr; + int i; + + rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC); + rcr |= SONIC_RCR_BRD; /* accept broadcast packets */ + + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ + rcr |= SONIC_RCR_PRO; + } else { + if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > 15)) { + rcr |= SONIC_RCR_AMC; + } else { + if (sonic_debug > 2) + printk("sonic_multicast_list: mc_count %d\n", + netdev_mc_count(dev)); + sonic_set_cam_enable(dev, 1); /* always enable our own address */ + i = 1; + netdev_for_each_mc_addr(ha, dev) { + addr = ha->addr; + sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); + sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); + sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); + sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i)); + i++; + } + SONIC_WRITE(SONIC_CDC, 16); + /* issue Load CAM command */ + SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); + SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); + } + } + + if (sonic_debug > 2) + printk("sonic_multicast_list: setting RCR=%x\n", rcr); + + SONIC_WRITE(SONIC_RCR, rcr); +} + + +/* + * Initialize the SONIC ethernet controller. 
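+ * This puts the chip into software reset, rebuilds the receive
+ * resource, receive descriptor and transmit descriptor areas,
+ * reloads the CAM and finally re-enables the receiver and the
+ * interrupts.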
+ */ +static int sonic_init(struct net_device *dev) +{ + unsigned int cmd; + struct sonic_local *lp = netdev_priv(dev); + int i; + + /* + * put the Sonic into software-reset mode and + * disable all interrupts + */ + SONIC_WRITE(SONIC_IMR, 0); + SONIC_WRITE(SONIC_ISR, 0x7fff); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + + /* + * clear software reset flag, disable receiver, clear and + * enable interrupts, then completely initialize the SONIC + */ + SONIC_WRITE(SONIC_CMD, 0); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + + /* + * initialize the receive resource area + */ + if (sonic_debug > 2) + printk("sonic_init: initialize receive resource area\n"); + + for (i = 0; i < SONIC_NUM_RRS; i++) { + u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff; + u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16; + sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l); + sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h); + sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1); + sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0); + } + + /* initialize all RRA registers */ + lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * + SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; + lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * + SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; + + SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); + SONIC_WRITE(SONIC_REA, lp->rra_end); + SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); + SONIC_WRITE(SONIC_RWP, lp->cur_rwp); + SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); + SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); + + /* load the resource pointers */ + if (sonic_debug > 3) + printk("sonic_init: issuing RRRA command\n"); + + SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); + i = 0; + while (i++ < 100) { + if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) + break; + } + + if (sonic_debug > 2) + printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i); + + /* + * Initialize the receive descriptors so that they + * become a circular linked list, ie. let the last + * descriptor point to the first again. 
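+ * The last descriptor is also marked EOL; sonic_rx() moves that
+ * mark forward as buffers are handed back to the chip.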
+ */
+	if (sonic_debug > 2)
+		printk("sonic_init: initialize receive descriptors\n");
+	for (i=0; i<SONIC_NUM_RDS; i++) {
+		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
+		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
+		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
+		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
+		sonic_rda_put(dev, i, SONIC_RD_LINK,
+			lp->rda_laddr +
+			((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
+	}
+	/* fix last descriptor */
+	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
+		(lp->rda_laddr & 0xffff) | SONIC_EOL);
+	lp->eol_rx = SONIC_NUM_RDS - 1;
+	lp->cur_rx = 0;
+	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
+	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
+
+	/*
+	 * initialize transmit descriptors
+	 */
+	if (sonic_debug > 2)
+		printk("sonic_init: initialize transmit descriptors\n");
+	for (i = 0; i < SONIC_NUM_TDS; i++) {
+		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
+		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
+		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
+		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
+		sonic_tda_put(dev, i, SONIC_TD_LINK,
+			(lp->tda_laddr & 0xffff) +
+			(i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
+		lp->tx_skb[i] = NULL;
+	}
+	/* fix last descriptor */
+	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
+		(lp->tda_laddr & 0xffff));
+
+	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
+	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
+	lp->cur_tx = lp->next_tx = 0;
+	lp->eol_tx = SONIC_NUM_TDS - 1;
+
+	/*
+	 * put our own address to CAM desc[0]
+	 */
+	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
+	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
+	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
+	sonic_set_cam_enable(dev, 1);
+
+	for (i = 0; i < 16; i++)
+		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
+
+	/*
+	 * initialize CAM registers
+	 */
+	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+	SONIC_WRITE(SONIC_CDC, 16);
+
+	/*
+	 * load the CAM
+	 */
+	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+
+	i = 0;
+	while (i++ < 100) {
+		if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
+			break;
+	}
+	if (sonic_debug > 2) {
+		printk("sonic_init: CMD=%x, ISR=%x, i=%d\n",
+		       SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+	}
+
+	/*
+	 * enable receiver, disable loopback
+	 * and enable all interrupts
+	 */
+	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
+	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
+	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
+	SONIC_WRITE(SONIC_ISR, 0x7fff);
+	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
+
+	cmd = SONIC_READ(SONIC_CMD);
+	if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
+		printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+
+	if (sonic_debug > 2)
+		printk("sonic_init: new status=%x\n",
+		       SONIC_READ(SONIC_CMD));
+
+	return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
new file mode 100644
index 000000000..07091dd27
--- /dev/null
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -0,0 +1,450 @@
+/*
+ * Header file for sonic.c
+ *
+ * (C) Waldorf Electronics, Germany
+ * Written by Andreas Busse
+ *
+ * NOTE: most of the structure definitions here are endian dependent.
+ * If you want to use this driver on big endian machines, the data
+ * and pad structure members must be exchanged. Also, the structures
+ * need to be changed accordingly to the bus size.
+ *
+ * 981229 MSch: did just that for the 68k Mac port (32 bit, big endian)
+ *
+ * 990611 David Huggins-Daines : This machine abstraction
+ * does not cope with 16-bit bus sizes very well. Therefore I have
+ * rewritten it with ugly macros and evil inlines.
+ * + * 050625 Finn Thain: introduced more 32-bit cards and dhd's support + * for 16-bit cards (from the mac68k project). + */ + +#ifndef SONIC_H +#define SONIC_H + + +/* + * SONIC register offsets + */ + +#define SONIC_CMD 0x00 +#define SONIC_DCR 0x01 +#define SONIC_RCR 0x02 +#define SONIC_TCR 0x03 +#define SONIC_IMR 0x04 +#define SONIC_ISR 0x05 + +#define SONIC_UTDA 0x06 +#define SONIC_CTDA 0x07 + +#define SONIC_URDA 0x0d +#define SONIC_CRDA 0x0e +#define SONIC_EOBC 0x13 +#define SONIC_URRA 0x14 +#define SONIC_RSA 0x15 +#define SONIC_REA 0x16 +#define SONIC_RRP 0x17 +#define SONIC_RWP 0x18 +#define SONIC_RSC 0x2b + +#define SONIC_CEP 0x21 +#define SONIC_CAP2 0x22 +#define SONIC_CAP1 0x23 +#define SONIC_CAP0 0x24 +#define SONIC_CE 0x25 +#define SONIC_CDP 0x26 +#define SONIC_CDC 0x27 + +#define SONIC_WT0 0x29 +#define SONIC_WT1 0x2a + +#define SONIC_SR 0x28 + + +/* test-only registers */ + +#define SONIC_TPS 0x08 +#define SONIC_TFC 0x09 +#define SONIC_TSA0 0x0a +#define SONIC_TSA1 0x0b +#define SONIC_TFS 0x0c + +#define SONIC_CRBA0 0x0f +#define SONIC_CRBA1 0x10 +#define SONIC_RBWC0 0x11 +#define SONIC_RBWC1 0x12 +#define SONIC_TTDA 0x20 +#define SONIC_MDT 0x2f + +#define SONIC_TRBA0 0x19 +#define SONIC_TRBA1 0x1a +#define SONIC_TBWC0 0x1b +#define SONIC_TBWC1 0x1c +#define SONIC_LLFA 0x1f + +#define SONIC_ADDR0 0x1d +#define SONIC_ADDR1 0x1e + +/* + * Error counters + */ + +#define SONIC_CRCT 0x2c +#define SONIC_FAET 0x2d +#define SONIC_MPT 0x2e + +#define SONIC_DCR2 0x3f + +/* + * SONIC command bits + */ + +#define SONIC_CR_LCAM 0x0200 +#define SONIC_CR_RRRA 0x0100 +#define SONIC_CR_RST 0x0080 +#define SONIC_CR_ST 0x0020 +#define SONIC_CR_STP 0x0010 +#define SONIC_CR_RXEN 0x0008 +#define SONIC_CR_RXDIS 0x0004 +#define SONIC_CR_TXP 0x0002 +#define SONIC_CR_HTX 0x0001 + +/* + * SONIC data configuration bits + */ + +#define SONIC_DCR_EXBUS 0x8000 +#define SONIC_DCR_LBR 0x2000 +#define SONIC_DCR_PO1 0x1000 +#define SONIC_DCR_PO0 0x0800 +#define SONIC_DCR_SBUS 0x0400 +#define SONIC_DCR_USR1 0x0200 +#define SONIC_DCR_USR0 0x0100 +#define SONIC_DCR_WC1 0x0080 +#define SONIC_DCR_WC0 0x0040 +#define SONIC_DCR_DW 0x0020 +#define SONIC_DCR_BMS 0x0010 +#define SONIC_DCR_RFT1 0x0008 +#define SONIC_DCR_RFT0 0x0004 +#define SONIC_DCR_TFT1 0x0002 +#define SONIC_DCR_TFT0 0x0001 + +/* + * Constants for the SONIC receive control register. 
+ */ + +#define SONIC_RCR_ERR 0x8000 +#define SONIC_RCR_RNT 0x4000 +#define SONIC_RCR_BRD 0x2000 +#define SONIC_RCR_PRO 0x1000 +#define SONIC_RCR_AMC 0x0800 +#define SONIC_RCR_LB1 0x0400 +#define SONIC_RCR_LB0 0x0200 + +#define SONIC_RCR_MC 0x0100 +#define SONIC_RCR_BC 0x0080 +#define SONIC_RCR_LPKT 0x0040 +#define SONIC_RCR_CRS 0x0020 +#define SONIC_RCR_COL 0x0010 +#define SONIC_RCR_CRCR 0x0008 +#define SONIC_RCR_FAER 0x0004 +#define SONIC_RCR_LBK 0x0002 +#define SONIC_RCR_PRX 0x0001 + +#define SONIC_RCR_LB_OFF 0 +#define SONIC_RCR_LB_MAC SONIC_RCR_LB0 +#define SONIC_RCR_LB_ENDEC SONIC_RCR_LB1 +#define SONIC_RCR_LB_TRANS (SONIC_RCR_LB0 | SONIC_RCR_LB1) + +/* default RCR setup */ + +#define SONIC_RCR_DEFAULT (SONIC_RCR_BRD) + + +/* + * SONIC Transmit Control register bits + */ + +#define SONIC_TCR_PINTR 0x8000 +#define SONIC_TCR_POWC 0x4000 +#define SONIC_TCR_CRCI 0x2000 +#define SONIC_TCR_EXDIS 0x1000 +#define SONIC_TCR_EXD 0x0400 +#define SONIC_TCR_DEF 0x0200 +#define SONIC_TCR_NCRS 0x0100 +#define SONIC_TCR_CRLS 0x0080 +#define SONIC_TCR_EXC 0x0040 +#define SONIC_TCR_PMB 0x0008 +#define SONIC_TCR_FU 0x0004 +#define SONIC_TCR_BCM 0x0002 +#define SONIC_TCR_PTX 0x0001 + +#define SONIC_TCR_DEFAULT 0x0000 + +/* + * Constants for the SONIC_INTERRUPT_MASK and + * SONIC_INTERRUPT_STATUS registers. + */ + +#define SONIC_INT_BR 0x4000 +#define SONIC_INT_HBL 0x2000 +#define SONIC_INT_LCD 0x1000 +#define SONIC_INT_PINT 0x0800 +#define SONIC_INT_PKTRX 0x0400 +#define SONIC_INT_TXDN 0x0200 +#define SONIC_INT_TXER 0x0100 +#define SONIC_INT_TC 0x0080 +#define SONIC_INT_RDE 0x0040 +#define SONIC_INT_RBE 0x0020 +#define SONIC_INT_RBAE 0x0010 +#define SONIC_INT_CRC 0x0008 +#define SONIC_INT_FAE 0x0004 +#define SONIC_INT_MP 0x0002 +#define SONIC_INT_RFO 0x0001 + + +/* + * The interrupts we allow. + */ + +#define SONIC_IMR_DEFAULT ( SONIC_INT_BR | \ + SONIC_INT_LCD | \ + SONIC_INT_RFO | \ + SONIC_INT_PKTRX | \ + SONIC_INT_TXDN | \ + SONIC_INT_TXER | \ + SONIC_INT_RDE | \ + SONIC_INT_RBAE | \ + SONIC_INT_CRC | \ + SONIC_INT_FAE | \ + SONIC_INT_MP) + + +#define SONIC_EOL 0x0001 +#define CAM_DESCRIPTORS 16 + +/* Offsets in the various DMA buffers accessed by the SONIC */ + +#define SONIC_BITMODE16 0 +#define SONIC_BITMODE32 1 +#define SONIC_BUS_SCALE(bitmode) ((bitmode) ? 4 : 2) +/* Note! These are all measured in bus-size units, so use SONIC_BUS_SCALE */ +#define SIZEOF_SONIC_RR 4 +#define SONIC_RR_BUFADR_L 0 +#define SONIC_RR_BUFADR_H 1 +#define SONIC_RR_BUFSIZE_L 2 +#define SONIC_RR_BUFSIZE_H 3 + +#define SIZEOF_SONIC_RD 7 +#define SONIC_RD_STATUS 0 +#define SONIC_RD_PKTLEN 1 +#define SONIC_RD_PKTPTR_L 2 +#define SONIC_RD_PKTPTR_H 3 +#define SONIC_RD_SEQNO 4 +#define SONIC_RD_LINK 5 +#define SONIC_RD_IN_USE 6 + +#define SIZEOF_SONIC_TD 8 +#define SONIC_TD_STATUS 0 +#define SONIC_TD_CONFIG 1 +#define SONIC_TD_PKTSIZE 2 +#define SONIC_TD_FRAG_COUNT 3 +#define SONIC_TD_FRAG_PTR_L 4 +#define SONIC_TD_FRAG_PTR_H 5 +#define SONIC_TD_FRAG_SIZE 6 +#define SONIC_TD_LINK 7 + +#define SIZEOF_SONIC_CD 4 +#define SONIC_CD_ENTRY_POINTER 0 +#define SONIC_CD_CAP0 1 +#define SONIC_CD_CAP1 2 +#define SONIC_CD_CAP2 3 + +#define SIZEOF_SONIC_CDA ((CAM_DESCRIPTORS * SIZEOF_SONIC_CD) + 1) +#define SONIC_CDA_CAM_ENABLE (CAM_DESCRIPTORS * SIZEOF_SONIC_CD) + +/* + * Some tunables for the buffer areas. Power of 2 is required + * the current driver uses one receive buffer for each descriptor. + * + * MSch: use more buffer space for the slow m68k Macs! 
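+ *
+ * (Editorial illustration: the power-of-two requirement exists because
+ * the ring indices wrap by masking, e.g. sonic_rx() advances with
+ *
+ *	entry = (entry + 1) & SONIC_RDS_MASK;
+ *
+ * which is only a correct modulo when SONIC_NUM_RDS is a power of two.)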
+ */ +#define SONIC_NUM_RRS 16 /* number of receive resources */ +#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ +#define SONIC_NUM_TDS 16 /* number of transmit descriptors */ + +#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) +#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) + +#define SONIC_RBSIZE 1520 /* size of one resource buffer */ + +/* Again, measured in bus size units! */ +#define SIZEOF_SONIC_DESC (SIZEOF_SONIC_CDA \ + + (SIZEOF_SONIC_TD * SONIC_NUM_TDS) \ + + (SIZEOF_SONIC_RD * SONIC_NUM_RDS) \ + + (SIZEOF_SONIC_RR * SONIC_NUM_RRS)) + +/* Information that need to be kept for each board. */ +struct sonic_local { + /* Bus size. 0 == 16 bits, 1 == 32 bits. */ + int dma_bitmode; + /* Register offset within the longword (independent of endianness, + and varies from one type of Macintosh SONIC to another + (Aarrgh)) */ + int reg_offset; + void *descriptors; + /* Crud. These areas have to be within the same 64K. Therefore + we allocate a desriptors page, and point these to places within it. */ + void *cda; /* CAM descriptor area */ + void *tda; /* Transmit descriptor area */ + void *rra; /* Receive resource area */ + void *rda; /* Receive descriptor area */ + struct sk_buff* volatile rx_skb[SONIC_NUM_RRS]; /* packets to be received */ + struct sk_buff* volatile tx_skb[SONIC_NUM_TDS]; /* packets to be transmitted */ + unsigned int tx_len[SONIC_NUM_TDS]; /* lengths of tx DMA mappings */ + /* Logical DMA addresses on MIPS, bus addresses on m68k + * (so "laddr" is a bit misleading) */ + dma_addr_t descriptors_laddr; + u32 cda_laddr; /* logical DMA address of CDA */ + u32 tda_laddr; /* logical DMA address of TDA */ + u32 rra_laddr; /* logical DMA address of RRA */ + u32 rda_laddr; /* logical DMA address of RDA */ + dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */ + dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */ + unsigned int rra_end; + unsigned int cur_rwp; + unsigned int cur_rx; + unsigned int cur_tx; /* first unacked transmit packet */ + unsigned int eol_rx; + unsigned int eol_tx; /* last unacked transmit packet */ + unsigned int next_tx; /* next free TD */ + struct device *device; /* generic device */ + struct net_device_stats stats; +}; + +#define TX_TIMEOUT (3 * HZ) + +/* Index to functions, as function prototypes. */ + +static int sonic_open(struct net_device *dev); +static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t sonic_interrupt(int irq, void *dev_id); +static void sonic_rx(struct net_device *dev); +static int sonic_close(struct net_device *dev); +static struct net_device_stats *sonic_get_stats(struct net_device *dev); +static void sonic_multicast_list(struct net_device *dev); +static int sonic_init(struct net_device *dev); +static void sonic_tx_timeout(struct net_device *dev); + +/* Internal inlines for reading/writing DMA buffers. Note that bus + size and endianness matter here, whereas they don't for registers, + as far as we can tell. */ +/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put() + is a much better name. 
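+
+   Editorial sketch, assuming the layout implied by SONIC_BUS_SCALE():
+   on a 32-bit card (bitmode == 1) every 16-bit descriptor field gets
+   its own 32-bit slot, and the half of the slot that is accessed is
+   picked by endianness so the value always sits in the slot's
+   low-order 16 bits.  For example
+
+	sonic_rda_get(dev, entry, SONIC_RD_STATUS)
+
+   becomes a 16-bit access at byte offset
+   (entry * SIZEOF_SONIC_RD + SONIC_RD_STATUS) * 4 into the RDA
+   (+2 on big endian), whereas a 16-bit card packs the fields back to
+   back at twice the word offset.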
*/ +static inline void sonic_buf_put(void* base, int bitmode, + int offset, __u16 val) +{ + if (bitmode) +#ifdef __BIG_ENDIAN + ((__u16 *) base + (offset*2))[1] = val; +#else + ((__u16 *) base + (offset*2))[0] = val; +#endif + else + ((__u16 *) base)[offset] = val; +} + +static inline __u16 sonic_buf_get(void* base, int bitmode, + int offset) +{ + if (bitmode) +#ifdef __BIG_ENDIAN + return ((volatile __u16 *) base + (offset*2))[1]; +#else + return ((volatile __u16 *) base + (offset*2))[0]; +#endif + else + return ((volatile __u16 *) base)[offset]; +} + +/* Inlines that you should actually use for reading/writing DMA buffers */ +static inline void sonic_cda_put(struct net_device* dev, int entry, + int offset, __u16 val) +{ + struct sonic_local *lp = netdev_priv(dev); + sonic_buf_put(lp->cda, lp->dma_bitmode, + (entry * SIZEOF_SONIC_CD) + offset, val); +} + +static inline __u16 sonic_cda_get(struct net_device* dev, int entry, + int offset) +{ + struct sonic_local *lp = netdev_priv(dev); + return sonic_buf_get(lp->cda, lp->dma_bitmode, + (entry * SIZEOF_SONIC_CD) + offset); +} + +static inline void sonic_set_cam_enable(struct net_device* dev, __u16 val) +{ + struct sonic_local *lp = netdev_priv(dev); + sonic_buf_put(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE, val); +} + +static inline __u16 sonic_get_cam_enable(struct net_device* dev) +{ + struct sonic_local *lp = netdev_priv(dev); + return sonic_buf_get(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE); +} + +static inline void sonic_tda_put(struct net_device* dev, int entry, + int offset, __u16 val) +{ + struct sonic_local *lp = netdev_priv(dev); + sonic_buf_put(lp->tda, lp->dma_bitmode, + (entry * SIZEOF_SONIC_TD) + offset, val); +} + +static inline __u16 sonic_tda_get(struct net_device* dev, int entry, + int offset) +{ + struct sonic_local *lp = netdev_priv(dev); + return sonic_buf_get(lp->tda, lp->dma_bitmode, + (entry * SIZEOF_SONIC_TD) + offset); +} + +static inline void sonic_rda_put(struct net_device* dev, int entry, + int offset, __u16 val) +{ + struct sonic_local *lp = netdev_priv(dev); + sonic_buf_put(lp->rda, lp->dma_bitmode, + (entry * SIZEOF_SONIC_RD) + offset, val); +} + +static inline __u16 sonic_rda_get(struct net_device* dev, int entry, + int offset) +{ + struct sonic_local *lp = netdev_priv(dev); + return sonic_buf_get(lp->rda, lp->dma_bitmode, + (entry * SIZEOF_SONIC_RD) + offset); +} + +static inline void sonic_rra_put(struct net_device* dev, int entry, + int offset, __u16 val) +{ + struct sonic_local *lp = netdev_priv(dev); + sonic_buf_put(lp->rra, lp->dma_bitmode, + (entry * SIZEOF_SONIC_RR) + offset, val); +} + +static inline __u16 sonic_rra_get(struct net_device* dev, int entry, + int offset) +{ + struct sonic_local *lp = netdev_priv(dev); + return sonic_buf_get(lp->rra, lp->dma_bitmode, + (entry * SIZEOF_SONIC_RR) + offset); +} + +static const char *version = + "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; + +#endif /* SONIC_H */ diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c new file mode 100644 index 000000000..7007d212f --- /dev/null +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -0,0 +1,321 @@ +/* + * xtsonic.c + * + * (C) 2001 - 2007 Tensilica Inc. + * Kevin Chea + * Marc Gauthier + * Chris Zankel + * + * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) + * + * This driver is based on work from Andreas Busse, but most of + * the code is rewritten. 
+ * + * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) + * + * A driver for the onboard Sonic ethernet controller on the XT2000. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static char xtsonic_string[] = "xtsonic"; + +extern unsigned xtboard_nvram_valid(void); +extern void xtboard_get_ether_addr(unsigned char *buf); + +#include "sonic.h" + +/* + * According to the documentation for the Sonic ethernet controller, + * EOBC should be 760 words (1520 bytes) for 32-bit applications, and, + * as such, 2 words less than the buffer size. The value for RBSIZE + * defined in sonic.h, however is only 1520. + * + * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and + * RBSIZE 1520 bytes) + */ +#undef SONIC_RBSIZE +#define SONIC_RBSIZE 1524 + +/* + * The chip provides 256 byte register space. + */ +#define SONIC_MEM_SIZE 0x100 + +/* + * Macros to access SONIC registers + */ +#define SONIC_READ(reg) \ + (0xffff & *((volatile unsigned int *)dev->base_addr+reg)) + +#define SONIC_WRITE(reg,val) \ + *((volatile unsigned int *)dev->base_addr+reg) = val + + +/* Use 0 for production, 1 for verification, and >2 for debug */ +#ifdef SONIC_DEBUG +static unsigned int sonic_debug = SONIC_DEBUG; +#else +static unsigned int sonic_debug = 1; +#endif + +/* + * We cannot use station (ethernet) address prefixes to detect the + * sonic controller since these are board manufacturer depended. + * So we check for known Silicon Revision IDs instead. + */ +static unsigned short known_revisions[] = +{ + 0x101, /* SONIC 83934 */ + 0xffff /* end of list */ +}; + +static int xtsonic_open(struct net_device *dev) +{ + int retval; + + retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); + if (retval) { + printk(KERN_ERR "%s: unable to get IRQ %d.\n", + dev->name, dev->irq); + return -EAGAIN; + } + + retval = sonic_open(dev); + if (retval) + free_irq(dev->irq, dev); + return retval; +} + +static int xtsonic_close(struct net_device *dev) +{ + int err; + err = sonic_close(dev); + free_irq(dev->irq, dev); + return err; +} + +static const struct net_device_ops xtsonic_netdev_ops = { + .ndo_open = xtsonic_open, + .ndo_stop = xtsonic_close, + .ndo_start_xmit = sonic_send_packet, + .ndo_get_stats = sonic_get_stats, + .ndo_set_rx_mode = sonic_multicast_list, + .ndo_tx_timeout = sonic_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int __init sonic_probe1(struct net_device *dev) +{ + static unsigned version_printed = 0; + unsigned int silicon_revision; + struct sonic_local *lp = netdev_priv(dev); + unsigned int base_addr = dev->base_addr; + int i; + int err = 0; + + if (!request_mem_region(base_addr, 0x100, xtsonic_string)) + return -EBUSY; + + /* + * get the Silicon Revision ID. If this is one of the known + * one assume that we found a SONIC ethernet controller at + * the expected location. 
+ */ + silicon_revision = SONIC_READ(SONIC_SR); + if (sonic_debug > 1) + printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision); + + i = 0; + while ((known_revisions[i] != 0xffff) && + (known_revisions[i] != silicon_revision)) + i++; + + if (known_revisions[i] == 0xffff) { + printk("SONIC ethernet controller not found (0x%4x)\n", + silicon_revision); + return -ENODEV; + } + + if (sonic_debug && version_printed++ == 0) + printk(version); + + /* + * Put the sonic into software reset, then retrieve ethernet address. + * Note: we are assuming that the boot-loader has initialized the cam. + */ + SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); + SONIC_WRITE(SONIC_DCR, + SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS); + SONIC_WRITE(SONIC_CEP,0); + SONIC_WRITE(SONIC_IMR,0); + + SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); + SONIC_WRITE(SONIC_CEP,0); + + for (i=0; i<3; i++) { + unsigned int val = SONIC_READ(SONIC_CAP0-i); + dev->dev_addr[i*2] = val; + dev->dev_addr[i*2+1] = val >> 8; + } + + /* Initialize the device structure. */ + + lp->dma_bitmode = SONIC_BITMODE32; + + /* + * Allocate local private descriptor areas in uncached space. + * The entire structure must be located within the same 64kb segment. + * A simple way to ensure this is to allocate twice the + * size of the structure -- given that the structure is + * much less than 64 kB, at least one of the halves of + * the allocated area will be contained entirely in 64 kB. + * We also allocate extra space for a pointer to allow freeing + * this structure later on (in xtsonic_cleanup_module()). + */ + lp->descriptors = dma_alloc_coherent(lp->device, + SIZEOF_SONIC_DESC * + SONIC_BUS_SCALE(lp->dma_bitmode), + &lp->descriptors_laddr, + GFP_KERNEL); + if (lp->descriptors == NULL) { + err = -ENOMEM; + goto out; + } + + lp->cda = lp->descriptors; + lp->tda = lp->cda + (SIZEOF_SONIC_CDA + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + + /* get the virtual dma address */ + + lp->cda_laddr = lp->descriptors_laddr; + lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS + * SONIC_BUS_SCALE(lp->dma_bitmode)); + + dev->netdev_ops = &xtsonic_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + /* + * clear tally counter + */ + SONIC_WRITE(SONIC_CRCT,0xffff); + SONIC_WRITE(SONIC_FAET,0xffff); + SONIC_WRITE(SONIC_MPT,0xffff); + + return 0; +out: + release_region(dev->base_addr, SONIC_MEM_SIZE); + return err; +} + + +/* + * Probe for a SONIC ethernet controller on an XT2000 board. + * Actually probing is superfluous but we're paranoid. 
+ */ + +int xtsonic_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct sonic_local *lp; + struct resource *resmem, *resirq; + int err = 0; + + if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL) + return -ENODEV; + + if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL) + return -ENODEV; + + if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL) + return -ENOMEM; + + lp = netdev_priv(dev); + lp->device = &pdev->dev; + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + netdev_boot_setup_check(dev); + + dev->base_addr = resmem->start; + dev->irq = resirq->start; + + if ((err = sonic_probe1(dev))) + goto out; + if ((err = register_netdev(dev))) + goto out1; + + printk("%s: SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->name, + dev->base_addr, dev->dev_addr, dev->irq); + + return 0; + +out1: + release_region(dev->base_addr, SONIC_MEM_SIZE); +out: + free_netdev(dev); + + return err; +} + +MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); +module_param(sonic_debug, int, 0); +MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)"); + +#include "sonic.c" + +static int xtsonic_device_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct sonic_local *lp = netdev_priv(dev); + + unregister_netdev(dev); + dma_free_coherent(lp->device, + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), + lp->descriptors, lp->descriptors_laddr); + release_region (dev->base_addr, SONIC_MEM_SIZE); + free_netdev(dev); + + return 0; +} + +static struct platform_driver xtsonic_driver = { + .probe = xtsonic_probe, + .remove = xtsonic_device_remove, + .driver = { + .name = xtsonic_string, + }, +}; + +module_platform_driver(xtsonic_driver); diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig new file mode 100644 index 000000000..87abb4f10 --- /dev/null +++ b/drivers/net/ethernet/neterion/Kconfig @@ -0,0 +1,55 @@ +# +# Exar device configuration +# + +config NET_VENDOR_EXAR + bool "Exar devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say + Y and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Exar cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_EXAR + +config S2IO + tristate "Exar Xframe 10Gb Ethernet Adapter" + depends on PCI + ---help--- + This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters. + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called s2io. + +config VXGE + tristate "Exar X3100 Series 10GbE PCIe Server Adapter" + depends on PCI + ---help--- + This driver supports Exar Corp's X3100 Series 10 GbE PCIe + I/O Virtualized Server Adapter. + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called vxge. + +config VXGE_DEBUG_TRACE_ALL + bool "Enabling All Debug trace statements in driver" + default n + depends on VXGE + ---help--- + Say Y here if you want to enabling all the debug trace statements in + the vxge driver. By default only few debug trace statements are + enabled. 
+ +endif # NET_VENDOR_EXAR diff --git a/drivers/net/ethernet/neterion/Makefile b/drivers/net/ethernet/neterion/Makefile new file mode 100644 index 000000000..70c8058a6 --- /dev/null +++ b/drivers/net/ethernet/neterion/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Exar network device drivers. +# + +obj-$(CONFIG_S2IO) += s2io.o +obj-$(CONFIG_VXGE) += vxge/ diff --git a/drivers/net/ethernet/neterion/s2io-regs.h b/drivers/net/ethernet/neterion/s2io-regs.h new file mode 100644 index 000000000..3688325c1 --- /dev/null +++ b/drivers/net/ethernet/neterion/s2io-regs.h @@ -0,0 +1,958 @@ +/************************************************************************ + * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC + * Copyright(c) 2002-2010 Exar Corp. + + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + ************************************************************************/ +#ifndef _REGS_H +#define _REGS_H + +#define TBD 0 + +struct XENA_dev_config { +/* Convention: mHAL_XXX is mask, vHAL_XXX is value */ + +/* General Control-Status Registers */ + u64 general_int_status; +#define GEN_INTR_TXPIC s2BIT(0) +#define GEN_INTR_TXDMA s2BIT(1) +#define GEN_INTR_TXMAC s2BIT(2) +#define GEN_INTR_TXXGXS s2BIT(3) +#define GEN_INTR_TXTRAFFIC s2BIT(8) +#define GEN_INTR_RXPIC s2BIT(32) +#define GEN_INTR_RXDMA s2BIT(33) +#define GEN_INTR_RXMAC s2BIT(34) +#define GEN_INTR_MC s2BIT(35) +#define GEN_INTR_RXXGXS s2BIT(36) +#define GEN_INTR_RXTRAFFIC s2BIT(40) +#define GEN_ERROR_INTR GEN_INTR_TXPIC | GEN_INTR_RXPIC | \ + GEN_INTR_TXDMA | GEN_INTR_RXDMA | \ + GEN_INTR_TXMAC | GEN_INTR_RXMAC | \ + GEN_INTR_TXXGXS| GEN_INTR_RXXGXS| \ + GEN_INTR_MC + + u64 general_int_mask; + + u8 unused0[0x100 - 0x10]; + + u64 sw_reset; +/* XGXS must be removed from reset only once. */ +#define SW_RESET_XENA vBIT(0xA5,0,8) +#define SW_RESET_FLASH vBIT(0xA5,8,8) +#define SW_RESET_EOI vBIT(0xA5,16,8) +#define SW_RESET_ALL (SW_RESET_XENA | \ + SW_RESET_FLASH | \ + SW_RESET_EOI) +/* The SW_RESET register must read this value after a successful reset. 
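+ *
+ * (Editorial aside, for illustration only: the s2BIT() and vBIT()
+ * helpers used throughout this file are assumed to number bits from
+ * the most significant end of the 64-bit register, along the lines of
+ *
+ *	#define s2BIT(loc)		(0x8000000000000000ULL >> (loc))
+ *	#define vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))
+ *
+ * so SW_RESET_XENA above puts 0xA5 in the topmost byte of the word.)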
*/ +#define SW_RESET_RAW_VAL 0xA5000000 + + + u64 adapter_status; +#define ADAPTER_STATUS_TDMA_READY s2BIT(0) +#define ADAPTER_STATUS_RDMA_READY s2BIT(1) +#define ADAPTER_STATUS_PFC_READY s2BIT(2) +#define ADAPTER_STATUS_TMAC_BUF_EMPTY s2BIT(3) +#define ADAPTER_STATUS_PIC_QUIESCENT s2BIT(5) +#define ADAPTER_STATUS_RMAC_REMOTE_FAULT s2BIT(6) +#define ADAPTER_STATUS_RMAC_LOCAL_FAULT s2BIT(7) +#define ADAPTER_STATUS_RMAC_PCC_IDLE vBIT(0xFF,8,8) +#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE vBIT(0x0F,8,8) +#define ADAPTER_STATUS_RC_PRC_QUIESCENT vBIT(0xFF,16,8) +#define ADAPTER_STATUS_MC_DRAM_READY s2BIT(24) +#define ADAPTER_STATUS_MC_QUEUES_READY s2BIT(25) +#define ADAPTER_STATUS_RIC_RUNNING s2BIT(26) +#define ADAPTER_STATUS_M_PLL_LOCK s2BIT(30) +#define ADAPTER_STATUS_P_PLL_LOCK s2BIT(31) + + u64 adapter_control; +#define ADAPTER_CNTL_EN s2BIT(7) +#define ADAPTER_EOI_TX_ON s2BIT(15) +#define ADAPTER_LED_ON s2BIT(23) +#define ADAPTER_UDPI(val) vBIT(val,36,4) +#define ADAPTER_WAIT_INT s2BIT(48) +#define ADAPTER_ECC_EN s2BIT(55) + + u64 serr_source; +#define SERR_SOURCE_PIC s2BIT(0) +#define SERR_SOURCE_TXDMA s2BIT(1) +#define SERR_SOURCE_RXDMA s2BIT(2) +#define SERR_SOURCE_MAC s2BIT(3) +#define SERR_SOURCE_MC s2BIT(4) +#define SERR_SOURCE_XGXS s2BIT(5) +#define SERR_SOURCE_ANY (SERR_SOURCE_PIC | \ + SERR_SOURCE_TXDMA | \ + SERR_SOURCE_RXDMA | \ + SERR_SOURCE_MAC | \ + SERR_SOURCE_MC | \ + SERR_SOURCE_XGXS) + + u64 pci_mode; +#define GET_PCI_MODE(val) ((val & vBIT(0xF, 0, 4)) >> 60) +#define PCI_MODE_PCI_33 0 +#define PCI_MODE_PCI_66 0x1 +#define PCI_MODE_PCIX_M1_66 0x2 +#define PCI_MODE_PCIX_M1_100 0x3 +#define PCI_MODE_PCIX_M1_133 0x4 +#define PCI_MODE_PCIX_M2_66 0x5 +#define PCI_MODE_PCIX_M2_100 0x6 +#define PCI_MODE_PCIX_M2_133 0x7 +#define PCI_MODE_UNSUPPORTED s2BIT(0) +#define PCI_MODE_32_BITS s2BIT(8) +#define PCI_MODE_UNKNOWN_MODE s2BIT(9) + + u8 unused_0[0x800 - 0x128]; + +/* PCI-X Controller registers */ + u64 pic_int_status; + u64 pic_int_mask; +#define PIC_INT_TX s2BIT(0) +#define PIC_INT_FLSH s2BIT(1) +#define PIC_INT_MDIO s2BIT(2) +#define PIC_INT_IIC s2BIT(3) +#define PIC_INT_GPIO s2BIT(4) +#define PIC_INT_RX s2BIT(32) + + u64 txpic_int_reg; + u64 txpic_int_mask; +#define PCIX_INT_REG_ECC_SG_ERR s2BIT(0) +#define PCIX_INT_REG_ECC_DB_ERR s2BIT(1) +#define PCIX_INT_REG_FLASHR_R_FSM_ERR s2BIT(8) +#define PCIX_INT_REG_FLASHR_W_FSM_ERR s2BIT(9) +#define PCIX_INT_REG_INI_TX_FSM_SERR s2BIT(10) +#define PCIX_INT_REG_INI_TXO_FSM_ERR s2BIT(11) +#define PCIX_INT_REG_TRT_FSM_SERR s2BIT(13) +#define PCIX_INT_REG_SRT_FSM_SERR s2BIT(14) +#define PCIX_INT_REG_PIFR_FSM_SERR s2BIT(15) +#define PCIX_INT_REG_WRC_TX_SEND_FSM_SERR s2BIT(21) +#define PCIX_INT_REG_RRC_TX_REQ_FSM_SERR s2BIT(23) +#define PCIX_INT_REG_INI_RX_FSM_SERR s2BIT(48) +#define PCIX_INT_REG_RA_RX_FSM_SERR s2BIT(50) +/* +#define PCIX_INT_REG_WRC_RX_SEND_FSM_SERR s2BIT(52) +#define PCIX_INT_REG_RRC_RX_REQ_FSM_SERR s2BIT(54) +#define PCIX_INT_REG_RRC_RX_SPLIT_FSM_SERR s2BIT(58) +*/ + u64 txpic_alarms; + u64 rxpic_int_reg; + u64 rxpic_int_mask; + u64 rxpic_alarms; + + u64 flsh_int_reg; + u64 flsh_int_mask; +#define PIC_FLSH_INT_REG_CYCLE_FSM_ERR s2BIT(63) +#define PIC_FLSH_INT_REG_ERR s2BIT(62) + u64 flash_alarms; + + u64 mdio_int_reg; + u64 mdio_int_mask; +#define MDIO_INT_REG_MDIO_BUS_ERR s2BIT(0) +#define MDIO_INT_REG_DTX_BUS_ERR s2BIT(8) +#define MDIO_INT_REG_LASI s2BIT(39) + u64 mdio_alarms; + + u64 iic_int_reg; + u64 iic_int_mask; +#define IIC_INT_REG_BUS_FSM_ERR s2BIT(4) +#define IIC_INT_REG_BIT_FSM_ERR s2BIT(5) +#define 
IIC_INT_REG_CYCLE_FSM_ERR s2BIT(6) +#define IIC_INT_REG_REQ_FSM_ERR s2BIT(7) +#define IIC_INT_REG_ACK_ERR s2BIT(8) + u64 iic_alarms; + + u8 unused4[0x08]; + + u64 gpio_int_reg; +#define GPIO_INT_REG_DP_ERR_INT s2BIT(0) +#define GPIO_INT_REG_LINK_DOWN s2BIT(1) +#define GPIO_INT_REG_LINK_UP s2BIT(2) + u64 gpio_int_mask; +#define GPIO_INT_MASK_LINK_DOWN s2BIT(1) +#define GPIO_INT_MASK_LINK_UP s2BIT(2) + u64 gpio_alarms; + + u8 unused5[0x38]; + + u64 tx_traffic_int; +#define TX_TRAFFIC_INT_n(n) s2BIT(n) + u64 tx_traffic_mask; + + u64 rx_traffic_int; +#define RX_TRAFFIC_INT_n(n) s2BIT(n) + u64 rx_traffic_mask; + +/* PIC Control registers */ + u64 pic_control; +#define PIC_CNTL_RX_ALARM_MAP_1 s2BIT(0) +#define PIC_CNTL_SHARED_SPLITS(n) vBIT(n,11,5) + + u64 swapper_ctrl; +#define SWAPPER_CTRL_PIF_R_FE s2BIT(0) +#define SWAPPER_CTRL_PIF_R_SE s2BIT(1) +#define SWAPPER_CTRL_PIF_W_FE s2BIT(8) +#define SWAPPER_CTRL_PIF_W_SE s2BIT(9) +#define SWAPPER_CTRL_TXP_FE s2BIT(16) +#define SWAPPER_CTRL_TXP_SE s2BIT(17) +#define SWAPPER_CTRL_TXD_R_FE s2BIT(18) +#define SWAPPER_CTRL_TXD_R_SE s2BIT(19) +#define SWAPPER_CTRL_TXD_W_FE s2BIT(20) +#define SWAPPER_CTRL_TXD_W_SE s2BIT(21) +#define SWAPPER_CTRL_TXF_R_FE s2BIT(22) +#define SWAPPER_CTRL_TXF_R_SE s2BIT(23) +#define SWAPPER_CTRL_RXD_R_FE s2BIT(32) +#define SWAPPER_CTRL_RXD_R_SE s2BIT(33) +#define SWAPPER_CTRL_RXD_W_FE s2BIT(34) +#define SWAPPER_CTRL_RXD_W_SE s2BIT(35) +#define SWAPPER_CTRL_RXF_W_FE s2BIT(36) +#define SWAPPER_CTRL_RXF_W_SE s2BIT(37) +#define SWAPPER_CTRL_XMSI_FE s2BIT(40) +#define SWAPPER_CTRL_XMSI_SE s2BIT(41) +#define SWAPPER_CTRL_STATS_FE s2BIT(48) +#define SWAPPER_CTRL_STATS_SE s2BIT(49) + + u64 pif_rd_swapper_fb; +#define IF_RD_SWAPPER_FB 0x0123456789ABCDEF + + u64 scheduled_int_ctrl; +#define SCHED_INT_CTRL_TIMER_EN s2BIT(0) +#define SCHED_INT_CTRL_ONE_SHOT s2BIT(1) +#define SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6) +#define SCHED_INT_PERIOD TBD + + u64 txreqtimeout; +#define TXREQTO_VAL(val) vBIT(val,0,32) +#define TXREQTO_EN s2BIT(63) + + u64 statsreqtimeout; +#define STATREQTO_VAL(n) TBD +#define STATREQTO_EN s2BIT(63) + + u64 read_retry_delay; + u64 read_retry_acceleration; + u64 write_retry_delay; + u64 write_retry_acceleration; + + u64 xmsi_control; + u64 xmsi_access; + u64 xmsi_address; + u64 xmsi_data; + + u64 rx_mat; +#define RX_MAT_SET(ring, msi) vBIT(msi, (8 * ring), 8) + + u8 unused6[0x8]; + + u64 tx_mat0_n[0x8]; +#define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) + + u64 xmsi_mask_reg; + u64 stat_byte_cnt; +#define STAT_BC(n) vBIT(n,4,12) + + /* Automated statistics collection */ + u64 stat_cfg; +#define STAT_CFG_STAT_EN s2BIT(0) +#define STAT_CFG_ONE_SHOT_EN s2BIT(1) +#define STAT_CFG_STAT_NS_EN s2BIT(8) +#define STAT_CFG_STAT_RO s2BIT(9) +#define STAT_TRSF_PER(n) TBD +#define PER_SEC 0x208d5 +#define SET_UPDT_PERIOD(n) vBIT((PER_SEC*n),32,32) +#define SET_UPDT_CLICKS(val) vBIT(val, 32, 32) + + u64 stat_addr; + + /* General Configuration */ + u64 mdio_control; +#define MDIO_MMD_INDX_ADDR(val) vBIT(val, 0, 16) +#define MDIO_MMD_DEV_ADDR(val) vBIT(val, 19, 5) +#define MDIO_MMS_PRT_ADDR(val) vBIT(val, 27, 5) +#define MDIO_CTRL_START_TRANS(val) vBIT(val, 56, 4) +#define MDIO_OP(val) vBIT(val, 60, 2) +#define MDIO_OP_ADDR_TRANS 0x0 +#define MDIO_OP_WRITE_TRANS 0x1 +#define MDIO_OP_READ_POST_INC_TRANS 0x2 +#define MDIO_OP_READ_TRANS 0x3 +#define MDIO_MDIO_DATA(val) vBIT(val, 32, 16) + + u64 dtx_control; + + u64 i2c_control; +#define I2C_CONTROL_DEV_ID(id) vBIT(id,1,3) +#define I2C_CONTROL_ADDR(addr) vBIT(addr,5,11) +#define 
I2C_CONTROL_BYTE_CNT(cnt) vBIT(cnt,22,2) +#define I2C_CONTROL_READ s2BIT(24) +#define I2C_CONTROL_NACK s2BIT(25) +#define I2C_CONTROL_CNTL_START vBIT(0xE,28,4) +#define I2C_CONTROL_CNTL_END(val) (val & vBIT(0x1,28,4)) +#define I2C_CONTROL_GET_DATA(val) (u32)(val & 0xFFFFFFFF) +#define I2C_CONTROL_SET_DATA(val) vBIT(val,32,32) + + u64 gpio_control; +#define GPIO_CTRL_GPIO_0 s2BIT(8) + u64 misc_control; +#define FAULT_BEHAVIOUR s2BIT(0) +#define EXT_REQ_EN s2BIT(1) +#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3) + + u8 unused7_1[0x230 - 0x208]; + + u64 pic_control2; + u64 ini_dperr_ctrl; + + u64 wreq_split_mask; +#define WREQ_SPLIT_MASK_SET_MASK(val) vBIT(val, 52, 12) + + u8 unused7_2[0x800 - 0x248]; + +/* TxDMA registers */ + u64 txdma_int_status; + u64 txdma_int_mask; +#define TXDMA_PFC_INT s2BIT(0) +#define TXDMA_TDA_INT s2BIT(1) +#define TXDMA_PCC_INT s2BIT(2) +#define TXDMA_TTI_INT s2BIT(3) +#define TXDMA_LSO_INT s2BIT(4) +#define TXDMA_TPA_INT s2BIT(5) +#define TXDMA_SM_INT s2BIT(6) + u64 pfc_err_reg; +#define PFC_ECC_SG_ERR s2BIT(7) +#define PFC_ECC_DB_ERR s2BIT(15) +#define PFC_SM_ERR_ALARM s2BIT(23) +#define PFC_MISC_0_ERR s2BIT(31) +#define PFC_MISC_1_ERR s2BIT(32) +#define PFC_PCIX_ERR s2BIT(39) + u64 pfc_err_mask; + u64 pfc_err_alarm; + + u64 tda_err_reg; +#define TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8) +#define TDA_Fn_ECC_DB_ERR vBIT(0xff,8,8) +#define TDA_SM0_ERR_ALARM s2BIT(22) +#define TDA_SM1_ERR_ALARM s2BIT(23) +#define TDA_PCIX_ERR s2BIT(39) + u64 tda_err_mask; + u64 tda_err_alarm; + + u64 pcc_err_reg; +#define PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8) +#define PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8) +#define PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8) +#define PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8) +#define PCC_SM_ERR_ALARM vBIT(0xff,32,8) +#define PCC_WR_ERR_ALARM vBIT(0xff,40,8) +#define PCC_N_SERR vBIT(0xff,48,8) +#define PCC_6_COF_OV_ERR s2BIT(56) +#define PCC_7_COF_OV_ERR s2BIT(57) +#define PCC_6_LSO_OV_ERR s2BIT(58) +#define PCC_7_LSO_OV_ERR s2BIT(59) +#define PCC_ENABLE_FOUR vBIT(0x0F,0,8) + u64 pcc_err_mask; + u64 pcc_err_alarm; + + u64 tti_err_reg; +#define TTI_ECC_SG_ERR s2BIT(7) +#define TTI_ECC_DB_ERR s2BIT(15) +#define TTI_SM_ERR_ALARM s2BIT(23) + u64 tti_err_mask; + u64 tti_err_alarm; + + u64 lso_err_reg; +#define LSO6_SEND_OFLOW s2BIT(12) +#define LSO7_SEND_OFLOW s2BIT(13) +#define LSO6_ABORT s2BIT(14) +#define LSO7_ABORT s2BIT(15) +#define LSO6_SM_ERR_ALARM s2BIT(22) +#define LSO7_SM_ERR_ALARM s2BIT(23) + u64 lso_err_mask; + u64 lso_err_alarm; + + u64 tpa_err_reg; +#define TPA_TX_FRM_DROP s2BIT(7) +#define TPA_SM_ERR_ALARM s2BIT(23) + + u64 tpa_err_mask; + u64 tpa_err_alarm; + + u64 sm_err_reg; +#define SM_SM_ERR_ALARM s2BIT(15) + u64 sm_err_mask; + u64 sm_err_alarm; + + u8 unused8[0x100 - 0xB8]; + +/* TxDMA arbiter */ + u64 tx_dma_wrap_stat; + +/* Tx FIFO controller */ +#define X_MAX_FIFOS 8 +#define X_FIFO_MAX_LEN 0x1FFF /*8191 */ + u64 tx_fifo_partition_0; +#define TX_FIFO_PARTITION_EN s2BIT(0) +#define TX_FIFO_PARTITION_0_PRI(val) vBIT(val,5,3) +#define TX_FIFO_PARTITION_0_LEN(val) vBIT(val,19,13) +#define TX_FIFO_PARTITION_1_PRI(val) vBIT(val,37,3) +#define TX_FIFO_PARTITION_1_LEN(val) vBIT(val,51,13 ) + + u64 tx_fifo_partition_1; +#define TX_FIFO_PARTITION_2_PRI(val) vBIT(val,5,3) +#define TX_FIFO_PARTITION_2_LEN(val) vBIT(val,19,13) +#define TX_FIFO_PARTITION_3_PRI(val) vBIT(val,37,3) +#define TX_FIFO_PARTITION_3_LEN(val) vBIT(val,51,13) + + u64 tx_fifo_partition_2; +#define TX_FIFO_PARTITION_4_PRI(val) vBIT(val,5,3) +#define TX_FIFO_PARTITION_4_LEN(val) vBIT(val,19,13) +#define 
TX_FIFO_PARTITION_5_PRI(val) vBIT(val,37,3) +#define TX_FIFO_PARTITION_5_LEN(val) vBIT(val,51,13) + + u64 tx_fifo_partition_3; +#define TX_FIFO_PARTITION_6_PRI(val) vBIT(val,5,3) +#define TX_FIFO_PARTITION_6_LEN(val) vBIT(val,19,13) +#define TX_FIFO_PARTITION_7_PRI(val) vBIT(val,37,3) +#define TX_FIFO_PARTITION_7_LEN(val) vBIT(val,51,13) + +#define TX_FIFO_PARTITION_PRI_0 0 /* highest */ +#define TX_FIFO_PARTITION_PRI_1 1 +#define TX_FIFO_PARTITION_PRI_2 2 +#define TX_FIFO_PARTITION_PRI_3 3 +#define TX_FIFO_PARTITION_PRI_4 4 +#define TX_FIFO_PARTITION_PRI_5 5 +#define TX_FIFO_PARTITION_PRI_6 6 +#define TX_FIFO_PARTITION_PRI_7 7 /* lowest */ + + u64 tx_w_round_robin_0; + u64 tx_w_round_robin_1; + u64 tx_w_round_robin_2; + u64 tx_w_round_robin_3; + u64 tx_w_round_robin_4; + + u64 tti_command_mem; +#define TTI_CMD_MEM_WE s2BIT(7) +#define TTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15) +#define TTI_CMD_MEM_STROBE_BEING_EXECUTED s2BIT(15) +#define TTI_CMD_MEM_OFFSET(n) vBIT(n,26,6) + + u64 tti_data1_mem; +#define TTI_DATA1_MEM_TX_TIMER_VAL(n) vBIT(n,6,26) +#define TTI_DATA1_MEM_TX_TIMER_AC_CI(n) vBIT(n,38,2) +#define TTI_DATA1_MEM_TX_TIMER_AC_EN s2BIT(38) +#define TTI_DATA1_MEM_TX_TIMER_CI_EN s2BIT(39) +#define TTI_DATA1_MEM_TX_URNG_A(n) vBIT(n,41,7) +#define TTI_DATA1_MEM_TX_URNG_B(n) vBIT(n,49,7) +#define TTI_DATA1_MEM_TX_URNG_C(n) vBIT(n,57,7) + + u64 tti_data2_mem; +#define TTI_DATA2_MEM_TX_UFC_A(n) vBIT(n,0,16) +#define TTI_DATA2_MEM_TX_UFC_B(n) vBIT(n,16,16) +#define TTI_DATA2_MEM_TX_UFC_C(n) vBIT(n,32,16) +#define TTI_DATA2_MEM_TX_UFC_D(n) vBIT(n,48,16) + +/* Tx Protocol assist */ + u64 tx_pa_cfg; +#define TX_PA_CFG_IGNORE_FRM_ERR s2BIT(1) +#define TX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2) +#define TX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3) +#define TX_PA_CFG_IGNORE_L2_ERR s2BIT(6) +#define RX_PA_CFG_STRIP_VLAN_TAG s2BIT(15) + +/* Recent add, used only debug purposes. 
*/ + u64 pcc_enable; + + u8 unused9[0x700 - 0x178]; + + u64 txdma_debug_ctrl; + + u8 unused10[0x1800 - 0x1708]; + +/* RxDMA Registers */ + u64 rxdma_int_status; + u64 rxdma_int_mask; +#define RXDMA_INT_RC_INT_M s2BIT(0) +#define RXDMA_INT_RPA_INT_M s2BIT(1) +#define RXDMA_INT_RDA_INT_M s2BIT(2) +#define RXDMA_INT_RTI_INT_M s2BIT(3) + + u64 rda_err_reg; +#define RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8) +#define RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8) +#define RDA_FRM_ECC_SG_ERR s2BIT(23) +#define RDA_FRM_ECC_DB_N_AERR s2BIT(31) +#define RDA_SM1_ERR_ALARM s2BIT(38) +#define RDA_SM0_ERR_ALARM s2BIT(39) +#define RDA_MISC_ERR s2BIT(47) +#define RDA_PCIX_ERR s2BIT(55) +#define RDA_RXD_ECC_DB_SERR s2BIT(63) + u64 rda_err_mask; + u64 rda_err_alarm; + + u64 rc_err_reg; +#define RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8) +#define RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8) +#define RC_FTC_ECC_SG_ERR s2BIT(23) +#define RC_FTC_ECC_DB_ERR s2BIT(31) +#define RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8) +#define RC_FTC_SM_ERR_ALARM s2BIT(47) +#define RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8) + u64 rc_err_mask; + u64 rc_err_alarm; + + u64 prc_pcix_err_reg; +#define PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8) +#define PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8) +#define PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8) +#define PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8) +#define PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8) +#define PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8) + u64 prc_pcix_err_mask; + u64 prc_pcix_err_alarm; + + u64 rpa_err_reg; +#define RPA_ECC_SG_ERR s2BIT(7) +#define RPA_ECC_DB_ERR s2BIT(15) +#define RPA_FLUSH_REQUEST s2BIT(22) +#define RPA_SM_ERR_ALARM s2BIT(23) +#define RPA_CREDIT_ERR s2BIT(31) + u64 rpa_err_mask; + u64 rpa_err_alarm; + + u64 rti_err_reg; +#define RTI_ECC_SG_ERR s2BIT(7) +#define RTI_ECC_DB_ERR s2BIT(15) +#define RTI_SM_ERR_ALARM s2BIT(23) + u64 rti_err_mask; + u64 rti_err_alarm; + + u8 unused11[0x100 - 0x88]; + +/* DMA arbiter */ + u64 rx_queue_priority; +#define RX_QUEUE_0_PRIORITY(val) vBIT(val,5,3) +#define RX_QUEUE_1_PRIORITY(val) vBIT(val,13,3) +#define RX_QUEUE_2_PRIORITY(val) vBIT(val,21,3) +#define RX_QUEUE_3_PRIORITY(val) vBIT(val,29,3) +#define RX_QUEUE_4_PRIORITY(val) vBIT(val,37,3) +#define RX_QUEUE_5_PRIORITY(val) vBIT(val,45,3) +#define RX_QUEUE_6_PRIORITY(val) vBIT(val,53,3) +#define RX_QUEUE_7_PRIORITY(val) vBIT(val,61,3) + +#define RX_QUEUE_PRI_0 0 /* highest */ +#define RX_QUEUE_PRI_1 1 +#define RX_QUEUE_PRI_2 2 +#define RX_QUEUE_PRI_3 3 +#define RX_QUEUE_PRI_4 4 +#define RX_QUEUE_PRI_5 5 +#define RX_QUEUE_PRI_6 6 +#define RX_QUEUE_PRI_7 7 /* lowest */ + + u64 rx_w_round_robin_0; + u64 rx_w_round_robin_1; + u64 rx_w_round_robin_2; + u64 rx_w_round_robin_3; + u64 rx_w_round_robin_4; + + /* Per-ring controller regs */ +#define RX_MAX_RINGS 8 +#if 0 +#define RX_MAX_RINGS_SZ 0xFFFF /* 65536 */ +#define RX_MIN_RINGS_SZ 0x3F /* 63 */ +#endif + u64 prc_rxd0_n[RX_MAX_RINGS]; + u64 prc_ctrl_n[RX_MAX_RINGS]; +#define PRC_CTRL_RC_ENABLED s2BIT(7) +#define PRC_CTRL_RING_MODE (s2BIT(14)|s2BIT(15)) +#define PRC_CTRL_RING_MODE_1 vBIT(0,14,2) +#define PRC_CTRL_RING_MODE_3 vBIT(1,14,2) +#define PRC_CTRL_RING_MODE_5 vBIT(2,14,2) +#define PRC_CTRL_RING_MODE_x vBIT(3,14,2) +#define PRC_CTRL_NO_SNOOP (s2BIT(22)|s2BIT(23)) +#define PRC_CTRL_NO_SNOOP_DESC s2BIT(22) +#define PRC_CTRL_NO_SNOOP_BUFF s2BIT(23) +#define PRC_CTRL_BIMODAL_INTERRUPT s2BIT(37) +#define PRC_CTRL_GROUP_READS s2BIT(38) +#define PRC_CTRL_RXD_BACKOFF_INTERVAL(val) vBIT(val,40,24) + + u64 prc_alarm_action; +#define PRC_ALARM_ACTION_RR_R0_STOP s2BIT(3) +#define PRC_ALARM_ACTION_RW_R0_STOP s2BIT(7) +#define 
PRC_ALARM_ACTION_RR_R1_STOP s2BIT(11) +#define PRC_ALARM_ACTION_RW_R1_STOP s2BIT(15) +#define PRC_ALARM_ACTION_RR_R2_STOP s2BIT(19) +#define PRC_ALARM_ACTION_RW_R2_STOP s2BIT(23) +#define PRC_ALARM_ACTION_RR_R3_STOP s2BIT(27) +#define PRC_ALARM_ACTION_RW_R3_STOP s2BIT(31) +#define PRC_ALARM_ACTION_RR_R4_STOP s2BIT(35) +#define PRC_ALARM_ACTION_RW_R4_STOP s2BIT(39) +#define PRC_ALARM_ACTION_RR_R5_STOP s2BIT(43) +#define PRC_ALARM_ACTION_RW_R5_STOP s2BIT(47) +#define PRC_ALARM_ACTION_RR_R6_STOP s2BIT(51) +#define PRC_ALARM_ACTION_RW_R6_STOP s2BIT(55) +#define PRC_ALARM_ACTION_RR_R7_STOP s2BIT(59) +#define PRC_ALARM_ACTION_RW_R7_STOP s2BIT(63) + +/* Receive traffic interrupts */ + u64 rti_command_mem; +#define RTI_CMD_MEM_WE s2BIT(7) +#define RTI_CMD_MEM_STROBE s2BIT(15) +#define RTI_CMD_MEM_STROBE_NEW_CMD s2BIT(15) +#define RTI_CMD_MEM_STROBE_CMD_BEING_EXECUTED s2BIT(15) +#define RTI_CMD_MEM_OFFSET(n) vBIT(n,29,3) + + u64 rti_data1_mem; +#define RTI_DATA1_MEM_RX_TIMER_VAL(n) vBIT(n,3,29) +#define RTI_DATA1_MEM_RX_TIMER_AC_EN s2BIT(38) +#define RTI_DATA1_MEM_RX_TIMER_CI_EN s2BIT(39) +#define RTI_DATA1_MEM_RX_URNG_A(n) vBIT(n,41,7) +#define RTI_DATA1_MEM_RX_URNG_B(n) vBIT(n,49,7) +#define RTI_DATA1_MEM_RX_URNG_C(n) vBIT(n,57,7) + + u64 rti_data2_mem; +#define RTI_DATA2_MEM_RX_UFC_A(n) vBIT(n,0,16) +#define RTI_DATA2_MEM_RX_UFC_B(n) vBIT(n,16,16) +#define RTI_DATA2_MEM_RX_UFC_C(n) vBIT(n,32,16) +#define RTI_DATA2_MEM_RX_UFC_D(n) vBIT(n,48,16) + + u64 rx_pa_cfg; +#define RX_PA_CFG_IGNORE_FRM_ERR s2BIT(1) +#define RX_PA_CFG_IGNORE_SNAP_OUI s2BIT(2) +#define RX_PA_CFG_IGNORE_LLC_CTRL s2BIT(3) +#define RX_PA_CFG_IGNORE_L2_ERR s2BIT(6) + + u64 unused_11_1; + + u64 ring_bump_counter1; + u64 ring_bump_counter2; + + u8 unused12[0x700 - 0x1F0]; + + u64 rxdma_debug_ctrl; + + u8 unused13[0x2000 - 0x1f08]; + +/* Media Access Controller Register */ + u64 mac_int_status; + u64 mac_int_mask; +#define MAC_INT_STATUS_TMAC_INT s2BIT(0) +#define MAC_INT_STATUS_RMAC_INT s2BIT(1) + + u64 mac_tmac_err_reg; +#define TMAC_ECC_SG_ERR s2BIT(7) +#define TMAC_ECC_DB_ERR s2BIT(15) +#define TMAC_TX_BUF_OVRN s2BIT(23) +#define TMAC_TX_CRI_ERR s2BIT(31) +#define TMAC_TX_SM_ERR s2BIT(39) +#define TMAC_DESC_ECC_SG_ERR s2BIT(47) +#define TMAC_DESC_ECC_DB_ERR s2BIT(55) + + u64 mac_tmac_err_mask; + u64 mac_tmac_err_alarm; + + u64 mac_rmac_err_reg; +#define RMAC_RX_BUFF_OVRN s2BIT(0) +#define RMAC_FRM_RCVD_INT s2BIT(1) +#define RMAC_UNUSED_INT s2BIT(2) +#define RMAC_RTS_PNUM_ECC_SG_ERR s2BIT(5) +#define RMAC_RTS_DS_ECC_SG_ERR s2BIT(6) +#define RMAC_RD_BUF_ECC_SG_ERR s2BIT(7) +#define RMAC_RTH_MAP_ECC_SG_ERR s2BIT(8) +#define RMAC_RTH_SPDM_ECC_SG_ERR s2BIT(9) +#define RMAC_RTS_VID_ECC_SG_ERR s2BIT(10) +#define RMAC_DA_SHADOW_ECC_SG_ERR s2BIT(11) +#define RMAC_RTS_PNUM_ECC_DB_ERR s2BIT(13) +#define RMAC_RTS_DS_ECC_DB_ERR s2BIT(14) +#define RMAC_RD_BUF_ECC_DB_ERR s2BIT(15) +#define RMAC_RTH_MAP_ECC_DB_ERR s2BIT(16) +#define RMAC_RTH_SPDM_ECC_DB_ERR s2BIT(17) +#define RMAC_RTS_VID_ECC_DB_ERR s2BIT(18) +#define RMAC_DA_SHADOW_ECC_DB_ERR s2BIT(19) +#define RMAC_LINK_STATE_CHANGE_INT s2BIT(31) +#define RMAC_RX_SM_ERR s2BIT(39) +#define RMAC_SINGLE_ECC_ERR (s2BIT(5) | s2BIT(6) | s2BIT(7) |\ + s2BIT(8) | s2BIT(9) | s2BIT(10)|\ + s2BIT(11)) +#define RMAC_DOUBLE_ECC_ERR (s2BIT(13) | s2BIT(14) | s2BIT(15) |\ + s2BIT(16) | s2BIT(17) | s2BIT(18)|\ + s2BIT(19)) + u64 mac_rmac_err_mask; + u64 mac_rmac_err_alarm; + + u8 unused14[0x100 - 0x40]; + + u64 mac_cfg; +#define MAC_CFG_TMAC_ENABLE s2BIT(0) +#define MAC_CFG_RMAC_ENABLE s2BIT(1) +#define 
MAC_CFG_LAN_NOT_WAN s2BIT(2) +#define MAC_CFG_TMAC_LOOPBACK s2BIT(3) +#define MAC_CFG_TMAC_APPEND_PAD s2BIT(4) +#define MAC_CFG_RMAC_STRIP_FCS s2BIT(5) +#define MAC_CFG_RMAC_STRIP_PAD s2BIT(6) +#define MAC_CFG_RMAC_PROM_ENABLE s2BIT(7) +#define MAC_RMAC_DISCARD_PFRM s2BIT(8) +#define MAC_RMAC_BCAST_ENABLE s2BIT(9) +#define MAC_RMAC_ALL_ADDR_ENABLE s2BIT(10) +#define MAC_RMAC_INVLD_IPG_THR(val) vBIT(val,16,8) + + u64 tmac_avg_ipg; +#define TMAC_AVG_IPG(val) vBIT(val,0,8) + + u64 rmac_max_pyld_len; +#define RMAC_MAX_PYLD_LEN(val) vBIT(val,2,14) +#define RMAC_MAX_PYLD_LEN_DEF vBIT(1500,2,14) +#define RMAC_MAX_PYLD_LEN_JUMBO_DEF vBIT(9600,2,14) + + u64 rmac_err_cfg; +#define RMAC_ERR_FCS s2BIT(0) +#define RMAC_ERR_FCS_ACCEPT s2BIT(1) +#define RMAC_ERR_TOO_LONG s2BIT(1) +#define RMAC_ERR_TOO_LONG_ACCEPT s2BIT(1) +#define RMAC_ERR_RUNT s2BIT(2) +#define RMAC_ERR_RUNT_ACCEPT s2BIT(2) +#define RMAC_ERR_LEN_MISMATCH s2BIT(3) +#define RMAC_ERR_LEN_MISMATCH_ACCEPT s2BIT(3) + + u64 rmac_cfg_key; +#define RMAC_CFG_KEY(val) vBIT(val,0,16) + +#define S2IO_MAC_ADDR_START_OFFSET 0 + +#define S2IO_XENA_MAX_MC_ADDRESSES 64 /* multicast addresses */ +#define S2IO_HERC_MAX_MC_ADDRESSES 256 + +#define S2IO_XENA_MAX_MAC_ADDRESSES 16 +#define S2IO_HERC_MAX_MAC_ADDRESSES 64 + +#define S2IO_XENA_MC_ADDR_START_OFFSET 16 +#define S2IO_HERC_MC_ADDR_START_OFFSET 64 + + u64 rmac_addr_cmd_mem; +#define RMAC_ADDR_CMD_MEM_WE s2BIT(7) +#define RMAC_ADDR_CMD_MEM_RD 0 +#define RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD s2BIT(15) +#define RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING s2BIT(15) +#define RMAC_ADDR_CMD_MEM_OFFSET(n) vBIT(n,26,6) + + u64 rmac_addr_data0_mem; +#define RMAC_ADDR_DATA0_MEM_ADDR(n) vBIT(n,0,48) +#define RMAC_ADDR_DATA0_MEM_USER s2BIT(48) + + u64 rmac_addr_data1_mem; +#define RMAC_ADDR_DATA1_MEM_MASK(n) vBIT(n,0,48) + + u8 unused15[0x8]; + +/* + u64 rmac_addr_cfg; +#define RMAC_ADDR_UCASTn_EN(n) mBIT(0)_n(n) +#define RMAC_ADDR_MCASTn_EN(n) mBIT(0)_n(n) +#define RMAC_ADDR_BCAST_EN vBIT(0)_48 +#define RMAC_ADDR_ALL_ADDR_EN vBIT(0)_49 +*/ + u64 tmac_ipg_cfg; + + u64 rmac_pause_cfg; +#define RMAC_PAUSE_GEN s2BIT(0) +#define RMAC_PAUSE_GEN_ENABLE s2BIT(0) +#define RMAC_PAUSE_RX s2BIT(1) +#define RMAC_PAUSE_RX_ENABLE s2BIT(1) +#define RMAC_PAUSE_HG_PTIME_DEF vBIT(0xFFFF,16,16) +#define RMAC_PAUSE_HG_PTIME(val) vBIT(val,16,16) + + u64 rmac_red_cfg; + + u64 rmac_red_rate_q0q3; + u64 rmac_red_rate_q4q7; + + u64 mac_link_util; +#define MAC_TX_LINK_UTIL vBIT(0xFE,1,7) +#define MAC_TX_LINK_UTIL_DISABLE vBIT(0xF, 8,4) +#define MAC_TX_LINK_UTIL_VAL( n ) vBIT(n,8,4) +#define MAC_RX_LINK_UTIL vBIT(0xFE,33,7) +#define MAC_RX_LINK_UTIL_DISABLE vBIT(0xF,40,4) +#define MAC_RX_LINK_UTIL_VAL( n ) vBIT(n,40,4) + +#define MAC_LINK_UTIL_DISABLE MAC_TX_LINK_UTIL_DISABLE | \ + MAC_RX_LINK_UTIL_DISABLE + + u64 rmac_invalid_ipg; + +/* rx traffic steering */ +#define MAC_RTS_FRM_LEN_SET(len) vBIT(len,2,14) + u64 rts_frm_len_n[8]; + + u64 rts_qos_steering; + +#define MAX_DIX_MAP 4 + u64 rts_dix_map_n[MAX_DIX_MAP]; +#define RTS_DIX_MAP_ETYPE(val) vBIT(val,0,16) +#define RTS_DIX_MAP_SCW(val) s2BIT(val,21) + + u64 rts_q_alternates; + u64 rts_default_q; + + u64 rts_ctrl; +#define RTS_CTRL_IGNORE_SNAP_OUI s2BIT(2) +#define RTS_CTRL_IGNORE_LLC_CTRL s2BIT(3) + + u64 rts_pn_cam_ctrl; +#define RTS_PN_CAM_CTRL_WE s2BIT(7) +#define RTS_PN_CAM_CTRL_STROBE_NEW_CMD s2BIT(15) +#define RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED s2BIT(15) +#define RTS_PN_CAM_CTRL_OFFSET(n) vBIT(n,24,8) + u64 rts_pn_cam_data; +#define RTS_PN_CAM_DATA_TCP_SELECT s2BIT(7) +#define 
RTS_PN_CAM_DATA_PORT(val) vBIT(val,8,16) +#define RTS_PN_CAM_DATA_SCW(val) vBIT(val,24,8) + + u64 rts_ds_mem_ctrl; +#define RTS_DS_MEM_CTRL_WE s2BIT(7) +#define RTS_DS_MEM_CTRL_STROBE_NEW_CMD s2BIT(15) +#define RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED s2BIT(15) +#define RTS_DS_MEM_CTRL_OFFSET(n) vBIT(n,26,6) + u64 rts_ds_mem_data; +#define RTS_DS_MEM_DATA(n) vBIT(n,0,8) + + u8 unused16[0x700 - 0x220]; + + u64 mac_debug_ctrl; +#define MAC_DBG_ACTIVITY_VALUE 0x411040400000000ULL + + u8 unused17[0x2800 - 0x2708]; + +/* memory controller registers */ + u64 mc_int_status; +#define MC_INT_STATUS_MC_INT s2BIT(0) + u64 mc_int_mask; +#define MC_INT_MASK_MC_INT s2BIT(0) + + u64 mc_err_reg; +#define MC_ERR_REG_ECC_DB_ERR_L s2BIT(14) +#define MC_ERR_REG_ECC_DB_ERR_U s2BIT(15) +#define MC_ERR_REG_MIRI_ECC_DB_ERR_0 s2BIT(18) +#define MC_ERR_REG_MIRI_ECC_DB_ERR_1 s2BIT(20) +#define MC_ERR_REG_MIRI_CRI_ERR_0 s2BIT(22) +#define MC_ERR_REG_MIRI_CRI_ERR_1 s2BIT(23) +#define MC_ERR_REG_SM_ERR s2BIT(31) +#define MC_ERR_REG_ECC_ALL_SNG (s2BIT(2) | s2BIT(3) | s2BIT(4) | s2BIT(5) |\ + s2BIT(17) | s2BIT(19)) +#define MC_ERR_REG_ECC_ALL_DBL (s2BIT(10) | s2BIT(11) | s2BIT(12) |\ + s2BIT(13) | s2BIT(18) | s2BIT(20)) +#define PLL_LOCK_N s2BIT(39) + u64 mc_err_mask; + u64 mc_err_alarm; + + u8 unused18[0x100 - 0x28]; + +/* MC configuration */ + u64 rx_queue_cfg; +#define RX_QUEUE_CFG_Q0_SZ(n) vBIT(n,0,8) +#define RX_QUEUE_CFG_Q1_SZ(n) vBIT(n,8,8) +#define RX_QUEUE_CFG_Q2_SZ(n) vBIT(n,16,8) +#define RX_QUEUE_CFG_Q3_SZ(n) vBIT(n,24,8) +#define RX_QUEUE_CFG_Q4_SZ(n) vBIT(n,32,8) +#define RX_QUEUE_CFG_Q5_SZ(n) vBIT(n,40,8) +#define RX_QUEUE_CFG_Q6_SZ(n) vBIT(n,48,8) +#define RX_QUEUE_CFG_Q7_SZ(n) vBIT(n,56,8) + + u64 mc_rldram_mrs; +#define MC_RLDRAM_QUEUE_SIZE_ENABLE s2BIT(39) +#define MC_RLDRAM_MRS_ENABLE s2BIT(47) + + u64 mc_rldram_interleave; + + u64 mc_pause_thresh_q0q3; + u64 mc_pause_thresh_q4q7; + + u64 mc_red_thresh_q[8]; + + u8 unused19[0x200 - 0x168]; + u64 mc_rldram_ref_per; + u8 unused20[0x220 - 0x208]; + u64 mc_rldram_test_ctrl; +#define MC_RLDRAM_TEST_MODE s2BIT(47) +#define MC_RLDRAM_TEST_WRITE s2BIT(7) +#define MC_RLDRAM_TEST_GO s2BIT(15) +#define MC_RLDRAM_TEST_DONE s2BIT(23) +#define MC_RLDRAM_TEST_PASS s2BIT(31) + + u8 unused21[0x240 - 0x228]; + u64 mc_rldram_test_add; + u8 unused22[0x260 - 0x248]; + u64 mc_rldram_test_d0; + u8 unused23[0x280 - 0x268]; + u64 mc_rldram_test_d1; + u8 unused24[0x300 - 0x288]; + u64 mc_rldram_test_d2; + + u8 unused24_1[0x360 - 0x308]; + u64 mc_rldram_ctrl; +#define MC_RLDRAM_ENABLE_ODT s2BIT(7) + + u8 unused24_2[0x640 - 0x368]; + u64 mc_rldram_ref_per_herc; +#define MC_RLDRAM_SET_REF_PERIOD(val) vBIT(val, 0, 16) + + u8 unused24_3[0x660 - 0x648]; + u64 mc_rldram_mrs_herc; + + u8 unused25[0x700 - 0x668]; + u64 mc_debug_ctrl; + + u8 unused26[0x3000 - 0x2f08]; + +/* XGXG */ + /* XGXS control registers */ + + u64 xgxs_int_status; +#define XGXS_INT_STATUS_TXGXS s2BIT(0) +#define XGXS_INT_STATUS_RXGXS s2BIT(1) + u64 xgxs_int_mask; +#define XGXS_INT_MASK_TXGXS s2BIT(0) +#define XGXS_INT_MASK_RXGXS s2BIT(1) + + u64 xgxs_txgxs_err_reg; +#define TXGXS_ECC_SG_ERR s2BIT(7) +#define TXGXS_ECC_DB_ERR s2BIT(15) +#define TXGXS_ESTORE_UFLOW s2BIT(31) +#define TXGXS_TX_SM_ERR s2BIT(39) + + u64 xgxs_txgxs_err_mask; + u64 xgxs_txgxs_err_alarm; + + u64 xgxs_rxgxs_err_reg; +#define RXGXS_ESTORE_OFLOW s2BIT(7) +#define RXGXS_RX_SM_ERR s2BIT(39) + u64 xgxs_rxgxs_err_mask; + u64 xgxs_rxgxs_err_alarm; + + u8 unused27[0x100 - 0x40]; + + u64 xgxs_cfg; + u64 xgxs_status; + + u64 xgxs_cfg_key; + u64 
xgxs_efifo_cfg; /* CHANGED */ + u64 rxgxs_ber_0; /* CHANGED */ + u64 rxgxs_ber_1; /* CHANGED */ + + u64 spi_control; +#define SPI_CONTROL_KEY(key) vBIT(key,0,4) +#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3) +#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8) +#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24) +#define SPI_CONTROL_SEL1 s2BIT(4) +#define SPI_CONTROL_REQ s2BIT(7) +#define SPI_CONTROL_NACK s2BIT(5) +#define SPI_CONTROL_DONE s2BIT(6) + u64 spi_data; +#define SPI_DATA_WRITE(data,len) vBIT(data,0,len) +}; + +#define XENA_REG_SPACE sizeof(struct XENA_dev_config) +#define XENA_EEPROM_SPACE (0x01 << 11) + +#endif /* _REGS_H */ diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c new file mode 100644 index 000000000..1e0f72b65 --- /dev/null +++ b/drivers/net/ethernet/neterion/s2io.c @@ -0,0 +1,8661 @@ +/************************************************************************ + * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC + * Copyright(c) 2002-2010 Exar Corp. + * + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * Credits: + * Jeff Garzik : For pointing out the improper error condition + * check in the s2io_xmit routine and also some + * issues in the Tx watch dog function. Also for + * patiently answering all those innumerable + * questions regaring the 2.6 porting issues. + * Stephen Hemminger : Providing proper 2.6 porting mechanism for some + * macros available only in 2.6 Kernel. + * Francois Romieu : For pointing out all code part that were + * deprecated and also styling related comments. + * Grant Grundler : For helping me get rid of some Architecture + * dependent code. + * Christopher Hellwig : Some more 2.6 specific issues in the driver. + * + * The module loadable parameters that are supported by the driver and a brief + * explanation of all the variables. + * + * rx_ring_num : This can be used to program the number of receive rings used + * in the driver. + * rx_ring_sz: This defines the number of receive blocks each ring can have. + * This is also an array of size 8. + * rx_ring_mode: This defines the operation mode of all 8 rings. The valid + * values are 1, 2. + * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. + * tx_fifo_len: This too is an array of 8. Each element defines the number of + * Tx descriptors that can be associated with each corresponding FIFO. + * intr_type: This defines the type of interrupt. The values can be 0(INTA), + * 2(MSI_X). Default value is '2(MSI_X)' + * lro_max_pkts: This parameter defines maximum number of packets can be + * aggregated as a single large packet + * napi: This parameter used to enable/disable NAPI (polling Rx) + * Possible values '1' for enable and '0' for disable. Default is '1' + * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO) + * Possible values '1' for enable and '0' for disable. Default is '0' + * vlan_tag_strip: This can be used to enable or disable vlan stripping. + * Possible values '1' for enable , '0' for disable. 
+ * Default is '2' - which means disable in promisc mode + * and enable in non-promiscuous mode. + * multiq: This parameter used to enable/disable MULTIQUEUE support. + * Possible values '1' for enable and '0' for disable. Default is '0' + ************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* local include */ +#include "s2io.h" +#include "s2io-regs.h" + +#define DRV_VERSION "2.0.26.28" + +/* S2io Driver name & version. */ +static const char s2io_driver_name[] = "Neterion"; +static const char s2io_driver_version[] = DRV_VERSION; + +static const int rxd_size[2] = {32, 48}; +static const int rxd_count[2] = {127, 85}; + +static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) +{ + int ret; + + ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) && + (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK)); + + return ret; +} + +/* + * Cards with following subsystem_id have a link state indication + * problem, 600B, 600C, 600D, 640B, 640C and 640D. + * macro below identifies these cards given the subsystem_id. + */ +#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \ + (dev_type == XFRAME_I_DEVICE) ? \ + ((((subid >= 0x600B) && (subid <= 0x600D)) || \ + ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0 + +#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \ + ADAPTER_STATUS_RMAC_LOCAL_FAULT))) + +static inline int is_s2io_card_up(const struct s2io_nic *sp) +{ + return test_bit(__S2IO_STATE_CARD_UP, &sp->state); +} + +/* Ethtool related variables and Macros. 
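Editor's aside: the CARDS_WITH_FAULTY_LINK_INDICATORS macro above folds a device-type test and two subsystem-ID range checks into nested ternaries, which is easy to misread. Below is a minimal standalone sketch of the same check, written as a plain function; the XFRAME_I_DEVICE value is a placeholder (the real constant lives in s2io.h) and the sample IDs are arbitrary.

#include <stdio.h>

#define XFRAME_I_DEVICE 1	/* placeholder; the real value is defined in s2io.h */

/* Same shape as the driver macro: only Xframe-I cards whose subsystem ID
 * falls in 0x600B-0x600D or 0x640B-0x640D are flagged as having the faulty
 * link-state indication. */
static int faulty_link_indicator(int dev_type, unsigned int subid)
{
	if (dev_type != XFRAME_I_DEVICE)
		return 0;
	return (subid >= 0x600B && subid <= 0x600D) ||
	       (subid >= 0x640B && subid <= 0x640D);
}

int main(void)
{
	unsigned int ids[] = { 0x600A, 0x600B, 0x600D, 0x640C, 0x6500 };

	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("subid 0x%04X -> %d\n", ids[i],
		       faulty_link_indicator(XFRAME_I_DEVICE, ids[i]));
	return 0;	/* prints 0 1 1 1 0 */
}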
*/ +static const char s2io_gstrings[][ETH_GSTRING_LEN] = { + "Register test\t(offline)", + "Eeprom test\t(offline)", + "Link test\t(online)", + "RLDRAM test\t(offline)", + "BIST Test\t(offline)" +}; + +static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = { + {"tmac_frms"}, + {"tmac_data_octets"}, + {"tmac_drop_frms"}, + {"tmac_mcst_frms"}, + {"tmac_bcst_frms"}, + {"tmac_pause_ctrl_frms"}, + {"tmac_ttl_octets"}, + {"tmac_ucst_frms"}, + {"tmac_nucst_frms"}, + {"tmac_any_err_frms"}, + {"tmac_ttl_less_fb_octets"}, + {"tmac_vld_ip_octets"}, + {"tmac_vld_ip"}, + {"tmac_drop_ip"}, + {"tmac_icmp"}, + {"tmac_rst_tcp"}, + {"tmac_tcp"}, + {"tmac_udp"}, + {"rmac_vld_frms"}, + {"rmac_data_octets"}, + {"rmac_fcs_err_frms"}, + {"rmac_drop_frms"}, + {"rmac_vld_mcst_frms"}, + {"rmac_vld_bcst_frms"}, + {"rmac_in_rng_len_err_frms"}, + {"rmac_out_rng_len_err_frms"}, + {"rmac_long_frms"}, + {"rmac_pause_ctrl_frms"}, + {"rmac_unsup_ctrl_frms"}, + {"rmac_ttl_octets"}, + {"rmac_accepted_ucst_frms"}, + {"rmac_accepted_nucst_frms"}, + {"rmac_discarded_frms"}, + {"rmac_drop_events"}, + {"rmac_ttl_less_fb_octets"}, + {"rmac_ttl_frms"}, + {"rmac_usized_frms"}, + {"rmac_osized_frms"}, + {"rmac_frag_frms"}, + {"rmac_jabber_frms"}, + {"rmac_ttl_64_frms"}, + {"rmac_ttl_65_127_frms"}, + {"rmac_ttl_128_255_frms"}, + {"rmac_ttl_256_511_frms"}, + {"rmac_ttl_512_1023_frms"}, + {"rmac_ttl_1024_1518_frms"}, + {"rmac_ip"}, + {"rmac_ip_octets"}, + {"rmac_hdr_err_ip"}, + {"rmac_drop_ip"}, + {"rmac_icmp"}, + {"rmac_tcp"}, + {"rmac_udp"}, + {"rmac_err_drp_udp"}, + {"rmac_xgmii_err_sym"}, + {"rmac_frms_q0"}, + {"rmac_frms_q1"}, + {"rmac_frms_q2"}, + {"rmac_frms_q3"}, + {"rmac_frms_q4"}, + {"rmac_frms_q5"}, + {"rmac_frms_q6"}, + {"rmac_frms_q7"}, + {"rmac_full_q0"}, + {"rmac_full_q1"}, + {"rmac_full_q2"}, + {"rmac_full_q3"}, + {"rmac_full_q4"}, + {"rmac_full_q5"}, + {"rmac_full_q6"}, + {"rmac_full_q7"}, + {"rmac_pause_cnt"}, + {"rmac_xgmii_data_err_cnt"}, + {"rmac_xgmii_ctrl_err_cnt"}, + {"rmac_accepted_ip"}, + {"rmac_err_tcp"}, + {"rd_req_cnt"}, + {"new_rd_req_cnt"}, + {"new_rd_req_rtry_cnt"}, + {"rd_rtry_cnt"}, + {"wr_rtry_rd_ack_cnt"}, + {"wr_req_cnt"}, + {"new_wr_req_cnt"}, + {"new_wr_req_rtry_cnt"}, + {"wr_rtry_cnt"}, + {"wr_disc_cnt"}, + {"rd_rtry_wr_ack_cnt"}, + {"txp_wr_cnt"}, + {"txd_rd_cnt"}, + {"txd_wr_cnt"}, + {"rxd_rd_cnt"}, + {"rxd_wr_cnt"}, + {"txf_rd_cnt"}, + {"rxf_wr_cnt"} +}; + +static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = { + {"rmac_ttl_1519_4095_frms"}, + {"rmac_ttl_4096_8191_frms"}, + {"rmac_ttl_8192_max_frms"}, + {"rmac_ttl_gt_max_frms"}, + {"rmac_osized_alt_frms"}, + {"rmac_jabber_alt_frms"}, + {"rmac_gt_max_alt_frms"}, + {"rmac_vlan_frms"}, + {"rmac_len_discard"}, + {"rmac_fcs_discard"}, + {"rmac_pf_discard"}, + {"rmac_da_discard"}, + {"rmac_red_discard"}, + {"rmac_rts_discard"}, + {"rmac_ingm_full_discard"}, + {"link_fault_cnt"} +}; + +static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { + {"\n DRIVER STATISTICS"}, + {"single_bit_ecc_errs"}, + {"double_bit_ecc_errs"}, + {"parity_err_cnt"}, + {"serious_err_cnt"}, + {"soft_reset_cnt"}, + {"fifo_full_cnt"}, + {"ring_0_full_cnt"}, + {"ring_1_full_cnt"}, + {"ring_2_full_cnt"}, + {"ring_3_full_cnt"}, + {"ring_4_full_cnt"}, + {"ring_5_full_cnt"}, + {"ring_6_full_cnt"}, + {"ring_7_full_cnt"}, + {"alarm_transceiver_temp_high"}, + {"alarm_transceiver_temp_low"}, + {"alarm_laser_bias_current_high"}, + {"alarm_laser_bias_current_low"}, + {"alarm_laser_output_power_high"}, + {"alarm_laser_output_power_low"}, + 
{"warn_transceiver_temp_high"}, + {"warn_transceiver_temp_low"}, + {"warn_laser_bias_current_high"}, + {"warn_laser_bias_current_low"}, + {"warn_laser_output_power_high"}, + {"warn_laser_output_power_low"}, + {"lro_aggregated_pkts"}, + {"lro_flush_both_count"}, + {"lro_out_of_sequence_pkts"}, + {"lro_flush_due_to_max_pkts"}, + {"lro_avg_aggr_pkts"}, + {"mem_alloc_fail_cnt"}, + {"pci_map_fail_cnt"}, + {"watchdog_timer_cnt"}, + {"mem_allocated"}, + {"mem_freed"}, + {"link_up_cnt"}, + {"link_down_cnt"}, + {"link_up_time"}, + {"link_down_time"}, + {"tx_tcode_buf_abort_cnt"}, + {"tx_tcode_desc_abort_cnt"}, + {"tx_tcode_parity_err_cnt"}, + {"tx_tcode_link_loss_cnt"}, + {"tx_tcode_list_proc_err_cnt"}, + {"rx_tcode_parity_err_cnt"}, + {"rx_tcode_abort_cnt"}, + {"rx_tcode_parity_abort_cnt"}, + {"rx_tcode_rda_fail_cnt"}, + {"rx_tcode_unkn_prot_cnt"}, + {"rx_tcode_fcs_err_cnt"}, + {"rx_tcode_buf_size_err_cnt"}, + {"rx_tcode_rxd_corrupt_cnt"}, + {"rx_tcode_unkn_err_cnt"}, + {"tda_err_cnt"}, + {"pfc_err_cnt"}, + {"pcc_err_cnt"}, + {"tti_err_cnt"}, + {"tpa_err_cnt"}, + {"sm_err_cnt"}, + {"lso_err_cnt"}, + {"mac_tmac_err_cnt"}, + {"mac_rmac_err_cnt"}, + {"xgxs_txgxs_err_cnt"}, + {"xgxs_rxgxs_err_cnt"}, + {"rc_err_cnt"}, + {"prc_pcix_err_cnt"}, + {"rpa_err_cnt"}, + {"rda_err_cnt"}, + {"rti_err_cnt"}, + {"mc_err_cnt"} +}; + +#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys) +#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys) +#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys) + +#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN) +#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN) + +#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN) +#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN) + +#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings) +#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN) + +#define S2IO_TIMER_CONF(timer, handle, arg, exp) \ + init_timer(&timer); \ + timer.function = handle; \ + timer.data = (unsigned long)arg; \ + mod_timer(&timer, (jiffies + exp)) \ + +/* copy mac addr to def_mac_addr array */ +static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr) +{ + sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr); + sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8); + sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16); + sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24); + sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32); + sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40); +} + +/* + * Constants to be programmed into the Xena's registers, to configure + * the XAUI. 
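Editor's aside: do_s2io_copy_mac_addr() above unpacks a MAC address that the hardware reports in the low 48 bits of a u64, with byte [0] of the address taken from bits 47:40 and byte [5] from bits 7:0. A small userspace sketch of the same unpacking, useful for checking the byte order; the sample address is made up.

#include <stdio.h>
#include <stdint.h>

/* Unpack a MAC held in the low 48 bits of a u64, mirroring the driver's
 * shifts: out[0] <- bits 47:40, ..., out[5] <- bits 7:0. */
static void unpack_mac(uint64_t mac_addr, uint8_t out[6])
{
	for (int i = 0; i < 6; i++)
		out[i] = (uint8_t)(mac_addr >> (8 * (5 - i)));
}

int main(void)
{
	uint8_t mac[6];

	unpack_mac(0x001122334455ULL, mac);	/* arbitrary example value */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 00:11:22:33:44:55 */
}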
+ */ + +#define END_SIGN 0x0 +static const u64 herc_act_dtx_cfg[] = { + /* Set address */ + 0x8000051536750000ULL, 0x80000515367500E0ULL, + /* Write data */ + 0x8000051536750004ULL, 0x80000515367500E4ULL, + /* Set address */ + 0x80010515003F0000ULL, 0x80010515003F00E0ULL, + /* Write data */ + 0x80010515003F0004ULL, 0x80010515003F00E4ULL, + /* Set address */ + 0x801205150D440000ULL, 0x801205150D4400E0ULL, + /* Write data */ + 0x801205150D440004ULL, 0x801205150D4400E4ULL, + /* Set address */ + 0x80020515F2100000ULL, 0x80020515F21000E0ULL, + /* Write data */ + 0x80020515F2100004ULL, 0x80020515F21000E4ULL, + /* Done */ + END_SIGN +}; + +static const u64 xena_dtx_cfg[] = { + /* Set address */ + 0x8000051500000000ULL, 0x80000515000000E0ULL, + /* Write data */ + 0x80000515D9350004ULL, 0x80000515D93500E4ULL, + /* Set address */ + 0x8001051500000000ULL, 0x80010515000000E0ULL, + /* Write data */ + 0x80010515001E0004ULL, 0x80010515001E00E4ULL, + /* Set address */ + 0x8002051500000000ULL, 0x80020515000000E0ULL, + /* Write data */ + 0x80020515F2100004ULL, 0x80020515F21000E4ULL, + END_SIGN +}; + +/* + * Constants for Fixing the MacAddress problem seen mostly on + * Alpha machines. + */ +static const u64 fix_mac[] = { + 0x0060000000000000ULL, 0x0060600000000000ULL, + 0x0040600000000000ULL, 0x0000600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0060600000000000ULL, + 0x0020600000000000ULL, 0x0000600000000000ULL, + 0x0040600000000000ULL, 0x0060600000000000ULL, + END_SIGN +}; + +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + + +/* Module Loadable parameters. */ +S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM); +S2IO_PARM_INT(rx_ring_num, 1); +S2IO_PARM_INT(multiq, 0); +S2IO_PARM_INT(rx_ring_mode, 1); +S2IO_PARM_INT(use_continuous_tx_intrs, 1); +S2IO_PARM_INT(rmac_pause_time, 0x100); +S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); +S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); +S2IO_PARM_INT(shared_splits, 0); +S2IO_PARM_INT(tmac_util_period, 5); +S2IO_PARM_INT(rmac_util_period, 5); +S2IO_PARM_INT(l3l4hdr_size, 128); +/* 0 is no steering, 1 is Priority steering, 2 is Default steering */ +S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING); +/* Frequency of Rx desc syncs expressed as power of 2 */ +S2IO_PARM_INT(rxsync_frequency, 3); +/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ +S2IO_PARM_INT(intr_type, 2); +/* Large receive offload feature */ + +/* Max pkts to be aggregated by LRO at one time. 
If not specified, + * aggregation happens until we hit max IP pkt size(64K) + */ +S2IO_PARM_INT(lro_max_pkts, 0xFFFF); +S2IO_PARM_INT(indicate_max_pkts, 0); + +S2IO_PARM_INT(napi, 1); +S2IO_PARM_INT(ufo, 0); +S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC); + +static unsigned int tx_fifo_len[MAX_TX_FIFOS] = +{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; +static unsigned int rx_ring_sz[MAX_RX_RINGS] = +{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; +static unsigned int rts_frm_len[MAX_RX_RINGS] = +{[0 ...(MAX_RX_RINGS - 1)] = 0 }; + +module_param_array(tx_fifo_len, uint, NULL, 0); +module_param_array(rx_ring_sz, uint, NULL, 0); +module_param_array(rts_frm_len, uint, NULL, 0); + +/* + * S2IO device table. + * This table lists all the devices that this driver supports. + */ +static const struct pci_device_id s2io_tbl[] = { + {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN, + PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI, + PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN, + PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI, + PCI_ANY_ID, PCI_ANY_ID}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, s2io_tbl); + +static const struct pci_error_handlers s2io_err_handler = { + .error_detected = s2io_io_error_detected, + .slot_reset = s2io_io_slot_reset, + .resume = s2io_io_resume, +}; + +static struct pci_driver s2io_driver = { + .name = "S2IO", + .id_table = s2io_tbl, + .probe = s2io_init_nic, + .remove = s2io_rem_nic, + .err_handler = &s2io_err_handler, +}; + +/* A simplifier macro used both by init and free shared_mem Fns(). */ +#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each) + +/* netqueue manipulation helper functions */ +static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp) +{ + if (!sp->config.multiq) { + int i; + + for (i = 0; i < sp->config.tx_fifo_num; i++) + sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP; + } + netif_tx_stop_all_queues(sp->dev); +} + +static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no) +{ + if (!sp->config.multiq) + sp->mac_control.fifos[fifo_no].queue_state = + FIFO_QUEUE_STOP; + + netif_tx_stop_all_queues(sp->dev); +} + +static inline void s2io_start_all_tx_queue(struct s2io_nic *sp) +{ + if (!sp->config.multiq) { + int i; + + for (i = 0; i < sp->config.tx_fifo_num; i++) + sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; + } + netif_tx_start_all_queues(sp->dev); +} + +static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp) +{ + if (!sp->config.multiq) { + int i; + + for (i = 0; i < sp->config.tx_fifo_num; i++) + sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START; + } + netif_tx_wake_all_queues(sp->dev); +} + +static inline void s2io_wake_tx_queue( + struct fifo_info *fifo, int cnt, u8 multiq) +{ + + if (multiq) { + if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no)) + netif_wake_subqueue(fifo->dev, fifo->fifo_no); + } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) { + if (netif_queue_stopped(fifo->dev)) { + fifo->queue_state = FIFO_QUEUE_START; + netif_wake_queue(fifo->dev); + } + } +} + +/** + * init_shared_mem - Allocation and Initialization of Memory + * @nic: Device private variable. + * Description: The function allocates all the memory areas shared + * between the NIC and the driver. This includes Tx descriptors, + * Rx descriptors and the statistics block. 
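Editor's aside: TXD_MEM_PAGE_CNT() above is a round-up integer division: how many pages are needed to hold `len` descriptor lists when `per_each` lists fit in one page. A quick sanity check of the arithmetic (the macro here adds argument parentheses and uses arbitrary example values):

#include <stdio.h>

/* Round-up division, same shape as the driver's TXD_MEM_PAGE_CNT(). */
#define PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))

int main(void)
{
	/* 100 lists, 16 per page -> 7 pages, since 6 * 16 = 96 < 100. */
	printf("%d\n", PAGE_CNT(100, 16));	/* 7 */
	/* An exact multiple needs no extra page. */
	printf("%d\n", PAGE_CNT(128, 16));	/* 8 */
	return 0;
}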
+ */ + +static int init_shared_mem(struct s2io_nic *nic) +{ + u32 size; + void *tmp_v_addr, *tmp_v_addr_next; + dma_addr_t tmp_p_addr, tmp_p_addr_next; + struct RxD_block *pre_rxd_blk = NULL; + int i, j, blk_cnt; + int lst_size, lst_per_page; + struct net_device *dev = nic->dev; + unsigned long tmp; + struct buffAdd *ba; + struct config_param *config = &nic->config; + struct mac_info *mac_control = &nic->mac_control; + unsigned long long mem_allocated = 0; + + /* Allocation and initialization of TXDLs in FIFOs */ + size = 0; + for (i = 0; i < config->tx_fifo_num; i++) { + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + size += tx_cfg->fifo_len; + } + if (size > MAX_AVAILABLE_TXDS) { + DBG_PRINT(ERR_DBG, + "Too many TxDs requested: %d, max supported: %d\n", + size, MAX_AVAILABLE_TXDS); + return -EINVAL; + } + + size = 0; + for (i = 0; i < config->tx_fifo_num; i++) { + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + size = tx_cfg->fifo_len; + /* + * Legal values are from 2 to 8192 + */ + if (size < 2) { + DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - " + "Valid lengths are 2 through 8192\n", + i, size); + return -EINVAL; + } + } + + lst_size = (sizeof(struct TxD) * config->max_txds); + lst_per_page = PAGE_SIZE / lst_size; + + for (i = 0; i < config->tx_fifo_num; i++) { + struct fifo_info *fifo = &mac_control->fifos[i]; + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + int fifo_len = tx_cfg->fifo_len; + int list_holder_size = fifo_len * sizeof(struct list_info_hold); + + fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL); + if (!fifo->list_info) { + DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n"); + return -ENOMEM; + } + mem_allocated += list_holder_size; + } + for (i = 0; i < config->tx_fifo_num; i++) { + int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, + lst_per_page); + struct fifo_info *fifo = &mac_control->fifos[i]; + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + fifo->tx_curr_put_info.offset = 0; + fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1; + fifo->tx_curr_get_info.offset = 0; + fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1; + fifo->fifo_no = i; + fifo->nic = nic; + fifo->max_txds = MAX_SKB_FRAGS + 2; + fifo->dev = dev; + + for (j = 0; j < page_num; j++) { + int k = 0; + dma_addr_t tmp_p; + void *tmp_v; + tmp_v = pci_alloc_consistent(nic->pdev, + PAGE_SIZE, &tmp_p); + if (!tmp_v) { + DBG_PRINT(INFO_DBG, + "pci_alloc_consistent failed for TxDL\n"); + return -ENOMEM; + } + /* If we got a zero DMA address(can happen on + * certain platforms like PPC), reallocate. + * Store virtual address of page we don't want, + * to be freed later. + */ + if (!tmp_p) { + mac_control->zerodma_virt_addr = tmp_v; + DBG_PRINT(INIT_DBG, + "%s: Zero DMA address for TxDL. 
" + "Virtual address %p\n", + dev->name, tmp_v); + tmp_v = pci_alloc_consistent(nic->pdev, + PAGE_SIZE, &tmp_p); + if (!tmp_v) { + DBG_PRINT(INFO_DBG, + "pci_alloc_consistent failed for TxDL\n"); + return -ENOMEM; + } + mem_allocated += PAGE_SIZE; + } + while (k < lst_per_page) { + int l = (j * lst_per_page) + k; + if (l == tx_cfg->fifo_len) + break; + fifo->list_info[l].list_virt_addr = + tmp_v + (k * lst_size); + fifo->list_info[l].list_phy_addr = + tmp_p + (k * lst_size); + k++; + } + } + } + + for (i = 0; i < config->tx_fifo_num; i++) { + struct fifo_info *fifo = &mac_control->fifos[i]; + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + size = tx_cfg->fifo_len; + fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL); + if (!fifo->ufo_in_band_v) + return -ENOMEM; + mem_allocated += (size * sizeof(u64)); + } + + /* Allocation and initialization of RXDs in Rings */ + size = 0; + for (i = 0; i < config->rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + struct ring_info *ring = &mac_control->rings[i]; + + if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) { + DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a " + "multiple of RxDs per Block\n", + dev->name, i); + return FAILURE; + } + size += rx_cfg->num_rxd; + ring->block_count = rx_cfg->num_rxd / + (rxd_count[nic->rxd_mode] + 1); + ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count; + } + if (nic->rxd_mode == RXD_MODE_1) + size = (size * (sizeof(struct RxD1))); + else + size = (size * (sizeof(struct RxD3))); + + for (i = 0; i < config->rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + struct ring_info *ring = &mac_control->rings[i]; + + ring->rx_curr_get_info.block_index = 0; + ring->rx_curr_get_info.offset = 0; + ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1; + ring->rx_curr_put_info.block_index = 0; + ring->rx_curr_put_info.offset = 0; + ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1; + ring->nic = nic; + ring->ring_no = i; + + blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1); + /* Allocating all the Rx blocks */ + for (j = 0; j < blk_cnt; j++) { + struct rx_block_info *rx_blocks; + int l; + + rx_blocks = &ring->rx_blocks[j]; + size = SIZE_OF_BLOCK; /* size is always page size */ + tmp_v_addr = pci_alloc_consistent(nic->pdev, size, + &tmp_p_addr); + if (tmp_v_addr == NULL) { + /* + * In case of failure, free_shared_mem() + * is called, which should free any + * memory that was alloced till the + * failure happened. 
+ */ + rx_blocks->block_virt_addr = tmp_v_addr; + return -ENOMEM; + } + mem_allocated += size; + memset(tmp_v_addr, 0, size); + + size = sizeof(struct rxd_info) * + rxd_count[nic->rxd_mode]; + rx_blocks->block_virt_addr = tmp_v_addr; + rx_blocks->block_dma_addr = tmp_p_addr; + rx_blocks->rxds = kmalloc(size, GFP_KERNEL); + if (!rx_blocks->rxds) + return -ENOMEM; + mem_allocated += size; + for (l = 0; l < rxd_count[nic->rxd_mode]; l++) { + rx_blocks->rxds[l].virt_addr = + rx_blocks->block_virt_addr + + (rxd_size[nic->rxd_mode] * l); + rx_blocks->rxds[l].dma_addr = + rx_blocks->block_dma_addr + + (rxd_size[nic->rxd_mode] * l); + } + } + /* Interlinking all Rx Blocks */ + for (j = 0; j < blk_cnt; j++) { + int next = (j + 1) % blk_cnt; + tmp_v_addr = ring->rx_blocks[j].block_virt_addr; + tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr; + tmp_p_addr = ring->rx_blocks[j].block_dma_addr; + tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr; + + pre_rxd_blk = tmp_v_addr; + pre_rxd_blk->reserved_2_pNext_RxD_block = + (unsigned long)tmp_v_addr_next; + pre_rxd_blk->pNext_RxD_Blk_physical = + (u64)tmp_p_addr_next; + } + } + if (nic->rxd_mode == RXD_MODE_3B) { + /* + * Allocation of Storages for buffer addresses in 2BUFF mode + * and the buffers as well. + */ + for (i = 0; i < config->rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + struct ring_info *ring = &mac_control->rings[i]; + + blk_cnt = rx_cfg->num_rxd / + (rxd_count[nic->rxd_mode] + 1); + size = sizeof(struct buffAdd *) * blk_cnt; + ring->ba = kmalloc(size, GFP_KERNEL); + if (!ring->ba) + return -ENOMEM; + mem_allocated += size; + for (j = 0; j < blk_cnt; j++) { + int k = 0; + + size = sizeof(struct buffAdd) * + (rxd_count[nic->rxd_mode] + 1); + ring->ba[j] = kmalloc(size, GFP_KERNEL); + if (!ring->ba[j]) + return -ENOMEM; + mem_allocated += size; + while (k != rxd_count[nic->rxd_mode]) { + ba = &ring->ba[j][k]; + size = BUF0_LEN + ALIGN_SIZE; + ba->ba_0_org = kmalloc(size, GFP_KERNEL); + if (!ba->ba_0_org) + return -ENOMEM; + mem_allocated += size; + tmp = (unsigned long)ba->ba_0_org; + tmp += ALIGN_SIZE; + tmp &= ~((unsigned long)ALIGN_SIZE); + ba->ba_0 = (void *)tmp; + + size = BUF1_LEN + ALIGN_SIZE; + ba->ba_1_org = kmalloc(size, GFP_KERNEL); + if (!ba->ba_1_org) + return -ENOMEM; + mem_allocated += size; + tmp = (unsigned long)ba->ba_1_org; + tmp += ALIGN_SIZE; + tmp &= ~((unsigned long)ALIGN_SIZE); + ba->ba_1 = (void *)tmp; + k++; + } + } + } + } + + /* Allocation and initialization of Statistics block */ + size = sizeof(struct stat_block); + mac_control->stats_mem = + pci_alloc_consistent(nic->pdev, size, + &mac_control->stats_mem_phy); + + if (!mac_control->stats_mem) { + /* + * In case of failure, free_shared_mem() is called, which + * should free any memory that was alloced till the + * failure happened. + */ + return -ENOMEM; + } + mem_allocated += size; + mac_control->stats_mem_sz = size; + + tmp_v_addr = mac_control->stats_mem; + mac_control->stats_info = tmp_v_addr; + memset(tmp_v_addr, 0, size); + DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n", + dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr); + mac_control->stats_info->sw_stat.mem_allocated += mem_allocated; + return SUCCESS; +} + +/** + * free_shared_mem - Free the allocated Memory + * @nic: Device private variable. + * Description: This function is to free all memory locations allocated by + * the init_shared_mem() function and return it to the kernel. 
+ */ + +static void free_shared_mem(struct s2io_nic *nic) +{ + int i, j, blk_cnt, size; + void *tmp_v_addr; + dma_addr_t tmp_p_addr; + int lst_size, lst_per_page; + struct net_device *dev; + int page_num = 0; + struct config_param *config; + struct mac_info *mac_control; + struct stat_block *stats; + struct swStat *swstats; + + if (!nic) + return; + + dev = nic->dev; + + config = &nic->config; + mac_control = &nic->mac_control; + stats = mac_control->stats_info; + swstats = &stats->sw_stat; + + lst_size = sizeof(struct TxD) * config->max_txds; + lst_per_page = PAGE_SIZE / lst_size; + + for (i = 0; i < config->tx_fifo_num; i++) { + struct fifo_info *fifo = &mac_control->fifos[i]; + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page); + for (j = 0; j < page_num; j++) { + int mem_blks = (j * lst_per_page); + struct list_info_hold *fli; + + if (!fifo->list_info) + return; + + fli = &fifo->list_info[mem_blks]; + if (!fli->list_virt_addr) + break; + pci_free_consistent(nic->pdev, PAGE_SIZE, + fli->list_virt_addr, + fli->list_phy_addr); + swstats->mem_freed += PAGE_SIZE; + } + /* If we got a zero DMA address during allocation, + * free the page now + */ + if (mac_control->zerodma_virt_addr) { + pci_free_consistent(nic->pdev, PAGE_SIZE, + mac_control->zerodma_virt_addr, + (dma_addr_t)0); + DBG_PRINT(INIT_DBG, + "%s: Freeing TxDL with zero DMA address. " + "Virtual address %p\n", + dev->name, mac_control->zerodma_virt_addr); + swstats->mem_freed += PAGE_SIZE; + } + kfree(fifo->list_info); + swstats->mem_freed += tx_cfg->fifo_len * + sizeof(struct list_info_hold); + } + + size = SIZE_OF_BLOCK; + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + blk_cnt = ring->block_count; + for (j = 0; j < blk_cnt; j++) { + tmp_v_addr = ring->rx_blocks[j].block_virt_addr; + tmp_p_addr = ring->rx_blocks[j].block_dma_addr; + if (tmp_v_addr == NULL) + break; + pci_free_consistent(nic->pdev, size, + tmp_v_addr, tmp_p_addr); + swstats->mem_freed += size; + kfree(ring->rx_blocks[j].rxds); + swstats->mem_freed += sizeof(struct rxd_info) * + rxd_count[nic->rxd_mode]; + } + } + + if (nic->rxd_mode == RXD_MODE_3B) { + /* Freeing buffer storage addresses in 2BUFF mode. 
*/ + for (i = 0; i < config->rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + struct ring_info *ring = &mac_control->rings[i]; + + blk_cnt = rx_cfg->num_rxd / + (rxd_count[nic->rxd_mode] + 1); + for (j = 0; j < blk_cnt; j++) { + int k = 0; + if (!ring->ba[j]) + continue; + while (k != rxd_count[nic->rxd_mode]) { + struct buffAdd *ba = &ring->ba[j][k]; + kfree(ba->ba_0_org); + swstats->mem_freed += + BUF0_LEN + ALIGN_SIZE; + kfree(ba->ba_1_org); + swstats->mem_freed += + BUF1_LEN + ALIGN_SIZE; + k++; + } + kfree(ring->ba[j]); + swstats->mem_freed += sizeof(struct buffAdd) * + (rxd_count[nic->rxd_mode] + 1); + } + kfree(ring->ba); + swstats->mem_freed += sizeof(struct buffAdd *) * + blk_cnt; + } + } + + for (i = 0; i < nic->config.tx_fifo_num; i++) { + struct fifo_info *fifo = &mac_control->fifos[i]; + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + if (fifo->ufo_in_band_v) { + swstats->mem_freed += tx_cfg->fifo_len * + sizeof(u64); + kfree(fifo->ufo_in_band_v); + } + } + + if (mac_control->stats_mem) { + swstats->mem_freed += mac_control->stats_mem_sz; + pci_free_consistent(nic->pdev, + mac_control->stats_mem_sz, + mac_control->stats_mem, + mac_control->stats_mem_phy); + } +} + +/** + * s2io_verify_pci_mode - + */ + +static int s2io_verify_pci_mode(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 val64 = 0; + int mode; + + val64 = readq(&bar0->pci_mode); + mode = (u8)GET_PCI_MODE(val64); + + if (val64 & PCI_MODE_UNKNOWN_MODE) + return -1; /* Unknown PCI mode */ + return mode; +} + +#define NEC_VENID 0x1033 +#define NEC_DEVID 0x0125 +static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev) +{ + struct pci_dev *tdev = NULL; + for_each_pci_dev(tdev) { + if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { + if (tdev->bus == s2io_pdev->bus->parent) { + pci_dev_put(tdev); + return 1; + } + } + } + return 0; +} + +static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266}; +/** + * s2io_print_pci_mode - + */ +static int s2io_print_pci_mode(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 val64 = 0; + int mode; + struct config_param *config = &nic->config; + const char *pcimode; + + val64 = readq(&bar0->pci_mode); + mode = (u8)GET_PCI_MODE(val64); + + if (val64 & PCI_MODE_UNKNOWN_MODE) + return -1; /* Unknown PCI mode */ + + config->bus_speed = bus_speed[mode]; + + if (s2io_on_nec_bridge(nic->pdev)) { + DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n", + nic->dev->name); + return mode; + } + + switch (mode) { + case PCI_MODE_PCI_33: + pcimode = "33MHz PCI bus"; + break; + case PCI_MODE_PCI_66: + pcimode = "66MHz PCI bus"; + break; + case PCI_MODE_PCIX_M1_66: + pcimode = "66MHz PCIX(M1) bus"; + break; + case PCI_MODE_PCIX_M1_100: + pcimode = "100MHz PCIX(M1) bus"; + break; + case PCI_MODE_PCIX_M1_133: + pcimode = "133MHz PCIX(M1) bus"; + break; + case PCI_MODE_PCIX_M2_66: + pcimode = "133MHz PCIX(M2) bus"; + break; + case PCI_MODE_PCIX_M2_100: + pcimode = "200MHz PCIX(M2) bus"; + break; + case PCI_MODE_PCIX_M2_133: + pcimode = "266MHz PCIX(M2) bus"; + break; + default: + pcimode = "unsupported bus!"; + mode = -1; + } + + DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n", + nic->dev->name, val64 & PCI_MODE_32_BITS ? 
32 : 64, pcimode); + + return mode; +} + +/** + * init_tti - Initialization transmit traffic interrupt scheme + * @nic: device private variable + * @link: link status (UP/DOWN) used to enable/disable continuous + * transmit interrupts + * Description: The function configures transmit traffic interrupts + * Return Value: SUCCESS on success and + * '-1' on failure + */ + +static int init_tti(struct s2io_nic *nic, int link) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 val64 = 0; + int i; + struct config_param *config = &nic->config; + + for (i = 0; i < config->tx_fifo_num; i++) { + /* + * TTI Initialization. Default Tx timer gets us about + * 250 interrupts per sec. Continuous interrupts are enabled + * by default. + */ + if (nic->device_type == XFRAME_II_DEVICE) { + int count = (nic->config.bus_speed * 125)/2; + val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count); + } else + val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078); + + val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) | + TTI_DATA1_MEM_TX_URNG_B(0x10) | + TTI_DATA1_MEM_TX_URNG_C(0x30) | + TTI_DATA1_MEM_TX_TIMER_AC_EN; + if (i == 0) + if (use_continuous_tx_intrs && (link == LINK_UP)) + val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; + writeq(val64, &bar0->tti_data1_mem); + + if (nic->config.intr_type == MSI_X) { + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | + TTI_DATA2_MEM_TX_UFC_B(0x100) | + TTI_DATA2_MEM_TX_UFC_C(0x200) | + TTI_DATA2_MEM_TX_UFC_D(0x300); + } else { + if ((nic->config.tx_steering_type == + TX_DEFAULT_STEERING) && + (config->tx_fifo_num > 1) && + (i >= nic->udp_fifo_idx) && + (i < (nic->udp_fifo_idx + + nic->total_udp_fifos))) + val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | + TTI_DATA2_MEM_TX_UFC_B(0x80) | + TTI_DATA2_MEM_TX_UFC_C(0x100) | + TTI_DATA2_MEM_TX_UFC_D(0x120); + else + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | + TTI_DATA2_MEM_TX_UFC_B(0x20) | + TTI_DATA2_MEM_TX_UFC_C(0x40) | + TTI_DATA2_MEM_TX_UFC_D(0x80); + } + + writeq(val64, &bar0->tti_data2_mem); + + val64 = TTI_CMD_MEM_WE | + TTI_CMD_MEM_STROBE_NEW_CMD | + TTI_CMD_MEM_OFFSET(i); + writeq(val64, &bar0->tti_command_mem); + + if (wait_for_cmd_complete(&bar0->tti_command_mem, + TTI_CMD_MEM_STROBE_NEW_CMD, + S2IO_BIT_RESET) != SUCCESS) + return FAILURE; + } + + return SUCCESS; +} + +/** + * init_nic - Initialization of hardware + * @nic: device private variable + * Description: The function sequentially configures every block + * of the H/W from their reset values. + * Return Value: SUCCESS on success and + * '-1' on failure (endian settings incorrect). + */ + +static int init_nic(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + struct net_device *dev = nic->dev; + register u64 val64 = 0; + void __iomem *add; + u32 time; + int i, j; + int dtx_cnt = 0; + unsigned long long mem_share; + int mem_size; + struct config_param *config = &nic->config; + struct mac_info *mac_control = &nic->mac_control; + + /* to set the swapper controle on the card */ + if (s2io_set_swapper(nic)) { + DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n"); + return -EIO; + } + + /* + * Herc requires EOI to be removed from reset before XGXS, so.. + */ + if (nic->device_type & XFRAME_II_DEVICE) { + val64 = 0xA500000000ULL; + writeq(val64, &bar0->sw_reset); + msleep(500); + val64 = readq(&bar0->sw_reset); + } + + /* Remove XGXS from reset state */ + val64 = 0; + writeq(val64, &bar0->sw_reset); + msleep(500); + val64 = readq(&bar0->sw_reset); + + /* Ensure that it's safe to access registers by checking + * RIC_RUNNING bit is reset. Check is valid only for XframeII. 
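Editor's aside: init_tti() above follows the recurring command-memory pattern in this driver: write the data registers, then write WE | STROBE_NEW_CMD | OFFSET(i) to the command register and wait until the hardware clears the strobe bit (wait_for_cmd_complete() is defined elsewhere in s2io.c). A rough userspace model of that polling contract; the register, its strobe bit and the fake "hardware" below are stand-ins, not driver code.

#include <stdio.h>
#include <stdint.h>

#define CMD_STROBE (1ULL << 15)		/* stand-in for the strobe bit */

/* Fake "hardware": clears the strobe a few reads after the command is issued. */
static uint64_t fake_cmd_reg = CMD_STROBE;
static uint64_t read_cmd_reg(void)
{
	static int reads;

	if (++reads >= 3)
		fake_cmd_reg &= ~CMD_STROBE;	/* command completed */
	return fake_cmd_reg;
}

/* Shape of the wait: poll until the busy bit drops or we give up. */
static int wait_strobe_clear(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (!(read_cmd_reg() & CMD_STROBE))
			return 0;		/* SUCCESS */
		/* the driver sleeps between polls here (msleep) */
	}
	return -1;				/* FAILURE: timed out */
}

int main(void)
{
	printf("wait returned %d\n", wait_strobe_clear(10));
	return 0;
}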
+ */ + if (nic->device_type == XFRAME_II_DEVICE) { + for (i = 0; i < 50; i++) { + val64 = readq(&bar0->adapter_status); + if (!(val64 & ADAPTER_STATUS_RIC_RUNNING)) + break; + msleep(10); + } + if (i == 50) + return -ENODEV; + } + + /* Enable Receiving broadcasts */ + add = &bar0->mac_cfg; + val64 = readq(&bar0->mac_cfg); + val64 |= MAC_RMAC_BCAST_ENABLE; + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32)val64, add); + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32) (val64 >> 32), (add + 4)); + + /* Read registers in all blocks */ + val64 = readq(&bar0->mac_int_mask); + val64 = readq(&bar0->mc_int_mask); + val64 = readq(&bar0->xgxs_int_mask); + + /* Set MTU */ + val64 = dev->mtu; + writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); + + if (nic->device_type & XFRAME_II_DEVICE) { + while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) { + SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt], + &bar0->dtx_control, UF); + if (dtx_cnt & 0x1) + msleep(1); /* Necessary!! */ + dtx_cnt++; + } + } else { + while (xena_dtx_cfg[dtx_cnt] != END_SIGN) { + SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt], + &bar0->dtx_control, UF); + val64 = readq(&bar0->dtx_control); + dtx_cnt++; + } + } + + /* Tx DMA Initialization */ + val64 = 0; + writeq(val64, &bar0->tx_fifo_partition_0); + writeq(val64, &bar0->tx_fifo_partition_1); + writeq(val64, &bar0->tx_fifo_partition_2); + writeq(val64, &bar0->tx_fifo_partition_3); + + for (i = 0, j = 0; i < config->tx_fifo_num; i++) { + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) | + vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3); + + if (i == (config->tx_fifo_num - 1)) { + if (i % 2 == 0) + i++; + } + + switch (i) { + case 1: + writeq(val64, &bar0->tx_fifo_partition_0); + val64 = 0; + j = 0; + break; + case 3: + writeq(val64, &bar0->tx_fifo_partition_1); + val64 = 0; + j = 0; + break; + case 5: + writeq(val64, &bar0->tx_fifo_partition_2); + val64 = 0; + j = 0; + break; + case 7: + writeq(val64, &bar0->tx_fifo_partition_3); + val64 = 0; + j = 0; + break; + default: + j++; + break; + } + } + + /* + * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug + * SXE-008 TRANSMIT DMA ARBITRATION ISSUE. + */ + if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4)) + writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable); + + val64 = readq(&bar0->tx_fifo_partition_0); + DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n", + &bar0->tx_fifo_partition_0, (unsigned long long)val64); + + /* + * Initialization of Tx_PA_CONFIG register to ignore packet + * integrity checking. + */ + val64 = readq(&bar0->tx_pa_cfg); + val64 |= TX_PA_CFG_IGNORE_FRM_ERR | + TX_PA_CFG_IGNORE_SNAP_OUI | + TX_PA_CFG_IGNORE_LLC_CTRL | + TX_PA_CFG_IGNORE_L2_ERR; + writeq(val64, &bar0->tx_pa_cfg); + + /* Rx DMA initialization. */ + val64 = 0; + for (i = 0; i < config->rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + + val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3); + } + writeq(val64, &bar0->rx_queue_priority); + + /* + * Allocating equal share of memory to all the + * configured Rings. 
+ */ + val64 = 0; + if (nic->device_type & XFRAME_II_DEVICE) + mem_size = 32; + else + mem_size = 64; + + for (i = 0; i < config->rx_ring_num; i++) { + switch (i) { + case 0: + mem_share = (mem_size / config->rx_ring_num + + mem_size % config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share); + continue; + case 1: + mem_share = (mem_size / config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share); + continue; + case 2: + mem_share = (mem_size / config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share); + continue; + case 3: + mem_share = (mem_size / config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share); + continue; + case 4: + mem_share = (mem_size / config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share); + continue; + case 5: + mem_share = (mem_size / config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share); + continue; + case 6: + mem_share = (mem_size / config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share); + continue; + case 7: + mem_share = (mem_size / config->rx_ring_num); + val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share); + continue; + } + } + writeq(val64, &bar0->rx_queue_cfg); + + /* + * Filling Tx round robin registers + * as per the number of FIFOs for equal scheduling priority + */ + switch (config->tx_fifo_num) { + case 1: + val64 = 0x0; + writeq(val64, &bar0->tx_w_round_robin_0); + writeq(val64, &bar0->tx_w_round_robin_1); + writeq(val64, &bar0->tx_w_round_robin_2); + writeq(val64, &bar0->tx_w_round_robin_3); + writeq(val64, &bar0->tx_w_round_robin_4); + break; + case 2: + val64 = 0x0001000100010001ULL; + writeq(val64, &bar0->tx_w_round_robin_0); + writeq(val64, &bar0->tx_w_round_robin_1); + writeq(val64, &bar0->tx_w_round_robin_2); + writeq(val64, &bar0->tx_w_round_robin_3); + val64 = 0x0001000100000000ULL; + writeq(val64, &bar0->tx_w_round_robin_4); + break; + case 3: + val64 = 0x0001020001020001ULL; + writeq(val64, &bar0->tx_w_round_robin_0); + val64 = 0x0200010200010200ULL; + writeq(val64, &bar0->tx_w_round_robin_1); + val64 = 0x0102000102000102ULL; + writeq(val64, &bar0->tx_w_round_robin_2); + val64 = 0x0001020001020001ULL; + writeq(val64, &bar0->tx_w_round_robin_3); + val64 = 0x0200010200000000ULL; + writeq(val64, &bar0->tx_w_round_robin_4); + break; + case 4: + val64 = 0x0001020300010203ULL; + writeq(val64, &bar0->tx_w_round_robin_0); + writeq(val64, &bar0->tx_w_round_robin_1); + writeq(val64, &bar0->tx_w_round_robin_2); + writeq(val64, &bar0->tx_w_round_robin_3); + val64 = 0x0001020300000000ULL; + writeq(val64, &bar0->tx_w_round_robin_4); + break; + case 5: + val64 = 0x0001020304000102ULL; + writeq(val64, &bar0->tx_w_round_robin_0); + val64 = 0x0304000102030400ULL; + writeq(val64, &bar0->tx_w_round_robin_1); + val64 = 0x0102030400010203ULL; + writeq(val64, &bar0->tx_w_round_robin_2); + val64 = 0x0400010203040001ULL; + writeq(val64, &bar0->tx_w_round_robin_3); + val64 = 0x0203040000000000ULL; + writeq(val64, &bar0->tx_w_round_robin_4); + break; + case 6: + val64 = 0x0001020304050001ULL; + writeq(val64, &bar0->tx_w_round_robin_0); + val64 = 0x0203040500010203ULL; + writeq(val64, &bar0->tx_w_round_robin_1); + val64 = 0x0405000102030405ULL; + writeq(val64, &bar0->tx_w_round_robin_2); + val64 = 0x0001020304050001ULL; + writeq(val64, &bar0->tx_w_round_robin_3); + val64 = 0x0203040500000000ULL; + writeq(val64, &bar0->tx_w_round_robin_4); + break; + case 7: + val64 = 0x0001020304050600ULL; + writeq(val64, &bar0->tx_w_round_robin_0); + val64 = 0x0102030405060001ULL; + writeq(val64, &bar0->tx_w_round_robin_1); + val64 = 
0x0203040506000102ULL; + writeq(val64, &bar0->tx_w_round_robin_2); + val64 = 0x0304050600010203ULL; + writeq(val64, &bar0->tx_w_round_robin_3); + val64 = 0x0405060000000000ULL; + writeq(val64, &bar0->tx_w_round_robin_4); + break; + case 8: + val64 = 0x0001020304050607ULL; + writeq(val64, &bar0->tx_w_round_robin_0); + writeq(val64, &bar0->tx_w_round_robin_1); + writeq(val64, &bar0->tx_w_round_robin_2); + writeq(val64, &bar0->tx_w_round_robin_3); + val64 = 0x0001020300000000ULL; + writeq(val64, &bar0->tx_w_round_robin_4); + break; + } + + /* Enable all configured Tx FIFO partitions */ + val64 = readq(&bar0->tx_fifo_partition_0); + val64 |= (TX_FIFO_PARTITION_EN); + writeq(val64, &bar0->tx_fifo_partition_0); + + /* Filling the Rx round robin registers as per the + * number of Rings and steering based on QoS with + * equal priority. + */ + switch (config->rx_ring_num) { + case 1: + val64 = 0x0; + writeq(val64, &bar0->rx_w_round_robin_0); + writeq(val64, &bar0->rx_w_round_robin_1); + writeq(val64, &bar0->rx_w_round_robin_2); + writeq(val64, &bar0->rx_w_round_robin_3); + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8080808080808080ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + case 2: + val64 = 0x0001000100010001ULL; + writeq(val64, &bar0->rx_w_round_robin_0); + writeq(val64, &bar0->rx_w_round_robin_1); + writeq(val64, &bar0->rx_w_round_robin_2); + writeq(val64, &bar0->rx_w_round_robin_3); + val64 = 0x0001000100000000ULL; + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8080808040404040ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + case 3: + val64 = 0x0001020001020001ULL; + writeq(val64, &bar0->rx_w_round_robin_0); + val64 = 0x0200010200010200ULL; + writeq(val64, &bar0->rx_w_round_robin_1); + val64 = 0x0102000102000102ULL; + writeq(val64, &bar0->rx_w_round_robin_2); + val64 = 0x0001020001020001ULL; + writeq(val64, &bar0->rx_w_round_robin_3); + val64 = 0x0200010200000000ULL; + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8080804040402020ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + case 4: + val64 = 0x0001020300010203ULL; + writeq(val64, &bar0->rx_w_round_robin_0); + writeq(val64, &bar0->rx_w_round_robin_1); + writeq(val64, &bar0->rx_w_round_robin_2); + writeq(val64, &bar0->rx_w_round_robin_3); + val64 = 0x0001020300000000ULL; + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8080404020201010ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + case 5: + val64 = 0x0001020304000102ULL; + writeq(val64, &bar0->rx_w_round_robin_0); + val64 = 0x0304000102030400ULL; + writeq(val64, &bar0->rx_w_round_robin_1); + val64 = 0x0102030400010203ULL; + writeq(val64, &bar0->rx_w_round_robin_2); + val64 = 0x0400010203040001ULL; + writeq(val64, &bar0->rx_w_round_robin_3); + val64 = 0x0203040000000000ULL; + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8080404020201008ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + case 6: + val64 = 0x0001020304050001ULL; + writeq(val64, &bar0->rx_w_round_robin_0); + val64 = 0x0203040500010203ULL; + writeq(val64, &bar0->rx_w_round_robin_1); + val64 = 0x0405000102030405ULL; + writeq(val64, &bar0->rx_w_round_robin_2); + val64 = 0x0001020304050001ULL; + writeq(val64, &bar0->rx_w_round_robin_3); + val64 = 0x0203040500000000ULL; + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8080404020100804ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + case 7: + val64 = 0x0001020304050600ULL; + writeq(val64, &bar0->rx_w_round_robin_0); + val64 = 0x0102030405060001ULL; + writeq(val64, 
&bar0->rx_w_round_robin_1); + val64 = 0x0203040506000102ULL; + writeq(val64, &bar0->rx_w_round_robin_2); + val64 = 0x0304050600010203ULL; + writeq(val64, &bar0->rx_w_round_robin_3); + val64 = 0x0405060000000000ULL; + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8080402010080402ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + case 8: + val64 = 0x0001020304050607ULL; + writeq(val64, &bar0->rx_w_round_robin_0); + writeq(val64, &bar0->rx_w_round_robin_1); + writeq(val64, &bar0->rx_w_round_robin_2); + writeq(val64, &bar0->rx_w_round_robin_3); + val64 = 0x0001020300000000ULL; + writeq(val64, &bar0->rx_w_round_robin_4); + + val64 = 0x8040201008040201ULL; + writeq(val64, &bar0->rts_qos_steering); + break; + } + + /* UDP Fix */ + val64 = 0; + for (i = 0; i < 8; i++) + writeq(val64, &bar0->rts_frm_len_n[i]); + + /* Set the default rts frame length for the rings configured */ + val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22); + for (i = 0 ; i < config->rx_ring_num ; i++) + writeq(val64, &bar0->rts_frm_len_n[i]); + + /* Set the frame length for the configured rings + * desired by the user + */ + for (i = 0; i < config->rx_ring_num; i++) { + /* If rts_frm_len[i] == 0 then it is assumed that user not + * specified frame length steering. + * If the user provides the frame length then program + * the rts_frm_len register for those values or else + * leave it as it is. + */ + if (rts_frm_len[i] != 0) { + writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]), + &bar0->rts_frm_len_n[i]); + } + } + + /* Disable differentiated services steering logic */ + for (i = 0; i < 64; i++) { + if (rts_ds_steer(nic, i, 0) == FAILURE) { + DBG_PRINT(ERR_DBG, + "%s: rts_ds_steer failed on codepoint %d\n", + dev->name, i); + return -ENODEV; + } + } + + /* Program statistics memory */ + writeq(mac_control->stats_mem_phy, &bar0->stat_addr); + + if (nic->device_type == XFRAME_II_DEVICE) { + val64 = STAT_BC(0x320); + writeq(val64, &bar0->stat_byte_cnt); + } + + /* + * Initializing the sampling rate for the device to calculate the + * bandwidth utilization. + */ + val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) | + MAC_RX_LINK_UTIL_VAL(rmac_util_period); + writeq(val64, &bar0->mac_link_util); + + /* + * Initializing the Transmit and Receive Traffic Interrupt + * Scheme. + */ + + /* Initialize TTI */ + if (SUCCESS != init_tti(nic, nic->last_link_state)) + return -ENODEV; + + /* RTI Initialization */ + if (nic->device_type == XFRAME_II_DEVICE) { + /* + * Programmed to generate Apprx 500 Intrs per + * second + */ + int count = (nic->config.bus_speed * 125)/4; + val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count); + } else + val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); + val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | + RTI_DATA1_MEM_RX_URNG_B(0x10) | + RTI_DATA1_MEM_RX_URNG_C(0x30) | + RTI_DATA1_MEM_RX_TIMER_AC_EN; + + writeq(val64, &bar0->rti_data1_mem); + + val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | + RTI_DATA2_MEM_RX_UFC_B(0x2) ; + if (nic->config.intr_type == MSI_X) + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | + RTI_DATA2_MEM_RX_UFC_D(0x40)); + else + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | + RTI_DATA2_MEM_RX_UFC_D(0x80)); + writeq(val64, &bar0->rti_data2_mem); + + for (i = 0; i < config->rx_ring_num; i++) { + val64 = RTI_CMD_MEM_WE | + RTI_CMD_MEM_STROBE_NEW_CMD | + RTI_CMD_MEM_OFFSET(i); + writeq(val64, &bar0->rti_command_mem); + + /* + * Once the operation completes, the Strobe bit of the + * command register will be reset. We poll for this + * particular condition. 
We wait for a maximum of 500ms + * for the operation to complete, if it's not complete + * by then we return error. + */ + time = 0; + while (true) { + val64 = readq(&bar0->rti_command_mem); + if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) + break; + + if (time > 10) { + DBG_PRINT(ERR_DBG, "%s: RTI init failed\n", + dev->name); + return -ENODEV; + } + time++; + msleep(50); + } + } + + /* + * Initializing proper values as Pause threshold into all + * the 8 Queues on Rx side. + */ + writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3); + writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7); + + /* Disable RMAC PAD STRIPPING */ + add = &bar0->mac_cfg; + val64 = readq(&bar0->mac_cfg); + val64 &= ~(MAC_CFG_RMAC_STRIP_PAD); + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32) (val64), add); + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32) (val64 >> 32), (add + 4)); + val64 = readq(&bar0->mac_cfg); + + /* Enable FCS stripping by adapter */ + add = &bar0->mac_cfg; + val64 = readq(&bar0->mac_cfg); + val64 |= MAC_CFG_RMAC_STRIP_FCS; + if (nic->device_type == XFRAME_II_DEVICE) + writeq(val64, &bar0->mac_cfg); + else { + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32) (val64), add); + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32) (val64 >> 32), (add + 4)); + } + + /* + * Set the time value to be inserted in the pause frame + * generated by xena. + */ + val64 = readq(&bar0->rmac_pause_cfg); + val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff)); + val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time); + writeq(val64, &bar0->rmac_pause_cfg); + + /* + * Set the Threshold Limit for Generating the pause frame + * If the amount of data in any Queue exceeds ratio of + * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 + * pause frame is generated + */ + val64 = 0; + for (i = 0; i < 4; i++) { + val64 |= (((u64)0xFF00 | + nic->mac_control.mc_pause_threshold_q0q3) + << (i * 2 * 8)); + } + writeq(val64, &bar0->mc_pause_thresh_q0q3); + + val64 = 0; + for (i = 0; i < 4; i++) { + val64 |= (((u64)0xFF00 | + nic->mac_control.mc_pause_threshold_q4q7) + << (i * 2 * 8)); + } + writeq(val64, &bar0->mc_pause_thresh_q4q7); + + /* + * TxDMA will stop Read request if the number of read split has + * exceeded the limit pointed by shared_splits + */ + val64 = readq(&bar0->pic_control); + val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits); + writeq(val64, &bar0->pic_control); + + if (nic->config.bus_speed == 266) { + writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout); + writeq(0x0, &bar0->read_retry_delay); + writeq(0x0, &bar0->write_retry_delay); + } + + /* + * Programming the Herc to split every write transaction + * that does not start on an ADB to reduce disconnects. 
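Editor's aside: the pause-threshold setup above builds one 64-bit register value out of four identical 16-bit lanes, each being 0xFF00 OR'd with the per-queue threshold and shifted into place. A standalone sketch of that packing; with the module-parameter default of 187 declared earlier, the result reproduces the 0xffbbffbbffbbffbb literal written to mc_pause_thresh_q0q3 a few lines above.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int thresh = 187;	/* default mc_pause_threshold_q0q3 (0xBB) */
	uint64_t val64 = 0;

	/* Four queues per register, one 16-bit lane each: bits 15:0, 31:16, ... */
	for (int i = 0; i < 4; i++)
		val64 |= ((uint64_t)(0xFF00 | thresh)) << (i * 16);

	printf("mc_pause_thresh_q0q3 = 0x%016llx\n",
	       (unsigned long long)val64);	/* 0xffbbffbbffbbffbb */
	return 0;
}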
+ */ + if (nic->device_type == XFRAME_II_DEVICE) { + val64 = FAULT_BEHAVIOUR | EXT_REQ_EN | + MISC_LINK_STABILITY_PRD(3); + writeq(val64, &bar0->misc_control); + val64 = readq(&bar0->pic_control2); + val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15)); + writeq(val64, &bar0->pic_control2); + } + if (strstr(nic->product_name, "CX4")) { + val64 = TMAC_AVG_IPG(0x17); + writeq(val64, &bar0->tmac_avg_ipg); + } + + return SUCCESS; +} +#define LINK_UP_DOWN_INTERRUPT 1 +#define MAC_RMAC_ERR_TIMER 2 + +static int s2io_link_fault_indication(struct s2io_nic *nic) +{ + if (nic->device_type == XFRAME_II_DEVICE) + return LINK_UP_DOWN_INTERRUPT; + else + return MAC_RMAC_ERR_TIMER; +} + +/** + * do_s2io_write_bits - update alarm bits in alarm register + * @value: alarm bits + * @flag: interrupt status + * @addr: address value + * Description: update alarm bits in alarm register + * Return Value: + * NONE. + */ +static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr) +{ + u64 temp64; + + temp64 = readq(addr); + + if (flag == ENABLE_INTRS) + temp64 &= ~((u64)value); + else + temp64 |= ((u64)value); + writeq(temp64, addr); +} + +static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 gen_int_mask = 0; + u64 interruptible; + + writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask); + if (mask & TX_DMA_INTR) { + gen_int_mask |= TXDMA_INT_M; + + do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT | + TXDMA_PCC_INT | TXDMA_TTI_INT | + TXDMA_LSO_INT | TXDMA_TPA_INT | + TXDMA_SM_INT, flag, &bar0->txdma_int_mask); + + do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | + PFC_MISC_0_ERR | PFC_MISC_1_ERR | + PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag, + &bar0->pfc_err_mask); + + do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | + TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR | + TDA_PCIX_ERR, flag, &bar0->tda_err_mask); + + do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR | + PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | + PCC_N_SERR | PCC_6_COF_OV_ERR | + PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | + PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR | + PCC_TXB_ECC_SG_ERR, + flag, &bar0->pcc_err_mask); + + do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR | + TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); + + do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT | + LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM | + LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, + flag, &bar0->lso_err_mask); + + do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP, + flag, &bar0->tpa_err_mask); + + do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); + } + + if (mask & TX_MAC_INTR) { + gen_int_mask |= TXMAC_INT_M; + do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag, + &bar0->mac_int_mask); + do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR | + TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | + TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, + flag, &bar0->mac_tmac_err_mask); + } + + if (mask & TX_XGXS_INTR) { + gen_int_mask |= TXXGXS_INT_M; + do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag, + &bar0->xgxs_int_mask); + do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR | + TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, + flag, &bar0->xgxs_txgxs_err_mask); + } + + if (mask & RX_DMA_INTR) { + gen_int_mask |= RXDMA_INT_M; + do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M | + RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M, + flag, &bar0->rxdma_int_mask); + do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR | + RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM | + RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR | + 
RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); + do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn | + PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn | + PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag, + &bar0->prc_pcix_err_mask); + do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR | + RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag, + &bar0->rpa_err_mask); + do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR | + RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM | + RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR | + RDA_FRM_ECC_SG_ERR | + RDA_MISC_ERR|RDA_PCIX_ERR, + flag, &bar0->rda_err_mask); + do_s2io_write_bits(RTI_SM_ERR_ALARM | + RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, + flag, &bar0->rti_err_mask); + } + + if (mask & RX_MAC_INTR) { + gen_int_mask |= RXMAC_INT_M; + do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, + &bar0->mac_int_mask); + interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | + RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | + RMAC_DOUBLE_ECC_ERR); + if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) + interruptible |= RMAC_LINK_STATE_CHANGE_INT; + do_s2io_write_bits(interruptible, + flag, &bar0->mac_rmac_err_mask); + } + + if (mask & RX_XGXS_INTR) { + gen_int_mask |= RXXGXS_INT_M; + do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag, + &bar0->xgxs_int_mask); + do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag, + &bar0->xgxs_rxgxs_err_mask); + } + + if (mask & MC_INTR) { + gen_int_mask |= MC_INT_M; + do_s2io_write_bits(MC_INT_MASK_MC_INT, + flag, &bar0->mc_int_mask); + do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG | + MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag, + &bar0->mc_err_mask); + } + nic->general_int_mask = gen_int_mask; + + /* Remove this line when alarm interrupts are enabled */ + nic->general_int_mask = 0; +} + +/** + * en_dis_able_nic_intrs - Enable or Disable the interrupts + * @nic: device private variable, + * @mask: A mask indicating which Intr block must be modified and, + * @flag: A flag indicating whether to enable or disable the Intrs. + * Description: This function will either disable or enable the interrupts + * depending on the flag argument. The mask argument can be used to + * enable/disable any Intr block. + * Return Value: NONE. + */ + +static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 temp64 = 0, intr_mask = 0; + + intr_mask = nic->general_int_mask; + + /* Top level interrupt classification */ + /* PIC Interrupts */ + if (mask & TX_PIC_INTR) { + /* Enable PIC Intrs in the general intr mask register */ + intr_mask |= TXPIC_INT_M; + if (flag == ENABLE_INTRS) { + /* + * If Hercules adapter enable GPIO otherwise + * disable all PCIX, Flash, MDIO, IIC and GPIO + * interrupts for now. 
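Editor's aside: do_s2io_write_bits() above is a read-modify-write on interrupt *mask* registers, so the polarity is the opposite of what "enable" might suggest: ENABLE_INTRS clears the given bits (presumably unmasking those alarms, as with the other *_mask registers here), and anything else sets them. A userspace model of that helper against a plain variable; the enum values are stand-ins for the driver's ENABLE_INTRS/DISABLE_INTRS constants.

#include <stdio.h>
#include <stdint.h>

enum { ENABLE_INTRS = 1, DISABLE_INTRS = 2 };	/* stand-in values */

/* Model of do_s2io_write_bits(): a mask register where a set bit blocks the alarm. */
static void write_bits(uint64_t value, int flag, uint64_t *mask_reg)
{
	uint64_t tmp = *mask_reg;

	if (flag == ENABLE_INTRS)
		tmp &= ~value;	/* clear mask bits -> those alarms may fire   */
	else
		tmp |= value;	/* set mask bits   -> those alarms are blocked */
	*mask_reg = tmp;
}

int main(void)
{
	uint64_t mask = ~0ULL;	/* everything masked after reset */

	write_bits(0x00F0, ENABLE_INTRS, &mask);
	printf("after enable : 0x%016llx\n", (unsigned long long)mask);
	write_bits(0x00F0, DISABLE_INTRS, &mask);
	printf("after disable: 0x%016llx\n", (unsigned long long)mask);
	return 0;
}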
+ * TODO + */ + if (s2io_link_fault_indication(nic) == + LINK_UP_DOWN_INTERRUPT) { + do_s2io_write_bits(PIC_INT_GPIO, flag, + &bar0->pic_int_mask); + do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag, + &bar0->gpio_int_mask); + } else + writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); + } else if (flag == DISABLE_INTRS) { + /* + * Disable PIC Intrs in the general + * intr mask register + */ + writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); + } + } + + /* Tx traffic interrupts */ + if (mask & TX_TRAFFIC_INTR) { + intr_mask |= TXTRAFFIC_INT_M; + if (flag == ENABLE_INTRS) { + /* + * Enable all the Tx side interrupts + * writing 0 Enables all 64 TX interrupt levels + */ + writeq(0x0, &bar0->tx_traffic_mask); + } else if (flag == DISABLE_INTRS) { + /* + * Disable Tx Traffic Intrs in the general intr mask + * register. + */ + writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); + } + } + + /* Rx traffic interrupts */ + if (mask & RX_TRAFFIC_INTR) { + intr_mask |= RXTRAFFIC_INT_M; + if (flag == ENABLE_INTRS) { + /* writing 0 Enables all 8 RX interrupt levels */ + writeq(0x0, &bar0->rx_traffic_mask); + } else if (flag == DISABLE_INTRS) { + /* + * Disable Rx Traffic Intrs in the general intr mask + * register. + */ + writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); + } + } + + temp64 = readq(&bar0->general_int_mask); + if (flag == ENABLE_INTRS) + temp64 &= ~((u64)intr_mask); + else + temp64 = DISABLE_ALL_INTRS; + writeq(temp64, &bar0->general_int_mask); + + nic->general_int_mask = readq(&bar0->general_int_mask); +} + +/** + * verify_pcc_quiescent- Checks for PCC quiescent state + * Return: 1 If PCC is quiescence + * 0 If PCC is not quiescence + */ +static int verify_pcc_quiescent(struct s2io_nic *sp, int flag) +{ + int ret = 0, herc; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64 = readq(&bar0->adapter_status); + + herc = (sp->device_type == XFRAME_II_DEVICE); + + if (flag == false) { + if ((!herc && (sp->pdev->revision >= 4)) || herc) { + if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE)) + ret = 1; + } else { + if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE)) + ret = 1; + } + } else { + if ((!herc && (sp->pdev->revision >= 4)) || herc) { + if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) == + ADAPTER_STATUS_RMAC_PCC_IDLE)) + ret = 1; + } else { + if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) == + ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE)) + ret = 1; + } + } + + return ret; +} +/** + * verify_xena_quiescence - Checks whether the H/W is ready + * Description: Returns whether the H/W is ready to go or not. Depending + * on whether adapter enable bit was written or not the comparison + * differs and the calling function passes the input argument flag to + * indicate this. 
+ * Return: 1 If xena is quiescence + * 0 If Xena is not quiescence + */ + +static int verify_xena_quiescence(struct s2io_nic *sp) +{ + int mode; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64 = readq(&bar0->adapter_status); + mode = s2io_verify_pci_mode(sp); + + if (!(val64 & ADAPTER_STATUS_TDMA_READY)) { + DBG_PRINT(ERR_DBG, "TDMA is not ready!\n"); + return 0; + } + if (!(val64 & ADAPTER_STATUS_RDMA_READY)) { + DBG_PRINT(ERR_DBG, "RDMA is not ready!\n"); + return 0; + } + if (!(val64 & ADAPTER_STATUS_PFC_READY)) { + DBG_PRINT(ERR_DBG, "PFC is not ready!\n"); + return 0; + } + if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) { + DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n"); + return 0; + } + if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) { + DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n"); + return 0; + } + if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) { + DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n"); + return 0; + } + if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) { + DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n"); + return 0; + } + if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) { + DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n"); + return 0; + } + + /* + * In PCI 33 mode, the P_PLL is not used, and therefore, + * the the P_PLL_LOCK bit in the adapter_status register will + * not be asserted. + */ + if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) && + sp->device_type == XFRAME_II_DEVICE && + mode != PCI_MODE_PCI_33) { + DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n"); + return 0; + } + if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) == + ADAPTER_STATUS_RC_PRC_QUIESCENT)) { + DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n"); + return 0; + } + return 1; +} + +/** + * fix_mac_address - Fix for Mac addr problem on Alpha platforms + * @sp: Pointer to device specifc structure + * Description : + * New procedure to clear mac address reading problems on Alpha platforms + * + */ + +static void fix_mac_address(struct s2io_nic *sp) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + int i = 0; + + while (fix_mac[i] != END_SIGN) { + writeq(fix_mac[i++], &bar0->gpio_control); + udelay(10); + (void) readq(&bar0->gpio_control); + } +} + +/** + * start_nic - Turns the device on + * @nic : device private variable. + * Description: + * This function actually turns the device on. Before this function is + * called,all Registers are configured from their reset states + * and shared memory is allocated but the NIC is still quiescent. On + * calling this function, the device interrupts are cleared and the NIC is + * literally switched on by writing into the adapter control register. + * Return Value: + * SUCCESS on success and -1 on failure. 
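+ * The enable sequence below is: program the PRC ring base addresses
+ * and control bits, set up rx_pa_cfg, bring up MC-RLDRAM and allow
+ * roughly 100 ms for it to settle, verify readiness with
+ * verify_xena_quiescence(), and only then write ADAPTER_EOI_TX_ON to
+ * switch the laser on.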
+ */ + +static int start_nic(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + struct net_device *dev = nic->dev; + register u64 val64 = 0; + u16 subid, i; + struct config_param *config = &nic->config; + struct mac_info *mac_control = &nic->mac_control; + + /* PRC Initialization and configuration */ + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + writeq((u64)ring->rx_blocks[0].block_dma_addr, + &bar0->prc_rxd0_n[i]); + + val64 = readq(&bar0->prc_ctrl_n[i]); + if (nic->rxd_mode == RXD_MODE_1) + val64 |= PRC_CTRL_RC_ENABLED; + else + val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; + if (nic->device_type == XFRAME_II_DEVICE) + val64 |= PRC_CTRL_GROUP_READS; + val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF); + val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000); + writeq(val64, &bar0->prc_ctrl_n[i]); + } + + if (nic->rxd_mode == RXD_MODE_3B) { + /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */ + val64 = readq(&bar0->rx_pa_cfg); + val64 |= RX_PA_CFG_IGNORE_L2_ERR; + writeq(val64, &bar0->rx_pa_cfg); + } + + if (vlan_tag_strip == 0) { + val64 = readq(&bar0->rx_pa_cfg); + val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; + writeq(val64, &bar0->rx_pa_cfg); + nic->vlan_strip_flag = 0; + } + + /* + * Enabling MC-RLDRAM. After enabling the device, we timeout + * for around 100ms, which is approximately the time required + * for the device to be ready for operation. + */ + val64 = readq(&bar0->mc_rldram_mrs); + val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); + val64 = readq(&bar0->mc_rldram_mrs); + + msleep(100); /* Delay by around 100 ms. */ + + /* Enabling ECC Protection. */ + val64 = readq(&bar0->adapter_control); + val64 &= ~ADAPTER_ECC_EN; + writeq(val64, &bar0->adapter_control); + + /* + * Verify if the device is ready to be enabled, if so enable + * it. + */ + val64 = readq(&bar0->adapter_status); + if (!verify_xena_quiescence(nic)) { + DBG_PRINT(ERR_DBG, "%s: device is not ready, " + "Adapter status reads: 0x%llx\n", + dev->name, (unsigned long long)val64); + return FAILURE; + } + + /* + * With some switches, link might be already up at this point. + * Because of this weird behavior, when we enable laser, + * we may not get link. We need to handle this. We cannot + * figure out which switch is misbehaving. So we are forced to + * make a global change. + */ + + /* Enabling Laser. */ + val64 = readq(&bar0->adapter_control); + val64 |= ADAPTER_EOI_TX_ON; + writeq(val64, &bar0->adapter_control); + + if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { + /* + * Dont see link state interrupts initially on some switches, + * so directly scheduling the link state task here. 
+ */ + schedule_work(&nic->set_link_task); + } + /* SXE-002: Initialize link and activity LED */ + subid = nic->pdev->subsystem_device; + if (((subid & 0xFF) >= 0x07) && + (nic->device_type == XFRAME_I_DEVICE)) { + val64 = readq(&bar0->gpio_control); + val64 |= 0x0000800000000000ULL; + writeq(val64, &bar0->gpio_control); + val64 = 0x0411040400000000ULL; + writeq(val64, (void __iomem *)bar0 + 0x2700); + } + + return SUCCESS; +} +/** + * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb + */ +static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, + struct TxD *txdlp, int get_off) +{ + struct s2io_nic *nic = fifo_data->nic; + struct sk_buff *skb; + struct TxD *txds; + u16 j, frg_cnt; + + txds = txdlp; + if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { + pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, + sizeof(u64), PCI_DMA_TODEVICE); + txds++; + } + + skb = (struct sk_buff *)((unsigned long)txds->Host_Control); + if (!skb) { + memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); + return NULL; + } + pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, + skb_headlen(skb), PCI_DMA_TODEVICE); + frg_cnt = skb_shinfo(skb)->nr_frags; + if (frg_cnt) { + txds++; + for (j = 0; j < frg_cnt; j++, txds++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; + if (!txds->Buffer_Pointer) + break; + pci_unmap_page(nic->pdev, + (dma_addr_t)txds->Buffer_Pointer, + skb_frag_size(frag), PCI_DMA_TODEVICE); + } + } + memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); + return skb; +} + +/** + * free_tx_buffers - Free all queued Tx buffers + * @nic : device private variable. + * Description: + * Free all queued Tx buffers. + * Return Value: void + */ + +static void free_tx_buffers(struct s2io_nic *nic) +{ + struct net_device *dev = nic->dev; + struct sk_buff *skb; + struct TxD *txdp; + int i, j; + int cnt = 0; + struct config_param *config = &nic->config; + struct mac_info *mac_control = &nic->mac_control; + struct stat_block *stats = mac_control->stats_info; + struct swStat *swstats = &stats->sw_stat; + + for (i = 0; i < config->tx_fifo_num; i++) { + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + struct fifo_info *fifo = &mac_control->fifos[i]; + unsigned long flags; + + spin_lock_irqsave(&fifo->tx_lock, flags); + for (j = 0; j < tx_cfg->fifo_len; j++) { + txdp = fifo->list_info[j].list_virt_addr; + skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); + if (skb) { + swstats->mem_freed += skb->truesize; + dev_kfree_skb(skb); + cnt++; + } + } + DBG_PRINT(INTR_DBG, + "%s: forcibly freeing %d skbs on FIFO%d\n", + dev->name, cnt, i); + fifo->tx_curr_get_info.offset = 0; + fifo->tx_curr_put_info.offset = 0; + spin_unlock_irqrestore(&fifo->tx_lock, flags); + } +} + +/** + * stop_nic - To stop the nic + * @nic ; device private variable. + * Description: + * This function does exactly the opposite of what the start_nic() + * function does. This function is called to stop the device. + * Return Value: + * void. 
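+ * It masks all alarm interrupts plus the Tx/Rx traffic and Tx PIC
+ * interrupts and then clears ADAPTER_CNTL_EN; the Tx/Rx buffers
+ * themselves are not freed here.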
+ */ + +static void stop_nic(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 val64 = 0; + u16 interruptible; + + /* Disable all interrupts */ + en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS); + interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; + interruptible |= TX_PIC_INTR; + en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); + + /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */ + val64 = readq(&bar0->adapter_control); + val64 &= ~(ADAPTER_CNTL_EN); + writeq(val64, &bar0->adapter_control); +} + +/** + * fill_rx_buffers - Allocates the Rx side skbs + * @ring_info: per ring structure + * @from_card_up: If this is true, we will map the buffer to get + * the dma address for buf0 and buf1 to give it to the card. + * Else we will sync the already mapped buffer to give it to the card. + * Description: + * The function allocates Rx side skbs and puts the physical + * address of these buffers into the RxD buffer pointers, so that the NIC + * can DMA the received frame into these locations. + * The NIC supports 3 receive modes, viz + * 1. single buffer, + * 2. three buffer and + * 3. Five buffer modes. + * Each mode defines how many fragments the received frame will be split + * up into by the NIC. The frame is split into L3 header, L4 Header, + * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself + * is split into 3 fragments. As of now only single buffer mode is + * supported. + * Return Value: + * SUCCESS on success or an appropriate -ve value on failure. + */ +static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, + int from_card_up) +{ + struct sk_buff *skb; + struct RxD_t *rxdp; + int off, size, block_no, block_no1; + u32 alloc_tab = 0; + u32 alloc_cnt; + u64 tmp; + struct buffAdd *ba; + struct RxD_t *first_rxdp = NULL; + u64 Buffer0_ptr = 0, Buffer1_ptr = 0; + int rxd_index = 0; + struct RxD1 *rxdp1; + struct RxD3 *rxdp3; + struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat; + + alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left; + + block_no1 = ring->rx_curr_get_info.block_index; + while (alloc_tab < alloc_cnt) { + block_no = ring->rx_curr_put_info.block_index; + + off = ring->rx_curr_put_info.offset; + + rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr; + + rxd_index = off + 1; + if (block_no) + rxd_index += (block_no * ring->rxd_count); + + if ((block_no == block_no1) && + (off == ring->rx_curr_get_info.offset) && + (rxdp->Host_Control)) { + DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n", + ring->dev->name); + goto end; + } + if (off && (off == ring->rxd_count)) { + ring->rx_curr_put_info.block_index++; + if (ring->rx_curr_put_info.block_index == + ring->block_count) + ring->rx_curr_put_info.block_index = 0; + block_no = ring->rx_curr_put_info.block_index; + off = 0; + ring->rx_curr_put_info.offset = off; + rxdp = ring->rx_blocks[block_no].block_virt_addr; + DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", + ring->dev->name, rxdp); + + } + + if ((rxdp->Control_1 & RXD_OWN_XENA) && + ((ring->rxd_mode == RXD_MODE_3B) && + (rxdp->Control_2 & s2BIT(0)))) { + ring->rx_curr_put_info.offset = off; + goto end; + } + /* calculate size of skb based on ring mode */ + size = ring->mtu + + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + HEADER_SNAP_SIZE; + if (ring->rxd_mode == RXD_MODE_1) + size += NET_IP_ALIGN; + else + size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4; + + /* allocate skb */ + skb = netdev_alloc_skb(nic->dev, size); + if (!skb) { + DBG_PRINT(INFO_DBG, "%s: 
Could not allocate skb\n", + ring->dev->name); + if (first_rxdp) { + dma_wmb(); + first_rxdp->Control_1 |= RXD_OWN_XENA; + } + swstats->mem_alloc_fail_cnt++; + + return -ENOMEM ; + } + swstats->mem_allocated += skb->truesize; + + if (ring->rxd_mode == RXD_MODE_1) { + /* 1 buffer mode - normal operation mode */ + rxdp1 = (struct RxD1 *)rxdp; + memset(rxdp, 0, sizeof(struct RxD1)); + skb_reserve(skb, NET_IP_ALIGN); + rxdp1->Buffer0_ptr = + pci_map_single(ring->pdev, skb->data, + size - NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(nic->pdev, + rxdp1->Buffer0_ptr)) + goto pci_map_failed; + + rxdp->Control_2 = + SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); + rxdp->Host_Control = (unsigned long)skb; + } else if (ring->rxd_mode == RXD_MODE_3B) { + /* + * 2 buffer mode - + * 2 buffer mode provides 128 + * byte aligned receive buffers. + */ + + rxdp3 = (struct RxD3 *)rxdp; + /* save buffer pointers to avoid frequent dma mapping */ + Buffer0_ptr = rxdp3->Buffer0_ptr; + Buffer1_ptr = rxdp3->Buffer1_ptr; + memset(rxdp, 0, sizeof(struct RxD3)); + /* restore the buffer pointers for dma sync*/ + rxdp3->Buffer0_ptr = Buffer0_ptr; + rxdp3->Buffer1_ptr = Buffer1_ptr; + + ba = &ring->ba[block_no][off]; + skb_reserve(skb, BUF0_LEN); + tmp = (u64)(unsigned long)skb->data; + tmp += ALIGN_SIZE; + tmp &= ~ALIGN_SIZE; + skb->data = (void *) (unsigned long)tmp; + skb_reset_tail_pointer(skb); + + if (from_card_up) { + rxdp3->Buffer0_ptr = + pci_map_single(ring->pdev, ba->ba_0, + BUF0_LEN, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer0_ptr)) + goto pci_map_failed; + } else + pci_dma_sync_single_for_device(ring->pdev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, + PCI_DMA_FROMDEVICE); + + rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); + if (ring->rxd_mode == RXD_MODE_3B) { + /* Two buffer mode */ + + /* + * Buffer2 will have L3/L4 header plus + * L4 payload + */ + rxdp3->Buffer2_ptr = pci_map_single(ring->pdev, + skb->data, + ring->mtu + 4, + PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer2_ptr)) + goto pci_map_failed; + + if (from_card_up) { + rxdp3->Buffer1_ptr = + pci_map_single(ring->pdev, + ba->ba_1, + BUF1_LEN, + PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(nic->pdev, + rxdp3->Buffer1_ptr)) { + pci_unmap_single(ring->pdev, + (dma_addr_t)(unsigned long) + skb->data, + ring->mtu + 4, + PCI_DMA_FROMDEVICE); + goto pci_map_failed; + } + } + rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); + rxdp->Control_2 |= SET_BUFFER2_SIZE_3 + (ring->mtu + 4); + } + rxdp->Control_2 |= s2BIT(0); + rxdp->Host_Control = (unsigned long) (skb); + } + if (alloc_tab & ((1 << rxsync_frequency) - 1)) + rxdp->Control_1 |= RXD_OWN_XENA; + off++; + if (off == (ring->rxd_count + 1)) + off = 0; + ring->rx_curr_put_info.offset = off; + + rxdp->Control_2 |= SET_RXD_MARKER; + if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { + if (first_rxdp) { + dma_wmb(); + first_rxdp->Control_1 |= RXD_OWN_XENA; + } + first_rxdp = rxdp; + } + ring->rx_bufs_left += 1; + alloc_tab++; + } + +end: + /* Transfer ownership of first descriptor to adapter just before + * exiting. Before that, use memory barrier so that ownership + * and other fields are seen by adapter correctly. 
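+ * (dma_wmb() orders all earlier descriptor and buffer writes ahead of
+ * the RXD_OWN_XENA store that hands the descriptor to the adapter.)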
+ */ + if (first_rxdp) { + dma_wmb(); + first_rxdp->Control_1 |= RXD_OWN_XENA; + } + + return SUCCESS; + +pci_map_failed: + swstats->pci_map_fail_cnt++; + swstats->mem_freed += skb->truesize; + dev_kfree_skb_irq(skb); + return -ENOMEM; +} + +static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) +{ + struct net_device *dev = sp->dev; + int j; + struct sk_buff *skb; + struct RxD_t *rxdp; + struct RxD1 *rxdp1; + struct RxD3 *rxdp3; + struct mac_info *mac_control = &sp->mac_control; + struct stat_block *stats = mac_control->stats_info; + struct swStat *swstats = &stats->sw_stat; + + for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { + rxdp = mac_control->rings[ring_no]. + rx_blocks[blk].rxds[j].virt_addr; + skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); + if (!skb) + continue; + if (sp->rxd_mode == RXD_MODE_1) { + rxdp1 = (struct RxD1 *)rxdp; + pci_unmap_single(sp->pdev, + (dma_addr_t)rxdp1->Buffer0_ptr, + dev->mtu + + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + HEADER_SNAP_SIZE, + PCI_DMA_FROMDEVICE); + memset(rxdp, 0, sizeof(struct RxD1)); + } else if (sp->rxd_mode == RXD_MODE_3B) { + rxdp3 = (struct RxD3 *)rxdp; + pci_unmap_single(sp->pdev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, + PCI_DMA_FROMDEVICE); + pci_unmap_single(sp->pdev, + (dma_addr_t)rxdp3->Buffer1_ptr, + BUF1_LEN, + PCI_DMA_FROMDEVICE); + pci_unmap_single(sp->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); + memset(rxdp, 0, sizeof(struct RxD3)); + } + swstats->mem_freed += skb->truesize; + dev_kfree_skb(skb); + mac_control->rings[ring_no].rx_bufs_left -= 1; + } +} + +/** + * free_rx_buffers - Frees all Rx buffers + * @sp: device private variable. + * Description: + * This function will free all Rx buffers allocated by host. + * Return Value: + * NONE. + */ + +static void free_rx_buffers(struct s2io_nic *sp) +{ + struct net_device *dev = sp->dev; + int i, blk = 0, buf_cnt = 0; + struct config_param *config = &sp->config; + struct mac_info *mac_control = &sp->mac_control; + + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + for (blk = 0; blk < rx_ring_sz[i]; blk++) + free_rxd_blk(sp, i, blk); + + ring->rx_curr_put_info.block_index = 0; + ring->rx_curr_get_info.block_index = 0; + ring->rx_curr_put_info.offset = 0; + ring->rx_curr_get_info.offset = 0; + ring->rx_bufs_left = 0; + DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n", + dev->name, buf_cnt, i); + } +} + +static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring) +{ + if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { + DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n", + ring->dev->name); + } + return 0; +} + +/** + * s2io_poll - Rx interrupt handler for NAPI support + * @napi : pointer to the napi structure. + * @budget : The number of packets that were budgeted to be processed + * during one pass through the 'Poll" function. + * Description: + * Comes into picture only if NAPI support has been incorporated. It does + * the same thing that rx_intr_handler does, but not in a interrupt context + * also It will process only a given number of packets. + * Return value: + * 0 on success and 1 if there are No Rx packets to be processed. 
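+ * When fewer packets than the budget are processed, napi_complete() is
+ * called and the Rx interrupt source is re-enabled (the ring's MSI-X
+ * vector via xmsi_mask_reg here, or rx_traffic_mask in the INTA
+ * variant); otherwise it stays masked and NAPI polls again.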
+ */ + +static int s2io_poll_msix(struct napi_struct *napi, int budget) +{ + struct ring_info *ring = container_of(napi, struct ring_info, napi); + struct net_device *dev = ring->dev; + int pkts_processed = 0; + u8 __iomem *addr = NULL; + u8 val8 = 0; + struct s2io_nic *nic = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = nic->bar0; + int budget_org = budget; + + if (unlikely(!is_s2io_card_up(nic))) + return 0; + + pkts_processed = rx_intr_handler(ring, budget); + s2io_chk_rx_buffers(nic, ring); + + if (pkts_processed < budget_org) { + napi_complete(napi); + /*Re Enable MSI-Rx Vector*/ + addr = (u8 __iomem *)&bar0->xmsi_mask_reg; + addr += 7 - ring->ring_no; + val8 = (ring->ring_no == 0) ? 0x3f : 0xbf; + writeb(val8, addr); + val8 = readb(addr); + } + return pkts_processed; +} + +static int s2io_poll_inta(struct napi_struct *napi, int budget) +{ + struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); + int pkts_processed = 0; + int ring_pkts_processed, i; + struct XENA_dev_config __iomem *bar0 = nic->bar0; + int budget_org = budget; + struct config_param *config = &nic->config; + struct mac_info *mac_control = &nic->mac_control; + + if (unlikely(!is_s2io_card_up(nic))) + return 0; + + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + ring_pkts_processed = rx_intr_handler(ring, budget); + s2io_chk_rx_buffers(nic, ring); + pkts_processed += ring_pkts_processed; + budget -= ring_pkts_processed; + if (budget <= 0) + break; + } + if (pkts_processed < budget_org) { + napi_complete(napi); + /* Re enable the Rx interrupts for the ring */ + writeq(0, &bar0->rx_traffic_mask); + readl(&bar0->rx_traffic_mask); + } + return pkts_processed; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * s2io_netpoll - netpoll event handler entry point + * @dev : pointer to the device structure. + * Description: + * This function will be called by upper layer to check for events on the + * interface in situations where interrupts are disabled. It is used for + * specific in-kernel networking tasks, such as remote consoles and kernel + * debugging over the network (example netdump in RedHat). + */ +static void s2io_netpoll(struct net_device *dev) +{ + struct s2io_nic *nic = netdev_priv(dev); + const int irq = nic->pdev->irq; + struct XENA_dev_config __iomem *bar0 = nic->bar0; + u64 val64 = 0xFFFFFFFFFFFFFFFFULL; + int i; + struct config_param *config = &nic->config; + struct mac_info *mac_control = &nic->mac_control; + + if (pci_channel_offline(nic->pdev)) + return; + + disable_irq(irq); + + writeq(val64, &bar0->rx_traffic_int); + writeq(val64, &bar0->tx_traffic_int); + + /* we need to free up the transmitted skbufs or else netpoll will + * run out of skbs and will fail and eventually netpoll application such + * as netdump will fail. + */ + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&mac_control->fifos[i]); + + /* check for received packet and indicate up to network */ + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + rx_intr_handler(ring, 0); + } + + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { + DBG_PRINT(INFO_DBG, + "%s: Out of memory in Rx Netpoll!!\n", + dev->name); + break; + } + } + enable_irq(irq); +} +#endif + +/** + * rx_intr_handler - Rx interrupt handler + * @ring_info: per ring structure. + * @budget: budget for napi processing. 
+ * Description: + * If the interrupt is because of a received frame or if the + * receive ring contains fresh as yet un-processed frames,this function is + * called. It picks out the RxD at which place the last Rx processing had + * stopped and sends the skb to the OSM's Rx handler and then increments + * the offset. + * Return Value: + * No. of napi packets processed. + */ +static int rx_intr_handler(struct ring_info *ring_data, int budget) +{ + int get_block, put_block; + struct rx_curr_get_info get_info, put_info; + struct RxD_t *rxdp; + struct sk_buff *skb; + int pkt_cnt = 0, napi_pkts = 0; + int i; + struct RxD1 *rxdp1; + struct RxD3 *rxdp3; + + if (budget <= 0) + return napi_pkts; + + get_info = ring_data->rx_curr_get_info; + get_block = get_info.block_index; + memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); + put_block = put_info.block_index; + rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; + + while (RXD_IS_UP2DT(rxdp)) { + /* + * If your are next to put index then it's + * FIFO full condition + */ + if ((get_block == put_block) && + (get_info.offset + 1) == put_info.offset) { + DBG_PRINT(INTR_DBG, "%s: Ring Full\n", + ring_data->dev->name); + break; + } + skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control); + if (skb == NULL) { + DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n", + ring_data->dev->name); + return 0; + } + if (ring_data->rxd_mode == RXD_MODE_1) { + rxdp1 = (struct RxD1 *)rxdp; + pci_unmap_single(ring_data->pdev, (dma_addr_t) + rxdp1->Buffer0_ptr, + ring_data->mtu + + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + + HEADER_SNAP_SIZE, + PCI_DMA_FROMDEVICE); + } else if (ring_data->rxd_mode == RXD_MODE_3B) { + rxdp3 = (struct RxD3 *)rxdp; + pci_dma_sync_single_for_cpu(ring_data->pdev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, + PCI_DMA_FROMDEVICE); + pci_unmap_single(ring_data->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + ring_data->mtu + 4, + PCI_DMA_FROMDEVICE); + } + prefetch(skb->data); + rx_osm_handler(ring_data, rxdp); + get_info.offset++; + ring_data->rx_curr_get_info.offset = get_info.offset; + rxdp = ring_data->rx_blocks[get_block]. + rxds[get_info.offset].virt_addr; + if (get_info.offset == rxd_count[ring_data->rxd_mode]) { + get_info.offset = 0; + ring_data->rx_curr_get_info.offset = get_info.offset; + get_block++; + if (get_block == ring_data->block_count) + get_block = 0; + ring_data->rx_curr_get_info.block_index = get_block; + rxdp = ring_data->rx_blocks[get_block].block_virt_addr; + } + + if (ring_data->nic->config.napi) { + budget--; + napi_pkts++; + if (!budget) + break; + } + pkt_cnt++; + if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) + break; + } + if (ring_data->lro) { + /* Clear all LRO sessions before exiting */ + for (i = 0; i < MAX_LRO_SESSIONS; i++) { + struct lro *lro = &ring_data->lro0_n[i]; + if (lro->in_use) { + update_L3L4_header(ring_data->nic, lro); + queue_rx_frame(lro->parent, lro->vlan_tag); + clear_lro_session(lro); + } + } + } + return napi_pkts; +} + +/** + * tx_intr_handler - Transmit interrupt handler + * @nic : device private variable + * Description: + * If an interrupt was raised to indicate DMA complete of the + * Tx packet, this function is called. It identifies the last TxD + * whose buffer was freed and frees all skbs whose data have already + * DMA'ed into the NICs internal memory. 
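+ * The handler walks the list from tx_curr_get_info, stopping at the
+ * first TxD that is still owned by the NIC (TXD_LIST_OWN_XENA set) or
+ * at the put pointer, records t_code error statistics, unmaps and
+ * frees the skbs and finally wakes the Tx queue. It returns early if
+ * the fifo tx_lock cannot be taken with spin_trylock_irqsave().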
+ * Return Value: + * NONE + */ + +static void tx_intr_handler(struct fifo_info *fifo_data) +{ + struct s2io_nic *nic = fifo_data->nic; + struct tx_curr_get_info get_info, put_info; + struct sk_buff *skb = NULL; + struct TxD *txdlp; + int pkt_cnt = 0; + unsigned long flags = 0; + u8 err_mask; + struct stat_block *stats = nic->mac_control.stats_info; + struct swStat *swstats = &stats->sw_stat; + + if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags)) + return; + + get_info = fifo_data->tx_curr_get_info; + memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info)); + txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; + while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && + (get_info.offset != put_info.offset) && + (txdlp->Host_Control)) { + /* Check for TxD errors */ + if (txdlp->Control_1 & TXD_T_CODE) { + unsigned long long err; + err = txdlp->Control_1 & TXD_T_CODE; + if (err & 0x1) { + swstats->parity_err_cnt++; + } + + /* update t_code statistics */ + err_mask = err >> 48; + switch (err_mask) { + case 2: + swstats->tx_buf_abort_cnt++; + break; + + case 3: + swstats->tx_desc_abort_cnt++; + break; + + case 7: + swstats->tx_parity_err_cnt++; + break; + + case 10: + swstats->tx_link_loss_cnt++; + break; + + case 15: + swstats->tx_list_proc_err_cnt++; + break; + } + } + + skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset); + if (skb == NULL) { + spin_unlock_irqrestore(&fifo_data->tx_lock, flags); + DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n", + __func__); + return; + } + pkt_cnt++; + + /* Updating the statistics block */ + swstats->mem_freed += skb->truesize; + dev_kfree_skb_irq(skb); + + get_info.offset++; + if (get_info.offset == get_info.fifo_len + 1) + get_info.offset = 0; + txdlp = fifo_data->list_info[get_info.offset].list_virt_addr; + fifo_data->tx_curr_get_info.offset = get_info.offset; + } + + s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq); + + spin_unlock_irqrestore(&fifo_data->tx_lock, flags); +} + +/** + * s2io_mdio_write - Function to write in to MDIO registers + * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS) + * @addr : address value + * @value : data value + * @dev : pointer to net_device structure + * Description: + * This function is used to write values to the MDIO registers + * NONE + */ +static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, + struct net_device *dev) +{ + u64 val64; + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + /* address transaction */ + val64 = MDIO_MMD_INDX_ADDR(addr) | + MDIO_MMD_DEV_ADDR(mmd_type) | + MDIO_MMS_PRT_ADDR(0x0); + writeq(val64, &bar0->mdio_control); + val64 = val64 | MDIO_CTRL_START_TRANS(0xE); + writeq(val64, &bar0->mdio_control); + udelay(100); + + /* Data transaction */ + val64 = MDIO_MMD_INDX_ADDR(addr) | + MDIO_MMD_DEV_ADDR(mmd_type) | + MDIO_MMS_PRT_ADDR(0x0) | + MDIO_MDIO_DATA(value) | + MDIO_OP(MDIO_OP_WRITE_TRANS); + writeq(val64, &bar0->mdio_control); + val64 = val64 | MDIO_CTRL_START_TRANS(0xE); + writeq(val64, &bar0->mdio_control); + udelay(100); + + val64 = MDIO_MMD_INDX_ADDR(addr) | + MDIO_MMD_DEV_ADDR(mmd_type) | + MDIO_MMS_PRT_ADDR(0x0) | + MDIO_OP(MDIO_OP_READ_TRANS); + writeq(val64, &bar0->mdio_control); + val64 = val64 | MDIO_CTRL_START_TRANS(0xE); + writeq(val64, &bar0->mdio_control); + udelay(100); +} + +/** + * s2io_mdio_read - Function to write in to MDIO registers + * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS) + * @addr : address value + * @dev : pointer to net_device structure + * Description: + * 
This function is used to read values to the MDIO registers + * NONE + */ +static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev) +{ + u64 val64 = 0x0; + u64 rval64 = 0x0; + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + /* address transaction */ + val64 = val64 | (MDIO_MMD_INDX_ADDR(addr) + | MDIO_MMD_DEV_ADDR(mmd_type) + | MDIO_MMS_PRT_ADDR(0x0)); + writeq(val64, &bar0->mdio_control); + val64 = val64 | MDIO_CTRL_START_TRANS(0xE); + writeq(val64, &bar0->mdio_control); + udelay(100); + + /* Data transaction */ + val64 = MDIO_MMD_INDX_ADDR(addr) | + MDIO_MMD_DEV_ADDR(mmd_type) | + MDIO_MMS_PRT_ADDR(0x0) | + MDIO_OP(MDIO_OP_READ_TRANS); + writeq(val64, &bar0->mdio_control); + val64 = val64 | MDIO_CTRL_START_TRANS(0xE); + writeq(val64, &bar0->mdio_control); + udelay(100); + + /* Read the value from regs */ + rval64 = readq(&bar0->mdio_control); + rval64 = rval64 & 0xFFFF0000; + rval64 = rval64 >> 16; + return rval64; +} + +/** + * s2io_chk_xpak_counter - Function to check the status of the xpak counters + * @counter : counter value to be updated + * @flag : flag to indicate the status + * @type : counter type + * Description: + * This function is to check the status of the xpak counters value + * NONE + */ + +static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, + u16 flag, u16 type) +{ + u64 mask = 0x3; + u64 val64; + int i; + for (i = 0; i < index; i++) + mask = mask << 0x2; + + if (flag > 0) { + *counter = *counter + 1; + val64 = *regs_stat & mask; + val64 = val64 >> (index * 0x2); + val64 = val64 + 1; + if (val64 == 3) { + switch (type) { + case 1: + DBG_PRINT(ERR_DBG, + "Take Xframe NIC out of service.\n"); + DBG_PRINT(ERR_DBG, +"Excessive temperatures may result in premature transceiver failure.\n"); + break; + case 2: + DBG_PRINT(ERR_DBG, + "Take Xframe NIC out of service.\n"); + DBG_PRINT(ERR_DBG, +"Excessive bias currents may indicate imminent laser diode failure.\n"); + break; + case 3: + DBG_PRINT(ERR_DBG, + "Take Xframe NIC out of service.\n"); + DBG_PRINT(ERR_DBG, +"Excessive laser output power may saturate far-end receiver.\n"); + break; + default: + DBG_PRINT(ERR_DBG, + "Incorrect XPAK Alarm type\n"); + } + val64 = 0x0; + } + val64 = val64 << (index * 0x2); + *regs_stat = (*regs_stat & (~mask)) | (val64); + + } else { + *regs_stat = *regs_stat & (~mask); + } +} + +/** + * s2io_updt_xpak_counter - Function to update the xpak counters + * @dev : pointer to net_device struct + * Description: + * This function is to upate the status of the xpak counters value + * NONE + */ +static void s2io_updt_xpak_counter(struct net_device *dev) +{ + u16 flag = 0x0; + u16 type = 0x0; + u16 val16 = 0x0; + u64 val64 = 0x0; + u64 addr = 0x0; + + struct s2io_nic *sp = netdev_priv(dev); + struct stat_block *stats = sp->mac_control.stats_info; + struct xpakStat *xstats = &stats->xpak_stat; + + /* Check the communication with the MDIO slave */ + addr = MDIO_CTRL1; + val64 = 0x0; + val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); + if ((val64 == 0xFFFF) || (val64 == 0x0000)) { + DBG_PRINT(ERR_DBG, + "ERR: MDIO slave access failed - Returned %llx\n", + (unsigned long long)val64); + return; + } + + /* Check for the expected value of control reg 1 */ + if (val64 != MDIO_CTRL1_SPEED10G) { + DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - " + "Returned: %llx- Expected: 0x%x\n", + (unsigned long long)val64, MDIO_CTRL1_SPEED10G); + return; + } + + /* Loading the DOM register to MDIO register */ + addr = 
0xA100; + s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev); + val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); + + /* Reading the Alarm flags */ + addr = 0xA070; + val64 = 0x0; + val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); + + flag = CHECKBIT(val64, 0x7); + type = 1; + s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high, + &xstats->xpak_regs_stat, + 0x0, flag, type); + + if (CHECKBIT(val64, 0x6)) + xstats->alarm_transceiver_temp_low++; + + flag = CHECKBIT(val64, 0x3); + type = 2; + s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high, + &xstats->xpak_regs_stat, + 0x2, flag, type); + + if (CHECKBIT(val64, 0x2)) + xstats->alarm_laser_bias_current_low++; + + flag = CHECKBIT(val64, 0x1); + type = 3; + s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high, + &xstats->xpak_regs_stat, + 0x4, flag, type); + + if (CHECKBIT(val64, 0x0)) + xstats->alarm_laser_output_power_low++; + + /* Reading the Warning flags */ + addr = 0xA074; + val64 = 0x0; + val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev); + + if (CHECKBIT(val64, 0x7)) + xstats->warn_transceiver_temp_high++; + + if (CHECKBIT(val64, 0x6)) + xstats->warn_transceiver_temp_low++; + + if (CHECKBIT(val64, 0x3)) + xstats->warn_laser_bias_current_high++; + + if (CHECKBIT(val64, 0x2)) + xstats->warn_laser_bias_current_low++; + + if (CHECKBIT(val64, 0x1)) + xstats->warn_laser_output_power_high++; + + if (CHECKBIT(val64, 0x0)) + xstats->warn_laser_output_power_low++; +} + +/** + * wait_for_cmd_complete - waits for a command to complete. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * Description: Function that waits for a command to Write into RMAC + * ADDR DATA registers to be completed and returns either success or + * error depending on whether the command was complete or not. + * Return value: + * SUCCESS on success and FAILURE on failure. + */ + +static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, + int bit_state) +{ + int ret = FAILURE, cnt = 0, delay = 1; + u64 val64; + + if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET)) + return FAILURE; + + do { + val64 = readq(addr); + if (bit_state == S2IO_BIT_RESET) { + if (!(val64 & busy_bit)) { + ret = SUCCESS; + break; + } + } else { + if (val64 & busy_bit) { + ret = SUCCESS; + break; + } + } + + if (in_interrupt()) + mdelay(delay); + else + msleep(delay); + + if (++cnt >= 10) + delay = 50; + } while (cnt < 20); + return ret; +} +/** + * check_pci_device_id - Checks if the device id is supported + * @id : device id + * Description: Function to check if the pci device id is supported by driver. + * Return value: Actual device id if supported else PCI_ANY_ID + */ +static u16 check_pci_device_id(u16 id) +{ + switch (id) { + case PCI_DEVICE_ID_HERC_WIN: + case PCI_DEVICE_ID_HERC_UNI: + return XFRAME_II_DEVICE; + case PCI_DEVICE_ID_S2IO_UNI: + case PCI_DEVICE_ID_S2IO_WIN: + return XFRAME_I_DEVICE; + default: + return PCI_ANY_ID; + } +} + +/** + * s2io_reset - Resets the card. + * @sp : private member of the device structure. + * Description: Function to Reset the card. This function then also + * restores the previously saved PCI configuration space registers as + * the card reset also resets the configuration space. + * Return value: + * void. 
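+ * After writing SW_RESET_ALL the PCI configuration space is restored
+ * and the device id re-read (up to S2IO_MAX_PCI_CONFIG_SPACE_REINIT
+ * attempts) to confirm the card came back, the swapper, unicast/MC
+ * entries and MSI-X table are re-programmed, and the link, memory and
+ * watchdog soft-stat counters are preserved across the stat_block
+ * reset.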
+ */ + +static void s2io_reset(struct s2io_nic *sp) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64; + u16 subid, pci_cmd; + int i; + u16 val16; + unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt; + unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; + struct stat_block *stats; + struct swStat *swstats; + + DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n", + __func__, pci_name(sp->pdev)); + + /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ + pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); + + val64 = SW_RESET_ALL; + writeq(val64, &bar0->sw_reset); + if (strstr(sp->product_name, "CX4")) + msleep(750); + msleep(250); + for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) { + + /* Restore the PCI state saved during initialization. */ + pci_restore_state(sp->pdev); + pci_save_state(sp->pdev); + pci_read_config_word(sp->pdev, 0x2, &val16); + if (check_pci_device_id(val16) != (u16)PCI_ANY_ID) + break; + msleep(200); + } + + if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) + DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__); + + pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); + + s2io_init_pci(sp); + + /* Set swapper to enable I/O register access */ + s2io_set_swapper(sp); + + /* restore mac_addr entries */ + do_s2io_restore_unicast_mc(sp); + + /* Restore the MSIX table entries from local variables */ + restore_xmsi_data(sp); + + /* Clear certain PCI/PCI-X fields after reset */ + if (sp->device_type == XFRAME_II_DEVICE) { + /* Clear "detected parity error" bit */ + pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); + + /* Clearing PCIX Ecc status register */ + pci_write_config_dword(sp->pdev, 0x68, 0x7C); + + /* Clearing PCI_STATUS error reflected here */ + writeq(s2BIT(62), &bar0->txpic_int_reg); + } + + /* Reset device statistics maintained by OS */ + memset(&sp->stats, 0, sizeof(struct net_device_stats)); + + stats = sp->mac_control.stats_info; + swstats = &stats->sw_stat; + + /* save link up/down time/cnt, reset/memory/watchdog cnt */ + up_cnt = swstats->link_up_cnt; + down_cnt = swstats->link_down_cnt; + up_time = swstats->link_up_time; + down_time = swstats->link_down_time; + reset_cnt = swstats->soft_reset_cnt; + mem_alloc_cnt = swstats->mem_allocated; + mem_free_cnt = swstats->mem_freed; + watchdog_cnt = swstats->watchdog_timer_cnt; + + memset(stats, 0, sizeof(struct stat_block)); + + /* restore link up/down time/cnt, reset/memory/watchdog cnt */ + swstats->link_up_cnt = up_cnt; + swstats->link_down_cnt = down_cnt; + swstats->link_up_time = up_time; + swstats->link_down_time = down_time; + swstats->soft_reset_cnt = reset_cnt; + swstats->mem_allocated = mem_alloc_cnt; + swstats->mem_freed = mem_free_cnt; + swstats->watchdog_timer_cnt = watchdog_cnt; + + /* SXE-002: Configure link and activity LED to turn it off */ + subid = sp->pdev->subsystem_device; + if (((subid & 0xFF) >= 0x07) && + (sp->device_type == XFRAME_I_DEVICE)) { + val64 = readq(&bar0->gpio_control); + val64 |= 0x0000800000000000ULL; + writeq(val64, &bar0->gpio_control); + val64 = 0x0411040400000000ULL; + writeq(val64, (void __iomem *)bar0 + 0x2700); + } + + /* + * Clear spurious ECC interrupts that would have occurred on + * XFRAME II cards after reset. 
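+ * (The asserted bits are read from pcc_err_reg and written straight
+ * back to clear them.)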
+ */ + if (sp->device_type == XFRAME_II_DEVICE) { + val64 = readq(&bar0->pcc_err_reg); + writeq(val64, &bar0->pcc_err_reg); + } + + sp->device_enabled_once = false; +} + +/** + * s2io_set_swapper - to set the swapper controle on the card + * @sp : private member of the device structure, + * pointer to the s2io_nic structure. + * Description: Function to set the swapper control on the card + * correctly depending on the 'endianness' of the system. + * Return value: + * SUCCESS on success and FAILURE on failure. + */ + +static int s2io_set_swapper(struct s2io_nic *sp) +{ + struct net_device *dev = sp->dev; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64, valt, valr; + + /* + * Set proper endian settings and verify the same by reading + * the PIF Feed-back register. + */ + + val64 = readq(&bar0->pif_rd_swapper_fb); + if (val64 != 0x0123456789ABCDEFULL) { + int i = 0; + static const u64 value[] = { + 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */ + 0x8100008181000081ULL, /* FE=1, SE=0 */ + 0x4200004242000042ULL, /* FE=0, SE=1 */ + 0 /* FE=0, SE=0 */ + }; + + while (i < 4) { + writeq(value[i], &bar0->swapper_ctrl); + val64 = readq(&bar0->pif_rd_swapper_fb); + if (val64 == 0x0123456789ABCDEFULL) + break; + i++; + } + if (i == 4) { + DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, " + "feedback read %llx\n", + dev->name, (unsigned long long)val64); + return FAILURE; + } + valr = value[i]; + } else { + valr = readq(&bar0->swapper_ctrl); + } + + valt = 0x0123456789ABCDEFULL; + writeq(valt, &bar0->xmsi_address); + val64 = readq(&bar0->xmsi_address); + + if (val64 != valt) { + int i = 0; + static const u64 value[] = { + 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */ + 0x0081810000818100ULL, /* FE=1, SE=0 */ + 0x0042420000424200ULL, /* FE=0, SE=1 */ + 0 /* FE=0, SE=0 */ + }; + + while (i < 4) { + writeq((value[i] | valr), &bar0->swapper_ctrl); + writeq(valt, &bar0->xmsi_address); + val64 = readq(&bar0->xmsi_address); + if (val64 == valt) + break; + i++; + } + if (i == 4) { + unsigned long long x = val64; + DBG_PRINT(ERR_DBG, + "Write failed, Xmsi_addr reads:0x%llx\n", x); + return FAILURE; + } + } + val64 = readq(&bar0->swapper_ctrl); + val64 &= 0xFFFF000000000000ULL; + +#ifdef __BIG_ENDIAN + /* + * The device by default set to a big endian format, so a + * big endian driver need not set anything. + */ + val64 |= (SWAPPER_CTRL_TXP_FE | + SWAPPER_CTRL_TXP_SE | + SWAPPER_CTRL_TXD_R_FE | + SWAPPER_CTRL_TXD_W_FE | + SWAPPER_CTRL_TXF_R_FE | + SWAPPER_CTRL_RXD_R_FE | + SWAPPER_CTRL_RXD_W_FE | + SWAPPER_CTRL_RXF_W_FE | + SWAPPER_CTRL_XMSI_FE | + SWAPPER_CTRL_STATS_FE | + SWAPPER_CTRL_STATS_SE); + if (sp->config.intr_type == INTA) + val64 |= SWAPPER_CTRL_XMSI_SE; + writeq(val64, &bar0->swapper_ctrl); +#else + /* + * Initially we enable all bits to make it accessible by the + * driver, then we selectively enable only those bits that + * we want to set. + */ + val64 |= (SWAPPER_CTRL_TXP_FE | + SWAPPER_CTRL_TXP_SE | + SWAPPER_CTRL_TXD_R_FE | + SWAPPER_CTRL_TXD_R_SE | + SWAPPER_CTRL_TXD_W_FE | + SWAPPER_CTRL_TXD_W_SE | + SWAPPER_CTRL_TXF_R_FE | + SWAPPER_CTRL_RXD_R_FE | + SWAPPER_CTRL_RXD_R_SE | + SWAPPER_CTRL_RXD_W_FE | + SWAPPER_CTRL_RXD_W_SE | + SWAPPER_CTRL_RXF_W_FE | + SWAPPER_CTRL_XMSI_FE | + SWAPPER_CTRL_STATS_FE | + SWAPPER_CTRL_STATS_SE); + if (sp->config.intr_type == INTA) + val64 |= SWAPPER_CTRL_XMSI_SE; + writeq(val64, &bar0->swapper_ctrl); +#endif + val64 = readq(&bar0->swapper_ctrl); + + /* + * Verifying if endian settings are accurate by reading a + * feedback register. 
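+ * A correctly programmed swapper makes pif_rd_swapper_fb read back as
+ * the constant 0x0123456789ABCDEF; any other value means the FE/SE
+ * bits chosen above are wrong and the function fails.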
+ */ + val64 = readq(&bar0->pif_rd_swapper_fb); + if (val64 != 0x0123456789ABCDEFULL) { + /* Endian settings are incorrect, calls for another dekko. */ + DBG_PRINT(ERR_DBG, + "%s: Endian settings are wrong, feedback read %llx\n", + dev->name, (unsigned long long)val64); + return FAILURE; + } + + return SUCCESS; +} + +static int wait_for_msix_trans(struct s2io_nic *nic, int i) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + u64 val64; + int ret = 0, cnt = 0; + + do { + val64 = readq(&bar0->xmsi_access); + if (!(val64 & s2BIT(15))) + break; + mdelay(1); + cnt++; + } while (cnt < 5); + if (cnt == 5) { + DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i); + ret = 1; + } + + return ret; +} + +static void restore_xmsi_data(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + u64 val64; + int i, msix_index; + + if (nic->device_type == XFRAME_I_DEVICE) + return; + + for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { + msix_index = (i) ? ((i-1) * 8 + 1) : 0; + writeq(nic->msix_info[i].addr, &bar0->xmsi_address); + writeq(nic->msix_info[i].data, &bar0->xmsi_data); + val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); + writeq(val64, &bar0->xmsi_access); + if (wait_for_msix_trans(nic, msix_index)) { + DBG_PRINT(ERR_DBG, "%s: index: %d failed\n", + __func__, msix_index); + continue; + } + } +} + +static void store_xmsi_data(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + u64 val64, addr, data; + int i, msix_index; + + if (nic->device_type == XFRAME_I_DEVICE) + return; + + /* Store and display */ + for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { + msix_index = (i) ? ((i-1) * 8 + 1) : 0; + val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); + writeq(val64, &bar0->xmsi_access); + if (wait_for_msix_trans(nic, msix_index)) { + DBG_PRINT(ERR_DBG, "%s: index: %d failed\n", + __func__, msix_index); + continue; + } + addr = readq(&bar0->xmsi_address); + data = readq(&bar0->xmsi_data); + if (addr && data) { + nic->msix_info[i].addr = addr; + nic->msix_info[i].data = data; + } + } +} + +static int s2io_enable_msi_x(struct s2io_nic *nic) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + u64 rx_mat; + u16 msi_control; /* Temp variable */ + int ret, i, j, msix_indx = 1; + int size; + struct stat_block *stats = nic->mac_control.stats_info; + struct swStat *swstats = &stats->sw_stat; + + size = nic->num_entries * sizeof(struct msix_entry); + nic->entries = kzalloc(size, GFP_KERNEL); + if (!nic->entries) { + DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", + __func__); + swstats->mem_alloc_fail_cnt++; + return -ENOMEM; + } + swstats->mem_allocated += size; + + size = nic->num_entries * sizeof(struct s2io_msix_entry); + nic->s2io_entries = kzalloc(size, GFP_KERNEL); + if (!nic->s2io_entries) { + DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", + __func__); + swstats->mem_alloc_fail_cnt++; + kfree(nic->entries); + swstats->mem_freed + += (nic->num_entries * sizeof(struct msix_entry)); + return -ENOMEM; + } + swstats->mem_allocated += size; + + nic->entries[0].entry = 0; + nic->s2io_entries[0].entry = 0; + nic->s2io_entries[0].in_use = MSIX_FLG; + nic->s2io_entries[0].type = MSIX_ALARM_TYPE; + nic->s2io_entries[0].arg = &nic->mac_control.fifos; + + for (i = 1; i < nic->num_entries; i++) { + nic->entries[i].entry = ((i - 1) * 8) + 1; + nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; + nic->s2io_entries[i].arg = NULL; + nic->s2io_entries[i].in_use = 0; + } + + rx_mat = readq(&bar0->rx_mat); + for (j = 0; j < nic->config.rx_ring_num; j++) { + rx_mat |= 
RX_MAT_SET(j, msix_indx); + nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; + nic->s2io_entries[j+1].type = MSIX_RING_TYPE; + nic->s2io_entries[j+1].in_use = MSIX_FLG; + msix_indx += 8; + } + writeq(rx_mat, &bar0->rx_mat); + readq(&bar0->rx_mat); + + ret = pci_enable_msix_range(nic->pdev, nic->entries, + nic->num_entries, nic->num_entries); + /* We fail init if error or we get less vectors than min required */ + if (ret < 0) { + DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n"); + kfree(nic->entries); + swstats->mem_freed += nic->num_entries * + sizeof(struct msix_entry); + kfree(nic->s2io_entries); + swstats->mem_freed += nic->num_entries * + sizeof(struct s2io_msix_entry); + nic->entries = NULL; + nic->s2io_entries = NULL; + return -ENOMEM; + } + + /* + * To enable MSI-X, MSI also needs to be enabled, due to a bug + * in the herc NIC. (Temp change, needs to be removed later) + */ + pci_read_config_word(nic->pdev, 0x42, &msi_control); + msi_control |= 0x1; /* Enable MSI */ + pci_write_config_word(nic->pdev, 0x42, msi_control); + + return 0; +} + +/* Handle software interrupt used during MSI(X) test */ +static irqreturn_t s2io_test_intr(int irq, void *dev_id) +{ + struct s2io_nic *sp = dev_id; + + sp->msi_detected = 1; + wake_up(&sp->msi_wait); + + return IRQ_HANDLED; +} + +/* Test interrupt path by forcing a a software IRQ */ +static int s2io_test_msi(struct s2io_nic *sp) +{ + struct pci_dev *pdev = sp->pdev; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + int err; + u64 val64, saved64; + + err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, + sp->name, sp); + if (err) { + DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n", + sp->dev->name, pci_name(pdev), pdev->irq); + return err; + } + + init_waitqueue_head(&sp->msi_wait); + sp->msi_detected = 0; + + saved64 = val64 = readq(&bar0->scheduled_int_ctrl); + val64 |= SCHED_INT_CTRL_ONE_SHOT; + val64 |= SCHED_INT_CTRL_TIMER_EN; + val64 |= SCHED_INT_CTRL_INT2MSI(1); + writeq(val64, &bar0->scheduled_int_ctrl); + + wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10); + + if (!sp->msi_detected) { + /* MSI(X) test failed, go back to INTx mode */ + DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated " + "using MSI(X) during test\n", + sp->dev->name, pci_name(pdev)); + + err = -EOPNOTSUPP; + } + + free_irq(sp->entries[1].vector, sp); + + writeq(saved64, &bar0->scheduled_int_ctrl); + + return err; +} + +static void remove_msix_isr(struct s2io_nic *sp) +{ + int i; + u16 msi_control; + + for (i = 0; i < sp->num_entries; i++) { + if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) { + int vector = sp->entries[i].vector; + void *arg = sp->s2io_entries[i].arg; + free_irq(vector, arg); + } + } + + kfree(sp->entries); + kfree(sp->s2io_entries); + sp->entries = NULL; + sp->s2io_entries = NULL; + + pci_read_config_word(sp->pdev, 0x42, &msi_control); + msi_control &= 0xFFFE; /* Disable MSI */ + pci_write_config_word(sp->pdev, 0x42, msi_control); + + pci_disable_msix(sp->pdev); +} + +static void remove_inta_isr(struct s2io_nic *sp) +{ + free_irq(sp->pdev->irq, sp->dev); +} + +/* ********************************************************* * + * Functions defined below concern the OS part of the driver * + * ********************************************************* */ + +/** + * s2io_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. 
It mainly calls a + * function to allocate Rx buffers and inserts them into the buffer + * descriptors and then enables the Rx part of the NIC. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ + +static int s2io_open(struct net_device *dev) +{ + struct s2io_nic *sp = netdev_priv(dev); + struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; + int err = 0; + + /* + * Make sure you have link off by default every time + * Nic is initialized + */ + netif_carrier_off(dev); + sp->last_link_state = 0; + + /* Initialize H/W and enable interrupts */ + err = s2io_card_up(sp); + if (err) { + DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", + dev->name); + goto hw_init_failed; + } + + if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) { + DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); + s2io_card_down(sp); + err = -ENODEV; + goto hw_init_failed; + } + s2io_start_all_tx_queue(sp); + return 0; + +hw_init_failed: + if (sp->config.intr_type == MSI_X) { + if (sp->entries) { + kfree(sp->entries); + swstats->mem_freed += sp->num_entries * + sizeof(struct msix_entry); + } + if (sp->s2io_entries) { + kfree(sp->s2io_entries); + swstats->mem_freed += sp->num_entries * + sizeof(struct s2io_msix_entry); + } + } + return err; +} + +/** + * s2io_close -close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. It needs to undo exactly + * whatever was done by the open entry point,thus it's usually referred to + * as the close function.Among other things this function mainly stops the + * Rx side of the NIC and frees all the Rx buffers in the Rx rings. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ + +static int s2io_close(struct net_device *dev) +{ + struct s2io_nic *sp = netdev_priv(dev); + struct config_param *config = &sp->config; + u64 tmp64; + int offset; + + /* Return if the device is already closed * + * Can happen when s2io_card_up failed in change_mtu * + */ + if (!is_s2io_card_up(sp)) + return 0; + + s2io_stop_all_tx_queue(sp); + /* delete all populated mac entries */ + for (offset = 1; offset < config->max_mc_addr; offset++) { + tmp64 = do_s2io_read_unicast_mc(sp, offset); + if (tmp64 != S2IO_DISABLE_MAC_ENTRY) + do_s2io_delete_unicast_mc(sp, tmp64); + } + + s2io_card_down(sp); + + return 0; +} + +/** + * s2io_xmit - Tx entry point of te driver + * @skb : the socket buffer containing the Tx data. + * @dev : device pointer. + * Description : + * This function is the Tx entry point of the driver. S2IO NIC supports + * certain protocol assist features on Tx side, namely CSO, S/G, LSO. + * NOTE: when device can't queue the pkt,just the trans_start variable will + * not be upadted. + * Return value: + * 0 on success & 1 on failure. 
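+ * The TxD list is treated as full when advancing the put pointer by
+ * one (wrapping at queue_len == fifo_len + 1) would land on the get
+ * pointer, e.g. with queue_len 8, put_off 7 and get_off 0 no
+ * descriptor is free, so the queue is stopped and the skb dropped.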
+ */ + +static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct s2io_nic *sp = netdev_priv(dev); + u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; + register u64 val64; + struct TxD *txdp; + struct TxFIFO_element __iomem *tx_fifo; + unsigned long flags = 0; + u16 vlan_tag = 0; + struct fifo_info *fifo = NULL; + int do_spin_lock = 1; + int offload_type; + int enable_per_list_interrupt = 0; + struct config_param *config = &sp->config; + struct mac_info *mac_control = &sp->mac_control; + struct stat_block *stats = mac_control->stats_info; + struct swStat *swstats = &stats->sw_stat; + + DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name); + + if (unlikely(skb->len <= 0)) { + DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (!is_s2io_card_up(sp)) { + DBG_PRINT(TX_DBG, "%s: Card going down for reset\n", + dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + queue = 0; + if (skb_vlan_tag_present(skb)) + vlan_tag = skb_vlan_tag_get(skb); + if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip; + struct tcphdr *th; + ip = ip_hdr(skb); + + if (!ip_is_fragment(ip)) { + th = (struct tcphdr *)(((unsigned char *)ip) + + ip->ihl*4); + + if (ip->protocol == IPPROTO_TCP) { + queue_len = sp->total_tcp_fifos; + queue = (ntohs(th->source) + + ntohs(th->dest)) & + sp->fifo_selector[queue_len - 1]; + if (queue >= queue_len) + queue = queue_len - 1; + } else if (ip->protocol == IPPROTO_UDP) { + queue_len = sp->total_udp_fifos; + queue = (ntohs(th->source) + + ntohs(th->dest)) & + sp->fifo_selector[queue_len - 1]; + if (queue >= queue_len) + queue = queue_len - 1; + queue += sp->udp_fifo_idx; + if (skb->len > 1024) + enable_per_list_interrupt = 1; + do_spin_lock = 0; + } + } + } + } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING) + /* get fifo number based on skb->priority value */ + queue = config->fifo_mapping + [skb->priority & (MAX_TX_FIFOS - 1)]; + fifo = &mac_control->fifos[queue]; + + if (do_spin_lock) + spin_lock_irqsave(&fifo->tx_lock, flags); + else { + if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) + return NETDEV_TX_LOCKED; + } + + if (sp->config.multiq) { + if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { + spin_unlock_irqrestore(&fifo->tx_lock, flags); + return NETDEV_TX_BUSY; + } + } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) { + if (netif_queue_stopped(dev)) { + spin_unlock_irqrestore(&fifo->tx_lock, flags); + return NETDEV_TX_BUSY; + } + } + + put_off = (u16)fifo->tx_curr_put_info.offset; + get_off = (u16)fifo->tx_curr_get_info.offset; + txdp = fifo->list_info[put_off].list_virt_addr; + + queue_len = fifo->tx_curr_put_info.fifo_len + 1; + /* Avoid "put" pointer going beyond "get" pointer */ + if (txdp->Host_Control || + ((put_off+1) == queue_len ? 
0 : (put_off+1)) == get_off) { + DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); + s2io_stop_tx_queue(sp, fifo->fifo_no); + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&fifo->tx_lock, flags); + return NETDEV_TX_OK; + } + + offload_type = s2io_offload_type(skb); + if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + txdp->Control_1 |= TXD_TCP_LSO_EN; + txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); + } + if (skb->ip_summed == CHECKSUM_PARTIAL) { + txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN | + TXD_TX_CKO_TCP_EN | + TXD_TX_CKO_UDP_EN); + } + txdp->Control_1 |= TXD_GATHER_CODE_FIRST; + txdp->Control_1 |= TXD_LIST_OWN_XENA; + txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no); + if (enable_per_list_interrupt) + if (put_off & (queue_len >> 5)) + txdp->Control_2 |= TXD_INT_TYPE_PER_LIST; + if (vlan_tag) { + txdp->Control_2 |= TXD_VLAN_ENABLE; + txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); + } + + frg_len = skb_headlen(skb); + if (offload_type == SKB_GSO_UDP) { + int ufo_size; + + ufo_size = s2io_udp_mss(skb); + ufo_size &= ~7; + txdp->Control_1 |= TXD_UFO_EN; + txdp->Control_1 |= TXD_UFO_MSS(ufo_size); + txdp->Control_1 |= TXD_BUFFER0_SIZE(8); +#ifdef __BIG_ENDIAN + /* both variants do cpu_to_be64(be32_to_cpu(...)) */ + fifo->ufo_in_band_v[put_off] = + (__force u64)skb_shinfo(skb)->ip6_frag_id; +#else + fifo->ufo_in_band_v[put_off] = + (__force u64)skb_shinfo(skb)->ip6_frag_id << 32; +#endif + txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; + txdp->Buffer_Pointer = pci_map_single(sp->pdev, + fifo->ufo_in_band_v, + sizeof(u64), + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) + goto pci_map_failed; + txdp++; + } + + txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data, + frg_len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) + goto pci_map_failed; + + txdp->Host_Control = (unsigned long)skb; + txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); + if (offload_type == SKB_GSO_UDP) + txdp->Control_1 |= TXD_UFO_EN; + + frg_cnt = skb_shinfo(skb)->nr_frags; + /* For fragmented SKB. */ + for (i = 0; i < frg_cnt; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + /* A '0' length fragment will be ignored */ + if (!skb_frag_size(frag)) + continue; + txdp++; + txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev, + frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); + if (offload_type == SKB_GSO_UDP) + txdp->Control_1 |= TXD_UFO_EN; + } + txdp->Control_1 |= TXD_GATHER_CODE_LAST; + + if (offload_type == SKB_GSO_UDP) + frg_cnt++; /* as Txd0 was used for inband header */ + + tx_fifo = mac_control->tx_FIFO_start[queue]; + val64 = fifo->list_info[put_off].list_phy_addr; + writeq(val64, &tx_fifo->TxDL_Pointer); + + val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | + TX_FIFO_LAST_LIST); + if (offload_type) + val64 |= TX_FIFO_SPECIAL_FUNC; + + writeq(val64, &tx_fifo->List_Control); + + mmiowb(); + + put_off++; + if (put_off == fifo->tx_curr_put_info.fifo_len + 1) + put_off = 0; + fifo->tx_curr_put_info.offset = put_off; + + /* Avoid "put" pointer going beyond "get" pointer */ + if (((put_off+1) == queue_len ? 
0 : (put_off+1)) == get_off) { + swstats->fifo_full_cnt++; + DBG_PRINT(TX_DBG, + "No free TxDs for xmit, Put: 0x%x Get:0x%x\n", + put_off, get_off); + s2io_stop_tx_queue(sp, fifo->fifo_no); + } + swstats->mem_allocated += skb->truesize; + spin_unlock_irqrestore(&fifo->tx_lock, flags); + + if (sp->config.intr_type == MSI_X) + tx_intr_handler(fifo); + + return NETDEV_TX_OK; + +pci_map_failed: + swstats->pci_map_fail_cnt++; + s2io_stop_tx_queue(sp, fifo->fifo_no); + swstats->mem_freed += skb->truesize; + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&fifo->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void +s2io_alarm_handle(unsigned long data) +{ + struct s2io_nic *sp = (struct s2io_nic *)data; + struct net_device *dev = sp->dev; + + s2io_handle_errors(dev); + mod_timer(&sp->alarm_timer, jiffies + HZ / 2); +} + +static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) +{ + struct ring_info *ring = (struct ring_info *)dev_id; + struct s2io_nic *sp = ring->nic; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + if (unlikely(!is_s2io_card_up(sp))) + return IRQ_HANDLED; + + if (sp->config.napi) { + u8 __iomem *addr = NULL; + u8 val8 = 0; + + addr = (u8 __iomem *)&bar0->xmsi_mask_reg; + addr += (7 - ring->ring_no); + val8 = (ring->ring_no == 0) ? 0x7f : 0xff; + writeb(val8, addr); + val8 = readb(addr); + napi_schedule(&ring->napi); + } else { + rx_intr_handler(ring, 0); + s2io_chk_rx_buffers(sp, ring); + } + + return IRQ_HANDLED; +} + +static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) +{ + int i; + struct fifo_info *fifos = (struct fifo_info *)dev_id; + struct s2io_nic *sp = fifos->nic; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + struct config_param *config = &sp->config; + u64 reason; + + if (unlikely(!is_s2io_card_up(sp))) + return IRQ_NONE; + + reason = readq(&bar0->general_int_status); + if (unlikely(reason == S2IO_MINUS_ONE)) + /* Nothing much can be done. Get out */ + return IRQ_HANDLED; + + if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) { + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); + + if (reason & GEN_INTR_TXPIC) + s2io_txpic_intr_handle(sp); + + if (reason & GEN_INTR_TXTRAFFIC) + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); + + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&fifos[i]); + + writeq(sp->general_int_mask, &bar0->general_int_mask); + readl(&bar0->general_int_status); + return IRQ_HANDLED; + } + /* The interrupt was not raised by us */ + return IRQ_NONE; +} + +static void s2io_txpic_intr_handle(struct s2io_nic *sp) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64; + + val64 = readq(&bar0->pic_int_status); + if (val64 & PIC_INT_GPIO) { + val64 = readq(&bar0->gpio_int_reg); + if ((val64 & GPIO_INT_REG_LINK_DOWN) && + (val64 & GPIO_INT_REG_LINK_UP)) { + /* + * This is unstable state so clear both up/down + * interrupt and adapter to re-evaluate the link state. 
+ */ + val64 |= GPIO_INT_REG_LINK_DOWN; + val64 |= GPIO_INT_REG_LINK_UP; + writeq(val64, &bar0->gpio_int_reg); + val64 = readq(&bar0->gpio_int_mask); + val64 &= ~(GPIO_INT_MASK_LINK_UP | + GPIO_INT_MASK_LINK_DOWN); + writeq(val64, &bar0->gpio_int_mask); + } else if (val64 & GPIO_INT_REG_LINK_UP) { + val64 = readq(&bar0->adapter_status); + /* Enable Adapter */ + val64 = readq(&bar0->adapter_control); + val64 |= ADAPTER_CNTL_EN; + writeq(val64, &bar0->adapter_control); + val64 |= ADAPTER_LED_ON; + writeq(val64, &bar0->adapter_control); + if (!sp->device_enabled_once) + sp->device_enabled_once = 1; + + s2io_link(sp, LINK_UP); + /* + * unmask link down interrupt and mask link-up + * intr + */ + val64 = readq(&bar0->gpio_int_mask); + val64 &= ~GPIO_INT_MASK_LINK_DOWN; + val64 |= GPIO_INT_MASK_LINK_UP; + writeq(val64, &bar0->gpio_int_mask); + + } else if (val64 & GPIO_INT_REG_LINK_DOWN) { + val64 = readq(&bar0->adapter_status); + s2io_link(sp, LINK_DOWN); + /* Link is down so unmaks link up interrupt */ + val64 = readq(&bar0->gpio_int_mask); + val64 &= ~GPIO_INT_MASK_LINK_UP; + val64 |= GPIO_INT_MASK_LINK_DOWN; + writeq(val64, &bar0->gpio_int_mask); + + /* turn off LED */ + val64 = readq(&bar0->adapter_control); + val64 = val64 & (~ADAPTER_LED_ON); + writeq(val64, &bar0->adapter_control); + } + } + val64 = readq(&bar0->gpio_int_mask); +} + +/** + * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter + * @value: alarm bits + * @addr: address value + * @cnt: counter variable + * Description: Check for alarm and increment the counter + * Return Value: + * 1 - if alarm bit set + * 0 - if alarm bit is not set + */ +static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr, + unsigned long long *cnt) +{ + u64 val64; + val64 = readq(addr); + if (val64 & value) { + writeq(val64, addr); + (*cnt)++; + return 1; + } + return 0; + +} + +/** + * s2io_handle_errors - Xframe error indication handler + * @nic: device private variable + * Description: Handle alarms such as loss of link, single or + * double ECC errors, critical and serious errors. + * Return Value: + * NONE + */ +static void s2io_handle_errors(void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 temp64 = 0, val64 = 0; + int i = 0; + + struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; + struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat; + + if (!is_s2io_card_up(sp)) + return; + + if (pci_channel_offline(sp->pdev)) + return; + + memset(&sw_stat->ring_full_cnt, 0, + sizeof(sw_stat->ring_full_cnt)); + + /* Handling the XPAK counters update */ + if (stats->xpak_timer_count < 72000) { + /* waiting for an hour */ + stats->xpak_timer_count++; + } else { + s2io_updt_xpak_counter(dev); + /* reset the count to zero */ + stats->xpak_timer_count = 0; + } + + /* Handling link status change error Intr */ + if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) { + val64 = readq(&bar0->mac_rmac_err_reg); + writeq(val64, &bar0->mac_rmac_err_reg); + if (val64 & RMAC_LINK_STATE_CHANGE_INT) + schedule_work(&sp->set_link_task); + } + + /* In case of a serious error, the device will be Reset. 
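+ * Each alarm source below is checked with do_s2io_chk_alarm_bit(): the
+ * matching software counter is bumped and, for conditions treated as
+ * fatal, control jumps to the reset label at the bottom, which stops
+ * the Tx queues and schedules rst_timer_task.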
*/ + if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, + &sw_stat->serious_err_cnt)) + goto reset; + + /* Check for data parity error */ + if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, + &sw_stat->parity_err_cnt)) + goto reset; + + /* Check for ring full counter */ + if (sp->device_type == XFRAME_II_DEVICE) { + val64 = readq(&bar0->ring_bump_counter1); + for (i = 0; i < 4; i++) { + temp64 = (val64 & vBIT(0xFFFF, (i*16), 16)); + temp64 >>= 64 - ((i+1)*16); + sw_stat->ring_full_cnt[i] += temp64; + } + + val64 = readq(&bar0->ring_bump_counter2); + for (i = 0; i < 4; i++) { + temp64 = (val64 & vBIT(0xFFFF, (i*16), 16)); + temp64 >>= 64 - ((i+1)*16); + sw_stat->ring_full_cnt[i+4] += temp64; + } + } + + val64 = readq(&bar0->txdma_int_status); + /*check for pfc_err*/ + if (val64 & TXDMA_PFC_INT) { + if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | + PFC_MISC_0_ERR | PFC_MISC_1_ERR | + PFC_PCIX_ERR, + &bar0->pfc_err_reg, + &sw_stat->pfc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, + &bar0->pfc_err_reg, + &sw_stat->pfc_err_cnt); + } + + /*check for tda_err*/ + if (val64 & TXDMA_TDA_INT) { + if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | + TDA_SM0_ERR_ALARM | + TDA_SM1_ERR_ALARM, + &bar0->tda_err_reg, + &sw_stat->tda_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR, + &bar0->tda_err_reg, + &sw_stat->tda_err_cnt); + } + /*check for pcc_err*/ + if (val64 & TXDMA_PCC_INT) { + if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | + PCC_N_SERR | PCC_6_COF_OV_ERR | + PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | + PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR | + PCC_TXB_ECC_DB_ERR, + &bar0->pcc_err_reg, + &sw_stat->pcc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR, + &bar0->pcc_err_reg, + &sw_stat->pcc_err_cnt); + } + + /*check for tti_err*/ + if (val64 & TXDMA_TTI_INT) { + if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, + &bar0->tti_err_reg, + &sw_stat->tti_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR, + &bar0->tti_err_reg, + &sw_stat->tti_err_cnt); + } + + /*check for lso_err*/ + if (val64 & TXDMA_LSO_INT) { + if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT | + LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM, + &bar0->lso_err_reg, + &sw_stat->lso_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, + &bar0->lso_err_reg, + &sw_stat->lso_err_cnt); + } + + /*check for tpa_err*/ + if (val64 & TXDMA_TPA_INT) { + if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, + &bar0->tpa_err_reg, + &sw_stat->tpa_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, + &bar0->tpa_err_reg, + &sw_stat->tpa_err_cnt); + } + + /*check for sm_err*/ + if (val64 & TXDMA_SM_INT) { + if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, + &bar0->sm_err_reg, + &sw_stat->sm_err_cnt)) + goto reset; + } + + val64 = readq(&bar0->mac_int_status); + if (val64 & MAC_INT_STATUS_TMAC_INT) { + if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR, + &bar0->mac_tmac_err_reg, + &sw_stat->mac_tmac_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | + TMAC_DESC_ECC_SG_ERR | + TMAC_DESC_ECC_DB_ERR, + &bar0->mac_tmac_err_reg, + &sw_stat->mac_tmac_err_cnt); + } + + val64 = readq(&bar0->xgxs_int_status); + if (val64 & XGXS_INT_STATUS_TXGXS) { + if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR, + &bar0->xgxs_txgxs_err_reg, + &sw_stat->xgxs_txgxs_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | 
TXGXS_ECC_DB_ERR, + &bar0->xgxs_txgxs_err_reg, + &sw_stat->xgxs_txgxs_err_cnt); + } + + val64 = readq(&bar0->rxdma_int_status); + if (val64 & RXDMA_INT_RC_INT_M) { + if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | + RC_FTC_ECC_DB_ERR | + RC_PRCn_SM_ERR_ALARM | + RC_FTC_SM_ERR_ALARM, + &bar0->rc_err_reg, + &sw_stat->rc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | + RC_FTC_ECC_SG_ERR | + RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, + &sw_stat->rc_err_cnt); + if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | + PRC_PCI_AB_WR_Rn | + PRC_PCI_AB_F_WR_Rn, + &bar0->prc_pcix_err_reg, + &sw_stat->prc_pcix_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | + PRC_PCI_DP_WR_Rn | + PRC_PCI_DP_F_WR_Rn, + &bar0->prc_pcix_err_reg, + &sw_stat->prc_pcix_err_cnt); + } + + if (val64 & RXDMA_INT_RPA_INT_M) { + if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR, + &bar0->rpa_err_reg, + &sw_stat->rpa_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, + &bar0->rpa_err_reg, + &sw_stat->rpa_err_cnt); + } + + if (val64 & RXDMA_INT_RDA_INT_M) { + if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR | + RDA_FRM_ECC_DB_N_AERR | + RDA_SM1_ERR_ALARM | + RDA_SM0_ERR_ALARM | + RDA_RXD_ECC_DB_SERR, + &bar0->rda_err_reg, + &sw_stat->rda_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | + RDA_FRM_ECC_SG_ERR | + RDA_MISC_ERR | + RDA_PCIX_ERR, + &bar0->rda_err_reg, + &sw_stat->rda_err_cnt); + } + + if (val64 & RXDMA_INT_RTI_INT_M) { + if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, + &bar0->rti_err_reg, + &sw_stat->rti_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, + &bar0->rti_err_reg, + &sw_stat->rti_err_cnt); + } + + val64 = readq(&bar0->mac_int_status); + if (val64 & MAC_INT_STATUS_RMAC_INT) { + if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR, + &bar0->mac_rmac_err_reg, + &sw_stat->mac_rmac_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RMAC_UNUSED_INT | + RMAC_SINGLE_ECC_ERR | + RMAC_DOUBLE_ECC_ERR, + &bar0->mac_rmac_err_reg, + &sw_stat->mac_rmac_err_cnt); + } + + val64 = readq(&bar0->xgxs_int_status); + if (val64 & XGXS_INT_STATUS_RXGXS) { + if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, + &bar0->xgxs_rxgxs_err_reg, + &sw_stat->xgxs_rxgxs_err_cnt)) + goto reset; + } + + val64 = readq(&bar0->mc_int_status); + if (val64 & MC_INT_STATUS_MC_INT) { + if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, + &bar0->mc_err_reg, + &sw_stat->mc_err_cnt)) + goto reset; + + /* Handling Ecc errors */ + if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) { + writeq(val64, &bar0->mc_err_reg); + if (val64 & MC_ERR_REG_ECC_ALL_DBL) { + sw_stat->double_ecc_errs++; + if (sp->device_type != XFRAME_II_DEVICE) { + /* + * Reset XframeI only if critical error + */ + if (val64 & + (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | + MC_ERR_REG_MIRI_ECC_DB_ERR_1)) + goto reset; + } + } else + sw_stat->single_ecc_errs++; + } + } + return; + +reset: + s2io_stop_all_tx_queue(sp); + schedule_work(&sp->rst_timer_task); + sw_stat->soft_reset_cnt++; +} + +/** + * s2io_isr - ISR handler of the device . + * @irq: the irq of the device. + * @dev_id: a void pointer to the dev structure of the NIC. + * Description: This function is the ISR handler of the device. It + * identifies the reason for the interrupt and calls the relevant + * service routines. As a contongency measure, this ISR allocates the + * recv buffers, if their numbers are below the panic value which is + * presently set to 25% of the original number of rcv buffers allocated. 
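+ * Note: this handler is used when the driver runs with legacy INTA
+ * interrupts; with MSI-X the per-ring and per-fifo handlers above are
+ * used instead.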
+ * Return value: + * IRQ_HANDLED: will be returned if IRQ was handled by this routine + * IRQ_NONE: will be returned if interrupt is not from our device + */ +static irqreturn_t s2io_isr(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + int i; + u64 reason = 0; + struct mac_info *mac_control; + struct config_param *config; + + /* Pretend we handled any irq's from a disconnected card */ + if (pci_channel_offline(sp->pdev)) + return IRQ_NONE; + + if (!is_s2io_card_up(sp)) + return IRQ_NONE; + + config = &sp->config; + mac_control = &sp->mac_control; + + /* + * Identify the cause for interrupt and call the appropriate + * interrupt handler. Causes for the interrupt could be; + * 1. Rx of packet. + * 2. Tx complete. + * 3. Link down. + */ + reason = readq(&bar0->general_int_status); + + if (unlikely(reason == S2IO_MINUS_ONE)) + return IRQ_HANDLED; /* Nothing much can be done. Get out */ + + if (reason & + (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) { + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); + + if (config->napi) { + if (reason & GEN_INTR_RXTRAFFIC) { + napi_schedule(&sp->napi); + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); + readl(&bar0->rx_traffic_int); + } + } else { + /* + * rx_traffic_int reg is an R1 register, writing all 1's + * will ensure that the actual interrupt causing bit + * get's cleared and hence a read can be avoided. + */ + if (reason & GEN_INTR_RXTRAFFIC) + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); + + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + rx_intr_handler(ring, 0); + } + } + + /* + * tx_traffic_int reg is an R1 register, writing all 1's + * will ensure that the actual interrupt causing bit get's + * cleared and hence a read can be avoided. + */ + if (reason & GEN_INTR_TXTRAFFIC) + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); + + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&mac_control->fifos[i]); + + if (reason & GEN_INTR_TXPIC) + s2io_txpic_intr_handle(sp); + + /* + * Reallocate the buffers from the interrupt handler itself. + */ + if (!config->napi) { + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + s2io_chk_rx_buffers(sp, ring); + } + } + writeq(sp->general_int_mask, &bar0->general_int_mask); + readl(&bar0->general_int_status); + + return IRQ_HANDLED; + + } else if (!reason) { + /* The interrupt was not raised by us */ + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + +/** + * s2io_updt_stats - + */ +static void s2io_updt_stats(struct s2io_nic *sp) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64; + int cnt = 0; + + if (is_s2io_card_up(sp)) { + /* Apprx 30us on a 133 MHz bus */ + val64 = SET_UPDT_CLICKS(10) | + STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN; + writeq(val64, &bar0->stat_cfg); + do { + udelay(100); + val64 = readq(&bar0->stat_cfg); + if (!(val64 & s2BIT(0))) + break; + cnt++; + if (cnt == 5) + break; /* Updt failed */ + } while (1); + } +} + +/** + * s2io_get_stats - Updates the device statistics structure. + * @dev : pointer to the device structure. + * Description: + * This function updates the device statistics structure in the s2io_nic + * structure and returns a pointer to the same. + * Return value: + * pointer to the updated net_device_stats structure. 
+ */ +static struct net_device_stats *s2io_get_stats(struct net_device *dev) +{ + struct s2io_nic *sp = netdev_priv(dev); + struct mac_info *mac_control = &sp->mac_control; + struct stat_block *stats = mac_control->stats_info; + u64 delta; + + /* Configure Stats for immediate updt */ + s2io_updt_stats(sp); + + /* A device reset will cause the on-adapter statistics to be zero'ed. + * This can be done while running by changing the MTU. To prevent the + * system from having the stats zero'ed, the driver keeps a copy of the + * last update to the system (which is also zero'ed on reset). This + * enables the driver to accurately know the delta between the last + * update and the current update. + */ + delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets; + sp->stats.rx_packets += delta; + dev->stats.rx_packets += delta; + + delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets; + sp->stats.tx_packets += delta; + dev->stats.tx_packets += delta; + + delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | + le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes; + sp->stats.rx_bytes += delta; + dev->stats.rx_bytes += delta; + + delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | + le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes; + sp->stats.tx_bytes += delta; + dev->stats.tx_bytes += delta; + + delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors; + sp->stats.rx_errors += delta; + dev->stats.rx_errors += delta; + + delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors; + sp->stats.tx_errors += delta; + dev->stats.tx_errors += delta; + + delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped; + sp->stats.rx_dropped += delta; + dev->stats.rx_dropped += delta; + + delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped; + sp->stats.tx_dropped += delta; + dev->stats.tx_dropped += delta; + + /* The adapter MAC interprets pause frames as multicast packets, but + * does not pass them up. This erroneously increases the multicast + * packet count and needs to be deducted when the multicast frame count + * is queried. + */ + delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_vld_mcst_frms); + delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms); + delta -= sp->stats.multicast; + sp->stats.multicast += delta; + dev->stats.multicast += delta; + + delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_usized_frms)) + + le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors; + sp->stats.rx_length_errors += delta; + dev->stats.rx_length_errors += delta; + + delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors; + sp->stats.rx_crc_errors += delta; + dev->stats.rx_crc_errors += delta; + + return &dev->stats; +} + +/** + * s2io_set_multicast - entry point for multicast address enable/disable. + * @dev : pointer to the device structure + * Description: + * This function is a driver entry point which gets called by the kernel + * whenever multicast addresses must be enabled/disabled. This also gets + * called to set/reset promiscuous mode. Depending on the deivce flag, we + * determine, if multicast address must be enabled or if promiscuous mode + * is to be disabled etc. + * Return value: + * void. 
+ */ + +static void s2io_set_multicast(struct net_device *dev) +{ + int i, j, prev_cnt; + struct netdev_hw_addr *ha; + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = + 0xfeffffffffffULL; + u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0; + void __iomem *add; + struct config_param *config = &sp->config; + + if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) { + /* Enable all Multicast addresses */ + writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac), + &bar0->rmac_addr_data0_mem); + writeq(RMAC_ADDR_DATA1_MEM_MASK(mask), + &bar0->rmac_addr_data1_mem); + val64 = RMAC_ADDR_CMD_MEM_WE | + RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1); + writeq(val64, &bar0->rmac_addr_cmd_mem); + /* Wait till command completes */ + wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET); + + sp->m_cast_flg = 1; + sp->all_multi_pos = config->max_mc_addr - 1; + } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) { + /* Disable all Multicast addresses */ + writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), + &bar0->rmac_addr_data0_mem); + writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0), + &bar0->rmac_addr_data1_mem); + val64 = RMAC_ADDR_CMD_MEM_WE | + RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos); + writeq(val64, &bar0->rmac_addr_cmd_mem); + /* Wait till command completes */ + wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET); + + sp->m_cast_flg = 0; + sp->all_multi_pos = 0; + } + + if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) { + /* Put the NIC into promiscuous mode */ + add = &bar0->mac_cfg; + val64 = readq(&bar0->mac_cfg); + val64 |= MAC_CFG_RMAC_PROM_ENABLE; + + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32)val64, add); + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32) (val64 >> 32), (add + 4)); + + if (vlan_tag_strip != 1) { + val64 = readq(&bar0->rx_pa_cfg); + val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; + writeq(val64, &bar0->rx_pa_cfg); + sp->vlan_strip_flag = 0; + } + + val64 = readq(&bar0->mac_cfg); + sp->promisc_flg = 1; + DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n", + dev->name); + } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) { + /* Remove the NIC from promiscuous mode */ + add = &bar0->mac_cfg; + val64 = readq(&bar0->mac_cfg); + val64 &= ~MAC_CFG_RMAC_PROM_ENABLE; + + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32)val64, add); + writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); + writel((u32) (val64 >> 32), (add + 4)); + + if (vlan_tag_strip != 0) { + val64 = readq(&bar0->rx_pa_cfg); + val64 |= RX_PA_CFG_STRIP_VLAN_TAG; + writeq(val64, &bar0->rx_pa_cfg); + sp->vlan_strip_flag = 1; + } + + val64 = readq(&bar0->mac_cfg); + sp->promisc_flg = 0; + DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name); + } + + /* Update individual M_CAST address list */ + if ((!sp->m_cast_flg) && netdev_mc_count(dev)) { + if (netdev_mc_count(dev) > + (config->max_mc_addr - config->max_mac_addr)) { + DBG_PRINT(ERR_DBG, + "%s: No more Rx filters can be added - " + "please enable ALL_MULTI instead\n", + dev->name); + return; + } + + prev_cnt = sp->mc_addr_count; + sp->mc_addr_count = netdev_mc_count(dev); + + /* Clear out the previous list of Mc in the H/W. 
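+ * Every previously programmed slot is overwritten with the
+ * disabled-entry pattern before the new list is written below.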
*/ + for (i = 0; i < prev_cnt; i++) { + writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr), + &bar0->rmac_addr_data0_mem); + writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), + &bar0->rmac_addr_data1_mem); + val64 = RMAC_ADDR_CMD_MEM_WE | + RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET + (config->mc_start_offset + i); + writeq(val64, &bar0->rmac_addr_cmd_mem); + + /* Wait for command completes */ + if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET)) { + DBG_PRINT(ERR_DBG, + "%s: Adding Multicasts failed\n", + dev->name); + return; + } + } + + /* Create the new Rx filter list and update the same in H/W. */ + i = 0; + netdev_for_each_mc_addr(ha, dev) { + mac_addr = 0; + for (j = 0; j < ETH_ALEN; j++) { + mac_addr |= ha->addr[j]; + mac_addr <<= 8; + } + mac_addr >>= 8; + writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), + &bar0->rmac_addr_data0_mem); + writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL), + &bar0->rmac_addr_data1_mem); + val64 = RMAC_ADDR_CMD_MEM_WE | + RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET + (i + config->mc_start_offset); + writeq(val64, &bar0->rmac_addr_cmd_mem); + + /* Wait for command completes */ + if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET)) { + DBG_PRINT(ERR_DBG, + "%s: Adding Multicasts failed\n", + dev->name); + return; + } + i++; + } + } +} + +/* read from CAM unicast & multicast addresses and store it in + * def_mac_addr structure + */ +static void do_s2io_store_unicast_mc(struct s2io_nic *sp) +{ + int offset; + u64 mac_addr = 0x0; + struct config_param *config = &sp->config; + + /* store unicast & multicast mac addresses */ + for (offset = 0; offset < config->max_mc_addr; offset++) { + mac_addr = do_s2io_read_unicast_mc(sp, offset); + /* if read fails disable the entry */ + if (mac_addr == FAILURE) + mac_addr = S2IO_DISABLE_MAC_ENTRY; + do_s2io_copy_mac_addr(sp, offset, mac_addr); + } +} + +/* restore unicast & multicast MAC to CAM from def_mac_addr structure */ +static void do_s2io_restore_unicast_mc(struct s2io_nic *sp) +{ + int offset; + struct config_param *config = &sp->config; + /* restore unicast mac address */ + for (offset = 0; offset < config->max_mac_addr; offset++) + do_s2io_prog_unicast(sp->dev, + sp->def_mac_addr[offset].mac_addr); + + /* restore multicast mac address */ + for (offset = config->mc_start_offset; + offset < config->max_mc_addr; offset++) + do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr); +} + +/* add a multicast MAC address to CAM */ +static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr) +{ + int i; + u64 mac_addr = 0; + struct config_param *config = &sp->config; + + for (i = 0; i < ETH_ALEN; i++) { + mac_addr <<= 8; + mac_addr |= addr[i]; + } + if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY)) + return SUCCESS; + + /* check if the multicast mac already preset in CAM */ + for (i = config->mc_start_offset; i < config->max_mc_addr; i++) { + u64 tmp64; + tmp64 = do_s2io_read_unicast_mc(sp, i); + if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */ + break; + + if (tmp64 == mac_addr) + return SUCCESS; + } + if (i == config->max_mc_addr) { + DBG_PRINT(ERR_DBG, + "CAM full no space left for multicast MAC\n"); + return FAILURE; + } + /* Update the internal structure with this new mac address */ + do_s2io_copy_mac_addr(sp, i, mac_addr); + + return do_s2io_add_mac(sp, mac_addr, i); +} + +/* add MAC address to CAM */ +static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off) +{ + 
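+ /* Program the 48-bit address into the CAM data register, then issue
+  * a write strobe for the requested CAM offset and wait for the
+  * command to complete.
+  */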
u64 val64; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr), + &bar0->rmac_addr_data0_mem); + + val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET(off); + writeq(val64, &bar0->rmac_addr_cmd_mem); + + /* Wait till command completes */ + if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET)) { + DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n"); + return FAILURE; + } + return SUCCESS; +} +/* deletes a specified unicast/multicast mac entry from CAM */ +static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr) +{ + int offset; + u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64; + struct config_param *config = &sp->config; + + for (offset = 1; + offset < config->max_mc_addr; offset++) { + tmp64 = do_s2io_read_unicast_mc(sp, offset); + if (tmp64 == addr) { + /* disable the entry by writing 0xffffffffffffULL */ + if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE) + return FAILURE; + /* store the new mac list from CAM */ + do_s2io_store_unicast_mc(sp); + return SUCCESS; + } + } + DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n", + (unsigned long long)addr); + return FAILURE; +} + +/* read mac entries from CAM */ +static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset) +{ + u64 tmp64 = 0xffffffffffff0000ULL, val64; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + /* read mac addr */ + val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET(offset); + writeq(val64, &bar0->rmac_addr_cmd_mem); + + /* Wait till command completes */ + if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET)) { + DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n"); + return FAILURE; + } + tmp64 = readq(&bar0->rmac_addr_data0_mem); + + return tmp64 >> 16; +} + +/** + * s2io_set_mac_addr - driver entry point + */ + +static int s2io_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + /* store the MAC address in CAM */ + return do_s2io_prog_unicast(dev, dev->dev_addr); +} +/** + * do_s2io_prog_unicast - Programs the Xframe mac address + * @dev : pointer to the device structure. + * @addr: a uchar pointer to the new mac address which is to be set. + * Description : This procedure will program the Xframe to receive + * frames with new Mac Address + * Return value: SUCCESS on success and an appropriate (-)ve integer + * as defined in errno.h file on failure. + */ + +static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) +{ + struct s2io_nic *sp = netdev_priv(dev); + register u64 mac_addr = 0, perm_addr = 0; + int i; + u64 tmp64; + struct config_param *config = &sp->config; + + /* + * Set the new MAC address as the new unicast filter and reflect this + * change on the device address registered with the OS. It will be + * at offset 0. 
+ */ + for (i = 0; i < ETH_ALEN; i++) { + mac_addr <<= 8; + mac_addr |= addr[i]; + perm_addr <<= 8; + perm_addr |= sp->def_mac_addr[0].mac_addr[i]; + } + + /* check if the dev_addr is different than perm_addr */ + if (mac_addr == perm_addr) + return SUCCESS; + + /* check if the mac already preset in CAM */ + for (i = 1; i < config->max_mac_addr; i++) { + tmp64 = do_s2io_read_unicast_mc(sp, i); + if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */ + break; + + if (tmp64 == mac_addr) { + DBG_PRINT(INFO_DBG, + "MAC addr:0x%llx already present in CAM\n", + (unsigned long long)mac_addr); + return SUCCESS; + } + } + if (i == config->max_mac_addr) { + DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n"); + return FAILURE; + } + /* Update the internal structure with this new mac address */ + do_s2io_copy_mac_addr(sp, i, mac_addr); + + return do_s2io_add_mac(sp, mac_addr, i); +} + +/** + * s2io_ethtool_sset - Sets different link parameters. + * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. + * @info: pointer to the structure with parameters given by ethtool to set + * link information. + * Description: + * The function sets different link parameters provided by the user onto + * the NIC. + * Return value: + * 0 on success. + */ + +static int s2io_ethtool_sset(struct net_device *dev, + struct ethtool_cmd *info) +{ + struct s2io_nic *sp = netdev_priv(dev); + if ((info->autoneg == AUTONEG_ENABLE) || + (ethtool_cmd_speed(info) != SPEED_10000) || + (info->duplex != DUPLEX_FULL)) + return -EINVAL; + else { + s2io_close(sp->dev); + s2io_open(sp->dev); + } + + return 0; +} + +/** + * s2io_ethtol_gset - Return link specific information. + * @sp : private member of the device structure, pointer to the + * s2io_nic structure. + * @info : pointer to the structure with parameters given by ethtool + * to return link information. + * Description: + * Returns link specific information like speed, duplex etc.. to ethtool. + * Return value : + * return 0 on success. + */ + +static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) +{ + struct s2io_nic *sp = netdev_priv(dev); + info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + info->port = PORT_FIBRE; + + /* info->transceiver */ + info->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(sp->dev)) { + ethtool_cmd_speed_set(info, SPEED_10000); + info->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(info, SPEED_UNKNOWN); + info->duplex = DUPLEX_UNKNOWN; + } + + info->autoneg = AUTONEG_DISABLE; + return 0; +} + +/** + * s2io_ethtool_gdrvinfo - Returns driver specific information. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @info : pointer to the structure with parameters given by ethtool to + * return driver information. + * Description: + * Returns driver specefic information like name, version etc.. to ethtool. 
+ * Return value: + * void + */ + +static void s2io_ethtool_gdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct s2io_nic *sp = netdev_priv(dev); + + strlcpy(info->driver, s2io_driver_name, sizeof(info->driver)); + strlcpy(info->version, s2io_driver_version, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); + info->regdump_len = XENA_REG_SPACE; + info->eedump_len = XENA_EEPROM_SPACE; +} + +/** + * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer. + * @sp: private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @regs : pointer to the structure with parameters given by ethtool for + * dumping the registers. + * @reg_space: The input argumnet into which all the registers are dumped. + * Description: + * Dumps the entire register space of xFrame NIC into the user given + * buffer area. + * Return value : + * void . + */ + +static void s2io_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + int i; + u64 reg; + u8 *reg_space = (u8 *)space; + struct s2io_nic *sp = netdev_priv(dev); + + regs->len = XENA_REG_SPACE; + regs->version = sp->pdev->subsystem_device; + + for (i = 0; i < regs->len; i += 8) { + reg = readq(sp->bar0 + i); + memcpy((reg_space + i), ®, 8); + } +} + +/* + * s2io_set_led - control NIC led + */ +static void s2io_set_led(struct s2io_nic *sp, bool on) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u16 subid = sp->pdev->subsystem_device; + u64 val64; + + if ((sp->device_type == XFRAME_II_DEVICE) || + ((subid & 0xFF) >= 0x07)) { + val64 = readq(&bar0->gpio_control); + if (on) + val64 |= GPIO_CTRL_GPIO_0; + else + val64 &= ~GPIO_CTRL_GPIO_0; + + writeq(val64, &bar0->gpio_control); + } else { + val64 = readq(&bar0->adapter_control); + if (on) + val64 |= ADAPTER_LED_ON; + else + val64 &= ~ADAPTER_LED_ON; + + writeq(val64, &bar0->adapter_control); + } + +} + +/** + * s2io_ethtool_set_led - To physically identify the nic on the system. + * @dev : network device + * @state: led setting + * + * Description: Used to physically identify the NIC on the system. + * The Link LED will blink for a time specified by the user for + * identification. + * NOTE: The Link has to be Up to be able to blink the LED. Hence + * identification is possible only if it's link is up. 
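+ * Returning 1 for ETHTOOL_ID_ACTIVE below hands the blink timing to
+ * the ethtool core, which then calls back with ETHTOOL_ID_ON and
+ * ETHTOOL_ID_OFF to toggle the LED.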
+ */ + +static int s2io_ethtool_set_led(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u16 subid = sp->pdev->subsystem_device; + + if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) { + u64 val64 = readq(&bar0->adapter_control); + if (!(val64 & ADAPTER_CNTL_EN)) { + pr_err("Adapter Link down, cannot blink LED\n"); + return -EAGAIN; + } + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: + sp->adapt_ctrl_org = readq(&bar0->gpio_control); + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + s2io_set_led(sp, true); + break; + + case ETHTOOL_ID_OFF: + s2io_set_led(sp, false); + break; + + case ETHTOOL_ID_INACTIVE: + if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) + writeq(sp->adapt_ctrl_org, &bar0->gpio_control); + } + + return 0; +} + +static void s2io_ethtool_gringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct s2io_nic *sp = netdev_priv(dev); + int i, tx_desc_count = 0, rx_desc_count = 0; + + if (sp->rxd_mode == RXD_MODE_1) { + ering->rx_max_pending = MAX_RX_DESC_1; + ering->rx_jumbo_max_pending = MAX_RX_DESC_1; + } else { + ering->rx_max_pending = MAX_RX_DESC_2; + ering->rx_jumbo_max_pending = MAX_RX_DESC_2; + } + + ering->tx_max_pending = MAX_TX_DESC; + + for (i = 0; i < sp->config.rx_ring_num; i++) + rx_desc_count += sp->config.rx_cfg[i].num_rxd; + ering->rx_pending = rx_desc_count; + ering->rx_jumbo_pending = rx_desc_count; + + for (i = 0; i < sp->config.tx_fifo_num; i++) + tx_desc_count += sp->config.tx_cfg[i].fifo_len; + ering->tx_pending = tx_desc_count; + DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds); +} + +/** + * s2io_ethtool_getpause_data -Pause frame frame generation and reception. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @ep : pointer to the structure with pause parameters given by ethtool. + * Description: + * Returns the Pause frame generation and reception capability of the NIC. + * Return value: + * void + */ +static void s2io_ethtool_getpause_data(struct net_device *dev, + struct ethtool_pauseparam *ep) +{ + u64 val64; + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + val64 = readq(&bar0->rmac_pause_cfg); + if (val64 & RMAC_PAUSE_GEN_ENABLE) + ep->tx_pause = true; + if (val64 & RMAC_PAUSE_RX_ENABLE) + ep->rx_pause = true; + ep->autoneg = false; +} + +/** + * s2io_ethtool_setpause_data - set/reset pause frame generation. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @ep : pointer to the structure with pause parameters given by ethtool. + * Description: + * It can be used to set or reset Pause frame generation or reception + * support of the NIC. + * Return value: + * int, returns 0 on Success + */ + +static int s2io_ethtool_setpause_data(struct net_device *dev, + struct ethtool_pauseparam *ep) +{ + u64 val64; + struct s2io_nic *sp = netdev_priv(dev); + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + val64 = readq(&bar0->rmac_pause_cfg); + if (ep->tx_pause) + val64 |= RMAC_PAUSE_GEN_ENABLE; + else + val64 &= ~RMAC_PAUSE_GEN_ENABLE; + if (ep->rx_pause) + val64 |= RMAC_PAUSE_RX_ENABLE; + else + val64 &= ~RMAC_PAUSE_RX_ENABLE; + writeq(val64, &bar0->rmac_pause_cfg); + return 0; +} + +/** + * read_eeprom - reads 4 bytes of data from user given offset. 
+ * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @off : offset at which the data must be written + * @data : Its an output parameter where the data read at the given + * offset is stored. + * Description: + * Will read 4 bytes of data from the user given offset and return the + * read data. + * NOTE: Will allow to read only part of the EEPROM visible through the + * I2C bus. + * Return value: + * -1 on failure and 0 on success. + */ + +#define S2IO_DEV_ID 5 +static int read_eeprom(struct s2io_nic *sp, int off, u64 *data) +{ + int ret = -1; + u32 exit_cnt = 0; + u64 val64; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + if (sp->device_type == XFRAME_I_DEVICE) { + val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | + I2C_CONTROL_ADDR(off) | + I2C_CONTROL_BYTE_CNT(0x3) | + I2C_CONTROL_READ | + I2C_CONTROL_CNTL_START; + SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); + + while (exit_cnt < 5) { + val64 = readq(&bar0->i2c_control); + if (I2C_CONTROL_CNTL_END(val64)) { + *data = I2C_CONTROL_GET_DATA(val64); + ret = 0; + break; + } + msleep(50); + exit_cnt++; + } + } + + if (sp->device_type == XFRAME_II_DEVICE) { + val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | + SPI_CONTROL_BYTECNT(0x3) | + SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off); + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + val64 |= SPI_CONTROL_REQ; + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + while (exit_cnt < 5) { + val64 = readq(&bar0->spi_control); + if (val64 & SPI_CONTROL_NACK) { + ret = 1; + break; + } else if (val64 & SPI_CONTROL_DONE) { + *data = readq(&bar0->spi_data); + *data &= 0xffffff; + ret = 0; + break; + } + msleep(50); + exit_cnt++; + } + } + return ret; +} + +/** + * write_eeprom - actually writes the relevant part of the data value. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @off : offset at which the data must be written + * @data : The data that is to be written + * @cnt : Number of bytes of the data that are actually to be written into + * the Eeprom. (max of 3) + * Description: + * Actually writes the relevant part of the data value into the Eeprom + * through the I2C bus. + * Return value: + * 0 on success, -1 on failure. + */ + +static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt) +{ + int exit_cnt = 0, ret = -1; + u64 val64; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + if (sp->device_type == XFRAME_I_DEVICE) { + val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | + I2C_CONTROL_ADDR(off) | + I2C_CONTROL_BYTE_CNT(cnt) | + I2C_CONTROL_SET_DATA((u32)data) | + I2C_CONTROL_CNTL_START; + SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); + + while (exit_cnt < 5) { + val64 = readq(&bar0->i2c_control); + if (I2C_CONTROL_CNTL_END(val64)) { + if (!(val64 & I2C_CONTROL_NACK)) + ret = 0; + break; + } + msleep(50); + exit_cnt++; + } + } + + if (sp->device_type == XFRAME_II_DEVICE) { + int write_cnt = (cnt == 8) ? 
0 : cnt; + writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data); + + val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | + SPI_CONTROL_BYTECNT(write_cnt) | + SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off); + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + val64 |= SPI_CONTROL_REQ; + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + while (exit_cnt < 5) { + val64 = readq(&bar0->spi_control); + if (val64 & SPI_CONTROL_NACK) { + ret = 1; + break; + } else if (val64 & SPI_CONTROL_DONE) { + ret = 0; + break; + } + msleep(50); + exit_cnt++; + } + } + return ret; +} +static void s2io_vpd_read(struct s2io_nic *nic) +{ + u8 *vpd_data; + u8 data; + int i = 0, cnt, len, fail = 0; + int vpd_addr = 0x80; + struct swStat *swstats = &nic->mac_control.stats_info->sw_stat; + + if (nic->device_type == XFRAME_II_DEVICE) { + strcpy(nic->product_name, "Xframe II 10GbE network adapter"); + vpd_addr = 0x80; + } else { + strcpy(nic->product_name, "Xframe I 10GbE network adapter"); + vpd_addr = 0x50; + } + strcpy(nic->serial_num, "NOT AVAILABLE"); + + vpd_data = kmalloc(256, GFP_KERNEL); + if (!vpd_data) { + swstats->mem_alloc_fail_cnt++; + return; + } + swstats->mem_allocated += 256; + + for (i = 0; i < 256; i += 4) { + pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); + pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); + pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0); + for (cnt = 0; cnt < 5; cnt++) { + msleep(2); + pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data); + if (data == 0x80) + break; + } + if (cnt >= 5) { + DBG_PRINT(ERR_DBG, "Read of VPD data failed\n"); + fail = 1; + break; + } + pci_read_config_dword(nic->pdev, (vpd_addr + 4), + (u32 *)&vpd_data[i]); + } + + if (!fail) { + /* read serial number of adapter */ + for (cnt = 0; cnt < 252; cnt++) { + if ((vpd_data[cnt] == 'S') && + (vpd_data[cnt+1] == 'N')) { + len = vpd_data[cnt+2]; + if (len < min(VPD_STRING_LEN, 256-cnt-2)) { + memcpy(nic->serial_num, + &vpd_data[cnt + 3], + len); + memset(nic->serial_num+len, + 0, + VPD_STRING_LEN-len); + break; + } + } + } + } + + if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) { + len = vpd_data[1]; + memcpy(nic->product_name, &vpd_data[3], len); + nic->product_name[len] = 0; + } + kfree(vpd_data); + swstats->mem_freed += 256; +} + +/** + * s2io_ethtool_geeprom - reads the value stored in the Eeprom. + * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure. + * @eeprom : pointer to the user level structure provided by ethtool, + * containing all relevant information. + * @data_buf : user defined value to be written into Eeprom. + * Description: Reads the values stored in the Eeprom at given offset + * for a given length. Stores these values int the input argument data + * buffer 'data_buf' and returns these to the caller (ethtool.) 
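+ * The EEPROM is read 4 bytes at a time via read_eeprom() and each word
+ * is passed through INV() before being copied into 'data_buf'.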
+ * Return value: + * int 0 on success + */ + +static int s2io_ethtool_geeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 * data_buf) +{ + u32 i, valid; + u64 data; + struct s2io_nic *sp = netdev_priv(dev); + + eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); + + if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE)) + eeprom->len = XENA_EEPROM_SPACE - eeprom->offset; + + for (i = 0; i < eeprom->len; i += 4) { + if (read_eeprom(sp, (eeprom->offset + i), &data)) { + DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n"); + return -EFAULT; + } + valid = INV(data); + memcpy((data_buf + i), &valid, 4); + } + return 0; +} + +/** + * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @eeprom : pointer to the user level structure provided by ethtool, + * containing all relevant information. + * @data_buf ; user defined value to be written into Eeprom. + * Description: + * Tries to write the user provided value in the Eeprom, at the offset + * given by the user. + * Return value: + * 0 on success, -EFAULT on failure. + */ + +static int s2io_ethtool_seeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data_buf) +{ + int len = eeprom->len, cnt = 0; + u64 valid = 0, data; + struct s2io_nic *sp = netdev_priv(dev); + + if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { + DBG_PRINT(ERR_DBG, + "ETHTOOL_WRITE_EEPROM Err: " + "Magic value is wrong, it is 0x%x should be 0x%x\n", + (sp->pdev->vendor | (sp->pdev->device << 16)), + eeprom->magic); + return -EFAULT; + } + + while (len) { + data = (u32)data_buf[cnt] & 0x000000FF; + if (data) + valid = (u32)(data << 24); + else + valid = data; + + if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) { + DBG_PRINT(ERR_DBG, + "ETHTOOL_WRITE_EEPROM Err: " + "Cannot write into the specified offset\n"); + return -EFAULT; + } + cnt++; + len--; + } + + return 0; +} + +/** + * s2io_register_test - reads and writes into all clock domains. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @data : variable that returns the result of each of the test conducted b + * by the driver. + * Description: + * Read and write into all clock domains. The NIC has 3 clock domains, + * see that registers in all the three regions are accessible. + * Return value: + * 0 on success. 
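+ * The read checks compare pif_rd_swapper_fb, rmac_pause_cfg,
+ * rx_queue_cfg and xgxs_efifo_cfg against their expected reset values;
+ * the write check toggles test patterns through xmsi_data.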
+ */ + +static int s2io_register_test(struct s2io_nic *sp, uint64_t *data) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64 = 0, exp_val; + int fail = 0; + + val64 = readq(&bar0->pif_rd_swapper_fb); + if (val64 != 0x123456789abcdefULL) { + fail = 1; + DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1); + } + + val64 = readq(&bar0->rmac_pause_cfg); + if (val64 != 0xc000ffff00000000ULL) { + fail = 1; + DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2); + } + + val64 = readq(&bar0->rx_queue_cfg); + if (sp->device_type == XFRAME_II_DEVICE) + exp_val = 0x0404040404040404ULL; + else + exp_val = 0x0808080808080808ULL; + if (val64 != exp_val) { + fail = 1; + DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3); + } + + val64 = readq(&bar0->xgxs_efifo_cfg); + if (val64 != 0x000000001923141EULL) { + fail = 1; + DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4); + } + + val64 = 0x5A5A5A5A5A5A5A5AULL; + writeq(val64, &bar0->xmsi_data); + val64 = readq(&bar0->xmsi_data); + if (val64 != 0x5A5A5A5A5A5A5A5AULL) { + fail = 1; + DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1); + } + + val64 = 0xA5A5A5A5A5A5A5A5ULL; + writeq(val64, &bar0->xmsi_data); + val64 = readq(&bar0->xmsi_data); + if (val64 != 0xA5A5A5A5A5A5A5A5ULL) { + fail = 1; + DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2); + } + + *data = fail; + return fail; +} + +/** + * s2io_eeprom_test - to verify that EEprom in the xena can be programmed. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @data:variable that returns the result of each of the test conducted by + * the driver. + * Description: + * Verify that EEPROM in the xena can be programmed using I2C_CONTROL + * register. + * Return value: + * 0 on success. + */ + +static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data) +{ + int fail = 0; + u64 ret_data, org_4F0, org_7F0; + u8 saved_4F0 = 0, saved_7F0 = 0; + struct net_device *dev = sp->dev; + + /* Test Write Error at offset 0 */ + /* Note that SPI interface allows write access to all areas + * of EEPROM. Hence doing all negative testing only for Xframe I. + */ + if (sp->device_type == XFRAME_I_DEVICE) + if (!write_eeprom(sp, 0, 0, 3)) + fail = 1; + + /* Save current values at offsets 0x4F0 and 0x7F0 */ + if (!read_eeprom(sp, 0x4F0, &org_4F0)) + saved_4F0 = 1; + if (!read_eeprom(sp, 0x7F0, &org_7F0)) + saved_7F0 = 1; + + /* Test Write at offset 4f0 */ + if (write_eeprom(sp, 0x4F0, 0x012345, 3)) + fail = 1; + if (read_eeprom(sp, 0x4F0, &ret_data)) + fail = 1; + + if (ret_data != 0x012345) { + DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. " + "Data written %llx Data read %llx\n", + dev->name, (unsigned long long)0x12345, + (unsigned long long)ret_data); + fail = 1; + } + + /* Reset the EEPROM data go FFFF */ + write_eeprom(sp, 0x4F0, 0xFFFFFF, 3); + + /* Test Write Request Error at offset 0x7c */ + if (sp->device_type == XFRAME_I_DEVICE) + if (!write_eeprom(sp, 0x07C, 0, 3)) + fail = 1; + + /* Test Write Request at offset 0x7f0 */ + if (write_eeprom(sp, 0x7F0, 0x012345, 3)) + fail = 1; + if (read_eeprom(sp, 0x7F0, &ret_data)) + fail = 1; + + if (ret_data != 0x012345) { + DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. 
" + "Data written %llx Data read %llx\n", + dev->name, (unsigned long long)0x12345, + (unsigned long long)ret_data); + fail = 1; + } + + /* Reset the EEPROM data go FFFF */ + write_eeprom(sp, 0x7F0, 0xFFFFFF, 3); + + if (sp->device_type == XFRAME_I_DEVICE) { + /* Test Write Error at offset 0x80 */ + if (!write_eeprom(sp, 0x080, 0, 3)) + fail = 1; + + /* Test Write Error at offset 0xfc */ + if (!write_eeprom(sp, 0x0FC, 0, 3)) + fail = 1; + + /* Test Write Error at offset 0x100 */ + if (!write_eeprom(sp, 0x100, 0, 3)) + fail = 1; + + /* Test Write Error at offset 4ec */ + if (!write_eeprom(sp, 0x4EC, 0, 3)) + fail = 1; + } + + /* Restore values at offsets 0x4F0 and 0x7F0 */ + if (saved_4F0) + write_eeprom(sp, 0x4F0, org_4F0, 3); + if (saved_7F0) + write_eeprom(sp, 0x7F0, org_7F0, 3); + + *data = fail; + return fail; +} + +/** + * s2io_bist_test - invokes the MemBist test of the card . + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @data:variable that returns the result of each of the test conducted by + * the driver. + * Description: + * This invokes the MemBist test of the card. We give around + * 2 secs time for the Test to complete. If it's still not complete + * within this peiod, we consider that the test failed. + * Return value: + * 0 on success and -1 on failure. + */ + +static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data) +{ + u8 bist = 0; + int cnt = 0, ret = -1; + + pci_read_config_byte(sp->pdev, PCI_BIST, &bist); + bist |= PCI_BIST_START; + pci_write_config_word(sp->pdev, PCI_BIST, bist); + + while (cnt < 20) { + pci_read_config_byte(sp->pdev, PCI_BIST, &bist); + if (!(bist & PCI_BIST_START)) { + *data = (bist & PCI_BIST_CODE_MASK); + ret = 0; + break; + } + msleep(100); + cnt++; + } + + return ret; +} + +/** + * s2io_link_test - verifies the link state of the nic + * @sp ; private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @data: variable that returns the result of each of the test conducted by + * the driver. + * Description: + * The function verifies the link state of the NIC and updates the input + * argument 'data' appropriately. + * Return value: + * 0 on success. + */ + +static int s2io_link_test(struct s2io_nic *sp, uint64_t *data) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64; + + val64 = readq(&bar0->adapter_status); + if (!(LINK_IS_UP(val64))) + *data = 1; + else + *data = 0; + + return *data; +} + +/** + * s2io_rldram_test - offline test for access to the RldRam chip on the NIC + * @sp: private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @data: variable that returns the result of each of the test + * conducted by the driver. + * Description: + * This is one of the offline test that tests the read and write + * access to the RldRam chip on the NIC. + * Return value: + * 0 on success. 
+ */ + +static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data) +{ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64; + int cnt, iteration = 0, test_fail = 0; + + val64 = readq(&bar0->adapter_control); + val64 &= ~ADAPTER_ECC_EN; + writeq(val64, &bar0->adapter_control); + + val64 = readq(&bar0->mc_rldram_test_ctrl); + val64 |= MC_RLDRAM_TEST_MODE; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); + + val64 = readq(&bar0->mc_rldram_mrs); + val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); + + val64 |= MC_RLDRAM_MRS_ENABLE; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF); + + while (iteration < 2) { + val64 = 0x55555555aaaa0000ULL; + if (iteration == 1) + val64 ^= 0xFFFFFFFFFFFF0000ULL; + writeq(val64, &bar0->mc_rldram_test_d0); + + val64 = 0xaaaa5a5555550000ULL; + if (iteration == 1) + val64 ^= 0xFFFFFFFFFFFF0000ULL; + writeq(val64, &bar0->mc_rldram_test_d1); + + val64 = 0x55aaaaaaaa5a0000ULL; + if (iteration == 1) + val64 ^= 0xFFFFFFFFFFFF0000ULL; + writeq(val64, &bar0->mc_rldram_test_d2); + + val64 = (u64) (0x0000003ffffe0100ULL); + writeq(val64, &bar0->mc_rldram_test_add); + + val64 = MC_RLDRAM_TEST_MODE | + MC_RLDRAM_TEST_WRITE | + MC_RLDRAM_TEST_GO; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); + + for (cnt = 0; cnt < 5; cnt++) { + val64 = readq(&bar0->mc_rldram_test_ctrl); + if (val64 & MC_RLDRAM_TEST_DONE) + break; + msleep(200); + } + + if (cnt == 5) + break; + + val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); + + for (cnt = 0; cnt < 5; cnt++) { + val64 = readq(&bar0->mc_rldram_test_ctrl); + if (val64 & MC_RLDRAM_TEST_DONE) + break; + msleep(500); + } + + if (cnt == 5) + break; + + val64 = readq(&bar0->mc_rldram_test_ctrl); + if (!(val64 & MC_RLDRAM_TEST_PASS)) + test_fail = 1; + + iteration++; + } + + *data = test_fail; + + /* Bring the adapter out of test mode */ + SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF); + + return test_fail; +} + +/** + * s2io_ethtool_test - conducts 6 tsets to determine the health of card. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @ethtest : pointer to a ethtool command specific structure that will be + * returned to the user. + * @data : variable that returns the result of each of the test + * conducted by the driver. + * Description: + * This function conducts 6 tests ( 4 offline and 2 online) to determine + * the health of the card. + * Return value: + * void + */ + +static void s2io_ethtool_test(struct net_device *dev, + struct ethtool_test *ethtest, + uint64_t *data) +{ + struct s2io_nic *sp = netdev_priv(dev); + int orig_state = netif_running(sp->dev); + + if (ethtest->flags == ETH_TEST_FL_OFFLINE) { + /* Offline Tests. */ + if (orig_state) + s2io_close(sp->dev); + + if (s2io_register_test(sp, &data[0])) + ethtest->flags |= ETH_TEST_FL_FAILED; + + s2io_reset(sp); + + if (s2io_rldram_test(sp, &data[3])) + ethtest->flags |= ETH_TEST_FL_FAILED; + + s2io_reset(sp); + + if (s2io_eeprom_test(sp, &data[1])) + ethtest->flags |= ETH_TEST_FL_FAILED; + + if (s2io_bist_test(sp, &data[4])) + ethtest->flags |= ETH_TEST_FL_FAILED; + + if (orig_state) + s2io_open(sp->dev); + + data[2] = 0; + } else { + /* Online Tests. 
*/ + if (!orig_state) { + DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n", + dev->name); + data[0] = -1; + data[1] = -1; + data[2] = -1; + data[3] = -1; + data[4] = -1; + } + + if (s2io_link_test(sp, &data[2])) + ethtest->flags |= ETH_TEST_FL_FAILED; + + data[0] = 0; + data[1] = 0; + data[3] = 0; + data[4] = 0; + } +} + +static void s2io_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, + u64 *tmp_stats) +{ + int i = 0, k; + struct s2io_nic *sp = netdev_priv(dev); + struct stat_block *stats = sp->mac_control.stats_info; + struct swStat *swstats = &stats->sw_stat; + struct xpakStat *xstats = &stats->xpak_stat; + + s2io_updt_stats(sp); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 | + le32_to_cpu(stats->tmac_data_octets); + tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_mcst_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_bcst_frms); + tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 | + le32_to_cpu(stats->tmac_ttl_octets); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_ucst_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_nucst_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 | + le32_to_cpu(stats->tmac_any_err_frms); + tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets); + tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 | + le32_to_cpu(stats->tmac_vld_ip); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 | + le32_to_cpu(stats->tmac_drop_ip); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 | + le32_to_cpu(stats->tmac_icmp); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 | + le32_to_cpu(stats->tmac_rst_tcp); + tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp); + tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 | + le32_to_cpu(stats->tmac_udp); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_vld_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 | + le32_to_cpu(stats->rmac_data_octets); + tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_vld_mcst_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_vld_bcst_frms); + tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms); + tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 | + le32_to_cpu(stats->rmac_ttl_octets); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32 + | le32_to_cpu(stats->rmac_accepted_ucst_frms); + tmp_stats[i++] = + 
(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow) + << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_discarded_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_drop_events_oflow) + << 32 | le32_to_cpu(stats->rmac_drop_events); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_usized_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_osized_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_frag_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 | + le32_to_cpu(stats->rmac_jabber_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 | + le32_to_cpu(stats->rmac_ip); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets); + tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 | + le32_to_cpu(stats->rmac_drop_ip); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 | + le32_to_cpu(stats->rmac_icmp); + tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 | + le32_to_cpu(stats->rmac_udp); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 | + le32_to_cpu(stats->rmac_err_drp_udp); + tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6); + tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6); + tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 | + le32_to_cpu(stats->rmac_pause_cnt); + tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt); + tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt); + tmp_stats[i++] = + (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 | + le32_to_cpu(stats->rmac_accepted_ip); + tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp); + tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt); + tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt); + tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt); + tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt); + tmp_stats[i++] = 
le32_to_cpu(stats->wr_rtry_rd_ack_cnt); + tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt); + tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt); + tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt); + tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt); + tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt); + tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt); + tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt); + tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt); + tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt); + tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt); + tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt); + tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt); + tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt); + + /* Enhanced statistics exist only for Hercules */ + if (sp->device_type == XFRAME_II_DEVICE) { + tmp_stats[i++] = + le64_to_cpu(stats->rmac_ttl_1519_4095_frms); + tmp_stats[i++] = + le64_to_cpu(stats->rmac_ttl_4096_8191_frms); + tmp_stats[i++] = + le64_to_cpu(stats->rmac_ttl_8192_max_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms); + tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms); + tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard); + tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard); + tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard); + tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard); + tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard); + tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard); + tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard); + tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt); + } + + tmp_stats[i++] = 0; + tmp_stats[i++] = swstats->single_ecc_errs; + tmp_stats[i++] = swstats->double_ecc_errs; + tmp_stats[i++] = swstats->parity_err_cnt; + tmp_stats[i++] = swstats->serious_err_cnt; + tmp_stats[i++] = swstats->soft_reset_cnt; + tmp_stats[i++] = swstats->fifo_full_cnt; + for (k = 0; k < MAX_RX_RINGS; k++) + tmp_stats[i++] = swstats->ring_full_cnt[k]; + tmp_stats[i++] = xstats->alarm_transceiver_temp_high; + tmp_stats[i++] = xstats->alarm_transceiver_temp_low; + tmp_stats[i++] = xstats->alarm_laser_bias_current_high; + tmp_stats[i++] = xstats->alarm_laser_bias_current_low; + tmp_stats[i++] = xstats->alarm_laser_output_power_high; + tmp_stats[i++] = xstats->alarm_laser_output_power_low; + tmp_stats[i++] = xstats->warn_transceiver_temp_high; + tmp_stats[i++] = xstats->warn_transceiver_temp_low; + tmp_stats[i++] = xstats->warn_laser_bias_current_high; + tmp_stats[i++] = xstats->warn_laser_bias_current_low; + tmp_stats[i++] = xstats->warn_laser_output_power_high; + tmp_stats[i++] = xstats->warn_laser_output_power_low; + tmp_stats[i++] = swstats->clubbed_frms_cnt; + tmp_stats[i++] = swstats->sending_both; + tmp_stats[i++] = swstats->outof_sequence_pkts; + tmp_stats[i++] = swstats->flush_max_pkts; + if (swstats->num_aggregations) { + u64 tmp = swstats->sum_avg_pkts_aggregated; + int count = 0; + /* + * Since 64-bit divide does not work on all platforms, + * do repeated subtraction. 
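Most of the 64-bit counters assembled in s2io_get_ethtool_stats() above are built from a pair of little-endian hardware words: a low 32-bit count plus an overflow word that supplies the upper half. A tiny helper states that intent in one place; this is a sketch only, and s2io_stat64() is not a name the driver defines.

/* Illustrative helper: widen a low counter and its overflow companion
 * into one 64-bit statistic, matching the inline expressions used
 * throughout the stats gather above.
 */
static inline u64 s2io_stat64(__le32 oflow, __le32 lo)
{
        return ((u64)le32_to_cpu(oflow) << 32) | le32_to_cpu(lo);
}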
+ */ + while (tmp >= swstats->num_aggregations) { + tmp -= swstats->num_aggregations; + count++; + } + tmp_stats[i++] = count; + } else + tmp_stats[i++] = 0; + tmp_stats[i++] = swstats->mem_alloc_fail_cnt; + tmp_stats[i++] = swstats->pci_map_fail_cnt; + tmp_stats[i++] = swstats->watchdog_timer_cnt; + tmp_stats[i++] = swstats->mem_allocated; + tmp_stats[i++] = swstats->mem_freed; + tmp_stats[i++] = swstats->link_up_cnt; + tmp_stats[i++] = swstats->link_down_cnt; + tmp_stats[i++] = swstats->link_up_time; + tmp_stats[i++] = swstats->link_down_time; + + tmp_stats[i++] = swstats->tx_buf_abort_cnt; + tmp_stats[i++] = swstats->tx_desc_abort_cnt; + tmp_stats[i++] = swstats->tx_parity_err_cnt; + tmp_stats[i++] = swstats->tx_link_loss_cnt; + tmp_stats[i++] = swstats->tx_list_proc_err_cnt; + + tmp_stats[i++] = swstats->rx_parity_err_cnt; + tmp_stats[i++] = swstats->rx_abort_cnt; + tmp_stats[i++] = swstats->rx_parity_abort_cnt; + tmp_stats[i++] = swstats->rx_rda_fail_cnt; + tmp_stats[i++] = swstats->rx_unkn_prot_cnt; + tmp_stats[i++] = swstats->rx_fcs_err_cnt; + tmp_stats[i++] = swstats->rx_buf_size_err_cnt; + tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt; + tmp_stats[i++] = swstats->rx_unkn_err_cnt; + tmp_stats[i++] = swstats->tda_err_cnt; + tmp_stats[i++] = swstats->pfc_err_cnt; + tmp_stats[i++] = swstats->pcc_err_cnt; + tmp_stats[i++] = swstats->tti_err_cnt; + tmp_stats[i++] = swstats->tpa_err_cnt; + tmp_stats[i++] = swstats->sm_err_cnt; + tmp_stats[i++] = swstats->lso_err_cnt; + tmp_stats[i++] = swstats->mac_tmac_err_cnt; + tmp_stats[i++] = swstats->mac_rmac_err_cnt; + tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt; + tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt; + tmp_stats[i++] = swstats->rc_err_cnt; + tmp_stats[i++] = swstats->prc_pcix_err_cnt; + tmp_stats[i++] = swstats->rpa_err_cnt; + tmp_stats[i++] = swstats->rda_err_cnt; + tmp_stats[i++] = swstats->rti_err_cnt; + tmp_stats[i++] = swstats->mc_err_cnt; +} + +static int s2io_ethtool_get_regs_len(struct net_device *dev) +{ + return XENA_REG_SPACE; +} + + +static int s2io_get_eeprom_len(struct net_device *dev) +{ + return XENA_EEPROM_SPACE; +} + +static int s2io_get_sset_count(struct net_device *dev, int sset) +{ + struct s2io_nic *sp = netdev_priv(dev); + + switch (sset) { + case ETH_SS_TEST: + return S2IO_TEST_LEN; + case ETH_SS_STATS: + switch (sp->device_type) { + case XFRAME_I_DEVICE: + return XFRAME_I_STAT_LEN; + case XFRAME_II_DEVICE: + return XFRAME_II_STAT_LEN; + default: + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static void s2io_ethtool_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + int stat_size = 0; + struct s2io_nic *sp = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN); + break; + case ETH_SS_STATS: + stat_size = sizeof(ethtool_xena_stats_keys); + memcpy(data, ðtool_xena_stats_keys, stat_size); + if (sp->device_type == XFRAME_II_DEVICE) { + memcpy(data + stat_size, + ðtool_enhanced_stats_keys, + sizeof(ethtool_enhanced_stats_keys)); + stat_size += sizeof(ethtool_enhanced_stats_keys); + } + + memcpy(data + stat_size, ðtool_driver_stats_keys, + sizeof(ethtool_driver_stats_keys)); + } +} + +static int s2io_set_features(struct net_device *dev, netdev_features_t features) +{ + struct s2io_nic *sp = netdev_priv(dev); + netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO; + + if (changed && netif_running(dev)) { + int rc; + + s2io_stop_all_tx_queue(sp); + s2io_card_down(sp); + dev->features = features; + rc = s2io_card_up(sp); 
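The average-aggregation figure just above is computed by repeated subtraction because an open-coded 64-bit divide is not available on every 32-bit architecture in kernel code. The math64 helpers give the same integer quotient portably; a sketch, assuming the swStat fields shown above:

#include <linux/math64.h>

/* Equivalent of the repeated-subtraction loop above: the integer part
 * of sum_avg_pkts_aggregated / num_aggregations, using div64_u64()
 * which is safe on 32-bit platforms as well.
 */
static u64 s2io_avg_aggregated_pkts(const struct swStat *swstats)
{
        if (!swstats->num_aggregations)
                return 0;
        return div64_u64(swstats->sum_avg_pkts_aggregated,
                         swstats->num_aggregations);
}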
+ if (rc) + s2io_reset(sp); + else + s2io_start_all_tx_queue(sp); + + return rc ? rc : 1; + } + + return 0; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_settings = s2io_ethtool_gset, + .set_settings = s2io_ethtool_sset, + .get_drvinfo = s2io_ethtool_gdrvinfo, + .get_regs_len = s2io_ethtool_get_regs_len, + .get_regs = s2io_ethtool_gregs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = s2io_get_eeprom_len, + .get_eeprom = s2io_ethtool_geeprom, + .set_eeprom = s2io_ethtool_seeprom, + .get_ringparam = s2io_ethtool_gringparam, + .get_pauseparam = s2io_ethtool_getpause_data, + .set_pauseparam = s2io_ethtool_setpause_data, + .self_test = s2io_ethtool_test, + .get_strings = s2io_ethtool_get_strings, + .set_phys_id = s2io_ethtool_set_led, + .get_ethtool_stats = s2io_get_ethtool_stats, + .get_sset_count = s2io_get_sset_count, +}; + +/** + * s2io_ioctl - Entry point for the Ioctl + * @dev : Device pointer. + * @ifr : An IOCTL specefic structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * @cmd : This is used to distinguish between the different commands that + * can be passed to the IOCTL functions. + * Description: + * Currently there are no special functionality supported in IOCTL, hence + * function always return EOPNOTSUPPORTED + */ + +static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + return -EOPNOTSUPP; +} + +/** + * s2io_change_mtu - entry point to change MTU size for the device. + * @dev : device pointer. + * @new_mtu : the new MTU size for the device. + * Description: A driver entry point to change MTU size for the device. + * Before changing the MTU the device must be stopped. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ + +static int s2io_change_mtu(struct net_device *dev, int new_mtu) +{ + struct s2io_nic *sp = netdev_priv(dev); + int ret = 0; + + if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { + DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name); + return -EPERM; + } + + dev->mtu = new_mtu; + if (netif_running(dev)) { + s2io_stop_all_tx_queue(sp); + s2io_card_down(sp); + ret = s2io_card_up(sp); + if (ret) { + DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", + __func__); + return ret; + } + s2io_wake_all_tx_queue(sp); + } else { /* Device is down */ + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 val64 = new_mtu; + + writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); + } + + return ret; +} + +/** + * s2io_set_link - Set the LInk status + * @data: long pointer to device private structue + * Description: Sets the link status for the adapter + */ + +static void s2io_set_link(struct work_struct *work) +{ + struct s2io_nic *nic = container_of(work, struct s2io_nic, + set_link_task); + struct net_device *dev = nic->dev; + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 val64; + u16 subid; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) { + /* The card is being reset, no point doing anything */ + goto out_unlock; + } + + subid = nic->pdev->subsystem_device; + if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { + /* + * Allow a small delay for the NICs self initiated + * cleanup to complete. 
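s2io_set_features() and s2io_change_mtu() above share one reconfiguration shape: stop the transmit queues, take the card down, apply the new setting, then bring the card back up, falling back to a reset if that fails. A condensed sketch of that shape (s2io_reconfigure() and its apply callback are hypothetical, used only to show the sequence):

/* Illustrative wrapper around the quiesce/apply/resume sequence used by
 * the MTU and feature handlers above.
 */
static int s2io_reconfigure(struct s2io_nic *sp,
                            void (*apply)(struct s2io_nic *sp))
{
        int rc;

        s2io_stop_all_tx_queue(sp);
        s2io_card_down(sp);
        apply(sp);                      /* e.g. update dev->mtu or features */
        rc = s2io_card_up(sp);
        if (rc)
                s2io_reset(sp);         /* leave the hardware in a sane state */
        else
                s2io_start_all_tx_queue(sp);
        return rc;
}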
+ */ + msleep(100); + } + + val64 = readq(&bar0->adapter_status); + if (LINK_IS_UP(val64)) { + if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) { + if (verify_xena_quiescence(nic)) { + val64 = readq(&bar0->adapter_control); + val64 |= ADAPTER_CNTL_EN; + writeq(val64, &bar0->adapter_control); + if (CARDS_WITH_FAULTY_LINK_INDICATORS( + nic->device_type, subid)) { + val64 = readq(&bar0->gpio_control); + val64 |= GPIO_CTRL_GPIO_0; + writeq(val64, &bar0->gpio_control); + val64 = readq(&bar0->gpio_control); + } else { + val64 |= ADAPTER_LED_ON; + writeq(val64, &bar0->adapter_control); + } + nic->device_enabled_once = true; + } else { + DBG_PRINT(ERR_DBG, + "%s: Error: device is not Quiescent\n", + dev->name); + s2io_stop_all_tx_queue(nic); + } + } + val64 = readq(&bar0->adapter_control); + val64 |= ADAPTER_LED_ON; + writeq(val64, &bar0->adapter_control); + s2io_link(nic, LINK_UP); + } else { + if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, + subid)) { + val64 = readq(&bar0->gpio_control); + val64 &= ~GPIO_CTRL_GPIO_0; + writeq(val64, &bar0->gpio_control); + val64 = readq(&bar0->gpio_control); + } + /* turn off LED */ + val64 = readq(&bar0->adapter_control); + val64 = val64 & (~ADAPTER_LED_ON); + writeq(val64, &bar0->adapter_control); + s2io_link(nic, LINK_DOWN); + } + clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state)); + +out_unlock: + rtnl_unlock(); +} + +static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, + struct buffAdd *ba, + struct sk_buff **skb, u64 *temp0, u64 *temp1, + u64 *temp2, int size) +{ + struct net_device *dev = sp->dev; + struct swStat *stats = &sp->mac_control.stats_info->sw_stat; + + if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) { + struct RxD1 *rxdp1 = (struct RxD1 *)rxdp; + /* allocate skb */ + if (*skb) { + DBG_PRINT(INFO_DBG, "SKB is not NULL\n"); + /* + * As Rx frame are not going to be processed, + * using same mapped address for the Rxd + * buffer pointer + */ + rxdp1->Buffer0_ptr = *temp0; + } else { + *skb = netdev_alloc_skb(dev, size); + if (!(*skb)) { + DBG_PRINT(INFO_DBG, + "%s: Out of memory to allocate %s\n", + dev->name, "1 buf mode SKBs"); + stats->mem_alloc_fail_cnt++; + return -ENOMEM ; + } + stats->mem_allocated += (*skb)->truesize; + /* storing the mapped addr in a temp variable + * such it will be used for next rxd whose + * Host Control is NULL + */ + rxdp1->Buffer0_ptr = *temp0 = + pci_map_single(sp->pdev, (*skb)->data, + size - NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) + goto memalloc_failed; + rxdp->Host_Control = (unsigned long) (*skb); + } + } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { + struct RxD3 *rxdp3 = (struct RxD3 *)rxdp; + /* Two buffer Mode */ + if (*skb) { + rxdp3->Buffer2_ptr = *temp2; + rxdp3->Buffer0_ptr = *temp0; + rxdp3->Buffer1_ptr = *temp1; + } else { + *skb = netdev_alloc_skb(dev, size); + if (!(*skb)) { + DBG_PRINT(INFO_DBG, + "%s: Out of memory to allocate %s\n", + dev->name, + "2 buf mode SKBs"); + stats->mem_alloc_fail_cnt++; + return -ENOMEM; + } + stats->mem_allocated += (*skb)->truesize; + rxdp3->Buffer2_ptr = *temp2 = + pci_map_single(sp->pdev, (*skb)->data, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) + goto memalloc_failed; + rxdp3->Buffer0_ptr = *temp0 = + pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(sp->pdev, + rxdp3->Buffer0_ptr)) { + pci_unmap_single(sp->pdev, + 
(dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); + goto memalloc_failed; + } + rxdp->Host_Control = (unsigned long) (*skb); + + /* Buffer-1 will be dummy buffer not used */ + rxdp3->Buffer1_ptr = *temp1 = + pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(sp->pdev, + rxdp3->Buffer1_ptr)) { + pci_unmap_single(sp->pdev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); + pci_unmap_single(sp->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); + goto memalloc_failed; + } + } + } + return 0; + +memalloc_failed: + stats->pci_map_fail_cnt++; + stats->mem_freed += (*skb)->truesize; + dev_kfree_skb(*skb); + return -ENOMEM; +} + +static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, + int size) +{ + struct net_device *dev = sp->dev; + if (sp->rxd_mode == RXD_MODE_1) { + rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); + } else if (sp->rxd_mode == RXD_MODE_3B) { + rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); + rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); + rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4); + } +} + +static int rxd_owner_bit_reset(struct s2io_nic *sp) +{ + int i, j, k, blk_cnt = 0, size; + struct config_param *config = &sp->config; + struct mac_info *mac_control = &sp->mac_control; + struct net_device *dev = sp->dev; + struct RxD_t *rxdp = NULL; + struct sk_buff *skb = NULL; + struct buffAdd *ba = NULL; + u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0; + + /* Calculate the size based on ring mode */ + size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + HEADER_SNAP_SIZE; + if (sp->rxd_mode == RXD_MODE_1) + size += NET_IP_ALIGN; + else if (sp->rxd_mode == RXD_MODE_3B) + size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; + + for (i = 0; i < config->rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + struct ring_info *ring = &mac_control->rings[i]; + + blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1); + + for (j = 0; j < blk_cnt; j++) { + for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { + rxdp = ring->rx_blocks[j].rxds[k].virt_addr; + if (sp->rxd_mode == RXD_MODE_3B) + ba = &ring->ba[j][k]; + if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb, + &temp0_64, + &temp1_64, + &temp2_64, + size) == -ENOMEM) { + return 0; + } + + set_rxd_buffer_size(sp, rxdp, size); + dma_wmb(); + /* flip the Ownership bit to Hardware */ + rxdp->Control_1 |= RXD_OWN_XENA; + } + } + } + return 0; + +} + +static int s2io_add_isr(struct s2io_nic *sp) +{ + int ret = 0; + struct net_device *dev = sp->dev; + int err = 0; + + if (sp->config.intr_type == MSI_X) + ret = s2io_enable_msi_x(sp); + if (ret) { + DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); + sp->config.intr_type = INTA; + } + + /* + * Store the values of the MSIX table in + * the struct s2io_nic structure + */ + store_xmsi_data(sp); + + /* After proper initialization of H/W, register ISR */ + if (sp->config.intr_type == MSI_X) { + int i, msix_rx_cnt = 0; + + for (i = 0; i < sp->num_entries; i++) { + if (sp->s2io_entries[i].in_use == MSIX_FLG) { + if (sp->s2io_entries[i].type == + MSIX_RING_TYPE) { + snprintf(sp->desc[i], + sizeof(sp->desc[i]), + "%s:MSI-X-%d-RX", + dev->name, i); + err = request_irq(sp->entries[i].vector, + s2io_msix_ring_handle, + 0, + sp->desc[i], + sp->s2io_entries[i].arg); + } else if (sp->s2io_entries[i].type == + MSIX_ALARM_TYPE) { + snprintf(sp->desc[i], + sizeof(sp->desc[i]), + "%s:MSI-X-%d-TX", + dev->name, i); + err = 
request_irq(sp->entries[i].vector, + s2io_msix_fifo_handle, + 0, + sp->desc[i], + sp->s2io_entries[i].arg); + + } + /* if either data or addr is zero print it. */ + if (!(sp->msix_info[i].addr && + sp->msix_info[i].data)) { + DBG_PRINT(ERR_DBG, + "%s @Addr:0x%llx Data:0x%llx\n", + sp->desc[i], + (unsigned long long) + sp->msix_info[i].addr, + (unsigned long long) + ntohl(sp->msix_info[i].data)); + } else + msix_rx_cnt++; + if (err) { + remove_msix_isr(sp); + + DBG_PRINT(ERR_DBG, + "%s:MSI-X-%d registration " + "failed\n", dev->name, i); + + DBG_PRINT(ERR_DBG, + "%s: Defaulting to INTA\n", + dev->name); + sp->config.intr_type = INTA; + break; + } + sp->s2io_entries[i].in_use = + MSIX_REGISTERED_SUCCESS; + } + } + if (!err) { + pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt); + DBG_PRINT(INFO_DBG, + "MSI-X-TX entries enabled through alarm vector\n"); + } + } + if (sp->config.intr_type == INTA) { + err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED, + sp->name, dev); + if (err) { + DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", + dev->name); + return -1; + } + } + return 0; +} + +static void s2io_rem_isr(struct s2io_nic *sp) +{ + if (sp->config.intr_type == MSI_X) + remove_msix_isr(sp); + else + remove_inta_isr(sp); +} + +static void do_s2io_card_down(struct s2io_nic *sp, int do_io) +{ + int cnt = 0; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + register u64 val64 = 0; + struct config_param *config; + config = &sp->config; + + if (!is_s2io_card_up(sp)) + return; + + del_timer_sync(&sp->alarm_timer); + /* If s2io_set_link task is executing, wait till it completes. */ + while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) + msleep(50); + clear_bit(__S2IO_STATE_CARD_UP, &sp->state); + + /* Disable napi */ + if (sp->config.napi) { + int off = 0; + if (config->intr_type == MSI_X) { + for (; off < sp->config.rx_ring_num; off++) + napi_disable(&sp->mac_control.rings[off].napi); + } + else + napi_disable(&sp->napi); + } + + /* disable Tx and Rx traffic on the NIC */ + if (do_io) + stop_nic(sp); + + s2io_rem_isr(sp); + + /* stop the tx queue, indicate link down */ + s2io_link(sp, LINK_DOWN); + + /* Check if the device is Quiescent and then Reset the NIC */ + while (do_io) { + /* As per the HW requirement we need to replenish the + * receive buffer to avoid the ring bump. 
Since there is + * no intention of processing the Rx frame at this pointwe are + * just setting the ownership bit of rxd in Each Rx + * ring to HW and set the appropriate buffer size + * based on the ring mode + */ + rxd_owner_bit_reset(sp); + + val64 = readq(&bar0->adapter_status); + if (verify_xena_quiescence(sp)) { + if (verify_pcc_quiescent(sp, sp->device_enabled_once)) + break; + } + + msleep(50); + cnt++; + if (cnt == 10) { + DBG_PRINT(ERR_DBG, "Device not Quiescent - " + "adapter status reads 0x%llx\n", + (unsigned long long)val64); + break; + } + } + if (do_io) + s2io_reset(sp); + + /* Free all Tx buffers */ + free_tx_buffers(sp); + + /* Free all Rx buffers */ + free_rx_buffers(sp); + + clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); +} + +static void s2io_card_down(struct s2io_nic *sp) +{ + do_s2io_card_down(sp, 1); +} + +static int s2io_card_up(struct s2io_nic *sp) +{ + int i, ret = 0; + struct config_param *config; + struct mac_info *mac_control; + struct net_device *dev = sp->dev; + u16 interruptible; + + /* Initialize the H/W I/O registers */ + ret = init_nic(sp); + if (ret != 0) { + DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", + dev->name); + if (ret != -EIO) + s2io_reset(sp); + return ret; + } + + /* + * Initializing the Rx buffers. For now we are considering only 1 + * Rx ring and initializing buffers into 30 Rx blocks + */ + config = &sp->config; + mac_control = &sp->mac_control; + + for (i = 0; i < config->rx_ring_num; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + ring->mtu = dev->mtu; + ring->lro = !!(dev->features & NETIF_F_LRO); + ret = fill_rx_buffers(sp, ring, 1); + if (ret) { + DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", + dev->name); + s2io_reset(sp); + free_rx_buffers(sp); + return -ENOMEM; + } + DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, + ring->rx_bufs_left); + } + + /* Initialise napi */ + if (config->napi) { + if (config->intr_type == MSI_X) { + for (i = 0; i < sp->config.rx_ring_num; i++) + napi_enable(&sp->mac_control.rings[i].napi); + } else { + napi_enable(&sp->napi); + } + } + + /* Maintain the state prior to the open */ + if (sp->promisc_flg) + sp->promisc_flg = 0; + if (sp->m_cast_flg) { + sp->m_cast_flg = 0; + sp->all_multi_pos = 0; + } + + /* Setting its receive mode */ + s2io_set_multicast(dev); + + if (dev->features & NETIF_F_LRO) { + /* Initialize max aggregatable pkts per session based on MTU */ + sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; + /* Check if we can use (if specified) user provided value */ + if (lro_max_pkts < sp->lro_max_aggr_per_sess) + sp->lro_max_aggr_per_sess = lro_max_pkts; + } + + /* Enable Rx Traffic and interrupts on the NIC */ + if (start_nic(sp)) { + DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); + s2io_reset(sp); + free_rx_buffers(sp); + return -ENODEV; + } + + /* Add interrupt service routine */ + if (s2io_add_isr(sp) != 0) { + if (sp->config.intr_type == MSI_X) + s2io_rem_isr(sp); + s2io_reset(sp); + free_rx_buffers(sp); + return -ENODEV; + } + + S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); + + set_bit(__S2IO_STATE_CARD_UP, &sp->state); + + /* Enable select interrupts */ + en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); + if (sp->config.intr_type != INTA) { + interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR; + en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); + } else { + interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; + interruptible |= TX_PIC_INTR; + en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); + } + + return 0; +} + 
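One detail of s2io_card_up() above worth spelling out is how the LRO ceiling is chosen: the IPv4 total-length field caps an aggregated frame at 65535 bytes, so the per-session packet limit is that budget divided by the MTU, optionally clamped by the lro_max_pkts module parameter. A sketch with the same inputs (the helper name is illustrative):

/* Illustrative helper: largest number of MTU-sized frames that fit in
 * one 64KB-limited LRO super-frame, clamped by a user-supplied maximum
 * when one is given (0 means no clamp).
 */
static unsigned int s2io_lro_aggr_limit(unsigned int mtu, unsigned int user_max)
{
        unsigned int max_aggr = ((1 << 16) - 1) / mtu;   /* 65535 / MTU */

        if (user_max && user_max < max_aggr)
                max_aggr = user_max;
        return max_aggr;
}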
+/** + * s2io_restart_nic - Resets the NIC. + * @data : long pointer to the device private structure + * Description: + * This function is scheduled to be run by the s2io_tx_watchdog + * function after 0.5 secs to reset the NIC. The idea is to reduce + * the run time of the watch dog routine which is run holding a + * spin lock. + */ + +static void s2io_restart_nic(struct work_struct *work) +{ + struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task); + struct net_device *dev = sp->dev; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + s2io_card_down(sp); + if (s2io_card_up(sp)) { + DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name); + } + s2io_wake_all_tx_queue(sp); + DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name); +out_unlock: + rtnl_unlock(); +} + +/** + * s2io_tx_watchdog - Watchdog for transmit side. + * @dev : Pointer to net device structure + * Description: + * This function is triggered if the Tx Queue is stopped + * for a pre-defined amount of time when the Interface is still up. + * If the Interface is jammed in such a situation, the hardware is + * reset (by s2io_close) and restarted again (by s2io_open) to + * overcome any problem that might have been caused in the hardware. + * Return value: + * void + */ + +static void s2io_tx_watchdog(struct net_device *dev) +{ + struct s2io_nic *sp = netdev_priv(dev); + struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; + + if (netif_carrier_ok(dev)) { + swstats->watchdog_timer_cnt++; + schedule_work(&sp->rst_timer_task); + swstats->soft_reset_cnt++; + } +} + +/** + * rx_osm_handler - To perform some OS related operations on SKB. + * @sp: private member of the device structure,pointer to s2io_nic structure. + * @skb : the socket buffer pointer. + * @len : length of the packet + * @cksum : FCS checksum of the frame. + * @ring_no : the ring from which this RxD was extracted. + * Description: + * This function is called by the Rx interrupt serivce routine to perform + * some OS related operations on the SKB before passing it to the upper + * layers. It mainly checks if the checksum is OK, if so adds it to the + * SKBs cksum variable, increments the Rx packet count and passes the SKB + * to the upper layer. If the checksum is wrong, it increments the Rx + * packet error count, frees the SKB and returns error. + * Return value: + * SUCCESS on success and -1 on failure. 
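The transmit-watchdog path above is deliberately split in two: ndo_tx_timeout() runs in a context where sleeping is not an option, so it only bumps counters and schedules work, while the work item does the heavyweight down/up under rtnl_lock(). A condensed sketch of that split, assuming the same rst_timer_task member used above (the function names here are illustrative):

/* Illustrative condensation of s2io_tx_watchdog()/s2io_restart_nic():
 * the timeout hook defers, the worker resets under rtnl_lock().
 */
static void my_restart_task(struct work_struct *work)
{
        struct s2io_nic *sp = container_of(work, struct s2io_nic,
                                           rst_timer_task);

        rtnl_lock();
        if (netif_running(sp->dev)) {
                s2io_card_down(sp);
                if (!s2io_card_up(sp))
                        s2io_wake_all_tx_queue(sp);
        }
        rtnl_unlock();
}

static void my_tx_timeout(struct net_device *dev)
{
        struct s2io_nic *sp = netdev_priv(dev);

        schedule_work(&sp->rst_timer_task);     /* reset happens later */
}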
+ */ +static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) +{ + struct s2io_nic *sp = ring_data->nic; + struct net_device *dev = ring_data->dev; + struct sk_buff *skb = (struct sk_buff *) + ((unsigned long)rxdp->Host_Control); + int ring_no = ring_data->ring_no; + u16 l3_csum, l4_csum; + unsigned long long err = rxdp->Control_1 & RXD_T_CODE; + struct lro *uninitialized_var(lro); + u8 err_mask; + struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; + + skb->dev = dev; + + if (err) { + /* Check for parity error */ + if (err & 0x1) + swstats->parity_err_cnt++; + + err_mask = err >> 48; + switch (err_mask) { + case 1: + swstats->rx_parity_err_cnt++; + break; + + case 2: + swstats->rx_abort_cnt++; + break; + + case 3: + swstats->rx_parity_abort_cnt++; + break; + + case 4: + swstats->rx_rda_fail_cnt++; + break; + + case 5: + swstats->rx_unkn_prot_cnt++; + break; + + case 6: + swstats->rx_fcs_err_cnt++; + break; + + case 7: + swstats->rx_buf_size_err_cnt++; + break; + + case 8: + swstats->rx_rxd_corrupt_cnt++; + break; + + case 15: + swstats->rx_unkn_err_cnt++; + break; + } + /* + * Drop the packet if bad transfer code. Exception being + * 0x5, which could be due to unsupported IPv6 extension header. + * In this case, we let stack handle the packet. + * Note that in this case, since checksum will be incorrect, + * stack will validate the same. + */ + if (err_mask != 0x5) { + DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", + dev->name, err_mask); + dev->stats.rx_crc_errors++; + swstats->mem_freed + += skb->truesize; + dev_kfree_skb(skb); + ring_data->rx_bufs_left -= 1; + rxdp->Host_Control = 0; + return 0; + } + } + + rxdp->Host_Control = 0; + if (sp->rxd_mode == RXD_MODE_1) { + int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); + + skb_put(skb, len); + } else if (sp->rxd_mode == RXD_MODE_3B) { + int get_block = ring_data->rx_curr_get_info.block_index; + int get_off = ring_data->rx_curr_get_info.offset; + int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2); + int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2); + unsigned char *buff = skb_push(skb, buf0_len); + + struct buffAdd *ba = &ring_data->ba[get_block][get_off]; + memcpy(buff, ba->ba_0, buf0_len); + skb_put(skb, buf2_len); + } + + if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && + ((!ring_data->lro) || + (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && + (dev->features & NETIF_F_RXCSUM)) { + l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); + l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); + if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) { + /* + * NIC verifies if the Checksum of the received + * frame is Ok or not and accordingly returns + * a flag in the RxD. 
+ */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (ring_data->lro) { + u32 tcp_len = 0; + u8 *tcp; + int ret = 0; + + ret = s2io_club_tcp_session(ring_data, + skb->data, &tcp, + &tcp_len, &lro, + rxdp, sp); + switch (ret) { + case 3: /* Begin anew */ + lro->parent = skb; + goto aggregate; + case 1: /* Aggregate */ + lro_append_pkt(sp, lro, skb, tcp_len); + goto aggregate; + case 4: /* Flush session */ + lro_append_pkt(sp, lro, skb, tcp_len); + queue_rx_frame(lro->parent, + lro->vlan_tag); + clear_lro_session(lro); + swstats->flush_max_pkts++; + goto aggregate; + case 2: /* Flush both */ + lro->parent->data_len = lro->frags_len; + swstats->sending_both++; + queue_rx_frame(lro->parent, + lro->vlan_tag); + clear_lro_session(lro); + goto send_up; + case 0: /* sessions exceeded */ + case -1: /* non-TCP or not L2 aggregatable */ + case 5: /* + * First pkt in session not + * L3/L4 aggregatable + */ + break; + default: + DBG_PRINT(ERR_DBG, + "%s: Samadhana!!\n", + __func__); + BUG(); + } + } + } else { + /* + * Packet with erroneous checksum, let the + * upper layers deal with it. + */ + skb_checksum_none_assert(skb); + } + } else + skb_checksum_none_assert(skb); + + swstats->mem_freed += skb->truesize; +send_up: + skb_record_rx_queue(skb, ring_no); + queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); +aggregate: + sp->mac_control.rings[ring_no].rx_bufs_left -= 1; + return SUCCESS; +} + +/** + * s2io_link - stops/starts the Tx queue. + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * @link : inidicates whether link is UP/DOWN. + * Description: + * This function stops/starts the Tx queue depending on whether the link + * status of the NIC is is down or up. This is called by the Alarm + * interrupt handler whenever a link change interrupt comes up. + * Return value: + * void. + */ + +static void s2io_link(struct s2io_nic *sp, int link) +{ + struct net_device *dev = sp->dev; + struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; + + if (link != sp->last_link_state) { + init_tti(sp, link); + if (link == LINK_DOWN) { + DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); + s2io_stop_all_tx_queue(sp); + netif_carrier_off(dev); + if (swstats->link_up_cnt) + swstats->link_up_time = + jiffies - sp->start_time; + swstats->link_down_cnt++; + } else { + DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name); + if (swstats->link_down_cnt) + swstats->link_down_time = + jiffies - sp->start_time; + swstats->link_up_cnt++; + netif_carrier_on(dev); + s2io_wake_all_tx_queue(sp); + } + } + sp->last_link_state = link; + sp->start_time = jiffies; +} + +/** + * s2io_init_pci -Initialization of PCI and PCI-X configuration registers . + * @sp : private member of the device structure, which is a pointer to the + * s2io_nic structure. + * Description: + * This function initializes a few of the PCI and PCI-X configuration registers + * with recommended values. + * Return value: + * void + */ + +static void s2io_init_pci(struct s2io_nic *sp) +{ + u16 pci_cmd = 0, pcix_cmd = 0; + + /* Enable Data Parity Error Recovery in PCI-X command register. */ + pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, + &(pcix_cmd)); + pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, + (pcix_cmd | 1)); + pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, + &(pcix_cmd)); + + /* Set the PErr Response bit in PCI command register. 
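The bare integers returned by s2io_club_tcp_session() and switched on in rx_osm_handler() above carry a lot of meaning. The enum below is purely illustrative, attaching names to the values as they are used in that switch; the driver itself works with the raw numbers.

/* Hypothetical names for the s2io_club_tcp_session() return values
 * dispatched in rx_osm_handler() above (a return of -1 additionally
 * means the frame is not TCP or not L2-aggregatable).
 */
enum s2io_lro_verdict {
        S2IO_LRO_SESS_EXCEEDED    = 0,  /* no free session, send frame up */
        S2IO_LRO_AGGREGATE        = 1,  /* append to an existing session */
        S2IO_LRO_FLUSH_BOTH       = 2,  /* flush session, send frame separately */
        S2IO_LRO_BEGIN_SESSION    = 3,  /* frame becomes the session parent */
        S2IO_LRO_FLUSH_SESSION    = 4,  /* append, then flush the session */
        S2IO_LRO_NOT_AGGREGATABLE = 5,  /* first frame not L3/L4 aggregatable */
};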
*/ + pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); + pci_write_config_word(sp->pdev, PCI_COMMAND, + (pci_cmd | PCI_COMMAND_PARITY)); + pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); +} + +static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, + u8 *dev_multiq) +{ + int i; + + if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) { + DBG_PRINT(ERR_DBG, "Requested number of tx fifos " + "(%d) not supported\n", tx_fifo_num); + + if (tx_fifo_num < 1) + tx_fifo_num = 1; + else + tx_fifo_num = MAX_TX_FIFOS; + + DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num); + } + + if (multiq) + *dev_multiq = multiq; + + if (tx_steering_type && (1 == tx_fifo_num)) { + if (tx_steering_type != TX_DEFAULT_STEERING) + DBG_PRINT(ERR_DBG, + "Tx steering is not supported with " + "one fifo. Disabling Tx steering.\n"); + tx_steering_type = NO_STEERING; + } + + if ((tx_steering_type < NO_STEERING) || + (tx_steering_type > TX_DEFAULT_STEERING)) { + DBG_PRINT(ERR_DBG, + "Requested transmit steering not supported\n"); + DBG_PRINT(ERR_DBG, "Disabling transmit steering\n"); + tx_steering_type = NO_STEERING; + } + + if (rx_ring_num > MAX_RX_RINGS) { + DBG_PRINT(ERR_DBG, + "Requested number of rx rings not supported\n"); + DBG_PRINT(ERR_DBG, "Default to %d rx rings\n", + MAX_RX_RINGS); + rx_ring_num = MAX_RX_RINGS; + } + + if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { + DBG_PRINT(ERR_DBG, "Wrong intr_type requested. " + "Defaulting to INTA\n"); + *dev_intr_type = INTA; + } + + if ((*dev_intr_type == MSI_X) && + ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && + (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { + DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. " + "Defaulting to INTA\n"); + *dev_intr_type = INTA; + } + + if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) { + DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n"); + DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n"); + rx_ring_mode = 1; + } + + for (i = 0; i < MAX_RX_RINGS; i++) + if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) { + DBG_PRINT(ERR_DBG, "Requested rx ring size not " + "supported\nDefaulting to %d\n", + MAX_RX_BLOCKS_PER_RING); + rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING; + } + + return SUCCESS; +} + +/** + * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS + * or Traffic class respectively. + * @nic: device private variable + * Description: The function configures the receive steering to + * desired receive ring. + * Return Value: SUCCESS on success and + * '-1' on failure (endian settings incorrect). 
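rts_ds_steer(), whose body follows just below, programs the receive steering table one DS codepoint at a time through the usual data/control/strobe mailbox sequence. Steering a whole DSCP class selector therefore means walking its eight codepoints; the loop below is only a usage sketch (s2io_steer_class() is not a driver function).

/* Usage sketch: steer every DSCP whose class selector (top three bits
 * of the 6-bit codepoint, cs in 0..7) equals 'cs' to the given ring.
 */
static int s2io_steer_class(struct s2io_nic *nic, u8 cs, u8 ring)
{
        u8 dscp;
        int ret;

        for (dscp = cs << 3; dscp < ((cs + 1) << 3); dscp++) {
                ret = rts_ds_steer(nic, dscp, ring);
                if (ret)
                        return ret;
        }
        return 0;
}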
+ */ +static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 val64 = 0; + + if (ds_codepoint > 63) + return FAILURE; + + val64 = RTS_DS_MEM_DATA(ring); + writeq(val64, &bar0->rts_ds_mem_data); + + val64 = RTS_DS_MEM_CTRL_WE | + RTS_DS_MEM_CTRL_STROBE_NEW_CMD | + RTS_DS_MEM_CTRL_OFFSET(ds_codepoint); + + writeq(val64, &bar0->rts_ds_mem_ctrl); + + return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl, + RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, + S2IO_BIT_RESET); +} + +static const struct net_device_ops s2io_netdev_ops = { + .ndo_open = s2io_open, + .ndo_stop = s2io_close, + .ndo_get_stats = s2io_get_stats, + .ndo_start_xmit = s2io_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = s2io_set_multicast, + .ndo_do_ioctl = s2io_ioctl, + .ndo_set_mac_address = s2io_set_mac_addr, + .ndo_change_mtu = s2io_change_mtu, + .ndo_set_features = s2io_set_features, + .ndo_tx_timeout = s2io_tx_watchdog, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = s2io_netpoll, +#endif +}; + +/** + * s2io_init_nic - Initialization of the adapter . + * @pdev : structure containing the PCI related information of the device. + * @pre: List of PCI devices supported by the driver listed in s2io_tbl. + * Description: + * The function initializes an adapter identified by the pci_dec structure. + * All OS related initialization including memory and device structure and + * initlaization of the device private variable is done. Also the swapper + * control register is initialized to enable read and write into the I/O + * registers of the device. + * Return value: + * returns 0 on success and negative on failure. + */ + +static int +s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) +{ + struct s2io_nic *sp; + struct net_device *dev; + int i, j, ret; + int dma_flag = false; + u32 mac_up, mac_down; + u64 val64 = 0, tmp64 = 0; + struct XENA_dev_config __iomem *bar0 = NULL; + u16 subid; + struct config_param *config; + struct mac_info *mac_control; + int mode; + u8 dev_intr_type = intr_type; + u8 dev_multiq = 0; + + ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq); + if (ret) + return ret; + + ret = pci_enable_device(pdev); + if (ret) { + DBG_PRINT(ERR_DBG, + "%s: pci_enable_device failed\n", __func__); + return ret; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__); + dma_flag = true; + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + DBG_PRINT(ERR_DBG, + "Unable to obtain 64bit DMA " + "for consistent allocations\n"); + pci_disable_device(pdev); + return -ENOMEM; + } + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__); + } else { + pci_disable_device(pdev); + return -ENOMEM; + } + ret = pci_request_regions(pdev, s2io_driver_name); + if (ret) { + DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n", + __func__, ret); + pci_disable_device(pdev); + return -ENODEV; + } + if (dev_multiq) + dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num); + else + dev = alloc_etherdev(sizeof(struct s2io_nic)); + if (dev == NULL) { + pci_disable_device(pdev); + pci_release_regions(pdev); + return -ENODEV; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + /* Private member variable initialized to s2io NIC structure */ + sp = netdev_priv(dev); + sp->dev = dev; + sp->pdev = pdev; + sp->high_dma_flag = dma_flag; + 
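The probe path above negotiates DMA addressing the classic way: try a 64-bit mask, remember the result in high_dma_flag, and fall back to a 32-bit mask otherwise. The combined dma_set_mask_and_coherent() helper expresses the same negotiation more compactly; the wrapper below is a sketch (its name, and folding the coherent-mask fallback into the 32-bit path, are choices made here rather than by the driver).

/* Illustrative DMA-mask negotiation: prefer 64-bit addressing, note
 * whether it was granted, otherwise settle for 32-bit.
 */
static int s2io_set_dma_masks(struct pci_dev *pdev, bool *high_dma)
{
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                *high_dma = true;
                return 0;
        }
        *high_dma = false;
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}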
sp->device_enabled_once = false; + if (rx_ring_mode == 1) + sp->rxd_mode = RXD_MODE_1; + if (rx_ring_mode == 2) + sp->rxd_mode = RXD_MODE_3B; + + sp->config.intr_type = dev_intr_type; + + if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || + (pdev->device == PCI_DEVICE_ID_HERC_UNI)) + sp->device_type = XFRAME_II_DEVICE; + else + sp->device_type = XFRAME_I_DEVICE; + + + /* Initialize some PCI/PCI-X fields of the NIC. */ + s2io_init_pci(sp); + + /* + * Setting the device configuration parameters. + * Most of these parameters can be specified by the user during + * module insertion as they are module loadable parameters. If + * these parameters are not not specified during load time, they + * are initialized with default values. + */ + config = &sp->config; + mac_control = &sp->mac_control; + + config->napi = napi; + config->tx_steering_type = tx_steering_type; + + /* Tx side parameters. */ + if (config->tx_steering_type == TX_PRIORITY_STEERING) + config->tx_fifo_num = MAX_TX_FIFOS; + else + config->tx_fifo_num = tx_fifo_num; + + /* Initialize the fifos used for tx steering */ + if (config->tx_fifo_num < 5) { + if (config->tx_fifo_num == 1) + sp->total_tcp_fifos = 1; + else + sp->total_tcp_fifos = config->tx_fifo_num - 1; + sp->udp_fifo_idx = config->tx_fifo_num - 1; + sp->total_udp_fifos = 1; + sp->other_fifo_idx = sp->total_tcp_fifos - 1; + } else { + sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM - + FIFO_OTHER_MAX_NUM); + sp->udp_fifo_idx = sp->total_tcp_fifos; + sp->total_udp_fifos = FIFO_UDP_MAX_NUM; + sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM; + } + + config->multiq = dev_multiq; + for (i = 0; i < config->tx_fifo_num; i++) { + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + tx_cfg->fifo_len = tx_fifo_len[i]; + tx_cfg->fifo_priority = i; + } + + /* mapping the QoS priority to the configured fifos */ + for (i = 0; i < MAX_TX_FIFOS; i++) + config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i]; + + /* map the hashing selector table to the configured fifos */ + for (i = 0; i < config->tx_fifo_num; i++) + sp->fifo_selector[i] = fifo_selector[i]; + + + config->tx_intr_type = TXD_INT_TYPE_UTILZ; + for (i = 0; i < config->tx_fifo_num; i++) { + struct tx_fifo_config *tx_cfg = &config->tx_cfg[i]; + + tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER); + if (tx_cfg->fifo_len < 65) { + config->tx_intr_type = TXD_INT_TYPE_PER_LIST; + break; + } + } + /* + 2 because one Txd for skb->data and one Txd for UFO */ + config->max_txds = MAX_SKB_FRAGS + 2; + + /* Rx side parameters. 
*/ + config->rx_ring_num = rx_ring_num; + for (i = 0; i < config->rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + struct ring_info *ring = &mac_control->rings[i]; + + rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1); + rx_cfg->ring_priority = i; + ring->rx_bufs_left = 0; + ring->rxd_mode = sp->rxd_mode; + ring->rxd_count = rxd_count[sp->rxd_mode]; + ring->pdev = sp->pdev; + ring->dev = sp->dev; + } + + for (i = 0; i < rx_ring_num; i++) { + struct rx_ring_config *rx_cfg = &config->rx_cfg[i]; + + rx_cfg->ring_org = RING_ORG_BUFF1; + rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER); + } + + /* Setting Mac Control parameters */ + mac_control->rmac_pause_time = rmac_pause_time; + mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3; + mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; + + + /* initialize the shared memory used by the NIC and the host */ + if (init_shared_mem(sp)) { + DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name); + ret = -ENOMEM; + goto mem_alloc_failed; + } + + sp->bar0 = pci_ioremap_bar(pdev, 0); + if (!sp->bar0) { + DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n", + dev->name); + ret = -ENOMEM; + goto bar0_remap_failed; + } + + sp->bar1 = pci_ioremap_bar(pdev, 2); + if (!sp->bar1) { + DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n", + dev->name); + ret = -ENOMEM; + goto bar1_remap_failed; + } + + /* Initializing the BAR1 address as the start of the FIFO pointer. */ + for (j = 0; j < MAX_TX_FIFOS; j++) { + mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000); + } + + /* Driver entry points */ + dev->netdev_ops = &s2io_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXCSUM | NETIF_F_LRO; + dev->features |= dev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + if (sp->device_type & XFRAME_II_DEVICE) { + dev->hw_features |= NETIF_F_UFO; + if (ufo) + dev->features |= NETIF_F_UFO; + } + if (sp->high_dma_flag == true) + dev->features |= NETIF_F_HIGHDMA; + dev->watchdog_timeo = WATCH_DOG_TIMEOUT; + INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); + INIT_WORK(&sp->set_link_task, s2io_set_link); + + pci_save_state(sp->pdev); + + /* Setting swapper control on the NIC, for proper reset operation */ + if (s2io_set_swapper(sp)) { + DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n", + dev->name); + ret = -EAGAIN; + goto set_swap_failed; + } + + /* Verify if the Herc works on the slot its placed into */ + if (sp->device_type & XFRAME_II_DEVICE) { + mode = s2io_verify_pci_mode(sp); + if (mode < 0) { + DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n", + __func__); + ret = -EBADSLT; + goto set_swap_failed; + } + } + + if (sp->config.intr_type == MSI_X) { + sp->num_entries = config->rx_ring_num + 1; + ret = s2io_enable_msi_x(sp); + + if (!ret) { + ret = s2io_test_msi(sp); + /* rollback MSI-X, will re-enable during add_isr() */ + remove_msix_isr(sp); + } + if (ret) { + + DBG_PRINT(ERR_DBG, + "MSI-X requested but failed to enable\n"); + sp->config.intr_type = INTA; + } + } + + if (config->intr_type == MSI_X) { + for (i = 0; i < config->rx_ring_num ; i++) { + struct ring_info *ring = &mac_control->rings[i]; + + netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64); + } + } else { + netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); + } + + /* Not needed for Herc */ + if (sp->device_type & XFRAME_I_DEVICE) { + /* + * Fix for all "FFs" MAC address problems 
observed on + * Alpha platforms + */ + fix_mac_address(sp); + s2io_reset(sp); + } + + /* + * MAC address initialization. + * For now only one mac address will be read and used. + */ + bar0 = sp->bar0; + val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET); + writeq(val64, &bar0->rmac_addr_cmd_mem); + wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET); + tmp64 = readq(&bar0->rmac_addr_data0_mem); + mac_down = (u32)tmp64; + mac_up = (u32) (tmp64 >> 32); + + sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up); + sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8); + sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16); + sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24); + sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16); + sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24); + + /* Set the factory defined MAC address initially */ + dev->addr_len = ETH_ALEN; + memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); + + /* initialize number of multicast & unicast MAC entries variables */ + if (sp->device_type == XFRAME_I_DEVICE) { + config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES; + config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES; + config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET; + } else if (sp->device_type == XFRAME_II_DEVICE) { + config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES; + config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES; + config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET; + } + + /* store mac addresses from CAM to s2io_nic structure */ + do_s2io_store_unicast_mc(sp); + + /* Configure MSIX vector for number of rings configured plus one */ + if ((sp->device_type == XFRAME_II_DEVICE) && + (config->intr_type == MSI_X)) + sp->num_entries = config->rx_ring_num + 1; + + /* Store the values of the MSIX table in the s2io_nic structure */ + store_xmsi_data(sp); + /* reset Nic and bring it to known state */ + s2io_reset(sp); + + /* + * Initialize link state flags + * and the card state parameter + */ + sp->state = 0; + + /* Initialize spinlocks */ + for (i = 0; i < sp->config.tx_fifo_num; i++) { + struct fifo_info *fifo = &mac_control->fifos[i]; + + spin_lock_init(&fifo->tx_lock); + } + + /* + * SXE-002: Configure link and activity LED to init state + * on driver load. 
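The MAC-address handling above reads one 64-bit word from the CAM and scatters its bytes into def_mac_addr in a very particular order: the upper 32 bits hold octets 0-3 most-significant-byte first, and the two most significant bytes of the lower 32 bits hold octets 4 and 5. The helper below is a sketch that does the same unpacking in one place (the function name is illustrative; ETH_ALEN is the usual 6).

/* Illustrative helper: unpack the station address read from
 * rmac_addr_data0_mem above into the byte order used by def_mac_addr.
 */
static void s2io_mac_from_cam(u64 cam, u8 mac[ETH_ALEN])
{
        u32 mac_up = (u32)(cam >> 32);
        u32 mac_down = (u32)cam;

        mac[0] = mac_up >> 24;
        mac[1] = mac_up >> 16;
        mac[2] = mac_up >> 8;
        mac[3] = mac_up;
        mac[4] = mac_down >> 24;
        mac[5] = mac_down >> 16;
}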
+ */ + subid = sp->pdev->subsystem_device; + if ((subid & 0xFF) >= 0x07) { + val64 = readq(&bar0->gpio_control); + val64 |= 0x0000800000000000ULL; + writeq(val64, &bar0->gpio_control); + val64 = 0x0411040400000000ULL; + writeq(val64, (void __iomem *)bar0 + 0x2700); + val64 = readq(&bar0->gpio_control); + } + + sp->rx_csum = 1; /* Rx chksum verify enabled by default */ + + if (register_netdev(dev)) { + DBG_PRINT(ERR_DBG, "Device registration failed\n"); + ret = -ENODEV; + goto register_failed; + } + s2io_vpd_read(sp); + DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n"); + DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name, + sp->product_name, pdev->revision); + DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, + s2io_driver_version); + DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr); + DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num); + if (sp->device_type & XFRAME_II_DEVICE) { + mode = s2io_print_pci_mode(sp); + if (mode < 0) { + ret = -EBADSLT; + unregister_netdev(dev); + goto set_swap_failed; + } + } + switch (sp->rxd_mode) { + case RXD_MODE_1: + DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n", + dev->name); + break; + case RXD_MODE_3B: + DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", + dev->name); + break; + } + + switch (sp->config.napi) { + case 0: + DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); + break; + case 1: + DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); + break; + } + + DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, + sp->config.tx_fifo_num); + + DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, + sp->config.rx_ring_num); + + switch (sp->config.intr_type) { + case INTA: + DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); + break; + case MSI_X: + DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); + break; + } + if (sp->config.multiq) { + for (i = 0; i < sp->config.tx_fifo_num; i++) { + struct fifo_info *fifo = &mac_control->fifos[i]; + + fifo->multiq = config->multiq; + } + DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n", + dev->name); + } else + DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n", + dev->name); + + switch (sp->config.tx_steering_type) { + case NO_STEERING: + DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n", + dev->name); + break; + case TX_PRIORITY_STEERING: + DBG_PRINT(ERR_DBG, + "%s: Priority steering enabled for transmit\n", + dev->name); + break; + case TX_DEFAULT_STEERING: + DBG_PRINT(ERR_DBG, + "%s: Default steering enabled for transmit\n", + dev->name); + } + + DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", + dev->name); + if (ufo) + DBG_PRINT(ERR_DBG, + "%s: UDP Fragmentation Offload(UFO) enabled\n", + dev->name); + /* Initialize device name */ + snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, + sp->product_name); + + if (vlan_tag_strip) + sp->vlan_strip_flag = 1; + else + sp->vlan_strip_flag = 0; + + /* + * Make Link state as off at this point, when the Link change + * interrupt comes the state will be automatically changed to + * the right state. + */ + netif_carrier_off(dev); + + return 0; + +register_failed: +set_swap_failed: + iounmap(sp->bar1); +bar1_remap_failed: + iounmap(sp->bar0); +bar0_remap_failed: +mem_alloc_failed: + free_shared_mem(sp); + pci_disable_device(pdev); + pci_release_regions(pdev); + free_netdev(dev); + + return ret; +} + +/** + * s2io_rem_nic - Free the PCI device + * @pdev: structure containing the PCI related information of the device. 
+ * Description: This function is called by the Pci subsystem to release a + * PCI device and free up all resource held up by the device. This could + * be in response to a Hot plug event or when the driver is to be removed + * from memory. + */ + +static void s2io_rem_nic(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct s2io_nic *sp; + + if (dev == NULL) { + DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n"); + return; + } + + sp = netdev_priv(dev); + + cancel_work_sync(&sp->rst_timer_task); + cancel_work_sync(&sp->set_link_task); + + unregister_netdev(dev); + + free_shared_mem(sp); + iounmap(sp->bar0); + iounmap(sp->bar1); + pci_release_regions(pdev); + free_netdev(dev); + pci_disable_device(pdev); +} + +/** + * s2io_starter - Entry point for the driver + * Description: This function is the entry point for the driver. It verifies + * the module loadable parameters and initializes PCI configuration space. + */ + +static int __init s2io_starter(void) +{ + return pci_register_driver(&s2io_driver); +} + +/** + * s2io_closer - Cleanup routine for the driver + * Description: This function is the cleanup routine for the driver. It + * unregisters the driver. + */ + +static __exit void s2io_closer(void) +{ + pci_unregister_driver(&s2io_driver); + DBG_PRINT(INIT_DBG, "cleanup done\n"); +} + +module_init(s2io_starter); +module_exit(s2io_closer); + +static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, + struct tcphdr **tcp, struct RxD_t *rxdp, + struct s2io_nic *sp) +{ + int ip_off; + u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; + + if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { + DBG_PRINT(INIT_DBG, + "%s: Non-TCP frames not supported for LRO\n", + __func__); + return -1; + } + + /* Checking for DIX type or DIX type with VLAN */ + if ((l2_type == 0) || (l2_type == 4)) { + ip_off = HEADER_ETHERNET_II_802_3_SIZE; + /* + * If vlan stripping is disabled and the frame is VLAN tagged, + * shift the offset by the VLAN header size bytes. + */ + if ((!sp->vlan_strip_flag) && + (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) + ip_off += HEADER_VLAN_SIZE; + } else { + /* LLC, SNAP etc are considered non-mergeable */ + return -1; + } + + *ip = (struct iphdr *)(buffer + ip_off); + ip_len = (u8)((*ip)->ihl); + ip_len <<= 2; + *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len); + + return 0; +} + +static int check_for_socket_match(struct lro *lro, struct iphdr *ip, + struct tcphdr *tcp) +{ + DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); + if ((lro->iph->saddr != ip->saddr) || + (lro->iph->daddr != ip->daddr) || + (lro->tcph->source != tcp->source) || + (lro->tcph->dest != tcp->dest)) + return -1; + return 0; +} + +static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp) +{ + return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2); +} + +static void initiate_new_session(struct lro *lro, u8 *l2h, + struct iphdr *ip, struct tcphdr *tcp, + u32 tcp_pyld_len, u16 vlan_tag) +{ + DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); + lro->l2h = l2h; + lro->iph = ip; + lro->tcph = tcp; + lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); + lro->tcp_ack = tcp->ack_seq; + lro->sg_num = 1; + lro->total_len = ntohs(ip->tot_len); + lro->frags_len = 0; + lro->vlan_tag = vlan_tag; + /* + * Check if we saw TCP timestamp. + * Other consistency checks have already been done. 
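The tcp->doff == 8 tests in this LRO path (including the branch that follows immediately below) rely on a fixed option layout: a 20-byte TCP header plus exactly 12 bytes of options, which for the cases accepted here means two NOPs followed by the 10-byte timestamp option. A sketch of extracting tsval/tsecr from that layout (the helper name is illustrative):

/* Illustrative helper: pull tsval/tsecr out of a TCP header whose only
 * options are NOP, NOP, timestamp -- the doff == 8 case accepted here.
 */
static void s2io_get_ts_option(const struct tcphdr *tcp,
                               u32 *tsval, __be32 *tsecr)
{
        const __be32 *ptr = (const __be32 *)(tcp + 1);

        /* ptr[0]: NOP, NOP, kind (8), length (10); ptr[1]: tsval;
         * ptr[2]: tsecr, kept in network byte order as the driver does.
         */
        *tsval = ntohl(ptr[1]);
        *tsecr = ptr[2];
}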
+ */ + if (tcp->doff == 8) { + __be32 *ptr; + ptr = (__be32 *)(tcp+1); + lro->saw_ts = 1; + lro->cur_tsval = ntohl(*(ptr+1)); + lro->cur_tsecr = *(ptr+2); + } + lro->in_use = 1; +} + +static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro) +{ + struct iphdr *ip = lro->iph; + struct tcphdr *tcp = lro->tcph; + struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; + + DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); + + /* Update L3 header */ + csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len)); + ip->tot_len = htons(lro->total_len); + + /* Update L4 header */ + tcp->ack_seq = lro->tcp_ack; + tcp->window = lro->window; + + /* Update tsecr field if this session has timestamps enabled */ + if (lro->saw_ts) { + __be32 *ptr = (__be32 *)(tcp + 1); + *(ptr+2) = lro->cur_tsecr; + } + + /* Update counters required for calculation of + * average no. of packets aggregated. + */ + swstats->sum_avg_pkts_aggregated += lro->sg_num; + swstats->num_aggregations++; +} + +static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, + struct tcphdr *tcp, u32 l4_pyld) +{ + DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); + lro->total_len += l4_pyld; + lro->frags_len += l4_pyld; + lro->tcp_next_seq += l4_pyld; + lro->sg_num++; + + /* Update ack seq no. and window ad(from this pkt) in LRO object */ + lro->tcp_ack = tcp->ack_seq; + lro->window = tcp->window; + + if (lro->saw_ts) { + __be32 *ptr; + /* Update tsecr and tsval from this packet */ + ptr = (__be32 *)(tcp+1); + lro->cur_tsval = ntohl(*(ptr+1)); + lro->cur_tsecr = *(ptr + 2); + } +} + +static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, + struct tcphdr *tcp, u32 tcp_pyld_len) +{ + u8 *ptr; + + DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__); + + if (!tcp_pyld_len) { + /* Runt frame or a pure ack */ + return -1; + } + + if (ip->ihl != 5) /* IP has options */ + return -1; + + /* If we see CE codepoint in IP header, packet is not mergeable */ + if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) + return -1; + + /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ + if (tcp->urg || tcp->psh || tcp->rst || + tcp->syn || tcp->fin || + tcp->ece || tcp->cwr || !tcp->ack) { + /* + * Currently recognize only the ack control word and + * any other control field being set would result in + * flushing the LRO session + */ + return -1; + } + + /* + * Allow only one TCP timestamp option. Don't aggregate if + * any other options are detected. 
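+ *
+ * doff counts 32-bit words, so 5 means a bare 20-byte header and 8
+ * means the header plus the 12-byte (NOP, NOP, timestamp) option block
+ * checked below; any other length implies unsupported options.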
+ */ + if (tcp->doff != 5 && tcp->doff != 8) + return -1; + + if (tcp->doff == 8) { + ptr = (u8 *)(tcp + 1); + while (*ptr == TCPOPT_NOP) + ptr++; + if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP) + return -1; + + /* Ensure timestamp value increases monotonically */ + if (l_lro) + if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2)))) + return -1; + + /* timestamp echo reply should be non-zero */ + if (*((__be32 *)(ptr+6)) == 0) + return -1; + } + + return 0; +} + +static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, + u8 **tcp, u32 *tcp_len, struct lro **lro, + struct RxD_t *rxdp, struct s2io_nic *sp) +{ + struct iphdr *ip; + struct tcphdr *tcph; + int ret = 0, i; + u16 vlan_tag = 0; + struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; + + ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp, + rxdp, sp); + if (ret) + return ret; + + DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr); + + vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2); + tcph = (struct tcphdr *)*tcp; + *tcp_len = get_l4_pyld_length(ip, tcph); + for (i = 0; i < MAX_LRO_SESSIONS; i++) { + struct lro *l_lro = &ring_data->lro0_n[i]; + if (l_lro->in_use) { + if (check_for_socket_match(l_lro, ip, tcph)) + continue; + /* Sock pair matched */ + *lro = l_lro; + + if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { + DBG_PRINT(INFO_DBG, "%s: Out of sequence. " + "expected 0x%x, actual 0x%x\n", + __func__, + (*lro)->tcp_next_seq, + ntohl(tcph->seq)); + + swstats->outof_sequence_pkts++; + ret = 2; + break; + } + + if (!verify_l3_l4_lro_capable(l_lro, ip, tcph, + *tcp_len)) + ret = 1; /* Aggregate */ + else + ret = 2; /* Flush both */ + break; + } + } + + if (ret == 0) { + /* Before searching for available LRO objects, + * check if the pkt is L3/L4 aggregatable. If not + * don't create new LRO session. Just send this + * packet up. 
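+ *
+ * Return value convention used by the Rx path: 1 - aggregate into the
+ * matched session, 2 - flush the matched session, 3 - start a new
+ * session, 4 - aggregated but the session hit lro_max_aggr_per_sess
+ * and must be flushed, 5 - not aggregatable, send the frame up as is,
+ * 0 - no free LRO session slot was available.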
+ */ + if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) + return 5; + + for (i = 0; i < MAX_LRO_SESSIONS; i++) { + struct lro *l_lro = &ring_data->lro0_n[i]; + if (!(l_lro->in_use)) { + *lro = l_lro; + ret = 3; /* Begin anew */ + break; + } + } + } + + if (ret == 0) { /* sessions exceeded */ + DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n", + __func__); + *lro = NULL; + return ret; + } + + switch (ret) { + case 3: + initiate_new_session(*lro, buffer, ip, tcph, *tcp_len, + vlan_tag); + break; + case 2: + update_L3L4_header(sp, *lro); + break; + case 1: + aggregate_new_rx(*lro, ip, tcph, *tcp_len); + if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) { + update_L3L4_header(sp, *lro); + ret = 4; /* Flush the LRO */ + } + break; + default: + DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__); + break; + } + + return ret; +} + +static void clear_lro_session(struct lro *lro) +{ + static u16 lro_struct_size = sizeof(struct lro); + + memset(lro, 0, lro_struct_size); +} + +static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag) +{ + struct net_device *dev = skb->dev; + struct s2io_nic *sp = netdev_priv(dev); + + skb->protocol = eth_type_trans(skb, dev); + if (vlan_tag && sp->vlan_strip_flag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + if (sp->config.napi) + netif_receive_skb(skb); + else + netif_rx(skb); +} + +static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, + struct sk_buff *skb, u32 tcp_len) +{ + struct sk_buff *first = lro->parent; + struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; + + first->len += tcp_len; + first->data_len = lro->frags_len; + skb_pull(skb, (skb->len - tcp_len)); + if (skb_shinfo(first)->frag_list) + lro->last_frag->next = skb; + else + skb_shinfo(first)->frag_list = skb; + first->truesize += skb->truesize; + lro->last_frag = skb; + swstats->clubbed_frms_cnt++; +} + +/** + * s2io_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct s2io_nic *sp = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) { + /* Bring down the card, while avoiding PCI I/O */ + do_s2io_card_down(sp, 0); + } + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * s2io_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct s2io_nic *sp = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + s2io_reset(sp); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * s2io_io_resume - called when traffic can start flowing again. 
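+ *
+ * Final step of the PCI error recovery sequence after
+ * s2io_io_error_detected() and s2io_io_slot_reset() above: the card is
+ * brought back up and its MAC address reprogrammed before the netdev
+ * is re-attached.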
+ * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. + */ +static void s2io_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct s2io_nic *sp = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (s2io_card_up(sp)) { + pr_err("Can't bring device back up after reset.\n"); + return; + } + + if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) { + s2io_card_down(sp); + pr_err("Can't restore mac addr after reset.\n"); + return; + } + } + + netif_device_attach(netdev); + netif_tx_wake_all_queues(netdev); +} diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h new file mode 100644 index 000000000..d89b6ed82 --- /dev/null +++ b/drivers/net/ethernet/neterion/s2io.h @@ -0,0 +1,1147 @@ +/************************************************************************ + * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC + * Copyright(c) 2002-2010 Exar Corp. + + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + ************************************************************************/ +#ifndef _S2IO_H +#define _S2IO_H + +#define TBD 0 +#define s2BIT(loc) (0x8000000000000000ULL >> (loc)) +#define vBIT(val, loc, sz) (((u64)val) << (64-loc-sz)) +#define INV(d) ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff) + +#undef SUCCESS +#define SUCCESS 0 +#define FAILURE -1 +#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL +#define S2IO_DISABLE_MAC_ENTRY 0xFFFFFFFFFFFFULL +#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100 +#define S2IO_BIT_RESET 1 +#define S2IO_BIT_SET 2 +#define CHECKBIT(value, nbit) (value & (1 << nbit)) + +/* Maximum time to flicker LED when asked to identify NIC using ethtool */ +#define MAX_FLICKER_TIME 60000 /* 60 Secs */ + +/* Maximum outstanding splits to be configured into xena. */ +enum { + XENA_ONE_SPLIT_TRANSACTION = 0, + XENA_TWO_SPLIT_TRANSACTION = 1, + XENA_THREE_SPLIT_TRANSACTION = 2, + XENA_FOUR_SPLIT_TRANSACTION = 3, + XENA_EIGHT_SPLIT_TRANSACTION = 4, + XENA_TWELVE_SPLIT_TRANSACTION = 5, + XENA_SIXTEEN_SPLIT_TRANSACTION = 6, + XENA_THIRTYTWO_SPLIT_TRANSACTION = 7 +}; +#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) + +/* OS concerned variables and constants */ +#define WATCH_DOG_TIMEOUT 15*HZ +#define EFILL 0x1234 +#define ALIGN_SIZE 127 +#define PCIX_COMMAND_REGISTER 0x62 + +/* + * Debug related variables. + */ +/* different debug levels. */ +#define ERR_DBG 0 +#define INIT_DBG 1 +#define INFO_DBG 2 +#define TX_DBG 3 +#define INTR_DBG 4 + +/* Global variable that defines the present debug level of the driver. */ +static int debug_level = ERR_DBG; + +/* DEBUG message print. */ +#define DBG_PRINT(dbg_level, fmt, args...) 
do { \ + if (dbg_level <= debug_level) \ + pr_info(fmt, ##args); \ + } while (0) + +/* Protocol assist features of the NIC */ +#define L3_CKSUM_OK 0xFFFF +#define L4_CKSUM_OK 0xFFFF +#define S2IO_JUMBO_SIZE 9600 + +/* Driver statistics maintained by driver */ +struct swStat { + unsigned long long single_ecc_errs; + unsigned long long double_ecc_errs; + unsigned long long parity_err_cnt; + unsigned long long serious_err_cnt; + unsigned long long soft_reset_cnt; + unsigned long long fifo_full_cnt; + unsigned long long ring_full_cnt[8]; + /* LRO statistics */ + unsigned long long clubbed_frms_cnt; + unsigned long long sending_both; + unsigned long long outof_sequence_pkts; + unsigned long long flush_max_pkts; + unsigned long long sum_avg_pkts_aggregated; + unsigned long long num_aggregations; + /* Other statistics */ + unsigned long long mem_alloc_fail_cnt; + unsigned long long pci_map_fail_cnt; + unsigned long long watchdog_timer_cnt; + unsigned long long mem_allocated; + unsigned long long mem_freed; + unsigned long long link_up_cnt; + unsigned long long link_down_cnt; + unsigned long long link_up_time; + unsigned long long link_down_time; + + /* Transfer Code statistics */ + unsigned long long tx_buf_abort_cnt; + unsigned long long tx_desc_abort_cnt; + unsigned long long tx_parity_err_cnt; + unsigned long long tx_link_loss_cnt; + unsigned long long tx_list_proc_err_cnt; + + unsigned long long rx_parity_err_cnt; + unsigned long long rx_abort_cnt; + unsigned long long rx_parity_abort_cnt; + unsigned long long rx_rda_fail_cnt; + unsigned long long rx_unkn_prot_cnt; + unsigned long long rx_fcs_err_cnt; + unsigned long long rx_buf_size_err_cnt; + unsigned long long rx_rxd_corrupt_cnt; + unsigned long long rx_unkn_err_cnt; + + /* Error/alarm statistics*/ + unsigned long long tda_err_cnt; + unsigned long long pfc_err_cnt; + unsigned long long pcc_err_cnt; + unsigned long long tti_err_cnt; + unsigned long long lso_err_cnt; + unsigned long long tpa_err_cnt; + unsigned long long sm_err_cnt; + unsigned long long mac_tmac_err_cnt; + unsigned long long mac_rmac_err_cnt; + unsigned long long xgxs_txgxs_err_cnt; + unsigned long long xgxs_rxgxs_err_cnt; + unsigned long long rc_err_cnt; + unsigned long long prc_pcix_err_cnt; + unsigned long long rpa_err_cnt; + unsigned long long rda_err_cnt; + unsigned long long rti_err_cnt; + unsigned long long mc_err_cnt; + +}; + +/* Xpak releated alarm and warnings */ +struct xpakStat { + u64 alarm_transceiver_temp_high; + u64 alarm_transceiver_temp_low; + u64 alarm_laser_bias_current_high; + u64 alarm_laser_bias_current_low; + u64 alarm_laser_output_power_high; + u64 alarm_laser_output_power_low; + u64 warn_transceiver_temp_high; + u64 warn_transceiver_temp_low; + u64 warn_laser_bias_current_high; + u64 warn_laser_bias_current_low; + u64 warn_laser_output_power_high; + u64 warn_laser_output_power_low; + u64 xpak_regs_stat; + u32 xpak_timer_count; +}; + + +/* The statistics block of Xena */ +struct stat_block { +/* Tx MAC statistics counters. */ + __le32 tmac_data_octets; + __le32 tmac_frms; + __le64 tmac_drop_frms; + __le32 tmac_bcst_frms; + __le32 tmac_mcst_frms; + __le64 tmac_pause_ctrl_frms; + __le32 tmac_ucst_frms; + __le32 tmac_ttl_octets; + __le32 tmac_any_err_frms; + __le32 tmac_nucst_frms; + __le64 tmac_ttl_less_fb_octets; + __le64 tmac_vld_ip_octets; + __le32 tmac_drop_ip; + __le32 tmac_vld_ip; + __le32 tmac_rst_tcp; + __le32 tmac_icmp; + __le64 tmac_tcp; + __le32 reserved_0; + __le32 tmac_udp; + +/* Rx MAC Statistics counters. 
*/ + __le32 rmac_data_octets; + __le32 rmac_vld_frms; + __le64 rmac_fcs_err_frms; + __le64 rmac_drop_frms; + __le32 rmac_vld_bcst_frms; + __le32 rmac_vld_mcst_frms; + __le32 rmac_out_rng_len_err_frms; + __le32 rmac_in_rng_len_err_frms; + __le64 rmac_long_frms; + __le64 rmac_pause_ctrl_frms; + __le64 rmac_unsup_ctrl_frms; + __le32 rmac_accepted_ucst_frms; + __le32 rmac_ttl_octets; + __le32 rmac_discarded_frms; + __le32 rmac_accepted_nucst_frms; + __le32 reserved_1; + __le32 rmac_drop_events; + __le64 rmac_ttl_less_fb_octets; + __le64 rmac_ttl_frms; + __le64 reserved_2; + __le32 rmac_usized_frms; + __le32 reserved_3; + __le32 rmac_frag_frms; + __le32 rmac_osized_frms; + __le32 reserved_4; + __le32 rmac_jabber_frms; + __le64 rmac_ttl_64_frms; + __le64 rmac_ttl_65_127_frms; + __le64 reserved_5; + __le64 rmac_ttl_128_255_frms; + __le64 rmac_ttl_256_511_frms; + __le64 reserved_6; + __le64 rmac_ttl_512_1023_frms; + __le64 rmac_ttl_1024_1518_frms; + __le32 rmac_ip; + __le32 reserved_7; + __le64 rmac_ip_octets; + __le32 rmac_drop_ip; + __le32 rmac_hdr_err_ip; + __le32 reserved_8; + __le32 rmac_icmp; + __le64 rmac_tcp; + __le32 rmac_err_drp_udp; + __le32 rmac_udp; + __le64 rmac_xgmii_err_sym; + __le64 rmac_frms_q0; + __le64 rmac_frms_q1; + __le64 rmac_frms_q2; + __le64 rmac_frms_q3; + __le64 rmac_frms_q4; + __le64 rmac_frms_q5; + __le64 rmac_frms_q6; + __le64 rmac_frms_q7; + __le16 rmac_full_q3; + __le16 rmac_full_q2; + __le16 rmac_full_q1; + __le16 rmac_full_q0; + __le16 rmac_full_q7; + __le16 rmac_full_q6; + __le16 rmac_full_q5; + __le16 rmac_full_q4; + __le32 reserved_9; + __le32 rmac_pause_cnt; + __le64 rmac_xgmii_data_err_cnt; + __le64 rmac_xgmii_ctrl_err_cnt; + __le32 rmac_err_tcp; + __le32 rmac_accepted_ip; + +/* PCI/PCI-X Read transaction statistics. */ + __le32 new_rd_req_cnt; + __le32 rd_req_cnt; + __le32 rd_rtry_cnt; + __le32 new_rd_req_rtry_cnt; + +/* PCI/PCI-X Write/Read transaction statistics. */ + __le32 wr_req_cnt; + __le32 wr_rtry_rd_ack_cnt; + __le32 new_wr_req_rtry_cnt; + __le32 new_wr_req_cnt; + __le32 wr_disc_cnt; + __le32 wr_rtry_cnt; + +/* PCI/PCI-X Write / DMA Transaction statistics. */ + __le32 txp_wr_cnt; + __le32 rd_rtry_wr_ack_cnt; + __le32 txd_wr_cnt; + __le32 txd_rd_cnt; + __le32 rxd_wr_cnt; + __le32 rxd_rd_cnt; + __le32 rxf_wr_cnt; + __le32 txf_rd_cnt; + +/* Tx MAC statistics overflow counters. */ + __le32 tmac_data_octets_oflow; + __le32 tmac_frms_oflow; + __le32 tmac_bcst_frms_oflow; + __le32 tmac_mcst_frms_oflow; + __le32 tmac_ucst_frms_oflow; + __le32 tmac_ttl_octets_oflow; + __le32 tmac_any_err_frms_oflow; + __le32 tmac_nucst_frms_oflow; + __le64 tmac_vlan_frms; + __le32 tmac_drop_ip_oflow; + __le32 tmac_vld_ip_oflow; + __le32 tmac_rst_tcp_oflow; + __le32 tmac_icmp_oflow; + __le32 tpa_unknown_protocol; + __le32 tmac_udp_oflow; + __le32 reserved_10; + __le32 tpa_parse_failure; + +/* Rx MAC Statistics overflow counters. 
*/ + __le32 rmac_data_octets_oflow; + __le32 rmac_vld_frms_oflow; + __le32 rmac_vld_bcst_frms_oflow; + __le32 rmac_vld_mcst_frms_oflow; + __le32 rmac_accepted_ucst_frms_oflow; + __le32 rmac_ttl_octets_oflow; + __le32 rmac_discarded_frms_oflow; + __le32 rmac_accepted_nucst_frms_oflow; + __le32 rmac_usized_frms_oflow; + __le32 rmac_drop_events_oflow; + __le32 rmac_frag_frms_oflow; + __le32 rmac_osized_frms_oflow; + __le32 rmac_ip_oflow; + __le32 rmac_jabber_frms_oflow; + __le32 rmac_icmp_oflow; + __le32 rmac_drop_ip_oflow; + __le32 rmac_err_drp_udp_oflow; + __le32 rmac_udp_oflow; + __le32 reserved_11; + __le32 rmac_pause_cnt_oflow; + __le64 rmac_ttl_1519_4095_frms; + __le64 rmac_ttl_4096_8191_frms; + __le64 rmac_ttl_8192_max_frms; + __le64 rmac_ttl_gt_max_frms; + __le64 rmac_osized_alt_frms; + __le64 rmac_jabber_alt_frms; + __le64 rmac_gt_max_alt_frms; + __le64 rmac_vlan_frms; + __le32 rmac_len_discard; + __le32 rmac_fcs_discard; + __le32 rmac_pf_discard; + __le32 rmac_da_discard; + __le32 rmac_red_discard; + __le32 rmac_rts_discard; + __le32 reserved_12; + __le32 rmac_ingm_full_discard; + __le32 reserved_13; + __le32 rmac_accepted_ip_oflow; + __le32 reserved_14; + __le32 link_fault_cnt; + u8 buffer[20]; + struct swStat sw_stat; + struct xpakStat xpak_stat; +}; + +/* Default value for 'vlan_strip_tag' configuration parameter */ +#define NO_STRIP_IN_PROMISC 2 + +/* + * Structures representing different init time configuration + * parameters of the NIC. + */ + +#define MAX_TX_FIFOS 8 +#define MAX_RX_RINGS 8 + +#define FIFO_DEFAULT_NUM 5 +#define FIFO_UDP_MAX_NUM 2 /* 0 - even, 1 -odd ports */ +#define FIFO_OTHER_MAX_NUM 1 + + +#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128) +#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86) +#define MAX_TX_DESC (MAX_AVAILABLE_TXDS) + +/* FIFO mappings for all possible number of fifos configured */ +static const int fifo_map[][MAX_TX_FIFOS] = { + {0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 1, 1, 1, 1}, + {0, 0, 0, 1, 1, 1, 2, 2}, + {0, 0, 1, 1, 2, 2, 3, 3}, + {0, 0, 1, 1, 2, 2, 3, 4}, + {0, 0, 1, 1, 2, 3, 4, 5}, + {0, 0, 1, 2, 3, 4, 5, 6}, + {0, 1, 2, 3, 4, 5, 6, 7}, +}; + +static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7}; + +/* Maintains Per FIFO related information. 
*/ +struct tx_fifo_config { +#define MAX_AVAILABLE_TXDS 8192 + u32 fifo_len; /* specifies len of FIFO up to 8192, ie no of TxDLs */ +/* Priority definition */ +#define TX_FIFO_PRI_0 0 /*Highest */ +#define TX_FIFO_PRI_1 1 +#define TX_FIFO_PRI_2 2 +#define TX_FIFO_PRI_3 3 +#define TX_FIFO_PRI_4 4 +#define TX_FIFO_PRI_5 5 +#define TX_FIFO_PRI_6 6 +#define TX_FIFO_PRI_7 7 /*lowest */ + u8 fifo_priority; /* specifies pointer level for FIFO */ + /* user should not set twos fifos with same pri */ + u8 f_no_snoop; +#define NO_SNOOP_TXD 0x01 +#define NO_SNOOP_TXD_BUFFER 0x02 +}; + + +/* Maintains per Ring related information */ +struct rx_ring_config { + u32 num_rxd; /*No of RxDs per Rx Ring */ +#define RX_RING_PRI_0 0 /* highest */ +#define RX_RING_PRI_1 1 +#define RX_RING_PRI_2 2 +#define RX_RING_PRI_3 3 +#define RX_RING_PRI_4 4 +#define RX_RING_PRI_5 5 +#define RX_RING_PRI_6 6 +#define RX_RING_PRI_7 7 /* lowest */ + + u8 ring_priority; /*Specifies service priority of ring */ + /* OSM should not set any two rings with same priority */ + u8 ring_org; /*Organization of ring */ +#define RING_ORG_BUFF1 0x01 +#define RX_RING_ORG_BUFF3 0x03 +#define RX_RING_ORG_BUFF5 0x05 + + u8 f_no_snoop; +#define NO_SNOOP_RXD 0x01 +#define NO_SNOOP_RXD_BUFFER 0x02 +}; + +/* This structure provides contains values of the tunable parameters + * of the H/W + */ +struct config_param { +/* Tx Side */ + u32 tx_fifo_num; /*Number of Tx FIFOs */ + + /* 0-No steering, 1-Priority steering, 2-Default fifo map */ +#define NO_STEERING 0 +#define TX_PRIORITY_STEERING 0x1 +#define TX_DEFAULT_STEERING 0x2 + u8 tx_steering_type; + + u8 fifo_mapping[MAX_TX_FIFOS]; + struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ + u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ + u64 tx_intr_type; +#define INTA 0 +#define MSI_X 2 + u8 intr_type; + u8 napi; + + /* Specifies if Tx Intr is UTILZ or PER_LIST type. */ + +/* Rx Side */ + u32 rx_ring_num; /*Number of receive rings */ +#define MAX_RX_BLOCKS_PER_RING 150 + + struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ + +#define HEADER_ETHERNET_II_802_3_SIZE 14 +#define HEADER_802_2_SIZE 3 +#define HEADER_SNAP_SIZE 5 +#define HEADER_VLAN_SIZE 4 + +#define MIN_MTU 46 +#define MAX_PYLD 1500 +#define MAX_MTU (MAX_PYLD+18) +#define MAX_MTU_VLAN (MAX_PYLD+22) +#define MAX_PYLD_JUMBO 9600 +#define MAX_MTU_JUMBO (MAX_PYLD_JUMBO+18) +#define MAX_MTU_JUMBO_VLAN (MAX_PYLD_JUMBO+22) + u16 bus_speed; + int max_mc_addr; /* xena=64 herc=256 */ + int max_mac_addr; /* xena=16 herc=64 */ + int mc_start_offset; /* xena=16 herc=64 */ + u8 multiq; +}; + +/* Structure representing MAC Addrs */ +struct mac_addr { + u8 mac_addr[ETH_ALEN]; +}; + +/* Structure that represent every FIFO element in the BAR1 + * Address location. 
+ */ +struct TxFIFO_element { + u64 TxDL_Pointer; + + u64 List_Control; +#define TX_FIFO_LAST_TXD_NUM( val) vBIT(val,0,8) +#define TX_FIFO_FIRST_LIST s2BIT(14) +#define TX_FIFO_LAST_LIST s2BIT(15) +#define TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2) +#define TX_FIFO_SPECIAL_FUNC s2BIT(23) +#define TX_FIFO_DS_NO_SNOOP s2BIT(31) +#define TX_FIFO_BUFF_NO_SNOOP s2BIT(30) +}; + +/* Tx descriptor structure */ +struct TxD { + u64 Control_1; +/* bit mask */ +#define TXD_LIST_OWN_XENA s2BIT(7) +#define TXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15)) +#define TXD_T_CODE_OK(val) (|(val & TXD_T_CODE)) +#define GET_TXD_T_CODE(val) ((val & TXD_T_CODE)<<12) +#define TXD_GATHER_CODE (s2BIT(22) | s2BIT(23)) +#define TXD_GATHER_CODE_FIRST s2BIT(22) +#define TXD_GATHER_CODE_LAST s2BIT(23) +#define TXD_TCP_LSO_EN s2BIT(30) +#define TXD_UDP_COF_EN s2BIT(31) +#define TXD_UFO_EN s2BIT(31) | s2BIT(30) +#define TXD_TCP_LSO_MSS(val) vBIT(val,34,14) +#define TXD_UFO_MSS(val) vBIT(val,34,14) +#define TXD_BUFFER0_SIZE(val) vBIT(val,48,16) + + u64 Control_2; +#define TXD_TX_CKO_CONTROL (s2BIT(5)|s2BIT(6)|s2BIT(7)) +#define TXD_TX_CKO_IPV4_EN s2BIT(5) +#define TXD_TX_CKO_TCP_EN s2BIT(6) +#define TXD_TX_CKO_UDP_EN s2BIT(7) +#define TXD_VLAN_ENABLE s2BIT(15) +#define TXD_VLAN_TAG(val) vBIT(val,16,16) +#define TXD_INT_NUMBER(val) vBIT(val,34,6) +#define TXD_INT_TYPE_PER_LIST s2BIT(47) +#define TXD_INT_TYPE_UTILZ s2BIT(46) +#define TXD_SET_MARKER vBIT(0x6,0,4) + + u64 Buffer_Pointer; + u64 Host_Control; /* reserved for host */ +}; + +/* Structure to hold the phy and virt addr of every TxDL. */ +struct list_info_hold { + dma_addr_t list_phy_addr; + void *list_virt_addr; +}; + +/* Rx descriptor structure for 1 buffer mode */ +struct RxD_t { + u64 Host_Control; /* reserved for host */ + u64 Control_1; +#define RXD_OWN_XENA s2BIT(7) +#define RXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15)) +#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8) +#define RXD_FRAME_VLAN_TAG s2BIT(24) +#define RXD_FRAME_PROTO_IPV4 s2BIT(27) +#define RXD_FRAME_PROTO_IPV6 s2BIT(28) +#define RXD_FRAME_IP_FRAG s2BIT(29) +#define RXD_FRAME_PROTO_TCP s2BIT(30) +#define RXD_FRAME_PROTO_UDP s2BIT(31) +#define TCP_OR_UDP_FRAME (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP) +#define RXD_GET_L3_CKSUM(val) ((u16)(val>> 16) & 0xFFFF) +#define RXD_GET_L4_CKSUM(val) ((u16)(val) & 0xFFFF) + + u64 Control_2; +#define THE_RXD_MARK 0x3 +#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2) +#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62) + +#define MASK_VLAN_TAG vBIT(0xFFFF,48,16) +#define SET_VLAN_TAG(val) vBIT(val,48,16) +#define SET_NUM_TAG(val) vBIT(val,16,32) + + +}; +/* Rx descriptor structure for 1 buffer mode */ +struct RxD1 { + struct RxD_t h; + +#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14) +#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14) +#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \ + (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48) + u64 Buffer0_ptr; +}; +/* Rx descriptor structure for 3 or 2 buffer mode */ + +struct RxD3 { + struct RxD_t h; + +#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14) +#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16) +#define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16) +#define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8) +#define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16) +#define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16) +#define RXD_GET_BUFFER0_SIZE_3(Control_2) \ + (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48) +#define RXD_GET_BUFFER1_SIZE_3(Control_2) \ + (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32) +#define RXD_GET_BUFFER2_SIZE_3(Control_2) \ + 
(u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16) +#define BUF0_LEN 40 +#define BUF1_LEN 1 + + u64 Buffer0_ptr; + u64 Buffer1_ptr; + u64 Buffer2_ptr; +}; + + +/* Structure that represents the Rx descriptor block which contains + * 128 Rx descriptors. + */ +struct RxD_block { +#define MAX_RXDS_PER_BLOCK_1 127 + struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1]; + + u64 reserved_0; +#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL + u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last + * Rxd in this blk */ + u64 reserved_2_pNext_RxD_block; /* Logical ptr to next */ + u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch + * the upper 32 bits should + * be 0 */ +}; + +#define SIZE_OF_BLOCK 4096 + +#define RXD_MODE_1 0 /* One Buffer mode */ +#define RXD_MODE_3B 1 /* Two Buffer mode */ + +/* Structure to hold virtual addresses of Buf0 and Buf1 in + * 2buf mode. */ +struct buffAdd { + void *ba_0_org; + void *ba_1_org; + void *ba_0; + void *ba_1; +}; + +/* Structure which stores all the MAC control parameters */ + +/* This structure stores the offset of the RxD in the ring + * from which the Rx Interrupt processor can start picking + * up the RxDs for processing. + */ +struct rx_curr_get_info { + u32 block_index; + u32 offset; + u32 ring_len; +}; + +struct rx_curr_put_info { + u32 block_index; + u32 offset; + u32 ring_len; +}; + +/* This structure stores the offset of the TxDl in the FIFO + * from which the Tx Interrupt processor can start picking + * up the TxDLs for send complete interrupt processing. + */ +struct tx_curr_get_info { + u32 offset; + u32 fifo_len; +}; + +struct tx_curr_put_info { + u32 offset; + u32 fifo_len; +}; + +struct rxd_info { + void *virt_addr; + dma_addr_t dma_addr; +}; + +/* Structure that holds the Phy and virt addresses of the Blocks */ +struct rx_block_info { + void *block_virt_addr; + dma_addr_t block_dma_addr; + struct rxd_info *rxds; +}; + +/* Data structure to represent a LRO session */ +struct lro { + struct sk_buff *parent; + struct sk_buff *last_frag; + u8 *l2h; + struct iphdr *iph; + struct tcphdr *tcph; + u32 tcp_next_seq; + __be32 tcp_ack; + int total_len; + int frags_len; + int sg_num; + int in_use; + __be16 window; + u16 vlan_tag; + u32 cur_tsval; + __be32 cur_tsecr; + u8 saw_ts; +} ____cacheline_aligned; + +/* Ring specific structure */ +struct ring_info { + /* The ring number */ + int ring_no; + + /* per-ring buffer counter */ + u32 rx_bufs_left; + +#define MAX_LRO_SESSIONS 32 + struct lro lro0_n[MAX_LRO_SESSIONS]; + u8 lro; + + /* copy of sp->rxd_mode flag */ + int rxd_mode; + + /* Number of rxds per block for the rxd_mode */ + int rxd_count; + + /* copy of sp pointer */ + struct s2io_nic *nic; + + /* copy of sp->dev pointer */ + struct net_device *dev; + + /* copy of sp->pdev pointer */ + struct pci_dev *pdev; + + /* Per ring napi struct */ + struct napi_struct napi; + + unsigned long interrupt_count; + + /* + * Place holders for the virtual and physical addresses of + * all the Rx Blocks + */ + struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING]; + int block_count; + int pkt_cnt; + + /* + * Put pointer info which indictes which RxD has to be replenished + * with a new buffer. + */ + struct rx_curr_put_info rx_curr_put_info; + + /* + * Get pointer info which indictes which is the last RxD that was + * processed by the driver. + */ + struct rx_curr_get_info rx_curr_get_info; + + /* interface MTU value */ + unsigned mtu; + + /* Buffer Address store. 
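+ *
+ * Used in 2-buffer mode: each entry records the in-use buffer
+ * addresses (ba_0/ba_1) together with the originally allocated
+ * pointers (ba_0_org/ba_1_org) from struct buffAdd above.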
*/ + struct buffAdd **ba; +} ____cacheline_aligned; + +/* Fifo specific structure */ +struct fifo_info { + /* FIFO number */ + int fifo_no; + + /* Maximum TxDs per TxDL */ + int max_txds; + + /* Place holder of all the TX List's Phy and Virt addresses. */ + struct list_info_hold *list_info; + + /* + * Current offset within the tx FIFO where driver would write + * new Tx frame + */ + struct tx_curr_put_info tx_curr_put_info; + + /* + * Current offset within tx FIFO from where the driver would start freeing + * the buffers + */ + struct tx_curr_get_info tx_curr_get_info; +#define FIFO_QUEUE_START 0 +#define FIFO_QUEUE_STOP 1 + int queue_state; + + /* copy of sp->dev pointer */ + struct net_device *dev; + + /* copy of multiq status */ + u8 multiq; + + /* Per fifo lock */ + spinlock_t tx_lock; + + /* Per fifo UFO in band structure */ + u64 *ufo_in_band_v; + + struct s2io_nic *nic; +} ____cacheline_aligned; + +/* Information related to the Tx and Rx FIFOs and Rings of Xena + * is maintained in this structure. + */ +struct mac_info { +/* tx side stuff */ + /* logical pointer of start of each Tx FIFO */ + struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS]; + + /* Fifo specific structure */ + struct fifo_info fifos[MAX_TX_FIFOS]; + + /* Save virtual address of TxD page with zero DMA addr(if any) */ + void *zerodma_virt_addr; + +/* rx side stuff */ + /* Ring specific structure */ + struct ring_info rings[MAX_RX_RINGS]; + + u16 rmac_pause_time; + u16 mc_pause_threshold_q0q3; + u16 mc_pause_threshold_q4q7; + + void *stats_mem; /* orignal pointer to allocated mem */ + dma_addr_t stats_mem_phy; /* Physical address of the stat block */ + u32 stats_mem_sz; + struct stat_block *stats_info; /* Logical address of the stat block */ +}; + +/* Default Tunable parameters of the NIC. */ +#define DEFAULT_FIFO_0_LEN 4096 +#define DEFAULT_FIFO_1_7_LEN 512 +#define SMALL_BLK_CNT 30 +#define LARGE_BLK_CNT 100 + +/* + * Structure to keep track of the MSI-X vectors and the corresponding + * argument registered against each vector + */ +#define MAX_REQUESTED_MSI_X 9 +struct s2io_msix_entry +{ + u16 vector; + u16 entry; + void *arg; + + u8 type; +#define MSIX_ALARM_TYPE 1 +#define MSIX_RING_TYPE 2 + + u8 in_use; +#define MSIX_REGISTERED_SUCCESS 0xAA +}; + +struct msix_info_st { + u64 addr; + u64 data; +}; + +/* These flags represent the devices temporary state */ +enum s2io_device_state_t +{ + __S2IO_STATE_LINK_TASK=0, + __S2IO_STATE_CARD_UP +}; + +/* Structure representing one instance of the NIC */ +struct s2io_nic { + int rxd_mode; + /* + * Count of packets to be processed in a given iteration, it will be indicated + * by the quota field of the device structure when NAPI is enabled. 
+ */ + int pkts_to_process; + struct net_device *dev; + struct mac_info mac_control; + struct config_param config; + struct pci_dev *pdev; + void __iomem *bar0; + void __iomem *bar1; +#define MAX_MAC_SUPPORTED 16 +#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED + + struct mac_addr def_mac_addr[256]; + + struct net_device_stats stats; + int high_dma_flag; + int device_enabled_once; + + char name[60]; + + /* Timer that handles I/O errors/exceptions */ + struct timer_list alarm_timer; + + /* Space to back up the PCI config space */ + u32 config_space[256 / sizeof(u32)]; + +#define PROMISC 1 +#define ALL_MULTI 2 + +#define MAX_ADDRS_SUPPORTED 64 + u16 mc_addr_count; + + u16 m_cast_flg; + u16 all_multi_pos; + u16 promisc_flg; + + /* Restart timer, used to restart NIC if the device is stuck and + * a schedule task that will set the correct Link state once the + * NIC's PHY has stabilized after a state change. + */ + struct work_struct rst_timer_task; + struct work_struct set_link_task; + + /* Flag that can be used to turn on or turn off the Rx checksum + * offload feature. + */ + int rx_csum; + + /* Below variables are used for fifo selection to transmit a packet */ + u16 fifo_selector[MAX_TX_FIFOS]; + + /* Total fifos for tcp packets */ + u8 total_tcp_fifos; + + /* + * Beginning index of udp for udp packets + * Value will be equal to + * (tx_fifo_num - FIFO_UDP_MAX_NUM - FIFO_OTHER_MAX_NUM) + */ + u8 udp_fifo_idx; + + u8 total_udp_fifos; + + /* + * Beginning index of fifo for all other packets + * Value will be equal to (tx_fifo_num - FIFO_OTHER_MAX_NUM) + */ + u8 other_fifo_idx; + + struct napi_struct napi; + /* after blink, the adapter must be restored with original + * values. + */ + u64 adapt_ctrl_org; + + /* Last known link state. */ + u16 last_link_state; +#define LINK_DOWN 1 +#define LINK_UP 2 + + int task_flag; + unsigned long long start_time; + int vlan_strip_flag; +#define MSIX_FLG 0xA5 + int num_entries; + struct msix_entry *entries; + int msi_detected; + wait_queue_head_t msi_wait; + struct s2io_msix_entry *s2io_entries; + char desc[MAX_REQUESTED_MSI_X][25]; + + int avail_msix_vectors; /* No. of MSI-X vectors granted by system */ + + struct msix_info_st msix_info[0x3f]; + +#define XFRAME_I_DEVICE 1 +#define XFRAME_II_DEVICE 2 + u8 device_type; + + unsigned long clubbed_frms_cnt; + unsigned long sending_both; + u16 lro_max_aggr_per_sess; + volatile unsigned long state; + u64 general_int_mask; + +#define VPD_STRING_LEN 80 + u8 product_name[VPD_STRING_LEN]; + u8 serial_num[VPD_STRING_LEN]; +}; + +#define RESET_ERROR 1 +#define CMD_ERROR 2 + +/* OS related system calls */ +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + u64 ret = 0; + ret = readl(addr + 4); + ret <<= 32; + ret |= readl(addr); + + return ret; +} +#endif + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel((u32) (val), addr); + writel((u32) (val >> 32), (addr + 4)); +} +#endif + +/* + * Some registers have to be written in a particular order to + * expect correct hardware operation. The macro SPECIAL_REG_WRITE + * is used to perform such ordered writes. Defines UF (Upper First) + * and LF (Lower First) will be used to specify the required write order. 
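+ *
+ * LF writes the lower 32 bits of the 64-bit value first and then the
+ * upper 32 bits; UF does the reverse. Each half is followed by a
+ * readl() so the posted write reaches the device before the other
+ * half is issued.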
+ */ +#define UF 1 +#define LF 2 +static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order) +{ + if (order == LF) { + writel((u32) (val), addr); + (void) readl(addr); + writel((u32) (val >> 32), (addr + 4)); + (void) readl(addr + 4); + } else { + writel((u32) (val >> 32), (addr + 4)); + (void) readl(addr + 4); + writel((u32) (val), addr); + (void) readl(addr); + } +} + +/* Interrupt related values of Xena */ + +#define ENABLE_INTRS 1 +#define DISABLE_INTRS 2 + +/* Highest level interrupt blocks */ +#define TX_PIC_INTR (0x0001<<0) +#define TX_DMA_INTR (0x0001<<1) +#define TX_MAC_INTR (0x0001<<2) +#define TX_XGXS_INTR (0x0001<<3) +#define TX_TRAFFIC_INTR (0x0001<<4) +#define RX_PIC_INTR (0x0001<<5) +#define RX_DMA_INTR (0x0001<<6) +#define RX_MAC_INTR (0x0001<<7) +#define RX_XGXS_INTR (0x0001<<8) +#define RX_TRAFFIC_INTR (0x0001<<9) +#define MC_INTR (0x0001<<10) +#define ENA_ALL_INTRS ( TX_PIC_INTR | \ + TX_DMA_INTR | \ + TX_MAC_INTR | \ + TX_XGXS_INTR | \ + TX_TRAFFIC_INTR | \ + RX_PIC_INTR | \ + RX_DMA_INTR | \ + RX_MAC_INTR | \ + RX_XGXS_INTR | \ + RX_TRAFFIC_INTR | \ + MC_INTR ) + +/* Interrupt masks for the general interrupt mask register */ +#define DISABLE_ALL_INTRS 0xFFFFFFFFFFFFFFFFULL + +#define TXPIC_INT_M s2BIT(0) +#define TXDMA_INT_M s2BIT(1) +#define TXMAC_INT_M s2BIT(2) +#define TXXGXS_INT_M s2BIT(3) +#define TXTRAFFIC_INT_M s2BIT(8) +#define PIC_RX_INT_M s2BIT(32) +#define RXDMA_INT_M s2BIT(33) +#define RXMAC_INT_M s2BIT(34) +#define MC_INT_M s2BIT(35) +#define RXXGXS_INT_M s2BIT(36) +#define RXTRAFFIC_INT_M s2BIT(40) + +/* PIC level Interrupts TODO*/ + +/* DMA level Inressupts */ +#define TXDMA_PFC_INT_M s2BIT(0) +#define TXDMA_PCC_INT_M s2BIT(2) + +/* PFC block interrupts */ +#define PFC_MISC_ERR_1 s2BIT(0) /* Interrupt to indicate FIFO full */ + +/* PCC block interrupts. */ +#define PCC_FB_ECC_ERR vBIT(0xff, 16, 8) /* Interrupt to indicate + PCC_FB_ECC Error. */ + +#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG) +/* + * Prototype declaration. 
+ */ +static int s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre); +static void s2io_rem_nic(struct pci_dev *pdev); +static int init_shared_mem(struct s2io_nic *sp); +static void free_shared_mem(struct s2io_nic *sp); +static int init_nic(struct s2io_nic *nic); +static int rx_intr_handler(struct ring_info *ring_data, int budget); +static void s2io_txpic_intr_handle(struct s2io_nic *sp); +static void tx_intr_handler(struct fifo_info *fifo_data); +static void s2io_handle_errors(void * dev_id); + +static int s2io_starter(void); +static void s2io_closer(void); +static void s2io_tx_watchdog(struct net_device *dev); +static void s2io_set_multicast(struct net_device *dev); +static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); +static void s2io_link(struct s2io_nic * sp, int link); +static void s2io_reset(struct s2io_nic * sp); +static int s2io_poll_msix(struct napi_struct *napi, int budget); +static int s2io_poll_inta(struct napi_struct *napi, int budget); +static void s2io_init_pci(struct s2io_nic * sp); +static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); +static void s2io_alarm_handle(unsigned long data); +static irqreturn_t +s2io_msix_ring_handle(int irq, void *dev_id); +static irqreturn_t +s2io_msix_fifo_handle(int irq, void *dev_id); +static irqreturn_t s2io_isr(int irq, void *dev_id); +static int verify_xena_quiescence(struct s2io_nic *sp); +static const struct ethtool_ops netdev_ethtool_ops; +static void s2io_set_link(struct work_struct *work); +static int s2io_set_swapper(struct s2io_nic * sp); +static void s2io_card_down(struct s2io_nic *nic); +static int s2io_card_up(struct s2io_nic *nic); +static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, + int bit_state); +static int s2io_add_isr(struct s2io_nic * sp); +static void s2io_rem_isr(struct s2io_nic * sp); + +static void restore_xmsi_data(struct s2io_nic *nic); +static void do_s2io_store_unicast_mc(struct s2io_nic *sp); +static void do_s2io_restore_unicast_mc(struct s2io_nic *sp); +static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset); +static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr); +static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); +static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); + +static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, + u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, + struct s2io_nic *sp); +static void clear_lro_session(struct lro *lro); +static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); +static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); +static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, + struct sk_buff *skb, u32 tcp_len); +static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring); + +static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev); +static void s2io_io_resume(struct pci_dev *pdev); + +#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size +#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size +#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type + +#define S2IO_PARM_INT(X, def_val) \ + static unsigned int X = def_val;\ + module_param(X , uint, 0); + +#endif /* _S2IO_H */ diff --git a/drivers/net/ethernet/neterion/vxge/Makefile b/drivers/net/ethernet/neterion/vxge/Makefile new file mode 100644 index 000000000..b625e2c50 --- /dev/null +++ 
b/drivers/net/ethernet/neterion/vxge/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Exar Corp's X3100 Series 10 GbE PCIe I/O
+# Virtualized Server Adapter linux driver
+
+obj-$(CONFIG_VXGE) += vxge.o
+
+vxge-objs := vxge-config.o vxge-traffic.o vxge-ethtool.o vxge-main.o
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
new file mode 100644
index 000000000..6223930a8
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -0,0 +1,5114 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
+
+#include "vxge-traffic.h"
+#include "vxge-config.h"
+#include "vxge-main.h"
+
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
+ status = __vxge_hw_vpath_stats_access(vpath, \
+ VXGE_HW_STATS_OP_READ, \
+ offset, \
+ &val64); \
+ if (status != VXGE_HW_OK) \
+ return status; \
+}
+
+static void
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
+{
+ u64 val64;
+
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+ val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+ writeq(val64, &vp_reg->rxmac_vcfg0);
+ val64 = readq(&vp_reg->rxmac_vcfg0);
+}
+
+/*
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
+ */
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct __vxge_hw_virtualpath *vpath;
+ u64 val64, rxd_count, rxd_spat;
+ int count = 0, total_count = 0;
+
+ vpath = &hldev->virtual_paths[vp_id];
+ vp_reg = vpath->vp_reg;
+
+ vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
+
+ /* Check that the ring controller for this vpath has enough free RxDs
+ * to send frames to the host. This is done by reading the
+ * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+ * RXD_SPAT value for the vpath.
+ */
+ val64 = readq(&vp_reg->prc_cfg6);
+ rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+ /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
+ * leg room.
+ */
+ rxd_spat *= 2;
+
+ do {
+ mdelay(1);
+
+ rxd_count = readq(&vp_reg->prc_rxd_doorbell);
+
+ /* Check that the ring controller for this vpath does
+ * not have any frame in its pipeline.
+ */
+ val64 = readq(&vp_reg->frm_in_progress_cnt);
+ if ((rxd_count <= rxd_spat) || (val64 > 0))
+ count = 0;
+ else
+ count++;
+ total_count++;
+ } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+ (total_count < VXGE_HW_MAX_POLLING_COUNT));
+
+ if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+ printk(KERN_ALERT "%s: Still Receiving traffic.
Abort wait\n", + __func__); + + return total_count; +} + +/* vxge_hw_device_wait_receive_idle - This function waits until all frames + * stored in the frame buffer for each vpath assigned to the given + * function (hldev) have been sent to the host. + */ +void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev) +{ + int i, total_count = 0; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!(hldev->vpaths_deployed & vxge_mBIT(i))) + continue; + + total_count += vxge_hw_vpath_wait_receive_idle(hldev, i); + if (total_count >= VXGE_HW_MAX_POLLING_COUNT) + break; + } +} + +/* + * __vxge_hw_device_register_poll + * Will poll certain register for specified amount of time. + * Will poll until masked bit is not cleared. + */ +static enum vxge_hw_status +__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) +{ + u64 val64; + u32 i = 0; + + udelay(10); + + do { + val64 = readq(reg); + if (!(val64 & mask)) + return VXGE_HW_OK; + udelay(100); + } while (++i <= 9); + + i = 0; + do { + val64 = readq(reg); + if (!(val64 & mask)) + return VXGE_HW_OK; + mdelay(1); + } while (++i <= max_millis); + + return VXGE_HW_FAIL; +} + +static inline enum vxge_hw_status +__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, + u64 mask, u32 max_millis) +{ + __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); + wmb(); + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); + wmb(); + + return __vxge_hw_device_register_poll(addr, mask, max_millis); +} + +static enum vxge_hw_status +vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action, + u32 fw_memo, u32 offset, u64 *data0, u64 *data1, + u64 *steer_ctrl) +{ + struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; + enum vxge_hw_status status; + u64 val64; + u32 retry = 0, max_retry = 3; + + spin_lock(&vpath->lock); + if (!vpath->vp_open) { + spin_unlock(&vpath->lock); + max_retry = 100; + } + + writeq(*data0, &vp_reg->rts_access_steer_data0); + writeq(*data1, &vp_reg->rts_access_steer_data1); + wmb(); + + val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) | + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE | + *steer_ctrl; + + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + /* The __vxge_hw_device_register_poll can udelay for a significant + * amount of time, blocking other process from the CPU. If it delays + * for ~5secs, a NMI error can occur. A way around this is to give up + * the processor via msleep, but this is not allowed is under lock. + * So, only allow it to sleep for ~4secs if open. Otherwise, delay for + * 1sec and sleep for 10ms until the firmware operation has completed + * or timed-out. 
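+ *
+ * Concretely: with the vpath open, vpath->lock is still held here and
+ * only the short mdelay-based poll is retried (max_retry == 3); with
+ * the vpath closed, the lock was dropped above and each retry may
+ * msleep(20) (max_retry == 100).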
+ */ + while ((status != VXGE_HW_OK) && retry++ < max_retry) { + if (!vpath->vp_open) + msleep(20); + status = __vxge_hw_device_register_poll( + &vp_reg->rts_access_steer_ctrl, + VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE, + VXGE_HW_DEF_DEVICE_POLL_MILLIS); + } + + if (status != VXGE_HW_OK) + goto out; + + val64 = readq(&vp_reg->rts_access_steer_ctrl); + if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { + *data0 = readq(&vp_reg->rts_access_steer_data0); + *data1 = readq(&vp_reg->rts_access_steer_data1); + *steer_ctrl = val64; + } else + status = VXGE_HW_FAIL; + +out: + if (vpath->vp_open) + spin_unlock(&vpath->lock); + return status; +} + +enum vxge_hw_status +vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major, + u32 *minor, u32 *build) +{ + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; + + vpath = &hldev->virtual_paths[hldev->first_vp_id]; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_READ, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); + *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); + *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); + + return status; +} + +enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev) +{ + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; + u32 ret; + + vpath = &hldev->virtual_paths[hldev->first_vp_id]; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_COMMIT, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__); + goto exit; + } + + ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F; + if (ret != 1) { + vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d", + __func__, ret); + status = VXGE_HW_FAIL; + } + +exit: + return status; +} + +enum vxge_hw_status +vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size) +{ + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; + int ret_code, sec_code; + + vpath = &hldev->virtual_paths[hldev->first_vp_id]; + + /* send upgrade start command */ + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_START, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed", + __func__); + return status; + } + + /* Transfer fw image to adapter 16 bytes at a time */ + for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) { + steer_ctrl = 0; + + /* The next 128bits of fwdata to be loaded onto the adapter */ + data0 = *((u64 *)fwdata); + data1 = *((u64 *)fwdata + 1); + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_UPGRADE_ACTION, + VXGE_HW_FW_UPGRADE_MEMO, + VXGE_HW_FW_UPGRADE_OFFSET_SEND, + &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed", + __func__); + goto out; + } + + ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0); + switch (ret_code) { + case VXGE_HW_FW_UPGRADE_OK: + /* All OK, send next 16 bytes. 
*/ + break; + case VXGE_FW_UPGRADE_BYTES2SKIP: + /* skip bytes in the stream */ + fwdata += (data0 >> 8) & 0xFFFFFFFF; + break; + case VXGE_HW_FW_UPGRADE_DONE: + goto out; + case VXGE_HW_FW_UPGRADE_ERR: + sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0); + switch (sec_code) { + case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: + case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: + printk(KERN_ERR + "corrupted data from .ncf file\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: + case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: + printk(KERN_ERR "invalid .ncf file\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: + printk(KERN_ERR "buffer overflow\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: + printk(KERN_ERR "failed to flash the image\n"); + break; + case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: + printk(KERN_ERR + "generic error. Unknown error type\n"); + break; + default: + printk(KERN_ERR "Unknown error of type %d\n", + sec_code); + break; + } + status = VXGE_HW_FAIL; + goto out; + default: + printk(KERN_ERR "Unknown FW error: %d\n", ret_code); + status = VXGE_HW_FAIL; + goto out; + } + /* point to next 16 bytes */ + fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE; + } +out: + return status; +} + +enum vxge_hw_status +vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, + struct eprom_image *img) +{ + u64 data0 = 0, data1 = 0, steer_ctrl = 0; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status; + int i; + + vpath = &hldev->virtual_paths[hldev->first_vp_id]; + + for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { + data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i); + data1 = steer_ctrl = 0; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_API_GET_EPROM_REV, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + break; + + img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0); + img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0); + img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0); + img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0); + } + + return status; +} + +/* + * __vxge_hw_channel_free - Free memory allocated for channel + * This function deallocates memory from the channel and various arrays + * in the channel + */ +static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) +{ + kfree(channel->work_arr); + kfree(channel->free_arr); + kfree(channel->reserve_arr); + kfree(channel->orig_arr); + kfree(channel); +} + +/* + * __vxge_hw_channel_initialize - Initialize a channel + * This function initializes a channel by properly setting the + * various references + */ +static enum vxge_hw_status +__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) +{ + u32 i; + struct __vxge_hw_virtualpath *vpath; + + vpath = channel->vph->vpath; + + if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) { + for (i = 0; i < channel->length; i++) + channel->orig_arr[i] = channel->reserve_arr[i]; + } + + switch (channel->type) { + case VXGE_HW_CHANNEL_TYPE_FIFO: + vpath->fifoh = (struct __vxge_hw_fifo *)channel; + channel->stats = &((struct __vxge_hw_fifo *) + channel)->stats->common_stats; + break; + case VXGE_HW_CHANNEL_TYPE_RING: + vpath->ringh = (struct __vxge_hw_ring *)channel; + channel->stats = &((struct __vxge_hw_ring *) + channel)->stats->common_stats; + break; + default: + break; + } + + return VXGE_HW_OK; +} + 
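+/*
+ * The channel keeps four descriptor arrays: reserve_arr, work_arr and
+ * free_arr track descriptors as they move through the reserve/post/
+ * complete cycle, while orig_arr (filled in above from reserve_arr)
+ * preserves the initial ordering so __vxge_hw_channel_reset() below can
+ * restore the channel to its freshly initialized state.
+ */
+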
+/*
+ * __vxge_hw_channel_reset - Resets a channel
+ * This function resets a channel by properly setting the various references
+ */
+static enum vxge_hw_status
+__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
+{
+ u32 i;
+
+ for (i = 0; i < channel->length; i++) {
+ if (channel->reserve_arr != NULL)
+ channel->reserve_arr[i] = channel->orig_arr[i];
+ if (channel->free_arr != NULL)
+ channel->free_arr[i] = NULL;
+ if (channel->work_arr != NULL)
+ channel->work_arr[i] = NULL;
+ }
+ channel->free_ptr = channel->length;
+ channel->reserve_ptr = channel->length;
+ channel->reserve_top = 0;
+ channel->post_index = 0;
+ channel->compl_index = 0;
+
+ return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_pci_e_init
+ * Initialize certain PCI/PCI-X configuration registers
+ * with recommended values. Save config space for future hw resets.
+ */
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+{
+ u16 cmd = 0;
+
+ /* Set the PErr Response bit and SERR in PCI command register. */
+ pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
+ cmd |= 0x140;
+ pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
+
+ pci_save_state(hldev->pdev);
+}
+
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
+ * in progress
+ * This routine checks that the vpath reset in progress register reads zero
+ */
+static enum vxge_hw_status
+__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
+{
+ enum vxge_hw_status status;
+ status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
+ VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
+ VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+ return status;
+}
+
+/*
+ * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
+ * Set the swapper bits appropriately for the legacy section.
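+ *
+ * The toc_swapper_fb register is expected to read back a known pattern;
+ * depending on whether it appears byte swapped, bit flipped or both, the
+ * PIFM read/write swap and flip enables are programmed accordingly and
+ * the register is re-read to confirm the setting took effect.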
+ */ +static enum vxge_hw_status +__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = readq(&legacy_reg->toc_swapper_fb); + + wmb(); + + switch (val64) { + case VXGE_HW_SWAPPER_INITIAL_VALUE: + return status; + + case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED: + writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_rd_swap_en); + writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, + &legacy_reg->pifm_rd_flip_en); + writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_wr_swap_en); + writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, + &legacy_reg->pifm_wr_flip_en); + break; + + case VXGE_HW_SWAPPER_BYTE_SWAPPED: + writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_rd_swap_en); + writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE, + &legacy_reg->pifm_wr_swap_en); + break; + + case VXGE_HW_SWAPPER_BIT_FLIPPED: + writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE, + &legacy_reg->pifm_rd_flip_en); + writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE, + &legacy_reg->pifm_wr_flip_en); + break; + } + + wmb(); + + val64 = readq(&legacy_reg->toc_swapper_fb); + + if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE) + status = VXGE_HW_ERR_SWAPPER_CTRL; + + return status; +} + +/* + * __vxge_hw_device_toc_get + * This routine sets the swapper and reads the toc pointer and returns the + * memory mapped address of the toc + */ +static struct vxge_hw_toc_reg __iomem * +__vxge_hw_device_toc_get(void __iomem *bar0) +{ + u64 val64; + struct vxge_hw_toc_reg __iomem *toc = NULL; + enum vxge_hw_status status; + + struct vxge_hw_legacy_reg __iomem *legacy_reg = + (struct vxge_hw_legacy_reg __iomem *)bar0; + + status = __vxge_hw_legacy_swapper_set(legacy_reg); + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&legacy_reg->toc_first_pointer); + toc = bar0 + val64; +exit: + return toc; +} + +/* + * __vxge_hw_device_reg_addr_get + * This routine sets the swapper and reads the toc pointer and initializes the + * register location pointers in the device object. It waits until the ric is + * completed initializing registers. 
+ */ +static enum vxge_hw_status +__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) +{ + u64 val64; + u32 i; + enum vxge_hw_status status = VXGE_HW_OK; + + hldev->legacy_reg = hldev->bar0; + + hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0); + if (hldev->toc_reg == NULL) { + status = VXGE_HW_FAIL; + goto exit; + } + + val64 = readq(&hldev->toc_reg->toc_common_pointer); + hldev->common_reg = hldev->bar0 + val64; + + val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer); + hldev->mrpcim_reg = hldev->bar0 + val64; + + for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) { + val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]); + hldev->srpcim_reg[i] = hldev->bar0 + val64; + } + + for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) { + val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]); + hldev->vpmgmt_reg[i] = hldev->bar0 + val64; + } + + for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) { + val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]); + hldev->vpath_reg[i] = hldev->bar0 + val64; + } + + val64 = readq(&hldev->toc_reg->toc_kdfc); + + switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) { + case 0: + hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64) ; + break; + default: + break; + } + + status = __vxge_hw_device_vpath_reset_in_prog_check( + (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog); +exit: + return status; +} + +/* + * __vxge_hw_device_access_rights_get: Get Access Rights of the driver + * This routine returns the Access Rights of the driver + */ +static u32 +__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id) +{ + u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH; + + switch (host_type) { + case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: + if (func_id == 0) { + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + } + break; + case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + break; + case VXGE_HW_NO_MR_SR_VH0_FUNCTION0: + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + break; + case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION: + case VXGE_HW_SR_VH_VIRTUAL_FUNCTION: + case VXGE_HW_MR_SR_VH0_INVALID_CONFIG: + break; + case VXGE_HW_SR_VH_FUNCTION0: + case VXGE_HW_VH_NORMAL_FUNCTION: + access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; + break; + } + + return access_rights; +} +/* + * __vxge_hw_device_is_privilaged + * This routine checks if the device function is privilaged or not + */ + +enum vxge_hw_status +__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) +{ + if (__vxge_hw_device_access_rights_get(host_type, + func_id) & + VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) + return VXGE_HW_OK; + else + return VXGE_HW_ERR_PRIVILAGED_OPEARATION; +} + +/* + * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. + * Returns the function number of the vpath. 
+ */ +static u32 +__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) +{ + u64 val64; + + val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1); + + return + (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64); +} + +/* + * __vxge_hw_device_host_info_get + * This routine returns the host type assignments + */ +static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) +{ + u64 val64; + u32 i; + + val64 = readq(&hldev->common_reg->host_type_assignments); + + hldev->host_type = + (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); + + hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!(hldev->vpath_assignments & vxge_mBIT(i))) + continue; + + hldev->func_id = + __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]); + + hldev->access_rights = __vxge_hw_device_access_rights_get( + hldev->host_type, hldev->func_id); + + hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN; + hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i]; + + hldev->first_vp_id = i; + break; + } +} + +/* + * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as + * link width and signalling rate. + */ +static enum vxge_hw_status +__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev) +{ + struct pci_dev *dev = hldev->pdev; + u16 lnk; + + /* Get the negotiated link width and speed from PCI config space */ + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk); + + if ((lnk & PCI_EXP_LNKSTA_CLS) != 1) + return VXGE_HW_ERR_INVALID_PCI_INFO; + + switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) { + case PCIE_LNK_WIDTH_RESRV: + case PCIE_LNK_X1: + case PCIE_LNK_X2: + case PCIE_LNK_X4: + case PCIE_LNK_X8: + break; + default: + return VXGE_HW_ERR_INVALID_PCI_INFO; + } + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_initialize + * Initialize Titan-V hardware. 
+ */ +static enum vxge_hw_status +__vxge_hw_device_initialize(struct __vxge_hw_device *hldev) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type, + hldev->func_id)) { + /* Validate the pci-e link width and speed */ + status = __vxge_hw_verify_pci_e_info(hldev); + if (status != VXGE_HW_OK) + goto exit; + } + +exit: + return status; +} + +/* + * __vxge_hw_vpath_fw_ver_get - Get the fw version + * Returns FW Version + */ +static enum vxge_hw_status +__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_device_hw_info *hw_info) +{ + struct vxge_hw_device_version *fw_version = &hw_info->fw_version; + struct vxge_hw_device_date *fw_date = &hw_info->fw_date; + struct vxge_hw_device_version *flash_version = &hw_info->flash_version; + struct vxge_hw_device_date *flash_date = &hw_info->flash_date; + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + goto exit; + + fw_date->day = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0); + fw_date->month = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0); + fw_date->year = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0); + + snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", + fw_date->month, fw_date->day, fw_date->year); + + fw_version->major = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0); + fw_version->minor = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0); + fw_version->build = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0); + + snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", + fw_version->major, fw_version->minor, fw_version->build); + + flash_date->day = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1); + flash_date->month = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1); + flash_date->year = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1); + + snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d", + flash_date->month, flash_date->day, flash_date->year); + + flash_version->major = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1); + flash_version->minor = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1); + flash_version->build = + (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1); + + snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d", + flash_version->major, flash_version->minor, + flash_version->build); + +exit: + return status; +} + +/* + * __vxge_hw_vpath_card_info_get - Get the serial numbers, + * part number and product description. 
+ */ +static enum vxge_hw_status +__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_device_hw_info *hw_info) +{ + enum vxge_hw_status status; + u64 data0, data1 = 0, steer_ctrl = 0; + u8 *serial_number = hw_info->serial_number; + u8 *part_number = hw_info->part_number; + u8 *product_desc = hw_info->product_desc; + u32 i, j = 0; + + data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + ((u64 *)serial_number)[0] = be64_to_cpu(data0); + ((u64 *)serial_number)[1] = be64_to_cpu(data1); + + data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER; + data1 = steer_ctrl = 0; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + ((u64 *)part_number)[0] = be64_to_cpu(data0); + ((u64 *)part_number)[1] = be64_to_cpu(data1); + + for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; + i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { + data0 = i; + data1 = steer_ctrl = 0; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + ((u64 *)product_desc)[j++] = be64_to_cpu(data0); + ((u64 *)product_desc)[j++] = be64_to_cpu(data1); + } + + return status; +} + +/* + * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode + * Returns pci function mode + */ +static enum vxge_hw_status +__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_device_hw_info *hw_info) +{ + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + + data0 = 0; + + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_FW_API_GET_FUNC_MODE, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + return status; + + hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0); + return status; +} + +/* + * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath + * from MAC address table. 
+ */ +static enum vxge_hw_status +__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath, + u8 *macaddr, u8 *macaddr_mask) +{ + u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, + data0 = 0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + int i; + + do { + status = vxge_hw_vpath_fw_api(vpath, action, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, + 0, &data0, &data1, &steer_ctrl); + if (status != VXGE_HW_OK) + goto exit; + + data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0); + data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK( + data1); + + for (i = ETH_ALEN; i > 0; i--) { + macaddr[i - 1] = (u8) (data0 & 0xFF); + data0 >>= 8; + + macaddr_mask[i - 1] = (u8) (data1 & 0xFF); + data1 >>= 8; + } + + action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY; + data0 = 0, data1 = 0, steer_ctrl = 0; + + } while (!is_valid_ether_addr(macaddr)); +exit: + return status; +} + +/** + * vxge_hw_device_hw_info_get - Get the hw information + * Returns the vpath mask that has the bits set for each vpath allocated + * for the driver, FW version information, and the first mac address for + * each vpath + */ +enum vxge_hw_status +vxge_hw_device_hw_info_get(void __iomem *bar0, + struct vxge_hw_device_hw_info *hw_info) +{ + u32 i; + u64 val64; + struct vxge_hw_toc_reg __iomem *toc; + struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; + struct vxge_hw_common_reg __iomem *common_reg; + struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; + enum vxge_hw_status status; + struct __vxge_hw_virtualpath vpath; + + memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); + + toc = __vxge_hw_device_toc_get(bar0); + if (toc == NULL) { + status = VXGE_HW_ERR_CRITICAL; + goto exit; + } + + val64 = readq(&toc->toc_common_pointer); + common_reg = bar0 + val64; + + status = __vxge_hw_device_vpath_reset_in_prog_check( + (u64 __iomem *)&common_reg->vpath_rst_in_prog); + if (status != VXGE_HW_OK) + goto exit; + + hw_info->vpath_mask = readq(&common_reg->vpath_assignments); + + val64 = readq(&common_reg->host_type_assignments); + + hw_info->host_type = + (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!((hw_info->vpath_mask) & vxge_mBIT(i))) + continue; + + val64 = readq(&toc->toc_vpmgmt_pointer[i]); + + vpmgmt_reg = bar0 + val64; + + hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg); + if (__vxge_hw_device_access_rights_get(hw_info->host_type, + hw_info->func_id) & + VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { + + val64 = readq(&toc->toc_mrpcim_pointer); + + mrpcim_reg = bar0 + val64; + + writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask); + wmb(); + } + + val64 = readq(&toc->toc_vpath_pointer[i]); + + spin_lock_init(&vpath.lock); + vpath.vp_reg = bar0 + val64; + vpath.vp_open = VXGE_HW_VP_NOT_OPEN; + + status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_card_info_get(&vpath, hw_info); + if (status != VXGE_HW_OK) + goto exit; + + break; + } + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!((hw_info->vpath_mask) & vxge_mBIT(i))) + continue; + + val64 = readq(&toc->toc_vpath_pointer[i]); + vpath.vp_reg = bar0 + val64; + vpath.vp_open = VXGE_HW_VP_NOT_OPEN; + + status = __vxge_hw_vpath_addr_get(&vpath, + hw_info->mac_addrs[i], + hw_info->mac_addr_masks[i]); + if (status != VXGE_HW_OK) + goto exit; + } +exit: + 
return status; +} + +/* + * __vxge_hw_blockpool_destroy - Deallocates the block pool + */ +static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) +{ + struct __vxge_hw_device *hldev; + struct list_head *p, *n; + u16 ret; + + if (blockpool == NULL) { + ret = 1; + goto exit; + } + + hldev = blockpool->hldev; + + list_for_each_safe(p, n, &blockpool->free_block_list) { + pci_unmap_single(hldev->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + PCI_DMA_BIDIRECTIONAL); + + vxge_os_dma_free(hldev->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->memblock, + &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); + + list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); + kfree(p); + blockpool->pool_size--; + } + + list_for_each_safe(p, n, &blockpool->free_entry_list) { + list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); + kfree((void *)p); + } + ret = 0; +exit: + return; +} + +/* + * __vxge_hw_blockpool_create - Create block pool + */ +static enum vxge_hw_status +__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, + struct __vxge_hw_blockpool *blockpool, + u32 pool_size, + u32 pool_max) +{ + u32 i; + struct __vxge_hw_blockpool_entry *entry = NULL; + void *memblock; + dma_addr_t dma_addr; + struct pci_dev *dma_handle; + struct pci_dev *acc_handle; + enum vxge_hw_status status = VXGE_HW_OK; + + if (blockpool == NULL) { + status = VXGE_HW_FAIL; + goto blockpool_create_exit; + } + + blockpool->hldev = hldev; + blockpool->block_size = VXGE_HW_BLOCK_SIZE; + blockpool->pool_size = 0; + blockpool->pool_max = pool_max; + blockpool->req_out = 0; + + INIT_LIST_HEAD(&blockpool->free_block_list); + INIT_LIST_HEAD(&blockpool->free_entry_list); + + for (i = 0; i < pool_size + pool_max; i++) { + entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry), + GFP_KERNEL); + if (entry == NULL) { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + list_add(&entry->item, &blockpool->free_entry_list); + } + + for (i = 0; i < pool_size; i++) { + memblock = vxge_os_dma_malloc( + hldev->pdev, + VXGE_HW_BLOCK_SIZE, + &dma_handle, + &acc_handle); + if (memblock == NULL) { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + + dma_addr = pci_map_single(hldev->pdev, memblock, + VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); + if (unlikely(pci_dma_mapping_error(hldev->pdev, + dma_addr))) { + vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry == NULL) + entry = + kzalloc(sizeof(struct __vxge_hw_blockpool_entry), + GFP_KERNEL); + if (entry != NULL) { + list_del(&entry->item); + entry->length = VXGE_HW_BLOCK_SIZE; + entry->memblock = memblock; + entry->dma_addr = dma_addr; + entry->acc_handle = acc_handle; + entry->dma_handle = dma_handle; + list_add(&entry->item, + &blockpool->free_block_list); + blockpool->pool_size++; + } else { + __vxge_hw_blockpool_destroy(blockpool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto blockpool_create_exit; + } + } + +blockpool_create_exit: + return status; +} + +/* + * __vxge_hw_device_fifo_config_check - Check fifo configuration. 
+ * Check the fifo configuration + */ +static enum vxge_hw_status +__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) +{ + if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || + (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) + return VXGE_HW_BADCFG_FIFO_BLOCKS; + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_vpath_config_check - Check vpath configuration. + * Check the vpath configuration + */ +static enum vxge_hw_status +__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) +{ + enum vxge_hw_status status; + + if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || + (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX)) + return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; + + status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); + if (status != VXGE_HW_OK) + return status; + + if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && + ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) || + (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU))) + return VXGE_HW_BADCFG_VPATH_MTU; + + if ((vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && + (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) && + (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE)) + return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG; + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_config_check - Check device configuration. + * Check the device configuration + */ +static enum vxge_hw_status +__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) +{ + u32 i; + enum vxge_hw_status status; + + if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && + (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) + return VXGE_HW_BADCFG_INTR_MODE; + + if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && + (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) + return VXGE_HW_BADCFG_RTS_MAC_EN; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + status = __vxge_hw_device_vpath_config_check( + &new_config->vp_config[i]); + if (status != VXGE_HW_OK) + return status; + } + + return VXGE_HW_OK; +} + +/* + * vxge_hw_device_initialize - Initialize Titan device. + * Initialize Titan device. Note that all the arguments of this public API + * are 'IN', including @hldev. Driver cooperates with + * OS to find new Titan device, locate its PCI and memory spaces. + * + * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW + * to enable the latter to perform Titan hardware initialization. 
+ */ +enum vxge_hw_status +vxge_hw_device_initialize( + struct __vxge_hw_device **devh, + struct vxge_hw_device_attr *attr, + struct vxge_hw_device_config *device_config) +{ + u32 i; + u32 nblocks = 0; + struct __vxge_hw_device *hldev = NULL; + enum vxge_hw_status status = VXGE_HW_OK; + + status = __vxge_hw_device_config_check(device_config); + if (status != VXGE_HW_OK) + goto exit; + + hldev = vzalloc(sizeof(struct __vxge_hw_device)); + if (hldev == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + hldev->magic = VXGE_HW_DEVICE_MAGIC; + + vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); + + /* apply config */ + memcpy(&hldev->config, device_config, + sizeof(struct vxge_hw_device_config)); + + hldev->bar0 = attr->bar0; + hldev->pdev = attr->pdev; + + hldev->uld_callbacks = attr->uld_callbacks; + + __vxge_hw_device_pci_e_init(hldev); + + status = __vxge_hw_device_reg_addr_get(hldev); + if (status != VXGE_HW_OK) { + vfree(hldev); + goto exit; + } + + __vxge_hw_device_host_info_get(hldev); + + /* Incrementing for stats blocks */ + nblocks++; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!(hldev->vpath_assignments & vxge_mBIT(i))) + continue; + + if (device_config->vp_config[i].ring.enable == + VXGE_HW_RING_ENABLE) + nblocks += device_config->vp_config[i].ring.ring_blocks; + + if (device_config->vp_config[i].fifo.enable == + VXGE_HW_FIFO_ENABLE) + nblocks += device_config->vp_config[i].fifo.fifo_blocks; + nblocks++; + } + + if (__vxge_hw_blockpool_create(hldev, + &hldev->block_pool, + device_config->dma_blockpool_initial + nblocks, + device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) { + + vxge_hw_device_terminate(hldev); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + status = __vxge_hw_device_initialize(hldev); + if (status != VXGE_HW_OK) { + vxge_hw_device_terminate(hldev); + goto exit; + } + + *devh = hldev; +exit: + return status; +} + +/* + * vxge_hw_device_terminate - Terminate Titan device. + * Terminate HW device. 
+ */ +void +vxge_hw_device_terminate(struct __vxge_hw_device *hldev) +{ + vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC); + + hldev->magic = VXGE_HW_DEVICE_DEAD; + __vxge_hw_blockpool_destroy(&hldev->block_pool); + vfree(hldev); +} + +/* + * __vxge_hw_vpath_stats_access - Get the statistics from the given location + * and offset and perform an operation + */ +static enum vxge_hw_status +__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, + u32 operation, u32 offset, u64 *stat) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto vpath_stats_access_exit; + } + + vp_reg = vpath->vp_reg; + + val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) | + VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE | + VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset); + + status = __vxge_hw_pio_mem_write64(val64, + &vp_reg->xmac_stats_access_cmd, + VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE, + vpath->hldev->config.device_poll_millis); + if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) + *stat = readq(&vp_reg->xmac_stats_access_data); + else + *stat = 0; + +vpath_stats_access_exit: + return status; +} + +/* + * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath + */ +static enum vxge_hw_status +__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) +{ + u64 *val64; + int i; + u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = (u64 *)vpath_tx_stats; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) { + status = __vxge_hw_vpath_stats_access(vpath, + VXGE_HW_STATS_OP_READ, + offset, val64); + if (status != VXGE_HW_OK) + goto exit; + offset++; + val64++; + } +exit: + return status; +} + +/* + * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath + */ +static enum vxge_hw_status +__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) +{ + u64 *val64; + enum vxge_hw_status status = VXGE_HW_OK; + int i; + u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET; + val64 = (u64 *) vpath_rx_stats; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) { + status = __vxge_hw_vpath_stats_access(vpath, + VXGE_HW_STATS_OP_READ, + offset >> 3, val64); + if (status != VXGE_HW_OK) + goto exit; + + offset += 8; + val64++; + } +exit: + return status; +} + +/* + * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. 
+ */ +static enum vxge_hw_status +__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, + struct vxge_hw_vpath_stats_hw_info *hw_stats) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + vp_reg = vpath->vp_reg; + + val64 = readq(&vp_reg->vpath_debug_stats0); + hw_stats->ini_num_mwr_sent = + (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64); + + val64 = readq(&vp_reg->vpath_debug_stats1); + hw_stats->ini_num_mrd_sent = + (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64); + + val64 = readq(&vp_reg->vpath_debug_stats2); + hw_stats->ini_num_cpl_rcvd = + (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64); + + val64 = readq(&vp_reg->vpath_debug_stats3); + hw_stats->ini_num_mwr_byte_sent = + VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64); + + val64 = readq(&vp_reg->vpath_debug_stats4); + hw_stats->ini_num_cpl_byte_rcvd = + VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64); + + val64 = readq(&vp_reg->vpath_debug_stats5); + hw_stats->wrcrdtarb_xoff = + (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64); + + val64 = readq(&vp_reg->vpath_debug_stats6); + hw_stats->rdcrdtarb_xoff = + (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64); + + val64 = readq(&vp_reg->vpath_genstats_count01); + hw_stats->vpath_genstats_count0 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count01); + hw_stats->vpath_genstats_count1 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count23); + hw_stats->vpath_genstats_count2 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count01); + hw_stats->vpath_genstats_count3 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count4); + hw_stats->vpath_genstats_count4 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4( + val64); + + val64 = readq(&vp_reg->vpath_genstats_count5); + hw_stats->vpath_genstats_count5 = + (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5( + val64); + + status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats); + if (status != VXGE_HW_OK) + goto exit; + + VXGE_HW_VPATH_STATS_PIO_READ( + VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET); + + hw_stats->prog_event_vnum0 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64); + + hw_stats->prog_event_vnum1 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64); + + VXGE_HW_VPATH_STATS_PIO_READ( + VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET); + + hw_stats->prog_event_vnum2 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64); + + hw_stats->prog_event_vnum3 = + (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64); + + val64 = readq(&vp_reg->rx_multi_cast_stats); + hw_stats->rx_multi_cast_frame_discard = + (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64); + + val64 = readq(&vp_reg->rx_frm_transferred); + hw_stats->rx_frm_transferred = + (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64); + + val64 = readq(&vp_reg->rxd_returned); + hw_stats->rxd_returned = + (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64); + 
+ val64 = readq(&vp_reg->dbg_stats_rx_mpa); + hw_stats->rx_mpa_len_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64); + hw_stats->rx_mpa_mrk_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64); + hw_stats->rx_mpa_crc_fail_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64); + + val64 = readq(&vp_reg->dbg_stats_rx_fau); + hw_stats->rx_permitted_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64); + hw_stats->rx_vp_reset_discarded_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64); + hw_stats->rx_wol_frms = + (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64); + + val64 = readq(&vp_reg->tx_vp_reset_discarded_frms); + hw_stats->tx_vp_reset_discarded_frms = + (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS( + val64); +exit: + return status; +} + +/* + * vxge_hw_device_stats_get - Get the device hw statistics. + * Returns the vpath h/w stats for the device. + */ +enum vxge_hw_status +vxge_hw_device_stats_get(struct __vxge_hw_device *hldev, + struct vxge_hw_device_stats_hw_info *hw_stats) +{ + u32 i; + enum vxge_hw_status status = VXGE_HW_OK; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || + (hldev->virtual_paths[i].vp_open == + VXGE_HW_VP_NOT_OPEN)) + continue; + + memcpy(hldev->virtual_paths[i].hw_stats_sav, + hldev->virtual_paths[i].hw_stats, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + status = __vxge_hw_vpath_stats_get( + &hldev->virtual_paths[i], + hldev->virtual_paths[i].hw_stats); + } + + memcpy(hw_stats, &hldev->stats.hw_dev_info_stats, + sizeof(struct vxge_hw_device_stats_hw_info)); + + return status; +} + +/* + * vxge_hw_driver_stats_get - Get the device sw statistics. + * Returns the vpath s/w stats for the device. + */ +enum vxge_hw_status vxge_hw_driver_stats_get( + struct __vxge_hw_device *hldev, + struct vxge_hw_device_stats_sw_info *sw_stats) +{ + memcpy(sw_stats, &hldev->stats.sw_dev_info_stats, + sizeof(struct vxge_hw_device_stats_sw_info)); + + return VXGE_HW_OK; +} + +/* + * vxge_hw_mrpcim_stats_access - Access the statistics from the given location + * and offset and perform an operation + * Get the statistics from the given location and offset. 
+ */ +enum vxge_hw_status +vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev, + u32 operation, u32 location, u32 offset, u64 *stat) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + status = __vxge_hw_device_is_privilaged(hldev->host_type, + hldev->func_id); + if (status != VXGE_HW_OK) + goto exit; + + val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) | + VXGE_HW_XMAC_STATS_SYS_CMD_STROBE | + VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) | + VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset); + + status = __vxge_hw_pio_mem_write64(val64, + &hldev->mrpcim_reg->xmac_stats_sys_cmd, + VXGE_HW_XMAC_STATS_SYS_CMD_STROBE, + hldev->config.device_poll_millis); + + if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ)) + *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data); + else + *stat = 0; +exit: + return status; +} + +/* + * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port + * Get the Statistics on aggregate port + */ +static enum vxge_hw_status +vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, + struct vxge_hw_xmac_aggr_stats *aggr_stats) +{ + u64 *val64; + int i; + u32 offset = VXGE_HW_STATS_AGGRn_OFFSET; + enum vxge_hw_status status = VXGE_HW_OK; + + val64 = (u64 *)aggr_stats; + + status = __vxge_hw_device_is_privilaged(hldev->host_type, + hldev->func_id); + if (status != VXGE_HW_OK) + goto exit; + + for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) { + status = vxge_hw_mrpcim_stats_access(hldev, + VXGE_HW_STATS_OP_READ, + VXGE_HW_STATS_LOC_AGGR, + ((offset + (104 * port)) >> 3), val64); + if (status != VXGE_HW_OK) + goto exit; + + offset += 8; + val64++; + } +exit: + return status; +} + +/* + * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port + * Get the Statistics on port + */ +static enum vxge_hw_status +vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, + struct vxge_hw_xmac_port_stats *port_stats) +{ + u64 *val64; + enum vxge_hw_status status = VXGE_HW_OK; + int i; + u32 offset = 0x0; + val64 = (u64 *) port_stats; + + status = __vxge_hw_device_is_privilaged(hldev->host_type, + hldev->func_id); + if (status != VXGE_HW_OK) + goto exit; + + for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) { + status = vxge_hw_mrpcim_stats_access(hldev, + VXGE_HW_STATS_OP_READ, + VXGE_HW_STATS_LOC_AGGR, + ((offset + (608 * port)) >> 3), val64); + if (status != VXGE_HW_OK) + goto exit; + + offset += 8; + val64++; + } + +exit: + return status; +} + +/* + * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics + * Get the XMAC Statistics + */ +enum vxge_hw_status +vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev, + struct vxge_hw_xmac_stats *xmac_stats) +{ + enum vxge_hw_status status = VXGE_HW_OK; + u32 i; + + status = vxge_hw_device_xmac_aggr_stats_get(hldev, + 0, &xmac_stats->aggr_stats[0]); + if (status != VXGE_HW_OK) + goto exit; + + status = vxge_hw_device_xmac_aggr_stats_get(hldev, + 1, &xmac_stats->aggr_stats[1]); + if (status != VXGE_HW_OK) + goto exit; + + for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { + + status = vxge_hw_device_xmac_port_stats_get(hldev, + i, &xmac_stats->port_stats[i]); + if (status != VXGE_HW_OK) + goto exit; + } + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpaths_deployed & vxge_mBIT(i))) + continue; + + status = __vxge_hw_vpath_xmac_tx_stats_get( + &hldev->virtual_paths[i], + &xmac_stats->vpath_tx_stats[i]); + if (status != VXGE_HW_OK) + goto exit; + + status = 
__vxge_hw_vpath_xmac_rx_stats_get(
+					&hldev->virtual_paths[i],
+					&xmac_stats->vpath_rx_stats[i]);
+		if (status != VXGE_HW_OK)
+			goto exit;
+	}
+exit:
+	return status;
+}
+
+/*
+ * vxge_hw_device_debug_set - Set the debug module, level and timestamp
+ * This routine is used to dynamically change the debug output
+ */
+void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
+			      enum vxge_debug_level level, u32 mask)
+{
+	if (hldev == NULL)
+		return;
+
+#if defined(VXGE_DEBUG_TRACE_MASK) || \
+	defined(VXGE_DEBUG_ERR_MASK)
+	hldev->debug_module_mask = mask;
+	hldev->debug_level = level;
+#endif
+
+#if defined(VXGE_DEBUG_ERR_MASK)
+	hldev->level_err = level & VXGE_ERR;
+#endif
+
+#if defined(VXGE_DEBUG_TRACE_MASK)
+	hldev->level_trace = level & VXGE_TRACE;
+#endif
+}
+
+/*
+ * vxge_hw_device_error_level_get - Get the error level
+ * This routine returns the current error level set
+ */
+u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
+{
+#if defined(VXGE_DEBUG_ERR_MASK)
+	if (hldev == NULL)
+		return VXGE_ERR;
+	else
+		return hldev->level_err;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * vxge_hw_device_trace_level_get - Get the trace level
+ * This routine returns the current trace level set
+ */
+u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
+{
+#if defined(VXGE_DEBUG_TRACE_MASK)
+	if (hldev == NULL)
+		return VXGE_TRACE;
+	else
+		return hldev->level_trace;
+#else
+	return 0;
+#endif
+}
+
+/*
+ * vxge_hw_device_getpause_data - Pause frame generation and reception.
+ * Returns the Pause frame generation and reception capability of the NIC.
+ */
+enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
+						 u32 port, u32 *tx, u32 *rx)
+{
+	u64 val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
+		status = VXGE_HW_ERR_INVALID_DEVICE;
+		goto exit;
+	}
+
+	if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
+		status = VXGE_HW_ERR_INVALID_PORT;
+		goto exit;
+	}
+
+	if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
+		status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
+		goto exit;
+	}
+
+	val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
+	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
+		*tx = 1;
+	if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
+		*rx = 1;
+exit:
+	return status;
+}
+
+/*
+ * vxge_hw_device_setpause_data - set/reset pause frame generation.
+ * It can be used to set or reset Pause frame generation or reception
+ * support of the NIC.
+ */ +enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, + u32 port, u32 tx, u32 rx) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { + status = VXGE_HW_ERR_INVALID_DEVICE; + goto exit; + } + + if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) { + status = VXGE_HW_ERR_INVALID_PORT; + goto exit; + } + + status = __vxge_hw_device_is_privilaged(hldev->host_type, + hldev->func_id); + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); + if (tx) + val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; + else + val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN; + if (rx) + val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; + else + val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN; + + writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); +exit: + return status; +} + +u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev) +{ + struct pci_dev *dev = hldev->pdev; + u16 lnk; + + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnk); + return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4; +} + +/* + * __vxge_hw_ring_block_memblock_idx - Return the memblock index + * This function returns the index of memory block + */ +static inline u32 +__vxge_hw_ring_block_memblock_idx(u8 *block) +{ + return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)); +} + +/* + * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index + * This function sets index to a memory block + */ +static inline void +__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx) +{ + *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx; +} + +/* + * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer + * in RxD block + * Sets the next block pointer in RxD block + */ +static inline void +__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next) +{ + *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next; +} + +/* + * __vxge_hw_ring_first_block_address_get - Returns the dma address of the + * first block + * Returns the dma address of the first RxD block + */ +static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) +{ + struct vxge_hw_mempool_dma *dma_object; + + dma_object = ring->mempool->memblocks_dma_arr; + vxge_assert(dma_object != NULL); + + return dma_object->addr; +} + +/* + * __vxge_hw_ring_item_dma_addr - Return the dma address of an item + * This function returns the dma address of a given item + */ +static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh, + void *item) +{ + u32 memblock_idx; + void *memblock; + struct vxge_hw_mempool_dma *memblock_dma_object; + ptrdiff_t dma_item_offset; + + /* get owner memblock index */ + memblock_idx = __vxge_hw_ring_block_memblock_idx(item); + + /* get owner memblock by memblock index */ + memblock = mempoolh->memblocks_arr[memblock_idx]; + + /* get memblock DMA object by memblock index */ + memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx; + + /* calculate offset in the memblock of this item */ + dma_item_offset = (u8 *)item - (u8 *)memblock; + + return memblock_dma_object->addr + dma_item_offset; +} + +/* + * __vxge_hw_ring_rxdblock_link - Link the RxD blocks + * This function returns the dma address of a given item + */ +static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh, + struct __vxge_hw_ring *ring, u32 from, + u32 to) +{ + u8 *to_item , *from_item; + dma_addr_t to_dma; + 
+ /* get "from" RxD block */ + from_item = mempoolh->items_arr[from]; + vxge_assert(from_item); + + /* get "to" RxD block */ + to_item = mempoolh->items_arr[to]; + vxge_assert(to_item); + + /* return address of the beginning of previous RxD block */ + to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item); + + /* set next pointer for this RxD block to point on + * previous item's DMA start address */ + __vxge_hw_ring_block_next_pointer_set(from_item, to_dma); +} + +/* + * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD + * block callback + * This function is callback passed to __vxge_hw_mempool_create to create memory + * pool for RxD block + */ +static void +__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh, + u32 memblock_index, + struct vxge_hw_mempool_dma *dma_object, + u32 index, u32 is_last) +{ + u32 i; + void *item = mempoolh->items_arr[index]; + struct __vxge_hw_ring *ring = + (struct __vxge_hw_ring *)mempoolh->userdata; + + /* format rxds array */ + for (i = 0; i < ring->rxds_per_block; i++) { + void *rxdblock_priv; + void *uld_priv; + struct vxge_hw_ring_rxd_1 *rxdp; + + u32 reserve_index = ring->channel.reserve_ptr - + (index * ring->rxds_per_block + i + 1); + u32 memblock_item_idx; + + ring->channel.reserve_arr[reserve_index] = ((u8 *)item) + + i * ring->rxd_size; + + /* Note: memblock_item_idx is index of the item within + * the memblock. For instance, in case of three RxD-blocks + * per memblock this value can be 0, 1 or 2. */ + rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh, + memblock_index, item, + &memblock_item_idx); + + rxdp = ring->channel.reserve_arr[reserve_index]; + + uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i); + + /* pre-format Host_Control */ + rxdp->host_control = (u64)(size_t)uld_priv; + } + + __vxge_hw_ring_block_memblock_idx_set(item, memblock_index); + + if (is_last) { + /* link last one with first one */ + __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0); + } + + if (index > 0) { + /* link this RxD block with previous one */ + __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index); + } +} + +/* + * __vxge_hw_ring_replenish - Initial replenish of RxDs + * This function replenishes the RxDs from reserve array to work array + */ +static enum vxge_hw_status +vxge_hw_ring_replenish(struct __vxge_hw_ring *ring) +{ + void *rxd; + struct __vxge_hw_channel *channel; + enum vxge_hw_status status = VXGE_HW_OK; + + channel = &ring->channel; + + while (vxge_hw_channel_dtr_count(channel) > 0) { + + status = vxge_hw_ring_rxd_reserve(ring, &rxd); + + vxge_assert(status == VXGE_HW_OK); + + if (ring->rxd_init) { + status = ring->rxd_init(rxd, channel->userdata); + if (status != VXGE_HW_OK) { + vxge_hw_ring_rxd_free(ring, rxd); + goto exit; + } + } + + vxge_hw_ring_rxd_post(ring, rxd); + } + status = VXGE_HW_OK; +exit: + return status; +} + +/* + * __vxge_hw_channel_allocate - Allocate memory for channel + * This function allocates required memory for the channel and various arrays + * in the channel + */ +static struct __vxge_hw_channel * +__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, + enum __vxge_hw_channel_type type, + u32 length, u32 per_dtr_space, + void *userdata) +{ + struct __vxge_hw_channel *channel; + struct __vxge_hw_device *hldev; + int size = 0; + u32 vp_id; + + hldev = vph->vpath->hldev; + vp_id = vph->vpath->vp_id; + + switch (type) { + case VXGE_HW_CHANNEL_TYPE_FIFO: + size = sizeof(struct __vxge_hw_fifo); + break; + case VXGE_HW_CHANNEL_TYPE_RING: + size = sizeof(struct 
__vxge_hw_ring); + break; + default: + break; + } + + channel = kzalloc(size, GFP_KERNEL); + if (channel == NULL) + goto exit0; + INIT_LIST_HEAD(&channel->item); + + channel->common_reg = hldev->common_reg; + channel->first_vp_id = hldev->first_vp_id; + channel->type = type; + channel->devh = hldev; + channel->vph = vph; + channel->userdata = userdata; + channel->per_dtr_space = per_dtr_space; + channel->length = length; + channel->vp_id = vp_id; + + channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->work_arr == NULL) + goto exit1; + + channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->free_arr == NULL) + goto exit1; + channel->free_ptr = length; + + channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->reserve_arr == NULL) + goto exit1; + channel->reserve_ptr = length; + channel->reserve_top = 0; + + channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); + if (channel->orig_arr == NULL) + goto exit1; + + return channel; +exit1: + __vxge_hw_channel_free(channel); + +exit0: + return NULL; +} + +/* + * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async + * Adds a block to block pool + */ +static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, + void *block_addr, + u32 length, + struct pci_dev *dma_h, + struct pci_dev *acc_handle) +{ + struct __vxge_hw_blockpool *blockpool; + struct __vxge_hw_blockpool_entry *entry = NULL; + dma_addr_t dma_addr; + enum vxge_hw_status status = VXGE_HW_OK; + u32 req_out; + + blockpool = &devh->block_pool; + + if (block_addr == NULL) { + blockpool->req_out--; + status = VXGE_HW_FAIL; + goto exit; + } + + dma_addr = pci_map_single(devh->pdev, block_addr, length, + PCI_DMA_BIDIRECTIONAL); + + if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { + vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); + blockpool->req_out--; + status = VXGE_HW_FAIL; + goto exit; + } + + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry == NULL) + entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry)); + else + list_del(&entry->item); + + if (entry != NULL) { + entry->length = length; + entry->memblock = block_addr; + entry->dma_addr = dma_addr; + entry->acc_handle = acc_handle; + entry->dma_handle = dma_h; + list_add(&entry->item, &blockpool->free_block_list); + blockpool->pool_size++; + status = VXGE_HW_OK; + } else + status = VXGE_HW_ERR_OUT_OF_MEMORY; + + blockpool->req_out--; + + req_out = blockpool->req_out; +exit: + return; +} + +static inline void +vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size) +{ + gfp_t flags; + void *vaddr; + + if (in_interrupt()) + flags = GFP_ATOMIC | GFP_DMA; + else + flags = GFP_KERNEL | GFP_DMA; + + vaddr = kmalloc((size), flags); + + vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev); +} + +/* + * __vxge_hw_blockpool_blocks_add - Request additional blocks + */ +static +void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool) +{ + u32 nreq = 0, i; + + if ((blockpool->pool_size + blockpool->req_out) < + VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) { + nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; + blockpool->req_out += nreq; + } + + for (i = 0; i < nreq; i++) + vxge_os_dma_malloc_async( + (blockpool->hldev)->pdev, + blockpool->hldev, VXGE_HW_BLOCK_SIZE); +} + +/* + * __vxge_hw_blockpool_malloc - Allocate a memory block from pool 
+ * Allocates a block of memory of given size, either from block pool + * or by calling vxge_os_dma_malloc() + */ +static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, + struct vxge_hw_mempool_dma *dma_object) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + void *memblock = NULL; + enum vxge_hw_status status = VXGE_HW_OK; + + blockpool = &devh->block_pool; + + if (size != blockpool->block_size) { + + memblock = vxge_os_dma_malloc(devh->pdev, size, + &dma_object->handle, + &dma_object->acc_handle); + + if (memblock == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + dma_object->addr = pci_map_single(devh->pdev, memblock, size, + PCI_DMA_BIDIRECTIONAL); + + if (unlikely(pci_dma_mapping_error(devh->pdev, + dma_object->addr))) { + vxge_os_dma_free(devh->pdev, memblock, + &dma_object->acc_handle); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + } else { + + if (!list_empty(&blockpool->free_block_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_block_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry != NULL) { + list_del(&entry->item); + dma_object->addr = entry->dma_addr; + dma_object->handle = entry->dma_handle; + dma_object->acc_handle = entry->acc_handle; + memblock = entry->memblock; + + list_add(&entry->item, + &blockpool->free_entry_list); + blockpool->pool_size--; + } + + if (memblock != NULL) + __vxge_hw_blockpool_blocks_add(blockpool); + } +exit: + return memblock; +} + +/* + * __vxge_hw_blockpool_blocks_remove - Free additional blocks + */ +static void +__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) +{ + struct list_head *p, *n; + + list_for_each_safe(p, n, &blockpool->free_block_list) { + + if (blockpool->pool_size < blockpool->pool_max) + break; + + pci_unmap_single( + (blockpool->hldev)->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + PCI_DMA_BIDIRECTIONAL); + + vxge_os_dma_free( + (blockpool->hldev)->pdev, + ((struct __vxge_hw_blockpool_entry *)p)->memblock, + &((struct __vxge_hw_blockpool_entry *)p)->acc_handle); + + list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); + + list_add(p, &blockpool->free_entry_list); + + blockpool->pool_size--; + + } +} + +/* + * __vxge_hw_blockpool_free - Frees the memory allcoated with + * __vxge_hw_blockpool_malloc + */ +static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, + void *memblock, u32 size, + struct vxge_hw_mempool_dma *dma_object) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + enum vxge_hw_status status = VXGE_HW_OK; + + blockpool = &devh->block_pool; + + if (size != blockpool->block_size) { + pci_unmap_single(devh->pdev, dma_object->addr, size, + PCI_DMA_BIDIRECTIONAL); + vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); + } else { + + if (!list_empty(&blockpool->free_entry_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_entry_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry == NULL) + entry = vmalloc(sizeof( + struct __vxge_hw_blockpool_entry)); + else + list_del(&entry->item); + + if (entry != NULL) { + entry->length = size; + entry->memblock = memblock; + entry->dma_addr = dma_object->addr; + entry->acc_handle = dma_object->acc_handle; + entry->dma_handle = dma_object->handle; + list_add(&entry->item, + &blockpool->free_block_list); + 
blockpool->pool_size++; + status = VXGE_HW_OK; + } else + status = VXGE_HW_ERR_OUT_OF_MEMORY; + + if (status == VXGE_HW_OK) + __vxge_hw_blockpool_blocks_remove(blockpool); + } +} + +/* + * vxge_hw_mempool_destroy + */ +static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) +{ + u32 i, j; + struct __vxge_hw_device *devh = mempool->devh; + + for (i = 0; i < mempool->memblocks_allocated; i++) { + struct vxge_hw_mempool_dma *dma_object; + + vxge_assert(mempool->memblocks_arr[i]); + vxge_assert(mempool->memblocks_dma_arr + i); + + dma_object = mempool->memblocks_dma_arr + i; + + for (j = 0; j < mempool->items_per_memblock; j++) { + u32 index = i * mempool->items_per_memblock + j; + + /* to skip last partially filled(if any) memblock */ + if (index >= mempool->items_current) + break; + } + + vfree(mempool->memblocks_priv_arr[i]); + + __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], + mempool->memblock_size, dma_object); + } + + vfree(mempool->items_arr); + vfree(mempool->memblocks_dma_arr); + vfree(mempool->memblocks_priv_arr); + vfree(mempool->memblocks_arr); + vfree(mempool); +} + +/* + * __vxge_hw_mempool_grow + * Will resize mempool up to %num_allocate value. + */ +static enum vxge_hw_status +__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, + u32 *num_allocated) +{ + u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0; + u32 n_items = mempool->items_per_memblock; + u32 start_block_idx = mempool->memblocks_allocated; + u32 end_block_idx = mempool->memblocks_allocated + num_allocate; + enum vxge_hw_status status = VXGE_HW_OK; + + *num_allocated = 0; + + if (end_block_idx > mempool->memblocks_max) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + for (i = start_block_idx; i < end_block_idx; i++) { + u32 j; + u32 is_last = ((end_block_idx - 1) == i); + struct vxge_hw_mempool_dma *dma_object = + mempool->memblocks_dma_arr + i; + void *the_memblock; + + /* allocate memblock's private part. Each DMA memblock + * has a space allocated for item's private usage upon + * mempool's user request. Each time mempool grows, it will + * allocate new memblock and its private part at once. + * This helps to minimize memory usage a lot. */ + mempool->memblocks_priv_arr[i] = + vzalloc(mempool->items_priv_size * n_items); + if (mempool->memblocks_priv_arr[i] == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + /* allocate DMA-capable memblock */ + mempool->memblocks_arr[i] = + __vxge_hw_blockpool_malloc(mempool->devh, + mempool->memblock_size, dma_object); + if (mempool->memblocks_arr[i] == NULL) { + vfree(mempool->memblocks_priv_arr[i]); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + (*num_allocated)++; + mempool->memblocks_allocated++; + + memset(mempool->memblocks_arr[i], 0, mempool->memblock_size); + + the_memblock = mempool->memblocks_arr[i]; + + /* fill the items hash array */ + for (j = 0; j < n_items; j++) { + u32 index = i * n_items + j; + + if (first_time && index >= mempool->items_initial) + break; + + mempool->items_arr[index] = + ((char *)the_memblock + j*mempool->item_size); + + /* let caller to do more job on each item */ + if (mempool->item_func_alloc != NULL) + mempool->item_func_alloc(mempool, i, + dma_object, index, is_last); + + mempool->items_current = index + 1; + } + + if (first_time && mempool->items_current == + mempool->items_initial) + break; + } +exit: + return status; +} + +/* + * vxge_hw_mempool_create + * This function will create memory pool object. 
Pool may grow but will + * never shrink. Pool consists of number of dynamically allocated blocks + * with size enough to hold %items_initial number of items. Memory is + * DMA-able but client must map/unmap before interoperating with the device. + */ +static struct vxge_hw_mempool * +__vxge_hw_mempool_create(struct __vxge_hw_device *devh, + u32 memblock_size, + u32 item_size, + u32 items_priv_size, + u32 items_initial, + u32 items_max, + const struct vxge_hw_mempool_cbs *mp_callback, + void *userdata) +{ + enum vxge_hw_status status = VXGE_HW_OK; + u32 memblocks_to_allocate; + struct vxge_hw_mempool *mempool = NULL; + u32 allocated; + + if (memblock_size < item_size) { + status = VXGE_HW_FAIL; + goto exit; + } + + mempool = vzalloc(sizeof(struct vxge_hw_mempool)); + if (mempool == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + mempool->devh = devh; + mempool->memblock_size = memblock_size; + mempool->items_max = items_max; + mempool->items_initial = items_initial; + mempool->item_size = item_size; + mempool->items_priv_size = items_priv_size; + mempool->item_func_alloc = mp_callback->item_func_alloc; + mempool->userdata = userdata; + + mempool->memblocks_allocated = 0; + + mempool->items_per_memblock = memblock_size / item_size; + + mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) / + mempool->items_per_memblock; + + /* allocate array of memblocks */ + mempool->memblocks_arr = + vzalloc(sizeof(void *) * mempool->memblocks_max); + if (mempool->memblocks_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + + /* allocate array of private parts of items per memblocks */ + mempool->memblocks_priv_arr = + vzalloc(sizeof(void *) * mempool->memblocks_max); + if (mempool->memblocks_priv_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + + /* allocate array of memblocks DMA objects */ + mempool->memblocks_dma_arr = + vzalloc(sizeof(struct vxge_hw_mempool_dma) * + mempool->memblocks_max); + if (mempool->memblocks_dma_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + + /* allocate hash array of items */ + mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max); + if (mempool->items_arr == NULL) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + + /* calculate initial number of memblocks */ + memblocks_to_allocate = (mempool->items_initial + + mempool->items_per_memblock - 1) / + mempool->items_per_memblock; + + /* pre-allocate the mempool */ + status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate, + &allocated); + if (status != VXGE_HW_OK) { + __vxge_hw_mempool_destroy(mempool); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + mempool = NULL; + goto exit; + } + +exit: + return mempool; +} + +/* + * __vxge_hw_ring_abort - Returns the RxD + * This function terminates the RxDs of ring + */ +static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) +{ + void *rxdh; + struct __vxge_hw_channel *channel; + + channel = &ring->channel; + + for (;;) { + vxge_hw_channel_dtr_try_complete(channel, &rxdh); + + if (rxdh == NULL) + break; + + vxge_hw_channel_dtr_complete(channel); + + if (ring->rxd_term) + ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, + channel->userdata); + + vxge_hw_channel_dtr_free(channel, rxdh); + } + + return VXGE_HW_OK; +} + +/* + * 
__vxge_hw_ring_reset - Resets the ring + * This function resets the ring during vpath reset operation + */ +static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_channel *channel; + + channel = &ring->channel; + + __vxge_hw_ring_abort(ring); + + status = __vxge_hw_channel_reset(channel); + + if (status != VXGE_HW_OK) + goto exit; + + if (ring->rxd_init) { + status = vxge_hw_ring_replenish(ring); + if (status != VXGE_HW_OK) + goto exit; + } +exit: + return status; +} + +/* + * __vxge_hw_ring_delete - Removes the ring + * This function freeup the memory pool and removes the ring + */ +static enum vxge_hw_status +__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_ring *ring = vp->vpath->ringh; + + __vxge_hw_ring_abort(ring); + + if (ring->mempool) + __vxge_hw_mempool_destroy(ring->mempool); + + vp->vpath->ringh = NULL; + __vxge_hw_channel_free(&ring->channel); + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_ring_create - Create a Ring + * This function creates Ring and initializes it. + */ +static enum vxge_hw_status +__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, + struct vxge_hw_ring_attr *attr) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_ring *ring; + u32 ring_length; + struct vxge_hw_ring_config *config; + struct __vxge_hw_device *hldev; + u32 vp_id; + static const struct vxge_hw_mempool_cbs ring_mp_callback = { + .item_func_alloc = __vxge_hw_ring_mempool_item_alloc, + }; + + if ((vp == NULL) || (attr == NULL)) { + status = VXGE_HW_FAIL; + goto exit; + } + + hldev = vp->vpath->hldev; + vp_id = vp->vpath->vp_id; + + config = &hldev->config.vp_config[vp_id].ring; + + ring_length = config->ring_blocks * + vxge_hw_ring_rxds_per_block_get(config->buffer_mode); + + ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, + VXGE_HW_CHANNEL_TYPE_RING, + ring_length, + attr->per_rxd_space, + attr->userdata); + if (ring == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + vp->vpath->ringh = ring; + ring->vp_id = vp_id; + ring->vp_reg = vp->vpath->vp_reg; + ring->common_reg = hldev->common_reg; + ring->stats = &vp->vpath->sw_stats->ring_stats; + ring->config = config; + ring->callback = attr->callback; + ring->rxd_init = attr->rxd_init; + ring->rxd_term = attr->rxd_term; + ring->buffer_mode = config->buffer_mode; + ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved; + ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved; + ring->rxds_limit = config->rxds_limit; + + ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); + ring->rxd_priv_size = + sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; + ring->per_rxd_space = attr->per_rxd_space; + + ring->rxd_priv_size = + ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / + VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; + + /* how many RxDs can fit into one block. Depends on configured + * buffer_mode. 
*/ + ring->rxds_per_block = + vxge_hw_ring_rxds_per_block_get(config->buffer_mode); + + /* calculate actual RxD block private size */ + ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block; + ring->mempool = __vxge_hw_mempool_create(hldev, + VXGE_HW_BLOCK_SIZE, + VXGE_HW_BLOCK_SIZE, + ring->rxdblock_priv_size, + ring->config->ring_blocks, + ring->config->ring_blocks, + &ring_mp_callback, + ring); + if (ring->mempool == NULL) { + __vxge_hw_ring_delete(vp); + return VXGE_HW_ERR_OUT_OF_MEMORY; + } + + status = __vxge_hw_channel_initialize(&ring->channel); + if (status != VXGE_HW_OK) { + __vxge_hw_ring_delete(vp); + goto exit; + } + + /* Note: + * Specifying rxd_init callback means two things: + * 1) rxds need to be initialized by driver at channel-open time; + * 2) rxds need to be posted at channel-open time + * (that's what the initial_replenish() below does) + * Currently we don't have a case when the 1) is done without the 2). + */ + if (ring->rxd_init) { + status = vxge_hw_ring_replenish(ring); + if (status != VXGE_HW_OK) { + __vxge_hw_ring_delete(vp); + goto exit; + } + } + + /* initial replenish will increment the counter in its post() routine, + * we have to reset it */ + ring->stats->common_stats.usage_cnt = 0; +exit: + return status; +} + +/* + * vxge_hw_device_config_default_get - Initialize device config with defaults. + * Initialize Titan device config with default values. + */ +enum vxge_hw_status +vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) +{ + u32 i; + + device_config->dma_blockpool_initial = + VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; + device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; + device_config->intr_mode = VXGE_HW_INTR_MODE_DEF; + device_config->rth_en = VXGE_HW_RTH_DEFAULT; + device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT; + device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS; + device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + device_config->vp_config[i].vp_id = i; + + device_config->vp_config[i].min_bandwidth = + VXGE_HW_VPATH_BANDWIDTH_DEFAULT; + + device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT; + + device_config->vp_config[i].ring.ring_blocks = + VXGE_HW_DEF_RING_BLOCKS; + + device_config->vp_config[i].ring.buffer_mode = + VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT; + + device_config->vp_config[i].ring.scatter_mode = + VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT; + + device_config->vp_config[i].ring.rxds_limit = + VXGE_HW_DEF_RING_RXDS_LIMIT; + + device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE; + + device_config->vp_config[i].fifo.fifo_blocks = + VXGE_HW_MIN_FIFO_BLOCKS; + + device_config->vp_config[i].fifo.max_frags = + VXGE_HW_MAX_FIFO_FRAGS; + + device_config->vp_config[i].fifo.memblock_size = + VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE; + + device_config->vp_config[i].fifo.alignment_size = + VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE; + + device_config->vp_config[i].fifo.intr = + VXGE_HW_FIFO_QUEUE_INTR_DEFAULT; + + device_config->vp_config[i].fifo.no_snoop_bits = + VXGE_HW_FIFO_NO_SNOOP_DEFAULT; + device_config->vp_config[i].tti.intr_enable = + VXGE_HW_TIM_INTR_DEFAULT; + + device_config->vp_config[i].tti.btimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.timer_ac_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.timer_ci_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.timer_ri_en = + VXGE_HW_USE_FLASH_DEFAULT; + + 
device_config->vp_config[i].tti.rtimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.util_sel = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.ltimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.urange_a = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_a = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.urange_b = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_b = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.urange_c = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_c = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].tti.uec_d = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.intr_enable = + VXGE_HW_TIM_INTR_DEFAULT; + + device_config->vp_config[i].rti.btimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.timer_ac_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.timer_ci_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.timer_ri_en = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.rtimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.util_sel = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.ltimer_val = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.urange_a = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.uec_a = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.urange_b = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.uec_b = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.urange_c = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.uec_c = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].rti.uec_d = + VXGE_HW_USE_FLASH_DEFAULT; + + device_config->vp_config[i].mtu = + VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU; + + device_config->vp_config[i].rpa_strip_vlan_tag = + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT; + } + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. + * Set the swapper bits appropriately for the vpath. + */ +static enum vxge_hw_status +__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) +{ +#ifndef __BIG_ENDIAN + u64 val64; + + val64 = readq(&vpath_reg->vpath_general_cfg1); + wmb(); + val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN; + writeq(val64, &vpath_reg->vpath_general_cfg1); + wmb(); +#endif + return VXGE_HW_OK; +} + +/* + * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. + * Set the swapper bits appropriately for the vpath. + */ +static enum vxge_hw_status +__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg, + struct vxge_hw_vpath_reg __iomem *vpath_reg) +{ + u64 val64; + + val64 = readq(&legacy_reg->pifm_wr_swap_en); + + if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) { + val64 = readq(&vpath_reg->kdfcctl_cfg0); + wmb(); + + val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 | + VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 | + VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2; + + writeq(val64, &vpath_reg->kdfcctl_cfg0); + wmb(); + } + + return VXGE_HW_OK; +} + +/* + * vxge_hw_mgmt_reg_read - Read Titan register. 
+ */ +enum vxge_hw_status +vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, + enum vxge_hw_mgmt_reg_type type, + u32 index, u32 offset, u64 *value) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { + status = VXGE_HW_ERR_INVALID_DEVICE; + goto exit; + } + + switch (type) { + case vxge_hw_mgmt_reg_type_legacy: + if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + *value = readq((void __iomem *)hldev->legacy_reg + offset); + break; + case vxge_hw_mgmt_reg_type_toc: + if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + *value = readq((void __iomem *)hldev->toc_reg + offset); + break; + case vxge_hw_mgmt_reg_type_common: + if (offset > sizeof(struct vxge_hw_common_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + *value = readq((void __iomem *)hldev->common_reg + offset); + break; + case vxge_hw_mgmt_reg_type_mrpcim: + if (!(hldev->access_rights & + VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { + status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + break; + } + if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + *value = readq((void __iomem *)hldev->mrpcim_reg + offset); + break; + case vxge_hw_mgmt_reg_type_srpcim: + if (!(hldev->access_rights & + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { + status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + break; + } + if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { + status = VXGE_HW_ERR_INVALID_INDEX; + break; + } + if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + *value = readq((void __iomem *)hldev->srpcim_reg[index] + + offset); + break; + case vxge_hw_mgmt_reg_type_vpmgmt: + if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || + (!(hldev->vpath_assignments & vxge_mBIT(index)))) { + status = VXGE_HW_ERR_INVALID_INDEX; + break; + } + if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + *value = readq((void __iomem *)hldev->vpmgmt_reg[index] + + offset); + break; + case vxge_hw_mgmt_reg_type_vpath: + if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) || + (!(hldev->vpath_assignments & vxge_mBIT(index)))) { + status = VXGE_HW_ERR_INVALID_INDEX; + break; + } + if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) { + status = VXGE_HW_ERR_INVALID_INDEX; + break; + } + if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + *value = readq((void __iomem *)hldev->vpath_reg[index] + + offset); + break; + default: + status = VXGE_HW_ERR_INVALID_TYPE; + break; + } + +exit: + return status; +} + +/* + * vxge_hw_vpath_strip_fcs_check - Check for FCS strip. + */ +enum vxge_hw_status +vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask) +{ + struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; + int i = 0, j = 0; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!((vpath_mask) & vxge_mBIT(i))) + continue; + vpmgmt_reg = hldev->vpmgmt_reg[i]; + for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) { + if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j]) + & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS) + return VXGE_HW_FAIL; + } + } + return VXGE_HW_OK; +} +/* + * vxge_hw_mgmt_reg_Write - Write Titan register. 
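Before the write-side twin of vxge_hw_mgmt_reg_read is defined below, a hedged usage sketch of the read accessor. It assumes the usual vxge headers are in scope and that the caller already holds a valid struct __vxge_hw_device; offset 0 into the common register space is only an example value.

/* Illustrative only: read one 64-bit word from the common register space. */
static void example_dump_common_reg(struct __vxge_hw_device *hldev)
{
	u64 val = 0;
	enum vxge_hw_status status;

	/* index is ignored for the common space; only the srpcim, vpmgmt and
	 * vpath spaces range-check it above. */
	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
				       0, 0, &val);
	if (status != VXGE_HW_OK)
		return;		/* e.g. VXGE_HW_ERR_INVALID_OFFSET */

	pr_info("vxge: common reg @0x0 = 0x%llx\n", (unsigned long long)val);
}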
+ */ +enum vxge_hw_status +vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, + enum vxge_hw_mgmt_reg_type type, + u32 index, u32 offset, u64 value) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) { + status = VXGE_HW_ERR_INVALID_DEVICE; + goto exit; + } + + switch (type) { + case vxge_hw_mgmt_reg_type_legacy: + if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + writeq(value, (void __iomem *)hldev->legacy_reg + offset); + break; + case vxge_hw_mgmt_reg_type_toc: + if (offset > sizeof(struct vxge_hw_toc_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + writeq(value, (void __iomem *)hldev->toc_reg + offset); + break; + case vxge_hw_mgmt_reg_type_common: + if (offset > sizeof(struct vxge_hw_common_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + writeq(value, (void __iomem *)hldev->common_reg + offset); + break; + case vxge_hw_mgmt_reg_type_mrpcim: + if (!(hldev->access_rights & + VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { + status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + break; + } + if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + writeq(value, (void __iomem *)hldev->mrpcim_reg + offset); + break; + case vxge_hw_mgmt_reg_type_srpcim: + if (!(hldev->access_rights & + VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { + status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + break; + } + if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { + status = VXGE_HW_ERR_INVALID_INDEX; + break; + } + if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + writeq(value, (void __iomem *)hldev->srpcim_reg[index] + + offset); + + break; + case vxge_hw_mgmt_reg_type_vpmgmt: + if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) || + (!(hldev->vpath_assignments & vxge_mBIT(index)))) { + status = VXGE_HW_ERR_INVALID_INDEX; + break; + } + if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] + + offset); + break; + case vxge_hw_mgmt_reg_type_vpath: + if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) || + (!(hldev->vpath_assignments & vxge_mBIT(index)))) { + status = VXGE_HW_ERR_INVALID_INDEX; + break; + } + if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) { + status = VXGE_HW_ERR_INVALID_OFFSET; + break; + } + writeq(value, (void __iomem *)hldev->vpath_reg[index] + + offset); + break; + default: + status = VXGE_HW_ERR_INVALID_TYPE; + break; + } +exit: + return status; +} + +/* + * __vxge_hw_fifo_abort - Returns the TxD + * This function terminates the TxDs of fifo + */ +static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) +{ + void *txdlh; + + for (;;) { + vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh); + + if (txdlh == NULL) + break; + + vxge_hw_channel_dtr_complete(&fifo->channel); + + if (fifo->txdl_term) { + fifo->txdl_term(txdlh, + VXGE_HW_TXDL_STATE_POSTED, + fifo->channel.userdata); + } + + vxge_hw_channel_dtr_free(&fifo->channel, txdlh); + } + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_fifo_reset - Resets the fifo + * This function resets the fifo during vpath reset operation + */ +static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + __vxge_hw_fifo_abort(fifo); + status = __vxge_hw_channel_reset(&fifo->channel); + + return status; +} + +/* + * 
__vxge_hw_fifo_delete - Removes the FIFO + * This function freeup the memory pool and removes the FIFO + */ +static enum vxge_hw_status +__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_fifo *fifo = vp->vpath->fifoh; + + __vxge_hw_fifo_abort(fifo); + + if (fifo->mempool) + __vxge_hw_mempool_destroy(fifo->mempool); + + vp->vpath->fifoh = NULL; + + __vxge_hw_channel_free(&fifo->channel); + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD + * list callback + * This function is callback passed to __vxge_hw_mempool_create to create memory + * pool for TxD list + */ +static void +__vxge_hw_fifo_mempool_item_alloc( + struct vxge_hw_mempool *mempoolh, + u32 memblock_index, struct vxge_hw_mempool_dma *dma_object, + u32 index, u32 is_last) +{ + u32 memblock_item_idx; + struct __vxge_hw_fifo_txdl_priv *txdl_priv; + struct vxge_hw_fifo_txd *txdp = + (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index]; + struct __vxge_hw_fifo *fifo = + (struct __vxge_hw_fifo *)mempoolh->userdata; + void *memblock = mempoolh->memblocks_arr[memblock_index]; + + vxge_assert(txdp); + + txdp->host_control = (u64) (size_t) + __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp, + &memblock_item_idx); + + txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp); + + vxge_assert(txdl_priv); + + fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp; + + /* pre-format HW's TxDL's private */ + txdl_priv->dma_offset = (char *)txdp - (char *)memblock; + txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset; + txdl_priv->dma_handle = dma_object->handle; + txdl_priv->memblock = memblock; + txdl_priv->first_txdp = txdp; + txdl_priv->next_txdl_priv = NULL; + txdl_priv->alloc_frags = 0; +} + +/* + * __vxge_hw_fifo_create - Create a FIFO + * This function creates FIFO and initializes it. 
+ */ +static enum vxge_hw_status +__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, + struct vxge_hw_fifo_attr *attr) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_fifo *fifo; + struct vxge_hw_fifo_config *config; + u32 txdl_size, txdl_per_memblock; + struct vxge_hw_mempool_cbs fifo_mp_callback; + struct __vxge_hw_virtualpath *vpath; + + if ((vp == NULL) || (attr == NULL)) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + vpath = vp->vpath; + config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo; + + txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd); + + txdl_per_memblock = config->memblock_size / txdl_size; + + fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp, + VXGE_HW_CHANNEL_TYPE_FIFO, + config->fifo_blocks * txdl_per_memblock, + attr->per_txdl_space, attr->userdata); + + if (fifo == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + vpath->fifoh = fifo; + fifo->nofl_db = vpath->nofl_db; + + fifo->vp_id = vpath->vp_id; + fifo->vp_reg = vpath->vp_reg; + fifo->stats = &vpath->sw_stats->fifo_stats; + + fifo->config = config; + + /* apply "interrupts per txdl" attribute */ + fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; + fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved; + fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved; + + if (fifo->config->intr) + fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; + + fifo->no_snoop_bits = config->no_snoop_bits; + + /* + * FIFO memory management strategy: + * + * TxDL split into three independent parts: + * - set of TxD's + * - TxD HW private part + * - driver private part + * + * Adaptative memory allocation used. i.e. Memory allocated on + * demand with the size which will fit into one memory block. + * One memory block may contain more than one TxDL. + * + * During "reserve" operations more memory can be allocated on demand + * for example due to FIFO full condition. + * + * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close + * routine which will essentially stop the channel and free resources. + */ + + /* TxDL common private size == TxDL private + driver private */ + fifo->priv_size = + sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space; + fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) / + VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; + + fifo->per_txdl_space = attr->per_txdl_space; + + /* recompute txdl size to be cacheline aligned */ + fifo->txdl_size = txdl_size; + fifo->txdl_per_memblock = txdl_per_memblock; + + fifo->txdl_term = attr->txdl_term; + fifo->callback = attr->callback; + + if (fifo->txdl_per_memblock == 0) { + __vxge_hw_fifo_delete(vp); + status = VXGE_HW_ERR_INVALID_BLOCK_SIZE; + goto exit; + } + + fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc; + + fifo->mempool = + __vxge_hw_mempool_create(vpath->hldev, + fifo->config->memblock_size, + fifo->txdl_size, + fifo->priv_size, + (fifo->config->fifo_blocks * fifo->txdl_per_memblock), + (fifo->config->fifo_blocks * fifo->txdl_per_memblock), + &fifo_mp_callback, + fifo); + + if (fifo->mempool == NULL) { + __vxge_hw_fifo_delete(vp); + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto exit; + } + + status = __vxge_hw_channel_initialize(&fifo->channel); + if (status != VXGE_HW_OK) { + __vxge_hw_fifo_delete(vp); + goto exit; + } + + vxge_assert(fifo->channel.reserve_ptr); +exit: + return status; +} + +/* + * __vxge_hw_vpath_pci_read - Read the content of given address + * in pci config space. 
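__vxge_hw_fifo_create above rounds the per-TxDL private area up to a whole number of cache lines, and the ring code does the same for rxd_priv_size. A standalone sketch of that rounding, assuming a hypothetical 64-byte stand-in for VXGE_CACHE_LINE_SIZE:

#include <stdio.h>

#define EXAMPLE_CACHE_LINE 64	/* stand-in for VXGE_CACHE_LINE_SIZE */

static unsigned int cache_line_round_up(unsigned int size)
{
	return ((size + EXAMPLE_CACHE_LINE - 1) / EXAMPLE_CACHE_LINE) *
		EXAMPLE_CACHE_LINE;
}

int main(void)
{
	/* e.g. sizeof(txdl private struct) + per_txdl_space == 100 bytes */
	printf("100 -> %u\n", cache_line_round_up(100));	/* prints 128 */
	return 0;
}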
+ * Read from the vpath pci config space. + */ +static enum vxge_hw_status +__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, + u32 phy_func_0, u32 offset, u32 *val) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; + + val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset); + + if (phy_func_0) + val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0; + + writeq(val64, &vp_reg->pci_config_access_cfg1); + wmb(); + writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ, + &vp_reg->pci_config_access_cfg2); + wmb(); + + status = __vxge_hw_device_register_poll( + &vp_reg->pci_config_access_cfg2, + VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vp_reg->pci_config_access_status); + + if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) { + status = VXGE_HW_FAIL; + *val = 0; + } else + *val = (u32)vxge_bVALn(val64, 32, 32); +exit: + return status; +} + +/** + * vxge_hw_device_flick_link_led - Flick (blink) link LED. + * @hldev: HW device. + * @on_off: TRUE if flickering to be on, FALSE to be off + * + * Flicker the link LED. + */ +enum vxge_hw_status +vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off) +{ + struct __vxge_hw_virtualpath *vpath; + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + + if (hldev == NULL) { + status = VXGE_HW_ERR_INVALID_DEVICE; + goto exit; + } + + vpath = &hldev->virtual_paths[hldev->first_vp_id]; + + data0 = on_off; + status = vxge_hw_vpath_fw_api(vpath, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, + 0, &data0, &data1, &steer_ctrl); +exit: + return status; +} + +/* + * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables + */ +enum vxge_hw_status +__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp, + u32 action, u32 rts_table, u32 offset, + u64 *data0, u64 *data1) +{ + enum vxge_hw_status status; + u64 steer_ctrl = 0; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + if ((rts_table == + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || + (rts_table == + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || + (rts_table == + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || + (rts_table == + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { + steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; + } + + status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, + data0, data1, &steer_ctrl); + if (status != VXGE_HW_OK) + goto exit; + + if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) && + (rts_table != + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) + *data1 = 0; +exit: + return status; +} + +/* + * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables + */ +enum vxge_hw_status +__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action, + u32 rts_table, u32 offset, u64 steer_data0, + u64 steer_data1) +{ + u64 data0, data1 = 0, steer_ctrl = 0; + enum vxge_hw_status status; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + data0 = steer_data0; + + if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || + (rts_table == + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) + data1 = steer_data1; + + status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset, + &data0, &data1, &steer_ctrl); +exit: + return 
status; +} + +/* + * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing. + */ +enum vxge_hw_status vxge_hw_vpath_rts_rth_set( + struct __vxge_hw_vpath_handle *vp, + enum vxge_hw_rth_algoritms algorithm, + struct vxge_hw_rth_hash_types *hash_type, + u16 bucket_size) +{ + u64 data0, data1; + enum vxge_hw_status status = VXGE_HW_OK; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + status = __vxge_hw_vpath_rts_table_get(vp, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, + 0, &data0, &data1); + if (status != VXGE_HW_OK) + goto exit; + + data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); + + data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN | + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) | + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm); + + if (hash_type->hash_type_tcpipv4_en) + data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN; + + if (hash_type->hash_type_ipv4_en) + data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN; + + if (hash_type->hash_type_tcpipv6_en) + data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN; + + if (hash_type->hash_type_ipv6_en) + data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN; + + if (hash_type->hash_type_tcpipv6ex_en) + data0 |= + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN; + + if (hash_type->hash_type_ipv6ex_en) + data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN; + + if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0)) + data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; + else + data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE; + + status = __vxge_hw_vpath_rts_table_set(vp, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, + 0, data0, 0); +exit: + return status; +} + +static void +vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1, + u16 flag, u8 *itable) +{ + switch (flag) { + case 1: + *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)| + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN | + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA( + itable[j]); + case 2: + *data0 |= + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)| + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN | + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA( + itable[j]); + case 3: + *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)| + VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN | + VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA( + itable[j]); + case 4: + *data1 |= + VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)| + VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN | + VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA( + itable[j]); + default: + return; + } +} +/* + * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT). 
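A hedged usage sketch for vxge_hw_vpath_rts_rth_set above, placed before the indirection-table helper that follows. The handle comes from vxge_hw_vpath_open; the algorithm selector 0 and bucket size 8 are placeholders (real values come from enum vxge_hw_rth_algoritms and the caller's configuration), and only TCP/IPv4 plus plain IPv4 hashing are switched on for illustration.

static enum vxge_hw_status example_enable_rth(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types hash = {
		.hash_type_tcpipv4_en = 1,	/* hash TCP/IPv4 flows */
		.hash_type_ipv4_en    = 1,	/* and non-TCP IPv4 */
	};

	/* bucket_size lands in the 4-bit RTH_GEN_BUCKET_SIZE field above */
	return vxge_hw_vpath_rts_rth_set(vp, 0 /* placeholder algorithm */,
					 &hash, 8);
}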
+ */ +enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set( + struct __vxge_hw_vpath_handle **vpath_handles, + u32 vpath_count, + u8 *mtable, + u8 *itable, + u32 itable_size) +{ + u32 i, j, action, rts_table; + u64 data0; + u64 data1; + u32 max_entries; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_vpath_handle *vp = vpath_handles[0]; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + max_entries = (((u32)1) << itable_size); + + if (vp->vpath->hldev->config.rth_it_type + == VXGE_HW_RTH_IT_TYPE_SOLO_IT) { + action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; + rts_table = + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT; + + for (j = 0; j < max_entries; j++) { + + data1 = 0; + + data0 = + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( + itable[j]); + + status = __vxge_hw_vpath_rts_table_set(vpath_handles[0], + action, rts_table, j, data0, data1); + + if (status != VXGE_HW_OK) + goto exit; + } + + for (j = 0; j < max_entries; j++) { + + data1 = 0; + + data0 = + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN | + VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA( + itable[j]); + + status = __vxge_hw_vpath_rts_table_set( + vpath_handles[mtable[itable[j]]], action, + rts_table, j, data0, data1); + + if (status != VXGE_HW_OK) + goto exit; + } + } else { + action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY; + rts_table = + VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT; + for (i = 0; i < vpath_count; i++) { + + for (j = 0; j < max_entries;) { + + data0 = 0; + data1 = 0; + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 1, itable); + j++; + break; + } + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 2, itable); + j++; + break; + } + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 3, itable); + j++; + break; + } + + while (j < max_entries) { + if (mtable[itable[j]] != i) { + j++; + continue; + } + vxge_hw_rts_rth_data0_data1_get(j, + &data0, &data1, 4, itable); + j++; + break; + } + + if (data0 != 0) { + status = __vxge_hw_vpath_rts_table_set( + vpath_handles[i], + action, rts_table, + 0, data0, data1); + + if (status != VXGE_HW_OK) + goto exit; + } + } + } + } +exit: + return status; +} + +/** + * vxge_hw_vpath_check_leak - Check for memory leak + * @ringh: Handle to the ring object used for receive + * + * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to + * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred. + * Returns: VXGE_HW_FAIL, if leak has occurred. 
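And a companion sketch for vxge_hw_vpath_rts_rth_itable_set above, placed before the leak-check helper that follows. The identity mtable, the round-robin bucket fill and itable_size = 3 (eight buckets) are illustrative choices rather than the driver's policy; the arrays are sized for at most eight vpaths in this sketch.

static enum vxge_hw_status
example_fill_itable(struct __vxge_hw_vpath_handle **handles, u32 vpath_count)
{
	u8 mtable[8], itable[8];
	u32 itable_size = 3;		/* max_entries = 1 << 3 = 8 */
	u32 j;

	if (vpath_count == 0 || vpath_count > 8)
		return VXGE_HW_FAIL;	/* sketch only handles <= 8 vpaths */

	for (j = 0; j < vpath_count; j++)
		mtable[j] = j;		/* identity: bucket data == handle index */

	for (j = 0; j < (1u << itable_size); j++)
		itable[j] = j % vpath_count;	/* spread buckets round-robin */

	return vxge_hw_vpath_rts_rth_itable_set(handles, vpath_count,
						mtable, itable, itable_size);
}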
+ * + */ +enum vxge_hw_status +vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring) +{ + enum vxge_hw_status status = VXGE_HW_OK; + u64 rxd_new_count, rxd_spat; + + if (ring == NULL) + return status; + + rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell); + rxd_spat = readq(&ring->vp_reg->prc_cfg6); + rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat); + + if (rxd_new_count >= rxd_spat) + status = VXGE_HW_FAIL; + + return status; +} + +/* + * __vxge_hw_vpath_mgmt_read + * This routine reads the vpath_mgmt registers + */ +static enum vxge_hw_status +__vxge_hw_vpath_mgmt_read( + struct __vxge_hw_device *hldev, + struct __vxge_hw_virtualpath *vpath) +{ + u32 i, mtu = 0, max_pyld = 0; + u64 val64; + + for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) { + + val64 = readq(&vpath->vpmgmt_reg-> + rxmac_cfg0_port_vpmgmt_clone[i]); + max_pyld = + (u32) + VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN + (val64); + if (mtu < max_pyld) + mtu = max_pyld; + } + + vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE; + + val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (val64 & vxge_mBIT(i)) + vpath->vsport_number = i; + } + + val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone); + + if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK) + VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP); + else + VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN); + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed + * This routine checks the vpath_rst_in_prog register to see if + * adapter completed the reset process for the vpath + */ +static enum vxge_hw_status +__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) +{ + enum vxge_hw_status status; + + status = __vxge_hw_device_register_poll( + &vpath->hldev->common_reg->vpath_rst_in_prog, + VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG( + 1 << (16 - vpath->vp_id)), + vpath->hldev->config.device_poll_millis); + + return status; +} + +/* + * __vxge_hw_vpath_reset + * This routine resets the vpath on the device + */ +static enum vxge_hw_status +__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + + val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id)); + + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), + &hldev->common_reg->cmn_rsthdlr_cfg0); + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_vpath_sw_reset + * This routine resets the vpath structures + */ +static enum vxge_hw_status +__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + + vpath = &hldev->virtual_paths[vp_id]; + + if (vpath->ringh) { + status = __vxge_hw_ring_reset(vpath->ringh); + if (status != VXGE_HW_OK) + goto exit; + } + + if (vpath->fifoh) + status = __vxge_hw_fifo_reset(vpath->fifoh); +exit: + return status; +} + +/* + * __vxge_hw_vpath_prc_configure + * This routine configures the prc registers of virtual path using the config + * passed + */ +static void +__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vp_config *vp_config; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + vp_config = vpath->vp_config; + + if (vp_config->ring.enable == VXGE_HW_RING_DISABLE) + return; + + val64 = readq(&vp_reg->prc_cfg1); + 
val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE; + writeq(val64, &vp_reg->prc_cfg1); + + val64 = readq(&vpath->vp_reg->prc_cfg6); + val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN; + writeq(val64, &vpath->vp_reg->prc_cfg6); + + val64 = readq(&vp_reg->prc_cfg7); + + if (vpath->vp_config->ring.scatter_mode != + VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) { + + val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3); + + switch (vpath->vp_config->ring.scatter_mode) { + case VXGE_HW_RING_SCATTER_MODE_A: + val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( + VXGE_HW_PRC_CFG7_SCATTER_MODE_A); + break; + case VXGE_HW_RING_SCATTER_MODE_B: + val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( + VXGE_HW_PRC_CFG7_SCATTER_MODE_B); + break; + case VXGE_HW_RING_SCATTER_MODE_C: + val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE( + VXGE_HW_PRC_CFG7_SCATTER_MODE_C); + break; + } + } + + writeq(val64, &vp_reg->prc_cfg7); + + writeq(VXGE_HW_PRC_CFG5_RXD0_ADD( + __vxge_hw_ring_first_block_address_get( + vpath->ringh) >> 3), &vp_reg->prc_cfg5); + + val64 = readq(&vp_reg->prc_cfg4); + val64 |= VXGE_HW_PRC_CFG4_IN_SVC; + val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3); + + val64 |= VXGE_HW_PRC_CFG4_RING_MODE( + VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER); + + if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE) + val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE; + else + val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE; + + writeq(val64, &vp_reg->prc_cfg4); +} + +/* + * __vxge_hw_vpath_kdfc_configure + * This routine configures the kdfc registers of virtual path using the + * config passed + */ +static enum vxge_hw_status +__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + u64 vpath_stride; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg); + + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vp_reg->kdfc_drbl_triplet_total); + + vpath->max_kdfc_db = + (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE( + val64+1)/2; + + if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { + + vpath->max_nofl_db = vpath->max_kdfc_db; + + if (vpath->max_nofl_db < + ((vpath->vp_config->fifo.memblock_size / + (vpath->vp_config->fifo.max_frags * + sizeof(struct vxge_hw_fifo_txd))) * + vpath->vp_config->fifo.fifo_blocks)) { + + return VXGE_HW_BADCFG_FIFO_BLOCKS; + } + val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0( + (vpath->max_nofl_db*2)-1); + } + + writeq(val64, &vp_reg->kdfc_fifo_trpl_partition); + + writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE, + &vp_reg->kdfc_fifo_trpl_ctrl); + + val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl); + + val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) | + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF)); + + val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE( + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) | +#ifndef __BIG_ENDIAN + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN | +#endif + VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0); + + writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl); + writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address); + wmb(); + vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride); + + vpath->nofl_db = + (struct __vxge_hw_non_offload_db_wrapper __iomem *) + (hldev->kdfc + (vp_id * + VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE( + vpath_stride))); +exit: + return status; +} + +/* + * __vxge_hw_vpath_mac_configure + * This routine configures the mac of virtual path using the config passed + */ 
+static enum vxge_hw_status +__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vp_config *vp_config; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + vp_config = vpath->vp_config; + + writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER( + vpath->vsport_number), &vp_reg->xmac_vsport_choice); + + if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) { + + val64 = readq(&vp_reg->xmac_rpa_vcfg); + + if (vp_config->rpa_strip_vlan_tag != + VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) { + if (vp_config->rpa_strip_vlan_tag) + val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; + else + val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG; + } + + writeq(val64, &vp_reg->xmac_rpa_vcfg); + val64 = readq(&vp_reg->rxmac_vcfg0); + + if (vp_config->mtu != + VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) { + val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); + if ((vp_config->mtu + + VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu) + val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( + vp_config->mtu + + VXGE_HW_MAC_HEADER_MAX_SIZE); + else + val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN( + vpath->max_mtu); + } + + writeq(val64, &vp_reg->rxmac_vcfg0); + + val64 = readq(&vp_reg->rxmac_vcfg1); + + val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) | + VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE); + + if (hldev->config.rth_it_type == + VXGE_HW_RTH_IT_TYPE_MULTI_IT) { + val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE( + 0x2) | + VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE; + } + + writeq(val64, &vp_reg->rxmac_vcfg1); + } + return VXGE_HW_OK; +} + +/* + * __vxge_hw_vpath_tim_configure + * This routine configures the tim registers of virtual path using the config + * passed + */ +static enum vxge_hw_status +__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct vxge_hw_vp_config *config; + + vpath = &hldev->virtual_paths[vp_id]; + vp_reg = vpath->vp_reg; + config = vpath->vp_config; + + writeq(0, &vp_reg->tim_dest_addr); + writeq(0, &vp_reg->tim_vpath_map); + writeq(0, &vp_reg->tim_bitmap); + writeq(0, &vp_reg->tim_remap); + + if (config->ring.enable == VXGE_HW_RING_ENABLE) + writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( + (vp_id * VXGE_HW_MAX_INTR_PER_VP) + + VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn); + + val64 = readq(&vp_reg->tim_pci_cfg); + val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD; + writeq(val64, &vp_reg->tim_pci_cfg); + + if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { + + val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + + if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + config->tti.btimer_val); + } + + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; + + if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->tti.timer_ac_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + else + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + } + + if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->tti.timer_ci_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + else + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + } + + if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); + val64 |= 
VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( + config->tti.urange_a); + } + + if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( + config->tti.urange_b); + } + + if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( + config->tti.urange_c); + } + + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + vpath->tim_tti_cfg1_saved = val64; + + val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); + + if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( + config->tti.uec_a); + } + + if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( + config->tti.uec_b); + } + + if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( + config->tti.uec_c); + } + + if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( + config->tti.uec_d); + } + + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); + val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); + + if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->tti.timer_ri_en) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + else + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + } + + if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + config->tti.rtimer_val); + } + + if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); + } + + if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + config->tti.ltimer_val); + } + + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); + vpath->tim_tti_cfg3_saved = val64; + } + + if (config->ring.enable == VXGE_HW_RING_ENABLE) { + + val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); + + if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL( + config->rti.btimer_val); + } + + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN; + + if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->rti.timer_ac_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + else + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC; + } + + if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->rti.timer_ci_en) + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + else + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + } + + if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A( + config->rti.urange_a); + } + + if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B( + config->rti.urange_b); + } + + if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) { 
+ val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C( + config->rti.urange_c); + } + + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); + vpath->tim_rti_cfg1_saved = val64; + + val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); + + if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A( + config->rti.uec_a); + } + + if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B( + config->rti.uec_b); + } + + if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C( + config->rti.uec_c); + } + + if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff); + val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D( + config->rti.uec_d); + } + + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); + val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); + + if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) { + if (config->rti.timer_ri_en) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + else + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI; + } + + if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL( + config->rti.rtimer_val); + } + + if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id); + } + + if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + 0x3ffffff); + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL( + config->rti.ltimer_val); + } + + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); + vpath->tim_rti_cfg3_saved = val64; + } + + val64 = 0; + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]); + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]); + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]); + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]); + writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); + writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); + + val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150); + val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0); + val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3); + writeq(val64, &vp_reg->tim_wrkld_clc); + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_vpath_initialize + * This routine is the final phase of init which initializes the + * registers of the vpath using the configuration passed. 
+ */ +static enum vxge_hw_status +__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) +{ + u64 val64; + u32 val32; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + vpath = &hldev->virtual_paths[vp_id]; + + if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { + status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; + goto exit; + } + vp_reg = vpath->vp_reg; + + status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_mac_configure(hldev, vp_id); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_tim_configure(hldev, vp_id); + if (status != VXGE_HW_OK) + goto exit; + + val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); + + /* Get MRRS value from device control */ + status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); + if (status == VXGE_HW_OK) { + val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; + val64 &= + ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7)); + val64 |= + VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32); + + val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE; + } + + val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7)); + val64 |= + VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY( + VXGE_HW_MAX_PAYLOAD_SIZE_512); + + val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN; + writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl); + +exit: + return status; +} + +/* + * __vxge_hw_vp_terminate - Terminate Virtual Path structure + * This routine closes all channels it opened and freeup memory + */ +static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) +{ + struct __vxge_hw_virtualpath *vpath; + + vpath = &hldev->virtual_paths[vp_id]; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) + goto exit; + + VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0, + vpath->hldev->tim_int_mask1, vpath->vp_id); + hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL; + + /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will + * work after the interface is brought down. + */ + spin_lock(&vpath->lock); + vpath->vp_open = VXGE_HW_VP_NOT_OPEN; + spin_unlock(&vpath->lock); + + vpath->vpmgmt_reg = NULL; + vpath->nofl_db = NULL; + vpath->max_mtu = 0; + vpath->vsport_number = 0; + vpath->max_kdfc_db = 0; + vpath->max_nofl_db = 0; + vpath->ringh = NULL; + vpath->fifoh = NULL; + memset(&vpath->vpath_handles, 0, sizeof(struct list_head)); + vpath->stats_block = NULL; + vpath->hw_stats = NULL; + vpath->hw_stats_sav = NULL; + vpath->sw_stats = NULL; + +exit: + return; +} + +/* + * __vxge_hw_vp_initialize - Initialize Virtual Path structure + * This routine is the initial phase of init which resets the vpath and + * initializes the software support structures. 
+ */ +static enum vxge_hw_status +__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, + struct vxge_hw_vp_config *config) +{ + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + + if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) { + status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE; + goto exit; + } + + vpath = &hldev->virtual_paths[vp_id]; + + spin_lock_init(&vpath->lock); + vpath->vp_id = vp_id; + vpath->vp_open = VXGE_HW_VP_OPEN; + vpath->hldev = hldev; + vpath->vp_config = config; + vpath->vp_reg = hldev->vpath_reg[vp_id]; + vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id]; + + __vxge_hw_vpath_reset(hldev, vp_id); + + status = __vxge_hw_vpath_reset_check(vpath); + if (status != VXGE_HW_OK) { + memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); + goto exit; + } + + status = __vxge_hw_vpath_mgmt_read(hldev, vpath); + if (status != VXGE_HW_OK) { + memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); + goto exit; + } + + INIT_LIST_HEAD(&vpath->vpath_handles); + + vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id]; + + VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0, + hldev->tim_int_mask1, vp_id); + + status = __vxge_hw_vpath_initialize(hldev, vp_id); + if (status != VXGE_HW_OK) + __vxge_hw_vp_terminate(hldev, vp_id); +exit: + return status; +} + +/* + * vxge_hw_vpath_mtu_set - Set MTU. + * Set new MTU value. Example, to use jumbo frames: + * vxge_hw_vpath_mtu_set(my_device, 9600); + */ +enum vxge_hw_status +vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu) +{ + u64 val64; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + vpath = vp->vpath; + + new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE; + + if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) + status = VXGE_HW_ERR_INVALID_MTU_SIZE; + + val64 = readq(&vpath->vp_reg->rxmac_vcfg0); + + val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff); + val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu); + + writeq(val64, &vpath->vp_reg->rxmac_vcfg0); + + vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE; + +exit: + return status; +} + +/* + * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics. + * Enable the DMA vpath statistics. 
The function is to be called to re-enable + * the adapter to update stats into the host memory + */ +static enum vxge_hw_status +vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_virtualpath *vpath; + + vpath = vp->vpath; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + memcpy(vpath->hw_stats_sav, vpath->hw_stats, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats); +exit: + return status; +} + +/* + * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool + * This function allocates a block from block pool or from the system + */ +static struct __vxge_hw_blockpool_entry * +__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size) +{ + struct __vxge_hw_blockpool_entry *entry = NULL; + struct __vxge_hw_blockpool *blockpool; + + blockpool = &devh->block_pool; + + if (size == blockpool->block_size) { + + if (!list_empty(&blockpool->free_block_list)) + entry = (struct __vxge_hw_blockpool_entry *) + list_first_entry(&blockpool->free_block_list, + struct __vxge_hw_blockpool_entry, + item); + + if (entry != NULL) { + list_del(&entry->item); + blockpool->pool_size--; + } + } + + if (entry != NULL) + __vxge_hw_blockpool_blocks_add(blockpool); + + return entry; +} + +/* + * vxge_hw_vpath_open - Open a virtual path on a given adapter + * This function is used to open access to virtual path of an + * adapter for offload, GRO operations. This function returns + * synchronously. + */ +enum vxge_hw_status +vxge_hw_vpath_open(struct __vxge_hw_device *hldev, + struct vxge_hw_vpath_attr *attr, + struct __vxge_hw_vpath_handle **vpath_handle) +{ + struct __vxge_hw_virtualpath *vpath; + struct __vxge_hw_vpath_handle *vp; + enum vxge_hw_status status; + + vpath = &hldev->virtual_paths[attr->vp_id]; + + if (vpath->vp_open == VXGE_HW_VP_OPEN) { + status = VXGE_HW_ERR_INVALID_STATE; + goto vpath_open_exit1; + } + + status = __vxge_hw_vp_initialize(hldev, attr->vp_id, + &hldev->config.vp_config[attr->vp_id]); + if (status != VXGE_HW_OK) + goto vpath_open_exit1; + + vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle)); + if (vp == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto vpath_open_exit2; + } + + vp->vpath = vpath; + + if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { + status = __vxge_hw_fifo_create(vp, &attr->fifo_attr); + if (status != VXGE_HW_OK) + goto vpath_open_exit6; + } + + if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) { + status = __vxge_hw_ring_create(vp, &attr->ring_attr); + if (status != VXGE_HW_OK) + goto vpath_open_exit7; + + __vxge_hw_vpath_prc_configure(hldev, attr->vp_id); + } + + vpath->fifoh->tx_intr_num = + (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) + + VXGE_HW_VPATH_INTR_TX; + + vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, + VXGE_HW_BLOCK_SIZE); + if (vpath->stats_block == NULL) { + status = VXGE_HW_ERR_OUT_OF_MEMORY; + goto vpath_open_exit8; + } + + vpath->hw_stats = vpath->stats_block->memblock; + memset(vpath->hw_stats, 0, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] = + vpath->hw_stats; + + vpath->hw_stats_sav = + &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id]; + memset(vpath->hw_stats_sav, 0, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg); + + status = 
vxge_hw_vpath_stats_enable(vp); + if (status != VXGE_HW_OK) + goto vpath_open_exit8; + + list_add(&vp->item, &vpath->vpath_handles); + + hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id); + + *vpath_handle = vp; + + attr->fifo_attr.userdata = vpath->fifoh; + attr->ring_attr.userdata = vpath->ringh; + + return VXGE_HW_OK; + +vpath_open_exit8: + if (vpath->ringh != NULL) + __vxge_hw_ring_delete(vp); +vpath_open_exit7: + if (vpath->fifoh != NULL) + __vxge_hw_fifo_delete(vp); +vpath_open_exit6: + vfree(vp); +vpath_open_exit2: + __vxge_hw_vp_terminate(hldev, attr->vp_id); +vpath_open_exit1: + + return status; +} + +/** + * vxge_hw_vpath_rx_doorbell_post - Close the handle got from previous vpath + * (vpath) open + * @vp: Handle got from previous vpath open + * + * This function is used to close access to virtual path opened + * earlier. + */ +void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_virtualpath *vpath = vp->vpath; + struct __vxge_hw_ring *ring = vpath->ringh; + struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev); + u64 new_count, val64, val164; + + if (vdev->titan1) { + new_count = readq(&vpath->vp_reg->rxdmem_size); + new_count &= 0x1fff; + } else + new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8; + + val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count); + + writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), + &vpath->vp_reg->prc_rxd_doorbell); + readl(&vpath->vp_reg->prc_rxd_doorbell); + + val164 /= 2; + val64 = readq(&vpath->vp_reg->prc_cfg6); + val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64); + val64 &= 0x1ff; + + /* + * Each RxD is of 4 qwords + */ + new_count -= (val64 + 1); + val64 = min(val164, new_count) / 4; + + ring->rxds_limit = min(ring->rxds_limit, val64); + if (ring->rxds_limit < 4) + ring->rxds_limit = 4; +} + +/* + * __vxge_hw_blockpool_block_free - Frees a block from block pool + * @devh: Hal device + * @entry: Entry of block to be freed + * + * This function frees a block from block pool + */ +static void +__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh, + struct __vxge_hw_blockpool_entry *entry) +{ + struct __vxge_hw_blockpool *blockpool; + + blockpool = &devh->block_pool; + + if (entry->length == blockpool->block_size) { + list_add(&entry->item, &blockpool->free_block_list); + blockpool->pool_size++; + } + + __vxge_hw_blockpool_blocks_remove(blockpool); +} + +/* + * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open + * This function is used to close access to virtual path opened + * earlier. 
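+ *
+ * Illustrative caller-side sketch (not taken from the driver itself;
+ * "vp" is assumed to be the handle returned earlier by
+ * vxge_hw_vpath_open()):
+ *
+ *     status = vxge_hw_vpath_close(vp);
+ *     if (status != VXGE_HW_OK)
+ *             return status;
+ *     vp = NULL;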
+ */ +enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_virtualpath *vpath = NULL; + struct __vxge_hw_device *devh = NULL; + u32 vp_id = vp->vpath->vp_id; + u32 is_empty = TRUE; + enum vxge_hw_status status = VXGE_HW_OK; + + vpath = vp->vpath; + devh = vpath->hldev; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto vpath_close_exit; + } + + list_del(&vp->item); + + if (!list_empty(&vpath->vpath_handles)) { + list_add(&vp->item, &vpath->vpath_handles); + is_empty = FALSE; + } + + if (!is_empty) { + status = VXGE_HW_FAIL; + goto vpath_close_exit; + } + + devh->vpaths_deployed &= ~vxge_mBIT(vp_id); + + if (vpath->ringh != NULL) + __vxge_hw_ring_delete(vp); + + if (vpath->fifoh != NULL) + __vxge_hw_fifo_delete(vp); + + if (vpath->stats_block != NULL) + __vxge_hw_blockpool_block_free(devh, vpath->stats_block); + + vfree(vp); + + __vxge_hw_vp_terminate(devh, vp_id); + +vpath_close_exit: + return status; +} + +/* + * vxge_hw_vpath_reset - Resets vpath + * This function is used to request a reset of vpath + */ +enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp) +{ + enum vxge_hw_status status; + u32 vp_id; + struct __vxge_hw_virtualpath *vpath = vp->vpath; + + vp_id = vpath->vp_id; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + status = __vxge_hw_vpath_reset(vpath->hldev, vp_id); + if (status == VXGE_HW_OK) + vpath->sw_stats->soft_reset_cnt++; +exit: + return status; +} + +/* + * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize. + * This function poll's for the vpath reset completion and re initializes + * the vpath. + */ +enum vxge_hw_status +vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_virtualpath *vpath = NULL; + enum vxge_hw_status status; + struct __vxge_hw_device *hldev; + u32 vp_id; + + vp_id = vp->vpath->vp_id; + vpath = vp->vpath; + hldev = vpath->hldev; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + status = __vxge_hw_vpath_reset_check(vpath); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_sw_reset(hldev, vp_id); + if (status != VXGE_HW_OK) + goto exit; + + status = __vxge_hw_vpath_initialize(hldev, vp_id); + if (status != VXGE_HW_OK) + goto exit; + + if (vpath->ringh != NULL) + __vxge_hw_vpath_prc_configure(hldev, vp_id); + + memset(vpath->hw_stats, 0, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + memset(vpath->hw_stats_sav, 0, + sizeof(struct vxge_hw_vpath_stats_hw_info)); + + writeq(vpath->stats_block->dma_addr, + &vpath->vp_reg->stats_cfg); + + status = vxge_hw_vpath_stats_enable(vp); + +exit: + return status; +} + +/* + * vxge_hw_vpath_enable - Enable vpath. + * This routine clears the vpath reset thereby enabling a vpath + * to start forwarding frames and generating interrupts. 
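+ *
+ * Illustrative sketch only (not lifted from the driver): a caller that
+ * has just recovered a vpath from reset might bring it back into
+ * service with
+ *
+ *     status = vxge_hw_vpath_recover_from_reset(vp);
+ *     if (status == VXGE_HW_OK)
+ *             vxge_hw_vpath_enable(vp);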
+ */ +void +vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp) +{ + struct __vxge_hw_device *hldev; + u64 val64; + + hldev = vp->vpath->hldev; + + val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET( + 1 << (16 - vp->vpath->vp_id)); + + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), + &hldev->common_reg->cmn_rsthdlr_cfg1); +} diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h new file mode 100644 index 000000000..6ce4412fc --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -0,0 +1,2111 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-config.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2010 Exar Corp. + ******************************************************************************/ +#ifndef VXGE_CONFIG_H +#define VXGE_CONFIG_H +#include +#include +#include +#include + +#ifndef VXGE_CACHE_LINE_SIZE +#define VXGE_CACHE_LINE_SIZE 128 +#endif + +#ifndef VXGE_ALIGN +#define VXGE_ALIGN(adrs, size) \ + (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) +#endif + +#define VXGE_HW_MIN_MTU 68 +#define VXGE_HW_MAX_MTU 9600 +#define VXGE_HW_DEFAULT_MTU 1500 + +#define VXGE_HW_MAX_ROM_IMAGES 8 + +struct eprom_image { + u8 is_valid:1; + u8 index; + u8 type; + u16 version; +}; + +#ifdef VXGE_DEBUG_ASSERT +/** + * vxge_assert + * @test: C-condition to check + * @fmt: printf like format string + * + * This function implements traditional assert. By default assertions + * are enabled. It can be disabled by undefining VXGE_DEBUG_ASSERT macro in + * compilation + * time. + */ +#define vxge_assert(test) BUG_ON(!(test)) +#else +#define vxge_assert(test) +#endif /* end of VXGE_DEBUG_ASSERT */ + +/** + * enum vxge_debug_level + * @VXGE_NONE: debug disabled + * @VXGE_ERR: all errors going to be logged out + * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs + * going to be logged out. Very noisy. + * + * This enumeration going to be used to switch between different + * debug levels during runtime if DEBUG macro defined during + * compilation. If DEBUG macro not defined than code will be + * compiled out. + */ +enum vxge_debug_level { + VXGE_NONE = 0, + VXGE_TRACE = 1, + VXGE_ERR = 2 +}; + +#define NULL_VPID 0xFFFFFFFF +#ifdef CONFIG_VXGE_DEBUG_TRACE_ALL +#define VXGE_DEBUG_MODULE_MASK 0xffffffff +#define VXGE_DEBUG_TRACE_MASK 0xffffffff +#define VXGE_DEBUG_ERR_MASK 0xffffffff +#define VXGE_DEBUG_MASK 0x000001ff +#else +#define VXGE_DEBUG_MODULE_MASK 0x20000000 +#define VXGE_DEBUG_TRACE_MASK 0x20000000 +#define VXGE_DEBUG_ERR_MASK 0x20000000 +#define VXGE_DEBUG_MASK 0x00000001 +#endif + +/* + * @VXGE_COMPONENT_LL: do debug for vxge link layer module + * @VXGE_COMPONENT_ALL: activate debug for all modules with no exceptions + * + * This enumeration going to be used to distinguish modules + * or libraries during compilation and runtime. Makefile must declare + * VXGE_DEBUG_MODULE_MASK macro and set it to proper value. 
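+ * (For example, a hypothetical compiler flag such as
+ * -DVXGE_DEBUG_MODULE_MASK=VXGE_COMPONENT_ALL would enable debug
+ * output for every module.)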
+ */ +#define VXGE_COMPONENT_LL 0x20000000 +#define VXGE_COMPONENT_ALL 0xffffffff + +#define VXGE_HW_BASE_INF 100 +#define VXGE_HW_BASE_ERR 200 +#define VXGE_HW_BASE_BADCFG 300 + +enum vxge_hw_status { + VXGE_HW_OK = 0, + VXGE_HW_FAIL = 1, + VXGE_HW_PENDING = 2, + VXGE_HW_COMPLETIONS_REMAIN = 3, + + VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1, + VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2, + + VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1, + VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2, + VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3, + VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4, + VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5, + VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6, + VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7, + VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8, + VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9, + VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10, + VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11, + VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12, + VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13, + VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14, + VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15, + VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16, + VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17, + VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18, + VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19, + VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20, + VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21, + VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22, + + VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1, + VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2, + VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3, + VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4, + VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5, + VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 6, + VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 7, + + VXGE_HW_EOF_TRACE_BUF = -1 +}; + +/** + * enum enum vxge_hw_device_link_state - Link state enumeration. + * @VXGE_HW_LINK_NONE: Invalid link state. + * @VXGE_HW_LINK_DOWN: Link is down. + * @VXGE_HW_LINK_UP: Link is up. + * + */ +enum vxge_hw_device_link_state { + VXGE_HW_LINK_NONE, + VXGE_HW_LINK_DOWN, + VXGE_HW_LINK_UP +}; + +/** + * enum enum vxge_hw_fw_upgrade_code - FW upgrade return codes. + * @VXGE_HW_FW_UPGRADE_OK: All OK send next 16 bytes + * @VXGE_HW_FW_UPGRADE_DONE: upload completed + * @VXGE_HW_FW_UPGRADE_ERR: upload error + * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream + * + */ +enum vxge_hw_fw_upgrade_code { + VXGE_HW_FW_UPGRADE_OK = 0, + VXGE_HW_FW_UPGRADE_DONE = 1, + VXGE_HW_FW_UPGRADE_ERR = 2, + VXGE_FW_UPGRADE_BYTES2SKIP = 3 +}; + +/** + * enum enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes. 
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data + * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data + * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file + * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error unknown type + * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash image check failed + */ +enum vxge_hw_fw_upgrade_err_code { + VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1, + VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6, + VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7, + VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8, + VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9, + VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10 +}; + +/** + * struct vxge_hw_device_date - Date Format + * @day: Day + * @month: Month + * @year: Year + * @date: Date in string format + * + * Structure for returning date + */ + +#define VXGE_HW_FW_STRLEN 32 +struct vxge_hw_device_date { + u32 day; + u32 month; + u32 year; + char date[VXGE_HW_FW_STRLEN]; +}; + +struct vxge_hw_device_version { + u32 major; + u32 minor; + u32 build; + char version[VXGE_HW_FW_STRLEN]; +}; + +/** + * struct vxge_hw_fifo_config - Configuration of fifo. + * @enable: Is this fifo to be commissioned + * @fifo_blocks: Numbers of TxDL (that is, lists of Tx descriptors) + * blocks per queue. + * @max_frags: Max number of Tx buffers per TxDL (that is, per single + * transmit operation). + * No more than 256 transmit buffers can be specified. + * @memblock_size: Fifo descriptors are allocated in blocks of @mem_block_size + * bytes. Setting @memblock_size to page size ensures + * by-page allocation of descriptors. 128K bytes is the + * maximum supported block size. + * @alignment_size: per Tx fragment DMA-able memory used to align transmit data + * (e.g., to align on a cache line). + * @intr: Boolean. Use 1 to generate interrupt for each completed TxDL. + * Use 0 otherwise. + * @no_snoop_bits: If non-zero, specifies no-snoop PCI operation, + * which generally improves latency of the host bridge operation + * (see PCI specification). For valid values please refer + * to struct vxge_hw_fifo_config{} in the driver sources. + * Configuration of all Titan fifos. + * Note: Valid (min, max) range for each attribute is specified in the body of + * the struct vxge_hw_fifo_config{} structure. 
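+ *
+ * Illustrative initialization sketch (hypothetical values, using only
+ * the constants defined in the structure below):
+ *
+ *     struct vxge_hw_fifo_config fifo_cfg = {
+ *             .enable         = VXGE_HW_FIFO_ENABLE,
+ *             .fifo_blocks    = VXGE_HW_MIN_FIFO_BLOCKS,
+ *             .max_frags      = VXGE_HW_MAX_FIFO_FRAGS,
+ *             .memblock_size  = VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE,
+ *     };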
+ */ +struct vxge_hw_fifo_config { + u32 enable; +#define VXGE_HW_FIFO_ENABLE 1 +#define VXGE_HW_FIFO_DISABLE 0 + + u32 fifo_blocks; +#define VXGE_HW_MIN_FIFO_BLOCKS 2 +#define VXGE_HW_MAX_FIFO_BLOCKS 128 + + u32 max_frags; +#define VXGE_HW_MIN_FIFO_FRAGS 1 +#define VXGE_HW_MAX_FIFO_FRAGS 256 + + u32 memblock_size; +#define VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE VXGE_HW_BLOCK_SIZE +#define VXGE_HW_MAX_FIFO_MEMBLOCK_SIZE 131072 +#define VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE 8096 + + u32 alignment_size; +#define VXGE_HW_MIN_FIFO_ALIGNMENT_SIZE 0 +#define VXGE_HW_MAX_FIFO_ALIGNMENT_SIZE 65536 +#define VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE VXGE_CACHE_LINE_SIZE + + u32 intr; +#define VXGE_HW_FIFO_QUEUE_INTR_ENABLE 1 +#define VXGE_HW_FIFO_QUEUE_INTR_DISABLE 0 +#define VXGE_HW_FIFO_QUEUE_INTR_DEFAULT 0 + + u32 no_snoop_bits; +#define VXGE_HW_FIFO_NO_SNOOP_DISABLED 0 +#define VXGE_HW_FIFO_NO_SNOOP_TXD 1 +#define VXGE_HW_FIFO_NO_SNOOP_FRM 2 +#define VXGE_HW_FIFO_NO_SNOOP_ALL 3 +#define VXGE_HW_FIFO_NO_SNOOP_DEFAULT 0 + +}; +/** + * struct vxge_hw_ring_config - Ring configurations. + * @enable: Is this ring to be commissioned + * @ring_blocks: Numbers of RxD blocks in the ring + * @buffer_mode: Receive buffer mode (1, 2, 3, or 5); for details please refer + * to Titan User Guide. + * @scatter_mode: Titan supports two receive scatter modes: A and B. + * For details please refer to Titan User Guide. + * @rx_timer_val: The number of 32ns periods that would be counted between two + * timer interrupts. + * @greedy_return: If Set it forces the device to return absolutely all RxD + * that are consumed and still on board when a timer interrupt + * triggers. If Clear, then if the device has already returned + * RxD before current timer interrupt trigerred and after the + * previous timer interrupt triggered, then the device is not + * forced to returned the rest of the consumed RxD that it has + * on board which account for a byte count less than the one + * programmed into PRC_CFG6.RXD_CRXDT field + * @rx_timer_ci: TBD + * @backoff_interval_us: Time (in microseconds), after which Titan + * tries to download RxDs posted by the host. + * Note that the "backoff" does not happen if host posts receive + * descriptors in the timely fashion. + * Ring configuration. + */ +struct vxge_hw_ring_config { + u32 enable; +#define VXGE_HW_RING_ENABLE 1 +#define VXGE_HW_RING_DISABLE 0 +#define VXGE_HW_RING_DEFAULT 1 + + u32 ring_blocks; +#define VXGE_HW_MIN_RING_BLOCKS 1 +#define VXGE_HW_MAX_RING_BLOCKS 128 +#define VXGE_HW_DEF_RING_BLOCKS 2 + + u32 buffer_mode; +#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 +#define VXGE_HW_RING_RXD_BUFFER_MODE_3 3 +#define VXGE_HW_RING_RXD_BUFFER_MODE_5 5 +#define VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT 1 + + u32 scatter_mode; +#define VXGE_HW_RING_SCATTER_MODE_A 0 +#define VXGE_HW_RING_SCATTER_MODE_B 1 +#define VXGE_HW_RING_SCATTER_MODE_C 2 +#define VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT 0xffffffff + + u64 rxds_limit; +#define VXGE_HW_DEF_RING_RXDS_LIMIT 44 +}; + +/** + * struct vxge_hw_vp_config - Configuration of virtual path + * @vp_id: Virtual Path Id + * @min_bandwidth: Minimum Guaranteed bandwidth + * @ring: See struct vxge_hw_ring_config{}. + * @fifo: See struct vxge_hw_fifo_config{}. + * @tti: Configuration of interrupt associated with Transmit. + * see struct vxge_hw_tim_intr_config(); + * @rti: Configuration of interrupt associated with Receive. + * see struct vxge_hw_tim_intr_config(); + * @mtu: mtu size used on this port. + * @rpa_strip_vlan_tag: Strip VLAN Tag enable/disable. 
Instructs the device to + * remove the VLAN tag from all received tagged frames that are not + * replicated at the internal L2 switch. + * 0 - Do not strip the VLAN tag. + * 1 - Strip the VLAN tag. Regardless of this setting, VLAN tags are + * always placed into the RxDMA descriptor. + * + * This structure is used by the driver to pass the configuration parameters to + * configure Virtual Path. + */ +struct vxge_hw_vp_config { + u32 vp_id; + +#define VXGE_HW_VPATH_PRIORITY_MIN 0 +#define VXGE_HW_VPATH_PRIORITY_MAX 16 +#define VXGE_HW_VPATH_PRIORITY_DEFAULT 0 + + u32 min_bandwidth; +#define VXGE_HW_VPATH_BANDWIDTH_MIN 0 +#define VXGE_HW_VPATH_BANDWIDTH_MAX 100 +#define VXGE_HW_VPATH_BANDWIDTH_DEFAULT 0 + + struct vxge_hw_ring_config ring; + struct vxge_hw_fifo_config fifo; + struct vxge_hw_tim_intr_config tti; + struct vxge_hw_tim_intr_config rti; + + u32 mtu; +#define VXGE_HW_VPATH_MIN_INITIAL_MTU VXGE_HW_MIN_MTU +#define VXGE_HW_VPATH_MAX_INITIAL_MTU VXGE_HW_MAX_MTU +#define VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU 0xffffffff + + u32 rpa_strip_vlan_tag; +#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE 1 +#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE 0 +#define VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT 0xffffffff + +}; +/** + * struct vxge_hw_device_config - Device configuration. + * @dma_blockpool_initial: Initial size of DMA Pool + * @dma_blockpool_max: Maximum blocks in DMA pool + * @intr_mode: Line, or MSI-X interrupt. + * + * @rth_en: Enable Receive Traffic Hashing(RTH) using IT(Indirection Table). + * @rth_it_type: RTH IT table programming type + * @rts_mac_en: Enable Receive Traffic Steering using MAC destination address + * @vp_config: Configuration for virtual paths + * @device_poll_millis: Specify the interval (in mulliseconds) + * to wait for register reads + * + * Titan configuration. + * Contains per-device configuration parameters, including: + * - stats sampling interval, etc. + * + * In addition, struct vxge_hw_device_config{} includes "subordinate" + * configurations, including: + * - fifos and rings; + * - MAC (done at firmware level). + * + * See Titan User Guide for more details. + * Note: Valid (min, max) range for each attribute is specified in the body of + * the struct vxge_hw_device_config{} structure. Please refer to the + * corresponding include file. + * See also: struct vxge_hw_tim_intr_config{}. 
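+ *
+ * Illustrative sketch (hypothetical values, restricted to the constants
+ * defined in the structure below):
+ *
+ *     struct vxge_hw_device_config dev_cfg = {
+ *             .device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS,
+ *             .intr_mode          = VXGE_HW_INTR_MODE_MSIX,
+ *             .rth_en             = VXGE_HW_RTH_ENABLE,
+ *     };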
+ */ +struct vxge_hw_device_config { + u32 device_poll_millis; +#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1 +#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000 +#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000 + + u32 dma_blockpool_initial; + u32 dma_blockpool_max; +#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0 +#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0 +#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4 +#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096 + +#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2 + + u32 intr_mode:2, +#define VXGE_HW_INTR_MODE_IRQLINE 0 +#define VXGE_HW_INTR_MODE_MSIX 1 +#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2 + +#define VXGE_HW_INTR_MODE_DEF 0 + + rth_en:1, +#define VXGE_HW_RTH_DISABLE 0 +#define VXGE_HW_RTH_ENABLE 1 +#define VXGE_HW_RTH_DEFAULT 0 + + rth_it_type:1, +#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0 +#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1 +#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0 + + rts_mac_en:1, +#define VXGE_HW_RTS_MAC_DISABLE 0 +#define VXGE_HW_RTS_MAC_ENABLE 1 +#define VXGE_HW_RTS_MAC_DEFAULT 0 + + hwts_en:1; +#define VXGE_HW_HWTS_DISABLE 0 +#define VXGE_HW_HWTS_ENABLE 1 +#define VXGE_HW_HWTS_DEFAULT 1 + + struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS]; +}; + +/** + * function vxge_uld_link_up_f - Link-Up callback provided by driver. + * @devh: HW device handle. + * Link-up notification callback provided by the driver. + * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}. + * + * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_down_f{}, + * vxge_hw_driver_initialize(). + */ + +/** + * function vxge_uld_link_down_f - Link-Down callback provided by + * driver. + * @devh: HW device handle. + * + * Link-Down notification callback provided by the driver. + * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}. + * + * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{}, + * vxge_hw_driver_initialize(). + */ + +/** + * function vxge_uld_crit_err_f - Critical Error notification callback. + * @devh: HW device handle. + * (typically - at HW device iinitialization time). + * @type: Enumerated hw error, e.g.: double ECC. + * @serr_data: Titan status. + * @ext_data: Extended data. The contents depends on the @type. + * + * Link-Down notification callback provided by the driver. + * This is one of the per-driver callbacks, see struct vxge_hw_uld_cbs{}. + * + * See also: struct vxge_hw_uld_cbs{}, enum vxge_hw_event{}, + * vxge_hw_driver_initialize(). + */ + +/** + * struct vxge_hw_uld_cbs - driver "slow-path" callbacks. + * @link_up: See vxge_uld_link_up_f{}. + * @link_down: See vxge_uld_link_down_f{}. + * @crit_err: See vxge_uld_crit_err_f{}. + * + * Driver slow-path (per-driver) callbacks. + * Implemented by driver and provided to HW via + * vxge_hw_driver_initialize(). + * Note that these callbacks are not mandatory: HW will not invoke + * a callback if NULL is specified. + * + * See also: vxge_hw_driver_initialize(). + */ +struct vxge_hw_uld_cbs { + void (*link_up)(struct __vxge_hw_device *devh); + void (*link_down)(struct __vxge_hw_device *devh); + void (*crit_err)(struct __vxge_hw_device *devh, + enum vxge_hw_event type, u64 ext_data); +}; + +/* + * struct __vxge_hw_blockpool_entry - Block private data structure + * @item: List header used to link. + * @length: Length of the block + * @memblock: Virtual address block + * @dma_addr: DMA Address of the block. + * @dma_handle: DMA handle of the block. + * @acc_handle: DMA acc handle + * + * Block is allocated with a header to put the blocks into list. 
+ * + */ +struct __vxge_hw_blockpool_entry { + struct list_head item; + u32 length; + void *memblock; + dma_addr_t dma_addr; + struct pci_dev *dma_handle; + struct pci_dev *acc_handle; +}; + +/* + * struct __vxge_hw_blockpool - Block Pool + * @hldev: HW device + * @block_size: size of each block. + * @Pool_size: Number of blocks in the pool + * @pool_max: Maximum number of blocks above which to free additional blocks + * @req_out: Number of block requests with OS out standing + * @free_block_list: List of free blocks + * + * Block pool contains the DMA blocks preallocated. + * + */ +struct __vxge_hw_blockpool { + struct __vxge_hw_device *hldev; + u32 block_size; + u32 pool_size; + u32 pool_max; + u32 req_out; + struct list_head free_block_list; + struct list_head free_entry_list; +}; + +/* + * enum enum __vxge_hw_channel_type - Enumerated channel types. + * @VXGE_HW_CHANNEL_TYPE_UNKNOWN: Unknown channel. + * @VXGE_HW_CHANNEL_TYPE_FIFO: fifo. + * @VXGE_HW_CHANNEL_TYPE_RING: ring. + * @VXGE_HW_CHANNEL_TYPE_MAX: Maximum number of HW-supported + * (and recognized) channel types. Currently: 2. + * + * Enumerated channel types. Currently there are only two link-layer + * channels - Titan fifo and Titan ring. In the future the list will grow. + */ +enum __vxge_hw_channel_type { + VXGE_HW_CHANNEL_TYPE_UNKNOWN = 0, + VXGE_HW_CHANNEL_TYPE_FIFO = 1, + VXGE_HW_CHANNEL_TYPE_RING = 2, + VXGE_HW_CHANNEL_TYPE_MAX = 3 +}; + +/* + * struct __vxge_hw_channel + * @item: List item; used to maintain a list of open channels. + * @type: Channel type. See enum vxge_hw_channel_type{}. + * @devh: Device handle. HW device object that contains _this_ channel. + * @vph: Virtual path handle. Virtual Path Object that contains _this_ channel. + * @length: Channel length. Currently allocated number of descriptors. + * The channel length "grows" when more descriptors get allocated. + * See _hw_mempool_grow. + * @reserve_arr: Reserve array. Contains descriptors that can be reserved + * by driver for the subsequent send or receive operation. + * See vxge_hw_fifo_txdl_reserve(), + * vxge_hw_ring_rxd_reserve(). + * @reserve_ptr: Current pointer in the resrve array + * @reserve_top: Reserve top gives the maximum number of dtrs available in + * reserve array. + * @work_arr: Work array. Contains descriptors posted to the channel. + * Note that at any point in time @work_arr contains 3 types of + * descriptors: + * 1) posted but not yet consumed by Titan device; + * 2) consumed but not yet completed; + * 3) completed but not yet freed + * (via vxge_hw_fifo_txdl_free() or vxge_hw_ring_rxd_free()) + * @post_index: Post index. At any point in time points on the + * position in the channel, which'll contain next to-be-posted + * descriptor. + * @compl_index: Completion index. At any point in time points on the + * position in the channel, which will contain next + * to-be-completed descriptor. + * @free_arr: Free array. Contains completed descriptors that were freed + * (i.e., handed over back to HW) by driver. + * See vxge_hw_fifo_txdl_free(), vxge_hw_ring_rxd_free(). + * @free_ptr: current pointer in free array + * @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize + * to store per-operation control information. + * @stats: Pointer to common statistics + * @userdata: Per-channel opaque (void*) user-defined context, which may be + * driver object, ULP connection, etc. + * Once channel is open, @userdata is passed back to user via + * vxge_hw_channel_callback_f. + * + * HW channel object. 
+ * + * See also: enum vxge_hw_channel_type{}, enum vxge_hw_channel_flag + */ +struct __vxge_hw_channel { + struct list_head item; + enum __vxge_hw_channel_type type; + struct __vxge_hw_device *devh; + struct __vxge_hw_vpath_handle *vph; + u32 length; + u32 vp_id; + void **reserve_arr; + u32 reserve_ptr; + u32 reserve_top; + void **work_arr; + u32 post_index ____cacheline_aligned; + u32 compl_index ____cacheline_aligned; + void **free_arr; + u32 free_ptr; + void **orig_arr; + u32 per_dtr_space; + void *userdata; + struct vxge_hw_common_reg __iomem *common_reg; + u32 first_vp_id; + struct vxge_hw_vpath_stats_sw_common_info *stats; + +} ____cacheline_aligned; + +/* + * struct __vxge_hw_virtualpath - Virtual Path + * + * @vp_id: Virtual path id + * @vp_open: This flag specifies if vxge_hw_vp_open is called from LL Driver + * @hldev: Hal device + * @vp_config: Virtual Path Config + * @vp_reg: VPATH Register map address in BAR0 + * @vpmgmt_reg: VPATH_MGMT register map address + * @max_mtu: Max mtu that can be supported + * @vsport_number: vsport attached to this vpath + * @max_kdfc_db: Maximum kernel mode doorbells + * @max_nofl_db: Maximum non offload doorbells + * @tx_intr_num: Interrupt Number associated with the TX + + * @ringh: Ring Queue + * @fifoh: FIFO Queue + * @vpath_handles: Virtual Path handles list + * @stats_block: Memory for DMAing stats + * @stats: Vpath statistics + * + * Virtual path structure to encapsulate the data related to a virtual path. + * Virtual paths are allocated by the HW upon getting configuration from the + * driver and inserted into the list of virtual paths. + */ +struct __vxge_hw_virtualpath { + u32 vp_id; + + u32 vp_open; +#define VXGE_HW_VP_NOT_OPEN 0 +#define VXGE_HW_VP_OPEN 1 + + struct __vxge_hw_device *hldev; + struct vxge_hw_vp_config *vp_config; + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; + struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db; + + u32 max_mtu; + u32 vsport_number; + u32 max_kdfc_db; + u32 max_nofl_db; + u64 tim_tti_cfg1_saved; + u64 tim_tti_cfg3_saved; + u64 tim_rti_cfg1_saved; + u64 tim_rti_cfg3_saved; + + struct __vxge_hw_ring *____cacheline_aligned ringh; + struct __vxge_hw_fifo *____cacheline_aligned fifoh; + struct list_head vpath_handles; + struct __vxge_hw_blockpool_entry *stats_block; + struct vxge_hw_vpath_stats_hw_info *hw_stats; + struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; + struct vxge_hw_vpath_stats_sw_info *sw_stats; + spinlock_t lock; +}; + +/* + * struct __vxge_hw_vpath_handle - List item to store callback information + * @item: List head to keep the item in linked list + * @vpath: Virtual path to which this item belongs + * + * This structure is used to store the callback information. + */ +struct __vxge_hw_vpath_handle { + struct list_head item; + struct __vxge_hw_virtualpath *vpath; +}; + +/* + * struct __vxge_hw_device + * + * HW device object. + */ +/** + * struct __vxge_hw_device - Hal device object + * @magic: Magic Number + * @bar0: BAR0 virtual address. + * @pdev: Physical device handle + * @config: Confguration passed by the LL driver at initialization + * @link_state: Link state + * + * HW device object. 
Represents Titan adapter + */ +struct __vxge_hw_device { + u32 magic; +#define VXGE_HW_DEVICE_MAGIC 0x12345678 +#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD + void __iomem *bar0; + struct pci_dev *pdev; + struct net_device *ndev; + struct vxge_hw_device_config config; + enum vxge_hw_device_link_state link_state; + + const struct vxge_hw_uld_cbs *uld_callbacks; + + u32 host_type; + u32 func_id; + u32 access_rights; +#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1 +#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2 +#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4 + struct vxge_hw_legacy_reg __iomem *legacy_reg; + struct vxge_hw_toc_reg __iomem *toc_reg; + struct vxge_hw_common_reg __iomem *common_reg; + struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; + struct vxge_hw_srpcim_reg __iomem *srpcim_reg \ + [VXGE_HW_TITAN_SRPCIM_REG_SPACES]; + struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg \ + [VXGE_HW_TITAN_VPMGMT_REG_SPACES]; + struct vxge_hw_vpath_reg __iomem *vpath_reg \ + [VXGE_HW_TITAN_VPATH_REG_SPACES]; + u8 __iomem *kdfc; + u8 __iomem *usdc; + struct __vxge_hw_virtualpath virtual_paths \ + [VXGE_HW_MAX_VIRTUAL_PATHS]; + u64 vpath_assignments; + u64 vpaths_deployed; + u32 first_vp_id; + u64 tim_int_mask0[4]; + u32 tim_int_mask1[4]; + + struct __vxge_hw_blockpool block_pool; + struct vxge_hw_device_stats stats; + u32 debug_module_mask; + u32 debug_level; + u32 level_err; + u32 level_trace; + u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES]; +}; + +#define VXGE_HW_INFO_LEN 64 +/** + * struct vxge_hw_device_hw_info - Device information + * @host_type: Host Type + * @func_id: Function Id + * @vpath_mask: vpath bit mask + * @fw_version: Firmware version + * @fw_date: Firmware Date + * @flash_version: Firmware version + * @flash_date: Firmware Date + * @mac_addrs: Mac addresses for each vpath + * @mac_addr_masks: Mac address masks for each vpath + * + * Returns the vpath mask that has the bits set for each vpath allocated + * for the driver and the first mac address for each vpath + */ +struct vxge_hw_device_hw_info { + u32 host_type; +#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0 +#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1 +#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2 +#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3 +#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4 +#define VXGE_HW_SR_VH_FUNCTION0 5 +#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6 +#define VXGE_HW_VH_NORMAL_FUNCTION 7 + u64 function_mode; +#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1 +#define VXGE_HW_FUNCTION_MODE_SRIOV 2 +#define VXGE_HW_FUNCTION_MODE_MRIOV 3 +#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5 +#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6 +#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8 +#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9 +#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10 + + u32 func_id; + u64 vpath_mask; + struct vxge_hw_device_version fw_version; + struct vxge_hw_device_date fw_date; + struct vxge_hw_device_version flash_version; + struct vxge_hw_device_date flash_date; + u8 serial_number[VXGE_HW_INFO_LEN]; + u8 part_number[VXGE_HW_INFO_LEN]; + u8 product_desc[VXGE_HW_INFO_LEN]; + u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; + u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; +}; + +/** + * struct vxge_hw_device_attr - Device memory spaces. + * @bar0: BAR0 virtual address. + * @pdev: PCI device object. + * + * Device memory spaces. Includes configuration, BAR0 etc. per device + * mapped memories. 
Also, includes a pointer to OS-specific PCI device object. + */ +struct vxge_hw_device_attr { + void __iomem *bar0; + struct pci_dev *pdev; + const struct vxge_hw_uld_cbs *uld_callbacks; +}; + +#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls) + +#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \ + if (i < 16) { \ + m0[0] |= vxge_vBIT(0x8, (i*4), 4); \ + m0[1] |= vxge_vBIT(0x4, (i*4), 4); \ + } \ + else { \ + m1[0] = 0x80000000; \ + m1[1] = 0x40000000; \ + } \ +} + +#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \ + if (i < 16) { \ + m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \ + m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \ + } \ + else { \ + m1[0] = 0; \ + m1[1] = 0; \ + } \ +} + +#define VXGE_HW_DEVICE_STATS_PIO_READ(loc, offset) { \ + status = vxge_hw_mrpcim_stats_access(hldev, \ + VXGE_HW_STATS_OP_READ, \ + loc, \ + offset, \ + &val64); \ + if (status != VXGE_HW_OK) \ + return status; \ +} + +/* + * struct __vxge_hw_ring - Ring channel. + * @channel: Channel "base" of this ring, the common part of all HW + * channels. + * @mempool: Memory pool, the pool from which descriptors get allocated. + * (See vxge_hw_mm.h). + * @config: Ring configuration, part of device configuration + * (see struct vxge_hw_device_config{}). + * @ring_length: Length of the ring + * @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode, + * as per Titan User Guide. + * @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Titan spec, + * 1-buffer mode descriptor is 32 byte long, etc. + * @rxd_priv_size: Per RxD size reserved (by HW) for driver to keep + * per-descriptor data (e.g., DMA handle for Solaris) + * @per_rxd_space: Per rxd space requested by driver + * @rxds_per_block: Number of descriptors per hardware-defined RxD + * block. Depends on the (1-, 3-, 5-) buffer mode. + * @rxdblock_priv_size: Reserved at the end of each RxD block. HW internal + * usage. Not to confuse with @rxd_priv_size. + * @cmpl_cnt: Completion counter. Is reset to zero upon entering the ISR. + * @callback: Channel completion callback. HW invokes the callback when there + * are new completions on that channel. In many implementations + * the @callback executes in the hw interrupt context. + * @rxd_init: Channel's descriptor-initialize callback. + * See vxge_hw_ring_rxd_init_f{}. + * If not NULL, HW invokes the callback when opening + * the ring. + * @rxd_term: Channel's descriptor-terminate callback. If not NULL, + * HW invokes the callback when closing the corresponding channel. + * See also vxge_hw_channel_rxd_term_f{}. + * @stats: Statistics for ring + * Ring channel. + * + * Note: The structure is cache line aligned to better utilize + * CPU cache performance. 
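+ *
+ * The receive-completion callback a driver supplies has the shape
+ * sketched below ("my_rx_compl" is an illustrative name only):
+ *
+ *     enum vxge_hw_status my_rx_compl(struct __vxge_hw_ring *ringh,
+ *                                     void *rxdh, u8 t_code,
+ *                                     void *userdata);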
+ */ +struct __vxge_hw_ring { + struct __vxge_hw_channel channel; + struct vxge_hw_mempool *mempool; + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct vxge_hw_common_reg __iomem *common_reg; + u32 ring_length; + u32 buffer_mode; + u32 rxd_size; + u32 rxd_priv_size; + u32 per_rxd_space; + u32 rxds_per_block; + u32 rxdblock_priv_size; + u32 cmpl_cnt; + u32 vp_id; + u32 doorbell_cnt; + u32 total_db_cnt; + u64 rxds_limit; + u32 rtimer; + u64 tim_rti_cfg1_saved; + u64 tim_rti_cfg3_saved; + + enum vxge_hw_status (*callback)( + struct __vxge_hw_ring *ringh, + void *rxdh, + u8 t_code, + void *userdata); + + enum vxge_hw_status (*rxd_init)( + void *rxdh, + void *userdata); + + void (*rxd_term)( + void *rxdh, + enum vxge_hw_rxd_state state, + void *userdata); + + struct vxge_hw_vpath_stats_sw_ring_info *stats ____cacheline_aligned; + struct vxge_hw_ring_config *config; +} ____cacheline_aligned; + +/** + * enum enum vxge_hw_txdl_state - Descriptor (TXDL) state. + * @VXGE_HW_TXDL_STATE_NONE: Invalid state. + * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation. + * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the + * device. + * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for + * filling-in and posting later. + * + * Titan/HW descriptor states. + * + */ +enum vxge_hw_txdl_state { + VXGE_HW_TXDL_STATE_NONE = 0, + VXGE_HW_TXDL_STATE_AVAIL = 1, + VXGE_HW_TXDL_STATE_POSTED = 2, + VXGE_HW_TXDL_STATE_FREED = 3 +}; +/* + * struct __vxge_hw_fifo - Fifo. + * @channel: Channel "base" of this fifo, the common part of all HW + * channels. + * @mempool: Memory pool, from which descriptors get allocated. + * @config: Fifo configuration, part of device configuration + * (see struct vxge_hw_device_config{}). + * @interrupt_type: Interrupt type to be used + * @no_snoop_bits: See struct vxge_hw_fifo_config{}. + * @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock. + * on TxDL please refer to Titan UG. + * @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus + * per-TxDL HW private space (struct __vxge_hw_fifo_txdl_priv). + * @priv_size: Per-Tx descriptor space reserved for driver + * usage. + * @per_txdl_space: Per txdl private space for the driver + * @callback: Fifo completion callback. HW invokes the callback when there + * are new completions on that fifo. In many implementations + * the @callback executes in the hw interrupt context. + * @txdl_term: Fifo's descriptor-terminate callback. If not NULL, + * HW invokes the callback when closing the corresponding fifo. + * See also vxge_hw_fifo_txdl_term_f{}. + * @stats: Statistics of this fifo + * + * Fifo channel. + * Note: The structure is cache line aligned. 
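+ *
+ * The transmit-completion callback a driver supplies has the shape
+ * sketched below ("my_tx_compl" is an illustrative name only):
+ *
+ *     enum vxge_hw_status my_tx_compl(struct __vxge_hw_fifo *fifo_handle,
+ *                                     void *txdlh,
+ *                                     enum vxge_hw_fifo_tcode t_code,
+ *                                     void *userdata,
+ *                                     struct sk_buff ***skb_ptr,
+ *                                     int nr_skb, int *more);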
+ */ +struct __vxge_hw_fifo { + struct __vxge_hw_channel channel; + struct vxge_hw_mempool *mempool; + struct vxge_hw_fifo_config *config; + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct __vxge_hw_non_offload_db_wrapper __iomem *nofl_db; + u64 interrupt_type; + u32 no_snoop_bits; + u32 txdl_per_memblock; + u32 txdl_size; + u32 priv_size; + u32 per_txdl_space; + u32 vp_id; + u32 tx_intr_num; + u32 rtimer; + u64 tim_tti_cfg1_saved; + u64 tim_tti_cfg3_saved; + + enum vxge_hw_status (*callback)( + struct __vxge_hw_fifo *fifo_handle, + void *txdlh, + enum vxge_hw_fifo_tcode t_code, + void *userdata, + struct sk_buff ***skb_ptr, + int nr_skb, + int *more); + + void (*txdl_term)( + void *txdlh, + enum vxge_hw_txdl_state state, + void *userdata); + + struct vxge_hw_vpath_stats_sw_fifo_info *stats ____cacheline_aligned; +} ____cacheline_aligned; + +/* + * struct __vxge_hw_fifo_txdl_priv - Transmit descriptor HW-private data. + * @dma_addr: DMA (mapped) address of _this_ descriptor. + * @dma_handle: DMA handle used to map the descriptor onto device. + * @dma_offset: Descriptor's offset in the memory block. HW allocates + * descriptors in memory blocks (see struct vxge_hw_fifo_config{}) + * Each memblock is a contiguous block of DMA-able memory. + * @frags: Total number of fragments (that is, contiguous data buffers) + * carried by this TxDL. + * @align_vaddr_start: Aligned virtual address start + * @align_vaddr: Virtual address of the per-TxDL area in memory used for + * alignement. Used to place one or more mis-aligned fragments + * @align_dma_addr: DMA address translated from the @align_vaddr. + * @align_dma_handle: DMA handle that corresponds to @align_dma_addr. + * @align_dma_acch: DMA access handle corresponds to @align_dma_addr. + * @align_dma_offset: The current offset into the @align_vaddr area. + * Grows while filling the descriptor, gets reset. + * @align_used_frags: Number of fragments used. + * @alloc_frags: Total number of fragments allocated. + * @unused: TODO + * @next_txdl_priv: (TODO). + * @first_txdp: (TODO). + * @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous + * TxDL list. + * @txdlh: Corresponding txdlh to this TxDL. + * @memblock: Pointer to the TxDL memory block or memory page. + * on the next send operation. + * @dma_object: DMA address and handle of the memory block that contains + * the descriptor. This member is used only in the "checked" + * version of the HW (to enforce certain assertions); + * otherwise it gets compiled out. + * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage. + * + * Per-transmit decsriptor HW-private data. HW uses the space to keep DMA + * information associated with the descriptor. Note that driver can ask HW + * to allocate additional per-descriptor space for its own (driver-specific) + * purposes. + * + * See also: struct vxge_hw_ring_rxd_priv{}. + */ +struct __vxge_hw_fifo_txdl_priv { + dma_addr_t dma_addr; + struct pci_dev *dma_handle; + ptrdiff_t dma_offset; + u32 frags; + u8 *align_vaddr_start; + u8 *align_vaddr; + dma_addr_t align_dma_addr; + struct pci_dev *align_dma_handle; + struct pci_dev *align_dma_acch; + ptrdiff_t align_dma_offset; + u32 align_used_frags; + u32 alloc_frags; + u32 unused; + struct __vxge_hw_fifo_txdl_priv *next_txdl_priv; + struct vxge_hw_fifo_txd *first_txdp; + void *memblock; +}; + +/* + * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper + * @control_0: Bits 0 to 7 - Doorbell type. + * Bits 8 to 31 - Reserved. 
+ * Bits 32 to 39 - The highest TxD in this TxDL. + * Bits 40 to 47 - Reserved. + * Bits 48 to 55 - Reserved. + * Bits 56 to 63 - No snoop flags. + * @txdl_ptr: The starting location of the TxDL in host memory. + * + * Created by the host and written to the adapter via PIO to a Kernel Doorbell + * FIFO. All non-offload doorbell wrapper fields must be written by the host as + * part of a doorbell write. Consumed by the adapter but is not written by the + * adapter. + */ +struct __vxge_hw_non_offload_db_wrapper { + u64 control_0; +#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8) +#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_NODBW_TYPE_NODBW 0 + +#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8) +#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8) + +#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8) +#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8) +#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2 +#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1 + + u64 txdl_ptr; +}; + +/* + * TX Descriptor + */ + +/** + * struct vxge_hw_fifo_txd - Transmit Descriptor + * @control_0: Bits 0 to 6 - Reserved. + * Bit 7 - List Ownership. This field should be initialized + * to '1' by the driver before the transmit list pointer is + * written to the adapter. This field will be set to '0' by the + * adapter once it has completed transmitting the frame or frames in + * the list. Note - This field is only valid in TxD0. Additionally, + * for multi-list sequences, the driver should not release any + * buffers until the ownership of the last list in the multi-list + * sequence has been returned to the host. + * Bits 8 to 11 - Reserved + * Bits 12 to 15 - Transfer_Code. This field is only valid in + * TxD0. It is used to describe the status of the transmit data + * buffer transfer. This field is always overwritten by the + * adapter, so this field may be initialized to any value. + * Bits 16 to 17 - Host steering. This field allows the host to + * override the selection of the physical transmit port. + * Attention: + * Normal sounds as if learned from the switch rather than from + * the aggregation algorythms. + * 00: Normal. Use Destination/MAC Address + * lookup to determine the transmit port. + * 01: Send on physical Port1. + * 10: Send on physical Port0. + * 11: Send on both ports. + * Bits 18 to 21 - Reserved + * Bits 22 to 23 - Gather_Code. This field is set by the host and + * is used to describe how individual buffers comprise a frame. + * 10: First descriptor of a frame. + * 00: Middle of a multi-descriptor frame. + * 01: Last descriptor of a frame. + * 11: First and last descriptor of a frame (the entire frame + * resides in a single buffer). + * For multi-descriptor frames, the only valid gather code sequence + * is {10, [00], 01}. In other words, the descriptors must be placed + * in the list in the correct order. + * Bits 24 to 27 - Reserved + * Bits 28 to 29 - LSO_Frm_Encap. LSO Frame Encapsulation + * definition. Only valid in TxD0. This field allows the host to + * indicate the Ethernet encapsulation of an outbound LSO packet. + * 00 - classic mode (best guess) + * 01 - LLC + * 10 - SNAP + * 11 - DIX + * If "classic mode" is selected, the adapter will attempt to + * decode the frame's Ethernet encapsulation by examining the L/T + * field as follows: + * <= 0x05DC LLC/SNAP encoding; must examine DSAP/SSAP to determine + * if packet is IPv4 or IPv6. 
+ * 0x8870 Jumbo-SNAP encoding. + * 0x0800 IPv4 DIX encoding + * 0x86DD IPv6 DIX encoding + * others illegal encapsulation + * Bits 30 - LSO_ Flag. Large Send Offload (LSO) flag. + * Set to 1 to perform segmentation offload for TCP/UDP. + * This field is valid only in TxD0. + * Bits 31 to 33 - Reserved. + * Bits 34 to 47 - LSO_MSS. TCP/UDP LSO Maximum Segment Size + * This field is meaningful only when LSO_Control is non-zero. + * When LSO_Control is set to TCP_LSO, the single (possibly large) + * TCP segment described by this TxDL will be sent as a series of + * TCP segments each of which contains no more than LSO_MSS + * payload bytes. + * When LSO_Control is set to UDP_LSO, the single (possibly large) + * UDP datagram described by this TxDL will be sent as a series of + * UDP datagrams each of which contains no more than LSO_MSS + * payload bytes. + * All outgoing frames from this TxDL will have LSO_MSS bytes of UDP + * or TCP payload, with the exception of the last, which will have + * <= LSO_MSS bytes of payload. + * Bits 48 to 63 - Buffer_Size. Number of valid bytes in the + * buffer to be read by the adapter. This field is written by the + * host. A value of 0 is illegal. + * Bits 32 to 63 - This value is written by the adapter upon + * completion of a UDP or TCP LSO operation and indicates the number + * of UDP or TCP payload bytes that were transmitted. 0x0000 will be + * returned for any non-LSO operation. + * @control_1: Bits 0 to 4 - Reserved. + * Bit 5 - Tx_CKO_IPv4 Set to a '1' to enable IPv4 header checksum + * offload. This field is only valid in the first TxD of a frame. + * Bit 6 - Tx_CKO_TCP Set to a '1' to enable TCP checksum offload. + * This field is only valid in the first TxD of a frame (the TxD's + * gather code must be 10 or 11). The driver should only set this + * bit if it can guarantee that TCP is present. + * Bit 7 - Tx_CKO_UDP Set to a '1' to enable UDP checksum offload. + * This field is only valid in the first TxD of a frame (the TxD's + * gather code must be 10 or 11). The driver should only set this + * bit if it can guarantee that UDP is present. + * Bits 8 to 14 - Reserved. + * Bit 15 - Tx_VLAN_Enable VLAN tag insertion flag. Set to a '1' to + * instruct the adapter to insert the VLAN tag specified by the + * Tx_VLAN_Tag field. This field is only valid in the first TxD of + * a frame. + * Bits 16 to 31 - Tx_VLAN_Tag. Variable portion of the VLAN tag + * to be inserted into the frame by the adapter (the first two bytes + * of a VLAN tag are always 0x8100). This field is only valid if the + * Tx_VLAN_Enable field is set to '1'. + * Bits 32 to 33 - Reserved. + * Bits 34 to 39 - Tx_Int_Number. Indicates which Tx interrupt + * number the frame associated with. This field is written by the + * host. It is only valid in the first TxD of a frame. + * Bits 40 to 42 - Reserved. + * Bit 43 - Set to 1 to exclude the frame from bandwidth metering + * functions. This field is valid only in the first TxD + * of a frame. + * Bits 44 to 45 - Reserved. + * Bit 46 - Tx_Int_Per_List Set to a '1' to instruct the adapter to + * generate an interrupt as soon as all of the frames in the list + * have been transmitted. In order to have per-frame interrupts, + * the driver should place a maximum of one frame per list. This + * field is only valid in the first TxD of a frame. + * Bit 47 - Tx_Int_Utilization Set to a '1' to instruct the adapter + * to count the frame toward the utilization interrupt specified in + * the Tx_Int_Number field. 
This field is only valid in the first + * TxD of a frame. + * Bits 48 to 63 - Reserved. + * @buffer_pointer: Buffer start address. + * @host_control: Host_Control.Opaque 64bit data stored by driver inside the + * Titan descriptor prior to posting the latter on the fifo + * via vxge_hw_fifo_txdl_post().The %host_control is returned as is + * to the driver with each completed descriptor. + * + * Transmit descriptor (TxD).Fifo descriptor contains configured number + * (list) of TxDs. * For more details please refer to Titan User Guide, + * Section 5.4.2 "Transmit Descriptor (TxD) Format". + */ +struct vxge_hw_fifo_txd { + u64 control_0; +#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7) + +#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4) +#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED + + +#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2) +#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST +#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST + + +#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30) + +#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14) + +#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16) + + u64 control_1; +#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5) +#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6) +#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7) +#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15) + +#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16) + +#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6) + +#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46) +#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47) + + u64 buffer_pointer; + + u64 host_control; +}; + +/** + * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring + * @host_control: This field is exclusively for host use and is "readonly" + * from the adapter's perspective. + * @control_0:Bits 0 to 6 - RTH_Bucket get + * Bit 7 - Own Descriptor ownership bit. This bit is set to 1 + * by the host, and is set to 0 by the adapter. + * 0 - Host owns RxD and buffer. + * 1 - The adapter owns RxD and buffer. + * Bit 8 - Fast_Path_Eligible When set, indicates that the + * received frame meets all of the criteria for fast path processing. + * The required criteria are as follows: + * !SYN & + * (Transfer_Code == "Transfer OK") & + * (!Is_IP_Fragment) & + * ((Is_IPv4 & computed_L3_checksum == 0xFFFF) | + * (Is_IPv6)) & + * ((Is_TCP & computed_L4_checksum == 0xFFFF) | + * (Is_UDP & (computed_L4_checksum == 0xFFFF | + * computed _L4_checksum == 0x0000))) + * (same meaning for all RxD buffer modes) + * Bit 9 - L3 Checksum Correct + * Bit 10 - L4 Checksum Correct + * Bit 11 - Reserved + * Bit 12 to 15 - This field is written by the adapter. It is + * used to report the status of the frame transfer to the host. + * 0x0 - Transfer OK + * 0x4 - RDA Failure During Transfer + * 0x5 - Unparseable Packet, such as unknown IPv6 header. + * 0x6 - Frame integrity error (FCS or ECC). + * 0x7 - Buffer Size Error. The provided buffer(s) were not + * appropriately sized and data loss occurred. + * 0x8 - Internal ECC Error. RxD corrupted. + * 0x9 - IPv4 Checksum error + * 0xA - TCP/UDP Checksum error + * 0xF - Unknown Error or Multiple Error. Indicates an + * unknown problem or that more than one of transfer codes is set. 
+ * Bit 16 - SYN The adapter sets this field to indicate that + * the incoming frame contained a TCP segment with its SYN bit + * set and its ACK bit NOT set. (same meaning for all RxD buffer + * modes) + * Bit 17 - Is ICMP + * Bit 18 - RTH_SPDM_HIT Set to 1 if there was a match in the + * Socket Pair Direct Match Table and the frame was steered based + * on SPDM. + * Bit 19 - RTH_IT_HIT Set to 1 if there was a match in the + * Indirection Table and the frame was steered based on hash + * indirection. + * Bit 20 to 23 - RTH_HASH_TYPE Indicates the function (hash + * type) that was used to calculate the hash. + * Bit 19 - IS_VLAN Set to '1' if the frame was/is VLAN + * tagged. + * Bit 25 to 26 - ETHER_ENCAP Reflects the Ethernet encapsulation + * of the received frame. + * 0x0 - Ethernet DIX + * 0x1 - LLC + * 0x2 - SNAP (includes Jumbo-SNAP) + * 0x3 - IPX + * Bit 27 - IS_IPV4 Set to '1' if the frame contains an IPv4 packet. + * Bit 28 - IS_IPV6 Set to '1' if the frame contains an IPv6 packet. + * Bit 29 - IS_IP_FRAG Set to '1' if the frame contains a fragmented + * IP packet. + * Bit 30 - IS_TCP Set to '1' if the frame contains a TCP segment. + * Bit 31 - IS_UDP Set to '1' if the frame contains a UDP message. + * Bit 32 to 47 - L3_Checksum[0:15] The IPv4 checksum value that + * arrived with the frame. If the resulting computed IPv4 header + * checksum for the frame did not produce the expected 0xFFFF value, + * then the transfer code would be set to 0x9. + * Bit 48 to 63 - L4_Checksum[0:15] The TCP/UDP checksum value that + * arrived with the frame. If the resulting computed TCP/UDP checksum + * for the frame did not produce the expected 0xFFFF value, then the + * transfer code would be set to 0xA. + * @control_1:Bits 0 to 1 - Reserved + * Bits 2 to 15 - Buffer0_Size.This field is set by the host and + * eventually overwritten by the adapter. The host writes the + * available buffer size in bytes when it passes the descriptor to + * the adapter. When a frame is delivered the host, the adapter + * populates this field with the number of bytes written into the + * buffer. The largest supported buffer is 16, 383 bytes. + * Bit 16 to 47 - RTH Hash Value 32-bit RTH hash value. Only valid if + * RTH_HASH_TYPE (Control_0, bits 20:23) is nonzero. + * Bit 48 to 63 - VLAN_Tag[0:15] The contents of the variable portion + * of the VLAN tag, if one was detected by the adapter. This field is + * populated even if VLAN-tag stripping is enabled. + * @buffer0_ptr: Pointer to buffer. This field is populated by the driver. 
+ * + * One buffer mode RxD for ring structure + */ +struct vxge_hw_ring_rxd_1 { + u64 host_control; + u64 control_0; +#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7) + +#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7) + +#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1) + +#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1) + +#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1) + +#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4) +#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4) + +#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED + +#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1) + +#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1) + +#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1) + +#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1) + +#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4) + +#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1) + +#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2) + +#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5) + +#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16) + +#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16) + + u64 control_1; + +#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14) +#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14) +#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14) + +#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32) + +#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16) + + u64 buffer0_ptr; +}; + +enum vxge_hw_rth_algoritms { + RTH_ALG_JENKINS = 0, + RTH_ALG_MS_RSS = 1, + RTH_ALG_CRC32C = 2 +}; + +/** + * struct vxge_hw_rth_hash_types - RTH hash types. + * @hash_type_tcpipv4_en: Enables RTH field type HashTypeTcpIPv4 + * @hash_type_ipv4_en: Enables RTH field type HashTypeIPv4 + * @hash_type_tcpipv6_en: Enables RTH field type HashTypeTcpIPv6 + * @hash_type_ipv6_en: Enables RTH field type HashTypeIPv6 + * @hash_type_tcpipv6ex_en: Enables RTH field type HashTypeTcpIPv6Ex + * @hash_type_ipv6ex_en: Enables RTH field type HashTypeIPv6Ex + * + * Used to pass RTH hash types to rts_rts_set. + * + * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). + */ +struct vxge_hw_rth_hash_types { + u8 hash_type_tcpipv4_en:1, + hash_type_ipv4_en:1, + hash_type_tcpipv6_en:1, + hash_type_ipv6_en:1, + hash_type_tcpipv6ex_en:1, + hash_type_ipv6ex_en:1; +}; + +void vxge_hw_device_debug_set( + struct __vxge_hw_device *devh, + enum vxge_debug_level level, + u32 mask); + +u32 +vxge_hw_device_error_level_get(struct __vxge_hw_device *devh); + +u32 +vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh); + +/** + * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor. + * @buf_mode: Buffer mode (1, 3 or 5) + * + * This function returns the size of RxD for given buffer mode + */ +static inline u32 vxge_hw_ring_rxd_size_get(u32 buf_mode) +{ + return sizeof(struct vxge_hw_ring_rxd_1); +} + +/** + * vxge_hw_ring_rxds_per_block_get - Get the number of rxds per block. 
+ * @buf_mode: Buffer mode (1 buffer mode only)
+ *
+ * This function returns the number of RxDs per RxD block for the given
+ * buffer mode
+ */
+static inline u32 vxge_hw_ring_rxds_per_block_get(u32 buf_mode)
+{
+        return (u32)((VXGE_HW_BLOCK_SIZE-16) /
+                sizeof(struct vxge_hw_ring_rxd_1));
+}
+
+/**
+ * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
+ * @rxdh: Descriptor handle.
+ * @dma_pointer: DMA address of a single receive buffer this descriptor
+ * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
+ * the receive buffer should already be mapped to the device
+ * @size: Size of the receive @dma_pointer buffer.
+ *
+ * Prepare 1-buffer-mode Rx descriptor for posting
+ * (via vxge_hw_ring_rxd_post()).
+ *
+ * This inline helper-function does not return any parameters and always
+ * succeeds.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_set(
+        void *rxdh,
+        dma_addr_t dma_pointer,
+        u32 size)
+{
+        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+        rxdp->buffer0_ptr = dma_pointer;
+        rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
+        rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
+}
+
+/**
+ * vxge_hw_ring_rxd_1b_get - Get data from the completed 1-buf
+ * descriptor.
+ * @ring_handle: Ring handle.
+ * @rxdh: Descriptor handle.
+ * @pkt_length: Length (in bytes) of the data in the buffer carried by
+ * this descriptor. Returned by HW.
+ *
+ * Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
+ * This inline helper-function uses the completed descriptor to populate
+ * the "out" parameters. The function always succeeds.
+ *
+ */
+static inline
+void vxge_hw_ring_rxd_1b_get(
+        struct __vxge_hw_ring *ring_handle,
+        void *rxdh,
+        u32 *pkt_length)
+{
+        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+
+        *pkt_length =
+                (u32)VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
+}
+
+/**
+ * vxge_hw_ring_rxd_1b_info_get - Get extended information associated with
+ * a completed receive descriptor for 1b mode.
+ * @ring_handle: Ring handle.
+ * @rxdh: Descriptor handle.
+ * @rxd_info: Descriptor information
+ *
+ * Retrieve extended information associated with a completed receive descriptor.
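+ *
+ * Example (editor's illustrative sketch): a receive completion handler
+ * would typically pair this call with vxge_hw_ring_rxd_1b_get() on a
+ * descriptor handed back by the ring completion API (declared elsewhere
+ * in this header), e.g.
+ *
+ *      struct vxge_hw_ring_rxd_info ext_info;
+ *      u32 pkt_length;
+ *
+ *      vxge_hw_ring_rxd_1b_get(ringh, rxdh, &pkt_length);
+ *      vxge_hw_ring_rxd_1b_info_get(ringh, rxdh, &ext_info);
+ *
+ * The surrounding buffer management (DMA unmap, skb handling) is omitted.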
+ * + */ +static inline +void vxge_hw_ring_rxd_1b_info_get( + struct __vxge_hw_ring *ring_handle, + void *rxdh, + struct vxge_hw_ring_rxd_info *rxd_info) +{ + + struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; + rxd_info->syn_flag = + (u32)VXGE_HW_RING_RXD_SYN_GET(rxdp->control_0); + rxd_info->is_icmp = + (u32)VXGE_HW_RING_RXD_IS_ICMP_GET(rxdp->control_0); + rxd_info->fast_path_eligible = + (u32)VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(rxdp->control_0); + rxd_info->l3_cksum_valid = + (u32)VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(rxdp->control_0); + rxd_info->l3_cksum = + (u32)VXGE_HW_RING_RXD_L3_CKSUM_GET(rxdp->control_0); + rxd_info->l4_cksum_valid = + (u32)VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(rxdp->control_0); + rxd_info->l4_cksum = + (u32)VXGE_HW_RING_RXD_L4_CKSUM_GET(rxdp->control_0); + rxd_info->frame = + (u32)VXGE_HW_RING_RXD_ETHER_ENCAP_GET(rxdp->control_0); + rxd_info->proto = + (u32)VXGE_HW_RING_RXD_FRAME_PROTO_GET(rxdp->control_0); + rxd_info->is_vlan = + (u32)VXGE_HW_RING_RXD_IS_VLAN_GET(rxdp->control_0); + rxd_info->vlan = + (u32)VXGE_HW_RING_RXD_VLAN_TAG_GET(rxdp->control_1); + rxd_info->rth_bucket = + (u32)VXGE_HW_RING_RXD_RTH_BUCKET_GET(rxdp->control_0); + rxd_info->rth_it_hit = + (u32)VXGE_HW_RING_RXD_RTH_IT_HIT_GET(rxdp->control_0); + rxd_info->rth_spdm_hit = + (u32)VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(rxdp->control_0); + rxd_info->rth_hash_type = + (u32)VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(rxdp->control_0); + rxd_info->rth_value = + (u32)VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(rxdp->control_1); +} + +/** + * vxge_hw_ring_rxd_private_get - Get driver private per-descriptor data + * of 1b mode 3b mode ring. + * @rxdh: Descriptor handle. + * + * Returns: private driver info associated with the descriptor. + * driver requests per-descriptor space via vxge_hw_ring_attr. + * + */ +static inline void *vxge_hw_ring_rxd_private_get(void *rxdh) +{ + struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; + return (void *)(size_t)rxdp->host_control; +} + +/** + * vxge_hw_fifo_txdl_cksum_set_bits - Offload checksum. + * @txdlh: Descriptor handle. + * @cksum_bits: Specifies which checksums are to be offloaded: IPv4, + * and/or TCP and/or UDP. + * + * Ask Titan to calculate IPv4 & transport checksums for _this_ transmit + * descriptor. + * This API is part of the preparation of the transmit descriptor for posting + * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include + * vxge_hw_fifo_txdl_mss_set(), vxge_hw_fifo_txdl_buffer_set_aligned(), + * and vxge_hw_fifo_txdl_buffer_set(). + * All these APIs fill in the fields of the fifo descriptor, + * in accordance with the Titan specification. + * + */ +static inline void vxge_hw_fifo_txdl_cksum_set_bits(void *txdlh, u64 cksum_bits) +{ + struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; + txdp->control_1 |= cksum_bits; +} + +/** + * vxge_hw_fifo_txdl_mss_set - Set MSS. + * @txdlh: Descriptor handle. + * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the + * driver, which in turn inserts the MSS into the @txdlh. + * + * This API is part of the preparation of the transmit descriptor for posting + * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include + * vxge_hw_fifo_txdl_buffer_set(), vxge_hw_fifo_txdl_buffer_set_aligned(), + * and vxge_hw_fifo_txdl_cksum_set_bits(). + * All these APIs fill in the fields of the fifo descriptor, + * in accordance with the Titan specification. 
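+ *
+ * Example (editor's illustrative sketch, assuming a TxDL handle @dtr
+ * obtained from the fifo reserve API and the VXGE_HW_FIFO_TXD_TX_CKO_*
+ * flags defined earlier in this header): a TSO transmit path would
+ * typically combine the preparation calls as
+ *
+ *      vxge_hw_fifo_txdl_mss_set(dtr, skb_shinfo(skb)->gso_size);
+ *      vxge_hw_fifo_txdl_cksum_set_bits(dtr,
+ *                      VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
+ *                      VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
+ *                      VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
+ *      vxge_hw_fifo_txdl_post(fifo_hw, dtr);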
+ * + */ +static inline void vxge_hw_fifo_txdl_mss_set(void *txdlh, int mss) +{ + struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; + + txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_EN; + txdp->control_0 |= VXGE_HW_FIFO_TXD_LSO_MSS(mss); +} + +/** + * vxge_hw_fifo_txdl_vlan_set - Set VLAN tag. + * @txdlh: Descriptor handle. + * @vlan_tag: 16bit VLAN tag. + * + * Insert VLAN tag into specified transmit descriptor. + * The actual insertion of the tag into outgoing frame is done by the hardware. + */ +static inline void vxge_hw_fifo_txdl_vlan_set(void *txdlh, u16 vlan_tag) +{ + struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; + + txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_ENABLE; + txdp->control_1 |= VXGE_HW_FIFO_TXD_VLAN_TAG(vlan_tag); +} + +/** + * vxge_hw_fifo_txdl_private_get - Retrieve per-descriptor private data. + * @txdlh: Descriptor handle. + * + * Retrieve per-descriptor private data. + * Note that driver requests per-descriptor space via + * struct vxge_hw_fifo_attr passed to + * vxge_hw_vpath_open(). + * + * Returns: private driver data associated with the descriptor. + */ +static inline void *vxge_hw_fifo_txdl_private_get(void *txdlh) +{ + struct vxge_hw_fifo_txd *txdp = (struct vxge_hw_fifo_txd *)txdlh; + + return (void *)(size_t)txdp->host_control; +} + +/** + * struct vxge_hw_ring_attr - Ring open "template". + * @callback: Ring completion callback. HW invokes the callback when there + * are new completions on that ring. In many implementations + * the @callback executes in the hw interrupt context. + * @rxd_init: Ring's descriptor-initialize callback. + * See vxge_hw_ring_rxd_init_f{}. + * If not NULL, HW invokes the callback when opening + * the ring. + * @rxd_term: Ring's descriptor-terminate callback. If not NULL, + * HW invokes the callback when closing the corresponding ring. + * See also vxge_hw_ring_rxd_term_f{}. + * @userdata: User-defined "context" of _that_ ring. Passed back to the + * user as one of the @callback, @rxd_init, and @rxd_term arguments. + * @per_rxd_space: If specified (i.e., greater than zero): extra space + * reserved by HW per each receive descriptor. + * Can be used to store + * and retrieve on completion, information specific + * to the driver. + * + * Ring open "template". User fills the structure with ring + * attributes and passes it to vxge_hw_vpath_open(). + */ +struct vxge_hw_ring_attr { + enum vxge_hw_status (*callback)( + struct __vxge_hw_ring *ringh, + void *rxdh, + u8 t_code, + void *userdata); + + enum vxge_hw_status (*rxd_init)( + void *rxdh, + void *userdata); + + void (*rxd_term)( + void *rxdh, + enum vxge_hw_rxd_state state, + void *userdata); + + void *userdata; + u32 per_rxd_space; +}; + +/** + * function vxge_hw_fifo_callback_f - FIFO callback. + * @vpath_handle: Virtual path whose Fifo "containing" 1 or more completed + * descriptors. + * @txdlh: First completed descriptor. + * @txdl_priv: Pointer to per txdl space allocated + * @t_code: Transfer code, as per Titan User Guide. + * Returned by HW. + * @host_control: Opaque 64bit data stored by driver inside the Titan + * descriptor prior to posting the latter on the fifo + * via vxge_hw_fifo_txdl_post(). The @host_control is returned + * as is to the driver with each completed descriptor. + * @userdata: Opaque per-fifo data specified at fifo open + * time, via vxge_hw_vpath_open(). + * + * Fifo completion callback (type declaration). A single per-fifo + * callback is specified at fifo open time, via + * vxge_hw_vpath_open(). 
Typically gets called as part of the processing + * of the Interrupt Service Routine. + * + * Fifo callback gets called by HW if, and only if, there is at least + * one new completion on a given fifo. Upon processing the first @txdlh driver + * is _supposed_ to continue consuming completions using: + * - vxge_hw_fifo_txdl_next_completed() + * + * Note that failure to process new completions in a timely fashion + * leads to VXGE_HW_INF_OUT_OF_DESCRIPTORS condition. + * + * Non-zero @t_code means failure to process transmit descriptor. + * + * In the "transmit" case the failure could happen, for instance, when the + * link is down, in which case Titan completes the descriptor because it + * is not able to send the data out. + * + * For details please refer to Titan User Guide. + * + * See also: vxge_hw_fifo_txdl_next_completed(), vxge_hw_fifo_txdl_term_f{}. + */ +/** + * function vxge_hw_fifo_txdl_term_f - Terminate descriptor callback. + * @txdlh: First completed descriptor. + * @txdl_priv: Pointer to per txdl space allocated + * @state: One of the enum vxge_hw_txdl_state{} enumerated states. + * @userdata: Per-fifo user data (a.k.a. context) specified at + * fifo open time, via vxge_hw_vpath_open(). + * + * Terminate descriptor callback. Unless NULL is specified in the + * struct vxge_hw_fifo_attr{} structure passed to vxge_hw_vpath_open()), + * HW invokes the callback as part of closing fifo, prior to + * de-allocating the ring and associated data structures + * (including descriptors). + * driver should utilize the callback to (for instance) unmap + * and free DMA data buffers associated with the posted (state = + * VXGE_HW_TXDL_STATE_POSTED) descriptors, + * as well as other relevant cleanup functions. + * + * See also: struct vxge_hw_fifo_attr{} + */ +/** + * struct vxge_hw_fifo_attr - Fifo open "template". + * @callback: Fifo completion callback. HW invokes the callback when there + * are new completions on that fifo. In many implementations + * the @callback executes in the hw interrupt context. + * @txdl_term: Fifo's descriptor-terminate callback. If not NULL, + * HW invokes the callback when closing the corresponding fifo. + * See also vxge_hw_fifo_txdl_term_f{}. + * @userdata: User-defined "context" of _that_ fifo. Passed back to the + * user as one of the @callback, and @txdl_term arguments. + * @per_txdl_space: If specified (i.e., greater than zero): extra space + * reserved by HW per each transmit descriptor. Can be used to + * store, and retrieve on completion, information specific + * to the driver. + * + * Fifo open "template". User fills the structure with fifo + * attributes and passes it to vxge_hw_vpath_open(). + */ +struct vxge_hw_fifo_attr { + + enum vxge_hw_status (*callback)( + struct __vxge_hw_fifo *fifo_handle, + void *txdlh, + enum vxge_hw_fifo_tcode t_code, + void *userdata, + struct sk_buff ***skb_ptr, + int nr_skb, int *more); + + void (*txdl_term)( + void *txdlh, + enum vxge_hw_txdl_state state, + void *userdata); + + void *userdata; + u32 per_txdl_space; +}; + +/** + * struct vxge_hw_vpath_attr - Attributes of virtual path + * @vp_id: Identifier of Virtual Path + * @ring_attr: Attributes of ring for non-offload receive + * @fifo_attr: Attributes of fifo for non-offload transmit + * + * Attributes of virtual path. This structure is passed as parameter + * to the vxge_hw_vpath_open() routine to set the attributes of ring and fifo. 
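+ *
+ * Example (editor's illustrative sketch; the callback and context names
+ * below are hypothetical): a driver opening a vpath would typically fill
+ * the structure along these lines before calling vxge_hw_vpath_open():
+ *
+ *      struct vxge_hw_vpath_attr attr = {0};
+ *
+ *      attr.vp_id = vp_id;
+ *      attr.ring_attr.callback = my_rx_compl;
+ *      attr.ring_attr.userdata = ring_ctx;
+ *      attr.ring_attr.per_rxd_space = sizeof(struct my_rx_priv);
+ *      attr.fifo_attr.callback = my_tx_compl;
+ *      attr.fifo_attr.txdl_term = my_txdl_term;
+ *      attr.fifo_attr.userdata = fifo_ctx;
+ *      status = vxge_hw_vpath_open(devh, &attr, &vp_handle);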
+ */ +struct vxge_hw_vpath_attr { + u32 vp_id; + struct vxge_hw_ring_attr ring_attr; + struct vxge_hw_fifo_attr fifo_attr; +}; + +enum vxge_hw_status vxge_hw_device_hw_info_get( + void __iomem *bar0, + struct vxge_hw_device_hw_info *hw_info); + +enum vxge_hw_status vxge_hw_device_config_default_get( + struct vxge_hw_device_config *device_config); + +/** + * vxge_hw_device_link_state_get - Get link state. + * @devh: HW device handle. + * + * Get link state. + * Returns: link state. + */ +static inline +enum vxge_hw_device_link_state vxge_hw_device_link_state_get( + struct __vxge_hw_device *devh) +{ + return devh->link_state; +} + +void vxge_hw_device_terminate(struct __vxge_hw_device *devh); + +const u8 * +vxge_hw_device_serial_number_get(struct __vxge_hw_device *devh); + +u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh); + +const u8 * +vxge_hw_device_product_name_get(struct __vxge_hw_device *devh); + +enum vxge_hw_status vxge_hw_device_initialize( + struct __vxge_hw_device **devh, + struct vxge_hw_device_attr *attr, + struct vxge_hw_device_config *device_config); + +enum vxge_hw_status vxge_hw_device_getpause_data( + struct __vxge_hw_device *devh, + u32 port, + u32 *tx, + u32 *rx); + +enum vxge_hw_status vxge_hw_device_setpause_data( + struct __vxge_hw_device *devh, + u32 port, + u32 tx, + u32 rx); + +static inline void *vxge_os_dma_malloc(struct pci_dev *pdev, + unsigned long size, + struct pci_dev **p_dmah, + struct pci_dev **p_dma_acch) +{ + gfp_t flags; + void *vaddr; + unsigned long misaligned = 0; + int realloc_flag = 0; + *p_dma_acch = *p_dmah = NULL; + + if (in_interrupt()) + flags = GFP_ATOMIC | GFP_DMA; + else + flags = GFP_KERNEL | GFP_DMA; +realloc: + vaddr = kmalloc((size), flags); + if (vaddr == NULL) + return vaddr; + misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr, + VXGE_CACHE_LINE_SIZE); + if (realloc_flag) + goto out; + + if (misaligned) { + /* misaligned, free current one and try allocating + * size + VXGE_CACHE_LINE_SIZE memory + */ + kfree(vaddr); + size += VXGE_CACHE_LINE_SIZE; + realloc_flag = 1; + goto realloc; + } +out: + *(unsigned long *)p_dma_acch = misaligned; + vaddr = (void *)((u8 *)vaddr + misaligned); + return vaddr; +} + +static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr, + struct pci_dev **p_dma_acch) +{ + unsigned long misaligned = *(unsigned long *)p_dma_acch; + u8 *tmp = (u8 *)vaddr; + tmp -= misaligned; + kfree((void *)tmp); +} + +/* + * __vxge_hw_mempool_item_priv - will return pointer on per item private space + */ +static inline void* +__vxge_hw_mempool_item_priv( + struct vxge_hw_mempool *mempool, + u32 memblock_idx, + void *item, + u32 *memblock_item_idx) +{ + ptrdiff_t offset; + void *memblock = mempool->memblocks_arr[memblock_idx]; + + + offset = (u32)((u8 *)item - (u8 *)memblock); + vxge_assert(offset >= 0 && (u32)offset < mempool->memblock_size); + + (*memblock_item_idx) = (u32) offset / mempool->item_size; + vxge_assert((*memblock_item_idx) < mempool->items_per_memblock); + + return (u8 *)mempool->memblocks_priv_arr[memblock_idx] + + (*memblock_item_idx) * mempool->items_priv_size; +} + +/* + * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated + * for the fifo. 
+ * @fifo: Fifo + * @txdp: Poniter to a TxD + */ +static inline struct __vxge_hw_fifo_txdl_priv * +__vxge_hw_fifo_txdl_priv( + struct __vxge_hw_fifo *fifo, + struct vxge_hw_fifo_txd *txdp) +{ + return (struct __vxge_hw_fifo_txdl_priv *) + (((char *)((ulong)txdp->host_control)) + + fifo->per_txdl_space); +} + +enum vxge_hw_status vxge_hw_vpath_open( + struct __vxge_hw_device *devh, + struct vxge_hw_vpath_attr *attr, + struct __vxge_hw_vpath_handle **vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_close( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status +vxge_hw_vpath_reset( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status +vxge_hw_vpath_recover_from_reset( + struct __vxge_hw_vpath_handle *vpath_handle); + +void +vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp); + +enum vxge_hw_status +vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ringh); + +enum vxge_hw_status vxge_hw_vpath_mtu_set( + struct __vxge_hw_vpath_handle *vpath_handle, + u32 new_mtu); + +void +vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + u64 ret = 0; + ret = readl(addr + 4); + ret <<= 32; + ret |= readl(addr); + + return ret; +} +#endif + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel((u32) (val), addr); + writel((u32) (val >> 32), (addr + 4)); +} +#endif + +static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr) +{ + writel(val, addr + 4); +} + +static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr) +{ + writel(val, addr); +} + +enum vxge_hw_status +vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off); + +enum vxge_hw_status +vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); + +/** + * vxge_debug_ll + * @level: level of debug verbosity. + * @mask: mask for the debug + * @buf: Circular buffer for tracing + * @fmt: printf like format string + * + * Provides logging facilities. Can be customized on per-module + * basis or/and with debug levels. Input parameters, except + * module and level, are the same as posix printf. This function + * may be compiled out if DEBUG macro was never defined. + * See also: enum vxge_debug_level{}. + */ +#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) +#define vxge_debug_ll(level, mask, fmt, ...) do { \ + if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \ + (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\ + if ((mask & VXGE_DEBUG_MASK) == mask) \ + printk(fmt "\n", __VA_ARGS__); \ +} while (0) +#else +#define vxge_debug_ll(level, mask, fmt, ...) 
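+/*
+ * Editor's note (illustrative example, not part of the original code):
+ * a typical call site of vxge_debug_ll() passes a verbosity level and a
+ * component mask (MY_COMPONENT_MASK below is a placeholder for one of
+ * the driver's component masks), e.g.
+ *
+ *      vxge_debug_ll(VXGE_TRACE, MY_COMPONENT_MASK,
+ *                    "%s: vpath %d opened", __func__, vp_id);
+ *
+ * The driver's vxge_debug_* wrappers used elsewhere in this patch (for
+ * example vxge_debug_init()) are thin wrappers of this form.
+ */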
+#endif
+
+enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
+        struct __vxge_hw_vpath_handle **vpath_handles,
+        u32 vpath_count,
+        u8 *mtable,
+        u8 *itable,
+        u32 itable_size);
+
+enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
+        struct __vxge_hw_vpath_handle *vpath_handle,
+        enum vxge_hw_rth_algoritms algorithm,
+        struct vxge_hw_rth_hash_types *hash_type,
+        u16 bucket_size);
+
+enum vxge_hw_status
+__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+                             u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+                     int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+                                struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
+#endif
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
new file mode 100644
index 000000000..be916eb2f
--- /dev/null
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -0,0 +1,1151 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice. This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-ethtool.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
+ * Virtualized Server Adapter.
+ * Copyright(c) 2002-2010 Exar Corp.
+ ******************************************************************************/
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vxge-ethtool.h"
+
+static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
+        {"\n DRIVER STATISTICS"},
+        {"vpaths_opened"},
+        {"vpath_open_fail_cnt"},
+        {"link_up_cnt"},
+        {"link_down_cnt"},
+        {"tx_frms"},
+        {"tx_errors"},
+        {"tx_bytes"},
+        {"txd_not_free"},
+        {"txd_out_of_desc"},
+        {"rx_frms"},
+        {"rx_errors"},
+        {"rx_bytes"},
+        {"rx_mcast"},
+        {"pci_map_fail_cnt"},
+        {"skb_alloc_fail_cnt"}
+};
+
+/**
+ * vxge_ethtool_sset - Sets different link parameters.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool to set
+ * link information.
+ *
+ * The function sets different link parameters provided by the user onto
+ * the NIC.
+ * Return value:
+ * 0 on success.
+ */
+static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
+{
+        /* We currently only support 10Gb/FULL */
+        if ((info->autoneg == AUTONEG_ENABLE) ||
+            (ethtool_cmd_speed(info) != SPEED_10000) ||
+            (info->duplex != DUPLEX_FULL))
+                return -EINVAL;
+
+        return 0;
+}
+
+/**
+ * vxge_ethtool_gset - Return link specific information.
+ * @dev: device pointer.
+ * @info: pointer to the structure with parameters given by ethtool
+ * to return link information.
+ *
+ * Returns link specific information like speed, duplex etc. to ethtool.
+ * Return value : + * return 0 on success. + */ +static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) +{ + info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + info->port = PORT_FIBRE; + + info->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(info, SPEED_10000); + info->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(info, SPEED_UNKNOWN); + info->duplex = DUPLEX_UNKNOWN; + } + + info->autoneg = AUTONEG_DISABLE; + return 0; +} + +/** + * vxge_ethtool_gdrvinfo - Returns driver specific information. + * @dev: device pointer. + * @info: pointer to the structure with parameters given by ethtool to + * return driver information. + * + * Returns driver specefic information like name, version etc.. to ethtool. + */ +static void vxge_ethtool_gdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct vxgedev *vdev = netdev_priv(dev); + strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info)); + info->regdump_len = sizeof(struct vxge_hw_vpath_reg) + * vdev->no_of_vpath; + + info->n_stats = STAT_LEN; +} + +/** + * vxge_ethtool_gregs - dumps the entire space of Titan into the buffer. + * @dev: device pointer. + * @regs: pointer to the structure with parameters given by ethtool for + * dumping the registers. + * @reg_space: The input argumnet into which all the registers are dumped. + * + * Dumps the vpath register space of Titan NIC into the user given + * buffer area. + */ +static void vxge_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + int index, offset; + enum vxge_hw_status status; + u64 reg; + u64 *reg_space = (u64 *)space; + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; + + regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; + regs->version = vdev->pdev->subsystem_device; + for (index = 0; index < vdev->no_of_vpath; index++) { + for (offset = 0; offset < sizeof(struct vxge_hw_vpath_reg); + offset += 8) { + status = vxge_hw_mgmt_reg_read(hldev, + vxge_hw_mgmt_reg_type_vpath, + vdev->vpaths[index].device_id, + offset, ®); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s:%d Getting reg dump Failed", + __func__, __LINE__); + return; + } + *reg_space++ = reg; + } + } +} + +/** + * vxge_ethtool_idnic - To physically identify the nic on the system. + * @dev : device pointer. + * @state : requested LED state + * + * Used to physically identify the NIC on the system. + * 0 on success + */ +static int vxge_ethtool_idnic(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); + break; + + case ETHTOOL_ID_INACTIVE: + vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF); + break; + + default: + return -EINVAL; + } + + return 0; +} + +/** + * vxge_ethtool_getpause_data - Pause frame frame generation and reception. + * @dev : device pointer. + * @ep : pointer to the structure with pause parameters given by ethtool. + * Description: + * Returns the Pause frame generation and reception capability of the NIC. 
+ * Return value: + * void + */ +static void vxge_ethtool_getpause_data(struct net_device *dev, + struct ethtool_pauseparam *ep) +{ + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; + + vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); +} + +/** + * vxge_ethtool_setpause_data - set/reset pause frame generation. + * @dev : device pointer. + * @ep : pointer to the structure with pause parameters given by ethtool. + * Description: + * It can be used to set or reset Pause frame generation or reception + * support of the NIC. + * Return value: + * int, returns 0 on Success + */ +static int vxge_ethtool_setpause_data(struct net_device *dev, + struct ethtool_pauseparam *ep) +{ + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; + + vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); + + vdev->config.tx_pause_enable = ep->tx_pause; + vdev->config.rx_pause_enable = ep->rx_pause; + + return 0; +} + +static void vxge_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *tmp_stats) +{ + int j, k; + enum vxge_hw_status status; + enum vxge_hw_status swstatus; + struct vxge_vpath *vpath = NULL; + struct vxgedev *vdev = netdev_priv(dev); + struct __vxge_hw_device *hldev = vdev->devh; + struct vxge_hw_xmac_stats *xmac_stats; + struct vxge_hw_device_stats_sw_info *sw_stats; + struct vxge_hw_device_stats_hw_info *hw_stats; + + u64 *ptr = tmp_stats; + + memset(tmp_stats, 0, + vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64)); + + xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL); + if (xmac_stats == NULL) { + vxge_debug_init(VXGE_ERR, + "%s : %d Memory Allocation failed for xmac_stats", + __func__, __LINE__); + return; + } + + sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info), + GFP_KERNEL); + if (sw_stats == NULL) { + kfree(xmac_stats); + vxge_debug_init(VXGE_ERR, + "%s : %d Memory Allocation failed for sw_stats", + __func__, __LINE__); + return; + } + + hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info), + GFP_KERNEL); + if (hw_stats == NULL) { + kfree(xmac_stats); + kfree(sw_stats); + vxge_debug_init(VXGE_ERR, + "%s : %d Memory Allocation failed for hw_stats", + __func__, __LINE__); + return; + } + + *ptr++ = 0; + status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats); + if (status != VXGE_HW_OK) { + if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) { + vxge_debug_init(VXGE_ERR, + "%s : %d Failure in getting xmac stats", + __func__, __LINE__); + } + } + swstatus = vxge_hw_driver_stats_get(hldev, sw_stats); + if (swstatus != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s : %d Failure in getting sw stats", + __func__, __LINE__); + } + + status = vxge_hw_device_stats_get(hldev, hw_stats); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s : %d hw_stats_get error", __func__, __LINE__); + } + + for (k = 0; k < vdev->no_of_vpath; k++) { + struct vxge_hw_vpath_stats_hw_info *vpath_info; + + vpath = &vdev->vpaths[k]; + j = vpath->device_id; + vpath_info = hw_stats->vpath_info[j]; + if (!vpath_info) { + memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN + + VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64)); + ptr += (VXGE_HW_VPATH_TX_STATS_LEN + + VXGE_HW_VPATH_RX_STATS_LEN); + continue; + } + + *ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms; + *ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets; + *ptr++ = vpath_info->tx_stats.tx_data_octets; + *ptr++ = vpath_info->tx_stats.tx_mcast_frms; + *ptr++ = 
vpath_info->tx_stats.tx_bcast_frms; + *ptr++ = vpath_info->tx_stats.tx_ucast_frms; + *ptr++ = vpath_info->tx_stats.tx_tagged_frms; + *ptr++ = vpath_info->tx_stats.tx_vld_ip; + *ptr++ = vpath_info->tx_stats.tx_vld_ip_octets; + *ptr++ = vpath_info->tx_stats.tx_icmp; + *ptr++ = vpath_info->tx_stats.tx_tcp; + *ptr++ = vpath_info->tx_stats.tx_rst_tcp; + *ptr++ = vpath_info->tx_stats.tx_udp; + *ptr++ = vpath_info->tx_stats.tx_unknown_protocol; + *ptr++ = vpath_info->tx_stats.tx_lost_ip; + *ptr++ = vpath_info->tx_stats.tx_parse_error; + *ptr++ = vpath_info->tx_stats.tx_tcp_offload; + *ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload; + *ptr++ = vpath_info->tx_stats.tx_lost_ip_offload; + *ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms; + *ptr++ = vpath_info->rx_stats.rx_vld_frms; + *ptr++ = vpath_info->rx_stats.rx_offload_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets; + *ptr++ = vpath_info->rx_stats.rx_data_octets; + *ptr++ = vpath_info->rx_stats.rx_offload_octets; + *ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms; + *ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms; + *ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms; + *ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms; + *ptr++ = vpath_info->rx_stats.rx_tagged_frms; + *ptr++ = vpath_info->rx_stats.rx_long_frms; + *ptr++ = vpath_info->rx_stats.rx_usized_frms; + *ptr++ = vpath_info->rx_stats.rx_osized_frms; + *ptr++ = vpath_info->rx_stats.rx_frag_frms; + *ptr++ = vpath_info->rx_stats.rx_jabber_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_64_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms; + *ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms; + *ptr++ = vpath_info->rx_stats.rx_ip; + *ptr++ = vpath_info->rx_stats.rx_accepted_ip; + *ptr++ = vpath_info->rx_stats.rx_ip_octets; + *ptr++ = vpath_info->rx_stats.rx_err_ip; + *ptr++ = vpath_info->rx_stats.rx_icmp; + *ptr++ = vpath_info->rx_stats.rx_tcp; + *ptr++ = vpath_info->rx_stats.rx_udp; + *ptr++ = vpath_info->rx_stats.rx_err_tcp; + *ptr++ = vpath_info->rx_stats.rx_lost_frms; + *ptr++ = vpath_info->rx_stats.rx_lost_ip; + *ptr++ = vpath_info->rx_stats.rx_lost_ip_offload; + *ptr++ = vpath_info->rx_stats.rx_various_discard; + *ptr++ = vpath_info->rx_stats.rx_sleep_discard; + *ptr++ = vpath_info->rx_stats.rx_red_discard; + *ptr++ = vpath_info->rx_stats.rx_queue_full_discard; + *ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms; + } + *ptr++ = 0; + for (k = 0; k < vdev->max_config_port; k++) { + *ptr++ = xmac_stats->aggr_stats[k].tx_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_data_octets; + *ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms; + *ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_data_octets; + *ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms; + *ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms; + } + *ptr++ = 0; + for (k = 0; k < 
vdev->max_config_port; k++) { + *ptr++ = xmac_stats->port_stats[k].tx_ttl_frms; + *ptr++ = xmac_stats->port_stats[k].tx_ttl_octets; + *ptr++ = xmac_stats->port_stats[k].tx_data_octets; + *ptr++ = xmac_stats->port_stats[k].tx_mcast_frms; + *ptr++ = xmac_stats->port_stats[k].tx_bcast_frms; + *ptr++ = xmac_stats->port_stats[k].tx_ucast_frms; + *ptr++ = xmac_stats->port_stats[k].tx_tagged_frms; + *ptr++ = xmac_stats->port_stats[k].tx_vld_ip; + *ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets; + *ptr++ = xmac_stats->port_stats[k].tx_icmp; + *ptr++ = xmac_stats->port_stats[k].tx_tcp; + *ptr++ = xmac_stats->port_stats[k].tx_rst_tcp; + *ptr++ = xmac_stats->port_stats[k].tx_udp; + *ptr++ = xmac_stats->port_stats[k].tx_parse_error; + *ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol; + *ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms; + *ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms; + *ptr++ = xmac_stats->port_stats[k].tx_drop_ip; + *ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match; + *ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match; + *ptr++ = xmac_stats->port_stats[k].tx_any_err_frms; + *ptr++ = xmac_stats->port_stats[k].tx_drop_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_frms; + *ptr++ = xmac_stats->port_stats[k].rx_vld_frms; + *ptr++ = xmac_stats->port_stats[k].rx_offload_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_octets; + *ptr++ = xmac_stats->port_stats[k].rx_data_octets; + *ptr++ = xmac_stats->port_stats[k].rx_offload_octets; + *ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms; + *ptr++ = xmac_stats->port_stats[k].rx_tagged_frms; + *ptr++ = xmac_stats->port_stats[k].rx_long_frms; + *ptr++ = xmac_stats->port_stats[k].rx_usized_frms; + *ptr++ = xmac_stats->port_stats[k].rx_osized_frms; + *ptr++ = xmac_stats->port_stats[k].rx_frag_frms; + *ptr++ = xmac_stats->port_stats[k].rx_jabber_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms; + *ptr++ = xmac_stats->port_stats[k].rx_ip; + *ptr++ = xmac_stats->port_stats[k].rx_accepted_ip; + *ptr++ = xmac_stats->port_stats[k].rx_ip_octets; + *ptr++ = xmac_stats->port_stats[k].rx_err_ip; + *ptr++ = xmac_stats->port_stats[k].rx_icmp; + *ptr++ = xmac_stats->port_stats[k].rx_tcp; + *ptr++ = xmac_stats->port_stats[k].rx_udp; + *ptr++ = xmac_stats->port_stats[k].rx_err_tcp; + *ptr++ = xmac_stats->port_stats[k].rx_pause_count; + *ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms; + *ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms; + *ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms; + *ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms; + *ptr++ = 
xmac_stats->port_stats[k].rx_out_rng_len_err_frms; + *ptr++ = xmac_stats->port_stats[k].rx_drop_frms; + *ptr++ = xmac_stats->port_stats[k].rx_discarded_frms; + *ptr++ = xmac_stats->port_stats[k].rx_drop_ip; + *ptr++ = xmac_stats->port_stats[k].rx_drop_udp; + *ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_fcs_discard; + *ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms; + *ptr++ = xmac_stats->port_stats[k].rx_switch_discard; + *ptr++ = xmac_stats->port_stats[k].rx_len_discard; + *ptr++ = xmac_stats->port_stats[k].rx_rpa_discard; + *ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard; + *ptr++ = xmac_stats->port_stats[k].rx_rts_discard; + *ptr++ = xmac_stats->port_stats[k].rx_trash_discard; + *ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard; + *ptr++ = xmac_stats->port_stats[k].rx_red_discard; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match; + *ptr++ = xmac_stats->port_stats[k].rx_local_fault; + *ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match; + *ptr++ = xmac_stats->port_stats[k].rx_jettison; + *ptr++ = xmac_stats->port_stats[k].rx_remote_fault; + } + + *ptr++ = 0; + for (k = 0; k < vdev->no_of_vpath; k++) { + struct vxge_hw_vpath_stats_sw_info *vpath_info; + + vpath = &vdev->vpaths[k]; + j = vpath->device_id; + vpath_info = (struct vxge_hw_vpath_stats_sw_info *) + &sw_stats->vpath_info[j]; + *ptr++ = vpath_info->soft_reset_cnt; + *ptr++ = vpath_info->error_stats.unknown_alarms; + *ptr++ = vpath_info->error_stats.network_sustained_fault; + *ptr++ = vpath_info->error_stats.network_sustained_ok; + *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite; + *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison; + *ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error; + *ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow; + *ptr++ = vpath_info->error_stats.statsb_pif_chain_error; + *ptr++ = vpath_info->error_stats.statsb_drop_timeout; + *ptr++ = vpath_info->error_stats.target_illegal_access; + *ptr++ = vpath_info->error_stats.ini_serr_det; + *ptr++ = vpath_info->error_stats.prc_ring_bumps; + *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err; + *ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort; + *ptr++ = vpath_info->error_stats.prc_quanta_size_err; + *ptr++ = vpath_info->ring_stats.common_stats.full_cnt; + *ptr++ = vpath_info->ring_stats.common_stats.usage_cnt; + *ptr++ = vpath_info->ring_stats.common_stats.usage_max; + *ptr++ = vpath_info->ring_stats.common_stats. + reserve_free_swaps_cnt; + *ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt; + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + *ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j]; + *ptr++ = vpath_info->fifo_stats.common_stats.full_cnt; + *ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt; + *ptr++ = vpath_info->fifo_stats.common_stats.usage_max; + *ptr++ = vpath_info->fifo_stats.common_stats. 
+ reserve_free_swaps_cnt; + *ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt; + *ptr++ = vpath_info->fifo_stats.total_posts; + *ptr++ = vpath_info->fifo_stats.total_buffers; + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + *ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j]; + } + + *ptr++ = 0; + for (k = 0; k < vdev->no_of_vpath; k++) { + struct vxge_hw_vpath_stats_hw_info *vpath_info; + vpath = &vdev->vpaths[k]; + j = vpath->device_id; + vpath_info = hw_stats->vpath_info[j]; + if (!vpath_info) { + memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64)); + ptr += VXGE_HW_VPATH_STATS_LEN; + continue; + } + *ptr++ = vpath_info->ini_num_mwr_sent; + *ptr++ = vpath_info->ini_num_mrd_sent; + *ptr++ = vpath_info->ini_num_cpl_rcvd; + *ptr++ = vpath_info->ini_num_mwr_byte_sent; + *ptr++ = vpath_info->ini_num_cpl_byte_rcvd; + *ptr++ = vpath_info->wrcrdtarb_xoff; + *ptr++ = vpath_info->rdcrdtarb_xoff; + *ptr++ = vpath_info->vpath_genstats_count0; + *ptr++ = vpath_info->vpath_genstats_count1; + *ptr++ = vpath_info->vpath_genstats_count2; + *ptr++ = vpath_info->vpath_genstats_count3; + *ptr++ = vpath_info->vpath_genstats_count4; + *ptr++ = vpath_info->vpath_genstats_count5; + *ptr++ = vpath_info->prog_event_vnum0; + *ptr++ = vpath_info->prog_event_vnum1; + *ptr++ = vpath_info->prog_event_vnum2; + *ptr++ = vpath_info->prog_event_vnum3; + *ptr++ = vpath_info->rx_multi_cast_frame_discard; + *ptr++ = vpath_info->rx_frm_transferred; + *ptr++ = vpath_info->rxd_returned; + *ptr++ = vpath_info->rx_mpa_len_fail_frms; + *ptr++ = vpath_info->rx_mpa_mrk_fail_frms; + *ptr++ = vpath_info->rx_mpa_crc_fail_frms; + *ptr++ = vpath_info->rx_permitted_frms; + *ptr++ = vpath_info->rx_vp_reset_discarded_frms; + *ptr++ = vpath_info->rx_wol_frms; + *ptr++ = vpath_info->tx_vp_reset_discarded_frms; + } + + *ptr++ = 0; + *ptr++ = vdev->stats.vpaths_open; + *ptr++ = vdev->stats.vpath_open_fail; + *ptr++ = vdev->stats.link_up; + *ptr++ = vdev->stats.link_down; + + for (k = 0; k < vdev->no_of_vpath; k++) { + *ptr += vdev->vpaths[k].fifo.stats.tx_frms; + *(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors; + *(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes; + *(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free; + *(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc; + *(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms; + *(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors; + *(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes; + *(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast; + *(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail + + vdev->vpaths[k].ring.stats.pci_map_fail; + *(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail; + } + + ptr += 12; + + kfree(xmac_stats); + kfree(sw_stats); + kfree(hw_stats); +} + +static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + int stat_size = 0; + int i, j; + struct vxgedev *vdev = netdev_priv(dev); + switch (stringset) { + case ETH_SS_STATS: + vxge_add_string("VPATH STATISTICS%s\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_add_string("tx_ttl_eth_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ttl_eth_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ucast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tagged_frms_%d\t\t\t", + &stat_size, data, i); + 
vxge_add_string("tx_vld_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vld_ip_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_rst_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_unknown_proto_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_lost_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_parse_error_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tcp_offload_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_retx_tcp_offload_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_lost_ip_offload_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_eth_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_eth_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_mcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_bcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_nucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_tagged_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_long_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_usized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_osized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_frag_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_jabber_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_64_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_65_127_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_128_255_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_256_511_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_512_1023_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_8192_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_gt_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_lost_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_lost_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_lost_ip_offload_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_various_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_sleep_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_red_discard_%d\t\t\t", + 
&stat_size, data, i); + vxge_add_string("rx_queue_full_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_ok_frms_%d\t\t\t", + &stat_size, data, i); + } + + vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->max_config_port; i++) { + vxge_add_string("tx_frms_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_discarded_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_errored_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_frms_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_discarded_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_errored_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_unknown_slow_proto_frms_%d\t", + &stat_size, data, i); + } + + vxge_add_string("\nPORT STATISTICS%s\t\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->max_config_port; i++) { + vxge_add_string("tx_ttl_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ttl_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_mcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_bcast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_ucast_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tagged_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vld_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vld_ip_octects_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_rst_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_parse_error_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_unknown_protocol_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_pause_ctrl_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_marker_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_lacpdu_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_drop_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_char2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_char1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_column2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_xgmii_column1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("tx_any_err_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_drop_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_data_octects_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_offload_octects_%d\t\t", + &stat_size, data, i); + 
vxge_add_string("rx_vld_mcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vld_bcast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_nucast_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_tagged_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_long_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_usized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_osized_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_frag_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_jabber_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_64_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_65_127_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_128_255_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_256_511_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_512_1023_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_8192_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ttl_gt_max_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_accepted_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_ip_octets_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_icmp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_tcp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_udp_%d\t\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_err_tcp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_pause_count_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_pause_ctrl_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_unsup_ctrl_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_fcs_err_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_in_rng_len_err_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_out_rng_len_err_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_drop_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_discard_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_drop_ip_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_drop_udp_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_marker_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_lacpdu_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_unknown_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_fcs_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_illegal_pdu_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_switch_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_len_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_rpa_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_l2_mgmt_discard_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_rts_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_trash_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_buff_full_discard_%d\t\t", + &stat_size, 
data, i); + vxge_add_string("rx_red_discard_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_char1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_err_sym_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_column1_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_char2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_local_fault_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_xgmii_column2_match_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_jettison_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_remote_fault_%d\t\t\t", + &stat_size, data, i); + } + + vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_add_string("soft_reset_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("unknown_alarms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("network_sustained_fault_%d\t\t", + &stat_size, data, i); + vxge_add_string("network_sustained_ok_%d\t\t", + &stat_size, data, i); + vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t", + &stat_size, data, i); + vxge_add_string("kdfcctl_fifo0_poison_%d\t\t", + &stat_size, data, i); + vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t", + &stat_size, data, i); + vxge_add_string("dblgen_fifo0_overflow_%d\t\t", + &stat_size, data, i); + vxge_add_string("statsb_pif_chain_error_%d\t\t", + &stat_size, data, i); + vxge_add_string("statsb_drop_timeout_%d\t\t", + &stat_size, data, i); + vxge_add_string("target_illegal_access_%d\t\t", + &stat_size, data, i); + vxge_add_string("ini_serr_det_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prc_ring_bumps_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prc_rxdcm_sc_abort_%d\t\t", + &stat_size, data, i); + vxge_add_string("prc_quanta_size_err_%d\t\t", + &stat_size, data, i); + vxge_add_string("ring_full_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ring_usage_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ring_usage_max_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ring_reserve_free_swaps_cnt_%d\t", + &stat_size, data, i); + vxge_add_string("ring_total_compl_cnt_%d\t\t", + &stat_size, data, i); + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t", + &stat_size, data, j, i); + vxge_add_string("fifo_full_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_usage_cnt_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_usage_max_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t", + &stat_size, data, i); + vxge_add_string("fifo_total_compl_cnt_%d\t\t", + &stat_size, data, i); + vxge_add_string("fifo_total_posts_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("fifo_total_buffers_%d\t\t", + &stat_size, data, i); + for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++) + vxge_add_string("txd_t_code_err_cnt%d_%d\t\t", + &stat_size, data, j, i); + } + + vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t", + &stat_size, data, ""); + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_add_string("ini_num_mwr_sent_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ini_num_mrd_sent_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t", + &stat_size, data, i); + 
vxge_add_string("ini_num_mwr_byte_sent_%d\t\t", + &stat_size, data, i); + vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t", + &stat_size, data, i); + vxge_add_string("wrcrdtarb_xoff_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rdcrdtarb_xoff_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count0_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count1_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count2_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count3_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count4_%d\t\t", + &stat_size, data, i); + vxge_add_string("vpath_genstats_count5_%d\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum0_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum1_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum2_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("prog_event_vnum3_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_multi_cast_frame_discard_%d\t", + &stat_size, data, i); + vxge_add_string("rx_frm_transferred_%d\t\t", + &stat_size, data, i); + vxge_add_string("rxd_returned_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_len_fail_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_permitted_frms_%d\t\t", + &stat_size, data, i); + vxge_add_string("rx_vp_reset_discarded_frms_%d\t", + &stat_size, data, i); + vxge_add_string("rx_wol_frms_%d\t\t\t", + &stat_size, data, i); + vxge_add_string("tx_vp_reset_discarded_frms_%d\t", + &stat_size, data, i); + } + + memcpy(data + stat_size, ðtool_driver_stats_keys, + sizeof(ethtool_driver_stats_keys)); + } +} + +static int vxge_ethtool_get_regs_len(struct net_device *dev) +{ + struct vxgedev *vdev = netdev_priv(dev); + + return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; +} + +static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + struct vxgedev *vdev = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return VXGE_TITLE_LEN + + (vdev->no_of_vpath * VXGE_HW_VPATH_STATS_LEN) + + (vdev->max_config_port * VXGE_HW_AGGR_STATS_LEN) + + (vdev->max_config_port * VXGE_HW_PORT_STATS_LEN) + + (vdev->no_of_vpath * VXGE_HW_VPATH_TX_STATS_LEN) + + (vdev->no_of_vpath * VXGE_HW_VPATH_RX_STATS_LEN) + + (vdev->no_of_vpath * VXGE_SW_STATS_LEN) + + DRIVER_STAT_LEN; + default: + return -EOPNOTSUPP; + } +} + +static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms) +{ + struct vxgedev *vdev = netdev_priv(dev); + + if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) { + printk(KERN_INFO "Single Function Mode is required to flash the" + " firmware\n"); + return -EINVAL; + } + + if (netif_running(dev)) { + printk(KERN_INFO "Interface %s must be down to flash the " + "firmware\n", dev->name); + return -EBUSY; + } + + return vxge_fw_upgrade(vdev, parms->data, 1); +} + +static const struct ethtool_ops vxge_ethtool_ops = { + .get_settings = vxge_ethtool_gset, + .set_settings = vxge_ethtool_sset, + .get_drvinfo = vxge_ethtool_gdrvinfo, + .get_regs_len = vxge_ethtool_get_regs_len, + .get_regs = vxge_ethtool_gregs, + .get_link = ethtool_op_get_link, + .get_pauseparam = vxge_ethtool_getpause_data, + .set_pauseparam = vxge_ethtool_setpause_data, + .get_strings = vxge_ethtool_get_strings, + .set_phys_id = vxge_ethtool_idnic, + .get_sset_count = 
vxge_ethtool_get_sset_count, + .get_ethtool_stats = vxge_get_ethtool_stats, + .flash_device = vxge_fw_flash, +}; + +void vxge_initialize_ethtool_ops(struct net_device *ndev) +{ + ndev->ethtool_ops = &vxge_ethtool_ops; +} diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h new file mode 100644 index 000000000..065a2c042 --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.h @@ -0,0 +1,48 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-ethtool.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2010 Exar Corp. + ******************************************************************************/ +#ifndef _VXGE_ETHTOOL_H +#define _VXGE_ETHTOOL_H + +#include "vxge-main.h" + +/* Ethtool related variables and Macros. */ +static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset); + +#define VXGE_TITLE_LEN 5 +#define VXGE_HW_VPATH_STATS_LEN 27 +#define VXGE_HW_AGGR_STATS_LEN 13 +#define VXGE_HW_PORT_STATS_LEN 94 +#define VXGE_HW_VPATH_TX_STATS_LEN 19 +#define VXGE_HW_VPATH_RX_STATS_LEN 42 +#define VXGE_SW_STATS_LEN 60 +#define VXGE_HW_STATS_LEN (VXGE_HW_VPATH_STATS_LEN +\ + VXGE_HW_AGGR_STATS_LEN +\ + VXGE_HW_PORT_STATS_LEN +\ + VXGE_HW_VPATH_TX_STATS_LEN +\ + VXGE_HW_VPATH_RX_STATS_LEN) + +#define DRIVER_STAT_LEN (sizeof(ethtool_driver_stats_keys)/ETH_GSTRING_LEN) +#define STAT_LEN (VXGE_HW_STATS_LEN + DRIVER_STAT_LEN + VXGE_SW_STATS_LEN) + +/* Maximum flicker time of adapter LED */ +#define VXGE_MAX_FLICKER_TIME (60 * HZ) /* 60 seconds */ +#define VXGE_FLICKER_ON 1 +#define VXGE_FLICKER_OFF 0 + +#define vxge_add_string(fmt, size, buf, ...) {\ + snprintf(buf + *size, ETH_GSTRING_LEN, fmt, __VA_ARGS__); \ + *size += ETH_GSTRING_LEN; \ +} + +#endif /*_VXGE_ETHTOOL_H*/ diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c new file mode 100644 index 000000000..4e3545355 --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -0,0 +1,4870 @@ +/****************************************************************************** +* This software may be used and distributed according to the terms of +* the GNU General Public License (GPL), incorporated herein by reference. +* Drivers based on or derived from this code fall under the GPL and must +* retain the authorship, copyright and license notice. This file is not +* a complete program and may only be used when the entire operating +* system is licensed under the GPL. +* See the file COPYING in this distribution for more information. +* +* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O +* Virtualized Server Adapter. +* Copyright(c) 2002-2010 Exar Corp. +* +* The module loadable parameters that are supported by the driver and a brief +* explanation of all the variables: +* vlan_tag_strip: +* Strip VLAN Tag enable/disable. 
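Editor's note: the vxge_add_string() macro in vxge-ethtool.h writes every name into a fixed ETH_GSTRING_LEN-wide slot, which is exactly the layout ethtool expects: get_sset_count() must report the number of slots that get_strings() fills and that get_ethtool_stats() later mirrors one-for-one. A minimal sketch of that contract (hypothetical two-entry table, assuming the usual <linux/netdevice.h> and <linux/ethtool.h> includes), not the driver's real counter set:

static const char demo_stat_names[][ETH_GSTRING_LEN] = {
	"rx_frms", "tx_frms",		/* hypothetical counters */
};

static void demo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	u32 size = 0;
	int i;

	if (sset != ETH_SS_STATS)
		return;
	for (i = 0; i < ARRAY_SIZE(demo_stat_names); i++) {
		/* one ETH_GSTRING_LEN slot per name, as vxge_add_string() does */
		snprintf(data + size, ETH_GSTRING_LEN, "%s", demo_stat_names[i]);
		size += ETH_GSTRING_LEN;
	}
}

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return ARRAY_SIZE(demo_stat_names);
}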
Instructs the device to remove +* the VLAN tag from all received tagged frames that are not +* replicated at the internal L2 switch. +* 0 - Do not strip the VLAN tag. +* 1 - Strip the VLAN tag. +* +* addr_learn_en: +* Enable learning the mac address of the guest OS interface in +* a virtualization environment. +* 0 - DISABLE +* 1 - ENABLE +* +* max_config_port: +* Maximum number of port to be supported. +* MIN -1 and MAX - 2 +* +* max_config_vpath: +* This configures the maximum no of VPATH configures for each +* device function. +* MIN - 1 and MAX - 17 +* +* max_config_dev: +* This configures maximum no of Device function to be enabled. +* MIN - 1 and MAX - 17 +* +******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "vxge-main.h" +#include "vxge-reg.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O" + "Virtualized Server Adapter"); + +static const struct pci_device_id vxge_id_table[] = { + {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID, + PCI_ANY_ID}, + {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID, + PCI_ANY_ID}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, vxge_id_table); + +VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE); +VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT); +VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT); +VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT); +VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT); +VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV); + +static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] = + {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31}; +static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] = + {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF}; +module_param_array(bw_percentage, uint, NULL, 0); + +static struct vxge_drv_config *driver_config; +static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); + +static inline int is_vxge_card_up(struct vxgedev *vdev) +{ + return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); +} + +static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) +{ + struct sk_buff **skb_ptr = NULL; + struct sk_buff **temp; +#define NR_SKB_COMPLETED 128 + struct sk_buff *completed[NR_SKB_COMPLETED]; + int more; + + do { + more = 0; + skb_ptr = completed; + + if (__netif_tx_trylock(fifo->txq)) { + vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr, + NR_SKB_COMPLETED, &more); + __netif_tx_unlock(fifo->txq); + } + + /* free SKBs */ + for (temp = completed; temp != skb_ptr; temp++) + dev_kfree_skb_irq(*temp); + } while (more); +} + +static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev) +{ + int i; + + /* Complete all transmits */ + for (i = 0; i < vdev->no_of_vpath; i++) + VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo); +} + +static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) +{ + int i; + struct vxge_ring *ring; + + /* Complete all receives*/ + for (i = 0; i < vdev->no_of_vpath; i++) { + ring = &vdev->vpaths[i].ring; + vxge_hw_vpath_poll_rx(ring->handle); + } +} + +/* + * vxge_callback_link_up + * + * This function is called during interrupt context to notify link up state + * change. 
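Editor's note: the VXGE_MODULE_PARAM_INT() lines above are module-parameter wrappers whose exact expansion lives in vxge-main.h; the following is only a generic restatement of the pattern with a hypothetical parameter name, not the macro's real body:

/* hypothetical parameter, shown only to illustrate the pattern */
static int demo_strip_vlan = 1;
module_param(demo_strip_vlan, int, 0444);
MODULE_PARM_DESC(demo_strip_vlan,
		 "Strip VLAN tag: 0 - do not strip, 1 - strip");

Loading the module with, e.g., "modprobe vxge vlan_tag_strip=0 max_config_vpath=4" would then override the documented defaults at insertion time.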
+ */ +static void vxge_callback_link_up(struct __vxge_hw_device *hldev) +{ + struct net_device *dev = hldev->ndev; + struct vxgedev *vdev = netdev_priv(dev); + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + vdev->ndev->name, __func__, __LINE__); + netdev_notice(vdev->ndev, "Link Up\n"); + vdev->stats.link_up++; + + netif_carrier_on(vdev->ndev); + netif_tx_wake_all_queues(vdev->ndev); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); +} + +/* + * vxge_callback_link_down + * + * This function is called during interrupt context to notify link down state + * change. + */ +static void vxge_callback_link_down(struct __vxge_hw_device *hldev) +{ + struct net_device *dev = hldev->ndev; + struct vxgedev *vdev = netdev_priv(dev); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); + netdev_notice(vdev->ndev, "Link Down\n"); + + vdev->stats.link_down++; + netif_carrier_off(vdev->ndev); + netif_tx_stop_all_queues(vdev->ndev); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); +} + +/* + * vxge_rx_alloc + * + * Allocate SKB. + */ +static struct sk_buff * +vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) +{ + struct net_device *dev; + struct sk_buff *skb; + struct vxge_rx_priv *rx_priv; + + dev = ring->ndev; + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + + rx_priv = vxge_hw_ring_rxd_private_get(dtrh); + + /* try to allocate skb first. this one may fail */ + skb = netdev_alloc_skb(dev, skb_size + + VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + if (skb == NULL) { + vxge_debug_mem(VXGE_ERR, + "%s: out of memory to allocate SKB", dev->name); + ring->stats.skb_alloc_fail++; + return NULL; + } + + vxge_debug_mem(VXGE_TRACE, + "%s: %s:%d Skb : 0x%p", ring->ndev->name, + __func__, __LINE__, skb); + + skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + + rx_priv->skb = skb; + rx_priv->skb_data = NULL; + rx_priv->data_size = skb_size; + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); + + return skb; +} + +/* + * vxge_rx_map + */ +static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) +{ + struct vxge_rx_priv *rx_priv; + dma_addr_t dma_addr; + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + rx_priv = vxge_hw_ring_rxd_private_get(dtrh); + + rx_priv->skb_data = rx_priv->skb->data; + dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, + rx_priv->data_size, PCI_DMA_FROMDEVICE); + + if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { + ring->stats.pci_map_fail++; + return -EIO; + } + vxge_debug_mem(VXGE_TRACE, + "%s: %s:%d 1 buffer mode dma_addr = 0x%llx", + ring->ndev->name, __func__, __LINE__, + (unsigned long long)dma_addr); + vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size); + + rx_priv->data_dma = dma_addr; + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); + + return 0; +} + +/* + * vxge_rx_initial_replenish + * Allocation of RxD as an initial replenish procedure. 
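Editor's note: condensed, the receive-buffer setup above is a two-step allocate-then-map sequence whose failure paths must stay symmetric (free the skb if the mapping fails). A minimal sketch under that reading, reusing the same legacy pci_* DMA calls the driver uses; NET_IP_ALIGN stands in for the driver's VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN reserve:

static int demo_rx_fill(struct pci_dev *pdev, struct net_device *dev,
			struct vxge_rx_priv *rx_priv, int size)
{
	rx_priv->skb = netdev_alloc_skb(dev, size + NET_IP_ALIGN);
	if (!rx_priv->skb)
		return -ENOMEM;
	skb_reserve(rx_priv->skb, NET_IP_ALIGN);	/* align the headers */

	rx_priv->data_size = size;
	rx_priv->data_dma = pci_map_single(pdev, rx_priv->skb->data,
					   size, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, rx_priv->data_dma)) {
		dev_kfree_skb(rx_priv->skb);	/* undo step one on failure */
		return -EIO;
	}
	return 0;
}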
+ */ +static enum vxge_hw_status +vxge_rx_initial_replenish(void *dtrh, void *userdata) +{ + struct vxge_ring *ring = (struct vxge_ring *)userdata; + struct vxge_rx_priv *rx_priv; + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + if (vxge_rx_alloc(dtrh, ring, + VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL) + return VXGE_HW_FAIL; + + if (vxge_rx_map(dtrh, ring)) { + rx_priv = vxge_hw_ring_rxd_private_get(dtrh); + dev_kfree_skb(rx_priv->skb); + + return VXGE_HW_FAIL; + } + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); + + return VXGE_HW_OK; +} + +static inline void +vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, + int pkt_length, struct vxge_hw_ring_rxd_info *ext_info) +{ + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + skb_record_rx_queue(skb, ring->driver_id); + skb->protocol = eth_type_trans(skb, ring->ndev); + + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.rx_frms++; + ring->stats.rx_bytes += pkt_length; + + if (skb->pkt_type == PACKET_MULTICAST) + ring->stats.rx_mcast++; + u64_stats_update_end(&ring->stats.syncp); + + vxge_debug_rx(VXGE_TRACE, + "%s: %s:%d skb protocol = %d", + ring->ndev->name, __func__, __LINE__, skb->protocol); + + if (ext_info->vlan && + ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan); + napi_gro_receive(ring->napi_p, skb); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); +} + +static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring, + struct vxge_rx_priv *rx_priv) +{ + pci_dma_sync_single_for_device(ring->pdev, + rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE); + + vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size); + vxge_hw_ring_rxd_pre_post(ring->handle, dtr); +} + +static inline void vxge_post(int *dtr_cnt, void **first_dtr, + void *post_dtr, struct __vxge_hw_ring *ringh) +{ + int dtr_count = *dtr_cnt; + if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) { + if (*first_dtr) + vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr); + *first_dtr = post_dtr; + } else + vxge_hw_ring_rxd_post_post(ringh, post_dtr); + dtr_count++; + *dtr_cnt = dtr_count; +} + +/* + * vxge_rx_1b_compl + * + * If the interrupt is because of a received frame or if the receive ring + * contains fresh as yet un-processed frames, this function is called. 
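Editor's note: vxge_rx_complete() above is the standard hand-off into the stack: record the receive queue, resolve the protocol, re-attach the hardware-stripped VLAN tag, and let GRO coalesce. Reduced to its essentials (statistics and trace calls dropped), the flow is roughly:

static void demo_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
			    struct net_device *dev, u16 rx_queue,
			    u16 vlan, bool vlan_stripped)
{
	skb_record_rx_queue(skb, rx_queue);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */

	if (vlan_stripped && vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);

	napi_gro_receive(napi, skb);	/* GRO may merge before delivery */
}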
+ */ +static enum vxge_hw_status +vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, + u8 t_code, void *userdata) +{ + struct vxge_ring *ring = (struct vxge_ring *)userdata; + struct net_device *dev = ring->ndev; + unsigned int dma_sizes; + void *first_dtr = NULL; + int dtr_cnt = 0; + int data_size; + dma_addr_t data_dma; + int pkt_length; + struct sk_buff *skb; + struct vxge_rx_priv *rx_priv; + struct vxge_hw_ring_rxd_info ext_info; + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + + if (ring->budget <= 0) + goto out; + + do { + prefetch((char *)dtr + L1_CACHE_BYTES); + rx_priv = vxge_hw_ring_rxd_private_get(dtr); + skb = rx_priv->skb; + data_size = rx_priv->data_size; + data_dma = rx_priv->data_dma; + prefetch(rx_priv->skb_data); + + vxge_debug_rx(VXGE_TRACE, + "%s: %s:%d skb = 0x%p", + ring->ndev->name, __func__, __LINE__, skb); + + vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes); + pkt_length = dma_sizes; + + pkt_length -= ETH_FCS_LEN; + + vxge_debug_rx(VXGE_TRACE, + "%s: %s:%d Packet Length = %d", + ring->ndev->name, __func__, __LINE__, pkt_length); + + vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info); + + /* check skb validity */ + vxge_assert(skb); + + prefetch((char *)skb + L1_CACHE_BYTES); + if (unlikely(t_code)) { + if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != + VXGE_HW_OK) { + + ring->stats.rx_errors++; + vxge_debug_rx(VXGE_TRACE, + "%s: %s :%d Rx T_code is %d", + ring->ndev->name, __func__, + __LINE__, t_code); + + /* If the t_code is not supported and if the + * t_code is other than 0x5 (unparseable packet + * such as unknown UPV6 header), Drop it !!! + */ + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); + ring->stats.rx_dropped++; + continue; + } + } + + if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { + if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { + if (!vxge_rx_map(dtr, ring)) { + skb_put(skb, pkt_length); + + pci_unmap_single(ring->pdev, data_dma, + data_size, PCI_DMA_FROMDEVICE); + + vxge_hw_ring_rxd_pre_post(ringh, dtr); + vxge_post(&dtr_cnt, &first_dtr, dtr, + ringh); + } else { + dev_kfree_skb(rx_priv->skb); + rx_priv->skb = skb; + rx_priv->data_size = data_size; + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, + ringh); + ring->stats.rx_dropped++; + break; + } + } else { + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); + ring->stats.rx_dropped++; + break; + } + } else { + struct sk_buff *skb_up; + + skb_up = netdev_alloc_skb(dev, pkt_length + + VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + if (skb_up != NULL) { + skb_reserve(skb_up, + VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); + + pci_dma_sync_single_for_cpu(ring->pdev, + data_dma, data_size, + PCI_DMA_FROMDEVICE); + + vxge_debug_mem(VXGE_TRACE, + "%s: %s:%d skb_up = %p", + ring->ndev->name, __func__, + __LINE__, skb); + memcpy(skb_up->data, skb->data, pkt_length); + + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, + ringh); + /* will netif_rx small SKB instead */ + skb = skb_up; + skb_put(skb, pkt_length); + } else { + vxge_re_pre_post(dtr, ring, rx_priv); + + vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); + vxge_debug_rx(VXGE_ERR, + "%s: vxge_rx_1b_compl: out of " + "memory", dev->name); + ring->stats.skb_alloc_fail++; + break; + } + } + + if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) && + !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) && + (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */ + 
ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK && + ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + + if (ring->rx_hwts) { + struct skb_shared_hwtstamps *skb_hwts; + u32 ns = *(u32 *)(skb->head + pkt_length); + + skb_hwts = skb_hwtstamps(skb); + skb_hwts->hwtstamp = ns_to_ktime(ns); + } + + /* rth_hash_type and rth_it_hit are non-zero regardless of + * whether rss is enabled. Only the rth_value is zero/non-zero + * if rss is disabled/enabled, so key off of that. + */ + if (ext_info.rth_value) + skb_set_hash(skb, ext_info.rth_value, + PKT_HASH_TYPE_L3); + + vxge_rx_complete(ring, skb, ext_info.vlan, + pkt_length, &ext_info); + + ring->budget--; + ring->pkts_processed++; + if (!ring->budget) + break; + + } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr, + &t_code) == VXGE_HW_OK); + + if (first_dtr) + vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr); + +out: + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", + __func__, __LINE__); + return VXGE_HW_OK; +} + +/* + * vxge_xmit_compl + * + * If an interrupt was raised to indicate DMA complete of the Tx packet, + * this function is called. It identifies the last TxD whose buffer was + * freed and frees all skbs whose data have already DMA'ed into the NICs + * internal memory. + */ +static enum vxge_hw_status +vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, + enum vxge_hw_fifo_tcode t_code, void *userdata, + struct sk_buff ***skb_ptr, int nr_skb, int *more) +{ + struct vxge_fifo *fifo = (struct vxge_fifo *)userdata; + struct sk_buff *skb, **done_skb = *skb_ptr; + int pkt_cnt = 0; + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Entered....", __func__, __LINE__); + + do { + int frg_cnt; + skb_frag_t *frag; + int i = 0, j; + struct vxge_tx_priv *txd_priv = + vxge_hw_fifo_txdl_private_get(dtr); + + skb = txd_priv->skb; + frg_cnt = skb_shinfo(skb)->nr_frags; + frag = &skb_shinfo(skb)->frags[0]; + + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d fifo_hw = %p dtr = %p " + "tcode = 0x%x", fifo->ndev->name, __func__, + __LINE__, fifo_hw, dtr, t_code); + /* check skb validity */ + vxge_assert(skb); + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d", + fifo->ndev->name, __func__, __LINE__, + skb, txd_priv, frg_cnt); + if (unlikely(t_code)) { + fifo->stats.tx_errors++; + vxge_debug_tx(VXGE_ERR, + "%s: tx: dtr %p completed due to " + "error t_code %01x", fifo->ndev->name, + dtr, t_code); + vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code); + } + + /* for unfragmented skb */ + pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], + skb_headlen(skb), PCI_DMA_TODEVICE); + + for (j = 0; j < frg_cnt; j++) { + pci_unmap_page(fifo->pdev, + txd_priv->dma_buffers[i++], + skb_frag_size(frag), PCI_DMA_TODEVICE); + frag += 1; + } + + vxge_hw_fifo_txdl_free(fifo_hw, dtr); + + /* Updating the statistics block */ + u64_stats_update_begin(&fifo->stats.syncp); + fifo->stats.tx_frms++; + fifo->stats.tx_bytes += skb->len; + u64_stats_update_end(&fifo->stats.syncp); + + *done_skb++ = skb; + + if (--nr_skb <= 0) { + *more = 1; + break; + } + + pkt_cnt++; + if (pkt_cnt > fifo->indicate_max_pkts) + break; + + } while (vxge_hw_fifo_txdl_next_completed(fifo_hw, + &dtr, &t_code) == VXGE_HW_OK); + + *skb_ptr = done_skb; + if (netif_tx_queue_stopped(fifo->txq)) + netif_tx_wake_queue(fifo->txq); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", + fifo->ndev->name, __func__, __LINE__); + return VXGE_HW_OK; +} + +/* select a vpath to transmit the packet */ +static u32 
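Editor's note: the receive completion above implements a classic copy-break: frames at or below VXGE_LL_RX_COPY_THRESHOLD are copied into a fresh small skb so the original DMA buffer can be re-posted untouched, while larger frames hand the mapped buffer up the stack and a replacement buffer is allocated. A stripped-down sketch of just that decision (error handling reduced to a drop, NET_IP_ALIGN used as a stand-in reserve):

static struct sk_buff *demo_copy_break(struct net_device *dev,
				       struct sk_buff *rx_skb,
				       int len, int threshold)
{
	struct sk_buff *copy;

	if (len > threshold)
		return rx_skb;		/* large frame: pass the DMA buffer up */

	copy = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!copy)
		return NULL;		/* caller drops and re-posts, as above */

	skb_reserve(copy, NET_IP_ALIGN);
	memcpy(skb_put(copy, len), rx_skb->data, len);	/* rx_skb is re-posted */
	return copy;
}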
vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb) +{ + u16 queue_len, counter = 0; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip; + struct tcphdr *th; + + ip = ip_hdr(skb); + + if (!ip_is_fragment(ip)) { + th = (struct tcphdr *)(((unsigned char *)ip) + + ip->ihl*4); + + queue_len = vdev->no_of_vpath; + counter = (ntohs(th->source) + + ntohs(th->dest)) & + vdev->vpath_selector[queue_len - 1]; + if (counter >= queue_len) + counter = queue_len - 1; + } + } + return counter; +} + +static enum vxge_hw_status vxge_search_mac_addr_in_list( + struct vxge_vpath *vpath, u64 del_mac) +{ + struct list_head *entry, *next; + list_for_each_safe(entry, next, &vpath->mac_addr_list) { + if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) + return TRUE; + } + return FALSE; +} + +static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) +{ + struct vxge_mac_addrs *new_mac_entry; + u8 *mac_address = NULL; + + if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) + return TRUE; + + new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); + if (!new_mac_entry) { + vxge_debug_mem(VXGE_ERR, + "%s: memory allocation failed", + VXGE_DRIVER_NAME); + return FALSE; + } + + list_add(&new_mac_entry->item, &vpath->mac_addr_list); + + /* Copy the new mac address to the list */ + mac_address = (u8 *)&new_mac_entry->macaddr; + memcpy(mac_address, mac->macaddr, ETH_ALEN); + + new_mac_entry->state = mac->state; + vpath->mac_addr_cnt++; + + if (is_multicast_ether_addr(mac->macaddr)) + vpath->mcast_addr_cnt++; + + return TRUE; +} + +/* Add a mac address to DA table */ +static enum vxge_hw_status +vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; + + if (is_multicast_ether_addr(mac->macaddr)) + duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; + else + duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; + + vpath = &vdev->vpaths[mac->vpath_no]; + status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, + mac->macmask, duplicate_mode); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config add entry failed for vpath:%d", + vpath->device_id); + } else + if (FALSE == vxge_mac_list_add(vpath, mac)) + status = -EPERM; + + return status; +} + +static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) +{ + struct macInfo mac_info; + u8 *mac_address = NULL; + u64 mac_addr = 0, vpath_vector = 0; + int vpath_idx = 0; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath = NULL; + + mac_address = (u8 *)&mac_addr; + memcpy(mac_address, mac_header, ETH_ALEN); + + /* Is this mac address already in the list? */ + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + vpath = &vdev->vpaths[vpath_idx]; + if (vxge_search_mac_addr_in_list(vpath, mac_addr)) + return vpath_idx; + } + + memset(&mac_info, 0, sizeof(struct macInfo)); + memcpy(mac_info.macaddr, mac_header, ETH_ALEN); + + /* Any vpath has room to add mac address to its da table? 
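Editor's note: for TX_PORT_STEERING the fifo index above is simply the sum of the TCP source and destination ports masked by vpath_selector[no_of_vpath - 1], with a clamp for the non-power-of-two selector entries. As a worked example, with four vpaths the selector is 3, so ports 40000 and 80 steer to (40000 + 80) & 3 = 0. The same computation in isolation:

static u32 demo_port_steer(u16 sport, u16 dport,
			   u16 no_of_vpath, const u16 *selector)
{
	u32 idx = ((u32)sport + dport) & selector[no_of_vpath - 1];

	return idx >= no_of_vpath ? no_of_vpath - 1 : idx;	/* clamp */
}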
*/ + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + vpath = &vdev->vpaths[vpath_idx]; + if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) { + /* Add this mac address to this vpath */ + mac_info.vpath_no = vpath_idx; + mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; + status = vxge_add_mac_addr(vdev, &mac_info); + if (status != VXGE_HW_OK) + return -EPERM; + return vpath_idx; + } + } + + mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST; + vpath_idx = 0; + mac_info.vpath_no = vpath_idx; + /* Is the first vpath already selected as catch-basin ? */ + vpath = &vdev->vpaths[vpath_idx]; + if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) { + /* Add this mac address to this vpath */ + if (FALSE == vxge_mac_list_add(vpath, &mac_info)) + return -EPERM; + return vpath_idx; + } + + /* Select first vpath as catch-basin */ + vpath_vector = vxge_mBIT(vpath->device_id); + status = vxge_hw_mgmt_reg_write(vpath->vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + (ulong)offsetof( + struct vxge_hw_mrpcim_reg, + rts_mgr_cbasin_cfg), + vpath_vector); + if (status != VXGE_HW_OK) { + vxge_debug_tx(VXGE_ERR, + "%s: Unable to set the vpath-%d in catch-basin mode", + VXGE_DRIVER_NAME, vpath->device_id); + return -EPERM; + } + + if (FALSE == vxge_mac_list_add(vpath, &mac_info)) + return -EPERM; + + return vpath_idx; +} + +/** + * vxge_xmit + * @skb : the socket buffer containing the Tx data. + * @dev : device pointer. + * + * This function is the Tx entry point of the driver. Neterion NIC supports + * certain protocol assist features on Tx side, namely CSO, S/G, LSO. +*/ +static netdev_tx_t +vxge_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vxge_fifo *fifo = NULL; + void *dtr_priv; + void *dtr = NULL; + struct vxgedev *vdev = NULL; + enum vxge_hw_status status; + int frg_cnt, first_frg_len; + skb_frag_t *frag; + int i = 0, j = 0, avail; + u64 dma_pointer; + struct vxge_tx_priv *txdl_priv = NULL; + struct __vxge_hw_fifo *fifo_hw; + int offload_type; + int vpath_no = 0; + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + dev->name, __func__, __LINE__); + + /* A buffer with no data will be dropped */ + if (unlikely(skb->len <= 0)) { + vxge_debug_tx(VXGE_ERR, + "%s: Buffer has no data..", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + vdev = netdev_priv(dev); + + if (unlikely(!is_vxge_card_up(vdev))) { + vxge_debug_tx(VXGE_ERR, + "%s: vdev not initialized", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (vdev->config.addr_learn_en) { + vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN); + if (vpath_no == -EPERM) { + vxge_debug_tx(VXGE_ERR, + "%s: Failed to store the mac address", + dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + + if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) + vpath_no = skb_get_queue_mapping(skb); + else if (vdev->config.tx_steering_type == TX_PORT_STEERING) + vpath_no = vxge_get_vpath_no(vdev, skb); + + vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no); + + if (vpath_no >= vdev->no_of_vpath) + vpath_no = 0; + + fifo = &vdev->vpaths[vpath_no].fifo; + fifo_hw = fifo->handle; + + if (netif_tx_queue_stopped(fifo->txq)) + return NETDEV_TX_BUSY; + + avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw); + if (avail == 0) { + vxge_debug_tx(VXGE_ERR, + "%s: No free TXDs available", dev->name); + fifo->stats.txd_not_free++; + goto _exit0; + } + + /* Last TXD? Stop tx queue to avoid dropping packets. TX + * completion will resume the queue. 
+ */ + if (avail == 1) + netif_tx_stop_queue(fifo->txq); + + status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv); + if (unlikely(status != VXGE_HW_OK)) { + vxge_debug_tx(VXGE_ERR, + "%s: Out of descriptors .", dev->name); + fifo->stats.txd_out_of_desc++; + goto _exit0; + } + + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p", + dev->name, __func__, __LINE__, + fifo_hw, dtr, dtr_priv); + + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag = skb_vlan_tag_get(skb); + vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); + } + + first_frg_len = skb_headlen(skb); + + dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len, + PCI_DMA_TODEVICE); + + if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { + vxge_hw_fifo_txdl_free(fifo_hw, dtr); + fifo->stats.pci_map_fail++; + goto _exit0; + } + + txdl_priv = vxge_hw_fifo_txdl_private_get(dtr); + txdl_priv->skb = skb; + txdl_priv->dma_buffers[j] = dma_pointer; + + frg_cnt = skb_shinfo(skb)->nr_frags; + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d skb = %p txdl_priv = %p " + "frag_cnt = %d dma_pointer = 0x%llx", dev->name, + __func__, __LINE__, skb, txdl_priv, + frg_cnt, (unsigned long long)dma_pointer); + + vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer, + first_frg_len); + + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < frg_cnt; i++) { + /* ignore 0 length fragment */ + if (!skb_frag_size(frag)) + continue; + + dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag, + 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) + goto _exit2; + vxge_debug_tx(VXGE_TRACE, + "%s: %s:%d frag = %d dma_pointer = 0x%llx", + dev->name, __func__, __LINE__, i, + (unsigned long long)dma_pointer); + + txdl_priv->dma_buffers[j] = dma_pointer; + vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer, + skb_frag_size(frag)); + frag += 1; + } + + offload_type = vxge_offload_type(skb); + + if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + int mss = vxge_tcp_mss(skb); + if (mss) { + vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d", + dev->name, __func__, __LINE__, mss); + vxge_hw_fifo_txdl_mss_set(dtr, mss); + } else { + vxge_assert(skb->len <= + dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE); + vxge_assert(0); + goto _exit1; + } + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + vxge_hw_fifo_txdl_cksum_set_bits(dtr, + VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN | + VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN | + VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); + + vxge_hw_fifo_txdl_post(fifo_hw, dtr); + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", + dev->name, __func__, __LINE__); + return NETDEV_TX_OK; + +_exit2: + vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name); +_exit1: + j = 0; + frag = &skb_shinfo(skb)->frags[0]; + + pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++], + skb_headlen(skb), PCI_DMA_TODEVICE); + + for (; j < i; j++) { + pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], + skb_frag_size(frag), PCI_DMA_TODEVICE); + frag += 1; + } + + vxge_hw_fifo_txdl_free(fifo_hw, dtr); +_exit0: + netif_tx_stop_queue(fifo->txq); + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +/* + * vxge_rx_term + * + * Function will be called by hw function to abort all outstanding receive + * descriptors. 
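Editor's note: the transmit path above maps the linear part with pci_map_single() and every page fragment with skb_frag_dma_map(), recording each handle in dma_buffers[] so the error path (and later vxge_xmit_compl()) can unmap in the same order. A bare-bones restatement of that map-or-unwind loop, with the descriptor writes and the zero-length-fragment skip elided:

static int demo_map_skb(struct pci_dev *pdev, struct sk_buff *skb,
			dma_addr_t *dma_buffers)
{
	int i, j = 0;

	dma_buffers[j] = pci_map_single(pdev, skb->data, skb_headlen(skb),
					PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, dma_buffers[j]))
		return -EIO;
	j++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_buffers[j] = skb_frag_dma_map(&pdev->dev, frag, 0,
						  skb_frag_size(frag),
						  DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_buffers[j]))
			goto unwind;
		j++;
	}
	return 0;

unwind:
	pci_unmap_single(pdev, dma_buffers[0], skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 1; i < j; i++)
		pci_unmap_page(pdev, dma_buffers[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i - 1]),
			       PCI_DMA_TODEVICE);
	return -EIO;
}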
+ */ +static void +vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata) +{ + struct vxge_ring *ring = (struct vxge_ring *)userdata; + struct vxge_rx_priv *rx_priv = + vxge_hw_ring_rxd_private_get(dtrh); + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + ring->ndev->name, __func__, __LINE__); + if (state != VXGE_HW_RXD_STATE_POSTED) + return; + + pci_unmap_single(ring->pdev, rx_priv->data_dma, + rx_priv->data_size, PCI_DMA_FROMDEVICE); + + dev_kfree_skb(rx_priv->skb); + rx_priv->skb_data = NULL; + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", + ring->ndev->name, __func__, __LINE__); +} + +/* + * vxge_tx_term + * + * Function will be called to abort all outstanding tx descriptors + */ +static void +vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) +{ + struct vxge_fifo *fifo = (struct vxge_fifo *)userdata; + skb_frag_t *frag; + int i = 0, j, frg_cnt; + struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh); + struct sk_buff *skb = txd_priv->skb; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + if (state != VXGE_HW_TXDL_STATE_POSTED) + return; + + /* check skb validity */ + vxge_assert(skb); + frg_cnt = skb_shinfo(skb)->nr_frags; + frag = &skb_shinfo(skb)->frags[0]; + + /* for unfragmented skb */ + pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], + skb_headlen(skb), PCI_DMA_TODEVICE); + + for (j = 0; j < frg_cnt; j++) { + pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++], + skb_frag_size(frag), PCI_DMA_TODEVICE); + frag += 1; + } + + dev_kfree_skb(skb); + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); +} + +static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) +{ + struct list_head *entry, *next; + u64 del_mac = 0; + u8 *mac_address = (u8 *) (&del_mac); + + /* Copy the mac address to delete from the list */ + memcpy(mac_address, mac->macaddr, ETH_ALEN); + + list_for_each_safe(entry, next, &vpath->mac_addr_list) { + if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { + list_del(entry); + kfree((struct vxge_mac_addrs *)entry); + vpath->mac_addr_cnt--; + + if (is_multicast_ether_addr(mac->macaddr)) + vpath->mcast_addr_cnt--; + return TRUE; + } + } + + return FALSE; +} + +/* delete a mac address from DA table */ +static enum vxge_hw_status +vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + + vpath = &vdev->vpaths[mac->vpath_no]; + status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, + mac->macmask); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config delete entry failed for vpath:%d", + vpath->device_id); + } else + vxge_mac_list_del(vpath, mac); + return status; +} + +/** + * vxge_set_multicast + * @dev: pointer to the device structure + * + * Entry point for multicast address enable/disable + * This function is a driver entry point which gets called by the kernel + * whenever multicast addresses must be enabled/disabled. This also gets + * called to set/reset promiscuous mode. Depending on the deivce flag, we + * determine, if multicast address must be enabled or if promiscuous mode + * is to be disabled etc. 
+ */ +static void vxge_set_multicast(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct vxgedev *vdev; + int i, mcast_cnt = 0; + struct __vxge_hw_device *hldev; + struct vxge_vpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + struct macInfo mac_info; + int vpath_idx = 0; + struct vxge_mac_addrs *mac_entry; + struct list_head *list_head; + struct list_head *entry, *next; + u8 *mac_address = NULL; + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d", __func__, __LINE__); + + vdev = netdev_priv(dev); + hldev = vdev->devh; + + if (unlikely(!is_vxge_card_up(vdev))) + return; + + if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) { + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, "failed to enable " + "multicast, status %d", status); + vdev->all_multi_flg = 1; + } + } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) { + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + status = vxge_hw_vpath_mcast_disable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, "failed to disable " + "multicast, status %d", status); + vdev->all_multi_flg = 0; + } + } + + + if (!vdev->config.addr_learn_en) { + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + + if (dev->flags & IFF_PROMISC) + status = vxge_hw_vpath_promisc_enable( + vpath->handle); + else + status = vxge_hw_vpath_promisc_disable( + vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, "failed to %s promisc" + ", status %d", dev->flags&IFF_PROMISC ? + "enable" : "disable", status); + } + } + + memset(&mac_info, 0, sizeof(struct macInfo)); + /* Update individual M_CAST address list */ + if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) { + mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; + list_head = &vdev->vpaths[0].mac_addr_list; + if ((netdev_mc_count(dev) + + (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) > + vdev->vpaths[0].max_mac_addr_cnt) + goto _set_all_mcast; + + /* Delete previous MC's */ + for (i = 0; i < mcast_cnt; i++) { + list_for_each_safe(entry, next, list_head) { + mac_entry = (struct vxge_mac_addrs *)entry; + /* Copy the mac address to delete */ + mac_address = (u8 *)&mac_entry->macaddr; + memcpy(mac_info.macaddr, mac_address, ETH_ALEN); + + if (is_multicast_ether_addr(mac_info.macaddr)) { + for (vpath_idx = 0; vpath_idx < + vdev->no_of_vpath; + vpath_idx++) { + mac_info.vpath_no = vpath_idx; + status = vxge_del_mac_addr( + vdev, + &mac_info); + } + } + } + } + + /* Add new ones */ + netdev_for_each_mc_addr(ha, dev) { + memcpy(mac_info.macaddr, ha->addr, ETH_ALEN); + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; + vpath_idx++) { + mac_info.vpath_no = vpath_idx; + mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; + status = vxge_add_mac_addr(vdev, &mac_info); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s:%d Setting individual" + "multicast address failed", + __func__, __LINE__); + goto _set_all_mcast; + } + } + } + + return; +_set_all_mcast: + mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; + /* Delete previous MC's */ + for (i = 0; i < mcast_cnt; i++) { + list_for_each_safe(entry, next, list_head) { + mac_entry = (struct vxge_mac_addrs *)entry; + /* Copy the mac address to delete */ + mac_address = (u8 *)&mac_entry->macaddr; + memcpy(mac_info.macaddr, mac_address, ETH_ALEN); + + if 
(is_multicast_ether_addr(mac_info.macaddr)) + break; + } + + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; + vpath_idx++) { + mac_info.vpath_no = vpath_idx; + status = vxge_del_mac_addr(vdev, &mac_info); + } + } + + /* Enable all multicast */ + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_open); + + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s:%d Enabling all multicasts failed", + __func__, __LINE__); + } + vdev->all_multi_flg = 1; + } + dev->flags |= IFF_ALLMULTI; + } + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); +} + +/** + * vxge_set_mac_addr + * @dev: pointer to the device structure + * + * Update entry "0" (default MAC addr) + */ +static int vxge_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct vxgedev *vdev; + struct __vxge_hw_device *hldev; + enum vxge_hw_status status = VXGE_HW_OK; + struct macInfo mac_info_new, mac_info_old; + int vpath_idx = 0; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + vdev = netdev_priv(dev); + hldev = vdev->devh; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memset(&mac_info_new, 0, sizeof(struct macInfo)); + memset(&mac_info_old, 0, sizeof(struct macInfo)); + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", + __func__, __LINE__); + + /* Get the old address */ + memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len); + + /* Copy the new address */ + memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len); + + /* First delete the old mac address from all the vpaths + as we can't specify the index while adding new mac address */ + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx]; + if (!vpath->is_open) { + /* This can happen when this interface is added/removed + to the bonding interface. 
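Editor's note: vxge_set_multicast() above follows the usual rx-mode shape: honour IFF_ALLMULTI/IFF_PROMISC first, then try to mirror the exact netdev multicast list into the per-vpath DA tables, and fall back to all-multicast once the list no longer fits. The skeleton of that last step, with a hypothetical add helper standing in for vxge_add_mac_addr():

static void demo_sync_mc_list(struct net_device *dev,
			      int (*hw_add)(const u8 *addr),
			      int table_room)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_count(dev) > table_room) {
		dev->flags |= IFF_ALLMULTI;	/* too many: accept all mcast */
		return;
	}

	netdev_for_each_mc_addr(ha, dev)
		if (hw_add(ha->addr))		/* non-zero = hardware refused */
			dev->flags |= IFF_ALLMULTI;
}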
Delete this station address + from the linked list */ + vxge_mac_list_del(vpath, &mac_info_old); + + /* Add this new address to the linked list + for later restoring */ + vxge_mac_list_add(vpath, &mac_info_new); + + continue; + } + /* Delete the station address */ + mac_info_old.vpath_no = vpath_idx; + status = vxge_del_mac_addr(vdev, &mac_info_old); + } + + if (unlikely(!is_vxge_card_up(vdev))) { + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return VXGE_HW_OK; + } + + /* Set this mac address to all the vpaths */ + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + mac_info_new.vpath_no = vpath_idx; + mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; + status = vxge_add_mac_addr(vdev, &mac_info_new); + if (status != VXGE_HW_OK) + return -EINVAL; + } + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + return status; +} + +/* + * vxge_vpath_intr_enable + * @vdev: pointer to vdev + * @vp_id: vpath for which to enable the interrupts + * + * Enables the interrupts for the vpath +*/ +static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) +{ + struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; + int msix_id = 0; + int tim_msix_id[4] = {0, 1, 0, 0}; + int alarm_msix_id = VXGE_ALARM_MSIX_ID; + + vxge_hw_vpath_intr_enable(vpath->handle); + + if (vdev->config.intr_type == INTA) + vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle); + else { + vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, + alarm_msix_id); + + msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; + vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); + vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1); + + /* enable the alarm vector */ + msix_id = (vpath->handle->vpath->hldev->first_vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id; + vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); + } +} + +/* + * vxge_vpath_intr_disable + * @vdev: pointer to vdev + * @vp_id: vpath for which to disable the interrupts + * + * Disables the interrupts for the vpath +*/ +static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) +{ + struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; + struct __vxge_hw_device *hldev; + int msix_id; + + hldev = pci_get_drvdata(vdev->pdev); + + vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id); + + vxge_hw_vpath_intr_disable(vpath->handle); + + if (vdev->config.intr_type == INTA) + vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); + else { + msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; + vxge_hw_vpath_msix_mask(vpath->handle, msix_id); + vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); + + /* disable the alarm vector */ + msix_id = (vpath->handle->vpath->hldev->first_vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; + vxge_hw_vpath_msix_mask(vpath->handle, msix_id); + } +} + +/* list all mac addresses from DA table */ +static enum vxge_hw_status +vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac) +{ + enum vxge_hw_status status = VXGE_HW_OK; + unsigned char macmask[ETH_ALEN]; + unsigned char macaddr[ETH_ALEN]; + + status = vxge_hw_vpath_mac_addr_get(vpath->handle, + macaddr, macmask); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA config list entry failed for vpath:%d", + vpath->device_id); + return status; + } + + while (!ether_addr_equal(mac->macaddr, macaddr)) { + status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, + macaddr, macmask); + if (status != VXGE_HW_OK) + break; + } + + return status; +} + +/* Store all mac addresses from the list to the DA table */ +static enum 
vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct macInfo mac_info; + u8 *mac_address = NULL; + struct list_head *entry, *next; + + memset(&mac_info, 0, sizeof(struct macInfo)); + + if (vpath->is_open) { + list_for_each_safe(entry, next, &vpath->mac_addr_list) { + mac_address = + (u8 *)& + ((struct vxge_mac_addrs *)entry)->macaddr; + memcpy(mac_info.macaddr, mac_address, ETH_ALEN); + ((struct vxge_mac_addrs *)entry)->state = + VXGE_LL_MAC_ADDR_IN_DA_TABLE; + /* does this mac address already exist in da table? */ + status = vxge_search_mac_addr_in_da_table(vpath, + &mac_info); + if (status != VXGE_HW_OK) { + /* Add this mac address to the DA table */ + status = vxge_hw_vpath_mac_addr_add( + vpath->handle, mac_info.macaddr, + mac_info.macmask, + VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "DA add entry failed for vpath:%d", + vpath->device_id); + ((struct vxge_mac_addrs *)entry)->state + = VXGE_LL_MAC_ADDR_IN_LIST; + } + } + } + } + + return status; +} + +/* Store all vlan ids from the list to the vid table */ +static enum vxge_hw_status +vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxgedev *vdev = vpath->vdev; + u16 vid; + + if (!vpath->is_open) + return status; + + for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID) + status = vxge_hw_vpath_vid_add(vpath->handle, vid); + + return status; +} + +/* + * vxge_reset_vpath + * @vdev: pointer to vdev + * @vp_id: vpath to reset + * + * Resets the vpath +*/ +static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; + int ret = 0; + + /* check if device is down already */ + if (unlikely(!is_vxge_card_up(vdev))) + return 0; + + /* is device reset already scheduled */ + if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) + return 0; + + if (vpath->handle) { + if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { + if (is_vxge_card_up(vdev) && + vxge_hw_vpath_recover_from_reset(vpath->handle) + != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_recover_from_reset" + "failed for vpath:%d", vp_id); + return status; + } + } else { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_reset failed for" + "vpath:%d", vp_id); + return status; + } + } else + return VXGE_HW_FAIL; + + vxge_restore_vpath_mac_addr(vpath); + vxge_restore_vpath_vid_table(vpath); + + /* Enable all broadcast */ + vxge_hw_vpath_bcast_enable(vpath->handle); + + /* Enable all multicast */ + if (vdev->all_multi_flg) { + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, + "%s:%d Enabling multicast failed", + __func__, __LINE__); + } + + /* Enable the interrupts */ + vxge_vpath_intr_enable(vdev, vp_id); + + smp_wmb(); + + /* Enable the flow of traffic through the vpath */ + vxge_hw_vpath_enable(vpath->handle); + + smp_wmb(); + vxge_hw_vpath_rx_doorbell_init(vpath->handle); + vpath->ring.last_status = VXGE_HW_OK; + + /* Vpath reset done */ + clear_bit(vp_id, &vdev->vp_reset); + + /* Start the vpath queue */ + if (netif_tx_queue_stopped(vpath->fifo.txq)) + netif_tx_wake_queue(vpath->fifo.txq); + + return ret; +} + +/* Configure CI */ +static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev) +{ + int i = 0; + + /* Enable CI for RTI */ + if (vdev->config.intr_type == MSI_X) { + for (i = 0; i < vdev->no_of_vpath; i++) { + struct 
__vxge_hw_ring *hw_ring; + + hw_ring = vdev->vpaths[i].ring.handle; + vxge_hw_vpath_dynamic_rti_ci_set(hw_ring); + } + } + + /* Enable CI for TTI */ + for (i = 0; i < vdev->no_of_vpath; i++) { + struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle; + vxge_hw_vpath_tti_ci_set(hw_fifo); + /* + * For Inta (with or without napi), Set CI ON for only one + * vpath. (Have only one free running timer). + */ + if ((vdev->config.intr_type == INTA) && (i == 0)) + break; + } + + return; +} + +static int do_vxge_reset(struct vxgedev *vdev, int event) +{ + enum vxge_hw_status status; + int ret = 0, vp_id, i; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) { + /* check if device is down already */ + if (unlikely(!is_vxge_card_up(vdev))) + return 0; + + /* is reset already scheduled */ + if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) + return 0; + } + + if (event == VXGE_LL_FULL_RESET) { + netif_carrier_off(vdev->ndev); + + /* wait for all the vpath reset to complete */ + for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { + while (test_bit(vp_id, &vdev->vp_reset)) + msleep(50); + } + + netif_carrier_on(vdev->ndev); + + /* if execution mode is set to debug, don't reset the adapter */ + if (unlikely(vdev->exec_mode)) { + vxge_debug_init(VXGE_ERR, + "%s: execution mode is debug, returning..", + vdev->ndev->name); + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + netif_tx_stop_all_queues(vdev->ndev); + return 0; + } + } + + if (event == VXGE_LL_FULL_RESET) { + vxge_hw_device_wait_receive_idle(vdev->devh); + vxge_hw_device_intr_disable(vdev->devh); + + switch (vdev->cric_err_event) { + case VXGE_HW_EVENT_UNKNOWN: + netif_tx_stop_all_queues(vdev->ndev); + vxge_debug_init(VXGE_ERR, + "fatal: %s: Disabling device due to" + "unknown error", + vdev->ndev->name); + ret = -EPERM; + goto out; + case VXGE_HW_EVENT_RESET_START: + break; + case VXGE_HW_EVENT_RESET_COMPLETE: + case VXGE_HW_EVENT_LINK_DOWN: + case VXGE_HW_EVENT_LINK_UP: + case VXGE_HW_EVENT_ALARM_CLEARED: + case VXGE_HW_EVENT_ECCERR: + case VXGE_HW_EVENT_MRPCIM_ECCERR: + ret = -EPERM; + goto out; + case VXGE_HW_EVENT_FIFO_ERR: + case VXGE_HW_EVENT_VPATH_ERR: + break; + case VXGE_HW_EVENT_CRITICAL_ERR: + netif_tx_stop_all_queues(vdev->ndev); + vxge_debug_init(VXGE_ERR, + "fatal: %s: Disabling device due to" + "serious error", + vdev->ndev->name); + /* SOP or device reset required */ + /* This event is not currently used */ + ret = -EPERM; + goto out; + case VXGE_HW_EVENT_SERR: + netif_tx_stop_all_queues(vdev->ndev); + vxge_debug_init(VXGE_ERR, + "fatal: %s: Disabling device due to" + "serious error", + vdev->ndev->name); + ret = -EPERM; + goto out; + case VXGE_HW_EVENT_SRPCIM_SERR: + case VXGE_HW_EVENT_MRPCIM_SERR: + ret = -EPERM; + goto out; + case VXGE_HW_EVENT_SLOT_FREEZE: + netif_tx_stop_all_queues(vdev->ndev); + vxge_debug_init(VXGE_ERR, + "fatal: %s: Disabling device due to" + "slot freeze", + vdev->ndev->name); + ret = -EPERM; + goto out; + default: + break; + + } + } + + if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) + netif_tx_stop_all_queues(vdev->ndev); + + if (event == VXGE_LL_FULL_RESET) { + status = vxge_reset_all_vpaths(vdev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "fatal: %s: can not reset vpaths", + vdev->ndev->name); + ret = -EPERM; + goto out; + } + } + + if (event == VXGE_LL_COMPL_RESET) { + for (i = 0; i < vdev->no_of_vpath; i++) + if (vdev->vpaths[i].handle) { + if 
(vxge_hw_vpath_recover_from_reset( + vdev->vpaths[i].handle) + != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_recover_" + "from_reset failed for vpath: " + "%d", i); + ret = -EPERM; + goto out; + } + } else { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_reset failed for " + "vpath:%d", i); + ret = -EPERM; + goto out; + } + } + + if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) { + /* Reprogram the DA table with populated mac addresses */ + for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { + vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]); + vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]); + } + + /* enable vpath interrupts */ + for (i = 0; i < vdev->no_of_vpath; i++) + vxge_vpath_intr_enable(vdev, i); + + vxge_hw_device_intr_enable(vdev->devh); + + smp_wmb(); + + /* Indicate card up */ + set_bit(__VXGE_STATE_CARD_UP, &vdev->state); + + /* Get the traffic to flow through the vpaths */ + for (i = 0; i < vdev->no_of_vpath; i++) { + vxge_hw_vpath_enable(vdev->vpaths[i].handle); + smp_wmb(); + vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); + } + + netif_tx_wake_all_queues(vdev->ndev); + } + + /* configure CI */ + vxge_config_ci_for_tti_rti(vdev); + +out: + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); + + /* Indicate reset done */ + if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) + clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); + return ret; +} + +/* + * vxge_reset + * @vdev: pointer to ll device + * + * driver may reset the chip on events of serr, eccerr, etc + */ +static void vxge_reset(struct work_struct *work) +{ + struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task); + + if (!netif_running(vdev->ndev)) + return; + + do_vxge_reset(vdev, VXGE_LL_FULL_RESET); +} + +/** + * vxge_poll - Receive handler when Receive Polling is used. + * @dev: pointer to the device structure. + * @budget: Number of packets budgeted to be processed in this iteration. + * + * This function comes into picture only if Receive side is being handled + * through polling (called NAPI in linux). It mostly does what the normal + * Rx interrupt handler does in terms of descriptor and packet processing + * but not in an interrupt context. Also it will process a specified number + * of packets at most in one iteration. This value is passed down by the + * kernel as the function argument 'budget'. 
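Editor's note: serious errors are not handled in interrupt context; vxge_reset() runs from the reset_task work item and just calls do_vxge_reset(vdev, VXGE_LL_FULL_RESET). The generic work-item pattern it relies on, with illustrative names rather than the driver's actual init path:

struct demo_adapter {
	struct net_device *ndev;
	struct work_struct reset_task;
};

static void demo_reset_worker(struct work_struct *work)
{
	struct demo_adapter *ap = container_of(work, struct demo_adapter,
					       reset_task);

	if (!netif_running(ap->ndev))
		return;

	/* a full chip reset would be performed here, in process context */
}

static void demo_setup(struct demo_adapter *ap)
{
	INIT_WORK(&ap->reset_task, demo_reset_worker);
}

/* from an error path or ISR: schedule_work(&ap->reset_task); */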
+ */ +static int vxge_poll_msix(struct napi_struct *napi, int budget) +{ + struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); + int pkts_processed; + int budget_org = budget; + + ring->budget = budget; + ring->pkts_processed = 0; + vxge_hw_vpath_poll_rx(ring->handle); + pkts_processed = ring->pkts_processed; + + if (ring->pkts_processed < budget_org) { + napi_complete(napi); + + /* Re enable the Rx interrupts for the vpath */ + vxge_hw_channel_msix_unmask( + (struct __vxge_hw_channel *)ring->handle, + ring->rx_vector_no); + mmiowb(); + } + + /* We are copying and returning the local variable, in case if after + * clearing the msix interrupt above, if the interrupt fires right + * away which can preempt this NAPI thread */ + return pkts_processed; +} + +static int vxge_poll_inta(struct napi_struct *napi, int budget) +{ + struct vxgedev *vdev = container_of(napi, struct vxgedev, napi); + int pkts_processed = 0; + int i; + int budget_org = budget; + struct vxge_ring *ring; + + struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev); + + for (i = 0; i < vdev->no_of_vpath; i++) { + ring = &vdev->vpaths[i].ring; + ring->budget = budget; + ring->pkts_processed = 0; + vxge_hw_vpath_poll_rx(ring->handle); + pkts_processed += ring->pkts_processed; + budget -= ring->pkts_processed; + if (budget <= 0) + break; + } + + VXGE_COMPLETE_ALL_TX(vdev); + + if (pkts_processed < budget_org) { + napi_complete(napi); + /* Re enable the Rx interrupts for the ring */ + vxge_hw_device_unmask_all(hldev); + vxge_hw_device_flush_io(hldev); + } + + return pkts_processed; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * vxge_netpoll - netpoll event handler entry point + * @dev : pointer to the device structure. + * Description: + * This function will be called by upper layer to check for events on the + * interface in situations where interrupts are disabled. It is used for + * specific in-kernel networking tasks, such as remote consoles and kernel + * debugging over the network (example netdump in RedHat). 
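Editor's note: both poll routines above follow the canonical NAPI contract: process at most 'budget' packets, and only when fewer were processed call napi_complete() and unmask the interrupt so the next frame raises it again. A minimal skeleton of that contract, with the device-specific poll and unmask steps left as comments:

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... pull completed rx descriptors, at most 'budget' of them,
	 * incrementing 'done' for each packet delivered to the stack ... */

	if (done < budget) {
		napi_complete(napi);
		/* ... re-enable / unmask the rx interrupt here ... */
	}
	return done;	/* the kernel keeps polling while done == budget */
}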
+ */ +static void vxge_netpoll(struct net_device *dev) +{ + struct vxgedev *vdev = netdev_priv(dev); + struct pci_dev *pdev = vdev->pdev; + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); + const int irq = pdev->irq; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + if (pci_channel_offline(pdev)) + return; + + disable_irq(irq); + vxge_hw_device_clear_tx_rx(hldev); + VXGE_COMPLETE_ALL_RX(vdev); + VXGE_COMPLETE_ALL_TX(vdev); + + enable_irq(irq); + + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); +} +#endif + +/* RTH configuration */ +static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_rth_hash_types hash_types; + u8 itable[256] = {0}; /* indirection table */ + u8 mtable[256] = {0}; /* CPU to vpath mapping */ + int index; + + /* + * Filling + * - itable with bucket numbers + * - mtable with bucket-to-vpath mapping + */ + for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) { + itable[index] = index; + mtable[index] = index % vdev->no_of_vpath; + } + + /* set indirection table, bucket-to-vpath mapping */ + status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, + vdev->no_of_vpath, + mtable, itable, + vdev->config.rth_bkt_sz); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "RTH indirection table configuration failed " + "for vpath:%d", vdev->vpaths[0].device_id); + return status; + } + + /* Fill RTH hash types */ + hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; + hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; + hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; + hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; + hash_types.hash_type_tcpipv6ex_en = + vdev->config.rth_hash_type_tcpipv6ex; + hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; + + /* + * Because the itable_set() method uses the active_table field + * for the target virtual path the RTH config should be updated + * for all VPATHs. The h/w only uses the lowest numbered VPATH + * when steering frames.
+ */ + for (index = 0; index < vdev->no_of_vpath; index++) { + status = vxge_hw_vpath_rts_rth_set( + vdev->vpaths[index].handle, + vdev->config.rth_algorithm, + &hash_types, + vdev->config.rth_bkt_sz); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "RTH configuration failed for vpath:%d", + vdev->vpaths[index].device_id); + return status; + } + } + + return status; +} + +/* reset vpaths */ +static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) +{ + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + int i; + + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + if (vpath->handle) { + if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { + if (is_vxge_card_up(vdev) && + vxge_hw_vpath_recover_from_reset( + vpath->handle) != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_recover_" + "from_reset failed for vpath: " + "%d", i); + return status; + } + } else { + vxge_debug_init(VXGE_ERR, + "vxge_hw_vpath_reset failed for " + "vpath:%d", i); + return status; + } + } + } + + return status; +} + +/* close vpaths */ +static void vxge_close_vpaths(struct vxgedev *vdev, int index) +{ + struct vxge_vpath *vpath; + int i; + + for (i = index; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + + if (vpath->handle && vpath->is_open) { + vxge_hw_vpath_close(vpath->handle); + vdev->stats.vpaths_open--; + } + vpath->is_open = 0; + vpath->handle = NULL; + } +} + +/* open vpaths */ +static int vxge_open_vpaths(struct vxgedev *vdev) +{ + struct vxge_hw_vpath_attr attr; + enum vxge_hw_status status; + struct vxge_vpath *vpath; + u32 vp_id = 0; + int i; + + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_assert(vpath->is_configured); + + if (!vdev->titan1) { + struct vxge_hw_vp_config *vcfg; + vcfg = &vdev->devh->config.vp_config[vpath->device_id]; + + vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A; + vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B; + vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C; + vcfg->tti.uec_a = TTI_T1A_TX_UFC_A; + vcfg->tti.uec_b = TTI_T1A_TX_UFC_B; + vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu); + vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu); + vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL; + vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL; + } + + attr.vp_id = vpath->device_id; + attr.fifo_attr.callback = vxge_xmit_compl; + attr.fifo_attr.txdl_term = vxge_tx_term; + attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); + attr.fifo_attr.userdata = &vpath->fifo; + + attr.ring_attr.callback = vxge_rx_1b_compl; + attr.ring_attr.rxd_init = vxge_rx_initial_replenish; + attr.ring_attr.rxd_term = vxge_rx_term; + attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); + attr.ring_attr.userdata = &vpath->ring; + + vpath->ring.ndev = vdev->ndev; + vpath->ring.pdev = vdev->pdev; + + status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); + if (status == VXGE_HW_OK) { + vpath->fifo.handle = + (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; + vpath->ring.handle = + (struct __vxge_hw_ring *)attr.ring_attr.userdata; + vpath->fifo.tx_steering_type = + vdev->config.tx_steering_type; + vpath->fifo.ndev = vdev->ndev; + vpath->fifo.pdev = vdev->pdev; + + u64_stats_init(&vpath->fifo.stats.syncp); + u64_stats_init(&vpath->ring.stats.syncp); + + if (vdev->config.tx_steering_type) + vpath->fifo.txq = + netdev_get_tx_queue(vdev->ndev, i); + else + vpath->fifo.txq = + netdev_get_tx_queue(vdev->ndev, 0); + vpath->fifo.indicate_max_pkts = + vdev->config.fifo_indicate_max_pkts; + 
vpath->fifo.tx_vector_no = 0; + vpath->ring.rx_vector_no = 0; + vpath->ring.rx_hwts = vdev->rx_hwts; + vpath->is_open = 1; + vdev->vp_handles[i] = vpath->handle; + vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; + vdev->stats.vpaths_open++; + } else { + vdev->stats.vpath_open_fail++; + vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to " + "open with status: %d", + vdev->ndev->name, vpath->device_id, + status); + vxge_close_vpaths(vdev, 0); + return -EPERM; + } + + vp_id = vpath->handle->vpath->vp_id; + vdev->vpaths_deployed |= vxge_mBIT(vp_id); + } + + return VXGE_HW_OK; +} + +/** + * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing + * if the interrupts are not within a range + * @fifo: pointer to transmit fifo structure + * Description: The function changes the boundary timer and restriction timer + * values depending on the traffic + * Return Value: None + */ +static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) +{ + fifo->interrupt_count++; + if (time_before(fifo->jiffies + HZ / 100, jiffies)) { + struct __vxge_hw_fifo *hw_fifo = fifo->handle; + + fifo->jiffies = jiffies; + if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT && + hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) { + hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL; + vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); + } else if (hw_fifo->rtimer != 0) { + hw_fifo->rtimer = 0; + vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); + } + fifo->interrupt_count = 0; + } +} + +/** + * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing + * if the interrupts are not within a range + * @ring: pointer to receive ring structure + * Description: The function increases or decreases the packet counts within + * the ranges of traffic utilization, if the interrupts due to this ring are + * not within a fixed range. + * Return Value: Nothing + */ +static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) +{ + ring->interrupt_count++; + if (time_before(ring->jiffies + HZ / 100, jiffies)) { + struct __vxge_hw_ring *hw_ring = ring->handle; + + ring->jiffies = jiffies; + if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && + hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) { + hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL; + vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); + } else if (hw_ring->rtimer != 0) { + hw_ring->rtimer = 0; + vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); + } + ring->interrupt_count = 0; + } +} + +/* + * vxge_isr_napi + * @irq: the irq of the device. + * @dev_id: a void pointer to the hldev structure of the Titan device + * @ptregs: pointer to the registers pushed on the stack. + * + * This function is the ISR handler of the device when napi is enabled. It + * identifies the reason for the interrupt and calls the relevant service + * routines.
+ */ +static irqreturn_t vxge_isr_napi(int irq, void *dev_id) +{ + struct net_device *dev; + struct __vxge_hw_device *hldev; + u64 reason; + enum vxge_hw_status status; + struct vxgedev *vdev = (struct vxgedev *)dev_id; + + vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + dev = vdev->ndev; + hldev = pci_get_drvdata(vdev->pdev); + + if (pci_channel_offline(vdev->pdev)) + return IRQ_NONE; + + if (unlikely(!is_vxge_card_up(vdev))) + return IRQ_HANDLED; + + status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason); + if (status == VXGE_HW_OK) { + vxge_hw_device_mask_all(hldev); + + if (reason & + VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT( + vdev->vpaths_deployed >> + (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) { + + vxge_hw_device_clear_tx_rx(hldev); + napi_schedule(&vdev->napi); + vxge_debug_intr(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); + return IRQ_HANDLED; + } else + vxge_hw_device_unmask_all(hldev); + } else if (unlikely((status == VXGE_HW_ERR_VPATH) || + (status == VXGE_HW_ERR_CRITICAL) || + (status == VXGE_HW_ERR_FIFO))) { + vxge_hw_device_mask_all(hldev); + vxge_hw_device_flush_io(hldev); + return IRQ_HANDLED; + } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE)) + return IRQ_HANDLED; + + vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); + return IRQ_NONE; +} + +#ifdef CONFIG_PCI_MSI + +static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) +{ + struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; + + adaptive_coalesce_tx_interrupts(fifo); + + vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle, + fifo->tx_vector_no); + + vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle, + fifo->tx_vector_no); + + VXGE_COMPLETE_VPATH_TX(fifo); + + vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, + fifo->tx_vector_no); + + mmiowb(); + + return IRQ_HANDLED; +} + +static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id) +{ + struct vxge_ring *ring = (struct vxge_ring *)dev_id; + + adaptive_coalesce_rx_interrupts(ring); + + vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, + ring->rx_vector_no); + + vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, + ring->rx_vector_no); + + napi_schedule(&ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t +vxge_alarm_msix_handle(int irq, void *dev_id) +{ + int i; + enum vxge_hw_status status; + struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; + struct vxgedev *vdev = vpath->vdev; + int msix_id = (vpath->handle->vpath->vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; + + for (i = 0; i < vdev->no_of_vpath; i++) { + /* Reduce the chance of losing alarm interrupts by masking + * the vector. A pending bit will be set if an alarm is + * generated and on unmask the interrupt will be fired. 
+ */ + vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); + vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); + mmiowb(); + + status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, + vdev->exec_mode); + if (status == VXGE_HW_OK) { + vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, + msix_id); + mmiowb(); + continue; + } + vxge_debug_intr(VXGE_ERR, + "%s: vxge_hw_vpath_alarm_process failed %x ", + VXGE_DRIVER_NAME, status); + } + return IRQ_HANDLED; +} + +static int vxge_alloc_msix(struct vxgedev *vdev) +{ + int j, i, ret = 0; + int msix_intr_vect = 0, temp; + vdev->intr_cnt = 0; + +start: + /* Tx/Rx MSIX Vectors count */ + vdev->intr_cnt = vdev->no_of_vpath * 2; + + /* Alarm MSIX Vectors count */ + vdev->intr_cnt++; + + vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry), + GFP_KERNEL); + if (!vdev->entries) { + vxge_debug_init(VXGE_ERR, + "%s: memory allocation failed", + VXGE_DRIVER_NAME); + ret = -ENOMEM; + goto alloc_entries_failed; + } + + vdev->vxge_entries = kcalloc(vdev->intr_cnt, + sizeof(struct vxge_msix_entry), + GFP_KERNEL); + if (!vdev->vxge_entries) { + vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", + VXGE_DRIVER_NAME); + ret = -ENOMEM; + goto alloc_vxge_entries_failed; + } + + for (i = 0, j = 0; i < vdev->no_of_vpath; i++) { + + msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; + + /* Initialize the fifo vector */ + vdev->entries[j].entry = msix_intr_vect; + vdev->vxge_entries[j].entry = msix_intr_vect; + vdev->vxge_entries[j].in_use = 0; + j++; + + /* Initialize the ring vector */ + vdev->entries[j].entry = msix_intr_vect + 1; + vdev->vxge_entries[j].entry = msix_intr_vect + 1; + vdev->vxge_entries[j].in_use = 0; + j++; + } + + /* Initialize the alarm vector */ + vdev->entries[j].entry = VXGE_ALARM_MSIX_ID; + vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; + vdev->vxge_entries[j].in_use = 0; + + ret = pci_enable_msix_range(vdev->pdev, + vdev->entries, 3, vdev->intr_cnt); + if (ret < 0) { + ret = -ENODEV; + goto enable_msix_failed; + } else if (ret < vdev->intr_cnt) { + pci_disable_msix(vdev->pdev); + + vxge_debug_init(VXGE_ERR, + "%s: MSI-X enable failed for %d vectors, ret: %d", + VXGE_DRIVER_NAME, vdev->intr_cnt, ret); + if (max_config_vpath != VXGE_USE_DEFAULT) { + ret = -ENODEV; + goto enable_msix_failed; + } + + kfree(vdev->entries); + kfree(vdev->vxge_entries); + vdev->entries = NULL; + vdev->vxge_entries = NULL; + /* Try with less no of vector by reducing no of vpaths count */ + temp = (ret - 1)/2; + vxge_close_vpaths(vdev, temp); + vdev->no_of_vpath = temp; + goto start; + } + return 0; + +enable_msix_failed: + kfree(vdev->vxge_entries); +alloc_vxge_entries_failed: + kfree(vdev->entries); +alloc_entries_failed: + return ret; +} + +static int vxge_enable_msix(struct vxgedev *vdev) +{ + + int i, ret = 0; + /* 0 - Tx, 1 - Rx */ + int tim_msix_id[4] = {0, 1, 0, 0}; + + vdev->intr_cnt = 0; + + /* allocate msix vectors */ + ret = vxge_alloc_msix(vdev); + if (!ret) { + for (i = 0; i < vdev->no_of_vpath; i++) { + struct vxge_vpath *vpath = &vdev->vpaths[i]; + + /* If fifo or ring are not enabled, the MSIX vector for + * it should be set to 0. 
+ */ + vpath->ring.rx_vector_no = (vpath->device_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + 1; + + vpath->fifo.tx_vector_no = (vpath->device_id * + VXGE_HW_VPATH_MSIX_ACTIVE); + + vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, + VXGE_ALARM_MSIX_ID); + } + } + + return ret; +} + +static void vxge_rem_msix_isr(struct vxgedev *vdev) +{ + int intr_cnt; + + for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1); + intr_cnt++) { + if (vdev->vxge_entries[intr_cnt].in_use) { + synchronize_irq(vdev->entries[intr_cnt].vector); + free_irq(vdev->entries[intr_cnt].vector, + vdev->vxge_entries[intr_cnt].arg); + vdev->vxge_entries[intr_cnt].in_use = 0; + } + } + + kfree(vdev->entries); + kfree(vdev->vxge_entries); + vdev->entries = NULL; + vdev->vxge_entries = NULL; + + if (vdev->config.intr_type == MSI_X) + pci_disable_msix(vdev->pdev); +} +#endif + +static void vxge_rem_isr(struct vxgedev *vdev) +{ +#ifdef CONFIG_PCI_MSI + if (vdev->config.intr_type == MSI_X) { + vxge_rem_msix_isr(vdev); + } else +#endif + if (vdev->config.intr_type == INTA) { + synchronize_irq(vdev->pdev->irq); + free_irq(vdev->pdev->irq, vdev); + } +} + +static int vxge_add_isr(struct vxgedev *vdev) +{ + int ret = 0; +#ifdef CONFIG_PCI_MSI + int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; + int pci_fun = PCI_FUNC(vdev->pdev->devfn); + + if (vdev->config.intr_type == MSI_X) + ret = vxge_enable_msix(vdev); + + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); + vxge_debug_init(VXGE_ERR, + "%s: Defaulting to INTA", VXGE_DRIVER_NAME); + vdev->config.intr_type = INTA; + } + + if (vdev->config.intr_type == MSI_X) { + for (intr_idx = 0; + intr_idx < (vdev->no_of_vpath * + VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { + + msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE; + irq_req = 0; + + switch (msix_idx) { + case 0: + snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, + "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d", + vdev->ndev->name, + vdev->entries[intr_cnt].entry, + pci_fun, vp_idx); + ret = request_irq( + vdev->entries[intr_cnt].vector, + vxge_tx_msix_handle, 0, + vdev->desc[intr_cnt], + &vdev->vpaths[vp_idx].fifo); + vdev->vxge_entries[intr_cnt].arg = + &vdev->vpaths[vp_idx].fifo; + irq_req = 1; + break; + case 1: + snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, + "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d", + vdev->ndev->name, + vdev->entries[intr_cnt].entry, + pci_fun, vp_idx); + ret = request_irq( + vdev->entries[intr_cnt].vector, + vxge_rx_msix_napi_handle, + 0, + vdev->desc[intr_cnt], + &vdev->vpaths[vp_idx].ring); + vdev->vxge_entries[intr_cnt].arg = + &vdev->vpaths[vp_idx].ring; + irq_req = 1; + break; + } + + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s: MSIX - %d Registration failed", + vdev->ndev->name, intr_cnt); + vxge_rem_msix_isr(vdev); + vdev->config.intr_type = INTA; + vxge_debug_init(VXGE_ERR, + "%s: Defaulting to INTA" + , vdev->ndev->name); + goto INTA_MODE; + } + + if (irq_req) { + /* We requested for this msix interrupt */ + vdev->vxge_entries[intr_cnt].in_use = 1; + msix_idx += vdev->vpaths[vp_idx].device_id * + VXGE_HW_VPATH_MSIX_ACTIVE; + vxge_hw_vpath_msix_unmask( + vdev->vpaths[vp_idx].handle, + msix_idx); + intr_cnt++; + } + + /* Point to next vpath handler */ + if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) && + (vp_idx < (vdev->no_of_vpath - 1))) + vp_idx++; + } + + intr_cnt = vdev->no_of_vpath * 2; + snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, + "%s:vxge:MSI-X %d - Alarm - fn:%d", + vdev->ndev->name, + vdev->entries[intr_cnt].entry, + 
pci_fun); + /* For Alarm interrupts */ + ret = request_irq(vdev->entries[intr_cnt].vector, + vxge_alarm_msix_handle, 0, + vdev->desc[intr_cnt], + &vdev->vpaths[0]); + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s: MSIX - %d Registration failed", + vdev->ndev->name, intr_cnt); + vxge_rem_msix_isr(vdev); + vdev->config.intr_type = INTA; + vxge_debug_init(VXGE_ERR, + "%s: Defaulting to INTA", + vdev->ndev->name); + goto INTA_MODE; + } + + msix_idx = (vdev->vpaths[0].handle->vpath->vp_id * + VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; + vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, + msix_idx); + vdev->vxge_entries[intr_cnt].in_use = 1; + vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; + } +INTA_MODE: +#endif + + if (vdev->config.intr_type == INTA) { + snprintf(vdev->desc[0], VXGE_INTR_STRLEN, + "%s:vxge:INTA", vdev->ndev->name); + vxge_hw_device_set_intr_type(vdev->devh, + VXGE_HW_INTR_MODE_IRQLINE); + + vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle); + + ret = request_irq((int) vdev->pdev->irq, + vxge_isr_napi, + IRQF_SHARED, vdev->desc[0], vdev); + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s %s-%d: ISR registration failed", + VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq); + return -ENODEV; + } + vxge_debug_init(VXGE_TRACE, + "new %s-%d line allocated", + "IRQ", vdev->pdev->irq); + } + + return VXGE_HW_OK; +} + +static void vxge_poll_vp_reset(unsigned long data) +{ + struct vxgedev *vdev = (struct vxgedev *)data; + int i, j = 0; + + for (i = 0; i < vdev->no_of_vpath; i++) { + if (test_bit(i, &vdev->vp_reset)) { + vxge_reset_vpath(vdev, i); + j++; + } + } + if (j && (vdev->config.intr_type != MSI_X)) { + vxge_hw_device_unmask_all(vdev->devh); + vxge_hw_device_flush_io(vdev->devh); + } + + mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2); +} + +static void vxge_poll_vp_lockup(unsigned long data) +{ + struct vxgedev *vdev = (struct vxgedev *)data; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_vpath *vpath; + struct vxge_ring *ring; + int i; + unsigned long rx_frms; + + for (i = 0; i < vdev->no_of_vpath; i++) { + ring = &vdev->vpaths[i].ring; + + /* Truncated to machine word size number of frames */ + rx_frms = ACCESS_ONCE(ring->stats.rx_frms); + + /* Did this vpath received any packets */ + if (ring->stats.prev_rx_frms == rx_frms) { + status = vxge_hw_vpath_check_leak(ring->handle); + + /* Did it received any packets last time */ + if ((VXGE_HW_FAIL == status) && + (VXGE_HW_FAIL == ring->last_status)) { + + /* schedule vpath reset */ + if (!test_and_set_bit(i, &vdev->vp_reset)) { + vpath = &vdev->vpaths[i]; + + /* disable interrupts for this vpath */ + vxge_vpath_intr_disable(vdev, i); + + /* stop the queue for this vpath */ + netif_tx_stop_queue(vpath->fifo.txq); + continue; + } + } + } + ring->stats.prev_rx_frms = rx_frms; + ring->last_status = status; + } + + /* Check every 1 milli second */ + mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); +} + +static netdev_features_t vxge_fix_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + + /* Enabling RTH requires some of the logic in vxge_device_register and a + * vpath reset. Due to these restrictions, only allow modification + * while the interface is down. 
+ */ + if ((changed & NETIF_F_RXHASH) && netif_running(dev)) + features ^= NETIF_F_RXHASH; + + return features; +} + +static int vxge_set_features(struct net_device *dev, netdev_features_t features) +{ + struct vxgedev *vdev = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if (!(changed & NETIF_F_RXHASH)) + return 0; + + /* !netif_running() ensured by vxge_fix_features() */ + + vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH); + if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) { + dev->features = features ^ NETIF_F_RXHASH; + vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH); + return -EIO; + } + + return 0; +} + +/** + * vxge_open + * @dev: pointer to the device structure. + * + * This function is the open entry point of the driver. It mainly calls a + * function to allocate Rx buffers and inserts them into the buffer + * descriptors and then enables the Rx part of the NIC. + * Return value: '0' on success and an appropriate (-)ve integer as + * defined in errno.h file on failure. + */ +static int vxge_open(struct net_device *dev) +{ + enum vxge_hw_status status; + struct vxgedev *vdev; + struct __vxge_hw_device *hldev; + struct vxge_vpath *vpath; + int ret = 0; + int i; + u64 val64, function_mode; + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d", dev->name, __func__, __LINE__); + + vdev = netdev_priv(dev); + hldev = pci_get_drvdata(vdev->pdev); + function_mode = vdev->config.device_hw_info.function_mode; + + /* make sure you have link off by default every time Nic is + * initialized */ + netif_carrier_off(dev); + + /* Open VPATHs */ + status = vxge_open_vpaths(vdev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: fatal: Vpath open failed", vdev->ndev->name); + ret = -EPERM; + goto out0; + } + + vdev->mtu = dev->mtu; + + status = vxge_add_isr(vdev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: fatal: ISR add failed", dev->name); + ret = -EPERM; + goto out1; + } + + if (vdev->config.intr_type != MSI_X) { + netif_napi_add(dev, &vdev->napi, vxge_poll_inta, + vdev->config.napi_weight); + napi_enable(&vdev->napi); + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vpath->ring.napi_p = &vdev->napi; + } + } else { + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + netif_napi_add(dev, &vpath->ring.napi, + vxge_poll_msix, vdev->config.napi_weight); + napi_enable(&vpath->ring.napi); + vpath->ring.napi_p = &vpath->ring.napi; + } + } + + /* configure RTH */ + if (vdev->config.rth_steering) { + status = vxge_rth_configure(vdev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: fatal: RTH configuration failed", + dev->name); + ret = -EPERM; + goto out2; + } + } + printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name, + hldev->config.rth_en ? 
"enabled" : "disabled"); + + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + + /* set initial mtu before enabling the device */ + status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: fatal: can not set new MTU", dev->name); + ret = -EPERM; + goto out2; + } + } + + VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev); + vxge_debug_init(vdev->level_trace, + "%s: MTU is %d", vdev->ndev->name, vdev->mtu); + VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev); + + /* Restore the DA, VID table and also multicast and promiscuous mode + * states + */ + if (vdev->all_multi_flg) { + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + vxge_restore_vpath_mac_addr(vpath); + vxge_restore_vpath_vid_table(vpath); + + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, + "%s:%d Enabling multicast failed", + __func__, __LINE__); + } + } + + /* Enable vpath to sniff all unicast/multicast traffic that not + * addressed to them. We allow promiscuous mode for PF only + */ + + val64 = 0; + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) + val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i); + + vxge_hw_mgmt_reg_write(vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + (ulong)offsetof(struct vxge_hw_mrpcim_reg, + rxmac_authorize_all_addr), + val64); + + vxge_hw_mgmt_reg_write(vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + (ulong)offsetof(struct vxge_hw_mrpcim_reg, + rxmac_authorize_all_vid), + val64); + + vxge_set_multicast(dev); + + /* Enabling Bcast and mcast for all vpath */ + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + status = vxge_hw_vpath_bcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, + "%s : Can not enable bcast for vpath " + "id %d", dev->name, i); + if (vdev->config.addr_learn_en) { + status = vxge_hw_vpath_mcast_enable(vpath->handle); + if (status != VXGE_HW_OK) + vxge_debug_init(VXGE_ERR, + "%s : Can not enable mcast for vpath " + "id %d", dev->name, i); + } + } + + vxge_hw_device_setpause_data(vdev->devh, 0, + vdev->config.tx_pause_enable, + vdev->config.rx_pause_enable); + + if (vdev->vp_reset_timer.function == NULL) + vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, + HZ / 2); + + /* There is no need to check for RxD leak and RxD lookup on Titan1A */ + if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) + vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, + HZ / 2); + + set_bit(__VXGE_STATE_CARD_UP, &vdev->state); + + smp_wmb(); + + if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) { + netif_carrier_on(vdev->ndev); + netdev_notice(vdev->ndev, "Link Up\n"); + vdev->stats.link_up++; + } + + vxge_hw_device_intr_enable(vdev->devh); + + smp_wmb(); + + for (i = 0; i < vdev->no_of_vpath; i++) { + vpath = &vdev->vpaths[i]; + + vxge_hw_vpath_enable(vpath->handle); + smp_wmb(); + vxge_hw_vpath_rx_doorbell_init(vpath->handle); + } + + netif_tx_start_all_queues(vdev->ndev); + + /* configure CI */ + vxge_config_ci_for_tti_rti(vdev); + + goto out0; + +out2: + vxge_rem_isr(vdev); + + /* Disable napi */ + if (vdev->config.intr_type != MSI_X) + napi_disable(&vdev->napi); + else { + for (i = 0; i < vdev->no_of_vpath; i++) + napi_disable(&vdev->vpaths[i].ring.napi); + } + +out1: + vxge_close_vpaths(vdev, 0); +out0: + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", + dev->name, __func__, __LINE__); + 
return ret; +} + +/* Loop through the mac address list and delete all the entries */ +static void vxge_free_mac_add_list(struct vxge_vpath *vpath) +{ + + struct list_head *entry, *next; + if (list_empty(&vpath->mac_addr_list)) + return; + + list_for_each_safe(entry, next, &vpath->mac_addr_list) { + list_del(entry); + kfree((struct vxge_mac_addrs *)entry); + } +} + +static void vxge_napi_del_all(struct vxgedev *vdev) +{ + int i; + if (vdev->config.intr_type != MSI_X) + netif_napi_del(&vdev->napi); + else { + for (i = 0; i < vdev->no_of_vpath; i++) + netif_napi_del(&vdev->vpaths[i].ring.napi); + } +} + +static int do_vxge_close(struct net_device *dev, int do_io) +{ + enum vxge_hw_status status; + struct vxgedev *vdev; + struct __vxge_hw_device *hldev; + int i; + u64 val64, vpath_vector; + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", + dev->name, __func__, __LINE__); + + vdev = netdev_priv(dev); + hldev = pci_get_drvdata(vdev->pdev); + + if (unlikely(!is_vxge_card_up(vdev))) + return 0; + + /* If vxge_handle_crit_err task is executing, + * wait till it completes. */ + while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) + msleep(50); + + if (do_io) { + /* Put the vpath back in normal mode */ + vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); + status = vxge_hw_mgmt_reg_read(vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + (ulong)offsetof( + struct vxge_hw_mrpcim_reg, + rts_mgr_cbasin_cfg), + &val64); + if (status == VXGE_HW_OK) { + val64 &= ~vpath_vector; + status = vxge_hw_mgmt_reg_write(vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + (ulong)offsetof( + struct vxge_hw_mrpcim_reg, + rts_mgr_cbasin_cfg), + val64); + } + + /* Remove the function 0 from promiscuous mode */ + vxge_hw_mgmt_reg_write(vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + (ulong)offsetof(struct vxge_hw_mrpcim_reg, + rxmac_authorize_all_addr), + 0); + + vxge_hw_mgmt_reg_write(vdev->devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + (ulong)offsetof(struct vxge_hw_mrpcim_reg, + rxmac_authorize_all_vid), + 0); + + smp_wmb(); + } + + if (vdev->titan1) + del_timer_sync(&vdev->vp_lockup_timer); + + del_timer_sync(&vdev->vp_reset_timer); + + if (do_io) + vxge_hw_device_wait_receive_idle(hldev); + + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + + /* Disable napi */ + if (vdev->config.intr_type != MSI_X) + napi_disable(&vdev->napi); + else { + for (i = 0; i < vdev->no_of_vpath; i++) + napi_disable(&vdev->vpaths[i].ring.napi); + } + + netif_carrier_off(vdev->ndev); + netdev_notice(vdev->ndev, "Link Down\n"); + netif_tx_stop_all_queues(vdev->ndev); + + /* Note that at this point xmit() is stopped by upper layer */ + if (do_io) + vxge_hw_device_intr_disable(vdev->devh); + + vxge_rem_isr(vdev); + + vxge_napi_del_all(vdev); + + if (do_io) + vxge_reset_all_vpaths(vdev); + + vxge_close_vpaths(vdev, 0); + + vxge_debug_entryexit(VXGE_TRACE, + "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); + + clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); + + return 0; +} + +/** + * vxge_close + * @dev: device pointer. + * + * This is the stop entry point of the driver. It needs to undo exactly + * whatever was done by the open entry point, thus it's usually referred to + * as the close function.Among other things this function mainly stops the + * Rx side of the NIC and frees all the Rx buffers in the Rx rings. + * Return value: '0' on success and an appropriate (-)ve integer as + * defined in errno.h file on failure. 
+ */ +static int vxge_close(struct net_device *dev) +{ + do_vxge_close(dev, 1); + return 0; +} + +/** + * vxge_change_mtu + * @dev: net device pointer. + * @new_mtu :the new MTU size for the device. + * + * A driver entry point to change MTU size for the device. Before changing + * the MTU the device must be stopped. + */ +static int vxge_change_mtu(struct net_device *dev, int new_mtu) +{ + struct vxgedev *vdev = netdev_priv(dev); + + vxge_debug_entryexit(vdev->level_trace, + "%s:%d", __func__, __LINE__); + if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) { + vxge_debug_init(vdev->level_err, + "%s: mtu size is invalid", dev->name); + return -EPERM; + } + + /* check if device is down already */ + if (unlikely(!is_vxge_card_up(vdev))) { + /* just store new value, will use later on open() */ + dev->mtu = new_mtu; + vxge_debug_init(vdev->level_err, + "%s", "device is down on MTU change"); + return 0; + } + + vxge_debug_init(vdev->level_trace, + "trying to apply new MTU %d", new_mtu); + + if (vxge_close(dev)) + return -EIO; + + dev->mtu = new_mtu; + vdev->mtu = new_mtu; + + if (vxge_open(dev)) + return -EIO; + + vxge_debug_init(vdev->level_trace, + "%s: MTU changed to %d", vdev->ndev->name, new_mtu); + + vxge_debug_entryexit(vdev->level_trace, + "%s:%d Exiting...", __func__, __LINE__); + + return 0; +} + +/** + * vxge_get_stats64 + * @dev: pointer to the device structure + * @stats: pointer to struct rtnl_link_stats64 + * + */ +static struct rtnl_link_stats64 * +vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) +{ + struct vxgedev *vdev = netdev_priv(dev); + int k; + + /* net_stats already zeroed by caller */ + for (k = 0; k < vdev->no_of_vpath; k++) { + struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats; + struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats; + unsigned int start; + u64 packets, bytes, multicast; + + do { + start = u64_stats_fetch_begin_irq(&rxstats->syncp); + + packets = rxstats->rx_frms; + multicast = rxstats->rx_mcast; + bytes = rxstats->rx_bytes; + } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); + + net_stats->rx_packets += packets; + net_stats->rx_bytes += bytes; + net_stats->multicast += multicast; + + net_stats->rx_errors += rxstats->rx_errors; + net_stats->rx_dropped += rxstats->rx_dropped; + + do { + start = u64_stats_fetch_begin_irq(&txstats->syncp); + + packets = txstats->tx_frms; + bytes = txstats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&txstats->syncp, start)); + + net_stats->tx_packets += packets; + net_stats->tx_bytes += bytes; + net_stats->tx_errors += txstats->tx_errors; + } + + return net_stats; +} + +static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh) +{ + enum vxge_hw_status status; + u64 val64; + + /* Timestamp is passed to the driver via the FCS, therefore we + * must disable the FCS stripping by the adapter. Since this is + * required for the driver to load (due to a hardware bug), + * there is no need to do anything special here. 
+ */ + val64 = VXGE_HW_XMAC_TIMESTAMP_EN | + VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) | + VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0); + + status = vxge_hw_mgmt_reg_write(devh, + vxge_hw_mgmt_reg_type_mrpcim, + 0, + offsetof(struct vxge_hw_mrpcim_reg, + xmac_timestamp), + val64); + vxge_hw_device_flush_io(devh); + devh->config.hwts_en = VXGE_HW_HWTS_ENABLE; + return status; +} + +static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data) +{ + struct hwtstamp_config config; + int i; + + if (copy_from_user(&config, data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + /* Transmit HW Timestamp not supported */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ON: + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + vdev->rx_hwts = 0; + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE) + return -EFAULT; + + vdev->rx_hwts = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + + default: + return -ERANGE; + } + + for (i = 0; i < vdev->no_of_vpath; i++) + vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts; + + if (copy_to_user(data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data) +{ + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = HWTSTAMP_TX_OFF; + config.rx_filter = (vdev->rx_hwts ? + HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE); + + if (copy_to_user(data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +/** + * vxge_ioctl + * @dev: Device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * @cmd: This is used to distinguish between the different commands that + * can be passed to the IOCTL functions. + * + * Entry point for the Ioctl. + */ +static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct vxgedev *vdev = netdev_priv(dev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return vxge_hwtstamp_set(vdev, rq->ifr_data); + case SIOCGHWTSTAMP: + return vxge_hwtstamp_get(vdev, rq->ifr_data); + default: + return -EOPNOTSUPP; + } +} + +/** + * vxge_tx_watchdog + * @dev: pointer to net device structure + * + * Watchdog for transmit side. + * This function is triggered if the Tx Queue is stopped + * for a pre-defined amount of time when the Interface is still up. + */ +static void vxge_tx_watchdog(struct net_device *dev) +{ + struct vxgedev *vdev; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + vdev = netdev_priv(dev); + + vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; + + schedule_work(&vdev->reset_task); + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); +} + +/** + * vxge_vlan_rx_add_vid + * @dev: net device pointer. 
+ * @proto: vlan protocol + * @vid: vid + * + * Add the vlan id to the devices vlan id table + */ +static int +vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct vxgedev *vdev = netdev_priv(dev); + struct vxge_vpath *vpath; + int vp_id; + + /* Add these vlan to the vid table */ + for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { + vpath = &vdev->vpaths[vp_id]; + if (!vpath->is_open) + continue; + vxge_hw_vpath_vid_add(vpath->handle, vid); + } + set_bit(vid, vdev->active_vlans); + return 0; +} + +/** + * vxge_vlan_rx_kill_vid + * @dev: net device pointer. + * @proto: vlan protocol + * @vid: vid + * + * Remove the vlan id from the device's vlan id table + */ +static int +vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct vxgedev *vdev = netdev_priv(dev); + struct vxge_vpath *vpath; + int vp_id; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + + /* Delete this vlan from the vid table */ + for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { + vpath = &vdev->vpaths[vp_id]; + if (!vpath->is_open) + continue; + vxge_hw_vpath_vid_delete(vpath->handle, vid); + } + vxge_debug_entryexit(VXGE_TRACE, + "%s:%d Exiting...", __func__, __LINE__); + clear_bit(vid, vdev->active_vlans); + return 0; +} + +static const struct net_device_ops vxge_netdev_ops = { + .ndo_open = vxge_open, + .ndo_stop = vxge_close, + .ndo_get_stats64 = vxge_get_stats64, + .ndo_start_xmit = vxge_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = vxge_set_multicast, + .ndo_do_ioctl = vxge_ioctl, + .ndo_set_mac_address = vxge_set_mac_addr, + .ndo_change_mtu = vxge_change_mtu, + .ndo_fix_features = vxge_fix_features, + .ndo_set_features = vxge_set_features, + .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, + .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, + .ndo_tx_timeout = vxge_tx_watchdog, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vxge_netpoll, +#endif +}; + +static int vxge_device_register(struct __vxge_hw_device *hldev, + struct vxge_config *config, int high_dma, + int no_of_vpath, struct vxgedev **vdev_out) +{ + struct net_device *ndev; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxgedev *vdev; + int ret = 0, no_of_queue = 1; + u64 stat; + + *vdev_out = NULL; + if (config->tx_steering_type) + no_of_queue = no_of_vpath; + + ndev = alloc_etherdev_mq(sizeof(struct vxgedev), + no_of_queue); + if (ndev == NULL) { + vxge_debug_init( + vxge_hw_device_trace_level_get(hldev), + "%s : device allocation failed", __func__); + ret = -ENODEV; + goto _out0; + } + + vxge_debug_entryexit( + vxge_hw_device_trace_level_get(hldev), + "%s: %s:%d Entering...", + ndev->name, __func__, __LINE__); + + vdev = netdev_priv(ndev); + memset(vdev, 0, sizeof(struct vxgedev)); + + vdev->ndev = ndev; + vdev->devh = hldev; + vdev->pdev = hldev->pdev; + memcpy(&vdev->config, config, sizeof(struct vxge_config)); + vdev->rx_hwts = 0; + vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION); + + SET_NETDEV_DEV(ndev, &vdev->pdev->dev); + + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_HW_VLAN_CTAG_TX; + if (vdev->config.rth_steering != NO_STEERING) + ndev->hw_features |= NETIF_F_RXHASH; + + ndev->features |= ndev->hw_features | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + + + ndev->netdev_ops = &vxge_netdev_ops; + + ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; + INIT_WORK(&vdev->reset_task, vxge_reset); + + 
vxge_initialize_ethtool_ops(ndev); + + /* Allocate memory for vpath */ + vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * + no_of_vpath, GFP_KERNEL); + if (!vdev->vpaths) { + vxge_debug_init(VXGE_ERR, + "%s: vpath memory allocation failed", + vdev->ndev->name); + ret = -ENOMEM; + goto _out1; + } + + vxge_debug_init(vxge_hw_device_trace_level_get(hldev), + "%s : checksumming enabled", __func__); + + if (high_dma) { + ndev->features |= NETIF_F_HIGHDMA; + vxge_debug_init(vxge_hw_device_trace_level_get(hldev), + "%s : using High DMA", __func__); + } + + ret = register_netdev(ndev); + if (ret) { + vxge_debug_init(vxge_hw_device_trace_level_get(hldev), + "%s: %s : device registration failed!", + ndev->name, __func__); + goto _out2; + } + + /* Set the factory defined MAC address initially */ + ndev->addr_len = ETH_ALEN; + + /* Make Link state as off at this point, when the Link change + * interrupt comes the state will be automatically changed to + * the right state. + */ + netif_carrier_off(ndev); + + vxge_debug_init(vxge_hw_device_trace_level_get(hldev), + "%s: Ethernet device registered", + ndev->name); + + hldev->ndev = ndev; + *vdev_out = vdev; + + /* Resetting the Device stats */ + status = vxge_hw_mrpcim_stats_access( + hldev, + VXGE_HW_STATS_OP_CLEAR_ALL_STATS, + 0, + 0, + &stat); + + if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION) + vxge_debug_init( + vxge_hw_device_trace_level_get(hldev), + "%s: device stats clear returns" + "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name); + + vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev), + "%s: %s:%d Exiting...", + ndev->name, __func__, __LINE__); + + return ret; +_out2: + kfree(vdev->vpaths); +_out1: + free_netdev(ndev); +_out0: + return ret; +} + +/* + * vxge_device_unregister + * + * This function will unregister and free network device + */ +static void vxge_device_unregister(struct __vxge_hw_device *hldev) +{ + struct vxgedev *vdev; + struct net_device *dev; + char buf[IFNAMSIZ]; + + dev = hldev->ndev; + vdev = netdev_priv(dev); + + vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name, + __func__, __LINE__); + + strlcpy(buf, dev->name, IFNAMSIZ); + + flush_work(&vdev->reset_task); + + /* in 2.6 will call stop() if device is up */ + unregister_netdev(dev); + + kfree(vdev->vpaths); + + /* we are safe to free it now */ + free_netdev(dev); + + vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", + buf); + vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, + __func__, __LINE__); +} + +/* + * vxge_callback_crit_err + * + * This function is called by the alarm handler in interrupt context. + * Driver must analyze it based on the event type. 
+ */ +static void +vxge_callback_crit_err(struct __vxge_hw_device *hldev, + enum vxge_hw_event type, u64 vp_id) +{ + struct net_device *dev = hldev->ndev; + struct vxgedev *vdev = netdev_priv(dev); + struct vxge_vpath *vpath = NULL; + int vpath_idx; + + vxge_debug_entryexit(vdev->level_trace, + "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); + + /* Note: This event type should be used for device wide + * indications only - Serious errors, Slot freeze and critical errors + */ + vdev->cric_err_event = type; + + for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { + vpath = &vdev->vpaths[vpath_idx]; + if (vpath->device_id == vp_id) + break; + } + + if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { + if (type == VXGE_HW_EVENT_SLOT_FREEZE) { + vxge_debug_init(VXGE_ERR, + "%s: Slot is frozen", vdev->ndev->name); + } else if (type == VXGE_HW_EVENT_SERR) { + vxge_debug_init(VXGE_ERR, + "%s: Encountered Serious Error", + vdev->ndev->name); + } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) + vxge_debug_init(VXGE_ERR, + "%s: Encountered Critical Error", + vdev->ndev->name); + } + + if ((type == VXGE_HW_EVENT_SERR) || + (type == VXGE_HW_EVENT_SLOT_FREEZE)) { + if (unlikely(vdev->exec_mode)) + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) { + vxge_hw_device_mask_all(hldev); + if (unlikely(vdev->exec_mode)) + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + } else if ((type == VXGE_HW_EVENT_FIFO_ERR) || + (type == VXGE_HW_EVENT_VPATH_ERR)) { + + if (unlikely(vdev->exec_mode)) + clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); + else { + /* check if this vpath is already set for reset */ + if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) { + + /* disable interrupts for this vpath */ + vxge_vpath_intr_disable(vdev, vpath_idx); + + /* stop the queue for this vpath */ + netif_tx_stop_queue(vpath->fifo.txq); + } + } + } + + vxge_debug_entryexit(vdev->level_trace, + "%s: %s:%d Exiting...", + vdev->ndev->name, __func__, __LINE__); +} + +static void verify_bandwidth(void) +{ + int i, band_width, total = 0, equal_priority = 0; + + /* 1. If user enters 0 for some fifo, give equal priority to all */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (bw_percentage[i] == 0) { + equal_priority = 1; + break; + } + } + + if (!equal_priority) { + /* 2. If sum exceeds 100, give equal priority to all */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (bw_percentage[i] == 0xFF) + break; + + total += bw_percentage[i]; + if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) { + equal_priority = 1; + break; + } + } + } + + if (!equal_priority) { + /* Is all the bandwidth consumed? 
*/ + if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) { + if (i < VXGE_HW_MAX_VIRTUAL_PATHS) { + /* Split rest of bw equally among next VPs*/ + band_width = + (VXGE_HW_VPATH_BANDWIDTH_MAX - total) / + (VXGE_HW_MAX_VIRTUAL_PATHS - i); + if (band_width < 2) /* min of 2% */ + equal_priority = 1; + else { + for (; i < VXGE_HW_MAX_VIRTUAL_PATHS; + i++) + bw_percentage[i] = + band_width; + } + } + } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS) + equal_priority = 1; + } + + if (equal_priority) { + vxge_debug_init(VXGE_ERR, + "%s: Assigning equal bandwidth to all the vpaths", + VXGE_DRIVER_NAME); + bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX / + VXGE_HW_MAX_VIRTUAL_PATHS; + for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) + bw_percentage[i] = bw_percentage[0]; + } +} + +/* + * Vpath configuration + */ +static int vxge_config_vpaths(struct vxge_hw_device_config *device_config, + u64 vpath_mask, struct vxge_config *config_param) +{ + int i, no_of_vpaths = 0, default_no_vpath = 0, temp; + u32 txdl_size, txdl_per_memblock; + + temp = driver_config->vpath_per_dev; + if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) && + (max_config_dev == VXGE_MAX_CONFIG_DEV)) { + /* No more CPU. Return vpath number as zero.*/ + if (driver_config->g_no_cpus == -1) + return 0; + + if (!driver_config->g_no_cpus) + driver_config->g_no_cpus = + netif_get_num_default_rss_queues(); + + driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; + if (!driver_config->vpath_per_dev) + driver_config->vpath_per_dev = 1; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) + if (!vxge_bVALn(vpath_mask, i, 1)) + continue; + else + default_no_vpath++; + if (default_no_vpath < driver_config->vpath_per_dev) + driver_config->vpath_per_dev = default_no_vpath; + + driver_config->g_no_cpus = driver_config->g_no_cpus - + (driver_config->vpath_per_dev * 2); + if (driver_config->g_no_cpus <= 0) + driver_config->g_no_cpus = -1; + } + + if (driver_config->vpath_per_dev == 1) { + vxge_debug_ll_config(VXGE_TRACE, + "%s: Disable tx and rx steering, " + "as single vpath is configured", VXGE_DRIVER_NAME); + config_param->rth_steering = NO_STEERING; + config_param->tx_steering_type = NO_STEERING; + device_config->rth_en = 0; + } + + /* configure bandwidth */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) + device_config->vp_config[i].min_bandwidth = bw_percentage[i]; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + device_config->vp_config[i].vp_id = i; + device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU; + if (no_of_vpaths < driver_config->vpath_per_dev) { + if (!vxge_bVALn(vpath_mask, i, 1)) { + vxge_debug_ll_config(VXGE_TRACE, + "%s: vpath: %d is not available", + VXGE_DRIVER_NAME, i); + continue; + } else { + vxge_debug_ll_config(VXGE_TRACE, + "%s: vpath: %d available", + VXGE_DRIVER_NAME, i); + no_of_vpaths++; + } + } else { + vxge_debug_ll_config(VXGE_TRACE, + "%s: vpath: %d is not configured, " + "max_config_vpath exceeded", + VXGE_DRIVER_NAME, i); + break; + } + + /* Configure Tx fifo's */ + device_config->vp_config[i].fifo.enable = + VXGE_HW_FIFO_ENABLE; + device_config->vp_config[i].fifo.max_frags = + MAX_SKB_FRAGS + 1; + device_config->vp_config[i].fifo.memblock_size = + VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; + + txdl_size = device_config->vp_config[i].fifo.max_frags * + sizeof(struct vxge_hw_fifo_txd); + txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; + + device_config->vp_config[i].fifo.fifo_blocks = + ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1; + + device_config->vp_config[i].fifo.intr = + 
VXGE_HW_FIFO_QUEUE_INTR_DISABLE; + + /* Configure tti properties */ + device_config->vp_config[i].tti.intr_enable = + VXGE_HW_TIM_INTR_ENABLE; + + device_config->vp_config[i].tti.btimer_val = + (VXGE_TTI_BTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].tti.timer_ac_en = + VXGE_HW_TIM_TIMER_AC_ENABLE; + + /* For msi-x with napi (each vector has a handler of its own) - + * Set CI to OFF for all vpaths + */ + device_config->vp_config[i].tti.timer_ci_en = + VXGE_HW_TIM_TIMER_CI_DISABLE; + + device_config->vp_config[i].tti.timer_ri_en = + VXGE_HW_TIM_TIMER_RI_DISABLE; + + device_config->vp_config[i].tti.util_sel = + VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL; + + device_config->vp_config[i].tti.ltimer_val = + (VXGE_TTI_LTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].tti.rtimer_val = + (VXGE_TTI_RTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A; + device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B; + device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C; + device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A; + device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B; + device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C; + device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D; + + /* Configure Rx rings */ + device_config->vp_config[i].ring.enable = + VXGE_HW_RING_ENABLE; + + device_config->vp_config[i].ring.ring_blocks = + VXGE_HW_DEF_RING_BLOCKS; + + device_config->vp_config[i].ring.buffer_mode = + VXGE_HW_RING_RXD_BUFFER_MODE_1; + + device_config->vp_config[i].ring.rxds_limit = + VXGE_HW_DEF_RING_RXDS_LIMIT; + + device_config->vp_config[i].ring.scatter_mode = + VXGE_HW_RING_SCATTER_MODE_A; + + /* Configure rti properties */ + device_config->vp_config[i].rti.intr_enable = + VXGE_HW_TIM_INTR_ENABLE; + + device_config->vp_config[i].rti.btimer_val = + (VXGE_RTI_BTIMER_VAL * 1000)/272; + + device_config->vp_config[i].rti.timer_ac_en = + VXGE_HW_TIM_TIMER_AC_ENABLE; + + device_config->vp_config[i].rti.timer_ci_en = + VXGE_HW_TIM_TIMER_CI_DISABLE; + + device_config->vp_config[i].rti.timer_ri_en = + VXGE_HW_TIM_TIMER_RI_DISABLE; + + device_config->vp_config[i].rti.util_sel = + VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL; + + device_config->vp_config[i].rti.urange_a = + RTI_RX_URANGE_A; + device_config->vp_config[i].rti.urange_b = + RTI_RX_URANGE_B; + device_config->vp_config[i].rti.urange_c = + RTI_RX_URANGE_C; + device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A; + device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B; + device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C; + device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D; + + device_config->vp_config[i].rti.rtimer_val = + (VXGE_RTI_RTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].rti.ltimer_val = + (VXGE_RTI_LTIMER_VAL * 1000) / 272; + + device_config->vp_config[i].rpa_strip_vlan_tag = + vlan_tag_strip; + } + + driver_config->vpath_per_dev = temp; + return no_of_vpaths; +} + +/* initialize device configuratrions */ +static void vxge_device_config_init(struct vxge_hw_device_config *device_config, + int *intr_type) +{ + /* Used for CQRQ/SRQ. */ + device_config->dma_blockpool_initial = + VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; + + device_config->dma_blockpool_max = + VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; + + if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) + max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; + +#ifndef CONFIG_PCI_MSI + vxge_debug_init(VXGE_ERR, + "%s: This Kernel does not support " + "MSI-X. 
Defaulting to INTA", VXGE_DRIVER_NAME); + *intr_type = INTA; +#endif + + /* Configure whether MSI-X or IRQL. */ + switch (*intr_type) { + case INTA: + device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE; + break; + + case MSI_X: + device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT; + break; + } + + /* Timer period between device poll */ + device_config->device_poll_millis = VXGE_TIMER_DELAY; + + /* Configure mac based steering. */ + device_config->rts_mac_en = addr_learn_en; + + /* Configure Vpaths */ + device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT; + + vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", + __func__); + vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", + device_config->intr_mode); + vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", + device_config->device_poll_millis); + vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", + device_config->rth_en); + vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", + device_config->rth_it_type); +} + +static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) +{ + int i; + + vxge_debug_init(VXGE_TRACE, + "%s: %d Vpath(s) opened", + vdev->ndev->name, vdev->no_of_vpath); + + switch (vdev->config.intr_type) { + case INTA: + vxge_debug_init(VXGE_TRACE, + "%s: Interrupt type INTA", vdev->ndev->name); + break; + + case MSI_X: + vxge_debug_init(VXGE_TRACE, + "%s: Interrupt type MSI-X", vdev->ndev->name); + break; + } + + if (vdev->config.rth_steering) { + vxge_debug_init(VXGE_TRACE, + "%s: RTH steering enabled for TCP_IPV4", + vdev->ndev->name); + } else { + vxge_debug_init(VXGE_TRACE, + "%s: RTH steering disabled", vdev->ndev->name); + } + + switch (vdev->config.tx_steering_type) { + case NO_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + break; + case TX_PRIORITY_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Unsupported tx steering option", + vdev->ndev->name); + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + vdev->config.tx_steering_type = 0; + break; + case TX_VLAN_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Unsupported tx steering option", + vdev->ndev->name); + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + vdev->config.tx_steering_type = 0; + break; + case TX_MULTIQ_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Tx multiqueue steering enabled", + vdev->ndev->name); + break; + case TX_PORT_STEERING: + vxge_debug_init(VXGE_TRACE, + "%s: Tx port steering enabled", + vdev->ndev->name); + break; + default: + vxge_debug_init(VXGE_ERR, + "%s: Unsupported tx steering type", + vdev->ndev->name); + vxge_debug_init(VXGE_TRACE, + "%s: Tx steering disabled", vdev->ndev->name); + vdev->config.tx_steering_type = 0; + } + + if (vdev->config.addr_learn_en) + vxge_debug_init(VXGE_TRACE, + "%s: MAC Address learning enabled", vdev->ndev->name); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!vxge_bVALn(vpath_mask, i, 1)) + continue; + vxge_debug_ll_config(VXGE_TRACE, + "%s: MTU size - %d", vdev->ndev->name, + ((vdev->devh))-> + config.vp_config[i].mtu); + vxge_debug_init(VXGE_TRACE, + "%s: VLAN tag stripping %s", vdev->ndev->name, + ((vdev->devh))-> + config.vp_config[i].rpa_strip_vlan_tag + ? 
"Enabled" : "Disabled"); + vxge_debug_ll_config(VXGE_TRACE, + "%s: Max frags : %d", vdev->ndev->name, + ((vdev->devh))-> + config.vp_config[i].fifo.max_frags); + break; + } +} + +#ifdef CONFIG_PM +/** + * vxge_pm_suspend - vxge power management suspend entry point + * + */ +static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) +{ + return -ENOSYS; +} +/** + * vxge_pm_resume - vxge power management resume entry point + * + */ +static int vxge_pm_resume(struct pci_dev *pdev) +{ + return -ENOSYS; +} + +#endif + +/** + * vxge_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); + struct net_device *netdev = hldev->ndev; + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) { + /* Bring down the card, while avoiding PCI I/O */ + do_vxge_close(netdev, 0); + } + + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * vxge_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) +{ + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); + struct net_device *netdev = hldev->ndev; + + struct vxgedev *vdev = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + netdev_err(netdev, "Cannot re-enable device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + do_vxge_reset(vdev, VXGE_LL_FULL_RESET); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * vxge_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. 
+ */ +static void vxge_io_resume(struct pci_dev *pdev) +{ + struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); + struct net_device *netdev = hldev->ndev; + + if (netif_running(netdev)) { + if (vxge_open(netdev)) { + netdev_err(netdev, + "Can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static inline u32 vxge_get_num_vfs(u64 function_mode) +{ + u32 num_functions = 0; + + switch (function_mode) { + case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: + case VXGE_HW_FUNCTION_MODE_SRIOV_8: + num_functions = 8; + break; + case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: + num_functions = 1; + break; + case VXGE_HW_FUNCTION_MODE_SRIOV: + case VXGE_HW_FUNCTION_MODE_MRIOV: + case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17: + num_functions = 17; + break; + case VXGE_HW_FUNCTION_MODE_SRIOV_4: + num_functions = 4; + break; + case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2: + num_functions = 2; + break; + case VXGE_HW_FUNCTION_MODE_MRIOV_8: + num_functions = 8; /* TODO */ + break; + } + return num_functions; +} + +int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) +{ + struct __vxge_hw_device *hldev = vdev->devh; + u32 maj, min, bld, cmaj, cmin, cbld; + enum vxge_hw_status status; + const struct firmware *fw; + int ret; + + ret = reject_firmware(&fw, fw_name, &vdev->pdev->dev); + if (ret) { + vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found", + VXGE_DRIVER_NAME, fw_name); + goto out; + } + + /* Load the new firmware onto the adapter */ + status = vxge_update_fw_image(hldev, fw->data, fw->size); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: FW image download to adapter failed '%s'.", + VXGE_DRIVER_NAME, fw_name); + ret = -EIO; + goto out; + } + + /* Read the version of the new firmware */ + status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: Upgrade read version failed '%s'.", + VXGE_DRIVER_NAME, fw_name); + ret = -EIO; + goto out; + } + + cmaj = vdev->config.device_hw_info.fw_version.major; + cmin = vdev->config.device_hw_info.fw_version.minor; + cbld = vdev->config.device_hw_info.fw_version.build; + /* It's possible the version in /lib/firmware is not the latest version. + * If so, we could get into a loop of trying to upgrade to the latest + * and flashing the older version. + */ + if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) && + !override) { + ret = -EINVAL; + goto out; + } + + printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n", + maj, min, bld); + + /* Flash the adapter with the new firmware */ + status = vxge_hw_flash_fw(hldev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.", + VXGE_DRIVER_NAME, fw_name); + ret = -EIO; + goto out; + } + + printk(KERN_NOTICE "Upgrade of firmware successful! 
Adapter must be " + "hard reset before using, thus requiring a system reboot or a " + "hotplug event.\n"); + +out: + release_firmware(fw); + return ret; +} + +static int vxge_probe_fw_update(struct vxgedev *vdev) +{ + u32 maj, min, bld; + int ret, gpxe = 0; + char *fw_name; + + maj = vdev->config.device_hw_info.fw_version.major; + min = vdev->config.device_hw_info.fw_version.minor; + bld = vdev->config.device_hw_info.fw_version.build; + + if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER) + return 0; + + /* Ignore the build number when determining if the current firmware is + * "too new" to load the driver + */ + if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) { + vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known " + "version, unable to load driver\n", + VXGE_DRIVER_NAME); + return -EINVAL; + } + + /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to + * work with this driver. + */ + if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) { + vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be " + "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld); + return -EINVAL; + } + + /* If file not specified, determine gPXE or not */ + if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) { + int i; + for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) + if (vdev->devh->eprom_versions[i]) { + gpxe = 1; + break; + } + } + if (gpxe) + fw_name = "/*(DEBLOBBED)*/"; + else + fw_name = "/*(DEBLOBBED)*/"; + + ret = vxge_fw_upgrade(vdev, fw_name, 0); + /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on + * probe, so ignore them + */ + if (ret != -EINVAL && ret != -ENOENT) + return -EIO; + else + ret = 0; + + if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) > + VXGE_FW_VER(maj, min, 0)) { + vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to" + " be used with this driver.", + VXGE_DRIVER_NAME, maj, min, bld); + return -EINVAL; + } + + return ret; +} + +static int is_sriov_initialized(struct pci_dev *pdev) +{ + int pos; + u16 ctrl; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl); + if (ctrl & PCI_SRIOV_CTRL_VFE) + return 1; + } + return 0; +} + +static const struct vxge_hw_uld_cbs vxge_callbacks = { + .link_up = vxge_callback_link_up, + .link_down = vxge_callback_link_down, + .crit_err = vxge_callback_crit_err, +}; + +/** + * vxge_probe + * @pdev : structure containing the PCI related information of the device. + * @pre: List of PCI devices supported by the driver listed in vxge_id_table. + * Description: + * This function is called when a new PCI device gets detected and initializes + * it. + * Return value: + * returns 0 on success and negative on failure. 
+ * + */ +static int +vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) +{ + struct __vxge_hw_device *hldev; + enum vxge_hw_status status; + int ret; + int high_dma = 0; + u64 vpath_mask = 0; + struct vxgedev *vdev; + struct vxge_config *ll_config = NULL; + struct vxge_hw_device_config *device_config = NULL; + struct vxge_hw_device_attr attr; + int i, j, no_of_vpath = 0, max_vpath_supported = 0; + u8 *macaddr; + struct vxge_mac_addrs *entry; + static int bus = -1, device = -1; + u32 host_type; + u8 new_device = 0; + enum vxge_hw_status is_privileged; + u32 function_mode; + u32 num_vfs = 0; + + vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); + attr.pdev = pdev; + + /* In SRIOV-17 mode, functions of the same adapter + * can be deployed on different buses + */ + if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) && + !pdev->is_virtfn) + new_device = 1; + + bus = pdev->bus->number; + device = PCI_SLOT(pdev->devfn); + + if (new_device) { + if (driver_config->config_dev_cnt && + (driver_config->config_dev_cnt != + driver_config->total_dev_cnt)) + vxge_debug_init(VXGE_ERR, + "%s: Configured %d of %d devices", + VXGE_DRIVER_NAME, + driver_config->config_dev_cnt, + driver_config->total_dev_cnt); + driver_config->config_dev_cnt = 0; + driver_config->total_dev_cnt = 0; + } + + /* Now making the CPU based no of vpath calculation + * applicable for individual functions as well. + */ + driver_config->g_no_cpus = 0; + driver_config->vpath_per_dev = max_config_vpath; + + driver_config->total_dev_cnt++; + if (++driver_config->config_dev_cnt > max_config_dev) { + ret = 0; + goto _exit0; + } + + device_config = kzalloc(sizeof(struct vxge_hw_device_config), + GFP_KERNEL); + if (!device_config) { + ret = -ENOMEM; + vxge_debug_init(VXGE_ERR, + "device_config : malloc failed %s %d", + __FILE__, __LINE__); + goto _exit0; + } + + ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL); + if (!ll_config) { + ret = -ENOMEM; + vxge_debug_init(VXGE_ERR, + "device_config : malloc failed %s %d", + __FILE__, __LINE__); + goto _exit0; + } + ll_config->tx_steering_type = TX_MULTIQ_STEERING; + ll_config->intr_type = MSI_X; + ll_config->napi_weight = NEW_NAPI_WEIGHT; + ll_config->rth_steering = RTH_STEERING; + + /* get the default configuration parameters */ + vxge_hw_device_config_default_get(device_config); + + /* initialize configuration parameters */ + vxge_device_config_init(device_config, &ll_config->intr_type); + + ret = pci_enable_device(pdev); + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s : can not enable PCI device", __func__); + goto _exit0; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + vxge_debug_ll_config(VXGE_TRACE, + "%s : using 64bit DMA", __func__); + + high_dma = 1; + + if (pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(64))) { + vxge_debug_init(VXGE_ERR, + "%s : unable to obtain 64bit DMA for " + "consistent allocations", __func__); + ret = -ENOMEM; + goto _exit1; + } + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + vxge_debug_ll_config(VXGE_TRACE, + "%s : using 32bit DMA", __func__); + } else { + ret = -ENOMEM; + goto _exit1; + } + + ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME); + if (ret) { + vxge_debug_init(VXGE_ERR, + "%s : request regions failed", __func__); + goto _exit1; + } + + pci_set_master(pdev); + + attr.bar0 = pci_ioremap_bar(pdev, 0); + if (!attr.bar0) { + vxge_debug_init(VXGE_ERR, + "%s : cannot remap io memory bar0", __func__); + ret = -ENODEV; + goto _exit2; + } + vxge_debug_ll_config(VXGE_TRACE, + "pci 
ioremap bar0: %p:0x%llx", + attr.bar0, + (unsigned long long)pci_resource_start(pdev, 0)); + + status = vxge_hw_device_hw_info_get(attr.bar0, + &ll_config->device_hw_info); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "%s: Reading of hardware info failed." + "Please try upgrading the firmware.", VXGE_DRIVER_NAME); + ret = -EINVAL; + goto _exit3; + } + + vpath_mask = ll_config->device_hw_info.vpath_mask; + if (vpath_mask == 0) { + vxge_debug_ll_config(VXGE_TRACE, + "%s: No vpaths available in device", VXGE_DRIVER_NAME); + ret = -EINVAL; + goto _exit3; + } + + vxge_debug_ll_config(VXGE_TRACE, + "%s:%d Vpath mask = %llx", __func__, __LINE__, + (unsigned long long)vpath_mask); + + function_mode = ll_config->device_hw_info.function_mode; + host_type = ll_config->device_hw_info.host_type; + is_privileged = __vxge_hw_device_is_privilaged(host_type, + ll_config->device_hw_info.func_id); + + /* Check how many vpaths are available */ + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!((vpath_mask) & vxge_mBIT(i))) + continue; + max_vpath_supported++; + } + + if (new_device) + num_vfs = vxge_get_num_vfs(function_mode) - 1; + + /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ + if (is_sriov(function_mode) && !is_sriov_initialized(pdev) && + (ll_config->intr_type != INTA)) { + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) + vxge_debug_ll_config(VXGE_ERR, + "Failed in enabling SRIOV mode: %d\n", ret); + /* No need to fail out, as an error here is non-fatal */ + } + + /* + * Configure vpaths and get driver configured number of vpaths + * which is less than or equal to the maximum vpaths per function. + */ + no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config); + if (!no_of_vpath) { + vxge_debug_ll_config(VXGE_ERR, + "%s: No more vpaths to configure", VXGE_DRIVER_NAME); + ret = 0; + goto _exit3; + } + + /* Setting driver callbacks */ + attr.uld_callbacks = &vxge_callbacks; + + status = vxge_hw_device_initialize(&hldev, &attr, device_config); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, + "Failed to initialize device (%d)", status); + ret = -EINVAL; + goto _exit3; + } + + if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major, + ll_config->device_hw_info.fw_version.minor, + ll_config->device_hw_info.fw_version.build) >= + VXGE_EPROM_FW_VER) { + struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES]; + + status = vxge_hw_vpath_eprom_img_ver_get(hldev, img); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed", + VXGE_DRIVER_NAME); + /* This is a non-fatal error, continue */ + } + + for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { + hldev->eprom_versions[i] = img[i].version; + if (!img[i].is_valid) + break; + vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " + "%d.%d.%d.%d", VXGE_DRIVER_NAME, i, + VXGE_EPROM_IMG_MAJOR(img[i].version), + VXGE_EPROM_IMG_MINOR(img[i].version), + VXGE_EPROM_IMG_FIX(img[i].version), + VXGE_EPROM_IMG_BUILD(img[i].version)); + } + } + + /* if FCS stripping is not disabled in MAC fail driver load */ + status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC" + " failing driver load", VXGE_DRIVER_NAME); + ret = -EINVAL; + goto _exit4; + } + + /* Always enable HWTS. This will always cause the FCS to be invalid, + * due to the fact that HWTS is using the FCS as the location of the + * timestamp. 
The HW FCS checking will still correctly determine if + * there is a valid checksum, and the FCS is being removed by the driver + * anyway. So no functionality is being lost. Since it is always + * enabled, we now simply use the ioctl call to set whether or not the + * driver should be paying attention to the HWTS. + */ + if (is_privileged == VXGE_HW_OK) { + status = vxge_timestamp_config(hldev); + if (status != VXGE_HW_OK) { + vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed", + VXGE_DRIVER_NAME); + ret = -EFAULT; + goto _exit4; + } + } + + vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); + + /* set private device info */ + pci_set_drvdata(pdev, hldev); + + ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; + ll_config->addr_learn_en = addr_learn_en; + ll_config->rth_algorithm = RTH_ALG_JENKINS; + ll_config->rth_hash_type_tcpipv4 = 1; + ll_config->rth_hash_type_ipv4 = 0; + ll_config->rth_hash_type_tcpipv6 = 0; + ll_config->rth_hash_type_ipv6 = 0; + ll_config->rth_hash_type_tcpipv6ex = 0; + ll_config->rth_hash_type_ipv6ex = 0; + ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; + ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; + ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; + + ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, + &vdev); + if (ret) { + ret = -EINVAL; + goto _exit4; + } + + ret = vxge_probe_fw_update(vdev); + if (ret) + goto _exit5; + + vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); + VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), + vxge_hw_device_trace_level_get(hldev)); + + /* set private HW device info */ + vdev->mtu = VXGE_HW_DEFAULT_MTU; + vdev->bar0 = attr.bar0; + vdev->max_vpath_supported = max_vpath_supported; + vdev->no_of_vpath = no_of_vpath; + + /* Virtual Path count */ + for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + if (!vxge_bVALn(vpath_mask, i, 1)) + continue; + if (j >= vdev->no_of_vpath) + break; + + vdev->vpaths[j].is_configured = 1; + vdev->vpaths[j].device_id = i; + vdev->vpaths[j].ring.driver_id = j; + vdev->vpaths[j].vdev = vdev; + vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; + memcpy((u8 *)vdev->vpaths[j].macaddr, + ll_config->device_hw_info.mac_addrs[i], + ETH_ALEN); + + /* Initialize the mac address list header */ + INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list); + + vdev->vpaths[j].mac_addr_cnt = 0; + vdev->vpaths[j].mcast_addr_cnt = 0; + j++; + } + vdev->exec_mode = VXGE_EXEC_MODE_DISABLE; + vdev->max_config_port = max_config_port; + + vdev->vlan_tag_strip = vlan_tag_strip; + + /* map the hashing selector table to the configured vpaths */ + for (i = 0; i < vdev->no_of_vpath; i++) + vdev->vpath_selector[i] = vpath_selector[i]; + + macaddr = (u8 *)vdev->vpaths[0].macaddr; + + ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; + ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; + ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; + + vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", + vdev->ndev->name, ll_config->device_hw_info.serial_number); + + vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", + vdev->ndev->name, ll_config->device_hw_info.part_number); + + vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", + vdev->ndev->name, ll_config->device_hw_info.product_desc); + + vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", + vdev->ndev->name, macaddr); + + vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", + vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); + + 
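/*
 * Editor's illustration (not part of the vxge driver or of this patch): the
 * probe path above repeatedly tests vpath_mask with vxge_bVALn()/vxge_mBIT(),
 * which number bits from the most-significant end of a 64-bit register word
 * (see the macro definitions in vxge-reg.h further below). The self-contained
 * user-space sketch that follows uses demo_* copies of those macros and a
 * hypothetical mask to show how the per-vpath availability test works.
 */
#include <stdio.h>
#include <stdint.h>

/* Same expressions as vxge_mBIT()/vxge_bVALn() in vxge-reg.h, renamed so the
 * sketch does not pretend to be driver code.
 */
#define demo_mBIT(loc)		(0x8000000000000000ULL >> (loc))
#define demo_bVALn(bits, loc, n) \
	((((uint64_t)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))

int main(void)
{
	/* Hypothetical adapter exposing virtual paths 0, 1 and 3 only. */
	uint64_t vpath_mask = demo_mBIT(0) | demo_mBIT(1) | demo_mBIT(3);
	int i;

	/* Mirrors the "if (!vxge_bVALn(vpath_mask, i, 1)) continue;" loops in
	 * vxge_probe(); 17 corresponds to the driver's 17 virtual paths
	 * (device IDs 0 to 16).
	 */
	for (i = 0; i < 17; i++)
		if (demo_bVALn(vpath_mask, i, 1))
			printf("vpath %d is available\n", i); /* prints 0, 1, 3 */

	return 0;
}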
vxge_debug_init(VXGE_TRACE, + "%s: Firmware version : %s Date : %s", vdev->ndev->name, + ll_config->device_hw_info.fw_version.version, + ll_config->device_hw_info.fw_date.date); + + if (new_device) { + switch (ll_config->device_hw_info.function_mode) { + case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: + vxge_debug_init(VXGE_TRACE, + "%s: Single Function Mode Enabled", vdev->ndev->name); + break; + case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: + vxge_debug_init(VXGE_TRACE, + "%s: Multi Function Mode Enabled", vdev->ndev->name); + break; + case VXGE_HW_FUNCTION_MODE_SRIOV: + vxge_debug_init(VXGE_TRACE, + "%s: Single Root IOV Mode Enabled", vdev->ndev->name); + break; + case VXGE_HW_FUNCTION_MODE_MRIOV: + vxge_debug_init(VXGE_TRACE, + "%s: Multi Root IOV Mode Enabled", vdev->ndev->name); + break; + } + } + + vxge_print_parm(vdev, vpath_mask); + + /* Store the fw version for ethtool option */ + strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); + memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); + + /* Copy the station mac address to the list */ + for (i = 0; i < vdev->no_of_vpath; i++) { + entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL); + if (NULL == entry) { + vxge_debug_init(VXGE_ERR, + "%s: mac_addr_list : memory allocation failed", + vdev->ndev->name); + ret = -EPERM; + goto _exit6; + } + macaddr = (u8 *)&entry->macaddr; + memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); + list_add(&entry->item, &vdev->vpaths[i].mac_addr_list); + vdev->vpaths[i].mac_addr_cnt = 1; + } + + kfree(device_config); + + /* + * INTA is shared in multi-function mode. This is unlike the INTA + * implementation in MR mode, where each VH has its own INTA message. + * - INTA is masked (disabled) as long as at least one function sets + * its TITAN_MASK_ALL_INT.ALARM bit. + * - INTA is unmasked (enabled) when all enabled functions have cleared + * their own TITAN_MASK_ALL_INT.ALARM bit. + * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up. + * Though this driver leaves the top level interrupts unmasked while + * leaving the required module interrupt bits masked on exit, there + * could be a rogue driver around that does not follow this procedure + * resulting in a failure to generate interrupts. The following code is + * present to prevent such a failure. + */ + + if (ll_config->device_hw_info.function_mode == + VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) + if (vdev->config.intr_type == INTA) + vxge_hw_device_unmask_all(hldev); + + vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", + vdev->ndev->name, __func__, __LINE__); + + vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); + VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), + vxge_hw_device_trace_level_get(hldev)); + + kfree(ll_config); + return 0; + +_exit6: + for (i = 0; i < vdev->no_of_vpath; i++) + vxge_free_mac_add_list(&vdev->vpaths[i]); +_exit5: + vxge_device_unregister(hldev); +_exit4: + vxge_hw_device_terminate(hldev); + pci_disable_sriov(pdev); +_exit3: + iounmap(attr.bar0); +_exit2: + pci_release_region(pdev, 0); +_exit1: + pci_disable_device(pdev); +_exit0: + kfree(ll_config); + kfree(device_config); + driver_config->config_dev_cnt--; + driver_config->total_dev_cnt--; + return ret; +} + +/** + * vxge_remove - Free the PCI device + * @pdev: structure containing the PCI related information of the device. + * Description: This function is called by the PCI subsystem to release a + * PCI device and free up all resources held by the device.
+ */ +static void vxge_remove(struct pci_dev *pdev) +{ + struct __vxge_hw_device *hldev; + struct vxgedev *vdev; + int i; + + hldev = pci_get_drvdata(pdev); + if (hldev == NULL) + return; + + vdev = netdev_priv(hldev->ndev); + + vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); + vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", + __func__); + + for (i = 0; i < vdev->no_of_vpath; i++) + vxge_free_mac_add_list(&vdev->vpaths[i]); + + vxge_device_unregister(hldev); + /* Do not call pci_disable_sriov here, as it will break child devices */ + vxge_hw_device_terminate(hldev); + iounmap(vdev->bar0); + pci_release_region(pdev, 0); + pci_disable_device(pdev); + driver_config->config_dev_cnt--; + driver_config->total_dev_cnt--; + + vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", + __func__, __LINE__); + vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, + __LINE__); +} + +static const struct pci_error_handlers vxge_err_handler = { + .error_detected = vxge_io_error_detected, + .slot_reset = vxge_io_slot_reset, + .resume = vxge_io_resume, +}; + +static struct pci_driver vxge_driver = { + .name = VXGE_DRIVER_NAME, + .id_table = vxge_id_table, + .probe = vxge_probe, + .remove = vxge_remove, +#ifdef CONFIG_PM + .suspend = vxge_pm_suspend, + .resume = vxge_pm_resume, +#endif + .err_handler = &vxge_err_handler, +}; + +static int __init +vxge_starter(void) +{ + int ret = 0; + + pr_info("Copyright(c) 2002-2010 Exar Corp.\n"); + pr_info("Driver version: %s\n", DRV_VERSION); + + verify_bandwidth(); + + driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL); + if (!driver_config) + return -ENOMEM; + + ret = pci_register_driver(&vxge_driver); + if (ret) { + kfree(driver_config); + goto err; + } + + if (driver_config->config_dev_cnt && + (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) + vxge_debug_init(VXGE_ERR, + "%s: Configured %d of %d devices", + VXGE_DRIVER_NAME, driver_config->config_dev_cnt, + driver_config->total_dev_cnt); +err: + return ret; +} + +static void __exit +vxge_closer(void) +{ + pci_unregister_driver(&vxge_driver); + kfree(driver_config); +} +module_init(vxge_starter); +module_exit(vxge_closer); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h new file mode 100644 index 000000000..3a79d93b8 --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -0,0 +1,520 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2010 Exar Corp. 
+ ******************************************************************************/ +#ifndef VXGE_MAIN_H +#define VXGE_MAIN_H + +#include "vxge-traffic.h" +#include "vxge-config.h" +#include "vxge-version.h" +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/if_vlan.h> + +#define VXGE_DRIVER_NAME "vxge" +#define VXGE_DRIVER_VENDOR "Neterion, Inc" +#define VXGE_DRIVER_FW_VERSION_MAJOR 1 + +#define DRV_VERSION VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\ + VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\ + VXGE_VERSION_FOR + +#define PCI_DEVICE_ID_TITAN_WIN 0x5733 +#define PCI_DEVICE_ID_TITAN_UNI 0x5833 +#define VXGE_HW_TITAN1_PCI_REVISION 1 +#define VXGE_HW_TITAN1A_PCI_REVISION 2 + +#define VXGE_USE_DEFAULT 0xffffffff +#define VXGE_HW_VPATH_MSIX_ACTIVE 4 +#define VXGE_ALARM_MSIX_ID 2 +#define VXGE_HW_RXSYNC_FREQ_CNT 4 +#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ) +#define VXGE_LL_RX_COPY_THRESHOLD 256 +#define VXGE_DEF_FIFO_LENGTH 84 + +#define NO_STEERING 0 +#define PORT_STEERING 0x1 +#define RTH_STEERING 0x2 +#define RX_TOS_STEERING 0x3 +#define RX_VLAN_STEERING 0x4 +#define RTH_BUCKET_SIZE 4 + +#define TX_PRIORITY_STEERING 1 +#define TX_VLAN_STEERING 2 +#define TX_PORT_STEERING 3 +#define TX_MULTIQ_STEERING 4 + +#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE + +#define VXGE_TTI_BTIMER_VAL 250000 + +#define VXGE_TTI_LTIMER_VAL 1000 +#define VXGE_T1A_TTI_LTIMER_VAL 80 +#define VXGE_TTI_RTIMER_VAL 0 +#define VXGE_TTI_RTIMER_ADAPT_VAL 10 +#define VXGE_T1A_TTI_RTIMER_VAL 400 +#define VXGE_RTI_BTIMER_VAL 250 +#define VXGE_RTI_LTIMER_VAL 100 +#define VXGE_RTI_RTIMER_VAL 0 +#define VXGE_RTI_RTIMER_ADAPT_VAL 15 +#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH +#define VXGE_ISR_POLLING_CNT 8 +#define VXGE_MAX_CONFIG_DEV 0xFF +#define VXGE_EXEC_MODE_DISABLE 0 +#define VXGE_EXEC_MODE_ENABLE 1 +#define VXGE_MAX_CONFIG_PORT 1 +#define VXGE_ALL_VID_DISABLE 0 +#define VXGE_ALL_VID_ENABLE 1 +#define VXGE_PAUSE_CTRL_DISABLE 0 +#define VXGE_PAUSE_CTRL_ENABLE 1 + +#define TTI_TX_URANGE_A 5 +#define TTI_TX_URANGE_B 15 +#define TTI_TX_URANGE_C 40 +#define TTI_TX_UFC_A 5 +#define TTI_TX_UFC_B 40 +#define TTI_TX_UFC_C 60 +#define TTI_TX_UFC_D 100 +#define TTI_T1A_TX_UFC_A 30 +#define TTI_T1A_TX_UFC_B 80 +/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */ +/* Slope - 93 */ +/* 60 - 9k Mtu, 140 - 1.5k mtu */ +#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93)) + +/* Slope - 37 */ +/* 100 - 9k Mtu, 300 - 1.5k mtu */ +#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37)) + + +#define RTI_RX_URANGE_A 5 +#define RTI_RX_URANGE_B 15 +#define RTI_RX_URANGE_C 40 +#define RTI_T1A_RX_URANGE_A 1 +#define RTI_T1A_RX_URANGE_B 20 +#define RTI_T1A_RX_URANGE_C 50 +#define RTI_RX_UFC_A 1 +#define RTI_RX_UFC_B 5 +#define RTI_RX_UFC_C 10 +#define RTI_RX_UFC_D 15 +#define RTI_T1A_RX_UFC_B 20 +#define RTI_T1A_RX_UFC_C 50 +#define RTI_T1A_RX_UFC_D 60 + +/* + * The interrupt rate is maintained at 3k per second with the moderation + * parameters for most traffic but not all. This is the maximum interrupt + * count allowed per function with INTA or per vector in the case of + * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A.
+ */ +#define VXGE_T1A_MAX_INTERRUPT_COUNT 100 +#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200 + +/* Timer period in milliseconds */ +#define VXGE_TIMER_DELAY 10000 + +#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE) + +#define is_sriov(function_mode) \ + ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \ + (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \ + (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4)) + +enum vxge_reset_event { + /* reset events */ + VXGE_LL_VPATH_RESET = 0, + VXGE_LL_DEVICE_RESET = 1, + VXGE_LL_FULL_RESET = 2, + VXGE_LL_START_RESET = 3, + VXGE_LL_COMPL_RESET = 4 +}; +/* These flags represent the device's temporary state */ +enum vxge_device_state_t { +__VXGE_STATE_RESET_CARD = 0, +__VXGE_STATE_CARD_UP +}; + +enum vxge_mac_addr_state { + /* mac address states */ + VXGE_LL_MAC_ADDR_IN_LIST = 0, + VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1 +}; + +struct vxge_drv_config { + int config_dev_cnt; + int total_dev_cnt; + int g_no_cpus; + unsigned int vpath_per_dev; +}; + +struct macInfo { + unsigned char macaddr[ETH_ALEN]; + unsigned char macmask[ETH_ALEN]; + unsigned int vpath_no; + enum vxge_mac_addr_state state; +}; + +struct vxge_config { + int tx_pause_enable; + int rx_pause_enable; + +#define NEW_NAPI_WEIGHT 64 + int napi_weight; + int intr_type; +#define INTA 0 +#define MSI 1 +#define MSI_X 2 + + int addr_learn_en; + + u32 rth_steering:2, + rth_algorithm:2, + rth_hash_type_tcpipv4:1, + rth_hash_type_ipv4:1, + rth_hash_type_tcpipv6:1, + rth_hash_type_ipv6:1, + rth_hash_type_tcpipv6ex:1, + rth_hash_type_ipv6ex:1, + rth_bkt_sz:8; + int rth_jhash_golden_ratio; + int tx_steering_type; + int fifo_indicate_max_pkts; + struct vxge_hw_device_hw_info device_hw_info; +}; + +struct vxge_msix_entry { + /* Mimicking the msix_entry struct of the kernel. */ + u16 vector; + u16 entry; + u16 in_use; + void *arg; +}; + +/* Software Statistics */ + +struct vxge_sw_stats { + + /* Virtual Path */ + unsigned long vpaths_open; + unsigned long vpath_open_fail; + + /* Misc. 
*/ + unsigned long link_up; + unsigned long link_down; +}; + +struct vxge_mac_addrs { + struct list_head item; + u64 macaddr; + u64 macmask; + enum vxge_mac_addr_state state; +}; + +struct vxgedev; + +struct vxge_fifo_stats { + struct u64_stats_sync syncp; + u64 tx_frms; + u64 tx_bytes; + + unsigned long tx_errors; + unsigned long txd_not_free; + unsigned long txd_out_of_desc; + unsigned long pci_map_fail; +}; + +struct vxge_fifo { + struct net_device *ndev; + struct pci_dev *pdev; + struct __vxge_hw_fifo *handle; + struct netdev_queue *txq; + + int tx_steering_type; + int indicate_max_pkts; + + /* Adaptive interrupt moderation parameters used in T1A */ + unsigned long interrupt_count; + unsigned long jiffies; + + u32 tx_vector_no; + /* Tx stats */ + struct vxge_fifo_stats stats; +} ____cacheline_aligned; + +struct vxge_ring_stats { + struct u64_stats_sync syncp; + u64 rx_frms; + u64 rx_mcast; + u64 rx_bytes; + + unsigned long rx_errors; + unsigned long rx_dropped; + unsigned long prev_rx_frms; + unsigned long pci_map_fail; + unsigned long skb_alloc_fail; +}; + +struct vxge_ring { + struct net_device *ndev; + struct pci_dev *pdev; + struct __vxge_hw_ring *handle; + /* The vpath id maintained in the driver - + * 0 to 'maximum_vpaths_in_function - 1' + */ + int driver_id; + + /* Adaptive interrupt moderation parameters used in T1A */ + unsigned long interrupt_count; + unsigned long jiffies; + + /* copy of the flag indicating whether rx_hwts is to be used */ + u32 rx_hwts:1; + + int pkts_processed; + int budget; + + struct napi_struct napi; + struct napi_struct *napi_p; + +#define VXGE_MAX_MAC_ADDR_COUNT 30 + + int vlan_tag_strip; + u32 rx_vector_no; + enum vxge_hw_status last_status; + + /* Rx stats */ + struct vxge_ring_stats stats; +} ____cacheline_aligned; + +struct vxge_vpath { + struct vxge_fifo fifo; + struct vxge_ring ring; + + struct __vxge_hw_vpath_handle *handle; + + /* Actual vpath id for this vpath in the device - 0 to 16 */ + int device_id; + int max_mac_addr_cnt; + int is_configured; + int is_open; + struct vxgedev *vdev; + u8 macaddr[ETH_ALEN]; + u8 macmask[ETH_ALEN]; + +#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 + /* mac addresses currently programmed into NIC */ + u16 mac_addr_cnt; + u16 mcast_addr_cnt; + struct list_head mac_addr_list; + + u32 level_err; + u32 level_trace; +}; +#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) { \ + for (i = 0; i < vdev->no_of_vpath; i++) { \ + vdev->vpaths[i].level_err = err; \ + vdev->vpaths[i].level_trace = trace; \ + } \ + vdev->level_err = err; \ + vdev->level_trace = trace; \ +} + +struct vxgedev { + struct net_device *ndev; + struct pci_dev *pdev; + struct __vxge_hw_device *devh; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + int vlan_tag_strip; + struct vxge_config config; + unsigned long state; + + /* Indicates which vpath to reset */ + unsigned long vp_reset; + + /* Timer used for polling vpath resets */ + struct timer_list vp_reset_timer; + + /* Timer used for polling vpath lockup */ + struct timer_list vp_lockup_timer; + + /* + * Flags to track whether device is in All Multicast + * or in promiscuous mode. + */ + u16 all_multi_flg; + + /* A flag indicating whether rx_hwts is to be used or not. 
*/ + u32 rx_hwts:1, + titan1:1; + + struct vxge_msix_entry *vxge_entries; + struct msix_entry *entries; + /* + * 4 for each vpath * 17; + * total is 68 + */ +#define VXGE_MAX_REQUESTED_MSIX 68 +#define VXGE_INTR_STRLEN 80 + char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN]; + + enum vxge_hw_event cric_err_event; + + int max_vpath_supported; + int no_of_vpath; + + struct napi_struct napi; + /* A debug option, when enabled and if error condition occurs, + * the driver will do following steps: + * - mask all interrupts + * - Not clear the source of the alarm + * - gracefully stop all I/O + * A diagnostic dump of register and stats at this point + * reveals very useful information. + */ + int exec_mode; + int max_config_port; + struct vxge_vpath *vpaths; + + struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS]; + void __iomem *bar0; + struct vxge_sw_stats stats; + int mtu; + /* Below variables are used for vpath selection to transmit a packet */ + u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS]; + u64 vpaths_deployed; + + u32 intr_cnt; + u32 level_err; + u32 level_trace; + char fw_version[VXGE_HW_FW_STRLEN]; + struct work_struct reset_task; +}; + +struct vxge_rx_priv { + struct sk_buff *skb; + unsigned char *skb_data; + dma_addr_t data_dma; + dma_addr_t data_size; +}; + +struct vxge_tx_priv { + struct sk_buff *skb; + dma_addr_t dma_buffers[MAX_SKB_FRAGS+1]; +}; + +#define VXGE_MODULE_PARAM_INT(p, val) \ + static int p = val; \ + module_param(p, int, 0) + +static inline +void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data), + struct vxgedev *vdev, unsigned long timeout) +{ + init_timer(timer); + timer->function = func; + timer->data = (unsigned long)vdev; + mod_timer(timer, jiffies + timeout); +} + +void vxge_initialize_ethtool_ops(struct net_device *ndev); +int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); + +/* #define VXGE_DEBUG_INIT: debug for initialization functions + * #define VXGE_DEBUG_TX : debug transmit related functions + * #define VXGE_DEBUG_RX : debug recevice related functions + * #define VXGE_DEBUG_MEM : debug memory module + * #define VXGE_DEBUG_LOCK: debug locks + * #define VXGE_DEBUG_SEM : debug semaphore + * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements +*/ +#define VXGE_DEBUG_INIT 0x00000001 +#define VXGE_DEBUG_TX 0x00000002 +#define VXGE_DEBUG_RX 0x00000004 +#define VXGE_DEBUG_MEM 0x00000008 +#define VXGE_DEBUG_LOCK 0x00000010 +#define VXGE_DEBUG_SEM 0x00000020 +#define VXGE_DEBUG_ENTRYEXIT 0x00000040 +#define VXGE_DEBUG_INTR 0x00000080 +#define VXGE_DEBUG_LL_CONFIG 0x00000100 + +/* Debug tracing for VXGE driver */ +#ifndef VXGE_DEBUG_MASK +#define VXGE_DEBUG_MASK 0x0 +#endif + +#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK) +#define vxge_debug_ll_config(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__) +#else +#define vxge_debug_ll_config(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) +#define vxge_debug_init(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__) +#else +#define vxge_debug_init(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK) +#define vxge_debug_tx(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__) +#else +#define vxge_debug_tx(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK) +#define vxge_debug_rx(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__) +#else +#define vxge_debug_rx(level, fmt, ...) 
+#endif + +#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK) +#define vxge_debug_mem(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__) +#else +#define vxge_debug_mem(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK) +#define vxge_debug_entryexit(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__) +#else +#define vxge_debug_entryexit(level, fmt, ...) +#endif + +#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK) +#define vxge_debug_intr(level, fmt, ...) \ + vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__) +#else +#define vxge_debug_intr(level, fmt, ...) +#endif + +#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {\ + vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \ + level, mask);\ + VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \ + vxge_hw_device_error_level_get((struct __vxge_hw_device *) \ + vdev->devh), \ + vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \ + vdev->devh));\ +} + +#ifdef NETIF_F_GSO +#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size) +#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size) +#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type) +#endif + +#endif diff --git a/drivers/net/ethernet/neterion/vxge/vxge-reg.h b/drivers/net/ethernet/neterion/vxge/vxge-reg.h new file mode 100644 index 000000000..3e658b175 --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-reg.h @@ -0,0 +1,4636 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-reg.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O Virtualized + * Server Adapter. + * Copyright(c) 2002-2010 Exar Corp. 
+ ******************************************************************************/ +#ifndef VXGE_REG_H +#define VXGE_REG_H + +/* + * vxge_mBIT(loc) - set bit at offset + */ +#define vxge_mBIT(loc) (0x8000000000000000ULL >> (loc)) + +/* + * vxge_vBIT(val, loc, sz) - set bits at offset + */ +#define vxge_vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz))) +#define vxge_vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz))) + +/* + * vxge_bVALn(bits, loc, n) - Get the value of n bits at location + */ +#define vxge_bVALn(bits, loc, n) \ + ((((u64)bits) >> (64-(loc+n))) & ((0x1ULL << n) - 1)) + +#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(bits) \ + vxge_bVALn(bits, 0, 16) +#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(bits) \ + vxge_bVALn(bits, 48, 8) +#define VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(bits) \ + vxge_bVALn(bits, 56, 8) + +#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(bits) \ + vxge_bVALn(bits, 3, 5) +#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(bits) \ + vxge_bVALn(bits, 5, 3) +#define VXGE_HW_PF_SW_RESET_COMMAND 0xA5 + +#define VXGE_HW_TITAN_PCICFGMGMT_REG_SPACES 17 +#define VXGE_HW_TITAN_SRPCIM_REG_SPACES 17 +#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 +#define VXGE_HW_TITAN_VPATH_REG_SPACES 17 + +#define VXGE_HW_FW_API_GET_EPROM_REV 31 + +#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4) +#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4) +#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4) +#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4) + +#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8) +#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1) +#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8) +#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16) +#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8) + +#define VXGE_HW_FW_API_GET_FUNC_MODE 29 +#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF) + +#define VXGE_HW_FW_UPGRADE_MEMO 13 +#define VXGE_HW_FW_UPGRADE_ACTION 16 +#define VXGE_HW_FW_UPGRADE_OFFSET_START 2 +#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3 +#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4 +#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5 + +#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16 +#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff) +#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff) + +#define VXGE_HW_ASIC_MODE_RESERVED 0 +#define VXGE_HW_ASIC_MODE_NO_IOV 1 +#define VXGE_HW_ASIC_MODE_SR_IOV 2 +#define VXGE_HW_ASIC_MODE_MR_IOV 3 + +#define VXGE_HW_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN vxge_mBIT(3) +#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE vxge_mBIT(19) +#define VXGE_HW_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH vxge_mBIT(23) +#define VXGE_HW_TXMAC_GEN_CFG1_HOST_APPEND_FCS vxge_mBIT(31) + +#define VXGE_HW_VPATH_IS_FIRST_GET_VPATH_IS_FIRST(bits) vxge_bVALn(bits, 3, 1) + +#define VXGE_HW_TIM_VPATH_ASSIGNMENT_GET_BMAP_ROOT(bits) \ + vxge_bVALn(bits, 0, 32) + +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN(bits) \ + vxge_bVALn(bits, 50, 14) + +#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_GET_VSPORT_VECTOR(bits) \ + vxge_bVALn(bits, 0, 17) + +#define VXGE_HW_XMAC_VPATH_TO_VSPORT_VPMGMT_CLONE_GET_VSPORT_NUMBER(bits) \ + vxge_bVALn(bits, 3, 5) + +#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(bits) \ + vxge_bVALn(bits, 17, 15) + +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_LEGACY_MODE 0 +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY 1 +#define 
VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_MULTI_OP_MODE 2 + +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MESSAGES_ONLY 0 +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE_MULTI_OP_MODE 1 + +#define VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val) \ + (val&~VXGE_HW_TOC_KDFC_INITIAL_BIR(7)) +#define VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val) \ + vxge_bVALn(val, 61, 3) +#define VXGE_HW_TOC_GET_USDC_INITIAL_OFFSET(val) \ + (val&~VXGE_HW_TOC_USDC_INITIAL_BIR(7)) +#define VXGE_HW_TOC_GET_USDC_INITIAL_BIR(val) \ + vxge_bVALn(val, 61, 3) + +#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(bits) bits +#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_GET_TOC_KDFC_FIFO_STRIDE(bits) bits + +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR0(bits) \ + vxge_bVALn(bits, 1, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR1(bits) \ + vxge_bVALn(bits, 17, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_GET_KDFC_RCTR2(bits) \ + vxge_bVALn(bits, 33, 15) + +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_VAPTH_NUM(val) vxge_vBIT(val, 42, 5) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_NUM(val) vxge_vBIT(val, 47, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_FIFO_OFFSET(val) \ + vxge_vBIT(val, 49, 15) + +#define VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER 0 +#define VXGE_HW_PRC_CFG4_RING_MODE_THREE_BUFFER 1 +#define VXGE_HW_PRC_CFG4_RING_MODE_FIVE_BUFFER 2 + +#define VXGE_HW_PRC_CFG7_SCATTER_MODE_A 0 +#define VXGE_HW_PRC_CFG7_SCATTER_MODE_B 2 +#define VXGE_HW_PRC_CFG7_SCATTER_MODE_C 1 + +#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_READ 0 +#define VXGE_HW_RTS_MGR_STEER_CTRL_WE_WRITE 1 + +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA 0 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_VID 1 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_PN 3 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RANGE_PN 4 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DS 11 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 +#define VXGE_HW_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_FW_VERSION 13 + +#define VXGE_HW_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ + vxge_bVALn(bits, 0, 48) +#define VXGE_HW_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48) + +#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \ + vxge_bVALn(bits, 0, 48) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(val) vxge_vBIT(val, 0, 48) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_PRIVILEGED_MODE \ + vxge_mBIT(54) +#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_VPATH(bits) \ + vxge_bVALn(bits, 55, 5) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_ADD_VPATH(val) \ + vxge_vBIT(val, 55, 5) +#define VXGE_HW_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_ADD_MODE(bits) \ + vxge_bVALn(bits, 62, 2) +#define VXGE_HW_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(val) vxge_vBIT(val, 62, 2) + +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY 0 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY 1 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY 2 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY 3 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY 0 +#define 
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY 1 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY 3 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL 4 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ALL_CLEAR 172 + +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA 0 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID 1 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 +#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 +#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ + vxge_bVALn(bits, 0, 48) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(val) vxge_vBIT(val, 0, 48) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(bits) vxge_bVALn(bits, 0, 12) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(val) vxge_vBIT(val, 0, 12) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_ETYPE(bits) vxge_bVALn(bits, 0, 11) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_ETYPE(val) vxge_vBIT(val, 0, 16) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_SRC_DEST_SEL(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_SRC_DEST_SEL vxge_mBIT(3) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_TCP_UDP_SEL(bits) \ + vxge_bVALn(bits, 7, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_TCP_UDP_SEL vxge_mBIT(7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_PN_PORT_NUM(bits) \ + vxge_bVALn(bits, 8, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_PN_PORT_NUM(val) vxge_vBIT(val, 8, 16) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_EN(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN vxge_mBIT(3) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_BUCKET_SIZE(bits) \ + vxge_bVALn(bits, 4, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(val) \ + vxge_vBIT(val, 4, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ALG_SEL(bits) \ + vxge_bVALn(bits, 10, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(val) \ + vxge_vBIT(val, 10, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_JENKINS 0 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_MS_RSS 1 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL_CRC32C 2 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV4_EN(bits) \ + vxge_bVALn(bits, 15, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN vxge_mBIT(15) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV4_EN(bits) \ + vxge_bVALn(bits, 19, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN vxge_mBIT(19) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EN(bits) \ + vxge_bVALn(bits, 23, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN vxge_mBIT(23) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EN(bits) \ + vxge_bVALn(bits, 27, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN vxge_mBIT(27) +#define 
VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_TCP_IPV6_EX_EN(bits) \ + vxge_bVALn(bits, 31, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN vxge_mBIT(31) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_RTH_IPV6_EX_EN(bits) \ + vxge_bVALn(bits, 35, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN vxge_mBIT(35) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(bits) \ + vxge_bVALn(bits, 39, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE vxge_mBIT(39) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_REPL_ENTRY_EN(bits) \ + vxge_bVALn(bits, 43, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_REPL_ENTRY_EN vxge_mBIT(43) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_ENTRY_EN(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN vxge_mBIT(3) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_SOLO_IT_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(val) \ + vxge_vBIT(val, 9, 7) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(val) \ + vxge_vBIT(val, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_ENTRY_EN(bits) \ + vxge_bVALn(bits, 8, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN vxge_mBIT(8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM0_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(val) \ + vxge_vBIT(val, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(val) \ + vxge_vBIT(val, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_ENTRY_EN(bits) \ + vxge_bVALn(bits, 24, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN vxge_mBIT(24) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_ITEM1_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(val) \ + vxge_vBIT(val, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(val) \ + vxge_vBIT(val, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_ENTRY_EN(bits) \ + vxge_bVALn(bits, 8, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN vxge_mBIT(8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM0_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(val) \ + vxge_vBIT(val, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(val) \ + vxge_vBIT(val, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_ENTRY_EN(bits) \ + vxge_bVALn(bits, 24, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN vxge_mBIT(24) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM1_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(val) \ + vxge_vBIT(val, 25, 7) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_GOLDEN_RATIO(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_GOLDEN_RATIO(val) \ + vxge_vBIT(val, 0, 32) +#define 
VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_JHASH_CFG_INIT_VALUE(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_JHASH_CFG_INIT_VALUE(val) \ + vxge_vBIT(val, 32, 32) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_SA_MASK(bits) \ + vxge_bVALn(bits, 0, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_SA_MASK(val) \ + vxge_vBIT(val, 0, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV6_DA_MASK(bits) \ + vxge_bVALn(bits, 16, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV6_DA_MASK(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_SA_MASK(bits) \ + vxge_bVALn(bits, 32, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_SA_MASK(val) \ + vxge_vBIT(val, 32, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_IPV4_DA_MASK(bits) \ + vxge_bVALn(bits, 36, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_IPV4_DA_MASK(val) \ + vxge_vBIT(val, 36, 4) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4SP_MASK(bits) \ + vxge_bVALn(bits, 40, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4SP_MASK(val) \ + vxge_vBIT(val, 40, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_MASK_L4DP_MASK(bits) \ + vxge_bVALn(bits, 42, 2) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_MASK_L4DP_MASK(val) \ + vxge_vBIT(val, 42, 2) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_KEY_KEY(bits) \ + vxge_bVALn(bits, 0, 64) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_KEY_KEY vxge_vBIT(val, 0, 64) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_QOS_ENTRY_EN(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_QOS_ENTRY_EN vxge_mBIT(3) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DS_ENTRY_EN(bits) \ + vxge_bVALn(bits, 3, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DS_ENTRY_EN vxge_mBIT(3) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(bits) \ + vxge_bVALn(bits, 0, 48) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(val) \ + vxge_vBIT(val, 0, 48) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(val) \ + vxge_vBIT(val, 62, 2) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_NUM(val) \ + vxge_vBIT(val, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_ENTRY_EN(bits) \ + vxge_bVALn(bits, 8, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_ENTRY_EN vxge_mBIT(8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM4_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM4_BUCKET_DATA(val) \ + vxge_vBIT(val, 9, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_NUM(val) \ + vxge_vBIT(val, 16, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_ENTRY_EN(bits) \ + vxge_bVALn(bits, 24, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_ENTRY_EN vxge_mBIT(24) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM5_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM5_BUCKET_DATA(val) \ + vxge_vBIT(val, 25, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 32, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_NUM(val) \ + vxge_vBIT(val, 32, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_ENTRY_EN(bits) \ + vxge_bVALn(bits, 40, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_ENTRY_EN 
vxge_mBIT(40) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM6_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 41, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM6_BUCKET_DATA(val) \ + vxge_vBIT(val, 41, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_NUM(bits) \ + vxge_bVALn(bits, 48, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_NUM(val) \ + vxge_vBIT(val, 48, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_ENTRY_EN(bits) \ + vxge_bVALn(bits, 56, 1) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_ENTRY_EN vxge_mBIT(56) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_RTH_ITEM7_BUCKET_DATA(bits) \ + vxge_bVALn(bits, 57, 7) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM7_BUCKET_DATA(val) \ + vxge_vBIT(val, 57, 7) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER 0 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER 1 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_VERSION 2 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE 3 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0 4 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_1 5 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_2 6 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3 7 + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_ON 1 +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_LED_CONTROL_OFF 0 + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_DAY(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(bits) \ + vxge_bVALn(bits, 8, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MONTH(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(bits) \ + vxge_bVALn(bits, 16, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_YEAR(val) \ + vxge_vBIT(val, 16, 16) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(bits) \ + vxge_bVALn(bits, 32, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MAJOR vxge_vBIT(val, 32, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(bits) \ + vxge_bVALn(bits, 40, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_MINOR vxge_vBIT(val, 40, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(bits) \ + vxge_bVALn(bits, 48, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_FW_VER_BUILD vxge_vBIT(val, 48, 16) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_DAY(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(bits) \ + vxge_bVALn(bits, 8, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MONTH(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(bits) \ + vxge_bVALn(bits, 16, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_YEAR(val) \ + vxge_vBIT(val, 16, 16) + +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(bits) \ + vxge_bVALn(bits, 32, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MAJOR vxge_vBIT(val, 32, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(bits) \ + vxge_bVALn(bits, 40, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_MINOR vxge_vBIT(val, 40, 8) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ + vxge_bVALn(bits, 48, 16) +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8) + +#define 
VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ + vxge_bVALn(bits, 0, 18) + +#define VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(bits) \ + vxge_bVALn(bits, 48, 16) +#define VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(bits) vxge_bVALn(bits, 48, 16) +#define VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(bits) (bits) +#define VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(bits) (bits) +#define VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(bits\ +) vxge_bVALn(bits, 48, 16) +#define VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(bits) vxge_bVALn(bits, 0, 16) +#define VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(bits) \ + vxge_bVALn(bits, 16, 16) +#define VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(bits) \ + vxge_bVALn(bits, 32, 16) +#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(bits) vxge_bVALn(bits, 0, 16) +#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(bits) \ + vxge_bVALn(bits, 16, 16) +#define VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(bits) \ + vxge_bVALn(bits, 32, 16) + +#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(bits\ +) vxge_bVALn(bits, 32, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(bits\ +) vxge_bVALn(bits, 32, 32) +#define \ +VXGE_HW_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(bits) \ + vxge_bVALn(bits, 0, 32) +#define VXGE_HW_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(bits) \ + vxge_bVALn(bits, 32, 32) +#define VXGE_HW_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(bits) \ + vxge_bVALn(bits, 32, 32) + +#define VXGE_HW_DEBUG_STATS0_GET_RSTDROP_MSG(bits) vxge_bVALn(bits, 0, 32) +#define 
VXGE_HW_DEBUG_STATS0_GET_RSTDROP_CPL(bits) vxge_bVALn(bits, 32, 32) +#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT0(bits) vxge_bVALn(bits, 0, 32) +#define VXGE_HW_DEBUG_STATS1_GET_RSTDROP_CLIENT1(bits) vxge_bVALn(bits, 32, 32) +#define VXGE_HW_DEBUG_STATS2_GET_RSTDROP_CLIENT2(bits) vxge_bVALn(bits, 0, 32) +#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_PH(bits) vxge_bVALn(bits, 0, 16) +#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(bits) vxge_bVALn(bits, 16, 16) +#define VXGE_HW_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(bits) vxge_bVALn(bits, 32, 16) +#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_PD(bits) vxge_bVALn(bits, 0, 16) +#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(bits) vxge_bVALn(bits, 16, 16) +#define VXGE_HW_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(bits) vxge_bVALn(bits, 32, 16) + +#define VXGE_HW_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(bits) \ + vxge_bVALn(bits, 32, 32) + +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 8, 8) +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 16, 8) + +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 0, 8) +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 8, 8) +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(bits) \ + vxge_bVALn(bits, 16, 8) + +#define VXGE_HW_CONFIG_PRIV_H + +#define VXGE_HW_SWAPPER_INITIAL_VALUE 0x0123456789abcdefULL +#define VXGE_HW_SWAPPER_BYTE_SWAPPED 0xefcdab8967452301ULL +#define VXGE_HW_SWAPPER_BIT_FLIPPED 0x80c4a2e691d5b3f7ULL +#define VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED 0xf7b3d591e6a2c480ULL + +#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_READ_BYTE_SWAP_DISABLE 0x0000000000000000ULL + +#define VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_READ_BIT_FLAP_DISABLE 0x0000000000000000ULL + +#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_DISABLE 0x0000000000000000ULL + +#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_SWAPPER_WRITE_BIT_FLAP_DISABLE 0x0000000000000000ULL + +/* + * The registers are memory mapped and are in native big-endian byte order. + * Little-endian hosts are handled by enabling hardware byte-swapping for + * register and DMA operations.
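+ *
+ * Illustrative sketch only (not part of the original header), showing how a
+ * driver would typically consume the swapper constants above: read the raw
+ * toc_swapper_fb value from the legacy register block declared below,
+ * compare it against VXGE_HW_SWAPPER_INITIAL_VALUE / _BYTE_SWAPPED /
+ * _BIT_FLIPPED / _BYTE_SWAPPED_BIT_FLIPPED, and program the pifm_* swap and
+ * flip enables accordingly. Assuming readq/writeq access and legacy_reg
+ * pointing at the structure below:
+ *
+ *	if (readq(&legacy_reg->toc_swapper_fb) == VXGE_HW_SWAPPER_BYTE_SWAPPED) {
+ *		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+ *		       &legacy_reg->pifm_rd_swap_en);
+ *		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+ *		       &legacy_reg->pifm_wr_swap_en);
+ *	}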
+ */ +struct vxge_hw_legacy_reg { + + u8 unused00010[0x00010]; + +/*0x00010*/ u64 toc_swapper_fb; +#define VXGE_HW_TOC_SWAPPER_FB_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00018*/ u64 pifm_rd_swap_en; +#define VXGE_HW_PIFM_RD_SWAP_EN_PIFM_RD_SWAP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00020*/ u64 pifm_rd_flip_en; +#define VXGE_HW_PIFM_RD_FLIP_EN_PIFM_RD_FLIP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00028*/ u64 pifm_wr_swap_en; +#define VXGE_HW_PIFM_WR_SWAP_EN_PIFM_WR_SWAP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00030*/ u64 pifm_wr_flip_en; +#define VXGE_HW_PIFM_WR_FLIP_EN_PIFM_WR_FLIP_EN(val) vxge_vBIT(val, 0, 64) +/*0x00038*/ u64 toc_first_pointer; +#define VXGE_HW_TOC_FIRST_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00040*/ u64 host_access_en; +#define VXGE_HW_HOST_ACCESS_EN_HOST_ACCESS_EN(val) vxge_vBIT(val, 0, 64) + +} __packed; + +struct vxge_hw_toc_reg { + + u8 unused00050[0x00050]; + +/*0x00050*/ u64 toc_common_pointer; +#define VXGE_HW_TOC_COMMON_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00058*/ u64 toc_memrepair_pointer; +#define VXGE_HW_TOC_MEMREPAIR_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x00060*/ u64 toc_pcicfgmgmt_pointer[17]; +#define VXGE_HW_TOC_PCICFGMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused001e0[0x001e0-0x000e8]; + +/*0x001e0*/ u64 toc_mrpcim_pointer; +#define VXGE_HW_TOC_MRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) +/*0x001e8*/ u64 toc_srpcim_pointer[17]; +#define VXGE_HW_TOC_SRPCIM_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused00278[0x00278-0x00270]; + +/*0x00278*/ u64 toc_vpmgmt_pointer[17]; +#define VXGE_HW_TOC_VPMGMT_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused00390[0x00390-0x00300]; + +/*0x00390*/ u64 toc_vpath_pointer[17]; +#define VXGE_HW_TOC_VPATH_POINTER_INITIAL_VAL(val) vxge_vBIT(val, 0, 64) + u8 unused004a0[0x004a0-0x00418]; + +/*0x004a0*/ u64 toc_kdfc; +#define VXGE_HW_TOC_KDFC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61) +#define VXGE_HW_TOC_KDFC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3) +/*0x004a8*/ u64 toc_usdc; +#define VXGE_HW_TOC_USDC_INITIAL_OFFSET(val) vxge_vBIT(val, 0, 61) +#define VXGE_HW_TOC_USDC_INITIAL_BIR(val) vxge_vBIT(val, 61, 3) +/*0x004b0*/ u64 toc_kdfc_vpath_stride; +#define VXGE_HW_TOC_KDFC_VPATH_STRIDE_INITIAL_TOC_KDFC_VPATH_STRIDE(val) \ + vxge_vBIT(val, 0, 64) +/*0x004b8*/ u64 toc_kdfc_fifo_stride; +#define VXGE_HW_TOC_KDFC_FIFO_STRIDE_INITIAL_TOC_KDFC_FIFO_STRIDE(val) \ + vxge_vBIT(val, 0, 64) + +} __packed; + +struct vxge_hw_common_reg { + + u8 unused00a00[0x00a00]; + +/*0x00a00*/ u64 prc_status1; +#define VXGE_HW_PRC_STATUS1_PRC_VP_QUIESCENT(n) vxge_mBIT(n) +/*0x00a08*/ u64 rxdcm_reset_in_progress; +#define VXGE_HW_RXDCM_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n) +/*0x00a10*/ u64 replicq_flush_in_progress; +#define VXGE_HW_REPLICQ_FLUSH_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) +/*0x00a18*/ u64 rxpe_cmds_reset_in_progress; +#define VXGE_HW_RXPE_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) +/*0x00a20*/ u64 mxp_cmds_reset_in_progress; +#define VXGE_HW_MXP_CMDS_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) +/*0x00a28*/ u64 noffload_reset_in_progress; +#define VXGE_HW_NOFFLOAD_RESET_IN_PROGRESS_PRC_VP(n) vxge_mBIT(n) +/*0x00a30*/ u64 rd_req_in_progress; +#define VXGE_HW_RD_REQ_IN_PROGRESS_VP(n) vxge_mBIT(n) +/*0x00a38*/ u64 rd_req_outstanding; +#define VXGE_HW_RD_REQ_OUTSTANDING_VP(n) vxge_mBIT(n) +/*0x00a40*/ u64 kdfc_reset_in_progress; +#define VXGE_HW_KDFC_RESET_IN_PROGRESS_NOA_VP(n) vxge_mBIT(n) + u8 unused00b00[0x00b00-0x00a48]; + +/*0x00b00*/ u64 one_cfg_vp; +#define 
VXGE_HW_ONE_CFG_VP_RDY(n) vxge_mBIT(n) +/*0x00b08*/ u64 one_common; +#define VXGE_HW_ONE_COMMON_PET_VPATH_RESET_IN_PROGRESS(n) vxge_mBIT(n) + u8 unused00b80[0x00b80-0x00b10]; + +/*0x00b80*/ u64 tim_int_en; +#define VXGE_HW_TIM_INT_EN_TIM_VP(n) vxge_mBIT(n) +/*0x00b88*/ u64 tim_set_int_en; +#define VXGE_HW_TIM_SET_INT_EN_VP(n) vxge_mBIT(n) +/*0x00b90*/ u64 tim_clr_int_en; +#define VXGE_HW_TIM_CLR_INT_EN_VP(n) vxge_mBIT(n) +/*0x00b98*/ u64 tim_mask_int_during_reset; +#define VXGE_HW_TIM_MASK_INT_DURING_RESET_VPATH(n) vxge_mBIT(n) +/*0x00ba0*/ u64 tim_reset_in_progress; +#define VXGE_HW_TIM_RESET_IN_PROGRESS_TIM_VPATH(n) vxge_mBIT(n) +/*0x00ba8*/ u64 tim_outstanding_bmap; +#define VXGE_HW_TIM_OUTSTANDING_BMAP_TIM_VPATH(n) vxge_mBIT(n) + u8 unused00c00[0x00c00-0x00bb0]; + +/*0x00c00*/ u64 msg_reset_in_progress; +#define VXGE_HW_MSG_RESET_IN_PROGRESS_MSG_COMPOSITE(val) vxge_vBIT(val, 0, 17) +/*0x00c08*/ u64 msg_mxp_mr_ready; +#define VXGE_HW_MSG_MXP_MR_READY_MP_BOOTED(n) vxge_mBIT(n) +/*0x00c10*/ u64 msg_uxp_mr_ready; +#define VXGE_HW_MSG_UXP_MR_READY_UP_BOOTED(n) vxge_mBIT(n) +/*0x00c18*/ u64 msg_dmq_noni_rtl_prefetch; +#define VXGE_HW_MSG_DMQ_NONI_RTL_PREFETCH_BYPASS_ENABLE(n) vxge_mBIT(n) +/*0x00c20*/ u64 msg_umq_rtl_bwr; +#define VXGE_HW_MSG_UMQ_RTL_BWR_PREFETCH_DISABLE(n) vxge_mBIT(n) + u8 unused00d00[0x00d00-0x00c28]; + +/*0x00d00*/ u64 cmn_rsthdlr_cfg0; +#define VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(val) vxge_vBIT(val, 0, 17) +/*0x00d08*/ u64 cmn_rsthdlr_cfg1; +#define VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(val) vxge_vBIT(val, 0, 17) +/*0x00d10*/ u64 cmn_rsthdlr_cfg2; +#define VXGE_HW_CMN_RSTHDLR_CFG2_SW_RESET_FIFO0(val) vxge_vBIT(val, 0, 17) +/*0x00d18*/ u64 cmn_rsthdlr_cfg3; +#define VXGE_HW_CMN_RSTHDLR_CFG3_SW_RESET_FIFO1(val) vxge_vBIT(val, 0, 17) +/*0x00d20*/ u64 cmn_rsthdlr_cfg4; +#define VXGE_HW_CMN_RSTHDLR_CFG4_SW_RESET_FIFO2(val) vxge_vBIT(val, 0, 17) + u8 unused00d40[0x00d40-0x00d28]; + +/*0x00d40*/ u64 cmn_rsthdlr_cfg8; +#define VXGE_HW_CMN_RSTHDLR_CFG8_INCR_VPATH_INST_NUM(val) vxge_vBIT(val, 0, 17) +/*0x00d48*/ u64 stats_cfg0; +#define VXGE_HW_STATS_CFG0_STATS_ENABLE(val) vxge_vBIT(val, 0, 17) + u8 unused00da8[0x00da8-0x00d50]; + +/*0x00da8*/ u64 clear_msix_mask_vect[4]; +#define VXGE_HW_CLEAR_MSIX_MASK_VECT_CLEAR_MSIX_MASK_VECT(val) \ + vxge_vBIT(val, 0, 17) +/*0x00dc8*/ u64 set_msix_mask_vect[4]; +#define VXGE_HW_SET_MSIX_MASK_VECT_SET_MSIX_MASK_VECT(val) vxge_vBIT(val, 0, 17) +/*0x00de8*/ u64 clear_msix_mask_all_vect; +#define VXGE_HW_CLEAR_MSIX_MASK_ALL_VECT_CLEAR_MSIX_MASK_ALL_VECT(val) \ + vxge_vBIT(val, 0, 17) +/*0x00df0*/ u64 set_msix_mask_all_vect; +#define VXGE_HW_SET_MSIX_MASK_ALL_VECT_SET_MSIX_MASK_ALL_VECT(val) \ + vxge_vBIT(val, 0, 17) +/*0x00df8*/ u64 mask_vector[4]; +#define VXGE_HW_MASK_VECTOR_MASK_VECTOR(val) vxge_vBIT(val, 0, 17) +/*0x00e18*/ u64 msix_pending_vector[4]; +#define VXGE_HW_MSIX_PENDING_VECTOR_MSIX_PENDING_VECTOR(val) \ + vxge_vBIT(val, 0, 17) +/*0x00e38*/ u64 clr_msix_one_shot_vec[4]; +#define VXGE_HW_CLR_MSIX_ONE_SHOT_VEC_CLR_MSIX_ONE_SHOT_VEC(val) \ + vxge_vBIT(val, 0, 17) +/*0x00e58*/ u64 titan_asic_id; +#define VXGE_HW_TITAN_ASIC_ID_INITIAL_DEVICE_ID(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MAJOR_REVISION(val) vxge_vBIT(val, 48, 8) +#define VXGE_HW_TITAN_ASIC_ID_INITIAL_MINOR_REVISION(val) vxge_vBIT(val, 56, 8) +/*0x00e60*/ u64 titan_general_int_status; +#define VXGE_HW_TITAN_GENERAL_INT_STATUS_MRPCIM_ALARM_INT vxge_mBIT(0) +#define VXGE_HW_TITAN_GENERAL_INT_STATUS_SRPCIM_ALARM_INT vxge_mBIT(1) +#define 
VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT vxge_mBIT(2) +#define VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(val) \ + vxge_vBIT(val, 3, 17) + u8 unused00e70[0x00e70-0x00e68]; + +/*0x00e70*/ u64 titan_mask_all_int; +#define VXGE_HW_TITAN_MASK_ALL_INT_ALARM vxge_mBIT(7) +#define VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC vxge_mBIT(15) + u8 unused00e80[0x00e80-0x00e78]; + +/*0x00e80*/ u64 tim_int_status0; +#define VXGE_HW_TIM_INT_STATUS0_TIM_INT_STATUS0(val) vxge_vBIT(val, 0, 64) +/*0x00e88*/ u64 tim_int_mask0; +#define VXGE_HW_TIM_INT_MASK0_TIM_INT_MASK0(val) vxge_vBIT(val, 0, 64) +/*0x00e90*/ u64 tim_int_status1; +#define VXGE_HW_TIM_INT_STATUS1_TIM_INT_STATUS1(val) vxge_vBIT(val, 0, 4) +/*0x00e98*/ u64 tim_int_mask1; +#define VXGE_HW_TIM_INT_MASK1_TIM_INT_MASK1(val) vxge_vBIT(val, 0, 4) +/*0x00ea0*/ u64 rti_int_status; +#define VXGE_HW_RTI_INT_STATUS_RTI_INT_STATUS(val) vxge_vBIT(val, 0, 17) +/*0x00ea8*/ u64 rti_int_mask; +#define VXGE_HW_RTI_INT_MASK_RTI_INT_MASK(val) vxge_vBIT(val, 0, 17) +/*0x00eb0*/ u64 adapter_status; +#define VXGE_HW_ADAPTER_STATUS_RTDMA_RTDMA_READY vxge_mBIT(0) +#define VXGE_HW_ADAPTER_STATUS_WRDMA_WRDMA_READY vxge_mBIT(1) +#define VXGE_HW_ADAPTER_STATUS_KDFC_KDFC_READY vxge_mBIT(2) +#define VXGE_HW_ADAPTER_STATUS_TPA_TMAC_BUF_EMPTY vxge_mBIT(3) +#define VXGE_HW_ADAPTER_STATUS_RDCTL_PIC_QUIESCENT vxge_mBIT(4) +#define VXGE_HW_ADAPTER_STATUS_XGMAC_NETWORK_FAULT vxge_mBIT(5) +#define VXGE_HW_ADAPTER_STATUS_ROCRC_OFFLOAD_QUIESCENT vxge_mBIT(6) +#define VXGE_HW_ADAPTER_STATUS_G3IF_FB_G3IF_FB_GDDR3_READY vxge_mBIT(7) +#define VXGE_HW_ADAPTER_STATUS_G3IF_CM_G3IF_CM_GDDR3_READY vxge_mBIT(8) +#define VXGE_HW_ADAPTER_STATUS_RIC_RIC_RUNNING vxge_mBIT(9) +#define VXGE_HW_ADAPTER_STATUS_CMG_C_PLL_IN_LOCK vxge_mBIT(10) +#define VXGE_HW_ADAPTER_STATUS_XGMAC_X_PLL_IN_LOCK vxge_mBIT(11) +#define VXGE_HW_ADAPTER_STATUS_FBIF_M_PLL_IN_LOCK vxge_mBIT(12) +#define VXGE_HW_ADAPTER_STATUS_PCC_PCC_IDLE(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_ADAPTER_STATUS_ROCRC_RC_PRC_QUIESCENT(val) vxge_vBIT(val, 44, 8) +/*0x00eb8*/ u64 gen_ctrl; +#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_WR_DIS vxge_mBIT(0) +#define VXGE_HW_GEN_CTRL_SPI_MRPCIM_RD_DIS vxge_mBIT(1) +#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_WR_DIS vxge_mBIT(2) +#define VXGE_HW_GEN_CTRL_SPI_SRPCIM_RD_DIS vxge_mBIT(3) +#define VXGE_HW_GEN_CTRL_SPI_DEBUG_DIS vxge_mBIT(4) +#define VXGE_HW_GEN_CTRL_SPI_APP_LTSSM_TIMER_DIS vxge_mBIT(5) +#define VXGE_HW_GEN_CTRL_SPI_NOT_USED(val) vxge_vBIT(val, 6, 4) + u8 unused00ed0[0x00ed0-0x00ec0]; + +/*0x00ed0*/ u64 adapter_ready; +#define VXGE_HW_ADAPTER_READY_ADAPTER_READY vxge_mBIT(63) +/*0x00ed8*/ u64 outstanding_read; +#define VXGE_HW_OUTSTANDING_READ_OUTSTANDING_READ(val) vxge_vBIT(val, 0, 17) +/*0x00ee0*/ u64 vpath_rst_in_prog; +#define VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(val) vxge_vBIT(val, 0, 17) +/*0x00ee8*/ u64 vpath_reg_modified; +#define VXGE_HW_VPATH_REG_MODIFIED_VPATH_REG_MODIFIED(val) vxge_vBIT(val, 0, 17) + u8 unused00fc0[0x00fc0-0x00ef0]; + +/*0x00fc0*/ u64 cp_reset_in_progress; +#define VXGE_HW_CP_RESET_IN_PROGRESS_CP_VPATH(n) vxge_mBIT(n) + u8 unused01080[0x01080-0x00fc8]; + +/*0x01080*/ u64 xgmac_ready; +#define VXGE_HW_XGMAC_READY_XMACJ_READY(val) vxge_vBIT(val, 0, 17) + u8 unused010c0[0x010c0-0x01088]; + +/*0x010c0*/ u64 fbif_ready; +#define VXGE_HW_FBIF_READY_FAU_READY(val) vxge_vBIT(val, 0, 17) + u8 unused01100[0x01100-0x010c8]; + +/*0x01100*/ u64 vplane_assignments; +#define VXGE_HW_VPLANE_ASSIGNMENTS_VPLANE_ASSIGNMENTS(val) vxge_vBIT(val, 3, 5) +/*0x01108*/ u64 
vpath_assignments; +#define VXGE_HW_VPATH_ASSIGNMENTS_VPATH_ASSIGNMENTS(val) vxge_vBIT(val, 0, 17) +/*0x01110*/ u64 resource_assignments; +#define VXGE_HW_RESOURCE_ASSIGNMENTS_RESOURCE_ASSIGNMENTS(val) \ + vxge_vBIT(val, 0, 17) +/*0x01118*/ u64 host_type_assignments; +#define VXGE_HW_HOST_TYPE_ASSIGNMENTS_HOST_TYPE_ASSIGNMENTS(val) \ + vxge_vBIT(val, 5, 3) + u8 unused01128[0x01128-0x01120]; + +/*0x01128*/ u64 max_resource_assignments; +#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPLANE(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_MAX_RESOURCE_ASSIGNMENTS_PCI_MAX_VPATHS(val) \ + vxge_vBIT(val, 11, 5) +/*0x01130*/ u64 pf_vpath_assignments; +#define VXGE_HW_PF_VPATH_ASSIGNMENTS_PF_VPATH_ASSIGNMENTS(val) \ + vxge_vBIT(val, 0, 17) + u8 unused01200[0x01200-0x01138]; + +/*0x01200*/ u64 rts_access_icmp; +#define VXGE_HW_RTS_ACCESS_ICMP_EN(val) vxge_vBIT(val, 0, 17) +/*0x01208*/ u64 rts_access_tcpsyn; +#define VXGE_HW_RTS_ACCESS_TCPSYN_EN(val) vxge_vBIT(val, 0, 17) +/*0x01210*/ u64 rts_access_zl4pyld; +#define VXGE_HW_RTS_ACCESS_ZL4PYLD_EN(val) vxge_vBIT(val, 0, 17) +/*0x01218*/ u64 rts_access_l4prtcl_tcp; +#define VXGE_HW_RTS_ACCESS_L4PRTCL_TCP_EN(val) vxge_vBIT(val, 0, 17) +/*0x01220*/ u64 rts_access_l4prtcl_udp; +#define VXGE_HW_RTS_ACCESS_L4PRTCL_UDP_EN(val) vxge_vBIT(val, 0, 17) +/*0x01228*/ u64 rts_access_l4prtcl_flex; +#define VXGE_HW_RTS_ACCESS_L4PRTCL_FLEX_EN(val) vxge_vBIT(val, 0, 17) +/*0x01230*/ u64 rts_access_ipfrag; +#define VXGE_HW_RTS_ACCESS_IPFRAG_EN(val) vxge_vBIT(val, 0, 17) + +} __packed; + +struct vxge_hw_memrepair_reg { + u64 unused1; + u64 unused2; +} __packed; + +struct vxge_hw_pcicfgmgmt_reg { + +/*0x00000*/ u64 resource_no; +#define VXGE_HW_RESOURCE_NO_PFN_OR_VF BIT(3) +/*0x00008*/ u64 bargrp_pf_or_vf_bar0_mask; +#define VXGE_HW_BARGRP_PF_OR_VF_BAR0_MASK_BARGRP_PF_OR_VF_BAR0_MASK(val) \ + vxge_vBIT(val, 2, 6) +/*0x00010*/ u64 bargrp_pf_or_vf_bar1_mask; +#define VXGE_HW_BARGRP_PF_OR_VF_BAR1_MASK_BARGRP_PF_OR_VF_BAR1_MASK(val) \ + vxge_vBIT(val, 2, 6) +/*0x00018*/ u64 bargrp_pf_or_vf_bar2_mask; +#define VXGE_HW_BARGRP_PF_OR_VF_BAR2_MASK_BARGRP_PF_OR_VF_BAR2_MASK(val) \ + vxge_vBIT(val, 2, 6) +/*0x00020*/ u64 msixgrp_no; +#define VXGE_HW_MSIXGRP_NO_TABLE_SIZE(val) vxge_vBIT(val, 5, 11) + +} __packed; + +struct vxge_hw_mrpcim_reg { +/*0x00000*/ u64 g3fbct_int_status; +#define VXGE_HW_G3FBCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x00008*/ u64 g3fbct_int_mask; +/*0x00010*/ u64 g3fbct_err_reg; +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30) +#define VXGE_HW_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31) +/*0x00018*/ u64 g3fbct_err_mask; +/*0x00020*/ u64 g3fbct_err_alarm; + + u8 unused00a00[0x00a00-0x00028]; + +/*0x00a00*/ u64 wrdma_int_status; +#define VXGE_HW_WRDMA_INT_STATUS_RC_ALARM_RC_INT vxge_mBIT(0) +#define VXGE_HW_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT vxge_mBIT(1) +#define VXGE_HW_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT vxge_mBIT(2) +#define VXGE_HW_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT vxge_mBIT(3) +#define VXGE_HW_WRDMA_INT_STATUS_RDA_ERR_RDA_INT vxge_mBIT(6) +#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT vxge_mBIT(8) +#define VXGE_HW_WRDMA_INT_STATUS_RDA_ECC_SG_RDA_ECC_SG_INT vxge_mBIT(9) +#define 
VXGE_HW_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT vxge_mBIT(12) +#define VXGE_HW_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT vxge_mBIT(13) +#define VXGE_HW_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT vxge_mBIT(14) +#define VXGE_HW_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT vxge_mBIT(15) +#define VXGE_HW_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT vxge_mBIT(16) +#define VXGE_HW_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT vxge_mBIT(17) +/*0x00a08*/ u64 wrdma_int_mask; +/*0x00a10*/ u64 rc_alarm_reg; +#define VXGE_HW_RC_ALARM_REG_FTC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_RC_ALARM_REG_FTC_SM_PHASE_ERR vxge_mBIT(1) +#define VXGE_HW_RC_ALARM_REG_BTDWM_SM_ERR vxge_mBIT(2) +#define VXGE_HW_RC_ALARM_REG_BTC_SM_ERR vxge_mBIT(3) +#define VXGE_HW_RC_ALARM_REG_BTDCM_SM_ERR vxge_mBIT(4) +#define VXGE_HW_RC_ALARM_REG_BTDRM_SM_ERR vxge_mBIT(5) +#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR vxge_mBIT(6) +#define VXGE_HW_RC_ALARM_REG_RMM_RXD_RC_ECC_SG_ERR vxge_mBIT(7) +#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR vxge_mBIT(8) +#define VXGE_HW_RC_ALARM_REG_RHS_RXD_RHS_ECC_SG_ERR vxge_mBIT(9) +#define VXGE_HW_RC_ALARM_REG_RMM_SM_ERR vxge_mBIT(10) +#define VXGE_HW_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR vxge_mBIT(12) +/*0x00a18*/ u64 rc_alarm_mask; +/*0x00a20*/ u64 rc_alarm_alarm; +/*0x00a28*/ u64 rxdrm_sm_err_reg; +#define VXGE_HW_RXDRM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) +/*0x00a30*/ u64 rxdrm_sm_err_mask; +/*0x00a38*/ u64 rxdrm_sm_err_alarm; +/*0x00a40*/ u64 rxdcm_sm_err_reg; +#define VXGE_HW_RXDCM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) +/*0x00a48*/ u64 rxdcm_sm_err_mask; +/*0x00a50*/ u64 rxdcm_sm_err_alarm; +/*0x00a58*/ u64 rxdwm_sm_err_reg; +#define VXGE_HW_RXDWM_SM_ERR_REG_PRC_VP(n) vxge_mBIT(n) +/*0x00a60*/ u64 rxdwm_sm_err_mask; +/*0x00a68*/ u64 rxdwm_sm_err_alarm; +/*0x00a70*/ u64 rda_err_reg; +#define VXGE_HW_RDA_ERR_REG_RDA_SM0_ERR_ALARM vxge_mBIT(0) +#define VXGE_HW_RDA_ERR_REG_RDA_MISC_ERR vxge_mBIT(1) +#define VXGE_HW_RDA_ERR_REG_RDA_PCIX_ERR vxge_mBIT(2) +#define VXGE_HW_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR vxge_mBIT(3) +#define VXGE_HW_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR vxge_mBIT(4) +#define VXGE_HW_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR vxge_mBIT(5) +#define VXGE_HW_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR vxge_mBIT(6) +#define VXGE_HW_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR vxge_mBIT(7) +/*0x00a78*/ u64 rda_err_mask; +/*0x00a80*/ u64 rda_err_alarm; +/*0x00a88*/ u64 rda_ecc_db_reg; +#define VXGE_HW_RDA_ECC_DB_REG_RDA_RXD_ERR(n) vxge_mBIT(n) +/*0x00a90*/ u64 rda_ecc_db_mask; +/*0x00a98*/ u64 rda_ecc_db_alarm; +/*0x00aa0*/ u64 rda_ecc_sg_reg; +#define VXGE_HW_RDA_ECC_SG_REG_RDA_RXD_ERR(n) vxge_mBIT(n) +/*0x00aa8*/ u64 rda_ecc_sg_mask; +/*0x00ab0*/ u64 rda_ecc_sg_alarm; +/*0x00ab8*/ u64 rqa_err_reg; +#define VXGE_HW_RQA_ERR_REG_RQA_SM_ERR_ALARM vxge_mBIT(0) +/*0x00ac0*/ u64 rqa_err_mask; +/*0x00ac8*/ u64 rqa_err_alarm; +/*0x00ad0*/ u64 frf_alarm_reg; +#define VXGE_HW_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(n) vxge_mBIT(n) +/*0x00ad8*/ u64 frf_alarm_mask; +/*0x00ae0*/ u64 frf_alarm_alarm; +/*0x00ae8*/ u64 rocrc_alarm_reg; +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB vxge_mBIT(0) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_SG vxge_mBIT(1) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_NMA_SM_ERR vxge_mBIT(2) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB vxge_mBIT(3) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_IMMM_ECC_SG vxge_mBIT(4) +#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB vxge_mBIT(5) +#define VXGE_HW_ROCRC_ALARM_REG_UDQ_UMQM_ECC_SG vxge_mBIT(6) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB vxge_mBIT(11) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_RCBM_ECC_SG vxge_mBIT(12) 
+#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_RSVD_ERR vxge_mBIT(13) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_EGB_OWN_ERR vxge_mBIT(14) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_MULTI_BYP_OWN_ERR vxge_mBIT(15) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_NOT_ASSIGNED_ERR vxge_mBIT(16) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_OWN_RSVD_SYNC_ERR vxge_mBIT(17) +#define VXGE_HW_ROCRC_ALARM_REG_QCQ_LOST_EGB_ERR vxge_mBIT(18) +#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ0_OVERFLOW vxge_mBIT(19) +#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ1_OVERFLOW vxge_mBIT(20) +#define VXGE_HW_ROCRC_ALARM_REG_RCQ_BYPQ2_OVERFLOW vxge_mBIT(21) +#define VXGE_HW_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR vxge_mBIT(22) +/*0x00af0*/ u64 rocrc_alarm_mask; +/*0x00af8*/ u64 rocrc_alarm_alarm; +/*0x00b00*/ u64 wde0_alarm_reg; +#define VXGE_HW_WDE0_ALARM_REG_WDE0_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE0_ALARM_REG_WDE0_PCR_SM_ERR vxge_mBIT(4) +/*0x00b08*/ u64 wde0_alarm_mask; +/*0x00b10*/ u64 wde0_alarm_alarm; +/*0x00b18*/ u64 wde1_alarm_reg; +#define VXGE_HW_WDE1_ALARM_REG_WDE1_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE1_ALARM_REG_WDE1_PCR_SM_ERR vxge_mBIT(4) +/*0x00b20*/ u64 wde1_alarm_mask; +/*0x00b28*/ u64 wde1_alarm_alarm; +/*0x00b30*/ u64 wde2_alarm_reg; +#define VXGE_HW_WDE2_ALARM_REG_WDE2_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE2_ALARM_REG_WDE2_PCR_SM_ERR vxge_mBIT(4) +/*0x00b38*/ u64 wde2_alarm_mask; +/*0x00b40*/ u64 wde2_alarm_alarm; +/*0x00b48*/ u64 wde3_alarm_reg; +#define VXGE_HW_WDE3_ALARM_REG_WDE3_DCC_SM_ERR vxge_mBIT(0) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_PRM_SM_ERR vxge_mBIT(1) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_SM_ERR vxge_mBIT(2) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_CP_CMD_ERR vxge_mBIT(3) +#define VXGE_HW_WDE3_ALARM_REG_WDE3_PCR_SM_ERR vxge_mBIT(4) +/*0x00b50*/ u64 wde3_alarm_mask; +/*0x00b58*/ u64 wde3_alarm_alarm; + + u8 unused00be8[0x00be8-0x00b60]; + +/*0x00be8*/ u64 rx_w_round_robin_0; +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(val) vxge_vBIT(val, 59, 5) +/*0x00bf0*/ u64 rx_w_round_robin_1; +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_8(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_9(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_10(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_11(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_12(val) \ + 
vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_13(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_14(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_1_RX_W_PRIORITY_SS_15(val) \ + vxge_vBIT(val, 59, 5) +/*0x00bf8*/ u64 rx_w_round_robin_2; +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_16(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_17(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_18(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_19(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_20(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_21(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_22(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_2_RX_W_PRIORITY_SS_23(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c00*/ u64 rx_w_round_robin_3; +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_24(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_25(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_26(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_27(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_28(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_29(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_30(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_3_RX_W_PRIORITY_SS_31(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c08*/ u64 rx_w_round_robin_4; +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_32(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_33(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_34(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_35(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_36(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_37(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_38(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_4_RX_W_PRIORITY_SS_39(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c10*/ u64 rx_w_round_robin_5; +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_40(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_41(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_42(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_43(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_44(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_45(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_46(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_5_RX_W_PRIORITY_SS_47(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c18*/ u64 rx_w_round_robin_6; +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_48(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_49(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_50(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_51(val) \ + 
vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_52(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_53(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_54(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_6_RX_W_PRIORITY_SS_55(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c20*/ u64 rx_w_round_robin_7; +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_56(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_57(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_58(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_59(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_60(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_61(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_62(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_7_RX_W_PRIORITY_SS_63(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c28*/ u64 rx_w_round_robin_8; +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_64(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_65(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_66(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_67(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_68(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_69(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_70(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_8_RX_W_PRIORITY_SS_71(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c30*/ u64 rx_w_round_robin_9; +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_72(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_73(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_74(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_75(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_76(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_77(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_78(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_9_RX_W_PRIORITY_SS_79(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c38*/ u64 rx_w_round_robin_10; +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_80(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_81(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_82(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_83(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_84(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_85(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_86(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_10_RX_W_PRIORITY_SS_87(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c40*/ u64 rx_w_round_robin_11; +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_88(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_89(val) \ + vxge_vBIT(val, 11, 5) +#define 
VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_90(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_91(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_92(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_93(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_94(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_11_RX_W_PRIORITY_SS_95(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c48*/ u64 rx_w_round_robin_12; +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_96(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_97(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_98(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_99(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_100(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_101(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_102(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_12_RX_W_PRIORITY_SS_103(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c50*/ u64 rx_w_round_robin_13; +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_104(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_105(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_106(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_107(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_108(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_109(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_110(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_13_RX_W_PRIORITY_SS_111(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c58*/ u64 rx_w_round_robin_14; +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_112(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_113(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_114(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_115(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_116(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_117(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_118(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_14_RX_W_PRIORITY_SS_119(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c60*/ u64 rx_w_round_robin_15; +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_120(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_121(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_122(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_123(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_124(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_125(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_126(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_15_RX_W_PRIORITY_SS_127(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c68*/ u64 rx_w_round_robin_16; 
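+/*
+ * Illustrative sketch only (not part of the original register map): each
+ * rx_w_round_robin_N register packs eight 5-bit ring weighted-round-robin
+ * service-state slots at bit offsets 3, 11, 19, 27, 35, 43, 51 and 59 from
+ * the MSB, so a 64-bit value could be composed from the SS_* macros, e.g.
+ * (vp0/vp1 being hypothetical vpath numbers and mrpcim_reg a pointer to
+ * this structure):
+ *
+ *	u64 val64 = VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(vp0) |
+ *		    VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(vp1);
+ *	writeq(val64, &mrpcim_reg->rx_w_round_robin_16);
+ */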
+#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_128(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_129(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_130(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_131(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_132(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_133(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_134(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_16_RX_W_PRIORITY_SS_135(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c70*/ u64 rx_w_round_robin_17; +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_136(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_137(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_138(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_139(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_140(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_141(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_142(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_17_RX_W_PRIORITY_SS_143(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c78*/ u64 rx_w_round_robin_18; +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_144(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_145(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_146(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_147(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_148(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_149(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_150(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_18_RX_W_PRIORITY_SS_151(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c80*/ u64 rx_w_round_robin_19; +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_152(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_153(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_154(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_155(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_156(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_157(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_158(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_19_RX_W_PRIORITY_SS_159(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c88*/ u64 rx_w_round_robin_20; +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_160(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_161(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_162(val) \ + vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_163(val) \ + vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_164(val) \ + vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_165(val) \ + vxge_vBIT(val, 43, 5) +#define 
VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_166(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_20_RX_W_PRIORITY_SS_167(val) \ + vxge_vBIT(val, 59, 5) +/*0x00c90*/ u64 rx_w_round_robin_21; +#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_168(val) \ + vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_169(val) \ + vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_W_ROUND_ROBIN_21_RX_W_PRIORITY_SS_170(val) \ + vxge_vBIT(val, 19, 5) + +#define VXGE_HW_WRR_RING_SERVICE_STATES 171 +#define VXGE_HW_WRR_RING_COUNT 22 + +/*0x00c98*/ u64 rx_queue_priority_0; +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(val) vxge_vBIT(val, 59, 5) +/*0x00ca0*/ u64 rx_queue_priority_1; +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(val) vxge_vBIT(val, 59, 5) +/*0x00ca8*/ u64 rx_queue_priority_2; +#define VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(val) vxge_vBIT(val, 3, 5) + u8 unused00cc8[0x00cc8-0x00cb0]; + +/*0x00cc8*/ u64 replication_queue_priority; +#define VXGE_HW_REPLICATION_QUEUE_PRIORITY_REPLICATION_QUEUE_PRIORITY(val) \ + vxge_vBIT(val, 59, 5) +/*0x00cd0*/ u64 rx_queue_select; +#define VXGE_HW_RX_QUEUE_SELECT_NUMBER(n) vxge_mBIT(n) +#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_CODE vxge_mBIT(15) +#define VXGE_HW_RX_QUEUE_SELECT_ENABLE_HIERARCHICAL_PRTY vxge_mBIT(23) +/*0x00cd8*/ u64 rqa_vpbp_ctrl; +#define VXGE_HW_RQA_VPBP_CTRL_WR_XON_DIS vxge_mBIT(15) +#define VXGE_HW_RQA_VPBP_CTRL_ROCRC_DIS vxge_mBIT(23) +#define VXGE_HW_RQA_VPBP_CTRL_TXPE_DIS vxge_mBIT(31) +/*0x00ce0*/ u64 rx_multi_cast_ctrl; +#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_DIS vxge_mBIT(0) +#define VXGE_HW_RX_MULTI_CAST_CTRL_FRM_DROP_DIS vxge_mBIT(1) +#define VXGE_HW_RX_MULTI_CAST_CTRL_NO_RXD_TIME_OUT_CNT(val) \ + vxge_vBIT(val, 2, 30) +#define VXGE_HW_RX_MULTI_CAST_CTRL_TIME_OUT_CNT(val) vxge_vBIT(val, 32, 32) +/*0x00ce8*/ u64 wde_prm_ctrl; +#define VXGE_HW_WDE_PRM_CTRL_SPAV_THRESHOLD(val) vxge_vBIT(val, 2, 10) +#define VXGE_HW_WDE_PRM_CTRL_SPLIT_THRESHOLD(val) vxge_vBIT(val, 18, 14) +#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_1ST_ROW vxge_mBIT(32) +#define VXGE_HW_WDE_PRM_CTRL_SPLIT_ON_ROW_BNDRY vxge_mBIT(33) +#define VXGE_HW_WDE_PRM_CTRL_FB_ROW_SIZE(val) vxge_vBIT(val, 46, 2) +/*0x00cf0*/ u64 noa_ctrl; +#define VXGE_HW_NOA_CTRL_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_NOA_CTRL_NON_FRM_PRTY_QUOTA(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_NOA_CTRL_IGNORE_KDFC_IF_STATUS vxge_mBIT(16) +#define 
VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE0(val) vxge_vBIT(val, 37, 4) +#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE1(val) vxge_vBIT(val, 45, 4) +#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE2(val) vxge_vBIT(val, 53, 4) +#define VXGE_HW_NOA_CTRL_MAX_JOB_CNT_FOR_WDE3(val) vxge_vBIT(val, 60, 4) +/*0x00cf8*/ u64 phase_cfg; +#define VXGE_HW_PHASE_CFG_QCC_WR_PHASE_EN vxge_mBIT(0) +#define VXGE_HW_PHASE_CFG_QCC_RD_PHASE_EN vxge_mBIT(3) +#define VXGE_HW_PHASE_CFG_IMMM_WR_PHASE_EN vxge_mBIT(7) +#define VXGE_HW_PHASE_CFG_IMMM_RD_PHASE_EN vxge_mBIT(11) +#define VXGE_HW_PHASE_CFG_UMQM_WR_PHASE_EN vxge_mBIT(15) +#define VXGE_HW_PHASE_CFG_UMQM_RD_PHASE_EN vxge_mBIT(19) +#define VXGE_HW_PHASE_CFG_RCBM_WR_PHASE_EN vxge_mBIT(23) +#define VXGE_HW_PHASE_CFG_RCBM_RD_PHASE_EN vxge_mBIT(27) +#define VXGE_HW_PHASE_CFG_RXD_RC_WR_PHASE_EN vxge_mBIT(31) +#define VXGE_HW_PHASE_CFG_RXD_RC_RD_PHASE_EN vxge_mBIT(35) +#define VXGE_HW_PHASE_CFG_RXD_RHS_WR_PHASE_EN vxge_mBIT(39) +#define VXGE_HW_PHASE_CFG_RXD_RHS_RD_PHASE_EN vxge_mBIT(43) +/*0x00d00*/ u64 rcq_bypq_cfg; +#define VXGE_HW_RCQ_BYPQ_CFG_OVERFLOW_THRESHOLD(val) vxge_vBIT(val, 10, 22) +#define VXGE_HW_RCQ_BYPQ_CFG_BYP_ON_THRESHOLD(val) vxge_vBIT(val, 39, 9) +#define VXGE_HW_RCQ_BYPQ_CFG_BYP_OFF_THRESHOLD(val) vxge_vBIT(val, 55, 9) + u8 unused00e00[0x00e00-0x00d08]; + +/*0x00e00*/ u64 doorbell_int_status; +#define VXGE_HW_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT vxge_mBIT(7) +#define VXGE_HW_DOORBELL_INT_STATUS_USDC_ERR_REG_TXDMA_USDC_INT vxge_mBIT(15) +/*0x00e08*/ u64 doorbell_int_mask; +/*0x00e10*/ u64 kdfc_err_reg; +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32) +#define VXGE_HW_KDFC_ERR_REG_KDFC_KDFC_PCIX_ERR vxge_mBIT(39) +/*0x00e18*/ u64 kdfc_err_mask; +/*0x00e20*/ u64 kdfc_err_reg_alarm; +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_SG_ERR vxge_mBIT(7) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_ECC_DB_ERR vxge_mBIT(15) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_SM_ERR_ALARM vxge_mBIT(23) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_MISC_ERR_1 vxge_mBIT(32) +#define VXGE_HW_KDFC_ERR_REG_ALARM_KDFC_KDFC_PCIX_ERR vxge_mBIT(39) + u8 unused00e40[0x00e40-0x00e28]; +/*0x00e40*/ u64 kdfc_vp_partition_0; +#define VXGE_HW_KDFC_VP_PARTITION_0_ENABLE vxge_mBIT(0) +#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_0(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_0(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_0_NUMBER_1(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_0_LENGTH_1(val) vxge_vBIT(val, 49, 15) +/*0x00e48*/ u64 kdfc_vp_partition_1; +#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_2(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_2(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_1_NUMBER_3(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_1_LENGTH_3(val) vxge_vBIT(val, 49, 15) +/*0x00e50*/ u64 kdfc_vp_partition_2; +#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_4(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_4(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_2_NUMBER_5(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_2_LENGTH_5(val) vxge_vBIT(val, 49, 15) +/*0x00e58*/ u64 kdfc_vp_partition_3; +#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_6(val) vxge_vBIT(val, 5, 3) +#define 
VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_6(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_3_NUMBER_7(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_KDFC_VP_PARTITION_3_LENGTH_7(val) vxge_vBIT(val, 49, 15) +/*0x00e60*/ u64 kdfc_vp_partition_4; +#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_8(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_4_LENGTH_9(val) vxge_vBIT(val, 49, 15) +/*0x00e68*/ u64 kdfc_vp_partition_5; +#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_10(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_5_LENGTH_11(val) vxge_vBIT(val, 49, 15) +/*0x00e70*/ u64 kdfc_vp_partition_6; +#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_12(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_6_LENGTH_13(val) vxge_vBIT(val, 49, 15) +/*0x00e78*/ u64 kdfc_vp_partition_7; +#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_14(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_VP_PARTITION_7_LENGTH_15(val) vxge_vBIT(val, 49, 15) +/*0x00e80*/ u64 kdfc_vp_partition_8; +#define VXGE_HW_KDFC_VP_PARTITION_8_LENGTH_16(val) vxge_vBIT(val, 17, 15) +/*0x00e88*/ u64 kdfc_w_round_robin_0; +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(val) vxge_vBIT(val, 59, 5) + + u8 unused0f28[0x0f28-0x0e90]; + +/*0x00f28*/ u64 kdfc_w_round_robin_20; +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_20_NUMBER_7(val) vxge_vBIT(val, 59, 5) + +#define VXGE_HW_WRR_FIFO_COUNT 20 + + u8 unused0fc8[0x0fc8-0x0f30]; + +/*0x00fc8*/ u64 kdfc_w_round_robin_40; +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_0(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_1(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_2(val) vxge_vBIT(val, 19, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_3(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_4(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_5(val) vxge_vBIT(val, 43, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_6(val) vxge_vBIT(val, 51, 5) +#define VXGE_HW_KDFC_W_ROUND_ROBIN_40_NUMBER_7(val) vxge_vBIT(val, 59, 5) + + u8 unused1068[0x01068-0x0fd0]; + +/*0x01068*/ u64 kdfc_entry_type_sel_0; +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(val) vxge_vBIT(val, 22, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(val) vxge_vBIT(val, 30, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(val) vxge_vBIT(val, 38, 2) +#define 
VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(val) vxge_vBIT(val, 46, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(val) vxge_vBIT(val, 54, 2) +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(val) vxge_vBIT(val, 62, 2) +/*0x01070*/ u64 kdfc_entry_type_sel_1; +#define VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(val) vxge_vBIT(val, 6, 2) +/*0x01078*/ u64 kdfc_fifo_0_ctrl; +#define VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_WEIGHTED_RR_SERVICE_STATES 176 +#define VXGE_HW_WRR_FIFO_SERVICE_STATES 153 + + u8 unused1100[0x01100-0x1080]; + +/*0x01100*/ u64 kdfc_fifo_17_ctrl; +#define VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(val) vxge_vBIT(val, 3, 5) + + u8 unused1600[0x01600-0x1108]; + +/*0x01600*/ u64 rxmac_int_status; +#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_GEN_ERR_RXMAC_GEN_INT vxge_mBIT(3) +#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT vxge_mBIT(7) +#define VXGE_HW_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT \ + vxge_mBIT(11) +/*0x01608*/ u64 rxmac_int_mask; + u8 unused01618[0x01618-0x01610]; + +/*0x01618*/ u64 rxmac_gen_err_reg; +/*0x01620*/ u64 rxmac_gen_err_mask; +/*0x01628*/ u64 rxmac_gen_err_alarm; +/*0x01630*/ u64 rxmac_ecc_err_reg; +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_SG_ERR(val) \ + vxge_vBIT(val, 0, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(val) \ + vxge_vBIT(val, 4, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_SG_ERR(val) \ + vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(val) \ + vxge_vBIT(val, 12, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_SG_ERR(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_SG_ERR(val) \ + vxge_vBIT(val, 24, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(val) \ + vxge_vBIT(val, 26, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_SG_ERR(val) \ + vxge_vBIT(val, 28, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(val) \ + vxge_vBIT(val, 30, 2) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_SG_ERR vxge_mBIT(32) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR vxge_mBIT(33) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_SG_ERR vxge_mBIT(34) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR vxge_mBIT(35) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_SG_ERR vxge_mBIT(36) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR vxge_mBIT(37) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_SG_ERR vxge_mBIT(38) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR vxge_mBIT(39) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_SG_ERR(val) \ + vxge_vBIT(val, 40, 7) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(val) \ + vxge_vBIT(val, 47, 7) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_SG_ERR(val) \ + vxge_vBIT(val, 54, 3) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(val) \ + vxge_vBIT(val, 57, 3) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_SG_ERR \ + vxge_mBIT(60) +#define VXGE_HW_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR \ + vxge_mBIT(61) +/*0x01638*/ u64 rxmac_ecc_err_mask; +/*0x01640*/ u64 rxmac_ecc_err_alarm; +/*0x01648*/ u64 rxmac_various_err_reg; +#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR vxge_mBIT(0) +#define 
VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR vxge_mBIT(1) +#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR vxge_mBIT(2) +#define VXGE_HW_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR vxge_mBIT(3) +/*0x01650*/ u64 rxmac_various_err_mask; +/*0x01658*/ u64 rxmac_various_err_alarm; +/*0x01660*/ u64 rxmac_gen_cfg; +#define VXGE_HW_RXMAC_GEN_CFG_SCALE_RMAC_UTIL vxge_mBIT(11) +/*0x01668*/ u64 rxmac_authorize_all_addr; +#define VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(n) vxge_mBIT(n) +/*0x01670*/ u64 rxmac_authorize_all_vid; +#define VXGE_HW_RXMAC_AUTHORIZE_ALL_VID_VP(n) vxge_mBIT(n) + u8 unused016c0[0x016c0-0x01678]; + +/*0x016c0*/ u64 rxmac_red_rate_repl_queue; +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR0(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR1(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR2(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_CRATE_THR3(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR0(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR1(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR2(val) vxge_vBIT(val, 24, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_FRATE_THR3(val) vxge_vBIT(val, 28, 4) +#define VXGE_HW_RXMAC_RED_RATE_REPL_QUEUE_TRICKLE_EN vxge_mBIT(35) + u8 unused016e0[0x016e0-0x016c8]; + +/*0x016e0*/ u64 rxmac_cfg0_port[3]; +#define VXGE_HW_RXMAC_CFG0_PORT_RMAC_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_CFG0_PORT_STRIP_FCS vxge_mBIT(7) +#define VXGE_HW_RXMAC_CFG0_PORT_DISCARD_PFRM vxge_mBIT(11) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_FCS_ERR vxge_mBIT(15) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LONG_ERR vxge_mBIT(19) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_USIZED_ERR vxge_mBIT(23) +#define VXGE_HW_RXMAC_CFG0_PORT_IGNORE_LEN_MISMATCH vxge_mBIT(27) +#define VXGE_HW_RXMAC_CFG0_PORT_MAX_PYLD_LEN(val) vxge_vBIT(val, 50, 14) + u8 unused01710[0x01710-0x016f8]; + +/*0x01710*/ u64 rxmac_cfg2_port[3]; +#define VXGE_HW_RXMAC_CFG2_PORT_PROM_EN vxge_mBIT(3) +/*0x01728*/ u64 rxmac_pause_cfg_port[3]; +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN vxge_mBIT(7) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_ACCEL_SEND(val) vxge_vBIT(val, 9, 3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_DUAL_THR vxge_mBIT(15) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(val) vxge_vBIT(val, 20, 16) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_FCS_ERR vxge_mBIT(39) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_IGNORE_PF_LEN_ERR vxge_mBIT(43) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_LIMITER_EN vxge_mBIT(47) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(val) vxge_vBIT(val, 48, 8) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_PERMIT_RATEMGMT_CTRL vxge_mBIT(59) + u8 unused01758[0x01758-0x01740]; + +/*0x01758*/ u64 rxmac_red_cfg0_port[3]; +#define VXGE_HW_RXMAC_RED_CFG0_PORT_RED_EN_VP(n) vxge_mBIT(n) +/*0x01770*/ u64 rxmac_red_cfg1_port[3]; +#define VXGE_HW_RXMAC_RED_CFG1_PORT_FINE_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_RED_CFG1_PORT_RED_EN_REPL_QUEUE vxge_mBIT(11) +/*0x01788*/ u64 rxmac_red_cfg2_port[3]; +#define VXGE_HW_RXMAC_RED_CFG2_PORT_TRICKLE_EN_VP(n) vxge_mBIT(n) +/*0x017a0*/ u64 rxmac_link_util_port[3]; +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_UTILIZATION(val) \ + vxge_vBIT(val, 1, 7) +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_FRAC_UTIL(val) \ + vxge_vBIT(val, 12, 4) +#define 
VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_LINK_UTIL_PORT_RMAC_RMAC_SCALE_FACTOR vxge_mBIT(23) + u8 unused017d0[0x017d0-0x017b8]; + +/*0x017d0*/ u64 rxmac_status_port[3]; +#define VXGE_HW_RXMAC_STATUS_PORT_RMAC_RX_FRM_RCVD vxge_mBIT(3) + u8 unused01800[0x01800-0x017e8]; + +/*0x01800*/ u64 rxmac_rx_pa_cfg0; +#define VXGE_HW_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR vxge_mBIT(3) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N vxge_mBIT(7) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO vxge_mBIT(18) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(19) +#define VXGE_HW_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING vxge_mBIT(23) +#define VXGE_HW_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN vxge_mBIT(27) +#define VXGE_HW_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE vxge_mBIT(35) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L3_CSUM_ERR vxge_mBIT(39) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR vxge_mBIT(43) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_L4_CSUM_ERR vxge_mBIT(47) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR vxge_mBIT(51) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_ANY_FRM_IF_RPA_ERR vxge_mBIT(55) +#define VXGE_HW_RXMAC_RX_PA_CFG0_TOSS_OFFLD_FRM_IF_RPA_ERR vxge_mBIT(59) +#define VXGE_HW_RXMAC_RX_PA_CFG0_JUMBO_SNAP_EN vxge_mBIT(63) +/*0x01808*/ u64 rxmac_rx_pa_cfg1; +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH vxge_mBIT(3) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH vxge_mBIT(7) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH vxge_mBIT(11) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH vxge_mBIT(15) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF vxge_mBIT(19) +#define VXGE_HW_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG vxge_mBIT(23) + u8 unused01828[0x01828-0x01810]; + +/*0x01828*/ u64 rts_mgr_cfg0; +#define VXGE_HW_RTS_MGR_CFG0_RTS_DP_SP_PRIORITY vxge_mBIT(3) +#define VXGE_HW_RTS_MGR_CFG0_FLEX_L4PRTCL_VALUE(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_RTS_MGR_CFG0_ICMP_TRASH vxge_mBIT(35) +#define VXGE_HW_RTS_MGR_CFG0_TCPSYN_TRASH vxge_mBIT(39) +#define VXGE_HW_RTS_MGR_CFG0_ZL4PYLD_TRASH vxge_mBIT(43) +#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_TCP_TRASH vxge_mBIT(47) +#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_UDP_TRASH vxge_mBIT(51) +#define VXGE_HW_RTS_MGR_CFG0_L4PRTCL_FLEX_TRASH vxge_mBIT(55) +#define VXGE_HW_RTS_MGR_CFG0_IPFRAG_TRASH vxge_mBIT(59) +/*0x01830*/ u64 rts_mgr_cfg1; +#define VXGE_HW_RTS_MGR_CFG1_DA_ACTIVE_TABLE vxge_mBIT(3) +#define VXGE_HW_RTS_MGR_CFG1_PN_ACTIVE_TABLE vxge_mBIT(7) +/*0x01838*/ u64 rts_mgr_criteria_priority; +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ETYPE(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ICMP_TCPSYN(val) vxge_vBIT(val, 9, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PN(val) vxge_vBIT(val, 13, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RANGE_L4PN(val) vxge_vBIT(val, 17, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_RTH_IT(val) vxge_vBIT(val, 21, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_DS(val) vxge_vBIT(val, 25, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_QOS(val) vxge_vBIT(val, 29, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_ZL4PYLD(val) vxge_vBIT(val, 33, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_L4PRTCL(val) vxge_vBIT(val, 37, 3) +/*0x01840*/ u64 rts_mgr_da_pause_cfg; +#define VXGE_HW_RTS_MGR_DA_PAUSE_CFG_VPATH_VECTOR(val) vxge_vBIT(val, 0, 17) +/*0x01848*/ u64 rts_mgr_da_slow_proto_cfg; +#define VXGE_HW_RTS_MGR_DA_SLOW_PROTO_CFG_VPATH_VECTOR(val) \ + vxge_vBIT(val, 0, 17) + u8 
unused01890[0x01890-0x01850]; +/*0x01890*/ u64 rts_mgr_cbasin_cfg; + u8 unused01968[0x01968-0x01898]; + +/*0x01968*/ u64 dbg_stat_rx_any_frms; +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT0_RX_ANY_FRMS(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT1_RX_ANY_FRMS(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_DBG_STAT_RX_ANY_FRMS_PORT2_RX_ANY_FRMS(val) \ + vxge_vBIT(val, 16, 8) + u8 unused01a00[0x01a00-0x01970]; + +/*0x01a00*/ u64 rxmac_red_rate_vp[17]; +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR0(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR1(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR2(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_CRATE_THR3(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR0(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR1(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR2(val) vxge_vBIT(val, 24, 4) +#define VXGE_HW_RXMAC_RED_RATE_VP_FRATE_THR3(val) vxge_vBIT(val, 28, 4) + u8 unused01e00[0x01e00-0x01a88]; + +/*0x01e00*/ u64 xgmac_int_status; +#define VXGE_HW_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT vxge_mBIT(3) +#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT0_XMAC_LINK_INT_PORT0 \ + vxge_mBIT(7) +#define VXGE_HW_XGMAC_INT_STATUS_XMAC_LINK_ERR_PORT1_XMAC_LINK_INT_PORT1 \ + vxge_mBIT(11) +#define VXGE_HW_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT vxge_mBIT(15) +#define VXGE_HW_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT vxge_mBIT(19) +#define VXGE_HW_XGMAC_INT_STATUS_ASIC_GPIO_ERR_ASIC_GPIO_INT vxge_mBIT(23) +/*0x01e08*/ u64 xgmac_int_mask; +/*0x01e10*/ u64 xmac_gen_err_reg; +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_ACTOR_CHURN_DETECTED \ + vxge_mBIT(7) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_PARTNER_CHURN_DETECTED \ + vxge_mBIT(11) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT0_RECEIVED_LACPDU vxge_mBIT(15) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_ACTOR_CHURN_DETECTED \ + vxge_mBIT(19) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_PARTNER_CHURN_DETECTED \ + vxge_mBIT(23) +#define VXGE_HW_XMAC_GEN_ERR_REG_LAGC_LAG_PORT1_RECEIVED_LACPDU vxge_mBIT(27) +#define VXGE_HW_XMAC_GEN_ERR_REG_XLCM_LAG_FAILOVER_DETECTED vxge_mBIT(31) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_SG_ERR(val) \ + vxge_vBIT(val, 40, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(val) \ + vxge_vBIT(val, 42, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_SG_ERR(val) \ + vxge_vBIT(val, 44, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(val) \ + vxge_vBIT(val, 46, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_SG_ERR(val) \ + vxge_vBIT(val, 48, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(val) \ + vxge_vBIT(val, 50, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_SG_ERR(val) \ + vxge_vBIT(val, 52, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(val) \ + vxge_vBIT(val, 54, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_SG_ERR(val) \ + vxge_vBIT(val, 56, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(val) \ + vxge_vBIT(val, 58, 2) +#define VXGE_HW_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR vxge_mBIT(63) +/*0x01e18*/ u64 xmac_gen_err_mask; +/*0x01e20*/ u64 xmac_gen_err_alarm; +/*0x01e28*/ u64 xmac_link_err_port0_reg; +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_DOWN vxge_mBIT(3) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_UP vxge_mBIT(7) +#define 
VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_DOWN vxge_mBIT(11) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_WENT_UP vxge_mBIT(15) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_FAULT \ + vxge_mBIT(19) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_PORT_REAFFIRMED_OK vxge_mBIT(23) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_DOWN vxge_mBIT(27) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMACJ_LINK_UP vxge_mBIT(31) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_RATE_CHANGE vxge_mBIT(35) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_RATEMGMT_LASI_INV vxge_mBIT(39) +#define VXGE_HW_XMAC_LINK_ERR_PORT_REG_XMDIO_MDIO_MGR_ACCESS_COMPLETE \ + vxge_mBIT(47) +/*0x01e30*/ u64 xmac_link_err_port0_mask; +/*0x01e38*/ u64 xmac_link_err_port0_alarm; +/*0x01e40*/ u64 xmac_link_err_port1_reg; +/*0x01e48*/ u64 xmac_link_err_port1_mask; +/*0x01e50*/ u64 xmac_link_err_port1_alarm; +/*0x01e58*/ u64 xgxs_gen_err_reg; +#define VXGE_HW_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR vxge_mBIT(63) +/*0x01e60*/ u64 xgxs_gen_err_mask; +/*0x01e68*/ u64 xgxs_gen_err_alarm; +/*0x01e70*/ u64 asic_ntwk_err_reg; +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN vxge_mBIT(3) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP vxge_mBIT(7) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN vxge_mBIT(11) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP vxge_mBIT(15) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT vxge_mBIT(19) +#define VXGE_HW_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23) +/*0x01e78*/ u64 asic_ntwk_err_mask; +/*0x01e80*/ u64 asic_ntwk_err_alarm; +/*0x01e88*/ u64 asic_gpio_err_reg; +#define VXGE_HW_ASIC_GPIO_ERR_REG_XMACJ_GPIO_INT(n) vxge_mBIT(n) +/*0x01e90*/ u64 asic_gpio_err_mask; +/*0x01e98*/ u64 asic_gpio_err_alarm; +/*0x01ea0*/ u64 xgmac_gen_status; +#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_OK vxge_mBIT(3) +#define VXGE_HW_XGMAC_GEN_STATUS_XMACJ_NTWK_DATA_RATE vxge_mBIT(11) +/*0x01ea8*/ u64 xgmac_gen_fw_memo_status; +#define VXGE_HW_XGMAC_GEN_FW_MEMO_STATUS_XMACJ_EVENTS_PENDING(val) \ + vxge_vBIT(val, 0, 17) +/*0x01eb0*/ u64 xgmac_gen_fw_memo_mask; +#define VXGE_HW_XGMAC_GEN_FW_MEMO_MASK_MASK(val) vxge_vBIT(val, 0, 64) +/*0x01eb8*/ u64 xgmac_gen_fw_vpath_to_vsport_status; +#define VXGE_HW_XGMAC_GEN_FW_VPATH_TO_VSPORT_STATUS_XMACJ_EVENTS_PENDING(val) \ + vxge_vBIT(val, 0, 17) +/*0x01ec0*/ u64 xgmac_main_cfg_port[2]; +#define VXGE_HW_XGMAC_MAIN_CFG_PORT_PORT_EN vxge_mBIT(3) + u8 unused01f40[0x01f40-0x01ed0]; + +/*0x01f40*/ u64 xmac_gen_cfg; +#define VXGE_HW_XMAC_GEN_CFG_RATEMGMT_MAC_RATE_SEL(val) vxge_vBIT(val, 2, 2) +#define VXGE_HW_XMAC_GEN_CFG_TX_HEAD_DROP_WHEN_FAULT vxge_mBIT(7) +#define VXGE_HW_XMAC_GEN_CFG_FAULT_BEHAVIOUR vxge_mBIT(27) +#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_UP(val) vxge_vBIT(val, 28, 4) +#define VXGE_HW_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(val) vxge_vBIT(val, 32, 4) +/*0x01f48*/ u64 xmac_timestamp; +#define VXGE_HW_XMAC_TIMESTAMP_EN vxge_mBIT(3) +#define VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_XMAC_TIMESTAMP_INTERVAL(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_XMAC_TIMESTAMP_TIMER_RESTART vxge_mBIT(19) +#define VXGE_HW_XMAC_TIMESTAMP_XMACJ_ROLLOVER_CNT(val) vxge_vBIT(val, 32, 16) +/*0x01f50*/ u64 xmac_stats_gen_cfg; +#define VXGE_HW_XMAC_STATS_GEN_CFG_PRTAGGR_CUM_TIMER(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPATH_CUM_TIMER(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VLAN_HANDLING vxge_mBIT(15) +/*0x01f58*/ u64 xmac_stats_sys_cmd; +#define 
VXGE_HW_XMAC_STATS_SYS_CMD_OP(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_XMAC_STATS_SYS_CMD_STROBE vxge_mBIT(15) +#define VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(val) vxge_vBIT(val, 27, 5) +#define VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8) +/*0x01f60*/ u64 xmac_stats_sys_data; +#define VXGE_HW_XMAC_STATS_SYS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64) + u8 unused01f80[0x01f80-0x01f68]; + +/*0x01f80*/ u64 asic_ntwk_ctrl; +#define VXGE_HW_ASIC_NTWK_CTRL_REQ_TEST_NTWK vxge_mBIT(3) +#define VXGE_HW_ASIC_NTWK_CTRL_PORT0_REQ_TEST_PORT vxge_mBIT(11) +#define VXGE_HW_ASIC_NTWK_CTRL_PORT1_REQ_TEST_PORT vxge_mBIT(15) +/*0x01f88*/ u64 asic_ntwk_cfg_show_port_info; +#define VXGE_HW_ASIC_NTWK_CFG_SHOW_PORT_INFO_VP(n) vxge_mBIT(n) +/*0x01f90*/ u64 asic_ntwk_cfg_port_num; +#define VXGE_HW_ASIC_NTWK_CFG_PORT_NUM_VP(n) vxge_mBIT(n) +/*0x01f98*/ u64 xmac_cfg_port[3]; +#define VXGE_HW_XMAC_CFG_PORT_XGMII_LOOPBACK vxge_mBIT(3) +#define VXGE_HW_XMAC_CFG_PORT_XGMII_REVERSE_LOOPBACK vxge_mBIT(7) +#define VXGE_HW_XMAC_CFG_PORT_XGMII_TX_BEHAV vxge_mBIT(11) +#define VXGE_HW_XMAC_CFG_PORT_XGMII_RX_BEHAV vxge_mBIT(15) +/*0x01fb0*/ u64 xmac_station_addr_port[2]; +#define VXGE_HW_XMAC_STATION_ADDR_PORT_MAC_ADDR(val) vxge_vBIT(val, 0, 48) + u8 unused02020[0x02020-0x01fc0]; + +/*0x02020*/ u64 lag_cfg; +#define VXGE_HW_LAG_CFG_EN vxge_mBIT(3) +#define VXGE_HW_LAG_CFG_MODE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_LAG_CFG_TX_DISCARD_BEHAV vxge_mBIT(11) +#define VXGE_HW_LAG_CFG_RX_DISCARD_BEHAV vxge_mBIT(15) +#define VXGE_HW_LAG_CFG_PREF_INDIV_PORT_NUM vxge_mBIT(19) +/*0x02028*/ u64 lag_status; +#define VXGE_HW_LAG_STATUS_XLCM_WAITING_TO_FAILBACK vxge_mBIT(3) +#define VXGE_HW_LAG_STATUS_XLCM_TIMER_VAL_COLD_FAILOVER(val) \ + vxge_vBIT(val, 8, 8) +/*0x02030*/ u64 lag_active_passive_cfg; +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY vxge_mBIT(3) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES vxge_mBIT(7) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM vxge_mBIT(11) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK vxge_mBIT(15) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN vxge_mBIT(19) +#define VXGE_HW_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT(val) \ + vxge_vBIT(val, 32, 16) + u8 unused02040[0x02040-0x02038]; + +/*0x02040*/ u64 lag_lacp_cfg; +#define VXGE_HW_LAG_LACP_CFG_EN vxge_mBIT(3) +#define VXGE_HW_LAG_LACP_CFG_LACP_BEGIN vxge_mBIT(7) +#define VXGE_HW_LAG_LACP_CFG_DISCARD_LACP vxge_mBIT(11) +#define VXGE_HW_LAG_LACP_CFG_LIBERAL_LEN_CHK vxge_mBIT(15) +/*0x02048*/ u64 lag_timer_cfg_1; +#define VXGE_HW_LAG_TIMER_CFG_1_FAST_PER(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_TIMER_CFG_1_SLOW_PER(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_TIMER_CFG_1_SHORT_TIMEOUT(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_TIMER_CFG_1_LONG_TIMEOUT(val) vxge_vBIT(val, 48, 16) +/*0x02050*/ u64 lag_timer_cfg_2; +#define VXGE_HW_LAG_TIMER_CFG_2_CHURN_DET(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_TIMER_CFG_2_AGGR_WAIT(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(val) vxge_vBIT(val, 48, 16) +/*0x02058*/ u64 lag_sys_id; +#define VXGE_HW_LAG_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48) +#define VXGE_HW_LAG_SYS_ID_USE_PORT_ADDR vxge_mBIT(51) +#define VXGE_HW_LAG_SYS_ID_ADDR_SEL vxge_mBIT(55) +/*0x02060*/ u64 lag_sys_cfg; +#define VXGE_HW_LAG_SYS_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16) + u8 unused02070[0x02070-0x02068]; + +/*0x02070*/ u64 lag_aggr_addr_cfg[2]; +#define 
VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR(val) vxge_vBIT(val, 0, 48) +#define VXGE_HW_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR vxge_mBIT(51) +#define VXGE_HW_LAG_AGGR_ADDR_CFG_ADDR_SEL vxge_mBIT(55) +/*0x02080*/ u64 lag_aggr_id_cfg[2]; +#define VXGE_HW_LAG_AGGR_ID_CFG_ID(val) vxge_vBIT(val, 0, 16) +/*0x02090*/ u64 lag_aggr_admin_key[2]; +#define VXGE_HW_LAG_AGGR_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16) +/*0x020a0*/ u64 lag_aggr_alt_admin_key; +#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_KEY(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR vxge_mBIT(19) +/*0x020a8*/ u64 lag_aggr_oper_key[2]; +#define VXGE_HW_LAG_AGGR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16) +/*0x020b8*/ u64 lag_aggr_partner_sys_id[2]; +#define VXGE_HW_LAG_AGGR_PARTNER_SYS_ID_LAGC_ADDR(val) vxge_vBIT(val, 0, 48) +/*0x020c8*/ u64 lag_aggr_partner_info[2]; +#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_SYS_PRI(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_AGGR_PARTNER_INFO_LAGC_OPER_KEY(val) \ + vxge_vBIT(val, 16, 16) +/*0x020d8*/ u64 lag_aggr_state[2]; +#define VXGE_HW_LAG_AGGR_STATE_LAGC_TX vxge_mBIT(3) +#define VXGE_HW_LAG_AGGR_STATE_LAGC_RX vxge_mBIT(7) +#define VXGE_HW_LAG_AGGR_STATE_LAGC_READY vxge_mBIT(11) +#define VXGE_HW_LAG_AGGR_STATE_LAGC_INDIVIDUAL vxge_mBIT(15) + u8 unused020f0[0x020f0-0x020e8]; + +/*0x020f0*/ u64 lag_port_cfg[2]; +#define VXGE_HW_LAG_PORT_CFG_EN vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_CFG_DISCARD_SLOW_PROTO vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_CFG_HOST_CHOSEN_AGGR vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO vxge_mBIT(15) +/*0x02100*/ u64 lag_port_actor_admin_cfg[2]; +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G(val) vxge_vBIT(val, 48, 16) +/*0x02110*/ u64 lag_port_actor_admin_state[2]; +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED vxge_mBIT(31) +/*0x02120*/ u64 lag_port_partner_admin_sys_id[2]; +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR(val) vxge_vBIT(val, 0, 48) +/*0x02130*/ u64 lag_port_partner_admin_cfg[2]; +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_KEY(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI(val) \ + vxge_vBIT(val, 48, 16) +/*0x02140*/ u64 lag_port_partner_admin_state[2]; +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING vxge_mBIT(23) +#define 
VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED vxge_mBIT(31) +/*0x02150*/ u64 lag_port_to_aggr[2]; +#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_ID(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_TO_AGGR_LAGC_AGGR_VLD_ID vxge_mBIT(19) +/*0x02160*/ u64 lag_port_actor_oper_key[2]; +#define VXGE_HW_LAG_PORT_ACTOR_OPER_KEY_LAGC_KEY(val) vxge_vBIT(val, 0, 16) +/*0x02170*/ u64 lag_port_actor_oper_state[2]; +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_SYNCHRONIZATION vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_ACTOR_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31) +/*0x02180*/ u64 lag_port_partner_oper_sys_id[2]; +#define VXGE_HW_LAG_PORT_PARTNER_OPER_SYS_ID_LAGC_ADDR(val) \ + vxge_vBIT(val, 0, 48) +/*0x02190*/ u64 lag_port_partner_oper_info[2]; +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_SYS_PRI(val) \ + vxge_vBIT(val, 0, 16) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_KEY(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_NUM(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_INFO_LAGC_PORT_PRI(val) \ + vxge_vBIT(val, 48, 16) +/*0x021a0*/ u64 lag_port_partner_oper_state[2]; +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_ACTIVITY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_LACP_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_AGGREGATION vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_SYNCHRONIZATION \ + vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_COLLECTING vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DISTRIBUTING vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_DEFAULTED vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_PARTNER_OPER_STATE_LAGC_EXPIRED vxge_mBIT(31) +/*0x021b0*/ u64 lag_port_state_vars[2]; +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_READY vxge_mBIT(3) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_SELECTED(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_AGGR_NUM vxge_mBIT(11) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_MOVED vxge_mBIT(15) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_ENABLED vxge_mBIT(18) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PORT_DISABLED vxge_mBIT(19) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_NTT vxge_mBIT(23) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN vxge_mBIT(27) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN vxge_mBIT(31) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_INFO_LEN_MISMATCH \ + vxge_mBIT(32) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_INFO_LEN_MISMATCH \ + vxge_mBIT(33) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_COLL_INFO_LEN_MISMATCH vxge_mBIT(34) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_TERM_INFO_LEN_MISMATCH vxge_mBIT(35) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_RX_FSM_STATE(val) vxge_vBIT(val, 37, 3) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_FSM_STATE(val) \ + vxge_vBIT(val, 41, 3) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_MUX_REASON(val) vxge_vBIT(val, 44, 4) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_STATE 
vxge_mBIT(54) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_STATE vxge_mBIT(55) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_ACTOR_CHURN_COUNT(val) \ + vxge_vBIT(val, 56, 4) +#define VXGE_HW_LAG_PORT_STATE_VARS_LAGC_PARTNER_CHURN_COUNT(val) \ + vxge_vBIT(val, 60, 4) +/*0x021c0*/ u64 lag_port_timer_cntr[2]; +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_CURRENT_WHILE(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PERIODIC_WHILE(val) \ + vxge_vBIT(val, 8, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_WAIT_WHILE(val) vxge_vBIT(val, 16, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_TX_LACP(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_SYNC_TRANSITION_COUNT(val) \ + vxge_vBIT(val, 32, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_SYNC_TRANSITION_COUNT(val) \ + vxge_vBIT(val, 40, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_ACTOR_CHANGE_COUNT(val) \ + vxge_vBIT(val, 48, 8) +#define VXGE_HW_LAG_PORT_TIMER_CNTR_LAGC_PARTNER_CHANGE_COUNT(val) \ + vxge_vBIT(val, 56, 8) + u8 unused02208[0x02700-0x021d0]; + +/*0x02700*/ u64 rtdma_int_status; +#define VXGE_HW_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT vxge_mBIT(1) +#define VXGE_HW_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT vxge_mBIT(2) +#define VXGE_HW_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT vxge_mBIT(4) +#define VXGE_HW_RTDMA_INT_STATUS_SM_ERROR_SM_INT vxge_mBIT(5) +/*0x02708*/ u64 rtdma_int_mask; +/*0x02710*/ u64 pda_alarm_reg; +#define VXGE_HW_PDA_ALARM_REG_PDA_HSC_FIFO_ERR vxge_mBIT(0) +#define VXGE_HW_PDA_ALARM_REG_PDA_SM_ERR vxge_mBIT(1) +/*0x02718*/ u64 pda_alarm_mask; +/*0x02720*/ u64 pda_alarm_alarm; +/*0x02728*/ u64 pcc_error_reg; +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_SBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_SBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(n) vxge_mBIT(n) +#define VXGE_HW_PCC_ERROR_REG_PCC_PCC_SERR(n) vxge_mBIT(n) +/*0x02730*/ u64 pcc_error_mask; +/*0x02738*/ u64 pcc_error_alarm; +/*0x02740*/ u64 lso_error_reg; +#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_ABORT(n) vxge_mBIT(n) +#define VXGE_HW_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(n) vxge_mBIT(n) +/*0x02748*/ u64 lso_error_mask; +/*0x02750*/ u64 lso_error_alarm; +/*0x02758*/ u64 sm_error_reg; +#define VXGE_HW_SM_ERROR_REG_SM_FSM_ERR_ALARM vxge_mBIT(15) +/*0x02760*/ u64 sm_error_mask; +/*0x02768*/ u64 sm_error_alarm; + + u8 unused027a8[0x027a8-0x02770]; + +/*0x027a8*/ u64 txd_ownership_ctrl; +#define VXGE_HW_TXD_OWNERSHIP_CTRL_KEEP_OWNERSHIP vxge_mBIT(7) +/*0x027b0*/ u64 pcc_cfg; +#define VXGE_HW_PCC_CFG_PCC_ENABLE(n) vxge_mBIT(n) +#define VXGE_HW_PCC_CFG_PCC_ECC_ENABLE_N(n) vxge_mBIT(n) +/*0x027b8*/ u64 pcc_control; +#define VXGE_HW_PCC_CONTROL_FE_ENABLE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_PCC_CONTROL_EARLY_ASSIGN_EN vxge_mBIT(15) +#define VXGE_HW_PCC_CONTROL_UNBLOCK_DB_ERR vxge_mBIT(31) +/*0x027c0*/ u64 pda_status1; +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_0_CTR(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_1_CTR(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_2_CTR(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_3_CTR(val) vxge_vBIT(val, 28, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_4_CTR(val) vxge_vBIT(val, 36, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_5_CTR(val) vxge_vBIT(val, 44, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_6_CTR(val) vxge_vBIT(val, 52, 4) +#define VXGE_HW_PDA_STATUS1_PDA_WRAP_7_CTR(val) 
vxge_vBIT(val, 60, 4) +/*0x027c8*/ u64 rtdma_bw_timer; +#define VXGE_HW_RTDMA_BW_TIMER_TIMER_CTRL(val) vxge_vBIT(val, 12, 4) + + u8 unused02900[0x02900-0x027d0]; +/*0x02900*/ u64 g3cmct_int_status; +#define VXGE_HW_G3CMCT_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x02908*/ u64 g3cmct_int_mask; +/*0x02910*/ u64 g3cmct_err_reg; +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_SM_ERR vxge_mBIT(4) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_DECC vxge_mBIT(5) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC vxge_mBIT(6) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC vxge_mBIT(7) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_SECC vxge_mBIT(29) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_GDDR3_U_SECC vxge_mBIT(30) +#define VXGE_HW_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_SECC vxge_mBIT(31) +/*0x02918*/ u64 g3cmct_err_mask; +/*0x02920*/ u64 g3cmct_err_alarm; + u8 unused03000[0x03000-0x02928]; + +/*0x03000*/ u64 mc_int_status; +#define VXGE_HW_MC_INT_STATUS_MC_ERR_MC_INT vxge_mBIT(3) +#define VXGE_HW_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT vxge_mBIT(7) +#define VXGE_HW_MC_INT_STATUS_FAU_GEN_ERR_FAU_GEN_INT vxge_mBIT(11) +#define VXGE_HW_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT vxge_mBIT(15) +/*0x03008*/ u64 mc_int_mask; +/*0x03010*/ u64 mc_err_reg; +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_A vxge_mBIT(3) +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_SG_ERR_B vxge_mBIT(4) +#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_SG_ERR vxge_mBIT(5) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_0 vxge_mBIT(6) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_SG_ERR_1 vxge_mBIT(7) +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A vxge_mBIT(10) +#define VXGE_HW_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B vxge_mBIT(11) +#define VXGE_HW_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR vxge_mBIT(12) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 vxge_mBIT(13) +#define VXGE_HW_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 vxge_mBIT(14) +#define VXGE_HW_MC_ERR_REG_MC_SM_ERR vxge_mBIT(15) +/*0x03018*/ u64 mc_err_mask; +/*0x03020*/ u64 mc_err_alarm; +/*0x03028*/ u64 grocrc_alarm_reg; +#define VXGE_HW_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR vxge_mBIT(3) +#define VXGE_HW_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR vxge_mBIT(7) +/*0x03030*/ u64 grocrc_alarm_mask; +/*0x03038*/ u64 grocrc_alarm_alarm; + u8 unused03100[0x03100-0x03040]; + +/*0x03100*/ u64 rx_thresh_cfg_repl; +#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_0(val) vxge_vBIT(val, 16, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_1(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_2(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_RED_THR_3(val) vxge_vBIT(val, 40, 8) +#define VXGE_HW_RX_THRESH_CFG_REPL_GLOBAL_WOL_EN vxge_mBIT(62) +#define VXGE_HW_RX_THRESH_CFG_REPL_EXACT_VP_MATCH_REQ vxge_mBIT(63) + u8 unused033b8[0x033b8-0x03108]; + +/*0x033b8*/ u64 fbmc_ecc_cfg; +#define VXGE_HW_FBMC_ECC_CFG_ENABLE(val) vxge_vBIT(val, 3, 5) + u8 unused03400[0x03400-0x033c0]; + +/*0x03400*/ u64 pcipif_int_status; +#define VXGE_HW_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT vxge_mBIT(3) +#define VXGE_HW_PCIPIF_INT_STATUS_SBECC_ERR_SBECC_ERR_INT vxge_mBIT(7) +#define VXGE_HW_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT vxge_mBIT(11) +#define VXGE_HW_PCIPIF_INT_STATUS_SRPCIM_MSG_SRPCIM_MSG_INT vxge_mBIT(15) +#define VXGE_HW_PCIPIF_INT_STATUS_MRPCIM_SPARE_R1_MRPCIM_SPARE_R1_INT \ + vxge_mBIT(19) +/*0x03408*/ u64 pcipif_int_mask; +/*0x03410*/ u64 dbecc_err_reg; +#define 
VXGE_HW_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR vxge_mBIT(3) +#define VXGE_HW_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR vxge_mBIT(7) +#define VXGE_HW_DBECC_ERR_REG_PCI_P_HDR_DB_ERR vxge_mBIT(11) +#define VXGE_HW_DBECC_ERR_REG_PCI_P_DATA_DB_ERR vxge_mBIT(15) +#define VXGE_HW_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR vxge_mBIT(19) +#define VXGE_HW_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR vxge_mBIT(23) +/*0x03418*/ u64 dbecc_err_mask; +/*0x03420*/ u64 dbecc_err_alarm; +/*0x03428*/ u64 sbecc_err_reg; +#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_BUF_SG_ERR vxge_mBIT(3) +#define VXGE_HW_SBECC_ERR_REG_PCI_RETRY_SOT_SG_ERR vxge_mBIT(7) +#define VXGE_HW_SBECC_ERR_REG_PCI_P_HDR_SG_ERR vxge_mBIT(11) +#define VXGE_HW_SBECC_ERR_REG_PCI_P_DATA_SG_ERR vxge_mBIT(15) +#define VXGE_HW_SBECC_ERR_REG_PCI_NP_HDR_SG_ERR vxge_mBIT(19) +#define VXGE_HW_SBECC_ERR_REG_PCI_NP_DATA_SG_ERR vxge_mBIT(23) +/*0x03430*/ u64 sbecc_err_mask; +/*0x03438*/ u64 sbecc_err_alarm; +/*0x03440*/ u64 general_err_reg; +#define VXGE_HW_GENERAL_ERR_REG_PCI_DROPPED_ILLEGAL_CFG vxge_mBIT(3) +#define VXGE_HW_GENERAL_ERR_REG_PCI_ILLEGAL_MEM_MAP_PROG vxge_mBIT(7) +#define VXGE_HW_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR vxge_mBIT(11) +#define VXGE_HW_GENERAL_ERR_REG_PCI_RX_ILLEGAL_TLP_VPLANE vxge_mBIT(15) +#define VXGE_HW_GENERAL_ERR_REG_PCI_TRAINING_RESET_DET vxge_mBIT(19) +#define VXGE_HW_GENERAL_ERR_REG_PCI_PCI_LINK_DOWN_DET vxge_mBIT(23) +#define VXGE_HW_GENERAL_ERR_REG_PCI_RESET_ACK_DLLP vxge_mBIT(27) +/*0x03448*/ u64 general_err_mask; +/*0x03450*/ u64 general_err_alarm; +/*0x03458*/ u64 srpcim_msg_reg; +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE0_RMSG_INT \ + vxge_mBIT(0) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE1_RMSG_INT \ + vxge_mBIT(1) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE2_RMSG_INT \ + vxge_mBIT(2) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE3_RMSG_INT \ + vxge_mBIT(3) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE4_RMSG_INT \ + vxge_mBIT(4) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE5_RMSG_INT \ + vxge_mBIT(5) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE6_RMSG_INT \ + vxge_mBIT(6) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE7_RMSG_INT \ + vxge_mBIT(7) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE8_RMSG_INT \ + vxge_mBIT(8) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE9_RMSG_INT \ + vxge_mBIT(9) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE10_RMSG_INT \ + vxge_mBIT(10) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE11_RMSG_INT \ + vxge_mBIT(11) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE12_RMSG_INT \ + vxge_mBIT(12) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE13_RMSG_INT \ + vxge_mBIT(13) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE14_RMSG_INT \ + vxge_mBIT(14) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE15_RMSG_INT \ + vxge_mBIT(15) +#define VXGE_HW_SRPCIM_MSG_REG_SWIF_SRPCIM_TO_MRPCIM_VPLANE16_RMSG_INT \ + vxge_mBIT(16) +/*0x03460*/ u64 srpcim_msg_mask; +/*0x03468*/ u64 srpcim_msg_alarm; + u8 unused03600[0x03600-0x03470]; + +/*0x03600*/ u64 gcmg1_int_status; +#define VXGE_HW_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT vxge_mBIT(0) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT vxge_mBIT(1) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT vxge_mBIT(2) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT vxge_mBIT(3) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT vxge_mBIT(4) +#define 
VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT vxge_mBIT(5) +#define VXGE_HW_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT vxge_mBIT(6) +#define VXGE_HW_GCMG1_INT_STATUS_UQM_ERR_UQM_INT vxge_mBIT(7) +#define VXGE_HW_GCMG1_INT_STATUS_GQCC_ERR_GQCC_INT vxge_mBIT(8) +/*0x03608*/ u64 gcmg1_int_mask; + u8 unused03a00[0x03a00-0x03610]; + +/*0x03a00*/ u64 pcmg1_int_status; +#define VXGE_HW_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT vxge_mBIT(0) +#define VXGE_HW_PCMG1_INT_STATUS_PQCC_ERR_PQCC_INT vxge_mBIT(1) +#define VXGE_HW_PCMG1_INT_STATUS_PQCC_CQM_ERR_PQCC_CQM_INT vxge_mBIT(2) +#define VXGE_HW_PCMG1_INT_STATUS_PQCC_SQM_ERR_PQCC_SQM_INT vxge_mBIT(3) +/*0x03a08*/ u64 pcmg1_int_mask; + u8 unused04000[0x04000-0x03a10]; + +/*0x04000*/ u64 one_int_status; +#define VXGE_HW_ONE_INT_STATUS_RXPE_ERR_RXPE_INT vxge_mBIT(7) +#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_SG_ECC_ERR_TXPE_BCC_MEM_SG_ECC_INT \ + vxge_mBIT(13) +#define VXGE_HW_ONE_INT_STATUS_TXPE_BCC_MEM_DB_ECC_ERR_TXPE_BCC_MEM_DB_ECC_INT \ + vxge_mBIT(14) +#define VXGE_HW_ONE_INT_STATUS_TXPE_ERR_TXPE_INT vxge_mBIT(15) +#define VXGE_HW_ONE_INT_STATUS_DLM_ERR_DLM_INT vxge_mBIT(23) +#define VXGE_HW_ONE_INT_STATUS_PE_ERR_PE_INT vxge_mBIT(31) +#define VXGE_HW_ONE_INT_STATUS_RPE_ERR_RPE_INT vxge_mBIT(39) +#define VXGE_HW_ONE_INT_STATUS_RPE_FSM_ERR_RPE_FSM_INT vxge_mBIT(47) +#define VXGE_HW_ONE_INT_STATUS_OES_ERR_OES_INT vxge_mBIT(55) +/*0x04008*/ u64 one_int_mask; + u8 unused04818[0x04818-0x04010]; + +/*0x04818*/ u64 noa_wct_ctrl; +#define VXGE_HW_NOA_WCT_CTRL_VP_INT_NUM vxge_mBIT(0) +/*0x04820*/ u64 rc_cfg2; +#define VXGE_HW_RC_CFG2_BUFF1_SIZE(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_RC_CFG2_BUFF2_SIZE(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_RC_CFG2_BUFF3_SIZE(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_RC_CFG2_BUFF4_SIZE(val) vxge_vBIT(val, 48, 16) +/*0x04828*/ u64 rc_cfg3; +#define VXGE_HW_RC_CFG3_BUFF5_SIZE(val) vxge_vBIT(val, 0, 16) +/*0x04830*/ u64 rx_multi_cast_ctrl1; +#define VXGE_HW_RX_MULTI_CAST_CTRL1_ENABLE vxge_mBIT(7) +#define VXGE_HW_RX_MULTI_CAST_CTRL1_DELAY_COUNT(val) vxge_vBIT(val, 11, 5) +/*0x04838*/ u64 rxdm_dbg_rd; +#define VXGE_HW_RXDM_DBG_RD_ADDR(val) vxge_vBIT(val, 0, 12) +#define VXGE_HW_RXDM_DBG_RD_ENABLE vxge_mBIT(31) +/*0x04840*/ u64 rxdm_dbg_rd_data; +#define VXGE_HW_RXDM_DBG_RD_DATA_RMC_RXDM_DBG_RD_DATA(val) vxge_vBIT(val, 0, 64) +/*0x04848*/ u64 rqa_top_prty_for_vh[17]; +#define VXGE_HW_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \ + vxge_vBIT(val, 59, 5) + u8 unused04900[0x04900-0x048d0]; + +/*0x04900*/ u64 tim_status; +#define VXGE_HW_TIM_STATUS_TIM_RESET_IN_PROGRESS vxge_mBIT(0) +/*0x04908*/ u64 tim_ecc_enable; +#define VXGE_HW_TIM_ECC_ENABLE_VBLS_N vxge_mBIT(7) +#define VXGE_HW_TIM_ECC_ENABLE_BMAP_N vxge_mBIT(15) +#define VXGE_HW_TIM_ECC_ENABLE_BMAP_MSG_N vxge_mBIT(23) +/*0x04910*/ u64 tim_bp_ctrl; +#define VXGE_HW_TIM_BP_CTRL_RD_XON vxge_mBIT(7) +#define VXGE_HW_TIM_BP_CTRL_WR_XON vxge_mBIT(15) +#define VXGE_HW_TIM_BP_CTRL_ROCRC_BYP vxge_mBIT(23) +/*0x04918*/ u64 tim_resource_assignment_vh[17]; +#define VXGE_HW_TIM_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) vxge_vBIT(val, 0, 32) +/*0x049a0*/ u64 tim_bmap_mapping_vp_err[17]; +#define VXGE_HW_TIM_BMAP_MAPPING_VP_ERR_TIM_DEST_VPATH(val) vxge_vBIT(val, 3, 5) + u8 unused04b00[0x04b00-0x04a28]; + +/*0x04b00*/ u64 gcmg2_int_status; +#define VXGE_HW_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT vxge_mBIT(7) +#define VXGE_HW_GCMG2_INT_STATUS_GCP_ERR_GCP_INT vxge_mBIT(15) +#define VXGE_HW_GCMG2_INT_STATUS_CMC_ERR_CMC_INT vxge_mBIT(23) +/*0x04b08*/ u64 gcmg2_int_mask; +/*0x04b10*/ u64 
gxtmc_err_reg; +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_MEM_SG_ERR(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR vxge_mBIT(8) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(9) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(10) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(11) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(12) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR vxge_mBIT(13) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR vxge_mBIT(14) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR vxge_mBIT(15) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR vxge_mBIT(16) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR vxge_mBIT(17) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR vxge_mBIT(18) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_ARB_SM_ERR vxge_mBIT(19) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR vxge_mBIT(20) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW \ + vxge_mBIT(21) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW \ + vxge_mBIT(22) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR vxge_mBIT(23) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW \ + vxge_mBIT(24) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW \ + vxge_mBIT(25) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR vxge_mBIT(26) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR vxge_mBIT(27) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR vxge_mBIT(28) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR vxge_mBIT(29) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR vxge_mBIT(30) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR vxge_mBIT(31) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR vxge_mBIT(32) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR vxge_mBIT(33) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR vxge_mBIT(34) +#define VXGE_HW_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR vxge_mBIT(35) +/*0x04b18*/ u64 gxtmc_err_mask; +/*0x04b20*/ u64 gxtmc_err_alarm; +/*0x04b28*/ u64 cmc_err_reg; +#define VXGE_HW_CMC_ERR_REG_CMC_CMC_SM_ERR vxge_mBIT(0) +/*0x04b30*/ u64 cmc_err_mask; +/*0x04b38*/ u64 cmc_err_alarm; +/*0x04b40*/ u64 gcp_err_reg; +#define VXGE_HW_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(0) +#define VXGE_HW_GCP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(1) +#define VXGE_HW_GCP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(2) +#define VXGE_HW_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(3) +/*0x04b48*/ u64 gcp_err_mask; +/*0x04b50*/ u64 gcp_err_alarm; + u8 unused04f00[0x04f00-0x04b58]; + +/*0x04f00*/ u64 pcmg2_int_status; +#define VXGE_HW_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT vxge_mBIT(7) +#define VXGE_HW_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT vxge_mBIT(15) +#define VXGE_HW_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT vxge_mBIT(23) +/*0x04f08*/ u64 pcmg2_int_mask; +/*0x04f10*/ u64 pxtmc_err_reg; +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(val) vxge_vBIT(val, 0, 2) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR vxge_mBIT(2) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR vxge_mBIT(3) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR vxge_mBIT(4) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR vxge_mBIT(5) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR vxge_mBIT(6) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR vxge_mBIT(7) +#define 
VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR vxge_mBIT(8) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR vxge_mBIT(9) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR vxge_mBIT(10) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR vxge_mBIT(11) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR vxge_mBIT(12) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR vxge_mBIT(13) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR vxge_mBIT(14) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR vxge_mBIT(15) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR vxge_mBIT(16) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR vxge_mBIT(17) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR vxge_mBIT(18) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR vxge_mBIT(19) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR vxge_mBIT(20) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR vxge_mBIT(21) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR vxge_mBIT(22) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR vxge_mBIT(23) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR vxge_mBIT(24) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR vxge_mBIT(25) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR vxge_mBIT(26) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR vxge_mBIT(28) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR vxge_mBIT(29) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR vxge_mBIT(30) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR vxge_mBIT(31) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR vxge_mBIT(32) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR vxge_mBIT(33) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR vxge_mBIT(34) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR vxge_mBIT(35) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR vxge_mBIT(36) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR vxge_mBIT(37) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR vxge_mBIT(38) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR vxge_mBIT(39) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR vxge_mBIT(40) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR vxge_mBIT(41) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR vxge_mBIT(42) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR vxge_mBIT(43) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR vxge_mBIT(44) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_INFO_ERR vxge_mBIT(45) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_INFO_ERR vxge_mBIT(46) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_INFO_ERR vxge_mBIT(47) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_INFO_ERR vxge_mBIT(48) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_INFO_ERR vxge_mBIT(49) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_INFO_ERR vxge_mBIT(50) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_INFO_ERR vxge_mBIT(51) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_INFO_ERR vxge_mBIT(52) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_INFO_ERR vxge_mBIT(53) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_SG_ERR(val) vxge_vBIT(val, 54, 2) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR vxge_mBIT(56) +#define VXGE_HW_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR vxge_mBIT(57) +/*0x04f18*/ u64 pxtmc_err_mask; +/*0x04f20*/ u64 pxtmc_err_alarm; +/*0x04f28*/ u64 cp_err_reg; +#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_SG_ERR(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_SG_ERR(val) vxge_vBIT(val, 8, 2) 
+#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_SG_ERR vxge_mBIT(10) +#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_SG_ERR vxge_mBIT(11) +#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_SG_ERR vxge_mBIT(12) +#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_SG_ERR vxge_mBIT(13) +#define VXGE_HW_CP_ERR_REG_CP_MP2CP_SG_ERR vxge_mBIT(14) +#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_SG_ERR vxge_mBIT(15) +#define VXGE_HW_CP_ERR_REG_CP_STC2CP_SG_ERR(val) vxge_vBIT(val, 16, 2) +#define VXGE_HW_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(val) vxge_vBIT(val, 32, 2) +#define VXGE_HW_CP_ERR_REG_CP_CP_DTAG_DB_ERR vxge_mBIT(34) +#define VXGE_HW_CP_ERR_REG_CP_CP_ITAG_DB_ERR vxge_mBIT(35) +#define VXGE_HW_CP_ERR_REG_CP_CP_TRACE_DB_ERR vxge_mBIT(36) +#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_DB_ERR vxge_mBIT(37) +#define VXGE_HW_CP_ERR_REG_CP_MP2CP_DB_ERR vxge_mBIT(38) +#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_DB_ERR vxge_mBIT(39) +#define VXGE_HW_CP_ERR_REG_CP_STC2CP_DB_ERR(val) vxge_vBIT(val, 40, 2) +#define VXGE_HW_CP_ERR_REG_CP_H2L2CP_FIFO_ERR vxge_mBIT(48) +#define VXGE_HW_CP_ERR_REG_CP_STC2CP_FIFO_ERR vxge_mBIT(49) +#define VXGE_HW_CP_ERR_REG_CP_STE2CP_FIFO_ERR vxge_mBIT(50) +#define VXGE_HW_CP_ERR_REG_CP_TTE2CP_FIFO_ERR vxge_mBIT(51) +#define VXGE_HW_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR vxge_mBIT(52) +#define VXGE_HW_CP_ERR_REG_CP_CP2DMA_FIFO_ERR vxge_mBIT(53) +#define VXGE_HW_CP_ERR_REG_CP_DAM2CP_FIFO_ERR vxge_mBIT(54) +#define VXGE_HW_CP_ERR_REG_CP_MP2CP_FIFO_ERR vxge_mBIT(55) +#define VXGE_HW_CP_ERR_REG_CP_QCC2CP_FIFO_ERR vxge_mBIT(56) +#define VXGE_HW_CP_ERR_REG_CP_DMA2CP_FIFO_ERR vxge_mBIT(57) +#define VXGE_HW_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(60) +#define VXGE_HW_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(61) +#define VXGE_HW_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR vxge_mBIT(62) +#define VXGE_HW_CP_ERR_REG_CP_PIFT_CREDIT_ERR vxge_mBIT(63) +/*0x04f30*/ u64 cp_err_mask; +/*0x04f38*/ u64 cp_err_alarm; + u8 unused04fe8[0x04f50-0x04f40]; + +/*0x04f50*/ u64 cp_exc_reg; +#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_INFO_INT vxge_mBIT(47) +#define VXGE_HW_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT vxge_mBIT(55) +#define VXGE_HW_CP_EXC_REG_CP_CP_SERR vxge_mBIT(63) +/*0x04f58*/ u64 cp_exc_mask; +/*0x04f60*/ u64 cp_exc_alarm; +/*0x04f68*/ u64 cp_exc_cause; +#define VXGE_HW_CP_EXC_CAUSE_CP_CP_CAUSE(val) vxge_vBIT(val, 32, 32) + u8 unused05200[0x05200-0x04f70]; + +/*0x05200*/ u64 msg_int_status; +#define VXGE_HW_MSG_INT_STATUS_TIM_ERR_TIM_INT vxge_mBIT(7) +#define VXGE_HW_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT vxge_mBIT(60) +#define VXGE_HW_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT vxge_mBIT(61) +#define VXGE_HW_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT vxge_mBIT(62) +#define VXGE_HW_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT vxge_mBIT(63) +/*0x05208*/ u64 msg_int_mask; +/*0x05210*/ u64 tim_err_reg; +#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_SG_ERR vxge_mBIT(4) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_SG_ERR vxge_mBIT(5) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_SG_ERR vxge_mBIT(6) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_SG_ERR vxge_mBIT(7) +#define VXGE_HW_TIM_ERR_REG_TIM_VBLS_DB_ERR vxge_mBIT(12) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR vxge_mBIT(13) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR vxge_mBIT(14) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR vxge_mBIT(15) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR vxge_mBIT(18) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR vxge_mBIT(19) +#define VXGE_HW_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR vxge_mBIT(20) +#define 
VXGE_HW_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR vxge_mBIT(22) +#define VXGE_HW_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR vxge_mBIT(23) +#define VXGE_HW_TIM_ERR_REG_TIM_ROCRCIF_MISMATCH vxge_mBIT(46) +#define VXGE_HW_TIM_ERR_REG_TIM_BMAP_MAPPING_VP_ERR(n) vxge_mBIT(n) +/*0x05218*/ u64 tim_err_mask; +/*0x05220*/ u64 tim_err_alarm; +/*0x05228*/ u64 msg_err_reg; +#define VXGE_HW_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(0) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR vxge_mBIT(1) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR \ + vxge_mBIT(2) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR \ + vxge_mBIT(3) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR vxge_mBIT(4) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR vxge_mBIT(5) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(6) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR vxge_mBIT(7) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_SG_ERR vxge_mBIT(8) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_SG_ERR vxge_mBIT(10) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_SG_ERR vxge_mBIT(12) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_SG_ERR vxge_mBIT(14) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_SG_ERR vxge_mBIT(16) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_SG_ERR vxge_mBIT(17) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_SG_ERR vxge_mBIT(18) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_SG_ERR vxge_mBIT(19) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_SG_ERR vxge_mBIT(20) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_SG_ERR vxge_mBIT(21) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_SG_ERR vxge_mBIT(26) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_SG_ERR vxge_mBIT(27) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_SG_ERR vxge_mBIT(29) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_SG_ERR vxge_mBIT(31) +#define VXGE_HW_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR vxge_mBIT(33) +#define VXGE_HW_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR vxge_mBIT(34) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR vxge_mBIT(35) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR \ + vxge_mBIT(36) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR vxge_mBIT(38) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR vxge_mBIT(39) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR vxge_mBIT(41) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR vxge_mBIT(43) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR vxge_mBIT(45) +#define VXGE_HW_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR vxge_mBIT(47) +#define VXGE_HW_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR vxge_mBIT(48) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR vxge_mBIT(49) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR vxge_mBIT(50) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR vxge_mBIT(51) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR vxge_mBIT(52) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR vxge_mBIT(53) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR vxge_mBIT(54) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR vxge_mBIT(55) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR vxge_mBIT(56) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR vxge_mBIT(57) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR vxge_mBIT(58) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR vxge_mBIT(59) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR vxge_mBIT(60) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR vxge_mBIT(61) +#define 
VXGE_HW_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR vxge_mBIT(62) +#define VXGE_HW_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR vxge_mBIT(63) +/*0x05230*/ u64 msg_err_mask; +/*0x05238*/ u64 msg_err_alarm; + u8 unused05340[0x05340-0x05240]; + +/*0x05340*/ u64 msg_exc_reg; +#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_INFO_INT vxge_mBIT(50) +#define VXGE_HW_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT vxge_mBIT(51) +#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_INFO_INT vxge_mBIT(54) +#define VXGE_HW_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT vxge_mBIT(55) +#define VXGE_HW_MSG_EXC_REG_MP_MXP_SERR vxge_mBIT(62) +#define VXGE_HW_MSG_EXC_REG_UP_UXP_SERR vxge_mBIT(63) +/*0x05348*/ u64 msg_exc_mask; +/*0x05350*/ u64 msg_exc_alarm; +/*0x05358*/ u64 msg_exc_cause; +#define VXGE_HW_MSG_EXC_CAUSE_MP_MXP(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_MSG_EXC_CAUSE_UP_UXP(val) vxge_vBIT(val, 32, 32) + u8 unused05368[0x05380-0x05360]; + +/*0x05380*/ u64 msg_err2_reg; +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(0) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(1) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(2) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR \ + vxge_mBIT(3) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR vxge_mBIT(4) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR \ + vxge_mBIT(5) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR vxge_mBIT(6) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(7) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR vxge_mBIT(8) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR vxge_mBIT(9) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR vxge_mBIT(10) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR vxge_mBIT(11) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR \ + vxge_mBIT(12) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR \ + vxge_mBIT(13) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR \ + vxge_mBIT(14) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR \ + vxge_mBIT(15) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR \ + vxge_mBIT(16) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR \ + vxge_mBIT(17) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR \ + vxge_mBIT(18) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR \ + vxge_mBIT(19) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR \ + vxge_mBIT(20) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR \ + vxge_mBIT(21) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR \ + vxge_mBIT(22) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR \ + vxge_mBIT(23) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR \ + vxge_mBIT(24) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR \ + vxge_mBIT(25) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR \ + vxge_mBIT(26) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR \ + vxge_mBIT(27) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR \ + vxge_mBIT(28) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR vxge_mBIT(29) +#define 
VXGE_HW_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(30) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(31) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR \ + vxge_mBIT(32) +#define VXGE_HW_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(33) +#define VXGE_HW_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR vxge_mBIT(34) +#define VXGE_HW_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR vxge_mBIT(62) +#define VXGE_HW_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR vxge_mBIT(63) +/*0x05388*/ u64 msg_err2_mask; +/*0x05390*/ u64 msg_err2_alarm; +/*0x05398*/ u64 msg_err3_reg; +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR0 vxge_mBIT(0) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR1 vxge_mBIT(1) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR2 vxge_mBIT(2) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR3 vxge_mBIT(3) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR4 vxge_mBIT(4) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR5 vxge_mBIT(5) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR6 vxge_mBIT(6) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_SG_ERR7 vxge_mBIT(7) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR0 vxge_mBIT(8) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_SG_ERR1 vxge_mBIT(9) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR0 vxge_mBIT(16) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR1 vxge_mBIT(17) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR2 vxge_mBIT(18) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR3 vxge_mBIT(19) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR4 vxge_mBIT(20) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR5 vxge_mBIT(21) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR6 vxge_mBIT(22) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_SG_ERR7 vxge_mBIT(23) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR0 vxge_mBIT(24) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_SG_ERR1 vxge_mBIT(25) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 vxge_mBIT(32) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 vxge_mBIT(33) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 vxge_mBIT(34) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 vxge_mBIT(35) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 vxge_mBIT(36) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 vxge_mBIT(37) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 vxge_mBIT(38) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 vxge_mBIT(39) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 vxge_mBIT(40) +#define VXGE_HW_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 vxge_mBIT(41) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 vxge_mBIT(48) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 vxge_mBIT(49) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 vxge_mBIT(50) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 vxge_mBIT(51) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 vxge_mBIT(52) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 vxge_mBIT(53) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 vxge_mBIT(54) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 vxge_mBIT(55) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 vxge_mBIT(56) +#define VXGE_HW_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1 vxge_mBIT(57) +/*0x053a0*/ u64 msg_err3_mask; +/*0x053a8*/ u64 msg_err3_alarm; + u8 unused05600[0x05600-0x053b0]; + +/*0x05600*/ u64 fau_gen_err_reg; +#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT0_PERMANENT_STOP vxge_mBIT(3) +#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT1_PERMANENT_STOP 
vxge_mBIT(7) +#define VXGE_HW_FAU_GEN_ERR_REG_FMPF_PORT2_PERMANENT_STOP vxge_mBIT(11) +#define VXGE_HW_FAU_GEN_ERR_REG_FALR_AUTO_LRO_NOTIFICATION vxge_mBIT(15) +/*0x05608*/ u64 fau_gen_err_mask; +/*0x05610*/ u64 fau_gen_err_alarm; +/*0x05618*/ u64 fau_ecc_err_reg; +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_SG_ERR vxge_mBIT(0) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR vxge_mBIT(1) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_SG_ERR(val) \ + vxge_vBIT(val, 2, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(val) \ + vxge_vBIT(val, 4, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_SG_ERR vxge_mBIT(6) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR vxge_mBIT(7) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_SG_ERR(val) \ + vxge_vBIT(val, 8, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(val) \ + vxge_vBIT(val, 10, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_SG_ERR vxge_mBIT(12) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR vxge_mBIT(13) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_SG_ERR(val) \ + vxge_vBIT(val, 14, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(val) \ + vxge_vBIT(val, 16, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_SG_ERR(val) \ + vxge_vBIT(val, 18, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(val) \ + vxge_vBIT(val, 20, 2) +#define VXGE_HW_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR vxge_mBIT(31) +/*0x05620*/ u64 fau_ecc_err_mask; +/*0x05628*/ u64 fau_ecc_err_alarm; + u8 unused05658[0x05658-0x05630]; +/*0x05658*/ u64 fau_pa_cfg; +#define VXGE_HW_FAU_PA_CFG_REPL_L4_COMP_CSUM vxge_mBIT(3) +#define VXGE_HW_FAU_PA_CFG_REPL_L3_INCL_CF vxge_mBIT(7) +#define VXGE_HW_FAU_PA_CFG_REPL_L3_COMP_CSUM vxge_mBIT(11) + u8 unused05668[0x05668-0x05660]; + +/*0x05668*/ u64 dbg_stats_fau_rx_path; +#define VXGE_HW_DBG_STATS_FAU_RX_PATH_RX_PERMITTED_FRMS(val) \ + vxge_vBIT(val, 32, 32) + u8 unused056c0[0x056c0-0x05670]; + +/*0x056c0*/ u64 fau_lag_cfg; +#define VXGE_HW_FAU_LAG_CFG_COLL_ALG(val) vxge_vBIT(val, 2, 2) +#define VXGE_HW_FAU_LAG_CFG_INCR_RX_AGGR_STATS vxge_mBIT(7) + u8 unused05800[0x05800-0x056c8]; + +/*0x05800*/ u64 tpa_int_status; +#define VXGE_HW_TPA_INT_STATUS_ORP_ERR_ORP_INT vxge_mBIT(15) +#define VXGE_HW_TPA_INT_STATUS_PTM_ALARM_PTM_INT vxge_mBIT(23) +#define VXGE_HW_TPA_INT_STATUS_TPA_ERROR_TPA_INT vxge_mBIT(31) +/*0x05808*/ u64 tpa_int_mask; +/*0x05810*/ u64 orp_err_reg; +#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_SG_ERR vxge_mBIT(3) +#define VXGE_HW_ORP_ERR_REG_ORP_FIFO_DB_ERR vxge_mBIT(7) +#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR vxge_mBIT(11) +#define VXGE_HW_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR vxge_mBIT(15) +#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR vxge_mBIT(19) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR vxge_mBIT(23) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR vxge_mBIT(31) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR vxge_mBIT(35) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR vxge_mBIT(39) +#define VXGE_HW_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR vxge_mBIT(43) +#define VXGE_HW_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR vxge_mBIT(47) +/*0x05818*/ u64 orp_err_mask; +/*0x05820*/ u64 orp_err_alarm; +/*0x05828*/ u64 ptm_alarm_reg; +#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR vxge_mBIT(3) +#define VXGE_HW_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR vxge_mBIT(7) +#define VXGE_HW_PTM_ALARM_REG_XFMD_RD_FIFO_ERR vxge_mBIT(11) 
+#define VXGE_HW_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR vxge_mBIT(15) +#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(val) vxge_vBIT(val, 18, 2) +#define VXGE_HW_PTM_ALARM_REG_PTM_FRMM_ECC_SG_ERR(val) vxge_vBIT(val, 22, 2) +/*0x05830*/ u64 ptm_alarm_mask; +/*0x05838*/ u64 ptm_alarm_alarm; +/*0x05840*/ u64 tpa_error_reg; +#define VXGE_HW_TPA_ERROR_REG_TPA_FSM_ERR_ALARM vxge_mBIT(3) +#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR vxge_mBIT(7) +#define VXGE_HW_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_SG_ERR vxge_mBIT(11) +/*0x05848*/ u64 tpa_error_mask; +/*0x05850*/ u64 tpa_error_alarm; +/*0x05858*/ u64 tpa_global_cfg; +#define VXGE_HW_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N vxge_mBIT(7) +#define VXGE_HW_TPA_GLOBAL_CFG_ECC_ENABLE_N vxge_mBIT(35) + u8 unused05868[0x05870-0x05860]; + +/*0x05870*/ u64 ptm_ecc_cfg; +#define VXGE_HW_PTM_ECC_CFG_PTM_FRMM_ECC_EN_N vxge_mBIT(3) +/*0x05878*/ u64 ptm_phase_cfg; +#define VXGE_HW_PTM_PHASE_CFG_FRMM_WR_PHASE_EN vxge_mBIT(3) +#define VXGE_HW_PTM_PHASE_CFG_FRMM_RD_PHASE_EN vxge_mBIT(7) + u8 unused05898[0x05898-0x05880]; + +/*0x05898*/ u64 dbg_stats_tpa_tx_path; +#define VXGE_HW_DBG_STATS_TPA_TX_PATH_TX_PERMITTED_FRMS(val) \ + vxge_vBIT(val, 32, 32) + u8 unused05900[0x05900-0x058a0]; + +/*0x05900*/ u64 tmac_int_status; +#define VXGE_HW_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT vxge_mBIT(3) +#define VXGE_HW_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT vxge_mBIT(7) +/*0x05908*/ u64 tmac_int_mask; +/*0x05910*/ u64 txmac_gen_err_reg; +#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP vxge_mBIT(3) +#define VXGE_HW_TXMAC_GEN_ERR_REG_TMACJ_NO_VALID_VSPORT vxge_mBIT(7) +/*0x05918*/ u64 txmac_gen_err_mask; +/*0x05920*/ u64 txmac_gen_err_alarm; +/*0x05928*/ u64 txmac_ecc_err_reg; +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_SG_ERR vxge_mBIT(3) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR vxge_mBIT(7) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_SG_ERR vxge_mBIT(11) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR vxge_mBIT(15) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_SG_ERR vxge_mBIT(19) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR vxge_mBIT(23) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR vxge_mBIT(31) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR vxge_mBIT(35) +#define VXGE_HW_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR vxge_mBIT(39) +/*0x05930*/ u64 txmac_ecc_err_mask; +/*0x05938*/ u64 txmac_ecc_err_alarm; + u8 unused05978[0x05978-0x05940]; + +/*0x05978*/ u64 dbg_stat_tx_any_frms; +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT0_TX_ANY_FRMS(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT1_TX_ANY_FRMS(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_DBG_STAT_TX_ANY_FRMS_PORT2_TX_ANY_FRMS(val) \ + vxge_vBIT(val, 16, 8) + u8 unused059a0[0x059a0-0x05980]; + +/*0x059a0*/ u64 txmac_link_util_port[3]; +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_UTILIZATION(val) \ + vxge_vBIT(val, 1, 7) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_FRAC_UTIL(val) \ + vxge_vBIT(val, 12, 4) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_PKT_WEIGHT(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_TXMAC_LINK_UTIL_PORT_TMAC_TMAC_SCALE_FACTOR vxge_mBIT(23) +/*0x059b8*/ u64 txmac_cfg0_port[3]; +#define VXGE_HW_TXMAC_CFG0_PORT_TMAC_EN vxge_mBIT(3) +#define VXGE_HW_TXMAC_CFG0_PORT_APPEND_PAD vxge_mBIT(7) +#define 
VXGE_HW_TXMAC_CFG0_PORT_PAD_BYTE(val) vxge_vBIT(val, 8, 8) +/*0x059d0*/ u64 txmac_cfg1_port[3]; +#define VXGE_HW_TXMAC_CFG1_PORT_AVG_IPG(val) vxge_vBIT(val, 40, 8) +/*0x059e8*/ u64 txmac_status_port[3]; +#define VXGE_HW_TXMAC_STATUS_PORT_TMAC_TX_FRM_SENT vxge_mBIT(3) + u8 unused05a20[0x05a20-0x05a00]; + +/*0x05a20*/ u64 lag_distrib_dest; +#define VXGE_HW_LAG_DISTRIB_DEST_MAP_VPATH(n) vxge_mBIT(n) +/*0x05a28*/ u64 lag_marker_cfg; +#define VXGE_HW_LAG_MARKER_CFG_GEN_RCVR_EN vxge_mBIT(3) +#define VXGE_HW_LAG_MARKER_CFG_RESP_EN vxge_mBIT(7) +#define VXGE_HW_LAG_MARKER_CFG_RESP_TIMEOUT(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_LAG_MARKER_CFG_THROTTLE_MRKR_RESP vxge_mBIT(51) +/*0x05a30*/ u64 lag_tx_cfg; +#define VXGE_HW_LAG_TX_CFG_INCR_TX_AGGR_STATS vxge_mBIT(3) +#define VXGE_HW_LAG_TX_CFG_DISTRIB_ALG_SEL(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL vxge_mBIT(11) +#define VXGE_HW_LAG_TX_CFG_COLL_MAX_DELAY(val) vxge_vBIT(val, 16, 16) +/*0x05a38*/ u64 lag_tx_status; +#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_EMPTIED_LINK(val) \ + vxge_vBIT(val, 0, 8) +#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKR(val) \ + vxge_vBIT(val, 8, 8) +#define VXGE_HW_LAG_TX_STATUS_TLAG_TIMER_VAL_SLOW_PROTO_MRKRRESP(val) \ + vxge_vBIT(val, 16, 8) + u8 unused05d48[0x05d48-0x05a40]; + +/*0x05d48*/ u64 srpcim_to_mrpcim_vplane_rmsg[17]; +#define \ +VXGE_HAL_SRPCIM_TO_MRPCIM_VPLANE_RMSG_SWIF_SRPCIM_TO_MRPCIM_VPLANE_RMSG(val)\ + vxge_vBIT(val, 0, 64) + u8 unused06420[0x06420-0x05dd0]; + +/*0x06420*/ u64 mrpcim_to_srpcim_vplane_wmsg[17]; +#define VXGE_HW_MRPCIM_TO_SRPCIM_VPLANE_WMSG_MRPCIM_TO_SRPCIM_VPLANE_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x064a8*/ u64 mrpcim_to_srpcim_vplane_wmsg_trig[17]; + +/*0x06530*/ u64 debug_stats0; +#define VXGE_HW_DEBUG_STATS0_RSTDROP_MSG(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_DEBUG_STATS0_RSTDROP_CPL(val) vxge_vBIT(val, 32, 32) +/*0x06538*/ u64 debug_stats1; +#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT0(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_DEBUG_STATS1_RSTDROP_CLIENT1(val) vxge_vBIT(val, 32, 32) +/*0x06540*/ u64 debug_stats2; +#define VXGE_HW_DEBUG_STATS2_RSTDROP_CLIENT2(val) vxge_vBIT(val, 0, 32) +/*0x06548*/ u64 debug_stats3_vplane[17]; +#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_PH(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_NPH(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_DEBUG_STATS3_VPLANE_DEPL_CPLH(val) vxge_vBIT(val, 32, 16) +/*0x065d0*/ u64 debug_stats4_vplane[17]; +#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_PD(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_NPD(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_DEBUG_STATS4_VPLANE_DEPL_CPLD(val) vxge_vBIT(val, 32, 16) + + u8 unused07000[0x07000-0x06658]; + +/*0x07000*/ u64 mrpcim_general_int_status; +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(0) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(1) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT vxge_mBIT(2) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT vxge_mBIT(4) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT vxge_mBIT(5) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT vxge_mBIT(6) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_GCMG3_INT vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT vxge_mBIT(8) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT vxge_mBIT(9) 
+#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT vxge_mBIT(10) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT vxge_mBIT(12) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(13) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT vxge_mBIT(14) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TMAC_INT vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT vxge_mBIT(16) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_FBMC_INT vxge_mBIT(17) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT vxge_mBIT(18) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_TPA_INT vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT vxge_mBIT(20) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_ONE_INT vxge_mBIT(21) +#define VXGE_HW_MRPCIM_GENERAL_INT_STATUS_MSG_INT vxge_mBIT(22) +/*0x07008*/ u64 mrpcim_general_int_mask; +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PIC_INT vxge_mBIT(0) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCI_INT vxge_mBIT(1) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RTDMA_INT vxge_mBIT(2) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMCT_INT vxge_mBIT(4) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG1_INT vxge_mBIT(5) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG2_INT vxge_mBIT(6) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_GCMG3_INT vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFL_INT vxge_mBIT(8) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3CMIFU_INT vxge_mBIT(9) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG1_INT vxge_mBIT(10) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG2_INT vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_PCMG3_INT vxge_mBIT(12) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(13) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_RXMAC_INT vxge_mBIT(14) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TMAC_INT vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBIF_INT vxge_mBIT(16) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_FBMC_INT vxge_mBIT(17) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_G3FBCT_INT vxge_mBIT(18) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_TPA_INT vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_DRBELL_INT vxge_mBIT(20) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_ONE_INT vxge_mBIT(21) +#define VXGE_HW_MRPCIM_GENERAL_INT_MASK_MSG_INT vxge_mBIT(22) +/*0x07010*/ u64 mrpcim_ppif_int_status; +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT vxge_mBIT(3) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT vxge_mBIT(7) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT vxge_mBIT(11) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT vxge_mBIT(15) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT vxge_mBIT(19) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT vxge_mBIT(27) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE0_CRD_INT_VPLANE0_INT\ + vxge_mBIT(31) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE1_CRD_INT_VPLANE1_INT\ + vxge_mBIT(32) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE2_CRD_INT_VPLANE2_INT\ + vxge_mBIT(33) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE3_CRD_INT_VPLANE3_INT\ + vxge_mBIT(34) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE4_CRD_INT_VPLANE4_INT\ + vxge_mBIT(35) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE5_CRD_INT_VPLANE5_INT\ + vxge_mBIT(36) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE6_CRD_INT_VPLANE6_INT\ + vxge_mBIT(37) 
+#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE7_CRD_INT_VPLANE7_INT\ + vxge_mBIT(38) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE8_CRD_INT_VPLANE8_INT\ + vxge_mBIT(39) +#define VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE9_CRD_INT_VPLANE9_INT\ + vxge_mBIT(40) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE10_CRD_INT_VPLANE10_INT \ + vxge_mBIT(41) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE11_CRD_INT_VPLANE11_INT \ + vxge_mBIT(42) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE12_CRD_INT_VPLANE12_INT \ + vxge_mBIT(43) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE13_CRD_INT_VPLANE13_INT \ + vxge_mBIT(44) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE14_CRD_INT_VPLANE14_INT \ + vxge_mBIT(45) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE15_CRD_INT_VPLANE15_INT \ + vxge_mBIT(46) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_VPLANE16_CRD_INT_VPLANE16_INT \ + vxge_mBIT(47) +#define \ +VXGE_HW_MRPCIM_PPIF_INT_STATUS_VPATH_TO_MRPCIM_ALARM_VPATH_TO_MRPCIM_ALARM_INT \ + vxge_mBIT(55) +/*0x07018*/ u64 mrpcim_ppif_int_mask; + u8 unused07028[0x07028-0x07020]; + +/*0x07028*/ u64 ini_errors_reg; +#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT_UNUSED_TAG vxge_mBIT(3) +#define VXGE_HW_INI_ERRORS_REG_SCPL_CPL_TIMEOUT vxge_mBIT(7) +#define VXGE_HW_INI_ERRORS_REG_DCPL_FSM_ERR vxge_mBIT(11) +#define VXGE_HW_INI_ERRORS_REG_DCPL_POISON vxge_mBIT(12) +#define VXGE_HW_INI_ERRORS_REG_DCPL_UNSUPPORTED vxge_mBIT(15) +#define VXGE_HW_INI_ERRORS_REG_DCPL_ABORT vxge_mBIT(19) +#define VXGE_HW_INI_ERRORS_REG_INI_TLP_ABORT vxge_mBIT(23) +#define VXGE_HW_INI_ERRORS_REG_INI_DLLP_ABORT vxge_mBIT(27) +#define VXGE_HW_INI_ERRORS_REG_INI_ECRC_ERR vxge_mBIT(31) +#define VXGE_HW_INI_ERRORS_REG_INI_BUF_DB_ERR vxge_mBIT(35) +#define VXGE_HW_INI_ERRORS_REG_INI_BUF_SG_ERR vxge_mBIT(39) +#define VXGE_HW_INI_ERRORS_REG_INI_DATA_OVERFLOW vxge_mBIT(43) +#define VXGE_HW_INI_ERRORS_REG_INI_HDR_OVERFLOW vxge_mBIT(47) +#define VXGE_HW_INI_ERRORS_REG_INI_MRD_SYS_DROP vxge_mBIT(51) +#define VXGE_HW_INI_ERRORS_REG_INI_MWR_SYS_DROP vxge_mBIT(55) +#define VXGE_HW_INI_ERRORS_REG_INI_MRD_CLIENT_DROP vxge_mBIT(59) +#define VXGE_HW_INI_ERRORS_REG_INI_MWR_CLIENT_DROP vxge_mBIT(63) +/*0x07030*/ u64 ini_errors_mask; +/*0x07038*/ u64 ini_errors_alarm; +/*0x07040*/ u64 dma_errors_reg; +#define VXGE_HW_DMA_ERRORS_REG_RDARB_FSM_ERR vxge_mBIT(3) +#define VXGE_HW_DMA_ERRORS_REG_WRARB_FSM_ERR vxge_mBIT(7) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW vxge_mBIT(8) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW vxge_mBIT(9) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW vxge_mBIT(10) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW vxge_mBIT(11) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW vxge_mBIT(12) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW vxge_mBIT(13) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW vxge_mBIT(14) +#define VXGE_HW_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW vxge_mBIT(15) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW vxge_mBIT(16) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW vxge_mBIT(17) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW vxge_mBIT(18) +#define VXGE_HW_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW vxge_mBIT(19) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW vxge_mBIT(20) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW vxge_mBIT(21) +#define 
VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW vxge_mBIT(22) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW vxge_mBIT(23) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW vxge_mBIT(24) +#define VXGE_HW_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW vxge_mBIT(25) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW vxge_mBIT(28) +#define VXGE_HW_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW vxge_mBIT(29) +#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_FSM_ERR vxge_mBIT(32) +#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR vxge_mBIT(33) +#define VXGE_HW_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR vxge_mBIT(34) +/*0x07048*/ u64 dma_errors_mask; +/*0x07050*/ u64 dma_errors_alarm; +/*0x07058*/ u64 tgt_errors_reg; +#define VXGE_HW_TGT_ERRORS_REG_TGT_VENDOR_MSG vxge_mBIT(0) +#define VXGE_HW_TGT_ERRORS_REG_TGT_MSG_UNLOCK vxge_mBIT(1) +#define VXGE_HW_TGT_ERRORS_REG_TGT_ILLEGAL_TLP_BE vxge_mBIT(2) +#define VXGE_HW_TGT_ERRORS_REG_TGT_BOOT_WRITE vxge_mBIT(3) +#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_WR_CROSS_QWRANGE vxge_mBIT(4) +#define VXGE_HW_TGT_ERRORS_REG_TGT_PIF_READ_CROSS_QWRANGE vxge_mBIT(5) +#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_READ vxge_mBIT(6) +#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_READ vxge_mBIT(7) +#define VXGE_HW_TGT_ERRORS_REG_TGT_USDC_WR_CROSS_QWRANGE vxge_mBIT(8) +#define VXGE_HW_TGT_ERRORS_REG_TGT_MSIX_BEYOND_RANGE vxge_mBIT(9) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_KDFC_POISON vxge_mBIT(10) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_USDC_POISON vxge_mBIT(11) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_PIF_POISON vxge_mBIT(12) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MSIX_POISON vxge_mBIT(13) +#define VXGE_HW_TGT_ERRORS_REG_TGT_WR_TO_MRIOV_POISON vxge_mBIT(14) +#define VXGE_HW_TGT_ERRORS_REG_TGT_NOT_MEM_TLP vxge_mBIT(15) +#define VXGE_HW_TGT_ERRORS_REG_TGT_UNKNOWN_MEM_TLP vxge_mBIT(16) +#define VXGE_HW_TGT_ERRORS_REG_TGT_REQ_FSM_ERR vxge_mBIT(17) +#define VXGE_HW_TGT_ERRORS_REG_TGT_CPL_FSM_ERR vxge_mBIT(18) +#define VXGE_HW_TGT_ERRORS_REG_TGT_KDFC_PROT_ERR vxge_mBIT(19) +#define VXGE_HW_TGT_ERRORS_REG_TGT_SWIF_PROT_ERR vxge_mBIT(20) +#define VXGE_HW_TGT_ERRORS_REG_TGT_MRIOV_MEM_MAP_CFG_ERR vxge_mBIT(21) +/*0x07060*/ u64 tgt_errors_mask; +/*0x07068*/ u64 tgt_errors_alarm; +/*0x07070*/ u64 config_errors_reg; +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_STOP_COND vxge_mBIT(3) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_ILLEGAL_START_COND vxge_mBIT(7) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXP_RD_CNT vxge_mBIT(11) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_EXTRA_CYCLE vxge_mBIT(15) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR vxge_mBIT(19) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REQ_COLLISION vxge_mBIT(23) +#define VXGE_HW_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR vxge_mBIT(27) +#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT vxge_mBIT(31) +#define VXGE_HW_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT vxge_mBIT(35) +#define VXGE_HW_CONFIG_ERRORS_REG_CFGM_FSM_ERR vxge_mBIT(39) +#define VXGE_HW_CONFIG_ERRORS_REG_RIC_FSM_ERR vxge_mBIT(43) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_ILLEGAL_ACCESS vxge_mBIT(47) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TIMEOUT vxge_mBIT(51) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_FSM_ERR vxge_mBIT(55) +#define VXGE_HW_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR vxge_mBIT(59) +#define VXGE_HW_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT vxge_mBIT(63) +/*0x07078*/ u64 config_errors_mask; +/*0x07080*/ u64 config_errors_alarm; + u8 unused07090[0x07090-0x07088]; + +/*0x07090*/ u64 crdt_errors_reg; +#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR vxge_mBIT(11) +#define 
VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL \ + vxge_mBIT(15) +#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(19) +#define VXGE_HW_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL \ + vxge_mBIT(23) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR vxge_mBIT(35) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL vxge_mBIT(39) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL vxge_mBIT(43) +#define VXGE_HW_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL \ + vxge_mBIT(47) +/*0x07098*/ u64 crdt_errors_mask; +/*0x070a0*/ u64 crdt_errors_alarm; + u8 unused070b0[0x070b0-0x070a8]; + +/*0x070b0*/ u64 mrpcim_general_errors_reg; +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR vxge_mBIT(23) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR vxge_mBIT(27) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR vxge_mBIT(31) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(35) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR vxge_mBIT(39) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW vxge_mBIT(43) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_DURING_SW_RESET \ + vxge_mBIT(47) +#define VXGE_HW_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR vxge_mBIT(51) +/*0x070b8*/ u64 mrpcim_general_errors_mask; +/*0x070c0*/ u64 mrpcim_general_errors_alarm; + u8 unused070d0[0x070d0-0x070c8]; + +/*0x070d0*/ u64 pll_errors_reg; +#define VXGE_HW_PLL_ERRORS_REG_CORE_CMG_PLL_OOL vxge_mBIT(3) +#define VXGE_HW_PLL_ERRORS_REG_CORE_FB_PLL_OOL vxge_mBIT(7) +#define VXGE_HW_PLL_ERRORS_REG_CORE_X_PLL_OOL vxge_mBIT(11) +/*0x070d8*/ u64 pll_errors_mask; +/*0x070e0*/ u64 pll_errors_alarm; +/*0x070e8*/ u64 srpcim_to_mrpcim_alarm_reg; +#define VXGE_HW_SRPCIM_TO_MRPCIM_ALARM_REG_PPIF_SRPCIM_TO_MRPCIM_ALARM(val) \ + vxge_vBIT(val, 0, 17) +/*0x070f0*/ u64 srpcim_to_mrpcim_alarm_mask; +/*0x070f8*/ u64 srpcim_to_mrpcim_alarm_alarm; +/*0x07100*/ u64 vpath_to_mrpcim_alarm_reg; +#define VXGE_HW_VPATH_TO_MRPCIM_ALARM_REG_PPIF_VPATH_TO_MRPCIM_ALARM(val) \ + vxge_vBIT(val, 0, 17) +/*0x07108*/ u64 vpath_to_mrpcim_alarm_mask; +/*0x07110*/ u64 vpath_to_mrpcim_alarm_alarm; + u8 unused07128[0x07128-0x07118]; + +/*0x07128*/ u64 crdt_errors_vplane_reg[17]; +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_CONSUME_CRDT_ERR \ + vxge_mBIT(3) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_CONSUME_CRDT_ERR \ + vxge_mBIT(7) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_H_RETURN_CRDT_ERR \ + vxge_mBIT(11) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_WRCRDTARB_P_D_RETURN_CRDT_ERR \ + vxge_mBIT(15) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_CONSUME_CRDT_ERR \ + vxge_mBIT(19) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_NP_H_RETURN_CRDT_ERR \ + vxge_mBIT(23) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_CONSUME_TAG_ERR \ + vxge_mBIT(27) +#define VXGE_HW_CRDT_ERRORS_VPLANE_REG_RDCRDTARB_TAG_RETURN_TAG_ERR \ + vxge_mBIT(31) +/*0x07130*/ u64 crdt_errors_vplane_mask[17]; +/*0x07138*/ u64 crdt_errors_vplane_alarm[17]; + u8 unused072f0[0x072f0-0x072c0]; + +/*0x072f0*/ u64 mrpcim_rst_in_prog; +#define 
VXGE_HW_MRPCIM_RST_IN_PROG_MRPCIM_RST_IN_PROG vxge_mBIT(7) +/*0x072f8*/ u64 mrpcim_reg_modified; +#define VXGE_HW_MRPCIM_REG_MODIFIED_MRPCIM_REG_MODIFIED vxge_mBIT(7) + + u8 unused07378[0x07378-0x07300]; + +/*0x07378*/ u64 write_arb_pending; +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_WRDMA vxge_mBIT(3) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_RTDMA vxge_mBIT(7) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_MSG vxge_mBIT(11) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_STATSB vxge_mBIT(15) +#define VXGE_HW_WRITE_ARB_PENDING_WRARB_INTCTL vxge_mBIT(19) +/*0x07380*/ u64 read_arb_pending; +#define VXGE_HW_READ_ARB_PENDING_RDARB_WRDMA vxge_mBIT(3) +#define VXGE_HW_READ_ARB_PENDING_RDARB_RTDMA vxge_mBIT(7) +#define VXGE_HW_READ_ARB_PENDING_RDARB_DBLGEN vxge_mBIT(11) +/*0x07388*/ u64 dmaif_dmadbl_pending; +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_WR vxge_mBIT(0) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_WRDMA_RD vxge_mBIT(1) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_WR vxge_mBIT(2) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_RTDMA_RD vxge_mBIT(3) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_MSG_WR vxge_mBIT(4) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DMAIF_STATS_WR vxge_mBIT(5) +#define VXGE_HW_DMAIF_DMADBL_PENDING_DBLGEN_IN_PROG(val) \ + vxge_vBIT(val, 13, 51) +/*0x07390*/ u64 wrcrdtarb_status0_vplane[17]; +#define VXGE_HW_WRCRDTARB_STATUS0_VPLANE_WRCRDTARB_ABS_AVAIL_P_H(val) \ + vxge_vBIT(val, 0, 8) +/*0x07418*/ u64 wrcrdtarb_status1_vplane[17]; +#define VXGE_HW_WRCRDTARB_STATUS1_VPLANE_WRCRDTARB_ABS_AVAIL_P_D(val) \ + vxge_vBIT(val, 4, 12) + u8 unused07500[0x07500-0x074a0]; + +/*0x07500*/ u64 mrpcim_general_cfg1; +#define VXGE_HW_MRPCIM_GENERAL_CFG1_CLEAR_SERR vxge_mBIT(7) +/*0x07508*/ u64 mrpcim_general_cfg2; +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_WR_TD vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_RD_TD vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INS_TX_CPL_TD vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MWR vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_INI_TIMEOUT_EN_MRD vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_IGNORE_VPATH_RST_FOR_MSIX vxge_mBIT(23) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_FLASH_READ_MSB vxge_mBIT(27) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_HOST_PIPELINE_WR vxge_mBIT(31) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE vxge_mBIT(43) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(val) \ + vxge_vBIT(val, 47, 5) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_EN_BLOCK_MSIX_DUE_TO_SERR vxge_mBIT(55) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_FORCE_SENDING_INTA vxge_mBIT(59) +#define VXGE_HW_MRPCIM_GENERAL_CFG2_DIS_SWIF_PROT_ON_RDS vxge_mBIT(63) +/*0x07510*/ u64 mrpcim_general_cfg3; +#define VXGE_HW_MRPCIM_GENERAL_CFG3_PROTECTION_CA_OR_UNSUPN vxge_mBIT(0) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_ILLEGAL_RD_CA_OR_UNSUPN vxge_mBIT(3) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BYTE_SWAPEN vxge_mBIT(7) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_RD_BIT_FLIPEN vxge_mBIT(11) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BYTE_SWAPEN vxge_mBIT(15) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_WR_BIT_FLIPEN vxge_mBIT(19) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MAX_MVFS(val) vxge_vBIT(val, 20, 16) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_MR_MVF_TBL_SIZE(val) \ + vxge_vBIT(val, 36, 16) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_PF0_SW_RESET_EN vxge_mBIT(55) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_REG_MODIFIED_CFG(val) vxge_vBIT(val, 56, 2) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_CPL_ECC_ENABLE_N vxge_mBIT(59) +#define VXGE_HW_MRPCIM_GENERAL_CFG3_BYPASS_DAISY_CHAIN 
vxge_mBIT(63) +/*0x07518*/ u64 mrpcim_stats_start_host_addr; +#define VXGE_HW_MRPCIM_STATS_START_HOST_ADDR_MRPCIM_STATS_START_HOST_ADDR(val)\ + vxge_vBIT(val, 0, 57) + + u8 unused07950[0x07950-0x07520]; + +/*0x07950*/ u64 rdcrdtarb_cfg0; +#define VXGE_HW_RDCRDTARB_CFG0_RDA_MAX_OUTSTANDING_RDS(val) \ + vxge_vBIT(val, 18, 6) +#define VXGE_HW_RDCRDTARB_CFG0_PDA_MAX_OUTSTANDING_RDS(val) \ + vxge_vBIT(val, 26, 6) +#define VXGE_HW_RDCRDTARB_CFG0_DBLGEN_MAX_OUTSTANDING_RDS(val) \ + vxge_vBIT(val, 34, 6) +#define VXGE_HW_RDCRDTARB_CFG0_WAIT_CNT(val) vxge_vBIT(val, 48, 4) +#define VXGE_HW_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(val) vxge_vBIT(val, 54, 6) +#define VXGE_HW_RDCRDTARB_CFG0_EN_XON vxge_mBIT(63) + u8 unused07be8[0x07be8-0x07958]; + +/*0x07be8*/ u64 bf_sw_reset; +#define VXGE_HW_BF_SW_RESET_BF_SW_RESET(val) vxge_vBIT(val, 0, 8) +/*0x07bf0*/ u64 sw_reset_status; +#define VXGE_HW_SW_RESET_STATUS_RESET_CMPLT vxge_mBIT(7) +#define VXGE_HW_SW_RESET_STATUS_INIT_CMPLT vxge_mBIT(15) + u8 unused07d30[0x07d30-0x07bf8]; + +/*0x07d30*/ u64 mrpcim_debug_stats0; +#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_WR_DROP(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS0_INI_RD_DROP(val) vxge_vBIT(val, 32, 32) +/*0x07d38*/ u64 mrpcim_debug_stats1_vplane[17]; +#define VXGE_HW_MRPCIM_DEBUG_STATS1_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(val) \ + vxge_vBIT(val, 32, 32) +/*0x07dc0*/ u64 mrpcim_debug_stats2_vplane[17]; +#define VXGE_HW_MRPCIM_DEBUG_STATS2_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(val) \ + vxge_vBIT(val, 32, 32) +/*0x07e48*/ u64 mrpcim_debug_stats3_vplane[17]; +#define VXGE_HW_MRPCIM_DEBUG_STATS3_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(val) \ + vxge_vBIT(val, 32, 32) +/*0x07ed0*/ u64 mrpcim_debug_stats4; +#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_WR_VPIN_DROP(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_MRPCIM_DEBUG_STATS4_INI_RD_VPIN_DROP(val) \ + vxge_vBIT(val, 32, 32) +/*0x07ed8*/ u64 genstats_count01; +#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT1(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_GENSTATS_COUNT01_GENSTATS_COUNT0(val) vxge_vBIT(val, 32, 32) +/*0x07ee0*/ u64 genstats_count23; +#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT3(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_GENSTATS_COUNT23_GENSTATS_COUNT2(val) vxge_vBIT(val, 32, 32) +/*0x07ee8*/ u64 genstats_count4; +#define VXGE_HW_GENSTATS_COUNT4_GENSTATS_COUNT4(val) vxge_vBIT(val, 32, 32) +/*0x07ef0*/ u64 genstats_count5; +#define VXGE_HW_GENSTATS_COUNT5_GENSTATS_COUNT5(val) vxge_vBIT(val, 32, 32) + + u8 unused07f08[0x07f08-0x07ef8]; + +/*0x07f08*/ u64 genstats_cfg[6]; +#define VXGE_HW_GENSTATS_CFG_DTYPE_SEL(val) vxge_vBIT(val, 3, 5) +#define VXGE_HW_GENSTATS_CFG_CLIENT_NO_SEL(val) vxge_vBIT(val, 9, 3) +#define VXGE_HW_GENSTATS_CFG_WR_RD_CPL_SEL(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_GENSTATS_CFG_VPATH_SEL(val) vxge_vBIT(val, 31, 17) +/*0x07f38*/ u64 genstat_64bit_cfg; +#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS0 vxge_mBIT(3) +#define VXGE_HW_GENSTAT_64BIT_CFG_EN_FOR_GENSTATS2 vxge_mBIT(7) + u8 unused08000[0x08000-0x07f40]; +/*0x08000*/ u64 gcmg3_int_status; +#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR0_GSTC0_INT vxge_mBIT(0) +#define VXGE_HW_GCMG3_INT_STATUS_GSTC_ERR1_GSTC1_INT vxge_mBIT(1) +#define VXGE_HW_GCMG3_INT_STATUS_GH2L_ERR0_GH2L0_INT vxge_mBIT(2) +#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR_GH2L1_INT vxge_mBIT(3) +#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR2_GH2L2_INT vxge_mBIT(4) +#define VXGE_HW_GCMG3_INT_STATUS_GH2L_SMERR0_GH2L3_INT vxge_mBIT(5) +#define VXGE_HW_GCMG3_INT_STATUS_GHSQ_ERR3_GH2L4_INT vxge_mBIT(6) +/*0x08008*/ u64 
gcmg3_int_mask; + u8 unused09000[0x09000-0x8010]; + +/*0x09000*/ u64 g3ifcmd_fb_int_status; +#define VXGE_HW_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x09008*/ u64 g3ifcmd_fb_int_mask; +/*0x09010*/ u64 g3ifcmd_fb_err_reg; +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6) +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR vxge_mBIT(7) +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_G3IFCMD_FB_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55) +/*0x09018*/ u64 g3ifcmd_fb_err_mask; +/*0x09020*/ u64 g3ifcmd_fb_err_alarm; + + u8 unused09400[0x09400-0x09028]; + +/*0x09400*/ u64 g3ifcmd_cmu_int_status; +#define VXGE_HW_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x09408*/ u64 g3ifcmd_cmu_int_mask; +/*0x09410*/ u64 g3ifcmd_cmu_err_reg; +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6) +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR vxge_mBIT(7) +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_G3IFCMD_CMU_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55) +/*0x09418*/ u64 g3ifcmd_cmu_err_mask; +/*0x09420*/ u64 g3ifcmd_cmu_err_alarm; + + u8 unused09800[0x09800-0x09428]; + +/*0x09800*/ u64 g3ifcmd_cml_int_status; +#define VXGE_HW_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT vxge_mBIT(0) +/*0x09808*/ u64 g3ifcmd_cml_int_mask; +/*0x09810*/ u64 g3ifcmd_cml_err_reg; +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_CK_DLL_LOCK vxge_mBIT(6) +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR vxge_mBIT(7) +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_RWDQS_DLL_LOCK(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_G3IFCMD_CML_ERR_REG_G3IF_IOCAL_FAULT vxge_mBIT(55) +/*0x09818*/ u64 g3ifcmd_cml_err_mask; +/*0x09820*/ u64 g3ifcmd_cml_err_alarm; + u8 unused09b00[0x09b00-0x09828]; + +/*0x09b00*/ u64 vpath_to_vplane_map[17]; +#define VXGE_HW_VPATH_TO_VPLANE_MAP_VPATH_TO_VPLANE_MAP(val) \ + vxge_vBIT(val, 3, 5) + u8 unused09c30[0x09c30-0x09b88]; + +/*0x09c30*/ u64 xgxs_cfg_port[2]; +#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_LOS(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_XGXS_CFG_PORT_SIG_DETECT_FORCE_VALID(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_0 vxge_mBIT(27) +#define VXGE_HW_XGXS_CFG_PORT_SEL_INFO_1(val) vxge_vBIT(val, 29, 3) +#define VXGE_HW_XGXS_CFG_PORT_TX_LANE0_SKEW(val) vxge_vBIT(val, 32, 4) +#define VXGE_HW_XGXS_CFG_PORT_TX_LANE1_SKEW(val) vxge_vBIT(val, 36, 4) +#define VXGE_HW_XGXS_CFG_PORT_TX_LANE2_SKEW(val) vxge_vBIT(val, 40, 4) +#define VXGE_HW_XGXS_CFG_PORT_TX_LANE3_SKEW(val) vxge_vBIT(val, 44, 4) +/*0x09c40*/ u64 xgxs_rxber_cfg_port[2]; +#define VXGE_HW_XGXS_RXBER_CFG_PORT_INTERVAL_DUR(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_XGXS_RXBER_CFG_PORT_RXGXS_INTERVAL_CNT(val) \ + vxge_vBIT(val, 16, 48) +/*0x09c50*/ u64 xgxs_rxber_status_port[2]; +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_A_ERR_CNT(val) \ + vxge_vBIT(val, 0, 16) +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_B_ERR_CNT(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_C_ERR_CNT(val) \ + vxge_vBIT(val, 32, 16) +#define VXGE_HW_XGXS_RXBER_STATUS_PORT_RXGXS_RXGXS_LANE_D_ERR_CNT(val) \ + vxge_vBIT(val, 48, 16) +/*0x09c60*/ u64 xgxs_status_port[2]; +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_TX_ACTIVITY(val) vxge_vBIT(val, 0, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_RX_ACTIVITY(val) vxge_vBIT(val, 4, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_FIFO_ERR BIT(11) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_BYTE_SYNC_LOST(val) \ + 
vxge_vBIT(val, 12, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_CTC_ERR(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_ALIGNMENT_ERR vxge_mBIT(23) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_DEC_ERR(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_INS_REQ(val) \ + vxge_vBIT(val, 32, 4) +#define VXGE_HW_XGXS_STATUS_PORT_XMACJ_PCS_SKIP_DEL_REQ(val) \ + vxge_vBIT(val, 36, 4) +/*0x09c70*/ u64 xgxs_pma_reset_port[2]; +#define VXGE_HW_XGXS_PMA_RESET_PORT_SERDES_RESET(val) vxge_vBIT(val, 0, 8) + u8 unused09c90[0x09c90-0x09c80]; + +/*0x09c90*/ u64 xgxs_static_cfg_port[2]; +#define VXGE_HW_XGXS_STATIC_CFG_PORT_FW_CTRL_SERDES vxge_mBIT(3) + u8 unused09d40[0x09d40-0x09ca0]; + +/*0x09d40*/ u64 xgxs_info_port[2]; +#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_0(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_XGXS_INFO_PORT_XMACJ_INFO_1(val) vxge_vBIT(val, 32, 32) +/*0x09d50*/ u64 ratemgmt_cfg_port[2]; +#define VXGE_HW_RATEMGMT_CFG_PORT_MODE(val) vxge_vBIT(val, 2, 2) +#define VXGE_HW_RATEMGMT_CFG_PORT_RATE vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_CFG_PORT_FIXED_USE_FSM vxge_mBIT(11) +#define VXGE_HW_RATEMGMT_CFG_PORT_ANTP_USE_FSM vxge_mBIT(15) +#define VXGE_HW_RATEMGMT_CFG_PORT_ANBE_USE_FSM vxge_mBIT(19) +/*0x09d60*/ u64 ratemgmt_status_port[2]; +#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_COMPLETE vxge_mBIT(3) +#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_RATE vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_STATUS_PORT_RATEMGMT_MAC_MATCHES_PHY vxge_mBIT(11) + u8 unused09d80[0x09d80-0x09d70]; + +/*0x09d80*/ u64 ratemgmt_fixed_cfg_port[2]; +#define VXGE_HW_RATEMGMT_FIXED_CFG_PORT_RESTART vxge_mBIT(7) +/*0x09d90*/ u64 ratemgmt_antp_cfg_port[2]; +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_RESTART vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_PREAMBLE_EXT_PHY vxge_mBIT(11) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_USE_ACT_SEL vxge_mBIT(15) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_RETRY_PHY_QUERY(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_WAIT_MDIO_RESPONSE(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_T_LDOWN_REAUTO_RESPONSE(val) \ + vxge_vBIT(val, 24, 4) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_10G vxge_mBIT(31) +#define VXGE_HW_RATEMGMT_ANTP_CFG_PORT_ADVERTISE_1G vxge_mBIT(35) +/*0x09da0*/ u64 ratemgmt_anbe_cfg_port[2]; +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_RESTART vxge_mBIT(7) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_10G_KX4_ENABLE \ + vxge_mBIT(11) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_PARALLEL_DETECT_1G_KX_ENABLE \ + vxge_mBIT(15) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_10G_KX4(val) vxge_vBIT(val, 16, 4) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_SYNC_1G_KX(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_T_DME_EXCHANGE(val) vxge_vBIT(val, 24, 4) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_10G_KX4 vxge_mBIT(31) +#define VXGE_HW_RATEMGMT_ANBE_CFG_PORT_ADVERTISE_1G_KX vxge_mBIT(35) +/*0x09db0*/ u64 anbe_cfg_port[2]; +#define VXGE_HW_ANBE_CFG_PORT_RESET_CFG_REGS(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_ANBE_CFG_PORT_ALIGN_10G_KX4_OVERRIDE(val) vxge_vBIT(val, 10, 2) +#define VXGE_HW_ANBE_CFG_PORT_SYNC_1G_KX_OVERRIDE(val) vxge_vBIT(val, 14, 2) +/*0x09dc0*/ u64 anbe_mgr_ctrl_port[2]; +#define VXGE_HW_ANBE_MGR_CTRL_PORT_WE vxge_mBIT(3) +#define VXGE_HW_ANBE_MGR_CTRL_PORT_STROBE vxge_mBIT(7) +#define VXGE_HW_ANBE_MGR_CTRL_PORT_ADDR(val) vxge_vBIT(val, 15, 9) +#define VXGE_HW_ANBE_MGR_CTRL_PORT_DATA(val) vxge_vBIT(val, 32, 32) + u8 
unused09de0[0x09de0-0x09dd0]; + +/*0x09de0*/ u64 anbe_fw_mstr_port[2]; +#define VXGE_HW_ANBE_FW_MSTR_PORT_CONNECT_BEAN_TO_SERDES vxge_mBIT(3) +#define VXGE_HW_ANBE_FW_MSTR_PORT_TX_ZEROES_TO_SERDES vxge_mBIT(7) +/*0x09df0*/ u64 anbe_hwfsm_gen_status_port[2]; +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_PD \ + vxge_mBIT(3) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G_KX4_USING_DME \ + vxge_mBIT(7) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_PD \ + vxge_mBIT(11) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G_KX_USING_DME \ + vxge_mBIT(15) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANBEFSM_STATE(val) \ + vxge_vBIT(val, 18, 6) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_NEXT_PAGE_RECEIVED \ + vxge_mBIT(27) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_BASE_PAGE_RECEIVED \ + vxge_mBIT(35) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_BEAN_AUTONEG_COMPLETE \ + vxge_mBIT(39) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NP_BEFORE_BP \ + vxge_mBIT(43) +#define \ +VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_BP \ + vxge_mBIT(47) +#define \ +VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_AN_COMPLETE_BEFORE_NP \ +vxge_mBIT(51) +#define \ +VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MODE_WHEN_AN_COMPLETE \ + vxge_mBIT(55) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_BP(val) \ + vxge_vBIT(val, 56, 4) +#define VXGE_HW_ANBE_HWFSM_GEN_STATUS_PORT_RATEMGMT_COUNT_NP(val) \ + vxge_vBIT(val, 60, 4) +/*0x09e00*/ u64 anbe_hwfsm_bp_status_port[2]; +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ENABLE \ + vxge_mBIT(32) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_FEC_ABILITY \ + vxge_mBIT(33) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KR_CAPABLE \ + vxge_mBIT(40) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_10G_KX4_CAPABLE \ + vxge_mBIT(41) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_1G_KX_CAPABLE \ + vxge_mBIT(42) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_TX_NONCE(val) \ + vxge_vBIT(val, 43, 5) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(48) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(49) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_REMOTE_FAULT \ + vxge_mBIT(50) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ASM_DIR vxge_mBIT(51) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_PAUSE vxge_mBIT(53) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ECHOED_NONCE(val) \ + vxge_vBIT(val, 54, 5) +#define VXGE_HW_ANBE_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \ + vxge_vBIT(val, 59, 5) +/*0x09e10*/ u64 anbe_hwfsm_np_status_port[2]; +#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_47_TO_32(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_ANBE_HWFSM_NP_STATUS_PORT_RATEMGMT_NP_BITS_31_TO_0(val) \ + vxge_vBIT(val, 32, 32) + u8 unused09e30[0x09e30-0x09e20]; + +/*0x09e30*/ u64 antp_gen_cfg_port[2]; +/*0x09e40*/ u64 antp_hwfsm_gen_status_port[2]; +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_10G vxge_mBIT(3) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_CHOSE_1G vxge_mBIT(7) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_ANTPFSM_STATE(val) \ + vxge_vBIT(val, 10, 6) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_AUTONEG_COMPLETE \ + vxge_mBIT(23) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_LP_XNP \ 
+ vxge_mBIT(27) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_GOT_LP_XNP vxge_mBIT(31) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_MESSAGE_CODE \ + vxge_mBIT(35) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_NO_HCD \ + vxge_mBIT(43) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_FOUND_HCD vxge_mBIT(47) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_UNEXPECTED_INVALID_RATE \ + vxge_mBIT(51) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_VALID_RATE vxge_mBIT(55) +#define VXGE_HW_ANTP_HWFSM_GEN_STATUS_PORT_RATEMGMT_PERSISTENT_LDOWN \ + vxge_mBIT(59) +/*0x09e50*/ u64 antp_hwfsm_bp_status_port[2]; +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_NP vxge_mBIT(0) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ACK vxge_mBIT(1) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_RF vxge_mBIT(2) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_XNP vxge_mBIT(3) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_ABILITY_FIELD(val) \ + vxge_vBIT(val, 4, 7) +#define VXGE_HW_ANTP_HWFSM_BP_STATUS_PORT_RATEMGMT_BP_SELECTOR_FIELD(val) \ + vxge_vBIT(val, 11, 5) +/*0x09e60*/ u64 antp_hwfsm_xnp_status_port[2]; +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_NP vxge_mBIT(0) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK vxge_mBIT(1) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MP vxge_mBIT(2) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_ACK2 vxge_mBIT(3) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_TOGGLE vxge_mBIT(4) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_MESSAGE_CODE(val) \ + vxge_vBIT(val, 5, 11) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD1(val) \ + vxge_vBIT(val, 16, 16) +#define VXGE_HW_ANTP_HWFSM_XNP_STATUS_PORT_RATEMGMT_XNP_UNF_CODE_FIELD2(val) \ + vxge_vBIT(val, 32, 16) +/*0x09e70*/ u64 mdio_mgr_access_port[2]; +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_ONE BIT(3) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_OP_TYPE(val) vxge_vBIT(val, 5, 3) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DEVAD(val) vxge_vBIT(val, 11, 5) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ADDR(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_DATA(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_ST_PATTERN(val) vxge_vBIT(val, 49, 2) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PREAMBLE vxge_mBIT(51) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_PRTAD(val) vxge_vBIT(val, 55, 5) +#define VXGE_HW_MDIO_MGR_ACCESS_PORT_STROBE_TWO vxge_mBIT(63) + u8 unused0a200[0x0a200-0x09e80]; +/*0x0a200*/ u64 xmac_vsport_choices_vh[17]; +#define VXGE_HW_XMAC_VSPORT_CHOICES_VH_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17) + u8 unused0a400[0x0a400-0x0a288]; + +/*0x0a400*/ u64 rx_thresh_cfg_vp[17]; +#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_LOW_THR(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_PAUSE_HIGH_THR(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_0(val) vxge_vBIT(val, 16, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_1(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_2(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_RX_THRESH_CFG_VP_RED_THR_3(val) vxge_vBIT(val, 40, 8) + u8 unused0ac90[0x0ac90-0x0a488]; +} __packed; + +/*VXGE_HW_SRPCIM_REGS_H*/ +struct vxge_hw_srpcim_reg { + +/*0x00000*/ u64 tim_mr2sr_resource_assignment_vh; +#define VXGE_HW_TIM_MR2SR_RESOURCE_ASSIGNMENT_VH_BMAP_ROOT(val) \ + vxge_vBIT(val, 0, 32) + u8 unused00100[0x00100-0x00008]; + +/*0x00100*/ u64 srpcim_pcipif_int_status; +#define 
VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_MRPCIM_MSG_MRPCIM_MSG_INT BIT(3) +#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_VPATH_MSG_VPATH_MSG_INT BIT(7) +#define VXGE_HW_SRPCIM_PCIPIF_INT_STATUS_SRPCIM_SPARE_R1_SRPCIM_SPARE_R1_INT \ + BIT(11) +/*0x00108*/ u64 srpcim_pcipif_int_mask; +/*0x00110*/ u64 mrpcim_msg_reg; +#define VXGE_HW_MRPCIM_MSG_REG_SWIF_MRPCIM_TO_SRPCIM_RMSG_INT BIT(3) +/*0x00118*/ u64 mrpcim_msg_mask; +/*0x00120*/ u64 mrpcim_msg_alarm; +/*0x00128*/ u64 vpath_msg_reg; +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH0_TO_SRPCIM_RMSG_INT BIT(0) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH1_TO_SRPCIM_RMSG_INT BIT(1) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH2_TO_SRPCIM_RMSG_INT BIT(2) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH3_TO_SRPCIM_RMSG_INT BIT(3) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH4_TO_SRPCIM_RMSG_INT BIT(4) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH5_TO_SRPCIM_RMSG_INT BIT(5) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH6_TO_SRPCIM_RMSG_INT BIT(6) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH7_TO_SRPCIM_RMSG_INT BIT(7) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH8_TO_SRPCIM_RMSG_INT BIT(8) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH9_TO_SRPCIM_RMSG_INT BIT(9) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH10_TO_SRPCIM_RMSG_INT BIT(10) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH11_TO_SRPCIM_RMSG_INT BIT(11) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH12_TO_SRPCIM_RMSG_INT BIT(12) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH13_TO_SRPCIM_RMSG_INT BIT(13) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH14_TO_SRPCIM_RMSG_INT BIT(14) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH15_TO_SRPCIM_RMSG_INT BIT(15) +#define VXGE_HW_VPATH_MSG_REG_SWIF_VPATH16_TO_SRPCIM_RMSG_INT BIT(16) +/*0x00130*/ u64 vpath_msg_mask; +/*0x00138*/ u64 vpath_msg_alarm; + u8 unused00160[0x00160-0x00140]; + +/*0x00160*/ u64 srpcim_to_mrpcim_wmsg; +#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_SRPCIM_TO_MRPCIM_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x00168*/ u64 srpcim_to_mrpcim_wmsg_trig; +#define VXGE_HW_SRPCIM_TO_MRPCIM_WMSG_TRIG_SRPCIM_TO_MRPCIM_WMSG_TRIG BIT(0) +/*0x00170*/ u64 mrpcim_to_srpcim_rmsg; +#define VXGE_HW_MRPCIM_TO_SRPCIM_RMSG_SWIF_MRPCIM_TO_SRPCIM_RMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x00178*/ u64 vpath_to_srpcim_rmsg_sel; +#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SEL_VPATH_TO_SRPCIM_RMSG_SEL(val) \ + vxge_vBIT(val, 0, 5) +/*0x00180*/ u64 vpath_to_srpcim_rmsg; +#define VXGE_HW_VPATH_TO_SRPCIM_RMSG_SWIF_VPATH_TO_SRPCIM_RMSG(val) \ + vxge_vBIT(val, 0, 64) + u8 unused00200[0x00200-0x00188]; + +/*0x00200*/ u64 srpcim_general_int_status; +#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PIC_INT BIT(0) +#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_PCI_INT BIT(3) +#define VXGE_HW_SRPCIM_GENERAL_INT_STATUS_XMAC_INT BIT(7) + u8 unused00210[0x00210-0x00208]; + +/*0x00210*/ u64 srpcim_general_int_mask; +#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PIC_INT BIT(0) +#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_PCI_INT BIT(3) +#define VXGE_HW_SRPCIM_GENERAL_INT_MASK_XMAC_INT BIT(7) + u8 unused00220[0x00220-0x00218]; + +/*0x00220*/ u64 srpcim_ppif_int_status; + +/*0x00228*/ u64 srpcim_ppif_int_mask; +/*0x00230*/ u64 srpcim_gen_errors_reg; +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_STATUS_ERR BIT(3) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_UNCOR_ERR BIT(7) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_PCICONFIG_PF_COR_ERR BIT(11) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INTCTRL_SCHED_INT BIT(15) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_INI_SERR_DET BIT(19) +#define VXGE_HW_SRPCIM_GEN_ERRORS_REG_TGT_PF_ILLEGAL_ACCESS BIT(23) +/*0x00238*/ u64 srpcim_gen_errors_mask; +/*0x00240*/ u64 
srpcim_gen_errors_alarm; +/*0x00248*/ u64 mrpcim_to_srpcim_alarm_reg; +#define VXGE_HW_MRPCIM_TO_SRPCIM_ALARM_REG_PPIF_MRPCIM_TO_SRPCIM_ALARM BIT(3) +/*0x00250*/ u64 mrpcim_to_srpcim_alarm_mask; +/*0x00258*/ u64 mrpcim_to_srpcim_alarm_alarm; +/*0x00260*/ u64 vpath_to_srpcim_alarm_reg; + +/*0x00268*/ u64 vpath_to_srpcim_alarm_mask; +/*0x00270*/ u64 vpath_to_srpcim_alarm_alarm; + u8 unused00280[0x00280-0x00278]; + +/*0x00280*/ u64 pf_sw_reset; +#define VXGE_HW_PF_SW_RESET_PF_SW_RESET(val) vxge_vBIT(val, 0, 8) +/*0x00288*/ u64 srpcim_general_cfg1; +#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BYTE_SWAPEN BIT(19) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_BOOT_BIT_FLIPEN BIT(23) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_SWAPEN BIT(27) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_ADDR_FLIPEN BIT(31) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_SWAPEN BIT(35) +#define VXGE_HW_SRPCIM_GENERAL_CFG1_MSIX_DATA_FLIPEN BIT(39) +/*0x00290*/ u64 srpcim_interrupt_cfg1; +#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7) +#define VXGE_HW_SRPCIM_INTERRUPT_CFG1_TRAFFIC_CLASS(val) vxge_vBIT(val, 9, 3) + u8 unused002a8[0x002a8-0x00298]; + +/*0x002a8*/ u64 srpcim_clear_msix_mask; +#define VXGE_HW_SRPCIM_CLEAR_MSIX_MASK_SRPCIM_CLEAR_MSIX_MASK BIT(0) +/*0x002b0*/ u64 srpcim_set_msix_mask; +#define VXGE_HW_SRPCIM_SET_MSIX_MASK_SRPCIM_SET_MSIX_MASK BIT(0) +/*0x002b8*/ u64 srpcim_clr_msix_one_shot; +#define VXGE_HW_SRPCIM_CLR_MSIX_ONE_SHOT_SRPCIM_CLR_MSIX_ONE_SHOT BIT(0) +/*0x002c0*/ u64 srpcim_rst_in_prog; +#define VXGE_HW_SRPCIM_RST_IN_PROG_SRPCIM_RST_IN_PROG BIT(7) +/*0x002c8*/ u64 srpcim_reg_modified; +#define VXGE_HW_SRPCIM_REG_MODIFIED_SRPCIM_REG_MODIFIED BIT(7) +/*0x002d0*/ u64 tgt_pf_illegal_access; +#define VXGE_HW_TGT_PF_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7) +/*0x002d8*/ u64 srpcim_msix_status; +#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_MASK BIT(3) +#define VXGE_HW_SRPCIM_MSIX_STATUS_INTCTL_SRPCIM_MSIX_PENDING_VECTOR BIT(7) + u8 unused00880[0x00880-0x002e0]; + +/*0x00880*/ u64 xgmac_sr_int_status; +#define VXGE_HW_XGMAC_SR_INT_STATUS_ASIC_NTWK_SR_ERR_ASIC_NTWK_SR_INT BIT(3) +/*0x00888*/ u64 xgmac_sr_int_mask; +/*0x00890*/ u64 asic_ntwk_sr_err_reg; +#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT BIT(3) +#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK BIT(7) +#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_FAULT_OCCURRED \ + BIT(11) +#define VXGE_HW_ASIC_NTWK_SR_ERR_REG_XMACJ_NTWK_SUSTAINED_OK_OCCURRED BIT(15) +/*0x00898*/ u64 asic_ntwk_sr_err_mask; +/*0x008a0*/ u64 asic_ntwk_sr_err_alarm; + u8 unused008c0[0x008c0-0x008a8]; + +/*0x008c0*/ u64 xmac_vsport_choices_sr_clone; +#define VXGE_HW_XMAC_VSPORT_CHOICES_SR_CLONE_VSPORT_VECTOR(val) \ + vxge_vBIT(val, 0, 17) + u8 unused00900[0x00900-0x008c8]; + +/*0x00900*/ u64 mr_rqa_top_prty_for_vh; +#define VXGE_HW_MR_RQA_TOP_PRTY_FOR_VH_RQA_TOP_PRTY_FOR_VH(val) \ + vxge_vBIT(val, 59, 5) +/*0x00908*/ u64 umq_vh_data_list_empty; +#define VXGE_HW_UMQ_VH_DATA_LIST_EMPTY_ROCRC_UMQ_VH_DATA_LIST_EMPTY \ + BIT(0) +/*0x00910*/ u64 wde_cfg; +#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_START BIT(0) +#define VXGE_HW_WDE_CFG_NS0_FORCE_MWB_END BIT(1) +#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_START BIT(2) +#define VXGE_HW_WDE_CFG_NS0_FORCE_QB_END BIT(3) +#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_START BIT(4) +#define VXGE_HW_WDE_CFG_NS0_FORCE_MPSB_END BIT(5) +#define VXGE_HW_WDE_CFG_NS0_MWB_OPT_EN BIT(6) +#define VXGE_HW_WDE_CFG_NS0_QB_OPT_EN BIT(7) +#define VXGE_HW_WDE_CFG_NS0_MPSB_OPT_EN BIT(8) +#define 
VXGE_HW_WDE_CFG_NS1_FORCE_MWB_START BIT(9) +#define VXGE_HW_WDE_CFG_NS1_FORCE_MWB_END BIT(10) +#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_START BIT(11) +#define VXGE_HW_WDE_CFG_NS1_FORCE_QB_END BIT(12) +#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_START BIT(13) +#define VXGE_HW_WDE_CFG_NS1_FORCE_MPSB_END BIT(14) +#define VXGE_HW_WDE_CFG_NS1_MWB_OPT_EN BIT(15) +#define VXGE_HW_WDE_CFG_NS1_QB_OPT_EN BIT(16) +#define VXGE_HW_WDE_CFG_NS1_MPSB_OPT_EN BIT(17) +#define VXGE_HW_WDE_CFG_DISABLE_QPAD_FOR_UNALIGNED_ADDR BIT(19) +#define VXGE_HW_WDE_CFG_ALIGNMENT_PREFERENCE(val) vxge_vBIT(val, 30, 2) +#define VXGE_HW_WDE_CFG_MEM_WORD_SIZE(val) vxge_vBIT(val, 46, 2) + +} __packed; + +/*VXGE_HW_VPMGMT_REGS_H*/ +struct vxge_hw_vpmgmt_reg { + + u8 unused00040[0x00040-0x00000]; + +/*0x00040*/ u64 vpath_to_func_map_cfg1; +#define VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_VPATH_TO_FUNC_MAP_CFG1(val) \ + vxge_vBIT(val, 3, 5) +/*0x00048*/ u64 vpath_is_first; +#define VXGE_HW_VPATH_IS_FIRST_VPATH_IS_FIRST vxge_mBIT(3) +/*0x00050*/ u64 srpcim_to_vpath_wmsg; +#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_SRPCIM_TO_VPATH_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x00058*/ u64 srpcim_to_vpath_wmsg_trig; +#define VXGE_HW_SRPCIM_TO_VPATH_WMSG_TRIG_SRPCIM_TO_VPATH_WMSG_TRIG \ + vxge_mBIT(0) + u8 unused00100[0x00100-0x00060]; + +/*0x00100*/ u64 tim_vpath_assignment; +#define VXGE_HW_TIM_VPATH_ASSIGNMENT_BMAP_ROOT(val) vxge_vBIT(val, 0, 32) + u8 unused00140[0x00140-0x00108]; + +/*0x00140*/ u64 rqa_top_prty_for_vp; +#define VXGE_HW_RQA_TOP_PRTY_FOR_VP_RQA_TOP_PRTY_FOR_VP(val) \ + vxge_vBIT(val, 59, 5) + u8 unused001c0[0x001c0-0x00148]; + +/*0x001c0*/ u64 rxmac_rx_pa_cfg0_vpmgmt_clone; +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IGNORE_FRAME_ERR vxge_mBIT(3) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_SNAP_AB_N vxge_mBIT(7) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_HAO vxge_mBIT(18) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SUPPORT_MOBILE_IPV6_HDRS \ + vxge_mBIT(19) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_IPV6_STOP_SEARCHING \ + vxge_mBIT(23) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_NO_PS_IF_UNKNOWN vxge_mBIT(27) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_SEARCH_FOR_ETYPE vxge_mBIT(35) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L3_CSUM_ERR \ + vxge_mBIT(39) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L3_CSUM_ERR \ + vxge_mBIT(43) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_L4_CSUM_ERR \ + vxge_mBIT(47) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_L4_CSUM_ERR \ + vxge_mBIT(51) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_ANY_FRM_IF_RPA_ERR \ + vxge_mBIT(55) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_TOSS_OFFLD_FRM_IF_RPA_ERR \ + vxge_mBIT(59) +#define VXGE_HW_RXMAC_RX_PA_CFG0_VPMGMT_CLONE_JUMBO_SNAP_EN vxge_mBIT(63) +/*0x001c8*/ u64 rts_mgr_cfg0_vpmgmt_clone; +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_RTS_DP_SP_PRIORITY vxge_mBIT(3) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_FLEX_L4PRTCL_VALUE(val) \ + vxge_vBIT(val, 24, 8) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ICMP_TRASH vxge_mBIT(35) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_TCPSYN_TRASH vxge_mBIT(39) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_ZL4PYLD_TRASH vxge_mBIT(43) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_TCP_TRASH vxge_mBIT(47) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_UDP_TRASH vxge_mBIT(51) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_L4PRTCL_FLEX_TRASH vxge_mBIT(55) +#define VXGE_HW_RTS_MGR_CFG0_VPMGMT_CLONE_IPFRAG_TRASH 
vxge_mBIT(59) +/*0x001d0*/ u64 rts_mgr_criteria_priority_vpmgmt_clone; +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ETYPE(val) \ + vxge_vBIT(val, 5, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ICMP_TCPSYN(val) \ + vxge_vBIT(val, 9, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PN(val) \ + vxge_vBIT(val, 13, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RANGE_L4PN(val) \ + vxge_vBIT(val, 17, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_RTH_IT(val) \ + vxge_vBIT(val, 21, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_DS(val) \ + vxge_vBIT(val, 25, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_QOS(val) \ + vxge_vBIT(val, 29, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_ZL4PYLD(val) \ + vxge_vBIT(val, 33, 3) +#define VXGE_HW_RTS_MGR_CRITERIA_PRIORITY_VPMGMT_CLONE_L4PRTCL(val) \ + vxge_vBIT(val, 37, 3) +/*0x001d8*/ u64 rxmac_cfg0_port_vpmgmt_clone[3]; +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_RMAC_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS vxge_mBIT(7) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_DISCARD_PFRM vxge_mBIT(11) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_FCS_ERR vxge_mBIT(15) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LONG_ERR vxge_mBIT(19) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_USIZED_ERR vxge_mBIT(23) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_IGNORE_LEN_MISMATCH \ + vxge_mBIT(27) +#define VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_MAX_PYLD_LEN(val) \ + vxge_vBIT(val, 50, 14) +/*0x001f0*/ u64 rxmac_pause_cfg_port_vpmgmt_clone[3]; +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_GEN_EN vxge_mBIT(3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_RCV_EN vxge_mBIT(7) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_ACCEL_SEND(val) \ + vxge_vBIT(val, 9, 3) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_DUAL_THR vxge_mBIT(15) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_HIGH_PTIME(val) \ + vxge_vBIT(val, 20, 16) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_FCS_ERR \ + vxge_mBIT(39) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_IGNORE_PF_LEN_ERR \ + vxge_mBIT(43) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_LIMITER_EN vxge_mBIT(47) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_MAX_LIMIT(val) \ + vxge_vBIT(val, 48, 8) +#define VXGE_HW_RXMAC_PAUSE_CFG_PORT_VPMGMT_CLONE_PERMIT_RATEMGMT_CTRL \ + vxge_mBIT(59) + u8 unused00240[0x00240-0x00208]; + +/*0x00240*/ u64 xmac_vsport_choices_vp; +#define VXGE_HW_XMAC_VSPORT_CHOICES_VP_VSPORT_VECTOR(val) vxge_vBIT(val, 0, 17) + u8 unused00260[0x00260-0x00248]; + +/*0x00260*/ u64 xgmac_gen_status_vpmgmt_clone; +#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK vxge_mBIT(3) +#define VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_DATA_RATE \ + vxge_mBIT(11) +/*0x00268*/ u64 xgmac_status_port_vpmgmt_clone[2]; +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_REMOTE_FAULT \ + vxge_mBIT(3) +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_RMAC_LOCAL_FAULT vxge_mBIT(7) +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_MAC_PHY_LAYER_AVAIL \ + vxge_mBIT(11) +#define VXGE_HW_XGMAC_STATUS_PORT_VPMGMT_CLONE_XMACJ_PORT_OK vxge_mBIT(15) +/*0x00278*/ u64 xmac_gen_cfg_vpmgmt_clone; +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_RATEMGMT_MAC_RATE_SEL(val) \ + vxge_vBIT(val, 2, 2) +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_TX_HEAD_DROP_WHEN_FAULT \ + vxge_mBIT(7) +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_FAULT_BEHAVIOUR vxge_mBIT(27) 
+#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_UP(val) \ + vxge_vBIT(val, 28, 4) +#define VXGE_HW_XMAC_GEN_CFG_VPMGMT_CLONE_PERIOD_NTWK_DOWN(val) \ + vxge_vBIT(val, 32, 4) +/*0x00280*/ u64 xmac_timestamp_vpmgmt_clone; +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_EN vxge_mBIT(3) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_USE_LINK_ID(val) \ + vxge_vBIT(val, 6, 2) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_INTERVAL(val) vxge_vBIT(val, 12, 4) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_TIMER_RESTART vxge_mBIT(19) +#define VXGE_HW_XMAC_TIMESTAMP_VPMGMT_CLONE_XMACJ_ROLLOVER_CNT(val) \ + vxge_vBIT(val, 32, 16) +/*0x00288*/ u64 xmac_stats_gen_cfg_vpmgmt_clone; +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_PRTAGGR_CUM_TIMER(val) \ + vxge_vBIT(val, 4, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VPATH_CUM_TIMER(val) \ + vxge_vBIT(val, 8, 4) +#define VXGE_HW_XMAC_STATS_GEN_CFG_VPMGMT_CLONE_VLAN_HANDLING vxge_mBIT(15) +/*0x00290*/ u64 xmac_cfg_port_vpmgmt_clone[3]; +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_LOOPBACK vxge_mBIT(3) +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_REVERSE_LOOPBACK \ + vxge_mBIT(7) +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_TX_BEHAV vxge_mBIT(11) +#define VXGE_HW_XMAC_CFG_PORT_VPMGMT_CLONE_XGMII_RX_BEHAV vxge_mBIT(15) + u8 unused002c0[0x002c0-0x002a8]; + +/*0x002c0*/ u64 txmac_gen_cfg0_vpmgmt_clone; +#define VXGE_HW_TXMAC_GEN_CFG0_VPMGMT_CLONE_CHOSEN_TX_PORT vxge_mBIT(7) +/*0x002c8*/ u64 txmac_cfg0_port_vpmgmt_clone[3]; +#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_TMAC_EN vxge_mBIT(3) +#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_APPEND_PAD vxge_mBIT(7) +#define VXGE_HW_TXMAC_CFG0_PORT_VPMGMT_CLONE_PAD_BYTE(val) vxge_vBIT(val, 8, 8) + u8 unused00300[0x00300-0x002e0]; + +/*0x00300*/ u64 wol_mp_crc; +#define VXGE_HW_WOL_MP_CRC_CRC(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_WOL_MP_CRC_RC_EN vxge_mBIT(63) +/*0x00308*/ u64 wol_mp_mask_a; +#define VXGE_HW_WOL_MP_MASK_A_MASK(val) vxge_vBIT(val, 0, 64) +/*0x00310*/ u64 wol_mp_mask_b; +#define VXGE_HW_WOL_MP_MASK_B_MASK(val) vxge_vBIT(val, 0, 64) + u8 unused00360[0x00360-0x00318]; + +/*0x00360*/ u64 fau_pa_cfg_vpmgmt_clone; +#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L4_COMP_CSUM vxge_mBIT(3) +#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_INCL_CF vxge_mBIT(7) +#define VXGE_HW_FAU_PA_CFG_VPMGMT_CLONE_REPL_L3_COMP_CSUM vxge_mBIT(11) +/*0x00368*/ u64 rx_datapath_util_vp_clone; +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_UTILIZATION(val) \ + vxge_vBIT(val, 7, 9) +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_UTIL_CFG(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_FAU_RX_FRAC_UTIL(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_RX_DATAPATH_UTIL_VP_CLONE_RX_PKT_WEIGHT(val) \ + vxge_vBIT(val, 24, 4) + u8 unused00380[0x00380-0x00370]; + +/*0x00380*/ u64 tx_datapath_util_vp_clone; +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_UTILIZATION(val) \ + vxge_vBIT(val, 7, 9) +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_UTIL_CFG(val) \ + vxge_vBIT(val, 16, 4) +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TPA_TX_FRAC_UTIL(val) \ + vxge_vBIT(val, 20, 4) +#define VXGE_HW_TX_DATAPATH_UTIL_VP_CLONE_TX_PKT_WEIGHT(val) \ + vxge_vBIT(val, 24, 4) + +} __packed; + +struct vxge_hw_vpath_reg { + + u8 unused00300[0x00300]; + +/*0x00300*/ u64 usdc_vpath; +#define VXGE_HW_USDC_VPATH_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 32) + u8 unused00a00[0x00a00-0x00308]; + +/*0x00a00*/ u64 wrdma_alarm_status; +#define VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT vxge_mBIT(1) +/*0x00a08*/ u64 
wrdma_alarm_mask; + u8 unused00a30[0x00a30-0x00a10]; + +/*0x00a30*/ u64 prc_alarm_reg; +#define VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP vxge_mBIT(0) +#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR vxge_mBIT(1) +#define VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT vxge_mBIT(2) +#define VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR vxge_mBIT(3) +/*0x00a38*/ u64 prc_alarm_mask; +/*0x00a40*/ u64 prc_alarm_alarm; +/*0x00a48*/ u64 prc_cfg1; +#define VXGE_HW_PRC_CFG1_RX_TIMER_VAL(val) vxge_vBIT(val, 3, 29) +#define VXGE_HW_PRC_CFG1_TIM_RING_BUMP_INT_ENABLE vxge_mBIT(34) +#define VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE vxge_mBIT(35) +#define VXGE_HW_PRC_CFG1_GREEDY_RETURN vxge_mBIT(36) +#define VXGE_HW_PRC_CFG1_QUICK_SHOT vxge_mBIT(37) +#define VXGE_HW_PRC_CFG1_RX_TIMER_CI vxge_mBIT(39) +#define VXGE_HW_PRC_CFG1_RESET_TIMER_ON_RXD_RET(val) vxge_vBIT(val, 40, 2) + u8 unused00a60[0x00a60-0x00a50]; + +/*0x00a60*/ u64 prc_cfg4; +#define VXGE_HW_PRC_CFG4_IN_SVC vxge_mBIT(7) +#define VXGE_HW_PRC_CFG4_RING_MODE(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_PRC_CFG4_RXD_NO_SNOOP vxge_mBIT(22) +#define VXGE_HW_PRC_CFG4_FRM_NO_SNOOP vxge_mBIT(23) +#define VXGE_HW_PRC_CFG4_RTH_DISABLE vxge_mBIT(31) +#define VXGE_HW_PRC_CFG4_IGNORE_OWNERSHIP vxge_mBIT(32) +#define VXGE_HW_PRC_CFG4_SIGNAL_BENIGN_OVFLW vxge_mBIT(36) +#define VXGE_HW_PRC_CFG4_BIMODAL_INTERRUPT vxge_mBIT(37) +#define VXGE_HW_PRC_CFG4_BACKOFF_INTERVAL(val) vxge_vBIT(val, 40, 24) +/*0x00a68*/ u64 prc_cfg5; +#define VXGE_HW_PRC_CFG5_RXD0_ADD(val) vxge_vBIT(val, 0, 61) +/*0x00a70*/ u64 prc_cfg6; +#define VXGE_HW_PRC_CFG6_FRM_PAD_EN vxge_mBIT(0) +#define VXGE_HW_PRC_CFG6_QSIZE_ALIGNED_RXD vxge_mBIT(2) +#define VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN vxge_mBIT(5) +#define VXGE_HW_PRC_CFG6_L3_CPC_TRSFR_CODE_EN vxge_mBIT(8) +#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) +#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) +#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) +#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9) +/*0x00a78*/ u64 prc_cfg7; +#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) +#define VXGE_HW_PRC_CFG7_RXD_NS_CHG_EN vxge_mBIT(12) +#define VXGE_HW_PRC_CFG7_NO_HDR_SEPARATION vxge_mBIT(14) +#define VXGE_HW_PRC_CFG7_RXD_BUFF_SIZE_MASK(val) vxge_vBIT(val, 20, 4) +#define VXGE_HW_PRC_CFG7_BUFF_SIZE0_MASK(val) vxge_vBIT(val, 27, 5) +/*0x00a80*/ u64 tim_dest_addr; +#define VXGE_HW_TIM_DEST_ADDR_TIM_DEST_ADDR(val) vxge_vBIT(val, 0, 64) +/*0x00a88*/ u64 prc_rxd_doorbell; +#define VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val) vxge_vBIT(val, 48, 16) +/*0x00a90*/ u64 rqa_prty_for_vp; +#define VXGE_HW_RQA_PRTY_FOR_VP_RQA_PRTY_FOR_VP(val) vxge_vBIT(val, 59, 5) +/*0x00a98*/ u64 rxdmem_size; +#define VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(val) vxge_vBIT(val, 51, 13) +/*0x00aa0*/ u64 frm_in_progress_cnt; +#define VXGE_HW_FRM_IN_PROGRESS_CNT_PRC_FRM_IN_PROGRESS_CNT(val) \ + vxge_vBIT(val, 59, 5) +/*0x00aa8*/ u64 rx_multi_cast_stats; +#define VXGE_HW_RX_MULTI_CAST_STATS_FRAME_DISCARD(val) vxge_vBIT(val, 48, 16) +/*0x00ab0*/ u64 rx_frm_transferred; +#define VXGE_HW_RX_FRM_TRANSFERRED_RX_FRM_TRANSFERRED(val) \ + vxge_vBIT(val, 32, 32) +/*0x00ab8*/ u64 rxd_returned; +#define VXGE_HW_RXD_RETURNED_RXD_RETURNED(val) vxge_vBIT(val, 48, 16) + u8 unused00c00[0x00c00-0x00ac0]; + +/*0x00c00*/ u64 kdfc_fifo_trpl_partition; +#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_1(val) vxge_vBIT(val, 33, 15) 
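
Several fields above, such as PRC_CFG6_RXD_SPAT, come as a setter/extractor pair: a vxge_vBIT() writer and a vxge_bVALn() reader at the same offset and width. Updating one field of a live register is then a read-modify-write in which the setter applied to an all-ones value doubles as the clear mask. The sketch below models the register as a plain u64 (on real hardware this would be a readq()/writeq() pair against the mapped vpath register block); the helper definitions are restated assumptions and the numeric values are made up.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Assumed helper definitions (MSB-first bit numbering). */
#define vxge_vBIT(val, loc, sz)   (((u64)(val)) << (64 - (loc) - (sz)))
#define vxge_bVALn(bits, loc, n) \
        ((((u64)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))

/* Setter/extractor pair from this header. */
#define VXGE_HW_PRC_CFG6_RXD_SPAT(val)     vxge_vBIT(val, 36, 9)
#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)

int main(void)
{
        u64 prc_cfg6 = VXGE_HW_PRC_CFG6_RXD_SPAT(64);   /* stands in for readq() */

        /* Clear the 9-bit field first, then install the new value. */
        prc_cfg6 &= ~VXGE_HW_PRC_CFG6_RXD_SPAT(0x1ff);
        prc_cfg6 |= VXGE_HW_PRC_CFG6_RXD_SPAT(96);      /* then writeq() it back */

        printf("rxd_spat = %llu\n",
               (unsigned long long)VXGE_HW_PRC_CFG6_GET_RXD_SPAT(prc_cfg6));
        return 0;
}
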
+#define VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_2(val) vxge_vBIT(val, 49, 15) +/*0x00c08*/ u64 kdfc_fifo_trpl_ctrl; +#define VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE vxge_mBIT(7) +/*0x00c10*/ u64 kdfc_trpl_fifo_0_ctrl; +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN vxge_mBIT(23) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_CTRL_STRUC vxge_mBIT(28) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_ADD_PAD vxge_mBIT(29) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_NO_SNOOP vxge_mBIT(30) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_RLX_ORD vxge_mBIT(31) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16) +/*0x00c18*/ u64 kdfc_trpl_fifo_1_ctrl; +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_MODE(val) vxge_vBIT(val, 14, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SWAP_EN vxge_mBIT(23) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_CTRL_STRUC vxge_mBIT(28) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_ADD_PAD vxge_mBIT(29) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_NO_SNOOP vxge_mBIT(30) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_RLX_ORD vxge_mBIT(31) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_SELECT(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_KDFC_TRPL_FIFO_1_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16) +/*0x00c20*/ u64 kdfc_trpl_fifo_2_ctrl; +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SWAP_EN vxge_mBIT(23) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_CTRL(val) vxge_vBIT(val, 26, 2) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_CTRL_STRUC vxge_mBIT(28) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_ADD_PAD vxge_mBIT(29) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_NO_SNOOP vxge_mBIT(30) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_RLX_ORD vxge_mBIT(31) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_SELECT(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_INT_NO(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_KDFC_TRPL_FIFO_2_CTRL_BIT_MAP(val) vxge_vBIT(val, 48, 16) +/*0x00c28*/ u64 kdfc_trpl_fifo_0_wb_address; +#define VXGE_HW_KDFC_TRPL_FIFO_0_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64) +/*0x00c30*/ u64 kdfc_trpl_fifo_1_wb_address; +#define VXGE_HW_KDFC_TRPL_FIFO_1_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64) +/*0x00c38*/ u64 kdfc_trpl_fifo_2_wb_address; +#define VXGE_HW_KDFC_TRPL_FIFO_2_WB_ADDRESS_ADD(val) vxge_vBIT(val, 0, 64) +/*0x00c40*/ u64 kdfc_trpl_fifo_offset; +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR0(val) vxge_vBIT(val, 1, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR1(val) vxge_vBIT(val, 17, 15) +#define VXGE_HW_KDFC_TRPL_FIFO_OFFSET_KDFC_RCTR2(val) vxge_vBIT(val, 33, 15) +/*0x00c48*/ u64 kdfc_drbl_triplet_total; +#define VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_KDFC_MAX_SIZE(val) \ + vxge_vBIT(val, 17, 15) + u8 unused00c60[0x00c60-0x00c50]; + +/*0x00c60*/ u64 usdc_drbl_ctrl; +#define VXGE_HW_USDC_DRBL_CTRL_FLIP_EN vxge_mBIT(22) +#define VXGE_HW_USDC_DRBL_CTRL_SWAP_EN vxge_mBIT(23) +/*0x00c68*/ u64 usdc_vp_ready; +#define VXGE_HW_USDC_VP_READY_USDC_HTN_READY vxge_mBIT(7) +#define VXGE_HW_USDC_VP_READY_USDC_SRQ_READY vxge_mBIT(15) +#define 
VXGE_HW_USDC_VP_READY_USDC_CQRQ_READY vxge_mBIT(23) +/*0x00c70*/ u64 kdfc_status; +#define VXGE_HW_KDFC_STATUS_KDFC_WRR_0_READY vxge_mBIT(0) +#define VXGE_HW_KDFC_STATUS_KDFC_WRR_1_READY vxge_mBIT(1) +#define VXGE_HW_KDFC_STATUS_KDFC_WRR_2_READY vxge_mBIT(2) + u8 unused00c80[0x00c80-0x00c78]; + +/*0x00c80*/ u64 xmac_rpa_vcfg; +#define VXGE_HW_XMAC_RPA_VCFG_IPV4_TCP_INCL_PH vxge_mBIT(3) +#define VXGE_HW_XMAC_RPA_VCFG_IPV6_TCP_INCL_PH vxge_mBIT(7) +#define VXGE_HW_XMAC_RPA_VCFG_IPV4_UDP_INCL_PH vxge_mBIT(11) +#define VXGE_HW_XMAC_RPA_VCFG_IPV6_UDP_INCL_PH vxge_mBIT(15) +#define VXGE_HW_XMAC_RPA_VCFG_L4_INCL_CF vxge_mBIT(19) +#define VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG vxge_mBIT(23) +/*0x00c88*/ u64 rxmac_vcfg0; +#define VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(val) vxge_vBIT(val, 2, 14) +#define VXGE_HW_RXMAC_VCFG0_RTS_USE_MIN_LEN vxge_mBIT(19) +#define VXGE_HW_RXMAC_VCFG0_RTS_MIN_FRM_LEN(val) vxge_vBIT(val, 26, 14) +#define VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN vxge_mBIT(43) +#define VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN vxge_mBIT(47) +#define VXGE_HW_RXMAC_VCFG0_BCAST_EN vxge_mBIT(51) +#define VXGE_HW_RXMAC_VCFG0_ALL_VID_EN vxge_mBIT(55) +/*0x00c90*/ u64 rxmac_vcfg1; +#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(val) vxge_vBIT(val, 42, 2) +#define VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE vxge_mBIT(47) +#define VXGE_HW_RXMAC_VCFG1_CONTRIB_L2_FLOW vxge_mBIT(51) +/*0x00c98*/ u64 rts_access_steer_ctrl; +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(val) vxge_vBIT(val, 1, 7) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(val) vxge_vBIT(val, 8, 4) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE vxge_mBIT(15) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_BEHAV_TBL_SEL vxge_mBIT(23) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL vxge_mBIT(27) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS vxge_mBIT(0) +#define VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(val) vxge_vBIT(val, 40, 8) +/*0x00ca0*/ u64 rts_access_steer_data0; +#define VXGE_HW_RTS_ACCESS_STEER_DATA0_DATA(val) vxge_vBIT(val, 0, 64) +/*0x00ca8*/ u64 rts_access_steer_data1; +#define VXGE_HW_RTS_ACCESS_STEER_DATA1_DATA(val) vxge_vBIT(val, 0, 64) + u8 unused00d00[0x00d00-0x00cb0]; + +/*0x00d00*/ u64 xmac_vsport_choice; +#define VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(val) vxge_vBIT(val, 3, 5) +/*0x00d08*/ u64 xmac_stats_cfg; +/*0x00d10*/ u64 xmac_stats_access_cmd; +#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE vxge_mBIT(15) +#define VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(val) vxge_vBIT(val, 32, 8) +/*0x00d18*/ u64 xmac_stats_access_data; +#define VXGE_HW_XMAC_STATS_ACCESS_DATA_XSMGR_DATA(val) vxge_vBIT(val, 0, 64) +/*0x00d20*/ u64 asic_ntwk_vp_ctrl; +#define VXGE_HW_ASIC_NTWK_VP_CTRL_REQ_TEST_NTWK vxge_mBIT(3) +#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_SHOW_PORT_INFO vxge_mBIT(55) +#define VXGE_HW_ASIC_NTWK_VP_CTRL_XMACJ_PORT_NUM vxge_mBIT(63) + u8 unused00d30[0x00d30-0x00d28]; + +/*0x00d30*/ u64 xgmac_vp_int_status; +#define VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT \ + vxge_mBIT(3) +/*0x00d38*/ u64 xgmac_vp_int_mask; +/*0x00d40*/ u64 asic_ntwk_vp_err_reg; +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT vxge_mBIT(3) +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK vxge_mBIT(7) +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR \ + vxge_mBIT(11) +#define VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR \ + vxge_mBIT(15) +#define VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT \ + vxge_mBIT(19) +#define 
VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK vxge_mBIT(23) +/*0x00d48*/ u64 asic_ntwk_vp_err_mask; +/*0x00d50*/ u64 asic_ntwk_vp_err_alarm; + u8 unused00d80[0x00d80-0x00d58]; + +/*0x00d80*/ u64 rtdma_bw_ctrl; +#define VXGE_HW_RTDMA_BW_CTRL_BW_CTRL_EN vxge_mBIT(39) +#define VXGE_HW_RTDMA_BW_CTRL_DESIRED_BW(val) vxge_vBIT(val, 46, 18) +/*0x00d88*/ u64 rtdma_rd_optimization_ctrl; +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_GEN_INT_AFTER_ABORT vxge_mBIT(3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_MODE(val) vxge_vBIT(val, 6, 2) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_PAD_PATTERN(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE vxge_mBIT(19) +#define VXGE_HW_PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val) \ + vxge_vBIT(val, 21, 3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK_EN vxge_mBIT(28) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_PYLD_WMARK(val) \ + vxge_vBIT(val, 29, 3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN vxge_mBIT(35) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(val) \ + vxge_vBIT(val, 37, 3) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_WAIT_FOR_SPACE vxge_mBIT(43) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_FILL_THRESH(val) \ + vxge_vBIT(val, 51, 5) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY_EN vxge_mBIT(59) +#define VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_TXD_ADDR_BDRY(val) \ + vxge_vBIT(val, 61, 3) +/*0x00d90*/ u64 pda_pcc_job_monitor; +#define VXGE_HW_PDA_PCC_JOB_MONITOR_PDA_PCC_JOB_STATUS vxge_mBIT(7) +/*0x00d98*/ u64 tx_protocol_assist_cfg; +#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_LSOV2_EN vxge_mBIT(6) +#define VXGE_HW_TX_PROTOCOL_ASSIST_CFG_IPV6_KEEP_SEARCHING vxge_mBIT(7) + u8 unused01000[0x01000-0x00da0]; + +/*0x01000*/ u64 tim_cfg1_int_num[4]; +#define VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(val) vxge_vBIT(val, 6, 26) +#define VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN vxge_mBIT(35) +#define VXGE_HW_TIM_CFG1_INT_NUM_TXFRM_CNT_EN vxge_mBIT(36) +#define VXGE_HW_TIM_CFG1_INT_NUM_TXD_CNT_EN vxge_mBIT(37) +#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC vxge_mBIT(38) +#define VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI vxge_mBIT(39) +#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(val) vxge_vBIT(val, 41, 7) +#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(val) vxge_vBIT(val, 49, 7) +#define VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(val) vxge_vBIT(val, 57, 7) +/*0x01020*/ u64 tim_cfg2_int_num[4]; +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(val) vxge_vBIT(val, 32, 16) +#define VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(val) vxge_vBIT(val, 48, 16) +/*0x01040*/ u64 tim_cfg3_int_num[4]; +#define VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI vxge_mBIT(0) +#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(val) vxge_vBIT(val, 1, 4) +#define VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(val) vxge_vBIT(val, 6, 26) +#define VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(val) vxge_vBIT(val, 32, 6) +#define VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(val) vxge_vBIT(val, 38, 26) +/*0x01060*/ u64 tim_wrkld_clc; +#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(val) vxge_vBIT(val, 35, 5) +#define VXGE_HW_TIM_WRKLD_CLC_CNT_FRM_BYTE vxge_mBIT(40) +#define VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(val) vxge_vBIT(val, 41, 2) +#define VXGE_HW_TIM_WRKLD_CLC_CNT_LNK_EN vxge_mBIT(43) +#define VXGE_HW_TIM_WRKLD_CLC_HOST_UTIL(val) 
vxge_vBIT(val, 57, 7) +/*0x01068*/ u64 tim_bitmap; +#define VXGE_HW_TIM_BITMAP_MASK(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_TIM_BITMAP_LLROOT_RXD_EN vxge_mBIT(32) +#define VXGE_HW_TIM_BITMAP_LLROOT_TXD_EN vxge_mBIT(33) +/*0x01070*/ u64 tim_ring_assn; +#define VXGE_HW_TIM_RING_ASSN_INT_NUM(val) vxge_vBIT(val, 6, 2) +/*0x01078*/ u64 tim_remap; +#define VXGE_HW_TIM_REMAP_TX_EN vxge_mBIT(5) +#define VXGE_HW_TIM_REMAP_RX_EN vxge_mBIT(6) +#define VXGE_HW_TIM_REMAP_OFFLOAD_EN vxge_mBIT(7) +#define VXGE_HW_TIM_REMAP_TO_VPATH_NUM(val) vxge_vBIT(val, 11, 5) +/*0x01080*/ u64 tim_vpath_map; +#define VXGE_HW_TIM_VPATH_MAP_BMAP_ROOT(val) vxge_vBIT(val, 0, 32) +/*0x01088*/ u64 tim_pci_cfg; +#define VXGE_HW_TIM_PCI_CFG_ADD_PAD vxge_mBIT(7) +#define VXGE_HW_TIM_PCI_CFG_NO_SNOOP vxge_mBIT(15) +#define VXGE_HW_TIM_PCI_CFG_RELAXED vxge_mBIT(23) +#define VXGE_HW_TIM_PCI_CFG_CTL_STR vxge_mBIT(31) + u8 unused01100[0x01100-0x01090]; + +/*0x01100*/ u64 sgrp_assign; +#define VXGE_HW_SGRP_ASSIGN_SGRP_ASSIGN(val) vxge_vBIT(val, 0, 64) +/*0x01108*/ u64 sgrp_aoa_and_result; +#define VXGE_HW_SGRP_AOA_AND_RESULT_PET_SGRP_AOA_AND_RESULT(val) \ + vxge_vBIT(val, 0, 64) +/*0x01110*/ u64 rpe_pci_cfg; +#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_DATA_ENABLE vxge_mBIT(7) +#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_HDR_ENABLE vxge_mBIT(8) +#define VXGE_HW_RPE_PCI_CFG_PAD_LRO_CQE_ENABLE vxge_mBIT(9) +#define VXGE_HW_RPE_PCI_CFG_PAD_NONLL_CQE_ENABLE vxge_mBIT(10) +#define VXGE_HW_RPE_PCI_CFG_PAD_BASE_LL_CQE_ENABLE vxge_mBIT(11) +#define VXGE_HW_RPE_PCI_CFG_PAD_LL_CQE_IDATA_ENABLE vxge_mBIT(12) +#define VXGE_HW_RPE_PCI_CFG_PAD_CQRQ_IR_ENABLE vxge_mBIT(13) +#define VXGE_HW_RPE_PCI_CFG_PAD_CQSQ_IR_ENABLE vxge_mBIT(14) +#define VXGE_HW_RPE_PCI_CFG_PAD_CQRR_IR_ENABLE vxge_mBIT(15) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_DATA vxge_mBIT(18) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_NONLL_CQE vxge_mBIT(19) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_LL_CQE vxge_mBIT(20) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRQ_IR vxge_mBIT(21) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQSQ_IR vxge_mBIT(22) +#define VXGE_HW_RPE_PCI_CFG_NOSNOOP_CQRR_IR vxge_mBIT(23) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_DATA vxge_mBIT(26) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_NONLL_CQE vxge_mBIT(27) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_LL_CQE vxge_mBIT(28) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRQ_IR vxge_mBIT(29) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQSQ_IR vxge_mBIT(30) +#define VXGE_HW_RPE_PCI_CFG_RELAXED_CQRR_IR vxge_mBIT(31) +/*0x01118*/ u64 rpe_lro_cfg; +#define VXGE_HW_RPE_LRO_CFG_SUPPRESS_LRO_ETH_TRLR vxge_mBIT(7) +#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_SNAP_SNAPJUMBO_MRG vxge_mBIT(11) +#define VXGE_HW_RPE_LRO_CFG_ALLOW_LRO_LLC_LLCJUMBO_MRG vxge_mBIT(15) +#define VXGE_HW_RPE_LRO_CFG_INCL_ACK_CNT_IN_CQE vxge_mBIT(23) +/*0x01120*/ u64 pe_mr2vp_ack_blk_limit; +#define VXGE_HW_PE_MR2VP_ACK_BLK_LIMIT_BLK_LIMIT(val) vxge_vBIT(val, 32, 32) +/*0x01128*/ u64 pe_mr2vp_rirr_lirr_blk_limit; +#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_RIRR_BLK_LIMIT(val) \ + vxge_vBIT(val, 0, 32) +#define VXGE_HW_PE_MR2VP_RIRR_LIRR_BLK_LIMIT_LIRR_BLK_LIMIT(val) \ + vxge_vBIT(val, 32, 32) +/*0x01130*/ u64 txpe_pci_nce_cfg; +#define VXGE_HW_TXPE_PCI_NCE_CFG_NCE_THRESH(val) vxge_vBIT(val, 0, 32) +#define VXGE_HW_TXPE_PCI_NCE_CFG_PAD_TOWI_ENABLE vxge_mBIT(55) +#define VXGE_HW_TXPE_PCI_NCE_CFG_NOSNOOP_TOWI vxge_mBIT(63) + u8 unused01180[0x01180-0x01138]; + +/*0x01180*/ u64 msg_qpad_en_cfg; +#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_BWR_READ vxge_mBIT(3) +#define VXGE_HW_MSG_QPAD_EN_CFG_DMQ_BWR_READ vxge_mBIT(7) +#define 
VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_READ vxge_mBIT(11) +#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_READ vxge_mBIT(15) +#define VXGE_HW_MSG_QPAD_EN_CFG_UMQ_MSG_WRITE vxge_mBIT(19) +#define VXGE_HW_MSG_QPAD_EN_CFG_UMQDMQ_IR_WRITE vxge_mBIT(23) +#define VXGE_HW_MSG_QPAD_EN_CFG_MXP_GENDMA_WRITE vxge_mBIT(27) +#define VXGE_HW_MSG_QPAD_EN_CFG_UXP_GENDMA_WRITE vxge_mBIT(31) +/*0x01188*/ u64 msg_pci_cfg; +#define VXGE_HW_MSG_PCI_CFG_GENDMA_NO_SNOOP vxge_mBIT(3) +#define VXGE_HW_MSG_PCI_CFG_UMQDMQ_IR_NO_SNOOP vxge_mBIT(7) +#define VXGE_HW_MSG_PCI_CFG_UMQ_NO_SNOOP vxge_mBIT(11) +#define VXGE_HW_MSG_PCI_CFG_DMQ_NO_SNOOP vxge_mBIT(15) +/*0x01190*/ u64 umqdmq_ir_init; +#define VXGE_HW_UMQDMQ_IR_INIT_HOST_WRITE_ADD(val) vxge_vBIT(val, 0, 64) +/*0x01198*/ u64 dmq_ir_int; +#define VXGE_HW_DMQ_IR_INT_IMMED_ENABLE vxge_mBIT(6) +#define VXGE_HW_DMQ_IR_INT_EVENT_ENABLE vxge_mBIT(7) +#define VXGE_HW_DMQ_IR_INT_NUMBER(val) vxge_vBIT(val, 9, 7) +#define VXGE_HW_DMQ_IR_INT_BITMAP(val) vxge_vBIT(val, 16, 16) +/*0x011a0*/ u64 dmq_bwr_init_add; +#define VXGE_HW_DMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64) +/*0x011a8*/ u64 dmq_bwr_init_byte; +#define VXGE_HW_DMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32) +/*0x011b0*/ u64 dmq_ir; +#define VXGE_HW_DMQ_IR_POLICY(val) vxge_vBIT(val, 0, 8) +/*0x011b8*/ u64 umq_int; +#define VXGE_HW_UMQ_INT_IMMED_ENABLE vxge_mBIT(6) +#define VXGE_HW_UMQ_INT_EVENT_ENABLE vxge_mBIT(7) +#define VXGE_HW_UMQ_INT_NUMBER(val) vxge_vBIT(val, 9, 7) +#define VXGE_HW_UMQ_INT_BITMAP(val) vxge_vBIT(val, 16, 16) +/*0x011c0*/ u64 umq_mr2vp_bwr_pfch_init; +#define VXGE_HW_UMQ_MR2VP_BWR_PFCH_INIT_NUMBER(val) vxge_vBIT(val, 0, 8) +/*0x011c8*/ u64 umq_bwr_pfch_ctrl; +#define VXGE_HW_UMQ_BWR_PFCH_CTRL_POLL_EN vxge_mBIT(3) +/*0x011d0*/ u64 umq_mr2vp_bwr_eol; +#define VXGE_HW_UMQ_MR2VP_BWR_EOL_POLL_LATENCY(val) vxge_vBIT(val, 32, 32) +/*0x011d8*/ u64 umq_bwr_init_add; +#define VXGE_HW_UMQ_BWR_INIT_ADD_HOST(val) vxge_vBIT(val, 0, 64) +/*0x011e0*/ u64 umq_bwr_init_byte; +#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32) +/*0x011e8*/ u64 gendma_int; +/*0x011f0*/ u64 umqdmq_ir_init_notify; +#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3) +/*0x011f8*/ u64 dmq_init_notify; +#define VXGE_HW_DMQ_INIT_NOTIFY_PULSE vxge_mBIT(3) +/*0x01200*/ u64 umq_init_notify; +#define VXGE_HW_UMQ_INIT_NOTIFY_PULSE vxge_mBIT(3) + u8 unused01380[0x01380-0x01208]; + +/*0x01380*/ u64 tpa_cfg; +#define VXGE_HW_TPA_CFG_IGNORE_FRAME_ERR vxge_mBIT(3) +#define VXGE_HW_TPA_CFG_IPV6_STOP_SEARCHING vxge_mBIT(7) +#define VXGE_HW_TPA_CFG_L4_PSHDR_PRESENT vxge_mBIT(11) +#define VXGE_HW_TPA_CFG_SUPPORT_MOBILE_IPV6_HDRS vxge_mBIT(15) + u8 unused01400[0x01400-0x01388]; + +/*0x01400*/ u64 tx_vp_reset_discarded_frms; +#define VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_TX_VP_RESET_DISCARDED_FRMS(val) \ + vxge_vBIT(val, 48, 16) + u8 unused01480[0x01480-0x01408]; + +/*0x01480*/ u64 fau_rpa_vcfg; +#define VXGE_HW_FAU_RPA_VCFG_L4_COMP_CSUM vxge_mBIT(7) +#define VXGE_HW_FAU_RPA_VCFG_L3_INCL_CF vxge_mBIT(11) +#define VXGE_HW_FAU_RPA_VCFG_L3_COMP_CSUM vxge_mBIT(15) + u8 unused014d0[0x014d0-0x01488]; + +/*0x014d0*/ u64 dbg_stats_rx_mpa; +#define VXGE_HW_DBG_STATS_RX_MPA_CRC_FAIL_FRMS(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DBG_STATS_RX_MPA_MRK_FAIL_FRMS(val) vxge_vBIT(val, 16, 16) +#define VXGE_HW_DBG_STATS_RX_MPA_LEN_FAIL_FRMS(val) vxge_vBIT(val, 32, 16) +/*0x014d8*/ u64 dbg_stats_rx_fau; +#define VXGE_HW_DBG_STATS_RX_FAU_RX_WOL_FRMS(val) vxge_vBIT(val, 0, 16) +#define VXGE_HW_DBG_STATS_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val) \ + 
vxge_vBIT(val, 16, 16) +#define VXGE_HW_DBG_STATS_RX_FAU_RX_PERMITTED_FRMS(val) \ + vxge_vBIT(val, 32, 32) + u8 unused014f0[0x014f0-0x014e0]; + +/*0x014f0*/ u64 fbmc_vp_rdy; +#define VXGE_HW_FBMC_VP_RDY_QUEUE_SPAV_FM vxge_mBIT(0) + u8 unused01e00[0x01e00-0x014f8]; + +/*0x01e00*/ u64 vpath_pcipif_int_status; +#define \ +VXGE_HW_VPATH_PCIPIF_INT_STATUS_SRPCIM_MSG_TO_VPATH_SRPCIM_MSG_TO_VPATH_INT \ + vxge_mBIT(3) +#define VXGE_HW_VPATH_PCIPIF_INT_STATUS_VPATH_SPARE_R1_VPATH_SPARE_R1_INT \ + vxge_mBIT(7) +/*0x01e08*/ u64 vpath_pcipif_int_mask; + u8 unused01e20[0x01e20-0x01e10]; + +/*0x01e20*/ u64 srpcim_msg_to_vpath_reg; +#define VXGE_HW_SRPCIM_MSG_TO_VPATH_REG_SWIF_SRPCIM_TO_VPATH_RMSG_INT \ + vxge_mBIT(3) +/*0x01e28*/ u64 srpcim_msg_to_vpath_mask; +/*0x01e30*/ u64 srpcim_msg_to_vpath_alarm; + u8 unused01ea0[0x01ea0-0x01e38]; + +/*0x01ea0*/ u64 vpath_to_srpcim_wmsg; +#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_VPATH_TO_SRPCIM_WMSG(val) \ + vxge_vBIT(val, 0, 64) +/*0x01ea8*/ u64 vpath_to_srpcim_wmsg_trig; +#define VXGE_HW_VPATH_TO_SRPCIM_WMSG_TRIG_VPATH_TO_SRPCIM_WMSG_TRIG \ + vxge_mBIT(0) + u8 unused02000[0x02000-0x01eb0]; + +/*0x02000*/ u64 vpath_general_int_status; +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT vxge_mBIT(3) +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT vxge_mBIT(7) +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT vxge_mBIT(15) +#define VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT vxge_mBIT(19) +/*0x02008*/ u64 vpath_general_int_mask; +#define VXGE_HW_VPATH_GENERAL_INT_MASK_PIC_INT vxge_mBIT(3) +#define VXGE_HW_VPATH_GENERAL_INT_MASK_PCI_INT vxge_mBIT(7) +#define VXGE_HW_VPATH_GENERAL_INT_MASK_WRDMA_INT vxge_mBIT(15) +#define VXGE_HW_VPATH_GENERAL_INT_MASK_XMAC_INT vxge_mBIT(19) +/*0x02010*/ u64 vpath_ppif_int_status; +#define VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT \ + vxge_mBIT(3) +#define VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT \ + vxge_mBIT(7) +#define VXGE_HW_VPATH_PPIF_INT_STATUS_PCI_CONFIG_ERRORS_PCI_CONFIG_INT \ + vxge_mBIT(11) +#define \ +VXGE_HW_VPATH_PPIF_INT_STATUS_MRPCIM_TO_VPATH_ALARM_MRPCIM_TO_VPATH_ALARM_INT \ + vxge_mBIT(15) +#define \ +VXGE_HW_VPATH_PPIF_INT_STATUS_SRPCIM_TO_VPATH_ALARM_SRPCIM_TO_VPATH_ALARM_INT \ + vxge_mBIT(19) +/*0x02018*/ u64 vpath_ppif_int_mask; +/*0x02020*/ u64 kdfcctl_errors_reg; +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR vxge_mBIT(3) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR vxge_mBIT(7) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR vxge_mBIT(11) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON vxge_mBIT(15) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON vxge_mBIT(19) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON vxge_mBIT(23) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR vxge_mBIT(31) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR vxge_mBIT(35) +#define VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR vxge_mBIT(39) +/*0x02028*/ u64 kdfcctl_errors_mask; +/*0x02030*/ u64 kdfcctl_errors_alarm; + u8 unused02040[0x02040-0x02038]; + +/*0x02040*/ u64 general_errors_reg; +#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW vxge_mBIT(3) +#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW vxge_mBIT(7) +#define VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW vxge_mBIT(11) +#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR vxge_mBIT(15) +#define VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ vxge_mBIT(19) +#define VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS vxge_mBIT(27) +#define 
VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET vxge_mBIT(31) +/*0x02048*/ u64 general_errors_mask; +/*0x02050*/ u64 general_errors_alarm; +/*0x02058*/ u64 pci_config_errors_reg; +#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_STATUS_ERR vxge_mBIT(3) +#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_UNCOR_ERR vxge_mBIT(7) +#define VXGE_HW_PCI_CONFIG_ERRORS_REG_PCICONFIG_COR_ERR vxge_mBIT(11) +/*0x02060*/ u64 pci_config_errors_mask; +/*0x02068*/ u64 pci_config_errors_alarm; +/*0x02070*/ u64 mrpcim_to_vpath_alarm_reg; +#define VXGE_HW_MRPCIM_TO_VPATH_ALARM_REG_PPIF_MRPCIM_TO_VPATH_ALARM \ + vxge_mBIT(3) +/*0x02078*/ u64 mrpcim_to_vpath_alarm_mask; +/*0x02080*/ u64 mrpcim_to_vpath_alarm_alarm; +/*0x02088*/ u64 srpcim_to_vpath_alarm_reg; +#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_PPIF_SRPCIM_TO_VPATH_ALARM(val) \ + vxge_vBIT(val, 0, 17) +/*0x02090*/ u64 srpcim_to_vpath_alarm_mask; +/*0x02098*/ u64 srpcim_to_vpath_alarm_alarm; + u8 unused02108[0x02108-0x020a0]; + +/*0x02108*/ u64 kdfcctl_status; +#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_PRES(val) vxge_vBIT(val, 0, 8) +#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_PRES(val) vxge_vBIT(val, 8, 8) +#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_PRES(val) vxge_vBIT(val, 16, 8) +#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO0_OVRWR(val) vxge_vBIT(val, 24, 8) +#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO1_OVRWR(val) vxge_vBIT(val, 32, 8) +#define VXGE_HW_KDFCCTL_STATUS_KDFCCTL_FIFO2_OVRWR(val) vxge_vBIT(val, 40, 8) +/*0x02110*/ u64 rsthdlr_status; +#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_RESET vxge_mBIT(3) +#define VXGE_HW_RSTHDLR_STATUS_RSTHDLR_CURRENT_VPIN(val) vxge_vBIT(val, 6, 2) +/*0x02118*/ u64 fifo0_status; +#define VXGE_HW_FIFO0_STATUS_DBLGEN_FIFO0_RDIDX(val) vxge_vBIT(val, 0, 12) +/*0x02120*/ u64 fifo1_status; +#define VXGE_HW_FIFO1_STATUS_DBLGEN_FIFO1_RDIDX(val) vxge_vBIT(val, 0, 12) +/*0x02128*/ u64 fifo2_status; +#define VXGE_HW_FIFO2_STATUS_DBLGEN_FIFO2_RDIDX(val) vxge_vBIT(val, 0, 12) + u8 unused02158[0x02158-0x02130]; + +/*0x02158*/ u64 tgt_illegal_access; +#define VXGE_HW_TGT_ILLEGAL_ACCESS_SWIF_REGION(val) vxge_vBIT(val, 1, 7) + u8 unused02200[0x02200-0x02160]; + +/*0x02200*/ u64 vpath_general_cfg1; +#define VXGE_HW_VPATH_GENERAL_CFG1_TC_VALUE(val) vxge_vBIT(val, 1, 3) +#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_BYTE_SWAPEN vxge_mBIT(7) +#define VXGE_HW_VPATH_GENERAL_CFG1_DATA_FLIPEN vxge_mBIT(11) +#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN vxge_mBIT(15) +#define VXGE_HW_VPATH_GENERAL_CFG1_CTL_FLIPEN vxge_mBIT(23) +#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_SWAPEN vxge_mBIT(51) +#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_ADDR_FLIPEN vxge_mBIT(55) +#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_SWAPEN vxge_mBIT(59) +#define VXGE_HW_VPATH_GENERAL_CFG1_MSIX_DATA_FLIPEN vxge_mBIT(63) +/*0x02208*/ u64 vpath_general_cfg2; +#define VXGE_HW_VPATH_GENERAL_CFG2_SIZE_QUANTUM(val) vxge_vBIT(val, 1, 3) +/*0x02210*/ u64 vpath_general_cfg3; +#define VXGE_HW_VPATH_GENERAL_CFG3_IGNORE_VPATH_RST_FOR_INTA vxge_mBIT(3) + u8 unused02220[0x02220-0x02218]; + +/*0x02220*/ u64 kdfcctl_cfg0; +#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 vxge_mBIT(1) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 vxge_mBIT(2) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2 vxge_mBIT(3) +#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO0 vxge_mBIT(5) +#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO1 vxge_mBIT(6) +#define VXGE_HW_KDFCCTL_CFG0_BIT_FLIPEN_FIFO2 vxge_mBIT(7) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO0 vxge_mBIT(9) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO1 
vxge_mBIT(10) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE0_FIFO2 vxge_mBIT(11) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO0 vxge_mBIT(13) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO1 vxge_mBIT(14) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE1_FIFO2 vxge_mBIT(15) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO0 vxge_mBIT(17) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO1 vxge_mBIT(18) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE2_FIFO2 vxge_mBIT(19) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO0 vxge_mBIT(21) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO1 vxge_mBIT(22) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE3_FIFO2 vxge_mBIT(23) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO0 vxge_mBIT(25) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO1 vxge_mBIT(26) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE4_FIFO2 vxge_mBIT(27) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO0 vxge_mBIT(29) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO1 vxge_mBIT(30) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE5_FIFO2 vxge_mBIT(31) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO0 vxge_mBIT(33) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO1 vxge_mBIT(34) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE6_FIFO2 vxge_mBIT(35) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO0 vxge_mBIT(37) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO1 vxge_mBIT(38) +#define VXGE_HW_KDFCCTL_CFG0_BYTE_MASK_BYTE7_FIFO2 vxge_mBIT(39) + + u8 unused02268[0x02268-0x02228]; + +/*0x02268*/ u64 stats_cfg; +#define VXGE_HW_STATS_CFG_START_HOST_ADDR(val) vxge_vBIT(val, 0, 57) +/*0x02270*/ u64 interrupt_cfg0; +#define VXGE_HW_INTERRUPT_CFG0_MSIX_FOR_RXTI(val) vxge_vBIT(val, 1, 7) +#define VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(val) vxge_vBIT(val, 9, 7) +#define VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(val) vxge_vBIT(val, 17, 7) +#define VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(val) vxge_vBIT(val, 25, 7) +#define VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(val) vxge_vBIT(val, 33, 7) + u8 unused02280[0x02280-0x02278]; + +/*0x02280*/ u64 interrupt_cfg2; +#define VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(val) vxge_vBIT(val, 1, 7) +/*0x02288*/ u64 one_shot_vect0_en; +#define VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN vxge_mBIT(3) +/*0x02290*/ u64 one_shot_vect1_en; +#define VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN vxge_mBIT(3) +/*0x02298*/ u64 one_shot_vect2_en; +#define VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN vxge_mBIT(3) +/*0x022a0*/ u64 one_shot_vect3_en; +#define VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN vxge_mBIT(3) + u8 unused022b0[0x022b0-0x022a8]; + +/*0x022b0*/ u64 pci_config_access_cfg1; +#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(val) vxge_vBIT(val, 0, 12) +#define VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0 vxge_mBIT(15) +/*0x022b8*/ u64 pci_config_access_cfg2; +#define VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ vxge_mBIT(0) +/*0x022c0*/ u64 pci_config_access_status; +#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR vxge_mBIT(0) +#define VXGE_HW_PCI_CONFIG_ACCESS_STATUS_DATA(val) vxge_vBIT(val, 32, 32) + u8 unused02300[0x02300-0x022c8]; + +/*0x02300*/ u64 vpath_debug_stats0; +#define VXGE_HW_VPATH_DEBUG_STATS0_INI_NUM_MWR_SENT(val) vxge_vBIT(val, 0, 32) +/*0x02308*/ u64 vpath_debug_stats1; +#define VXGE_HW_VPATH_DEBUG_STATS1_INI_NUM_MRD_SENT(val) vxge_vBIT(val, 0, 32) +/*0x02310*/ u64 vpath_debug_stats2; +#define VXGE_HW_VPATH_DEBUG_STATS2_INI_NUM_CPL_RCVD(val) vxge_vBIT(val, 0, 32) +/*0x02318*/ u64 vpath_debug_stats3; +#define 
VXGE_HW_VPATH_DEBUG_STATS3_INI_NUM_MWR_BYTE_SENT(val) \ + vxge_vBIT(val, 0, 64) +/*0x02320*/ u64 vpath_debug_stats4; +#define VXGE_HW_VPATH_DEBUG_STATS4_INI_NUM_CPL_BYTE_RCVD(val) \ + vxge_vBIT(val, 0, 64) +/*0x02328*/ u64 vpath_debug_stats5; +#define VXGE_HW_VPATH_DEBUG_STATS5_WRCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32) +/*0x02330*/ u64 vpath_debug_stats6; +#define VXGE_HW_VPATH_DEBUG_STATS6_RDCRDTARB_XOFF(val) vxge_vBIT(val, 32, 32) +/*0x02338*/ u64 vpath_genstats_count01; +#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT1(val) \ + vxge_vBIT(val, 0, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT01_PPIF_VPATH_GENSTATS_COUNT0(val) \ + vxge_vBIT(val, 32, 32) +/*0x02340*/ u64 vpath_genstats_count23; +#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT3(val) \ + vxge_vBIT(val, 0, 32) +#define VXGE_HW_VPATH_GENSTATS_COUNT23_PPIF_VPATH_GENSTATS_COUNT2(val) \ + vxge_vBIT(val, 32, 32) +/*0x02348*/ u64 vpath_genstats_count4; +#define VXGE_HW_VPATH_GENSTATS_COUNT4_PPIF_VPATH_GENSTATS_COUNT4(val) \ + vxge_vBIT(val, 32, 32) +/*0x02350*/ u64 vpath_genstats_count5; +#define VXGE_HW_VPATH_GENSTATS_COUNT5_PPIF_VPATH_GENSTATS_COUNT5(val) \ + vxge_vBIT(val, 32, 32) + u8 unused02648[0x02648-0x02358]; +} __packed; + +#define VXGE_HW_EEPROM_SIZE (0x01 << 11) + +/* Capability lists */ +#define VXGE_HW_PCI_EXP_LNKCAP_LNK_SPEED 0xf /* Supported Link speeds */ +#define VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH 0x3f0 /* Supported Link speeds. */ +#define VXGE_HW_PCI_EXP_LNKCAP_LW_RES 0x0 /* Reserved. */ + +#endif diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c new file mode 100644 index 000000000..9e1aaa7f3 --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c @@ -0,0 +1,2480 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2010 Exar Corp. + ******************************************************************************/ +#include +#include + +#include "vxge-traffic.h" +#include "vxge-config.h" +#include "vxge-main.h" + +/* + * vxge_hw_vpath_intr_enable - Enable vpath interrupts. + * @vp: Virtual Path handle. + * + * Enable vpath interrupts. The function is to be executed the last in + * vpath initialization sequence. 
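
The function body that follows funnels every 64-bit mask constant through (u32)vxge_bVALn(val64, 0, 32) before handing it to __vxge_hw_pio_mem_write32_upper(). With the MSB-first bit numbering used throughout these headers, that expression is simply the upper 32 bits of the value, i.e. device bit positions 0-31, which is where most of the alarm and mask bits defined earlier sit. A minimal standalone check of that equivalence, with the helper restated as an assumption and sample values chosen arbitrarily:

#include <assert.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Assumed helper definition (MSB-first bit numbering). */
#define vxge_bVALn(bits, loc, n) \
        ((((u64)(bits)) >> (64 - ((loc) + (n)))) & ((0x1ULL << (n)) - 1))

int main(void)
{
        u64 mask_all = 0xffffffffffffffffULL;   /* stand-in for VXGE_HW_INTR_MASK_ALL */
        u64 some_reg = 0x1234567890abcdefULL;   /* arbitrary sample value             */

        /* Device bits 0..31 are just the upper 32 bits of the 64-bit word. */
        assert((u32)vxge_bVALn(some_reg, 0, 32) == (u32)(some_reg >> 32));
        assert((u32)vxge_bVALn(mask_all, 0, 32) == 0xffffffffu);
        return 0;
}
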
+ * + * See also: vxge_hw_vpath_intr_disable() + */ +enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp) +{ + u64 val64; + + struct __vxge_hw_virtualpath *vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg; + enum vxge_hw_status status = VXGE_HW_OK; + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + + vp_reg = vpath->vp_reg; + + writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->general_errors_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->pci_config_errors_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->mrpcim_to_vpath_alarm_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->srpcim_to_vpath_alarm_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->vpath_ppif_int_status); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->srpcim_msg_to_vpath_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->vpath_pcipif_int_status); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->prc_alarm_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->wrdma_alarm_status); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->asic_ntwk_vp_err_reg); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->xgmac_vp_int_status); + + val64 = readq(&vp_reg->vpath_general_int_status); + + /* Mask unwanted interrupts */ + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->vpath_pcipif_int_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->srpcim_msg_to_vpath_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->srpcim_to_vpath_alarm_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->mrpcim_to_vpath_alarm_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->pci_config_errors_mask); + + /* Unmask the individual interrupts */ + + writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW| + VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW| + VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ| + VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32), + &vp_reg->general_errors_mask); + + __vxge_hw_pio_mem_write32_upper( + (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR| + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR| + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON| + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON| + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR| + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32), + &vp_reg->kdfcctl_errors_mask); + + __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask); + + __vxge_hw_pio_mem_write32_upper( + (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32), + &vp_reg->prc_alarm_mask); + + __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask); + __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask); + + if (vpath->hldev->first_vp_id != vpath->vp_id) + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->asic_ntwk_vp_err_mask); + else + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(( + 
VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT | + VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32), + &vp_reg->asic_ntwk_vp_err_mask); + + __vxge_hw_pio_mem_write32_upper(0, + &vp_reg->vpath_general_int_mask); +exit: + return status; + +} + +/* + * vxge_hw_vpath_intr_disable - Disable vpath interrupts. + * @vp: Virtual Path handle. + * + * Disable vpath interrupts. The function is to be executed the last in + * vpath initialization sequence. + * + * See also: vxge_hw_vpath_intr_enable() + */ +enum vxge_hw_status vxge_hw_vpath_intr_disable( + struct __vxge_hw_vpath_handle *vp) +{ + u64 val64; + + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + struct vxge_hw_vpath_reg __iomem *vp_reg; + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + + if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { + status = VXGE_HW_ERR_VPATH_NOT_OPEN; + goto exit; + } + vp_reg = vpath->vp_reg; + + __vxge_hw_pio_mem_write32_upper( + (u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->vpath_general_int_mask); + + val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id)); + + writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->general_errors_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->pci_config_errors_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->mrpcim_to_vpath_alarm_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->srpcim_to_vpath_alarm_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->vpath_ppif_int_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->srpcim_msg_to_vpath_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->vpath_pcipif_int_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->wrdma_alarm_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->prc_alarm_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->xgmac_vp_int_mask); + + __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, + &vp_reg->asic_ntwk_vp_err_mask); + +exit: + return status; +} + +void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo) +{ + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct vxge_hw_vp_config *config; + u64 val64; + + if (fifo->config->enable != VXGE_HW_FIFO_ENABLE) + return; + + vp_reg = fifo->vp_reg; + config = container_of(fifo->config, struct vxge_hw_vp_config, fifo); + + if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { + config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; + val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + fifo->tim_tti_cfg1_saved = val64; + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + } +} + +void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring) +{ + u64 val64 = ring->tim_rti_cfg1_saved; + + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + ring->tim_rti_cfg1_saved = val64; + writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); +} + +void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo) +{ + u64 val64 = fifo->tim_tti_cfg3_saved; + u64 timer = (fifo->rtimer * 1000) / 272; + + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); + if (timer) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) 
| + VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5); + + writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); + /* tti_cfg3_saved is not updated again because it is + * initialized at one place only - init time. + */ +} + +void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring) +{ + u64 val64 = ring->tim_rti_cfg3_saved; + u64 timer = (ring->rtimer * 1000) / 272; + + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); + if (timer) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | + VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4); + + writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); + /* rti_cfg3_saved is not updated again because it is + * initialized at one place only - init time. + */ +} + +/** + * vxge_hw_channel_msix_mask - Mask MSIX Vector. + * @channeh: Channel for rx or tx handle + * @msix_id: MSIX ID + * + * The function masks the msix interrupt for the given msix_id + * + * Returns: 0 + */ +void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id) +{ + + __vxge_hw_pio_mem_write32_upper( + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), + &channel->common_reg->set_msix_mask_vect[msix_id%4]); +} + +/** + * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector. + * @channeh: Channel for rx or tx handle + * @msix_id: MSI ID + * + * The function unmasks the msix interrupt for the given msix_id + * + * Returns: 0 + */ +void +vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id) +{ + + __vxge_hw_pio_mem_write32_upper( + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), + &channel->common_reg->clear_msix_mask_vect[msix_id%4]); +} + +/** + * vxge_hw_channel_msix_clear - Unmask the MSIX Vector. + * @channel: Channel for rx or tx handle + * @msix_id: MSI ID + * + * The function unmasks the msix interrupt for the given msix_id + * if configured in MSIX oneshot mode + * + * Returns: 0 + */ +void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id) +{ + __vxge_hw_pio_mem_write32_upper( + (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), + &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]); +} + +/** + * vxge_hw_device_set_intr_type - Updates the configuration + * with new interrupt type. + * @hldev: HW device handle. + * @intr_mode: New interrupt type + */ +u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode) +{ + + if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && + (intr_mode != VXGE_HW_INTR_MODE_MSIX) && + (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && + (intr_mode != VXGE_HW_INTR_MODE_DEF)) + intr_mode = VXGE_HW_INTR_MODE_IRQLINE; + + hldev->config.intr_mode = intr_mode; + return intr_mode; +} + +/** + * vxge_hw_device_intr_enable - Enable interrupts. + * @hldev: HW device handle. + * @op: One of the enum vxge_hw_device_intr enumerated values specifying + * the type(s) of interrupts to enable. + * + * Enable Titan interrupts. The function is to be executed the last in + * Titan initialization sequence. 
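
The MSI-X helpers above spread vector numbers across four set/clear mask registers: vector msix_id is directed at array slot msix_id % 4, at MSB-first bit position msix_id >> 2, and only the upper 32 bits of that single-bit mask are actually written. The small sketch below reproduces just the index arithmetic (no MMIO), which can be handy when cross-checking which register a given vector toggles; the vector numbers are arbitrary examples and the helper name is hypothetical.

#include <stdio.h>

/* Same index math as vxge_hw_channel_msix_mask()/unmask()/clear(). */
static void msix_slot(int msix_id, int *reg_idx, int *bit_pos)
{
        *reg_idx = msix_id % 4;         /* which {set,clear}_msix_mask_vect[] entry */
        *bit_pos = msix_id >> 2;        /* MSB-first bit within that register       */
}

int main(void)
{
        int ids[] = { 0, 1, 5, 17 };

        for (int i = 0; i < 4; i++) {
                int reg, bit;

                msix_slot(ids[i], &reg, &bit);
                printf("msix_id %2d -> vect[%d], device bit %d\n",
                       ids[i], reg, bit);
        }
        return 0;
}
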
+ * + * See also: vxge_hw_device_intr_disable() + */ +void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev) +{ + u32 i; + u64 val64; + u32 val32; + + vxge_hw_device_mask_all(hldev); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpaths_deployed & vxge_mBIT(i))) + continue; + + vxge_hw_vpath_intr_enable( + VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i])); + } + + if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) { + val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | + hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]; + + if (val64 != 0) { + writeq(val64, &hldev->common_reg->tim_int_status0); + + writeq(~val64, &hldev->common_reg->tim_int_mask0); + } + + val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | + hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]; + + if (val32 != 0) { + __vxge_hw_pio_mem_write32_upper(val32, + &hldev->common_reg->tim_int_status1); + + __vxge_hw_pio_mem_write32_upper(~val32, + &hldev->common_reg->tim_int_mask1); + } + } + + val64 = readq(&hldev->common_reg->titan_general_int_status); + + vxge_hw_device_unmask_all(hldev); +} + +/** + * vxge_hw_device_intr_disable - Disable Titan interrupts. + * @hldev: HW device handle. + * @op: One of the enum vxge_hw_device_intr enumerated values specifying + * the type(s) of interrupts to disable. + * + * Disable Titan interrupts. + * + * See also: vxge_hw_device_intr_enable() + */ +void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev) +{ + u32 i; + + vxge_hw_device_mask_all(hldev); + + /* mask all the tim interrupts */ + writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0); + __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32, + &hldev->common_reg->tim_int_mask1); + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpaths_deployed & vxge_mBIT(i))) + continue; + + vxge_hw_vpath_intr_disable( + VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i])); + } +} + +/** + * vxge_hw_device_mask_all - Mask all device interrupts. + * @hldev: HW device handle. + * + * Mask all device interrupts. + * + * See also: vxge_hw_device_unmask_all() + */ +void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev) +{ + u64 val64; + + val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM | + VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC; + + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), + &hldev->common_reg->titan_mask_all_int); +} + +/** + * vxge_hw_device_unmask_all - Unmask all device interrupts. + * @hldev: HW device handle. + * + * Unmask all device interrupts. + * + * See also: vxge_hw_device_mask_all() + */ +void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev) +{ + u64 val64 = 0; + + if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) + val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC; + + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), + &hldev->common_reg->titan_mask_all_int); +} + +/** + * vxge_hw_device_flush_io - Flush io writes. + * @hldev: HW device handle. + * + * The function performs a read operation to flush io writes. + * + * Returns: void + */ +void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev) +{ + u32 val32; + + val32 = readl(&hldev->common_reg->titan_general_int_status); +} + +/** + * __vxge_hw_device_handle_error - Handle error + * @hldev: HW device + * @vp_id: Vpath Id + * @type: Error type. Please see enum vxge_hw_event{} + * + * Handle error. 
+ */ +static enum vxge_hw_status +__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id, + enum vxge_hw_event type) +{ + switch (type) { + case VXGE_HW_EVENT_UNKNOWN: + break; + case VXGE_HW_EVENT_RESET_START: + case VXGE_HW_EVENT_RESET_COMPLETE: + case VXGE_HW_EVENT_LINK_DOWN: + case VXGE_HW_EVENT_LINK_UP: + goto out; + case VXGE_HW_EVENT_ALARM_CLEARED: + goto out; + case VXGE_HW_EVENT_ECCERR: + case VXGE_HW_EVENT_MRPCIM_ECCERR: + goto out; + case VXGE_HW_EVENT_FIFO_ERR: + case VXGE_HW_EVENT_VPATH_ERR: + case VXGE_HW_EVENT_CRITICAL_ERR: + case VXGE_HW_EVENT_SERR: + break; + case VXGE_HW_EVENT_SRPCIM_SERR: + case VXGE_HW_EVENT_MRPCIM_SERR: + goto out; + case VXGE_HW_EVENT_SLOT_FREEZE: + break; + default: + vxge_assert(0); + goto out; + } + + /* notify driver */ + if (hldev->uld_callbacks->crit_err) + hldev->uld_callbacks->crit_err(hldev, + type, vp_id); +out: + + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_handle_link_down_ind + * @hldev: HW device handle. + * + * Link down indication handler. The function is invoked by HW when + * Titan indicates that the link is down. + */ +static enum vxge_hw_status +__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) +{ + /* + * If the previous link state is not down, return. + */ + if (hldev->link_state == VXGE_HW_LINK_DOWN) + goto exit; + + hldev->link_state = VXGE_HW_LINK_DOWN; + + /* notify driver */ + if (hldev->uld_callbacks->link_down) + hldev->uld_callbacks->link_down(hldev); +exit: + return VXGE_HW_OK; +} + +/* + * __vxge_hw_device_handle_link_up_ind + * @hldev: HW device handle. + * + * Link up indication handler. The function is invoked by HW when + * Titan indicates that the link is up for programmable amount of time. + */ +static enum vxge_hw_status +__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) +{ + /* + * If the previous link state is not down, return. + */ + if (hldev->link_state == VXGE_HW_LINK_UP) + goto exit; + + hldev->link_state = VXGE_HW_LINK_UP; + + /* notify driver */ + if (hldev->uld_callbacks->link_up) + hldev->uld_callbacks->link_up(hldev); +exit: + return VXGE_HW_OK; +} + +/* + * __vxge_hw_vpath_alarm_process - Process Alarms. + * @vpath: Virtual Path. + * @skip_alarms: Do not clear the alarms + * + * Process vpath alarms. 
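
The alarm-processing code that follows folds each detected condition into alarm_event with VXGE_HW_SET_LEVEL(). Its definition is not part of this file; the sketch below assumes it is a simple "keep the more severe of the two" maximum over the event enumeration, which is consistent with how the result is consumed here and in vxge_hw_device_begin_irq(). The demo_* enum is a stand-in severity scale for illustration only, not the driver's enum vxge_hw_event.

#include <assert.h>

/* Assumed definition: keep whichever of the two levels is more severe,
 * relying on the enum ordering (higher value == more severe). */
#define VXGE_HW_SET_LEVEL(a, b)  (((a) > (b)) ? (a) : (b))

/* Stand-in severity scale, least to most severe. */
enum demo_event { DEMO_UNKNOWN, DEMO_ALARM_CLEARED, DEMO_FIFO_ERR, DEMO_SERR };

int main(void)
{
        enum demo_event level = DEMO_UNKNOWN;

        /* Fold a sequence of per-check results, as the alarm loop does. */
        level = VXGE_HW_SET_LEVEL(DEMO_ALARM_CLEARED, level);
        level = VXGE_HW_SET_LEVEL(DEMO_FIFO_ERR, level);
        level = VXGE_HW_SET_LEVEL(DEMO_ALARM_CLEARED, level);

        assert(level == DEMO_FIFO_ERR); /* the most severe result wins */
        return 0;
}
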
+ * + */ +static enum vxge_hw_status +__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, + u32 skip_alarms) +{ + u64 val64; + u64 alarm_status; + u64 pic_status; + struct __vxge_hw_device *hldev = NULL; + enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; + u64 mask64; + struct vxge_hw_vpath_stats_sw_info *sw_stats; + struct vxge_hw_vpath_reg __iomem *vp_reg; + + if (vpath == NULL) { + alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, + alarm_event); + goto out2; + } + + hldev = vpath->hldev; + vp_reg = vpath->vp_reg; + alarm_status = readq(&vp_reg->vpath_general_int_status); + + if (alarm_status == VXGE_HW_ALL_FOXES) { + alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, + alarm_event); + goto out; + } + + sw_stats = vpath->sw_stats; + + if (alarm_status & ~( + VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | + VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | + VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | + VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { + sw_stats->error_stats.unknown_alarms++; + + alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, + alarm_event); + goto out; + } + + if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { + + val64 = readq(&vp_reg->xgmac_vp_int_status); + + if (val64 & + VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { + + val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); + + if (((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || + ((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) + ))) { + sw_stats->error_stats.network_sustained_fault++; + + writeq( + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, + &vp_reg->asic_ntwk_vp_err_mask); + + __vxge_hw_device_handle_link_down_ind(hldev); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_LINK_DOWN, alarm_event); + } + + if (((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || + ((val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && + (!(val64 & + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) + ))) { + + sw_stats->error_stats.network_sustained_ok++; + + writeq( + VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, + &vp_reg->asic_ntwk_vp_err_mask); + + __vxge_hw_device_handle_link_up_ind(hldev); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_LINK_UP, alarm_event); + } + + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->asic_ntwk_vp_err_reg); + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); + + if (skip_alarms) + return VXGE_HW_OK; + } + } + + if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { + + pic_status = readq(&vp_reg->vpath_ppif_int_status); + + if (pic_status & + VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { + + val64 = readq(&vp_reg->general_errors_reg); + mask64 = readq(&vp_reg->general_errors_mask); + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & + ~mask64) { + sw_stats->error_stats.ini_serr_det++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_SERR, alarm_event); + } + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & + ~mask64) { + sw_stats->error_stats.dblgen_fifo0_overflow++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, alarm_event); + } + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & + ~mask64) + sw_stats->error_stats.statsb_pif_chain_error++; + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & + 
~mask64) + sw_stats->error_stats.statsb_drop_timeout++; + + if ((val64 & + VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & + ~mask64) + sw_stats->error_stats.target_illegal_access++; + + if (!skip_alarms) { + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->general_errors_reg); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, + alarm_event); + } + } + + if (pic_status & + VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { + + val64 = readq(&vp_reg->kdfcctl_errors_reg); + mask64 = readq(&vp_reg->kdfcctl_errors_mask); + + if ((val64 & + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & + ~mask64) { + sw_stats->error_stats.kdfcctl_fifo0_overwrite++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, + alarm_event); + } + + if ((val64 & + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & + ~mask64) { + sw_stats->error_stats.kdfcctl_fifo0_poison++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, + alarm_event); + } + + if ((val64 & + VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & + ~mask64) { + sw_stats->error_stats.kdfcctl_fifo0_dma_error++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_FIFO_ERR, + alarm_event); + } + + if (!skip_alarms) { + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->kdfcctl_errors_reg); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, + alarm_event); + } + } + + } + + if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { + + val64 = readq(&vp_reg->wrdma_alarm_status); + + if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { + + val64 = readq(&vp_reg->prc_alarm_reg); + mask64 = readq(&vp_reg->prc_alarm_mask); + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)& + ~mask64) + sw_stats->error_stats.prc_ring_bumps++; + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & + ~mask64) { + sw_stats->error_stats.prc_rxdcm_sc_err++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_VPATH_ERR, + alarm_event); + } + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) + & ~mask64) { + sw_stats->error_stats.prc_rxdcm_sc_abort++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_VPATH_ERR, + alarm_event); + } + + if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) + & ~mask64) { + sw_stats->error_stats.prc_quanta_size_err++; + + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_VPATH_ERR, + alarm_event); + } + + if (!skip_alarms) { + writeq(VXGE_HW_INTR_MASK_ALL, + &vp_reg->prc_alarm_reg); + alarm_event = VXGE_HW_SET_LEVEL( + VXGE_HW_EVENT_ALARM_CLEARED, + alarm_event); + } + } + } +out: + hldev->stats.sw_dev_err_stats.vpath_alarms++; +out2: + if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || + (alarm_event == VXGE_HW_EVENT_UNKNOWN)) + return VXGE_HW_OK; + + __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); + + if (alarm_event == VXGE_HW_EVENT_SERR) + return VXGE_HW_ERR_CRITICAL; + + return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? + VXGE_HW_ERR_SLOT_FREEZE : + (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : + VXGE_HW_ERR_VPATH; +} + +/** + * vxge_hw_device_begin_irq - Begin IRQ processing. + * @hldev: HW device handle. + * @skip_alarms: Do not clear the alarms + * @reason: "Reason" for the interrupt, the value of Titan's + * general_int_status register. + * + * The function performs two actions, It first checks whether (shared IRQ) the + * interrupt was raised by the device. Next, it masks the device interrupts. + * + * Note: + * vxge_hw_device_begin_irq() does not flush MMIO writes through the + * bridge. 
Therefore, two back-to-back interrupts are potentially possible. + * + * Returns: 0, if the interrupt is not "ours" (note that in this case the + * device remain enabled). + * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter + * status. + */ +enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev, + u32 skip_alarms, u64 *reason) +{ + u32 i; + u64 val64; + u64 adapter_status; + u64 vpath_mask; + enum vxge_hw_status ret = VXGE_HW_OK; + + val64 = readq(&hldev->common_reg->titan_general_int_status); + + if (unlikely(!val64)) { + /* not Titan interrupt */ + *reason = 0; + ret = VXGE_HW_ERR_WRONG_IRQ; + goto exit; + } + + if (unlikely(val64 == VXGE_HW_ALL_FOXES)) { + + adapter_status = readq(&hldev->common_reg->adapter_status); + + if (adapter_status == VXGE_HW_ALL_FOXES) { + + __vxge_hw_device_handle_error(hldev, + NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE); + *reason = 0; + ret = VXGE_HW_ERR_SLOT_FREEZE; + goto exit; + } + } + + hldev->stats.sw_dev_info_stats.total_intr_cnt++; + + *reason = val64; + + vpath_mask = hldev->vpaths_deployed >> + (64 - VXGE_HW_MAX_VIRTUAL_PATHS); + + if (val64 & + VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) { + hldev->stats.sw_dev_info_stats.traffic_intr_cnt++; + + return VXGE_HW_OK; + } + + hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++; + + if (unlikely(val64 & + VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) { + + enum vxge_hw_status error_level = VXGE_HW_OK; + + hldev->stats.sw_dev_err_stats.vpath_alarms++; + + for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { + + if (!(hldev->vpaths_deployed & vxge_mBIT(i))) + continue; + + ret = __vxge_hw_vpath_alarm_process( + &hldev->virtual_paths[i], skip_alarms); + + error_level = VXGE_HW_SET_LEVEL(ret, error_level); + + if (unlikely((ret == VXGE_HW_ERR_CRITICAL) || + (ret == VXGE_HW_ERR_SLOT_FREEZE))) + break; + } + + ret = error_level; + } +exit: + return ret; +} + +/** + * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the + * condition that has caused the Tx and RX interrupt. + * @hldev: HW device. + * + * Acknowledge (that is, clear) the condition that has caused + * the Tx and Rx interrupt. + * See also: vxge_hw_device_begin_irq(), + * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx(). + */ +void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev) +{ + + if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || + (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { + writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | + hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]), + &hldev->common_reg->tim_int_status0); + } + + if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || + (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { + __vxge_hw_pio_mem_write32_upper( + (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | + hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]), + &hldev->common_reg->tim_int_status1); + } +} + +/* + * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel + * @channel: Channel + * @dtrh: Buffer to return the DTR pointer + * + * Allocates a dtr from the reserve array. If the reserve array is empty, + * it swaps the reserve and free arrays. 
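+ *
+ * As an illustration of the bookkeeping: descriptors are handed out from
+ * reserve_arr and returned to free_arr, so the number of allocatable DTRs
+ * at any point is
+ *
+ *	(reserve_ptr - reserve_top) + (length - free_ptr)
+ *
+ * which is what vxge_hw_channel_dtr_count() computes.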
+ *
+ */
+static enum vxge_hw_status
+vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+{
+	void **tmp_arr;
+
+	if (channel->reserve_ptr - channel->reserve_top > 0) {
+_alloc_after_swap:
+		*dtrh = channel->reserve_arr[--channel->reserve_ptr];
+
+		return VXGE_HW_OK;
+	}
+
+	/* switch between empty and full arrays */
+
+	/* The idea behind this design: keeping the free and reserve arrays
+	 * separate also separates the irq and non-irq parts, i.e. no
+	 * additional locking is needed when we free a resource */
+
+	if (channel->length - channel->free_ptr > 0) {
+
+		tmp_arr = channel->reserve_arr;
+		channel->reserve_arr = channel->free_arr;
+		channel->free_arr = tmp_arr;
+		channel->reserve_ptr = channel->length;
+		channel->reserve_top = channel->free_ptr;
+		channel->free_ptr = channel->length;
+
+		channel->stats->reserve_free_swaps_cnt++;
+
+		goto _alloc_after_swap;
+	}
+
+	channel->stats->full_cnt++;
+
+	*dtrh = NULL;
+	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+}
+
+/*
+ * vxge_hw_channel_dtr_post - Post a dtr to the channel
+ * @channel: Channel
+ * @dtrh: DTR pointer
+ *
+ * Posts a dtr to the work array.
+ *
+ */
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+{
+	vxge_assert(channel->work_arr[channel->post_index] == NULL);
+
+	channel->work_arr[channel->post_index++] = dtrh;
+
+	/* wrap-around */
+	if (channel->post_index == channel->length)
+		channel->post_index = 0;
+}
+
+/*
+ * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
+ * @channel: Channel
+ * @dtrh: Buffer to return the next completed DTR pointer
+ *
+ * Returns the next completed dtr without removing it from the work array
+ *
+ */
+void
+vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
+{
+	vxge_assert(channel->compl_index < channel->length);
+
+	*dtrh = channel->work_arr[channel->compl_index];
+	prefetch(*dtrh);
+}
+
+/*
+ * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
+ * @channel: Channel handle
+ *
+ * Removes the next completed dtr from the work array
+ *
+ */
+void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
+{
+	channel->work_arr[channel->compl_index] = NULL;
+
+	/* wrap-around */
+	if (++channel->compl_index == channel->length)
+		channel->compl_index = 0;
+
+	channel->stats->total_compl_cnt++;
+}
+
+/*
+ * vxge_hw_channel_dtr_free - Frees a dtr
+ * @channel: Channel handle
+ * @dtrh: DTR pointer
+ *
+ * Returns the dtr to the free array
+ *
+ */
+void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
+{
+	channel->free_arr[--channel->free_ptr] = dtrh;
+}
+
+/*
+ * vxge_hw_channel_dtr_count
+ * @channel: Channel handle. Obtained via vxge_hw_channel_open().
+ *
+ * Retrieves the number of DTRs available. This function cannot be called
+ * from the data path; ring_initial_replenishi() is the only user.
+ */
+int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
+{
+	return (channel->reserve_ptr - channel->reserve_top) +
+		(channel->length - channel->free_ptr);
+}
+
+/**
+ * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
+ * with a valid handle.
+ *
+ * Reserve an Rx descriptor for subsequent filling-in by the driver
+ * and posting on the corresponding channel (@ring)
+ * via vxge_hw_ring_rxd_post().
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
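+ *
+ * A minimal usage sketch of the reserve/post cycle (the buffer attach step
+ * is driver-specific and only assumed here):
+ *
+ *	void *rxdh;
+ *
+ *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
+ *		... attach a DMA-mapped receive buffer to rxdh ...
+ *		vxge_hw_ring_rxd_post(ring, rxdh);
+ *	}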
+ * + */ +enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring, + void **rxdh) +{ + enum vxge_hw_status status; + struct __vxge_hw_channel *channel; + + channel = &ring->channel; + + status = vxge_hw_channel_dtr_alloc(channel, rxdh); + + if (status == VXGE_HW_OK) { + struct vxge_hw_ring_rxd_1 *rxdp = + (struct vxge_hw_ring_rxd_1 *)*rxdh; + + rxdp->control_0 = rxdp->control_1 = 0; + } + + return status; +} + +/** + * vxge_hw_ring_rxd_free - Free descriptor. + * @ring: Handle to the ring object used for receive + * @rxdh: Descriptor handle. + * + * Free the reserved descriptor. This operation is "symmetrical" to + * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's + * lifecycle. + * + * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can + * be: + * + * - reserved (vxge_hw_ring_rxd_reserve); + * + * - posted (vxge_hw_ring_rxd_post); + * + * - completed (vxge_hw_ring_rxd_next_completed); + * + * - and recycled again (vxge_hw_ring_rxd_free). + * + * For alternative state transitions and more details please refer to + * the design doc. + * + */ +void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh) +{ + struct __vxge_hw_channel *channel; + + channel = &ring->channel; + + vxge_hw_channel_dtr_free(channel, rxdh); + +} + +/** + * vxge_hw_ring_rxd_pre_post - Prepare rxd and post + * @ring: Handle to the ring object used for receive + * @rxdh: Descriptor handle. + * + * This routine prepares a rxd and posts + */ +void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh) +{ + struct __vxge_hw_channel *channel; + + channel = &ring->channel; + + vxge_hw_channel_dtr_post(channel, rxdh); +} + +/** + * vxge_hw_ring_rxd_post_post - Process rxd after post. + * @ring: Handle to the ring object used for receive + * @rxdh: Descriptor handle. + * + * Processes rxd after post + */ +void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh) +{ + struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; + struct __vxge_hw_channel *channel; + + channel = &ring->channel; + + rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; + + if (ring->stats->common_stats.usage_cnt > 0) + ring->stats->common_stats.usage_cnt--; +} + +/** + * vxge_hw_ring_rxd_post - Post descriptor on the ring. + * @ring: Handle to the ring object used for receive + * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve(). + * + * Post descriptor on the ring. + * Prior to posting the descriptor should be filled in accordance with + * Host/Titan interface specification for a given service (LL, etc.). + * + */ +void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) +{ + struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; + struct __vxge_hw_channel *channel; + + channel = &ring->channel; + + wmb(); + rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; + + vxge_hw_channel_dtr_post(channel, rxdh); + + if (ring->stats->common_stats.usage_cnt > 0) + ring->stats->common_stats.usage_cnt--; +} + +/** + * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier. + * @ring: Handle to the ring object used for receive + * @rxdh: Descriptor handle. + * + * Processes rxd after post with memory barrier. + */ +void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) +{ + wmb(); + vxge_hw_ring_rxd_post_post(ring, rxdh); +} + +/** + * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor. + * @ring: Handle to the ring object used for receive + * @rxdh: Descriptor handle. 
Returned by HW.
+ * @t_code: Transfer code, as per Titan User Guide,
+ * Receive Descriptor Format. Returned by HW.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
+ * driver of newly completed descriptors. After that the driver can use
+ * vxge_hw_ring_rxd_next_completed to retrieve the remaining completions
+ * (the very first completion is passed by HW via
+ * vxge_hw_ring_callback_f).
+ *
+ * Implementation-wise, the driver is free to call
+ * vxge_hw_ring_rxd_next_completed either immediately from inside the
+ * ring callback, or in a deferred fashion and a separate (from HW)
+ * context.
+ *
+ * A non-zero @t_code means failure to fill in the receive buffer(s)
+ * of the descriptor, for instance because a parity error was detected
+ * during the data transfer.
+ * In this case Titan will complete the descriptor and indicate
+ * to the host that the received data is not to be used.
+ * For details please refer to the Titan User Guide.
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ * See also: vxge_hw_ring_callback_f{},
+ * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
+ */
+enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
+	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
+{
+	struct __vxge_hw_channel *channel;
+	struct vxge_hw_ring_rxd_1 *rxdp;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	u64 control_0, own;
+
+	channel = &ring->channel;
+
+	vxge_hw_channel_dtr_try_complete(channel, rxdh);
+
+	rxdp = *rxdh;
+	if (rxdp == NULL) {
+		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+		goto exit;
+	}
+
+	control_0 = rxdp->control_0;
+	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+	*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
+
+	/* check whether it is not the end */
+	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
+
+		vxge_assert((rxdp)->host_control != 0);
+
+		++ring->cmpl_cnt;
+		vxge_hw_channel_dtr_complete(channel);
+
+		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
+
+		ring->stats->common_stats.usage_cnt++;
+		if (ring->stats->common_stats.usage_max <
+		    ring->stats->common_stats.usage_cnt)
+			ring->stats->common_stats.usage_max =
+				ring->stats->common_stats.usage_cnt;
+
+		status = VXGE_HW_OK;
+		goto exit;
+	}
+
+	/* reset it, since we don't want to return
+	 * garbage to the driver */
+	*rxdh = NULL;
+	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_ring_handle_tcode - Handle transfer code.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Titan user guide)
+ * "transfer codes".
+ *
+ * Handle the descriptor's transfer code. The latter comes with each completed
+ * descriptor.
+ *
+ * Returns: one of the enum vxge_hw_status{} enumerated types.
+ * VXGE_HW_OK - for success.
+ * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
+ */
+enum vxge_hw_status vxge_hw_ring_handle_tcode(
+	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
+{
+	struct __vxge_hw_channel *channel;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	channel = &ring->channel;
+
+	/* If the t_code is not supported, and it is other than 0x5
+	 * (an unparseable packet, e.g. an unknown IPv6 header), drop it.
+ */ + + if (t_code == VXGE_HW_RING_T_CODE_OK || + t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) { + status = VXGE_HW_OK; + goto exit; + } + + if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) { + status = VXGE_HW_ERR_INVALID_TCODE; + goto exit; + } + + ring->stats->rxd_t_code_err_cnt[t_code]++; +exit: + return status; +} + +/** + * __vxge_hw_non_offload_db_post - Post non offload doorbell + * + * @fifo: fifohandle + * @txdl_ptr: The starting location of the TxDL in host memory + * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256) + * @no_snoop: No snoop flags + * + * This function posts a non-offload doorbell to doorbell FIFO + * + */ +static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, + u64 txdl_ptr, u32 num_txds, u32 no_snoop) +{ + struct __vxge_hw_channel *channel; + + channel = &fifo->channel; + + writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) | + VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) | + VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop), + &fifo->nofl_db->control_0); + + mmiowb(); + + writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr); + + mmiowb(); +} + +/** + * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in + * the fifo + * @fifoh: Handle to the fifo object used for non offload send + */ +u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh) +{ + return vxge_hw_channel_dtr_count(&fifoh->channel); +} + +/** + * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor. + * @fifoh: Handle to the fifo object used for non offload send + * @txdlh: Reserved descriptor. On success HW fills this "out" parameter + * with a valid handle. + * @txdl_priv: Buffer to return the pointer to per txdl space + * + * Reserve a single TxDL (that is, fifo descriptor) + * for the subsequent filling-in by driver) + * and posting on the corresponding channel (@channelh) + * via vxge_hw_fifo_txdl_post(). + * + * Note: it is the responsibility of driver to reserve multiple descriptors + * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor + * carries up to configured number (fifo.max_frags) of contiguous buffers. + * + * Returns: VXGE_HW_OK - success; + * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available + * + */ +enum vxge_hw_status vxge_hw_fifo_txdl_reserve( + struct __vxge_hw_fifo *fifo, + void **txdlh, void **txdl_priv) +{ + struct __vxge_hw_channel *channel; + enum vxge_hw_status status; + int i; + + channel = &fifo->channel; + + status = vxge_hw_channel_dtr_alloc(channel, txdlh); + + if (status == VXGE_HW_OK) { + struct vxge_hw_fifo_txd *txdp = + (struct vxge_hw_fifo_txd *)*txdlh; + struct __vxge_hw_fifo_txdl_priv *priv; + + priv = __vxge_hw_fifo_txdl_priv(fifo, txdp); + + /* reset the TxDL's private */ + priv->align_dma_offset = 0; + priv->align_vaddr_start = priv->align_vaddr; + priv->align_used_frags = 0; + priv->frags = 0; + priv->alloc_frags = fifo->config->max_frags; + priv->next_txdl_priv = NULL; + + *txdl_priv = (void *)(size_t)txdp->host_control; + + for (i = 0; i < fifo->config->max_frags; i++) { + txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i; + txdp->control_0 = txdp->control_1 = 0; + } + } + + return status; +} + +/** + * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the + * descriptor. + * @fifo: Handle to the fifo object used for non offload send + * @txdlh: Descriptor handle. + * @frag_idx: Index of the data buffer in the caller's scatter-gather list + * (of buffers). + * @dma_pointer: DMA address of the data buffer referenced by @frag_idx. + * @size: Size of the data buffer (in bytes). 
+ * + * This API is part of the preparation of the transmit descriptor for posting + * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include + * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits(). + * All three APIs fill in the fields of the fifo descriptor, + * in accordance with the Titan specification. + * + */ +void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo, + void *txdlh, u32 frag_idx, + dma_addr_t dma_pointer, u32 size) +{ + struct __vxge_hw_fifo_txdl_priv *txdl_priv; + struct vxge_hw_fifo_txd *txdp, *txdp_last; + struct __vxge_hw_channel *channel; + + channel = &fifo->channel; + + txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); + txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags; + + if (frag_idx != 0) + txdp->control_0 = txdp->control_1 = 0; + else { + txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE( + VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST); + txdp->control_1 |= fifo->interrupt_type; + txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER( + fifo->tx_intr_num); + if (txdl_priv->frags) { + txdp_last = (struct vxge_hw_fifo_txd *)txdlh + + (txdl_priv->frags - 1); + txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE( + VXGE_HW_FIFO_TXD_GATHER_CODE_LAST); + } + } + + vxge_assert(frag_idx < txdl_priv->alloc_frags); + + txdp->buffer_pointer = (u64)dma_pointer; + txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size); + fifo->stats->total_buffers++; + txdl_priv->frags++; +} + +/** + * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel. + * @fifo: Handle to the fifo object used for non offload send + * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve() + * @frags: Number of contiguous buffers that are part of a single + * transmit operation. + * + * Post descriptor on the 'fifo' type channel for transmission. + * Prior to posting the descriptor should be filled in accordance with + * Host/Titan interface specification for a given service (LL, etc.). + * + */ +void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh) +{ + struct __vxge_hw_fifo_txdl_priv *txdl_priv; + struct vxge_hw_fifo_txd *txdp_last; + struct vxge_hw_fifo_txd *txdp_first; + struct __vxge_hw_channel *channel; + + channel = &fifo->channel; + + txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); + txdp_first = txdlh; + + txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1); + txdp_last->control_0 |= + VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST); + txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER; + + vxge_hw_channel_dtr_post(&fifo->channel, txdlh); + + __vxge_hw_non_offload_db_post(fifo, + (u64)txdl_priv->dma_addr, + txdl_priv->frags - 1, + fifo->no_snoop_bits); + + fifo->stats->total_posts++; + fifo->stats->common_stats.usage_cnt++; + if (fifo->stats->common_stats.usage_max < + fifo->stats->common_stats.usage_cnt) + fifo->stats->common_stats.usage_max = + fifo->stats->common_stats.usage_cnt; +} + +/** + * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor. + * @fifo: Handle to the fifo object used for non offload send + * @txdlh: Descriptor handle. Returned by HW. + * @t_code: Transfer code, as per Titan User Guide, + * Transmit Descriptor Format. + * Returned by HW. + * + * Retrieve the _next_ completed descriptor. + * HW uses channel callback (*vxge_hw_channel_callback_f) to notifiy + * driver of new completed descriptors. 
After that + * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest + * completions (the very first completion is passed by HW via + * vxge_hw_channel_callback_f). + * + * Implementation-wise, the driver is free to call + * vxge_hw_fifo_txdl_next_completed either immediately from inside the + * channel callback, or in a deferred fashion and separate (from HW) + * context. + * + * Non-zero @t_code means failure to process the descriptor. + * The failure could happen, for instance, when the link is + * down, in which case Titan completes the descriptor because it + * is not able to send the data out. + * + * For details please refer to Titan User Guide. + * + * Returns: VXGE_HW_OK - success. + * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors + * are currently available for processing. + * + */ +enum vxge_hw_status vxge_hw_fifo_txdl_next_completed( + struct __vxge_hw_fifo *fifo, void **txdlh, + enum vxge_hw_fifo_tcode *t_code) +{ + struct __vxge_hw_channel *channel; + struct vxge_hw_fifo_txd *txdp; + enum vxge_hw_status status = VXGE_HW_OK; + + channel = &fifo->channel; + + vxge_hw_channel_dtr_try_complete(channel, txdlh); + + txdp = *txdlh; + if (txdp == NULL) { + status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; + goto exit; + } + + /* check whether host owns it */ + if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) { + + vxge_assert(txdp->host_control != 0); + + vxge_hw_channel_dtr_complete(channel); + + *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0); + + if (fifo->stats->common_stats.usage_cnt > 0) + fifo->stats->common_stats.usage_cnt--; + + status = VXGE_HW_OK; + goto exit; + } + + /* no more completions */ + *txdlh = NULL; + status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; +exit: + return status; +} + +/** + * vxge_hw_fifo_handle_tcode - Handle transfer code. + * @fifo: Handle to the fifo object used for non offload send + * @txdlh: Descriptor handle. + * @t_code: One of the enumerated (and documented in the Titan user guide) + * "transfer codes". + * + * Handle descriptor's transfer code. The latter comes with each completed + * descriptor. + * + * Returns: one of the enum vxge_hw_status{} enumerated types. + * VXGE_HW_OK - for success. + * VXGE_HW_ERR_CRITICAL - when encounters critical error. + */ +enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo, + void *txdlh, + enum vxge_hw_fifo_tcode t_code) +{ + struct __vxge_hw_channel *channel; + + enum vxge_hw_status status = VXGE_HW_OK; + channel = &fifo->channel; + + if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) { + status = VXGE_HW_ERR_INVALID_TCODE; + goto exit; + } + + fifo->stats->txd_t_code_err_cnt[t_code]++; +exit: + return status; +} + +/** + * vxge_hw_fifo_txdl_free - Free descriptor. + * @fifo: Handle to the fifo object used for non offload send + * @txdlh: Descriptor handle. + * + * Free the reserved descriptor. This operation is "symmetrical" to + * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's + * lifecycle. + * + * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can + * be: + * + * - reserved (vxge_hw_fifo_txdl_reserve); + * + * - posted (vxge_hw_fifo_txdl_post); + * + * - completed (vxge_hw_fifo_txdl_next_completed); + * + * - and recycled again (vxge_hw_fifo_txdl_free). + * + * For alternative state transitions and more details please refer to + * the design doc. 
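+ *
+ * A condensed, illustrative view of one descriptor lifecycle (DMA mapping,
+ * locking and error handling are omitted; dma_addr, len, txdl_priv and
+ * t_code are placeholder variables of the example):
+ *
+ *	vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
+ *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
+ *	vxge_hw_fifo_txdl_post(fifo, txdlh);
+ *	...
+ *	if (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
+ *	    VXGE_HW_OK) {
+ *		vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
+ *		vxge_hw_fifo_txdl_free(fifo, txdlh);
+ *	}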
+ * + */ +void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh) +{ + struct __vxge_hw_fifo_txdl_priv *txdl_priv; + u32 max_frags; + struct __vxge_hw_channel *channel; + + channel = &fifo->channel; + + txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, + (struct vxge_hw_fifo_txd *)txdlh); + + max_frags = fifo->config->max_frags; + + vxge_hw_channel_dtr_free(channel, txdlh); +} + +/** + * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath + * to MAC address table. + * @vp: Vpath handle. + * @macaddr: MAC address to be added for this vpath into the list + * @macaddr_mask: MAC address mask for macaddr + * @duplicate_mode: Duplicate MAC address add mode. Please see + * enum vxge_hw_vpath_mac_addr_add_mode{} + * + * Adds the given mac address and mac address mask into the list for this + * vpath. + * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and + * vxge_hw_vpath_mac_addr_get_next + * + */ +enum vxge_hw_status +vxge_hw_vpath_mac_addr_add( + struct __vxge_hw_vpath_handle *vp, + u8 (macaddr)[ETH_ALEN], + u8 (macaddr_mask)[ETH_ALEN], + enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode) +{ + u32 i; + u64 data1 = 0ULL; + u64 data2 = 0ULL; + enum vxge_hw_status status = VXGE_HW_OK; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + for (i = 0; i < ETH_ALEN; i++) { + data1 <<= 8; + data1 |= (u8)macaddr[i]; + + data2 <<= 8; + data2 |= (u8)macaddr_mask[i]; + } + + switch (duplicate_mode) { + case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE: + i = 0; + break; + case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE: + i = 1; + break; + case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE: + i = 2; + break; + default: + i = 0; + break; + } + + status = __vxge_hw_vpath_rts_table_set(vp, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, + 0, + VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1), + VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)| + VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i)); +exit: + return status; +} + +/** + * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath + * from MAC address table. + * @vp: Vpath handle. + * @macaddr: First MAC address entry for this vpath in the list + * @macaddr_mask: MAC address mask for macaddr + * + * Returns the first mac address and mac address mask in the list for this + * vpath. + * see also: vxge_hw_vpath_mac_addr_get_next + * + */ +enum vxge_hw_status +vxge_hw_vpath_mac_addr_get( + struct __vxge_hw_vpath_handle *vp, + u8 (macaddr)[ETH_ALEN], + u8 (macaddr_mask)[ETH_ALEN]) +{ + u32 i; + u64 data1 = 0ULL; + u64 data2 = 0ULL; + enum vxge_hw_status status = VXGE_HW_OK; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + status = __vxge_hw_vpath_rts_table_get(vp, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, + 0, &data1, &data2); + + if (status != VXGE_HW_OK) + goto exit; + + data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); + + data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2); + + for (i = ETH_ALEN; i > 0; i--) { + macaddr[i-1] = (u8)(data1 & 0xFF); + data1 >>= 8; + + macaddr_mask[i-1] = (u8)(data2 & 0xFF); + data2 >>= 8; + } +exit: + return status; +} + +/** + * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this + * vpath + * from MAC address table. + * @vp: Vpath handle. 
+ * @macaddr: Next MAC address entry for this vpath in the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Returns the next mac address and mac address mask in the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_get
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get_next(
+	struct __vxge_hw_vpath_handle *vp,
+	u8 (macaddr)[ETH_ALEN],
+	u8 (macaddr_mask)[ETH_ALEN])
+{
+	u32 i;
+	u64 data1 = 0ULL;
+	u64 data2 = 0ULL;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_get(vp,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+		0, &data1, &data2);
+
+	if (status != VXGE_HW_OK)
+		goto exit;
+
+	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
+
+	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
+
+	for (i = ETH_ALEN; i > 0; i--) {
+		macaddr[i-1] = (u8)(data1 & 0xFF);
+		data1 >>= 8;
+
+		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+		data2 >>= 8;
+	}
+
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
+ * from the MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: MAC address to be deleted from the list for this vpath
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Deletes the given mac address and mac address mask from the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+	struct __vxge_hw_vpath_handle *vp,
+	u8 (macaddr)[ETH_ALEN],
+	u8 (macaddr_mask)[ETH_ALEN])
+{
+	u32 i;
+	u64 data1 = 0ULL;
+	u64 data2 = 0ULL;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		data1 <<= 8;
+		data1 |= (u8)macaddr[i];
+
+		data2 <<= 8;
+		data2 |= (u8)macaddr_mask[i];
+	}
+
+	status = __vxge_hw_vpath_rts_table_set(vp,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+		0,
+		VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+		VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
+ * to the vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be added for this vpath into the list
+ *
+ * Adds the given vlan id into the list for this vpath.
+ * see also: vxge_hw_vpath_vid_delete
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (vp == NULL) {
+		status = VXGE_HW_ERR_INVALID_HANDLE;
+		goto exit;
+	}
+
+	status = __vxge_hw_vpath_rts_table_set(vp,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+		0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+	return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
+ * from the vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be deleted from the list for this vpath
+ *
+ * Deletes the given vlan id from the list for this vpath.
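+ *
+ * For illustration only (the handle and the add/delete decision are the
+ * caller's), a VLAN filter update might look like:
+ *
+ *	if (add)
+ *		status = vxge_hw_vpath_vid_add(vp, vid);
+ *	else
+ *		status = vxge_hw_vpath_vid_delete(vp, vid);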
+ * see also: vxge_hw_vpath_vid_add + * + */ +enum vxge_hw_status +vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + status = __vxge_hw_vpath_rts_table_set(vp, + VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID, + 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0); +exit: + return status; +} + +/** + * vxge_hw_vpath_promisc_enable - Enable promiscuous mode. + * @vp: Vpath handle. + * + * Enable promiscuous mode of Titan-e operation. + * + * See also: vxge_hw_vpath_promisc_disable(). + */ +enum vxge_hw_status vxge_hw_vpath_promisc_enable( + struct __vxge_hw_vpath_handle *vp) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((vp == NULL) || (vp->vpath->ringh == NULL)) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + + /* Enable promiscuous mode for function 0 only */ + if (!(vpath->hldev->access_rights & + VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) + return VXGE_HW_OK; + + val64 = readq(&vpath->vp_reg->rxmac_vcfg0); + + if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) { + + val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN | + VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN | + VXGE_HW_RXMAC_VCFG0_BCAST_EN | + VXGE_HW_RXMAC_VCFG0_ALL_VID_EN; + + writeq(val64, &vpath->vp_reg->rxmac_vcfg0); + } +exit: + return status; +} + +/** + * vxge_hw_vpath_promisc_disable - Disable promiscuous mode. + * @vp: Vpath handle. + * + * Disable promiscuous mode of Titan-e operation. + * + * See also: vxge_hw_vpath_promisc_enable(). + */ +enum vxge_hw_status vxge_hw_vpath_promisc_disable( + struct __vxge_hw_vpath_handle *vp) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((vp == NULL) || (vp->vpath->ringh == NULL)) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + + val64 = readq(&vpath->vp_reg->rxmac_vcfg0); + + if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) { + + val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN | + VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN | + VXGE_HW_RXMAC_VCFG0_ALL_VID_EN); + + writeq(val64, &vpath->vp_reg->rxmac_vcfg0); + } +exit: + return status; +} + +/* + * vxge_hw_vpath_bcast_enable - Enable broadcast + * @vp: Vpath handle. + * + * Enable receiving broadcasts. + */ +enum vxge_hw_status vxge_hw_vpath_bcast_enable( + struct __vxge_hw_vpath_handle *vp) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((vp == NULL) || (vp->vpath->ringh == NULL)) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + + val64 = readq(&vpath->vp_reg->rxmac_vcfg0); + + if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) { + val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN; + writeq(val64, &vpath->vp_reg->rxmac_vcfg0); + } +exit: + return status; +} + +/** + * vxge_hw_vpath_mcast_enable - Enable multicast addresses. + * @vp: Vpath handle. + * + * Enable Titan-e multicast addresses. + * Returns: VXGE_HW_OK on success. 
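+ *
+ * An illustrative sketch only (ndev and vp are assumed to be the caller's);
+ * a set_rx_mode style handler might drive these toggles as follows:
+ *
+ *	if (ndev->flags & IFF_PROMISC)
+ *		vxge_hw_vpath_promisc_enable(vp);
+ *	else
+ *		vxge_hw_vpath_promisc_disable(vp);
+ *
+ *	if (ndev->flags & IFF_ALLMULTI)
+ *		vxge_hw_vpath_mcast_enable(vp);
+ *	else
+ *		vxge_hw_vpath_mcast_disable(vp);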
+ * + */ +enum vxge_hw_status vxge_hw_vpath_mcast_enable( + struct __vxge_hw_vpath_handle *vp) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((vp == NULL) || (vp->vpath->ringh == NULL)) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + + val64 = readq(&vpath->vp_reg->rxmac_vcfg0); + + if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) { + val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN; + writeq(val64, &vpath->vp_reg->rxmac_vcfg0); + } +exit: + return status; +} + +/** + * vxge_hw_vpath_mcast_disable - Disable multicast addresses. + * @vp: Vpath handle. + * + * Disable Titan-e multicast addresses. + * Returns: VXGE_HW_OK - success. + * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle + * + */ +enum vxge_hw_status +vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath; + enum vxge_hw_status status = VXGE_HW_OK; + + if ((vp == NULL) || (vp->vpath->ringh == NULL)) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + vpath = vp->vpath; + + val64 = readq(&vpath->vp_reg->rxmac_vcfg0); + + if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) { + val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN; + writeq(val64, &vpath->vp_reg->rxmac_vcfg0); + } +exit: + return status; +} + +/* + * vxge_hw_vpath_alarm_process - Process Alarms. + * @vpath: Virtual Path. + * @skip_alarms: Do not clear the alarms + * + * Process vpath alarms. + * + */ +enum vxge_hw_status vxge_hw_vpath_alarm_process( + struct __vxge_hw_vpath_handle *vp, + u32 skip_alarms) +{ + enum vxge_hw_status status = VXGE_HW_OK; + + if (vp == NULL) { + status = VXGE_HW_ERR_INVALID_HANDLE; + goto exit; + } + + status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms); +exit: + return status; +} + +/** + * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and + * alrms + * @vp: Virtual Path handle. + * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of + * interrupts(Can be repeated). If fifo or ring are not enabled + * the MSIX vector for that should be set to 0 + * @alarm_msix_id: MSIX vector for alarm. + * + * This API will associate a given MSIX vector numbers with the four TIM + * interrupts and alarm interrupt. + */ +void +vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, + int alarm_msix_id) +{ + u64 val64; + struct __vxge_hw_virtualpath *vpath = vp->vpath; + struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; + u32 vp_id = vp->vpath->vp_id; + + val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI( + (vp_id * 4) + tim_msix_id[0]) | + VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI( + (vp_id * 4) + tim_msix_id[1]); + + writeq(val64, &vp_reg->interrupt_cfg0); + + writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG( + (vpath->hldev->first_vp_id * 4) + alarm_msix_id), + &vp_reg->interrupt_cfg2); + + if (vpath->hldev->config.intr_mode == + VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( + VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN, + 0, 32), &vp_reg->one_shot_vect0_en); + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( + VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, + 0, 32), &vp_reg->one_shot_vect1_en); + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( + VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, + 0, 32), &vp_reg->one_shot_vect2_en); + } +} + +/** + * vxge_hw_vpath_msix_mask - Mask MSIX Vector. + * @vp: Virtual Path handle. 
+ * @msix_id: MSIX ID + * + * The function masks the msix interrupt for the given msix_id + * + * Returns: 0, + * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range + * status. + * See also: + */ +void +vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id) +{ + struct __vxge_hw_device *hldev = vp->vpath->hldev; + __vxge_hw_pio_mem_write32_upper( + (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), + &hldev->common_reg->set_msix_mask_vect[msix_id % 4]); +} + +/** + * vxge_hw_vpath_msix_clear - Clear MSIX Vector. + * @vp: Virtual Path handle. + * @msix_id: MSI ID + * + * The function clears the msix interrupt for the given msix_id + * + * Returns: 0, + * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range + * status. + * See also: + */ +void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) +{ + struct __vxge_hw_device *hldev = vp->vpath->hldev; + + if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)) + __vxge_hw_pio_mem_write32_upper( + (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), + &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]); + else + __vxge_hw_pio_mem_write32_upper( + (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), + &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]); +} + +/** + * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. + * @vp: Virtual Path handle. + * @msix_id: MSI ID + * + * The function unmasks the msix interrupt for the given msix_id + * + * Returns: 0, + * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range + * status. + * See also: + */ +void +vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id) +{ + struct __vxge_hw_device *hldev = vp->vpath->hldev; + __vxge_hw_pio_mem_write32_upper( + (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), + &hldev->common_reg->clear_msix_mask_vect[msix_id%4]); +} + +/** + * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts. + * @vp: Virtual Path handle. + * + * Mask Tx and Rx vpath interrupts. + * + * See also: vxge_hw_vpath_inta_mask_tx_rx() + */ +void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp) +{ + u64 tim_int_mask0[4] = {[0 ...3] = 0}; + u32 tim_int_mask1[4] = {[0 ...3] = 0}; + u64 val64; + struct __vxge_hw_device *hldev = vp->vpath->hldev; + + VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0, + tim_int_mask1, vp->vpath->vp_id); + + val64 = readq(&hldev->common_reg->tim_int_mask0); + + if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || + (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { + writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | + tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64), + &hldev->common_reg->tim_int_mask0); + } + + val64 = readl(&hldev->common_reg->tim_int_mask1); + + if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || + (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { + __vxge_hw_pio_mem_write32_upper( + (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | + tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64), + &hldev->common_reg->tim_int_mask1); + } +} + +/** + * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts. + * @vp: Virtual Path handle. + * + * Unmask Tx and Rx vpath interrupts. 
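+ *
+ * An illustrative sketch of one possible INTA flow (the ordering is an
+ * assumption of the example, not a requirement of this API): mask on entry,
+ * acknowledge and process completions, then unmask:
+ *
+ *	vxge_hw_vpath_inta_mask_tx_rx(vp);
+ *	vxge_hw_device_clear_tx_rx(hldev);
+ *	... run vxge_hw_vpath_poll_rx()/vxge_hw_vpath_poll_tx() ...
+ *	vxge_hw_vpath_inta_unmask_tx_rx(vp);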
+ * + * See also: vxge_hw_vpath_inta_mask_tx_rx() + */ +void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp) +{ + u64 tim_int_mask0[4] = {[0 ...3] = 0}; + u32 tim_int_mask1[4] = {[0 ...3] = 0}; + u64 val64; + struct __vxge_hw_device *hldev = vp->vpath->hldev; + + VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0, + tim_int_mask1, vp->vpath->vp_id); + + val64 = readq(&hldev->common_reg->tim_int_mask0); + + if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || + (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { + writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | + tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64, + &hldev->common_reg->tim_int_mask0); + } + + if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || + (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { + __vxge_hw_pio_mem_write32_upper( + (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | + tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64, + &hldev->common_reg->tim_int_mask1); + } +} + +/** + * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed + * descriptors and process the same. + * @ring: Handle to the ring object used for receive + * + * The function polls the Rx for the completed descriptors and calls + * the driver via supplied completion callback. + * + * Returns: VXGE_HW_OK, if the polling is completed successful. + * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed + * descriptors available which are yet to be processed. + * + * See also: vxge_hw_vpath_poll_rx() + */ +enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring) +{ + u8 t_code; + enum vxge_hw_status status = VXGE_HW_OK; + void *first_rxdh; + u64 val64 = 0; + int new_count = 0; + + ring->cmpl_cnt = 0; + + status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code); + if (status == VXGE_HW_OK) + ring->callback(ring, first_rxdh, + t_code, ring->channel.userdata); + + if (ring->cmpl_cnt != 0) { + ring->doorbell_cnt += ring->cmpl_cnt; + if (ring->doorbell_cnt >= ring->rxds_limit) { + /* + * Each RxD is of 4 qwords, update the number of + * qwords replenished + */ + new_count = (ring->doorbell_cnt * 4); + + /* For each block add 4 more qwords */ + ring->total_db_cnt += ring->doorbell_cnt; + if (ring->total_db_cnt >= ring->rxds_per_block) { + new_count += 4; + /* Reset total count */ + ring->total_db_cnt %= ring->rxds_per_block; + } + writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count), + &ring->vp_reg->prc_rxd_doorbell); + val64 = + readl(&ring->common_reg->titan_general_int_status); + ring->doorbell_cnt = 0; + } + } + + return status; +} + +/** + * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process + * the same. + * @fifo: Handle to the fifo object used for non offload send + * + * The function polls the Tx for the completed descriptors and calls + * the driver via supplied completion callback. + * + * Returns: VXGE_HW_OK, if the polling is completed successful. + * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed + * descriptors available which are yet to be processed. 
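+ *
+ * A minimal illustrative polling sketch (NR_SKB, completed[] and the
+ * surrounding context are placeholders of the example, not part of this
+ * API):
+ *
+ *	struct sk_buff *completed[NR_SKB];
+ *	struct sk_buff **skb_ptr = completed;
+ *	int more = 0;
+ *
+ *	vxge_hw_vpath_poll_rx(ring);
+ *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, NR_SKB, &more);
+ *	... free the skbs the callback collected into completed[] ...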
+ */ +enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo, + struct sk_buff ***skb_ptr, int nr_skb, + int *more) +{ + enum vxge_hw_fifo_tcode t_code; + void *first_txdlh; + enum vxge_hw_status status = VXGE_HW_OK; + struct __vxge_hw_channel *channel; + + channel = &fifo->channel; + + status = vxge_hw_fifo_txdl_next_completed(fifo, + &first_txdlh, &t_code); + if (status == VXGE_HW_OK) + if (fifo->callback(fifo, first_txdlh, t_code, + channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK) + status = VXGE_HW_COMPLETIONS_REMAIN; + + return status; +} diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h new file mode 100644 index 000000000..ba6f833bb --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h @@ -0,0 +1,2290 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-traffic.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2010 Exar Corp. + ******************************************************************************/ +#ifndef VXGE_TRAFFIC_H +#define VXGE_TRAFFIC_H + +#include "vxge-reg.h" +#include "vxge-version.h" + +#define VXGE_HW_DTR_MAX_T_CODE 16 +#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL +#define VXGE_HW_MAX_VIRTUAL_PATHS 17 + +#define VXGE_HW_MAC_MAX_MAC_PORT_ID 2 + +#define VXGE_HW_DEFAULT_32 0xffffffff +/* frames sizes */ +#define VXGE_HW_HEADER_802_2_SIZE 3 +#define VXGE_HW_HEADER_SNAP_SIZE 5 +#define VXGE_HW_HEADER_VLAN_SIZE 4 +#define VXGE_HW_MAC_HEADER_MAX_SIZE \ + (ETH_HLEN + \ + VXGE_HW_HEADER_802_2_SIZE + \ + VXGE_HW_HEADER_VLAN_SIZE + \ + VXGE_HW_HEADER_SNAP_SIZE) + +/* 32bit alignments */ +#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 2 +#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2 +#define VXGE_HW_HEADER_802_2_ALIGN 3 +#define VXGE_HW_HEADER_SNAP_ALIGN 1 + +#define VXGE_HW_L3_CKSUM_OK 0xFFFF +#define VXGE_HW_L4_CKSUM_OK 0xFFFF + +/* Forward declarations */ +struct __vxge_hw_device; +struct __vxge_hw_vpath_handle; +struct vxge_hw_vp_config; +struct __vxge_hw_virtualpath; +struct __vxge_hw_channel; +struct __vxge_hw_fifo; +struct __vxge_hw_ring; +struct vxge_hw_ring_attr; +struct vxge_hw_mempool; + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +/*VXGE_HW_STATUS_H*/ + +#define VXGE_HW_EVENT_BASE 0 +#define VXGE_LL_EVENT_BASE 100 + +/** + * enum vxge_hw_event- Enumerates slow-path HW events. + * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event. + * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event. + * @VXGE_HW_EVENT_ECCERR: vpath ECC error event. + * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath + * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error. + * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event. + * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event. + * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event. 
+ * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset + * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed + * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish + * slot-freeze from the rest critical events (e.g. ECC) when it is + * impossible to PIO read "through" the bus, i.e. when getting all-foxes. + * + * enum vxge_hw_event enumerates slow-path HW eventis. + * + * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{}, + * vxge_uld_link_down_f{}. + */ +enum vxge_hw_event { + VXGE_HW_EVENT_UNKNOWN = 0, + /* HW events */ + VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1, + VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2, + VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3, + VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4, + VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5, + VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6, + VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7, + VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8, + VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9, + VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10, + VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11, + VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12, + VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13, + VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14, +}; + +#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b)) + +/* + * struct vxge_hw_mempool_dma - Represents DMA objects passed to the + caller. + */ +struct vxge_hw_mempool_dma { + dma_addr_t addr; + struct pci_dev *handle; + struct pci_dev *acc_handle; +}; + +/* + * vxge_hw_mempool_item_f - Mempool item alloc/free callback + * @mempoolh: Memory pool handle. + * @memblock: Address of memory block + * @memblock_index: Index of memory block + * @item: Item that gets allocated or freed. + * @index: Item's index in the memory pool. + * @is_last: True, if this item is the last one in the pool; false - otherwise. + * userdata: Per-pool user context. + * + * Memory pool allocation/deallocation callback. + */ + +/* + * struct vxge_hw_mempool - Memory pool. + */ +struct vxge_hw_mempool { + + void (*item_func_alloc)( + struct vxge_hw_mempool *mempoolh, + u32 memblock_index, + struct vxge_hw_mempool_dma *dma_object, + u32 index, + u32 is_last); + + void *userdata; + void **memblocks_arr; + void **memblocks_priv_arr; + struct vxge_hw_mempool_dma *memblocks_dma_arr; + struct __vxge_hw_device *devh; + u32 memblock_size; + u32 memblocks_max; + u32 memblocks_allocated; + u32 item_size; + u32 items_max; + u32 items_initial; + u32 items_current; + u32 items_per_memblock; + void **items_arr; + u32 items_priv_size; +}; + +#define VXGE_HW_MAX_INTR_PER_VP 4 +#define VXGE_HW_VPATH_INTR_TX 0 +#define VXGE_HW_VPATH_INTR_RX 1 +#define VXGE_HW_VPATH_INTR_EINTA 2 +#define VXGE_HW_VPATH_INTR_BMAP 3 + +#define VXGE_HW_BLOCK_SIZE 4096 + +/** + * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration. + * @intr_enable: Set to 1, if interrupt is enabled. + * @btimer_val: Boundary Timer Initialization value in units of 272 ns. + * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when + * asserted, other interrupt-generating entities will cancel the + * scheduled timer interrupt. + * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable: + * When asserted, an interrupt will be generated every time the + * boundary timer expires, even if no traffic has been transmitted + * on this interrupt. 
+ * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive + * (Re-) Interrupt Enable: When asserted, an interrupt will be + * generated the next time the timer expires, even if no traffic has + * been transmitted on this interrupt. (This will only happen once + * each time that this value is written to the TIM.) This bit is + * cleared by H/W at the end of the current-timer-interval when + * the interrupt is triggered. + * @rtimer_val: Restriction Timer Initialization value in units of 272 ns. + * @util_sel: Utilization Selector. Selects which of the workload approximations + * to use (e.g. legacy Tx utilization, Tx/Rx utilization, host + * specified utilization etc.), selects one of + * the 17 host configured values. + * 0-Virtual Path 0 + * 1-Virtual Path 1 + * ... + * 16-Virtual Path 17 + * 17-Legacy Tx network utilization, provided by TPA + * 18-Legacy Rx network utilization, provided by FAU + * 19-Average of legacy Rx and Tx utilization calculated from link + * utilization values. + * 20-31-Invalid configurations + * 32-Host utilization for Virtual Path 0 + * 33-Host utilization for Virtual Path 1 + * ... + * 48-Host utilization for Virtual Path 17 + * 49-Legacy Tx network utilization, provided by TPA + * 50-Legacy Rx network utilization, provided by FAU + * 51-Average of legacy Rx and Tx utilization calculated from + * link utilization values. + * 52-63-Invalid configurations + * @ltimer_val: Latency Timer Initialization Value in units of 272 ns. + * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set + * to 1 enables counting of TxD0 returns (signalled by PCC's), + * towards utilization event count values. + * @urange_a: Defines the upper limit (in percent) for this utilization range + * to be active. This range is considered active + * if 0 = UTIL = URNG_A + * and the UEC_A field (below) is non-zero. + * @uec_a: Utilization Event Count A. If this range is active, the adapter will + * wait until UEC_A events have occurred on the interrupt before + * generating an interrupt. + * @urange_b: Link utilization range B. + * @uec_b: Utilization Event Count B. + * @urange_c: Link utilization range C. + * @uec_c: Utilization Event Count C. + * @urange_d: Link utilization range D. + * @uec_d: Utilization Event Count D. + * Traffic Interrupt Controller Module interrupt configuration. 
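+ *
+ * A purely illustrative initialization sketch (the numeric values are
+ * invented for the example and are not recommendations):
+ *
+ *	struct vxge_hw_tim_intr_config tti = {
+ *		.intr_enable	= VXGE_HW_TIM_INTR_ENABLE,
+ *		.btimer_val	= 250,
+ *		.timer_ac_en	= VXGE_HW_TIM_TIMER_AC_ENABLE,
+ *		.timer_ci_en	= VXGE_HW_TIM_TIMER_CI_DISABLE,
+ *		.urange_a	= 10,
+ *		.uec_a		= 1,
+ *	};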
+ */ +struct vxge_hw_tim_intr_config { + + u32 intr_enable; +#define VXGE_HW_TIM_INTR_ENABLE 1 +#define VXGE_HW_TIM_INTR_DISABLE 0 +#define VXGE_HW_TIM_INTR_DEFAULT 0 + + u32 btimer_val; +#define VXGE_HW_MIN_TIM_BTIMER_VAL 0 +#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864 +#define VXGE_HW_USE_FLASH_DEFAULT (~0) + + u32 timer_ac_en; +#define VXGE_HW_TIM_TIMER_AC_ENABLE 1 +#define VXGE_HW_TIM_TIMER_AC_DISABLE 0 + + u32 timer_ci_en; +#define VXGE_HW_TIM_TIMER_CI_ENABLE 1 +#define VXGE_HW_TIM_TIMER_CI_DISABLE 0 + + u32 timer_ri_en; +#define VXGE_HW_TIM_TIMER_RI_ENABLE 1 +#define VXGE_HW_TIM_TIMER_RI_DISABLE 0 + + u32 rtimer_val; +#define VXGE_HW_MIN_TIM_RTIMER_VAL 0 +#define VXGE_HW_MAX_TIM_RTIMER_VAL 67108864 + + u32 util_sel; +#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17 +#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18 +#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19 +#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63 + + u32 ltimer_val; +#define VXGE_HW_MIN_TIM_LTIMER_VAL 0 +#define VXGE_HW_MAX_TIM_LTIMER_VAL 67108864 + + /* Line utilization interrupts */ + u32 urange_a; +#define VXGE_HW_MIN_TIM_URANGE_A 0 +#define VXGE_HW_MAX_TIM_URANGE_A 100 + + u32 uec_a; +#define VXGE_HW_MIN_TIM_UEC_A 0 +#define VXGE_HW_MAX_TIM_UEC_A 65535 + + u32 urange_b; +#define VXGE_HW_MIN_TIM_URANGE_B 0 +#define VXGE_HW_MAX_TIM_URANGE_B 100 + + u32 uec_b; +#define VXGE_HW_MIN_TIM_UEC_B 0 +#define VXGE_HW_MAX_TIM_UEC_B 65535 + + u32 urange_c; +#define VXGE_HW_MIN_TIM_URANGE_C 0 +#define VXGE_HW_MAX_TIM_URANGE_C 100 + + u32 uec_c; +#define VXGE_HW_MIN_TIM_UEC_C 0 +#define VXGE_HW_MAX_TIM_UEC_C 65535 + + u32 uec_d; +#define VXGE_HW_MIN_TIM_UEC_D 0 +#define VXGE_HW_MAX_TIM_UEC_D 65535 +}; + +#define VXGE_HW_STATS_OP_READ 0 +#define VXGE_HW_STATS_OP_CLEAR_STAT 1 +#define VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS 2 +#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC 2 +#define VXGE_HW_STATS_OP_CLEAR_ALL_STATS 3 + +#define VXGE_HW_STATS_LOC_AGGR 17 +#define VXGE_HW_STATS_AGGRn_OFFSET 0x00720 + +#define VXGE_HW_STATS_VPATH_TX_OFFSET 0x0 +#define VXGE_HW_STATS_VPATH_RX_OFFSET 0x00090 + +#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET (0x001d0 >> 3) +#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \ + vxge_bVALn(bits, 0, 32) + +#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \ + vxge_bVALn(bits, 32, 32) + +#define VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET (0x001d8 >> 3) +#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \ + vxge_bVALn(bits, 0, 32) + +#define VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \ + vxge_bVALn(bits, 32, 32) + +/** + * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics + * + * @tx_frms: Count of data frames transmitted on this Aggregator on all + * its Aggregation ports. Does not include LACPDUs or Marker PDUs. + * However, does include frames discarded by the Distribution + * function. + * @tx_data_octets: Count of data and padding octets of frames transmitted + * on this Aggregator on all its Aggregation ports. Does not include + * octets of LACPDUs or Marker PDUs. However, does include octets of + * frames discarded by the Distribution function. + * @tx_mcast_frms: Count of data frames transmitted (to a group destination + * address other than the broadcast address) on this Aggregator on + * all its Aggregation ports. Does not include LACPDUs or Marker + * PDUs. However, does include frames discarded by the Distribution + * function. + * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator + * on all its Aggregation ports. 
Does not include LACPDUs or Marker + * PDUs. However, does include frames discarded by the Distribution + * function. + * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator + * that are discarded by the Distribution function. This occurs when + * conversation are allocated to different ports and have to be + * flushed on old ports + * @tx_errored_frms: Count of data frames transmitted on this Aggregator that + * experience transmission errors on its Aggregation ports. + * @rx_frms: Count of data frames received on this Aggregator on all its + * Aggregation ports. Does not include LACPDUs or Marker PDUs. + * Also, does not include frames discarded by the Collection + * function. + * @rx_data_octets: Count of data and padding octets of frames received on this + * Aggregator on all its Aggregation ports. Does not include octets + * of LACPDUs or Marker PDUs. Also, does not include + * octets of frames + * discarded by the Collection function. + * @rx_mcast_frms: Count of data frames received (from a group destination + * address other than the broadcast address) on this Aggregator on + * all its Aggregation ports. Does not include LACPDUs or Marker + * PDUs. Also, does not include frames discarded by the Collection + * function. + * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on + * all its Aggregation ports. Does not include LACPDUs or Marker + * PDUs. Also, does not include frames discarded by the Collection + * function. + * @rx_discarded_frms: Count of data frames received on this Aggregator that are + * discarded by the Collection function because the Collection + * function was disabled on the port which the frames are received. + * @rx_errored_frms: Count of data frames received on this Aggregator that are + * discarded by its Aggregation ports, or are discarded by the + * Collection function of the Aggregator, or that are discarded by + * the Aggregator due to detection of an illegal Slow Protocols PDU. + * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator + * that are discarded by its Aggregation ports due to detection of + * an unknown Slow Protocols PDU. + * + * Per aggregator XMAC RX statistics. + */ +struct vxge_hw_xmac_aggr_stats { +/*0x000*/ u64 tx_frms; +/*0x008*/ u64 tx_data_octets; +/*0x010*/ u64 tx_mcast_frms; +/*0x018*/ u64 tx_bcast_frms; +/*0x020*/ u64 tx_discarded_frms; +/*0x028*/ u64 tx_errored_frms; +/*0x030*/ u64 rx_frms; +/*0x038*/ u64 rx_data_octets; +/*0x040*/ u64 rx_mcast_frms; +/*0x048*/ u64 rx_bcast_frms; +/*0x050*/ u64 rx_discarded_frms; +/*0x058*/ u64 rx_errored_frms; +/*0x060*/ u64 rx_unknown_slow_proto_frms; +} __packed; + +/** + * struct vxge_hw_xmac_port_stats - XMAC Port Statistics + * + * @tx_ttl_frms: Count of successfully transmitted MAC frames + * @tx_ttl_octets: Count of total octets of transmitted frames, not including + * framing characters (i.e. less framing bits). To determine the + * total octets of transmitted frames, including framing characters, + * multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless + * otherwise configured, this stat only counts frames that have + * 8 bytes of preamble for each frame). This stat can be configured + * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything + * including the preamble octets. + * @tx_data_octets: Count of data and padding octets of successfully transmitted + * frames. + * @tx_mcast_frms: Count of successfully transmitted frames to a group address + * other than the broadcast address. 
+ * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast + * group address. + * @tx_ucast_frms: Count of transmitted frames containing a unicast address. + * Includes discarded frames that are not sent to the network. + * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag. + * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network. + * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that + * are passed to the network. + * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent + * due to problems within ICMP. + * @tx_tcp: Count of transmitted TCP segments. Does not include segments + * containing retransmitted octets. + * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag. + * @tx_udp: Count of transmitted UDP datagrams. + * @tx_parse_error: Increments when the TPA is unable to parse a packet. This + * generally occurs when a packet is corrupt somehow, including + * packets that have IP version mismatches, invalid Layer 2 control + * fields, etc. L3/L4 checksums are not offloaded, but the packet + * is still be transmitted. + * @tx_unknown_protocol: Increments when the TPA encounters an unknown + * protocol, such as a new IPv6 extension header, or an unsupported + * Routing Type. The packet still has a checksum calculated but it + * may be incorrect. + * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted. + * Since, the only control frames supported by this device are + * PAUSE frames, this register is a count of all transmitted MAC + * control frames. + * @tx_marker_pdu_frms: Count of Marker PDUs transmitted + * on this Aggregation port. + * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port. + * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to + * the network. Increments because of: + * 1) An internal processing error + * (such as an uncorrectable ECC error). 2) A frame parsing error + * during IP checksum calculation. + * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this + * Aggregation port. + * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII + * characters that match a pattern that is programmable through + * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern + * is set to /T/ (i.e. the terminate character), thus the statistic + * tracks the number of transmitted Terminate characters. + * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII + * characters that match a pattern that is programmable through + * register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern + * is set to /S/ (i.e. the start character), + * thus the statistic tracks + * the number of transmitted Start characters. + * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII + * columns that match a pattern that is programmable through register + * XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set + * to 4 x /E/ (i.e. a column containing all error characters), thus + * the statistic tracks the number of Error columns transmitted at + * any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is + * set to 1, then this stat increments when COLUMN2 is found within + * 'n' clocks after COLUMN1. Here, 'n' is defined by + * XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set + * to 0, then it means to search anywhere for COLUMN2). 
+ * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII + * columns that match a pattern that is programmable through register + * XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set + * to 4 x /I/ (i.e. a column containing all idle characters), + * thus the statistic tracks the number of transmitted Idle columns. + * @tx_any_err_frms: Count of transmitted frames containing any error that + * prevents them from being passed to the network. Increments if + * there is an ECC while reading the frame out of the transmit + * buffer. Also increments if the transmit protocol assist (TPA) + * block determines that the frame should not be sent. + * @tx_drop_frms: Count of frames that could not be sent for no other reason + * than internal MAC processing. Increments once whenever the + * transmit buffer is flushed (due to an ECC error on a memory + * descriptor). + * @rx_ttl_frms: Count of total received MAC frames, including frames received + * with frame-too-long, FCS, or length errors. This stat can be + * configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count + * everything, even "frames" as small one byte of preamble. + * @rx_vld_frms: Count of successfully received MAC frames. Does not include + * frames received with frame-too-long, FCS, or length errors. + * @rx_offload_frms: Count of offloaded received frames that are passed to + * the host. + * @rx_ttl_octets: Count of total octets of received frames, not including + * framing characters (i.e. less framing bits). To determine the + * total octets of received frames, including framing characters, + * multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless + * otherwise configured, this stat only counts frames that have 8 + * bytes of preamble for each frame). This stat can be configured + * (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything, + * even the preamble octets of "frames" as small one byte of preamble + * @rx_data_octets: Count of data and padding octets of successfully received + * frames. Does not include frames received with frame-too-long, + * FCS, or length errors. + * @rx_offload_octets: Count of total octets, not including framing + * characters, of offloaded received frames that are passed + * to the host. + * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a + * nonbroadcast group address. Does not include frames received + * with frame-too-long, FCS, or length errors. + * @rx_vld_bcast_frms: Count of successfully received MAC frames containing + * the broadcast group address. Does not include frames received + * with frame-too-long, FCS, or length errors. + * @rx_accepted_ucast_frms: Count of successfully received frames containing + * a unicast address. Only includes frames that are passed to + * the system. + * @rx_accepted_nucast_frms: Count of successfully received frames containing + * a non-unicast (broadcast or multicast) address. Only includes + * frames that are passed to the system. Could include, for instance, + * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG + * register is set to pass FCS-errored frames to the host. + * @rx_tagged_frms: Count of received frames containing a VLAN tag. + * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN + * + 18 bytes (+ 22 bytes if VLAN-tagged). + * @rx_usized_frms: Count of received frames of length (including FCS, but not + * framing bits) less than 64 octets, that are otherwise well-formed. + * In other words, counts runts. 
+ * @rx_osized_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets, that are otherwise + * well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING + * is set to 1, then "more than 1518 octets" becomes "more than 1518 + * (1522 if VLAN-tagged) octets". + * @rx_frag_frms: Count of received frames of length (including FCS, but not + * framing bits) less than 64 octets that had bad FCS. In other + * words, counts fragments. + * @rx_jabber_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets that had bad FCS. In other + * words, counts jabbers. Note: If register + * XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than + * 1518 octets" becomes "more than 1518 (1522 if VLAN-tagged) + * octets". + * @rx_ttl_64_frms: Count of total received MAC frames with length (including + * FCS, but not framing bits) of exactly 64 octets. Includes frames + * received with frame-too-long, FCS, or length errors. + * @rx_ttl_65_127_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 65 and 127 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_128_255_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 128 and 255 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_256_511_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 256 and 511 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_512_1023_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 512 and 1023 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1024 and 1518 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1519 and 4095 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 4096 and 8191 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_8192_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 8192 and + * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received + * with frame-too-long, FCS, or length errors. + * @rx_ttl_gt_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) exceeding + * RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets inclusive. + * Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams. + * @rx_accepted_ip: Count of received IP datagrams that + * are passed to the system. + * @rx_ip_octets: Count of number of octets in received IP datagrams. Includes + * errored IP datagrams. + * @rx_err_ip: Count of received IP datagrams containing errors. 
For example, + * bad IP checksum. + * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages. + * @rx_tcp: Count of received TCP segments. Includes errored TCP segments. + * Note: This stat contains a count of all received TCP segments, + * regardless of whether or not they pertain to an established + * connection. + * @rx_udp: Count of received UDP datagrams. + * @rx_err_tcp: Count of received TCP segments containing errors. For example, + * bad TCP checksum. + * @rx_pause_count: Count of number of pause quanta that the MAC has been in + * the paused state. Recall, one pause quantum equates to 512 + * bit times. + * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames. + * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not + * contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and + * this register is a count of all received MAC control frames. + * Note: This stat may be configured to count all layer 2 errors + * (i.e. length errors and FCS errors). + * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does + * not include frames received with frame-too-long or + * frame-too-short error. + * @rx_in_rng_len_err_frms: Count of received frames with a length/type field + * value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500 + * for VLAN-tagged frames), inclusive, that does not match the + * number of data octets (including pad) received. Also contains + * a count of received frames with a length/type field less than + * 46 (42 for VLAN-tagged frames) and the number of data octets + * (including pad) received is greater than 46 (42 for VLAN-tagged + * frames). + * @rx_out_rng_len_err_frms: Count of received frames with length/type field + * between 1501 and 1535 decimal, inclusive. + * @rx_drop_frms: Count of received frames that could not be passed to the host. + * See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD, + * PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD + * for a list of reasons. Because the RMAC drops one frame at a time, + * this stat also indicates the number of drop events. + * @rx_discarded_frms: Count of received frames containing + * any error that prevents + * them from being passed to the system. See PORTn_RX_FCS_DISCARD, + * PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of + * reasons. + * @rx_drop_ip: Count of received IP datagrams that could not be passed to the + * host. See PORTn_RX_DROP_FRMS for a list of reasons. + * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the + * host. See PORTn_RX_DROP_FRMS for a list of reasons. + * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation + * port. + * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port. + * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port) + * that carry the Slow Protocols EtherType, but contain an unknown + * PDU. Or frames that contain the Slow Protocols group MAC address, + * but do not carry the Slow Protocols EtherType. + * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on + * this Aggregation port. + * @rx_fcs_discard: Count of received frames that are discarded because the + * FCS check failed. + * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port) + * that carry the Slow Protocols EtherType, but contain a badly + * formed PDU. Or frames that carry the Slow Protocols EtherType, + * but contain an illegal value of Protocol Subtype. 
+ * @rx_switch_discard: Count of received frames that are discarded by the + * internal switch because they did not have an entry in the + * Filtering Database. This includes frames that had an invalid + * destination MAC address or VLAN ID. It also includes frames are + * discarded because they did not satisfy the length requirements + * of the target VPATH. + * @rx_len_discard: Count of received frames that are discarded because of an + * invalid frame length (includes fragments, oversized frames and + * mismatch between frame length and length/type field). This stat + * can be configured + * (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING). + * @rx_rpa_discard: Count of received frames that were discarded because the + * receive protocol assist (RPA) discovered and error in the frame + * or was unable to parse the frame. + * @rx_l2_mgmt_discard: Count of Layer 2 management frames (eg. pause frames, + * Link Aggregation Control Protocol (LACP) frames, etc.) that are + * discarded. + * @rx_rts_discard: Count of received frames that are discarded by the receive + * traffic steering (RTS) logic. Includes those frame discarded + * because the SSC response contradicted the switch table, because + * the SSC timed out, or because the target queue could not fit the + * frame. + * @rx_trash_discard: Count of received frames that are discarded because + * receive traffic steering (RTS) steered the frame to the trash + * queue. + * @rx_buff_full_discard: Count of received frames that are discarded because + * internal buffers are full. Includes frames discarded because the + * RTS logic is waiting for an SSC lookup that has no timeout bound. + * Also, includes frames that are dropped because the MAC2FAU buffer + * is nearly full -- this can happen if the external receive buffer + * is full and the receive path is backing up. + * @rx_red_discard: Count of received frames that are discarded because of RED + * (Random Early Discard). + * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control + * characters occurring between times of normal data transmission + * (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is + * incremented when either - + * 1) The Reconciliation Sublayer (RS) is expecting one control + * character and gets another (i.e. is expecting a Start + * character, but gets another control character). + * 2) Start control character is not in lane 0 + * Only increments the count by one for each XGMII column. + * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters + * during normal data transmission. If the Reconciliation Sublayer + * (RS) receives a control character, other than a terminate control + * character, during receipt of data octets then this register is + * incremented. Also increments if the start frame delimiter is not + * found in the correct location. Only increments the count by one + * for each XGMII column. + * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters + * that match a pattern that is programmable through register + * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set + * to /E/ (i.e. the error character), thus the statistic tracks the + * number of Error characters received at any time. + * @rx_xgmii_err_sym: Count of the number of symbol errors in the received + * XGMII data (i.e. PHY indicates "Receive Error" on the XGMII). + * Only includes symbol errors that are observed between the XGMII + * Start Frame Delimiter and End Frame Delimiter, inclusive. 
And + * only increments the count by one for each frame. + * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns + * that match a pattern that is programmable through register + * XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set + * to 4 x /E/ (i.e. a column containing all error characters), thus + * the statistic tracks the number of Error columns received at any + * time. + * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters + * that match a pattern that is programmable through register + * XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set + * to /E/ (i.e. the error character), thus the statistic tracks the + * number of Error characters received at any time. + * @rx_local_fault: Maintains a count of the number of times that link + * transitioned from "up" to "down" due to a local fault. + * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns + * that match a pattern that is programmable through register + * XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set + * to 4 x /E/ (i.e. a column containing all error characters), thus + * the statistic tracks the number of Error columns received at any + * time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set + * to 1, then this stat increments when COLUMN2 is found within 'n' + * clocks after COLUMN1. Here, 'n' is defined by + * XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to + * 0, then it means to search anywhere for COLUMN2). + * @rx_jettison: Count of received frames that are jettisoned because internal + * buffers are full. + * @rx_remote_fault: Maintains a count of the number of times that link + * transitioned from "up" to "down" due to a remote fault. + * + * XMAC Port Statistics. 
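+ *
+ * Editor's note: the short usage sketch below is illustrative only and
+ * is not part of the imported driver source. Assuming "devh" is a valid
+ * device handle and VXGE_HW_OK is the driver's success status (both
+ * defined elsewhere in this header), a caller could snapshot a couple of
+ * wire-port counters through vxge_hw_device_xmac_stats_get(), which is
+ * declared later in this file (the stats block is large, hence the heap
+ * allocation):
+ *
+ *	struct vxge_hw_xmac_stats *xstats;
+ *
+ *	xstats = kzalloc(sizeof(*xstats), GFP_KERNEL);
+ *	if (xstats &&
+ *	    vxge_hw_device_xmac_stats_get(devh, xstats) == VXGE_HW_OK) {
+ *		struct vxge_hw_xmac_port_stats *p = &xstats->port_stats[0];
+ *
+ *		pr_info("wire0: tx_ttl_frms %llu, rx_fcs_err_frms %llu\n",
+ *			(unsigned long long)p->tx_ttl_frms,
+ *			(unsigned long long)p->rx_fcs_err_frms);
+ *	}
+ *	kfree(xstats);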
+ */ +struct vxge_hw_xmac_port_stats { +/*0x000*/ u64 tx_ttl_frms; +/*0x008*/ u64 tx_ttl_octets; +/*0x010*/ u64 tx_data_octets; +/*0x018*/ u64 tx_mcast_frms; +/*0x020*/ u64 tx_bcast_frms; +/*0x028*/ u64 tx_ucast_frms; +/*0x030*/ u64 tx_tagged_frms; +/*0x038*/ u64 tx_vld_ip; +/*0x040*/ u64 tx_vld_ip_octets; +/*0x048*/ u64 tx_icmp; +/*0x050*/ u64 tx_tcp; +/*0x058*/ u64 tx_rst_tcp; +/*0x060*/ u64 tx_udp; +/*0x068*/ u32 tx_parse_error; +/*0x06c*/ u32 tx_unknown_protocol; +/*0x070*/ u64 tx_pause_ctrl_frms; +/*0x078*/ u32 tx_marker_pdu_frms; +/*0x07c*/ u32 tx_lacpdu_frms; +/*0x080*/ u32 tx_drop_ip; +/*0x084*/ u32 tx_marker_resp_pdu_frms; +/*0x088*/ u32 tx_xgmii_char2_match; +/*0x08c*/ u32 tx_xgmii_char1_match; +/*0x090*/ u32 tx_xgmii_column2_match; +/*0x094*/ u32 tx_xgmii_column1_match; +/*0x098*/ u32 unused1; +/*0x09c*/ u16 tx_any_err_frms; +/*0x09e*/ u16 tx_drop_frms; +/*0x0a0*/ u64 rx_ttl_frms; +/*0x0a8*/ u64 rx_vld_frms; +/*0x0b0*/ u64 rx_offload_frms; +/*0x0b8*/ u64 rx_ttl_octets; +/*0x0c0*/ u64 rx_data_octets; +/*0x0c8*/ u64 rx_offload_octets; +/*0x0d0*/ u64 rx_vld_mcast_frms; +/*0x0d8*/ u64 rx_vld_bcast_frms; +/*0x0e0*/ u64 rx_accepted_ucast_frms; +/*0x0e8*/ u64 rx_accepted_nucast_frms; +/*0x0f0*/ u64 rx_tagged_frms; +/*0x0f8*/ u64 rx_long_frms; +/*0x100*/ u64 rx_usized_frms; +/*0x108*/ u64 rx_osized_frms; +/*0x110*/ u64 rx_frag_frms; +/*0x118*/ u64 rx_jabber_frms; +/*0x120*/ u64 rx_ttl_64_frms; +/*0x128*/ u64 rx_ttl_65_127_frms; +/*0x130*/ u64 rx_ttl_128_255_frms; +/*0x138*/ u64 rx_ttl_256_511_frms; +/*0x140*/ u64 rx_ttl_512_1023_frms; +/*0x148*/ u64 rx_ttl_1024_1518_frms; +/*0x150*/ u64 rx_ttl_1519_4095_frms; +/*0x158*/ u64 rx_ttl_4096_8191_frms; +/*0x160*/ u64 rx_ttl_8192_max_frms; +/*0x168*/ u64 rx_ttl_gt_max_frms; +/*0x170*/ u64 rx_ip; +/*0x178*/ u64 rx_accepted_ip; +/*0x180*/ u64 rx_ip_octets; +/*0x188*/ u64 rx_err_ip; +/*0x190*/ u64 rx_icmp; +/*0x198*/ u64 rx_tcp; +/*0x1a0*/ u64 rx_udp; +/*0x1a8*/ u64 rx_err_tcp; +/*0x1b0*/ u64 rx_pause_count; +/*0x1b8*/ u64 rx_pause_ctrl_frms; +/*0x1c0*/ u64 rx_unsup_ctrl_frms; +/*0x1c8*/ u64 rx_fcs_err_frms; +/*0x1d0*/ u64 rx_in_rng_len_err_frms; +/*0x1d8*/ u64 rx_out_rng_len_err_frms; +/*0x1e0*/ u64 rx_drop_frms; +/*0x1e8*/ u64 rx_discarded_frms; +/*0x1f0*/ u64 rx_drop_ip; +/*0x1f8*/ u64 rx_drop_udp; +/*0x200*/ u32 rx_marker_pdu_frms; +/*0x204*/ u32 rx_lacpdu_frms; +/*0x208*/ u32 rx_unknown_pdu_frms; +/*0x20c*/ u32 rx_marker_resp_pdu_frms; +/*0x210*/ u32 rx_fcs_discard; +/*0x214*/ u32 rx_illegal_pdu_frms; +/*0x218*/ u32 rx_switch_discard; +/*0x21c*/ u32 rx_len_discard; +/*0x220*/ u32 rx_rpa_discard; +/*0x224*/ u32 rx_l2_mgmt_discard; +/*0x228*/ u32 rx_rts_discard; +/*0x22c*/ u32 rx_trash_discard; +/*0x230*/ u32 rx_buff_full_discard; +/*0x234*/ u32 rx_red_discard; +/*0x238*/ u32 rx_xgmii_ctrl_err_cnt; +/*0x23c*/ u32 rx_xgmii_data_err_cnt; +/*0x240*/ u32 rx_xgmii_char1_match; +/*0x244*/ u32 rx_xgmii_err_sym; +/*0x248*/ u32 rx_xgmii_column1_match; +/*0x24c*/ u32 rx_xgmii_char2_match; +/*0x250*/ u32 rx_local_fault; +/*0x254*/ u32 rx_xgmii_column2_match; +/*0x258*/ u32 rx_jettison; +/*0x25c*/ u32 rx_remote_fault; +} __packed; + +/** + * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics + * + * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames. + * @tx_ttl_eth_octets: Count of total octets of transmitted frames, + * not including framing characters (i.e. less framing bits). 
+ * To determine the total octets of transmitted frames, including + * framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to + * this stat (the device always prepends 8 bytes of preamble for + * each frame) + * @tx_data_octets: Count of data and padding octets of successfully transmitted + * frames. + * @tx_mcast_frms: Count of successfully transmitted frames to a group address + * other than the broadcast address. + * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast + * group address. + * @tx_ucast_frms: Count of transmitted frames containing a unicast address. + * Includes discarded frames that are not sent to the network. + * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag. + * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network. + * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that + * are passed to the network. + * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due + * to problems within ICMP. + * @tx_tcp: Count of transmitted TCP segments. Does not include segments + * containing retransmitted octets. + * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag. + * @tx_udp: Count of transmitted UDP datagrams. + * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol, + * such as a new IPv6 extension header, or an unsupported Routing + * Type. The packet still has a checksum calculated but it may be + * incorrect. + * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed + * to the network. Increments because of: 1) An internal processing + * error (such as an uncorrectable ECC error). 2) A frame parsing + * error during IP checksum calculation. + * @tx_parse_error: Increments when the TPA is unable to parse a packet. This + * generally occurs when a packet is corrupt somehow, including + * packets that have IP version mismatches, invalid Layer 2 control + * fields, etc. L3/L4 checksums are not offloaded, but the packet + * is still be transmitted. + * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count + * of transmitted TCP segments. Does not include segments containing + * retransmitted octets. + * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the + * total number of segments retransmitted. Retransmitted segments + * that are sourced by the host are counted by the host. + * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count + * of transmitted IP datagrams that could not be passed to the + * network. + * + * XMAC Vpath TX Statistics. + */ +struct vxge_hw_xmac_vpath_tx_stats { + u64 tx_ttl_eth_frms; + u64 tx_ttl_eth_octets; + u64 tx_data_octets; + u64 tx_mcast_frms; + u64 tx_bcast_frms; + u64 tx_ucast_frms; + u64 tx_tagged_frms; + u64 tx_vld_ip; + u64 tx_vld_ip_octets; + u64 tx_icmp; + u64 tx_tcp; + u64 tx_rst_tcp; + u64 tx_udp; + u32 tx_unknown_protocol; + u32 tx_lost_ip; + u32 unused1; + u32 tx_parse_error; + u64 tx_tcp_offload; + u64 tx_retx_tcp_offload; + u64 tx_lost_ip_offload; +} __packed; + +/** + * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics + * + * @rx_ttl_eth_frms: Count of successfully received MAC frames. + * @rx_vld_frms: Count of successfully received MAC frames. Does not include + * frames received with frame-too-long, FCS, or length errors. + * @rx_offload_frms: Count of offloaded received frames that are passed to + * the host. 
+ * @rx_ttl_eth_octets: Count of total octets of received frames, not including + * framing characters (i.e. less framing bits). Only counts octets + * of frames that are at least 14 bytes (18 bytes for VLAN-tagged) + * before FCS. To determine the total octets of received frames, + * including framing characters, multiply RX_TTL_ETH_FRMS by 8 and + * add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames + * that have the required 8 bytes of preamble). + * @rx_data_octets: Count of data and padding octets of successfully received + * frames. Does not include frames received with frame-too-long, + * FCS, or length errors. + * @rx_offload_octets: Count of total octets, not including framing characters, + * of offloaded received frames that are passed to the host. + * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a + * nonbroadcast group address. Does not include frames received with + * frame-too-long, FCS, or length errors. + * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the + * broadcast group address. Does not include frames received with + * frame-too-long, FCS, or length errors. + * @rx_accepted_ucast_frms: Count of successfully received frames containing + * a unicast address. Only includes frames that are passed to the + * system. + * @rx_accepted_nucast_frms: Count of successfully received frames containing + * a non-unicast (broadcast or multicast) address. Only includes + * frames that are passed to the system. Could include, for instance, + * non-unicast frames that contain FCS errors if the MAC_ERROR_CFG + * register is set to pass FCS-errored frames to the host. + * @rx_tagged_frms: Count of received frames containing a VLAN tag. + * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN + * + 18 bytes (+ 22 bytes if VLAN-tagged). + * @rx_usized_frms: Count of received frames of length (including FCS, but not + * framing bits) less than 64 octets, that are otherwise well-formed. + * In other words, counts runts. + * @rx_osized_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets, that are otherwise + * well-formed. + * @rx_frag_frms: Count of received frames of length (including FCS, but not + * framing bits) less than 64 octets that had bad FCS. + * In other words, counts fragments. + * @rx_jabber_frms: Count of received frames of length (including FCS, but not + * framing bits) more than 1518 octets that had bad FCS. In other + * words, counts jabbers. + * @rx_ttl_64_frms: Count of total received MAC frames with length (including + * FCS, but not framing bits) of exactly 64 octets. Includes frames + * received with frame-too-long, FCS, or length errors. + * @rx_ttl_65_127_frms: Count of total received MAC frames + * with length (including + * FCS, but not framing bits) of between 65 and 127 octets inclusive. + * Includes frames received with frame-too-long, FCS, + * or length errors. + * @rx_ttl_128_255_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) + * of between 128 and 255 octets + * inclusive. Includes frames received with frame-too-long, FCS, + * or length errors. + * @rx_ttl_256_511_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) + * of between 256 and 511 octets + * inclusive. Includes frames received with frame-too-long, FCS, or + * length errors. 
+ * @rx_ttl_512_1023_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 512 and 1023 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1024 and 1518 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 1519 and 4095 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 4096 and 8191 + * octets inclusive. Includes frames received with frame-too-long, + * FCS, or length errors. + * @rx_ttl_8192_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) of between 8192 and + * RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received + * with frame-too-long, FCS, or length errors. + * @rx_ttl_gt_max_frms: Count of total received MAC frames with length + * (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18 + * (+22 bytes if VLAN-tagged) octets inclusive. Includes frames + * received with frame-too-long, FCS, or length errors. + * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams. + * @rx_accepted_ip: Count of received IP datagrams that + * are passed to the system. + * @rx_ip_octets: Count of number of octets in received IP datagrams. + * Includes errored IP datagrams. + * @rx_err_ip: Count of received IP datagrams containing errors. For example, + * bad IP checksum. + * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages. + * @rx_tcp: Count of received TCP segments. Includes errored TCP segments. + * Note: This stat contains a count of all received TCP segments, + * regardless of whether or not they pertain to an established + * connection. + * @rx_udp: Count of received UDP datagrams. + * @rx_err_tcp: Count of received TCP segments containing errors. For example, + * bad TCP checksum. + * @rx_lost_frms: Count of received frames that could not be passed to the host. + * See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD + * for a list of reasons. + * @rx_lost_ip: Count of received IP datagrams that could not be passed to + * the host. See RX_LOST_FRMS for a list of reasons. + * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count + * of received IP datagrams that could not be passed to the host. + * See RX_LOST_FRMS for a list of reasons. + * @rx_various_discard: Count of received frames that are discarded because + * the target receive queue is full. + * @rx_sleep_discard: Count of received frames that are discarded because the + * target VPATH is asleep (a Wake-on-LAN magic packet can be used + * to awaken the VPATH). + * @rx_red_discard: Count of received frames that are discarded because of RED + * (Random Early Discard). + * @rx_queue_full_discard: Count of received frames that are discarded because + * the target receive queue is full. + * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks. + * + * XMAC Vpath RX Statistics. 
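+ *
+ * Editor's note: the loop below is an illustrative sketch, not part of
+ * the imported driver. Given a filled-in struct vxge_hw_xmac_stats
+ * (see vxge_hw_device_xmac_stats_get(), declared later in this file),
+ * the per-vpath RX counters documented above can be summed over all
+ * virtual paths; "xstats" and "total_rx" are hypothetical locals:
+ *
+ *	u64 total_rx = 0;
+ *	u32 i;
+ *
+ *	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
+ *		total_rx += xstats->vpath_rx_stats[i].rx_ttl_eth_frms;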
+ */ +struct vxge_hw_xmac_vpath_rx_stats { + u64 rx_ttl_eth_frms; + u64 rx_vld_frms; + u64 rx_offload_frms; + u64 rx_ttl_eth_octets; + u64 rx_data_octets; + u64 rx_offload_octets; + u64 rx_vld_mcast_frms; + u64 rx_vld_bcast_frms; + u64 rx_accepted_ucast_frms; + u64 rx_accepted_nucast_frms; + u64 rx_tagged_frms; + u64 rx_long_frms; + u64 rx_usized_frms; + u64 rx_osized_frms; + u64 rx_frag_frms; + u64 rx_jabber_frms; + u64 rx_ttl_64_frms; + u64 rx_ttl_65_127_frms; + u64 rx_ttl_128_255_frms; + u64 rx_ttl_256_511_frms; + u64 rx_ttl_512_1023_frms; + u64 rx_ttl_1024_1518_frms; + u64 rx_ttl_1519_4095_frms; + u64 rx_ttl_4096_8191_frms; + u64 rx_ttl_8192_max_frms; + u64 rx_ttl_gt_max_frms; + u64 rx_ip; + u64 rx_accepted_ip; + u64 rx_ip_octets; + u64 rx_err_ip; + u64 rx_icmp; + u64 rx_tcp; + u64 rx_udp; + u64 rx_err_tcp; + u64 rx_lost_frms; + u64 rx_lost_ip; + u64 rx_lost_ip_offload; + u16 rx_various_discard; + u16 rx_sleep_discard; + u16 rx_red_discard; + u16 rx_queue_full_discard; + u64 rx_mpa_ok_frms; +} __packed; + +/** + * struct vxge_hw_xmac_stats - XMAC Statistics + * + * @aggr_stats: Statistics on aggregate port(port 0, port 1) + * @port_stats: Staticstics on ports(wire 0, wire 1, lag) + * @vpath_tx_stats: Per vpath XMAC TX stats + * @vpath_rx_stats: Per vpath XMAC RX stats + * + * XMAC Statistics. + */ +struct vxge_hw_xmac_stats { + struct vxge_hw_xmac_aggr_stats + aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID]; + struct vxge_hw_xmac_port_stats + port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1]; + struct vxge_hw_xmac_vpath_tx_stats + vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS]; + struct vxge_hw_xmac_vpath_rx_stats + vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS]; +}; + +/** + * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics. + * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block + * for the given VPATH + * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block + * @ini_num_cpl_rcvd: The number of PCI read completions received by the + * PIC block + * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC + * block to the host + * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by + * the PIC block + * @wrcrdtarb_xoff: TBD + * @rdcrdtarb_xoff: TBD + * @vpath_genstats_count0: TBD + * @vpath_genstats_count1: TBD + * @vpath_genstats_count2: TBD + * @vpath_genstats_count3: TBD + * @vpath_genstats_count4: TBD + * @vpath_gennstats_count5: TBD + * @tx_stats: Transmit stats + * @rx_stats: Receive stats + * @prog_event_vnum1: Programmable statistic. Increments when internal logic + * detects a certain event. See register + * XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information. + * @prog_event_vnum0: Programmable statistic. Increments when internal logic + * detects a certain event. See register + * XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information. + * @prog_event_vnum3: Programmable statistic. Increments when internal logic + * detects a certain event. See register + * XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information. + * @prog_event_vnum2: Programmable statistic. Increments when internal logic + * detects a certain event. See register + * XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information. 
+ * @rx_multi_cast_frame_discard: TBD + * @rx_frm_transferred: TBD + * @rxd_returned: TBD + * @rx_mpa_len_fail_frms: Count of received frames + * that fail the MPA length check + * @rx_mpa_mrk_fail_frms: Count of received frames + * that fail the MPA marker check + * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check + * @rx_permitted_frms: Count of frames that pass through the FAU and on to the + * frame buffer (and subsequently to the host). + * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded + * because the VPATH is in reset + * @rx_wol_frms: Count of received "magic packet" frames. Stat increments + * whenever the received frame matches the VPATH's Wake-on-LAN + * signature(s) CRC. + * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded + * because the VPATH is in reset. Includes frames that are discarded + * because the current VPIN does not match that VPIN of the frame + * + * Titan vpath hardware statistics. + */ +struct vxge_hw_vpath_stats_hw_info { +/*0x000*/ u32 ini_num_mwr_sent; +/*0x004*/ u32 unused1; +/*0x008*/ u32 ini_num_mrd_sent; +/*0x00c*/ u32 unused2; +/*0x010*/ u32 ini_num_cpl_rcvd; +/*0x014*/ u32 unused3; +/*0x018*/ u64 ini_num_mwr_byte_sent; +/*0x020*/ u64 ini_num_cpl_byte_rcvd; +/*0x028*/ u32 wrcrdtarb_xoff; +/*0x02c*/ u32 unused4; +/*0x030*/ u32 rdcrdtarb_xoff; +/*0x034*/ u32 unused5; +/*0x038*/ u32 vpath_genstats_count0; +/*0x03c*/ u32 vpath_genstats_count1; +/*0x040*/ u32 vpath_genstats_count2; +/*0x044*/ u32 vpath_genstats_count3; +/*0x048*/ u32 vpath_genstats_count4; +/*0x04c*/ u32 unused6; +/*0x050*/ u32 vpath_genstats_count5; +/*0x054*/ u32 unused7; +/*0x058*/ struct vxge_hw_xmac_vpath_tx_stats tx_stats; +/*0x0e8*/ struct vxge_hw_xmac_vpath_rx_stats rx_stats; +/*0x220*/ u64 unused9; +/*0x228*/ u32 prog_event_vnum1; +/*0x22c*/ u32 prog_event_vnum0; +/*0x230*/ u32 prog_event_vnum3; +/*0x234*/ u32 prog_event_vnum2; +/*0x238*/ u16 rx_multi_cast_frame_discard; +/*0x23a*/ u8 unused10[6]; +/*0x240*/ u32 rx_frm_transferred; +/*0x244*/ u32 unused11; +/*0x248*/ u16 rxd_returned; +/*0x24a*/ u8 unused12[6]; +/*0x252*/ u16 rx_mpa_len_fail_frms; +/*0x254*/ u16 rx_mpa_mrk_fail_frms; +/*0x256*/ u16 rx_mpa_crc_fail_frms; +/*0x258*/ u16 rx_permitted_frms; +/*0x25c*/ u64 rx_vp_reset_discarded_frms; +/*0x25e*/ u64 rx_wol_frms; +/*0x260*/ u64 tx_vp_reset_discarded_frms; +} __packed; + + +/** + * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics. 
+ * @pic.ini_rd_drop 0x0000 4 Number of DMA reads initiated + * by the adapter that were discarded because the VPATH is out of service + * @pic.ini_wr_drop 0x0004 4 Number of DMA writes initiated by the + * adapter that were discared because the VPATH is out of service + * @pic.wrcrdtarb_ph_crdt_depleted[vplane0] 0x0008 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane1] 0x0010 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane2] 0x0018 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane3] 0x0020 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane4] 0x0028 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane5] 0x0030 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane6] 0x0038 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane7] 0x0040 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane8] 0x0048 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane9] 0x0050 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane10] 0x0058 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane11] 0x0060 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane12] 0x0068 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane13] 0x0070 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane14] 0x0078 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane15] 0x0080 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_ph_crdt_depleted[vplane16] 0x0088 4 Number of times + * the posted header credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane0] 0x0090 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane1] 0x0098 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane2] 0x00a0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane3] 0x00a8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane4] 0x00b0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane5] 0x00b8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane6] 0x00c0 4 Number of times + * the posted data credits for upstream PCI 
writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane7] 0x00c8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane8] 0x00d0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane9] 0x00d8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane10] 0x00e0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane11] 0x00e8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane12] 0x00f0 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane13] 0x00f8 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane14] 0x0100 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane15] 0x0108 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.wrcrdtarb_pd_crdt_depleted[vplane16] 0x0110 4 Number of times + * the posted data credits for upstream PCI writes were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane0] 0x0118 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane1] 0x0120 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane2] 0x0128 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane3] 0x0130 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane4] 0x0138 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane5] 0x0140 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane6] 0x0148 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane7] 0x0150 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane8] 0x0158 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane9] 0x0160 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane10] 0x0168 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane11] 0x0170 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane12] 0x0178 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane13] 0x0180 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane14] 0x0188 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * 
@pic.rdcrdtarb_nph_crdt_depleted[vplane15] 0x0190 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.rdcrdtarb_nph_crdt_depleted[vplane16] 0x0198 4 Number of times + * the non-posted header credits for upstream PCI reads were depleted + * @pic.ini_rd_vpin_drop 0x01a0 4 Number of DMA reads initiated by + * the adapter that were discarded because the VPATH instance number does + * not match + * @pic.ini_wr_vpin_drop 0x01a4 4 Number of DMA writes initiated + * by the adapter that were discarded because the VPATH instance number + * does not match + * @pic.genstats_count0 0x01a8 4 Configurable statistic #1. Refer + * to the GENSTATS0_CFG for information on configuring this statistic + * @pic.genstats_count1 0x01ac 4 Configurable statistic #2. Refer + * to the GENSTATS1_CFG for information on configuring this statistic + * @pic.genstats_count2 0x01b0 4 Configurable statistic #3. Refer + * to the GENSTATS2_CFG for information on configuring this statistic + * @pic.genstats_count3 0x01b4 4 Configurable statistic #4. Refer + * to the GENSTATS3_CFG for information on configuring this statistic + * @pic.genstats_count4 0x01b8 4 Configurable statistic #5. Refer + * to the GENSTATS4_CFG for information on configuring this statistic + * @pic.genstats_count5 0x01c0 4 Configurable statistic #6. Refer + * to the GENSTATS5_CFG for information on configuring this statistic + * @pci.rstdrop_cpl 0x01c8 4 + * @pci.rstdrop_msg 0x01cc 4 + * @pci.rstdrop_client1 0x01d0 4 + * @pci.rstdrop_client0 0x01d4 4 + * @pci.rstdrop_client2 0x01d8 4 + * @pci.depl_cplh[vplane0] 0x01e2 2 Number of times completion + * header credits were depleted + * @pci.depl_nph[vplane0] 0x01e4 2 Number of times non posted + * header credits were depleted + * @pci.depl_ph[vplane0] 0x01e6 2 Number of times the posted + * header credits were depleted + * @pci.depl_cplh[vplane1] 0x01ea 2 + * @pci.depl_nph[vplane1] 0x01ec 2 + * @pci.depl_ph[vplane1] 0x01ee 2 + * @pci.depl_cplh[vplane2] 0x01f2 2 + * @pci.depl_nph[vplane2] 0x01f4 2 + * @pci.depl_ph[vplane2] 0x01f6 2 + * @pci.depl_cplh[vplane3] 0x01fa 2 + * @pci.depl_nph[vplane3] 0x01fc 2 + * @pci.depl_ph[vplane3] 0x01fe 2 + * @pci.depl_cplh[vplane4] 0x0202 2 + * @pci.depl_nph[vplane4] 0x0204 2 + * @pci.depl_ph[vplane4] 0x0206 2 + * @pci.depl_cplh[vplane5] 0x020a 2 + * @pci.depl_nph[vplane5] 0x020c 2 + * @pci.depl_ph[vplane5] 0x020e 2 + * @pci.depl_cplh[vplane6] 0x0212 2 + * @pci.depl_nph[vplane6] 0x0214 2 + * @pci.depl_ph[vplane6] 0x0216 2 + * @pci.depl_cplh[vplane7] 0x021a 2 + * @pci.depl_nph[vplane7] 0x021c 2 + * @pci.depl_ph[vplane7] 0x021e 2 + * @pci.depl_cplh[vplane8] 0x0222 2 + * @pci.depl_nph[vplane8] 0x0224 2 + * @pci.depl_ph[vplane8] 0x0226 2 + * @pci.depl_cplh[vplane9] 0x022a 2 + * @pci.depl_nph[vplane9] 0x022c 2 + * @pci.depl_ph[vplane9] 0x022e 2 + * @pci.depl_cplh[vplane10] 0x0232 2 + * @pci.depl_nph[vplane10] 0x0234 2 + * @pci.depl_ph[vplane10] 0x0236 2 + * @pci.depl_cplh[vplane11] 0x023a 2 + * @pci.depl_nph[vplane11] 0x023c 2 + * @pci.depl_ph[vplane11] 0x023e 2 + * @pci.depl_cplh[vplane12] 0x0242 2 + * @pci.depl_nph[vplane12] 0x0244 2 + * @pci.depl_ph[vplane12] 0x0246 2 + * @pci.depl_cplh[vplane13] 0x024a 2 + * @pci.depl_nph[vplane13] 0x024c 2 + * @pci.depl_ph[vplane13] 0x024e 2 + * @pci.depl_cplh[vplane14] 0x0252 2 + * @pci.depl_nph[vplane14] 0x0254 2 + * @pci.depl_ph[vplane14] 0x0256 2 + * @pci.depl_cplh[vplane15] 0x025a 2 + * @pci.depl_nph[vplane15] 0x025c 2 + * @pci.depl_ph[vplane15] 0x025e 2 + * @pci.depl_cplh[vplane16] 0x0262 2 + * 
@pci.depl_nph[vplane16] 0x0264 2 + * @pci.depl_ph[vplane16] 0x0266 2 + * @pci.depl_cpld[vplane0] 0x026a 2 Number of times completion data + * credits were depleted + * @pci.depl_npd[vplane0] 0x026c 2 Number of times non posted data + * credits were depleted + * @pci.depl_pd[vplane0] 0x026e 2 Number of times the posted data + * credits were depleted + * @pci.depl_cpld[vplane1] 0x0272 2 + * @pci.depl_npd[vplane1] 0x0274 2 + * @pci.depl_pd[vplane1] 0x0276 2 + * @pci.depl_cpld[vplane2] 0x027a 2 + * @pci.depl_npd[vplane2] 0x027c 2 + * @pci.depl_pd[vplane2] 0x027e 2 + * @pci.depl_cpld[vplane3] 0x0282 2 + * @pci.depl_npd[vplane3] 0x0284 2 + * @pci.depl_pd[vplane3] 0x0286 2 + * @pci.depl_cpld[vplane4] 0x028a 2 + * @pci.depl_npd[vplane4] 0x028c 2 + * @pci.depl_pd[vplane4] 0x028e 2 + * @pci.depl_cpld[vplane5] 0x0292 2 + * @pci.depl_npd[vplane5] 0x0294 2 + * @pci.depl_pd[vplane5] 0x0296 2 + * @pci.depl_cpld[vplane6] 0x029a 2 + * @pci.depl_npd[vplane6] 0x029c 2 + * @pci.depl_pd[vplane6] 0x029e 2 + * @pci.depl_cpld[vplane7] 0x02a2 2 + * @pci.depl_npd[vplane7] 0x02a4 2 + * @pci.depl_pd[vplane7] 0x02a6 2 + * @pci.depl_cpld[vplane8] 0x02aa 2 + * @pci.depl_npd[vplane8] 0x02ac 2 + * @pci.depl_pd[vplane8] 0x02ae 2 + * @pci.depl_cpld[vplane9] 0x02b2 2 + * @pci.depl_npd[vplane9] 0x02b4 2 + * @pci.depl_pd[vplane9] 0x02b6 2 + * @pci.depl_cpld[vplane10] 0x02ba 2 + * @pci.depl_npd[vplane10] 0x02bc 2 + * @pci.depl_pd[vplane10] 0x02be 2 + * @pci.depl_cpld[vplane11] 0x02c2 2 + * @pci.depl_npd[vplane11] 0x02c4 2 + * @pci.depl_pd[vplane11] 0x02c6 2 + * @pci.depl_cpld[vplane12] 0x02ca 2 + * @pci.depl_npd[vplane12] 0x02cc 2 + * @pci.depl_pd[vplane12] 0x02ce 2 + * @pci.depl_cpld[vplane13] 0x02d2 2 + * @pci.depl_npd[vplane13] 0x02d4 2 + * @pci.depl_pd[vplane13] 0x02d6 2 + * @pci.depl_cpld[vplane14] 0x02da 2 + * @pci.depl_npd[vplane14] 0x02dc 2 + * @pci.depl_pd[vplane14] 0x02de 2 + * @pci.depl_cpld[vplane15] 0x02e2 2 + * @pci.depl_npd[vplane15] 0x02e4 2 + * @pci.depl_pd[vplane15] 0x02e6 2 + * @pci.depl_cpld[vplane16] 0x02ea 2 + * @pci.depl_npd[vplane16] 0x02ec 2 + * @pci.depl_pd[vplane16] 0x02ee 2 + * @xgmac_port[3]; + * @xgmac_aggr[2]; + * @xgmac.global_prog_event_gnum0 0x0ae0 8 Programmable statistic. + * Increments when internal logic detects a certain event. See register + * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information. + * @xgmac.global_prog_event_gnum1 0x0ae8 8 Programmable statistic. + * Increments when internal logic detects a certain event. See register + * XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information. + * @xgmac.orp_lro_events 0x0af8 8 + * @xgmac.orp_bs_events 0x0b00 8 + * @xgmac.orp_iwarp_events 0x0b08 8 + * @xgmac.tx_permitted_frms 0x0b14 4 + * @xgmac.port2_tx_any_frms 0x0b1d 1 + * @xgmac.port1_tx_any_frms 0x0b1e 1 + * @xgmac.port0_tx_any_frms 0x0b1f 1 + * @xgmac.port2_rx_any_frms 0x0b25 1 + * @xgmac.port1_rx_any_frms 0x0b26 1 + * @xgmac.port0_rx_any_frms 0x0b27 1 + * + * Titan mrpcim hardware statistics. 
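+ *
+ * Editor's note: illustrative sketch only, not part of the imported
+ * driver. Besides reading this whole block, individual statistics can be
+ * fetched one at a time through vxge_hw_mrpcim_stats_access() (declared
+ * later in this file) once collection has been turned on with
+ * vxge_hw_mrpcim_stats_enable(). The VXGE_HW_STATS_OP_/LOC_ values are
+ * defined earlier in this file; the exact offset encoding (shifts,
+ * per-port strides) is hardware specific and not shown here. "devh" and
+ * "val" are hypothetical:
+ *
+ *	u64 val;
+ *	enum vxge_hw_status status;
+ *
+ *	status = vxge_hw_mrpcim_stats_access(devh, VXGE_HW_STATS_OP_READ,
+ *					VXGE_HW_STATS_LOC_AGGR,
+ *					VXGE_HW_STATS_AGGRn_OFFSET, &val);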
+ */ +struct vxge_hw_device_stats_mrpcim_info { +/*0x0000*/ u32 pic_ini_rd_drop; +/*0x0004*/ u32 pic_ini_wr_drop; +/*0x0008*/ struct { + /*0x0000*/ u32 pic_wrcrdtarb_ph_crdt_depleted; + /*0x0004*/ u32 unused1; + } pic_wrcrdtarb_ph_crdt_depleted_vplane[17]; +/*0x0090*/ struct { + /*0x0000*/ u32 pic_wrcrdtarb_pd_crdt_depleted; + /*0x0004*/ u32 unused2; + } pic_wrcrdtarb_pd_crdt_depleted_vplane[17]; +/*0x0118*/ struct { + /*0x0000*/ u32 pic_rdcrdtarb_nph_crdt_depleted; + /*0x0004*/ u32 unused3; + } pic_rdcrdtarb_nph_crdt_depleted_vplane[17]; +/*0x01a0*/ u32 pic_ini_rd_vpin_drop; +/*0x01a4*/ u32 pic_ini_wr_vpin_drop; +/*0x01a8*/ u32 pic_genstats_count0; +/*0x01ac*/ u32 pic_genstats_count1; +/*0x01b0*/ u32 pic_genstats_count2; +/*0x01b4*/ u32 pic_genstats_count3; +/*0x01b8*/ u32 pic_genstats_count4; +/*0x01bc*/ u32 unused4; +/*0x01c0*/ u32 pic_genstats_count5; +/*0x01c4*/ u32 unused5; +/*0x01c8*/ u32 pci_rstdrop_cpl; +/*0x01cc*/ u32 pci_rstdrop_msg; +/*0x01d0*/ u32 pci_rstdrop_client1; +/*0x01d4*/ u32 pci_rstdrop_client0; +/*0x01d8*/ u32 pci_rstdrop_client2; +/*0x01dc*/ u32 unused6; +/*0x01e0*/ struct { + /*0x0000*/ u16 unused7; + /*0x0002*/ u16 pci_depl_cplh; + /*0x0004*/ u16 pci_depl_nph; + /*0x0006*/ u16 pci_depl_ph; + } pci_depl_h_vplane[17]; +/*0x0268*/ struct { + /*0x0000*/ u16 unused8; + /*0x0002*/ u16 pci_depl_cpld; + /*0x0004*/ u16 pci_depl_npd; + /*0x0006*/ u16 pci_depl_pd; + } pci_depl_d_vplane[17]; +/*0x02f0*/ struct vxge_hw_xmac_port_stats xgmac_port[3]; +/*0x0a10*/ struct vxge_hw_xmac_aggr_stats xgmac_aggr[2]; +/*0x0ae0*/ u64 xgmac_global_prog_event_gnum0; +/*0x0ae8*/ u64 xgmac_global_prog_event_gnum1; +/*0x0af0*/ u64 unused7; +/*0x0af8*/ u64 unused8; +/*0x0b00*/ u64 unused9; +/*0x0b08*/ u64 unused10; +/*0x0b10*/ u32 unused11; +/*0x0b14*/ u32 xgmac_tx_permitted_frms; +/*0x0b18*/ u32 unused12; +/*0x0b1c*/ u8 unused13; +/*0x0b1d*/ u8 xgmac_port2_tx_any_frms; +/*0x0b1e*/ u8 xgmac_port1_tx_any_frms; +/*0x0b1f*/ u8 xgmac_port0_tx_any_frms; +/*0x0b20*/ u32 unused14; +/*0x0b24*/ u8 unused15; +/*0x0b25*/ u8 xgmac_port2_rx_any_frms; +/*0x0b26*/ u8 xgmac_port1_rx_any_frms; +/*0x0b27*/ u8 xgmac_port0_rx_any_frms; +} __packed; + +/** + * struct vxge_hw_device_stats_hw_info - Titan hardware statistics. + * @vpath_info: VPath statistics + * @vpath_info_sav: Vpath statistics saved + * + * Titan hardware statistics. + */ +struct vxge_hw_device_stats_hw_info { + struct vxge_hw_vpath_stats_hw_info + *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS]; + struct vxge_hw_vpath_stats_hw_info + vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS]; +}; + +/** + * struct vxge_hw_vpath_stats_sw_common_info - HW common + * statistics for queues. + * @full_cnt: Number of times the queue was full + * @usage_cnt: usage count. + * @usage_max: Maximum usage + * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage. + * @total_compl_cnt: Total descriptor completion count. + * + * Hw queue counters + * See also: struct vxge_hw_vpath_stats_sw_fifo_info{}, + * struct vxge_hw_vpath_stats_sw_ring_info{}, + */ +struct vxge_hw_vpath_stats_sw_common_info { + u32 full_cnt; + u32 usage_cnt; + u32 usage_max; + u32 reserve_free_swaps_cnt; + u32 total_compl_cnt; +}; + +/** + * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics + * @common_stats: Common counters for all queues + * @total_posts: Total number of postings on the queue. + * @total_buffers: Total number of buffers posted. + * @txd_t_code_err_cnt: Array of transmit transfer codes. 
The position + * (index) in this array reflects the transfer code type, for instance + * 0xA - "loss of link". + * Value txd_t_code_err_cnt[i] reflects the + * number of times the corresponding transfer code was encountered. + * + * HW fifo counters + * See also: struct vxge_hw_vpath_stats_sw_common_info{}, + * struct vxge_hw_vpath_stats_sw_ring_info{}, + */ +struct vxge_hw_vpath_stats_sw_fifo_info { + struct vxge_hw_vpath_stats_sw_common_info common_stats; + u32 total_posts; + u32 total_buffers; + u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE]; +}; + +/** + * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics + * @common_stats: Common counters for all queues + * @rxd_t_code_err_cnt: Array of receive transfer codes. The position + * (index) in this array reflects the transfer code type, + * for instance + * 0x7 - for "invalid receive buffer size", or 0x8 - for ECC. + * Value rxd_t_code_err_cnt[i] reflects the + * number of times the corresponding transfer code was encountered. + * + * HW ring counters + * See also: struct vxge_hw_vpath_stats_sw_common_info{}, + * struct vxge_hw_vpath_stats_sw_fifo_info{}, + */ +struct vxge_hw_vpath_stats_sw_ring_info { + struct vxge_hw_vpath_stats_sw_common_info common_stats; + u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE]; + +}; + +/** + * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics + * @unknown_alarms: + * @network_sustained_fault: + * @network_sustained_ok: + * @kdfcctl_fifo0_overwrite: + * @kdfcctl_fifo0_poison: + * @kdfcctl_fifo0_dma_error: + * @dblgen_fifo0_overflow: + * @statsb_pif_chain_error: + * @statsb_drop_timeout: + * @target_illegal_access: + * @ini_serr_det: + * @prc_ring_bumps: + * @prc_rxdcm_sc_err: + * @prc_rxdcm_sc_abort: + * @prc_quanta_size_err: + * + * HW vpath error statistics + */ +struct vxge_hw_vpath_stats_sw_err { + u32 unknown_alarms; + u32 network_sustained_fault; + u32 network_sustained_ok; + u32 kdfcctl_fifo0_overwrite; + u32 kdfcctl_fifo0_poison; + u32 kdfcctl_fifo0_dma_error; + u32 dblgen_fifo0_overflow; + u32 statsb_pif_chain_error; + u32 statsb_drop_timeout; + u32 target_illegal_access; + u32 ini_serr_det; + u32 prc_ring_bumps; + u32 prc_rxdcm_sc_err; + u32 prc_rxdcm_sc_abort; + u32 prc_quanta_size_err; +}; + +/** + * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics + * @soft_reset_cnt: Number of times soft reset is done on this vpath. + * @error_stats: error counters for the vpath + * @ring_stats: counters for ring belonging to the vpath + * @fifo_stats: counters for fifo belonging to the vpath + * + * HW vpath sw statistics + * See also: struct vxge_hw_device_info{} }. + */ +struct vxge_hw_vpath_stats_sw_info { + u32 soft_reset_cnt; + struct vxge_hw_vpath_stats_sw_err error_stats; + struct vxge_hw_vpath_stats_sw_ring_info ring_stats; + struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats; +}; + +/** + * struct vxge_hw_device_stats_sw_info - HW own per-device statistics. + * + * @not_traffic_intr_cnt: Number of times the host was interrupted + * without new completions. + * "Non-traffic interrupt counter". + * @traffic_intr_cnt: Number of traffic interrupts for the device. + * @total_intr_cnt: Total number of traffic interrupts for the device. + * @total_intr_cnt == @traffic_intr_cnt + + * @not_traffic_intr_cnt + * @soft_reset_cnt: Number of times soft reset is done on this device. + * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{} + * HW per-device statistics. 
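+ *
+ * Editor's note: illustrative sketch, not part of the imported driver.
+ * These software counters can be snapshotted with
+ * vxge_hw_driver_stats_get(), declared below; "devh" is assumed to be a
+ * valid device handle and VXGE_HW_OK the driver's success status:
+ *
+ *	struct vxge_hw_device_stats_sw_info *sw;
+ *
+ *	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ *	if (sw && vxge_hw_driver_stats_get(devh, sw) == VXGE_HW_OK)
+ *		pr_info("intr: %u total (%u traffic), soft resets: %u\n",
+ *			sw->total_intr_cnt, sw->traffic_intr_cnt,
+ *			sw->soft_reset_cnt);
+ *	kfree(sw);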
+ */ +struct vxge_hw_device_stats_sw_info { + u32 not_traffic_intr_cnt; + u32 traffic_intr_cnt; + u32 total_intr_cnt; + u32 soft_reset_cnt; + struct vxge_hw_vpath_stats_sw_info + vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS]; +}; + +/** + * struct vxge_hw_device_stats_sw_err - HW device error statistics. + * @vpath_alarms: Number of vpath alarms + * + * HW Device error stats + */ +struct vxge_hw_device_stats_sw_err { + u32 vpath_alarms; +}; + +/** + * struct vxge_hw_device_stats - Contains HW per-device statistics, + * including hw. + * @devh: HW device handle. + * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats. + * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory + * space. + * @hw_info_dma_acch: One more DMA handle used subsequently to free the + * DMA object. Note that this and the previous handle have + * physical meaning for Solaris; on Windows and Linux the + * corresponding value will be simply pointer to PCI device. + * + * @hw_dev_info_stats: Titan statistics maintained by the hardware. + * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number + * of completions per interrupt. + * @sw_dev_err_stats: HW's "soft" device error statistics. + * + * Structure-container of HW per-device statistics. Note that per-channel + * statistics are kept in separate structures under HW's fifo and ring + * channels. + */ +struct vxge_hw_device_stats { + /* handles */ + struct __vxge_hw_device *devh; + + /* HW device hardware statistics */ + struct vxge_hw_device_stats_hw_info hw_dev_info_stats; + + /* HW device "soft" stats */ + struct vxge_hw_device_stats_sw_err sw_dev_err_stats; + struct vxge_hw_device_stats_sw_info sw_dev_info_stats; + +}; + +enum vxge_hw_status vxge_hw_device_hw_stats_enable( + struct __vxge_hw_device *devh); + +enum vxge_hw_status vxge_hw_device_stats_get( + struct __vxge_hw_device *devh, + struct vxge_hw_device_stats_hw_info *hw_stats); + +enum vxge_hw_status vxge_hw_driver_stats_get( + struct __vxge_hw_device *devh, + struct vxge_hw_device_stats_sw_info *sw_stats); + +enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh); + +enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh); + +enum vxge_hw_status +vxge_hw_mrpcim_stats_access( + struct __vxge_hw_device *devh, + u32 operation, + u32 location, + u32 offset, + u64 *stat); + +enum vxge_hw_status +vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh, + struct vxge_hw_xmac_stats *xmac_stats); + +/** + * enum enum vxge_hw_mgmt_reg_type - Register types. 
+ * + * @vxge_hw_mgmt_reg_type_legacy: Legacy registers + * @vxge_hw_mgmt_reg_type_toc: TOC Registers + * @vxge_hw_mgmt_reg_type_common: Common Registers + * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers + * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers + * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers + * @vxge_hw_mgmt_reg_type_vpath: vpath registers + * + * Register type enumaration + */ +enum vxge_hw_mgmt_reg_type { + vxge_hw_mgmt_reg_type_legacy = 0, + vxge_hw_mgmt_reg_type_toc = 1, + vxge_hw_mgmt_reg_type_common = 2, + vxge_hw_mgmt_reg_type_mrpcim = 3, + vxge_hw_mgmt_reg_type_srpcim = 4, + vxge_hw_mgmt_reg_type_vpmgmt = 5, + vxge_hw_mgmt_reg_type_vpath = 6 +}; + +enum vxge_hw_status +vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh, + enum vxge_hw_mgmt_reg_type type, + u32 index, + u32 offset, + u64 *value); + +enum vxge_hw_status +vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh, + enum vxge_hw_mgmt_reg_type type, + u32 index, + u32 offset, + u64 value); + +/** + * enum enum vxge_hw_rxd_state - Descriptor (RXD) state. + * @VXGE_HW_RXD_STATE_NONE: Invalid state. + * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation. + * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the + * device. + * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for + * filling-in and posting later. + * + * Titan/HW descriptor states. + * + */ +enum vxge_hw_rxd_state { + VXGE_HW_RXD_STATE_NONE = 0, + VXGE_HW_RXD_STATE_AVAIL = 1, + VXGE_HW_RXD_STATE_POSTED = 2, + VXGE_HW_RXD_STATE_FREED = 3 +}; + +/** + * struct vxge_hw_ring_rxd_info - Extended information associated with a + * completed ring descriptor. + * @syn_flag: SYN flag + * @is_icmp: Is ICMP + * @fast_path_eligible: Fast Path Eligible flag + * @l3_cksum: in L3 checksum is valid + * @l3_cksum: Result of IP checksum check (by Titan hardware). + * This field containing VXGE_HW_L3_CKSUM_OK would mean that + * the checksum is correct, otherwise - the datagram is + * corrupted. + * @l4_cksum: in L4 checksum is valid + * @l4_cksum: Result of TCP/UDP checksum check (by Titan hardware). + * This field containing VXGE_HW_L4_CKSUM_OK would mean that + * the checksum is correct. Otherwise - the packet is + * corrupted. + * @frame: Zero or more of enum vxge_hw_frame_type flags. + * See enum vxge_hw_frame_type{}. + * @proto: zero or more of enum vxge_hw_frame_proto flags. Reporting bits for + * various higher-layer protocols, including (but note restricted to) + * TCP and UDP. See enum vxge_hw_frame_proto{}. + * @is_vlan: If vlan tag is valid + * @vlan: VLAN tag extracted from the received frame. + * @rth_bucket: RTH bucket + * @rth_it_hit: Set, If RTH hash value calculated by the Titan hardware + * has a matching entry in the Indirection table. + * @rth_spdm_hit: Set, If RTH hash value calculated by the Titan hardware + * has a matching entry in the Socket Pair Direct Match table. + * @rth_hash_type: RTH hash code of the function used to calculate the hash. + * @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Titan + * hardware if RTH is enabled. + */ +struct vxge_hw_ring_rxd_info { + u32 syn_flag; + u32 is_icmp; + u32 fast_path_eligible; + u32 l3_cksum_valid; + u32 l3_cksum; + u32 l4_cksum_valid; + u32 l4_cksum; + u32 frame; + u32 proto; + u32 is_vlan; + u32 vlan; + u32 rth_bucket; + u32 rth_it_hit; + u32 rth_spdm_hit; + u32 rth_hash_type; + u32 rth_value; +}; +/** + * enum vxge_hw_ring_tcode - Transfer codes returned by adapter + * @VXGE_HW_RING_T_CODE_OK: Transfer ok. 
+ * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation + * configuration mismatch. + * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation + * configuration mismatch. + * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum + * presentation configuration mismatch. + * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet, + * such as unknown IPv6 header. + * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity + * error, such as FCS or ECC). + * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error the RxD buffer( + * s) were not appropriately sized and data loss occurred. + * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted. + * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of + * Segment1 exceeded the capacity of Buffer1 and the remainder + * was placed in Buffer2. Segment2 now starts in Buffer3. + * No data loss or errors occurred. + * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs + * assigned buffers has a size of 0 bytes. + * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to + * VPath Reset or because of a VPIN mismatch. + * @VXGE_HW_RING_T_CODE_UNUSED: Unused + * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one + * transfer code condition occurred. + * + * Transfer codes returned by adapter. + */ +enum vxge_hw_ring_tcode { + VXGE_HW_RING_T_CODE_OK = 0x0, + VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1, + VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2, + VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3, + VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5, + VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6, + VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7, + VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8, + VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9, + VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA, + VXGE_HW_RING_T_CODE_FRM_DROP = 0xC, + VXGE_HW_RING_T_CODE_UNUSED = 0xE, + VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF +}; + +enum vxge_hw_status vxge_hw_ring_rxd_reserve( + struct __vxge_hw_ring *ring_handle, + void **rxdh); + +void +vxge_hw_ring_rxd_pre_post( + struct __vxge_hw_ring *ring_handle, + void *rxdh); + +void +vxge_hw_ring_rxd_post_post( + struct __vxge_hw_ring *ring_handle, + void *rxdh); + +void +vxge_hw_ring_rxd_post_post_wmb( + struct __vxge_hw_ring *ring_handle, + void *rxdh); + +void vxge_hw_ring_rxd_post( + struct __vxge_hw_ring *ring_handle, + void *rxdh); + +enum vxge_hw_status vxge_hw_ring_rxd_next_completed( + struct __vxge_hw_ring *ring_handle, + void **rxdh, + u8 *t_code); + +enum vxge_hw_status vxge_hw_ring_handle_tcode( + struct __vxge_hw_ring *ring_handle, + void *rxdh, + u8 t_code); + +void vxge_hw_ring_rxd_free( + struct __vxge_hw_ring *ring_handle, + void *rxdh); + +/** + * enum enum vxge_hw_frame_proto - Higher-layer ethernet protocols. + * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN. + * @VXGE_HW_FRAME_PROTO_IPV4: IPv4. + * @VXGE_HW_FRAME_PROTO_IPV6: IPv6. + * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented. + * @VXGE_HW_FRAME_PROTO_TCP: TCP. + * @VXGE_HW_FRAME_PROTO_UDP: UDP. + * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP. + * + * Higher layer ethernet protocols and options. 
+ */ +enum vxge_hw_frame_proto { + VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80, + VXGE_HW_FRAME_PROTO_IPV4 = 0x10, + VXGE_HW_FRAME_PROTO_IPV6 = 0x08, + VXGE_HW_FRAME_PROTO_IP_FRAG = 0x04, + VXGE_HW_FRAME_PROTO_TCP = 0x02, + VXGE_HW_FRAME_PROTO_UDP = 0x01, + VXGE_HW_FRAME_PROTO_TCP_OR_UDP = (VXGE_HW_FRAME_PROTO_TCP | \ + VXGE_HW_FRAME_PROTO_UDP) +}; + +/** + * enum enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD + * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL + * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL + * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL + * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL. + * + * These gather codes are used to indicate the position of a TxD in a TxD list + */ +enum vxge_hw_fifo_gather_code { + VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2, + VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0, + VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1, + VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3 +}; + +/** + * enum enum vxge_hw_fifo_tcode - tcodes used in fifo + * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK + * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or + * frame data) returned with corrupt data. + * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL:PCI read transaction was returned + * with no data. + * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a + * frame or LSO MSS that was too long (>9800B). + * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send + * Offload operation, due to improper header template, + * unsupported protocol, etc. + * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused + * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple + * data buffer transfer errors are encountered (see below). + * Otherwise it is set to 0. + * + * These tcodes are returned in various API for TxD status + */ +enum vxge_hw_fifo_tcode { + VXGE_HW_FIFO_T_CODE_OK = 0x0, + VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1, + VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2, + VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3, + VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4, + VXGE_HW_FIFO_T_CODE_UNUSED = 0x7, + VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8 +}; + +enum vxge_hw_status vxge_hw_fifo_txdl_reserve( + struct __vxge_hw_fifo *fifoh, + void **txdlh, + void **txdl_priv); + +void vxge_hw_fifo_txdl_buffer_set( + struct __vxge_hw_fifo *fifo_handle, + void *txdlh, + u32 frag_idx, + dma_addr_t dma_pointer, + u32 size); + +void vxge_hw_fifo_txdl_post( + struct __vxge_hw_fifo *fifo_handle, + void *txdlh); + +u32 vxge_hw_fifo_free_txdl_count_get( + struct __vxge_hw_fifo *fifo_handle); + +enum vxge_hw_status vxge_hw_fifo_txdl_next_completed( + struct __vxge_hw_fifo *fifoh, + void **txdlh, + enum vxge_hw_fifo_tcode *t_code); + +enum vxge_hw_status vxge_hw_fifo_handle_tcode( + struct __vxge_hw_fifo *fifoh, + void *txdlh, + enum vxge_hw_fifo_tcode t_code); + +void vxge_hw_fifo_txdl_free( + struct __vxge_hw_fifo *fifoh, + void *txdlh); + +/* + * Device + */ + +#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8) +#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16) + +/* + * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data. + * @dma_addr: DMA (mapped) address of _this_ descriptor. + * @dma_handle: DMA handle used to map the descriptor onto device. + * @dma_offset: Descriptor's offset in the memory block. HW allocates + * descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE + * bytes. Each memblock is contiguous DMA-able memory. Each + * memblock contains 1 or more 4KB RxD blocks visible to the + * Titan hardware. 
+ * @dma_object: DMA address and handle of the memory block that contains + * the descriptor. This member is used only in the "checked" + * version of the HW (to enforce certain assertions); + * otherwise it gets compiled out. + * @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage. + * + * Per-receive decsriptor HW-private data. HW uses the space to keep DMA + * information associated with the descriptor. Note that driver can ask HW + * to allocate additional per-descriptor space for its own (driver-specific) + * purposes. + */ +struct __vxge_hw_ring_rxd_priv { + dma_addr_t dma_addr; + struct pci_dev *dma_handle; + ptrdiff_t dma_offset; +#ifdef VXGE_DEBUG_ASSERT + struct vxge_hw_mempool_dma *dma_object; +#endif +}; + +struct vxge_hw_mempool_cbs { + void (*item_func_alloc)( + struct vxge_hw_mempool *mempoolh, + u32 memblock_index, + struct vxge_hw_mempool_dma *dma_object, + u32 index, + u32 is_last); +}; + +#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \ + ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next) + +enum vxge_hw_status +__vxge_hw_vpath_rts_table_get( + struct __vxge_hw_vpath_handle *vpath_handle, + u32 action, + u32 rts_table, + u32 offset, + u64 *data1, + u64 *data2); + +enum vxge_hw_status +__vxge_hw_vpath_rts_table_set( + struct __vxge_hw_vpath_handle *vpath_handle, + u32 action, + u32 rts_table, + u32 offset, + u64 data1, + u64 data2); + +enum vxge_hw_status +__vxge_hw_vpath_enable( + struct __vxge_hw_device *devh, + u32 vp_id); + +void vxge_hw_device_intr_enable( + struct __vxge_hw_device *devh); + +u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode); + +void vxge_hw_device_intr_disable( + struct __vxge_hw_device *devh); + +void vxge_hw_device_mask_all( + struct __vxge_hw_device *devh); + +void vxge_hw_device_unmask_all( + struct __vxge_hw_device *devh); + +enum vxge_hw_status vxge_hw_device_begin_irq( + struct __vxge_hw_device *devh, + u32 skip_alarms, + u64 *reason); + +void vxge_hw_device_clear_tx_rx( + struct __vxge_hw_device *devh); + +/* + * Virtual Paths + */ + +void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring); + +void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo); + +u32 vxge_hw_vpath_id( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_vpath_mac_addr_add_mode { + VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0, + VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1, + VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2 +}; + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_add( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 *macaddr, + u8 *macaddr_mask, + enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_get( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 *macaddr, + u8 *macaddr_mask); + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_get_next( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 *macaddr, + u8 *macaddr_mask); + +enum vxge_hw_status +vxge_hw_vpath_mac_addr_delete( + struct __vxge_hw_vpath_handle *vpath_handle, + u8 *macaddr, + u8 *macaddr_mask); + +enum vxge_hw_status +vxge_hw_vpath_vid_add( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 vid); + +enum vxge_hw_status +vxge_hw_vpath_vid_delete( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 vid); + +enum vxge_hw_status +vxge_hw_vpath_etype_add( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 etype); + +enum vxge_hw_status +vxge_hw_vpath_etype_get( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 *etype); + +enum vxge_hw_status 
+vxge_hw_vpath_etype_get_next( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 *etype); + +enum vxge_hw_status +vxge_hw_vpath_etype_delete( + struct __vxge_hw_vpath_handle *vpath_handle, + u64 etype); + +enum vxge_hw_status vxge_hw_vpath_promisc_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_promisc_disable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_bcast_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_mcast_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_mcast_disable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_poll_rx( + struct __vxge_hw_ring *ringh); + +enum vxge_hw_status vxge_hw_vpath_poll_tx( + struct __vxge_hw_fifo *fifoh, + struct sk_buff ***skb_ptr, int nr_skb, int *more); + +enum vxge_hw_status vxge_hw_vpath_alarm_process( + struct __vxge_hw_vpath_handle *vpath_handle, + u32 skip_alarms); + +void +vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle, + int *tim_msix_id, int alarm_msix_id); + +void +vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, + int msix_id); + +void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id); + +void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); + +void +vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle, + int msix_id); + +enum vxge_hw_status vxge_hw_vpath_intr_enable( + struct __vxge_hw_vpath_handle *vpath_handle); + +enum vxge_hw_status vxge_hw_vpath_intr_disable( + struct __vxge_hw_vpath_handle *vpath_handle); + +void vxge_hw_vpath_inta_mask_tx_rx( + struct __vxge_hw_vpath_handle *vpath_handle); + +void vxge_hw_vpath_inta_unmask_tx_rx( + struct __vxge_hw_vpath_handle *vpath_handle); + +void +vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id); + +void +vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); + +void +vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id); + +void +vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, + void **dtrh); + +void +vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel); + +void +vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh); + +int +vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); + +void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo); + +void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring); + +#endif diff --git a/drivers/net/ethernet/neterion/vxge/vxge-version.h b/drivers/net/ethernet/neterion/vxge/vxge-version.h new file mode 100644 index 000000000..b9efa28ba --- /dev/null +++ b/drivers/net/ethernet/neterion/vxge/vxge-version.h @@ -0,0 +1,49 @@ +/****************************************************************************** + * This software may be used and distributed according to the terms of + * the GNU General Public License (GPL), incorporated herein by reference. + * Drivers based on or derived from this code fall under the GPL and must + * retain the authorship, copyright and license notice. This file is not + * a complete program and may only be used when the entire operating + * system is licensed under the GPL. + * See the file COPYING in this distribution for more information. + * + * vxge-version.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O + * Virtualized Server Adapter. + * Copyright(c) 2002-2010 Exar Corp. 
+ ******************************************************************************/ +#ifndef VXGE_VERSION_H +#define VXGE_VERSION_H + +#define VXGE_VERSION_MAJOR "2" +#define VXGE_VERSION_MINOR "5" +#define VXGE_VERSION_FIX "3" +#define VXGE_VERSION_BUILD "22640" +#define VXGE_VERSION_FOR "k" + +#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) + +#define VXGE_DEAD_FW_VER_MAJOR 1 +#define VXGE_DEAD_FW_VER_MINOR 4 +#define VXGE_DEAD_FW_VER_BUILD 4 + +#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \ + VXGE_DEAD_FW_VER_MINOR, \ + VXGE_DEAD_FW_VER_BUILD) + +#define VXGE_EPROM_FW_VER_MAJOR 1 +#define VXGE_EPROM_FW_VER_MINOR 6 +#define VXGE_EPROM_FW_VER_BUILD 1 + +#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \ + VXGE_EPROM_FW_VER_MINOR, \ + VXGE_EPROM_FW_VER_BUILD) + +#define VXGE_CERT_FW_VER_MAJOR 1 +#define VXGE_CERT_FW_VER_MINOR 8 +#define VXGE_CERT_FW_VER_BUILD 1 + +#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \ + VXGE_CERT_FW_VER_MINOR, \ + VXGE_CERT_FW_VER_BUILD) + +#endif diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c new file mode 100644 index 000000000..7096b11d1 --- /dev/null +++ b/drivers/net/ethernet/netx-eth.c @@ -0,0 +1,503 @@ +/* + * drivers/net/ethernet/netx-eth.c + * + * Copyright (c) 2005 Sascha Hauer , Pengutronix + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* XC Fifo Offsets */ +#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */ +#define IND_FIFO_PORT_HI(xcno) (1 + ((xcno) << 3)) /* Index of the FIFO where received */ + /* Data packages are indicated by XC */ +#define IND_FIFO_PORT_LO(xcno) (2 + ((xcno) << 3)) /* Index of the FIFO where received */ + /* Data packages are indicated by XC */ +#define REQ_FIFO_PORT_HI(xcno) (3 + ((xcno) << 3)) /* Index of the FIFO where Data packages */ + /* have to be indicated by ARM which */ + /* shall be sent */ +#define REQ_FIFO_PORT_LO(xcno) (4 + ((xcno) << 3)) /* Index of the FIFO where Data packages */ + /* have to be indicated by ARM which shall */ + /* be sent */ +#define CON_FIFO_PORT_HI(xcno) (5 + ((xcno) << 3)) /* Index of the FIFO where sent Data packages */ + /* are confirmed */ +#define CON_FIFO_PORT_LO(xcno) (6 + ((xcno) << 3)) /* Index of the FIFO where sent Data */ + /* packages are confirmed */ +#define PFIFO_MASK(xcno) (0x7f << (xcno*8)) + +#define FIFO_PTR_FRAMELEN_SHIFT 0 +#define FIFO_PTR_FRAMELEN_MASK (0x7ff << 0) +#define FIFO_PTR_FRAMELEN(len) (((len) << 0) & FIFO_PTR_FRAMELEN_MASK) +#define FIFO_PTR_TIMETRIG (1<<11) +#define FIFO_PTR_MULTI_REQ +#define FIFO_PTR_ORIGIN (1<<14) +#define FIFO_PTR_VLAN (1<<15) +#define FIFO_PTR_FRAMENO_SHIFT 16 +#define FIFO_PTR_FRAMENO_MASK (0x3f << 16) +#define FIFO_PTR_FRAMENO(no) (((no) << 16) & FIFO_PTR_FRAMENO_MASK) +#define FIFO_PTR_SEGMENT_SHIFT 22 +#define FIFO_PTR_SEGMENT_MASK (0xf << 22) +#define FIFO_PTR_SEGMENT(seg) (((seg) & 0xf) << 22) +#define FIFO_PTR_ERROR_SHIFT 28 +#define FIFO_PTR_ERROR_MASK (0xf << 28) + +#define ISR_LINK_STATUS_CHANGE (1<<4) +#define ISR_IND_LO (1<<3) +#define ISR_CON_LO (1<<2) +#define ISR_IND_HI (1<<1) +#define ISR_CON_HI (1<<0) + +#define ETH_MAC_LOCAL_CONFIG 0x1560 +#define ETH_MAC_4321 0x1564 +#define ETH_MAC_65 0x1568 + +#define MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT 16 +#define MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK (0xf<data; + unsigned int len = skb->len; + + spin_lock_irq(&priv->lock); + memcpy_toio(priv->sram_base + 1560, (void *)buf, len); + if (len < 60) { + memset_io(priv->sram_base + 1560 + len, 0, 60 - len); + len = 60; + } + + pfifo_push(REQ_FIFO_PORT_LO(priv->id), + FIFO_PTR_SEGMENT(priv->id) | + FIFO_PTR_FRAMENO(1) | + FIFO_PTR_FRAMELEN(len)); + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + + netif_stop_queue(ndev); + spin_unlock_irq(&priv->lock); + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void netx_eth_receive(struct net_device *ndev) +{ + struct netx_eth_priv *priv = netdev_priv(ndev); + unsigned int val, frameno, seg, len; + unsigned char *data; + struct sk_buff *skb; + + val = pfifo_pop(IND_FIFO_PORT_LO(priv->id)); + + frameno = (val & FIFO_PTR_FRAMENO_MASK) >> FIFO_PTR_FRAMENO_SHIFT; + seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT; + len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT; + + skb = netdev_alloc_skb(ndev, len); + if (unlikely(skb == NULL)) { + ndev->stats.rx_dropped++; + return; + } + + data = skb_put(skb, len); + + memcpy_fromio(data, priv->sram_base + frameno * 1560, len); + + pfifo_push(EMPTY_PTR_FIFO(priv->id), + FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); + + skb->protocol = eth_type_trans(skb, ndev); + netif_rx(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; +} + +static irqreturn_t 
+netx_eth_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct netx_eth_priv *priv = netdev_priv(ndev); + int status; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + status = readl(NETX_PFIFO_XPEC_ISR(priv->id)); + while (status) { + int fill_level; + writel(status, NETX_PFIFO_XPEC_ISR(priv->id)); + + if ((status & ISR_CON_HI) || (status & ISR_IND_HI)) + printk("%s: unexpected status: 0x%08x\n", + __func__, status); + + fill_level = + readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id))); + while (fill_level--) + netx_eth_receive(ndev); + + if (status & ISR_CON_LO) + netif_wake_queue(ndev); + + if (status & ISR_LINK_STATUS_CHANGE) + mii_check_media(&priv->mii, netif_msg_link(priv), 1); + + status = readl(NETX_PFIFO_XPEC_ISR(priv->id)); + } + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_HANDLED; +} + +static int netx_eth_open(struct net_device *ndev) +{ + struct netx_eth_priv *priv = netdev_priv(ndev); + + if (request_irq + (ndev->irq, netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev)) + return -EAGAIN; + + writel(ndev->dev_addr[0] | + ndev->dev_addr[1]<<8 | + ndev->dev_addr[2]<<16 | + ndev->dev_addr[3]<<24, + priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321); + writel(ndev->dev_addr[4] | + ndev->dev_addr[5]<<8, + priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_65); + + writel(LOCAL_CONFIG_LINK_STATUS_IRQ_EN | + LOCAL_CONFIG_CON_LO_IRQ_EN | + LOCAL_CONFIG_CON_HI_IRQ_EN | + LOCAL_CONFIG_IND_LO_IRQ_EN | + LOCAL_CONFIG_IND_HI_IRQ_EN, + priv->xpec_base + NETX_XPEC_RAM_START_OFS + + ETH_MAC_LOCAL_CONFIG); + + mii_check_media(&priv->mii, netif_msg_link(priv), 1); + netif_start_queue(ndev); + + return 0; +} + +static int netx_eth_close(struct net_device *ndev) +{ + struct netx_eth_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + + writel(0, + priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_LOCAL_CONFIG); + + free_irq(ndev->irq, ndev); + + return 0; +} + +static void netx_eth_timeout(struct net_device *ndev) +{ + struct netx_eth_priv *priv = netdev_priv(ndev); + int i; + + printk(KERN_ERR "%s: transmit timed out, resetting\n", ndev->name); + + spin_lock_irq(&priv->lock); + + xc_reset(priv->xc); + xc_start(priv->xc); + + for (i=2; i<=18; i++) + pfifo_push(EMPTY_PTR_FIFO(priv->id), + FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id)); + + spin_unlock_irq(&priv->lock); + + netif_wake_queue(ndev); +} + +static int +netx_eth_phy_read(struct net_device *ndev, int phy_id, int reg) +{ + unsigned int val; + + val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) | + MIIMU_REGADDR(reg) | MIIMU_PHY_NRES; + + writel(val, NETX_MIIMU); + while (readl(NETX_MIIMU) & MIIMU_SNRDY); + + return readl(NETX_MIIMU) >> 16; + +} + +static void +netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value) +{ + unsigned int val; + + val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) | + MIIMU_REGADDR(reg) | MIIMU_PHY_NRES | MIIMU_OPMODE_WRITE | + MIIMU_DATA(value); + + writel(val, NETX_MIIMU); + while (readl(NETX_MIIMU) & MIIMU_SNRDY); +} + +static const struct net_device_ops netx_eth_netdev_ops = { + .ndo_open = netx_eth_open, + .ndo_stop = netx_eth_close, + .ndo_start_xmit = netx_eth_hard_start_xmit, + .ndo_tx_timeout = netx_eth_timeout, + .ndo_set_rx_mode = netx_eth_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int netx_eth_enable(struct net_device *ndev) +{ + struct netx_eth_priv *priv = 
netdev_priv(ndev); + unsigned int mac4321, mac65; + int running, i; + + ndev->netdev_ops = &netx_eth_netdev_ops; + ndev->watchdog_timeo = msecs_to_jiffies(5000); + + priv->msg_enable = NETIF_MSG_LINK; + priv->mii.phy_id_mask = 0x1f; + priv->mii.reg_num_mask = 0x1f; + priv->mii.force_media = 0; + priv->mii.full_duplex = 0; + priv->mii.dev = ndev; + priv->mii.mdio_read = netx_eth_phy_read; + priv->mii.mdio_write = netx_eth_phy_write; + priv->mii.phy_id = INTERNAL_PHY_ADR + priv->id; + + running = xc_running(priv->xc); + xc_stop(priv->xc); + + /* if the xc engine is already running, assume the bootloader has + * loaded the firmware for us + */ + if (running) { + /* get Node Address from hardware */ + mac4321 = readl(priv->xpec_base + + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321); + mac65 = readl(priv->xpec_base + + NETX_XPEC_RAM_START_OFS + ETH_MAC_65); + + ndev->dev_addr[0] = mac4321 & 0xff; + ndev->dev_addr[1] = (mac4321 >> 8) & 0xff; + ndev->dev_addr[2] = (mac4321 >> 16) & 0xff; + ndev->dev_addr[3] = (mac4321 >> 24) & 0xff; + ndev->dev_addr[4] = mac65 & 0xff; + ndev->dev_addr[5] = (mac65 >> 8) & 0xff; + } else { + if (xc_request_firmware(priv->xc)) { + printk(CARDNAME ": requesting firmware failed\n"); + return -ENODEV; + } + } + + xc_reset(priv->xc); + xc_start(priv->xc); + + if (!is_valid_ether_addr(ndev->dev_addr)) + printk("%s: Invalid ethernet MAC address. Please " + "set using ifconfig\n", ndev->name); + + for (i=2; i<=18; i++) + pfifo_push(EMPTY_PTR_FIFO(priv->id), + FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id)); + + return register_netdev(ndev); + +} + +static int netx_eth_drv_probe(struct platform_device *pdev) +{ + struct netx_eth_priv *priv; + struct net_device *ndev; + struct netxeth_platform_data *pdata; + int ret; + + ndev = alloc_etherdev(sizeof (struct netx_eth_priv)); + if (!ndev) { + ret = -ENOMEM; + goto exit; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + + platform_set_drvdata(pdev, ndev); + + priv = netdev_priv(ndev); + + pdata = dev_get_platdata(&pdev->dev); + priv->xc = request_xc(pdata->xcno, &pdev->dev); + if (!priv->xc) { + dev_err(&pdev->dev, "unable to request xc engine\n"); + ret = -ENODEV; + goto exit_free_netdev; + } + + ndev->irq = priv->xc->irq; + priv->id = pdev->id; + priv->xpec_base = priv->xc->xpec_base; + priv->xmac_base = priv->xc->xmac_base; + priv->sram_base = priv->xc->sram_base; + + spin_lock_init(&priv->lock); + + ret = pfifo_request(PFIFO_MASK(priv->id)); + if (ret) { + printk("unable to request PFIFO\n"); + goto exit_free_xc; + } + + ret = netx_eth_enable(ndev); + if (ret) + goto exit_free_pfifo; + + return 0; +exit_free_pfifo: + pfifo_free(PFIFO_MASK(priv->id)); +exit_free_xc: + free_xc(priv->xc); +exit_free_netdev: + free_netdev(ndev); +exit: + return ret; +} + +static int netx_eth_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct netx_eth_priv *priv = netdev_priv(ndev); + + unregister_netdev(ndev); + xc_stop(priv->xc); + free_xc(priv->xc); + free_netdev(ndev); + pfifo_free(PFIFO_MASK(priv->id)); + + return 0; +} + +static int netx_eth_drv_suspend(struct platform_device *pdev, pm_message_t state) +{ + dev_err(&pdev->dev, "suspend not implemented\n"); + return 0; +} + +static int netx_eth_drv_resume(struct platform_device *pdev) +{ + dev_err(&pdev->dev, "resume not implemented\n"); + return 0; +} + +static struct platform_driver netx_eth_driver = { + .probe = netx_eth_drv_probe, + .remove = netx_eth_drv_remove, + .suspend = netx_eth_drv_suspend, + .resume = netx_eth_drv_resume, + .driver = 
{ + .name = CARDNAME, + }, +}; + +static int __init netx_eth_init(void) +{ + unsigned int phy_control, val; + + printk("NetX Ethernet driver\n"); + + phy_control = PHY_CONTROL_PHY_ADDRESS(INTERNAL_PHY_ADR>>1) | + PHY_CONTROL_PHY1_MODE(PHY_MODE_ALL) | + PHY_CONTROL_PHY1_AUTOMDIX | + PHY_CONTROL_PHY1_EN | + PHY_CONTROL_PHY0_MODE(PHY_MODE_ALL) | + PHY_CONTROL_PHY0_AUTOMDIX | + PHY_CONTROL_PHY0_EN | + PHY_CONTROL_CLK_XLATIN; + + val = readl(NETX_SYSTEM_IOC_ACCESS_KEY); + writel(val, NETX_SYSTEM_IOC_ACCESS_KEY); + + writel(phy_control | PHY_CONTROL_RESET, NETX_SYSTEM_PHY_CONTROL); + udelay(100); + + val = readl(NETX_SYSTEM_IOC_ACCESS_KEY); + writel(val, NETX_SYSTEM_IOC_ACCESS_KEY); + + writel(phy_control, NETX_SYSTEM_PHY_CONTROL); + + return platform_driver_register(&netx_eth_driver); +} + +static void __exit netx_eth_cleanup(void) +{ + platform_driver_unregister(&netx_eth_driver); +} + +module_init(netx_eth_init); +module_exit(netx_eth_cleanup); + +MODULE_AUTHOR("Sascha Hauer, Pengutronix"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" CARDNAME); +/*(DEBLOBBED)*/ diff --git a/drivers/net/ethernet/nuvoton/Kconfig b/drivers/net/ethernet/nuvoton/Kconfig new file mode 100644 index 000000000..01182b559 --- /dev/null +++ b/drivers/net/ethernet/nuvoton/Kconfig @@ -0,0 +1,30 @@ +# +# Nuvoton network device configuration +# + +config NET_VENDOR_NUVOTON + bool "Nuvoton devices" + default y + depends on ARM && ARCH_W90X900 + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Nuvoton cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_NUVOTON + +config W90P910_ETH + tristate "Nuvoton w90p910 Ethernet support" + depends on ARM && ARCH_W90X900 + select PHYLIB + select MII + ---help--- + Say Y here if you want to use built-in Ethernet ports + on w90p910 processor. + +endif # NET_VENDOR_NUVOTON diff --git a/drivers/net/ethernet/nuvoton/Makefile b/drivers/net/ethernet/nuvoton/Makefile new file mode 100644 index 000000000..171aa044b --- /dev/null +++ b/drivers/net/ethernet/nuvoton/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Nuvoton network device drivers. +# + +obj-$(CONFIG_W90P910_ETH) += w90p910_ether.o diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c new file mode 100644 index 000000000..afa445842 --- /dev/null +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -0,0 +1,1093 @@ +/* + * Copyright (c) 2008-2009 Nuvoton technology corporation. + * + * Wan ZongShun + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation;version 2 of the License. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_MODULE_NAME "w90p910-emc" +#define DRV_MODULE_VERSION "0.1" + +/* Ethernet MAC Registers */ +#define REG_CAMCMR 0x00 +#define REG_CAMEN 0x04 +#define REG_CAMM_BASE 0x08 +#define REG_CAML_BASE 0x0c +#define REG_TXDLSA 0x88 +#define REG_RXDLSA 0x8C +#define REG_MCMDR 0x90 +#define REG_MIID 0x94 +#define REG_MIIDA 0x98 +#define REG_FFTCR 0x9C +#define REG_TSDR 0xa0 +#define REG_RSDR 0xa4 +#define REG_DMARFC 0xa8 +#define REG_MIEN 0xac +#define REG_MISTA 0xb0 +#define REG_CTXDSA 0xcc +#define REG_CTXBSA 0xd0 +#define REG_CRXDSA 0xd4 +#define REG_CRXBSA 0xd8 + +/* mac controller bit */ +#define MCMDR_RXON 0x01 +#define MCMDR_ACP (0x01 << 3) +#define MCMDR_SPCRC (0x01 << 5) +#define MCMDR_TXON (0x01 << 8) +#define MCMDR_FDUP (0x01 << 18) +#define MCMDR_ENMDC (0x01 << 19) +#define MCMDR_OPMOD (0x01 << 20) +#define SWR (0x01 << 24) + +/* cam command regiser */ +#define CAMCMR_AUP 0x01 +#define CAMCMR_AMP (0x01 << 1) +#define CAMCMR_ABP (0x01 << 2) +#define CAMCMR_CCAM (0x01 << 3) +#define CAMCMR_ECMP (0x01 << 4) +#define CAM0EN 0x01 + +/* mac mii controller bit */ +#define MDCCR (0x0a << 20) +#define PHYAD (0x01 << 8) +#define PHYWR (0x01 << 16) +#define PHYBUSY (0x01 << 17) +#define PHYPRESP (0x01 << 18) +#define CAM_ENTRY_SIZE 0x08 + +/* rx and tx status */ +#define TXDS_TXCP (0x01 << 19) +#define RXDS_CRCE (0x01 << 17) +#define RXDS_PTLE (0x01 << 19) +#define RXDS_RXGD (0x01 << 20) +#define RXDS_ALIE (0x01 << 21) +#define RXDS_RP (0x01 << 22) + +/* mac interrupt status*/ +#define MISTA_EXDEF (0x01 << 19) +#define MISTA_TXBERR (0x01 << 24) +#define MISTA_TDU (0x01 << 23) +#define MISTA_RDU (0x01 << 10) +#define MISTA_RXBERR (0x01 << 11) + +#define ENSTART 0x01 +#define ENRXINTR 0x01 +#define ENRXGD (0x01 << 4) +#define ENRXBERR (0x01 << 11) +#define ENTXINTR (0x01 << 16) +#define ENTXCP (0x01 << 18) +#define ENTXABT (0x01 << 21) +#define ENTXBERR (0x01 << 24) +#define ENMDC (0x01 << 19) +#define PHYBUSY (0x01 << 17) +#define MDCCR_VAL 0xa00000 + +/* rx and tx owner bit */ +#define RX_OWEN_DMA (0x01 << 31) +#define RX_OWEN_CPU (~(0x03 << 30)) +#define TX_OWEN_DMA (0x01 << 31) +#define TX_OWEN_CPU (~(0x01 << 31)) + +/* tx frame desc controller bit */ +#define MACTXINTEN 0x04 +#define CRCMODE 0x02 +#define PADDINGMODE 0x01 + +/* fftcr controller bit */ +#define TXTHD (0x03 << 8) +#define BLENGTH (0x01 << 20) + +/* global setting for driver */ +#define RX_DESC_SIZE 50 +#define TX_DESC_SIZE 10 +#define MAX_RBUFF_SZ 0x600 +#define MAX_TBUFF_SZ 0x600 +#define TX_TIMEOUT (HZ/2) +#define DELAY 1000 +#define CAM0 0x0 + +static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg); + +struct w90p910_rxbd { + unsigned int sl; + unsigned int buffer; + unsigned int reserved; + unsigned int next; +}; + +struct w90p910_txbd { + unsigned int mode; + unsigned int buffer; + unsigned int sl; + unsigned int next; +}; + +struct recv_pdesc { + struct w90p910_rxbd desclist[RX_DESC_SIZE]; + char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ]; +}; + +struct tran_pdesc { + struct w90p910_txbd desclist[TX_DESC_SIZE]; + char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ]; +}; + +struct w90p910_ether { + struct recv_pdesc *rdesc; + struct tran_pdesc *tdesc; + dma_addr_t rdesc_phys; + dma_addr_t tdesc_phys; + struct net_device_stats stats; + struct platform_device *pdev; + struct resource *res; + struct sk_buff *skb; + struct clk *clk; + struct clk *rmiiclk; + struct mii_if_info mii; + struct timer_list 
check_timer; + void __iomem *reg; + int rxirq; + int txirq; + unsigned int cur_tx; + unsigned int cur_rx; + unsigned int finish_tx; + unsigned int rx_packets; + unsigned int rx_bytes; + unsigned int start_tx_ptr; + unsigned int start_rx_ptr; + unsigned int linkflag; +}; + +static void update_linkspeed_register(struct net_device *dev, + unsigned int speed, unsigned int duplex) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + + if (speed == SPEED_100) { + /* 100 full/half duplex */ + if (duplex == DUPLEX_FULL) { + val |= (MCMDR_OPMOD | MCMDR_FDUP); + } else { + val |= MCMDR_OPMOD; + val &= ~MCMDR_FDUP; + } + } else { + /* 10 full/half duplex */ + if (duplex == DUPLEX_FULL) { + val |= MCMDR_FDUP; + val &= ~MCMDR_OPMOD; + } else { + val &= ~(MCMDR_FDUP | MCMDR_OPMOD); + } + } + + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void update_linkspeed(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + unsigned int bmsr, bmcr, lpa, speed, duplex; + + pdev = ether->pdev; + + if (!mii_link_ok(ðer->mii)) { + ether->linkflag = 0x0; + netif_carrier_off(dev); + dev_warn(&pdev->dev, "%s: Link down.\n", dev->name); + return; + } + + if (ether->linkflag == 1) + return; + + bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR); + bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + if (!(bmsr & BMSR_ANEGCOMPLETE)) + return; + + lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA); + + if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) + speed = SPEED_100; + else + speed = SPEED_10; + + if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) + duplex = DUPLEX_FULL; + else + duplex = DUPLEX_HALF; + + } else { + speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; + } + + update_linkspeed_register(dev, speed, duplex); + + dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed, + (duplex == DUPLEX_FULL) ? 
"FullDuplex" : "HalfDuplex"); + ether->linkflag = 0x01; + + netif_carrier_on(dev); +} + +static void w90p910_check_link(unsigned long dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct w90p910_ether *ether = netdev_priv(dev); + + update_linkspeed(dev); + mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000)); +} + +static void w90p910_write_cam(struct net_device *dev, + unsigned int x, unsigned char *pval) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int msw, lsw; + + msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3]; + + lsw = (pval[4] << 24) | (pval[5] << 16); + + __raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE); + __raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE); +} + +static int w90p910_init_desc(struct net_device *dev) +{ + struct w90p910_ether *ether; + struct w90p910_txbd *tdesc; + struct w90p910_rxbd *rdesc; + struct platform_device *pdev; + unsigned int i; + + ether = netdev_priv(dev); + pdev = ether->pdev; + + ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc), + ðer->tdesc_phys, GFP_KERNEL); + if (!ether->tdesc) + return -ENOMEM; + + ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc), + ðer->rdesc_phys, GFP_KERNEL); + if (!ether->rdesc) { + dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), + ether->tdesc, ether->tdesc_phys); + return -ENOMEM; + } + + for (i = 0; i < TX_DESC_SIZE; i++) { + unsigned int offset; + + tdesc = &(ether->tdesc->desclist[i]); + + if (i == TX_DESC_SIZE - 1) + offset = offsetof(struct tran_pdesc, desclist[0]); + else + offset = offsetof(struct tran_pdesc, desclist[i + 1]); + + tdesc->next = ether->tdesc_phys + offset; + tdesc->buffer = ether->tdesc_phys + + offsetof(struct tran_pdesc, tran_buf[i]); + tdesc->sl = 0; + tdesc->mode = 0; + } + + ether->start_tx_ptr = ether->tdesc_phys; + + for (i = 0; i < RX_DESC_SIZE; i++) { + unsigned int offset; + + rdesc = &(ether->rdesc->desclist[i]); + + if (i == RX_DESC_SIZE - 1) + offset = offsetof(struct recv_pdesc, desclist[0]); + else + offset = offsetof(struct recv_pdesc, desclist[i + 1]); + + rdesc->next = ether->rdesc_phys + offset; + rdesc->sl = RX_OWEN_DMA; + rdesc->buffer = ether->rdesc_phys + + offsetof(struct recv_pdesc, recv_buf[i]); + } + + ether->start_rx_ptr = ether->rdesc_phys; + + return 0; +} + +static void w90p910_set_fifo_threshold(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = TXTHD | BLENGTH; + __raw_writel(val, ether->reg + REG_FFTCR); +} + +static void w90p910_return_default_idle(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + val |= SWR; + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_trigger_rx(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + __raw_writel(ENSTART, ether->reg + REG_RSDR); +} + +static void w90p910_trigger_tx(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + __raw_writel(ENSTART, ether->reg + REG_TSDR); +} + +static void w90p910_enable_mac_interrupt(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP; + val |= ENTXBERR | ENRXBERR | ENTXABT; + + __raw_writel(val, ether->reg + REG_MIEN); +} + +static void w90p910_get_and_clear_int(struct net_device *dev, + unsigned int *val) +{ + struct 
w90p910_ether *ether = netdev_priv(dev); + + *val = __raw_readl(ether->reg + REG_MISTA); + __raw_writel(*val, ether->reg + REG_MISTA); +} + +static void w90p910_set_global_maccmd(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC; + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_enable_cam(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + w90p910_write_cam(dev, CAM0, dev->dev_addr); + + val = __raw_readl(ether->reg + REG_CAMEN); + val |= CAM0EN; + __raw_writel(val, ether->reg + REG_CAMEN); +} + +static void w90p910_enable_cam_command(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AMP; + __raw_writel(val, ether->reg + REG_CAMCMR); +} + +static void w90p910_enable_tx(struct net_device *dev, unsigned int enable) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + + if (enable) + val |= MCMDR_TXON; + else + val &= ~MCMDR_TXON; + + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_enable_rx(struct net_device *dev, unsigned int enable) +{ + struct w90p910_ether *ether = netdev_priv(dev); + unsigned int val; + + val = __raw_readl(ether->reg + REG_MCMDR); + + if (enable) + val |= MCMDR_RXON; + else + val &= ~MCMDR_RXON; + + __raw_writel(val, ether->reg + REG_MCMDR); +} + +static void w90p910_set_curdest(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + __raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA); + __raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA); +} + +static void w90p910_reset_mac(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + w90p910_enable_tx(dev, 0); + w90p910_enable_rx(dev, 0); + w90p910_set_fifo_threshold(dev); + w90p910_return_default_idle(dev); + + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + + w90p910_init_desc(dev); + + dev->trans_start = jiffies; /* prevent tx timeout */ + ether->cur_tx = 0x0; + ether->finish_tx = 0x0; + ether->cur_rx = 0x0; + + w90p910_set_curdest(dev); + w90p910_enable_cam(dev); + w90p910_enable_cam_command(dev); + w90p910_enable_mac_interrupt(dev); + w90p910_enable_tx(dev, 1); + w90p910_enable_rx(dev, 1); + w90p910_trigger_tx(dev); + w90p910_trigger_rx(dev); + + dev->trans_start = jiffies; /* prevent tx timeout */ + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); +} + +static void w90p910_mdio_write(struct net_device *dev, + int phy_id, int reg, int data) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + unsigned int val, i; + + pdev = ether->pdev; + + __raw_writel(data, ether->reg + REG_MIID); + + val = (phy_id << 0x08) | reg; + val |= PHYBUSY | PHYWR | MDCCR_VAL; + __raw_writel(val, ether->reg + REG_MIIDA); + + for (i = 0; i < DELAY; i++) { + if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) + break; + } + + if (i == DELAY) + dev_warn(&pdev->dev, "mdio write timed out\n"); +} + +static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + unsigned int val, i, data; + + pdev = ether->pdev; + + val = (phy_id << 0x08) | reg; + val |= PHYBUSY | MDCCR_VAL; + __raw_writel(val, ether->reg + REG_MIIDA); + + for (i = 0; i < 
DELAY; i++) { + if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0) + break; + } + + if (i == DELAY) { + dev_warn(&pdev->dev, "mdio read timed out\n"); + data = 0xffff; + } else { + data = __raw_readl(ether->reg + REG_MIID); + } + + return data; +} + +static int w90p910_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *address = addr; + + if (!is_valid_ether_addr(address->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, address->sa_data, dev->addr_len); + w90p910_write_cam(dev, CAM0, dev->dev_addr); + + return 0; +} + +static int w90p910_ether_close(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + + pdev = ether->pdev; + + dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc), + ether->rdesc, ether->rdesc_phys); + dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), + ether->tdesc, ether->tdesc_phys); + + netif_stop_queue(dev); + + del_timer_sync(ðer->check_timer); + clk_disable(ether->rmiiclk); + clk_disable(ether->clk); + + free_irq(ether->txirq, dev); + free_irq(ether->rxirq, dev); + + return 0; +} + +static struct net_device_stats *w90p910_ether_stats(struct net_device *dev) +{ + struct w90p910_ether *ether; + + ether = netdev_priv(dev); + + return ðer->stats; +} + +static int w90p910_send_frame(struct net_device *dev, + unsigned char *data, int length) +{ + struct w90p910_ether *ether; + struct w90p910_txbd *txbd; + struct platform_device *pdev; + unsigned char *buffer; + + ether = netdev_priv(dev); + pdev = ether->pdev; + + txbd = ðer->tdesc->desclist[ether->cur_tx]; + buffer = ether->tdesc->tran_buf[ether->cur_tx]; + + if (length > 1514) { + dev_err(&pdev->dev, "send data %d bytes, check it\n", length); + length = 1514; + } + + txbd->sl = length & 0xFFFF; + + memcpy(buffer, data, length); + + txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN; + + w90p910_enable_tx(dev, 1); + + w90p910_trigger_tx(dev); + + if (++ether->cur_tx >= TX_DESC_SIZE) + ether->cur_tx = 0; + + txbd = ðer->tdesc->desclist[ether->cur_tx]; + + if (txbd->mode & TX_OWEN_DMA) + netif_stop_queue(dev); + + return 0; +} + +static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + if (!(w90p910_send_frame(dev, skb->data, skb->len))) { + ether->skb = skb; + dev_kfree_skb_irq(skb); + return 0; + } + return -EAGAIN; +} + +static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id) +{ + struct w90p910_ether *ether; + struct w90p910_txbd *txbd; + struct platform_device *pdev; + struct net_device *dev; + unsigned int cur_entry, entry, status; + + dev = dev_id; + ether = netdev_priv(dev); + pdev = ether->pdev; + + w90p910_get_and_clear_int(dev, &status); + + cur_entry = __raw_readl(ether->reg + REG_CTXDSA); + + entry = ether->tdesc_phys + + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); + + while (entry != cur_entry) { + txbd = ðer->tdesc->desclist[ether->finish_tx]; + + if (++ether->finish_tx >= TX_DESC_SIZE) + ether->finish_tx = 0; + + if (txbd->sl & TXDS_TXCP) { + ether->stats.tx_packets++; + ether->stats.tx_bytes += txbd->sl & 0xFFFF; + } else { + ether->stats.tx_errors++; + } + + txbd->sl = 0x0; + txbd->mode = 0x0; + + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + entry = ether->tdesc_phys + + offsetof(struct tran_pdesc, desclist[ether->finish_tx]); + } + + if (status & MISTA_EXDEF) { + dev_err(&pdev->dev, "emc defer exceed interrupt\n"); + } else if (status & MISTA_TXBERR) { + dev_err(&pdev->dev, 
"emc bus error interrupt\n"); + w90p910_reset_mac(dev); + } else if (status & MISTA_TDU) { + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + } + + return IRQ_HANDLED; +} + +static void netdev_rx(struct net_device *dev) +{ + struct w90p910_ether *ether; + struct w90p910_rxbd *rxbd; + struct platform_device *pdev; + struct sk_buff *skb; + unsigned char *data; + unsigned int length, status, val, entry; + + ether = netdev_priv(dev); + pdev = ether->pdev; + + rxbd = ðer->rdesc->desclist[ether->cur_rx]; + + do { + val = __raw_readl(ether->reg + REG_CRXDSA); + + entry = ether->rdesc_phys + + offsetof(struct recv_pdesc, desclist[ether->cur_rx]); + + if (val == entry) + break; + + status = rxbd->sl; + length = status & 0xFFFF; + + if (status & RXDS_RXGD) { + data = ether->rdesc->recv_buf[ether->cur_rx]; + skb = netdev_alloc_skb(dev, length + 2); + if (!skb) { + ether->stats.rx_dropped++; + return; + } + + skb_reserve(skb, 2); + skb_put(skb, length); + skb_copy_to_linear_data(skb, data, length); + skb->protocol = eth_type_trans(skb, dev); + ether->stats.rx_packets++; + ether->stats.rx_bytes += length; + netif_rx(skb); + } else { + ether->stats.rx_errors++; + + if (status & RXDS_RP) { + dev_err(&pdev->dev, "rx runt err\n"); + ether->stats.rx_length_errors++; + } else if (status & RXDS_CRCE) { + dev_err(&pdev->dev, "rx crc err\n"); + ether->stats.rx_crc_errors++; + } else if (status & RXDS_ALIE) { + dev_err(&pdev->dev, "rx aligment err\n"); + ether->stats.rx_frame_errors++; + } else if (status & RXDS_PTLE) { + dev_err(&pdev->dev, "rx longer err\n"); + ether->stats.rx_over_errors++; + } + } + + rxbd->sl = RX_OWEN_DMA; + rxbd->reserved = 0x0; + + if (++ether->cur_rx >= RX_DESC_SIZE) + ether->cur_rx = 0; + + rxbd = ðer->rdesc->desclist[ether->cur_rx]; + + } while (1); +} + +static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id) +{ + struct net_device *dev; + struct w90p910_ether *ether; + struct platform_device *pdev; + unsigned int status; + + dev = dev_id; + ether = netdev_priv(dev); + pdev = ether->pdev; + + w90p910_get_and_clear_int(dev, &status); + + if (status & MISTA_RDU) { + netdev_rx(dev); + w90p910_trigger_rx(dev); + + return IRQ_HANDLED; + } else if (status & MISTA_RXBERR) { + dev_err(&pdev->dev, "emc rx bus error\n"); + w90p910_reset_mac(dev); + } + + netdev_rx(dev); + return IRQ_HANDLED; +} + +static int w90p910_ether_open(struct net_device *dev) +{ + struct w90p910_ether *ether; + struct platform_device *pdev; + + ether = netdev_priv(dev); + pdev = ether->pdev; + + w90p910_reset_mac(dev); + w90p910_set_fifo_threshold(dev); + w90p910_set_curdest(dev); + w90p910_enable_cam(dev); + w90p910_enable_cam_command(dev); + w90p910_enable_mac_interrupt(dev); + w90p910_set_global_maccmd(dev); + w90p910_enable_rx(dev, 1); + + clk_enable(ether->rmiiclk); + clk_enable(ether->clk); + + ether->rx_packets = 0x0; + ether->rx_bytes = 0x0; + + if (request_irq(ether->txirq, w90p910_tx_interrupt, + 0x0, pdev->name, dev)) { + dev_err(&pdev->dev, "register irq tx failed\n"); + return -EAGAIN; + } + + if (request_irq(ether->rxirq, w90p910_rx_interrupt, + 0x0, pdev->name, dev)) { + dev_err(&pdev->dev, "register irq rx failed\n"); + free_irq(ether->txirq, dev); + return -EAGAIN; + } + + mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000)); + netif_start_queue(dev); + w90p910_trigger_rx(dev); + + dev_info(&pdev->dev, "%s is OPENED\n", dev->name); + + return 0; +} + +static void w90p910_ether_set_multicast_list(struct net_device *dev) +{ + struct w90p910_ether *ether; + unsigned int rx_mode; + 
+ ether = netdev_priv(dev); + + if (dev->flags & IFF_PROMISC) + rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; + else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) + rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP; + else + rx_mode = CAMCMR_ECMP | CAMCMR_ABP; + __raw_writel(rx_mode, ether->reg + REG_CAMCMR); +} + +static int w90p910_ether_ioctl(struct net_device *dev, + struct ifreq *ifr, int cmd) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return generic_mii_ioctl(ðer->mii, data, cmd, NULL); +} + +static void w90p910_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_ethtool_gset(ðer->mii, cmd); +} + +static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_ethtool_sset(ðer->mii, cmd); +} + +static int w90p910_nway_reset(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_nway_restart(ðer->mii); +} + +static u32 w90p910_get_link(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + return mii_link_ok(ðer->mii); +} + +static const struct ethtool_ops w90p910_ether_ethtool_ops = { + .get_settings = w90p910_get_settings, + .set_settings = w90p910_set_settings, + .get_drvinfo = w90p910_get_drvinfo, + .nway_reset = w90p910_nway_reset, + .get_link = w90p910_get_link, +}; + +static const struct net_device_ops w90p910_ether_netdev_ops = { + .ndo_open = w90p910_ether_open, + .ndo_stop = w90p910_ether_close, + .ndo_start_xmit = w90p910_ether_start_xmit, + .ndo_get_stats = w90p910_ether_stats, + .ndo_set_rx_mode = w90p910_ether_set_multicast_list, + .ndo_set_mac_address = w90p910_set_mac_address, + .ndo_do_ioctl = w90p910_ether_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static void __init get_mac_address(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + struct platform_device *pdev; + char addr[ETH_ALEN]; + + pdev = ether->pdev; + + addr[0] = 0x00; + addr[1] = 0x02; + addr[2] = 0xac; + addr[3] = 0x55; + addr[4] = 0x88; + addr[5] = 0xa8; + + if (is_valid_ether_addr(addr)) + memcpy(dev->dev_addr, &addr, ETH_ALEN); + else + dev_err(&pdev->dev, "invalid mac address\n"); +} + +static int w90p910_ether_setup(struct net_device *dev) +{ + struct w90p910_ether *ether = netdev_priv(dev); + + dev->netdev_ops = &w90p910_ether_netdev_ops; + dev->ethtool_ops = &w90p910_ether_ethtool_ops; + + dev->tx_queue_len = 16; + dev->dma = 0x0; + dev->watchdog_timeo = TX_TIMEOUT; + + get_mac_address(dev); + + ether->cur_tx = 0x0; + ether->cur_rx = 0x0; + ether->finish_tx = 0x0; + ether->linkflag = 0x0; + ether->mii.phy_id = 0x01; + ether->mii.phy_id_mask = 0x1f; + ether->mii.reg_num_mask = 0x1f; + ether->mii.dev = dev; + ether->mii.mdio_read = w90p910_mdio_read; + ether->mii.mdio_write = w90p910_mdio_write; + + setup_timer(ðer->check_timer, w90p910_check_link, + (unsigned long)dev); + + return 0; +} + +static int w90p910_ether_probe(struct platform_device *pdev) +{ + struct w90p910_ether *ether; + struct net_device *dev; + int error; + + dev = alloc_etherdev(sizeof(struct w90p910_ether)); + if (!dev) + return 
-ENOMEM; + + ether = netdev_priv(dev); + + ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (ether->res == NULL) { + dev_err(&pdev->dev, "failed to get I/O memory\n"); + error = -ENXIO; + goto failed_free; + } + + if (!request_mem_region(ether->res->start, + resource_size(ether->res), pdev->name)) { + dev_err(&pdev->dev, "failed to request I/O memory\n"); + error = -EBUSY; + goto failed_free; + } + + ether->reg = ioremap(ether->res->start, resource_size(ether->res)); + if (ether->reg == NULL) { + dev_err(&pdev->dev, "failed to remap I/O memory\n"); + error = -ENXIO; + goto failed_free_mem; + } + + ether->txirq = platform_get_irq(pdev, 0); + if (ether->txirq < 0) { + dev_err(&pdev->dev, "failed to get ether tx irq\n"); + error = -ENXIO; + goto failed_free_io; + } + + ether->rxirq = platform_get_irq(pdev, 1); + if (ether->rxirq < 0) { + dev_err(&pdev->dev, "failed to get ether rx irq\n"); + error = -ENXIO; + goto failed_free_io; + } + + platform_set_drvdata(pdev, dev); + + ether->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(ether->clk)) { + dev_err(&pdev->dev, "failed to get ether clock\n"); + error = PTR_ERR(ether->clk); + goto failed_free_io; + } + + ether->rmiiclk = clk_get(&pdev->dev, "RMII"); + if (IS_ERR(ether->rmiiclk)) { + dev_err(&pdev->dev, "failed to get ether clock\n"); + error = PTR_ERR(ether->rmiiclk); + goto failed_put_clk; + } + + ether->pdev = pdev; + + w90p910_ether_setup(dev); + + error = register_netdev(dev); + if (error != 0) { + dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n"); + error = -ENODEV; + goto failed_put_rmiiclk; + } + + return 0; +failed_put_rmiiclk: + clk_put(ether->rmiiclk); +failed_put_clk: + clk_put(ether->clk); +failed_free_io: + iounmap(ether->reg); +failed_free_mem: + release_mem_region(ether->res->start, resource_size(ether->res)); +failed_free: + free_netdev(dev); + return error; +} + +static int w90p910_ether_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct w90p910_ether *ether = netdev_priv(dev); + + unregister_netdev(dev); + + clk_put(ether->rmiiclk); + clk_put(ether->clk); + + iounmap(ether->reg); + release_mem_region(ether->res->start, resource_size(ether->res)); + + del_timer_sync(ðer->check_timer); + + free_netdev(dev); + return 0; +} + +static struct platform_driver w90p910_ether_driver = { + .probe = w90p910_ether_probe, + .remove = w90p910_ether_remove, + .driver = { + .name = "nuc900-emc", + }, +}; + +module_platform_driver(w90p910_ether_driver); + +MODULE_AUTHOR("Wan ZongShun "); +MODULE_DESCRIPTION("w90p910 MAC driver!"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:nuc900-emc"); + diff --git a/drivers/net/ethernet/nvidia/Kconfig b/drivers/net/ethernet/nvidia/Kconfig new file mode 100644 index 000000000..ace19e7f6 --- /dev/null +++ b/drivers/net/ethernet/nvidia/Kconfig @@ -0,0 +1,32 @@ +# +# NVIDIA network device configuration +# + +config NET_VENDOR_NVIDIA + bool "NVIDIA devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about NVIDIA cards. If you say Y, you will be asked for + your specific card in the following questions. 
+ +if NET_VENDOR_NVIDIA + +config FORCEDETH + tristate "nForce Ethernet support" + depends on PCI + ---help--- + If you have a network (Ethernet) controller of this type, say Y and + read the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module + will be called forcedeth. + +endif # NET_VENDOR_NVIDIA diff --git a/drivers/net/ethernet/nvidia/Makefile b/drivers/net/ethernet/nvidia/Makefile new file mode 100644 index 000000000..e079ae577 --- /dev/null +++ b/drivers/net/ethernet/nvidia/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the NVIDIA network device drivers. +# + +obj-$(CONFIG_FORCEDETH) += forcedeth.o diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c new file mode 100644 index 000000000..a41bb5e6b --- /dev/null +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -0,0 +1,6385 @@ +/* + * forcedeth: Ethernet driver for NVIDIA nForce media access controllers. + * + * Note: This driver is a cleanroom reimplementation based on reverse + * engineered documentation written by Carl-Daniel Hailfinger + * and Andrew de Quincey. + * + * NVIDIA, nForce and other NVIDIA marks are trademarks or registered + * trademarks of NVIDIA Corporation in the United States and other + * countries. + * + * Copyright (C) 2003,4,5 Manfred Spraul + * Copyright (C) 2004 Andrew de Quincey (wol support) + * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane + * IRQ rate fixes, bigendian fixes, cleanups, verification) + * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * Known bugs: + * We suspect that on some hardware no TX done interrupts are generated. + * This means recovery from netif_stop_queue only happens if the hw timer + * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT) + * and the timer is active in the IRQMask, or if a rx packet arrives by chance. + * If your hardware reliably generates tx done interrupts, then you can remove + * DEV_NEED_TIMERIRQ from the driver_data flags. + * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few + * superfluous timer interrupts from the nic. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define FORCEDETH_VERSION "0.64" +#define DRV_NAME "forcedeth" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define TX_WORK_PER_LOOP 64 +#define RX_WORK_PER_LOOP 64 + +/* + * Hardware access: + */ + +#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */ +#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. 
Relies on the timer irq */ +#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */ +#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */ +#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */ +#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */ +#define DEV_HAS_MSI 0x0000040 /* device supports MSI */ +#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */ +#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */ +#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */ +#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */ +#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */ +#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */ +#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */ +#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */ +#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */ +#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */ +#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */ +#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */ +#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */ +#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */ +#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */ +#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, expect for some revs */ +#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */ +#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */ +#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */ +#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */ + +enum { + NvRegIrqStatus = 0x000, +#define NVREG_IRQSTAT_MIIEVENT 0x040 +#define NVREG_IRQSTAT_MASK 0x83ff + NvRegIrqMask = 0x004, +#define NVREG_IRQ_RX_ERROR 0x0001 +#define NVREG_IRQ_RX 0x0002 +#define NVREG_IRQ_RX_NOBUF 0x0004 +#define NVREG_IRQ_TX_ERR 0x0008 +#define NVREG_IRQ_TX_OK 0x0010 +#define NVREG_IRQ_TIMER 0x0020 +#define NVREG_IRQ_LINK 0x0040 +#define NVREG_IRQ_RX_FORCED 0x0080 +#define NVREG_IRQ_TX_FORCED 0x0100 +#define NVREG_IRQ_RECOVER_ERROR 0x8200 +#define NVREG_IRQMASK_THROUGHPUT 0x00df +#define NVREG_IRQMASK_CPU 0x0060 +#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED) +#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED) +#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR) + + NvRegUnknownSetupReg6 = 0x008, +#define NVREG_UNKSETUP6_VAL 3 + +/* + * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic + * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms + */ + NvRegPollingInterval = 0x00c, +#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */ +#define NVREG_POLL_DEFAULT_CPU 13 + NvRegMSIMap0 = 0x020, + NvRegMSIMap1 = 0x024, + NvRegMSIIrqMask = 0x030, +#define NVREG_MSI_VECTOR_0_ENABLED 0x01 + NvRegMisc1 = 0x080, +#define NVREG_MISC1_PAUSE_TX 0x01 +#define NVREG_MISC1_HD 0x02 +#define NVREG_MISC1_FORCE 0x3b0f3c + + NvRegMacReset = 0x34, +#define 
NVREG_MAC_RESET_ASSERT 0x0F3 + NvRegTransmitterControl = 0x084, +#define NVREG_XMITCTL_START 0x01 +#define NVREG_XMITCTL_MGMT_ST 0x40000000 +#define NVREG_XMITCTL_SYNC_MASK 0x000f0000 +#define NVREG_XMITCTL_SYNC_NOT_READY 0x0 +#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000 +#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00 +#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0 +#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 +#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 +#define NVREG_XMITCTL_HOST_LOADED 0x00004000 +#define NVREG_XMITCTL_TX_PATH_EN 0x01000000 +#define NVREG_XMITCTL_DATA_START 0x00100000 +#define NVREG_XMITCTL_DATA_READY 0x00010000 +#define NVREG_XMITCTL_DATA_ERROR 0x00020000 + NvRegTransmitterStatus = 0x088, +#define NVREG_XMITSTAT_BUSY 0x01 + + NvRegPacketFilterFlags = 0x8c, +#define NVREG_PFF_PAUSE_RX 0x08 +#define NVREG_PFF_ALWAYS 0x7F0000 +#define NVREG_PFF_PROMISC 0x80 +#define NVREG_PFF_MYADDR 0x20 +#define NVREG_PFF_LOOPBACK 0x10 + + NvRegOffloadConfig = 0x90, +#define NVREG_OFFLOAD_HOMEPHY 0x601 +#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE + NvRegReceiverControl = 0x094, +#define NVREG_RCVCTL_START 0x01 +#define NVREG_RCVCTL_RX_PATH_EN 0x01000000 + NvRegReceiverStatus = 0x98, +#define NVREG_RCVSTAT_BUSY 0x01 + + NvRegSlotTime = 0x9c, +#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 +#define NVREG_SLOTTIME_10_100_FULL 0x00007f00 +#define NVREG_SLOTTIME_1000_FULL 0x0003ff00 +#define NVREG_SLOTTIME_HALF 0x0000ff00 +#define NVREG_SLOTTIME_DEFAULT 0x00007f00 +#define NVREG_SLOTTIME_MASK 0x000000ff + + NvRegTxDeferral = 0xA0, +#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f +#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f +#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f +#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f +#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f +#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000 + NvRegRxDeferral = 0xA4, +#define NVREG_RX_DEFERRAL_DEFAULT 0x16 + NvRegMacAddrA = 0xA8, + NvRegMacAddrB = 0xAC, + NvRegMulticastAddrA = 0xB0, +#define NVREG_MCASTADDRA_FORCE 0x01 + NvRegMulticastAddrB = 0xB4, + NvRegMulticastMaskA = 0xB8, +#define NVREG_MCASTMASKA_NONE 0xffffffff + NvRegMulticastMaskB = 0xBC, +#define NVREG_MCASTMASKB_NONE 0xffff + + NvRegPhyInterface = 0xC0, +#define PHY_RGMII 0x10000000 + NvRegBackOffControl = 0xC4, +#define NVREG_BKOFFCTRL_DEFAULT 0x70000000 +#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff +#define NVREG_BKOFFCTRL_SELECT 24 +#define NVREG_BKOFFCTRL_GEAR 12 + + NvRegTxRingPhysAddr = 0x100, + NvRegRxRingPhysAddr = 0x104, + NvRegRingSizes = 0x108, +#define NVREG_RINGSZ_TXSHIFT 0 +#define NVREG_RINGSZ_RXSHIFT 16 + NvRegTransmitPoll = 0x10c, +#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000 + NvRegLinkSpeed = 0x110, +#define NVREG_LINKSPEED_FORCE 0x10000 +#define NVREG_LINKSPEED_10 1000 +#define NVREG_LINKSPEED_100 100 +#define NVREG_LINKSPEED_1000 50 +#define NVREG_LINKSPEED_MASK (0xFFF) + NvRegUnknownSetupReg5 = 0x130, +#define NVREG_UNKSETUP5_BIT31 (1<<31) + NvRegTxWatermark = 0x13c, +#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 +#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 +#define NVREG_TX_WM_DESC2_3_1000 0xfe08000 + NvRegTxRxControl = 0x144, +#define NVREG_TXRXCTL_KICK 0x0001 +#define NVREG_TXRXCTL_BIT1 0x0002 +#define NVREG_TXRXCTL_BIT2 0x0004 +#define NVREG_TXRXCTL_IDLE 0x0008 +#define NVREG_TXRXCTL_RESET 0x0010 +#define NVREG_TXRXCTL_RXCHECK 0x0400 +#define NVREG_TXRXCTL_DESC_1 0 +#define NVREG_TXRXCTL_DESC_2 0x002100 +#define NVREG_TXRXCTL_DESC_3 0xc02200 +#define NVREG_TXRXCTL_VLANSTRIP 0x00040 +#define NVREG_TXRXCTL_VLANINS 
0x00080 + NvRegTxRingPhysAddrHigh = 0x148, + NvRegRxRingPhysAddrHigh = 0x14C, + NvRegTxPauseFrame = 0x170, +#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080 +#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 +#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 +#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 + NvRegTxPauseFrameLimit = 0x174, +#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000 + NvRegMIIStatus = 0x180, +#define NVREG_MIISTAT_ERROR 0x0001 +#define NVREG_MIISTAT_LINKCHANGE 0x0008 +#define NVREG_MIISTAT_MASK_RW 0x0007 +#define NVREG_MIISTAT_MASK_ALL 0x000f + NvRegMIIMask = 0x184, +#define NVREG_MII_LINKCHANGE 0x0008 + + NvRegAdapterControl = 0x188, +#define NVREG_ADAPTCTL_START 0x02 +#define NVREG_ADAPTCTL_LINKUP 0x04 +#define NVREG_ADAPTCTL_PHYVALID 0x40000 +#define NVREG_ADAPTCTL_RUNNING 0x100000 +#define NVREG_ADAPTCTL_PHYSHIFT 24 + NvRegMIISpeed = 0x18c, +#define NVREG_MIISPEED_BIT8 (1<<8) +#define NVREG_MIIDELAY 5 + NvRegMIIControl = 0x190, +#define NVREG_MIICTL_INUSE 0x08000 +#define NVREG_MIICTL_WRITE 0x00400 +#define NVREG_MIICTL_ADDRSHIFT 5 + NvRegMIIData = 0x194, + NvRegTxUnicast = 0x1a0, + NvRegTxMulticast = 0x1a4, + NvRegTxBroadcast = 0x1a8, + NvRegWakeUpFlags = 0x200, +#define NVREG_WAKEUPFLAGS_VAL 0x7770 +#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 +#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16 +#define NVREG_WAKEUPFLAGS_D3SHIFT 12 +#define NVREG_WAKEUPFLAGS_D2SHIFT 8 +#define NVREG_WAKEUPFLAGS_D1SHIFT 4 +#define NVREG_WAKEUPFLAGS_D0SHIFT 0 +#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01 +#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02 +#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04 +#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 + + NvRegMgmtUnitGetVersion = 0x204, +#define NVREG_MGMTUNITGETVERSION 0x01 + NvRegMgmtUnitVersion = 0x208, +#define NVREG_MGMTUNITVERSION 0x08 + NvRegPowerCap = 0x268, +#define NVREG_POWERCAP_D3SUPP (1<<30) +#define NVREG_POWERCAP_D2SUPP (1<<26) +#define NVREG_POWERCAP_D1SUPP (1<<25) + NvRegPowerState = 0x26c, +#define NVREG_POWERSTATE_POWEREDUP 0x8000 +#define NVREG_POWERSTATE_VALID 0x0100 +#define NVREG_POWERSTATE_MASK 0x0003 +#define NVREG_POWERSTATE_D0 0x0000 +#define NVREG_POWERSTATE_D1 0x0001 +#define NVREG_POWERSTATE_D2 0x0002 +#define NVREG_POWERSTATE_D3 0x0003 + NvRegMgmtUnitControl = 0x278, +#define NVREG_MGMTUNITCONTROL_INUSE 0x20000 + NvRegTxCnt = 0x280, + NvRegTxZeroReXmt = 0x284, + NvRegTxOneReXmt = 0x288, + NvRegTxManyReXmt = 0x28c, + NvRegTxLateCol = 0x290, + NvRegTxUnderflow = 0x294, + NvRegTxLossCarrier = 0x298, + NvRegTxExcessDef = 0x29c, + NvRegTxRetryErr = 0x2a0, + NvRegRxFrameErr = 0x2a4, + NvRegRxExtraByte = 0x2a8, + NvRegRxLateCol = 0x2ac, + NvRegRxRunt = 0x2b0, + NvRegRxFrameTooLong = 0x2b4, + NvRegRxOverflow = 0x2b8, + NvRegRxFCSErr = 0x2bc, + NvRegRxFrameAlignErr = 0x2c0, + NvRegRxLenErr = 0x2c4, + NvRegRxUnicast = 0x2c8, + NvRegRxMulticast = 0x2cc, + NvRegRxBroadcast = 0x2d0, + NvRegTxDef = 0x2d4, + NvRegTxFrame = 0x2d8, + NvRegRxCnt = 0x2dc, + NvRegTxPause = 0x2e0, + NvRegRxPause = 0x2e4, + NvRegRxDropFrame = 0x2e8, + NvRegVlanControl = 0x300, +#define NVREG_VLANCONTROL_ENABLE 0x2000 + NvRegMSIXMap0 = 0x3e0, + NvRegMSIXMap1 = 0x3e4, + NvRegMSIXIrqStatus = 0x3f0, + + NvRegPowerState2 = 0x600, +#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15 +#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 +#define NVREG_POWERSTATE2_PHY_RESET 0x0004 +#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00 +}; + +/* Big endian: should work, but is untested */ +struct ring_desc { + __le32 buf; + __le32 flaglen; +}; + +struct ring_desc_ex { + __le32 bufhigh; + 
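+ /*
+ * The extended descriptor splits the 64-bit DMA address across
+ * bufhigh (filled from dma_high()) and buflow (filled from
+ * dma_low()); txvlan presumably carries the VLAN tag for hardware
+ * insertion on transmit.  Compare struct ring_desc above, which
+ * only provides a single 32-bit buf word.
+ */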
__le32 buflow; + __le32 txvlan; + __le32 flaglen; +}; + +union ring_type { + struct ring_desc *orig; + struct ring_desc_ex *ex; +}; + +#define FLAG_MASK_V1 0xffff0000 +#define FLAG_MASK_V2 0xffffc000 +#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1) +#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2) + +#define NV_TX_LASTPACKET (1<<16) +#define NV_TX_RETRYERROR (1<<19) +#define NV_TX_RETRYCOUNT_MASK (0xF<<20) +#define NV_TX_FORCED_INTERRUPT (1<<24) +#define NV_TX_DEFERRED (1<<26) +#define NV_TX_CARRIERLOST (1<<27) +#define NV_TX_LATECOLLISION (1<<28) +#define NV_TX_UNDERFLOW (1<<29) +#define NV_TX_ERROR (1<<30) +#define NV_TX_VALID (1<<31) + +#define NV_TX2_LASTPACKET (1<<29) +#define NV_TX2_RETRYERROR (1<<18) +#define NV_TX2_RETRYCOUNT_MASK (0xF<<19) +#define NV_TX2_FORCED_INTERRUPT (1<<30) +#define NV_TX2_DEFERRED (1<<25) +#define NV_TX2_CARRIERLOST (1<<26) +#define NV_TX2_LATECOLLISION (1<<27) +#define NV_TX2_UNDERFLOW (1<<28) +/* error and valid are the same for both */ +#define NV_TX2_ERROR (1<<30) +#define NV_TX2_VALID (1<<31) +#define NV_TX2_TSO (1<<28) +#define NV_TX2_TSO_SHIFT 14 +#define NV_TX2_TSO_MAX_SHIFT 14 +#define NV_TX2_TSO_MAX_SIZE (1<tx_bytes + 4*tx_packets */ + u64 tx_zero_rexmt; + u64 tx_one_rexmt; + u64 tx_many_rexmt; + u64 tx_late_collision; + u64 tx_fifo_errors; + u64 tx_carrier_errors; + u64 tx_excess_deferral; + u64 tx_retry_error; + u64 rx_frame_error; + u64 rx_extra_byte; + u64 rx_late_collision; + u64 rx_runt; + u64 rx_frame_too_long; + u64 rx_over_errors; + u64 rx_crc_errors; + u64 rx_frame_align_error; + u64 rx_length_error; + u64 rx_unicast; + u64 rx_multicast; + u64 rx_broadcast; + u64 rx_packets; /* should be ifconfig->rx_packets */ + u64 rx_errors_total; + u64 tx_errors_total; + + /* version 2 stats */ + u64 tx_deferral; + u64 tx_packets; /* should be ifconfig->tx_packets */ + u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */ + u64 tx_pause; + u64 rx_pause; + u64 rx_drop_frame; + + /* version 3 stats */ + u64 tx_unicast; + u64 tx_multicast; + u64 tx_broadcast; +}; + +#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) +#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3) +#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) + +/* diagnostics */ +#define NV_TEST_COUNT_BASE 3 +#define NV_TEST_COUNT_EXTENDED 4 + +static const struct nv_ethtool_str nv_etests_str[] = { + { "link (online/offline)" }, + { "register (offline) " }, + { "interrupt (offline) " }, + { "loopback (offline) " } +}; + +struct register_test { + __u32 reg; + __u32 mask; +}; + +static const struct register_test nv_registers_test[] = { + { NvRegUnknownSetupReg6, 0x01 }, + { NvRegMisc1, 0x03c }, + { NvRegOffloadConfig, 0x03ff }, + { NvRegMulticastAddrA, 0xffffffff }, + { NvRegTxWatermark, 0x0ff }, + { NvRegWakeUpFlags, 0x07777 }, + { 0, 0 } +}; + +struct nv_skb_map { + struct sk_buff *skb; + dma_addr_t dma; + unsigned int dma_len:31; + unsigned int dma_single:1; + struct ring_desc_ex *first_tx_desc; + struct nv_skb_map *next_tx_ctx; +}; + +/* + * SMP locking: + * All hardware access under netdev_priv(dev)->lock, except the performance + * critical parts: + * - rx is (pseudo-) lockless: it relies on the single-threading provided + * by the arch code for interrupts. + * - tx setup is lockless: it relies on netif_tx_lock. Actual submission + * needs netdev_priv(dev)->lock :-( + * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 
+ * + * Hardware stats updates are protected by hwstats_lock: + * - updated by nv_do_stats_poll (timer). This is meant to avoid + * integer wraparound in the NIC stats registers, at low frequency + * (0.1 Hz) + * - updated by nv_get_ethtool_stats + nv_get_stats64 + * + * Software stats are accessed only through 64b synchronization points + * and are not subject to other synchronization techniques (single + * update thread on the TX or RX paths). + */ + +/* in dev: base, irq */ +struct fe_priv { + spinlock_t lock; + + struct net_device *dev; + struct napi_struct napi; + + /* hardware stats are updated in syscall and timer */ + spinlock_t hwstats_lock; + struct nv_ethtool_stats estats; + + int in_shutdown; + u32 linkspeed; + int duplex; + int autoneg; + int fixed_mode; + int phyaddr; + int wolenabled; + unsigned int phy_oui; + unsigned int phy_model; + unsigned int phy_rev; + u16 gigabit; + int intr_test; + int recover_error; + int quiet_count; + + /* General data: RO fields */ + dma_addr_t ring_addr; + struct pci_dev *pci_dev; + u32 orig_mac[2]; + u32 events; + u32 irqmask; + u32 desc_ver; + u32 txrxctl_bits; + u32 vlanctl_bits; + u32 driver_data; + u32 device_id; + u32 register_size; + u32 mac_in_use; + int mgmt_version; + int mgmt_sema; + + void __iomem *base; + + /* rx specific fields. + * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); + */ + union ring_type get_rx, put_rx, first_rx, last_rx; + struct nv_skb_map *get_rx_ctx, *put_rx_ctx; + struct nv_skb_map *first_rx_ctx, *last_rx_ctx; + struct nv_skb_map *rx_skb; + + union ring_type rx_ring; + unsigned int rx_buf_sz; + unsigned int pkt_limit; + struct timer_list oom_kick; + struct timer_list nic_poll; + struct timer_list stats_poll; + u32 nic_poll_irq; + int rx_ring_size; + + /* RX software stats */ + struct u64_stats_sync swstats_rx_syncp; + u64 stat_rx_packets; + u64 stat_rx_bytes; /* not always available in HW */ + u64 stat_rx_missed_errors; + u64 stat_rx_dropped; + + /* media detection workaround. + * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); + */ + int need_linktimer; + unsigned long link_timeout; + /* + * tx specific fields. + */ + union ring_type get_tx, put_tx, first_tx, last_tx; + struct nv_skb_map *get_tx_ctx, *put_tx_ctx; + struct nv_skb_map *first_tx_ctx, *last_tx_ctx; + struct nv_skb_map *tx_skb; + + union ring_type tx_ring; + u32 tx_flags; + int tx_ring_size; + int tx_limit; + u32 tx_pkts_in_progress; + struct nv_skb_map *tx_change_owner; + struct nv_skb_map *tx_end_flip; + int tx_stop; + + /* TX software stats */ + struct u64_stats_sync swstats_tx_syncp; + u64 stat_tx_packets; /* not always available in HW */ + u64 stat_tx_bytes; + u64 stat_tx_dropped; + + /* msi/msi-x fields */ + u32 msi_flags; + struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS]; + + /* flow control */ + u32 pause_flags; + + /* power saved state */ + u32 saved_config_space[NV_PCI_REGSZ_MAX/4]; + + /* for different msi-x irq type */ + char name_rx[IFNAMSIZ + 3]; /* -rx */ + char name_tx[IFNAMSIZ + 3]; /* -tx */ + char name_other[IFNAMSIZ + 6]; /* -other */ +}; + +/* + * Maximum number of loops until we assume that a bit in the irq mask + * is stuck. Overridable with module param. + */ +static int max_interrupt_work = 4; + +/* + * Optimization can be either throuput mode or cpu mode + * + * Throughput Mode: Every tx and rx packet will generate an interrupt. + * CPU Mode: Interrupts are controlled by a timer. 
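+ * Dynamic Mode: the default below; the driver appears to switch
+ * between the two behaviours at runtime depending on how busy the
+ * interrupt handler has recently been (see the quiet_count
+ * bookkeeping in struct fe_priv).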
+ */ +enum { + NV_OPTIMIZATION_MODE_THROUGHPUT, + NV_OPTIMIZATION_MODE_CPU, + NV_OPTIMIZATION_MODE_DYNAMIC +}; +static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC; + +/* + * Poll interval for timer irq + * + * This interval determines how frequent an interrupt is generated. + * The is value is determined by [(time_in_micro_secs * 100) / (2^10)] + * Min = 0, and Max = 65535 + */ +static int poll_interval = -1; + +/* + * MSI interrupts + */ +enum { + NV_MSI_INT_DISABLED, + NV_MSI_INT_ENABLED +}; +static int msi = NV_MSI_INT_ENABLED; + +/* + * MSIX interrupts + */ +enum { + NV_MSIX_INT_DISABLED, + NV_MSIX_INT_ENABLED +}; +static int msix = NV_MSIX_INT_ENABLED; + +/* + * DMA 64bit + */ +enum { + NV_DMA_64BIT_DISABLED, + NV_DMA_64BIT_ENABLED +}; +static int dma_64bit = NV_DMA_64BIT_ENABLED; + +/* + * Debug output control for tx_timeout + */ +static bool debug_tx_timeout = false; + +/* + * Crossover Detection + * Realtek 8201 phy + some OEM boards do not work properly. + */ +enum { + NV_CROSSOVER_DETECTION_DISABLED, + NV_CROSSOVER_DETECTION_ENABLED +}; +static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; + +/* + * Power down phy when interface is down (persists through reboot; + * older Linux and other OSes may not power it up again) + */ +static int phy_power_down; + +static inline struct fe_priv *get_nvpriv(struct net_device *dev) +{ + return netdev_priv(dev); +} + +static inline u8 __iomem *get_hwbase(struct net_device *dev) +{ + return ((struct fe_priv *)netdev_priv(dev))->base; +} + +static inline void pci_push(u8 __iomem *base) +{ + /* force out pending posted writes */ + readl(base); +} + +static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) +{ + return le32_to_cpu(prd->flaglen) + & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); +} + +static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) +{ + return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; +} + +static bool nv_optimized(struct fe_priv *np) +{ + if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) + return false; + return true; +} + +static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, + int delay, int delaymax) +{ + u8 __iomem *base = get_hwbase(dev); + + pci_push(base); + do { + udelay(delay); + delaymax -= delay; + if (delaymax < 0) + return 1; + } while ((readl(base + offset) & mask) != target); + return 0; +} + +#define NV_SETUP_RX_RING 0x01 +#define NV_SETUP_TX_RING 0x02 + +static inline u32 dma_low(dma_addr_t addr) +{ + return addr; +} + +static inline u32 dma_high(dma_addr_t addr) +{ + return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */ +} + +static void setup_hw_rings(struct net_device *dev, int rxtx_flags) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (!nv_optimized(np)) { + if (rxtx_flags & NV_SETUP_RX_RING) + writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); + if (rxtx_flags & NV_SETUP_TX_RING) + writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); + } else { + if (rxtx_flags & NV_SETUP_RX_RING) { + writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); + writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); + } + if (rxtx_flags & NV_SETUP_TX_RING) { + writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); + writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh); + } + } +} + +static void free_rings(struct 
net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!nv_optimized(np)) { + if (np->rx_ring.orig) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), + np->rx_ring.orig, np->ring_addr); + } else { + if (np->rx_ring.ex) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), + np->rx_ring.ex, np->ring_addr); + } + kfree(np->rx_skb); + kfree(np->tx_skb); +} + +static int using_multi_irqs(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!(np->msi_flags & NV_MSI_X_ENABLED) || + ((np->msi_flags & NV_MSI_X_ENABLED) && + ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) + return 0; + else + return 1; +} + +static void nv_txrx_gate(struct net_device *dev, bool gate) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 powerstate; + + if (!np->mac_in_use && + (np->driver_data & DEV_HAS_POWER_CNTRL)) { + powerstate = readl(base + NvRegPowerState2); + if (gate) + powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS; + else + powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS; + writel(powerstate, base + NvRegPowerState2); + } +} + +static void nv_enable_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + enable_irq(np->pci_dev->irq); + } else { + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } +} + +static void nv_disable_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + disable_irq(np->pci_dev->irq); + } else { + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } +} + +/* In MSIX mode, a write to irqmask behaves as XOR */ +static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask) +{ + u8 __iomem *base = get_hwbase(dev); + + writel(mask, base + NvRegIrqMask); +} + +static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (np->msi_flags & NV_MSI_X_ENABLED) { + writel(mask, base + NvRegIrqMask); + } else { + if (np->msi_flags & NV_MSI_ENABLED) + writel(0, base + NvRegMSIIrqMask); + writel(0, base + NvRegIrqMask); + } +} + +static void nv_napi_enable(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + napi_enable(&np->napi); +} + +static void nv_napi_disable(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + + napi_disable(&np->napi); +} + +#define MII_READ (-1) +/* mii_rw: read/write a register on the PHY. 
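+ * Passing MII_READ (-1) as the value performs a read and returns the
+ * register contents (or -1 on error/timeout); any other value is
+ * written to the register and 0 is returned on success.  A typical
+ * read, as used elsewhere in this file, looks like:
+ *
+ *   mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);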
+ * + * Caller must guarantee serialization + */ +static int mii_rw(struct net_device *dev, int addr, int miireg, int value) +{ + u8 __iomem *base = get_hwbase(dev); + u32 reg; + int retval; + + writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus); + + reg = readl(base + NvRegMIIControl); + if (reg & NVREG_MIICTL_INUSE) { + writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl); + udelay(NV_MIIBUSY_DELAY); + } + + reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg; + if (value != MII_READ) { + writel(value, base + NvRegMIIData); + reg |= NVREG_MIICTL_WRITE; + } + writel(reg, base + NvRegMIIControl); + + if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, + NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) { + retval = -1; + } else if (value != MII_READ) { + /* it was a write operation - fewer failures are detectable */ + retval = 0; + } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { + retval = -1; + } else { + retval = readl(base + NvRegMIIData); + } + + return retval; +} + +static int phy_reset(struct net_device *dev, u32 bmcr_setup) +{ + struct fe_priv *np = netdev_priv(dev); + u32 miicontrol; + unsigned int tries = 0; + + miicontrol = BMCR_RESET | bmcr_setup; + if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) + return -1; + + /* wait for 500ms */ + msleep(500); + + /* must wait till reset is deasserted */ + while (miicontrol & BMCR_RESET) { + usleep_range(10000, 20000); + miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + /* FIXME: 100 tries seem excessive */ + if (tries++ > 100) + return -1; + } + return 0; +} + +static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np) +{ + static const struct { + int reg; + int init; + } ri[] = { + { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 }, + { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 }, + { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 }, + { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 }, + { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 }, + { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 }, + { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(ri); i++) { + if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init)) + return PHY_ERROR; + } + + return 0; +} + +static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np) +{ + u32 reg; + u8 __iomem *base = get_hwbase(dev); + u32 powerstate = readl(base + NvRegPowerState2); + + /* need to perform hw phy reset */ + powerstate |= NVREG_POWERSTATE2_PHY_RESET; + writel(powerstate, base + NvRegPowerState2); + msleep(25); + + powerstate &= ~NVREG_POWERSTATE2_PHY_RESET; + writel(powerstate, base + NvRegPowerState2); + msleep(25); + + reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); + reg |= PHY_REALTEK_INIT9; + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) + return PHY_ERROR; + reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); + if (!(reg & PHY_REALTEK_INIT11)) { + reg |= PHY_REALTEK_INIT11; + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) + return PHY_ERROR; + } + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) + return PHY_ERROR; + + return 0; +} + +static int init_realtek_8201(struct net_device *dev, struct fe_priv *np) +{ + u32 phy_reserved; + + if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG6, MII_READ); + phy_reserved |= PHY_REALTEK_INIT7; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG6, 
phy_reserved)) + return PHY_ERROR; + } + + return 0; +} + +static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np) +{ + u32 phy_reserved; + + if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG2, MII_READ); + phy_reserved &= ~PHY_REALTEK_INIT_MSK1; + phy_reserved |= PHY_REALTEK_INIT3; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG2, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) + return PHY_ERROR; + } + + return 0; +} + +static int init_cicada(struct net_device *dev, struct fe_priv *np, + u32 phyinterface) +{ + u32 phy_reserved; + + if (phyinterface & PHY_RGMII) { + phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); + phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); + phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); + if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); + phy_reserved |= PHY_CICADA_INIT5; + if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); + phy_reserved |= PHY_CICADA_INIT6; + if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) + return PHY_ERROR; + + return 0; +} + +static int init_vitesse(struct net_device *dev, struct fe_priv *np) +{ + u32 phy_reserved; + + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG4, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG3, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG4, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; + phy_reserved |= PHY_VITESSE_INIT3; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG3, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG4, MII_READ); + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) + return PHY_ERROR; + phy_reserved = mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG3, MII_READ); + phy_reserved &= ~PHY_VITESSE_INIT_MSK2; + phy_reserved |= PHY_VITESSE_INIT8; + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) + return PHY_ERROR; + if (mii_rw(dev, np->phyaddr, + PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) 
+ return PHY_ERROR; + + return 0; +} + +static int phy_init(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 phyinterface; + u32 mii_status, mii_control, mii_control_1000, reg; + + /* phy errata for E3016 phy */ + if (np->phy_model == PHY_MODEL_MARVELL_E3016) { + reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); + reg &= ~PHY_MARVELL_E3016_INITMASK; + if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { + netdev_info(dev, "%s: phy write to errata reg failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + if (np->phy_oui == PHY_OUI_REALTEK) { + if (np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211B) { + if (init_realtek_8211b(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211C) { + if (init_realtek_8211c(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { + if (init_realtek_8201(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + } + + /* set advertise register */ + reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL | + ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { + netdev_info(dev, "%s: phy write to advertise failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + + /* get phy interface type */ + phyinterface = readl(base + NvRegPhyInterface); + + /* see if gigabit phy */ + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + if (mii_status & PHY_GIGABIT) { + np->gigabit = PHY_GIGABIT; + mii_control_1000 = mii_rw(dev, np->phyaddr, + MII_CTRL1000, MII_READ); + mii_control_1000 &= ~ADVERTISE_1000HALF; + if (phyinterface & PHY_RGMII) + mii_control_1000 |= ADVERTISE_1000FULL; + else + mii_control_1000 &= ~ADVERTISE_1000FULL; + + if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else + np->gigabit = 0; + + mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + mii_control |= BMCR_ANENABLE; + + if (np->phy_oui == PHY_OUI_REALTEK && + np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211C) { + /* start autoneg since we already performed hw reset above */ + mii_control |= BMCR_ANRESTART; + if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else { + /* reset the phy + * (certain phys need bmcr to be setup with reset) + */ + if (phy_reset(dev, mii_control)) { + netdev_info(dev, "%s: phy reset failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + + /* phy vendor specific configuration */ + if (np->phy_oui == PHY_OUI_CICADA) { + if (init_cicada(dev, np, phyinterface)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_oui == PHY_OUI_VITESSE) { + if (init_vitesse(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_oui == PHY_OUI_REALTEK) { + if (np->phy_model == PHY_MODEL_REALTEK_8211 && + np->phy_rev == PHY_REV_REALTEK_8211B) { + 
/* reset could have cleared these out, set them back */ + if (init_realtek_8211b(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { + if (init_realtek_8201(dev, np) || + init_realtek_8201_cross(dev, np)) { + netdev_info(dev, "%s: phy init failed\n", + pci_name(np->pci_dev)); + return PHY_ERROR; + } + } + } + + /* some phys clear out pause advertisement on reset, set it back */ + mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); + + /* restart auto negotiation, power down phy */ + mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); + if (phy_power_down) + mii_control |= BMCR_PDOWN; + if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) + return PHY_ERROR; + + return 0; +} + +static void nv_start_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 rx_ctrl = readl(base + NvRegReceiverControl); + + /* Already running? Stop it. */ + if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { + rx_ctrl &= ~NVREG_RCVCTL_START; + writel(rx_ctrl, base + NvRegReceiverControl); + pci_push(base); + } + writel(np->linkspeed, base + NvRegLinkSpeed); + pci_push(base); + rx_ctrl |= NVREG_RCVCTL_START; + if (np->mac_in_use) + rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; + writel(rx_ctrl, base + NvRegReceiverControl); + pci_push(base); +} + +static void nv_stop_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 rx_ctrl = readl(base + NvRegReceiverControl); + + if (!np->mac_in_use) + rx_ctrl &= ~NVREG_RCVCTL_START; + else + rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; + writel(rx_ctrl, base + NvRegReceiverControl); + if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, + NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX)) + netdev_info(dev, "%s: ReceiverStatus remained busy\n", + __func__); + + udelay(NV_RXSTOP_DELAY2); + if (!np->mac_in_use) + writel(0, base + NvRegLinkSpeed); +} + +static void nv_start_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 tx_ctrl = readl(base + NvRegTransmitterControl); + + tx_ctrl |= NVREG_XMITCTL_START; + if (np->mac_in_use) + tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; + writel(tx_ctrl, base + NvRegTransmitterControl); + pci_push(base); +} + +static void nv_stop_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 tx_ctrl = readl(base + NvRegTransmitterControl); + + if (!np->mac_in_use) + tx_ctrl &= ~NVREG_XMITCTL_START; + else + tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; + writel(tx_ctrl, base + NvRegTransmitterControl); + if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, + NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX)) + netdev_info(dev, "%s: TransmitterStatus remained busy\n", + __func__); + + udelay(NV_TXSTOP_DELAY2); + if (!np->mac_in_use) + writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, + base + NvRegTransmitPoll); +} + +static void nv_start_rxtx(struct net_device *dev) +{ + nv_start_rx(dev); + nv_start_tx(dev); +} + +static void nv_stop_rxtx(struct net_device *dev) +{ + nv_stop_rx(dev); + nv_stop_tx(dev); +} + +static void nv_txrx_reset(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + 
NvRegTxRxControl); + pci_push(base); + udelay(NV_TXRX_RESET_DELAY); + writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); +} + +static void nv_mac_reset(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 temp1, temp2, temp3; + + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); + + /* save registers since they will be cleared on reset */ + temp1 = readl(base + NvRegMacAddrA); + temp2 = readl(base + NvRegMacAddrB); + temp3 = readl(base + NvRegTransmitPoll); + + writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset); + pci_push(base); + udelay(NV_MAC_RESET_DELAY); + writel(0, base + NvRegMacReset); + pci_push(base); + udelay(NV_MAC_RESET_DELAY); + + /* restore saved registers */ + writel(temp1, base + NvRegMacAddrA); + writel(temp2, base + NvRegMacAddrB); + writel(temp3, base + NvRegTransmitPoll); + + writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); + pci_push(base); +} + +/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */ +static void nv_update_stats(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + /* If it happens that this is run in top-half context, then + * replace the spin_lock of hwstats_lock with + * spin_lock_irqsave() in calling functions. */ + WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half"); + assert_spin_locked(&np->hwstats_lock); + + /* query hardware */ + np->estats.tx_bytes += readl(base + NvRegTxCnt); + np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); + np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); + np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); + np->estats.tx_late_collision += readl(base + NvRegTxLateCol); + np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); + np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); + np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); + np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); + np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); + np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); + np->estats.rx_late_collision += readl(base + NvRegRxLateCol); + np->estats.rx_runt += readl(base + NvRegRxRunt); + np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); + np->estats.rx_over_errors += readl(base + NvRegRxOverflow); + np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); + np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); + np->estats.rx_length_error += readl(base + NvRegRxLenErr); + np->estats.rx_unicast += readl(base + NvRegRxUnicast); + np->estats.rx_multicast += readl(base + NvRegRxMulticast); + np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); + np->estats.rx_packets = + np->estats.rx_unicast + + np->estats.rx_multicast + + np->estats.rx_broadcast; + np->estats.rx_errors_total = + np->estats.rx_crc_errors + + np->estats.rx_over_errors + + np->estats.rx_frame_error + + (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + + np->estats.rx_late_collision + + np->estats.rx_runt + + np->estats.rx_frame_too_long; + np->estats.tx_errors_total = + np->estats.tx_late_collision + + np->estats.tx_fifo_errors + + np->estats.tx_carrier_errors + + np->estats.tx_excess_deferral + + np->estats.tx_retry_error; + + if (np->driver_data & DEV_HAS_STATISTICS_V2) { + np->estats.tx_deferral += readl(base + NvRegTxDef); + 
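+ /*
+ * tx_packets and rx_bytes have hardware counters only on parts with
+ * version 2+ statistics (this block); on older chips the equivalent
+ * ifconfig numbers come from the software u64_stats counters updated
+ * on the rx/tx hot paths, as noted in nv_get_stats64 below.
+ */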
np->estats.tx_packets += readl(base + NvRegTxFrame); + np->estats.rx_bytes += readl(base + NvRegRxCnt); + np->estats.tx_pause += readl(base + NvRegTxPause); + np->estats.rx_pause += readl(base + NvRegRxPause); + np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); + np->estats.rx_errors_total += np->estats.rx_drop_frame; + } + + if (np->driver_data & DEV_HAS_STATISTICS_V3) { + np->estats.tx_unicast += readl(base + NvRegTxUnicast); + np->estats.tx_multicast += readl(base + NvRegTxMulticast); + np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); + } +} + +/* + * nv_get_stats64: dev->ndo_get_stats64 function + * Get latest stats value from the nic. + * Called with read_lock(&dev_base_lock) held for read - + * only synchronized against unregister_netdevice. + */ +static struct rtnl_link_stats64* +nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) + __acquires(&netdev_priv(dev)->hwstats_lock) + __releases(&netdev_priv(dev)->hwstats_lock) +{ + struct fe_priv *np = netdev_priv(dev); + unsigned int syncp_start; + + /* + * Note: because HW stats are not always available and for + * consistency reasons, the following ifconfig stats are + * managed by software: rx_bytes, tx_bytes, rx_packets and + * tx_packets. The related hardware stats reported by ethtool + * should be equivalent to these ifconfig stats, with 4 + * additional bytes per packet (Ethernet FCS CRC), except for + * tx_packets when TSO kicks in. + */ + + /* software stats */ + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); + storage->rx_packets = np->stat_rx_packets; + storage->rx_bytes = np->stat_rx_bytes; + storage->rx_dropped = np->stat_rx_dropped; + storage->rx_missed_errors = np->stat_rx_missed_errors; + } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); + + do { + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); + storage->tx_packets = np->stat_tx_packets; + storage->tx_bytes = np->stat_tx_bytes; + storage->tx_dropped = np->stat_tx_dropped; + } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); + + /* If the nic supports hw counters then retrieve latest values */ + if (np->driver_data & DEV_HAS_STATISTICS_V123) { + spin_lock_bh(&np->hwstats_lock); + + nv_update_stats(dev); + + /* generic stats */ + storage->rx_errors = np->estats.rx_errors_total; + storage->tx_errors = np->estats.tx_errors_total; + + /* meaningful only when NIC supports stats v3 */ + storage->multicast = np->estats.rx_multicast; + + /* detailed rx_errors */ + storage->rx_length_errors = np->estats.rx_length_error; + storage->rx_over_errors = np->estats.rx_over_errors; + storage->rx_crc_errors = np->estats.rx_crc_errors; + storage->rx_frame_errors = np->estats.rx_frame_align_error; + storage->rx_fifo_errors = np->estats.rx_drop_frame; + + /* detailed tx_errors */ + storage->tx_carrier_errors = np->estats.tx_carrier_errors; + storage->tx_fifo_errors = np->estats.tx_fifo_errors; + + spin_unlock_bh(&np->hwstats_lock); + } + + return storage; +} + +/* + * nv_alloc_rx: fill rx ring entries. 
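+ * The ring is refilled only up to one slot short of get_rx (the
+ * less_rx bound computed below), so the put pointer can never wrap
+ * onto descriptors the receive path has not processed yet.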
+ * Return 1 if the allocations for the skbs failed and the + * rx engine is without Available descriptors + */ +static int nv_alloc_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + struct ring_desc *less_rx; + + less_rx = np->get_rx.orig; + if (less_rx-- == np->first_rx.orig) + less_rx = np->last_rx.orig; + + while (np->put_rx.orig != less_rx) { + struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); + if (skb) { + np->put_rx_ctx->skb = skb; + np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + skb->data, + skb_tailroom(skb), + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(np->pci_dev, + np->put_rx_ctx->dma)) { + kfree_skb(skb); + goto packet_dropped; + } + np->put_rx_ctx->dma_len = skb_tailroom(skb); + np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); + wmb(); + np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); + if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) + np->put_rx.orig = np->first_rx.orig; + if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) + np->put_rx_ctx = np->first_rx_ctx; + } else { +packet_dropped: + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_dropped++; + u64_stats_update_end(&np->swstats_rx_syncp); + return 1; + } + } + return 0; +} + +static int nv_alloc_rx_optimized(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + struct ring_desc_ex *less_rx; + + less_rx = np->get_rx.ex; + if (less_rx-- == np->first_rx.ex) + less_rx = np->last_rx.ex; + + while (np->put_rx.ex != less_rx) { + struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); + if (skb) { + np->put_rx_ctx->skb = skb; + np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + skb->data, + skb_tailroom(skb), + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(np->pci_dev, + np->put_rx_ctx->dma)) { + kfree_skb(skb); + goto packet_dropped; + } + np->put_rx_ctx->dma_len = skb_tailroom(skb); + np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); + np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); + wmb(); + np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); + if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) + np->put_rx.ex = np->first_rx.ex; + if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) + np->put_rx_ctx = np->first_rx_ctx; + } else { +packet_dropped: + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_dropped++; + u64_stats_update_end(&np->swstats_rx_syncp); + return 1; + } + } + return 0; +} + +/* If rx bufs are exhausted called after 50ms to attempt to refresh */ +static void nv_do_rx_refill(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + + /* Just reschedule NAPI rx processing */ + napi_schedule(&np->napi); +} + +static void nv_init_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int i; + + np->get_rx = np->put_rx = np->first_rx = np->rx_ring; + + if (!nv_optimized(np)) + np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; + else + np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; + np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; + np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; + + for (i = 0; i < np->rx_ring_size; i++) { + if (!nv_optimized(np)) { + np->rx_ring.orig[i].flaglen = 0; + np->rx_ring.orig[i].buf = 0; + } else { + np->rx_ring.ex[i].flaglen = 0; + np->rx_ring.ex[i].txvlan = 0; + np->rx_ring.ex[i].bufhigh = 0; + np->rx_ring.ex[i].buflow = 0; + } + np->rx_skb[i].skb = NULL; + 
np->rx_skb[i].dma = 0; + } +} + +static void nv_init_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int i; + + np->get_tx = np->put_tx = np->first_tx = np->tx_ring; + + if (!nv_optimized(np)) + np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; + else + np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; + np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; + np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; + netdev_reset_queue(np->dev); + np->tx_pkts_in_progress = 0; + np->tx_change_owner = NULL; + np->tx_end_flip = NULL; + np->tx_stop = 0; + + for (i = 0; i < np->tx_ring_size; i++) { + if (!nv_optimized(np)) { + np->tx_ring.orig[i].flaglen = 0; + np->tx_ring.orig[i].buf = 0; + } else { + np->tx_ring.ex[i].flaglen = 0; + np->tx_ring.ex[i].txvlan = 0; + np->tx_ring.ex[i].bufhigh = 0; + np->tx_ring.ex[i].buflow = 0; + } + np->tx_skb[i].skb = NULL; + np->tx_skb[i].dma = 0; + np->tx_skb[i].dma_len = 0; + np->tx_skb[i].dma_single = 0; + np->tx_skb[i].first_tx_desc = NULL; + np->tx_skb[i].next_tx_ctx = NULL; + } +} + +static int nv_init_ring(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + + nv_init_tx(dev); + nv_init_rx(dev); + + if (!nv_optimized(np)) + return nv_alloc_rx(dev); + else + return nv_alloc_rx_optimized(dev); +} + +static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) +{ + if (tx_skb->dma) { + if (tx_skb->dma_single) + pci_unmap_single(np->pci_dev, tx_skb->dma, + tx_skb->dma_len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(np->pci_dev, tx_skb->dma, + tx_skb->dma_len, + PCI_DMA_TODEVICE); + tx_skb->dma = 0; + } +} + +static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) +{ + nv_unmap_txskb(np, tx_skb); + if (tx_skb->skb) { + dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + return 1; + } + return 0; +} + +static void nv_drain_tx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + unsigned int i; + + for (i = 0; i < np->tx_ring_size; i++) { + if (!nv_optimized(np)) { + np->tx_ring.orig[i].flaglen = 0; + np->tx_ring.orig[i].buf = 0; + } else { + np->tx_ring.ex[i].flaglen = 0; + np->tx_ring.ex[i].txvlan = 0; + np->tx_ring.ex[i].bufhigh = 0; + np->tx_ring.ex[i].buflow = 0; + } + if (nv_release_txskb(np, &np->tx_skb[i])) { + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_dropped++; + u64_stats_update_end(&np->swstats_tx_syncp); + } + np->tx_skb[i].dma = 0; + np->tx_skb[i].dma_len = 0; + np->tx_skb[i].dma_single = 0; + np->tx_skb[i].first_tx_desc = NULL; + np->tx_skb[i].next_tx_ctx = NULL; + } + np->tx_pkts_in_progress = 0; + np->tx_change_owner = NULL; + np->tx_end_flip = NULL; +} + +static void nv_drain_rx(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int i; + + for (i = 0; i < np->rx_ring_size; i++) { + if (!nv_optimized(np)) { + np->rx_ring.orig[i].flaglen = 0; + np->rx_ring.orig[i].buf = 0; + } else { + np->rx_ring.ex[i].flaglen = 0; + np->rx_ring.ex[i].txvlan = 0; + np->rx_ring.ex[i].bufhigh = 0; + np->rx_ring.ex[i].buflow = 0; + } + wmb(); + if (np->rx_skb[i].skb) { + pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, + (skb_end_pointer(np->rx_skb[i].skb) - + np->rx_skb[i].skb->data), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(np->rx_skb[i].skb); + np->rx_skb[i].skb = NULL; + } + } +} + +static void nv_drain_rxtx(struct net_device *dev) +{ + nv_drain_tx(dev); + nv_drain_rx(dev); +} + +static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) +{ + return (u32)(np->tx_ring_size - ((np->tx_ring_size + 
(np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); +} + +static void nv_legacybackoff_reseed(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 reg; + u32 low; + int tx_status = 0; + + reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK; + get_random_bytes(&low, sizeof(low)); + reg |= low & NVREG_SLOTTIME_MASK; + + /* Need to stop tx before change takes effect. + * Caller has already gained np->lock. + */ + tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START; + if (tx_status) + nv_stop_tx(dev); + nv_stop_rx(dev); + writel(reg, base + NvRegSlotTime); + if (tx_status) + nv_start_tx(dev); + nv_start_rx(dev); +} + +/* Gear Backoff Seeds */ +#define BACKOFF_SEEDSET_ROWS 8 +#define BACKOFF_SEEDSET_LFSRS 15 + +/* Known Good seed sets */ +static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { + {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, + {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, + {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, + {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, + {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, + {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, + {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, + {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} }; + +static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { + {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, + {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, + {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, + {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} }; + +static void nv_gear_backoff_reseed(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed; + u32 temp, seedset, combinedSeed; + int i; + + /* Setup seed for free running LFSR */ + /* We are going to read the time stamp counter 3 times + and swizzle bits around to increase randomness */ + get_random_bytes(&miniseed1, sizeof(miniseed1)); + miniseed1 &= 0x0fff; + if (miniseed1 == 0) + miniseed1 = 0xabc; + + get_random_bytes(&miniseed2, sizeof(miniseed2)); + miniseed2 &= 0x0fff; + if (miniseed2 == 0) + miniseed2 = 0xabc; + miniseed2_reversed = + ((miniseed2 & 0xF00) >> 8) | + (miniseed2 & 0x0F0) | + ((miniseed2 & 0x00F) << 8); + + get_random_bytes(&miniseed3, sizeof(miniseed3)); + miniseed3 &= 0x0fff; + if (miniseed3 == 0) + miniseed3 = 0xabc; + miniseed3_reversed = + ((miniseed3 & 0xF00) >> 8) | + (miniseed3 & 0x0F0) | + ((miniseed3 & 0x00F) << 8); + + combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) | + (miniseed2 ^ miniseed3_reversed); + + /* Seeds can not be zero */ + if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0) + combinedSeed |= 0x08; + if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0) + combinedSeed |= 0x8000; + + /* No need to disable tx here */ + temp = NVREG_BKOFFCTRL_DEFAULT | 
(0 << NVREG_BKOFFCTRL_SELECT); + temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; + temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; + writel(temp, base + NvRegBackOffControl); + + /* Setup seeds for all gear LFSRs. */ + get_random_bytes(&seedset, sizeof(seedset)); + seedset = seedset % BACKOFF_SEEDSET_ROWS; + for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) { + temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); + temp |= main_seedset[seedset][i-1] & 0x3ff; + temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); + writel(temp, base + NvRegBackOffControl); + } +} + +/* + * nv_start_xmit: dev->hard_start_xmit function + * Called with netif_tx_lock held. + */ +static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u32 tx_flags = 0; + u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); + unsigned int fragments = skb_shinfo(skb)->nr_frags; + unsigned int i; + u32 offset = 0; + u32 bcnt; + u32 size = skb_headlen(skb); + u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + u32 empty_slots; + struct ring_desc *put_tx; + struct ring_desc *start_tx; + struct ring_desc *prev_tx; + struct nv_skb_map *prev_tx_ctx; + struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL; + unsigned long flags; + + /* add fragments to entries count */ + for (i = 0; i < fragments; i++) { + u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + + entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + + ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + } + + spin_lock_irqsave(&np->lock, flags); + empty_slots = nv_get_empty_tx_slots(np); + if (unlikely(empty_slots <= entries)) { + netif_stop_queue(dev); + np->tx_stop = 1; + spin_unlock_irqrestore(&np->lock, flags); + return NETDEV_TX_BUSY; + } + spin_unlock_irqrestore(&np->lock, flags); + + start_tx = put_tx = np->put_tx.orig; + + /* setup the header buffer */ + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; + np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(np->pci_dev, + np->put_tx_ctx->dma)) { + /* on DMA mapping error - drop the packet */ + dev_kfree_skb_any(skb); + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_dropped++; + u64_stats_update_end(&np->swstats_tx_syncp); + return NETDEV_TX_OK; + } + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 1; + put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + tx_flags = np->tx_flags; + offset += bcnt; + size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.orig)) + put_tx = np->first_tx.orig; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (size); + + /* setup the fragments */ + for (i = 0; i < fragments; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 frag_size = skb_frag_size(frag); + offset = 0; + + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + if (!start_tx_ctx) + start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; + + bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : frag_size; + np->put_tx_ctx->dma = skb_frag_dma_map( + &np->pci_dev->dev, + frag, offset, + bcnt, + DMA_TO_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) { + + /* Unwind the mapped fragments */ + do { + nv_unmap_txskb(np, start_tx_ctx); + if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) + tmp_tx_ctx = np->first_tx_ctx; + } while (tmp_tx_ctx != np->put_tx_ctx); + dev_kfree_skb_any(skb); + np->put_tx_ctx = start_tx_ctx; + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_dropped++; + u64_stats_update_end(&np->swstats_tx_syncp); + return NETDEV_TX_OK; + } + + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 0; + put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + offset += bcnt; + frag_size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.orig)) + put_tx = np->first_tx.orig; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (frag_size); + } + + /* set last fragment flag */ + prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); + + /* save skb in this slot's context area */ + prev_tx_ctx->skb = skb; + + if (skb_is_gso(skb)) + tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); + else + tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? + NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; + + spin_lock_irqsave(&np->lock, flags); + + /* set tx flags */ + start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); + + netdev_sent_queue(np->dev, skb->len); + + skb_tx_timestamp(skb); + + np->put_tx.orig = put_tx; + + spin_unlock_irqrestore(&np->lock, flags); + + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + return NETDEV_TX_OK; +} + +static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, + struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u32 tx_flags = 0; + u32 tx_flags_extra; + unsigned int fragments = skb_shinfo(skb)->nr_frags; + unsigned int i; + u32 offset = 0; + u32 bcnt; + u32 size = skb_headlen(skb); + u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + u32 empty_slots; + struct ring_desc_ex *put_tx; + struct ring_desc_ex *start_tx; + struct ring_desc_ex *prev_tx; + struct nv_skb_map *prev_tx_ctx; + struct nv_skb_map *start_tx_ctx = NULL; + struct nv_skb_map *tmp_tx_ctx = NULL; + unsigned long flags; + + /* add fragments to entries count */ + for (i = 0; i < fragments; i++) { + u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + + entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + + ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); + } + + spin_lock_irqsave(&np->lock, flags); + empty_slots = nv_get_empty_tx_slots(np); + if (unlikely(empty_slots <= entries)) { + netif_stop_queue(dev); + np->tx_stop = 1; + spin_unlock_irqrestore(&np->lock, flags); + return NETDEV_TX_BUSY; + } + spin_unlock_irqrestore(&np->lock, flags); + + start_tx = put_tx = np->put_tx.ex; + start_tx_ctx = np->put_tx_ctx; + + /* setup the header buffer */ + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; + np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(np->pci_dev, + np->put_tx_ctx->dma)) { + /* on DMA mapping error - drop the packet */ + dev_kfree_skb_any(skb); + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_dropped++; + u64_stats_update_end(&np->swstats_tx_syncp); + return NETDEV_TX_OK; + } + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 1; + put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); + put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + tx_flags = NV_TX2_VALID; + offset += bcnt; + size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.ex)) + put_tx = np->first_tx.ex; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (size); + + /* setup the fragments */ + for (i = 0; i < fragments; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 frag_size = skb_frag_size(frag); + offset = 0; + + do { + prev_tx = put_tx; + prev_tx_ctx = np->put_tx_ctx; + bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; + if (!start_tx_ctx) + start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; + np->put_tx_ctx->dma = skb_frag_dma_map( + &np->pci_dev->dev, + frag, offset, + bcnt, + DMA_TO_DEVICE); + + if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) { + + /* Unwind the mapped fragments */ + do { + nv_unmap_txskb(np, start_tx_ctx); + if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) + tmp_tx_ctx = np->first_tx_ctx; + } while (tmp_tx_ctx != np->put_tx_ctx); + dev_kfree_skb_any(skb); + np->put_tx_ctx = start_tx_ctx; + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_dropped++; + u64_stats_update_end(&np->swstats_tx_syncp); + return NETDEV_TX_OK; + } + np->put_tx_ctx->dma_len = bcnt; + np->put_tx_ctx->dma_single = 0; + put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); + put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); + put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); + + offset += bcnt; + frag_size -= bcnt; + if (unlikely(put_tx++ == np->last_tx.ex)) + put_tx = np->first_tx.ex; + if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) + np->put_tx_ctx = np->first_tx_ctx; + } while (frag_size); + } + + /* set last fragment flag */ + prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); + + /* save skb in this slot's context area */ + prev_tx_ctx->skb = skb; + + if (skb_is_gso(skb)) + tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); + else + tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? + NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; + + /* vlan tag */ + if (skb_vlan_tag_present(skb)) + start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | + skb_vlan_tag_get(skb)); + else + start_tx->txvlan = 0; + + spin_lock_irqsave(&np->lock, flags); + + if (np->tx_limit) { + /* Limit the number of outstanding tx. Setup all fragments, but + * do not set the VALID bit on the first descriptor. Save a pointer + * to that descriptor and also for next skb_map element. 
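+	 * nv_tx_flip_ownership() later sets NV_TX2_VALID on that saved
+	 * descriptor once an earlier packet has completed.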
+ */ + + if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { + if (!np->tx_change_owner) + np->tx_change_owner = start_tx_ctx; + + /* remove VALID bit */ + tx_flags &= ~NV_TX2_VALID; + start_tx_ctx->first_tx_desc = start_tx; + start_tx_ctx->next_tx_ctx = np->put_tx_ctx; + np->tx_end_flip = np->put_tx_ctx; + } else { + np->tx_pkts_in_progress++; + } + } + + /* set tx flags */ + start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); + + netdev_sent_queue(np->dev, skb->len); + + skb_tx_timestamp(skb); + + np->put_tx.ex = put_tx; + + spin_unlock_irqrestore(&np->lock, flags); + + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + return NETDEV_TX_OK; +} + +static inline void nv_tx_flip_ownership(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + + np->tx_pkts_in_progress--; + if (np->tx_change_owner) { + np->tx_change_owner->first_tx_desc->flaglen |= + cpu_to_le32(NV_TX2_VALID); + np->tx_pkts_in_progress++; + + np->tx_change_owner = np->tx_change_owner->next_tx_ctx; + if (np->tx_change_owner == np->tx_end_flip) + np->tx_change_owner = NULL; + + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + } +} + +/* + * nv_tx_done: check for completed packets, release the skbs. + * + * Caller must own np->lock. + */ +static int nv_tx_done(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + int tx_work = 0; + struct ring_desc *orig_get_tx = np->get_tx.orig; + unsigned int bytes_compl = 0; + + while ((np->get_tx.orig != np->put_tx.orig) && + !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && + (tx_work < limit)) { + + nv_unmap_txskb(np, np->get_tx_ctx); + + if (np->desc_ver == DESC_VER_1) { + if (flags & NV_TX_LASTPACKET) { + if (flags & NV_TX_ERROR) { + if ((flags & NV_TX_RETRYERROR) + && !(flags & NV_TX_RETRYCOUNT_MASK)) + nv_legacybackoff_reseed(dev); + } else { + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_packets++; + np->stat_tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_end(&np->swstats_tx_syncp); + } + bytes_compl += np->get_tx_ctx->skb->len; + dev_kfree_skb_any(np->get_tx_ctx->skb); + np->get_tx_ctx->skb = NULL; + tx_work++; + } + } else { + if (flags & NV_TX2_LASTPACKET) { + if (flags & NV_TX2_ERROR) { + if ((flags & NV_TX2_RETRYERROR) + && !(flags & NV_TX2_RETRYCOUNT_MASK)) + nv_legacybackoff_reseed(dev); + } else { + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_packets++; + np->stat_tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_end(&np->swstats_tx_syncp); + } + bytes_compl += np->get_tx_ctx->skb->len; + dev_kfree_skb_any(np->get_tx_ctx->skb); + np->get_tx_ctx->skb = NULL; + tx_work++; + } + } + if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) + np->get_tx.orig = np->first_tx.orig; + if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) + np->get_tx_ctx = np->first_tx_ctx; + } + + netdev_completed_queue(np->dev, tx_work, bytes_compl); + + if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { + np->tx_stop = 0; + netif_wake_queue(dev); + } + return tx_work; +} + +static int nv_tx_done_optimized(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + int tx_work = 0; + struct ring_desc_ex *orig_get_tx = np->get_tx.ex; + unsigned long bytes_cleaned = 0; + + while ((np->get_tx.ex != np->put_tx.ex) && + !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && + (tx_work < limit)) { + + nv_unmap_txskb(np, np->get_tx_ctx); + + if (flags & 
NV_TX2_LASTPACKET) { + if (flags & NV_TX2_ERROR) { + if ((flags & NV_TX2_RETRYERROR) + && !(flags & NV_TX2_RETRYCOUNT_MASK)) { + if (np->driver_data & DEV_HAS_GEAR_MODE) + nv_gear_backoff_reseed(dev); + else + nv_legacybackoff_reseed(dev); + } + } else { + u64_stats_update_begin(&np->swstats_tx_syncp); + np->stat_tx_packets++; + np->stat_tx_bytes += np->get_tx_ctx->skb->len; + u64_stats_update_end(&np->swstats_tx_syncp); + } + + bytes_cleaned += np->get_tx_ctx->skb->len; + dev_kfree_skb_any(np->get_tx_ctx->skb); + np->get_tx_ctx->skb = NULL; + tx_work++; + + if (np->tx_limit) + nv_tx_flip_ownership(dev); + } + + if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) + np->get_tx.ex = np->first_tx.ex; + if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) + np->get_tx_ctx = np->first_tx_ctx; + } + + netdev_completed_queue(np->dev, tx_work, bytes_cleaned); + + if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { + np->tx_stop = 0; + netif_wake_queue(dev); + } + return tx_work; +} + +/* + * nv_tx_timeout: dev->tx_timeout function + * Called with netif_tx_lock held. + */ +static void nv_tx_timeout(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 status; + union ring_type put_tx; + int saved_tx_limit; + + if (np->msi_flags & NV_MSI_X_ENABLED) + status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; + else + status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; + + netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status); + + if (unlikely(debug_tx_timeout)) { + int i; + + netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); + netdev_info(dev, "Dumping tx registers\n"); + for (i = 0; i <= np->register_size; i += 32) { + netdev_info(dev, + "%3x: %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", + i, + readl(base + i + 0), readl(base + i + 4), + readl(base + i + 8), readl(base + i + 12), + readl(base + i + 16), readl(base + i + 20), + readl(base + i + 24), readl(base + i + 28)); + } + netdev_info(dev, "Dumping tx ring\n"); + for (i = 0; i < np->tx_ring_size; i += 4) { + if (!nv_optimized(np)) { + netdev_info(dev, + "%03x: %08x %08x // %08x %08x " + "// %08x %08x // %08x %08x\n", + i, + le32_to_cpu(np->tx_ring.orig[i].buf), + le32_to_cpu(np->tx_ring.orig[i].flaglen), + le32_to_cpu(np->tx_ring.orig[i+1].buf), + le32_to_cpu(np->tx_ring.orig[i+1].flaglen), + le32_to_cpu(np->tx_ring.orig[i+2].buf), + le32_to_cpu(np->tx_ring.orig[i+2].flaglen), + le32_to_cpu(np->tx_ring.orig[i+3].buf), + le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); + } else { + netdev_info(dev, + "%03x: %08x %08x %08x " + "// %08x %08x %08x " + "// %08x %08x %08x " + "// %08x %08x %08x\n", + i, + le32_to_cpu(np->tx_ring.ex[i].bufhigh), + le32_to_cpu(np->tx_ring.ex[i].buflow), + le32_to_cpu(np->tx_ring.ex[i].flaglen), + le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+1].buflow), + le32_to_cpu(np->tx_ring.ex[i+1].flaglen), + le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+2].buflow), + le32_to_cpu(np->tx_ring.ex[i+2].flaglen), + le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), + le32_to_cpu(np->tx_ring.ex[i+3].buflow), + le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); + } + } + } + + spin_lock_irq(&np->lock); + + /* 1) stop tx engine */ + nv_stop_tx(dev); + + /* 2) complete any outstanding tx and do not give HW any limited tx pkts */ + saved_tx_limit = np->tx_limit; + np->tx_limit = 0; /* prevent giving HW any limited pkts */ + np->tx_stop = 0; /* prevent waking tx queue */ + if (!nv_optimized(np)) + 
nv_tx_done(dev, np->tx_ring_size); + else + nv_tx_done_optimized(dev, np->tx_ring_size); + + /* save current HW position */ + if (np->tx_change_owner) + put_tx.ex = np->tx_change_owner->first_tx_desc; + else + put_tx = np->put_tx; + + /* 3) clear all tx state */ + nv_drain_tx(dev); + nv_init_tx(dev); + + /* 4) restore state to current HW position */ + np->get_tx = np->put_tx = put_tx; + np->tx_limit = saved_tx_limit; + + /* 5) restart tx engine */ + nv_start_tx(dev); + netif_wake_queue(dev); + spin_unlock_irq(&np->lock); +} + +/* + * Called when the nic notices a mismatch between the actual data len on the + * wire and the len indicated in the 802 header + */ +static int nv_getlen(struct net_device *dev, void *packet, int datalen) +{ + int hdrlen; /* length of the 802 header */ + int protolen; /* length as stored in the proto field */ + + /* 1) calculate len according to header */ + if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { + protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto); + hdrlen = VLAN_HLEN; + } else { + protolen = ntohs(((struct ethhdr *)packet)->h_proto); + hdrlen = ETH_HLEN; + } + if (protolen > ETH_DATA_LEN) + return datalen; /* Value in proto field not a len, no checks possible */ + + protolen += hdrlen; + /* consistency checks: */ + if (datalen > ETH_ZLEN) { + if (datalen >= protolen) { + /* more data on wire than in 802 header, trim of + * additional data. + */ + return protolen; + } else { + /* less data on wire than mentioned in header. + * Discard the packet. + */ + return -1; + } + } else { + /* short packet. Accept only if 802 values are also short */ + if (protolen > ETH_ZLEN) { + return -1; + } + return datalen; + } +} + +static int nv_rx_process(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + int rx_work = 0; + struct sk_buff *skb; + int len; + + while ((np->get_rx.orig != np->put_rx.orig) && + !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && + (rx_work < limit)) { + + /* + * the packet is for us - immediately tear down the pci mapping. + * TODO: check if a prefetch of the first cacheline improves + * the performance. 
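+		 * The skb is handed to the stack via napi_gro_receive() below,
+		 * so the DMA mapping is released first.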
+ */ + pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + PCI_DMA_FROMDEVICE); + skb = np->get_rx_ctx->skb; + np->get_rx_ctx->skb = NULL; + + /* look at what we actually got: */ + if (np->desc_ver == DESC_VER_1) { + if (likely(flags & NV_RX_DESCRIPTORVALID)) { + len = flags & LEN_MASK_V1; + if (unlikely(flags & NV_RX_ERROR)) { + if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { + len = nv_getlen(dev, skb->data, len); + if (len < 0) { + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* framing errors are soft errors */ + else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { + if (flags & NV_RX_SUBTRACT1) + len--; + } + /* the rest are hard errors */ + else { + if (flags & NV_RX_MISSEDFRAME) { + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_missed_errors++; + u64_stats_update_end(&np->swstats_rx_syncp); + } + dev_kfree_skb(skb); + goto next_pkt; + } + } + } else { + dev_kfree_skb(skb); + goto next_pkt; + } + } else { + if (likely(flags & NV_RX2_DESCRIPTORVALID)) { + len = flags & LEN_MASK_V2; + if (unlikely(flags & NV_RX2_ERROR)) { + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { + len = nv_getlen(dev, skb->data, len); + if (len < 0) { + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* framing errors are soft errors */ + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { + if (flags & NV_RX2_SUBTRACT1) + len--; + } + /* the rest are hard errors */ + else { + dev_kfree_skb(skb); + goto next_pkt; + } + } + if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ + ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* got a valid packet - forward it to the network core */ + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(&np->napi, skb); + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_packets++; + np->stat_rx_bytes += len; + u64_stats_update_end(&np->swstats_rx_syncp); +next_pkt: + if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) + np->get_rx.orig = np->first_rx.orig; + if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) + np->get_rx_ctx = np->first_rx_ctx; + + rx_work++; + } + + return rx_work; +} + +static int nv_rx_process_optimized(struct net_device *dev, int limit) +{ + struct fe_priv *np = netdev_priv(dev); + u32 flags; + u32 vlanflags = 0; + int rx_work = 0; + struct sk_buff *skb; + int len; + + while ((np->get_rx.ex != np->put_rx.ex) && + !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && + (rx_work < limit)) { + + /* + * the packet is for us - immediately tear down the pci mapping. + * TODO: check if a prefetch of the first cacheline improves + * the performance. 
+ */ + pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + PCI_DMA_FROMDEVICE); + skb = np->get_rx_ctx->skb; + np->get_rx_ctx->skb = NULL; + + /* look at what we actually got: */ + if (likely(flags & NV_RX2_DESCRIPTORVALID)) { + len = flags & LEN_MASK_V2; + if (unlikely(flags & NV_RX2_ERROR)) { + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { + len = nv_getlen(dev, skb->data, len); + if (len < 0) { + dev_kfree_skb(skb); + goto next_pkt; + } + } + /* framing errors are soft errors */ + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { + if (flags & NV_RX2_SUBTRACT1) + len--; + } + /* the rest are hard errors */ + else { + dev_kfree_skb(skb); + goto next_pkt; + } + } + + if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ + ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* got a valid packet - forward it to the network core */ + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + prefetch(skb->data); + + vlanflags = le32_to_cpu(np->get_rx.ex->buflow); + + /* + * There's need to check for NETIF_F_HW_VLAN_CTAG_RX + * here. Even if vlan rx accel is disabled, + * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set. + */ + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && + vlanflags & NV_RX3_VLAN_TAG_PRESENT) { + u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&np->napi, skb); + u64_stats_update_begin(&np->swstats_rx_syncp); + np->stat_rx_packets++; + np->stat_rx_bytes += len; + u64_stats_update_end(&np->swstats_rx_syncp); + } else { + dev_kfree_skb(skb); + } +next_pkt: + if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) + np->get_rx.ex = np->first_rx.ex; + if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) + np->get_rx_ctx = np->first_rx_ctx; + + rx_work++; + } + + return rx_work; +} + +static void set_bufsize(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + + if (dev->mtu <= ETH_DATA_LEN) + np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; + else + np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; +} + +/* + * nv_change_mtu: dev->change_mtu function + * Called with dev_base_lock held for read. + */ +static int nv_change_mtu(struct net_device *dev, int new_mtu) +{ + struct fe_priv *np = netdev_priv(dev); + int old_mtu; + + if (new_mtu < 64 || new_mtu > np->pkt_limit) + return -EINVAL; + + old_mtu = dev->mtu; + dev->mtu = new_mtu; + + /* return early if the buffer sizes will not change */ + if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) + return 0; + if (old_mtu == new_mtu) + return 0; + + /* synchronized against open : rtnl_lock() held by caller */ + if (netif_running(dev)) { + u8 __iomem *base = get_hwbase(dev); + /* + * It seems that the nic preloads valid ring entries into an + * internal buffer. The procedure for flushing everything is + * guessed, there is probably a simpler approach. + * Changing the MTU is a rare event, it shouldn't matter. 
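+		 * The sequence below therefore stops both engines, drains the
+		 * rings, re-initializes them with the new buffer size and then
+		 * restarts everything.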
+ */ + nv_disable_irq(dev); + nv_napi_disable(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + /* reinit driver view of the rx queue */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(base); + + /* restart rx engine */ + nv_start_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + nv_napi_enable(dev); + nv_enable_irq(dev); + } + return 0; +} + +static void nv_copy_mac_to_hw(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 mac[2]; + + mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + + (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); + mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); + + writel(mac[0], base + NvRegMacAddrA); + writel(mac[1], base + NvRegMacAddrB); +} + +/* + * nv_set_mac_address: dev->set_mac_address function + * Called with rtnl_lock() held. + */ +static int nv_set_mac_address(struct net_device *dev, void *addr) +{ + struct fe_priv *np = netdev_priv(dev); + struct sockaddr *macaddr = (struct sockaddr *)addr; + + if (!is_valid_ether_addr(macaddr->sa_data)) + return -EADDRNOTAVAIL; + + /* synchronized against open : rtnl_lock() held by caller */ + memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); + + if (netif_running(dev)) { + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock_irq(&np->lock); + + /* stop rx engine */ + nv_stop_rx(dev); + + /* set mac address */ + nv_copy_mac_to_hw(dev); + + /* restart rx engine */ + nv_start_rx(dev); + spin_unlock_irq(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } else { + nv_copy_mac_to_hw(dev); + } + return 0; +} + +/* + * nv_set_multicast: dev->set_multicast function + * Called with netif_tx_lock held. 
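+ * Derives a single addr/mask pair from the multicast list: only
+ * address bits that are identical across all entries end up in
+ * the mask, with addr holding their common value.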
+ */ +static void nv_set_multicast(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 addr[2]; + u32 mask[2]; + u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; + + memset(addr, 0, sizeof(addr)); + memset(mask, 0, sizeof(mask)); + + if (dev->flags & IFF_PROMISC) { + pff |= NVREG_PFF_PROMISC; + } else { + pff |= NVREG_PFF_MYADDR; + + if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { + u32 alwaysOff[2]; + u32 alwaysOn[2]; + + alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; + if (dev->flags & IFF_ALLMULTI) { + alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; + } else { + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + unsigned char *hw_addr = ha->addr; + u32 a, b; + + a = le32_to_cpu(*(__le32 *) hw_addr); + b = le16_to_cpu(*(__le16 *) (&hw_addr[4])); + alwaysOn[0] &= a; + alwaysOff[0] &= ~a; + alwaysOn[1] &= b; + alwaysOff[1] &= ~b; + } + } + addr[0] = alwaysOn[0]; + addr[1] = alwaysOn[1]; + mask[0] = alwaysOn[0] | alwaysOff[0]; + mask[1] = alwaysOn[1] | alwaysOff[1]; + } else { + mask[0] = NVREG_MCASTMASKA_NONE; + mask[1] = NVREG_MCASTMASKB_NONE; + } + } + addr[0] |= NVREG_MCASTADDRA_FORCE; + pff |= NVREG_PFF_ALWAYS; + spin_lock_irq(&np->lock); + nv_stop_rx(dev); + writel(addr[0], base + NvRegMulticastAddrA); + writel(addr[1], base + NvRegMulticastAddrB); + writel(mask[0], base + NvRegMulticastMaskA); + writel(mask[1], base + NvRegMulticastMaskB); + writel(pff, base + NvRegPacketFilterFlags); + nv_start_rx(dev); + spin_unlock_irq(&np->lock); +} + +static void nv_update_pause(struct net_device *dev, u32 pause_flags) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); + + if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { + u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; + if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { + writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); + np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + } else { + writel(pff, base + NvRegPacketFilterFlags); + } + } + if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { + u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; + if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { + u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) + pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { + pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; + /* limit the number of tx pause frames to a default of 8 */ + writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); + } + writel(pause_enable, base + NvRegTxPauseFrame); + writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); + np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } else { + writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); + writel(regmisc, base + NvRegMisc1); + } + } +} + +static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 phyreg, txreg; + int mii_status; + + np->linkspeed = NVREG_LINKSPEED_FORCE|speed; + np->duplex = duplex; + + /* see if gigabit phy */ + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + if (mii_status & PHY_GIGABIT) { + np->gigabit = PHY_GIGABIT; + phyreg = readl(base + NvRegSlotTime); + phyreg &= ~(0x3FF00); + 
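+		/* select the slot time that matches the forced link speed */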
if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) + phyreg |= NVREG_SLOTTIME_10_100_FULL; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) + phyreg |= NVREG_SLOTTIME_10_100_FULL; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) + phyreg |= NVREG_SLOTTIME_1000_FULL; + writel(phyreg, base + NvRegSlotTime); + } + + phyreg = readl(base + NvRegPhyInterface); + phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); + if (np->duplex == 0) + phyreg |= PHY_HALF; + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) + phyreg |= PHY_100; + else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == + NVREG_LINKSPEED_1000) + phyreg |= PHY_1000; + writel(phyreg, base + NvRegPhyInterface); + + if (phyreg & PHY_RGMII) { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == + NVREG_LINKSPEED_1000) + txreg = NVREG_TX_DEFERRAL_RGMII_1000; + else + txreg = NVREG_TX_DEFERRAL_RGMII_10_100; + } else { + txreg = NVREG_TX_DEFERRAL_DEFAULT; + } + writel(txreg, base + NvRegTxDeferral); + + if (np->desc_ver == DESC_VER_1) { + txreg = NVREG_TX_WM_DESC1_DEFAULT; + } else { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == + NVREG_LINKSPEED_1000) + txreg = NVREG_TX_WM_DESC2_3_1000; + else + txreg = NVREG_TX_WM_DESC2_3_DEFAULT; + } + writel(txreg, base + NvRegTxWatermark); + + writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), + base + NvRegMisc1); + pci_push(base); + writel(np->linkspeed, base + NvRegLinkSpeed); + pci_push(base); + + return; +} + +/** + * nv_update_linkspeed - Setup the MAC according to the link partner + * @dev: Network device to be configured + * + * The function queries the PHY and checks if there is a link partner. + * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is + * set to 10 MBit HD. + * + * The function returns 0 if there is no link partner and 1 if there is + * a good link partner. + */ +static int nv_update_linkspeed(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int adv = 0; + int lpa = 0; + int adv_lpa, adv_pause, lpa_pause; + int newls = np->linkspeed; + int newdup = np->duplex; + int mii_status; + u32 bmcr; + int retval = 0; + u32 control_1000, status_1000, phyreg, pause_flags, txreg; + u32 txrxFlags = 0; + u32 phy_exp; + + /* If device loopback is enabled, set carrier on and enable max link + * speed. + */ + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (bmcr & BMCR_LOOPBACK) { + if (netif_running(dev)) { + nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1); + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } + return 1; + } + + /* BMSR_LSTATUS is latched, read it twice: + * we want the current value. + */ + mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + + if (!(mii_status & BMSR_LSTATUS)) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + retval = 0; + goto set_speed; + } + + if (np->autoneg == 0) { + if (np->fixed_mode & LPA_100FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 1; + } else if (np->fixed_mode & LPA_100HALF) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 0; + } else if (np->fixed_mode & LPA_10FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 1; + } else { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + } + retval = 1; + goto set_speed; + } + /* check auto negotiation is complete */ + if (!(mii_status & BMSR_ANEGCOMPLETE)) { + /* still in autonegotiation - configure nic for 10 MBit HD and wait. 
*/ + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + retval = 0; + goto set_speed; + } + + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); + + retval = 1; + if (np->gigabit == PHY_GIGABIT) { + control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); + + if ((control_1000 & ADVERTISE_1000FULL) && + (status_1000 & LPA_1000FULL)) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; + newdup = 1; + goto set_speed; + } + } + + /* FIXME: handle parallel detection properly */ + adv_lpa = lpa & adv; + if (adv_lpa & LPA_100FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 1; + } else if (adv_lpa & LPA_100HALF) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; + newdup = 0; + } else if (adv_lpa & LPA_10FULL) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 1; + } else if (adv_lpa & LPA_10HALF) { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + } else { + newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + newdup = 0; + } + +set_speed: + if (np->duplex == newdup && np->linkspeed == newls) + return retval; + + np->duplex = newdup; + np->linkspeed = newls; + + /* The transmitter and receiver must be restarted for safe update */ + if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) { + txrxFlags |= NV_RESTART_TX; + nv_stop_tx(dev); + } + if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { + txrxFlags |= NV_RESTART_RX; + nv_stop_rx(dev); + } + + if (np->gigabit == PHY_GIGABIT) { + phyreg = readl(base + NvRegSlotTime); + phyreg &= ~(0x3FF00); + if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || + ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) + phyreg |= NVREG_SLOTTIME_10_100_FULL; + else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) + phyreg |= NVREG_SLOTTIME_1000_FULL; + writel(phyreg, base + NvRegSlotTime); + } + + phyreg = readl(base + NvRegPhyInterface); + phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); + if (np->duplex == 0) + phyreg |= PHY_HALF; + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) + phyreg |= PHY_100; + else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) + phyreg |= PHY_1000; + writel(phyreg, base + NvRegPhyInterface); + + phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ + if (phyreg & PHY_RGMII) { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { + txreg = NVREG_TX_DEFERRAL_RGMII_1000; + } else { + if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) + txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; + else + txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; + } else { + txreg = NVREG_TX_DEFERRAL_RGMII_10_100; + } + } + } else { + if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) + txreg = NVREG_TX_DEFERRAL_MII_STRETCH; + else + txreg = NVREG_TX_DEFERRAL_DEFAULT; + } + writel(txreg, base + NvRegTxDeferral); + + if (np->desc_ver == DESC_VER_1) { + txreg = NVREG_TX_WM_DESC1_DEFAULT; + } else { + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) + txreg = NVREG_TX_WM_DESC2_3_1000; + else + txreg = NVREG_TX_WM_DESC2_3_DEFAULT; + } + writel(txreg, base + NvRegTxWatermark); + + writel(NVREG_MISC1_FORCE | (np->duplex ? 
0 : NVREG_MISC1_HD), + base + NvRegMisc1); + pci_push(base); + writel(np->linkspeed, base + NvRegLinkSpeed); + pci_push(base); + + pause_flags = 0; + /* setup pause frame */ + if (netif_running(dev) && (np->duplex != 0)) { + if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { + adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); + + switch (adv_pause) { + case ADVERTISE_PAUSE_CAP: + if (lpa_pause & LPA_PAUSE_CAP) { + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + break; + case ADVERTISE_PAUSE_ASYM: + if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM)) + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + break; + case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM: + if (lpa_pause & LPA_PAUSE_CAP) { + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + if (lpa_pause == LPA_PAUSE_ASYM) + pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + break; + } + } else { + pause_flags = np->pause_flags; + } + } + nv_update_pause(dev, pause_flags); + + if (txrxFlags & NV_RESTART_TX) + nv_start_tx(dev); + if (txrxFlags & NV_RESTART_RX) + nv_start_rx(dev); + + return retval; +} + +static void nv_linkchange(struct net_device *dev) +{ + if (nv_update_linkspeed(dev)) { + if (!netif_carrier_ok(dev)) { + netif_carrier_on(dev); + netdev_info(dev, "link up\n"); + nv_txrx_gate(dev, false); + nv_start_rx(dev); + } + } else { + if (netif_carrier_ok(dev)) { + netif_carrier_off(dev); + netdev_info(dev, "link down\n"); + nv_txrx_gate(dev, true); + nv_stop_rx(dev); + } + } +} + +static void nv_link_irq(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + u32 miistat; + + miistat = readl(base + NvRegMIIStatus); + writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); + + if (miistat & (NVREG_MIISTAT_LINKCHANGE)) + nv_linkchange(dev); +} + +static void nv_msi_workaround(struct fe_priv *np) +{ + + /* Need to toggle the msi irq mask within the ethernet device, + * otherwise, future interrupts will not be detected. 
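+	 * Writing 0 and then NVREG_MSI_VECTOR_0_ENABLED to NvRegMSIIrqMask
+	 * below re-arms MSI vector 0.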
+ */ + if (np->msi_flags & NV_MSI_ENABLED) { + u8 __iomem *base = np->base; + + writel(0, base + NvRegMSIIrqMask); + writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); + } +} + +static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work) +{ + struct fe_priv *np = netdev_priv(dev); + + if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) { + if (total_work > NV_DYNAMIC_THRESHOLD) { + /* transition to poll based interrupts */ + np->quiet_count = 0; + if (np->irqmask != NVREG_IRQMASK_CPU) { + np->irqmask = NVREG_IRQMASK_CPU; + return 1; + } + } else { + if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { + np->quiet_count++; + } else { + /* reached a period of low activity, switch + to per tx/rx packet interrupts */ + if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { + np->irqmask = NVREG_IRQMASK_THROUGHPUT; + return 1; + } + } + } + } + return 0; +} + +static irqreturn_t nv_nic_irq(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (!(np->msi_flags & NV_MSI_X_ENABLED)) { + np->events = readl(base + NvRegIrqStatus); + writel(np->events, base + NvRegIrqStatus); + } else { + np->events = readl(base + NvRegMSIXIrqStatus); + writel(np->events, base + NvRegMSIXIrqStatus); + } + if (!(np->events & np->irqmask)) + return IRQ_NONE; + + nv_msi_workaround(np); + + if (napi_schedule_prep(&np->napi)) { + /* + * Disable further irq's (msix not enabled with napi) + */ + writel(0, base + NvRegIrqMask); + __napi_schedule(&np->napi); + } + + return IRQ_HANDLED; +} + +/* All _optimized functions are used to help increase performance + * (reduce CPU and increase throughput). They use descripter version 3, + * compiler directives, and reduce memory accesses. 
+ */ +static irqreturn_t nv_nic_irq_optimized(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + if (!(np->msi_flags & NV_MSI_X_ENABLED)) { + np->events = readl(base + NvRegIrqStatus); + writel(np->events, base + NvRegIrqStatus); + } else { + np->events = readl(base + NvRegMSIXIrqStatus); + writel(np->events, base + NvRegMSIXIrqStatus); + } + if (!(np->events & np->irqmask)) + return IRQ_NONE; + + nv_msi_workaround(np); + + if (napi_schedule_prep(&np->napi)) { + /* + * Disable further irq's (msix not enabled with napi) + */ + writel(0, base + NvRegIrqMask); + __napi_schedule(&np->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t nv_nic_irq_tx(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + int i; + unsigned long flags; + + for (i = 0;; i++) { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; + writel(events, base + NvRegMSIXIrqStatus); + netdev_dbg(dev, "tx irq events: %08x\n", events); + if (!(events & np->irqmask)) + break; + + spin_lock_irqsave(&np->lock, flags); + nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); + spin_unlock_irqrestore(&np->lock, flags); + + if (unlikely(i > max_interrupt_work)) { + spin_lock_irqsave(&np->lock, flags); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_TX_ALL; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_dbg(dev, "%s: too many iterations (%d)\n", + __func__, i); + break; + } + + } + + return IRQ_RETVAL(i); +} + +static int nv_napi_poll(struct napi_struct *napi, int budget) +{ + struct fe_priv *np = container_of(napi, struct fe_priv, napi); + struct net_device *dev = np->dev; + u8 __iomem *base = get_hwbase(dev); + unsigned long flags; + int retcode; + int rx_count, tx_work = 0, rx_work = 0; + + do { + if (!nv_optimized(np)) { + spin_lock_irqsave(&np->lock, flags); + tx_work += nv_tx_done(dev, np->tx_ring_size); + spin_unlock_irqrestore(&np->lock, flags); + + rx_count = nv_rx_process(dev, budget - rx_work); + retcode = nv_alloc_rx(dev); + } else { + spin_lock_irqsave(&np->lock, flags); + tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); + spin_unlock_irqrestore(&np->lock, flags); + + rx_count = nv_rx_process_optimized(dev, + budget - rx_work); + retcode = nv_alloc_rx_optimized(dev); + } + } while (retcode == 0 && + rx_count > 0 && (rx_work += rx_count) < budget); + + if (retcode) { + spin_lock_irqsave(&np->lock, flags); + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + spin_unlock_irqrestore(&np->lock, flags); + } + + nv_change_interrupt_mode(dev, tx_work + rx_work); + + if (unlikely(np->events & NVREG_IRQ_LINK)) { + spin_lock_irqsave(&np->lock, flags); + nv_link_irq(dev); + spin_unlock_irqrestore(&np->lock, flags); + } + if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { + spin_lock_irqsave(&np->lock, flags); + nv_linkchange(dev); + spin_unlock_irqrestore(&np->lock, flags); + np->link_timeout = jiffies + LINK_TIMEOUT; + } + if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { + spin_lock_irqsave(&np->lock, flags); + if (!np->in_shutdown) { + np->nic_poll_irq = np->irqmask; + np->recover_error = 1; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + 
spin_unlock_irqrestore(&np->lock, flags); + napi_complete(napi); + return rx_work; + } + + if (rx_work < budget) { + /* re-enable interrupts + (msix not enabled in napi) */ + napi_complete(napi); + + writel(np->irqmask, base + NvRegIrqMask); + } + return rx_work; +} + +static irqreturn_t nv_nic_irq_rx(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + int i; + unsigned long flags; + + for (i = 0;; i++) { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; + writel(events, base + NvRegMSIXIrqStatus); + netdev_dbg(dev, "rx irq events: %08x\n", events); + if (!(events & np->irqmask)) + break; + + if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { + if (unlikely(nv_alloc_rx_optimized(dev))) { + spin_lock_irqsave(&np->lock, flags); + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + spin_unlock_irqrestore(&np->lock, flags); + } + } + + if (unlikely(i > max_interrupt_work)) { + spin_lock_irqsave(&np->lock, flags); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_RX_ALL; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_dbg(dev, "%s: too many iterations (%d)\n", + __func__, i); + break; + } + } + + return IRQ_RETVAL(i); +} + +static irqreturn_t nv_nic_irq_other(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + int i; + unsigned long flags; + + for (i = 0;; i++) { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; + writel(events, base + NvRegMSIXIrqStatus); + netdev_dbg(dev, "irq events: %08x\n", events); + if (!(events & np->irqmask)) + break; + + /* check tx in case we reached max loop limit in tx isr */ + spin_lock_irqsave(&np->lock, flags); + nv_tx_done_optimized(dev, TX_WORK_PER_LOOP); + spin_unlock_irqrestore(&np->lock, flags); + + if (events & NVREG_IRQ_LINK) { + spin_lock_irqsave(&np->lock, flags); + nv_link_irq(dev); + spin_unlock_irqrestore(&np->lock, flags); + } + if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { + spin_lock_irqsave(&np->lock, flags); + nv_linkchange(dev); + spin_unlock_irqrestore(&np->lock, flags); + np->link_timeout = jiffies + LINK_TIMEOUT; + } + if (events & NVREG_IRQ_RECOVER_ERROR) { + spin_lock_irqsave(&np->lock, flags); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_OTHER; + np->recover_error = 1; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + break; + } + if (unlikely(i > max_interrupt_work)) { + spin_lock_irqsave(&np->lock, flags); + /* disable interrupts on the nic */ + writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); + pci_push(base); + + if (!np->in_shutdown) { + np->nic_poll_irq |= NVREG_IRQ_OTHER; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_dbg(dev, "%s: too many iterations (%d)\n", + __func__, i); + break; + } + + } + + return IRQ_RETVAL(i); +} + +static irqreturn_t nv_nic_irq_test(int foo, void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 events; + 
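+	/* ack only the timer event and note in np->intr_test that it fired */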
+ if (!(np->msi_flags & NV_MSI_X_ENABLED)) { + events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; + writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus); + } else { + events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; + writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); + } + pci_push(base); + if (!(events & NVREG_IRQ_TIMER)) + return IRQ_RETVAL(0); + + nv_msi_workaround(np); + + spin_lock(&np->lock); + np->intr_test = 1; + spin_unlock(&np->lock); + + return IRQ_RETVAL(1); +} + +static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) +{ + u8 __iomem *base = get_hwbase(dev); + int i; + u32 msixmap = 0; + + /* Each interrupt bit can be mapped to a MSIX vector (4 bits). + * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents + * the remaining 8 interrupts. + */ + for (i = 0; i < 8; i++) { + if ((irqmask >> i) & 0x1) + msixmap |= vector << (i << 2); + } + writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); + + msixmap = 0; + for (i = 0; i < 8; i++) { + if ((irqmask >> (i + 8)) & 0x1) + msixmap |= vector << (i << 2); + } + writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); +} + +static int nv_request_irq(struct net_device *dev, int intr_test) +{ + struct fe_priv *np = get_nvpriv(dev); + u8 __iomem *base = get_hwbase(dev); + int ret; + int i; + irqreturn_t (*handler)(int foo, void *data); + + if (intr_test) { + handler = nv_nic_irq_test; + } else { + if (nv_optimized(np)) + handler = nv_nic_irq_optimized; + else + handler = nv_nic_irq; + } + + if (np->msi_flags & NV_MSI_X_CAPABLE) { + for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) + np->msi_x_entry[i].entry = i; + ret = pci_enable_msix_range(np->pci_dev, + np->msi_x_entry, + np->msi_flags & NV_MSI_X_VECTORS_MASK, + np->msi_flags & NV_MSI_X_VECTORS_MASK); + if (ret > 0) { + np->msi_flags |= NV_MSI_X_ENABLED; + if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { + /* Request irq for rx handling */ + sprintf(np->name_rx, "%s-rx", dev->name); + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, + nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); + if (ret) { + netdev_info(dev, + "request_irq failed for rx %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_err; + } + /* Request irq for tx handling */ + sprintf(np->name_tx, "%s-tx", dev->name); + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, + nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); + if (ret) { + netdev_info(dev, + "request_irq failed for tx %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_free_rx; + } + /* Request irq for link and timer handling */ + sprintf(np->name_other, "%s-other", dev->name); + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, + nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); + if (ret) { + netdev_info(dev, + "request_irq failed for link %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_free_tx; + } + /* map interrupts to their respective vector */ + writel(0, base + NvRegMSIXMap0); + writel(0, base + NvRegMSIXMap1); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); + set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); + } else { + /* Request irq for all interrupts */ + ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, + handler, 
IRQF_SHARED, dev->name, dev); + if (ret) { + netdev_info(dev, + "request_irq failed %d\n", + ret); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + goto out_err; + } + + /* map interrupts to vector 0 */ + writel(0, base + NvRegMSIXMap0); + writel(0, base + NvRegMSIXMap1); + } + netdev_info(dev, "MSI-X enabled\n"); + return 0; + } + } + if (np->msi_flags & NV_MSI_CAPABLE) { + ret = pci_enable_msi(np->pci_dev); + if (ret == 0) { + np->msi_flags |= NV_MSI_ENABLED; + ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); + if (ret) { + netdev_info(dev, "request_irq failed %d\n", + ret); + pci_disable_msi(np->pci_dev); + np->msi_flags &= ~NV_MSI_ENABLED; + goto out_err; + } + + /* map interrupts to vector 0 */ + writel(0, base + NvRegMSIMap0); + writel(0, base + NvRegMSIMap1); + /* enable msi vector 0 */ + writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); + netdev_info(dev, "MSI enabled\n"); + return 0; + } + } + + if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) + goto out_err; + + return 0; +out_free_tx: + free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); +out_free_rx: + free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); +out_err: + return 1; +} + +static void nv_free_irq(struct net_device *dev) +{ + struct fe_priv *np = get_nvpriv(dev); + int i; + + if (np->msi_flags & NV_MSI_X_ENABLED) { + for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) + free_irq(np->msi_x_entry[i].vector, dev); + pci_disable_msix(np->pci_dev); + np->msi_flags &= ~NV_MSI_X_ENABLED; + } else { + free_irq(np->pci_dev->irq, dev); + if (np->msi_flags & NV_MSI_ENABLED) { + pci_disable_msi(np->pci_dev); + np->msi_flags &= ~NV_MSI_ENABLED; + } + } +} + +static void nv_do_nic_poll(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 mask = 0; + + /* + * First disable irq(s) and then + * reenable interrupts on the nic, we have to do this before calling + * nv_nic_irq because that may decide to do otherwise + */ + + if (!using_multi_irqs(dev)) { + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + disable_irq_lockdep(np->pci_dev->irq); + mask = np->irqmask; + } else { + if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + mask |= NVREG_IRQ_RX_ALL; + } + if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + mask |= NVREG_IRQ_TX_ALL; + } + if (np->nic_poll_irq & NVREG_IRQ_OTHER) { + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + mask |= NVREG_IRQ_OTHER; + } + } + /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ + + if (np->recover_error) { + np->recover_error = 0; + netdev_info(dev, "MAC in recoverable error state\n"); + if (netif_running(dev)) { + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + if (np->driver_data & DEV_HAS_POWER_CNTRL) + nv_mac_reset(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + /* reinit driver view of the rx queue */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | 
NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(base); + /* clear interrupts */ + if (!(np->msi_flags & NV_MSI_X_ENABLED)) + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + else + writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); + + /* restart rx engine */ + nv_start_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + } + + writel(mask, base + NvRegIrqMask); + pci_push(base); + + if (!using_multi_irqs(dev)) { + np->nic_poll_irq = 0; + if (nv_optimized(np)) + nv_nic_irq_optimized(0, dev); + else + nv_nic_irq(0, dev); + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else + enable_irq_lockdep(np->pci_dev->irq); + } else { + if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { + np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; + nv_nic_irq_rx(0, dev); + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + } + if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { + np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; + nv_nic_irq_tx(0, dev); + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); + } + if (np->nic_poll_irq & NVREG_IRQ_OTHER) { + np->nic_poll_irq &= ~NVREG_IRQ_OTHER; + nv_nic_irq_other(0, dev); + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); + } + } + +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void nv_poll_controller(struct net_device *dev) +{ + nv_do_nic_poll((unsigned long) dev); +} +#endif + +static void nv_do_stats_poll(unsigned long data) + __acquires(&netdev_priv(dev)->hwstats_lock) + __releases(&netdev_priv(dev)->hwstats_lock) +{ + struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); + + /* If lock is currently taken, the stats are being refreshed + * and hence fresh enough */ + if (spin_trylock(&np->hwstats_lock)) { + nv_update_stats(dev); + spin_unlock(&np->hwstats_lock); + } + + if (!np->in_shutdown) + mod_timer(&np->stats_poll, + round_jiffies(jiffies + STATS_INTERVAL)); +} + +static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct fe_priv *np = netdev_priv(dev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) +{ + struct fe_priv *np = netdev_priv(dev); + wolinfo->supported = WAKE_MAGIC; + + spin_lock_irq(&np->lock); + if (np->wolenabled) + wolinfo->wolopts = WAKE_MAGIC; + spin_unlock_irq(&np->lock); +} + +static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 flags = 0; + + if (wolinfo->wolopts == 0) { + np->wolenabled = 0; + } else if (wolinfo->wolopts & WAKE_MAGIC) { + np->wolenabled = 1; + flags = NVREG_WAKEUPFLAGS_ENABLE; + } + if (netif_running(dev)) { + spin_lock_irq(&np->lock); + writel(flags, base + NvRegWakeUpFlags); + spin_unlock_irq(&np->lock); + } + device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); + return 0; +} + +static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct fe_priv *np = netdev_priv(dev); + u32 speed; + int adv; + + spin_lock_irq(&np->lock); + 
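+ /* Hold np->lock while sampling the link state: np->linkspeed, np->duplex
+  * and np->autoneg are updated from the interrupt/linkchange path and must
+  * not change while they are copied into ecmd below.
+  */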
ecmd->port = PORT_MII; + if (!netif_running(dev)) { + /* We do not track link speed / duplex setting if the + * interface is disabled. Force a link check */ + if (nv_update_linkspeed(dev)) { + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } else { + if (netif_carrier_ok(dev)) + netif_carrier_off(dev); + } + } + + if (netif_carrier_ok(dev)) { + switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { + case NVREG_LINKSPEED_10: + speed = SPEED_10; + break; + case NVREG_LINKSPEED_100: + speed = SPEED_100; + break; + case NVREG_LINKSPEED_1000: + speed = SPEED_1000; + break; + default: + speed = -1; + break; + } + ecmd->duplex = DUPLEX_HALF; + if (np->duplex) + ecmd->duplex = DUPLEX_FULL; + } else { + speed = SPEED_UNKNOWN; + ecmd->duplex = DUPLEX_UNKNOWN; + } + ethtool_cmd_speed_set(ecmd, speed); + ecmd->autoneg = np->autoneg; + + ecmd->advertising = ADVERTISED_MII; + if (np->autoneg) { + ecmd->advertising |= ADVERTISED_Autoneg; + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + if (adv & ADVERTISE_10HALF) + ecmd->advertising |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + ecmd->advertising |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + ecmd->advertising |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (np->gigabit == PHY_GIGABIT) { + adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + if (adv & ADVERTISE_1000FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } + } + ecmd->supported = (SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_MII); + if (np->gigabit == PHY_GIGABIT) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->phy_address = np->phyaddr; + ecmd->transceiver = XCVR_EXTERNAL; + + /* ignore maxtxpkt, maxrxpkt for now */ + spin_unlock_irq(&np->lock); + return 0; +} + +static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct fe_priv *np = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(ecmd); + + if (ecmd->port != PORT_MII) + return -EINVAL; + if (ecmd->transceiver != XCVR_EXTERNAL) + return -EINVAL; + if (ecmd->phy_address != np->phyaddr) { + /* TODO: support switching between multiple phys. Should be + * trivial, but not enabled due to lack of test hardware. */ + return -EINVAL; + } + if (ecmd->autoneg == AUTONEG_ENABLE) { + u32 mask; + + mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + if (np->gigabit == PHY_GIGABIT) + mask |= ADVERTISED_1000baseT_Full; + + if ((ecmd->advertising & mask) == 0) + return -EINVAL; + + } else if (ecmd->autoneg == AUTONEG_DISABLE) { + /* Note: autonegotiation disable, speed 1000 intentionally + * forbidden - no one should need that. */ + + if (speed != SPEED_10 && speed != SPEED_100) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + return -EINVAL; + } + + netif_carrier_off(dev); + if (netif_running(dev)) { + unsigned long flags; + + nv_disable_irq(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + /* with plain spinlock lockdep complains */ + spin_lock_irqsave(&np->lock, flags); + /* stop engines */ + /* FIXME: + * this can take some time, and interrupts are disabled + * due to spin_lock_irqsave, but let's hope no daemon + * is going to change the settings very often... 
+ * Worst case: + * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX + * + some minor delays, which is up to a second approximately + */ + nv_stop_rxtx(dev); + spin_unlock_irqrestore(&np->lock, flags); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + + if (ecmd->autoneg == AUTONEG_ENABLE) { + int adv, bmcr; + + np->autoneg = 1; + + /* advertise only what has been requested */ + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + if (ecmd->advertising & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + adv |= ADVERTISE_PAUSE_ASYM; + mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); + + if (np->gigabit == PHY_GIGABIT) { + adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + adv &= ~ADVERTISE_1000FULL; + if (ecmd->advertising & ADVERTISED_1000baseT_Full) + adv |= ADVERTISE_1000FULL; + mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); + } + + if (netif_running(dev)) + netdev_info(dev, "link down\n"); + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (np->phy_model == PHY_MODEL_MARVELL_E3016) { + bmcr |= BMCR_ANENABLE; + /* reset the phy in order for settings to stick, + * and cause autoneg to start */ + if (phy_reset(dev, bmcr)) { + netdev_info(dev, "phy reset failed\n"); + return -EINVAL; + } + } else { + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + } + } else { + int adv, bmcr; + + np->autoneg = 0; + + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) + adv |= ADVERTISE_10HALF; + if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) + adv |= ADVERTISE_10FULL; + if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) + adv |= ADVERTISE_100HALF; + if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) + adv |= ADVERTISE_100FULL; + np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); + if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */ + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + } + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { + adv |= ADVERTISE_PAUSE_ASYM; + np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + } + mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); + np->fixed_mode = adv; + + if (np->gigabit == PHY_GIGABIT) { + adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); + adv &= ~ADVERTISE_1000FULL; + mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); + } + + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); + if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) + bmcr |= BMCR_FULLDPLX; + if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) + bmcr |= BMCR_SPEED100; + if (np->phy_oui == PHY_OUI_MARVELL) { + /* reset the phy in order for forced mode settings to stick */ + if (phy_reset(dev, bmcr)) { + netdev_info(dev, "phy 
reset failed\n"); + return -EINVAL; + } + } else { + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + if (netif_running(dev)) { + /* Wait a bit and then reconfigure the nic. */ + udelay(10); + nv_linkchange(dev); + } + } + } + + if (netif_running(dev)) { + nv_start_rxtx(dev); + nv_enable_irq(dev); + } + + return 0; +} + +#define FORCEDETH_REGS_VER 1 + +static int nv_get_regs_len(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + return np->register_size; +} + +static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 *rbuf = buf; + int i; + + regs->version = FORCEDETH_REGS_VER; + spin_lock_irq(&np->lock); + for (i = 0; i < np->register_size/sizeof(u32); i++) + rbuf[i] = readl(base + i*sizeof(u32)); + spin_unlock_irq(&np->lock); +} + +static int nv_nway_reset(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int ret; + + if (np->autoneg) { + int bmcr; + + netif_carrier_off(dev); + if (netif_running(dev)) { + nv_disable_irq(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + netdev_info(dev, "link down\n"); + } + + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (np->phy_model == PHY_MODEL_MARVELL_E3016) { + bmcr |= BMCR_ANENABLE; + /* reset the phy in order for settings to stick*/ + if (phy_reset(dev, bmcr)) { + netdev_info(dev, "phy reset failed\n"); + return -EINVAL; + } + } else { + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + } + + if (netif_running(dev)) { + nv_start_rxtx(dev); + nv_enable_irq(dev); + } + ret = 0; + } else { + ret = -EINVAL; + } + + return ret; +} + +static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +{ + struct fe_priv *np = netdev_priv(dev); + + ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; + ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? 
RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; + + ring->rx_pending = np->rx_ring_size; + ring->tx_pending = np->tx_ring_size; +} + +static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u8 *rxtx_ring, *rx_skbuff, *tx_skbuff; + dma_addr_t ring_addr; + + if (ring->rx_pending < RX_RING_MIN || + ring->tx_pending < TX_RING_MIN || + ring->rx_mini_pending != 0 || + ring->rx_jumbo_pending != 0 || + (np->desc_ver == DESC_VER_1 && + (ring->rx_pending > RING_MAX_DESC_VER_1 || + ring->tx_pending > RING_MAX_DESC_VER_1)) || + (np->desc_ver != DESC_VER_1 && + (ring->rx_pending > RING_MAX_DESC_VER_2_3 || + ring->tx_pending > RING_MAX_DESC_VER_2_3))) { + return -EINVAL; + } + + /* allocate new rings */ + if (!nv_optimized(np)) { + rxtx_ring = pci_alloc_consistent(np->pci_dev, + sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), + &ring_addr); + } else { + rxtx_ring = pci_alloc_consistent(np->pci_dev, + sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), + &ring_addr); + } + rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); + tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); + if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { + /* fall back to old rings */ + if (!nv_optimized(np)) { + if (rxtx_ring) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), + rxtx_ring, ring_addr); + } else { + if (rxtx_ring) + pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), + rxtx_ring, ring_addr); + } + + kfree(rx_skbuff); + kfree(tx_skbuff); + goto exit; + } + + if (netif_running(dev)) { + nv_disable_irq(dev); + nv_napi_disable(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain queues */ + nv_drain_rxtx(dev); + /* delete queues */ + free_rings(dev); + } + + /* set new values */ + np->rx_ring_size = ring->rx_pending; + np->tx_ring_size = ring->tx_pending; + + if (!nv_optimized(np)) { + np->rx_ring.orig = (struct ring_desc *)rxtx_ring; + np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; + } else { + np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; + np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; + } + np->rx_skb = (struct nv_skb_map *)rx_skbuff; + np->tx_skb = (struct nv_skb_map *)tx_skbuff; + np->ring_addr = ring_addr; + + memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); + memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); + + if (netif_running(dev)) { + /* reinit driver view of the queues */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + + /* reinit nic view of the queues */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(base); + + /* restart engines */ + nv_start_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + nv_napi_enable(dev); + nv_enable_irq(dev); + } + return 0; +exit: + return -ENOMEM; +} + +static void nv_get_pauseparam(struct net_device *dev, 
struct ethtool_pauseparam* pause) +{ + struct fe_priv *np = netdev_priv(dev); + + pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; + pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; + pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; +} + +static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) +{ + struct fe_priv *np = netdev_priv(dev); + int adv, bmcr; + + if ((!np->autoneg && np->duplex == 0) || + (np->autoneg && !pause->autoneg && np->duplex == 0)) { + netdev_info(dev, "can not set pause settings when forced link is in half duplex\n"); + return -EINVAL; + } + if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { + netdev_info(dev, "hardware does not support tx pause frames\n"); + return -EINVAL; + } + + netif_carrier_off(dev); + if (netif_running(dev)) { + nv_disable_irq(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock(&np->lock); + /* stop engines */ + nv_stop_rxtx(dev); + spin_unlock(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + + np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); + if (pause->rx_pause) + np->pause_flags |= NV_PAUSEFRAME_RX_REQ; + if (pause->tx_pause) + np->pause_flags |= NV_PAUSEFRAME_TX_REQ; + + if (np->autoneg && pause->autoneg) { + np->pause_flags |= NV_PAUSEFRAME_AUTONEG; + + adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); + adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ + adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) + adv |= ADVERTISE_PAUSE_ASYM; + mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); + + if (netif_running(dev)) + netdev_info(dev, "link down\n"); + bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); + } else { + np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); + if (pause->rx_pause) + np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; + if (pause->tx_pause) + np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; + + if (!netif_running(dev)) + nv_update_linkspeed(dev); + else + nv_update_pause(dev, np->pause_flags); + } + + if (netif_running(dev)) { + nv_start_rxtx(dev); + nv_enable_irq(dev); + } + return 0; +} + +static int nv_set_loopback(struct net_device *dev, netdev_features_t features) +{ + struct fe_priv *np = netdev_priv(dev); + unsigned long flags; + u32 miicontrol; + int err, retval = 0; + + spin_lock_irqsave(&np->lock, flags); + miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + if (features & NETIF_F_LOOPBACK) { + if (miicontrol & BMCR_LOOPBACK) { + spin_unlock_irqrestore(&np->lock, flags); + netdev_info(dev, "Loopback already enabled\n"); + return 0; + } + nv_disable_irq(dev); + /* Turn on loopback mode */ + miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000; + err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); + if (err) { + retval = PHY_ERROR; + spin_unlock_irqrestore(&np->lock, flags); + phy_init(dev); + } else { + if (netif_running(dev)) { + /* Force 1000 Mbps full-duplex */ + nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, + 1); + /* Force link up */ + netif_carrier_on(dev); + } + spin_unlock_irqrestore(&np->lock, flags); + netdev_info(dev, + "Internal PHY loopback mode enabled.\n"); + } + } else { + if (!(miicontrol & BMCR_LOOPBACK)) { + spin_unlock_irqrestore(&np->lock, 
flags); + netdev_info(dev, "Loopback already disabled\n"); + return 0; + } + nv_disable_irq(dev); + /* Turn off loopback */ + spin_unlock_irqrestore(&np->lock, flags); + netdev_info(dev, "Internal PHY loopback mode disabled.\n"); + phy_init(dev); + } + msleep(500); + spin_lock_irqsave(&np->lock, flags); + nv_enable_irq(dev); + spin_unlock_irqrestore(&np->lock, flags); + + return retval; +} + +static netdev_features_t nv_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* vlan is dependent on rx checksum offload */ + if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) + features |= NETIF_F_RXCSUM; + + return features; +} + +static void nv_vlan_mode(struct net_device *dev, netdev_features_t features) +{ + struct fe_priv *np = get_nvpriv(dev); + + spin_lock_irq(&np->lock); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; + else + np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; + else + np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; + + writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + + spin_unlock_irq(&np->lock); +} + +static int nv_set_features(struct net_device *dev, netdev_features_t features) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + netdev_features_t changed = dev->features ^ features; + int retval; + + if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) { + retval = nv_set_loopback(dev, features); + if (retval != 0) + return retval; + } + + if (changed & NETIF_F_RXCSUM) { + spin_lock_irq(&np->lock); + + if (features & NETIF_F_RXCSUM) + np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; + else + np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; + + if (netif_running(dev)) + writel(np->txrxctl_bits, base + NvRegTxRxControl); + + spin_unlock_irq(&np->lock); + } + + if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX)) + nv_vlan_mode(dev, features); + + return 0; +} + +static int nv_get_sset_count(struct net_device *dev, int sset) +{ + struct fe_priv *np = netdev_priv(dev); + + switch (sset) { + case ETH_SS_TEST: + if (np->driver_data & DEV_HAS_TEST_EXTENDED) + return NV_TEST_COUNT_EXTENDED; + else + return NV_TEST_COUNT_BASE; + case ETH_SS_STATS: + if (np->driver_data & DEV_HAS_STATISTICS_V3) + return NV_DEV_STATISTICS_V3_COUNT; + else if (np->driver_data & DEV_HAS_STATISTICS_V2) + return NV_DEV_STATISTICS_V2_COUNT; + else if (np->driver_data & DEV_HAS_STATISTICS_V1) + return NV_DEV_STATISTICS_V1_COUNT; + else + return 0; + default: + return -EOPNOTSUPP; + } +} + +static void nv_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *buffer) + __acquires(&netdev_priv(dev)->hwstats_lock) + __releases(&netdev_priv(dev)->hwstats_lock) +{ + struct fe_priv *np = netdev_priv(dev); + + spin_lock_bh(&np->hwstats_lock); + nv_update_stats(dev); + memcpy(buffer, &np->estats, + nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); + spin_unlock_bh(&np->hwstats_lock); +} + +static int nv_link_test(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + int mii_status; + + mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + + /* check phy link status */ + if (!(mii_status & BMSR_LSTATUS)) + return 0; + else + return 1; +} + +static int nv_register_test(struct net_device *dev) +{ + u8 __iomem *base = get_hwbase(dev); + int i = 0; + u32 orig_read, new_read; + + do { + orig_read = readl(base + 
nv_registers_test[i].reg); + + /* xor with mask to toggle bits */ + orig_read ^= nv_registers_test[i].mask; + + writel(orig_read, base + nv_registers_test[i].reg); + + new_read = readl(base + nv_registers_test[i].reg); + + if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) + return 0; + + /* restore original value */ + orig_read ^= nv_registers_test[i].mask; + writel(orig_read, base + nv_registers_test[i].reg); + + } while (nv_registers_test[++i].reg != 0); + + return 1; +} + +static int nv_interrupt_test(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int ret = 1; + int testcnt; + u32 save_msi_flags, save_poll_interval = 0; + + if (netif_running(dev)) { + /* free current irq */ + nv_free_irq(dev); + save_poll_interval = readl(base+NvRegPollingInterval); + } + + /* flag to test interrupt handler */ + np->intr_test = 0; + + /* setup test irq */ + save_msi_flags = np->msi_flags; + np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; + np->msi_flags |= 0x001; /* setup 1 vector */ + if (nv_request_irq(dev, 1)) + return 0; + + /* setup timer interrupt */ + writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); + writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); + + nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); + + /* wait for at least one interrupt */ + msleep(100); + + spin_lock_irq(&np->lock); + + /* flag should be set within ISR */ + testcnt = np->intr_test; + if (!testcnt) + ret = 2; + + nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); + if (!(np->msi_flags & NV_MSI_X_ENABLED)) + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + else + writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); + + spin_unlock_irq(&np->lock); + + nv_free_irq(dev); + + np->msi_flags = save_msi_flags; + + if (netif_running(dev)) { + writel(save_poll_interval, base + NvRegPollingInterval); + writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); + /* restore original irq */ + if (nv_request_irq(dev, 0)) + return 0; + } + + return ret; +} + +static int nv_loopback_test(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + struct sk_buff *tx_skb, *rx_skb; + dma_addr_t test_dma_addr; + u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? 
NV_TX_LASTPACKET : NV_TX2_LASTPACKET); + u32 flags; + int len, i, pkt_len; + u8 *pkt_data; + u32 filter_flags = 0; + u32 misc1_flags = 0; + int ret = 1; + + if (netif_running(dev)) { + nv_disable_irq(dev); + filter_flags = readl(base + NvRegPacketFilterFlags); + misc1_flags = readl(base + NvRegMisc1); + } else { + nv_txrx_reset(dev); + } + + /* reinit driver view of the rx queue */ + set_bufsize(dev); + nv_init_ring(dev); + + /* setup hardware for loopback */ + writel(NVREG_MISC1_FORCE, base + NvRegMisc1); + writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); + + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + + /* restart rx engine */ + nv_start_rxtx(dev); + + /* setup packet for tx */ + pkt_len = ETH_DATA_LEN; + tx_skb = netdev_alloc_skb(dev, pkt_len); + if (!tx_skb) { + ret = 0; + goto out; + } + test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, + skb_tailroom(tx_skb), + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(np->pci_dev, + test_dma_addr)) { + dev_kfree_skb_any(tx_skb); + goto out; + } + pkt_data = skb_put(tx_skb, pkt_len); + for (i = 0; i < pkt_len; i++) + pkt_data[i] = (u8)(i & 0xff); + + if (!nv_optimized(np)) { + np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); + np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); + } else { + np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); + np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); + np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); + } + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(get_hwbase(dev)); + + msleep(500); + + /* check for rx of the packet */ + if (!nv_optimized(np)) { + flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); + len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); + + } else { + flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); + len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); + } + + if (flags & NV_RX_AVAIL) { + ret = 0; + } else if (np->desc_ver == DESC_VER_1) { + if (flags & NV_RX_ERROR) + ret = 0; + } else { + if (flags & NV_RX2_ERROR) + ret = 0; + } + + if (ret) { + if (len != pkt_len) { + ret = 0; + } else { + rx_skb = np->rx_skb[0].skb; + for (i = 0; i < pkt_len; i++) { + if (rx_skb->data[i] != (u8)(i & 0xff)) { + ret = 0; + break; + } + } + } + } + + pci_unmap_single(np->pci_dev, test_dma_addr, + (skb_end_pointer(tx_skb) - tx_skb->data), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(tx_skb); + out: + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + + if (netif_running(dev)) { + writel(misc1_flags, base + NvRegMisc1); + writel(filter_flags, base + NvRegPacketFilterFlags); + nv_enable_irq(dev); + } + + return ret; +} + +static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int result, count; + + count = nv_get_sset_count(dev, ETH_SS_TEST); + memset(buffer, 0, count * sizeof(u64)); + + if (!nv_link_test(dev)) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[0] = 1; + } + + if (test->flags & ETH_TEST_FL_OFFLINE) { + if (netif_running(dev)) { + netif_stop_queue(dev); + 
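+ /* Quiesce the NIC before running the offline tests: disable NAPI, take
+  * the tx/addr locks, mask and clear interrupts, then stop and drain the
+  * rx/tx rings so the register, interrupt and loopback tests run on an
+  * idle device.
+  */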
nv_napi_disable(dev); + netif_tx_lock_bh(dev); + netif_addr_lock(dev); + spin_lock_irq(&np->lock); + nv_disable_hw_interrupts(dev, np->irqmask); + if (!(np->msi_flags & NV_MSI_X_ENABLED)) + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + else + writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); + /* stop engines */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + /* drain rx queue */ + nv_drain_rxtx(dev); + spin_unlock_irq(&np->lock); + netif_addr_unlock(dev); + netif_tx_unlock_bh(dev); + } + + if (!nv_register_test(dev)) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[1] = 1; + } + + result = nv_interrupt_test(dev); + if (result != 1) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[2] = 1; + } + if (result == 0) { + /* bail out */ + return; + } + + if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) { + test->flags |= ETH_TEST_FL_FAILED; + buffer[3] = 1; + } + + if (netif_running(dev)) { + /* reinit driver view of the rx queue */ + set_bufsize(dev); + if (nv_init_ring(dev)) { + if (!np->in_shutdown) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + } + /* reinit nic view of the rx queue */ + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + pci_push(base); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); + pci_push(base); + /* restart rx engine */ + nv_start_rxtx(dev); + netif_start_queue(dev); + nv_napi_enable(dev); + nv_enable_hw_interrupts(dev, np->irqmask); + } + } +} + +static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); + break; + case ETH_SS_TEST: + memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); + break; + } +} + +static const struct ethtool_ops ops = { + .get_drvinfo = nv_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_wol = nv_get_wol, + .set_wol = nv_set_wol, + .get_settings = nv_get_settings, + .set_settings = nv_set_settings, + .get_regs_len = nv_get_regs_len, + .get_regs = nv_get_regs, + .nway_reset = nv_nway_reset, + .get_ringparam = nv_get_ringparam, + .set_ringparam = nv_set_ringparam, + .get_pauseparam = nv_get_pauseparam, + .set_pauseparam = nv_set_pauseparam, + .get_strings = nv_get_strings, + .get_ethtool_stats = nv_get_ethtool_stats, + .get_sset_count = nv_get_sset_count, + .self_test = nv_self_test, + .get_ts_info = ethtool_op_get_ts_info, +}; + +/* The mgmt unit and driver use a semaphore to access the phy during init */ +static int nv_mgmt_acquire_sema(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int i; + u32 tx_ctrl, mgmt_sema; + + for (i = 0; i < 10; i++) { + mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK; + if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE) + break; + msleep(500); + } + + if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE) + return 0; + + for (i = 0; i < 2; i++) { + tx_ctrl = readl(base + NvRegTransmitterControl); + tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ; + writel(tx_ctrl, base + NvRegTransmitterControl); + + /* verify that semaphore was acquired */ + tx_ctrl = readl(base + NvRegTransmitterControl); + if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) && + ((tx_ctrl 
& NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { + np->mgmt_sema = 1; + return 1; + } else + udelay(50); + } + + return 0; +} + +static void nv_mgmt_release_sema(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 tx_ctrl; + + if (np->driver_data & DEV_HAS_MGMT_UNIT) { + if (np->mgmt_sema) { + tx_ctrl = readl(base + NvRegTransmitterControl); + tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ; + writel(tx_ctrl, base + NvRegTransmitterControl); + } + } +} + + +static int nv_mgmt_get_version(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + u32 data_ready = readl(base + NvRegTransmitterControl); + u32 data_ready2 = 0; + unsigned long start; + int ready = 0; + + writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion); + writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl); + start = jiffies; + while (time_before(jiffies, start + 5*HZ)) { + data_ready2 = readl(base + NvRegTransmitterControl); + if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) { + ready = 1; + break; + } + schedule_timeout_uninterruptible(1); + } + + if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR)) + return 0; + + np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; + + return 1; +} + +static int nv_open(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int ret = 1; + int oom, i; + u32 low; + + /* power up phy */ + mii_rw(dev, np->phyaddr, MII_BMCR, + mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); + + nv_txrx_gate(dev, false); + /* erase previous misconfiguration */ + if (np->driver_data & DEV_HAS_POWER_CNTRL) + nv_mac_reset(dev); + writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); + writel(0, base + NvRegMulticastAddrB); + writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); + writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); + writel(0, base + NvRegPacketFilterFlags); + + writel(0, base + NvRegTransmitterControl); + writel(0, base + NvRegReceiverControl); + + writel(0, base + NvRegAdapterControl); + + if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) + writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); + + /* initialize descriptor rings */ + set_bufsize(dev); + oom = nv_init_ring(dev); + + writel(0, base + NvRegLinkSpeed); + writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); + nv_txrx_reset(dev); + writel(0, base + NvRegUnknownSetupReg6); + + np->in_shutdown = 0; + + /* give hw rings */ + setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); + writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), + base + NvRegRingSizes); + + writel(np->linkspeed, base + NvRegLinkSpeed); + if (np->desc_ver == DESC_VER_1) + writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); + else + writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); + writel(np->txrxctl_bits, base + NvRegTxRxControl); + writel(np->vlanctl_bits, base + NvRegVlanControl); + pci_push(base); + writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); + if (reg_delay(dev, NvRegUnknownSetupReg5, + NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, + NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX)) + netdev_info(dev, + "%s: SetupReg5, Bit 31 remained off\n", __func__); + + writel(0, base + NvRegMIIMask); + 
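+ /* writing the mask values back into the status registers clears any
+  * interrupt and MII events still pending from before the reset
+  */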
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + + writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); + writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); + writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); + writel(np->rx_buf_sz, base + NvRegOffloadConfig); + + writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); + + get_random_bytes(&low, sizeof(low)); + low &= NVREG_SLOTTIME_MASK; + if (np->desc_ver == DESC_VER_1) { + writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime); + } else { + if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { + /* setup legacy backoff */ + writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime); + } else { + writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime); + nv_gear_backoff_reseed(dev); + } + } + writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); + writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); + if (poll_interval == -1) { + if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) + writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); + else + writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); + } else + writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); + writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); + writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, + base + NvRegAdapterControl); + writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); + writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask); + if (np->wolenabled) + writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); + + i = readl(base + NvRegPowerState); + if ((i & NVREG_POWERSTATE_POWEREDUP) == 0) + writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); + + pci_push(base); + udelay(10); + writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); + + nv_disable_hw_interrupts(dev, np->irqmask); + pci_push(base); + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); + pci_push(base); + + if (nv_request_irq(dev, 0)) + goto out_drain; + + /* ask for interrupts */ + nv_enable_hw_interrupts(dev, np->irqmask); + + spin_lock_irq(&np->lock); + writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); + writel(0, base + NvRegMulticastAddrB); + writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); + writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); + writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); + /* One manual link speed update: Interrupts are enabled, future link + * speed changes cause interrupts and are handled by nv_link_irq(). 
+ */ + { + u32 miistat; + miistat = readl(base + NvRegMIIStatus); + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + } + /* set linkspeed to invalid value, thus force nv_update_linkspeed + * to init hw */ + np->linkspeed = 0; + ret = nv_update_linkspeed(dev); + nv_start_rxtx(dev); + netif_start_queue(dev); + nv_napi_enable(dev); + + if (ret) { + netif_carrier_on(dev); + } else { + netdev_info(dev, "no link during initialization\n"); + netif_carrier_off(dev); + } + if (oom) + mod_timer(&np->oom_kick, jiffies + OOM_REFILL); + + /* start statistics timer */ + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) + mod_timer(&np->stats_poll, + round_jiffies(jiffies + STATS_INTERVAL)); + + spin_unlock_irq(&np->lock); + + /* If the loopback feature was set while the device was down, make sure + * that it's set correctly now. + */ + if (dev->features & NETIF_F_LOOPBACK) + nv_set_loopback(dev, dev->features); + + return 0; +out_drain: + nv_drain_rxtx(dev); + return ret; +} + +static int nv_close(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base; + + spin_lock_irq(&np->lock); + np->in_shutdown = 1; + spin_unlock_irq(&np->lock); + nv_napi_disable(dev); + synchronize_irq(np->pci_dev->irq); + + del_timer_sync(&np->oom_kick); + del_timer_sync(&np->nic_poll); + del_timer_sync(&np->stats_poll); + + netif_stop_queue(dev); + spin_lock_irq(&np->lock); + nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */ + nv_stop_rxtx(dev); + nv_txrx_reset(dev); + + /* disable interrupts on the nic or we will lock up */ + base = get_hwbase(dev); + nv_disable_hw_interrupts(dev, np->irqmask); + pci_push(base); + + spin_unlock_irq(&np->lock); + + nv_free_irq(dev); + + nv_drain_rxtx(dev); + + if (np->wolenabled || !phy_power_down) { + nv_txrx_gate(dev, false); + writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); + nv_start_rx(dev); + } else { + /* power down phy */ + mii_rw(dev, np->phyaddr, MII_BMCR, + mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); + nv_txrx_gate(dev, true); + } + + /* FIXME: power down nic */ + + return 0; +} + +static const struct net_device_ops nv_netdev_ops = { + .ndo_open = nv_open, + .ndo_stop = nv_close, + .ndo_get_stats64 = nv_get_stats64, + .ndo_start_xmit = nv_start_xmit, + .ndo_tx_timeout = nv_tx_timeout, + .ndo_change_mtu = nv_change_mtu, + .ndo_fix_features = nv_fix_features, + .ndo_set_features = nv_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = nv_set_mac_address, + .ndo_set_rx_mode = nv_set_multicast, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nv_poll_controller, +#endif +}; + +static const struct net_device_ops nv_netdev_ops_optimized = { + .ndo_open = nv_open, + .ndo_stop = nv_close, + .ndo_get_stats64 = nv_get_stats64, + .ndo_start_xmit = nv_start_xmit_optimized, + .ndo_tx_timeout = nv_tx_timeout, + .ndo_change_mtu = nv_change_mtu, + .ndo_fix_features = nv_fix_features, + .ndo_set_features = nv_set_features, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = nv_set_mac_address, + .ndo_set_rx_mode = nv_set_multicast, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = nv_poll_controller, +#endif +}; + +static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + struct net_device *dev; + struct fe_priv *np; + unsigned long addr; + u8 __iomem *base; + int err, i; + u32 powerstate, txreg; + u32 phystate_orig = 0, phystate; + int phyinitialized = 0; + static int printed_version; 
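+ /* print the version banner only once, no matter how many NICs are probed */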
+ + if (!printed_version++) + pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n", + FORCEDETH_VERSION); + + dev = alloc_etherdev(sizeof(struct fe_priv)); + err = -ENOMEM; + if (!dev) + goto out; + + np = netdev_priv(dev); + np->dev = dev; + np->pci_dev = pci_dev; + spin_lock_init(&np->lock); + spin_lock_init(&np->hwstats_lock); + SET_NETDEV_DEV(dev, &pci_dev->dev); + u64_stats_init(&np->swstats_rx_syncp); + u64_stats_init(&np->swstats_tx_syncp); + + init_timer(&np->oom_kick); + np->oom_kick.data = (unsigned long) dev; + np->oom_kick.function = nv_do_rx_refill; /* timer handler */ + init_timer(&np->nic_poll); + np->nic_poll.data = (unsigned long) dev; + np->nic_poll.function = nv_do_nic_poll; /* timer handler */ + init_timer_deferrable(&np->stats_poll); + np->stats_poll.data = (unsigned long) dev; + np->stats_poll.function = nv_do_stats_poll; /* timer handler */ + + err = pci_enable_device(pci_dev); + if (err) + goto out_free; + + pci_set_master(pci_dev); + + err = pci_request_regions(pci_dev, DRV_NAME); + if (err < 0) + goto out_disable; + + if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) + np->register_size = NV_PCI_REGSZ_VER3; + else if (id->driver_data & DEV_HAS_STATISTICS_V1) + np->register_size = NV_PCI_REGSZ_VER2; + else + np->register_size = NV_PCI_REGSZ_VER1; + + err = -EINVAL; + addr = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && + pci_resource_len(pci_dev, i) >= np->register_size) { + addr = pci_resource_start(pci_dev, i); + break; + } + } + if (i == DEVICE_COUNT_RESOURCE) { + dev_info(&pci_dev->dev, "Couldn't find register window\n"); + goto out_relreg; + } + + /* copy of driver data */ + np->driver_data = id->driver_data; + /* copy of device id */ + np->device_id = id->device; + + /* handle different descriptor versions */ + if (id->driver_data & DEV_HAS_HIGH_DMA) { + /* packet format 3: supports 40-bit addressing */ + np->desc_ver = DESC_VER_3; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; + if (dma_64bit) { + if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) + dev_info(&pci_dev->dev, + "64-bit DMA failed, using 32-bit addressing\n"); + else + dev->features |= NETIF_F_HIGHDMA; + if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { + dev_info(&pci_dev->dev, + "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); + } + } + } else if (id->driver_data & DEV_HAS_LARGEDESC) { + /* packet format 2: supports jumbo frames */ + np->desc_ver = DESC_VER_2; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; + } else { + /* original packet format */ + np->desc_ver = DESC_VER_1; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; + } + + np->pkt_limit = NV_PKTLIMIT_1; + if (id->driver_data & DEV_HAS_LARGEDESC) + np->pkt_limit = NV_PKTLIMIT_2; + + if (id->driver_data & DEV_HAS_CHECKSUM) { + np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_RXCSUM; + } + + np->vlanctl_bits = 0; + if (id->driver_data & DEV_HAS_VLAN) { + np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + + dev->features |= dev->hw_features; + + /* Add loopback capability to the device. 
*/ + dev->hw_features |= NETIF_F_LOOPBACK; + + np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; + if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || + (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || + (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { + np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; + } + + err = -ENOMEM; + np->base = ioremap(addr, np->register_size); + if (!np->base) + goto out_relreg; + + np->rx_ring_size = RX_RING_DEFAULT; + np->tx_ring_size = TX_RING_DEFAULT; + + if (!nv_optimized(np)) { + np->rx_ring.orig = pci_alloc_consistent(pci_dev, + sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), + &np->ring_addr); + if (!np->rx_ring.orig) + goto out_unmap; + np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; + } else { + np->rx_ring.ex = pci_alloc_consistent(pci_dev, + sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), + &np->ring_addr); + if (!np->rx_ring.ex) + goto out_unmap; + np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; + } + np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); + np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); + if (!np->rx_skb || !np->tx_skb) + goto out_freering; + + if (!nv_optimized(np)) + dev->netdev_ops = &nv_netdev_ops; + else + dev->netdev_ops = &nv_netdev_ops_optimized; + + netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); + dev->ethtool_ops = &ops; + dev->watchdog_timeo = NV_WATCHDOG_TIMEO; + + pci_set_drvdata(pci_dev, dev); + + /* read the mac address */ + base = get_hwbase(dev); + np->orig_mac[0] = readl(base + NvRegMacAddrA); + np->orig_mac[1] = readl(base + NvRegMacAddrB); + + /* check the workaround bit for correct mac address order */ + txreg = readl(base + NvRegTransmitPoll); + if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { + /* mac address is already in correct order */ + dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; + dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; + dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; + dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; + dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; + dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { + /* mac address is already in correct order */ + dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; + dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; + dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; + dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; + dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; + dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; + /* + * Set orig mac address back to the reversed version. + * This flag will be cleared during low power transition. + * Therefore, we should always put back the reversed address. 
+ */ + np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + + (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24); + np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); + } else { + /* need to reverse mac address to correct order */ + dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; + dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; + dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; + dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; + dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; + dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; + writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); + dev_dbg(&pci_dev->dev, + "%s: set workaround bit for reversed mac addr\n", + __func__); + } + + if (!is_valid_ether_addr(dev->dev_addr)) { + /* + * Bad mac address. At least one bios sets the mac address + * to 01:23:45:67:89:ab + */ + dev_err(&pci_dev->dev, + "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n", + dev->dev_addr); + eth_hw_addr_random(dev); + dev_err(&pci_dev->dev, + "Using random MAC address: %pM\n", dev->dev_addr); + } + + /* set mac address */ + nv_copy_mac_to_hw(dev); + + /* disable WOL */ + writel(0, base + NvRegWakeUpFlags); + np->wolenabled = 0; + device_set_wakeup_enable(&pci_dev->dev, false); + + if (id->driver_data & DEV_HAS_POWER_CNTRL) { + + /* take phy and nic out of low power mode */ + powerstate = readl(base + NvRegPowerState2); + powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK; + if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) && + pci_dev->revision >= 0xA3) + powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3; + writel(powerstate, base + NvRegPowerState2); + } + + if (np->desc_ver == DESC_VER_1) + np->tx_flags = NV_TX_VALID; + else + np->tx_flags = NV_TX2_VALID; + + np->msi_flags = 0; + if ((id->driver_data & DEV_HAS_MSI) && msi) + np->msi_flags |= NV_MSI_CAPABLE; + + if ((id->driver_data & DEV_HAS_MSI_X) && msix) { + /* msix has had reported issues when modifying irqmask + as in the case of napi, therefore, disable for now + */ +#if 0 + np->msi_flags |= NV_MSI_X_CAPABLE; +#endif + } + + if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) { + np->irqmask = NVREG_IRQMASK_CPU; + if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ + np->msi_flags |= 0x0001; + } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC && + !(id->driver_data & DEV_NEED_TIMERIRQ)) { + /* start off in throughput mode */ + np->irqmask = NVREG_IRQMASK_THROUGHPUT; + /* remove support for msix mode */ + np->msi_flags &= ~NV_MSI_X_CAPABLE; + } else { + optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT; + np->irqmask = NVREG_IRQMASK_THROUGHPUT; + if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ + np->msi_flags |= 0x0003; + } + + if (id->driver_data & DEV_NEED_TIMERIRQ) + np->irqmask |= NVREG_IRQ_TIMER; + if (id->driver_data & DEV_NEED_LINKTIMER) { + np->need_linktimer = 1; + np->link_timeout = jiffies + LINK_TIMEOUT; + } else { + np->need_linktimer = 0; + } + + /* Limit the number of tx's outstanding for hw bug */ + if (id->driver_data & DEV_NEED_TX_LIMIT) { + np->tx_limit = 1; + if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && + pci_dev->revision >= 0xA2) + np->tx_limit = 0; + } + + /* clear phy state and temporarily halt phy interrupts */ + writel(0, base + NvRegMIIMask); + phystate = readl(base + NvRegAdapterControl); + if (phystate & NVREG_ADAPTCTL_RUNNING) { + phystate_orig = 1; + phystate &= ~NVREG_ADAPTCTL_RUNNING; + writel(phystate, base + 
NvRegAdapterControl); + } + writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); + + if (id->driver_data & DEV_HAS_MGMT_UNIT) { + /* management unit running on the mac? */ + if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) && + (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) && + nv_mgmt_acquire_sema(dev) && + nv_mgmt_get_version(dev)) { + np->mac_in_use = 1; + if (np->mgmt_version > 0) + np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; + /* management unit setup the phy already? */ + if (np->mac_in_use && + ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == + NVREG_XMITCTL_SYNC_PHY_INIT)) { + /* phy is inited by mgmt unit */ + phyinitialized = 1; + } else { + /* we need to init the phy */ + } + } + } + + /* find a suitable phy */ + for (i = 1; i <= 32; i++) { + int id1, id2; + int phyaddr = i & 0x1F; + + spin_lock_irq(&np->lock); + id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ); + spin_unlock_irq(&np->lock); + if (id1 < 0 || id1 == 0xffff) + continue; + spin_lock_irq(&np->lock); + id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); + spin_unlock_irq(&np->lock); + if (id2 < 0 || id2 == 0xffff) + continue; + + np->phy_model = id2 & PHYID2_MODEL_MASK; + id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; + id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; + np->phyaddr = phyaddr; + np->phy_oui = id1 | id2; + + /* Realtek hardcoded phy id1 to all zero's on certain phys */ + if (np->phy_oui == PHY_OUI_REALTEK2) + np->phy_oui = PHY_OUI_REALTEK; + /* Setup phy revision for Realtek */ + if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) + np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; + + break; + } + if (i == 33) { + dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n"); + goto out_error; + } + + if (!phyinitialized) { + /* reset it */ + phy_init(dev); + } else { + /* see if it is a gigabit phy */ + u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); + if (mii_status & PHY_GIGABIT) + np->gigabit = PHY_GIGABIT; + } + + /* set default link speed settings */ + np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; + np->duplex = 0; + np->autoneg = 1; + + err = register_netdev(dev); + if (err) { + dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err); + goto out_error; + } + + netif_carrier_off(dev); + + /* Some NICs freeze when TX pause is enabled while NIC is + * down, and this stays across warm reboots. The sequence + * below should be enough to recover from that state. + */ + nv_update_pause(dev, 0); + nv_start_tx(dev); + nv_stop_tx(dev); + + if (id->driver_data & DEV_HAS_VLAN) + nv_vlan_mode(dev, dev->features); + + dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", + dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); + + dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", + dev->features & NETIF_F_HIGHDMA ? "highdma " : "", + dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? + "csum " : "", + dev->features & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX) ? + "vlan " : "", + dev->features & (NETIF_F_LOOPBACK) ? + "loopback " : "", + id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", + id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", + id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", + np->gigabit == PHY_GIGABIT ? "gbit " : "", + np->need_linktimer ? "lnktim " : "", + np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", + np->msi_flags & NV_MSI_X_CAPABLE ? 
"msi-x " : "", + np->desc_ver); + + return 0; + +out_error: + if (phystate_orig) + writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); +out_freering: + free_rings(dev); +out_unmap: + iounmap(get_hwbase(dev)); +out_relreg: + pci_release_regions(pci_dev); +out_disable: + pci_disable_device(pci_dev); +out_free: + free_netdev(dev); +out: + return err; +} + +static void nv_restore_phy(struct net_device *dev) +{ + struct fe_priv *np = netdev_priv(dev); + u16 phy_reserved, mii_control; + + if (np->phy_oui == PHY_OUI_REALTEK && + np->phy_model == PHY_MODEL_REALTEK_8201 && + phy_cross == NV_CROSSOVER_DETECTION_DISABLED) { + mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); + phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); + phy_reserved &= ~PHY_REALTEK_INIT_MSK1; + phy_reserved |= PHY_REALTEK_INIT8; + mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); + mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); + + /* restart auto negotiation */ + mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); + mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); + } +} + +static void nv_restore_mac_addr(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + + /* special op: write back the misordered MAC address - otherwise + * the next nv_probe would see a wrong address. + */ + writel(np->orig_mac[0], base + NvRegMacAddrA); + writel(np->orig_mac[1], base + NvRegMacAddrB); + writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV, + base + NvRegTransmitPoll); +} + +static void nv_remove(struct pci_dev *pci_dev) +{ + struct net_device *dev = pci_get_drvdata(pci_dev); + + unregister_netdev(dev); + + nv_restore_mac_addr(pci_dev); + + /* restore any phy related changes */ + nv_restore_phy(dev); + + nv_mgmt_release_sema(dev); + + /* free all structures */ + free_rings(dev); + iounmap(get_hwbase(dev)); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + free_netdev(dev); +} + +#ifdef CONFIG_PM_SLEEP +static int nv_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int i; + + if (netif_running(dev)) { + /* Gross. 
*/ + nv_close(dev); + } + netif_device_detach(dev); + + /* save non-pci configuration space */ + for (i = 0; i <= np->register_size/sizeof(u32); i++) + np->saved_config_space[i] = readl(base + i*sizeof(u32)); + + return 0; +} + +static int nv_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct fe_priv *np = netdev_priv(dev); + u8 __iomem *base = get_hwbase(dev); + int i, rc = 0; + + /* restore non-pci configuration space */ + for (i = 0; i <= np->register_size/sizeof(u32); i++) + writel(np->saved_config_space[i], base+i*sizeof(u32)); + + if (np->driver_data & DEV_NEED_MSI_FIX) + pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE); + + /* restore phy state, including autoneg */ + phy_init(dev); + + netif_device_attach(dev); + if (netif_running(dev)) { + rc = nv_open(dev); + nv_set_multicast(dev); + } + return rc; +} + +static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); +#define NV_PM_OPS (&nv_pm_ops) + +#else +#define NV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static void nv_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct fe_priv *np = netdev_priv(dev); + + if (netif_running(dev)) + nv_close(dev); + + /* + * Restore the MAC so a kernel started by kexec won't get confused. + * If we really go for poweroff, we must not restore the MAC, + * otherwise the MAC for WOL will be reversed at least on some boards. + */ + if (system_state != SYSTEM_POWER_OFF) + nv_restore_mac_addr(pdev); + + pci_disable_device(pdev); + /* + * Apparently it is not possible to reinitialise from D3 hot, + * only put the device into D3 if we really go for poweroff. + */ + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, np->wolenabled); + pci_set_power_state(pdev, PCI_D3hot); + } +} +#else +#define nv_shutdown NULL +#endif /* CONFIG_PM */ + +static const struct pci_device_id pci_tbl[] = { + { /* nForce Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x01C3), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, + }, + { /* nForce2 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0066), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x00D6), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0086), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x008C), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x00E6), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* nForce3 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x00DF), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, + }, + { /* CK804 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0056), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* CK804 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0057), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* MCP04 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0037), + .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* MCP04 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0038), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT, + }, + { /* MCP51 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0268), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, + }, + { /* MCP51 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0269), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX, + }, + { /* MCP55 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0372), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, + }, + { /* MCP55 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0373), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03E5), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03E6), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03EE), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP61 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x03EF), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX, + }, + { /* MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0450), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0451), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0452), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* 
MCP65 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0453), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054C), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054D), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054E), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP67 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x054F), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DC), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DD), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DE), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP73 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x07DF), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0760), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0761), + .driver_data = 
DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0762), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP77 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0763), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB0), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB1), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB2), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP79 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0AB3), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX, + }, + { /* MCP89 Ethernet Controller */ + PCI_DEVICE(0x10DE, 0x0D7D), + .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX, + }, + {0,}, +}; + +static struct pci_driver forcedeth_pci_driver = { + .name = DRV_NAME, + .id_table = pci_tbl, + .probe = nv_probe, + .remove = nv_remove, + .shutdown = nv_shutdown, + .driver.pm = NV_PM_OPS, +}; + +module_param(max_interrupt_work, int, 0); +MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt"); +module_param(optimization_mode, int, 0); +MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. 
In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load."); +module_param(poll_interval, int, 0); +MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535."); +module_param(msi, int, 0); +MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0."); +module_param(msix, int, 0); +MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0."); +module_param(dma_64bit, int, 0); +MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); +module_param(phy_cross, int, 0); +MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); +module_param(phy_power_down, int, 0); +MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); +module_param(debug_tx_timeout, bool, 0); +MODULE_PARM_DESC(debug_tx_timeout, + "Dump tx related registers and ring when tx_timeout happens"); + +module_pci_driver(forcedeth_pci_driver); +MODULE_AUTHOR("Manfred Spraul "); +MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pci_tbl); diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig new file mode 100644 index 000000000..0d9baf98a --- /dev/null +++ b/drivers/net/ethernet/nxp/Kconfig @@ -0,0 +1,8 @@ +config LPC_ENET + tristate "NXP ethernet MAC on LPC devices" + depends on ARCH_LPC32XX + select PHYLIB + help + Say Y or M here if you want to use the NXP ethernet MAC included on + some NXP LPC devices. You can safely enable this option for LPC32xx + SoC. Also available as a module. diff --git a/drivers/net/ethernet/nxp/Makefile b/drivers/net/ethernet/nxp/Makefile new file mode 100644 index 000000000..a128114e6 --- /dev/null +++ b/drivers/net/ethernet/nxp/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_LPC_ENET) += lpc_eth.o diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c new file mode 100644 index 000000000..66fd86815 --- /dev/null +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -0,0 +1,1609 @@ +/* + * drivers/net/ethernet/nxp/lpc_eth.c + * + * Author: Kevin Wells + * + * Copyright (C) 2010 NXP Semiconductors + * Copyright (C) 2012 Roland Stigge + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define MODNAME "lpc-eth" +#define DRV_VERSION "1.00" + +#define ENET_MAXF_SIZE 1536 +#define ENET_RX_DESC 48 +#define ENET_TX_DESC 16 + +#define NAPI_WEIGHT 16 + +/* + * Ethernet MAC controller Register offsets + */ +#define LPC_ENET_MAC1(x) (x + 0x000) +#define LPC_ENET_MAC2(x) (x + 0x004) +#define LPC_ENET_IPGT(x) (x + 0x008) +#define LPC_ENET_IPGR(x) (x + 0x00C) +#define LPC_ENET_CLRT(x) (x + 0x010) +#define LPC_ENET_MAXF(x) (x + 0x014) +#define LPC_ENET_SUPP(x) (x + 0x018) +#define LPC_ENET_TEST(x) (x + 0x01C) +#define LPC_ENET_MCFG(x) (x + 0x020) +#define LPC_ENET_MCMD(x) (x + 0x024) +#define LPC_ENET_MADR(x) (x + 0x028) +#define LPC_ENET_MWTD(x) (x + 0x02C) +#define LPC_ENET_MRDD(x) (x + 0x030) +#define LPC_ENET_MIND(x) (x + 0x034) +#define LPC_ENET_SA0(x) (x + 0x040) +#define LPC_ENET_SA1(x) (x + 0x044) +#define LPC_ENET_SA2(x) (x + 0x048) +#define LPC_ENET_COMMAND(x) (x + 0x100) +#define LPC_ENET_STATUS(x) (x + 0x104) +#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108) +#define LPC_ENET_RXSTATUS(x) (x + 0x10C) +#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110) +#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114) +#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118) +#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C) +#define LPC_ENET_TXSTATUS(x) (x + 0x120) +#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124) +#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128) +#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C) +#define LPC_ENET_TSV0(x) (x + 0x158) +#define LPC_ENET_TSV1(x) (x + 0x15C) +#define LPC_ENET_RSV(x) (x + 0x160) +#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170) +#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174) +#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200) +#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204) +#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208) +#define LPC_ENET_HASHFILTERL(x) (x + 0x210) +#define LPC_ENET_HASHFILTERH(x) (x + 0x214) +#define LPC_ENET_INTSTATUS(x) (x + 0xFE0) +#define LPC_ENET_INTENABLE(x) (x + 0xFE4) +#define LPC_ENET_INTCLEAR(x) (x + 0xFE8) +#define LPC_ENET_INTSET(x) (x + 0xFEC) +#define LPC_ENET_POWERDOWN(x) (x + 0xFF4) + +/* + * mac1 register definitions + */ +#define LPC_MAC1_RECV_ENABLE (1 << 0) +#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1) +#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2) +#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3) +#define LPC_MAC1_LOOPBACK (1 << 4) +#define LPC_MAC1_RESET_TX (1 << 8) +#define LPC_MAC1_RESET_MCS_TX (1 << 9) +#define LPC_MAC1_RESET_RX (1 << 10) +#define LPC_MAC1_RESET_MCS_RX (1 << 11) +#define LPC_MAC1_SIMULATION_RESET (1 << 14) +#define LPC_MAC1_SOFT_RESET (1 << 15) + +/* + * mac2 register definitions + */ +#define LPC_MAC2_FULL_DUPLEX (1 << 0) +#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1) +#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2) +#define LPC_MAC2_DELAYED_CRC (1 << 3) +#define LPC_MAC2_CRC_ENABLE (1 << 4) +#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5) +#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6) +#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7) +#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8) +#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9) +#define LPC_MAC2_NO_BACKOFF (1 << 12) +#define LPC_MAC2_BACK_PRESSURE (1 << 13) +#define LPC_MAC2_EXCESS_DEFER (1 << 14) + +/* + * ipgt register definitions + */ 
+#define LPC_IPGT_LOAD(n) ((n) & 0x7F) + +/* + * ipgr register definitions + */ +#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F) +#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8) + +/* + * clrt register definitions + */ +#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF) +#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8) + +/* + * maxf register definitions + */ +#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF) + +/* + * supp register definitions + */ +#define LPC_SUPP_SPEED (1 << 8) +#define LPC_SUPP_RESET_RMII (1 << 11) + +/* + * test register definitions + */ +#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0) +#define LPC_TEST_PAUSE (1 << 1) +#define LPC_TEST_BACKPRESSURE (1 << 2) + +/* + * mcfg register definitions + */ +#define LPC_MCFG_SCAN_INCREMENT (1 << 0) +#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1) +#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2) +#define LPC_MCFG_CLOCK_HOST_DIV_4 0 +#define LPC_MCFG_CLOCK_HOST_DIV_6 2 +#define LPC_MCFG_CLOCK_HOST_DIV_8 3 +#define LPC_MCFG_CLOCK_HOST_DIV_10 4 +#define LPC_MCFG_CLOCK_HOST_DIV_14 5 +#define LPC_MCFG_CLOCK_HOST_DIV_20 6 +#define LPC_MCFG_CLOCK_HOST_DIV_28 7 +#define LPC_MCFG_RESET_MII_MGMT (1 << 15) + +/* + * mcmd register definitions + */ +#define LPC_MCMD_READ (1 << 0) +#define LPC_MCMD_SCAN (1 << 1) + +/* + * madr register definitions + */ +#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F) +#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8) + +/* + * mwtd register definitions + */ +#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF) + +/* + * mrdd register definitions + */ +#define LPC_MRDD_READ_MASK 0xFFFF + +/* + * mind register definitions + */ +#define LPC_MIND_BUSY (1 << 0) +#define LPC_MIND_SCANNING (1 << 1) +#define LPC_MIND_NOT_VALID (1 << 2) +#define LPC_MIND_MII_LINK_FAIL (1 << 3) + +/* + * command register definitions + */ +#define LPC_COMMAND_RXENABLE (1 << 0) +#define LPC_COMMAND_TXENABLE (1 << 1) +#define LPC_COMMAND_REG_RESET (1 << 3) +#define LPC_COMMAND_TXRESET (1 << 4) +#define LPC_COMMAND_RXRESET (1 << 5) +#define LPC_COMMAND_PASSRUNTFRAME (1 << 6) +#define LPC_COMMAND_PASSRXFILTER (1 << 7) +#define LPC_COMMAND_TXFLOWCONTROL (1 << 8) +#define LPC_COMMAND_RMII (1 << 9) +#define LPC_COMMAND_FULLDUPLEX (1 << 10) + +/* + * status register definitions + */ +#define LPC_STATUS_RXACTIVE (1 << 0) +#define LPC_STATUS_TXACTIVE (1 << 1) + +/* + * tsv0 register definitions + */ +#define LPC_TSV0_CRC_ERROR (1 << 0) +#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1) +#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2) +#define LPC_TSV0_DONE (1 << 3) +#define LPC_TSV0_MULTICAST (1 << 4) +#define LPC_TSV0_BROADCAST (1 << 5) +#define LPC_TSV0_PACKET_DEFER (1 << 6) +#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7) +#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8) +#define LPC_TSV0_LATE_COLLISION (1 << 9) +#define LPC_TSV0_GIANT (1 << 10) +#define LPC_TSV0_UNDERRUN (1 << 11) +#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF) +#define LPC_TSV0_CONTROL_FRAME (1 << 28) +#define LPC_TSV0_PAUSE (1 << 29) +#define LPC_TSV0_BACKPRESSURE (1 << 30) +#define LPC_TSV0_VLAN (1 << 31) + +/* + * tsv1 register definitions + */ +#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF) +#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF) + +/* + * rsv register definitions + */ +#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF) +#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16) +#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17) +#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18) +#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19) +#define 
LPC_RSV_CRC_ERROR (1 << 20) +#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21) +#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22) +#define LPC_RSV_RECEIVE_OK (1 << 23) +#define LPC_RSV_MULTICAST (1 << 24) +#define LPC_RSV_BROADCAST (1 << 25) +#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26) +#define LPC_RSV_CONTROL_FRAME (1 << 27) +#define LPC_RSV_PAUSE (1 << 28) +#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29) +#define LPC_RSV_VLAN (1 << 30) + +/* + * flowcontrolcounter register definitions + */ +#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF) +#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF) + +/* + * flowcontrolstatus register definitions + */ +#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF) + +/* + * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared + * register definitions + */ +#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0) +#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1) +#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2) +#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3) +#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4) +#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5) + +/* + * rxfliterctrl register definitions + */ +#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12) +#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13) + +/* + * rxfilterwolstatus/rxfilterwolclear register definitions + */ +#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7) +#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8) + +/* + * intstatus, intenable, intclear, and Intset shared register + * definitions + */ +#define LPC_MACINT_RXOVERRUNINTEN (1 << 0) +#define LPC_MACINT_RXERRORONINT (1 << 1) +#define LPC_MACINT_RXFINISHEDINTEN (1 << 2) +#define LPC_MACINT_RXDONEINTEN (1 << 3) +#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4) +#define LPC_MACINT_TXERRORINTEN (1 << 5) +#define LPC_MACINT_TXFINISHEDINTEN (1 << 6) +#define LPC_MACINT_TXDONEINTEN (1 << 7) +#define LPC_MACINT_SOFTINTEN (1 << 12) +#define LPC_MACINT_WAKEUPINTEN (1 << 13) + +/* + * powerdown register definitions + */ +#define LPC_POWERDOWN_MACAHB (1 << 31) + +static phy_interface_t lpc_phy_interface_mode(struct device *dev) +{ + if (dev && dev->of_node) { + const char *mode = of_get_property(dev->of_node, + "phy-mode", NULL); + if (mode && !strcmp(mode, "mii")) + return PHY_INTERFACE_MODE_MII; + } + return PHY_INTERFACE_MODE_RMII; +} + +static bool use_iram_for_net(struct device *dev) +{ + if (dev && dev->of_node) + return of_property_read_bool(dev->of_node, "use-iram"); + return false; +} + +/* Receive Status information word */ +#define RXSTATUS_SIZE 0x000007FF +#define RXSTATUS_CONTROL (1 << 18) +#define RXSTATUS_VLAN (1 << 19) +#define RXSTATUS_FILTER (1 << 20) +#define RXSTATUS_MULTICAST (1 << 21) +#define RXSTATUS_BROADCAST (1 << 22) +#define RXSTATUS_CRC (1 << 23) +#define RXSTATUS_SYMBOL (1 << 24) +#define RXSTATUS_LENGTH (1 << 25) +#define RXSTATUS_RANGE (1 << 26) +#define RXSTATUS_ALIGN (1 << 27) +#define RXSTATUS_OVERRUN (1 << 28) +#define RXSTATUS_NODESC (1 << 29) +#define RXSTATUS_LAST (1 << 30) +#define RXSTATUS_ERROR (1 << 31) + +#define RXSTATUS_STATUS_ERROR \ + (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \ + RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC) + +/* Receive Descriptor control word */ +#define RXDESC_CONTROL_SIZE 0x000007FF +#define RXDESC_CONTROL_INT (1 << 31) + +/* Transmit Status information word */ +#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF) +#define TXSTATUS_DEFER (1 << 25) +#define TXSTATUS_EXCESSDEFER (1 << 26) +#define TXSTATUS_EXCESSCOLL (1 << 27) +#define TXSTATUS_LATECOLL (1 << 28) +#define 
TXSTATUS_UNDERRUN (1 << 29) +#define TXSTATUS_NODESC (1 << 30) +#define TXSTATUS_ERROR (1 << 31) + +/* Transmit Descriptor control word */ +#define TXDESC_CONTROL_SIZE 0x000007FF +#define TXDESC_CONTROL_OVERRIDE (1 << 26) +#define TXDESC_CONTROL_HUGE (1 << 27) +#define TXDESC_CONTROL_PAD (1 << 28) +#define TXDESC_CONTROL_CRC (1 << 29) +#define TXDESC_CONTROL_LAST (1 << 30) +#define TXDESC_CONTROL_INT (1 << 31) + +/* + * Structure of a TX/RX descriptors and RX status + */ +struct txrx_desc_t { + __le32 packet; + __le32 control; +}; +struct rx_status_t { + __le32 statusinfo; + __le32 statushashcrc; +}; + +/* + * Device driver data structure + */ +struct netdata_local { + struct platform_device *pdev; + struct net_device *ndev; + spinlock_t lock; + void __iomem *net_base; + u32 msg_enable; + unsigned int skblen[ENET_TX_DESC]; + unsigned int last_tx_idx; + unsigned int num_used_tx_buffs; + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + struct clk *clk; + dma_addr_t dma_buff_base_p; + void *dma_buff_base_v; + size_t dma_buff_size; + struct txrx_desc_t *tx_desc_v; + u32 *tx_stat_v; + void *tx_buff_v; + struct txrx_desc_t *rx_desc_v; + struct rx_status_t *rx_stat_v; + void *rx_buff_v; + int link; + int speed; + int duplex; + struct napi_struct napi; +}; + +/* + * MAC support functions + */ +static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) +{ + u32 tmp; + + /* Set station address */ + tmp = mac[0] | ((u32)mac[1] << 8); + writel(tmp, LPC_ENET_SA2(pldat->net_base)); + tmp = mac[2] | ((u32)mac[3] << 8); + writel(tmp, LPC_ENET_SA1(pldat->net_base)); + tmp = mac[4] | ((u32)mac[5] << 8); + writel(tmp, LPC_ENET_SA0(pldat->net_base)); + + netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac); +} + +static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) +{ + u32 tmp; + + /* Get station address */ + tmp = readl(LPC_ENET_SA2(pldat->net_base)); + mac[0] = tmp & 0xFF; + mac[1] = tmp >> 8; + tmp = readl(LPC_ENET_SA1(pldat->net_base)); + mac[2] = tmp & 0xFF; + mac[3] = tmp >> 8; + tmp = readl(LPC_ENET_SA0(pldat->net_base)); + mac[4] = tmp & 0xFF; + mac[5] = tmp >> 8; +} + +static void __lpc_eth_clock_enable(struct netdata_local *pldat, + bool enable) +{ + if (enable) + clk_enable(pldat->clk); + else + clk_disable(pldat->clk); +} + +static void __lpc_params_setup(struct netdata_local *pldat) +{ + u32 tmp; + + if (pldat->duplex == DUPLEX_FULL) { + tmp = readl(LPC_ENET_MAC2(pldat->net_base)); + tmp |= LPC_MAC2_FULL_DUPLEX; + writel(tmp, LPC_ENET_MAC2(pldat->net_base)); + tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); + tmp |= LPC_COMMAND_FULLDUPLEX; + writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); + writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base)); + } else { + tmp = readl(LPC_ENET_MAC2(pldat->net_base)); + tmp &= ~LPC_MAC2_FULL_DUPLEX; + writel(tmp, LPC_ENET_MAC2(pldat->net_base)); + tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); + tmp &= ~LPC_COMMAND_FULLDUPLEX; + writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); + writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base)); + } + + if (pldat->speed == SPEED_100) + writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base)); + else + writel(0, LPC_ENET_SUPP(pldat->net_base)); +} + +static void __lpc_eth_reset(struct netdata_local *pldat) +{ + /* Reset all MAC logic */ + writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX | + LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET | + LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base)); + writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET | + 
LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base)); +} + +static int __lpc_mii_mngt_reset(struct netdata_local *pldat) +{ + /* Reset MII management hardware */ + writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base)); + + /* Setup MII clock to slowest rate with a /28 divider */ + writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28), + LPC_ENET_MCFG(pldat->net_base)); + + return 0; +} + +static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat) +{ + phys_addr_t phaddr; + + phaddr = addr - pldat->dma_buff_base_v; + phaddr += pldat->dma_buff_base_p; + + return phaddr; +} + +static void lpc_eth_enable_int(void __iomem *regbase) +{ + writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN), + LPC_ENET_INTENABLE(regbase)); +} + +static void lpc_eth_disable_int(void __iomem *regbase) +{ + writel(0, LPC_ENET_INTENABLE(regbase)); +} + +/* Setup TX/RX descriptors */ +static void __lpc_txrx_desc_setup(struct netdata_local *pldat) +{ + u32 *ptxstat; + void *tbuff; + int i; + struct txrx_desc_t *ptxrxdesc; + struct rx_status_t *prxstat; + + tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16); + + /* Setup TX descriptors, status, and buffers */ + pldat->tx_desc_v = tbuff; + tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC; + + pldat->tx_stat_v = tbuff; + tbuff += sizeof(u32) * ENET_TX_DESC; + + tbuff = PTR_ALIGN(tbuff, 16); + pldat->tx_buff_v = tbuff; + tbuff += ENET_MAXF_SIZE * ENET_TX_DESC; + + /* Setup RX descriptors, status, and buffers */ + pldat->rx_desc_v = tbuff; + tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC; + + tbuff = PTR_ALIGN(tbuff, 16); + pldat->rx_stat_v = tbuff; + tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC; + + tbuff = PTR_ALIGN(tbuff, 16); + pldat->rx_buff_v = tbuff; + tbuff += ENET_MAXF_SIZE * ENET_RX_DESC; + + /* Map the TX descriptors to the TX buffers in hardware */ + for (i = 0; i < ENET_TX_DESC; i++) { + ptxstat = &pldat->tx_stat_v[i]; + ptxrxdesc = &pldat->tx_desc_v[i]; + + ptxrxdesc->packet = __va_to_pa( + pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat); + ptxrxdesc->control = 0; + *ptxstat = 0; + } + + /* Map the RX descriptors to the RX buffers in hardware */ + for (i = 0; i < ENET_RX_DESC; i++) { + prxstat = &pldat->rx_stat_v[i]; + ptxrxdesc = &pldat->rx_desc_v[i]; + + ptxrxdesc->packet = __va_to_pa( + pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat); + ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1); + prxstat->statusinfo = 0; + prxstat->statushashcrc = 0; + } + + /* Setup base addresses in hardware to point to buffers and + * descriptors + */ + writel((ENET_TX_DESC - 1), + LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base)); + writel(__va_to_pa(pldat->tx_desc_v, pldat), + LPC_ENET_TXDESCRIPTOR(pldat->net_base)); + writel(__va_to_pa(pldat->tx_stat_v, pldat), + LPC_ENET_TXSTATUS(pldat->net_base)); + writel((ENET_RX_DESC - 1), + LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base)); + writel(__va_to_pa(pldat->rx_desc_v, pldat), + LPC_ENET_RXDESCRIPTOR(pldat->net_base)); + writel(__va_to_pa(pldat->rx_stat_v, pldat), + LPC_ENET_RXSTATUS(pldat->net_base)); +} + +static void __lpc_eth_init(struct netdata_local *pldat) +{ + u32 tmp; + + /* Disable controller and reset */ + tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); + tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; + writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); + tmp = readl(LPC_ENET_MAC1(pldat->net_base)); + tmp &= ~LPC_MAC1_RECV_ENABLE; + writel(tmp, LPC_ENET_MAC1(pldat->net_base)); + + /* Initial MAC setup */ + writel(LPC_MAC1_PASS_ALL_RX_FRAMES, 
LPC_ENET_MAC1(pldat->net_base)); + writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE), + LPC_ENET_MAC2(pldat->net_base)); + writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base)); + + /* Collision window, gap */ + writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) | + LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)), + LPC_ENET_CLRT(pldat->net_base)); + writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base)); + + if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) + writel(LPC_COMMAND_PASSRUNTFRAME, + LPC_ENET_COMMAND(pldat->net_base)); + else { + writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII), + LPC_ENET_COMMAND(pldat->net_base)); + writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base)); + } + + __lpc_params_setup(pldat); + + /* Setup TX and RX descriptors */ + __lpc_txrx_desc_setup(pldat); + + /* Setup packet filtering */ + writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT), + LPC_ENET_RXFILTER_CTRL(pldat->net_base)); + + /* Get the next TX buffer output index */ + pldat->num_used_tx_buffs = 0; + pldat->last_tx_idx = + readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); + + /* Clear and enable interrupts */ + writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base)); + smp_wmb(); + lpc_eth_enable_int(pldat->net_base); + + /* Enable controller */ + tmp = readl(LPC_ENET_COMMAND(pldat->net_base)); + tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE; + writel(tmp, LPC_ENET_COMMAND(pldat->net_base)); + tmp = readl(LPC_ENET_MAC1(pldat->net_base)); + tmp |= LPC_MAC1_RECV_ENABLE; + writel(tmp, LPC_ENET_MAC1(pldat->net_base)); +} + +static void __lpc_eth_shutdown(struct netdata_local *pldat) +{ + /* Reset ethernet and power down PHY */ + __lpc_eth_reset(pldat); + writel(0, LPC_ENET_MAC1(pldat->net_base)); + writel(0, LPC_ENET_MAC2(pldat->net_base)); +} + +/* + * MAC<--->PHY support functions + */ +static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg) +{ + struct netdata_local *pldat = bus->priv; + unsigned long timeout = jiffies + msecs_to_jiffies(100); + int lps; + + writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base)); + writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base)); + + /* Wait for unbusy status */ + while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + + lps = readl(LPC_ENET_MRDD(pldat->net_base)); + writel(0, LPC_ENET_MCMD(pldat->net_base)); + + return lps; +} + +static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg, + u16 phydata) +{ + struct netdata_local *pldat = bus->priv; + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base)); + writel(phydata, LPC_ENET_MWTD(pldat->net_base)); + + /* Wait for completion */ + while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + + return 0; +} + +static int lpc_mdio_reset(struct mii_bus *bus) +{ + return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv); +} + +static void lpc_handle_link_change(struct net_device *ndev) +{ + struct netdata_local *pldat = netdev_priv(ndev); + struct phy_device *phydev = pldat->phy_dev; + unsigned long flags; + + bool status_change = false; + + spin_lock_irqsave(&pldat->lock, flags); + + if (phydev->link) { + if ((pldat->speed != phydev->speed) || + (pldat->duplex != phydev->duplex)) { + pldat->speed = phydev->speed; + pldat->duplex = phydev->duplex; + status_change = true; + } + } + + if (phydev->link != 
pldat->link) { + if (!phydev->link) { + pldat->speed = 0; + pldat->duplex = -1; + } + pldat->link = phydev->link; + + status_change = true; + } + + spin_unlock_irqrestore(&pldat->lock, flags); + + if (status_change) + __lpc_params_setup(pldat); +} + +static int lpc_mii_probe(struct net_device *ndev) +{ + struct netdata_local *pldat = netdev_priv(ndev); + struct phy_device *phydev = phy_find_first(pldat->mii_bus); + + if (!phydev) { + netdev_err(ndev, "no PHY found\n"); + return -ENODEV; + } + + /* Attach to the PHY */ + if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) + netdev_info(ndev, "using MII interface\n"); + else + netdev_info(ndev, "using RMII interface\n"); + phydev = phy_connect(ndev, dev_name(&phydev->dev), + &lpc_handle_link_change, + lpc_phy_interface_mode(&pldat->pdev->dev)); + + if (IS_ERR(phydev)) { + netdev_err(ndev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= PHY_BASIC_FEATURES; + + phydev->advertising = phydev->supported; + + pldat->link = 0; + pldat->speed = 0; + pldat->duplex = -1; + pldat->phy_dev = phydev; + + netdev_info(ndev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + return 0; +} + +static int lpc_mii_init(struct netdata_local *pldat) +{ + int err = -ENXIO, i; + + pldat->mii_bus = mdiobus_alloc(); + if (!pldat->mii_bus) { + err = -ENOMEM; + goto err_out; + } + + /* Setup MII mode */ + if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII) + writel(LPC_COMMAND_PASSRUNTFRAME, + LPC_ENET_COMMAND(pldat->net_base)); + else { + writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII), + LPC_ENET_COMMAND(pldat->net_base)); + writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base)); + } + + pldat->mii_bus->name = "lpc_mii_bus"; + pldat->mii_bus->read = &lpc_mdio_read; + pldat->mii_bus->write = &lpc_mdio_write; + pldat->mii_bus->reset = &lpc_mdio_reset; + snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pldat->pdev->name, pldat->pdev->id); + pldat->mii_bus->priv = pldat; + pldat->mii_bus->parent = &pldat->pdev->dev; + + pldat->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!pldat->mii_bus->irq) { + err = -ENOMEM; + goto err_out_1; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + pldat->mii_bus->irq[i] = PHY_POLL; + + platform_set_drvdata(pldat->pdev, pldat->mii_bus); + + if (mdiobus_register(pldat->mii_bus)) + goto err_out_free_mdio_irq; + + if (lpc_mii_probe(pldat->ndev) != 0) + goto err_out_unregister_bus; + + return 0; + +err_out_unregister_bus: + mdiobus_unregister(pldat->mii_bus); +err_out_free_mdio_irq: + kfree(pldat->mii_bus->irq); +err_out_1: + mdiobus_free(pldat->mii_bus); +err_out: + return err; +} + +static void __lpc_handle_xmit(struct net_device *ndev) +{ + struct netdata_local *pldat = netdev_priv(ndev); + u32 txcidx, *ptxstat, txstat; + + txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); + while (pldat->last_tx_idx != txcidx) { + unsigned int skblen = pldat->skblen[pldat->last_tx_idx]; + + /* A buffer is available, get buffer status */ + ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx]; + txstat = *ptxstat; + + /* Next buffer and decrement used buffer counter */ + pldat->num_used_tx_buffs--; + pldat->last_tx_idx++; + if (pldat->last_tx_idx >= ENET_TX_DESC) + pldat->last_tx_idx = 0; + + /* Update collision counter */ + ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat); + + /* Any errors occurred? 
*/ + if (txstat & TXSTATUS_ERROR) { + if (txstat & TXSTATUS_UNDERRUN) { + /* FIFO underrun */ + ndev->stats.tx_fifo_errors++; + } + if (txstat & TXSTATUS_LATECOLL) { + /* Late collision */ + ndev->stats.tx_aborted_errors++; + } + if (txstat & TXSTATUS_EXCESSCOLL) { + /* Excessive collision */ + ndev->stats.tx_aborted_errors++; + } + if (txstat & TXSTATUS_EXCESSDEFER) { + /* Defer limit */ + ndev->stats.tx_aborted_errors++; + } + ndev->stats.tx_errors++; + } else { + /* Update stats */ + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skblen; + } + + txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); + } + + if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) { + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } +} + +static int __lpc_handle_recv(struct net_device *ndev, int budget) +{ + struct netdata_local *pldat = netdev_priv(ndev); + struct sk_buff *skb; + u32 rxconsidx, len, ethst; + struct rx_status_t *prxstat; + u8 *prdbuf; + int rx_done = 0; + + /* Get the current RX buffer indexes */ + rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base)); + while (rx_done < budget && rxconsidx != + readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) { + /* Get pointer to receive status */ + prxstat = &pldat->rx_stat_v[rxconsidx]; + len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1; + + /* Status error? */ + ethst = prxstat->statusinfo; + if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) == + (RXSTATUS_ERROR | RXSTATUS_RANGE)) + ethst &= ~RXSTATUS_ERROR; + + if (ethst & RXSTATUS_ERROR) { + int si = prxstat->statusinfo; + /* Check statuses */ + if (si & RXSTATUS_OVERRUN) { + /* Overrun error */ + ndev->stats.rx_fifo_errors++; + } else if (si & RXSTATUS_CRC) { + /* CRC error */ + ndev->stats.rx_crc_errors++; + } else if (si & RXSTATUS_LENGTH) { + /* Length error */ + ndev->stats.rx_length_errors++; + } else if (si & RXSTATUS_ERROR) { + /* Other error */ + ndev->stats.rx_length_errors++; + } + ndev->stats.rx_errors++; + } else { + /* Packet is good */ + skb = dev_alloc_skb(len); + if (!skb) { + ndev->stats.rx_dropped++; + } else { + prdbuf = skb_put(skb, len); + + /* Copy packet from buffer */ + memcpy(prdbuf, pldat->rx_buff_v + + rxconsidx * ENET_MAXF_SIZE, len); + + /* Pass to upper layer */ + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + } + } + + /* Increment consume index */ + rxconsidx = rxconsidx + 1; + if (rxconsidx >= ENET_RX_DESC) + rxconsidx = 0; + writel(rxconsidx, + LPC_ENET_RXCONSUMEINDEX(pldat->net_base)); + rx_done++; + } + + return rx_done; +} + +static int lpc_eth_poll(struct napi_struct *napi, int budget) +{ + struct netdata_local *pldat = container_of(napi, + struct netdata_local, napi); + struct net_device *ndev = pldat->ndev; + int rx_done = 0; + struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0); + + __netif_tx_lock(txq, smp_processor_id()); + __lpc_handle_xmit(ndev); + __netif_tx_unlock(txq); + rx_done = __lpc_handle_recv(ndev, budget); + + if (rx_done < budget) { + napi_complete(napi); + lpc_eth_enable_int(pldat->net_base); + } + + return rx_done; +} + +static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct netdata_local *pldat = netdev_priv(ndev); + u32 tmp; + + spin_lock(&pldat->lock); + + tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base)); + /* Clear interrupts */ + writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base)); + + lpc_eth_disable_int(pldat->net_base); + if (likely(napi_schedule_prep(&pldat->napi))) + 
__napi_schedule(&pldat->napi); + + spin_unlock(&pldat->lock); + + return IRQ_HANDLED; +} + +static int lpc_eth_close(struct net_device *ndev) +{ + unsigned long flags; + struct netdata_local *pldat = netdev_priv(ndev); + + if (netif_msg_ifdown(pldat)) + dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name); + + napi_disable(&pldat->napi); + netif_stop_queue(ndev); + + if (pldat->phy_dev) + phy_stop(pldat->phy_dev); + + spin_lock_irqsave(&pldat->lock, flags); + __lpc_eth_reset(pldat); + netif_carrier_off(ndev); + writel(0, LPC_ENET_MAC1(pldat->net_base)); + writel(0, LPC_ENET_MAC2(pldat->net_base)); + spin_unlock_irqrestore(&pldat->lock, flags); + + __lpc_eth_clock_enable(pldat, false); + + return 0; +} + +static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct netdata_local *pldat = netdev_priv(ndev); + u32 len, txidx; + u32 *ptxstat; + struct txrx_desc_t *ptxrxdesc; + + len = skb->len; + + spin_lock_irq(&pldat->lock); + + if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) { + /* This function should never be called when there are no + buffers */ + netif_stop_queue(ndev); + spin_unlock_irq(&pldat->lock); + WARN(1, "BUG! TX request when no free TX buffers!\n"); + return NETDEV_TX_BUSY; + } + + /* Get the next TX descriptor index */ + txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base)); + + /* Setup control for the transfer */ + ptxstat = &pldat->tx_stat_v[txidx]; + *ptxstat = 0; + ptxrxdesc = &pldat->tx_desc_v[txidx]; + ptxrxdesc->control = + (len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT; + + /* Copy data to the DMA buffer */ + memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len); + + /* Save the buffer and increment the buffer counter */ + pldat->skblen[txidx] = len; + pldat->num_used_tx_buffs++; + + /* Start transmit */ + txidx++; + if (txidx >= ENET_TX_DESC) + txidx = 0; + writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base)); + + /* Stop queue if no more TX buffers */ + if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) + netif_stop_queue(ndev); + + spin_unlock_irq(&pldat->lock); + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int lpc_set_mac_address(struct net_device *ndev, void *p) +{ + struct sockaddr *addr = p; + struct netdata_local *pldat = netdev_priv(ndev); + unsigned long flags; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + + spin_lock_irqsave(&pldat->lock, flags); + + /* Set station address */ + __lpc_set_mac(pldat, ndev->dev_addr); + + spin_unlock_irqrestore(&pldat->lock, flags); + + return 0; +} + +static void lpc_eth_set_multicast_list(struct net_device *ndev) +{ + struct netdata_local *pldat = netdev_priv(ndev); + struct netdev_hw_addr_list *mcptr = &ndev->mc; + struct netdev_hw_addr *ha; + u32 tmp32, hash_val, hashlo, hashhi; + unsigned long flags; + + spin_lock_irqsave(&pldat->lock, flags); + + /* Set station address */ + __lpc_set_mac(pldat, ndev->dev_addr); + + tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT; + + if (ndev->flags & IFF_PROMISC) + tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST | + LPC_RXFLTRW_ACCEPTUMULTICAST; + if (ndev->flags & IFF_ALLMULTI) + tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST; + + if (netdev_hw_addr_list_count(mcptr)) + tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH; + + writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base)); + + + /* Set initial hash table */ + hashlo = 0x0; + hashhi = 0x0; + + /* 64 bits : multicast address in hash table */ + netdev_hw_addr_list_for_each(ha, mcptr) { + 
hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F; + + if (hash_val >= 32) + hashhi |= 1 << (hash_val - 32); + else + hashlo |= 1 << hash_val; + } + + writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base)); + writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base)); + + spin_unlock_irqrestore(&pldat->lock, flags); +} + +static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) +{ + struct netdata_local *pldat = netdev_priv(ndev); + struct phy_device *phydev = pldat->phy_dev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, req, cmd); +} + +static int lpc_eth_open(struct net_device *ndev) +{ + struct netdata_local *pldat = netdev_priv(ndev); + + if (netif_msg_ifup(pldat)) + dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); + + __lpc_eth_clock_enable(pldat, true); + + /* Suspended PHY makes LPC ethernet core block, so resume now */ + phy_resume(pldat->phy_dev); + + /* Reset and initialize */ + __lpc_eth_reset(pldat); + __lpc_eth_init(pldat); + + /* schedule a link state check */ + phy_start(pldat->phy_dev); + netif_start_queue(ndev); + napi_enable(&pldat->napi); + + return 0; +} + +/* + * Ethtool ops + */ +static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(ndev->dev.parent), + sizeof(info->bus_info)); +} + +static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev) +{ + struct netdata_local *pldat = netdev_priv(ndev); + + return pldat->msg_enable; +} + +static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level) +{ + struct netdata_local *pldat = netdev_priv(ndev); + + pldat->msg_enable = level; +} + +static int lpc_eth_ethtool_getsettings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct netdata_local *pldat = netdev_priv(ndev); + struct phy_device *phydev = pldat->phy_dev; + + if (!phydev) + return -EOPNOTSUPP; + + return phy_ethtool_gset(phydev, cmd); +} + +static int lpc_eth_ethtool_setsettings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct netdata_local *pldat = netdev_priv(ndev); + struct phy_device *phydev = pldat->phy_dev; + + if (!phydev) + return -EOPNOTSUPP; + + return phy_ethtool_sset(phydev, cmd); +} + +static const struct ethtool_ops lpc_eth_ethtool_ops = { + .get_drvinfo = lpc_eth_ethtool_getdrvinfo, + .get_settings = lpc_eth_ethtool_getsettings, + .set_settings = lpc_eth_ethtool_setsettings, + .get_msglevel = lpc_eth_ethtool_getmsglevel, + .set_msglevel = lpc_eth_ethtool_setmsglevel, + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops lpc_netdev_ops = { + .ndo_open = lpc_eth_open, + .ndo_stop = lpc_eth_close, + .ndo_start_xmit = lpc_eth_hard_start_xmit, + .ndo_set_rx_mode = lpc_eth_set_multicast_list, + .ndo_do_ioctl = lpc_eth_ioctl, + .ndo_set_mac_address = lpc_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int lpc_eth_drv_probe(struct platform_device *pdev) +{ + struct resource *res; + struct net_device *ndev; + struct netdata_local *pldat; + struct phy_device *phydev; + dma_addr_t dma_handle; + int irq, ret; + u32 tmp; + + /* Setup network interface for RMII or MII mode */ + tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL); + tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK; + if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII) + tmp |= 
LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS; + else + tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS; + __raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL); + + /* Get platform resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { + dev_err(&pdev->dev, "error getting resources.\n"); + ret = -ENXIO; + goto err_exit; + } + + /* Allocate net driver data structure */ + ndev = alloc_etherdev(sizeof(struct netdata_local)); + if (!ndev) { + dev_err(&pdev->dev, "could not allocate device.\n"); + ret = -ENOMEM; + goto err_exit; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + pldat = netdev_priv(ndev); + pldat->pdev = pdev; + pldat->ndev = ndev; + + spin_lock_init(&pldat->lock); + + /* Save resources */ + ndev->irq = irq; + + /* Get clock for the device */ + pldat->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(pldat->clk)) { + dev_err(&pdev->dev, "error getting clock.\n"); + ret = PTR_ERR(pldat->clk); + goto err_out_free_dev; + } + + /* Enable network clock */ + __lpc_eth_clock_enable(pldat, true); + + /* Map IO space */ + pldat->net_base = ioremap(res->start, resource_size(res)); + if (!pldat->net_base) { + dev_err(&pdev->dev, "failed to map registers\n"); + ret = -ENOMEM; + goto err_out_disable_clocks; + } + ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0, + ndev->name, ndev); + if (ret) { + dev_err(&pdev->dev, "error requesting interrupt.\n"); + goto err_out_iounmap; + } + + /* Setup driver functions */ + ndev->netdev_ops = &lpc_netdev_ops; + ndev->ethtool_ops = &lpc_eth_ethtool_ops; + ndev->watchdog_timeo = msecs_to_jiffies(2500); + + /* Get size of DMA buffers/descriptors region */ + pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE + + sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t)); + pldat->dma_buff_base_v = 0; + + if (use_iram_for_net(&pldat->pdev->dev)) { + dma_handle = LPC32XX_IRAM_BASE; + if (pldat->dma_buff_size <= lpc32xx_return_iram_size()) + pldat->dma_buff_base_v = + io_p2v(LPC32XX_IRAM_BASE); + else + netdev_err(ndev, + "IRAM not big enough for net buffers, using SDRAM instead.\n"); + } + + if (pldat->dma_buff_base_v == 0) { + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + goto err_out_free_irq; + + pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); + + /* Allocate a chunk of memory for the DMA ethernet buffers + and descriptors */ + pldat->dma_buff_base_v = + dma_alloc_coherent(&pldat->pdev->dev, + pldat->dma_buff_size, &dma_handle, + GFP_KERNEL); + if (pldat->dma_buff_base_v == NULL) { + ret = -ENOMEM; + goto err_out_free_irq; + } + } + pldat->dma_buff_base_p = dma_handle; + + netdev_dbg(ndev, "IO address space :%pR\n", res); + netdev_dbg(ndev, "IO address size :%d\n", resource_size(res)); + netdev_dbg(ndev, "IO address (mapped) :0x%p\n", + pldat->net_base); + netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq); + netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size); + netdev_dbg(ndev, "DMA buffer P address :0x%08x\n", + pldat->dma_buff_base_p); + netdev_dbg(ndev, "DMA buffer V address :0x%p\n", + pldat->dma_buff_base_v); + + /* Get MAC address from current HW setting (POR state is all zeros) */ + __lpc_get_mac(pldat, ndev->dev_addr); + + if (!is_valid_ether_addr(ndev->dev_addr)) { + const char *macaddr = of_get_mac_address(pdev->dev.of_node); + if (macaddr) + memcpy(ndev->dev_addr, macaddr, ETH_ALEN); + } + if (!is_valid_ether_addr(ndev->dev_addr)) + eth_hw_addr_random(ndev); + + /* Reset the ethernet controller */ + 
__lpc_eth_reset(pldat); + + /* then shut everything down to save power */ + __lpc_eth_shutdown(pldat); + + /* Set default parameters */ + pldat->msg_enable = NETIF_MSG_LINK; + + /* Force an MII interface reset and clock setup */ + __lpc_mii_mngt_reset(pldat); + + /* Force default PHY interface setup in chip, this will probably be + changed by the PHY driver */ + pldat->link = 0; + pldat->speed = 100; + pldat->duplex = DUPLEX_FULL; + __lpc_params_setup(pldat); + + netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_dma_unmap; + } + platform_set_drvdata(pdev, ndev); + + ret = lpc_mii_init(pldat); + if (ret) + goto err_out_unregister_netdev; + + netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", + res->start, ndev->irq); + + phydev = pldat->phy_dev; + + device_init_wakeup(&pdev->dev, 1); + device_set_wakeup_enable(&pdev->dev, 0); + + return 0; + +err_out_unregister_netdev: + unregister_netdev(ndev); +err_out_dma_unmap: + if (!use_iram_for_net(&pldat->pdev->dev) || + pldat->dma_buff_size > lpc32xx_return_iram_size()) + dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size, + pldat->dma_buff_base_v, + pldat->dma_buff_base_p); +err_out_free_irq: + free_irq(ndev->irq, ndev); +err_out_iounmap: + iounmap(pldat->net_base); +err_out_disable_clocks: + clk_disable(pldat->clk); + clk_put(pldat->clk); +err_out_free_dev: + free_netdev(ndev); +err_exit: + pr_err("%s: not found (%d).\n", MODNAME, ret); + return ret; +} + +static int lpc_eth_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct netdata_local *pldat = netdev_priv(ndev); + + unregister_netdev(ndev); + + if (!use_iram_for_net(&pldat->pdev->dev) || + pldat->dma_buff_size > lpc32xx_return_iram_size()) + dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size, + pldat->dma_buff_base_v, + pldat->dma_buff_base_p); + free_irq(ndev->irq, ndev); + iounmap(pldat->net_base); + mdiobus_unregister(pldat->mii_bus); + mdiobus_free(pldat->mii_bus); + clk_disable(pldat->clk); + clk_put(pldat->clk); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +static int lpc_eth_drv_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct netdata_local *pldat = netdev_priv(ndev); + + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(ndev->irq); + + if (ndev) { + if (netif_running(ndev)) { + netif_device_detach(ndev); + __lpc_eth_shutdown(pldat); + clk_disable(pldat->clk); + + /* + * Reset again now clock is disable to be sure + * EMC_MDC is down + */ + __lpc_eth_reset(pldat); + } + } + + return 0; +} + +static int lpc_eth_drv_resume(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct netdata_local *pldat; + + if (device_may_wakeup(&pdev->dev)) + disable_irq_wake(ndev->irq); + + if (ndev) { + if (netif_running(ndev)) { + pldat = netdev_priv(ndev); + + /* Enable interface clock */ + clk_enable(pldat->clk); + + /* Reset and initialize */ + __lpc_eth_reset(pldat); + __lpc_eth_init(pldat); + + netif_device_attach(ndev); + } + } + + return 0; +} +#endif + +#ifdef CONFIG_OF +static const struct of_device_id lpc_eth_match[] = { + { .compatible = "nxp,lpc-eth" }, + { } +}; +MODULE_DEVICE_TABLE(of, lpc_eth_match); +#endif + +static struct platform_driver lpc_eth_driver = { + .probe = lpc_eth_drv_probe, + .remove = lpc_eth_drv_remove, +#ifdef CONFIG_PM + .suspend = 
lpc_eth_drv_suspend, + .resume = lpc_eth_drv_resume, +#endif + .driver = { + .name = MODNAME, + .of_match_table = of_match_ptr(lpc_eth_match), + }, +}; + +module_platform_driver(lpc_eth_driver); + +MODULE_AUTHOR("Kevin Wells "); +MODULE_AUTHOR("Roland Stigge "); +MODULE_DESCRIPTION("LPC Ethernet Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig new file mode 100644 index 000000000..a7aa28054 --- /dev/null +++ b/drivers/net/ethernet/octeon/Kconfig @@ -0,0 +1,14 @@ +# +# Cavium network device configuration +# + +config OCTEON_MGMT_ETHERNET + tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" + depends on CAVIUM_OCTEON_SOC + select PHYLIB + select MDIO_OCTEON + default y + ---help--- + This option enables the ethernet driver for the management + port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, + CN54XX, CN52XX, and CN6XXX chips. diff --git a/drivers/net/ethernet/octeon/Makefile b/drivers/net/ethernet/octeon/Makefile new file mode 100644 index 000000000..efa41c1d9 --- /dev/null +++ b/drivers/net/ethernet/octeon/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Cavium network device drivers. +# + +obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c new file mode 100644 index 000000000..7bf9c028d --- /dev/null +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -0,0 +1,1597 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009-2012 Cavium, Inc + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DRV_NAME "octeon_mgmt" +#define DRV_VERSION "2.0" +#define DRV_DESCRIPTION \ + "Cavium Networks Octeon MII (management) port Network Driver" + +#define OCTEON_MGMT_NAPI_WEIGHT 16 + +/* Ring sizes that are powers of two allow for more efficient modulo + * opertions. + */ +#define OCTEON_MGMT_RX_RING_SIZE 512 +#define OCTEON_MGMT_TX_RING_SIZE 128 + +/* Allow 8 bytes for vlan and FCS. 
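+ * (VLAN_HLEN + ETH_FCS_LEN accounts for those 8 bytes; ETH_HLEN below adds the
+ * 14-byte Ethernet header.)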
*/ +#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +union mgmt_port_ring_entry { + u64 d64; + struct { +#define RING_ENTRY_CODE_DONE 0xf +#define RING_ENTRY_CODE_MORE 0x10 +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved_62_63:2; + /* Length of the buffer/packet in bytes */ + u64 len:14; + /* For TX, signals that the packet should be timestamped */ + u64 tstamp:1; + /* The RX error code */ + u64 code:7; + /* Physical address of the buffer */ + u64 addr:40; +#else + u64 addr:40; + u64 code:7; + u64 tstamp:1; + u64 len:14; + u64 reserved_62_63:2; +#endif + } s; +}; + +#define MIX_ORING1 0x0 +#define MIX_ORING2 0x8 +#define MIX_IRING1 0x10 +#define MIX_IRING2 0x18 +#define MIX_CTL 0x20 +#define MIX_IRHWM 0x28 +#define MIX_IRCNT 0x30 +#define MIX_ORHWM 0x38 +#define MIX_ORCNT 0x40 +#define MIX_ISR 0x48 +#define MIX_INTENA 0x50 +#define MIX_REMCNT 0x58 +#define MIX_BIST 0x78 + +#define AGL_GMX_PRT_CFG 0x10 +#define AGL_GMX_RX_FRM_CTL 0x18 +#define AGL_GMX_RX_FRM_MAX 0x30 +#define AGL_GMX_RX_JABBER 0x38 +#define AGL_GMX_RX_STATS_CTL 0x50 + +#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0 +#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8 +#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0 + +#define AGL_GMX_RX_ADR_CTL 0x100 +#define AGL_GMX_RX_ADR_CAM_EN 0x108 +#define AGL_GMX_RX_ADR_CAM0 0x180 +#define AGL_GMX_RX_ADR_CAM1 0x188 +#define AGL_GMX_RX_ADR_CAM2 0x190 +#define AGL_GMX_RX_ADR_CAM3 0x198 +#define AGL_GMX_RX_ADR_CAM4 0x1a0 +#define AGL_GMX_RX_ADR_CAM5 0x1a8 + +#define AGL_GMX_TX_CLK 0x208 +#define AGL_GMX_TX_STATS_CTL 0x268 +#define AGL_GMX_TX_CTL 0x270 +#define AGL_GMX_TX_STAT0 0x280 +#define AGL_GMX_TX_STAT1 0x288 +#define AGL_GMX_TX_STAT2 0x290 +#define AGL_GMX_TX_STAT3 0x298 +#define AGL_GMX_TX_STAT4 0x2a0 +#define AGL_GMX_TX_STAT5 0x2a8 +#define AGL_GMX_TX_STAT6 0x2b0 +#define AGL_GMX_TX_STAT7 0x2b8 +#define AGL_GMX_TX_STAT8 0x2c0 +#define AGL_GMX_TX_STAT9 0x2c8 + +struct octeon_mgmt { + struct net_device *netdev; + u64 mix; + u64 agl; + u64 agl_prt_ctl; + int port; + int irq; + bool has_rx_tstamp; + u64 *tx_ring; + dma_addr_t tx_ring_handle; + unsigned int tx_next; + unsigned int tx_next_clean; + unsigned int tx_current_fill; + /* The tx_list lock also protects the ring related variables */ + struct sk_buff_head tx_list; + + /* RX variables only touched in napi_poll. No locking necessary. */ + u64 *rx_ring; + dma_addr_t rx_ring_handle; + unsigned int rx_next; + unsigned int rx_next_fill; + unsigned int rx_current_fill; + struct sk_buff_head rx_list; + + spinlock_t lock; + unsigned int last_duplex; + unsigned int last_link; + unsigned int last_speed; + struct device *dev; + struct napi_struct napi; + struct tasklet_struct tx_clean_tasklet; + struct phy_device *phydev; + struct device_node *phy_np; + resource_size_t mix_phys; + resource_size_t mix_size; + resource_size_t agl_phys; + resource_size_t agl_size; + resource_size_t agl_prt_ctl_phys; + resource_size_t agl_prt_ctl_size; +}; + +static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) +{ + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); + mix_intena.s.ithena = enable ? 
1 : 0; + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) +{ + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); + mix_intena.s.othena = enable ? 1 : 0; + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 1); +} + +static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 0); +} + +static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 1); +} + +static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 0); +} + +static unsigned int ring_max_fill(unsigned int ring_size) +{ + return ring_size - 8; +} + +static unsigned int ring_size_to_bytes(unsigned int ring_size) +{ + return ring_size * sizeof(union mgmt_port_ring_entry); +} + +static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { + unsigned int size; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + + /* CN56XX pass 1 needs 8 bytes of padding. */ + size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; + + skb = netdev_alloc_skb(netdev, size); + if (!skb) + break; + skb_reserve(skb, NET_IP_ALIGN); + __skb_queue_tail(&p->rx_list, skb); + + re.d64 = 0; + re.s.len = size; + re.s.addr = dma_map_single(p->dev, skb->data, + size, + DMA_FROM_DEVICE); + + /* Put it in the ring. */ + p->rx_ring[p->rx_next_fill] = re.d64; + dma_sync_single_for_device(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->rx_next_fill = + (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill++; + /* Ring the bell. */ + cvmx_write_csr(p->mix + MIX_IRING2, 1); + } +} + +static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) +{ + union cvmx_mixx_orcnt mix_orcnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + int cleaned = 0; + unsigned long flags; + + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); + while (mix_orcnt.s.orcnt) { + spin_lock_irqsave(&p->tx_list.lock, flags); + + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); + + if (mix_orcnt.s.orcnt == 0) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + break; + } + + dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + re.d64 = p->tx_ring[p->tx_next_clean]; + p->tx_next_clean = + (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; + skb = __skb_dequeue(&p->tx_list); + + mix_orcnt.u64 = 0; + mix_orcnt.s.orcnt = 1; + + /* Acknowledge to hardware that we have the buffer. 
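+ * Writing 1 to MIX_ORCNT decrements the outstanding output-ring count by one,
+ * so the hardware knows this descriptor slot has been consumed.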
*/ + cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); + p->tx_current_fill--; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + + /* Read the hardware TX timestamp if one was recorded */ + if (unlikely(re.s.tstamp)) { + struct skb_shared_hwtstamps ts; + u64 ns; + + memset(&ts, 0, sizeof(ts)); + /* Read the timestamp */ + ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); + /* Remove the timestamp from the FIFO */ + cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); + /* Tell the kernel about the timestamp */ + ts.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &ts); + } + + dev_kfree_skb_any(skb); + cleaned++; + + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); + } + + if (cleaned && netif_queue_stopped(p->netdev)) + netif_wake_queue(p->netdev); +} + +static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) +{ + struct octeon_mgmt *p = (struct octeon_mgmt *)arg; + octeon_mgmt_clean_tx_buffers(p); + octeon_mgmt_enable_tx_irq(p); +} + +static void octeon_mgmt_update_rx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + unsigned long flags; + u64 drop, bad; + + /* These reads also clear the count registers. */ + drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); + bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); + + if (drop || bad) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.rx_errors += bad; + netdev->stats.rx_dropped += drop; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +static void octeon_mgmt_update_tx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + unsigned long flags; + + union cvmx_agl_gmx_txx_stat0 s0; + union cvmx_agl_gmx_txx_stat1 s1; + + /* These reads also clear the count registers. */ + s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); + s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); + + if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; + netdev->stats.collisions += s1.s.scol + s1.s.mcol; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +/* + * Dequeue a receive skb and its corresponding ring entry. The ring + * entry is returned, *pskb is updated to point to the skb. + */ +static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, + struct sk_buff **pskb) +{ + union mgmt_port_ring_entry re; + + dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + re.d64 = p->rx_ring[p->rx_next]; + p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill--; + *pskb = __skb_dequeue(&p->rx_list); + + dma_unmap_single(p->dev, re.s.addr, + ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, + DMA_FROM_DEVICE); + + return re.d64; +} + + +static int octeon_mgmt_receive_one(struct octeon_mgmt *p) +{ + struct net_device *netdev = p->netdev; + union cvmx_mixx_ircnt mix_ircnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + struct sk_buff *skb2; + struct sk_buff *skb_new; + union mgmt_port_ring_entry re2; + int rc = 1; + + + re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); + if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { + /* A good packet, send it up. 
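+ * RING_ENTRY_CODE_DONE means the whole frame fit in this buffer and re.s.len
+ * holds the length written back by the hardware.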
*/ + skb_put(skb, re.s.len); +good: + /* Process the RX timestamp if it was recorded */ + if (p->has_rx_tstamp) { + /* The first 8 bytes are the timestamp */ + u64 ns = *(u64 *)skb->data; + struct skb_shared_hwtstamps *ts; + ts = skb_hwtstamps(skb); + ts->hwtstamp = ns_to_ktime(ns); + __skb_pull(skb, 8); + } + skb->protocol = eth_type_trans(skb, netdev); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + netif_receive_skb(skb); + rc = 0; + } else if (re.s.code == RING_ENTRY_CODE_MORE) { + /* Packet split across skbs. This can happen if we + * increase the MTU. Buffers that are already in the + * rx ring can then end up being too small. As the rx + * ring is refilled, buffers sized for the new MTU + * will be used and we should go back to the normal + * non-split case. + */ + skb_put(skb, re.s.len); + do { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + if (re2.s.code != RING_ENTRY_CODE_MORE + && re2.s.code != RING_ENTRY_CODE_DONE) + goto split_error; + skb_put(skb2, re2.s.len); + skb_new = skb_copy_expand(skb, 0, skb2->len, + GFP_ATOMIC); + if (!skb_new) + goto split_error; + if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), + skb2->len)) + goto split_error; + skb_put(skb_new, skb2->len); + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + skb = skb_new; + } while (re2.s.code == RING_ENTRY_CODE_MORE); + goto good; + } else { + /* Some other error, discard it. */ + dev_kfree_skb_any(skb); + /* Error statistics are accumulated in + * octeon_mgmt_update_rx_stats. + */ + } + goto done; +split_error: + /* Discard the whole mess. */ + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + while (re2.s.code == RING_ENTRY_CODE_MORE) { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + dev_kfree_skb_any(skb2); + } + netdev->stats.rx_errors++; + +done: + /* Tell the hardware we processed a packet. */ + mix_ircnt.u64 = 0; + mix_ircnt.s.ircnt = 1; + cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); + return rc; +} + +static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) +{ + unsigned int work_done = 0; + union cvmx_mixx_ircnt mix_ircnt; + int rc; + + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); + while (work_done < budget && mix_ircnt.s.ircnt) { + + rc = octeon_mgmt_receive_one(p); + if (!rc) + work_done++; + + /* Check for more packets. */ + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); + } + + octeon_mgmt_rx_fill_ring(p->netdev); + + return work_done; +} + +static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); + struct net_device *netdev = p->netdev; + unsigned int work_done = 0; + + work_done = octeon_mgmt_receive_packets(p, budget); + + if (work_done < budget) { + /* We stopped because no more packets were available. */ + napi_complete(napi); + octeon_mgmt_enable_rx_irq(p); + } + octeon_mgmt_update_rx_stats(netdev); + + return work_done; +} + +/* Reset the hardware to clean state. 
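+ * The sequence below clears MIX_CTL, waits for the busy bit to drop, asserts
+ * the reset bit, delays, then warns if the MIX or AGL BIST results are non-zero.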
*/ +static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) +{ + union cvmx_mixx_ctl mix_ctl; + union cvmx_mixx_bist mix_bist; + union cvmx_agl_gmx_bist agl_gmx_bist; + + mix_ctl.u64 = 0; + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); + } while (mix_ctl.s.busy); + mix_ctl.s.reset = 1; + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + cvmx_read_csr(p->mix + MIX_CTL); + octeon_io_clk_delay(64); + + mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); + if (mix_bist.u64) + dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", + (unsigned long long)mix_bist.u64); + + agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); + if (agl_gmx_bist.u64) + dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", + (unsigned long long)agl_gmx_bist.u64); +} + +struct octeon_mgmt_cam_state { + u64 cam[6]; + u64 cam_mask; + int cam_index; +}; + +static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, + unsigned char *addr) +{ + int i; + + for (i = 0; i < 6; i++) + cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); + cs->cam_mask |= (1ULL << cs->cam_index); + cs->cam_index++; +} + +static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; + union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; + unsigned long flags; + unsigned int prev_packet_enable; + unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ + unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ + struct octeon_mgmt_cam_state cam_state; + struct netdev_hw_addr *ha; + int available_cam_entries; + + memset(&cam_state, 0, sizeof(cam_state)); + + if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) { + cam_mode = 0; + available_cam_entries = 8; + } else { + /* One CAM entry for the primary address, leaves seven + * for the secondary addresses. + */ + available_cam_entries = 7 - netdev->uc.count; + } + + if (netdev->flags & IFF_MULTICAST) { + if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || + netdev_mc_count(netdev) > available_cam_entries) + multicast_mode = 2; /* 2 - Accept all multicast. */ + else + multicast_mode = 0; /* 0 - Use CAM. */ + } + + if (cam_mode == 1) { + /* Add primary address. */ + octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); + netdev_for_each_uc_addr(ha, netdev) + octeon_mgmt_cam_state_add(&cam_state, ha->addr); + } + if (multicast_mode == 0) { + netdev_for_each_mc_addr(ha, netdev) + octeon_mgmt_cam_state_add(&cam_state, ha->addr); + } + + spin_lock_irqsave(&p->lock, flags); + + /* Disable packet I/O. */ + agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prev_packet_enable = agl_gmx_prtx.s.en; + agl_gmx_prtx.s.en = 0; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); + + adr_ctl.u64 = 0; + adr_ctl.s.cam_mode = cam_mode; + adr_ctl.s.mcst = multicast_mode; + adr_ctl.s.bcst = 1; /* Allow broadcast */ + + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); + + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); + + /* Restore packet I/O. 
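+ * Re-enable the port only if it was enabled before the CAM and address-filter
+ * registers were reprogrammed (state saved in prev_packet_enable above).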
*/ + agl_gmx_prtx.s.en = prev_packet_enable; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); + + spin_unlock_irqrestore(&p->lock, flags); +} + +static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) +{ + int r = eth_mac_addr(netdev, addr); + + if (r) + return r; + + octeon_mgmt_set_rx_filtering(netdev); + + return 0; +} + +static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; + + /* Limit the MTU to make sure the ethernet packets are between + * 64 bytes and 16383 bytes. + */ + if (size_without_fcs < 64 || size_without_fcs > 16383) { + dev_warn(p->dev, "MTU must be between %d and %d.\n", + 64 - OCTEON_MGMT_RX_HEADROOM, + 16383 - OCTEON_MGMT_RX_HEADROOM); + return -EINVAL; + } + + netdev->mtu = new_mtu; + + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); + cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, + (size_without_fcs + 7) & 0xfff8); + + return 0; +} + +static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct octeon_mgmt *p = netdev_priv(netdev); + union cvmx_mixx_isr mixx_isr; + + mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); + + /* Clear any pending interrupts */ + cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); + cvmx_read_csr(p->mix + MIX_ISR); + + if (mixx_isr.s.irthresh) { + octeon_mgmt_disable_rx_irq(p); + napi_schedule(&p->napi); + } + if (mixx_isr.s.orthresh) { + octeon_mgmt_disable_tx_irq(p); + tasklet_schedule(&p->tx_clean_tasklet); + } + + return IRQ_HANDLED; +} + +static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, + struct ifreq *rq, int cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + struct hwtstamp_config config; + union cvmx_mio_ptp_clock_cfg ptp; + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + bool have_hw_timestamps = false; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) /* reserved for future extensions */ + return -EINVAL; + + /* Check the status of hardware for tiemstamps */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + /* Get the current state of the PTP clock */ + ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); + if (!ptp.s.ext_clk_en) { + /* The clock has not been configured to use an + * external source. Program it to use the main clock + * reference. 
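+ * The compensation value written below is (NSEC_PER_SEC << 32) / sclk rate,
+ * i.e. nanoseconds per coprocessor clock in 32.32 fixed point.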
+ */ + u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); + if (!ptp.s.ptp_en) + cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); + pr_info("PTP Clock: Using sclk reference at %lld Hz\n", + (NSEC_PER_SEC << 32) / clock_comp); + } else { + /* The clock is already programmed to use a GPIO */ + u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); + pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", + ptp.s.ext_clk_in, + (NSEC_PER_SEC << 32) / clock_comp); + } + + /* Enable the clock if it wasn't done already */ + if (!ptp.s.ptp_en) { + ptp.s.ptp_en = 1; + cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64); + } + have_hw_timestamps = true; + } + + if (!have_hw_timestamps) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + p->has_rx_tstamp = false; + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 0; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + p->has_rx_tstamp = have_hw_timestamps; + config.rx_filter = HWTSTAMP_FILTER_ALL; + if (p->has_rx_tstamp) { + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 1; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + } + break; + default: + return -ERANGE; + } + + if (copy_to_user(rq->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static int octeon_mgmt_ioctl(struct net_device *netdev, + struct ifreq *rq, int cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); + default: + if (p->phydev) + return phy_mii_ioctl(p->phydev, rq, cmd); + return -EINVAL; + } +} + +static void octeon_mgmt_disable_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + /* Disable GMX before we make any changes. 
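+ * Clearing en/tx_en/rx_en stops packet I/O; on CN6XXX we then poll PRT_CFG
+ * for tx_idle/rx_idle with mdelay(1) between reads so in-flight frames can drain.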
*/ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prtx_cfg.s.en = 0; + prtx_cfg.s.tx_en = 0; + prtx_cfg.s.rx_en = 0; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + int i; + for (i = 0; i < 10; i++) { + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1) + break; + mdelay(1); + i++; + } + } +} + +static void octeon_mgmt_enable_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + /* Restore the GMX enable state only if link is set */ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prtx_cfg.s.tx_en = 1; + prtx_cfg.s.rx_en = 1; + prtx_cfg.s.en = 1; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); +} + +static void octeon_mgmt_update_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + + if (!p->phydev->link) + prtx_cfg.s.duplex = 1; + else + prtx_cfg.s.duplex = p->phydev->duplex; + + switch (p->phydev->speed) { + case 10: + prtx_cfg.s.speed = 0; + prtx_cfg.s.slottime = 0; + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.burst = 1; + prtx_cfg.s.speed_msb = 1; + } + break; + case 100: + prtx_cfg.s.speed = 0; + prtx_cfg.s.slottime = 0; + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.burst = 1; + prtx_cfg.s.speed_msb = 0; + } + break; + case 1000: + /* 1000 MBits is only supported on 6XXX chips */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.speed = 1; + prtx_cfg.s.speed_msb = 0; + /* Only matters for half-duplex */ + prtx_cfg.s.slottime = 1; + prtx_cfg.s.burst = p->phydev->duplex; + } + break; + case 0: /* No link */ + default: + break; + } + + /* Write the new GMX setting with the port still disabled. */ + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); + + /* Read GMX CFG again to make sure the config is completed. */ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + union cvmx_agl_gmx_txx_clk agl_clk; + union cvmx_agl_prtx_ctl prtx_ctl; + + prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); + /* MII (both speeds) and RGMII 1000 speed. */ + agl_clk.s.clk_cnt = 1; + if (prtx_ctl.s.mode == 0) { /* RGMII mode */ + if (p->phydev->speed == 10) + agl_clk.s.clk_cnt = 50; + else if (p->phydev->speed == 100) + agl_clk.s.clk_cnt = 5; + } + cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); + } +} + +static void octeon_mgmt_adjust_link(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + unsigned long flags; + int link_changed = 0; + + if (!p->phydev) + return; + + spin_lock_irqsave(&p->lock, flags); + + + if (!p->phydev->link && p->last_link) + link_changed = -1; + + if (p->phydev->link + && (p->last_duplex != p->phydev->duplex + || p->last_link != p->phydev->link + || p->last_speed != p->phydev->speed)) { + octeon_mgmt_disable_link(p); + link_changed = 1; + octeon_mgmt_update_link(p); + octeon_mgmt_enable_link(p); + } + + p->last_link = p->phydev->link; + p->last_speed = p->phydev->speed; + p->last_duplex = p->phydev->duplex; + + spin_unlock_irqrestore(&p->lock, flags); + + if (link_changed != 0) { + if (link_changed > 0) { + pr_info("%s: Link is up - %d/%s\n", netdev->name, + p->phydev->speed, + DUPLEX_FULL == p->phydev->duplex ? 
+ "Full" : "Half"); + } else { + pr_info("%s: Link is down\n", netdev->name); + } + } +} + +static int octeon_mgmt_init_phy(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (octeon_is_simulation() || p->phy_np == NULL) { + /* No PHYs in the simulator. */ + netif_carrier_on(netdev); + return 0; + } + + p->phydev = of_phy_connect(netdev, p->phy_np, + octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + + if (!p->phydev) + return -ENODEV; + + return 0; +} + +static int octeon_mgmt_open(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + union cvmx_mixx_ctl mix_ctl; + union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; + union cvmx_mixx_oring1 oring1; + union cvmx_mixx_iring1 iring1; + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + union cvmx_mixx_irhwm mix_irhwm; + union cvmx_mixx_orhwm mix_orhwm; + union cvmx_mixx_intena mix_intena; + struct sockaddr sa; + + /* Allocate ring buffers. */ + p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + GFP_KERNEL); + if (!p->tx_ring) + return -ENOMEM; + p->tx_ring_handle = + dma_map_single(p->dev, p->tx_ring, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->tx_next = 0; + p->tx_next_clean = 0; + p->tx_current_fill = 0; + + + p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + GFP_KERNEL); + if (!p->rx_ring) + goto err_nomem; + p->rx_ring_handle = + dma_map_single(p->dev, p->rx_ring, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + p->rx_next = 0; + p->rx_next_fill = 0; + p->rx_current_fill = 0; + + octeon_mgmt_reset_hw(p); + + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); + + /* Bring it out of reset if needed. */ + if (mix_ctl.s.reset) { + mix_ctl.s.reset = 0; + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); + } while (mix_ctl.s.reset); + } + + if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { + agl_gmx_inf_mode.u64 = 0; + agl_gmx_inf_mode.s.en = 1; + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + } + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* Force compensation values, as they are not + * determined properly by HW + */ + union cvmx_agl_gmx_drv_ctl drv_ctl; + + drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); + if (p->port) { + drv_ctl.s.byp_en1 = 1; + drv_ctl.s.nctl1 = 6; + drv_ctl.s.pctl1 = 6; + } else { + drv_ctl.s.byp_en = 1; + drv_ctl.s.nctl = 6; + drv_ctl.s.pctl = 6; + } + cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); + } + + oring1.u64 = 0; + oring1.s.obase = p->tx_ring_handle >> 3; + oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; + cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); + + iring1.u64 = 0; + iring1.s.ibase = p->rx_ring_handle >> 3; + iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; + cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); + + memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); + octeon_mgmt_set_mac_address(netdev, &sa); + + octeon_mgmt_change_mtu(netdev, netdev->mtu); + + /* Enable the port HW. Packets are not allowed until + * cvmx_mgmt_port_enable() is called. + */ + mix_ctl.u64 = 0; + mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */ + mix_ctl.s.en = 1; /* Enable the port */ + mix_ctl.s.nbtarb = 0; /* Arbitration mode */ + /* MII CB-request FIFO programmable high watermark */ + mix_ctl.s.mrq_hwm = 1; +#ifdef __LITTLE_ENDIAN + mix_ctl.s.lendian = 1; +#endif + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + + /* Read the PHY to find the mode of the interface. 
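+ * octeon_mgmt_init_phy() connects the PHY referenced by the "phy-handle"
+ * device-tree property; with no PHY node (or on the simulator) the carrier is
+ * simply forced on.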
*/ + if (octeon_mgmt_init_phy(netdev)) { + dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); + goto err_noirq; + } + + /* Set the mode of the interface, RGMII/MII. */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { + union cvmx_agl_prtx_ctl agl_prtx_ctl; + int rgmii_mode = (p->phydev->supported & + (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; + + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + + /* MII clocks counts are based on the 125Mhz + * reference, which has an 8nS period. So our delays + * need to be multiplied by this factor. + */ +#define NS_PER_PHY_CLK 8 + + /* Take the DLL and clock tree out of reset */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.clkrst = 0; + if (rgmii_mode) { + agl_prtx_ctl.s.dllrst = 0; + agl_prtx_ctl.s.clktx_byp = 0; + } + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ + + /* Wait for the DLL to lock. External 125 MHz + * reference clock must be stable at this point. + */ + ndelay(256 * NS_PER_PHY_CLK); + + /* Enable the interface */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.enable = 1; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + + /* Read the value back to force the previous write */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + + /* Enable the compensation controller */ + agl_prtx_ctl.s.comp = 1; + agl_prtx_ctl.s.drv_byp = 0; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + /* Force write out before wait. */ + cvmx_read_csr(p->agl_prt_ctl); + + /* For compensation state to lock. */ + ndelay(1040 * NS_PER_PHY_CLK); + + /* Default Interframe Gaps are too small. Recommended + * workaround is. + * + * AGL_GMX_TX_IFG[IFG1]=14 + * AGL_GMX_TX_IFG[IFG2]=10 + */ + cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); + } + + octeon_mgmt_rx_fill_ring(netdev); + + /* Clear statistics. */ + /* Clear on read. */ + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); + + cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); + + /* Clear any pending interrupts */ + cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); + + if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, + netdev)) { + dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); + goto err_noirq; + } + + /* Interrupt every single RX packet */ + mix_irhwm.u64 = 0; + mix_irhwm.s.irhwm = 0; + cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); + + /* Interrupt when we have 1 or more packets to clean. */ + mix_orhwm.u64 = 0; + mix_orhwm.s.orhwm = 0; + cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); + + /* Enable receive and transmit interrupts */ + mix_intena.u64 = 0; + mix_intena.s.ithena = 1; + mix_intena.s.othena = 1; + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); + + /* Enable packet I/O. */ + + rxx_frm_ctl.u64 = 0; + rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; + rxx_frm_ctl.s.pre_align = 1; + /* When set, disables the length check for non-min sized pkts + * with padding in the client data. 
+ */ + rxx_frm_ctl.s.pad_len = 1; + /* When set, disables the length check for VLAN pkts */ + rxx_frm_ctl.s.vlan_len = 1; + /* When set, PREAMBLE checking is less strict */ + rxx_frm_ctl.s.pre_free = 1; + /* Control Pause Frames can match station SMAC */ + rxx_frm_ctl.s.ctl_smac = 0; + /* Control Pause Frames can match globally assign Multicast address */ + rxx_frm_ctl.s.ctl_mcst = 1; + /* Forward pause information to TX block */ + rxx_frm_ctl.s.ctl_bck = 1; + /* Drop Control Pause Frames */ + rxx_frm_ctl.s.ctl_drp = 1; + /* Strip off the preamble */ + rxx_frm_ctl.s.pre_strp = 1; + /* This port is configured to send PREAMBLE+SFD to begin every + * frame. GMX checks that the PREAMBLE is sent correctly. + */ + rxx_frm_ctl.s.pre_chk = 1; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + + /* Configure the port duplex, speed and enables */ + octeon_mgmt_disable_link(p); + if (p->phydev) + octeon_mgmt_update_link(p); + octeon_mgmt_enable_link(p); + + p->last_link = 0; + p->last_speed = 0; + /* PHY is not present in simulator. The carrier is enabled + * while initializing the phy for simulator, leave it enabled. + */ + if (p->phydev) { + netif_carrier_off(netdev); + phy_start_aneg(p->phydev); + } + + netif_wake_queue(netdev); + napi_enable(&p->napi); + + return 0; +err_noirq: + octeon_mgmt_reset_hw(p); + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); +err_nomem: + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + return -ENOMEM; +} + +static int octeon_mgmt_stop(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + napi_disable(&p->napi); + netif_stop_queue(netdev); + + if (p->phydev) + phy_disconnect(p->phydev); + p->phydev = NULL; + + netif_carrier_off(netdev); + + octeon_mgmt_reset_hw(p); + + free_irq(p->irq, netdev); + + /* dma_unmap is a nop on Octeon, so just free everything. */ + skb_queue_purge(&p->tx_list); + skb_queue_purge(&p->rx_list); + + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); + + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + + return 0; +} + +static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + union mgmt_port_ring_entry re; + unsigned long flags; + int rv = NETDEV_TX_BUSY; + + re.d64 = 0; + re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); + re.s.len = skb->len; + re.s.addr = dma_map_single(p->dev, skb->data, + skb->len, + DMA_TO_DEVICE); + + spin_lock_irqsave(&p->tx_list.lock, flags); + + if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + netif_stop_queue(netdev); + spin_lock_irqsave(&p->tx_list.lock, flags); + } + + if (unlikely(p->tx_current_fill >= + ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + goto out; + } + + __skb_queue_tail(&p->tx_list, skb); + + /* Put it in the ring. 
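+ * The descriptor is stored at tx_ring[tx_next], the index advances modulo
+ * OCTEON_MGMT_TX_RING_SIZE, and the MIX_ORING2 write below tells the hardware
+ * one more entry is ready.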
*/ + p->tx_ring[p->tx_next] = re.d64; + p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; + p->tx_current_fill++; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_sync_single_for_device(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + + /* Ring the bell. */ + cvmx_write_csr(p->mix + MIX_ORING2, 1); + + netdev->trans_start = jiffies; + rv = NETDEV_TX_OK; +out: + octeon_mgmt_update_tx_stats(netdev); + return rv; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void octeon_mgmt_poll_controller(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + octeon_mgmt_receive_packets(p, 16); + octeon_mgmt_update_rx_stats(netdev); +} +#endif + +static void octeon_mgmt_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); + info->n_stats = 0; + info->testinfo_len = 0; + info->regdump_len = 0; + info->eedump_len = 0; +} + +static int octeon_mgmt_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (p->phydev) + return phy_ethtool_gset(p->phydev, cmd); + + return -EOPNOTSUPP; +} + +static int octeon_mgmt_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (p->phydev) + return phy_ethtool_sset(p->phydev, cmd); + + return -EOPNOTSUPP; +} + +static int octeon_mgmt_nway_reset(struct net_device *dev) +{ + struct octeon_mgmt *p = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (p->phydev) + return phy_start_aneg(p->phydev); + + return -EOPNOTSUPP; +} + +static const struct ethtool_ops octeon_mgmt_ethtool_ops = { + .get_drvinfo = octeon_mgmt_get_drvinfo, + .get_settings = octeon_mgmt_get_settings, + .set_settings = octeon_mgmt_set_settings, + .nway_reset = octeon_mgmt_nway_reset, + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops octeon_mgmt_ops = { + .ndo_open = octeon_mgmt_open, + .ndo_stop = octeon_mgmt_stop, + .ndo_start_xmit = octeon_mgmt_xmit, + .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, + .ndo_set_mac_address = octeon_mgmt_set_mac_address, + .ndo_do_ioctl = octeon_mgmt_ioctl, + .ndo_change_mtu = octeon_mgmt_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = octeon_mgmt_poll_controller, +#endif +}; + +static int octeon_mgmt_probe(struct platform_device *pdev) +{ + struct net_device *netdev; + struct octeon_mgmt *p; + const __be32 *data; + const u8 *mac; + struct resource *res_mix; + struct resource *res_agl; + struct resource *res_agl_prt_ctl; + int len; + int result; + + netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); + if (netdev == NULL) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + platform_set_drvdata(pdev, netdev); + p = netdev_priv(netdev); + netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, + OCTEON_MGMT_NAPI_WEIGHT); + + p->netdev = netdev; + p->dev = &pdev->dev; + p->has_rx_tstamp = false; + + data = of_get_property(pdev->dev.of_node, "cell-index", &len); + if (data && len == sizeof(*data)) { + p->port = be32_to_cpup(data); + } else { + dev_err(&pdev->dev, "no 'cell-index' property\n"); 
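+ /* Without a cell-index we cannot tell which MIX/AGL interface this node describes. */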
+ result = -ENXIO; + goto err; + } + + snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); + + result = platform_get_irq(pdev, 0); + if (result < 0) + goto err; + + p->irq = result; + + res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mix == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res_agl == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3); + if (res_agl_prt_ctl == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + p->mix_phys = res_mix->start; + p->mix_size = resource_size(res_mix); + p->agl_phys = res_agl->start; + p->agl_size = resource_size(res_agl); + p->agl_prt_ctl_phys = res_agl_prt_ctl->start; + p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); + + + if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, + res_mix->name)) { + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_mix->name); + result = -ENXIO; + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, + res_agl->name)) { + result = -ENXIO; + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_agl->name); + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, + p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { + result = -ENXIO; + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_agl_prt_ctl->name); + goto err; + } + + p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); + p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); + p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, + p->agl_prt_ctl_size); + spin_lock_init(&p->lock); + + skb_queue_head_init(&p->tx_list); + skb_queue_head_init(&p->rx_list); + tasklet_init(&p->tx_clean_tasklet, + octeon_mgmt_clean_tx_tasklet, (unsigned long)p); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->netdev_ops = &octeon_mgmt_ops; + netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; + + mac = of_get_mac_address(pdev->dev.of_node); + + if (mac) + memcpy(netdev->dev_addr, mac, ETH_ALEN); + else + eth_hw_addr_random(netdev); + + p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + + result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (result) + goto err; + + netif_carrier_off(netdev); + result = register_netdev(netdev); + if (result) + goto err; + + dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); + return 0; + +err: + free_netdev(netdev); + return result; +} + +static int octeon_mgmt_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + + unregister_netdev(netdev); + free_netdev(netdev); + return 0; +} + +static const struct of_device_id octeon_mgmt_match[] = { + { + .compatible = "cavium,octeon-5750-mix", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, octeon_mgmt_match); + +static struct platform_driver octeon_mgmt_driver = { + .driver = { + .name = "octeon_mgmt", + .of_match_table = octeon_mgmt_match, + }, + .probe = octeon_mgmt_probe, + .remove = octeon_mgmt_remove, +}; + +extern void octeon_mdiobus_force_mod_depencency(void); + +static int __init octeon_mgmt_mod_init(void) +{ + /* Force our mdiobus driver module to be loaded first. 
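+ * The management-port PHYs sit on the Octeon MDIO bus, so that bus driver
+ * must be registered before this driver binds its PHY.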
*/ + octeon_mdiobus_force_mod_depencency(); + return platform_driver_register(&octeon_mgmt_driver); +} + +static void __exit octeon_mgmt_mod_exit(void) +{ + platform_driver_unregister(&octeon_mgmt_driver); +} + +module_init(octeon_mgmt_mod_init); +module_exit(octeon_mgmt_mod_exit); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/ethernet/oki-semi/Kconfig b/drivers/net/ethernet/oki-semi/Kconfig new file mode 100644 index 000000000..ecd45f9ea --- /dev/null +++ b/drivers/net/ethernet/oki-semi/Kconfig @@ -0,0 +1,23 @@ +# +# OKI Semiconductor device configuration +# + +config NET_VENDOR_OKI + bool "OKI Semiconductor devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about OKI Semiconductor cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_OKI + +source "drivers/net/ethernet/oki-semi/pch_gbe/Kconfig" + +endif # NET_VENDOR_OKI diff --git a/drivers/net/ethernet/oki-semi/Makefile b/drivers/net/ethernet/oki-semi/Makefile new file mode 100644 index 000000000..b6780c877 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the OKI Semiconductor device drivers. +# + +obj-$(CONFIG_PCH_GBE) += pch_gbe/ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig new file mode 100644 index 000000000..5f7a35212 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -0,0 +1,23 @@ +# +# OKI Semiconductor device configuration +# + +config PCH_GBE + tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" + depends on PCI && (X86_32 || COMPILE_TEST) + select MII + select PTP_1588_CLOCK_PCH + select NET_PTP_CLASSIFY + ---help--- + This is a gigabit ethernet driver for EG20T PCH. + EG20T PCH is the platform controller hub that is used in Intel's + general embedded platform. EG20T PCH has Gigabit Ethernet interface. + Using this interface, it is able to access system devices connected + to Gigabit Ethernet. This driver enables Gigabit Ethernet function. + + This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ + Output Hub), ML7223/ML7831. + ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general + purpose use. + ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7223/ML7831 is completely compatible for Intel EG20T PCH. diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Makefile b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile new file mode 100644 index 000000000..31288d4ad --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_PCH_GBE) += pch_gbe.o + +pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o +pch_gbe-y += pch_gbe_api.o pch_gbe_main.o diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h new file mode 100644 index 000000000..2a55d6d53 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -0,0 +1,689 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This code was derived from the Intel e1000e Linux driver. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef _PCH_GBE_H_ +#define _PCH_GBE_H_ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * pch_gbe_regs_mac_adr - Structure holding values of mac address registers + * @high Denotes the 1st to 4th byte from the initial of MAC address + * @low Denotes the 5th to 6th byte from the initial of MAC address + */ +struct pch_gbe_regs_mac_adr { + u32 high; + u32 low; +}; +/** + * pch_udc_regs - Structure holding values of MAC registers + */ +struct pch_gbe_regs { + u32 INT_ST; + u32 INT_EN; + u32 MODE; + u32 RESET; + u32 TCPIP_ACC; + u32 EX_LIST; + u32 INT_ST_HOLD; + u32 PHY_INT_CTRL; + u32 MAC_RX_EN; + u32 RX_FCTRL; + u32 PAUSE_REQ; + u32 RX_MODE; + u32 TX_MODE; + u32 RX_FIFO_ST; + u32 TX_FIFO_ST; + u32 TX_FID; + u32 TX_RESULT; + u32 PAUSE_PKT1; + u32 PAUSE_PKT2; + u32 PAUSE_PKT3; + u32 PAUSE_PKT4; + u32 PAUSE_PKT5; + u32 reserve[2]; + struct pch_gbe_regs_mac_adr mac_adr[16]; + u32 ADDR_MASK; + u32 MIIM; + u32 MAC_ADDR_LOAD; + u32 RGMII_ST; + u32 RGMII_CTRL; + u32 reserve3[3]; + u32 DMA_CTRL; + u32 reserve4[3]; + u32 RX_DSC_BASE; + u32 RX_DSC_SIZE; + u32 RX_DSC_HW_P; + u32 RX_DSC_HW_P_HLD; + u32 RX_DSC_SW_P; + u32 reserve5[3]; + u32 TX_DSC_BASE; + u32 TX_DSC_SIZE; + u32 TX_DSC_HW_P; + u32 TX_DSC_HW_P_HLD; + u32 TX_DSC_SW_P; + u32 reserve6[3]; + u32 RX_DMA_ST; + u32 TX_DMA_ST; + u32 reserve7[2]; + u32 WOL_ST; + u32 WOL_CTRL; + u32 WOL_ADDR_MASK; +}; + +/* Interrupt Status */ +/* Interrupt Status Hold */ +/* Interrupt Enable */ +#define PCH_GBE_INT_RX_DMA_CMPLT 0x00000001 /* Receive DMA Transfer Complete */ +#define PCH_GBE_INT_RX_VALID 0x00000002 /* MAC Normal Receive Complete */ +#define PCH_GBE_INT_RX_FRAME_ERR 0x00000004 /* Receive frame error */ +#define PCH_GBE_INT_RX_FIFO_ERR 0x00000008 /* Receive FIFO Overflow */ +#define PCH_GBE_INT_RX_DMA_ERR 0x00000010 /* Receive DMA Transfer Error */ +#define PCH_GBE_INT_RX_DSC_EMP 0x00000020 /* Receive Descriptor Empty */ +#define PCH_GBE_INT_TX_CMPLT 0x00000100 /* MAC Transmission Complete */ +#define PCH_GBE_INT_TX_DMA_CMPLT 0x00000200 /* DMA Transfer Complete */ +#define PCH_GBE_INT_TX_FIFO_ERR 0x00000400 /* Transmission FIFO underflow. */ +#define PCH_GBE_INT_TX_DMA_ERR 0x00000800 /* Transmission DMA Error */ +#define PCH_GBE_INT_PAUSE_CMPLT 0x00001000 /* Pause Transmission complete */ +#define PCH_GBE_INT_MIIM_CMPLT 0x00010000 /* MIIM I/F Read completion */ +#define PCH_GBE_INT_PHY_INT 0x00100000 /* Interruption from PHY */ +#define PCH_GBE_INT_WOL_DET 0x01000000 /* Wake On LAN Event detection. 
*/ +#define PCH_GBE_INT_TCPIP_ERR 0x10000000 /* TCP/IP Accelerator Error */ + +/* Mode */ +#define PCH_GBE_MODE_MII_ETHER 0x00000000 /* GIGA Ethernet Mode [MII] */ +#define PCH_GBE_MODE_GMII_ETHER 0x80000000 /* GIGA Ethernet Mode [GMII] */ +#define PCH_GBE_MODE_HALF_DUPLEX 0x00000000 /* Duplex Mode [half duplex] */ +#define PCH_GBE_MODE_FULL_DUPLEX 0x40000000 /* Duplex Mode [full duplex] */ +#define PCH_GBE_MODE_FR_BST 0x04000000 /* Frame bursting is done */ + +/* Reset */ +#define PCH_GBE_ALL_RST 0x80000000 /* All reset */ +#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */ +#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */ + +/* TCP/IP Accelerator Control */ +#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ +#define PCH_GBE_RX_TCPIPACC_OFF 0x00000004 /* RX TCP/IP ACC Disabled */ +#define PCH_GBE_TX_TCPIPACC_EN 0x00000002 /* TX TCP/IP ACC Enable */ +#define PCH_GBE_RX_TCPIPACC_EN 0x00000001 /* RX TCP/IP ACC Enable */ + +/* MAC RX Enable */ +#define PCH_GBE_MRE_MAC_RX_EN 0x00000001 /* MAC Receive Enable */ + +/* RX Flow Control */ +#define PCH_GBE_FL_CTRL_EN 0x80000000 /* Pause packet is enabled */ + +/* Pause Packet Request */ +#define PCH_GBE_PS_PKT_RQ 0x80000000 /* Pause packet Request */ + +/* RX Mode */ +#define PCH_GBE_ADD_FIL_EN 0x80000000 /* Address Filtering Enable */ +/* Multicast Filtering Enable */ +#define PCH_GBE_MLT_FIL_EN 0x40000000 +/* Receive Almost Empty Threshold */ +#define PCH_GBE_RH_ALM_EMP_4 0x00000000 /* 4 words */ +#define PCH_GBE_RH_ALM_EMP_8 0x00004000 /* 8 words */ +#define PCH_GBE_RH_ALM_EMP_16 0x00008000 /* 16 words */ +#define PCH_GBE_RH_ALM_EMP_32 0x0000C000 /* 32 words */ +/* Receive Almost Full Threshold */ +#define PCH_GBE_RH_ALM_FULL_4 0x00000000 /* 4 words */ +#define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */ +#define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */ +#define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */ +/* RX FIFO Read Triger Threshold */ +#define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */ +#define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */ +#define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */ +#define PCH_GBE_RH_RD_TRG_32 0x00000600 /* 32 words */ +#define PCH_GBE_RH_RD_TRG_64 0x00000800 /* 64 words */ +#define PCH_GBE_RH_RD_TRG_128 0x00000A00 /* 128 words */ +#define PCH_GBE_RH_RD_TRG_256 0x00000C00 /* 256 words */ +#define PCH_GBE_RH_RD_TRG_512 0x00000E00 /* 512 words */ + +/* Receive Descriptor bit definitions */ +#define PCH_GBE_RXD_ACC_STAT_BCAST 0x00000400 +#define PCH_GBE_RXD_ACC_STAT_MCAST 0x00000200 +#define PCH_GBE_RXD_ACC_STAT_UCAST 0x00000100 +#define PCH_GBE_RXD_ACC_STAT_TCPIPOK 0x000000C0 +#define PCH_GBE_RXD_ACC_STAT_IPOK 0x00000080 +#define PCH_GBE_RXD_ACC_STAT_TCPOK 0x00000040 +#define PCH_GBE_RXD_ACC_STAT_IP6ERR 0x00000020 +#define PCH_GBE_RXD_ACC_STAT_OFLIST 0x00000010 +#define PCH_GBE_RXD_ACC_STAT_TYPEIP 0x00000008 +#define PCH_GBE_RXD_ACC_STAT_MACL 0x00000004 +#define PCH_GBE_RXD_ACC_STAT_PPPOE 0x00000002 +#define PCH_GBE_RXD_ACC_STAT_VTAGT 0x00000001 +#define PCH_GBE_RXD_GMAC_STAT_PAUSE 0x0200 +#define PCH_GBE_RXD_GMAC_STAT_MARBR 0x0100 +#define PCH_GBE_RXD_GMAC_STAT_MARMLT 0x0080 +#define PCH_GBE_RXD_GMAC_STAT_MARIND 0x0040 +#define PCH_GBE_RXD_GMAC_STAT_MARNOTMT 0x0020 +#define PCH_GBE_RXD_GMAC_STAT_TLONG 0x0010 +#define PCH_GBE_RXD_GMAC_STAT_TSHRT 0x0008 +#define PCH_GBE_RXD_GMAC_STAT_NOTOCTAL 0x0004 +#define PCH_GBE_RXD_GMAC_STAT_NBLERR 0x0002 +#define PCH_GBE_RXD_GMAC_STAT_CRCERR 0x0001 + +/* Transmit Descriptor bit definitions */ +#define 
PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF 0x0008 +#define PCH_GBE_TXD_CTRL_ITAG 0x0004 +#define PCH_GBE_TXD_CTRL_ICRC 0x0002 +#define PCH_GBE_TXD_CTRL_APAD 0x0001 +#define PCH_GBE_TXD_WORDS_SHIFT 2 +#define PCH_GBE_TXD_GMAC_STAT_CMPLT 0x2000 +#define PCH_GBE_TXD_GMAC_STAT_ABT 0x1000 +#define PCH_GBE_TXD_GMAC_STAT_EXCOL 0x0800 +#define PCH_GBE_TXD_GMAC_STAT_SNGCOL 0x0400 +#define PCH_GBE_TXD_GMAC_STAT_MLTCOL 0x0200 +#define PCH_GBE_TXD_GMAC_STAT_CRSER 0x0100 +#define PCH_GBE_TXD_GMAC_STAT_TLNG 0x0080 +#define PCH_GBE_TXD_GMAC_STAT_TSHRT 0x0040 +#define PCH_GBE_TXD_GMAC_STAT_LTCOL 0x0020 +#define PCH_GBE_TXD_GMAC_STAT_TFUNDFLW 0x0010 +#define PCH_GBE_TXD_GMAC_STAT_RTYCNT_MASK 0x000F + +/* TX Mode */ +#define PCH_GBE_TM_NO_RTRY 0x80000000 /* No Retransmission */ +#define PCH_GBE_TM_LONG_PKT 0x40000000 /* Long Packt TX Enable */ +#define PCH_GBE_TM_ST_AND_FD 0x20000000 /* Stare and Forward */ +#define PCH_GBE_TM_SHORT_PKT 0x10000000 /* Short Packet TX Enable */ +#define PCH_GBE_TM_LTCOL_RETX 0x08000000 /* Retransmission at Late Collision */ +/* Frame Start Threshold */ +#define PCH_GBE_TM_TH_TX_STRT_4 0x00000000 /* 4 words */ +#define PCH_GBE_TM_TH_TX_STRT_8 0x00004000 /* 8 words */ +#define PCH_GBE_TM_TH_TX_STRT_16 0x00008000 /* 16 words */ +#define PCH_GBE_TM_TH_TX_STRT_32 0x0000C000 /* 32 words */ +/* Transmit Almost Empty Threshold */ +#define PCH_GBE_TM_TH_ALM_EMP_4 0x00000000 /* 4 words */ +#define PCH_GBE_TM_TH_ALM_EMP_8 0x00000800 /* 8 words */ +#define PCH_GBE_TM_TH_ALM_EMP_16 0x00001000 /* 16 words */ +#define PCH_GBE_TM_TH_ALM_EMP_32 0x00001800 /* 32 words */ +#define PCH_GBE_TM_TH_ALM_EMP_64 0x00002000 /* 64 words */ +#define PCH_GBE_TM_TH_ALM_EMP_128 0x00002800 /* 128 words */ +#define PCH_GBE_TM_TH_ALM_EMP_256 0x00003000 /* 256 words */ +#define PCH_GBE_TM_TH_ALM_EMP_512 0x00003800 /* 512 words */ +/* Transmit Almost Full Threshold */ +#define PCH_GBE_TM_TH_ALM_FULL_4 0x00000000 /* 4 words */ +#define PCH_GBE_TM_TH_ALM_FULL_8 0x00000200 /* 8 words */ +#define PCH_GBE_TM_TH_ALM_FULL_16 0x00000400 /* 16 words */ +#define PCH_GBE_TM_TH_ALM_FULL_32 0x00000600 /* 32 words */ + +/* RX FIFO Status */ +#define PCH_GBE_RF_ALM_FULL 0x80000000 /* RX FIFO is almost full. */ +#define PCH_GBE_RF_ALM_EMP 0x40000000 /* RX FIFO is almost empty. */ +#define PCH_GBE_RF_RD_TRG 0x20000000 /* Become more than RH_RD_TRG. */ +#define PCH_GBE_RF_STRWD 0x1FFE0000 /* The word count of RX FIFO. */ +#define PCH_GBE_RF_RCVING 0x00010000 /* Stored in RX FIFO. 
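+ * (i.e. a frame is currently being written into the RX FIFO)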
*/ + +/* MAC Address Mask */ +#define PCH_GBE_BUSY 0x80000000 + +/* MIIM */ +#define PCH_GBE_MIIM_OPER_WRITE 0x04000000 +#define PCH_GBE_MIIM_OPER_READ 0x00000000 +#define PCH_GBE_MIIM_OPER_READY 0x04000000 +#define PCH_GBE_MIIM_PHY_ADDR_SHIFT 21 +#define PCH_GBE_MIIM_REG_ADDR_SHIFT 16 + +/* RGMII Status */ +#define PCH_GBE_LINK_UP 0x80000008 +#define PCH_GBE_RXC_SPEED_MSK 0x00000006 +#define PCH_GBE_RXC_SPEED_2_5M 0x00000000 /* 2.5MHz */ +#define PCH_GBE_RXC_SPEED_25M 0x00000002 /* 25MHz */ +#define PCH_GBE_RXC_SPEED_125M 0x00000004 /* 100MHz */ +#define PCH_GBE_DUPLEX_FULL 0x00000001 + +/* RGMII Control */ +#define PCH_GBE_CRS_SEL 0x00000010 +#define PCH_GBE_RGMII_RATE_125M 0x00000000 +#define PCH_GBE_RGMII_RATE_25M 0x00000008 +#define PCH_GBE_RGMII_RATE_2_5M 0x0000000C +#define PCH_GBE_RGMII_MODE_GMII 0x00000000 +#define PCH_GBE_RGMII_MODE_RGMII 0x00000002 +#define PCH_GBE_CHIP_TYPE_EXTERNAL 0x00000000 +#define PCH_GBE_CHIP_TYPE_INTERNAL 0x00000001 + +/* DMA Control */ +#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ +#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ + +/* RX DMA STATUS */ +#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE + +/* Wake On LAN Status */ +#define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */ +#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ + +/* The Frame registered in Address Recognizer */ +#define PCH_GBE_WLS_IND 0x00000002 +#define PCH_GBE_WLS_MP 0x00000001 /* Magic packet Address */ + +/* Wake On LAN Control */ +#define PCH_GBE_WLC_WOL_MODE 0x00010000 +#define PCH_GBE_WLC_IGN_TLONG 0x00000100 +#define PCH_GBE_WLC_IGN_TSHRT 0x00000080 +#define PCH_GBE_WLC_IGN_OCTER 0x00000040 +#define PCH_GBE_WLC_IGN_NBLER 0x00000020 +#define PCH_GBE_WLC_IGN_CRCER 0x00000010 +#define PCH_GBE_WLC_BR 0x00000008 +#define PCH_GBE_WLC_MLT 0x00000004 +#define PCH_GBE_WLC_IND 0x00000002 +#define PCH_GBE_WLC_MP 0x00000001 + +/* Wake On LAN Address Mask */ +#define PCH_GBE_WLA_BUSY 0x80000000 + + + +/* TX/RX descriptor defines */ +#define PCH_GBE_MAX_TXD 4096 +#define PCH_GBE_DEFAULT_TXD 256 +#define PCH_GBE_MIN_TXD 8 +#define PCH_GBE_MAX_RXD 4096 +#define PCH_GBE_DEFAULT_RXD 256 +#define PCH_GBE_MIN_RXD 8 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define PCH_GBE_TX_DESC_MULTIPLE 8 +#define PCH_GBE_RX_DESC_MULTIPLE 8 + +/* Read/Write operation is done through MII Management IF */ +#define PCH_GBE_HAL_MIIM_READ ((u32)0x00000000) +#define PCH_GBE_HAL_MIIM_WRITE ((u32)0x04000000) + +/* flow control values */ +#define PCH_GBE_FC_NONE 0 +#define PCH_GBE_FC_RX_PAUSE 1 +#define PCH_GBE_FC_TX_PAUSE 2 +#define PCH_GBE_FC_FULL 3 +#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL + + +struct pch_gbe_hw; +/** + * struct pch_gbe_functions - HAL APi function pointer + * @get_bus_info: for pch_gbe_hal_get_bus_info + * @init_hw: for pch_gbe_hal_init_hw + * @read_phy_reg: for pch_gbe_hal_read_phy_reg + * @write_phy_reg: for pch_gbe_hal_write_phy_reg + * @reset_phy: for pch_gbe_hal_phy_hw_reset + * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset + * @power_up_phy: for pch_gbe_hal_power_up_phy + * @power_down_phy: for pch_gbe_hal_power_down_phy + * @read_mac_addr: for pch_gbe_hal_read_mac_addr + */ +struct pch_gbe_functions { + void (*get_bus_info) (struct pch_gbe_hw *); + s32 (*init_hw) (struct pch_gbe_hw *); + s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *); + s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16); + void (*reset_phy) (struct pch_gbe_hw *); + void (*sw_reset_phy) (struct pch_gbe_hw *); + void (*power_up_phy) (struct 
pch_gbe_hw *hw); + void (*power_down_phy) (struct pch_gbe_hw *hw); + s32 (*read_mac_addr) (struct pch_gbe_hw *); +}; + +/** + * struct pch_gbe_mac_info - MAC information + * @addr[6]: Store the MAC address + * @fc: Mode of flow control + * @fc_autoneg: Auto negotiation enable for flow control setting + * @tx_fc_enable: Enable flag of Transmit flow control + * @max_frame_size: Max transmit frame size + * @min_frame_size: Min transmit frame size + * @autoneg: Auto negotiation enable + * @link_speed: Link speed + * @link_duplex: Link duplex + */ +struct pch_gbe_mac_info { + u8 addr[6]; + u8 fc; + u8 fc_autoneg; + u8 tx_fc_enable; + u32 max_frame_size; + u32 min_frame_size; + u8 autoneg; + u16 link_speed; + u16 link_duplex; +}; + +/** + * struct pch_gbe_phy_info - PHY information + * @addr: PHY address + * @id: PHY's identifier + * @revision: PHY's revision + * @reset_delay_us: HW reset delay time[us] + * @autoneg_advertised: Autoneg advertised + */ +struct pch_gbe_phy_info { + u32 addr; + u32 id; + u32 revision; + u32 reset_delay_us; + u16 autoneg_advertised; +}; + +/*! + * @ingroup Gigabit Ether driver Layer + * @struct pch_gbe_bus_info + * @brief Bus information + */ +struct pch_gbe_bus_info { + u8 type; + u8 speed; + u8 width; +}; + +/*! + * @ingroup Gigabit Ether driver Layer + * @struct pch_gbe_hw + * @brief Hardware information + */ +struct pch_gbe_hw { + void *back; + + struct pch_gbe_regs __iomem *reg; + spinlock_t miim_lock; + + const struct pch_gbe_functions *func; + struct pch_gbe_mac_info mac; + struct pch_gbe_phy_info phy; + struct pch_gbe_bus_info bus; +}; + +/** + * struct pch_gbe_rx_desc - Receive Descriptor + * @buffer_addr: RX Frame Buffer Address + * @tcp_ip_status: TCP/IP Accelerator Status + * @rx_words_eob: RX word count and Byte position + * @gbec_status: GMAC Status + * @dma_status: DMA Status + * @reserved1: Reserved + * @reserved2: Reserved + */ +struct pch_gbe_rx_desc { + u32 buffer_addr; + u32 tcp_ip_status; + u16 rx_words_eob; + u16 gbec_status; + u8 dma_status; + u8 reserved1; + u16 reserved2; +}; + +/** + * struct pch_gbe_tx_desc - Transmit Descriptor + * @buffer_addr: TX Frame Buffer Address + * @length: Data buffer length + * @reserved1: Reserved + * @tx_words_eob: TX word count and Byte position + * @tx_frame_ctrl: TX Frame Control + * @dma_status: DMA Status + * @reserved2: Reserved + * @gbec_status: GMAC Status + */ +struct pch_gbe_tx_desc { + u32 buffer_addr; + u16 length; + u16 reserved1; + u16 tx_words_eob; + u16 tx_frame_ctrl; + u8 dma_status; + u8 reserved2; + u16 gbec_status; +}; + + +/** + * struct pch_gbe_buffer - Buffer information + * @skb: pointer to a socket buffer + * @dma: DMA address + * @time_stamp: time stamp + * @length: data size + */ +struct pch_gbe_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned char *rx_buffer; + unsigned long time_stamp; + u16 length; + bool mapped; +}; + +/** + * struct pch_gbe_tx_ring - tx ring information + * @tx_lock: spinlock structs + * @desc: pointer to the descriptor ring memory + * @dma: physical address of the descriptor ring + * @size: length of descriptor ring in bytes + * @count: number of descriptors in the ring + * @next_to_use: next descriptor to associate a buffer with + * @next_to_clean: next descriptor to check for DD status bit + * @buffer_info: array of buffer information structs + */ +struct pch_gbe_tx_ring { + spinlock_t tx_lock; + struct pch_gbe_tx_desc *desc; + dma_addr_t dma; + unsigned int size; + unsigned int count; + unsigned int next_to_use; + unsigned int next_to_clean; + 
struct pch_gbe_buffer *buffer_info; +}; + +/** + * struct pch_gbe_rx_ring - rx ring information + * @desc: pointer to the descriptor ring memory + * @dma: physical address of the descriptor ring + * @size: length of descriptor ring in bytes + * @count: number of descriptors in the ring + * @next_to_use: next descriptor to associate a buffer with + * @next_to_clean: next descriptor to check for DD status bit + * @buffer_info: array of buffer information structs + */ +struct pch_gbe_rx_ring { + struct pch_gbe_rx_desc *desc; + dma_addr_t dma; + unsigned char *rx_buff_pool; + dma_addr_t rx_buff_pool_logic; + unsigned int rx_buff_pool_size; + unsigned int size; + unsigned int count; + unsigned int next_to_use; + unsigned int next_to_clean; + struct pch_gbe_buffer *buffer_info; +}; + +/** + * struct pch_gbe_hw_stats - Statistics counters collected by the MAC + * @rx_packets: total packets received + * @tx_packets: total packets transmitted + * @rx_bytes: total bytes received + * @tx_bytes: total bytes transmitted + * @rx_errors: bad packets received + * @tx_errors: packet transmit problems + * @rx_dropped: no space in Linux buffers + * @tx_dropped: no space available in Linux + * @multicast: multicast packets received + * @collisions: collisions + * @rx_crc_errors: received packet with crc error + * @rx_frame_errors: received frame alignment error + * @rx_alloc_buff_failed: allocate failure of a receive buffer + * @tx_length_errors: transmit length error + * @tx_aborted_errors: transmit aborted error + * @tx_carrier_errors: transmit carrier error + * @tx_timeout_count: Number of transmit timeout + * @tx_restart_count: Number of transmit restert + * @intr_rx_dsc_empty_count: Interrupt count of receive descriptor empty + * @intr_rx_frame_err_count: Interrupt count of receive frame error + * @intr_rx_fifo_err_count: Interrupt count of receive FIFO error + * @intr_rx_dma_err_count: Interrupt count of receive DMA error + * @intr_tx_fifo_err_count: Interrupt count of transmit FIFO error + * @intr_tx_dma_err_count: Interrupt count of transmit DMA error + * @intr_tcpip_err_count: Interrupt count of TCP/IP Accelerator + */ +struct pch_gbe_hw_stats { + u32 rx_packets; + u32 tx_packets; + u32 rx_bytes; + u32 tx_bytes; + u32 rx_errors; + u32 tx_errors; + u32 rx_dropped; + u32 tx_dropped; + u32 multicast; + u32 collisions; + u32 rx_crc_errors; + u32 rx_frame_errors; + u32 rx_alloc_buff_failed; + u32 tx_length_errors; + u32 tx_aborted_errors; + u32 tx_carrier_errors; + u32 tx_timeout_count; + u32 tx_restart_count; + u32 intr_rx_dsc_empty_count; + u32 intr_rx_frame_err_count; + u32 intr_rx_fifo_err_count; + u32 intr_rx_dma_err_count; + u32 intr_tx_fifo_err_count; + u32 intr_tx_dma_err_count; + u32 intr_tcpip_err_count; +}; + +/** + * struct pch_gbe_privdata - PCI Device ID driver data + * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software + * @phy_disable_hibernate: Bool, disable PHY hibernation + * @platform_init: Platform initialization callback, called from + * probe, prior to PHY initialization. 
+ */ +struct pch_gbe_privdata { + bool phy_tx_clk_delay; + bool phy_disable_hibernate; + int (*platform_init)(struct pci_dev *pdev); +}; + +/** + * struct pch_gbe_adapter - board specific private data structure + * @stats_lock: Spinlock structure for status + * @ethtool_lock: Spinlock structure for ethtool + * @irq_sem: Semaphore for interrupt + * @netdev: Pointer of network device structure + * @pdev: Pointer of pci device structure + * @polling_netdev: Pointer of polling network device structure + * @napi: NAPI structure + * @hw: Pointer of hardware structure + * @stats: Hardware status + * @reset_task: Reset task + * @mii: MII information structure + * @watchdog_timer: Watchdog timer list + * @wake_up_evt: Wake up event + * @config_space: Configuration space + * @msg_enable: Driver message level + * @led_status: LED status + * @tx_ring: Pointer of Tx descriptor ring structure + * @rx_ring: Pointer of Rx descriptor ring structure + * @rx_buffer_len: Receive buffer length + * @tx_queue_len: Transmit queue length + * @have_msi: PCI MSI mode flag + * @pch_gbe_privdata: PCI Device ID driver_data + */ + +struct pch_gbe_adapter { + spinlock_t stats_lock; + spinlock_t ethtool_lock; + atomic_t irq_sem; + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device *polling_netdev; + struct napi_struct napi; + struct pch_gbe_hw hw; + struct pch_gbe_hw_stats stats; + struct work_struct reset_task; + struct mii_if_info mii; + struct timer_list watchdog_timer; + u32 wake_up_evt; + u32 *config_space; + unsigned long led_status; + struct pch_gbe_tx_ring *tx_ring; + struct pch_gbe_rx_ring *rx_ring; + unsigned long rx_buffer_len; + unsigned long tx_queue_len; + bool have_msi; + bool rx_stop_flag; + int hwts_tx_en; + int hwts_rx_en; + struct pci_dev *ptp_pdev; + struct pch_gbe_privdata *pdata; +}; + +#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw) + +extern const char pch_driver_version[]; + +/* pch_gbe_main.c */ +int pch_gbe_up(struct pch_gbe_adapter *adapter); +void pch_gbe_down(struct pch_gbe_adapter *adapter); +void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter); +void pch_gbe_reset(struct pch_gbe_adapter *adapter); +int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *txdr); +int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rxdr); +void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring); +void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring); +void pch_gbe_update_stats(struct pch_gbe_adapter *adapter); +u32 pch_ch_control_read(struct pci_dev *pdev); +void pch_ch_control_write(struct pci_dev *pdev, u32 val); +u32 pch_ch_event_read(struct pci_dev *pdev); +void pch_ch_event_write(struct pci_dev *pdev, u32 val); +u32 pch_src_uuid_lo_read(struct pci_dev *pdev); +u32 pch_src_uuid_hi_read(struct pci_dev *pdev); +u64 pch_rx_snap_read(struct pci_dev *pdev); +u64 pch_tx_snap_read(struct pci_dev *pdev); +int pch_set_station_address(u8 *addr, struct pci_dev *pdev); + +/* pch_gbe_param.c */ +void pch_gbe_check_options(struct pch_gbe_adapter *adapter); + +/* pch_gbe_ethtool.c */ +void pch_gbe_set_ethtool_ops(struct net_device *netdev); + +/* pch_gbe_mac.c */ +s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw); +s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw); +u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, + u16 data); +#endif /* _PCH_GBE_H_ */ diff --git 
a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c new file mode 100644 index 000000000..512503635 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -0,0 +1,262 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This code was derived from the Intel e1000e Linux driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#include "pch_gbe.h" +#include "pch_gbe_phy.h" +#include "pch_gbe_api.h" + +/* bus type values */ +#define pch_gbe_bus_type_unknown 0 +#define pch_gbe_bus_type_pci 1 +#define pch_gbe_bus_type_pcix 2 +#define pch_gbe_bus_type_pci_express 3 +#define pch_gbe_bus_type_reserved 4 + +/* bus speed values */ +#define pch_gbe_bus_speed_unknown 0 +#define pch_gbe_bus_speed_33 1 +#define pch_gbe_bus_speed_66 2 +#define pch_gbe_bus_speed_100 3 +#define pch_gbe_bus_speed_120 4 +#define pch_gbe_bus_speed_133 5 +#define pch_gbe_bus_speed_2500 6 +#define pch_gbe_bus_speed_reserved 7 + +/* bus width values */ +#define pch_gbe_bus_width_unknown 0 +#define pch_gbe_bus_width_pcie_x1 1 +#define pch_gbe_bus_width_pcie_x2 2 +#define pch_gbe_bus_width_pcie_x4 4 +#define pch_gbe_bus_width_32 5 +#define pch_gbe_bus_width_64 6 +#define pch_gbe_bus_width_reserved 7 + +/** + * pch_gbe_plat_get_bus_info - Obtain bus information for adapter + * @hw: Pointer to the HW structure + */ +static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw) +{ + hw->bus.type = pch_gbe_bus_type_pci_express; + hw->bus.speed = pch_gbe_bus_speed_2500; + hw->bus.width = pch_gbe_bus_width_pcie_x1; +} + +/** + * pch_gbe_plat_init_hw - Initialize hardware + * @hw: Pointer to the HW structure + * Returns: + * 0: Successfully + * Negative value: Failed-EBUSY + */ +static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw) +{ + s32 ret_val; + + ret_val = pch_gbe_phy_get_id(hw); + if (ret_val) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n"); + return ret_val; + } + pch_gbe_phy_init_setting(hw); + /* Setup Mac interface option RGMII */ +#ifdef PCH_GBE_MAC_IFOP_RGMII + pch_gbe_phy_set_rgmii(hw); +#endif + return ret_val; +} + +static const struct pch_gbe_functions pch_gbe_ops = { + .get_bus_info = pch_gbe_plat_get_bus_info, + .init_hw = pch_gbe_plat_init_hw, + .read_phy_reg = pch_gbe_phy_read_reg_miic, + .write_phy_reg = pch_gbe_phy_write_reg_miic, + .reset_phy = pch_gbe_phy_hw_reset, + .sw_reset_phy = pch_gbe_phy_sw_reset, + .power_up_phy = pch_gbe_phy_power_up, + .power_down_phy = pch_gbe_phy_power_down, + .read_mac_addr = pch_gbe_mac_read_mac_addr +}; + +/** + * pch_gbe_plat_init_function_pointers - Init func ptrs + * @hw: Pointer to the HW structure + */ +static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) +{ + /* Set PHY parameter */ + hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US; + /* Set function pointers */ + hw->func = &pch_gbe_ops; +} + +/** + * 
pch_gbe_hal_setup_init_funcs - Initializes function pointers + * @hw: Pointer to the HW structure + * Returns: + * 0: Successfully + * ENOSYS: Function is not registered + */ +s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw) +{ + if (!hw->reg) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: Registers not mapped\n"); + return -ENOSYS; + } + pch_gbe_plat_init_function_pointers(hw); + return 0; +} + +/** + * pch_gbe_hal_get_bus_info - Obtain bus information for adapter + * @hw: Pointer to the HW structure + */ +void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) +{ + if (!hw->func->get_bus_info) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->get_bus_info(hw); +} + +/** + * pch_gbe_hal_init_hw - Initialize hardware + * @hw: Pointer to the HW structure + * Returns: + * 0: Successfully + * ENOSYS: Function is not registered + */ +s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) +{ + if (!hw->func->init_hw) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return -ENOSYS; + } + return hw->func->init_hw(hw); +} + +/** + * pch_gbe_hal_read_phy_reg - Reads PHY register + * @hw: Pointer to the HW structure + * @offset: The register to read + * @data: The buffer to store the 16-bit read. + * Returns: + * 0: Successfully + * Negative value: Failed + */ +s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, + u16 *data) +{ + if (!hw->func->read_phy_reg) + return 0; + return hw->func->read_phy_reg(hw, offset, data); +} + +/** + * pch_gbe_hal_write_phy_reg - Writes PHY register + * @hw: Pointer to the HW structure + * @offset: The register to read + * @data: The value to write. 
+ * Returns: + * 0: Successfully + * Negative value: Failed + */ +s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, + u16 data) +{ + if (!hw->func->write_phy_reg) + return 0; + return hw->func->write_phy_reg(hw, offset, data); +} + +/** + * pch_gbe_hal_phy_hw_reset - Hard PHY reset + * @hw: Pointer to the HW structure + */ +void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw) +{ + if (!hw->func->reset_phy) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->reset_phy(hw); +} + +/** + * pch_gbe_hal_phy_sw_reset - Soft PHY reset + * @hw: Pointer to the HW structure + */ +void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) +{ + if (!hw->func->sw_reset_phy) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return; + } + hw->func->sw_reset_phy(hw); +} + +/** + * pch_gbe_hal_read_mac_addr - Reads MAC address + * @hw: Pointer to the HW structure + * Returns: + * 0: Successfully + * ENOSYS: Function is not registered + */ +s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw) +{ + if (!hw->func->read_mac_addr) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "ERROR: configuration\n"); + return -ENOSYS; + } + return hw->func->read_mac_addr(hw); +} + +/** + * pch_gbe_hal_power_up_phy - Power up PHY + * @hw: Pointer to the HW structure + */ +void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw) +{ + if (hw->func->power_up_phy) + hw->func->power_up_phy(hw); +} + +/** + * pch_gbe_hal_power_down_phy - Power down PHY + * @hw: Pointer to the HW structure + */ +void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw) +{ + if (hw->func->power_down_phy) + hw->func->power_down_phy(hw); +} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h new file mode 100644 index 000000000..91ce07c83 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This code was derived from the Intel e1000e Linux driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ +#ifndef _PCH_GBE_API_H_ +#define _PCH_GBE_API_H_ + +#include "pch_gbe_phy.h" + +s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw); +void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw); +s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw); +s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data); +s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data); +void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw); +void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw); +s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw); +void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw); +void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw); + +#endif diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c new file mode 100644 index 000000000..f6fcf7450 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -0,0 +1,512 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This code was derived from the Intel e1000e Linux driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#include "pch_gbe.h" +#include "pch_gbe_api.h" + +/** + * pch_gbe_stats - Stats item information + */ +struct pch_gbe_stats { + char string[ETH_GSTRING_LEN]; + size_t size; + size_t offset; +}; + +#define PCH_GBE_STAT(m) \ +{ \ + .string = #m, \ + .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \ + .offset = offsetof(struct pch_gbe_hw_stats, m), \ +} + +/** + * pch_gbe_gstrings_stats - ethtool information status name list + */ +static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = { + PCH_GBE_STAT(rx_packets), + PCH_GBE_STAT(tx_packets), + PCH_GBE_STAT(rx_bytes), + PCH_GBE_STAT(tx_bytes), + PCH_GBE_STAT(rx_errors), + PCH_GBE_STAT(tx_errors), + PCH_GBE_STAT(rx_dropped), + PCH_GBE_STAT(tx_dropped), + PCH_GBE_STAT(multicast), + PCH_GBE_STAT(collisions), + PCH_GBE_STAT(rx_crc_errors), + PCH_GBE_STAT(rx_frame_errors), + PCH_GBE_STAT(rx_alloc_buff_failed), + PCH_GBE_STAT(tx_length_errors), + PCH_GBE_STAT(tx_aborted_errors), + PCH_GBE_STAT(tx_carrier_errors), + PCH_GBE_STAT(tx_timeout_count), + PCH_GBE_STAT(tx_restart_count), + PCH_GBE_STAT(intr_rx_dsc_empty_count), + PCH_GBE_STAT(intr_rx_frame_err_count), + PCH_GBE_STAT(intr_rx_fifo_err_count), + PCH_GBE_STAT(intr_rx_dma_err_count), + PCH_GBE_STAT(intr_tx_fifo_err_count), + PCH_GBE_STAT(intr_tx_dma_err_count), + PCH_GBE_STAT(intr_tcpip_err_count) +}; + +#define PCH_GBE_QUEUE_STATS_LEN 0 +#define PCH_GBE_GLOBAL_STATS_LEN ARRAY_SIZE(pch_gbe_gstrings_stats) +#define PCH_GBE_STATS_LEN (PCH_GBE_GLOBAL_STATS_LEN + PCH_GBE_QUEUE_STATS_LEN) + +#define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4) +#define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN) +/** + * pch_gbe_get_settings - Get device-specific settings + * @netdev: Network interface device structure + * @ecmd: Ethtool command + * Returns: + * 0: Successful. 
+ * Negative value: Failed. + */ +static int pch_gbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + int ret; + + ret = mii_ethtool_gset(&adapter->mii, ecmd); + ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half); + ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half); + + if (!netif_carrier_ok(adapter->netdev)) + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + return ret; +} + +/** + * pch_gbe_set_settings - Set device-specific settings + * @netdev: Network interface device structure + * @ecmd: Ethtool command + * Returns: + * 0: Successful. + * Negative value: Failed. + */ +static int pch_gbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + u32 speed = ethtool_cmd_speed(ecmd); + int ret; + + pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET); + + /* when set_settings() is called with a ethtool_cmd previously + * filled by get_settings() on a down link, speed is -1: */ + if (speed == UINT_MAX) { + speed = SPEED_1000; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = DUPLEX_FULL; + } + ret = mii_ethtool_sset(&adapter->mii, ecmd); + if (ret) { + netdev_err(netdev, "Error: mii_ethtool_sset\n"); + return ret; + } + hw->mac.link_speed = speed; + hw->mac.link_duplex = ecmd->duplex; + hw->phy.autoneg_advertised = ecmd->advertising; + hw->mac.autoneg = ecmd->autoneg; + + /* reset the link */ + if (netif_running(adapter->netdev)) { + pch_gbe_down(adapter); + ret = pch_gbe_up(adapter); + } else { + pch_gbe_reset(adapter); + } + return ret; +} + +/** + * pch_gbe_get_regs_len - Report the size of device registers + * @netdev: Network interface device structure + * Returns: the size of device registers. 
+ */ +static int pch_gbe_get_regs_len(struct net_device *netdev) +{ + return PCH_GBE_REGS_LEN * (int)sizeof(u32); +} + +/** + * pch_gbe_get_drvinfo - Report driver information + * @netdev: Network interface device structure + * @drvinfo: Driver information structure + */ +static void pch_gbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = pch_gbe_get_regs_len(netdev); +} + +/** + * pch_gbe_get_regs - Get device registers + * @netdev: Network interface device structure + * @regs: Ethtool register structure + * @p: Buffer pointer of read device register date + */ +static void pch_gbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 *regs_buff = p; + u16 i, tmp; + + regs->version = 0x1000000 | (__u32)pdev->revision << 16 | pdev->device; + for (i = 0; i < PCH_GBE_MAC_REGS_LEN; i++) + *regs_buff++ = ioread32(&hw->reg->INT_ST + i); + /* PHY register */ + for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) { + pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp); + *regs_buff++ = tmp; + } +} + +/** + * pch_gbe_get_wol - Report whether Wake-on-Lan is enabled + * @netdev: Network interface device structure + * @wol: Wake-on-Lan information + */ +static void pch_gbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + if ((adapter->wake_up_evt & PCH_GBE_WLC_IND)) + wol->wolopts |= WAKE_UCAST; + if ((adapter->wake_up_evt & PCH_GBE_WLC_MLT)) + wol->wolopts |= WAKE_MCAST; + if ((adapter->wake_up_evt & PCH_GBE_WLC_BR)) + wol->wolopts |= WAKE_BCAST; + if ((adapter->wake_up_evt & PCH_GBE_WLC_MP)) + wol->wolopts |= WAKE_MAGIC; +} + +/** + * pch_gbe_set_wol - Turn Wake-on-Lan on or off + * @netdev: Network interface device structure + * @wol: Pointer of wake-on-Lan information straucture + * Returns: + * 0: Successful. + * Negative value: Failed. + */ +static int pch_gbe_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + if ((wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wake_up_evt = 0; + + if ((wol->wolopts & WAKE_UCAST)) + adapter->wake_up_evt |= PCH_GBE_WLC_IND; + if ((wol->wolopts & WAKE_MCAST)) + adapter->wake_up_evt |= PCH_GBE_WLC_MLT; + if ((wol->wolopts & WAKE_BCAST)) + adapter->wake_up_evt |= PCH_GBE_WLC_BR; + if ((wol->wolopts & WAKE_MAGIC)) + adapter->wake_up_evt |= PCH_GBE_WLC_MP; + return 0; +} + +/** + * pch_gbe_nway_reset - Restart autonegotiation + * @netdev: Network interface device structure + * Returns: + * 0: Successful. + * Negative value: Failed. 
+ */ +static int pch_gbe_nway_reset(struct net_device *netdev) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + return mii_nway_restart(&adapter->mii); +} + +/** + * pch_gbe_get_ringparam - Report ring sizes + * @netdev: Network interface device structure + * @ring: Ring param structure + */ +static void pch_gbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_tx_ring *txdr = adapter->tx_ring; + struct pch_gbe_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = PCH_GBE_MAX_RXD; + ring->tx_max_pending = PCH_GBE_MAX_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +/** + * pch_gbe_set_ringparam - Set ring sizes + * @netdev: Network interface device structure + * @ring: Ring param structure + * Returns + * 0: Successful. + * Negative value: Failed. + */ +static int pch_gbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_tx_ring *txdr, *tx_old; + struct pch_gbe_rx_ring *rxdr, *rx_old; + int tx_ring_size, rx_ring_size; + int err = 0; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + tx_ring_size = (int)sizeof(struct pch_gbe_tx_ring); + rx_ring_size = (int)sizeof(struct pch_gbe_rx_ring); + + if ((netif_running(adapter->netdev))) + pch_gbe_down(adapter); + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + txdr = kzalloc(tx_ring_size, GFP_KERNEL); + if (!txdr) { + err = -ENOMEM; + goto err_alloc_tx; + } + rxdr = kzalloc(rx_ring_size, GFP_KERNEL); + if (!rxdr) { + err = -ENOMEM; + goto err_alloc_rx; + } + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = + clamp_val(ring->rx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD); + rxdr->count = roundup(rxdr->count, PCH_GBE_RX_DESC_MULTIPLE); + + txdr->count = + clamp_val(ring->tx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD); + txdr->count = roundup(txdr->count, PCH_GBE_TX_DESC_MULTIPLE); + + if ((netif_running(adapter->netdev))) { + /* Try to get new resources before deleting old */ + err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring); + if (err) + goto err_setup_rx; + err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring); + if (err) + goto err_setup_tx; + /* save the new, restore the old in order to free it, + * then restore the new back again */ +#ifdef RINGFREE + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + pch_gbe_free_rx_resources(adapter, adapter->rx_ring); + pch_gbe_free_tx_resources(adapter, adapter->tx_ring); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; +#else + pch_gbe_free_rx_resources(adapter, rx_old); + pch_gbe_free_tx_resources(adapter, tx_old); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; +#endif + err = pch_gbe_up(adapter); + } + return err; + +err_setup_tx: + pch_gbe_free_rx_resources(adapter, adapter->rx_ring); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + pch_gbe_up(adapter); + return err; +} + +/** + * pch_gbe_get_pauseparam - Report pause parameters + * @netdev: Network interface device structure + * @pause: Pause parameters structure + */ +static void pch_gbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct pch_gbe_adapter *adapter = 
netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + + pause->autoneg = + ((hw->mac.fc_autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->mac.fc == PCH_GBE_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->mac.fc == PCH_GBE_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->mac.fc == PCH_GBE_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +/** + * pch_gbe_set_pauseparam - Set pause parameters + * @netdev: Network interface device structure + * @pause: Pause parameters structure + * Returns: + * 0: Successful. + * Negative value: Failed. + */ +static int pch_gbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + int ret = 0; + + hw->mac.fc_autoneg = pause->autoneg; + if ((pause->rx_pause) && (pause->tx_pause)) + hw->mac.fc = PCH_GBE_FC_FULL; + else if ((pause->rx_pause) && (!pause->tx_pause)) + hw->mac.fc = PCH_GBE_FC_RX_PAUSE; + else if ((!pause->rx_pause) && (pause->tx_pause)) + hw->mac.fc = PCH_GBE_FC_TX_PAUSE; + else if ((!pause->rx_pause) && (!pause->tx_pause)) + hw->mac.fc = PCH_GBE_FC_NONE; + + if (hw->mac.fc_autoneg == AUTONEG_ENABLE) { + if ((netif_running(adapter->netdev))) { + pch_gbe_down(adapter); + ret = pch_gbe_up(adapter); + } else { + pch_gbe_reset(adapter); + } + } else { + ret = pch_gbe_mac_force_mac_fc(hw); + } + return ret; +} + +/** + * pch_gbe_get_strings - Return a set of strings that describe the requested + * objects + * @netdev: Network interface device structure + * @stringset: Select the stringset. [ETH_SS_TEST] [ETH_SS_STATS] + * @data: Pointer of read string data. + */ +static void pch_gbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case (u32) ETH_SS_STATS: + for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, pch_gbe_gstrings_stats[i].string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +/** + * pch_gbe_get_ethtool_stats - Return statistics about the device + * @netdev: Network interface device structure + * @stats: Ethtool statue structure + * @data: Pointer of read status area + */ +static void pch_gbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + int i; + const struct pch_gbe_stats *gstats = pch_gbe_gstrings_stats; + char *hw_stats = (char *)&adapter->stats; + + pch_gbe_update_stats(adapter); + for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) { + char *p = hw_stats + gstats->offset; + data[i] = gstats->size == sizeof(u64) ? 
*(u64 *)p:(*(u32 *)p); + gstats++; + } +} + +static int pch_gbe_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return PCH_GBE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops pch_gbe_ethtool_ops = { + .get_settings = pch_gbe_get_settings, + .set_settings = pch_gbe_set_settings, + .get_drvinfo = pch_gbe_get_drvinfo, + .get_regs_len = pch_gbe_get_regs_len, + .get_regs = pch_gbe_get_regs, + .get_wol = pch_gbe_get_wol, + .set_wol = pch_gbe_set_wol, + .nway_reset = pch_gbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = pch_gbe_get_ringparam, + .set_ringparam = pch_gbe_set_ringparam, + .get_pauseparam = pch_gbe_get_pauseparam, + .set_pauseparam = pch_gbe_set_pauseparam, + .get_strings = pch_gbe_get_strings, + .get_ethtool_stats = pch_gbe_get_ethtool_stats, + .get_sset_count = pch_gbe_get_sset_count, +}; + +void pch_gbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &pch_gbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c new file mode 100644 index 000000000..3b98b263b --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -0,0 +1,2845 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD. + * + * This code was derived from the Intel e1000e Linux driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include "pch_gbe.h" +#include "pch_gbe_api.h" +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_classify.h> +#include <linux/gpio.h> + +#define DRV_VERSION "1.01" +const char pch_driver_version[] = DRV_VERSION; + +#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* PCI device ID */ +#define PCH_GBE_MAR_ENTRIES 16 +#define PCH_GBE_SHORT_PKT 64 +#define DSC_INIT16 0xC000 +#define PCH_GBE_DMA_ALIGN 0 +#define PCH_GBE_DMA_PADDING 2 +#define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */ +#define PCH_GBE_COPYBREAK_DEFAULT 256 +#define PCH_GBE_PCI_BAR 1 +#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ + +/* Macros for ML7223 */ +#define PCI_VENDOR_ID_ROHM 0x10db +#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 + +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 + +#define PCH_GBE_TX_WEIGHT 64 +#define PCH_GBE_RX_WEIGHT 64 +#define PCH_GBE_RX_BUFFER_WRITE 16 + +/* Initialize the wake-on-LAN settings */ +#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP) + +#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ + PCH_GBE_CHIP_TYPE_INTERNAL | \ + PCH_GBE_RGMII_MODE_RGMII \ + ) + +/* Ethertype field values */ +#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880 +#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 +#define PCH_GBE_FRAME_SIZE_2048 2048 +#define PCH_GBE_FRAME_SIZE_4096 4096 +#define PCH_GBE_FRAME_SIZE_8192 8192 + +#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc) +#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc) +#define PCH_GBE_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* Pause packet value */ +#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001 +#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100 +#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888 +#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF + + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define PCH_GBE_INT_ENABLE_MASK ( \ + PCH_GBE_INT_RX_DMA_CMPLT | \ + PCH_GBE_INT_RX_DSC_EMP | \ + PCH_GBE_INT_RX_FIFO_ERR | \ + PCH_GBE_INT_WOL_DET | \ + PCH_GBE_INT_TX_CMPLT \ + ) + +#define PCH_GBE_INT_DISABLE_ALL 0 + +/* Macros for ieee1588 */ +/* 0x40 Time Synchronization Channel Control Register Bits */ +#define MASTER_MODE (1<<0) +#define SLAVE_MODE (0) +#define V2_MODE (1<<31) +#define CAP_MODE0 (0) +#define CAP_MODE2 (1<<17) + +/* 0x44 Time Synchronization Channel Event Register Bits */ +#define TX_SNAPSHOT_LOCKED (1<<0) +#define RX_SNAPSHOT_LOCKED (1<<1) + +#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" +#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" + +#define MINNOW_PHY_RESET_GPIO 13 + +static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; + +static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); +static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, + int data); +static void pch_gbe_set_multi(struct net_device *netdev); + +static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) +{ + u8 *data = skb->data; + unsigned int offset; + u16 *hi, *id; + u32 lo; + + if (ptp_classify_raw(skb) == PTP_CLASS_NONE) + return 0; + + offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + + if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) + return 0; + + hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID); + id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); + + memcpy(&lo, &hi[1], sizeof(lo)); + + return (uid_hi == *hi && + uid_lo == lo && + seqid == *id); +} + +static void +pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct pci_dev *pdev; + u64 ns; + u32 hi, lo, val; + u16 uid, seq; + + if (!adapter->hwts_rx_en) + return; + + /* Get ieee1588's dev information */ + pdev = adapter->ptp_pdev; + + val = pch_ch_event_read(pdev); + + if (!(val & RX_SNAPSHOT_LOCKED)) + return; + + lo = pch_src_uuid_lo_read(pdev); + hi = pch_src_uuid_hi_read(pdev); + + uid = hi & 0xffff; + seq = (hi >> 16) & 0xffff; + + if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq))) + goto out; + + ns = pch_rx_snap_read(pdev); + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(ns); +out: + pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED); +} + +static void +pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct pci_dev *pdev; + struct skb_shared_info *shtx; + u64 ns; + u32 cnt, val; + + shtx = skb_shinfo(skb); + if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en))) + return; + + shtx->tx_flags |= SKBTX_IN_PROGRESS; + + /* Get ieee1588's dev information */ + pdev = adapter->ptp_pdev; + + /* + * This really stinks, but we have to poll for the Tx time stamp. 
+ */ + for (cnt = 0; cnt < 100; cnt++) { + val = pch_ch_event_read(pdev); + if (val & TX_SNAPSHOT_LOCKED) + break; + udelay(1); + } + if (!(val & TX_SNAPSHOT_LOCKED)) { + shtx->tx_flags &= ~SKBTX_IN_PROGRESS; + return; + } + + ns = pch_tx_snap_read(pdev); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + + pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED); +} + +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct hwtstamp_config cfg; + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev; + u8 station[20]; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + if (cfg.flags) /* reserved for future extensions */ + return -EINVAL; + + /* Get ieee1588's dev information */ + pdev = adapter->ptp_pdev; + + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + adapter->hwts_rx_en = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + adapter->hwts_rx_en = 0; + pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + adapter->hwts_rx_en = 1; + pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + adapter->hwts_rx_en = 1; + pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); + strcpy(station, PTP_L4_MULTICAST_SA); + pch_set_station_address(station, pdev); + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + adapter->hwts_rx_en = 1; + pch_ch_control_write(pdev, V2_MODE | CAP_MODE2); + strcpy(station, PTP_L2_MULTICAST_SA); + pch_set_station_address(station, pdev); + break; + default: + return -ERANGE; + } + + adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + + /* Clear out any old time stamps. */ + pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) +{ + iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); +} + +/** + * pch_gbe_mac_read_mac_addr - Read MAC address + * @hw: Pointer to the HW structure + * Returns: + * 0: Successful. 
+ */ +s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u32 adr1a, adr1b; + + adr1a = ioread32(&hw->reg->mac_adr[0].high); + adr1b = ioread32(&hw->reg->mac_adr[0].low); + + hw->mac.addr[0] = (u8)(adr1a & 0xFF); + hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF); + hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF); + hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF); + hw->mac.addr[4] = (u8)(adr1b & 0xFF); + hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF); + + netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr); + return 0; +} + +/** + * pch_gbe_wait_clr_bit - Wait to clear a bit + * @reg: Pointer of register + * @busy: Busy bit + */ +static void pch_gbe_wait_clr_bit(void *reg, u32 bit) +{ + u32 tmp; + + /* wait busy */ + tmp = 1000; + while ((ioread32(reg) & bit) && --tmp) + cpu_relax(); + if (!tmp) + pr_err("Error: busy bit is not cleared\n"); +} + +/** + * pch_gbe_mac_mar_set - Set MAC address register + * @hw: Pointer to the HW structure + * @addr: Pointer to the MAC address + * @index: MAC address array register + */ +static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u32 mar_low, mar_high, adrmask; + + netdev_dbg(adapter->netdev, "index : 0x%x\n", index); + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + mar_low = ((u32) addr[4] | ((u32) addr[5] << 8)); + /* Stop the MAC Address of index. */ + adrmask = ioread32(&hw->reg->ADDR_MASK); + iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); + /* Set the MAC address to the MAC address 1A/1B register */ + iowrite32(mar_high, &hw->reg->mac_adr[index].high); + iowrite32(mar_low, &hw->reg->mac_adr[index].low); + /* Start the MAC address of index */ + iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); +} + +/** + * pch_gbe_mac_reset_hw - Reset hardware + * @hw: Pointer to the HW structure + */ +static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw) +{ + /* Read the MAC address. 
and store to the private data */ + pch_gbe_mac_read_mac_addr(hw); + iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET); +#ifdef PCH_GBE_MAC_IFOP_RGMII + iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); +#endif + pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); + /* Setup the receive addresses */ + pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); + return; +} + +static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw) +{ + u32 rctl; + /* Disables Receive MAC */ + rctl = ioread32(&hw->reg->MAC_RX_EN); + iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); +} + +static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw) +{ + u32 rctl; + /* Enables Receive MAC */ + rctl = ioread32(&hw->reg->MAC_RX_EN); + iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN); +} + +/** + * pch_gbe_mac_init_rx_addrs - Initialize receive address's + * @hw: Pointer to the HW structure + * @mar_count: Receive address registers + */ +static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count) +{ + u32 i; + + /* Setup the receive address */ + pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); + + /* Zero out the other receive addresses */ + for (i = 1; i < mar_count; i++) { + iowrite32(0, &hw->reg->mac_adr[i].high); + iowrite32(0, &hw->reg->mac_adr[i].low); + } + iowrite32(0xFFFE, &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); +} + + +/** + * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses + * @hw: Pointer to the HW structure + * @mc_addr_list: Array of multicast addresses to program + * @mc_addr_count: Number of multicast addresses to program + * @mar_used_count: The first MAC Address register free to program + * @mar_total_num: Total number of supported MAC Address Registers + */ +static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 mar_used_count, u32 mar_total_num) +{ + u32 i, adrmask; + + /* Load the first set of multicast addresses into the exact + * filters (RAR). If there are not enough to fill the RAR + * array, clear the filters. + */ + for (i = mar_used_count; i < mar_total_num; i++) { + if (mc_addr_count) { + pch_gbe_mac_mar_set(hw, mc_addr_list, i); + mc_addr_count--; + mc_addr_list += ETH_ALEN; + } else { + /* Clear MAC address mask */ + adrmask = ioread32(&hw->reg->ADDR_MASK); + iowrite32((adrmask | (0x0001 << i)), + &hw->reg->ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY); + /* Clear MAC address */ + iowrite32(0, &hw->reg->mac_adr[i].high); + iowrite32(0, &hw->reg->mac_adr[i].low); + } + } +} + +/** + * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings + * @hw: Pointer to the HW structure + * Returns: + * 0: Successful. + * Negative value: Failed. 
+ */ +s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + struct pch_gbe_mac_info *mac = &hw->mac; + u32 rx_fctrl; + + netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc); + + rx_fctrl = ioread32(&hw->reg->RX_FCTRL); + + switch (mac->fc) { + case PCH_GBE_FC_NONE: + rx_fctrl &= ~PCH_GBE_FL_CTRL_EN; + mac->tx_fc_enable = false; + break; + case PCH_GBE_FC_RX_PAUSE: + rx_fctrl |= PCH_GBE_FL_CTRL_EN; + mac->tx_fc_enable = false; + break; + case PCH_GBE_FC_TX_PAUSE: + rx_fctrl &= ~PCH_GBE_FL_CTRL_EN; + mac->tx_fc_enable = true; + break; + case PCH_GBE_FC_FULL: + rx_fctrl |= PCH_GBE_FL_CTRL_EN; + mac->tx_fc_enable = true; + break; + default: + netdev_err(adapter->netdev, + "Flow control param set incorrectly\n"); + return -EINVAL; + } + if (mac->link_duplex == DUPLEX_HALF) + rx_fctrl &= ~PCH_GBE_FL_CTRL_EN; + iowrite32(rx_fctrl, &hw->reg->RX_FCTRL); + netdev_dbg(adapter->netdev, + "RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n", + ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable); + return 0; +} + +/** + * pch_gbe_mac_set_wol_event - Set wake-on-lan event + * @hw: Pointer to the HW structure + * @wu_evt: Wake up event + */ +static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u32 addr_mask; + + netdev_dbg(adapter->netdev, "wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n", + wu_evt, ioread32(&hw->reg->ADDR_MASK)); + + if (wu_evt) { + /* Set Wake-On-Lan address mask */ + addr_mask = ioread32(&hw->reg->ADDR_MASK); + iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK); + /* wait busy */ + pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY); + iowrite32(0, &hw->reg->WOL_ST); + iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL); + iowrite32(0x02, &hw->reg->TCPIP_ACC); + iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN); + } else { + iowrite32(0, &hw->reg->WOL_CTRL); + iowrite32(0, &hw->reg->WOL_ST); + } + return; +} + +/** + * pch_gbe_mac_ctrl_miim - Control MIIM interface + * @hw: Pointer to the HW structure + * @addr: Address of PHY + * @dir: Operetion. (Write or Read) + * @reg: Access register of PHY + * @data: Write data. + * + * Returns: Read date. + */ +u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg, + u16 data) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u32 data_out = 0; + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&hw->miim_lock, flags); + + for (i = 100; i; --i) { + if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY)) + break; + udelay(20); + } + if (i == 0) { + netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n"); + spin_unlock_irqrestore(&hw->miim_lock, flags); + return 0; /* No way to indicate timeout error */ + } + iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) | + (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) | + dir | data), &hw->reg->MIIM); + for (i = 0; i < 100; i++) { + udelay(20); + data_out = ioread32(&hw->reg->MIIM); + if ((data_out & PCH_GBE_MIIM_OPER_READY)) + break; + } + spin_unlock_irqrestore(&hw->miim_lock, flags); + + netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n", + dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg, + dir == PCH_GBE_MIIM_OPER_READ ? 
data_out : data); + return (u16) data_out; +} + +/** + * pch_gbe_mac_set_pause_packet - Set pause packet + * @hw: Pointer to the HW structure + */ +static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + unsigned long tmp2, tmp3; + + /* Set Pause packet */ + tmp2 = hw->mac.addr[1]; + tmp2 = (tmp2 << 8) | hw->mac.addr[0]; + tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16); + + tmp3 = hw->mac.addr[5]; + tmp3 = (tmp3 << 8) | hw->mac.addr[4]; + tmp3 = (tmp3 << 8) | hw->mac.addr[3]; + tmp3 = (tmp3 << 8) | hw->mac.addr[2]; + + iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1); + iowrite32(tmp2, &hw->reg->PAUSE_PKT2); + iowrite32(tmp3, &hw->reg->PAUSE_PKT3); + iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4); + iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5); + + /* Transmit Pause Packet */ + iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ); + + netdev_dbg(adapter->netdev, + "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + ioread32(&hw->reg->PAUSE_PKT1), + ioread32(&hw->reg->PAUSE_PKT2), + ioread32(&hw->reg->PAUSE_PKT3), + ioread32(&hw->reg->PAUSE_PKT4), + ioread32(&hw->reg->PAUSE_PKT5)); + + return; +} + + +/** + * pch_gbe_alloc_queues - Allocate memory for all rings + * @adapter: Board private structure to initialize + * Returns: + * 0: Successfully + * Negative value: Failed + */ +static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter) +{ + adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev, + sizeof(*adapter->tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev, + sizeof(*adapter->rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) + return -ENOMEM; + return 0; +} + +/** + * pch_gbe_init_stats - Initialize status + * @adapter: Board private structure to initialize + */ +static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter) +{ + memset(&adapter->stats, 0, sizeof(adapter->stats)); + return; +} + +/** + * pch_gbe_init_phy - Initialize PHY + * @adapter: Board private structure to initialize + * Returns: + * 0: Successfully + * Negative value: Failed + */ +static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 addr; + u16 bmcr, stat; + + /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ + for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) { + adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 
0 : addr; + bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR); + stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR); + stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR); + if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) + break; + } + adapter->hw.phy.addr = adapter->mii.phy_id; + netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id); + if (addr == PCH_GBE_PHY_REGS_LEN) + return -EAGAIN; + /* Selected the phy and isolate the rest */ + for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) { + if (addr != adapter->mii.phy_id) { + pch_gbe_mdio_write(netdev, addr, MII_BMCR, + BMCR_ISOLATE); + } else { + bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR); + pch_gbe_mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + + /* MII setup */ + adapter->mii.phy_id_mask = 0x1F; + adapter->mii.reg_num_mask = 0x1F; + adapter->mii.dev = adapter->netdev; + adapter->mii.mdio_read = pch_gbe_mdio_read; + adapter->mii.mdio_write = pch_gbe_mdio_write; + adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii); + return 0; +} + +/** + * pch_gbe_mdio_read - The read function for mii + * @netdev: Network interface device structure + * @addr: Phy ID + * @reg: Access location + * Returns: + * 0: Successfully + * Negative value: Failed + */ +static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + + return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg, + (u16) 0); +} + +/** + * pch_gbe_mdio_write - The write function for mii + * @netdev: Network interface device structure + * @addr: Phy ID (not used) + * @reg: Access location + * @data: Write data + */ +static void pch_gbe_mdio_write(struct net_device *netdev, + int addr, int reg, int data) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + + pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data); +} + +/** + * pch_gbe_reset_task - Reset processing at the time of transmission timeout + * @work: Pointer of board private structure + */ +static void pch_gbe_reset_task(struct work_struct *work) +{ + struct pch_gbe_adapter *adapter; + adapter = container_of(work, struct pch_gbe_adapter, reset_task); + + rtnl_lock(); + pch_gbe_reinit_locked(adapter); + rtnl_unlock(); +} + +/** + * pch_gbe_reinit_locked- Re-initialization + * @adapter: Board private structure + */ +void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter) +{ + pch_gbe_down(adapter); + pch_gbe_up(adapter); +} + +/** + * pch_gbe_reset - Reset GbE + * @adapter: Board private structure + */ +void pch_gbe_reset(struct pch_gbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + pch_gbe_mac_reset_hw(&adapter->hw); + /* reprogram multicast address register after reset */ + pch_gbe_set_multi(netdev); + /* Setup the receive address. 
*/ + pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); + if (pch_gbe_hal_init_hw(&adapter->hw)) + netdev_err(netdev, "Hardware Error\n"); +} + +/** + * pch_gbe_free_irq - Free an interrupt + * @adapter: Board private structure + */ +static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + if (adapter->have_msi) { + pci_disable_msi(adapter->pdev); + netdev_dbg(netdev, "call pci_disable_msi\n"); + } +} + +/** + * pch_gbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: Board private structure + */ +static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + + atomic_inc(&adapter->irq_sem); + iowrite32(0, &hw->reg->INT_EN); + ioread32(&hw->reg->INT_ST); + synchronize_irq(adapter->pdev->irq); + + netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n", + ioread32(&hw->reg->INT_EN)); +} + +/** + * pch_gbe_irq_enable - Enable default interrupt generation settings + * @adapter: Board private structure + */ +static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + + if (likely(atomic_dec_and_test(&adapter->irq_sem))) + iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN); + ioread32(&hw->reg->INT_ST); + netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n", + ioread32(&hw->reg->INT_EN)); +} + + + +/** + * pch_gbe_setup_tctl - configure the Transmit control registers + * @adapter: Board private structure + */ +static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + u32 tx_mode, tcpip; + + tx_mode = PCH_GBE_TM_LONG_PKT | + PCH_GBE_TM_ST_AND_FD | + PCH_GBE_TM_SHORT_PKT | + PCH_GBE_TM_TH_TX_STRT_8 | + PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8; + + iowrite32(tx_mode, &hw->reg->TX_MODE); + + tcpip = ioread32(&hw->reg->TCPIP_ACC); + tcpip |= PCH_GBE_TX_TCPIPACC_EN; + iowrite32(tcpip, &hw->reg->TCPIP_ACC); + return; +} + +/** + * pch_gbe_configure_tx - Configure Transmit Unit after Reset + * @adapter: Board private structure + */ +static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + u32 tdba, tdlen, dctrl; + + netdev_dbg(adapter->netdev, "dma addr = 0x%08llx size = 0x%08x\n", + (unsigned long long)adapter->tx_ring->dma, + adapter->tx_ring->size); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = adapter->tx_ring->dma; + tdlen = adapter->tx_ring->size - 0x10; + iowrite32(tdba, &hw->reg->TX_DSC_BASE); + iowrite32(tdlen, &hw->reg->TX_DSC_SIZE); + iowrite32(tdba, &hw->reg->TX_DSC_SW_P); + + /* Enables Transmission DMA */ + dctrl = ioread32(&hw->reg->DMA_CTRL); + dctrl |= PCH_GBE_TX_DMA_EN; + iowrite32(dctrl, &hw->reg->DMA_CTRL); +} + +/** + * pch_gbe_setup_rctl - Configure the receive control registers + * @adapter: Board private structure + */ +static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + u32 rx_mode, tcpip; + + rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN | + PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8; + + iowrite32(rx_mode, &hw->reg->RX_MODE); + + tcpip = ioread32(&hw->reg->TCPIP_ACC); + + tcpip |= PCH_GBE_RX_TCPIPACC_OFF; + tcpip &= ~PCH_GBE_RX_TCPIPACC_EN; + iowrite32(tcpip, &hw->reg->TCPIP_ACC); + return; +} + +/** + * pch_gbe_configure_rx - Configure Receive Unit after Reset + * @adapter: Board private structure + */ +static void 
pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + u32 rdba, rdlen, rxdma; + + netdev_dbg(adapter->netdev, "dma adr = 0x%08llx size = 0x%08x\n", + (unsigned long long)adapter->rx_ring->dma, + adapter->rx_ring->size); + + pch_gbe_mac_force_mac_fc(hw); + + pch_gbe_disable_mac_rx(hw); + + /* Disables Receive DMA */ + rxdma = ioread32(&hw->reg->DMA_CTRL); + rxdma &= ~PCH_GBE_RX_DMA_EN; + iowrite32(rxdma, &hw->reg->DMA_CTRL); + + netdev_dbg(adapter->netdev, + "MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n", + ioread32(&hw->reg->MAC_RX_EN), + ioread32(&hw->reg->DMA_CTRL)); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + rdba = adapter->rx_ring->dma; + rdlen = adapter->rx_ring->size - 0x10; + iowrite32(rdba, &hw->reg->RX_DSC_BASE); + iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); + iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); +} + +/** + * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer + * @adapter: Board private structure + * @buffer_info: Buffer information structure + */ +static void pch_gbe_unmap_and_free_tx_resource( + struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info) +{ + if (buffer_info->mapped) { + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->mapped = false; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } +} + +/** + * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer + * @adapter: Board private structure + * @buffer_info: Buffer information structure + */ +static void pch_gbe_unmap_and_free_rx_resource( + struct pch_gbe_adapter *adapter, + struct pch_gbe_buffer *buffer_info) +{ + if (buffer_info->mapped) { + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_FROM_DEVICE); + buffer_info->mapped = false; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } +} + +/** + * pch_gbe_clean_tx_ring - Free Tx Buffers + * @adapter: Board private structure + * @tx_ring: Ring to be cleaned + */ +static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring) +{ + struct pch_gbe_hw *hw = &adapter->hw; + struct pch_gbe_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info); + } + netdev_dbg(adapter->netdev, + "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i); + + size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P); + iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE); +} + +/** + * pch_gbe_clean_rx_ring - Free Rx Buffers + * @adapter: Board private structure + * @rx_ring: Ring to free buffers from + */ +static void +pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring) +{ + struct pch_gbe_hw *hw = &adapter->hw; + struct pch_gbe_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + 
pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info); + } + netdev_dbg(adapter->netdev, + "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i); + size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P); + iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE); +} + +static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed, + u16 duplex) +{ + struct pch_gbe_hw *hw = &adapter->hw; + unsigned long rgmii = 0; + + /* Set the RGMII control. */ +#ifdef PCH_GBE_MAC_IFOP_RGMII + switch (speed) { + case SPEED_10: + rgmii = (PCH_GBE_RGMII_RATE_2_5M | + PCH_GBE_MAC_RGMII_CTRL_SETTING); + break; + case SPEED_100: + rgmii = (PCH_GBE_RGMII_RATE_25M | + PCH_GBE_MAC_RGMII_CTRL_SETTING); + break; + case SPEED_1000: + rgmii = (PCH_GBE_RGMII_RATE_125M | + PCH_GBE_MAC_RGMII_CTRL_SETTING); + break; + } + iowrite32(rgmii, &hw->reg->RGMII_CTRL); +#else /* GMII */ + rgmii = 0; + iowrite32(rgmii, &hw->reg->RGMII_CTRL); +#endif +} +static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed, + u16 duplex) +{ + struct net_device *netdev = adapter->netdev; + struct pch_gbe_hw *hw = &adapter->hw; + unsigned long mode = 0; + + /* Set the communication mode */ + switch (speed) { + case SPEED_10: + mode = PCH_GBE_MODE_MII_ETHER; + netdev->tx_queue_len = 10; + break; + case SPEED_100: + mode = PCH_GBE_MODE_MII_ETHER; + netdev->tx_queue_len = 100; + break; + case SPEED_1000: + mode = PCH_GBE_MODE_GMII_ETHER; + break; + } + if (duplex == DUPLEX_FULL) + mode |= PCH_GBE_MODE_FULL_DUPLEX; + else + mode |= PCH_GBE_MODE_HALF_DUPLEX; + iowrite32(mode, &hw->reg->MODE); +} + +/** + * pch_gbe_watchdog - Watchdog process + * @data: Board private structure + */ +static void pch_gbe_watchdog(unsigned long data) +{ + struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data; + struct net_device *netdev = adapter->netdev; + struct pch_gbe_hw *hw = &adapter->hw; + + netdev_dbg(netdev, "right now = %ld\n", jiffies); + + pch_gbe_update_stats(adapter); + if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) { + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + netdev->tx_queue_len = adapter->tx_queue_len; + /* mii library handles link maintenance tasks */ + if (mii_ethtool_gset(&adapter->mii, &cmd)) { + netdev_err(netdev, "ethtool get setting Error\n"); + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + + PCH_GBE_WATCHDOG_PERIOD)); + return; + } + hw->mac.link_speed = ethtool_cmd_speed(&cmd); + hw->mac.link_duplex = cmd.duplex; + /* Set the RGMII control. */ + pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed, + hw->mac.link_duplex); + /* Set the communication mode */ + pch_gbe_set_mode(adapter, hw->mac.link_speed, + hw->mac.link_duplex); + netdev_dbg(netdev, + "Link is Up %d Mbps %s-Duplex\n", + hw->mac.link_speed, + cmd.duplex == DUPLEX_FULL ? 
"Full" : "Half"); + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } else if ((!mii_link_ok(&adapter->mii)) && + (netif_carrier_ok(netdev))) { + netdev_dbg(netdev, "NIC Link is Down\n"); + hw->mac.link_speed = SPEED_10; + hw->mac.link_duplex = DUPLEX_HALF; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD)); +} + +/** + * pch_gbe_tx_queue - Carry out queuing of the transmission data + * @adapter: Board private structure + * @tx_ring: Tx descriptor ring structure + * @skb: Sockt buffer structure + */ +static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring, + struct sk_buff *skb) +{ + struct pch_gbe_hw *hw = &adapter->hw; + struct pch_gbe_tx_desc *tx_desc; + struct pch_gbe_buffer *buffer_info; + struct sk_buff *tmp_skb; + unsigned int frame_ctrl; + unsigned int ring_num; + + /*-- Set frame control --*/ + frame_ctrl = 0; + if (unlikely(skb->len < PCH_GBE_SHORT_PKT)) + frame_ctrl |= PCH_GBE_TXD_CTRL_APAD; + if (skb->ip_summed == CHECKSUM_NONE) + frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF; + + /* Performs checksum processing */ + /* + * It is because the hardware accelerator does not support a checksum, + * when the received data size is less than 64 bytes. + */ + if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) { + frame_ctrl |= PCH_GBE_TXD_CTRL_APAD | + PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + unsigned int offset; + offset = skb_transport_offset(skb); + if (iph->protocol == IPPROTO_TCP) { + skb->csum = 0; + tcp_hdr(skb)->check = 0; + skb->csum = skb_checksum(skb, offset, + skb->len - offset, 0); + tcp_hdr(skb)->check = + csum_tcpudp_magic(iph->saddr, + iph->daddr, + skb->len - offset, + IPPROTO_TCP, + skb->csum); + } else if (iph->protocol == IPPROTO_UDP) { + skb->csum = 0; + udp_hdr(skb)->check = 0; + skb->csum = + skb_checksum(skb, offset, + skb->len - offset, 0); + udp_hdr(skb)->check = + csum_tcpudp_magic(iph->saddr, + iph->daddr, + skb->len - offset, + IPPROTO_UDP, + skb->csum); + } + } + } + + ring_num = tx_ring->next_to_use; + if (unlikely((ring_num + 1) == tx_ring->count)) + tx_ring->next_to_use = 0; + else + tx_ring->next_to_use = ring_num + 1; + + + buffer_info = &tx_ring->buffer_info[ring_num]; + tmp_skb = buffer_info->skb; + + /* [Header:14][payload] ---> [Header:14][paddong:2][payload] */ + memcpy(tmp_skb->data, skb->data, ETH_HLEN); + tmp_skb->data[ETH_HLEN] = 0x00; + tmp_skb->data[ETH_HLEN + 1] = 0x00; + tmp_skb->len = skb->len; + memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN], + (skb->len - ETH_HLEN)); + /*-- Set Buffer information --*/ + buffer_info->length = tmp_skb->len; + buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data, + buffer_info->length, + DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { + netdev_err(adapter->netdev, "TX DMA map failed\n"); + buffer_info->dma = 0; + buffer_info->time_stamp = 0; + tx_ring->next_to_use = ring_num; + return; + } + buffer_info->mapped = true; + buffer_info->time_stamp = jiffies; + + /*-- Set Tx descriptor --*/ + tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num); + tx_desc->buffer_addr = (buffer_info->dma); + tx_desc->length = (tmp_skb->len); + tx_desc->tx_words_eob = ((tmp_skb->len + 3)); + tx_desc->tx_frame_ctrl = (frame_ctrl); + tx_desc->gbec_status = (DSC_INIT16); + + if (unlikely(++ring_num == tx_ring->count)) + ring_num = 0; + + /* 
Update software pointer of TX descriptor */ + iowrite32(tx_ring->dma + + (int)sizeof(struct pch_gbe_tx_desc) * ring_num, + &hw->reg->TX_DSC_SW_P); + + pch_tx_timestamp(adapter, skb); + + dev_kfree_skb_any(skb); +} + +/** + * pch_gbe_update_stats - Update the board statistics counters + * @adapter: Board private structure + */ +void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_hw_stats *stats = &adapter->stats; + unsigned long flags; + + /* + * Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* Update device status "adapter->stats" */ + stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors; + stats->tx_errors = stats->tx_length_errors + + stats->tx_aborted_errors + + stats->tx_carrier_errors + stats->tx_timeout_count; + + /* Update network device status "adapter->net_stats" */ + netdev->stats.rx_packets = stats->rx_packets; + netdev->stats.rx_bytes = stats->rx_bytes; + netdev->stats.rx_dropped = stats->rx_dropped; + netdev->stats.tx_packets = stats->tx_packets; + netdev->stats.tx_bytes = stats->tx_bytes; + netdev->stats.tx_dropped = stats->tx_dropped; + /* Fill out the OS statistics structure */ + netdev->stats.multicast = stats->multicast; + netdev->stats.collisions = stats->collisions; + /* Rx Errors */ + netdev->stats.rx_errors = stats->rx_errors; + netdev->stats.rx_crc_errors = stats->rx_crc_errors; + netdev->stats.rx_frame_errors = stats->rx_frame_errors; + /* Tx Errors */ + netdev->stats.tx_errors = stats->tx_errors; + netdev->stats.tx_aborted_errors = stats->tx_aborted_errors; + netdev->stats.tx_carrier_errors = stats->tx_carrier_errors; + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw) +{ + u32 rxdma; + + /* Disable Receive DMA */ + rxdma = ioread32(&hw->reg->DMA_CTRL); + rxdma &= ~PCH_GBE_RX_DMA_EN; + iowrite32(rxdma, &hw->reg->DMA_CTRL); +} + +static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw) +{ + u32 rxdma; + + /* Enables Receive DMA */ + rxdma = ioread32(&hw->reg->DMA_CTRL); + rxdma |= PCH_GBE_RX_DMA_EN; + iowrite32(rxdma, &hw->reg->DMA_CTRL); +} + +/** + * pch_gbe_intr - Interrupt Handler + * @irq: Interrupt number + * @data: Pointer to a network interface device structure + * Returns: + * - IRQ_HANDLED: Our interrupt + * - IRQ_NONE: Not our interrupt + */ +static irqreturn_t pch_gbe_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + u32 int_st; + u32 int_en; + + /* Check request status */ + int_st = ioread32(&hw->reg->INT_ST); + int_st = int_st & ioread32(&hw->reg->INT_EN); + /* When request status is no interruption factor */ + if (unlikely(!int_st)) + return IRQ_NONE; /* Not our interrupt. End processing. 
*/ + netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st); + if (int_st & PCH_GBE_INT_RX_FRAME_ERR) + adapter->stats.intr_rx_frame_err_count++; + if (int_st & PCH_GBE_INT_RX_FIFO_ERR) + if (!adapter->rx_stop_flag) { + adapter->stats.intr_rx_fifo_err_count++; + netdev_dbg(netdev, "Rx fifo over run\n"); + adapter->rx_stop_flag = true; + int_en = ioread32(&hw->reg->INT_EN); + iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), + &hw->reg->INT_EN); + pch_gbe_disable_dma_rx(&adapter->hw); + int_st |= ioread32(&hw->reg->INT_ST); + int_st = int_st & ioread32(&hw->reg->INT_EN); + } + if (int_st & PCH_GBE_INT_RX_DMA_ERR) + adapter->stats.intr_rx_dma_err_count++; + if (int_st & PCH_GBE_INT_TX_FIFO_ERR) + adapter->stats.intr_tx_fifo_err_count++; + if (int_st & PCH_GBE_INT_TX_DMA_ERR) + adapter->stats.intr_tx_dma_err_count++; + if (int_st & PCH_GBE_INT_TCPIP_ERR) + adapter->stats.intr_tcpip_err_count++; + /* When Rx descriptor is empty */ + if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { + adapter->stats.intr_rx_dsc_empty_count++; + netdev_dbg(netdev, "Rx descriptor is empty\n"); + int_en = ioread32(&hw->reg->INT_EN); + iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); + if (hw->mac.tx_fc_enable) { + /* Set Pause packet */ + pch_gbe_mac_set_pause_packet(hw); + } + } + + /* When request status is Receive interruption */ + if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || + (adapter->rx_stop_flag)) { + if (likely(napi_schedule_prep(&adapter->napi))) { + /* Enable only Rx Descriptor empty */ + atomic_inc(&adapter->irq_sem); + int_en = ioread32(&hw->reg->INT_EN); + int_en &= + ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT); + iowrite32(int_en, &hw->reg->INT_EN); + /* Start polling for NAPI */ + __napi_schedule(&adapter->napi); + } + } + netdev_dbg(netdev, "return = 0x%08x INT_EN reg = 0x%08x\n", + IRQ_HANDLED, ioread32(&hw->reg->INT_EN)); + return IRQ_HANDLED; +} + +/** + * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: Board private structure + * @rx_ring: Rx descriptor ring + * @cleaned_count: Cleaned count + */ +static void +pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring, int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_hw *hw = &adapter->hw; + struct pch_gbe_rx_desc *rx_desc; + struct pch_gbe_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz; + + bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; + i = rx_ring->next_to_use; + + while ((cleaned_count--)) { + buffer_info = &rx_ring->buffer_info[i]; + skb = netdev_alloc_skb(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->stats.rx_alloc_buff_failed++; + break; + } + /* align */ + skb_reserve(skb, NET_IP_ALIGN); + buffer_info->skb = skb; + + buffer_info->dma = dma_map_single(&pdev->dev, + buffer_info->rx_buffer, + buffer_info->length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { + dev_kfree_skb(skb); + buffer_info->skb = NULL; + buffer_info->dma = 0; + adapter->stats.rx_alloc_buff_failed++; + break; /* while !buffer_info->skb */ + } + buffer_info->mapped = true; + rx_desc = PCH_GBE_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = (buffer_info->dma); + rx_desc->gbec_status = DSC_INIT16; + + netdev_dbg(netdev, + "i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n", + i, (unsigned long long)buffer_info->dma, + buffer_info->length); + + if 
(unlikely(++i == rx_ring->count)) + i = 0; + } + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + iowrite32(rx_ring->dma + + (int)sizeof(struct pch_gbe_rx_desc) * i, + &hw->reg->RX_DSC_SW_P); + } + return; +} + +static int +pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_buffer *buffer_info; + unsigned int i; + unsigned int bufsz; + unsigned int size; + + bufsz = adapter->rx_buffer_len; + + size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; + rx_ring->rx_buff_pool = + dma_zalloc_coherent(&pdev->dev, size, + &rx_ring->rx_buff_pool_logic, GFP_KERNEL); + if (!rx_ring->rx_buff_pool) + return -ENOMEM; + + rx_ring->rx_buff_pool_size = size; + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i; + buffer_info->length = bufsz; + } + return 0; +} + +/** + * pch_gbe_alloc_tx_buffers - Allocate transmit buffers + * @adapter: Board private structure + * @tx_ring: Tx descriptor ring + */ +static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring) +{ + struct pch_gbe_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz; + struct pch_gbe_tx_desc *tx_desc; + + bufsz = + adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + skb = netdev_alloc_skb(adapter->netdev, bufsz); + skb_reserve(skb, PCH_GBE_DMA_ALIGN); + buffer_info->skb = skb; + tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); + tx_desc->gbec_status = (DSC_INIT16); + } + return; +} + +/** + * pch_gbe_clean_tx - Reclaim resources after transmit completes + * @adapter: Board private structure + * @tx_ring: Tx descriptor ring + * Returns: + * true: Cleaned the descriptor + * false: Not cleaned the descriptor + */ +static bool +pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring) +{ + struct pch_gbe_tx_desc *tx_desc; + struct pch_gbe_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int cleaned_count = 0; + bool cleaned = false; + int unused, thresh; + + netdev_dbg(adapter->netdev, "next_to_clean : %d\n", + tx_ring->next_to_clean); + + i = tx_ring->next_to_clean; + tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); + netdev_dbg(adapter->netdev, "gbec_status:0x%04x dma_status:0x%04x\n", + tx_desc->gbec_status, tx_desc->dma_status); + + unused = PCH_GBE_DESC_UNUSED(tx_ring); + thresh = tx_ring->count - PCH_GBE_TX_WEIGHT; + if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) + { /* current marked clean, tx queue filling up, do extra clean */ + int j, k; + if (unused < 8) { /* tx queue nearly full */ + netdev_dbg(adapter->netdev, + "clean_tx: transmit queue warning (%x,%x) unused=%d\n", + tx_ring->next_to_clean, tx_ring->next_to_use, + unused); + } + + /* current marked clean, scan for more that need cleaning. 
*/ + k = i; + for (j = 0; j < PCH_GBE_TX_WEIGHT; j++) + { + tx_desc = PCH_GBE_TX_DESC(*tx_ring, k); + if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/ + if (++k >= tx_ring->count) k = 0; /*increment, wrap*/ + } + if (j < PCH_GBE_TX_WEIGHT) { + netdev_dbg(adapter->netdev, + "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n", + unused, j, i, k, tx_ring->next_to_use, + tx_desc->gbec_status); + i = k; /*found one to clean, usu gbec_status==2000.*/ + } + } + + while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { + netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n", + tx_desc->gbec_status); + buffer_info = &tx_ring->buffer_info[i]; + skb = buffer_info->skb; + cleaned = true; + + if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) { + adapter->stats.tx_aborted_errors++; + netdev_err(adapter->netdev, "Transfer Abort Error\n"); + } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER) + ) { + adapter->stats.tx_carrier_errors++; + netdev_err(adapter->netdev, + "Transfer Carrier Sense Error\n"); + } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL) + ) { + adapter->stats.tx_aborted_errors++; + netdev_err(adapter->netdev, + "Transfer Collision Abort Error\n"); + } else if ((tx_desc->gbec_status & + (PCH_GBE_TXD_GMAC_STAT_SNGCOL | + PCH_GBE_TXD_GMAC_STAT_MLTCOL))) { + adapter->stats.collisions++; + adapter->stats.tx_packets++; + adapter->stats.tx_bytes += skb->len; + netdev_dbg(adapter->netdev, "Transfer Collision\n"); + } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT) + ) { + adapter->stats.tx_packets++; + adapter->stats.tx_bytes += skb->len; + } + if (buffer_info->mapped) { + netdev_dbg(adapter->netdev, + "unmap buffer_info->dma : %d\n", i); + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->mapped = false; + } + if (buffer_info->skb) { + netdev_dbg(adapter->netdev, + "trim buffer_info->skb : %d\n", i); + skb_trim(buffer_info->skb, 0); + } + tx_desc->gbec_status = DSC_INIT16; + if (unlikely(++i == tx_ring->count)) + i = 0; + tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); + + /* weight of a sort for tx, to avoid endless transmit cleanup */ + if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { + cleaned = false; + break; + } + } + netdev_dbg(adapter->netdev, + "called pch_gbe_unmap_and_free_tx_resource() %d count\n", + cleaned_count); + if (cleaned_count > 0) { /*skip this if nothing cleaned*/ + /* Recover from running out of Tx resources in xmit_frame */ + spin_lock(&tx_ring->tx_lock); + if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) + { + netif_wake_queue(adapter->netdev); + adapter->stats.tx_restart_count++; + netdev_dbg(adapter->netdev, "Tx wake queue\n"); + } + + tx_ring->next_to_clean = i; + + netdev_dbg(adapter->netdev, "next_to_clean : %d\n", + tx_ring->next_to_clean); + spin_unlock(&tx_ring->tx_lock); + } + return cleaned; +} + +/** + * pch_gbe_clean_rx - Send received data up the network stack; legacy + * @adapter: Board private structure + * @rx_ring: Rx descriptor ring + * @work_done: Completed count + * @work_to_do: Request count + * Returns: + * true: Cleaned the descriptor + * false: Not cleaned the descriptor + */ +static bool +pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_buffer *buffer_info; + struct pch_gbe_rx_desc *rx_desc; + u32 length; + unsigned int i; + unsigned int 
cleaned_count = 0; + bool cleaned = false; + struct sk_buff *skb; + u8 dma_status; + u16 gbec_status; + u32 tcp_ip_status; + + i = rx_ring->next_to_clean; + + while (*work_done < work_to_do) { + /* Check Rx descriptor status */ + rx_desc = PCH_GBE_RX_DESC(*rx_ring, i); + if (rx_desc->gbec_status == DSC_INIT16) + break; + cleaned = true; + cleaned_count++; + + dma_status = rx_desc->dma_status; + gbec_status = rx_desc->gbec_status; + tcp_ip_status = rx_desc->tcp_ip_status; + rx_desc->gbec_status = DSC_INIT16; + buffer_info = &rx_ring->buffer_info[i]; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + /* unmap dma */ + dma_unmap_single(&pdev->dev, buffer_info->dma, + buffer_info->length, DMA_FROM_DEVICE); + buffer_info->mapped = false; + + netdev_dbg(netdev, + "RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x] BufInf = 0x%p\n", + i, dma_status, gbec_status, tcp_ip_status, + buffer_info); + /* Error check */ + if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) { + adapter->stats.rx_frame_errors++; + netdev_err(netdev, "Receive Not Octal Error\n"); + } else if (unlikely(gbec_status & + PCH_GBE_RXD_GMAC_STAT_NBLERR)) { + adapter->stats.rx_frame_errors++; + netdev_err(netdev, "Receive Nibble Error\n"); + } else if (unlikely(gbec_status & + PCH_GBE_RXD_GMAC_STAT_CRCERR)) { + adapter->stats.rx_crc_errors++; + netdev_err(netdev, "Receive CRC Error\n"); + } else { + /* get receive length */ + /* length convert[-3], length includes FCS length */ + length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN; + if (rx_desc->rx_words_eob & 0x02) + length = length - 4; + /* + * buffer_info->rx_buffer: [Header:14][payload] + * skb->data: [Reserve:2][Header:14][payload] + */ + memcpy(skb->data, buffer_info->rx_buffer, length); + + /* update status of driver */ + adapter->stats.rx_bytes += length; + adapter->stats.rx_packets++; + if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT)) + adapter->stats.multicast++; + /* Write meta date of skb */ + skb_put(skb, length); + + pch_rx_timestamp(adapter, skb); + + skb->protocol = eth_type_trans(skb, netdev); + if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + napi_gro_receive(&adapter->napi, skb); + (*work_done)++; + netdev_dbg(netdev, + "Receive skb->ip_summed: %d length: %d\n", + skb->ip_summed, length); + } + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { + pch_gbe_alloc_rx_buffers(adapter, rx_ring, + cleaned_count); + cleaned_count = 0; + } + if (++i == rx_ring->count) + i = 0; + } + rx_ring->next_to_clean = i; + if (cleaned_count) + pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + return cleaned; +} + +/** + * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors) + * @adapter: Board private structure + * @tx_ring: Tx descriptor ring (for a specific queue) to setup + * Returns: + * 0: Successfully + * Negative value: Failed + */ +int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_tx_desc *tx_desc; + int size; + int desNo; + + size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + return -ENOMEM; + + tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); + + tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + 
vfree(tx_ring->buffer_info); + return -ENOMEM; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + spin_lock_init(&tx_ring->tx_lock); + + for (desNo = 0; desNo < tx_ring->count; desNo++) { + tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo); + tx_desc->gbec_status = DSC_INIT16; + } + netdev_dbg(adapter->netdev, + "tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n", + tx_ring->desc, (unsigned long long)tx_ring->dma, + tx_ring->next_to_clean, tx_ring->next_to_use); + return 0; +} + +/** + * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors) + * @adapter: Board private structure + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * Returns: + * 0: Successfully + * Negative value: Failed + */ +int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_rx_desc *rx_desc; + int size; + int desNo; + + size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; + rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info) + return -ENOMEM; + + rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); + rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + vfree(rx_ring->buffer_info); + return -ENOMEM; + } + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + for (desNo = 0; desNo < rx_ring->count; desNo++) { + rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo); + rx_desc->gbec_status = DSC_INIT16; + } + netdev_dbg(adapter->netdev, + "rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx next_to_clean = 0x%08x next_to_use = 0x%08x\n", + rx_ring->desc, (unsigned long long)rx_ring->dma, + rx_ring->next_to_clean, rx_ring->next_to_use); + return 0; +} + +/** + * pch_gbe_free_tx_resources - Free Tx Resources + * @adapter: Board private structure + * @tx_ring: Tx descriptor ring for a specific queue + */ +void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + pch_gbe_clean_tx_ring(adapter, tx_ring); + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * pch_gbe_free_rx_resources - Free Rx Resources + * @adapter: Board private structure + * @rx_ring: Ring to clean the resources from + */ +void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, + struct pch_gbe_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + pch_gbe_clean_rx_ring(adapter, rx_ring); + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * pch_gbe_request_irq - Allocate an interrupt line + * @adapter: Board private structure + * Returns: + * 0: Successfully + * Negative value: Failed + */ +static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int flags; + + flags = IRQF_SHARED; + adapter->have_msi = false; + err = pci_enable_msi(adapter->pdev); + netdev_dbg(netdev, "call pci_enable_msi\n"); + if (err) { + netdev_dbg(netdev, "call pci_enable_msi - Error: %d\n", err); + } else { + flags = 0; + adapter->have_msi = true; + } + err = request_irq(adapter->pdev->irq, &pch_gbe_intr, + flags, netdev->name, netdev); + if (err) + netdev_err(netdev, "Unable to allocate interrupt Error: %d\n", + 
err); + netdev_dbg(netdev, + "adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n", + adapter->have_msi, flags, err); + return err; +} + + +/** + * pch_gbe_up - Up GbE network device + * @adapter: Board private structure + * Returns: + * 0: Successfully + * Negative value: Failed + */ +int pch_gbe_up(struct pch_gbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; + struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; + int err = -EINVAL; + + /* Ensure we have a valid MAC */ + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + netdev_err(netdev, "Error: Invalid MAC address\n"); + goto out; + } + + /* hardware has been reset, we need to reload some things */ + pch_gbe_set_multi(netdev); + + pch_gbe_setup_tctl(adapter); + pch_gbe_configure_tx(adapter); + pch_gbe_setup_rctl(adapter); + pch_gbe_configure_rx(adapter); + + err = pch_gbe_request_irq(adapter); + if (err) { + netdev_err(netdev, + "Error: can't bring device up - irq request failed\n"); + goto out; + } + err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count); + if (err) { + netdev_err(netdev, + "Error: can't bring device up - alloc rx buffers pool failed\n"); + goto freeirq; + } + pch_gbe_alloc_tx_buffers(adapter, tx_ring); + pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); + adapter->tx_queue_len = netdev->tx_queue_len; + pch_gbe_enable_dma_rx(&adapter->hw); + pch_gbe_enable_mac_rx(&adapter->hw); + + mod_timer(&adapter->watchdog_timer, jiffies); + + napi_enable(&adapter->napi); + pch_gbe_irq_enable(adapter); + netif_start_queue(adapter->netdev); + + return 0; + +freeirq: + pch_gbe_free_irq(adapter); +out: + return err; +} + +/** + * pch_gbe_down - Down GbE network device + * @adapter: Board private structure + */ +void pch_gbe_down(struct pch_gbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + napi_disable(&adapter->napi); + atomic_set(&adapter->irq_sem, 0); + + pch_gbe_irq_disable(adapter); + pch_gbe_free_irq(adapter); + + del_timer_sync(&adapter->watchdog_timer); + + netdev->tx_queue_len = adapter->tx_queue_len; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal)) + pch_gbe_reset(adapter); + pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); + pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); + + pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, + rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); + rx_ring->rx_buff_pool_logic = 0; + rx_ring->rx_buff_pool_size = 0; + rx_ring->rx_buff_pool = NULL; +} + +/** + * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter) + * @adapter: Board private structure to initialize + * Returns: + * 0: Successfully + * Negative value: Failed + */ +static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048; + hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + /* Initialize the hardware-specific values */ + if (pch_gbe_hal_setup_init_funcs(hw)) { + netdev_err(netdev, "Hardware Initialization Failure\n"); + return -EIO; + } + if (pch_gbe_alloc_queues(adapter)) { 
+ netdev_err(netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + spin_lock_init(&adapter->hw.miim_lock); + spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->ethtool_lock); + atomic_set(&adapter->irq_sem, 0); + pch_gbe_irq_disable(adapter); + + pch_gbe_init_stats(adapter); + + netdev_dbg(netdev, + "rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n", + (u32) adapter->rx_buffer_len, + hw->mac.min_frame_size, hw->mac.max_frame_size); + return 0; +} + +/** + * pch_gbe_open - Called when a network interface is made active + * @netdev: Network interface device structure + * Returns: + * 0: Successfully + * Negative value: Failed + */ +static int pch_gbe_open(struct net_device *netdev) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + int err; + + /* allocate transmit descriptors */ + err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring); + if (err) + goto err_setup_tx; + /* allocate receive descriptors */ + err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring); + if (err) + goto err_setup_rx; + pch_gbe_hal_power_up_phy(hw); + err = pch_gbe_up(adapter); + if (err) + goto err_up; + netdev_dbg(netdev, "Success End\n"); + return 0; + +err_up: + if (!adapter->wake_up_evt) + pch_gbe_hal_power_down_phy(hw); + pch_gbe_free_rx_resources(adapter, adapter->rx_ring); +err_setup_rx: + pch_gbe_free_tx_resources(adapter, adapter->tx_ring); +err_setup_tx: + pch_gbe_reset(adapter); + netdev_err(netdev, "Error End\n"); + return err; +} + +/** + * pch_gbe_stop - Disables a network interface + * @netdev: Network interface device structure + * Returns: + * 0: Successfully + */ +static int pch_gbe_stop(struct net_device *netdev) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + + pch_gbe_down(adapter); + if (!adapter->wake_up_evt) + pch_gbe_hal_power_down_phy(hw); + pch_gbe_free_tx_resources(adapter, adapter->tx_ring); + pch_gbe_free_rx_resources(adapter, adapter->rx_ring); + return 0; +} + +/** + * pch_gbe_xmit_frame - Packet transmitting start + * @skb: Socket buffer structure + * @netdev: Network interface device structure + * Returns: + * - NETDEV_TX_OK: Normal end + * - NETDEV_TX_BUSY: Error end + */ +static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; + unsigned long flags; + + if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { + /* Collision - tell upper layer to requeue */ + return NETDEV_TX_LOCKED; + } + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + netdev_dbg(netdev, + "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", + tx_ring->next_to_use, tx_ring->next_to_clean); + return NETDEV_TX_BUSY; + } + + /* CRC,ITAG no support */ + pch_gbe_tx_queue(adapter, tx_ring, skb); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + return NETDEV_TX_OK; +} + +/** + * pch_gbe_get_stats - Get System Network Statistics + * @netdev: Network interface device structure + * Returns: The current stats + */ +static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev) +{ + /* only return the current stats */ + return &netdev->stats; +} + +/** + * pch_gbe_set_multi - Multicast and Promiscuous mode set + * @netdev: Network interface device structure + */ +static void pch_gbe_set_multi(struct net_device *netdev) 
+{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + u32 rctl; + int i; + int mc_count; + + netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags); + + /* Check for Promiscuous and All Multicast modes */ + rctl = ioread32(&hw->reg->RX_MODE); + mc_count = netdev_mc_count(netdev); + if ((netdev->flags & IFF_PROMISC)) { + rctl &= ~PCH_GBE_ADD_FIL_EN; + rctl &= ~PCH_GBE_MLT_FIL_EN; + } else if ((netdev->flags & IFF_ALLMULTI)) { + /* all the multicasting receive permissions */ + rctl |= PCH_GBE_ADD_FIL_EN; + rctl &= ~PCH_GBE_MLT_FIL_EN; + } else { + if (mc_count >= PCH_GBE_MAR_ENTRIES) { + /* all the multicasting receive permissions */ + rctl |= PCH_GBE_ADD_FIL_EN; + rctl &= ~PCH_GBE_MLT_FIL_EN; + } else { + rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN); + } + } + iowrite32(rctl, &hw->reg->RX_MODE); + + if (mc_count >= PCH_GBE_MAR_ENTRIES) + return; + mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return; + + /* The shared function expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) { + if (i == mc_count) + break; + memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN); + } + pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1, + PCH_GBE_MAR_ENTRIES); + kfree(mta_list); + + netdev_dbg(netdev, + "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n", + ioread32(&hw->reg->RX_MODE), mc_count); +} + +/** + * pch_gbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: Network interface device structure + * @addr: Pointer to an address structure + * Returns: + * 0: Successfully + * -EADDRNOTAVAIL: Failed + */ +static int pch_gbe_set_mac(struct net_device *netdev, void *addr) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct sockaddr *skaddr = addr; + int ret_val; + + if (!is_valid_ether_addr(skaddr->sa_data)) { + ret_val = -EADDRNOTAVAIL; + } else { + memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len); + pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0); + ret_val = 0; + } + netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val); + netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr); + netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr); + netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n", + ioread32(&adapter->hw.reg->mac_adr[0].high), + ioread32(&adapter->hw.reg->mac_adr[0].low)); + return ret_val; +} + +/** + * pch_gbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: Network interface device structure + * @new_mtu: New value for maximum frame size + * Returns: + * 0: Successfully + * -EINVAL: Failed + */ +static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + int max_frame; + unsigned long old_rx_buffer_len = adapter->rx_buffer_len; + int err; + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) { + netdev_err(netdev, "Invalid MTU setting\n"); + return -EINVAL; + } + if (max_frame <= PCH_GBE_FRAME_SIZE_2048) + adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048; + else if (max_frame <= PCH_GBE_FRAME_SIZE_4096) + adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096; + else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) + adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; + else + adapter->rx_buffer_len = 
PCH_GBE_MAX_RX_BUFFER_SIZE; + + if (netif_running(netdev)) { + pch_gbe_down(adapter); + err = pch_gbe_up(adapter); + if (err) { + adapter->rx_buffer_len = old_rx_buffer_len; + pch_gbe_up(adapter); + return err; + } else { + netdev->mtu = new_mtu; + adapter->hw.mac.max_frame_size = max_frame; + } + } else { + pch_gbe_reset(adapter); + netdev->mtu = new_mtu; + adapter->hw.mac.max_frame_size = max_frame; + } + + netdev_dbg(netdev, + "max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", + max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, + adapter->hw.mac.max_frame_size); + return 0; +} + +/** + * pch_gbe_set_features - Reset device after features changed + * @netdev: Network interface device structure + * @features: New features + * Returns: + * 0: HW state updated successfully + */ +static int pch_gbe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & NETIF_F_RXCSUM)) + return 0; + + if (netif_running(netdev)) + pch_gbe_reinit_locked(adapter); + else + pch_gbe_reset(adapter); + + return 0; +} + +/** + * pch_gbe_ioctl - Controls register through a MII interface + * @netdev: Network interface device structure + * @ifr: Pointer to ifr structure + * @cmd: Control command + * Returns: + * 0: Successfully + * Negative value: Failed + */ +static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + netdev_dbg(netdev, "cmd : 0x%04x\n", cmd); + + if (cmd == SIOCSHWTSTAMP) + return hwtstamp_ioctl(netdev, ifr, cmd); + + return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); +} + +/** + * pch_gbe_tx_timeout - Respond to a Tx Hang + * @netdev: Network interface device structure + */ +static void pch_gbe_tx_timeout(struct net_device *netdev) +{ + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->stats.tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +/** + * pch_gbe_napi_poll - NAPI receive and transfer polling callback + * @napi: Pointer of polling device struct + * @budget: The maximum number of a packet + * Returns: + * false: Exit the polling mode + * true: Continue the polling mode + */ +static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) +{ + struct pch_gbe_adapter *adapter = + container_of(napi, struct pch_gbe_adapter, napi); + int work_done = 0; + bool poll_end_flag = false; + bool cleaned = false; + + netdev_dbg(adapter->netdev, "budget : %d\n", budget); + + pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); + cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); + + if (cleaned) + work_done = budget; + /* If no Tx and not enough Rx work done, + * exit the polling mode + */ + if (work_done < budget) + poll_end_flag = true; + + if (poll_end_flag) { + napi_complete(napi); + pch_gbe_irq_enable(adapter); + } + + if (adapter->rx_stop_flag) { + adapter->rx_stop_flag = false; + pch_gbe_enable_dma_rx(&adapter->hw); + } + + netdev_dbg(adapter->netdev, + "poll_end_flag : %d work_done : %d budget : %d\n", + poll_end_flag, work_done, budget); + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * pch_gbe_netpoll - Used by things like netconsole to send skbs + * @netdev: Network interface device structure + */ +static void pch_gbe_netpoll(struct net_device *netdev) +{ + struct pch_gbe_adapter *adapter = 
netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + pch_gbe_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static const struct net_device_ops pch_gbe_netdev_ops = { + .ndo_open = pch_gbe_open, + .ndo_stop = pch_gbe_stop, + .ndo_start_xmit = pch_gbe_xmit_frame, + .ndo_get_stats = pch_gbe_get_stats, + .ndo_set_mac_address = pch_gbe_set_mac, + .ndo_tx_timeout = pch_gbe_tx_timeout, + .ndo_change_mtu = pch_gbe_change_mtu, + .ndo_set_features = pch_gbe_set_features, + .ndo_do_ioctl = pch_gbe_ioctl, + .ndo_set_rx_mode = pch_gbe_set_multi, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = pch_gbe_netpoll, +#endif +}; + +static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + if (netif_running(netdev)) + pch_gbe_down(adapter); + pci_disable_device(pdev); + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + + if (pci_enable_device(pdev)) { + netdev_err(netdev, "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + pci_enable_wake(pdev, PCI_D0, 0); + pch_gbe_hal_power_up_phy(hw); + pch_gbe_reset(adapter); + /* Clear wake up status */ + pch_gbe_mac_set_wol_event(hw, 0); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void pch_gbe_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (pch_gbe_up(adapter)) { + netdev_dbg(netdev, + "can't bring device back up after reset\n"); + return; + } + } + netif_device_attach(netdev); +} + +static int __pch_gbe_suspend(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + u32 wufc = adapter->wake_up_evt; + int retval = 0; + + netif_device_detach(netdev); + if (netif_running(netdev)) + pch_gbe_down(adapter); + if (wufc) { + pch_gbe_set_multi(netdev); + pch_gbe_setup_rctl(adapter); + pch_gbe_configure_rx(adapter); + pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed, + hw->mac.link_duplex); + pch_gbe_set_mode(adapter, hw->mac.link_speed, + hw->mac.link_duplex); + pch_gbe_mac_set_wol_event(hw, wufc); + pci_disable_device(pdev); + } else { + pch_gbe_hal_power_down_phy(hw); + pch_gbe_mac_set_wol_event(hw, wufc); + pci_disable_device(pdev); + } + return retval; +} + +#ifdef CONFIG_PM +static int pch_gbe_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + + return __pch_gbe_suspend(pdev); +} + +static int pch_gbe_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *netdev = pci_get_drvdata(pdev); + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + struct pch_gbe_hw *hw = &adapter->hw; + u32 err; + + err = pci_enable_device(pdev); + if (err) { + netdev_err(netdev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + pch_gbe_hal_power_up_phy(hw); + pch_gbe_reset(adapter); + /* Clear wake on lan control and status */ + pch_gbe_mac_set_wol_event(hw, 0); + + if 
(netif_running(netdev)) + pch_gbe_up(adapter); + netif_device_attach(netdev); + + return 0; +} +#endif /* CONFIG_PM */ + +static void pch_gbe_shutdown(struct pci_dev *pdev) +{ + __pch_gbe_suspend(pdev); + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static void pch_gbe_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct pch_gbe_adapter *adapter = netdev_priv(netdev); + + cancel_work_sync(&adapter->reset_task); + unregister_netdev(netdev); + + pch_gbe_hal_phy_hw_reset(&adapter->hw); + + free_netdev(netdev); +} + +static int pch_gbe_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_id) +{ + struct net_device *netdev; + struct pch_gbe_adapter *adapter; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + ret = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "ERR: No usable DMA " + "configuration, aborting\n"); + return ret; + } + } + } + + ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, + "ERR: Can't reserve PCI I/O and memory resources\n"); + return ret; + } + pci_set_master(pdev); + + netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter)); + if (!netdev) + return -ENOMEM; + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.back = adapter; + adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; + adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data; + if (adapter->pdata && adapter->pdata->platform_init) + adapter->pdata->platform_init(pdev); + + adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, + PCI_DEVFN(12, 4)); + + netdev->netdev_ops = &pch_gbe_netdev_ops; + netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; + netif_napi_add(netdev, &adapter->napi, + pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT); + netdev->hw_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features = netdev->hw_features; + pch_gbe_set_ethtool_ops(netdev); + + pch_gbe_mac_load_mac_addr(&adapter->hw); + pch_gbe_mac_reset_hw(&adapter->hw); + + /* setup the private structure */ + ret = pch_gbe_sw_init(adapter); + if (ret) + goto err_free_netdev; + + /* Initialize PHY */ + ret = pch_gbe_init_phy(adapter); + if (ret) { + dev_err(&pdev->dev, "PHY initialize error\n"); + goto err_free_adapter; + } + pch_gbe_hal_get_bus_info(&adapter->hw); + + /* Read the MAC address. and store to the private data */ + ret = pch_gbe_hal_read_mac_addr(&adapter->hw); + if (ret) { + dev_err(&pdev->dev, "MAC address Read Error\n"); + goto err_free_adapter; + } + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + if (!is_valid_ether_addr(netdev->dev_addr)) { + /* + * If the MAC is invalid (or just missing), display a warning + * but do not abort setting up the device. pch_gbe_up will + * prevent the interface from being brought up until a valid MAC + * is set. 
+ */ + dev_err(&pdev->dev, "Invalid MAC address, " + "interface disabled.\n"); + } + setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog, + (unsigned long)adapter); + + INIT_WORK(&adapter->reset_task, pch_gbe_reset_task); + + pch_gbe_check_options(adapter); + + /* initialize the wol settings based on the eeprom settings */ + adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING; + dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr); + + /* reset the hardware with the new settings */ + pch_gbe_reset(adapter); + + ret = register_netdev(netdev); + if (ret) + goto err_free_adapter; + /* tell the stack to leave us alone until pch_gbe_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + dev_dbg(&pdev->dev, "PCH Network Connection\n"); + + /* Disable hibernation on certain platforms */ + if (adapter->pdata && adapter->pdata->phy_disable_hibernate) + pch_gbe_phy_disable_hibernate(&adapter->hw); + + device_set_wakeup_enable(&pdev->dev, 1); + return 0; + +err_free_adapter: + pch_gbe_hal_phy_hw_reset(&adapter->hw); +err_free_netdev: + free_netdev(netdev); + return ret; +} + +/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to + * ensure it is awake for probe and init. Request the line and reset the PHY. + */ +static int pch_gbe_minnow_platform_init(struct pci_dev *pdev) +{ + unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT; + unsigned gpio = MINNOW_PHY_RESET_GPIO; + int ret; + + ret = devm_gpio_request_one(&pdev->dev, gpio, flags, + "minnow_phy_reset"); + if (ret) { + dev_err(&pdev->dev, + "ERR: Can't request PHY reset GPIO line '%d'\n", gpio); + return ret; + } + + gpio_set_value(gpio, 0); + usleep_range(1250, 1500); + gpio_set_value(gpio, 1); + usleep_range(1250, 1500); + + return ret; +} + +static struct pch_gbe_privdata pch_gbe_minnow_privdata = { + .phy_tx_clk_delay = true, + .phy_disable_hibernate = true, + .platform_init = pch_gbe_minnow_platform_init, +}; + +static const struct pci_device_id pch_gbe_pcidev_id[] = { + {.vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, + .subvendor = PCI_VENDOR_ID_CIRCUITCO, + .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00), + .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata + }, + {.vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_IOH1_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, + {.vendor = PCI_VENDOR_ID_ROHM, + .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, + {.vendor = PCI_VENDOR_ID_ROHM, + .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, + /* required last entry */ + {0} +}; + +#ifdef CONFIG_PM +static const struct dev_pm_ops pch_gbe_pm_ops = { + .suspend = pch_gbe_suspend, + .resume = pch_gbe_resume, + .freeze = pch_gbe_suspend, + .thaw = pch_gbe_resume, + .poweroff = pch_gbe_suspend, + .restore = pch_gbe_resume, +}; +#endif + +static const struct pci_error_handlers pch_gbe_err_handler = { + .error_detected = pch_gbe_io_error_detected, + .slot_reset = pch_gbe_io_slot_reset, + .resume = pch_gbe_io_resume +}; + +static struct pci_driver pch_gbe_driver = { + .name = KBUILD_MODNAME, + .id_table = pch_gbe_pcidev_id, + .probe = 
pch_gbe_probe, + .remove = pch_gbe_remove, +#ifdef CONFIG_PM + .driver.pm = &pch_gbe_pm_ops, +#endif + .shutdown = pch_gbe_shutdown, + .err_handler = &pch_gbe_err_handler +}; + + +static int __init pch_gbe_init_module(void) +{ + int ret; + + pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n", DRV_VERSION); + ret = pci_register_driver(&pch_gbe_driver); + if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) { + if (copybreak == 0) { + pr_info("copybreak disabled\n"); + } else { + pr_info("copybreak enabled for packets <= %u bytes\n", + copybreak); + } + } + return ret; +} + +static void __exit pch_gbe_exit_module(void) +{ + pci_unregister_driver(&pch_gbe_driver); +} + +module_init(pch_gbe_init_module); +module_exit(pch_gbe_exit_module); + +MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver"); +MODULE_AUTHOR("LAPIS SEMICONDUCTOR, "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); + +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* pch_gbe_main.c */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c new file mode 100644 index 000000000..08d4be616 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -0,0 +1,521 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This code was derived from the Intel e1000e Linux driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "pch_gbe.h" +#include /* for __MODULE_STRING */ + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/** + * TxDescriptors - Transmit Descriptor Count + * @Valid Range: PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD + * @Default Value: PCH_GBE_DEFAULT_TXD + */ +static int TxDescriptors = OPTION_UNSET; +module_param(TxDescriptors, int, 0); +MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors"); + +/** + * RxDescriptors -Receive Descriptor Count + * @Valid Range: PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD + * @Default Value: PCH_GBE_DEFAULT_RXD + */ +static int RxDescriptors = OPTION_UNSET; +module_param(RxDescriptors, int, 0); +MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors"); + +/** + * Speed - User Specified Speed Override + * @Valid Range: 0, 10, 100, 1000 + * - 0: auto-negotiate at all supported speeds + * - 10: only link at 10 Mbps + * - 100: only link at 100 Mbps + * - 1000: only link at 1000 Mbps + * @Default Value: 0 + */ +static int Speed = OPTION_UNSET; +module_param(Speed, int, 0); +MODULE_PARM_DESC(Speed, "Speed setting"); + +/** + * Duplex - User Specified Duplex Override + * @Valid Range: 0-2 + * - 0: auto-negotiate for duplex + * - 1: only link at half duplex + * - 2: only link at full duplex + * @Default Value: 0 + */ +static int Duplex = OPTION_UNSET; +module_param(Duplex, int, 0); +MODULE_PARM_DESC(Duplex, "Duplex setting"); + +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/** + * AutoNeg - Auto-negotiation Advertisement Override + * @Valid Range: 0x01-0x0F, 0x20-0x2F + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. + * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * @Default Value: 0x2F (copper) + */ +static int AutoNeg = OPTION_UNSET; +module_param(AutoNeg, int, 0); +MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting"); + +#define PHY_ADVERTISE_10_HALF 0x0001 +#define PHY_ADVERTISE_10_FULL 0x0002 +#define PHY_ADVERTISE_100_HALF 0x0004 +#define PHY_ADVERTISE_100_FULL 0x0008 +#define PHY_ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define PHY_ADVERTISE_1000_FULL 0x0020 +#define PCH_AUTONEG_ADVERTISE_DEFAULT 0x2F + +/** + * FlowControl - User Specified Flow Control Override + * @Valid Range: 0-3 + * - 0: No Flow Control + * - 1: Rx only, respond to PAUSE frames but do not generate them + * - 2: Tx only, generate PAUSE frames but ignore them on receive + * - 3: Full Flow Control Support + * @Default Value: Read flow control settings from the EEPROM + */ +static int FlowControl = OPTION_UNSET; +module_param(FlowControl, int, 0); +MODULE_PARM_DESC(FlowControl, "Flow Control setting"); + +/* + * XsumRX - Receive Checksum Offload Enable/Disable + * @Valid Range: 0, 1 + * - 0: disables all checksum offload + * - 1: enables receive IP/TCP/UDP checksum offload + * @Default Value: PCH_GBE_DEFAULT_RX_CSUM + */ +static int XsumRX = OPTION_UNSET; +module_param(XsumRX, int, 0); +MODULE_PARM_DESC(XsumRX, "Disable or enable Receive Checksum offload"); + +#define PCH_GBE_DEFAULT_RX_CSUM true /* trueorfalse */ + +/* + * XsumTX - Transmit Checksum Offload Enable/Disable + * @Valid Range: 0, 1 + * - 0: disables all checksum offload + * - 1: enables transmit IP/TCP/UDP checksum offload + * @Default Value: PCH_GBE_DEFAULT_TX_CSUM + */ +static int XsumTX = OPTION_UNSET; +module_param(XsumTX, int, 0); 
+MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload"); + +#define PCH_GBE_DEFAULT_TX_CSUM true /* trueorfalse */ + +/** + * pch_gbe_option - Force the MAC's flow control settings + * @hw: Pointer to the HW structure + * Returns: + * 0: Successful. + * Negative value: Failed. + */ +struct pch_gbe_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct pch_gbe_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static const struct pch_gbe_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" } +}; + +static const struct pch_gbe_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" } +}; + +static const struct pch_gbe_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" } +}; + +static const struct pch_gbe_opt_list fc_list[] = { + { PCH_GBE_FC_NONE, "Flow Control Disabled" }, + { PCH_GBE_FC_RX_PAUSE, "Flow Control Receive Only" }, + { PCH_GBE_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { PCH_GBE_FC_FULL, "Flow Control Enabled" } +}; + +/** + * pch_gbe_validate_option - Validate option + * @value: value + * @opt: option + * @adapter: Board private structure + * Returns: + * 0: Successful. + * Negative value: Failed. 
+ */ +static int pch_gbe_validate_option(int *value, + const struct pch_gbe_option *opt, + struct pch_gbe_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_dbg(adapter->netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_dbg(adapter->netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + netdev_dbg(adapter->netdev, "%s set to %i\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + const struct pch_gbe_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_dbg(adapter->netdev, "%s\n", + ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + netdev_dbg(adapter->netdev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * pch_gbe_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: Board private structure + */ +static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + int speed, dplx; + + { /* Speed */ + static const struct pch_gbe_option opt = { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = (int)ARRAY_SIZE(speed_list), + .p = speed_list } } + }; + speed = Speed; + pch_gbe_validate_option(&speed, &opt, adapter); + } + { /* Duplex */ + static const struct pch_gbe_option opt = { + .type = list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = (int)ARRAY_SIZE(dplx_list), + .p = dplx_list } } + }; + dplx = Duplex; + pch_gbe_validate_option(&dplx, &opt, adapter); + } + + { /* Autoneg */ + static const struct pch_gbe_option opt = { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = PCH_AUTONEG_ADVERTISE_DEFAULT, + .arg = { .l = { .nr = (int)ARRAY_SIZE(an_list), + .p = an_list} } + }; + if (speed || dplx) { + netdev_dbg(adapter->netdev, + "AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n"); + hw->phy.autoneg_advertised = opt.def; + } else { + int tmp = AutoNeg; + + pch_gbe_validate_option(&tmp, &opt, adapter); + hw->phy.autoneg_advertised = tmp; + } + } + + switch (speed + dplx) { + case 0: + hw->mac.autoneg = hw->mac.fc_autoneg = 1; + if ((speed || dplx)) + netdev_dbg(adapter->netdev, + "Speed and duplex autonegotiation enabled\n"); + hw->mac.link_speed = SPEED_10; + hw->mac.link_duplex = DUPLEX_HALF; + break; + case HALF_DUPLEX: + netdev_dbg(adapter->netdev, + "Half Duplex specified without Speed\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at Half Duplex only\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 1; + hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | + PHY_ADVERTISE_100_HALF; + hw->mac.link_speed = SPEED_10; + hw->mac.link_duplex = DUPLEX_HALF; + break; + case FULL_DUPLEX: + netdev_dbg(adapter->netdev, + "Full Duplex specified without Speed\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at Full Duplex only\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 1; + hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL | + PHY_ADVERTISE_100_FULL | + PHY_ADVERTISE_1000_FULL; + hw->mac.link_speed = SPEED_10; + hw->mac.link_duplex = DUPLEX_FULL; + break; + case 
SPEED_10: + netdev_dbg(adapter->netdev, + "10 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 10 Mbps only\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 1; + hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF | + PHY_ADVERTISE_10_FULL; + hw->mac.link_speed = SPEED_10; + hw->mac.link_duplex = DUPLEX_HALF; + break; + case SPEED_10 + HALF_DUPLEX: + netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Half Duplex\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 0; + hw->phy.autoneg_advertised = 0; + hw->mac.link_speed = SPEED_10; + hw->mac.link_duplex = DUPLEX_HALF; + break; + case SPEED_10 + FULL_DUPLEX: + netdev_dbg(adapter->netdev, "Forcing to 10 Mbps Full Duplex\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 0; + hw->phy.autoneg_advertised = 0; + hw->mac.link_speed = SPEED_10; + hw->mac.link_duplex = DUPLEX_FULL; + break; + case SPEED_100: + netdev_dbg(adapter->netdev, + "100 Mbps Speed specified without Duplex\n"); + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 100 Mbps only\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 1; + hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF | + PHY_ADVERTISE_100_FULL; + hw->mac.link_speed = SPEED_100; + hw->mac.link_duplex = DUPLEX_HALF; + break; + case SPEED_100 + HALF_DUPLEX: + netdev_dbg(adapter->netdev, + "Forcing to 100 Mbps Half Duplex\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 0; + hw->phy.autoneg_advertised = 0; + hw->mac.link_speed = SPEED_100; + hw->mac.link_duplex = DUPLEX_HALF; + break; + case SPEED_100 + FULL_DUPLEX: + netdev_dbg(adapter->netdev, + "Forcing to 100 Mbps Full Duplex\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 0; + hw->phy.autoneg_advertised = 0; + hw->mac.link_speed = SPEED_100; + hw->mac.link_duplex = DUPLEX_FULL; + break; + case SPEED_1000: + netdev_dbg(adapter->netdev, + "1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + netdev_dbg(adapter->netdev, + "Half Duplex is not supported at 1000 Mbps\n"); + /* fall through */ + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + netdev_dbg(adapter->netdev, + "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); + hw->mac.autoneg = hw->mac.fc_autoneg = 1; + hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL; + hw->mac.link_speed = SPEED_1000; + hw->mac.link_duplex = DUPLEX_FULL; + break; + default: + BUG(); + } +} + +/** + * pch_gbe_check_options - Range Checking for Command Line Parameters + * @adapter: Board private structure + */ +void pch_gbe_check_options(struct pch_gbe_adapter *adapter) +{ + struct pch_gbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; + int val; + + { /* Transmit Descriptor Count */ + static const struct pch_gbe_option opt = { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(PCH_GBE_DEFAULT_TXD), + .def = PCH_GBE_DEFAULT_TXD, + .arg = { .r = { .min = PCH_GBE_MIN_TXD, + .max = PCH_GBE_MAX_TXD } } + }; + struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; + tx_ring->count = TxDescriptors; + pch_gbe_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = roundup(tx_ring->count, + PCH_GBE_TX_DESC_MULTIPLE); + } + { /* Receive Descriptor Count */ + static const struct pch_gbe_option opt = { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(PCH_GBE_DEFAULT_RXD), + .def = PCH_GBE_DEFAULT_RXD, + .arg = { .r = { .min = PCH_GBE_MIN_RXD, + .max = PCH_GBE_MAX_RXD } } + }; + struct pch_gbe_rx_ring *rx_ring = 
adapter->rx_ring; + rx_ring->count = RxDescriptors; + pch_gbe_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = roundup(rx_ring->count, + PCH_GBE_RX_DESC_MULTIPLE); + } + { /* Checksum Offload Enable/Disable */ + static const struct pch_gbe_option opt = { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = PCH_GBE_DEFAULT_RX_CSUM + }; + val = XsumRX; + pch_gbe_validate_option(&val, &opt, adapter); + if (!val) + dev->features &= ~NETIF_F_RXCSUM; + } + { /* Checksum Offload Enable/Disable */ + static const struct pch_gbe_option opt = { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = PCH_GBE_DEFAULT_TX_CSUM + }; + val = XsumTX; + pch_gbe_validate_option(&val, &opt, adapter); + if (!val) + dev->features &= ~NETIF_F_ALL_CSUM; + } + { /* Flow Control */ + static const struct pch_gbe_option opt = { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = PCH_GBE_FC_DEFAULT, + .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list), + .p = fc_list } } + }; + int tmp = FlowControl; + + pch_gbe_validate_option(&tmp, &opt, adapter); + hw->mac.fc = tmp; + } + + pch_gbe_check_copper_options(adapter); +} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c new file mode 100644 index 000000000..a5cad5ea9 --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -0,0 +1,377 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This code was derived from the Intel e1000e Linux driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
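To make the descriptor-count handling in pch_gbe_check_options() above concrete: each ring size falls back to the default when the module parameter is unset or out of range (that is what pch_gbe_validate_option() does), and is then rounded up to the hardware's descriptor multiple. A stand-alone sketch with made-up limits, since the real PCH_GBE_MIN/MAX/DEFAULT values live in pch_gbe.h and are not part of this hunk:

#include <stdio.h>

#define OPTION_UNSET	-1
/* Placeholder limits for illustration only, not the driver's values. */
#define EX_MIN_TXD	64
#define EX_MAX_TXD	4096
#define EX_DEFAULT_TXD	256
#define EX_TXD_MULTIPLE	8

#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

static int check_txd(int requested)
{
	int count = requested;

	if (count == OPTION_UNSET || count < EX_MIN_TXD || count > EX_MAX_TXD)
		count = EX_DEFAULT_TXD;		/* validate_option() behaviour */

	return roundup(count, EX_TXD_MULTIPLE);	/* check_options() then rounds up */
}

int main(void)
{
	printf("unset -> %d\n", check_txd(OPTION_UNSET));	/* 256 */
	printf("100   -> %d\n", check_txd(100));		/* 104 */
	printf("9999  -> %d\n", check_txd(9999));		/* 256 */
	return 0;
}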
+ */ + +#include "pch_gbe.h" +#include "pch_gbe_phy.h" + +#define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Regiser */ +#define PHY_ID1 0x02 /* Phy Id Register (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Register (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Register */ +#define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */ +#define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */ +#define PHY_LED_CONTROL 0x18 /* LED Control Register */ +#define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */ + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* AR8031 PHY Debug Registers */ +#define PHY_AR803X_ID 0x00001374 +#define PHY_AR8031_DBG_OFF 0x1D +#define PHY_AR8031_DBG_DAT 0x1E +#define PHY_AR8031_SERDES 0x05 +#define PHY_AR8031_HIBERNATE 0x0B +#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */ +#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */ + +/* Phy Id Register (word 2) */ +#define PHY_REVISION_MASK 0x000F + +/* PHY Specific Control Register */ +#define PHYSP_CTRL_ASSERT_CRS_TX 0x0800 + + +/* Default value of PHY register */ +#define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */ +#define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */ +#define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */ +#define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */ +#define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */ + +/** + * pch_gbe_phy_get_id - Retrieve the PHY ID and revision + * @hw: Pointer to the HW structure + * Returns + * 0: Successful. + * Negative value: Failed. + */ +s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + struct pch_gbe_phy_info *phy = &hw->phy; + s32 ret; + u16 phy_id1; + u16 phy_id2; + + ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID1, &phy_id1); + if (ret) + return ret; + ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID2, &phy_id2); + if (ret) + return ret; + /* + * PHY_ID1: [bit15-0:ID(21-6)] + * PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision] + */ + phy->id = (u32)phy_id1; + phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10)); + phy->revision = (u32) (phy_id2 & 0x000F); + netdev_dbg(adapter->netdev, + "phy->id : 0x%08x phy->revision : 0x%08x\n", + phy->id, phy->revision); + return 0; +} + +/** + * pch_gbe_phy_read_reg_miic - Read MII control register + * @hw: Pointer to the HW structure + * @offset: Register offset to be read + * @data: Pointer to the read data + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. + */ +s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data) +{ + struct pch_gbe_phy_info *phy = &hw->phy; + + if (offset > PHY_MAX_REG_ADDRESS) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "PHY Address %d is out of range\n", + offset); + return -EINVAL; + } + *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ, + offset, (u16)0); + return 0; +} + +/** + * pch_gbe_phy_write_reg_miic - Write MII control register + * @hw: Pointer to the HW structure + * @offset: Register offset to be read + * @data: data to write to register at offset + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. 
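The shift/mask sequence in pch_gbe_phy_get_id() above folds the two MII ID words into a 22-bit identifier plus a 4-bit revision (the model field in bits 9..4 of PHY_ID2 is not kept). A stand-alone check, with register values chosen so the result matches the PHY_AR803X_ID (0x1374) defined above:

#include <stdio.h>

int main(void)
{
	unsigned short phy_id1 = 0x004d;	/* example ID register contents */
	unsigned short phy_id2 = 0xd074;	/* for an AR803x-family PHY */
	unsigned int id, revision;

	/* PHY_ID1 carries ID bits 21..6, the top six bits of PHY_ID2 carry
	 * bits 5..0, and the low nibble of PHY_ID2 is the revision.
	 */
	id = (unsigned int)phy_id1;
	id = (id << 6) | ((phy_id2 & 0xFC00) >> 10);
	revision = phy_id2 & 0x000F;

	printf("id=0x%08x revision=0x%x\n", id, revision);	/* 0x00001374, 0x4 */
	return 0;
}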
+ */ +s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data) +{ + struct pch_gbe_phy_info *phy = &hw->phy; + + if (offset > PHY_MAX_REG_ADDRESS) { + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + + netdev_err(adapter->netdev, "PHY Address %d is out of range\n", + offset); + return -EINVAL; + } + pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE, + offset, data); + return 0; +} + +/** + * pch_gbe_phy_sw_reset - PHY software reset + * @hw: Pointer to the HW structure + */ +void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw) +{ + u16 phy_ctrl; + + pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &phy_ctrl); + phy_ctrl |= MII_CR_RESET; + pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, phy_ctrl); + udelay(1); +} + +/** + * pch_gbe_phy_hw_reset - PHY hardware reset + * @hw: Pointer to the HW structure + */ +void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw) +{ + pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT); + pch_gbe_phy_write_reg_miic(hw, PHY_AUTONEG_ADV, + PHY_AUTONEG_ADV_DEFAULT); + pch_gbe_phy_write_reg_miic(hw, PHY_NEXT_PAGE_TX, + PHY_NEXT_PAGE_TX_DEFAULT); + pch_gbe_phy_write_reg_miic(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT); + pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, + PHY_PHYSP_CONTROL_DEFAULT); +} + +/** + * pch_gbe_phy_power_up - restore link in case the phy was powered down + * @hw: Pointer to the HW structure + */ +void pch_gbe_phy_power_up(struct pch_gbe_hw *hw) +{ + u16 mii_reg; + + mii_reg = 0; + /* Just clear the power down bit to wake the phy back up */ + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle */ + pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg); +} + +/** + * pch_gbe_phy_power_down - Power down PHY + * @hw: Pointer to the HW structure + */ +void pch_gbe_phy_power_down(struct pch_gbe_hw *hw) +{ + u16 mii_reg; + + mii_reg = 0; + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is TRUE * + * (a) WoL is enabled + * (b) AMT is active + */ + pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg); + mdelay(1); +} + +/** + * pch_gbe_phy_set_rgmii - RGMII interface setting + * @hw: Pointer to the HW structure + */ +void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw) +{ + pch_gbe_phy_sw_reset(hw); +} + +/** + * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY + * @hw: Pointer to the HW structure + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. + */ +static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw) +{ + /* The RGMII interface requires a ~2ns TX clock delay. This is typically + * done in layout with a longer trace or via PHY strapping, but can also + * be done via PHY configuration registers. 
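This function and pch_gbe_phy_disable_hibernate() further below use the same indirect access sequence on the AR803X: write the register selector to PHY_AR8031_DBG_OFF, then read-modify-write the value through PHY_AR8031_DBG_DAT. A hypothetical helper (not present in the driver) that factors out the sequence, built only on the pch_gbe_phy_*_reg_miic() accessors defined above:

/* Sketch only: select an AR803X debug register and clear/set bits in it. */
static int pch_gbe_phy_ar803x_dbg_rmw(struct pch_gbe_hw *hw, u16 dbg_reg,
				       u16 clear, u16 set)
{
	u16 val;
	int ret;

	ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, dbg_reg);
	if (ret)
		return ret;

	ret = pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &val);
	if (ret)
		return ret;

	return pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
					  (val & ~clear) | set);
}

With such a helper the TX clock delay case below would reduce to pch_gbe_phy_ar803x_dbg_rmw(hw, PHY_AR8031_SERDES, 0, PHY_AR8031_SERDES_TX_CLK_DLY), and the hibernation case to pch_gbe_phy_ar803x_dbg_rmw(hw, PHY_AR8031_HIBERNATE, PHY_AR8031_PS_HIB_EN, 0).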
+ */ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u16 mii_reg; + int ret = 0; + + switch (hw->phy.id) { + case PHY_AR803X_ID: + netdev_dbg(adapter->netdev, + "Configuring AR803X PHY for 2ns TX clock delay\n"); + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg); + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, + PHY_AR8031_SERDES); + if (ret) + break; + + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg); + mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY; + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, + mii_reg); + break; + default: + netdev_err(adapter->netdev, + "Unknown PHY (%x), could not set TX clock delay\n", + hw->phy.id); + return -EINVAL; + } + + if (ret) + netdev_err(adapter->netdev, + "Could not configure tx clock delay for PHY\n"); + return ret; +} + +/** + * pch_gbe_phy_init_setting - PHY initial setting + * @hw: Pointer to the HW structure + */ +void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; + int ret; + u16 mii_reg; + + ret = mii_ethtool_gset(&adapter->mii, &cmd); + if (ret) + netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n"); + + ethtool_cmd_speed_set(&cmd, hw->mac.link_speed); + cmd.duplex = hw->mac.link_duplex; + cmd.advertising = hw->phy.autoneg_advertised; + cmd.autoneg = hw->mac.autoneg; + pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET); + ret = mii_ethtool_sset(&adapter->mii, &cmd); + if (ret) + netdev_err(adapter->netdev, "Error: mii_ethtool_sset\n"); + + pch_gbe_phy_sw_reset(hw); + + pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg); + mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX; + pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg); + + /* Setup a TX clock delay on certain platforms */ + if (adapter->pdata && adapter->pdata->phy_tx_clk_delay) + pch_gbe_phy_tx_clk_delay(hw); +} + +/** + * pch_gbe_phy_disable_hibernate - Disable the PHY low power state + * @hw: Pointer to the HW structure + * Returns + * 0: Successful. + * -EINVAL: Invalid argument. + */ +int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw) +{ + struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw); + u16 mii_reg; + int ret = 0; + + switch (hw->phy.id) { + case PHY_AR803X_ID: + netdev_dbg(adapter->netdev, + "Disabling hibernation for AR803X PHY\n"); + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, + PHY_AR8031_HIBERNATE); + if (ret) + break; + + pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg); + mii_reg &= ~PHY_AR8031_PS_HIB_EN; + ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT, + mii_reg); + break; + default: + netdev_err(adapter->netdev, + "Unknown PHY (%x), could not disable hibernation\n", + hw->phy.id); + return -EINVAL; + } + + if (ret) + netdev_err(adapter->netdev, + "Could not disable PHY hibernation\n"); + return ret; +} diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h new file mode 100644 index 000000000..95ad0151a --- /dev/null +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 1999 - 2010 Intel Corporation. + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This code was derived from the Intel e1000e Linux driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#ifndef _PCH_GBE_PHY_H_ +#define _PCH_GBE_PHY_H_ + +#define PCH_GBE_PHY_REGS_LEN 32 +#define PCH_GBE_PHY_RESET_DELAY_US 10 +#define PCH_GBE_MAC_IFOP_RGMII + +s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw); +s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data); +s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data); +void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw); +void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw); +void pch_gbe_phy_power_up(struct pch_gbe_hw *hw); +void pch_gbe_phy_power_down(struct pch_gbe_hw *hw); +void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw); +void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw); +int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw); + +#endif /* _PCH_GBE_PHY_H_ */ diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig new file mode 100644 index 000000000..8d5180043 --- /dev/null +++ b/drivers/net/ethernet/packetengines/Kconfig @@ -0,0 +1,47 @@ +# +# Packet engine device configuration +# + +config NET_PACKET_ENGINE + bool "Packet Engine devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about packet engine devices. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_PACKET_ENGINE + +config HAMACHI + tristate "Packet Engines Hamachi GNIC-II support" + depends on PCI + select MII + ---help--- + If you have a Gigabit Ethernet card of this type, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here. The module will be + called hamachi. + +config YELLOWFIN + tristate "Packet Engines Yellowfin Gigabit-NIC support" + depends on PCI + select CRC32 + ---help--- + Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet + adapter or the SYM53C885 Ethernet controller. The Gigabit adapter is + used by the Beowulf Linux cluster project. See + for more + information about this driver in particular and Beowulf in general. + + To compile this driver as a module, choose M here: the module + will be called yellowfin. This is recommended. + +endif # NET_PACKET_ENGINE diff --git a/drivers/net/ethernet/packetengines/Makefile b/drivers/net/ethernet/packetengines/Makefile new file mode 100644 index 000000000..995ccd077 --- /dev/null +++ b/drivers/net/ethernet/packetengines/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Packet Engine network device drivers. +# + +obj-$(CONFIG_HAMACHI) += hamachi.o +obj-$(CONFIG_YELLOWFIN) += yellowfin.o diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c new file mode 100644 index 000000000..13d88a602 --- /dev/null +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -0,0 +1,1943 @@ +/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */ +/* + Written 1998-2000 by Donald Becker. + Updates 2000 by Keith Underwood. 
+ + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet + adapter. + + Support and updates available at + http://www.scyld.com/network/hamachi.html + [link no longer provides useful info -jgarzik] + or + http://www.parl.clemson.edu/~keithu/hamachi.html + +*/ + +#define DRV_NAME "hamachi" +#define DRV_VERSION "2.1" +#define DRV_RELDATE "Sept 11, 2006" + + +/* A few user-configurable values. */ + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ +#define final_version +#define hamachi_debug debug +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 40; +static int mtu; +/* Default values selected by testing on a dual processor PIII-450 */ +/* These six interrupt control parameters may be set directly when loading the + * module, or through the rx_params and tx_params variables + */ +static int max_rx_latency = 0x11; +static int max_rx_gap = 0x05; +static int min_rx_pkt = 0x18; +static int max_tx_latency = 0x00; +static int max_tx_gap = 0x00; +static int min_tx_pkt = 0x30; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + -Setting to > 1518 causes all frames to be copied + -Setting to 0 disables copies +*/ +static int rx_copybreak; + +/* An override for the hardware detection of bus width. + Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit. + Add 2 to disable parity detection. +*/ +static int force32; + + +/* Used to pass the media type, etc. + These exist for driver interoperability. + No media types are currently defined. + - The lower 4 bits are reserved for the media type. + - The next three bits may be set to one of the following: + 0x00000000 : Autodetect PCI bus + 0x00000010 : Force 32 bit PCI bus + 0x00000020 : Disable parity detection + 0x00000040 : Force 64 bit PCI bus + Default is autodetect + - The next bit can be used to force half-duplex. This is a bad + idea since no known implementations implement half-duplex, and, + in general, half-duplex for gigabit ethernet is a bad idea. + 0x00000080 : Force half-duplex + Default is full-duplex. + - In the original driver, the ninth bit could be used to force + full-duplex. Maintain that for compatibility + 0x00000200 : Force full-duplex +*/ +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +/* The Hamachi chipset supports 3 parameters each for Rx and Tx + * interruput management. Parameters will be loaded as specified into + * the TxIntControl and RxIntControl registers. + * + * The registers are arranged as follows: + * 23 - 16 15 - 8 7 - 0 + * _________________________________ + * | min_pkt | max_gap | max_latency | + * --------------------------------- + * min_pkt : The minimum number of packets processed between + * interrupts. 
+ * max_gap : The maximum inter-packet gap in units of 8.192 us + * max_latency : The absolute time between interrupts in units of 8.192 us + * + */ +static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + The compiler will convert '%'<2^N> into a bit mask. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings, except for + excessive memory usage */ +/* Empirically it appears that the Tx ring needs to be a little bigger + for these Gbit adapters or you get into an overrun condition really + easily. Also, things appear to work a bit better in back-to-back + configurations if the Rx ring is 8 times the size of the Tx ring +*/ +#define TX_RING_SIZE 64 +#define RX_RING_SIZE 512 +#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc) +#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct hamachi_desc) + +/* + * Enable netdev_ioctl. Added interrupt coalescing parameter adjustment. + * 2/19/99 Pete Wyckoff + */ + +/* play with 64-bit addrlen; seems to be a teensy bit slower --pw */ +/* #define ADDRLEN 64 */ + +/* + * RX_CHECKSUM turns on card-generated receive checksum generation for + * TCP and UDP packets. Otherwise the upper layers do the calculation. + * 3/10/1999 Pete Wyckoff + */ +#define RX_CHECKSUM + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (5*HZ) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include /* Processor type for cache alignment. */ +#include +#include +#include + +static const char version[] = +KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" +" Some modifications by Eric kasten \n" +" Further modifications by Keith Underwood \n"; + + +/* IP_MF appears to be only defined in , however, + we need it for hardware checksumming support. FYI... some of + the definitions in conflict/duplicate those in + other linux headers causing many compiler warnings. +*/ +#ifndef IP_MF + #define IP_MF 0x2000 /* IP more frags from */ +#endif + +/* Define IP_OFFSET to be IPOPT_OFFSET */ +#ifndef IP_OFFSET + #ifdef IPOPT_OFFSET + #define IP_OFFSET IPOPT_OFFSET + #else + #define IP_OFFSET 2 + #endif +#endif + +#define RUN_AT(x) (jiffies + (x)) + +#ifndef ADDRLEN +#define ADDRLEN 32 +#endif + +/* Condensed bus+endian portability operations. */ +#if ADDRLEN == 64 +#define cpu_to_leXX(addr) cpu_to_le64(addr) +#define leXX_to_cpu(addr) le64_to_cpu(addr) +#else +#define cpu_to_leXX(addr) cpu_to_le32(addr) +#define leXX_to_cpu(addr) le32_to_cpu(addr) +#endif + + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the Packet Engines "Hamachi" +Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit +66Mhz PCI card. + +II. Board-specific settings + +No jumpers exist on the board. The chip supports software correction of +various motherboard wiring errors, however this driver does not support +that feature. + +III. Driver operation + +IIIa. Ring buffers + +The Hamachi uses a typical descriptor based bus-master architecture. 
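As the ring-size comment earlier in this file notes, TX_RING_SIZE and RX_RING_SIZE are kept at powers of two so the compiler can turn the ring-index modulo into a bit mask. A stand-alone illustration of that equivalence (not driver code):

#include <assert.h>
#include <stdio.h>

#define TX_RING_SIZE 64			/* must remain a power of two */

int main(void)
{
	unsigned int entry;

	for (entry = 0; entry < 1000; entry++)
		assert(entry % TX_RING_SIZE == (entry & (TX_RING_SIZE - 1)));

	/* entry 130 wraps around to ring slot 2 */
	printf("entry 130 -> slot %u\n", 130u % TX_RING_SIZE);
	return 0;
}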
+The descriptor list is similar to that used by the Digital Tulip. +This driver uses two statically allocated fixed-size descriptor lists +formed into rings by a branch from the final descriptor to the beginning of +the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. + +This driver uses a zero-copy receive and transmit scheme similar my other +network drivers. +The driver allocates full frame size skbuffs for the Rx ring buffers at +open() time and passes the skb->data field to the Hamachi as receive data +buffers. When an incoming frame is less than RX_COPYBREAK bytes long, +a fresh skbuff is allocated and the frame is copied to the new skbuff. +When the incoming frame is larger, the skbuff is passed directly up the +protocol stack and replaced by a newly allocated skbuff. + +The RX_COPYBREAK value is chosen to trade-off the memory wasted by +using a full-sized skbuff for small frames vs. the copying costs of larger +frames. Gigabit cards are typically used on generously configured machines +and the underfilled buffers have negligible impact compared to the benefit of +a single allocation size, so the default value of zero results in never +copying packets. + +IIIb/c. Transmit/Receive Structure + +The Rx and Tx descriptor structure are straight-forward, with no historical +baggage that must be explained. Unlike the awkward DBDMA structure, there +are no unused fields or option bits that had only one allowable setting. + +Two details should be noted about the descriptors: The chip supports both 32 +bit and 64 bit address structures, and the length field is overwritten on +the receive descriptors. The descriptor length is set in the control word +for each channel. The development driver uses 32 bit addresses only, however +64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha. + +IIId. Synchronization + +This driver is very similar to my other network drivers. +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +The send packet thread has partial control over the Tx ring and 'dev->tbusy' +flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next +queue slot is empty, it clears the tbusy flag when finished otherwise it sets +the 'hmp->tx_full' flag. + +The interrupt handler has exclusive control over the Rx ring and records stats +from the Tx ring. After reaping the stats, it marks the Tx queue entry as +empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it +clears both the tx_full and tbusy flags. + +IV. Notes + +Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards. + +IVb. References + +Hamachi Engineering Design Specification, 5/15/97 +(Note: This version was marked "Confidential".) + +IVc. Errata + +None noted. + +V. Recent Changes + +01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears + to help avoid some stall conditions -- this needs further research. + +01/15/1999 EPK Creation of the hamachi_tx function. This function cleans + the Tx ring and is called from hamachi_start_xmit (this used to be + called from hamachi_interrupt but it tends to delay execution of the + interrupt handler and thus reduce bandwidth by reducing the latency + between hamachi_rx()'s). 
Notably, some modification has been made so + that the cleaning loop checks only to make sure that the DescOwn bit + isn't set in the status flag since the card is not required + to set the entire flag to zero after processing. + +01/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is + checked before attempting to add a buffer to the ring. If the ring is full + an attempt is made to free any dirty buffers and thus find space for + the new buffer or the function returns non-zero which should case the + scheduler to reschedule the buffer later. + +01/15/1999 EPK Some adjustments were made to the chip initialization. + End-to-end flow control should now be fully active and the interrupt + algorithm vars have been changed. These could probably use further tuning. + +01/15/1999 EPK Added the max_{rx,tx}_latency options. These are used to + set the rx and tx latencies for the Hamachi interrupts. If you're having + problems with network stalls, try setting these to higher values. + Valid values are 0x00 through 0xff. + +01/15/1999 EPK In general, the overall bandwidth has increased and + latencies are better (sometimes by a factor of 2). Stalls are rare at + this point, however there still appears to be a bug somewhere between the + hardware and driver. TCP checksum errors under load also appear to be + eliminated at this point. + +01/18/1999 EPK Ensured that the DescEndRing bit was being set on both the + Rx and Tx rings. This appears to have been affecting whether a particular + peer-to-peer connection would hang under high load. I believe the Rx + rings was typically getting set correctly, but the Tx ring wasn't getting + the DescEndRing bit set during initialization. ??? Does this mean the + hamachi card is using the DescEndRing in processing even if a particular + slot isn't in use -- hypothetically, the card might be searching the + entire Tx ring for slots with the DescOwn bit set and then processing + them. If the DescEndRing bit isn't set, then it might just wander off + through memory until it hits a chunk of data with that bit set + and then looping back. + +02/09/1999 EPK Added Michel Mueller's TxDMA Interrupt and Tx-timeout + problem (TxCmd and RxCmd need only to be set when idle or stopped. + +02/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt. + (Michel Mueller pointed out the ``permanently busy'' potential + problem here). + +02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies. + +02/23/1999 EPK Verified that the interrupt status field bits for Tx were + incorrectly defined and corrected (as per Michel Mueller). + +02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots + were available before resetting the tbusy and tx_full flags + (as per Michel Mueller). + +03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support. + +12/31/1999 KDU Cleaned up assorted things and added Don's code to force +32 bit. + +02/20/2000 KDU Some of the control was just plain odd. Cleaned up the +hamachi_start_xmit() and hamachi_interrupt() code. There is still some +re-structuring I would like to do. + +03/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation +parameters on a dual P3-450 setup yielded the new default interrupt +mitigation parameters. Tx should interrupt VERY infrequently due to +Eric's scheme. Rx should be more often... + +03/13/2000 KDU Added a patch to make the Rx Checksum code interact +nicely with non-linux machines. 
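The copy-only-tiny-frames scheme described in section IIIa above (and controlled by the rx_copybreak parameter) boils down to one decision on the receive path. A minimal sketch, illustrative only and not the driver's hamachi_rx(); the function and parameter names here are made up:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/*
 * For frames shorter than the copybreak threshold, allocate a small skb
 * and copy the data so the full-sized buffer stays in the Rx ring; for
 * larger frames hand the ring skb straight up and let the caller refill
 * that ring slot with a freshly allocated skb.
 */
static struct sk_buff *rx_copybreak_sketch(struct net_device *dev,
					   struct sk_buff *ring_skb,
					   int pkt_len, int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len < copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		memcpy(skb_put(skb, pkt_len), ring_skb->data, pkt_len);
		return skb;		/* ring_skb is reused in place */
	}

	skb_put(ring_skb, pkt_len);
	return ring_skb;		/* caller replaces the ring slot */
}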
+ +03/13/2000 KDU Experimented with some of the configuration values: + + -It seems that enabling PCI performance commands for descriptors + (changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal + performance impact for any of my tests. (ttcp, netpipe, netperf) I will + leave them that way until I hear further feedback. + + -Increasing the PCI_LATENCY_TIMER to 130 + (2 + (burst size of 128 * (0 wait states + 1))) seems to slightly + degrade performance. Leaving default at 64 pending further information. + +03/14/2000 KDU Further tuning: + + -adjusted boguscnt in hamachi_rx() to depend on interrupt + mitigation parameters chosen. + + -Selected a set of interrupt parameters based on some extensive testing. + These may change with more testing. + +TO DO: + +-Consider borrowing from the acenic driver code to check PCI_COMMAND for +PCI_COMMAND_INVALIDATE. Set maximum burst size to cache line size in +that case. + +-fix the reset procedure. It doesn't quite work. +*/ + +/* A few values that may be tweaked. */ +/* Size of each temporary Rx buffer, calculated as: + * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for + * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum + */ +#define PKT_BUF_SZ 1536 + +/* For now, this is going to be set to the maximum size of an ethernet + * packet. Eventually, we may want to make it a variable that is + * related to the MTU + */ +#define MAX_FRAME_SIZE 1518 + +/* The rest of these values should never change. */ + +static void hamachi_timer(unsigned long data); + +enum capability_flags {CanHaveMII=1, }; +static const struct chip_info { + u16 vendor_id, device_id, device_id_mask, pad; + const char *name; + void (*media_timer)(unsigned long data); + int flags; +} chip_tbl[] = { + {0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0}, + {0,}, +}; + +/* Offsets to the Hamachi registers. Various sizes. */ +enum hamachi_offsets { + TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10, + RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30, + PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B, + LEDCtrl=0x06C, VirtualJumpers=0x06D, GPIO=0x6E, + TxChecksum=0x074, RxChecksum=0x076, + TxIntrCtrl=0x078, RxIntrCtrl=0x07C, + InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088, + EventStatus=0x08C, + MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4, + /* See enum MII_offsets below. */ + MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE, + AddrMode=0x0D0, StationAddr=0x0D2, + /* Gigabit AutoNegotiation. */ + ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8, + ANLinkPartnerAbility=0x0EA, + EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2, + FIFOcfg=0x0F8, +}; + +/* Offsets to the MII-mode registers. */ +enum MII_offsets { + MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC, + MII_Status=0xAE, +}; + +/* Bits in the interrupt status/mask registers. */ +enum intr_status_bits { + IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04, + IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400, + LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, }; + +/* The Hamachi Rx and Tx buffer descriptors. 
*/ +struct hamachi_desc { + __le32 status_n_length; +#if ADDRLEN == 64 + u32 pad; + __le64 addr; +#else + __le32 addr; +#endif +}; + +/* Bits in hamachi_desc.status_n_length */ +enum desc_status_bits { + DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000, + DescIntr=0x10000000, +}; + +#define PRIV_ALIGN 15 /* Required alignment mask */ +#define MII_CNT 4 +struct hamachi_private { + /* Descriptor rings first for alignment. Tx requires a second descriptor + for status. */ + struct hamachi_desc *rx_ring; + struct hamachi_desc *tx_ring; + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + dma_addr_t tx_ring_dma; + dma_addr_t rx_ring_dma; + struct timer_list timer; /* Media selection timer. */ + /* Frequently used and paired value: keep adjacent for cache effect. */ + spinlock_t lock; + int chip_id; + unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ + unsigned int cur_tx, dirty_tx; + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + unsigned int tx_full:1; /* The Tx queue is full. */ + unsigned int duplex_lock:1; + unsigned int default_port:4; /* Last dev->if_port value. */ + /* MII transceiver section. */ + int mii_cnt; /* MII device addresses. */ + struct mii_if_info mii_if; /* MII lib hooks/info */ + unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ + u32 rx_int_var, tx_int_var; /* interrupt control variables */ + u32 option; /* Hold on to a copy of the options */ + struct pci_dev *pci_dev; + void __iomem *base; +}; + +MODULE_AUTHOR("Donald Becker , Eric Kasten , Keith Underwood "); +MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(max_interrupt_work, int, 0); +module_param(mtu, int, 0); +module_param(debug, int, 0); +module_param(min_rx_pkt, int, 0); +module_param(max_rx_gap, int, 0); +module_param(max_rx_latency, int, 0); +module_param(min_tx_pkt, int, 0); +module_param(max_tx_gap, int, 0); +module_param(max_tx_latency, int, 0); +module_param(rx_copybreak, int, 0); +module_param_array(rx_params, int, NULL, 0); +module_param_array(tx_params, int, NULL, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +module_param(force32, int, 0); +MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt"); +MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)"); +MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)"); +MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts"); +MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units"); +MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units"); +MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts"); +MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units"); +MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units"); +MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency"); +MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency"); +MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex"); +MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)"); +MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: 
disable parity, bit 2: 64 bit PCI (all boards)"); + +static int read_eeprom(void __iomem *ioaddr, int location); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int location, int value); +static int hamachi_open(struct net_device *dev); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void hamachi_timer(unsigned long data); +static void hamachi_tx_timeout(struct net_device *dev); +static void hamachi_init_ring(struct net_device *dev); +static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t hamachi_interrupt(int irq, void *dev_instance); +static int hamachi_rx(struct net_device *dev); +static inline int hamachi_tx(struct net_device *dev); +static void hamachi_error(struct net_device *dev, int intr_status); +static int hamachi_close(struct net_device *dev); +static struct net_device_stats *hamachi_get_stats(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; +static const struct ethtool_ops ethtool_ops_no_mii; + +static const struct net_device_ops hamachi_netdev_ops = { + .ndo_open = hamachi_open, + .ndo_stop = hamachi_close, + .ndo_start_xmit = hamachi_start_xmit, + .ndo_get_stats = hamachi_get_stats, + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_tx_timeout = hamachi_tx_timeout, + .ndo_do_ioctl = netdev_ioctl, +}; + + +static int hamachi_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct hamachi_private *hmp; + int option, i, rx_int_var, tx_int_var, boguscnt; + int chip_id = ent->driver_data; + int irq; + void __iomem *ioaddr; + unsigned long base; + static int card_idx; + struct net_device *dev; + void *ring_space; + dma_addr_t ring_dma; + int ret = -ENOMEM; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + if (pci_enable_device(pdev)) { + ret = -EIO; + goto err_out; + } + + base = pci_resource_start(pdev, 0); +#ifdef __alpha__ /* Really "64 bit addrs" */ + base |= (pci_resource_start(pdev, 1) << 32); +#endif + + pci_set_master(pdev); + + i = pci_request_regions(pdev, DRV_NAME); + if (i) + return i; + + irq = pdev->irq; + ioaddr = ioremap(base, 0x400); + if (!ioaddr) + goto err_out_release; + + dev = alloc_etherdev(sizeof(struct hamachi_private)); + if (!dev) + goto err_out_iounmap; + + SET_NETDEV_DEV(dev, &pdev->dev); + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i) + : readb(ioaddr + StationAddr + i); + +#if ! defined(final_version) + if (hamachi_debug > 4) + for (i = 0; i < 0x10; i++) + printk("%2.2x%s", + read_eeprom(ioaddr, i), i % 16 != 15 ? 
" " : "\n"); +#endif + + hmp = netdev_priv(dev); + spin_lock_init(&hmp->lock); + + hmp->mii_if.dev = dev; + hmp->mii_if.mdio_read = mdio_read; + hmp->mii_if.mdio_write = mdio_write; + hmp->mii_if.phy_id_mask = 0x1f; + hmp->mii_if.reg_num_mask = 0x1f; + + ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_cleardev; + hmp->tx_ring = ring_space; + hmp->tx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_unmap_tx; + hmp->rx_ring = ring_space; + hmp->rx_ring_dma = ring_dma; + + /* Check for options being passed in */ + option = card_idx < MAX_UNITS ? options[card_idx] : 0; + if (dev->mem_start) + option = dev->mem_start; + + /* If the bus size is misidentified, do the following. */ + force32 = force32 ? force32 : + ((option >= 0) ? ((option & 0x00000070) >> 4) : 0 ); + if (force32) + writeb(force32, ioaddr + VirtualJumpers); + + /* Hmmm, do we really need to reset the chip???. */ + writeb(0x01, ioaddr + ChipReset); + + /* After a reset, the clock speed measurement of the PCI bus will not + * be valid for a moment. Wait for a little while until it is. If + * it takes more than 10ms, forget it. + */ + udelay(10); + i = readb(ioaddr + PCIClkMeas); + for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){ + udelay(10); + i = readb(ioaddr + PCIClkMeas); + } + + hmp->base = ioaddr; + pci_set_drvdata(pdev, dev); + + hmp->chip_id = chip_id; + hmp->pci_dev = pdev; + + /* The lower four bits are the media type. */ + if (option > 0) { + hmp->option = option; + if (option & 0x200) + hmp->mii_if.full_duplex = 1; + else if (option & 0x080) + hmp->mii_if.full_duplex = 0; + hmp->default_port = option & 15; + if (hmp->default_port) + hmp->mii_if.force_media = 1; + } + if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0) + hmp->mii_if.full_duplex = 1; + + /* lock the duplex mode if someone specified a value */ + if (hmp->mii_if.full_duplex || (option & 0x080)) + hmp->duplex_lock = 1; + + /* Set interrupt tuning parameters */ + max_rx_latency = max_rx_latency & 0x00ff; + max_rx_gap = max_rx_gap & 0x00ff; + min_rx_pkt = min_rx_pkt & 0x00ff; + max_tx_latency = max_tx_latency & 0x00ff; + max_tx_gap = max_tx_gap & 0x00ff; + min_tx_pkt = min_tx_pkt & 0x00ff; + + rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1; + tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1; + hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var : + (min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency); + hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var : + (min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency); + + + /* The Hamachi-specific entries in the device structure. */ + dev->netdev_ops = &hamachi_netdev_ops; + dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ? + ðtool_ops : ðtool_ops_no_mii; + dev->watchdog_timeo = TX_TIMEOUT; + if (mtu) + dev->mtu = mtu; + + i = register_netdev(dev); + if (i) { + ret = i; + goto err_out_unmap_rx; + } + + printk(KERN_INFO "%s: %s type %x at %p, %pM, IRQ %d.\n", + dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev), + ioaddr, dev->dev_addr, irq); + i = readb(ioaddr + PCIClkMeas); + printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers " + "%2.2x, LPA %4.4x.\n", + dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32, + i ? 
2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers), + readw(ioaddr + ANLinkPartnerAbility)); + + if (chip_tbl[hmp->chip_id].flags & CanHaveMII) { + int phy, phy_idx = 0; + for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) { + int mii_status = mdio_read(dev, phy, MII_BMSR); + if (mii_status != 0xffff && + mii_status != 0x0000) { + hmp->phys[phy_idx++] = phy; + hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); + printk(KERN_INFO "%s: MII PHY found at address %d, status " + "0x%4.4x advertising %4.4x.\n", + dev->name, phy, mii_status, hmp->mii_if.advertising); + } + } + hmp->mii_cnt = phy_idx; + if (hmp->mii_cnt > 0) + hmp->mii_if.phy_id = hmp->phys[0]; + else + memset(&hmp->mii_if, 0, sizeof(hmp->mii_if)); + } + /* Configure gigabit autonegotiation. */ + writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */ + writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */ + writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */ + + card_idx++; + return 0; + +err_out_unmap_rx: + pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, + hmp->rx_ring_dma); +err_out_unmap_tx: + pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring, + hmp->tx_ring_dma); +err_out_cleardev: + free_netdev (dev); +err_out_iounmap: + iounmap(ioaddr); +err_out_release: + pci_release_regions(pdev); +err_out: + return ret; +} + +static int read_eeprom(void __iomem *ioaddr, int location) +{ + int bogus_cnt = 1000; + + /* We should check busy first - per docs -KDU */ + while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0); + writew(location, ioaddr + EEAddr); + writeb(0x02, ioaddr + EECmdStatus); + bogus_cnt = 1000; + while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0); + if (hamachi_debug > 5) + printk(" EEPROM status is %2.2x after %d ticks.\n", + (int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt); + return readb(ioaddr + EEData); +} + +/* MII Managemen Data I/O accesses. + These routines assume the MDIO controller is idle, and do not exit until + the command is finished. */ + +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + int i; + + /* We should check busy first - per docs -KDU */ + for (i = 10000; i >= 0; i--) + if ((readw(ioaddr + MII_Status) & 1) == 0) + break; + writew((phy_id<<8) + location, ioaddr + MII_Addr); + writew(0x0001, ioaddr + MII_Cmd); + for (i = 10000; i >= 0; i--) + if ((readw(ioaddr + MII_Status) & 1) == 0) + break; + return readw(ioaddr + MII_Rd_Data); +} + +static void mdio_write(struct net_device *dev, int phy_id, int location, int value) +{ + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + int i; + + /* We should check busy first - per docs -KDU */ + for (i = 10000; i >= 0; i--) + if ((readw(ioaddr + MII_Status) & 1) == 0) + break; + writew((phy_id<<8) + location, ioaddr + MII_Addr); + writew(value, ioaddr + MII_Wr_Data); + + /* Wait for the command to finish. */ + for (i = 10000; i >= 0; i--) + if ((readw(ioaddr + MII_Status) & 1) == 0) + break; +} + + +static int hamachi_open(struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + int i; + u32 rx_int_var, tx_int_var; + u16 fifo_info; + + i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED, + dev->name, dev); + if (i) + return i; + + hamachi_init_ring(dev); + +#if ADDRLEN == 64 + /* writellll anyone ? 
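+	   (No 64-bit MMIO helper is used here; the ring base is programmed as
+	   two 32-bit stores, low word at the register and high word at offset
+	   +4, as done just below.  A sketch of the single-store form, assuming
+	   the platform provides writeq():
+	       writeq(hmp->rx_ring_dma, ioaddr + RxPtr);
+	       writeq(hmp->tx_ring_dma, ioaddr + TxPtr);
+	   The paired 32-bit writes are kept for portability to 32-bit builds.)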
*/ + writel(hmp->rx_ring_dma, ioaddr + RxPtr); + writel(hmp->rx_ring_dma >> 32, ioaddr + RxPtr + 4); + writel(hmp->tx_ring_dma, ioaddr + TxPtr); + writel(hmp->tx_ring_dma >> 32, ioaddr + TxPtr + 4); +#else + writel(hmp->rx_ring_dma, ioaddr + RxPtr); + writel(hmp->tx_ring_dma, ioaddr + TxPtr); +#endif + + /* TODO: It would make sense to organize this as words since the card + * documentation does. -KDU + */ + for (i = 0; i < 6; i++) + writeb(dev->dev_addr[i], ioaddr + StationAddr + i); + + /* Initialize other registers: with so many this eventually this will + converted to an offset/value list. */ + + /* Configure the FIFO */ + fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6; + switch (fifo_info){ + case 0 : + /* No FIFO */ + writew(0x0000, ioaddr + FIFOcfg); + break; + case 1 : + /* Configure the FIFO for 512K external, 16K used for Tx. */ + writew(0x0028, ioaddr + FIFOcfg); + break; + case 2 : + /* Configure the FIFO for 1024 external, 32K used for Tx. */ + writew(0x004C, ioaddr + FIFOcfg); + break; + case 3 : + /* Configure the FIFO for 2048 external, 32K used for Tx. */ + writew(0x006C, ioaddr + FIFOcfg); + break; + default : + printk(KERN_WARNING "%s: Unsupported external memory config!\n", + dev->name); + /* Default to no FIFO */ + writew(0x0000, ioaddr + FIFOcfg); + break; + } + + if (dev->if_port == 0) + dev->if_port = hmp->default_port; + + + /* Setting the Rx mode will start the Rx process. */ + /* If someone didn't choose a duplex, default to full-duplex */ + if (hmp->duplex_lock != 1) + hmp->mii_if.full_duplex = 1; + + /* always 1, takes no more time to do it */ + writew(0x0001, ioaddr + RxChecksum); + writew(0x0000, ioaddr + TxChecksum); + writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */ + writew(0x215F, ioaddr + MACCnfg); + writew(0x000C, ioaddr + FrameGap0); + /* WHAT?!?!? Why isn't this documented somewhere? -KDU */ + writew(0x1018, ioaddr + FrameGap1); + /* Why do we enable receives/transmits here? -KDU */ + writew(0x0780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */ + /* Enable automatic generation of flow control frames, period 0xffff. */ + writel(0x0030FFFF, ioaddr + FlowCtrl); + writew(MAX_FRAME_SIZE, ioaddr + MaxFrameSize); /* dev->mtu+14 ??? */ + + /* Enable legacy links. */ + writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */ + /* Initial Link LED to blinking red. */ + writeb(0x03, ioaddr + LEDCtrl); + + /* Configure interrupt mitigation. This has a great effect on + performance, so systems tuning should start here!. */ + + rx_int_var = hmp->rx_int_var; + tx_int_var = hmp->tx_int_var; + + if (hamachi_debug > 1) { + printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n", + tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8, + (tx_int_var & 0x00ff0000) >> 16); + printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n", + rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8, + (rx_int_var & 0x00ff0000) >> 16); + printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var); + } + + writel(tx_int_var, ioaddr + TxIntrCtrl); + writel(rx_int_var, ioaddr + RxIntrCtrl); + + set_rx_mode(dev); + + netif_start_queue(dev); + + /* Enable interrupts by setting the interrupt mask. */ + writel(0x80878787, ioaddr + InterruptEnable); + writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */ + + /* Configure and start the DMA channels. 
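+	   Worked example of the burst-size encoding noted below, size = 4<<(val&7):
+	   both 0x001D and 0x005D have low three bits equal to 5, so the burst is
+	   4 << 5 = 128 (the "128 dword bursts" mentioned inline); the extra bit in
+	   0x005D is presumably the 64-bit descriptor-address mode, since it is
+	   only written when ADDRLEN == 64.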
*/ + /* Burst sizes are in the low three bits: size = 4<<(val&7) */ +#if ADDRLEN == 64 + writew(0x005D, ioaddr + RxDMACtrl); /* 128 dword bursts */ + writew(0x005D, ioaddr + TxDMACtrl); +#else + writew(0x001D, ioaddr + RxDMACtrl); + writew(0x001D, ioaddr + TxDMACtrl); +#endif + writew(0x0001, ioaddr + RxCmd); + + if (hamachi_debug > 2) { + printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n", + dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus)); + } + /* Set the timer to check for link beat. */ + init_timer(&hmp->timer); + hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */ + hmp->timer.data = (unsigned long)dev; + hmp->timer.function = hamachi_timer; /* timer handler */ + add_timer(&hmp->timer); + + return 0; +} + +static inline int hamachi_tx(struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + + /* Update the dirty pointer until we find an entry that is + still owned by the card */ + for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) { + int entry = hmp->dirty_tx % TX_RING_SIZE; + struct sk_buff *skb; + + if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn)) + break; + /* Free the original skb. */ + skb = hmp->tx_skbuff[entry]; + if (skb) { + pci_unmap_single(hmp->pci_dev, + leXX_to_cpu(hmp->tx_ring[entry].addr), + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + hmp->tx_skbuff[entry] = NULL; + } + hmp->tx_ring[entry].status_n_length = 0; + if (entry >= TX_RING_SIZE-1) + hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= + cpu_to_le32(DescEndRing); + dev->stats.tx_packets++; + } + + return 0; +} + +static void hamachi_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + int next_tick = 10*HZ; + + if (hamachi_debug > 2) { + printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA " + "%4.4x.\n", dev->name, readw(ioaddr + ANStatus), + readw(ioaddr + ANLinkPartnerAbility)); + printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x " + "%4.4x %4.4x %4.4x.\n", dev->name, + readw(ioaddr + 0x0e0), + readw(ioaddr + 0x0e2), + readw(ioaddr + 0x0e4), + readw(ioaddr + 0x0e6), + readw(ioaddr + 0x0e8), + readw(ioaddr + 0x0eA)); + } + /* We could do something here... nah. */ + hmp->timer.expires = RUN_AT(next_tick); + add_timer(&hmp->timer); +} + +static void hamachi_tx_timeout(struct net_device *dev) +{ + int i; + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + + printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x," + " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus)); + + { + printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) + printk(KERN_CONT " %8.8x", + le32_to_cpu(hmp->rx_ring[i].status_n_length)); + printk(KERN_CONT "\n"); + printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_CONT " %4.4x", + le32_to_cpu(hmp->tx_ring[i].status_n_length)); + printk(KERN_CONT "\n"); + } + + /* Reinit the hardware and make sure the Rx and Tx processes + are up and running. + */ + dev->if_port = 0; + /* The right way to do Reset. -KDU + * -Clear OWN bit in all Rx/Tx descriptors + * -Wait 50 uS for channels to go idle + * -Turn off MAC receiver + * -Issue Reset + */ + + for (i = 0; i < RX_RING_SIZE; i++) + hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn); + + /* Presume that all packets in the Tx queue are gone if we have to + * re-init the hardware. 
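+	 * The loop below therefore unmaps and frees every queued tx_skbuff
+	 * entry and clears the upper status bits of each descriptor (keeping
+	 * the low 16-bit length field and setting the DescEndRing marker on
+	 * the final slot), so the ring still terminates correctly after the
+	 * reset.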
+ */ + for (i = 0; i < TX_RING_SIZE; i++){ + struct sk_buff *skb; + + if (i >= TX_RING_SIZE - 1) + hmp->tx_ring[i].status_n_length = + cpu_to_le32(DescEndRing) | + (hmp->tx_ring[i].status_n_length & + cpu_to_le32(0x0000ffff)); + else + hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff); + skb = hmp->tx_skbuff[i]; + if (skb){ + pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr), + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + hmp->tx_skbuff[i] = NULL; + } + } + + udelay(60); /* Sleep 60 us just for safety sake */ + writew(0x0002, ioaddr + RxCmd); /* STOP Rx */ + + writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */ + + hmp->tx_full = 0; + hmp->cur_rx = hmp->cur_tx = 0; + hmp->dirty_rx = hmp->dirty_tx = 0; + /* Rx packets are also presumed lost; however, we need to make sure a + * ring of buffers is in tact. -KDU + */ + for (i = 0; i < RX_RING_SIZE; i++){ + struct sk_buff *skb = hmp->rx_skbuff[i]; + + if (skb){ + pci_unmap_single(hmp->pci_dev, + leXX_to_cpu(hmp->rx_ring[i].addr), + hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + hmp->rx_skbuff[i] = NULL; + } + } + /* Fill in the Rx buffers. Handle allocation failure gracefully. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz); + hmp->rx_skbuff[i] = skb; + if (skb == NULL) + break; + + hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, + skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | + DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2)); + } + hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + /* Mark the last entry as wrapping the ring. */ + hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); + + /* Trigger an immediate transmit demand. */ + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + + /* Restart the chip's Tx/Rx processes . */ + writew(0x0002, ioaddr + TxCmd); /* STOP Tx */ + writew(0x0001, ioaddr + TxCmd); /* START Tx */ + writew(0x0001, ioaddr + RxCmd); /* START Rx */ + + netif_wake_queue(dev); +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void hamachi_init_ring(struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + int i; + + hmp->tx_full = 0; + hmp->cur_rx = hmp->cur_tx = 0; + hmp->dirty_rx = hmp->dirty_tx = 0; + + /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the + * card needs room to do 8 byte alignment, +2 so we can reserve + * the first 2 bytes, and +16 gets room for the status word from the + * card. -KDU + */ + hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ : + (((dev->mtu+26+7) & ~7) + 16)); + + /* Initialize all Rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + hmp->rx_ring[i].status_n_length = 0; + hmp->rx_skbuff[i] = NULL; + } + /* Fill in the Rx buffers. Handle allocation failure gracefully. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2); + hmp->rx_skbuff[i] = skb; + if (skb == NULL) + break; + skb_reserve(skb, 2); /* 16 byte align the IP header. 
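+	   (the 2 reserved bytes plus the 14-byte Ethernet header make the IP
+	   header that follows start on a 16-byte boundary)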
*/ + hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, + skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + /* -2 because it doesn't REALLY have that first 2 bytes -KDU */ + hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | + DescEndPacket | DescIntr | (hmp->rx_buf_sz -2)); + } + hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); + + for (i = 0; i < TX_RING_SIZE; i++) { + hmp->tx_skbuff[i] = NULL; + hmp->tx_ring[i].status_n_length = 0; + } + /* Mark the last entry of the ring */ + hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); +} + + +static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + unsigned entry; + u16 status; + + /* Ok, now make sure that the queue has space before trying to + add another skbuff. if we return non-zero the scheduler + should interpret this as a queue full and requeue the buffer + for later. + */ + if (hmp->tx_full) { + /* We should NEVER reach this point -KDU */ + printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx); + + /* Wake the potentially-idle transmit channel. */ + /* If we don't need to read status, DON'T -KDU */ + status=readw(hmp->base + TxStatus); + if( !(status & 0x0001) || (status & 0x0002)) + writew(0x0001, hmp->base + TxCmd); + return NETDEV_TX_BUSY; + } + + /* Caution: the write order is important here, set the field + with the "ownership" bits last. */ + + /* Calculate the next Tx descriptor entry. */ + entry = hmp->cur_tx % TX_RING_SIZE; + + hmp->tx_skbuff[entry] = skb; + + hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, + skb->data, skb->len, PCI_DMA_TODEVICE)); + + /* Hmmmm, could probably put a DescIntr on these, but the way + the driver is currently coded makes Tx interrupts unnecessary + since the clearing of the Tx ring is handled by the start_xmit + routine. This organization helps mitigate the interrupts a + bit and probably renders the max_tx_latency param useless. + + Update: Putting a DescIntr bit on all of the descriptors and + mitigating interrupt frequency with the tx_min_pkt parameter. -KDU + */ + if (entry >= TX_RING_SIZE-1) /* Wrap ring */ + hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn | + DescEndPacket | DescEndRing | DescIntr | skb->len); + else + hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn | + DescEndPacket | DescIntr | skb->len); + hmp->cur_tx++; + + /* Non-x86 Todo: explicitly flush cache lines here. */ + + /* Wake the potentially-idle transmit channel. */ + /* If we don't need to read status, DON'T -KDU */ + status=readw(hmp->base + TxStatus); + if( !(status & 0x0001) || (status & 0x0002)) + writew(0x0001, hmp->base + TxCmd); + + /* Immediately before returning, let's clear as many entries as we can. */ + hamachi_tx(dev); + + /* We should kick the bottom half here, since we are not accepting + * interrupts with every packet. i.e. realize that Gigabit ethernet + * can transmit faster than ordinary machines can load packets; + * hence, any packet that got put off because we were in the transmit + * routine should IMMEDIATELY get a chance to be re-queued. 
-KDU + */ + if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4)) + netif_wake_queue(dev); /* Typical path */ + else { + hmp->tx_full = 1; + netif_stop_queue(dev); + } + + if (hamachi_debug > 4) { + printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n", + dev->name, hmp->cur_tx, entry); + } + return NETDEV_TX_OK; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t hamachi_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + long boguscnt = max_interrupt_work; + int handled = 0; + +#ifndef final_version /* Can never occur. */ + if (dev == NULL) { + printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq); + return IRQ_NONE; + } +#endif + + spin_lock(&hmp->lock); + + do { + u32 intr_status = readl(ioaddr + InterruptClear); + + if (hamachi_debug > 4) + printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n", + dev->name, intr_status); + + if (intr_status == 0) + break; + + handled = 1; + + if (intr_status & IntrRxDone) + hamachi_rx(dev); + + if (intr_status & IntrTxDone){ + /* This code should RARELY need to execute. After all, this is + * a gigabit link, it should consume packets as fast as we put + * them in AND we clear the Tx ring in hamachi_start_xmit(). + */ + if (hmp->tx_full){ + for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){ + int entry = hmp->dirty_tx % TX_RING_SIZE; + struct sk_buff *skb; + + if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn)) + break; + skb = hmp->tx_skbuff[entry]; + /* Free the original skb. */ + if (skb){ + pci_unmap_single(hmp->pci_dev, + leXX_to_cpu(hmp->tx_ring[entry].addr), + skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + hmp->tx_skbuff[entry] = NULL; + } + hmp->tx_ring[entry].status_n_length = 0; + if (entry >= TX_RING_SIZE-1) + hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= + cpu_to_le32(DescEndRing); + dev->stats.tx_packets++; + } + if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){ + /* The ring is no longer full */ + hmp->tx_full = 0; + netif_wake_queue(dev); + } + } else { + netif_wake_queue(dev); + } + } + + + /* Abnormal error summary/uncommon events handlers. */ + if (intr_status & + (IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr | + LinkChange | NegotiationChange | StatsMax)) + hamachi_error(dev, intr_status); + + if (--boguscnt < 0) { + printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n", + dev->name, intr_status); + break; + } + } while (1); + + if (hamachi_debug > 3) + printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", + dev->name, readl(ioaddr + IntrStatus)); + +#ifndef final_version + /* Code that should never be run! Perhaps remove after testing.. */ + { + static int stopit = 10; + if (dev->start == 0 && --stopit < 0) { + printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n", + dev->name); + free_irq(irq, dev); + } + } +#endif + + spin_unlock(&hmp->lock); + return IRQ_RETVAL(handled); +} + +/* This routine is logically part of the interrupt handler, but separated + for clarity and better register allocation. 
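+   The loop in hamachi_rx() walks the Rx ring from cur_rx until it reaches a
+   descriptor whose DescOwn bit is still set, i.e. still owned by the NIC.
+   Frames shorter than rx_copybreak are copied into a freshly allocated skb
+   so the full-size ring buffer can be reused in place; larger frames are
+   unmapped and passed up directly.  Emptied slots are refilled in a second
+   loop, and the Rx engine is restarted if it had stopped.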
*/ +static int hamachi_rx(struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + int entry = hmp->cur_rx % RX_RING_SIZE; + int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx; + + if (hamachi_debug > 4) { + printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n", + entry, hmp->rx_ring[entry].status_n_length); + } + + /* If EOP is set on the next entry, it's a new packet. Send it up. */ + while (1) { + struct hamachi_desc *desc = &(hmp->rx_ring[entry]); + u32 desc_status = le32_to_cpu(desc->status_n_length); + u16 data_size = desc_status; /* Implicit truncate */ + u8 *buf_addr; + s32 frame_status; + + if (desc_status & DescOwn) + break; + pci_dma_sync_single_for_cpu(hmp->pci_dev, + leXX_to_cpu(desc->addr), + hmp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; + frame_status = get_unaligned_le32(&(buf_addr[data_size - 12])); + if (hamachi_debug > 4) + printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n", + frame_status); + if (--boguscnt < 0) + break; + if ( ! (desc_status & DescEndPacket)) { + printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " + "multiple buffers, entry %#x length %d status %4.4x!\n", + dev->name, hmp->cur_rx, data_size, desc_status); + printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n", + dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]); + printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n", + dev->name, + le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000, + le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff, + le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length)); + dev->stats.rx_length_errors++; + } /* else Omit for prototype errata??? */ + if (frame_status & 0x00380000) { + /* There was an error. */ + if (hamachi_debug > 2) + printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n", + frame_status); + dev->stats.rx_errors++; + if (frame_status & 0x00600000) + dev->stats.rx_length_errors++; + if (frame_status & 0x00080000) + dev->stats.rx_frame_errors++; + if (frame_status & 0x00100000) + dev->stats.rx_crc_errors++; + if (frame_status < 0) + dev->stats.rx_dropped++; + } else { + struct sk_buff *skb; + /* Omit CRC */ + u16 pkt_len = (frame_status & 0x07ff) - 4; +#ifdef RX_CHECKSUM + u32 pfck = *(u32 *) &buf_addr[data_size - 8]; +#endif + + +#ifndef final_version + if (hamachi_debug > 4) + printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d" + " of %d, bogus_cnt %d.\n", + pkt_len, data_size, boguscnt); + if (hamachi_debug > 5) + printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n", + dev->name, + *(s32*)&(buf_addr[data_size - 20]), + *(s32*)&(buf_addr[data_size - 16]), + *(s32*)&(buf_addr[data_size - 12]), + *(s32*)&(buf_addr[data_size - 8]), + *(s32*)&(buf_addr[data_size - 4])); +#endif + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { +#ifdef RX_CHECKSUM + printk(KERN_ERR "%s: rx_copybreak non-zero " + "not good with RX_CHECKSUM\n", dev->name); +#endif + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(hmp->pci_dev, + leXX_to_cpu(hmp->rx_ring[entry].addr), + hmp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + /* Call copy + cksum if available. 
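+	   (small-frame copy path: the data is copied out of the still-mapped
+	   DMA buffer between the two pci_dma_sync_single_for_cpu and
+	   _for_device calls, so the original ring buffer stays attached to
+	   this slot for reuse)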
*/ +#if 1 || USE_IP_COPYSUM + skb_copy_to_linear_data(skb, + hmp->rx_skbuff[entry]->data, pkt_len); + skb_put(skb, pkt_len); +#else + memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma + + entry*sizeof(*desc), pkt_len); +#endif + pci_dma_sync_single_for_device(hmp->pci_dev, + leXX_to_cpu(hmp->rx_ring[entry].addr), + hmp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(hmp->pci_dev, + leXX_to_cpu(hmp->rx_ring[entry].addr), + hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); + skb_put(skb = hmp->rx_skbuff[entry], pkt_len); + hmp->rx_skbuff[entry] = NULL; + } + skb->protocol = eth_type_trans(skb, dev); + + +#ifdef RX_CHECKSUM + /* TCP or UDP on ipv4, DIX encoding */ + if (pfck>>24 == 0x91 || pfck>>24 == 0x51) { + struct iphdr *ih = (struct iphdr *) skb->data; + /* Check that IP packet is at least 46 bytes, otherwise, + * there may be pad bytes included in the hardware checksum. + * This wouldn't happen if everyone padded with 0. + */ + if (ntohs(ih->tot_len) >= 46){ + /* don't worry about frags */ + if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) { + u32 inv = *(u32 *) &buf_addr[data_size - 16]; + u32 *p = (u32 *) &buf_addr[data_size - 20]; + register u32 crc, p_r, p_r1; + + if (inv & 4) { + inv &= ~4; + --p; + } + p_r = *p; + p_r1 = *(p-1); + switch (inv) { + case 0: + crc = (p_r & 0xffff) + (p_r >> 16); + break; + case 1: + crc = (p_r >> 16) + (p_r & 0xffff) + + (p_r1 >> 16 & 0xff00); + break; + case 2: + crc = p_r + (p_r1 >> 16); + break; + case 3: + crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16); + break; + default: /*NOTREACHED*/ crc = 0; + } + if (crc & 0xffff0000) { + crc &= 0xffff; + ++crc; + } + /* tcp/udp will add in pseudo */ + skb->csum = ntohs(pfck & 0xffff); + if (skb->csum > crc) + skb->csum -= crc; + else + skb->csum += (~crc & 0xffff); + /* + * could do the pseudo myself and return + * CHECKSUM_UNNECESSARY + */ + skb->ip_summed = CHECKSUM_COMPLETE; + } + } + } +#endif /* RX_CHECKSUM */ + + netif_rx(skb); + dev->stats.rx_packets++; + } + entry = (++hmp->cur_rx) % RX_RING_SIZE; + } + + /* Refill the Rx ring buffers. */ + for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) { + struct hamachi_desc *desc; + + entry = hmp->dirty_rx % RX_RING_SIZE; + desc = &(hmp->rx_ring[entry]); + if (hmp->rx_skbuff[entry] == NULL) { + struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2); + + hmp->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. */ + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, + skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + } + desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz); + if (entry >= RX_RING_SIZE-1) + desc->status_n_length |= cpu_to_le32(DescOwn | + DescEndPacket | DescEndRing | DescIntr); + else + desc->status_n_length |= cpu_to_le32(DescOwn | + DescEndPacket | DescIntr); + } + + /* Restart Rx engine if stopped. */ + /* If we don't need to check status, don't. -KDU */ + if (readw(hmp->base + RxStatus) & 0x0002) + writew(0x0001, hmp->base + RxCmd); + + return 0; +} + +/* This is more properly named "uncommon interrupt events", as it covers more + than just errors. 
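+   It handles link and autonegotiation changes (switching the link LED),
+   statistics-counter overflow signalled by StatsMax, and the PCI fault
+   bits, and logs anything else unexpected.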
*/ +static void hamachi_error(struct net_device *dev, int intr_status) +{ + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + + if (intr_status & (LinkChange|NegotiationChange)) { + if (hamachi_debug > 1) + printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl" + " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n", + dev->name, readw(ioaddr + 0x0E0), readw(ioaddr + 0x0E2), + readw(ioaddr + ANLinkPartnerAbility), + readl(ioaddr + IntrStatus)); + if (readw(ioaddr + ANStatus) & 0x20) + writeb(0x01, ioaddr + LEDCtrl); + else + writeb(0x03, ioaddr + LEDCtrl); + } + if (intr_status & StatsMax) { + hamachi_get_stats(dev); + /* Read the overflow bits to clear. */ + readl(ioaddr + 0x370); + readl(ioaddr + 0x3F0); + } + if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone)) && + hamachi_debug) + printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", + dev->name, intr_status); + /* Hmmmmm, it's not clear how to recover from PCI faults. */ + if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) + dev->stats.tx_fifo_errors++; + if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) + dev->stats.rx_fifo_errors++; +} + +static int hamachi_close(struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + struct sk_buff *skb; + int i; + + netif_stop_queue(dev); + + if (hamachi_debug > 1) { + printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n", + dev->name, readw(ioaddr + TxStatus), + readw(ioaddr + RxStatus), readl(ioaddr + IntrStatus)); + printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", + dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx); + } + + /* Disable interrupts by clearing the interrupt mask. */ + writel(0x0000, ioaddr + InterruptEnable); + + /* Stop the chip's Tx and Rx processes. */ + writel(2, ioaddr + RxCmd); + writew(2, ioaddr + TxCmd); + +#ifdef __i386__ + if (hamachi_debug > 2) { + printk(KERN_DEBUG " Tx ring at %8.8x:\n", + (int)hmp->tx_ring_dma); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n", + readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ', + i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr); + printk(KERN_DEBUG " Rx ring %8.8x:\n", + (int)hmp->rx_ring_dma); + for (i = 0; i < RX_RING_SIZE; i++) { + printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n", + readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ', + i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr); + if (hamachi_debug > 6) { + if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) { + u16 *addr = (u16 *) + hmp->rx_skbuff[i]->data; + int j; + printk(KERN_DEBUG "Addr: "); + for (j = 0; j < 0x50; j++) + printk(" %4.4x", addr[j]); + printk("\n"); + } + } + } + } +#endif /* __i386__ debugging only */ + + free_irq(hmp->pci_dev->irq, dev); + + del_timer_sync(&hmp->timer); + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + skb = hmp->rx_skbuff[i]; + hmp->rx_ring[i].status_n_length = 0; + if (skb) { + pci_unmap_single(hmp->pci_dev, + leXX_to_cpu(hmp->rx_ring[i].addr), + hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + hmp->rx_skbuff[i] = NULL; + } + hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. 
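+		 (0xBADF00D0 is a recognizable poison value; any stray DMA to a
+		 freed Rx slot should then be easy to spot)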
*/ + } + for (i = 0; i < TX_RING_SIZE; i++) { + skb = hmp->tx_skbuff[i]; + if (skb) { + pci_unmap_single(hmp->pci_dev, + leXX_to_cpu(hmp->tx_ring[i].addr), + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + hmp->tx_skbuff[i] = NULL; + } + } + + writeb(0x00, ioaddr + LEDCtrl); + + return 0; +} + +static struct net_device_stats *hamachi_get_stats(struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + + /* We should lock this segment of code for SMP eventually, although + the vulnerability window is very small and statistics are + non-critical. */ + /* Ok, what goes here? This appears to be stuck at 21 packets + according to ifconfig. It does get incremented in hamachi_tx(), + so I think I'll comment it out here and see if better things + happen. + */ + /* dev->stats.tx_packets = readl(ioaddr + 0x000); */ + + /* Total Uni+Brd+Multi */ + dev->stats.rx_bytes = readl(ioaddr + 0x330); + /* Total Uni+Brd+Multi */ + dev->stats.tx_bytes = readl(ioaddr + 0x3B0); + /* Multicast Rx */ + dev->stats.multicast = readl(ioaddr + 0x320); + + /* Over+Undersized */ + dev->stats.rx_length_errors = readl(ioaddr + 0x368); + /* Jabber */ + dev->stats.rx_over_errors = readl(ioaddr + 0x35C); + /* Jabber */ + dev->stats.rx_crc_errors = readl(ioaddr + 0x360); + /* Symbol Errs */ + dev->stats.rx_frame_errors = readl(ioaddr + 0x364); + /* Dropped */ + dev->stats.rx_missed_errors = readl(ioaddr + 0x36C); + + return &dev->stats; +} + +static void set_rx_mode(struct net_device *dev) +{ + struct hamachi_private *hmp = netdev_priv(dev); + void __iomem *ioaddr = hmp->base; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + writew(0x000F, ioaddr + AddrMode); + } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. */ + writew(0x000B, ioaddr + AddrMode); + } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */ + struct netdev_hw_addr *ha; + int i = 0; + + netdev_for_each_mc_addr(ha, dev) { + writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8); + writel(0x20000 | (*(u16 *)&ha->addr[4]), + ioaddr + 0x104 + i*8); + i++; + } + /* Clear remaining entries. */ + for (; i < 64; i++) + writel(0, ioaddr + 0x104 + i*8); + writew(0x0003, ioaddr + AddrMode); + } else { /* Normal, unicast/broadcast-only mode. 
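+	   AddrMode values used in this function: 0x000F promiscuous, 0x000B
+	   accept all multicast, 0x0003 CAM-filtered multicast list, and
+	   0x0001 below for unicast plus broadcast only.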
*/ + writew(0x0001, ioaddr + AddrMode); + } +} + +static int check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EINVAL; + return 0; +} + +static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct hamachi_private *np = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct hamachi_private *np = netdev_priv(dev); + spin_lock_irq(&np->lock); + mii_ethtool_gset(&np->mii_if, ecmd); + spin_unlock_irq(&np->lock); + return 0; +} + +static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct hamachi_private *np = netdev_priv(dev); + int res; + spin_lock_irq(&np->lock); + res = mii_ethtool_sset(&np->mii_if, ecmd); + spin_unlock_irq(&np->lock); + return res; +} + +static int hamachi_nway_reset(struct net_device *dev) +{ + struct hamachi_private *np = netdev_priv(dev); + return mii_nway_restart(&np->mii_if); +} + +static u32 hamachi_get_link(struct net_device *dev) +{ + struct hamachi_private *np = netdev_priv(dev); + return mii_link_ok(&np->mii_if); +} + +static const struct ethtool_ops ethtool_ops = { + .begin = check_if_running, + .get_drvinfo = hamachi_get_drvinfo, + .get_settings = hamachi_get_settings, + .set_settings = hamachi_set_settings, + .nway_reset = hamachi_nway_reset, + .get_link = hamachi_get_link, +}; + +static const struct ethtool_ops ethtool_ops_no_mii = { + .begin = check_if_running, + .get_drvinfo = hamachi_get_drvinfo, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct hamachi_private *np = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(rq); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */ + u32 *d = (u32 *)&rq->ifr_ifru; + /* Should add this check here or an ordinary user can do nasty + * things. -KDU + * + * TODO: Shut down the Rx and Tx engines while doing this. 
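+	 * A rough userspace sketch of this private ioctl (a hypothetical
+	 * example, not part of the driver; fd is assumed to be an open
+	 * AF_INET socket and the two words are the desired register values):
+	 *     struct ifreq ifr = {0};
+	 *     uint32_t *d = (uint32_t *)&ifr.ifr_ifru;
+	 *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+	 *     d[0] = tx_word;  d[1] = rx_word;
+	 *     ioctl(fd, SIOCDEVPRIVATE + 3, &ifr);
+	 * The d[0]=Tx, d[1]=Rx order matches the writes below, and the caller
+	 * must have CAP_NET_ADMIN.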
+ */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + writel(d[0], np->base + TxIntrCtrl); + writel(d[1], np->base + RxIntrCtrl); + printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name, + (u32) readl(np->base + TxIntrCtrl), + (u32) readl(np->base + RxIntrCtrl)); + rc = 0; + } + + else { + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); + spin_unlock_irq(&np->lock); + } + + return rc; +} + + +static void hamachi_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct hamachi_private *hmp = netdev_priv(dev); + + pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, + hmp->rx_ring_dma); + pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring, + hmp->tx_ring_dma); + unregister_netdev(dev); + iounmap(hmp->base); + free_netdev(dev); + pci_release_regions(pdev); + } +} + +static const struct pci_device_id hamachi_pci_tbl[] = { + { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hamachi_pci_tbl); + +static struct pci_driver hamachi_driver = { + .name = DRV_NAME, + .id_table = hamachi_pci_tbl, + .probe = hamachi_init_one, + .remove = hamachi_remove_one, +}; + +static int __init hamachi_init (void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + return pci_register_driver(&hamachi_driver); +} + +static void __exit hamachi_exit (void) +{ + pci_unregister_driver(&hamachi_driver); +} + + +module_init(hamachi_init); +module_exit(hamachi_exit); diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c new file mode 100644 index 000000000..fa2db41e0 --- /dev/null +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -0,0 +1,1425 @@ +/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */ +/* + Written 1997-2001 by Donald Becker. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter. + It also supports the Symbios Logic version of the same chip core. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support and updates available at + http://www.scyld.com/network/yellowfin.html + [link no longer provides useful info -jgarzik] + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "yellowfin" +#define DRV_VERSION "2.1" +#define DRV_RELDATE "Sep 11, 2006" + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 20; +static int mtu; +#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ +/* System-wide count of bogus-rx frames. */ +static int bogus_rx; +static int dma_ctrl = 0x004A0263; /* Constrained by errata */ +static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ +#elif defined(YF_NEW) /* A future perfect board :->. */ +static int dma_ctrl = 0x00CAC277; /* Override when loading module! 
*/ +static int fifo_cfg = 0x0028; +#else +static const int dma_ctrl = 0x004A0263; /* Constrained by errata */ +static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ +#endif + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1514 effectively disables this feature. */ +static int rx_copybreak; + +/* Used to pass the media type, etc. + No media types are currently defined. These exist for driver + interoperability. +*/ +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/* Do ugly workaround for GX server chipset errata. */ +static int gx_fix; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for efficiency. + Making the Tx ring too long decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings. */ +#define TX_RING_SIZE 16 +#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */ +#define RX_RING_SIZE 64 +#define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words) +#define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc) +#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc) + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +#define yellowfin_debug debug + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Processor type for cache alignment. */ +#include +#include + +/* These identify the driver base version and may not be removed. */ +static const char version[] = + KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker \n" + " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(max_interrupt_work, int, 0); +module_param(mtu, int, 0); +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +module_param(gx_fix, int, 0); +MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt"); +MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)"); +MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)"); +MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex"); +MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)"); +MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)"); + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the Packet Engines "Yellowfin" Gigabit +Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the +Symbios 53C885E dual function chip. + +II. Board-specific settings + +PCI bus devices are configured by the system at boot time, so no jumpers +need to be set on the board. The system BIOS preferably should assign the +PCI INTA signal to an otherwise unused system IRQ line. +Note: Kernel versions earlier than 1.3.73 do not support shared PCI +interrupt lines. 
+ +III. Driver operation + +IIIa. Ring buffers + +The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple. +This is a descriptor list scheme similar to that used by the EEPro100 and +Tulip. This driver uses two statically allocated fixed-size descriptor lists +formed into rings by a branch from the final descriptor to the beginning of +the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. + +The driver allocates full frame size skbuffs for the Rx ring buffers at +open() time and passes the skb->data field to the Yellowfin as receive data +buffers. When an incoming frame is less than RX_COPYBREAK bytes long, +a fresh skbuff is allocated and the frame is copied to the new skbuff. +When the incoming frame is larger, the skbuff is passed directly up the +protocol stack and replaced by a newly allocated skbuff. + +The RX_COPYBREAK value is chosen to trade-off the memory wasted by +using a full-sized skbuff for small frames vs. the copying costs of larger +frames. For small frames the copying cost is negligible (esp. considering +that we are pre-loading the cache with immediately useful header +information). For large frames the copying cost is non-trivial, and the +larger copy might flush the cache of useful data. + +IIIC. Synchronization + +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +dev->tbusy flag. The other thread is the interrupt handler, which is single +threaded by the hardware and other software. + +The send packet thread has partial control over the Tx ring and 'dev->tbusy' +flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next +queue slot is empty, it clears the tbusy flag when finished otherwise it sets +the 'yp->tx_full' flag. + +The interrupt handler has exclusive control over the Rx ring and records stats +from the Tx ring. After reaping the stats, it marks the Tx queue entry as +empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it +clears both the tx_full and tbusy flags. + +IV. Notes + +Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards. +Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board +and an AlphaStation to verifty the Alpha port! + +IVb. References + +Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential +Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary + Data Manual v3.0 +http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html +http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html + +IVc. Errata + +See Packet Engines confidential appendix (prototype chips only). +*/ + + + +enum capability_flags { + HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16, + HasMACAddrBug=32, /* Only on early revs. */ + DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */ +}; + +/* The PCI I/O space extent. */ +enum { + YELLOWFIN_SIZE = 0x100, +}; + +struct pci_id_info { + const char *name; + struct match_info { + int pci, pci_mask, subsystem, subsystem_mask; + int revision, revision_mask; /* Only 8 bits. */ + } id; + int drv_flags; /* Driver use, intended as capability flags. 
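+	   These come from enum capability_flags above (HasMII, FullTxStatus,
+	   IsGigabit, HasMulticastBug, FullRxStatus, HasMACAddrBug,
+	   DontUseEeprom), OR-ed together per board in pci_id_tbl[] below.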
*/ +}; + +static const struct pci_id_info pci_id_tbl[] = { + {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, + FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, + {"Symbios SYM83C885", { 0x07011000, 0xffffffff}, + HasMII | DontUseEeprom }, + { } +}; + +static const struct pci_device_id yellowfin_pci_tbl[] = { + { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + { } +}; +MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl); + + +/* Offsets to the Yellowfin registers. Various sizes and alignments. */ +enum yellowfin_offsets { + TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C, + TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18, + RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C, + RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58, + EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86, + ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94, + Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4, + MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC, + MII_Status=0xAE, + RxDepth=0xB8, FlowCtrl=0xBC, + AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8, + EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4, + EEFeature=0xF5, +}; + +/* The Yellowfin Rx and Tx buffer descriptors. + Elements are written as 32 bit for endian portability. */ +struct yellowfin_desc { + __le32 dbdma_cmd; + __le32 addr; + __le32 branch_addr; + __le32 result_status; +}; + +struct tx_status_words { +#ifdef __BIG_ENDIAN + u16 tx_errs; + u16 tx_cnt; + u16 paused; + u16 total_tx_cnt; +#else /* Little endian chips. */ + u16 tx_cnt; + u16 tx_errs; + u16 total_tx_cnt; + u16 paused; +#endif /* __BIG_ENDIAN */ +}; + +/* Bits in yellowfin_desc.cmd */ +enum desc_cmd_bits { + CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000, + CMD_NOP=0x60000000, CMD_STOP=0x70000000, + BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000, + BRANCH_IFTRUE=0x040000, +}; + +/* Bits in yellowfin_desc.status */ +enum desc_status_bits { RX_EOP=0x0040, }; + +/* Bits in the interrupt status/mask registers. */ +enum intr_status_bits { + IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08, + IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80, + IntrEarlyRx=0x100, IntrWakeup=0x200, }; + +#define PRIV_ALIGN 31 /* Required alignment mask */ +#define MII_CNT 4 +struct yellowfin_private { + /* Descriptor rings first for alignment. + Tx requires a second descriptor for status. */ + struct yellowfin_desc *rx_ring; + struct yellowfin_desc *tx_ring; + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + + struct tx_status_words *tx_status; + dma_addr_t tx_status_dma; + + struct timer_list timer; /* Media selection timer. */ + /* Frequently used and paired value: keep adjacent for cache effect. */ + int chip_id, drv_flags; + struct pci_dev *pci_dev; + unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + struct tx_status_words *tx_tail_desc; + unsigned int cur_tx, dirty_tx; + int tx_threshold; + unsigned int tx_full:1; /* The Tx queue is full. */ + unsigned int full_duplex:1; /* Full-duplex operation requested. */ + unsigned int duplex_lock:1; + unsigned int medialock:1; /* Do not sense media. */ + unsigned int default_port:4; /* Last dev->if_port value. */ + /* MII transceiver section. */ + int mii_cnt; /* MII device addresses. 
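+	   (more precisely, the number of MII PHYs found at probe time; the
+	   addresses themselves live in phys[] below)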
*/ + u16 advertising; /* NWay media advertisement */ + unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */ + spinlock_t lock; + void __iomem *base; +}; + +static int read_eeprom(void __iomem *ioaddr, int location); +static int mdio_read(void __iomem *ioaddr, int phy_id, int location); +static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int yellowfin_open(struct net_device *dev); +static void yellowfin_timer(unsigned long data); +static void yellowfin_tx_timeout(struct net_device *dev); +static int yellowfin_init_ring(struct net_device *dev); +static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); +static int yellowfin_rx(struct net_device *dev); +static void yellowfin_error(struct net_device *dev, int intr_status); +static int yellowfin_close(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; + +static const struct net_device_ops netdev_ops = { + .ndo_open = yellowfin_open, + .ndo_stop = yellowfin_close, + .ndo_start_xmit = yellowfin_start_xmit, + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_do_ioctl = netdev_ioctl, + .ndo_tx_timeout = yellowfin_tx_timeout, +}; + +static int yellowfin_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + struct yellowfin_private *np; + int irq; + int chip_idx = ent->driver_data; + static int find_cnt; + void __iomem *ioaddr; + int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; + int drv_flags = pci_id_tbl[chip_idx].drv_flags; + void *ring_space; + dma_addr_t ring_dma; +#ifdef USE_IO_OPS + int bar = 0; +#else + int bar = 1; +#endif + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + i = pci_enable_device(pdev); + if (i) return i; + + dev = alloc_etherdev(sizeof(*np)); + if (!dev) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + + np = netdev_priv(dev); + + if (pci_request_regions(pdev, DRV_NAME)) + goto err_out_free_netdev; + + pci_set_master (pdev); + + ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE); + if (!ioaddr) + goto err_out_free_res; + + irq = pdev->irq; + + if (drv_flags & DontUseEeprom) + for (i = 0; i < 6; i++) + dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i); + else { + int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0); + for (i = 0; i < 6; i++) + dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i); + } + + /* Reset the chip. 
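+	   (done by setting the top bit of DMACtrl, 0x80000000, as below;
+	   yellowfin_open() issues the same write before reprogramming the
+	   rings)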
*/ + iowrite32(0x80000000, ioaddr + DMACtrl); + + pci_set_drvdata(pdev, dev); + spin_lock_init(&np->lock); + + np->pci_dev = pdev; + np->chip_id = chip_idx; + np->drv_flags = drv_flags; + np->base = ioaddr; + + ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_cleardev; + np->tx_ring = ring_space; + np->tx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_unmap_tx; + np->rx_ring = ring_space; + np->rx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_unmap_rx; + np->tx_status = ring_space; + np->tx_status_dma = ring_dma; + + if (dev->mem_start) + option = dev->mem_start; + + /* The lower four bits are the media type. */ + if (option > 0) { + if (option & 0x200) + np->full_duplex = 1; + np->default_port = option & 15; + if (np->default_port) + np->medialock = 1; + } + if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0) + np->full_duplex = 1; + + if (np->full_duplex) + np->duplex_lock = 1; + + /* The Yellowfin-specific entries in the device structure. */ + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = ðtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + if (mtu) + dev->mtu = mtu; + + i = register_netdev(dev); + if (i) + goto err_out_unmap_status; + + netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n", + pci_id_tbl[chip_idx].name, + ioread32(ioaddr + ChipRev), ioaddr, + dev->dev_addr, irq); + + if (np->drv_flags & HasMII) { + int phy, phy_idx = 0; + for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) { + int mii_status = mdio_read(ioaddr, phy, 1); + if (mii_status != 0xffff && mii_status != 0x0000) { + np->phys[phy_idx++] = phy; + np->advertising = mdio_read(ioaddr, phy, 4); + netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n", + phy, mii_status, np->advertising); + } + } + np->mii_cnt = phy_idx; + } + + find_cnt++; + + return 0; + +err_out_unmap_status: + pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, + np->tx_status_dma); +err_out_unmap_rx: + pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); +err_out_unmap_tx: + pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); +err_out_cleardev: + pci_iounmap(pdev, ioaddr); +err_out_free_res: + pci_release_regions(pdev); +err_out_free_netdev: + free_netdev (dev); + return -ENODEV; +} + +static int read_eeprom(void __iomem *ioaddr, int location) +{ + int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */ + + iowrite8(location, ioaddr + EEAddr); + iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl); + while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0) + ; + return ioread8(ioaddr + EERead); +} + +/* MII Managemen Data I/O accesses. + These routines assume the MDIO controller is idle, and do not exit until + the command is finished. */ + +static int mdio_read(void __iomem *ioaddr, int phy_id, int location) +{ + int i; + + iowrite16((phy_id<<8) + location, ioaddr + MII_Addr); + iowrite16(1, ioaddr + MII_Cmd); + for (i = 10000; i >= 0; i--) + if ((ioread16(ioaddr + MII_Status) & 1) == 0) + break; + return ioread16(ioaddr + MII_Rd_Data); +} + +static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value) +{ + int i; + + iowrite16((phy_id<<8) + location, ioaddr + MII_Addr); + iowrite16(value, ioaddr + MII_Wr_Data); + + /* Wait for the command to finish. 
*/ + for (i = 10000; i >= 0; i--) + if ((ioread16(ioaddr + MII_Status) & 1) == 0) + break; +} + + +static int yellowfin_open(struct net_device *dev) +{ + struct yellowfin_private *yp = netdev_priv(dev); + const int irq = yp->pci_dev->irq; + void __iomem *ioaddr = yp->base; + int i, rc; + + /* Reset the chip. */ + iowrite32(0x80000000, ioaddr + DMACtrl); + + rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) + return rc; + + rc = yellowfin_init_ring(dev); + if (rc < 0) + goto err_free_irq; + + iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); + iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); + + for (i = 0; i < 6; i++) + iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i); + + /* Set up various condition 'select' registers. + There are no options here. */ + iowrite32(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */ + iowrite32(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */ + iowrite32(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */ + iowrite32(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */ + iowrite32(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */ + iowrite32(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */ + + /* Initialize other registers: with so many this eventually this will + converted to an offset/value list. */ + iowrite32(dma_ctrl, ioaddr + DMACtrl); + iowrite16(fifo_cfg, ioaddr + FIFOcfg); + /* Enable automatic generation of flow control frames, period 0xffff. */ + iowrite32(0x0030FFFF, ioaddr + FlowCtrl); + + yp->tx_threshold = 32; + iowrite32(yp->tx_threshold, ioaddr + TxThreshold); + + if (dev->if_port == 0) + dev->if_port = yp->default_port; + + netif_start_queue(dev); + + /* Setting the Rx mode will start the Rx process. */ + if (yp->drv_flags & IsGigabit) { + /* We are always in full-duplex mode with gigabit! */ + yp->full_duplex = 1; + iowrite16(0x01CF, ioaddr + Cnfg); + } else { + iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */ + iowrite16(0x1018, ioaddr + FrameGap1); + iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg); + } + set_rx_mode(dev); + + /* Enable interrupts by setting the interrupt mask. */ + iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */ + iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */ + iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */ + iowrite32(0x80008000, ioaddr + TxCtrl); + + if (yellowfin_debug > 2) { + netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__); + } + + /* Set the timer to check for link beat. 
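+	   (first fires 3*HZ after open; with an MII PHY present,
+	   yellowfin_timer() then re-arms itself every 60*HZ while the link is
+	   up and every 3*HZ while it is down, recomputing the duplex setting
+	   from the link-partner ability)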
*/ + init_timer(&yp->timer); + yp->timer.expires = jiffies + 3*HZ; + yp->timer.data = (unsigned long)dev; + yp->timer.function = yellowfin_timer; /* timer handler */ + add_timer(&yp->timer); +out: + return rc; + +err_free_irq: + free_irq(irq, dev); + goto out; +} + +static void yellowfin_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct yellowfin_private *yp = netdev_priv(dev); + void __iomem *ioaddr = yp->base; + int next_tick = 60*HZ; + + if (yellowfin_debug > 3) { + netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n", + ioread16(ioaddr + IntrStatus)); + } + + if (yp->mii_cnt) { + int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR); + int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA); + int negotiated = lpa & yp->advertising; + if (yellowfin_debug > 1) + netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n", + yp->phys[0], bmsr, lpa); + + yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated); + + iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg); + + if (bmsr & BMSR_LSTATUS) + next_tick = 60*HZ; + else + next_tick = 3*HZ; + } + + yp->timer.expires = jiffies + next_tick; + add_timer(&yp->timer); +} + +static void yellowfin_tx_timeout(struct net_device *dev) +{ + struct yellowfin_private *yp = netdev_priv(dev); + void __iomem *ioaddr = yp->base; + + netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n", + yp->cur_tx, yp->dirty_tx, + ioread32(ioaddr + TxStatus), + ioread32(ioaddr + RxStatus)); + + /* Note: these should be KERN_DEBUG. */ + if (yellowfin_debug) { + int i; + pr_warn(" Rx ring %p: ", yp->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) + pr_cont(" %08x", yp->rx_ring[i].result_status); + pr_cont("\n"); + pr_warn(" Tx ring %p: ", yp->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) + pr_cont(" %04x /%08x", + yp->tx_status[i].tx_errs, + yp->tx_ring[i].result_status); + pr_cont("\n"); + } + + /* If the hardware is found to hang regularly, we will update the code + to reinitialize the chip here. */ + dev->if_port = 0; + + /* Wake the potentially-idle transmit channel. */ + iowrite32(0x10001000, yp->base + TxCtrl); + if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) + netif_wake_queue (dev); /* Typical path */ + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; +} + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static int yellowfin_init_ring(struct net_device *dev) +{ + struct yellowfin_private *yp = netdev_priv(dev); + int i, j; + + yp->tx_full = 0; + yp->cur_rx = yp->cur_tx = 0; + yp->dirty_tx = 0; + + yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + + for (i = 0; i < RX_RING_SIZE; i++) { + yp->rx_ring[i].dbdma_cmd = + cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); + yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma + + ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc)); + } + + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2); + yp->rx_skbuff[i] = skb; + if (skb == NULL) + break; + skb_reserve(skb, 2); /* 16 byte align the IP header. 
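Together with the 14-byte Ethernet header, this 2-byte pad leaves the IP header 16-byte aligned.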
*/ + yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, + skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + } + if (i != RX_RING_SIZE) { + for (j = 0; j < i; j++) + dev_kfree_skb(yp->rx_skbuff[j]); + return -ENOMEM; + } + yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); + yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + +#define NO_TXSTATS +#ifdef NO_TXSTATS + /* In this mode the Tx ring needs only a single descriptor. */ + for (i = 0; i < TX_RING_SIZE; i++) { + yp->tx_skbuff[i] = NULL; + yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); + yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma + + ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc)); + } + /* Wrap ring */ + yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); +#else +{ + /* Tx ring needs a pair of descriptors, the second for the status. */ + for (i = 0; i < TX_RING_SIZE; i++) { + j = 2*i; + yp->tx_skbuff[i] = 0; + /* Branch on Tx error. */ + yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); + yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + + (j+1)*sizeof(struct yellowfin_desc)); + j++; + if (yp->flags & FullTxStatus) { + yp->tx_ring[j].dbdma_cmd = + cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); + yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); + yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + + i*sizeof(struct tx_status_words)); + } else { + /* Symbios chips write only tx_errs word. */ + yp->tx_ring[j].dbdma_cmd = + cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2); + yp->tx_ring[j].request_cnt = 2; + /* Om pade ummmmm... */ + yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + + i*sizeof(struct tx_status_words) + + &(yp->tx_status[0].tx_errs) - + &(yp->tx_status[0])); + } + yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + + ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc)); + } + /* Wrap ring */ + yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS); +} +#endif + yp->tx_tail_desc = &yp->tx_status[0]; + return 0; +} + +static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct yellowfin_private *yp = netdev_priv(dev); + unsigned entry; + int len = skb->len; + + netif_stop_queue (dev); + + /* Note: Ordering is important here, set the field with the + "ownership" bit last, and only then increment cur_tx. */ + + /* Calculate the next Tx descriptor entry. */ + entry = yp->cur_tx % TX_RING_SIZE; + + if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */ + int cacheline_end = ((unsigned long)skb->data + skb->len) % 32; + /* Fix GX chipset errata. */ + if (cacheline_end > 24 || cacheline_end == 0) { + len = skb->len + 32 - cacheline_end + 1; + if (skb_padto(skb, len)) { + yp->tx_skbuff[entry] = NULL; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } + } + } + yp->tx_skbuff[entry] = skb; + +#ifdef NO_TXSTATS + yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, + skb->data, len, PCI_DMA_TODEVICE)); + yp->tx_ring[entry].result_status = 0; + if (entry >= TX_RING_SIZE-1) { + /* New stop command. 
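The slot the engine would reach next is marked CMD_STOP first (slot 0 when wrapping), and only then is the current descriptor armed, so the DMA engine should never run past the packet just queued.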
*/ + yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP); + yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd = + cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len); + } else { + yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP); + yp->tx_ring[entry].dbdma_cmd = + cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len); + } + yp->cur_tx++; +#else + yp->tx_ring[entry<<1].request_cnt = len; + yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev, + skb->data, len, PCI_DMA_TODEVICE)); + /* The input_last (status-write) command is constant, but we must + rewrite the subsequent 'stop' command. */ + + yp->cur_tx++; + { + unsigned next_entry = yp->cur_tx % TX_RING_SIZE; + yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP); + } + /* Final step -- overwrite the old 'stop' command. */ + + yp->tx_ring[entry<<1].dbdma_cmd = + cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE : + CMD_TX_PKT | BRANCH_IFTRUE) | len); +#endif + + /* Non-x86 Todo: explicitly flush cache lines here. */ + + /* Wake the potentially-idle transmit channel. */ + iowrite32(0x10001000, yp->base + TxCtrl); + + if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) + netif_start_queue (dev); /* Typical path */ + else + yp->tx_full = 1; + + if (yellowfin_debug > 4) { + netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n", + yp->cur_tx, entry); + } + return NETDEV_TX_OK; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct yellowfin_private *yp; + void __iomem *ioaddr; + int boguscnt = max_interrupt_work; + unsigned int handled = 0; + + yp = netdev_priv(dev); + ioaddr = yp->base; + + spin_lock (&yp->lock); + + do { + u16 intr_status = ioread16(ioaddr + IntrClear); + + if (yellowfin_debug > 4) + netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n", + intr_status); + + if (intr_status == 0) + break; + handled = 1; + + if (intr_status & (IntrRxDone | IntrEarlyRx)) { + yellowfin_rx(dev); + iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */ + } + +#ifdef NO_TXSTATS + for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) { + int entry = yp->dirty_tx % TX_RING_SIZE; + struct sk_buff *skb; + + if (yp->tx_ring[entry].result_status == 0) + break; + skb = yp->tx_skbuff[entry]; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + /* Free the original skb. */ + pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr), + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + yp->tx_skbuff[entry] = NULL; + } + if (yp->tx_full && + yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) { + /* The ring is no longer full, clear tbusy. */ + yp->tx_full = 0; + netif_wake_queue(dev); + } +#else + if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) { + unsigned dirty_tx = yp->dirty_tx; + + for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0; + dirty_tx++) { + /* Todo: optimize this. 
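Walk the status ring from dirty_tx toward cur_tx, stopping at the first entry whose tx_errs word is still zero, i.e. a packet the hardware has not finished transmitting yet.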
*/ + int entry = dirty_tx % TX_RING_SIZE; + u16 tx_errs = yp->tx_status[entry].tx_errs; + struct sk_buff *skb; + +#ifndef final_version + if (yellowfin_debug > 5) + netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n", + entry, + yp->tx_status[entry].tx_cnt, + yp->tx_status[entry].tx_errs, + yp->tx_status[entry].total_tx_cnt, + yp->tx_status[entry].paused); +#endif + if (tx_errs == 0) + break; /* It still hasn't been Txed */ + skb = yp->tx_skbuff[entry]; + if (tx_errs & 0xF810) { + /* There was an major error, log it. */ +#ifndef final_version + if (yellowfin_debug > 1) + netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n", + tx_errs); +#endif + dev->stats.tx_errors++; + if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++; + if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++; + if (tx_errs & 0x2000) dev->stats.tx_window_errors++; + if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++; + } else { +#ifndef final_version + if (yellowfin_debug > 4) + netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n", + tx_errs); +#endif + dev->stats.tx_bytes += skb->len; + dev->stats.collisions += tx_errs & 15; + dev->stats.tx_packets++; + } + /* Free the original skb. */ + pci_unmap_single(yp->pci_dev, + yp->tx_ring[entry<<1].addr, skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + yp->tx_skbuff[entry] = 0; + /* Mark status as empty. */ + yp->tx_status[entry].tx_errs = 0; + } + +#ifndef final_version + if (yp->cur_tx - dirty_tx > TX_RING_SIZE) { + netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n", + dirty_tx, yp->cur_tx, yp->tx_full); + dirty_tx += TX_RING_SIZE; + } +#endif + + if (yp->tx_full && + yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) { + /* The ring is no longer full, clear tbusy. */ + yp->tx_full = 0; + netif_wake_queue(dev); + } + + yp->dirty_tx = dirty_tx; + yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE]; + } +#endif + + /* Log errors and other uncommon events. */ + if (intr_status & 0x2ee) /* Abnormal error summary. */ + yellowfin_error(dev, intr_status); + + if (--boguscnt < 0) { + netdev_warn(dev, "Too much work at interrupt, status=%#04x\n", + intr_status); + break; + } + } while (1); + + if (yellowfin_debug > 3) + netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n", + ioread16(ioaddr + IntrStatus)); + + spin_unlock (&yp->lock); + return IRQ_RETVAL(handled); +} + +/* This routine is logically part of the interrupt handler, but separated + for clarity and better register allocation. */ +static int yellowfin_rx(struct net_device *dev) +{ + struct yellowfin_private *yp = netdev_priv(dev); + int entry = yp->cur_rx % RX_RING_SIZE; + int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx; + + if (yellowfin_debug > 4) { + printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n", + entry, yp->rx_ring[entry].result_status); + printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n", + entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr, + yp->rx_ring[entry].result_status); + } + + /* If EOP is set on the next entry, it's a new packet. Send it up. 
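An entry without RX_EOP means the frame spanned more than one buffer (oversized); such frames are counted as length errors below instead of being passed up.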
*/ + while (1) { + struct yellowfin_desc *desc = &yp->rx_ring[entry]; + struct sk_buff *rx_skb = yp->rx_skbuff[entry]; + s16 frame_status; + u16 desc_status; + int data_size, yf_size; + u8 *buf_addr; + + if(!desc->result_status) + break; + pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr), + yp->rx_buf_sz, PCI_DMA_FROMDEVICE); + desc_status = le32_to_cpu(desc->result_status) >> 16; + buf_addr = rx_skb->data; + data_size = (le32_to_cpu(desc->dbdma_cmd) - + le32_to_cpu(desc->result_status)) & 0xffff; + frame_status = get_unaligned_le16(&(buf_addr[data_size - 2])); + if (yellowfin_debug > 4) + printk(KERN_DEBUG " %s() status was %04x\n", + __func__, frame_status); + if (--boguscnt < 0) + break; + + yf_size = sizeof(struct yellowfin_desc); + + if ( ! (desc_status & RX_EOP)) { + if (data_size != 0) + netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n", + desc_status, data_size); + dev->stats.rx_length_errors++; + } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { + /* There was a error. */ + if (yellowfin_debug > 3) + printk(KERN_DEBUG " %s() Rx error was %04x\n", + __func__, frame_status); + dev->stats.rx_errors++; + if (frame_status & 0x0060) dev->stats.rx_length_errors++; + if (frame_status & 0x0008) dev->stats.rx_frame_errors++; + if (frame_status & 0x0010) dev->stats.rx_crc_errors++; + if (frame_status < 0) dev->stats.rx_dropped++; + } else if ( !(yp->drv_flags & IsGigabit) && + ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) { + u8 status1 = buf_addr[data_size-2]; + u8 status2 = buf_addr[data_size-1]; + dev->stats.rx_errors++; + if (status1 & 0xC0) dev->stats.rx_length_errors++; + if (status2 & 0x03) dev->stats.rx_frame_errors++; + if (status2 & 0x04) dev->stats.rx_crc_errors++; + if (status2 & 0x80) dev->stats.rx_dropped++; +#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ + } else if ((yp->flags & HasMACAddrBug) && + !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma + + entry * yf_size), + dev->dev_addr) && + !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma + + entry * yf_size), + "\377\377\377\377\377\377")) { + if (bogus_rx++ == 0) + netdev_warn(dev, "Bad frame to %pM\n", + buf_addr); +#endif + } else { + struct sk_buff *skb; + int pkt_len = data_size - + (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]); + /* To verify: Yellowfin Length should omit the CRC! */ + +#ifndef final_version + if (yellowfin_debug > 4) + printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n", + __func__, pkt_len, data_size, boguscnt); +#endif + /* Check if the packet is long enough to just pass up the skbuff + without copying to a properly sized skbuff. */ + if (pkt_len > rx_copybreak) { + skb_put(skb = rx_skb, pkt_len); + pci_unmap_single(yp->pci_dev, + le32_to_cpu(yp->rx_ring[entry].addr), + yp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + yp->rx_skbuff[entry] = NULL; + } else { + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) + break; + skb_reserve(skb, 2); /* 16 byte align the IP header */ + skb_copy_to_linear_data(skb, rx_skb->data, pkt_len); + skb_put(skb, pkt_len); + pci_dma_sync_single_for_device(yp->pci_dev, + le32_to_cpu(desc->addr), + yp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + } + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + entry = (++yp->cur_rx) % RX_RING_SIZE; + } + + /* Refill the Rx ring buffers. 
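Each refilled slot is marked CMD_STOP before the preceding descriptor is re-armed, so the receive engine never chains into a buffer that is not ready; the last ring entry also carries BRANCH_ALWAYS to wrap back to the start.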
*/ + for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) { + entry = yp->dirty_rx % RX_RING_SIZE; + if (yp->rx_skbuff[entry] == NULL) { + struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2); + if (skb == NULL) + break; /* Better luck next round. */ + yp->rx_skbuff[entry] = skb; + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, + skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + } + yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP); + yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */ + if (entry != 0) + yp->rx_ring[entry - 1].dbdma_cmd = + cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); + else + yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd = + cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS + | yp->rx_buf_sz); + } + + return 0; +} + +static void yellowfin_error(struct net_device *dev, int intr_status) +{ + netdev_err(dev, "Something Wicked happened! %04x\n", intr_status); + /* Hmmmmm, it's not clear what to do here. */ + if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) + dev->stats.tx_errors++; + if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) + dev->stats.rx_errors++; +} + +static int yellowfin_close(struct net_device *dev) +{ + struct yellowfin_private *yp = netdev_priv(dev); + void __iomem *ioaddr = yp->base; + int i; + + netif_stop_queue (dev); + + if (yellowfin_debug > 1) { + netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n", + ioread16(ioaddr + TxStatus), + ioread16(ioaddr + RxStatus), + ioread16(ioaddr + IntrStatus)); + netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n", + yp->cur_tx, yp->dirty_tx, + yp->cur_rx, yp->dirty_rx); + } + + /* Disable interrupts by clearing the interrupt mask. */ + iowrite16(0x0000, ioaddr + IntrEnb); + + /* Stop the chip's Tx and Rx processes. */ + iowrite32(0x80000000, ioaddr + RxCtrl); + iowrite32(0x80000000, ioaddr + TxCtrl); + + del_timer(&yp->timer); + +#if defined(__i386__) + if (yellowfin_debug > 2) { + printk(KERN_DEBUG " Tx ring at %08llx:\n", + (unsigned long long)yp->tx_ring_dma); + for (i = 0; i < TX_RING_SIZE*2; i++) + printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n", + ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ', + i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr, + yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status); + printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status); + for (i = 0; i < TX_RING_SIZE; i++) + printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n", + i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs, + yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused); + + printk(KERN_DEBUG " Rx ring %08llx:\n", + (unsigned long long)yp->rx_ring_dma); + for (i = 0; i < RX_RING_SIZE; i++) { + printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n", + ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ', + i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr, + yp->rx_ring[i].result_status); + if (yellowfin_debug > 6) { + if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) { + int j; + + printk(KERN_DEBUG); + for (j = 0; j < 0x50; j++) + pr_cont(" %04x", + get_unaligned(((u16*)yp->rx_ring[i].addr) + j)); + pr_cont("\n"); + } + } + } + } +#endif /* __i386__ debugging only */ + + free_irq(yp->pci_dev->irq, dev); + + /* Free all the skbuffs in the Rx queue. 
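The descriptors themselves are stopped and pointed at an obviously invalid address so any stray DMA access after close is easy to spot.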
*/ + for (i = 0; i < RX_RING_SIZE; i++) { + yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); + yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ + if (yp->rx_skbuff[i]) { + dev_kfree_skb(yp->rx_skbuff[i]); + } + yp->rx_skbuff[i] = NULL; + } + for (i = 0; i < TX_RING_SIZE; i++) { + if (yp->tx_skbuff[i]) + dev_kfree_skb(yp->tx_skbuff[i]); + yp->tx_skbuff[i] = NULL; + } + +#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ + if (yellowfin_debug > 0) { + netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n", + bogus_rx); + } +#endif + + return 0; +} + +/* Set or clear the multicast filter for this adaptor. */ + +static void set_rx_mode(struct net_device *dev) +{ + struct yellowfin_private *yp = netdev_priv(dev); + void __iomem *ioaddr = yp->base; + u16 cfg_value = ioread16(ioaddr + Cnfg); + + /* Stop the Rx process to change any value. */ + iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg); + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + iowrite16(0x000F, ioaddr + AddrMode); + } else if ((netdev_mc_count(dev) > 64) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter well, or accept all multicasts. */ + iowrite16(0x000B, ioaddr + AddrMode); + } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */ + struct netdev_hw_addr *ha; + u16 hash_table[4]; + int i; + + memset(hash_table, 0, sizeof(hash_table)); + netdev_for_each_mc_addr(ha, dev) { + unsigned int bit; + + /* Due to a bug in the early chip versions, multiple filter + slots must be set for each address. */ + if (yp->drv_flags & HasMulticastBug) { + bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f; + hash_table[bit >> 4] |= (1 << bit); + bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f; + hash_table[bit >> 4] |= (1 << bit); + bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f; + hash_table[bit >> 4] |= (1 << bit); + } + bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f; + hash_table[bit >> 4] |= (1 << bit); + } + /* Copy the hash table to the chip. */ + for (i = 0; i < 4; i++) + iowrite16(hash_table[i], ioaddr + HashTbl + i*2); + iowrite16(0x0003, ioaddr + AddrMode); + } else { /* Normal, unicast/broadcast-only mode. */ + iowrite16(0x0001, ioaddr + AddrMode); + } + /* Restart the Rx process. */ + iowrite16(cfg_value | 0x1000, ioaddr + Cnfg); +} + +static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct yellowfin_private *np = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static const struct ethtool_ops ethtool_ops = { + .get_drvinfo = yellowfin_get_drvinfo +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct yellowfin_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->base; + struct mii_ioctl_data *data = if_mii(rq); + + switch(cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = np->phys[0] & 0x1f; + /* Fall Through */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); + return 0; + + case SIOCSMIIREG: /* Write MII PHY register. */ + if (data->phy_id == np->phys[0]) { + u16 value = data->val_in; + switch (data->reg_num) { + case 0: + /* Check for autonegotiation on or reset. */ + np->medialock = (value & 0x9000) ? 0 : 1; + if (np->medialock) + np->full_duplex = (value & 0x0100) ? 
1 : 0; + break; + case 4: np->advertising = value; break; + } + /* Perhaps check_duplex(dev), depending on chip semantics. */ + } + mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); + return 0; + default: + return -EOPNOTSUPP; + } +} + + +static void yellowfin_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct yellowfin_private *np; + + BUG_ON(!dev); + np = netdev_priv(dev); + + pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, + np->tx_status_dma); + pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); + pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); + unregister_netdev (dev); + + pci_iounmap(pdev, np->base); + + pci_release_regions (pdev); + + free_netdev (dev); +} + + +static struct pci_driver yellowfin_driver = { + .name = DRV_NAME, + .id_table = yellowfin_pci_tbl, + .probe = yellowfin_init_one, + .remove = yellowfin_remove_one, +}; + + +static int __init yellowfin_init (void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + return pci_register_driver(&yellowfin_driver); +} + + +static void __exit yellowfin_cleanup (void) +{ + pci_unregister_driver (&yellowfin_driver); +} + + +module_init(yellowfin_init); +module_exit(yellowfin_cleanup); diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig new file mode 100644 index 000000000..01e6c329d --- /dev/null +++ b/drivers/net/ethernet/pasemi/Kconfig @@ -0,0 +1,30 @@ +# +# PA Semi network device configuration +# + +config NET_VENDOR_PASEMI + bool "PA Semi devices" + default y + depends on PPC_PASEMI && PCI && INET + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about PA Semi cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_PASEMI + +config PASEMI_MAC + tristate "PA Semi 1/10Gbit MAC" + depends on PPC_PASEMI && PCI && INET + select PHYLIB + select INET_LRO + ---help--- + This driver supports the on-chip 1/10Gbit Ethernet controller on + PA Semi's PWRficient line of chips. + +endif # NET_VENDOR_PASEMI diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile new file mode 100644 index 000000000..90497ffb1 --- /dev/null +++ b/drivers/net/ethernet/pasemi/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the A Semi network device drivers. +# + +obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o +pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c new file mode 100644 index 000000000..57a6e6cd7 --- /dev/null +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -0,0 +1,1902 @@ +/* + * Copyright (C) 2006-2007 PA Semi, Inc + * + * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "pasemi_mac.h" + +/* We have our own align, since ppc64 in general has it at 0 because + * of design flaws in some of the server bridge chips. However, for + * PWRficient doing the unaligned copies is more expensive than doing + * unaligned DMA, so make sure the data is aligned instead. + */ +#define LOCAL_SKB_ALIGN 2 + +/* TODO list + * + * - Multicast support + * - Large MTU support + * - SW LRO + * - Multiqueue RX/TX + */ + +#define LRO_MAX_AGGR 64 + +#define PE_MIN_MTU 64 +#define PE_MAX_MTU 9000 +#define PE_DEF_MTU ETH_DATA_LEN + +#define DEFAULT_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR ("Olof Johansson "); +MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); + +static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); + +extern const struct ethtool_ops pasemi_mac_ethtool_ops; + +static int translation_enabled(void) +{ +#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) + return 1; +#else + return firmware_has_feature(FW_FEATURE_LPAR); +#endif +} + +static void write_iob_reg(unsigned int reg, unsigned int val) +{ + pasemi_write_iob_reg(reg, val); +} + +static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg) +{ + return pasemi_read_mac_reg(mac->dma_if, reg); +} + +static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg, + unsigned int val) +{ + pasemi_write_mac_reg(mac->dma_if, reg, val); +} + +static unsigned int read_dma_reg(unsigned int reg) +{ + return pasemi_read_dma_reg(reg); +} + +static void write_dma_reg(unsigned int reg, unsigned int val) +{ + pasemi_write_dma_reg(reg, val); +} + +static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac) +{ + return mac->rx; +} + +static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac) +{ + return mac->tx; +} + +static inline void prefetch_skb(const struct sk_buff *skb) +{ + const void *d = skb; + + prefetch(d); + prefetch(d+64); + prefetch(d+128); + prefetch(d+192); +} + +static int mac_to_intf(struct pasemi_mac *mac) +{ + struct pci_dev *pdev = mac->pdev; + u32 tmp; + int nintf, off, i, j; + int devfn = pdev->devfn; + + tmp = read_dma_reg(PAS_DMA_CAP_IFI); + nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S; + off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S; + + /* IOFF contains the offset to the registers containing the + * DMA interface-to-MAC-pci-id mappings, and NIN contains number + * of total interfaces. Each register contains 4 devfns. + * Just do a linear search until we find the devfn of the MAC + * we're trying to look up. 
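Each 32-bit register packs four devfn bytes, least significant byte first, which is why the loop below shifts by 8*j and returns i*4 + j.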
+ */ + + for (i = 0; i < (nintf+3)/4; i++) { + tmp = read_dma_reg(off+4*i); + for (j = 0; j < 4; j++) { + if (((tmp >> (8*j)) & 0xff) == devfn) + return i*4 + j; + } + } + return -1; +} + +static void pasemi_mac_intf_disable(struct pasemi_mac *mac) +{ + unsigned int flags; + + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); + flags &= ~PAS_MAC_CFG_PCFG_PE; + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); +} + +static void pasemi_mac_intf_enable(struct pasemi_mac *mac) +{ + unsigned int flags; + + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); + flags |= PAS_MAC_CFG_PCFG_PE; + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); +} + +static int pasemi_get_mac_addr(struct pasemi_mac *mac) +{ + struct pci_dev *pdev = mac->pdev; + struct device_node *dn = pci_device_to_OF_node(pdev); + int len; + const u8 *maddr; + u8 addr[ETH_ALEN]; + + if (!dn) { + dev_dbg(&pdev->dev, + "No device node for mac, not configuring\n"); + return -ENOENT; + } + + maddr = of_get_property(dn, "local-mac-address", &len); + + if (maddr && len == ETH_ALEN) { + memcpy(mac->mac_addr, maddr, ETH_ALEN); + return 0; + } + + /* Some old versions of firmware mistakenly uses mac-address + * (and as a string) instead of a byte array in local-mac-address. + */ + + if (maddr == NULL) + maddr = of_get_property(dn, "mac-address", NULL); + + if (maddr == NULL) { + dev_warn(&pdev->dev, + "no mac address in device tree, not configuring\n"); + return -ENOENT; + } + + if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) + != ETH_ALEN) { + dev_warn(&pdev->dev, + "can't parse mac address, not configuring\n"); + return -EINVAL; + } + + memcpy(mac->mac_addr, addr, ETH_ALEN); + + return 0; +} + +static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) +{ + struct pasemi_mac *mac = netdev_priv(dev); + struct sockaddr *addr = p; + unsigned int adr0, adr1; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + adr0 = dev->dev_addr[2] << 24 | + dev->dev_addr[3] << 16 | + dev->dev_addr[4] << 8 | + dev->dev_addr[5]; + adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); + adr1 &= ~0xffff; + adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; + + pasemi_mac_intf_disable(mac); + write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); + write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); + pasemi_mac_intf_enable(mac); + + return 0; +} + +static int get_skb_hdr(struct sk_buff *skb, void **iphdr, + void **tcph, u64 *hdr_flags, void *data) +{ + u64 macrx = (u64) data; + unsigned int ip_len; + struct iphdr *iph; + + /* IPv4 header checksum failed */ + if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) + return -1; + + /* non tcp packet */ + skb_reset_network_header(skb); + iph = ip_hdr(skb); + if (iph->protocol != IPPROTO_TCP) + return -1; + + ip_len = ip_hdrlen(skb); + skb_set_transport_header(skb, ip_len); + *tcph = tcp_hdr(skb); + + /* check if ip header and tcp header are complete */ + if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) + return -1; + + *hdr_flags = LRO_IPV4 | LRO_TCP; + *iphdr = iph; + + return 0; +} + +static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, + const int nfrags, + struct sk_buff *skb, + const dma_addr_t *dmas) +{ + int f; + struct pci_dev *pdev = mac->dma_pdev; + + pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); + + for (f = 0; f < nfrags; f++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE); + } + 
dev_kfree_skb_irq(skb); + + /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs, + * aligned up to a power of 2 + */ + return (nfrags + 3) & ~1; +} + +static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac) +{ + struct pasemi_mac_csring *ring; + u32 val; + unsigned int cfg; + int chno; + + ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring), + offsetof(struct pasemi_mac_csring, chan)); + + if (!ring) { + dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n"); + goto out_chan; + } + + chno = ring->chan.chno; + + ring->size = CS_RING_SIZE; + ring->next_to_fill = 0; + + /* Allocate descriptors */ + if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE)) + goto out_ring_desc; + + write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), + PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); + val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); + val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3); + + write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); + + ring->events[0] = pasemi_dma_alloc_flag(); + ring->events[1] = pasemi_dma_alloc_flag(); + if (ring->events[0] < 0 || ring->events[1] < 0) + goto out_flags; + + pasemi_dma_clear_flag(ring->events[0]); + pasemi_dma_clear_flag(ring->events[1]); + + ring->fun = pasemi_dma_alloc_fun(); + if (ring->fun < 0) + goto out_fun; + + cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP | + PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) | + PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ; + + if (translation_enabled()) + cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; + + write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); + + /* enable channel */ + pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | + PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | + PAS_DMA_TXCHAN_TCMDSTA_DA); + + return ring; + +out_fun: +out_flags: + if (ring->events[0] >= 0) + pasemi_dma_free_flag(ring->events[0]); + if (ring->events[1] >= 0) + pasemi_dma_free_flag(ring->events[1]); + pasemi_dma_free_ring(&ring->chan); +out_ring_desc: + pasemi_dma_free_chan(&ring->chan); +out_chan: + + return NULL; +} + +static void pasemi_mac_setup_csrings(struct pasemi_mac *mac) +{ + int i; + mac->cs[0] = pasemi_mac_setup_csring(mac); + if (mac->type == MAC_TYPE_XAUI) + mac->cs[1] = pasemi_mac_setup_csring(mac); + else + mac->cs[1] = 0; + + for (i = 0; i < MAX_CS; i++) + if (mac->cs[i]) + mac->num_cs++; +} + +static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring) +{ + pasemi_dma_stop_chan(&csring->chan); + pasemi_dma_free_flag(csring->events[0]); + pasemi_dma_free_flag(csring->events[1]); + pasemi_dma_free_ring(&csring->chan); + pasemi_dma_free_chan(&csring->chan); + pasemi_dma_free_fun(csring->fun); +} + +static int pasemi_mac_setup_rx_resources(const struct net_device *dev) +{ + struct pasemi_mac_rxring *ring; + struct pasemi_mac *mac = netdev_priv(dev); + int chno; + unsigned int cfg; + + ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring), + offsetof(struct pasemi_mac_rxring, chan)); + + if (!ring) { + dev_err(&mac->pdev->dev, "Can't allocate RX channel\n"); + goto out_chan; + } + chno = ring->chan.chno; + + spin_lock_init(&ring->lock); + + ring->size = RX_RING_SIZE; + ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * + RX_RING_SIZE, GFP_KERNEL); + + if (!ring->ring_info) + goto out_ring_info; + + /* Allocate descriptors */ + if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) + goto out_ring_desc; + + ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, + RX_RING_SIZE * sizeof(u64), + 
&ring->buf_dma, GFP_KERNEL); + if (!ring->buffers) + goto out_ring_desc; + + write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), + PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); + + write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno), + PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) | + PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); + + cfg = PAS_DMA_RXCHAN_CFG_HBU(2); + + if (translation_enabled()) + cfg |= PAS_DMA_RXCHAN_CFG_CTR; + + write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg); + + write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if), + PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); + + write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if), + PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | + PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); + + cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 | + PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | + PAS_DMA_RXINT_CFG_HEN; + + if (translation_enabled()) + cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; + + write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg); + + ring->next_to_fill = 0; + ring->next_to_clean = 0; + ring->mac = mac; + mac->rx = ring; + + return 0; + +out_ring_desc: + kfree(ring->ring_info); +out_ring_info: + pasemi_dma_free_chan(&ring->chan); +out_chan: + return -ENOMEM; +} + +static struct pasemi_mac_txring * +pasemi_mac_setup_tx_resources(const struct net_device *dev) +{ + struct pasemi_mac *mac = netdev_priv(dev); + u32 val; + struct pasemi_mac_txring *ring; + unsigned int cfg; + int chno; + + ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring), + offsetof(struct pasemi_mac_txring, chan)); + + if (!ring) { + dev_err(&mac->pdev->dev, "Can't allocate TX channel\n"); + goto out_chan; + } + + chno = ring->chan.chno; + + spin_lock_init(&ring->lock); + + ring->size = TX_RING_SIZE; + ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * + TX_RING_SIZE, GFP_KERNEL); + if (!ring->ring_info) + goto out_ring_info; + + /* Allocate descriptors */ + if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE)) + goto out_ring_desc; + + write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), + PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); + val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); + val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); + + write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); + + cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | + PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | + PAS_DMA_TXCHAN_CFG_UP | + PAS_DMA_TXCHAN_CFG_WT(4); + + if (translation_enabled()) + cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; + + write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); + + ring->next_to_fill = 0; + ring->next_to_clean = 0; + ring->mac = mac; + + return ring; + +out_ring_desc: + kfree(ring->ring_info); +out_ring_info: + pasemi_dma_free_chan(&ring->chan); +out_chan: + return NULL; +} + +static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) +{ + struct pasemi_mac_txring *txring = tx_ring(mac); + unsigned int i, j; + struct pasemi_mac_buffer *info; + dma_addr_t dmas[MAX_SKB_FRAGS+1]; + int freed, nfrags; + int start, limit; + + start = txring->next_to_clean; + limit = txring->next_to_fill; + + /* Compensate for when fill has wrapped and clean has not */ + if (start > limit) + limit += TX_RING_SIZE; + + for (i = start; i < limit; i += freed) { + info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)]; + if (info->dma && info->skb) { + nfrags = skb_shinfo(info->skb)->nr_frags; + for (j = 0; j <= nfrags; j++) + dmas[j] = txring->ring_info[(i+1+j) & + (TX_RING_SIZE-1)].dma; + freed = pasemi_mac_unmap_tx_skb(mac, nfrags, + info->skb, dmas); + } else { + 
freed = 2; + } + } + + kfree(txring->ring_info); + pasemi_dma_free_chan(&txring->chan); + +} + +static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) +{ + struct pasemi_mac_rxring *rx = rx_ring(mac); + unsigned int i; + struct pasemi_mac_buffer *info; + + for (i = 0; i < RX_RING_SIZE; i++) { + info = &RX_DESC_INFO(rx, i); + if (info->skb && info->dma) { + pci_unmap_single(mac->dma_pdev, + info->dma, + info->skb->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(info->skb); + } + info->dma = 0; + info->skb = NULL; + } + + for (i = 0; i < RX_RING_SIZE; i++) + RX_BUFF(rx, i) = 0; +} + +static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) +{ + pasemi_mac_free_rx_buffers(mac); + + dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), + rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); + + kfree(rx_ring(mac)->ring_info); + pasemi_dma_free_chan(&rx_ring(mac)->chan); + mac->rx = NULL; +} + +static void pasemi_mac_replenish_rx_ring(struct net_device *dev, + const int limit) +{ + const struct pasemi_mac *mac = netdev_priv(dev); + struct pasemi_mac_rxring *rx = rx_ring(mac); + int fill, count; + + if (limit <= 0) + return; + + fill = rx_ring(mac)->next_to_fill; + for (count = 0; count < limit; count++) { + struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill); + u64 *buff = &RX_BUFF(rx, fill); + struct sk_buff *skb; + dma_addr_t dma; + + /* Entry in use? */ + WARN_ON(*buff); + + skb = netdev_alloc_skb(dev, mac->bufsz); + skb_reserve(skb, LOCAL_SKB_ALIGN); + + if (unlikely(!skb)) + break; + + dma = pci_map_single(mac->dma_pdev, skb->data, + mac->bufsz - LOCAL_SKB_ALIGN, + PCI_DMA_FROMDEVICE); + + if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { + dev_kfree_skb_irq(info->skb); + break; + } + + info->skb = skb; + info->dma = dma; + *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma); + fill++; + } + + wmb(); + + write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count); + + rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & + (RX_RING_SIZE - 1); +} + +static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac) +{ + struct pasemi_mac_rxring *rx = rx_ring(mac); + unsigned int reg, pcnt; + /* Re-enable packet count interrupts: finally + * ack the packet count interrupt we got in rx_intr. + */ + + pcnt = *rx->chan.status & PAS_STATUS_PCNT_M; + + reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; + + if (*rx->chan.status & PAS_STATUS_TIMER) + reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; + + write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg); +} + +static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac) +{ + unsigned int reg, pcnt; + + /* Re-enable packet count interrupts */ + pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; + + reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; + + write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg); +} + + +static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac, + const u64 macrx) +{ + unsigned int rcmdsta, ccmdsta; + struct pasemi_dmachan *chan = &rx_ring(mac)->chan; + + if (!netif_msg_rx_err(mac)) + return; + + rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); + + printk(KERN_ERR "pasemi_mac: rx error. 
macrx %016llx, rx status %llx\n", + macrx, *chan->status); + + printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", + rcmdsta, ccmdsta); +} + +static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac, + const u64 mactx) +{ + unsigned int cmdsta; + struct pasemi_dmachan *chan = &tx_ring(mac)->chan; + + if (!netif_msg_tx_err(mac)) + return; + + cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); + + printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\ + "tx status 0x%016llx\n", mactx, *chan->status); + + printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); +} + +static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, + const int limit) +{ + const struct pasemi_dmachan *chan = &rx->chan; + struct pasemi_mac *mac = rx->mac; + struct pci_dev *pdev = mac->dma_pdev; + unsigned int n; + int count, buf_index, tot_bytes, packets; + struct pasemi_mac_buffer *info; + struct sk_buff *skb; + unsigned int len; + u64 macrx, eval; + dma_addr_t dma; + + tot_bytes = 0; + packets = 0; + + spin_lock(&rx->lock); + + n = rx->next_to_clean; + + prefetch(&RX_DESC(rx, n)); + + for (count = 0; count < limit; count++) { + macrx = RX_DESC(rx, n); + prefetch(&RX_DESC(rx, n+4)); + + if ((macrx & XCT_MACRX_E) || + (*chan->status & PAS_STATUS_ERROR)) + pasemi_mac_rx_error(mac, macrx); + + if (!(macrx & XCT_MACRX_O)) + break; + + info = NULL; + + BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); + + eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >> + XCT_RXRES_8B_EVAL_S; + buf_index = eval-1; + + dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M); + info = &RX_DESC_INFO(rx, buf_index); + + skb = info->skb; + + prefetch_skb(skb); + + len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; + + pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, + PCI_DMA_FROMDEVICE); + + if (macrx & XCT_MACRX_CRC) { + /* CRC error flagged */ + mac->netdev->stats.rx_errors++; + mac->netdev->stats.rx_crc_errors++; + /* No need to free skb, it'll be reused */ + goto next; + } + + info->skb = NULL; + info->dma = 0; + + if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum = (macrx & XCT_MACRX_CSUM_M) >> + XCT_MACRX_CSUM_S; + } else { + skb_checksum_none_assert(skb); + } + + packets++; + tot_bytes += len; + + /* Don't include CRC */ + skb_put(skb, len-4); + + skb->protocol = eth_type_trans(skb, mac->netdev); + lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); + +next: + RX_DESC(rx, n) = 0; + RX_DESC(rx, n+1) = 0; + + /* Need to zero it out since hardware doesn't, since the + * replenish loop uses it to tell when it's done. + */ + RX_BUFF(rx, buf_index) = 0; + + n += 4; + } + + if (n > RX_RING_SIZE) { + /* Errata 5971 workaround: L2 target of headers */ + write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0); + n &= (RX_RING_SIZE-1); + } + + rx_ring(mac)->next_to_clean = n; + + lro_flush_all(&mac->lro_mgr); + + /* Increase is in number of 16-byte entries, and since each descriptor + * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with + * count*2. 
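In other words, two 16-byte ring entries are consumed per received frame, hence the count << 1 written to the channel INCR register below.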
+ */ + write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1); + + pasemi_mac_replenish_rx_ring(mac->netdev, count); + + mac->netdev->stats.rx_bytes += tot_bytes; + mac->netdev->stats.rx_packets += packets; + + spin_unlock(&rx_ring(mac)->lock); + + return count; +} + +/* Can't make this too large or we blow the kernel stack limits */ +#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS) + +static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring) +{ + struct pasemi_dmachan *chan = &txring->chan; + struct pasemi_mac *mac = txring->mac; + int i, j; + unsigned int start, descr_count, buf_count, batch_limit; + unsigned int ring_limit; + unsigned int total_count; + unsigned long flags; + struct sk_buff *skbs[TX_CLEAN_BATCHSIZE]; + dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; + int nf[TX_CLEAN_BATCHSIZE]; + int nr_frags; + + total_count = 0; + batch_limit = TX_CLEAN_BATCHSIZE; +restart: + spin_lock_irqsave(&txring->lock, flags); + + start = txring->next_to_clean; + ring_limit = txring->next_to_fill; + + prefetch(&TX_DESC_INFO(txring, start+1).skb); + + /* Compensate for when fill has wrapped but clean has not */ + if (start > ring_limit) + ring_limit += TX_RING_SIZE; + + buf_count = 0; + descr_count = 0; + + for (i = start; + descr_count < batch_limit && i < ring_limit; + i += buf_count) { + u64 mactx = TX_DESC(txring, i); + struct sk_buff *skb; + + if ((mactx & XCT_MACTX_E) || + (*chan->status & PAS_STATUS_ERROR)) + pasemi_mac_tx_error(mac, mactx); + + /* Skip over control descriptors */ + if (!(mactx & XCT_MACTX_LLEN_M)) { + TX_DESC(txring, i) = 0; + TX_DESC(txring, i+1) = 0; + buf_count = 2; + continue; + } + + skb = TX_DESC_INFO(txring, i+1).skb; + nr_frags = TX_DESC_INFO(txring, i).dma; + + if (unlikely(mactx & XCT_MACTX_O)) + /* Not yet transmitted */ + break; + + buf_count = 2 + nr_frags; + /* Since we always fill with an even number of entries, make + * sure we skip any unused one at the end as well. + */ + if (buf_count & 1) + buf_count++; + + for (j = 0; j <= nr_frags; j++) + dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma; + + skbs[descr_count] = skb; + nf[descr_count] = nr_frags; + + TX_DESC(txring, i) = 0; + TX_DESC(txring, i+1) = 0; + + descr_count++; + } + txring->next_to_clean = i & (TX_RING_SIZE-1); + + spin_unlock_irqrestore(&txring->lock, flags); + netif_wake_queue(mac->netdev); + + for (i = 0; i < descr_count; i++) + pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]); + + total_count += descr_count; + + /* If the batch was full, try to clean more */ + if (descr_count == batch_limit) + goto restart; + + return total_count; +} + + +static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) +{ + const struct pasemi_mac_rxring *rxring = data; + struct pasemi_mac *mac = rxring->mac; + const struct pasemi_dmachan *chan = &rxring->chan; + unsigned int reg; + + if (!(*chan->status & PAS_STATUS_CAUSE_M)) + return IRQ_NONE; + + /* Don't reset packet count so it won't fire again but clear + * all others. 
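Only the soft and error causes are acknowledged here; the pending packet-count cause is left for the NAPI poll scheduled below and is eventually acked via pasemi_mac_restart_rx_intr().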
+ */ + + reg = 0; + if (*chan->status & PAS_STATUS_SOFT) + reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; + if (*chan->status & PAS_STATUS_ERROR) + reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; + + napi_schedule(&mac->napi); + + write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); + + return IRQ_HANDLED; +} + +#define TX_CLEAN_INTERVAL HZ + +static void pasemi_mac_tx_timer(unsigned long data) +{ + struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data; + struct pasemi_mac *mac = txring->mac; + + pasemi_mac_clean_tx(txring); + + mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL); + + pasemi_mac_restart_tx_intr(mac); +} + +static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) +{ + struct pasemi_mac_txring *txring = data; + const struct pasemi_dmachan *chan = &txring->chan; + struct pasemi_mac *mac = txring->mac; + unsigned int reg; + + if (!(*chan->status & PAS_STATUS_CAUSE_M)) + return IRQ_NONE; + + reg = 0; + + if (*chan->status & PAS_STATUS_SOFT) + reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; + if (*chan->status & PAS_STATUS_ERROR) + reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; + + mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); + + napi_schedule(&mac->napi); + + if (reg) + write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); + + return IRQ_HANDLED; +} + +static void pasemi_adjust_link(struct net_device *dev) +{ + struct pasemi_mac *mac = netdev_priv(dev); + int msg; + unsigned int flags; + unsigned int new_flags; + + if (!mac->phydev->link) { + /* If no link, MAC speed settings don't matter. Just report + * link down and return. + */ + if (mac->link && netif_msg_link(mac)) + printk(KERN_INFO "%s: Link is down.\n", dev->name); + + netif_carrier_off(dev); + pasemi_mac_intf_disable(mac); + mac->link = 0; + + return; + } else { + pasemi_mac_intf_enable(mac); + netif_carrier_on(dev); + } + + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); + new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | + PAS_MAC_CFG_PCFG_TSR_M); + + if (!mac->phydev->duplex) + new_flags |= PAS_MAC_CFG_PCFG_HD; + + switch (mac->phydev->speed) { + case 1000: + new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | + PAS_MAC_CFG_PCFG_TSR_1G; + break; + case 100: + new_flags |= PAS_MAC_CFG_PCFG_SPD_100M | + PAS_MAC_CFG_PCFG_TSR_100M; + break; + case 10: + new_flags |= PAS_MAC_CFG_PCFG_SPD_10M | + PAS_MAC_CFG_PCFG_TSR_10M; + break; + default: + printk("Unsupported speed %d\n", mac->phydev->speed); + } + + /* Print on link or speed/duplex change */ + msg = mac->link != mac->phydev->link || flags != new_flags; + + mac->duplex = mac->phydev->duplex; + mac->speed = mac->phydev->speed; + mac->link = mac->phydev->link; + + if (new_flags != flags) + write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); + + if (msg && netif_msg_link(mac)) + printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", + dev->name, mac->speed, mac->duplex ? 
"full" : "half"); +} + +static int pasemi_mac_phy_init(struct net_device *dev) +{ + struct pasemi_mac *mac = netdev_priv(dev); + struct device_node *dn, *phy_dn; + struct phy_device *phydev; + + dn = pci_device_to_OF_node(mac->pdev); + phy_dn = of_parse_phandle(dn, "phy-handle", 0); + of_node_put(phy_dn); + + mac->link = 0; + mac->speed = 0; + mac->duplex = -1; + + phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0, + PHY_INTERFACE_MODE_SGMII); + + if (!phydev) { + printk(KERN_ERR "%s: Could not attach to phy\n", dev->name); + return -ENODEV; + } + + mac->phydev = phydev; + + return 0; +} + + +static int pasemi_mac_open(struct net_device *dev) +{ + struct pasemi_mac *mac = netdev_priv(dev); + unsigned int flags; + int i, ret; + + flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | + PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | + PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); + + write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); + + ret = pasemi_mac_setup_rx_resources(dev); + if (ret) + goto out_rx_resources; + + mac->tx = pasemi_mac_setup_tx_resources(dev); + + if (!mac->tx) + goto out_tx_ring; + + /* We might already have allocated rings in case mtu was changed + * before interface was brought up. + */ + if (dev->mtu > 1500 && !mac->num_cs) { + pasemi_mac_setup_csrings(mac); + if (!mac->num_cs) + goto out_tx_ring; + } + + /* Zero out rmon counters */ + for (i = 0; i < 32; i++) + write_mac_reg(mac, PAS_MAC_RMON(i), 0); + + /* 0x3ff with 33MHz clock is about 31us */ + write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); + + write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno), + PAS_IOB_DMA_RXCH_CFG_CNTTH(256)); + + write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), + PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); + + write_mac_reg(mac, PAS_MAC_IPC_CHNL, + PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) | + PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno)); + + /* enable rx if */ + write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), + PAS_DMA_RXINT_RCMDSTA_EN | + PAS_DMA_RXINT_RCMDSTA_DROPS_M | + PAS_DMA_RXINT_RCMDSTA_BP | + PAS_DMA_RXINT_RCMDSTA_OO | + PAS_DMA_RXINT_RCMDSTA_BT); + + /* enable rx channel */ + pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | + PAS_DMA_RXCHAN_CCMDSTA_OD | + PAS_DMA_RXCHAN_CCMDSTA_FD | + PAS_DMA_RXCHAN_CCMDSTA_DT); + + /* enable tx channel */ + pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | + PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | + PAS_DMA_TXCHAN_TCMDSTA_DA); + + pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); + + write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), + RX_RING_SIZE>>1); + + /* Clear out any residual packet count state from firmware */ + pasemi_mac_restart_rx_intr(mac); + pasemi_mac_restart_tx_intr(mac); + + flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; + + if (mac->type == MAC_TYPE_GMAC) + flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; + else + flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; + + /* Enable interface in MAC */ + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); + + ret = pasemi_mac_phy_init(dev); + if (ret) { + /* Since we won't get link notification, just enable RX */ + pasemi_mac_intf_enable(mac); + if (mac->type == MAC_TYPE_GMAC) { + /* Warn for missing PHY on SGMII (1Gig) ports */ + dev_warn(&mac->pdev->dev, + "PHY init failed: %d.\n", ret); + dev_warn(&mac->pdev->dev, + "Defaulting to 1Gbit full duplex\n"); + } + } + + netif_start_queue(dev); + napi_enable(&mac->napi); + + 
snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", + dev->name); + + ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0, + mac->tx_irq_name, mac->tx); + if (ret) { + dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", + mac->tx->chan.irq, ret); + goto out_tx_int; + } + + snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", + dev->name); + + ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0, + mac->rx_irq_name, mac->rx); + if (ret) { + dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", + mac->rx->chan.irq, ret); + goto out_rx_int; + } + + if (mac->phydev) + phy_start(mac->phydev); + + setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, + (unsigned long)mac->tx); + mod_timer(&mac->tx->clean_timer, jiffies + HZ); + + return 0; + +out_rx_int: + free_irq(mac->tx->chan.irq, mac->tx); +out_tx_int: + napi_disable(&mac->napi); + netif_stop_queue(dev); +out_tx_ring: + if (mac->tx) + pasemi_mac_free_tx_resources(mac); + pasemi_mac_free_rx_resources(mac); +out_rx_resources: + + return ret; +} + +#define MAX_RETRIES 5000 + +static void pasemi_mac_pause_txchan(struct pasemi_mac *mac) +{ + unsigned int sta, retries; + int txch = tx_ring(mac)->chan.chno; + + write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), + PAS_DMA_TXCHAN_TCMDSTA_ST); + + for (retries = 0; retries < MAX_RETRIES; retries++) { + sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); + if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) + break; + cond_resched(); + } + + if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) + dev_err(&mac->dma_pdev->dev, + "Failed to stop tx channel, tcmdsta %08x\n", sta); + + write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); +} + +static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac) +{ + unsigned int sta, retries; + int rxch = rx_ring(mac)->chan.chno; + + write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), + PAS_DMA_RXCHAN_CCMDSTA_ST); + for (retries = 0; retries < MAX_RETRIES; retries++) { + sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); + if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) + break; + cond_resched(); + } + + if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) + dev_err(&mac->dma_pdev->dev, + "Failed to stop rx channel, ccmdsta 08%x\n", sta); + write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); +} + +static void pasemi_mac_pause_rxint(struct pasemi_mac *mac) +{ + unsigned int sta, retries; + + write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), + PAS_DMA_RXINT_RCMDSTA_ST); + for (retries = 0; retries < MAX_RETRIES; retries++) { + sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) + break; + cond_resched(); + } + + if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) + dev_err(&mac->dma_pdev->dev, + "Failed to stop rx interface, rcmdsta %08x\n", sta); + write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); +} + +static int pasemi_mac_close(struct net_device *dev) +{ + struct pasemi_mac *mac = netdev_priv(dev); + unsigned int sta; + int rxch, txch, i; + + rxch = rx_ring(mac)->chan.chno; + txch = tx_ring(mac)->chan.chno; + + if (mac->phydev) { + phy_stop(mac->phydev); + phy_disconnect(mac->phydev); + } + + del_timer_sync(&mac->tx->clean_timer); + + netif_stop_queue(dev); + napi_disable(&mac->napi); + + sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | + PAS_DMA_RXINT_RCMDSTA_OO | + PAS_DMA_RXINT_RCMDSTA_BT)) + printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); + + sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); + if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | + PAS_DMA_RXCHAN_CCMDSTA_OD | + 
PAS_DMA_RXCHAN_CCMDSTA_FD | + PAS_DMA_RXCHAN_CCMDSTA_DT)) + printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); + + sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); + if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) + printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); + + /* Clean out any pending buffers */ + pasemi_mac_clean_tx(tx_ring(mac)); + pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); + + pasemi_mac_pause_txchan(mac); + pasemi_mac_pause_rxint(mac); + pasemi_mac_pause_rxchan(mac); + pasemi_mac_intf_disable(mac); + + free_irq(mac->tx->chan.irq, mac->tx); + free_irq(mac->rx->chan.irq, mac->rx); + + for (i = 0; i < mac->num_cs; i++) { + pasemi_mac_free_csring(mac->cs[i]); + mac->cs[i] = NULL; + } + + mac->num_cs = 0; + + /* Free resources */ + pasemi_mac_free_rx_resources(mac); + pasemi_mac_free_tx_resources(mac); + + return 0; +} + +static void pasemi_mac_queue_csdesc(const struct sk_buff *skb, + const dma_addr_t *map, + const unsigned int *map_size, + struct pasemi_mac_txring *txring, + struct pasemi_mac_csring *csring) +{ + u64 fund; + dma_addr_t cs_dest; + const int nh_off = skb_network_offset(skb); + const int nh_len = skb_network_header_len(skb); + const int nfrags = skb_shinfo(skb)->nr_frags; + int cs_size, i, fill, hdr, cpyhdr, evt; + dma_addr_t csdma; + + fund = XCT_FUN_ST | XCT_FUN_RR_8BRES | + XCT_FUN_O | XCT_FUN_FUN(csring->fun) | + XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) | + XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE; + + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + fund |= XCT_FUN_SIG_TCP4; + /* TCP checksum is 16 bytes into the header */ + cs_dest = map[0] + skb_transport_offset(skb) + 16; + break; + case IPPROTO_UDP: + fund |= XCT_FUN_SIG_UDP4; + /* UDP checksum is 6 bytes into the header */ + cs_dest = map[0] + skb_transport_offset(skb) + 6; + break; + default: + BUG(); + } + + /* Do the checksum offloaded */ + fill = csring->next_to_fill; + hdr = fill; + + CS_DESC(csring, fill++) = fund; + /* Room for 8BRES. 
Checksum result is really 2 bytes into it */ + csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2; + CS_DESC(csring, fill++) = 0; + + CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off); + for (i = 1; i <= nfrags; i++) + CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); + + fill += i; + if (fill & 1) + fill++; + + /* Copy the result into the TCP packet */ + cpyhdr = fill; + CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) | + XCT_FUN_LLEN(2) | XCT_FUN_SE; + CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T; + CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma); + fill++; + + evt = !csring->last_event; + csring->last_event = evt; + + /* Event handshaking with MAC TX */ + CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]); + CS_DESC(csring, fill++) = 0; + CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]); + CS_DESC(csring, fill++) = 0; + csring->next_to_fill = fill & (CS_RING_SIZE-1); + + cs_size = fill - hdr; + write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1); + + /* TX-side event handshaking */ + fill = txring->next_to_fill; + TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]); + TX_DESC(txring, fill++) = 0; + TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]); + TX_DESC(txring, fill++) = 0; + txring->next_to_fill = fill; + + write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2); +} + +static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct pasemi_mac * const mac = netdev_priv(dev); + struct pasemi_mac_txring * const txring = tx_ring(mac); + struct pasemi_mac_csring *csring; + u64 dflags = 0; + u64 mactx; + dma_addr_t map[MAX_SKB_FRAGS+1]; + unsigned int map_size[MAX_SKB_FRAGS+1]; + unsigned long flags; + int i, nfrags; + int fill; + const int nh_off = skb_network_offset(skb); + const int nh_len = skb_network_header_len(skb); + + prefetch(&txring->ring_info); + + dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD; + + nfrags = skb_shinfo(skb)->nr_frags; + + map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), + PCI_DMA_TODEVICE); + map_size[0] = skb_headlen(skb); + if (pci_dma_mapping_error(mac->dma_pdev, map[0])) + goto out_err_nolock; + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + map_size[i+1] = skb_frag_size(frag); + if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) { + nfrags = i; + goto out_err_nolock; + } + } + + if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + dflags |= XCT_MACTX_CSUM_TCP; + dflags |= XCT_MACTX_IPH(nh_len >> 2); + dflags |= XCT_MACTX_IPO(nh_off); + break; + case IPPROTO_UDP: + dflags |= XCT_MACTX_CSUM_UDP; + dflags |= XCT_MACTX_IPH(nh_len >> 2); + dflags |= XCT_MACTX_IPO(nh_off); + break; + default: + WARN_ON(1); + } + } + + mactx = dflags | XCT_MACTX_LLEN(skb->len); + + spin_lock_irqsave(&txring->lock, flags); + + /* Avoid stepping on the same cache line that the DMA controller + * is currently about to send, so leave at least 8 words 
available. + * Total free space needed is mactx + fragments + 8 + */ + if (RING_AVAIL(txring) < nfrags + 14) { + /* no room -- stop the queue and wait for tx intr */ + netif_stop_queue(dev); + goto out_err; + } + + /* Queue up checksum + event descriptors, if needed */ + if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) { + csring = mac->cs[mac->last_cs]; + mac->last_cs = (mac->last_cs + 1) % mac->num_cs; + + pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring); + } + + fill = txring->next_to_fill; + TX_DESC(txring, fill) = mactx; + TX_DESC_INFO(txring, fill).dma = nfrags; + fill++; + TX_DESC_INFO(txring, fill).skb = skb; + for (i = 0; i <= nfrags; i++) { + TX_DESC(txring, fill+i) = + XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); + TX_DESC_INFO(txring, fill+i).dma = map[i]; + } + + /* We have to add an even number of 8-byte entries to the ring + * even if the last one is unused. That means always an odd number + * of pointers + one mactx descriptor. + */ + if (nfrags & 1) + nfrags++; + + txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + spin_unlock_irqrestore(&txring->lock, flags); + + write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1); + + return NETDEV_TX_OK; + +out_err: + spin_unlock_irqrestore(&txring->lock, flags); +out_err_nolock: + while (nfrags--) + pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], + PCI_DMA_TODEVICE); + + return NETDEV_TX_BUSY; +} + +static void pasemi_mac_set_rx_mode(struct net_device *dev) +{ + const struct pasemi_mac *mac = netdev_priv(dev); + unsigned int flags; + + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); + + /* Set promiscuous */ + if (dev->flags & IFF_PROMISC) + flags |= PAS_MAC_CFG_PCFG_PR; + else + flags &= ~PAS_MAC_CFG_PCFG_PR; + + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); +} + + +static int pasemi_mac_poll(struct napi_struct *napi, int budget) +{ + struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); + int pkts; + + pasemi_mac_clean_tx(tx_ring(mac)); + pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); + if (pkts < budget) { + /* all done, no more packets present */ + napi_complete(napi); + + pasemi_mac_restart_rx_intr(mac); + pasemi_mac_restart_tx_intr(mac); + } + return pkts; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void pasemi_mac_netpoll(struct net_device *dev) +{ + const struct pasemi_mac *mac = netdev_priv(dev); + + disable_irq(mac->tx->chan.irq); + pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx); + enable_irq(mac->tx->chan.irq); + + disable_irq(mac->rx->chan.irq); + pasemi_mac_rx_intr(mac->rx->chan.irq, mac->rx); + enable_irq(mac->rx->chan.irq); +} +#endif + +static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) +{ + struct pasemi_mac *mac = netdev_priv(dev); + unsigned int reg; + unsigned int rcmdsta = 0; + int running; + int ret = 0; + + if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) + return -EINVAL; + + running = netif_running(dev); + + if (running) { + /* Need to stop the interface, clean out all already + * received buffers, free all unused buffers on the RX + * interface ring, then finally re-fill the rx ring with + * the new-size buffers and restart. 
+ */ + + napi_disable(&mac->napi); + netif_tx_disable(dev); + pasemi_mac_intf_disable(mac); + + rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + pasemi_mac_pause_rxint(mac); + pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); + pasemi_mac_free_rx_buffers(mac); + + } + + /* Setup checksum channels if large MTU and none already allocated */ + if (new_mtu > 1500 && !mac->num_cs) { + pasemi_mac_setup_csrings(mac); + if (!mac->num_cs) { + ret = -ENOMEM; + goto out; + } + } + + /* Change maxf, i.e. what size frames are accepted. + * Need room for ethernet header and CRC word + */ + reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG); + reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M; + reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); + write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); + + dev->mtu = new_mtu; + /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ + mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; + +out: + if (running) { + write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), + rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); + + rx_ring(mac)->next_to_fill = 0; + pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1); + + napi_enable(&mac->napi); + netif_start_queue(dev); + pasemi_mac_intf_enable(mac); + } + + return ret; +} + +static const struct net_device_ops pasemi_netdev_ops = { + .ndo_open = pasemi_mac_open, + .ndo_stop = pasemi_mac_close, + .ndo_start_xmit = pasemi_mac_start_tx, + .ndo_set_rx_mode = pasemi_mac_set_rx_mode, + .ndo_set_mac_address = pasemi_mac_set_mac_addr, + .ndo_change_mtu = pasemi_mac_change_mtu, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = pasemi_mac_netpoll, +#endif +}; + +static int +pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct pasemi_mac *mac; + int err, ret; + + err = pci_enable_device(pdev); + if (err) + return err; + + dev = alloc_etherdev(sizeof(struct pasemi_mac)); + if (dev == NULL) { + err = -ENOMEM; + goto out_disable_device; + } + + pci_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + mac = netdev_priv(dev); + + mac->pdev = pdev; + mac->netdev = dev; + + netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); + + dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_GSO; + + mac->lro_mgr.max_aggr = LRO_MAX_AGGR; + mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; + mac->lro_mgr.lro_arr = mac->lro_desc; + mac->lro_mgr.get_skb_header = get_skb_hdr; + mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; + mac->lro_mgr.dev = mac->netdev; + mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; + mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; + + + mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); + if (!mac->dma_pdev) { + dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); + err = -ENODEV; + goto out; + } + + mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); + if (!mac->iob_pdev) { + dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); + err = -ENODEV; + goto out; + } + + /* get mac addr from device tree */ + if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { + err = -ENODEV; + goto out; + } + memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); + + ret = mac_to_intf(mac); + if (ret < 0) { + dev_err(&mac->pdev->dev, "Can't map DMA interface\n"); + err = -ENODEV; + goto out; + } + mac->dma_if = ret; + + switch (pdev->device) { + case 0xa005: + mac->type = MAC_TYPE_GMAC; + break; + case 0xa006: + mac->type = MAC_TYPE_XAUI; 
+ break; + default: + err = -ENODEV; + goto out; + } + + dev->netdev_ops = &pasemi_netdev_ops; + dev->mtu = PE_DEF_MTU; + /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ + mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; + + dev->ethtool_ops = &pasemi_mac_ethtool_ops; + + if (err) + goto out; + + mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + /* Enable most messages by default */ + mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; + + err = register_netdev(dev); + + if (err) { + dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", + err); + goto out; + } else if (netif_msg_probe(mac)) { + printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n", + dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", + mac->dma_if, dev->dev_addr); + } + + return err; + +out: + pci_dev_put(mac->iob_pdev); + pci_dev_put(mac->dma_pdev); + + free_netdev(dev); +out_disable_device: + pci_disable_device(pdev); + return err; + +} + +static void pasemi_mac_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct pasemi_mac *mac; + + if (!netdev) + return; + + mac = netdev_priv(netdev); + + unregister_netdev(netdev); + + pci_disable_device(pdev); + pci_dev_put(mac->dma_pdev); + pci_dev_put(mac->iob_pdev); + + pasemi_dma_free_chan(&mac->tx->chan); + pasemi_dma_free_chan(&mac->rx->chan); + + free_netdev(netdev); +} + +static const struct pci_device_id pasemi_mac_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) }, + { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl); + +static struct pci_driver pasemi_mac_driver = { + .name = "pasemi_mac", + .id_table = pasemi_mac_pci_tbl, + .probe = pasemi_mac_probe, + .remove = pasemi_mac_remove, +}; + +static void __exit pasemi_mac_cleanup_module(void) +{ + pci_unregister_driver(&pasemi_mac_driver); +} + +int pasemi_mac_init_module(void) +{ + int err; + + err = pasemi_dma_init(); + if (err) + return err; + + return pci_register_driver(&pasemi_mac_driver); +} + +module_init(pasemi_mac_init_module); +module_exit(pasemi_mac_cleanup_module); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h new file mode 100644 index 000000000..a5807703a --- /dev/null +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2006 PA Semi, Inc + * + * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and + * hardware register layouts. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#ifndef PASEMI_MAC_H +#define PASEMI_MAC_H + +#include +#include +#include +#include + +/* Must be a power of two */ +#define RX_RING_SIZE 2048 +#define TX_RING_SIZE 4096 +#define CS_RING_SIZE (TX_RING_SIZE*2) + + +#define MAX_LRO_DESCRIPTORS 8 +#define MAX_CS 2 + +struct pasemi_mac_txring { + struct pasemi_dmachan chan; /* Must be first */ + spinlock_t lock; + unsigned int size; + unsigned int next_to_fill; + unsigned int next_to_clean; + struct pasemi_mac_buffer *ring_info; + struct pasemi_mac *mac; /* Needed in intr handler */ + struct timer_list clean_timer; +}; + +struct pasemi_mac_rxring { + struct pasemi_dmachan chan; /* Must be first */ + spinlock_t lock; + u64 *buffers; /* RX interface buffer ring */ + dma_addr_t buf_dma; + unsigned int size; + unsigned int next_to_fill; + unsigned int next_to_clean; + struct pasemi_mac_buffer *ring_info; + struct pasemi_mac *mac; /* Needed in intr handler */ +}; + +struct pasemi_mac_csring { + struct pasemi_dmachan chan; + unsigned int size; + unsigned int next_to_fill; + int events[2]; + int last_event; + int fun; +}; + +struct pasemi_mac { + struct net_device *netdev; + struct pci_dev *pdev; + struct pci_dev *dma_pdev; + struct pci_dev *iob_pdev; + struct phy_device *phydev; + struct napi_struct napi; + + int bufsz; /* RX ring buffer size */ + int last_cs; + int num_cs; + u32 dma_if; + u8 type; +#define MAC_TYPE_GMAC 1 +#define MAC_TYPE_XAUI 2 + + u8 mac_addr[ETH_ALEN]; + + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; + struct timer_list rxtimer; + unsigned int lro_max_aggr; + + struct pasemi_mac_txring *tx; + struct pasemi_mac_rxring *rx; + struct pasemi_mac_csring *cs[MAX_CS]; + char tx_irq_name[10]; /* "eth%d tx" */ + char rx_irq_name[10]; /* "eth%d rx" */ + int link; + int speed; + int duplex; + + unsigned int msg_enable; +}; + +/* Software status descriptor (ring_info) */ +struct pasemi_mac_buffer { + struct sk_buff *skb; + dma_addr_t dma; +}; + +#define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)]) +#define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)]) +#define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)]) +#define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) +#define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) +#define CS_DESC(cs, num) ((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)]) + +#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ + & ((ring)->size - 1)) +#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) + +/* PCI register offsets and formats */ + + +/* MAC CFG register offsets */ +enum { + PAS_MAC_CFG_PCFG = 0x80, + PAS_MAC_CFG_MACCFG = 0x84, + PAS_MAC_CFG_ADR0 = 0x8c, + PAS_MAC_CFG_ADR1 = 0x90, + PAS_MAC_CFG_TXP = 0x98, + PAS_MAC_CFG_RMON = 0x100, + PAS_MAC_IPC_CHNL = 0x208, +}; + +/* MAC CFG register fields */ +#define PAS_MAC_CFG_PCFG_PE 0x80000000 +#define PAS_MAC_CFG_PCFG_CE 0x40000000 +#define PAS_MAC_CFG_PCFG_BU 0x20000000 +#define PAS_MAC_CFG_PCFG_TT 0x10000000 +#define PAS_MAC_CFG_PCFG_TSR_M 0x0c000000 +#define PAS_MAC_CFG_PCFG_TSR_10M 0x00000000 +#define PAS_MAC_CFG_PCFG_TSR_100M 0x04000000 +#define PAS_MAC_CFG_PCFG_TSR_1G 0x08000000 +#define PAS_MAC_CFG_PCFG_TSR_10G 0x0c000000 +#define PAS_MAC_CFG_PCFG_T24 0x02000000 +#define PAS_MAC_CFG_PCFG_PR 0x01000000 +#define PAS_MAC_CFG_PCFG_CRO_M 0x00ff0000 +#define PAS_MAC_CFG_PCFG_CRO_S 16 +#define PAS_MAC_CFG_PCFG_IPO_M 0x0000ff00 +#define PAS_MAC_CFG_PCFG_IPO_S 8 +#define PAS_MAC_CFG_PCFG_S1 0x00000080 
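
The RING_USED()/RING_AVAIL() macros above only stay correct because every ring size in this header is a power of two: the index difference is masked with (size - 1), so the occupancy comes out right even after next_to_fill has wrapped back below next_to_clean. This is also what the RING_AVAIL(txring) < nfrags + 14 headroom test in pasemi_mac_start_tx() relies on. A small stand-alone sketch of that arithmetic follows; the 16-entry ring is a made-up size for illustration, not a value used by the driver.

/*
 * Stand-alone illustration of the masked ring accounting used by
 * RING_USED()/RING_AVAIL() above.  The 16-entry size is hypothetical;
 * the driver uses RX_RING_SIZE, TX_RING_SIZE and CS_RING_SIZE.
 */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 16	/* must be a power of two */

struct ring {
	unsigned int size;		/* power of two */
	unsigned int next_to_fill;	/* producer index */
	unsigned int next_to_clean;	/* consumer index */
};

/* Mirrors the macros in pasemi_mac.h */
#define RING_USED(ring)  (((ring)->next_to_fill - (ring)->next_to_clean) & \
			  ((ring)->size - 1))
#define RING_AVAIL(ring) ((ring)->size - RING_USED(ring))

int main(void)
{
	struct ring r = { .size = RING_SIZE };

	/* Simple case, no wraparound: 14 - 10 = 4 entries in flight */
	r.next_to_fill = 14;
	r.next_to_clean = 10;
	assert(RING_USED(&r) == 4);

	/* Producer has wrapped past the end and sits at slot 3 again;
	 * the unsigned subtraction plus mask still yields 5 used slots. */
	r.next_to_fill = 3;
	r.next_to_clean = 14;
	assert(RING_USED(&r) == 5 && RING_AVAIL(&r) == 11);

	printf("used %u, avail %u\n", RING_USED(&r), RING_AVAIL(&r));
	return 0;
}
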
+#define PAS_MAC_CFG_PCFG_IO_M 0x00000060 +#define PAS_MAC_CFG_PCFG_IO_MAC 0x00000000 +#define PAS_MAC_CFG_PCFG_IO_OFF 0x00000020 +#define PAS_MAC_CFG_PCFG_IO_IND_ETH 0x00000040 +#define PAS_MAC_CFG_PCFG_IO_IND_IP 0x00000060 +#define PAS_MAC_CFG_PCFG_LP 0x00000010 +#define PAS_MAC_CFG_PCFG_TS 0x00000008 +#define PAS_MAC_CFG_PCFG_HD 0x00000004 +#define PAS_MAC_CFG_PCFG_SPD_M 0x00000003 +#define PAS_MAC_CFG_PCFG_SPD_10M 0x00000000 +#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001 +#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002 +#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003 + +#define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000 +#define PAS_MAC_CFG_MACCFG_TXT_S 28 +#define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000 +#define PAS_MAC_CFG_MACCFG_PRES_S 24 +#define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00 +#define PAS_MAC_CFG_MACCFG_MAXF_S 8 +#define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \ + PAS_MAC_CFG_MACCFG_MAXF_M) +#define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff +#define PAS_MAC_CFG_MACCFG_MINF_S 0 + +#define PAS_MAC_CFG_TXP_FCF 0x01000000 +#define PAS_MAC_CFG_TXP_FCE 0x00800000 +#define PAS_MAC_CFG_TXP_FC 0x00400000 +#define PAS_MAC_CFG_TXP_FPC_M 0x00300000 +#define PAS_MAC_CFG_TXP_FPC_S 20 +#define PAS_MAC_CFG_TXP_FPC(x) (((x) << PAS_MAC_CFG_TXP_FPC_S) & \ + PAS_MAC_CFG_TXP_FPC_M) +#define PAS_MAC_CFG_TXP_RT 0x00080000 +#define PAS_MAC_CFG_TXP_BL 0x00040000 +#define PAS_MAC_CFG_TXP_SL_M 0x00030000 +#define PAS_MAC_CFG_TXP_SL_S 16 +#define PAS_MAC_CFG_TXP_SL(x) (((x) << PAS_MAC_CFG_TXP_SL_S) & \ + PAS_MAC_CFG_TXP_SL_M) +#define PAS_MAC_CFG_TXP_COB_M 0x0000f000 +#define PAS_MAC_CFG_TXP_COB_S 12 +#define PAS_MAC_CFG_TXP_COB(x) (((x) << PAS_MAC_CFG_TXP_COB_S) & \ + PAS_MAC_CFG_TXP_COB_M) +#define PAS_MAC_CFG_TXP_TIFT_M 0x00000f00 +#define PAS_MAC_CFG_TXP_TIFT_S 8 +#define PAS_MAC_CFG_TXP_TIFT(x) (((x) << PAS_MAC_CFG_TXP_TIFT_S) & \ + PAS_MAC_CFG_TXP_TIFT_M) +#define PAS_MAC_CFG_TXP_TIFG_M 0x000000ff +#define PAS_MAC_CFG_TXP_TIFG_S 0 +#define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \ + PAS_MAC_CFG_TXP_TIFG_M) + +#define PAS_MAC_RMON(r) (0x100+(r)*4) + +#define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000 +#define PAS_MAC_IPC_CHNL_DCHNO_S 16 +#define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \ + PAS_MAC_IPC_CHNL_DCHNO_M) +#define PAS_MAC_IPC_CHNL_BCH_M 0x0000003f +#define PAS_MAC_IPC_CHNL_BCH_S 0 +#define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \ + PAS_MAC_IPC_CHNL_BCH_M) + + +#endif /* PASEMI_MAC_H */ diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c new file mode 100644 index 000000000..25fae5682 --- /dev/null +++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2006-2008 PA Semi, Inc + * + * Ethtool hooks for the PA Semi PWRficient onchip 1G/10G Ethernet MACs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + + +#include +#include +#include +#include + +#include +#include "pasemi_mac.h" + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "rx-drops" }, + { "rx-bytes" }, + { "rx-packets" }, + { "rx-broadcast-packets" }, + { "rx-multicast-packets" }, + { "rx-crc-errors" }, + { "rx-undersize-errors" }, + { "rx-oversize-errors" }, + { "rx-short-fragment-errors" }, + { "rx-jabber-errors" }, + { "rx-64-byte-packets" }, + { "rx-65-127-byte-packets" }, + { "rx-128-255-byte-packets" }, + { "rx-256-511-byte-packets" }, + { "rx-512-1023-byte-packets" }, + { "rx-1024-1518-byte-packets" }, + { "rx-pause-frames" }, + { "tx-bytes" }, + { "tx-packets" }, + { "tx-broadcast-packets" }, + { "tx-multicast-packets" }, + { "tx-collisions" }, + { "tx-late-collisions" }, + { "tx-excessive-collisions" }, + { "tx-crc-errors" }, + { "tx-undersize-errors" }, + { "tx-oversize-errors" }, + { "tx-64-byte-packets" }, + { "tx-65-127-byte-packets" }, + { "tx-128-255-byte-packets" }, + { "tx-256-511-byte-packets" }, + { "tx-512-1023-byte-packets" }, + { "tx-1024-1518-byte-packets" }, +}; + +static int +pasemi_mac_ethtool_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + struct phy_device *phydev = mac->phydev; + + if (!phydev) + return -EOPNOTSUPP; + + return phy_ethtool_gset(phydev, cmd); +} + +static int +pasemi_mac_ethtool_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + struct phy_device *phydev = mac->phydev; + + if (!phydev) + return -EOPNOTSUPP; + + return phy_ethtool_sset(phydev, cmd); +} + +static u32 +pasemi_mac_ethtool_get_msglevel(struct net_device *netdev) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + return mac->msg_enable; +} + +static void +pasemi_mac_ethtool_set_msglevel(struct net_device *netdev, + u32 level) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + mac->msg_enable = level; +} + + +static void +pasemi_mac_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + + ering->tx_max_pending = TX_RING_SIZE/2; + ering->tx_pending = RING_USED(mac->tx)/2; + ering->rx_max_pending = RX_RING_SIZE/4; + ering->rx_pending = RING_USED(mac->rx)/4; +} + +static int pasemi_mac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } +} + +static void pasemi_mac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + int i; + + data[0] = pasemi_read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)) + >> PAS_DMA_RXINT_RCMDSTA_DROPS_S; + for (i = 0; i < 32; i++) + data[1+i] = pasemi_read_mac_reg(mac->dma_if, PAS_MAC_RMON(i)); +} + +static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); +} + +const struct ethtool_ops pasemi_mac_ethtool_ops = { + .get_settings = pasemi_mac_ethtool_get_settings, + .set_settings = pasemi_mac_ethtool_set_settings, + .get_msglevel = pasemi_mac_ethtool_get_msglevel, + .set_msglevel = pasemi_mac_ethtool_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = pasemi_mac_ethtool_get_ringparam, + .get_strings = pasemi_mac_get_strings, + .get_sset_count = pasemi_mac_get_sset_count, + .get_ethtool_stats = pasemi_mac_get_ethtool_stats, +}; 
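
pasemi_mac_get_ethtool_stats() above fills data[0] with the RX drop count taken from the RCMDSTA register and data[1] through data[32] with the 32 RMON counters, which matches the 33 names in ethtool_stats_keys[]. User space reads exactly that layout back through the standard SIOCETHTOOL ioctl (this is what "ethtool -S" does). The sketch below is not part of the patch: the interface name "eth0" and the hard-coded count of 33 are assumptions for illustration, and a real tool would query the string-set size first instead of hard-coding it.

/*
 * Stand-alone user-space sketch: fetch the statistics exported by
 * get_strings()/get_ethtool_stats() above via SIOCETHTOOL.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

#define N_STATS 33	/* ARRAY_SIZE(ethtool_stats_keys) in this driver */

int main(void)
{
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr;
	int fd, i;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical name */

	strings = calloc(1, sizeof(*strings) + N_STATS * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + N_STATS * sizeof(__u64));
	if (!strings || !stats)
		return 1;

	/* Stat names, as returned by pasemi_mac_get_strings() */
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = N_STATS;
	ifr.ifr_data = (void *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* Stat values, as returned by pasemi_mac_get_ethtool_stats() */
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = N_STATS;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* data[0] is "rx-drops", data[1..32] are the RMON counters */
	for (i = 0; i < N_STATS; i++)
		printf("%-32.32s %llu\n",
		       (char *)(strings->data + i * ETH_GSTRING_LEN),
		       (unsigned long long)stats->data[i]);

	free(strings);
	free(stats);
	return 0;
}
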
+ diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig new file mode 100644 index 000000000..d49cba129 --- /dev/null +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -0,0 +1,96 @@ +# +# QLogic network device configuration +# + +config NET_VENDOR_QLOGIC + bool "QLogic devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about QLogic cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_QLOGIC + +config QLA3XXX + tristate "QLogic QLA3XXX Network Driver Support" + depends on PCI + ---help--- + This driver supports QLogic ISP3XXX gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called qla3xxx. + +config QLCNIC + tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support" + depends on PCI + select FW_LOADER + ---help--- + This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet + devices. + +config QLCNIC_SRIOV + bool "QLOGIC QLCNIC 83XX family SR-IOV Support" + depends on QLCNIC && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support for QLE83XX Converged Ethernet devices. + This allows for virtual function acceleration in virtualized + environments. + +config QLCNIC_DCB + bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support" + depends on QLCNIC && DCB + default y + ---help--- + This configuration parameter enables DCB support in QLE83XX + and QLE82XX Converged Ethernet devices. This allows for DCB + get operations support through rtNetlink interface. Only CEE + mode of DCB is supported. PG and PFC values are related only + to Tx. + +config QLCNIC_VXLAN + bool "Virtual eXtensible Local Area Network (VXLAN) offload support" + default n + depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m) + ---help--- + This enables hardware offload support for VXLAN protocol over QLogic's + 84XX series adapters. + Say Y here if you want to enable hardware offload support for + Virtual eXtensible Local Area Network (VXLAN) in the driver. + +config QLCNIC_HWMON + bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support" + depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m) + default y + ---help--- + This configuration parameter can be used to read the + board temperature in Converged Ethernet devices + supported by qlcnic. + + This data is available via the hwmon sysfs interface. + +config QLGE + tristate "QLogic QLGE 10Gb Ethernet Driver Support" + depends on PCI + ---help--- + This driver supports QLogic ISP8XXX 10Gb Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called qlge. + +config NETXEN_NIC + tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC" + depends on PCI + select FW_LOADER + ---help--- + This enables the support for NetXen's Gigabit Ethernet card. + +endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile new file mode 100644 index 000000000..b2a283d9a --- /dev/null +++ b/drivers/net/ethernet/qlogic/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the QLogic network device drivers. 
+# + +obj-$(CONFIG_QLA3XXX) += qla3xxx.o +obj-$(CONFIG_QLCNIC) += qlcnic/ +obj-$(CONFIG_QLGE) += qlge/ +obj-$(CONFIG_NETXEN_NIC) += netxen/ diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile new file mode 100644 index 000000000..e14e60c88 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/Makefile @@ -0,0 +1,27 @@ +# Copyright (C) 2003 - 2009 NetXen, Inc. +# Copyright (C) 2009 - QLogic Corporation. +# All rights reserved. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# The full GNU General Public License is included in this distribution +# in the file called "COPYING". +# +# + + +obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o + +netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ + netxen_nic_ethtool.o netxen_nic_ctx.o diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h new file mode 100644 index 000000000..1c39b3dcd --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -0,0 +1,1889 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#ifndef _NETXEN_NIC_H_ +#define _NETXEN_NIC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#include "netxen_nic_hdr.h" +#include "netxen_nic_hw.h" + +#define _NETXEN_NIC_LINUX_MAJOR 4 +#define _NETXEN_NIC_LINUX_MINOR 0 +#define _NETXEN_NIC_LINUX_SUBVERSION 82 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.82" + +#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#define _major(v) (((v) >> 24) & 0xff) +#define _minor(v) (((v) >> 16) & 0xff) +#define _build(v) ((v) & 0xffff) + +/* version in image has weird encoding: + * 7:0 - major + * 15:8 - minor + * 31:16 - build (little endian) + */ +#define NETXEN_DECODE_VERSION(v) \ + NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) + +#define NETXEN_NUM_FLASH_SECTORS (64) +#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) +#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ + * NETXEN_FLASH_SECTOR_SIZE) + +#define RCV_DESC_RINGSIZE(rds_ring) \ + (sizeof(struct rcv_desc) * (rds_ring)->num_desc) +#define RCV_BUFF_RINGSIZE(rds_ring) \ + (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) +#define STATUS_DESC_RINGSIZE(sds_ring) \ + (sizeof(struct status_desc) * (sds_ring)->num_desc) +#define TX_BUFF_RINGSIZE(tx_ring) \ + (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc) +#define TX_DESC_RINGSIZE(tx_ring) \ + (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) + +#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) + +#define NETXEN_RCV_PRODUCER_OFFSET 0 +#define NETXEN_RCV_PEG_DB_ID 2 +#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 +#define FLASH_SUCCESS 0 + +#define ADDR_IN_WINDOW1(off) \ + ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 
1 : 0 + +#define ADDR_IN_RANGE(addr, low, high) \ + (((addr) < (high)) && ((addr) >= (low))) + +/* + * normalize a 64MB crb address to 32MB PCI window + * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1 + */ +#define NETXEN_CRB_NORMAL(reg) \ + ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST) + +#define NETXEN_CRB_NORMALIZE(adapter, reg) \ + pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg)) + +#define DB_NORMALIZE(adapter, off) \ + (adapter->ahw.db_base + (off)) + +#define NX_P2_C0 0x24 +#define NX_P2_C1 0x25 +#define NX_P3_A0 0x30 +#define NX_P3_A2 0x30 +#define NX_P3_B0 0x40 +#define NX_P3_B1 0x41 +#define NX_P3_B2 0x42 +#define NX_P3P_A0 0x50 + +#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) +#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) +#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0) + +#define FIRST_PAGE_GROUP_START 0 +#define FIRST_PAGE_GROUP_END 0x100000 + +#define SECOND_PAGE_GROUP_START 0x6000000 +#define SECOND_PAGE_GROUP_END 0x68BC000 + +#define THIRD_PAGE_GROUP_START 0x70E4000 +#define THIRD_PAGE_GROUP_END 0x8000000 + +#define FIRST_PAGE_GROUP_SIZE FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START +#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START +#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START + +#define P2_MAX_MTU (8000) +#define P3_MAX_MTU (9600) +#define NX_ETHERMTU 1500 +#define NX_MAX_ETHERHDR 32 /* This contains some padding */ + +#define NX_P2_RX_BUF_MAX_LEN 1760 +#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU) +#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU) +#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU) +#define NX_CT_DEFAULT_RX_BUF_LEN 2048 +#define NX_LRO_BUFFER_EXTRA 2048 + +#define NX_RX_LRO_BUFFER_LENGTH (8060) + +/* + * Maximum number of ring contexts + */ +#define MAX_RING_CTX 1 + +/* Opcodes to be used with the commands */ +#define TX_ETHER_PKT 0x01 +#define TX_TCP_PKT 0x02 +#define TX_UDP_PKT 0x03 +#define TX_IP_PKT 0x04 +#define TX_TCP_LSO 0x05 +#define TX_TCP_LSO6 0x06 +#define TX_IPSEC 0x07 +#define TX_IPSEC_CMD 0x0a +#define TX_TCPV6_PKT 0x0b +#define TX_UDPV6_PKT 0x0c + +/* The following opcodes are for internal consumption. */ +#define NETXEN_CONTROL_OP 0x10 +#define PEGNET_REQUEST 0x11 + +#define MAX_NUM_CARDS 4 + +#define NETXEN_MAX_FRAGS_PER_TX 14 +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) +#define NX_MAX_TX_TIMEOUTS 2 + +/* + * Following are the states of the Phantom. Phantom will set them and + * Host will read to check if the fields are correct. 
+ */ +#define PHAN_INITIALIZE_START 0xff00 +#define PHAN_INITIALIZE_FAILED 0xffff +#define PHAN_INITIALIZE_COMPLETE 0xff01 + +/* Host writes the following to notify that it has done the init-handshake */ +#define PHAN_INITIALIZE_ACK 0xf00f + +#define NUM_RCV_DESC_RINGS 3 +#define NUM_STS_DESC_RINGS 4 + +#define RCV_RING_NORMAL 0 +#define RCV_RING_JUMBO 1 +#define RCV_RING_LRO 2 + +#define MIN_CMD_DESCRIPTORS 64 +#define MIN_RCV_DESCRIPTORS 64 +#define MIN_JUMBO_DESCRIPTORS 32 + +#define MAX_CMD_DESCRIPTORS 1024 +#define MAX_RCV_DESCRIPTORS_1G 4096 +#define MAX_RCV_DESCRIPTORS_10G 8192 +#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 +#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 +#define MAX_LRO_RCV_DESCRIPTORS 8 + +#define DEFAULT_RCV_DESCRIPTORS_1G 2048 +#define DEFAULT_RCV_DESCRIPTORS_10G 4096 + +#define NETXEN_CTX_SIGNATURE 0xdee0 +#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0 +#define NETXEN_CTX_RESET 0xbad0 +#define NETXEN_CTX_D3_RESET 0xacc0 +#define NETXEN_RCV_PRODUCER(ringid) (ringid) + +#define PHAN_PEG_RCV_INITIALIZED 0xff01 +#define PHAN_PEG_RCV_START_INITIALIZE 0xff00 + +#define get_next_index(index, length) \ + (((index) + 1) & ((length) - 1)) + +#define get_index_range(index,length,count) \ + (((index) + (count)) & ((length) - 1)) + +#define MPORT_SINGLE_FUNCTION_MODE 0x1111 +#define MPORT_MULTI_FUNCTION_MODE 0x2222 + +#define NX_MAX_PCI_FUNC 8 + +/* + * NetXen host-peg signal message structure + * + * Bit 0-1 : peg_id => 0x2 for tx and 01 for rx + * Bit 2 : priv_id => must be 1 + * Bit 3-17 : count => for doorbell + * Bit 18-27 : ctx_id => Context id + * Bit 28-31 : opcode + */ + +typedef u32 netxen_ctx_msg; + +#define netxen_set_msg_peg_id(config_word, val) \ + ((config_word) &= ~3, (config_word) |= val & 3) +#define netxen_set_msg_privid(config_word) \ + ((config_word) |= 1 << 2) +#define netxen_set_msg_count(config_word, val) \ + ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3) +#define netxen_set_msg_ctxid(config_word, val) \ + ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18) +#define netxen_set_msg_opcode(config_word, val) \ + ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28) + +struct netxen_rcv_ring { + __le64 addr; + __le32 size; + __le32 rsrvd; +}; + +struct netxen_sts_ring { + __le64 addr; + __le32 size; + __le16 msi_index; + __le16 rsvd; +} ; + +struct netxen_ring_ctx { + + /* one command ring */ + __le64 cmd_consumer_offset; + __le64 cmd_ring_addr; + __le32 cmd_ring_size; + __le32 rsrvd; + + /* three receive rings */ + struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS]; + + __le64 sts_ring_addr; + __le32 sts_ring_size; + + __le32 ctx_id; + + __le64 rsrvd_2[3]; + __le32 sts_ring_count; + __le32 rsrvd_3; + struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS]; + +} __attribute__ ((aligned(64))); + +/* + * Following data structures describe the descriptors that will be used. + * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when + * we are doing LSO (above the 1500 size packet) only. 
+ */ + +/* + * The size of reference handle been changed to 16 bits to pass the MSS fields + * for the LSO packet + */ + +#define FLAGS_CHECKSUM_ENABLED 0x01 +#define FLAGS_LSO_ENABLED 0x02 +#define FLAGS_IPSEC_SA_ADD 0x04 +#define FLAGS_IPSEC_SA_DELETE 0x08 +#define FLAGS_VLAN_TAGGED 0x10 +#define FLAGS_VLAN_OOB 0x40 + +#define netxen_set_tx_vlan_tci(cmd_desc, v) \ + (cmd_desc)->vlan_TCI = cpu_to_le16(v); + +#define netxen_set_cmd_desc_port(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) +#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) + +#define netxen_set_tx_port(_desc, _port) \ + (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0) + +#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \ + (_desc)->flags_opcode = \ + cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)) + +#define netxen_set_tx_frags_len(_desc, _frags, _len) \ + (_desc)->nfrags__length = \ + cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)) + +struct cmd_desc_type0 { + u8 tcp_hdr_offset; /* For LSO only */ + u8 ip_hdr_offset; /* For LSO only */ + __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ + __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ + + __le64 addr_buffer2; + + __le16 reference_handle; + __le16 mss; + u8 port_ctxid; /* 7:4 ctxid 3:0 port */ + u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ + __le16 conn_id; /* IPSec offoad only */ + + __le64 addr_buffer3; + __le64 addr_buffer1; + + __le16 buffer_length[4]; + + __le64 addr_buffer4; + + __le32 reserved2; + __le16 reserved; + __le16 vlan_TCI; + +} __attribute__ ((aligned(64))); + +/* Note: sizeof(rcv_desc) should always be a multiple of 2 */ +struct rcv_desc { + __le16 reference_handle; + __le16 reserved; + __le32 buffer_length; /* allocated buffer length (usually 2K) */ + __le64 addr_buffer; +}; + +/* opcode field in status_desc */ +#define NETXEN_NIC_SYN_OFFLOAD 0x03 +#define NETXEN_NIC_RXPKT_DESC 0x04 +#define NETXEN_OLD_RXPKT_DESC 0x3f +#define NETXEN_NIC_RESPONSE_DESC 0x05 +#define NETXEN_NIC_LRO_DESC 0x12 + +/* for status field in status_desc */ +#define STATUS_NEED_CKSUM (1) +#define STATUS_CKSUM_OK (2) + +/* owner bits of status_desc */ +#define STATUS_OWNER_HOST (0x1ULL << 56) +#define STATUS_OWNER_PHANTOM (0x2ULL << 56) + +/* Status descriptor: + 0-3 port, 4-7 status, 8-11 type, 12-27 total_length + 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset + 53-55 desc_cnt, 56-57 owner, 58-63 opcode + */ +#define netxen_get_sts_port(sts_data) \ + ((sts_data) & 0x0F) +#define netxen_get_sts_status(sts_data) \ + (((sts_data) >> 4) & 0x0F) +#define netxen_get_sts_type(sts_data) \ + (((sts_data) >> 8) & 0x0F) +#define netxen_get_sts_totallength(sts_data) \ + (((sts_data) >> 12) & 0xFFFF) +#define netxen_get_sts_refhandle(sts_data) \ + (((sts_data) >> 28) & 0xFFFF) +#define netxen_get_sts_prot(sts_data) \ + (((sts_data) >> 44) & 0x0F) +#define netxen_get_sts_pkt_offset(sts_data) \ + (((sts_data) >> 48) & 0x1F) +#define netxen_get_sts_desc_cnt(sts_data) \ + (((sts_data) >> 53) & 0x7) +#define netxen_get_sts_opcode(sts_data) \ + (((sts_data) >> 58) & 0x03F) + +#define netxen_get_lro_sts_refhandle(sts_data) \ + ((sts_data) & 0x0FFFF) +#define netxen_get_lro_sts_length(sts_data) \ + (((sts_data) >> 16) & 0x0FFFF) +#define netxen_get_lro_sts_l2_hdr_offset(sts_data) \ + (((sts_data) >> 32) & 0x0FF) +#define netxen_get_lro_sts_l4_hdr_offset(sts_data) \ + (((sts_data) >> 40) & 0x0FF) +#define netxen_get_lro_sts_timestamp(sts_data) 
\ + (((sts_data) >> 48) & 0x1) +#define netxen_get_lro_sts_type(sts_data) \ + (((sts_data) >> 49) & 0x7) +#define netxen_get_lro_sts_push_flag(sts_data) \ + (((sts_data) >> 52) & 0x1) +#define netxen_get_lro_sts_seq_number(sts_data) \ + ((sts_data) & 0x0FFFFFFFF) +#define netxen_get_lro_sts_mss(sts_data1) \ + ((sts_data1 >> 32) & 0x0FFFF) + + +struct status_desc { + __le64 status_desc_data[2]; +} __attribute__ ((aligned(16))); + +/* UNIFIED ROMIMAGE *************************/ +#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 +#define NX_UNI_DIR_SECT_BOOTLD 0x6 +#define NX_UNI_DIR_SECT_FW 0x7 + +/*Offsets */ +#define NX_UNI_CHIP_REV_OFF 10 +#define NX_UNI_FLAGS_OFF 11 +#define NX_UNI_BIOS_VERSION_OFF 12 +#define NX_UNI_BOOTLD_IDX_OFF 27 +#define NX_UNI_FIRMWARE_IDX_OFF 29 + +struct uni_table_desc{ + uint32_t findex; + uint32_t num_entries; + uint32_t entry_size; + uint32_t reserved[5]; +}; + +struct uni_data_desc{ + uint32_t findex; + uint32_t size; + uint32_t reserved[5]; +}; + +/* UNIFIED ROMIMAGE *************************/ + +/* The version of the main data structure */ +#define NETXEN_BDINFO_VERSION 1 + +/* Magic number to let user know flash is programmed */ +#define NETXEN_BDINFO_MAGIC 0x12345678 + +/* Max number of Gig ports on a Phantom board */ +#define NETXEN_MAX_PORTS 4 + +#define NETXEN_BRDTYPE_P1_BD 0x0000 +#define NETXEN_BRDTYPE_P1_SB 0x0001 +#define NETXEN_BRDTYPE_P1_SMAX 0x0002 +#define NETXEN_BRDTYPE_P1_SOCK 0x0003 + +#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008 +#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009 +#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a +#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b +#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c + +#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d +#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e +#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f + +#define NETXEN_BRDTYPE_P3_REF_QG 0x0021 +#define NETXEN_BRDTYPE_P3_HMEZ 0x0022 +#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023 +#define NETXEN_BRDTYPE_P3_4_GB 0x0024 +#define NETXEN_BRDTYPE_P3_IMEZ 0x0025 +#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026 +#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027 +#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028 +#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029 +#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a +#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b +#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031 +#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032 +#define NETXEN_BRDTYPE_P3_10G_TP 0x0080 + +/* Flash memory map */ +#define NETXEN_CRBINIT_START 0 /* crbinit section */ +#define NETXEN_BRDCFG_START 0x4000 /* board config */ +#define NETXEN_INITCODE_START 0x6000 /* pegtune code */ +#define NETXEN_BOOTLD_START 0x10000 /* bootld */ +#define NETXEN_IMAGE_START 0x43000 /* compressed image */ +#define NETXEN_SECONDARY_START 0x200000 /* backup images */ +#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ +#define NETXEN_USER_START 0x3E8000 /* Firmware info */ +#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ +#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ + +#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START) +#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408) +#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c) +#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418) +#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c) +#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c) + +#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START) +#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8) +#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128) + +#define NX_FW_MIN_SIZE (0x3fffff) +#define 
NX_P2_MN_ROMIMAGE 0 +#define NX_P3_CT_ROMIMAGE 1 +#define NX_P3_MN_ROMIMAGE 2 +#define NX_UNIFIED_ROMIMAGE 3 +#define NX_FLASH_ROMIMAGE 4 +#define NX_UNKNOWN_ROMIMAGE 0xff + +#define NX_P2_MN_ROMIMAGE_NAME "/*(DEBLOBBED)*/" +#define NX_P3_CT_ROMIMAGE_NAME "/*(DEBLOBBED)*/" +#define NX_P3_MN_ROMIMAGE_NAME "/*(DEBLOBBED)*/" +#define NX_UNIFIED_ROMIMAGE_NAME "/*(DEBLOBBED)*/" +#define NX_FLASH_ROMIMAGE_NAME "flash" + +extern char netxen_nic_driver_name[]; + +/* Number of status descriptors to handle per interrupt */ +#define MAX_STATUS_HANDLE (64) + +/* + * netxen_skb_frag{} is to contain mapping info for each SG list. This + * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}. + */ +struct netxen_skb_frag { + u64 dma; + u64 length; +}; + +struct netxen_recv_crb { + u32 crb_rcv_producer[NUM_RCV_DESC_RINGS]; + u32 crb_sts_consumer[NUM_STS_DESC_RINGS]; + u32 sw_int_mask[NUM_STS_DESC_RINGS]; +}; + +/* Following defines are for the state of the buffers */ +#define NETXEN_BUFFER_FREE 0 +#define NETXEN_BUFFER_BUSY 1 + +/* + * There will be one netxen_buffer per skb packet. These will be + * used to save the dma info for pci_unmap_page() + */ +struct netxen_cmd_buffer { + struct sk_buff *skb; + struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1]; + u32 frag_count; +}; + +/* In rx_buffer, we do not need multiple fragments as is a single buffer */ +struct netxen_rx_buffer { + struct list_head list; + struct sk_buff *skb; + u64 dma; + u16 ref_handle; + u16 state; +}; + +/* Board types */ +#define NETXEN_NIC_GBE 0x01 +#define NETXEN_NIC_XGBE 0x02 + +/* + * One hardware_context{} per adapter + * contains interrupt info as well shared hardware info. + */ +struct netxen_hardware_context { + void __iomem *pci_base0; + void __iomem *pci_base1; + void __iomem *pci_base2; + void __iomem *db_base; + void __iomem *ocm_win_crb; + + unsigned long db_len; + unsigned long pci_len0; + + u32 ocm_win; + u32 crb_win; + + rwlock_t crb_lock; + spinlock_t mem_lock; + + u8 cut_through; + u8 revision_id; + u8 pci_func; + u8 linkup; + u16 port_type; + u16 board_type; +}; + +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 + +struct netxen_adapter_stats { + u64 xmitcalled; + u64 xmitfinished; + u64 rxdropped; + u64 txdropped; + u64 csummed; + u64 rx_pkts; + u64 lro_pkts; + u64 rxbytes; + u64 txbytes; +}; + +/* + * Rcv Descriptor Context. One such per Rcv Descriptor. There may + * be one Rcv Descriptor for normal packets, one for jumbo and may be others. + */ +struct nx_host_rds_ring { + u32 producer; + u32 num_desc; + u32 dma_size; + u32 skb_size; + u32 flags; + void __iomem *crb_rcv_producer; + struct rcv_desc *desc_head; + struct netxen_rx_buffer *rx_buf_arr; + struct list_head free_list; + spinlock_t lock; + dma_addr_t phys_addr; +}; + +struct nx_host_sds_ring { + u32 consumer; + u32 num_desc; + void __iomem *crb_sts_consumer; + void __iomem *crb_intr_mask; + + struct status_desc *desc_head; + struct netxen_adapter *adapter; + struct napi_struct napi; + struct list_head free_list[NUM_RCV_DESC_RINGS]; + + int irq; + + dma_addr_t phys_addr; + char name[IFNAMSIZ+4]; +}; + +struct nx_host_tx_ring { + u32 producer; + __le32 *hw_consumer; + u32 sw_consumer; + void __iomem *crb_cmd_producer; + void __iomem *crb_cmd_consumer; + u32 num_desc; + + struct netdev_queue *txq; + + struct netxen_cmd_buffer *cmd_buf_arr; + struct cmd_desc_type0 *desc_head; + dma_addr_t phys_addr; +}; + +/* + * Receive context. There is one such structure per instance of the + * receive processing. 
Any state information that is relevant to + * the receive, and is must be in this structure. The global data may be + * present elsewhere. + */ +struct netxen_recv_context { + u32 state; + u16 context_id; + u16 virt_port; + + struct nx_host_rds_ring *rds_rings; + struct nx_host_sds_ring *sds_rings; + + struct netxen_ring_ctx *hwctx; + dma_addr_t phys_addr; +}; + +struct _cdrp_cmd { + u32 cmd; + u32 arg1; + u32 arg2; + u32 arg3; +}; + +struct netxen_cmd_args { + struct _cdrp_cmd req; + struct _cdrp_cmd rsp; +}; + +/* New HW context creation */ + +#define NX_OS_CRB_RETRY_COUNT 4000 +#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \ + (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) + +#define NX_CDRP_CLEAR 0x00000000 +#define NX_CDRP_CMD_BIT 0x80000000 + +/* + * All responses must have the NX_CDRP_CMD_BIT cleared + * in the crb NX_CDRP_CRB_OFFSET. + */ +#define NX_CDRP_FORM_RSP(rsp) (rsp) +#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0) + +#define NX_CDRP_RSP_OK 0x00000001 +#define NX_CDRP_RSP_FAIL 0x00000002 +#define NX_CDRP_RSP_TIMEOUT 0x00000003 + +/* + * All commands must have the NX_CDRP_CMD_BIT set in + * the crb NX_CDRP_CRB_OFFSET. + */ +#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd)) +#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0) + +#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 +#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 +#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 +#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 +#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 +#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 +#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007 +#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008 +#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009 +#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a +#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e +#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f +#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010 +#define NX_CDRP_CMD_SET_MTU 0x00000012 +#define NX_CDRP_CMD_READ_PHY 0x00000013 +#define NX_CDRP_CMD_WRITE_PHY 0x00000014 +#define NX_CDRP_CMD_READ_HW_REG 0x00000015 +#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016 +#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017 +#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018 +#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019 +#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a +#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b +#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c +#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d +#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e +#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f +#define NX_CDRP_CMD_MAX 0x00000020 + +#define NX_RCODE_SUCCESS 0 +#define NX_RCODE_NO_HOST_MEM 1 +#define NX_RCODE_NO_HOST_RESOURCE 2 +#define NX_RCODE_NO_CARD_CRB 3 +#define NX_RCODE_NO_CARD_MEM 4 +#define NX_RCODE_NO_CARD_RESOURCE 5 +#define NX_RCODE_INVALID_ARGS 6 +#define NX_RCODE_INVALID_ACTION 7 +#define NX_RCODE_INVALID_STATE 8 +#define NX_RCODE_NOT_SUPPORTED 9 +#define NX_RCODE_NOT_PERMITTED 10 +#define NX_RCODE_NOT_READY 11 +#define NX_RCODE_DOES_NOT_EXIST 12 +#define NX_RCODE_ALREADY_EXISTS 13 +#define NX_RCODE_BAD_SIGNATURE 14 +#define NX_RCODE_CMD_NOT_IMPL 15 +#define NX_RCODE_CMD_INVALID 16 +#define NX_RCODE_TIMEOUT 17 +#define NX_RCODE_CMD_FAILED 18 +#define NX_RCODE_MAX_EXCEEDED 19 +#define NX_RCODE_MAX 20 + +#define NX_DESTROY_CTX_RESET 0 +#define NX_DESTROY_CTX_D3_RESET 1 +#define NX_DESTROY_CTX_MAX 2 + +/* + * Capabilities + */ +#define NX_CAP_BIT(class, bit) (1 << bit) +#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0) +#define 
NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1) +#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2) +#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3) +#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4) +#define NX_CAP0_LRO NX_CAP_BIT(0, 5) +#define NX_CAP0_LSO NX_CAP_BIT(0, 6) +#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) +#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) +#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10) +#define NX_CAP0_HW_LRO_MSS NX_CAP_BIT(0, 21) + +/* + * Context state + */ +#define NX_HOST_CTX_STATE_FREED 0 +#define NX_HOST_CTX_STATE_ALLOCATED 1 +#define NX_HOST_CTX_STATE_ACTIVE 2 +#define NX_HOST_CTX_STATE_DISABLED 3 +#define NX_HOST_CTX_STATE_QUIESCED 4 +#define NX_HOST_CTX_STATE_MAX 5 + +/* + * Rx context + */ + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le16 msi_index; + __le16 rsvd; /* Padding */ +} nx_hostrq_sds_ring_t; + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le64 buff_size; /* Packet buffer size */ + __le32 ring_size; /* Ring entries */ + __le32 ring_kind; /* Class of ring */ +} nx_hostrq_rds_ring_t; + +typedef struct { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 host_rds_crb_mode; /* RDS crb usage */ + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + u8 reserved[128]; /* reserve space for future expansion*/ + /* MUST BE 64-bit aligned. + The following is packed: + - N hostrq_rds_rings + - N hostrq_sds_rings */ + char data[0]; +} nx_hostrq_rx_ctx_t; + +typedef struct { + __le32 host_producer_crb; /* Crb to use */ + __le32 rsvd1; /* Padding */ +} nx_cardrsp_rds_ring_t; + +typedef struct { + __le32 host_consumer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} nx_cardrsp_sds_ring_t; + +typedef struct { + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le32 host_ctx_state; /* Starting State */ + __le32 num_fn_per_port; /* How many PCI fn share the port */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + u8 reserved[128]; /* save space for future expansion */ + /* MUST BE 64-bit aligned. 
+ The following is packed: + - N cardrsp_rds_rings + - N cardrs_sds_rings */ + char data[0]; +} nx_cardrsp_rx_ctx_t; + +#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ + (sizeof(HOSTRQ_RX) + \ + (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \ + (sds_rings)*(sizeof(nx_hostrq_sds_ring_t))) + +#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ + (sizeof(CARDRSP_RX) + \ + (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \ + (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t))) + +/* + * Tx context + */ + +typedef struct { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le32 rsvd; /* Padding */ +} nx_hostrq_cds_ring_t; + +typedef struct { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le64 cmd_cons_dma_addr; /* */ + __le64 dummy_dma_addr; /* */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + __le16 interrupt_ctl; + __le16 msi_index; + __le16 rsvd3; /* Padding */ + nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */ + u8 reserved[128]; /* future expansion */ +} nx_hostrq_tx_ctx_t; + +typedef struct { + __le32 host_producer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} nx_cardrsp_cds_ring_t; + +typedef struct { + __le32 host_ctx_state; /* Starting state */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */ + u8 reserved[128]; /* future expansion */ +} nx_cardrsp_tx_ctx_t; + +#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) +#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) + +/* CRB */ + +#define NX_HOST_RDS_CRB_MODE_UNIQUE 0 +#define NX_HOST_RDS_CRB_MODE_SHARED 1 +#define NX_HOST_RDS_CRB_MODE_CUSTOM 2 +#define NX_HOST_RDS_CRB_MODE_MAX 3 + +#define NX_HOST_INT_CRB_MODE_UNIQUE 0 +#define NX_HOST_INT_CRB_MODE_SHARED 1 +#define NX_HOST_INT_CRB_MODE_NORX 2 +#define NX_HOST_INT_CRB_MODE_NOTX 3 +#define NX_HOST_INT_CRB_MODE_NORXTX 4 + + +/* MAC */ + +#define MC_COUNT_P2 16 +#define MC_COUNT_P3 38 + +#define NETXEN_MAC_NOOP 0 +#define NETXEN_MAC_ADD 1 +#define NETXEN_MAC_DEL 2 + +typedef struct nx_mac_list_s { + struct list_head list; + uint8_t mac_addr[ETH_ALEN+2]; +} nx_mac_list_t; + +struct nx_ip_list { + struct list_head list; + __be32 ip_addr; + bool master; +}; + +/* + * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is + * adjusted based on configured MTU. 
+ */ +#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3 +#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256 +#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64 +#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4 + +#define NETXEN_NIC_INTR_DEFAULT 0x04 + +typedef union { + struct { + uint16_t rx_packets; + uint16_t rx_time_us; + uint16_t tx_packets; + uint16_t tx_time_us; + } data; + uint64_t word; +} nx_nic_intr_coalesce_data_t; + +typedef struct { + uint16_t stats_time_us; + uint16_t rate_sample_time; + uint16_t flags; + uint16_t rsvd_1; + uint32_t low_threshold; + uint32_t high_threshold; + nx_nic_intr_coalesce_data_t normal; + nx_nic_intr_coalesce_data_t low; + nx_nic_intr_coalesce_data_t high; + nx_nic_intr_coalesce_data_t irq; +} nx_nic_intr_coalesce_t; + +#define NX_HOST_REQUEST 0x13 +#define NX_NIC_REQUEST 0x14 + +#define NX_MAC_EVENT 0x1 + +#define NX_IP_UP 2 +#define NX_IP_DOWN 3 + +/* + * Driver --> Firmware + */ +#define NX_NIC_H2C_OPCODE_START 0 +#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1 +#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2 +#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3 +#define NX_NIC_H2C_OPCODE_CONFIG_LED 4 +#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5 +#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6 +#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7 +#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8 +#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9 +#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10 +#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11 +#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12 +#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13 +#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14 +#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15 +#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16 +#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 +#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18 +#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19 +#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20 +#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21 +#define NX_NIC_C2C_OPCODE 22 +#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23 +#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24 +#define NX_NIC_H2C_OPCODE_LAST 25 + +/* + * Firmware --> Driver + */ + +#define NX_NIC_C2H_OPCODE_START 128 +#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129 +#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130 +#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131 +#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132 +#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133 +#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134 +#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135 +#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136 +#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137 +#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138 +#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139 +#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140 +#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141 +#define NX_NIC_C2H_OPCODE_LAST 142 + +#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ +#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ +#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ + +#define NX_NIC_LRO_REQUEST_FIRST 0 +#define NX_NIC_LRO_REQUEST_ADD_FLOW 1 +#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2 +#define NX_NIC_LRO_REQUEST_TIMER 3 +#define NX_NIC_LRO_REQUEST_CLEANUP 4 +#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5 +#define NX_TOE_LRO_REQUEST_ADD_FLOW 6 +#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7 +#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8 +#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9 +#define 
NX_TOE_LRO_REQUEST_TIMER 10 +#define NX_NIC_LRO_REQUEST_LAST 11 + +#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5) +#define NX_FW_CAPABILITY_SWITCHING (1 << 6) +#define NX_FW_CAPABILITY_PEXQ (1 << 7) +#define NX_FW_CAPABILITY_BDG (1 << 8) +#define NX_FW_CAPABILITY_FVLANTX (1 << 9) +#define NX_FW_CAPABILITY_HW_LRO (1 << 10) +#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) +#define NX_FW_CAPABILITY_MORE_CAPS (1 << 31) +#define NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG (1 << 2) + +/* module types */ +#define LINKEVENT_MODULE_NOT_PRESENT 1 +#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 +#define LINKEVENT_MODULE_OPTICAL_SRLR 3 +#define LINKEVENT_MODULE_OPTICAL_LRM 4 +#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 +#define LINKEVENT_MODULE_TWINAX 8 + +#define LINKSPEED_10GBPS 10000 +#define LINKSPEED_1GBPS 1000 +#define LINKSPEED_100MBPS 100 +#define LINKSPEED_10MBPS 10 + +#define LINKSPEED_ENCODED_10MBPS 0 +#define LINKSPEED_ENCODED_100MBPS 1 +#define LINKSPEED_ENCODED_1GBPS 2 + +#define LINKEVENT_AUTONEG_DISABLED 0 +#define LINKEVENT_AUTONEG_ENABLED 1 + +#define LINKEVENT_HALF_DUPLEX 0 +#define LINKEVENT_FULL_DUPLEX 1 + +#define LINKEVENT_LINKSPEED_MBPS 0 +#define LINKEVENT_LINKSPEED_ENCODED 1 + +#define AUTO_FW_RESET_ENABLED 0xEF10AF12 +#define AUTO_FW_RESET_DISABLED 0xDCBAAF12 + +/* firmware response header: + * 63:58 - message type + * 57:56 - owner + * 55:53 - desc count + * 52:48 - reserved + * 47:40 - completion id + * 39:32 - opcode + * 31:16 - error code + * 15:00 - reserved + */ +#define netxen_get_nic_msgtype(msg_hdr) \ + ((msg_hdr >> 58) & 0x3F) +#define netxen_get_nic_msg_compid(msg_hdr) \ + ((msg_hdr >> 40) & 0xFF) +#define netxen_get_nic_msg_opcode(msg_hdr) \ + ((msg_hdr >> 32) & 0xFF) +#define netxen_get_nic_msg_errcode(msg_hdr) \ + ((msg_hdr >> 16) & 0xFFFF) + +typedef struct { + union { + struct { + u64 hdr; + u64 body[7]; + }; + u64 words[8]; + }; +} nx_fw_msg_t; + +typedef struct { + __le64 qhdr; + __le64 req_hdr; + __le64 words[6]; +} nx_nic_req_t; + +typedef struct { + u8 op; + u8 tag; + u8 mac_addr[6]; +} nx_mac_req_t; + +#define MAX_PENDING_DESC_BLOCK_SIZE 64 + +#define NETXEN_NIC_MSI_ENABLED 0x02 +#define NETXEN_NIC_MSIX_ENABLED 0x04 +#define NETXEN_NIC_LRO_ENABLED 0x08 +#define NETXEN_NIC_LRO_DISABLED 0x00 +#define NETXEN_NIC_BRIDGE_ENABLED 0X10 +#define NETXEN_NIC_DIAG_ENABLED 0x20 +#define NETXEN_FW_RESET_OWNER 0x40 +#define NETXEN_FW_MSS_CAP 0x80 +#define NETXEN_IS_MSI_FAMILY(adapter) \ + ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) + +#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS +#define NETXEN_MSIX_TBL_SPACE 8192 +#define NETXEN_PCI_REG_MSIX_TBL 0x44 + +#define NETXEN_DB_MAPSIZE_BYTES 0x1000 + +#define NETXEN_ADAPTER_UP_MAGIC 777 +#define NETXEN_NIC_PEG_TUNE 0 + +#define __NX_FW_ATTACHED 0 +#define __NX_DEV_UP 1 +#define __NX_RESETTING 2 + +/* Mini Coredump FW supported version */ +#define NX_MD_SUPPORT_MAJOR 4 +#define NX_MD_SUPPORT_MINOR 0 +#define NX_MD_SUPPORT_SUBVERSION 579 + +#define LSW(x) ((uint16_t)(x)) +#define LSD(x) ((uint32_t)((uint64_t)(x))) +#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) + +/* Mini Coredump mask level */ +#define NX_DUMP_MASK_MIN 0x03 +#define NX_DUMP_MASK_DEF 0x1f +#define NX_DUMP_MASK_MAX 0xff + +/* Mini Coredump CDRP commands */ +#define NX_CDRP_CMD_TEMP_SIZE 0x0000002f +#define NX_CDRP_CMD_GET_TEMP_HDR 0x00000030 + + +#define NX_DUMP_STATE_ARRAY_LEN 16 +#define NX_DUMP_CAP_SIZE_ARRAY_LEN 
8 + +/* Mini Coredump sysfs entries flags*/ +#define NX_FORCE_FW_DUMP_KEY 0xdeadfeed +#define NX_ENABLE_FW_DUMP 0xaddfeed +#define NX_DISABLE_FW_DUMP 0xbadfeed +#define NX_FORCE_FW_RESET 0xdeaddead + + +/* Fw dump levels */ +static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff }; + +/* Flash read/write address */ +#define NX_FW_DUMP_REG1 0x00130060 +#define NX_FW_DUMP_REG2 0x001e0000 +#define NX_FLASH_SEM2_LK 0x0013C010 +#define NX_FLASH_SEM2_ULK 0x0013C014 +#define NX_FLASH_LOCK_ID 0x001B2100 +#define FLASH_ROM_WINDOW 0x42110030 +#define FLASH_ROM_DATA 0x42150000 + +/* Mini Coredump register read/write routine */ +#define NX_RD_DUMP_REG(addr, bar0, data) do { \ + writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \ + NX_FW_DUMP_REG1)); \ + readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \ + *data = readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + \ + LSW(addr))); \ +} while (0) + +#define NX_WR_DUMP_REG(addr, bar0, data) do { \ + writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \ + NX_FW_DUMP_REG1)); \ + readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \ + writel(data, (void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr)));\ + readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr))); \ +} while (0) + + +/* +Entry Type Defines +*/ + +#define RDNOP 0 +#define RDCRB 1 +#define RDMUX 2 +#define QUEUE 3 +#define BOARD 4 +#define RDSRE 5 +#define RDOCM 6 +#define PREGS 7 +#define L1DTG 8 +#define L1ITG 9 +#define CACHE 10 + +#define L1DAT 11 +#define L1INS 12 +#define RDSTK 13 +#define RDCON 14 + +#define L2DTG 21 +#define L2ITG 22 +#define L2DAT 23 +#define L2INS 24 +#define RDOC3 25 + +#define MEMBK 32 + +#define RDROM 71 +#define RDMEM 72 +#define RDMN 73 + +#define INFOR 81 +#define CNTRL 98 + +#define TLHDR 99 +#define RDEND 255 + +#define PRIMQ 103 +#define SQG2Q 104 +#define SQG3Q 105 + +/* +* Opcodes for Control Entries. +* These Flags are bit fields. 
+*/ +#define NX_DUMP_WCRB 0x01 +#define NX_DUMP_RWCRB 0x02 +#define NX_DUMP_ANDCRB 0x04 +#define NX_DUMP_ORCRB 0x08 +#define NX_DUMP_POLLCRB 0x10 +#define NX_DUMP_RD_SAVE 0x20 +#define NX_DUMP_WRT_SAVED 0x40 +#define NX_DUMP_MOD_SAVE_ST 0x80 + +/* Driver Flags */ +#define NX_DUMP_SKIP 0x80 /* driver skipped this entry */ +#define NX_DUMP_SIZE_ERR 0x40 /*entry size vs capture size mismatch*/ + +#define NX_PCI_READ_32(ADDR) readl((ADDR)) +#define NX_PCI_WRITE_32(DATA, ADDR) writel(DATA, (ADDR)) + + + +struct netxen_minidump { + u32 pos; /* position in the dump buffer */ + u8 fw_supports_md; /* FW supports Mini cordump */ + u8 has_valid_dump; /* indicates valid dump */ + u8 md_capture_mask; /* driver capture mask */ + u8 md_enabled; /* Turn Mini Coredump on/off */ + u32 md_dump_size; /* Total FW Mini Coredump size */ + u32 md_capture_size; /* FW dump capture size */ + u32 md_template_size; /* FW template size */ + u32 md_template_ver; /* FW template version */ + u64 md_timestamp; /* FW Mini dump timestamp */ + void *md_template; /* FW template will be stored */ + void *md_capture_buff; /* FW dump will be stored */ +}; + + + +struct netxen_minidump_template_hdr { + u32 entry_type; + u32 first_entry_offset; + u32 size_of_template; + u32 capture_mask; + u32 num_of_entries; + u32 version; + u32 driver_timestamp; + u32 checksum; + u32 driver_capture_mask; + u32 driver_info_word2; + u32 driver_info_word3; + u32 driver_info_word4; + u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN]; + u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN]; + u32 rsvd[0]; +}; + +/* Common Entry Header: Common to All Entry Types */ +/* + * Driver Code is for driver to write some info about the entry. + * Currently not used. + */ + +struct netxen_common_entry_hdr { + u32 entry_type; + u32 entry_size; + u32 entry_capture_size; + union { + struct { + u8 entry_capture_mask; + u8 entry_code; + u8 driver_code; + u8 driver_flags; + }; + u32 entry_ctrl_word; + }; +}; + + +/* Generic Entry Including Header */ +struct netxen_minidump_entry { + struct netxen_common_entry_hdr hdr; + u32 entry_data00; + u32 entry_data01; + u32 entry_data02; + u32 entry_data03; + u32 entry_data04; + u32 entry_data05; + u32 entry_data06; + u32 entry_data07; +}; + +/* Read ROM Header */ +struct netxen_minidump_entry_rdrom { + struct netxen_common_entry_hdr h; + union { + struct { + u32 select_addr_reg; + }; + u32 rsvd_0; + }; + union { + struct { + u8 addr_stride; + u8 addr_cnt; + u16 data_size; + }; + u32 rsvd_1; + }; + union { + struct { + u32 op_count; + }; + u32 rsvd_2; + }; + union { + struct { + u32 read_addr_reg; + }; + u32 rsvd_3; + }; + union { + struct { + u32 write_mask; + }; + u32 rsvd_4; + }; + union { + struct { + u32 read_mask; + }; + u32 rsvd_5; + }; + u32 read_addr; + u32 read_data_size; +}; + + +/* Read CRB and Control Entry Header */ +struct netxen_minidump_entry_crb { + struct netxen_common_entry_hdr h; + u32 addr; + union { + struct { + u8 addr_stride; + u8 state_index_a; + u16 poll_timeout; + }; + u32 addr_cntrl; + }; + u32 data_size; + u32 op_count; + union { + struct { + u8 opcode; + u8 state_index_v; + u8 shl; + u8 shr; + }; + u32 control_value; + }; + u32 value_1; + u32 value_2; + u32 value_3; +}; + +/* Read Memory and MN Header */ +struct netxen_minidump_entry_rdmem { + struct netxen_common_entry_hdr h; + union { + struct { + u32 select_addr_reg; + }; + u32 rsvd_0; + }; + union { + struct { + u8 addr_stride; + u8 addr_cnt; + u16 data_size; + }; + u32 rsvd_1; + }; + union { + struct { + u32 op_count; + }; + u32 rsvd_2; + }; + union 
{ + struct { + u32 read_addr_reg; + }; + u32 rsvd_3; + }; + union { + struct { + u32 cntrl_addr_reg; + }; + u32 rsvd_4; + }; + union { + struct { + u8 wr_byte0; + u8 wr_byte1; + u8 poll_mask; + u8 poll_cnt; + }; + u32 rsvd_5; + }; + u32 read_addr; + u32 read_data_size; +}; + +/* Read Cache L1 and L2 Header */ +struct netxen_minidump_entry_cache { + struct netxen_common_entry_hdr h; + u32 tag_reg_addr; + union { + struct { + u16 tag_value_stride; + u16 init_tag_value; + }; + u32 select_addr_cntrl; + }; + u32 data_size; + u32 op_count; + u32 control_addr; + union { + struct { + u16 write_value; + u8 poll_mask; + u8 poll_wait; + }; + u32 control_value; + }; + u32 read_addr; + union { + struct { + u8 read_addr_stride; + u8 read_addr_cnt; + u16 rsvd_1; + }; + u32 read_addr_cntrl; + }; +}; + +/* Read OCM Header */ +struct netxen_minidump_entry_rdocm { + struct netxen_common_entry_hdr h; + u32 rsvd_0; + union { + struct { + u32 rsvd_1; + }; + u32 select_addr_cntrl; + }; + u32 data_size; + u32 op_count; + u32 rsvd_2; + u32 rsvd_3; + u32 read_addr; + union { + struct { + u32 read_addr_stride; + }; + u32 read_addr_cntrl; + }; +}; + +/* Read MUX Header */ +struct netxen_minidump_entry_mux { + struct netxen_common_entry_hdr h; + u32 select_addr; + union { + struct { + u32 rsvd_0; + }; + u32 select_addr_cntrl; + }; + u32 data_size; + u32 op_count; + u32 select_value; + u32 select_value_stride; + u32 read_addr; + u32 rsvd_1; +}; + +/* Read Queue Header */ +struct netxen_minidump_entry_queue { + struct netxen_common_entry_hdr h; + u32 select_addr; + union { + struct { + u16 queue_id_stride; + u16 rsvd_0; + }; + u32 select_addr_cntrl; + }; + u32 data_size; + u32 op_count; + u32 rsvd_1; + u32 rsvd_2; + u32 read_addr; + union { + struct { + u8 read_addr_stride; + u8 read_addr_cnt; + u16 rsvd_3; + }; + u32 read_addr_cntrl; + }; +}; + +struct netxen_dummy_dma { + void *addr; + dma_addr_t phys_addr; +}; + +struct netxen_adapter { + struct netxen_hardware_context ahw; + + struct net_device *netdev; + struct pci_dev *pdev; + struct list_head mac_list; + struct list_head ip_list; + + spinlock_t tx_clean_lock; + + u16 num_txd; + u16 num_rxd; + u16 num_jumbo_rxd; + u16 num_lro_rxd; + + u8 max_rds_rings; + u8 max_sds_rings; + u8 driver_mismatch; + u8 msix_supported; + u8 __pad; + u8 pci_using_dac; + u8 portnum; + u8 physical_port; + + u8 mc_enabled; + u8 max_mc_count; + u8 rss_supported; + u8 link_changed; + u8 fw_wait_cnt; + u8 fw_fail_cnt; + u8 tx_timeo_cnt; + u8 need_fw_reset; + + u8 has_link_events; + u8 fw_type; + u16 tx_context_id; + u16 mtu; + u16 is_up; + + u16 link_speed; + u16 link_duplex; + u16 link_autoneg; + u16 module_type; + + u32 capabilities; + u32 flags; + u32 irq; + u32 temp; + + u32 int_vec_bit; + u32 heartbit; + + u8 mac_addr[ETH_ALEN]; + + struct netxen_adapter_stats stats; + + struct netxen_recv_context recv_ctx; + struct nx_host_tx_ring *tx_ring; + + int (*macaddr_set) (struct netxen_adapter *, u8 *); + int (*set_mtu) (struct netxen_adapter *, int); + int (*set_promisc) (struct netxen_adapter *, u32); + void (*set_multi) (struct net_device *); + int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *); + int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val); + int (*init_port) (struct netxen_adapter *, int); + int (*stop_port) (struct netxen_adapter *); + + u32 (*crb_read)(struct netxen_adapter *, ulong); + int (*crb_write)(struct netxen_adapter *, ulong, u32); + + int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *); + int (*pci_mem_write)(struct netxen_adapter *, u64, u64); + + 
int (*pci_set_window)(struct netxen_adapter *, u64, u32 *); + + u32 (*io_read)(struct netxen_adapter *, void __iomem *); + void (*io_write)(struct netxen_adapter *, void __iomem *, u32); + + void __iomem *tgt_mask_reg; + void __iomem *pci_int_reg; + void __iomem *tgt_status_reg; + void __iomem *crb_int_state_reg; + void __iomem *isr_int_vec; + + struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; + + struct netxen_dummy_dma dummy_dma; + + struct delayed_work fw_work; + + struct work_struct tx_timeout_task; + + nx_nic_intr_coalesce_t coal; + + unsigned long state; + __le32 file_prd_off; /*File fw product offset*/ + u32 fw_version; + const struct firmware *fw; + struct netxen_minidump mdump; /* mdump ptr */ + int fw_mdump_rdy; /* for mdump ready */ +}; + +int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val); +int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val); + +#define NXRD32(adapter, off) \ + (adapter->crb_read(adapter, off)) +#define NXWR32(adapter, off, val) \ + (adapter->crb_write(adapter, off, val)) +#define NXRDIO(adapter, addr) \ + (adapter->io_read(adapter, addr)) +#define NXWRIO(adapter, addr, val) \ + (adapter->io_write(adapter, addr, val)) + +int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32); +void netxen_pcie_sem_unlock(struct netxen_adapter *, int); + +#define netxen_rom_lock(a) \ + netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID) +#define netxen_rom_unlock(a) \ + netxen_pcie_sem_unlock((a), 2) +#define netxen_phy_lock(a) \ + netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID) +#define netxen_phy_unlock(a) \ + netxen_pcie_sem_unlock((a), 3) +#define netxen_api_lock(a) \ + netxen_pcie_sem_lock((a), 5, 0) +#define netxen_api_unlock(a) \ + netxen_pcie_sem_unlock((a), 5) +#define netxen_sw_lock(a) \ + netxen_pcie_sem_lock((a), 6, 0) +#define netxen_sw_unlock(a) \ + netxen_pcie_sem_unlock((a), 6) +#define crb_win_lock(a) \ + netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID) +#define crb_win_unlock(a) \ + netxen_pcie_sem_unlock((a), 7) + +int netxen_nic_get_board_info(struct netxen_adapter *adapter); +int netxen_nic_wol_supported(struct netxen_adapter *adapter); + +/* Functions from netxen_nic_init.c */ +int netxen_init_dummy_dma(struct netxen_adapter *adapter); +void netxen_free_dummy_dma(struct netxen_adapter *adapter); + +int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter); +int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); +int netxen_load_firmware(struct netxen_adapter *adapter); +int netxen_need_fw_reset(struct netxen_adapter *adapter); +void netxen_request_firmware(struct netxen_adapter *adapter); +void netxen_release_firmware(struct netxen_adapter *adapter); +int netxen_pinit_from_rom(struct netxen_adapter *adapter); + +int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); +int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size); +int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size); +int netxen_flash_unlock(struct netxen_adapter *adapter); +int netxen_backup_crbinit(struct netxen_adapter *adapter); +int netxen_flash_erase_secondary(struct netxen_adapter *adapter); +int netxen_flash_erase_primary(struct netxen_adapter *adapter); +void netxen_halt_pegs(struct netxen_adapter *adapter); + +int netxen_rom_se(struct netxen_adapter *adapter, int addr); + +int netxen_alloc_sw_resources(struct netxen_adapter *adapter); +void netxen_free_sw_resources(struct 
netxen_adapter *adapter); + +void netxen_setup_hwops(struct netxen_adapter *adapter); +void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32); + +int netxen_alloc_hw_resources(struct netxen_adapter *adapter); +void netxen_free_hw_resources(struct netxen_adapter *adapter); + +void netxen_release_rx_buffers(struct netxen_adapter *adapter); +void netxen_release_tx_buffers(struct netxen_adapter *adapter); + +int netxen_init_firmware(struct netxen_adapter *adapter); +void netxen_nic_clear_stats(struct netxen_adapter *adapter); +void netxen_watchdog_task(struct work_struct *work); +void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, + struct nx_host_rds_ring *rds_ring); +int netxen_process_cmd_ring(struct netxen_adapter *adapter); +int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max); + +void netxen_p3_free_mac_list(struct netxen_adapter *adapter); +int netxen_config_intr_coalesce(struct netxen_adapter *adapter); +int netxen_config_rss(struct netxen_adapter *adapter, int enable); +int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd); +int netxen_linkevent_request(struct netxen_adapter *adapter, int enable); +void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup); +void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *); +void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64); + +int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, + u32 speed, u32 duplex, u32 autoneg); +int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); +int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); +int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); +int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable); +int netxen_send_lro_cleanup(struct netxen_adapter *adapter); +int netxen_setup_minidump(struct netxen_adapter *adapter); +void netxen_dump_fw(struct netxen_adapter *adapter); +void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring); + +/* Functions from netxen_nic_main.c */ +int netxen_nic_reset_context(struct netxen_adapter *); + +int nx_dev_request_reset(struct netxen_adapter *adapter); + +/* + * NetXen Board information + */ + +#define NETXEN_MAX_SHORT_NAME 32 +struct netxen_brdinfo { + int brdtype; /* type of board */ + long ports; /* max no of physical ports */ + char short_name[NETXEN_MAX_SHORT_NAME]; +}; + +struct netxen_dimm_cfg { + u8 presence; + u8 mem_type; + u8 dimm_type; + u32 size; +}; + +static const struct netxen_brdinfo netxen_boards[] = { + {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"}, + {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"}, + {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"}, + {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, + {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, + {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, + {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig "}, + {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"}, + {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"}, + {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"}, + {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"}, + {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, + {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, + {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, + {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, + {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 
Option"}, + {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} +}; + +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) + +static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name) +{ + int i, found = 0; + for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { + if (netxen_boards[i].brdtype == type) { + strcpy(name, netxen_boards[i].short_name); + found = 1; + break; + } + } + + if (!found) { + strcpy(name, "Unknown"); + return -EINVAL; + } + + return 0; +} + +static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) +{ + smp_mb(); + return find_diff_among(tx_ring->producer, + tx_ring->sw_consumer, tx_ring->num_desc); + +} + +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); +int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); +void netxen_change_ringparam(struct netxen_adapter *adapter); +int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); + +extern const struct ethtool_ops netxen_nic_ethtool_ops; + +#endif /* __NETXEN_NIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c new file mode 100644 index 000000000..b8d527035 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -0,0 +1,941 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#include "netxen_nic_hw.h" +#include "netxen_nic.h" + +#define NXHAL_VERSION 1 + +static u32 +netxen_poll_rsp(struct netxen_adapter *adapter) +{ + u32 rsp = NX_CDRP_RSP_OK; + int timeout = 0; + + do { + /* give at least 1 ms for firmware to respond */ + msleep(1); + + if (++timeout > NX_OS_CRB_RETRY_COUNT) + return NX_CDRP_RSP_TIMEOUT; + + rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET); + } while (!NX_CDRP_IS_RSP(rsp)); + + return rsp; +} + +static u32 +netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd) +{ + u32 rsp; + u32 signature = 0; + u32 rcode = NX_RCODE_SUCCESS; + + signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func, + NXHAL_VERSION); + /* Acquire semaphore before accessing CRB */ + if (netxen_api_lock(adapter)) + return NX_RCODE_TIMEOUT; + + NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature); + + NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1); + + NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2); + + NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3); + + NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd)); + + rsp = netxen_poll_rsp(adapter); + + if (rsp == NX_CDRP_RSP_TIMEOUT) { + printk(KERN_ERR "%s: card response timeout.\n", + netxen_nic_driver_name); + + rcode = NX_RCODE_TIMEOUT; + } else if (rsp == NX_CDRP_RSP_FAIL) { + rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET); + + printk(KERN_ERR "%s: failed card response code: 0x%x\n", + netxen_nic_driver_name, rcode); + } else if (rsp == NX_CDRP_RSP_OK) { + cmd->rsp.cmd = NX_RCODE_SUCCESS; + if (cmd->rsp.arg2) + cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET); + if (cmd->rsp.arg3) + cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET); + } + + if (cmd->rsp.arg1) + cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET); + /* Release semaphore */ + netxen_api_unlock(adapter); + + return rcode; +} + +static int +netxen_get_minidump_template_size(struct netxen_adapter *adapter) +{ + struct netxen_cmd_args cmd; + memset(&cmd, 0, sizeof(cmd)); + cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE; + memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd)); + netxen_issue_cmd(adapter, &cmd); + if (cmd.rsp.cmd != NX_RCODE_SUCCESS) { + dev_info(&adapter->pdev->dev, + "Can't get template size %d\n", cmd.rsp.cmd); + return -EIO; + } + adapter->mdump.md_template_size = cmd.rsp.arg2; + adapter->mdump.md_template_ver = cmd.rsp.arg3; + return 0; +} + +static int +netxen_get_minidump_template(struct netxen_adapter *adapter) +{ + dma_addr_t md_template_addr; + void *addr; + u32 size; + struct netxen_cmd_args cmd; + size = adapter->mdump.md_template_size; + + if (size == 0) { + dev_err(&adapter->pdev->dev, "Cannot capture Minidump " + "template. 
Invalid template size.\n"); + return NX_RCODE_INVALID_ARGS; + } + + addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr); + if (!addr) { + dev_err(&adapter->pdev->dev, "Unable to allocate DMA-able memory for template.\n"); + return -ENOMEM; + } + + memset(&cmd, 0, sizeof(cmd)); + memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd)); + cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR; + cmd.req.arg1 = LSD(md_template_addr); + cmd.req.arg2 = MSD(md_template_addr); + cmd.req.arg3 |= size; + netxen_issue_cmd(adapter, &cmd); + + if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) { + memcpy(adapter->mdump.md_template, addr, size); + } else { + dev_err(&adapter->pdev->dev, "Failed to get minidump template, " + "err_code: %d, requested_size: %d, actual_size: %d\n", + cmd.rsp.cmd, size, cmd.rsp.arg2); + } + pci_free_consistent(adapter->pdev, size, addr, md_template_addr); + return 0; +} + +static u32 +netxen_check_template_checksum(struct netxen_adapter *adapter) +{ + u64 sum = 0; + u32 *buff = adapter->mdump.md_template; + int count = adapter->mdump.md_template_size/sizeof(uint32_t); + + while (count-- > 0) + sum += *buff++; + while (sum >> 32) + sum = (sum & 0xFFFFFFFF) + (sum >> 32); + + return ~sum; +} + +int +netxen_setup_minidump(struct netxen_adapter *adapter) +{ + int err = 0, i; + u32 *template, *tmp_buf; + struct netxen_minidump_template_hdr *hdr; + err = netxen_get_minidump_template_size(adapter); + if (err) { + adapter->mdump.fw_supports_md = 0; + if ((err == NX_RCODE_CMD_INVALID) || + (err == NX_RCODE_CMD_NOT_IMPL)) { + dev_info(&adapter->pdev->dev, + "Flashed firmware version does not support minidump, " + "minimum version required is [ %u.%u.%u ].\n", + NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR, + NX_MD_SUPPORT_SUBVERSION); + } + return err; + } + + if (!adapter->mdump.md_template_size) { + dev_err(&adapter->pdev->dev, "Error: Invalid template size, " + "should be non-zero.\n"); + return -EIO; + } + adapter->mdump.md_template = + kmalloc(adapter->mdump.md_template_size, GFP_KERNEL); + + if (!adapter->mdump.md_template) + return -ENOMEM; + + err = netxen_get_minidump_template(adapter); + if (err) { + if (err == NX_RCODE_CMD_NOT_IMPL) + adapter->mdump.fw_supports_md = 0; + goto free_template; + } + + if (netxen_check_template_checksum(adapter)) { + dev_err(&adapter->pdev->dev, "Minidump template checksum error\n"); + err = -EIO; + goto free_template; + } + + adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF; + tmp_buf = (u32 *) adapter->mdump.md_template; + template = (u32 *) adapter->mdump.md_template; + for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++) + *template++ = __le32_to_cpu(*tmp_buf++); + hdr = (struct netxen_minidump_template_hdr *) + adapter->mdump.md_template; + adapter->mdump.md_capture_buff = NULL; + adapter->mdump.fw_supports_md = 1; + adapter->mdump.md_enabled = 0; + + return err; + +free_template: + kfree(adapter->mdump.md_template); + adapter->mdump.md_template = NULL; + return err; +} + + +int +nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) +{ + u32 rcode = NX_RCODE_SUCCESS; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct netxen_cmd_args cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.req.cmd = NX_CDRP_CMD_SET_MTU; + cmd.req.arg1 = recv_ctx->context_id; + cmd.req.arg2 = mtu; + cmd.req.arg3 = 0; + + if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) + rcode = netxen_issue_cmd(adapter, &cmd); + + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return 0; +} + +int +nx_fw_cmd_set_gbe_port(struct netxen_adapter 
*adapter, + u32 speed, u32 duplex, u32 autoneg) +{ + struct netxen_cmd_args cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT; + cmd.req.arg1 = speed; + cmd.req.arg2 = duplex; + cmd.req.arg3 = autoneg; + return netxen_issue_cmd(adapter, &cmd); +} + +static int +nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) +{ + void *addr; + nx_hostrq_rx_ctx_t *prq; + nx_cardrsp_rx_ctx_t *prsp; + nx_hostrq_rds_ring_t *prq_rds; + nx_hostrq_sds_ring_t *prq_sds; + nx_cardrsp_rds_ring_t *prsp_rds; + nx_cardrsp_sds_ring_t *prsp_sds; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_cmd_args cmd; + + dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; + u64 phys_addr; + + int i, nrds_rings, nsds_rings; + size_t rq_size, rsp_size; + u32 cap, reg, val; + + int err; + + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + nrds_rings = adapter->max_rds_rings; + nsds_rings = adapter->max_sds_rings; + + rq_size = + SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); + rsp_size = + SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings); + + addr = pci_alloc_consistent(adapter->pdev, + rq_size, &hostrq_phys_addr); + if (addr == NULL) + return -ENOMEM; + prq = addr; + + addr = pci_alloc_consistent(adapter->pdev, + rsp_size, &cardrsp_phys_addr); + if (addr == NULL) { + err = -ENOMEM; + goto out_free_rq; + } + prsp = addr; + + prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); + + cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); + cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); + + if (adapter->flags & NETXEN_FW_MSS_CAP) + cap |= NX_CAP0_HW_LRO_MSS; + + prq->capabilities[0] = cpu_to_le32(cap); + prq->host_int_crb_mode = + cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); + prq->host_rds_crb_mode = + cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE); + + prq->num_rds_rings = cpu_to_le16(nrds_rings); + prq->num_sds_rings = cpu_to_le16(nsds_rings); + prq->rds_ring_offset = cpu_to_le32(0); + + val = le32_to_cpu(prq->rds_ring_offset) + + (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); + prq->sds_ring_offset = cpu_to_le32(val); + + prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + + le32_to_cpu(prq->rds_ring_offset)); + + for (i = 0; i < nrds_rings; i++) { + + rds_ring = &recv_ctx->rds_rings[i]; + + prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); + prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); + prq_rds[i].ring_kind = cpu_to_le32(i); + prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); + } + + prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + + le32_to_cpu(prq->sds_ring_offset)); + + for (i = 0; i < nsds_rings; i++) { + + sds_ring = &recv_ctx->sds_rings[i]; + + prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); + prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); + prq_sds[i].msi_index = cpu_to_le16(i); + } + + phys_addr = hostrq_phys_addr; + memset(&cmd, 0, sizeof(cmd)); + cmd.req.arg1 = (u32)(phys_addr >> 32); + cmd.req.arg2 = (u32)(phys_addr & 0xffffffff); + cmd.req.arg3 = rq_size; + cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX; + err = netxen_issue_cmd(adapter, &cmd); + if (err) { + printk(KERN_WARNING + "Failed to create rx ctx in firmware%d\n", err); + goto out_free_rsp; + } + + + prsp_rds = ((nx_cardrsp_rds_ring_t *) + &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { + rds_ring = &recv_ctx->rds_rings[i]; + + reg = le32_to_cpu(prsp_rds[i].host_producer_crb); + rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter, + 
NETXEN_NIC_REG(reg - 0x200)); + } + + prsp_sds = ((nx_cardrsp_sds_ring_t *) + &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { + sds_ring = &recv_ctx->sds_rings[i]; + + reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); + sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + + reg = le32_to_cpu(prsp_sds[i].interrupt_crb); + sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(reg - 0x200)); + } + + recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); + recv_ctx->context_id = le16_to_cpu(prsp->context_id); + recv_ctx->virt_port = prsp->virt_port; + +out_free_rsp: + pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); +out_free_rq: + pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); + return err; +} + +static void +nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct netxen_cmd_args cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.req.arg1 = recv_ctx->context_id; + cmd.req.arg2 = NX_DESTROY_CTX_RESET; + cmd.req.arg3 = 0; + cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX; + + if (netxen_issue_cmd(adapter, &cmd)) { + printk(KERN_WARNING + "%s: Failed to destroy rx ctx in firmware\n", + netxen_nic_driver_name); + } +} + +static int +nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter) +{ + nx_hostrq_tx_ctx_t *prq; + nx_hostrq_cds_ring_t *prq_cds; + nx_cardrsp_tx_ctx_t *prsp; + void *rq_addr, *rsp_addr; + size_t rq_size, rsp_size; + u32 temp; + int err = 0; + u64 offset, phys_addr; + dma_addr_t rq_phys_addr, rsp_phys_addr; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct netxen_cmd_args cmd; + + rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); + rq_addr = pci_alloc_consistent(adapter->pdev, + rq_size, &rq_phys_addr); + if (!rq_addr) + return -ENOMEM; + + rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t); + rsp_addr = pci_alloc_consistent(adapter->pdev, + rsp_size, &rsp_phys_addr); + if (!rsp_addr) { + err = -ENOMEM; + goto out_free_rq; + } + + memset(rq_addr, 0, rq_size); + prq = rq_addr; + + memset(rsp_addr, 0, rsp_size); + prsp = rsp_addr; + + prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); + + temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO); + prq->capabilities[0] = cpu_to_le32(temp); + + prq->host_int_crb_mode = + cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); + + prq->interrupt_ctl = 0; + prq->msi_index = 0; + + prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr); + + offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx); + prq->cmd_cons_dma_addr = cpu_to_le64(offset); + + prq_cds = &prq->cds_ring; + + prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); + prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); + + phys_addr = rq_phys_addr; + memset(&cmd, 0, sizeof(cmd)); + cmd.req.arg1 = (u32)(phys_addr >> 32); + cmd.req.arg2 = ((u32)phys_addr & 0xffffffff); + cmd.req.arg3 = rq_size; + cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX; + err = netxen_issue_cmd(adapter, &cmd); + + if (err == NX_RCODE_SUCCESS) { + temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); + tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, + NETXEN_NIC_REG(temp - 0x200)); +#if 0 + adapter->tx_state = + le32_to_cpu(prsp->host_ctx_state); +#endif + adapter->tx_context_id = + le16_to_cpu(prsp->context_id); + } else { + printk(KERN_WARNING + "Failed to create tx ctx in firmware%d\n", err); + 
err = -EIO; + } + + pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); + +out_free_rq: + pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); + + return err; +} + +static void +nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) +{ + struct netxen_cmd_args cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.req.arg1 = adapter->tx_context_id; + cmd.req.arg2 = NX_DESTROY_CTX_RESET; + cmd.req.arg3 = 0; + cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX; + if (netxen_issue_cmd(adapter, &cmd)) { + printk(KERN_WARNING + "%s: Failed to destroy tx ctx in firmware\n", + netxen_nic_driver_name); + } +} + +int +nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val) +{ + u32 rcode; + struct netxen_cmd_args cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.req.arg1 = reg; + cmd.req.arg2 = 0; + cmd.req.arg3 = 0; + cmd.req.cmd = NX_CDRP_CMD_READ_PHY; + cmd.rsp.arg1 = 1; + rcode = netxen_issue_cmd(adapter, &cmd); + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + if (val == NULL) + return -EIO; + + *val = cmd.rsp.arg1; + return 0; +} + +int +nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val) +{ + u32 rcode; + struct netxen_cmd_args cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.req.arg1 = reg; + cmd.req.arg2 = val; + cmd.req.arg3 = 0; + cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY; + rcode = netxen_issue_cmd(adapter, &cmd); + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return 0; +} + +static u64 ctx_addr_sig_regs[][3] = { + {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, + {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, + {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, + {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} +}; + +#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) +#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) +#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) + +#define lower32(x) ((u32)((x) & 0xffffffff)) +#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) + +static struct netxen_recv_crb recv_crb_registers[] = { + /* Instance 0 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x100), + /* Jumbo frames */ + NETXEN_NIC_REG(0x110), + /* LRO */ + NETXEN_NIC_REG(0x120) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x138), + NETXEN_NIC_REG_2(0x000), + NETXEN_NIC_REG_2(0x004), + NETXEN_NIC_REG_2(0x008), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_0, + NETXEN_NIC_REG_2(0x044), + NETXEN_NIC_REG_2(0x048), + NETXEN_NIC_REG_2(0x04c), + }, + }, + /* Instance 1 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x144), + /* Jumbo frames */ + NETXEN_NIC_REG(0x154), + /* LRO */ + NETXEN_NIC_REG(0x164) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x17c), + NETXEN_NIC_REG_2(0x020), + NETXEN_NIC_REG_2(0x024), + NETXEN_NIC_REG_2(0x028), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_1, + NETXEN_NIC_REG_2(0x064), + NETXEN_NIC_REG_2(0x068), + NETXEN_NIC_REG_2(0x06c), + }, + }, + /* Instance 2 */ + { + /* crb_rcv_producer: */ + { + NETXEN_NIC_REG(0x1d8), + /* Jumbo frames */ + NETXEN_NIC_REG(0x1f8), + /* LRO */ + NETXEN_NIC_REG(0x208) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x220), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_2, + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + }, + /* Instance 3 */ + { + /* crb_rcv_producer: */ + { + 
NETXEN_NIC_REG(0x22c), + /* Jumbo frames */ + NETXEN_NIC_REG(0x23c), + /* LRO */ + NETXEN_NIC_REG(0x24c) + }, + /* crb_sts_consumer: */ + { + NETXEN_NIC_REG(0x264), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + /* sw_int_mask */ + { + CRB_SW_INT_MASK_3, + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + NETXEN_NIC_REG_2(0x03c), + }, + }, +}; + +static int +netxen_init_old_ctx(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + int port = adapter->portnum; + struct netxen_ring_ctx *hwctx; + u32 signature; + + tx_ring = adapter->tx_ring; + recv_ctx = &adapter->recv_ctx; + hwctx = recv_ctx->hwctx; + + hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr); + hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc); + + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + hwctx->rcv_rings[ring].addr = + cpu_to_le64(rds_ring->phys_addr); + hwctx->rcv_rings[ring].size = + cpu_to_le32(rds_ring->num_desc); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (ring == 0) { + hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr); + hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc); + } + hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr); + hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc); + hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring); + } + hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings); + + signature = (adapter->max_sds_rings > 1) ? + NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; + + NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), + lower32(recv_ctx->phys_addr)); + NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), + upper32(recv_ctx->phys_addr)); + NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), + signature | port); + return 0; +} + +int netxen_alloc_hw_resources(struct netxen_adapter *adapter) +{ + void *addr; + int err = 0; + int ring; + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int port = adapter->portnum; + + recv_ctx = &adapter->recv_ctx; + tx_ring = adapter->tx_ring; + + addr = pci_alloc_consistent(pdev, + sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), + &recv_ctx->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, "failed to allocate hw context\n"); + return -ENOMEM; + } + + memset(addr, 0, sizeof(struct netxen_ring_ctx)); + recv_ctx->hwctx = addr; + recv_ctx->hwctx->ctx_id = cpu_to_le32(port); + recv_ctx->hwctx->cmd_consumer_offset = + cpu_to_le64(recv_ctx->phys_addr + + sizeof(struct netxen_ring_ctx)); + tx_ring->hw_consumer = + (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); + + /* cmd desc ring */ + addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring), + &tx_ring->phys_addr); + + if (addr == NULL) { + dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", + netdev->name); + err = -ENOMEM; + goto err_out_free; + } + + tx_ring->desc_head = addr; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + addr = pci_alloc_consistent(adapter->pdev, + RCV_DESC_RINGSIZE(rds_ring), + &rds_ring->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, + "%s: failed to allocate rds ring 
[%d]\n", + netdev->name, ring); + err = -ENOMEM; + goto err_out_free; + } + rds_ring->desc_head = addr; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + rds_ring->crb_rcv_producer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_rcv_producer[ring]); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + addr = pci_alloc_consistent(adapter->pdev, + STATUS_DESC_RINGSIZE(sds_ring), + &sds_ring->phys_addr); + if (addr == NULL) { + dev_err(&pdev->dev, + "%s: failed to allocate sds ring [%d]\n", + netdev->name, ring); + err = -ENOMEM; + goto err_out_free; + } + sds_ring->desc_head = addr; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + sds_ring->crb_sts_consumer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_sts_consumer[ring]); + + sds_ring->crb_intr_mask = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].sw_int_mask[ring]); + } + } + + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) + goto done; + err = nx_fw_cmd_create_rx_ctx(adapter); + if (err) + goto err_out_free; + err = nx_fw_cmd_create_tx_ctx(adapter); + if (err) + goto err_out_free; + } else { + err = netxen_init_old_ctx(adapter); + if (err) + goto err_out_free; + } + +done: + return 0; + +err_out_free: + netxen_free_hw_resources(adapter); + return err; +} + +void netxen_free_hw_resources(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + + int port = adapter->portnum; + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state)) + goto done; + + nx_fw_cmd_destroy_rx_ctx(adapter); + nx_fw_cmd_destroy_tx_ctx(adapter); + } else { + netxen_api_lock(adapter); + NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), + NETXEN_CTX_D3_RESET | port); + netxen_api_unlock(adapter); + } + + /* Allow dma queues to drain after context reset */ + msleep(20); + +done: + recv_ctx = &adapter->recv_ctx; + + if (recv_ctx->hwctx != NULL) { + pci_free_consistent(adapter->pdev, + sizeof(struct netxen_ring_ctx) + + sizeof(uint32_t), + recv_ctx->hwctx, + recv_ctx->phys_addr); + recv_ctx->hwctx = NULL; + } + + tx_ring = adapter->tx_ring; + if (tx_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + TX_DESC_RINGSIZE(tx_ring), + tx_ring->desc_head, tx_ring->phys_addr); + tx_ring->desc_head = NULL; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + if (rds_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + RCV_DESC_RINGSIZE(rds_ring), + rds_ring->desc_head, + rds_ring->phys_addr); + rds_ring->desc_head = NULL; + } + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (sds_ring->desc_head != NULL) { + pci_free_consistent(adapter->pdev, + STATUS_DESC_RINGSIZE(sds_ring), + sds_ring->desc_head, + sds_ring->phys_addr); + sds_ring->desc_head = NULL; + } + } +} + diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c new file mode 100644 index 000000000..87e073c6e --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -0,0 +1,959 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". + * + */ + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "netxen_nic.h" +#include "netxen_nic_hw.h" + +struct netxen_nic_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \ + offsetof(struct netxen_adapter, m) + +#define NETXEN_NIC_PORT_WINDOW 0x10000 +#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF + +static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = { + {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)}, + {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)}, + {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)}, + {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, + {"csummed", NETXEN_NIC_STAT(stats.csummed)}, + {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)}, + {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)}, + {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, + {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)}, +}; + +#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats) + +static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = { + "Register_Test_on_offline", + "Link_Test_on_offline" +}; + +#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) + +#define NETXEN_NIC_REGS_COUNT 30 +#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) +#define NETXEN_MAX_EEPROM_LEN 1024 + +static int netxen_nic_get_eeprom_len(struct net_device *dev) +{ + return NETXEN_FLASH_TOTAL_SIZE; +} + +static void +netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 fw_major = 0; + u32 fw_minor = 0; + u32 fw_build = 0; + + strlcpy(drvinfo->driver, netxen_nic_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, + sizeof(drvinfo->version)); + fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); + fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", fw_major, fw_minor, fw_build); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; + drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); +} + +static int +netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int check_sfp_module = 0; + + /* read which mode */ + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + ecmd->advertising = (ADVERTISED_100baseT_Half | + 
ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + ecmd->port = PORT_TP; + + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->duplex = adapter->link_duplex; + ecmd->autoneg = adapter->link_autoneg; + + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + u32 val; + + val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); + if (val == NETXEN_PORT_MODE_802_3_AP) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + + if (netif_running(dev) && adapter->has_link_events) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + ecmd->autoneg = adapter->link_autoneg; + ecmd->duplex = adapter->link_duplex; + goto skip; + } + + ecmd->port = PORT_TP; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + u16 pcifn = adapter->ahw.pci_func; + + val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn)); + ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ * + P3_LINK_SPEED_VAL(pcifn, val)); + } else + ethtool_cmd_speed_set(ecmd, SPEED_10000); + + ecmd->duplex = DUPLEX_FULL; + ecmd->autoneg = AUTONEG_DISABLE; + } else + return -EIO; + +skip: + ecmd->phy_address = adapter->physical_port; + ecmd->transceiver = XCVR_EXTERNAL; + + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB35_4G: + case NETXEN_BRDTYPE_P2_SB31_2G: + case NETXEN_BRDTYPE_P3_REF_QG: + case NETXEN_BRDTYPE_P3_4_GB: + case NETXEN_BRDTYPE_P3_4_GB_MM: + + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4_LP: + case NETXEN_BRDTYPE_P3_10000_BASE_T: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->autoneg = (adapter->ahw.board_type == + NETXEN_BRDTYPE_P2_SB31_10G_CX4) ? 
+ (AUTONEG_DISABLE) : (adapter->link_autoneg); + break; + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P3_IMEZ: + case NETXEN_BRDTYPE_P3_XG_LOM: + case NETXEN_BRDTYPE_P3_HMEZ: + ecmd->supported |= SUPPORTED_MII; + ecmd->advertising |= ADVERTISED_MII; + ecmd->port = PORT_MII; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: + ecmd->advertising |= ADVERTISED_TP; + ecmd->supported |= SUPPORTED_TP; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P3_10G_XFP: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case NETXEN_BRDTYPE_P3_10G_TP: + if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= + (ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(dev) && + adapter->has_link_events; + } else { + ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + ecmd->advertising |= + (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } + break; + default: + printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", + adapter->ahw.board_type); + return -EIO; + } + + if (check_sfp_module) { + switch (adapter->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ecmd->port = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ecmd->port = PORT_TP; + break; + default: + ecmd->port = -1; + } + } + + if (!netif_running(dev) || !adapter->ahw.linkup) { + ecmd->duplex = DUPLEX_UNKNOWN; + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + } + + return 0; +} + +static int +netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(ecmd); + int ret; + + if (adapter->ahw.port_type != NETXEN_NIC_GBE) + return -EOPNOTSUPP; + + if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) + return -EOPNOTSUPP; + + ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex, + ecmd->autoneg); + if (ret == NX_RCODE_NOT_SUPPORTED) + return -EOPNOTSUPP; + else if (ret) + return -EIO; + + adapter->link_speed = speed; + adapter->link_duplex = ecmd->duplex; + adapter->link_autoneg = ecmd->autoneg; + + if (!netif_running(dev)) + return 0; + + dev->netdev_ops->ndo_stop(dev); + return dev->netdev_ops->ndo_open(dev); +} + +static int netxen_nic_get_regs_len(struct net_device *dev) +{ + return NETXEN_NIC_REGS_LEN; +} + +static void +netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct nx_host_sds_ring *sds_ring; + u32 *regs_buff = p; + int ring, i = 0; + int port = adapter->physical_port; + + memset(p, 0, NETXEN_NIC_REGS_LEN); + + regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | + (adapter->pdev)->device; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); + regs_buff[i++] = 
NXRD32(adapter, CRB_RCVPEG_STATE); + regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); + regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); + + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); + i += 2; + + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); + regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); + + } else { + i++; + + regs_buff[i++] = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); + regs_buff[i++] = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); + + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); + regs_buff[i++] = NXRDIO(adapter, + adapter->tx_ring->crb_cmd_consumer); + } + + regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); + + regs_buff[i++] = NXRDIO(adapter, + recv_ctx->rds_rings[0].crb_rcv_producer); + regs_buff[i++] = NXRDIO(adapter, + recv_ctx->rds_rings[1].crb_rcv_producer); + + regs_buff[i++] = adapter->max_sds_rings; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &(recv_ctx->sds_rings[ring]); + regs_buff[i++] = NXRDIO(adapter, + sds_ring->crb_sts_consumer); + } +} + +static u32 netxen_nic_test_link(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 val, port; + + port = adapter->physical_port; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + val = NXRD32(adapter, CRB_XG_STATE_P3); + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); + return (val == XG_LINK_UP_P3) ? 0 : 1; + } else { + val = NXRD32(adapter, CRB_XG_STATE); + val = (val >> port*8) & 0xff; + return (val == XG_LINK_UP) ? 
0 : 1; + } +} + +static int +netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int offset; + int ret; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = (adapter->pdev)->vendor | + ((adapter->pdev)->device << 16); + offset = eeprom->offset; + + ret = netxen_rom_fast_read_words(adapter, offset, bytes, + eeprom->len); + if (ret < 0) + return ret; + + return 0; +} + +static void +netxen_nic_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + + ring->rx_pending = adapter->num_rxd; + ring->rx_jumbo_pending = adapter->num_jumbo_rxd; + ring->rx_jumbo_pending += adapter->num_lro_rxd; + ring->tx_pending = adapter->num_txd; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G; + } else { + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } + + ring->tx_max_pending = MAX_CMD_DESCRIPTORS; +} + +static u32 +netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) +{ + u32 num_desc; + num_desc = max(val, min); + num_desc = min(num_desc, max); + num_desc = roundup_pow_of_two(num_desc); + + if (val != num_desc) { + printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", + netxen_nic_driver_name, r_name, num_desc, val); + } + + return num_desc; +} + +static int +netxen_nic_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G; + u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; + u16 num_rxd, num_jumbo_rxd, num_txd; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EOPNOTSUPP; + + if (ring->rx_mini_pending) + return -EOPNOTSUPP; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + max_rcv_desc = MAX_RCV_DESCRIPTORS_1G; + max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } + + num_rxd = netxen_validate_ringparam(ring->rx_pending, + MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx"); + + num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending, + MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo"); + + num_txd = netxen_validate_ringparam(ring->tx_pending, + MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); + + if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && + num_jumbo_rxd == adapter->num_jumbo_rxd) + return 0; + + adapter->num_rxd = num_rxd; + adapter->num_jumbo_rxd = num_jumbo_rxd; + adapter->num_txd = num_txd; + + return netxen_nic_reset_context(adapter); +} + +static void +netxen_nic_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + __u32 val; + int port = adapter->physical_port; + + pause->autoneg = 0; + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) + return; + /* get flow control settings */ + val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); + pause->rx_pause = netxen_gb_get_rx_flowctl(val); + val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); + break; + case 1: + pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); + break; + case 2: + pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); + break; + case 3: + default: + pause->tx_pause = 
!(netxen_gb_get_gb3_mask(val)); + break; + } + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS)) + return; + pause->rx_pause = 1; + val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); + if (port == 0) + pause->tx_pause = !(netxen_xg_get_xg0_mask(val)); + else + pause->tx_pause = !(netxen_xg_get_xg1_mask(val)); + } else { + printk(KERN_ERR"%s: Unknown board type: %x\n", + netxen_nic_driver_name, adapter->ahw.port_type); + } +} + +static int +netxen_nic_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + __u32 val; + int port = adapter->physical_port; + + /* not supported */ + if (pause->autoneg) + return -EINVAL; + + /* read mode */ + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) + return -EIO; + /* set flow control */ + val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port)); + + if (pause->rx_pause) + netxen_gb_rx_flowctl(val); + else + netxen_gb_unset_rx_flowctl(val); + + NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), + val); + /* set autoneg */ + val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); + switch (port) { + case 0: + if (pause->tx_pause) + netxen_gb_unset_gb0_mask(val); + else + netxen_gb_set_gb0_mask(val); + break; + case 1: + if (pause->tx_pause) + netxen_gb_unset_gb1_mask(val); + else + netxen_gb_set_gb1_mask(val); + break; + case 2: + if (pause->tx_pause) + netxen_gb_unset_gb2_mask(val); + else + netxen_gb_set_gb2_mask(val); + break; + case 3: + default: + if (pause->tx_pause) + netxen_gb_unset_gb3_mask(val); + else + netxen_gb_set_gb3_mask(val); + break; + } + NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); + } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS)) + return -EIO; + val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL); + if (port == 0) { + if (pause->tx_pause) + netxen_xg_unset_xg0_mask(val); + else + netxen_xg_set_xg0_mask(val); + } else { + if (pause->tx_pause) + netxen_xg_unset_xg1_mask(val); + else + netxen_xg_set_xg1_mask(val); + } + NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val); + } else { + printk(KERN_ERR "%s: Unknown board type: %x\n", + netxen_nic_driver_name, + adapter->ahw.port_type); + } + return 0; +} + +static int netxen_nic_reg_test(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 data_read, data_written; + + data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); + if ((data_read & 0xffff) != adapter->pdev->vendor) + return 1; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + + data_written = (u32)0xa5a5a5a5; + + NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written); + data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST); + if (data_written != data_read) + return 1; + + return 0; +} + +static int netxen_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return NETXEN_NIC_TEST_LEN; + case ETH_SS_STATS: + return NETXEN_NIC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, + u64 *data) +{ + memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); + if ((data[0] = netxen_nic_reg_test(dev))) + eth_test->flags |= ETH_TEST_FL_FAILED; + /* link test */ + if ((data[1] = (u64) netxen_nic_test_link(dev))) + eth_test->flags |= ETH_TEST_FL_FAILED; +} + +static void +netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + 
int index; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *netxen_nic_gstrings_test, + NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { + memcpy(data + index * ETH_GSTRING_LEN, + netxen_nic_gstrings_stats[index].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void +netxen_nic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int index; + + for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) { + char *p = + (char *)adapter + + netxen_nic_gstrings_stats[index].stat_offset; + data[index] = + (netxen_nic_gstrings_stats[index].sizeof_stat == + sizeof(u64)) ? *(u64 *) p : *(u32 *) p; + } +} + +static void +netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 wol_cfg = 0; + + wol->supported = 0; + wol->wolopts = 0; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) + wol->supported |= WAKE_MAGIC; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol_cfg & (1UL << adapter->portnum)) + wol->wolopts |= WAKE_MAGIC; +} + +static int +netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + u32 wol_cfg = 0; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EOPNOTSUPP; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (!(wol_cfg & (1 << adapter->portnum))) + return -EOPNOTSUPP; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol->wolopts & WAKE_MAGIC) + wol_cfg |= 1UL << adapter->portnum; + else + wol_cfg &= ~(1UL << adapter->portnum); + NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg); + + return 0; +} + +/* + * Set the coalescing parameters. Currently only normal is supported. + * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the + * firmware coalescing to default. + */ +static int netxen_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return -EINVAL; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EINVAL; + + /* + * Return Error if unsupported values or + * unsupported parameters are set. 
+ */ + if (ethcoal->rx_coalesce_usecs > 0xffff || + ethcoal->rx_max_coalesced_frames > 0xffff || + ethcoal->tx_coalesce_usecs > 0xffff || + ethcoal->tx_max_coalesced_frames > 0xffff || + ethcoal->rx_coalesce_usecs_irq || + ethcoal->rx_max_coalesced_frames_irq || + ethcoal->tx_coalesce_usecs_irq || + ethcoal->tx_max_coalesced_frames_irq || + ethcoal->stats_block_coalesce_usecs || + ethcoal->use_adaptive_rx_coalesce || + ethcoal->use_adaptive_tx_coalesce || + ethcoal->pkt_rate_low || + ethcoal->rx_coalesce_usecs_low || + ethcoal->rx_max_coalesced_frames_low || + ethcoal->tx_coalesce_usecs_low || + ethcoal->tx_max_coalesced_frames_low || + ethcoal->pkt_rate_high || + ethcoal->rx_coalesce_usecs_high || + ethcoal->rx_max_coalesced_frames_high || + ethcoal->tx_coalesce_usecs_high || + ethcoal->tx_max_coalesced_frames_high) + return -EINVAL; + + if (!ethcoal->rx_coalesce_usecs || + !ethcoal->rx_max_coalesced_frames) { + adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; + adapter->coal.normal.data.rx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->coal.normal.data.rx_packets = + NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; + } else { + adapter->coal.flags = 0; + adapter->coal.normal.data.rx_time_us = + ethcoal->rx_coalesce_usecs; + adapter->coal.normal.data.rx_packets = + ethcoal->rx_max_coalesced_frames; + } + adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs; + adapter->coal.normal.data.tx_packets = + ethcoal->tx_max_coalesced_frames; + + netxen_config_intr_coalesce(adapter); + + return 0; +} + +static int netxen_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return -EINVAL; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EINVAL; + + ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; + ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; + ethcoal->rx_max_coalesced_frames = + adapter->coal.normal.data.rx_packets; + ethcoal->tx_max_coalesced_frames = + adapter->coal.normal.data.tx_packets; + + return 0; +} + +static int +netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netxen_minidump *mdump = &adapter->mdump; + if (adapter->fw_mdump_rdy) + dump->len = mdump->md_dump_size; + else + dump->len = 0; + + if (!mdump->md_enabled) + dump->flag = ETH_FW_DUMP_DISABLE; + else + dump->flag = mdump->md_capture_mask; + + dump->version = adapter->fw_version; + return 0; +} + +static int +netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val) +{ + int i; + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netxen_minidump *mdump = &adapter->mdump; + + switch (val->flag) { + case NX_FORCE_FW_DUMP_KEY: + if (!mdump->md_enabled) { + netdev_info(netdev, "FW dump not enabled\n"); + return 0; + } + if (adapter->fw_mdump_rdy) { + netdev_info(netdev, "Previous dump not cleared, not forcing dump\n"); + return 0; + } + netdev_info(netdev, "Forcing a fw dump\n"); + nx_dev_request_reset(adapter); + break; + case NX_DISABLE_FW_DUMP: + if (mdump->md_enabled) { + netdev_info(netdev, "Disabling FW Dump\n"); + mdump->md_enabled = 0; + } + break; + case NX_ENABLE_FW_DUMP: + if (!mdump->md_enabled) { + netdev_info(netdev, "Enabling FW dump\n"); + mdump->md_enabled = 1; + } + break; + case NX_FORCE_FW_RESET: + netdev_info(netdev, "Forcing FW reset\n"); + nx_dev_request_reset(adapter); + 
adapter->flags &= ~NETXEN_FW_RESET_OWNER; + break; + default: + for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) { + if (val->flag == FW_DUMP_LEVELS[i]) { + mdump->md_capture_mask = val->flag; + netdev_info(netdev, + "Driver mask changed to: 0x%x\n", + mdump->md_capture_mask); + return 0; + } + } + netdev_info(netdev, + "Invalid dump level: 0x%x\n", val->flag); + return -EINVAL; + } + + return 0; +} + +static int +netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + int i, copy_sz; + u32 *hdr_ptr, *data; + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netxen_minidump *mdump = &adapter->mdump; + + + if (!adapter->fw_mdump_rdy) { + netdev_info(netdev, "Dump not available\n"); + return -EINVAL; + } + /* Copy template header first */ + copy_sz = mdump->md_template_size; + hdr_ptr = (u32 *) mdump->md_template; + data = buffer; + for (i = 0; i < copy_sz/sizeof(u32); i++) + *data++ = cpu_to_le32(*hdr_ptr++); + + /* Copy captured dump data */ + memcpy(buffer + copy_sz, + mdump->md_capture_buff + mdump->md_template_size, + mdump->md_capture_size); + dump->len = copy_sz + mdump->md_capture_size; + dump->flag = mdump->md_capture_mask; + + /* Free dump area once data has been captured */ + vfree(mdump->md_capture_buff); + mdump->md_capture_buff = NULL; + adapter->fw_mdump_rdy = 0; + netdev_info(netdev, "extracted the fw dump Successfully\n"); + return 0; +} + +const struct ethtool_ops netxen_nic_ethtool_ops = { + .get_settings = netxen_nic_get_settings, + .set_settings = netxen_nic_set_settings, + .get_drvinfo = netxen_nic_get_drvinfo, + .get_regs_len = netxen_nic_get_regs_len, + .get_regs = netxen_nic_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = netxen_nic_get_eeprom_len, + .get_eeprom = netxen_nic_get_eeprom, + .get_ringparam = netxen_nic_get_ringparam, + .set_ringparam = netxen_nic_set_ringparam, + .get_pauseparam = netxen_nic_get_pauseparam, + .set_pauseparam = netxen_nic_set_pauseparam, + .get_wol = netxen_nic_get_wol, + .set_wol = netxen_nic_set_wol, + .self_test = netxen_nic_diag_test, + .get_strings = netxen_nic_get_strings, + .get_ethtool_stats = netxen_nic_get_ethtool_stats, + .get_sset_count = netxen_get_sset_count, + .get_coalesce = netxen_get_intr_coalesce, + .set_coalesce = netxen_set_intr_coalesce, + .get_dump_flag = netxen_get_dump_flag, + .get_dump_data = netxen_get_dump_data, + .set_dump = netxen_set_dump, +}; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h new file mode 100644 index 000000000..a310c2f65 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h @@ -0,0 +1,1079 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#ifndef __NETXEN_NIC_HDR_H_ +#define __NETXEN_NIC_HDR_H_ + +#include +#include + +/* + * The basic unit of access when reading/writing control registers. + */ + +typedef __le32 netxen_crbword_t; /* single word in CRB space */ + +enum { + NETXEN_HW_H0_CH_HUB_ADR = 0x05, + NETXEN_HW_H1_CH_HUB_ADR = 0x0E, + NETXEN_HW_H2_CH_HUB_ADR = 0x03, + NETXEN_HW_H3_CH_HUB_ADR = 0x01, + NETXEN_HW_H4_CH_HUB_ADR = 0x06, + NETXEN_HW_H5_CH_HUB_ADR = 0x07, + NETXEN_HW_H6_CH_HUB_ADR = 0x08 +}; + +/* Hub 0 */ +enum { + NETXEN_HW_MN_CRB_AGT_ADR = 0x15, + NETXEN_HW_MS_CRB_AGT_ADR = 0x25 +}; + +/* Hub 1 */ +enum { + NETXEN_HW_PS_CRB_AGT_ADR = 0x73, + NETXEN_HW_SS_CRB_AGT_ADR = 0x20, + NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b, + NETXEN_HW_QMS_CRB_AGT_ADR = 0x00, + NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01, + NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02, + NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03, + NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04, + NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58, + NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59, + NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a, + NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a, + NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c, + NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f, + NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12, + NETXEN_HW_SMB_CRB_AGT_ADR = 0x18 +}; + +/* Hub 2 */ +enum { + NETXEN_HW_NIU_CRB_AGT_ADR = 0x31, + NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19, + NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29, + + NETXEN_HW_SN_CRB_AGT_ADR = 0x10, + NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20, + NETXEN_HW_LPC_CRB_AGT_ADR = 0x22, + NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21, + NETXEN_HW_QM_CRB_AGT_ADR = 0x66, + NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60, + NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61, + NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62, + NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63, + NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09, + NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d, + NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e, + NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11 +}; + +/* Hub 3 */ +enum { + NETXEN_HW_PH_CRB_AGT_ADR = 0x1A, + NETXEN_HW_SRE_CRB_AGT_ADR = 0x50, + NETXEN_HW_EG_CRB_AGT_ADR = 0x51, + NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08 +}; + +/* Hub 4 */ +enum { + NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40, + NETXEN_HW_PEGN1_CRB_AGT_ADR, + NETXEN_HW_PEGN2_CRB_AGT_ADR, + NETXEN_HW_PEGN3_CRB_AGT_ADR, + NETXEN_HW_PEGNI_CRB_AGT_ADR, + NETXEN_HW_PEGND_CRB_AGT_ADR, + NETXEN_HW_PEGNC_CRB_AGT_ADR, + NETXEN_HW_PEGR0_CRB_AGT_ADR, + NETXEN_HW_PEGR1_CRB_AGT_ADR, + NETXEN_HW_PEGR2_CRB_AGT_ADR, + NETXEN_HW_PEGR3_CRB_AGT_ADR, + NETXEN_HW_PEGN4_CRB_AGT_ADR +}; + +/* Hub 5 */ +enum { + NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40, + NETXEN_HW_PEGS1_CRB_AGT_ADR, + NETXEN_HW_PEGS2_CRB_AGT_ADR, + NETXEN_HW_PEGS3_CRB_AGT_ADR, + NETXEN_HW_PEGSI_CRB_AGT_ADR, + NETXEN_HW_PEGSD_CRB_AGT_ADR, + NETXEN_HW_PEGSC_CRB_AGT_ADR +}; + +/* Hub 6 */ +enum { + NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46, + NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47, + NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48, + NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49, + NETXEN_HW_NCM_CRB_AGT_ADR = 0x16, + NETXEN_HW_TMR_CRB_AGT_ADR = 0x17, + NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05, + NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06, + NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07 +}; + +/* Floaters - non existent modules */ +#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +enum { + NETXEN_HW_PX_MAP_CRB_PH = 0, + NETXEN_HW_PX_MAP_CRB_PS, + NETXEN_HW_PX_MAP_CRB_MN, + NETXEN_HW_PX_MAP_CRB_MS, + NETXEN_HW_PX_MAP_CRB_PGR1, + NETXEN_HW_PX_MAP_CRB_SRE, + NETXEN_HW_PX_MAP_CRB_NIU, + NETXEN_HW_PX_MAP_CRB_QMN, + NETXEN_HW_PX_MAP_CRB_SQN0, + NETXEN_HW_PX_MAP_CRB_SQN1, + NETXEN_HW_PX_MAP_CRB_SQN2, + NETXEN_HW_PX_MAP_CRB_SQN3, + NETXEN_HW_PX_MAP_CRB_QMS, + 
NETXEN_HW_PX_MAP_CRB_SQS0, + NETXEN_HW_PX_MAP_CRB_SQS1, + NETXEN_HW_PX_MAP_CRB_SQS2, + NETXEN_HW_PX_MAP_CRB_SQS3, + NETXEN_HW_PX_MAP_CRB_PGN0, + NETXEN_HW_PX_MAP_CRB_PGN1, + NETXEN_HW_PX_MAP_CRB_PGN2, + NETXEN_HW_PX_MAP_CRB_PGN3, + NETXEN_HW_PX_MAP_CRB_PGND, + NETXEN_HW_PX_MAP_CRB_PGNI, + NETXEN_HW_PX_MAP_CRB_PGS0, + NETXEN_HW_PX_MAP_CRB_PGS1, + NETXEN_HW_PX_MAP_CRB_PGS2, + NETXEN_HW_PX_MAP_CRB_PGS3, + NETXEN_HW_PX_MAP_CRB_PGSD, + NETXEN_HW_PX_MAP_CRB_PGSI, + NETXEN_HW_PX_MAP_CRB_SN, + NETXEN_HW_PX_MAP_CRB_PGR2, + NETXEN_HW_PX_MAP_CRB_EG, + NETXEN_HW_PX_MAP_CRB_PH2, + NETXEN_HW_PX_MAP_CRB_PS2, + NETXEN_HW_PX_MAP_CRB_CAM, + NETXEN_HW_PX_MAP_CRB_CAS0, + NETXEN_HW_PX_MAP_CRB_CAS1, + NETXEN_HW_PX_MAP_CRB_CAS2, + NETXEN_HW_PX_MAP_CRB_C2C0, + NETXEN_HW_PX_MAP_CRB_C2C1, + NETXEN_HW_PX_MAP_CRB_TIMR, + NETXEN_HW_PX_MAP_CRB_PGR3, + NETXEN_HW_PX_MAP_CRB_RPMX1, + NETXEN_HW_PX_MAP_CRB_RPMX2, + NETXEN_HW_PX_MAP_CRB_RPMX3, + NETXEN_HW_PX_MAP_CRB_RPMX4, + NETXEN_HW_PX_MAP_CRB_RPMX5, + NETXEN_HW_PX_MAP_CRB_RPMX6, + NETXEN_HW_PX_MAP_CRB_RPMX7, + NETXEN_HW_PX_MAP_CRB_XDMA, + NETXEN_HW_PX_MAP_CRB_I2Q, + NETXEN_HW_PX_MAP_CRB_ROMUSB, + NETXEN_HW_PX_MAP_CRB_CAS3, + NETXEN_HW_PX_MAP_CRB_RPMX0, + NETXEN_HW_PX_MAP_CRB_RPMX8, + NETXEN_HW_PX_MAP_CRB_RPMX9, + NETXEN_HW_PX_MAP_CRB_OCM0, + NETXEN_HW_PX_MAP_CRB_OCM1, + NETXEN_HW_PX_MAP_CRB_SMB, + NETXEN_HW_PX_MAP_CRB_I2C0, + NETXEN_HW_PX_MAP_CRB_I2C1, + NETXEN_HW_PX_MAP_CRB_LPC, + NETXEN_HW_PX_MAP_CRB_PGNC, + NETXEN_HW_PX_MAP_CRB_PGR0 +}; + +/* This field defines CRB adr [31:20] of the agents */ + +#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \ + ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \ + ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR) 
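For orientation, a minimal standalone sketch of how these hub/agent pairs compose: the hub id occupies bits [11:7] and the agent id bits [6:0], and per the comment above the resulting field is what the driver later shifts into CRB address bits [31:20] (cf. the CRB_HI() macro in netxen_nic_hw.c further down). The constants are copied from the enums earlier in this header; the small demo program is illustrative only, not part of the patch.

#include <stdio.h>

/* Hub 2 and NIU agent ids, copied from the enums earlier in this header. */
#define H2_CH_HUB_ADR   0x03
#define NIU_CRB_AGT_ADR 0x31

int main(void)
{
	/* hub id in bits [11:7], agent id in bits [6:0] */
	unsigned int hub_agt = (H2_CH_HUB_ADR << 7) | NIU_CRB_AGT_ADR;	/* 0x1b1 */

	/* the driver places this field in CRB address bits [31:20] */
	unsigned int crb_hi = hub_agt << 20;				/* 0x1b100000 */

	printf("NIU hub/agent field 0x%x -> CRB bits [31:20] 0x%08x\n",
	       hub_agt, crb_hi);
	return 0;
}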
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \ + ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \ + ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \ + ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR) +#define 
NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \ + ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR) + +#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR) +#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \ + ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR) + +#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c) +#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034) +#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014) +#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000) +#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000) +#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000) + +#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16)) +#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008) + +#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034) + +#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20)) +#define CRB_REG_EX_PC 0x3c + +#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000) +#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000) + +#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) +#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) +#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) +#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) + +#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) + +#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +/* Lock IDs for ROM lock */ +#define ROM_LOCK_DRIVER 0x0d417340 + +/****************************************************************************** +* +* Definitions specific to M25P flash +* +******************************************************************************* +* Instructions +*/ +#define M25P_INSTR_WREN 0x06 +#define M25P_INSTR_WRDI 0x04 +#define M25P_INSTR_RDID 0x9f +#define M25P_INSTR_RDSR 0x05 +#define M25P_INSTR_WRSR 0x01 +#define M25P_INSTR_READ 0x03 +#define M25P_INSTR_FAST_READ 0x0b +#define M25P_INSTR_PP 0x02 +#define M25P_INSTR_SE 0xd8 +#define M25P_INSTR_BE 0xc7 +#define 
M25P_INSTR_DP 0xb9 +#define M25P_INSTR_RES 0xab + +/* all are 1MB windows */ + +#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000 +#define NETXEN_PCI_CRB_WINDOW(A) \ + (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE) + +#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU) +#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE) +#define NETXEN_CRB_ROMUSB \ + NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) +#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) +#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0) +#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) +#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) + +#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) +#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2) +#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0) +#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1) +#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2) +#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3) +#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2) +#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) +#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) +#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) +#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN) + +#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) +#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD + +#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR)) +#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS)) +#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK)) +#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) +#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) +#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) +#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) +#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) +#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) +#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) +#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) +#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) +#define ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) +#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) +#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) +#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) +#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) + +#define NETXEN_PCI_MAPSIZE 128 +#define NETXEN_PCI_DDR_NET (0x00000000UL) +#define NETXEN_PCI_QDR_NET (0x04000000UL) +#define NETXEN_PCI_DIRECT_CRB (0x04400000UL) +#define NETXEN_PCI_CAMQM (0x04800000UL) +#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) +#define NETXEN_PCI_OCM0 (0x05000000UL) +#define NETXEN_PCI_OCM0_MAX (0x050fffffUL) +#define NETXEN_PCI_OCM1 (0x05100000UL) +#define NETXEN_PCI_OCM1_MAX (0x051fffffUL) +#define NETXEN_PCI_CRBSPACE (0x06000000UL) 
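To make the 1 MB window arithmetic concrete, a small standalone sketch (the constants mirror NETXEN_PCI_CRBSPACE, NETXEN_PCI_CRB_WINDOWSIZE and the PX-map position of the NIU agent defined above; the demo program itself is illustrative, not part of the patch): each agent gets a 1 MB slice of CRB space starting at NETXEN_PCI_CRBSPACE, so the NIU block at PX-map index 6 resolves to 0x06600000, the base that the NETXEN_NIU_* register offsets below build on.

#include <stdio.h>

/* Mirrors NETXEN_PCI_CRBSPACE, NETXEN_PCI_CRB_WINDOWSIZE and the
 * PX-map index of the NIU agent from this header. */
#define CRBSPACE     0x06000000UL
#define WINDOWSIZE   0x00100000UL	/* 1 MB per agent window */
#define PX_MAP_NIU   6

int main(void)
{
	unsigned long crb_niu = CRBSPACE + PX_MAP_NIU * WINDOWSIZE;

	printf("NETXEN_CRB_NIU = 0x%lx\n", crb_niu);	/* prints 0x6600000 */
	return 0;
}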
+#define NETXEN_PCI_128MB_SIZE (0x08000000UL) +#define NETXEN_PCI_32MB_SIZE (0x02000000UL) +#define NETXEN_PCI_2MB_SIZE (0x00200000UL) + +#define NETXEN_PCI_MN_2M (0) +#define NETXEN_PCI_MS_2M (0x80000) +#define NETXEN_PCI_OCM0_2M (0x000c0000UL) +#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL) +#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL) + +#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) + +#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL) +#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL) +#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL) +#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) +#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) +#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL) +#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) + +/* + * Register offsets for MN + */ +#define NETXEN_MIU_CONTROL (0x000) +#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL) + + /* 200ms delay in each loop */ +#define NETXEN_NIU_PHY_WAITLEN 200000 + /* 10 seconds before we give up */ +#define NETXEN_NIU_PHY_WAITMAX 50 +#define NETXEN_NIU_MAX_GBE_PORTS 4 +#define NETXEN_NIU_MAX_XG_PORTS 2 + +#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000) + +#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004) +#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008) +#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c) +#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010) +#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014) +#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018) +#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c) +#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020) +#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024) +#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028) +#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c) +#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030) +#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034) +#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038) +#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c) +#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040) +#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044) +#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048) + +#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c) + +#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050) +#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054) +#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058) +#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c) +#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060) +#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064) +#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068) +#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c) +#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070) +#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074) +#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078) +#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c) +#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088) +#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c) +#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090) +#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) +#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) +#define NETXEN_NIU_XG_PAUSE_LEVEL 
(NETXEN_CRB_NIU + 0x000dc) +#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac) +#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0) +#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) +#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c) + +#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450) + +#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c) +#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120) +#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124) + +#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000) + +#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010) +#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014) +#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) +#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) + +#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080) +#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100) + +#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ + (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) +#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ + (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000) +#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \ + (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000) +#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \ + (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000) +#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \ + (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000) +#define NETXEN_NIU_GB_TEST_REG(I) \ + (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \ + (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \ + (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \ + (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \ + (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \ + (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000) +#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \ + (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000) +#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \ + (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000) +#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \ + (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000) +#define NETXEN_NIU_GB_STATION_ADDR_0(I) \ + (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000) +#define NETXEN_NIU_GB_STATION_ADDR_1(I) \ + (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000) + +#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000) +#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004) +#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008) +#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c) +#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010) +#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014) +#define NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018) +#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c) +#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020) +#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024) +#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028) +#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c) +#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030) +#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034) +#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038) +#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c) +#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040) +#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044) +#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 
0x70048) +#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c) +#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050) +#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054) +#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058) +#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000) +#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004) +#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008) +#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c) +#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010) +#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014) +#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018) +#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c) +#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020) +#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024) +#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028) +#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c) +#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030) +#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034) +#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038) +#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c) +#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040) +#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044) +#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048) +#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c) +#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050) +#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) +#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) + +/* P3 802.3ap */ +#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000) +#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000) +#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000) +#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000) +#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000) +#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000) +#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000) +#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000) +#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000) +#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) +#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) + + +#define TEST_AGT_CTRL (0x00) + +#define TA_CTL_START 1 +#define TA_CTL_ENABLE 2 +#define TA_CTL_WRITE 4 +#define TA_CTL_BUSY 8 + +/* + * Register offsets for MN + */ +#define MIU_TEST_AGT_BASE (0x90) + +#define MIU_TEST_AGT_ADDR_LO (0x04) +#define MIU_TEST_AGT_ADDR_HI (0x08) +#define MIU_TEST_AGT_WRDATA_LO (0x10) +#define MIU_TEST_AGT_WRDATA_HI (0x14) +#define MIU_TEST_AGT_RDDATA_LO (0x18) +#define MIU_TEST_AGT_RDDATA_HI (0x1c) + +#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 +#define 
MIU_TEST_AGT_UPPER_ADDR(off) (0) + +/* + * Register offsets for MS + */ +#define SIU_TEST_AGT_BASE (0x60) + +#define SIU_TEST_AGT_ADDR_LO (0x04) +#define SIU_TEST_AGT_ADDR_HI (0x18) +#define SIU_TEST_AGT_WRDATA_LO (0x08) +#define SIU_TEST_AGT_WRDATA_HI (0x0c) +#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i))) +#define SIU_TEST_AGT_RDDATA_LO (0x10) +#define SIU_TEST_AGT_RDDATA_HI (0x14) +#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i))) + +#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 +#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) + +/* XG Link status */ +#define XG_LINK_UP 0x10 +#define XG_LINK_DOWN 0x20 + +#define XG_LINK_UP_P3 0x01 +#define XG_LINK_DOWN_P3 0x02 +#define XG_LINK_STATE_P3_MASK 0xf +#define XG_LINK_STATE_P3(pcifn,val) \ + (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) + +#define P3_LINK_SPEED_MHZ 100 +#define P3_LINK_SPEED_MASK 0xff +#define P3_LINK_SPEED_REG(pcifn) \ + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) +#define P3_LINK_SPEED_VAL(pcifn, reg) \ + (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) + +#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) +#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) +#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) +#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) +#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) +#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) +#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) +#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124)) + +#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200)) +#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700)) +#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X)) +#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X)) +#define NETXEN_INTR_MODE_REG NETXEN_NIC_REG(0x44) +#define NETXEN_MSI_MODE 0x1 +#define NETXEN_INTX_MODE 0x2 + +#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18)) +#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c)) +#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20)) +#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24)) +#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28)) + +#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c)) +#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40)) + +#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50)) +#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c)) + +#define CRB_XG_STATE (NETXEN_NIC_REG(0x94)) +#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98)) +#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8)) +#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec)) + +#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4)) +#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc)) +#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4)) + +#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08)) +#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c)) +#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac)) +#define CRB_CMD_CONSUMER_OFFSET_1 (NETXEN_NIC_REG(0x1b0)) +#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8)) +#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc)) +#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0)) +#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4)) +#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4)) + +#define CRB_V2P_0 (NETXEN_NIC_REG(0x290)) +#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) +#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0)) + +#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8)) +#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0)) +#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4)) +#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8)) + +#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128)) +#define CRB_FW_CAPABILITIES_2 
(NETXEN_CAM_RAM(0x12c)) +#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0)) + +/* + * capabilities register, can be used to selectively enable/disable features + * for backward compatibility + */ +#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8) +#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270) + +#define INTR_SCHEME_PERPORT 0x1 +#define MSI_MODE_MULTIFUNC 0x1 + +/* used for ethtool tests */ +#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280) + +/* + * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address + * which can be read by the Phantom host to get producer/consumer indexes from + * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following + * registers will be used for the addresses of the ring's shared memory + * on the Phantom. + */ + +#define nx_get_temp_val(x) ((x) >> 16) +#define nx_get_temp_state(x) ((x) & 0xffff) +#define nx_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + NX_TEMP_NORMAL = 0x1, /* Normal operating range */ + NX_TEMP_WARN, /* Sound alert, temperature getting high */ + NX_TEMP_PANIC /* Fatal error, hardware has shut down. */ +}; + +/* Lock IDs for PHY lock */ +#define PHY_LOCK_DRIVER 0x44524956 + +/* Used for PS PCI Memory access */ +#define PCIX_PS_OP_ADDR_LO (0x10000) +/* via CRB (PS side only) */ +#define PCIX_PS_OP_ADDR_HI (0x10004) + +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +#define PCIX_CRB_WINDOW (0x10210) +#define PCIX_CRB_WINDOW_F0 (0x10210) +#define PCIX_CRB_WINDOW_F1 (0x10230) +#define PCIX_CRB_WINDOW_F2 (0x10250) +#define PCIX_CRB_WINDOW_F3 (0x10270) +#define PCIX_CRB_WINDOW_F4 (0x102ac) +#define PCIX_CRB_WINDOW_F5 (0x102bc) +#define PCIX_CRB_WINDOW_F6 (0x102cc) +#define PCIX_CRB_WINDOW_F7 (0x102dc) +#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \ + (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_MN_WINDOW (0x10200) +#define PCIX_MN_WINDOW_F0 (0x10200) +#define PCIX_MN_WINDOW_F1 (0x10220) +#define PCIX_MN_WINDOW_F2 (0x10240) +#define PCIX_MN_WINDOW_F3 (0x10260) +#define PCIX_MN_WINDOW_F4 (0x102a0) +#define PCIX_MN_WINDOW_F5 (0x102b0) +#define PCIX_MN_WINDOW_F6 (0x102c0) +#define PCIX_MN_WINDOW_F7 (0x102d0) +#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \ + (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_SN_WINDOW (0x10208) +#define PCIX_SN_WINDOW_F0 (0x10208) +#define PCIX_SN_WINDOW_F1 (0x10228) +#define PCIX_SN_WINDOW_F2 (0x10248) +#define PCIX_SN_WINDOW_F3 (0x10268) +#define PCIX_SN_WINDOW_F4 (0x102a8) +#define PCIX_SN_WINDOW_F5 (0x102b8) +#define PCIX_SN_WINDOW_F6 (0x102c8) +#define PCIX_SN_WINDOW_F7 (0x102d8) +#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? 
\ + (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ + (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) + +#define PCIX_OCM_WINDOW (0x10800) +#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func)) + +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +#define PCIX_MSI_F0 (0x13000) +#define PCIX_MSI_F1 (0x13004) +#define PCIX_MSI_F2 (0x13008) +#define PCIX_MSI_F3 (0x1300c) +#define PCIX_MSI_F4 (0x13010) +#define PCIX_MSI_F5 (0x13014) +#define PCIX_MSI_F6 (0x13018) +#define PCIX_MSI_F7 (0x1301c) +#define PCIX_MSI_F(i) (0x13000+((i)*4)) + +#define PCIX_PS_MEM_SPACE (0x90000) + +#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg)) +#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg)) + +#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg)) + +#define PCIE_MAX_DMA_XFER_SIZE (0x1404c) + +#define PCIE_DCR 0x00d8 + +#define PCIE_SEM0_LOCK (0x1c000) +#define PCIE_SEM0_UNLOCK (0x1c004) +#define PCIE_SEM1_LOCK (0x1c008) +#define PCIE_SEM1_UNLOCK (0x1c00c) +#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */ +#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ +#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ +#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ +#define PCIE_SEM4_LOCK (0x1c020) +#define PCIE_SEM4_UNLOCK (0x1c024) +#define PCIE_SEM5_LOCK (0x1c028) /* API lock */ +#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */ +#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */ +#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */ +#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ +#define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ +#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) +#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) +#define PCIE_MISCCFG_RC (0x1206c) +#define PCIE_TGT_SPLIT_CHICKEN (0x12080) +#define PCIE_CHICKEN3 (0x120c8) + +#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) +#define PCIE_MAX_MASTER_SPLIT (0x14048) + +#define NETXEN_PORT_MODE_NONE 0 +#define NETXEN_PORT_MODE_XG 1 +#define NETXEN_PORT_MODE_GB 2 +#define NETXEN_PORT_MODE_802_3_AP 3 +#define NETXEN_PORT_MODE_AUTO_NEG 4 +#define NETXEN_PORT_MODE_AUTO_NEG_1G 5 +#define NETXEN_PORT_MODE_AUTO_NEG_XG 6 +#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24)) +#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198)) + +#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184)) +#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188)) + +#define NX_PEG_TUNE_MN_PRESENT 0x1 +#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c)) + +#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14)) +#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0)) +#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8)) +#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac)) +#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138)) +#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140)) +#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178)) + +/* MiniDIMM related macros */ +#define 
NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258)) +#define NETXEN_DIMM_PRESENT 0x1 +#define NETXEN_DIMM_MEMTYPE_DDR2_SDRAM 0x2 +#define NETXEN_DIMM_SIZE 0x4 +#define NETXEN_DIMM_MEMTYPE(VAL) ((VAL >> 3) & 0xf) +#define NETXEN_DIMM_NUMROWS(VAL) ((VAL >> 7) & 0xf) +#define NETXEN_DIMM_NUMCOLS(VAL) ((VAL >> 11) & 0xf) +#define NETXEN_DIMM_NUMRANKS(VAL) ((VAL >> 15) & 0x3) +#define NETXEN_DIMM_DATAWIDTH(VAL) ((VAL >> 18) & 0x3) +#define NETXEN_DIMM_NUMBANKS(VAL) ((VAL >> 21) & 0xf) +#define NETXEN_DIMM_TYPE(VAL) ((VAL >> 25) & 0x3f) +#define NETXEN_DIMM_VALID_FLAG 0x80000000 + +#define NETXEN_DIMM_MEM_DDR2_SDRAM 0x8 + +#define NETXEN_DIMM_STD_MEM_SIZE 512 + +#define NETXEN_DIMM_TYPE_RDIMM 0x1 +#define NETXEN_DIMM_TYPE_UDIMM 0x2 +#define NETXEN_DIMM_TYPE_SO_DIMM 0x4 +#define NETXEN_DIMM_TYPE_Micro_DIMM 0x8 +#define NETXEN_DIMM_TYPE_Mini_RDIMM 0x10 +#define NETXEN_DIMM_TYPE_Mini_UDIMM 0x20 + +/* Device State */ +#define NX_DEV_COLD 1 +#define NX_DEV_INITALIZING 2 +#define NX_DEV_READY 3 +#define NX_DEV_NEED_RESET 4 +#define NX_DEV_NEED_QUISCENT 5 +#define NX_DEV_NEED_AER 6 +#define NX_DEV_FAILED 7 + +#define NX_RCODE_DRIVER_INFO 0x20000000 +#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000 +#define NX_RCODE_FATAL_ERROR 0x80000000 +#define NX_FWERROR_PEGNUM(code) ((code) & 0xff) +#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff) +#define NX_FWERROR_PEGSTAT1(code) ((code >> 8) & 0x1fffff) + +#define FW_POLL_DELAY (2 * HZ) +#define FW_FAIL_THRESH 3 +#define FW_POLL_THRESH 10 + +#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) +#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) + +/* + * PCI Interrupt Vector Values. + */ +#define PCIX_INT_VECTOR_BIT_F0 0x0080 +#define PCIX_INT_VECTOR_BIT_F1 0x0100 +#define PCIX_INT_VECTOR_BIT_F2 0x0200 +#define PCIX_INT_VECTOR_BIT_F3 0x0400 +#define PCIX_INT_VECTOR_BIT_F4 0x0800 +#define PCIX_INT_VECTOR_BIT_F5 0x1000 +#define PCIX_INT_VECTOR_BIT_F6 0x2000 +#define PCIX_INT_VECTOR_BIT_F7 0x4000 + +struct netxen_legacy_intr_set { + uint32_t int_vec_bit; + uint32_t tgt_status_reg; + uint32_t tgt_mask_reg; + uint32_t pci_int_reg; +}; + +#define NX_LEGACY_INTR_CONFIG \ +{ \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ + 
.tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ + .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ +} + +#endif /* __NETXEN_NIC_HDR_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c new file mode 100644 index 000000000..db80eb1c6 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -0,0 +1,2593 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". + * + */ + +#include <linux/slab.h> +#include "netxen_nic.h" +#include "netxen_nic_hw.h" + +#include <net/ip.h> + +#define MASK(n) ((1ULL<<(n))-1) +#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) +#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) +#define MS_WIN(addr) (addr & 0x0ffc0000) + +#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) + +#define CRB_BLK(off) ((off >> 20) & 0x3f) +#define CRB_SUBBLK(off) ((off >> 16) & 0xf) +#define CRB_WINDOW_2M (0x130060) +#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) +#define CRB_INDIRECT_2M (0x1e0000UL) + +static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data); +static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, + void __iomem *addr); +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) | (((u64) readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(((u32) (val)), (addr)); + writel(((u32) (val >> 32)), (addr + 4)); +} +#endif + +#define PCI_OFFSET_FIRST_RANGE(adapter, off) \ + ((adapter)->ahw.pci_base0 + (off)) +#define PCI_OFFSET_SECOND_RANGE(adapter, off) \ + ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START) +#define PCI_OFFSET_THIRD_RANGE(adapter, off) \ + ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START) + +static void __iomem *pci_base_offset(struct netxen_adapter *adapter, + unsigned long off) +{ + if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END)) + return PCI_OFFSET_FIRST_RANGE(adapter, off); + + if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END)) + return PCI_OFFSET_SECOND_RANGE(adapter, off); + + if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END)) + return PCI_OFFSET_THIRD_RANGE(adapter, off); + + return NULL; +} + +static crb_128M_2M_block_map_t +crb_128M_2M_map[64] __cacheline_aligned_in_smp = { + {{{0, 0, 0, 0} } }, /* 0: PCI */ + {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ + {1, 0x0110000, 0x0120000, 0x130000}, + {1, 0x0120000, 0x0122000, 0x124000}, + {1, 0x0130000, 0x0132000, 0x126000}, + {1, 0x0140000, 0x0142000, 0x128000}, + {1, 0x0150000, 0x0152000, 0x12a000}, + {1,
0x0160000, 0x0170000, 0x110000}, + {1, 0x0170000, 0x0172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x01e0000, 0x01e0800, 0x122000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ + {{{0, 0, 0, 0} } }, /* 3: */ + {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ + {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ + {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ + {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ + {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ + {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ + {{{0, 0x1000000, 0x1004000, 0x1a8000} 
} },/* 16: XDMA */ + {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ + {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ + {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ + {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ + {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ + {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ + {{{0, 0, 0, 0} } }, /* 23: */ + {{{0, 0, 0, 0} } }, /* 24: */ + {{{0, 0, 0, 0} } }, /* 25: */ + {{{0, 0, 0, 0} } }, /* 26: */ + {{{0, 0, 0, 0} } }, /* 27: */ + {{{0, 0, 0, 0} } }, /* 28: */ + {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ + {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ + {{{0} } }, /* 32: PCI */ + {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ + {{{0} } }, /* 35: */ + {{{0} } }, /* 36: */ + {{{0} } }, /* 37: */ + {{{0} } }, /* 38: */ + {{{0} } }, /* 39: */ + {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ + {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ + {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ + {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ + {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ + {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ + {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ + {{{0} } }, /* 52: */ + {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ + {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ + {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ + {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ + {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ + {{{0} } }, /* 59: I2C0 */ + {{{0} } }, /* 60: I2C1 */ + {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ + {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static unsigned crb_hub_agt[64] = +{ + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PS, + NETXEN_HW_CRB_HUB_AGT_ADR_MN, + NETXEN_HW_CRB_HUB_AGT_ADR_MS, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_SRE, + NETXEN_HW_CRB_HUB_AGT_ADR_NIU, + NETXEN_HW_CRB_HUB_AGT_ADR_QMN, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, + NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, + NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, + NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, + NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, + NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, + 
NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, + NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, + NETXEN_HW_CRB_HUB_AGT_ADR_PGND, + NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, + NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, + NETXEN_HW_CRB_HUB_AGT_ADR_SN, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_EG, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PS, + NETXEN_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, + NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, + NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, + NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, + NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, + NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_SMB, + NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, + NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +/* PCI Windowing for DDR regions. */ + +#define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ + +#define NETXEN_PCIE_SEM_TIMEOUT 10000 + +static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); + +int +netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) +{ + int done = 0, timeout = 0; + + while (!done) { + done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem))); + if (done == 1) + break; + if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) + return -EIO; + msleep(1); + } + + if (id_reg) + NXWR32(adapter, id_reg, adapter->portnum); + + return 0; +} + +void +netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) +{ + NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); +} + +static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) +{ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); + } + + return 0; +} + +/* Disable an XG interface */ +static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) +{ + __u32 mac_cfg; + u32 port = adapter->physical_port; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + + if (port >= NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_cfg = 0; + if (NXWR32(adapter, + NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg)) + return -EIO; + return 0; +} + +#define NETXEN_UNICAST_ADDR(port, index) \ + (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) +#define NETXEN_MCAST_ADDR(port, index) \ + (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) +#define MAC_HI(addr) \ + ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) +#define MAC_LO(addr) \ + ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) + +static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) +{ + u32 mac_cfg; + u32 cnt = 0; + __u32 reg = 0x0200; + u32 port = adapter->physical_port; + u16 board_type = adapter->ahw.board_type; + + if (port >= NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port)); + mac_cfg &= ~0x4; + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); + + if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) || + (board_type == 
NETXEN_BRDTYPE_P2_SB31_10G_HMEZ)) + reg = (0x20 << port); + + NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg); + + mdelay(10); + + while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20) + mdelay(10); + + if (cnt < 20) { + + reg = NXRD32(adapter, + NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); + + if (mode == NETXEN_NIU_PROMISC_MODE) + reg = (reg | 0x2000UL); + else + reg = (reg & ~0x2000UL); + + if (mode == NETXEN_NIU_ALLMULTI_MODE) + reg = (reg | 0x1000UL); + else + reg = (reg & ~0x1000UL); + + NXWR32(adapter, + NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); + } + + mac_cfg |= 0x4; + NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg); + + return 0; +} + +static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) +{ + u32 mac_hi, mac_lo; + u32 reg_hi, reg_lo; + + u8 phy = adapter->physical_port; + + if (phy >= NETXEN_NIU_MAX_XG_PORTS) + return -EINVAL; + + mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24); + mac_hi = addr[2] | ((u32)addr[3] << 8) | + ((u32)addr[4] << 16) | ((u32)addr[5] << 24); + + reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); + reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); + + /* write twice to flush */ + if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) + return -EIO; + if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) + return -EIO; + + return 0; +} + +static int +netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) +{ + u32 val = 0; + u16 port = adapter->physical_port; + u8 *addr = adapter->mac_addr; + + if (adapter->mc_enabled) + return 0; + + val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); + val |= (1UL << (28+port)); + NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); + + /* add broadcast addr to filter */ + val = 0xffffff; + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); + + /* add station addr to filter */ + val = MAC_HI(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val); + val = MAC_LO(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val); + + adapter->mc_enabled = 1; + return 0; +} + +static int +netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) +{ + u32 val = 0; + u16 port = adapter->physical_port; + u8 *addr = adapter->mac_addr; + + if (!adapter->mc_enabled) + return 0; + + val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG); + val &= ~(1UL << (28+port)); + NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); + + val = MAC_HI(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val); + val = MAC_LO(addr); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val); + + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); + NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); + + adapter->mc_enabled = 0; + return 0; +} + +static int +netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, + int index, u8 *addr) +{ + u32 hi = 0, lo = 0; + u16 port = adapter->physical_port; + + lo = MAC_LO(addr); + hi = MAC_HI(addr); + + NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi); + NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo); + + return 0; +} + +static void netxen_p2_nic_set_multi(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + u8 null_addr[ETH_ALEN]; + int i; + + eth_zero_addr(null_addr); + + if (netdev->flags & IFF_PROMISC) { + + adapter->set_promisc(adapter, + NETXEN_NIU_PROMISC_MODE); + + /* Full promiscuous mode */ + netxen_nic_disable_mcast_filter(adapter); + + return; + } + 
+ if (netdev_mc_empty(netdev)) { + adapter->set_promisc(adapter, + NETXEN_NIU_NON_PROMISC_MODE); + netxen_nic_disable_mcast_filter(adapter); + return; + } + + adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); + if (netdev->flags & IFF_ALLMULTI || + netdev_mc_count(netdev) > adapter->max_mc_count) { + netxen_nic_disable_mcast_filter(adapter); + return; + } + + netxen_nic_enable_mcast_filter(adapter); + + i = 0; + netdev_for_each_mc_addr(ha, netdev) + netxen_nic_set_mcast_addr(adapter, i++, ha->addr); + + /* Clear out remaining addresses */ + while (i < adapter->max_mc_count) + netxen_nic_set_mcast_addr(adapter, i++, null_addr); +} + +static int +netxen_send_cmd_descs(struct netxen_adapter *adapter, + struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) +{ + u32 i, producer, consumer; + struct netxen_cmd_buffer *pbuf; + struct cmd_desc_type0 *cmd_desc; + struct nx_host_tx_ring *tx_ring; + + i = 0; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EIO; + + tx_ring = adapter->tx_ring; + __netif_tx_lock_bh(tx_ring->txq); + + producer = tx_ring->producer; + consumer = tx_ring->sw_consumer; + + if (nr_desc >= netxen_tx_avail(tx_ring)) { + netif_tx_stop_queue(tx_ring->txq); + smp_mb(); + if (netxen_tx_avail(tx_ring) > nr_desc) { + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } + } + + do { + cmd_desc = &cmd_desc_arr[i]; + + pbuf = &tx_ring->cmd_buf_arr[producer]; + pbuf->skb = NULL; + pbuf->frag_count = 0; + + memcpy(&tx_ring->desc_head[producer], + &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); + + producer = get_next_index(producer, tx_ring->num_desc); + i++; + + } while (i != nr_desc); + + tx_ring->producer = producer; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + + __netif_tx_unlock_bh(tx_ring->txq); + + return 0; +} + +static int +nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) +{ + nx_nic_req_t req; + nx_mac_req_t *mac_req; + u64 word; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); + + word = NX_MAC_EVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + mac_req = (nx_mac_req_t *)&req.words[0]; + mac_req->op = op; + memcpy(mac_req->mac_addr, addr, ETH_ALEN); + + return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); +} + +static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, + const u8 *addr, struct list_head *del_list) +{ + struct list_head *head; + nx_mac_list_t *cur; + + /* look up if already exists */ + list_for_each(head, del_list) { + cur = list_entry(head, nx_mac_list_t, list); + + if (ether_addr_equal(addr, cur->mac_addr)) { + list_move_tail(head, &adapter->mac_list); + return 0; + } + } + + cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); + if (cur == NULL) + return -ENOMEM; + + memcpy(cur->mac_addr, addr, ETH_ALEN); + list_add_tail(&cur->list, &adapter->mac_list); + return nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_ADD); +} + +static void netxen_p3_nic_set_multi(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netdev_hw_addr *ha; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + u32 mode = VPORT_MISS_MODE_DROP; + LIST_HEAD(del_list); + struct list_head *head; + nx_mac_list_t *cur; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + list_splice_tail_init(&adapter->mac_list, &del_list); + + nx_p3_nic_add_mac(adapter, 
adapter->mac_addr, &del_list); + nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); + + if (netdev->flags & IFF_PROMISC) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + goto send_fw_cmd; + } + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > adapter->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + goto send_fw_cmd; + } + + if (!netdev_mc_empty(netdev)) { + netdev_for_each_mc_addr(ha, netdev) + nx_p3_nic_add_mac(adapter, ha->addr, &del_list); + } + +send_fw_cmd: + adapter->set_promisc(adapter, mode); + head = &del_list; + while (!list_empty(head)) { + cur = list_entry(head->next, nx_mac_list_t, list); + + nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) +{ + nx_nic_req_t req; + u64 word; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(mode); + + return netxen_send_cmd_descs(adapter, + (struct cmd_desc_type0 *)&req, 1); +} + +void netxen_p3_free_mac_list(struct netxen_adapter *adapter) +{ + nx_mac_list_t *cur; + struct list_head *head = &adapter->mac_list; + + while (!list_empty(head)) { + cur = list_entry(head->next, nx_mac_list_t, list); + nx_p3_sre_macaddr_change(adapter, + cur->mac_addr, NETXEN_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) +{ + /* assuming caller has already copied new addr to netdev */ + netxen_p3_nic_set_multi(adapter->netdev); + return 0; +} + +#define NETXEN_CONFIG_INTR_COALESCE 3 + +/* + * Send the interrupt coalescing parameter set by ethtool to the card. + */ +int netxen_config_intr_coalesce(struct netxen_adapter *adapter) +{ + nx_nic_req_t req; + u64 word[6]; + int rv, i; + + memset(&req, 0, sizeof(nx_nic_req_t)); + memset(word, 0, sizeof(word)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word[0]); + + memcpy(&word[0], &adapter->coal, sizeof(adapter->coal)); + for (i = 0; i < 6; i++) + req.words[i] = cpu_to_le64(word[i]); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. Could not send " + "interrupt coalescing parameters\n"); + } + + return rv; +} + +int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv = 0; + + if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. 
Could not send " + "configure hw lro request\n"); + } + + return rv; +} + +int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv = 0; + + if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable) + return rv; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "ERROR. Could not send " + "configure bridge mode request\n"); + } + + adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED; + + return rv; +} + + +#define RSS_HASHTYPE_IP_TCP 0x3 + +int netxen_config_rss(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int i, rv; + + static const u64 key[] = { + 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL + }; + + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + /* + * RSS request: + * bits 3-0: hash_method + * 5-4: hash_type_ipv4 + * 7-6: hash_type_ipv6 + * 8: enable + * 9: use indirection table + * 47-10: reserved + * 63-48: indirection table mask + */ + word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | + ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | + ((u64)(enable & 0x1) << 8) | + ((0x7ULL) << 48); + req.words[0] = cpu_to_le64(word); + for (i = 0; i < ARRAY_SIZE(key); i++) + req.words[i+1] = cpu_to_le64(key[i]); + + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not configure RSS\n", + adapter->netdev->name); + } + + return rv; +} + +int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd) +{ + nx_nic_req_t req; + u64 word; + int rv; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(cmd); + memcpy(&req.words[1], &ip, sizeof(u32)); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n", + adapter->netdev->name, + (cmd == NX_IP_UP) ? 
"Add" : "Remove", ip); + } + return rv; +} + +int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) +{ + nx_nic_req_t req; + u64 word; + int rv; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + req.words[0] = cpu_to_le64(enable | (enable << 8)); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not configure link notification\n", + adapter->netdev->name); + } + + return rv; +} + +int netxen_send_lro_cleanup(struct netxen_adapter *adapter) +{ + nx_nic_req_t req; + u64 word; + int rv; + + if (!test_bit(__NX_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(nx_nic_req_t)); + req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); + + word = NX_NIC_H2C_OPCODE_LRO_REQUEST | + ((u64)adapter->portnum << 16) | + ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ; + + req.req_hdr = cpu_to_le64(word); + + rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) { + printk(KERN_ERR "%s: could not cleanup lro flows\n", + adapter->netdev->name); + } + return rv; +} + +/* + * netxen_nic_change_mtu - Change the Maximum Transfer Unit + * @returns 0 on success, negative on failure + */ + +#define MTU_FUDGE_FACTOR 100 + +int netxen_nic_change_mtu(struct net_device *netdev, int mtu) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + int max_mtu; + int rc = 0; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + max_mtu = P3_MAX_MTU; + else + max_mtu = P2_MAX_MTU; + + if (mtu > max_mtu) { + printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", + netdev->name, max_mtu); + return -EINVAL; + } + + if (adapter->set_mtu) + rc = adapter->set_mtu(adapter, mtu); + + if (!rc) + netdev->mtu = mtu; + + return rc; +} + +static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, + int size, __le32 * buf) +{ + int i, v, addr; + __le32 *ptr32; + + addr = base; + ptr32 = buf; + for (i = 0; i < size / sizeof(u32); i++) { + if (netxen_rom_fast_read(adapter, addr, &v) == -1) + return -1; + *ptr32 = cpu_to_le32(v); + ptr32++; + addr += sizeof(u32); + } + if ((char *)buf + size > (char *)ptr32) { + __le32 local; + if (netxen_rom_fast_read(adapter, addr, &v) == -1) + return -1; + local = cpu_to_le32(v); + memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); + } + + return 0; +} + +int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac) +{ + __le32 *pmac = (__le32 *) mac; + u32 offset; + + offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64)); + + if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1) + return -1; + + if (*mac == ~0ULL) { + + offset = NX_OLD_MAC_ADDR_OFFSET + + (adapter->portnum * sizeof(u64)); + + if (netxen_get_flash_block(adapter, + offset, sizeof(u64), pmac) == -1) + return -1; + + if (*mac == ~0ULL) + return -1; + } + return 0; +} + +int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac) +{ + uint32_t crbaddr, mac_hi, mac_lo; + int pci_func = adapter->ahw.pci_func; + + crbaddr = CRB_MAC_BLOCK_START + + (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1)); + + mac_lo = NXRD32(adapter, crbaddr); + mac_hi = NXRD32(adapter, crbaddr+4); + + if (pci_func & 1) + *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16)); + else + *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32)); + + return 0; +} + +/* + * Changes the CRB window to the specified window. 
+ */ +static void +netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter, + u32 window) +{ + void __iomem *offset; + int count = 10; + u8 func = adapter->ahw.pci_func; + + if (adapter->ahw.crb_win == window) + return; + + offset = PCI_OFFSET_SECOND_RANGE(adapter, + NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); + + writel(window, offset); + do { + if (window == readl(offset)) + break; + + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d\n", + (window == NETXEN_WINDOW_ONE)); + udelay(1); + + } while (--count > 0); + + if (count > 0) + adapter->ahw.crb_win = window; +} + +/* + * Returns < 0 if off is not valid, + * 1 if window access is needed. 'off' is set to offset from + * CRB space in 128M pci map + * 0 if no window access is needed. 'off' is set to 2M addr + * In: 'off' is offset from base in 128M pci map + */ +static int +netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, + ulong off, void __iomem **addr) +{ + crb_128M_2M_sub_block_map_t *m; + + + if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE)) + return -EINVAL; + + off -= NETXEN_PCI_CRBSPACE; + + /* + * Try direct map + */ + m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; + + if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { + *addr = adapter->ahw.pci_base0 + m->start_2M + + (off - m->start_128M); + return 0; + } + + /* + * Not in direct map, use crb window + */ + *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + + (off & MASK(16)); + return 1; +} + +/* + * In: 'off' is offset from CRB space in 128M pci map + * Out: 'off' is 2M pci map addr + * side effect: lock crb window + */ +static void +netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off) +{ + u32 window; + void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M; + + off -= NETXEN_PCI_CRBSPACE; + + window = CRB_HI(off); + + writel(window, addr); + if (readl(addr) != window) { + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d off 0x%lx\n", + window, off); + } +} + +static void __iomem * +netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter, + ulong win_off, void __iomem **mem_ptr) +{ + ulong off = win_off; + void __iomem *addr; + resource_size_t mem_base; + + if (ADDR_IN_WINDOW1(win_off)) + off = NETXEN_CRB_NORMAL(win_off); + + addr = pci_base_offset(adapter, off); + if (addr) + return addr; + + if (adapter->ahw.pci_len0 == 0) + off -= NETXEN_PCI_CRBSPACE; + + mem_base = pci_resource_start(adapter->pdev, 0); + *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE); + if (*mem_ptr) + addr = *mem_ptr + (off & (PAGE_SIZE - 1)); + + return addr; +} + +static int +netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + void __iomem *addr, *mem_ptr = NULL; + + addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); + if (!addr) + return -EIO; + + if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ + netxen_nic_io_write_128M(adapter, addr, data); + } else { /* Window 0 */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + writel(data, addr); + netxen_nic_pci_set_crbwindow_128M(adapter, + NETXEN_WINDOW_ONE); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + } + + if (mem_ptr) + iounmap(mem_ptr); + + return 0; +} + +static u32 +netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off) +{ + unsigned long flags; + void __iomem *addr, *mem_ptr = NULL; + u32 data; + + 
addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr); + if (!addr) + return -EIO; + + if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ + data = netxen_nic_io_read_128M(adapter, addr); + } else { /* Window 0 */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + data = readl(addr); + netxen_nic_pci_set_crbwindow_128M(adapter, + NETXEN_WINDOW_ONE); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + } + + if (mem_ptr) + iounmap(mem_ptr); + + return data; +} + +static int +netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data) +{ + unsigned long flags; + int rv; + void __iomem *addr = NULL; + + rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) { + writel(data, addr); + return 0; + } + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + crb_win_lock(adapter); + netxen_nic_pci_set_crbwindow_2M(adapter, off); + writel(data, addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + return 0; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -EIO; +} + +static u32 +netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off) +{ + unsigned long flags; + int rv; + u32 data; + void __iomem *addr = NULL; + + rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr); + + if (rv == 0) + return readl(addr); + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw.crb_lock, flags); + crb_win_lock(adapter); + netxen_nic_pci_set_crbwindow_2M(adapter, off); + data = readl(addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw.crb_lock, flags); + return data; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -1; +} + +/* window 1 registers only */ +static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data) +{ + read_lock(&adapter->ahw.crb_lock); + writel(data, addr); + read_unlock(&adapter->ahw.crb_lock); +} + +static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, + void __iomem *addr) +{ + u32 val; + + read_lock(&adapter->ahw.crb_lock); + val = readl(addr); + read_unlock(&adapter->ahw.crb_lock); + + return val; +} + +static void netxen_nic_io_write_2M(struct netxen_adapter *adapter, + void __iomem *addr, u32 data) +{ + writel(data, addr); +} + +static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter, + void __iomem *addr) +{ + return readl(addr); +} + +void __iomem * +netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) +{ + void __iomem *addr = NULL; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if ((offset < NETXEN_CRB_PCIX_HOST2) && + (offset > NETXEN_CRB_PCIX_HOST)) + addr = PCI_OFFSET_SECOND_RANGE(adapter, offset); + else + addr = NETXEN_CRB_NORMALIZE(adapter, offset); + } else { + WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter, + offset, &addr)); + } + + return addr; +} + +static int +netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, + u64 addr, u32 *start) +{ + if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { + *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0); + return 0; + } else if (ADDR_IN_RANGE(addr, + NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1); + return 0; + } + + return -EIO; +} + +static int +netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, + u64 
addr, u32 *start) +{ + u32 window; + + window = OCM_WIN(addr); + + writel(window, adapter->ahw.ocm_win_crb); + /* read back to flush */ + readl(adapter->ahw.ocm_win_crb); + + adapter->ahw.ocm_win = window; + *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); + return 0; +} + +static int +netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off, + u64 *data, int op) +{ + void __iomem *addr, *mem_ptr = NULL; + resource_size_t mem_base; + int ret; + u32 start; + + spin_lock(&adapter->ahw.mem_lock); + + ret = adapter->pci_set_window(adapter, off, &start); + if (ret != 0) + goto unlock; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + addr = adapter->ahw.pci_base0 + start; + } else { + addr = pci_base_offset(adapter, start); + if (addr) + goto noremap; + + mem_base = pci_resource_start(adapter->pdev, 0) + + (start & PAGE_MASK); + mem_ptr = ioremap(mem_base, PAGE_SIZE); + if (mem_ptr == NULL) { + ret = -EIO; + goto unlock; + } + + addr = mem_ptr + (start & (PAGE_SIZE-1)); + } +noremap: + if (op == 0) /* read */ + *data = readq(addr); + else /* write */ + writeq(*data, addr); + +unlock: + spin_unlock(&adapter->ahw.mem_lock); + + if (mem_ptr) + iounmap(mem_ptr); + return ret; +} + +void +netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data) +{ + void __iomem *addr = adapter->ahw.pci_base0 + + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); + + spin_lock(&adapter->ahw.mem_lock); + *data = readq(addr); + spin_unlock(&adapter->ahw.mem_lock); +} + +void +netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data) +{ + void __iomem *addr = adapter->ahw.pci_base0 + + NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM); + + spin_lock(&adapter->ahw.mem_lock); + writeq(data, addr); + spin_unlock(&adapter->ahw.mem_lock); +} + +#define MAX_CTL_CHECK 1000 + +static int +netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, + u64 off, u64 data) +{ + int j, ret; + u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P2 has different SIU and MIU test agent base addr */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P2)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); + addr_hi = SIU_TEST_AGT_ADDR_HI; + data_lo = SIU_TEST_AGT_WRDATA_LO; + data_hi = SIU_TEST_AGT_WRDATA_HI; + off_lo = off & SIU_TEST_AGT_ADDR_MASK; + off_hi = SIU_TEST_AGT_UPPER_ADDR(off); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + addr_hi = MIU_TEST_AGT_ADDR_HI; + data_lo = MIU_TEST_AGT_WRDATA_LO; + data_hi = MIU_TEST_AGT_WRDATA_HI; + off_lo = off & MIU_TEST_AGT_ADDR_MASK; + off_hi = 0; + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || + ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + if (adapter->ahw.pci_len0 != 0) { + return netxen_nic_pci_mem_access_direct(adapter, + off, &data, 1); + } + } + + return -EIO; + +correct: + spin_lock(&adapter->ahw.mem_lock); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + + writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(off_hi, (mem_crb + addr_hi)); + writel(data & 0xffffffff, (mem_crb + data_lo)); + writel((data >> 32) & 0xffffffff, (mem_crb + data_hi)); + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), + 
(mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl((mem_crb + TEST_AGT_CTRL)); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + + netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); + spin_unlock(&adapter->ahw.mem_lock); + return ret; +} + +static int +netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P2 has different SIU and MIU test agent base addr */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P2)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE); + addr_hi = SIU_TEST_AGT_ADDR_HI; + data_lo = SIU_TEST_AGT_RDDATA_LO; + data_hi = SIU_TEST_AGT_RDDATA_HI; + off_lo = off & SIU_TEST_AGT_ADDR_MASK; + off_hi = SIU_TEST_AGT_UPPER_ADDR(off); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = pci_base_offset(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + addr_hi = MIU_TEST_AGT_ADDR_HI; + data_lo = MIU_TEST_AGT_RDDATA_LO; + data_hi = MIU_TEST_AGT_RDDATA_HI; + off_lo = off & MIU_TEST_AGT_ADDR_MASK; + off_hi = 0; + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) || + ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { + if (adapter->ahw.pci_len0 != 0) { + return netxen_nic_pci_mem_access_direct(adapter, + off, data, 0); + } + } + + return -EIO; + +correct: + spin_lock(&adapter->ahw.mem_lock); + netxen_nic_pci_set_crbwindow_128M(adapter, 0); + + writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(off_hi, (mem_crb + addr_hi)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + + temp = readl(mem_crb + data_hi); + val = ((u64)temp << 32); + val |= readl(mem_crb + data_lo); + *data = val; + ret = 0; + } + + netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE); + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +static int +netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, + u64 off, u64 data) +{ + int j, ret; + u32 temp, off8; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P3)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) + return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1); + + return -EIO; + +correct: + off8 = off & 0xfffffff8; + + spin_lock(&adapter->ahw.mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + + 
writel(data & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA_LO); + writel((data >> 32) & 0xffffffff, + mem_crb + MIU_TEST_AGT_WRDATA_HI); + + writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), + (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +static int +netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, + u64 off, u64 *data) +{ + int j, ret; + u32 temp, off8; + u64 val; + void __iomem *mem_crb; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + /* P3 onward, test agent base for MIU and SIU is same */ + if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, + NETXEN_ADDR_QDR_NET_MAX_P3)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { + mem_crb = netxen_get_ioaddr(adapter, + NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE); + goto correct; + } + + if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { + return netxen_nic_pci_mem_access_direct(adapter, + off, data, 0); + } + + return -EIO; + +correct: + off8 = off & 0xfffffff8; + + spin_lock(&adapter->ahw.mem_lock); + + writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); + writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); + writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); + writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL)); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = readl(mem_crb + TEST_AGT_CTRL); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32; + val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO); + *data = val; + ret = 0; + } + + spin_unlock(&adapter->ahw.mem_lock); + + return ret; +} + +void +netxen_setup_hwops(struct netxen_adapter *adapter) +{ + adapter->init_port = netxen_niu_xg_init_port; + adapter->stop_port = netxen_niu_disable_xg_port; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + adapter->crb_read = netxen_nic_hw_read_wx_128M, + adapter->crb_write = netxen_nic_hw_write_wx_128M, + adapter->pci_set_window = netxen_nic_pci_set_window_128M, + adapter->pci_mem_read = netxen_nic_pci_mem_read_128M, + adapter->pci_mem_write = netxen_nic_pci_mem_write_128M, + adapter->io_read = netxen_nic_io_read_128M, + adapter->io_write = netxen_nic_io_write_128M, + + adapter->macaddr_set = netxen_p2_nic_set_mac_addr; + adapter->set_multi = netxen_p2_nic_set_multi; + adapter->set_mtu = netxen_nic_set_mtu_xgb; + adapter->set_promisc = netxen_p2_nic_set_promisc; + + } else { + adapter->crb_read = netxen_nic_hw_read_wx_2M, + adapter->crb_write = netxen_nic_hw_write_wx_2M, + adapter->pci_set_window = netxen_nic_pci_set_window_2M, + adapter->pci_mem_read = netxen_nic_pci_mem_read_2M, + adapter->pci_mem_write = netxen_nic_pci_mem_write_2M, + adapter->io_read = netxen_nic_io_read_2M, + adapter->io_write = netxen_nic_io_write_2M, + + adapter->set_mtu = nx_fw_cmd_set_mtu; + adapter->set_promisc = netxen_p3_nic_set_promisc; + adapter->macaddr_set = netxen_p3_nic_set_mac_addr; + adapter->set_multi = 
netxen_p3_nic_set_multi; + + adapter->phy_read = nx_fw_cmd_query_phy; + adapter->phy_write = nx_fw_cmd_set_phy; + } +} + +int netxen_nic_get_board_info(struct netxen_adapter *adapter) +{ + int offset, board_type, magic; + struct pci_dev *pdev = adapter->pdev; + + offset = NX_FW_MAGIC_OFFSET; + if (netxen_rom_fast_read(adapter, offset, &magic)) + return -EIO; + + if (magic != NETXEN_BDINFO_MAGIC) { + dev_err(&pdev->dev, "invalid board config, magic=%08x\n", + magic); + return -EIO; + } + + offset = NX_BRDTYPE_OFFSET; + if (netxen_rom_fast_read(adapter, offset, &board_type)) + return -EIO; + + if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { + u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); + if ((gpio & 0x8000) == 0) + board_type = NETXEN_BRDTYPE_P3_10G_TP; + } + + adapter->ahw.board_type = board_type; + + switch (board_type) { + case NETXEN_BRDTYPE_P2_SB35_4G: + adapter->ahw.port_type = NETXEN_NIC_GBE; + break; + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + case NETXEN_BRDTYPE_P3_HMEZ: + case NETXEN_BRDTYPE_P3_XG_LOM: + case NETXEN_BRDTYPE_P3_10G_CX4: + case NETXEN_BRDTYPE_P3_10G_CX4_LP: + case NETXEN_BRDTYPE_P3_IMEZ: + case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: + case NETXEN_BRDTYPE_P3_10G_XFP: + case NETXEN_BRDTYPE_P3_10000_BASE_T: + adapter->ahw.port_type = NETXEN_NIC_XGBE; + break; + case NETXEN_BRDTYPE_P1_BD: + case NETXEN_BRDTYPE_P1_SB: + case NETXEN_BRDTYPE_P1_SMAX: + case NETXEN_BRDTYPE_P1_SOCK: + case NETXEN_BRDTYPE_P3_REF_QG: + case NETXEN_BRDTYPE_P3_4_GB: + case NETXEN_BRDTYPE_P3_4_GB_MM: + adapter->ahw.port_type = NETXEN_NIC_GBE; + break; + case NETXEN_BRDTYPE_P3_10G_TP: + adapter->ahw.port_type = (adapter->portnum < 2) ? 
+ NETXEN_NIC_XGBE : NETXEN_NIC_GBE; + break; + default: + dev_err(&pdev->dev, "unknown board type %x\n", board_type); + adapter->ahw.port_type = NETXEN_NIC_XGBE; + break; + } + + return 0; +} + +/* NIU access sections */ +static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) +{ + new_mtu += MTU_FUDGE_FACTOR; + if (adapter->physical_port == 0) + NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); + else + NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu); + return 0; +} + +void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) +{ + __u32 status; + __u32 autoneg; + __u32 port_mode; + + if (!netif_carrier_ok(adapter->netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = -1; + adapter->link_autoneg = AUTONEG_ENABLE; + return; + } + + if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR); + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { + adapter->link_speed = SPEED_1000; + adapter->link_duplex = DUPLEX_FULL; + adapter->link_autoneg = AUTONEG_DISABLE; + return; + } + + if (adapter->phy_read && + adapter->phy_read(adapter, + NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, + &status) == 0) { + if (netxen_get_phy_link(status)) { + switch (netxen_get_phy_speed(status)) { + case 0: + adapter->link_speed = SPEED_10; + break; + case 1: + adapter->link_speed = SPEED_100; + break; + case 2: + adapter->link_speed = SPEED_1000; + break; + default: + adapter->link_speed = 0; + break; + } + switch (netxen_get_phy_duplex(status)) { + case 0: + adapter->link_duplex = DUPLEX_HALF; + break; + case 1: + adapter->link_duplex = DUPLEX_FULL; + break; + default: + adapter->link_duplex = -1; + break; + } + if (adapter->phy_read && + adapter->phy_read(adapter, + NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, + &autoneg) != 0) + adapter->link_autoneg = autoneg; + } else + goto link_down; + } else { + link_down: + adapter->link_speed = 0; + adapter->link_duplex = -1; + } + } +} + +int +netxen_nic_wol_supported(struct netxen_adapter *adapter) +{ + u32 wol_cfg; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV); + if (wol_cfg & (1UL << adapter->portnum)) { + wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG); + if (wol_cfg & (1 << adapter->portnum)) + return 1; + } + + return 0; +} + +static u32 netxen_md_cntrl(struct netxen_adapter *adapter, + struct netxen_minidump_template_hdr *template_hdr, + struct netxen_minidump_entry_crb *crtEntry) +{ + int loop_cnt, i, rv = 0, timeout_flag; + u32 op_count, stride; + u32 opcode, read_value, addr; + unsigned long timeout, timeout_jiffies; + addr = crtEntry->addr; + op_count = crtEntry->op_count; + stride = crtEntry->addr_stride; + + for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { + for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) { + opcode = (crtEntry->opcode & (0x1 << i)); + if (opcode) { + switch (opcode) { + case NX_DUMP_WCRB: + NX_WR_DUMP_REG(addr, + adapter->ahw.pci_base0, + crtEntry->value_1); + break; + case NX_DUMP_RWCRB: + NX_RD_DUMP_REG(addr, + adapter->ahw.pci_base0, + &read_value); + NX_WR_DUMP_REG(addr, + adapter->ahw.pci_base0, + read_value); + break; + case NX_DUMP_ANDCRB: + NX_RD_DUMP_REG(addr, + adapter->ahw.pci_base0, + &read_value); + read_value &= crtEntry->value_2; + NX_WR_DUMP_REG(addr, + adapter->ahw.pci_base0, + read_value); + break; + case NX_DUMP_ORCRB: + NX_RD_DUMP_REG(addr, + adapter->ahw.pci_base0, + &read_value); + read_value |= crtEntry->value_3; + NX_WR_DUMP_REG(addr, + adapter->ahw.pci_base0, + 
read_value); + break; + case NX_DUMP_POLLCRB: + timeout = crtEntry->poll_timeout; + NX_RD_DUMP_REG(addr, + adapter->ahw.pci_base0, + &read_value); + timeout_jiffies = + msecs_to_jiffies(timeout) + jiffies; + for (timeout_flag = 0; + !timeout_flag + && ((read_value & crtEntry->value_2) + != crtEntry->value_1);) { + if (time_after(jiffies, + timeout_jiffies)) + timeout_flag = 1; + NX_RD_DUMP_REG(addr, + adapter->ahw.pci_base0, + &read_value); + } + + if (timeout_flag) { + dev_err(&adapter->pdev->dev, "%s : " + "Timeout in poll_crb control operation.\n" + , __func__); + return -1; + } + break; + case NX_DUMP_RD_SAVE: + /* Decide which address to use */ + if (crtEntry->state_index_a) + addr = + template_hdr->saved_state_array + [crtEntry->state_index_a]; + NX_RD_DUMP_REG(addr, + adapter->ahw.pci_base0, + &read_value); + template_hdr->saved_state_array + [crtEntry->state_index_v] + = read_value; + break; + case NX_DUMP_WRT_SAVED: + /* Decide which value to use */ + if (crtEntry->state_index_v) + read_value = + template_hdr->saved_state_array + [crtEntry->state_index_v]; + else + read_value = crtEntry->value_1; + + /* Decide which address to use */ + if (crtEntry->state_index_a) + addr = + template_hdr->saved_state_array + [crtEntry->state_index_a]; + + NX_WR_DUMP_REG(addr, + adapter->ahw.pci_base0, + read_value); + break; + case NX_DUMP_MOD_SAVE_ST: + read_value = + template_hdr->saved_state_array + [crtEntry->state_index_v]; + read_value <<= crtEntry->shl; + read_value >>= crtEntry->shr; + if (crtEntry->value_2) + read_value &= + crtEntry->value_2; + read_value |= crtEntry->value_3; + read_value += crtEntry->value_1; + /* Write value back to state area.*/ + template_hdr->saved_state_array + [crtEntry->state_index_v] + = read_value; + break; + default: + rv = 1; + break; + } + } + } + addr = addr + stride; + } + return rv; +} + +/* Read memory or MN */ +static u32 +netxen_md_rdmem(struct netxen_adapter *adapter, + struct netxen_minidump_entry_rdmem + *memEntry, u64 *data_buff) +{ + u64 addr, value = 0; + int i = 0, loop_cnt; + + addr = (u64)memEntry->read_addr; + loop_cnt = memEntry->read_data_size; /* This is size in bytes */ + loop_cnt /= sizeof(value); + + for (i = 0; i < loop_cnt; i++) { + if (netxen_nic_pci_mem_read_2M(adapter, addr, &value)) + goto out; + *data_buff++ = value; + addr += sizeof(value); + } +out: + return i * sizeof(value); +} + +/* Read CRB operation */ +static u32 netxen_md_rd_crb(struct netxen_adapter *adapter, + struct netxen_minidump_entry_crb + *crbEntry, u32 *data_buff) +{ + int loop_cnt; + u32 op_count, addr, stride, value; + + addr = crbEntry->addr; + op_count = crbEntry->op_count; + stride = crbEntry->addr_stride; + + for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { + NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value); + *data_buff++ = addr; + *data_buff++ = value; + addr = addr + stride; + } + return loop_cnt * (2 * sizeof(u32)); +} + +/* Read ROM */ +static u32 +netxen_md_rdrom(struct netxen_adapter *adapter, + struct netxen_minidump_entry_rdrom + *romEntry, __le32 *data_buff) +{ + int i, count = 0; + u32 size, lck_val; + u32 val; + u32 fl_addr, waddr, raddr; + fl_addr = romEntry->read_addr; + size = romEntry->read_data_size/4; +lock_try: + lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 + + NX_FLASH_SEM2_LK)); + if (!lck_val && count < MAX_CTL_CHECK) { + msleep(20); + count++; + goto lock_try; + } + writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 + + NX_FLASH_LOCK_ID)); + for (i = 0; i < size; i++) { + waddr = fl_addr & 
0xFFFF0000; + NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr); + raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF); + NX_RD_DUMP_REG(raddr, adapter->ahw.pci_base0, &val); + *data_buff++ = cpu_to_le32(val); + fl_addr += sizeof(val); + } + readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK)); + return romEntry->read_data_size; +} + +/* Handle L2 Cache */ +static u32 +netxen_md_L2Cache(struct netxen_adapter *adapter, + struct netxen_minidump_entry_cache + *cacheEntry, u32 *data_buff) +{ + int loop_cnt, i, k, timeout_flag = 0; + u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr; + u32 tag_value, read_cnt; + u8 cntl_value_w, cntl_value_r; + unsigned long timeout, timeout_jiffies; + + loop_cnt = cacheEntry->op_count; + read_addr = cacheEntry->read_addr; + cntrl_addr = cacheEntry->control_addr; + cntl_value_w = (u32) cacheEntry->write_value; + tag_reg_addr = cacheEntry->tag_reg_addr; + tag_value = cacheEntry->init_tag_value; + read_cnt = cacheEntry->read_addr_cnt; + + for (i = 0; i < loop_cnt; i++) { + NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value); + if (cntl_value_w) + NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, + (u32)cntl_value_w); + if (cacheEntry->poll_mask) { + timeout = cacheEntry->poll_wait; + NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, + &cntl_value_r); + timeout_jiffies = msecs_to_jiffies(timeout) + jiffies; + for (timeout_flag = 0; !timeout_flag && + ((cntl_value_r & cacheEntry->poll_mask) != 0);) { + if (time_after(jiffies, timeout_jiffies)) + timeout_flag = 1; + NX_RD_DUMP_REG(cntrl_addr, + adapter->ahw.pci_base0, + &cntl_value_r); + } + if (timeout_flag) { + dev_err(&adapter->pdev->dev, + "Timeout in processing L2 Tag poll.\n"); + return -1; + } + } + addr = read_addr; + for (k = 0; k < read_cnt; k++) { + NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, + &read_value); + *data_buff++ = read_value; + addr += cacheEntry->read_addr_stride; + } + tag_value += cacheEntry->tag_value_stride; + } + return read_cnt * loop_cnt * sizeof(read_value); +} + + +/* Handle L1 Cache */ +static u32 netxen_md_L1Cache(struct netxen_adapter *adapter, + struct netxen_minidump_entry_cache + *cacheEntry, u32 *data_buff) +{ + int i, k, loop_cnt; + u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr; + u32 tag_value, read_cnt; + u8 cntl_value_w; + + loop_cnt = cacheEntry->op_count; + read_addr = cacheEntry->read_addr; + cntrl_addr = cacheEntry->control_addr; + cntl_value_w = (u32) cacheEntry->write_value; + tag_reg_addr = cacheEntry->tag_reg_addr; + tag_value = cacheEntry->init_tag_value; + read_cnt = cacheEntry->read_addr_cnt; + + for (i = 0; i < loop_cnt; i++) { + NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value); + NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0, + (u32) cntl_value_w); + addr = read_addr; + for (k = 0; k < read_cnt; k++) { + NX_RD_DUMP_REG(addr, + adapter->ahw.pci_base0, + &read_value); + *data_buff++ = read_value; + addr += cacheEntry->read_addr_stride; + } + tag_value += cacheEntry->tag_value_stride; + } + return read_cnt * loop_cnt * sizeof(read_value); +} + +/* Reading OCM memory */ +static u32 +netxen_md_rdocm(struct netxen_adapter *adapter, + struct netxen_minidump_entry_rdocm + *ocmEntry, u32 *data_buff) +{ + int i, loop_cnt; + u32 value; + void __iomem *addr; + addr = (ocmEntry->read_addr + adapter->ahw.pci_base0); + loop_cnt = ocmEntry->op_count; + + for (i = 0; i < loop_cnt; i++) { + value = readl(addr); + *data_buff++ = value; + addr += ocmEntry->read_addr_stride; + } + return i * sizeof(u32); 
+} + +/* Read MUX data */ +static u32 +netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux + *muxEntry, u32 *data_buff) +{ + int loop_cnt = 0; + u32 read_addr, read_value, select_addr, sel_value; + + read_addr = muxEntry->read_addr; + sel_value = muxEntry->select_value; + select_addr = muxEntry->select_addr; + + for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) { + NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value); + NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value); + *data_buff++ = sel_value; + *data_buff++ = read_value; + sel_value += muxEntry->select_value_stride; + } + return loop_cnt * (2 * sizeof(u32)); +} + +/* Handling Queue State Reads */ +static u32 +netxen_md_rdqueue(struct netxen_adapter *adapter, + struct netxen_minidump_entry_queue + *queueEntry, u32 *data_buff) +{ + int loop_cnt, k; + u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt; + + read_cnt = queueEntry->read_addr_cnt; + read_stride = queueEntry->read_addr_stride; + select_addr = queueEntry->select_addr; + + for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count; + loop_cnt++) { + NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); + read_addr = queueEntry->read_addr; + for (k = 0; k < read_cnt; k--) { + NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, + &read_value); + *data_buff++ = read_value; + read_addr += read_stride; + } + queue_id += queueEntry->queue_id_stride; + } + return loop_cnt * (read_cnt * sizeof(read_value)); +} + + +/* +* We catch an error where driver does not read +* as much data as we expect from the entry. +*/ + +static int netxen_md_entry_err_chk(struct netxen_adapter *adapter, + struct netxen_minidump_entry *entry, int esize) +{ + if (esize < 0) { + entry->hdr.driver_flags |= NX_DUMP_SKIP; + return esize; + } + if (esize != entry->hdr.entry_capture_size) { + entry->hdr.entry_capture_size = esize; + entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR; + dev_info(&adapter->pdev->dev, + "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", + entry->hdr.entry_type, entry->hdr.entry_capture_mask, + esize, entry->hdr.entry_capture_size); + dev_info(&adapter->pdev->dev, "Aborting further dump capture\n"); + } + return 0; +} + +static int netxen_parse_md_template(struct netxen_adapter *adapter) +{ + int num_of_entries, buff_level, e_cnt, esize; + int end_cnt = 0, rv = 0, sane_start = 0, sane_end = 0; + char *dbuff; + void *template_buff = adapter->mdump.md_template; + char *dump_buff = adapter->mdump.md_capture_buff; + int capture_mask = adapter->mdump.md_capture_mask; + struct netxen_minidump_template_hdr *template_hdr; + struct netxen_minidump_entry *entry; + + if ((capture_mask & 0x3) != 0x3) { + dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed " + "for valid firmware dump\n", capture_mask); + return -EINVAL; + } + template_hdr = (struct netxen_minidump_template_hdr *) template_buff; + num_of_entries = template_hdr->num_of_entries; + entry = (struct netxen_minidump_entry *) ((char *) template_buff + + template_hdr->first_entry_offset); + memcpy(dump_buff, template_buff, adapter->mdump.md_template_size); + dump_buff = dump_buff + adapter->mdump.md_template_size; + + if (template_hdr->entry_type == TLHDR) + sane_start = 1; + + for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) { + if (!(entry->hdr.entry_capture_mask & capture_mask)) { + entry->hdr.driver_flags |= NX_DUMP_SKIP; + entry = (struct netxen_minidump_entry *) + ((char *) entry + 
entry->hdr.entry_size); + continue; + } + switch (entry->hdr.entry_type) { + case RDNOP: + entry->hdr.driver_flags |= NX_DUMP_SKIP; + break; + case RDEND: + entry->hdr.driver_flags |= NX_DUMP_SKIP; + if (!sane_end) + end_cnt = e_cnt; + sane_end += 1; + break; + case CNTRL: + rv = netxen_md_cntrl(adapter, + template_hdr, (void *)entry); + if (rv) + entry->hdr.driver_flags |= NX_DUMP_SKIP; + break; + case RDCRB: + dbuff = dump_buff + buff_level; + esize = netxen_md_rd_crb(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + case RDMN: + case RDMEM: + dbuff = dump_buff + buff_level; + esize = netxen_md_rdmem(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + case BOARD: + case RDROM: + dbuff = dump_buff + buff_level; + esize = netxen_md_rdrom(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + case L2ITG: + case L2DTG: + case L2DAT: + case L2INS: + dbuff = dump_buff + buff_level; + esize = netxen_md_L2Cache(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + case L1DAT: + case L1INS: + dbuff = dump_buff + buff_level; + esize = netxen_md_L1Cache(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + case RDOCM: + dbuff = dump_buff + buff_level; + esize = netxen_md_rdocm(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + case RDMUX: + dbuff = dump_buff + buff_level; + esize = netxen_md_rdmux(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + case QUEUE: + dbuff = dump_buff + buff_level; + esize = netxen_md_rdqueue(adapter, + (void *) entry, (void *) dbuff); + rv = netxen_md_entry_err_chk + (adapter, entry, esize); + if (rv < 0) + break; + buff_level += esize; + break; + default: + entry->hdr.driver_flags |= NX_DUMP_SKIP; + break; + } + /* Next entry in the template */ + entry = (struct netxen_minidump_entry *) + ((char *) entry + entry->hdr.entry_size); + } + if (!sane_start || sane_end > 1) { + dev_err(&adapter->pdev->dev, + "Firmware minidump template configuration error.\n"); + } + return 0; +} + +static int +netxen_collect_minidump(struct netxen_adapter *adapter) +{ + int ret = 0; + struct netxen_minidump_template_hdr *hdr; + struct timespec val; + hdr = (struct netxen_minidump_template_hdr *) + adapter->mdump.md_template; + hdr->driver_capture_mask = adapter->mdump.md_capture_mask; + jiffies_to_timespec(jiffies, &val); + hdr->driver_timestamp = (u32) val.tv_sec; + hdr->driver_info_word2 = adapter->fw_version; + hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION); + ret = netxen_parse_md_template(adapter); + if (ret) + return ret; + + return ret; +} + + +void +netxen_dump_fw(struct netxen_adapter *adapter) +{ + struct netxen_minidump_template_hdr *hdr; + int i, k, data_size = 0; + u32 capture_mask; + hdr = (struct netxen_minidump_template_hdr *) + adapter->mdump.md_template; + capture_mask = adapter->mdump.md_capture_mask; + + for (i = 0x2, k = 1; 
(i & NX_DUMP_MASK_MAX); i <<= 1, k++) { + if (i & capture_mask) + data_size += hdr->capture_size_array[k]; + } + if (!data_size) { + dev_err(&adapter->pdev->dev, + "Invalid cap sizes for capture_mask=0x%x\n", + adapter->mdump.md_capture_mask); + return; + } + adapter->mdump.md_capture_size = data_size; + adapter->mdump.md_dump_size = adapter->mdump.md_template_size + + adapter->mdump.md_capture_size; + if (!adapter->mdump.md_capture_buff) { + adapter->mdump.md_capture_buff = + vzalloc(adapter->mdump.md_dump_size); + if (!adapter->mdump.md_capture_buff) + return; + + if (netxen_collect_minidump(adapter)) { + adapter->mdump.has_valid_dump = 0; + adapter->mdump.md_dump_size = 0; + vfree(adapter->mdump.md_capture_buff); + adapter->mdump.md_capture_buff = NULL; + dev_err(&adapter->pdev->dev, + "Error in collecting firmware minidump.\n"); + } else { + adapter->mdump.md_timestamp = jiffies; + adapter->mdump.has_valid_dump = 1; + adapter->fw_mdump_rdy = 1; + dev_info(&adapter->pdev->dev, "%s Successfully " + "collected fw dump.\n", adapter->netdev->name); + } + + } else { + dev_info(&adapter->pdev->dev, + "Cannot overwrite previously collected " + "firmware minidump.\n"); + adapter->fw_mdump_rdy = 1; + return; + } +} diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h new file mode 100644 index 000000000..7433c4d21 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h @@ -0,0 +1,285 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#ifndef __NETXEN_NIC_HW_H_ +#define __NETXEN_NIC_HW_H_ + +/* Hardware memory size of 128 meg */ +#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024) + +struct netxen_adapter; + +#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) + +void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); + +/* Nibble or Byte mode for phy interface (GbE mode only) */ + +#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) + +/* + * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) + * + * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable + * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream + * Bit 2 : enable_rx => 1:enable frame recv, 0:disable + * Bit 3 : rx_synced => R/O: recv enable synched to recv stream + * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable + * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore + * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal + * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op + * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op + * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op + * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op + * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op + */ + +#define netxen_gb_tx_flowctl(config_word) \ + ((config_word) |= 1 << 4) +#define netxen_gb_rx_flowctl(config_word) \ + ((config_word) |= 1 << 5) +#define netxen_gb_tx_reset_pb(config_word) \ + ((config_word) |= 1 << 16) +#define netxen_gb_rx_reset_pb(config_word) \ + ((config_word) |= 1 << 17) +#define netxen_gb_tx_reset_mac(config_word) \ + ((config_word) |= 1 << 18) +#define netxen_gb_rx_reset_mac(config_word) \ + ((config_word) |= 1 << 19) + +#define netxen_gb_unset_tx_flowctl(config_word) \ + ((config_word) &= ~(1 << 4)) +#define netxen_gb_unset_rx_flowctl(config_word) \ + ((config_word) &= ~(1 << 5)) + +#define netxen_gb_get_tx_synced(config_word) \ + _netxen_crb_get_bit((config_word), 1) +#define netxen_gb_get_rx_synced(config_word) \ + _netxen_crb_get_bit((config_word), 3) +#define netxen_gb_get_tx_flowctl(config_word) \ + _netxen_crb_get_bit((config_word), 4) +#define netxen_gb_get_rx_flowctl(config_word) \ + _netxen_crb_get_bit((config_word), 5) +#define netxen_gb_get_soft_reset(config_word) \ + _netxen_crb_get_bit((config_word), 31) + +#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16) + +#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \ + ((config_word) |= ((val) & 0x07)) +#define netxen_gb_mii_mgmt_reset(config_word) \ + ((config_word) |= 1 << 31) +#define netxen_gb_mii_mgmt_unset(config_word) \ + ((config_word) &= ~(1 << 31)) + +/* + * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3) + * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op + * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op + */ + +#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \ + ((config_word) |= 1 << 0) +#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \ + ((config_word) |= ((val) & 0x1F)) +#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \ + ((config_word) |= (((val) & 0x1F) << 8)) + +/* + * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3) + * Read-only register. 
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle + * Bit 1 : scanning => 1:scan operation in progress, 0:idle + * Bit 2 : notvalid => :mgmt result data not yet valid, 0:idle + */ +#define netxen_get_gb_mii_mgmt_busy(config_word) \ + _netxen_crb_get_bit(config_word, 0) +#define netxen_get_gb_mii_mgmt_scanning(config_word) \ + _netxen_crb_get_bit(config_word, 1) +#define netxen_get_gb_mii_mgmt_notvalid(config_word) \ + _netxen_crb_get_bit(config_word, 2) +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ + +#define netxen_xg_set_xg0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define netxen_xg_set_xg1_mask(config_word) \ + ((config_word) |= 1 << 3) + +#define netxen_xg_get_xg0_mask(config_word) \ + _netxen_crb_get_bit((config_word), 0) +#define netxen_xg_get_xg1_mask(config_word) \ + _netxen_crb_get_bit((config_word), 3) + +#define netxen_xg_unset_xg0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define netxen_xg_unset_xg1_mask(config_word) \ + ((config_word) &= ~(1 << 3)) + +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ +#define netxen_gb_set_gb0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define netxen_gb_set_gb1_mask(config_word) \ + ((config_word) |= 1 << 2) +#define netxen_gb_set_gb2_mask(config_word) \ + ((config_word) |= 1 << 4) +#define netxen_gb_set_gb3_mask(config_word) \ + ((config_word) |= 1 << 6) + +#define netxen_gb_get_gb0_mask(config_word) \ + _netxen_crb_get_bit((config_word), 0) +#define netxen_gb_get_gb1_mask(config_word) \ + _netxen_crb_get_bit((config_word), 2) +#define netxen_gb_get_gb2_mask(config_word) \ + _netxen_crb_get_bit((config_word), 4) +#define netxen_gb_get_gb3_mask(config_word) \ + _netxen_crb_get_bit((config_word), 6) + +#define netxen_gb_unset_gb0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define netxen_gb_unset_gb1_mask(config_word) \ + ((config_word) &= ~(1 << 2)) +#define netxen_gb_unset_gb2_mask(config_word) \ + ((config_word) &= ~(1 << 4)) +#define netxen_gb_unset_gb3_mask(config_word) \ + ((config_word) &= ~(1 << 6)) + + +/* + * PHY-Specific MII control/status registers. 
+ */ +#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26 +#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27 + +/* + * PHY-Specific Status Register (reg 17). + * + * Bit 0 : jabber => 1:jabber detected, 0:not + * Bit 1 : polarity => 1:polarity reversed, 0:normal + * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled + * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled + * Bit 4 : energydetect => 1:sleep, 0:active + * Bit 5 : downshift => 1:downshift, 0:no downshift + * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) + * Bits 7-9 : cablelen => not valid in 10Mb/s mode + * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m + * Bit 10 : link => 1:link up, 0:link down + * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet + * Bit 12 : pagercvd => 1:page received, 0:page not received + * Bit 13 : duplex => 1:full duplex, 0:half duplex + * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd + */ + +#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) + +#define netxen_set_phy_speed(config_word, val) \ + ((config_word) |= ((val & 0x03) << 14)) +#define netxen_set_phy_duplex(config_word) \ + ((config_word) |= 1 << 13) +#define netxen_clear_phy_duplex(config_word) \ + ((config_word) &= ~(1 << 13)) + +#define netxen_get_phy_link(config_word) \ + _netxen_crb_get_bit(config_word, 10) +#define netxen_get_phy_duplex(config_word) \ + _netxen_crb_get_bit(config_word, 13) + +/* + * NIU Mode Register. 
+ * Bit 0 : enable FibreChannel + * Bit 1 : enable 10/100/1000 Ethernet + * Bit 2 : enable 10Gb Ethernet + */ + +#define netxen_get_niu_enable_ge(config_word) \ + _netxen_crb_get_bit(config_word, 1) + +#define NETXEN_NIU_NON_PROMISC_MODE 0 +#define NETXEN_NIU_PROMISC_MODE 1 +#define NETXEN_NIU_ALLMULTI_MODE 2 + +/* + * NIU XG MAC Config Register + * + * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable + * Bit 2 : rx_enable => 1:enable frame recv, 0:disable + * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op + * Bit 27: xaui_framer_reset + * Bit 28: xaui_rx_reset + * Bit 29: xaui_tx_reset + * Bit 30: xg_ingress_afifo_reset + * Bit 31: xg_egress_afifo_reset + */ + +#define netxen_xg_soft_reset(config_word) \ + ((config_word) |= 1 << 4) + +typedef struct { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +} crb_128M_2M_sub_block_map_t; + +typedef struct { + crb_128M_2M_sub_block_map_t sub_block[16]; +} crb_128M_2M_block_map_t; + +#endif /* __NETXEN_NIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c new file mode 100644 index 000000000..2ff479a9c --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -0,0 +1,1933 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * The full GNU General Public License is included in this distribution + * in the file called "COPYING". 
+ * + */ + +#include +#include +#include +#include +#include +#include "netxen_nic.h" +#include "netxen_nic_hw.h" + +struct crb_addr_pair { + u32 addr; + u32 data; +}; + +#define NETXEN_MAX_CRB_XFORM 60 +static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; +#define NETXEN_ADDR_ERROR (0xffffffff) + +#define crb_addr_transform(name) \ + crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \ + NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20 + +#define NETXEN_NIC_XDMA_RESET 0x8000ff + +static void +netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring); +static int netxen_p3_has_mn(struct netxen_adapter *adapter); + +static void crb_addr_transform_setup(void) +{ + crb_addr_transform(XDMA); + crb_addr_transform(TIMR); + crb_addr_transform(SRE); + crb_addr_transform(SQN3); + crb_addr_transform(SQN2); + crb_addr_transform(SQN1); + crb_addr_transform(SQN0); + crb_addr_transform(SQS3); + crb_addr_transform(SQS2); + crb_addr_transform(SQS1); + crb_addr_transform(SQS0); + crb_addr_transform(RPMX7); + crb_addr_transform(RPMX6); + crb_addr_transform(RPMX5); + crb_addr_transform(RPMX4); + crb_addr_transform(RPMX3); + crb_addr_transform(RPMX2); + crb_addr_transform(RPMX1); + crb_addr_transform(RPMX0); + crb_addr_transform(ROMUSB); + crb_addr_transform(SN); + crb_addr_transform(QMN); + crb_addr_transform(QMS); + crb_addr_transform(PGNI); + crb_addr_transform(PGND); + crb_addr_transform(PGN3); + crb_addr_transform(PGN2); + crb_addr_transform(PGN1); + crb_addr_transform(PGN0); + crb_addr_transform(PGSI); + crb_addr_transform(PGSD); + crb_addr_transform(PGS3); + crb_addr_transform(PGS2); + crb_addr_transform(PGS1); + crb_addr_transform(PGS0); + crb_addr_transform(PS); + crb_addr_transform(PH); + crb_addr_transform(NIU); + crb_addr_transform(I2Q); + crb_addr_transform(EG); + crb_addr_transform(MN); + crb_addr_transform(MS); + crb_addr_transform(CAS2); + crb_addr_transform(CAS1); + crb_addr_transform(CAS0); + crb_addr_transform(CAM); + crb_addr_transform(C2C1); + crb_addr_transform(C2C0); + crb_addr_transform(SMB); + crb_addr_transform(OCM0); + crb_addr_transform(I2C0); +} + +void netxen_release_rx_buffers(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct netxen_rx_buffer *rx_buf; + int i, ring; + + recv_ctx = &adapter->recv_ctx; + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + for (i = 0; i < rds_ring->num_desc; ++i) { + rx_buf = &(rds_ring->rx_buf_arr[i]); + if (rx_buf->state == NETXEN_BUFFER_FREE) + continue; + pci_unmap_single(adapter->pdev, + rx_buf->dma, + rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + if (rx_buf->skb != NULL) + dev_kfree_skb_any(rx_buf->skb); + } + } +} + +void netxen_release_tx_buffers(struct netxen_adapter *adapter) +{ + struct netxen_cmd_buffer *cmd_buf; + struct netxen_skb_frag *buffrag; + int i, j; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + + spin_lock_bh(&adapter->tx_clean_lock); + cmd_buf = tx_ring->cmd_buf_arr; + for (i = 0; i < tx_ring->num_desc; i++) { + buffrag = cmd_buf->frag_array; + if (buffrag->dma) { + pci_unmap_single(adapter->pdev, buffrag->dma, + buffrag->length, PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + for (j = 1; j < cmd_buf->frag_count; j++) { + buffrag++; + if (buffrag->dma) { + pci_unmap_page(adapter->pdev, buffrag->dma, + buffrag->length, + PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + } + if (cmd_buf->skb) { + dev_kfree_skb_any(cmd_buf->skb); + cmd_buf->skb = NULL; + } + cmd_buf++; + } + 
spin_unlock_bh(&adapter->tx_clean_lock); +} + +void netxen_free_sw_resources(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_tx_ring *tx_ring; + int ring; + + recv_ctx = &adapter->recv_ctx; + + if (recv_ctx->rds_rings == NULL) + goto skip_rds; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + vfree(rds_ring->rx_buf_arr); + rds_ring->rx_buf_arr = NULL; + } + kfree(recv_ctx->rds_rings); + +skip_rds: + if (adapter->tx_ring == NULL) + return; + + tx_ring = adapter->tx_ring; + vfree(tx_ring->cmd_buf_arr); + kfree(tx_ring); + adapter->tx_ring = NULL; +} + +int netxen_alloc_sw_resources(struct netxen_adapter *adapter) +{ + struct netxen_recv_context *recv_ctx; + struct nx_host_rds_ring *rds_ring; + struct nx_host_sds_ring *sds_ring; + struct nx_host_tx_ring *tx_ring; + struct netxen_rx_buffer *rx_buf; + int ring, i; + + struct netxen_cmd_buffer *cmd_buf_arr; + struct net_device *netdev = adapter->netdev; + + tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL); + if (tx_ring == NULL) + return -ENOMEM; + + adapter->tx_ring = tx_ring; + + tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, 0); + + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); + if (cmd_buf_arr == NULL) + goto err_out; + + tx_ring->cmd_buf_arr = cmd_buf_arr; + + recv_ctx = &adapter->recv_ctx; + + rds_ring = kcalloc(adapter->max_rds_rings, + sizeof(struct nx_host_rds_ring), GFP_KERNEL); + if (rds_ring == NULL) + goto err_out; + + recv_ctx->rds_rings = rds_ring; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + switch (ring) { + case RCV_RING_NORMAL: + rds_ring->num_desc = adapter->num_rxd; + if (adapter->ahw.cut_through) { + rds_ring->dma_size = + NX_CT_DEFAULT_RX_BUF_LEN; + rds_ring->skb_size = + NX_CT_DEFAULT_RX_BUF_LEN; + } else { + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + rds_ring->dma_size = + NX_P3_RX_BUF_MAX_LEN; + else + rds_ring->dma_size = + NX_P2_RX_BUF_MAX_LEN; + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + } + break; + + case RCV_RING_JUMBO: + rds_ring->num_desc = adapter->num_jumbo_rxd; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + rds_ring->dma_size = + NX_P3_RX_JUMBO_BUF_MAX_LEN; + else + rds_ring->dma_size = + NX_P2_RX_JUMBO_BUF_MAX_LEN; + + if (adapter->capabilities & NX_CAP0_HW_LRO) + rds_ring->dma_size += NX_LRO_BUFFER_EXTRA; + + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + break; + + case RCV_RING_LRO: + rds_ring->num_desc = adapter->num_lro_rxd; + rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH; + rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; + break; + + } + rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); + if (rds_ring->rx_buf_arr == NULL) + /* free whatever was already allocated */ + goto err_out; + + INIT_LIST_HEAD(&rds_ring->free_list); + /* + * Now go through all of them, set reference handles + * and put them in the queues. 
+ */ + rx_buf = rds_ring->rx_buf_arr; + for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf->ref_handle = i; + rx_buf->state = NETXEN_BUFFER_FREE; + rx_buf++; + } + spin_lock_init(&rds_ring->lock); + } + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sds_ring->irq = adapter->msix_entries[ring].vector; + sds_ring->adapter = adapter; + sds_ring->num_desc = adapter->num_rxd; + + for (i = 0; i < NUM_RCV_DESC_RINGS; i++) + INIT_LIST_HEAD(&sds_ring->free_list[i]); + } + + return 0; + +err_out: + netxen_free_sw_resources(adapter); + return -ENOMEM; +} + +/* + * netxen_decode_crb_addr(0 - utility to translate from internal Phantom CRB + * address to external PCI CRB address. + */ +static u32 netxen_decode_crb_addr(u32 addr) +{ + int i; + u32 base_addr, offset, pci_base; + + crb_addr_transform_setup(); + + pci_base = NETXEN_ADDR_ERROR; + base_addr = addr & 0xfff00000; + offset = addr & 0x000fffff; + + for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) { + if (crb_addr_xform[i] == base_addr) { + pci_base = i << 20; + break; + } + } + if (pci_base == NETXEN_ADDR_ERROR) + return pci_base; + else + return pci_base + offset; +} + +#define NETXEN_MAX_ROM_WAIT_USEC 100 + +static int netxen_wait_rom_done(struct netxen_adapter *adapter) +{ + long timeout = 0; + long done = 0; + + cond_resched(); + + while (done == 0) { + done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS); + done &= 2; + if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) { + dev_err(&adapter->pdev->dev, + "Timeout reached waiting for rom done"); + return -EIO; + } + udelay(1); + } + return 0; +} + +static int do_rom_fast_read(struct netxen_adapter *adapter, + int addr, int *valp) +{ + NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); + NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); + NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); + if (netxen_wait_rom_done(adapter)) { + printk("Error waiting for rom done\n"); + return -EIO; + } + /* reset abyte_cnt and dummy_byte_cnt */ + NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); + udelay(10); + NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + + *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA); + return 0; +} + +static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int addridx; + int ret = 0; + + for (addridx = addr; addridx < (addr + size); addridx += 4) { + int v; + ret = do_rom_fast_read(adapter, addridx, &v); + if (ret != 0) + break; + *(__le32 *)bytes = cpu_to_le32(v); + bytes += 4; + } + + return ret; +} + +int +netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int ret; + + ret = netxen_rom_lock(adapter); + if (ret < 0) + return ret; + + ret = do_rom_fast_read_words(adapter, addr, bytes, size); + + netxen_rom_unlock(adapter); + return ret; +} + +int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) +{ + int ret; + + if (netxen_rom_lock(adapter) != 0) + return -EIO; + + ret = do_rom_fast_read(adapter, addr, valp); + netxen_rom_unlock(adapter); + return ret; +} + +#define NETXEN_BOARDTYPE 0x4008 +#define NETXEN_BOARDNUM 0x400c +#define NETXEN_CHIPNUM 0x4010 + +int netxen_pinit_from_rom(struct netxen_adapter *adapter) +{ + int addr, val; + int i, n, init_delay = 0; + struct crb_addr_pair *buf; + unsigned offset; + u32 off; + + /* resetall */ + netxen_rom_lock(adapter); + NXWR32(adapter, 
NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff); + netxen_rom_unlock(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (netxen_rom_fast_read(adapter, 0, &n) != 0 || + (n != 0xcafecafe) || + netxen_rom_fast_read(adapter, 4, &n) != 0) { + printk(KERN_ERR "%s: ERROR Reading crb_init area: " + "n: %08x\n", netxen_nic_driver_name, n); + return -EIO; + } + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + } else { + if (netxen_rom_fast_read(adapter, 0, &n) != 0 || + !(n & 0x80000000)) { + printk(KERN_ERR "%s: ERROR Reading crb_init area: " + "n: %08x\n", netxen_nic_driver_name, n); + return -EIO; + } + offset = 1; + n &= ~0x80000000; + } + + if (n >= 1024) { + printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" + " initialized.\n", __func__, n); + return -EIO; + } + + buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + for (i = 0; i < n; i++) { + if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || + netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); + return -EIO; + } + + buf[i].addr = addr; + buf[i].data = val; + + } + + for (i = 0; i < n; i++) { + + off = netxen_decode_crb_addr(buf[i].addr); + if (off == NETXEN_ADDR_ERROR) { + printk(KERN_ERR"CRB init value out of range %x\n", + buf[i].addr); + continue; + } + off += NETXEN_PCI_CRBSPACE; + + if (off & 1) + continue; + + /* skipping cold reboot MAGIC */ + if (off == NETXEN_CAM_RAM(0x1fc)) + continue; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (off == (NETXEN_CRB_I2C0 + 0x1c)) + continue; + /* do not reset PCI */ + if (off == (ROMUSB_GLB + 0xbc)) + continue; + if (off == (ROMUSB_GLB + 0xa8)) + continue; + if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ + continue; + if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ + continue; + if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ + continue; + if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) + continue; + if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) && + !NX_IS_REVISION_P3P(adapter->ahw.revision_id)) + buf[i].data = 0x1020; + /* skip the function enable register */ + if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + if ((off & 0x0ff00000) == NETXEN_CRB_SMB) + continue; + } + + init_delay = 1; + /* After writing this register, HW needs time for CRB */ + /* to quiet down (else crb_window returns 0xffffffff) */ + if (off == NETXEN_ROMUSB_GLB_SW_RESET) { + init_delay = 1000; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* hold xdma in reset also */ + buf[i].data = NETXEN_NIC_XDMA_RESET; + buf[i].data = 0x8000ff; + } + } + + NXWR32(adapter, off, buf[i].data); + + msleep(init_delay); + } + kfree(buf); + + /* disable_peg_cache_all */ + + /* unreset_net_cache */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); + } + + /* p2dn replyCount */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); + /* disable_peg_cache 0 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); + /* disable_peg_cache 1 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); + + /* peg_clr_all */ + + /* peg_clr 0 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); + /* peg_clr 1 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0); + /* peg_clr 2 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); + NXWR32(adapter, 
NETXEN_CRB_PEG_NET_2 + 0xc, 0); + /* peg_clr 3 */ + NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); + NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); + return 0; +} + +static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section) +{ + uint32_t i; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + __le32 entries = cpu_to_le32(directory->num_entries); + + for (i = 0; i < entries; i++) { + + __le32 offs = cpu_to_le32(directory->findex) + + (i * cpu_to_le32(directory->entry_size)); + __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); + + if (tab_type == section) + return (struct uni_table_desc *) &unirom[offs]; + } + + return NULL; +} + +#define QLCNIC_FILEHEADER_SIZE (14 * 4) + +static int +netxen_nic_validate_header(struct netxen_adapter *adapter) + { + const u8 *unirom = adapter->fw->data; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + u32 fw_file_size = adapter->fw->size; + u32 tab_size; + __le32 entries; + __le32 entry_size; + + if (fw_file_size < QLCNIC_FILEHEADER_SIZE) + return -EINVAL; + + entries = cpu_to_le32(directory->num_entries); + entry_size = cpu_to_le32(directory->entry_size); + tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); + + if (fw_file_size < tab_size) + return -EINVAL; + + return 0; +} + +static int +netxen_nic_validate_bootld(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_BOOTLD_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +netxen_nic_validate_fw(struct netxen_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + NX_UNI_FIRMWARE_IDX_OFF)); + u32 offs; + u32 tab_size; + u32 data_size; + + tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW); + + if (!tab_desc) + return -EINVAL; + + tab_size = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * (idx)); + descr = (struct uni_data_desc *)&unirom[offs]; + data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + + +static int +netxen_nic_validate_product_offs(struct netxen_adapter *adapter) +{ + struct uni_table_desc *ptab_descr; + const u8 *unirom = adapter->fw->data; + int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 
+ 1 : netxen_p3_has_mn(adapter); + __le32 entries; + __le32 entry_size; + u32 tab_size; + u32 i; + + ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); + if (ptab_descr == NULL) + return -EINVAL; + + entries = cpu_to_le32(ptab_descr->num_entries); + entry_size = cpu_to_le32(ptab_descr->entry_size); + tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); + + if (adapter->fw->size < tab_size) + return -EINVAL; + +nomn: + for (i = 0; i < entries; i++) { + + __le32 flags, file_chiprev, offs; + u8 chiprev = adapter->ahw.revision_id; + uint32_t flagbit; + + offs = cpu_to_le32(ptab_descr->findex) + + (i * cpu_to_le32(ptab_descr->entry_size)); + flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF)); + file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + + NX_UNI_CHIP_REV_OFF)); + + flagbit = mn_present ? 1 : 2; + + if ((chiprev == file_chiprev) && + ((1ULL << flagbit) & flags)) { + adapter->file_prd_off = offs; + return 0; + } + } + + if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + mn_present = 0; + goto nomn; + } + + return -EINVAL; +} + +static int +netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter) +{ + if (netxen_nic_validate_header(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: header validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_product_offs(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: product validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_bootld(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: bootld validation failed\n"); + return -EINVAL; + } + + if (netxen_nic_validate_fw(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: firmware validation failed\n"); + return -EINVAL; + } + + return 0; +} + +static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, + u32 section, u32 idx_offset) +{ + const u8 *unirom = adapter->fw->data; + int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + + idx_offset)); + struct uni_table_desc *tab_desc; + __le32 offs; + + tab_desc = nx_get_table_desc(unirom, section); + + if (tab_desc == NULL) + return NULL; + + offs = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * idx); + + return (struct uni_data_desc *)&unirom[offs]; +} + +static u8 * +nx_get_bootld_offs(struct netxen_adapter *adapter) +{ + u32 offs = NETXEN_BOOTLD_START; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_BOOTLD, + NX_UNI_BOOTLD_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static u8 * +nx_get_fw_offs(struct netxen_adapter *adapter) +{ + u32 offs = NETXEN_IMAGE_START; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + offs = cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_FW, + NX_UNI_FIRMWARE_IDX_OFF))->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static __le32 +nx_get_fw_size(struct netxen_adapter *adapter) +{ + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) + return cpu_to_le32((nx_get_data_desc(adapter, + NX_UNI_DIR_SECT_FW, + NX_UNI_FIRMWARE_IDX_OFF))->size); + else + return cpu_to_le32( + *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]); +} + +static __le32 +nx_get_fw_version(struct netxen_adapter *adapter) +{ + struct uni_data_desc *fw_data_desc; + const struct firmware *fw = adapter->fw; + __le32 major, minor, sub; + const u8 *ver_str; + int i, ret = 0; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { + + fw_data_desc = nx_get_data_desc(adapter, 
+ NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF); + ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + + cpu_to_le32(fw_data_desc->size) - 17; + + for (i = 0; i < 12; i++) { + if (!strncmp(&ver_str[i], "REV=", 4)) { + ret = sscanf(&ver_str[i+4], "%u.%u.%u ", + &major, &minor, &sub); + break; + } + } + + if (ret != 3) + return 0; + + return major + (minor << 8) + (sub << 16); + + } else + return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); +} + +static __le32 +nx_get_bios_version(struct netxen_adapter *adapter) +{ + const struct firmware *fw = adapter->fw; + __le32 bios_ver, prd_off = adapter->file_prd_off; + + if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { + bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + + NX_UNI_BIOS_VERSION_OFF)); + return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + + (bios_ver >> 24); + } else + return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); + +} + +int +netxen_need_fw_reset(struct netxen_adapter *adapter) +{ + u32 count, old_count; + u32 val, version, major, minor, build; + int i, timeout; + u8 fw_type; + + /* NX2031 firmware doesn't support heartbit */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 1; + + if (adapter->need_fw_reset) + return 1; + + /* last attempt had failed */ + if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) + return 1; + + old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + + for (i = 0; i < 10; i++) { + + timeout = msleep_interruptible(200); + if (timeout) { + NXWR32(adapter, CRB_CMDPEG_STATE, + PHAN_INITIALIZE_FAILED); + return -EINTR; + } + + count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + if (count != old_count) + break; + } + + /* firmware is dead */ + if (count == old_count) + return 1; + + /* check if we have got newer or different file firmware */ + if (adapter->fw) { + + val = nx_get_fw_version(adapter); + + version = NETXEN_DECODE_VERSION(val); + + major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); + build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); + + if (version > NETXEN_VERSION_CODE(major, minor, build)) + return 1; + + if (version == NETXEN_VERSION_CODE(major, minor, build) && + adapter->fw_type != NX_UNIFIED_ROMIMAGE) { + + val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); + fw_type = (val & 0x4) ? + NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE; + + if (adapter->fw_type != fw_type) + return 1; + } + } + + return 0; +} + +#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505) + +int +netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter) +{ + u32 flash_fw_ver, min_fw_ver; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + if (netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { + dev_err(&adapter->pdev->dev, "Unable to read flash fw" + "version\n"); + return -EIO; + } + + flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); + min_fw_ver = NETXEN_MIN_P3_FW_SUPP; + if (flash_fw_ver >= min_fw_ver) + return 0; + + dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported" + "[4.0.505]. 
Please update firmware on flash\n", + _major(flash_fw_ver), _minor(flash_fw_ver), + _build(flash_fw_ver)); + return -EINVAL; +} + +static char *fw_name[] = { + NX_P2_MN_ROMIMAGE_NAME, + NX_P3_CT_ROMIMAGE_NAME, + NX_P3_MN_ROMIMAGE_NAME, + NX_UNIFIED_ROMIMAGE_NAME, + NX_FLASH_ROMIMAGE_NAME, +}; + +int +netxen_load_firmware(struct netxen_adapter *adapter) +{ + u64 *ptr64; + u32 i, flashaddr, size; + const struct firmware *fw = adapter->fw; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "loading firmware from %s\n", + fw_name[adapter->fw_type]); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1); + + if (fw) { + __le64 data; + + size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; + + ptr64 = (u64 *)nx_get_bootld_offs(adapter); + flashaddr = NETXEN_BOOTLD_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)nx_get_fw_size(adapter) / 8; + + ptr64 = (u64 *)nx_get_fw_offs(adapter); + flashaddr = NETXEN_IMAGE_START; + + for (i = 0; i < size; i++) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = (__force u32)nx_get_fw_size(adapter) % 8; + if (size) { + data = cpu_to_le64(ptr64[i]); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + } + + } else { + u64 data; + u32 hi, lo; + + size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; + flashaddr = NETXEN_BOOTLD_START; + + for (i = 0; i < size; i++) { + if (netxen_rom_fast_read(adapter, + flashaddr, (int *)&lo) != 0) + return -EIO; + if (netxen_rom_fast_read(adapter, + flashaddr + 4, (int *)&hi) != 0) + return -EIO; + + /* hi, lo are already in host endian byteorder */ + data = (((u64)hi << 32) | lo); + + if (adapter->pci_mem_write(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + } + msleep(1); + + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { + NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020); + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e); + } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); + else { + NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); + NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0); + } + + return 0; +} + +static int +netxen_validate_firmware(struct netxen_adapter *adapter) +{ + __le32 val; + __le32 flash_fw_ver; + u32 file_fw_ver, min_ver, bios; + struct pci_dev *pdev = adapter->pdev; + const struct firmware *fw = adapter->fw; + u8 fw_type = adapter->fw_type; + u32 crbinit_fix_fw; + + if (fw_type == NX_UNIFIED_ROMIMAGE) { + if (netxen_nic_validate_unified_romimage(adapter)) + return -EINVAL; + } else { + val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); + if ((__force u32)val != NETXEN_BDINFO_MAGIC) + return -EINVAL; + + if (fw->size < NX_FW_MIN_SIZE) + return -EINVAL; + } + + val = nx_get_fw_version(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + min_ver = NETXEN_MIN_P3_FW_SUPP; + else + min_ver = NETXEN_VERSION_CODE(3, 4, 216); + + file_fw_ver = NETXEN_DECODE_VERSION(val); + + if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) || + (file_fw_ver < min_ver)) { + dev_err(&pdev->dev, + "%s: firmware version %d.%d.%d unsupported\n", + fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver), + _build(file_fw_ver)); + return -EINVAL; + } + val = nx_get_bios_version(adapter); + 
netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); + if ((__force u32)val != bios) { + dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", + fw_name[fw_type]); + return -EINVAL; + } + + if (netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) { + dev_err(&pdev->dev, "Unable to read flash fw version\n"); + return -EIO; + } + flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver); + + /* New fw from file is not allowed, if fw on flash is < 4.0.554 */ + crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554); + if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw && + NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + dev_err(&pdev->dev, "Incompatibility detected between driver " + "and firmware version on flash. This configuration " + "is not recommended. Please update the firmware on " + "flash immediately\n"); + return -EINVAL; + } + + /* check if flashed firmware is newer only for no-mn and P2 case*/ + if (!netxen_p3_has_mn(adapter) || + NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + if (flash_fw_ver > file_fw_ver) { + dev_info(&pdev->dev, "%s: firmware is older than flash\n", + fw_name[fw_type]); + return -EINVAL; + } + } + + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); + return 0; +} + +static void +nx_get_next_fwtype(struct netxen_adapter *adapter) +{ + u8 fw_type; + + switch (adapter->fw_type) { + case NX_UNKNOWN_ROMIMAGE: + fw_type = NX_UNIFIED_ROMIMAGE; + break; + + case NX_UNIFIED_ROMIMAGE: + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) + fw_type = NX_FLASH_ROMIMAGE; + else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + fw_type = NX_P2_MN_ROMIMAGE; + else if (netxen_p3_has_mn(adapter)) + fw_type = NX_P3_MN_ROMIMAGE; + else + fw_type = NX_P3_CT_ROMIMAGE; + break; + + case NX_P3_MN_ROMIMAGE: + fw_type = NX_P3_CT_ROMIMAGE; + break; + + case NX_P2_MN_ROMIMAGE: + case NX_P3_CT_ROMIMAGE: + default: + fw_type = NX_FLASH_ROMIMAGE; + break; + } + + adapter->fw_type = fw_type; +} + +static int +netxen_p3_has_mn(struct netxen_adapter *adapter) +{ + u32 capability, flashed_ver; + capability = 0; + + /* NX2031 always had MN */ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 1; + + netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flashed_ver); + flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); + + if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { + + capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); + if (capability & NX_PEG_TUNE_MN_PRESENT) + return 1; + } + return 0; +} + +void netxen_request_firmware(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int rc = 0; + + adapter->fw_type = NX_UNKNOWN_ROMIMAGE; + +next: + nx_get_next_fwtype(adapter); + + if (adapter->fw_type == NX_FLASH_ROMIMAGE) { + adapter->fw = NULL; + } else { + rc = reject_firmware(&adapter->fw, + fw_name[adapter->fw_type], &pdev->dev); + if (rc != 0) + goto next; + + rc = netxen_validate_firmware(adapter); + if (rc != 0) { + release_firmware(adapter->fw); + msleep(1); + goto next; + } + } +} + + +void +netxen_release_firmware(struct netxen_adapter *adapter) +{ + release_firmware(adapter->fw); + adapter->fw = NULL; +} + +int netxen_init_dummy_dma(struct netxen_adapter *adapter) +{ + u64 addr; + u32 hi, lo; + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev, + NETXEN_HOST_DUMMY_DMA_SIZE, + &adapter->dummy_dma.phys_addr); + if (adapter->dummy_dma.addr == NULL) { + dev_err(&adapter->pdev->dev, + "ERROR: Could not 
allocate dummy DMA memory\n"); + return -ENOMEM; + } + + addr = (uint64_t) adapter->dummy_dma.phys_addr; + hi = (addr >> 32) & 0xffffffff; + lo = addr & 0xffffffff; + + NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); + NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); + + return 0; +} + +/* + * NetXen DMA watchdog control: + * + * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive + * Bit 1 : disable_request => 1 req disable dma watchdog + * Bit 2 : enable_request => 1 req enable dma watchdog + * Bit 3-31 : unused + */ +void netxen_free_dummy_dma(struct netxen_adapter *adapter) +{ + int i = 100; + u32 ctrl; + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return; + + if (!adapter->dummy_dma.addr) + return; + + ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); + if ((ctrl & 0x1) != 0) { + NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2)); + + while ((ctrl & 0x1) != 0) { + + msleep(50); + + ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL); + + if (--i == 0) + break; + } + } + + if (i) { + pci_free_consistent(adapter->pdev, + NETXEN_HOST_DUMMY_DMA_SIZE, + adapter->dummy_dma.addr, + adapter->dummy_dma.phys_addr); + adapter->dummy_dma.addr = NULL; + } else + dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n"); +} + +int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) +{ + u32 val = 0; + int retries = 60; + + if (pegtune_val) + return 0; + + do { + val = NXRD32(adapter, CRB_CMDPEG_STATE); + switch (val) { + case PHAN_INITIALIZE_COMPLETE: + case PHAN_INITIALIZE_ACK: + return 0; + case PHAN_INITIALIZE_FAILED: + goto out_err; + default: + break; + } + + msleep(500); + + } while (--retries); + + NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); + +out_err: + dev_warn(&adapter->pdev->dev, "firmware init failed\n"); + return -EIO; +} + +static int +netxen_receive_peg_ready(struct netxen_adapter *adapter) +{ + u32 val = 0; + int retries = 2000; + + do { + val = NXRD32(adapter, CRB_RCVPEG_STATE); + + if (val == PHAN_PEG_RCV_INITIALIZED) + return 0; + + msleep(10); + + } while (--retries); + + if (!retries) { + printk(KERN_ERR "Receive Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; + } + + return 0; +} + +int netxen_init_firmware(struct netxen_adapter *adapter) +{ + int err; + + err = netxen_receive_peg_ready(adapter); + if (err) + return err; + + NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); + NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); + NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); + + return err; +} + +static void +netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg) +{ + u32 cable_OUI; + u16 cable_len; + u16 link_speed; + u8 link_status, module, duplex, autoneg; + struct net_device *netdev = adapter->netdev; + + adapter->has_link_events = 1; + + cable_OUI = msg->body[1] & 0xffffffff; + cable_len = (msg->body[1] >> 32) & 0xffff; + link_speed = (msg->body[1] >> 48) & 0xffff; + + link_status = msg->body[2] & 0xff; + duplex = (msg->body[2] >> 16) & 0xff; + autoneg = (msg->body[2] >> 24) & 0xff; + + module = (msg->body[2] >> 8) & 0xff; + if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) { + printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n", + netdev->name, cable_OUI, cable_len); + } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) { + printk(KERN_INFO "%s: unsupported cable length %d\n", + netdev->name, 
cable_len); + } + + /* update link parameters */ + if (duplex == LINKEVENT_FULL_DUPLEX) + adapter->link_duplex = DUPLEX_FULL; + else + adapter->link_duplex = DUPLEX_HALF; + adapter->module_type = module; + adapter->link_autoneg = autoneg; + adapter->link_speed = link_speed; + + netxen_advert_link_change(adapter, link_status); +} + +static void +netxen_handle_fw_message(int desc_cnt, int index, + struct nx_host_sds_ring *sds_ring) +{ + nx_fw_msg_t msg; + struct status_desc *desc; + int i = 0, opcode; + + while (desc_cnt > 0 && i < 8) { + desc = &sds_ring->desc_head[index]; + msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); + msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); + + index = get_next_index(index, sds_ring->num_desc); + desc_cnt--; + } + + opcode = netxen_get_nic_msg_opcode(msg.body[0]); + switch (opcode) { + case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: + netxen_handle_linkevent(sds_ring->adapter, &msg); + break; + default: + break; + } +} + +static int +netxen_alloc_rx_skb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring, + struct netxen_rx_buffer *buffer) +{ + struct sk_buff *skb; + dma_addr_t dma; + struct pci_dev *pdev = adapter->pdev; + + buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size); + if (!buffer->skb) + return 1; + + skb = buffer->skb; + + if (!adapter->ahw.cut_through) + skb_reserve(skb, 2); + + dma = pci_map_single(pdev, skb->data, + rds_ring->dma_size, PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(pdev, dma)) { + dev_kfree_skb_any(skb); + buffer->skb = NULL; + return 1; + } + + buffer->skb = skb; + buffer->dma = dma; + buffer->state = NETXEN_BUFFER_BUSY; + + return 0; +} + +static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) +{ + struct netxen_rx_buffer *buffer; + struct sk_buff *skb; + + buffer = &rds_ring->rx_buf_arr[index]; + + pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + + skb = buffer->skb; + if (!skb) + goto no_skb; + + if (likely((adapter->netdev->features & NETIF_F_RXCSUM) + && cksum == STATUS_CKSUM_OK)) { + adapter->stats.csummed++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb->ip_summed = CHECKSUM_NONE; + + buffer->skb = NULL; +no_skb: + buffer->state = NETXEN_BUFFER_FREE; + return skb; +} + +static struct netxen_rx_buffer * +netxen_process_rcv(struct netxen_adapter *adapter, + struct nx_host_sds_ring *sds_ring, + int ring, u64 sts_data0) +{ + struct net_device *netdev = adapter->netdev; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct netxen_rx_buffer *buffer; + struct sk_buff *skb; + struct nx_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = netxen_get_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + length = netxen_get_sts_totallength(sts_data0); + cksum = netxen_get_sts_status(sts_data0); + pkt_offset = netxen_get_sts_pkt_offset(sts_data0); + + skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + skb->protocol = eth_type_trans(skb, netdev); + + napi_gro_receive(&sds_ring->napi, skb); + + adapter->stats.rx_pkts++; + 
adapter->stats.rxbytes += length; + + return buffer; +} + +#define TCP_HDR_SIZE 20 +#define TCP_TS_OPTION_SIZE 12 +#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE) + +static struct netxen_rx_buffer * +netxen_process_lro(struct netxen_adapter *adapter, + struct nx_host_sds_ring *sds_ring, + int ring, u64 sts_data0, u64 sts_data1) +{ + struct net_device *netdev = adapter->netdev; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + struct netxen_rx_buffer *buffer; + struct sk_buff *skb; + struct nx_host_rds_ring *rds_ring; + struct iphdr *iph; + struct tcphdr *th; + bool push, timestamp; + int l2_hdr_offset, l4_hdr_offset; + int index; + u16 lro_length, length, data_offset; + u32 seq_number; + u8 vhdr_len = 0; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = netxen_get_lro_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + timestamp = netxen_get_lro_sts_timestamp(sts_data0); + lro_length = netxen_get_lro_sts_length(sts_data0); + l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0); + l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0); + push = netxen_get_lro_sts_push_flag(sts_data0); + seq_number = netxen_get_lro_sts_seq_number(sts_data1); + + skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return buffer; + + if (timestamp) + data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE; + else + data_offset = l4_hdr_offset + TCP_HDR_SIZE; + + skb_put(skb, lro_length + data_offset); + + skb_pull(skb, l2_hdr_offset); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb->protocol == htons(ETH_P_8021Q)) + vhdr_len = VLAN_HLEN; + iph = (struct iphdr *)(skb->data + vhdr_len); + th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2)); + + length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); + iph->tot_len = htons(length); + th->psh = push; + th->seq = htonl(seq_number); + + length = skb->len; + + if (adapter->flags & NETXEN_FW_MSS_CAP) + skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1); + + netif_receive_skb(skb); + + adapter->stats.lro_pkts++; + adapter->stats.rxbytes += length; + + return buffer; +} + +#define netxen_merge_rx_buffers(list, head) \ + do { list_splice_tail_init(list, head); } while (0); + +int +netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max) +{ + struct netxen_adapter *adapter = sds_ring->adapter; + + struct list_head *cur; + + struct status_desc *desc; + struct netxen_rx_buffer *rxbuf; + + u32 consumer = sds_ring->consumer; + + int count = 0; + u64 sts_data0, sts_data1; + int opcode, ring = 0, desc_cnt; + + while (count < max) { + desc = &sds_ring->desc_head[consumer]; + sts_data0 = le64_to_cpu(desc->status_desc_data[0]); + + if (!(sts_data0 & STATUS_OWNER_HOST)) + break; + + desc_cnt = netxen_get_sts_desc_cnt(sts_data0); + + opcode = netxen_get_sts_opcode(sts_data0); + + switch (opcode) { + case NETXEN_NIC_RXPKT_DESC: + case NETXEN_OLD_RXPKT_DESC: + case NETXEN_NIC_SYN_OFFLOAD: + ring = netxen_get_sts_type(sts_data0); + rxbuf = netxen_process_rcv(adapter, sds_ring, + ring, sts_data0); + break; + case NETXEN_NIC_LRO_DESC: + ring = netxen_get_lro_sts_type(sts_data0); + sts_data1 = le64_to_cpu(desc->status_desc_data[1]); + rxbuf = netxen_process_lro(adapter, sds_ring, + ring, sts_data0, sts_data1); + break; + case NETXEN_NIC_RESPONSE_DESC: + 
netxen_handle_fw_message(desc_cnt, consumer, sds_ring); + default: + goto skip; + } + + WARN_ON(desc_cnt > 1); + + if (rxbuf) + list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + +skip: + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = + cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + count++; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + struct nx_host_rds_ring *rds_ring = + &adapter->recv_ctx.rds_rings[ring]; + + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, + struct netxen_rx_buffer, list); + netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + netxen_merge_rx_buffers(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + + netxen_post_rx_buffers_nodb(adapter, rds_ring); + } + + if (count) { + sds_ring->consumer = consumer; + NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer); + } + + return count; +} + +/* Process Command status ring */ +int netxen_process_cmd_ring(struct netxen_adapter *adapter) +{ + u32 sw_consumer, hw_consumer; + int count = 0, i; + struct netxen_cmd_buffer *buffer; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct netxen_skb_frag *frag; + int done = 0; + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + + if (!spin_trylock_bh(&adapter->tx_clean_lock)) + return 1; + + sw_consumer = tx_ring->sw_consumer; + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + + while (sw_consumer != hw_consumer) { + buffer = &tx_ring->cmd_buf_arr[sw_consumer]; + if (buffer->skb) { + frag = &buffer->frag_array[0]; + pci_unmap_single(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + for (i = 1; i < buffer->frag_count; i++) { + frag++; /* Get the next frag */ + pci_unmap_page(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + } + + adapter->stats.xmitfinished++; + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } + + sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); + if (++count >= MAX_STATUS_HANDLE) + break; + } + + tx_ring->sw_consumer = sw_consumer; + + if (count && netif_running(netdev)) { + smp_mb(); + + if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_wake_queue(netdev); + adapter->tx_timeo_cnt = 0; + } + /* + * If everything is freed up to consumer then check if the ring is full + * If the ring is full then check if more needs to be freed and + * schedule the call back again. + * + * This happens when there are 2 CPUs. One could be freeing and the + * other filling it. If the ring is full when we get out of here and + * the card has already interrupted the host then the host can miss the + * interrupt. + * + * There is still a possible race condition and the host could miss an + * interrupt. The card has to take care of this. 
+ */ + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + done = (sw_consumer == hw_consumer); + spin_unlock_bh(&adapter->tx_clean_lock); + + return done; +} + +void +netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, + struct nx_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct netxen_rx_buffer *buffer; + int producer, count = 0; + netxen_ctx_msg msg = 0; + struct list_head *head; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct netxen_rx_buffer, list); + + if (!buffer->skb) { + if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + NXWRIO(adapter, rds_ring->crb_rcv_producer, + (producer-1) & (rds_ring->num_desc-1)); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* + * Write a doorbell msg to tell phanmon of change in + * receive ring producer + * Only for firmware version < 4.0.0 + */ + netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); + netxen_set_msg_privid(msg); + netxen_set_msg_count(msg, + ((producer - 1) & + (rds_ring->num_desc - 1))); + netxen_set_msg_ctxid(msg, adapter->portnum); + netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); + NXWRIO(adapter, DB_NORMALIZE(adapter, + NETXEN_RCV_PRODUCER_OFFSET), msg); + } + } +} + +static void +netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, + struct nx_host_rds_ring *rds_ring) +{ + struct rcv_desc *pdesc; + struct netxen_rx_buffer *buffer; + int producer, count = 0; + struct list_head *head; + + if (!spin_trylock(&rds_ring->lock)) + return; + + producer = rds_ring->producer; + + head = &rds_ring->free_list; + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct netxen_rx_buffer, list); + + if (!buffer->skb) { + if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + NXWRIO(adapter, rds_ring->crb_rcv_producer, + (producer - 1) & (rds_ring->num_desc - 1)); + } + spin_unlock(&rds_ring->lock); +} + +void netxen_nic_clear_stats(struct netxen_adapter *adapter) +{ + memset(&adapter->stats, 0, sizeof(adapter->stats)); +} + diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c new file mode 100644 index 000000000..1e1018ee3 --- /dev/null +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -0,0 +1,3526 @@ +/* + * Copyright (C) 2003 - 2009 NetXen, Inc. + * Copyright (C) 2009 - QLogic Corporation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+/*(DEBLOBBED)*/
+
+char netxen_nic_driver_name[] = "netxen_nic";
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
+	NETXEN_NIC_LINUX_VERSIONID;
+
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int netxen_nic_probe(struct pci_dev *pdev,
+		const struct pci_device_id *ent);
+static void netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
+		struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_fw_poll_work(struct work_struct *work);
+static void netxen_schedule_work(struct netxen_adapter *adapter,
+		work_func_t func, int delay);
+static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
+static int netxen_nic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netxen_nic_poll_controller(struct net_device *netdev);
+#endif
+
+static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
+static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
+static int netxen_can_start_firmware(struct netxen_adapter *adapter);
+
+static irqreturn_t netxen_intr(int irq, void *data);
+static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+		struct rtnl_link_stats64 *stats);
+static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+	{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
+	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static const struct pci_device_id netxen_pci_tbl[] = {
+	ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
+	ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
+	ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
+
ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), + ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), + ENTRY(PCI_DEVICE_ID_NX3031), + {0,} +}; + +MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); + +static uint32_t crb_cmd_producer[4] = { + CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, + CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 +}; + +void +netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring) +{ + NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); +} + +static uint32_t crb_cmd_consumer[4] = { + CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, + CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 +}; + +static inline void +netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, + struct nx_host_tx_ring *tx_ring) +{ + NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); +} + +static uint32_t msi_tgt_status[8] = { + ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, + ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, + ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, + ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 +}; + +static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; + +static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) +{ + struct netxen_adapter *adapter = sds_ring->adapter; + + NXWRIO(adapter, sds_ring->crb_intr_mask, 0); +} + +static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) +{ + struct netxen_adapter *adapter = sds_ring->adapter; + + NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1); + + if (!NETXEN_IS_MSI_FAMILY(adapter)) + NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff); +} + +static int +netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) +{ + int size = sizeof(struct nx_host_sds_ring) * count; + + recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); + + return recv_ctx->sds_rings == NULL; +} + +static void +netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) +{ + kfree(recv_ctx->sds_rings); + recv_ctx->sds_rings = NULL; +} + +static int +netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) + return -ENOMEM; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_add(netdev, &sds_ring->napi, + netxen_nic_poll, NAPI_POLL_WEIGHT); + } + + return 0; +} + +static void +netxen_napi_del(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + netxen_free_sds_rings(&adapter->recv_ctx); +} + +static void +netxen_napi_enable(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + netxen_nic_enable_int(sds_ring); + } +} + +static void +netxen_napi_disable(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; 
ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netxen_nic_disable_int(sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } +} + +static int nx_set_dma_mask(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + uint64_t mask, cmask; + + adapter->pci_using_dac = 0; + + mask = DMA_BIT_MASK(32); + cmask = DMA_BIT_MASK(32); + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { +#ifndef CONFIG_IA64 + mask = DMA_BIT_MASK(35); +#endif + } else { + mask = DMA_BIT_MASK(39); + cmask = mask; + } + + if (pci_set_dma_mask(pdev, mask) == 0 && + pci_set_consistent_dma_mask(pdev, cmask) == 0) { + adapter->pci_using_dac = 1; + return 0; + } + + return -EIO; +} + +/* Update addressable range if firmware supports it */ +static int +nx_update_dma_mask(struct netxen_adapter *adapter) +{ + int change, shift, err; + uint64_t mask, old_mask, old_cmask; + struct pci_dev *pdev = adapter->pdev; + + change = 0; + + shift = NXRD32(adapter, CRB_DMA_SHIFT); + if (shift > 32) + return 0; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) + change = 1; + else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) + change = 1; + + if (change) { + old_mask = pdev->dma_mask; + old_cmask = pdev->dev.coherent_dma_mask; + + mask = DMA_BIT_MASK(32+shift); + + err = pci_set_dma_mask(pdev, mask); + if (err) + goto err_out; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + + err = pci_set_consistent_dma_mask(pdev, mask); + if (err) + goto err_out; + } + dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); + } + + return 0; + +err_out: + pci_set_dma_mask(pdev, old_mask); + pci_set_consistent_dma_mask(pdev, old_cmask); + return err; +} + +static int +netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) +{ + u32 val, timeout; + + if (first_boot == 0x55555555) { + /* This is the first boot after power up */ + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); + + if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + /* PCI bus master workaround */ + first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); + if (!(first_boot & 0x4)) { + first_boot |= 0x4; + NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); + NXRD32(adapter, NETXEN_PCIE_REG(0x4)); + } + + /* This is the first boot after power up */ + first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); + if (first_boot != 0x80000f) { + /* clear the register for future unloads/loads */ + NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); + return -EIO; + } + + /* Start P2 boot loader */ + val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); + NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); + timeout = 0; + do { + msleep(1); + val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); + + if (++timeout > 5000) + return -EIO; + + } while (val == NETXEN_BDINFO_MAGIC); + } + return 0; +} + +static void netxen_set_port_mode(struct netxen_adapter *adapter) +{ + u32 val, data; + + val = adapter->ahw.board_type; + if ((val == NETXEN_BRDTYPE_P3_HMEZ) || + (val == NETXEN_BRDTYPE_P3_XG_LOM)) { + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { + data = NETXEN_PORT_MODE_802_3_AP; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if (port_mode == NETXEN_PORT_MODE_XG) { + data = NETXEN_PORT_MODE_XG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { + data = NETXEN_PORT_MODE_AUTO_NEG_1G; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { + data = 
NETXEN_PORT_MODE_AUTO_NEG_XG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } else { + data = NETXEN_PORT_MODE_AUTO_NEG; + NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); + } + + if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && + (wol_port_mode != NETXEN_PORT_MODE_XG) && + (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && + (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { + wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; + } + NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); + } +} + +#define PCI_CAP_ID_GEN 0x10 + +static void netxen_pcie_strap_init(struct netxen_adapter *adapter) +{ + u32 pdevfuncsave; + u32 c8c9value = 0; + u32 chicken = 0; + u32 control = 0; + int i, pos; + struct pci_dev *pdev; + + pdev = adapter->pdev; + + chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3)); + /* clear chicken3.25:24 */ + chicken &= 0xFCFFFFFF; + /* + * if gen1 and B0, set F1020 - if gen 2, do nothing + * if gen2 set to F1000 + */ + pos = pci_find_capability(pdev, PCI_CAP_ID_GEN); + if (pos == 0xC0) { + pci_read_config_dword(pdev, pos + 0x10, &control); + if ((control & 0x000F0000) != 0x00020000) { + /* set chicken3.24 if gen1 */ + chicken |= 0x01000000; + } + dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n"); + c8c9value = 0xF1000; + } else { + /* set chicken3.24 if gen1 */ + chicken |= 0x01000000; + dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n"); + if (adapter->ahw.revision_id == NX_P3_B0) + c8c9value = 0xF1020; + else + c8c9value = 0; + } + + NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken); + + if (!c8c9value) + return; + + pdevfuncsave = pdev->devfn; + if (pdevfuncsave & 0x07) + return; + + for (i = 0; i < 8; i++) { + pci_read_config_dword(pdev, pos + 8, &control); + pci_read_config_dword(pdev, pos + 8, &control); + pci_write_config_dword(pdev, pos + 8, c8c9value); + pdev->devfn++; + } + pdev->devfn = pdevfuncsave; +} + +static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) +{ + u32 control; + + if (pdev->msix_cap) { + pci_read_config_dword(pdev, pdev->msix_cap, &control); + if (enable) + control |= PCI_MSIX_FLAGS_ENABLE; + else + control = 0; + pci_write_config_dword(pdev, pdev->msix_cap, control); + } +} + +static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) +{ + int i; + + for (i = 0; i < count; i++) + adapter->msix_entries[i].entry = i; +} + +static int +netxen_read_mac_addr(struct netxen_adapter *adapter) +{ + int i; + unsigned char *p; + u64 mac_addr; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) + return -EIO; + } else { + if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) + return -EIO; + } + + p = (unsigned char *)&mac_addr; + for (i = 0; i < 6; i++) + netdev->dev_addr[i] = *(p + 5 - i); + + memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); + + /* set station address */ + + if (!is_valid_ether_addr(netdev->dev_addr)) + dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); + + return 0; +} + +static int netxen_nic_set_mac(struct net_device *netdev, void *p) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) { + netif_device_detach(netdev); + netxen_napi_disable(adapter); + } + + memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); + memcpy(netdev->dev_addr, addr->sa_data, 
netdev->addr_len); + adapter->macaddr_set(adapter, addr->sa_data); + + if (netif_running(netdev)) { + netif_device_attach(netdev); + netxen_napi_enable(adapter); + } + return 0; +} + +static void netxen_set_multicast_list(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + + adapter->set_multi(dev); +} + +static netdev_features_t netxen_fix_features(struct net_device *dev, + netdev_features_t features) +{ + if (!(features & NETIF_F_RXCSUM)) { + netdev_info(dev, "disabling LRO as RXCSUM is off\n"); + + features &= ~NETIF_F_LRO; + } + + return features; +} + +static int netxen_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + int hw_lro; + + if (!((dev->features ^ features) & NETIF_F_LRO)) + return 0; + + hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED + : NETXEN_NIC_LRO_DISABLED; + + if (netxen_config_hw_lro(adapter, hw_lro)) + return -EIO; + + if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) + return -EIO; + + return 0; +} + +static const struct net_device_ops netxen_netdev_ops = { + .ndo_open = netxen_nic_open, + .ndo_stop = netxen_nic_close, + .ndo_start_xmit = netxen_nic_xmit_frame, + .ndo_get_stats64 = netxen_nic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = netxen_set_multicast_list, + .ndo_set_mac_address = netxen_nic_set_mac, + .ndo_change_mtu = netxen_nic_change_mtu, + .ndo_tx_timeout = netxen_tx_timeout, + .ndo_fix_features = netxen_fix_features, + .ndo_set_features = netxen_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = netxen_nic_poll_controller, +#endif +}; + +static inline bool netxen_function_zero(struct pci_dev *pdev) +{ + return (PCI_FUNC(pdev->devfn) == 0) ? 
true : false; +} + +static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, + u32 mode) +{ + NXWR32(adapter, NETXEN_INTR_MODE_REG, mode); +} + +static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter) +{ + return NXRD32(adapter, NETXEN_INTR_MODE_REG); +} + +static void +netxen_initialize_interrupt_registers(struct netxen_adapter *adapter) +{ + struct netxen_legacy_intr_set *legacy_intrp; + u32 tgt_status_reg, int_state_reg; + + if (adapter->ahw.revision_id >= NX_P3_B0) + legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; + else + legacy_intrp = &legacy_intr[0]; + + tgt_status_reg = legacy_intrp->tgt_status_reg; + int_state_reg = ISR_INT_STATE_REG; + + adapter->int_vec_bit = legacy_intrp->int_vec_bit; + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg); + adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, + legacy_intrp->tgt_mask_reg); + adapter->pci_int_reg = netxen_get_ioaddr(adapter, + legacy_intrp->pci_int_reg); + adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); + + if (adapter->ahw.revision_id >= NX_P3_B1) + adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, + int_state_reg); + else + adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, + CRB_INT_VECTOR); +} + +static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, + int num_msix) +{ + struct pci_dev *pdev = adapter->pdev; + u32 value; + int err; + + if (adapter->msix_supported) { + netxen_init_msix_entries(adapter, num_msix); + err = pci_enable_msix_range(pdev, adapter->msix_entries, + num_msix, num_msix); + if (err > 0) { + adapter->flags |= NETXEN_NIC_MSIX_ENABLED; + netxen_set_msix_bit(pdev, 1); + + if (adapter->rss_supported) + adapter->max_sds_rings = num_msix; + + dev_info(&pdev->dev, "using msi-x interrupts\n"); + return 0; + } + /* fall through for msi */ + } + + if (use_msi && !pci_enable_msi(pdev)) { + value = msi_tgt_status[adapter->ahw.pci_func]; + adapter->flags |= NETXEN_NIC_MSI_ENABLED; + adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value); + adapter->msix_entries[0].vector = pdev->irq; + dev_info(&pdev->dev, "using msi interrupts\n"); + return 0; + } + + dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n"); + return -EIO; +} + +static int netxen_setup_intr(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int num_msix; + + if (adapter->rss_supported) + num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? 
+ MSIX_ENTRIES_PER_ADAPTER : 2; + else + num_msix = 1; + + adapter->max_sds_rings = 1; + adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); + + netxen_initialize_interrupt_registers(adapter); + netxen_set_msix_bit(pdev, 0); + + if (netxen_function_zero(pdev)) { + if (!netxen_setup_msi_interrupts(adapter, num_msix)) + netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); + else + netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE); + } else { + if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE && + netxen_setup_msi_interrupts(adapter, num_msix)) { + dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n"); + return -EIO; + } + } + + if (!NETXEN_IS_MSI_FAMILY(adapter)) { + adapter->msix_entries[0].vector = pdev->irq; + dev_info(&pdev->dev, "using legacy interrupts\n"); + } + return 0; +} + +static void +netxen_teardown_intr(struct netxen_adapter *adapter) +{ + if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + if (adapter->flags & NETXEN_NIC_MSI_ENABLED) + pci_disable_msi(adapter->pdev); +} + +static void +netxen_cleanup_pci_map(struct netxen_adapter *adapter) +{ + if (adapter->ahw.db_base != NULL) + iounmap(adapter->ahw.db_base); + if (adapter->ahw.pci_base0 != NULL) + iounmap(adapter->ahw.pci_base0); + if (adapter->ahw.pci_base1 != NULL) + iounmap(adapter->ahw.pci_base1); + if (adapter->ahw.pci_base2 != NULL) + iounmap(adapter->ahw.pci_base2); +} + +static int +netxen_setup_pci_map(struct netxen_adapter *adapter) +{ + void __iomem *db_ptr = NULL; + + resource_size_t mem_base, db_base; + unsigned long mem_len, db_len = 0; + + struct pci_dev *pdev = adapter->pdev; + int pci_func = adapter->ahw.pci_func; + struct netxen_hardware_context *ahw = &adapter->ahw; + + int err = 0; + + /* + * Set the CRB window to invalid. If any register in window 0 is + * accessed it should set the window to 0 and then reset it to 1. 
+ */ + adapter->ahw.crb_win = -1; + adapter->ahw.ocm_win = -1; + + /* remap phys address */ + mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ + mem_len = pci_resource_len(pdev, 0); + + /* 128 Meg of memory */ + if (mem_len == NETXEN_PCI_128MB_SIZE) { + + ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); + ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, + SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, + THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || + ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; + + } else if (mem_len == NETXEN_PCI_32MB_SIZE) { + + ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - + SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + } else if (mem_len == NETXEN_PCI_2MB_SIZE) { + + ahw->pci_base0 = pci_ioremap_bar(pdev, 0); + if (ahw->pci_base0 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + return -EIO; + } + ahw->pci_len0 = mem_len; + } else { + return -EIO; + } + + netxen_setup_hwops(adapter); + + dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); + + if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { + adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, + NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); + + } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, + NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); + } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + goto skip_doorbell; + + db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ + db_len = pci_resource_len(pdev, 4); + + if (db_len == 0) { + printk(KERN_ERR "%s: doorbell is disabled\n", + netxen_nic_driver_name); + err = -EIO; + goto err_out; + } + + db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); + if (!db_ptr) { + printk(KERN_ERR "%s: Failed to allocate doorbell map.", + netxen_nic_driver_name); + err = -EIO; + goto err_out; + } + +skip_doorbell: + adapter->ahw.db_base = db_ptr; + adapter->ahw.db_len = db_len; + return 0; + +err_out: + netxen_cleanup_pci_map(adapter); + return err; +} + +static void +netxen_check_options(struct netxen_adapter *adapter) +{ + u32 fw_major, fw_minor, fw_build, prev_fw_version; + char brd_name[NETXEN_MAX_SHORT_NAME]; + char serial_num[32]; + int i, offset, val, err; + __le32 *ptr32; + struct pci_dev *pdev = adapter->pdev; + + adapter->driver_mismatch = 0; + + ptr32 = (__le32 *)&serial_num; + offset = NX_FW_SERIAL_NUM_OFFSET; + for (i = 0; i < 8; i++) { + if (netxen_rom_fast_read(adapter, offset, &val) == -1) { + dev_err(&pdev->dev, "error reading board info\n"); + adapter->driver_mismatch = 1; + return; + } + ptr32[i] = cpu_to_le32(val); + offset += sizeof(u32); + } + + fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); + fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); + fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); + prev_fw_version = adapter->fw_version; + adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); + + /* Get FW Mini Coredump template and store it */ + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (adapter->mdump.md_template == NULL || + adapter->fw_version > prev_fw_version) { + 
kfree(adapter->mdump.md_template); + adapter->mdump.md_template = NULL; + err = netxen_setup_minidump(adapter); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to setup minidump rcode = %d\n", err); + } + } + + if (adapter->portnum == 0) { + if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type, + brd_name)) + strcpy(serial_num, "Unknown"); + + pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", + module_name(THIS_MODULE), + brd_name, serial_num, adapter->ahw.revision_id); + } + + if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) { + adapter->driver_mismatch = 1; + dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n", + fw_major, fw_minor, fw_build); + return; + } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + i = NXRD32(adapter, NETXEN_SRE_MISC); + adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; + } + + dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n", + NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build, + adapter->ahw.cut_through ? "cut-through" : "legacy"); + + if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) + adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); + + if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + } + + adapter->msix_supported = 0; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + adapter->msix_supported = !!use_msi_x; + adapter->rss_supported = !!use_msi_x; + } else { + u32 flashed_ver = 0; + netxen_rom_fast_read(adapter, + NX_FW_VERSION_OFFSET, (int *)&flashed_ver); + flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); + + if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) { + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB31_10G: + case NETXEN_BRDTYPE_P2_SB31_10G_CX4: + adapter->msix_supported = !!use_msi_x; + adapter->rss_supported = !!use_msi_x; + break; + default: + break; + } + } + } + + adapter->num_txd = MAX_CMD_DESCRIPTORS; + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; + adapter->max_rds_rings = 3; + } else { + adapter->num_lro_rxd = 0; + adapter->max_rds_rings = 2; + } +} + +static int +netxen_start_firmware(struct netxen_adapter *adapter) +{ + int val, err, first_boot; + struct pci_dev *pdev = adapter->pdev; + + /* required for NX2031 dummy dma */ + err = nx_set_dma_mask(adapter); + if (err) + return err; + + err = netxen_can_start_firmware(adapter); + + if (err < 0) + return err; + + if (!err) + goto wait_init; + + first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); + + err = netxen_check_hw_init(adapter, first_boot); + if (err) { + dev_err(&pdev->dev, "error in init HW init sequence\n"); + return err; + } + + netxen_request_firmware(adapter); + + err = netxen_need_fw_reset(adapter); + if (err < 0) + goto err_out; + if (err == 0) + goto pcie_strap_init; + + if (first_boot != 0x55555555) { + NXWR32(adapter, CRB_CMDPEG_STATE, 0); + netxen_pinit_from_rom(adapter); + msleep(1); + } + + NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555); + NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0); + NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_set_port_mode(adapter); + + err = netxen_load_firmware(adapter); + if (err) + goto err_out; + + netxen_release_firmware(adapter); + + if 
(NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + + /* Initialize multicast addr pool owners */ + val = 0x7654; + if (adapter->ahw.port_type == NETXEN_NIC_XGBE) + val |= 0x0f000000; + NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); + + } + + err = netxen_init_dummy_dma(adapter); + if (err) + goto err_out; + + /* + * Tell the hardware our version number. + */ + val = (_NETXEN_NIC_LINUX_MAJOR << 16) + | ((_NETXEN_NIC_LINUX_MINOR << 8)) + | (_NETXEN_NIC_LINUX_SUBVERSION); + NXWR32(adapter, CRB_DRIVER_VERSION, val); + +pcie_strap_init: + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_pcie_strap_init(adapter); + +wait_init: + /* Handshake with the card before we register the devices. */ + err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); + if (err) { + netxen_free_dummy_dma(adapter); + goto err_out; + } + + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY); + + nx_update_dma_mask(adapter); + + netxen_check_options(adapter); + + adapter->need_fw_reset = 0; + + /* fall through and release firmware */ + +err_out: + netxen_release_firmware(adapter); + return err; +} + +static int +netxen_nic_request_irq(struct netxen_adapter *adapter) +{ + irq_handler_t handler; + struct nx_host_sds_ring *sds_ring; + int err, ring; + + unsigned long flags = 0; + struct net_device *netdev = adapter->netdev; + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) + handler = netxen_msix_intr; + else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) + handler = netxen_msi_intr; + else { + flags |= IRQF_SHARED; + handler = netxen_intr; + } + adapter->irq = netdev->irq; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); + err = request_irq(sds_ring->irq, handler, + flags, sds_ring->name, sds_ring); + if (err) + return err; + } + + return 0; +} + +static void +netxen_nic_free_irq(struct netxen_adapter *adapter) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + free_irq(sds_ring->irq, sds_ring); + } +} + +static void +netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter) +{ + adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; + adapter->coal.normal.data.rx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; + adapter->coal.normal.data.rx_packets = + NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; + adapter->coal.normal.data.tx_time_us = + NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US; + adapter->coal.normal.data.tx_packets = + NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; +} + +/* with rtnl_lock */ +static int +__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int err; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return -EIO; + + err = adapter->init_port(adapter, adapter->physical_port); + if (err) { + printk(KERN_ERR "%s: Failed to initialize port %d\n", + netxen_nic_driver_name, adapter->portnum); + return err; + } + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + adapter->macaddr_set(adapter, adapter->mac_addr); + + adapter->set_multi(netdev); + adapter->set_mtu(adapter, netdev->mtu); + + adapter->ahw.linkup = 0; + + if (adapter->max_sds_rings > 1) + netxen_config_rss(adapter, 1); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_config_intr_coalesce(adapter); + + if (netdev->features & NETIF_F_LRO) + netxen_config_hw_lro(adapter, 
NETXEN_NIC_LRO_ENABLED); + + netxen_napi_enable(adapter); + + if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) + netxen_linkevent_request(adapter, 1); + else + netxen_nic_set_link_parameters(adapter); + + set_bit(__NX_DEV_UP, &adapter->state); + return 0; +} + +/* Usage: During resume and firmware recovery module.*/ + +static inline int +netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) +{ + int err = 0; + + rtnl_lock(); + if (netif_running(netdev)) + err = __netxen_nic_up(adapter, netdev); + rtnl_unlock(); + + return err; +} + +/* with rtnl_lock */ +static void +__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) +{ + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state)) + return; + + smp_mb(); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) + netxen_linkevent_request(adapter, 0); + + if (adapter->stop_port) + adapter->stop_port(adapter); + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_p3_free_mac_list(adapter); + + adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); + + netxen_napi_disable(adapter); + + netxen_release_tx_buffers(adapter); +} + +/* Usage: During suspend and firmware recovery module */ + +static inline void +netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) +{ + rtnl_lock(); + if (netif_running(netdev)) + __netxen_nic_down(adapter, netdev); + rtnl_unlock(); + +} + +static int +netxen_nic_attach(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err, ring; + struct nx_host_rds_ring *rds_ring; + struct nx_host_tx_ring *tx_ring; + u32 capab2; + + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) + return 0; + + err = netxen_init_firmware(adapter); + if (err) + return err; + + adapter->flags &= ~NETXEN_FW_MSS_CAP; + if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) { + capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2); + if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) + adapter->flags |= NETXEN_FW_MSS_CAP; + } + + err = netxen_napi_add(adapter, netdev); + if (err) + return err; + + err = netxen_alloc_sw_resources(adapter); + if (err) { + printk(KERN_ERR "%s: Error in setting sw resources\n", + netdev->name); + return err; + } + + err = netxen_alloc_hw_resources(adapter); + if (err) { + printk(KERN_ERR "%s: Error in setting hw resources\n", + netdev->name); + goto err_out_free_sw; + } + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + tx_ring = adapter->tx_ring; + tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, + crb_cmd_producer[adapter->portnum]); + tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter, + crb_cmd_consumer[adapter->portnum]); + + tx_ring->producer = 0; + tx_ring->sw_consumer = 0; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + netxen_nic_update_cmd_consumer(adapter, tx_ring); + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx.rds_rings[ring]; + netxen_post_rx_buffers(adapter, ring, rds_ring); + } + + err = netxen_nic_request_irq(adapter); + if (err) { + dev_err(&pdev->dev, "%s: failed to setup interrupt\n", + netdev->name); + goto err_out_free_rxbuf; + } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netxen_nic_init_coalesce_defaults(adapter); + + netxen_create_sysfs_entries(adapter); + + adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; + return 0; + +err_out_free_rxbuf: + 
netxen_release_rx_buffers(adapter); + netxen_free_hw_resources(adapter); +err_out_free_sw: + netxen_free_sw_resources(adapter); + return err; +} + +static void +netxen_nic_detach(struct netxen_adapter *adapter) +{ + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return; + + netxen_remove_sysfs_entries(adapter); + + netxen_free_hw_resources(adapter); + netxen_release_rx_buffers(adapter); + netxen_nic_free_irq(adapter); + netxen_napi_del(adapter); + netxen_free_sw_resources(adapter); + + adapter->is_up = 0; +} + +int +netxen_nic_reset_context(struct netxen_adapter *adapter) +{ + int err = 0; + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__NX_RESETTING, &adapter->state)) + return -EBUSY; + + if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __netxen_nic_down(adapter, netdev); + + netxen_nic_detach(adapter); + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (!err) + err = __netxen_nic_up(adapter, netdev); + + if (err) + goto done; + } + + netif_device_attach(netdev); + } + +done: + clear_bit(__NX_RESETTING, &adapter->state); + return err; +} + +static int +netxen_setup_netdev(struct netxen_adapter *adapter, + struct net_device *netdev) +{ + int err = 0; + struct pci_dev *pdev = adapter->pdev; + + adapter->mc_enabled = 0; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + adapter->max_mc_count = 38; + else + adapter->max_mc_count = 16; + + netdev->netdev_ops = &netxen_netdev_ops; + netdev->watchdog_timeo = 5*HZ; + + netxen_nic_change_mtu(netdev, netdev->mtu); + + netdev->ethtool_ops = &netxen_nic_ethtool_ops; + + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + + netdev->vlan_features |= netdev->hw_features; + + if (adapter->pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + + if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) + netdev->hw_features |= NETIF_F_LRO; + + netdev->features |= netdev->hw_features; + + netdev->irq = adapter->msix_entries[0].vector; + + INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); + + if (netxen_read_mac_addr(adapter)) + dev_warn(&pdev->dev, "failed to read mac addr\n"); + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register net device\n"); + return err; + } + + return 0; +} + +#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01) +#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00) + +static void netxen_read_ula_info(struct netxen_adapter *adapter) +{ + u32 temp; + + /* Print ULA info only once for an adapter */ + if (adapter->portnum != 0) + return; + + temp = NXRD32(adapter, NETXEN_ULA_KEY); + switch (temp) { + case NETXEN_ULA_ADAPTER_KEY: + dev_info(&adapter->pdev->dev, "ULA adapter"); + break; + case NETXEN_NON_ULA_ADAPTER_KEY: + dev_info(&adapter->pdev->dev, "non ULA adapter"); + break; + default: + break; + } + + return; +} + +#ifdef CONFIG_PCIEAER +static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *root = pdev->bus->self; + u32 aer_pos; + + /* root bus? 
*/ + if (!root) + return; + + if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && + adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) + return; + + if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT) + return; + + aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); + if (!aer_pos) + return; + + pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); +} +#endif + +static int +netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct netxen_adapter *adapter = NULL; + int i = 0, err; + int pci_func_id = PCI_FUNC(pdev->devfn); + uint8_t revision_id; + u32 val; + + if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { + pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n", + module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); + return -ENODEV; + } + + if ((err = pci_enable_device(pdev))) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + err = -ENODEV; + goto err_out_disable_pdev; + } + + if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) + goto err_out_disable_pdev; + + if (NX_IS_REVISION_P3(pdev->revision)) + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct netxen_adapter)); + if(!netdev) { + err = -ENOMEM; + goto err_out_free_res; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ahw.pci_func = pci_func_id; + + revision_id = pdev->revision; + adapter->ahw.revision_id = revision_id; + + rwlock_init(&adapter->ahw.crb_lock); + spin_lock_init(&adapter->ahw.mem_lock); + + spin_lock_init(&adapter->tx_clean_lock); + INIT_LIST_HEAD(&adapter->mac_list); + INIT_LIST_HEAD(&adapter->ip_list); + + err = netxen_setup_pci_map(adapter); + if (err) + goto err_out_free_netdev; + + /* This will be reset for mezz cards */ + adapter->portnum = pci_func_id; + + err = netxen_nic_get_board_info(adapter); + if (err) { + dev_err(&pdev->dev, "Error getting board config info.\n"); + goto err_out_iounmap; + } + +#ifdef CONFIG_PCIEAER + netxen_mask_aer_correctable(adapter); +#endif + + /* Mezz cards have PCI function 0,2,3 enabled */ + switch (adapter->ahw.board_type) { + case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: + case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: + if (pci_func_id >= 2) + adapter->portnum = pci_func_id - 2; + break; + default: + break; + } + + err = netxen_check_flash_fw_compatibility(adapter); + if (err) + goto err_out_iounmap; + + if (adapter->portnum == 0) { + val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + if (val != 0xffffffff && val != 0) { + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); + adapter->need_fw_reset = 1; + } + } + + err = netxen_start_firmware(adapter); + if (err) + goto err_out_decr_ref; + + /* + * See if the firmware gave us a virtual-physical port mapping. 
+ */ + adapter->physical_port = adapter->portnum; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + i = NXRD32(adapter, CRB_V2P(adapter->portnum)); + if (i != 0x55555555) + adapter->physical_port = i; + } + + netxen_nic_clear_stats(adapter); + + err = netxen_setup_intr(adapter); + + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to setup interrupts, error = %d\n", err); + goto err_out_disable_msi; + } + + netxen_read_ula_info(adapter); + + err = netxen_setup_netdev(adapter, netdev); + if (err) + goto err_out_disable_msi; + + pci_set_drvdata(pdev, adapter); + + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); + + switch (adapter->ahw.port_type) { + case NETXEN_NIC_GBE: + dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", + adapter->netdev->name); + break; + case NETXEN_NIC_XGBE: + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + break; + } + + netxen_create_diag_entries(adapter); + + return 0; + +err_out_disable_msi: + netxen_teardown_intr(adapter); + + netxen_free_dummy_dma(adapter); + +err_out_decr_ref: + nx_decr_dev_ref_cnt(adapter); + +err_out_iounmap: + netxen_cleanup_pci_map(adapter); + +err_out_free_netdev: + free_netdev(netdev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_disable_device(pdev); + return err; +} + +static +void netxen_cleanup_minidump(struct netxen_adapter *adapter) +{ + kfree(adapter->mdump.md_template); + adapter->mdump.md_template = NULL; + + if (adapter->mdump.md_capture_buff) { + vfree(adapter->mdump.md_capture_buff); + adapter->mdump.md_capture_buff = NULL; + } +} + +static void netxen_nic_remove(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter; + struct net_device *netdev; + + adapter = pci_get_drvdata(pdev); + if (adapter == NULL) + return; + + netdev = adapter->netdev; + + netxen_cancel_fw_work(adapter); + + unregister_netdev(netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + netxen_free_ip_list(adapter, false); + netxen_nic_detach(adapter); + + nx_decr_dev_ref_cnt(adapter); + + if (adapter->portnum == 0) + netxen_free_dummy_dma(adapter); + + clear_bit(__NX_RESETTING, &adapter->state); + + netxen_teardown_intr(adapter); + netxen_set_interrupt_mode(adapter, 0); + netxen_remove_diag_entries(adapter); + + netxen_cleanup_pci_map(adapter); + + netxen_release_firmware(adapter); + + if (NX_IS_REVISION_P3(pdev->revision)) { + netxen_cleanup_minidump(adapter); + pci_disable_pcie_error_reporting(pdev); + } + + pci_release_regions(pdev); + pci_disable_device(pdev); + + free_netdev(netdev); +} + +static void netxen_nic_detach_func(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + + netxen_cancel_fw_work(adapter); + + if (netif_running(netdev)) + netxen_nic_down(adapter, netdev); + + cancel_work_sync(&adapter->tx_timeout_task); + + netxen_nic_detach(adapter); + + if (adapter->portnum == 0) + netxen_free_dummy_dma(adapter); + + nx_decr_dev_ref_cnt(adapter); + + clear_bit(__NX_RESETTING, &adapter->state); +} + +static int netxen_nic_attach_func(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + adapter->ahw.crb_win = -1; + adapter->ahw.ocm_win = -1; + + err = netxen_start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "failed to 
start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (err) + goto err_out; + + err = netxen_nic_up(adapter, netdev); + if (err) + goto err_out_detach; + + netxen_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); + return 0; + +err_out_detach: + netxen_nic_detach(adapter); +err_out: + nx_decr_dev_ref_cnt(adapter); + return err; +} + +static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (nx_dev_request_aer(adapter)) + return PCI_ERS_RESULT_RECOVERED; + + netxen_nic_detach_func(adapter); + + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev) +{ + int err = 0; + + err = netxen_nic_attach_func(pdev); + + return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +static void netxen_io_resume(struct pci_dev *pdev) +{ + pci_cleanup_aer_uncorrect_error_status(pdev); +} + +static void netxen_nic_shutdown(struct pci_dev *pdev) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + + netxen_nic_detach_func(adapter); + + if (pci_save_state(pdev)) + return; + + if (netxen_nic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int +netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct netxen_adapter *adapter = pci_get_drvdata(pdev); + int retval; + + netxen_nic_detach_func(adapter); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (netxen_nic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int +netxen_nic_resume(struct pci_dev *pdev) +{ + return netxen_nic_attach_func(pdev); +} +#endif + +static int netxen_nic_open(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (adapter->driver_mismatch) + return -EIO; + + err = netxen_nic_attach(adapter); + if (err) + return err; + + err = __netxen_nic_up(adapter, netdev); + if (err) + goto err_out; + + netif_start_queue(netdev); + + return 0; + +err_out: + netxen_nic_detach(adapter); + return err; +} + +/* + * netxen_nic_close - Disables a network interface entry point + */ +static int netxen_nic_close(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + __netxen_nic_down(adapter, netdev); + return 0; +} + +static void +netxen_tso_check(struct net_device *netdev, + struct nx_host_tx_ring *tx_ring, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + u8 opcode = TX_ETHER_PKT; + __be16 protocol = skb->protocol; + u16 flags = 0, vid = 0; + u32 producer; + int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; + struct cmd_desc_type0 *hwdesc; + struct vlan_ethhdr *vh; + + if (protocol == cpu_to_be16(ETH_P_8021Q)) { + + vh = (struct vlan_ethhdr *)skb->data; + protocol = vh->h_vlan_encapsulated_proto; + flags = FLAGS_VLAN_TAGGED; + + } else if (skb_vlan_tag_present(skb)) { + flags = FLAGS_VLAN_OOB; + vid = skb_vlan_tag_get(skb); + netxen_set_tx_vlan_tci(first_desc, vid); + 
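+ /* Out-of-band VLAN: the TCI travels in the Tx descriptor (set above) rather than in the frame data. */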
vlan_oob = 1; + } + + if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && + skb_shinfo(skb)->gso_size > 0) { + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->total_hdr_length = hdr_len; + if (vlan_oob) { + first_desc->total_hdr_length += VLAN_HLEN; + first_desc->tcp_hdr_offset = VLAN_HLEN; + first_desc->ip_hdr_offset = VLAN_HLEN; + /* Only in case of TSO on vlan device */ + flags |= FLAGS_VLAN_TAGGED; + } + + opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? + TX_TCP_LSO6 : TX_TCP_LSO; + tso = 1; + + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4proto; + + if (protocol == cpu_to_be16(ETH_P_IP)) { + l4proto = ip_hdr(skb)->protocol; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCP_PKT; + else if(l4proto == IPPROTO_UDP) + opcode = TX_UDP_PKT; + } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { + l4proto = ipv6_hdr(skb)->nexthdr; + + if (l4proto == IPPROTO_TCP) + opcode = TX_TCPV6_PKT; + else if(l4proto == IPPROTO_UDP) + opcode = TX_UDPV6_PKT; + } + } + + first_desc->tcp_hdr_offset += skb_transport_offset(skb); + first_desc->ip_hdr_offset += skb_network_offset(skb); + netxen_set_tx_flags_opcode(first_desc, flags, opcode); + + if (!tso) + return; + + /* For LSO, we need to copy the MAC/IP/TCP headers into + * the descriptor ring + */ + producer = tx_ring->producer; + copied = 0; + offset = 2; + + if (vlan_oob) { + /* Create a TSO vlan header template for firmware */ + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, + hdr_len + VLAN_HLEN); + + vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); + skb_copy_from_linear_data(skb, vh, 12); + vh->h_vlan_proto = htons(ETH_P_8021Q); + vh->h_vlan_TCI = htons(vid); + skb_copy_from_linear_data_offset(skb, 12, + (char *)vh + 16, copy_len - 16); + + copied = copy_len - VLAN_HLEN; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + while (copied < hdr_len) { + + copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, + (hdr_len - copied)); + + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + skb_copy_from_linear_data_offset(skb, copied, + (char *)hwdesc + offset, copy_len); + + copied += copy_len; + offset = 0; + + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + barrier(); +} + +static int +netxen_map_tx_skb(struct pci_dev *pdev, + struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) +{ + struct netxen_skb_frag *nf; + struct skb_frag_struct *frag; + int i, nr_frags; + dma_addr_t map; + + nr_frags = skb_shinfo(skb)->nr_frags; + nf = &pbuf->frag_array[0]; + + map = pci_map_single(pdev, skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto out_err; + + nf->dma = map; + nf->length = skb_headlen(skb); + + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nf = &pbuf->frag_array[i+1]; + + map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, map)) + goto unwind; + + nf->dma = map; + nf->length = skb_frag_size(frag); + } + + return 0; + +unwind: + while (--i >= 0) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + nf->dma = 0ULL; + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + nf->dma = 0ULL; + +out_err: + return 
-ENOMEM; +} + +static inline void +netxen_clear_cmddesc(u64 *desc) +{ + desc[0] = 0ULL; + desc[2] = 0ULL; +} + +static netdev_tx_t +netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct nx_host_tx_ring *tx_ring = adapter->tx_ring; + struct netxen_cmd_buffer *pbuf; + struct netxen_skb_frag *buffrag; + struct cmd_desc_type0 *hwdesc, *first_desc; + struct pci_dev *pdev; + int i, k; + int delta = 0; + struct skb_frag_struct *frag; + + u32 producer; + int frag_count, no_of_desc; + u32 num_txd = tx_ring->num_desc; + + frag_count = skb_shinfo(skb)->nr_frags + 1; + + /* 14 frags supported for normal packet and + * 32 frags supported for TSO packet + */ + if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { + + for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { + frag = &skb_shinfo(skb)->frags[i]; + delta += skb_frag_size(frag); + } + + if (!__pskb_pull_tail(skb, delta)) + goto drop_packet; + + frag_count = 1 + skb_shinfo(skb)->nr_frags; + } + /* 4 fragments per cmd des */ + no_of_desc = (frag_count + 3) >> 2; + + if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { + netif_stop_queue(netdev); + smp_mb(); + if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_start_queue(netdev); + else + return NETDEV_TX_BUSY; + } + + producer = tx_ring->producer; + pbuf = &tx_ring->cmd_buf_arr[producer]; + + pdev = adapter->pdev; + + if (netxen_map_tx_skb(pdev, skb, pbuf)) + goto drop_packet; + + pbuf->skb = skb; + pbuf->frag_count = frag_count; + + first_desc = hwdesc = &tx_ring->desc_head[producer]; + netxen_clear_cmddesc((u64 *)hwdesc); + + netxen_set_tx_frags_len(first_desc, frag_count, skb->len); + netxen_set_tx_port(first_desc, adapter->portnum); + + for (i = 0; i < frag_count; i++) { + + k = i % 4; + + if ((k == 0) && (i > 0)) { + /* move to next desc.*/ + producer = get_next_index(producer, num_txd); + hwdesc = &tx_ring->desc_head[producer]; + netxen_clear_cmddesc((u64 *)hwdesc); + tx_ring->cmd_buf_arr[producer].skb = NULL; + } + + buffrag = &pbuf->frag_array[i]; + + hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); + switch (k) { + case 0: + hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); + break; + case 1: + hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); + break; + case 2: + hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); + break; + case 3: + hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); + break; + } + } + + tx_ring->producer = get_next_index(producer, num_txd); + + netxen_tso_check(netdev, tx_ring, first_desc, skb); + + adapter->stats.txbytes += skb->len; + adapter->stats.xmitcalled++; + + netxen_nic_update_cmd_producer(adapter, tx_ring); + + return NETDEV_TX_OK; + +drop_packet: + adapter->stats.txdropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int netxen_nic_check_temp(struct netxen_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + uint32_t temp, temp_state, temp_val; + int rv = 0; + + temp = NXRD32(adapter, CRB_TEMP_STATE); + + temp_state = nx_get_temp_state(temp); + temp_val = nx_get_temp_val(temp); + + if (temp_state == NX_TEMP_PANIC) { + printk(KERN_ALERT + "%s: Device temperature %d degrees C exceeds" + " maximum allowed. Hardware has been shut down.\n", + netdev->name, temp_val); + rv = 1; + } else if (temp_state == NX_TEMP_WARN) { + if (adapter->temp == NX_TEMP_NORMAL) { + printk(KERN_ALERT + "%s: Device temperature %d degrees C " + "exceeds operating range." 
+ " Immediate action needed.\n", + netdev->name, temp_val); + } + } else { + if (adapter->temp == NX_TEMP_WARN) { + printk(KERN_INFO + "%s: Device temperature is now %d degrees C" + " in normal range.\n", netdev->name, + temp_val); + } + } + adapter->temp = temp_state; + return rv; +} + +void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ahw.linkup && !linkup) { + printk(KERN_INFO "%s: %s NIC Link is down\n", + netxen_nic_driver_name, netdev->name); + adapter->ahw.linkup = 0; + if (netif_running(netdev)) { + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + adapter->link_changed = !adapter->has_link_events; + } else if (!adapter->ahw.linkup && linkup) { + printk(KERN_INFO "%s: %s NIC Link is up\n", + netxen_nic_driver_name, netdev->name); + adapter->ahw.linkup = 1; + if (netif_running(netdev)) { + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + adapter->link_changed = !adapter->has_link_events; + } +} + +static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) +{ + u32 val, port, linkup; + + port = adapter->physical_port; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + val = NXRD32(adapter, CRB_XG_STATE_P3); + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); + linkup = (val == XG_LINK_UP_P3); + } else { + val = NXRD32(adapter, CRB_XG_STATE); + val = (val >> port*8) & 0xff; + linkup = (val == XG_LINK_UP); + } + + netxen_advert_link_change(adapter, linkup); +} + +static void netxen_tx_timeout(struct net_device *netdev) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + if (test_bit(__NX_RESETTING, &adapter->state)) + return; + + dev_err(&netdev->dev, "transmit timeout, resetting.\n"); + schedule_work(&adapter->tx_timeout_task); +} + +static void netxen_tx_timeout_task(struct work_struct *work) +{ + struct netxen_adapter *adapter = + container_of(work, struct netxen_adapter, tx_timeout_task); + + if (!netif_running(adapter->netdev)) + return; + + if (test_and_set_bit(__NX_RESETTING, &adapter->state)) + return; + + if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) + goto request_reset; + + rtnl_lock(); + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + /* try to scrub interrupt */ + netxen_napi_disable(adapter); + + netxen_napi_enable(adapter); + + netif_wake_queue(adapter->netdev); + + clear_bit(__NX_RESETTING, &adapter->state); + } else { + clear_bit(__NX_RESETTING, &adapter->state); + if (netxen_nic_reset_context(adapter)) { + rtnl_unlock(); + goto request_reset; + } + } + adapter->netdev->trans_start = jiffies; + rtnl_unlock(); + return; + +request_reset: + adapter->need_fw_reset = 1; + clear_bit(__NX_RESETTING, &adapter->state); +} + +static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; + stats->tx_packets = adapter->stats.xmitfinished; + stats->rx_bytes = adapter->stats.rxbytes; + stats->tx_bytes = adapter->stats.txbytes; + stats->rx_dropped = adapter->stats.rxdropped; + stats->tx_dropped = adapter->stats.txdropped; + + return stats; +} + +static irqreturn_t netxen_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + struct netxen_adapter *adapter = sds_ring->adapter; + u32 status = 0; + + status = readl(adapter->isr_int_vec); + + if (!(status & adapter->int_vec_bit)) + return IRQ_NONE; + + if 
(NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + /* check interrupt state machine, to be sure */ + status = readl(adapter->crb_int_state_reg); + if (!ISR_LEGACY_INT_TRIGGERED(status)) + return IRQ_NONE; + + } else { + unsigned long our_int = 0; + + our_int = readl(adapter->crb_int_state_reg); + + /* not our interrupt */ + if (!test_and_clear_bit((7 + adapter->portnum), &our_int)) + return IRQ_NONE; + + /* claim interrupt */ + writel((our_int & 0xffffffff), adapter->crb_int_state_reg); + + /* clear interrupt */ + netxen_nic_disable_int(sds_ring); + } + + writel(0xffffffff, adapter->tgt_status_reg); + /* read twice to ensure write is flushed */ + readl(adapter->isr_int_vec); + readl(adapter->isr_int_vec); + + napi_schedule(&sds_ring->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t netxen_msi_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + struct netxen_adapter *adapter = sds_ring->adapter; + + /* clear interrupt */ + writel(0xffffffff, adapter->tgt_status_reg); + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t netxen_msix_intr(int irq, void *data) +{ + struct nx_host_sds_ring *sds_ring = data; + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static int netxen_nic_poll(struct napi_struct *napi, int budget) +{ + struct nx_host_sds_ring *sds_ring = + container_of(napi, struct nx_host_sds_ring, napi); + + struct netxen_adapter *adapter = sds_ring->adapter; + + int tx_complete; + int work_done; + + tx_complete = netxen_process_cmd_ring(adapter); + + work_done = netxen_process_rcv_ring(sds_ring, budget); + + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__NX_DEV_UP, &adapter->state)) + netxen_nic_enable_int(sds_ring); + } + + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void netxen_nic_poll_controller(struct net_device *netdev) +{ + int ring; + struct nx_host_sds_ring *sds_ring; + struct netxen_adapter *adapter = netdev_priv(netdev); + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; + + disable_irq(adapter->irq); + for (ring = 0; ring < adapter->max_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netxen_intr(adapter->irq, sds_ring); + } + enable_irq(adapter->irq); +} +#endif + +static int +nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) +{ + int count; + if (netxen_api_lock(adapter)) + return -EIO; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); + + netxen_api_unlock(adapter); + return count; +} + +static int +nx_decr_dev_ref_cnt(struct netxen_adapter *adapter) +{ + int count, state; + if (netxen_api_lock(adapter)) + return -EIO; + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + WARN_ON(count == 0); + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); + state = NXRD32(adapter, NX_CRB_DEV_STATE); + + if (count == 0 && state != NX_DEV_FAILED) + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); + + netxen_api_unlock(adapter); + return count; +} + +static int +nx_dev_request_aer(struct netxen_adapter *adapter) +{ + u32 state; + int ret = -EINVAL; + + if (netxen_api_lock(adapter)) + return ret; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + + if (state == NX_DEV_NEED_AER) + ret = 0; + else if (state == NX_DEV_READY) { + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER); + ret = 0; + } + + netxen_api_unlock(adapter); + return ret; +} + +int +nx_dev_request_reset(struct netxen_adapter *adapter) +{ + u32 state; + int ret = -EINVAL; + + if 
(netxen_api_lock(adapter)) + return ret; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + + if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) + ret = 0; + else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) { + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET); + adapter->flags |= NETXEN_FW_RESET_OWNER; + ret = 0; + } + + netxen_api_unlock(adapter); + + return ret; +} + +static int +netxen_can_start_firmware(struct netxen_adapter *adapter) +{ + int count; + int can_start = 0; + + if (netxen_api_lock(adapter)) { + nx_incr_dev_ref_cnt(adapter); + return -1; + } + + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + + if ((count < 0) || (count >= NX_MAX_PCI_FUNC)) + count = 0; + + if (count == 0) { + can_start = 1; + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING); + } + + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count); + + netxen_api_unlock(adapter); + + return can_start; +} + +static void +netxen_schedule_work(struct netxen_adapter *adapter, + work_func_t func, int delay) +{ + INIT_DELAYED_WORK(&adapter->fw_work, func); + schedule_delayed_work(&adapter->fw_work, delay); +} + +static void +netxen_cancel_fw_work(struct netxen_adapter *adapter) +{ + while (test_and_set_bit(__NX_RESETTING, &adapter->state)) + msleep(10); + + cancel_delayed_work_sync(&adapter->fw_work); +} + +static void +netxen_attach_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (netif_running(netdev)) { + err = netxen_nic_attach(adapter); + if (err) + goto done; + + err = netxen_nic_up(adapter, netdev); + if (err) { + netxen_nic_detach(adapter); + goto done; + } + + netxen_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + +done: + adapter->fw_fail_cnt = 0; + clear_bit(__NX_RESETTING, &adapter->state); + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); +} + +static void +netxen_fwinit_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + int dev_state; + int count; + dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); + if (adapter->flags & NETXEN_FW_RESET_OWNER) { + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + WARN_ON(count == 0); + if (count == 1) { + if (adapter->mdump.md_enabled) { + rtnl_lock(); + netxen_dump_fw(adapter); + rtnl_unlock(); + } + adapter->flags &= ~NETXEN_FW_RESET_OWNER; + if (netxen_api_lock(adapter)) { + clear_bit(__NX_RESETTING, &adapter->state); + NXWR32(adapter, NX_CRB_DEV_STATE, + NX_DEV_FAILED); + return; + } + count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); + dev_state = NX_DEV_COLD; + netxen_api_unlock(adapter); + } + } + + switch (dev_state) { + case NX_DEV_COLD: + case NX_DEV_READY: + if (!netxen_start_firmware(adapter)) { + netxen_schedule_work(adapter, netxen_attach_work, 0); + return; + } + break; + + case NX_DEV_NEED_RESET: + case NX_DEV_INITALIZING: + netxen_schedule_work(adapter, + netxen_fwinit_work, 2 * FW_POLL_DELAY); + return; + + case NX_DEV_FAILED: + default: + nx_incr_dev_ref_cnt(adapter); + break; + } + + if (netxen_api_lock(adapter)) { + clear_bit(__NX_RESETTING, &adapter->state); + return; + } + NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED); + netxen_api_unlock(adapter); + dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n", + adapter->netdev->name); + + 
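+ /* Initialization failed for good: the device was marked NX_DEV_FAILED above; drop the resetting bit so later reset requests are not blocked. */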
clear_bit(__NX_RESETTING, &adapter->state); +} + +static void +netxen_detach_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + int ref_cnt = 0, delay; + u32 status; + + netif_device_detach(netdev); + + netxen_nic_down(adapter, netdev); + + rtnl_lock(); + netxen_nic_detach(adapter); + rtnl_unlock(); + + status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); + + if (status & NX_RCODE_FATAL_ERROR) + goto err_ret; + + if (adapter->temp == NX_TEMP_PANIC) + goto err_ret; + + if (!(adapter->flags & NETXEN_FW_RESET_OWNER)) + ref_cnt = nx_decr_dev_ref_cnt(adapter); + + if (ref_cnt == -EIO) + goto err_ret; + + delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY); + + adapter->fw_wait_cnt = 0; + netxen_schedule_work(adapter, netxen_fwinit_work, delay); + + return; + +err_ret: + clear_bit(__NX_RESETTING, &adapter->state); +} + +static int +netxen_check_health(struct netxen_adapter *adapter) +{ + u32 state, heartbit; + u32 peg_status; + struct net_device *netdev = adapter->netdev; + + state = NXRD32(adapter, NX_CRB_DEV_STATE); + if (state == NX_DEV_NEED_AER) + return 0; + + if (netxen_nic_check_temp(adapter)) + goto detach; + + if (adapter->need_fw_reset) { + if (nx_dev_request_reset(adapter)) + return 0; + goto detach; + } + + /* NX_DEV_NEED_RESET, this state can be marked in two cases + * 1. Tx timeout 2. Fw hang + * Send request to destroy context in case of tx timeout only + * and doesn't required in case of Fw hang + */ + if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) { + adapter->need_fw_reset = 1; + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + goto detach; + } + + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); + if (heartbit != adapter->heartbit) { + adapter->heartbit = heartbit; + adapter->fw_fail_cnt = 0; + if (adapter->need_fw_reset) + goto detach; + return 0; + } + + if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) + return 0; + + if (nx_dev_request_reset(adapter)) + return 0; + + clear_bit(__NX_FW_ATTACHED, &adapter->state); + + dev_err(&netdev->dev, "firmware hang detected\n"); + peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); + dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n" + "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" + "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" + "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" + "PEG_NET_4_PC: 0x%x\n", + peg_status, + NXRD32(adapter, NETXEN_PEG_HALT_STATUS2), + NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c), + NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c), + NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c), + NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c), + NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c)); + if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67) + dev_err(&adapter->pdev->dev, + "Firmware aborted with error code 0x00006700. 
" + "Device is being reset.\n"); +detach: + if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && + !test_and_set_bit(__NX_RESETTING, &adapter->state)) + netxen_schedule_work(adapter, netxen_detach_work, 0); + return 1; +} + +static void +netxen_fw_poll_work(struct work_struct *work) +{ + struct netxen_adapter *adapter = container_of(work, + struct netxen_adapter, fw_work.work); + + if (test_bit(__NX_RESETTING, &adapter->state)) + goto reschedule; + + if (test_bit(__NX_DEV_UP, &adapter->state) && + !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) { + if (!adapter->has_link_events) { + + netxen_nic_handle_phy_intr(adapter); + + if (adapter->link_changed) + netxen_nic_set_link_parameters(adapter); + } + } + + if (netxen_check_health(adapter)) + return; + +reschedule: + netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); +} + +static ssize_t +netxen_store_bridged_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct net_device *net = to_net_dev(dev); + struct netxen_adapter *adapter = netdev_priv(net); + unsigned long new; + int ret = -EINVAL; + + if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG)) + goto err_out; + + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + goto err_out; + + if (kstrtoul(buf, 2, &new)) + goto err_out; + + if (!netxen_config_bridged_mode(adapter, !!new)) + ret = len; + +err_out: + return ret; +} + +static ssize_t +netxen_show_bridged_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *net = to_net_dev(dev); + struct netxen_adapter *adapter; + int bridged_mode = 0; + + adapter = netdev_priv(net); + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) + bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED); + + return sprintf(buf, "%d\n", bridged_mode); +} + +static struct device_attribute dev_attr_bridged_mode = { + .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = netxen_show_bridged_mode, + .store = netxen_store_bridged_mode, +}; + +static ssize_t +netxen_store_diag_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct netxen_adapter *adapter = dev_get_drvdata(dev); + unsigned long new; + + if (kstrtoul(buf, 2, &new)) + return -EINVAL; + + if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; + + return len; +} + +static ssize_t +netxen_show_diag_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct netxen_adapter *adapter = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", + !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); +} + +static struct device_attribute dev_attr_diag_mode = { + .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = netxen_show_diag_mode, + .store = netxen_store_diag_mode, +}; + +static int +netxen_sysfs_validate_crb(struct netxen_adapter *adapter, + loff_t offset, size_t size) +{ + size_t crb_size = 4; + + if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + return -EIO; + + if (offset < NETXEN_PCI_CRBSPACE) { + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return -EINVAL; + + if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) + crb_size = 8; + else + return -EINVAL; + } + + if ((size != crb_size) || (offset & (crb_size-1))) + return -EINVAL; + + return 0; +} + +static ssize_t +netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = 
container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = netxen_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && + ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) { + netxen_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = NXRD32(adapter, offset); + memcpy(buf, &data, size); + } + + return size; +} + +static ssize_t +netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u32 data; + u64 qmdata; + int ret; + + ret = netxen_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && + ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, + NETXEN_PCI_CAMQM_2M_END)) { + memcpy(&qmdata, buf, size); + netxen_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + NXWR32(adapter, offset, data); + } + + return size; +} + +static int +netxen_sysfs_validate_mem(struct netxen_adapter *adapter, + loff_t offset, size_t size) +{ + if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) + return -EIO; + + if ((size != 8) || (offset & 0x7)) + return -EIO; + + return 0; +} + +static ssize_t +netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = netxen_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + if (adapter->pci_mem_read(adapter, offset, &data)) + return -EIO; + + memcpy(buf, &data, size); + + return size; +} + +static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = netxen_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + memcpy(&data, buf, size); + + if (adapter->pci_mem_write(adapter, offset, data)) + return -EIO; + + return size; +} + + +static struct bin_attribute bin_attr_crb = { + .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = netxen_sysfs_read_crb, + .write = netxen_sysfs_write_crb, +}; + +static struct bin_attribute bin_attr_mem = { + .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = netxen_sysfs_read_mem, + .write = netxen_sysfs_write_mem, +}; + +static ssize_t +netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct netxen_adapter *adapter = dev_get_drvdata(dev); + struct net_device *netdev = adapter->netdev; + struct netxen_dimm_cfg dimm; + u8 dw, rows, cols, banks, ranks; + u32 val; + + if (size < attr->size) { + netdev_err(netdev, "Invalid size\n"); + return -EINVAL; + } + + memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); + val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY); + + /* Checks if DIMM info is valid. 
*/ + if (val & NETXEN_DIMM_VALID_FLAG) { + netdev_err(netdev, "Invalid DIMM flag\n"); + dimm.presence = 0xff; + goto out; + } + + rows = NETXEN_DIMM_NUMROWS(val); + cols = NETXEN_DIMM_NUMCOLS(val); + ranks = NETXEN_DIMM_NUMRANKS(val); + banks = NETXEN_DIMM_NUMBANKS(val); + dw = NETXEN_DIMM_DATAWIDTH(val); + + dimm.presence = (val & NETXEN_DIMM_PRESENT); + + /* Checks if DIMM info is present. */ + if (!dimm.presence) { + netdev_err(netdev, "DIMM not present\n"); + goto out; + } + + dimm.dimm_type = NETXEN_DIMM_TYPE(val); + + switch (dimm.dimm_type) { + case NETXEN_DIMM_TYPE_RDIMM: + case NETXEN_DIMM_TYPE_UDIMM: + case NETXEN_DIMM_TYPE_SO_DIMM: + case NETXEN_DIMM_TYPE_Micro_DIMM: + case NETXEN_DIMM_TYPE_Mini_RDIMM: + case NETXEN_DIMM_TYPE_Mini_UDIMM: + break; + default: + netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type); + goto out; + } + + if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM) + dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM; + else + dimm.mem_type = NETXEN_DIMM_MEMTYPE(val); + + if (val & NETXEN_DIMM_SIZE) { + dimm.size = NETXEN_DIMM_STD_MEM_SIZE; + goto out; + } + + if (!rows) { + netdev_err(netdev, "Invalid no of rows %x\n", rows); + goto out; + } + + if (!cols) { + netdev_err(netdev, "Invalid no of columns %x\n", cols); + goto out; + } + + if (!banks) { + netdev_err(netdev, "Invalid no of banks %x\n", banks); + goto out; + } + + ranks += 1; + + switch (dw) { + case 0x0: + dw = 32; + break; + case 0x1: + dw = 33; + break; + case 0x2: + dw = 36; + break; + case 0x3: + dw = 64; + break; + case 0x4: + dw = 72; + break; + case 0x5: + dw = 80; + break; + case 0x6: + dw = 128; + break; + case 0x7: + dw = 144; + break; + default: + netdev_err(netdev, "Invalid data-width %x\n", dw); + goto out; + } + + dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8; + /* Size returned in MB. 
*/ + dimm.size = (dimm.size) / 0x100000; +out: + memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg)); + return sizeof(struct netxen_dimm_cfg); + +} + +static struct bin_attribute bin_attr_dimm = { + .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, + .size = sizeof(struct netxen_dimm_cfg), + .read = netxen_sysfs_read_dimm, +}; + + +static void +netxen_create_sysfs_entries(struct netxen_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { + /* bridged_mode control */ + if (device_create_file(dev, &dev_attr_bridged_mode)) { + dev_warn(dev, + "failed to create bridged_mode sysfs entry\n"); + } + } +} + +static void +netxen_remove_sysfs_entries(struct netxen_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->capabilities & NX_FW_CAPABILITY_BDG) + device_remove_file(dev, &dev_attr_bridged_mode); +} + +static void +netxen_create_diag_entries(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct device *dev; + + dev = &pdev->dev; + if (device_create_file(dev, &dev_attr_diag_mode)) + dev_info(dev, "failed to create diag_mode sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_crb)) + dev_info(dev, "failed to create crb sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_mem)) + dev_info(dev, "failed to create mem sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_dimm)) + dev_info(dev, "failed to create dimm sysfs entry\n"); +} + + +static void +netxen_remove_diag_entries(struct netxen_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &pdev->dev; + + device_remove_file(dev, &dev_attr_diag_mode); + device_remove_bin_file(dev, &bin_attr_crb); + device_remove_bin_file(dev, &bin_attr_mem); + device_remove_bin_file(dev, &bin_attr_dimm); +} + +#ifdef CONFIG_INET + +#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) + +static int +netxen_destip_supported(struct netxen_adapter *adapter) +{ + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) + return 0; + + if (adapter->ahw.cut_through) + return 0; + + return 1; +} + +static void +netxen_free_ip_list(struct netxen_adapter *adapter, bool master) +{ + struct nx_ip_list *cur, *tmp_cur; + + list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { + if (master) { + if (cur->master) { + netxen_config_ipaddr(adapter, cur->ip_addr, + NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } + } else { + netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); + list_del(&cur->list); + kfree(cur); + } + } +} + +static bool +netxen_list_config_ip(struct netxen_adapter *adapter, + struct in_ifaddr *ifa, unsigned long event) +{ + struct net_device *dev; + struct nx_ip_list *cur, *tmp_cur; + struct list_head *head; + bool ret = false; + + dev = ifa->ifa_dev ? 
ifa->ifa_dev->dev : NULL; + + if (dev == NULL) + goto out; + + switch (event) { + case NX_IP_UP: + list_for_each(head, &adapter->ip_list) { + cur = list_entry(head, struct nx_ip_list, list); + + if (cur->ip_addr == ifa->ifa_address) + goto out; + } + + cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); + if (cur == NULL) + goto out; + if (dev->priv_flags & IFF_802_1Q_VLAN) + dev = vlan_dev_real_dev(dev); + cur->master = !!netif_is_bond_master(dev); + cur->ip_addr = ifa->ifa_address; + list_add_tail(&cur->list, &adapter->ip_list); + netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); + ret = true; + break; + case NX_IP_DOWN: + list_for_each_entry_safe(cur, tmp_cur, + &adapter->ip_list, list) { + if (cur->ip_addr == ifa->ifa_address) { + list_del(&cur->list); + kfree(cur); + netxen_config_ipaddr(adapter, ifa->ifa_address, + NX_IP_DOWN); + ret = true; + break; + } + } + } +out: + return ret; +} + +static void +netxen_config_indev_addr(struct netxen_adapter *adapter, + struct net_device *dev, unsigned long event) +{ + struct in_device *indev; + + if (!netxen_destip_supported(adapter)) + return; + + indev = in_dev_get(dev); + if (!indev) + return; + + for_ifa(indev) { + switch (event) { + case NETDEV_UP: + netxen_list_config_ip(adapter, ifa, NX_IP_UP); + break; + case NETDEV_DOWN: + netxen_list_config_ip(adapter, ifa, NX_IP_DOWN); + break; + default: + break; + } + } endfor_ifa(indev); + + in_dev_put(indev); +} + +static void +netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) + +{ + struct netxen_adapter *adapter = netdev_priv(netdev); + struct nx_ip_list *pos, *tmp_pos; + unsigned long ip_event; + + ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; + netxen_config_indev_addr(adapter, netdev, event); + + list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) { + netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); + } +} + +static inline bool +netxen_config_checkdev(struct net_device *dev) +{ + struct netxen_adapter *adapter; + + if (!is_netxen_netdev(dev)) + return false; + adapter = netdev_priv(dev); + if (!adapter) + return false; + if (!netxen_destip_supported(adapter)) + return false; + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) + return false; + + return true; +} + +/** + * netxen_config_master - configure addresses based on master + * @dev: netxen device + * @event: netdev event + */ +static void netxen_config_master(struct net_device *dev, unsigned long event) +{ + struct net_device *master, *slave; + struct netxen_adapter *adapter = netdev_priv(dev); + + rcu_read_lock(); + master = netdev_master_upper_dev_get_rcu(dev); + /* + * This is the case where the netxen nic is being + * enslaved and is dev_open()ed in bond_enslave() + * Now we should program the bond's (and its vlans') + * addresses in the netxen NIC. + */ + if (master && netif_is_bond_master(master) && + !netif_is_bond_slave(dev)) { + netxen_config_indev_addr(adapter, master, event); + for_each_netdev_rcu(&init_net, slave) + if (slave->priv_flags & IFF_802_1Q_VLAN && + vlan_dev_real_dev(slave) == master) + netxen_config_indev_addr(adapter, slave, event); + } + rcu_read_unlock(); + /* + * This is the case where the netxen nic is being + * released and is dev_close()ed in bond_release() + * just before IFF_BONDING is stripped. 
+ */ + if (!master && dev->priv_flags & IFF_BONDING) + netxen_free_ip_list(adapter, true); +} + +static int netxen_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct netxen_adapter *adapter; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net_device *orig_dev = dev; + struct net_device *slave; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + if (event == NETDEV_UP || event == NETDEV_DOWN) { + /* If this is a bonding device, look for netxen-based slaves*/ + if (netif_is_bond_master(dev)) { + rcu_read_lock(); + for_each_netdev_in_bond_rcu(dev, slave) { + if (!netxen_config_checkdev(slave)) + continue; + adapter = netdev_priv(slave); + netxen_config_indev_addr(adapter, + orig_dev, event); + } + rcu_read_unlock(); + } else { + if (!netxen_config_checkdev(dev)) + goto done; + adapter = netdev_priv(dev); + /* Act only if the actual netxen is the target */ + if (orig_dev == dev) + netxen_config_master(dev, event); + netxen_config_indev_addr(adapter, orig_dev, event); + } + } +done: + return NOTIFY_DONE; +} + +static int +netxen_inetaddr_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct netxen_adapter *adapter; + struct net_device *dev, *slave; + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + unsigned long ip_event; + + dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; + ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + if (event == NETDEV_UP || event == NETDEV_DOWN) { + /* If this is a bonding device, look for netxen-based slaves*/ + if (netif_is_bond_master(dev)) { + rcu_read_lock(); + for_each_netdev_in_bond_rcu(dev, slave) { + if (!netxen_config_checkdev(slave)) + continue; + adapter = netdev_priv(slave); + netxen_list_config_ip(adapter, ifa, ip_event); + } + rcu_read_unlock(); + } else { + if (!netxen_config_checkdev(dev)) + goto done; + adapter = netdev_priv(dev); + netxen_list_config_ip(adapter, ifa, ip_event); + } + } +done: + return NOTIFY_DONE; +} + +static struct notifier_block netxen_netdev_cb = { + .notifier_call = netxen_netdev_event, +}; + +static struct notifier_block netxen_inetaddr_cb = { + .notifier_call = netxen_inetaddr_event, +}; +#else +static void +netxen_restore_indev_addr(struct net_device *dev, unsigned long event) +{ } +static void +netxen_free_ip_list(struct netxen_adapter *adapter, bool master) +{ } +#endif + +static const struct pci_error_handlers netxen_err_handler = { + .error_detected = netxen_io_error_detected, + .slot_reset = netxen_io_slot_reset, + .resume = netxen_io_resume, +}; + +static struct pci_driver netxen_driver = { + .name = netxen_nic_driver_name, + .id_table = netxen_pci_tbl, + .probe = netxen_nic_probe, + .remove = netxen_nic_remove, +#ifdef CONFIG_PM + .suspend = netxen_nic_suspend, + .resume = netxen_nic_resume, +#endif + .shutdown = netxen_nic_shutdown, + .err_handler = &netxen_err_handler +}; + +static int __init netxen_init_module(void) +{ + printk(KERN_INFO "%s\n", netxen_nic_driver_string); + +#ifdef CONFIG_INET + register_netdevice_notifier(&netxen_netdev_cb); + register_inetaddr_notifier(&netxen_inetaddr_cb); +#endif + return pci_register_driver(&netxen_driver); +} + +module_init(netxen_init_module); + +static void __exit netxen_exit_module(void) +{ + pci_unregister_driver(&netxen_driver); + +#ifdef CONFIG_INET + 
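+ /* Notifiers are unregistered in the reverse order of their registration in netxen_init_module(). */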
unregister_inetaddr_notifier(&netxen_inetaddr_cb); + unregister_netdevice_notifier(&netxen_netdev_cb); +#endif +} + +module_exit(netxen_exit_module); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c new file mode 100644 index 000000000..484771321 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -0,0 +1,3949 @@ +/* + * QLogic QLA3xxx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qla3xxx for copyright and licensing details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qla3xxx.h" + +#define DRV_NAME "qla3xxx" +#define DRV_STRING "QLogic ISP3XXX Network Driver" +#define DRV_VERSION "v2.03.00-k5" + +static const char ql3xxx_driver_name[] = DRV_NAME; +static const char ql3xxx_driver_version[] = DRV_VERSION; + +#define TIMED_OUT_MSG \ +"Timed out waiting for management port to get free before issuing command\n" + +MODULE_AUTHOR("QLogic Corporation"); +MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg + = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK + | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static int msi; +module_param(msi, int, 0); +MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); + +static const struct pci_device_id ql3xxx_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); + +/* + * These are the known PHY's which are used + */ +enum PHY_DEVICE_TYPE { + PHY_TYPE_UNKNOWN = 0, + PHY_VITESSE_VSC8211, + PHY_AGERE_ET1011C, + MAX_PHY_DEV_TYPES +}; + +struct PHY_DEVICE_INFO { + const enum PHY_DEVICE_TYPE phyDevice; + const u32 phyIdOUI; + const u16 phyIdModel; + const char *name; +}; + +static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { + {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, + {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, + {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, +}; + + +/* + * Caller must take hw_lock. 
+ */ +static int ql_sem_spinlock(struct ql3_adapter *qdev, + u32 sem_mask, u32 sem_bits) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + unsigned int seconds = 3; + + do { + writel((sem_mask | sem_bits), + &port_regs->CommonRegs.semaphoreReg); + value = readl(&port_regs->CommonRegs.semaphoreReg); + if ((value & (sem_mask >> 16)) == sem_bits) + return 0; + ssleep(1); + } while (--seconds); + return -1; +} + +static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); + readl(&port_regs->CommonRegs.semaphoreReg); +} + +static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); + value = readl(&port_regs->CommonRegs.semaphoreReg); + return ((value & (sem_mask >> 16)) == sem_bits); +} + +/* + * Caller holds hw_lock. + */ +static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) +{ + int i = 0; + + do { + if (ql_sem_lock(qdev, + QL_DRVR_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 1)) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "driver lock acquired\n"); + return 1; + } + ssleep(1); + } while (++i < 10); + + netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); + return 0; +} + +static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + writel(((ISP_CONTROL_NP_MASK << 16) | page), + &port_regs->CommonRegs.ispControlStatus); + readl(&port_regs->CommonRegs.ispControlStatus); + qdev->current_page = page; +} + +static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + value = readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + return value; +} + +static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + return readl(reg); +} + +static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + u32 value; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + value = readl(reg); + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return value; +} + +static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + return readl(reg); +} + +static void ql_write_common_reg_l(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + writel(value, reg); + readl(reg); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); +} + +static void ql_write_common_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + writel(value, reg); + readl(reg); +} + +static void ql_write_nvram_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + writel(value, reg); + readl(reg); + udelay(1); +} + +static void ql_write_page0_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 0) + ql_set_register_page(qdev, 0); + writel(value, reg); + readl(reg); +} + +/* + * Caller holds hw_lock. Only called during init. 
+ */ +static void ql_write_page1_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 1) + ql_set_register_page(qdev, 1); + writel(value, reg); + readl(reg); +} + +/* + * Caller holds hw_lock. Only called during init. + */ +static void ql_write_page2_reg(struct ql3_adapter *qdev, + u32 __iomem *reg, u32 value) +{ + if (qdev->current_page != 2) + ql_set_register_page(qdev, 2); + writel(value, reg); + readl(reg); +} + +static void ql_disable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + (ISP_IMR_ENABLE_INT << 16)); + +} + +static void ql_enable_interrupts(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, + ((0xff << 16) | ISP_IMR_ENABLE_INT)); + +} + +static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, + struct ql_rcv_buf_cb *lrg_buf_cb) +{ + dma_addr_t map; + int err; + lrg_buf_cb->next = NULL; + + if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ + qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; + } else { + qdev->lrg_buf_free_tail->next = lrg_buf_cb; + qdev->lrg_buf_free_tail = lrg_buf_cb; + } + + if (!lrg_buf_cb->skb) { + lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!lrg_buf_cb->skb)) { + qdev->lrg_buf_skb_check++; + } else { + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + lrg_buf_cb->skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + dev_kfree_skb(lrg_buf_cb->skb); + lrg_buf_cb->skb = NULL; + + qdev->lrg_buf_skb_check++; + return; + } + + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + } + } + + qdev->lrg_buf_free_count++; +} + +static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter + *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; + + if (lrg_buf_cb != NULL) { + qdev->lrg_buf_free_head = lrg_buf_cb->next; + if (qdev->lrg_buf_free_head == NULL) + qdev->lrg_buf_free_tail = NULL; + qdev->lrg_buf_free_count--; + } + + return lrg_buf_cb; +} + +static u32 addrBits = EEPROM_NO_ADDR_BITS; +static u32 dataBits = EEPROM_NO_DATA_BITS; + +static void fm93c56a_deselect(struct ql3_adapter *qdev); +static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr, + unsigned short *value); + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_select(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); + ql_write_nvram_reg(qdev, spir, + ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); +} + +/* + * Caller holds hw_lock. 
+ */ +static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) +{ + int i; + u32 mask; + u32 dataBit; + u32 previousBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + /* Clock in a zero, then do the start bit */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL)); + + mask = 1 << (FM93C56A_CMD_BITS - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < FM93C56A_CMD_BITS; i++) { + dataBit = (cmd & mask) + ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* If the bit changed, change the DO state to match */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); + previousBit = dataBit; + } + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); + cmd = cmd << 1; + } + + mask = 1 << (addrBits - 1); + /* Force the previous data bit to be different */ + previousBit = 0xffff; + for (i = 0; i < addrBits; i++) { + dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 + : AUBURN_EEPROM_DO_0; + if (previousBit != dataBit) { + /* + * If the bit changed, then change the DO state to + * match + */ + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | + qdev->eeprom_cmd_data | dataBit)); + previousBit = dataBit; + } + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_RISE)); + ql_write_nvram_reg(qdev, spir, + (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + dataBit | AUBURN_EEPROM_CLK_FALL)); + eepromAddr = eepromAddr << 1; + } +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_deselect(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; + ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); +} + +/* + * Caller holds hw_lock. + */ +static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) +{ + int i; + u32 data = 0; + u32 dataBit; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + + /* Read the data bits */ + /* The first bit is a dummy. Clock right over it. */ + for (i = 0; i < dataBits; i++) { + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_RISE); + ql_write_nvram_reg(qdev, spir, + ISP_NVRAM_MASK | qdev->eeprom_cmd_data | + AUBURN_EEPROM_CLK_FALL); + dataBit = (ql_read_common_reg(qdev, spir) & + AUBURN_EEPROM_DI_1) ? 1 : 0; + data = (data << 1) | dataBit; + } + *value = (u16)data; +} + +/* + * Caller holds hw_lock. 
+ */ +static void eeprom_readword(struct ql3_adapter *qdev, + u32 eepromAddr, unsigned short *value) +{ + fm93c56a_select(qdev); + fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); + fm93c56a_datain(qdev, value); + fm93c56a_deselect(qdev); +} + +static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) +{ + __le16 *p = (__le16 *)ndev->dev_addr; + p[0] = cpu_to_le16(addr[0]); + p[1] = cpu_to_le16(addr[1]); + p[2] = cpu_to_le16(addr[2]); +} + +static int ql_get_nvram_params(struct ql3_adapter *qdev) +{ + u16 *pEEPROMData; + u16 checksum = 0; + u32 index; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + pEEPROMData = (u16 *)&qdev->nvram_data; + qdev->eeprom_cmd_data = 0; + if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 10)) { + pr_err("%s: Failed ql_sem_spinlock()\n", __func__); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + for (index = 0; index < EEPROM_SIZE; index++) { + eeprom_readword(qdev, index, pEEPROMData); + checksum += *pEEPROMData; + pEEPROMData++; + } + ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); + + if (checksum != 0) { + netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", + checksum); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return -1; + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return checksum; +} + +static const u32 PHYAddr[2] = { + PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS +}; + +static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 temp; + int count = 1000; + + while (count) { + temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); + if (!(temp & MAC_MII_STATUS_BSY)) + return 0; + udelay(10); + count--; + } + return -1; +} + +static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 scanControl; + + if (qdev->numPorts > 1) { + /* Auto scan will cycle through multiple ports */ + scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; + } else { + scanControl = MAC_MII_CONTROL_SC; + } + + /* + * Scan register 1 of PHY/PETBI, + * Set up to scan both devices + * The autoscan starts from the first register, completes + * the last one before rolling over to the first + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (scanControl) | + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16)); +} + +static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) +{ + u8 ret; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + /* See if scan mode is enabled before we turn it off */ + if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & + (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) { + /* Scan is enabled */ + ret = 1; + } else { + /* Scan is disabled */ + ret = 0; + } + + /* + * When disabling scan mode you must first change the MII register + * address + */ + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + PHYAddr[0] | MII_SCAN_REGISTER); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS | + MAC_MII_CONTROL_RC) << 16)); + + return ret; +} + +static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, + u16 regAddr, u16 value, u32 phyAddr) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 scanWasEnabled; + + 
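+ /* Pause MII auto-scan for this directed write; re-enabled below if it was on */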
scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + phyAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete 9/10/04 SJP */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, + u16 *value, u32 phyAddr) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u8 scanWasEnabled; + u32 temp; + + scanWasEnabled = ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + phyAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16)); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); + + /* Wait for the read to complete */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); + *value = (u16) temp; + + if (scanWasEnabled) + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + qdev->PHYAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); + + /* Wait for write to complete. 
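+ * ql_wait_for_mii_ready() polls MAC_MII_STATUS_BSY, giving up after
+ * roughly 10 ms (1000 iterations of udelay(10)).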
*/ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) +{ + u32 temp; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_mii_disable_scan_mode(qdev); + + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, + qdev->PHYAddr | regAddr); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16)); + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC); + + /* Wait for the read to complete */ + if (ql_wait_for_mii_ready(qdev)) { + netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); + return -1; + } + + temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); + *value = (u16) temp; + + ql_mii_enable_scan_mode(qdev); + + return 0; +} + +static void ql_petbi_reset(struct ql3_adapter *qdev) +{ + ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); +} + +static void ql_petbi_start_neg(struct ql3_adapter *qdev) +{ + u16 reg; + + /* Enable Auto-negotiation sense */ + ql_mii_read_reg(qdev, PETBI_TBI_CTRL, ®); + reg |= PETBI_TBI_AUTO_SENSE; + ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); + + ql_mii_write_reg(qdev, PETBI_NEG_ADVER, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); + + ql_mii_write_reg(qdev, PETBI_CONTROL_REG, + PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | + PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); + +} + +static void ql_petbi_reset_ex(struct ql3_adapter *qdev) +{ + ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, + PHYAddr[qdev->mac_index]); +} + +static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) +{ + u16 reg; + + /* Enable Auto-negotiation sense */ + ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, ®, + PHYAddr[qdev->mac_index]); + reg |= PETBI_TBI_AUTO_SENSE; + ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, + PHYAddr[qdev->mac_index]); + + ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, + PHYAddr[qdev->mac_index]); + + ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, + PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | + PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, + PHYAddr[qdev->mac_index]); +} + +static void ql_petbi_init(struct ql3_adapter *qdev) +{ + ql_petbi_reset(qdev); + ql_petbi_start_neg(qdev); +} + +static void ql_petbi_init_ex(struct ql3_adapter *qdev) +{ + ql_petbi_reset_ex(qdev); + ql_petbi_start_neg_ex(qdev); +} + +static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, ®) < 0) + return 0; + + return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; +} + +static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) +{ + netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); + /* power down device bit 11 = 1 */ + ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); + /* enable diagnostic mode bit 2 = 1 */ + ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); + /* 1000MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); + /* 1000MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); + /* 100MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); + /* 100MB amplitude adjust (see Agere errata) */ + 
ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); + /* 10MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); + /* 10MB amplitude adjust (see Agere errata) */ + ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); + /* point to hidden reg 0x2806 */ + ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); + /* Write new PHYAD w/bit 5 set */ + ql_mii_write_reg_ex(qdev, 0x11, + 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); + /* + * Disable diagnostic mode bit 2 = 0 + * Power up device bit 11 = 0 + * Link up (on) and activity (blink) + */ + ql_mii_write_reg(qdev, 0x12, 0x840a); + ql_mii_write_reg(qdev, 0x00, 0x1140); + ql_mii_write_reg(qdev, 0x1c, 0xfaf0); +} + +static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, + u16 phyIdReg0, u16 phyIdReg1) +{ + enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; + u32 oui; + u16 model; + int i; + + if (phyIdReg0 == 0xffff) + return result; + + if (phyIdReg1 == 0xffff) + return result; + + /* oui is split between two registers */ + oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); + + model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; + + /* Scan table for this PHY */ + for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { + if ((oui == PHY_DEVICES[i].phyIdOUI) && + (model == PHY_DEVICES[i].phyIdModel)) { + netdev_info(qdev->ndev, "Phy: %s\n", + PHY_DEVICES[i].name); + result = PHY_DEVICES[i].phyDevice; + break; + } + } + + return result; +} + +static int ql_phy_get_speed(struct ql3_adapter *qdev) +{ + u16 reg; + + switch (qdev->phyType) { + case PHY_AGERE_ET1011C: { + if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) + return 0; + + reg = (reg >> 8) & 3; + break; + } + default: + if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) + return 0; + + reg = (((reg & 0x18) >> 3) & 3); + } + + switch (reg) { + case 2: + return SPEED_1000; + case 1: + return SPEED_100; + case 0: + return SPEED_10; + default: + return -1; + } +} + +static int ql_is_full_dup(struct ql3_adapter *qdev) +{ + u16 reg; + + switch (qdev->phyType) { + case PHY_AGERE_ET1011C: { + if (ql_mii_read_reg(qdev, 0x1A, ®)) + return 0; + + return ((reg & 0x0080) && (reg & 0x1000)) != 0; + } + case PHY_VITESSE_VSC8211: + default: { + if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) + return 0; + return (reg & PHY_AUX_DUPLEX_STAT) != 0; + } + } +} + +static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) +{ + u16 reg; + + if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, ®) < 0) + return 0; + + return (reg & PHY_NEG_PAUSE) != 0; +} + +static int PHY_Setup(struct ql3_adapter *qdev) +{ + u16 reg1; + u16 reg2; + bool agereAddrChangeNeeded = false; + u32 miiAddr = 0; + int err; + + /* Determine the PHY we are using by reading the ID's */ + err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); + if (err != 0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); + return err; + } + + err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); + if (err != 0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); + return err; + } + + /* Check if we have a Agere PHY */ + if ((reg1 == 0xffff) || (reg2 == 0xffff)) { + + /* Determine which MII address we should be using + determined by the index of the card */ + if (qdev->mac_index == 0) + miiAddr = MII_AGERE_ADDR_1; + else + miiAddr = MII_AGERE_ADDR_2; + + err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); + if (err != 0) { + netdev_err(qdev->ndev, + "Could not read from reg PHY_ID_0_REG after Agere detected\n"); + return err; + } + + err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); + if (err != 
0) { + netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); + return err; + } + + /* We need to remember to initialize the Agere PHY */ + agereAddrChangeNeeded = true; + } + + /* Determine the particular PHY we have on board to apply + PHY specific initializations */ + qdev->phyType = getPhyType(qdev, reg1, reg2); + + if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { + /* need this here so address gets changed */ + phyAgereSpecificInit(qdev, miiAddr); + } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { + netdev_err(qdev->ndev, "PHY is unknown\n"); + return -EIO; + } + + return 0; +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); + else + value = (MAC_CONFIG_REG_PE << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); + else + value = (MAC_CONFIG_REG_SR << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); + else + value = (MAC_CONFIG_REG_GM << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); + else + value = (MAC_CONFIG_REG_FD << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. + */ +static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + if (enable) + value = + ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | + ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); + else + value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); + + if (qdev->mac_index) + ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); + else + ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); +} + +/* + * Caller holds hw_lock. 
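+ * ql_is_fiber() checks the PORT_STATUS_SM0/SM1 bit for this MAC index
+ * in portStatus; when it is set the driver treats the port as fiber
+ * (PETBI) rather than copper.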
+ */ +static int ql_is_fiber(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_SM0; + break; + case 1: + bitToCheck = PORT_STATUS_SM1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + return (temp & bitToCheck) != 0; +} + +static int ql_is_auto_cfg(struct ql3_adapter *qdev) +{ + u16 reg; + ql_mii_read_reg(qdev, 0x00, ®); + return (reg & 0x1000) != 0; +} + +/* + * Caller holds hw_lock. + */ +static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_AC0; + break; + case 1: + bitToCheck = PORT_STATUS_AC1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) { + netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); + return 1; + } + netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); + return 0; +} + +/* + * ql_is_neg_pause() returns 1 if pause was negotiated to be on + */ +static int ql_is_neg_pause(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return ql_is_petbi_neg_pause(qdev); + else + return ql_is_phy_neg_pause(qdev); +} + +static int ql_auto_neg_error(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_AE0; + break; + case 1: + bitToCheck = PORT_STATUS_AE1; + break; + } + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + return (temp & bitToCheck) != 0; +} + +static u32 ql_get_link_speed(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return SPEED_1000; + else + return ql_phy_get_speed(qdev); +} + +static int ql_is_link_full_dup(struct ql3_adapter *qdev) +{ + if (ql_is_fiber(qdev)) + return 1; + else + return ql_is_full_dup(qdev); +} + +/* + * Caller holds hw_lock. + */ +static int ql_link_down_detect(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = ISP_CONTROL_LINK_DN_0; + break; + case 1: + bitToCheck = ISP_CONTROL_LINK_DN_1; + break; + } + + temp = + ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); + return (temp & bitToCheck) != 0; +} + +/* + * Caller holds hw_lock. + */ +static int ql_link_down_detect_clear(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + switch (qdev->mac_index) { + case 0: + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + (ISP_CONTROL_LINK_DN_0) | + (ISP_CONTROL_LINK_DN_0 << 16)); + break; + + case 1: + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + (ISP_CONTROL_LINK_DN_1) | + (ISP_CONTROL_LINK_DN_1 << 16)); + break; + + default: + return 1; + } + + return 0; +} + +/* + * Caller holds hw_lock. 
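+ * Returns 1 when this function is link master for the port, i.e. when
+ * the PORT_STATUS_F1/F3_ENABLED bit for this MAC index is clear.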
+ */ +static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_F1_ENABLED; + break; + case 1: + bitToCheck = PORT_STATUS_F3_ENABLED; + break; + default: + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) { + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "not link master\n"); + return 0; + } + + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); + return 1; +} + +static void ql_phy_reset_ex(struct ql3_adapter *qdev) +{ + ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, + PHYAddr[qdev->mac_index]); +} + +static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) +{ + u16 reg; + u16 portConfiguration; + + if (qdev->phyType == PHY_AGERE_ET1011C) + ql_mii_write_reg(qdev, 0x13, 0x0000); + /* turn off external loopback */ + + if (qdev->mac_index == 0) + portConfiguration = + qdev->nvram_data.macCfg_port0.portConfiguration; + else + portConfiguration = + qdev->nvram_data.macCfg_port1.portConfiguration; + + /* Some HBA's in the field are set to 0 and they need to + be reinterpreted with a default value */ + if (portConfiguration == 0) + portConfiguration = PORT_CONFIG_DEFAULT; + + /* Set the 1000 advertisements */ + ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, ®, + PHYAddr[qdev->mac_index]); + reg &= ~PHY_GIG_ALL_PARAMS; + + if (portConfiguration & PORT_CONFIG_1000MB_SPEED) { + if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) + reg |= PHY_GIG_ADV_1000F; + else + reg |= PHY_GIG_ADV_1000H; + } + + ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, + PHYAddr[qdev->mac_index]); + + /* Set the 10/100 & pause negotiation advertisements */ + ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, ®, + PHYAddr[qdev->mac_index]); + reg &= ~PHY_NEG_ALL_PARAMS; + + if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED) + reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE; + + if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { + if (portConfiguration & PORT_CONFIG_100MB_SPEED) + reg |= PHY_NEG_ADV_100F; + + if (portConfiguration & PORT_CONFIG_10MB_SPEED) + reg |= PHY_NEG_ADV_10F; + } + + if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { + if (portConfiguration & PORT_CONFIG_100MB_SPEED) + reg |= PHY_NEG_ADV_100H; + + if (portConfiguration & PORT_CONFIG_10MB_SPEED) + reg |= PHY_NEG_ADV_10H; + } + + if (portConfiguration & PORT_CONFIG_1000MB_SPEED) + reg |= 1; + + ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, + PHYAddr[qdev->mac_index]); + + ql_mii_read_reg_ex(qdev, CONTROL_REG, ®, PHYAddr[qdev->mac_index]); + + ql_mii_write_reg_ex(qdev, CONTROL_REG, + reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, + PHYAddr[qdev->mac_index]); +} + +static void ql_phy_init_ex(struct ql3_adapter *qdev) +{ + ql_phy_reset_ex(qdev); + PHY_Setup(qdev); + ql_phy_start_neg_ex(qdev); +} + +/* + * Caller holds hw_lock. 
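+ * Returns LS_UP or LS_DOWN based on the per-port UP bit
+ * (PORT_STATUS_UP0/UP1) in portStatus.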
+ */ +static u32 ql_get_link_state(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 bitToCheck = 0; + u32 temp, linkState; + + switch (qdev->mac_index) { + case 0: + bitToCheck = PORT_STATUS_UP0; + break; + case 1: + bitToCheck = PORT_STATUS_UP1; + break; + } + + temp = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (temp & bitToCheck) + linkState = LS_UP; + else + linkState = LS_DOWN; + + return linkState; +} + +static int ql_port_start(struct ql3_adapter *qdev) +{ + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) { + netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); + return -1; + } + + if (ql_is_fiber(qdev)) { + ql_petbi_init(qdev); + } else { + /* Copper port */ + ql_phy_init_ex(qdev); + } + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + return 0; +} + +static int ql_finish_auto_neg(struct ql3_adapter *qdev) +{ + + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) + return -1; + + if (!ql_auto_neg_error(qdev)) { + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { + /* configure the MAC */ + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Configuring link\n"); + ql_mac_cfg_soft_reset(qdev, 1); + ql_mac_cfg_gig(qdev, + (ql_get_link_speed + (qdev) == + SPEED_1000)); + ql_mac_cfg_full_dup(qdev, + ql_is_link_full_dup + (qdev)); + ql_mac_cfg_pause(qdev, + ql_is_neg_pause + (qdev)); + ql_mac_cfg_soft_reset(qdev, 0); + + /* enable the MAC */ + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Enabling mac\n"); + ql_mac_enable(qdev, 1); + } + + qdev->port_link_state = LS_UP; + netif_start_queue(qdev->ndev); + netif_carrier_on(qdev->ndev); + netif_info(qdev, link, qdev->ndev, + "Link is up at %d Mbps, %s duplex\n", + ql_get_link_speed(qdev), + ql_is_link_full_dup(qdev) ? "full" : "half"); + + } else { /* Remote error detected */ + + if (test_bit(QL_LINK_MASTER, &qdev->flags)) { + netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, + "Remote error detected. Calling ql_port_start()\n"); + /* + * ql_port_start() is shared code and needs + * to lock the PHY on it's own. + */ + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + if (ql_port_start(qdev)) /* Restart port */ + return -1; + return 0; + } + } + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + return 0; +} + +static void ql_link_state_machine_work(struct work_struct *work) +{ + struct ql3_adapter *qdev = + container_of(work, struct ql3_adapter, link_state_work.work); + + u32 curr_link_state; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + curr_link_state = ql_get_link_state(qdev); + + if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { + netif_info(qdev, link, qdev->ndev, + "Reset in progress, skip processing link state\n"); + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + /* Restart timer on 2 second interval. 
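+ * (The mod_timer() call below actually re-arms at jiffies + HZ,
+ * i.e. about one second.)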
*/ + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); + + return; + } + + switch (qdev->port_link_state) { + default: + if (test_bit(QL_LINK_MASTER, &qdev->flags)) + ql_port_start(qdev); + qdev->port_link_state = LS_DOWN; + /* Fall Through */ + + case LS_DOWN: + if (curr_link_state == LS_UP) { + netif_info(qdev, link, qdev->ndev, "Link is up\n"); + if (ql_is_auto_neg_complete(qdev)) + ql_finish_auto_neg(qdev); + + if (qdev->port_link_state == LS_UP) + ql_link_down_detect_clear(qdev); + + qdev->port_link_state = LS_UP; + } + break; + + case LS_UP: + /* + * See if the link is currently down or went down and came + * back up + */ + if (curr_link_state == LS_DOWN) { + netif_info(qdev, link, qdev->ndev, "Link is down\n"); + qdev->port_link_state = LS_DOWN; + } + if (ql_link_down_detect(qdev)) + qdev->port_link_state = LS_DOWN; + break; + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + /* Restart timer on 2 second interval. */ + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); +} + +/* + * Caller must take hw_lock and QL_PHY_GIO_SEM. + */ +static void ql_get_phy_owner(struct ql3_adapter *qdev) +{ + if (ql_this_adapter_controls_port(qdev)) + set_bit(QL_LINK_MASTER, &qdev->flags); + else + clear_bit(QL_LINK_MASTER, &qdev->flags); +} + +/* + * Caller must take hw_lock and QL_PHY_GIO_SEM. + */ +static void ql_init_scan_mode(struct ql3_adapter *qdev) +{ + ql_mii_enable_scan_mode(qdev); + + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { + if (ql_this_adapter_controls_port(qdev)) + ql_petbi_init_ex(qdev); + } else { + if (ql_this_adapter_controls_port(qdev)) + ql_phy_init_ex(qdev); + } +} + +/* + * MII_Setup needs to be called before taking the PHY out of reset + * so that the management interface clock speed can be set properly. + * It would be better if we had a way to disable MDC until after the + * PHY is out of reset, but we don't have that capability. 
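+ * ql_mii_setup() therefore programs the management clock divider
+ * (MAC_MII_CONTROL_CLK_SEL_DIV28, 125 MHz / 28) while holding the
+ * PHY GIO semaphore.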
+ */ +static int ql_mii_setup(struct ql3_adapter *qdev) +{ + u32 reg; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) + return -1; + + if (qdev->device_id == QL3032_DEVICE_ID) + ql_write_page0_reg(qdev, + &port_regs->macMIIMgmtControlReg, 0x0f00000); + + /* Divide 125MHz clock by 28 to meet PHY timing requirements */ + reg = MAC_MII_CONTROL_CLK_SEL_DIV28; + + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, + reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + return 0; +} + +#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ + SUPPORTED_FIBRE | \ + SUPPORTED_Autoneg) +#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ + SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | \ + SUPPORTED_100baseT_Full | \ + SUPPORTED_1000baseT_Half | \ + SUPPORTED_1000baseT_Full | \ + SUPPORTED_Autoneg | \ + SUPPORTED_TP) \ + +static u32 ql_supported_modes(struct ql3_adapter *qdev) +{ + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) + return SUPPORTED_OPTICAL_MODES; + + return SUPPORTED_TP_MODES; +} + +static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) +{ + int status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_is_auto_cfg(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static u32 ql_get_speed(struct ql3_adapter *qdev) +{ + u32 status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_get_link_speed(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static int ql_get_full_dup(struct ql3_adapter *qdev) +{ + int status; + unsigned long hw_flags; + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | + (qdev->mac_index) * 2) << 7)) { + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return 0; + } + status = ql_is_link_full_dup(qdev); + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return status; +} + +static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = ql_supported_modes(qdev); + + if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { + ecmd->port = PORT_FIBRE; + } else { + ecmd->port = PORT_TP; + ecmd->phy_address = qdev->PHYAddr; + } + ecmd->advertising = ql_supported_modes(qdev); + ecmd->autoneg = ql_get_auto_cfg_status(qdev); + ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); + ecmd->duplex = ql_get_full_dup(qdev); + return 0; +} + +static void ql_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, ql3xxx_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, 
pci_name(qdev->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +static u32 ql_get_msglevel(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + return qdev->msg_enable; +} + +static void ql_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + qdev->msg_enable = value; +} + +static void ql_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + u32 reg; + if (qdev->mac_index == 0) + reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); + else + reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); + + pause->autoneg = ql_get_auto_cfg_status(qdev); + pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; + pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; +} + +static const struct ethtool_ops ql3xxx_ethtool_ops = { + .get_settings = ql_get_settings, + .get_drvinfo = ql_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = ql_get_msglevel, + .set_msglevel = ql_set_msglevel, + .get_pauseparam = ql_get_pauseparam, +}; + +static int ql_populate_free_queue(struct ql3_adapter *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; + dma_addr_t map; + int err; + + while (lrg_buf_cb) { + if (!lrg_buf_cb->skb) { + lrg_buf_cb->skb = + netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!lrg_buf_cb->skb)) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "Failed netdev_alloc_skb()\n"); + break; + } else { + /* + * We save some space to copy the ethhdr from + * first buffer + */ + skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + lrg_buf_cb->skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + dev_kfree_skb(lrg_buf_cb->skb); + lrg_buf_cb->skb = NULL; + break; + } + + + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + --qdev->lrg_buf_skb_check; + if (!qdev->lrg_buf_skb_check) + return 1; + } + } + lrg_buf_cb = lrg_buf_cb->next; + } + return 0; +} + +/* + * Caller holds hw_lock. + */ +static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if (qdev->small_buf_release_cnt >= 16) { + while (qdev->small_buf_release_cnt >= 16) { + qdev->small_buf_q_producer_index++; + + if (qdev->small_buf_q_producer_index == + NUM_SBUFQ_ENTRIES) + qdev->small_buf_q_producer_index = 0; + qdev->small_buf_release_cnt -= 8; + } + wmb(); + writel(qdev->small_buf_q_producer_index, + &port_regs->CommonRegs.rxSmallQProducerIndex); + } +} + +/* + * Caller holds hw_lock. 
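+ * Refills the large receive buffer queue from the free list, eight
+ * address elements at a time, then advances and writes the producer
+ * index.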
+ */ +static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) +{ + struct bufq_addr_element *lrg_buf_q_ele; + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + if ((qdev->lrg_buf_free_count >= 8) && + (qdev->lrg_buf_release_cnt >= 16)) { + + if (qdev->lrg_buf_skb_check) + if (!ql_populate_free_queue(qdev)) + return; + + lrg_buf_q_ele = qdev->lrg_buf_next_free; + + while ((qdev->lrg_buf_release_cnt >= 16) && + (qdev->lrg_buf_free_count >= 8)) { + + for (i = 0; i < 8; i++) { + lrg_buf_cb = + ql_get_from_lrg_buf_free_list(qdev); + lrg_buf_q_ele->addr_high = + lrg_buf_cb->buf_phy_addr_high; + lrg_buf_q_ele->addr_low = + lrg_buf_cb->buf_phy_addr_low; + lrg_buf_q_ele++; + + qdev->lrg_buf_release_cnt--; + } + + qdev->lrg_buf_q_producer_index++; + + if (qdev->lrg_buf_q_producer_index == + qdev->num_lbufq_entries) + qdev->lrg_buf_q_producer_index = 0; + + if (qdev->lrg_buf_q_producer_index == + (qdev->num_lbufq_entries - 1)) { + lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; + } + } + wmb(); + qdev->lrg_buf_next_free = lrg_buf_q_ele; + writel(qdev->lrg_buf_q_producer_index, + &port_regs->CommonRegs.rxLargeQProducerIndex); + } +} + +static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, + struct ob_mac_iocb_rsp *mac_rsp) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + + if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { + netdev_warn(qdev->ndev, + "Frame too short but it was padded and sent\n"); + } + + tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; + + /* Check the transmit response flags for any errors */ + if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { + netdev_err(qdev->ndev, + "Frame too short to be legal, frame not sent\n"); + + qdev->ndev->stats.tx_errors++; + goto frame_not_sent; + } + + if (tx_cb->seg_count == 0) { + netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", + mac_rsp->transaction_id); + + qdev->ndev->stats.tx_errors++; + goto invalid_seg_count; + } + + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_len(&tx_cb->map[0], maplen), + PCI_DMA_TODEVICE); + tx_cb->seg_count--; + if (tx_cb->seg_count) { + for (i = 1; i < tx_cb->seg_count; i++) { + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_cb->map[i], + mapaddr), + dma_unmap_len(&tx_cb->map[i], maplen), + PCI_DMA_TODEVICE); + } + } + qdev->ndev->stats.tx_packets++; + qdev->ndev->stats.tx_bytes += tx_cb->skb->len; + +frame_not_sent: + dev_kfree_skb_irq(tx_cb->skb); + tx_cb->skb = NULL; + +invalid_seg_count: + atomic_inc(&qdev->tx_count); +} + +static void ql_get_sbuf(struct ql3_adapter *qdev) +{ + if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) + qdev->small_buf_index = 0; + qdev->small_buf_release_cnt++; +} + +static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) +{ + struct ql_rcv_buf_cb *lrg_buf_cb = NULL; + lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; + qdev->lrg_buf_release_cnt++; + if (++qdev->lrg_buf_index == qdev->num_large_buffers) + qdev->lrg_buf_index = 0; + return lrg_buf_cb; +} + +/* + * The difference between 3022 and 3032 for inbound completions: + * 3022 uses two buffers per completion. The first buffer contains + * (some) header info, the second the remainder of the headers plus + * the data. For this chip we reserve some space at the top of the + * receive buffer so that the header info in buffer one can be + * prepended to the buffer two. Buffer two is the sent up while + * buffer one is returned to the hardware to be reused. 
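+ * (The QL_HEADER_SPACE reserved when the receive skbs are allocated
+ * is what provides the room for that prepend.)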
+ * 3032 receives all of it's data and headers in one buffer for a + * simpler process. 3032 also supports checksum verification as + * can be seen in ql_process_macip_rx_intr(). + */ +static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, + struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) +{ + struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; + struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; + struct sk_buff *skb; + u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); + + /* + * Get the inbound address list (small buffer). + */ + ql_get_sbuf(qdev); + + if (qdev->device_id == QL3022_DEVICE_ID) + lrg_buf_cb1 = ql_get_lbuf(qdev); + + /* start of second buffer */ + lrg_buf_cb2 = ql_get_lbuf(qdev); + skb = lrg_buf_cb2->skb; + + qdev->ndev->stats.rx_packets++; + qdev->ndev->stats.rx_bytes += length; + + skb_put(skb, length); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(lrg_buf_cb2, mapaddr), + dma_unmap_len(lrg_buf_cb2, maplen), + PCI_DMA_FROMDEVICE); + prefetch(skb->data); + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, qdev->ndev); + + netif_receive_skb(skb); + lrg_buf_cb2->skb = NULL; + + if (qdev->device_id == QL3022_DEVICE_ID) + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); +} + +static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, + struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) +{ + struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; + struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; + struct sk_buff *skb1 = NULL, *skb2; + struct net_device *ndev = qdev->ndev; + u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); + u16 size = 0; + + /* + * Get the inbound address list (small buffer). + */ + + ql_get_sbuf(qdev); + + if (qdev->device_id == QL3022_DEVICE_ID) { + /* start of first buffer on 3022 */ + lrg_buf_cb1 = ql_get_lbuf(qdev); + skb1 = lrg_buf_cb1->skb; + size = ETH_HLEN; + if (*((u16 *) skb1->data) != 0xFFFF) + size += VLAN_ETH_HLEN - ETH_HLEN; + } + + /* start of second buffer */ + lrg_buf_cb2 = ql_get_lbuf(qdev); + skb2 = lrg_buf_cb2->skb; + + skb_put(skb2, length); /* Just the second buffer length here. */ + pci_unmap_single(qdev->pdev, + dma_unmap_addr(lrg_buf_cb2, mapaddr), + dma_unmap_len(lrg_buf_cb2, maplen), + PCI_DMA_FROMDEVICE); + prefetch(skb2->data); + + skb_checksum_none_assert(skb2); + if (qdev->device_id == QL3022_DEVICE_ID) { + /* + * Copy the ethhdr from first buffer to second. This + * is necessary for 3022 IP completions. + */ + skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, + skb_push(skb2, size), size); + } else { + u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); + if (checksum & + (IB_IP_IOCB_RSP_3032_ICE | + IB_IP_IOCB_RSP_3032_CE)) { + netdev_err(ndev, + "%s: Bad checksum for this %s packet, checksum = %x\n", + __func__, + ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 
+ "TCP" : "UDP"), checksum); + } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || + (checksum & IB_IP_IOCB_RSP_3032_UDP && + !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { + skb2->ip_summed = CHECKSUM_UNNECESSARY; + } + } + skb2->protocol = eth_type_trans(skb2, qdev->ndev); + + netif_receive_skb(skb2); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += length; + lrg_buf_cb2->skb = NULL; + + if (qdev->device_id == QL3022_DEVICE_ID) + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); + ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); +} + +static int ql_tx_rx_clean(struct ql3_adapter *qdev, + int *tx_cleaned, int *rx_cleaned, int work_to_do) +{ + struct net_rsp_iocb *net_rsp; + struct net_device *ndev = qdev->ndev; + int work_done = 0; + + /* While there are entries in the completion queue. */ + while ((le32_to_cpu(*(qdev->prsp_producer_index)) != + qdev->rsp_consumer_index) && (work_done < work_to_do)) { + + net_rsp = qdev->rsp_current; + rmb(); + /* + * Fix 4032 chip's undocumented "feature" where bit-8 is set + * if the inbound completion is for a VLAN. + */ + if (qdev->device_id == QL3032_DEVICE_ID) + net_rsp->opcode &= 0x7f; + switch (net_rsp->opcode) { + + case OPCODE_OB_MAC_IOCB_FN0: + case OPCODE_OB_MAC_IOCB_FN2: + ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) + net_rsp); + (*tx_cleaned)++; + break; + + case OPCODE_IB_MAC_IOCB: + case OPCODE_IB_3032_MAC_IOCB: + ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + + case OPCODE_IB_IP_IOCB: + case OPCODE_IB_3032_IP_IOCB: + ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) + net_rsp); + (*rx_cleaned)++; + break; + default: { + u32 *tmp = (u32 *)net_rsp; + netdev_err(ndev, + "Hit default case, not handled!\n" + " dropping the packet, opcode = %x\n" + "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", + net_rsp->opcode, + (unsigned long int)tmp[0], + (unsigned long int)tmp[1], + (unsigned long int)tmp[2], + (unsigned long int)tmp[3]); + } + } + + qdev->rsp_consumer_index++; + + if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + } else { + qdev->rsp_current++; + } + + work_done = *tx_cleaned + *rx_cleaned; + } + + return work_done; +} + +static int ql_poll(struct napi_struct *napi, int budget) +{ + struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); + int rx_cleaned = 0, tx_cleaned = 0; + unsigned long hw_flags; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + + ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); + + if (tx_cleaned + rx_cleaned != budget) { + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + __napi_complete(napi); + ql_update_small_bufq_prod_index(qdev); + ql_update_lrg_bufq_prod_index(qdev); + writel(qdev->rsp_consumer_index, + &port_regs->CommonRegs.rspQConsumerIndex); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + ql_enable_interrupts(qdev); + } + return tx_cleaned + rx_cleaned; +} + +static irqreturn_t ql3xxx_isr(int irq, void *dev_id) +{ + + struct net_device *ndev = dev_id; + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + int handled = 1; + u32 var; + + value = ql_read_common_reg_l(qdev, + &port_regs->CommonRegs.ispControlStatus); + + if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { + spin_lock(&qdev->adapter_lock); + netif_stop_queue(qdev->ndev); + netif_carrier_off(qdev->ndev); + ql_disable_interrupts(qdev); + qdev->port_link_state 
= LS_DOWN; + set_bit(QL_RESET_ACTIVE, &qdev->flags) ; + + if (value & ISP_CONTROL_FE) { + /* + * Chip Fatal Error. + */ + var = + ql_read_page0_reg_l(qdev, + &port_regs->PortFatalErrStatus); + netdev_warn(ndev, + "Resetting chip. PortFatalErrStatus register = 0x%x\n", + var); + set_bit(QL_RESET_START, &qdev->flags) ; + } else { + /* + * Soft Reset Requested. + */ + set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; + netdev_err(ndev, + "Another function issued a reset to the chip. ISR value = %x\n", + value); + } + queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); + spin_unlock(&qdev->adapter_lock); + } else if (value & ISP_IMR_DISABLE_CMPL_INT) { + ql_disable_interrupts(qdev); + if (likely(napi_schedule_prep(&qdev->napi))) + __napi_schedule(&qdev->napi); + } else + return IRQ_NONE; + + return IRQ_RETVAL(handled); +} + +/* + * Get the total number of segments needed for the given number of fragments. + * This is necessary because outbound address lists (OAL) will be used when + * more than two frags are given. Each address list has 5 addr/len pairs. + * The 5th pair in each OAL is used to point to the next OAL if more frags + * are coming. That is why the frags:segment count ratio is not linear. + */ +static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) +{ + if (qdev->device_id == QL3022_DEVICE_ID) + return 1; + + if (frags <= 2) + return frags + 1; + else if (frags <= 6) + return frags + 2; + else if (frags <= 10) + return frags + 3; + else if (frags <= 14) + return frags + 4; + else if (frags <= 18) + return frags + 5; + return -1; +} + +static void ql_hw_csum_setup(const struct sk_buff *skb, + struct ob_mac_iocb_req *mac_iocb_ptr) +{ + const struct iphdr *ip = ip_hdr(skb); + + mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); + mac_iocb_ptr->ip_hdr_len = ip->ihl; + + if (ip->protocol == IPPROTO_TCP) { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | + OB_3032MAC_IOCB_REQ_IC; + } else { + mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | + OB_3032MAC_IOCB_REQ_IC; + } + +} + +/* + * Map the buffers for this transmit. + * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. + */ +static int ql_send_map(struct ql3_adapter *qdev, + struct ob_mac_iocb_req *mac_iocb_ptr, + struct ql_tx_buf_cb *tx_cb, + struct sk_buff *skb) +{ + struct oal *oal; + struct oal_entry *oal_entry; + int len = skb_headlen(skb); + dma_addr_t map; + int err; + int completed_segs, i; + int seg_cnt, seg = 0; + int frag_cnt = (int)skb_shinfo(skb)->nr_frags; + + seg_cnt = tx_cb->seg_count; + /* + * Map the skb buffer first. + */ + map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", + err); + + return NETDEV_TX_BUSY; + } + + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(len); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, len); + seg++; + + if (seg_cnt == 1) { + /* Terminate the last segment. */ + oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); + return NETDEV_TX_OK; + } + oal = tx_cb->oal; + for (completed_segs = 0; + completed_segs < frag_cnt; + completed_segs++, seg++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; + oal_entry++; + /* + * Check for continuation requirements. + * It's strange but necessary. 
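+ * (The IOCB holds 3 addr/len pairs and each OAL holds 5, with the
+ * last pair of each reserved as a chain pointer, hence the breaks
+ * at segments 2, 7, 12 and 17.)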
+ * Continuation entry points to outbound address list. + */ + if ((seg == 2 && seg_cnt > 3) || + (seg == 7 && seg_cnt > 8) || + (seg == 12 && seg_cnt > 13) || + (seg == 17 && seg_cnt > 18)) { + map = pci_map_single(qdev->pdev, oal, + sizeof(struct oal), + PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping outbound address list with error: %d\n", + err); + goto map_error; + } + + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(sizeof(struct oal) | + OAL_CONT_ENTRY); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, + sizeof(struct oal)); + oal_entry = (struct oal_entry *)oal; + oal++; + seg++; + } + + map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + err = dma_mapping_error(&qdev->pdev->dev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping frags failed with error: %d\n", + err); + goto map_error; + } + + oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); + oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); + oal_entry->len = cpu_to_le32(skb_frag_size(frag)); + dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); + dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); + } + /* Terminate the last segment. */ + oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); + return NETDEV_TX_OK; + +map_error: + /* A PCI mapping failed and now we will need to back out + * We need to traverse through the oal's and associated pages which + * have been mapped and now we must unmap them to clean up properly + */ + + seg = 1; + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; + oal = tx_cb->oal; + for (i = 0; i < completed_segs; i++, seg++) { + oal_entry++; + + /* + * Check for continuation requirements. + * It's strange but necessary. + */ + + if ((seg == 2 && seg_cnt > 3) || + (seg == 7 && seg_cnt > 8) || + (seg == 12 && seg_cnt > 13) || + (seg == 17 && seg_cnt > 18)) { + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), + PCI_DMA_TODEVICE); + oal++; + seg++; + } + + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_cb->map[seg], mapaddr), + dma_unmap_len(&tx_cb->map[seg], maplen), + PCI_DMA_TODEVICE); + } + + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[0], mapaddr), + dma_unmap_addr(&tx_cb->map[0], maplen), + PCI_DMA_TODEVICE); + + return NETDEV_TX_BUSY; + +} + +/* + * The difference between 3022 and 3032 sends: + * 3022 only supports a simple single segment transmission. + * 3032 supports checksumming and scatter/gather lists (fragments). + * The 3032 supports sglists by using the 3 addr/len pairs (ALP) + * in the IOCB plus a chain of outbound address lists (OAL) that + * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) + * will be used to point to an OAL when more ALP entries are required. + * The IOCB is always the top of the chain followed by one or more + * OALs (when necessary). 
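+ * Example (3032): an skb with 4 page fragments needs 1 + 4 data
+ * address/length pairs; ql_get_seg_count() returns 6 so that the
+ * third IOCB slot can be spent on the OAL chain pointer.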
+ */ +static netdev_tx_t ql3xxx_send(struct sk_buff *skb, + struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + struct ql_tx_buf_cb *tx_cb; + u32 tot_len = skb->len; + struct ob_mac_iocb_req *mac_iocb_ptr; + + if (unlikely(atomic_read(&qdev->tx_count) < 2)) + return NETDEV_TX_BUSY; + + tx_cb = &qdev->tx_buf[qdev->req_producer_index]; + tx_cb->seg_count = ql_get_seg_count(qdev, + skb_shinfo(skb)->nr_frags); + if (tx_cb->seg_count == -1) { + netdev_err(ndev, "%s: invalid segment count!\n", __func__); + return NETDEV_TX_OK; + } + + mac_iocb_ptr = tx_cb->queue_entry; + memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); + mac_iocb_ptr->opcode = qdev->mac_ob_opcode; + mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; + mac_iocb_ptr->flags |= qdev->mb_bit_mask; + mac_iocb_ptr->transaction_id = qdev->req_producer_index; + mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); + tx_cb->skb = skb; + if (qdev->device_id == QL3032_DEVICE_ID && + skb->ip_summed == CHECKSUM_PARTIAL) + ql_hw_csum_setup(skb, mac_iocb_ptr); + + if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { + netdev_err(ndev, "%s: Could not map the segments!\n", __func__); + return NETDEV_TX_BUSY; + } + + wmb(); + qdev->req_producer_index++; + if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) + qdev->req_producer_index = 0; + wmb(); + ql_write_common_reg_l(qdev, + &port_regs->CommonRegs.reqQProducerIndex, + qdev->req_producer_index); + + netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, + "tx queued, slot %d, len %d\n", + qdev->req_producer_index, skb->len); + + atomic_dec(&qdev->tx_count); + return NETDEV_TX_OK; +} + +static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + qdev->req_q_size = + (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); + + qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); + + /* The barrier is required to ensure request and response queue + * addr writes to the registers. 
+ */ + wmb(); + + qdev->req_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->req_q_size, + &qdev->req_q_phy_addr); + + if ((qdev->req_q_virt_addr == NULL) || + LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { + netdev_err(qdev->ndev, "reqQ failed\n"); + return -ENOMEM; + } + + qdev->rsp_q_virt_addr = + pci_alloc_consistent(qdev->pdev, + (size_t) qdev->rsp_q_size, + &qdev->rsp_q_phy_addr); + + if ((qdev->rsp_q_virt_addr == NULL) || + LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { + netdev_err(qdev->ndev, "rspQ allocation failed\n"); + pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, + qdev->req_q_virt_addr, + qdev->req_q_phy_addr); + return -ENOMEM; + } + + set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); + + return 0; +} + +static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + + pci_free_consistent(qdev->pdev, + qdev->req_q_size, + qdev->req_q_virt_addr, qdev->req_q_phy_addr); + + qdev->req_q_virt_addr = NULL; + + pci_free_consistent(qdev->pdev, + qdev->rsp_q_size, + qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); + + qdev->rsp_q_virt_addr = NULL; + + clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); +} + +static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) +{ + /* Create Large Buffer Queue */ + qdev->lrg_buf_q_size = + qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); + if (qdev->lrg_buf_q_size < PAGE_SIZE) + qdev->lrg_buf_q_alloc_size = PAGE_SIZE; + else + qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; + + qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, + sizeof(struct ql_rcv_buf_cb), + GFP_KERNEL); + if (qdev->lrg_buf == NULL) + return -ENOMEM; + + qdev->lrg_buf_q_alloc_virt_addr = + pci_alloc_consistent(qdev->pdev, + qdev->lrg_buf_q_alloc_size, + &qdev->lrg_buf_q_alloc_phy_addr); + + if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { + netdev_err(qdev->ndev, "lBufQ failed\n"); + return -ENOMEM; + } + qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; + qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; + + /* Create Small Buffer Queue */ + qdev->small_buf_q_size = + NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); + if (qdev->small_buf_q_size < PAGE_SIZE) + qdev->small_buf_q_alloc_size = PAGE_SIZE; + else + qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; + + qdev->small_buf_q_alloc_virt_addr = + pci_alloc_consistent(qdev->pdev, + qdev->small_buf_q_alloc_size, + &qdev->small_buf_q_alloc_phy_addr); + + if (qdev->small_buf_q_alloc_virt_addr == NULL) { + netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); + pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, + qdev->lrg_buf_q_alloc_virt_addr, + qdev->lrg_buf_q_alloc_phy_addr); + return -ENOMEM; + } + + qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; + qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; + set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); + return 0; +} + +static void ql_free_buffer_queues(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + kfree(qdev->lrg_buf); + pci_free_consistent(qdev->pdev, + qdev->lrg_buf_q_alloc_size, + qdev->lrg_buf_q_alloc_virt_addr, + qdev->lrg_buf_q_alloc_phy_addr); + + qdev->lrg_buf_q_virt_addr = NULL; + + pci_free_consistent(qdev->pdev, + qdev->small_buf_q_alloc_size, + qdev->small_buf_q_alloc_virt_addr, + 
qdev->small_buf_q_alloc_phy_addr); + + qdev->small_buf_q_virt_addr = NULL; + + clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); +} + +static int ql_alloc_small_buffers(struct ql3_adapter *qdev) +{ + int i; + struct bufq_addr_element *small_buf_q_entry; + + /* Currently we allocate on one of memory and use it for smallbuffers */ + qdev->small_buf_total_size = + (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * + QL_SMALL_BUFFER_SIZE); + + qdev->small_buf_virt_addr = + pci_alloc_consistent(qdev->pdev, + qdev->small_buf_total_size, + &qdev->small_buf_phy_addr); + + if (qdev->small_buf_virt_addr == NULL) { + netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); + return -ENOMEM; + } + + qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); + qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); + + small_buf_q_entry = qdev->small_buf_q_virt_addr; + + /* Initialize the small buffer queue. */ + for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { + small_buf_q_entry->addr_high = + cpu_to_le32(qdev->small_buf_phy_addr_high); + small_buf_q_entry->addr_low = + cpu_to_le32(qdev->small_buf_phy_addr_low + + (i * QL_SMALL_BUFFER_SIZE)); + small_buf_q_entry++; + } + qdev->small_buf_index = 0; + set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); + return 0; +} + +static void ql_free_small_buffers(struct ql3_adapter *qdev) +{ + if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { + netdev_info(qdev->ndev, "Already done\n"); + return; + } + if (qdev->small_buf_virt_addr != NULL) { + pci_free_consistent(qdev->pdev, + qdev->small_buf_total_size, + qdev->small_buf_virt_addr, + qdev->small_buf_phy_addr); + + qdev->small_buf_virt_addr = NULL; + } +} + +static void ql_free_large_buffers(struct ql3_adapter *qdev) +{ + int i = 0; + struct ql_rcv_buf_cb *lrg_buf_cb; + + for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + if (lrg_buf_cb->skb) { + dev_kfree_skb(lrg_buf_cb->skb); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(lrg_buf_cb, mapaddr), + dma_unmap_len(lrg_buf_cb, maplen), + PCI_DMA_FROMDEVICE); + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + } else { + break; + } + } +} + +static void ql_init_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; + + for (i = 0; i < qdev->num_large_buffers; i++) { + lrg_buf_cb = &qdev->lrg_buf[i]; + buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; + buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; + buf_addr_ele++; + } + qdev->lrg_buf_index = 0; + qdev->lrg_buf_skb_check = 0; +} + +static int ql_alloc_large_buffers(struct ql3_adapter *qdev) +{ + int i; + struct ql_rcv_buf_cb *lrg_buf_cb; + struct sk_buff *skb; + dma_addr_t map; + int err; + + for (i = 0; i < qdev->num_large_buffers; i++) { + skb = netdev_alloc_skb(qdev->ndev, + qdev->lrg_buffer_len); + if (unlikely(!skb)) { + /* Better luck next round */ + netdev_err(qdev->ndev, + "large buff alloc failed for %d bytes at index %d\n", + qdev->lrg_buffer_len * 2, i); + ql_free_large_buffers(qdev); + return -ENOMEM; + } else { + + lrg_buf_cb = &qdev->lrg_buf[i]; + memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); + lrg_buf_cb->index = i; + lrg_buf_cb->skb = skb; + /* + * We save some space to copy the ethhdr from first + * buffer + */ + skb_reserve(skb, QL_HEADER_SPACE); + map = pci_map_single(qdev->pdev, + skb->data, + qdev->lrg_buffer_len - + QL_HEADER_SPACE, + PCI_DMA_FROMDEVICE); + + err = 
pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netdev_err(qdev->ndev, + "PCI mapping failed with error: %d\n", + err); + ql_free_large_buffers(qdev); + return -ENOMEM; + } + + dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); + dma_unmap_len_set(lrg_buf_cb, maplen, + qdev->lrg_buffer_len - + QL_HEADER_SPACE); + lrg_buf_cb->buf_phy_addr_low = + cpu_to_le32(LS_64BITS(map)); + lrg_buf_cb->buf_phy_addr_high = + cpu_to_le32(MS_64BITS(map)); + } + } + return 0; +} + +static void ql_free_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + + tx_cb = &qdev->tx_buf[0]; + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + kfree(tx_cb->oal); + tx_cb->oal = NULL; + tx_cb++; + } +} + +static int ql_create_send_free_list(struct ql3_adapter *qdev) +{ + struct ql_tx_buf_cb *tx_cb; + int i; + struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; + + /* Create free list of transmit buffers */ + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + + tx_cb = &qdev->tx_buf[i]; + tx_cb->skb = NULL; + tx_cb->queue_entry = req_q_curr; + req_q_curr++; + tx_cb->oal = kmalloc(512, GFP_KERNEL); + if (tx_cb->oal == NULL) + return -ENOMEM; + } + return 0; +} + +static int ql_alloc_mem_resources(struct ql3_adapter *qdev) +{ + if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { + qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; + qdev->lrg_buffer_len = NORMAL_MTU_SIZE; + } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { + /* + * Bigger buffers, so less of them. + */ + qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; + qdev->lrg_buffer_len = JUMBO_MTU_SIZE; + } else { + netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", + qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); + return -ENOMEM; + } + qdev->num_large_buffers = + qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; + qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; + qdev->max_frame_size = + (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; + + /* + * First allocate a page of shared memory and use it for shadow + * locations of Network Request Queue Consumer Address Register and + * Network Completion Queue Producer Index Register + */ + qdev->shadow_reg_virt_addr = + pci_alloc_consistent(qdev->pdev, + PAGE_SIZE, &qdev->shadow_reg_phy_addr); + + if (qdev->shadow_reg_virt_addr != NULL) { + qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; + qdev->req_consumer_index_phy_addr_high = + MS_64BITS(qdev->shadow_reg_phy_addr); + qdev->req_consumer_index_phy_addr_low = + LS_64BITS(qdev->shadow_reg_phy_addr); + + qdev->prsp_producer_index = + (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); + qdev->rsp_producer_index_phy_addr_high = + qdev->req_consumer_index_phy_addr_high; + qdev->rsp_producer_index_phy_addr_low = + qdev->req_consumer_index_phy_addr_low + 8; + } else { + netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); + return -ENOMEM; + } + + if (ql_alloc_net_req_rsp_queues(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); + goto err_req_rsp; + } + + if (ql_alloc_buffer_queues(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); + goto err_buffer_queues; + } + + if (ql_alloc_small_buffers(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); + goto err_small_buffers; + } + + if (ql_alloc_large_buffers(qdev) != 0) { + netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); + goto err_small_buffers; + } + + /* Initialize the large buffer queue. 
*/ + ql_init_large_buffers(qdev); + if (ql_create_send_free_list(qdev)) + goto err_free_list; + + qdev->rsp_current = qdev->rsp_q_virt_addr; + + return 0; +err_free_list: + ql_free_send_free_list(qdev); +err_small_buffers: + ql_free_buffer_queues(qdev); +err_buffer_queues: + ql_free_net_req_rsp_queues(qdev); +err_req_rsp: + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->shadow_reg_virt_addr, + qdev->shadow_reg_phy_addr); + + return -ENOMEM; +} + +static void ql_free_mem_resources(struct ql3_adapter *qdev) +{ + ql_free_send_free_list(qdev); + ql_free_large_buffers(qdev); + ql_free_small_buffers(qdev); + ql_free_buffer_queues(qdev); + ql_free_net_req_rsp_queues(qdev); + if (qdev->shadow_reg_virt_addr != NULL) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->shadow_reg_virt_addr, + qdev->shadow_reg_phy_addr); + qdev->shadow_reg_virt_addr = NULL; + } +} + +static int ql_init_misc_registers(struct ql3_adapter *qdev) +{ + struct ql3xxx_local_ram_registers __iomem *local_ram = + (void __iomem *)qdev->mem_map_registers; + + if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 4)) + return -1; + + ql_write_page2_reg(qdev, + &local_ram->bufletSize, qdev->nvram_data.bufletSize); + + ql_write_page2_reg(qdev, + &local_ram->maxBufletCount, + qdev->nvram_data.bufletCount); + + ql_write_page2_reg(qdev, + &local_ram->freeBufletThresholdLow, + (qdev->nvram_data.tcpWindowThreshold25 << 16) | + (qdev->nvram_data.tcpWindowThreshold0)); + + ql_write_page2_reg(qdev, + &local_ram->freeBufletThresholdHigh, + qdev->nvram_data.tcpWindowThreshold50); + + ql_write_page2_reg(qdev, + &local_ram->ipHashTableBase, + (qdev->nvram_data.ipHashTableBaseHi << 16) | + qdev->nvram_data.ipHashTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->ipHashTableCount, + qdev->nvram_data.ipHashTableSize); + ql_write_page2_reg(qdev, + &local_ram->tcpHashTableBase, + (qdev->nvram_data.tcpHashTableBaseHi << 16) | + qdev->nvram_data.tcpHashTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->tcpHashTableCount, + qdev->nvram_data.tcpHashTableSize); + ql_write_page2_reg(qdev, + &local_ram->ncbBase, + (qdev->nvram_data.ncbTableBaseHi << 16) | + qdev->nvram_data.ncbTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->maxNcbCount, + qdev->nvram_data.ncbTableSize); + ql_write_page2_reg(qdev, + &local_ram->drbBase, + (qdev->nvram_data.drbTableBaseHi << 16) | + qdev->nvram_data.drbTableBaseLo); + ql_write_page2_reg(qdev, + &local_ram->maxDrbCount, + qdev->nvram_data.drbTableSize); + ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); + return 0; +} + +static int ql_adapter_initialize(struct ql3_adapter *qdev) +{ + u32 value; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + struct ql3xxx_host_memory_registers __iomem *hmem_regs = + (void __iomem *)port_regs; + u32 delay = 10; + int status = 0; + + if (ql_mii_setup(qdev)) + return -1; + + /* Bring out PHY out of reset */ + ql_write_common_reg(qdev, spir, + (ISP_SERIAL_PORT_IF_WE | + (ISP_SERIAL_PORT_IF_WE << 16))); + /* Give the PHY time to come out of reset. */ + mdelay(100); + qdev->port_link_state = LS_DOWN; + netif_carrier_off(qdev->ndev); + + /* V2 chip fix for ARS-39168. 
*/ + ql_write_common_reg(qdev, spir, + (ISP_SERIAL_PORT_IF_SDE | + (ISP_SERIAL_PORT_IF_SDE << 16))); + + /* Request Queue Registers */ + *((u32 *)(qdev->preq_consumer_index)) = 0; + atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); + qdev->req_producer_index = 0; + + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrHigh, + qdev->req_consumer_index_phy_addr_high); + ql_write_page1_reg(qdev, + &hmem_regs->reqConsumerIndexAddrLow, + qdev->req_consumer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrHigh, + MS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, + &hmem_regs->reqBaseAddrLow, + LS_64BITS(qdev->req_q_phy_addr)); + ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); + + /* Response Queue Registers */ + *((__le16 *) (qdev->prsp_producer_index)) = 0; + qdev->rsp_consumer_index = 0; + qdev->rsp_current = qdev->rsp_q_virt_addr; + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrHigh, + qdev->rsp_producer_index_phy_addr_high); + + ql_write_page1_reg(qdev, + &hmem_regs->rspProducerIndexAddrLow, + qdev->rsp_producer_index_phy_addr_low); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrHigh, + MS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rspBaseAddrLow, + LS_64BITS(qdev->rsp_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); + + /* Large Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrHigh, + MS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQBaseAddrLow, + LS_64BITS(qdev->lrg_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeQLength, + qdev->num_lbufq_entries); + + ql_write_page1_reg(qdev, + &hmem_regs->rxLargeBufferLength, + qdev->lrg_buffer_len); + + /* Small Buffer Queue */ + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrHigh, + MS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallQBaseAddrLow, + LS_64BITS(qdev->small_buf_q_phy_addr)); + + ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); + ql_write_page1_reg(qdev, + &hmem_regs->rxSmallBufferLength, + QL_SMALL_BUFFER_SIZE); + + qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; + qdev->small_buf_release_cnt = 8; + qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; + qdev->lrg_buf_release_cnt = 8; + qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; + qdev->small_buf_index = 0; + qdev->lrg_buf_index = 0; + qdev->lrg_buf_free_count = 0; + qdev->lrg_buf_free_head = NULL; + qdev->lrg_buf_free_tail = NULL; + + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + rxSmallQProducerIndex, + qdev->small_buf_q_producer_index); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + rxLargeQProducerIndex, + qdev->lrg_buf_q_producer_index); + + /* + * Find out if the chip has already been initialized. If it has, then + * we skip some of the initialization. + */ + clear_bit(QL_LINK_MASTER, &qdev->flags); + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if ((value & PORT_STATUS_IC) == 0) { + + /* Chip has not been configured yet, so let it rip. 
*/ + if (ql_init_misc_registers(qdev)) { + status = -1; + goto out; + } + + value = qdev->nvram_data.tcpMaxWindowSize; + ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); + + value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; + + if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) + * 2) << 13)) { + status = -1; + goto out; + } + ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); + ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, + (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << + 16) | (INTERNAL_CHIP_SD | + INTERNAL_CHIP_WE))); + ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); + } + + if (qdev->mac_index) + ql_write_page0_reg(qdev, + &port_regs->mac1MaxFrameLengthReg, + qdev->max_frame_size); + else + ql_write_page0_reg(qdev, + &port_regs->mac0MaxFrameLengthReg, + qdev->max_frame_size); + + if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, + (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * + 2) << 7)) { + status = -1; + goto out; + } + + PHY_Setup(qdev); + ql_init_scan_mode(qdev); + ql_get_phy_owner(qdev); + + /* Load the MAC Configuration */ + + /* Program lower 32 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[2] << 24) + | (qdev->ndev->dev_addr[3] << 16) + | (qdev->ndev->dev_addr[4] << 8) + | qdev->ndev->dev_addr[5])); + + /* Program top 16 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((qdev->ndev->dev_addr[0] << 8) + | qdev->ndev->dev_addr[1])); + + /* Enable Primary MAC */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | + MAC_ADDR_INDIRECT_PTR_REG_PE)); + + /* Clear Primary and Secondary IP addresses */ + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + (qdev->mac_index << 2))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, + ((IP_ADDR_INDEX_REG_MASK << 16) | + ((qdev->mac_index << 2) + 1))); + ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); + + ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); + + /* Indicate Configuration Complete */ + ql_write_page0_reg(qdev, + &port_regs->portControl, + ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); + + do { + value = ql_read_page0_reg(qdev, &port_regs->portStatus); + if (value & PORT_STATUS_IC) + break; + spin_unlock_irq(&qdev->hw_lock); + msleep(500); + spin_lock_irq(&qdev->hw_lock); + } while (--delay); + + if (delay == 0) { + netdev_err(qdev->ndev, "Hw Initialization timeout\n"); + status = -1; + goto out; + } + + /* Enable Ethernet Function */ + if (qdev->device_id == QL3032_DEVICE_ID) { + value = + (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | + QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 | + QL3032_PORT_CONTROL_ET); + ql_write_page0_reg(qdev, &port_regs->functionControl, + ((value << 16) | value)); + } else { + value = + (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | + PORT_CONTROL_HH); + ql_write_page0_reg(qdev, &port_regs->portControl, + ((value << 16) | value)); + } + + +out: + return status; +} + +/* + * Caller holds hw_lock. 
+ */ +static int ql_adapter_reset(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + int status = 0; + u16 value; + int max_wait_time; + + set_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_DONE, &qdev->flags); + + /* + * Issue soft reset to chip. + */ + netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); + ql_write_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus, + ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); + + /* Wait up to 5 seconds for the reset to complete. */ + netdev_printk(KERN_DEBUG, qdev->ndev, + "Wait up to 5 seconds for reset to complete\n"); + + /* Wait until the firmware tells us the Soft Reset is done */ + max_wait_time = 5; + do { + value = + ql_read_common_reg(qdev, + &port_regs->CommonRegs.ispControlStatus); + if ((value & ISP_CONTROL_SR) == 0) + break; + + ssleep(1); + } while ((--max_wait_time)); + + /* + * Also, make sure that the Network Reset Interrupt bit has been + * cleared after the soft reset has taken place. + */ + value = + ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); + if (value & ISP_CONTROL_RI) { + netdev_printk(KERN_DEBUG, qdev->ndev, + "clearing RI after reset\n"); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus, + ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); + } + + if (max_wait_time == 0) { + /* Issue Force Soft Reset */ + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus, + ((ISP_CONTROL_FSR << 16) | + ISP_CONTROL_FSR)); + /* + * Wait until the firmware tells us the Force Soft Reset is + * done + */ + max_wait_time = 5; + do { + value = ql_read_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus); + if ((value & ISP_CONTROL_FSR) == 0) + break; + ssleep(1); + } while ((--max_wait_time)); + } + if (max_wait_time == 0) + status = 1; + + clear_bit(QL_RESET_ACTIVE, &qdev->flags); + set_bit(QL_RESET_DONE, &qdev->flags); + return status; +} + +static void ql_set_mac_info(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value, port_status; + u8 func_number; + + /* Get the function number */ + value = + ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); + func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); + port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); + switch (value & ISP_CONTROL_FN_MASK) { + case ISP_CONTROL_FN0_NET: + qdev->mac_index = 0; + qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; + qdev->mb_bit_mask = FN0_MA_BITS_MASK; + qdev->PHYAddr = PORT0_PHY_ADDRESS; + if (port_status & PORT_STATUS_SM0) + set_bit(QL_LINK_OPTICAL, &qdev->flags); + else + clear_bit(QL_LINK_OPTICAL, &qdev->flags); + break; + + case ISP_CONTROL_FN1_NET: + qdev->mac_index = 1; + qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; + qdev->mb_bit_mask = FN1_MA_BITS_MASK; + qdev->PHYAddr = PORT1_PHY_ADDRESS; + if (port_status & PORT_STATUS_SM1) + set_bit(QL_LINK_OPTICAL, &qdev->flags); + else + clear_bit(QL_LINK_OPTICAL, &qdev->flags); + break; + + case ISP_CONTROL_FN0_SCSI: + case ISP_CONTROL_FN1_SCSI: + default: + netdev_printk(KERN_DEBUG, qdev->ndev, + "Invalid function number, ispControlStatus = 0x%x\n", + value); + break; + } + qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; +} + +static void ql_display_dev_info(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct pci_dev *pdev = qdev->pdev; + + netdev_info(ndev, + "%s Adapter %d RevisionID %d found %s on PCI 
slot %d\n", + DRV_NAME, qdev->index, qdev->chip_rev_id, + qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", + qdev->pci_slot); + netdev_info(ndev, "%s Interface\n", + test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); + + /* + * Print PCI bus width/type. + */ + netdev_info(ndev, "Bus interface is %s %s\n", + ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), + ((qdev->pci_x) ? "PCI-X" : "PCI")); + + netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", + qdev->mem_map_registers); + netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); + + netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); +} + +static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) +{ + struct net_device *ndev = qdev->ndev; + int retval = 0; + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + clear_bit(QL_LINK_MASTER, &qdev->flags); + + ql_disable_interrupts(qdev); + + free_irq(qdev->pdev->irq, ndev); + + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + pci_disable_msi(qdev->pdev); + } + + del_timer_sync(&qdev->adapter_timer); + + napi_disable(&qdev->napi); + + if (do_reset) { + int soft_reset; + unsigned long hw_flags; + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (ql_wait_for_drvr_lock(qdev)) { + soft_reset = ql_adapter_reset(qdev); + if (soft_reset) { + netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", + qdev->index); + } + netdev_err(ndev, + "Releasing driver lock via chip reset\n"); + } else { + netdev_err(ndev, + "Could not acquire driver lock to do reset!\n"); + retval = -1; + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + } + ql_free_mem_resources(qdev); + return retval; +} + +static int ql_adapter_up(struct ql3_adapter *qdev) +{ + struct net_device *ndev = qdev->ndev; + int err; + unsigned long irq_flags = IRQF_SHARED; + unsigned long hw_flags; + + if (ql_alloc_mem_resources(qdev)) { + netdev_err(ndev, "Unable to allocate buffers\n"); + return -ENOMEM; + } + + if (qdev->msi) { + if (pci_enable_msi(qdev->pdev)) { + netdev_err(ndev, + "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); + qdev->msi = 0; + } else { + netdev_info(ndev, "MSI Enabled...\n"); + set_bit(QL_MSI_ENABLED, &qdev->flags); + irq_flags &= ~IRQF_SHARED; + } + } + + err = request_irq(qdev->pdev->irq, ql3xxx_isr, + irq_flags, ndev->name, ndev); + if (err) { + netdev_err(ndev, + "Failed to reserve interrupt %d - already in use\n", + qdev->pdev->irq); + goto err_irq; + } + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + + err = ql_wait_for_drvr_lock(qdev); + if (err) { + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); + goto err_init; + } + netdev_err(ndev, "Releasing driver lock\n"); + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); + } else { + netdev_err(ndev, "Could not acquire driver lock\n"); + goto err_lock; + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + set_bit(QL_ADAPTER_UP, &qdev->flags); + + mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); + + napi_enable(&qdev->napi); + ql_enable_interrupts(qdev); + return 0; + +err_init: + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); +err_lock: + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + free_irq(qdev->pdev->irq, ndev); +err_irq: + if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { + netdev_info(ndev, "calling pci_disable_msi()\n"); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + pci_disable_msi(qdev->pdev); + } + return err; +} + +static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) +{ + if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { + netdev_err(qdev->ndev, + "Driver up/down cycle failed, closing device\n"); + rtnl_lock(); + dev_close(qdev->ndev); + rtnl_unlock(); + return -1; + } + return 0; +} + +static int ql3xxx_close(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + /* + * Wait for device to recover from a reset. + * (Rarely happens, but possible.) + */ + while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) + msleep(50); + + ql_adapter_down(qdev, QL_DO_RESET); + return 0; +} + +static int ql3xxx_open(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + return ql_adapter_up(qdev); +} + +static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + struct sockaddr *addr = p; + unsigned long hw_flags; + + if (netif_running(ndev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + /* Program lower 32 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((ndev->dev_addr[2] << 24) | (ndev-> + dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); + + /* Program top 16 bits of the MAC address */ + ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, + ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); + ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, + ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + return 0; +} + +static void ql3xxx_tx_timeout(struct net_device *ndev) +{ + struct ql3_adapter *qdev = netdev_priv(ndev); + + netdev_err(ndev, "Resetting...\n"); + /* + * Stop the queues, we've got a problem. + */ + netif_stop_queue(ndev); + + /* + * Wake up the worker to process this event. 
+ */ + queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); +} + +static void ql_reset_work(struct work_struct *work) +{ + struct ql3_adapter *qdev = + container_of(work, struct ql3_adapter, reset_work.work); + struct net_device *ndev = qdev->ndev; + u32 value; + struct ql_tx_buf_cb *tx_cb; + int max_wait_time, i; + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + unsigned long hw_flags; + + if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || + test_bit(QL_RESET_START, &qdev->flags)) { + clear_bit(QL_LINK_MASTER, &qdev->flags); + + /* + * Loop through the active list and return the skb. + */ + for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { + int j; + tx_cb = &qdev->tx_buf[i]; + if (tx_cb->skb) { + netdev_printk(KERN_DEBUG, ndev, + "Freeing lost SKB\n"); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_cb->map[0], + mapaddr), + dma_unmap_len(&tx_cb->map[0], maplen), + PCI_DMA_TODEVICE); + for (j = 1; j < tx_cb->seg_count; j++) { + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_cb->map[j], + mapaddr), + dma_unmap_len(&tx_cb->map[j], + maplen), + PCI_DMA_TODEVICE); + } + dev_kfree_skb(tx_cb->skb); + tx_cb->skb = NULL; + } + } + + netdev_err(ndev, "Clearing NRI after reset\n"); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + ql_write_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus, + ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); + /* + * Wait for the Soft Reset to complete. + */ + max_wait_time = 10; + do { + value = ql_read_common_reg(qdev, + &port_regs->CommonRegs. + ispControlStatus); + if ((value & ISP_CONTROL_SR) == 0) { + netdev_printk(KERN_DEBUG, ndev, + "reset completed\n"); + break; + } + + if (value & ISP_CONTROL_RI) { + netdev_printk(KERN_DEBUG, ndev, + "clearing NRI after reset\n"); + ql_write_common_reg(qdev, + &port_regs-> + CommonRegs. + ispControlStatus, + ((ISP_CONTROL_RI << + 16) | ISP_CONTROL_RI)); + } + + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + ssleep(1); + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + } while (--max_wait_time); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + + if (value & ISP_CONTROL_SR) { + + /* + * Set the reset flags and clear the board again. + * Nothing else to do... 
+ */ + netdev_err(ndev, + "Timed out waiting for reset to complete\n"); + netdev_err(ndev, "Do a reset\n"); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_DO_RESET); + return; + } + + clear_bit(QL_RESET_ACTIVE, &qdev->flags); + clear_bit(QL_RESET_PER_SCSI, &qdev->flags); + clear_bit(QL_RESET_START, &qdev->flags); + ql_cycle_adapter(qdev, QL_NO_RESET); + } +} + +static void ql_tx_timeout_work(struct work_struct *work) +{ + struct ql3_adapter *qdev = + container_of(work, struct ql3_adapter, tx_timeout_work.work); + + ql_cycle_adapter(qdev, QL_DO_RESET); +} + +static void ql_get_board_info(struct ql3_adapter *qdev) +{ + struct ql3xxx_port_registers __iomem *port_regs = + qdev->mem_map_registers; + u32 value; + + value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); + + qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); + if (value & PORT_STATUS_64) + qdev->pci_width = 64; + else + qdev->pci_width = 32; + if (value & PORT_STATUS_X) + qdev->pci_x = 1; + else + qdev->pci_x = 0; + qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); +} + +static void ql3xxx_timer(unsigned long ptr) +{ + struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; + queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); +} + +static const struct net_device_ops ql3xxx_netdev_ops = { + .ndo_open = ql3xxx_open, + .ndo_start_xmit = ql3xxx_send, + .ndo_stop = ql3xxx_close, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ql3xxx_set_mac_address, + .ndo_tx_timeout = ql3xxx_tx_timeout, +}; + +static int ql3xxx_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_entry) +{ + struct net_device *ndev = NULL; + struct ql3_adapter *qdev = NULL; + static int cards_found; + int uninitialized_var(pci_using_dac), err; + + err = pci_enable_device(pdev); + if (err) { + pr_err("%s cannot enable PCI device\n", pci_name(pdev)); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + pci_using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + pr_err("%s no usable DMA configuration\n", pci_name(pdev)); + goto err_out_free_regions; + } + + ndev = alloc_etherdev(sizeof(struct ql3_adapter)); + if (!ndev) { + err = -ENOMEM; + goto err_out_free_regions; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + pci_set_drvdata(pdev, ndev); + + qdev = netdev_priv(ndev); + qdev->index = cards_found; + qdev->ndev = ndev; + qdev->pdev = pdev; + qdev->device_id = pci_entry->device; + qdev->port_link_state = LS_DOWN; + if (msi) + qdev->msi = 1; + + qdev->msg_enable = netif_msg_init(debug, default_msg); + + if (pci_using_dac) + ndev->features |= NETIF_F_HIGHDMA; + if (qdev->device_id == QL3032_DEVICE_ID) + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + + qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); + if (!qdev->mem_map_registers) { + pr_err("%s: cannot map device registers\n", pci_name(pdev)); + err = -EIO; + goto err_out_free_ndev; + } + + spin_lock_init(&qdev->adapter_lock); + spin_lock_init(&qdev->hw_lock); + + /* Set driver entry points */ + ndev->netdev_ops = &ql3xxx_netdev_ops; + ndev->ethtool_ops = 
&ql3xxx_ethtool_ops; + ndev->watchdog_timeo = 5 * HZ; + + netif_napi_add(ndev, &qdev->napi, ql_poll, 64); + + ndev->irq = pdev->irq; + + /* make sure the EEPROM is good */ + if (ql_get_nvram_params(qdev)) { + pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", + __func__, qdev->index); + err = -EIO; + goto err_out_iounmap; + } + + ql_set_mac_info(qdev); + + /* Validate and set parameters */ + if (qdev->mac_index) { + ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; + ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); + } else { + ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; + ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); + } + + ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; + + /* Record PCI bus information. */ + ql_get_board_info(qdev); + + /* + * Set the Maximum Memory Read Byte Count value. We do this to handle + * jumbo frames. + */ + if (qdev->pci_x) + pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); + + err = register_netdev(ndev); + if (err) { + pr_err("%s: cannot register net device\n", pci_name(pdev)); + goto err_out_iounmap; + } + + /* we're going to reset, so assume we have no link for now */ + + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + qdev->workqueue = create_singlethread_workqueue(ndev->name); + INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); + INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); + INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); + + init_timer(&qdev->adapter_timer); + qdev->adapter_timer.function = ql3xxx_timer; + qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ + qdev->adapter_timer.data = (unsigned long)qdev; + + if (!cards_found) { + pr_alert("%s\n", DRV_STRING); + pr_alert("Driver name: %s, Version: %s\n", + DRV_NAME, DRV_VERSION); + } + ql_display_dev_info(ndev); + + cards_found++; + return 0; + +err_out_iounmap: + iounmap(qdev->mem_map_registers); +err_out_free_ndev: + free_netdev(ndev); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out: + return err; +} + +static void ql3xxx_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql3_adapter *qdev = netdev_priv(ndev); + + unregister_netdev(ndev); + + ql_disable_interrupts(qdev); + + if (qdev->workqueue) { + cancel_delayed_work(&qdev->reset_work); + cancel_delayed_work(&qdev->tx_timeout_work); + destroy_workqueue(qdev->workqueue); + qdev->workqueue = NULL; + } + + iounmap(qdev->mem_map_registers); + pci_release_regions(pdev); + free_netdev(ndev); +} + +static struct pci_driver ql3xxx_driver = { + + .name = DRV_NAME, + .id_table = ql3xxx_pci_tbl, + .probe = ql3xxx_probe, + .remove = ql3xxx_remove, +}; + +module_pci_driver(ql3xxx_driver); diff --git a/drivers/net/ethernet/qlogic/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h new file mode 100644 index 000000000..73e234366 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qla3xxx.h @@ -0,0 +1,1189 @@ +/* + * QLogic QLA3xxx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qla3xxx for copyright and licensing details. + */ +#ifndef _QLA3XXX_H_ +#define _QLA3XXX_H_ + +/* + * IOCB Definitions... 
+ */ +#pragma pack(1) + +#define OPCODE_OB_MAC_IOCB_FN0 0x01 +#define OPCODE_OB_MAC_IOCB_FN2 0x21 + +#define OPCODE_IB_MAC_IOCB 0xF9 +#define OPCODE_IB_3032_MAC_IOCB 0x09 +#define OPCODE_IB_IP_IOCB 0xFA +#define OPCODE_IB_3032_IP_IOCB 0x0A + +#define OPCODE_FUNC_ID_MASK 0x30 +#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ + +#define FN0_MA_BITS_MASK 0x00 +#define FN1_MA_BITS_MASK 0x80 + +struct ob_mac_iocb_req { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_REQ_MA 0xe0 +#define OB_MAC_IOCB_REQ_F 0x10 +#define OB_MAC_IOCB_REQ_X 0x08 +#define OB_MAC_IOCB_REQ_D 0x02 +#define OB_MAC_IOCB_REQ_I 0x01 + u8 flags1; +#define OB_3032MAC_IOCB_REQ_IC 0x04 +#define OB_3032MAC_IOCB_REQ_TC 0x02 +#define OB_3032MAC_IOCB_REQ_UC 0x01 + u8 reserved0; + + u32 transaction_id; /* opaque for hardware */ + __le16 data_len; + u8 ip_hdr_off; + u8 ip_hdr_len; + __le32 reserved1; + __le32 reserved2; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved3; + __le32 reserved4; +}; +/* + * The following constants define control bits for buffer + * length fields for all IOCB's. + */ +#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */ +#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */ +#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */ +#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */ + +struct ob_mac_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_P 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + u32 transaction_id; /* opaque for hardware */ + __le32 reserved1; + __le32 reserved2; +}; + +struct ib_mac_iocb_rsp { + u8 opcode; +#define IB_MAC_IOCB_RSP_V 0x80 + u8 flags; +#define IB_MAC_IOCB_RSP_S 0x80 +#define IB_MAC_IOCB_RSP_H1 0x40 +#define IB_MAC_IOCB_RSP_H0 0x20 +#define IB_MAC_IOCB_RSP_B 0x10 +#define IB_MAC_IOCB_RSP_M 0x08 +#define IB_MAC_IOCB_RSP_MA 0x07 + + __le16 length; + __le32 reserved; + __le32 ial_low; + __le32 ial_high; + +}; + +struct ob_ip_iocb_req { + u8 opcode; + __le16 flags; +#define OB_IP_IOCB_REQ_O 0x100 +#define OB_IP_IOCB_REQ_H 0x008 +#define OB_IP_IOCB_REQ_U 0x004 +#define OB_IP_IOCB_REQ_D 0x002 +#define OB_IP_IOCB_REQ_I 0x001 + + u8 reserved0; + + __le32 transaction_id; + __le16 data_len; + __le16 reserved1; + __le32 hncb_ptr_low; + __le32 hncb_ptr_high; + __le32 buf_addr0_low; + __le32 buf_addr0_high; + __le32 buf_0_len; + __le32 buf_addr1_low; + __le32 buf_addr1_high; + __le32 buf_1_len; + __le32 buf_addr2_low; + __le32 buf_addr2_high; + __le32 buf_2_len; + __le32 reserved2; + __le32 reserved3; +}; + +/* defines for BufferLength fields above */ +#define OB_IP_IOCB_REQ_E 0x80000000 +#define OB_IP_IOCB_REQ_C 0x40000000 +#define OB_IP_IOCB_REQ_L 0x20000000 +#define OB_IP_IOCB_REQ_R 0x10000000 + +struct ob_ip_iocb_rsp { + u8 opcode; + u8 flags; +#define OB_MAC_IOCB_RSP_H 0x10 +#define OB_MAC_IOCB_RSP_E 0x08 +#define OB_MAC_IOCB_RSP_L 0x04 +#define OB_MAC_IOCB_RSP_S 0x02 +#define OB_MAC_IOCB_RSP_I 0x01 + + __le16 reserved0; + __le32 transaction_id; + __le32 reserved1; + __le32 reserved2; +}; + +struct ib_ip_iocb_rsp { + u8 opcode; +#define IB_IP_IOCB_RSP_3032_V 0x80 +#define IB_IP_IOCB_RSP_3032_O 0x40 +#define IB_IP_IOCB_RSP_3032_I 0x20 +#define IB_IP_IOCB_RSP_3032_R 0x10 + u8 flags; +#define IB_IP_IOCB_RSP_S 0x80 +#define IB_IP_IOCB_RSP_H1 0x40 
+#define IB_IP_IOCB_RSP_H0 0x20 +#define IB_IP_IOCB_RSP_B 0x10 +#define IB_IP_IOCB_RSP_M 0x08 +#define IB_IP_IOCB_RSP_MA 0x07 + + __le16 length; + __le16 checksum; +#define IB_IP_IOCB_RSP_3032_ICE 0x01 +#define IB_IP_IOCB_RSP_3032_CE 0x02 +#define IB_IP_IOCB_RSP_3032_NUC 0x04 +#define IB_IP_IOCB_RSP_3032_UDP 0x08 +#define IB_IP_IOCB_RSP_3032_TCP 0x10 +#define IB_IP_IOCB_RSP_3032_IPE 0x20 + __le16 reserved; +#define IB_IP_IOCB_RSP_R 0x01 + __le32 ial_low; + __le32 ial_high; +}; + +struct net_rsp_iocb { + u8 opcode; + u8 flags; + __le16 reserved0; + __le32 reserved[3]; +}; +#pragma pack() + +/* + * Register Definitions... + */ +#define PORT0_PHY_ADDRESS 0x1e00 +#define PORT1_PHY_ADDRESS 0x1f00 + +#define ETHERNET_CRC_SIZE 4 + +#define MII_SCAN_REGISTER 0x00000001 + +#define PHY_ID_0_REG 2 +#define PHY_ID_1_REG 3 + +#define PHY_OUI_1_MASK 0xfc00 +#define PHY_MODEL_MASK 0x03f0 + +/* Address for the Agere Phy */ +#define MII_AGERE_ADDR_1 0x00001000 +#define MII_AGERE_ADDR_2 0x00001100 + +/* 32-bit ispControlStatus */ +enum { + ISP_CONTROL_NP_MASK = 0x0003, + ISP_CONTROL_NP_PCSR = 0x0000, + ISP_CONTROL_NP_HMCR = 0x0001, + ISP_CONTROL_NP_LRAMCR = 0x0002, + ISP_CONTROL_NP_PSR = 0x0003, + ISP_CONTROL_RI = 0x0008, + ISP_CONTROL_CI = 0x0010, + ISP_CONTROL_PI = 0x0020, + ISP_CONTROL_IN = 0x0040, + ISP_CONTROL_BE = 0x0080, + ISP_CONTROL_FN_MASK = 0x0700, + ISP_CONTROL_FN0_NET = 0x0400, + ISP_CONTROL_FN0_SCSI = 0x0500, + ISP_CONTROL_FN1_NET = 0x0600, + ISP_CONTROL_FN1_SCSI = 0x0700, + ISP_CONTROL_LINK_DN_0 = 0x0800, + ISP_CONTROL_LINK_DN_1 = 0x1000, + ISP_CONTROL_FSR = 0x2000, + ISP_CONTROL_FE = 0x4000, + ISP_CONTROL_SR = 0x8000, +}; + +/* 32-bit ispInterruptMaskReg */ +enum { + ISP_IMR_ENABLE_INT = 0x0004, + ISP_IMR_DISABLE_RESET_INT = 0x0008, + ISP_IMR_DISABLE_CMPL_INT = 0x0010, + ISP_IMR_DISABLE_PROC_INT = 0x0020, +}; + +/* 32-bit serialPortInterfaceReg */ +enum { + ISP_SERIAL_PORT_IF_CLK = 0x0001, + ISP_SERIAL_PORT_IF_CS = 0x0002, + ISP_SERIAL_PORT_IF_D0 = 0x0004, + ISP_SERIAL_PORT_IF_DI = 0x0008, + ISP_NVRAM_MASK = (0x000F << 16), + ISP_SERIAL_PORT_IF_WE = 0x0010, + ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F, + ISP_SERIAL_PORT_IF_SCI = 0x0400, + ISP_SERIAL_PORT_IF_SC0 = 0x0800, + ISP_SERIAL_PORT_IF_SCE = 0x1000, + ISP_SERIAL_PORT_IF_SDI = 0x2000, + ISP_SERIAL_PORT_IF_SDO = 0x4000, + ISP_SERIAL_PORT_IF_SDE = 0x8000, + ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00, +}; + +/* semaphoreReg */ +enum { + QL_RESOURCE_MASK_BASE_CODE = 0x7, + QL_RESOURCE_BITS_BASE_CODE = 0x4, + QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1), + QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4), + QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7), + QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10), + QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13), + QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)), + QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)), + QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)), + QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)), + QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)), +}; + + /* + * QL3XXX memory-mapped registers + * QL3XXX has 4 "pages" of registers, each page occupying + * 256 bytes. Each page has a "common" area at the start and then + * page-specific registers after that. 
+ */ +struct ql3xxx_common_registers { + u32 MB0; /* Offset 0x00 */ + u32 MB1; /* Offset 0x04 */ + u32 MB2; /* Offset 0x08 */ + u32 MB3; /* Offset 0x0c */ + u32 MB4; /* Offset 0x10 */ + u32 MB5; /* Offset 0x14 */ + u32 MB6; /* Offset 0x18 */ + u32 MB7; /* Offset 0x1c */ + u32 flashBiosAddr; + u32 flashBiosData; + u32 ispControlStatus; + u32 ispInterruptMaskReg; + u32 serialPortInterfaceReg; + u32 semaphoreReg; + u32 reqQProducerIndex; + u32 rspQConsumerIndex; + + u32 rxLargeQProducerIndex; + u32 rxSmallQProducerIndex; + u32 arcMadiCommand; + u32 arcMadiData; +}; + +enum { + EXT_HW_CONFIG_SP_MASK = 0x0006, + EXT_HW_CONFIG_SP_NONE = 0x0000, + EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002, + EXT_HW_CONFIG_SP_ECC = 0x0004, + EXT_HW_CONFIG_SP_ECCx = 0x0006, + EXT_HW_CONFIG_SIZE_MASK = 0x0060, + EXT_HW_CONFIG_SIZE_128M = 0x0000, + EXT_HW_CONFIG_SIZE_256M = 0x0020, + EXT_HW_CONFIG_SIZE_512M = 0x0040, + EXT_HW_CONFIG_SIZE_INVALID = 0x0060, + EXT_HW_CONFIG_PD = 0x0080, + EXT_HW_CONFIG_FW = 0x0200, + EXT_HW_CONFIG_US = 0x0400, + EXT_HW_CONFIG_DCS_MASK = 0x1800, + EXT_HW_CONFIG_DCS_9MA = 0x0000, + EXT_HW_CONFIG_DCS_15MA = 0x0800, + EXT_HW_CONFIG_DCS_18MA = 0x1000, + EXT_HW_CONFIG_DCS_24MA = 0x1800, + EXT_HW_CONFIG_DDS_MASK = 0x6000, + EXT_HW_CONFIG_DDS_9MA = 0x0000, + EXT_HW_CONFIG_DDS_15MA = 0x2000, + EXT_HW_CONFIG_DDS_18MA = 0x4000, + EXT_HW_CONFIG_DDS_24MA = 0x6000, +}; + +/* InternalChipConfig */ +enum { + INTERNAL_CHIP_DM = 0x0001, + INTERNAL_CHIP_SD = 0x0002, + INTERNAL_CHIP_RAP_MASK = 0x000C, + INTERNAL_CHIP_RAP_RR = 0x0000, + INTERNAL_CHIP_RAP_NRM = 0x0004, + INTERNAL_CHIP_RAP_ERM = 0x0008, + INTERNAL_CHIP_RAP_ERMx = 0x000C, + INTERNAL_CHIP_WE = 0x0010, + INTERNAL_CHIP_EF = 0x0020, + INTERNAL_CHIP_FR = 0x0040, + INTERNAL_CHIP_FW = 0x0080, + INTERNAL_CHIP_FI = 0x0100, + INTERNAL_CHIP_FT = 0x0200, +}; + +/* portControl */ +enum { + PORT_CONTROL_DS = 0x0001, + PORT_CONTROL_HH = 0x0002, + PORT_CONTROL_EI = 0x0004, + PORT_CONTROL_ET = 0x0008, + PORT_CONTROL_EF = 0x0010, + PORT_CONTROL_DRM = 0x0020, + PORT_CONTROL_RLB = 0x0040, + PORT_CONTROL_RCB = 0x0080, + PORT_CONTROL_MAC = 0x0100, + PORT_CONTROL_IPV = 0x0200, + PORT_CONTROL_IFP = 0x0400, + PORT_CONTROL_ITP = 0x0800, + PORT_CONTROL_FI = 0x1000, + PORT_CONTROL_DFP = 0x2000, + PORT_CONTROL_OI = 0x4000, + PORT_CONTROL_CC = 0x8000, +}; + +/* portStatus */ +enum { + PORT_STATUS_SM0 = 0x0001, + PORT_STATUS_SM1 = 0x0002, + PORT_STATUS_X = 0x0008, + PORT_STATUS_DL = 0x0080, + PORT_STATUS_IC = 0x0200, + PORT_STATUS_MRC = 0x0400, + PORT_STATUS_NL = 0x0800, + PORT_STATUS_REV_ID_MASK = 0x7000, + PORT_STATUS_REV_ID_1 = 0x1000, + PORT_STATUS_REV_ID_2 = 0x2000, + PORT_STATUS_REV_ID_3 = 0x3000, + PORT_STATUS_64 = 0x8000, + PORT_STATUS_UP0 = 0x10000, + PORT_STATUS_AC0 = 0x20000, + PORT_STATUS_AE0 = 0x40000, + PORT_STATUS_UP1 = 0x100000, + PORT_STATUS_AC1 = 0x200000, + PORT_STATUS_AE1 = 0x400000, + PORT_STATUS_F0_ENABLED = 0x1000000, + PORT_STATUS_F1_ENABLED = 0x2000000, + PORT_STATUS_F2_ENABLED = 0x4000000, + PORT_STATUS_F3_ENABLED = 0x8000000, +}; + +/* macMIIMgmtControlReg */ +enum { + MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003, + MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000, + MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001, + MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002, + MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003, + MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008, + MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010, + MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020, + MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040, + MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080, +}; + +/* macMIIMgmtControlReg */ +enum { + 
MAC_MII_CONTROL_RC = 0x0001, + MAC_MII_CONTROL_SC = 0x0002, + MAC_MII_CONTROL_AS = 0x0004, + MAC_MII_CONTROL_NP = 0x0008, + MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070, + MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000, + MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010, + MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020, + MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030, + MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040, + MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050, + MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060, + MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070, + MAC_MII_CONTROL_RM = 0x8000, +}; + +/* macMIIStatusReg */ +enum { + MAC_MII_STATUS_BSY = 0x0001, + MAC_MII_STATUS_SC = 0x0002, + MAC_MII_STATUS_NV = 0x0004, +}; + +enum { + MAC_CONFIG_REG_PE = 0x0001, + MAC_CONFIG_REG_TF = 0x0002, + MAC_CONFIG_REG_RF = 0x0004, + MAC_CONFIG_REG_FD = 0x0008, + MAC_CONFIG_REG_GM = 0x0010, + MAC_CONFIG_REG_LB = 0x0020, + MAC_CONFIG_REG_SR = 0x8000, +}; + +enum { + MAC_HALF_DUPLEX_REG_ED = 0x10000, + MAC_HALF_DUPLEX_REG_NB = 0x20000, + MAC_HALF_DUPLEX_REG_BNB = 0x40000, + MAC_HALF_DUPLEX_REG_ALT = 0x80000, +}; + +enum { + IP_ADDR_INDEX_REG_MASK = 0x000f, + IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000, + IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001, + IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002, + IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003, + IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004, + IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, + IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, + IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, + IP_ADDR_INDEX_REG_6 = 0x0008, + IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, + IP_ADDR_INDEX_REG_E = 0x0040, +}; +enum { + QL3032_PORT_CONTROL_DS = 0x0001, + QL3032_PORT_CONTROL_HH = 0x0002, + QL3032_PORT_CONTROL_EIv6 = 0x0004, + QL3032_PORT_CONTROL_EIv4 = 0x0008, + QL3032_PORT_CONTROL_ET = 0x0010, + QL3032_PORT_CONTROL_EF = 0x0020, + QL3032_PORT_CONTROL_DRM = 0x0040, + QL3032_PORT_CONTROL_RLB = 0x0080, + QL3032_PORT_CONTROL_RCB = 0x0100, + QL3032_PORT_CONTROL_KIE = 0x0200, +}; + +enum { + PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f, + PROBE_MUX_ADDR_REG_SYSCLK = 0x0000, + PROBE_MUX_ADDR_REG_PCICLK = 0x0040, + PROBE_MUX_ADDR_REG_NRXCLK = 0x0080, + PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0, + PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00, + PROBE_MUX_ADDR_REG_UP = 0x4000, + PROBE_MUX_ADDR_REG_RE = 0x8000, +}; + +enum { + STATISTICS_INDEX_REG_MASK = 0x01ff, + STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000, + STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001, + STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002, + STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003, + STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004, + STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005, + STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006, + STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007, + STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008, + STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009, + STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a, + STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b, + STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c, + STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d, + STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e, + STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f, + STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010, + STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011, + STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012, + STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013, + STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014, + STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015, + STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016, + STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017, + STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018, + STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019, + STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a, + 
STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b, + STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c, + STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d, + STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e, + STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f, + STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020, + STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021, + STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022, + STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023, + STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024, + STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025, + STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026, + STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027, + STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028, + STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029, + STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030, + STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031, + STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032, + STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033, + STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034, + STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035, + STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036, + STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037, + STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038, + STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f, +}; + +enum { + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001, + PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002, + PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004, + PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010, + PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020, + PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040, + PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080, + PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100, + PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200, + PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400, + PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800, + PORT_FATAL_ERROR_STATUS_BLE = 0x00001000, + PORT_FATAL_ERROR_STATUS_SPE = 0x00002000, + PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000, + PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000, + PORT_FATAL_ERROR_STATUS_ICE = 0x00010000, + PORT_FATAL_ERROR_STATUS_ILE = 0x00020000, + PORT_FATAL_ERROR_STATUS_OPE = 0x00040000, + PORT_FATAL_ERROR_STATUS_TA = 0x00080000, + PORT_FATAL_ERROR_STATUS_MA = 0x00100000, + PORT_FATAL_ERROR_STATUS_SCE = 0x00200000, + PORT_FATAL_ERROR_STATUS_RPE = 0x00400000, + PORT_FATAL_ERROR_STATUS_MPE = 0x00800000, + PORT_FATAL_ERROR_STATUS_OCE = 0x01000000, +}; + +/* + * port control and status page - page 0 + */ + +struct ql3xxx_port_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 ExternalHWConfig; + u32 InternalChipConfig; + u32 portControl; + u32 portStatus; + u32 macAddrIndirectPtrReg; + u32 macAddrDataReg; + u32 macMIIMgmtControlReg; + u32 macMIIMgmtAddrReg; + u32 macMIIMgmtDataReg; + u32 macMIIStatusReg; + u32 mac0ConfigReg; + u32 mac0IpgIfgReg; + u32 mac0HalfDuplexReg; + u32 mac0MaxFrameLengthReg; + u32 mac0PauseThresholdReg; + u32 mac1ConfigReg; + u32 mac1IpgIfgReg; + u32 mac1HalfDuplexReg; + u32 mac1MaxFrameLengthReg; + u32 mac1PauseThresholdReg; + u32 ipAddrIndexReg; + u32 ipAddrDataReg; + u32 ipReassemblyTimeout; + u32 tcpMaxWindow; + u32 currentTcpTimestamp[2]; + u32 internalRamRWAddrReg; + u32 internalRamWDataReg; + u32 reclaimedBufferAddrRegLow; + u32 reclaimedBufferAddrRegHigh; + u32 tcpConfiguration; + u32 functionControl; + u32 fpgaRevID; + u32 localRamAddr; + u32 localRamDataAutoIncr; + u32 localRamDataNonIncr; + u32 gpOutput; + u32 gpInput; + u32 probeMuxAddr; + u32 probeMuxData; + u32 statisticsIndexReg; + u32 statisticsReadDataRegAutoIncr; + u32 statisticsReadDataRegNoIncr; + u32 PortFatalErrStatus; +}; + +/* + * port host 
memory config page - page 1 + */ +struct ql3xxx_host_memory_registers { + struct ql3xxx_common_registers CommonRegs; + + u32 reserved[12]; + + /* Network Request Queue */ + u32 reqConsumerIndex; + u32 reqConsumerIndexAddrLow; + u32 reqConsumerIndexAddrHigh; + u32 reqBaseAddrLow; + u32 reqBaseAddrHigh; + u32 reqLength; + + /* Network Completion Queue */ + u32 rspProducerIndex; + u32 rspProducerIndexAddrLow; + u32 rspProducerIndexAddrHigh; + u32 rspBaseAddrLow; + u32 rspBaseAddrHigh; + u32 rspLength; + + /* RX Large Buffer Queue */ + u32 rxLargeQConsumerIndex; + u32 rxLargeQBaseAddrLow; + u32 rxLargeQBaseAddrHigh; + u32 rxLargeQLength; + u32 rxLargeBufferLength; + + /* RX Small Buffer Queue */ + u32 rxSmallQConsumerIndex; + u32 rxSmallQBaseAddrLow; + u32 rxSmallQBaseAddrHigh; + u32 rxSmallQLength; + u32 rxSmallBufferLength; + +}; + +/* + * port local RAM page - page 2 + */ +struct ql3xxx_local_ram_registers { + struct ql3xxx_common_registers CommonRegs; + u32 bufletSize; + u32 maxBufletCount; + u32 currentBufletCount; + u32 reserved; + u32 freeBufletThresholdLow; + u32 freeBufletThresholdHigh; + u32 ipHashTableBase; + u32 ipHashTableCount; + u32 tcpHashTableBase; + u32 tcpHashTableCount; + u32 ncbBase; + u32 maxNcbCount; + u32 currentNcbCount; + u32 drbBase; + u32 maxDrbCount; + u32 currentDrbCount; +}; + +/* + * definitions for Semaphore bits in Semaphore/Serial NVRAM interface register + */ + +#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x)) +#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16) ) + +/* + * I/O register + */ + +enum { + CONTROL_REG = 0, + STATUS_REG = 1, + PHY_STAT_LINK_UP = 0x0004, + PHY_CTRL_LOOPBACK = 0x4000, + + PETBI_CONTROL_REG = 0x00, + PETBI_CTRL_ALL_PARAMS = 0x7140, + PETBI_CTRL_SOFT_RESET = 0x8000, + PETBI_CTRL_AUTO_NEG = 0x1000, + PETBI_CTRL_RESTART_NEG = 0x0200, + PETBI_CTRL_FULL_DUPLEX = 0x0100, + PETBI_CTRL_SPEED_1000 = 0x0040, + + PETBI_STATUS_REG = 0x01, + PETBI_STAT_NEG_DONE = 0x0020, + PETBI_STAT_LINK_UP = 0x0004, + + PETBI_NEG_ADVER = 0x04, + PETBI_NEG_PAUSE = 0x0080, + PETBI_NEG_PAUSE_MASK = 0x0180, + PETBI_NEG_DUPLEX = 0x0020, + PETBI_NEG_DUPLEX_MASK = 0x0060, + + PETBI_NEG_PARTNER = 0x05, + PETBI_NEG_ERROR_MASK = 0x3000, + + PETBI_EXPANSION_REG = 0x06, + PETBI_EXP_PAGE_RX = 0x0002, + + PHY_GIG_CONTROL = 9, + PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/ + PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/ + PHY_GIG_ALL_PARAMS = 0x0300, + PHY_GIG_ADV_1000F = 0x0200, + PHY_GIG_ADV_1000H = 0x0100, + + PHY_NEG_ADVER = 4, + PHY_NEG_ALL_PARAMS = 0x0fe0, + PHY_NEG_ASY_PAUSE = 0x0800, + PHY_NEG_SYM_PAUSE = 0x0400, + PHY_NEG_ADV_SPEED = 0x01e0, + PHY_NEG_ADV_100F = 0x0100, + PHY_NEG_ADV_100H = 0x0080, + PHY_NEG_ADV_10F = 0x0040, + PHY_NEG_ADV_10H = 0x0020, + + PETBI_TBI_CTRL = 0x11, + PETBI_TBI_RESET = 0x8000, + PETBI_TBI_AUTO_SENSE = 0x0100, + PETBI_TBI_SERDES_MODE = 0x0010, + PETBI_TBI_SERDES_WRAP = 0x0002, + + AUX_CONTROL_STATUS = 0x1c, + PHY_AUX_NEG_DONE = 0x8000, + PHY_NEG_PARTNER = 5, + PHY_AUX_DUPLEX_STAT = 0x0020, + PHY_AUX_SPEED_STAT = 0x0018, + PHY_AUX_NO_HW_STRAP = 0x0004, + PHY_AUX_RESET_STICK = 0x0002, + PHY_NEG_PAUSE = 0x0400, + PHY_CTRL_SOFT_RESET = 0x8000, + PHY_CTRL_AUTO_NEG = 0x1000, + PHY_CTRL_RESTART_NEG = 0x0200, +}; +enum { +/* AM29LV Flash definitions */ + FM93C56A_START = 0x1, +/* Commands */ + FM93C56A_READ = 0x2, + FM93C56A_WEN = 0x0, + FM93C56A_WRITE = 0x1, + FM93C56A_WRITE_ALL = 0x0, + FM93C56A_WDS = 0x0, + FM93C56A_ERASE = 0x3, + FM93C56A_ERASE_ALL = 0x0, +/* Command Extensions */ + 
FM93C56A_WEN_EXT = 0x3, + FM93C56A_WRITE_ALL_EXT = 0x1, + FM93C56A_WDS_EXT = 0x0, + FM93C56A_ERASE_ALL_EXT = 0x2, +/* Special Bits */ + FM93C56A_READ_DUMMY_BITS = 1, + FM93C56A_READY = 0, + FM93C56A_BUSY = 1, + FM93C56A_CMD_BITS = 2, +/* AM29LV Flash definitions */ + FM93C56A_SIZE_8 = 0x100, + FM93C56A_SIZE_16 = 0x80, + FM93C66A_SIZE_8 = 0x200, + FM93C66A_SIZE_16 = 0x100, + FM93C86A_SIZE_16 = 0x400, +/* Address Bits */ + FM93C56A_NO_ADDR_BITS_16 = 8, + FM93C56A_NO_ADDR_BITS_8 = 9, + FM93C86A_NO_ADDR_BITS_16 = 10, +/* Data Bits */ + FM93C56A_DATA_BITS_16 = 16, + FM93C56A_DATA_BITS_8 = 8, +}; +enum { +/* Auburn Bits */ + AUBURN_EEPROM_DI = 0x8, + AUBURN_EEPROM_DI_0 = 0x0, + AUBURN_EEPROM_DI_1 = 0x8, + AUBURN_EEPROM_DO = 0x4, + AUBURN_EEPROM_DO_0 = 0x0, + AUBURN_EEPROM_DO_1 = 0x4, + AUBURN_EEPROM_CS = 0x2, + AUBURN_EEPROM_CS_0 = 0x0, + AUBURN_EEPROM_CS_1 = 0x2, + AUBURN_EEPROM_CLK_RISE = 0x1, + AUBURN_EEPROM_CLK_FALL = 0x0, +}; +enum {EEPROM_SIZE = FM93C86A_SIZE_16, + EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16, + EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16, +}; + +/* + * MAC Config data structure + */ + struct eeprom_port_cfg { + u16 etherMtu_mac; + u16 pauseThreshold_mac; + u16 resumeThreshold_mac; + u16 portConfiguration; +#define PORT_CONFIG_DEFAULT 0xf700 +#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000 +#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000 +#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000 +#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000 +#define PORT_CONFIG_1000MB_SPEED 0x0400 +#define PORT_CONFIG_100MB_SPEED 0x0200 +#define PORT_CONFIG_10MB_SPEED 0x0100 +#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00 + u16 reserved[12]; + +}; + +/* + * BIOS data structure + */ +struct eeprom_bios_cfg { + u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12; + + u8 bootID0:7, boodID0Valid:1; + u8 bootLun0[8]; + + u8 bootID1:7, boodID1Valid:1; + u8 bootLun1[8]; + + u16 MaxLunsTrgt; + u8 reserved[10]; +}; + +/* + * Function Specific Data structure + */ +struct eeprom_function_cfg { + u8 reserved[30]; + u16 macAddress[3]; + u16 macAddressSecondary[3]; + + u16 subsysVendorId; + u16 subsysDeviceId; +}; + +/* + * EEPROM format + */ +struct eeprom_data { + u8 asicId[4]; + u16 version_and_numPorts; /* together to avoid endianness crap */ + u16 boardId; + +#define EEPROM_BOARDID_STR_SIZE 16 +#define EEPROM_SERIAL_NUM_SIZE 16 + + u8 boardIdStr[16]; + u8 serialNumber[16]; + u16 extHwConfig; + struct eeprom_port_cfg macCfg_port0; + struct eeprom_port_cfg macCfg_port1; + u16 bufletSize; + u16 bufletCount; + u16 tcpWindowThreshold50; + u16 tcpWindowThreshold25; + u16 tcpWindowThreshold0; + u16 ipHashTableBaseHi; + u16 ipHashTableBaseLo; + u16 ipHashTableSize; + u16 tcpHashTableBaseHi; + u16 tcpHashTableBaseLo; + u16 tcpHashTableSize; + u16 ncbTableBaseHi; + u16 ncbTableBaseLo; + u16 ncbTableSize; + u16 drbTableBaseHi; + u16 drbTableBaseLo; + u16 drbTableSize; + u16 reserved_142[4]; + u16 ipReassemblyTimeout; + u16 tcpMaxWindowSize; + u16 ipSecurity; +#define IPSEC_CONFIG_PRESENT 0x0001 + u8 reserved_156[294]; + u16 qDebug[8]; + struct eeprom_function_cfg funcCfg_fn0; + u16 reserved_510; + u8 oemSpace[432]; + struct eeprom_bios_cfg biosCfg_fn1; + struct eeprom_function_cfg funcCfg_fn1; + u16 reserved_1022; + u8 reserved_1024[464]; + struct eeprom_function_cfg funcCfg_fn2; + u16 reserved_1534; + u8 reserved_1536[432]; + struct eeprom_bios_cfg biosCfg_fn3; + struct eeprom_function_cfg funcCfg_fn3; + u16 checksum; +}; + +/* + * General definitions... 
+ */ + +/* + * Below are a number of compiler switches for controlling driver behavior. + * Some are not supported under certain conditions and are notated as such. + */ + +#define QL3XXX_VENDOR_ID 0x1077 +#define QL3022_DEVICE_ID 0x3022 +#define QL3032_DEVICE_ID 0x3032 + +/* MTU & Frame Size stuff */ +#define NORMAL_MTU_SIZE ETH_DATA_LEN +#define JUMBO_MTU_SIZE 9000 +#define VLAN_ID_LEN 2 + +/* Request Queue Related Definitions */ +#define NUM_REQ_Q_ENTRIES 256 /* 256 entries * 64 bytes = 16384 */ + +/* Response Queue Related Definitions */ +#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */ + +/* Transmit and Receive Buffers */ +#define NUM_LBUFQ_ENTRIES 128 +#define JUMBO_NUM_LBUFQ_ENTRIES 32 +#define NUM_SBUFQ_ENTRIES 64 +#define QL_SMALL_BUFFER_SIZE 32 +#define QL_ADDR_ELE_PER_BUFQ_ENTRY \ +(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element)) + /* Each send has at least one control block. This is how many we keep. */ +#define NUM_SMALL_BUFFERS (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY) + +#define QL_HEADER_SPACE 32 /* make header space at top of skb. */ +/* + * Large & Small Buffers for Receives + */ +struct lrg_buf_q_entry { + + __le32 addr0_lower; +#define IAL_LAST_ENTRY 0x00000001 +#define IAL_CONT_ENTRY 0x00000002 +#define IAL_FLAG_MASK 0x00000003 + __le32 addr0_upper; + __le32 addr1_lower; + __le32 addr1_upper; + __le32 addr2_lower; + __le32 addr2_upper; + __le32 addr3_lower; + __le32 addr3_upper; + __le32 addr4_lower; + __le32 addr4_upper; + __le32 addr5_lower; + __le32 addr5_upper; + __le32 addr6_lower; + __le32 addr6_upper; + __le32 addr7_lower; + __le32 addr7_upper; + +}; + +struct bufq_addr_element { + __le32 addr_low; + __le32 addr_high; +}; + +#define QL_NO_RESET 0 +#define QL_DO_RESET 1 + +enum link_state_t { + LS_UNKNOWN = 0, + LS_DOWN, + LS_DEGRADE, + LS_RECOVER, + LS_UP, +}; + +struct ql_rcv_buf_cb { + struct ql_rcv_buf_cb *next; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); + __le32 buf_phy_addr_low; + __le32 buf_phy_addr_high; + int index; +}; + +/* + * Original IOCB has 3 sg entries: + * first points to skb-data area + * second points to first frag + * third points to next oal. + * OAL has 5 entries: + * 1 thru 4 point to frags + * fifth points to next oal. + */ +#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) + +struct oal_entry { + __le32 dma_lo; + __le32 dma_hi; + __le32 len; +#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ +#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ +}; + +struct oal { + struct oal_entry oal_entry[5]; +}; + +struct map_list { + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct ql_tx_buf_cb { + struct sk_buff *skb; + struct ob_mac_iocb_req *queue_entry; + int seg_count; + struct oal *oal; + struct map_list map[MAX_SKB_FRAGS+1]; +}; + +/* definitions for type field */ +#define QL_BUF_TYPE_MACIOCB 0x01 +#define QL_BUF_TYPE_IPIOCB 0x02 +#define QL_BUF_TYPE_TCPIOCB 0x03 + +/* qdev->flags definitions. */ +enum { QL_RESET_DONE = 1, /* Reset finished. */ + QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */ + QL_RESET_START = 3, /* Please reset the chip. */ + QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */ + QL_TX_TIMEOUT = 5, /* Timeout in progress. */ + QL_LINK_MASTER = 6, /* This driver controls the link. */ + QL_ADAPTER_UP = 7, /* Adapter has been brought up. */ + QL_THREAD_UP = 8, /* This flag is available. */ + QL_LINK_UP = 9, /* Link Status. 
*/ + QL_ALLOC_REQ_RSP_Q_DONE = 10, + QL_ALLOC_BUFQS_DONE = 11, + QL_ALLOC_SMALL_BUF_DONE = 12, + QL_LINK_OPTICAL = 13, + QL_MSI_ENABLED = 14, +}; + +/* + * ql3_adapter - The main Adapter structure definition. + * This structure has all fields relevant to the hardware. + */ + +struct ql3_adapter { + u32 reserved_00; + unsigned long flags; + + /* PCI Configuration information for this device */ + struct pci_dev *pdev; + struct net_device *ndev; /* Parent NET device */ + + struct napi_struct napi; + + /* Hardware information */ + u8 chip_rev_id; + u8 pci_slot; + u8 pci_width; + u8 pci_x; + u32 msi; + int index; + struct timer_list adapter_timer; /* timer used for various functions */ + + spinlock_t adapter_lock; + spinlock_t hw_lock; + + /* PCI Bus Relative Register Addresses */ + u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */ + struct ql3xxx_port_registers __iomem *mem_map_registers; + u32 current_page; /* tracks current register page */ + + u32 msg_enable; + u8 reserved_01[2]; + u8 reserved_02[2]; + + /* Page for Shadow Registers */ + void *shadow_reg_virt_addr; + dma_addr_t shadow_reg_phy_addr; + + /* Net Request Queue */ + u32 req_q_size; + u32 reserved_03; + struct ob_mac_iocb_req *req_q_virt_addr; + dma_addr_t req_q_phy_addr; + u16 req_producer_index; + u16 reserved_04; + u16 *preq_consumer_index; + u32 req_consumer_index_phy_addr_high; + u32 req_consumer_index_phy_addr_low; + atomic_t tx_count; + struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES]; + + /* Net Response Queue */ + u32 rsp_q_size; + u32 eeprom_cmd_data; + struct net_rsp_iocb *rsp_q_virt_addr; + dma_addr_t rsp_q_phy_addr; + struct net_rsp_iocb *rsp_current; + u16 rsp_consumer_index; + u16 reserved_06; + volatile __le32 *prsp_producer_index; + u32 rsp_producer_index_phy_addr_high; + u32 rsp_producer_index_phy_addr_low; + + /* Large Buffer Queue */ + u32 lrg_buf_q_alloc_size; + u32 lrg_buf_q_size; + void *lrg_buf_q_alloc_virt_addr; + void *lrg_buf_q_virt_addr; + dma_addr_t lrg_buf_q_alloc_phy_addr; + dma_addr_t lrg_buf_q_phy_addr; + u32 lrg_buf_q_producer_index; + u32 lrg_buf_release_cnt; + struct bufq_addr_element *lrg_buf_next_free; + u32 num_large_buffers; + u32 num_lbufq_entries; + + /* Large (Receive) Buffers */ + struct ql_rcv_buf_cb *lrg_buf; + struct ql_rcv_buf_cb *lrg_buf_free_head; + struct ql_rcv_buf_cb *lrg_buf_free_tail; + u32 lrg_buf_free_count; + u32 lrg_buffer_len; + u32 lrg_buf_index; + u32 lrg_buf_skb_check; + + /* Small Buffer Queue */ + u32 small_buf_q_alloc_size; + u32 small_buf_q_size; + u32 small_buf_q_producer_index; + void *small_buf_q_alloc_virt_addr; + void *small_buf_q_virt_addr; + dma_addr_t small_buf_q_alloc_phy_addr; + dma_addr_t small_buf_q_phy_addr; + u32 small_buf_index; + + /* Small (Receive) Buffers */ + void *small_buf_virt_addr; + dma_addr_t small_buf_phy_addr; + u32 small_buf_phy_addr_low; + u32 small_buf_phy_addr_high; + u32 small_buf_release_cnt; + u32 small_buf_total_size; + + struct eeprom_data nvram_data; + u32 port_link_state; + + /* 4022 specific */ + u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ + u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ + u32 mac_ob_opcode; /* Opcode to use on mac transmission */ + u32 mb_bit_mask; /* MA Bits mask to use on transmission */ + u32 numPorts; + struct workqueue_struct *workqueue; + struct delayed_work reset_work; + struct delayed_work tx_timeout_work; + struct delayed_work link_state_work; + u32 max_frame_size; + u32 device_id; + u16 phyType; 
+}; + +#endif /* _QLA3XXX_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile new file mode 100644 index 000000000..3c2c2c7c1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices +# + +obj-$(CONFIG_QLCNIC) := qlcnic.o + +qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ + qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \ + qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \ + qlcnic_83xx_init.o qlcnic_83xx_vnic.o \ + qlcnic_sriov_common.o + +qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o + +qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h new file mode 100644 index 000000000..00d00beb2 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -0,0 +1,2404 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef _QLCNIC_H_ +#define _QLCNIC_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include "qlcnic_hdr.h" +#include "qlcnic_hw.h" +#include "qlcnic_83xx_hw.h" +#include "qlcnic_dcb.h" + +#define _QLCNIC_LINUX_MAJOR 5 +#define _QLCNIC_LINUX_MINOR 3 +#define _QLCNIC_LINUX_SUBVERSION 62 +#define QLCNIC_LINUX_VERSIONID "5.3.62" +#define QLCNIC_DRV_IDC_VER 0x01 +#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ + (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) + +#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) +#define _major(v) (((v) >> 24) & 0xff) +#define _minor(v) (((v) >> 16) & 0xff) +#define _build(v) ((v) & 0xffff) + +/* version in image has weird encoding: + * 7:0 - major + * 15:8 - minor + * 31:16 - build (little endian) + */ +#define QLCNIC_DECODE_VERSION(v) \ + QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16)) + +#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2) +#define QLCNIC_NUM_FLASH_SECTORS (64) +#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024) +#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \ + * QLCNIC_FLASH_SECTOR_SIZE) + +#define RCV_DESC_RINGSIZE(rds_ring) \ + (sizeof(struct rcv_desc) * (rds_ring)->num_desc) +#define RCV_BUFF_RINGSIZE(rds_ring) \ + (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc) +#define STATUS_DESC_RINGSIZE(sds_ring) \ + (sizeof(struct status_desc) * (sds_ring)->num_desc) +#define TX_BUFF_RINGSIZE(tx_ring) \ + (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc) +#define TX_DESC_RINGSIZE(tx_ring) \ + (sizeof(struct cmd_desc_type0) * tx_ring->num_desc) + +#define QLCNIC_P3P_A0 0x50 +#define QLCNIC_P3P_C0 0x58 + +#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0) + +#define FIRST_PAGE_GROUP_START 0 +#define FIRST_PAGE_GROUP_END 0x100000 + +#define P3P_MAX_MTU (9600) +#define P3P_MIN_MTU (68) +#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */ + +#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN) +#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU) +#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 +#define QLCNIC_LRO_BUFFER_EXTRA 2048 + +/* Tx defines */ +#define QLCNIC_MAX_FRAGS_PER_TX 14 +#define MAX_TSO_HEADER_DESC 2 +#define MGMT_CMD_DESC_RESV 4 +#define TX_STOP_THRESH 
((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ + + MGMT_CMD_DESC_RESV) +#define QLCNIC_MAX_TX_TIMEOUTS 2 + +/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */ +#define QLCNIC_SINGLE_RING 1 +#define QLCNIC_DEF_SDS_RINGS 4 +#define QLCNIC_DEF_TX_RINGS 4 +#define QLCNIC_MAX_VNIC_TX_RINGS 4 +#define QLCNIC_MAX_VNIC_SDS_RINGS 4 +#define QLCNIC_83XX_MINIMUM_VECTOR 3 +#define QLCNIC_82XX_MINIMUM_VECTOR 2 + +enum qlcnic_queue_type { + QLCNIC_TX_QUEUE = 1, + QLCNIC_RX_QUEUE, +}; + +/* Operational mode for driver */ +#define QLCNIC_VNIC_MODE 0xFF +#define QLCNIC_DEFAULT_MODE 0x0 + +/* Virtual NIC function count */ +#define QLC_DEFAULT_VNIC_COUNT 8 +#define QLC_84XX_VNIC_COUNT 16 + +/* + * Following are the states of the Phantom. Phantom will set them and + * Host will read to check if the fields are correct. + */ +#define PHAN_INITIALIZE_FAILED 0xffff +#define PHAN_INITIALIZE_COMPLETE 0xff01 + +/* Host writes the following to notify that it has done the init-handshake */ +#define PHAN_INITIALIZE_ACK 0xf00f +#define PHAN_PEG_RCV_INITIALIZED 0xff01 + +#define NUM_RCV_DESC_RINGS 3 + +#define RCV_RING_NORMAL 0 +#define RCV_RING_JUMBO 1 + +#define MIN_CMD_DESCRIPTORS 64 +#define MIN_RCV_DESCRIPTORS 64 +#define MIN_JUMBO_DESCRIPTORS 32 + +#define MAX_CMD_DESCRIPTORS 1024 +#define MAX_RCV_DESCRIPTORS_1G 4096 +#define MAX_RCV_DESCRIPTORS_10G 8192 +#define MAX_RCV_DESCRIPTORS_VF 2048 +#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 +#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 + +#define DEFAULT_RCV_DESCRIPTORS_1G 2048 +#define DEFAULT_RCV_DESCRIPTORS_10G 4096 +#define DEFAULT_RCV_DESCRIPTORS_VF 1024 +#define MAX_RDS_RINGS 2 + +#define get_next_index(index, length) \ + (((index) + 1) & ((length) - 1)) + +/* + * Following data structures describe the descriptors that will be used. + * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when + * we are doing LSO (above the 1500 size packet) only. 
+ */ +struct cmd_desc_type0 { + u8 tcp_hdr_offset; /* For LSO only */ + u8 ip_hdr_offset; /* For LSO only */ + __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */ + __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */ + + __le64 addr_buffer2; + + __le16 encap_descr; /* 15:10 offset of outer L3 header, + * 9:6 number of 32bit words in outer L3 header, + * 5 offload outer L4 checksum, + * 4 offload outer L3 checksum, + * 3 Inner L4 type, TCP=0, UDP=1, + * 2 Inner L3 type, IPv4=0, IPv6=1, + * 1 Outer L3 type,IPv4=0, IPv6=1, + * 0 type of encapsulation, GRE=0, VXLAN=1 + */ + __le16 mss; + u8 port_ctxid; /* 7:4 ctxid 3:0 port */ + u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ + u8 outer_hdr_length; /* Encapsulation only */ + u8 rsvd1; + + __le64 addr_buffer3; + __le64 addr_buffer1; + + __le16 buffer_length[4]; + + __le64 addr_buffer4; + + u8 eth_addr[ETH_ALEN]; + __le16 vlan_TCI; /* In case of encapsulation, + * this is for outer VLAN + */ + +} __attribute__ ((aligned(64))); + +/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ +struct rcv_desc { + __le16 reference_handle; + __le16 reserved; + __le32 buffer_length; /* allocated buffer length (usually 2K) */ + __le64 addr_buffer; +} __packed; + +struct status_desc { + __le64 status_desc_data[2]; +} __attribute__ ((aligned(16))); + +/* UNIFIED ROMIMAGE */ +#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000 +#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0 +#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6 +#define QLCNIC_UNI_DIR_SECT_FW 0x7 + +/*Offsets */ +#define QLCNIC_UNI_CHIP_REV_OFF 10 +#define QLCNIC_UNI_FLAGS_OFF 11 +#define QLCNIC_UNI_BIOS_VERSION_OFF 12 +#define QLCNIC_UNI_BOOTLD_IDX_OFF 27 +#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29 + +struct uni_table_desc{ + __le32 findex; + __le32 num_entries; + __le32 entry_size; + __le32 reserved[5]; +}; + +struct uni_data_desc{ + __le32 findex; + __le32 size; + __le32 reserved[5]; +}; + +/* Flash Defines and Structures */ +#define QLCNIC_FLT_LOCATION 0x3F1000 +#define QLCNIC_FDT_LOCATION 0x3F0000 +#define QLCNIC_B0_FW_IMAGE_REGION 0x74 +#define QLCNIC_C0_FW_IMAGE_REGION 0x97 +#define QLCNIC_BOOTLD_REGION 0X72 +struct qlcnic_flt_header { + u16 version; + u16 len; + u16 checksum; + u16 reserved; +}; + +struct qlcnic_flt_entry { + u8 region; + u8 reserved0; + u8 attrib; + u8 reserved1; + u32 size; + u32 start_addr; + u32 end_addr; +}; + +/* Flash Descriptor Table */ +struct qlcnic_fdt { + u32 valid; + u16 ver; + u16 len; + u16 cksum; + u16 unused; + u8 model[16]; + u8 mfg_id; + u16 id; + u8 flag; + u8 erase_cmd; + u8 alt_erase_cmd; + u8 write_enable_cmd; + u8 write_enable_bits; + u8 write_statusreg_cmd; + u8 unprotected_sec_cmd; + u8 read_manuf_cmd; + u32 block_size; + u32 alt_block_size; + u32 flash_size; + u32 write_enable_data; + u8 readid_addr_len; + u8 write_disable_bits; + u8 read_dev_id_len; + u8 chip_erase_cmd; + u16 read_timeo; + u8 protected_sec_cmd; + u8 resvd[65]; +}; +/* Magic number to let user know flash is programmed */ +#define QLCNIC_BDINFO_MAGIC 0x12345678 + +#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021 +#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022 +#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023 +#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024 +#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025 +#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026 +#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027 +#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028 +#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029 +#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a +#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b +#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031 
+#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032 +#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080 + +#define QLCNIC_MSIX_TABLE_OFFSET 0x44 + +/* Flash memory map */ +#define QLCNIC_BRDCFG_START 0x4000 /* board config */ +#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ +#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ +#define QLCNIC_USER_START 0x3E8000 /* Firmware info */ + +#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) +#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) +#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c) +#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c) + +#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8) +#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128) + +#define QLCNIC_FW_MIN_SIZE (0x3fffff) +#define QLCNIC_UNIFIED_ROMIMAGE 0 +#define QLCNIC_FLASH_ROMIMAGE 1 +#define QLCNIC_UNKNOWN_ROMIMAGE 0xff + +#define QLCNIC_UNIFIED_ROMIMAGE_NAME "/*(DEBLOBBED)*/" +#define QLCNIC_FLASH_ROMIMAGE_NAME "flash" + +extern char qlcnic_driver_name[]; + +extern int qlcnic_use_msi; +extern int qlcnic_use_msi_x; +extern int qlcnic_auto_fw_reset; +extern int qlcnic_load_fw_file; + +/* Number of status descriptors to handle per interrupt */ +#define MAX_STATUS_HANDLE (64) + +/* + * qlcnic_skb_frag{} is to contain mapping info for each SG list. This + * has to be freed when DMA is complete. This is part of qlcnic_tx_buffer{}. + */ +struct qlcnic_skb_frag { + u64 dma; + u64 length; +}; + +/* Following defines are for the state of the buffers */ +#define QLCNIC_BUFFER_FREE 0 +#define QLCNIC_BUFFER_BUSY 1 + +/* + * There will be one qlcnic_buffer per skb packet. These will be + * used to save the dma info for pci_unmap_page() + */ +struct qlcnic_cmd_buffer { + struct sk_buff *skb; + struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1]; + u32 frag_count; +}; + +/* In rx_buffer, we do not need multiple fragments as is a single buffer */ +struct qlcnic_rx_buffer { + u16 ref_handle; + struct sk_buff *skb; + struct list_head list; + u64 dma; +}; + +/* Board types */ +#define QLCNIC_GBE 0x01 +#define QLCNIC_XGBE 0x02 + +/* + * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is + * adjusted based on configured MTU. 
+ */ +#define QLCNIC_INTR_COAL_TYPE_RX 1 +#define QLCNIC_INTR_COAL_TYPE_TX 2 +#define QLCNIC_INTR_COAL_TYPE_RX_TX 3 + +#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3 +#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256 + +#define QLCNIC_DEF_INTR_COALESCE_TX_TIME_US 64 +#define QLCNIC_DEF_INTR_COALESCE_TX_PACKETS 64 + +#define QLCNIC_INTR_DEFAULT 0x04 +#define QLCNIC_CONFIG_INTR_COALESCE 3 +#define QLCNIC_DEV_INFO_SIZE 2 + +struct qlcnic_nic_intr_coalesce { + u8 type; + u8 sts_ring_mask; + u16 rx_packets; + u16 rx_time_us; + u16 tx_packets; + u16 tx_time_us; + u16 flag; + u32 timer_out; +}; + +struct qlcnic_83xx_dump_template_hdr { + u32 type; + u32 offset; + u32 size; + u32 cap_mask; + u32 num_entries; + u32 version; + u32 timestamp; + u32 checksum; + u32 drv_cap_mask; + u32 sys_info[3]; + u32 saved_state[16]; + u32 cap_sizes[8]; + u32 ocm_wnd_reg[16]; + u32 rsvd[0]; +}; + +struct qlcnic_82xx_dump_template_hdr { + u32 type; + u32 offset; + u32 size; + u32 cap_mask; + u32 num_entries; + u32 version; + u32 timestamp; + u32 checksum; + u32 drv_cap_mask; + u32 sys_info[3]; + u32 saved_state[16]; + u32 cap_sizes[8]; + u32 rsvd[7]; + u32 capabilities; + u32 rsvd1[0]; +}; + +#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16) + +struct qlcnic_fw_dump { + u8 clr; /* flag to indicate if dump is cleared */ + bool enable; /* enable/disable dump */ + u32 size; /* total size of the dump */ + u32 cap_mask; /* Current capture mask */ + void *data; /* dump data area */ + void *tmpl_hdr; + dma_addr_t phys_addr; + void *dma_buffer; + bool use_pex_dma; + /* Read only elements which are common between 82xx and 83xx + * template header. Update these values immediately after we read + * template header from Firmware + */ + u32 tmpl_hdr_size; + u32 version; + u32 num_entries; + u32 offset; +}; + +/* + * One hardware_context{} per adapter + * contains interrupt info as well shared hardware info. 
+ */ +struct qlcnic_hardware_context { + void __iomem *pci_base0; + void __iomem *ocm_win_crb; + + unsigned long pci_len0; + + rwlock_t crb_lock; + struct mutex mem_lock; + + u8 revision_id; + u8 pci_func; + u8 linkup; + u8 loopback_state; + u8 beacon_state; + u8 has_link_events; + u8 fw_type; + u8 physical_port; + u8 reset_context; + u8 msix_supported; + u8 max_mac_filters; + u8 mc_enabled; + u8 max_mc_count; + u8 diag_test; + u8 num_msix; + u8 nic_mode; + int diag_cnt; + + u16 max_uc_count; + u16 port_type; + u16 board_type; + u16 supported_type; + + u16 link_speed; + u16 link_duplex; + u16 link_autoneg; + u16 module_type; + + u16 op_mode; + u16 switch_mode; + u16 max_tx_ques; + u16 max_rx_ques; + u16 max_mtu; + u32 msg_enable; + u16 total_nic_func; + u16 max_pci_func; + u32 max_vnic_func; + u32 total_pci_func; + + u32 capabilities; + u32 extra_capability[3]; + u32 temp; + u32 int_vec_bit; + u32 fw_hal_version; + u32 port_config; + struct qlcnic_hardware_ops *hw_ops; + struct qlcnic_nic_intr_coalesce coal; + struct qlcnic_fw_dump fw_dump; + struct qlcnic_fdt fdt; + struct qlc_83xx_reset reset; + struct qlc_83xx_idc idc; + struct qlc_83xx_fw_info *fw_info; + struct qlcnic_intrpt_config *intr_tbl; + struct qlcnic_sriov *sriov; + u32 *reg_tbl; + u32 *ext_reg_tbl; + u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; + u32 mbox_reg[4]; + struct qlcnic_mailbox *mailbox; + u8 extend_lb_time; + u8 phys_port_id[ETH_ALEN]; + u8 lb_mode; + u16 vxlan_port; + struct device *hwmon_dev; + u32 post_mode; + bool run_post; +}; + +struct qlcnic_adapter_stats { + u64 xmitcalled; + u64 xmitfinished; + u64 rxdropped; + u64 txdropped; + u64 csummed; + u64 rx_pkts; + u64 lro_pkts; + u64 rxbytes; + u64 txbytes; + u64 lrobytes; + u64 lso_frames; + u64 encap_lso_frames; + u64 encap_tx_csummed; + u64 encap_rx_csummed; + u64 xmit_on; + u64 xmit_off; + u64 skb_alloc_failure; + u64 null_rxbuf; + u64 rx_dma_map_error; + u64 tx_dma_map_error; + u64 spurious_intr; + u64 mac_filter_limit_overrun; +}; + +/* + * Rcv Descriptor Context. One such per Rcv Descriptor. There may + * be one Rcv Descriptor for normal packets, one for jumbo and may be others. 
+ */ +struct qlcnic_host_rds_ring { + void __iomem *crb_rcv_producer; + struct rcv_desc *desc_head; + struct qlcnic_rx_buffer *rx_buf_arr; + u32 num_desc; + u32 producer; + u32 dma_size; + u32 skb_size; + u32 flags; + struct list_head free_list; + spinlock_t lock; + dma_addr_t phys_addr; +} ____cacheline_internodealigned_in_smp; + +struct qlcnic_host_sds_ring { + u32 consumer; + u32 num_desc; + void __iomem *crb_sts_consumer; + + struct qlcnic_host_tx_ring *tx_ring; + struct status_desc *desc_head; + struct qlcnic_adapter *adapter; + struct napi_struct napi; + struct list_head free_list[NUM_RCV_DESC_RINGS]; + + void __iomem *crb_intr_mask; + int irq; + + dma_addr_t phys_addr; + char name[IFNAMSIZ + 12]; +} ____cacheline_internodealigned_in_smp; + +struct qlcnic_tx_queue_stats { + u64 xmit_on; + u64 xmit_off; + u64 xmit_called; + u64 xmit_finished; + u64 tx_bytes; +}; + +struct qlcnic_host_tx_ring { + int irq; + void __iomem *crb_intr_mask; + char name[IFNAMSIZ + 12]; + u16 ctx_id; + + u32 state; + u32 producer; + u32 sw_consumer; + u32 num_desc; + + struct qlcnic_tx_queue_stats tx_stats; + + void __iomem *crb_cmd_producer; + struct cmd_desc_type0 *desc_head; + struct qlcnic_adapter *adapter; + struct napi_struct napi; + struct qlcnic_cmd_buffer *cmd_buf_arr; + __le32 *hw_consumer; + + dma_addr_t phys_addr; + dma_addr_t hw_cons_phys_addr; + struct netdev_queue *txq; + /* Lock to protect Tx descriptors cleanup */ + spinlock_t tx_clean_lock; +} ____cacheline_internodealigned_in_smp; + +/* + * Receive context. There is one such structure per instance of the + * receive processing. Any state information that is relevant to + * the receive, and is must be in this structure. The global data may be + * present elsewhere. + */ +struct qlcnic_recv_context { + struct qlcnic_host_rds_ring *rds_rings; + struct qlcnic_host_sds_ring *sds_rings; + u32 state; + u16 context_id; + u16 virt_port; +}; + +/* HW context creation */ + +#define QLCNIC_OS_CRB_RETRY_COUNT 4000 + +#define QLCNIC_CDRP_CMD_BIT 0x80000000 + +/* + * All responses must have the QLCNIC_CDRP_CMD_BIT cleared + * in the crb QLCNIC_CDRP_CRB_OFFSET. + */ +#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp) +#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0) + +#define QLCNIC_CDRP_RSP_OK 0x00000001 +#define QLCNIC_CDRP_RSP_FAIL 0x00000002 +#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003 + +/* + * All commands must have the QLCNIC_CDRP_CMD_BIT set in + * the crb QLCNIC_CDRP_CRB_OFFSET. 
+ */ +#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd)) + +#define QLCNIC_RCODE_SUCCESS 0 +#define QLCNIC_RCODE_INVALID_ARGS 6 +#define QLCNIC_RCODE_NOT_SUPPORTED 9 +#define QLCNIC_RCODE_NOT_PERMITTED 10 +#define QLCNIC_RCODE_NOT_IMPL 15 +#define QLCNIC_RCODE_INVALID 16 +#define QLCNIC_RCODE_TIMEOUT 17 +#define QLCNIC_DESTROY_CTX_RESET 0 + +/* + * Capabilities Announced + */ +#define QLCNIC_CAP0_LEGACY_CONTEXT (1) +#define QLCNIC_CAP0_LEGACY_MN (1 << 2) +#define QLCNIC_CAP0_LSO (1 << 6) +#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7) +#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8) +#define QLCNIC_CAP0_VALIDOFF (1 << 11) +#define QLCNIC_CAP0_LRO_MSS (1 << 21) +#define QLCNIC_CAP0_TX_MULTI (1 << 22) + +/* + * Context state + */ +#define QLCNIC_HOST_CTX_STATE_FREED 0 +#define QLCNIC_HOST_CTX_STATE_ACTIVE 2 + +/* + * Rx context + */ + +struct qlcnic_hostrq_sds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le16 msi_index; + __le16 rsvd; /* Padding */ +} __packed; + +struct qlcnic_hostrq_rds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le64 buff_size; /* Packet buffer size */ + __le32 ring_size; /* Ring entries */ + __le32 ring_kind; /* Class of ring */ +} __packed; + +struct qlcnic_hostrq_rx_ctx { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 host_rds_crb_mode; /* RDS crb usage */ + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 valid_field_offset; + u8 txrx_sds_binding; + u8 msix_handler; + u8 reserved[128]; /* reserve space for future expansion*/ + /* MUST BE 64-bit aligned. + The following is packed: + - N hostrq_rds_rings + - N hostrq_sds_rings */ + char data[0]; +} __packed; + +struct qlcnic_cardrsp_rds_ring{ + __le32 host_producer_crb; /* Crb to use */ + __le32 rsvd1; /* Padding */ +} __packed; + +struct qlcnic_cardrsp_sds_ring { + __le32 host_consumer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} __packed; + +struct qlcnic_cardrsp_rx_ctx { + /* These ring offsets are relative to data[0] below */ + __le32 rds_ring_offset; /* Offset to RDS config */ + __le32 sds_ring_offset; /* Offset to SDS config */ + __le32 host_ctx_state; /* Starting State */ + __le32 num_fn_per_port; /* How many PCI fn share the port */ + __le16 num_rds_rings; /* Count of RDS rings */ + __le16 num_sds_rings; /* Count of SDS rings */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + u8 reserved[128]; /* save space for future expansion */ + /* MUST BE 64-bit aligned. 
+ The following is packed: + - N cardrsp_rds_rings + - N cardrs_sds_rings */ + char data[0]; +} __packed; + +#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ + (sizeof(HOSTRQ_RX) + \ + (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \ + (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring))) + +#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ + (sizeof(CARDRSP_RX) + \ + (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \ + (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring))) + +/* + * Tx context + */ + +struct qlcnic_hostrq_cds_ring { + __le64 host_phys_addr; /* Ring base addr */ + __le32 ring_size; /* Ring entries */ + __le32 rsvd; /* Padding */ +} __packed; + +struct qlcnic_hostrq_tx_ctx { + __le64 host_rsp_dma_addr; /* Response dma'd here */ + __le64 cmd_cons_dma_addr; /* */ + __le64 dummy_dma_addr; /* */ + __le32 capabilities[4]; /* Flag bit vector */ + __le32 host_int_crb_mode; /* Interrupt crb usage */ + __le32 rsvd1; /* Padding */ + __le16 rsvd2; /* Padding */ + __le16 interrupt_ctl; + __le16 msi_index; + __le16 rsvd3; /* Padding */ + struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */ + u8 reserved[128]; /* future expansion */ +} __packed; + +struct qlcnic_cardrsp_cds_ring { + __le32 host_producer_crb; /* Crb to use */ + __le32 interrupt_crb; /* Crb to use */ +} __packed; + +struct qlcnic_cardrsp_tx_ctx { + __le32 host_ctx_state; /* Starting state */ + __le16 context_id; /* Handle for context */ + u8 phys_port; /* Physical id of port */ + u8 virt_port; /* Virtual/Logical id of port */ + struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */ + u8 reserved[128]; /* future expansion */ +} __packed; + +#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) +#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) + +/* CRB */ + +#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0 +#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1 +#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2 +#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3 + +#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0 +#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1 +#define QLCNIC_HOST_INT_CRB_MODE_NORX 2 +#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3 +#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4 + + +/* MAC */ + +#define MC_COUNT_P3P 38 + +#define QLCNIC_MAC_NOOP 0 +#define QLCNIC_MAC_ADD 1 +#define QLCNIC_MAC_DEL 2 +#define QLCNIC_MAC_VLAN_ADD 3 +#define QLCNIC_MAC_VLAN_DEL 4 + +enum qlcnic_mac_type { + QLCNIC_UNICAST_MAC, + QLCNIC_MULTICAST_MAC, + QLCNIC_BROADCAST_MAC, +}; + +struct qlcnic_mac_vlan_list { + struct list_head list; + uint8_t mac_addr[ETH_ALEN+2]; + u16 vlan_id; + enum qlcnic_mac_type mac_type; +}; + +/* MAC Learn */ +#define NO_MAC_LEARN 0 +#define DRV_MAC_LEARN 1 +#define FDB_MAC_LEARN 2 + +#define QLCNIC_HOST_REQUEST 0x13 +#define QLCNIC_REQUEST 0x14 + +#define QLCNIC_MAC_EVENT 0x1 + +#define QLCNIC_IP_UP 2 +#define QLCNIC_IP_DOWN 3 + +#define QLCNIC_ILB_MODE 0x1 +#define QLCNIC_ELB_MODE 0x2 +#define QLCNIC_LB_MODE_MASK 0x3 + +#define QLCNIC_LINKEVENT 0x1 +#define QLCNIC_LB_RESPONSE 0x2 +#define QLCNIC_IS_LB_CONFIGURED(VAL) \ + (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE)) + +/* + * Driver --> Firmware + */ +#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1 +#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3 +#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4 +#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7 +#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc +#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12 + +#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15 +#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17 +#define 
QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18 +#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13 + +/* + * Firmware --> Driver + */ + +#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f +#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D +#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90 + +#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ +#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ +#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ + +#define QLCNIC_LRO_REQUEST_CLEANUP 4 + +/* Capabilites received */ +#define QLCNIC_FW_CAPABILITY_TSO BIT_1 +#define QLCNIC_FW_CAPABILITY_BDG BIT_8 +#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9 +#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10 +#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4 +#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27 +#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31 + +#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2 +#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3 +#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 +#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 +#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9 + +#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0 +#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1 +#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4 + +/* module types */ +#define LINKEVENT_MODULE_NOT_PRESENT 1 +#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2 +#define LINKEVENT_MODULE_OPTICAL_SRLR 3 +#define LINKEVENT_MODULE_OPTICAL_LRM 4 +#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6 +#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7 +#define LINKEVENT_MODULE_TWINAX 8 + +#define LINKSPEED_10GBPS 10000 +#define LINKSPEED_1GBPS 1000 +#define LINKSPEED_100MBPS 100 +#define LINKSPEED_10MBPS 10 + +#define LINKSPEED_ENCODED_10MBPS 0 +#define LINKSPEED_ENCODED_100MBPS 1 +#define LINKSPEED_ENCODED_1GBPS 2 + +#define LINKEVENT_AUTONEG_DISABLED 0 +#define LINKEVENT_AUTONEG_ENABLED 1 + +#define LINKEVENT_HALF_DUPLEX 0 +#define LINKEVENT_FULL_DUPLEX 1 + +#define LINKEVENT_LINKSPEED_MBPS 0 +#define LINKEVENT_LINKSPEED_ENCODED 1 + +/* firmware response header: + * 63:58 - message type + * 57:56 - owner + * 55:53 - desc count + * 52:48 - reserved + * 47:40 - completion id + * 39:32 - opcode + * 31:16 - error code + * 15:00 - reserved + */ +#define qlcnic_get_nic_msg_opcode(msg_hdr) \ + ((msg_hdr >> 32) & 0xFF) + +struct qlcnic_fw_msg { + union { + struct { + u64 hdr; + u64 body[7]; + }; + u64 words[8]; + }; +}; + +struct qlcnic_nic_req { + __le64 qhdr; + __le64 req_hdr; + __le64 words[6]; +} __packed; + +struct qlcnic_mac_req { + u8 op; + u8 tag; + u8 mac_addr[6]; +}; + +struct qlcnic_vlan_req { + __le16 vlan_id; + __le16 rsvd[3]; +} __packed; + +struct qlcnic_ipaddr { + __be32 ipv4; + __be32 ipv6[4]; +}; + +#define QLCNIC_MSI_ENABLED 0x02 +#define QLCNIC_MSIX_ENABLED 0x04 +#define QLCNIC_LRO_ENABLED 0x01 +#define QLCNIC_LRO_DISABLED 0x00 +#define QLCNIC_BRIDGE_ENABLED 0X10 +#define QLCNIC_DIAG_ENABLED 0x20 +#define QLCNIC_ESWITCH_ENABLED 0x40 +#define QLCNIC_ADAPTER_INITIALIZED 0x80 +#define QLCNIC_TAGGING_ENABLED 0x100 +#define QLCNIC_MACSPOOF 0x200 +#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 +#define QLCNIC_PROMISC_DISABLED 0x800 +#define QLCNIC_NEED_FLR 0x1000 +#define QLCNIC_FW_RESET_OWNER 0x2000 +#define QLCNIC_FW_HANG 0x4000 +#define QLCNIC_FW_LRO_MSS_CAP 0x8000 +#define QLCNIC_TX_INTR_SHARED 0x10000 +#define QLCNIC_APP_CHANGED_FLAGS 0x20000 +#define QLCNIC_HAS_PHYS_PORT_ID 0x40000 +#define QLCNIC_TSS_RSS 0x80000 + +#ifdef CONFIG_QLCNIC_VXLAN +#define 
QLCNIC_ADD_VXLAN_PORT 0x100000 +#define QLCNIC_DEL_VXLAN_PORT 0x200000 +#endif + +#define QLCNIC_VLAN_FILTERING 0x800000 + +#define QLCNIC_IS_MSI_FAMILY(adapter) \ + ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) +#define QLCNIC_IS_TSO_CAPABLE(adapter) \ + ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) + +#define QLCNIC_BEACON_EANBLE 0xC +#define QLCNIC_BEACON_DISABLE 0xD + +#define QLCNIC_BEACON_ON 2 +#define QLCNIC_BEACON_OFF 0 + +#define QLCNIC_MSIX_TBL_SPACE 8192 +#define QLCNIC_PCI_REG_MSIX_TBL 0x44 +#define QLCNIC_MSIX_TBL_PGSIZE 4096 + +#define QLCNIC_ADAPTER_UP_MAGIC 777 + +#define __QLCNIC_FW_ATTACHED 0 +#define __QLCNIC_DEV_UP 1 +#define __QLCNIC_RESETTING 2 +#define __QLCNIC_START_FW 4 +#define __QLCNIC_AER 5 +#define __QLCNIC_DIAG_RES_ALLOC 6 +#define __QLCNIC_LED_ENABLE 7 +#define __QLCNIC_ELB_INPROGRESS 8 +#define __QLCNIC_MULTI_TX_UNIQUE 9 +#define __QLCNIC_SRIOV_ENABLE 10 +#define __QLCNIC_SRIOV_CAPABLE 11 +#define __QLCNIC_MBX_POLL_ENABLE 12 +#define __QLCNIC_DIAG_MODE 13 +#define __QLCNIC_MAINTENANCE_MODE 16 + +#define QLCNIC_INTERRUPT_TEST 1 +#define QLCNIC_LOOPBACK_TEST 2 +#define QLCNIC_LED_TEST 3 + +#define QLCNIC_FILTER_AGE 80 +#define QLCNIC_READD_AGE 20 +#define QLCNIC_LB_MAX_FILTERS 64 +#define QLCNIC_LB_BUCKET_SIZE 32 +#define QLCNIC_ILB_MAX_RCV_LOOP 10 + +struct qlcnic_filter { + struct hlist_node fnode; + u8 faddr[ETH_ALEN]; + u16 vlan_id; + unsigned long ftime; +}; + +struct qlcnic_filter_hash { + struct hlist_head *fhead; + u8 fnum; + u16 fmax; + u16 fbucket_size; +}; + +/* Mailbox specific data structures */ +struct qlcnic_mailbox { + struct workqueue_struct *work_q; + struct qlcnic_adapter *adapter; + struct qlcnic_mbx_ops *ops; + struct work_struct work; + struct completion completion; + struct list_head cmd_q; + unsigned long status; + spinlock_t queue_lock; /* Mailbox queue lock */ + spinlock_t aen_lock; /* Mailbox response/AEN lock */ + atomic_t rsp_status; + u32 num_cmds; +}; + +struct qlcnic_adapter { + struct qlcnic_hardware_context *ahw; + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + struct net_device *netdev; + struct pci_dev *pdev; + + unsigned long state; + u32 flags; + + u16 num_txd; + u16 num_rxd; + u16 num_jumbo_rxd; + u16 max_rxd; + u16 max_jumbo_rxd; + + u8 max_rds_rings; + + u8 max_sds_rings; /* max sds rings supported by adapter */ + u8 max_tx_rings; /* max tx rings supported by adapter */ + + u8 drv_tx_rings; /* max tx rings supported by driver */ + u8 drv_sds_rings; /* max sds rings supported by driver */ + + u8 drv_tss_rings; /* tss ring input */ + u8 drv_rss_rings; /* rss ring input */ + + u8 rx_csum; + u8 portnum; + + u8 fw_wait_cnt; + u8 fw_fail_cnt; + u8 tx_timeo_cnt; + u8 need_fw_reset; + u8 reset_ctx_cnt; + + u16 is_up; + u16 rx_pvid; + u16 tx_pvid; + + u32 irq; + u32 heartbeat; + + u8 dev_state; + u8 reset_ack_timeo; + u8 dev_init_timeo; + + u8 mac_addr[ETH_ALEN]; + + u64 dev_rst_time; + bool drv_mac_learn; + bool fdb_mac_learn; + bool rx_mac_learn; + unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u8 flash_mfg_id; + struct qlcnic_npar_info *npars; + struct qlcnic_eswitch *eswitch; + struct qlcnic_nic_template *nic_ops; + + struct qlcnic_adapter_stats stats; + struct list_head mac_list; + + void __iomem *tgt_mask_reg; + void __iomem *tgt_status_reg; + void __iomem *crb_int_state_reg; + void __iomem *isr_int_vec; + + struct msix_entry *msix_entries; + struct workqueue_struct *qlcnic_wq; + struct delayed_work fw_work; + struct delayed_work idc_aen_work; + struct 
delayed_work mbx_poll_work; + struct qlcnic_dcb *dcb; + + struct qlcnic_filter_hash fhash; + struct qlcnic_filter_hash rx_fhash; + struct list_head vf_mc_list; + + spinlock_t mac_learn_lock; + /* spinlock for catching rcv filters for eswitch traffic */ + spinlock_t rx_mac_learn_lock; + u32 file_prd_off; /*File fw product offset*/ + u32 fw_version; + u32 offload_flags; + const struct firmware *fw; +}; + +struct qlcnic_info_le { + __le16 pci_func; + __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ + __le16 phys_port; + __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ + + __le32 capabilities; + u8 max_mac_filters; + u8 reserved1; + __le16 max_mtu; + + __le16 max_tx_ques; + __le16 max_rx_ques; + __le16 min_tx_bw; + __le16 max_tx_bw; + __le32 op_type; + __le16 max_bw_reg_offset; + __le16 max_linkspeed_reg_offset; + __le32 capability1; + __le32 capability2; + __le32 capability3; + __le16 max_tx_mac_filters; + __le16 max_rx_mcast_mac_filters; + __le16 max_rx_ucast_mac_filters; + __le16 max_rx_ip_addr; + __le16 max_rx_lro_flow; + __le16 max_rx_status_rings; + __le16 max_rx_buf_rings; + __le16 max_tx_vlan_keys; + u8 total_pf; + u8 total_rss_engines; + __le16 max_vports; + __le16 linkstate_reg_offset; + __le16 bit_offsets; + __le16 max_local_ipv6_addrs; + __le16 max_remote_ipv6_addrs; + u8 reserved2[56]; +} __packed; + +struct qlcnic_info { + u16 pci_func; + u16 op_mode; + u16 phys_port; + u16 switch_mode; + u32 capabilities; + u8 max_mac_filters; + u16 max_mtu; + u16 max_tx_ques; + u16 max_rx_ques; + u16 min_tx_bw; + u16 max_tx_bw; + u32 op_type; + u16 max_bw_reg_offset; + u16 max_linkspeed_reg_offset; + u32 capability1; + u32 capability2; + u32 capability3; + u16 max_tx_mac_filters; + u16 max_rx_mcast_mac_filters; + u16 max_rx_ucast_mac_filters; + u16 max_rx_ip_addr; + u16 max_rx_lro_flow; + u16 max_rx_status_rings; + u16 max_rx_buf_rings; + u16 max_tx_vlan_keys; + u8 total_pf; + u8 total_rss_engines; + u16 max_vports; + u16 linkstate_reg_offset; + u16 bit_offsets; + u16 max_local_ipv6_addrs; + u16 max_remote_ipv6_addrs; +}; + +struct qlcnic_pci_info_le { + __le16 id; /* pci function id */ + __le16 active; /* 1 = Enabled */ + __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ + __le16 default_port; /* default port number */ + + __le16 tx_min_bw; /* Multiple of 100mbpc */ + __le16 tx_max_bw; + __le16 reserved1[2]; + + u8 mac[ETH_ALEN]; + __le16 func_count; + u8 reserved2[104]; + +} __packed; + +struct qlcnic_pci_info { + u16 id; + u16 active; + u16 type; + u16 default_port; + u16 tx_min_bw; + u16 tx_max_bw; + u8 mac[ETH_ALEN]; + u16 func_count; +}; + +struct qlcnic_npar_info { + bool eswitch_status; + u16 pvid; + u16 min_bw; + u16 max_bw; + u8 phy_port; + u8 type; + u8 active; + u8 enable_pm; + u8 dest_npar; + u8 discard_tagged; + u8 mac_override; + u8 mac_anti_spoof; + u8 promisc_mode; + u8 offload_flags; + u8 pci_func; + u8 mac[ETH_ALEN]; +}; + +struct qlcnic_eswitch { + u8 port; + u8 active_vports; + u8 active_vlans; + u8 active_ucast_filters; + u8 max_ucast_filters; + u8 max_active_vlans; + + u32 flags; +#define QLCNIC_SWITCH_ENABLE BIT_1 +#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2 +#define QLCNIC_SWITCH_PROMISC_MODE BIT_3 +#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4 +}; + + +/* Return codes for Error handling */ +#define QL_STATUS_INVALID_PARAM -1 + +#define MAX_BW 100 /* % of link speed */ +#define MIN_BW 1 /* % of link speed */ +#define MAX_VLAN_ID 4095 +#define MIN_VLAN_ID 2 +#define DEFAULT_MAC_LEARN 1 + +#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) 
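The limits above feed small validity checks: IS_VALID_VLAN here and IS_VALID_BW, defined just below. A minimal sketch of how such a check might be applied to user-supplied port parameters before they are programmed into the eswitch; example_port_params_ok() is a hypothetical helper, not part of the driver, and only the bounds defined in this header are assumed:

static bool example_port_params_ok(u16 vlan_id, u16 tx_bw_percent)
{
	/* IS_VALID_VLAN() accepts VLAN IDs 2..4094 (MIN_VLAN_ID inclusive,
	 * MAX_VLAN_ID exclusive); bandwidth is a percentage of link speed
	 * and may not exceed MAX_BW (100).
	 */
	return IS_VALID_VLAN(vlan_id) && tx_bw_percent <= MAX_BW;
}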
+#define IS_VALID_BW(bw) (bw <= MAX_BW) + +struct qlcnic_pci_func_cfg { + u16 func_type; + u16 min_bw; + u16 max_bw; + u16 port_num; + u8 pci_func; + u8 func_state; + u8 def_mac_addr[ETH_ALEN]; +}; + +struct qlcnic_npar_func_cfg { + u32 fw_capab; + u16 port_num; + u16 min_bw; + u16 max_bw; + u16 max_tx_queues; + u16 max_rx_queues; + u8 pci_func; + u8 op_mode; +}; + +struct qlcnic_pm_func_cfg { + u8 pci_func; + u8 action; + u8 dest_npar; + u8 reserved[5]; +}; + +struct qlcnic_esw_func_cfg { + u16 vlan_id; + u8 op_mode; + u8 op_type; + u8 pci_func; + u8 host_vlan_tag; + u8 promisc_mode; + u8 discard_tagged; + u8 mac_override; + u8 mac_anti_spoof; + u8 offload_flags; + u8 reserved[5]; +}; + +#define QLCNIC_STATS_VERSION 1 +#define QLCNIC_STATS_PORT 1 +#define QLCNIC_STATS_ESWITCH 2 +#define QLCNIC_QUERY_RX_COUNTER 0 +#define QLCNIC_QUERY_TX_COUNTER 1 +#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL +#define QLCNIC_FILL_STATS(VAL1) \ + (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1) +#define QLCNIC_MAC_STATS 1 +#define QLCNIC_ESW_STATS 2 + +#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ +do { \ + if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \ + (VAL1) = (VAL2); \ + else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \ + ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \ + (VAL1) += (VAL2); \ +} while (0) + +struct qlcnic_mac_statistics_le { + __le64 mac_tx_frames; + __le64 mac_tx_bytes; + __le64 mac_tx_mcast_pkts; + __le64 mac_tx_bcast_pkts; + __le64 mac_tx_pause_cnt; + __le64 mac_tx_ctrl_pkt; + __le64 mac_tx_lt_64b_pkts; + __le64 mac_tx_lt_127b_pkts; + __le64 mac_tx_lt_255b_pkts; + __le64 mac_tx_lt_511b_pkts; + __le64 mac_tx_lt_1023b_pkts; + __le64 mac_tx_lt_1518b_pkts; + __le64 mac_tx_gt_1518b_pkts; + __le64 rsvd1[3]; + + __le64 mac_rx_frames; + __le64 mac_rx_bytes; + __le64 mac_rx_mcast_pkts; + __le64 mac_rx_bcast_pkts; + __le64 mac_rx_pause_cnt; + __le64 mac_rx_ctrl_pkt; + __le64 mac_rx_lt_64b_pkts; + __le64 mac_rx_lt_127b_pkts; + __le64 mac_rx_lt_255b_pkts; + __le64 mac_rx_lt_511b_pkts; + __le64 mac_rx_lt_1023b_pkts; + __le64 mac_rx_lt_1518b_pkts; + __le64 mac_rx_gt_1518b_pkts; + __le64 rsvd2[3]; + + __le64 mac_rx_length_error; + __le64 mac_rx_length_small; + __le64 mac_rx_length_large; + __le64 mac_rx_jabber; + __le64 mac_rx_dropped; + __le64 mac_rx_crc_error; + __le64 mac_align_error; +} __packed; + +struct qlcnic_mac_statistics { + u64 mac_tx_frames; + u64 mac_tx_bytes; + u64 mac_tx_mcast_pkts; + u64 mac_tx_bcast_pkts; + u64 mac_tx_pause_cnt; + u64 mac_tx_ctrl_pkt; + u64 mac_tx_lt_64b_pkts; + u64 mac_tx_lt_127b_pkts; + u64 mac_tx_lt_255b_pkts; + u64 mac_tx_lt_511b_pkts; + u64 mac_tx_lt_1023b_pkts; + u64 mac_tx_lt_1518b_pkts; + u64 mac_tx_gt_1518b_pkts; + u64 rsvd1[3]; + u64 mac_rx_frames; + u64 mac_rx_bytes; + u64 mac_rx_mcast_pkts; + u64 mac_rx_bcast_pkts; + u64 mac_rx_pause_cnt; + u64 mac_rx_ctrl_pkt; + u64 mac_rx_lt_64b_pkts; + u64 mac_rx_lt_127b_pkts; + u64 mac_rx_lt_255b_pkts; + u64 mac_rx_lt_511b_pkts; + u64 mac_rx_lt_1023b_pkts; + u64 mac_rx_lt_1518b_pkts; + u64 mac_rx_gt_1518b_pkts; + u64 rsvd2[3]; + u64 mac_rx_length_error; + u64 mac_rx_length_small; + u64 mac_rx_length_large; + u64 mac_rx_jabber; + u64 mac_rx_dropped; + u64 mac_rx_crc_error; + u64 mac_align_error; +}; + +struct qlcnic_esw_stats_le { + __le16 context_id; + __le16 version; + __le16 size; + __le16 unused; + __le64 unicast_frames; + __le64 multicast_frames; + __le64 broadcast_frames; + __le64 dropped_frames; + __le64 errors; + __le64 local_frames; + __le64 numbytes; + __le64 rsvd[3]; +} 
__packed; + +struct __qlcnic_esw_statistics { + u16 context_id; + u16 version; + u16 size; + u16 unused; + u64 unicast_frames; + u64 multicast_frames; + u64 broadcast_frames; + u64 dropped_frames; + u64 errors; + u64 local_frames; + u64 numbytes; + u64 rsvd[3]; +}; + +struct qlcnic_esw_statistics { + struct __qlcnic_esw_statistics rx; + struct __qlcnic_esw_statistics tx; +}; + +#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed +#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed +#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed +#define QLCNIC_FORCE_FW_RESET 0xdeaddead +#define QLCNIC_SET_QUIESCENT 0xadd00010 +#define QLCNIC_RESET_QUIESCENT 0xadd00020 + +struct _cdrp_cmd { + u32 num; + u32 *arg; +}; + +struct qlcnic_cmd_args { + struct completion completion; + struct list_head list; + struct _cdrp_cmd req; + struct _cdrp_cmd rsp; + atomic_t rsp_status; + int pay_size; + u32 rsp_opcode; + u32 total_cmds; + u32 op_type; + u32 type; + u32 cmd_op; + u32 *hdr; /* Back channel message header */ + u32 *pay; /* Back channel message payload */ + u8 func_num; +}; + +int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); +int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config); +int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); +int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); + +#define ADDR_IN_RANGE(addr, low, high) \ + (((addr) < (high)) && ((addr) >= (low))) + +#define QLCRD32(adapter, off, err) \ + (adapter->ahw->hw_ops->read_reg)(adapter, off, err) + +#define QLCWR32(adapter, off, val) \ + adapter->ahw->hw_ops->write_reg(adapter, off, val) + +int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32); +void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int); + +#define qlcnic_rom_lock(a) \ + qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID) +#define qlcnic_rom_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 2) +#define qlcnic_phy_lock(a) \ + qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID) +#define qlcnic_phy_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 3) +#define qlcnic_sw_lock(a) \ + qlcnic_pcie_sem_lock((a), 6, 0) +#define qlcnic_sw_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 6) +#define crb_win_lock(a) \ + qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID) +#define crb_win_unlock(a) \ + qlcnic_pcie_sem_unlock((a), 7) + +#define __QLCNIC_MAX_LED_RATE 0xf +#define __QLCNIC_MAX_LED_STATE 0x2 + +#define MAX_CTL_CHECK 1000 + +void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter); +void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter); +int qlcnic_dump_fw(struct qlcnic_adapter *); +int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *); +bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *); + +/* Functions from qlcnic_init.c */ +void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int); +int qlcnic_load_firmware(struct qlcnic_adapter *adapter); +int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter); +void qlcnic_request_firmware(struct qlcnic_adapter *adapter); +void qlcnic_release_firmware(struct qlcnic_adapter *adapter); +int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); +int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter); +int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter); + +int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp); +int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size); +int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter); +void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter); + +void 
__iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *, u32); + +int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); +void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); + +int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter); +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter); + +void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter); +void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); +void qlcnic_release_tx_buffers(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); + +int qlcnic_check_fw_status(struct qlcnic_adapter *adapter); +void qlcnic_watchdog_task(struct work_struct *work); +void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, u8 ring_id); +void qlcnic_set_multi(struct net_device *netdev); +void qlcnic_flush_mcast_mac(struct qlcnic_adapter *); +int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16, + enum qlcnic_mac_type); +int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); +void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); +int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *); + +int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu); +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32); +int qlcnic_change_mtu(struct net_device *netdev, int new_mtu); +netdev_features_t qlcnic_fix_features(struct net_device *netdev, + netdev_features_t features); +int qlcnic_set_features(struct net_device *netdev, netdev_features_t features); +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); +void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *); + +/* Functions from qlcnic_ethtool.c */ +int qlcnic_check_loopback_buff(unsigned char *, u8 []); +int qlcnic_do_lb_test(struct qlcnic_adapter *, u8); + +/* Functions from qlcnic_main.c */ +int qlcnic_reset_context(struct qlcnic_adapter *); +void qlcnic_diag_free_res(struct net_device *netdev, int); +int qlcnic_diag_alloc_res(struct net_device *netdev, int); +netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *); +void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8); +void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8); +int qlcnic_setup_rings(struct qlcnic_adapter *); +int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int); +void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); +int qlcnic_enable_msix(struct qlcnic_adapter *, u32); +void qlcnic_set_drv_version(struct qlcnic_adapter *); + +/* eSwitch management functions */ +int qlcnic_config_switch_port(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); + +int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); +int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8, + struct __qlcnic_esw_statistics *); +int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, + struct __qlcnic_esw_statistics *); +int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); +int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *); + +void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd); + +int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int); +void qlcnic_free_sds_rings(struct qlcnic_recv_context *); +void qlcnic_advert_link_change(struct qlcnic_adapter *, int); +void qlcnic_free_tx_rings(struct qlcnic_adapter *); +int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct 
net_device *); +void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *); + +void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter); +void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter); +void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); +void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); + +int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); +int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); +void qlcnic_set_vlan_config(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *); +void qlcnic_down(struct qlcnic_adapter *, struct net_device *); +int qlcnic_up(struct qlcnic_adapter *, struct net_device *); +void __qlcnic_down(struct qlcnic_adapter *, struct net_device *); +void qlcnic_detach(struct qlcnic_adapter *); +void qlcnic_teardown_intr(struct qlcnic_adapter *); +int qlcnic_attach(struct qlcnic_adapter *); +int __qlcnic_up(struct qlcnic_adapter *, struct net_device *); +void qlcnic_restore_indev_addr(struct net_device *, unsigned long); + +int qlcnic_check_temp(struct qlcnic_adapter *); +int qlcnic_init_pci_info(struct qlcnic_adapter *); +int qlcnic_set_default_offload_settings(struct qlcnic_adapter *); +int qlcnic_reset_npar_config(struct qlcnic_adapter *); +int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); +int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); +int qlcnic_read_mac_addr(struct qlcnic_adapter *); +int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); +void qlcnic_set_netdev_features(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); +void qlcnic_sriov_vf_set_multi(struct net_device *); +int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8); +int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *, + u16 *); + +/* + * QLOGIC Board information + */ + +#define QLCNIC_MAX_BOARD_NAME_LEN 100 +struct qlcnic_board_info { + unsigned short vendor; + unsigned short device; + unsigned short sub_vendor; + unsigned short sub_device; + char short_name[QLCNIC_MAX_BOARD_NAME_LEN]; +}; + +static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) +{ + if (likely(tx_ring->producer < tx_ring->sw_consumer)) + return tx_ring->sw_consumer - tx_ring->producer; + else + return tx_ring->sw_consumer + tx_ring->num_desc - + tx_ring->producer; +} + +struct qlcnic_nic_template { + int (*config_bridged_mode) (struct qlcnic_adapter *, u32); + int (*config_led) (struct qlcnic_adapter *, u32, u32); + int (*start_firmware) (struct qlcnic_adapter *); + int (*init_driver) (struct qlcnic_adapter *); + void (*request_reset) (struct qlcnic_adapter *, u32); + void (*cancel_idc_work) (struct qlcnic_adapter *); + int (*napi_add)(struct qlcnic_adapter *, struct net_device *); + void (*napi_del)(struct qlcnic_adapter *); + void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int); + irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *); + int (*shutdown)(struct pci_dev *); + int (*resume)(struct qlcnic_adapter *); +}; + +struct qlcnic_mbx_ops { + int (*enqueue_cmd) (struct qlcnic_adapter *, + struct qlcnic_cmd_args *, unsigned long *); + void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*nofity_fw) (struct 
qlcnic_adapter *, u8); +}; + +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); +void qlcnic_update_stats(struct qlcnic_adapter *); + +/* Adapter hardware abstraction */ +struct qlcnic_hardware_ops { + void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); + void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t); + int (*read_reg) (struct qlcnic_adapter *, ulong, int *); + int (*write_reg) (struct qlcnic_adapter *, ulong, u32); + void (*get_ocm_win) (struct qlcnic_hardware_context *); + int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8); + int (*setup_intr) (struct qlcnic_adapter *); + int (*alloc_mbx_args)(struct qlcnic_cmd_args *, + struct qlcnic_adapter *, u32); + int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *); + void (*get_func_no) (struct qlcnic_adapter *); + int (*api_lock) (struct qlcnic_adapter *); + void (*api_unlock) (struct qlcnic_adapter *); + void (*add_sysfs) (struct qlcnic_adapter *); + void (*remove_sysfs) (struct qlcnic_adapter *); + void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *); + int (*create_rx_ctx) (struct qlcnic_adapter *); + int (*create_tx_ctx) (struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *, int); + void (*del_rx_ctx) (struct qlcnic_adapter *); + void (*del_tx_ctx) (struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); + int (*setup_link_event) (struct qlcnic_adapter *, int); + int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8); + int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *); + int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *); + int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8); + void (*napi_enable) (struct qlcnic_adapter *); + void (*napi_disable) (struct qlcnic_adapter *); + int (*config_intr_coal) (struct qlcnic_adapter *, + struct ethtool_coalesce *); + int (*config_rss) (struct qlcnic_adapter *, int); + int (*config_hw_lro) (struct qlcnic_adapter *, int); + int (*config_loopback) (struct qlcnic_adapter *, u8); + int (*clear_loopback) (struct qlcnic_adapter *, u8); + int (*config_promisc_mode) (struct qlcnic_adapter *, u32); + void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16); + int (*get_board_info) (struct qlcnic_adapter *); + void (*set_mac_filter_count) (struct qlcnic_adapter *); + void (*free_mac_list) (struct qlcnic_adapter *); + int (*read_phys_port_id) (struct qlcnic_adapter *); + pci_ers_result_t (*io_error_detected) (struct pci_dev *, + pci_channel_state_t); + pci_ers_result_t (*io_slot_reset) (struct pci_dev *); + void (*io_resume) (struct pci_dev *); + void (*get_beacon_state)(struct qlcnic_adapter *); + void (*enable_sds_intr) (struct qlcnic_adapter *, + struct qlcnic_host_sds_ring *); + void (*disable_sds_intr) (struct qlcnic_adapter *, + struct qlcnic_host_sds_ring *); + void (*enable_tx_intr) (struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); + void (*disable_tx_intr) (struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); + u32 (*get_saved_state)(void *, u32); + void (*set_saved_state)(void *, u32, u32); + void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *); + u32 (*get_cap_size)(void *, int); + void (*set_sys_info)(void *, int, u32); + void (*store_cap_mask)(void *, u32); +}; + +extern struct qlcnic_nic_template qlcnic_vf_ops; + +static inline bool 
qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->extra_capability[0] & + QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; +} + +static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->extra_capability[0] & + QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; +} + +static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) +{ + return adapter->nic_ops->start_firmware(adapter); +} + +static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size); +} + +static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size); +} + +static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, + ulong off, u32 data) +{ + return adapter->ahw->hw_ops->write_reg(adapter, off, data); +} + +static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, + u8 *mac, u8 function) +{ + return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function); +} + +static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->setup_intr(adapter); +} + +static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx, + struct qlcnic_adapter *adapter, u32 arg) +{ + return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg); +} + +static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + if (adapter->ahw->hw_ops->mbx_cmd) + return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd); + + return -EIO; +} + +static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->get_func_no(adapter); +} + +static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->api_lock(adapter); +} + +static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->api_unlock(adapter); +} + +static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->hw_ops->add_sysfs) + adapter->ahw->hw_ops->add_sysfs(adapter); +} + +static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->hw_ops->remove_sysfs) + adapter->ahw->hw_ops->remove_sysfs(adapter); +} + +static inline void +qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +{ + sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring); +} + +static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->create_rx_ctx(adapter); +} + +static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *ptr, + int ring) +{ + return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring); +} + +static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->del_rx_ctx(adapter); +} + +static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *ptr) +{ + return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr); +} + +static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, + int enable) +{ + return adapter->ahw->hw_ops->setup_link_event(adapter, enable); +} + +static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *info, u8 id) +{ + return adapter->ahw->hw_ops->get_nic_info(adapter, info, id); +} + 
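The inline wrappers above give the rest of the driver a uniform entry point: each simply forwards through adapter->ahw->hw_ops, so the same call reaches whichever generation-specific qlcnic_hardware_ops table was installed for the adapter. A minimal sketch of a caller, assuming an already-initialized adapter whose ops table is in place; example_query_nic_info() is a hypothetical helper added only for illustration:

static int example_query_nic_info(struct qlcnic_adapter *adapter)
{
	struct qlcnic_info info;
	int err;

	/* Dispatches through adapter->ahw->hw_ops->get_nic_info(), so the
	 * caller does not need to know which hardware generation it is
	 * talking to.
	 */
	err = qlcnic_get_nic_info(adapter, &info, adapter->ahw->pci_func);
	if (err)
		return err;

	/* The host-order qlcnic_info copy can now be consulted directly. */
	netdev_info(adapter->netdev, "max MTU %d, max Tx queues %d\n",
		    info.max_mtu, info.max_tx_ques);
	return 0;
}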
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *info) +{ + return adapter->ahw->hw_ops->get_pci_info(adapter, info); +} + +static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *info) +{ + return adapter->ahw->hw_ops->set_nic_info(adapter, info); +} + +static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, + u8 *addr, u16 id, u8 cmd) +{ + return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd); +} + +static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + return adapter->nic_ops->napi_add(adapter, netdev); +} + +static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter) +{ + adapter->nic_ops->napi_del(adapter); +} + +static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->napi_enable(adapter); +} + +static inline int __qlcnic_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + return adapter->nic_ops->shutdown(pdev); +} + +static inline int __qlcnic_resume(struct qlcnic_adapter *adapter) +{ + return adapter->nic_ops->resume(adapter); +} + +static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->napi_disable(adapter); +} + +static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter, + struct ethtool_coalesce *ethcoal) +{ + return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal); +} + +static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable) +{ + return adapter->ahw->hw_ops->config_rss(adapter, enable); +} + +static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, + int enable) +{ + return adapter->ahw->hw_ops->config_hw_lro(adapter, enable); +} + +static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + return adapter->ahw->hw_ops->config_loopback(adapter, mode); +} + +static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + return adapter->ahw->hw_ops->clear_loopback(adapter, mode); +} + +static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, + u32 mode) +{ + return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode); +} + +static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter, + u64 *addr, u16 id) +{ + adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id); +} + +static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->get_board_info(adapter); +} + +static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter) +{ + return adapter->ahw->hw_ops->free_mac_list(adapter); +} + +static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->hw_ops->set_mac_filter_count) + adapter->ahw->hw_ops->set_mac_filter_count(adapter); +} + +static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter) +{ + adapter->ahw->hw_ops->get_beacon_state(adapter); +} + +static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->hw_ops->read_phys_port_id) + adapter->ahw->hw_ops->read_phys_port_id(adapter); +} + +static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter, + void *t_hdr, u32 index) +{ + return adapter->ahw->hw_ops->get_saved_state(t_hdr, index); +} + +static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter, + void *t_hdr, u32 index, u32 value) +{ + 
adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value); +} + +static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter, + struct qlcnic_fw_dump *fw_dump) +{ + adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump); +} + +static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter, + void *tmpl_hdr, int index) +{ + return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index); +} + +static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter, + void *tmpl_hdr, int idx, u32 value) +{ + adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value); +} + +static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter, + void *tmpl_hdr, u32 mask) +{ + adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask); +} + +static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, + u32 key) +{ + if (adapter->nic_ops->request_reset) + adapter->nic_ops->request_reset(adapter, key); +} + +static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter) +{ + if (adapter->nic_ops->cancel_idc_work) + adapter->nic_ops->cancel_idc_work(adapter); +} + +static inline irqreturn_t +qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter) +{ + return adapter->nic_ops->clear_legacy_intr(adapter); +} + +static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, + u32 rate) +{ + return adapter->nic_ops->config_led(adapter, state, rate); +} + +static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, + __be32 ip, int cmd) +{ + adapter->nic_ops->config_ipaddr(adapter, ip, cmd); +} + +static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter) +{ + return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); +} + +static inline void +qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) + writel(0x0, tx_ring->crb_intr_mask); +} + +static inline void +qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) + writel(1, tx_ring->crb_intr_mask); +} + +static inline void +qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + writel(0, tx_ring->crb_intr_mask); +} + +static inline void +qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + writel(1, tx_ring->crb_intr_mask); +} + +/* Enable MSI-x and INT-x interrupts */ +static inline void +qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + writel(0, sds_ring->crb_intr_mask); +} + +/* Disable MSI-x and INT-x interrupts */ +static inline void +qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + writel(1, sds_ring->crb_intr_mask); +} + +static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter) +{ + test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); + adapter->drv_tx_rings = QLCNIC_SINGLE_RING; +} + +/* When operating in a muti tx mode, driver needs to write 0x1 + * to src register, instead of 0x0 to disable receiving interrupt. 
+ */ +static inline void +qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && + (adapter->flags & QLCNIC_MSIX_ENABLED)) + writel(0x1, sds_ring->crb_intr_mask); + else + writel(0, sds_ring->crb_intr_mask); +} + +static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + if (adapter->ahw->hw_ops->enable_sds_intr) + adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring); +} + +static inline void +qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + if (adapter->ahw->hw_ops->disable_sds_intr) + adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring); +} + +static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + if (adapter->ahw->hw_ops->enable_tx_intr) + adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring); +} + +static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + if (adapter->ahw->hw_ops->disable_tx_intr) + adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring); +} + +/* When operating in a muti tx mode, driver needs to write 0x0 + * to src register, instead of 0x1 to enable receiving interrupts. + */ +static inline void +qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring) +{ + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && + (adapter->flags & QLCNIC_MSIX_ENABLED)) + writel(0, sds_ring->crb_intr_mask); + else + writel(0x1, sds_ring->crb_intr_mask); + + if (!QLCNIC_IS_MSI_FAMILY(adapter)) + writel(0xfbff, adapter->tgt_mask_reg); +} + +static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter) +{ + return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + +static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter) +{ + clear_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + +static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter) +{ + return test_bit(__QLCNIC_DIAG_MODE, &adapter->state); +} + +extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops; +extern const struct ethtool_ops qlcnic_ethtool_ops; +extern const struct ethtool_ops qlcnic_ethtool_failed_ops; + +#define QLCDB(adapter, lvl, _fmt, _args...) do { \ + if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \ + printk(KERN_INFO "%s: %s: " _fmt, \ + dev_name(&adapter->pdev->dev), \ + __func__, ##_args); \ + } while (0) + +#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 +#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 +#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830 +#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 +#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040 +#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440 + +static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false; +} + +static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + + return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? 
true : false; +} + +static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + bool status; + + status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_QLE8830) || + (device == PCI_DEVICE_ID_QLOGIC_QLE844X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; + + return status; +} + +static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter) +{ + return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false; +} + +static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + bool status; + + status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false; + + return status; +} + +static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + + return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false; +} + +static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter) +{ + unsigned short device = adapter->pdev->device; + + return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; +} + +static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter) +{ + bool status; + + status = (qlcnic_sriov_pf_check(adapter) || + qlcnic_sriov_vf_check(adapter)) ? true : false; + + return status; +} + +static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter) +{ + if (qlcnic_84xx_check(adapter)) + return QLC_84XX_VNIC_COUNT; + else + return QLC_DEFAULT_VNIC_COUNT; +} + +static inline void qlcnic_swap32_buffer(u32 *buffer, int count) +{ +#if defined(__BIG_ENDIAN) + u32 *tmp = buffer; + int i; + + for (i = 0; i < count; i++) { + *tmp = swab32(*tmp); + tmp++; + } +#endif +} + +#ifdef CONFIG_QLCNIC_HWMON +void qlcnic_register_hwmon_dev(struct qlcnic_adapter *); +void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *); +#else +static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter) +{ + return; +} +static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter) +{ + return; +} +#endif +#endif /* __QLCNIC_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c new file mode 100644 index 000000000..840bf36b5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -0,0 +1,4165 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
+
+/* 83xx Module type */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */
+#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP 0x4 /* 10GE passive
+ * copper(compliant)
+ */
+#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP 0x5 /* 10GE active limiting
+ * copper(compliant)
+ */
+#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */
+#define QLC_83XX_MODULE_TP_1000BASE_T 0xa /* 1000Base-T*/
+#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */
+
+/* Port types */
+#define QLC_83XX_10_CAPABLE BIT_8
+#define QLC_83XX_100_CAPABLE BIT_9
+#define QLC_83XX_1G_CAPABLE BIT_10
+#define QLC_83XX_10G_CAPABLE BIT_11
+#define QLC_83XX_AUTONEG_ENABLE BIT_15
+
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+ {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+ {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+ {QLCNIC_CMD_SET_MTU, 3, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
+ {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+ {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+ {QLCNIC_CMD_GET_LINK_EVENT, 2, 
1}, + {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3}, + {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1}, + {QLCNIC_CMD_CONFIGURE_RSS, 14, 1}, + {QLCNIC_CMD_CONFIGURE_LED, 2, 1}, + {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1}, + {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1}, + {QLCNIC_CMD_GET_STATISTICS, 2, 80}, + {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1}, + {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2}, + {QLCNIC_CMD_GET_LINK_STATUS, 2, 4}, + {QLCNIC_CMD_IDC_ACK, 5, 1}, + {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1}, + {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1}, + {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, + {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, + {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1}, + {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, + {QLCNIC_CMD_CONFIG_VPORT, 4, 4}, + {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1}, + {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, + {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50}, + {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1}, +}; + +const u32 qlcnic_83xx_ext_reg_tbl[] = { + 0x38CC, /* Global Reset */ + 0x38F0, /* Wildcard */ + 0x38FC, /* Informant */ + 0x3038, /* Host MBX ctrl */ + 0x303C, /* FW MBX ctrl */ + 0x355C, /* BOOT LOADER ADDRESS REG */ + 0x3560, /* BOOT LOADER SIZE REG */ + 0x3564, /* FW IMAGE ADDR REG */ + 0x1000, /* MBX intr enable */ + 0x1200, /* Default Intr mask */ + 0x1204, /* Default Interrupt ID */ + 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */ + 0x3784, /* QLC_83XX_IDC_DEV_STATE */ + 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */ + 0x378C, /* QLC_83XX_IDC_DRV_ACK */ + 0x3790, /* QLC_83XX_IDC_CTRL */ + 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */ + 0x3798, /* QLC_83XX_IDC_MIN_VERSION */ + 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */ + 0x37A0, /* QLC_83XX_IDC_PF_0 */ + 0x37A4, /* QLC_83XX_IDC_PF_1 */ + 0x37A8, /* QLC_83XX_IDC_PF_2 */ + 0x37AC, /* QLC_83XX_IDC_PF_3 */ + 0x37B0, /* QLC_83XX_IDC_PF_4 */ + 0x37B4, /* QLC_83XX_IDC_PF_5 */ + 0x37B8, /* QLC_83XX_IDC_PF_6 */ + 0x37BC, /* QLC_83XX_IDC_PF_7 */ + 0x37C0, /* QLC_83XX_IDC_PF_8 */ + 0x37C4, /* QLC_83XX_IDC_PF_9 */ + 0x37C8, /* QLC_83XX_IDC_PF_10 */ + 0x37CC, /* QLC_83XX_IDC_PF_11 */ + 0x37D0, /* QLC_83XX_IDC_PF_12 */ + 0x37D4, /* QLC_83XX_IDC_PF_13 */ + 0x37D8, /* QLC_83XX_IDC_PF_14 */ + 0x37DC, /* QLC_83XX_IDC_PF_15 */ + 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */ + 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */ + 0x37F0, /* QLC_83XX_DRV_OP_MODE */ + 0x37F4, /* QLC_83XX_VNIC_STATE */ + 0x3868, /* QLC_83XX_DRV_LOCK */ + 0x386C, /* QLC_83XX_DRV_UNLOCK */ + 0x3504, /* QLC_83XX_DRV_LOCK_ID */ + 0x34A4, /* QLC_83XX_ASIC_TEMP */ +}; + +const u32 qlcnic_83xx_reg_tbl[] = { + 0x34A8, /* PEG_HALT_STAT1 */ + 0x34AC, /* PEG_HALT_STAT2 */ + 0x34B0, /* FW_HEARTBEAT */ + 0x3500, /* FLASH LOCK_ID */ + 0x3528, /* FW_CAPABILITIES */ + 0x3538, /* Driver active, DRV_REG0 */ + 0x3540, /* Device state, DRV_REG1 */ + 0x3544, /* Driver state, DRV_REG2 */ + 0x3548, /* Driver scratch, DRV_REG3 */ + 0x354C, /* Device partiton info, DRV_REG4 */ + 0x3524, /* Driver IDC ver, DRV_REG5 */ + 0x3550, /* FW_VER_MAJOR */ + 0x3554, /* FW_VER_MINOR */ + 0x3558, /* FW_VER_SUB */ + 0x359C, /* NPAR STATE */ + 0x35FC, /* FW_IMG_VALID */ + 0x3650, /* CMD_PEG_STATE */ + 0x373C, /* RCV_PEG_STATE */ + 0x37B4, /* ASIC TEMP */ + 0x356C, /* FW API */ + 0x3570, /* DRV OP MODE */ + 0x3850, /* FLASH LOCK */ + 0x3854, /* FLASH UNLOCK */ +}; + +static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { + .read_crb = qlcnic_83xx_read_crb, + .write_crb = qlcnic_83xx_write_crb, + .read_reg = qlcnic_83xx_rd_reg_indirect, + .write_reg = qlcnic_83xx_wrt_reg_indirect, + .get_mac_address = qlcnic_83xx_get_mac_address, + .setup_intr = qlcnic_83xx_setup_intr, + .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, + 
.mbx_cmd = qlcnic_83xx_issue_cmd, + .get_func_no = qlcnic_83xx_get_func_no, + .api_lock = qlcnic_83xx_cam_lock, + .api_unlock = qlcnic_83xx_cam_unlock, + .add_sysfs = qlcnic_83xx_add_sysfs, + .remove_sysfs = qlcnic_83xx_remove_sysfs, + .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, + .create_rx_ctx = qlcnic_83xx_create_rx_ctx, + .create_tx_ctx = qlcnic_83xx_create_tx_ctx, + .del_rx_ctx = qlcnic_83xx_del_rx_ctx, + .del_tx_ctx = qlcnic_83xx_del_tx_ctx, + .setup_link_event = qlcnic_83xx_setup_link_event, + .get_nic_info = qlcnic_83xx_get_nic_info, + .get_pci_info = qlcnic_83xx_get_pci_info, + .set_nic_info = qlcnic_83xx_set_nic_info, + .change_macvlan = qlcnic_83xx_sre_macaddr_change, + .napi_enable = qlcnic_83xx_napi_enable, + .napi_disable = qlcnic_83xx_napi_disable, + .config_intr_coal = qlcnic_83xx_config_intr_coal, + .config_rss = qlcnic_83xx_config_rss, + .config_hw_lro = qlcnic_83xx_config_hw_lro, + .config_promisc_mode = qlcnic_83xx_nic_set_promisc, + .change_l2_filter = qlcnic_83xx_change_l2_filter, + .get_board_info = qlcnic_83xx_get_port_info, + .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count, + .free_mac_list = qlcnic_82xx_free_mac_list, + .io_error_detected = qlcnic_83xx_io_error_detected, + .io_slot_reset = qlcnic_83xx_io_slot_reset, + .io_resume = qlcnic_83xx_io_resume, + .get_beacon_state = qlcnic_83xx_get_beacon_state, + .enable_sds_intr = qlcnic_83xx_enable_sds_intr, + .disable_sds_intr = qlcnic_83xx_disable_sds_intr, + .enable_tx_intr = qlcnic_83xx_enable_tx_intr, + .disable_tx_intr = qlcnic_83xx_disable_tx_intr, + .get_saved_state = qlcnic_83xx_get_saved_state, + .set_saved_state = qlcnic_83xx_set_saved_state, + .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values, + .get_cap_size = qlcnic_83xx_get_cap_size, + .set_sys_info = qlcnic_83xx_set_sys_info, + .store_cap_mask = qlcnic_83xx_store_cap_mask, +}; + +static struct qlcnic_nic_template qlcnic_83xx_ops = { + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_config_led, + .request_reset = qlcnic_83xx_idc_request_reset, + .cancel_idc_work = qlcnic_83xx_idc_exit, + .napi_add = qlcnic_83xx_napi_add, + .napi_del = qlcnic_83xx_napi_del, + .config_ipaddr = qlcnic_83xx_config_ipaddr, + .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, + .shutdown = qlcnic_83xx_shutdown, + .resume = qlcnic_83xx_resume, +}; + +void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw) +{ + ahw->hw_ops = &qlcnic_83xx_hw_ops; + ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl; + ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl; +} + +int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter) +{ + u32 fw_major, fw_minor, fw_build; + struct pci_dev *pdev = adapter->pdev; + + fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); + adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); + + dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n", + QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build); + + return adapter->fw_version; +} + +static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr) +{ + void __iomem *base; + u32 val; + + base = adapter->ahw->pci_base0 + + QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func); + writel(addr, base); + val = readl(base); + if (val != addr) + return -EIO; + + return 0; +} + +int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, + int *err) +{ + 
struct qlcnic_hardware_context *ahw = adapter->ahw; + + *err = __qlcnic_set_win_base(adapter, (u32) addr); + if (!*err) { + return QLCRDX(ahw, QLCNIC_WILDCARD); + } else { + dev_err(&adapter->pdev->dev, + "%s failed, addr = 0x%lx\n", __func__, addr); + return -EIO; + } +} + +int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr, + u32 data) +{ + int err; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + err = __qlcnic_set_win_base(adapter, (u32) addr); + if (!err) { + QLCWRX(ahw, QLCNIC_WILDCARD, data); + return 0; + } else { + dev_err(&adapter->pdev->dev, + "%s failed, addr = 0x%x data = 0x%x\n", + __func__, (int)addr, data); + return err; + } +} + +static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + /* MSI-X enablement failed, use legacy interrupt */ + adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR; + adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK; + adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR; + adapter->msix_entries[0].vector = adapter->pdev->irq; + dev_info(&adapter->pdev->dev, "using legacy interrupt\n"); +} + +static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter) +{ + int num_msix; + + num_msix = adapter->drv_sds_rings; + + /* account for AEN interrupt MSI-X based interrupts */ + num_msix += 1; + + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) + num_msix += adapter->drv_tx_rings; + + return num_msix; +} + +int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err, i, num_msix; + + if (adapter->flags & QLCNIC_TSS_RSS) { + err = qlcnic_setup_tss_rss_intr(adapter); + if (err < 0) + return err; + num_msix = ahw->num_msix; + } else { + num_msix = qlcnic_83xx_calculate_msix_vector(adapter); + + err = qlcnic_enable_msix(adapter, num_msix); + if (err == -ENOMEM) + return err; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + num_msix = ahw->num_msix; + } else { + if (qlcnic_sriov_vf_check(adapter)) + return -EINVAL; + num_msix = 1; + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; + adapter->drv_tx_rings = QLCNIC_SINGLE_RING; + } + } + + /* setup interrupt mapping table for fw */ + ahw->intr_tbl = vzalloc(num_msix * + sizeof(struct qlcnic_intrpt_config)); + if (!ahw->intr_tbl) + return -ENOMEM; + + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) { + dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n", + ahw->pci_func); + return -EOPNOTSUPP; + } + + qlcnic_83xx_enable_legacy(adapter); + } + + for (i = 0; i < num_msix; i++) { + if (adapter->flags & QLCNIC_MSIX_ENABLED) + ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX; + else + ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX; + ahw->intr_tbl[i].id = i; + ahw->intr_tbl[i].src = 0; + } + + return 0; +} + +static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter) +{ + writel(0, adapter->tgt_mask_reg); +} + +static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter) +{ + if (adapter->tgt_mask_reg) + writel(1, adapter->tgt_mask_reg); +} + +static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter + *adapter) +{ + u32 mask; + + /* Mailbox in MSI-x mode and Legacy Interrupt share the same + * source register. 
We could be here before contexts are created + * and sds_ring->crb_intr_mask has not been initialized, calculate + * BAR offset for Interrupt Source Register + */ + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); + writel(0, adapter->ahw->pci_base0 + mask); +} + +void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter) +{ + u32 mask; + + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); + writel(1, adapter->ahw->pci_base0 + mask); + QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0); +} + +static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + int i; + + if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) + return; + + for (i = 0; i < cmd->rsp.num; i++) + cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i)); +} + +irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) +{ + u32 intr_val; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int retries = 0; + + intr_val = readl(adapter->tgt_status_reg); + + if (!QLC_83XX_VALID_INTX_BIT31(intr_val)) + return IRQ_NONE; + + if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) { + adapter->stats.spurious_intr++; + return IRQ_NONE; + } + /* The barrier is required to ensure writes to the registers */ + wmb(); + + /* clear the interrupt trigger control register */ + writel(0, adapter->isr_int_vec); + intr_val = readl(adapter->isr_int_vec); + do { + intr_val = readl(adapter->tgt_status_reg); + if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func) + break; + retries++; + } while (QLC_83XX_VALID_INTX_BIT30(intr_val) && + (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY)); + + return IRQ_HANDLED; +} + +static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx) +{ + atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + complete(&mbx->completion); +} + +static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) +{ + u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + unsigned long flags; + + spin_lock_irqsave(&mbx->aen_lock, flags); + resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); + if (!(resp & QLCNIC_SET_OWNER)) + goto out; + + event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); + if (event & QLCNIC_MBX_ASYNC_EVENT) { + __qlcnic_83xx_process_aen(adapter); + } else { + if (atomic_read(&mbx->rsp_status) != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + } +out: + qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); + spin_unlock_irqrestore(&mbx->aen_lock, flags); +} + +irqreturn_t qlcnic_83xx_intr(int irq, void *data) +{ + struct qlcnic_adapter *adapter = data; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + + qlcnic_83xx_poll_process_aen(adapter); + + if (ahw->diag_test) { + if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) + ahw->diag_cnt++; + qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); + return IRQ_HANDLED; + } + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); + } else { + sds_ring = &adapter->recv_ctx->sds_rings[0]; + napi_schedule(&sds_ring->napi); + } + + return IRQ_HANDLED; +} + +irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + goto done; + + if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + 
+done: + adapter->ahw->diag_cnt++; + qlcnic_enable_sds_intr(adapter, sds_ring); + + return IRQ_HANDLED; +} + +void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter) +{ + u32 num_msix; + + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) + qlcnic_83xx_set_legacy_intr_mask(adapter); + + qlcnic_83xx_disable_mbx_intr(adapter); + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + num_msix = adapter->ahw->num_msix - 1; + else + num_msix = 0; + + msleep(20); + + if (adapter->msix_entries) { + synchronize_irq(adapter->msix_entries[num_msix].vector); + free_irq(adapter->msix_entries[num_msix].vector, adapter); + } +} + +int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter) +{ + irq_handler_t handler; + u32 val; + int err = 0; + unsigned long flags = 0; + + if (!(adapter->flags & QLCNIC_MSI_ENABLED) && + !(adapter->flags & QLCNIC_MSIX_ENABLED)) + flags |= IRQF_SHARED; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + handler = qlcnic_83xx_handle_aen; + val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector; + err = request_irq(val, handler, flags, "qlcnic-MB", adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "failed to register MBX interrupt\n"); + return err; + } + } else { + handler = qlcnic_83xx_intr; + val = adapter->msix_entries[0].vector; + err = request_irq(val, handler, flags, "qlcnic", adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "failed to register INTx interrupt\n"); + return err; + } + qlcnic_83xx_clear_legacy_intr_mask(adapter); + } + + /* Enable mailbox interrupt */ + qlcnic_83xx_enable_mbx_interrupt(adapter); + + return err; +} + +void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter) +{ + u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT); + adapter->ahw->pci_func = (val >> 24) & 0xff; +} + +int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter) +{ + void __iomem *addr; + u32 val, limit = 0; + + struct qlcnic_hardware_context *ahw = adapter->ahw; + + addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func); + do { + val = readl(addr); + if (val) { + /* write the function number to register */ + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, + ahw->pci_func); + return 0; + } + usleep_range(1000, 2000); + } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT); + + return -EIO; +} + +void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter) +{ + void __iomem *addr; + u32 val; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func); + val = readl(addr); +} + +void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + int ret = 0; + u32 data; + + if (qlcnic_api_lock(adapter)) { + dev_err(&adapter->pdev->dev, + "%s: failed to acquire lock. addr offset 0x%x\n", + __func__, (u32)offset); + return; + } + + data = QLCRD32(adapter, (u32) offset, &ret); + qlcnic_api_unlock(adapter); + + if (ret == -EIO) { + dev_err(&adapter->pdev->dev, + "%s: failed. 
addr offset 0x%x\n", + __func__, (u32)offset); + return; + } + memcpy(buf, &data, size); +} + +void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + u32 data; + + memcpy(&data, buf, size); + qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data); +} + +int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int status; + + status = qlcnic_83xx_get_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "Get Port Info failed\n"); + } else { + + if (ahw->port_config & QLC_83XX_10G_CAPABLE) { + ahw->port_type = QLCNIC_XGBE; + } else if (ahw->port_config & QLC_83XX_10_CAPABLE || + ahw->port_config & QLC_83XX_100_CAPABLE || + ahw->port_config & QLC_83XX_1G_CAPABLE) { + ahw->port_type = QLCNIC_GBE; + } else { + ahw->port_type = QLCNIC_XGBE; + } + + if (QLC_83XX_AUTONEG(ahw->port_config)) + ahw->link_autoneg = AUTONEG_ENABLE; + + } + return status; +} + +static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u16 act_pci_fn = ahw->total_nic_func; + u16 count; + + ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT; + if (act_pci_fn <= 2) + count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) / + act_pci_fn; + else + count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) / + act_pci_fn; + ahw->max_uc_count = count; +} + +void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter) +{ + u32 val; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8); + else + val = BIT_2; + + QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val); + qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter); +} + +void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter, + const struct pci_device_id *ent) +{ + u32 op_mode, priv_level; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + ahw->fw_hal_version = 2; + qlcnic_get_func_no(adapter); + + if (qlcnic_sriov_vf_check(adapter)) { + qlcnic_sriov_vf_set_ops(adapter); + return; + } + + /* Determine function privilege level */ + op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + if (op_mode == QLC_83XX_DEFAULT_OPMODE) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, + ahw->pci_func); + + if (priv_level == QLCNIC_NON_PRIV_FUNC) { + ahw->op_mode = QLCNIC_NON_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged function\n", + ahw->fw_hal_version); + adapter->nic_ops = &qlcnic_vf_ops; + } else { + if (pci_find_ext_capability(adapter->pdev, + PCI_EXT_CAP_ID_SRIOV)) + set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state); + adapter->nic_ops = &qlcnic_83xx_ops; + } +} + +static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, + u32 data[]); +static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, + u32 data[]); + +void qlcnic_dump_mbx(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + int i; + + if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP) + return; + + dev_info(&adapter->pdev->dev, + "Host MBX regs(%d)\n", cmd->req.num); + for (i = 0; i < cmd->req.num; i++) { + if (i && !(i % 8)) + pr_info("\n"); + pr_info("%08x ", cmd->req.arg[i]); + } + pr_info("\n"); + dev_info(&adapter->pdev->dev, + "FW MBX regs(%d)\n", cmd->rsp.num); + for (i = 0; i < cmd->rsp.num; i++) { + if (i && !(i % 8)) + pr_info("\n"); + pr_info("%08x ", cmd->rsp.arg[i]); + } + pr_info("\n"); +} + +static void 
qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int opcode = LSW(cmd->req.arg[0]); + unsigned long max_loops; + + max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP; + + for (; max_loops; max_loops--) { + if (atomic_read(&cmd->rsp_status) == + QLC_83XX_MBX_RESPONSE_ARRIVED) + return; + + udelay(1); + } + + dev_err(&adapter->pdev->dev, + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode); + flush_workqueue(ahw->mailbox->work_q); + return; +} + +int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int cmd_type, err, opcode; + unsigned long timeout; + + if (!mbx) + return -EIO; + + opcode = LSW(cmd->req.arg[0]); + cmd_type = cmd->type; + err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd->type, ahw->pci_func, + ahw->op_mode); + return err; + } + + switch (cmd_type) { + case QLC_83XX_MBX_CMD_WAIT: + if (!wait_for_completion_timeout(&cmd->completion, timeout)) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd_type, ahw->pci_func, + ahw->op_mode); + flush_workqueue(mbx->work_q); + } + break; + case QLC_83XX_MBX_CMD_NO_WAIT: + return 0; + case QLC_83XX_MBX_CMD_BUSY_WAIT: + qlcnic_83xx_poll_for_mbx_completion(adapter, cmd); + break; + default: + dev_err(&adapter->pdev->dev, + "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, opcode, cmd_type, ahw->pci_func, + ahw->op_mode); + qlcnic_83xx_detach_mailbox_work(adapter); + } + + return cmd->rsp_opcode; +} + +int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, + struct qlcnic_adapter *adapter, u32 type) +{ + int i, size; + u32 temp; + const struct qlcnic_mailbox_metadata *mbx_tbl; + + memset(mbx, 0, sizeof(struct qlcnic_cmd_args)); + mbx_tbl = qlcnic_83xx_mbx_tbl; + size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); + for (i = 0; i < size; i++) { + if (type == mbx_tbl[i].cmd) { + mbx->op_type = QLC_83XX_FW_MBX_CMD; + mbx->req.num = mbx_tbl[i].in_args; + mbx->rsp.num = mbx_tbl[i].out_args; + mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->req.arg) + return -ENOMEM; + mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->rsp.arg) { + kfree(mbx->req.arg); + mbx->req.arg = NULL; + return -ENOMEM; + } + memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); + memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); + temp = adapter->ahw->fw_hal_version << 29; + mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); + mbx->cmd_op = type; + return 0; + } + } + + dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n", + __func__, type); + return -EINVAL; +} + +void qlcnic_83xx_idc_aen_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter; + struct qlcnic_cmd_args cmd; + int i, err = 0; + + adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work); + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK); + if (err) + return; + + for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++) + cmd.req.arg[i] = 
adapter->ahw->mbox_aen[i]; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, + "%s: Mailbox IDC ACK failed.\n", __func__); + qlcnic_free_mbx_args(&cmd); +} + +static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter, + u32 data[]) +{ + dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n", + QLCNIC_MBX_RSP(data[0])); + clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status); + return; +} + +static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 event[QLC_83XX_MBX_AEN_CNT]; + int i; + + for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) + event[i] = readl(QLCNIC_MBX_FW(ahw, i)); + + switch (QLCNIC_MBX_RSP(event[0])) { + + case QLCNIC_MBX_LINK_EVENT: + qlcnic_83xx_handle_link_aen(adapter, event); + break; + case QLCNIC_MBX_COMP_EVENT: + qlcnic_83xx_handle_idc_comp_aen(adapter, event); + break; + case QLCNIC_MBX_REQUEST_EVENT: + for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++) + adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]); + queue_delayed_work(adapter->qlcnic_wq, + &adapter->idc_aen_work, 0); + break; + case QLCNIC_MBX_TIME_EXTEND_EVENT: + ahw->extend_lb_time = event[1] >> 8 & 0xf; + break; + case QLCNIC_MBX_BC_EVENT: + qlcnic_sriov_handle_bc_event(adapter, event[1]); + break; + case QLCNIC_MBX_SFP_INSERT_EVENT: + dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n", + QLCNIC_MBX_RSP(event[0])); + break; + case QLCNIC_MBX_SFP_REMOVE_EVENT: + dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n", + QLCNIC_MBX_RSP(event[0])); + break; + case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT: + qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]); + break; + default: + dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n", + QLCNIC_MBX_RSP(event[0])); + break; + } + + QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); +} + +static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) +{ + u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; + unsigned long flags; + + spin_lock_irqsave(&mbx->aen_lock, flags); + resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); + if (resp & QLCNIC_SET_OWNER) { + event = readl(QLCNIC_MBX_FW(ahw, 0)); + if (event & QLCNIC_MBX_ASYNC_EVENT) { + __qlcnic_83xx_process_aen(adapter); + } else { + if (atomic_read(&mbx->rsp_status) != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + } + } + spin_unlock_irqrestore(&mbx->aen_lock, flags); +} + +static void qlcnic_83xx_mbx_poll_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter; + + adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work); + + if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state)) + return; + + qlcnic_83xx_process_aen(adapter); + queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, + (HZ / 10)); +} + +void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter) +{ + if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state)) + return; + + INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work); + queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0); +} + +void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter) +{ + if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state)) + return; + cancel_delayed_work_sync(&adapter->mbx_poll_work); +} + +static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) +{ + int index, i, err, sds_mbx_size; + u32 *buf, intrpt_id, intr_mask; + u16 
context_id; + u8 num_sds; + struct qlcnic_cmd_args cmd; + struct qlcnic_host_sds_ring *sds; + struct qlcnic_sds_mbx sds_mbx; + struct qlcnic_add_rings_mbx_out *mbx_out; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + sds_mbx_size = sizeof(struct qlcnic_sds_mbx); + context_id = recv_ctx->context_id; + num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; + ahw->hw_ops->alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_ADD_RCV_RINGS); + cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16); + + /* set up status rings, mbx 2-81 */ + index = 2; + for (i = 8; i < adapter->drv_sds_rings; i++) { + memset(&sds_mbx, 0, sds_mbx_size); + sds = &recv_ctx->sds_rings[i]; + sds->consumer = 0; + memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); + sds_mbx.sds_ring_size = sds->num_desc; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[i].id; + else + intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + sds_mbx.intrpt_id = intrpt_id; + else + sds_mbx.intrpt_id = 0xffff; + sds_mbx.intrpt_val = 0; + buf = &cmd.req.arg[index]; + memcpy(buf, &sds_mbx, sds_mbx_size); + index += sds_mbx_size / sizeof(u32); + } + + /* send the mailbox command */ + err = ahw->hw_ops->mbx_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to add rings %d\n", err); + goto out; + } + + mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1]; + index = 0; + /* status descriptor ring */ + for (i = 8; i < adapter->drv_sds_rings; i++) { + sds = &recv_ctx->sds_rings[i]; + sds->crb_sts_consumer = ahw->pci_base0 + + mbx_out->host_csmr[index]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intr_mask = ahw->intr_tbl[i].src; + else + intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); + + sds->crb_intr_mask = ahw->pci_base0 + intr_mask; + index++; + } +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter) +{ + int err; + u32 temp = 0; + struct qlcnic_cmd_args cmd; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX)) + return; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp); + + cmd.req.arg[1] = recv_ctx->context_id | temp; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to destroy rx ctx in firmware\n"); + + recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) +{ + int i, err, index, sds_mbx_size, rds_mbx_size; + u8 num_sds, num_rds; + u32 *buf, intrpt_id, intr_mask, cap = 0; + struct qlcnic_host_sds_ring *sds; + struct qlcnic_host_rds_ring *rds; + struct qlcnic_sds_mbx sds_mbx; + struct qlcnic_rds_mbx rds_mbx; + struct qlcnic_cmd_args cmd; + struct qlcnic_rcv_mbx_out *mbx_out; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_hardware_context *ahw = adapter->ahw; + num_rds = adapter->max_rds_rings; + + if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS) + num_sds = adapter->drv_sds_rings; + else + num_sds = QLCNIC_MAX_SDS_RINGS; + + sds_mbx_size = sizeof(struct qlcnic_sds_mbx); + rds_mbx_size = sizeof(struct qlcnic_rds_mbx); + cap = 
QLCNIC_CAP0_LEGACY_CONTEXT; + + if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) + cap |= QLC_83XX_FW_CAP_LRO_MSS; + + /* set mailbox hdr and capabilities */ + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CREATE_RX_CTX); + if (err) + return err; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + cmd.req.arg[1] = cap; + cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) | + (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_create_rx_ctx(adapter, + &cmd.req.arg[6]); + /* set up status rings, mbx 8-57/87 */ + index = QLC_83XX_HOST_SDS_MBX_IDX; + for (i = 0; i < num_sds; i++) { + memset(&sds_mbx, 0, sds_mbx_size); + sds = &recv_ctx->sds_rings[i]; + sds->consumer = 0; + memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); + sds_mbx.phy_addr_low = LSD(sds->phys_addr); + sds_mbx.phy_addr_high = MSD(sds->phys_addr); + sds_mbx.sds_ring_size = sds->num_desc; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[i].id; + else + intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + sds_mbx.intrpt_id = intrpt_id; + else + sds_mbx.intrpt_id = 0xffff; + sds_mbx.intrpt_val = 0; + buf = &cmd.req.arg[index]; + memcpy(buf, &sds_mbx, sds_mbx_size); + index += sds_mbx_size / sizeof(u32); + } + /* set up receive rings, mbx 88-111/135 */ + index = QLCNIC_HOST_RDS_MBX_IDX; + rds = &recv_ctx->rds_rings[0]; + rds->producer = 0; + memset(&rds_mbx, 0, rds_mbx_size); + rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr); + rds_mbx.reg_ring_sz = rds->dma_size; + rds_mbx.reg_ring_len = rds->num_desc; + /* Jumbo ring */ + rds = &recv_ctx->rds_rings[1]; + rds->producer = 0; + rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr); + rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr); + rds_mbx.jmb_ring_sz = rds->dma_size; + rds_mbx.jmb_ring_len = rds->num_desc; + buf = &cmd.req.arg[index]; + memcpy(buf, &rds_mbx, rds_mbx_size); + + /* send the mailbox command */ + err = ahw->hw_ops->mbx_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to create Rx ctx in firmware%d\n", err); + goto out; + } + mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1]; + recv_ctx->context_id = mbx_out->ctx_id; + recv_ctx->state = mbx_out->state; + recv_ctx->virt_port = mbx_out->vport_id; + dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n", + recv_ctx->context_id, recv_ctx->state); + /* Receive descriptor ring */ + /* Standard ring */ + rds = &recv_ctx->rds_rings[0]; + rds->crb_rcv_producer = ahw->pci_base0 + + mbx_out->host_prod[0].reg_buf; + /* Jumbo ring */ + rds = &recv_ctx->rds_rings[1]; + rds->crb_rcv_producer = ahw->pci_base0 + + mbx_out->host_prod[0].jmb_buf; + /* status descriptor ring */ + for (i = 0; i < num_sds; i++) { + sds = &recv_ctx->sds_rings[i]; + sds->crb_sts_consumer = ahw->pci_base0 + + mbx_out->host_csmr[i]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intr_mask = ahw->intr_tbl[i].src; + else + intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); + sds->crb_intr_mask = ahw->pci_base0 + intr_mask; + } + + if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS) + err = qlcnic_83xx_add_rings(adapter); +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + struct qlcnic_cmd_args cmd; + u32 temp = 0; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, 
QLCNIC_CMD_DESTROY_TX_CTX)) + return; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp); + + cmd.req.arg[1] = tx_ring->ctx_id | temp; + if (qlcnic_issue_cmd(adapter, &cmd)) + dev_err(&adapter->pdev->dev, + "Failed to destroy tx ctx in firmware\n"); + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx, int ring) +{ + int err; + u16 msix_id; + u32 *buf, intr_mask, temp = 0; + struct qlcnic_cmd_args cmd; + struct qlcnic_tx_mbx mbx; + struct qlcnic_tx_mbx_out *mbx_out; + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 msix_vector; + + /* Reset host resources */ + tx->producer = 0; + tx->sw_consumer = 0; + *(tx->hw_consumer) = 0; + + memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx)); + + /* setup mailbox inbox registerss */ + mbx.phys_addr_low = LSD(tx->phys_addr); + mbx.phys_addr_high = MSD(tx->phys_addr); + mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr); + mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr); + mbx.size = tx->num_desc; + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) + msix_vector = adapter->drv_sds_rings + ring; + else + msix_vector = adapter->drv_sds_rings - 1; + msix_id = ahw->intr_tbl[msix_vector].id; + } else { + msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + } + + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + mbx.intr_id = msix_id; + else + mbx.intr_id = 0xffff; + mbx.src = 0; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + if (err) + return err; + + if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[0] |= (0x3 << 29); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp); + + cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; + cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp; + + buf = &cmd.req.arg[6]; + memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); + /* send the mailbox command*/ + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + netdev_err(adapter->netdev, + "Failed to create Tx ctx in firmware 0x%x\n", err); + goto out; + } + mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2]; + tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod; + tx->ctx_id = mbx_out->ctx_id; + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src; + tx->crb_intr_mask = ahw->pci_base0 + intr_mask; + } + netdev_info(adapter->netdev, + "Tx Context[0x%x] Created, state:0x%x\n", + tx->ctx_id, mbx_out->state); +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, + u8 num_sds_ring) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_ring; + u16 adapter_state = adapter->is_up; + u8 ring; + int ret; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; + adapter->ahw->diag_test = test; + adapter->ahw->linkup = 0; + + ret = qlcnic_attach(adapter); + if (ret) { + netif_device_attach(netdev); + return ret; + } + + ret = qlcnic_fw_create_ctx(adapter); + if (ret) { + qlcnic_detach(adapter); + if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { 
+ adapter->drv_sds_rings = num_sds_ring; + qlcnic_attach(adapter); + } + netif_device_attach(netdev); + return ret; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring, ring); + } + + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_enable_sds_intr(adapter, sds_ring); + } + } + + if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { + adapter->ahw->loopback_state = 0; + adapter->ahw->hw_ops->setup_link_event(adapter, 1); + } + + set_bit(__QLCNIC_DEV_UP, &adapter->state); + return 0; +} + +static void qlcnic_83xx_diag_free_res(struct net_device *netdev, + u8 drv_sds_rings) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + int ring; + + clear_bit(__QLCNIC_DEV_UP, &adapter->state); + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + qlcnic_disable_sds_intr(adapter, sds_ring); + } + } + + qlcnic_fw_destroy_ctx(adapter); + qlcnic_detach(adapter); + + adapter->ahw->diag_test = 0; + adapter->drv_sds_rings = drv_sds_rings; + + if (qlcnic_attach(adapter)) + goto out; + + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); + +out: + netif_device_attach(netdev); +} + +static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_cmd_args cmd; + u8 beacon_state; + int err = 0; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG); + if (!err) { + err = qlcnic_issue_cmd(adapter, &cmd); + if (!err) { + beacon_state = cmd.rsp.arg[4]; + if (beacon_state == QLCNIC_BEACON_DISABLE) + ahw->beacon_state = QLC_83XX_BEACON_OFF; + else if (beacon_state == QLC_83XX_ENABLE_BEACON) + ahw->beacon_state = QLC_83XX_BEACON_ON; + } + } else { + netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n", + err); + } + + qlcnic_free_mbx_args(&cmd); + + return; +} + +int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state, + u32 beacon) +{ + struct qlcnic_cmd_args cmd; + u32 mbx_in; + int i, status = 0; + + if (state) { + /* Get LED configuration */ + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_LED_CONFIG); + if (status) + return status; + + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) { + dev_err(&adapter->pdev->dev, + "Get led config failed.\n"); + goto mbx_err; + } else { + for (i = 0; i < 4; i++) + adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1]; + } + qlcnic_free_mbx_args(&cmd); + /* Set LED Configuration */ + mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) | + LSW(QLC_83XX_LED_CONFIG); + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + if (status) + return status; + + cmd.req.arg[1] = mbx_in; + cmd.req.arg[2] = mbx_in; + cmd.req.arg[3] = mbx_in; + if (beacon) + cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON; + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) { + dev_err(&adapter->pdev->dev, + "Set led config failed.\n"); + } +mbx_err: + qlcnic_free_mbx_args(&cmd); + return status; + + } else { + /* Restoring default LED configuration */ + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_LED_CONFIG); + if (status) + return status; + + cmd.req.arg[1] = adapter->ahw->mbox_reg[0]; + cmd.req.arg[2] = 
adapter->ahw->mbox_reg[1]; + cmd.req.arg[3] = adapter->ahw->mbox_reg[2]; + if (beacon) + cmd.req.arg[4] = adapter->ahw->mbox_reg[3]; + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) + dev_err(&adapter->pdev->dev, + "Restoring led config failed.\n"); + qlcnic_free_mbx_args(&cmd); + return status; + } +} + +int qlcnic_83xx_set_led(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = -EIO, active = 1; + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(netdev, + "LED test is not supported in non-privileged mode\n"); + return -EOPNOTSUPP; + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) + return -EBUSY; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to set LED blink state\n"); + break; + case ETHTOOL_ID_INACTIVE: + active = 0; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + err = qlcnic_83xx_config_led(adapter, active, 0); + if (err) + netdev_err(netdev, "Failed to reset LED blink state\n"); + break; + + default: + return -EINVAL; + } + + if (!active || err) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + return err; +} + +void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_cmd_args cmd; + int status; + + if (qlcnic_sriov_vf_check(adapter)) + return; + + if (enable) + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_INIT_NIC_FUNC); + else + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_STOP_NIC_FUNC); + + if (status) + return; + + cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES; + + if (adapter->dcb) + cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN; + + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) + dev_err(&adapter->pdev->dev, + "Failed to %s in NIC IDC function event.\n", + (enable ? "register" : "unregister")); + + qlcnic_free_mbx_args(&cmd); +} + +static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG); + if (err) + return err; + + cmd.req.arg[1] = adapter->ahw->port_config; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, "Set Port Config failed.\n"); + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG); + if (err) + return err; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, "Get Port config failed\n"); + else + adapter->ahw->port_config = cmd.rsp.arg[1]; + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable) +{ + int err; + u32 temp; + struct qlcnic_cmd_args cmd; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT); + if (err) + return err; + + temp = adapter->recv_ctx->context_id << 16; + cmd.req.arg[1] = (enable ? 
1 : 0) | BIT_8 | temp; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, + "Setup linkevent mailbox failed\n"); + qlcnic_free_mbx_args(&cmd); + return err; +} + +static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter, + u32 *interface_id) +{ + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_alloc_lb_filters_mem(adapter); + qlcnic_pf_set_interface_id_promisc(adapter, interface_id); + adapter->rx_mac_learn = true; + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } +} + +int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) +{ + struct qlcnic_cmd_args *cmd = NULL; + u32 temp = 0; + int err; + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return -EIO; + + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(cmd, adapter, + QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); + if (err) + goto out; + + cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; + qlcnic_83xx_set_interface_id_promisc(adapter, &temp); + + if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter)) + mode = VPORT_MISS_MODE_ACCEPT_ALL; + + cmd->req.arg[1] = mode | temp; + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) + return err; + + qlcnic_free_mbx_args(cmd); + +out: + kfree(cmd); + return err; +} + +int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + u8 drv_sds_rings = adapter->drv_sds_rings; + u8 drv_tx_rings = adapter->drv_tx_rings; + int ret = 0, loop = 0; + + if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(netdev, + "Loopback test not supported in non privileged mode\n"); + return -ENOTSUPP; + } + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, "Device is resetting\n"); + return -EBUSY; + } + + if (qlcnic_get_diag_lock(adapter)) { + netdev_info(netdev, "Device is in diagnostics mode\n"); + return -EBUSY; + } + + netdev_info(netdev, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); + + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, + drv_sds_rings); + if (ret) + goto fail_diag_alloc; + + ret = qlcnic_83xx_set_lb_mode(adapter, mode); + if (ret) + goto free_diag_res; + + /* Poll for link up event before running traffic */ + do { + msleep(QLC_83XX_LB_MSLEEP_COUNT); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + ret = -EBUSY; + goto free_diag_res; + } + if (loop++ > QLC_83XX_LB_WAIT_COUNT) { + netdev_info(netdev, + "Firmware didn't sent link up event to loopback request\n"); + ret = -ETIMEDOUT; + qlcnic_83xx_clear_lb_mode(adapter, mode); + goto free_diag_res; + } + } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); + + ret = qlcnic_do_lb_test(adapter, mode); + + qlcnic_83xx_clear_lb_mode(adapter, mode); + +free_diag_res: + qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); + +fail_diag_alloc: + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; + qlcnic_release_diag_lock(adapter); + return ret; +} + +static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter, + u32 *max_wait_count) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int temp; + + netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n", + ahw->extend_lb_time); + temp = ahw->extend_lb_time * 1000; + *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT; + ahw->extend_lb_time = 0; +} + +static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; + u32 config, max_wait_count; + int status = 0, loop = 0; + + ahw->extend_lb_time = 0; + max_wait_count = QLC_83XX_LB_WAIT_COUNT; + status = qlcnic_83xx_get_port_config(adapter); + if (status) + return status; + + config = ahw->port_config; + + /* Check if port is already in loopback mode */ + if ((config & QLC_83XX_CFG_LOOPBACK_HSS) || + (config & QLC_83XX_CFG_LOOPBACK_EXT)) { + netdev_err(netdev, + "Port already in Loopback mode.\n"); + return -EINPROGRESS; + } + + set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + + if (mode == QLCNIC_ILB_MODE) + ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS; + if (mode == QLCNIC_ELB_MODE) + ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT; + + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + netdev_err(netdev, + "Failed to Set Loopback Mode = 0x%x.\n", + ahw->port_config); + ahw->port_config = config; + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return status; + } + + /* Wait for Link and IDC Completion AEN */ + do { + msleep(QLC_83XX_LB_MSLEEP_COUNT); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -EBUSY; + } + + if (ahw->extend_lb_time) + qlcnic_extend_lb_idc_cmpltn_wait(adapter, + &max_wait_count); + + if (loop++ > max_wait_count) { + netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n", + __func__); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + qlcnic_83xx_clear_lb_mode(adapter, mode); + return -ETIMEDOUT; + } + } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); + + qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0, + QLCNIC_MAC_ADD); + return status; +} + +static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + struct qlcnic_hardware_context *ahw = 
adapter->ahw; + u32 config = ahw->port_config, max_wait_count; + struct net_device *netdev = adapter->netdev; + int status = 0, loop = 0; + + ahw->extend_lb_time = 0; + max_wait_count = QLC_83XX_LB_WAIT_COUNT; + set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + if (mode == QLCNIC_ILB_MODE) + ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS; + if (mode == QLCNIC_ELB_MODE) + ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT; + + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + netdev_err(netdev, + "Failed to Clear Loopback Mode = 0x%x.\n", + ahw->port_config); + ahw->port_config = config; + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return status; + } + + /* Wait for Link and IDC Completion AEN */ + do { + msleep(QLC_83XX_LB_MSLEEP_COUNT); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, + "Device is resetting, free LB test resources\n"); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -EBUSY; + } + + if (ahw->extend_lb_time) + qlcnic_extend_lb_idc_cmpltn_wait(adapter, + &max_wait_count); + + if (loop++ > max_wait_count) { + netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n", + __func__); + clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status); + return -ETIMEDOUT; + } + } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status)); + + qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0, + QLCNIC_MAC_DEL); + return status; +} + +static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, + u32 *interface_id) +{ + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } +} + +void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, + int mode) +{ + int err; + u32 temp = 0, temp_ip; + struct qlcnic_cmd_args cmd; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_IP_ADDR); + if (err) + return; + + qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp); + + if (mode == QLCNIC_IP_UP) + cmd.req.arg[1] = 1 | temp; + else + cmd.req.arg[1] = 2 | temp; + + /* + * Adapter needs IP address in network byte order. + * But hardware mailbox registers go through writel(), hence IP address + * gets swapped on big endian architecture. + * To negate swapping of writel() on big endian architecture + * use swab32(value). + */ + + temp_ip = swab32(ntohl(ip)); + memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32)); + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) + dev_err(&adapter->netdev->dev, + "could not notify %s IP 0x%x request\n", + (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip); + + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode) +{ + int err; + u32 temp, arg1; + struct qlcnic_cmd_args cmd; + int lro_bit_mask; + + lro_bit_mask = (mode ? 
(BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0); + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return 0; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO); + if (err) + return err; + + temp = adapter->recv_ctx->context_id << 16; + arg1 = lro_bit_mask | temp; + cmd.req.arg[1] = arg1; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_info(&adapter->pdev->dev, "LRO config failed\n"); + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable) +{ + int err; + u32 word; + struct qlcnic_cmd_args cmd; + const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL }; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS); + if (err) + return err; + /* + * RSS request: + * bits 3-0: Rsvd + * 5-4: hash_type_ipv4 + * 7-6: hash_type_ipv6 + * 8: enable + * 9: use indirection table + * 16-31: indirection table mask + */ + word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | + ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | + ((u32)(enable & 0x1) << 8) | + ((0x7ULL) << 16); + cmd.req.arg[1] = (adapter->recv_ctx->context_id); + cmd.req.arg[2] = word; + memcpy(&cmd.req.arg[4], key, sizeof(key)); + + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err) + dev_info(&adapter->pdev->dev, "RSS config failed\n"); + qlcnic_free_mbx_args(&cmd); + + return err; + +} + +static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter, + u32 *interface_id) +{ + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_pf_set_interface_id_macaddr(adapter, interface_id); + } else { + if (!qlcnic_sriov_vf_check(adapter)) + *interface_id = adapter->recv_ctx->context_id << 16; + } +} + +int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, + u16 vlan_id, u8 op) +{ + struct qlcnic_cmd_args *cmd = NULL; + struct qlcnic_macvlan_mbx mv; + u32 *buf, temp = 0; + int err; + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return -EIO; + + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); + if (err) + goto out; + + cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; + + if (vlan_id) + op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? 
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; + + cmd->req.arg[1] = op | (1 << 8); + qlcnic_83xx_set_interface_id_macaddr(adapter, &temp); + cmd->req.arg[1] |= temp; + mv.vlan = vlan_id; + mv.mac_addr0 = addr[0]; + mv.mac_addr1 = addr[1]; + mv.mac_addr2 = addr[2]; + mv.mac_addr3 = addr[3]; + mv.mac_addr4 = addr[4]; + mv.mac_addr5 = addr[5]; + buf = &cmd->req.arg[2]; + memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) + return err; + + qlcnic_free_mbx_args(cmd); +out: + kfree(cmd); + return err; +} + +void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, + u16 vlan_id) +{ + u8 mac[ETH_ALEN]; + memcpy(&mac, addr, ETH_ALEN); + qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD); +} + +static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac, + u8 type, struct qlcnic_cmd_args *cmd) +{ + switch (type) { + case QLCNIC_SET_STATION_MAC: + case QLCNIC_SET_FAC_DEF_MAC: + memcpy(&cmd->req.arg[2], mac, sizeof(u32)); + memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16)); + break; + } + cmd->req.arg[1] = type; +} + +int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, + u8 function) +{ + int err, i; + struct qlcnic_cmd_args cmd; + u32 mac_low, mac_high; + + function = 0; + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + if (err) + return err; + + qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd); + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err == QLCNIC_RCODE_SUCCESS) { + mac_low = cmd.rsp.arg[1]; + mac_high = cmd.rsp.arg[2]; + + for (i = 0; i < 2; i++) + mac[i] = (u8) (mac_high >> ((1 - i) * 8)); + for (i = 2; i < 6; i++) + mac[i] = (u8) (mac_low >> ((5 - i) * 8)); + } else { + dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n", + err); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; + struct qlcnic_cmd_args cmd; + u16 temp; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); + if (err) + return err; + + temp = adapter->recv_ctx->context_id; + cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16; + temp = coal->rx_time_us; + cmd.req.arg[2] = coal->rx_packets | temp << 16; + cmd.req.arg[3] = coal->flag; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) + netdev_err(adapter->netdev, + "failed to set interrupt coalescing parameters\n"); + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; + struct qlcnic_cmd_args cmd; + u16 temp; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL); + if (err) + return err; + + temp = adapter->tx_ring->ctx_id; + cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16; + temp = coal->tx_time_us; + cmd.req.arg[2] = coal->tx_packets | temp << 16; + cmd.req.arg[3] = coal->flag; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) + netdev_err(adapter->netdev, + "failed to set interrupt coalescing parameters\n"); + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter) +{ + int err = 0; + + err = qlcnic_83xx_set_rx_intr_coal(adapter); + if (err) + netdev_err(adapter->netdev, + "failed to set Rx coalescing 
parameters\n"); + + err = qlcnic_83xx_set_tx_intr_coal(adapter); + if (err) + netdev_err(adapter->netdev, + "failed to set Tx coalescing parameters\n"); + + return err; +} + +int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; + u32 rx_coalesce_usecs, rx_max_frames; + u32 tx_coalesce_usecs, tx_max_frames; + int err; + + if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) + return -EIO; + + tx_coalesce_usecs = ethcoal->tx_coalesce_usecs; + tx_max_frames = ethcoal->tx_max_coalesced_frames; + rx_coalesce_usecs = ethcoal->rx_coalesce_usecs; + rx_max_frames = ethcoal->rx_max_coalesced_frames; + coal->flag = QLCNIC_INTR_DEFAULT; + + if ((coal->rx_time_us == rx_coalesce_usecs) && + (coal->rx_packets == rx_max_frames)) { + coal->type = QLCNIC_INTR_COAL_TYPE_TX; + coal->tx_time_us = tx_coalesce_usecs; + coal->tx_packets = tx_max_frames; + } else if ((coal->tx_time_us == tx_coalesce_usecs) && + (coal->tx_packets == tx_max_frames)) { + coal->type = QLCNIC_INTR_COAL_TYPE_RX; + coal->rx_time_us = rx_coalesce_usecs; + coal->rx_packets = rx_max_frames; + } else { + coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX; + coal->rx_time_us = rx_coalesce_usecs; + coal->rx_packets = rx_max_frames; + coal->tx_time_us = tx_coalesce_usecs; + coal->tx_packets = tx_max_frames; + } + + switch (coal->type) { + case QLCNIC_INTR_COAL_TYPE_RX: + err = qlcnic_83xx_set_rx_intr_coal(adapter); + break; + case QLCNIC_INTR_COAL_TYPE_TX: + err = qlcnic_83xx_set_tx_intr_coal(adapter); + break; + case QLCNIC_INTR_COAL_TYPE_RX_TX: + err = qlcnic_83xx_set_rx_tx_intr_coal(adapter); + break; + default: + err = -EINVAL; + netdev_err(adapter->netdev, + "Invalid Interrupt coalescing type\n"); + break; + } + + return err; +} + +static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, + u32 data[]) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u8 link_status, duplex; + /* link speed */ + link_status = LSB(data[3]) & 1; + if (link_status) { + ahw->link_speed = MSW(data[2]); + duplex = LSB(MSW(data[3])); + if (duplex) + ahw->link_duplex = DUPLEX_FULL; + else + ahw->link_duplex = DUPLEX_HALF; + } else { + ahw->link_speed = SPEED_UNKNOWN; + ahw->link_duplex = DUPLEX_UNKNOWN; + } + + ahw->link_autoneg = MSB(MSW(data[3])); + ahw->module_type = MSB(LSW(data[3])); + ahw->has_link_events = 1; + ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK; + qlcnic_advert_link_change(adapter, link_status); +} + +static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) +{ + struct qlcnic_adapter *adapter = data; + struct qlcnic_mailbox *mbx; + u32 mask, resp, event; + unsigned long flags; + + mbx = adapter->ahw->mailbox; + spin_lock_irqsave(&mbx->aen_lock, flags); + resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL); + if (!(resp & QLCNIC_SET_OWNER)) + goto out; + + event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); + if (event & QLCNIC_MBX_ASYNC_EVENT) + __qlcnic_83xx_process_aen(adapter); + else + qlcnic_83xx_notify_mbx_response(mbx); + +out: + mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); + writel(0, adapter->ahw->pci_base0 + mask); + spin_unlock_irqrestore(&mbx->aen_lock, flags); + return IRQ_HANDLED; +} + +int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *nic) +{ + int i, err = -EIO; + struct qlcnic_cmd_args cmd; + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { + dev_err(&adapter->pdev->dev, + "%s: Error, invoked by non management func\n", + __func__); + return err; + } + + err = 
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + return err; + + cmd.req.arg[1] = (nic->pci_func << 16); + cmd.req.arg[2] = 0x1 << 16; + cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16); + cmd.req.arg[4] = nic->capabilities; + cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16); + cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16); + cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16); + for (i = 8; i < 32; i++) + cmd.req.arg[i] = 0; + + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, "Failed to set nic info%d\n", + err); + err = -EIO; + } + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, u8 func_id) +{ + int err; + u32 temp; + u8 op = 0; + struct qlcnic_cmd_args cmd; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (err) + return err; + + if (func_id != ahw->pci_func) { + temp = func_id << 16; + cmd.req.arg[1] = op | BIT_31 | temp; + } else { + cmd.req.arg[1] = ahw->pci_func << 16; + } + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_info(&adapter->pdev->dev, + "Failed to get nic info %d\n", err); + goto out; + } + + npar_info->op_type = cmd.rsp.arg[1]; + npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF; + npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16; + npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF; + npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16; + npar_info->capabilities = cmd.rsp.arg[4]; + npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF; + npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16; + npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF; + npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16; + npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF; + npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16; + if (cmd.rsp.arg[8] & 0x1) + npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1; + if (cmd.rsp.arg[8] & 0x10000) { + temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17; + npar_info->max_linkspeed_reg_offset = temp; + } + + memcpy(ahw->extra_capability, &cmd.rsp.arg[16], + sizeof(ahw->extra_capability)); + +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type, + u16 *nic, u16 *fcoe, u16 *iscsi) +{ + struct device *dev = &adapter->pdev->dev; + int err = 0; + + switch (type) { + case QLCNIC_TYPE_NIC: + (*nic)++; + break; + case QLCNIC_TYPE_FCOE: + (*fcoe)++; + break; + case QLCNIC_TYPE_ISCSI: + (*iscsi)++; + break; + default: + dev_err(dev, "%s: Unknown PCI type[%x]\n", + __func__, type); + err = -EIO; + } + + return err; +} + +int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *pci_info) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + u16 nic = 0, fcoe = 0, iscsi = 0; + struct qlcnic_cmd_args cmd; + int i, err = 0, j = 0; + u32 temp; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + if (err) + return err; + + err = qlcnic_issue_cmd(adapter, &cmd); + + ahw->total_nic_func = 0; + if (err == QLCNIC_RCODE_SUCCESS) { + ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF; + for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) { + pci_info->id = cmd.rsp.arg[i] & 0xFFFF; + pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; + i++; + if 
(!pci_info->active) { + i += QLC_SKIP_INACTIVE_PCI_REGS; + continue; + } + pci_info->type = cmd.rsp.arg[i] & 0xFFFF; + err = qlcnic_get_pci_func_type(adapter, pci_info->type, + &nic, &fcoe, &iscsi); + temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; + pci_info->default_port = temp; + i++; + pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF; + temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16; + pci_info->tx_max_bw = temp; + i = i + 2; + memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2); + i++; + memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2); + i = i + 3; + } + } else { + dev_err(dev, "Failed to get PCI Info, error = %d\n", err); + err = -EIO; + } + + ahw->total_nic_func = nic; + ahw->total_pci_func = nic + fcoe + iscsi; + if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) { + dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n", + __func__, ahw->total_nic_func, ahw->total_pci_func); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) +{ + int i, index, err; + u8 max_ints; + u32 val, temp, type; + struct qlcnic_cmd_args cmd; + + max_ints = adapter->ahw->num_msix - 1; + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); + if (err) + return err; + + cmd.req.arg[1] = max_ints; + + if (qlcnic_sriov_vf_check(adapter)) + cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16; + + for (i = 0, index = 2; i < max_ints; i++) { + type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; + val = type | (adapter->ahw->intr_tbl[i].type << 4); + if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) + val |= (adapter->ahw->intr_tbl[i].id << 16); + cmd.req.arg[index++] = val; + } + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to configure interrupts 0x%x\n", err); + goto out; + } + + max_ints = cmd.rsp.arg[1]; + for (i = 0, index = 2; i < max_ints; i++, index += 2) { + val = cmd.rsp.arg[index]; + if (LSB(val)) { + dev_info(&adapter->pdev->dev, + "Can't configure interrupt %d\n", + adapter->ahw->intr_tbl[i].id); + continue; + } + if (op_type) { + adapter->ahw->intr_tbl[i].id = MSW(val); + adapter->ahw->intr_tbl[i].enabled = 1; + temp = cmd.rsp.arg[index + 1]; + adapter->ahw->intr_tbl[i].src = temp; + } else { + adapter->ahw->intr_tbl[i].id = i; + adapter->ahw->intr_tbl[i].enabled = 0; + adapter->ahw->intr_tbl[i].src = 0; + } + } +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter) +{ + int id, timeout = 0; + u32 status = 0; + + while (status == 0) { + status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK); + if (status) + break; + + if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) { + id = QLC_SHARED_REG_RD32(adapter, + QLCNIC_FLASH_LOCK_OWNER); + dev_err(&adapter->pdev->dev, + "%s: failed, lock held by %d\n", __func__, id); + return -EIO; + } + usleep_range(1000, 2000); + } + + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum); + return 0; +} + +void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter) +{ + QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK); + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF); +} + +int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter, + u32 flash_addr, u8 *p_data, + int count) +{ + u32 word, range, flash_offset, addr = flash_addr, ret; + ulong indirect_add, direct_window; + int i, err = 0; + + flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1); + if (addr & 0x3) { 
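The flash read routine that begins just above, qlcnic_83xx_lockless_flash_read32(), splits every flash byte address in two: the upper bits are latched into the direct-window register and the low bits give the offset inside the current sector, which is why unaligned addresses are rejected and the window is re-programmed when a read crosses a sector boundary. The standalone sketch below only illustrates that address split; QLCNIC_FLASH_SECTOR_SIZE is assumed here to be 64 KB and none of the qlcnic register I/O is reproduced.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed sector size (64 KB), standing in for QLCNIC_FLASH_SECTOR_SIZE. */
#define FLASH_SECTOR_SIZE 0x10000u

/* Mirror of the address handling in the lockless flash read: reject
 * unaligned addresses, derive the direct-window base from the high bits
 * and the in-sector offset from the low bits. */
static int split_flash_addr(uint32_t addr, uint32_t *window, uint32_t *offset)
{
	if (addr & 0x3)
		return -1;
	*window = addr & 0xFFFF0000u;
	*offset = addr & (FLASH_SECTOR_SIZE - 1);
	return 0;
}

int main(void)
{
	uint32_t addrs[] = { 0x0003FFFCu, 0x00040000u, 0x00040002u };
	uint32_t window, offset;
	size_t i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		if (split_flash_addr(addrs[i], &window, &offset))
			printf("0x%08" PRIx32 ": rejected, not 4-byte aligned\n",
			       addrs[i]);
		else
			printf("0x%08" PRIx32 ": window 0x%08" PRIx32
			       " offset 0x%04" PRIx32 "\n",
			       addrs[i], window, offset);
	}
	return 0;
}

The second address in the example crosses into a new 64 KB sector, which is the case where the driver re-writes QLC_83XX_FLASH_DIRECT_WINDOW before continuing the multi-sector read.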
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr); + return -EIO; + } + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW, + (addr & 0xFFFF0000)); + + range = flash_offset + (count * sizeof(u32)); + /* Check if data is spread across multiple sectors */ + if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) { + + /* Multi sector read */ + for (i = 0; i < count; i++) { + indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); + ret = QLCRD32(adapter, indirect_add, &err); + if (err == -EIO) + return err; + + word = ret; + *(u32 *)p_data = word; + p_data = p_data + 4; + addr = addr + 4; + flash_offset = flash_offset + 4; + + if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) { + direct_window = QLC_83XX_FLASH_DIRECT_WINDOW; + /* This write is needed once for each sector */ + qlcnic_83xx_wrt_reg_indirect(adapter, + direct_window, + (addr)); + flash_offset = 0; + } + } + } else { + /* Single sector read */ + for (i = 0; i < count; i++) { + indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr); + ret = QLCRD32(adapter, indirect_add, &err); + if (err == -EIO) + return err; + + word = ret; + *(u32 *)p_data = word; + p_data = p_data + 4; + addr = addr + 4; + } + } + + return 0; +} + +static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter) +{ + u32 status; + int retries = QLC_83XX_FLASH_READ_RETRY_COUNT; + int err = 0; + + do { + status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err); + if (err == -EIO) + return err; + + if ((status & QLC_83XX_FLASH_STATUS_READY) == + QLC_83XX_FLASH_STATUS_READY) + break; + + usleep_range(1000, 1100); + } while (--retries); + + if (!retries) + return -EIO; + + return 0; +} + +int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter) +{ + int ret; + u32 cmd; + cmd = adapter->ahw->fdt.write_statusreg_cmd; + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd)); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + adapter->ahw->fdt.write_enable_bits); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_SECOND_ERASE_MS_VAL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) + return -EIO; + + return 0; +} + +int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter) +{ + int ret; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | + adapter->ahw->fdt.write_statusreg_cmd)); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + adapter->ahw->fdt.write_disable_bits); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_SECOND_ERASE_MS_VAL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) + return -EIO; + + return 0; +} + +int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter) +{ + int ret, err = 0; + u32 mfg_id; + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_READ_CTRL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err); + if (err == -EIO) { + qlcnic_83xx_unlock_flash(adapter); + return err; + } + + adapter->flash_mfg_id = (mfg_id & 0xFF); + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter) +{ + int count, fdt_size, ret 
= 0; + + fdt_size = sizeof(struct qlcnic_fdt); + count = fdt_size / sizeof(u32); + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + memset(&adapter->ahw->fdt, 0, fdt_size); + ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION, + (u8 *)&adapter->ahw->fdt, + count); + qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count); + qlcnic_83xx_unlock_flash(adapter); + return ret; +} + +int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter, + u32 sector_start_addr) +{ + u32 reversed_addr, addr1, addr2, cmd; + int ret = -EIO; + + if (qlcnic_83xx_lock_flash(adapter) != 0) + return -EIO; + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", + __func__, __LINE__); + return ret; + } + } + + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + addr1 = (sector_start_addr & 0xFF) << 16; + addr2 = (sector_start_addr & 0xFF0000) >> 16; + reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00); + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + reversed_addr); + cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd; + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd); + else + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_OEM_ERASE_SIG); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_LAST_ERASE_MS_VAL); + + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return ret; + } + } + + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr, + u32 *p_data) +{ + int ret = -EIO; + u32 addr1 = 0x00800000 | (addr >> 2); + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_LAST_ERASE_MS_VAL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + return 0; +} + +int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr, + u32 *p_data, int count) +{ + u32 temp; + int ret = -EIO, err = 0; + + if ((count < QLC_83XX_FLASH_WRITE_MIN) || + (count > QLC_83XX_FLASH_WRITE_MAX)) { + dev_err(&adapter->pdev->dev, + "%s: Invalid word count\n", __func__); + return -EIO; + } + + temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err); + if (err == -EIO) + return err; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL, + (temp | QLC_83XX_FLASH_SPI_CTRL)); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_ADDR_TEMP_VAL); + + /* First DWORD write */ + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++); + 
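qlcnic_83xx_erase_flash_sector() above writes the sector address into the WRDATA register with its lowest and highest address bytes exchanged before issuing the erase command, presumably to match the byte order the serial flash part expects for its 24-bit address. The fragment below reproduces only that reordering so it can be checked in isolation; it involves no hardware access.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same reordering as the driver: byte 0 and byte 2 of the 24-bit sector
 * address trade places, byte 1 keeps its position. */
static uint32_t reverse_sector_addr(uint32_t sector_start_addr)
{
	uint32_t addr1 = (sector_start_addr & 0xFF) << 16;
	uint32_t addr2 = (sector_start_addr & 0xFF0000) >> 16;

	return addr1 | addr2 | (sector_start_addr & 0xFF00);
}

int main(void)
{
	uint32_t addr = 0x00123456u;

	/* Prints "0x123456 -> 0x563412". */
	printf("0x%06" PRIx32 " -> 0x%06" PRIx32 "\n",
	       addr, reverse_sector_addr(addr));
	return 0;
}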
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_FIRST_MS_PATTERN); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + count--; + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL); + /* Second to N-1 DWORD writes */ + while (count != 1) { + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, + *p_data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_SECOND_MS_PATTERN); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + count--; + } + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_ADDR_TEMP_VAL | + (addr >> 2)); + /* Last DWORD write */ + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_LAST_MS_PATTERN); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s: failed at %d\n", __func__, __LINE__); + return -EIO; + } + + ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err); + if (err == -EIO) + return err; + + if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) { + dev_err(&adapter->pdev->dev, "%s: failed at %d\n", + __func__, __LINE__); + /* Operation failed, clear error bit */ + temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err); + if (err == -EIO) + return err; + + qlcnic_83xx_wrt_reg_indirect(adapter, + QLC_83XX_FLASH_SPI_CONTROL, + (temp | QLC_83XX_FLASH_SPI_CTRL)); + } + + return 0; +} + +static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter) +{ + u32 val, id; + + val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); + + /* Check if recovery need to be performed by the calling function */ + if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) { + val = val & ~0x3F; + val = val | ((adapter->portnum << 2) | + QLC_83XX_NEED_DRV_LOCK_RECOVERY); + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); + dev_info(&adapter->pdev->dev, + "%s: lock recovery initiated\n", __func__); + msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY); + val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK); + id = ((val >> 2) & 0xF); + if (id == adapter->portnum) { + val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK; + val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS; + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); + /* Force release the lock */ + QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); + /* Clear recovery bits */ + val = val & ~0x3F; + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val); + dev_info(&adapter->pdev->dev, + "%s: lock recovery completed\n", __func__); + } else { + dev_info(&adapter->pdev->dev, + "%s: func %d to resume lock recovery process\n", + __func__, id); + } + } else { + dev_info(&adapter->pdev->dev, + "%s: lock recovery initiated by other functions\n", + __func__); + } +} + +int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter) +{ + u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0; + int max_attempt = 0; + + while (status == 0) { + status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK); + if (status) + break; + + msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY); + i++; + + if (i == 1) + temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + + if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) { + val = QLCRDX(adapter->ahw, 
QLC_83XX_DRV_LOCK_ID); + if (val == temp) { + id = val & 0xFF; + dev_info(&adapter->pdev->dev, + "%s: lock to be recovered from %d\n", + __func__, id); + qlcnic_83xx_recover_driver_lock(adapter); + i = 0; + max_attempt++; + } else { + dev_err(&adapter->pdev->dev, + "%s: failed to get lock\n", __func__); + return -EIO; + } + } + + /* Force exit from while loop after few attempts */ + if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) { + dev_err(&adapter->pdev->dev, + "%s: failed to get lock\n", __func__); + return -EIO; + } + } + + val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + lock_alive_counter = val >> 8; + lock_alive_counter++; + val = lock_alive_counter << 8 | adapter->portnum; + QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val); + + return 0; +} + +void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter) +{ + u32 val, lock_alive_counter, id; + + val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + id = val & 0xFF; + lock_alive_counter = val >> 8; + + if (id != adapter->portnum) + dev_err(&adapter->pdev->dev, + "%s:Warning func %d is unlocking lock owned by %d\n", + __func__, adapter->portnum, id); + + val = (lock_alive_counter << 8) | 0xFF; + QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val); + QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK); +} + +int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr, + u32 *data, u32 count) +{ + int i, j, ret = 0; + u32 temp; + + /* Check alignment */ + if (addr & 0xF) + return -EIO; + + mutex_lock(&adapter->ahw->mem_lock); + qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0); + + for (i = 0; i < count; i++, addr += 16) { + if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX)) || + (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET, + QLCNIC_ADDR_DDR_NET_MAX)))) { + mutex_unlock(&adapter->ahw->mem_lock); + return -EIO; + } + + qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr); + qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++); + qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++); + qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++); + qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++); + qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE); + qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL); + + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + /* Status check failure */ + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_WARNING + "MS memory write failed\n"); + mutex_unlock(&adapter->ahw->mem_lock); + return -EIO; + } + } + + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, + u8 *p_data, int count) +{ + u32 word, addr = flash_addr, ret; + ulong indirect_addr; + int i, err = 0; + + if (qlcnic_83xx_lock_flash(adapter) != 0) + return -EIO; + + if (addr & 0x3) { + dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + for (i = 0; i < count; i++) { + if (qlcnic_83xx_wrt_reg_indirect(adapter, + QLC_83XX_FLASH_DIRECT_WINDOW, + (addr))) { + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); + ret = QLCRD32(adapter, indirect_addr, &err); + if (err == -EIO) + return err; + + word = ret; + *(u32 *)p_data = word; + p_data = p_data + 4; + addr = addr + 4; + } + + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) +{ + u8 pci_func; + int 
err; + u32 config = 0, state; + struct qlcnic_cmd_args cmd; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (qlcnic_sriov_vf_check(adapter)) + pci_func = adapter->portnum; + else + pci_func = ahw->pci_func; + + state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func)); + if (!QLC_83xx_FUNC_VAL(state, pci_func)) { + dev_info(&adapter->pdev->dev, "link state down\n"); + return config; + } + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS); + if (err) + return err; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_info(&adapter->pdev->dev, + "Get Link Status Command failed: 0x%x\n", err); + goto out; + } else { + config = cmd.rsp.arg[1]; + switch (QLC_83XX_CURRENT_LINK_SPEED(config)) { + case QLC_83XX_10M_LINK: + ahw->link_speed = SPEED_10; + break; + case QLC_83XX_100M_LINK: + ahw->link_speed = SPEED_100; + break; + case QLC_83XX_1G_LINK: + ahw->link_speed = SPEED_1000; + break; + case QLC_83XX_10G_LINK: + ahw->link_speed = SPEED_10000; + break; + default: + ahw->link_speed = 0; + break; + } + config = cmd.rsp.arg[3]; + switch (QLC_83XX_SFP_MODULE_TYPE(config)) { + case QLC_83XX_MODULE_FIBRE_10GBASE_LRM: + case QLC_83XX_MODULE_FIBRE_10GBASE_LR: + case QLC_83XX_MODULE_FIBRE_10GBASE_SR: + ahw->supported_type = PORT_FIBRE; + ahw->port_type = QLCNIC_XGBE; + break; + case QLC_83XX_MODULE_FIBRE_1000BASE_SX: + case QLC_83XX_MODULE_FIBRE_1000BASE_LX: + case QLC_83XX_MODULE_FIBRE_1000BASE_CX: + ahw->supported_type = PORT_FIBRE; + ahw->port_type = QLCNIC_GBE; + break; + case QLC_83XX_MODULE_TP_1000BASE_T: + ahw->supported_type = PORT_TP; + ahw->port_type = QLCNIC_GBE; + break; + case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP: + case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP: + case QLC_83XX_MODULE_DA_10GE_LEGACY_CP: + case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP: + ahw->supported_type = PORT_DA; + ahw->port_type = QLCNIC_XGBE; + break; + default: + ahw->supported_type = PORT_OTHER; + ahw->port_type = QLCNIC_XGBE; + } + if (config & 1) + err = 1; + } +out: + qlcnic_free_mbx_args(&cmd); + return config; +} + +int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 config = 0; + int status = 0; + + if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { + /* Get port configuration info */ + status = qlcnic_83xx_get_port_info(adapter); + /* Get Link Status related info */ + config = qlcnic_83xx_test_link(adapter); + ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); + } + + /* hard code until there is a way to get it from flash */ + ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; + + if (netif_running(adapter->netdev) && ahw->has_link_events) { + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->duplex = ahw->link_duplex; + ecmd->autoneg = ahw->link_autoneg; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->autoneg = AUTONEG_DISABLE; + } + + ecmd->supported = (SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_Autoneg); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ahw->port_config & QLC_83XX_10_CAPABLE) + ecmd->advertising |= SUPPORTED_10baseT_Full; + if (ahw->port_config & QLC_83XX_100_CAPABLE) + ecmd->advertising |= SUPPORTED_100baseT_Full; + if (ahw->port_config & QLC_83XX_1G_CAPABLE) + ecmd->advertising |= SUPPORTED_1000baseT_Full; + if (ahw->port_config & QLC_83XX_10G_CAPABLE) + ecmd->advertising |= SUPPORTED_10000baseT_Full; + if 
(ahw->port_config & QLC_83XX_AUTONEG_ENABLE) + ecmd->advertising |= ADVERTISED_Autoneg; + } else { + switch (ahw->link_speed) { + case SPEED_10: + ecmd->advertising = SUPPORTED_10baseT_Full; + break; + case SPEED_100: + ecmd->advertising = SUPPORTED_100baseT_Full; + break; + case SPEED_1000: + ecmd->advertising = SUPPORTED_1000baseT_Full; + break; + case SPEED_10000: + ecmd->advertising = SUPPORTED_10000baseT_Full; + break; + default: + break; + } + + } + + switch (ahw->supported_type) { + case PORT_FIBRE: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case PORT_TP: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + break; + case PORT_DA: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + ecmd->transceiver = XCVR_EXTERNAL; + break; + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + ecmd->transceiver = XCVR_EXTERNAL; + break; + } + ecmd->phy_address = ahw->physical_port; + return status; +} + +int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 config = adapter->ahw->port_config; + int status = 0; + + /* 83xx devices do not support Half duplex */ + if (ecmd->duplex == DUPLEX_HALF) { + netdev_info(adapter->netdev, + "Half duplex mode not supported\n"); + return -EINVAL; + } + + if (ecmd->autoneg) { + ahw->port_config |= QLC_83XX_AUTONEG_ENABLE; + ahw->port_config |= (QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + } else { /* force speed */ + ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE; + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_10: + ahw->port_config &= ~(QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_10_CAPABLE; + break; + case SPEED_100: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_100_CAPABLE; + break; + case SPEED_1000: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_100_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_1G_CAPABLE; + break; + case SPEED_10000: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE); + ahw->port_config |= QLC_83XX_10G_CAPABLE; + break; + default: + return -EINVAL; + } + } + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + netdev_info(adapter->netdev, + "Failed to Set Link Speed and autoneg.\n"); + ahw->port_config = config; + } + + return status; +} + +static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd, + u64 *data, int index) +{ + u32 low, hi; + u64 val; + + low = cmd->rsp.arg[index]; + hi = cmd->rsp.arg[index + 1]; + val = (((u64) low) | (((u64) hi) << 32)); + *data++ = val; + return data; +} + +static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd, u64 *data, + int type, int *ret) +{ + int err, k, total_regs; + + *ret = 0; + err = qlcnic_issue_cmd(adapter, cmd); + if (err != QLCNIC_RCODE_SUCCESS) { + dev_info(&adapter->pdev->dev, + "Error in get statistics mailbox command\n"); + *ret = -EIO; + return data; + } + total_regs = cmd->rsp.num; + switch (type) { + case QLC_83XX_STAT_MAC: + /* fill in MAC tx counters */ + for (k = 2; k < 28; k += 
2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 24 bytes of reserved area */ + /* fill in MAC rx counters */ + for (k += 6; k < 60; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 24 bytes of reserved area */ + /* fill in MAC rx frame stats */ + for (k += 6; k < 80; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* fill in eSwitch stats */ + for (; k < total_regs; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + break; + case QLC_83XX_STAT_RX: + for (k = 2; k < 8; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 8 bytes of reserved data */ + for (k += 2; k < 24; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 8 bytes containing RE1FBQ error data */ + for (k += 2; k < total_regs; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + break; + case QLC_83XX_STAT_TX: + for (k = 2; k < 10; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + /* skip 8 bytes of reserved data */ + for (k += 2; k < total_regs; k += 2) + data = qlcnic_83xx_copy_stats(cmd, data, k); + break; + default: + dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n"); + *ret = -EIO; + } + return data; +} + +void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) +{ + struct qlcnic_cmd_args cmd; + struct net_device *netdev = adapter->netdev; + int ret = 0; + + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); + if (ret) + return; + /* Get Tx stats */ + cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16); + cmd.rsp.num = QLC_83XX_TX_STAT_REGS; + data = qlcnic_83xx_fill_stats(adapter, &cmd, data, + QLC_83XX_STAT_TX, &ret); + if (ret) { + netdev_err(netdev, "Error getting Tx stats\n"); + goto out; + } + /* Get MAC stats */ + cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16); + cmd.rsp.num = QLC_83XX_MAC_STAT_REGS; + memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num); + data = qlcnic_83xx_fill_stats(adapter, &cmd, data, + QLC_83XX_STAT_MAC, &ret); + if (ret) { + netdev_err(netdev, "Error getting MAC stats\n"); + goto out; + } + /* Get Rx stats */ + cmd.req.arg[1] = adapter->recv_ctx->context_id << 16; + cmd.rsp.num = QLC_83XX_RX_STAT_REGS; + memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num); + data = qlcnic_83xx_fill_stats(adapter, &cmd, data, + QLC_83XX_STAT_RX, &ret); + if (ret) + netdev_err(netdev, "Error getting Rx stats\n"); +out: + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter) +{ + u32 major, minor, sub; + + major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); + + if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) { + dev_info(&adapter->pdev->dev, "%s: Reg test failed\n", + __func__); + return 1; + } + return 0; +} + +inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter) +{ + return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) * + sizeof(*adapter->ahw->ext_reg_tbl)) + + (ARRAY_SIZE(qlcnic_83xx_reg_tbl) * + sizeof(*adapter->ahw->reg_tbl)); +} + +int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff) +{ + int i, j = 0; + + for (i = QLCNIC_DEV_INFO_SIZE + 1; + j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++) + regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j); + + for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++) + regs_buff[i++] = QLCRDX(adapter->ahw, j); + return i; +} + +int qlcnic_83xx_interrupt_test(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = 
netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_cmd_args cmd; + u8 val, drv_sds_rings = adapter->drv_sds_rings; + u8 drv_tx_rings = adapter->drv_tx_rings; + u32 data; + u16 intrpt_id, id; + int ret; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + netdev_info(netdev, "Device is resetting\n"); + return -EBUSY; + } + + if (qlcnic_get_diag_lock(adapter)) { + netdev_info(netdev, "Device in diagnostics mode\n"); + return -EBUSY; + } + + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, + drv_sds_rings); + if (ret) + goto fail_diag_irq; + + ahw->diag_cnt = 0; + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) + goto fail_diag_irq; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + intrpt_id = ahw->intr_tbl[0].id; + else + intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); + + cmd.req.arg[1] = 1; + cmd.req.arg[2] = intrpt_id; + cmd.req.arg[3] = BIT_0; + + ret = qlcnic_issue_cmd(adapter, &cmd); + data = cmd.rsp.arg[2]; + id = LSW(data); + val = LSB(MSW(data)); + if (id != intrpt_id) + dev_info(&adapter->pdev->dev, + "Interrupt generated: 0x%x, requested:0x%x\n", + id, intrpt_id); + if (val) + dev_err(&adapter->pdev->dev, + "Interrupt test error: 0x%x\n", val); + if (ret) + goto done; + + msleep(20); + ret = !ahw->diag_cnt; + +done: + qlcnic_free_mbx_args(&cmd); + qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); + +fail_diag_irq: + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; + qlcnic_release_diag_lock(adapter); + return ret; +} + +void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int status = 0; + u32 config; + + status = qlcnic_83xx_get_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "%s: Get Pause Config failed\n", __func__); + return; + } + config = ahw->port_config; + if (config & QLC_83XX_CFG_STD_PAUSE) { + switch (MSW(config)) { + case QLC_83XX_TX_PAUSE: + pause->tx_pause = 1; + break; + case QLC_83XX_RX_PAUSE: + pause->rx_pause = 1; + break; + case QLC_83XX_TX_RX_PAUSE: + default: + /* Backward compatibility for existing + * flash definitions + */ + pause->tx_pause = 1; + pause->rx_pause = 1; + } + } + + if (QLC_83XX_AUTONEG(config)) + pause->autoneg = 1; +} + +int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int status = 0; + u32 config; + + status = qlcnic_83xx_get_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "%s: Get Pause Config failed.\n", __func__); + return status; + } + config = ahw->port_config; + + if (ahw->port_type == QLCNIC_GBE) { + if (pause->autoneg) + ahw->port_config |= QLC_83XX_ENABLE_AUTONEG; + if (!pause->autoneg) + ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG; + } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) { + return -EOPNOTSUPP; + } + + if (!(config & QLC_83XX_CFG_STD_PAUSE)) + ahw->port_config |= QLC_83XX_CFG_STD_PAUSE; + + if (pause->rx_pause && pause->tx_pause) { + ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE; + } else if (pause->rx_pause && !pause->tx_pause) { + ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE; + ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE; + } else if (pause->tx_pause && !pause->rx_pause) { + ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE; + ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE; + } else if (!pause->rx_pause && !pause->tx_pause) { + 
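The branches around this point in qlcnic_83xx_set_pauseparam() reduce the four rx/tx pause combinations requested through ethtool to a handful of QLC_83XX_CFG_STD_* bits in the cached port configuration. The sketch below models only that selection logic with stand-in bit values; the real mask definitions and the autoneg handling live in the driver headers and are not reproduced.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in values; the actual QLC_83XX_CFG_STD_* masks come from the
 * qlcnic 83xx headers and may differ. */
#define CFG_STD_PAUSE       (1u << 0)
#define CFG_STD_TX_PAUSE    (1u << 1)
#define CFG_STD_RX_PAUSE    (1u << 2)
#define CFG_STD_TX_RX_PAUSE (CFG_STD_TX_PAUSE | CFG_STD_RX_PAUSE)

/* Same decision structure as the driver: force "standard pause" mode on,
 * then pick the direction bits, or clear everything when both are off. */
static uint32_t apply_pause(uint32_t config, bool rx, bool tx)
{
	config |= CFG_STD_PAUSE;

	if (rx && tx) {
		config |= CFG_STD_TX_RX_PAUSE;
	} else if (rx) {
		config &= ~CFG_STD_TX_PAUSE;
		config |= CFG_STD_RX_PAUSE;
	} else if (tx) {
		config &= ~CFG_STD_RX_PAUSE;
		config |= CFG_STD_TX_PAUSE;
	} else {
		config &= ~(CFG_STD_TX_RX_PAUSE | CFG_STD_PAUSE);
	}
	return config;
}

int main(void)
{
	int rx, tx;

	for (rx = 0; rx <= 1; rx++)
		for (tx = 0; tx <= 1; tx++)
			printf("rx=%d tx=%d -> config=0x%" PRIx32 "\n",
			       rx, tx, apply_pause(0, rx, tx));
	return 0;
}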
ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE | + QLC_83XX_CFG_STD_PAUSE); + } + status = qlcnic_83xx_set_port_config(adapter); + if (status) { + dev_err(&adapter->pdev->dev, + "%s: Set Pause Config failed.\n", __func__); + ahw->port_config = config; + } + return status; +} + +static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter) +{ + int ret, err = 0; + u32 temp; + + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, + QLC_83XX_FLASH_OEM_READ_SIG); + qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL, + QLC_83XX_FLASH_READ_CTRL); + ret = qlcnic_83xx_poll_flash_status_reg(adapter); + if (ret) + return -EIO; + + temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err); + if (err == -EIO) + return err; + + return temp & 0xFF; +} + +int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter) +{ + int status; + + status = qlcnic_83xx_read_flash_status_reg(adapter); + if (status == -EIO) { + dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n", + __func__); + return 1; + } + return 0; +} + +static int qlcnic_83xx_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_83xx_disable_mbx_intr(adapter); + cancel_delayed_work_sync(&adapter->idc_aen_work); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + return 0; +} + +static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + int err = 0; + + err = qlcnic_83xx_idc_init(adapter); + if (err) + return err; + + if (ahw->nic_mode == QLCNIC_VNIC_MODE) { + if (ahw->op_mode == QLCNIC_MGMT_FUNC) { + qlcnic_83xx_set_vnic_opmode(adapter); + } else { + err = qlcnic_83xx_check_vnic_state(adapter); + if (err) + return err; + } + } + + err = qlcnic_83xx_idc_reattach_driver(adapter); + if (err) + return err; + + qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, + idc->delay); + return err; +} + +void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx) +{ + reinit_completion(&mbx->completion); + set_bit(QLC_83XX_MBX_READY, &mbx->status); +} + +void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx) +{ + if (!mbx) + return; + + destroy_workqueue(mbx->work_q); + kfree(mbx); +} + +static inline void +qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); + return; + } + complete(&cmd->completion); +} + +static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + struct list_head *head = &mbx->cmd_q; + struct qlcnic_cmd_args *cmd = NULL; + + spin_lock(&mbx->queue_lock); + + while (!list_empty(head)) { + cmd = list_entry(head->next, struct qlcnic_cmd_args, list); + dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n", + __func__, cmd->cmd_op); + list_del(&cmd->list); + mbx->num_cmds--; + qlcnic_83xx_notify_cmd_completion(adapter, cmd); + } + + spin_unlock(&mbx->queue_lock); +} + +static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; + u32 host_mbx_ctrl; + + if 
(!test_bit(QLC_83XX_MBX_READY, &mbx->status)) + return -EBUSY; + + host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); + if (host_mbx_ctrl) { + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + ahw->idc.collect_dump = 1; + return -EIO; + } + + return 0; +} + +static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter, + u8 issue_cmd) +{ + if (issue_cmd) + QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); + else + QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER); +} + +static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + spin_lock(&mbx->queue_lock); + + list_del(&cmd->list); + mbx->num_cmds--; + + spin_unlock(&mbx->queue_lock); + + qlcnic_83xx_notify_cmd_completion(adapter, cmd); +} + +static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int i, j; + + if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) { + mbx_cmd = cmd->req.arg[0]; + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + for (i = 1; i < cmd->req.num; i++) + writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i)); + } else { + fw_hal_version = ahw->fw_hal_version; + hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32); + total_size = cmd->pay_size + hdr_size; + tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16; + mbx_cmd = tmp | fw_hal_version << 29; + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0)); + + /* Back channel specific operations bits */ + mbx_cmd = 0x1 | 1 << 4; + + if (qlcnic_sriov_pf_check(adapter)) + mbx_cmd |= cmd->func_num << 5; + + writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1)); + + for (i = 2, j = 0; j < hdr_size; i++, j++) + writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i)); + for (j = 0; j < cmd->pay_size; j++, i++) + writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i)); + } +} + +void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + if (!mbx) + return; + + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + complete(&mbx->completion); + cancel_work_sync(&mbx->work); + flush_workqueue(mbx->work_q); + qlcnic_83xx_flush_mbx_queue(adapter); +} + +static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd, + unsigned long *timeout) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + + if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) { + atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + init_completion(&cmd->completion); + cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN; + + spin_lock(&mbx->queue_lock); + + list_add_tail(&cmd->list, &mbx->cmd_q); + mbx->num_cmds++; + cmd->total_cmds = mbx->num_cmds; + *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT; + queue_work(mbx->work_q, &mbx->work); + + spin_unlock(&mbx->queue_lock); + + return 0; + } + + return -EBUSY; +} + +static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + u8 mac_cmd_rcode; + u32 fw_data; + + if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) { + fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2)); + mac_cmd_rcode = (u8)fw_data; + if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE || + mac_cmd_rcode == QLC_83XX_MAC_PRESENT || + mac_cmd_rcode == QLC_83XX_MAC_ABSENT) { + cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; + return QLCNIC_RCODE_SUCCESS; + } + } + + return -EINVAL; +} + +static void qlcnic_83xx_decode_mbx_rsp(struct 
qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + u8 mbx_err_code; + u32 fw_data; + + fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); + mbx_err_code = QLCNIC_MBX_STATUS(fw_data); + qlcnic_83xx_get_mbx_data(adapter, cmd); + + switch (mbx_err_code) { + case QLCNIC_MBX_RSP_OK: + case QLCNIC_MBX_PORT_RSP_OK: + cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS; + break; + default: + if (!qlcnic_83xx_check_mac_rcode(adapter, cmd)) + break; + + dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n", + __func__, cmd->cmd_op, cmd->type, ahw->pci_func, + ahw->op_mode, mbx_err_code); + cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED; + qlcnic_dump_mbx(adapter, cmd); + } + + return; +} + +static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 offset; + + offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK); + dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x", + readl(ahw->pci_base0 + offset), + QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL), + QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL), + QLCRDX(ahw, QLCNIC_FW_MBX_CTRL)); +} + +static void qlcnic_83xx_mailbox_worker(struct work_struct *work) +{ + struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox, + work); + struct qlcnic_adapter *adapter = mbx->adapter; + struct qlcnic_mbx_ops *mbx_ops = mbx->ops; + struct device *dev = &adapter->pdev->dev; + atomic_t *rsp_status = &mbx->rsp_status; + struct list_head *head = &mbx->cmd_q; + struct qlcnic_hardware_context *ahw; + struct qlcnic_cmd_args *cmd = NULL; + + ahw = adapter->ahw; + + while (true) { + if (qlcnic_83xx_check_mbx_status(adapter)) { + qlcnic_83xx_flush_mbx_queue(adapter); + return; + } + + atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + + spin_lock(&mbx->queue_lock); + + if (list_empty(head)) { + spin_unlock(&mbx->queue_lock); + return; + } + cmd = list_entry(head->next, struct qlcnic_cmd_args, list); + + spin_unlock(&mbx->queue_lock); + + mbx_ops->encode_cmd(adapter, cmd); + mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST); + + if (wait_for_completion_timeout(&mbx->completion, + QLC_83XX_MBX_TIMEOUT)) { + mbx_ops->decode_resp(adapter, cmd); + mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION); + } else { + dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n", + __func__, cmd->cmd_op, cmd->type, ahw->pci_func, + ahw->op_mode); + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_dump_mailbox_registers(adapter); + qlcnic_83xx_get_mbx_data(adapter, cmd); + qlcnic_dump_mbx(adapter, cmd); + qlcnic_83xx_idc_request_reset(adapter, + QLCNIC_FORCE_FW_DUMP_KEY); + cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT; + } + mbx_ops->dequeue_cmd(adapter, cmd); + } +} + +static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = { + .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd, + .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd, + .decode_resp = qlcnic_83xx_decode_mbx_rsp, + .encode_cmd = qlcnic_83xx_encode_mbx_cmd, + .nofity_fw = qlcnic_83xx_signal_mbx_cmd, +}; + +int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx; + + ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL); + if (!ahw->mailbox) + return -ENOMEM; + + mbx = ahw->mailbox; + mbx->ops = &qlcnic_83xx_mbx_ops; + mbx->adapter = adapter; + + 
spin_lock_init(&mbx->queue_lock); + spin_lock_init(&mbx->aen_lock); + INIT_LIST_HEAD(&mbx->cmd_q); + init_completion(&mbx->completion); + + mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox"); + if (mbx->work_q == NULL) { + kfree(mbx); + return -ENOMEM; + } + + INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker); + set_bit(QLC_83XX_MBX_READY, &mbx->status); + return 0; +} + +static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (state == pci_channel_io_normal) + return PCI_ERS_RESULT_RECOVERED; + + set_bit(__QLCNIC_AER, &adapter->state); + set_bit(__QLCNIC_RESETTING, &adapter->state); + + qlcnic_83xx_aer_stop_poll_work(adapter); + + pci_save_state(pdev); + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + int err = 0; + + pdev->error_state = pci_channel_io_normal; + err = pci_enable_device(pdev); + if (err) + goto disconnect; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + err = qlcnic_83xx_aer_reset(adapter); + if (err == 0) + return PCI_ERS_RESULT_RECOVERED; +disconnect: + clear_bit(__QLCNIC_AER, &adapter->state); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return PCI_ERS_RESULT_DISCONNECT; +} + +static void qlcnic_83xx_io_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + pci_cleanup_aer_uncorrect_error_status(pdev); + if (test_and_clear_bit(__QLCNIC_AER, &adapter->state)) + qlcnic_83xx_aer_start_poll_work(adapter); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h new file mode 100644 index 000000000..db899e2cd --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -0,0 +1,661 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef __QLCNIC_83XX_HW_H +#define __QLCNIC_83XX_HW_H + +#include +#include +#include "qlcnic_hw.h" + +#define QLCNIC_83XX_BAR0_LENGTH 0x4000 + +/* Directly mapped registers */ +#define QLC_83XX_CRB_WIN_BASE 0x3800 +#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4)) +#define QLC_83XX_SEM_LOCK_BASE 0x3840 +#define QLC_83XX_SEM_UNLOCK_BASE 0x3844 +#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8)) +#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8)) +#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 
4 : 0)) +#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) +#define QLC_83XX_LINK_SPEED_FACTOR 10 +#define QLC_83xx_FUNC_VAL(v, f) ((v) & (1 << (f * 4))) +#define QLC_83XX_INTX_PTR 0x38C0 +#define QLC_83XX_INTX_TRGR 0x38C4 +#define QLC_83XX_INTX_MASK 0x38C8 + +#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100 +#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20 +#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1 +#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2 +#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3 +#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200 +#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3 +#define QLC_83XX_LB_WAIT_COUNT 250 +#define QLC_83XX_LB_MSLEEP_COUNT 20 +#define QLC_83XX_NO_NIC_RESOURCE 0x5 +#define QLC_83XX_MAC_PRESENT 0xC +#define QLC_83XX_MAC_ABSENT 0xD + + +#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024) + +/* PEG status definitions */ +#define QLC_83XX_CMDPEG_COMPLETE 0xff01 +#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30) +#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31) +#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF) +#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100 +#define QLC_83XX_LEGACY_INTX_DELAY 4 +#define QLC_83XX_REG_DESC 1 +#define QLC_83XX_LRO_DESC 2 +#define QLC_83XX_CTRL_DESC 3 +#define QLC_83XX_FW_CAPABILITY_TSO BIT_6 +#define QLC_83XX_FW_CAP_LRO_MSS BIT_17 +#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0 +#define QLC_83XX_HOST_SDS_MBX_IDX 8 + +#define QLCNIC_HOST_RDS_MBX_IDX 88 + +/* Pause control registers */ +#define QLC_83XX_SRE_SHIM_REG 0x0D200284 +#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4 +#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4 +#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388 +#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388 +#define QLC_83XX_PORT0_TC_STATS 0x0B20039C +#define QLC_83XX_PORT1_TC_STATS 0x0B20139C +#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704 +#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704 + +/* Peg PC status registers */ +#define QLC_83XX_CRB_PEG_NET_0 0x3400003c +#define QLC_83XX_CRB_PEG_NET_1 0x3410003c +#define QLC_83XX_CRB_PEG_NET_2 0x3420003c +#define QLC_83XX_CRB_PEG_NET_3 0x3430003c +#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c + +/* Firmware image definitions */ +#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000 +#define QLC_83XX_FW_FILE_NAME "/*(DEBLOBBED)*/" +#define QLC_83XX_POST_FW_FILE_NAME "/*(DEBLOBBED)*/" +#define QLC_84XX_FW_FILE_NAME "/*(DEBLOBBED)*/" +#define QLC_83XX_BOOT_FROM_FLASH 0 +#define QLC_83XX_BOOT_FROM_FILE 0x12345678 + +#define QLC_FW_FILE_NAME_LEN 20 +#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 + +#define QLC_83XX_MBX_POST_BC_OP 0x1 +#define QLC_83XX_MBX_COMPLETION 0x0 +#define QLC_83XX_MBX_REQUEST 0x1 + +#define QLC_83XX_MBX_TIMEOUT (5 * HZ) +#define QLC_83XX_MBX_CMD_LOOP 5000000 + +/* status descriptor mailbox data + * @phy_addr_{low|high}: physical address of buffer + * @sds_ring_size: buffer size + * @intrpt_id: interrupt id + * @intrpt_val: source of interrupt + */ +struct qlcnic_sds_mbx { + u32 phy_addr_low; + u32 phy_addr_high; + u32 rsvd1[4]; +#if defined(__LITTLE_ENDIAN) + u16 sds_ring_size; + u16 rsvd2; + u16 rsvd3[2]; + u16 intrpt_id; + u8 intrpt_val; + u8 rsvd4; +#elif defined(__BIG_ENDIAN) + u16 rsvd2; + u16 sds_ring_size; + u16 rsvd3[2]; + u8 rsvd4; + u8 intrpt_val; + u16 intrpt_id; +#endif + u32 rsvd5; +} __packed; + +/* receive descriptor buffer data + * phy_addr_reg_{low|high}: physical address of regular buffer + * phy_addr_jmb_{low|high}: physical address of jumbo buffer + * reg_ring_sz: size of regular buffer + * reg_ring_len: no. of entries in regular buffer + * jmb_ring_len: no. 
of entries in jumbo buffer
+ * jmb_ring_sz: size of jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+	u32 phy_addr_reg_low;
+	u32 phy_addr_reg_high;
+	u32 phy_addr_jmb_low;
+	u32 phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
+	u16 reg_ring_sz;
+	u16 reg_ring_len;
+	u16 jmb_ring_sz;
+	u16 jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+	u16 reg_ring_len;
+	u16 reg_ring_sz;
+	u16 jmb_ring_len;
+	u16 jmb_ring_sz;
+#endif
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+	u32 reg_buf;
+	u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @context_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+	u8 rcv_num;
+	u8 sts_num;
+	u16 ctx_id;
+	u8 state;
+	u8 num_pci_func;
+	u8 phy_port;
+	u8 vport_id;
+#elif defined(__BIG_ENDIAN)
+	u16 ctx_id;
+	u8 sts_num;
+	u8 rcv_num;
+	u8 vport_id;
+	u8 phy_port;
+	u8 num_pci_func;
+	u8 state;
+#endif
+	u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+	struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+	u8 rcv_num;
+	u8 sts_num;
+	u16 ctx_id;
+#elif defined(__BIG_ENDIAN)
+	u16 ctx_id;
+	u8 sts_num;
+	u8 rcv_num;
+#endif
+	u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+	struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
+ * @size: length of transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of interrupt
+ */
+struct qlcnic_tx_mbx {
+	u32 phys_addr_low;
+	u32 phys_addr_high;
+	u32 cnsmr_index_low;
+	u32 cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
+	u16 size;
+	u16 intr_id;
+	u8 src;
+	u8 rsvd[3];
+#elif defined(__BIG_ENDIAN)
+	u16 intr_id;
+	u16 size;
+	u8 rsvd[3];
+	u8 src;
+#endif
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+
+struct qlcnic_tx_mbx_out {
+	u32 host_prod;
+#if defined(__LITTLE_ENDIAN)
+	u16 ctx_id;
+	u8 state;
+	u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+	u8 rsvd;
+	u8 state;
+	u16 ctx_id;
+#endif
+} __packed;
+
+struct qlcnic_intrpt_config {
+	u8 type;
+	u8 enabled;
+	u16 id;
+	u32 src;
+};
+
+struct qlcnic_macvlan_mbx {
+#if defined(__LITTLE_ENDIAN)
+	u8 mac_addr0;
+	u8 mac_addr1;
+	u8 mac_addr2;
+	u8 mac_addr3;
+	u8 mac_addr4;
+	u8 mac_addr5;
+	u16 vlan;
+#elif defined(__BIG_ENDIAN)
+	u8 mac_addr3;
+	u8 mac_addr2;
+	u8 mac_addr1;
+	u8 mac_addr0;
+	u16 vlan;
+	u8 mac_addr5;
+	u8 mac_addr4;
+#endif
+};
+
+struct qlc_83xx_fw_info {
+	const struct firmware *fw;
+	char fw_file_name[QLC_FW_FILE_NAME_LEN];
+};
+
+struct qlc_83xx_reset {
+	struct qlc_83xx_reset_hdr *hdr;
+	int seq_index;
+	int seq_error;
+	int array_index;
+	u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+	u8 *buff;
+	u8 *stop_offset;
+	u8 *start_offset;
+	u8 *init_offset;
+	u8 seq_end;
+	u8 template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY	0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET		0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP		0x4
+#define QLC_83XX_IDC_TIMESTAMP			0
+#define QLC_83XX_IDC_DURATION			1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS		30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS	10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS		10
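+/* IDC wait limits in this block are in seconds; elapsed time is measured
+ * as (jiffies / HZ - idc.sec_counter). QLC_83XX_IDC_FW_POLL_DELAY is a
+ * work scheduling delay in jiffies.
+ */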
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20 +#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ) +#define QLC_83XX_IDC_FW_FAIL_THRESH 2 +#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8 +#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16 +#define QLC_83XX_IDC_MAJOR_VERSION 1 +#define QLC_83XX_IDC_MINOR_VERSION 0 +#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020 + +struct qlcnic_adapter; +struct qlcnic_fw_dump; + +struct qlc_83xx_idc { + int (*state_entry) (struct qlcnic_adapter *); + u64 sec_counter; + u64 delay; + unsigned long status; + int err_code; + int collect_dump; + u8 curr_state; + u8 prev_state; + u8 vnic_state; + u8 vnic_wait_limit; + u8 quiesce_req; + u8 delay_reset; + char **name; +}; + +enum qlcnic_vlan_operations { + QLC_VLAN_ADD = 0, + QLC_VLAN_DELETE +}; + +/* Device States */ +enum qlcnic_83xx_states { + QLC_83XX_IDC_DEV_UNKNOWN, + QLC_83XX_IDC_DEV_COLD, + QLC_83XX_IDC_DEV_INIT, + QLC_83XX_IDC_DEV_READY, + QLC_83XX_IDC_DEV_NEED_RESET, + QLC_83XX_IDC_DEV_NEED_QUISCENT, + QLC_83XX_IDC_DEV_FAILED, + QLC_83XX_IDC_DEV_QUISCENT +}; + +#define QLCNIC_MBX_RSP(reg) LSW(reg) +#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF) +#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F) +#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4)) +#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4)) + +/* Mailbox process AEN count */ +#define QLC_83XX_IDC_COMP_AEN 3 +#define QLC_83XX_MBX_AEN_CNT 5 +#define QLC_83XX_MODULE_LOADED 1 +#define QLC_83XX_MBX_READY 2 +#define QLC_83XX_MBX_AEN_ACK 3 +#define QLC_83XX_SFP_PRESENT(data) ((data) & 3) +#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3) +#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F) +#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16)) +#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10) +#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0) +#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7) +#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3) +#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7) +#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12) +#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13) +#define QLC_83XX_DCBX(data) (((data) >> 28) & 7) +#define QLC_83XX_AUTONEG(data) ((data) & BIT_15) +#define QLC_83XX_TX_PAUSE 0x10 +#define QLC_83XX_RX_PAUSE 0x20 +#define QLC_83XX_TX_RX_PAUSE 0x30 +#define QLC_83XX_CFG_STD_PAUSE (1 << 5) +#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20) +#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20) +#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20) +#define QLC_83XX_ENABLE_AUTONEG (1 << 15) +#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1) +#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1) +#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1) + +/* LED configuration settings */ +#define QLC_83XX_ENABLE_BEACON 0xe +#define QLC_83XX_BEACON_ON 1 +#define QLC_83XX_BEACON_OFF 0 +#define QLC_83XX_LED_RATE 0xff +#define QLC_83XX_LED_ACT (1 << 10) +#define QLC_83XX_LED_MOD (0 << 13) +#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \ + QLC_83XX_LED_MOD) + +#define QLC_83XX_10M_LINK 1 +#define QLC_83XX_100M_LINK 2 +#define QLC_83XX_1G_LINK 3 +#define QLC_83XX_10G_LINK 4 +#define QLC_83XX_STAT_TX 3 +#define QLC_83XX_STAT_RX 2 +#define QLC_83XX_STAT_MAC 1 +#define QLC_83XX_TX_STAT_REGS 14 +#define QLC_83XX_RX_STAT_REGS 40 +#define QLC_83XX_MAC_STAT_REGS 94 + +#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2))) +#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2)) +#define QLC_83XX_DEFAULT_OPMODE 0x55555555 +#define QLC_83XX_PRIVLEGED_FUNC 0x1 +#define 
QLC_83XX_VIRTUAL_FUNC 0x2 + +#define QLC_83XX_LB_MAX_FILTERS 2048 +#define QLC_83XX_LB_BUCKET_SIZE 256 +#define QLC_83XX_MINIMUM_VECTOR 3 +#define QLC_83XX_MAX_MC_COUNT 38 +#define QLC_83XX_MAX_UC_COUNT 4096 + +#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22 +#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000) +#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20) +#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40) +#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400) +#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000) +#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) +#define QLC_83XX_ESWITCH_CAPABILITY BIT_23 +#define QLC_83XX_SRIOV_MODE 0x1 +#define QLCNIC_BRDTYPE_83XX_10G 0x0083 + +#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010 +#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014 +#define QLC_83XX_FLASH_STATUS 0x42100004 +#define QLC_83XX_FLASH_CONTROL 0x42110004 +#define QLC_83XX_FLASH_ADDR 0x42110008 +#define QLC_83XX_FLASH_WRDATA 0x4211000C +#define QLC_83XX_FLASH_RDDATA 0x42110018 +#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030 +#define QLC_83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA)) +#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef +#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda +#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca +#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000 +#define QLC_83XX_FLASH_STATUS_READY 0x6 +#define QLC_83XX_FLASH_WRITE_MIN 2 +#define QLC_83XX_FLASH_WRITE_MAX 64 +#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1 +#define QLC_83XX_ERASE_MODE 1 +#define QLC_83XX_WRITE_MODE 2 +#define QLC_83XX_BULK_WRITE_MODE 3 +#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100 +#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300 +#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F +#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8 +#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101 +#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005 +#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000 +#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001 +#define QLC_83XX_FLASH_WRDATA_DEF 0x0 +#define QLC_83XX_FLASH_READ_CTRL 0x3F +#define QLC_83XX_FLASH_SPI_CTRL 0x4 +#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2 +#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5 +#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D +#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43 +#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F +#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D +#define QLC_83xx_FLASH_MAX_WAIT_USEC 100 +#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000 + +enum qlc_83xx_mbx_cmd_type { + QLC_83XX_MBX_CMD_WAIT = 0, + QLC_83XX_MBX_CMD_NO_WAIT, + QLC_83XX_MBX_CMD_BUSY_WAIT, +}; + +enum qlc_83xx_mbx_response_states { + QLC_83XX_MBX_RESPONSE_WAIT = 0, + QLC_83XX_MBX_RESPONSE_ARRIVED, +}; + +#define QLC_83XX_MBX_RESPONSE_FAILED 0x2 +#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3 + +/* Additional registers in 83xx */ +enum qlc_83xx_ext_regs { + QLCNIC_GLOBAL_RESET = 0, + QLCNIC_WILDCARD, + QLCNIC_INFORMANT, + QLCNIC_HOST_MBX_CTRL, + QLCNIC_FW_MBX_CTRL, + QLCNIC_BOOTLOADER_ADDR, + QLCNIC_BOOTLOADER_SIZE, + QLCNIC_FW_IMAGE_ADDR, + QLCNIC_MBX_INTR_ENBL, + QLCNIC_DEF_INT_MASK, + QLCNIC_DEF_INT_ID, + QLC_83XX_IDC_MAJ_VERSION, + QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DRV_PRESENCE, + QLC_83XX_IDC_DRV_ACK, + QLC_83XX_IDC_CTRL, + QLC_83XX_IDC_DRV_AUDIT, + QLC_83XX_IDC_MIN_VERSION, + QLC_83XX_RECOVER_DRV_LOCK, + QLC_83XX_IDC_PF_0, + QLC_83XX_IDC_PF_1, + QLC_83XX_IDC_PF_2, + QLC_83XX_IDC_PF_3, + QLC_83XX_IDC_PF_4, + QLC_83XX_IDC_PF_5, + QLC_83XX_IDC_PF_6, + QLC_83XX_IDC_PF_7, + QLC_83XX_IDC_PF_8, + 
QLC_83XX_IDC_PF_9,
+	QLC_83XX_IDC_PF_10,
+	QLC_83XX_IDC_PF_11,
+	QLC_83XX_IDC_PF_12,
+	QLC_83XX_IDC_PF_13,
+	QLC_83XX_IDC_PF_14,
+	QLC_83XX_IDC_PF_15,
+	QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+	QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+	QLC_83XX_DRV_OP_MODE,
+	QLC_83XX_VNIC_STATE,
+	QLC_83XX_DRV_LOCK,
+	QLC_83XX_DRV_UNLOCK,
+	QLC_83XX_DRV_LOCK_ID,
+	QLC_83XX_ASIC_TEMP,
+};
+
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_LB_IDC		BIT_0
+#define QLC_REGISTER_DCB_AEN		BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO	BIT_29
+#define QLC_INIT_FW_RESOURCES		BIT_31
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+			      struct qlcnic_host_tx_ring *, int);
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
+			    struct qlcnic_host_tx_ring *);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+			       struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+			  struct qlcnic_info *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+				 struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void 
qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *); +void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *); +irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *); +irqreturn_t qlcnic_83xx_intr(int, void *); +irqreturn_t qlcnic_83xx_tmp_intr(int, void *); +void qlcnic_83xx_check_vf(struct qlcnic_adapter *, + const struct pci_device_id *); +int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *); +int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *); +void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *); +void qlcnic_83xx_register_map(struct qlcnic_hardware_context *); +void qlcnic_83xx_idc_aen_work(struct work_struct *); +void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int); + +int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32); +int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int); +int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *); +int qlcnic_83xx_lock_flash(struct qlcnic_adapter *); +void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *); +int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *); +int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int); +int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *); +int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *); +int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); +int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *, + u32, u8 *, int); +int qlcnic_83xx_init(struct qlcnic_adapter *, int); +int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *); +void qlcnic_83xx_idc_poll_dev_state(struct work_struct *); +void qlcnic_83xx_idc_exit(struct qlcnic_adapter *); +void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32); +int qlcnic_83xx_lock_driver(struct qlcnic_adapter *); +void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *); +int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *); +int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *); +int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int); +int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *); +int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *, + struct qlcnic_info *, u8); +int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); +int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *); + +void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); +void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); +int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); +int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); +void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, + struct ethtool_pauseparam *); +int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *, + struct ethtool_pauseparam *); +int qlcnic_83xx_test_link(struct qlcnic_adapter *); +int qlcnic_83xx_reg_test(struct qlcnic_adapter *); +int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *); +int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); +int qlcnic_83xx_loopback_test(struct net_device *, u8); +int qlcnic_83xx_interrupt_test(struct net_device *); +int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state); +int qlcnic_83xx_flash_test(struct qlcnic_adapter *); +int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); +int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); +void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); +void 
qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); +int qlcnic_83xx_idc_init(struct qlcnic_adapter *); +int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *); +int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *); +int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *); +void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *); +int qlcnic_83xx_aer_reset(struct qlcnic_adapter *); +void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *); +u32 qlcnic_83xx_get_saved_state(void *, u32); +void qlcnic_83xx_set_saved_state(void *, u32, u32); +void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *); +u32 qlcnic_83xx_get_cap_size(void *, int); +void qlcnic_83xx_set_sys_info(void *, int, u32); +void qlcnic_83xx_store_cap_mask(void *, u32); +int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32); +#endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c new file mode 100644 index 000000000..78648e0ac --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -0,0 +1,2616 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include "qlcnic_sriov.h" +#include "qlcnic.h" +#include "qlcnic_hw.h" + +/* Reset template definitions */ +#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000 +#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000 +#define QLC_83XX_RESET_SEQ_VERSION 0x0101 + +#define QLC_83XX_OPCODE_NOP 0x0000 +#define QLC_83XX_OPCODE_WRITE_LIST 0x0001 +#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002 +#define QLC_83XX_OPCODE_POLL_LIST 0x0004 +#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008 +#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010 +#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020 +#define QLC_83XX_OPCODE_SEQ_END 0x0040 +#define QLC_83XX_OPCODE_TMPL_END 0x0080 +#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100 + +/* EPORT control registers */ +#define QLC_83XX_RESET_CONTROL 0x28084E50 +#define QLC_83XX_RESET_REG 0x28084E60 +#define QLC_83XX_RESET_PORT0 0x28084E70 +#define QLC_83XX_RESET_PORT1 0x28084E80 +#define QLC_83XX_RESET_PORT2 0x28084E90 +#define QLC_83XX_RESET_PORT3 0x28084EA0 +#define QLC_83XX_RESET_SRESHIM 0x28084EB0 +#define QLC_83XX_RESET_EPGSHIM 0x28084EC0 +#define QLC_83XX_RESET_ETHERPCS 0x28084ED0 + +static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter); +static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev); +static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); +static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev); +static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *); +static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *); + +/* Template header */ +struct qlc_83xx_reset_hdr { +#if defined(__LITTLE_ENDIAN) + u16 version; + u16 signature; + u16 size; + u16 entries; + u16 hdr_size; + u16 checksum; + u16 init_offset; + u16 start_offset; +#elif defined(__BIG_ENDIAN) + u16 signature; + u16 version; + u16 entries; + u16 size; + u16 checksum; + u16 hdr_size; + u16 start_offset; + u16 init_offset; +#endif +} __packed; + +/* Command entry header. 
*/ +struct qlc_83xx_entry_hdr { +#if defined(__LITTLE_ENDIAN) + u16 cmd; + u16 size; + u16 count; + u16 delay; +#elif defined(__BIG_ENDIAN) + u16 size; + u16 cmd; + u16 delay; + u16 count; +#endif +} __packed; + +/* Generic poll command */ +struct qlc_83xx_poll { + u32 mask; + u32 status; +} __packed; + +/* Read modify write command */ +struct qlc_83xx_rmw { + u32 mask; + u32 xor_value; + u32 or_value; +#if defined(__LITTLE_ENDIAN) + u8 shl; + u8 shr; + u8 index_a; + u8 rsvd; +#elif defined(__BIG_ENDIAN) + u8 rsvd; + u8 index_a; + u8 shr; + u8 shl; +#endif +} __packed; + +/* Generic command with 2 DWORD */ +struct qlc_83xx_entry { + u32 arg1; + u32 arg2; +} __packed; + +/* Generic command with 4 DWORD */ +struct qlc_83xx_quad_entry { + u32 dr_addr; + u32 dr_value; + u32 ar_addr; + u32 ar_value; +} __packed; +static const char *const qlc_83xx_idc_states[] = { + "Unknown", + "Cold", + "Init", + "Ready", + "Need Reset", + "Need Quiesce", + "Failed", + "Quiesce" +}; + +static int +qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter) +{ + u32 val; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + if ((val & 0xFFFF)) + return 1; + else + return 0; +} + +static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter) +{ + u32 cur, prev; + cur = adapter->ahw->idc.curr_state; + prev = adapter->ahw->idc.prev_state; + + dev_info(&adapter->pdev->dev, + "current state = %s, prev state = %s\n", + adapter->ahw->idc.name[cur], + adapter->ahw->idc.name[prev]); +} + +static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter, + u8 mode, int lock) +{ + u32 val; + int seconds; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); + val |= (adapter->portnum & 0xf); + val |= mode << 7; + if (mode) + seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; + else + seconds = jiffies / HZ; + + val |= seconds << 8; + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val); + adapter->ahw->idc.sec_counter = jiffies / HZ; + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter) +{ + u32 val; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION); + val = val & ~(0x3 << (adapter->portnum * 2)); + val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2)); + QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val); +} + +static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter, + int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); + val = val & ~0xFF; + val = val | QLC_83XX_IDC_MAJOR_VERSION; + QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int +qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter, + int status, int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + + if (status) + val = val | (1 << adapter->portnum); + else + val = val & ~(1 << adapter->portnum); + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); + qlcnic_83xx_idc_update_minor_version(adapter); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter) +{ + u32 val; + u8 version; + + val = 
QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION); + version = val & 0xFF; + + if (version != QLC_83XX_IDC_MAJOR_VERSION) { + dev_info(&adapter->pdev->dev, + "%s:mismatch. version 0x%x, expected version 0x%x\n", + __func__, version, QLC_83XX_IDC_MAJOR_VERSION); + return -EIO; + } + + return 0; +} + +static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter, + int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0); + /* Clear graceful reset bit */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val &= ~QLC_83XX_IDC_GRACEFULL_RESET; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter, + int flag, int lock) +{ + u32 val; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); + if (flag) + val = val | (1 << adapter->portnum); + else + val = val & ~(1 << adapter->portnum); + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter, + int time_limit) +{ + u64 seconds; + + seconds = jiffies / HZ - adapter->ahw->idc.sec_counter; + if (seconds <= time_limit) + return 0; + else + return -EBUSY; +} + +/** + * qlcnic_83xx_idc_check_reset_ack_reg + * + * @adapter: adapter structure + * + * Check ACK wait limit and clear the functions which failed to ACK + * + * Return 0 if all functions have acknowledged the reset request. + **/ +static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter) +{ + int timeout; + u32 ack, presence, val; + + timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS; + ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK); + presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + dev_info(&adapter->pdev->dev, + "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence); + if (!((ack & presence) == presence)) { + if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) { + /* Clear functions which failed to ACK */ + dev_info(&adapter->pdev->dev, + "%s: ACK wait exceeds time limit\n", __func__); + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + val = val & ~(ack ^ presence); + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); + dev_info(&adapter->pdev->dev, + "%s: updated drv presence reg = 0x%x\n", + __func__, val); + qlcnic_83xx_unlock_driver(adapter); + return 0; + + } else { + return 1; + } + } else { + dev_info(&adapter->pdev->dev, + "%s: Reset ACK received from all functions\n", + __func__); + return 0; + } +} + +/** + * qlcnic_83xx_idc_tx_soft_reset + * + * @adapter: adapter structure + * + * Handle context deletion and recreation request from transmit routine + * + * Returns -EBUSY or Success (0) + * + **/ +static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + qlcnic_down(adapter, netdev); + qlcnic_up(adapter, netdev); + netif_device_attach(netdev); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__); + + return 0; +} + +/** + * qlcnic_83xx_idc_detach_driver + * + * @adapter: adapter 
structure + * Detach net interface, stop TX and cleanup resources before the HW reset. + * Returns: None + * + **/ +static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter) +{ + int i; + struct net_device *netdev = adapter->netdev; + + netif_device_detach(netdev); + qlcnic_83xx_detach_mailbox_work(adapter); + + /* Disable mailbox interrupt */ + qlcnic_83xx_disable_mbx_intr(adapter); + qlcnic_down(adapter, netdev); + for (i = 0; i < adapter->ahw->num_msix; i++) { + adapter->ahw->intr_tbl[i].id = i; + adapter->ahw->intr_tbl[i].enabled = 0; + adapter->ahw->intr_tbl[i].src = 0; + } + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_sriov_pf_reset(adapter); +} + +/** + * qlcnic_83xx_idc_attach_driver + * + * @adapter: adapter structure + * + * Re-attach and re-enable net interface + * Returns: None + * + **/ +static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (netif_running(netdev)) { + if (qlcnic_up(adapter, netdev)) + goto done; + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } +done: + netif_device_attach(netdev); +} + +static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + qlcnic_83xx_idc_clear_registers(adapter, 0); + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED); + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + qlcnic_83xx_idc_log_state_history(adapter); + dev_info(&adapter->pdev->dev, "Device will enter failed state\n"); + + return 0; +} + +static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DEV_NEED_QUISCENT); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int +qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DEV_NEED_RESET); + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter, + int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY); + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +/** + * qlcnic_83xx_idc_find_reset_owner_id + * + * @adapter: adapter structure + * + * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE. 
+ * Within the same class, function with lowest PCI ID assumes ownership + * + * Returns: reset owner id or failure indication (-EIO) + * + **/ +static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter) +{ + u32 reg, reg1, reg2, i, j, owner, class; + + reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1); + reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2); + owner = QLCNIC_TYPE_NIC; + i = 0; + j = 0; + reg = reg1; + + do { + class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3); + if (class == owner) + break; + if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) { + reg = reg2; + j = 0; + } else { + j++; + } + + if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) { + if (owner == QLCNIC_TYPE_NIC) + owner = QLCNIC_TYPE_ISCSI; + else if (owner == QLCNIC_TYPE_ISCSI) + owner = QLCNIC_TYPE_FCOE; + else if (owner == QLCNIC_TYPE_FCOE) + return -EIO; + reg = reg1; + j = 0; + i = 0; + } + } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS); + + return i; +} + +static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock) +{ + int ret = 0; + + ret = qlcnic_83xx_restart_hw(adapter); + + if (ret) { + qlcnic_83xx_idc_enter_failed_state(adapter, lock); + } else { + qlcnic_83xx_idc_clear_registers(adapter, lock); + ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock); + } + + return ret; +} + +static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter) +{ + u32 status; + + status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); + + if (status & QLCNIC_RCODE_FATAL_ERROR) { + dev_err(&adapter->pdev->dev, + "peg halt status1=0x%x\n", status); + if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) { + dev_err(&adapter->pdev->dev, + "On board active cooling fan failed. " + "Device has been halted.\n"); + dev_err(&adapter->pdev->dev, + "Replace the adapter.\n"); + return -EIO; + } + } + + return 0; +} + +int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) +{ + int err; + + qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); + qlcnic_83xx_enable_mbx_interrupt(adapter); + + qlcnic_83xx_initialize_nic(adapter, 1); + + err = qlcnic_sriov_pf_reinit(adapter); + if (err) + return err; + + qlcnic_83xx_enable_mbx_interrupt(adapter); + + if (qlcnic_83xx_configure_opmode(adapter)) { + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return -EIO; + } + + if (adapter->nic_ops->init_driver(adapter)) { + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return -EIO; + } + + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); + + qlcnic_dcb_get_info(adapter->dcb); + qlcnic_83xx_idc_attach_driver(adapter); + + return 0; +} + +static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); + qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); + set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + + ahw->idc.quiesce_req = 0; + ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + ahw->idc.err_code = 0; + ahw->idc.collect_dump = 0; + ahw->reset_context = 0; + adapter->tx_timeo_cnt = 0; + ahw->idc.delay_reset = 0; + + clear_bit(__QLCNIC_RESETTING, &adapter->state); +} + +/** + * qlcnic_83xx_idc_ready_state_entry + * + * @adapter: adapter structure + * + * Perform ready state initialization, this routine will get invoked only + * once from READY state. 
+ * + * Returns: Error code or Success(0) + * + **/ +int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) { + qlcnic_83xx_idc_update_idc_params(adapter); + /* Re-attach the device if required */ + if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) || + (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) { + if (qlcnic_83xx_idc_reattach_driver(adapter)) + return -EIO; + } + } + + return 0; +} + +/** + * qlcnic_83xx_idc_vnic_pf_entry + * + * @adapter: adapter structure + * + * Ensure vNIC mode privileged function starts only after vNIC mode is + * enabled by management function. + * If vNIC mode is ready, start initialization. + * + * Returns: -EIO or 0 + * + **/ +int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter) +{ + u32 state; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + /* Privileged function waits till mgmt function enables VNIC mode */ + state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE); + if (state != QLCNIC_DEV_NPAR_OPER) { + if (!ahw->idc.vnic_wait_limit--) { + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return -EIO; + } + dev_info(&adapter->pdev->dev, "vNIC mode disabled\n"); + return -EIO; + + } else { + /* Perform one time initialization from ready state */ + if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) { + qlcnic_83xx_idc_update_idc_params(adapter); + + /* If the previous state is UNKNOWN, device will be + already attached properly by Init routine*/ + if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) { + if (qlcnic_83xx_idc_reattach_driver(adapter)) + return -EIO; + } + adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER; + dev_info(&adapter->pdev->dev, "vNIC mode enabled\n"); + } + } + + return 0; +} + +static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter) +{ + adapter->ahw->idc.err_code = -EIO; + dev_err(&adapter->pdev->dev, + "%s: Device in unknown state\n", __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return 0; +} + +/** + * qlcnic_83xx_idc_cold_state + * + * @adapter: adapter structure + * + * If HW is up and running device will enter READY state. + * If firmware image from host needs to be loaded, device is + * forced to start with the file firmware image. + * + * Returns: Error code or Success(0) + * + **/ +static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter) +{ + qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0); + qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0); + + if (qlcnic_load_fw_file) { + qlcnic_83xx_idc_restart_hw(adapter, 0); + } else { + if (qlcnic_83xx_check_hw_status(adapter)) { + qlcnic_83xx_idc_enter_failed_state(adapter, 0); + return -EIO; + } else { + qlcnic_83xx_idc_enter_ready_state(adapter, 0); + } + } + return 0; +} + +/** + * qlcnic_83xx_idc_init_state + * + * @adapter: adapter structure + * + * Reset owner will restart the device from this state. + * Device will enter failed state if it remains + * in this state for more than DEV_INIT time limit. 
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+	int timeout, ret = 0;
+	u32 owner;
+
+	timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+	if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+		owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+		if (adapter->ahw->pci_func == owner)
+			ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+	} else {
+		ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+	}
+
+	return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform IDC protocol specified actions after monitoring device state and
+ * events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	struct qlcnic_mailbox *mbx = ahw->mailbox;
+	int ret = 0;
+	u32 val;
+
+	/* Perform NIC configuration based ready state entry actions */
+	if (ahw->idc.state_entry(adapter))
+		return -EIO;
+
+	if (qlcnic_check_temp(adapter)) {
+		if (ahw->temp == QLCNIC_TEMP_PANIC) {
+			qlcnic_83xx_idc_check_fan_failure(adapter);
+			dev_err(&adapter->pdev->dev,
+				"Error: device temperature %d above limits\n",
+				adapter->ahw->temp);
+			clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+			set_bit(__QLCNIC_RESETTING, &adapter->state);
+			qlcnic_83xx_idc_detach_driver(adapter);
+			qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+			return -EIO;
+		}
+	}
+
+	val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+	ret = qlcnic_83xx_check_heartbeat(adapter);
+	if (ret) {
+		adapter->flags |= QLCNIC_FW_HANG;
+		if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+			clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+			set_bit(__QLCNIC_RESETTING, &adapter->state);
+			qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+		} else {
+			netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+				    __func__);
+			qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+		}
+		return -EIO;
+	}
+
+	if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
+		/* Move to need reset state and prepare for reset */
+		qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+		return ret;
+	}
+
+	/* Check for soft reset request */
+	if (ahw->reset_context &&
+	    !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+		adapter->ahw->reset_context = 0;
+		qlcnic_83xx_idc_tx_soft_reset(adapter);
+		return ret;
+	}
+
+	/* Move to need quiesce state if requested */
+	if (adapter->ahw->idc.quiesce_req) {
+		qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+		qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until:
+ * Reset request ACKs are received from all the functions
+ * Wait time exceeds max time limit
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+	int ret = 0;
+
+	if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+		qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+		set_bit(__QLCNIC_RESETTING, &adapter->state);
+		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+		if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
+			qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+		if (qlcnic_check_diag_status(adapter)) {
+			dev_info(&adapter->pdev->dev,
+				 "%s: Wait for diag 
completion\n", __func__); + adapter->ahw->idc.delay_reset = 1; + return 0; + } else { + qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); + qlcnic_83xx_idc_detach_driver(adapter); + } + } + + if (qlcnic_check_diag_status(adapter)) { + dev_info(&adapter->pdev->dev, + "%s: Wait for diag completion\n", __func__); + return -1; + } else { + if (adapter->ahw->idc.delay_reset) { + qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1); + qlcnic_83xx_idc_detach_driver(adapter); + adapter->ahw->idc.delay_reset = 0; + } + + /* Check for ACK from other functions */ + ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter); + if (ret) { + dev_info(&adapter->pdev->dev, + "%s: Waiting for reset ACK\n", __func__); + return -1; + } + } + + /* Transit to INIT state and restart the HW */ + qlcnic_83xx_idc_enter_init_state(adapter, 1); + + return ret; +} + +static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) +{ + dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__); + return 0; +} + +static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 val, owner; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (ahw->pci_func == owner) { + qlcnic_83xx_stop_hw(adapter); + qlcnic_dump_fw(adapter); + } + } + + netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n", + __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + ahw->idc.err_code = -EIO; + + return; +} + +static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter) +{ + dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__); + return 0; +} + +static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter, + u32 state) +{ + u32 cur, prev, next; + + cur = adapter->ahw->idc.curr_state; + prev = adapter->ahw->idc.prev_state; + next = state; + + if ((next < QLC_83XX_IDC_DEV_COLD) || + (next > QLC_83XX_IDC_DEV_QUISCENT)) { + dev_err(&adapter->pdev->dev, + "%s: curr %d, prev %d, next state %d is invalid\n", + __func__, cur, prev, state); + return 1; + } + + if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) && + (prev == QLC_83XX_IDC_DEV_UNKNOWN)) { + if ((next != QLC_83XX_IDC_DEV_COLD) && + (next != QLC_83XX_IDC_DEV_READY)) { + dev_err(&adapter->pdev->dev, + "%s: failed, cur %d prev %d next %d\n", + __func__, cur, prev, next); + return 1; + } + } + + if (next == QLC_83XX_IDC_DEV_INIT) { + if ((prev != QLC_83XX_IDC_DEV_INIT) && + (prev != QLC_83XX_IDC_DEV_COLD) && + (prev != QLC_83XX_IDC_DEV_NEED_RESET)) { + dev_err(&adapter->pdev->dev, + "%s: failed, cur %d prev %d next %d\n", + __func__, cur, prev, next); + return 1; + } + } + + return 0; +} + +#ifdef CONFIG_QLCNIC_VXLAN +#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1 +#define QLC_83XX_MATCH_ENCAP_ID BIT_2 +#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3 +#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16) + +#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1 +#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0 + +static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter) +{ + u16 port = adapter->ahw->vxlan_port; + struct qlcnic_cmd_args cmd; + int ret = 0; + + memset(&cmd, 0, sizeof(cmd)); + + ret = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_INIT_NIC_FUNC); + if (ret) + return ret; + + cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO; + cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN | + QLC_83XX_SET_VXLAN_UDP_DPORT | + QLC_83XX_VXLAN_UDP_DPORT(port); + + ret = 
qlcnic_issue_cmd(adapter, &cmd); + if (ret) + netdev_err(adapter->netdev, + "Failed to set VXLAN port %d in adapter\n", + port); + + qlcnic_free_mbx_args(&cmd); + + return ret; +} + +static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, + bool state) +{ + u16 vxlan_port = adapter->ahw->vxlan_port; + struct qlcnic_cmd_args cmd; + int ret = 0; + + memset(&cmd, 0, sizeof(cmd)); + + ret = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_INGRESS_ENCAP); + if (ret) + return ret; + + cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING : + QLCNIC_DISABLE_INGRESS_ENCAP_PARSING; + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) + netdev_err(adapter->netdev, + "Failed to %s VXLAN parsing for port %d\n", + state ? "enable" : "disable", vxlan_port); + else + netdev_info(adapter->netdev, + "%s VXLAN parsing for port %d\n", + state ? "Enabled" : "Disabled", vxlan_port); + + qlcnic_free_mbx_args(&cmd); + + return ret; +} +#endif + +static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) +{ + if (adapter->fhash.fnum) + qlcnic_prune_lb_filters(adapter); + +#ifdef CONFIG_QLCNIC_VXLAN + if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) { + if (qlcnic_set_vxlan_port(adapter)) + return; + + if (qlcnic_set_vxlan_parsing(adapter, true)) + return; + + adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT; + } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) { + if (qlcnic_set_vxlan_parsing(adapter, false)) + return; + + adapter->ahw->vxlan_port = 0; + adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT; + } +#endif +} + +/** + * qlcnic_83xx_idc_poll_dev_state + * + * @work: kernel work queue structure used to schedule the function + * + * Poll device state periodically and perform state specific + * actions defined by Inter Driver Communication (IDC) protocol. 
+ * + * Returns: None + * + **/ +void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work) +{ + struct qlcnic_adapter *adapter; + u32 state; + + adapter = container_of(work, struct qlcnic_adapter, fw_work.work); + state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); + + if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { + qlcnic_83xx_idc_log_state_history(adapter); + adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; + } else { + adapter->ahw->idc.curr_state = state; + } + + switch (adapter->ahw->idc.curr_state) { + case QLC_83XX_IDC_DEV_READY: + qlcnic_83xx_idc_ready_state(adapter); + break; + case QLC_83XX_IDC_DEV_NEED_RESET: + qlcnic_83xx_idc_need_reset_state(adapter); + break; + case QLC_83XX_IDC_DEV_NEED_QUISCENT: + qlcnic_83xx_idc_need_quiesce_state(adapter); + break; + case QLC_83XX_IDC_DEV_FAILED: + qlcnic_83xx_idc_failed_state(adapter); + return; + case QLC_83XX_IDC_DEV_INIT: + qlcnic_83xx_idc_init_state(adapter); + break; + case QLC_83XX_IDC_DEV_QUISCENT: + qlcnic_83xx_idc_quiesce_state(adapter); + break; + default: + qlcnic_83xx_idc_unknown_state(adapter); + return; + } + adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state; + qlcnic_83xx_periodic_tasks(adapter); + + /* Re-schedule the function */ + if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status)) + qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, + adapter->ahw->idc.delay); +} + +static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter) +{ + u32 idc_params, val; + + if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR, + (u8 *)&idc_params, 1)) { + dev_info(&adapter->pdev->dev, + "%s:failed to get IDC params from flash\n", __func__); + adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; + adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS; + } else { + adapter->dev_init_timeo = idc_params & 0xFFFF; + adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF); + } + + adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN; + adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN; + adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + adapter->ahw->idc.err_code = 0; + adapter->ahw->idc.collect_dump = 0; + adapter->ahw->idc.name = (char **)qlc_83xx_idc_states; + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + + /* Check if reset recovery is disabled */ + if (!qlcnic_auto_fw_reset) { + /* Propagate do not reset request to other functions */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + } +} + +static int +qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter) +{ + u32 state, val; + + if (qlcnic_83xx_lock_driver(adapter)) + return -EIO; + + /* Clear driver lock register */ + QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0); + if (qlcnic_83xx_idc_update_major_version(adapter, 0)) { + qlcnic_83xx_unlock_driver(adapter); + return -EIO; + } + + state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); + if (qlcnic_83xx_idc_check_state_validity(adapter, state)) { + qlcnic_83xx_unlock_driver(adapter); + return -EIO; + } + + if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) { + QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, + QLC_83XX_IDC_DEV_COLD); + state = QLC_83XX_IDC_DEV_COLD; + } + + adapter->ahw->idc.curr_state = state; + /* First to load function should cold boot the device */ + if (state == QLC_83XX_IDC_DEV_COLD) + 
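/*
 * Aside (illustrative, not part of the driver): the IDC parameter word
 * read from flash in qlcnic_83xx_setup_idc_parameters() above carries
 * two 16-bit timeouts in a single 32-bit value: the device-init timeout
 * in the low half and the reset-ack timeout in the high half.  A
 * minimal sketch of that unpacking, with hypothetical names:
 *
 *   #include <stdint.h>
 *
 *   struct idc_timeouts {
 *           uint16_t dev_init_secs;    // bits 15:0
 *           uint16_t reset_ack_secs;   // bits 31:16
 *   };
 *
 *   static struct idc_timeouts unpack_idc_params(uint32_t word)
 *   {
 *           struct idc_timeouts t = {
 *                   .dev_init_secs  = (uint16_t)(word & 0xffff),
 *                   .reset_ack_secs = (uint16_t)((word >> 16) & 0xffff),
 *           };
 *           return t;
 *   }
 */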
qlcnic_83xx_idc_cold_state_handler(adapter); + + /* Check if reset recovery is enabled */ + if (qlcnic_auto_fw_reset) { + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + } + + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter) +{ + int ret = -EIO; + + qlcnic_83xx_setup_idc_parameters(adapter); + + if (qlcnic_83xx_get_reset_instruction_template(adapter)) + return ret; + + if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) { + if (qlcnic_83xx_idc_first_to_load_function_handler(adapter)) + return -EIO; + } else { + if (qlcnic_83xx_idc_check_major_version(adapter)) + return -EIO; + } + + qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); + + return 0; +} + +void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter) +{ + int id; + u32 val; + + while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + usleep_range(10000, 11000); + + id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + id = id & 0xFF; + + if (id == adapter->portnum) { + dev_err(&adapter->pdev->dev, + "%s: wait for lock recovery.. %d\n", __func__, id); + msleep(20); + id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID); + id = id & 0xFF; + } + + /* Clear driver presence bit */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + val = val & ~(1 << adapter->portnum); + QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val); + clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + cancel_delayed_work_sync(&adapter->fw_work); +} + +void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key) +{ + u32 val; + + if (qlcnic_sriov_vf_check(adapter)) + return; + + if (qlcnic_83xx_lock_driver(adapter)) { + dev_err(&adapter->pdev->dev, + "%s:failed, please retry\n", __func__); + return; + } + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 0); + qlcnic_83xx_unlock_driver(adapter); + return; + } + + if (key == QLCNIC_FORCE_FW_RESET) { + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val = val | QLC_83XX_IDC_GRACEFULL_RESET; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) { + adapter->ahw->idc.collect_dump = 1; + } + + qlcnic_83xx_unlock_driver(adapter); + return; +} + +static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter) +{ + u8 *p_cache; + u32 src, size; + u64 dest; + int ret = -EIO; + + src = QLC_83XX_BOOTLOADER_FLASH_ADDR; + dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR); + size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE); + + /* alignment check */ + if (size & 0xF) + size = (size + 16) & ~0xF; + + p_cache = vzalloc(size); + if (p_cache == NULL) + return -ENOMEM; + + ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache, + size / sizeof(u32)); + if (ret) { + vfree(p_cache); + return ret; + } + /* 16 byte write to MS memory */ + ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache, + size / 16); + if (ret) { + vfree(p_cache); + return ret; + } + vfree(p_cache); + + return ret; +} + +static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; + const struct firmware *fw = fw_info->fw; + u32 dest, *p_cache, *temp; + int i, ret = -EIO; + __le32 *temp_le; + 
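/*
 * Aside (illustrative, not part of the driver): the firmware image is
 * stored little-endian and is pushed to the adapter in 16-byte units,
 * so the copy routine below byte-swaps it into host order and pads any
 * tail shorter than 16 bytes with zeros.  A minimal sketch of those two
 * steps, with hypothetical names:
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   // round a byte count up to the next multiple of 16
 *   static size_t round_up_16(size_t len)
 *   {
 *           return (len + 15) & ~(size_t)0xf;
 *   }
 *
 *   // convert one little-endian 32-bit word to host order
 *   static uint32_t le32_to_host(const uint8_t *p)
 *   {
 *           return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
 *                  ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
 *   }
 *
 *   // copy a word-aligned image into a buffer of round_up_16(len)
 *   // bytes, converting each 32-bit word and zeroing the padded tail
 *   static void copy_padded_le32(uint32_t *dst, const uint8_t *src, size_t len)
 *   {
 *           size_t i, words = len / 4;   // assumes len % 4 == 0
 *
 *           memset(dst, 0, round_up_16(len));
 *           for (i = 0; i < words; i++)
 *                   dst[i] = le32_to_host(src + 4 * i);
 *   }
 */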
u8 data[16]; + size_t size; + u64 addr; + + temp = kzalloc(fw->size, GFP_KERNEL); + if (!temp) { + release_firmware(fw); + fw_info->fw = NULL; + return -ENOMEM; + } + + temp_le = (__le32 *)fw->data; + + /* FW image in file is in little endian, swap the data to nullify + * the effect of writel() operation on big endian platform. + */ + for (i = 0; i < fw->size / sizeof(u32); i++) + temp[i] = __le32_to_cpu(temp_le[i]); + + dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR); + size = (fw->size & ~0xF); + p_cache = temp; + addr = (u64)dest; + + ret = qlcnic_ms_mem_write128(adapter, addr, + p_cache, size / 16); + if (ret) { + dev_err(&adapter->pdev->dev, "MS memory write failed\n"); + goto exit; + } + + /* alignment check */ + if (fw->size & 0xF) { + addr = dest + size; + for (i = 0; i < (fw->size & 0xF); i++) + data[i] = temp[size + i]; + for (; i < 16; i++) + data[i] = 0; + ret = qlcnic_ms_mem_write128(adapter, addr, + (u32 *)data, 1); + if (ret) { + dev_err(&adapter->pdev->dev, + "MS memory write failed\n"); + goto exit; + } + } + +exit: + release_firmware(fw); + fw_info->fw = NULL; + kfree(temp); + + return ret; +} + +static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter) +{ + int i, j; + u32 val = 0, val1 = 0, reg = 0; + int err = 0; + + val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err); + if (err == -EIO) + return; + dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val); + + for (j = 0; j < 2; j++) { + if (j == 0) { + dev_info(&adapter->pdev->dev, + "Port 0 RxB Pause Threshold Regs[TC7..TC0]:"); + reg = QLC_83XX_PORT0_THRESHOLD; + } else if (j == 1) { + dev_info(&adapter->pdev->dev, + "Port 1 RxB Pause Threshold Regs[TC7..TC0]:"); + reg = QLC_83XX_PORT1_THRESHOLD; + } + for (i = 0; i < 8; i++) { + val = QLCRD32(adapter, reg + (i * 0x4), &err); + if (err == -EIO) + return; + dev_info(&adapter->pdev->dev, "0x%x ", val); + } + dev_info(&adapter->pdev->dev, "\n"); + } + + for (j = 0; j < 2; j++) { + if (j == 0) { + dev_info(&adapter->pdev->dev, + "Port 0 RxB TC Max Cell Registers[4..1]:"); + reg = QLC_83XX_PORT0_TC_MC_REG; + } else if (j == 1) { + dev_info(&adapter->pdev->dev, + "Port 1 RxB TC Max Cell Registers[4..1]:"); + reg = QLC_83XX_PORT1_TC_MC_REG; + } + for (i = 0; i < 4; i++) { + val = QLCRD32(adapter, reg + (i * 0x4), &err); + if (err == -EIO) + return; + dev_info(&adapter->pdev->dev, "0x%x ", val); + } + dev_info(&adapter->pdev->dev, "\n"); + } + + for (j = 0; j < 2; j++) { + if (j == 0) { + dev_info(&adapter->pdev->dev, + "Port 0 RxB Rx TC Stats[TC7..TC0]:"); + reg = QLC_83XX_PORT0_TC_STATS; + } else if (j == 1) { + dev_info(&adapter->pdev->dev, + "Port 1 RxB Rx TC Stats[TC7..TC0]:"); + reg = QLC_83XX_PORT1_TC_STATS; + } + for (i = 7; i >= 0; i--) { + val = QLCRD32(adapter, reg, &err); + if (err == -EIO) + return; + val &= ~(0x7 << 29); /* Reset bits 29 to 31 */ + QLCWR32(adapter, reg, (val | (i << 29))); + val = QLCRD32(adapter, reg, &err); + if (err == -EIO) + return; + dev_info(&adapter->pdev->dev, "0x%x ", val); + } + dev_info(&adapter->pdev->dev, "\n"); + } + + val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err); + if (err == -EIO) + return; + val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err); + if (err == -EIO) + return; + dev_info(&adapter->pdev->dev, + "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", + val, val1); +} + + +static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter) +{ + u32 reg = 0, i, j; + + if (qlcnic_83xx_lock_driver(adapter)) { + dev_err(&adapter->pdev->dev, + "%s:failed to acquire 
driver lock\n", __func__); + return; + } + + qlcnic_83xx_dump_pause_control_regs(adapter); + QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0); + + for (j = 0; j < 2; j++) { + if (j == 0) + reg = QLC_83XX_PORT0_THRESHOLD; + else if (j == 1) + reg = QLC_83XX_PORT1_THRESHOLD; + + for (i = 0; i < 8; i++) + QLCWR32(adapter, reg + (i * 0x4), 0x0); + } + + for (j = 0; j < 2; j++) { + if (j == 0) + reg = QLC_83XX_PORT0_TC_MC_REG; + else if (j == 1) + reg = QLC_83XX_PORT1_TC_MC_REG; + + for (i = 0; i < 4; i++) + QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF); + } + + QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0); + QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0); + dev_info(&adapter->pdev->dev, + "Disabled pause frames successfully on all ports\n"); + qlcnic_83xx_unlock_driver(adapter); +} + +static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter) +{ + QLCWR32(adapter, QLC_83XX_RESET_REG, 0); + QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0); + QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0); + QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0); + QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0); + QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0); + QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0); + QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0); + QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1); +} + +static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev) +{ + u32 heartbeat, peg_status; + int retries, ret = -EIO, err = 0; + + retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; + p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev, + QLCNIC_PEG_ALIVE_COUNTER); + + do { + msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); + heartbeat = QLC_SHARED_REG_RD32(p_dev, + QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != p_dev->heartbeat) { + ret = QLCNIC_RCODE_SUCCESS; + break; + } + } while (--retries); + + if (ret) { + dev_err(&p_dev->pdev->dev, "firmware hang detected\n"); + qlcnic_83xx_take_eport_out_of_reset(p_dev); + qlcnic_83xx_disable_pause_frames(p_dev); + peg_status = QLC_SHARED_REG_RD32(p_dev, + QLCNIC_PEG_HALT_STATUS1); + dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n" + "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" + "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" + "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" + "PEG_NET_4_PC: 0x%x\n", peg_status, + QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err), + QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err)); + + if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) + dev_err(&p_dev->pdev->dev, + "Device is being reset err code 0x00006700.\n"); + } + + return ret; +} + +static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev) +{ + int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; + u32 val; + + do { + val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE); + if (val == QLC_83XX_CMDPEG_COMPLETE) + return 0; + msleep(QLCNIC_CMDPEG_CHECK_DELAY); + } while (--retries); + + dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val); + return -EIO; +} + +static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev) +{ + int err; + + err = qlcnic_83xx_check_cmd_peg_status(p_dev); + if (err) + return err; + + err = qlcnic_83xx_check_heartbeat(p_dev); + if (err) + return err; + + return err; +} + +static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr, + int duration, u32 mask, u32 status) +{ + int timeout_error, err = 0; + u32 value; + u8 retries; + + value 
= QLCRD32(p_dev, addr, &err); + if (err == -EIO) + return err; + retries = duration / 10; + + do { + if ((value & mask) != status) { + timeout_error = 1; + msleep(duration / 10); + value = QLCRD32(p_dev, addr, &err); + if (err == -EIO) + return err; + } else { + timeout_error = 0; + break; + } + } while (retries--); + + if (timeout_error) { + p_dev->ahw->reset.seq_error++; + dev_err(&p_dev->pdev->dev, + "%s: Timeout Err, entry_num = %d\n", + __func__, p_dev->ahw->reset.seq_index); + dev_err(&p_dev->pdev->dev, + "0x%08x 0x%08x 0x%08x\n", + value, mask, status); + } + + return timeout_error; +} + +static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev) +{ + u32 sum = 0; + u16 *buff = (u16 *)p_dev->ahw->reset.buff; + int count = p_dev->ahw->reset.hdr->size / sizeof(u16); + + while (count-- > 0) + sum += *buff++; + + while (sum >> 16) + sum = (sum & 0xFFFF) + (sum >> 16); + + if (~sum) { + return 0; + } else { + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); + return -1; + } +} + +static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev) +{ + struct qlcnic_hardware_context *ahw = p_dev->ahw; + u32 addr, count, prev_ver, curr_ver; + u8 *p_buff; + + if (ahw->reset.buff != NULL) { + prev_ver = p_dev->fw_version; + curr_ver = qlcnic_83xx_get_fw_version(p_dev); + if (curr_ver > prev_ver) + kfree(ahw->reset.buff); + else + return 0; + } + + ahw->reset.seq_error = 0; + ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL); + if (p_dev->ahw->reset.buff == NULL) + return -ENOMEM; + + p_buff = p_dev->ahw->reset.buff; + addr = QLC_83XX_RESET_TEMPLATE_ADDR; + count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32); + + /* Copy template header from flash */ + if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { + dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); + return -EIO; + } + ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff; + addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size; + p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size; + count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32); + + /* Copy rest of the template */ + if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) { + dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__); + return -EIO; + } + + if (qlcnic_83xx_reset_template_checksum(p_dev)) + return -EIO; + /* Get Stop, Start and Init command offsets */ + ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset; + ahw->reset.start_offset = ahw->reset.buff + + ahw->reset.hdr->start_offset; + ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size; + return 0; +} + +/* Read Write HW register command */ +static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev, + u32 raddr, u32 waddr) +{ + int err = 0; + u32 value; + + value = QLCRD32(p_dev, raddr, &err); + if (err == -EIO) + return; + qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value); +} + +/* Read Modify Write HW register command */ +static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev, + u32 raddr, u32 waddr, + struct qlc_83xx_rmw *p_rmw_hdr) +{ + int err = 0; + u32 value; + + if (p_rmw_hdr->index_a) { + value = p_dev->ahw->reset.array[p_rmw_hdr->index_a]; + } else { + value = QLCRD32(p_dev, raddr, &err); + if (err == -EIO) + return; + } + + value &= p_rmw_hdr->mask; + value <<= p_rmw_hdr->shl; + value >>= p_rmw_hdr->shr; + value |= p_rmw_hdr->or_value; + value ^= p_rmw_hdr->xor_value; + qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, 
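/*
 * Aside (illustrative, not part of the driver): the template checksum
 * above is a 16-bit sum with end-around carry: every 16-bit word is
 * added into a wide accumulator and the carries are folded back into
 * the low 16 bits until nothing overflows.  A minimal sketch of that
 * fold, with hypothetical names:
 *
 *   #include <stdint.h>
 *   #include <stddef.h>
 *
 *   static uint16_t ones_complement_sum16(const uint16_t *words, size_t count)
 *   {
 *           uint32_t sum = 0;
 *           size_t i;
 *
 *           for (i = 0; i < count; i++)
 *                   sum += words[i];
 *           while (sum >> 16)                  // fold carries back in
 *                   sum = (sum & 0xffff) + (sum >> 16);
 *           return (uint16_t)sum;
 *   }
 */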
value); +} + +/* Write HW register command */ +static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + struct qlc_83xx_entry *entry; + + entry = (struct qlc_83xx_entry *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1, + entry->arg2); + if (p_hdr->delay) + udelay((u32)(p_hdr->delay)); + } +} + +/* Read and Write instruction */ +static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + struct qlc_83xx_entry *entry; + + entry = (struct qlc_83xx_entry *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1, + entry->arg2); + if (p_hdr->delay) + udelay((u32)(p_hdr->delay)); + } +} + +/* Poll HW register command */ +static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + long delay; + struct qlc_83xx_entry *entry; + struct qlc_83xx_poll *poll; + int i, err = 0; + unsigned long arg1, arg2; + + poll = (struct qlc_83xx_poll *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + entry = (struct qlc_83xx_entry *)((char *)poll + + sizeof(struct qlc_83xx_poll)); + delay = (long)p_hdr->delay; + + if (!delay) { + for (i = 0; i < p_hdr->count; i++, entry++) + qlcnic_83xx_poll_reg(p_dev, entry->arg1, + delay, poll->mask, + poll->status); + } else { + for (i = 0; i < p_hdr->count; i++, entry++) { + arg1 = entry->arg1; + arg2 = entry->arg2; + if (delay) { + if (qlcnic_83xx_poll_reg(p_dev, + arg1, delay, + poll->mask, + poll->status)){ + QLCRD32(p_dev, arg1, &err); + if (err == -EIO) + return; + QLCRD32(p_dev, arg2, &err); + if (err == -EIO) + return; + } + } + } + } +} + +/* Poll and write HW register command */ +static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + long delay; + struct qlc_83xx_quad_entry *entry; + struct qlc_83xx_poll *poll; + + poll = (struct qlc_83xx_poll *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + entry = (struct qlc_83xx_quad_entry *)((char *)poll + + sizeof(struct qlc_83xx_poll)); + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr, + entry->dr_value); + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr, + entry->ar_value); + if (delay) + qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay, + poll->mask, poll->status); + } +} + +/* Read Modify Write register command */ +static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + int i; + struct qlc_83xx_entry *entry; + struct qlc_83xx_rmw *rmw_hdr; + + rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + entry = (struct qlc_83xx_entry *)((char *)rmw_hdr + + sizeof(struct qlc_83xx_rmw)); + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1, + entry->arg2, rmw_hdr); + if (p_hdr->delay) + udelay((u32)(p_hdr->delay)); + } +} + +static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr) +{ + if (p_hdr->delay) + mdelay((u32)((long)p_hdr->delay)); +} + +/* Read and poll register command */ +static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev, + struct qlc_83xx_entry_hdr *p_hdr) +{ + long delay; + int index, i, j, err; + struct qlc_83xx_quad_entry 
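/*
 * Aside (illustrative, not part of the driver): each read-modify-write
 * template entry above describes a fixed pipeline applied to the value
 * read (or taken from the scratch array): mask, shift left, shift
 * right, OR-in, XOR-in, then write back.  A minimal sketch of that
 * transform, with a hypothetical descriptor type:
 *
 *   #include <stdint.h>
 *
 *   struct rmw_desc {
 *           uint32_t mask;
 *           uint8_t  shl, shr;
 *           uint32_t or_value, xor_value;
 *   };
 *
 *   static uint32_t apply_rmw(uint32_t value, const struct rmw_desc *d)
 *   {
 *           value &= d->mask;
 *           value <<= d->shl;
 *           value >>= d->shr;
 *           value |= d->or_value;
 *           value ^= d->xor_value;
 *           return value;
 *   }
 */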
*entry; + struct qlc_83xx_poll *poll; + unsigned long addr; + + poll = (struct qlc_83xx_poll *)((char *)p_hdr + + sizeof(struct qlc_83xx_entry_hdr)); + + entry = (struct qlc_83xx_quad_entry *)((char *)poll + + sizeof(struct qlc_83xx_poll)); + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, entry++) { + qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr, + entry->ar_value); + if (delay) { + if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay, + poll->mask, poll->status)){ + index = p_dev->ahw->reset.array_index; + addr = entry->dr_addr; + j = QLCRD32(p_dev, addr, &err); + if (err == -EIO) + return; + + p_dev->ahw->reset.array[index++] = j; + + if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES) + p_dev->ahw->reset.array_index = 1; + } + } + } +} + +static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev) +{ + p_dev->ahw->reset.seq_end = 1; +} + +static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev) +{ + p_dev->ahw->reset.template_end = 1; + if (p_dev->ahw->reset.seq_error == 0) + dev_err(&p_dev->pdev->dev, + "HW restart process completed successfully.\n"); + else + dev_err(&p_dev->pdev->dev, + "HW restart completed with timeout errors.\n"); +} + +/** +* qlcnic_83xx_exec_template_cmd +* +* @p_dev: adapter structure +* @p_buff: Poiter to instruction template +* +* Template provides instructions to stop, restart and initalize firmware. +* These instructions are abstracted as a series of read, write and +* poll operations on hardware registers. Register information and operation +* specifics are not exposed to the driver. Driver reads the template from +* flash and executes the instructions located at pre-defined offsets. +* +* Returns: None +* */ +static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, + char *p_buff) +{ + int index, entries; + struct qlc_83xx_entry_hdr *p_hdr; + char *entry = p_buff; + + p_dev->ahw->reset.seq_end = 0; + p_dev->ahw->reset.template_end = 0; + entries = p_dev->ahw->reset.hdr->entries; + index = p_dev->ahw->reset.seq_index; + + for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) { + p_hdr = (struct qlc_83xx_entry_hdr *)entry; + + switch (p_hdr->cmd) { + case QLC_83XX_OPCODE_NOP: + break; + case QLC_83XX_OPCODE_WRITE_LIST: + qlcnic_83xx_write_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_READ_WRITE_LIST: + qlcnic_83xx_read_write_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_POLL_LIST: + qlcnic_83xx_poll_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_POLL_WRITE_LIST: + qlcnic_83xx_poll_write_list(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_READ_MODIFY_WRITE: + qlcnic_83xx_read_modify_write(p_dev, p_hdr); + break; + case QLC_83XX_OPCODE_SEQ_PAUSE: + qlcnic_83xx_pause(p_hdr); + break; + case QLC_83XX_OPCODE_SEQ_END: + qlcnic_83xx_seq_end(p_dev); + break; + case QLC_83XX_OPCODE_TMPL_END: + qlcnic_83xx_template_end(p_dev); + break; + case QLC_83XX_OPCODE_POLL_READ_LIST: + qlcnic_83xx_poll_read_list(p_dev, p_hdr); + break; + default: + dev_err(&p_dev->pdev->dev, + "%s: Unknown opcode 0x%04x in template %d\n", + __func__, p_hdr->cmd, index); + break; + } + entry += p_hdr->size; + } + p_dev->ahw->reset.seq_index = index; +} + +static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev) +{ + p_dev->ahw->reset.seq_index = 0; + + qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset); + if (p_dev->ahw->reset.seq_end != 1) + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); +} + +static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev) +{ + 
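/*
 * Aside (illustrative, not part of the driver): the template
 * interpreter above walks a byte stream of variable-size entries; each
 * entry starts with a header giving its opcode and total size, so the
 * cursor simply advances by hdr->size after dispatching.  A minimal
 * sketch of that loop, with hypothetical opcodes and a hypothetical
 * header layout:
 *
 *   #include <stdint.h>
 *
 *   struct entry_hdr {
 *           uint16_t cmd;     // opcode
 *           uint16_t size;    // total entry size in bytes, incl. header
 *   };
 *
 *   enum { OP_NOP = 0, OP_WRITE = 1, OP_END = 255 };
 *
 *   static void run_template(const char *buf, int entries)
 *   {
 *           const char *cur = buf;
 *           int i;
 *
 *           for (i = 0; i < entries; i++) {
 *                   const struct entry_hdr *hdr = (const void *)cur;
 *
 *                   switch (hdr->cmd) {
 *                   case OP_NOP:
 *                           break;
 *                   case OP_WRITE:
 *                           // handle payload that follows the header
 *                           break;
 *                   case OP_END:
 *                           return;            // sequence end
 *                   }
 *                   cur += hdr->size;           // variable-size step
 *           }
 *   }
 */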
qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset); + if (p_dev->ahw->reset.template_end != 1) + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); +} + +static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) +{ + qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset); + if (p_dev->ahw->reset.seq_end != 1) + dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); +} + +/* POST FW related definations*/ +#define QLC_83XX_POST_SIGNATURE_REG 0x41602014 +#define QLC_83XX_POST_MODE_REG 0x41602018 +#define QLC_83XX_POST_FAST_MODE 0 +#define QLC_83XX_POST_MEDIUM_MODE 1 +#define QLC_83XX_POST_SLOW_MODE 2 + +/* POST Timeout values in milliseconds */ +#define QLC_83XX_POST_FAST_MODE_TIMEOUT 690 +#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930 +#define QLC_83XX_POST_SLOW_MODE_TIMEOUT 7500 + +/* POST result values */ +#define QLC_83XX_POST_PASS 0xfffffff0 +#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL 0xffffffff +#define QLC_83XX_POST_DDR_TEST_FAIL 0xfffffffe +#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL 0xfffffffc +#define QLC_83XX_POST_FLASH_TEST_FAIL 0xfffffff8 + +static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; + struct device *dev = &adapter->pdev->dev; + int timeout, count, ret = 0; + u32 signature; + + /* Set timeout values with extra 2 seconds of buffer */ + switch (adapter->ahw->post_mode) { + case QLC_83XX_POST_FAST_MODE: + timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000; + break; + case QLC_83XX_POST_MEDIUM_MODE: + timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000; + break; + case QLC_83XX_POST_SLOW_MODE: + timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000; + break; + default: + return -EINVAL; + } + + strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + + ret = reject_firmware(&fw_info->fw, fw_info->fw_file_name, dev); + if (ret) { + dev_err(dev, "POST firmware can not be loaded, skipping POST\n"); + return 0; + } + + ret = qlcnic_83xx_copy_fw_file(adapter); + if (ret) + return ret; + + /* clear QLC_83XX_POST_SIGNATURE_REG register */ + qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0); + + /* Set POST mode */ + qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG, + adapter->ahw->post_mode); + + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FILE); + + qlcnic_83xx_start_hw(adapter); + + count = 0; + do { + msleep(100); + count += 100; + + signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG); + if (signature == QLC_83XX_POST_PASS) + break; + } while (timeout > count); + + if (timeout <= count) { + dev_err(dev, "POST timed out, signature = 0x%08x\n", signature); + return -EIO; + } + + switch (signature) { + case QLC_83XX_POST_PASS: + dev_info(dev, "POST passed, Signature = 0x%08x\n", signature); + break; + case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL: + dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_DDR_TEST_FAIL: + dev_err(dev, "POST failed, Test case : DDT TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL: + dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_FLASH_TEST_FAIL: + dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + default: + dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n", + signature); + ret = 
-EIO; + break; + } + + return ret; +} + +static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; + int err = -EIO; + + if (reject_firmware(&fw_info->fw, fw_info->fw_file_name, + &(adapter->pdev->dev))) { + dev_err(&adapter->pdev->dev, + "No file FW image, loading flash FW image.\n"); + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FLASH); + } else { + if (qlcnic_83xx_copy_fw_file(adapter)) + return err; + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FILE); + } + + return 0; +} + +static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) +{ + u32 val; + int err = -EIO; + + qlcnic_83xx_stop_hw(adapter); + + /* Collect FW register dump if required */ + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + if (!(val & QLC_83XX_IDC_GRACEFULL_RESET)) + qlcnic_dump_fw(adapter); + + if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) { + netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n", + __func__); + qlcnic_83xx_idc_enter_failed_state(adapter, 1); + return err; + } + + qlcnic_83xx_init_hw(adapter); + + if (qlcnic_83xx_copy_bootloader(adapter)) + return err; + + /* Check if POST needs to be run */ + if (adapter->ahw->run_post) { + err = qlcnic_83xx_run_post(adapter); + if (err) + return err; + + /* No need to run POST in next reset sequence */ + adapter->ahw->run_post = false; + + /* Again reset the adapter to load regular firmware */ + qlcnic_83xx_stop_hw(adapter); + qlcnic_83xx_init_hw(adapter); + + err = qlcnic_83xx_copy_bootloader(adapter); + if (err) + return err; + } + + /* Boot either flash image or firmware image from host file system */ + if (qlcnic_load_fw_file == 1) { + if (qlcnic_83xx_load_fw_image_from_host(adapter)) + return err; + } else { + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FLASH); + } + + qlcnic_83xx_start_hw(adapter); + if (qlcnic_83xx_check_hw_status(adapter)) + return -EIO; + + return 0; +} + +static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter) +{ + int err; + struct qlcnic_info nic_info; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); + if (err) + return -EIO; + + ahw->physical_port = (u8) nic_info.phys_port; + ahw->switch_mode = nic_info.switch_mode; + ahw->max_tx_ques = nic_info.max_tx_ques; + ahw->max_rx_ques = nic_info.max_rx_ques; + ahw->capabilities = nic_info.capabilities; + ahw->max_mac_filters = nic_info.max_mac_filters; + ahw->max_mtu = nic_info.max_mtu; + + /* eSwitch capability indicates vNIC mode. + * vNIC and SRIOV are mutually exclusive operational modes. + * If SR-IOV capability is detected, SR-IOV physical function + * will get initialized in default mode. + * SR-IOV virtual function initialization follows a + * different code path and opmode. + * SRIOV mode has precedence over vNIC mode. 
+ */ + if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) + return QLC_83XX_DEFAULT_OPMODE; + + if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) + return QLCNIC_VNIC_MODE; + + return QLC_83XX_DEFAULT_OPMODE; +} + +int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u16 max_sds_rings, max_tx_rings; + int ret; + + ret = qlcnic_83xx_get_nic_configuration(adapter); + if (ret == -EIO) + return -EIO; + + if (ret == QLCNIC_VNIC_MODE) { + ahw->nic_mode = QLCNIC_VNIC_MODE; + + if (qlcnic_83xx_config_vnic_opmode(adapter)) + return -EIO; + + max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; + max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; + } else if (ret == QLC_83XX_DEFAULT_OPMODE) { + ahw->nic_mode = QLCNIC_DEFAULT_MODE; + adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + max_sds_rings = QLCNIC_MAX_SDS_RINGS; + max_tx_rings = QLCNIC_MAX_TX_RINGS; + } else { + dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n", + __func__, ret); + return -EIO; + } + + adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings); + adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings); + + return 0; +} + +static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (ahw->port_type == QLCNIC_XGBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + + } else if (ahw->port_type == QLCNIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; + } + adapter->num_txd = MAX_CMD_DESCRIPTORS; + adapter->max_rds_rings = MAX_RDS_RINGS; +} + +static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + qlcnic_83xx_get_minidump_template(adapter); + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_buff_descriptors(adapter); + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + dev_info(&adapter->pdev->dev, "HAL Version: %d\n", + adapter->ahw->fw_hal_version); + + return 0; +} + +#define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1)) +static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + u32 presence_mask, audit_mask; + int status; + + presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE); + audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT); + + if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) { + status = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_STOP_NIC_FUNC); + if (status) + return; + + cmd.req.arg[1] = BIT_31; + status = qlcnic_issue_cmd(adapter, &cmd); + if (status) + dev_err(&adapter->pdev->dev, + "Failed to clean up the function resources\n"); + qlcnic_free_mbx_args(&cmd); + } +} + +static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct pci_dev *pdev = adapter->pdev; + struct qlc_83xx_fw_info *fw_info; + int err = 0; + + ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL); + if (!ahw->fw_info) { + err = -ENOMEM; + } else { + fw_info = ahw->fw_info; + switch 
(pdev->device) { + case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: + strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + break; + case PCI_DEVICE_ID_QLOGIC_QLE844X: + strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + break; + default: + dev_err(&pdev->dev, "%s: Invalid device id\n", + __func__); + err = -EINVAL; + break; + } + } + + return err; +} + +static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter) +{ + u8 rx_cnt = QLCNIC_DEF_SDS_RINGS; + u8 tx_cnt = QLCNIC_DEF_TX_RINGS; + + adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; + + if (!adapter->ahw->msix_supported) { + rx_cnt = QLCNIC_SINGLE_RING; + tx_cnt = QLCNIC_SINGLE_RING; + } + + /* compute and set drv sds rings */ + qlcnic_set_tx_ring_count(adapter, tx_cnt); + qlcnic_set_sds_ring_count(adapter, rx_cnt); +} + +int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err = 0; + + adapter->rx_mac_learn = false; + ahw->msix_supported = !!qlcnic_use_msi_x; + + /* Check if POST needs to be run */ + switch (qlcnic_load_fw_file) { + case 2: + ahw->post_mode = QLC_83XX_POST_FAST_MODE; + ahw->run_post = true; + break; + case 3: + ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE; + ahw->run_post = true; + break; + case 4: + ahw->post_mode = QLC_83XX_POST_SLOW_MODE; + ahw->run_post = true; + break; + default: + ahw->run_post = false; + break; + } + + qlcnic_83xx_init_rings(adapter); + + err = qlcnic_83xx_init_mailbox_work(adapter); + if (err) + goto exit; + + if (qlcnic_sriov_vf_check(adapter)) { + err = qlcnic_sriov_vf_init(adapter, pci_using_dac); + if (err) + goto detach_mbx; + else + return err; + } + + if (qlcnic_83xx_read_flash_descriptor_table(adapter) || + qlcnic_83xx_read_flash_mfg_id(adapter)) { + dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n"); + err = -ENOTRECOVERABLE; + goto detach_mbx; + } + + err = qlcnic_83xx_check_hw_status(adapter); + if (err) + goto detach_mbx; + + err = qlcnic_83xx_get_fw_info(adapter); + if (err) + goto detach_mbx; + + err = qlcnic_83xx_idc_init(adapter); + if (err) + goto detach_mbx; + + err = qlcnic_setup_intr(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); + goto disable_intr; + } + + INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); + + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) + goto disable_mbx_intr; + + qlcnic_83xx_clear_function_resources(adapter); + qlcnic_dcb_enable(adapter->dcb); + qlcnic_83xx_initialize_nic(adapter, 1); + qlcnic_dcb_get_info(adapter->dcb); + + /* Configure default, SR-IOV or Virtual NIC mode of operation */ + err = qlcnic_83xx_configure_opmode(adapter); + if (err) + goto disable_mbx_intr; + + + /* Perform operating mode specific initialization */ + err = adapter->nic_ops->init_driver(adapter); + if (err) + goto disable_mbx_intr; + + /* Periodically monitor device status */ + qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work); + return 0; + +disable_mbx_intr: + qlcnic_83xx_free_mbx_intr(adapter); + +disable_intr: + qlcnic_teardown_intr(adapter); + +detach_mbx: + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_free_mailbox(ahw->mailbox); + ahw->mailbox = NULL; +exit: + return err; +} + +void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + + 
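/*
 * Aside (illustrative, not part of the driver): the error paths in
 * qlcnic_83xx_init() above use the usual goto-unwind idiom: each setup
 * step that can fail jumps to a label that tears down only what has
 * already been set up, in reverse order.  A minimal sketch of the
 * shape; setup_a/b/c and teardown_a/b are placeholders, not driver
 * symbols:
 *
 *   extern int  setup_a(void), setup_b(void), setup_c(void);
 *   extern void teardown_a(void), teardown_b(void);
 *
 *   static int init_sequence(void)
 *   {
 *           int err;
 *
 *           err = setup_a();
 *           if (err)
 *                   goto out;
 *           err = setup_b();
 *           if (err)
 *                   goto undo_a;
 *           err = setup_c();
 *           if (err)
 *                   goto undo_b;
 *           return 0;
 *
 *   undo_b:
 *           teardown_b();
 *   undo_a:
 *           teardown_a();
 *   out:
 *           return err;
 *   }
 */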
clear_bit(QLC_83XX_MBX_READY, &idc->status); + cancel_delayed_work_sync(&adapter->fw_work); + + if (ahw->nic_mode == QLCNIC_VNIC_MODE) + qlcnic_83xx_disable_vnic_mode(adapter, 1); + + qlcnic_83xx_idc_detach_driver(adapter); + qlcnic_83xx_initialize_nic(adapter, 0); + + cancel_delayed_work_sync(&adapter->idc_aen_work); +} + +int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + int ret = 0; + u32 owner; + + /* Mark the previous IDC state as NEED_RESET so + * that state_entry() will perform the reattachment + * and bringup the device + */ + idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET; + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (ahw->pci_func == owner) { + ret = qlcnic_83xx_restart_hw(adapter); + if (ret < 0) + return ret; + qlcnic_83xx_idc_clear_registers(adapter, 0); + } + + ret = idc->state_entry(adapter); + return ret; +} + +void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + u32 owner; + + idc->prev_state = QLC_83XX_IDC_DEV_READY; + owner = qlcnic_83xx_idc_find_reset_owner_id(adapter); + if (ahw->pci_func == owner) + qlcnic_83xx_idc_enter_ready_state(adapter, 0); + + qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c new file mode 100644 index 000000000..be7d7a62c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -0,0 +1,284 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic.h" +#include "qlcnic_hw.h" + +static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock) +{ + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER); + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (lock) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + } + + QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER); + ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; + + if (lock) + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter) +{ + u8 id; + int ret = -EBUSY; + u32 data = QLCNIC_MGMT_FUNC; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (qlcnic_83xx_lock_driver(adapter)) + return ret; + + id = ahw->pci_func; + data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) | + QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id); + + QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data); + + qlcnic_83xx_unlock_driver(adapter); + + return 0; +} + +static void +qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (ahw->port_type == QLCNIC_XGBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + + } else if (ahw->port_type == QLCNIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; + } + adapter->num_txd = MAX_CMD_DESCRIPTORS; + adapter->max_rds_rings = MAX_RDS_RINGS; +} + + +/** + * qlcnic_83xx_init_mgmt_vnic + * + * @adapter: adapter structure + * Management virtual NIC sets the operational mode of other vNIC's and + * configures embedded switch (ESWITCH). + * Returns: Success(0) or error code. + * + **/ +static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + struct qlcnic_npar_info *npar; + int i, err = -EIO; + + qlcnic_83xx_get_minidump_template(adapter); + + if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) { + if (qlcnic_init_pci_info(adapter)) + return err; + + npar = adapter->npars; + + for (i = 0; i < ahw->total_nic_func; i++, npar++) { + dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n", + npar->pci_func, npar->active, npar->type, + npar->phy_port, npar->min_bw, npar->max_bw, + npar->mac); + } + + dev_info(dev, "Max functions = %d, active functions = %d\n", + ahw->max_pci_func, ahw->total_nic_func); + + if (qlcnic_83xx_set_vnic_opmode(adapter)) + return err; + + if (qlcnic_set_default_offload_settings(adapter)) + return err; + } else { + if (qlcnic_reset_npar_config(adapter)) + return err; + } + + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_vnic_buff_descriptors(adapter); + ahw->msix_supported = qlcnic_use_msi_x ? 
1 : 0; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + qlcnic_83xx_enable_vnic_mode(adapter, 1); + + dev_info(dev, "HAL Version: %d, Management function\n", + ahw->fw_hal_version); + + return 0; +} + +static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + qlcnic_83xx_get_minidump_template(adapter); + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_vnic_buff_descriptors(adapter); + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + dev_info(&adapter->pdev->dev, + "HAL Version: %d, Privileged function\n", + adapter->ahw->fw_hal_version); + return 0; +} + +static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter) +{ + int err = -EIO; + + qlcnic_83xx_get_fw_version(adapter); + if (qlcnic_set_eswitch_port_config(adapter)) + return err; + + if (qlcnic_83xx_get_port_info(adapter)) + return err; + + qlcnic_83xx_config_vnic_buff_descriptors(adapter); + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n", + adapter->ahw->fw_hal_version); + + return 0; +} + +/** + * qlcnic_83xx_vnic_opmode + * + * @adapter: adapter structure + * Identify virtual NIC operational modes. + * + * Returns: Success(0) or error code. + * + **/ +int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter) +{ + u32 op_mode, priv_level; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_nic_template *nic_ops = adapter->nic_ops; + + qlcnic_get_func_no(adapter); + op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); + + if (op_mode == QLC_83XX_DEFAULT_OPMODE) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, + ahw->pci_func); + switch (priv_level) { + case QLCNIC_NON_PRIV_FUNC: + ahw->op_mode = QLCNIC_NON_PRIV_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic; + break; + case QLCNIC_PRIV_FUNC: + ahw->op_mode = QLCNIC_PRIV_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry; + nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic; + break; + case QLCNIC_MGMT_FUNC: + ahw->op_mode = QLCNIC_MGMT_FUNC; + ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; + nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic; + break; + default: + dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n"); + return -EIO; + } + + if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) { + adapter->flags |= QLCNIC_ESWITCH_ENABLED; + if (adapter->drv_mac_learn) + adapter->rx_mac_learn = true; + } else { + adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + adapter->rx_mac_learn = false; + } + + ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER; + ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO; + + return 0; +} + +int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlc_83xx_idc *idc = &ahw->idc; + u32 state; + + state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); + while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { + msleep(1000); + state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); + } + + if (!idc->vnic_wait_limit) { + dev_err(&adapter->pdev->dev, + "vNIC mode not operational, state check timed out.\n"); + return -EIO; + } + + return 0; +} + +int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter, + int func, int *port_id) +{ + struct 
qlcnic_info nic_info; + int err = 0; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + + err = qlcnic_get_nic_info(adapter, &nic_info, func); + if (err) + return err; + + if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY) + *port_id = nic_info.phys_port; + else + err = -EIO; + + if (!err) + adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE; + + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c new file mode 100644 index 000000000..6e6f18fc5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -0,0 +1,1428 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include "qlcnic.h" + +static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { + {QLCNIC_CMD_CREATE_RX_CTX, 4, 1}, + {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, + {QLCNIC_CMD_CREATE_TX_CTX, 4, 1}, + {QLCNIC_CMD_DESTROY_TX_CTX, 3, 1}, + {QLCNIC_CMD_INTRPT_TEST, 4, 1}, + {QLCNIC_CMD_SET_MTU, 4, 1}, + {QLCNIC_CMD_READ_PHY, 4, 2}, + {QLCNIC_CMD_WRITE_PHY, 5, 1}, + {QLCNIC_CMD_READ_HW_REG, 4, 1}, + {QLCNIC_CMD_GET_FLOW_CTL, 4, 2}, + {QLCNIC_CMD_SET_FLOW_CTL, 4, 1}, + {QLCNIC_CMD_READ_MAX_MTU, 4, 2}, + {QLCNIC_CMD_READ_MAX_LRO, 4, 2}, + {QLCNIC_CMD_MAC_ADDRESS, 4, 3}, + {QLCNIC_CMD_GET_PCI_INFO, 4, 1}, + {QLCNIC_CMD_GET_NIC_INFO, 4, 1}, + {QLCNIC_CMD_SET_NIC_INFO, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3}, + {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3}, + {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1}, + {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, + {QLCNIC_CMD_GET_MAC_STATS, 4, 1}, + {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, + {QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1}, + {QLCNIC_CMD_CONFIG_PORT, 4, 1}, + {QLCNIC_CMD_TEMP_SIZE, 4, 4}, + {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, + {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, + {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, + {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3}, + {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, + {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1}, +}; + +static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) +{ + return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) | + (0xcafe << 16); +} + +/* Allocate mailbox registers */ +int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, + struct qlcnic_adapter *adapter, u32 type) +{ + int i, size; + const struct qlcnic_mailbox_metadata *mbx_tbl; + + mbx_tbl = qlcnic_mbx_tbl; + size = ARRAY_SIZE(qlcnic_mbx_tbl); + for (i = 0; i < size; i++) { + if (type == mbx_tbl[i].cmd) { + mbx->req.num = mbx_tbl[i].in_args; + mbx->rsp.num = mbx_tbl[i].out_args; + mbx->req.arg = kcalloc(mbx->req.num, + sizeof(u32), GFP_ATOMIC); + if (!mbx->req.arg) + return -ENOMEM; + mbx->rsp.arg = kcalloc(mbx->rsp.num, + sizeof(u32), GFP_ATOMIC); + if (!mbx->rsp.arg) { + kfree(mbx->req.arg); + mbx->req.arg = NULL; + return -ENOMEM; + } + memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); + memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); + mbx->req.arg[0] = type; + break; + } + } + return 0; +} + +/* Free up mailbox registers */ +void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd) +{ + kfree(cmd->req.arg); + cmd->req.arg = NULL; + kfree(cmd->rsp.arg); + cmd->rsp.arg = NULL; +} + +static u32 +qlcnic_poll_rsp(struct qlcnic_adapter *adapter) +{ + u32 rsp; + int timeout = 0, err = 0; + + do { + /* give atleast 1ms for firmware to respond */ + mdelay(1); + + if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) + return QLCNIC_CDRP_RSP_TIMEOUT; + + rsp = 
QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err); + } while (!QLCNIC_CDRP_IS_RSP(rsp)); + + return rsp; +} + +int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + int i, err = 0; + u32 rsp; + u32 signature; + struct pci_dev *pdev = adapter->pdev; + struct qlcnic_hardware_context *ahw = adapter->ahw; + const char *fmt; + + signature = qlcnic_get_cmd_signature(ahw); + + /* Acquire semaphore before accessing CRB */ + if (qlcnic_api_lock(adapter)) { + cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; + return cmd->rsp.arg[0]; + } + + QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); + for (i = 1; i < cmd->req.num; i++) + QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]); + QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, + QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0])); + rsp = qlcnic_poll_rsp(adapter); + + if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { + dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp); + cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; + } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { + cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err); + switch (cmd->rsp.arg[0]) { + case QLCNIC_RCODE_INVALID_ARGS: + fmt = "CDRP invalid args: [%d]\n"; + break; + case QLCNIC_RCODE_NOT_SUPPORTED: + case QLCNIC_RCODE_NOT_IMPL: + fmt = "CDRP command not supported: [%d]\n"; + break; + case QLCNIC_RCODE_NOT_PERMITTED: + fmt = "CDRP requested action not permitted: [%d]\n"; + break; + case QLCNIC_RCODE_INVALID: + fmt = "CDRP invalid or unknown cmd received: [%d]\n"; + break; + case QLCNIC_RCODE_TIMEOUT: + fmt = "CDRP command timeout: [%d]\n"; + break; + default: + fmt = "CDRP command failed: [%d]\n"; + break; + } + dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); + qlcnic_dump_mbx(adapter, cmd); + } else if (rsp == QLCNIC_CDRP_RSP_OK) + cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; + + for (i = 1; i < cmd->rsp.num; i++) + cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err); + + /* Release semaphore */ + qlcnic_api_unlock(adapter); + return cmd->rsp.arg[0]; +} + +int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd) +{ + struct qlcnic_cmd_args cmd; + u32 arg1, arg2, arg3; + char drv_string[12]; + int err = 0; + + memset(drv_string, 0, sizeof(drv_string)); + snprintf(drv_string, sizeof(drv_string), "%d"".""%d"".""%d", + _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, + _QLCNIC_LINUX_SUBVERSION); + + err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd); + if (err) + return err; + + memcpy(&arg1, drv_string, sizeof(u32)); + memcpy(&arg2, drv_string + 4, sizeof(u32)); + memcpy(&arg3, drv_string + 8, sizeof(u32)); + + cmd.req.arg[1] = arg1; + cmd.req.arg[2] = arg2; + cmd.req.arg[3] = arg3; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_info(&adapter->pdev->dev, + "Failed to set driver version in firmware\n"); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + return err; +} + +int +qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) +{ + int err = 0; + struct qlcnic_cmd_args cmd; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE) + return err; + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); + if (err) + return err; + + cmd.req.arg[1] = recv_ctx->context_id; + cmd.req.arg[2] = mtu; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx 
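/*
 * Aside (illustrative, not part of the driver): the receive-context
 * request built below is a fixed header followed by nrds RDS-ring
 * descriptors and then nsds SDS-ring descriptors; the two offsets
 * stored in the header tell the firmware where each array starts.  A
 * minimal sketch of that sizing, with hypothetical types:
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   struct rds_desc { uint64_t phys; uint32_t size, kind, buff; };
 *   struct sds_desc { uint64_t phys; uint32_t size; uint16_t msi; };
 *
 *   struct rx_request {
 *           uint32_t rds_offset;   // byte offset of first rds_desc
 *           uint32_t sds_offset;   // byte offset of first sds_desc
 *           char     data[];       // descriptor arrays follow
 *   };
 *
 *   static size_t rx_request_size(unsigned int nrds, unsigned int nsds)
 *   {
 *           return sizeof(struct rx_request) +
 *                  nrds * sizeof(struct rds_desc) +
 *                  nsds * sizeof(struct sds_desc);
 *   }
 *
 * The offsets relative to data[] would then be 0 for the RDS array and
 * nrds * sizeof(struct rds_desc) for the SDS array, matching how
 * rds_ring_offset and sds_ring_offset are filled in below.
 */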
= adapter->recv_ctx; + struct qlcnic_hardware_context *ahw = adapter->ahw; + dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; + struct net_device *netdev = adapter->netdev; + u32 temp_intr_crb_mode, temp_rds_crb_mode; + struct qlcnic_cardrsp_rds_ring *prsp_rds; + struct qlcnic_cardrsp_sds_ring *prsp_sds; + struct qlcnic_hostrq_rds_ring *prq_rds; + struct qlcnic_hostrq_sds_ring *prq_sds; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_cardrsp_rx_ctx *prsp; + struct qlcnic_hostrq_rx_ctx *prq; + u8 i, nrds_rings, nsds_rings; + struct qlcnic_cmd_args cmd; + size_t rq_size, rsp_size; + u32 cap, reg, val, reg2; + u64 phys_addr; + u16 temp_u16; + void *addr; + int err; + + nrds_rings = adapter->max_rds_rings; + nsds_rings = adapter->drv_sds_rings; + + rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, + nsds_rings); + rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, + nsds_rings); + + addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, + &hostrq_phys_addr, GFP_KERNEL); + if (addr == NULL) + return -ENOMEM; + prq = addr; + + addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, + &cardrsp_phys_addr, GFP_KERNEL); + if (addr == NULL) { + err = -ENOMEM; + goto out_free_rq; + } + prsp = addr; + + prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); + + cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN + | QLCNIC_CAP0_VALIDOFF); + cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS); + + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) { + cap |= QLCNIC_CAP0_TX_MULTI; + } else { + temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler); + prq->valid_field_offset = cpu_to_le16(temp_u16); + prq->txrx_sds_binding = nsds_rings - 1; + temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED; + prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode); + temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE; + prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode); + } + + prq->capabilities[0] = cpu_to_le32(cap); + + prq->num_rds_rings = cpu_to_le16(nrds_rings); + prq->num_sds_rings = cpu_to_le16(nsds_rings); + prq->rds_ring_offset = 0; + + val = le32_to_cpu(prq->rds_ring_offset) + + (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings); + prq->sds_ring_offset = cpu_to_le32(val); + + prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data + + le32_to_cpu(prq->rds_ring_offset)); + + for (i = 0; i < nrds_rings; i++) { + rds_ring = &recv_ctx->rds_rings[i]; + rds_ring->producer = 0; + prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); + prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc); + prq_rds[i].ring_kind = cpu_to_le32(i); + prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); + } + + prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data + + le32_to_cpu(prq->sds_ring_offset)); + + for (i = 0; i < nsds_rings; i++) { + sds_ring = &recv_ctx->sds_rings[i]; + sds_ring->consumer = 0; + memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring)); + prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); + prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) + prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id); + else + prq_sds[i].msi_index = cpu_to_le16(i); + } + + phys_addr = hostrq_phys_addr; + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX); + if (err) + goto out_free_rsp; + + cmd.req.arg[1] = MSD(phys_addr); + cmd.req.arg[2] = LSD(phys_addr); + 
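/*
 * Aside (illustrative, not part of the driver): the 64-bit DMA address
 * of the request buffer does not fit in a single 32-bit mailbox
 * argument, so it is split across two: MSD() passes bits 63:32 and
 * LSD() bits 31:0, which the firmware reassembles.  A minimal sketch of
 * that split, with hypothetical names:
 *
 *   #include <stdint.h>
 *
 *   static uint32_t addr_hi(uint64_t addr) { return (uint32_t)(addr >> 32); }
 *   static uint32_t addr_lo(uint64_t addr) { return (uint32_t)(addr & 0xffffffffu); }
 *
 *   // e.g. addr_hi(0x0000000123456000ull) == 0x1,
 *   //      addr_lo(0x0000000123456000ull) == 0x23456000
 */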
cmd.req.arg[3] = rq_size; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to create rx ctx in firmware%d\n", err); + goto out_free_rsp; + } + + prsp_rds = ((struct qlcnic_cardrsp_rds_ring *) + &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) { + rds_ring = &recv_ctx->rds_rings[i]; + reg = le32_to_cpu(prsp_rds[i].host_producer_crb); + rds_ring->crb_rcv_producer = ahw->pci_base0 + reg; + } + + prsp_sds = ((struct qlcnic_cardrsp_sds_ring *) + &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); + + for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { + sds_ring = &recv_ctx->sds_rings[i]; + reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) + reg2 = ahw->intr_tbl[i].src; + else + reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb); + + sds_ring->crb_intr_mask = ahw->pci_base0 + reg2; + sds_ring->crb_sts_consumer = ahw->pci_base0 + reg; + } + + recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); + recv_ctx->context_id = le16_to_cpu(prsp->context_id); + recv_ctx->virt_port = prsp->virt_port; + + netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n", + recv_ctx->context_id, recv_ctx->state); + qlcnic_free_mbx_args(&cmd); + +out_free_rsp: + dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp, + cardrsp_phys_addr); +out_free_rq: + dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr); + + return err; +} + +void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter) +{ + int err; + struct qlcnic_cmd_args cmd; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX); + if (err) + return; + + cmd.req.arg[1] = recv_ctx->context_id; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to destroy rx ctx in firmware\n"); + + recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED; + qlcnic_free_mbx_args(&cmd); +} + +int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring, + int ring) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; + struct qlcnic_hostrq_tx_ctx *prq; + struct qlcnic_hostrq_cds_ring *prq_cds; + struct qlcnic_cardrsp_tx_ctx *prsp; + struct qlcnic_cmd_args cmd; + u32 temp, intr_mask, temp_int_crb_mode; + dma_addr_t rq_phys_addr, rsp_phys_addr; + int temp_nsds_rings, index, err; + void *rq_addr, *rsp_addr; + size_t rq_size, rsp_size; + u64 phys_addr; + u16 msix_id; + + /* reset host resources */ + tx_ring->producer = 0; + tx_ring->sw_consumer = 0; + *(tx_ring->hw_consumer) = 0; + + rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); + rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, + &rq_phys_addr, GFP_KERNEL); + if (!rq_addr) + return -ENOMEM; + + rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); + rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, + &rsp_phys_addr, GFP_KERNEL); + if (!rsp_addr) { + err = -ENOMEM; + goto out_free_rq; + } + + prq = rq_addr; + prsp = rsp_addr; + + prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); + + temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | + QLCNIC_CAP0_LSO); + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) + temp |= QLCNIC_CAP0_TX_MULTI; + + prq->capabilities[0] = cpu_to_le32(temp); + + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) { + temp_nsds_rings = 
adapter->drv_sds_rings; + index = temp_nsds_rings + ring; + msix_id = ahw->intr_tbl[index].id; + prq->msi_index = cpu_to_le16(msix_id); + } else { + temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED; + prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode); + prq->msi_index = 0; + } + + prq->interrupt_ctl = 0; + prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); + + prq_cds = &prq->cds_ring; + + prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); + prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); + + phys_addr = rq_phys_addr; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); + if (err) + goto out_free_rsp; + + cmd.req.arg[1] = MSD(phys_addr); + cmd.req.arg[2] = LSD(phys_addr); + cmd.req.arg[3] = rq_size; + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err == QLCNIC_RCODE_SUCCESS) { + tx_ring->state = le32_to_cpu(prsp->host_ctx_state); + temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); + tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; + tx_ring->ctx_id = le16_to_cpu(prsp->context_id); + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test && + (adapter->flags & QLCNIC_MSIX_ENABLED)) { + index = adapter->drv_sds_rings + ring; + intr_mask = ahw->intr_tbl[index].src; + tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask; + } + + netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n", + tx_ring->ctx_id, tx_ring->state); + } else { + netdev_err(netdev, "Failed to create tx ctx in firmware%d\n", + err); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + +out_free_rsp: + dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, + rsp_phys_addr); +out_free_rq: + dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); + + return err; +} + +void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + struct qlcnic_cmd_args cmd; + int ret; + + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); + if (ret) + return; + + cmd.req.arg[1] = tx_ring->ctx_id; + if (qlcnic_issue_cmd(adapter, &cmd)) + dev_err(&adapter->pdev->dev, + "Failed to destroy tx ctx in firmware\n"); + qlcnic_free_mbx_args(&cmd); +} + +int +qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) +{ + int err; + struct qlcnic_cmd_args cmd; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); + if (err) + return err; + + cmd.req.arg[1] = config; + err = qlcnic_issue_cmd(adapter, &cmd); + qlcnic_free_mbx_args(&cmd); + return err; +} + +int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) +{ + void *addr; + int err, ring; + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + __le32 *ptr; + + struct pci_dev *pdev = adapter->pdev; + + recv_ctx = adapter->recv_ctx; + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), + &tx_ring->hw_cons_phys_addr, + GFP_KERNEL); + if (ptr == NULL) + return -ENOMEM; + + tx_ring->hw_consumer = ptr; + /* cmd desc ring */ + addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), + &tx_ring->phys_addr, + GFP_KERNEL); + if (addr == NULL) { + err = -ENOMEM; + goto err_out_free; + } + + tx_ring->desc_head = addr; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + addr = dma_alloc_coherent(&adapter->pdev->dev, + 
RCV_DESC_RINGSIZE(rds_ring), + &rds_ring->phys_addr, GFP_KERNEL); + if (addr == NULL) { + err = -ENOMEM; + goto err_out_free; + } + rds_ring->desc_head = addr; + + } + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + addr = dma_alloc_coherent(&adapter->pdev->dev, + STATUS_DESC_RINGSIZE(sds_ring), + &sds_ring->phys_addr, GFP_KERNEL); + if (addr == NULL) { + err = -ENOMEM; + goto err_out_free; + } + sds_ring->desc_head = addr; + } + + return 0; + +err_out_free: + qlcnic_free_hw_resources(adapter); + return err; +} + +int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) +{ + int i, err, ring; + + if (dev->flags & QLCNIC_NEED_FLR) { + pci_reset_function(dev->pdev); + dev->flags &= ~QLCNIC_NEED_FLR; + } + + if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { + if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { + err = qlcnic_83xx_config_intrpt(dev, 1); + if (err) + return err; + } + } + + if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && + qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) { + err = qlcnic_82xx_mq_intrpt(dev, 1); + if (err) + return err; + } + + err = qlcnic_fw_cmd_create_rx_ctx(dev); + if (err) + goto err_out; + + for (ring = 0; ring < dev->drv_tx_rings; ring++) { + err = qlcnic_fw_cmd_create_tx_ctx(dev, + &dev->tx_ring[ring], + ring); + if (err) { + qlcnic_fw_cmd_del_rx_ctx(dev); + if (ring == 0) + goto err_out; + + for (i = 0; i < ring; i++) + qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]); + + goto err_out; + } + } + + set_bit(__QLCNIC_FW_ATTACHED, &dev->state); + + return 0; + +err_out: + if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && + qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) + qlcnic_82xx_config_intrpt(dev, 0); + + if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { + if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + qlcnic_83xx_config_intrpt(dev, 0); + } + + return err; +} + +void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) +{ + int ring; + + if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { + qlcnic_fw_cmd_del_rx_ctx(adapter); + for (ring = 0; ring < adapter->drv_tx_rings; ring++) + qlcnic_fw_cmd_del_tx_ctx(adapter, + &adapter->tx_ring[ring]); + + if (qlcnic_82xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED) && + qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) + qlcnic_82xx_config_intrpt(adapter, 0); + + if (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED)) { + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) + qlcnic_83xx_config_intrpt(adapter, 0); + } + /* Allow dma queues to drain after context reset */ + mdelay(20); + } +} + +void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + recv_ctx = adapter->recv_ctx; + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + if (tx_ring->hw_consumer != NULL) { + dma_free_coherent(&adapter->pdev->dev, sizeof(u32), + tx_ring->hw_consumer, + tx_ring->hw_cons_phys_addr); + + tx_ring->hw_consumer = NULL; + } + + if (tx_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + TX_DESC_RINGSIZE(tx_ring), + tx_ring->desc_head, + tx_ring->phys_addr); + tx_ring->desc_head = NULL; + } + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; 
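+		/* Descriptive note (added): free the receive descriptor ring
+		 * only if it was allocated, and clear the pointer so a
+		 * repeated teardown is harmless.
+		 */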
+ + if (rds_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + RCV_DESC_RINGSIZE(rds_ring), + rds_ring->desc_head, + rds_ring->phys_addr); + rds_ring->desc_head = NULL; + } + } + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + + if (sds_ring->desc_head != NULL) { + dma_free_coherent(&adapter->pdev->dev, + STATUS_DESC_RINGSIZE(sds_ring), + sds_ring->desc_head, + sds_ring->phys_addr); + sds_ring->desc_head = NULL; + } + } +} + +int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct net_device *netdev = adapter->netdev; + struct qlcnic_cmd_args cmd; + u32 type, val; + int i, err = 0; + + for (i = 0; i < ahw->num_msix; i++) { + qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_MQ_TX_CONFIG_INTR); + type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; + val = type | (ahw->intr_tbl[i].type << 4); + if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) + val |= (ahw->intr_tbl[i].id << 16); + cmd.req.arg[1] = val; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + netdev_err(netdev, "Failed to %s interrupts %d\n", + op_type == QLCNIC_INTRPT_ADD ? "Add" : + "Delete", err); + qlcnic_free_mbx_args(&cmd); + return err; + } + val = cmd.rsp.arg[1]; + if (LSB(val)) { + netdev_info(netdev, + "failed to configure interrupt for %d\n", + ahw->intr_tbl[i].id); + continue; + } + if (op_type) { + ahw->intr_tbl[i].id = MSW(val); + ahw->intr_tbl[i].enabled = 1; + ahw->intr_tbl[i].src = cmd.rsp.arg[2]; + } else { + ahw->intr_tbl[i].id = i; + ahw->intr_tbl[i].enabled = 0; + ahw->intr_tbl[i].src = 0; + } + qlcnic_free_mbx_args(&cmd); + } + + return err; +} + +int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, + u8 function) +{ + int err, i; + struct qlcnic_cmd_args cmd; + u32 mac_low, mac_high; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); + if (err) + return err; + + cmd.req.arg[1] = function | BIT_8; + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err == QLCNIC_RCODE_SUCCESS) { + mac_low = cmd.rsp.arg[1]; + mac_high = cmd.rsp.arg[2]; + + for (i = 0; i < 2; i++) + mac[i] = (u8) (mac_high >> ((1 - i) * 8)); + for (i = 2; i < 6; i++) + mac[i] = (u8) (mac_low >> ((5 - i) * 8)); + } else { + dev_err(&adapter->pdev->dev, + "Failed to get mac address%d\n", err); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); + return err; +} + +/* Get info of a NIC partition */ +int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, u8 func_id) +{ + int err; + dma_addr_t nic_dma_t; + const struct qlcnic_info_le *nic_info; + void *nic_info_addr; + struct qlcnic_cmd_args cmd; + size_t nic_size = sizeof(struct qlcnic_info_le); + + nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); + if (!nic_info_addr) + return -ENOMEM; + + nic_info = nic_info_addr; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (err) + goto out_free_dma; + + cmd.req.arg[1] = MSD(nic_dma_t); + cmd.req.arg[2] = LSD(nic_dma_t); + cmd.req.arg[3] = (func_id << 16 | nic_size); + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to get nic info%d\n", err); + err = -EIO; + } else { + npar_info->pci_func = le16_to_cpu(nic_info->pci_func); + npar_info->op_mode = le16_to_cpu(nic_info->op_mode); + npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw); + npar_info->max_tx_bw = 
le16_to_cpu(nic_info->max_tx_bw); + npar_info->phys_port = le16_to_cpu(nic_info->phys_port); + npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode); + npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques); + npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); + npar_info->capabilities = le32_to_cpu(nic_info->capabilities); + npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); + } + + qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); + + return err; +} + +/* Configure a NIC partition */ +int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *nic) +{ + int err = -EIO; + dma_addr_t nic_dma_t; + void *nic_info_addr; + struct qlcnic_cmd_args cmd; + struct qlcnic_info_le *nic_info; + size_t nic_size = sizeof(struct qlcnic_info_le); + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) + return err; + + nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, + &nic_dma_t, GFP_KERNEL); + if (!nic_info_addr) + return -ENOMEM; + + nic_info = nic_info_addr; + + nic_info->pci_func = cpu_to_le16(nic->pci_func); + nic_info->op_mode = cpu_to_le16(nic->op_mode); + nic_info->phys_port = cpu_to_le16(nic->phys_port); + nic_info->switch_mode = cpu_to_le16(nic->switch_mode); + nic_info->capabilities = cpu_to_le32(nic->capabilities); + nic_info->max_mac_filters = nic->max_mac_filters; + nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques); + nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques); + nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw); + nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw); + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + goto out_free_dma; + + cmd.req.arg[1] = MSD(nic_dma_t); + cmd.req.arg[2] = LSD(nic_dma_t); + cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size); + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to set nic info%d\n", err); + err = -EIO; + } + + qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr, + nic_dma_t); + + return err; +} + +/* Get PCI Info of a partition */ +int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter, + struct qlcnic_pci_info *pci_info) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + size_t npar_size = sizeof(struct qlcnic_pci_info_le); + size_t pci_size = npar_size * ahw->max_vnic_func; + u16 nic = 0, fcoe = 0, iscsi = 0; + struct qlcnic_pci_info_le *npar; + struct qlcnic_cmd_args cmd; + dma_addr_t pci_info_dma_t; + void *pci_info_addr; + int err = 0, i; + + pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, + &pci_info_dma_t, GFP_KERNEL); + if (!pci_info_addr) + return -ENOMEM; + + npar = pci_info_addr; + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); + if (err) + goto out_free_dma; + + cmd.req.arg[1] = MSD(pci_info_dma_t); + cmd.req.arg[2] = LSD(pci_info_dma_t); + cmd.req.arg[3] = pci_size; + err = qlcnic_issue_cmd(adapter, &cmd); + + ahw->total_nic_func = 0; + if (err == QLCNIC_RCODE_SUCCESS) { + for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) { + pci_info->id = le16_to_cpu(npar->id); + pci_info->active = le16_to_cpu(npar->active); + if (!pci_info->active) + continue; + pci_info->type = le16_to_cpu(npar->type); + err = qlcnic_get_pci_func_type(adapter, pci_info->type, + &nic, &fcoe, &iscsi); + pci_info->default_port = + le16_to_cpu(npar->default_port); + pci_info->tx_min_bw = + 
le16_to_cpu(npar->tx_min_bw); + pci_info->tx_max_bw = + le16_to_cpu(npar->tx_max_bw); + memcpy(pci_info->mac, npar->mac, ETH_ALEN); + } + } else { + dev_err(&adapter->pdev->dev, + "Failed to get PCI Info%d\n", err); + err = -EIO; + } + + ahw->total_nic_func = nic; + ahw->total_pci_func = nic + fcoe + iscsi; + if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) { + dev_err(&adapter->pdev->dev, + "%s: Invalid function count: total nic func[%x], total pci func[%x]\n", + __func__, ahw->total_nic_func, ahw->total_pci_func); + err = -EIO; + } + qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, + pci_info_dma_t); + + return err; +} + +/* Configure eSwitch for port mirroring */ +int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, + u8 enable_mirroring, u8 pci_func) +{ + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; + int err = -EIO; + u32 arg1; + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || + !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) { + dev_err(&adapter->pdev->dev, "%s: Not a management function\n", + __func__); + return err; + } + + arg1 = id | (enable_mirroring ? BIT_4 : 0); + arg1 |= pci_func << 8; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_SET_PORTMIRRORING); + if (err) + return err; + + cmd.req.arg[1] = arg1; + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) + dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n", + pci_func, id); + else + dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n", + pci_func, id); + qlcnic_free_mbx_args(&cmd); + + return err; +} + +int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, + const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { + + size_t stats_size = sizeof(struct qlcnic_esw_stats_le); + struct qlcnic_esw_stats_le *stats; + dma_addr_t stats_dma_t; + void *stats_addr; + u32 arg1; + struct qlcnic_cmd_args cmd; + int err; + + if (esw_stats == NULL) + return -ENOMEM; + + if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) && + (func != adapter->ahw->pci_func)) { + dev_err(&adapter->pdev->dev, + "Not privilege to query stats for func=%d", func); + return -EIO; + } + + stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); + if (!stats_addr) + return -ENOMEM; + + arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; + arg1 |= rx_tx << 15 | stats_size << 16; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_STATS); + if (err) + goto out_free_dma; + + cmd.req.arg[1] = arg1; + cmd.req.arg[2] = MSD(stats_dma_t); + cmd.req.arg[3] = LSD(stats_dma_t); + err = qlcnic_issue_cmd(adapter, &cmd); + + if (!err) { + stats = stats_addr; + esw_stats->context_id = le16_to_cpu(stats->context_id); + esw_stats->version = le16_to_cpu(stats->version); + esw_stats->size = le16_to_cpu(stats->size); + esw_stats->multicast_frames = + le64_to_cpu(stats->multicast_frames); + esw_stats->broadcast_frames = + le64_to_cpu(stats->broadcast_frames); + esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames); + esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames); + esw_stats->local_frames = le64_to_cpu(stats->local_frames); + esw_stats->errors = le64_to_cpu(stats->errors); + esw_stats->numbytes = le64_to_cpu(stats->numbytes); + } + + qlcnic_free_mbx_args(&cmd); +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + 
stats_dma_t); + + return err; +} + +/* This routine will retrieve the MAC statistics from firmware */ +int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, + struct qlcnic_mac_statistics *mac_stats) +{ + struct qlcnic_mac_statistics_le *stats; + struct qlcnic_cmd_args cmd; + size_t stats_size = sizeof(struct qlcnic_mac_statistics_le); + dma_addr_t stats_dma_t; + void *stats_addr; + int err; + + if (mac_stats == NULL) + return -ENOMEM; + + stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, + &stats_dma_t, GFP_KERNEL); + if (!stats_addr) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); + if (err) + goto out_free_dma; + + cmd.req.arg[1] = stats_size << 16; + cmd.req.arg[2] = MSD(stats_dma_t); + cmd.req.arg[3] = LSD(stats_dma_t); + err = qlcnic_issue_cmd(adapter, &cmd); + if (!err) { + stats = stats_addr; + mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames); + mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes); + mac_stats->mac_tx_mcast_pkts = + le64_to_cpu(stats->mac_tx_mcast_pkts); + mac_stats->mac_tx_bcast_pkts = + le64_to_cpu(stats->mac_tx_bcast_pkts); + mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames); + mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes); + mac_stats->mac_rx_mcast_pkts = + le64_to_cpu(stats->mac_rx_mcast_pkts); + mac_stats->mac_rx_length_error = + le64_to_cpu(stats->mac_rx_length_error); + mac_stats->mac_rx_length_small = + le64_to_cpu(stats->mac_rx_length_small); + mac_stats->mac_rx_length_large = + le64_to_cpu(stats->mac_rx_length_large); + mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber); + mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped); + mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error); + } else { + dev_err(&adapter->pdev->dev, + "%s: Get mac stats failed, err=%d.\n", __func__, err); + } + + qlcnic_free_mbx_args(&cmd); + +out_free_dma: + dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, + stats_dma_t); + + return err; +} + +int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, + const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { + + struct __qlcnic_esw_statistics port_stats; + u8 i; + int ret = -EIO; + + if (esw_stats == NULL) + return -ENOMEM; + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) + return -EIO; + if (adapter->npars == NULL) + return -EIO; + + memset(esw_stats, 0, sizeof(u64)); + esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->errors = QLCNIC_STATS_NOT_AVAIL; + esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL; + esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL; + esw_stats->context_id = eswitch; + + for (i = 0; i < adapter->ahw->total_nic_func; i++) { + if (adapter->npars[i].phy_port != eswitch) + continue; + + memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); + if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func, + rx_tx, &port_stats)) + continue; + + esw_stats->size = port_stats.size; + esw_stats->version = port_stats.version; + QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames, + port_stats.unicast_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames, + port_stats.multicast_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames, + port_stats.broadcast_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames, + port_stats.dropped_frames); + 
QLCNIC_ADD_ESW_STATS(esw_stats->errors, + port_stats.errors); + QLCNIC_ADD_ESW_STATS(esw_stats->local_frames, + port_stats.local_frames); + QLCNIC_ADD_ESW_STATS(esw_stats->numbytes, + port_stats.numbytes); + ret = 0; + } + return ret; +} + +int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, + const u8 port, const u8 rx_tx) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_cmd_args cmd; + int err; + u32 arg1; + + if (ahw->op_mode != QLCNIC_MGMT_FUNC) + return -EIO; + + if (func_esw == QLCNIC_STATS_PORT) { + if (port >= ahw->max_vnic_func) + goto err_ret; + } else if (func_esw == QLCNIC_STATS_ESWITCH) { + if (port >= QLCNIC_NIU_MAX_XG_PORTS) + goto err_ret; + } else { + goto err_ret; + } + + if (rx_tx > QLCNIC_QUERY_TX_COUNTER) + goto err_ret; + + arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; + arg1 |= BIT_14 | rx_tx << 15; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_STATS); + if (err) + return err; + + cmd.req.arg[1] = arg1; + err = qlcnic_issue_cmd(adapter, &cmd); + qlcnic_free_mbx_args(&cmd); + return err; + +err_ret: + dev_err(&adapter->pdev->dev, + "Invalid args func_esw %d port %d rx_ctx %d\n", + func_esw, port, rx_tx); + return -EIO; +} + +static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, + u32 *arg1, u32 *arg2) +{ + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; + u8 pci_func = *arg1 >> 8; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); + if (err) + return err; + + cmd.req.arg[1] = *arg1; + err = qlcnic_issue_cmd(adapter, &cmd); + *arg1 = cmd.rsp.arg[1]; + *arg2 = cmd.rsp.arg[2]; + qlcnic_free_mbx_args(&cmd); + + if (err == QLCNIC_RCODE_SUCCESS) + dev_info(dev, "Get eSwitch port config for vNIC function %d\n", + pci_func); + else + dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n", + pci_func); + return err; +} +/* Configure eSwitch port +op_mode = 0 for setting default port behavior +op_mode = 1 for setting vlan id +op_mode = 2 for deleting vlan id +op_type = 0 for vlan_id +op_type = 1 for port vlan_id +*/ +int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; + int err = -EIO, index; + u32 arg1, arg2 = 0; + u8 pci_func; + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { + dev_err(&adapter->pdev->dev, "%s: Not a management function\n", + __func__); + return err; + } + + pci_func = esw_cfg->pci_func; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + if (index < 0) + return err; + arg1 = (adapter->npars[index].phy_port & BIT_0); + arg1 |= (pci_func << 8); + + if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) + return err; + arg1 &= ~(0x0ff << 8); + arg1 |= (pci_func << 8); + arg1 &= ~(BIT_2 | BIT_3); + switch (esw_cfg->op_mode) { + case QLCNIC_PORT_DEFAULTS: + arg1 |= (BIT_4 | BIT_6 | BIT_7); + arg2 |= (BIT_0 | BIT_1); + if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) + arg2 |= (BIT_2 | BIT_3); + if (!(esw_cfg->discard_tagged)) + arg1 &= ~BIT_4; + if (!(esw_cfg->promisc_mode)) + arg1 &= ~BIT_6; + if (!(esw_cfg->mac_override)) + arg1 &= ~BIT_7; + if (!(esw_cfg->mac_anti_spoof)) + arg2 &= ~BIT_0; + if (!(esw_cfg->offload_flags & BIT_0)) + arg2 &= ~(BIT_1 | BIT_2 | BIT_3); + if (!(esw_cfg->offload_flags & BIT_1)) + arg2 &= ~BIT_2; + if (!(esw_cfg->offload_flags & BIT_2)) + arg2 &= ~BIT_3; + break; + case QLCNIC_ADD_VLAN: + arg1 
&= ~(0x0ffff << 16); + arg1 |= (BIT_2 | BIT_5); + arg1 |= (esw_cfg->vlan_id << 16); + break; + case QLCNIC_DEL_VLAN: + arg1 |= (BIT_3 | BIT_5); + arg1 &= ~(0x0ffff << 16); + break; + default: + dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n", + __func__, esw_cfg->op_mode); + return err; + } + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_CONFIGURE_ESWITCH); + if (err) + return err; + + cmd.req.arg[1] = arg1; + cmd.req.arg[2] = arg2; + err = qlcnic_issue_cmd(adapter, &cmd); + qlcnic_free_mbx_args(&cmd); + + if (err != QLCNIC_RCODE_SUCCESS) + dev_err(dev, "Failed to configure eswitch for vNIC function %d\n", + pci_func); + else + dev_info(dev, "Configured eSwitch for vNIC function %d\n", + pci_func); + + return err; +} + +int +qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + u32 arg1, arg2; + int index; + u8 phy_port; + + if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) { + index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func); + if (index < 0) + return -EIO; + phy_port = adapter->npars[index].phy_port; + } else { + phy_port = adapter->ahw->physical_port; + } + arg1 = phy_port; + arg1 |= (esw_cfg->pci_func << 8); + if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) + return -EIO; + + esw_cfg->discard_tagged = !!(arg1 & BIT_4); + esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); + esw_cfg->promisc_mode = !!(arg1 & BIT_6); + esw_cfg->mac_override = !!(arg1 & BIT_7); + esw_cfg->vlan_id = LSW(arg1 >> 16); + esw_cfg->mac_anti_spoof = (arg2 & 0x1); + esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c new file mode 100644 index 000000000..a72bcddf1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -0,0 +1,1147 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include +#include "qlcnic.h" + +#define QLC_DCB_NUM_PARAM 3 +#define QLC_DCB_LOCAL_IDX 0 +#define QLC_DCB_OPER_IDX 1 +#define QLC_DCB_PEER_IDX 2 + +#define QLC_DCB_GET_MAP(V) (1 << V) + +#define QLC_DCB_FW_VER 0x2 +#define QLC_DCB_MAX_TC 0x8 +#define QLC_DCB_MAX_APP 0x8 +#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC +#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC + +#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1) +#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1) +#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf) +#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf) +#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf) +#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf) +#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7) +#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff) +#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff) +#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff) +#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1) +#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff) +#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff) + +#define QLC_DCB_LOCAL_PARAM_FWID 0x3 +#define QLC_DCB_OPER_PARAM_FWID 0x1 +#define QLC_DCB_PEER_PARAM_FWID 0x2 + +#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf) +#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1) +#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1) +#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24) + +#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf) +#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1) +#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1) +#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7) +#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X) +#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210) + +static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops; + +static void qlcnic_dcb_aen_work(struct work_struct *); +static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *); + +static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *); +static void __qlcnic_dcb_free(struct qlcnic_dcb *); +static int __qlcnic_dcb_attach(struct qlcnic_dcb *); +static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *); +static void __qlcnic_dcb_get_info(struct qlcnic_dcb *); + +static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *); +static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); +static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *); +static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *); + +static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *); +static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8); +static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *); +static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *); + +struct qlcnic_dcb_capability { + bool tsa_capability; + bool ets_capability; + u8 max_num_tc; + u8 max_ets_tc; + u8 max_pfc_tc; + u8 dcb_capability; +}; + +struct qlcnic_dcb_param { + u32 hdr_prio_pfc_map[2]; + u32 prio_pg_map[2]; + u32 pg_bw_map[2]; + u32 pg_tsa_map[2]; + u32 app[QLC_DCB_MAX_APP]; +}; + +struct qlcnic_dcb_mbx_params { + /* 1st local, 2nd operational 3rd remote */ + struct qlcnic_dcb_param type[3]; + u32 prio_tc_map; +}; + +struct qlcnic_82xx_dcb_param_mbx_le { + __le32 hdr_prio_pfc_map[2]; + __le32 prio_pg_map[2]; + __le32 pg_bw_map[2]; + __le32 pg_tsa_map[2]; + __le32 app[QLC_DCB_MAX_APP]; +}; + +enum qlcnic_dcb_selector { + QLC_SELECTOR_DEF = 0x0, + QLC_SELECTOR_ETHER, + QLC_SELECTOR_TCP, + QLC_SELECTOR_UDP, +}; + +enum qlcnic_dcb_prio_type { + QLC_PRIO_NONE = 0, + 
QLC_PRIO_GROUP, + QLC_PRIO_LINK, +}; + +enum qlcnic_dcb_pfc_type { + QLC_PFC_DISABLED = 0, + QLC_PFC_FULL, + QLC_PFC_TX, + QLC_PFC_RX +}; + +struct qlcnic_dcb_prio_cfg { + bool valid; + enum qlcnic_dcb_pfc_type pfc_type; +}; + +struct qlcnic_dcb_pg_cfg { + bool valid; + u8 total_bw_percent; /* of Link/ port BW */ + u8 prio_count; + u8 tsa_type; +}; + +struct qlcnic_dcb_tc_cfg { + bool valid; + struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO]; + enum qlcnic_dcb_prio_type prio_type; /* always prio_link */ + u8 link_percent; /* % of link bandwidth */ + u8 bwg_percent; /* % of BWG's bandwidth */ + u8 up_tc_map; + u8 pgid; +}; + +struct qlcnic_dcb_app { + bool valid; + enum qlcnic_dcb_selector selector; + u16 protocol; + u8 priority; +}; + +struct qlcnic_dcb_cee { + struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC]; + struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG]; + struct qlcnic_dcb_app app[QLC_DCB_MAX_APP]; + bool tc_param_valid; + bool pfc_mode_enable; +}; + +struct qlcnic_dcb_cfg { + /* 0 - local, 1 - operational, 2 - remote */ + struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM]; + struct qlcnic_dcb_capability capability; + u32 version; +}; + +static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = { + .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, + .free = __qlcnic_dcb_free, + .attach = __qlcnic_dcb_attach, + .query_hw_capability = __qlcnic_dcb_query_hw_capability, + .get_info = __qlcnic_dcb_get_info, + + .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability, + .query_cee_param = qlcnic_83xx_dcb_query_cee_param, + .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg, + .aen_handler = qlcnic_83xx_dcb_aen_handler, +}; + +static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = { + .init_dcbnl_ops = __qlcnic_init_dcbnl_ops, + .free = __qlcnic_dcb_free, + .attach = __qlcnic_dcb_attach, + .query_hw_capability = __qlcnic_dcb_query_hw_capability, + .get_info = __qlcnic_dcb_get_info, + + .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability, + .query_cee_param = qlcnic_82xx_dcb_query_cee_param, + .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg, + .aen_handler = qlcnic_82xx_dcb_aen_handler, +}; + +static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_GET_NUMAPP(val); + else + return QLC_83XX_DCB_GET_NUMAPP(val); +} + +static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter, + u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_PFC_VALID(val); + else + return QLC_83XX_DCB_PFC_VALID(val); +} + +static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter, + u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_TSA_VALID(val); + else + return QLC_83XX_DCB_TSA_VALID(val); +} + +static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter, + u32 val) +{ + if (qlcnic_82xx_check(adapter)) + return QLC_82XX_DCB_GET_PRIOMAP_APP(val); + else + return QLC_83XX_DCB_GET_PRIOMAP_APP(val); +} + +static int qlcnic_dcb_prio_count(u8 up_tc_map) +{ + int j; + + for (j = 0; j < QLC_DCB_MAX_TC; j++) + if (up_tc_map & QLC_DCB_GET_MAP(j)) + break; + + return j; +} + +static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb) +{ + if (test_bit(QLCNIC_DCB_STATE, &dcb->state)) + dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops; +} + +static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter) +{ + if (qlcnic_82xx_check(adapter)) + adapter->dcb->ops = &qlcnic_82xx_dcb_ops; + else if (qlcnic_83xx_check(adapter)) + adapter->dcb->ops = &qlcnic_83xx_dcb_ops; +} + +int 
qlcnic_register_dcb(struct qlcnic_adapter *adapter) +{ + struct qlcnic_dcb *dcb; + + if (qlcnic_sriov_vf_check(adapter)) + return 0; + + dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC); + if (!dcb) + return -ENOMEM; + + adapter->dcb = dcb; + dcb->adapter = adapter; + qlcnic_set_dcb_ops(adapter); + dcb->state = 0; + + return 0; +} + +static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb) +{ + struct qlcnic_adapter *adapter; + + if (!dcb) + return; + + adapter = dcb->adapter; + + while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) + usleep_range(10000, 11000); + + cancel_delayed_work_sync(&dcb->aen_work); + + if (dcb->wq) { + destroy_workqueue(dcb->wq); + dcb->wq = NULL; + } + + kfree(dcb->cfg); + dcb->cfg = NULL; + kfree(dcb->param); + dcb->param = NULL; + kfree(dcb); + adapter->dcb = NULL; +} + +static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) +{ + qlcnic_dcb_get_hw_capability(dcb); + qlcnic_dcb_get_cee_cfg(dcb); +} + +static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb) +{ + int err = 0; + + INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work); + + dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); + if (!dcb->wq) { + dev_err(&dcb->adapter->pdev->dev, + "DCB workqueue allocation failed. DCB will be disabled\n"); + return -1; + } + + dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC); + if (!dcb->cfg) { + err = -ENOMEM; + goto out_free_wq; + } + + dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC); + if (!dcb->param) { + err = -ENOMEM; + goto out_free_cfg; + } + + return 0; +out_free_cfg: + kfree(dcb->cfg); + dcb->cfg = NULL; + +out_free_wq: + destroy_workqueue(dcb->wq); + dcb->wq = NULL; + + return err; +} + +static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) +{ + struct qlcnic_adapter *adapter = dcb->adapter; + struct qlcnic_cmd_args cmd; + u32 mbx_out; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP); + if (err) + return err; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to query DCBX capability, err %d\n", err); + } else { + mbx_out = cmd.rsp.arg[1]; + if (buf) + memcpy(buf, &mbx_out, sizeof(u32)); + } + + qlcnic_free_mbx_args(&cmd); + + return err; +} + +static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val) +{ + struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; + u32 mbx_out; + int err; + + memset(cap, 0, sizeof(struct qlcnic_dcb_capability)); + + err = qlcnic_dcb_query_hw_capability(dcb, (char *)val); + if (err) + return err; + + mbx_out = *val; + if (QLC_DCB_TSA_SUPPORT(mbx_out)) + cap->tsa_capability = true; + + if (QLC_DCB_ETS_SUPPORT(mbx_out)) + cap->ets_capability = true; + + cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out); + cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out); + cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out); + + if (cap->max_num_tc > QLC_DCB_MAX_TC || + cap->max_ets_tc > cap->max_num_tc || + cap->max_pfc_tc > cap->max_num_tc) { + dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n"); + return -EINVAL; + } + + return err; +} + +static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) +{ + struct qlcnic_dcb_cfg *cfg = dcb->cfg; + struct qlcnic_dcb_capability *cap; + u32 mbx_out; + int err; + + err = __qlcnic_dcb_get_capability(dcb, &mbx_out); + if (err) + return err; + + cap = &cfg->capability; + cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED; + + if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) + 
set_bit(QLCNIC_DCB_STATE, &dcb->state); + + return err; +} + +static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, + char *buf, u8 type) +{ + u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le); + struct qlcnic_adapter *adapter = dcb->adapter; + struct qlcnic_82xx_dcb_param_mbx_le *prsp_le; + struct device *dev = &adapter->pdev->dev; + dma_addr_t cardrsp_phys_addr; + struct qlcnic_dcb_param rsp; + struct qlcnic_cmd_args cmd; + u64 phys_addr; + void *addr; + int err, i; + + switch (type) { + case QLC_DCB_LOCAL_PARAM_FWID: + case QLC_DCB_OPER_PARAM_FWID: + case QLC_DCB_PEER_PARAM_FWID: + break; + default: + dev_err(dev, "Invalid parameter type %d\n", type); + return -EINVAL; + } + + addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL); + if (addr == NULL) + return -ENOMEM; + + prsp_le = addr; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); + if (err) + goto out_free_rsp; + + phys_addr = cardrsp_phys_addr; + cmd.req.arg[1] = size | (type << 16); + cmd.req.arg[2] = MSD(phys_addr); + cmd.req.arg[3] = LSD(phys_addr); + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(dev, "Failed to query DCBX parameter, err %d\n", err); + goto out; + } + + memset(&rsp, 0, sizeof(struct qlcnic_dcb_param)); + rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]); + rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]); + rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]); + rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]); + rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]); + rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]); + rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]); + rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]); + + for (i = 0; i < QLC_DCB_MAX_APP; i++) + rsp.app[i] = le32_to_cpu(prsp_le->app[i]); + + if (buf) + memcpy(buf, &rsp, size); +out: + qlcnic_free_mbx_args(&cmd); + +out_free_rsp: + dma_free_coherent(dev, size, addr, cardrsp_phys_addr); + + return err; +} + +static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) +{ + struct qlcnic_dcb_mbx_params *mbx; + int err; + + mbx = dcb->param; + if (!mbx) + return 0; + + err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0], + QLC_DCB_LOCAL_PARAM_FWID); + if (err) + return err; + + err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1], + QLC_DCB_OPER_PARAM_FWID); + if (err) + return err; + + err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2], + QLC_DCB_PEER_PARAM_FWID); + if (err) + return err; + + mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP; + + qlcnic_dcb_data_cee_param_map(dcb->adapter); + + return err; +} + +static void qlcnic_dcb_aen_work(struct work_struct *work) +{ + struct qlcnic_dcb *dcb; + + dcb = container_of(work, struct qlcnic_dcb, aen_work.work); + + qlcnic_dcb_get_cee_cfg(dcb); + clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state); +} + +static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) +{ + if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) + return; + + queue_delayed_work(dcb->wq, &dcb->aen_work, 0); +} + +static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb) +{ + struct qlcnic_dcb_capability *cap = &dcb->cfg->capability; + u32 mbx_out; + int err; + + err = __qlcnic_dcb_get_capability(dcb, &mbx_out); + if (err) + return err; + + if (mbx_out & BIT_2) + cap->dcb_capability = DCB_CAP_DCBX_VER_CEE; + if (mbx_out & BIT_3) + cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE; + if (cap->dcb_capability) + cap->dcb_capability |= 
DCB_CAP_DCBX_LLD_MANAGED; + + if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability) + set_bit(QLCNIC_DCB_STATE, &dcb->state); + + return err; +} + +static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb, + char *buf, u8 idx) +{ + struct qlcnic_adapter *adapter = dcb->adapter; + struct qlcnic_dcb_mbx_params mbx_out; + int err, i, j, k, max_app, size; + struct qlcnic_dcb_param *each; + struct qlcnic_cmd_args cmd; + u32 val; + char *p; + + size = 0; + memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params)); + memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params)); + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM); + if (err) + return err; + + cmd.req.arg[0] |= QLC_DCB_FW_VER << 29; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to query DCBX param, err %d\n", err); + goto out; + } + + mbx_out.prio_tc_map = cmd.rsp.arg[1]; + p = memcpy(buf, &mbx_out, sizeof(u32)); + k = 2; + p += sizeof(u32); + + for (j = 0; j < QLC_DCB_NUM_PARAM; j++) { + each = &mbx_out.type[j]; + + each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++]; + each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++]; + each->prio_pg_map[0] = cmd.rsp.arg[k++]; + each->prio_pg_map[1] = cmd.rsp.arg[k++]; + each->pg_bw_map[0] = cmd.rsp.arg[k++]; + each->pg_bw_map[1] = cmd.rsp.arg[k++]; + each->pg_tsa_map[0] = cmd.rsp.arg[k++]; + each->pg_tsa_map[1] = cmd.rsp.arg[k++]; + val = each->hdr_prio_pfc_map[0]; + + max_app = qlcnic_dcb_get_num_app(adapter, val); + for (i = 0; i < max_app; i++) + each->app[i] = cmd.rsp.arg[i + k]; + + size = 16 * sizeof(u32); + memcpy(p, &each->hdr_prio_pfc_map[0], size); + p += size; + if (j == 0) + k = 18; + else + k = 34; + } +out: + qlcnic_free_mbx_args(&cmd); + + return err; +} + +static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) +{ + int err; + + err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0); + if (err) + return err; + + qlcnic_dcb_data_cee_param_map(dcb->adapter); + + return err; +} + +static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data) +{ + u32 *val = data; + + if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state)) + return; + + if (*val & BIT_8) + set_bit(QLCNIC_DCB_STATE, &dcb->state); + else + clear_bit(QLCNIC_DCB_STATE, &dcb->state); + + queue_delayed_work(dcb->wq, &dcb->aen_work, 0); +} + +static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx, + struct qlcnic_dcb_param *each, + struct qlcnic_dcb_cee *type) +{ + struct qlcnic_dcb_tc_cfg *tc_cfg; + u8 i, tc, pgid; + + for (i = 0; i < QLC_DCB_MAX_PRIO; i++) { + tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i); + tc_cfg = &type->tc_cfg[tc]; + tc_cfg->valid = true; + tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i); + + if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) && + type->pfc_mode_enable) { + tc_cfg->prio_cfg[i].valid = true; + tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL; + } + + if (i < 4) + pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i); + else + pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i); + + tc_cfg->pgid = pgid; + + tc_cfg->prio_type = QLC_PRIO_LINK; + type->pg_cfg[tc_cfg->pgid].prio_count++; + } +} + +static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each, + struct qlcnic_dcb_cee *type) +{ + struct qlcnic_dcb_pg_cfg *pg_cfg; + u8 i, tsa, bw_per; + + for (i = 0; i < QLC_DCB_MAX_PG; i++) { + pg_cfg = &type->pg_cfg[i]; + pg_cfg->valid = true; + + if (i < 4) { + bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i); + tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], 
i); + } else { + bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i); + tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i); + } + + pg_cfg->total_bw_percent = bw_per; + pg_cfg->tsa_type = tsa; + } +} + +static void +qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx, + struct qlcnic_dcb_param *each, + struct qlcnic_dcb_cee *type) +{ + struct qlcnic_dcb_app *app; + u8 i, num_app, map, cnt; + struct dcb_app new_app; + + num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]); + for (i = 0; i < num_app; i++) { + app = &type->app[i]; + app->valid = true; + + /* Only for CEE (-1) */ + app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1; + new_app.selector = app->selector; + app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]); + new_app.protocol = app->protocol; + map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]); + cnt = qlcnic_dcb_prio_count(map); + + if (cnt >= QLC_DCB_MAX_TC) + cnt = 0; + + app->priority = cnt; + new_app.priority = cnt; + + if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops) + dcb_setapp(adapter->netdev, &new_app); + } +} + +static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx) +{ + struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param; + struct qlcnic_dcb_param *each = &mbx->type[idx]; + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + struct qlcnic_dcb_cee *type = &cfg->type[idx]; + + type->tc_param_valid = false; + type->pfc_mode_enable = false; + memset(type->tc_cfg, 0, + sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC); + memset(type->pg_cfg, 0, + sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC); + + if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && + cfg->capability.max_pfc_tc) + type->pfc_mode_enable = true; + + if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) && + cfg->capability.max_ets_tc) + type->tc_param_valid = true; + + qlcnic_dcb_fill_cee_tc_params(mbx, each, type); + qlcnic_dcb_fill_cee_pg_params(each, type); + qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type); +} + +static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter) +{ + int i; + + for (i = 0; i < QLC_DCB_NUM_PARAM; i++) + qlcnic_dcb_map_cee_params(adapter, i); + + dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +} + +static u8 qlcnic_dcb_get_state(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state); +} + +static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr) +{ + memcpy(addr, netdev->perm_addr, netdev->addr_len); +} + +static void +qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, + u8 *pgid, u8 *bw_per, u8 *up_tc_map) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_tc_cfg *tc_cfg, *temp; + struct qlcnic_dcb_cee *type; + u8 i, cnt, pg; + + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + *prio = *pgid = *bw_per = *up_tc_map = 0; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || + !type->tc_param_valid) + return; + + if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) + return; + + tc_cfg = &type->tc_cfg[tc]; + if (!tc_cfg->valid) + return; + + *pgid = tc_cfg->pgid; + *prio = tc_cfg->prio_type; + *up_tc_map = tc_cfg->up_tc_map; + pg = *pgid; + + for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) { + temp = &type->tc_cfg[i]; + if (temp->valid && (pg == temp->pgid)) + cnt++; + } + + tc_cfg->bwg_percent = (100 / cnt); + *bw_per = tc_cfg->bwg_percent; +} + +static void 
qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, + u8 *bw_pct) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_pg_cfg *pgcfg; + struct qlcnic_dcb_cee *type; + + *bw_pct = 0; + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || + !type->tc_param_valid) + return; + + if (pgid < 0 || pgid >= QLC_DCB_MAX_PG) + return; + + pgcfg = &type->pg_cfg[pgid]; + if (!pgcfg->valid) + return; + + *bw_pct = pgcfg->total_bw_percent; +} + +static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_tc_cfg *tc_cfg; + u8 val = QLC_DCB_GET_MAP(prio); + struct qlcnic_dcb_cee *type; + u8 i; + + *setting = 0; + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) || + !type->pfc_mode_enable) + return; + + for (i = 0; i < QLC_DCB_MAX_TC; i++) { + tc_cfg = &type->tc_cfg[i]; + if (!tc_cfg->valid) + continue; + + if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid)) + *setting = tc_cfg->prio_cfg[prio].pfc_type; + } +} + +static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid, + u8 *cap) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return 0; + + switch (capid) { + case DCB_CAP_ATTR_PG: + case DCB_CAP_ATTR_UP2TC: + case DCB_CAP_ATTR_PFC: + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_PG_TCS: + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; /* 8 priorities for PGs */ + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcb->cfg->capability.dcb_capability; + break; + default: + *cap = false; + } + + return 0; +} + +static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return -EINVAL; + + switch (attr) { + case DCB_NUMTCS_ATTR_PG: + *num = cfg->capability.max_ets_tc; + return 0; + case DCB_NUMTCS_ATTR_PFC: + *num = cfg->capability.max_pfc_tc; + return 0; + default: + return -EINVAL; + } +} + +static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return -EINVAL; + + return dcb_getapp(netdev, &app); +} + +static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb *dcb = adapter->dcb; + + if (!test_bit(QLCNIC_DCB_STATE, &dcb->state)) + return 0; + + return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable; +} + +static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return 0; + + return cfg->capability.dcb_capability; +} + +static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *type; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return 1; + + type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX]; + *flag = 0; + + switch (fid) { + case DCB_FEATCFG_ATTR_PG: + if 
(type->tc_param_valid) + *flag |= DCB_FEATCFG_ENABLE; + else + *flag |= DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (type->pfc_mode_enable) { + if (type->tc_cfg[0].prio_cfg[0].pfc_type) + *flag |= DCB_FEATCFG_ENABLE; + } else { + *flag |= DCB_FEATCFG_ERROR; + } + break; + case DCB_FEATCFG_ATTR_APP: + *flag |= DCB_FEATCFG_ENABLE; + break; + default: + netdev_err(netdev, "Invalid Feature ID %d\n", fid); + return 1; + } + + return 0; +} + +static inline void +qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + *prio_type = *pgid = *bw_pct = *up_map = 0; +} + +static inline void +qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct) +{ + *bw_pct = 0; +} + +static int qlcnic_dcb_peer_app_info(struct net_device *netdev, + struct dcb_peer_app_info *info, + u16 *app_count) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *peer; + int i; + + memset(info, 0, sizeof(*info)); + *app_count = 0; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return 0; + + peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0; i < QLC_DCB_MAX_APP; i++) { + if (peer->app[i].valid) + (*app_count)++; + } + + return 0; +} + +static int qlcnic_dcb_peer_app_table(struct net_device *netdev, + struct dcb_app *table) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *peer; + struct qlcnic_dcb_app *app; + int i, j; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return 0; + + peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) { + app = &peer->app[i]; + if (!app->valid) + continue; + + table[j].selector = app->selector; + table[j].priority = app->priority; + table[j++].protocol = app->protocol; + } + + return 0; +} + +static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev, + struct cee_pg *pg) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cee *peer; + u8 i, j, k, map; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return 0; + + peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) { + if (!peer->pg_cfg[i].valid) + continue; + + pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent; + + for (k = 0; k < QLC_DCB_MAX_TC; k++) { + if (peer->tc_cfg[i].valid && + (peer->tc_cfg[i].pgid == i)) { + map = peer->tc_cfg[i].up_tc_map; + pg->prio_pg[j++] = map; + break; + } + } + } + + return 0; +} + +static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev, + struct cee_pfc *pfc) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg; + struct qlcnic_dcb_tc_cfg *tc; + struct qlcnic_dcb_cee *peer; + u8 i, setting, prio; + + pfc->pfc_en = 0; + + if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) + return 0; + + peer = &cfg->type[QLC_DCB_PEER_IDX]; + + for (i = 0; i < QLC_DCB_MAX_TC; i++) { + tc = &peer->tc_cfg[i]; + prio = qlcnic_dcb_prio_count(tc->up_tc_map); + + setting = 0; + qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting); + if (setting) + pfc->pfc_en |= QLC_DCB_GET_MAP(i); + } + + pfc->tcs_supported = cfg->capability.max_pfc_tc; + + return 0; +} + +static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = { + .getstate = qlcnic_dcb_get_state, + .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr, + .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx, + .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx, + .getpfccfg = qlcnic_dcb_get_pfc_cfg, + 
.getcap = qlcnic_dcb_get_capability, + .getnumtcs = qlcnic_dcb_get_num_tcs, + .getapp = qlcnic_dcb_get_app, + .getpfcstate = qlcnic_dcb_get_pfc_state, + .getdcbx = qlcnic_dcb_get_dcbx, + .getfeatcfg = qlcnic_dcb_get_feat_cfg, + + .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx, + .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx, + + .peer_getappinfo = qlcnic_dcb_peer_app_info, + .peer_getapptable = qlcnic_dcb_peer_app_table, + .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg, + .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc, +}; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h new file mode 100644 index 000000000..3cf4a10fb --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -0,0 +1,122 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef __QLCNIC_DCBX_H +#define __QLCNIC_DCBX_H + +#define QLCNIC_DCB_STATE 0 +#define QLCNIC_DCB_AEN_MODE 1 + +#ifdef CONFIG_QLCNIC_DCB +int qlcnic_register_dcb(struct qlcnic_adapter *); +#else +static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter) +{ return 0; } +#endif + +struct qlcnic_dcb; + +struct qlcnic_dcb_ops { + int (*query_hw_capability) (struct qlcnic_dcb *, char *); + int (*get_hw_capability) (struct qlcnic_dcb *); + int (*query_cee_param) (struct qlcnic_dcb *, char *, u8); + void (*init_dcbnl_ops) (struct qlcnic_dcb *); + void (*aen_handler) (struct qlcnic_dcb *, void *); + int (*get_cee_cfg) (struct qlcnic_dcb *); + void (*get_info) (struct qlcnic_dcb *); + int (*attach) (struct qlcnic_dcb *); + void (*free) (struct qlcnic_dcb *); +}; + +struct qlcnic_dcb { + struct qlcnic_dcb_mbx_params *param; + struct qlcnic_adapter *adapter; + struct delayed_work aen_work; + struct workqueue_struct *wq; + struct qlcnic_dcb_ops *ops; + struct qlcnic_dcb_cfg *cfg; + unsigned long state; +}; + +static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) +{ + kfree(dcb); + dcb = NULL; +} + +static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->get_hw_capability) + return dcb->ops->get_hw_capability(dcb); + + return 0; +} + +static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->free) + dcb->ops->free(dcb); +} + +static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->attach) + return dcb->ops->attach(dcb); + + return 0; +} + +static inline int +qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) +{ + if (dcb && dcb->ops->query_hw_capability) + return dcb->ops->query_hw_capability(dcb, buf); + + return 0; +} + +static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->get_info) + dcb->ops->get_info(dcb); +} + +static inline int +qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) +{ + if (dcb && dcb->ops->query_cee_param) + return dcb->ops->query_cee_param(dcb, buf, type); + + return 0; +} + +static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->get_cee_cfg) + return dcb->ops->get_cee_cfg(dcb); + + return 0; +} + +static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg) +{ + if (dcb && dcb->ops->aen_handler) + dcb->ops->aen_handler(dcb, msg); +} + +static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb) +{ + if (dcb && dcb->ops->init_dcbnl_ops) + dcb->ops->init_dcbnl_ops(dcb); +} + +static inline void qlcnic_dcb_enable(struct qlcnic_dcb 
*dcb) +{ + if (dcb && qlcnic_dcb_attach(dcb)) + qlcnic_clear_dcb_ops(dcb); +} +#endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c new file mode 100644 index 000000000..494e8105a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -0,0 +1,1882 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include +#include +#include +#include +#include +#include + +#include "qlcnic.h" + +struct qlcnic_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) +#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) +static const u32 qlcnic_fw_dump_level[] = { + 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff +}; + +static const struct qlcnic_stats qlcnic_gstrings_stats[] = { + {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)}, + {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, + {"xmit_called", QLC_SIZEOF(stats.xmitcalled), + QLC_OFF(stats.xmitcalled)}, + {"xmit_finished", QLC_SIZEOF(stats.xmitfinished), + QLC_OFF(stats.xmitfinished)}, + {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error), + QLC_OFF(stats.tx_dma_map_error)}, + {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)}, + {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)}, + {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error), + QLC_OFF(stats.rx_dma_map_error)}, + {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)}, + {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)}, + {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)}, + {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)}, + {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)}, + {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)}, + {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)}, + {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)}, + {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames), + QLC_OFF(stats.encap_lso_frames)}, + {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed), + QLC_OFF(stats.encap_tx_csummed)}, + {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed), + QLC_OFF(stats.encap_rx_csummed)}, + {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), + QLC_OFF(stats.skb_alloc_failure)}, + {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun), + QLC_OFF(stats.mac_filter_limit_overrun)}, + {"spurious intr", QLC_SIZEOF(stats.spurious_intr), + QLC_OFF(stats.spurious_intr)}, + +}; + +static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { + "tx unicast frames", + "tx multicast frames", + "tx broadcast frames", + "tx dropped frames", + "tx errors", + "tx local frames", + "tx numbytes", + "rx unicast frames", + "rx multicast frames", + "rx broadcast frames", + "rx dropped frames", + "rx errors", + "rx local frames", + "rx numbytes", +}; + +static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = { + "ctx_tx_bytes", + "ctx_tx_pkts", + "ctx_tx_errors", + "ctx_tx_dropped_pkts", + "ctx_tx_num_buffers", +}; + +static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = { + "mac_tx_frames", + "mac_tx_bytes", + "mac_tx_mcast_pkts", + "mac_tx_bcast_pkts", + "mac_tx_pause_cnt", + "mac_tx_ctrl_pkt", + "mac_tx_lt_64b_pkts", + "mac_tx_lt_127b_pkts", + 
"mac_tx_lt_255b_pkts", + "mac_tx_lt_511b_pkts", + "mac_tx_lt_1023b_pkts", + "mac_tx_lt_1518b_pkts", + "mac_tx_gt_1518b_pkts", + "mac_rx_frames", + "mac_rx_bytes", + "mac_rx_mcast_pkts", + "mac_rx_bcast_pkts", + "mac_rx_pause_cnt", + "mac_rx_ctrl_pkt", + "mac_rx_lt_64b_pkts", + "mac_rx_lt_127b_pkts", + "mac_rx_lt_255b_pkts", + "mac_rx_lt_511b_pkts", + "mac_rx_lt_1023b_pkts", + "mac_rx_lt_1518b_pkts", + "mac_rx_gt_1518b_pkts", + "mac_rx_length_error", + "mac_rx_length_small", + "mac_rx_length_large", + "mac_rx_jabber", + "mac_rx_dropped", + "mac_crc_error", + "mac_align_error", + "eswitch_frames", + "eswitch_bytes", + "eswitch_multicast_frames", + "eswitch_broadcast_frames", + "eswitch_unicast_frames", + "eswitch_error_free_frames", + "eswitch_error_free_bytes", +}; + +#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) + +static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = { + "xmit_on", + "xmit_off", + "xmit_called", + "xmit_finished", + "tx_bytes", +}; + +#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings) + +static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { + "ctx_rx_bytes", + "ctx_rx_pkts", + "ctx_lro_pkt_cnt", + "ctx_ip_csum_error", + "ctx_rx_pkts_wo_ctx", + "ctx_rx_pkts_drop_wo_sds_on_card", + "ctx_rx_pkts_drop_wo_sds_on_host", + "ctx_rx_osized_pkts", + "ctx_rx_pkts_dropped_wo_rds", + "ctx_rx_unexpected_mcast_pkts", + "ctx_invalid_mac_address", + "ctx_rx_rds_ring_prim_attempted", + "ctx_rx_rds_ring_prim_success", + "ctx_num_lro_flows_added", + "ctx_num_lro_flows_removed", + "ctx_num_lro_flows_active", + "ctx_pkts_dropped_unknown", +}; + +static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { + "Register_Test_on_offline", + "Link_Test_on_offline", + "Interrupt_Test_offline", + "Internal_Loopback_offline", + "External_Loopback_offline", + "EEPROM_Test_offline" +}; + +#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) + +static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) +{ + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; +} + +static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) +{ + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + + ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; +} + +static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) +{ + int len = -1; + + if (qlcnic_82xx_check(adapter)) { + len = qlcnic_82xx_statistics(adapter); + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + len += ARRAY_SIZE(qlcnic_device_gstrings_stats); + } else if (qlcnic_83xx_check(adapter)) { + len = qlcnic_83xx_statistics(adapter); + } + + return len; +} + +#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 + +#define QLCNIC_MAX_EEPROM_LEN 1024 + +static const u32 diag_registers[] = { + QLCNIC_CMDPEG_STATE, + QLCNIC_RCVPEG_STATE, + QLCNIC_FW_CAPABILITIES, + QLCNIC_CRB_DRV_ACTIVE, + QLCNIC_CRB_DEV_STATE, + QLCNIC_CRB_DRV_STATE, + QLCNIC_CRB_DRV_SCRATCH, + QLCNIC_CRB_DEV_PARTITION_INFO, + QLCNIC_CRB_DRV_IDC_VER, + QLCNIC_PEG_ALIVE_COUNTER, + QLCNIC_PEG_HALT_STATUS1, + QLCNIC_PEG_HALT_STATUS2, + -1 +}; + + +static const u32 ext_diag_registers[] = { + CRB_XG_STATE_P3P, + ISR_INT_STATE_REG, + QLCNIC_CRB_PEG_NET_0+0x3c, + QLCNIC_CRB_PEG_NET_1+0x3c, + QLCNIC_CRB_PEG_NET_2+0x3c, + QLCNIC_CRB_PEG_NET_4+0x3c, + -1 +}; + +#define QLCNIC_MGMT_API_VERSION 3 +#define 
QLCNIC_ETHTOOL_REGS_VER 4 + +static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter) +{ + int ring_regs_cnt = (adapter->drv_tx_rings * 5) + + (adapter->max_rds_rings * 2) + + (adapter->drv_sds_rings * 3) + 5; + return ring_regs_cnt * sizeof(u32); +} + +static int qlcnic_get_regs_len(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 len; + + if (qlcnic_83xx_check(adapter)) + len = qlcnic_83xx_get_regs_len(adapter); + else + len = sizeof(ext_diag_registers) + sizeof(diag_registers); + + len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32)); + len += qlcnic_get_ring_regs_len(adapter); + return len; +} + +static int qlcnic_get_eeprom_len(struct net_device *dev) +{ + return QLCNIC_FLASH_TOTAL_SIZE; +} + +static void +qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 fw_major, fw_minor, fw_build; + fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", fw_major, fw_minor, fw_build); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, + sizeof(drvinfo->version)); +} + +static int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 speed, reg; + int check_sfp_module = 0, err = 0; + u16 pcifn = ahw->pci_func; + + /* read which mode */ + if (adapter->ahw->port_type == QLCNIC_GBE) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + ecmd->advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); + ecmd->duplex = adapter->ahw->link_duplex; + ecmd->autoneg = adapter->ahw->link_autoneg; + + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + u32 val = 0; + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err); + + if (val == QLCNIC_PORT_MODE_802_3_AP) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + + if (netif_running(adapter->netdev) && ahw->has_link_events) { + if (ahw->linkup) { + reg = QLCRD32(adapter, + P3P_LINK_SPEED_REG(pcifn), &err); + speed = P3P_LINK_SPEED_VAL(pcifn, reg); + ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; + } + + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->autoneg = ahw->link_autoneg; + ecmd->duplex = ahw->link_duplex; + goto skip; + } + + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->autoneg = AUTONEG_DISABLE; + } else + return -EIO; + +skip: + ecmd->phy_address = adapter->ahw->physical_port; + ecmd->transceiver = XCVR_EXTERNAL; + + switch (adapter->ahw->board_type) { + case QLCNIC_BRDTYPE_P3P_REF_QG: + case QLCNIC_BRDTYPE_P3P_4_GB: + case QLCNIC_BRDTYPE_P3P_4_GB_MM: + + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + case QLCNIC_BRDTYPE_P3P_10G_CX4: + case 
QLCNIC_BRDTYPE_P3P_10G_CX4_LP: + case QLCNIC_BRDTYPE_P3P_10000_BASE_T: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->autoneg = adapter->ahw->link_autoneg; + break; + case QLCNIC_BRDTYPE_P3P_IMEZ: + case QLCNIC_BRDTYPE_P3P_XG_LOM: + case QLCNIC_BRDTYPE_P3P_HMEZ: + ecmd->supported |= SUPPORTED_MII; + ecmd->advertising |= ADVERTISED_MII; + ecmd->port = PORT_MII; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: + case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: + case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: + ecmd->advertising |= ADVERTISED_TP; + ecmd->supported |= SUPPORTED_TP; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; + case QLCNIC_BRDTYPE_P3P_10G_XFP: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + break; + case QLCNIC_BRDTYPE_P3P_10G_TP: + if (adapter->ahw->port_type == QLCNIC_XGBE) { + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); + ecmd->advertising |= + (ADVERTISED_FIBRE | ADVERTISED_TP); + ecmd->port = PORT_FIBRE; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; + } else { + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + ecmd->advertising |= + (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + } + break; + default: + dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", + adapter->ahw->board_type); + return -EIO; + } + + if (check_sfp_module) { + switch (adapter->ahw->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ecmd->port = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ecmd->port = PORT_TP; + break; + default: + ecmd->port = PORT_OTHER; + } + } + + return 0; +} + +static int qlcnic_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (qlcnic_82xx_check(adapter)) + return qlcnic_82xx_get_settings(adapter, ecmd); + else if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_get_settings(adapter, ecmd); + + return -EIO; +} + + +static int qlcnic_set_port_config(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ + u32 ret = 0, config = 0; + /* read which mode */ + if (ecmd->duplex) + config |= 0x1; + + if (ecmd->autoneg) + config |= 0x2; + + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_10: + config |= (0 << 8); + break; + case SPEED_100: + config |= (1 << 8); + break; + case SPEED_1000: + config |= (10 << 8); + break; + default: + return -EIO; + } + + ret = qlcnic_fw_cmd_set_port(adapter, config); + + if (ret == QLCNIC_RCODE_NOT_SUPPORTED) + return -EOPNOTSUPP; + else if (ret) + return -EIO; + return ret; +} + +static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u32 ret = 0; + struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (adapter->ahw->port_type != QLCNIC_GBE) + return -EOPNOTSUPP; + + if (qlcnic_83xx_check(adapter)) + ret = qlcnic_83xx_set_settings(adapter, ecmd); + else + ret = qlcnic_set_port_config(adapter, ecmd); + + if (!ret) + return ret; + + adapter->ahw->link_speed = ethtool_cmd_speed(ecmd); + adapter->ahw->link_duplex = ecmd->duplex; + adapter->ahw->link_autoneg = 
ecmd->autoneg; + + if (!netif_running(dev)) + return 0; + + dev->netdev_ops->ndo_stop(dev); + return dev->netdev_ops->ndo_open(dev); +} + +static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter, + u32 *regs_buff) +{ + int i, j = 0, err = 0; + + for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++) + regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]); + j = 0; + while (ext_diag_registers[j] != -1) + regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++], + &err); + return i; +} + +static void +qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_rings; + struct qlcnic_host_tx_ring *tx_ring; + u32 *regs_buff = p; + int ring, i = 0; + + memset(p, 0, qlcnic_get_regs_len(dev)); + + regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | + (adapter->ahw->revision_id << 16) | (adapter->pdev)->device; + + regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); + regs_buff[1] = QLCNIC_MGMT_API_VERSION; + + if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) + regs_buff[2] = adapter->ahw->max_vnic_func; + + if (qlcnic_82xx_check(adapter)) + i = qlcnic_82xx_get_registers(adapter, regs_buff); + else + i = qlcnic_83xx_get_registers(adapter, regs_buff); + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return; + + /* Marker btw regs and TX ring count */ + regs_buff[i++] = 0xFFEFCDAB; + + regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */ + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer)); + regs_buff[i++] = tx_ring->sw_consumer; + regs_buff[i++] = readl(tx_ring->crb_cmd_producer); + regs_buff[i++] = tx_ring->producer; + if (tx_ring->crb_intr_mask) + regs_buff[i++] = readl(tx_ring->crb_intr_mask); + else + regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED; + } + + regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */ + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_rings = &recv_ctx->rds_rings[ring]; + regs_buff[i++] = readl(rds_rings->crb_rcv_producer); + regs_buff[i++] = rds_rings->producer; + } + + regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */ + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &(recv_ctx->sds_rings[ring]); + regs_buff[i++] = readl(sds_ring->crb_sts_consumer); + regs_buff[i++] = sds_ring->consumer; + regs_buff[i++] = readl(sds_ring->crb_intr_mask); + } +} + +static u32 qlcnic_test_link(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int err = 0; + u32 val; + + if (qlcnic_83xx_check(adapter)) { + val = qlcnic_83xx_test_link(adapter); + return (val & 1) ? 0 : 1; + } + val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err); + if (err == -EIO) + return err; + val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val); + return (val == XG_LINK_UP_P3P) ? 
0 : 1; +} + +static int +qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int offset; + int ret = -1; + + if (qlcnic_83xx_check(adapter)) + return 0; + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = (adapter->pdev)->vendor | + ((adapter->pdev)->device << 16); + offset = eeprom->offset; + + if (qlcnic_82xx_check(adapter)) + ret = qlcnic_rom_fast_read_words(adapter, offset, bytes, + eeprom->len); + if (ret < 0) + return ret; + + return 0; +} + +static void +qlcnic_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + ring->rx_pending = adapter->num_rxd; + ring->rx_jumbo_pending = adapter->num_jumbo_rxd; + ring->tx_pending = adapter->num_txd; + + ring->rx_max_pending = adapter->max_rxd; + ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd; + ring->tx_max_pending = MAX_CMD_DESCRIPTORS; +} + +static u32 +qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name) +{ + u32 num_desc; + num_desc = max(val, min); + num_desc = min(num_desc, max); + num_desc = roundup_pow_of_two(num_desc); + + if (val != num_desc) { + printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n", + qlcnic_driver_name, r_name, num_desc, val); + } + + return num_desc; +} + +static int +qlcnic_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u16 num_rxd, num_jumbo_rxd, num_txd; + + if (ring->rx_mini_pending) + return -EOPNOTSUPP; + + num_rxd = qlcnic_validate_ringparam(ring->rx_pending, + MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx"); + + num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending, + MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd, + "rx jumbo"); + + num_txd = qlcnic_validate_ringparam(ring->tx_pending, + MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx"); + + if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd && + num_jumbo_rxd == adapter->num_jumbo_rxd) + return 0; + + adapter->num_rxd = num_rxd; + adapter->num_jumbo_rxd = num_jumbo_rxd; + adapter->num_txd = num_txd; + + return qlcnic_reset_context(adapter); +} + +static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter, + u8 rx_ring, u8 tx_ring) +{ + if (rx_ring == 0 || tx_ring == 0) + return -EINVAL; + + if (rx_ring != 0) { + if (rx_ring > adapter->max_sds_rings) { + netdev_err(adapter->netdev, + "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n", + rx_ring, adapter->max_sds_rings); + return -EINVAL; + } + } + + if (tx_ring != 0) { + if (tx_ring > adapter->max_tx_rings) { + netdev_err(adapter->netdev, + "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n", + tx_ring, adapter->max_tx_rings); + return -EINVAL; + } + } + + return 0; +} + +static void qlcnic_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + channel->max_rx = adapter->max_sds_rings; + channel->max_tx = adapter->max_tx_rings; + channel->rx_count = adapter->drv_sds_rings; + channel->tx_count = adapter->drv_tx_rings; +} + +static int qlcnic_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n"); + return -EINVAL; + } + + if 
(channel->other_count || channel->combined_count) + return -EINVAL; + + err = qlcnic_validate_ring_count(adapter, channel->rx_count, + channel->tx_count); + if (err) + return err; + + if (adapter->drv_sds_rings != channel->rx_count) { + err = qlcnic_validate_rings(adapter, channel->rx_count, + QLCNIC_RX_QUEUE); + if (err) { + netdev_err(dev, "Unable to configure %u SDS rings\n", + channel->rx_count); + return err; + } + adapter->drv_rss_rings = channel->rx_count; + } + + if (adapter->drv_tx_rings != channel->tx_count) { + err = qlcnic_validate_rings(adapter, channel->tx_count, + QLCNIC_TX_QUEUE); + if (err) { + netdev_err(dev, "Unable to configure %u Tx rings\n", + channel->tx_count); + return err; + } + adapter->drv_tss_rings = channel->tx_count; + } + + adapter->flags |= QLCNIC_TSS_RSS; + + err = qlcnic_setup_rings(adapter); + netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n", + adapter->drv_sds_rings, adapter->drv_tx_rings); + + return err; +} + +static void +qlcnic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int port = adapter->ahw->physical_port; + int err = 0; + __u32 val; + + if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_get_pauseparam(adapter, pause); + return; + } + if (adapter->ahw->port_type == QLCNIC_GBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) + return; + /* get flow control settings */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err); + if (err == -EIO) + return; + pause->rx_pause = qlcnic_gb_get_rx_flowctl(val); + val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err); + if (err == -EIO) + return; + switch (port) { + case 0: + pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val)); + break; + case 1: + pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val)); + break; + case 2: + pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val)); + break; + case 3: + default: + pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val)); + break; + } + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) + return; + pause->rx_pause = 1; + val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err); + if (err == -EIO) + return; + if (port == 0) + pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val)); + else + pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val)); + } else { + dev_err(&netdev->dev, "Unknown board type: %x\n", + adapter->ahw->port_type); + } +} + +static int +qlcnic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int port = adapter->ahw->physical_port; + int err = 0; + __u32 val; + + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_set_pauseparam(adapter, pause); + + /* read mode */ + if (adapter->ahw->port_type == QLCNIC_GBE) { + if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS)) + return -EIO; + /* set flow control */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err); + if (err == -EIO) + return err; + + if (pause->rx_pause) + qlcnic_gb_rx_flowctl(val); + else + qlcnic_gb_unset_rx_flowctl(val); + + QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), + val); + QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val); + /* set autoneg */ + val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err); + if (err == -EIO) + return err; + switch (port) { + case 0: + if (pause->tx_pause) + qlcnic_gb_unset_gb0_mask(val); + else + qlcnic_gb_set_gb0_mask(val); + break; + case 1: + if (pause->tx_pause) + qlcnic_gb_unset_gb1_mask(val); + 
else + qlcnic_gb_set_gb1_mask(val); + break; + case 2: + if (pause->tx_pause) + qlcnic_gb_unset_gb2_mask(val); + else + qlcnic_gb_set_gb2_mask(val); + break; + case 3: + default: + if (pause->tx_pause) + qlcnic_gb_unset_gb3_mask(val); + else + qlcnic_gb_set_gb3_mask(val); + break; + } + QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val); + } else if (adapter->ahw->port_type == QLCNIC_XGBE) { + if (!pause->rx_pause || pause->autoneg) + return -EOPNOTSUPP; + + if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS)) + return -EIO; + + val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err); + if (err == -EIO) + return err; + if (port == 0) { + if (pause->tx_pause) + qlcnic_xg_unset_xg0_mask(val); + else + qlcnic_xg_set_xg0_mask(val); + } else { + if (pause->tx_pause) + qlcnic_xg_unset_xg1_mask(val); + else + qlcnic_xg_set_xg1_mask(val); + } + QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val); + } else { + dev_err(&netdev->dev, "Unknown board type: %x\n", + adapter->ahw->port_type); + } + return 0; +} + +static int qlcnic_reg_test(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 data_read; + int err = 0; + + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_reg_test(adapter); + + data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err); + if (err == -EIO) + return err; + if ((data_read & 0xffff) != adapter->pdev->vendor) + return 1; + + return 0; +} + +static int qlcnic_eeprom_test(struct net_device *dev) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (qlcnic_82xx_check(adapter)) + return 0; + + return qlcnic_83xx_flash_test(adapter); +} + +static int qlcnic_get_sset_count(struct net_device *dev, int sset) +{ + + struct qlcnic_adapter *adapter = netdev_priv(dev); + switch (sset) { + case ETH_SS_TEST: + return QLCNIC_TEST_LEN; + case ETH_SS_STATS: + return qlcnic_dev_statistics_len(adapter); + default: + return -EOPNOTSUPP; + } +} + +static int qlcnic_irq_test(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_cmd_args cmd; + int ret, drv_sds_rings = adapter->drv_sds_rings; + int drv_tx_rings = adapter->drv_tx_rings; + + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_interrupt_test(netdev); + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; + + ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); + if (ret) + goto clear_diag_irq; + + ahw->diag_cnt = 0; + ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); + if (ret) + goto free_diag_res; + + cmd.req.arg[1] = ahw->pci_func; + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) + goto done; + + usleep_range(1000, 12000); + ret = !ahw->diag_cnt; + +done: + qlcnic_free_mbx_args(&cmd); + +free_diag_res: + qlcnic_diag_free_res(netdev, drv_sds_rings); + +clear_diag_irq: + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + return ret; +} + +#define QLCNIC_ILB_PKT_SIZE 64 +#define QLCNIC_NUM_ILB_PKT 16 +#define QLCNIC_ILB_MAX_RCV_LOOP 10 +#define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1 +#define QLCNIC_LB_PKT_POLL_COUNT 20 + +static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) +{ + unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; + + memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE); + + memcpy(data, mac, ETH_ALEN); + memcpy(data + ETH_ALEN, mac, ETH_ALEN); + + memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data)); +} + +int 
qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]) +{ + unsigned char buff[QLCNIC_ILB_PKT_SIZE]; + qlcnic_create_loopback_buff(buff, mac); + return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE); +} + +int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0]; + struct sk_buff *skb; + int i, loop, cnt = 0; + + for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { + skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); + qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); + skb_put(skb, QLCNIC_ILB_PKT_SIZE); + adapter->ahw->diag_cnt = 0; + qlcnic_xmit_frame(skb, adapter->netdev); + loop = 0; + + do { + msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC); + qlcnic_process_rcv_ring_diag(sds_ring); + if (loop++ > QLCNIC_LB_PKT_POLL_COUNT) + break; + } while (!adapter->ahw->diag_cnt); + + dev_kfree_skb_any(skb); + + if (!adapter->ahw->diag_cnt) + dev_warn(&adapter->pdev->dev, + "LB Test: packet #%d was not received\n", + i + 1); + else + cnt++; + } + if (cnt != i) { + dev_err(&adapter->pdev->dev, + "LB Test: failed, TX[%d], RX[%d]\n", i, cnt); + if (mode != QLCNIC_ILB_MODE) + dev_warn(&adapter->pdev->dev, + "WARNING: Please check loopback cable\n"); + return -1; + } + return 0; +} + +static int qlcnic_loopback_test(struct net_device *netdev, u8 mode) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int drv_tx_rings = adapter->drv_tx_rings; + int drv_sds_rings = adapter->drv_sds_rings; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_hardware_context *ahw = adapter->ahw; + int loop = 0; + int ret; + + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_loopback_test(netdev, mode); + + if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { + dev_info(&adapter->pdev->dev, + "Firmware do not support loopback test\n"); + return -EOPNOTSUPP; + } + + dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n", + mode == QLCNIC_ILB_MODE ? 
"internal" : "external"); + if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + dev_warn(&adapter->pdev->dev, + "Loopback test not supported in nonprivileged mode\n"); + return 0; + } + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); + if (ret) + goto clear_it; + + sds_ring = &adapter->recv_ctx->sds_rings[0]; + ret = qlcnic_set_lb_mode(adapter, mode); + if (ret) + goto free_res; + + ahw->diag_cnt = 0; + do { + msleep(500); + qlcnic_process_rcv_ring_diag(sds_ring); + if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) { + netdev_info(netdev, + "Firmware didn't sent link up event to loopback request\n"); + ret = -ETIMEDOUT; + goto free_res; + } else if (adapter->ahw->diag_cnt) { + ret = adapter->ahw->diag_cnt; + goto free_res; + } + } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state)); + + ret = qlcnic_do_lb_test(adapter, mode); + + qlcnic_clear_lb_mode(adapter, mode); + + free_res: + qlcnic_diag_free_res(netdev, drv_sds_rings); + + clear_it: + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +static void +qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, + u64 *data) +{ + memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN); + + data[0] = qlcnic_reg_test(dev); + if (data[0]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[1] = (u64) qlcnic_test_link(dev); + if (data[1]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (eth_test->flags & ETH_TEST_FL_OFFLINE) { + data[2] = qlcnic_irq_test(dev); + if (data[2]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE); + if (data[3]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) { + data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE); + if (data[4]) + eth_test->flags |= ETH_TEST_FL_FAILED; + eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + + data[5] = qlcnic_eeprom_test(dev); + if (data[5]) + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static void +qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int index, i, num_stats; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *qlcnic_gstrings_test, + QLCNIC_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings); + for (i = 0; i < adapter->drv_tx_rings; i++) { + for (index = 0; index < num_stats; index++) { + sprintf(data, "tx_queue_%d %s", i, + qlcnic_tx_queue_stats_strings[index]); + data += ETH_GSTRING_LEN; + } + } + + for (index = 0; index < QLCNIC_STATS_LEN; index++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_gstrings_stats[index].stat_string, + ETH_GSTRING_LEN); + } + + if (qlcnic_83xx_check(adapter)) { + num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings); + for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_tx_stats_strings[i], + ETH_GSTRING_LEN); + num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_mac_stats_strings[i], + ETH_GSTRING_LEN); + num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); + for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_rx_stats_strings[i], + ETH_GSTRING_LEN); + return; + } else { + num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + 
for (i = 0; i < num_stats; i++, index++) + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_83xx_mac_stats_strings[i], + ETH_GSTRING_LEN); + } + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats); + for (i = 0; i < num_stats; index++, i++) { + memcpy(data + index * ETH_GSTRING_LEN, + qlcnic_device_gstrings_stats[i], + ETH_GSTRING_LEN); + } + } +} + +static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) +{ + if (type == QLCNIC_MAC_STATS) { + struct qlcnic_mac_statistics *mac_stats = + (struct qlcnic_mac_statistics *)stats; + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error); + *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error); + } else if (type == QLCNIC_ESW_STATS) { + struct __qlcnic_esw_statistics *esw_stats = + (struct __qlcnic_esw_statistics *)stats; + *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->errors); + *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames); + *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes); + } + return data; +} + +void qlcnic_update_stats(struct qlcnic_adapter *adapter) +{ + struct qlcnic_tx_queue_stats tx_stats; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + memset(&tx_stats, 0, sizeof(tx_stats)); + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + tx_stats.xmit_on += tx_ring->tx_stats.xmit_on; + tx_stats.xmit_off += tx_ring->tx_stats.xmit_off; 
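		/*
		 * The remaining per-ring Tx counters are folded in the same
		 * way; the totals are then copied into adapter->stats below,
		 * which is what the "xmit_*" and "tx_bytes" entries of
		 * qlcnic_gstrings_stats report back through ethtool -S.
		 */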
+ tx_stats.xmit_called += tx_ring->tx_stats.xmit_called; + tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished; + tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes; + } + + adapter->stats.xmit_on = tx_stats.xmit_on; + adapter->stats.xmit_off = tx_stats.xmit_off; + adapter->stats.xmitcalled = tx_stats.xmit_called; + adapter->stats.xmitfinished = tx_stats.xmit_finished; + adapter->stats.txbytes = tx_stats.tx_bytes; +} + +static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats) +{ + struct qlcnic_host_tx_ring *tx_ring; + + tx_ring = (struct qlcnic_host_tx_ring *)stats; + + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished); + *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes); + + return data; +} + +static void qlcnic_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_esw_statistics port_stats; + struct qlcnic_mac_statistics mac_stats; + int index, ret, length, size, ring; + char *p; + + memset(data, 0, stats->n_stats * sizeof(u64)); + + for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { + tx_ring = &adapter->tx_ring[ring]; + data = qlcnic_fill_tx_queue_stats(data, tx_ring); + qlcnic_update_stats(adapter); + } else { + data += QLCNIC_TX_STATS_LEN; + } + } + + length = QLCNIC_STATS_LEN; + for (index = 0; index < length; index++) { + p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; + size = qlcnic_gstrings_stats[index].sizeof_stat; + *data++ = (size == sizeof(u64)) ? 
(*(u64 *)p) : ((*(u32 *)p)); + } + + if (qlcnic_83xx_check(adapter)) { + if (adapter->ahw->linkup) + qlcnic_83xx_get_stats(adapter, data); + return; + } else { + /* Retrieve MAC statistics from firmware */ + memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); + qlcnic_get_mac_stats(adapter, &mac_stats); + data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); + } + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + + memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics)); + ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, + QLCNIC_QUERY_RX_COUNTER, &port_stats.rx); + if (ret) + return; + + data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); + ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, + QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); + if (ret) + return; + + qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS); +} + +static int qlcnic_set_led(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + int drv_sds_rings = adapter->drv_sds_rings; + int err = -EIO, active = 1; + + if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_set_led(dev, state); + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + netdev_warn(dev, "LED test not supported for non " + "privilege function\n"); + return -EOPNOTSUPP; + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) + return -EBUSY; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) + break; + set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); + } + + if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) { + err = 0; + break; + } + + dev_err(&adapter->pdev->dev, + "Failed to set LED blink state.\n"); + break; + + case ETHTOOL_ID_INACTIVE: + active = 0; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + break; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) + break; + set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); + } + + if (adapter->nic_ops->config_led(adapter, 0, 0xf)) + dev_err(&adapter->pdev->dev, + "Failed to reset LED blink state.\n"); + + break; + + default: + return -EINVAL; + } + + if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) + qlcnic_diag_free_res(dev, drv_sds_rings); + + if (!active || err) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + return err; +} + +static void +qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 wol_cfg; + int err = 0; + + if (qlcnic_83xx_check(adapter)) + return; + wol->supported = 0; + wol->wolopts = 0; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); + if (err == -EIO) + return; + if (wol_cfg & (1UL << adapter->portnum)) + wol->supported |= WAKE_MAGIC; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); + if (wol_cfg & (1UL << adapter->portnum)) + wol->wolopts |= WAKE_MAGIC; +} + +static int +qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct qlcnic_adapter *adapter = netdev_priv(dev); + u32 wol_cfg; + int err = 0; + + if (qlcnic_83xx_check(adapter)) + return -EOPNOTSUPP; + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); + if (err == -EIO) + return err; + if (!(wol_cfg & (1 << adapter->portnum))) + return -EOPNOTSUPP; + + 
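	/*
	 * Two registers cooperate here, mirroring qlcnic_get_wol() above:
	 * QLCNIC_WOL_CONFIG_NV advertises which ports are Wake-on-LAN
	 * capable, while QLCNIC_WOL_CONFIG holds the per-port enable bits
	 * toggled below.  From userspace this path is typically reached with
	 * "ethtool -s ethX wol g" (enable magic packet) or "wol d" (disable).
	 */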
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); + if (err == -EIO) + return err; + if (wol->wolopts & WAKE_MAGIC) + wol_cfg |= 1UL << adapter->portnum; + else + wol_cfg &= ~(1UL << adapter->portnum); + + QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg); + + return 0; +} + +/* + * Set the coalescing parameters. Currently only normal is supported. + * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the + * firmware coalescing to default. + */ +static int qlcnic_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return -EINVAL; + + /* + * Return Error if unsupported values or + * unsupported parameters are set. + */ + if (ethcoal->rx_coalesce_usecs > 0xffff || + ethcoal->rx_max_coalesced_frames > 0xffff || + ethcoal->tx_coalesce_usecs > 0xffff || + ethcoal->tx_max_coalesced_frames > 0xffff || + ethcoal->rx_coalesce_usecs_irq || + ethcoal->rx_max_coalesced_frames_irq || + ethcoal->tx_coalesce_usecs_irq || + ethcoal->tx_max_coalesced_frames_irq || + ethcoal->stats_block_coalesce_usecs || + ethcoal->use_adaptive_rx_coalesce || + ethcoal->use_adaptive_tx_coalesce || + ethcoal->pkt_rate_low || + ethcoal->rx_coalesce_usecs_low || + ethcoal->rx_max_coalesced_frames_low || + ethcoal->tx_coalesce_usecs_low || + ethcoal->tx_max_coalesced_frames_low || + ethcoal->pkt_rate_high || + ethcoal->rx_coalesce_usecs_high || + ethcoal->rx_max_coalesced_frames_high || + ethcoal->tx_coalesce_usecs_high || + ethcoal->tx_max_coalesced_frames_high) + return -EINVAL; + + err = qlcnic_config_intr_coalesce(adapter, ethcoal); + + return err; +} + +static int qlcnic_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return -EINVAL; + + ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us; + ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets; + ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us; + ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets; + + return 0; +} + +static u32 qlcnic_get_msglevel(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + return adapter->ahw->msg_enable; +} + +static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + adapter->ahw->msg_enable = msglvl; +} + +int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 val; + + if (qlcnic_84xx_check(adapter)) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + + qlcnic_83xx_unlock_driver(adapter); + } else { + fw_dump->enable = true; + } + + dev_info(&adapter->pdev->dev, "FW dump enabled\n"); + + return 0; +} + +static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 val; + + if (qlcnic_84xx_check(adapter)) { + if (qlcnic_83xx_lock_driver(adapter)) + return -EBUSY; + + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + val |= QLC_83XX_IDC_DISABLE_FW_DUMP; + QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val); + + qlcnic_83xx_unlock_driver(adapter); + } else { + fw_dump->enable = false; + } + + 
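	/*
	 * Note the split above: on 84xx adapters the enable/disable state
	 * lives in the shared QLC_83XX_IDC_CTRL register (hence the driver
	 * lock), while on other adapters it is just the per-function
	 * fw_dump->enable flag that qlcnic_check_fw_dump_state() reads back.
	 */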
dev_info(&adapter->pdev->dev, "FW dump disabled\n"); + + return 0; +} + +bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + bool state; + u32 val; + + if (qlcnic_84xx_check(adapter)) { + val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL); + state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true; + } else { + state = fw_dump->enable; + } + + return state; +} + +static int +qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + if (!fw_dump->tmpl_hdr) { + netdev_err(adapter->netdev, "FW Dump not supported\n"); + return -ENOTSUPP; + } + + if (fw_dump->clr) + dump->len = fw_dump->tmpl_hdr_size + fw_dump->size; + else + dump->len = 0; + + if (!qlcnic_check_fw_dump_state(adapter)) + dump->flag = ETH_FW_DUMP_DISABLE; + else + dump->flag = fw_dump->cap_mask; + + dump->version = adapter->fw_version; + return 0; +} + +static int +qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + int i, copy_sz; + u32 *hdr_ptr; + __le32 *data; + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW Dump not supported\n"); + return -ENOTSUPP; + } + + if (!fw_dump->clr) { + netdev_info(netdev, "Dump not available\n"); + return -EINVAL; + } + + /* Copy template header first */ + copy_sz = fw_dump->tmpl_hdr_size; + hdr_ptr = (u32 *)fw_dump->tmpl_hdr; + data = buffer; + for (i = 0; i < copy_sz/sizeof(u32); i++) + *data++ = cpu_to_le32(*hdr_ptr++); + + /* Copy captured dump data */ + memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size); + dump->len = copy_sz + fw_dump->size; + dump->flag = fw_dump->cap_mask; + + /* Free dump area once data has been captured */ + vfree(fw_dump->data); + fw_dump->data = NULL; + fw_dump->clr = 0; + netdev_info(netdev, "extracted the FW dump Successfully\n"); + return 0; +} + +static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct net_device *netdev = adapter->netdev; + + if (!qlcnic_check_fw_dump_state(adapter)) { + netdev_info(netdev, + "Can not change driver mask to 0x%x. 
FW dump not enabled\n", + mask); + return -EOPNOTSUPP; + } + + fw_dump->cap_mask = mask; + + /* Store new capture mask in template header as well*/ + qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask); + + netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask); + return 0; +} + +static int +qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + bool valid_mask = false; + int i, ret = 0; + + switch (val->flag) { + case QLCNIC_FORCE_FW_DUMP_KEY: + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + ret = -EOPNOTSUPP; + break; + } + + if (!qlcnic_check_fw_dump_state(adapter)) { + netdev_info(netdev, "FW dump not enabled\n"); + ret = -EOPNOTSUPP; + break; + } + + if (fw_dump->clr) { + netdev_info(netdev, + "Previous dump not cleared, not forcing dump\n"); + break; + } + + netdev_info(netdev, "Forcing a FW dump\n"); + qlcnic_dev_request_reset(adapter, val->flag); + break; + case QLCNIC_DISABLE_FW_DUMP: + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + ret = -EOPNOTSUPP; + break; + } + + ret = qlcnic_disable_fw_dump_state(adapter); + break; + + case QLCNIC_ENABLE_FW_DUMP: + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + ret = -EOPNOTSUPP; + break; + } + + ret = qlcnic_enable_fw_dump_state(adapter); + break; + + case QLCNIC_FORCE_FW_RESET: + netdev_info(netdev, "Forcing a FW reset\n"); + qlcnic_dev_request_reset(adapter, val->flag); + adapter->flags &= ~QLCNIC_FW_RESET_OWNER; + break; + + case QLCNIC_SET_QUIESCENT: + case QLCNIC_RESET_QUIESCENT: + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) + netdev_info(netdev, "Device is in non-operational state\n"); + break; + + default: + if (!fw_dump->tmpl_hdr) { + netdev_err(netdev, "FW dump not supported\n"); + ret = -EOPNOTSUPP; + break; + } + + for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) { + if (val->flag == qlcnic_fw_dump_level[i]) { + valid_mask = true; + break; + } + } + + if (valid_mask) { + ret = qlcnic_set_dump_mask(adapter, val->flag); + } else { + netdev_info(netdev, "Invalid dump level: 0x%x\n", + val->flag); + ret = -EINVAL; + } + } + return ret; +} + +const struct ethtool_ops qlcnic_ethtool_ops = { + .get_settings = qlcnic_get_settings, + .set_settings = qlcnic_set_settings, + .get_drvinfo = qlcnic_get_drvinfo, + .get_regs_len = qlcnic_get_regs_len, + .get_regs = qlcnic_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = qlcnic_get_eeprom_len, + .get_eeprom = qlcnic_get_eeprom, + .get_ringparam = qlcnic_get_ringparam, + .set_ringparam = qlcnic_set_ringparam, + .get_channels = qlcnic_get_channels, + .set_channels = qlcnic_set_channels, + .get_pauseparam = qlcnic_get_pauseparam, + .set_pauseparam = qlcnic_set_pauseparam, + .get_wol = qlcnic_get_wol, + .set_wol = qlcnic_set_wol, + .self_test = qlcnic_diag_test, + .get_strings = qlcnic_get_strings, + .get_ethtool_stats = qlcnic_get_ethtool_stats, + .get_sset_count = qlcnic_get_sset_count, + .get_coalesce = qlcnic_get_intr_coalesce, + .set_coalesce = qlcnic_set_intr_coalesce, + .set_phys_id = qlcnic_set_led, + .set_msglevel = qlcnic_set_msglevel, + .get_msglevel = qlcnic_get_msglevel, + .get_dump_flag = qlcnic_get_dump_flag, + .get_dump_data = qlcnic_get_dump_data, + .set_dump = qlcnic_set_dump, +}; + +const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = { + .get_settings = qlcnic_get_settings, + .get_drvinfo = qlcnic_get_drvinfo, + 
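	/*
	 * Compared with qlcnic_ethtool_ops above, this SR-IOV VF table leaves
	 * out the write-side callbacks that act on shared physical-port state
	 * (set_settings, set_channels, set_pauseparam, set_wol, set_phys_id,
	 * self_test and the firmware-dump hooks), presumably because a
	 * virtual function should not be allowed to drive them.
	 */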
.get_regs_len = qlcnic_get_regs_len, + .get_regs = qlcnic_get_regs, + .get_link = ethtool_op_get_link, + .get_eeprom_len = qlcnic_get_eeprom_len, + .get_eeprom = qlcnic_get_eeprom, + .get_ringparam = qlcnic_get_ringparam, + .set_ringparam = qlcnic_set_ringparam, + .get_channels = qlcnic_get_channels, + .get_pauseparam = qlcnic_get_pauseparam, + .get_wol = qlcnic_get_wol, + .get_strings = qlcnic_get_strings, + .get_ethtool_stats = qlcnic_get_ethtool_stats, + .get_sset_count = qlcnic_get_sset_count, + .get_coalesce = qlcnic_get_intr_coalesce, + .set_coalesce = qlcnic_set_intr_coalesce, + .set_msglevel = qlcnic_set_msglevel, + .get_msglevel = qlcnic_get_msglevel, +}; + +const struct ethtool_ops qlcnic_ethtool_failed_ops = { + .get_settings = qlcnic_get_settings, + .get_drvinfo = qlcnic_get_drvinfo, + .set_msglevel = qlcnic_set_msglevel, + .get_msglevel = qlcnic_get_msglevel, + .set_dump = qlcnic_set_dump, +}; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h new file mode 100644 index 000000000..34e467b23 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h @@ -0,0 +1,948 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef __QLCNIC_HDR_H_ +#define __QLCNIC_HDR_H_ + +#include +#include + +#include "qlcnic_hw.h" + +/* + * The basic unit of access when reading/writing control registers. + */ + +enum { + QLCNIC_HW_H0_CH_HUB_ADR = 0x05, + QLCNIC_HW_H1_CH_HUB_ADR = 0x0E, + QLCNIC_HW_H2_CH_HUB_ADR = 0x03, + QLCNIC_HW_H3_CH_HUB_ADR = 0x01, + QLCNIC_HW_H4_CH_HUB_ADR = 0x06, + QLCNIC_HW_H5_CH_HUB_ADR = 0x07, + QLCNIC_HW_H6_CH_HUB_ADR = 0x08 +}; + +/* Hub 0 */ +enum { + QLCNIC_HW_MN_CRB_AGT_ADR = 0x15, + QLCNIC_HW_MS_CRB_AGT_ADR = 0x25 +}; + +/* Hub 1 */ +enum { + QLCNIC_HW_PS_CRB_AGT_ADR = 0x73, + QLCNIC_HW_SS_CRB_AGT_ADR = 0x20, + QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b, + QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00, + QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01, + QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02, + QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03, + QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04, + QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58, + QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59, + QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a, + QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a, + QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c, + QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f, + QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12, + QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18 +}; + +/* Hub 2 */ +enum { + QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31, + QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19, + QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29, + + QLCNIC_HW_SN_CRB_AGT_ADR = 0x10, + QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20, + QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22, + QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21, + QLCNIC_HW_QM_CRB_AGT_ADR = 0x66, + QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60, + QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61, + QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62, + QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63, + QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09, + QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d, + QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e, + QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11 +}; + +/* Hub 3 */ +enum { + QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A, + QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50, + QLCNIC_HW_EG_CRB_AGT_ADR = 0x51, + QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08 +}; + +/* Hub 4 */ +enum { + QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40, + QLCNIC_HW_PEGN1_CRB_AGT_ADR, + QLCNIC_HW_PEGN2_CRB_AGT_ADR, + QLCNIC_HW_PEGN3_CRB_AGT_ADR, + QLCNIC_HW_PEGNI_CRB_AGT_ADR, + QLCNIC_HW_PEGND_CRB_AGT_ADR, + QLCNIC_HW_PEGNC_CRB_AGT_ADR, + QLCNIC_HW_PEGR0_CRB_AGT_ADR, + 
QLCNIC_HW_PEGR1_CRB_AGT_ADR, + QLCNIC_HW_PEGR2_CRB_AGT_ADR, + QLCNIC_HW_PEGR3_CRB_AGT_ADR, + QLCNIC_HW_PEGN4_CRB_AGT_ADR +}; + +/* Hub 5 */ +enum { + QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40, + QLCNIC_HW_PEGS1_CRB_AGT_ADR, + QLCNIC_HW_PEGS2_CRB_AGT_ADR, + QLCNIC_HW_PEGS3_CRB_AGT_ADR, + QLCNIC_HW_PEGSI_CRB_AGT_ADR, + QLCNIC_HW_PEGSD_CRB_AGT_ADR, + QLCNIC_HW_PEGSC_CRB_AGT_ADR +}; + +/* Hub 6 */ +enum { + QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46, + QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47, + QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48, + QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49, + QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16, + QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17, + QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05, + QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06, + QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07 +}; + +/* Floaters - non existent modules */ +#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67 + +/* This field defines PCI/X adr [25:20] of agents on the CRB */ +enum { + QLCNIC_HW_PX_MAP_CRB_PH = 0, + QLCNIC_HW_PX_MAP_CRB_PS, + QLCNIC_HW_PX_MAP_CRB_MN, + QLCNIC_HW_PX_MAP_CRB_MS, + QLCNIC_HW_PX_MAP_CRB_PGR1, + QLCNIC_HW_PX_MAP_CRB_SRE, + QLCNIC_HW_PX_MAP_CRB_NIU, + QLCNIC_HW_PX_MAP_CRB_QMN, + QLCNIC_HW_PX_MAP_CRB_SQN0, + QLCNIC_HW_PX_MAP_CRB_SQN1, + QLCNIC_HW_PX_MAP_CRB_SQN2, + QLCNIC_HW_PX_MAP_CRB_SQN3, + QLCNIC_HW_PX_MAP_CRB_QMS, + QLCNIC_HW_PX_MAP_CRB_SQS0, + QLCNIC_HW_PX_MAP_CRB_SQS1, + QLCNIC_HW_PX_MAP_CRB_SQS2, + QLCNIC_HW_PX_MAP_CRB_SQS3, + QLCNIC_HW_PX_MAP_CRB_PGN0, + QLCNIC_HW_PX_MAP_CRB_PGN1, + QLCNIC_HW_PX_MAP_CRB_PGN2, + QLCNIC_HW_PX_MAP_CRB_PGN3, + QLCNIC_HW_PX_MAP_CRB_PGND, + QLCNIC_HW_PX_MAP_CRB_PGNI, + QLCNIC_HW_PX_MAP_CRB_PGS0, + QLCNIC_HW_PX_MAP_CRB_PGS1, + QLCNIC_HW_PX_MAP_CRB_PGS2, + QLCNIC_HW_PX_MAP_CRB_PGS3, + QLCNIC_HW_PX_MAP_CRB_PGSD, + QLCNIC_HW_PX_MAP_CRB_PGSI, + QLCNIC_HW_PX_MAP_CRB_SN, + QLCNIC_HW_PX_MAP_CRB_PGR2, + QLCNIC_HW_PX_MAP_CRB_EG, + QLCNIC_HW_PX_MAP_CRB_PH2, + QLCNIC_HW_PX_MAP_CRB_PS2, + QLCNIC_HW_PX_MAP_CRB_CAM, + QLCNIC_HW_PX_MAP_CRB_CAS0, + QLCNIC_HW_PX_MAP_CRB_CAS1, + QLCNIC_HW_PX_MAP_CRB_CAS2, + QLCNIC_HW_PX_MAP_CRB_C2C0, + QLCNIC_HW_PX_MAP_CRB_C2C1, + QLCNIC_HW_PX_MAP_CRB_TIMR, + QLCNIC_HW_PX_MAP_CRB_PGR3, + QLCNIC_HW_PX_MAP_CRB_RPMX1, + QLCNIC_HW_PX_MAP_CRB_RPMX2, + QLCNIC_HW_PX_MAP_CRB_RPMX3, + QLCNIC_HW_PX_MAP_CRB_RPMX4, + QLCNIC_HW_PX_MAP_CRB_RPMX5, + QLCNIC_HW_PX_MAP_CRB_RPMX6, + QLCNIC_HW_PX_MAP_CRB_RPMX7, + QLCNIC_HW_PX_MAP_CRB_XDMA, + QLCNIC_HW_PX_MAP_CRB_I2Q, + QLCNIC_HW_PX_MAP_CRB_ROMUSB, + QLCNIC_HW_PX_MAP_CRB_CAS3, + QLCNIC_HW_PX_MAP_CRB_RPMX0, + QLCNIC_HW_PX_MAP_CRB_RPMX8, + QLCNIC_HW_PX_MAP_CRB_RPMX9, + QLCNIC_HW_PX_MAP_CRB_OCM0, + QLCNIC_HW_PX_MAP_CRB_OCM1, + QLCNIC_HW_PX_MAP_CRB_SMB, + QLCNIC_HW_PX_MAP_CRB_I2C0, + QLCNIC_HW_PX_MAP_CRB_I2C1, + QLCNIC_HW_PX_MAP_CRB_LPC, + QLCNIC_HW_PX_MAP_CRB_PGNC, + QLCNIC_HW_PX_MAP_CRB_PGR0 +}; + +#define BIT_0 0x1 +#define BIT_1 0x2 +#define BIT_2 0x4 +#define BIT_3 0x8 +#define BIT_4 0x10 +#define BIT_5 0x20 +#define BIT_6 0x40 +#define BIT_7 0x80 +#define BIT_8 0x100 +#define BIT_9 0x200 +#define BIT_10 0x400 +#define BIT_11 0x800 +#define BIT_12 0x1000 +#define BIT_13 0x2000 +#define BIT_14 0x4000 +#define BIT_15 0x8000 +#define BIT_16 0x10000 +#define BIT_17 0x20000 +#define BIT_18 0x40000 +#define BIT_19 0x80000 +#define BIT_20 0x100000 +#define BIT_21 0x200000 +#define BIT_22 0x400000 +#define BIT_23 0x800000 +#define BIT_24 0x1000000 +#define BIT_25 0x2000000 +#define BIT_26 0x4000000 +#define BIT_27 0x8000000 +#define BIT_28 0x10000000 +#define BIT_29 0x20000000 +#define BIT_30 0x40000000 +#define BIT_31 0x80000000 + +/* This field defines CRB adr [31:20] of the agents */ + 
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \ + ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \ + ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \ + ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \ + 
((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \ + ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \ + ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \ + ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR) + +#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR) +#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \ + ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR) + +#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c) + +#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034) + +#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000) +#define ROMUSB_ROM 
(QLCNIC_CRB_ROMUSB + 0x10000) + +#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004) +#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008) +#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c) +#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) +#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044) +#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c) +#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8) + +#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n))) + +#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) +#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008) +#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c) +#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010) +#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014) +#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018) + +/****************************************************************************** +* +* Definitions specific to M25P flash +* +******************************************************************************* +*/ + +/* all are 1MB windows */ + +#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000 +#define QLCNIC_PCI_CRB_WINDOW(A) \ + (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE) + +#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU) +#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE) +#define QLCNIC_CRB_ROMUSB \ + QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB) +#define QLCNIC_CRB_EPG QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_EG) +#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q) +#define QLCNIC_CRB_TIMER QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_TIMR) +#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0) +#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB) +#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64) + +#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH) +#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2) +#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0) +#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1) +#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2) +#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3) +#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2) +#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND) +#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI) +#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN) +#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN) + +#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS) +#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD + +#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR)) +#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK)) +#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS)) +#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK)) +#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1)) +#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1)) +#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2)) +#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) +#define ISR_INT_TARGET_STATUS_F3 
(QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) +#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) +#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) +#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) +#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) +#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) +#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) +#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) +#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) +#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) + +#define QLCNIC_PCI_OCM0_2M (0x000c0000UL) +#define QLCNIC_PCI_CRBSPACE (0x06000000UL) +#define QLCNIC_PCI_CAMQM (0x04800000UL) +#define QLCNIC_PCI_CAMQM_END (0x04800800UL) +#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) + +#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) + +#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL) +#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL) +#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) +#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) +#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) + +/* + * Register offsets for MN + */ +#define QLCNIC_MIU_CONTROL (0x000) +#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL) + +/* 200ms delay in each loop */ +#define QLCNIC_NIU_PHY_WAITLEN 200000 +/* 10 seconds before we give up */ +#define QLCNIC_NIU_PHY_WAITMAX 50 +#define QLCNIC_NIU_MAX_GBE_PORTS 4 +#define QLCNIC_NIU_MAX_XG_PORTS 2 + +#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000) +#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c) +#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098) + +#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \ + (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000) +#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \ + (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000) + +#define MAX_CTL_CHECK 1000 +#define TEST_AGT_CTRL (0x00) + +#define TA_CTL_START BIT_0 +#define TA_CTL_ENABLE BIT_1 +#define TA_CTL_WRITE BIT_2 +#define TA_CTL_BUSY BIT_3 + +/* XG Link status */ +#define XG_LINK_UP 0x10 +#define XG_LINK_DOWN 0x20 + +#define XG_LINK_UP_P3P 0x01 +#define XG_LINK_DOWN_P3P 0x02 +#define XG_LINK_STATE_P3P_MASK 0xf +#define XG_LINK_STATE_P3P(pcifn, val) \ + (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK) + +#define P3P_LINK_SPEED_MHZ 100 +#define P3P_LINK_SPEED_MASK 0xff +#define P3P_LINK_SPEED_REG(pcifn) \ + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) +#define P3P_LINK_SPEED_VAL(pcifn, reg) \ + (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK) + +#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000) +#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg)) +#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100)) +#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120)) +#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124)) + +#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200)) +#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700)) +#define QLCNIC_REG(X) (NIC_CRB_BASE+(X)) +#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X)) + +#define QLCNIC_CDRP_MAX_ARGS 4 +#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4))) + +#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18)) +#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28)) + +#define CRB_XG_STATE_P3P 
(QLCNIC_REG(0x98)) +#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) +#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) + +#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c)) + +/* + * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address + * which can be read by the Phantom host to get producer/consumer indexes from + * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following + * registers will be used for the addresses of the ring's shared memory + * on the Phantom. + */ + +#define qlcnic_get_temp_val(x) ((x) >> 16) +#define qlcnic_get_temp_state(x) ((x) & 0xffff) +#define qlcnic_encode_temp(val, state) (((val) << 16) | (state)) + +/* + * Temperature control. + */ +enum { + QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */ + QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */ + QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */ +}; + + +/* Lock IDs for PHY lock */ +#define PHY_LOCK_DRIVER 0x44524956 + +#define PCIX_INT_VECTOR (0x10100) +#define PCIX_INT_MASK (0x10104) + +#define PCIX_OCM_WINDOW (0x10800) +#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func)) + +#define PCIX_TARGET_STATUS (0x10118) +#define PCIX_TARGET_STATUS_F1 (0x10160) +#define PCIX_TARGET_STATUS_F2 (0x10164) +#define PCIX_TARGET_STATUS_F3 (0x10168) +#define PCIX_TARGET_STATUS_F4 (0x10360) +#define PCIX_TARGET_STATUS_F5 (0x10364) +#define PCIX_TARGET_STATUS_F6 (0x10368) +#define PCIX_TARGET_STATUS_F7 (0x1036c) + +#define PCIX_TARGET_MASK (0x10128) +#define PCIX_TARGET_MASK_F1 (0x10170) +#define PCIX_TARGET_MASK_F2 (0x10174) +#define PCIX_TARGET_MASK_F3 (0x10178) +#define PCIX_TARGET_MASK_F4 (0x10370) +#define PCIX_TARGET_MASK_F5 (0x10374) +#define PCIX_TARGET_MASK_F6 (0x10378) +#define PCIX_TARGET_MASK_F7 (0x1037c) + +#define PCIX_MSI_F(i) (0x13000+((i)*4)) + +#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg)) +#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg)) +#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg)) + +#define PCIE_SEM0_LOCK (0x1c000) +#define PCIE_SEM0_UNLOCK (0x1c004) +#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N)) +#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N)) + +#define PCIE_SETUP_FUNCTION (0x12040) +#define PCIE_SETUP_FUNCTION2 (0x12048) +#define PCIE_MISCCFG_RC (0x1206c) +#define PCIE_TGT_SPLIT_CHICKEN (0x12080) +#define PCIE_CHICKEN3 (0x120c8) + +#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC)) +#define PCIE_MAX_MASTER_SPLIT (0x14048) + +#define QLCNIC_PORT_MODE_NONE 0 +#define QLCNIC_PORT_MODE_XG 1 +#define QLCNIC_PORT_MODE_GB 2 +#define QLCNIC_PORT_MODE_802_3_AP 3 +#define QLCNIC_PORT_MODE_AUTO_NEG 4 +#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5 +#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6 +#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24)) +#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198)) + +#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184)) +#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188)) + +#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1 +#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c)) + +#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14)) +#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c) +#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860) + +/* Device State */ +#define QLCNIC_DEV_COLD 0x1 +#define QLCNIC_DEV_INITIALIZING 0x2 +#define QLCNIC_DEV_READY 0x3 +#define QLCNIC_DEV_NEED_RESET 0x4 +#define QLCNIC_DEV_NEED_QUISCENT 0x5 +#define QLCNIC_DEV_FAILED 0x6 +#define QLCNIC_DEV_QUISCENT 0x7 + +#define QLCNIC_DEV_BADBAD 0xbad0bad0 + +#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON 
Operational */ +#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ +#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ + +#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) +#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) +#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) +#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4))) +#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4))) + +#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4))) +#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4)) + +#define QLCNIC_TYPE_NIC 1 +#define QLCNIC_TYPE_FCOE 2 +#define QLCNIC_TYPE_ISCSI 3 + +#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 +#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30 +#define QLCNIC_RCODE_FATAL_ERROR BIT_31 +#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) +#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0x1fffff) +#define QLCNIC_FWERROR_FAN_FAILURE 0x16 + +#define FW_POLL_DELAY (1 * HZ) +#define FW_FAIL_THRESH 2 + +#define QLCNIC_RESET_TIMEOUT_SECS 10 +#define QLCNIC_INIT_TIMEOUT_SECS 30 +#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000 +#define QLCNIC_RCVPEG_CHECK_DELAY 10 +#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60 +#define QLCNIC_CMDPEG_CHECK_DELAY 500 +#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200 +#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10 + +#define QLCNIC_MAX_MC_COUNT 38 +#define QLCNIC_MAX_UC_COUNT 512 +#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5 + +#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) +#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) + +/* + * PCI Interrupt Vector Values. + */ +#define PCIX_INT_VECTOR_BIT_F0 0x0080 +#define PCIX_INT_VECTOR_BIT_F1 0x0100 +#define PCIX_INT_VECTOR_BIT_F2 0x0200 +#define PCIX_INT_VECTOR_BIT_F3 0x0400 +#define PCIX_INT_VECTOR_BIT_F4 0x0800 +#define PCIX_INT_VECTOR_BIT_F5 0x1000 +#define PCIX_INT_VECTOR_BIT_F6 0x2000 +#define PCIX_INT_VECTOR_BIT_F7 0x4000 + +struct qlcnic_legacy_intr_set { + u32 int_vec_bit; + u32 tgt_status_reg; + u32 tgt_mask_reg; + u32 pci_int_reg; +}; + +#define QLCNIC_MSIX_BASE 0x132110 +#define QLCNIC_MAX_VLAN_FILTERS 64 + +#define FLASH_ROM_WINDOW 0x42110030 +#define FLASH_ROM_DATA 0x42150000 + +#define QLCNIC_FW_DUMP_REG1 0x00130060 +#define QLCNIC_FW_DUMP_REG2 0x001e0000 +#define QLCNIC_FLASH_SEM2_LK 0x0013C010 +#define QLCNIC_FLASH_SEM2_ULK 0x0013C014 +#define QLCNIC_FLASH_LOCK_ID 0x001B2100 + +/* PCI function operational mode */ +enum { + QLCNIC_MGMT_FUNC = 0, + QLCNIC_PRIV_FUNC = 1, + QLCNIC_NON_PRIV_FUNC = 2, + QLCNIC_SRIOV_PF_FUNC = 3, + QLCNIC_SRIOV_VF_FUNC = 4, + QLCNIC_UNKNOWN_FUNC_MODE = 5 +}; + +enum { + QLCNIC_PORT_DEFAULTS = 0, + QLCNIC_ADD_VLAN = 1, + QLCNIC_DEL_VLAN = 2 +}; + +#define QLC_DEV_DRV_DEFAULT 0x11111111 + +#define LSB(x) ((uint8_t)(x)) +#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8)) + +#define LSW(x) ((uint16_t)((uint32_t)(x))) +#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16)) + +#define LSD(x) ((uint32_t)((uint64_t)(x))) +#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) + +#define QLCNIC_MS_CTRL 0x41000090 +#define QLCNIC_MS_ADDR_LO 0x41000094 +#define QLCNIC_MS_ADDR_HI 0x41000098 +#define QLCNIC_MS_WRTDATA_LO 0x410000A0 +#define QLCNIC_MS_WRTDATA_HI 0x410000A4 +#define QLCNIC_MS_WRTDATA_ULO 0x410000B0 +#define QLCNIC_MS_WRTDATA_UHI 0x410000B4 +#define QLCNIC_MS_RDDATA_LO 0x410000A8 +#define QLCNIC_MS_RDDATA_HI 0x410000AC +#define QLCNIC_MS_RDDATA_ULO 0x410000B8 +#define QLCNIC_MS_RDDATA_UHI 0x410000BC + +#define QLCNIC_TA_WRITE_ENABLE (TA_CTL_ENABLE | 
TA_CTL_WRITE) +#define QLCNIC_TA_WRITE_START (TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE) +#define QLCNIC_TA_START_ENABLE (TA_CTL_START | TA_CTL_ENABLE) + +#define QLCNIC_LEGACY_INTR_CONFIG \ +{ \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK, }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, }, \ + \ + { \ + .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ + .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ + .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, }, \ +} + +/* NIU REGS */ + +#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1) + +/* + * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3) + * + * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable + * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream + * Bit 2 : enable_rx => 1:enable frame recv, 0:disable + * Bit 3 : rx_synced => R/O: recv enable synched to recv stream + * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable + * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore + * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal + * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op + * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op + * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op + * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op + * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op + */ +#define qlcnic_gb_rx_flowctl(config_word) \ + ((config_word) |= 1 << 5) +#define qlcnic_gb_get_rx_flowctl(config_word) \ + _qlcnic_crb_get_bit((config_word), 5) +#define qlcnic_gb_unset_rx_flowctl(config_word) \ + ((config_word) &= ~(1 << 5)) + +/* + * NIU GB Pause Ctl Register + */ + +#define qlcnic_gb_set_gb0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define qlcnic_gb_set_gb1_mask(config_word) \ + ((config_word) |= 1 << 2) +#define qlcnic_gb_set_gb2_mask(config_word) \ + ((config_word) |= 1 << 4) +#define qlcnic_gb_set_gb3_mask(config_word) \ + ((config_word) |= 1 << 6) + +#define qlcnic_gb_get_gb0_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 0) +#define qlcnic_gb_get_gb1_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 2) +#define qlcnic_gb_get_gb2_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 4) +#define qlcnic_gb_get_gb3_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 6) + +#define qlcnic_gb_unset_gb0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define qlcnic_gb_unset_gb1_mask(config_word) \ + ((config_word) &= ~(1 << 2)) +#define qlcnic_gb_unset_gb2_mask(config_word) \ + ((config_word) &= ~(1 << 4)) +#define qlcnic_gb_unset_gb3_mask(config_word) \ + 
((config_word) &= ~(1 << 6)) + +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ + +#define qlcnic_xg_set_xg0_mask(config_word) \ + ((config_word) |= 1 << 0) +#define qlcnic_xg_set_xg1_mask(config_word) \ + ((config_word) |= 1 << 3) + +#define qlcnic_xg_get_xg0_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 0) +#define qlcnic_xg_get_xg1_mask(config_word) \ + _qlcnic_crb_get_bit((config_word), 3) + +#define qlcnic_xg_unset_xg0_mask(config_word) \ + ((config_word) &= ~(1 << 0)) +#define qlcnic_xg_unset_xg1_mask(config_word) \ + ((config_word) &= ~(1 << 3)) + +/* + * NIU XG Pause Ctl Register + * + * Bit 0 : xg0_mask => 1:disable tx pause frames + * Bit 1 : xg0_request => 1:request single pause frame + * Bit 2 : xg0_on_off => 1:request is pause on, 0:off + * Bit 3 : xg1_mask => 1:disable tx pause frames + * Bit 4 : xg1_request => 1:request single pause frame + * Bit 5 : xg1_on_off => 1:request is pause on, 0:off + */ + +/* + * PHY-Specific MII control/status registers. + */ +#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4 +#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17 + +/* + * PHY-Specific Status Register (reg 17). + * + * Bit 0 : jabber => 1:jabber detected, 0:not + * Bit 1 : polarity => 1:polarity reversed, 0:normal + * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled + * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled + * Bit 4 : energydetect => 1:sleep, 0:active + * Bit 5 : downshift => 1:downshift, 0:no downshift + * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover) + * Bits 7-9 : cablelen => not valid in 10Mb/s mode + * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m + * Bit 10 : link => 1:link up, 0:link down + * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet + * Bit 12 : pagercvd => 1:page received, 0:page not received + * Bit 13 : duplex => 1:full duplex, 0:half duplex + * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd + */ + +#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03) + +#define qlcnic_set_phy_speed(config_word, val) \ + ((config_word) |= ((val & 0x03) << 14)) +#define qlcnic_set_phy_duplex(config_word) \ + ((config_word) |= 1 << 13) +#define qlcnic_clear_phy_duplex(config_word) \ + ((config_word) &= ~(1 << 13)) + +#define qlcnic_get_phy_link(config_word) \ + _qlcnic_crb_get_bit(config_word, 10) +#define qlcnic_get_phy_duplex(config_word) \ + _qlcnic_crb_get_bit(config_word, 13) + +#define QLCNIC_NIU_NON_PROMISC_MODE 0 +#define QLCNIC_NIU_PROMISC_MODE 1 +#define QLCNIC_NIU_ALLMULTI_MODE 2 + +#define QLCNIC_PCIE_SEM_TIMEOUT 10000 + +struct crb_128M_2M_sub_block_map { + unsigned valid; + unsigned start_128M; + unsigned end_128M; + unsigned start_2M; +}; + +struct crb_128M_2M_block_map{ + struct crb_128M_2M_sub_block_map sub_block[16]; +}; +#endif /* __QLCNIC_HDR_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c new file mode 100644 index 000000000..75ee9e4ce --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -0,0 +1,1710 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic.h" +#include "qlcnic_hdr.h" + +#include +#include +#include + +#define MASK(n) ((1ULL<<(n))-1) +#define OCM_WIN_P3P(addr) (addr & 0xffc0000) + +#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) + +#define CRB_BLK(off) ((off >> 20) & 0x3f) +#define CRB_SUBBLK(off) ((off >> 16) & 0xf) +#define CRB_WINDOW_2M (0x130060) +#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) +#define CRB_INDIRECT_2M (0x1e0000UL) + +struct qlcnic_ms_reg_ctrl { + u32 ocm_window; + u32 control; + u32 hi; + u32 low; + u32 rd[4]; + u32 wd[4]; + u64 off; +}; + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) | (((u64) readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(((u32) (val)), (addr)); + writel(((u32) (val >> 32)), (addr + 4)); +} +#endif + +static struct crb_128M_2M_block_map +crb_128M_2M_map[64] __cacheline_aligned_in_smp = { + {{{0, 0, 0, 0} } }, /* 0: PCI */ + {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ + {1, 0x0110000, 0x0120000, 0x130000}, + {1, 0x0120000, 0x0122000, 0x124000}, + {1, 0x0130000, 0x0132000, 0x126000}, + {1, 0x0140000, 0x0142000, 0x128000}, + {1, 0x0150000, 0x0152000, 0x12a000}, + {1, 0x0160000, 0x0170000, 0x110000}, + {1, 0x0170000, 0x0172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x01e0000, 0x01e0800, 0x122000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ + {{{0, 0, 0, 0} } }, /* 3: */ + {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ + {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ + {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ + {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ + {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x08f0000, 0x08f2000, 0x172000} } }, + {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x09f0000, 0x09f2000, 0x176000} } }, + {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 
0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0af0000, 0x0af2000, 0x17a000} } }, + {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, + {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ + {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ + {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ + {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ + {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ + {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ + {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ + {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ + {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ + {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ + {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ + {{{0, 0, 0, 0} } }, /* 23: */ + {{{0, 0, 0, 0} } }, /* 24: */ + {{{0, 0, 0, 0} } }, /* 25: */ + {{{0, 0, 0, 0} } }, /* 26: */ + {{{0, 0, 0, 0} } }, /* 27: */ + {{{0, 0, 0, 0} } }, /* 28: */ + {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ + {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ + {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ + {{{0} } }, /* 32: PCI */ + {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ + {1, 0x2110000, 0x2120000, 0x130000}, + {1, 0x2120000, 0x2122000, 0x124000}, + {1, 0x2130000, 0x2132000, 0x126000}, + {1, 0x2140000, 0x2142000, 0x128000}, + {1, 0x2150000, 0x2152000, 0x12a000}, + {1, 0x2160000, 0x2170000, 0x110000}, + {1, 0x2170000, 0x2172000, 0x12e000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000}, + {0, 0x0000000, 0x0000000, 0x000000} } }, + {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ + {{{0} } }, /* 35: */ + {{{0} } }, /* 36: */ + {{{0} } }, /* 37: */ + {{{0} } }, /* 38: */ + {{{0} } }, /* 39: */ + {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ + {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ + {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ + {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ + {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ + {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ + {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ + {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ + {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ + {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ + {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ + {{{1, 0x3300000, 0x3304000, 0x1a0000} } 
},/* 51: ROMUSB */ + {{{0} } }, /* 52: */ + {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ + {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ + {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ + {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ + {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ + {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ + {{{0} } }, /* 59: I2C0 */ + {{{0} } }, /* 60: I2C1 */ + {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ + {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ + {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ +}; + +/* + * top 12 bits of crb internal address (hub, agent) + */ +static const unsigned crb_hub_agt[64] = { + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PS, + QLCNIC_HW_CRB_HUB_AGT_ADR_MN, + QLCNIC_HW_CRB_HUB_AGT_ADR_MS, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SRE, + QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, + QLCNIC_HW_CRB_HUB_AGT_ADR_QMN, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2, + QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, + QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, + QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4, + QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGND, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI, + QLCNIC_HW_CRB_HUB_AGT_ADR_SN, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_EG, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PS, + QLCNIC_HW_CRB_HUB_AGT_ADR_CAM, + 0, + 0, + 0, + 0, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7, + QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q, + QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8, + QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9, + QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_SMB, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0, + QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1, + 0, + QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC, + 0, +}; + +static const u32 msi_tgt_status[8] = { + ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, + ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, + ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, + ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 +}; + +/* PCI Windowing for DDR regions. 
*/ + +#define QLCNIC_PCIE_SEM_TIMEOUT 10000 + +static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data) +{ + u32 dest; + void __iomem *val; + + dest = addr & 0xFFFF0000; + val = bar0 + QLCNIC_FW_DUMP_REG1; + writel(dest, val); + readl(val); + val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr); + *data = readl(val); +} + +static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data) +{ + u32 dest; + void __iomem *val; + + dest = addr & 0xFFFF0000; + val = bar0 + QLCNIC_FW_DUMP_REG1; + writel(dest, val); + readl(val); + val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr); + writel(data, val); + readl(val); +} + +int +qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) +{ + int timeout = 0, err = 0, done = 0; + + while (!done) { + done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)), + &err); + if (done == 1) + break; + if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { + if (id_reg) { + done = QLCRD32(adapter, id_reg, &err); + if (done != -1) + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock held by=%d\n", + sem, done); + else + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock", + sem); + } else { + dev_err(&adapter->pdev->dev, + "Failed to acquire sem=%d lock", sem); + } + return -EIO; + } + usleep_range(1000, 1500); + } + + if (id_reg) + QLCWR32(adapter, id_reg, adapter->portnum); + + return 0; +} + +void +qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem) +{ + int err = 0; + + QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err); +} + +int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr) +{ + int err = 0; + u32 data; + + if (qlcnic_82xx_check(adapter)) + qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data); + else { + data = QLCRD32(adapter, addr, &err); + if (err == -EIO) + return err; + } + return data; +} + +int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data) +{ + int ret = 0; + + if (qlcnic_82xx_check(adapter)) + qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data); + else + ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data); + + return ret; +} + +static int +qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) +{ + u32 i, producer; + struct qlcnic_cmd_buffer *pbuf; + struct cmd_desc_type0 *cmd_desc; + struct qlcnic_host_tx_ring *tx_ring; + + i = 0; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return -EIO; + + tx_ring = &adapter->tx_ring[0]; + __netif_tx_lock_bh(tx_ring->txq); + + producer = tx_ring->producer; + + if (nr_desc >= qlcnic_tx_avail(tx_ring)) { + netif_tx_stop_queue(tx_ring->txq); + smp_mb(); + if (qlcnic_tx_avail(tx_ring) > nr_desc) { + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) + netif_tx_wake_queue(tx_ring->txq); + } else { + adapter->stats.xmit_off++; + __netif_tx_unlock_bh(tx_ring->txq); + return -EBUSY; + } + } + + do { + cmd_desc = &cmd_desc_arr[i]; + + pbuf = &tx_ring->cmd_buf_arr[producer]; + pbuf->skb = NULL; + pbuf->frag_count = 0; + + memcpy(&tx_ring->desc_head[producer], + cmd_desc, sizeof(struct cmd_desc_type0)); + + producer = get_next_index(producer, tx_ring->num_desc); + i++; + + } while (i != nr_desc); + + tx_ring->producer = producer; + + qlcnic_update_cmd_producer(tx_ring); + + __netif_tx_unlock_bh(tx_ring->txq); + + return 0; +} + +int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, + u16 vlan_id, u8 op) +{ + struct qlcnic_nic_req req; + struct qlcnic_mac_req *mac_req; + struct qlcnic_vlan_req *vlan_req; + u64 word; + + 
memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); + + word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + mac_req = (struct qlcnic_mac_req *)&req.words[0]; + mac_req->op = op; + memcpy(mac_req->mac_addr, addr, ETH_ALEN); + + vlan_req = (struct qlcnic_vlan_req *)&req.words[1]; + vlan_req->vlan_id = cpu_to_le16(vlan_id); + + return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); +} + +int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr) +{ + struct qlcnic_mac_vlan_list *cur; + struct list_head *head; + int err = -EINVAL; + + /* Delete MAC from the existing list */ + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_vlan_list, list); + if (ether_addr_equal(addr, cur->mac_addr)) { + err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + 0, QLCNIC_MAC_DEL); + if (err) + return err; + list_del(&cur->list); + kfree(cur); + return err; + } + } + return err; +} + +int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan, + enum qlcnic_mac_type mac_type) +{ + struct qlcnic_mac_vlan_list *cur; + struct list_head *head; + + /* look up if already exists */ + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_vlan_list, list); + if (ether_addr_equal(addr, cur->mac_addr) && + cur->vlan_id == vlan) + return 0; + } + + cur = kzalloc(sizeof(*cur), GFP_ATOMIC); + if (cur == NULL) + return -ENOMEM; + + memcpy(cur->mac_addr, addr, ETH_ALEN); + + if (qlcnic_sre_macaddr_change(adapter, + cur->mac_addr, vlan, QLCNIC_MAC_ADD)) { + kfree(cur); + return -EIO; + } + + cur->vlan_id = vlan; + cur->mac_type = mac_type; + + list_add_tail(&cur->list, &adapter->mac_list); + return 0; +} + +void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mac_vlan_list *cur; + struct list_head *head, *tmp; + + list_for_each_safe(head, tmp, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_vlan_list, list); + if (cur->mac_type != QLCNIC_MULTICAST_MAC) + continue; + + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + cur->vlan_id, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct netdev_hw_addr *ha; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + u32 mode = VPORT_MISS_MODE_DROP; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return; + + qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan, + QLCNIC_UNICAST_MAC); + qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC); + + if (netdev->flags & IFF_PROMISC) { + if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) + mode = VPORT_MISS_MODE_ACCEPT_ALL; + } else if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > ahw->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + } else if (!netdev_mc_empty(netdev)) { + qlcnic_flush_mcast_mac(adapter); + netdev_for_each_mc_addr(ha, netdev) + qlcnic_nic_add_mac(adapter, ha->addr, vlan, + QLCNIC_MULTICAST_MAC); + } + + /* configure unicast MAC address, if there is not sufficient space + * to store all the unicast addresses then enable promiscuous mode + */ + if (netdev_uc_count(netdev) > ahw->max_uc_count) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + } else if (!netdev_uc_empty(netdev)) { + netdev_for_each_uc_addr(ha, 
netdev) + qlcnic_nic_add_mac(adapter, ha->addr, vlan, + QLCNIC_UNICAST_MAC); + } + + if (mode == VPORT_MISS_MODE_ACCEPT_ALL && + !adapter->fdb_mac_learn) { + qlcnic_alloc_lb_filters_mem(adapter); + adapter->drv_mac_learn = 1; + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + adapter->rx_mac_learn = true; + } else { + adapter->drv_mac_learn = 0; + adapter->rx_mac_learn = false; + } + + qlcnic_nic_set_promisc(adapter, mode); +} + +void qlcnic_set_multi(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return; + + if (qlcnic_sriov_vf_check(adapter)) + qlcnic_sriov_vf_set_multi(netdev); + else + __qlcnic_set_multi(netdev, 0); +} + +int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) +{ + struct qlcnic_nic_req req; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(mode); + + return qlcnic_send_cmd_descs(adapter, + (struct cmd_desc_type0 *)&req, 1); +} + +void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter) +{ + struct list_head *head = &adapter->mac_list; + struct qlcnic_mac_vlan_list *cur; + + while (!list_empty(head)) { + cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); + qlcnic_sre_macaddr_change(adapter, + cur->mac_addr, 0, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + +void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter) +{ + struct qlcnic_filter *tmp_fil; + struct hlist_node *n; + struct hlist_head *head; + int i; + unsigned long expires; + u8 cmd; + + for (i = 0; i < adapter->fhash.fbucket_size; i++) { + head = &(adapter->fhash.fhead[i]); + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { + cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL : + QLCNIC_MAC_DEL; + expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ; + if (time_before(expires, jiffies)) { + qlcnic_sre_macaddr_change(adapter, + tmp_fil->faddr, + tmp_fil->vlan_id, + cmd); + spin_lock_bh(&adapter->mac_learn_lock); + adapter->fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->mac_learn_lock); + kfree(tmp_fil); + } + } + } + for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) { + head = &(adapter->rx_fhash.fhead[i]); + + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) + { + expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ; + if (time_before(expires, jiffies)) { + spin_lock_bh(&adapter->rx_mac_learn_lock); + adapter->rx_fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->rx_mac_learn_lock); + kfree(tmp_fil); + } + } + } +} + +void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter) +{ + struct qlcnic_filter *tmp_fil; + struct hlist_node *n; + struct hlist_head *head; + int i; + u8 cmd; + + for (i = 0; i < adapter->fhash.fbucket_size; i++) { + head = &(adapter->fhash.fhead[i]); + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { + cmd = tmp_fil->vlan_id ? 
QLCNIC_MAC_VLAN_DEL : + QLCNIC_MAC_DEL; + qlcnic_sre_macaddr_change(adapter, + tmp_fil->faddr, + tmp_fil->vlan_id, + cmd); + spin_lock_bh(&adapter->mac_learn_lock); + adapter->fhash.fnum--; + hlist_del(&tmp_fil->fnode); + spin_unlock_bh(&adapter->mac_learn_lock); + kfree(tmp_fil); + } + } +} + +static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) +{ + struct qlcnic_nic_req req; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK | + ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32)); + + req.words[0] = cpu_to_le64(flag); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n", + flag ? "Set" : "Reset"); + return rv; +} + +int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + if (qlcnic_set_fw_loopback(adapter, mode)) + return -EIO; + + if (qlcnic_nic_set_promisc(adapter, + VPORT_MISS_MODE_ACCEPT_ALL)) { + qlcnic_set_fw_loopback(adapter, 0); + return -EIO; + } + + msleep(1000); + return 0; +} + +int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode) +{ + struct net_device *netdev = adapter->netdev; + + mode = VPORT_MISS_MODE_DROP; + qlcnic_set_fw_loopback(adapter, 0); + + if (netdev->flags & IFF_PROMISC) + mode = VPORT_MISS_MODE_ACCEPT_ALL; + else if (netdev->flags & IFF_ALLMULTI) + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + + qlcnic_nic_set_promisc(adapter, mode); + msleep(1000); + return 0; +} + +int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter) +{ + u8 mac[ETH_ALEN]; + int ret; + + ret = qlcnic_get_mac_address(adapter, mac, + adapter->ahw->physical_port); + if (ret) + return ret; + + memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN); + adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID; + + return 0; +} + +int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_req req; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE | + ((u64) adapter->portnum << 16)); + + req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32); + req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets | + ((u64) adapter->ahw->coal.rx_time_us) << 16); + req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out | + ((u64) adapter->ahw->coal.type) << 32 | + ((u64) adapter->ahw->coal.sts_ring_mask) << 40); + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send interrupt coalescing parameters\n"); + + return rv; +} + +/* Send the interrupt coalescing parameter set by ethtool to the card. 
*/ +int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter, + struct ethtool_coalesce *ethcoal) +{ + struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; + int rv; + + coal->flag = QLCNIC_INTR_DEFAULT; + coal->rx_time_us = ethcoal->rx_coalesce_usecs; + coal->rx_packets = ethcoal->rx_max_coalesced_frames; + + rv = qlcnic_82xx_set_rx_coalesce(adapter); + + if (rv) + netdev_err(adapter->netdev, + "Failed to set Rx coalescing parameters\n"); + + return rv; +} + +#define QLCNIC_ENABLE_IPV4_LRO BIT_0 +#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9) + +int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + word = 0; + if (enable) { + word = QLCNIC_ENABLE_IPV4_LRO; + if (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAP2_HW_LRO_IPV6) + word |= QLCNIC_ENABLE_IPV6_LRO; + } + + req.words[0] = cpu_to_le64(word); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send configure hw lro request\n"); + + return rv; +} + +int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING | + ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(enable); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "Could not send configure bridge mode request\n"); + + adapter->flags ^= QLCNIC_BRIDGE_ENABLED; + + return rv; +} + + +#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3 +#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10 +#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63) +#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL + +int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int i, rv; + + static const u64 key[] = { + 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, + 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, + 0x255b0ec26d5a56daULL + }; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + /* + * RSS request: + * bits 3-0: hash_method + * 5-4: hash_type_ipv4 + * 7-6: hash_type_ipv6 + * 8: enable + * 9: use indirection table + * 10: type-c rss + * 11: udp rss + * 47-12: reserved + * 62-48: indirection table mask + * 63: feature flag + */ + word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) | + ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) | + ((u64)(enable & 0x1) << 8) | + ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) | + (u64)QLCNIC_ENABLE_TYPE_C_RSS | + (u64)QLCNIC_RSS_FEATURE_FLAG; + + req.words[0] = cpu_to_le64(word); + for (i = 0; i < 5; i++) + req.words[i+1] = cpu_to_le64(key[i]); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, "could not configure RSS\n"); + + return rv; +} + 
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter, + __be32 ip, int cmd) +{ + struct qlcnic_nic_req req; + struct qlcnic_ipaddr *ipa; + u64 word; + int rv; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(cmd); + ipa = (struct qlcnic_ipaddr *)&req.words[1]; + ipa->ipv4 = ip; + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not notify %s IP 0x%x request\n", + (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip); +} + +int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + req.words[0] = cpu_to_le64(enable | (enable << 8)); + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not configure link notification\n"); + + return rv; +} + +static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter) +{ + struct qlcnic_nic_req req; + u64 word; + int rv; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return 0; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_LRO_REQUEST | + ((u64)adapter->portnum << 16) | + ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ; + + req.req_hdr = cpu_to_le64(word); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv != 0) + dev_err(&adapter->netdev->dev, + "could not cleanup lro flows\n"); + + return rv; +} + +/* + * qlcnic_change_mtu - Change the Maximum Transfer Unit + * @returns 0 on success, negative on failure + */ + +int qlcnic_change_mtu(struct net_device *netdev, int mtu) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int rc = 0; + + if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) { + dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes" + " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU); + return -EINVAL; + } + + rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); + + if (!rc) + netdev->mtu = mtu; + + return rc; +} + +static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter, + netdev_features_t features) +{ + u32 offload_flags = adapter->offload_flags; + + if (offload_flags & BIT_0) { + features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + adapter->rx_csum = 1; + if (QLCNIC_IS_TSO_CAPABLE(adapter)) { + if (!(offload_flags & BIT_1)) + features &= ~NETIF_F_TSO; + else + features |= NETIF_F_TSO; + + if (!(offload_flags & BIT_2)) + features &= ~NETIF_F_TSO6; + else + features |= NETIF_F_TSO6; + } + } else { + features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + + if (QLCNIC_IS_TSO_CAPABLE(adapter)) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + adapter->rx_csum = 0; + } + + return features; +} + +netdev_features_t qlcnic_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed; + + if (qlcnic_82xx_check(adapter) && + (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) { + features = 
qlcnic_process_flags(adapter, features); + } else { + changed = features ^ netdev->features; + features ^= changed & (NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6); + } + } + + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + return features; +} + + +int qlcnic_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; + int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0; + + if (!(changed & NETIF_F_LRO)) + return 0; + + netdev->features ^= NETIF_F_LRO; + + if (qlcnic_config_hw_lro(adapter, hw_lro)) + return -EIO; + + if (!hw_lro && qlcnic_82xx_check(adapter)) { + if (qlcnic_send_lro_cleanup(adapter)) + return -EIO; + } + + return 0; +} + +/* + * Changes the CRB window to the specified window. + */ + /* Returns < 0 if off is not valid, + * 1 if window access is needed. 'off' is set to offset from + * CRB space in 128M pci map + * 0 if no window access is needed. 'off' is set to 2M addr + * In: 'off' is offset from base in 128M pci map + */ +static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw, + ulong off, void __iomem **addr) +{ + const struct crb_128M_2M_sub_block_map *m; + + if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE)) + return -EINVAL; + + off -= QLCNIC_PCI_CRBSPACE; + + /* + * Try direct map + */ + m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; + + if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { + *addr = ahw->pci_base0 + m->start_2M + + (off - m->start_128M); + return 0; + } + + /* + * Not in direct map, use crb window + */ + *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); + return 1; +} + +/* + * In: 'off' is offset from CRB space in 128M pci map + * Out: 'off' is 2M pci map addr + * side effect: lock crb window + */ +static int +qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off) +{ + u32 window; + void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M; + + off -= QLCNIC_PCI_CRBSPACE; + + window = CRB_HI(off); + if (window == 0) { + dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off); + return -EIO; + } + + writel(window, addr); + if (readl(addr) != window) { + if (printk_ratelimit()) + dev_warn(&adapter->pdev->dev, + "failed to set CRB window to %d off 0x%lx\n", + window, off); + return -EIO; + } + return 0; +} + +int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, + u32 data) +{ + unsigned long flags; + int rv; + void __iomem *addr = NULL; + + rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr); + + if (rv == 0) { + writel(data, addr); + return 0; + } + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw->crb_lock, flags); + crb_win_lock(adapter); + rv = qlcnic_pci_set_crbwindow_2M(adapter, off); + if (!rv) + writel(data, addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); + return rv; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -EIO; +} + +int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off, + int *err) +{ + unsigned long flags; + int rv; + u32 data = -1; + void __iomem *addr = NULL; + + rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr); + + if (rv == 0) + return readl(addr); + + if (rv > 0) { + /* indirect access */ + write_lock_irqsave(&adapter->ahw->crb_lock, flags); + 
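		/*
		 * Indirect path: the offset missed the static 128M-to-2M map in
		 * qlcnic_pci_get_crb_addr_2M(), so 'addr' already points into
		 * the CRB_INDIRECT_2M aperture with the low bits of the offset
		 * (off & MASK(16)) folded in.  The upper bits are selected by
		 * sliding the window below: qlcnic_pci_set_crbwindow_2M() writes
		 * CRB_HI(off) to the CRB_WINDOW_2M register and reads it back to
		 * confirm the window really moved.  crb_lock plus the hardware
		 * window lock serialize this against other users of the window.
		 */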
crb_win_lock(adapter); + if (!qlcnic_pci_set_crbwindow_2M(adapter, off)) + data = readl(addr); + crb_win_unlock(adapter); + write_unlock_irqrestore(&adapter->ahw->crb_lock, flags); + return data; + } + + dev_err(&adapter->pdev->dev, + "%s: invalid offset: 0x%016lx\n", __func__, off); + dump_stack(); + return -1; +} + +void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw, + u32 offset) +{ + void __iomem *addr = NULL; + + WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr)); + + return addr; +} + +static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, + u32 window, u64 off, u64 *data, int op) +{ + void __iomem *addr; + u32 start; + + mutex_lock(&adapter->ahw->mem_lock); + + writel(window, adapter->ahw->ocm_win_crb); + /* read back to flush */ + readl(adapter->ahw->ocm_win_crb); + start = QLCNIC_PCI_OCM0_2M + off; + + addr = adapter->ahw->pci_base0 + start; + + if (op == 0) /* read */ + *data = readq(addr); + else /* write */ + writeq(*data, addr); + + /* Set window to 0 */ + writel(0, adapter->ahw->ocm_win_crb); + readl(adapter->ahw->ocm_win_crb); + + mutex_unlock(&adapter->ahw->mem_lock); + return 0; +} + +static void +qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) +{ + void __iomem *addr = adapter->ahw->pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw->mem_lock); + *data = readq(addr); + mutex_unlock(&adapter->ahw->mem_lock); +} + +static void +qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) +{ + void __iomem *addr = adapter->ahw->pci_base0 + + QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM); + + mutex_lock(&adapter->ahw->mem_lock); + writeq(data, addr); + mutex_unlock(&adapter->ahw->mem_lock); +} + + + +/* Set MS memory control data for different adapters */ +static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off, + struct qlcnic_ms_reg_ctrl *ms) +{ + ms->control = QLCNIC_MS_CTRL; + ms->low = QLCNIC_MS_ADDR_LO; + ms->hi = QLCNIC_MS_ADDR_HI; + if (off & 0xf) { + ms->wd[0] = QLCNIC_MS_WRTDATA_LO; + ms->rd[0] = QLCNIC_MS_RDDATA_LO; + ms->wd[1] = QLCNIC_MS_WRTDATA_HI; + ms->rd[1] = QLCNIC_MS_RDDATA_HI; + ms->wd[2] = QLCNIC_MS_WRTDATA_ULO; + ms->wd[3] = QLCNIC_MS_WRTDATA_UHI; + ms->rd[2] = QLCNIC_MS_RDDATA_ULO; + ms->rd[3] = QLCNIC_MS_RDDATA_UHI; + } else { + ms->wd[0] = QLCNIC_MS_WRTDATA_ULO; + ms->rd[0] = QLCNIC_MS_RDDATA_ULO; + ms->wd[1] = QLCNIC_MS_WRTDATA_UHI; + ms->rd[1] = QLCNIC_MS_RDDATA_UHI; + ms->wd[2] = QLCNIC_MS_WRTDATA_LO; + ms->wd[3] = QLCNIC_MS_WRTDATA_HI; + ms->rd[2] = QLCNIC_MS_RDDATA_LO; + ms->rd[3] = QLCNIC_MS_RDDATA_HI; + } + + ms->ocm_window = OCM_WIN_P3P(off); + ms->off = GET_MEM_OFFS_2M(off); +} + +int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data) +{ + int j, ret = 0; + u32 temp, off8; + struct qlcnic_ms_reg_ctrl ms; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + + memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl)); + if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX) || + ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, + QLCNIC_ADDR_DDR_NET_MAX))) + return -EIO; + + qlcnic_set_ms_controls(adapter, off, &ms); + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) + return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window, + ms.off, &data, 1); + + off8 = off & ~0xf; + + mutex_lock(&adapter->ahw->mem_lock); + + qlcnic_ind_wr(adapter, ms.low, off8); + qlcnic_ind_wr(adapter, ms.hi, 0); + + qlcnic_ind_wr(adapter, ms.control, 
TA_CTL_ENABLE); + qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qlcnic_ind_rd(adapter, ms.control); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + ret = -EIO; + goto done; + } + + /* This is the modify part of read-modify-write */ + qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0])); + qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1])); + /* This is the write part of read-modify-write */ + qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff); + qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff); + + qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE); + qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qlcnic_ind_rd(adapter, ms.control); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to write through agent\n"); + ret = -EIO; + } else + ret = 0; + +done: + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data) +{ + int j, ret; + u32 temp, off8; + u64 val; + struct qlcnic_ms_reg_ctrl ms; + + /* Only 64-bit aligned access */ + if (off & 7) + return -EIO; + if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, + QLCNIC_ADDR_QDR_NET_MAX) || + ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, + QLCNIC_ADDR_DDR_NET_MAX))) + return -EIO; + + memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl)); + qlcnic_set_ms_controls(adapter, off, &ms); + + if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) + return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window, + ms.off, data, 0); + + mutex_lock(&adapter->ahw->mem_lock); + + off8 = off & ~0xf; + + qlcnic_ind_wr(adapter, ms.low, off8); + qlcnic_ind_wr(adapter, ms.hi, 0); + + qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE); + qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qlcnic_ind_rd(adapter, ms.control); + if ((temp & TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + ret = -EIO; + } else { + + temp = qlcnic_ind_rd(adapter, ms.rd[3]); + val = (u64)temp << 32; + val |= qlcnic_ind_rd(adapter, ms.rd[2]); + *data = val; + ret = 0; + } + + mutex_unlock(&adapter->ahw->mem_lock); + + return ret; +} + +int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter) +{ + int offset, board_type, magic, err = 0; + struct pci_dev *pdev = adapter->pdev; + + offset = QLCNIC_FW_MAGIC_OFFSET; + if (qlcnic_rom_fast_read(adapter, offset, &magic)) + return -EIO; + + if (magic != QLCNIC_BDINFO_MAGIC) { + dev_err(&pdev->dev, "invalid board config, magic=%08x\n", + magic); + return -EIO; + } + + offset = QLCNIC_BRDTYPE_OFFSET; + if (qlcnic_rom_fast_read(adapter, offset, &board_type)) + return -EIO; + + adapter->ahw->board_type = board_type; + + if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) { + u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err); + if (err == -EIO) + return err; + if ((gpio & 0x8000) == 0) + board_type = QLCNIC_BRDTYPE_P3P_10G_TP; + } + + switch (board_type) { + case QLCNIC_BRDTYPE_P3P_HMEZ: + case QLCNIC_BRDTYPE_P3P_XG_LOM: + case QLCNIC_BRDTYPE_P3P_10G_CX4: + case QLCNIC_BRDTYPE_P3P_10G_CX4_LP: + case QLCNIC_BRDTYPE_P3P_IMEZ: + case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS: + case QLCNIC_BRDTYPE_P3P_10G_SFP_CT: + case 
QLCNIC_BRDTYPE_P3P_10G_SFP_QT: + case QLCNIC_BRDTYPE_P3P_10G_XFP: + case QLCNIC_BRDTYPE_P3P_10000_BASE_T: + adapter->ahw->port_type = QLCNIC_XGBE; + break; + case QLCNIC_BRDTYPE_P3P_REF_QG: + case QLCNIC_BRDTYPE_P3P_4_GB: + case QLCNIC_BRDTYPE_P3P_4_GB_MM: + adapter->ahw->port_type = QLCNIC_GBE; + break; + case QLCNIC_BRDTYPE_P3P_10G_TP: + adapter->ahw->port_type = (adapter->portnum < 2) ? + QLCNIC_XGBE : QLCNIC_GBE; + break; + default: + dev_err(&pdev->dev, "unknown board type %x\n", board_type); + adapter->ahw->port_type = QLCNIC_XGBE; + break; + } + + return 0; +} + +static int +qlcnic_wol_supported(struct qlcnic_adapter *adapter) +{ + u32 wol_cfg; + int err = 0; + + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err); + if (wol_cfg & (1UL << adapter->portnum)) { + wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err); + if (err == -EIO) + return err; + if (wol_cfg & (1 << adapter->portnum)) + return 1; + } + + return 0; +} + +int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) +{ + struct qlcnic_nic_req req; + int rv; + u64 word; + + memset(&req, 0, sizeof(struct qlcnic_nic_req)); + req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); + + word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16); + req.req_hdr = cpu_to_le64(word); + + req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum); + req.words[1] = cpu_to_le64(state); + + rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); + if (rv) + dev_err(&adapter->pdev->dev, "LED configuration failed.\n"); + + return rv; +} + +void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_cmd_args cmd; + u8 beacon_state; + int err = 0; + + if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_GET_LED_STATUS); + if (!err) { + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + netdev_err(adapter->netdev, + "Failed to get current beacon state, err=%d\n", + err); + } else { + beacon_state = cmd.rsp.arg[1]; + if (beacon_state == QLCNIC_BEACON_DISABLE) + ahw->beacon_state = QLCNIC_BEACON_OFF; + else if (beacon_state == QLCNIC_BEACON_EANBLE) + ahw->beacon_state = QLCNIC_BEACON_ON; + } + } + qlcnic_free_mbx_args(&cmd); + } + + return; +} + +void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter) +{ + void __iomem *msix_base_addr; + u32 func; + u32 msix_base; + + pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); + msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE; + msix_base = readl(msix_base_addr); + func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE; + adapter->ahw->pci_func = func; +} + +void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + int err = 0; + u32 data; + u64 qmdata; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata); + memcpy(buf, &qmdata, size); + } else { + data = QLCRD32(adapter, offset, &err); + memcpy(buf, &data, size); + } +} + +void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf, + loff_t offset, size_t size) +{ + u32 data; + u64 qmdata; + + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) { + memcpy(&qmdata, buf, size); + qlcnic_pci_camqm_write_2M(adapter, offset, qmdata); + } else { + memcpy(&data, buf, size); + QLCWR32(adapter, offset, data); + } +} + +int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter) +{ + return 
qlcnic_pcie_sem_lock(adapter, 5, 0); +} + +void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter) +{ + qlcnic_pcie_sem_unlock(adapter, 5); +} + +int qlcnic_82xx_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + if (qlcnic_wol_supported(adapter)) { + pci_enable_wake(pdev, PCI_D3cold, 1); + pci_enable_wake(pdev, PCI_D3hot, 1); + } + + return 0; +} + +int qlcnic_82xx_resume(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + err = qlcnic_start_firmware(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "failed to start firmware\n"); + return err; + } + + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (!err) + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h new file mode 100644 index 000000000..cbe2399c3 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -0,0 +1,225 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef __QLCNIC_HW_H +#define __QLCNIC_HW_H + +/* Common registers in 83xx and 82xx */ +enum qlcnic_regs { + QLCNIC_PEG_HALT_STATUS1 = 0, + QLCNIC_PEG_HALT_STATUS2, + QLCNIC_PEG_ALIVE_COUNTER, + QLCNIC_FLASH_LOCK_OWNER, + QLCNIC_FW_CAPABILITIES, + QLCNIC_CRB_DRV_ACTIVE, + QLCNIC_CRB_DEV_STATE, + QLCNIC_CRB_DRV_STATE, + QLCNIC_CRB_DRV_SCRATCH, + QLCNIC_CRB_DEV_PARTITION_INFO, + QLCNIC_CRB_DRV_IDC_VER, + QLCNIC_FW_VERSION_MAJOR, + QLCNIC_FW_VERSION_MINOR, + QLCNIC_FW_VERSION_SUB, + QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_FW_IMG_VALID, + QLCNIC_CMDPEG_STATE, + QLCNIC_RCVPEG_STATE, + QLCNIC_ASIC_TEMP, + QLCNIC_FW_API, + QLCNIC_DRV_OP_MODE, + QLCNIC_FLASH_LOCK, + QLCNIC_FLASH_UNLOCK, +}; + +/* Read from an address offset from BAR0, existing registers */ +#define QLC_SHARED_REG_RD32(a, addr) \ + readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr])) + +/* Write to an address offset from BAR0, existing registers */ +#define QLC_SHARED_REG_WR32(a, addr, value) \ + writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr])) + +/* Read from a direct address offset from BAR0, additional registers */ +#define QLCRDX(ahw, addr) \ + readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])) + +/* Write to a direct address offset from BAR0, additional registers */ +#define QLCWRX(ahw, addr, value) \ + writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))) + +#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1 +#define QLCNIC_CMD_CONFIG_INTRPT 0x2 +#define QLCNIC_CMD_CREATE_RX_CTX 0x7 +#define QLCNIC_CMD_DESTROY_RX_CTX 0x8 +#define QLCNIC_CMD_CREATE_TX_CTX 0x9 +#define QLCNIC_CMD_DESTROY_TX_CTX 0xa +#define QLCNIC_CMD_CONFIGURE_LRO 0xC +#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD +#define QLCNIC_CMD_GET_STATISTICS 0xF +#define QLCNIC_CMD_INTRPT_TEST 0x11 +#define QLCNIC_CMD_SET_MTU 0x12 +#define QLCNIC_CMD_READ_PHY 0x13 +#define QLCNIC_CMD_WRITE_PHY 0x14 +#define 
QLCNIC_CMD_READ_HW_REG 0x15 +#define QLCNIC_CMD_GET_FLOW_CTL 0x16 +#define QLCNIC_CMD_SET_FLOW_CTL 0x17 +#define QLCNIC_CMD_READ_MAX_MTU 0x18 +#define QLCNIC_CMD_READ_MAX_LRO 0x19 +#define QLCNIC_CMD_MAC_ADDRESS 0x1f +#define QLCNIC_CMD_GET_PCI_INFO 0x20 +#define QLCNIC_CMD_GET_NIC_INFO 0x21 +#define QLCNIC_CMD_SET_NIC_INFO 0x22 +#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24 +#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25 +#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26 +#define QLCNIC_CMD_SET_PORTMIRRORING 0x27 +#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28 +#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29 +#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a +#define QLCNIC_CMD_CONFIG_PORT 0x2e +#define QLCNIC_CMD_TEMP_SIZE 0x2f +#define QLCNIC_CMD_GET_TEMP_HDR 0x30 +#define QLCNIC_CMD_BC_EVENT_SETUP 0x31 +#define QLCNIC_CMD_CONFIG_VPORT 0x32 +#define QLCNIC_CMD_DCB_QUERY_CAP 0x34 +#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35 +#define QLCNIC_CMD_GET_MAC_STATS 0x37 +#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38 +#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39 +#define QLCNIC_CMD_GET_LED_STATUS 0x3C +#define QLCNIC_CMD_CONFIGURE_RSS 0x41 +#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43 +#define QLCNIC_CMD_CONFIGURE_LED 0x44 +#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45 +#define QLCNIC_CMD_GET_LINK_EVENT 0x48 +#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49 +#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A +#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E +#define QLCNIC_CMD_INIT_NIC_FUNC 0x60 +#define QLCNIC_CMD_STOP_NIC_FUNC 0x61 +#define QLCNIC_CMD_IDC_ACK 0x63 +#define QLCNIC_CMD_SET_PORT_CONFIG 0x66 +#define QLCNIC_CMD_GET_PORT_CONFIG 0x67 +#define QLCNIC_CMD_GET_LINK_STATUS 0x68 +#define QLCNIC_CMD_SET_LED_CONFIG 0x69 +#define QLCNIC_CMD_GET_LED_CONFIG 0x6A +#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F +#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B + +#define QLCNIC_INTRPT_INTX 1 +#define QLCNIC_INTRPT_MSIX 3 +#define QLCNIC_INTRPT_ADD 1 +#define QLCNIC_INTRPT_DEL 2 + +#define QLCNIC_GET_CURRENT_MAC 1 +#define QLCNIC_SET_STATION_MAC 2 +#define QLCNIC_GET_DEFAULT_MAC 3 +#define QLCNIC_GET_FAC_DEF_MAC 4 +#define QLCNIC_SET_FAC_DEF_MAC 5 + +#define QLCNIC_MBX_LINK_EVENT 0x8001 +#define QLCNIC_MBX_BC_EVENT 0x8002 +#define QLCNIC_MBX_COMP_EVENT 0x8100 +#define QLCNIC_MBX_REQUEST_EVENT 0x8101 +#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102 +#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110 +#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130 +#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131 + +struct qlcnic_mailbox_metadata { + u32 cmd; + u32 in_args; + u32 out_args; +}; + +/* Mailbox ownership */ +#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1)) + +#define QLCNIC_SET_OWNER 1 +#define QLCNIC_CLR_OWNER 0 +#define QLCNIC_MBX_TIMEOUT 5000 + +#define QLCNIC_MBX_RSP_OK 1 +#define QLCNIC_MBX_PORT_RSP_OK 0x1a +#define QLCNIC_MBX_ASYNC_EVENT BIT_15 + +/* Set HW Tx ring limit for 82xx adapter. 
*/ +#define QLCNIC_MAX_HW_TX_RINGS 8 +#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4 +#define QLCNIC_MAX_TX_RINGS 8 +#define QLCNIC_MAX_SDS_RINGS 8 + +struct qlcnic_pci_info; +struct qlcnic_info; +struct qlcnic_cmd_args; +struct ethtool_stats; +struct pci_device_id; +struct qlcnic_host_sds_ring; +struct qlcnic_host_tx_ring; +struct qlcnic_hardware_context; +struct qlcnic_adapter; +struct qlcnic_fw_dump; + +int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *); +int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); +int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int); +int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32); +int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev); +void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *); +void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, + u64 *uaddr, u16 vlan_id); +int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *, + struct ethtool_coalesce *); +int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *); +int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int); +void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter, + __be32, int); +int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int); +void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); +int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8); +int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8); +void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t); +void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t); +int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *); +int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int); +int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8); +int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *); +int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *tx_ring, int); +void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *); +void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *, + struct qlcnic_host_tx_ring *); +int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8); +int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8); +int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); +int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); +int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); +int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *, + struct qlcnic_adapter *, u32); +int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32); +int qlcnic_82xx_get_board_info(struct qlcnic_adapter *); +int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32); +void qlcnic_82xx_get_func_no(struct qlcnic_adapter *); +int qlcnic_82xx_api_lock(struct qlcnic_adapter *); +void qlcnic_82xx_api_unlock(struct qlcnic_adapter *); +void qlcnic_82xx_napi_enable(struct qlcnic_adapter *); +void qlcnic_82xx_napi_disable(struct qlcnic_adapter *); +void qlcnic_82xx_napi_del(struct qlcnic_adapter *); +int qlcnic_82xx_shutdown(struct pci_dev *); +int qlcnic_82xx_resume(struct qlcnic_adapter *); +void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed); +void qlcnic_fw_poll_work(struct work_struct *work); + +u32 qlcnic_82xx_get_saved_state(void *, u32); +void qlcnic_82xx_set_saved_state(void *, u32, u32); +void 
qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *); +u32 qlcnic_82xx_get_cap_size(void *, int); +void qlcnic_82xx_set_sys_info(void *, int, u32); +void qlcnic_82xx_store_cap_mask(void *, u32); +#endif /* __QLCNIC_HW_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c new file mode 100644 index 000000000..4ff3021b9 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -0,0 +1,1310 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include "qlcnic.h" +#include "qlcnic_hw.h" + +struct crb_addr_pair { + u32 addr; + u32 data; +}; + +#define QLCNIC_MAX_CRB_XFORM 60 +static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM]; + +#define crb_addr_transform(name) \ + (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \ + QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20) + +#define QLCNIC_ADDR_ERROR (0xffffffff) + +static int +qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter); + +static void crb_addr_transform_setup(void) +{ + crb_addr_transform(XDMA); + crb_addr_transform(TIMR); + crb_addr_transform(SRE); + crb_addr_transform(SQN3); + crb_addr_transform(SQN2); + crb_addr_transform(SQN1); + crb_addr_transform(SQN0); + crb_addr_transform(SQS3); + crb_addr_transform(SQS2); + crb_addr_transform(SQS1); + crb_addr_transform(SQS0); + crb_addr_transform(RPMX7); + crb_addr_transform(RPMX6); + crb_addr_transform(RPMX5); + crb_addr_transform(RPMX4); + crb_addr_transform(RPMX3); + crb_addr_transform(RPMX2); + crb_addr_transform(RPMX1); + crb_addr_transform(RPMX0); + crb_addr_transform(ROMUSB); + crb_addr_transform(SN); + crb_addr_transform(QMN); + crb_addr_transform(QMS); + crb_addr_transform(PGNI); + crb_addr_transform(PGND); + crb_addr_transform(PGN3); + crb_addr_transform(PGN2); + crb_addr_transform(PGN1); + crb_addr_transform(PGN0); + crb_addr_transform(PGSI); + crb_addr_transform(PGSD); + crb_addr_transform(PGS3); + crb_addr_transform(PGS2); + crb_addr_transform(PGS1); + crb_addr_transform(PGS0); + crb_addr_transform(PS); + crb_addr_transform(PH); + crb_addr_transform(NIU); + crb_addr_transform(I2Q); + crb_addr_transform(EG); + crb_addr_transform(MN); + crb_addr_transform(MS); + crb_addr_transform(CAS2); + crb_addr_transform(CAS1); + crb_addr_transform(CAS0); + crb_addr_transform(CAM); + crb_addr_transform(C2C1); + crb_addr_transform(C2C0); + crb_addr_transform(SMB); + crb_addr_transform(OCM0); + crb_addr_transform(I2C0); +} + +void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_rx_buffer *rx_buf; + int i, ring; + + recv_ctx = adapter->recv_ctx; + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + for (i = 0; i < rds_ring->num_desc; ++i) { + rx_buf = &(rds_ring->rx_buf_arr[i]); + if (rx_buf->skb == NULL) + continue; + + pci_unmap_single(adapter->pdev, + rx_buf->dma, + rds_ring->dma_size, + PCI_DMA_FROMDEVICE); + + dev_kfree_skb_any(rx_buf->skb); + } + } +} + +void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_rx_buffer *rx_buf; + int i, ring; + + recv_ctx = adapter->recv_ctx; + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + + INIT_LIST_HEAD(&rds_ring->free_list); + + rx_buf = rds_ring->rx_buf_arr; + 
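		/*
		 * Thread every descriptor in the flat rx_buf_arr back onto this
		 * ring's free list; the skbs and DMA mappings are released
		 * separately (see qlcnic_release_rx_buffers() above), so this
		 * only resets the ring's bookkeeping.
		 */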
for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf++; + } + } +} + +void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring) +{ + struct qlcnic_cmd_buffer *cmd_buf; + struct qlcnic_skb_frag *buffrag; + int i, j; + + spin_lock(&tx_ring->tx_clean_lock); + + cmd_buf = tx_ring->cmd_buf_arr; + for (i = 0; i < tx_ring->num_desc; i++) { + buffrag = cmd_buf->frag_array; + if (buffrag->dma) { + pci_unmap_single(adapter->pdev, buffrag->dma, + buffrag->length, PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + for (j = 1; j < cmd_buf->frag_count; j++) { + buffrag++; + if (buffrag->dma) { + pci_unmap_page(adapter->pdev, buffrag->dma, + buffrag->length, + PCI_DMA_TODEVICE); + buffrag->dma = 0ULL; + } + } + if (cmd_buf->skb) { + dev_kfree_skb_any(cmd_buf->skb); + cmd_buf->skb = NULL; + } + cmd_buf++; + } + + spin_unlock(&tx_ring->tx_clean_lock); +} + +void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + int ring; + + recv_ctx = adapter->recv_ctx; + + if (recv_ctx->rds_rings == NULL) + return; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + vfree(rds_ring->rx_buf_arr); + rds_ring->rx_buf_arr = NULL; + } + kfree(recv_ctx->rds_rings); +} + +int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_rx_buffer *rx_buf; + int ring, i; + + recv_ctx = adapter->recv_ctx; + + rds_ring = kcalloc(adapter->max_rds_rings, + sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL); + if (rds_ring == NULL) + goto err_out; + + recv_ctx->rds_rings = rds_ring; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + switch (ring) { + case RCV_RING_NORMAL: + rds_ring->num_desc = adapter->num_rxd; + rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN; + rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; + break; + + case RCV_RING_JUMBO: + rds_ring->num_desc = adapter->num_jumbo_rxd; + rds_ring->dma_size = + QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; + + if (adapter->ahw->capabilities & + QLCNIC_FW_CAPABILITY_HW_LRO) + rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; + + rds_ring->skb_size = + rds_ring->dma_size + NET_IP_ALIGN; + break; + } + rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); + if (rds_ring->rx_buf_arr == NULL) + goto err_out; + + INIT_LIST_HEAD(&rds_ring->free_list); + /* + * Now go through all of them, set reference handles + * and put them in the queues. 
+ */ + rx_buf = rds_ring->rx_buf_arr; + for (i = 0; i < rds_ring->num_desc; i++) { + list_add_tail(&rx_buf->list, + &rds_ring->free_list); + rx_buf->ref_handle = i; + rx_buf++; + } + spin_lock_init(&rds_ring->lock); + } + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + sds_ring->irq = adapter->msix_entries[ring].vector; + sds_ring->adapter = adapter; + sds_ring->num_desc = adapter->num_rxd; + if (qlcnic_82xx_check(adapter)) { + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) + sds_ring->tx_ring = &adapter->tx_ring[ring]; + else + sds_ring->tx_ring = &adapter->tx_ring[0]; + } + for (i = 0; i < NUM_RCV_DESC_RINGS; i++) + INIT_LIST_HEAD(&sds_ring->free_list[i]); + } + + return 0; + +err_out: + qlcnic_free_sw_resources(adapter); + return -ENOMEM; +} + +/* + * Utility to translate from internal Phantom CRB address + * to external PCI CRB address. + */ +static u32 qlcnic_decode_crb_addr(u32 addr) +{ + int i; + u32 base_addr, offset, pci_base; + + crb_addr_transform_setup(); + + pci_base = QLCNIC_ADDR_ERROR; + base_addr = addr & 0xfff00000; + offset = addr & 0x000fffff; + + for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) { + if (crb_addr_xform[i] == base_addr) { + pci_base = i << 20; + break; + } + } + if (pci_base == QLCNIC_ADDR_ERROR) + return pci_base; + else + return pci_base + offset; +} + +#define QLCNIC_MAX_ROM_WAIT_USEC 100 + +static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter) +{ + long timeout = 0; + long done = 0; + int err = 0; + + cond_resched(); + while (done == 0) { + done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err); + done &= 2; + if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) { + dev_err(&adapter->pdev->dev, + "Timeout reached waiting for rom done"); + return -EIO; + } + udelay(1); + } + return 0; +} + +static int do_rom_fast_read(struct qlcnic_adapter *adapter, + u32 addr, u32 *valp) +{ + int err = 0; + + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb); + if (qlcnic_wait_rom_done(adapter)) { + dev_err(&adapter->pdev->dev, "Error waiting for rom done\n"); + return -EIO; + } + /* reset abyte_cnt and dummy_byte_cnt */ + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0); + udelay(10); + QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); + + *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err); + if (err == -EIO) + return err; + return 0; +} + +static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int addridx; + int ret = 0; + + for (addridx = addr; addridx < (addr + size); addridx += 4) { + int v; + ret = do_rom_fast_read(adapter, addridx, &v); + if (ret != 0) + break; + *(__le32 *)bytes = cpu_to_le32(v); + bytes += 4; + } + + return ret; +} + +int +qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, + u8 *bytes, size_t size) +{ + int ret; + + ret = qlcnic_rom_lock(adapter); + if (ret < 0) + return ret; + + ret = do_rom_fast_read_words(adapter, addr, bytes, size); + + qlcnic_rom_unlock(adapter); + return ret; +} + +int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp) +{ + int ret; + + if (qlcnic_rom_lock(adapter) != 0) + return -EIO; + + ret = do_rom_fast_read(adapter, addr, valp); + qlcnic_rom_unlock(adapter); + return ret; +} + +int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) +{ + int addr, err = 0; + int i, n, init_delay; + 
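	/*
	 * Overview of what follows: quiesce the I2Q, NIU, SRE, EPG, timer
	 * and PEG blocks, issue the "big hammer" software reset (leaving
	 * the CAM block alone), replay the CRB init (address, value) pairs
	 * stored in flash -- skipping clock, PCI-setup and other registers
	 * that must survive -- and finally re-initialize the protocol
	 * processing engine registers.
	 */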
struct crb_addr_pair *buf; + unsigned offset; + u32 off, val; + struct pci_dev *pdev = adapter->pdev; + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0); + QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0); + + /* Halt all the indiviual PEGs and other blocks */ + /* disable all I2Q */ + QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0); + QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0); + QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0); + QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0); + QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0); + QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0); + + /* disable all niu interrupts */ + QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff); + /* disable xge rx/tx */ + QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00); + /* disable xg1 rx/tx */ + QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00); + /* disable sideband mac */ + QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00); + /* disable ap0 mac */ + QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00); + /* disable ap1 mac */ + QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00); + + /* halt sre */ + val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err); + if (err == -EIO) + return err; + QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1))); + + /* halt epg */ + QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1); + + /* halt timers */ + QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0); + QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0); + QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0); + QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0); + QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0); + QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0); + /* halt pegs */ + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1); + msleep(20); + + qlcnic_rom_unlock(adapter); + /* big hammer don't reset CAM block on reset */ + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); + + /* Init HW CRB block */ + if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || + qlcnic_rom_fast_read(adapter, 4, &n) != 0) { + dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); + return -EIO; + } + offset = n & 0xffffU; + n = (n >> 16) & 0xffffU; + + if (n >= 1024) { + dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n"); + return -EIO; + } + + buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + for (i = 0; i < n; i++) { + if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || + qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { + kfree(buf); + return -EIO; + } + + buf[i].addr = addr; + buf[i].data = val; + } + + for (i = 0; i < n; i++) { + + off = qlcnic_decode_crb_addr(buf[i].addr); + if (off == QLCNIC_ADDR_ERROR) { + dev_err(&pdev->dev, "CRB init value out of range %x\n", + buf[i].addr); + continue; + } + off += QLCNIC_PCI_CRBSPACE; + + if (off & 1) + continue; + + /* skipping cold reboot MAGIC */ + if (off == QLCNIC_CAM_RAM(0x1fc)) + continue; + if (off == (QLCNIC_CRB_I2C0 + 0x1c)) + continue; + if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */ + continue; + if (off == (ROMUSB_GLB + 0xa8)) + continue; + if (off == (ROMUSB_GLB + 0xc8)) /* core clock */ + continue; + if (off == (ROMUSB_GLB + 0x24)) /* MN clock */ + continue; + if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ + continue; + if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET) + continue; + /* skip 
the function enable register */ + if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION)) + continue; + if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2)) + continue; + if ((off & 0x0ff00000) == QLCNIC_CRB_SMB) + continue; + + init_delay = 1; + /* After writing this register, HW needs time for CRB */ + /* to quiet down (else crb_window returns 0xffffffff) */ + if (off == QLCNIC_ROMUSB_GLB_SW_RESET) + init_delay = 1000; + + QLCWR32(adapter, off, buf[i].data); + + msleep(init_delay); + } + kfree(buf); + + /* Initialize protocol process engine */ + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); + usleep_range(1000, 1500); + + QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); + QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); + + return 0; +} + +static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter) +{ + u32 val; + int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT; + + do { + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE); + + switch (val) { + case PHAN_INITIALIZE_COMPLETE: + case PHAN_INITIALIZE_ACK: + return 0; + case PHAN_INITIALIZE_FAILED: + goto out_err; + default: + break; + } + + msleep(QLCNIC_CMDPEG_CHECK_DELAY); + + } while (--retries); + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, + PHAN_INITIALIZE_FAILED); + +out_err: + dev_err(&adapter->pdev->dev, "Command Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; +} + +static int +qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter) +{ + u32 val; + int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT; + + do { + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE); + + if (val == PHAN_PEG_RCV_INITIALIZED) + return 0; + + msleep(QLCNIC_RCVPEG_CHECK_DELAY); + + } while (--retries); + + if (!retries) { + dev_err(&adapter->pdev->dev, "Receive Peg initialization not " + "complete, state: 0x%x.\n", val); + return -EIO; + } + + return 0; +} + +int +qlcnic_check_fw_status(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_cmd_peg_ready(adapter); + if (err) + return err; + + err = qlcnic_receive_peg_ready(adapter); + if (err) + return err; + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK); + + return err; +} + +int +qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { + + int timeo; + u32 val; + + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO); + val = QLC_DEV_GET_DRV(val, adapter->portnum); + if ((val & 0x3) != QLCNIC_TYPE_NIC) { + dev_err(&adapter->pdev->dev, + "Not an Ethernet NIC func=%u\n", val); + return -EIO; + } + adapter->ahw->physical_port = (val >> 2); + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) + timeo = QLCNIC_INIT_TIMEOUT_SECS; + + adapter->dev_init_timeo = timeo; + + if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) + timeo = QLCNIC_RESET_TIMEOUT_SECS; + + adapter->reset_ack_timeo = timeo; + + return 0; +} + +static int qlcnic_get_flt_entry(struct 
qlcnic_adapter *adapter, u8 region, + struct qlcnic_flt_entry *region_entry) +{ + struct qlcnic_flt_header flt_hdr; + struct qlcnic_flt_entry *flt_entry; + int i = 0, ret; + u32 entry_size; + + memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, + (u8 *)&flt_hdr, + sizeof(struct qlcnic_flt_header)); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout header\n"); + return -EIO; + } + + entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); + flt_entry = vzalloc(entry_size); + if (flt_entry == NULL) + return -EIO; + + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + + sizeof(struct qlcnic_flt_header), + (u8 *)flt_entry, entry_size); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout entries\n"); + goto err_out; + } + + while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { + if (flt_entry[i].region == region) + break; + i++; + } + if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { + dev_warn(&adapter->pdev->dev, + "region=%x not found in %d regions\n", region, i); + ret = -EIO; + goto err_out; + } + memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); + +err_out: + vfree(flt_entry); + return ret; +} + +int +qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) +{ + struct qlcnic_flt_entry fw_entry; + u32 ver = -1, min_ver; + int ret; + + if (adapter->ahw->revision_id == QLCNIC_P3P_C0) + ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION, + &fw_entry); + else + ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION, + &fw_entry); + + if (!ret) + /* 0-4:-signature, 4-8:-fw version */ + qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, + (int *)&ver); + else + qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, + (int *)&ver); + + ver = QLCNIC_DECODE_VERSION(ver); + min_ver = QLCNIC_MIN_FW_VERSION; + + if (ver < min_ver) { + dev_err(&adapter->pdev->dev, + "firmware version %d.%d.%d unsupported." 
+ "Min supported version %d.%d.%d\n", + _major(ver), _minor(ver), _build(ver), + _major(min_ver), _minor(min_ver), _build(min_ver)); + return -EINVAL; + } + + return 0; +} + +static int +qlcnic_has_mn(struct qlcnic_adapter *adapter) +{ + u32 capability = 0; + int err = 0; + + capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err); + if (err == -EIO) + return err; + if (capability & QLCNIC_PEG_TUNE_MN_PRESENT) + return 1; + + return 0; +} + +static +struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) +{ + u32 i, entries; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + entries = le32_to_cpu(directory->num_entries); + + for (i = 0; i < entries; i++) { + + u32 offs = le32_to_cpu(directory->findex) + + i * le32_to_cpu(directory->entry_size); + u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8)); + + if (tab_type == section) + return (struct uni_table_desc *) &unirom[offs]; + } + + return NULL; +} + +#define FILEHEADER_SIZE (14 * 4) + +static int +qlcnic_validate_header(struct qlcnic_adapter *adapter) +{ + const u8 *unirom = adapter->fw->data; + struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; + u32 entries, entry_size, tab_size, fw_file_size; + + fw_file_size = adapter->fw->size; + + if (fw_file_size < FILEHEADER_SIZE) + return -EINVAL; + + entries = le32_to_cpu(directory->num_entries); + entry_size = le32_to_cpu(directory->entry_size); + tab_size = le32_to_cpu(directory->findex) + (entries * entry_size); + + if (fw_file_size < tab_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_bootld(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + u32 offs, tab_size, data_size, idx; + const u8 *unirom = adapter->fw->data; + __le32 temp; + + temp = *((__le32 *)&unirom[adapter->file_prd_off] + + QLCNIC_UNI_BOOTLD_IDX_OFF); + idx = le32_to_cpu(temp); + tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); + + if (!tab_desc) + return -EINVAL; + + tab_size = le32_to_cpu(tab_desc->findex) + + le32_to_cpu(tab_desc->entry_size) * (idx + 1); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = le32_to_cpu(tab_desc->findex) + + le32_to_cpu(tab_desc->entry_size) * idx; + descr = (struct uni_data_desc *)&unirom[offs]; + + data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_fw(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *tab_desc; + struct uni_data_desc *descr; + const u8 *unirom = adapter->fw->data; + u32 offs, tab_size, data_size, idx; + __le32 temp; + + temp = *((__le32 *)&unirom[adapter->file_prd_off] + + QLCNIC_UNI_FIRMWARE_IDX_OFF); + idx = le32_to_cpu(temp); + tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); + + if (!tab_desc) + return -EINVAL; + + tab_size = le32_to_cpu(tab_desc->findex) + + le32_to_cpu(tab_desc->entry_size) * (idx + 1); + + if (adapter->fw->size < tab_size) + return -EINVAL; + + offs = le32_to_cpu(tab_desc->findex) + + le32_to_cpu(tab_desc->entry_size) * idx; + descr = (struct uni_data_desc *)&unirom[offs]; + data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size); + + if (adapter->fw->size < data_size) + return -EINVAL; + + return 0; +} + +static int +qlcnic_validate_product_offs(struct qlcnic_adapter *adapter) +{ + struct uni_table_desc *ptab_descr; + const u8 *unirom = adapter->fw->data; + int mn_present = qlcnic_has_mn(adapter); + u32 
entries, entry_size, tab_size, i; + __le32 temp; + + ptab_descr = qlcnic_get_table_desc(unirom, + QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); + if (!ptab_descr) + return -EINVAL; + + entries = le32_to_cpu(ptab_descr->num_entries); + entry_size = le32_to_cpu(ptab_descr->entry_size); + tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size); + + if (adapter->fw->size < tab_size) + return -EINVAL; + +nomn: + for (i = 0; i < entries; i++) { + + u32 flags, file_chiprev, offs; + u8 chiprev = adapter->ahw->revision_id; + u32 flagbit; + + offs = le32_to_cpu(ptab_descr->findex) + + i * le32_to_cpu(ptab_descr->entry_size); + temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF); + flags = le32_to_cpu(temp); + temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF); + file_chiprev = le32_to_cpu(temp); + + flagbit = mn_present ? 1 : 2; + + if ((chiprev == file_chiprev) && + ((1ULL << flagbit) & flags)) { + adapter->file_prd_off = offs; + return 0; + } + } + if (mn_present) { + mn_present = 0; + goto nomn; + } + return -EINVAL; +} + +static int +qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter) +{ + if (qlcnic_validate_header(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: header validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_product_offs(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: product validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_bootld(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: bootld validation failed\n"); + return -EINVAL; + } + + if (qlcnic_validate_fw(adapter)) { + dev_err(&adapter->pdev->dev, + "unified image: firmware validation failed\n"); + return -EINVAL; + } + + return 0; +} + +static +struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter, + u32 section, u32 idx_offset) +{ + const u8 *unirom = adapter->fw->data; + struct uni_table_desc *tab_desc; + u32 offs, idx; + __le32 temp; + + temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset); + idx = le32_to_cpu(temp); + + tab_desc = qlcnic_get_table_desc(unirom, section); + + if (tab_desc == NULL) + return NULL; + + offs = le32_to_cpu(tab_desc->findex) + + le32_to_cpu(tab_desc->entry_size) * idx; + + return (struct uni_data_desc *)&unirom[offs]; +} + +static u8 * +qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) +{ + u32 offs = QLCNIC_BOOTLD_START; + struct uni_data_desc *data_desc; + + data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD, + QLCNIC_UNI_BOOTLD_IDX_OFF); + + if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + offs = le32_to_cpu(data_desc->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static u8 * +qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) +{ + u32 offs = QLCNIC_IMAGE_START; + struct uni_data_desc *data_desc; + + data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF); + if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + offs = le32_to_cpu(data_desc->findex); + + return (u8 *)&adapter->fw->data[offs]; +} + +static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter) +{ + struct uni_data_desc *data_desc; + const u8 *unirom = adapter->fw->data; + + data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF); + + if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE) + return le32_to_cpu(data_desc->size); + else + return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]); +} + +static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter) +{ + struct 
uni_data_desc *fw_data_desc; + const struct firmware *fw = adapter->fw; + u32 major, minor, sub; + __le32 version_offset; + const u8 *ver_str; + int i, ret; + + if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) { + version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]; + return le32_to_cpu(version_offset); + } + + fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, + QLCNIC_UNI_FIRMWARE_IDX_OFF); + ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) + + le32_to_cpu(fw_data_desc->size) - 17; + + for (i = 0; i < 12; i++) { + if (!strncmp(&ver_str[i], "REV=", 4)) { + ret = sscanf(&ver_str[i+4], "%u.%u.%u ", + &major, &minor, &sub); + if (ret != 3) + return 0; + else + return major + (minor << 8) + (sub << 16); + } + } + + return 0; +} + +static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter) +{ + const struct firmware *fw = adapter->fw; + u32 bios_ver, prd_off = adapter->file_prd_off; + u8 *version_offset; + __le32 temp; + + if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) { + version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]; + return le32_to_cpu(*(__le32 *)version_offset); + } + + temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF); + bios_ver = le32_to_cpu(temp); + + return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); +} + +static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter) +{ + if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID)) + dev_info(&adapter->pdev->dev, "Resetting rom_lock\n"); + + qlcnic_pcie_sem_unlock(adapter, 2); +} + +static int +qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter) +{ + u32 heartbeat, ret = -EIO; + int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT; + + adapter->heartbeat = QLC_SHARED_REG_RD32(adapter, + QLCNIC_PEG_ALIVE_COUNTER); + + do { + msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS); + heartbeat = QLC_SHARED_REG_RD32(adapter, + QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != adapter->heartbeat) { + ret = QLCNIC_RCODE_SUCCESS; + break; + } + } while (--retries); + + return ret; +} + +int +qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) +{ + if ((adapter->flags & QLCNIC_FW_HANG) || + qlcnic_check_fw_hearbeat(adapter)) { + qlcnic_rom_lock_recovery(adapter); + return 1; + } + + if (adapter->need_fw_reset) + return 1; + + if (adapter->fw) + return 1; + + return 0; +} + +static const char *fw_name[] = { + QLCNIC_UNIFIED_ROMIMAGE_NAME, + QLCNIC_FLASH_ROMIMAGE_NAME, +}; + +int +qlcnic_load_firmware(struct qlcnic_adapter *adapter) +{ + __le64 *ptr64; + u32 i, flashaddr, size; + const struct firmware *fw = adapter->fw; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "loading firmware from %s\n", + fw_name[adapter->ahw->fw_type]); + + if (fw) { + u64 data; + + size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; + + ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter); + flashaddr = QLCNIC_BOOTLD_START; + + for (i = 0; i < size; i++) { + data = le64_to_cpu(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = qlcnic_get_fw_size(adapter) / 8; + + ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter); + flashaddr = QLCNIC_IMAGE_START; + + for (i = 0; i < size; i++) { + data = le64_to_cpu(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + + size = qlcnic_get_fw_size(adapter) % 8; + if (size) { + data = le64_to_cpu(ptr64[i]); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + } + + } else { 
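		/*
		 * No firmware file is attached, so pull the bootloader out of
		 * the adapter's own flash: locate it through the flash layout
		 * table (QLCNIC_BOOTLD_REGION), fall back to the legacy fixed
		 * offsets if the table cannot be read, and copy it into card
		 * memory eight bytes at a time, just like the file path above.
		 */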
+ u64 data; + u32 hi, lo; + int ret; + struct qlcnic_flt_entry bootld_entry; + + ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION, + &bootld_entry); + if (!ret) { + size = bootld_entry.size / 8; + flashaddr = bootld_entry.start_addr; + } else { + size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; + flashaddr = QLCNIC_BOOTLD_START; + dev_info(&pdev->dev, + "using legacy method to get flash fw region"); + } + + for (i = 0; i < size; i++) { + if (qlcnic_rom_fast_read(adapter, + flashaddr, (int *)&lo) != 0) + return -EIO; + if (qlcnic_rom_fast_read(adapter, + flashaddr + 4, (int *)&hi) != 0) + return -EIO; + + data = (((u64)hi << 32) | lo); + + if (qlcnic_pci_mem_write_2M(adapter, + flashaddr, data)) + return -EIO; + + flashaddr += 8; + } + } + usleep_range(1000, 1500); + + QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); + QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); + return 0; +} + +static int +qlcnic_validate_firmware(struct qlcnic_adapter *adapter) +{ + u32 val; + u32 ver, bios, min_size; + struct pci_dev *pdev = adapter->pdev; + const struct firmware *fw = adapter->fw; + u8 fw_type = adapter->ahw->fw_type; + + if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { + if (qlcnic_validate_unified_romimage(adapter)) + return -EINVAL; + + min_size = QLCNIC_UNI_FW_MIN_SIZE; + } else { + val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); + if (val != QLCNIC_BDINFO_MAGIC) + return -EINVAL; + + min_size = QLCNIC_FW_MIN_SIZE; + } + + if (fw->size < min_size) + return -EINVAL; + + val = qlcnic_get_fw_version(adapter); + ver = QLCNIC_DECODE_VERSION(val); + + if (ver < QLCNIC_MIN_FW_VERSION) { + dev_err(&pdev->dev, + "%s: firmware version %d.%d.%d unsupported\n", + fw_name[fw_type], _major(ver), _minor(ver), _build(ver)); + return -EINVAL; + } + + val = qlcnic_get_bios_version(adapter); + qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios); + if (val != bios) { + dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", + fw_name[fw_type]); + return -EINVAL; + } + + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC); + return 0; +} + +static void +qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter) +{ + u8 fw_type; + + switch (adapter->ahw->fw_type) { + case QLCNIC_UNKNOWN_ROMIMAGE: + fw_type = QLCNIC_UNIFIED_ROMIMAGE; + break; + + case QLCNIC_UNIFIED_ROMIMAGE: + default: + fw_type = QLCNIC_FLASH_ROMIMAGE; + break; + } + + adapter->ahw->fw_type = fw_type; +} + + + +void qlcnic_request_firmware(struct qlcnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int rc; + + adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; + +next: + qlcnic_get_next_fwtype(adapter); + + if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) { + adapter->fw = NULL; + } else { + rc = reject_firmware(&adapter->fw, + fw_name[adapter->ahw->fw_type], + &pdev->dev); + if (rc != 0) + goto next; + + rc = qlcnic_validate_firmware(adapter); + if (rc != 0) { + release_firmware(adapter->fw); + usleep_range(1000, 1500); + goto next; + } + } +} + + +void +qlcnic_release_firmware(struct qlcnic_adapter *adapter) +{ + release_firmware(adapter->fw); + adapter->fw = NULL; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c new file mode 100644 index 000000000..d4b5085a2 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -0,0 +1,2230 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "qlcnic.h" + +#define QLCNIC_TX_ETHER_PKT 0x01 +#define QLCNIC_TX_TCP_PKT 0x02 +#define QLCNIC_TX_UDP_PKT 0x03 +#define QLCNIC_TX_IP_PKT 0x04 +#define QLCNIC_TX_TCP_LSO 0x05 +#define QLCNIC_TX_TCP_LSO6 0x06 +#define QLCNIC_TX_ENCAP_PKT 0x07 +#define QLCNIC_TX_ENCAP_LSO 0x08 +#define QLCNIC_TX_TCPV6_PKT 0x0b +#define QLCNIC_TX_UDPV6_PKT 0x0c + +#define QLCNIC_FLAGS_VLAN_TAGGED 0x10 +#define QLCNIC_FLAGS_VLAN_OOB 0x40 + +#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \ + (cmd_desc)->vlan_TCI = cpu_to_le16(v); +#define qlcnic_set_cmd_desc_port(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) +#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \ + ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) + +#define qlcnic_set_tx_port(_desc, _port) \ + ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)) + +#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \ + ((_desc)->flags_opcode |= \ + cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))) + +#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \ + ((_desc)->nfrags__length = \ + cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))) + +/* owner bits of status_desc */ +#define STATUS_OWNER_HOST (0x1ULL << 56) +#define STATUS_OWNER_PHANTOM (0x2ULL << 56) + +/* Status descriptor: + 0-3 port, 4-7 status, 8-11 type, 12-27 total_length + 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset + 53-55 desc_cnt, 56-57 owner, 58-63 opcode + */ +#define qlcnic_get_sts_port(sts_data) \ + ((sts_data) & 0x0F) +#define qlcnic_get_sts_status(sts_data) \ + (((sts_data) >> 4) & 0x0F) +#define qlcnic_get_sts_type(sts_data) \ + (((sts_data) >> 8) & 0x0F) +#define qlcnic_get_sts_totallength(sts_data) \ + (((sts_data) >> 12) & 0xFFFF) +#define qlcnic_get_sts_refhandle(sts_data) \ + (((sts_data) >> 28) & 0xFFFF) +#define qlcnic_get_sts_prot(sts_data) \ + (((sts_data) >> 44) & 0x0F) +#define qlcnic_get_sts_pkt_offset(sts_data) \ + (((sts_data) >> 48) & 0x1F) +#define qlcnic_get_sts_desc_cnt(sts_data) \ + (((sts_data) >> 53) & 0x7) +#define qlcnic_get_sts_opcode(sts_data) \ + (((sts_data) >> 58) & 0x03F) + +#define qlcnic_get_lro_sts_refhandle(sts_data) \ + ((sts_data) & 0x07FFF) +#define qlcnic_get_lro_sts_length(sts_data) \ + (((sts_data) >> 16) & 0x0FFFF) +#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \ + (((sts_data) >> 32) & 0x0FF) +#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \ + (((sts_data) >> 40) & 0x0FF) +#define qlcnic_get_lro_sts_timestamp(sts_data) \ + (((sts_data) >> 48) & 0x1) +#define qlcnic_get_lro_sts_type(sts_data) \ + (((sts_data) >> 49) & 0x7) +#define qlcnic_get_lro_sts_push_flag(sts_data) \ + (((sts_data) >> 52) & 0x1) +#define qlcnic_get_lro_sts_seq_number(sts_data) \ + ((sts_data) & 0x0FFFFFFFF) +#define qlcnic_get_lro_sts_mss(sts_data1) \ + ((sts_data1 >> 32) & 0x0FFFF) + +#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff) + +/* opcode field in status_desc */ +#define QLCNIC_SYN_OFFLOAD 0x03 +#define QLCNIC_RXPKT_DESC 0x04 +#define QLCNIC_OLD_RXPKT_DESC 0x3f +#define QLCNIC_RESPONSE_DESC 0x05 +#define QLCNIC_LRO_DESC 0x12 + +#define QLCNIC_TX_POLL_BUDGET 128 +#define QLCNIC_TCP_HDR_SIZE 20 +#define QLCNIC_TCP_TS_OPTION_SIZE 12 +#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) +#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM) + +#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE) + +/* for status field in status_desc */ +#define STATUS_CKSUM_LOOP 0 +#define 
STATUS_CKSUM_OK 2 + +#define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF) +#define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF) +#define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7) +#define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF) +#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF) +#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF) +#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF) +#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF) +#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7) +#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1) +#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1) +#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1) +#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1) + +static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, + int max); + +static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *, + struct qlcnic_host_rds_ring *, + u16, u16); + +static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan) +{ + return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff)); +} + +static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, + u16 handle, u8 ring_id) +{ + if (qlcnic_83xx_check(adapter)) + return handle | (ring_id << 15); + else + return handle; +} + +static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data) +{ + return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0; +} + +static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter, + struct qlcnic_filter *fil, + void *addr, u16 vlan_id) +{ + int ret; + u8 op; + + op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; + ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op); + if (ret) + return; + + op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; + ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op); + if (!ret) { + hlist_del(&fil->fnode); + adapter->rx_fhash.fnum--; + } +} + +static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head, + void *addr, u16 vlan_id) +{ + struct qlcnic_filter *tmp_fil = NULL; + struct hlist_node *n; + + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { + if (ether_addr_equal(tmp_fil->faddr, addr) && + tmp_fil->vlan_id == vlan_id) + return tmp_fil; + } + + return NULL; +} + +static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, + struct sk_buff *skb, int loopback_pkt, u16 vlan_id) +{ + struct ethhdr *phdr = (struct ethhdr *)(skb->data); + struct qlcnic_filter *fil, *tmp_fil; + struct hlist_head *head; + unsigned long time; + u64 src_addr = 0; + u8 hindex, op; + int ret; + + if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff)) + vlan_id = 0; + + memcpy(&src_addr, phdr->h_source, ETH_ALEN); + hindex = qlcnic_mac_hash(src_addr, vlan_id) & + (adapter->fhash.fbucket_size - 1); + + if (loopback_pkt) { + if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax) + return; + + head = &(adapter->rx_fhash.fhead[hindex]); + + tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); + if (tmp_fil) { + time = tmp_fil->ftime; + if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time)) + tmp_fil->ftime = jiffies; + return; + } + + fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); + if (!fil) + return; + + fil->ftime = jiffies; + memcpy(fil->faddr, &src_addr, ETH_ALEN); + fil->vlan_id = vlan_id; + spin_lock(&adapter->rx_mac_learn_lock); + hlist_add_head(&(fil->fnode), head); + adapter->rx_fhash.fnum++; + spin_unlock(&adapter->rx_mac_learn_lock); + } else { + head = &adapter->fhash.fhead[hindex]; + + 
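The status-descriptor comment above documents the bit layout that the qlcnic_get_sts_*() macros decode with plain shifts and masks. As an illustration, the stand-alone sketch below composes an entirely made-up 82xx status word and pulls the same fields back out with the same arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Bit layout per the driver comment: 0-3 port, 4-7 status, 8-11 type,
 * 12-27 total_length, 28-43 reference_handle, 53-55 desc_cnt,
 * 56-57 owner, 58-63 opcode. The sample values are invented.
 */
int main(void)
{
        uint64_t sts =  (uint64_t)2                     /* port */
                     | ((uint64_t)2 << 4)               /* status: CKSUM_OK */
                     | ((uint64_t)1 << 8)               /* type: RDS ring 1 */
                     | ((uint64_t)1514 << 12)           /* total length */
                     | ((uint64_t)0x30 << 28)           /* reference handle */
                     | ((uint64_t)1 << 53)              /* descriptor count */
                     | ((uint64_t)1 << 56)              /* owner: host */
                     | ((uint64_t)0x04 << 58);          /* opcode: RXPKT_DESC */

        printf("port=%llu status=%llu ring=%llu len=%llu handle=0x%llx cnt=%llu opcode=0x%llx\n",
               (unsigned long long)(sts & 0xf),
               (unsigned long long)((sts >> 4) & 0xf),
               (unsigned long long)((sts >> 8) & 0xf),
               (unsigned long long)((sts >> 12) & 0xffff),
               (unsigned long long)((sts >> 28) & 0xffff),
               (unsigned long long)((sts >> 53) & 0x7),
               (unsigned long long)((sts >> 58) & 0x3f));
        return 0;
}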
spin_lock(&adapter->mac_learn_lock); + + tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); + if (tmp_fil) { + op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL; + ret = qlcnic_sre_macaddr_change(adapter, + (u8 *)&src_addr, + vlan_id, op); + if (!ret) { + hlist_del(&tmp_fil->fnode); + adapter->fhash.fnum--; + } + + spin_unlock(&adapter->mac_learn_lock); + + return; + } + + spin_unlock(&adapter->mac_learn_lock); + + head = &adapter->rx_fhash.fhead[hindex]; + + spin_lock(&adapter->rx_mac_learn_lock); + + tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); + if (tmp_fil) + qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr, + vlan_id); + + spin_unlock(&adapter->rx_mac_learn_lock); + } +} + +void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, + u16 vlan_id) +{ + struct cmd_desc_type0 *hwdesc; + struct qlcnic_nic_req *req; + struct qlcnic_mac_req *mac_req; + struct qlcnic_vlan_req *vlan_req; + struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring; + u32 producer; + u64 word; + + producer = tx_ring->producer; + hwdesc = &tx_ring->desc_head[tx_ring->producer]; + + req = (struct qlcnic_nic_req *)hwdesc; + memset(req, 0, sizeof(struct qlcnic_nic_req)); + req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23); + + word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16); + req->req_hdr = cpu_to_le64(word); + + mac_req = (struct qlcnic_mac_req *)&(req->words[0]); + mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; + memcpy(mac_req->mac_addr, uaddr, ETH_ALEN); + + vlan_req = (struct qlcnic_vlan_req *)&req->words[1]; + vlan_req->vlan_id = cpu_to_le16(vlan_id); + + tx_ring->producer = get_next_index(producer, tx_ring->num_desc); + smp_mb(); +} + +static void qlcnic_send_filter(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb) +{ + struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); + struct ethhdr *phdr = (struct ethhdr *)(skb->data); + u16 protocol = ntohs(skb->protocol); + struct qlcnic_filter *fil, *tmp_fil; + struct hlist_head *head; + struct hlist_node *n; + u64 src_addr = 0; + u16 vlan_id = 0; + u8 hindex, hval; + + if (ether_addr_equal(phdr->h_source, adapter->mac_addr)) + return; + + if (adapter->flags & QLCNIC_VLAN_FILTERING) { + if (protocol == ETH_P_8021Q) { + vh = (struct vlan_ethhdr *)skb->data; + vlan_id = ntohs(vh->h_vlan_TCI); + } else if (skb_vlan_tag_present(skb)) { + vlan_id = skb_vlan_tag_get(skb); + } + } + + memcpy(&src_addr, phdr->h_source, ETH_ALEN); + hval = qlcnic_mac_hash(src_addr, vlan_id); + hindex = hval & (adapter->fhash.fbucket_size - 1); + head = &(adapter->fhash.fhead[hindex]); + + hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { + if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) && + tmp_fil->vlan_id == vlan_id) { + if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) + qlcnic_change_filter(adapter, &src_addr, + vlan_id); + tmp_fil->ftime = jiffies; + return; + } + } + + if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) { + adapter->stats.mac_filter_limit_overrun++; + return; + } + + fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC); + if (!fil) + return; + + qlcnic_change_filter(adapter, &src_addr, vlan_id); + fil->ftime = jiffies; + fil->vlan_id = vlan_id; + memcpy(fil->faddr, &src_addr, ETH_ALEN); + spin_lock(&adapter->mac_learn_lock); + hlist_add_head(&(fil->fnode), head); + adapter->fhash.fnum++; + spin_unlock(&adapter->mac_learn_lock); +} + +#define QLCNIC_ENCAP_VXLAN_PKT BIT_0 +#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1 +#define 
QLCNIC_ENCAP_INNER_L3_IP6 BIT_2 +#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3 +#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4 +#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5 + +static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *first_desc, + struct sk_buff *skb, + struct qlcnic_host_tx_ring *tx_ring) +{ + u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0; + int copied, copy_len, descr_size; + u32 producer = tx_ring->producer; + struct cmd_desc_type0 *hwdesc; + u16 flags = 0, encap_descr = 0; + + opcode = QLCNIC_TX_ETHER_PKT; + encap_descr = QLCNIC_ENCAP_VXLAN_PKT; + + if (skb_is_gso(skb)) { + inner_hdr_len = skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - + skb_inner_mac_header(skb); + + /* VXLAN header size = 8 */ + outer_hdr_len = skb_transport_offset(skb) + 8 + + sizeof(struct udphdr); + first_desc->outer_hdr_length = outer_hdr_len; + total_hdr_len = inner_hdr_len + outer_hdr_len; + encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM | + QLCNIC_ENCAP_DO_L4_CSUM; + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->hdr_length = inner_hdr_len; + + /* Copy inner and outer headers in Tx descriptor(s) + * If total_hdr_len > cmd_desc_type0, use multiple + * descriptors + */ + copied = 0; + descr_size = (int)sizeof(struct cmd_desc_type0); + while (copied < total_hdr_len) { + copy_len = min(descr_size, (total_hdr_len - copied)); + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + skb_copy_from_linear_data_offset(skb, copied, + (char *)hwdesc, + copy_len); + copied += copy_len; + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + + /* Make sure updated tx_ring->producer is visible + * for qlcnic_tx_avail() + */ + smp_mb(); + adapter->stats.encap_lso_frames++; + + opcode = QLCNIC_TX_ENCAP_LSO; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (inner_ip_hdr(skb)->version == 6) { + if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) + encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP; + } else { + if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP) + encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP; + } + + adapter->stats.encap_tx_csummed++; + opcode = QLCNIC_TX_ENCAP_PKT; + } + + /* Prepare first 16 bits of byte offset 16 of Tx descriptor */ + if (ip_hdr(skb)->version == 6) + encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6; + + /* outer IP header's size in 32bit words size*/ + encap_descr |= (skb_network_header_len(skb) >> 2) << 6; + + /* outer IP header offset */ + encap_descr |= skb_network_offset(skb) << 10; + first_desc->encap_descr = cpu_to_le16(encap_descr); + + first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) - + skb->data; + first_desc->ip_hdr_offset = skb_inner_network_offset(skb); + + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + return 0; +} + +static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, + struct cmd_desc_type0 *first_desc, struct sk_buff *skb, + struct qlcnic_host_tx_ring *tx_ring) +{ + u8 l4proto, opcode = 0, hdr_len = 0; + u16 flags = 0, vlan_tci = 0; + int copied, offset, copy_len, size; + struct cmd_desc_type0 *hwdesc; + struct vlan_ethhdr *vh; + u16 protocol = ntohs(skb->protocol); + u32 producer = tx_ring->producer; + + if (protocol == ETH_P_8021Q) { + vh = (struct vlan_ethhdr *)skb->data; + flags = QLCNIC_FLAGS_VLAN_TAGGED; + vlan_tci = ntohs(vh->h_vlan_TCI); + protocol = ntohs(vh->h_vlan_encapsulated_proto); + } else if (skb_vlan_tag_present(skb)) { + flags = QLCNIC_FLAGS_VLAN_OOB; + vlan_tci = skb_vlan_tag_get(skb); + } + if 
(unlikely(adapter->tx_pvid)) { + if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED)) + return -EIO; + if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED)) + goto set_flags; + + flags = QLCNIC_FLAGS_VLAN_OOB; + vlan_tci = adapter->tx_pvid; + } +set_flags: + qlcnic_set_tx_vlan_tci(first_desc, vlan_tci); + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + if (*(skb->data) & BIT_0) { + flags |= BIT_0; + memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); + } + opcode = QLCNIC_TX_ETHER_PKT; + if (skb_is_gso(skb)) { + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + first_desc->hdr_length = hdr_len; + opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 : + QLCNIC_TX_TCP_LSO; + + /* For LSO, we need to copy the MAC/IP/TCP headers into + * the descriptor ring */ + copied = 0; + offset = 2; + + if (flags & QLCNIC_FLAGS_VLAN_OOB) { + first_desc->hdr_length += VLAN_HLEN; + first_desc->tcp_hdr_offset = VLAN_HLEN; + first_desc->ip_hdr_offset = VLAN_HLEN; + + /* Only in case of TSO on vlan device */ + flags |= QLCNIC_FLAGS_VLAN_TAGGED; + + /* Create a TSO vlan header template for firmware */ + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + + copy_len = min((int)sizeof(struct cmd_desc_type0) - + offset, hdr_len + VLAN_HLEN); + + vh = (struct vlan_ethhdr *)((char *) hwdesc + 2); + skb_copy_from_linear_data(skb, vh, 12); + vh->h_vlan_proto = htons(ETH_P_8021Q); + vh->h_vlan_TCI = htons(vlan_tci); + + skb_copy_from_linear_data_offset(skb, 12, + (char *)vh + 16, + copy_len - 16); + copied = copy_len - VLAN_HLEN; + offset = 0; + producer = get_next_index(producer, tx_ring->num_desc); + } + + while (copied < hdr_len) { + size = (int)sizeof(struct cmd_desc_type0) - offset; + copy_len = min(size, (hdr_len - copied)); + hwdesc = &tx_ring->desc_head[producer]; + tx_ring->cmd_buf_arr[producer].skb = NULL; + skb_copy_from_linear_data_offset(skb, copied, + (char *)hwdesc + + offset, copy_len); + copied += copy_len; + offset = 0; + producer = get_next_index(producer, tx_ring->num_desc); + } + + tx_ring->producer = producer; + smp_mb(); + adapter->stats.lso_frames++; + + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (protocol == ETH_P_IP) { + l4proto = ip_hdr(skb)->protocol; + + if (l4proto == IPPROTO_TCP) + opcode = QLCNIC_TX_TCP_PKT; + else if (l4proto == IPPROTO_UDP) + opcode = QLCNIC_TX_UDP_PKT; + } else if (protocol == ETH_P_IPV6) { + l4proto = ipv6_hdr(skb)->nexthdr; + + if (l4proto == IPPROTO_TCP) + opcode = QLCNIC_TX_TCPV6_PKT; + else if (l4proto == IPPROTO_UDP) + opcode = QLCNIC_TX_UDPV6_PKT; + } + } + first_desc->tcp_hdr_offset += skb_transport_offset(skb); + first_desc->ip_hdr_offset += skb_network_offset(skb); + qlcnic_set_tx_flags_opcode(first_desc, flags, opcode); + + return 0; +} + +static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, + struct qlcnic_cmd_buffer *pbuf) +{ + struct qlcnic_skb_frag *nf; + struct skb_frag_struct *frag; + int i, nr_frags; + dma_addr_t map; + + nr_frags = skb_shinfo(skb)->nr_frags; + nf = &pbuf->frag_array[0]; + + map = pci_map_single(pdev, skb->data, skb_headlen(skb), + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, map)) + goto out_err; + + nf->dma = map; + nf->length = skb_headlen(skb); + + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nf = &pbuf->frag_array[i+1]; + map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, 
map)) + goto unwind; + + nf->dma = map; + nf->length = skb_frag_size(frag); + } + + return 0; + +unwind: + while (--i >= 0) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + +out_err: + return -ENOMEM; +} + +static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, + struct qlcnic_cmd_buffer *pbuf) +{ + struct qlcnic_skb_frag *nf = &pbuf->frag_array[0]; + int i, nr_frags = skb_shinfo(skb)->nr_frags; + + for (i = 0; i < nr_frags; i++) { + nf = &pbuf->frag_array[i+1]; + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + } + + nf = &pbuf->frag_array[0]; + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + pbuf->skb = NULL; +} + +static inline void qlcnic_clear_cmddesc(u64 *desc) +{ + desc[0] = 0ULL; + desc[2] = 0ULL; + desc[7] = 0ULL; +} + +netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_cmd_buffer *pbuf; + struct qlcnic_skb_frag *buffrag; + struct cmd_desc_type0 *hwdesc, *first_desc; + struct pci_dev *pdev; + struct ethhdr *phdr; + int i, k, frag_count, delta = 0; + u32 producer, num_txd; + u16 protocol; + bool l4_is_udp = false; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_tx_stop_all_queues(netdev); + return NETDEV_TX_BUSY; + } + + if (adapter->flags & QLCNIC_MACSPOOF) { + phdr = (struct ethhdr *)skb->data; + if (!ether_addr_equal(phdr->h_source, adapter->mac_addr)) + goto drop_packet; + } + + tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)]; + num_txd = tx_ring->num_desc; + + frag_count = skb_shinfo(skb)->nr_frags + 1; + + /* 14 frags supported for normal packet and + * 32 frags supported for TSO packet + */ + if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { + for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) + delta += skb_frag_size(&skb_shinfo(skb)->frags[i]); + + if (!__pskb_pull_tail(skb, delta)) + goto drop_packet; + + frag_count = 1 + skb_shinfo(skb)->nr_frags; + } + + if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) { + netif_tx_stop_queue(tx_ring->txq); + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { + netif_tx_start_queue(tx_ring->txq); + } else { + tx_ring->tx_stats.xmit_off++; + return NETDEV_TX_BUSY; + } + } + + producer = tx_ring->producer; + pbuf = &tx_ring->cmd_buf_arr[producer]; + pdev = adapter->pdev; + first_desc = &tx_ring->desc_head[producer]; + hwdesc = &tx_ring->desc_head[producer]; + qlcnic_clear_cmddesc((u64 *)hwdesc); + + if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { + adapter->stats.tx_dma_map_error++; + goto drop_packet; + } + + pbuf->skb = skb; + pbuf->frag_count = frag_count; + + qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); + qlcnic_set_tx_port(first_desc, adapter->portnum); + + for (i = 0; i < frag_count; i++) { + k = i % 4; + + if ((k == 0) && (i > 0)) { + /* move to next desc.*/ + producer = get_next_index(producer, num_txd); + hwdesc = &tx_ring->desc_head[producer]; + qlcnic_clear_cmddesc((u64 *)hwdesc); + tx_ring->cmd_buf_arr[producer].skb = NULL; + } + + buffrag = &pbuf->frag_array[i]; + hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); + switch (k) { + case 0: + hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); + break; + case 1: + hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); + break; + case 2: + hwdesc->addr_buffer3 = 
cpu_to_le64(buffrag->dma); + break; + case 3: + hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); + break; + } + } + + tx_ring->producer = get_next_index(producer, num_txd); + smp_mb(); + + protocol = ntohs(skb->protocol); + if (protocol == ETH_P_IP) + l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP; + else if (protocol == ETH_P_IPV6) + l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP; + + /* Check if it is a VXLAN packet */ + if (!skb->encapsulation || !l4_is_udp || + !qlcnic_encap_tx_offload(adapter)) { + if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, + tx_ring))) + goto unwind_buff; + } else { + if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc, + skb, tx_ring))) + goto unwind_buff; + } + + if (adapter->drv_mac_learn) + qlcnic_send_filter(adapter, first_desc, skb); + + tx_ring->tx_stats.tx_bytes += skb->len; + tx_ring->tx_stats.xmit_called++; + + qlcnic_update_cmd_producer(tx_ring); + + return NETDEV_TX_OK; + +unwind_buff: + qlcnic_unmap_buffers(pdev, skb, pbuf); +drop_packet: + adapter->stats.txdropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->ahw->linkup && !linkup) { + netdev_info(netdev, "NIC Link is down\n"); + adapter->ahw->linkup = 0; + netif_carrier_off(netdev); + } else if (!adapter->ahw->linkup && linkup) { + adapter->ahw->linkup = 1; + + /* Do not advertise Link up to the stack if device + * is in loopback mode + */ + if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) { + netdev_info(netdev, "NIC Link is up for loopback test\n"); + return; + } + + netdev_info(netdev, "NIC Link is up\n"); + netif_carrier_on(netdev); + } +} + +static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, + struct qlcnic_rx_buffer *buffer) +{ + struct sk_buff *skb; + dma_addr_t dma; + struct pci_dev *pdev = adapter->pdev; + + skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size); + if (!skb) { + adapter->stats.skb_alloc_failure++; + return -ENOMEM; + } + + skb_reserve(skb, NET_IP_ALIGN); + dma = pci_map_single(pdev, skb->data, + rds_ring->dma_size, PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(pdev, dma)) { + adapter->stats.rx_dma_map_error++; + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + buffer->skb = skb; + buffer->dma = dma; + + return 0; +} + +static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, + u8 ring_id) +{ + struct rcv_desc *pdesc; + struct qlcnic_rx_buffer *buffer; + int count = 0; + uint32_t producer, handle; + struct list_head *head; + + if (!spin_trylock(&rds_ring->lock)) + return; + + producer = rds_ring->producer; + head = &rds_ring->free_list; + while (!list_empty(head)) { + buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); + + if (!buffer->skb) { + if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + handle = qlcnic_get_ref_handle(adapter, + buffer->ref_handle, ring_id); + pdesc->reference_handle = cpu_to_le16(handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + producer = get_next_index(producer, rds_ring->num_desc); + } + if (count) { + rds_ring->producer = producer; + writel((producer - 1) & (rds_ring->num_desc - 1), + rds_ring->crb_rcv_producer); + } + spin_unlock(&rds_ring->lock); +} 
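Both RX refill paths (qlcnic_post_rx_buffers_nodb() above and qlcnic_post_rx_buffers() further below) advance a producer index around the descriptor ring and then write the index of the last filled descriptor to the producer register, masked with (num_desc - 1), which assumes a power-of-two ring size. A small stand-alone sketch of that arithmetic, with a toy ring size and assuming get_next_index() (defined elsewhere in the driver) is the usual advance-and-wrap helper:

#include <stdio.h>
#include <stdint.h>

/* Wrap-around increment in the style of the driver's get_next_index():
 * advance by one, wrap back to zero at the ring size.
 */
static uint32_t next_index(uint32_t index, uint32_t ring_size)
{
        return (index + 1 == ring_size) ? 0 : index + 1;
}

int main(void)
{
        const uint32_t num_desc = 8;    /* toy ring; real rings are far larger */
        uint32_t producer = 6;
        int posted;

        /* Post three buffers, then compute the doorbell value the way the
         * refill paths do: the register gets the index of the last filled
         * descriptor, i.e. (producer - 1) masked to the ring size.
         */
        for (posted = 0; posted < 3; posted++)
                producer = next_index(producer, num_desc);

        printf("new producer=%u, doorbell value=%u\n",
               producer, (producer - 1) & (num_desc - 1));
        return 0;
}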
+ +static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, + struct qlcnic_host_tx_ring *tx_ring, + int budget) +{ + u32 sw_consumer, hw_consumer; + int i, done, count = 0; + struct qlcnic_cmd_buffer *buffer; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct qlcnic_skb_frag *frag; + + if (!spin_trylock(&tx_ring->tx_clean_lock)) + return 1; + + sw_consumer = tx_ring->sw_consumer; + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + + while (sw_consumer != hw_consumer) { + buffer = &tx_ring->cmd_buf_arr[sw_consumer]; + if (buffer->skb) { + frag = &buffer->frag_array[0]; + pci_unmap_single(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + for (i = 1; i < buffer->frag_count; i++) { + frag++; + pci_unmap_page(pdev, frag->dma, frag->length, + PCI_DMA_TODEVICE); + frag->dma = 0ULL; + } + tx_ring->tx_stats.xmit_finished++; + dev_kfree_skb_any(buffer->skb); + buffer->skb = NULL; + } + + sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc); + if (++count >= budget) + break; + } + + tx_ring->sw_consumer = sw_consumer; + + if (count && netif_running(netdev)) { + smp_mb(); + if (netif_tx_queue_stopped(tx_ring->txq) && + netif_carrier_ok(netdev)) { + if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) { + netif_tx_wake_queue(tx_ring->txq); + tx_ring->tx_stats.xmit_on++; + } + } + adapter->tx_timeo_cnt = 0; + } + /* + * If everything is freed up to consumer then check if the ring is full + * If the ring is full then check if more needs to be freed and + * schedule the call back again. + * + * This happens when there are 2 CPUs. One could be freeing and the + * other filling it. If the ring is full when we get out of here and + * the card has already interrupted the host then the host can miss the + * interrupt. + * + * There is still a possible race condition and the host could miss an + * interrupt. The card has to take care of this. 
+ */ + hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); + done = (sw_consumer == hw_consumer); + + spin_unlock(&tx_ring->tx_clean_lock); + + return done; +} + +static int qlcnic_poll(struct napi_struct *napi, int budget) +{ + int tx_complete, work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + struct qlcnic_host_tx_ring *tx_ring; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + tx_ring = sds_ring->tx_ring; + + tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, + budget); + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + qlcnic_enable_sds_intr(adapter, sds_ring); + qlcnic_enable_tx_intr(adapter, tx_ring); + } + } + + return work_done; +} + +static int qlcnic_tx_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_adapter *adapter; + int work_done; + + tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); + adapter = tx_ring->adapter; + + work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + if (work_done) { + napi_complete(&tx_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_tx_intr(adapter, tx_ring); + } else { + /* As qlcnic_process_cmd_ring() returned 0, we need a repoll*/ + work_done = budget; + } + + return work_done; +} + +static int qlcnic_rx_poll(struct napi_struct *napi, int budget) +{ + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + int work_done; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + + work_done = qlcnic_process_rcv_ring(sds_ring, budget); + + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_sds_intr(adapter, sds_ring); + } + + return work_done; +} + +static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, + struct qlcnic_fw_msg *msg) +{ + u32 cable_OUI; + u16 cable_len, link_speed; + u8 link_status, module, duplex, autoneg, lb_status = 0; + struct net_device *netdev = adapter->netdev; + + adapter->ahw->has_link_events = 1; + + cable_OUI = msg->body[1] & 0xffffffff; + cable_len = (msg->body[1] >> 32) & 0xffff; + link_speed = (msg->body[1] >> 48) & 0xffff; + + link_status = msg->body[2] & 0xff; + duplex = (msg->body[2] >> 16) & 0xff; + autoneg = (msg->body[2] >> 24) & 0xff; + lb_status = (msg->body[2] >> 32) & 0x3; + + module = (msg->body[2] >> 8) & 0xff; + if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) + dev_info(&netdev->dev, + "unsupported cable: OUI 0x%x, length %d\n", + cable_OUI, cable_len); + else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) + dev_info(&netdev->dev, "unsupported cable length %d\n", + cable_len); + + if (!link_status && (lb_status == QLCNIC_ILB_MODE || + lb_status == QLCNIC_ELB_MODE)) + adapter->ahw->loopback_state |= QLCNIC_LINKEVENT; + + qlcnic_advert_link_change(adapter, link_status); + + if (duplex == LINKEVENT_FULL_DUPLEX) + adapter->ahw->link_duplex = DUPLEX_FULL; + else + adapter->ahw->link_duplex = DUPLEX_HALF; + + adapter->ahw->module_type = module; + adapter->ahw->link_autoneg = autoneg; + + if (link_status) { + adapter->ahw->link_speed = link_speed; + } else { + adapter->ahw->link_speed = SPEED_UNKNOWN; + adapter->ahw->link_duplex = DUPLEX_UNKNOWN; + } 
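qlcnic_handle_linkevent() above pulls its fields out of packed 64-bit message words; body[1], for instance, carries the cable OUI in the low 32 bits, the cable length in bits 32-47 and the link speed in bits 48-63. A short sketch of that unpacking, with an invented sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Made-up link-event word: 10G link, short cable, arbitrary OUI. */
        const uint64_t body1 = ((uint64_t)10000 << 48)  /* link speed */
                             | ((uint64_t)3 << 32)      /* cable length */
                             | 0x123456;                /* cable OUI */

        printf("OUI=0x%06llx len=%llu speed=%llu\n",
               (unsigned long long)(body1 & 0xffffffff),
               (unsigned long long)((body1 >> 32) & 0xffff),
               (unsigned long long)((body1 >> 48) & 0xffff));
        return 0;
}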
+} + +static void qlcnic_handle_fw_message(int desc_cnt, int index, + struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_fw_msg msg; + struct status_desc *desc; + struct qlcnic_adapter *adapter; + struct device *dev; + int i = 0, opcode, ret; + + while (desc_cnt > 0 && i < 8) { + desc = &sds_ring->desc_head[index]; + msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]); + msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]); + + index = get_next_index(index, sds_ring->num_desc); + desc_cnt--; + } + + adapter = sds_ring->adapter; + dev = &adapter->pdev->dev; + opcode = qlcnic_get_nic_msg_opcode(msg.body[0]); + + switch (opcode) { + case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE: + qlcnic_handle_linkevent(adapter, &msg); + break; + case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK: + ret = (u32)(msg.body[1]); + switch (ret) { + case 0: + adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE; + break; + case 1: + dev_info(dev, "loopback already in progress\n"); + adapter->ahw->diag_cnt = -EINPROGRESS; + break; + case 2: + dev_info(dev, "loopback cable is not connected\n"); + adapter->ahw->diag_cnt = -ENODEV; + break; + default: + dev_info(dev, + "loopback configure request failed, err %x\n", + ret); + adapter->ahw->diag_cnt = -EIO; + break; + } + break; + case QLCNIC_C2H_OPCODE_GET_DCB_AEN: + qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg); + break; + default: + break; + } +} + +static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *ring, + u16 index, u16 cksum) +{ + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + + buffer = &ring->rx_buf_arr[index]; + if (unlikely(buffer->skb == NULL)) { + WARN_ON(1); + return NULL; + } + + pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size, + PCI_DMA_FROMDEVICE); + + skb = buffer->skb; + if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && + (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) { + adapter->stats.csummed++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + + + buffer->skb = NULL; + + return skb; +} + +static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, + struct sk_buff *skb, u16 *vlan_tag) +{ + struct ethhdr *eth_hdr; + + if (!__vlan_get_tag(skb, vlan_tag)) { + eth_hdr = (struct ethhdr *)skb->data; + memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); + skb_pull(skb, VLAN_HLEN); + } + if (!adapter->rx_pvid) + return 0; + + if (*vlan_tag == adapter->rx_pvid) { + /* Outer vlan tag. 
Packet should follow non-vlan path */ + *vlan_tag = 0xffff; + return 0; + } + if (adapter->flags & QLCNIC_TAGGING_ENABLED) + return 0; + + return -EINVAL; +} + +static struct qlcnic_rx_buffer * +qlcnic_process_rcv(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, int ring, + u64 sts_data0) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset, is_lb_pkt; + u16 vid = 0xffff, t_vid; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + length = qlcnic_get_sts_totallength(sts_data0); + cksum = qlcnic_get_sts_status(sts_data0); + pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (adapter->rx_mac_learn) { + t_vid = 0; + is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); + } + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + + napi_gro_receive(&sds_ring->napi, skb); + + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; + + return buffer; +} + +#define QLC_TCP_HDR_SIZE 20 +#define QLC_TCP_TS_OPTION_SIZE 12 +#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE) + +static struct qlcnic_rx_buffer * +qlcnic_process_lro(struct qlcnic_adapter *adapter, + int ring, u64 sts_data0, u64 sts_data1) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + struct iphdr *iph; + struct ipv6hdr *ipv6h; + struct tcphdr *th; + bool push, timestamp; + int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt; + u16 lro_length, length, data_offset, t_vid, vid = 0xffff; + u32 seq_number; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_lro_sts_refhandle(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + timestamp = qlcnic_get_lro_sts_timestamp(sts_data0); + lro_length = qlcnic_get_lro_sts_length(sts_data0); + l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0); + l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0); + push = qlcnic_get_lro_sts_push_flag(sts_data0); + seq_number = qlcnic_get_lro_sts_seq_number(sts_data1); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return buffer; + + if (adapter->rx_mac_learn) { + t_vid = 0; + is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid); + } + + if (timestamp) + data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE; + else + data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE; + + skb_put(skb, lro_length + data_offset); + 
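The LRO completion below rebuilds the aggregated packet's IP total length from the IP and TCP header lengths plus the hardware-reported LRO payload length. A quick worked example with invented header sizes and payload length:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* ihl and doff are in 32-bit words, as in the iphdr/tcphdr bitfields. */
        const uint8_t ihl = 5;                  /* 20-byte IPv4 header */
        const uint8_t doff = 8;                 /* 32-byte TCP header with options */
        const uint16_t lro_length = 2920;       /* two coalesced 1460-byte segments */
        uint16_t tot_len = (ihl << 2) + (doff << 2) + lro_length;

        printf("new IPv4 tot_len = %u bytes\n", tot_len);       /* 2972 */
        return 0;
}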
skb_pull(skb, l2_hdr_offset); + + if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (ntohs(skb->protocol) == ETH_P_IPV6) { + ipv6h = (struct ipv6hdr *)skb->data; + th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr)); + length = (th->doff << 2) + lro_length; + ipv6h->payload_len = htons(length); + } else { + iph = (struct iphdr *)skb->data; + th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); + length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); + iph->tot_len = htons(length); + } + + th->psh = push; + th->seq = htonl(seq_number); + length = skb->len; + + if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) { + skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1); + if (skb->protocol == htons(ETH_P_IPV6)) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + } + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + netif_receive_skb(skb); + + adapter->stats.lro_pkts++; + adapter->stats.lrobytes += length; + + return buffer; +} + +static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max) +{ + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct list_head *cur; + struct status_desc *desc; + struct qlcnic_rx_buffer *rxbuf; + int opcode, desc_cnt, count = 0; + u64 sts_data0, sts_data1; + u8 ring; + u32 consumer = sds_ring->consumer; + + while (count < max) { + desc = &sds_ring->desc_head[consumer]; + sts_data0 = le64_to_cpu(desc->status_desc_data[0]); + + if (!(sts_data0 & STATUS_OWNER_HOST)) + break; + + desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); + opcode = qlcnic_get_sts_opcode(sts_data0); + switch (opcode) { + case QLCNIC_RXPKT_DESC: + case QLCNIC_OLD_RXPKT_DESC: + case QLCNIC_SYN_OFFLOAD: + ring = qlcnic_get_sts_type(sts_data0); + rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring, + sts_data0); + break; + case QLCNIC_LRO_DESC: + ring = qlcnic_get_lro_sts_type(sts_data0); + sts_data1 = le64_to_cpu(desc->status_desc_data[1]); + rxbuf = qlcnic_process_lro(adapter, ring, sts_data0, + sts_data1); + break; + case QLCNIC_RESPONSE_DESC: + qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); + default: + goto skip; + } + WARN_ON(desc_cnt > 1); + + if (likely(rxbuf)) + list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + else + adapter->stats.null_rxbuf++; +skip: + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW; + consumer = get_next_index(consumer, sds_ring->num_desc); + } + count++; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, struct qlcnic_rx_buffer, + list); + qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + list_splice_tail_init(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + + qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring); + } + + if (count) { + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); + } + + return count; +} + +void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, + struct qlcnic_host_rds_ring *rds_ring, u8 ring_id) +{ + struct 
rcv_desc *pdesc; + struct qlcnic_rx_buffer *buffer; + int count = 0; + u32 producer, handle; + struct list_head *head; + + producer = rds_ring->producer; + head = &rds_ring->free_list; + + while (!list_empty(head)) { + + buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); + + if (!buffer->skb) { + if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer)) + break; + } + + count++; + list_del(&buffer->list); + + /* make a rcv descriptor */ + pdesc = &rds_ring->desc_head[producer]; + pdesc->addr_buffer = cpu_to_le64(buffer->dma); + handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle, + ring_id); + pdesc->reference_handle = cpu_to_le16(handle); + pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); + producer = get_next_index(producer, rds_ring->num_desc); + } + + if (count) { + rds_ring->producer = producer; + writel((producer-1) & (rds_ring->num_desc-1), + rds_ring->crb_rcv_producer); + } +} + +static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter) +{ + if (adapter->ahw->msg_enable & NETIF_MSG_DRV) { + char prefix[30]; + + scnprintf(prefix, sizeof(prefix), "%s: %s: ", + dev_name(&adapter->pdev->dev), __func__); + + print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1, + skb->data, skb->len, true); + } +} + +static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring, + u64 sts_data0) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, pkt_offset; + + if (unlikely(ring >= adapter->max_rds_rings)) + return; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_get_sts_refhandle(sts_data0); + length = qlcnic_get_sts_totallength(sts_data0); + if (unlikely(index >= rds_ring->num_desc)) + return; + + cksum = qlcnic_get_sts_status(sts_data0); + pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (pkt_offset) + skb_pull(skb, pkt_offset); + + if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) + adapter->ahw->diag_cnt++; + else + dump_skb(skb, adapter); + + dev_kfree_skb_any(skb); + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; + + return; +} + +void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct status_desc *desc; + u64 sts_data0; + int ring, opcode, desc_cnt; + + u32 consumer = sds_ring->consumer; + + desc = &sds_ring->desc_head[consumer]; + sts_data0 = le64_to_cpu(desc->status_desc_data[0]); + + if (!(sts_data0 & STATUS_OWNER_HOST)) + return; + + desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0); + opcode = qlcnic_get_sts_opcode(sts_data0); + switch (opcode) { + case QLCNIC_RESPONSE_DESC: + qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring); + break; + default: + ring = qlcnic_get_sts_type(sts_data0); + qlcnic_process_rcv_diag(adapter, ring, sts_data0); + break; + } + + for (; desc_cnt > 0; desc_cnt--) { + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = get_next_index(consumer, sds_ring->num_desc); + } + + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); +} + +int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context 
*recv_ctx = adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + + if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings)) + return -ENOMEM; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + if (qlcnic_check_multi_tx(adapter) && + !adapter->ahw->diag_test) { + netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll, + NAPI_POLL_WEIGHT); + } else { + if (ring == (adapter->drv_sds_rings - 1)) + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_poll, + NAPI_POLL_WEIGHT); + else + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_rx_poll, + NAPI_POLL_WEIGHT); + } + } + + if (qlcnic_alloc_tx_rings(adapter, netdev)) { + qlcnic_free_sds_rings(recv_ctx); + return -ENOMEM; + } + + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll, + NAPI_POLL_WEIGHT); + } + } + + return 0; +} + +void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + qlcnic_free_sds_rings(adapter->recv_ctx); + + if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_del(&tx_ring->napi); + } + } + + qlcnic_free_tx_rings(adapter); +} + +void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + qlcnic_enable_sds_intr(adapter, sds_ring); + } + + if (qlcnic_check_multi_tx(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED) && + !adapter->ahw->diag_test) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + napi_enable(&tx_ring->napi); + qlcnic_enable_tx_intr(adapter, tx_ring); + } + } +} + +void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + qlcnic_disable_sds_intr(adapter, sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } + + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !adapter->ahw->diag_test && + qlcnic_check_multi_tx(adapter)) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + qlcnic_disable_tx_intr(adapter, tx_ring); + napi_synchronize(&tx_ring->napi); + napi_disable(&tx_ring->napi); + } + } +} + +#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36) +#define QLC_83XX_LRO_LB_PKT (1ULL << 46) + +static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt) +{ + if (lro_pkt) + return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0; + else + return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 
1 : 0; +} + +#define QLCNIC_ENCAP_LENGTH_MASK 0x7f + +static inline u8 qlcnic_encap_length(u64 sts_data) +{ + return sts_data & QLCNIC_ENCAP_LENGTH_MASK; +} + +static struct qlcnic_rx_buffer * +qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, + struct qlcnic_host_sds_ring *sds_ring, + u8 ring, u64 sts_data[]) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length, cksum, is_lb_pkt; + u16 vid = 0xffff; + int err; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_83xx_hndl(sts_data[0]); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + length = qlcnic_83xx_pktln(sts_data[0]); + cksum = qlcnic_83xx_csum_status(sts_data[1]); + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum); + if (!skb) + return buffer; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + err = qlcnic_check_rx_tagging(adapter, skb, &vid); + + if (adapter->rx_mac_learn) { + is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid); + } + + if (unlikely(err)) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (qlcnic_encap_length(sts_data[1]) && + skb->ip_summed == CHECKSUM_UNNECESSARY) { + skb->csum_level = 1; + adapter->stats.encap_rx_csummed++; + } + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + + napi_gro_receive(&sds_ring->napi, skb); + + adapter->stats.rx_pkts++; + adapter->stats.rxbytes += length; + + return buffer; +} + +static struct qlcnic_rx_buffer * +qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter, + u8 ring, u64 sts_data[]) +{ + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_rx_buffer *buffer; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + struct iphdr *iph; + struct ipv6hdr *ipv6h; + struct tcphdr *th; + bool push; + int l2_hdr_offset, l4_hdr_offset; + int index, is_lb_pkt; + u16 lro_length, length, data_offset, gso_size; + u16 vid = 0xffff; + int err; + + if (unlikely(ring >= adapter->max_rds_rings)) + return NULL; + + rds_ring = &recv_ctx->rds_rings[ring]; + + index = qlcnic_83xx_hndl(sts_data[0]); + if (unlikely(index >= rds_ring->num_desc)) + return NULL; + + buffer = &rds_ring->rx_buf_arr[index]; + + lro_length = qlcnic_83xx_lro_pktln(sts_data[0]); + l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]); + l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]); + push = qlcnic_83xx_is_psh_bit(sts_data[1]); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return buffer; + + if (qlcnic_83xx_is_tstamp(sts_data[1])) + data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE; + else + data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE; + + skb_put(skb, lro_length + data_offset); + skb_pull(skb, l2_hdr_offset); + + err = qlcnic_check_rx_tagging(adapter, skb, &vid); + + if (adapter->rx_mac_learn) { + is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1); + qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid); + } + + if (unlikely(err)) { + adapter->stats.rxdropped++; + dev_kfree_skb(skb); + return buffer; + } + + skb->protocol = eth_type_trans(skb, netdev); + if 
(ntohs(skb->protocol) == ETH_P_IPV6) { + ipv6h = (struct ipv6hdr *)skb->data; + th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr)); + + length = (th->doff << 2) + lro_length; + ipv6h->payload_len = htons(length); + } else { + iph = (struct iphdr *)skb->data; + th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); + length = (iph->ihl << 2) + (th->doff << 2) + lro_length; + csum_replace2(&iph->check, iph->tot_len, htons(length)); + iph->tot_len = htons(length); + } + + th->psh = push; + length = skb->len; + + if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) { + gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]); + skb_shinfo(skb)->gso_size = gso_size; + if (skb->protocol == htons(ETH_P_IPV6)) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + } + + if (vid != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + + netif_receive_skb(skb); + + adapter->stats.lro_pkts++; + adapter->stats.lrobytes += length; + return buffer; +} + +static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, + int max) +{ + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct list_head *cur; + struct status_desc *desc; + struct qlcnic_rx_buffer *rxbuf = NULL; + u8 ring; + u64 sts_data[2]; + int count = 0, opcode; + u32 consumer = sds_ring->consumer; + + while (count < max) { + desc = &sds_ring->desc_head[consumer]; + sts_data[1] = le64_to_cpu(desc->status_desc_data[1]); + opcode = qlcnic_83xx_opcode(sts_data[1]); + if (!opcode) + break; + sts_data[0] = le64_to_cpu(desc->status_desc_data[0]); + ring = QLCNIC_FETCH_RING_ID(sts_data[0]); + + switch (opcode) { + case QLC_83XX_REG_DESC: + rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring, + ring, sts_data); + break; + case QLC_83XX_LRO_DESC: + rxbuf = qlcnic_83xx_process_lro(adapter, ring, + sts_data); + break; + default: + dev_info(&adapter->pdev->dev, + "Unknown opcode: 0x%x\n", opcode); + goto skip; + } + + if (likely(rxbuf)) + list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); + else + adapter->stats.null_rxbuf++; +skip: + desc = &sds_ring->desc_head[consumer]; + /* Reset the descriptor */ + desc->status_desc_data[1] = 0; + consumer = get_next_index(consumer, sds_ring->num_desc); + count++; + } + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + if (!list_empty(&sds_ring->free_list[ring])) { + list_for_each(cur, &sds_ring->free_list[ring]) { + rxbuf = list_entry(cur, struct qlcnic_rx_buffer, + list); + qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf); + } + spin_lock(&rds_ring->lock); + list_splice_tail_init(&sds_ring->free_list[ring], + &rds_ring->free_list); + spin_unlock(&rds_ring->lock); + } + qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring); + } + if (count) { + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); + } + return count; +} + +static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget) +{ + int tx_complete; + int work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + struct qlcnic_host_tx_ring *tx_ring; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + /* tx ring count = 1 */ + tx_ring = adapter->tx_ring; + + tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if 
(work_done < budget) { + napi_complete(&sds_ring->napi); + qlcnic_enable_sds_intr(adapter, sds_ring); + } + + return work_done; +} + +static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) +{ + int tx_complete; + int work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + struct qlcnic_host_tx_ring *tx_ring; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + /* tx ring count = 1 */ + tx_ring = adapter->tx_ring; + + tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + + /* Check if we need a repoll */ + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { + napi_complete(&sds_ring->napi); + qlcnic_enable_sds_intr(adapter, sds_ring); + } + + return work_done; +} + +static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) +{ + int work_done; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_adapter *adapter; + + budget = QLCNIC_TX_POLL_BUDGET; + tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); + adapter = tx_ring->adapter; + work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); + if (work_done) { + napi_complete(&tx_ring->napi); + if (test_bit(__QLCNIC_DEV_UP , &adapter->state)) + qlcnic_enable_tx_intr(adapter, tx_ring); + } else { + /* need a repoll */ + work_done = budget; + } + + return work_done; +} + +static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget) +{ + int work_done; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_adapter *adapter; + + sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi); + adapter = sds_ring->adapter; + work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget); + if (work_done < budget) { + napi_complete(&sds_ring->napi); + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_enable_sds_intr(adapter, sds_ring); + } + + return work_done; +} + +void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + napi_enable(&sds_ring->napi); + if (adapter->flags & QLCNIC_MSIX_ENABLED) + qlcnic_enable_sds_intr(adapter, sds_ring); + } + + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + napi_enable(&tx_ring->napi); + qlcnic_enable_tx_intr(adapter, tx_ring); + } + } +} + +void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) + qlcnic_disable_sds_intr(adapter, sds_ring); + napi_synchronize(&sds_ring->napi); + napi_disable(&sds_ring->napi); + } + + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + qlcnic_disable_tx_intr(adapter, tx_ring); + 
napi_synchronize(&tx_ring->napi); + napi_disable(&tx_ring->napi); + } + } +} + +int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings)) + return -ENOMEM; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_rx_poll, + NAPI_POLL_WEIGHT); + else + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_msix_sriov_vf_poll, + NAPI_POLL_WEIGHT); + + } else { + netif_napi_add(netdev, &sds_ring->napi, + qlcnic_83xx_poll, + NAPI_POLL_WEIGHT); + } + } + + if (qlcnic_alloc_tx_rings(adapter, netdev)) { + qlcnic_free_sds_rings(recv_ctx); + return -ENOMEM; + } + + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_add(netdev, &tx_ring->napi, + qlcnic_83xx_msix_tx_poll, + NAPI_POLL_WEIGHT); + } + } + + return 0; +} + +void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + netif_napi_del(&sds_ring->napi); + } + + qlcnic_free_sds_rings(adapter->recv_ctx); + + if ((adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + netif_napi_del(&tx_ring->napi); + } + } + + qlcnic_free_tx_rings(adapter); +} + +static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter, + int ring, u64 sts_data[]) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct sk_buff *skb; + struct qlcnic_host_rds_ring *rds_ring; + int index, length; + + if (unlikely(ring >= adapter->max_rds_rings)) + return; + + rds_ring = &recv_ctx->rds_rings[ring]; + index = qlcnic_83xx_hndl(sts_data[0]); + if (unlikely(index >= rds_ring->num_desc)) + return; + + length = qlcnic_83xx_pktln(sts_data[0]); + + skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK); + if (!skb) + return; + + if (length > rds_ring->skb_size) + skb_put(skb, rds_ring->skb_size); + else + skb_put(skb, length); + + if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr)) + adapter->ahw->diag_cnt++; + else + dump_skb(skb, adapter); + + dev_kfree_skb_any(skb); + return; +} + +void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring) +{ + struct qlcnic_adapter *adapter = sds_ring->adapter; + struct status_desc *desc; + u64 sts_data[2]; + int ring, opcode; + u32 consumer = sds_ring->consumer; + + desc = &sds_ring->desc_head[consumer]; + sts_data[0] = le64_to_cpu(desc->status_desc_data[0]); + sts_data[1] = le64_to_cpu(desc->status_desc_data[1]); + opcode = qlcnic_83xx_opcode(sts_data[1]); + if (!opcode) + return; + + ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0])); + qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data); + desc = &sds_ring->desc_head[consumer]; + desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM); + consumer = 
get_next_index(consumer, sds_ring->num_desc); + sds_ring->consumer = consumer; + writel(consumer, sds_ring->crb_sts_consumer); +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c new file mode 100644 index 000000000..49c525185 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -0,0 +1,4341 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include <linux/vmalloc.h> +#include <linux/interrupt.h> + +#include "qlcnic.h" +#include "qlcnic_sriov.h" +#include "qlcnic_hw.h" + +#include <linux/swab.h> +#include <linux/dma-mapping.h> +#include <linux/if_vlan.h> +#include <net/ip.h> +#include <linux/ipv6.h> +#include <linux/inetdevice.h> +#include <linux/aer.h> +#include <linux/log2.h> +#include <linux/pci.h> +#ifdef CONFIG_QLCNIC_VXLAN +#include <net/vxlan.h> +#endif + +MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(QLCNIC_LINUX_VERSIONID); +/*(DEBLOBBED)*/ + +char qlcnic_driver_name[] = "qlcnic"; +static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " + "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID; + +static int qlcnic_mac_learn; +module_param(qlcnic_mac_learn, int, 0444); +MODULE_PARM_DESC(qlcnic_mac_learn, + "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); + +int qlcnic_use_msi = 1; +MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); +module_param_named(use_msi, qlcnic_use_msi, int, 0444); + +int qlcnic_use_msi_x = 1; +MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); +module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); + +int qlcnic_auto_fw_reset = 1; +MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); +module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); + +int qlcnic_load_fw_file; +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3= POST in medium mode, 4=POST in slow mode)"); +module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); + +static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void qlcnic_remove(struct pci_dev *pdev); +static int qlcnic_open(struct net_device *netdev); +static int qlcnic_close(struct net_device *netdev); +static void qlcnic_tx_timeout(struct net_device *netdev); +static void qlcnic_attach_work(struct work_struct *work); +static void qlcnic_fwinit_work(struct work_struct *work); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void qlcnic_poll_controller(struct net_device *netdev); +#endif + +static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); +static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); + +static irqreturn_t qlcnic_tmp_intr(int irq, void *data); +static irqreturn_t qlcnic_intr(int irq, void *data); +static irqreturn_t qlcnic_msi_intr(int irq, void *data); +static irqreturn_t qlcnic_msix_intr(int irq, void *data); +static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data); + +static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); +static int qlcnic_start_firmware(struct qlcnic_adapter *); + +static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); +static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); +static int qlcnicvf_start_firmware(struct qlcnic_adapter *); +static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16); +static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16); + +static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *); +static
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32); +static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *); +static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *); +static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *); +static void qlcnic_82xx_io_resume(struct pci_dev *); +static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *); +static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *, + pci_channel_state_t); +static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X) + return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX; + else + return 1; +} + +/* PCI Device ID Table */ +#define ENTRY(device) \ + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ + .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} + +static const struct pci_device_id qlcnic_pci_tbl[] = { + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), + ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), + ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), + {0,} +}; + +MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); + + +inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring) +{ + writel(tx_ring->producer, tx_ring->crb_cmd_producer); +} + +static const u32 msi_tgt_status[8] = { + ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, + ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, + ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, + ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 +}; + +static const u32 qlcnic_reg_tbl[] = { + 0x1B20A8, /* PEG_HALT_STAT1 */ + 0x1B20AC, /* PEG_HALT_STAT2 */ + 0x1B20B0, /* FW_HEARTBEAT */ + 0x1B2100, /* LOCK ID */ + 0x1B2128, /* FW_CAPABILITIES */ + 0x1B2138, /* drv active */ + 0x1B2140, /* dev state */ + 0x1B2144, /* drv state */ + 0x1B2148, /* drv scratch */ + 0x1B214C, /* dev partition info */ + 0x1B2174, /* drv idc ver */ + 0x1B2150, /* fw version major */ + 0x1B2154, /* fw version minor */ + 0x1B2158, /* fw version sub */ + 0x1B219C, /* npar state */ + 0x1B21FC, /* FW_IMG_VALID */ + 0x1B2250, /* CMD_PEG_STATE */ + 0x1B233C, /* RCV_PEG_STATE */ + 0x1B23B4, /* ASIC TEMP */ + 0x1B216C, /* FW api */ + 0x1B2170, /* drv op mode */ + 0x13C010, /* flash lock */ + 0x13C014, /* flash unlock */ +}; + +static const struct qlcnic_board_info qlcnic_boards[] = { + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE844X, + 0x0, + 0x0, + "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x24e, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x243, + "8300 Series Single Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x24a, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x246, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x252, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { 
PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x26e, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x260, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x266, + "8300 Series Single Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x269, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + PCI_VENDOR_ID_QLOGIC, + 0x271, + "8300 Series Dual Port 10GbE Converged Network Adapter " + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE834X, + 0x0, 0x0, "8300 Series 1/10GbE Controller" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE8830, + 0x0, + 0x0, + "8830 Series 1/10GbE Controller" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + PCI_VENDOR_ID_QLOGIC, + 0x203, + "8200 Series Single Port 10GbE Converged Network Adapter" + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + PCI_VENDOR_ID_QLOGIC, + 0x207, + "8200 Series Dual Port 10GbE Converged Network Adapter" + "(TCP/IP Networking)" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + PCI_VENDOR_ID_QLOGIC, + 0x20b, + "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + PCI_VENDOR_ID_QLOGIC, + 0x20c, + "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + PCI_VENDOR_ID_QLOGIC, + 0x20f, + "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + 0x103c, 0x3733, + "NC523SFP 10Gb 2-port Server Adapter" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + 0x103c, 0x3346, + "CN1000Q Dual Port Converged Network Adapter" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + PCI_VENDOR_ID_QLOGIC, + 0x210, + "QME8242-k 10GbE Dual Port Mezzanine Card" }, + { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE824X, + 0x0, 0x0, "cLOM8214 1/10GbE Controller" }, +}; + +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards) + +static const +struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; + +int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count) +{ + int size = sizeof(struct qlcnic_host_sds_ring) * count; + + recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); + + return recv_ctx->sds_rings == NULL; +} + +void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx) +{ + kfree(recv_ctx->sds_rings); + recv_ctx->sds_rings = NULL; +} + +int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + u8 mac_addr[ETH_ALEN]; + int ret; + + ret = qlcnic_get_mac_address(adapter, mac_addr, + adapter->ahw->pci_func); + if (ret) + return ret; + + memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); + memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); + + /* set station address */ + + if (!is_valid_ether_addr(netdev->dev_addr)) + dev_warn(&pdev->dev, "Bad MAC address %pM.\n", + netdev->dev_addr); + + return 0; +} + +static void qlcnic_delete_adapter_mac(struct 
qlcnic_adapter *adapter) +{ + struct qlcnic_mac_vlan_list *cur; + struct list_head *head; + + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_vlan_list, list); + if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) { + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + 0, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + return; + } + } +} + +static int qlcnic_set_mac(struct net_device *netdev, void *p) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (qlcnic_sriov_vf_check(adapter)) + return -EINVAL; + + if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) + return -EOPNOTSUPP; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data)) + return 0; + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_device_detach(netdev); + qlcnic_napi_disable(adapter); + } + + qlcnic_delete_adapter_mac(adapter); + memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + qlcnic_set_multi(adapter->netdev); + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + netif_device_attach(netdev); + qlcnic_napi_enable(adapter); + } + return 0; +} + +static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 vid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = -EOPNOTSUPP; + + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid); + + if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || + qlcnic_sriov_check(adapter)) { + if (is_unicast_ether_addr(addr)) { + err = dev_uc_del(netdev, addr); + if (!err) + err = qlcnic_nic_del_mac(adapter, addr); + } else if (is_multicast_ether_addr(addr)) { + err = dev_mc_del(netdev, addr); + } else { + err = -EINVAL; + } + } + return err; +} + +static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *netdev, + const unsigned char *addr, u16 vid, u16 flags) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags); + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) && + !qlcnic_sriov_check(adapter)) { + pr_info("%s: FDB e-switch is not enabled\n", __func__); + return -EOPNOTSUPP; + } + + if (ether_addr_equal(addr, adapter->mac_addr)) + return err; + + if (is_unicast_ether_addr(addr)) { + if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count) + err = dev_uc_add_excl(netdev, addr); + else + err = -ENOMEM; + } else if (is_multicast_ether_addr(addr)) { + err = dev_mc_add_excl(netdev, addr); + } else { + err = -EINVAL; + } + + return err; +} + +static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb, + struct net_device *netdev, + struct net_device *filter_dev, int idx) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (!adapter->fdb_mac_learn) + return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx); + + if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || + qlcnic_sriov_check(adapter)) + idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx); + + return idx; +} + +static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter) +{ + while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + usleep_range(10000, 11000); + + if (!adapter->fw_work.work.func) + return; + + cancel_delayed_work_sync(&adapter->fw_work); +} + +static int 
qlcnic_get_phys_port_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID)) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(ahw->phys_port_id); + memcpy(ppid->id, ahw->phys_port_id, ppid->id_len); + + return 0; +} + +#ifdef CONFIG_QLCNIC_VXLAN +static void qlcnic_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + + /* Adapter supports only one VXLAN port. Use very first port + * for enabling offload + */ + if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port) + return; + + ahw->vxlan_port = ntohs(port); + adapter->flags |= QLCNIC_ADD_VXLAN_PORT; +} + +static void qlcnic_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port || + (ahw->vxlan_port != ntohs(port))) + return; + + adapter->flags |= QLCNIC_DEL_VXLAN_PORT; +} + +static netdev_features_t qlcnic_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + features = vlan_features_check(skb, features); + return vxlan_features_check(skb, features); +} +#endif + +static const struct net_device_ops qlcnic_netdev_ops = { + .ndo_open = qlcnic_open, + .ndo_stop = qlcnic_close, + .ndo_start_xmit = qlcnic_xmit_frame, + .ndo_get_stats = qlcnic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = qlcnic_set_multi, + .ndo_set_mac_address = qlcnic_set_mac, + .ndo_change_mtu = qlcnic_change_mtu, + .ndo_fix_features = qlcnic_fix_features, + .ndo_set_features = qlcnic_set_features, + .ndo_tx_timeout = qlcnic_tx_timeout, + .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add, + .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del, + .ndo_fdb_add = qlcnic_fdb_add, + .ndo_fdb_del = qlcnic_fdb_del, + .ndo_fdb_dump = qlcnic_fdb_dump, + .ndo_get_phys_port_id = qlcnic_get_phys_port_id, +#ifdef CONFIG_QLCNIC_VXLAN + .ndo_add_vxlan_port = qlcnic_add_vxlan_port, + .ndo_del_vxlan_port = qlcnic_del_vxlan_port, + .ndo_features_check = qlcnic_features_check, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = qlcnic_poll_controller, +#endif +#ifdef CONFIG_QLCNIC_SRIOV + .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, + .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate, + .ndo_get_vf_config = qlcnic_sriov_get_vf_config, + .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk, +#endif +}; + +static const struct net_device_ops qlcnic_netdev_failed_ops = { + .ndo_open = qlcnic_open, +}; + +static struct qlcnic_nic_template qlcnic_ops = { + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_82xx_config_led, + .start_firmware = qlcnic_82xx_start_firmware, + .request_reset = qlcnic_82xx_dev_request_reset, + .cancel_idc_work = qlcnic_82xx_cancel_idc_work, + .napi_add = qlcnic_82xx_napi_add, + .napi_del = qlcnic_82xx_napi_del, + .config_ipaddr = qlcnic_82xx_config_ipaddr, + .shutdown = qlcnic_82xx_shutdown, + .resume = qlcnic_82xx_resume, + .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr, +}; + +struct qlcnic_nic_template qlcnic_vf_ops = { + .config_bridged_mode = qlcnicvf_config_bridged_mode, + .config_led = qlcnicvf_config_led, + 
.start_firmware = qlcnicvf_start_firmware +}; + +static struct qlcnic_hardware_ops qlcnic_hw_ops = { + .read_crb = qlcnic_82xx_read_crb, + .write_crb = qlcnic_82xx_write_crb, + .read_reg = qlcnic_82xx_hw_read_wx_2M, + .write_reg = qlcnic_82xx_hw_write_wx_2M, + .get_mac_address = qlcnic_82xx_get_mac_address, + .setup_intr = qlcnic_82xx_setup_intr, + .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args, + .mbx_cmd = qlcnic_82xx_issue_cmd, + .get_func_no = qlcnic_82xx_get_func_no, + .api_lock = qlcnic_82xx_api_lock, + .api_unlock = qlcnic_82xx_api_unlock, + .add_sysfs = qlcnic_82xx_add_sysfs, + .remove_sysfs = qlcnic_82xx_remove_sysfs, + .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag, + .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx, + .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx, + .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx, + .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx, + .setup_link_event = qlcnic_82xx_linkevent_request, + .get_nic_info = qlcnic_82xx_get_nic_info, + .get_pci_info = qlcnic_82xx_get_pci_info, + .set_nic_info = qlcnic_82xx_set_nic_info, + .change_macvlan = qlcnic_82xx_sre_macaddr_change, + .napi_enable = qlcnic_82xx_napi_enable, + .napi_disable = qlcnic_82xx_napi_disable, + .config_intr_coal = qlcnic_82xx_config_intr_coalesce, + .config_rss = qlcnic_82xx_config_rss, + .config_hw_lro = qlcnic_82xx_config_hw_lro, + .config_loopback = qlcnic_82xx_set_lb_mode, + .clear_loopback = qlcnic_82xx_clear_lb_mode, + .config_promisc_mode = qlcnic_82xx_nic_set_promisc, + .change_l2_filter = qlcnic_82xx_change_filter, + .get_board_info = qlcnic_82xx_get_board_info, + .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count, + .free_mac_list = qlcnic_82xx_free_mac_list, + .read_phys_port_id = qlcnic_82xx_read_phys_port_id, + .io_error_detected = qlcnic_82xx_io_error_detected, + .io_slot_reset = qlcnic_82xx_io_slot_reset, + .io_resume = qlcnic_82xx_io_resume, + .get_beacon_state = qlcnic_82xx_get_beacon_state, + .enable_sds_intr = qlcnic_82xx_enable_sds_intr, + .disable_sds_intr = qlcnic_82xx_disable_sds_intr, + .enable_tx_intr = qlcnic_82xx_enable_tx_intr, + .disable_tx_intr = qlcnic_82xx_disable_tx_intr, + .get_saved_state = qlcnic_82xx_get_saved_state, + .set_saved_state = qlcnic_82xx_set_saved_state, + .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values, + .get_cap_size = qlcnic_82xx_get_cap_size, + .set_sys_info = qlcnic_82xx_set_sys_info, + .store_cap_mask = qlcnic_82xx_store_cap_mask, +}; + +static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + if (qlcnic_82xx_check(adapter) && + (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) { + test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state); + return 0; + } else { + return 1; + } +} + +static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt, + int queue_type) +{ + int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS; + + if (queue_type == QLCNIC_RX_QUEUE) + max_rings = adapter->max_sds_rings; + else if (queue_type == QLCNIC_TX_QUEUE) + max_rings = adapter->max_tx_rings; + + num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(), + max_rings)); + + if (ring_cnt > num_rings) + return num_rings; + else + return ring_cnt; +} + +void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt) +{ + /* 83xx adapter does not have max_tx_rings intialized in probe */ + if (adapter->max_tx_rings) + adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt, + QLCNIC_TX_QUEUE); + else + 
adapter->drv_tx_rings = tx_cnt; +} + +void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt) +{ + /* 83xx adapter does not have max_sds_rings intialized in probe */ + if (adapter->max_sds_rings) + adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt, + QLCNIC_RX_QUEUE); + else + adapter->drv_sds_rings = rx_cnt; +} + +int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int num_msix = 0, err = 0, vector; + + adapter->flags &= ~QLCNIC_TSS_RSS; + + if (adapter->drv_tss_rings > 0) + num_msix += adapter->drv_tss_rings; + else + num_msix += adapter->drv_tx_rings; + + if (adapter->drv_rss_rings > 0) + num_msix += adapter->drv_rss_rings; + else + num_msix += adapter->drv_sds_rings; + + if (qlcnic_83xx_check(adapter)) + num_msix += 1; + + if (!adapter->msix_entries) { + adapter->msix_entries = kcalloc(num_msix, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + } + + for (vector = 0; vector < num_msix; vector++) + adapter->msix_entries[vector].entry = vector; + +restore: + err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix); + if (err == -ENOSPC) { + if (!adapter->drv_tss_rings && !adapter->drv_rss_rings) + return err; + + netdev_info(adapter->netdev, + "Unable to allocate %d MSI-X vectors, Available vectors %d\n", + num_msix, err); + + num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings; + + /* Set rings to 0 so we can restore original TSS/RSS count */ + adapter->drv_tss_rings = 0; + adapter->drv_rss_rings = 0; + + if (qlcnic_83xx_check(adapter)) + num_msix += 1; + + netdev_info(adapter->netdev, + "Restoring %d Tx, %d SDS rings for total %d vectors.\n", + adapter->drv_tx_rings, adapter->drv_sds_rings, + num_msix); + + goto restore; + } else if (err < 0) { + return err; + } + + adapter->ahw->num_msix = num_msix; + if (adapter->drv_tss_rings > 0) + adapter->drv_tx_rings = adapter->drv_tss_rings; + + if (adapter->drv_rss_rings > 0) + adapter->drv_sds_rings = adapter->drv_rss_rings; + + return 0; +} + +int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) +{ + struct pci_dev *pdev = adapter->pdev; + int err, vector; + + if (!adapter->msix_entries) { + adapter->msix_entries = kcalloc(num_msix, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + } + + adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); + + if (adapter->ahw->msix_supported) { +enable_msix: + for (vector = 0; vector < num_msix; vector++) + adapter->msix_entries[vector].entry = vector; + + err = pci_enable_msix_range(pdev, + adapter->msix_entries, 1, num_msix); + + if (err == num_msix) { + adapter->flags |= QLCNIC_MSIX_ENABLED; + adapter->ahw->num_msix = num_msix; + dev_info(&pdev->dev, "using msi-x interrupts\n"); + return 0; + } else if (err > 0) { + pci_disable_msix(pdev); + + dev_info(&pdev->dev, + "Unable to allocate %d MSI-X vectors, Available vectors %d\n", + num_msix, err); + + if (qlcnic_82xx_check(adapter)) { + num_msix = rounddown_pow_of_two(err); + if (err < QLCNIC_82XX_MINIMUM_VECTOR) + return -ENOSPC; + } else { + num_msix = rounddown_pow_of_two(err - 1); + num_msix += 1; + if (err < QLCNIC_83XX_MINIMUM_VECTOR) + return -ENOSPC; + } + + if (qlcnic_82xx_check(adapter) && + !qlcnic_check_multi_tx(adapter)) { + adapter->drv_sds_rings = num_msix; + adapter->drv_tx_rings = QLCNIC_SINGLE_RING; + } else { + /* Distribute vectors equally */ + adapter->drv_tx_rings = num_msix / 2; + adapter->drv_sds_rings = 
adapter->drv_tx_rings; + } + + if (num_msix) { + dev_info(&pdev->dev, + "Trying to allocate %d MSI-X interrupt vectors\n", + num_msix); + goto enable_msix; + } + } else { + dev_info(&pdev->dev, + "Unable to allocate %d MSI-X vectors, err=%d\n", + num_msix, err); + return err; + } + } + + return -EIO; +} + +static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter) +{ + int num_msix; + + num_msix = adapter->drv_sds_rings; + + if (qlcnic_check_multi_tx(adapter)) + num_msix += adapter->drv_tx_rings; + else + num_msix += QLCNIC_SINGLE_RING; + + return num_msix; +} + +static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) +{ + int err = 0; + u32 offset, mask_reg; + const struct qlcnic_legacy_intr_set *legacy_intrp; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct pci_dev *pdev = adapter->pdev; + + if (qlcnic_use_msi && !pci_enable_msi(pdev)) { + adapter->flags |= QLCNIC_MSI_ENABLED; + offset = msi_tgt_status[adapter->ahw->pci_func]; + adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw, + offset); + dev_info(&pdev->dev, "using msi interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; + return err; + } + + if (qlcnic_use_msi || qlcnic_use_msi_x) + return -EOPNOTSUPP; + + legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; + adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit; + offset = legacy_intrp->tgt_status_reg; + adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset); + mask_reg = legacy_intrp->tgt_mask_reg; + adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg); + adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR); + adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG); + dev_info(&pdev->dev, "using legacy interrupts\n"); + adapter->msix_entries[0].vector = pdev->irq; + return err; +} + +static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter) +{ + int num_msix, err = 0; + + if (adapter->flags & QLCNIC_TSS_RSS) { + err = qlcnic_setup_tss_rss_intr(adapter); + if (err < 0) + return err; + num_msix = adapter->ahw->num_msix; + } else { + num_msix = qlcnic_82xx_calculate_msix_vector(adapter); + + err = qlcnic_enable_msix(adapter, num_msix); + if (err == -ENOMEM) + return err; + + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + qlcnic_disable_multi_tx(adapter); + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; + + err = qlcnic_enable_msi_legacy(adapter); + if (err) + return err; + } + } + + return 0; +} + +int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err, i; + + if (qlcnic_check_multi_tx(adapter) && + !ahw->diag_test && + (adapter->flags & QLCNIC_MSIX_ENABLED)) { + ahw->intr_tbl = vzalloc(ahw->num_msix * + sizeof(struct qlcnic_intrpt_config)); + if (!ahw->intr_tbl) + return -ENOMEM; + + for (i = 0; i < ahw->num_msix; i++) { + ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX; + ahw->intr_tbl[i].id = i; + ahw->intr_tbl[i].src = 0; + } + + err = qlcnic_82xx_config_intrpt(adapter, 1); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to configure Interrupt for %d vector\n", + ahw->num_msix); + return err; + } + + return 0; +} + +void qlcnic_teardown_intr(struct qlcnic_adapter *adapter) +{ + if (adapter->flags & QLCNIC_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + if (adapter->flags & QLCNIC_MSI_ENABLED) + pci_disable_msi(adapter->pdev); + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + if (adapter->ahw->intr_tbl) { + vfree(adapter->ahw->intr_tbl); + adapter->ahw->intr_tbl = NULL; + } +} + 
+static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw) +{ + if (ahw->pci_base0 != NULL) + iounmap(ahw->pci_base0); +} + +static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_pci_info *pci_info; + int ret; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + switch (ahw->port_type) { + case QLCNIC_GBE: + ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS; + break; + case QLCNIC_XGBE: + ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS; + break; + } + return 0; + } + + if (ahw->op_mode == QLCNIC_MGMT_FUNC) + return 0; + + pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + ret = qlcnic_get_pci_info(adapter, pci_info); + kfree(pci_info); + return ret; +} + +static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter) +{ + bool ret = false; + + if (qlcnic_84xx_check(adapter)) { + ret = true; + } else if (qlcnic_83xx_check(adapter)) { + if (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG) + ret = true; + else + ret = false; + } + + return ret; +} + +int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_pci_info *pci_info; + int i, id = 0, ret = 0, j = 0; + u16 act_pci_func; + u8 pfn; + + pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (ret) + goto err_pci_info; + + act_pci_func = ahw->total_nic_func; + + adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * + act_pci_func, GFP_KERNEL); + if (!adapter->npars) { + ret = -ENOMEM; + goto err_pci_info; + } + + adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * + QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); + if (!adapter->eswitch) { + ret = -ENOMEM; + goto err_npars; + } + + for (i = 0; i < ahw->max_vnic_func; i++) { + pfn = pci_info[i].id; + + if (pfn >= ahw->max_vnic_func) { + ret = QL_STATUS_INVALID_PARAM; + dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n", + __func__, pfn, ahw->max_vnic_func); + goto err_eswitch; + } + + if (!pci_info[i].active || + (pci_info[i].type != QLCNIC_TYPE_NIC)) + continue; + + if (qlcnic_port_eswitch_cfg_capability(adapter)) { + if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn, + &id)) + adapter->npars[j].eswitch_status = true; + else + continue; + } else { + adapter->npars[j].eswitch_status = true; + } + + adapter->npars[j].pci_func = pfn; + adapter->npars[j].active = (u8)pci_info[i].active; + adapter->npars[j].type = (u8)pci_info[i].type; + adapter->npars[j].phy_port = (u8)pci_info[i].default_port; + adapter->npars[j].min_bw = pci_info[i].tx_min_bw; + adapter->npars[j].max_bw = pci_info[i].tx_max_bw; + + memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN); + j++; + } + + /* Update eSwitch status for adapters without per port eSwitch + * configuration capability + */ + if (!qlcnic_port_eswitch_cfg_capability(adapter)) { + for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) + adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE; + } + + kfree(pci_info); + return 0; + +err_eswitch: + kfree(adapter->eswitch); + adapter->eswitch = NULL; +err_npars: + kfree(adapter->npars); + adapter->npars = NULL; +err_pci_info: + kfree(pci_info); + + return ret; +} + +static int +qlcnic_set_function_modes(struct qlcnic_adapter *adapter) +{ + u8 id; + int ret; + u32 data = QLCNIC_MGMT_FUNC; + struct 
qlcnic_hardware_context *ahw = adapter->ahw; + + ret = qlcnic_api_lock(adapter); + if (ret) + goto err_lock; + + id = ahw->pci_func; + data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); + data = (data & ~QLC_DEV_SET_DRV(0xf, id)) | + QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id); + QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data); + qlcnic_api_unlock(adapter); +err_lock: + return ret; +} + +static void qlcnic_check_vf(struct qlcnic_adapter *adapter, + const struct pci_device_id *ent) +{ + u32 op_mode, priv_level; + + /* Determine FW API version */ + adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter, + QLCNIC_FW_API); + + /* Find PCI function number */ + qlcnic_get_func_no(adapter); + + /* Determine function privilege level */ + op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); + if (op_mode == QLC_DEV_DRV_DEFAULT) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (priv_level == QLCNIC_NON_PRIV_FUNC) { + adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged function\n", + adapter->ahw->fw_hal_version); + adapter->nic_ops = &qlcnic_vf_ops; + } else + adapter->nic_ops = &qlcnic_ops; +} + +#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL +#define QLCNIC_83XX_BAR0_LENGTH 0x4000 +static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) +{ + switch (dev_id) { + case PCI_DEVICE_ID_QLOGIC_QLE824X: + *bar = QLCNIC_82XX_BAR0_LENGTH; + break; + case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: + case PCI_DEVICE_ID_QLOGIC_QLE844X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: + *bar = QLCNIC_83XX_BAR0_LENGTH; + break; + default: + *bar = 0; + } +} + +static int qlcnic_setup_pci_map(struct pci_dev *pdev, + struct qlcnic_hardware_context *ahw) +{ + u32 offset; + void __iomem *mem_ptr0 = NULL; + unsigned long mem_len, pci_len0 = 0, bar0_len; + + /* remap phys address */ + mem_len = pci_resource_len(pdev, 0); + + qlcnic_get_bar_length(pdev->device, &bar0_len); + if (mem_len >= bar0_len) { + + mem_ptr0 = pci_ioremap_bar(pdev, 0); + if (mem_ptr0 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + return -EIO; + } + pci_len0 = mem_len; + } else { + return -EIO; + } + + dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10)); + + ahw->pci_base0 = mem_ptr0; + ahw->pci_len0 = pci_len0; + offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func)); + qlcnic_get_ioaddr(ahw, offset); + + return 0; +} + +static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter, + int index) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned short subsystem_vendor; + bool ret = true; + + subsystem_vendor = pdev->subsystem_vendor; + + if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X || + pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { + if (qlcnic_boards[index].sub_vendor == subsystem_vendor && + qlcnic_boards[index].sub_device == pdev->subsystem_device) + ret = true; + else + ret = false; + } + + return ret; +} + +static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name) +{ + struct pci_dev *pdev = adapter->pdev; + int i, found = 0; + + for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) { + if (qlcnic_boards[i].vendor == pdev->vendor && + qlcnic_boards[i].device == pdev->device && + qlcnic_validate_subsystem_id(adapter, i)) { + found = 1; + break; + } + } + + if (!found) + sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr); + else + sprintf(name, "%pM: %s" , adapter->mac_addr, + 
qlcnic_boards[i].short_name); +} + +static void +qlcnic_check_options(struct qlcnic_adapter *adapter) +{ + int err; + u32 fw_major, fw_minor, fw_build, prev_fw_version; + struct pci_dev *pdev = adapter->pdev; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; + + prev_fw_version = adapter->fw_version; + + fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR); + fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR); + fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB); + + adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); + + err = qlcnic_get_board_info(adapter); + if (err) { + dev_err(&pdev->dev, "Error getting board config info.\n"); + return; + } + if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) { + if (fw_dump->tmpl_hdr == NULL || + adapter->fw_version > prev_fw_version) { + vfree(fw_dump->tmpl_hdr); + if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) + dev_info(&pdev->dev, + "Supports FW dump capability\n"); + } + } + + dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n", + QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build); + + if (adapter->ahw->port_type == QLCNIC_XGBE) { + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF; + } else { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; + } + + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + + } else if (adapter->ahw->port_type == QLCNIC_GBE) { + adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; + adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; + } + + adapter->ahw->msix_supported = !!qlcnic_use_msi_x; + + adapter->num_txd = MAX_CMD_DESCRIPTORS; + + adapter->max_rds_rings = MAX_RDS_RINGS; +} + +static int +qlcnic_initialize_nic(struct qlcnic_adapter *adapter) +{ + struct qlcnic_info nic_info; + int err = 0; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); + if (err) + return err; + + adapter->ahw->physical_port = (u8)nic_info.phys_port; + adapter->ahw->switch_mode = nic_info.switch_mode; + adapter->ahw->max_tx_ques = nic_info.max_tx_ques; + adapter->ahw->max_rx_ques = nic_info.max_rx_ques; + adapter->ahw->capabilities = nic_info.capabilities; + + if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { + u32 temp; + temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err); + if (err == -EIO) + return err; + adapter->ahw->extra_capability[0] = temp; + } else { + adapter->ahw->extra_capability[0] = 0; + } + + adapter->ahw->max_mac_filters = nic_info.max_mac_filters; + adapter->ahw->max_mtu = nic_info.max_mtu; + + if (adapter->ahw->capabilities & BIT_6) { + adapter->flags |= QLCNIC_ESWITCH_ENABLED; + adapter->ahw->nic_mode = QLCNIC_VNIC_MODE; + adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS; + adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; + + dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n"); + } else { + adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; + adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS; + adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; + adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; + } + + return err; +} + +void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + if 
(esw_cfg->discard_tagged) + adapter->flags &= ~QLCNIC_TAGGING_ENABLED; + else + adapter->flags |= QLCNIC_TAGGING_ENABLED; + + if (esw_cfg->vlan_id) { + adapter->rx_pvid = esw_cfg->vlan_id; + adapter->tx_pvid = esw_cfg->vlan_id; + } else { + adapter->rx_pvid = 0; + adapter->tx_pvid = 0; + } +} + +static int +qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err; + + if (qlcnic_sriov_vf_check(adapter)) { + err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1); + if (err) { + netdev_err(netdev, + "Cannot add VLAN filter for VLAN id %d, err=%d", + vid, err); + return err; + } + } + + set_bit(vid, adapter->vlans); + return 0; +} + +static int +qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err; + + if (qlcnic_sriov_vf_check(adapter)) { + err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0); + if (err) { + netdev_err(netdev, + "Cannot delete VLAN filter for VLAN id %d, err=%d", + vid, err); + return err; + } + } + + qlcnic_restore_indev_addr(netdev, NETDEV_DOWN); + clear_bit(vid, adapter->vlans); + return 0; +} + +void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED | + QLCNIC_PROMISC_DISABLED); + + if (esw_cfg->mac_anti_spoof) + adapter->flags |= QLCNIC_MACSPOOF; + + if (!esw_cfg->mac_override) + adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED; + + if (!esw_cfg->promisc_mode) + adapter->flags |= QLCNIC_PROMISC_DISABLED; +} + +int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) +{ + struct qlcnic_esw_func_cfg esw_cfg; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return 0; + + esw_cfg.pci_func = adapter->ahw->pci_func; + if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg)) + return -EIO; + qlcnic_set_vlan_config(adapter, &esw_cfg); + qlcnic_set_eswitch_port_features(adapter, &esw_cfg); + qlcnic_set_netdev_features(adapter, &esw_cfg); + + return 0; +} + +void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) +{ + struct net_device *netdev = adapter->netdev; + + if (qlcnic_83xx_check(adapter)) + return; + + adapter->offload_flags = esw_cfg->offload_flags; + adapter->flags |= QLCNIC_APP_CHANGED_FLAGS; + netdev_update_features(netdev); + adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS; +} + +static int +qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter) +{ + u32 op_mode, priv_level; + int err = 0; + + err = qlcnic_initialize_nic(adapter); + if (err) + return err; + + if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED) + return 0; + + op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE); + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (op_mode == QLC_DEV_DRV_DEFAULT) + priv_level = QLCNIC_MGMT_FUNC; + else + priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); + + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { + if (priv_level == QLCNIC_MGMT_FUNC) { + adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; + err = qlcnic_init_pci_info(adapter); + if (err) + return err; + /* Set privilege level for other functions */ + qlcnic_set_function_modes(adapter); + dev_info(&adapter->pdev->dev, + "HAL Version: %d, Management function\n", + adapter->ahw->fw_hal_version); + } else if (priv_level == QLCNIC_PRIV_FUNC) { + adapter->ahw->op_mode = QLCNIC_PRIV_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d, 
Privileged function\n", + adapter->ahw->fw_hal_version); + } + } else { + adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE; + } + + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + + return err; +} + +int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter) +{ + struct qlcnic_esw_func_cfg esw_cfg; + struct qlcnic_npar_info *npar; + u8 i; + + if (adapter->need_fw_reset) + return 0; + + for (i = 0; i < adapter->ahw->total_nic_func; i++) { + if (!adapter->npars[i].eswitch_status) + continue; + + memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg)); + esw_cfg.pci_func = adapter->npars[i].pci_func; + esw_cfg.mac_override = BIT_0; + esw_cfg.promisc_mode = BIT_0; + if (qlcnic_82xx_check(adapter)) { + esw_cfg.offload_flags = BIT_0; + if (QLCNIC_IS_TSO_CAPABLE(adapter)) + esw_cfg.offload_flags |= (BIT_1 | BIT_2); + } + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + npar = &adapter->npars[i]; + npar->pvid = esw_cfg.vlan_id; + npar->mac_override = esw_cfg.mac_override; + npar->mac_anti_spoof = esw_cfg.mac_anti_spoof; + npar->discard_tagged = esw_cfg.discard_tagged; + npar->promisc_mode = esw_cfg.promisc_mode; + npar->offload_flags = esw_cfg.offload_flags; + } + + return 0; +} + + +static int +qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter, + struct qlcnic_npar_info *npar, int pci_func) +{ + struct qlcnic_esw_func_cfg esw_cfg; + esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS; + esw_cfg.pci_func = pci_func; + esw_cfg.vlan_id = npar->pvid; + esw_cfg.mac_override = npar->mac_override; + esw_cfg.discard_tagged = npar->discard_tagged; + esw_cfg.mac_anti_spoof = npar->mac_anti_spoof; + esw_cfg.offload_flags = npar->offload_flags; + esw_cfg.promisc_mode = npar->promisc_mode; + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + + esw_cfg.op_mode = QLCNIC_ADD_VLAN; + if (qlcnic_config_switch_port(adapter, &esw_cfg)) + return -EIO; + + return 0; +} + +int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) +{ + int i, err; + struct qlcnic_npar_info *npar; + struct qlcnic_info nic_info; + u8 pci_func; + + if (qlcnic_82xx_check(adapter)) + if (!adapter->need_fw_reset) + return 0; + + /* Set the NPAR config data after FW reset */ + for (i = 0; i < adapter->ahw->total_nic_func; i++) { + npar = &adapter->npars[i]; + pci_func = npar->pci_func; + if (!adapter->npars[i].eswitch_status) + continue; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + err = qlcnic_get_nic_info(adapter, &nic_info, pci_func); + if (err) + return err; + nic_info.min_tx_bw = npar->min_bw; + nic_info.max_tx_bw = npar->max_bw; + err = qlcnic_set_nic_info(adapter, &nic_info); + if (err) + return err; + + if (npar->enable_pm) { + err = qlcnic_config_port_mirroring(adapter, + npar->dest_npar, 1, + pci_func); + if (err) + return err; + } + err = qlcnic_reset_eswitch_config(adapter, npar, pci_func); + if (err) + return err; + } + return 0; +} + +static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter) +{ + u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO; + u32 npar_state; + + if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) + return 0; + + npar_state = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DEV_NPAR_STATE); + while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) { + msleep(1000); + npar_state = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DEV_NPAR_STATE); + } + if (!npar_opt_timeo) { + dev_err(&adapter->pdev->dev, + "Waiting for NPAR state to operational timeout\n"); + return -EIO; + } + return 0; +} + +static int +qlcnic_set_mgmt_operations(struct qlcnic_adapter 
*adapter) +{ + int err; + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || + adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) + return 0; + + err = qlcnic_set_default_offload_settings(adapter); + if (err) + return err; + + err = qlcnic_reset_npar_config(adapter); + if (err) + return err; + + qlcnic_dev_set_npar_ready(adapter); + + return err; +} + +static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_can_start_firmware(adapter); + if (err < 0) + return err; + else if (!err) + goto check_fw_status; + + if (qlcnic_load_fw_file) + qlcnic_request_firmware(adapter); + else { + err = qlcnic_check_flash_fw_ver(adapter); + if (err) + goto err_out; + + adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE; + } + + err = qlcnic_need_fw_reset(adapter); + if (err == 0) + goto check_fw_status; + + err = qlcnic_pinit_from_rom(adapter); + if (err) + goto err_out; + + err = qlcnic_load_firmware(adapter); + if (err) + goto err_out; + + qlcnic_release_firmware(adapter); + QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION); + +check_fw_status: + err = qlcnic_check_fw_status(adapter); + if (err) + goto err_out; + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); + qlcnic_idc_debug_info(adapter, 1); + err = qlcnic_check_eswitch_mode(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Memory allocation failed for eswitch\n"); + goto err_out; + } + err = qlcnic_set_mgmt_operations(adapter); + if (err) + goto err_out; + + qlcnic_check_options(adapter); + adapter->need_fw_reset = 0; + + qlcnic_release_firmware(adapter); + return 0; + +err_out: + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); + dev_err(&adapter->pdev->dev, "Device state set to failed\n"); + + qlcnic_release_firmware(adapter); + return err; +} + +static int +qlcnic_request_irq(struct qlcnic_adapter *adapter) +{ + irq_handler_t handler; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + int err, ring, num_sds_rings; + + unsigned long flags = 0; + struct net_device *netdev = adapter->netdev; + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + if (qlcnic_82xx_check(adapter)) + handler = qlcnic_tmp_intr; + else + handler = qlcnic_83xx_tmp_intr; + if (!QLCNIC_IS_MSI_FAMILY(adapter)) + flags |= IRQF_SHARED; + + } else { + if (adapter->flags & QLCNIC_MSIX_ENABLED) + handler = qlcnic_msix_intr; + else if (adapter->flags & QLCNIC_MSI_ENABLED) + handler = qlcnic_msi_intr; + else { + flags |= IRQF_SHARED; + if (qlcnic_82xx_check(adapter)) + handler = qlcnic_intr; + else + handler = qlcnic_83xx_intr; + } + } + adapter->irq = netdev->irq; + + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { + if (qlcnic_82xx_check(adapter) || + (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED))) { + num_sds_rings = adapter->drv_sds_rings; + for (ring = 0; ring < num_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + if (qlcnic_82xx_check(adapter) && + !qlcnic_check_multi_tx(adapter) && + (ring == (num_sds_rings - 1))) { + if (!(adapter->flags & + QLCNIC_MSIX_ENABLED)) + snprintf(sds_ring->name, + sizeof(sds_ring->name), + "qlcnic"); + else + snprintf(sds_ring->name, + sizeof(sds_ring->name), + "%s-tx-0-rx-%d", + netdev->name, ring); + } else { + snprintf(sds_ring->name, + sizeof(sds_ring->name), + "%s-rx-%d", + netdev->name, ring); + } + err = request_irq(sds_ring->irq, handler, flags, + sds_ring->name, sds_ring); + if (err) + return err; + } + 
} + if ((qlcnic_82xx_check(adapter) && + qlcnic_check_multi_tx(adapter)) || + (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED))) { + handler = qlcnic_msix_tx_intr; + for (ring = 0; ring < adapter->drv_tx_rings; + ring++) { + tx_ring = &adapter->tx_ring[ring]; + snprintf(tx_ring->name, sizeof(tx_ring->name), + "%s-tx-%d", netdev->name, ring); + err = request_irq(tx_ring->irq, handler, flags, + tx_ring->name, tx_ring); + if (err) + return err; + } + } + } + return 0; +} + +static void +qlcnic_free_irq(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + + if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { + if (qlcnic_82xx_check(adapter) || + (qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_MSIX_ENABLED))) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + free_irq(sds_ring->irq, sds_ring); + } + } + if ((qlcnic_83xx_check(adapter) && + !(adapter->flags & QLCNIC_TX_INTR_SHARED)) || + (qlcnic_82xx_check(adapter) && + qlcnic_check_multi_tx(adapter))) { + for (ring = 0; ring < adapter->drv_tx_rings; + ring++) { + tx_ring = &adapter->tx_ring[ring]; + if (tx_ring->irq) + free_irq(tx_ring->irq, tx_ring); + } + } + } +} + +static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter) +{ + u32 capab = 0; + + if (qlcnic_82xx_check(adapter)) { + if (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) + adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; + } else { + capab = adapter->ahw->capabilities; + if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab)) + adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; + } +} + +static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err; + + /* Initialize interrupt coalesce parameters */ + ahw->coal.flag = QLCNIC_INTR_DEFAULT; + + if (qlcnic_83xx_check(adapter)) { + ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX; + ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US; + ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS; + ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; + ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; + + err = qlcnic_83xx_set_rx_tx_intr_coal(adapter); + } else { + ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX; + ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; + ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; + + err = qlcnic_82xx_set_rx_coalesce(adapter); + } + + return err; +} + +int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int ring; + struct qlcnic_host_rds_ring *rds_ring; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return -EIO; + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return 0; + + if (qlcnic_set_eswitch_port_config(adapter)) + return -EIO; + + qlcnic_get_lro_mss_capability(adapter); + + if (qlcnic_fw_create_ctx(adapter)) + return -EIO; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring, ring); + } + + qlcnic_set_multi(netdev); + qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu); + + adapter->ahw->linkup = 0; + + if (adapter->drv_sds_rings > 1) + qlcnic_config_rss(adapter, 1); + + qlcnic_config_def_intr_coalesce(adapter); + + if (netdev->features & NETIF_F_LRO) + 
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); + + set_bit(__QLCNIC_DEV_UP, &adapter->state); + qlcnic_napi_enable(adapter); + + qlcnic_linkevent_request(adapter, 1); + + adapter->ahw->reset_context = 0; + netif_tx_start_all_queues(netdev); + return 0; +} + +int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int err = 0; + + rtnl_lock(); + if (netif_running(netdev)) + err = __qlcnic_up(adapter, netdev); + rtnl_unlock(); + + return err; +} + +void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + int ring; + + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) + return; + + smp_mb(); + netif_carrier_off(netdev); + adapter->ahw->linkup = 0; + netif_tx_disable(netdev); + + qlcnic_free_mac_list(adapter); + + if (adapter->fhash.fnum) + qlcnic_delete_lb_filters(adapter); + + qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); + if (qlcnic_sriov_vf_check(adapter)) + qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); + + qlcnic_napi_disable(adapter); + + qlcnic_fw_destroy_ctx(adapter); + adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP; + + qlcnic_reset_rx_buffers_list(adapter); + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) + qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); +} + +/* Usage: During suspend and firmware recovery module */ + +void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev) +{ + rtnl_lock(); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + rtnl_unlock(); + +} + +int +qlcnic_attach(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) + return 0; + + err = qlcnic_napi_add(adapter, netdev); + if (err) + return err; + + err = qlcnic_alloc_sw_resources(adapter); + if (err) { + dev_err(&pdev->dev, "Error in setting sw resources\n"); + goto err_out_napi_del; + } + + err = qlcnic_alloc_hw_resources(adapter); + if (err) { + dev_err(&pdev->dev, "Error in setting hw resources\n"); + goto err_out_free_sw; + } + + err = qlcnic_request_irq(adapter); + if (err) { + dev_err(&pdev->dev, "failed to setup interrupt\n"); + goto err_out_free_hw; + } + + qlcnic_create_sysfs_entries(adapter); + +#ifdef CONFIG_QLCNIC_VXLAN + if (qlcnic_encap_rx_offload(adapter)) + vxlan_get_rx_port(netdev); +#endif + + adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; + return 0; + +err_out_free_hw: + qlcnic_free_hw_resources(adapter); +err_out_free_sw: + qlcnic_free_sw_resources(adapter); +err_out_napi_del: + qlcnic_napi_del(adapter); + return err; +} + +void qlcnic_detach(struct qlcnic_adapter *adapter) +{ + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) + return; + + qlcnic_remove_sysfs_entries(adapter); + + qlcnic_free_hw_resources(adapter); + qlcnic_release_rx_buffers(adapter); + qlcnic_free_irq(adapter); + qlcnic_napi_del(adapter); + qlcnic_free_sw_resources(adapter); + + adapter->is_up = 0; +} + +void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + int drv_tx_rings = adapter->drv_tx_rings; + int ring; + + clear_bit(__QLCNIC_DEV_UP, &adapter->state); + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_disable_sds_intr(adapter, sds_ring); + } + } + + 
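+ /* Destroy the diagnostic firmware context, then reattach with the saved SDS/Tx ring counts. */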
qlcnic_fw_destroy_ctx(adapter); + + qlcnic_detach(adapter); + + adapter->ahw->diag_test = 0; + adapter->drv_sds_rings = drv_sds_rings; + adapter->drv_tx_rings = drv_tx_rings; + + if (qlcnic_attach(adapter)) + goto out; + + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); +out: + netif_device_attach(netdev); +} + +static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err = 0; + + adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context), + GFP_KERNEL); + if (!adapter->recv_ctx) { + err = -ENOMEM; + goto err_out; + } + + if (qlcnic_83xx_check(adapter)) { + ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX; + ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US; + ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS; + ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; + ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; + } else { + ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX; + ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US; + ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS; + } + + /* clear stats */ + memset(&adapter->stats, 0, sizeof(adapter->stats)); +err_out: + return err; +} + +static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + + kfree(adapter->recv_ctx); + adapter->recv_ctx = NULL; + + if (fw_dump->tmpl_hdr) { + vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; + } + + if (fw_dump->dma_buffer) { + dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE, + fw_dump->dma_buffer, fw_dump->phys_addr); + fw_dump->dma_buffer = NULL; + } + + kfree(adapter->ahw->reset.buff); + adapter->ahw->fw_dump.tmpl_hdr = NULL; +} + +int qlcnic_diag_alloc_res(struct net_device *netdev, int test) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_rds_ring *rds_ring; + int ring; + int ret; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; + adapter->ahw->diag_test = test; + adapter->ahw->linkup = 0; + + ret = qlcnic_attach(adapter); + if (ret) { + netif_device_attach(netdev); + return ret; + } + + ret = qlcnic_fw_create_ctx(adapter); + if (ret) { + qlcnic_detach(adapter); + netif_device_attach(netdev); + return ret; + } + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &adapter->recv_ctx->rds_rings[ring]; + qlcnic_post_rx_buffers(adapter, rds_ring, ring); + } + + if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) { + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &adapter->recv_ctx->sds_rings[ring]; + qlcnic_enable_sds_intr(adapter, sds_ring); + } + } + + if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) { + adapter->ahw->loopback_state = 0; + qlcnic_linkevent_request(adapter, 1); + } + + set_bit(__QLCNIC_DEV_UP, &adapter->state); + + return 0; +} + +/* Reset context in hardware only */ +static int +qlcnic_reset_hw_context(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + netif_device_detach(netdev); + + qlcnic_down(adapter, netdev); + + qlcnic_up(adapter, netdev); + + netif_device_attach(netdev); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__); 
+ return 0; +} + +int +qlcnic_reset_context(struct qlcnic_adapter *adapter) +{ + int err = 0; + struct net_device *netdev = adapter->netdev; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) { + + netif_device_detach(netdev); + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (!err) { + __qlcnic_up(adapter, netdev); + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + } + + netif_device_attach(netdev); + } + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u16 act_pci_fn = ahw->total_nic_func; + u16 count; + + ahw->max_mc_count = QLCNIC_MAX_MC_COUNT; + if (act_pci_fn <= 2) + count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) / + act_pci_fn; + else + count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) / + act_pci_fn; + ahw->max_uc_count = count; +} + +static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, + u8 tx_queues, u8 rx_queues) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (tx_queues) { + err = netif_set_real_num_tx_queues(netdev, tx_queues); + if (err) { + netdev_err(netdev, "failed to set %d Tx queues\n", + tx_queues); + return err; + } + } + + if (rx_queues) { + err = netif_set_real_num_rx_queues(netdev, rx_queues); + if (err) + netdev_err(netdev, "failed to set %d Rx queues\n", + rx_queues); + } + + return err; +} + +int +qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, + int pci_using_dac) +{ + int err; + struct pci_dev *pdev = adapter->pdev; + + adapter->rx_csum = 1; + adapter->ahw->mc_enabled = 0; + qlcnic_set_mac_filter_count(adapter); + + netdev->netdev_ops = &qlcnic_netdev_ops; + netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ; + + qlcnic_change_mtu(netdev, netdev->mtu); + + netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ? 
+ &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops; + + netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_IPV6_CSUM | NETIF_F_GRO | + NETIF_F_HW_VLAN_CTAG_RX); + netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + + if (QLCNIC_IS_TSO_CAPABLE(adapter)) { + netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6); + netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); + } + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (qlcnic_vlan_tx_check(adapter)) + netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX); + + if (qlcnic_sriov_vf_check(adapter)) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) + netdev->features |= NETIF_F_LRO; + + if (qlcnic_encap_tx_offload(adapter)) { + netdev->features |= NETIF_F_GSO_UDP_TUNNEL; + + /* encapsulation Tx offload supported by Adapter */ + netdev->hw_enc_features = NETIF_F_IP_CSUM | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO | + NETIF_F_TSO6; + } + + if (qlcnic_encap_rx_offload(adapter)) + netdev->hw_enc_features |= NETIF_F_RXCSUM; + + netdev->hw_features = netdev->features; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->irq = adapter->msix_entries[0].vector; + + err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings, + adapter->drv_sds_rings); + if (err) + return err; + + qlcnic_dcb_init_dcbnl_ops(adapter->dcb); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register net device\n"); + return err; + } + + return 0; +} + +static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac) +{ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + *pci_using_dac = 1; + else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + *pci_using_dac = 0; + else { + dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); + return -EIO; + } + + return 0; +} + +void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter) +{ + int ring; + struct qlcnic_host_tx_ring *tx_ring; + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + if (tx_ring) { + vfree(tx_ring->cmd_buf_arr); + tx_ring->cmd_buf_arr = NULL; + } + } + kfree(adapter->tx_ring); +} + +int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, + struct net_device *netdev) +{ + int ring, vector, index; + struct qlcnic_host_tx_ring *tx_ring; + struct qlcnic_cmd_buffer *cmd_buf_arr; + + tx_ring = kcalloc(adapter->drv_tx_rings, + sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL); + if (tx_ring == NULL) + return -ENOMEM; + + adapter->tx_ring = tx_ring; + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + tx_ring->num_desc = adapter->num_txd; + tx_ring->txq = netdev_get_tx_queue(netdev, ring); + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); + if (cmd_buf_arr == NULL) { + qlcnic_free_tx_rings(adapter); + return -ENOMEM; + } + memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); + tx_ring->cmd_buf_arr = cmd_buf_arr; + spin_lock_init(&tx_ring->tx_clean_lock); + } + + if (qlcnic_83xx_check(adapter) || + (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) { + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + tx_ring->adapter = adapter; + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + index = adapter->drv_sds_rings + ring; + vector = 
adapter->msix_entries[index].vector; + tx_ring->irq = vector; + } + } + } + + return 0; +} + +void qlcnic_set_drv_version(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 fw_cmd = 0; + + if (qlcnic_82xx_check(adapter)) + fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER; + else if (qlcnic_83xx_check(adapter)) + fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER; + + if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER) + qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); +} + +/* Reset firmware API lock */ +static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter) +{ + qlcnic_api_lock(adapter); + qlcnic_api_unlock(adapter); +} + + +static int +qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev = NULL; + struct qlcnic_adapter *adapter = NULL; + struct qlcnic_hardware_context *ahw; + int err, pci_using_dac = -1; + char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + err = -ENODEV; + goto err_out_disable_pdev; + } + + err = qlcnic_set_dma_mask(pdev, &pci_using_dac); + if (err) + goto err_out_disable_pdev; + + err = pci_request_regions(pdev, qlcnic_driver_name); + if (err) + goto err_out_disable_pdev; + + pci_set_master(pdev); + pci_enable_pcie_error_reporting(pdev); + + ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL); + if (!ahw) { + err = -ENOMEM; + goto err_out_free_res; + } + + switch (ent->device) { + case PCI_DEVICE_ID_QLOGIC_QLE824X: + ahw->hw_ops = &qlcnic_hw_ops; + ahw->reg_tbl = (u32 *) qlcnic_reg_tbl; + break; + case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: + case PCI_DEVICE_ID_QLOGIC_QLE844X: + qlcnic_83xx_register_map(ahw); + break; + case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: + qlcnic_sriov_vf_register_map(ahw); + break; + default: + goto err_out_free_hw_res; + } + + err = qlcnic_setup_pci_map(pdev, ahw); + if (err) + goto err_out_free_hw_res; + + netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter), + QLCNIC_MAX_TX_RINGS); + if (!netdev) { + err = -ENOMEM; + goto err_out_iounmap; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ahw = ahw; + + adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic"); + if (adapter->qlcnic_wq == NULL) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to create workqueue\n"); + goto err_out_free_netdev; + } + + err = qlcnic_alloc_adapter_resources(adapter); + if (err) + goto err_out_free_wq; + + adapter->dev_rst_time = jiffies; + ahw->revision_id = pdev->revision; + ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter); + if (qlcnic_mac_learn == FDB_MAC_LEARN) + adapter->fdb_mac_learn = true; + else if (qlcnic_mac_learn == DRV_MAC_LEARN) + adapter->drv_mac_learn = true; + + rwlock_init(&adapter->ahw->crb_lock); + mutex_init(&adapter->ahw->mem_lock); + + INIT_LIST_HEAD(&adapter->mac_list); + + qlcnic_register_dcb(adapter); + + if (qlcnic_82xx_check(adapter)) { + qlcnic_check_vf(adapter, ent); + adapter->portnum = adapter->ahw->pci_func; + qlcnic_reset_api_lock(adapter); + err = qlcnic_start_firmware(adapter); + if (err) { + dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" + "\t\tIf reboot doesn't help, try flashing the card\n"); + goto err_out_maintenance_mode; + } + + /* compute and set default and max tx/sds rings */ + if 
(adapter->ahw->msix_supported) { + if (qlcnic_check_multi_tx_capability(adapter) == 1) + qlcnic_set_tx_ring_count(adapter, + QLCNIC_SINGLE_RING); + else + qlcnic_set_tx_ring_count(adapter, + QLCNIC_DEF_TX_RINGS); + qlcnic_set_sds_ring_count(adapter, + QLCNIC_DEF_SDS_RINGS); + } else { + qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING); + qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING); + } + + err = qlcnic_setup_idc_param(adapter); + if (err) + goto err_out_free_hw; + + adapter->flags |= QLCNIC_NEED_FLR; + + } else if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_check_vf(adapter, ent); + adapter->portnum = adapter->ahw->pci_func; + err = qlcnic_83xx_init(adapter, pci_using_dac); + if (err) { + switch (err) { + case -ENOTRECOVERABLE: + dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n"); + dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n"); + goto err_out_free_hw; + case -ENOMEM: + dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n"); + goto err_out_free_hw; + case -EOPNOTSUPP: + dev_err(&pdev->dev, "Adapter initialization failed\n"); + goto err_out_free_hw; + default: + dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n"); + goto err_out_maintenance_mode; + } + } + + if (qlcnic_sriov_vf_check(adapter)) + return 0; + } else { + dev_err(&pdev->dev, + "%s: failed. Please Reboot\n", __func__); + err = -ENODEV; + goto err_out_free_hw; + } + + if (qlcnic_read_mac_addr(adapter)) + dev_warn(&pdev->dev, "failed to read mac addr\n"); + + qlcnic_read_phys_port_id(adapter); + + if (adapter->portnum == 0) { + qlcnic_get_board_name(adapter, board_name); + + pr_info("%s: %s Board Chip rev 0x%x\n", + module_name(THIS_MODULE), + board_name, adapter->ahw->revision_id); + } + + if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x && + !!qlcnic_use_msi) + dev_warn(&pdev->dev, + "Device does not support MSI interrupts\n"); + + if (qlcnic_82xx_check(adapter)) { + qlcnic_dcb_enable(adapter->dcb); + qlcnic_dcb_get_info(adapter->dcb); + err = qlcnic_setup_intr(adapter); + + if (err) { + dev_err(&pdev->dev, "Failed to setup interrupt\n"); + goto err_out_disable_msi; + } + } + + err = qlcnic_get_act_pci_func(adapter); + if (err) + goto err_out_disable_mbx_intr; + + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); + + err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); + if (err) + goto err_out_disable_mbx_intr; + + pci_set_drvdata(pdev, adapter); + + if (qlcnic_82xx_check(adapter)) + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, + FW_POLL_DELAY); + + switch (adapter->ahw->port_type) { + case QLCNIC_GBE: + dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", + adapter->netdev->name); + break; + case QLCNIC_XGBE: + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + break; + } + + if (adapter->drv_mac_learn) + qlcnic_alloc_lb_filters_mem(adapter); + + qlcnic_add_sysfs(adapter); + qlcnic_register_hwmon_dev(adapter); + return 0; + +err_out_disable_mbx_intr: + if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_free_mbx_intr(adapter); + +err_out_disable_msi: + qlcnic_teardown_intr(adapter); + qlcnic_cancel_idc_work(adapter); + qlcnic_clr_all_drv_state(adapter, 0); + +err_out_free_hw: + qlcnic_free_adapter_resources(adapter); + +err_out_free_wq: + destroy_workqueue(adapter->qlcnic_wq); + +err_out_free_netdev: + free_netdev(netdev); + +err_out_iounmap: + 
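+ /* Unmap the PCI BARs set up earlier by qlcnic_setup_pci_map(). */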
qlcnic_cleanup_pci_map(ahw); + +err_out_free_hw_res: + kfree(ahw); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_disable_device(pdev); + return err; + +err_out_maintenance_mode: + set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state); + netdev->netdev_ops = &qlcnic_netdev_failed_ops; + netdev->ethtool_ops = &qlcnic_ethtool_failed_ops; + ahw->port_type = QLCNIC_XGBE; + + if (qlcnic_83xx_check(adapter)) + adapter->tgt_status_reg = NULL; + else + ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS; + + err = register_netdev(netdev); + + if (err) { + dev_err(&pdev->dev, "Failed to register net device\n"); + qlcnic_clr_all_drv_state(adapter, 0); + goto err_out_free_hw; + } + + pci_set_drvdata(pdev, adapter); + qlcnic_add_sysfs(adapter); + + return 0; +} + +static void qlcnic_remove(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter; + struct net_device *netdev; + struct qlcnic_hardware_context *ahw; + + adapter = pci_get_drvdata(pdev); + if (adapter == NULL) + return; + + netdev = adapter->netdev; + + qlcnic_cancel_idc_work(adapter); + qlcnic_sriov_pf_disable(adapter); + ahw = adapter->ahw; + + unregister_netdev(netdev); + qlcnic_sriov_cleanup(adapter); + + if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_initialize_nic(adapter, 0); + cancel_delayed_work_sync(&adapter->idc_aen_work); + qlcnic_83xx_free_mbx_intr(adapter); + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_free_mailbox(ahw->mailbox); + kfree(ahw->fw_info); + } + + qlcnic_dcb_free(adapter->dcb); + qlcnic_detach(adapter); + kfree(adapter->npars); + kfree(adapter->eswitch); + + if (qlcnic_82xx_check(adapter)) + qlcnic_clr_all_drv_state(adapter, 0); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + qlcnic_free_lb_filters_mem(adapter); + + qlcnic_teardown_intr(adapter); + + qlcnic_remove_sysfs(adapter); + + qlcnic_unregister_hwmon_dev(adapter); + + qlcnic_cleanup_pci_map(adapter->ahw); + + qlcnic_release_firmware(adapter); + + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + + if (adapter->qlcnic_wq) { + destroy_workqueue(adapter->qlcnic_wq); + adapter->qlcnic_wq = NULL; + } + + qlcnic_free_adapter_resources(adapter); + kfree(ahw); + free_netdev(netdev); +} + +static void qlcnic_shutdown(struct pci_dev *pdev) +{ + if (__qlcnic_shutdown(pdev)) + return; + + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + + retval = __qlcnic_shutdown(pdev); + if (retval) + return retval; + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int qlcnic_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_set_master(pdev); + pci_restore_state(pdev); + + return __qlcnic_resume(adapter); +} +#endif + +static int qlcnic_open(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + int err; + + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { + netdev_err(netdev, "%s: Device is in non-operational state\n", + __func__); + + return -EIO; + } + + netif_carrier_off(netdev); + + err = qlcnic_attach(adapter); + if (err) + return err; + + err = __qlcnic_up(adapter, netdev); + if (err) + qlcnic_detach(adapter); + + return err; +} + +/* + * qlcnic_close - Disables a network interface entry point + */ +static int qlcnic_close(struct net_device *netdev) +{ + 
struct qlcnic_adapter *adapter = netdev_priv(netdev); + + __qlcnic_down(adapter, netdev); + + return 0; +} + +#define QLCNIC_VF_LB_BUCKET_SIZE 1 + +void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter) +{ + void *head; + int i; + struct net_device *netdev = adapter->netdev; + u32 filter_size = 0; + u16 act_pci_func = 0; + + if (adapter->fhash.fmax && adapter->fhash.fhead) + return; + + act_pci_func = adapter->ahw->total_nic_func; + spin_lock_init(&adapter->mac_learn_lock); + spin_lock_init(&adapter->rx_mac_learn_lock); + + if (qlcnic_sriov_vf_check(adapter)) { + filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1; + adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE; + } else if (qlcnic_82xx_check(adapter)) { + filter_size = QLCNIC_LB_MAX_FILTERS; + adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE; + } else { + filter_size = QLC_83XX_LB_MAX_FILTERS; + adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE; + } + + head = kcalloc(adapter->fhash.fbucket_size, + sizeof(struct hlist_head), GFP_ATOMIC); + + if (!head) + return; + + adapter->fhash.fmax = (filter_size / act_pci_func); + adapter->fhash.fhead = head; + + netdev_info(netdev, "active nic func = %d, mac filter size=%d\n", + act_pci_func, adapter->fhash.fmax); + + for (i = 0; i < adapter->fhash.fbucket_size; i++) + INIT_HLIST_HEAD(&adapter->fhash.fhead[i]); + + adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size; + + head = kcalloc(adapter->rx_fhash.fbucket_size, + sizeof(struct hlist_head), GFP_ATOMIC); + + if (!head) + return; + + adapter->rx_fhash.fmax = (filter_size / act_pci_func); + adapter->rx_fhash.fhead = head; + + for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) + INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]); +} + +static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter) +{ + if (adapter->fhash.fmax) + kfree(adapter->fhash.fhead); + + adapter->fhash.fhead = NULL; + adapter->fhash.fmax = 0; + + if (adapter->rx_fhash.fmax) + kfree(adapter->rx_fhash.fhead); + + adapter->rx_fhash.fmax = 0; + adapter->rx_fhash.fhead = NULL; +} + +int qlcnic_check_temp(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 temp_state, temp_val, temp = 0; + int rv = 0; + + if (qlcnic_83xx_check(adapter)) + temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP); + + if (qlcnic_82xx_check(adapter)) + temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP); + + temp_state = qlcnic_get_temp_state(temp); + temp_val = qlcnic_get_temp_val(temp); + + if (temp_state == QLCNIC_TEMP_PANIC) { + dev_err(&netdev->dev, + "Device temperature %d degrees C exceeds" + " maximum allowed. Hardware has been shut down.\n", + temp_val); + rv = 1; + } else if (temp_state == QLCNIC_TEMP_WARN) { + if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) { + dev_err(&netdev->dev, + "Device temperature %d degrees C " + "exceeds operating range." 
+ " Immediate action needed.\n", + temp_val); + } + } else { + if (adapter->ahw->temp == QLCNIC_TEMP_WARN) { + dev_info(&netdev->dev, + "Device temperature is now %d degrees C" + " in normal range.\n", temp_val); + } + } + adapter->ahw->temp = temp_state; + return rv; +} + +static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring) +{ + int i; + struct cmd_desc_type0 *tx_desc_info; + + for (i = 0; i < tx_ring->num_desc; i++) { + tx_desc_info = &tx_ring->desc_head[i]; + pr_info("TX Desc: %d\n", i); + print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, + &tx_ring->desc_head[i], + sizeof(struct cmd_desc_type0), true); + } +} + +static void qlcnic_dump_rings(struct qlcnic_adapter *adapter) +{ + struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; + struct net_device *netdev = adapter->netdev; + struct qlcnic_host_rds_ring *rds_ring; + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + if (!netdev || !netif_running(netdev)) + return; + + for (ring = 0; ring < adapter->max_rds_rings; ring++) { + rds_ring = &recv_ctx->rds_rings[ring]; + if (!rds_ring) + continue; + netdev_info(netdev, + "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n", + ring, readl(rds_ring->crb_rcv_producer), + rds_ring->producer, rds_ring->num_desc); + } + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &(recv_ctx->sds_rings[ring]); + if (!sds_ring) + continue; + netdev_info(netdev, + "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n", + ring, readl(sds_ring->crb_sts_consumer), + sds_ring->consumer, readl(sds_ring->crb_intr_mask), + sds_ring->num_desc); + } + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + if (!tx_ring) + continue; + netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n", + ring, tx_ring->ctx_id); + netdev_info(netdev, + "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n", + tx_ring->tx_stats.xmit_finished, + tx_ring->tx_stats.xmit_called, + tx_ring->tx_stats.xmit_on, + tx_ring->tx_stats.xmit_off); + + if (tx_ring->crb_intr_mask) + netdev_info(netdev, "crb_intr_mask=%d\n", + readl(tx_ring->crb_intr_mask)); + + netdev_info(netdev, + "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", + readl(tx_ring->crb_cmd_producer), + tx_ring->producer, tx_ring->sw_consumer, + le32_to_cpu(*(tx_ring->hw_consumer))); + + netdev_info(netdev, "Total desc=%d, Available desc=%d\n", + tx_ring->num_desc, qlcnic_tx_avail(tx_ring)); + + if (netif_msg_tx_err(adapter->ahw)) + dump_tx_ring_desc(tx_ring); + } + +} + +static void qlcnic_tx_timeout(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + return; + + qlcnic_dump_rings(adapter); + + if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS || + netif_msg_tx_err(adapter->ahw)) { + netdev_err(netdev, "Tx timeout, reset the adapter.\n"); + if (qlcnic_82xx_check(adapter)) + adapter->need_fw_reset = 1; + else if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_idc_request_reset(adapter, + QLCNIC_FORCE_FW_DUMP_KEY); + } else { + netdev_err(netdev, "Tx timeout, reset adapter context.\n"); + adapter->ahw->reset_context = 1; + } +} + +static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *stats = &netdev->stats; + + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_update_stats(adapter); + + 
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; + stats->tx_packets = adapter->stats.xmitfinished; + stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; + stats->tx_bytes = adapter->stats.txbytes; + stats->rx_dropped = adapter->stats.rxdropped; + stats->tx_dropped = adapter->stats.txdropped; + + return stats; +} + +static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter) +{ + u32 status; + + status = readl(adapter->isr_int_vec); + + if (!(status & adapter->ahw->int_vec_bit)) + return IRQ_NONE; + + /* check interrupt state machine, to be sure */ + status = readl(adapter->crb_int_state_reg); + if (!ISR_LEGACY_INT_TRIGGERED(status)) + return IRQ_NONE; + + writel(0xffffffff, adapter->tgt_status_reg); + /* read twice to ensure write is flushed */ + readl(adapter->isr_int_vec); + readl(adapter->isr_int_vec); + + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_tmp_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (adapter->flags & QLCNIC_MSIX_ENABLED) + goto done; + else if (adapter->flags & QLCNIC_MSI_ENABLED) { + writel(0xffffffff, adapter->tgt_status_reg); + goto done; + } + + if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + +done: + adapter->ahw->diag_cnt++; + qlcnic_enable_sds_intr(adapter, sds_ring); + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE) + return IRQ_NONE; + + napi_schedule(&sds_ring->napi); + + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_msi_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + struct qlcnic_adapter *adapter = sds_ring->adapter; + + /* clear interrupt */ + writel(0xffffffff, adapter->tgt_status_reg); + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_msix_intr(int irq, void *data) +{ + struct qlcnic_host_sds_ring *sds_ring = data; + + napi_schedule(&sds_ring->napi); + return IRQ_HANDLED; +} + +static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data) +{ + struct qlcnic_host_tx_ring *tx_ring = data; + + napi_schedule(&tx_ring->napi); + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void qlcnic_poll_controller(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_host_sds_ring *sds_ring; + struct qlcnic_recv_context *recv_ctx; + struct qlcnic_host_tx_ring *tx_ring; + int ring; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + return; + + recv_ctx = adapter->recv_ctx; + + for (ring = 0; ring < adapter->drv_sds_rings; ring++) { + sds_ring = &recv_ctx->sds_rings[ring]; + qlcnic_disable_sds_intr(adapter, sds_ring); + napi_schedule(&sds_ring->napi); + } + + if (adapter->flags & QLCNIC_MSIX_ENABLED) { + /* Only Multi-Tx queue capable devices need to + * schedule NAPI for TX rings + */ + if ((qlcnic_83xx_check(adapter) && + (adapter->flags & QLCNIC_TX_INTR_SHARED)) || + (qlcnic_82xx_check(adapter) && + !qlcnic_check_multi_tx(adapter))) + return; + + for (ring = 0; ring < adapter->drv_tx_rings; ring++) { + tx_ring = &adapter->tx_ring[ring]; + qlcnic_disable_tx_intr(adapter, tx_ring); + napi_schedule(&tx_ring->napi); + } + } +} +#endif + +static void +qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) +{ + u32 val; + + val = adapter->portnum & 0xf; + 
val |= encoding << 7; + val |= (jiffies - adapter->dev_rst_time) << 8; + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val); + adapter->dev_rst_time = jiffies; +} + +static int +qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state) +{ + u32 val; + + WARN_ON(state != QLCNIC_DEV_NEED_RESET && + state != QLCNIC_DEV_NEED_QUISCENT); + + if (qlcnic_api_lock(adapter)) + return -EIO; + + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + + if (state == QLCNIC_DEV_NEED_RESET) + QLC_DEV_SET_RST_RDY(val, adapter->portnum); + else if (state == QLCNIC_DEV_NEED_QUISCENT) + QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum); + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); + + return 0; +} + +static int +qlcnic_clr_drv_state(struct qlcnic_adapter *adapter) +{ + u32 val; + + if (qlcnic_api_lock(adapter)) + return -EBUSY; + + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); + + return 0; +} + +void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed) +{ + u32 val; + + if (qlcnic_api_lock(adapter)) + goto err; + + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + QLC_DEV_CLR_REF_CNT(val, adapter->portnum); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + + if (failed) { + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_FAILED); + dev_info(&adapter->pdev->dev, + "Device state set to Failed. Please Reboot\n"); + } else if (!(val & 0x11111111)) + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_COLD); + + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + qlcnic_api_unlock(adapter); +err: + adapter->fw_fail_cnt = 0; + adapter->flags &= ~QLCNIC_FW_HANG; + clear_bit(__QLCNIC_START_FW, &adapter->state); + clear_bit(__QLCNIC_RESETTING, &adapter->state); +} + +/* Grab api lock, before checking state */ +static int +qlcnic_check_drv_state(struct qlcnic_adapter *adapter) +{ + int act, state, active_mask; + struct qlcnic_hardware_context *ahw = adapter->ahw; + + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + + if (adapter->flags & QLCNIC_FW_RESET_OWNER) { + active_mask = (~(1 << (ahw->pci_func * 4))); + act = act & active_mask; + } + + if (((state & 0x11111111) == (act & 0x11111111)) || + ((act & 0x11111111) == ((state >> 1) & 0x11111111))) + return 0; + else + return 1; +} + +static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter) +{ + u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER); + + if (val != QLCNIC_DRV_IDC_VER) { + dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's" + " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val); + } + + return 0; +} + +static int +qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) +{ + u32 val, prev_state; + u8 dev_init_timeo = adapter->dev_init_timeo; + u8 portnum = adapter->portnum; + u8 ret; + + if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) + return 1; + + if (qlcnic_api_lock(adapter)) + return -1; + + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE); + if (!(val & (1 << (portnum * 4)))) { + QLC_DEV_SET_REF_CNT(val, portnum); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val); + } + + prev_state = QLC_SHARED_REG_RD32(adapter, 
QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Device state = %u\n", prev_state); + + switch (prev_state) { + case QLCNIC_DEV_COLD: + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER, + QLCNIC_DRV_IDC_VER); + qlcnic_idc_debug_info(adapter, 0); + qlcnic_api_unlock(adapter); + return 1; + + case QLCNIC_DEV_READY: + ret = qlcnic_check_idc_ver(adapter); + qlcnic_api_unlock(adapter); + return ret; + + case QLCNIC_DEV_NEED_RESET: + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_RST_RDY(val, portnum); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); + break; + + case QLCNIC_DEV_NEED_QUISCENT: + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_QSCNT_RDY(val, portnum); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); + break; + + case QLCNIC_DEV_FAILED: + dev_err(&adapter->pdev->dev, "Device in failed state.\n"); + qlcnic_api_unlock(adapter); + return -1; + + case QLCNIC_DEV_INITIALIZING: + case QLCNIC_DEV_QUISCENT: + break; + } + + qlcnic_api_unlock(adapter); + + do { + msleep(1000); + prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (prev_state == QLCNIC_DEV_QUISCENT) + continue; + } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo); + + if (!dev_init_timeo) { + dev_err(&adapter->pdev->dev, + "Waiting for device to initialize timeout\n"); + return -1; + } + + if (qlcnic_api_lock(adapter)) + return -1; + + val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE); + QLC_DEV_CLR_RST_QSCNT(val, portnum); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val); + + ret = qlcnic_check_idc_ver(adapter); + qlcnic_api_unlock(adapter); + + return ret; +} + +static void +qlcnic_fwinit_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + u32 dev_state = 0xf; + u32 val; + + if (qlcnic_api_lock(adapter)) + goto err_ret; + + dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + if (dev_state == QLCNIC_DEV_QUISCENT || + dev_state == QLCNIC_DEV_NEED_QUISCENT) { + qlcnic_api_unlock(adapter); + qlcnic_schedule_work(adapter, qlcnic_fwinit_work, + FW_POLL_DELAY * 2); + return; + } + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + qlcnic_api_unlock(adapter); + goto wait_npar; + } + + if (dev_state == QLCNIC_DEV_INITIALIZING || + dev_state == QLCNIC_DEV_READY) { + dev_info(&adapter->pdev->dev, "Detected state change from " + "DEV_NEED_RESET, skipping ack check\n"); + goto skip_ack_check; + } + + if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { + dev_info(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n", + adapter->reset_ack_timeo); + goto skip_ack_check; + } + + if (!qlcnic_check_drv_state(adapter)) { +skip_ack_check: + dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (dev_state == QLCNIC_DEV_NEED_RESET) { + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); + set_bit(__QLCNIC_START_FW, &adapter->state); + QLCDB(adapter, DRV, "Restarting fw\n"); + qlcnic_idc_debug_info(adapter, 0); + val = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DRV_STATE); + QLC_DEV_SET_RST_RDY(val, adapter->portnum); + QLC_SHARED_REG_WR32(adapter, + QLCNIC_CRB_DRV_STATE, val); + } + + qlcnic_api_unlock(adapter); + + rtnl_lock(); + if (qlcnic_check_fw_dump_state(adapter) && + (adapter->flags & QLCNIC_FW_RESET_OWNER)) { + QLCDB(adapter, DRV, "Take FW dump\n"); + qlcnic_dump_fw(adapter); + adapter->flags |= 
QLCNIC_FW_HANG; + } + rtnl_unlock(); + + adapter->flags &= ~QLCNIC_FW_RESET_OWNER; + if (!adapter->nic_ops->start_firmware(adapter)) { + qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); + adapter->fw_wait_cnt = 0; + return; + } + goto err_ret; + } + + qlcnic_api_unlock(adapter); + +wait_npar: + dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); + + switch (dev_state) { + case QLCNIC_DEV_READY: + if (!qlcnic_start_firmware(adapter)) { + qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); + adapter->fw_wait_cnt = 0; + return; + } + case QLCNIC_DEV_FAILED: + break; + default: + qlcnic_schedule_work(adapter, + qlcnic_fwinit_work, FW_POLL_DELAY); + return; + } + +err_ret: + dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u " + "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); + netif_device_attach(adapter->netdev); + qlcnic_clr_all_drv_state(adapter, 0); +} + +static void +qlcnic_detach_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + u32 status; + + netif_device_detach(netdev); + + /* Dont grab rtnl lock during Quiscent mode */ + if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) { + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + } else + qlcnic_down(adapter, netdev); + + status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); + + if (status & QLCNIC_RCODE_FATAL_ERROR) { + dev_err(&adapter->pdev->dev, + "Detaching the device: peg halt status1=0x%x\n", + status); + + if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) { + dev_err(&adapter->pdev->dev, + "On board active cooling fan failed. " + "Device has been halted.\n"); + dev_err(&adapter->pdev->dev, + "Replace the adapter.\n"); + } + + goto err_ret; + } + + if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) { + dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n", + adapter->ahw->temp); + goto err_ret; + } + + /* Dont ack if this instance is the reset owner */ + if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) { + if (qlcnic_set_drv_state(adapter, adapter->dev_state)) { + dev_err(&adapter->pdev->dev, + "Failed to set driver state," + "detaching the device.\n"); + goto err_ret; + } + } + + adapter->fw_wait_cnt = 0; + + qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY); + + return; + +err_ret: + netif_device_attach(netdev); + qlcnic_clr_all_drv_state(adapter, 1); +} + +/*Transit NPAR state to NON Operational */ +static void +qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter) +{ + u32 state; + + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); + if (state == QLCNIC_DEV_NPAR_NON_OPER) + return; + + if (qlcnic_api_lock(adapter)) + return; + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_DEV_NPAR_NON_OPER); + qlcnic_api_unlock(adapter); +} + +static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, + u32 key) +{ + u32 state, xg_val = 0, gb_val = 0; + + qlcnic_xg_set_xg0_mask(xg_val); + qlcnic_xg_set_xg1_mask(xg_val); + QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val); + qlcnic_gb_set_gb0_mask(gb_val); + qlcnic_gb_set_gb1_mask(gb_val); + qlcnic_gb_set_gb2_mask(gb_val); + qlcnic_gb_set_gb3_mask(gb_val); + QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val); + dev_info(&adapter->pdev->dev, "Pause control frames disabled" + " on all ports\n"); + adapter->need_fw_reset = 1; + + if (qlcnic_api_lock(adapter)) + 
return; + + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { + netdev_err(adapter->netdev, "%s: Device is in non-operational state\n", + __func__); + qlcnic_api_unlock(adapter); + + return; + } + + if (state == QLCNIC_DEV_READY) { + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_NEED_RESET); + adapter->flags |= QLCNIC_FW_RESET_OWNER; + QLCDB(adapter, DRV, "NEED_RESET state set\n"); + qlcnic_idc_debug_info(adapter, 0); + } + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_DEV_NPAR_NON_OPER); + qlcnic_api_unlock(adapter); +} + +/* Transit to NPAR READY state from NPAR NOT READY state */ +static void +qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) +{ + if (qlcnic_api_lock(adapter)) + return; + + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, + QLCNIC_DEV_NPAR_OPER); + QLCDB(adapter, DRV, "NPAR operational state set\n"); + + qlcnic_api_unlock(adapter); +} + +void qlcnic_schedule_work(struct qlcnic_adapter *adapter, + work_func_t func, int delay) +{ + if (test_bit(__QLCNIC_AER, &adapter->state)) + return; + + INIT_DELAYED_WORK(&adapter->fw_work, func); + queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work, + round_jiffies_relative(delay)); +} + +static void +qlcnic_attach_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + struct net_device *netdev = adapter->netdev; + u32 npar_state; + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) { + npar_state = QLC_SHARED_REG_RD32(adapter, + QLCNIC_CRB_DEV_NPAR_STATE); + if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO) + qlcnic_clr_all_drv_state(adapter, 0); + else if (npar_state != QLCNIC_DEV_NPAR_OPER) + qlcnic_schedule_work(adapter, qlcnic_attach_work, + FW_POLL_DELAY); + else + goto attach; + QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n"); + return; + } +attach: + qlcnic_dcb_get_info(adapter->dcb); + + if (netif_running(netdev)) { + if (qlcnic_up(adapter, netdev)) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + +done: + netif_device_attach(netdev); + adapter->fw_fail_cnt = 0; + adapter->flags &= ~QLCNIC_FW_HANG; + clear_bit(__QLCNIC_RESETTING, &adapter->state); + if (adapter->portnum == 0) + qlcnic_set_drv_version(adapter); + + if (!qlcnic_clr_drv_state(adapter)) + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, + FW_POLL_DELAY); +} + +static int +qlcnic_check_health(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; + u32 state = 0, heartbeat; + u32 peg_status; + int err = 0; + + if (qlcnic_check_temp(adapter)) + goto detach; + + if (adapter->need_fw_reset) + qlcnic_dev_request_reset(adapter, 0); + + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + if (state == QLCNIC_DEV_NEED_RESET) { + qlcnic_set_npar_non_operational(adapter); + adapter->need_fw_reset = 1; + } else if (state == QLCNIC_DEV_NEED_QUISCENT) + goto detach; + + heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); + if (heartbeat != adapter->heartbeat) { + adapter->heartbeat = heartbeat; + adapter->fw_fail_cnt = 0; + if (adapter->need_fw_reset) + goto detach; + + if (ahw->reset_context && qlcnic_auto_fw_reset) + qlcnic_reset_hw_context(adapter); + + return 0; + } + + if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) + return 0; + + adapter->flags |= QLCNIC_FW_HANG; + + qlcnic_dev_request_reset(adapter, 0); + + if 
(qlcnic_auto_fw_reset) + clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); + + dev_err(&adapter->pdev->dev, "firmware hang detected\n"); + peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1); + dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n" + "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" + "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" + "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" + "PEG_NET_4_PC: 0x%x\n", + peg_status, + QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err), + QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err)); + if (QLCNIC_FWERROR_CODE(peg_status) == 0x67) + dev_err(&adapter->pdev->dev, + "Firmware aborted with error code 0x00006700. " + "Device is being reset.\n"); +detach: + adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : + QLCNIC_DEV_NEED_RESET; + + if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING, + &adapter->state)) { + + qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); + QLCDB(adapter, DRV, "fw recovery scheduled.\n"); + } else if (!qlcnic_auto_fw_reset && fw_dump->enable && + adapter->flags & QLCNIC_FW_RESET_OWNER) { + qlcnic_dump_fw(adapter); + } + + return 1; +} + +void qlcnic_fw_poll_work(struct work_struct *work) +{ + struct qlcnic_adapter *adapter = container_of(work, + struct qlcnic_adapter, fw_work.work); + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + goto reschedule; + + + if (qlcnic_check_health(adapter)) + return; + + if (adapter->fhash.fnum) + qlcnic_prune_lb_filters(adapter); + +reschedule: + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); +} + +static int qlcnic_is_first_func(struct pci_dev *pdev) +{ + struct pci_dev *oth_pdev; + int val = pdev->devfn; + + while (val-- > 0) { + oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr + (pdev->bus), pdev->bus->number, + PCI_DEVFN(PCI_SLOT(pdev->devfn), val)); + if (!oth_pdev) + continue; + + if (oth_pdev->current_state != PCI_D3cold) { + pci_dev_put(oth_pdev); + return 0; + } + pci_dev_put(oth_pdev); + } + return 1; +} + +static int qlcnic_attach_func(struct pci_dev *pdev) +{ + int err, first_func; + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + pdev->error_state = pci_channel_io_normal; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_master(pdev); + pci_restore_state(pdev); + + first_func = qlcnic_is_first_func(pdev); + + if (qlcnic_api_lock(adapter)) + return -EINVAL; + + if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { + adapter->need_fw_reset = 1; + set_bit(__QLCNIC_START_FW, &adapter->state); + QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, + QLCNIC_DEV_INITIALIZING); + QLCDB(adapter, DRV, "Restarting fw\n"); + } + qlcnic_api_unlock(adapter); + + err = qlcnic_start_firmware(adapter); + if (err) + return err; + + qlcnic_clr_drv_state(adapter); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + err = qlcnic_setup_intr(adapter); + + if (err) { + kfree(adapter->msix_entries); + netdev_err(netdev, "failed to setup interrupt\n"); + return err; + } + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (err) { + qlcnic_clr_all_drv_state(adapter, 1); + clear_bit(__QLCNIC_AER, &adapter->state); + netif_device_attach(netdev); + return err; + } + + err = 
qlcnic_up(adapter, netdev); + if (err) + goto done; + + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + done: + netif_device_attach(netdev); + return err; +} + +static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (state == pci_channel_io_normal) + return PCI_ERS_RESULT_RECOVERED; + + set_bit(__QLCNIC_AER, &adapter->state); + netif_device_detach(netdev); + + cancel_delayed_work_sync(&adapter->fw_work); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + qlcnic_teardown_intr(adapter); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + + pci_save_state(pdev); + pci_disable_device(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev) +{ + return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : + PCI_ERS_RESULT_RECOVERED; +} + +static void qlcnic_82xx_io_resume(struct pci_dev *pdev) +{ + u32 state; + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + + pci_cleanup_aer_uncorrect_error_status(pdev); + state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE); + if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER, + &adapter->state)) + qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, + FW_POLL_DELAY); +} + +static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; + + if (hw_ops->io_error_detected) { + return hw_ops->io_error_detected(pdev, state); + } else { + dev_err(&pdev->dev, "AER error_detected handler not registered.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } +} + +static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; + + if (hw_ops->io_slot_reset) { + return hw_ops->io_slot_reset(pdev); + } else { + dev_err(&pdev->dev, "AER slot_reset handler not registered.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } +} + +static void qlcnic_io_resume(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops; + + if (hw_ops->io_resume) + hw_ops->io_resume(pdev); + else + dev_err(&pdev->dev, "AER resume handler not registered.\n"); +} + + +static int +qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) +{ + int err; + + err = qlcnic_can_start_firmware(adapter); + if (err) + return err; + + err = qlcnic_check_npar_opertional(adapter); + if (err) + return err; + + err = qlcnic_initialize_nic(adapter); + if (err) + return err; + + qlcnic_check_options(adapter); + + err = qlcnic_set_eswitch_port_config(adapter); + if (err) + return err; + + adapter->need_fw_reset = 0; + + return err; +} + +int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, + int queue_type) +{ + struct net_device *netdev = adapter->netdev; + u8 max_hw_rings = 0; + char buf[8]; + int cur_rings; + + if (queue_type == QLCNIC_RX_QUEUE) { + max_hw_rings = adapter->max_sds_rings; + cur_rings = adapter->drv_sds_rings; + strcpy(buf, "SDS"); + } else if (queue_type == QLCNIC_TX_QUEUE) { + max_hw_rings = adapter->max_tx_rings; + cur_rings = 
adapter->drv_tx_rings; + strcpy(buf, "Tx"); + } + + if (!is_power_of_2(ring_cnt)) { + netdev_err(netdev, "%s rings value should be a power of 2\n", + buf); + return -EINVAL; + } + + if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) && + !qlcnic_check_multi_tx(adapter)) { + netdev_err(netdev, "No Multi Tx queue support\n"); + return -EINVAL; + } + + if (ring_cnt > num_online_cpus()) { + netdev_err(netdev, + "%s value[%u] should not be higher than, number of online CPUs\n", + buf, num_online_cpus()); + return -EINVAL; + } + + return 0; +} + +int qlcnic_setup_rings(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u8 tx_rings, rx_rings; + int err; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + tx_rings = adapter->drv_tss_rings; + rx_rings = adapter->drv_rss_rings; + + netif_device_detach(netdev); + + err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings); + if (err) + goto done; + + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_detach(adapter); + + if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_free_mbx_intr(adapter); + qlcnic_83xx_enable_mbx_poll(adapter); + } + + qlcnic_teardown_intr(adapter); + + err = qlcnic_setup_intr(adapter); + if (err) { + kfree(adapter->msix_entries); + netdev_err(netdev, "failed to setup interrupt\n"); + return err; + } + + /* Check if we need to update real_num_{tx|rx}_queues because + * qlcnic_setup_intr() may change Tx/Rx rings size + */ + if ((tx_rings != adapter->drv_tx_rings) || + (rx_rings != adapter->drv_sds_rings)) { + err = qlcnic_set_real_num_queues(adapter, + adapter->drv_tx_rings, + adapter->drv_sds_rings); + if (err) + goto done; + } + + if (qlcnic_83xx_check(adapter)) { + qlcnic_83xx_initialize_nic(adapter, 1); + err = qlcnic_83xx_setup_mbx_intr(adapter); + qlcnic_83xx_disable_mbx_poll(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "failed to setup mbx interrupt\n"); + goto done; + } + } + + if (netif_running(netdev)) { + err = qlcnic_attach(adapter); + if (err) + goto done; + err = __qlcnic_up(adapter, netdev); + if (err) + goto done; + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } +done: + netif_device_attach(netdev); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +#ifdef CONFIG_INET + +#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) + +static void +qlcnic_config_indev_addr(struct qlcnic_adapter *adapter, + struct net_device *dev, unsigned long event) +{ + struct in_device *indev; + + indev = in_dev_get(dev); + if (!indev) + return; + + for_ifa(indev) { + switch (event) { + case NETDEV_UP: + qlcnic_config_ipaddr(adapter, + ifa->ifa_address, QLCNIC_IP_UP); + break; + case NETDEV_DOWN: + qlcnic_config_ipaddr(adapter, + ifa->ifa_address, QLCNIC_IP_DOWN); + break; + default: + break; + } + } endfor_ifa(indev); + + in_dev_put(indev); +} + +void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct net_device *dev; + u16 vid; + + qlcnic_config_indev_addr(adapter, netdev, event); + + rcu_read_lock(); + for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) { + dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid); + if (!dev) + continue; + qlcnic_config_indev_addr(adapter, dev, event); + } + rcu_read_unlock(); +} + +static int qlcnic_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct qlcnic_adapter *adapter; + struct net_device *dev = 
netdev_notifier_info_to_dev(ptr); + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_qlcnic_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto done; + + qlcnic_config_indev_addr(adapter, dev, event); +done: + return NOTIFY_DONE; +} + +static int +qlcnic_inetaddr_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct qlcnic_adapter *adapter; + struct net_device *dev; + + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + + dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; + +recheck: + if (dev == NULL) + goto done; + + if (dev->priv_flags & IFF_802_1Q_VLAN) { + dev = vlan_dev_real_dev(dev); + goto recheck; + } + + if (!is_qlcnic_netdev(dev)) + goto done; + + adapter = netdev_priv(dev); + + if (!adapter) + goto done; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto done; + + switch (event) { + case NETDEV_UP: + qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP); + + break; + case NETDEV_DOWN: + qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN); + + break; + default: + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block qlcnic_netdev_cb = { + .notifier_call = qlcnic_netdev_event, +}; + +static struct notifier_block qlcnic_inetaddr_cb = { + .notifier_call = qlcnic_inetaddr_event, +}; +#else +void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) +{ } +#endif +static const struct pci_error_handlers qlcnic_err_handler = { + .error_detected = qlcnic_io_error_detected, + .slot_reset = qlcnic_io_slot_reset, + .resume = qlcnic_io_resume, +}; + +static struct pci_driver qlcnic_driver = { + .name = qlcnic_driver_name, + .id_table = qlcnic_pci_tbl, + .probe = qlcnic_probe, + .remove = qlcnic_remove, +#ifdef CONFIG_PM + .suspend = qlcnic_suspend, + .resume = qlcnic_resume, +#endif + .shutdown = qlcnic_shutdown, + .err_handler = &qlcnic_err_handler, +#ifdef CONFIG_QLCNIC_SRIOV + .sriov_configure = qlcnic_pci_sriov_configure, +#endif + +}; + +static int __init qlcnic_init_module(void) +{ + int ret; + + printk(KERN_INFO "%s\n", qlcnic_driver_string); + +#ifdef CONFIG_INET + register_netdevice_notifier(&qlcnic_netdev_cb); + register_inetaddr_notifier(&qlcnic_inetaddr_cb); +#endif + + ret = pci_register_driver(&qlcnic_driver); + if (ret) { +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); + unregister_netdevice_notifier(&qlcnic_netdev_cb); +#endif + } + + return ret; +} + +module_init(qlcnic_init_module); + +static void __exit qlcnic_exit_module(void) +{ + pci_unregister_driver(&qlcnic_driver); + +#ifdef CONFIG_INET + unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); + unregister_netdevice_notifier(&qlcnic_netdev_cb); +#endif +} + +module_exit(qlcnic_exit_module); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c new file mode 100644 index 000000000..332bb8a3f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -0,0 +1,1414 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic.h" +#include "qlcnic_hdr.h" +#include "qlcnic_83xx_hw.h" +#include "qlcnic_hw.h" + +#include + +#define QLC_83XX_MINIDUMP_FLASH 0x520000 +#define QLC_83XX_OCM_INDEX 3 +#define QLC_83XX_PCI_INDEX 0 +#define QLC_83XX_DMA_ENGINE_INDEX 8 + +static const u32 qlcnic_ms_read_data[] = { + 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC +}; + +#define QLCNIC_DUMP_WCRB BIT_0 +#define QLCNIC_DUMP_RWCRB BIT_1 +#define QLCNIC_DUMP_ANDCRB BIT_2 +#define QLCNIC_DUMP_ORCRB BIT_3 +#define QLCNIC_DUMP_POLLCRB BIT_4 +#define QLCNIC_DUMP_RD_SAVE BIT_5 +#define QLCNIC_DUMP_WRT_SAVED BIT_6 +#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7 +#define QLCNIC_DUMP_SKIP BIT_7 + +#define QLCNIC_DUMP_MASK_MAX 0xff + +struct qlcnic_pex_dma_descriptor { + u32 read_data_size; + u32 dma_desc_cmd; + u32 src_addr_low; + u32 src_addr_high; + u32 dma_bus_addr_low; + u32 dma_bus_addr_high; + u32 rsvd[6]; +} __packed; + +struct qlcnic_common_entry_hdr { + u32 type; + u32 offset; + u32 cap_size; +#if defined(__LITTLE_ENDIAN) + u8 mask; + u8 rsvd[2]; + u8 flags; +#else + u8 flags; + u8 rsvd[2]; + u8 mask; +#endif +} __packed; + +struct __crb { + u32 addr; +#if defined(__LITTLE_ENDIAN) + u8 stride; + u8 rsvd1[3]; +#else + u8 rsvd1[3]; + u8 stride; +#endif + u32 data_size; + u32 no_ops; + u32 rsvd2[4]; +} __packed; + +struct __ctrl { + u32 addr; +#if defined(__LITTLE_ENDIAN) + u8 stride; + u8 index_a; + u16 timeout; +#else + u16 timeout; + u8 index_a; + u8 stride; +#endif + u32 data_size; + u32 no_ops; +#if defined(__LITTLE_ENDIAN) + u8 opcode; + u8 index_v; + u8 shl_val; + u8 shr_val; +#else + u8 shr_val; + u8 shl_val; + u8 index_v; + u8 opcode; +#endif + u32 val1; + u32 val2; + u32 val3; +} __packed; + +struct __cache { + u32 addr; +#if defined(__LITTLE_ENDIAN) + u16 stride; + u16 init_tag_val; +#else + u16 init_tag_val; + u16 stride; +#endif + u32 size; + u32 no_ops; + u32 ctrl_addr; + u32 ctrl_val; + u32 read_addr; +#if defined(__LITTLE_ENDIAN) + u8 read_addr_stride; + u8 read_addr_num; + u8 rsvd1[2]; +#else + u8 rsvd1[2]; + u8 read_addr_num; + u8 read_addr_stride; +#endif +} __packed; + +struct __ocm { + u8 rsvd[8]; + u32 size; + u32 no_ops; + u8 rsvd1[8]; + u32 read_addr; + u32 read_addr_stride; +} __packed; + +struct __mem { + u32 desc_card_addr; + u32 dma_desc_cmd; + u32 start_dma_cmd; + u32 rsvd[3]; + u32 addr; + u32 size; +} __packed; + +struct __mux { + u32 addr; + u8 rsvd[4]; + u32 size; + u32 no_ops; + u32 val; + u32 val_stride; + u32 read_addr; + u8 rsvd2[4]; +} __packed; + +struct __queue { + u32 sel_addr; +#if defined(__LITTLE_ENDIAN) + u16 stride; + u8 rsvd[2]; +#else + u8 rsvd[2]; + u16 stride; +#endif + u32 size; + u32 no_ops; + u8 rsvd2[8]; + u32 read_addr; +#if defined(__LITTLE_ENDIAN) + u8 read_addr_stride; + u8 read_addr_cnt; + u8 rsvd3[2]; +#else + u8 rsvd3[2]; + u8 read_addr_cnt; + u8 read_addr_stride; +#endif +} __packed; + +struct __pollrd { + u32 sel_addr; + u32 read_addr; + u32 sel_val; +#if defined(__LITTLE_ENDIAN) + u16 sel_val_stride; + u16 no_ops; +#else + u16 no_ops; + u16 sel_val_stride; +#endif + u32 poll_wait; + u32 poll_mask; + u32 data_size; + u8 rsvd[4]; +} __packed; + +struct __mux2 { + u32 sel_addr1; + u32 sel_addr2; + u32 sel_val1; + u32 sel_val2; + u32 no_ops; + u32 sel_val_mask; + u32 read_addr; +#if defined(__LITTLE_ENDIAN) + u8 sel_val_stride; + u8 data_size; + u8 rsvd[2]; +#else + u8 rsvd[2]; + u8 data_size; + u8 sel_val_stride; +#endif +} __packed; + +struct __pollrdmwr { + u32 addr1; + u32 addr2; + u32 val1; + u32 val2; + u32 poll_wait; + u32 poll_mask; + u32 mod_mask; + 
u32 data_size; +} __packed; + +struct qlcnic_dump_entry { + struct qlcnic_common_entry_hdr hdr; + union { + struct __crb crb; + struct __cache cache; + struct __ocm ocm; + struct __mem mem; + struct __mux mux; + struct __queue que; + struct __ctrl ctrl; + struct __pollrdmwr pollrdmwr; + struct __mux2 mux2; + struct __pollrd pollrd; + } region; +} __packed; + +enum qlcnic_minidump_opcode { + QLCNIC_DUMP_NOP = 0, + QLCNIC_DUMP_READ_CRB = 1, + QLCNIC_DUMP_READ_MUX = 2, + QLCNIC_DUMP_QUEUE = 3, + QLCNIC_DUMP_BRD_CONFIG = 4, + QLCNIC_DUMP_READ_OCM = 6, + QLCNIC_DUMP_PEG_REG = 7, + QLCNIC_DUMP_L1_DTAG = 8, + QLCNIC_DUMP_L1_ITAG = 9, + QLCNIC_DUMP_L1_DATA = 11, + QLCNIC_DUMP_L1_INST = 12, + QLCNIC_DUMP_L2_DTAG = 21, + QLCNIC_DUMP_L2_ITAG = 22, + QLCNIC_DUMP_L2_DATA = 23, + QLCNIC_DUMP_L2_INST = 24, + QLCNIC_DUMP_POLL_RD = 35, + QLCNIC_READ_MUX2 = 36, + QLCNIC_READ_POLLRDMWR = 37, + QLCNIC_DUMP_READ_ROM = 71, + QLCNIC_DUMP_READ_MEM = 72, + QLCNIC_DUMP_READ_CTRL = 98, + QLCNIC_DUMP_TLHDR = 99, + QLCNIC_DUMP_RDEND = 255 +}; + +inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + return hdr->saved_state[index]; +} + +inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index, + u32 value) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + hdr->saved_state[index] = value; +} + +void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) +{ + struct qlcnic_82xx_dump_template_hdr *hdr; + + hdr = fw_dump->tmpl_hdr; + fw_dump->tmpl_hdr_size = hdr->size; + fw_dump->version = hdr->version; + fw_dump->num_entries = hdr->num_entries; + fw_dump->offset = hdr->offset; + + hdr->drv_cap_mask = hdr->cap_mask; + fw_dump->cap_mask = hdr->cap_mask; + + fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? 
true : false; +} + +inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + return hdr->cap_sizes[index]; +} + +void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr; + + hdr->sys_info[idx] = value; +} + +void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask) +{ + struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr; + + hdr->drv_cap_mask = mask; +} + +inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + return hdr->saved_state[index]; +} + +inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index, + u32 value) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + hdr->saved_state[index] = value; +} + +#define QLCNIC_TEMPLATE_VERSION (0x20001) + +void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump) +{ + struct qlcnic_83xx_dump_template_hdr *hdr; + + hdr = fw_dump->tmpl_hdr; + fw_dump->tmpl_hdr_size = hdr->size; + fw_dump->version = hdr->version; + fw_dump->num_entries = hdr->num_entries; + fw_dump->offset = hdr->offset; + + hdr->drv_cap_mask = hdr->cap_mask; + fw_dump->cap_mask = hdr->cap_mask; + + fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >= + QLCNIC_TEMPLATE_VERSION; +} + +inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + return hdr->cap_sizes[index]; +} + +void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value) +{ + struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr; + + hdr->sys_info[idx] = value; +} + +void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask) +{ + struct qlcnic_83xx_dump_template_hdr *hdr; + + hdr = tmpl_hdr; + hdr->drv_cap_mask = mask; +} + +struct qlcnic_dump_operations { + enum qlcnic_minidump_opcode opcode; + u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *, + __le32 *); +}; + +static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + int i; + u32 addr, data; + struct __crb *crb = &entry->region.crb; + + addr = crb->addr; + + for (i = 0; i < crb->no_ops; i++) { + data = qlcnic_ind_rd(adapter, addr); + *buffer++ = cpu_to_le32(addr); + *buffer++ = cpu_to_le32(data); + addr += crb->stride; + } + return crb->no_ops * 2 * sizeof(u32); +} + +static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + void *hdr = adapter->ahw->fw_dump.tmpl_hdr; + struct __ctrl *ctr = &entry->region.ctrl; + int i, k, timeout = 0; + u32 addr, data, temp; + u8 no_ops; + + addr = ctr->addr; + no_ops = ctr->no_ops; + + for (i = 0; i < no_ops; i++) { + k = 0; + for (k = 0; k < 8; k++) { + if (!(ctr->opcode & (1 << k))) + continue; + switch (1 << k) { + case QLCNIC_DUMP_WCRB: + qlcnic_ind_wr(adapter, addr, ctr->val1); + break; + case QLCNIC_DUMP_RWCRB: + data = qlcnic_ind_rd(adapter, addr); + qlcnic_ind_wr(adapter, addr, data); + break; + case QLCNIC_DUMP_ANDCRB: + data = qlcnic_ind_rd(adapter, addr); + qlcnic_ind_wr(adapter, addr, + (data & ctr->val2)); + break; + case QLCNIC_DUMP_ORCRB: + data = qlcnic_ind_rd(adapter, addr); + qlcnic_ind_wr(adapter, addr, + (data | ctr->val3)); + break; + case QLCNIC_DUMP_POLLCRB: + while (timeout <= ctr->timeout) { + data = qlcnic_ind_rd(adapter, addr); + if ((data & ctr->val2) == ctr->val1) + break; + usleep_range(1000, 2000); + timeout++; + } + if (timeout > ctr->timeout) { + dev_info(&adapter->pdev->dev, + "Timed 
out, aborting poll CRB\n"); + return -EINVAL; + } + break; + case QLCNIC_DUMP_RD_SAVE: + temp = ctr->index_a; + if (temp) + addr = qlcnic_get_saved_state(adapter, + hdr, + temp); + data = qlcnic_ind_rd(adapter, addr); + qlcnic_set_saved_state(adapter, hdr, + ctr->index_v, data); + break; + case QLCNIC_DUMP_WRT_SAVED: + temp = ctr->index_v; + if (temp) + data = qlcnic_get_saved_state(adapter, + hdr, + temp); + else + data = ctr->val1; + + temp = ctr->index_a; + if (temp) + addr = qlcnic_get_saved_state(adapter, + hdr, + temp); + qlcnic_ind_wr(adapter, addr, data); + break; + case QLCNIC_DUMP_MOD_SAVE_ST: + data = qlcnic_get_saved_state(adapter, hdr, + ctr->index_v); + data <<= ctr->shl_val; + data >>= ctr->shr_val; + if (ctr->val2) + data &= ctr->val2; + data |= ctr->val3; + data += ctr->val1; + qlcnic_set_saved_state(adapter, hdr, + ctr->index_v, data); + break; + default: + dev_info(&adapter->pdev->dev, + "Unknown opcode\n"); + break; + } + } + addr += ctr->stride; + } + return 0; +} + +static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + int loop; + u32 val, data = 0; + struct __mux *mux = &entry->region.mux; + + val = mux->val; + for (loop = 0; loop < mux->no_ops; loop++) { + qlcnic_ind_wr(adapter, mux->addr, val); + data = qlcnic_ind_rd(adapter, mux->read_addr); + *buffer++ = cpu_to_le32(val); + *buffer++ = cpu_to_le32(data); + val += mux->val_stride; + } + return 2 * mux->no_ops * sizeof(u32); +} + +static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + int i, loop; + u32 cnt, addr, data, que_id = 0; + struct __queue *que = &entry->region.que; + + addr = que->read_addr; + cnt = que->read_addr_cnt; + + for (loop = 0; loop < que->no_ops; loop++) { + qlcnic_ind_wr(adapter, que->sel_addr, que_id); + addr = que->read_addr; + for (i = 0; i < cnt; i++) { + data = qlcnic_ind_rd(adapter, addr); + *buffer++ = cpu_to_le32(data); + addr += que->read_addr_stride; + } + que_id += que->stride; + } + return que->no_ops * cnt * sizeof(u32); +} + +static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + int i; + u32 data; + void __iomem *addr; + struct __ocm *ocm = &entry->region.ocm; + + addr = adapter->ahw->pci_base0 + ocm->read_addr; + for (i = 0; i < ocm->no_ops; i++) { + data = readl(addr); + *buffer++ = cpu_to_le32(data); + addr += ocm->read_addr_stride; + } + return ocm->no_ops * sizeof(u32); +} + +static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + int i, count = 0; + u32 fl_addr, size, val, lck_val, addr; + struct __mem *rom = &entry->region.mem; + + fl_addr = rom->addr; + size = rom->size / 4; +lock_try: + lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK); + if (!lck_val && count < MAX_CTL_CHECK) { + usleep_range(10000, 11000); + count++; + goto lock_try; + } + QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, + adapter->ahw->pci_func); + for (i = 0; i < size; i++) { + addr = fl_addr & 0xFFFF0000; + qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr); + addr = LSW(fl_addr) + FLASH_ROM_DATA; + val = qlcnic_ind_rd(adapter, addr); + fl_addr += 4; + *buffer++ = cpu_to_le32(val); + } + QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK); + return rom->size; +} + +static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + int i; + u32 cnt, val, data, addr; + struct __cache *l1 = 
&entry->region.cache; + + val = l1->init_tag_val; + + for (i = 0; i < l1->no_ops; i++) { + qlcnic_ind_wr(adapter, l1->addr, val); + qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val)); + addr = l1->read_addr; + cnt = l1->read_addr_num; + while (cnt) { + data = qlcnic_ind_rd(adapter, addr); + *buffer++ = cpu_to_le32(data); + addr += l1->read_addr_stride; + cnt--; + } + val += l1->stride; + } + return l1->no_ops * l1->read_addr_num * sizeof(u32); +} + +static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + int i; + u32 cnt, val, data, addr; + u8 poll_mask, poll_to, time_out = 0; + struct __cache *l2 = &entry->region.cache; + + val = l2->init_tag_val; + poll_mask = LSB(MSW(l2->ctrl_val)); + poll_to = MSB(MSW(l2->ctrl_val)); + + for (i = 0; i < l2->no_ops; i++) { + qlcnic_ind_wr(adapter, l2->addr, val); + if (LSW(l2->ctrl_val)) + qlcnic_ind_wr(adapter, l2->ctrl_addr, + LSW(l2->ctrl_val)); + if (!poll_mask) + goto skip_poll; + do { + data = qlcnic_ind_rd(adapter, l2->ctrl_addr); + if (!(data & poll_mask)) + break; + usleep_range(1000, 2000); + time_out++; + } while (time_out <= poll_to); + + if (time_out > poll_to) { + dev_err(&adapter->pdev->dev, + "Timeout exceeded in %s, aborting dump\n", + __func__); + return -EINVAL; + } +skip_poll: + addr = l2->read_addr; + cnt = l2->read_addr_num; + while (cnt) { + data = qlcnic_ind_rd(adapter, addr); + *buffer++ = cpu_to_le32(data); + addr += l2->read_addr_stride; + cnt--; + } + val += l2->stride; + } + return l2->no_ops * l2->read_addr_num * sizeof(u32); +} + +static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, + struct __mem *mem, __le32 *buffer, + int *ret) +{ + u32 addr, data, test; + int i, reg_read; + + reg_read = mem->size; + addr = mem->addr; + /* check for data size of multiple of 16 and 16 byte alignment */ + if ((addr & 0xf) || (reg_read%16)) { + dev_info(&adapter->pdev->dev, + "Unaligned memory addr:0x%x size:0x%x\n", + addr, reg_read); + *ret = -EINVAL; + return 0; + } + + mutex_lock(&adapter->ahw->mem_lock); + + while (reg_read != 0) { + qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr); + qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0); + qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE); + + for (i = 0; i < MAX_CTL_CHECK; i++) { + test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL); + if (!(test & TA_CTL_BUSY)) + break; + } + if (i == MAX_CTL_CHECK) { + if (printk_ratelimit()) { + dev_err(&adapter->pdev->dev, + "failed to read through agent\n"); + *ret = -EIO; + goto out; + } + } + for (i = 0; i < 4; i++) { + data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]); + *buffer++ = cpu_to_le32(data); + } + addr += 16; + reg_read -= 16; + ret += 16; + } +out: + mutex_unlock(&adapter->ahw->mem_lock); + return mem->size; +} + +/* DMA register base address */ +#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000)) + +/* DMA register offsets w.r.t base address */ +#define QLC_DMA_CMD_BUFF_ADDR_LOW 0 +#define QLC_DMA_CMD_BUFF_ADDR_HI 4 +#define QLC_DMA_CMD_STATUS_CTRL 8 + +static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter, + struct __mem *mem) +{ + struct device *dev = &adapter->pdev->dev; + u32 dma_no, dma_base_addr, temp_addr; + int i, ret, dma_sts; + void *tmpl_hdr; + + tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr; + dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr, + QLC_83XX_DMA_ENGINE_INDEX); + dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no); + + temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW; + ret = 
qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr); + if (ret) + return ret; + + temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI; + ret = qlcnic_ind_wr(adapter, temp_addr, 0); + if (ret) + return ret; + + temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; + ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd); + if (ret) + return ret; + + /* Wait for DMA to complete */ + temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL; + for (i = 0; i < 400; i++) { + dma_sts = qlcnic_ind_rd(adapter, temp_addr); + + if (dma_sts & BIT_1) + usleep_range(250, 500); + else + break; + } + + if (i >= 400) { + dev_info(dev, "PEX DMA operation timed out"); + ret = -EIO; + } + + return ret; +} + +static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter, + struct __mem *mem, + __le32 *buffer, int *ret) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + u32 temp, dma_base_addr, size = 0, read_size = 0; + struct qlcnic_pex_dma_descriptor *dma_descr; + struct device *dev = &adapter->pdev->dev; + dma_addr_t dma_phys_addr; + void *dma_buffer; + void *tmpl_hdr; + + tmpl_hdr = fw_dump->tmpl_hdr; + + /* Check if DMA engine is available */ + temp = qlcnic_get_saved_state(adapter, tmpl_hdr, + QLC_83XX_DMA_ENGINE_INDEX); + dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp); + temp = qlcnic_ind_rd(adapter, + dma_base_addr + QLC_DMA_CMD_STATUS_CTRL); + + if (!(temp & BIT_31)) { + dev_info(dev, "%s: DMA engine is not available\n", __func__); + *ret = -EIO; + return 0; + } + + /* Create DMA descriptor */ + dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor), + GFP_KERNEL); + if (!dma_descr) { + *ret = -ENOMEM; + return 0; + } + + /* dma_desc_cmd 0:15 = 0 + * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3 + * dma_desc_cmd 20:23 = pci function number + * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15 + */ + dma_phys_addr = fw_dump->phys_addr; + dma_buffer = fw_dump->dma_buffer; + temp = 0; + temp = mem->dma_desc_cmd & 0xff0f; + temp |= (adapter->ahw->pci_func & 0xf) << 4; + dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000; + dma_descr->dma_bus_addr_low = LSD(dma_phys_addr); + dma_descr->dma_bus_addr_high = MSD(dma_phys_addr); + dma_descr->src_addr_high = 0; + + /* Collect memory dump using multiple DMA operations if required */ + while (read_size < mem->size) { + if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE) + size = QLC_PEX_DMA_READ_SIZE; + else + size = mem->size - read_size; + + dma_descr->src_addr_low = mem->addr + read_size; + dma_descr->read_data_size = size; + + /* Write DMA descriptor to MS memory*/ + temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16; + *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr, + (u32 *)dma_descr, temp); + if (*ret) { + dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n", + mem->desc_card_addr); + goto free_dma_descr; + } + + *ret = qlcnic_start_pex_dma(adapter, mem); + if (*ret) { + dev_info(dev, "Failed to start PEX DMA operation\n"); + goto free_dma_descr; + } + + memcpy(buffer, dma_buffer, size); + buffer += size / 4; + read_size += size; + } + +free_dma_descr: + kfree(dma_descr); + + return read_size; +} + +static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + struct device *dev = &adapter->pdev->dev; + struct __mem *mem = &entry->region.mem; + u32 data_size; + int ret = 0; + + if (fw_dump->use_pex_dma) { + data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer, + &ret); + if (ret) + 
dev_info(dev, + "Failed to read memory dump using PEX DMA: mask[0x%x]\n", + entry->hdr.mask); + else + return data_size; + } + + data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret); + if (ret) { + dev_info(dev, + "Failed to read memory dump using test agent method: mask[0x%x]\n", + entry->hdr.mask); + return 0; + } else { + return data_size; + } +} + +static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + return 0; +} + +static int qlcnic_valid_dump_entry(struct device *dev, + struct qlcnic_dump_entry *entry, u32 size) +{ + int ret = 1; + if (size != entry->hdr.cap_size) { + dev_err(dev, + "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n", + entry->hdr.type, entry->hdr.mask, size, + entry->hdr.cap_size); + ret = 0; + } + return ret; +} + +static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, + __le32 *buffer) +{ + struct __pollrdmwr *poll = &entry->region.pollrdmwr; + u32 data, wait_count, poll_wait, temp; + + poll_wait = poll->poll_wait; + + qlcnic_ind_wr(adapter, poll->addr1, poll->val1); + wait_count = 0; + + while (wait_count < poll_wait) { + data = qlcnic_ind_rd(adapter, poll->addr1); + if ((data & poll->poll_mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll_wait) { + dev_err(&adapter->pdev->dev, + "Timeout exceeded in %s, aborting dump\n", + __func__); + return 0; + } + + data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask; + qlcnic_ind_wr(adapter, poll->addr2, data); + qlcnic_ind_wr(adapter, poll->addr1, poll->val2); + wait_count = 0; + + while (wait_count < poll_wait) { + temp = qlcnic_ind_rd(adapter, poll->addr1); + if ((temp & poll->poll_mask) != 0) + break; + wait_count++; + } + + *buffer++ = cpu_to_le32(poll->addr2); + *buffer++ = cpu_to_le32(data); + + return 2 * sizeof(u32); + +} + +static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + struct __pollrd *pollrd = &entry->region.pollrd; + u32 data, wait_count, poll_wait, sel_val; + int i; + + poll_wait = pollrd->poll_wait; + sel_val = pollrd->sel_val; + + for (i = 0; i < pollrd->no_ops; i++) { + qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val); + wait_count = 0; + while (wait_count < poll_wait) { + data = qlcnic_ind_rd(adapter, pollrd->sel_addr); + if ((data & pollrd->poll_mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll_wait) { + dev_err(&adapter->pdev->dev, + "Timeout exceeded in %s, aborting dump\n", + __func__); + return 0; + } + + data = qlcnic_ind_rd(adapter, pollrd->read_addr); + *buffer++ = cpu_to_le32(sel_val); + *buffer++ = cpu_to_le32(data); + sel_val += pollrd->sel_val_stride; + } + return pollrd->no_ops * (2 * sizeof(u32)); +} + +static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + struct __mux2 *mux2 = &entry->region.mux2; + u32 data; + u32 t_sel_val, sel_val1, sel_val2; + int i; + + sel_val1 = mux2->sel_val1; + sel_val2 = mux2->sel_val2; + + for (i = 0; i < mux2->no_ops; i++) { + qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1); + t_sel_val = sel_val1 & mux2->sel_val_mask; + qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val); + data = qlcnic_ind_rd(adapter, mux2->read_addr); + *buffer++ = cpu_to_le32(t_sel_val); + *buffer++ = cpu_to_le32(data); + qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2); + t_sel_val = sel_val2 & mux2->sel_val_mask; + qlcnic_ind_wr(adapter, 
mux2->sel_addr2, t_sel_val); + data = qlcnic_ind_rd(adapter, mux2->read_addr); + *buffer++ = cpu_to_le32(t_sel_val); + *buffer++ = cpu_to_le32(data); + sel_val1 += mux2->sel_val_stride; + sel_val2 += mux2->sel_val_stride; + } + + return mux2->no_ops * (4 * sizeof(u32)); +} + +static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter, + struct qlcnic_dump_entry *entry, __le32 *buffer) +{ + u32 fl_addr, size; + struct __mem *rom = &entry->region.mem; + + fl_addr = rom->addr; + size = rom->size / 4; + + if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr, + (u8 *)buffer, size)) + return rom->size; + + return 0; +} + +static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = { + {QLCNIC_DUMP_NOP, qlcnic_dump_nop}, + {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb}, + {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux}, + {QLCNIC_DUMP_QUEUE, qlcnic_dump_que}, + {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom}, + {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm}, + {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom}, + {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory}, + {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop}, + {QLCNIC_DUMP_RDEND, qlcnic_dump_nop}, +}; + +static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = { + {QLCNIC_DUMP_NOP, qlcnic_dump_nop}, + {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb}, + {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux}, + {QLCNIC_DUMP_QUEUE, qlcnic_dump_que}, + {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom}, + {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm}, + {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache}, + {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache}, + {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd}, + {QLCNIC_READ_MUX2, qlcnic_read_mux2}, + {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr}, + {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom}, + {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory}, + {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl}, + {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop}, + {QLCNIC_DUMP_RDEND, qlcnic_dump_nop}, +}; + +static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size) +{ + uint64_t sum = 0; + int count = temp_size / sizeof(uint32_t); + while (count-- > 0) + sum += *temp_buffer++; + while (sum >> 32) + sum = (sum & 0xFFFFFFFF) + (sum >> 32); + return ~sum; +} + +static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter, + u8 *buffer, u32 size) +{ + int ret = 0; + + if (qlcnic_82xx_check(adapter)) + return -EIO; + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + ret = qlcnic_83xx_lockless_flash_read32(adapter, + QLC_83XX_MINIDUMP_FLASH, + buffer, size / sizeof(u32)); + + qlcnic_83xx_unlock_flash(adapter); + + return ret; +} + +static int +qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_83xx_dump_template_hdr tmp_hdr; + u32 size = 
sizeof(tmp_hdr) / sizeof(u32); + int ret = 0; + + if (qlcnic_82xx_check(adapter)) + return -EIO; + + if (qlcnic_83xx_lock_flash(adapter)) + return -EIO; + + ret = qlcnic_83xx_lockless_flash_read32(adapter, + QLC_83XX_MINIDUMP_FLASH, + (u8 *)&tmp_hdr, size); + + qlcnic_83xx_unlock_flash(adapter); + + cmd->rsp.arg[2] = tmp_hdr.size; + cmd->rsp.arg[3] = tmp_hdr.version; + + return ret; +} + +static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter, + u32 *version, u32 *temp_size, + u8 *use_flash_temp) +{ + int err = 0; + struct qlcnic_cmd_args cmd; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE)) + return -ENOMEM; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err != QLCNIC_RCODE_SUCCESS) { + if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) { + qlcnic_free_mbx_args(&cmd); + return -EIO; + } + *use_flash_temp = 1; + } + + *temp_size = cmd.rsp.arg[2]; + *version = cmd.rsp.arg[3]; + qlcnic_free_mbx_args(&cmd); + + if (!(*temp_size)) + return -EIO; + + return 0; +} + +static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter, + u32 *buffer, u32 temp_size) +{ + int err = 0, i; + void *tmp_addr; + __le32 *tmp_buf; + struct qlcnic_cmd_args cmd; + dma_addr_t tmp_addr_t = 0; + + tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, + &tmp_addr_t, GFP_KERNEL); + if (!tmp_addr) + return -ENOMEM; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) { + err = -ENOMEM; + goto free_mem; + } + + cmd.req.arg[1] = LSD(tmp_addr_t); + cmd.req.arg[2] = MSD(tmp_addr_t); + cmd.req.arg[3] = temp_size; + err = qlcnic_issue_cmd(adapter, &cmd); + + tmp_buf = tmp_addr; + if (err == QLCNIC_RCODE_SUCCESS) { + for (i = 0; i < temp_size / sizeof(u32); i++) + *buffer++ = __le32_to_cpu(*tmp_buf++); + } + + qlcnic_free_mbx_args(&cmd); + +free_mem: + dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t); + + return err; +} + +int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw; + struct qlcnic_fw_dump *fw_dump; + u32 version, csum, *tmp_buf; + u8 use_flash_temp = 0; + u32 temp_size = 0; + void *temp_buffer; + int err; + + ahw = adapter->ahw; + fw_dump = &ahw->fw_dump; + err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size, + &use_flash_temp); + if (err) { + dev_err(&adapter->pdev->dev, + "Can't get template size %d\n", err); + return -EIO; + } + + fw_dump->tmpl_hdr = vzalloc(temp_size); + if (!fw_dump->tmpl_hdr) + return -ENOMEM; + + tmp_buf = (u32 *)fw_dump->tmpl_hdr; + if (use_flash_temp) + goto flash_temp; + + err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size); + + if (err) { +flash_temp: + err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf, + temp_size); + + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to get minidump template header %d\n", + err); + vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; + return -EIO; + } + } + + csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size); + + if (csum) { + dev_err(&adapter->pdev->dev, + "Template header checksum validation failed\n"); + vfree(fw_dump->tmpl_hdr); + fw_dump->tmpl_hdr = NULL; + return -EIO; + } + + qlcnic_cache_tmpl_hdr_values(adapter, fw_dump); + + if (fw_dump->use_pex_dma) { + fw_dump->dma_buffer = NULL; + temp_buffer = dma_alloc_coherent(&adapter->pdev->dev, + QLC_PEX_DMA_READ_SIZE, + &fw_dump->phys_addr, + GFP_KERNEL); + if (!temp_buffer) + fw_dump->use_pex_dma = false; + else + fw_dump->dma_buffer = temp_buffer; + } + + + 
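+	/* Template header has been checksum-validated and cached above; report the
+	 * default capture mask and enable firmware dump collection. (Descriptive
+	 * comment added for readability; not part of the original patch.)
+	 */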
dev_info(&adapter->pdev->dev, + "Default minidump capture mask 0x%x\n", + fw_dump->cap_mask); + + qlcnic_enable_fw_dump_state(adapter); + + return 0; +} + +int qlcnic_dump_fw(struct qlcnic_adapter *adapter) +{ + struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; + static const struct qlcnic_dump_operations *fw_dump_ops; + struct qlcnic_83xx_dump_template_hdr *hdr_83xx; + u32 entry_offset, dump, no_entries, buf_offset = 0; + int i, k, ops_cnt, ops_index, dump_size = 0; + struct device *dev = &adapter->pdev->dev; + struct qlcnic_hardware_context *ahw; + struct qlcnic_dump_entry *entry; + void *tmpl_hdr; + u32 ocm_window; + __le32 *buffer; + char mesg[64]; + char *msg[] = {mesg, NULL}; + + ahw = adapter->ahw; + tmpl_hdr = fw_dump->tmpl_hdr; + + /* Return if we don't have firmware dump template header */ + if (!tmpl_hdr) + return -EIO; + + if (!qlcnic_check_fw_dump_state(adapter)) { + dev_info(&adapter->pdev->dev, "Dump not enabled\n"); + return -EIO; + } + + if (fw_dump->clr) { + dev_info(&adapter->pdev->dev, + "Previous dump not cleared, not capturing dump\n"); + return -EIO; + } + + netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n"); + /* Calculate the size for dump data area only */ + for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++) + if (i & fw_dump->cap_mask) + dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k); + + if (!dump_size) + return -EIO; + + fw_dump->data = vzalloc(dump_size); + if (!fw_dump->data) + return -ENOMEM; + + buffer = fw_dump->data; + fw_dump->size = dump_size; + no_entries = fw_dump->num_entries; + entry_offset = fw_dump->offset; + qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION); + qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version); + + if (qlcnic_82xx_check(adapter)) { + ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops); + fw_dump_ops = qlcnic_fw_dump_ops; + } else { + hdr_83xx = tmpl_hdr; + ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops); + fw_dump_ops = qlcnic_83xx_fw_dump_ops; + ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func]; + hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window; + hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func; + } + + for (i = 0; i < no_entries; i++) { + entry = tmpl_hdr + entry_offset; + if (!(entry->hdr.mask & fw_dump->cap_mask)) { + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + entry_offset += entry->hdr.offset; + continue; + } + + /* Find the handler for this entry */ + ops_index = 0; + while (ops_index < ops_cnt) { + if (entry->hdr.type == fw_dump_ops[ops_index].opcode) + break; + ops_index++; + } + + if (ops_index == ops_cnt) { + dev_info(dev, "Skipping unknown entry opcode %d\n", + entry->hdr.type); + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + entry_offset += entry->hdr.offset; + continue; + } + + /* Collect dump for this entry */ + dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer); + if (!qlcnic_valid_dump_entry(dev, entry, dump)) { + entry->hdr.flags |= QLCNIC_DUMP_SKIP; + entry_offset += entry->hdr.offset; + continue; + } + + buf_offset += entry->hdr.cap_size; + entry_offset += entry->hdr.offset; + buffer = fw_dump->data + buf_offset; + } + + fw_dump->clr = 1; + snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); + netdev_info(adapter->netdev, + "Dump data %d bytes captured, template header size %d bytes\n", + fw_dump->size, fw_dump->tmpl_hdr_size); + /* Send a udev event to notify availability of FW dump */ + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); + + return 0; +} + +void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter 
*adapter) +{ + u32 prev_version, current_version; + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; + struct pci_dev *pdev = adapter->pdev; + + prev_version = adapter->fw_version; + current_version = qlcnic_83xx_get_fw_version(adapter); + + if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { + vfree(fw_dump->tmpl_hdr); + if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) + dev_info(&pdev->dev, "Supports FW dump capability\n"); + } +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h new file mode 100644 index 000000000..4677b2edc --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -0,0 +1,276 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#ifndef _QLCNIC_83XX_SRIOV_H_ +#define _QLCNIC_83XX_SRIOV_H_ + +#include "qlcnic.h" +#include +#include + +extern const u32 qlcnic_83xx_reg_tbl[]; +extern const u32 qlcnic_83xx_ext_reg_tbl[]; + +struct qlcnic_bc_payload { + u64 payload[126]; +}; + +struct qlcnic_bc_hdr { +#if defined(__LITTLE_ENDIAN) + u8 version; + u8 msg_type:4; + u8 rsvd1:3; + u8 op_type:1; + u8 num_cmds; + u8 num_frags; + u8 frag_num; + u8 cmd_op; + u16 seq_id; + u64 rsvd3; +#elif defined(__BIG_ENDIAN) + u8 num_frags; + u8 num_cmds; + u8 op_type:1; + u8 rsvd1:3; + u8 msg_type:4; + u8 version; + u16 seq_id; + u8 cmd_op; + u8 frag_num; + u64 rsvd3; +#endif +}; + +enum qlcnic_bc_commands { + QLCNIC_BC_CMD_CHANNEL_INIT = 0x0, + QLCNIC_BC_CMD_CHANNEL_TERM = 0x1, + QLCNIC_BC_CMD_GET_ACL = 0x2, + QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3, +}; + +#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2 +#define QLC_BC_CMD 1 + +struct qlcnic_trans_list { + /* Lock for manipulating list */ + spinlock_t lock; + struct list_head wait_list; + int count; +}; + +enum qlcnic_trans_state { + QLC_INIT = 0, + QLC_WAIT_FOR_CHANNEL_FREE, + QLC_WAIT_FOR_RESP, + QLC_ABORT, + QLC_END, +}; + +struct qlcnic_bc_trans { + u8 func_id; + u8 active; + u8 curr_rsp_frag; + u8 curr_req_frag; + u16 cmd_id; + u16 req_pay_size; + u16 rsp_pay_size; + u32 trans_id; + enum qlcnic_trans_state trans_state; + struct list_head list; + struct qlcnic_bc_hdr *req_hdr; + struct qlcnic_bc_hdr *rsp_hdr; + struct qlcnic_bc_payload *req_pay; + struct qlcnic_bc_payload *rsp_pay; + struct completion resp_cmpl; + struct qlcnic_vf_info *vf; +}; + +enum qlcnic_vf_state { + QLC_BC_VF_SEND = 0, + QLC_BC_VF_RECV, + QLC_BC_VF_CHANNEL, + QLC_BC_VF_STATE, + QLC_BC_VF_FLR, + QLC_BC_VF_SOFT_FLR, +}; + +enum qlcnic_vlan_mode { + QLC_NO_VLAN_MODE = 0, + QLC_PVID_MODE, + QLC_GUEST_VLAN_MODE, +}; + +struct qlcnic_resources { + u16 num_tx_mac_filters; + u16 num_rx_ucast_mac_filters; + u16 num_rx_mcast_mac_filters; + + u16 num_txvlan_keys; + + u16 num_rx_queues; + u16 num_tx_queues; + + u16 num_rx_buf_rings; + u16 num_rx_status_rings; + + u16 num_destip; + u32 num_lro_flows_supported; + u16 max_local_ipv6_addrs; + u16 max_remote_ipv6_addrs; +}; + +struct qlcnic_vport { + u16 handle; + u16 max_tx_bw; + u16 min_tx_bw; + u16 pvid; + u8 vlan_mode; + u8 qos; + bool spoofchk; + u8 mac[6]; +}; + +struct qlcnic_vf_info { + u8 pci_func; + u16 rx_ctx_id; + u16 tx_ctx_id; + u16 *sriov_vlans; + int num_vlan; + unsigned long state; + struct completion ch_free_cmpl; + struct work_struct trans_work; + struct work_struct flr_work; + /* It synchronizes commands sent from VF */ + struct mutex send_cmd_lock; + struct qlcnic_bc_trans *send_cmd; 
+ struct qlcnic_bc_trans *flr_trans; + struct qlcnic_trans_list rcv_act; + struct qlcnic_trans_list rcv_pend; + struct qlcnic_adapter *adapter; + struct qlcnic_vport *vp; + spinlock_t vlan_list_lock; /* Lock for VLAN list */ +}; + +struct qlcnic_async_work_list { + struct list_head list; + struct work_struct work; + void *ptr; + struct qlcnic_cmd_args *cmd; +}; + +struct qlcnic_back_channel { + u16 trans_counter; + struct workqueue_struct *bc_trans_wq; + struct workqueue_struct *bc_async_wq; + struct workqueue_struct *bc_flr_wq; + struct list_head async_list; +}; + +struct qlcnic_sriov { + u16 vp_handle; + u8 num_vfs; + u8 any_vlan; + u8 vlan_mode; + u16 num_allowed_vlans; + u16 *allowed_vlans; + u16 vlan; + struct qlcnic_resources ff_max; + struct qlcnic_back_channel bc; + struct qlcnic_vf_info *vf_info; +}; + +int qlcnic_sriov_init(struct qlcnic_adapter *, int); +void qlcnic_sriov_cleanup(struct qlcnic_adapter *); +void __qlcnic_sriov_cleanup(struct qlcnic_adapter *); +void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *); +int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int); +void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *); +int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8); +void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32); +int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8); +void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *); +void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *); +int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *, + struct qlcnic_bc_trans *); +int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *, + struct qlcnic_info *, u16); +int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8); +void qlcnic_sriov_free_vlans(struct qlcnic_adapter *); +void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *); +bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *); +void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *, + struct qlcnic_vf_info *, u16); +void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *, + struct qlcnic_vf_info *, u16); + +static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter) +{ + return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? 
true : false; +} + +#ifdef CONFIG_QLCNIC_SRIOV +void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *, + struct qlcnic_bc_trans *, + struct qlcnic_cmd_args *); +void qlcnic_sriov_pf_disable(struct qlcnic_adapter *); +void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *); +int qlcnic_pci_sriov_configure(struct pci_dev *, int); +void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *); +void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *); +void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *, struct qlcnic_vf_info *); +bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *, + struct qlcnic_bc_trans *, + struct qlcnic_vf_info *); +void qlcnic_sriov_pf_reset(struct qlcnic_adapter *); +int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *); +int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *); +int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int); +int qlcnic_sriov_get_vf_config(struct net_device *, int , + struct ifla_vf_info *); +int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8); +int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool); +#else +static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {} +static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {} +static inline void +qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) {} +static inline void +qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id) +{} +static inline void +qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id) +{} +static inline void +qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id) +{} +static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf) {} +static inline bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, + struct qlcnic_vf_info *vf) +{ return false; } +static inline void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) {} +static inline int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter) +{ return 0; } +#endif + +#endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c new file mode 100644 index 000000000..e6312465f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -0,0 +1,2214 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include "qlcnic_sriov.h" +#include "qlcnic.h" +#include "qlcnic_83xx_hw.h" +#include + +#define QLC_BC_COMMAND 0 +#define QLC_BC_RESPONSE 1 + +#define QLC_MBOX_RESP_TIMEOUT (10 * HZ) +#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ) + +#define QLC_BC_MSG 0 +#define QLC_BC_CFREE 1 +#define QLC_BC_FLR 2 +#define QLC_BC_HDR_SZ 16 +#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ) + +#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048 +#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512 + +#define QLC_83XX_VF_RESET_FAIL_THRESH 8 +#define QLC_BC_CMD_MAX_RETRY_CNT 5 + +static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); +static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); +static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); +static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); +static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *, + struct qlcnic_cmd_args *); +static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8); +static void qlcnic_sriov_process_bc_cmd(struct work_struct *); +static int qlcnic_sriov_vf_shutdown(struct pci_dev *); +static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *); +static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *, + struct qlcnic_cmd_args *); + +static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { + .read_crb = qlcnic_83xx_read_crb, + .write_crb = qlcnic_83xx_write_crb, + .read_reg = qlcnic_83xx_rd_reg_indirect, + .write_reg = qlcnic_83xx_wrt_reg_indirect, + .get_mac_address = qlcnic_83xx_get_mac_address, + .setup_intr = qlcnic_83xx_setup_intr, + .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, + .mbx_cmd = qlcnic_sriov_issue_cmd, + .get_func_no = qlcnic_83xx_get_func_no, + .api_lock = qlcnic_83xx_cam_lock, + .api_unlock = qlcnic_83xx_cam_unlock, + .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, + .create_rx_ctx = qlcnic_83xx_create_rx_ctx, + .create_tx_ctx = qlcnic_83xx_create_tx_ctx, + .del_rx_ctx = qlcnic_83xx_del_rx_ctx, + .del_tx_ctx = qlcnic_83xx_del_tx_ctx, + .setup_link_event = qlcnic_83xx_setup_link_event, + .get_nic_info = qlcnic_83xx_get_nic_info, + .get_pci_info = qlcnic_83xx_get_pci_info, + .set_nic_info = qlcnic_83xx_set_nic_info, + .change_macvlan = qlcnic_83xx_sre_macaddr_change, + .napi_enable = qlcnic_83xx_napi_enable, + .napi_disable = qlcnic_83xx_napi_disable, + .config_intr_coal = qlcnic_83xx_config_intr_coal, + .config_rss = qlcnic_83xx_config_rss, + .config_hw_lro = qlcnic_83xx_config_hw_lro, + .config_promisc_mode = qlcnic_83xx_nic_set_promisc, + .change_l2_filter = qlcnic_83xx_change_l2_filter, + .get_board_info = qlcnic_83xx_get_port_info, + .free_mac_list = qlcnic_sriov_vf_free_mac_list, + .enable_sds_intr = qlcnic_83xx_enable_sds_intr, + .disable_sds_intr = qlcnic_83xx_disable_sds_intr, +}; + +static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { + .config_bridged_mode = qlcnic_config_bridged_mode, + .config_led = qlcnic_config_led, + .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work, + .napi_add = qlcnic_83xx_napi_add, + .napi_del = qlcnic_83xx_napi_del, + .shutdown = qlcnic_sriov_vf_shutdown, + .resume = qlcnic_sriov_vf_resume, + .config_ipaddr = qlcnic_83xx_config_ipaddr, + .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, +}; + +static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = { + {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2}, + {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2}, + {QLCNIC_BC_CMD_GET_ACL, 3, 14}, + 
{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2}, +}; + +static inline bool qlcnic_sriov_bc_msg_check(u32 val) +{ + return (val & (1 << QLC_BC_MSG)) ? true : false; +} + +static inline bool qlcnic_sriov_channel_free_check(u32 val) +{ + return (val & (1 << QLC_BC_CFREE)) ? true : false; +} + +static inline bool qlcnic_sriov_flr_check(u32 val) +{ + return (val & (1 << QLC_BC_FLR)) ? true : false; +} + +static inline u8 qlcnic_sriov_target_func_id(u32 val) +{ + return (val >> 4) & 0xff; +} + +static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id) +{ + struct pci_dev *dev = adapter->pdev; + int pos; + u16 stride, offset; + + if (qlcnic_sriov_vf_check(adapter)) + return 0; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); + + return (dev->devfn + offset + stride * vf_id) & 0xff; +} + +int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) +{ + struct qlcnic_sriov *sriov; + struct qlcnic_back_channel *bc; + struct workqueue_struct *wq; + struct qlcnic_vport *vp; + struct qlcnic_vf_info *vf; + int err, i; + + if (!qlcnic_sriov_enable_check(adapter)) + return -EIO; + + sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL); + if (!sriov) + return -ENOMEM; + + adapter->ahw->sriov = sriov; + sriov->num_vfs = num_vfs; + bc = &sriov->bc; + sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) * + num_vfs, GFP_KERNEL); + if (!sriov->vf_info) { + err = -ENOMEM; + goto qlcnic_free_sriov; + } + + wq = create_singlethread_workqueue("bc-trans"); + if (wq == NULL) { + err = -ENOMEM; + dev_err(&adapter->pdev->dev, + "Cannot create bc-trans workqueue\n"); + goto qlcnic_free_vf_info; + } + + bc->bc_trans_wq = wq; + + wq = create_singlethread_workqueue("async"); + if (wq == NULL) { + err = -ENOMEM; + dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n"); + goto qlcnic_destroy_trans_wq; + } + + bc->bc_async_wq = wq; + INIT_LIST_HEAD(&bc->async_list); + + for (i = 0; i < num_vfs; i++) { + vf = &sriov->vf_info[i]; + vf->adapter = adapter; + vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); + mutex_init(&vf->send_cmd_lock); + spin_lock_init(&vf->vlan_list_lock); + INIT_LIST_HEAD(&vf->rcv_act.wait_list); + INIT_LIST_HEAD(&vf->rcv_pend.wait_list); + spin_lock_init(&vf->rcv_act.lock); + spin_lock_init(&vf->rcv_pend.lock); + init_completion(&vf->ch_free_cmpl); + + INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd); + + if (qlcnic_sriov_pf_check(adapter)) { + vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL); + if (!vp) { + err = -ENOMEM; + goto qlcnic_destroy_async_wq; + } + sriov->vf_info[i].vp = vp; + vp->vlan_mode = QLC_GUEST_VLAN_MODE; + vp->max_tx_bw = MAX_BW; + vp->min_tx_bw = MIN_BW; + vp->spoofchk = false; + random_ether_addr(vp->mac); + dev_info(&adapter->pdev->dev, + "MAC Address %pM is configured for VF %d\n", + vp->mac, i); + } + } + + return 0; + +qlcnic_destroy_async_wq: + destroy_workqueue(bc->bc_async_wq); + +qlcnic_destroy_trans_wq: + destroy_workqueue(bc->bc_trans_wq); + +qlcnic_free_vf_info: + kfree(sriov->vf_info); + +qlcnic_free_sriov: + kfree(adapter->ahw->sriov); + return err; +} + +void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list) +{ + struct qlcnic_bc_trans *trans; + struct qlcnic_cmd_args cmd; + unsigned long flags; + + spin_lock_irqsave(&t_list->lock, flags); + + while (!list_empty(&t_list->wait_list)) { + trans = list_first_entry(&t_list->wait_list, + struct qlcnic_bc_trans, list); + 
list_del(&trans->list); + t_list->count--; + cmd.req.arg = (u32 *)trans->req_pay; + cmd.rsp.arg = (u32 *)trans->rsp_pay; + qlcnic_free_mbx_args(&cmd); + qlcnic_sriov_cleanup_transaction(trans); + } + + spin_unlock_irqrestore(&t_list->lock, flags); +} + +void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_back_channel *bc = &sriov->bc; + struct qlcnic_vf_info *vf; + int i; + + if (!qlcnic_sriov_enable_check(adapter)) + return; + + qlcnic_sriov_cleanup_async_list(bc); + destroy_workqueue(bc->bc_async_wq); + + for (i = 0; i < sriov->num_vfs; i++) { + vf = &sriov->vf_info[i]; + qlcnic_sriov_cleanup_list(&vf->rcv_pend); + cancel_work_sync(&vf->trans_work); + qlcnic_sriov_cleanup_list(&vf->rcv_act); + } + + destroy_workqueue(bc->bc_trans_wq); + + for (i = 0; i < sriov->num_vfs; i++) + kfree(sriov->vf_info[i].vp); + + kfree(sriov->vf_info); + kfree(adapter->ahw->sriov); +} + +static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter) +{ + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + qlcnic_sriov_cfg_bc_intr(adapter, 0); + __qlcnic_sriov_cleanup(adapter); +} + +void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) +{ + if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state)) + return; + + qlcnic_sriov_free_vlans(adapter); + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_sriov_pf_cleanup(adapter); + + if (qlcnic_sriov_vf_check(adapter)) + qlcnic_sriov_vf_cleanup(adapter); +} + +static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, + u32 *pay, u8 pci_func, u8 size) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; + struct qlcnic_cmd_args cmd; + unsigned long timeout; + int err; + + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); + cmd.hdr = hdr; + cmd.pay = pay; + cmd.pay_size = size; + cmd.func_num = pci_func; + cmd.op_type = QLC_83XX_MBX_POST_BC_OP; + cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; + + err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, cmd.cmd_op, cmd.type, ahw->pci_func, + ahw->op_mode); + return err; + } + + if (!wait_for_completion_timeout(&cmd.completion, timeout)) { + dev_err(&adapter->pdev->dev, + "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", + __func__, cmd.cmd_op, cmd.type, ahw->pci_func, + ahw->op_mode); + flush_workqueue(mbx->work_q); + } + + return cmd.rsp_opcode; +} + +static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter) +{ + adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF; + adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; + adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF; + adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; + adapter->num_txd = MAX_CMD_DESCRIPTORS; + adapter->max_rds_rings = MAX_RDS_RINGS; +} + +int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, u16 vport_id) +{ + struct device *dev = &adapter->pdev->dev; + struct qlcnic_cmd_args cmd; + int err; + u32 status; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); + if (err) + return err; + + cmd.req.arg[1] = vport_id << 16 | 0x1; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to get vport info, err=%d\n", err); + qlcnic_free_mbx_args(&cmd); + return err; + 
} + + status = cmd.rsp.arg[2] & 0xffff; + if (status & BIT_0) + npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]); + if (status & BIT_1) + npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]); + if (status & BIT_2) + npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]); + if (status & BIT_3) + npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]); + if (status & BIT_4) + npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]); + if (status & BIT_5) + npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]); + if (status & BIT_6) + npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]); + if (status & BIT_7) + npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]); + if (status & BIT_8) + npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]); + if (status & BIT_9) + npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]); + + npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]); + npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]); + npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]); + npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]); + + dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n" + "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n" + "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n" + "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n" + "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n" + "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n", + npar_info->min_tx_bw, npar_info->max_tx_bw, + npar_info->max_tx_ques, npar_info->max_tx_mac_filters, + npar_info->max_rx_mcast_mac_filters, + npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr, + npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings, + npar_info->max_rx_buf_rings, npar_info->max_rx_ques, + npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs, + npar_info->max_remote_ipv6_addrs); + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff; + adapter->flags &= ~QLCNIC_TAGGING_ENABLED; + return 0; +} + +static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + int i, num_vlans; + u16 *vlans; + + if (sriov->allowed_vlans) + return 0; + + sriov->any_vlan = cmd->rsp.arg[2] & 0xf; + sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16; + dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n", + sriov->num_allowed_vlans); + + qlcnic_sriov_alloc_vlans(adapter); + + if (!sriov->any_vlan) + return 0; + + num_vlans = sriov->num_allowed_vlans; + sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL); + if (!sriov->allowed_vlans) + return -ENOMEM; + + vlans = (u16 *)&cmd->rsp.arg[3]; + for (i = 0; i < num_vlans; i++) + sriov->allowed_vlans[i] = vlans[i]; + + return 0; +} + +static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_cmd_args cmd; + int ret = 0; + + memset(&cmd, 0, sizeof(cmd)); + ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); + if (ret) + return ret; + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) { + dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n", + ret); + } else { + sriov->vlan_mode = cmd.rsp.arg[1] & 0x3; + switch (sriov->vlan_mode) { + case QLC_GUEST_VLAN_MODE: + ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd); + break; + case QLC_PVID_MODE: + ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd); + break; + } 
+ } + + qlcnic_free_mbx_args(&cmd); + return ret; +} + +static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_info nic_info; + int err; + + err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0); + if (err) + return err; + + ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters; + + err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); + if (err) + return -EIO; + + if (qlcnic_83xx_get_port_info(adapter)) + return -EIO; + + qlcnic_sriov_vf_cfg_buff_desc(adapter); + adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; + dev_info(&adapter->pdev->dev, "HAL Version: %d\n", + adapter->ahw->fw_hal_version); + + ahw->physical_port = (u8) nic_info.phys_port; + ahw->switch_mode = nic_info.switch_mode; + ahw->max_mtu = nic_info.max_mtu; + ahw->op_mode = nic_info.op_mode; + ahw->capabilities = nic_info.capabilities; + return 0; +} + +static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, + int pci_using_dac) +{ + int err; + + adapter->flags |= QLCNIC_VLAN_FILTERING; + adapter->ahw->total_nic_func = 1; + INIT_LIST_HEAD(&adapter->vf_mc_list); + if (!qlcnic_use_msi_x && !!qlcnic_use_msi) + dev_warn(&adapter->pdev->dev, + "Device does not support MSI interrupts\n"); + + /* compute and set default and max tx/sds rings */ + qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING); + qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING); + + err = qlcnic_setup_intr(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); + goto err_out_disable_msi; + } + + err = qlcnic_83xx_setup_mbx_intr(adapter); + if (err) + goto err_out_disable_msi; + + err = qlcnic_sriov_init(adapter, 1); + if (err) + goto err_out_disable_mbx_intr; + + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + goto err_out_cleanup_sriov; + + err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); + if (err) + goto err_out_disable_bc_intr; + + err = qlcnic_sriov_vf_init_driver(adapter); + if (err) + goto err_out_send_channel_term; + + err = qlcnic_sriov_get_vf_acl(adapter); + if (err) + goto err_out_send_channel_term; + + err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); + if (err) + goto err_out_send_channel_term; + + pci_set_drvdata(adapter->pdev, adapter); + dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", + adapter->netdev->name); + + qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, + adapter->ahw->idc.delay); + return 0; + +err_out_send_channel_term: + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + +err_out_disable_bc_intr: + qlcnic_sriov_cfg_bc_intr(adapter, 0); + +err_out_cleanup_sriov: + __qlcnic_sriov_cleanup(adapter); + +err_out_disable_mbx_intr: + qlcnic_83xx_free_mbx_intr(adapter); + +err_out_disable_msi: + qlcnic_teardown_intr(adapter); + return err; +} + +static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter) +{ + u32 state; + + do { + msleep(20); + if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT) + return -EIO; + state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); + } while (state != QLC_83XX_IDC_DEV_READY); + + return 0; +} + +int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err; + + set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status); + ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + ahw->reset_context = 0; + adapter->fw_fail_cnt = 0; + ahw->msix_supported = 1; + adapter->need_fw_reset = 0; + 
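+ /* VF bring-up: wait for the firmware IDC state to reach READY, then set up interrupts, the 83xx mailbox, the SR-IOV back-channel and the CHANNEL_INIT handshake with the PF, and register the netdev before scheduling the device-state poll. */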
adapter->flags |= QLCNIC_TX_INTR_SHARED; + + err = qlcnic_sriov_check_dev_ready(adapter); + if (err) + return err; + + err = qlcnic_sriov_setup_vf(adapter, pci_using_dac); + if (err) + return err; + + if (qlcnic_read_mac_addr(adapter)) + dev_warn(&adapter->pdev->dev, "failed to read mac addr\n"); + + INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return 0; +} + +void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + + ahw->op_mode = QLCNIC_SRIOV_VF_FUNC; + dev_info(&adapter->pdev->dev, + "HAL Version: %d Non Privileged SRIOV function\n", + ahw->fw_hal_version); + adapter->nic_ops = &qlcnic_sriov_vf_ops; + set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); + return; +} + +void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw) +{ + ahw->hw_ops = &qlcnic_sriov_vf_hw_ops; + ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl; + ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl; +} + +static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag) +{ + u32 pay_size; + + pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ); + + if (pay_size) + pay_size = QLC_BC_PAYLOAD_SZ; + else + pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ; + + return pay_size; +} + +int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func) +{ + struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info; + u8 i; + + if (qlcnic_sriov_vf_check(adapter)) + return 0; + + for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) { + if (vf_info[i].pci_func == pci_func) + return i; + } + + return -EINVAL; +} + +static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans) +{ + *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC); + if (!*trans) + return -ENOMEM; + + init_completion(&(*trans)->resp_cmpl); + return 0; +} + +static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr, + u32 size) +{ + *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC); + if (!*hdr) + return -ENOMEM; + + return 0; +} + +static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type) +{ + const struct qlcnic_mailbox_metadata *mbx_tbl; + int i, size; + + mbx_tbl = qlcnic_sriov_bc_mbx_tbl; + size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl); + + for (i = 0; i < size; i++) { + if (type == mbx_tbl[i].cmd) { + mbx->op_type = QLC_BC_CMD; + mbx->req.num = mbx_tbl[i].in_args; + mbx->rsp.num = mbx_tbl[i].out_args; + mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->req.arg) + return -ENOMEM; + mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), + GFP_ATOMIC); + if (!mbx->rsp.arg) { + kfree(mbx->req.arg); + mbx->req.arg = NULL; + return -ENOMEM; + } + memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); + memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); + mbx->req.arg[0] = (type | (mbx->req.num << 16) | + (3 << 29)); + mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16; + return 0; + } + } + return -EINVAL; +} + +static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd, + u16 seq, u8 msg_type) +{ + struct qlcnic_bc_hdr *hdr; + int i; + u32 num_regs, bc_pay_sz; + u16 remainder; + u8 cmd_op, num_frags, t_num_frags; + + bc_pay_sz = QLC_BC_PAYLOAD_SZ; + if (msg_type == QLC_BC_COMMAND) { + trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg; + trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg; + num_regs = cmd->req.num; + trans->req_pay_size = 
(num_regs * 4); + num_regs = cmd->rsp.num; + trans->rsp_pay_size = (num_regs * 4); + cmd_op = cmd->req.arg[0] & 0xff; + remainder = (trans->req_pay_size) % (bc_pay_sz); + num_frags = (trans->req_pay_size) / (bc_pay_sz); + if (remainder) + num_frags++; + t_num_frags = num_frags; + if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags)) + return -ENOMEM; + remainder = (trans->rsp_pay_size) % (bc_pay_sz); + num_frags = (trans->rsp_pay_size) / (bc_pay_sz); + if (remainder) + num_frags++; + if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags)) + return -ENOMEM; + num_frags = t_num_frags; + hdr = trans->req_hdr; + } else { + cmd->req.arg = (u32 *)trans->req_pay; + cmd->rsp.arg = (u32 *)trans->rsp_pay; + cmd_op = cmd->req.arg[0] & 0xff; + cmd->cmd_op = cmd_op; + remainder = (trans->rsp_pay_size) % (bc_pay_sz); + num_frags = (trans->rsp_pay_size) / (bc_pay_sz); + if (remainder) + num_frags++; + cmd->req.num = trans->req_pay_size / 4; + cmd->rsp.num = trans->rsp_pay_size / 4; + hdr = trans->rsp_hdr; + cmd->op_type = trans->req_hdr->op_type; + } + + trans->trans_id = seq; + trans->cmd_id = cmd_op; + for (i = 0; i < num_frags; i++) { + hdr[i].version = 2; + hdr[i].msg_type = msg_type; + hdr[i].op_type = cmd->op_type; + hdr[i].num_cmds = 1; + hdr[i].num_frags = num_frags; + hdr[i].frag_num = i + 1; + hdr[i].cmd_op = cmd_op; + hdr[i].seq_id = seq; + } + return 0; +} + +static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans) +{ + if (!trans) + return; + kfree(trans->req_hdr); + kfree(trans->rsp_hdr); + kfree(trans); +} + +static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf, + struct qlcnic_bc_trans *trans, u8 type) +{ + struct qlcnic_trans_list *t_list; + unsigned long flags; + int ret = 0; + + if (type == QLC_BC_RESPONSE) { + t_list = &vf->rcv_act; + spin_lock_irqsave(&t_list->lock, flags); + t_list->count--; + list_del(&trans->list); + if (t_list->count > 0) + ret = 1; + spin_unlock_irqrestore(&t_list->lock, flags); + } + if (type == QLC_BC_COMMAND) { + while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) + msleep(100); + vf->send_cmd = NULL; + clear_bit(QLC_BC_VF_SEND, &vf->state); + } + return ret; +} + +static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + work_func_t func) +{ + if (test_bit(QLC_BC_VF_FLR, &vf->state) || + vf->adapter->need_fw_reset) + return; + + queue_work(sriov->bc.bc_trans_wq, &vf->trans_work); +} + +static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans) +{ + struct completion *cmpl = &trans->resp_cmpl; + + if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT)) + trans->trans_state = QLC_END; + else + trans->trans_state = QLC_ABORT; + + return; +} + +static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans, + u8 type) +{ + if (type == QLC_BC_RESPONSE) { + trans->curr_rsp_frag++; + if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags) + trans->trans_state = QLC_INIT; + else + trans->trans_state = QLC_END; + } else { + trans->curr_req_frag++; + if (trans->curr_req_frag < trans->req_hdr->num_frags) + trans->trans_state = QLC_INIT; + else + trans->trans_state = QLC_WAIT_FOR_RESP; + } +} + +static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans, + u8 type) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct completion *cmpl = &vf->ch_free_cmpl; + + if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) { + trans->trans_state = QLC_ABORT; + return; + } + + clear_bit(QLC_BC_VF_CHANNEL, &vf->state); + 
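+ /* Channel released by the peer: advance to the next request/response fragment, or mark the transaction as complete/waiting for a response. */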
qlcnic_sriov_handle_multi_frags(trans, type); +} + +static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter, + u32 *hdr, u32 *pay, u32 size) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 fw_mbx; + u8 i, max = 2, hdr_size, j; + + hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + max = (size / sizeof(u32)) + hdr_size; + + fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0)); + for (i = 2, j = 0; j < hdr_size; i++, j++) + *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i)); + for (; j < max; i++, j++) + *(pay++) = readl(QLCNIC_MBX_FW(ahw, i)); +} + +static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf) +{ + int ret = -EBUSY; + u32 timeout = 10000; + + do { + if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) { + ret = 0; + break; + } + mdelay(1); + } while (--timeout); + + return ret; +} + +static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) +{ + struct qlcnic_vf_info *vf = trans->vf; + u32 pay_size, hdr_size; + u32 *hdr, *pay; + int ret; + u8 pci_func = trans->func_id; + + if (__qlcnic_sriov_issue_bc_post(vf)) + return -EBUSY; + + if (type == QLC_BC_COMMAND) { + hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag); + pay = (u32 *)(trans->req_pay + trans->curr_req_frag); + hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, + trans->curr_req_frag); + pay_size = (pay_size / sizeof(u32)); + } else { + hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag); + pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag); + hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); + pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, + trans->curr_rsp_frag); + pay_size = (pay_size / sizeof(u32)); + } + + ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay, + pci_func, pay_size); + return ret; +} + +static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans, + struct qlcnic_vf_info *vf, u8 type) +{ + bool flag = true; + int err = -EIO; + + while (flag) { + if (test_bit(QLC_BC_VF_FLR, &vf->state) || + vf->adapter->need_fw_reset) + trans->trans_state = QLC_ABORT; + + switch (trans->trans_state) { + case QLC_INIT: + trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE; + if (qlcnic_sriov_issue_bc_post(trans, type)) + trans->trans_state = QLC_ABORT; + break; + case QLC_WAIT_FOR_CHANNEL_FREE: + qlcnic_sriov_wait_for_channel_free(trans, type); + break; + case QLC_WAIT_FOR_RESP: + qlcnic_sriov_wait_for_resp(trans); + break; + case QLC_END: + err = 0; + flag = false; + break; + case QLC_ABORT: + err = -EIO; + flag = false; + clear_bit(QLC_BC_VF_CHANNEL, &vf->state); + break; + default: + err = -EIO; + flag = false; + } + } + return err; +} + +static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, int pci_func) +{ + struct qlcnic_vf_info *vf; + int err, index = qlcnic_sriov_func_to_index(adapter, pci_func); + + if (index < 0) + return -EIO; + + vf = &adapter->ahw->sriov->vf_info[index]; + trans->vf = vf; + trans->func_id = pci_func; + + if (!test_bit(QLC_BC_VF_STATE, &vf->state)) { + if (qlcnic_sriov_pf_check(adapter)) + return -EIO; + if (qlcnic_sriov_vf_check(adapter) && + trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT) + return -EIO; + } + + mutex_lock(&vf->send_cmd_lock); + vf->send_cmd = trans; + err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND); + qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND); + mutex_unlock(&vf->send_cmd_lock); + return err; +} + +static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter, + 
struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ +#ifdef CONFIG_QLCNIC_SRIOV + if (qlcnic_sriov_pf_check(adapter)) { + qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd); + return; + } +#endif + cmd->rsp.arg[0] |= (0x9 << 25); + return; +} + +static void qlcnic_sriov_process_bc_cmd(struct work_struct *work) +{ + struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info, + trans_work); + struct qlcnic_bc_trans *trans = NULL; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_cmd_args cmd; + u8 req; + + if (adapter->need_fw_reset) + return; + + if (test_bit(QLC_BC_VF_FLR, &vf->state)) + return; + + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); + trans = list_first_entry(&vf->rcv_act.wait_list, + struct qlcnic_bc_trans, list); + adapter = vf->adapter; + + if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id, + QLC_BC_RESPONSE)) + goto cleanup_trans; + + __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd); + trans->trans_state = QLC_INIT; + __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE); + +cleanup_trans: + qlcnic_free_mbx_args(&cmd); + req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE); + qlcnic_sriov_cleanup_transaction(trans); + if (req) + qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf, + qlcnic_sriov_process_bc_cmd); +} + +static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_bc_trans *trans; + u32 pay_size; + + if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) + return; + + trans = vf->send_cmd; + + if (trans == NULL) + goto clear_send; + + if (trans->trans_id != hdr->seq_id) + goto clear_send; + + pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, + trans->curr_rsp_frag); + qlcnic_sriov_pull_bc_msg(vf->adapter, + (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag), + (u32 *)(trans->rsp_pay + trans->curr_rsp_frag), + pay_size); + if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags) + goto clear_send; + + complete(&trans->resp_cmpl); + +clear_send: + clear_bit(QLC_BC_VF_SEND, &vf->state); +} + +int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + struct qlcnic_bc_trans *trans) +{ + struct qlcnic_trans_list *t_list = &vf->rcv_act; + + t_list->count++; + list_add_tail(&trans->list, &t_list->wait_list); + if (t_list->count == 1) + qlcnic_sriov_schedule_bc_cmd(sriov, vf, + qlcnic_sriov_process_bc_cmd); + return 0; +} + +static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + struct qlcnic_bc_trans *trans) +{ + struct qlcnic_trans_list *t_list = &vf->rcv_act; + + spin_lock(&t_list->lock); + + __qlcnic_sriov_add_act_list(sriov, vf, trans); + + spin_unlock(&t_list->lock); + return 0; +} + +static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + struct qlcnic_bc_hdr *hdr) +{ + struct qlcnic_bc_trans *trans = NULL; + struct list_head *node; + u32 pay_size, curr_frag; + u8 found = 0, active = 0; + + spin_lock(&vf->rcv_pend.lock); + if (vf->rcv_pend.count > 0) { + list_for_each(node, &vf->rcv_pend.wait_list) { + trans = list_entry(node, struct qlcnic_bc_trans, list); + if (trans->trans_id == hdr->seq_id) { + found = 1; + break; + } + } + } + + if (found) { + curr_frag = trans->curr_req_frag; + pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, + curr_frag); + qlcnic_sriov_pull_bc_msg(vf->adapter, + (u32 *)(trans->req_hdr + curr_frag), + (u32 *)(trans->req_pay + curr_frag), + pay_size); + trans->curr_req_frag++; + if 
(trans->curr_req_frag >= hdr->num_frags) { + vf->rcv_pend.count--; + list_del(&trans->list); + active = 1; + } + } + spin_unlock(&vf->rcv_pend.lock); + + if (active) + if (qlcnic_sriov_add_act_list(sriov, vf, trans)) + qlcnic_sriov_cleanup_transaction(trans); + + return; +} + +static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov, + struct qlcnic_bc_hdr *hdr, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_bc_trans *trans; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_cmd_args cmd; + u32 pay_size; + int err; + u8 cmd_op; + + if (adapter->need_fw_reset) + return; + + if (!test_bit(QLC_BC_VF_STATE, &vf->state) && + hdr->op_type != QLC_BC_CMD && + hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT) + return; + + if (hdr->frag_num > 1) { + qlcnic_sriov_handle_pending_trans(sriov, vf, hdr); + return; + } + + memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); + cmd_op = hdr->cmd_op; + if (qlcnic_sriov_alloc_bc_trans(&trans)) + return; + + if (hdr->op_type == QLC_BC_CMD) + err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op); + else + err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op); + + if (err) { + qlcnic_sriov_cleanup_transaction(trans); + return; + } + + cmd.op_type = hdr->op_type; + if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id, + QLC_BC_COMMAND)) { + qlcnic_free_mbx_args(&cmd); + qlcnic_sriov_cleanup_transaction(trans); + return; + } + + pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, + trans->curr_req_frag); + qlcnic_sriov_pull_bc_msg(vf->adapter, + (u32 *)(trans->req_hdr + trans->curr_req_frag), + (u32 *)(trans->req_pay + trans->curr_req_frag), + pay_size); + trans->func_id = vf->pci_func; + trans->vf = vf; + trans->trans_id = hdr->seq_id; + trans->curr_req_frag++; + + if (qlcnic_sriov_soft_flr_check(adapter, trans, vf)) + return; + + if (trans->curr_req_frag == trans->req_hdr->num_frags) { + if (qlcnic_sriov_add_act_list(sriov, vf, trans)) { + qlcnic_free_mbx_args(&cmd); + qlcnic_sriov_cleanup_transaction(trans); + } + } else { + spin_lock(&vf->rcv_pend.lock); + list_add_tail(&trans->list, &vf->rcv_pend.wait_list); + vf->rcv_pend.count++; + spin_unlock(&vf->rcv_pend.lock); + } +} + +static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_bc_hdr hdr; + u32 *ptr = (u32 *)&hdr; + u8 msg_type, i; + + for (i = 2; i < 6; i++) + ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i)); + msg_type = hdr.msg_type; + + switch (msg_type) { + case QLC_BC_COMMAND: + qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf); + break; + case QLC_BC_RESPONSE: + qlcnic_sriov_handle_bc_resp(&hdr, vf); + break; + } +} + +static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_adapter *adapter = vf->adapter; + + if (qlcnic_sriov_pf_check(adapter)) + qlcnic_sriov_pf_handle_flr(sriov, vf); + else + dev_err(&adapter->pdev->dev, + "Invalid event to VF. 
VF should not get FLR event\n"); +} + +void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event) +{ + struct qlcnic_vf_info *vf; + struct qlcnic_sriov *sriov; + int index; + u8 pci_func; + + sriov = adapter->ahw->sriov; + pci_func = qlcnic_sriov_target_func_id(event); + index = qlcnic_sriov_func_to_index(adapter, pci_func); + + if (index < 0) + return; + + vf = &sriov->vf_info[index]; + vf->pci_func = pci_func; + + if (qlcnic_sriov_channel_free_check(event)) + complete(&vf->ch_free_cmpl); + + if (qlcnic_sriov_flr_check(event)) { + qlcnic_sriov_handle_flr_event(sriov, vf); + return; + } + + if (qlcnic_sriov_bc_msg_check(event)) + qlcnic_sriov_handle_msg_event(sriov, vf); +} + +int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable) +{ + struct qlcnic_cmd_args cmd; + int err; + + if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state)) + return 0; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP)) + return -ENOMEM; + + if (enable) + cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); + + err = qlcnic_83xx_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to %s bc events, err=%d\n", + (enable ? "enable" : "disable"), err); + } + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans) +{ + u8 max = QLC_BC_CMD_MAX_RETRY_CNT; + u32 state; + + state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); + if (state == QLC_83XX_IDC_DEV_READY) { + msleep(20); + clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state); + trans->trans_state = QLC_INIT; + if (++adapter->fw_fail_cnt > max) + return -EIO; + else + return 0; + } + + return -EIO; +} + +static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; + struct device *dev = &adapter->pdev->dev; + struct qlcnic_bc_trans *trans; + int err; + u32 rsp_data, opcode, mbx_err_code, rsp; + u16 seq = ++adapter->ahw->sriov->bc.trans_counter; + u8 func = ahw->pci_func; + + rsp = qlcnic_sriov_alloc_bc_trans(&trans); + if (rsp) + goto free_cmd; + + rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); + if (rsp) + goto cleanup_transaction; + +retry: + if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) { + rsp = -EIO; + QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", + QLCNIC_MBX_RSP(cmd->req.arg[0]), func); + goto err_out; + } + + err = qlcnic_sriov_send_bc_cmd(adapter, trans, func); + if (err) { + dev_err(dev, "MBX command 0x%x timed out for VF %d\n", + (cmd->req.arg[0] & 0xffff), func); + rsp = QLCNIC_RCODE_TIMEOUT; + + /* After adapter reset PF driver may take some time to + * respond to VF's request. Retry request till maximum retries. 
+ */ + if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) && + !qlcnic_sriov_retry_bc_cmd(adapter, trans)) + goto retry; + + goto err_out; + } + + rsp_data = cmd->rsp.arg[0]; + mbx_err_code = QLCNIC_MBX_STATUS(rsp_data); + opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]); + + if ((mbx_err_code == QLCNIC_MBX_RSP_OK) || + (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) { + rsp = QLCNIC_RCODE_SUCCESS; + } else { + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { + rsp = QLCNIC_RCODE_SUCCESS; + } else { + rsp = mbx_err_code; + if (!rsp) + rsp = 1; + + dev_err(dev, + "MBX command 0x%x failed with err:0x%x for VF %d\n", + opcode, mbx_err_code, func); + } + } + +err_out: + if (rsp == QLCNIC_RCODE_TIMEOUT) { + ahw->reset_context = 1; + adapter->need_fw_reset = 1; + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + } + +cleanup_transaction: + qlcnic_sriov_cleanup_transaction(trans); + +free_cmd: + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); + } + + return rsp; +} + + +static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) + return qlcnic_sriov_async_issue_cmd(adapter, cmd); + else + return __qlcnic_sriov_issue_cmd(adapter, cmd); +} + +static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op) +{ + struct qlcnic_cmd_args cmd; + struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0]; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op)) + return -ENOMEM; + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) { + dev_err(&adapter->pdev->dev, + "Failed bc channel %s %d\n", cmd_op ? "term" : "init", + ret); + goto out; + } + + cmd_op = (cmd.rsp.arg[0] & 0xff); + if (cmd.rsp.arg[0] >> 25 == 2) + return 2; + if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) + set_bit(QLC_BC_VF_STATE, &vf->state); + else + clear_bit(QLC_BC_VF_STATE, &vf->state); + +out: + qlcnic_free_mbx_args(&cmd); + return ret; +} + +static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, + enum qlcnic_mac_type mac_type) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf; + u16 vlan_id; + int i; + + vf = &adapter->ahw->sriov->vf_info[0]; + + if (!qlcnic_sriov_check_any_vlan(vf)) { + qlcnic_nic_add_mac(adapter, mac, 0, mac_type); + } else { + spin_lock(&vf->vlan_list_lock); + for (i = 0; i < sriov->num_allowed_vlans; i++) { + vlan_id = vf->sriov_vlans[i]; + if (vlan_id) + qlcnic_nic_add_mac(adapter, mac, vlan_id, + mac_type); + } + spin_unlock(&vf->vlan_list_lock); + if (qlcnic_84xx_check(adapter)) + qlcnic_nic_add_mac(adapter, mac, 0, mac_type); + } +} + +void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) +{ + struct list_head *head = &bc->async_list; + struct qlcnic_async_work_list *entry; + + flush_workqueue(bc->bc_async_wq); + while (!list_empty(head)) { + entry = list_entry(head->next, struct qlcnic_async_work_list, + list); + cancel_work_sync(&entry->work); + list_del(&entry->list); + kfree(entry); + } +} + +void qlcnic_sriov_vf_set_multi(struct net_device *netdev) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_hardware_context *ahw = adapter->ahw; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + struct netdev_hw_addr *ha; + u32 mode = VPORT_MISS_MODE_DROP; + + if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) + return; + + if (netdev->flags & IFF_PROMISC) { 
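+ /* Accept-all mode is requested only when promiscuous mode has not been disabled for this function. */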
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) + mode = VPORT_MISS_MODE_ACCEPT_ALL; + } else if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > ahw->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + } else { + qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC); + if (!netdev_mc_empty(netdev)) { + qlcnic_flush_mcast_mac(adapter); + netdev_for_each_mc_addr(ha, netdev) + qlcnic_vf_add_mc_list(netdev, ha->addr, + QLCNIC_MULTICAST_MAC); + } + } + + /* configure unicast MAC address, if there is not sufficient space + * to store all the unicast addresses then enable promiscuous mode + */ + if (netdev_uc_count(netdev) > ahw->max_uc_count) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + } else if (!netdev_uc_empty(netdev)) { + netdev_for_each_uc_addr(ha, netdev) + qlcnic_vf_add_mc_list(netdev, ha->addr, + QLCNIC_UNICAST_MAC); + } + + if (adapter->pdev->is_virtfn) { + if (mode == VPORT_MISS_MODE_ACCEPT_ALL && + !adapter->fdb_mac_learn) { + qlcnic_alloc_lb_filters_mem(adapter); + adapter->drv_mac_learn = 1; + adapter->rx_mac_learn = true; + } else { + adapter->drv_mac_learn = 0; + adapter->rx_mac_learn = false; + } + } + + qlcnic_nic_set_promisc(adapter, mode); +} + +static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) +{ + struct qlcnic_async_work_list *entry; + struct qlcnic_adapter *adapter; + struct qlcnic_cmd_args *cmd; + + entry = container_of(work, struct qlcnic_async_work_list, work); + adapter = entry->ptr; + cmd = entry->cmd; + __qlcnic_sriov_issue_cmd(adapter, cmd); + return; +} + +static struct qlcnic_async_work_list * +qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) +{ + struct list_head *node; + struct qlcnic_async_work_list *entry = NULL; + u8 empty = 0; + + list_for_each(node, &bc->async_list) { + entry = list_entry(node, struct qlcnic_async_work_list, list); + if (!work_pending(&entry->work)) { + empty = 1; + break; + } + } + + if (!empty) { + entry = kzalloc(sizeof(struct qlcnic_async_work_list), + GFP_ATOMIC); + if (entry == NULL) + return NULL; + list_add_tail(&entry->list, &bc->async_list); + } + + return entry; +} + +static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, + work_func_t func, void *data, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_async_work_list *entry = NULL; + + entry = qlcnic_sriov_get_free_node_async_work(bc); + if (!entry) + return; + + entry->ptr = data; + entry->cmd = cmd; + INIT_WORK(&entry->work, func); + queue_work(bc->bc_async_wq, &entry->work); +} + +static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_cmd_args *cmd) +{ + + struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; + + if (adapter->need_fw_reset) + return -EIO; + + qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, + adapter, cmd); + return 0; +} + +static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) +{ + int err; + + adapter->need_fw_reset = 0; + qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); + qlcnic_83xx_enable_mbx_interrupt(adapter); + + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + return err; + + err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); + if (err) + goto err_out_cleanup_bc_intr; + + err = qlcnic_sriov_vf_init_driver(adapter); + if (err) + goto err_out_term_channel; + + return 0; + +err_out_term_channel: + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + +err_out_cleanup_bc_intr: + qlcnic_sriov_cfg_bc_intr(adapter, 0); + return err; 
+} + +static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (netif_running(netdev)) { + if (!qlcnic_up(adapter, netdev)) + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + + netif_device_attach(netdev); +} + +static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl; + struct net_device *netdev = adapter->netdev; + u8 i, max_ints = ahw->num_msix - 1; + + netif_device_detach(netdev); + qlcnic_83xx_detach_mailbox_work(adapter); + qlcnic_83xx_disable_mbx_intr(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + for (i = 0; i < max_ints; i++) { + intr_tbl[i].id = i; + intr_tbl[i].enabled = 0; + intr_tbl[i].src = 0; + } + ahw->reset_context = 0; +} + +static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct device *dev = &adapter->pdev->dev; + struct qlc_83xx_idc *idc = &ahw->idc; + u8 func = ahw->pci_func; + u32 state; + + if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) || + (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) { + if (!qlcnic_sriov_vf_reinit_driver(adapter)) { + qlcnic_sriov_vf_attach(adapter); + adapter->fw_fail_cnt = 0; + dev_info(dev, + "%s: Reinitialization of VF 0x%x done after FW reset\n", + __func__, func); + } else { + dev_err(dev, + "%s: Reinitialization of VF 0x%x failed after FW reset\n", + __func__, func); + state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE); + dev_info(dev, "Current state 0x%x after FW reset\n", + state); + } + } + + return 0; +} + +static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_mailbox *mbx = ahw->mailbox; + struct device *dev = &adapter->pdev->dev; + struct qlc_83xx_idc *idc = &ahw->idc; + u8 func = ahw->pci_func; + u32 state; + + adapter->reset_ctx_cnt++; + + /* Skip the context reset and check if FW is hung */ + if (adapter->reset_ctx_cnt < 3) { + adapter->need_fw_reset = 1; + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + dev_info(dev, + "Resetting context, wait here to check if FW is in failed state\n"); + return 0; + } + + /* Check if number of resets exceed the threshold. + * If it exceeds the threshold just fail the VF. 
+ */ + if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) { + clear_bit(QLC_83XX_MODULE_LOADED, &idc->status); + adapter->tx_timeo_cnt = 0; + adapter->fw_fail_cnt = 0; + adapter->reset_ctx_cnt = 0; + qlcnic_sriov_vf_detach(adapter); + dev_err(dev, + "Device context resets have exceeded the threshold, device interface will be shutdown\n"); + return -EIO; + } + + dev_info(dev, "Resetting context of VF 0x%x\n", func); + dev_info(dev, "%s: Context reset count %d for VF 0x%x\n", + __func__, adapter->reset_ctx_cnt, func); + set_bit(__QLCNIC_RESETTING, &adapter->state); + adapter->need_fw_reset = 1; + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_sriov_vf_detach(adapter); + adapter->need_fw_reset = 0; + + if (!qlcnic_sriov_vf_reinit_driver(adapter)) { + qlcnic_sriov_vf_attach(adapter); + adapter->tx_timeo_cnt = 0; + adapter->reset_ctx_cnt = 0; + adapter->fw_fail_cnt = 0; + dev_info(dev, "Done resetting context for VF 0x%x\n", func); + } else { + dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n", + __func__, func); + state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE); + dev_info(dev, "%s: Current state 0x%x\n", __func__, state); + } + + return 0; +} + +static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int ret = 0; + + if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) + ret = qlcnic_sriov_vf_handle_dev_ready(adapter); + else if (ahw->reset_context) + ret = qlcnic_sriov_vf_handle_context_reset(adapter); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return ret; +} + +static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_idc *idc = &adapter->ahw->idc; + + dev_err(&adapter->pdev->dev, "Device is in failed state\n"); + if (idc->prev_state == QLC_83XX_IDC_DEV_READY) + qlcnic_sriov_vf_detach(adapter); + + clear_bit(QLC_83XX_MODULE_LOADED, &idc->status); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return -EIO; +} + +static int +qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + struct qlc_83xx_idc *idc = &adapter->ahw->idc; + + dev_info(&adapter->pdev->dev, "Device is in quiescent state\n"); + if (idc->prev_state == QLC_83XX_IDC_DEV_READY) { + set_bit(__QLCNIC_RESETTING, &adapter->state); + adapter->tx_timeo_cnt = 0; + adapter->reset_ctx_cnt = 0; + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_sriov_vf_detach(adapter); + } + + return 0; +} + +static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; + struct qlc_83xx_idc *idc = &adapter->ahw->idc; + u8 func = adapter->ahw->pci_func; + + if (idc->prev_state == QLC_83XX_IDC_DEV_READY) { + dev_err(&adapter->pdev->dev, + "Firmware hang detected by VF 0x%x\n", func); + set_bit(__QLCNIC_RESETTING, &adapter->state); + adapter->tx_timeo_cnt = 0; + adapter->reset_ctx_cnt = 0; + clear_bit(QLC_83XX_MBX_READY, &mbx->status); + qlcnic_sriov_vf_detach(adapter); + } + return 0; +} + +static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter) +{ + dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__); + return 0; +} + +static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter) +{ + if (adapter->fhash.fnum) + qlcnic_prune_lb_filters(adapter); +} + +static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work) +{ + struct qlcnic_adapter *adapter; + struct qlc_83xx_idc *idc; + int ret = 0; + + 
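+ /* Read the current IDC device state and dispatch to the matching handler (READY, NEED_RESET/INIT, NEED_QUISCENT, FAILED or unknown); reschedule the poll while the module stays loaded and no handler reported failure. */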
adapter = container_of(work, struct qlcnic_adapter, fw_work.work); + idc = &adapter->ahw->idc; + idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); + + switch (idc->curr_state) { + case QLC_83XX_IDC_DEV_READY: + ret = qlcnic_sriov_vf_idc_ready_state(adapter); + break; + case QLC_83XX_IDC_DEV_NEED_RESET: + case QLC_83XX_IDC_DEV_INIT: + ret = qlcnic_sriov_vf_idc_init_reset_state(adapter); + break; + case QLC_83XX_IDC_DEV_NEED_QUISCENT: + ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter); + break; + case QLC_83XX_IDC_DEV_FAILED: + ret = qlcnic_sriov_vf_idc_failed_state(adapter); + break; + case QLC_83XX_IDC_DEV_QUISCENT: + break; + default: + ret = qlcnic_sriov_vf_idc_unknown_state(adapter); + } + + idc->prev_state = idc->curr_state; + qlcnic_sriov_vf_periodic_tasks(adapter); + + if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status)) + qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, + idc->delay); +} + +static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter) +{ + while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + msleep(20); + + clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); + clear_bit(__QLCNIC_RESETTING, &adapter->state); + cancel_delayed_work_sync(&adapter->fw_work); +} + +static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, u16 vlan_id) +{ + int i, err = -EINVAL; + + if (!vf->sriov_vlans) + return err; + + spin_lock_bh(&vf->vlan_list_lock); + + for (i = 0; i < sriov->num_allowed_vlans; i++) { + if (vf->sriov_vlans[i] == vlan_id) { + err = 0; + break; + } + } + + spin_unlock_bh(&vf->vlan_list_lock); + return err; +} + +static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf) +{ + int err = 0; + + spin_lock_bh(&vf->vlan_list_lock); + + if (vf->num_vlan >= sriov->num_allowed_vlans) + err = -EINVAL; + + spin_unlock_bh(&vf->vlan_list_lock); + return err; +} + +static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter, + u16 vid, u8 enable) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf; + bool vlan_exist; + u8 allowed = 0; + int i; + + vf = &adapter->ahw->sriov->vf_info[0]; + vlan_exist = qlcnic_sriov_check_any_vlan(vf); + if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE) + return -EINVAL; + + if (enable) { + if (qlcnic_83xx_vf_check(adapter) && vlan_exist) + return -EINVAL; + + if (qlcnic_sriov_validate_num_vlans(sriov, vf)) + return -EINVAL; + + if (sriov->any_vlan) { + for (i = 0; i < sriov->num_allowed_vlans; i++) { + if (sriov->allowed_vlans[i] == vid) + allowed = 1; + } + + if (!allowed) + return -EINVAL; + } + } else { + if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid)) + return -EINVAL; + } + + return 0; +} + +static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id, + enum qlcnic_vlan_operations opcode) +{ + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_sriov *sriov; + + sriov = adapter->ahw->sriov; + + if (!vf->sriov_vlans) + return; + + spin_lock_bh(&vf->vlan_list_lock); + + switch (opcode) { + case QLC_VLAN_ADD: + qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id); + break; + case QLC_VLAN_DELETE: + qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id); + break; + default: + netdev_err(adapter->netdev, "Invalid VLAN operation\n"); + } + + spin_unlock_bh(&vf->vlan_list_lock); + return; +} + +int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter, + u16 vid, u8 enable) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + 
struct net_device *netdev = adapter->netdev; + struct qlcnic_vf_info *vf; + struct qlcnic_cmd_args cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + if (vid == 0) + return 0; + + vf = &adapter->ahw->sriov->vf_info[0]; + ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable); + if (ret) + return ret; + + ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, + QLCNIC_BC_CMD_CFG_GUEST_VLAN); + if (ret) + return ret; + + cmd.req.arg[1] = (enable & 1) | vid << 16; + + qlcnic_sriov_cleanup_async_list(&sriov->bc); + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) { + dev_err(&adapter->pdev->dev, + "Failed to configure guest VLAN, err=%d\n", ret); + } else { + netif_addr_lock_bh(netdev); + qlcnic_free_mac_list(adapter); + netif_addr_unlock_bh(netdev); + + if (enable) + qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD); + else + qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE); + + netif_addr_lock_bh(netdev); + qlcnic_set_multi(netdev); + netif_addr_unlock_bh(netdev); + } + + qlcnic_free_mbx_args(&cmd); + return ret; +} + +static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter) +{ + struct list_head *head = &adapter->mac_list; + struct qlcnic_mac_vlan_list *cur; + + while (!list_empty(head)) { + cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id, + QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + } +} + + +static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + int retval; + + netif_device_detach(netdev); + qlcnic_cancel_idc_work(adapter); + + if (netif_running(netdev)) + qlcnic_down(adapter, netdev); + + qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); + qlcnic_sriov_cfg_bc_intr(adapter, 0); + qlcnic_83xx_disable_mbx_intr(adapter); + cancel_delayed_work_sync(&adapter->idc_aen_work); + + retval = pci_save_state(pdev); + if (retval) + return retval; + + return 0; +} + +static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_idc *idc = &adapter->ahw->idc; + struct net_device *netdev = adapter->netdev; + int err; + + set_bit(QLC_83XX_MODULE_LOADED, &idc->status); + qlcnic_83xx_enable_mbx_interrupt(adapter); + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + return err; + + err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); + if (!err) { + if (netif_running(netdev)) { + err = qlcnic_up(adapter, netdev); + if (!err) + qlcnic_restore_indev_addr(netdev, NETDEV_UP); + } + } + + netif_device_attach(netdev); + qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, + idc->delay); + return err; +} + +void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf; + int i; + + for (i = 0; i < sriov->num_vfs; i++) { + vf = &sriov->vf_info[i]; + vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans, + sizeof(*vf->sriov_vlans), GFP_KERNEL); + } +} + +void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf; + int i; + + for (i = 0; i < sriov->num_vfs; i++) { + vf = &sriov->vf_info[i]; + kfree(vf->sriov_vlans); + vf->sriov_vlans = NULL; + } +} + +void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, u16 vlan_id) +{ + int i; + + for (i = 0; i < sriov->num_allowed_vlans; i++) { + if (!vf->sriov_vlans[i]) { + 
vf->sriov_vlans[i] = vlan_id; + vf->num_vlan++; + return; + } + } +} + +void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, u16 vlan_id) +{ + int i; + + for (i = 0; i < sriov->num_allowed_vlans; i++) { + if (vf->sriov_vlans[i] == vlan_id) { + vf->sriov_vlans[i] = 0; + vf->num_vlan--; + return; + } + } +} + +bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf) +{ + bool err = false; + + spin_lock_bh(&vf->vlan_list_lock); + + if (vf->num_vlan) + err = true; + + spin_unlock_bh(&vf->vlan_list_lock); + return err; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c new file mode 100644 index 000000000..a29538b86 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -0,0 +1,2047 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. + */ + +#include "qlcnic_sriov.h" +#include "qlcnic.h" +#include + +#define QLCNIC_SRIOV_VF_MAX_MAC 7 +#define QLC_VF_MIN_TX_RATE 100 +#define QLC_VF_MAX_TX_RATE 9999 +#define QLC_MAC_OPCODE_MASK 0x7 +#define QLC_VF_FLOOD_BIT BIT_16 +#define QLC_FLOOD_MODE 0x5 +#define QLC_SRIOV_ALLOW_VLAN0 BIT_19 +#define QLC_INTR_COAL_TYPE_MASK 0x7 + +static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); + +struct qlcnic_sriov_cmd_handler { + int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); +}; + +struct qlcnic_sriov_fw_cmd_handler { + u32 cmd; + int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *); +}; + +static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info, + u16 vport_id) +{ + struct qlcnic_cmd_args cmd; + int err; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO)) + return -ENOMEM; + + cmd.req.arg[1] = (vport_id << 16) | 0x1; + cmd.req.arg[2] = npar_info->bit_offsets; + cmd.req.arg[2] |= npar_info->min_tx_bw << 16; + cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16); + cmd.req.arg[4] = npar_info->max_tx_mac_filters; + cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16; + cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters | + (npar_info->max_rx_ip_addr << 16); + cmd.req.arg[6] = npar_info->max_rx_lro_flow | + (npar_info->max_rx_status_rings << 16); + cmd.req.arg[7] = npar_info->max_rx_buf_rings | + (npar_info->max_rx_ques << 16); + cmd.req.arg[8] = npar_info->max_tx_vlan_keys; + cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16; + cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to set vport info, err=%d\n", err); + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter, + struct qlcnic_info *info, u16 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_resources *res = &sriov->ff_max; + u16 num_macs = sriov->num_allowed_vlans + 1; + int ret = -EIO, vpid, id; + struct qlcnic_vport *vp; + u32 num_vfs, max, temp; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); + if (vpid < 0) + return -EINVAL; + + num_vfs = sriov->num_vfs; + max = num_vfs + 1; + info->bit_offsets = 0xffff; + info->max_tx_ques = res->num_tx_queues / max; + + if (qlcnic_83xx_pf_check(adapter)) + num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC; + + info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; + + if (adapter->ahw->pci_func == func) 
{ + info->min_tx_bw = 0; + info->max_tx_bw = MAX_BW; + + temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs; + info->max_rx_ucast_mac_filters = temp; + temp = res->num_tx_mac_filters - num_macs * num_vfs; + info->max_tx_mac_filters = temp; + temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC; + temp = res->num_rx_mcast_mac_filters - temp; + info->max_rx_mcast_mac_filters = temp; + + info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; + } else { + id = qlcnic_sriov_func_to_index(adapter, func); + if (id < 0) + return id; + vp = sriov->vf_info[id].vp; + info->min_tx_bw = vp->min_tx_bw; + info->max_tx_bw = vp->max_tx_bw; + + info->max_rx_ucast_mac_filters = num_macs; + info->max_tx_mac_filters = num_macs; + temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC; + info->max_rx_mcast_mac_filters = temp; + + info->max_tx_ques = QLCNIC_SINGLE_RING; + } + + info->max_rx_ip_addr = res->num_destip / max; + info->max_rx_status_rings = res->num_rx_status_rings / max; + info->max_rx_buf_rings = res->num_rx_buf_rings / max; + info->max_rx_ques = res->num_rx_queues / max; + info->max_rx_lro_flow = res->num_lro_flows_supported / max; + info->max_tx_vlan_keys = res->num_txvlan_keys; + info->max_local_ipv6_addrs = res->max_local_ipv6_addrs; + info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs; + + ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid); + if (ret) + return ret; + + return 0; +} + +static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter, + struct qlcnic_info *info) +{ + struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max; + + ff_max->num_tx_mac_filters = info->max_tx_mac_filters; + ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters; + ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters; + ff_max->num_txvlan_keys = info->max_tx_vlan_keys; + ff_max->num_rx_queues = info->max_rx_ques; + ff_max->num_tx_queues = info->max_tx_ques; + ff_max->num_lro_flows_supported = info->max_rx_lro_flow; + ff_max->num_destip = info->max_rx_ip_addr; + ff_max->num_rx_buf_rings = info->max_rx_buf_rings; + ff_max->num_rx_status_rings = info->max_rx_status_rings; + ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs; + ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs; +} + +static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + int temp, total_fn; + + temp = npar_info->max_rx_mcast_mac_filters; + total_fn = sriov->num_vfs + 1; + + temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn); + sriov->num_allowed_vlans = temp - 1; + + if (qlcnic_83xx_pf_check(adapter)) + sriov->num_allowed_vlans = 1; + + netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n", + sriov->num_allowed_vlans); +} + +static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter, + struct qlcnic_info *npar_info) +{ + int err; + struct qlcnic_cmd_args cmd; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO)) + return -ENOMEM; + + cmd.req.arg[1] = 0x2; + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) { + dev_err(&adapter->pdev->dev, + "Failed to get PF info, err=%d\n", err); + goto out; + } + + npar_info->total_pf = cmd.rsp.arg[2] & 0xff; + npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff; + npar_info->max_vports = MSW(cmd.rsp.arg[2]); + npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]); + npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]); + npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]); + 
npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]); + npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]); + npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]); + npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]); + npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]); + npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]); + npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]); + npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]); + npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]); + + qlcnic_sriov_set_vf_max_vlan(adapter, npar_info); + qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info); + dev_info(&adapter->pdev->dev, + "\n\ttotal_pf: %d,\n" + "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n" + "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n" + "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n" + "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n" + "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n" + "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n", + npar_info->total_pf, npar_info->total_rss_engines, + npar_info->max_vports, npar_info->max_tx_ques, + npar_info->max_tx_mac_filters, + npar_info->max_rx_mcast_mac_filters, + npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr, + npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings, + npar_info->max_rx_buf_rings, npar_info->max_rx_ques, + npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs, + npar_info->max_remote_ipv6_addrs); + +out: + qlcnic_free_mbx_args(&cmd); + return err; +} + +static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter, + u8 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vport *vp; + int index; + + if (adapter->ahw->pci_func == func) { + sriov->vp_handle = 0; + } else { + index = qlcnic_sriov_func_to_index(adapter, func); + if (index < 0) + return; + vp = sriov->vf_info[index].vp; + vp->handle = 0; + } +} + +static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter, + u16 vport_handle, u8 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vport *vp; + int index; + + if (adapter->ahw->pci_func == func) { + sriov->vp_handle = vport_handle; + } else { + index = qlcnic_sriov_func_to_index(adapter, func); + if (index < 0) + return; + vp = sriov->vf_info[index].vp; + vp->handle = vport_handle; + } +} + +static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter, + u8 func) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf_info; + int index; + + if (adapter->ahw->pci_func == func) { + return sriov->vp_handle; + } else { + index = qlcnic_sriov_func_to_index(adapter, func); + if (index >= 0) { + vf_info = &sriov->vf_info[index]; + return vf_info->vp->handle; + } + } + + return -EINVAL; +} + +static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter, + u8 flag, u16 func) +{ + struct qlcnic_cmd_args cmd; + int ret; + int vpid; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT)) + return -ENOMEM; + + if (flag) { + cmd.req.arg[3] = func << 8; + } else { + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func); + if (vpid < 0) { + ret = -EINVAL; + goto out; + } + cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1; + } + + ret = qlcnic_issue_cmd(adapter, &cmd); + if (ret) { + dev_err(&adapter->pdev->dev, + "Failed %s vport, err %d for func 0x%x\n", + (flag ? 
"enable" : "disable"), ret, func); + goto out; + } + + if (flag) { + vpid = cmd.rsp.arg[2] & 0xffff; + qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func); + } else { + qlcnic_sriov_pf_reset_vport_handle(adapter, func); + } + +out: + qlcnic_free_mbx_args(&cmd); + return ret; +} + +static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter, + u8 enable) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + return err; + + cmd.req.arg[1] = 0x4; + if (enable) { + adapter->flags |= QLCNIC_VLAN_FILTERING; + cmd.req.arg[1] |= BIT_16; + if (qlcnic_84xx_check(adapter)) + cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0; + } else { + adapter->flags &= ~QLCNIC_VLAN_FILTERING; + } + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to configure VLAN filtering, err=%d\n", err); + + qlcnic_free_mbx_args(&cmd); + return err; +} + +/* On configuring VF flood bit, PFD will receive traffic from all VFs */ +static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + return err; + + cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "Failed to configure VF Flood bit on PF, err=%d\n", + err); + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter, + u8 func, u8 enable) +{ + struct qlcnic_cmd_args cmd; + int err = -EIO; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH)) + return -ENOMEM; + + cmd.req.arg[0] |= (3 << 29); + cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1; + if (enable) + cmd.req.arg[1] |= BIT_0; + + err = qlcnic_issue_cmd(adapter, &cmd); + + if (err != QLCNIC_RCODE_SUCCESS) { + dev_err(&adapter->pdev->dev, + "Failed to enable sriov eswitch%d\n", err); + err = -EIO; + } + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_back_channel *bc = &sriov->bc; + int i; + + for (i = 0; i < sriov->num_vfs; i++) + cancel_work_sync(&sriov->vf_info[i].flr_work); + + destroy_workqueue(bc->bc_flr_wq); +} + +static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter) +{ + struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; + struct workqueue_struct *wq; + + wq = create_singlethread_workqueue("qlcnic-flr"); + if (wq == NULL) { + dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n"); + return -ENOMEM; + } + + bc->bc_flr_wq = wq; + return 0; +} + +void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) +{ + u8 func = adapter->ahw->pci_func; + + if (!qlcnic_sriov_enable_check(adapter)) + return; + + qlcnic_sriov_pf_del_flr_queue(adapter); + qlcnic_sriov_cfg_bc_intr(adapter, 0); + qlcnic_sriov_pf_config_vport(adapter, 0, func); + qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); + qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0); + __qlcnic_sriov_cleanup(adapter); + adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; + clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); +} + +void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) +{ + if (!qlcnic_sriov_pf_check(adapter)) + return; + + if (!qlcnic_sriov_enable_check(adapter)) + return; + + pci_disable_sriov(adapter->pdev); + netdev_info(adapter->netdev, + 
"SR-IOV is disabled successfully on port %d\n", + adapter->portnum); +} + +static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (pci_vfs_assigned(adapter->pdev)) { + netdev_err(adapter->netdev, + "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n", + adapter->portnum); + netdev_info(adapter->netdev, + "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n", + adapter->portnum); + return -EPERM; + } + + qlcnic_sriov_pf_disable(adapter); + + rtnl_lock(); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + qlcnic_sriov_free_vlans(adapter); + + qlcnic_sriov_pf_cleanup(adapter); + + /* After disabling SRIOV re-init the driver in default mode + configure opmode based on op_mode of function + */ + if (qlcnic_83xx_configure_opmode(adapter)) { + rtnl_unlock(); + return -EIO; + } + + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); + + rtnl_unlock(); + return 0; +} + +static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_info nic_info, pf_info, vp_info; + int err; + u8 func = ahw->pci_func; + + if (!qlcnic_sriov_enable_check(adapter)) + return 0; + + err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1); + if (err) + return err; + + if (qlcnic_84xx_check(adapter)) { + err = qlcnic_sriov_pf_cfg_flood(adapter); + if (err) + goto disable_vlan_filtering; + } + + err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1); + if (err) + goto disable_vlan_filtering; + + err = qlcnic_sriov_pf_config_vport(adapter, 1, func); + if (err) + goto disable_eswitch; + + err = qlcnic_sriov_get_pf_info(adapter, &pf_info); + if (err) + goto delete_vport; + + err = qlcnic_get_nic_info(adapter, &nic_info, func); + if (err) + goto delete_vport; + + err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func); + if (err) + goto delete_vport; + + err = qlcnic_sriov_cfg_bc_intr(adapter, 1); + if (err) + goto delete_vport; + + ahw->physical_port = (u8) nic_info.phys_port; + ahw->switch_mode = nic_info.switch_mode; + ahw->max_mtu = nic_info.max_mtu; + ahw->capabilities = nic_info.capabilities; + ahw->nic_mode = QLC_83XX_SRIOV_MODE; + return err; + +delete_vport: + qlcnic_sriov_pf_config_vport(adapter, 0, func); + +disable_eswitch: + qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0); + +disable_vlan_filtering: + qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0); + + return err; +} + +static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs) +{ + int err; + + if (!qlcnic_sriov_enable_check(adapter)) + return 0; + + err = pci_enable_sriov(adapter->pdev, num_vfs); + if (err) + qlcnic_sriov_pf_cleanup(adapter); + + return err; +} + +static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, + int num_vfs) +{ + int err = 0; + + set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); + adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC; + + err = qlcnic_sriov_init(adapter, num_vfs); + if (err) + goto clear_op_mode; + + err = qlcnic_sriov_pf_create_flr_queue(adapter); + if (err) + goto sriov_cleanup; + + err = qlcnic_sriov_pf_init(adapter); + if (err) + goto del_flr_queue; + + qlcnic_sriov_alloc_vlans(adapter); + + return err; + +del_flr_queue: + qlcnic_sriov_pf_del_flr_queue(adapter); + +sriov_cleanup: + __qlcnic_sriov_cleanup(adapter); + +clear_op_mode: + clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); + adapter->ahw->op_mode = QLCNIC_MGMT_FUNC; + return err; 
+} + +static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { + netdev_err(netdev, + "SR-IOV cannot be enabled, when legacy interrupts are enabled\n"); + return -EIO; + } + + rtnl_lock(); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + + err = __qlcnic_pci_sriov_enable(adapter, num_vfs); + if (err) + goto error; + + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); + + rtnl_unlock(); + err = qlcnic_sriov_pf_enable(adapter, num_vfs); + if (!err) { + netdev_info(netdev, + "SR-IOV is enabled successfully on port %d\n", + adapter->portnum); + /* Return number of vfs enabled */ + return num_vfs; + } + + rtnl_lock(); + if (netif_running(netdev)) + __qlcnic_down(adapter, netdev); + +error: + if (!qlcnic_83xx_configure_opmode(adapter)) { + if (netif_running(netdev)) + __qlcnic_up(adapter, netdev); + } + + rtnl_unlock(); + netdev_info(netdev, "Failed to enable SR-IOV on port %d\n", + adapter->portnum); + + return err; +} + +int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct qlcnic_adapter *adapter = pci_get_drvdata(dev); + int err; + + if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EBUSY; + + if (num_vfs == 0) + err = qlcnic_pci_sriov_disable(adapter); + else + err = qlcnic_pci_sriov_enable(adapter, num_vfs); + + clear_bit(__QLCNIC_RESETTING, &adapter->state); + return err; +} + +static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func) +{ + struct qlcnic_cmd_args cmd; + struct qlcnic_vport *vp; + int err, id; + u8 *mac; + + id = qlcnic_sriov_func_to_index(adapter, func); + if (id < 0) + return id; + + vp = adapter->ahw->sriov->vf_info[id].vp; + err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO); + if (err) + return err; + + cmd.req.arg[1] = 0x3 | func << 16; + if (vp->spoofchk == true) { + mac = vp->mac; + cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8; + cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 | + mac[2] << 24; + cmd.req.arg[5] = mac[1] | mac[0] << 8; + } + + if (vp->vlan_mode == QLC_PVID_MODE) { + cmd.req.arg[2] |= BIT_6; + cmd.req.arg[3] |= vp->pvid << 8; + } + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n", + err); + + qlcnic_free_mbx_args(&cmd); + return err; +} + +static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter, + u16 func) +{ + struct qlcnic_info defvp_info; + int err; + + err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func); + if (err) + return -EIO; + + err = qlcnic_sriov_set_vf_acl(adapter, func); + if (err) + return err; + + return 0; +} + +static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_vport *vp = vf->vp; + struct qlcnic_adapter *adapter; + struct qlcnic_sriov *sriov; + u16 func = vf->pci_func; + size_t size; + int err; + + adapter = vf->adapter; + sriov = adapter->ahw->sriov; + + if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) { + err = qlcnic_sriov_pf_config_vport(adapter, 1, func); + if (!err) { + err = qlcnic_sriov_set_vf_vport_info(adapter, func); + if (err) + qlcnic_sriov_pf_config_vport(adapter, 0, func); + } + } else { + if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) { + size = sizeof(*vf->sriov_vlans); + size = size * sriov->num_allowed_vlans; + memset(vf->sriov_vlans, 0, size); + } + + err = 
qlcnic_sriov_pf_config_vport(adapter, 0, func); + } + + if (err) + goto err_out; + + cmd->rsp.arg[0] |= (1 << 25); + + if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) + set_bit(QLC_BC_VF_STATE, &vf->state); + else + clear_bit(QLC_BC_VF_STATE, &vf->state); + + return err; + +err_out: + cmd->rsp.arg[0] |= (2 << 25); + return err; +} + +static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + u16 vlan, u8 op) +{ + struct qlcnic_cmd_args *cmd; + struct qlcnic_macvlan_mbx mv; + struct qlcnic_vport *vp; + u8 *addr; + int err; + u32 *buf; + int vpid; + + vp = vf->vp; + + cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + if (!cmd) + return -ENOMEM; + + err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); + if (err) + goto free_cmd; + + cmd->type = QLC_83XX_MBX_CMD_NO_WAIT; + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); + if (vpid < 0) { + err = -EINVAL; + goto free_args; + } + + if (vlan) + op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? + QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL); + + cmd->req.arg[1] = op | (1 << 8) | (3 << 6); + cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31; + + addr = vp->mac; + mv.vlan = vlan; + mv.mac_addr0 = addr[0]; + mv.mac_addr1 = addr[1]; + mv.mac_addr2 = addr[2]; + mv.mac_addr3 = addr[3]; + mv.mac_addr4 = addr[4]; + mv.mac_addr5 = addr[5]; + buf = &cmd->req.arg[2]; + memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); + + err = qlcnic_issue_cmd(adapter, cmd); + + if (!err) + return err; + +free_args: + qlcnic_free_mbx_args(cmd); +free_cmd: + kfree(cmd); + return err; +} + +static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + return 0; +} + +static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + int opcode) +{ + struct qlcnic_sriov *sriov; + u16 vlan; + int i; + + sriov = adapter->ahw->sriov; + + spin_lock_bh(&vf->vlan_list_lock); + if (vf->num_vlan) { + for (i = 0; i < sriov->num_allowed_vlans; i++) { + vlan = vf->sriov_vlans[i]; + if (vlan) + qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, + opcode); + } + } + spin_unlock_bh(&vf->vlan_list_lock); + + if (vf->vp->vlan_mode != QLC_PVID_MODE) { + if (qlcnic_83xx_pf_check(adapter) && + qlcnic_sriov_check_any_vlan(vf)) + return; + qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode); + } +} + +static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = tran->vf; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_rcv_mbx_out *mbx_out; + int err; + + err = qlcnic_sriov_validate_create_rx_ctx(cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + cmd->req.arg[6] = vf->vp->handle; + err = qlcnic_issue_cmd(adapter, cmd); + + if (!err) { + mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1]; + vf->rx_ctx_id = mbx_out->ctx_id; + qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD); + } else { + vf->rx_ctx_id = 0; + } + + return err; +} + +static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + u8 type, *mac; + + type = cmd->req.arg[1]; + switch (type) { + case QLCNIC_SET_STATION_MAC: + case QLCNIC_SET_FAC_DEF_MAC: + cmd->rsp.arg[0] = (2 << 25); + break; + case QLCNIC_GET_CURRENT_MAC: + cmd->rsp.arg[0] = (1 << 25); + mac = vf->vp->mac; + cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) 
& 0xff00); + cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) | + ((mac[3]) << 16 & 0xff0000) | + ((mac[2]) << 24 & 0xff000000); + } + + return 0; +} + +static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_tx_mbx_out *mbx_out; + int err; + + err = qlcnic_sriov_validate_create_tx_ctx(cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + cmd->req.arg[5] |= vf->vp->handle << 16; + err = qlcnic_issue_cmd(adapter, cmd); + if (!err) { + mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2]; + vf->tx_ctx_id = mbx_out->ctx_id; + } else { + vf->tx_ctx_id = 0; + } + + return err; +} + +static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL); + cmd->req.arg[1] |= vf->vp->handle << 16; + err = qlcnic_issue_cmd(adapter, cmd); + + if (!err) + vf->rx_ctx_id = 0; + + return err; +} + +static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[0] >> 29) != 0x3) + return -EINVAL; + + if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + cmd->req.arg[1] |= vf->vp->handle << 16; + err = qlcnic_issue_cmd(adapter, cmd); + + if (!err) + vf->tx_ctx_id = 0; + + return err; +} + +static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_lro(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err = -EIO; + u8 op; + + op = cmd->req.arg[1] & 0xff; + + cmd->req.arg[1] |= vf->vp->handle << 16; + cmd->req.arg[1] |= BIT_31; + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func) + return 
-EINVAL; + + if (!(cmd->req.arg[1] & BIT_16)) + return -EINVAL; + + if ((cmd->req.arg[1] & 0xff) != 0x1) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd); + if (err) + cmd->rsp.arg[0] |= (0x6 << 25); + else + err = qlcnic_issue_cmd(adapter, cmd); + + return err; +} + +static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (cmd->req.arg[1] != vf->rx_ctx_id) + return -EINVAL; + + if (cmd->req.arg[2] > adapter->ahw->max_mtu) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_mtu(adapter, vf, cmd); + if (err) + cmd->rsp.arg[0] |= (0x6 << 25); + else + err = qlcnic_issue_cmd(adapter, cmd); + + return err; +} + +static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (cmd->req.arg[1] & BIT_31) { + if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func) + return -EINVAL; + } else { + cmd->req.arg[1] |= vf->vp->handle << 16; + } + + return 0; +} + +static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_get_nic_info(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if (cmd->req.arg[1] != vf->rx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_rss(vf, cmd); + if (err) + cmd->rsp.arg[0] |= (0x6 << 25); + else + err = qlcnic_issue_cmd(adapter, cmd); + + return err; +} + +static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal; + u16 ctx_id, pkts, time; + int err = -EINVAL; + u8 type; + + type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK; + ctx_id = cmd->req.arg[1] >> 16; + pkts = cmd->req.arg[2] & 0xffff; + time = cmd->req.arg[2] >> 16; + + switch (type) { + case QLCNIC_INTR_COAL_TYPE_RX: + if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets || + time < coal->rx_time_us) + goto err_label; + break; + case QLCNIC_INTR_COAL_TYPE_TX: + if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets || + time < coal->tx_time_us) + goto err_label; + break; + default: + netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n", + type); + return err; + } + + return 0; + +err_label: + netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n", + vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us, + vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us); + netdev_err(adapter->netdev, 
"Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n", + ctx_id, pkts, time, type); + + return err; +} + +static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = tran->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vport *vp = vf->vp; + u8 op, new_op; + + if (!(cmd->req.arg[1] & BIT_8)) + return -EINVAL; + + cmd->req.arg[1] |= (vf->vp->handle << 16); + cmd->req.arg[1] |= BIT_31; + + if (vp->vlan_mode == QLC_PVID_MODE) { + op = cmd->req.arg[1] & 0x7; + cmd->req.arg[1] &= ~0x7; + new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ? + QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL; + cmd->req.arg[3] |= vp->pvid << 16; + cmd->req.arg[1] |= new_op; + } + + return 0; +} + +static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) + return -EINVAL; + + return 0; +} + +static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + err = qlcnic_sriov_validate_linkevent(vf, cmd); + if (err) { + cmd->rsp.arg[0] |= (0x6 << 25); + return err; + } + + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_adapter *adapter = vf->adapter; + int err; + + cmd->req.arg[1] |= vf->vp->handle << 16; + cmd->req.arg[1] |= BIT_31; + err = qlcnic_issue_cmd(adapter, cmd); + return err; +} + +static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = trans->vf; + struct qlcnic_vport *vp = vf->vp; + u8 cmd_op, mode = vp->vlan_mode; + struct qlcnic_adapter *adapter; + struct qlcnic_sriov *sriov; + + adapter = vf->adapter; + sriov = adapter->ahw->sriov; + + cmd_op = trans->req_hdr->cmd_op; + cmd->rsp.arg[0] |= 1 << 25; + + /* For 84xx adapter in case of PVID , PFD should send vlan mode as + * QLC_NO_VLAN_MODE to VFD which is zero in mailbox response + */ + if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE) + return 0; + + switch (mode) { + case QLC_GUEST_VLAN_MODE: + cmd->rsp.arg[1] = mode | 1 << 8; + cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16; + break; + case QLC_PVID_MODE: + cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16; + break; + } + + return 0; +} + +static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + u16 vlan; + + if 
(!qlcnic_sriov_check_any_vlan(vf)) + return -EINVAL; + + vlan = cmd->req.arg[1] >> 16; + if (!vf->rx_ctx_id) { + qlcnic_sriov_del_vlan_id(sriov, vf, vlan); + return 0; + } + + qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL); + qlcnic_sriov_del_vlan_id(sriov, vf, vlan); + + if (qlcnic_83xx_pf_check(adapter)) + qlcnic_sriov_cfg_vf_def_mac(adapter, vf, + 0, QLCNIC_MAC_ADD); + return 0; +} + +static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + int err = -EIO; + u16 vlan; + + if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf)) + return err; + + vlan = cmd->req.arg[1] >> 16; + + if (!vf->rx_ctx_id) { + qlcnic_sriov_add_vlan_id(sriov, vf, vlan); + return 0; + } + + if (qlcnic_83xx_pf_check(adapter)) { + err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, + QLCNIC_MAC_DEL); + if (err) + return err; + } + + err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD); + + if (err) { + if (qlcnic_83xx_pf_check(adapter)) + qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, + QLCNIC_MAC_ADD); + return err; + } + + qlcnic_sriov_add_vlan_id(sriov, vf, vlan); + return err; +} + +static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran, + struct qlcnic_cmd_args *cmd) +{ + struct qlcnic_vf_info *vf = tran->vf; + struct qlcnic_adapter *adapter = vf->adapter; + struct qlcnic_vport *vp = vf->vp; + int err = -EIO; + u8 op; + + if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) { + cmd->rsp.arg[0] |= 2 << 25; + return err; + } + + op = cmd->req.arg[1] & 0xf; + + if (op) + err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd); + else + err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd); + + cmd->rsp.arg[0] |= err ? 
2 << 25 : 1 << 25; + return err; +} + +static const int qlcnic_pf_passthru_supp_cmds[] = { + QLCNIC_CMD_GET_STATISTICS, + QLCNIC_CMD_GET_PORT_CONFIG, + QLCNIC_CMD_GET_LINK_STATUS, + QLCNIC_CMD_INIT_NIC_FUNC, + QLCNIC_CMD_STOP_NIC_FUNC, +}; + +static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = { + [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd}, + [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd}, + [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd}, + [QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd}, +}; + +static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = { + {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd}, + {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd}, + {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd}, + {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd}, + {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd}, + {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd}, + {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd}, + {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd}, + {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd}, + {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd}, + {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd}, + {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd}, + {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd}, + {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd}, + {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd}, +}; + +void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, + struct qlcnic_cmd_args *cmd) +{ + u8 size, cmd_op; + + cmd_op = trans->req_hdr->cmd_op; + + if (trans->req_hdr->op_type == QLC_BC_CMD) { + size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr); + if (cmd_op < size) { + qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd); + return; + } + } else { + int i; + size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr); + for (i = 0; i < size; i++) { + if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) { + qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd); + return; + } + } + + size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds); + for (i = 0; i < size; i++) { + if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) { + qlcnic_issue_cmd(adapter, cmd); + return; + } + } + } + + cmd->rsp.arg[0] |= (0x9 << 25); +} + +void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= vpid; +} + +void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= vpid << 16; +} + +void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + int vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= vpid << 16; +} + +void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= vpid << 16; +} + +void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= (vpid << 16) | BIT_31; +} + +void 
qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= (vpid << 16) | BIT_31; +} + +void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, + u32 *int_id) +{ + u16 vpid; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, + adapter->ahw->pci_func); + *int_id |= (vpid << 16) | BIT_31; +} + +static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_cmd_args cmd; + int vpid; + + if (!vf->rx_ctx_id) + return; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX)) + return; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); + if (vpid >= 0) { + cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16; + if (qlcnic_issue_cmd(adapter, &cmd)) + dev_err(&adapter->pdev->dev, + "Failed to delete Tx ctx in firmware for func 0x%x\n", + vf->pci_func); + else + vf->rx_ctx_id = 0; + } + + qlcnic_free_mbx_args(&cmd); +} + +static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_cmd_args cmd; + int vpid; + + if (!vf->tx_ctx_id) + return; + + if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX)) + return; + + vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func); + if (vpid >= 0) { + cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16; + if (qlcnic_issue_cmd(adapter, &cmd)) + dev_err(&adapter->pdev->dev, + "Failed to delete Tx ctx in firmware for func 0x%x\n", + vf->pci_func); + else + vf->tx_ctx_id = 0; + } + + qlcnic_free_mbx_args(&cmd); +} + +static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + struct qlcnic_bc_trans *trans) +{ + struct qlcnic_trans_list *t_list = &vf->rcv_act; + unsigned long flag; + + spin_lock_irqsave(&t_list->lock, flag); + + __qlcnic_sriov_add_act_list(sriov, vf, trans); + + spin_unlock_irqrestore(&t_list->lock, flag); + return 0; +} + +static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf) +{ + struct qlcnic_adapter *adapter = vf->adapter; + + qlcnic_sriov_cleanup_list(&vf->rcv_pend); + cancel_work_sync(&vf->trans_work); + qlcnic_sriov_cleanup_list(&vf->rcv_act); + + if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) { + qlcnic_sriov_del_tx_ctx(adapter, vf); + qlcnic_sriov_del_rx_ctx(adapter, vf); + } + + qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func); + + clear_bit(QLC_BC_VF_FLR, &vf->state); + if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) { + qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf, + vf->flr_trans); + clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state); + vf->flr_trans = NULL; + } +} + +static void qlcnic_sriov_pf_process_flr(struct work_struct *work) +{ + struct qlcnic_vf_info *vf; + + vf = container_of(work, struct qlcnic_vf_info, flr_work); + __qlcnic_sriov_process_flr(vf); + return; +} + +static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf, + work_func_t func) +{ + if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state)) + return; + + INIT_WORK(&vf->flr_work, func); + queue_work(sriov->bc.bc_flr_wq, &vf->flr_work); +} + +static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + + set_bit(QLC_BC_VF_FLR, &vf->state); + clear_bit(QLC_BC_VF_STATE, &vf->state); + set_bit(QLC_BC_VF_SOFT_FLR, &vf->state); + 
vf->flr_trans = trans; + qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); + netdev_info(adapter->netdev, "Software FLR for PCI func %d\n", + vf->pci_func); +} + +bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter, + struct qlcnic_bc_trans *trans, + struct qlcnic_vf_info *vf) +{ + struct qlcnic_bc_hdr *hdr = trans->req_hdr; + + if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) && + (hdr->op_type == QLC_BC_CMD) && + test_bit(QLC_BC_VF_STATE, &vf->state)) { + qlcnic_sriov_handle_soft_flr(adapter, trans, vf); + return true; + } + + return false; +} + +void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov, + struct qlcnic_vf_info *vf) +{ + struct net_device *dev = vf->adapter->netdev; + struct qlcnic_vport *vp = vf->vp; + + if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) { + clear_bit(QLC_BC_VF_FLR, &vf->state); + return; + } + + if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) { + netdev_info(dev, "FLR for PCI func %d in progress\n", + vf->pci_func); + return; + } + + if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) + memset(vf->sriov_vlans, 0, + sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans); + + qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr); + netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func); +} + +void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_sriov *sriov = ahw->sriov; + struct qlcnic_vf_info *vf; + u16 num_vfs = sriov->num_vfs; + int i; + + for (i = 0; i < num_vfs; i++) { + vf = &sriov->vf_info[i]; + vf->rx_ctx_id = 0; + vf->tx_ctx_id = 0; + cancel_work_sync(&vf->flr_work); + __qlcnic_sriov_process_flr(vf); + clear_bit(QLC_BC_VF_STATE, &vf->state); + } + + qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func); + QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8); +} + +int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err; + + if (!qlcnic_sriov_enable_check(adapter)) + return 0; + + ahw->op_mode = QLCNIC_SRIOV_PF_FUNC; + + err = qlcnic_sriov_pf_init(adapter); + if (err) + return err; + + dev_info(&adapter->pdev->dev, "%s: op_mode %d\n", + __func__, ahw->op_mode); + return err; +} + +int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + int i, num_vfs; + struct qlcnic_vf_info *vf_info; + u8 *curr_mac; + + if (!qlcnic_sriov_pf_check(adapter)) + return -EOPNOTSUPP; + + num_vfs = sriov->num_vfs; + + if (!is_valid_ether_addr(mac) || vf >= num_vfs) + return -EINVAL; + + if (ether_addr_equal(adapter->mac_addr, mac)) { + netdev_err(netdev, "MAC address is already in use by the PF\n"); + return -EINVAL; + } + + for (i = 0; i < num_vfs; i++) { + vf_info = &sriov->vf_info[i]; + if (ether_addr_equal(vf_info->vp->mac, mac)) { + netdev_err(netdev, + "MAC address is already in use by VF %d\n", + i); + return -EINVAL; + } + } + + vf_info = &sriov->vf_info[vf]; + curr_mac = vf_info->vp->mac; + + if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { + netdev_err(netdev, + "MAC address change failed for VF %d, as VF driver is loaded. 
Please unload VF driver and retry the operation\n", + vf); + return -EOPNOTSUPP; + } + + memcpy(curr_mac, mac, netdev->addr_len); + netdev_info(netdev, "MAC Address %pM is configured for VF %d\n", + mac, vf); + return 0; +} + +int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf_info; + struct qlcnic_info nic_info; + struct qlcnic_vport *vp; + u16 vpid; + + if (!qlcnic_sriov_pf_check(adapter)) + return -EOPNOTSUPP; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + vf_info = &sriov->vf_info[vf]; + vp = vf_info->vp; + vpid = vp->handle; + + if (!min_tx_rate) + min_tx_rate = QLC_VF_MIN_TX_RATE; + + if (max_tx_rate && + (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) { + netdev_err(netdev, + "Invalid max Tx rate, allowed range is [%d - %d]", + min_tx_rate, QLC_VF_MAX_TX_RATE); + return -EINVAL; + } + + if (!max_tx_rate) + max_tx_rate = 10000; + + if (min_tx_rate && + (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) { + netdev_err(netdev, + "Invalid min Tx rate, allowed range is [%d - %d]", + QLC_VF_MIN_TX_RATE, max_tx_rate); + return -EINVAL; + } + + if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { + if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid)) + return -EIO; + + nic_info.max_tx_bw = max_tx_rate / 100; + nic_info.min_tx_bw = min_tx_rate / 100; + nic_info.bit_offsets = BIT_0; + + if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid)) + return -EIO; + } + + vp->max_tx_bw = max_tx_rate / 100; + netdev_info(netdev, + "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", + max_tx_rate, vp->max_tx_bw, vf); + vp->min_tx_bw = min_tx_rate / 100; + netdev_info(netdev, + "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n", + min_tx_rate, vp->min_tx_bw, vf); + return 0; +} + +int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf, + u16 vlan, u8 qos) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf_info; + struct qlcnic_vport *vp; + + if (!qlcnic_sriov_pf_check(adapter)) + return -EOPNOTSUPP; + + if (vf >= sriov->num_vfs || qos > 7) + return -EINVAL; + + if (vlan > MAX_VLAN_ID) { + netdev_err(netdev, + "Invalid VLAN ID, allowed range is [0 - %d]\n", + MAX_VLAN_ID); + return -EINVAL; + } + + vf_info = &sriov->vf_info[vf]; + vp = vf_info->vp; + if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { + netdev_err(netdev, + "VLAN change failed for VF %d, as VF driver is loaded. 
Please unload VF driver and retry the operation\n", + vf); + return -EOPNOTSUPP; + } + + memset(vf_info->sriov_vlans, 0, + sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans); + + switch (vlan) { + case 4095: + vp->vlan_mode = QLC_GUEST_VLAN_MODE; + break; + case 0: + vp->vlan_mode = QLC_NO_VLAN_MODE; + vp->qos = 0; + break; + default: + vp->vlan_mode = QLC_PVID_MODE; + qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan); + vp->qos = qos; + vp->pvid = vlan; + } + + netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n", + vlan, qos, vf); + return 0; +} + +static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter, + struct qlcnic_vport *vp, int vf) +{ + __u32 vlan = 0; + + switch (vp->vlan_mode) { + case QLC_PVID_MODE: + vlan = vp->pvid; + break; + case QLC_GUEST_VLAN_MODE: + vlan = MAX_VLAN_ID; + break; + case QLC_NO_VLAN_MODE: + vlan = 0; + break; + default: + netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n", + vp->vlan_mode, vf); + } + + return vlan; +} + +int qlcnic_sriov_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vport *vp; + + if (!qlcnic_sriov_pf_check(adapter)) + return -EOPNOTSUPP; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + vp = sriov->vf_info[vf].vp; + memcpy(&ivi->mac, vp->mac, ETH_ALEN); + ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf); + ivi->qos = vp->qos; + ivi->spoofchk = vp->spoofchk; + if (vp->max_tx_bw == MAX_BW) + ivi->max_tx_rate = 0; + else + ivi->max_tx_rate = vp->max_tx_bw * 100; + if (vp->min_tx_bw == MIN_BW) + ivi->min_tx_rate = 0; + else + ivi->min_tx_rate = vp->min_tx_bw * 100; + + ivi->vf = vf; + return 0; +} + +int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk) +{ + struct qlcnic_adapter *adapter = netdev_priv(netdev); + struct qlcnic_sriov *sriov = adapter->ahw->sriov; + struct qlcnic_vf_info *vf_info; + struct qlcnic_vport *vp; + + if (!qlcnic_sriov_pf_check(adapter)) + return -EOPNOTSUPP; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + vf_info = &sriov->vf_info[vf]; + vp = vf_info->vp; + if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) { + netdev_err(netdev, + "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n", + vf); + return -EOPNOTSUPP; + } + + vp->spoofchk = chk; + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c new file mode 100644 index 000000000..59a721fba --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -0,0 +1,1438 @@ +/* + * QLogic qlcnic NIC Driver + * Copyright (c) 2009-2013 QLogic Corporation + * + * See LICENSE.qlcnic for copyright and licensing details. 
+ */ + +#include +#include + +#include "qlcnic.h" +#include "qlcnic_hw.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_QLCNIC_HWMON +#include +#include +#endif + +#define QLC_STATUS_UNSUPPORTED_CMD -2 + +int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) +{ + return -EOPNOTSUPP; +} + +int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate) +{ + return -EOPNOTSUPP; +} + +static ssize_t qlcnic_store_bridged_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long new; + int ret = -EINVAL; + + if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)) + goto err_out; + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) + goto err_out; + + if (kstrtoul(buf, 2, &new)) + goto err_out; + + if (!qlcnic_config_bridged_mode(adapter, !!new)) + ret = len; + +err_out: + return ret; +} + +static ssize_t qlcnic_show_bridged_mode(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int bridged_mode = 0; + + if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG) + bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED); + + return sprintf(buf, "%d\n", bridged_mode); +} + +static ssize_t qlcnic_store_diag_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + unsigned long new; + + if (kstrtoul(buf, 2, &new)) + return -EINVAL; + + if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED)) + adapter->flags ^= QLCNIC_DIAG_ENABLED; + + return len; +} + +static ssize_t qlcnic_show_diag_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED)); +} + +static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, + u8 *state, u8 *rate) +{ + *rate = LSB(beacon); + *state = MSB(beacon); + + QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state); + + if (!*state) { + *rate = __QLCNIC_MAX_LED_RATE; + return 0; + } else if (*state > __QLCNIC_MAX_LED_STATE) { + return -EINVAL; + } + + if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE)) + return -EINVAL; + + return 0; +} + +static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter, + const char *buf, size_t len) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + unsigned long h_beacon; + int err; + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) + return -EIO; + + if (kstrtoul(buf, 2, &h_beacon)) + return -EINVAL; + + qlcnic_get_beacon_state(adapter); + + if (ahw->beacon_state == h_beacon) + return len; + + rtnl_lock(); + if (!ahw->beacon_state) { + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { + rtnl_unlock(); + return -EBUSY; + } + } + + if (h_beacon) + err = qlcnic_83xx_config_led(adapter, 1, h_beacon); + else + err = qlcnic_83xx_config_led(adapter, 0, !h_beacon); + if (!err) + ahw->beacon_state = h_beacon; + + if (!ahw->beacon_state) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + + rtnl_unlock(); + return len; +} + +static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, + const char *buf, size_t len) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int err, drv_sds_rings = adapter->drv_sds_rings; + u16 beacon; + u8 b_state, b_rate; + + if (len != sizeof(u16)) + return 
QL_STATUS_INVALID_PARAM; + + memcpy(&beacon, buf, sizeof(u16)); + err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate); + if (err) + return err; + + qlcnic_get_beacon_state(adapter); + + if (ahw->beacon_state == b_state) + return len; + + rtnl_lock(); + if (!ahw->beacon_state) { + if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) { + rtnl_unlock(); + return -EBUSY; + } + } + + if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { + err = -EIO; + goto out; + } + + if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) { + err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST); + if (err) + goto out; + set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state); + } + + err = qlcnic_config_led(adapter, b_state, b_rate); + if (!err) { + err = len; + ahw->beacon_state = b_state; + } + + if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) + qlcnic_diag_free_res(adapter->netdev, drv_sds_rings); + +out: + if (!ahw->beacon_state) + clear_bit(__QLCNIC_LED_ENABLE, &adapter->state); + rtnl_unlock(); + + return err; +} + +static ssize_t qlcnic_store_beacon(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int err = 0; + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { + dev_warn(dev, + "LED test not supported in non privileged mode\n"); + return -EOPNOTSUPP; + } + + if (qlcnic_82xx_check(adapter)) + err = qlcnic_82xx_store_beacon(adapter, buf, len); + else if (qlcnic_83xx_check(adapter)) + err = qlcnic_83xx_store_beacon(adapter, buf, len); + else + return -EIO; + + return err; +} + +static ssize_t qlcnic_show_beacon(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", adapter->ahw->beacon_state); +} + +static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, + loff_t offset, size_t size) +{ + size_t crb_size = 4; + + if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) + return -EIO; + + if (offset < QLCNIC_PCI_CRBSPACE) { + if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, + QLCNIC_PCI_CAMQM_END)) + crb_size = 8; + else + return -EINVAL; + } + + if ((size != crb_size) || (offset & (crb_size-1))) + return -EINVAL; + + return 0; +} + +static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + ret = qlcnic_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + qlcnic_read_crb(adapter, buf, offset, size); + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + + return size; +} + +static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + ret = qlcnic_sysfs_validate_crb(adapter, offset, size); + if (ret != 0) + return ret; + + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + qlcnic_write_crb(adapter, buf, offset, size); + return size; +} + +static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter, + loff_t offset, size_t size) +{ + if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) + return -EIO; + + if ((size != 8) || (offset & 0x7)) + return -EIO; + + return 0; +} + +static ssize_t 
qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = qlcnic_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + if (qlcnic_pci_mem_read_2M(adapter, offset, &data)) + return -EIO; + + memcpy(buf, &data, size); + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + + return size; +} + +static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + u64 data; + int ret; + + ret = qlcnic_sysfs_validate_mem(adapter, offset, size); + if (ret != 0) + return ret; + + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + memcpy(&data, buf, size); + + if (qlcnic_pci_mem_write_2M(adapter, offset, data)) + return -EIO; + + return size; +} + +int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) +{ + int i; + + for (i = 0; i < adapter->ahw->total_nic_func; i++) { + if (adapter->npars[i].pci_func == pci_func) + return i; + } + + dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__); + return -EINVAL; +} + +static int validate_pm_config(struct qlcnic_adapter *adapter, + struct qlcnic_pm_func_cfg *pm_cfg, int count) +{ + u8 src_pci_func, s_esw_id, d_esw_id; + u8 dest_pci_func; + int i, src_index, dest_index; + + for (i = 0; i < count; i++) { + src_pci_func = pm_cfg[i].pci_func; + dest_pci_func = pm_cfg[i].dest_npar; + src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func); + if (src_index < 0) + return QL_STATUS_INVALID_PARAM; + + dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func); + if (dest_index < 0) + return QL_STATUS_INVALID_PARAM; + + s_esw_id = adapter->npars[src_index].phy_port; + d_esw_id = adapter->npars[dest_index].phy_port; + + if (s_esw_id != d_esw_id) + return QL_STATUS_INVALID_PARAM; + } + + return 0; +} + +static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pm_func_cfg *pm_cfg; + u32 id, action, pci_func; + int count, rem, i, ret, index; + + count = size / sizeof(struct qlcnic_pm_func_cfg); + rem = size % sizeof(struct qlcnic_pm_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + pm_cfg = (struct qlcnic_pm_func_cfg *)buf; + ret = validate_pm_config(adapter, pm_cfg, count); + + if (ret) + return ret; + for (i = 0; i < count; i++) { + pci_func = pm_cfg[i].pci_func; + action = !!pm_cfg[i].action; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + if (index < 0) + return QL_STATUS_INVALID_PARAM; + + id = adapter->npars[index].phy_port; + ret = qlcnic_config_port_mirroring(adapter, id, + action, pci_func); + if (ret) + return ret; + } + + for (i = 0; i < count; i++) { + pci_func = pm_cfg[i].pci_func; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + if (index < 0) + return QL_STATUS_INVALID_PARAM; + id = adapter->npars[index].phy_port; + adapter->npars[index].enable_pm = !!pm_cfg[i].action; + adapter->npars[index].dest_npar = id; + } + + return size; +} + 
+static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pm_func_cfg *pm_cfg; + u8 pci_func; + u32 count; + int i; + + memset(buf, 0, size); + pm_cfg = (struct qlcnic_pm_func_cfg *)buf; + count = size / sizeof(struct qlcnic_pm_func_cfg); + for (i = 0; i < adapter->ahw->total_nic_func; i++) { + pci_func = adapter->npars[i].pci_func; + if (pci_func >= count) { + dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", + __func__, adapter->ahw->total_nic_func, count); + continue; + } + if (!adapter->npars[i].eswitch_status) + continue; + + pm_cfg[pci_func].action = adapter->npars[i].enable_pm; + pm_cfg[pci_func].dest_npar = 0; + pm_cfg[pci_func].pci_func = i; + } + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + return size; +} + +static int validate_esw_config(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg, int count) +{ + struct qlcnic_hardware_context *ahw = adapter->ahw; + int i, ret; + u32 op_mode; + u8 pci_func; + + if (qlcnic_82xx_check(adapter)) + op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE); + else + op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE); + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + if (pci_func >= ahw->max_vnic_func) + return QL_STATUS_INVALID_PARAM; + + if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) + if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) + return QL_STATUS_INVALID_PARAM; + + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + if (qlcnic_82xx_check(adapter)) { + ret = QLC_DEV_GET_DRV(op_mode, pci_func); + } else { + ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode, + pci_func); + esw_cfg[i].offload_flags = 0; + } + + if (ret != QLCNIC_NON_PRIV_FUNC) { + if (esw_cfg[i].mac_anti_spoof != 0) + return QL_STATUS_INVALID_PARAM; + if (esw_cfg[i].mac_override != 1) + return QL_STATUS_INVALID_PARAM; + if (esw_cfg[i].promisc_mode != 1) + return QL_STATUS_INVALID_PARAM; + } + break; + case QLCNIC_ADD_VLAN: + if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) + return QL_STATUS_INVALID_PARAM; + if (!esw_cfg[i].op_type) + return QL_STATUS_INVALID_PARAM; + break; + case QLCNIC_DEL_VLAN: + if (!esw_cfg[i].op_type) + return QL_STATUS_INVALID_PARAM; + break; + default: + return QL_STATUS_INVALID_PARAM; + } + } + + return 0; +} + +static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg *esw_cfg; + struct qlcnic_npar_info *npar; + int count, rem, i, ret; + int index; + u8 op_mode = 0, pci_func; + + count = size / sizeof(struct qlcnic_esw_func_cfg); + rem = size % sizeof(struct qlcnic_esw_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + esw_cfg = (struct qlcnic_esw_func_cfg *)buf; + ret = validate_esw_config(adapter, esw_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) + if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) + return QL_STATUS_INVALID_PARAM; + + if (adapter->ahw->pci_func != esw_cfg[i].pci_func) + continue; + + op_mode = esw_cfg[i].op_mode; + 
qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]); + esw_cfg[i].op_mode = op_mode; + esw_cfg[i].pci_func = adapter->ahw->pci_func; + + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); + rtnl_lock(); + qlcnic_set_netdev_features(adapter, &esw_cfg[i]); + rtnl_unlock(); + break; + case QLCNIC_ADD_VLAN: + qlcnic_set_vlan_config(adapter, &esw_cfg[i]); + break; + case QLCNIC_DEL_VLAN: + esw_cfg[i].vlan_id = 0; + qlcnic_set_vlan_config(adapter, &esw_cfg[i]); + break; + } + } + + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) + goto out; + + for (i = 0; i < count; i++) { + pci_func = esw_cfg[i].pci_func; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + if (index < 0) + return QL_STATUS_INVALID_PARAM; + npar = &adapter->npars[index]; + switch (esw_cfg[i].op_mode) { + case QLCNIC_PORT_DEFAULTS: + npar->promisc_mode = esw_cfg[i].promisc_mode; + npar->mac_override = esw_cfg[i].mac_override; + npar->offload_flags = esw_cfg[i].offload_flags; + npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof; + npar->discard_tagged = esw_cfg[i].discard_tagged; + break; + case QLCNIC_ADD_VLAN: + npar->pvid = esw_cfg[i].vlan_id; + break; + case QLCNIC_DEL_VLAN: + npar->pvid = 0; + break; + } + } +out: + return size; +} + +static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_func_cfg *esw_cfg; + u8 pci_func; + u32 count; + int i; + + memset(buf, 0, size); + esw_cfg = (struct qlcnic_esw_func_cfg *)buf; + count = size / sizeof(struct qlcnic_esw_func_cfg); + for (i = 0; i < adapter->ahw->total_nic_func; i++) { + pci_func = adapter->npars[i].pci_func; + if (pci_func >= count) { + dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", + __func__, adapter->ahw->total_nic_func, count); + continue; + } + if (!adapter->npars[i].eswitch_status) + continue; + + esw_cfg[pci_func].pci_func = pci_func; + if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) + return QL_STATUS_INVALID_PARAM; + } + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + return size; +} + +static int validate_npar_config(struct qlcnic_adapter *adapter, + struct qlcnic_npar_func_cfg *np_cfg, + int count) +{ + u8 pci_func, i; + + for (i = 0; i < count; i++) { + pci_func = np_cfg[i].pci_func; + if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) + return QL_STATUS_INVALID_PARAM; + + if (!IS_VALID_BW(np_cfg[i].min_bw) || + !IS_VALID_BW(np_cfg[i].max_bw)) + return QL_STATUS_INVALID_PARAM; + } + return 0; +} + +static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_info nic_info; + struct qlcnic_npar_func_cfg *np_cfg; + int i, count, rem, ret, index; + u8 pci_func; + + count = size / sizeof(struct qlcnic_npar_func_cfg); + rem = size % sizeof(struct qlcnic_npar_func_cfg); + if (rem) + return QL_STATUS_INVALID_PARAM; + + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + np_cfg = (struct qlcnic_npar_func_cfg *)buf; + ret = validate_npar_config(adapter, np_cfg, count); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + pci_func = np_cfg[i].pci_func; + + 
memset(&nic_info, 0, sizeof(struct qlcnic_info)); + ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); + if (ret) + return ret; + nic_info.pci_func = pci_func; + nic_info.min_tx_bw = np_cfg[i].min_bw; + nic_info.max_tx_bw = np_cfg[i].max_bw; + ret = qlcnic_set_nic_info(adapter, &nic_info); + if (ret) + return ret; + index = qlcnic_is_valid_nic_func(adapter, pci_func); + if (index < 0) + return QL_STATUS_INVALID_PARAM; + adapter->npars[index].min_bw = nic_info.min_tx_bw; + adapter->npars[index].max_bw = nic_info.max_tx_bw; + } + + return size; +} + +static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_npar_func_cfg *np_cfg; + struct qlcnic_info nic_info; + u8 pci_func; + int i, ret; + u32 count; + + memset(&nic_info, 0, sizeof(struct qlcnic_info)); + memset(buf, 0, size); + np_cfg = (struct qlcnic_npar_func_cfg *)buf; + + count = size / sizeof(struct qlcnic_npar_func_cfg); + for (i = 0; i < adapter->ahw->total_nic_func; i++) { + if (adapter->npars[i].pci_func >= count) { + dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", + __func__, adapter->ahw->total_nic_func, count); + continue; + } + if (!adapter->npars[i].eswitch_status) + continue; + pci_func = adapter->npars[i].pci_func; + if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) + continue; + ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func); + if (ret) + return ret; + + np_cfg[pci_func].pci_func = pci_func; + np_cfg[pci_func].op_mode = (u8)nic_info.op_mode; + np_cfg[pci_func].port_num = nic_info.phys_port; + np_cfg[pci_func].fw_capab = nic_info.capabilities; + np_cfg[pci_func].min_bw = nic_info.min_tx_bw; + np_cfg[pci_func].max_bw = nic_info.max_tx_bw; + np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques; + np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques; + } + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + return size; +} + +static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_statistics port_stats; + int ret; + + if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + + if (size != sizeof(struct qlcnic_esw_statistics)) + return QL_STATUS_INVALID_PARAM; + + if (offset >= adapter->ahw->max_vnic_func) + return QL_STATUS_INVALID_PARAM; + + memset(&port_stats, 0, size); + ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, + &port_stats.rx); + if (ret) + return ret; + + ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, + &port_stats.tx); + if (ret) + return ret; + + memcpy(buf, &port_stats, size); + return size; +} + +static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_esw_statistics esw_stats; + int ret; + + if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + + if (size != sizeof(struct qlcnic_esw_statistics)) + return QL_STATUS_INVALID_PARAM; + + if (offset >= 
QLCNIC_NIU_MAX_XG_PORTS) + return QL_STATUS_INVALID_PARAM; + + memset(&esw_stats, 0, size); + ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, + &esw_stats.rx); + if (ret) + return ret; + + ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER, + &esw_stats.tx); + if (ret) + return ret; + + memcpy(buf, &esw_stats, size); + return size; +} + +static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + + if (offset >= QLCNIC_NIU_MAX_XG_PORTS) + return QL_STATUS_INVALID_PARAM; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, + QLCNIC_QUERY_RX_COUNTER); + if (ret) + return ret; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, + QLCNIC_QUERY_TX_COUNTER); + if (ret) + return ret; + + return size; +} + +static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + int ret; + + if (qlcnic_83xx_check(adapter)) + return QLC_STATUS_UNSUPPORTED_CMD; + + if (offset >= adapter->ahw->max_vnic_func) + return QL_STATUS_INVALID_PARAM; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, + QLCNIC_QUERY_RX_COUNTER); + if (ret) + return ret; + + ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, + QLCNIC_QUERY_TX_COUNTER); + if (ret) + return ret; + + return size; +} + +static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + struct qlcnic_pci_func_cfg *pci_cfg; + struct qlcnic_pci_info *pci_info; + int i, ret; + u32 count; + + pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL); + if (!pci_info) + return -ENOMEM; + + ret = qlcnic_get_pci_info(adapter, pci_info); + if (ret) { + kfree(pci_info); + return ret; + } + + pci_cfg = (struct qlcnic_pci_func_cfg *)buf; + count = size / sizeof(struct qlcnic_pci_func_cfg); + qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32)); + for (i = 0; i < count; i++) { + pci_cfg[i].pci_func = pci_info[i].id; + pci_cfg[i].func_type = pci_info[i].type; + pci_cfg[i].func_state = 0; + pci_cfg[i].port_num = pci_info[i].default_port; + pci_cfg[i].min_bw = pci_info[i].tx_min_bw; + pci_cfg[i].max_bw = pci_info[i].tx_max_bw; + memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN); + } + + kfree(pci_info); + return size; +} + +static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t offset, + size_t size) +{ + unsigned char *p_read_buf; + int ret, count; + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + if (!size) + return QL_STATUS_INVALID_PARAM; + if (!buf) + return QL_STATUS_INVALID_PARAM; + + count = size / sizeof(u32); + + if (size % sizeof(u32)) + count++; + + p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_read_buf) + 
return -ENOMEM; + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_read_buf); + return -EIO; + } + + ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf, + count); + + if (ret) { + qlcnic_83xx_unlock_flash(adapter); + kfree(p_read_buf); + return ret; + } + + qlcnic_83xx_unlock_flash(adapter); + qlcnic_swap32_buffer((u32 *)p_read_buf, count); + memcpy(buf, p_read_buf, size); + kfree(p_read_buf); + + return size; +} + +static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter, + char *buf, loff_t offset, + size_t size) +{ + int i, ret, count; + unsigned char *p_cache, *p_src; + + p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_cache) + return -ENOMEM; + + count = size / sizeof(u32); + qlcnic_swap32_buffer((u32 *)buf, count); + memcpy(p_cache, buf, size); + p_src = p_cache; + + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_cache); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) { + ret = qlcnic_83xx_flash_bulk_write(adapter, offset, + (u32 *)p_src, + QLC_83XX_FLASH_WRITE_MAX); + + if (ret) { + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; + offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter, + char *buf, loff_t offset, size_t size) +{ + int i, ret, count; + unsigned char *p_cache, *p_src; + + p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL); + if (!p_cache) + return -ENOMEM; + + qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); + memcpy(p_cache, buf, size); + p_src = p_cache; + count = size / sizeof(u32); + + if (qlcnic_83xx_lock_flash(adapter) != 0) { + kfree(p_cache); + return -EIO; + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_enable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + for (i = 0; i < count; i++) { + ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src); + if (ret) { + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + + p_src = p_src + sizeof(u32); + offset = offset + sizeof(u32); + } + + if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { + ret = qlcnic_83xx_disable_flash_write(adapter); + if (ret) { + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + return -EIO; + } + } + + kfree(p_cache); + qlcnic_83xx_unlock_flash(adapter); + + return 0; +} + +static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t 
offset, + size_t size) +{ + int ret; + static int flash_mode; + unsigned long data; + struct device *dev = container_of(kobj, struct device, kobj); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + + if (!buf) + return QL_STATUS_INVALID_PARAM; + + ret = kstrtoul(buf, 16, &data); + + switch (data) { + case QLC_83XX_FLASH_SECTOR_ERASE_CMD: + flash_mode = QLC_83XX_ERASE_MODE; + ret = qlcnic_83xx_erase_flash_sector(adapter, offset); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", __func__, __LINE__); + return -EIO; + } + break; + + case QLC_83XX_FLASH_BULK_WRITE_CMD: + flash_mode = QLC_83XX_BULK_WRITE_MODE; + break; + + case QLC_83XX_FLASH_WRITE_CMD: + flash_mode = QLC_83XX_WRITE_MODE; + break; + default: + if (flash_mode == QLC_83XX_BULK_WRITE_MODE) { + ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf, + offset, size); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", + __func__, __LINE__); + return -EIO; + } + } + + if (flash_mode == QLC_83XX_WRITE_MODE) { + ret = qlcnic_83xx_sysfs_flash_write(adapter, buf, + offset, size); + if (ret) { + dev_err(&adapter->pdev->dev, + "%s failed at %d\n", __func__, + __LINE__); + return -EIO; + } + } + } + + return size; +} + +static struct device_attribute dev_attr_bridged_mode = { + .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = qlcnic_show_bridged_mode, + .store = qlcnic_store_bridged_mode, +}; + +static struct device_attribute dev_attr_diag_mode = { + .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, + .show = qlcnic_show_diag_mode, + .store = qlcnic_store_diag_mode, +}; + +static struct device_attribute dev_attr_beacon = { + .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)}, + .show = qlcnic_show_beacon, + .store = qlcnic_store_beacon, +}; + +static struct bin_attribute bin_attr_crb = { + .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_crb, + .write = qlcnic_sysfs_write_crb, +}; + +static struct bin_attribute bin_attr_mem = { + .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_mem, + .write = qlcnic_sysfs_write_mem, +}; + +static struct bin_attribute bin_attr_npar_config = { + .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_npar_config, + .write = qlcnic_sysfs_write_npar_config, +}; + +static struct bin_attribute bin_attr_pci_config = { + .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pci_config, + .write = NULL, +}; + +static struct bin_attribute bin_attr_port_stats = { + .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_get_port_stats, + .write = qlcnic_sysfs_clear_port_stats, +}; + +static struct bin_attribute bin_attr_esw_stats = { + .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_get_esw_stats, + .write = qlcnic_sysfs_clear_esw_stats, +}; + +static struct bin_attribute bin_attr_esw_config = { + .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_esw_config, + .write = qlcnic_sysfs_write_esw_config, +}; + +static struct bin_attribute bin_attr_pm_config = { + .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)}, + .size = 0, + .read = qlcnic_sysfs_read_pm_config, + .write = qlcnic_sysfs_write_pm_config, +}; + +static struct bin_attribute bin_attr_flash = { + .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)}, 
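+	/* 83xx only: registered via sysfs_create_bin_file() in
+	 * qlcnic_83xx_add_sysfs() and backed by the flash read/write
+	 * handlers above.
+	 */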
+ .size = 0, + .read = qlcnic_83xx_sysfs_flash_read_handler, + .write = qlcnic_83xx_sysfs_flash_write_handler, +}; + +#ifdef CONFIG_QLCNIC_HWMON + +static ssize_t qlcnic_hwmon_show_temp(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct qlcnic_adapter *adapter = dev_get_drvdata(dev); + unsigned int temperature = 0, value = 0; + + if (qlcnic_83xx_check(adapter)) + value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP); + else if (qlcnic_82xx_check(adapter)) + value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP); + + temperature = qlcnic_get_temp_val(value); + /* display millidegree celcius */ + temperature *= 1000; + return sprintf(buf, "%u\n", temperature); +} + +/* hwmon-sysfs attributes */ +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, + qlcnic_hwmon_show_temp, NULL, 1); + +static struct attribute *qlcnic_hwmon_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(qlcnic_hwmon); + +void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + struct device *hwmon_dev; + + /* Skip hwmon registration for a VF device */ + if (qlcnic_sriov_vf_check(adapter)) { + adapter->ahw->hwmon_dev = NULL; + return; + } + hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name, + adapter, + qlcnic_hwmon_groups); + if (IS_ERR(hwmon_dev)) { + dev_err(dev, "Cannot register with hwmon, err=%ld\n", + PTR_ERR(hwmon_dev)); + hwmon_dev = NULL; + } + adapter->ahw->hwmon_dev = hwmon_dev; +} + +void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter) +{ + struct device *hwmon_dev = adapter->ahw->hwmon_dev; + if (hwmon_dev) { + hwmon_device_unregister(hwmon_dev); + adapter->ahw->hwmon_dev = NULL; + } +} +#endif + +void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG) + if (device_create_file(dev, &dev_attr_bridged_mode)) + dev_warn(dev, + "failed to create bridged_mode sysfs entry\n"); +} + +void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG) + device_remove_file(dev, &dev_attr_bridged_mode); +} + +static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (device_create_bin_file(dev, &bin_attr_port_stats)) + dev_info(dev, "failed to create port stats sysfs entry"); + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) + return; + if (device_create_file(dev, &dev_attr_diag_mode)) + dev_info(dev, "failed to create diag_mode sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_crb)) + dev_info(dev, "failed to create crb sysfs entry\n"); + if (device_create_bin_file(dev, &bin_attr_mem)) + dev_info(dev, "failed to create mem sysfs entry\n"); + + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) + return; + + if (device_create_bin_file(dev, &bin_attr_pci_config)) + dev_info(dev, "failed to create pci config sysfs entry"); + + if (device_create_file(dev, &dev_attr_beacon)) + dev_info(dev, "failed to create beacon sysfs entry"); + + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + if (device_create_bin_file(dev, &bin_attr_esw_config)) + dev_info(dev, "failed to create esw config sysfs entry"); + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) + return; + if (device_create_bin_file(dev, &bin_attr_npar_config)) + dev_info(dev, "failed to create npar config 
sysfs entry"); + if (device_create_bin_file(dev, &bin_attr_pm_config)) + dev_info(dev, "failed to create pm config sysfs entry"); + if (device_create_bin_file(dev, &bin_attr_esw_stats)) + dev_info(dev, "failed to create eswitch stats sysfs entry"); +} + +static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + device_remove_bin_file(dev, &bin_attr_port_stats); + + if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) + return; + device_remove_file(dev, &dev_attr_diag_mode); + device_remove_bin_file(dev, &bin_attr_crb); + device_remove_bin_file(dev, &bin_attr_mem); + + if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) + return; + + device_remove_bin_file(dev, &bin_attr_pci_config); + device_remove_file(dev, &dev_attr_beacon); + if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) + return; + device_remove_bin_file(dev, &bin_attr_esw_config); + if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) + return; + device_remove_bin_file(dev, &bin_attr_npar_config); + device_remove_bin_file(dev, &bin_attr_pm_config); + device_remove_bin_file(dev, &bin_attr_esw_stats); +} + +void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter) +{ + qlcnic_create_diag_entries(adapter); +} + +void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter) +{ + qlcnic_remove_diag_entries(adapter); +} + +void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + qlcnic_create_diag_entries(adapter); + + if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash)) + dev_info(dev, "failed to create flash sysfs entry\n"); +} + +void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + qlcnic_remove_diag_entries(adapter); + sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash); +} diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile new file mode 100644 index 000000000..8a197658d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Qlogic 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_QLGE) += qlge.o + +qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h new file mode 100644 index 000000000..ef332708e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -0,0 +1,2338 @@ +/* + * QLogic QLA41xx NIC HBA Driver + * Copyright (c) 2003-2006 QLogic Corporation + * + * See LICENSE.qlge for copyright and licensing details. + */ +#ifndef _QLGE_H_ +#define _QLGE_H_ + +#include +#include +#include +#include +#include + +/* + * General definitions... + */ +#define DRV_NAME "qlge" +#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " +#define DRV_VERSION "1.00.00.34" + +#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ + +#define QLGE_VENDOR_ID 0x1077 +#define QLGE_DEVICE_ID_8012 0x8012 +#define QLGE_DEVICE_ID_8000 0x8000 +#define QLGE_MEZZ_SSYS_ID_068 0x0068 +#define QLGE_MEZZ_SSYS_ID_180 0x0180 +#define MAX_CPUS 8 +#define MAX_TX_RINGS MAX_CPUS +#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1) + +#define NUM_TX_RING_ENTRIES 256 +#define NUM_RX_RING_ENTRIES 256 + +#define NUM_SMALL_BUFFERS 512 +#define NUM_LARGE_BUFFERS 512 +#define DB_PAGE_SIZE 4096 + +/* Calculate the number of (4k) pages required to + * contain a buffer queue of the given length. + */ +#define MAX_DB_PAGES_PER_BQ(x) \ + (((x * sizeof(u64)) / DB_PAGE_SIZE) + \ + (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 
1 : 0)) + +#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ + MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ + MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) +#define LARGE_BUFFER_MAX_SIZE 8192 +#define LARGE_BUFFER_MIN_SIZE 2048 + +#define MAX_CQ 128 +#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */ +#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */ +#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2) +#define UDELAY_COUNT 3 +#define UDELAY_DELAY 100 + + +#define TX_DESC_PER_IOCB 8 + +#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0 +#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) +#else /* all other page sizes */ +#define TX_DESC_PER_OAL 0 +#endif + +/* Word shifting for converting 64-bit + * address to a series of 16-bit words. + * This is used for some MPI firmware + * mailbox commands. + */ +#define LSW(x) ((u16)(x)) +#define MSW(x) ((u16)((u32)(x) >> 16)) +#define LSD(x) ((u32)((u64)(x))) +#define MSD(x) ((u32)((((u64)(x)) >> 32))) + +/* MPI test register definitions. This register + * is used for determining alternate NIC function's + * PCI->func number. + */ +enum { + MPI_TEST_FUNC_PORT_CFG = 0x1002, + MPI_TEST_FUNC_PRB_CTL = 0x100e, + MPI_TEST_FUNC_PRB_EN = 0x18a20000, + MPI_TEST_FUNC_RST_STS = 0x100a, + MPI_TEST_FUNC_RST_FRC = 0x00000003, + MPI_TEST_NIC_FUNC_MASK = 0x00000007, + MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0), + MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e, + MPI_TEST_NIC1_FUNC_SHIFT = 1, + MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4), + MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0, + MPI_TEST_NIC2_FUNC_SHIFT = 5, + MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8), + MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00, + MPI_TEST_FC1_FUNCTION_SHIFT = 9, + MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12), + MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000, + MPI_TEST_FC2_FUNCTION_SHIFT = 13, + + MPI_NIC_READ = 0x00000000, + MPI_NIC_REG_BLOCK = 0x00020000, + MPI_NIC_FUNCTION_SHIFT = 6, +}; + +/* + * Processor Address Register (PROC_ADDR) bit definitions. + */ +enum { + + /* Misc. stuff */ + MAILBOX_COUNT = 16, + MAILBOX_TIMEOUT = 5, + + PROC_ADDR_RDY = (1 << 31), + PROC_ADDR_R = (1 << 30), + PROC_ADDR_ERR = (1 << 29), + PROC_ADDR_DA = (1 << 28), + PROC_ADDR_FUNC0_MBI = 0x00001180, + PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT), + PROC_ADDR_FUNC0_CTL = 0x000011a1, + PROC_ADDR_FUNC2_MBI = 0x00001280, + PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT), + PROC_ADDR_FUNC2_CTL = 0x000012a1, + PROC_ADDR_MPI_RISC = 0x00000000, + PROC_ADDR_MDE = 0x00010000, + PROC_ADDR_REGBLOCK = 0x00020000, + PROC_ADDR_RISC_REG = 0x00030000, +}; + +/* + * System Register (SYS) bit definitions. + */ +enum { + SYS_EFE = (1 << 0), + SYS_FAE = (1 << 1), + SYS_MDC = (1 << 2), + SYS_DST = (1 << 3), + SYS_DWC = (1 << 4), + SYS_EVW = (1 << 5), + SYS_OMP_DLY_MASK = 0x3f000000, + /* + * There are no values defined as of edit #15. + */ + SYS_ODI = (1 << 14), +}; + +/* + * Reset/Failover Register (RST_FO) bit definitions. + */ +enum { + RST_FO_TFO = (1 << 0), + RST_FO_RR_MASK = 0x00060000, + RST_FO_RR_CQ_CAM = 0x00000000, + RST_FO_RR_DROP = 0x00000002, + RST_FO_RR_DQ = 0x00000004, + RST_FO_RR_RCV_FUNC_CQ = 0x00000006, + RST_FO_FRB = (1 << 12), + RST_FO_MOP = (1 << 13), + RST_FO_REG = (1 << 14), + RST_FO_FR = (1 << 15), +}; + +/* + * Function Specific Control Register (FSC) bit definitions. 
+ */ +enum { + FSC_DBRST_MASK = 0x00070000, + FSC_DBRST_256 = 0x00000000, + FSC_DBRST_512 = 0x00000001, + FSC_DBRST_768 = 0x00000002, + FSC_DBRST_1024 = 0x00000003, + FSC_DBL_MASK = 0x00180000, + FSC_DBL_DBRST = 0x00000000, + FSC_DBL_MAX_PLD = 0x00000008, + FSC_DBL_MAX_BRST = 0x00000010, + FSC_DBL_128_BYTES = 0x00000018, + FSC_EC = (1 << 5), + FSC_EPC_MASK = 0x00c00000, + FSC_EPC_INBOUND = (1 << 6), + FSC_EPC_OUTBOUND = (1 << 7), + FSC_VM_PAGESIZE_MASK = 0x07000000, + FSC_VM_PAGE_2K = 0x00000100, + FSC_VM_PAGE_4K = 0x00000200, + FSC_VM_PAGE_8K = 0x00000300, + FSC_VM_PAGE_64K = 0x00000600, + FSC_SH = (1 << 11), + FSC_DSB = (1 << 12), + FSC_STE = (1 << 13), + FSC_FE = (1 << 15), +}; + +/* + * Host Command Status Register (CSR) bit definitions. + */ +enum { + CSR_ERR_STS_MASK = 0x0000003f, + /* + * There are no valued defined as of edit #15. + */ + CSR_RR = (1 << 8), + CSR_HRI = (1 << 9), + CSR_RP = (1 << 10), + CSR_CMD_PARM_SHIFT = 22, + CSR_CMD_NOP = 0x00000000, + CSR_CMD_SET_RST = 0x10000000, + CSR_CMD_CLR_RST = 0x20000000, + CSR_CMD_SET_PAUSE = 0x30000000, + CSR_CMD_CLR_PAUSE = 0x40000000, + CSR_CMD_SET_H2R_INT = 0x50000000, + CSR_CMD_CLR_H2R_INT = 0x60000000, + CSR_CMD_PAR_EN = 0x70000000, + CSR_CMD_SET_BAD_PAR = 0x80000000, + CSR_CMD_CLR_BAD_PAR = 0x90000000, + CSR_CMD_CLR_R2PCI_INT = 0xa0000000, +}; + +/* + * Configuration Register (CFG) bit definitions. + */ +enum { + CFG_LRQ = (1 << 0), + CFG_DRQ = (1 << 1), + CFG_LR = (1 << 2), + CFG_DR = (1 << 3), + CFG_LE = (1 << 5), + CFG_LCQ = (1 << 6), + CFG_DCQ = (1 << 7), + CFG_Q_SHIFT = 8, + CFG_Q_MASK = 0x7f000000, +}; + +/* + * Status Register (STS) bit definitions. + */ +enum { + STS_FE = (1 << 0), + STS_PI = (1 << 1), + STS_PL0 = (1 << 2), + STS_PL1 = (1 << 3), + STS_PI0 = (1 << 4), + STS_PI1 = (1 << 5), + STS_FUNC_ID_MASK = 0x000000c0, + STS_FUNC_ID_SHIFT = 6, + STS_F0E = (1 << 8), + STS_F1E = (1 << 9), + STS_F2E = (1 << 10), + STS_F3E = (1 << 11), + STS_NFE = (1 << 12), +}; + +/* + * Interrupt Enable Register (INTR_EN) bit definitions. + */ +enum { + INTR_EN_INTR_MASK = 0x007f0000, + INTR_EN_TYPE_MASK = 0x03000000, + INTR_EN_TYPE_ENABLE = 0x00000100, + INTR_EN_TYPE_DISABLE = 0x00000200, + INTR_EN_TYPE_READ = 0x00000300, + INTR_EN_IHD = (1 << 13), + INTR_EN_IHD_MASK = (INTR_EN_IHD << 16), + INTR_EN_EI = (1 << 14), + INTR_EN_EN = (1 << 15), +}; + +/* + * Interrupt Mask Register (INTR_MASK) bit definitions. + */ +enum { + INTR_MASK_PI = (1 << 0), + INTR_MASK_HL0 = (1 << 1), + INTR_MASK_LH0 = (1 << 2), + INTR_MASK_HL1 = (1 << 3), + INTR_MASK_LH1 = (1 << 4), + INTR_MASK_SE = (1 << 5), + INTR_MASK_LSC = (1 << 6), + INTR_MASK_MC = (1 << 7), + INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC, +}; + +/* + * Register (REV_ID) bit definitions. + */ +enum { + REV_ID_MASK = 0x0000000f, + REV_ID_NICROLL_SHIFT = 0, + REV_ID_NICREV_SHIFT = 4, + REV_ID_XGROLL_SHIFT = 8, + REV_ID_XGREV_SHIFT = 12, + REV_ID_CHIPREV_SHIFT = 28, +}; + +/* + * Force ECC Error Register (FRC_ECC_ERR) bit definitions. + */ +enum { + FRC_ECC_ERR_VW = (1 << 12), + FRC_ECC_ERR_VB = (1 << 13), + FRC_ECC_ERR_NI = (1 << 14), + FRC_ECC_ERR_NO = (1 << 15), + FRC_ECC_PFE_SHIFT = 16, + FRC_ECC_ERR_DO = (1 << 18), + FRC_ECC_P14 = (1 << 19), +}; + +/* + * Error Status Register (ERR_STS) bit definitions. 
+ */ +enum { + ERR_STS_NOF = (1 << 0), + ERR_STS_NIF = (1 << 1), + ERR_STS_DRP = (1 << 2), + ERR_STS_XGP = (1 << 3), + ERR_STS_FOU = (1 << 4), + ERR_STS_FOC = (1 << 5), + ERR_STS_FOF = (1 << 6), + ERR_STS_FIU = (1 << 7), + ERR_STS_FIC = (1 << 8), + ERR_STS_FIF = (1 << 9), + ERR_STS_MOF = (1 << 10), + ERR_STS_TA = (1 << 11), + ERR_STS_MA = (1 << 12), + ERR_STS_MPE = (1 << 13), + ERR_STS_SCE = (1 << 14), + ERR_STS_STE = (1 << 15), + ERR_STS_FOW = (1 << 16), + ERR_STS_UE = (1 << 17), + ERR_STS_MCH = (1 << 26), + ERR_STS_LOC_SHIFT = 27, +}; + +/* + * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions. + */ +enum { + RAM_DBG_ADDR_FW = (1 << 30), + RAM_DBG_ADDR_FR = (1 << 31), +}; + +/* + * Semaphore Register (SEM) bit definitions. + */ +enum { + /* + * Example: + * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT) + */ + SEM_CLEAR = 0, + SEM_SET = 1, + SEM_FORCE = 3, + SEM_XGMAC0_SHIFT = 0, + SEM_XGMAC1_SHIFT = 2, + SEM_ICB_SHIFT = 4, + SEM_MAC_ADDR_SHIFT = 6, + SEM_FLASH_SHIFT = 8, + SEM_PROBE_SHIFT = 10, + SEM_RT_IDX_SHIFT = 12, + SEM_PROC_REG_SHIFT = 14, + SEM_XGMAC0_MASK = 0x00030000, + SEM_XGMAC1_MASK = 0x000c0000, + SEM_ICB_MASK = 0x00300000, + SEM_MAC_ADDR_MASK = 0x00c00000, + SEM_FLASH_MASK = 0x03000000, + SEM_PROBE_MASK = 0x0c000000, + SEM_RT_IDX_MASK = 0x30000000, + SEM_PROC_REG_MASK = 0xc0000000, +}; + +/* + * 10G MAC Address Register (XGMAC_ADDR) bit definitions. + */ +enum { + XGMAC_ADDR_RDY = (1 << 31), + XGMAC_ADDR_R = (1 << 30), + XGMAC_ADDR_XME = (1 << 29), + + /* XGMAC control registers */ + PAUSE_SRC_LO = 0x00000100, + PAUSE_SRC_HI = 0x00000104, + GLOBAL_CFG = 0x00000108, + GLOBAL_CFG_RESET = (1 << 0), + GLOBAL_CFG_JUMBO = (1 << 6), + GLOBAL_CFG_TX_STAT_EN = (1 << 10), + GLOBAL_CFG_RX_STAT_EN = (1 << 11), + TX_CFG = 0x0000010c, + TX_CFG_RESET = (1 << 0), + TX_CFG_EN = (1 << 1), + TX_CFG_PREAM = (1 << 2), + RX_CFG = 0x00000110, + RX_CFG_RESET = (1 << 0), + RX_CFG_EN = (1 << 1), + RX_CFG_PREAM = (1 << 2), + FLOW_CTL = 0x0000011c, + PAUSE_OPCODE = 0x00000120, + PAUSE_TIMER = 0x00000124, + PAUSE_FRM_DEST_LO = 0x00000128, + PAUSE_FRM_DEST_HI = 0x0000012c, + MAC_TX_PARAMS = 0x00000134, + MAC_TX_PARAMS_JUMBO = (1 << 31), + MAC_TX_PARAMS_SIZE_SHIFT = 16, + MAC_RX_PARAMS = 0x00000138, + MAC_SYS_INT = 0x00000144, + MAC_SYS_INT_MASK = 0x00000148, + MAC_MGMT_INT = 0x0000014c, + MAC_MGMT_IN_MASK = 0x00000150, + EXT_ARB_MODE = 0x000001fc, + + /* XGMAC TX statistics registers */ + TX_PKTS = 0x00000200, + TX_BYTES = 0x00000208, + TX_MCAST_PKTS = 0x00000210, + TX_BCAST_PKTS = 0x00000218, + TX_UCAST_PKTS = 0x00000220, + TX_CTL_PKTS = 0x00000228, + TX_PAUSE_PKTS = 0x00000230, + TX_64_PKT = 0x00000238, + TX_65_TO_127_PKT = 0x00000240, + TX_128_TO_255_PKT = 0x00000248, + TX_256_511_PKT = 0x00000250, + TX_512_TO_1023_PKT = 0x00000258, + TX_1024_TO_1518_PKT = 0x00000260, + TX_1519_TO_MAX_PKT = 0x00000268, + TX_UNDERSIZE_PKT = 0x00000270, + TX_OVERSIZE_PKT = 0x00000278, + + /* XGMAC statistics control registers */ + RX_HALF_FULL_DET = 0x000002a0, + TX_HALF_FULL_DET = 0x000002a4, + RX_OVERFLOW_DET = 0x000002a8, + TX_OVERFLOW_DET = 0x000002ac, + RX_HALF_FULL_MASK = 0x000002b0, + TX_HALF_FULL_MASK = 0x000002b4, + RX_OVERFLOW_MASK = 0x000002b8, + TX_OVERFLOW_MASK = 0x000002bc, + STAT_CNT_CTL = 0x000002c0, + STAT_CNT_CTL_CLEAR_TX = (1 << 0), + STAT_CNT_CTL_CLEAR_RX = (1 << 1), + AUX_RX_HALF_FULL_DET = 0x000002d0, + AUX_TX_HALF_FULL_DET = 0x000002d4, + AUX_RX_OVERFLOW_DET = 0x000002d8, + AUX_TX_OVERFLOW_DET = 0x000002dc, + AUX_RX_HALF_FULL_MASK = 0x000002f0, + AUX_TX_HALF_FULL_MASK = 
0x000002f4, + AUX_RX_OVERFLOW_MASK = 0x000002f8, + AUX_TX_OVERFLOW_MASK = 0x000002fc, + + /* XGMAC RX statistics registers */ + RX_BYTES = 0x00000300, + RX_BYTES_OK = 0x00000308, + RX_PKTS = 0x00000310, + RX_PKTS_OK = 0x00000318, + RX_BCAST_PKTS = 0x00000320, + RX_MCAST_PKTS = 0x00000328, + RX_UCAST_PKTS = 0x00000330, + RX_UNDERSIZE_PKTS = 0x00000338, + RX_OVERSIZE_PKTS = 0x00000340, + RX_JABBER_PKTS = 0x00000348, + RX_UNDERSIZE_FCERR_PKTS = 0x00000350, + RX_DROP_EVENTS = 0x00000358, + RX_FCERR_PKTS = 0x00000360, + RX_ALIGN_ERR = 0x00000368, + RX_SYMBOL_ERR = 0x00000370, + RX_MAC_ERR = 0x00000378, + RX_CTL_PKTS = 0x00000380, + RX_PAUSE_PKTS = 0x00000388, + RX_64_PKTS = 0x00000390, + RX_65_TO_127_PKTS = 0x00000398, + RX_128_255_PKTS = 0x000003a0, + RX_256_511_PKTS = 0x000003a8, + RX_512_TO_1023_PKTS = 0x000003b0, + RX_1024_TO_1518_PKTS = 0x000003b8, + RX_1519_TO_MAX_PKTS = 0x000003c0, + RX_LEN_ERR_PKTS = 0x000003c8, + + /* XGMAC MDIO control registers */ + MDIO_TX_DATA = 0x00000400, + MDIO_RX_DATA = 0x00000410, + MDIO_CMD = 0x00000420, + MDIO_PHY_ADDR = 0x00000430, + MDIO_PORT = 0x00000440, + MDIO_STATUS = 0x00000450, + + XGMAC_REGISTER_END = 0x00000740, +}; + +/* + * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions. + */ +enum { + ETS_QUEUE_SHIFT = 29, + ETS_REF = (1 << 26), + ETS_RS = (1 << 27), + ETS_P = (1 << 28), + ETS_FC_COS_SHIFT = 23, +}; + +/* + * Flash Address Register (FLASH_ADDR) bit definitions. + */ +enum { + FLASH_ADDR_RDY = (1 << 31), + FLASH_ADDR_R = (1 << 30), + FLASH_ADDR_ERR = (1 << 29), +}; + +/* + * Stop CQ Processing Register (CQ_STOP) bit definitions. + */ +enum { + CQ_STOP_QUEUE_MASK = (0x007f0000), + CQ_STOP_TYPE_MASK = (0x03000000), + CQ_STOP_TYPE_START = 0x00000100, + CQ_STOP_TYPE_STOP = 0x00000200, + CQ_STOP_TYPE_READ = 0x00000300, + CQ_STOP_EN = (1 << 15), +}; + +/* + * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions. + */ +enum { + MAC_ADDR_IDX_SHIFT = 4, + MAC_ADDR_TYPE_SHIFT = 16, + MAC_ADDR_TYPE_COUNT = 10, + MAC_ADDR_TYPE_MASK = 0x000f0000, + MAC_ADDR_TYPE_CAM_MAC = 0x00000000, + MAC_ADDR_TYPE_MULTI_MAC = 0x00010000, + MAC_ADDR_TYPE_VLAN = 0x00020000, + MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000, + MAC_ADDR_TYPE_FC_MAC = 0x00040000, + MAC_ADDR_TYPE_MGMT_MAC = 0x00050000, + MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000, + MAC_ADDR_TYPE_MGMT_V4 = 0x00070000, + MAC_ADDR_TYPE_MGMT_V6 = 0x00080000, + MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000, + MAC_ADDR_ADR = (1 << 25), + MAC_ADDR_RS = (1 << 26), + MAC_ADDR_E = (1 << 27), + MAC_ADDR_MR = (1 << 30), + MAC_ADDR_MW = (1 << 31), + MAX_MULTICAST_ENTRIES = 32, + + /* Entry count and words per entry + * for each address type in the filter. + */ + MAC_ADDR_MAX_CAM_ENTRIES = 512, + MAC_ADDR_MAX_CAM_WCOUNT = 3, + MAC_ADDR_MAX_MULTICAST_ENTRIES = 32, + MAC_ADDR_MAX_MULTICAST_WCOUNT = 2, + MAC_ADDR_MAX_VLAN_ENTRIES = 4096, + MAC_ADDR_MAX_VLAN_WCOUNT = 1, + MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096, + MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1, + MAC_ADDR_MAX_FC_MAC_ENTRIES = 4, + MAC_ADDR_MAX_FC_MAC_WCOUNT = 2, + MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8, + MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2, + MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16, + MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1, + MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1, + MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4, + MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4, + MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1, +}; + +/* + * MAC Protocol Address Index Register (SPLT_HDR) bit definitions. 
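+ * (Note: the heading above repeats the previous block's title; SPLT_HDR
+ * itself is the split header register at offset 0xc0 in the control
+ * register map below.)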
+ */ +enum { + SPLT_HDR_EP = (1 << 31), +}; + +/* + * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions. + */ +enum { + FC_RCV_CFG_ECT = (1 << 15), + FC_RCV_CFG_DFH = (1 << 20), + FC_RCV_CFG_DVF = (1 << 21), + FC_RCV_CFG_RCE = (1 << 27), + FC_RCV_CFG_RFE = (1 << 28), + FC_RCV_CFG_TEE = (1 << 29), + FC_RCV_CFG_TCE = (1 << 30), + FC_RCV_CFG_TFE = (1 << 31), +}; + +/* + * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions. + */ +enum { + NIC_RCV_CFG_PPE = (1 << 0), + NIC_RCV_CFG_VLAN_MASK = 0x00060000, + NIC_RCV_CFG_VLAN_ALL = 0x00000000, + NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002, + NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004, + NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006, + NIC_RCV_CFG_RV = (1 << 3), + NIC_RCV_CFG_DFQ_MASK = (0x7f000000), + NIC_RCV_CFG_DFQ_SHIFT = 8, + NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */ +}; + +/* + * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions. + */ +enum { + MGMT_RCV_CFG_ARP = (1 << 0), + MGMT_RCV_CFG_DHC = (1 << 1), + MGMT_RCV_CFG_DHS = (1 << 2), + MGMT_RCV_CFG_NP = (1 << 3), + MGMT_RCV_CFG_I6N = (1 << 4), + MGMT_RCV_CFG_I6R = (1 << 5), + MGMT_RCV_CFG_DH6 = (1 << 6), + MGMT_RCV_CFG_UD1 = (1 << 7), + MGMT_RCV_CFG_UD0 = (1 << 8), + MGMT_RCV_CFG_BCT = (1 << 9), + MGMT_RCV_CFG_MCT = (1 << 10), + MGMT_RCV_CFG_DM = (1 << 11), + MGMT_RCV_CFG_RM = (1 << 12), + MGMT_RCV_CFG_STL = (1 << 13), + MGMT_RCV_CFG_VLAN_MASK = 0xc0000000, + MGMT_RCV_CFG_VLAN_ALL = 0x00000000, + MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000, + MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000, + MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000, +}; + +/* + * Routing Index Register (RT_IDX) bit definitions. + */ +enum { + RT_IDX_IDX_SHIFT = 8, + RT_IDX_TYPE_MASK = 0x000f0000, + RT_IDX_TYPE_SHIFT = 16, + RT_IDX_TYPE_RT = 0x00000000, + RT_IDX_TYPE_RT_INV = 0x00010000, + RT_IDX_TYPE_NICQ = 0x00020000, + RT_IDX_TYPE_NICQ_INV = 0x00030000, + RT_IDX_DST_MASK = 0x00700000, + RT_IDX_DST_RSS = 0x00000000, + RT_IDX_DST_CAM_Q = 0x00100000, + RT_IDX_DST_COS_Q = 0x00200000, + RT_IDX_DST_DFLT_Q = 0x00300000, + RT_IDX_DST_DEST_Q = 0x00400000, + RT_IDX_RS = (1 << 26), + RT_IDX_E = (1 << 27), + RT_IDX_MR = (1 << 30), + RT_IDX_MW = (1 << 31), + + /* Nic Queue format - type 2 bits */ + RT_IDX_BCAST = (1 << 0), + RT_IDX_MCAST = (1 << 1), + RT_IDX_MCAST_MATCH = (1 << 2), + RT_IDX_MCAST_REG_MATCH = (1 << 3), + RT_IDX_MCAST_HASH_MATCH = (1 << 4), + RT_IDX_FC_MACH = (1 << 5), + RT_IDX_ETH_FCOE = (1 << 6), + RT_IDX_CAM_HIT = (1 << 7), + RT_IDX_CAM_BIT0 = (1 << 8), + RT_IDX_CAM_BIT1 = (1 << 9), + RT_IDX_VLAN_TAG = (1 << 10), + RT_IDX_VLAN_MATCH = (1 << 11), + RT_IDX_VLAN_FILTER = (1 << 12), + RT_IDX_ETH_SKIP1 = (1 << 13), + RT_IDX_ETH_SKIP2 = (1 << 14), + RT_IDX_BCAST_MCAST_MATCH = (1 << 15), + RT_IDX_802_3 = (1 << 16), + RT_IDX_LLDP = (1 << 17), + RT_IDX_UNUSED018 = (1 << 18), + RT_IDX_UNUSED019 = (1 << 19), + RT_IDX_UNUSED20 = (1 << 20), + RT_IDX_UNUSED21 = (1 << 21), + RT_IDX_ERR = (1 << 22), + RT_IDX_VALID = (1 << 23), + RT_IDX_TU_CSUM_ERR = (1 << 24), + RT_IDX_IP_CSUM_ERR = (1 << 25), + RT_IDX_MAC_ERR = (1 << 26), + RT_IDX_RSS_TCP6 = (1 << 27), + RT_IDX_RSS_TCP4 = (1 << 28), + RT_IDX_RSS_IPV6 = (1 << 29), + RT_IDX_RSS_IPV4 = (1 << 30), + RT_IDX_RSS_MATCH = (1 << 31), + + /* Hierarchy for the NIC Queue Mask */ + RT_IDX_ALL_ERR_SLOT = 0, + RT_IDX_MAC_ERR_SLOT = 0, + RT_IDX_IP_CSUM_ERR_SLOT = 1, + RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2, + RT_IDX_BCAST_SLOT = 3, + RT_IDX_MCAST_MATCH_SLOT = 4, + RT_IDX_ALLMULTI_SLOT = 5, + RT_IDX_UNUSED6_SLOT = 6, + RT_IDX_UNUSED7_SLOT = 7, + 
RT_IDX_RSS_MATCH_SLOT = 8, + RT_IDX_RSS_IPV4_SLOT = 8, + RT_IDX_RSS_IPV6_SLOT = 9, + RT_IDX_RSS_TCP4_SLOT = 10, + RT_IDX_RSS_TCP6_SLOT = 11, + RT_IDX_CAM_HIT_SLOT = 12, + RT_IDX_UNUSED013 = 13, + RT_IDX_UNUSED014 = 14, + RT_IDX_PROMISCUOUS_SLOT = 15, + RT_IDX_MAX_RT_SLOTS = 8, + RT_IDX_MAX_NIC_SLOTS = 16, +}; + +/* + * Serdes Address Register (XG_SERDES_ADDR) bit definitions. + */ +enum { + XG_SERDES_ADDR_RDY = (1 << 31), + XG_SERDES_ADDR_R = (1 << 30), + + XG_SERDES_ADDR_STS = 0x00001E06, + XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005, + XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a, + XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001, + + /* Serdes coredump definitions. */ + XG_SERDES_XAUI_AN_START = 0x00000000, + XG_SERDES_XAUI_AN_END = 0x00000034, + XG_SERDES_XAUI_HSS_PCS_START = 0x00000800, + XG_SERDES_XAUI_HSS_PCS_END = 0x0000880, + XG_SERDES_XFI_AN_START = 0x00001000, + XG_SERDES_XFI_AN_END = 0x00001034, + XG_SERDES_XFI_TRAIN_START = 0x10001050, + XG_SERDES_XFI_TRAIN_END = 0x1000107C, + XG_SERDES_XFI_HSS_PCS_START = 0x00001800, + XG_SERDES_XFI_HSS_PCS_END = 0x00001838, + XG_SERDES_XFI_HSS_TX_START = 0x00001c00, + XG_SERDES_XFI_HSS_TX_END = 0x00001c1f, + XG_SERDES_XFI_HSS_RX_START = 0x00001c40, + XG_SERDES_XFI_HSS_RX_END = 0x00001c5f, + XG_SERDES_XFI_HSS_PLL_START = 0x00001e00, + XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f, +}; + +/* + * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions. + */ +enum { + PRB_MX_ADDR_ARE = (1 << 16), + PRB_MX_ADDR_UP = (1 << 15), + PRB_MX_ADDR_SWP = (1 << 14), + + /* Module select values. */ + PRB_MX_ADDR_MAX_MODS = 21, + PRB_MX_ADDR_MOD_SEL_SHIFT = 9, + PRB_MX_ADDR_MOD_SEL_TBD = 0, + PRB_MX_ADDR_MOD_SEL_IDE1 = 1, + PRB_MX_ADDR_MOD_SEL_IDE2 = 2, + PRB_MX_ADDR_MOD_SEL_FRB = 3, + PRB_MX_ADDR_MOD_SEL_ODE1 = 4, + PRB_MX_ADDR_MOD_SEL_ODE2 = 5, + PRB_MX_ADDR_MOD_SEL_DA1 = 6, + PRB_MX_ADDR_MOD_SEL_DA2 = 7, + PRB_MX_ADDR_MOD_SEL_IMP1 = 8, + PRB_MX_ADDR_MOD_SEL_IMP2 = 9, + PRB_MX_ADDR_MOD_SEL_OMP1 = 10, + PRB_MX_ADDR_MOD_SEL_OMP2 = 11, + PRB_MX_ADDR_MOD_SEL_ORS1 = 12, + PRB_MX_ADDR_MOD_SEL_ORS2 = 13, + PRB_MX_ADDR_MOD_SEL_REG = 14, + PRB_MX_ADDR_MOD_SEL_MAC1 = 16, + PRB_MX_ADDR_MOD_SEL_MAC2 = 17, + PRB_MX_ADDR_MOD_SEL_VQM1 = 18, + PRB_MX_ADDR_MOD_SEL_VQM2 = 19, + PRB_MX_ADDR_MOD_SEL_MOP = 20, + /* Bit fields indicating which modules + * are valid for each clock domain. + */ + PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7, + PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1, + PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309, + PRB_MX_ADDR_VALID_FC_MOD = 0x00003001, + PRB_MX_ADDR_VALID_TOTAL = 34, + + /* Clock domain values. 
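+	 * These are written into PRB_MX_ADDR at PRB_MX_ADDR_CLOCK_SHIFT;
+	 * the PRB_MX_ADDR_VALID_* masks above indicate which modules are
+	 * valid in each clock domain.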
*/ + PRB_MX_ADDR_CLOCK_SHIFT = 6, + PRB_MX_ADDR_SYS_CLOCK = 0, + PRB_MX_ADDR_PCI_CLOCK = 2, + PRB_MX_ADDR_FC_CLOCK = 5, + PRB_MX_ADDR_XGM_CLOCK = 6, + + PRB_MX_ADDR_MAX_MUX = 64, +}; + +/* + * Control Register Set Map + */ +enum { + PROC_ADDR = 0, /* Use semaphore */ + PROC_DATA = 0x04, /* Use semaphore */ + SYS = 0x08, + RST_FO = 0x0c, + FSC = 0x10, + CSR = 0x14, + LED = 0x18, + ICB_RID = 0x1c, /* Use semaphore */ + ICB_L = 0x20, /* Use semaphore */ + ICB_H = 0x24, /* Use semaphore */ + CFG = 0x28, + BIOS_ADDR = 0x2c, + STS = 0x30, + INTR_EN = 0x34, + INTR_MASK = 0x38, + ISR1 = 0x3c, + ISR2 = 0x40, + ISR3 = 0x44, + ISR4 = 0x48, + REV_ID = 0x4c, + FRC_ECC_ERR = 0x50, + ERR_STS = 0x54, + RAM_DBG_ADDR = 0x58, + RAM_DBG_DATA = 0x5c, + ECC_ERR_CNT = 0x60, + SEM = 0x64, + GPIO_1 = 0x68, /* Use semaphore */ + GPIO_2 = 0x6c, /* Use semaphore */ + GPIO_3 = 0x70, /* Use semaphore */ + RSVD2 = 0x74, + XGMAC_ADDR = 0x78, /* Use semaphore */ + XGMAC_DATA = 0x7c, /* Use semaphore */ + NIC_ETS = 0x80, + CNA_ETS = 0x84, + FLASH_ADDR = 0x88, /* Use semaphore */ + FLASH_DATA = 0x8c, /* Use semaphore */ + CQ_STOP = 0x90, + PAGE_TBL_RID = 0x94, + WQ_PAGE_TBL_LO = 0x98, + WQ_PAGE_TBL_HI = 0x9c, + CQ_PAGE_TBL_LO = 0xa0, + CQ_PAGE_TBL_HI = 0xa4, + MAC_ADDR_IDX = 0xa8, /* Use semaphore */ + MAC_ADDR_DATA = 0xac, /* Use semaphore */ + COS_DFLT_CQ1 = 0xb0, + COS_DFLT_CQ2 = 0xb4, + ETYPE_SKIP1 = 0xb8, + ETYPE_SKIP2 = 0xbc, + SPLT_HDR = 0xc0, + FC_PAUSE_THRES = 0xc4, + NIC_PAUSE_THRES = 0xc8, + FC_ETHERTYPE = 0xcc, + FC_RCV_CFG = 0xd0, + NIC_RCV_CFG = 0xd4, + FC_COS_TAGS = 0xd8, + NIC_COS_TAGS = 0xdc, + MGMT_RCV_CFG = 0xe0, + RT_IDX = 0xe4, + RT_DATA = 0xe8, + RSVD7 = 0xec, + XG_SERDES_ADDR = 0xf0, + XG_SERDES_DATA = 0xf4, + PRB_MX_ADDR = 0xf8, /* Use semaphore */ + PRB_MX_DATA = 0xfc, /* Use semaphore */ +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#define SMALL_BUFFER_SIZE 256 +#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE +#define SPLT_SETTING FSC_DBRST_1024 +#define SPLT_LEN 0 +#define QLGE_SB_PAD 0 +#else +#define SMALL_BUFFER_SIZE 512 +#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2) +#define SPLT_SETTING FSC_SH +#define SPLT_LEN (SPLT_HDR_EP | \ + min(SMALL_BUF_MAP_SIZE, 1023)) +#define QLGE_SB_PAD 32 +#endif + +/* + * CAM output format. + */ +enum { + CAM_OUT_ROUTE_FC = 0, + CAM_OUT_ROUTE_NIC = 1, + CAM_OUT_FUNC_SHIFT = 2, + CAM_OUT_RV = (1 << 4), + CAM_OUT_SH = (1 << 15), + CAM_OUT_CQ_ID_SHIFT = 5, +}; + +/* + * Mailbox definitions + */ +enum { + /* Asynchronous Event Notifications */ + AEN_SYS_ERR = 0x00008002, + AEN_LINK_UP = 0x00008011, + AEN_LINK_DOWN = 0x00008012, + AEN_IDC_CMPLT = 0x00008100, + AEN_IDC_REQ = 0x00008101, + AEN_IDC_EXT = 0x00008102, + AEN_DCBX_CHG = 0x00008110, + AEN_AEN_LOST = 0x00008120, + AEN_AEN_SFP_IN = 0x00008130, + AEN_AEN_SFP_OUT = 0x00008131, + AEN_FW_INIT_DONE = 0x00008400, + AEN_FW_INIT_FAIL = 0x00008401, + + /* Mailbox Command Opcodes. 
*/ + MB_CMD_NOP = 0x00000000, + MB_CMD_EX_FW = 0x00000002, + MB_CMD_MB_TEST = 0x00000006, + MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */ + MB_CMD_ABOUT_FW = 0x00000008, + MB_CMD_COPY_RISC_RAM = 0x0000000a, + MB_CMD_LOAD_RISC_RAM = 0x0000000b, + MB_CMD_DUMP_RISC_RAM = 0x0000000c, + MB_CMD_WRITE_RAM = 0x0000000d, + MB_CMD_INIT_RISC_RAM = 0x0000000e, + MB_CMD_READ_RAM = 0x0000000f, + MB_CMD_STOP_FW = 0x00000014, + MB_CMD_MAKE_SYS_ERR = 0x0000002a, + MB_CMD_WRITE_SFP = 0x00000030, + MB_CMD_READ_SFP = 0x00000031, + MB_CMD_INIT_FW = 0x00000060, + MB_CMD_GET_IFCB = 0x00000061, + MB_CMD_GET_FW_STATE = 0x00000069, + MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */ + MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */ + MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */ + MB_WOL_DISABLE = 0, + MB_WOL_MAGIC_PKT = (1 << 1), + MB_WOL_FLTR = (1 << 2), + MB_WOL_UCAST = (1 << 3), + MB_WOL_MCAST = (1 << 4), + MB_WOL_BCAST = (1 << 5), + MB_WOL_LINK_UP = (1 << 6), + MB_WOL_LINK_DOWN = (1 << 7), + MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */ + MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ + MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ + MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ + MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */ + MB_CMD_SET_WOL_IMMED = 0x00000115, + MB_CMD_PORT_RESET = 0x00000120, + MB_CMD_SET_PORT_CFG = 0x00000122, + MB_CMD_GET_PORT_CFG = 0x00000123, + MB_CMD_GET_LINK_STS = 0x00000124, + MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */ + QL_LED_BLINK = 0x03e803e8, + MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */ + MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ + MB_SET_MPI_TFK_STOP = (1 << 0), + MB_SET_MPI_TFK_RESUME = (1 << 1), + MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ + MB_GET_MPI_TFK_STOPPED = (1 << 0), + MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), + /* Sub-commands for IDC request. + * This describes the reason for the + * IDC request. + */ + MB_CMD_IOP_NONE = 0x0000, + MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001, + MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002, + MB_CMD_IOP_PREP_LINK_DOWN = 0x0010, + MB_CMD_IOP_DVR_START = 0x0100, + MB_CMD_IOP_FLASH_ACC = 0x0101, + MB_CMD_IOP_RESTART_MPI = 0x0102, + MB_CMD_IOP_CORE_DUMP_MPI = 0x0103, + + /* Mailbox Command Status. */ + MB_CMD_STS_GOOD = 0x00004000, /* Success. */ + MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */ + MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */ + MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */ + MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */ + MB_CMD_STS_ERR = 0x00004005, /* System Error. */ + MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */ +}; + +struct mbox_params { + u32 mbox_in[MAILBOX_COUNT]; + u32 mbox_out[MAILBOX_COUNT]; + int in_count; + int out_count; +}; + +struct flash_params_8012 { + u8 dev_id_str[4]; + __le16 size; + __le16 csum; + __le16 ver; + __le16 sub_dev_id; + u8 mac_addr[6]; + __le16 res; +}; + +/* 8000 device's flash is a different structure + * at a different offset in flash. + */ +#define FUNC0_FLASH_OFFSET 0x140200 +#define FUNC1_FLASH_OFFSET 0x140600 + +/* Flash related data structures. 
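+ * The 8000-series layout below lives at FUNC0_FLASH_OFFSET /
+ * FUNC1_FLASH_OFFSET; the 8012 layout is defined above.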
*/ +struct flash_params_8000 { + u8 dev_id_str[4]; /* "8000" */ + __le16 ver; + __le16 size; + __le16 csum; + __le16 reserved0; + __le16 total_size; + __le16 entry_count; + u8 data_type0; + u8 data_size0; + u8 mac_addr[6]; + u8 data_type1; + u8 data_size1; + u8 mac_addr1[6]; + u8 data_type2; + u8 data_size2; + __le16 vlan_id; + u8 data_type3; + u8 data_size3; + __le16 last; + u8 reserved1[464]; + __le16 subsys_ven_id; + __le16 subsys_dev_id; + u8 reserved2[4]; +}; + +union flash_params { + struct flash_params_8012 flash_params_8012; + struct flash_params_8000 flash_params_8000; +}; + +/* + * doorbell space for the rx ring context + */ +struct rx_doorbell_context { + u32 cnsmr_idx; /* 0x00 */ + u32 valid; /* 0x04 */ + u32 reserved[4]; /* 0x08-0x14 */ + u32 lbq_prod_idx; /* 0x18 */ + u32 sbq_prod_idx; /* 0x1c */ +}; + +/* + * doorbell space for the tx ring context + */ +struct tx_doorbell_context { + u32 prod_idx; /* 0x00 */ + u32 valid; /* 0x04 */ + u32 reserved[4]; /* 0x08-0x14 */ + u32 lbq_prod_idx; /* 0x18 */ + u32 sbq_prod_idx; /* 0x1c */ +}; + +/* DATA STRUCTURES SHARED WITH HARDWARE. */ +struct tx_buf_desc { + __le64 addr; + __le32 len; +#define TX_DESC_LEN_MASK 0x000fffff +#define TX_DESC_C 0x40000000 +#define TX_DESC_E 0x80000000 +} __packed; + +/* + * IOCB Definitions... + */ + +#define OPCODE_OB_MAC_IOCB 0x01 +#define OPCODE_OB_MAC_TSO_IOCB 0x02 +#define OPCODE_IB_MAC_IOCB 0x20 +#define OPCODE_IB_MPI_IOCB 0x21 +#define OPCODE_IB_AE_IOCB 0x3f + +struct ob_mac_iocb_req { + u8 opcode; + u8 flags1; +#define OB_MAC_IOCB_REQ_OI 0x01 +#define OB_MAC_IOCB_REQ_I 0x02 +#define OB_MAC_IOCB_REQ_D 0x08 +#define OB_MAC_IOCB_REQ_F 0x10 + u8 flags2; + u8 flags3; +#define OB_MAC_IOCB_DFP 0x02 +#define OB_MAC_IOCB_V 0x04 + __le32 reserved1[2]; + __le16 frame_len; +#define OB_MAC_IOCB_LEN_MASK 0x3ffff + __le16 reserved2; + u32 tid; + u32 txq_idx; + __le32 reserved3; + __le16 vlan_tci; + __le16 reserved4; + struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; +} __packed; + +struct ob_mac_iocb_rsp { + u8 opcode; /* */ + u8 flags1; /* */ +#define OB_MAC_IOCB_RSP_OI 0x01 /* */ +#define OB_MAC_IOCB_RSP_I 0x02 /* */ +#define OB_MAC_IOCB_RSP_E 0x08 /* */ +#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */ +#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */ +#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */ + u8 flags2; /* */ + u8 flags3; /* */ +#define OB_MAC_IOCB_RSP_B 0x80 /* */ + u32 tid; + u32 txq_idx; + __le32 reserved[13]; +} __packed; + +struct ob_mac_tso_iocb_req { + u8 opcode; + u8 flags1; +#define OB_MAC_TSO_IOCB_OI 0x01 +#define OB_MAC_TSO_IOCB_I 0x02 +#define OB_MAC_TSO_IOCB_D 0x08 +#define OB_MAC_TSO_IOCB_IP4 0x40 +#define OB_MAC_TSO_IOCB_IP6 0x80 + u8 flags2; +#define OB_MAC_TSO_IOCB_LSO 0x20 +#define OB_MAC_TSO_IOCB_UC 0x40 +#define OB_MAC_TSO_IOCB_TC 0x80 + u8 flags3; +#define OB_MAC_TSO_IOCB_IC 0x01 +#define OB_MAC_TSO_IOCB_DFP 0x02 +#define OB_MAC_TSO_IOCB_V 0x04 + __le32 reserved1[2]; + __le32 frame_len; + u32 tid; + u32 txq_idx; + __le16 total_hdrs_len; + __le16 net_trans_offset; +#define OB_MAC_TRANSPORT_HDR_SHIFT 6 + __le16 vlan_tci; + __le16 mss; + struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; +} __packed; + +struct ob_mac_tso_iocb_rsp { + u8 opcode; + u8 flags1; +#define OB_MAC_TSO_IOCB_RSP_OI 0x01 +#define OB_MAC_TSO_IOCB_RSP_I 0x02 +#define OB_MAC_TSO_IOCB_RSP_E 0x08 +#define OB_MAC_TSO_IOCB_RSP_S 0x10 +#define OB_MAC_TSO_IOCB_RSP_L 0x20 +#define OB_MAC_TSO_IOCB_RSP_P 0x40 + u8 flags2; /* */ + u8 flags3; /* */ +#define OB_MAC_TSO_IOCB_RSP_B 0x8000 + u32 tid; + u32 txq_idx; + __le32 reserved2[13]; +} 
__packed; + +struct ib_mac_iocb_rsp { + u8 opcode; /* 0x20 */ + u8 flags1; +#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */ +#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */ +#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */ +#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */ +#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */ +#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */ +#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */ +#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */ +#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */ +#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */ +#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */ +#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */ + u8 flags2; +#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */ +#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */ +#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */ +#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04 +#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08 +#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10 +#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14 +#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18 +#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c +#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */ +#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */ +#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */ + u8 flags3; +#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */ +#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */ +#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */ +#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */ +#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */ +#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */ +#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */ +#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */ +#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */ +#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */ + __le32 data_len; /* */ + __le64 data_addr; /* */ + __le32 rss; /* */ + __le16 vlan_id; /* 12 bits */ +#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */ +#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */ +#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff + + __le16 reserved1; + __le32 reserved2[6]; + u8 reserved3[3]; + u8 flags4; +#define IB_MAC_IOCB_RSP_HV 0x20 +#define IB_MAC_IOCB_RSP_HS 0x40 +#define IB_MAC_IOCB_RSP_HL 0x80 + __le32 hdr_len; /* */ + __le64 hdr_addr; /* */ +} __packed; + +struct ib_ae_iocb_rsp { + u8 opcode; + u8 flags1; +#define IB_AE_IOCB_RSP_OI 0x01 +#define IB_AE_IOCB_RSP_I 0x02 + u8 event; +#define LINK_UP_EVENT 0x00 +#define LINK_DOWN_EVENT 0x01 +#define CAM_LOOKUP_ERR_EVENT 0x06 +#define SOFT_ECC_ERROR_EVENT 0x07 +#define MGMT_ERR_EVENT 0x08 +#define TEN_GIG_MAC_EVENT 0x09 +#define GPI0_H2L_EVENT 0x10 +#define GPI0_L2H_EVENT 0x20 +#define GPI1_H2L_EVENT 0x11 +#define GPI1_L2H_EVENT 0x21 +#define PCI_ERR_ANON_BUF_RD 0x40 + u8 q_id; + __le32 reserved[15]; +} __packed; + +/* + * These three structures are for generic + * handling of ib and ob iocbs. + */ +struct ql_net_rsp_iocb { + u8 opcode; + u8 flags0; + __le16 length; + __le32 tid; + __le32 reserved[14]; +} __packed; + +struct net_req_iocb { + u8 opcode; + u8 flags0; + __le16 flags1; + __le32 tid; + __le32 reserved1[30]; +} __packed; + +/* + * tx ring initialization control block for chip. 
+ * It is defined as: + * "Work Queue Initialization Control Block" + */ +struct wqicb { + __le16 len; +#define Q_LEN_V (1 << 4) +#define Q_LEN_CPP_CONT 0x0000 +#define Q_LEN_CPP_16 0x0001 +#define Q_LEN_CPP_32 0x0002 +#define Q_LEN_CPP_64 0x0003 +#define Q_LEN_CPP_512 0x0006 + __le16 flags; +#define Q_PRI_SHIFT 1 +#define Q_FLAGS_LC 0x1000 +#define Q_FLAGS_LB 0x2000 +#define Q_FLAGS_LI 0x4000 +#define Q_FLAGS_LO 0x8000 + __le16 cq_id_rss; +#define Q_CQ_ID_RSS_RV 0x8000 + __le16 rid; + __le64 addr; + __le64 cnsmr_idx_addr; +} __packed; + +/* + * rx ring initialization control block for chip. + * It is defined as: + * "Completion Queue Initialization Control Block" + */ +struct cqicb { + u8 msix_vect; + u8 reserved1; + u8 reserved2; + u8 flags; +#define FLAGS_LV 0x08 +#define FLAGS_LS 0x10 +#define FLAGS_LL 0x20 +#define FLAGS_LI 0x40 +#define FLAGS_LC 0x80 + __le16 len; +#define LEN_V (1 << 4) +#define LEN_CPP_CONT 0x0000 +#define LEN_CPP_32 0x0001 +#define LEN_CPP_64 0x0002 +#define LEN_CPP_128 0x0003 + __le16 rid; + __le64 addr; + __le64 prod_idx_addr; + __le16 pkt_delay; + __le16 irq_delay; + __le64 lbq_addr; + __le16 lbq_buf_size; + __le16 lbq_len; /* entry count */ + __le64 sbq_addr; + __le16 sbq_buf_size; + __le16 sbq_len; /* entry count */ +} __packed; + +struct ricb { + u8 base_cq; +#define RSS_L4K 0x80 + u8 flags; +#define RSS_L6K 0x01 +#define RSS_LI 0x02 +#define RSS_LB 0x04 +#define RSS_LM 0x08 +#define RSS_RI4 0x10 +#define RSS_RT4 0x20 +#define RSS_RI6 0x40 +#define RSS_RT6 0x80 + __le16 mask; + u8 hash_cq_id[1024]; + __le32 ipv6_hash_key[10]; + __le32 ipv4_hash_key[4]; +} __packed; + +/* SOFTWARE/DRIVER DATA STRUCTURES. */ + +struct oal { + struct tx_buf_desc oal[TX_DESC_PER_OAL]; +}; + +struct map_list { + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct tx_ring_desc { + struct sk_buff *skb; + struct ob_mac_iocb_req *queue_entry; + u32 index; + struct oal oal; + struct map_list map[MAX_SKB_FRAGS + 2]; + int map_cnt; + struct tx_ring_desc *next; +}; + +struct page_chunk { + struct page *page; /* master page */ + char *va; /* virt addr for this chunk */ + u64 map; /* mapping for master */ + unsigned int offset; /* offset for this chunk */ + unsigned int last_flag; /* flag set for last chunk in page */ +}; + +struct bq_desc { + union { + struct page_chunk pg_chunk; + struct sk_buff *skb; + } p; + __le64 *addr; + u32 index; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) + +struct tx_ring { + /* + * queue info. 
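+	 * One tx_ring exists per TX queue (up to MAX_TX_RINGS); the wqicb
+	 * below is the control block handed to the chip when the queue is
+	 * initialized.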
+ */ + struct wqicb wqicb; /* structure used to inform chip of new queue */ + void *wq_base; /* pci_alloc:virtual addr for tx */ + dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */ + __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */ + dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */ + u32 wq_size; /* size in bytes of queue area */ + u32 wq_len; /* number of entries in queue */ + void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */ + void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */ + u16 prod_idx; /* current value for prod idx */ + u16 cq_id; /* completion (rx) queue for tx completions */ + u8 wq_id; /* queue id for this entry */ + u8 reserved1[3]; + struct tx_ring_desc *q; /* descriptor list for the queue */ + spinlock_t lock; + atomic_t tx_count; /* counts down for every outstanding IO */ + struct delayed_work tx_work; + struct ql_adapter *qdev; + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; +}; + +/* + * Type of inbound queue. + */ +enum { + DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */ + TX_Q = 3, /* Handles outbound completions. */ + RX_Q = 4, /* Handles inbound completions. */ +}; + +struct rx_ring { + struct cqicb cqicb; /* The chip's completion queue init control block. */ + + /* Completion queue elements. */ + void *cq_base; + dma_addr_t cq_base_dma; + u32 cq_size; + u32 cq_len; + u16 cq_id; + __le32 *prod_idx_sh_reg; /* Shadowed producer register. */ + dma_addr_t prod_idx_sh_reg_dma; + void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ + u32 cnsmr_idx; /* current sw idx */ + struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ + void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ + + /* Large buffer queue elements. */ + u32 lbq_len; /* entry count */ + u32 lbq_size; /* size in bytes of queue */ + u32 lbq_buf_size; + void *lbq_base; + dma_addr_t lbq_base_dma; + void *lbq_base_indirect; + dma_addr_t lbq_base_indirect_dma; + struct page_chunk pg_chunk; /* current page for chunks */ + struct bq_desc *lbq; /* array of control blocks */ + void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ + u32 lbq_prod_idx; /* current sw prod idx */ + u32 lbq_curr_idx; /* next entry we expect */ + u32 lbq_clean_idx; /* beginning of new descs */ + u32 lbq_free_cnt; /* free buffer desc cnt */ + + /* Small buffer queue elements. */ + u32 sbq_len; /* entry count */ + u32 sbq_size; /* size in bytes of queue */ + u32 sbq_buf_size; + void *sbq_base; + dma_addr_t sbq_base_dma; + void *sbq_base_indirect; + dma_addr_t sbq_base_indirect_dma; + struct bq_desc *sbq; /* array of control blocks */ + void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */ + u32 sbq_prod_idx; /* current sw prod idx */ + u32 sbq_curr_idx; /* next entry we expect */ + u32 sbq_clean_idx; /* beginning of new descs */ + u32 sbq_free_cnt; /* free buffer desc cnt */ + + /* Misc. handler elements. */ + u32 type; /* Type of queue, tx, rx. */ + u32 irq; /* Which vector this ring is assigned. */ + u32 cpu; /* Which CPU this should run on. */ + char name[IFNAMSIZ + 5]; + struct napi_struct napi; + u8 reserved; + struct ql_adapter *qdev; + u64 rx_packets; + u64 rx_multicast; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; +}; + +/* + * RSS Initialization Control Block + */ +struct hash_id { + u8 value[4]; +}; + +struct nic_stats { + /* + * These stats come from offset 200h to 278h + * in the XGMAC register. 
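+	 * (TX_PKTS through TX_OVERSIZE_PKT in the XGMAC register block
+	 * above.)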
+ */ + u64 tx_pkts; + u64 tx_bytes; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_ucast_pkts; + u64 tx_ctl_pkts; + u64 tx_pause_pkts; + u64 tx_64_pkt; + u64 tx_65_to_127_pkt; + u64 tx_128_to_255_pkt; + u64 tx_256_511_pkt; + u64 tx_512_to_1023_pkt; + u64 tx_1024_to_1518_pkt; + u64 tx_1519_to_max_pkt; + u64 tx_undersize_pkt; + u64 tx_oversize_pkt; + + /* + * These stats come from offset 300h to 3C8h + * in the XGMAC register. + */ + u64 rx_bytes; + u64 rx_bytes_ok; + u64 rx_pkts; + u64 rx_pkts_ok; + u64 rx_bcast_pkts; + u64 rx_mcast_pkts; + u64 rx_ucast_pkts; + u64 rx_undersize_pkts; + u64 rx_oversize_pkts; + u64 rx_jabber_pkts; + u64 rx_undersize_fcerr_pkts; + u64 rx_drop_events; + u64 rx_fcerr_pkts; + u64 rx_align_err; + u64 rx_symbol_err; + u64 rx_mac_err; + u64 rx_ctl_pkts; + u64 rx_pause_pkts; + u64 rx_64_pkts; + u64 rx_65_to_127_pkts; + u64 rx_128_255_pkts; + u64 rx_256_511_pkts; + u64 rx_512_to_1023_pkts; + u64 rx_1024_to_1518_pkts; + u64 rx_1519_to_max_pkts; + u64 rx_len_err_pkts; + /* Receive Mac Err stats */ + u64 rx_code_err; + u64 rx_oversize_err; + u64 rx_undersize_err; + u64 rx_preamble_err; + u64 rx_frame_len_err; + u64 rx_crc_err; + u64 rx_err_count; + /* + * These stats come from offset 500h to 5C8h + * in the XGMAC register. + */ + u64 tx_cbfc_pause_frames0; + u64 tx_cbfc_pause_frames1; + u64 tx_cbfc_pause_frames2; + u64 tx_cbfc_pause_frames3; + u64 tx_cbfc_pause_frames4; + u64 tx_cbfc_pause_frames5; + u64 tx_cbfc_pause_frames6; + u64 tx_cbfc_pause_frames7; + u64 rx_cbfc_pause_frames0; + u64 rx_cbfc_pause_frames1; + u64 rx_cbfc_pause_frames2; + u64 rx_cbfc_pause_frames3; + u64 rx_cbfc_pause_frames4; + u64 rx_cbfc_pause_frames5; + u64 rx_cbfc_pause_frames6; + u64 rx_cbfc_pause_frames7; + u64 rx_nic_fifo_drop; +}; + +/* Firmware coredump internal register address/length pairs. */ +enum { + MPI_CORE_REGS_ADDR = 0x00030000, + MPI_CORE_REGS_CNT = 127, + MPI_CORE_SH_REGS_CNT = 16, + TEST_REGS_ADDR = 0x00001000, + TEST_REGS_CNT = 23, + RMII_REGS_ADDR = 0x00001040, + RMII_REGS_CNT = 64, + FCMAC1_REGS_ADDR = 0x00001080, + FCMAC2_REGS_ADDR = 0x000010c0, + FCMAC_REGS_CNT = 64, + FC1_MBX_REGS_ADDR = 0x00001100, + FC2_MBX_REGS_ADDR = 0x00001240, + FC_MBX_REGS_CNT = 64, + IDE_REGS_ADDR = 0x00001140, + IDE_REGS_CNT = 64, + NIC1_MBX_REGS_ADDR = 0x00001180, + NIC2_MBX_REGS_ADDR = 0x00001280, + NIC_MBX_REGS_CNT = 64, + SMBUS_REGS_ADDR = 0x00001200, + SMBUS_REGS_CNT = 64, + I2C_REGS_ADDR = 0x00001fc0, + I2C_REGS_CNT = 64, + MEMC_REGS_ADDR = 0x00003000, + MEMC_REGS_CNT = 256, + PBUS_REGS_ADDR = 0x00007c00, + PBUS_REGS_CNT = 256, + MDE_REGS_ADDR = 0x00010000, + MDE_REGS_CNT = 6, + CODE_RAM_ADDR = 0x00020000, + CODE_RAM_CNT = 0x2000, + MEMC_RAM_ADDR = 0x00100000, + MEMC_RAM_CNT = 0x2000, +}; + +#define MPI_COREDUMP_COOKIE 0x5555aaaa +struct mpi_coredump_global_header { + u32 cookie; + u8 idString[16]; + u32 timeLo; + u32 timeHi; + u32 imageSize; + u32 headerSize; + u8 info[220]; +}; + +struct mpi_coredump_segment_header { + u32 cookie; + u32 segNum; + u32 segSize; + u32 extra; + u8 description[16]; +}; + +/* Firmware coredump header segment numbers. 
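+ * Each segment of the dump starts with an mpi_coredump_segment_header;
+ * its segNum field carries one of these values.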
*/ +enum { + CORE_SEG_NUM = 1, + TEST_LOGIC_SEG_NUM = 2, + RMII_SEG_NUM = 3, + FCMAC1_SEG_NUM = 4, + FCMAC2_SEG_NUM = 5, + FC1_MBOX_SEG_NUM = 6, + IDE_SEG_NUM = 7, + NIC1_MBOX_SEG_NUM = 8, + SMBUS_SEG_NUM = 9, + FC2_MBOX_SEG_NUM = 10, + NIC2_MBOX_SEG_NUM = 11, + I2C_SEG_NUM = 12, + MEMC_SEG_NUM = 13, + PBUS_SEG_NUM = 14, + MDE_SEG_NUM = 15, + NIC1_CONTROL_SEG_NUM = 16, + NIC2_CONTROL_SEG_NUM = 17, + NIC1_XGMAC_SEG_NUM = 18, + NIC2_XGMAC_SEG_NUM = 19, + WCS_RAM_SEG_NUM = 20, + MEMC_RAM_SEG_NUM = 21, + XAUI_AN_SEG_NUM = 22, + XAUI_HSS_PCS_SEG_NUM = 23, + XFI_AN_SEG_NUM = 24, + XFI_TRAIN_SEG_NUM = 25, + XFI_HSS_PCS_SEG_NUM = 26, + XFI_HSS_TX_SEG_NUM = 27, + XFI_HSS_RX_SEG_NUM = 28, + XFI_HSS_PLL_SEG_NUM = 29, + MISC_NIC_INFO_SEG_NUM = 30, + INTR_STATES_SEG_NUM = 31, + CAM_ENTRIES_SEG_NUM = 32, + ROUTING_WORDS_SEG_NUM = 33, + ETS_SEG_NUM = 34, + PROBE_DUMP_SEG_NUM = 35, + ROUTING_INDEX_SEG_NUM = 36, + MAC_PROTOCOL_SEG_NUM = 37, + XAUI2_AN_SEG_NUM = 38, + XAUI2_HSS_PCS_SEG_NUM = 39, + XFI2_AN_SEG_NUM = 40, + XFI2_TRAIN_SEG_NUM = 41, + XFI2_HSS_PCS_SEG_NUM = 42, + XFI2_HSS_TX_SEG_NUM = 43, + XFI2_HSS_RX_SEG_NUM = 44, + XFI2_HSS_PLL_SEG_NUM = 45, + SEM_REGS_SEG_NUM = 50 + +}; + +/* There are 64 generic NIC registers. */ +#define NIC_REGS_DUMP_WORD_COUNT 64 +/* XGMAC word count. */ +#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4) +/* Word counts for the SERDES blocks. */ +#define XG_SERDES_XAUI_AN_COUNT 14 +#define XG_SERDES_XAUI_HSS_PCS_COUNT 33 +#define XG_SERDES_XFI_AN_COUNT 14 +#define XG_SERDES_XFI_TRAIN_COUNT 12 +#define XG_SERDES_XFI_HSS_PCS_COUNT 15 +#define XG_SERDES_XFI_HSS_TX_COUNT 32 +#define XG_SERDES_XFI_HSS_RX_COUNT 32 +#define XG_SERDES_XFI_HSS_PLL_COUNT 32 + +/* There are 2 CNA ETS and 8 NIC ETS registers. */ +#define ETS_REGS_DUMP_WORD_COUNT 10 + +/* Each probe mux entry stores the probe type plus 64 entries + * that are each each 64-bits in length. There are a total of + * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes. + */ +#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2)) +#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \ + PRB_MX_ADDR_VALID_TOTAL) +/* Each routing entry consists of 4 32-bit words. + * They are route type, index, index word, and result. + * There are 2 route blocks with 8 entries each and + * 2 NIC blocks with 16 entries each. + * The totol entries is 48 with 4 words each. + */ +#define RT_IDX_DUMP_ENTRIES 48 +#define RT_IDX_DUMP_WORDS_PER_ENTRY 4 +#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \ + RT_IDX_DUMP_WORDS_PER_ENTRY) +/* There are 10 address blocks in filter, each with + * different entry counts and different word-count-per-entry. 
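The word-count macros above simply encode the sizing rules stated in the comments: one selector word plus a low/high pair for each of the 64 mux entries per probe, 34 valid probes in total, and 48 routing entries of 4 words each. A throwaway calculation (constants copied from those comments, not from the real defines) makes the totals concrete: 129 words per probe, 4386 probe-dump words, 192 routing-dump words.

#include <stdio.h>

/* Values taken from the comments above; the real PRB_MX_ADDR_MAX_MUX and
 * PRB_MX_ADDR_VALID_TOTAL defines live elsewhere in qlge.h. */
#define DEMO_PRB_MX_MAX_MUX         64 /* probe entries per module */
#define DEMO_PRB_MX_VALID_TOTAL     34 /* valid probes */

#define DEMO_RT_IDX_ENTRIES         48
#define DEMO_RT_IDX_WORDS_PER_ENTRY  4

int main(void)
{
	/* One probe-type word, then a low and a high 32-bit word per entry. */
	int prb_words_per_probe = 1 + (DEMO_PRB_MX_MAX_MUX * 2);
	int prb_total_words = prb_words_per_probe * DEMO_PRB_MX_VALID_TOTAL;

	/* Four words (type, index, index word, result) per routing entry. */
	int rt_total_words = DEMO_RT_IDX_ENTRIES * DEMO_RT_IDX_WORDS_PER_ENTRY;

	printf("probe dump: %d words per probe, %d words total (%zu bytes)\n",
	       prb_words_per_probe, prb_total_words,
	       (size_t)prb_total_words * sizeof(unsigned int));
	printf("routing dump: %d words (%zu bytes)\n",
	       rt_total_words, (size_t)rt_total_words * sizeof(unsigned int));
	return 0;
}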
+ */ +#define MAC_ADDR_DUMP_ENTRIES \ + ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \ + (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \ + (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \ + (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \ + (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \ + (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT)) +#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2 +#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \ + MAC_ADDR_DUMP_WORDS_PER_ENTRY) +/* Maximum of 4 functions whose semaphore registeres are + * in the coredump. + */ +#define MAX_SEMAPHORE_FUNCTIONS 4 +/* Defines for access the MPI shadow registers. */ +#define RISC_124 0x0003007c +#define RISC_127 0x0003007f +#define SHADOW_OFFSET 0xb0000000 +#define SHADOW_REG_SHIFT 20 + +struct ql_nic_misc { + u32 rx_ring_count; + u32 tx_ring_count; + u32 intr_count; + u32 function; +}; + +struct ql_reg_dump { + + /* segment 0 */ + struct mpi_coredump_global_header mpi_global_header; + + /* segment 16 */ + struct mpi_coredump_segment_header nic_regs_seg_hdr; + u32 nic_regs[64]; + + /* segment 30 */ + struct mpi_coredump_segment_header misc_nic_seg_hdr; + struct ql_nic_misc misc_nic_info; + + /* segment 31 */ + /* one interrupt state for each CQ */ + struct mpi_coredump_segment_header intr_states_seg_hdr; + u32 intr_states[MAX_CPUS]; + + /* segment 32 */ + /* 3 cam words each for 16 unicast, + * 2 cam words for each of 32 multicast. 
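RISC_124 and RISC_127, together with SHADOW_OFFSET and SHADOW_REG_SHIFT, implement an index/data pair for the MPI shadow registers: the index is written through RISC_124 as SHADOW_OFFSET | (i << SHADOW_REG_SHIFT) and the value is read back through RISC_127, which is exactly what ql_get_mpi_shadow_regs() does later in qlge_dbg.c. A self-contained sketch of that loop, with the MPI accessors stubbed out so it runs on its own:

#include <stdint.h>
#include <stdio.h>

/* Constants copied from the defines above. */
#define DEMO_RISC_124         0x0003007c
#define DEMO_RISC_127         0x0003007f
#define DEMO_SHADOW_OFFSET    0xb0000000u
#define DEMO_SHADOW_REG_SHIFT 20
#define DEMO_SHADOW_CNT       16 /* MPI_CORE_SH_REGS_CNT */

/* Stand-ins for the driver's MPI register accessors; they only print or
 * zero-fill so the sketch is self-contained. */
static int demo_write_mpi_reg(uint32_t reg, uint32_t val)
{
	printf("write mpi reg 0x%08x = 0x%08x\n", reg, val);
	return 0;
}

static int demo_read_mpi_reg(uint32_t reg, uint32_t *val)
{
	(void)reg;
	*val = 0; /* a real read would return the shadowed value */
	return 0;
}

int main(void)
{
	uint32_t shadow[DEMO_SHADOW_CNT];

	/* Select shadow register i through RISC_124, then read it back
	 * through RISC_127, mirroring ql_get_mpi_shadow_regs(). */
	for (uint32_t i = 0; i < DEMO_SHADOW_CNT; i++) {
		demo_write_mpi_reg(DEMO_RISC_124,
				   DEMO_SHADOW_OFFSET |
				   (i << DEMO_SHADOW_REG_SHIFT));
		demo_read_mpi_reg(DEMO_RISC_127, &shadow[i]);
	}
	printf("collected %d shadow words\n", DEMO_SHADOW_CNT);
	return 0;
}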
+ */ + struct mpi_coredump_segment_header cam_entries_seg_hdr; + u32 cam_entries[(16 * 3) + (32 * 3)]; + + /* segment 33 */ + struct mpi_coredump_segment_header nic_routing_words_seg_hdr; + u32 nic_routing_words[16]; + + /* segment 34 */ + struct mpi_coredump_segment_header ets_seg_hdr; + u32 ets[8+2]; +}; + +struct ql_mpi_coredump { + /* segment 0 */ + struct mpi_coredump_global_header mpi_global_header; + + /* segment 1 */ + struct mpi_coredump_segment_header core_regs_seg_hdr; + u32 mpi_core_regs[MPI_CORE_REGS_CNT]; + u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT]; + + /* segment 2 */ + struct mpi_coredump_segment_header test_logic_regs_seg_hdr; + u32 test_logic_regs[TEST_REGS_CNT]; + + /* segment 3 */ + struct mpi_coredump_segment_header rmii_regs_seg_hdr; + u32 rmii_regs[RMII_REGS_CNT]; + + /* segment 4 */ + struct mpi_coredump_segment_header fcmac1_regs_seg_hdr; + u32 fcmac1_regs[FCMAC_REGS_CNT]; + + /* segment 5 */ + struct mpi_coredump_segment_header fcmac2_regs_seg_hdr; + u32 fcmac2_regs[FCMAC_REGS_CNT]; + + /* segment 6 */ + struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr; + u32 fc1_mbx_regs[FC_MBX_REGS_CNT]; + + /* segment 7 */ + struct mpi_coredump_segment_header ide_regs_seg_hdr; + u32 ide_regs[IDE_REGS_CNT]; + + /* segment 8 */ + struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr; + u32 nic1_mbx_regs[NIC_MBX_REGS_CNT]; + + /* segment 9 */ + struct mpi_coredump_segment_header smbus_regs_seg_hdr; + u32 smbus_regs[SMBUS_REGS_CNT]; + + /* segment 10 */ + struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr; + u32 fc2_mbx_regs[FC_MBX_REGS_CNT]; + + /* segment 11 */ + struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr; + u32 nic2_mbx_regs[NIC_MBX_REGS_CNT]; + + /* segment 12 */ + struct mpi_coredump_segment_header i2c_regs_seg_hdr; + u32 i2c_regs[I2C_REGS_CNT]; + /* segment 13 */ + struct mpi_coredump_segment_header memc_regs_seg_hdr; + u32 memc_regs[MEMC_REGS_CNT]; + + /* segment 14 */ + struct mpi_coredump_segment_header pbus_regs_seg_hdr; + u32 pbus_regs[PBUS_REGS_CNT]; + + /* segment 15 */ + struct mpi_coredump_segment_header mde_regs_seg_hdr; + u32 mde_regs[MDE_REGS_CNT]; + + /* segment 16 */ + struct mpi_coredump_segment_header nic_regs_seg_hdr; + u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT]; + + /* segment 17 */ + struct mpi_coredump_segment_header nic2_regs_seg_hdr; + u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT]; + + /* segment 18 */ + struct mpi_coredump_segment_header xgmac1_seg_hdr; + u32 xgmac1[XGMAC_DUMP_WORD_COUNT]; + + /* segment 19 */ + struct mpi_coredump_segment_header xgmac2_seg_hdr; + u32 xgmac2[XGMAC_DUMP_WORD_COUNT]; + + /* segment 20 */ + struct mpi_coredump_segment_header code_ram_seg_hdr; + u32 code_ram[CODE_RAM_CNT]; + + /* segment 21 */ + struct mpi_coredump_segment_header memc_ram_seg_hdr; + u32 memc_ram[MEMC_RAM_CNT]; + + /* segment 22 */ + struct mpi_coredump_segment_header xaui_an_hdr; + u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT]; + + /* segment 23 */ + struct mpi_coredump_segment_header xaui_hss_pcs_hdr; + u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; + + /* segment 24 */ + struct mpi_coredump_segment_header xfi_an_hdr; + u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT]; + + /* segment 25 */ + struct mpi_coredump_segment_header xfi_train_hdr; + u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; + + /* segment 26 */ + struct mpi_coredump_segment_header xfi_hss_pcs_hdr; + u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; + + /* segment 27 */ + struct mpi_coredump_segment_header xfi_hss_tx_hdr; + u32 
serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; + + /* segment 28 */ + struct mpi_coredump_segment_header xfi_hss_rx_hdr; + u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; + + /* segment 29 */ + struct mpi_coredump_segment_header xfi_hss_pll_hdr; + u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; + + /* segment 30 */ + struct mpi_coredump_segment_header misc_nic_seg_hdr; + struct ql_nic_misc misc_nic_info; + + /* segment 31 */ + /* one interrupt state for each CQ */ + struct mpi_coredump_segment_header intr_states_seg_hdr; + u32 intr_states[MAX_RX_RINGS]; + + /* segment 32 */ + /* 3 cam words each for 16 unicast, + * 2 cam words for each of 32 multicast. + */ + struct mpi_coredump_segment_header cam_entries_seg_hdr; + u32 cam_entries[(16 * 3) + (32 * 3)]; + + /* segment 33 */ + struct mpi_coredump_segment_header nic_routing_words_seg_hdr; + u32 nic_routing_words[16]; + /* segment 34 */ + struct mpi_coredump_segment_header ets_seg_hdr; + u32 ets[ETS_REGS_DUMP_WORD_COUNT]; + + /* segment 35 */ + struct mpi_coredump_segment_header probe_dump_seg_hdr; + u32 probe_dump[PRB_MX_DUMP_TOT_COUNT]; + + /* segment 36 */ + struct mpi_coredump_segment_header routing_reg_seg_hdr; + u32 routing_regs[RT_IDX_DUMP_TOT_WORDS]; + + /* segment 37 */ + struct mpi_coredump_segment_header mac_prot_reg_seg_hdr; + u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS]; + + /* segment 38 */ + struct mpi_coredump_segment_header xaui2_an_hdr; + u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT]; + + /* segment 39 */ + struct mpi_coredump_segment_header xaui2_hss_pcs_hdr; + u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT]; + + /* segment 40 */ + struct mpi_coredump_segment_header xfi2_an_hdr; + u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT]; + + /* segment 41 */ + struct mpi_coredump_segment_header xfi2_train_hdr; + u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT]; + + /* segment 42 */ + struct mpi_coredump_segment_header xfi2_hss_pcs_hdr; + u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT]; + + /* segment 43 */ + struct mpi_coredump_segment_header xfi2_hss_tx_hdr; + u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT]; + + /* segment 44 */ + struct mpi_coredump_segment_header xfi2_hss_rx_hdr; + u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT]; + + /* segment 45 */ + struct mpi_coredump_segment_header xfi2_hss_pll_hdr; + u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT]; + + /* segment 50 */ + /* semaphore register for all 5 functions */ + struct mpi_coredump_segment_header sem_regs_seg_hdr; + u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS]; +}; + +/* + * intr_context structure is used during initialization + * to hook the interrupts. It is also used in a single + * irq environment as a context to the ISR. + */ +struct intr_context { + struct ql_adapter *qdev; + u32 intr; + u32 irq_mask; /* Mask of which rings the vector services. */ + u32 hooked; + u32 intr_en_mask; /* value/mask used to enable this intr */ + u32 intr_dis_mask; /* value/mask used to disable this intr */ + u32 intr_read_mask; /* value/mask used to read this intr */ + char name[IFNAMSIZ * 2]; + atomic_t irq_cnt; /* irq_cnt is used in single vector + * environment. It's incremented for each + * irq handler that is scheduled. When each + * handler finishes it decrements irq_cnt and + * enables interrupts if it's zero. */ + irq_handler_t handler; +}; + +/* adapter flags definitions. */ +enum { + QL_ADAPTER_UP = 0, /* Adapter has been brought up. 
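The irq_cnt comment above describes a simple reference count for the single-vector case: each scheduled handler bumps the count and the last handler to finish re-enables the interrupt. The sketch below shows only that counting discipline with C11 atomics; the point at which the line is masked, and the demo_ names, are assumptions made for illustration and not the driver's code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int irq_cnt; /* outstanding scheduled handlers */

/* Stand-ins for the hardware mask/unmask writes. */
static void demo_disable_irq(void) { puts("irq masked"); }
static void demo_enable_irq(void)  { puts("irq unmasked"); }

/* A handler is being scheduled: count it, mask on the first one
 * (whether the real driver masks at this point is an assumption). */
static void demo_schedule_handler(void)
{
	if (atomic_fetch_add(&irq_cnt, 1) == 0)
		demo_disable_irq();
}

/* A handler finished: the last one out re-enables the interrupt,
 * matching the irq_cnt comment above. */
static void demo_handler_done(void)
{
	if (atomic_fetch_sub(&irq_cnt, 1) == 1)
		demo_enable_irq();
}

int main(void)
{
	demo_schedule_handler();
	demo_schedule_handler();
	demo_handler_done();
	demo_handler_done(); /* count hits zero here */
	return 0;
}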
*/ + QL_LEGACY_ENABLED = 1, + QL_MSI_ENABLED = 2, + QL_MSIX_ENABLED = 3, + QL_DMA64 = 4, + QL_PROMISCUOUS = 5, + QL_ALLMULTI = 6, + QL_PORT_CFG = 7, + QL_CAM_RT_SET = 8, + QL_SELFTEST = 9, + QL_LB_LINK_UP = 10, + QL_FRC_COREDUMP = 11, + QL_EEH_FATAL = 12, + QL_ASIC_RECOVERY = 14, /* We are in ascic recovery. */ +}; + +/* link_status bit definitions */ +enum { + STS_LOOPBACK_MASK = 0x00000700, + STS_LOOPBACK_PCS = 0x00000100, + STS_LOOPBACK_HSS = 0x00000200, + STS_LOOPBACK_EXT = 0x00000300, + STS_PAUSE_MASK = 0x000000c0, + STS_PAUSE_STD = 0x00000040, + STS_PAUSE_PRI = 0x00000080, + STS_SPEED_MASK = 0x00000038, + STS_SPEED_100Mb = 0x00000000, + STS_SPEED_1Gb = 0x00000008, + STS_SPEED_10Gb = 0x00000010, + STS_LINK_TYPE_MASK = 0x00000007, + STS_LINK_TYPE_XFI = 0x00000001, + STS_LINK_TYPE_XAUI = 0x00000002, + STS_LINK_TYPE_XFI_BP = 0x00000003, + STS_LINK_TYPE_XAUI_BP = 0x00000004, + STS_LINK_TYPE_10GBASET = 0x00000005, +}; + +/* link_config bit definitions */ +enum { + CFG_JUMBO_FRAME_SIZE = 0x00010000, + CFG_PAUSE_MASK = 0x00000060, + CFG_PAUSE_STD = 0x00000020, + CFG_PAUSE_PRI = 0x00000040, + CFG_DCBX = 0x00000010, + CFG_LOOPBACK_MASK = 0x00000007, + CFG_LOOPBACK_PCS = 0x00000002, + CFG_LOOPBACK_HSS = 0x00000004, + CFG_LOOPBACK_EXT = 0x00000006, + CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580, +}; + +struct nic_operations { + + int (*get_flash) (struct ql_adapter *); + int (*port_initialize) (struct ql_adapter *); +}; + +/* + * The main Adapter structure definition. + * This structure has all fields relevant to the hardware. + */ +struct ql_adapter { + struct ricb ricb; + unsigned long flags; + u32 wol; + + struct nic_stats nic_stats; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + /* PCI Configuration information for this device */ + struct pci_dev *pdev; + struct net_device *ndev; /* Parent NET device */ + + /* Hardware information */ + u32 chip_rev_id; + u32 fw_rev_id; + u32 func; /* PCI function for this adapter */ + u32 alt_func; /* PCI function for alternate adapter */ + u32 port; /* Port number this adapter */ + + spinlock_t adapter_lock; + spinlock_t hw_lock; + spinlock_t stats_lock; + + /* PCI Bus Relative Register Addresses */ + void __iomem *reg_base; + void __iomem *doorbell_area; + u32 doorbell_area_size; + + u32 msg_enable; + + /* Page for Shadow Registers */ + void *rx_ring_shadow_reg_area; + dma_addr_t rx_ring_shadow_reg_dma; + void *tx_ring_shadow_reg_area; + dma_addr_t tx_ring_shadow_reg_dma; + + u32 mailbox_in; + u32 mailbox_out; + struct mbox_params idc_mbc; + struct mutex mpi_mutex; + + int tx_ring_size; + int rx_ring_size; + u32 intr_count; + struct msix_entry *msi_x_entry; + struct intr_context intr_context[MAX_RX_RINGS]; + + int tx_ring_count; /* One per online CPU. */ + u32 rss_ring_count; /* One per irq vector. 
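link_status is a packed word; the masks above split it into loopback, pause, speed and link-type fields. A minimal decoder for the speed and link-type groups (mask values copied from the enum, demo_ names are mine) could look like this:

#include <stdint.h>
#include <stdio.h>

/* Mask/value pairs copied from the link_status enum above. */
#define DEMO_STS_SPEED_MASK     0x00000038
#define DEMO_STS_SPEED_100Mb    0x00000000
#define DEMO_STS_SPEED_1Gb      0x00000008
#define DEMO_STS_SPEED_10Gb     0x00000010
#define DEMO_STS_LINK_TYPE_MASK 0x00000007
#define DEMO_STS_LINK_TYPE_XFI  0x00000001
#define DEMO_STS_LINK_TYPE_XAUI 0x00000002

static const char *demo_speed_str(uint32_t link_status)
{
	switch (link_status & DEMO_STS_SPEED_MASK) {
	case DEMO_STS_SPEED_100Mb: return "100Mb";
	case DEMO_STS_SPEED_1Gb:   return "1Gb";
	case DEMO_STS_SPEED_10Gb:  return "10Gb";
	default:                   return "unknown";
	}
}

static const char *demo_link_type_str(uint32_t link_status)
{
	switch (link_status & DEMO_STS_LINK_TYPE_MASK) {
	case DEMO_STS_LINK_TYPE_XFI:  return "XFI";
	case DEMO_STS_LINK_TYPE_XAUI: return "XAUI";
	default:                      return "other";
	}
}

int main(void)
{
	uint32_t link_status = DEMO_STS_SPEED_10Gb | DEMO_STS_LINK_TYPE_XFI;

	printf("link: %s over %s\n", demo_speed_str(link_status),
	       demo_link_type_str(link_status));
	return 0;
}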
*/ + /* + * rx_ring_count = + * (CPU count * outbound completion rx_ring) + + * (irq_vector_cnt * inbound (RSS) completion rx_ring) + */ + int rx_ring_count; + int ring_mem_size; + void *ring_mem; + + struct rx_ring rx_ring[MAX_RX_RINGS]; + struct tx_ring tx_ring[MAX_TX_RINGS]; + unsigned int lbq_buf_order; + + int rx_csum; + u32 default_rx_queue; + + u16 rx_coalesce_usecs; /* cqicb->int_delay */ + u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */ + u16 tx_coalesce_usecs; /* cqicb->int_delay */ + u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */ + + u32 xg_sem_mask; + u32 port_link_up; + u32 port_init; + u32 link_status; + struct ql_mpi_coredump *mpi_coredump; + u32 core_is_dumped; + u32 link_config; + u32 led_config; + u32 max_frame_size; + + union flash_params flash; + + struct workqueue_struct *workqueue; + struct delayed_work asic_reset_work; + struct delayed_work mpi_reset_work; + struct delayed_work mpi_work; + struct delayed_work mpi_port_cfg_work; + struct delayed_work mpi_idc_work; + struct delayed_work mpi_core_to_log; + struct completion ide_completion; + const struct nic_operations *nic_ops; + u16 device_id; + struct timer_list timer; + atomic_t lb_count; + /* Keep local copy of current mac address. */ + char current_mac_addr[ETH_ALEN]; +}; + +/* + * Typical Register accessor for memory mapped device. + */ +static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) +{ + return readl(qdev->reg_base + reg); +} + +/* + * Typical Register accessor for memory mapped device. + */ +static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) +{ + writel(val, qdev->reg_base + reg); +} + +/* + * Doorbell Registers: + * Doorbell registers are virtual registers in the PCI memory space. + * The space is allocated by the chip during PCI initialization. The + * device driver finds the doorbell address in BAR 3 in PCI config space. + * The registers are used to control outbound and inbound queues. For + * example, the producer index for an outbound queue. Each queue uses + * 1 4k chunk of memory. The lower half of the space is for outbound + * queues. The upper half is for inbound queues. + */ +static inline void ql_write_db_reg(u32 val, void __iomem *addr) +{ + writel(val, addr); + mmiowb(); +} + +/* + * Shadow Registers: + * Outbound queues have a consumer index that is maintained by the chip. + * Inbound queues have a producer index that is maintained by the chip. + * For lower overhead, these registers are "shadowed" to host memory + * which allows the device driver to track the queue progress without + * PCI reads. When an entry is placed on an inbound queue, the chip will + * update the relevant index register and then copy the value to the + * shadow register in host memory. 
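Per the doorbell comment above, the doorbell space found through BAR 3 is carved into 4 KB chunks, one per queue, with outbound queues in the lower half and inbound queues in the upper half. The sketch below only illustrates that carving; the per-queue offset arithmetic and the base/size values are assumptions for illustration, not the driver's actual mapping code.

#include <stdint.h>
#include <stdio.h>

#define DEMO_DB_CHUNK 4096 /* one 4k doorbell chunk per queue */

/* Hypothetical layout helpers: outbound (tx) doorbells in the lower half
 * of the area, inbound (rx) doorbells in the upper half, one chunk per
 * queue.  The half-split offset and the indexing are assumed; the driver
 * assigns the real pointers at ring-init time. */
static uintptr_t demo_tx_doorbell(uintptr_t db_base, unsigned int wq_id)
{
	return db_base + (uintptr_t)wq_id * DEMO_DB_CHUNK;
}

static uintptr_t demo_rx_doorbell(uintptr_t db_base, uintptr_t db_size,
				  unsigned int cq_id)
{
	return db_base + db_size / 2 + (uintptr_t)cq_id * DEMO_DB_CHUNK;
}

int main(void)
{
	uintptr_t base = 0xd0000000; /* made-up BAR 3 mapping */
	uintptr_t size = 0x40000;    /* made-up doorbell area size */

	printf("tx queue 2 doorbell at 0x%lx\n",
	       (unsigned long)demo_tx_doorbell(base, 2));
	printf("rx queue 2 doorbell at 0x%lx\n",
	       (unsigned long)demo_rx_doorbell(base, size, 2));
	return 0;
}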
+ */ +static inline u32 ql_read_sh_reg(__le32 *addr) +{ + u32 reg; + reg = le32_to_cpu(*addr); + rmb(); + return reg; +} + +extern char qlge_driver_name[]; +extern const char qlge_driver_version[]; +extern const struct ethtool_ops qlge_ethtool_ops; + +int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); +void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); +int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); +int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, + u32 *value); +int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); +int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, + u16 q_id); +void ql_queue_fw_error(struct ql_adapter *qdev); +void ql_mpi_work(struct work_struct *work); +void ql_mpi_reset_work(struct work_struct *work); +void ql_mpi_core_to_log(struct work_struct *work); +int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); +void ql_queue_asic_error(struct ql_adapter *qdev); +u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr); +void ql_set_ethtool_ops(struct net_device *ndev); +int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); +void ql_mpi_idc_work(struct work_struct *work); +void ql_mpi_port_cfg_work(struct work_struct *work); +int ql_mb_get_fw_state(struct ql_adapter *qdev); +int ql_cam_route_initialize(struct ql_adapter *qdev); +int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); +int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data); +int ql_unpause_mpi_risc(struct ql_adapter *qdev); +int ql_pause_mpi_risc(struct ql_adapter *qdev); +int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); +int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); +int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr, + int word_count); +int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump); +int ql_mb_about_fw(struct ql_adapter *qdev); +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); +int ql_mb_get_led_cfg(struct ql_adapter *qdev); +void ql_link_on(struct ql_adapter *qdev); +void ql_link_off(struct ql_adapter *qdev); +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); +int ql_mb_get_port_cfg(struct ql_adapter *qdev); +int ql_mb_set_port_cfg(struct ql_adapter *qdev); +int ql_wait_fifo_empty(struct ql_adapter *qdev); +void ql_get_dump(struct ql_adapter *qdev, void *buff); +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); +void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *); +int ql_own_firmware(struct ql_adapter *qdev); +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); + +/* #define QL_ALL_DUMP */ +/* #define QL_REG_DUMP */ +/* #define QL_DEV_DUMP */ +/* #define QL_CB_DUMP */ +/* #define QL_IB_DUMP */ +/* #define QL_OB_DUMP */ + +#ifdef QL_REG_DUMP +void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); +void ql_dump_routing_entries(struct ql_adapter *qdev); +void ql_dump_regs(struct ql_adapter *qdev); +#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) +#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) +#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) +#else +#define QL_DUMP_REGS(qdev) +#define QL_DUMP_ROUTE(qdev) +#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) +#endif + +#ifdef QL_STAT_DUMP +void ql_dump_stat(struct ql_adapter *qdev); +#define 
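The QL_*_DUMP scaffolding here uses the usual compile-time hook pattern: with the matching #define absent, every dump call expands to nothing and costs nothing at runtime. A self-contained version of the same pattern (DEMO_ names are mine):

#include <stdio.h>

/* Uncomment to compile the dump helper in. */
/* #define DEMO_DEBUG_DUMP */

#ifdef DEMO_DEBUG_DUMP
static void demo_dump_state(int ring_id)
{
	printf("ring %d state dump\n", ring_id);
}
#define DEMO_DUMP_STATE(ring_id) demo_dump_state(ring_id)
#else
#define DEMO_DUMP_STATE(ring_id) /* expands to nothing, like QL_DUMP_REGS() */
#endif

int main(void)
{
	DEMO_DUMP_STATE(3); /* a bare ';' unless DEMO_DEBUG_DUMP is defined */
	puts("done");
	return 0;
}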
QL_DUMP_STAT(qdev) ql_dump_stat(qdev) +#else +#define QL_DUMP_STAT(qdev) +#endif + +#ifdef QL_DEV_DUMP +void ql_dump_qdev(struct ql_adapter *qdev); +#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev) +#else +#define QL_DUMP_QDEV(qdev) +#endif + +#ifdef QL_CB_DUMP +void ql_dump_wqicb(struct wqicb *wqicb); +void ql_dump_tx_ring(struct tx_ring *tx_ring); +void ql_dump_ricb(struct ricb *ricb); +void ql_dump_cqicb(struct cqicb *cqicb); +void ql_dump_rx_ring(struct rx_ring *rx_ring); +void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); +#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb) +#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb) +#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring) +#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb) +#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring) +#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \ + ql_dump_hw_cb(qdev, size, bit, q_id) +#else +#define QL_DUMP_RICB(ricb) +#define QL_DUMP_WQICB(wqicb) +#define QL_DUMP_TX_RING(tx_ring) +#define QL_DUMP_CQICB(cqicb) +#define QL_DUMP_RX_RING(rx_ring) +#define QL_DUMP_HW_CB(qdev, size, bit, q_id) +#endif + +#ifdef QL_OB_DUMP +void ql_dump_tx_desc(struct tx_buf_desc *tbd); +void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb); +void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp); +#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb) +#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp) +#else +#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) +#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) +#endif + +#ifdef QL_IB_DUMP +void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp); +#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp) +#else +#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) +#endif + +#ifdef QL_ALL_DUMP +void ql_dump_all(struct ql_adapter *qdev); +#define QL_DUMP_ALL(qdev) ql_dump_all(qdev) +#else +#define QL_DUMP_ALL(qdev) +#endif + +#endif /* _QLGE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c new file mode 100644 index 000000000..829be21f9 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -0,0 +1,2042 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +#include "qlge.h" + +/* Read a NIC register from the alternate function. */ +static u32 ql_read_other_func_reg(struct ql_adapter *qdev, + u32 reg) +{ + u32 register_to_read; + u32 reg_val; + unsigned int status = 0; + + register_to_read = MPI_NIC_REG_BLOCK + | MPI_NIC_READ + | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) + | reg; + status = ql_read_mpi_reg(qdev, register_to_read, ®_val); + if (status != 0) + return 0xffffffff; + + return reg_val; +} + +/* Write a NIC register from the alternate function. 
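ql_read_other_func_reg() reaches the other PCI function's NIC registers by composing a proxy address for the MPI register interface: the NIC register block selector, a read flag, the alternate function number and the register offset are OR'd together. The bit positions below are placeholders (the real MPI_NIC_REG_BLOCK, MPI_NIC_READ and MPI_NIC_FUNCTION_SHIFT values live elsewhere in qlge.h); the sketch only shows how the pieces combine.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout; see qlge.h for the real values. */
#define DEMO_NIC_REG_BLOCK      0x00020000u
#define DEMO_NIC_READ           0x00008000u
#define DEMO_NIC_FUNCTION_SHIFT 12

/* Compose the proxy address used to reach a register that belongs to the
 * alternate PCI function, in the style of ql_read_other_func_reg(). */
static uint32_t demo_other_func_addr(uint32_t alt_func, uint32_t reg)
{
	return DEMO_NIC_REG_BLOCK | DEMO_NIC_READ |
	       (alt_func << DEMO_NIC_FUNCTION_SHIFT) | reg;
}

int main(void)
{
	/* Register 0x30 of function 1, proxied through the MPI interface. */
	printf("proxy address: 0x%08x\n", demo_other_func_addr(1, 0x30));
	return 0;
}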
*/ +static int ql_write_other_func_reg(struct ql_adapter *qdev, + u32 reg, u32 reg_val) +{ + u32 register_to_read; + int status = 0; + + register_to_read = MPI_NIC_REG_BLOCK + | MPI_NIC_READ + | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) + | reg; + status = ql_write_mpi_reg(qdev, register_to_read, reg_val); + + return status; +} + +static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, + u32 bit, u32 err_bit) +{ + u32 temp; + int count = 10; + + while (count) { + temp = ql_read_other_func_reg(qdev, reg); + + /* check for errors */ + if (temp & err_bit) + return -1; + else if (temp & bit) + return 0; + mdelay(10); + count--; + } + return -1; +} + +static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, + u32 *data) +{ + int status; + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, + XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* set up for reg read */ + ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, + XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* get the data */ + *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); +exit: + return status; +} + +/* Read out the SERDES registers */ +static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data) +{ + int status; + + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* set up for reg read */ + ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); + if (status) + goto exit; + + /* get the data */ + *data = ql_read32(qdev, XG_SERDES_DATA); +exit: + return status; +} + +static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, + u32 *direct_ptr, u32 *indirect_ptr, + unsigned int direct_valid, unsigned int indirect_valid) +{ + unsigned int status; + + status = 1; + if (direct_valid) + status = ql_read_serdes_reg(qdev, addr, direct_ptr); + /* Dead fill any failures or invalids. */ + if (status) + *direct_ptr = 0xDEADBEEF; + + status = 1; + if (indirect_valid) + status = ql_read_other_func_serdes_reg( + qdev, addr, indirect_ptr); + /* Dead fill any failures or invalids. 
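ql_read_serdes_reg() above is a classic indirect register read: wait for the address register to report ready, write the target address with the read bit set, wait again, then pull the result out of the data register. Below is a self-contained sketch of that handshake against a toy register file; the offsets and bits are placeholders, not the real XG_SERDES_* values.

#include <stdint.h>
#include <stdio.h>

/* Placeholder offsets and bits standing in for XG_SERDES_ADDR,
 * XG_SERDES_DATA, XG_SERDES_ADDR_RDY and PROC_ADDR_R. */
#define DEMO_SERDES_ADDR 0x70
#define DEMO_SERDES_DATA 0x74
#define DEMO_ADDR_RDY    0x80000000u
#define DEMO_ADDR_READ   0x40000000u

/* A toy register file standing in for the memory-mapped device. */
static uint32_t demo_regs[0x100 / 4];

static uint32_t demo_read32(uint32_t reg)
{
	return demo_regs[reg / 4];
}

static void demo_write32(uint32_t reg, uint32_t val)
{
	demo_regs[reg / 4] = val;
	/* Toy hardware model: an address write "completes" immediately. */
	if (reg == DEMO_SERDES_ADDR)
		demo_regs[reg / 4] |= DEMO_ADDR_RDY;
}

static int demo_wait_ready(uint32_t reg, uint32_t bit)
{
	for (int tries = 0; tries < 100; tries++)
		if (demo_read32(reg) & bit)
			return 0;
	return -1;
}

/* Indirect read, same shape as ql_read_serdes_reg(): wait ready, park the
 * target address with the read bit set, wait ready, collect the data. */
static int demo_read_serdes(uint32_t serdes_reg, uint32_t *data)
{
	if (demo_wait_ready(DEMO_SERDES_ADDR, DEMO_ADDR_RDY))
		return -1;
	demo_write32(DEMO_SERDES_ADDR, serdes_reg | DEMO_ADDR_READ);
	if (demo_wait_ready(DEMO_SERDES_ADDR, DEMO_ADDR_RDY))
		return -1;
	*data = demo_read32(DEMO_SERDES_DATA);
	return 0;
}

int main(void)
{
	uint32_t val;

	demo_regs[DEMO_SERDES_ADDR / 4] = DEMO_ADDR_RDY; /* device is idle */
	if (!demo_read_serdes(0x800, &val))
		printf("serdes reg 0x800 = 0x%08x\n", val);
	return 0;
}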
*/ + if (status) + *indirect_ptr = 0xDEADBEEF; +} + +static int ql_get_serdes_regs(struct ql_adapter *qdev, + struct ql_mpi_coredump *mpi_coredump) +{ + int status; + unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid; + unsigned int xaui_indirect_valid, i; + u32 *direct_ptr, temp; + u32 *indirect_ptr; + + xfi_direct_valid = xfi_indirect_valid = 0; + xaui_direct_valid = xaui_indirect_valid = 1; + + /* The XAUI needs to be read out per port */ + if (qdev->func & 1) { + /* We are NIC 2 */ + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; + + status = ql_read_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; + } else { + /* We are NIC 1 */ + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; + + status = ql_read_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; + } + + /* + * XFI register is shared so only need to read one + * functions and then check the bits. + */ + status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); + if (status) + temp = 0; + + if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) == + XG_SERDES_ADDR_XFI1_PWR_UP) { + /* now see if i'm NIC 1 or NIC 2 */ + if (qdev->func & 1) + /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ + xfi_indirect_valid = 1; + else + xfi_direct_valid = 1; + } + if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) == + XG_SERDES_ADDR_XFI2_PWR_UP) { + /* now see if i'm NIC 1 or NIC 2 */ + if (qdev->func & 1) + /* I'm NIC 2, so the indirect (NIC1) xfi is up. */ + xfi_direct_valid = 1; + else + xfi_indirect_valid = 1; + } + + /* Get XAUI_AN register block. */ + if (qdev->func & 1) { + /* Function 2 is direct */ + direct_ptr = mpi_coredump->serdes2_xaui_an; + indirect_ptr = mpi_coredump->serdes_xaui_an; + } else { + /* Function 1 is direct */ + direct_ptr = mpi_coredump->serdes_xaui_an; + indirect_ptr = mpi_coredump->serdes2_xaui_an; + } + + for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xaui_direct_valid, xaui_indirect_valid); + + /* Get XAUI_HSS_PCS register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xaui_hss_pcs; + indirect_ptr = + mpi_coredump->serdes_xaui_hss_pcs; + } else { + direct_ptr = + mpi_coredump->serdes_xaui_hss_pcs; + indirect_ptr = + mpi_coredump->serdes2_xaui_hss_pcs; + } + + for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xaui_direct_valid, xaui_indirect_valid); + + /* Get XAUI_XFI_AN register block. 
*/ + if (qdev->func & 1) { + direct_ptr = mpi_coredump->serdes2_xfi_an; + indirect_ptr = mpi_coredump->serdes_xfi_an; + } else { + direct_ptr = mpi_coredump->serdes_xfi_an; + indirect_ptr = mpi_coredump->serdes2_xfi_an; + } + + for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_TRAIN register block. */ + if (qdev->func & 1) { + direct_ptr = mpi_coredump->serdes2_xfi_train; + indirect_ptr = + mpi_coredump->serdes_xfi_train; + } else { + direct_ptr = mpi_coredump->serdes_xfi_train; + indirect_ptr = + mpi_coredump->serdes2_xfi_train; + } + + for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_PCS register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_pcs; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_pcs; + } else { + direct_ptr = + mpi_coredump->serdes_xfi_hss_pcs; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_pcs; + } + + for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_TX register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_tx; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_tx; + } else { + direct_ptr = mpi_coredump->serdes_xfi_hss_tx; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_tx; + } + for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + /* Get XAUI_XFI_HSS_RX register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_rx; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_rx; + } else { + direct_ptr = mpi_coredump->serdes_xfi_hss_rx; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_rx; + } + + for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + + + /* Get XAUI_XFI_HSS_PLL register block. */ + if (qdev->func & 1) { + direct_ptr = + mpi_coredump->serdes2_xfi_hss_pll; + indirect_ptr = + mpi_coredump->serdes_xfi_hss_pll; + } else { + direct_ptr = + mpi_coredump->serdes_xfi_hss_pll; + indirect_ptr = + mpi_coredump->serdes2_xfi_hss_pll; + } + for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) + ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, + xfi_direct_valid, xfi_indirect_valid); + return 0; +} + +static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, + u32 *data) +{ + int status = 0; + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + + /* set up for reg read */ + ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); + + /* wait for reg to come ready */ + status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + + /* get the data */ + *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); +exit: + return status; +} + +/* Read the 400 xgmac control/statistics registers + * skipping unused locations. 
+ */ +static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf, + unsigned int other_function) +{ + int status = 0; + int i; + + for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) { + /* We're reading 400 xgmac registers, but we filter out + * serveral locations that are non-responsive to reads. + */ + if ((i == 0x00000114) || + (i == 0x00000118) || + (i == 0x0000013c) || + (i == 0x00000140) || + (i > 0x00000150 && i < 0x000001fc) || + (i > 0x00000278 && i < 0x000002a0) || + (i > 0x000002c0 && i < 0x000002cf) || + (i > 0x000002dc && i < 0x000002f0) || + (i > 0x000003c8 && i < 0x00000400) || + (i > 0x00000400 && i < 0x00000410) || + (i > 0x00000410 && i < 0x00000420) || + (i > 0x00000420 && i < 0x00000430) || + (i > 0x00000430 && i < 0x00000440) || + (i > 0x00000440 && i < 0x00000450) || + (i > 0x00000450 && i < 0x00000500) || + (i > 0x0000054c && i < 0x00000568) || + (i > 0x000005c8 && i < 0x00000600)) { + if (other_function) + status = + ql_read_other_func_xgmac_reg(qdev, i, buf); + else + status = ql_read_xgmac_reg(qdev, i, buf); + + if (status) + *buf = 0xdeadbeef; + break; + } + } + return status; +} + +static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf) +{ + int status = 0; + int i; + + for (i = 0; i < 8; i++, buf++) { + ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); + *buf = ql_read32(qdev, NIC_ETS); + } + + for (i = 0; i < 2; i++, buf++) { + ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); + *buf = ql_read32(qdev, CNA_ETS); + } + + return status; +} + +static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf) +{ + int i; + + for (i = 0; i < qdev->rx_ring_count; i++, buf++) { + ql_write32(qdev, INTR_EN, + qdev->intr_context[i].intr_read_mask); + *buf = ql_read32(qdev, INTR_EN); + } +} + +static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf) +{ + int i, status; + u32 value[3]; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + + for (i = 0; i < 16; i++) { + status = ql_get_mac_addr_reg(qdev, + MAC_ADDR_TYPE_CAM_MAC, i, value); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed read of mac index register\n"); + goto err; + } + *buf++ = value[0]; /* lower MAC address */ + *buf++ = value[1]; /* upper MAC address */ + *buf++ = value[2]; /* output */ + } + for (i = 0; i < 32; i++) { + status = ql_get_mac_addr_reg(qdev, + MAC_ADDR_TYPE_MULTI_MAC, i, value); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed read of mac index register\n"); + goto err; + } + *buf++ = value[0]; /* lower Mcast address */ + *buf++ = value[1]; /* upper Mcast address */ + } +err: + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + return status; +} + +static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf) +{ + int status; + u32 value, i; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + for (i = 0; i < 16; i++) { + status = ql_get_routing_reg(qdev, i, &value); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed read of routing index register\n"); + goto err; + } else { + *buf++ = value; + } + } +err: + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Read the MPI Processor shadow registers */ +static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf) +{ + u32 i; + int status; + + for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) { + status = ql_write_mpi_reg(qdev, RISC_124, + (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); + if (status) + goto end; + status = ql_read_mpi_reg(qdev, RISC_127, buf); + if (status) + goto end; + } +end: + 
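The long condition chain in ql_get_xgmac_regs() is a skip list: offsets known to be non-responsive are filtered out while walking the XGMAC block. The sketch below expresses the same idea as a small range table; the two ranges are examples only, not the driver's full list. Keeping the ranges in a table makes the filter easy to audit against the hardware documentation, at the cost of a loop per offset.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Each entry is a half-open range of register offsets to skip. */
struct demo_skip_range {
	uint32_t start; /* first offset to skip */
	uint32_t end;   /* first offset past the skipped run */
};

static const struct demo_skip_range demo_skips[] = {
	{ 0x00000114, 0x00000118 },
	{ 0x00000154, 0x000001fc },
};

static bool demo_skip_offset(uint32_t off)
{
	for (size_t i = 0; i < sizeof(demo_skips) / sizeof(demo_skips[0]); i++)
		if (off >= demo_skips[i].start && off < demo_skips[i].end)
			return true;
	return false;
}

int main(void)
{
	uint32_t off;
	unsigned int kept = 0;

	/* Walk a 0x100-0x200 register window, 4 bytes at a time. */
	for (off = 0x100; off < 0x200; off += 4)
		if (!demo_skip_offset(off))
			kept++;
	printf("%u readable registers in the window\n", kept);
	return 0;
}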
return status; +} + +/* Read the MPI Processor core registers */ +static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf, + u32 offset, u32 count) +{ + int i, status = 0; + for (i = 0; i < count; i++, buf++) { + status = ql_read_mpi_reg(qdev, offset + i, buf); + if (status) + return status; + } + return status; +} + +/* Read the ASIC probe dump */ +static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, + u32 valid, u32 *buf) +{ + u32 module, mux_sel, probe, lo_val, hi_val; + + for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) { + if (!((valid >> module) & 1)) + continue; + for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) { + probe = clock + | PRB_MX_ADDR_ARE + | mux_sel + | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); + ql_write32(qdev, PRB_MX_ADDR, probe); + lo_val = ql_read32(qdev, PRB_MX_DATA); + if (mux_sel == 0) { + *buf = probe; + buf++; + } + probe |= PRB_MX_ADDR_UP; + ql_write32(qdev, PRB_MX_ADDR, probe); + hi_val = ql_read32(qdev, PRB_MX_DATA); + *buf = lo_val; + buf++; + *buf = hi_val; + buf++; + } + } + return buf; +} + +static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) +{ + /* First we have to enable the probe mux */ + ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); + buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, + PRB_MX_ADDR_VALID_SYS_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, + PRB_MX_ADDR_VALID_PCI_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, + PRB_MX_ADDR_VALID_XGM_MOD, buf); + buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, + PRB_MX_ADDR_VALID_FC_MOD, buf); + return 0; + +} + +/* Read out the routing index registers */ +static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) +{ + int status; + u32 type, index, index_max; + u32 result_index; + u32 result_data; + u32 val; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + for (type = 0; type < 4; type++) { + if (type < 2) + index_max = 8; + else + index_max = 16; + for (index = 0; index < index_max; index++) { + val = RT_IDX_RS + | (type << RT_IDX_TYPE_SHIFT) + | (index << RT_IDX_IDX_SHIFT); + ql_write32(qdev, RT_IDX, val); + result_index = 0; + while ((result_index & RT_IDX_MR) == 0) + result_index = ql_read32(qdev, RT_IDX); + result_data = ql_read32(qdev, RT_DATA); + *buf = type; + buf++; + *buf = index; + buf++; + *buf = result_index; + buf++; + *buf = result_data; + buf++; + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Read out the MAC protocol registers */ +static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) +{ + u32 result_index, result_data; + u32 type; + u32 index; + u32 offset; + u32 val; + u32 initial_val = MAC_ADDR_RS; + u32 max_index; + u32 max_offset; + + for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) { + switch (type) { + + case 0: /* CAM */ + initial_val |= MAC_ADDR_ADR; + max_index = MAC_ADDR_MAX_CAM_ENTRIES; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 1: /* Multicast MAC Address */ + max_index = MAC_ADDR_MAX_CAM_WCOUNT; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 2: /* VLAN filter mask */ + case 3: /* MC filter mask */ + max_index = MAC_ADDR_MAX_CAM_WCOUNT; + max_offset = MAC_ADDR_MAX_CAM_WCOUNT; + break; + case 4: /* FC MAC addresses */ + max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES; + max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT; + break; + case 5: /* Mgmt MAC addresses */ + max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT; + 
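ql_get_probe() lays its output out as one record per valid module: the first word names the probe selector, then each of the 64 mux selects contributes a low and a high data word, which is where PRB_MX_ADDR_PRB_WORD_COUNT comes from. A sketch of just that buffer layout, with the hardware reads replaced by constants:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_MUX 64 /* probe entries per module, per the comment in qlge.h */

/* Emit one probe record: a selector word, then a low/high pair for every
 * mux select.  The data words are faked; only the layout matters here. */
static uint32_t *demo_dump_one_module(uint32_t module, uint32_t *buf)
{
	for (uint32_t mux_sel = 0; mux_sel < DEMO_MAX_MUX; mux_sel++) {
		uint32_t probe = (module << 16) | mux_sel; /* made-up selector */

		if (mux_sel == 0)
			*buf++ = probe;  /* first word names the probe */
		*buf++ = 0x11111111;     /* stands in for the low data word */
		*buf++ = 0x22222222;     /* stands in for the high data word */
	}
	return buf;
}

int main(void)
{
	/* 1 selector word + 2 words per mux select, i.e.
	 * PRB_MX_ADDR_PRB_WORD_COUNT for one module. */
	uint32_t record[1 + DEMO_MAX_MUX * 2];
	uint32_t *end = demo_dump_one_module(3, record);

	printf("one module produced %td words\n", end - record);
	return 0;
}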
break; + case 6: /* Mgmt VLAN addresses */ + max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT; + break; + case 7: /* Mgmt IPv4 address */ + max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT; + break; + case 8: /* Mgmt IPv6 address */ + max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT; + break; + case 9: /* Mgmt TCP/UDP Dest port */ + max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES; + max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT; + break; + default: + pr_err("Bad type!!! 0x%08x\n", type); + max_index = 0; + max_offset = 0; + break; + } + for (index = 0; index < max_index; index++) { + for (offset = 0; offset < max_offset; offset++) { + val = initial_val + | (type << MAC_ADDR_TYPE_SHIFT) + | (index << MAC_ADDR_IDX_SHIFT) + | (offset); + ql_write32(qdev, MAC_ADDR_IDX, val); + result_index = 0; + while ((result_index & MAC_ADDR_MR) == 0) { + result_index = ql_read32(qdev, + MAC_ADDR_IDX); + } + result_data = ql_read32(qdev, MAC_ADDR_DATA); + *buf = result_index; + buf++; + *buf = result_data; + buf++; + } + } + } +} + +static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf) +{ + u32 func_num, reg, reg_val; + int status; + + for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) { + reg = MPI_NIC_REG_BLOCK + | (func_num << MPI_NIC_FUNCTION_SHIFT) + | (SEM / 4); + status = ql_read_mpi_reg(qdev, reg, ®_val); + *buf = reg_val; + /* if the read failed then dead fill the element. */ + if (!status) + *buf = 0xdeadbeef; + buf++; + } +} + +/* Create a coredump segment header */ +static void ql_build_coredump_seg_header( + struct mpi_coredump_segment_header *seg_hdr, + u32 seg_number, u32 seg_size, u8 *desc) +{ + memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); + seg_hdr->cookie = MPI_COREDUMP_COOKIE; + seg_hdr->segNum = seg_number; + seg_hdr->segSize = seg_size; + memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); +} + +/* + * This function should be called when a coredump / probedump + * is to be extracted from the HBA. It is assumed there is a + * qdev structure that contains the base address of the register + * space for this function as well as a coredump structure that + * will contain the dump. + */ +int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) +{ + int status; + int i; + + if (!mpi_coredump) { + netif_err(qdev, drv, qdev->ndev, "No memory allocated\n"); + return -EINVAL; + } + + /* Try to get the spinlock, but dont worry if + * it isn't available. If the firmware died it + * might be holding the sem. + */ + ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); + + status = ql_pause_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC pause. 
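ql_core_dump() builds the image one segment at a time: stamp a segment header whose size covers the header plus its payload, then fetch the payload, bailing out on the first error. The sketch below mirrors only the header-stamping step of ql_build_coredump_seg_header() against a local struct copy; the demo_ names and the example payload size are mine.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_COOKIE 0x5555aaaa /* MPI_COREDUMP_COOKIE */

struct demo_segment_header {
	uint32_t cookie;
	uint32_t segNum;
	uint32_t segSize; /* header plus payload, as in ql_core_dump() */
	uint32_t extra;
	char description[16];
};

/* Mirror of ql_build_coredump_seg_header(): stamp the cookie, number,
 * size and a truncated description into a zeroed header. */
static void demo_build_seg_header(struct demo_segment_header *h,
				  uint32_t seg_num, uint32_t seg_size,
				  const char *desc)
{
	memset(h, 0, sizeof(*h));
	h->cookie = DEMO_COOKIE;
	h->segNum = seg_num;
	h->segSize = seg_size;
	strncpy(h->description, desc, sizeof(h->description) - 1);
}

int main(void)
{
	struct demo_segment_header hdr;
	uint32_t payload[64] = { 0 }; /* stand-in for one register block */

	demo_build_seg_header(&hdr, 16, sizeof(hdr) + sizeof(payload),
			      "NIC1 Registers");
	printf("segment %u (%s): %u bytes\n", hdr.segNum, hdr.description,
	       hdr.segSize);
	return 0;
}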
Status = 0x%.08x\n", status); + goto err; + } + + /* Insert the global header */ + memset(&(mpi_coredump->mpi_global_header), 0, + sizeof(struct mpi_coredump_global_header)); + mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE; + mpi_coredump->mpi_global_header.headerSize = + sizeof(struct mpi_coredump_global_header); + mpi_coredump->mpi_global_header.imageSize = + sizeof(struct ql_mpi_coredump); + memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + sizeof(mpi_coredump->mpi_global_header.idString)); + + /* Get generic NIC reg dump */ + ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, + NIC1_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, + NIC2_CONTROL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); + + /* Get XGMac registers. (Segment 18, Rev C. step 21) */ + ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, + NIC1_XGMAC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, + NIC2_XGMAC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); + + if (qdev->func & 1) { + /* Odd means our function is NIC 2 */ + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic2_regs[i] = + ql_read32(qdev, i * sizeof(u32)); + + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic_regs[i] = + ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); + + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); + } else { + /* Even means our function is NIC 1 */ + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic_regs[i] = + ql_read32(qdev, i * sizeof(u32)); + for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) + mpi_coredump->nic2_regs[i] = + ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); + + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); + ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); + } + + /* Rev C. Step 20a */ + ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, + XAUI_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xaui_an), + "XAUI AN Registers"); + + /* Rev C. 
Step 20b */ + ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, + XAUI_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xaui_hss_pcs), + "XAUI HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_an), + "XFI AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, + XFI_TRAIN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_train), + "XFI TRAIN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, + XFI_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_pcs), + "XFI HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, + XFI_HSS_TX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_tx), + "XFI HSS TX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, + XFI_HSS_RX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_rx), + "XFI HSS RX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, + XFI_HSS_PLL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes_xfi_hss_pll), + "XFI HSS PLL Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, + XAUI2_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xaui_an), + "XAUI2 AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, + XAUI2_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xaui_hss_pcs), + "XAUI2 HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, + XFI2_AN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_an), + "XFI2 AN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, + XFI2_TRAIN_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_train), + "XFI2 TRAIN Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, + XFI2_HSS_PCS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_pcs), + "XFI2 HSS PCS Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, + XFI2_HSS_TX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_tx), + "XFI2 HSS TX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, + XFI2_HSS_RX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_rx), + "XFI2 HSS RX Registers"); + + ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, + XFI2_HSS_PLL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->serdes2_xfi_hss_pll), + "XFI2 HSS PLL Registers"); + + status = ql_get_serdes_regs(qdev, mpi_coredump); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", + status); + goto err; + } + + ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, + CORE_SEG_NUM, + sizeof(mpi_coredump->core_regs_seg_hdr) + + sizeof(mpi_coredump->mpi_core_regs) + + sizeof(mpi_coredump->mpi_core_sh_regs), + "Core Registers"); + + /* Get the MPI Core Registers */ + status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], + MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); + if (status) + goto err; + /* Get the 16 MPI shadow registers */ + status = ql_get_mpi_shadow_regs(qdev, + &mpi_coredump->mpi_core_sh_regs[0]); + if (status) + goto err; + + /* Get the Test Logic Registers */ + ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, + TEST_LOGIC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->test_logic_regs), + "Test Logic Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], + TEST_REGS_ADDR, TEST_REGS_CNT); + if (status) + goto err; + + /* Get the RMII Registers */ + ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, + RMII_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->rmii_regs), + "RMII Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], + RMII_REGS_ADDR, RMII_REGS_CNT); + if (status) + goto err; + + /* Get the FCMAC1 Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, + FCMAC1_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fcmac1_regs), + "FCMAC1 Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], + FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); + if (status) + goto err; + + /* Get the FCMAC2 Registers */ + + ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, + FCMAC2_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fcmac2_regs), + "FCMAC2 Registers"); + + status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], + FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); + if (status) + goto err; + + /* Get the FC1 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, + FC1_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fc1_mbx_regs), + "FC1 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], + FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the IDE Registers */ + ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, + IDE_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ide_regs), + "IDE Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], + IDE_REGS_ADDR, IDE_REGS_CNT); + if (status) + goto err; + + /* Get the NIC1 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, + NIC1_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic1_mbx_regs), + "NIC1 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], + NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the SMBus Registers */ + ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, + SMBUS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->smbus_regs), + "SMBus Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], + SMBUS_REGS_ADDR, SMBUS_REGS_CNT); + if (status) + goto err; + + /* Get the FC2 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, + FC2_MBOX_SEG_NUM, + 
sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->fc2_mbx_regs), + "FC2 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], + FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the NIC2 MBX Registers */ + ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, + NIC2_MBOX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic2_mbx_regs), + "NIC2 MBox Regs"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], + NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); + if (status) + goto err; + + /* Get the I2C Registers */ + ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, + I2C_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->i2c_regs), + "I2C Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], + I2C_REGS_ADDR, I2C_REGS_CNT); + if (status) + goto err; + + /* Get the MEMC Registers */ + ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, + MEMC_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->memc_regs), + "MEMC Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], + MEMC_REGS_ADDR, MEMC_REGS_CNT); + if (status) + goto err; + + /* Get the PBus Registers */ + ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, + PBUS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->pbus_regs), + "PBUS Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], + PBUS_REGS_ADDR, PBUS_REGS_CNT); + if (status) + goto err; + + /* Get the MDE Registers */ + ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, + MDE_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->mde_regs), + "MDE Registers"); + status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], + MDE_REGS_ADDR, MDE_REGS_CNT); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, + MISC_NIC_INFO_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->misc_nic_info), + "MISC NIC INFO"); + mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; + mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; + mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; + mpi_coredump->misc_nic_info.function = qdev->func; + + /* Segment 31 */ + /* Get indexed register values. */ + ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, + INTR_STATES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->intr_states), + "INTR States"); + ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); + + ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, + CAM_ENTRIES_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->cam_entries), + "CAM Entries"); + status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, + ROUTING_WORDS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->nic_routing_words), + "Routing Words"); + status = ql_get_routing_entries(qdev, + &mpi_coredump->nic_routing_words[0]); + if (status) + goto err; + + /* Segment 34 (Rev C. 
step 23) */ + ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, + ETS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->ets), + "ETS Registers"); + status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, + PROBE_DUMP_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->probe_dump), + "Probe Dump"); + ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); + + ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, + ROUTING_INDEX_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->routing_regs), + "Routing Regs"); + status = ql_get_routing_index_registers(qdev, + &mpi_coredump->routing_regs[0]); + if (status) + goto err; + + ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, + MAC_PROTOCOL_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->mac_prot_regs), + "MAC Prot Regs"); + ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); + + /* Get the semaphore registers for all 5 functions */ + ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, + SEM_REGS_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->sem_regs), "Sem Registers"); + + ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); + + /* Prevent the mpi restarting while we dump the memory.*/ + ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); + + /* clear the pause */ + status = ql_unpause_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC unpause. Status = 0x%.08x\n", status); + goto err; + } + + /* Reset the RISC so we can dump RAM */ + status = ql_hard_reset_mpi_risc(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed RISC reset. Status = 0x%.08x\n", status); + goto err; + } + + ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, + WCS_RAM_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->code_ram), + "WCS RAM"); + status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], + CODE_RAM_ADDR, CODE_RAM_CNT); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of CODE RAM. Status = 0x%.08x\n", + status); + goto err; + } + + /* Insert the segment header */ + ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, + MEMC_RAM_SEG_NUM, + sizeof(struct mpi_coredump_segment_header) + + sizeof(mpi_coredump->memc_ram), + "MEMC RAM"); + status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], + MEMC_RAM_ADDR, MEMC_RAM_CNT); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Failed Dump of MEMC RAM. 
Status = 0x%.08x\n",
+			  status);
+		goto err;
+	}
+err:
+	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+	return status;
+
+}
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+	if (!ql_own_firmware(qdev)) {
+		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+		return;
+	}
+
+	if (!netif_running(qdev->ndev)) {
+		netif_err(qdev, ifup, qdev->ndev,
+			  "Force Coredump can only be done from an interface that is up\n");
+		return;
+	}
+	ql_queue_fw_error(qdev);
+}
+
+static void ql_gen_reg_dump(struct ql_adapter *qdev,
+			    struct ql_reg_dump *mpi_coredump)
+{
+	int i, status;
+
+
+	memset(&(mpi_coredump->mpi_global_header), 0,
+	       sizeof(struct mpi_coredump_global_header));
+	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+	mpi_coredump->mpi_global_header.headerSize =
+		sizeof(struct mpi_coredump_global_header);
+	mpi_coredump->mpi_global_header.imageSize =
+		sizeof(struct ql_reg_dump);
+	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+	       sizeof(mpi_coredump->mpi_global_header.idString));
+
+
+	/* segment 16 */
+	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+				MISC_NIC_INFO_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header) +
+				sizeof(mpi_coredump->misc_nic_info),
+				"MISC NIC INFO");
+	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+	mpi_coredump->misc_nic_info.function = qdev->func;
+
+	/* Segment 16, Rev C. Step 18 */
+	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+				NIC1_CONTROL_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header) +
+				sizeof(mpi_coredump->nic_regs),
+				"NIC Registers");
+	/* Get generic reg dump */
+	for (i = 0; i < 64; i++)
+		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
+
+	/* Segment 31 */
+	/* Get indexed register values. */
+	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+				INTR_STATES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header) +
+				sizeof(mpi_coredump->intr_states),
+				"INTR States");
+	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+				CAM_ENTRIES_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header) +
+				sizeof(mpi_coredump->cam_entries),
+				"CAM Entries");
+	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+	if (status)
+		return;
+
+	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+				ROUTING_WORDS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header) +
+				sizeof(mpi_coredump->nic_routing_words),
+				"Routing Words");
+	status = ql_get_routing_entries(qdev,
+					&mpi_coredump->nic_routing_words[0]);
+	if (status)
+		return;
+
+	/* Segment 34 (Rev C. step 23) */
+	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+				ETS_SEG_NUM,
+				sizeof(struct mpi_coredump_segment_header) +
+				sizeof(mpi_coredump->ets),
+				"ETS Registers");
+	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+	if (status)
+		return;
+}
+
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+	/*
+	 * If the dump has already been taken and is stored
+	 * in our internal buffer and if force dump is set then
+	 * just start the spool to dump it to the log file
+	 * and also take a snapshot of the general regs
+	 * to the user's buffer. Otherwise take a complete dump
+	 * to the user's buffer if force is not set.
+	 */
+
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+		if (!ql_core_dump(qdev, buff))
+			ql_soft_reset_mpi_risc(qdev);
+		else
+			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+	} else {
+		ql_gen_reg_dump(qdev, buff);
+		ql_get_core_dump(qdev);
+	}
+}
+
+/* Coredump to messages log file using separate worker thread */
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+	struct ql_adapter *qdev =
+		container_of(work, struct ql_adapter, mpi_core_to_log.work);
+	u32 *tmp, count;
+	int i;
+
+	count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+	tmp = (u32 *)qdev->mpi_coredump;
+	netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+		     "Core is dumping to log file!\n");
+
+	for (i = 0; i < count; i += 8) {
+		pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
+			"%.08x %.08x %.08x\n", i,
+			tmp[i + 0],
+			tmp[i + 1],
+			tmp[i + 2],
+			tmp[i + 3],
+			tmp[i + 4],
+			tmp[i + 5],
+			tmp[i + 6],
+			tmp[i + 7]);
+		msleep(5);
+	}
+}
+
+#ifdef QL_REG_DUMP
+static void ql_dump_intr_states(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+	for (i = 0; i < qdev->intr_count; i++) {
+		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
+		value = ql_read32(qdev, INTR_EN);
+		pr_err("%s: Interrupt %d is %s\n",
+		       qdev->ndev->name, i,
+		       (value & INTR_EN_EN ? "enabled" : "disabled"));
+	}
+}
+
+#define DUMP_XGMAC(qdev, reg) \
+do { \
+	u32 data; \
+	ql_read_xgmac_reg(qdev, reg, &data); \
+	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
+} while (0)
+
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
+{
+	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
+		pr_err("%s: Couldn't get xgmac sem\n", __func__);
+		return;
+	}
+	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
+	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
+	DUMP_XGMAC(qdev, GLOBAL_CFG);
+	DUMP_XGMAC(qdev, TX_CFG);
+	DUMP_XGMAC(qdev, RX_CFG);
+	DUMP_XGMAC(qdev, FLOW_CTL);
+	DUMP_XGMAC(qdev, PAUSE_OPCODE);
+	DUMP_XGMAC(qdev, PAUSE_TIMER);
+	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
+	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
+	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
+	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
+	DUMP_XGMAC(qdev, MAC_SYS_INT);
+	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
+	DUMP_XGMAC(qdev, MAC_MGMT_INT);
+	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
+	DUMP_XGMAC(qdev, EXT_ARB_MODE);
+	ql_sem_unlock(qdev, qdev->xg_sem_mask);
+}
+
+static void ql_dump_ets_regs(struct ql_adapter *qdev)
+{
+}
+
+static void ql_dump_cam_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value[3];
+
+	i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (i)
+		return;
+	for (i = 0; i < 4; i++) {
+		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
+			pr_err("%s: Failed read of mac index register\n",
+			       __func__);
+			return;
+		} else {
+			if (value[0])
+				pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
+				       qdev->ndev->name, i, value[1], value[0],
+				       value[2]);
+		}
+	}
+	for (i = 0; i < 32; i++) {
+		if (ql_get_mac_addr_reg
+		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
+			pr_err("%s: Failed read of mac index register\n",
+			       __func__);
+			return;
+		} else {
+			if (value[0])
+				pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
+				       qdev->ndev->name, i, value[1], value[0]);
+		}
+	}
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+void ql_dump_routing_entries(struct ql_adapter *qdev)
+{
+	int i;
+	u32 value;
+	i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (i)
+		return;
+	for (i = 0; i < 16; i++) {
+		value = 0;
+		if (ql_get_routing_reg(qdev, i, &value)) {
+			pr_err("%s: Failed read of routing index register\n",
+			       __func__);
+			return;
+		} else {
+			if (value)
pr_err("%s: Routing Mask %d = 0x%.08x\n", + qdev->ndev->name, i, value); + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); +} + +#define DUMP_REG(qdev, reg) \ + pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) + +void ql_dump_regs(struct ql_adapter *qdev) +{ + pr_err("reg dump for function #%d\n", qdev->func); + DUMP_REG(qdev, SYS); + DUMP_REG(qdev, RST_FO); + DUMP_REG(qdev, FSC); + DUMP_REG(qdev, CSR); + DUMP_REG(qdev, ICB_RID); + DUMP_REG(qdev, ICB_L); + DUMP_REG(qdev, ICB_H); + DUMP_REG(qdev, CFG); + DUMP_REG(qdev, BIOS_ADDR); + DUMP_REG(qdev, STS); + DUMP_REG(qdev, INTR_EN); + DUMP_REG(qdev, INTR_MASK); + DUMP_REG(qdev, ISR1); + DUMP_REG(qdev, ISR2); + DUMP_REG(qdev, ISR3); + DUMP_REG(qdev, ISR4); + DUMP_REG(qdev, REV_ID); + DUMP_REG(qdev, FRC_ECC_ERR); + DUMP_REG(qdev, ERR_STS); + DUMP_REG(qdev, RAM_DBG_ADDR); + DUMP_REG(qdev, RAM_DBG_DATA); + DUMP_REG(qdev, ECC_ERR_CNT); + DUMP_REG(qdev, SEM); + DUMP_REG(qdev, GPIO_1); + DUMP_REG(qdev, GPIO_2); + DUMP_REG(qdev, GPIO_3); + DUMP_REG(qdev, XGMAC_ADDR); + DUMP_REG(qdev, XGMAC_DATA); + DUMP_REG(qdev, NIC_ETS); + DUMP_REG(qdev, CNA_ETS); + DUMP_REG(qdev, FLASH_ADDR); + DUMP_REG(qdev, FLASH_DATA); + DUMP_REG(qdev, CQ_STOP); + DUMP_REG(qdev, PAGE_TBL_RID); + DUMP_REG(qdev, WQ_PAGE_TBL_LO); + DUMP_REG(qdev, WQ_PAGE_TBL_HI); + DUMP_REG(qdev, CQ_PAGE_TBL_LO); + DUMP_REG(qdev, CQ_PAGE_TBL_HI); + DUMP_REG(qdev, COS_DFLT_CQ1); + DUMP_REG(qdev, COS_DFLT_CQ2); + DUMP_REG(qdev, SPLT_HDR); + DUMP_REG(qdev, FC_PAUSE_THRES); + DUMP_REG(qdev, NIC_PAUSE_THRES); + DUMP_REG(qdev, FC_ETHERTYPE); + DUMP_REG(qdev, FC_RCV_CFG); + DUMP_REG(qdev, NIC_RCV_CFG); + DUMP_REG(qdev, FC_COS_TAGS); + DUMP_REG(qdev, NIC_COS_TAGS); + DUMP_REG(qdev, MGMT_RCV_CFG); + DUMP_REG(qdev, XG_SERDES_ADDR); + DUMP_REG(qdev, XG_SERDES_DATA); + DUMP_REG(qdev, PRB_MX_ADDR); + DUMP_REG(qdev, PRB_MX_DATA); + ql_dump_intr_states(qdev); + ql_dump_xgmac_control_regs(qdev); + ql_dump_ets_regs(qdev); + ql_dump_cam_entries(qdev); + ql_dump_routing_entries(qdev); +} +#endif + +#ifdef QL_STAT_DUMP + +#define DUMP_STAT(qdev, stat) \ + pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat) + +void ql_dump_stat(struct ql_adapter *qdev) +{ + pr_err("%s: Enter\n", __func__); + DUMP_STAT(qdev, tx_pkts); + DUMP_STAT(qdev, tx_bytes); + DUMP_STAT(qdev, tx_mcast_pkts); + DUMP_STAT(qdev, tx_bcast_pkts); + DUMP_STAT(qdev, tx_ucast_pkts); + DUMP_STAT(qdev, tx_ctl_pkts); + DUMP_STAT(qdev, tx_pause_pkts); + DUMP_STAT(qdev, tx_64_pkt); + DUMP_STAT(qdev, tx_65_to_127_pkt); + DUMP_STAT(qdev, tx_128_to_255_pkt); + DUMP_STAT(qdev, tx_256_511_pkt); + DUMP_STAT(qdev, tx_512_to_1023_pkt); + DUMP_STAT(qdev, tx_1024_to_1518_pkt); + DUMP_STAT(qdev, tx_1519_to_max_pkt); + DUMP_STAT(qdev, tx_undersize_pkt); + DUMP_STAT(qdev, tx_oversize_pkt); + DUMP_STAT(qdev, rx_bytes); + DUMP_STAT(qdev, rx_bytes_ok); + DUMP_STAT(qdev, rx_pkts); + DUMP_STAT(qdev, rx_pkts_ok); + DUMP_STAT(qdev, rx_bcast_pkts); + DUMP_STAT(qdev, rx_mcast_pkts); + DUMP_STAT(qdev, rx_ucast_pkts); + DUMP_STAT(qdev, rx_undersize_pkts); + DUMP_STAT(qdev, rx_oversize_pkts); + DUMP_STAT(qdev, rx_jabber_pkts); + DUMP_STAT(qdev, rx_undersize_fcerr_pkts); + DUMP_STAT(qdev, rx_drop_events); + DUMP_STAT(qdev, rx_fcerr_pkts); + DUMP_STAT(qdev, rx_align_err); + DUMP_STAT(qdev, rx_symbol_err); + DUMP_STAT(qdev, rx_mac_err); + DUMP_STAT(qdev, rx_ctl_pkts); + DUMP_STAT(qdev, rx_pause_pkts); + DUMP_STAT(qdev, rx_64_pkts); + DUMP_STAT(qdev, rx_65_to_127_pkts); + DUMP_STAT(qdev, rx_128_255_pkts); + DUMP_STAT(qdev, rx_256_511_pkts); + DUMP_STAT(qdev, 
rx_512_to_1023_pkts); + DUMP_STAT(qdev, rx_1024_to_1518_pkts); + DUMP_STAT(qdev, rx_1519_to_max_pkts); + DUMP_STAT(qdev, rx_len_err_pkts); +}; +#endif + +#ifdef QL_DEV_DUMP + +#define DUMP_QDEV_FIELD(qdev, type, field) \ + pr_err("qdev->%-24s = " type "\n", #field, qdev->field) +#define DUMP_QDEV_DMA_FIELD(qdev, field) \ + pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field) +#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ + pr_err("%s[%d].%s = " type "\n", \ + #array, index, #field, qdev->array[index].field); +void ql_dump_qdev(struct ql_adapter *qdev) +{ + int i; + DUMP_QDEV_FIELD(qdev, "%lx", flags); + DUMP_QDEV_FIELD(qdev, "%p", vlgrp); + DUMP_QDEV_FIELD(qdev, "%p", pdev); + DUMP_QDEV_FIELD(qdev, "%p", ndev); + DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id); + DUMP_QDEV_FIELD(qdev, "%p", reg_base); + DUMP_QDEV_FIELD(qdev, "%p", doorbell_area); + DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size); + DUMP_QDEV_FIELD(qdev, "%x", msg_enable); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area); + DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); + if (qdev->msi_x_entry) + for (i = 0; i < qdev->intr_count; i++) { + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector); + DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry); + } + for (i = 0; i < qdev->intr_count; i++) { + DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr); + DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask); + DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask); + } + DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count); + DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size); + DUMP_QDEV_FIELD(qdev, "%p", ring_mem); + DUMP_QDEV_FIELD(qdev, "%d", intr_count); + DUMP_QDEV_FIELD(qdev, "%p", tx_ring); + DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count); + DUMP_QDEV_FIELD(qdev, "%p", rx_ring); + DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue); + DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up); + DUMP_QDEV_FIELD(qdev, "0x%08x", port_init); +} +#endif + +#ifdef QL_CB_DUMP +void ql_dump_wqicb(struct wqicb *wqicb) +{ + pr_err("Dumping wqicb stuff...\n"); + pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); + pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags)); + pr_err("wqicb->cq_id_rss = %d\n", + le16_to_cpu(wqicb->cq_id_rss)); + pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid)); + pr_err("wqicb->wq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(wqicb->addr)); + pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr)); +} + +void ql_dump_tx_ring(struct tx_ring *tx_ring) +{ + if (tx_ring == NULL) + return; + pr_err("===================== Dumping tx_ring %d ===============\n", + tx_ring->wq_id); + pr_err("tx_ring->base = %p\n", tx_ring->wq_base); + pr_err("tx_ring->base_dma = 0x%llx\n", + (unsigned long long) tx_ring->wq_base_dma); + pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", + tx_ring->cnsmr_idx_sh_reg, + tx_ring->cnsmr_idx_sh_reg + ? 
ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); + pr_err("tx_ring->size = %d\n", tx_ring->wq_size); + pr_err("tx_ring->len = %d\n", tx_ring->wq_len); + pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); + pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg); + pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx); + pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id); + pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id); + pr_err("tx_ring->q = %p\n", tx_ring->q); + pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); +} + +void ql_dump_ricb(struct ricb *ricb) +{ + int i; + pr_err("===================== Dumping ricb ===============\n"); + pr_err("Dumping ricb stuff...\n"); + + pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f); + pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n", + ricb->base_cq & RSS_L4K ? "RSS_L4K " : "", + ricb->flags & RSS_L6K ? "RSS_L6K " : "", + ricb->flags & RSS_LI ? "RSS_LI " : "", + ricb->flags & RSS_LB ? "RSS_LB " : "", + ricb->flags & RSS_LM ? "RSS_LM " : "", + ricb->flags & RSS_RI4 ? "RSS_RI4 " : "", + ricb->flags & RSS_RT4 ? "RSS_RT4 " : "", + ricb->flags & RSS_RI6 ? "RSS_RI6 " : "", + ricb->flags & RSS_RT6 ? "RSS_RT6 " : ""); + pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask)); + for (i = 0; i < 16; i++) + pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->hash_cq_id[i])); + for (i = 0; i < 10; i++) + pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->ipv6_hash_key[i])); + for (i = 0; i < 4; i++) + pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i, + le32_to_cpu(ricb->ipv4_hash_key[i])); +} + +void ql_dump_cqicb(struct cqicb *cqicb) +{ + pr_err("Dumping cqicb stuff...\n"); + + pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect); + pr_err("cqicb->flags = %x\n", cqicb->flags); + pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len)); + pr_err("cqicb->addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->addr)); + pr_err("cqicb->prod_idx_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr)); + pr_err("cqicb->pkt_delay = 0x%.04x\n", + le16_to_cpu(cqicb->pkt_delay)); + pr_err("cqicb->irq_delay = 0x%.04x\n", + le16_to_cpu(cqicb->irq_delay)); + pr_err("cqicb->lbq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->lbq_addr)); + pr_err("cqicb->lbq_buf_size = 0x%.04x\n", + le16_to_cpu(cqicb->lbq_buf_size)); + pr_err("cqicb->lbq_len = 0x%.04x\n", + le16_to_cpu(cqicb->lbq_len)); + pr_err("cqicb->sbq_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(cqicb->sbq_addr)); + pr_err("cqicb->sbq_buf_size = 0x%.04x\n", + le16_to_cpu(cqicb->sbq_buf_size)); + pr_err("cqicb->sbq_len = 0x%.04x\n", + le16_to_cpu(cqicb->sbq_len)); +} + +void ql_dump_rx_ring(struct rx_ring *rx_ring) +{ + if (rx_ring == NULL) + return; + pr_err("===================== Dumping rx_ring %d ===============\n", + rx_ring->cq_id); + pr_err("Dumping rx_ring %d, type = %s%s%s\n", + rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "", + rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "", + rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : ""); + pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb); + pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base); + pr_err("rx_ring->cq_base_dma = %llx\n", + (unsigned long long) rx_ring->cq_base_dma); + pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size); + pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len); + pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", + rx_ring->prod_idx_sh_reg, + rx_ring->prod_idx_sh_reg + ? 
ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); + pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n", + (unsigned long long) rx_ring->prod_idx_sh_reg_dma); + pr_err("rx_ring->cnsmr_idx_db_reg = %p\n", + rx_ring->cnsmr_idx_db_reg); + pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx); + pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry); + pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg); + + pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base); + pr_err("rx_ring->lbq_base_dma = %llx\n", + (unsigned long long) rx_ring->lbq_base_dma); + pr_err("rx_ring->lbq_base_indirect = %p\n", + rx_ring->lbq_base_indirect); + pr_err("rx_ring->lbq_base_indirect_dma = %llx\n", + (unsigned long long) rx_ring->lbq_base_indirect_dma); + pr_err("rx_ring->lbq = %p\n", rx_ring->lbq); + pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len); + pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size); + pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n", + rx_ring->lbq_prod_idx_db_reg); + pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx); + pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx); + pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx); + pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt); + pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size); + + pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base); + pr_err("rx_ring->sbq_base_dma = %llx\n", + (unsigned long long) rx_ring->sbq_base_dma); + pr_err("rx_ring->sbq_base_indirect = %p\n", + rx_ring->sbq_base_indirect); + pr_err("rx_ring->sbq_base_indirect_dma = %llx\n", + (unsigned long long) rx_ring->sbq_base_indirect_dma); + pr_err("rx_ring->sbq = %p\n", rx_ring->sbq); + pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len); + pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size); + pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n", + rx_ring->sbq_prod_idx_db_reg); + pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx); + pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx); + pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx); + pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt); + pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size); + pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id); + pr_err("rx_ring->irq = %d\n", rx_ring->irq); + pr_err("rx_ring->cpu = %d\n", rx_ring->cpu); + pr_err("rx_ring->qdev = %p\n", rx_ring->qdev); +} + +void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) +{ + void *ptr; + + pr_err("%s: Enter\n", __func__); + + ptr = kmalloc(size, GFP_ATOMIC); + if (ptr == NULL) + return; + + if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { + pr_err("%s: Failed to upload control block!\n", __func__); + goto fail_it; + } + switch (bit) { + case CFG_DRQ: + ql_dump_wqicb((struct wqicb *)ptr); + break; + case CFG_DCQ: + ql_dump_cqicb((struct cqicb *)ptr); + break; + case CFG_DR: + ql_dump_ricb((struct ricb *)ptr); + break; + default: + pr_err("%s: Invalid bit value = %x\n", __func__, bit); + break; + } +fail_it: + kfree(ptr); +} +#endif + +#ifdef QL_OB_DUMP +void ql_dump_tx_desc(struct tx_buf_desc *tbd) +{ + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? 
"E" : "."); + tbd++; + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? "E" : "."); + tbd++; + pr_err("tbd->addr = 0x%llx\n", + le64_to_cpu((u64) tbd->addr)); + pr_err("tbd->len = %d\n", + le32_to_cpu(tbd->len & TX_DESC_LEN_MASK)); + pr_err("tbd->flags = %s %s\n", + tbd->len & TX_DESC_C ? "C" : ".", + tbd->len & TX_DESC_E ? "E" : "."); + +} + +void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb) +{ + struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = + (struct ob_mac_tso_iocb_req *)ob_mac_iocb; + struct tx_buf_desc *tbd; + u16 frame_len; + + pr_err("%s\n", __func__); + pr_err("opcode = %s\n", + (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO"); + pr_err("flags1 = %s %s %s %s %s\n", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "", + ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : ""); + pr_err("flags2 = %s %s %s\n", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", + ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); + pr_err("flags3 = %s %s %s\n", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", + ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); + pr_err("tid = %x\n", ob_mac_iocb->tid); + pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx); + pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci); + if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) { + pr_err("frame_len = %d\n", + le32_to_cpu(ob_mac_tso_iocb->frame_len)); + pr_err("mss = %d\n", + le16_to_cpu(ob_mac_tso_iocb->mss)); + pr_err("prot_hdr_len = %d\n", + le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len)); + pr_err("hdr_offset = 0x%.04x\n", + le16_to_cpu(ob_mac_tso_iocb->net_trans_offset)); + frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len); + } else { + pr_err("frame_len = %d\n", + le16_to_cpu(ob_mac_iocb->frame_len)); + frame_len = le16_to_cpu(ob_mac_iocb->frame_len); + } + tbd = &ob_mac_iocb->tbd[0]; + ql_dump_tx_desc(tbd); +} + +void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp) +{ + pr_err("%s\n", __func__); + pr_err("opcode = %d\n", ob_mac_rsp->opcode); + pr_err("flags = %s %s %s %s %s %s %s\n", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".", + ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".", + ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : "."); + pr_err("tid = %x\n", ob_mac_rsp->tid); +} +#endif + +#ifdef QL_IB_DUMP +void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp) +{ + pr_err("%s\n", __func__); + pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode); + pr_err("flags1 = %s%s%s%s%s%s\n", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "", + ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? 
"B " : ""); + + if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) + pr_err("%s%s%s Multicast\n", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); + + pr_err("flags2 = %s%s%s%s%s\n", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : ""); + + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) + pr_err("%s%s%s%s%s error\n", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "", + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) == + IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : ""); + + pr_err("flags3 = %s%s\n", + ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "", + ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : ""); + + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) + pr_err("RSS flags = %s%s%s%s\n", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "", + ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) == + IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : ""); + + pr_err("data_len = %d\n", + le32_to_cpu(ib_mac_rsp->data_len)); + pr_err("data_addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr)); + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) + pr_err("rss = %x\n", + le32_to_cpu(ib_mac_rsp->rss)); + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) + pr_err("vlan_id = %x\n", + le16_to_cpu(ib_mac_rsp->vlan_id)); + + pr_err("flags4 = %s%s%s\n", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "", + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? 
"HL " : ""); + + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { + pr_err("hdr length = %d\n", + le32_to_cpu(ib_mac_rsp->hdr_len)); + pr_err("hdr addr = 0x%llx\n", + (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); + } +} +#endif + +#ifdef QL_ALL_DUMP +void ql_dump_all(struct ql_adapter *qdev) +{ + int i; + + QL_DUMP_REGS(qdev); + QL_DUMP_QDEV(qdev); + for (i = 0; i < qdev->tx_ring_count; i++) { + QL_DUMP_TX_RING(&qdev->tx_ring[i]); + QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]); + } + for (i = 0; i < qdev->rx_ring_count; i++) { + QL_DUMP_RX_RING(&qdev->rx_ring[i]); + QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]); + } +} +#endif diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c new file mode 100644 index 000000000..c3c514e33 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -0,0 +1,735 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "qlge.h" + +struct ql_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m) +#define QL_OFF(m) offsetof(struct ql_adapter, m) + +static const struct ql_stats ql_gstrings_stats[] = { + {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)}, + {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)}, + {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts), + QL_OFF(nic_stats.tx_mcast_pkts)}, + {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts), + QL_OFF(nic_stats.tx_bcast_pkts)}, + {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts), + QL_OFF(nic_stats.tx_ucast_pkts)}, + {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts), + QL_OFF(nic_stats.tx_ctl_pkts)}, + {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts), + QL_OFF(nic_stats.tx_pause_pkts)}, + {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt), + QL_OFF(nic_stats.tx_64_pkt)}, + {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt), + QL_OFF(nic_stats.tx_65_to_127_pkt)}, + {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt), + QL_OFF(nic_stats.tx_128_to_255_pkt)}, + {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt), + QL_OFF(nic_stats.tx_256_511_pkt)}, + {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt), + QL_OFF(nic_stats.tx_512_to_1023_pkt)}, + {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt), + QL_OFF(nic_stats.tx_1024_to_1518_pkt)}, + {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt), + QL_OFF(nic_stats.tx_1519_to_max_pkt)}, + {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt), + QL_OFF(nic_stats.tx_undersize_pkt)}, + {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt), + QL_OFF(nic_stats.tx_oversize_pkt)}, + {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)}, + {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok), + QL_OFF(nic_stats.rx_bytes_ok)}, + {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)}, + {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok), + QL_OFF(nic_stats.rx_pkts_ok)}, + {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts), + QL_OFF(nic_stats.rx_bcast_pkts)}, + {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts), + QL_OFF(nic_stats.rx_mcast_pkts)}, + {"rx_ucast_pkts", 
QL_SIZEOF(nic_stats.rx_ucast_pkts), + QL_OFF(nic_stats.rx_ucast_pkts)}, + {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts), + QL_OFF(nic_stats.rx_undersize_pkts)}, + {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts), + QL_OFF(nic_stats.rx_oversize_pkts)}, + {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts), + QL_OFF(nic_stats.rx_jabber_pkts)}, + {"rx_undersize_fcerr_pkts", + QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts), + QL_OFF(nic_stats.rx_undersize_fcerr_pkts)}, + {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events), + QL_OFF(nic_stats.rx_drop_events)}, + {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts), + QL_OFF(nic_stats.rx_fcerr_pkts)}, + {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err), + QL_OFF(nic_stats.rx_align_err)}, + {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err), + QL_OFF(nic_stats.rx_symbol_err)}, + {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err), + QL_OFF(nic_stats.rx_mac_err)}, + {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts), + QL_OFF(nic_stats.rx_ctl_pkts)}, + {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts), + QL_OFF(nic_stats.rx_pause_pkts)}, + {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts), + QL_OFF(nic_stats.rx_64_pkts)}, + {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts), + QL_OFF(nic_stats.rx_65_to_127_pkts)}, + {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts), + QL_OFF(nic_stats.rx_128_255_pkts)}, + {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts), + QL_OFF(nic_stats.rx_256_511_pkts)}, + {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts), + QL_OFF(nic_stats.rx_512_to_1023_pkts)}, + {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts), + QL_OFF(nic_stats.rx_1024_to_1518_pkts)}, + {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts), + QL_OFF(nic_stats.rx_1519_to_max_pkts)}, + {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts), + QL_OFF(nic_stats.rx_len_err_pkts)}, + {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err), + QL_OFF(nic_stats.rx_code_err)}, + {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err), + QL_OFF(nic_stats.rx_oversize_err)}, + {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err), + QL_OFF(nic_stats.rx_undersize_err)}, + {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err), + QL_OFF(nic_stats.rx_preamble_err)}, + {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err), + QL_OFF(nic_stats.rx_frame_len_err)}, + {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err), + QL_OFF(nic_stats.rx_crc_err)}, + {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count), + QL_OFF(nic_stats.rx_err_count)}, + {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0), + QL_OFF(nic_stats.tx_cbfc_pause_frames0)}, + {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1), + QL_OFF(nic_stats.tx_cbfc_pause_frames1)}, + {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2), + QL_OFF(nic_stats.tx_cbfc_pause_frames2)}, + {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3), + QL_OFF(nic_stats.tx_cbfc_pause_frames3)}, + {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4), + QL_OFF(nic_stats.tx_cbfc_pause_frames4)}, + {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5), + QL_OFF(nic_stats.tx_cbfc_pause_frames5)}, + {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6), + QL_OFF(nic_stats.tx_cbfc_pause_frames6)}, + {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7), + QL_OFF(nic_stats.tx_cbfc_pause_frames7)}, + {"rx_cbfc_pause_frames0", 
QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0), + QL_OFF(nic_stats.rx_cbfc_pause_frames0)}, + {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1), + QL_OFF(nic_stats.rx_cbfc_pause_frames1)}, + {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2), + QL_OFF(nic_stats.rx_cbfc_pause_frames2)}, + {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3), + QL_OFF(nic_stats.rx_cbfc_pause_frames3)}, + {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4), + QL_OFF(nic_stats.rx_cbfc_pause_frames4)}, + {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5), + QL_OFF(nic_stats.rx_cbfc_pause_frames5)}, + {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6), + QL_OFF(nic_stats.rx_cbfc_pause_frames6)}, + {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7), + QL_OFF(nic_stats.rx_cbfc_pause_frames7)}, + {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop), + QL_OFF(nic_stats.rx_nic_fifo_drop)}, +}; + +static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { + "Loopback test (offline)" +}; +#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) +#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) +#define QLGE_RCV_MAC_ERR_STATS 7 + +static int ql_update_ring_coalescing(struct ql_adapter *qdev) +{ + int i, status = 0; + struct rx_ring *rx_ring; + struct cqicb *cqicb; + + if (!netif_running(qdev->ndev)) + return status; + + /* Skip the default queue, and update the outbound handler + * queues if they changed. + */ + cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count]; + if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs || + le16_to_cpu(cqicb->pkt_delay) != + qdev->tx_max_coalesced_frames) { + for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + cqicb = (struct cqicb *)rx_ring; + cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); + cqicb->pkt_delay = + cpu_to_le16(qdev->tx_max_coalesced_frames); + cqicb->flags = FLAGS_LI; + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), + CFG_LCQ, rx_ring->cq_id); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to load CQICB.\n"); + goto exit; + } + } + } + + /* Update the inbound (RSS) handler queues if they changed. */ + cqicb = (struct cqicb *)&qdev->rx_ring[0]; + if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs || + le16_to_cpu(cqicb->pkt_delay) != + qdev->rx_max_coalesced_frames) { + for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { + rx_ring = &qdev->rx_ring[i]; + cqicb = (struct cqicb *)rx_ring; + cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); + cqicb->pkt_delay = + cpu_to_le16(qdev->rx_max_coalesced_frames); + cqicb->flags = FLAGS_LI; + status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), + CFG_LCQ, rx_ring->cq_id); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to load CQICB.\n"); + goto exit; + } + } + } +exit: + return status; +} + +static void ql_update_stats(struct ql_adapter *qdev) +{ + u32 i; + u64 data; + u64 *iter = &qdev->nic_stats.tx_pkts; + + spin_lock(&qdev->stats_lock); + if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { + netif_err(qdev, drv, qdev->ndev, + "Couldn't get xgmac sem.\n"); + goto quit; + } + /* + * Get TX statistics. + */ + for (i = 0x200; i < 0x280; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get RX statistics. 
+ */ + for (i = 0x300; i < 0x3d0; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* Update receive mac error statistics */ + iter += QLGE_RCV_MAC_ERR_STATS; + + /* + * Get Per-priority TX pause frame counter statistics. + */ + for (i = 0x500; i < 0x540; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get Per-priority RX pause frame counter statistics. + */ + for (i = 0x568; i < 0x5a8; i += 8) { + if (ql_read_xgmac_reg64(qdev, i, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", + i); + goto end; + } else + *iter = data; + iter++; + } + + /* + * Get RX NIC FIFO DROP statistics. + */ + if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { + netif_err(qdev, drv, qdev->ndev, + "Error reading status register 0x%.04x.\n", i); + goto end; + } else + *iter = data; +end: + ql_sem_unlock(qdev, qdev->xg_sem_mask); +quit: + spin_unlock(&qdev->stats_lock); + + QL_DUMP_STAT(qdev); +} + +static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + int index; + switch (stringset) { + case ETH_SS_TEST: + memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (index = 0; index < QLGE_STATS_LEN; index++) { + memcpy(buf + index * ETH_GSTRING_LEN, + ql_gstrings_stats[index].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static int ql_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return QLGE_TEST_LEN; + case ETH_SS_STATS: + return QLGE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void +ql_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int index, length; + + length = QLGE_STATS_LEN; + ql_update_stats(qdev); + + for (index = 0; index < length; index++) { + char *p = (char *)qdev + + ql_gstrings_stats[index].stat_offset; + *data++ = (ql_gstrings_stats[index].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : (*(u32 *)p); + } +} + +static int ql_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + ecmd->transceiver = XCVR_EXTERNAL; + if ((qdev->link_status & STS_LINK_TYPE_MASK) == + STS_LINK_TYPE_10GBASET) { + ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); + ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); + ecmd->port = PORT_TP; + ecmd->autoneg = AUTONEG_ENABLE; + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + } + + ethtool_cmd_speed_set(ecmd, SPEED_10000); + ecmd->duplex = DUPLEX_FULL; + + return 0; +} + +static void ql_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *drvinfo) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, qlge_driver_version, + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "v%d.%d.%d", + (qdev->fw_rev_id & 0x00ff0000) >> 16, + (qdev->fw_rev_id & 0x0000ff00) >> 8, + (qdev->fw_rev_id & 0x000000ff)); + strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) + drvinfo->regdump_len = sizeof(struct ql_mpi_coredump); + else + drvinfo->regdump_len = sizeof(struct ql_reg_dump); + drvinfo->eedump_len = 0; +} + +static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + unsigned short ssys_dev = qdev->pdev->subsystem_device; + + /* WOL is only supported for mezz card. */ + if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 || + ssys_dev == QLGE_MEZZ_SSYS_ID_180) { + wol->supported = WAKE_MAGIC; + wol->wolopts = qdev->wol; + } +} + +static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + unsigned short ssys_dev = qdev->pdev->subsystem_device; + + /* WOL is only supported for mezz card. 
*/ + if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 && + ssys_dev != QLGE_MEZZ_SSYS_ID_180) { + netif_info(qdev, drv, qdev->ndev, + "WOL is only supported for mezz card\n"); + return -EOPNOTSUPP; + } + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + qdev->wol = wol->wolopts; + + netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol); + return 0; +} + +static int ql_set_phys_id(struct net_device *ndev, + enum ethtool_phys_id_state state) + +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + /* Save the current LED settings */ + if (ql_mb_get_led_cfg(qdev)) + return -EIO; + + /* Start blinking */ + ql_mb_set_led_cfg(qdev, QL_LED_BLINK); + return 0; + + case ETHTOOL_ID_INACTIVE: + /* Restore LED settings */ + if (ql_mb_set_led_cfg(qdev, qdev->led_config)) + return -EIO; + return 0; + + default: + return -EINVAL; + } +} + +static int ql_start_loopback(struct ql_adapter *qdev) +{ + if (netif_carrier_ok(qdev->ndev)) { + set_bit(QL_LB_LINK_UP, &qdev->flags); + netif_carrier_off(qdev->ndev); + } else + clear_bit(QL_LB_LINK_UP, &qdev->flags); + qdev->link_config |= CFG_LOOPBACK_PCS; + return ql_mb_set_port_cfg(qdev); +} + +static void ql_stop_loopback(struct ql_adapter *qdev) +{ + qdev->link_config &= ~CFG_LOOPBACK_PCS; + ql_mb_set_port_cfg(qdev); + if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { + netif_carrier_on(qdev->ndev); + clear_bit(QL_LB_LINK_UP, &qdev->flags); + } +} + +static void ql_create_lb_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +void ql_check_lb_frame(struct ql_adapter *qdev, + struct sk_buff *skb) +{ + unsigned int frame_size = skb->len; + + if ((*(skb->data + 3) == 0xFF) && + (*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + atomic_dec(&qdev->lb_count); + return; + } +} + +static int ql_run_loopback_test(struct ql_adapter *qdev) +{ + int i; + netdev_tx_t rc; + struct sk_buff *skb; + unsigned int size = SMALL_BUF_MAP_SIZE; + + for (i = 0; i < 64; i++) { + skb = netdev_alloc_skb(qdev->ndev, size); + if (!skb) + return -ENOMEM; + + skb->queue_mapping = 0; + skb_put(skb, size); + ql_create_lb_frame(skb, size); + rc = ql_lb_send(skb, qdev->ndev); + if (rc != NETDEV_TX_OK) + return -EPIPE; + atomic_inc(&qdev->lb_count); + } + /* Give queue time to settle before testing results. */ + msleep(2); + ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); + return atomic_read(&qdev->lb_count) ? -EIO : 0; +} + +static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) +{ + *data = ql_start_loopback(qdev); + if (*data) + goto out; + *data = ql_run_loopback_test(qdev); +out: + ql_stop_loopback(qdev); + return *data; +} + +static void ql_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + memset(data, 0, sizeof(u64) * QLGE_TEST_LEN); + + if (netif_running(ndev)) { + set_bit(QL_SELFTEST, &qdev->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + if (ql_loopback_test(qdev, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + } else { + /* Online tests */ + data[0] = 0; + } + clear_bit(QL_SELFTEST, &qdev->flags); + /* Give link time to come up after + * port configuration changes. 
+		 */
+		msleep_interruptible(4 * 1000);
+	} else {
+		netif_err(qdev, drv, qdev->ndev,
+			  "is down, Loopback test will fail.\n");
+		eth_test->flags |= ETH_TEST_FL_FAILED;
+	}
+}
+
+static int ql_get_regs_len(struct net_device *ndev)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		return sizeof(struct ql_mpi_coredump);
+	else
+		return sizeof(struct ql_reg_dump);
+}
+
+static void ql_get_regs(struct net_device *ndev,
+			struct ethtool_regs *regs, void *p)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	ql_get_dump(qdev, p);
+	qdev->core_is_dumped = 0;
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		regs->len = sizeof(struct ql_mpi_coredump);
+	else
+		regs->len = sizeof(struct ql_reg_dump);
+}
+
+static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(dev);
+
+	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
+	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
+
+	/* This chip coalesces as follows:
+	 * If a packet arrives, hold off interrupts until
+	 * cqicb->int_delay expires, but if no other packets arrive don't
+	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
+	 * timer to coalesce on a frame basis. So, we have to take ethtool's
+	 * max_coalesced_frames value and convert it to a delay in microseconds.
+	 * We do this by using a basic throughput of 1,000,000 frames per
+	 * second @ (1024 bytes). This means one frame per usec. So it's a
+	 * simple one-to-one ratio.
+	 */
+	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
+	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
+
+	return 0;
+}
+
+static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
+{
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	/* Validate user parameters. */
+	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
+		return -EINVAL;
+	/* Don't wait more than 10 usec. */
+	if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
+		return -EINVAL;
+	if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
+		return -EINVAL;
+
+	/* Verify a change took place before updating the hardware.
*/ + if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs && + qdev->tx_coalesce_usecs == c->tx_coalesce_usecs && + qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames && + qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames) + return 0; + + qdev->rx_coalesce_usecs = c->rx_coalesce_usecs; + qdev->tx_coalesce_usecs = c->tx_coalesce_usecs; + qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; + qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; + + return ql_update_ring_coalescing(qdev); +} + +static void ql_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + + ql_mb_get_port_cfg(qdev); + if (qdev->link_config & CFG_PAUSE_STD) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ql_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ql_adapter *qdev = netdev_priv(netdev); + int status = 0; + + if ((pause->rx_pause) && (pause->tx_pause)) + qdev->link_config |= CFG_PAUSE_STD; + else if (!pause->rx_pause && !pause->tx_pause) + qdev->link_config &= ~CFG_PAUSE_STD; + else + return -EINVAL; + + status = ql_mb_set_port_cfg(qdev); + return status; +} + +static u32 ql_get_msglevel(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + return qdev->msg_enable; +} + +static void ql_set_msglevel(struct net_device *ndev, u32 value) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + qdev->msg_enable = value; +} + +const struct ethtool_ops qlge_ethtool_ops = { + .get_settings = ql_get_settings, + .get_drvinfo = ql_get_drvinfo, + .get_wol = ql_get_wol, + .set_wol = ql_set_wol, + .get_regs_len = ql_get_regs_len, + .get_regs = ql_get_regs, + .get_msglevel = ql_get_msglevel, + .set_msglevel = ql_set_msglevel, + .get_link = ethtool_op_get_link, + .set_phys_id = ql_set_phys_id, + .self_test = ql_self_test, + .get_pauseparam = ql_get_pauseparam, + .set_pauseparam = ql_set_pauseparam, + .get_coalesce = ql_get_coalesce, + .set_coalesce = ql_set_coalesce, + .get_sset_count = ql_get_sset_count, + .get_strings = ql_get_strings, + .get_ethtool_stats = ql_get_ethtool_stats, +}; + diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c new file mode 100644 index 000000000..25800a1de --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -0,0 +1,5016 @@ +/* + * QLogic qlge NIC HBA Driver + * Copyright (c) 2003-2008 QLogic Corporation + * See LICENSE.qlge for copyright and licensing details. 
+ * Author: Linux qlge network device driver by + * Ron Mercer + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qlge.h" + +char qlge_driver_name[] = DRV_NAME; +const char qlge_driver_version[] = DRV_VERSION; + +MODULE_AUTHOR("Ron Mercer "); +MODULE_DESCRIPTION(DRV_STRING " "); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = + NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | +/* NETIF_MSG_TIMER | */ + NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | + NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | +/* NETIF_MSG_TX_QUEUED | */ +/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ +/* NETIF_MSG_PKTDATA | */ + NETIF_MSG_HW | NETIF_MSG_WOL | 0; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0664); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +#define MSIX_IRQ 0 +#define MSI_IRQ 1 +#define LEG_IRQ 2 +static int qlge_irq_type = MSIX_IRQ; +module_param(qlge_irq_type, int, 0664); +MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); + +static int qlge_mpi_coredump; +module_param(qlge_mpi_coredump, int, 0); +MODULE_PARM_DESC(qlge_mpi_coredump, + "Option to enable MPI firmware dump. " + "Default is OFF - Do Not allocate memory. "); + +static int qlge_force_coredump; +module_param(qlge_force_coredump, int, 0); +MODULE_PARM_DESC(qlge_force_coredump, + "Option to allow force of firmware core dump. " + "Default is OFF - Do not allow."); + +static const struct pci_device_id qlge_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, + {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); + +static int ql_wol(struct ql_adapter *); +static void qlge_set_multicast_list(struct net_device *); +static int ql_adapter_down(struct ql_adapter *); +static int ql_adapter_up(struct ql_adapter *); + +/* This hardware semaphore causes exclusive access to + * resources shared between the NIC driver, MPI firmware, + * FCOE firmware and the FC driver. 
+ */ +static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) +{ + u32 sem_bits = 0; + + switch (sem_mask) { + case SEM_XGMAC0_MASK: + sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; + break; + case SEM_XGMAC1_MASK: + sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; + break; + case SEM_ICB_MASK: + sem_bits = SEM_SET << SEM_ICB_SHIFT; + break; + case SEM_MAC_ADDR_MASK: + sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; + break; + case SEM_FLASH_MASK: + sem_bits = SEM_SET << SEM_FLASH_SHIFT; + break; + case SEM_PROBE_MASK: + sem_bits = SEM_SET << SEM_PROBE_SHIFT; + break; + case SEM_RT_IDX_MASK: + sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; + break; + case SEM_PROC_REG_MASK: + sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; + break; + default: + netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); + return -EINVAL; + } + + ql_write32(qdev, SEM, sem_bits | sem_mask); + return !(ql_read32(qdev, SEM) & sem_bits); +} + +int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) +{ + unsigned int wait_count = 30; + do { + if (!ql_sem_trylock(qdev, sem_mask)) + return 0; + udelay(100); + } while (--wait_count); + return -ETIMEDOUT; +} + +void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) +{ + ql_write32(qdev, SEM, sem_mask); + ql_read32(qdev, SEM); /* flush */ +} + +/* This function waits for a specific bit to come ready + * in a given register. It is used mostly by the initialize + * process, but is also used in kernel thread API such as + * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid. + */ +int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) +{ + u32 temp; + int count = UDELAY_COUNT; + + while (count) { + temp = ql_read32(qdev, reg); + + /* check for errors */ + if (temp & err_bit) { + netif_alert(qdev, probe, qdev->ndev, + "register 0x%.08x access error, value = 0x%.08x!.\n", + reg, temp); + return -EIO; + } else if (temp & bit) + return 0; + udelay(UDELAY_DELAY); + count--; + } + netif_alert(qdev, probe, qdev->ndev, + "Timed out waiting for reg %x to come ready.\n", reg); + return -ETIMEDOUT; +} + +/* The CFG register is used to download TX and RX control blocks + * to the chip. This function waits for an operation to complete. + */ +static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) +{ + int count = UDELAY_COUNT; + u32 temp; + + while (count) { + temp = ql_read32(qdev, CFG); + if (temp & CFG_LE) + return -EIO; + if (!(temp & bit)) + return 0; + udelay(UDELAY_DELAY); + count--; + } + return -ETIMEDOUT; +} + + +/* Used to issue init control blocks to hw. Maps control block, + * sets address, triggers download, waits for completion. + */ +int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, + u16 q_id) +{ + u64 map; + int status = 0; + int direction; + u32 mask; + u32 value; + + direction = + (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? 
PCI_DMA_TODEVICE : + PCI_DMA_FROMDEVICE; + + map = pci_map_single(qdev->pdev, ptr, size, direction); + if (pci_dma_mapping_error(qdev->pdev, map)) { + netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); + return -ENOMEM; + } + + status = ql_sem_spinlock(qdev, SEM_ICB_MASK); + if (status) + return status; + + status = ql_wait_cfg(qdev, bit); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Timed out waiting for CFG to come ready.\n"); + goto exit; + } + + ql_write32(qdev, ICB_L, (u32) map); + ql_write32(qdev, ICB_H, (u32) (map >> 32)); + + mask = CFG_Q_MASK | (bit << 16); + value = bit | (q_id << CFG_Q_SHIFT); + ql_write32(qdev, CFG, (mask | value)); + + /* + * Wait for the bit to clear after signaling hw. + */ + status = ql_wait_cfg(qdev, bit); +exit: + ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ + pci_unmap_single(qdev->pdev, map, size, direction); + return status; +} + +/* Get a specific MAC address from the CAM. Used for debug and reg dump. */ +int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, + u32 *value) +{ + u32 offset = 0; + int status; + + switch (type) { + case MAC_ADDR_TYPE_MULTI_MAC: + case MAC_ADDR_TYPE_CAM_MAC: + { + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + if (type == MAC_ADDR_TYPE_CAM_MAC) { + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ + status = + ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, + MAC_ADDR_MR, 0); + if (status) + goto exit; + *value++ = ql_read32(qdev, MAC_ADDR_DATA); + } + break; + } + case MAC_ADDR_TYPE_VLAN: + case MAC_ADDR_TYPE_MULTI_FLTR: + default: + netif_crit(qdev, ifup, qdev->ndev, + "Address type %d not yet supported.\n", type); + status = -EPERM; + } +exit: + return status; +} + +/* Set up a MAC, multicast or VLAN address for the + * inbound frame matching. 
+ */ +static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, + u16 index) +{ + u32 offset = 0; + int status = 0; + + switch (type) { + case MAC_ADDR_TYPE_MULTI_MAC: + { + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | (addr[5]); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + break; + } + case MAC_ADDR_TYPE_CAM_MAC: + { + u32 cam_output; + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = + (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | + (addr[5]); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type); /* type */ + /* This field should also include the queue id + and possibly the function id. Right now we hardcode + the route field to NIC core. + */ + cam_output = (CAM_OUT_ROUTE_NIC | + (qdev-> + func << CAM_OUT_FUNC_SHIFT) | + (0 << CAM_OUT_CQ_ID_SHIFT)); + if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + cam_output |= CAM_OUT_RV; + /* route to NIC core */ + ql_write32(qdev, MAC_ADDR_DATA, cam_output); + break; + } + case MAC_ADDR_TYPE_VLAN: + { + u32 enable_bit = *((u32 *) &addr[0]); + /* For VLAN, the addr actually holds a bit that + * either enables or disables the vlan id we are + * addressing. It's either MAC_ADDR_E on or off. + * That's bit-27 we're talking about. + */ + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ + (index << MAC_ADDR_IDX_SHIFT) | /* index */ + type | /* type */ + enable_bit); /* enable/disable */ + break; + } + case MAC_ADDR_TYPE_MULTI_FLTR: + default: + netif_crit(qdev, ifup, qdev->ndev, + "Address type %d not yet supported.\n", type); + status = -EPERM; + } +exit: + return status; +} + +/* Set or clear MAC address in hardware. We sometimes + * have to clear it to prevent wrong frame routing + * especially in a bonding environment. 
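+ * Setting writes qdev->current_mac_addr into this function's CAM
+ * slot; clearing writes a zeroed address to the same slot. The MAC
+ * address semaphore is held across the register access.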
+ */ +static int ql_set_mac_addr(struct ql_adapter *qdev, int set) +{ + int status; + char zero_mac_addr[ETH_ALEN]; + char *addr; + + if (set) { + addr = &qdev->current_mac_addr[0]; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Set Mac addr %pM\n", addr); + } else { + eth_zero_addr(zero_mac_addr); + addr = &zero_mac_addr[0]; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Clearing MAC address\n"); + } + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + status = ql_set_mac_addr_reg(qdev, (u8 *) addr, + MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init mac address.\n"); + return status; +} + +void ql_link_on(struct ql_adapter *qdev) +{ + netif_err(qdev, link, qdev->ndev, "Link is up.\n"); + netif_carrier_on(qdev->ndev); + ql_set_mac_addr(qdev, 1); +} + +void ql_link_off(struct ql_adapter *qdev) +{ + netif_err(qdev, link, qdev->ndev, "Link is down.\n"); + netif_carrier_off(qdev->ndev); + ql_set_mac_addr(qdev, 0); +} + +/* Get a specific frame routing value from the CAM. + * Used for debug and reg dump. + */ +int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) +{ + int status = 0; + + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); + if (status) + goto exit; + + ql_write32(qdev, RT_IDX, + RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); + if (status) + goto exit; + *value = ql_read32(qdev, RT_DATA); +exit: + return status; +} + +/* The NIC function for this chip has 16 routing indexes. Each one can be used + * to route different frame types to various inbound queues. We send broadcast/ + * multicast/error frames to the default queue for slow handling, + * and CAM hit/RSS frames to the fast handling queues. + */ +static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, + int enable) +{ + int status = -EINVAL; /* Return error if no mask match. */ + u32 value = 0; + + switch (mask) { + case RT_IDX_CAM_HIT: + { + value = RT_IDX_DST_CAM_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_VALID: /* Promiscuous Mode frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_IP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } + case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << + RT_IDX_IDX_SHIFT); /* index */ + break; + } + case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_MCAST: /* Pass up All Multicast frames. 
*/ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ + { + value = RT_IDX_DST_RSS | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ + break; + } + case 0: /* Clear the E-bit on an entry. */ + { + value = RT_IDX_DST_DFLT_Q | /* dest */ + RT_IDX_TYPE_NICQ | /* type */ + (index << RT_IDX_IDX_SHIFT);/* index */ + break; + } + default: + netif_err(qdev, ifup, qdev->ndev, + "Mask type %d not yet supported.\n", mask); + status = -EPERM; + goto exit; + } + + if (value) { + status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); + if (status) + goto exit; + value |= (enable ? RT_IDX_E : 0); + ql_write32(qdev, RT_IDX, value); + ql_write32(qdev, RT_DATA, enable ? mask : 0); + } +exit: + return status; +} + +static void ql_enable_interrupts(struct ql_adapter *qdev) +{ + ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); +} + +static void ql_disable_interrupts(struct ql_adapter *qdev) +{ + ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); +} + +/* If we're running with multiple MSI-X vectors then we enable on the fly. + * Otherwise, we may have multiple outstanding workers and don't want to + * enable until the last one finishes. In this case, the irq_cnt gets + * incremented every time we queue a worker and decremented every time + * a worker finishes. Once it hits zero we enable the interrupt. + */ +u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +{ + u32 var = 0; + unsigned long hw_flags = 0; + struct intr_context *ctx = qdev->intr_context + intr; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { + /* Always enable if we're MSIX multi interrupts and + * it's not the default (zeroeth) interrupt. + */ + ql_write32(qdev, INTR_EN, + ctx->intr_en_mask); + var = ql_read32(qdev, STS); + return var; + } + + spin_lock_irqsave(&qdev->hw_lock, hw_flags); + if (atomic_dec_and_test(&ctx->irq_cnt)) { + ql_write32(qdev, INTR_EN, + ctx->intr_en_mask); + var = ql_read32(qdev, STS); + } + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + return var; +} + +static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) +{ + u32 var = 0; + struct intr_context *ctx; + + /* HW disables for us if we're MSIX multi interrupts and + * it's not the default (zeroeth) interrupt. + */ + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) + return 0; + + ctx = qdev->intr_context + intr; + spin_lock(&qdev->hw_lock); + if (!atomic_read(&ctx->irq_cnt)) { + ql_write32(qdev, INTR_EN, + ctx->intr_dis_mask); + var = ql_read32(qdev, STS); + } + atomic_inc(&ctx->irq_cnt); + spin_unlock(&qdev->hw_lock); + return var; +} + +static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) +{ + int i; + for (i = 0; i < qdev->intr_count; i++) { + /* The enable call does a atomic_dec_and_test + * and enables only if the result is zero. + * So we precharge it here. 
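+ * Precharging irq_cnt to 1 lets the atomic_dec_and_test() in
+ * ql_enable_completion_interrupt() reach zero, so the INTR_EN
+ * write actually happens for the legacy/MSI and vector zero cases.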
+ */ + if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || + i == 0)) + atomic_set(&qdev->intr_context[i].irq_cnt, 1); + ql_enable_completion_interrupt(qdev, i); + } + +} + +static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) +{ + int status, i; + u16 csum = 0; + __le16 *flash = (__le16 *)&qdev->flash; + + status = strncmp((char *)&qdev->flash, str, 4); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); + return status; + } + + for (i = 0; i < size; i++) + csum += le16_to_cpu(*flash++); + + if (csum) + netif_err(qdev, ifup, qdev->ndev, + "Invalid flash checksum, csum = 0x%.04x.\n", csum); + + return csum; +} + +static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) +{ + int status = 0; + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); + if (status) + goto exit; + /* set up for reg read */ + ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); + if (status) + goto exit; + /* This data is stored on flash as an array of + * __le32. Since ql_read32() returns cpu endian + * we need to swap it back. + */ + *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); +exit: + return status; +} + +static int ql_get_8000_flash_params(struct ql_adapter *qdev) +{ + u32 i, size; + int status; + __le32 *p = (__le32 *)&qdev->flash; + u32 offset; + u8 mac_addr[6]; + + /* Get flash offset for function and adjust + * for dword access. + */ + if (!qdev->port) + offset = FUNC0_FLASH_OFFSET / sizeof(u32); + else + offset = FUNC1_FLASH_OFFSET / sizeof(u32); + + if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) + return -ETIMEDOUT; + + size = sizeof(struct flash_params_8000) / sizeof(u32); + for (i = 0; i < size; i++, p++) { + status = ql_read_flash_word(qdev, i+offset, p); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Error reading flash.\n"); + goto exit; + } + } + + status = ql_validate_flash(qdev, + sizeof(struct flash_params_8000) / sizeof(u16), + "8000"); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); + status = -EINVAL; + goto exit; + } + + /* Extract either manufacturer or BOFM modified + * MAC address. + */ + if (qdev->flash.flash_params_8000.data_type1 == 2) + memcpy(mac_addr, + qdev->flash.flash_params_8000.mac_addr1, + qdev->ndev->addr_len); + else + memcpy(mac_addr, + qdev->flash.flash_params_8000.mac_addr, + qdev->ndev->addr_len); + + if (!is_valid_ether_addr(mac_addr)) { + netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n"); + status = -EINVAL; + goto exit; + } + + memcpy(qdev->ndev->dev_addr, + mac_addr, + qdev->ndev->addr_len); + +exit: + ql_sem_unlock(qdev, SEM_FLASH_MASK); + return status; +} + +static int ql_get_8012_flash_params(struct ql_adapter *qdev) +{ + int i; + int status; + __le32 *p = (__le32 *)&qdev->flash; + u32 offset = 0; + u32 size = sizeof(struct flash_params_8012) / sizeof(u32); + + /* Second function's parameters follow the first + * function's. 
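+ * The offset is in 32-bit words, so function 1 starts reading
+ * sizeof(struct flash_params_8012) / sizeof(u32) words into flash.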
+ */ + if (qdev->port) + offset = size; + + if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) + return -ETIMEDOUT; + + for (i = 0; i < size; i++, p++) { + status = ql_read_flash_word(qdev, i+offset, p); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Error reading flash.\n"); + goto exit; + } + + } + + status = ql_validate_flash(qdev, + sizeof(struct flash_params_8012) / sizeof(u16), + "8012"); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); + status = -EINVAL; + goto exit; + } + + if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) { + status = -EINVAL; + goto exit; + } + + memcpy(qdev->ndev->dev_addr, + qdev->flash.flash_params_8012.mac_addr, + qdev->ndev->addr_len); + +exit: + ql_sem_unlock(qdev, SEM_FLASH_MASK); + return status; +} + +/* xgmac register are located behind the xgmac_addr and xgmac_data + * register pair. Each read/write requires us to wait for the ready + * bit before reading/writing the data. + */ +static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) +{ + int status; + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + return status; + /* write the data to the data reg */ + ql_write32(qdev, XGMAC_DATA, data); + /* trigger the write */ + ql_write32(qdev, XGMAC_ADDR, reg); + return status; +} + +/* xgmac register are located behind the xgmac_addr and xgmac_data + * register pair. Each read/write requires us to wait for the ready + * bit before reading/writing the data. + */ +int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) +{ + int status = 0; + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + /* set up for reg read */ + ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, + XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); + if (status) + goto exit; + /* get the data */ + *data = ql_read32(qdev, XGMAC_DATA); +exit: + return status; +} + +/* This is used for reading the 64-bit statistics regs. */ +int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) +{ + int status = 0; + u32 hi = 0; + u32 lo = 0; + + status = ql_read_xgmac_reg(qdev, reg, &lo); + if (status) + goto exit; + + status = ql_read_xgmac_reg(qdev, reg + 4, &hi); + if (status) + goto exit; + + *data = (u64) lo | ((u64) hi << 32); + +exit: + return status; +} + +static int ql_8000_port_initialize(struct ql_adapter *qdev) +{ + int status; + /* + * Get MPI firmware version for driver banner + * and ethool info. + */ + status = ql_mb_about_fw(qdev); + if (status) + goto exit; + status = ql_mb_get_fw_state(qdev); + if (status) + goto exit; + /* Wake up a worker to get/set the TX/RX frame sizes. */ + queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0); +exit: + return status; +} + +/* Take the MAC Core out of reset. + * Enable statistics counting. + * Take the transmitter/receiver out of reset. + * This functionality may be done in the MPI firmware at a + * later date. + */ +static int ql_8012_port_initialize(struct ql_adapter *qdev) +{ + int status = 0; + u32 data; + + if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { + /* Another function has the semaphore, so + * wait for the port init bit to come ready. 
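+ * ql_sem_trylock() returns nonzero when the xgmac semaphore is
+ * already held, so poll the STS port_init bit that the owning
+ * function sets once it finishes bringing the port up.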
+ */ + netif_info(qdev, link, qdev->ndev, + "Another function has the semaphore, so wait for the port init bit to come ready.\n"); + status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); + if (status) { + netif_crit(qdev, link, qdev->ndev, + "Port initialize timed out.\n"); + } + return status; + } + + netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n"); + /* Set the core reset. */ + status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); + if (status) + goto end; + data |= GLOBAL_CFG_RESET; + status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); + if (status) + goto end; + + /* Clear the core reset and turn on jumbo for receiver. */ + data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */ + data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */ + data |= GLOBAL_CFG_TX_STAT_EN; + data |= GLOBAL_CFG_RX_STAT_EN; + status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); + if (status) + goto end; + + /* Enable transmitter, and clear it's reset. */ + status = ql_read_xgmac_reg(qdev, TX_CFG, &data); + if (status) + goto end; + data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */ + data |= TX_CFG_EN; /* Enable the transmitter. */ + status = ql_write_xgmac_reg(qdev, TX_CFG, data); + if (status) + goto end; + + /* Enable receiver and clear it's reset. */ + status = ql_read_xgmac_reg(qdev, RX_CFG, &data); + if (status) + goto end; + data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */ + data |= RX_CFG_EN; /* Enable the receiver. */ + status = ql_write_xgmac_reg(qdev, RX_CFG, data); + if (status) + goto end; + + /* Turn on jumbo. */ + status = + ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); + if (status) + goto end; + status = + ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); + if (status) + goto end; + + /* Signal to the world that the port is enabled. */ + ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); +end: + ql_sem_unlock(qdev, qdev->xg_sem_mask); + return status; +} + +static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) +{ + return PAGE_SIZE << qdev->lbq_buf_order; +} + +/* Get the next large buffer. */ +static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) +{ + struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; + rx_ring->lbq_curr_idx++; + if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) + rx_ring->lbq_curr_idx = 0; + rx_ring->lbq_free_cnt++; + return lbq_desc; +} + +static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); + + pci_dma_sync_single_for_cpu(qdev->pdev, + dma_unmap_addr(lbq_desc, mapaddr), + rx_ring->lbq_buf_size, + PCI_DMA_FROMDEVICE); + + /* If it's the last chunk of our master page then + * we unmap it. + */ + if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) + == ql_lbq_block_size(qdev)) + pci_unmap_page(qdev->pdev, + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + return lbq_desc; +} + +/* Get the next small buffer. */ +static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) +{ + struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; + rx_ring->sbq_curr_idx++; + if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) + rx_ring->sbq_curr_idx = 0; + rx_ring->sbq_free_cnt++; + return sbq_desc; +} + +/* Update an rx ring index. 
*/ +static void ql_update_cq(struct rx_ring *rx_ring) +{ + rx_ring->cnsmr_idx++; + rx_ring->curr_entry++; + if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { + rx_ring->cnsmr_idx = 0; + rx_ring->curr_entry = rx_ring->cq_base; + } +} + +static void ql_write_cq_idx(struct rx_ring *rx_ring) +{ + ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); +} + +static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, + struct bq_desc *lbq_desc) +{ + if (!rx_ring->pg_chunk.page) { + u64 map; + rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | + GFP_ATOMIC, + qdev->lbq_buf_order); + if (unlikely(!rx_ring->pg_chunk.page)) { + netif_err(qdev, drv, qdev->ndev, + "page allocation failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.offset = 0; + map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, + 0, ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, map)) { + __free_pages(rx_ring->pg_chunk.page, + qdev->lbq_buf_order); + rx_ring->pg_chunk.page = NULL; + netif_err(qdev, drv, qdev->ndev, + "PCI mapping failed.\n"); + return -ENOMEM; + } + rx_ring->pg_chunk.map = map; + rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); + } + + /* Copy the current master pg_chunk info + * to the current descriptor. + */ + lbq_desc->p.pg_chunk = rx_ring->pg_chunk; + + /* Adjust the master page chunk for next + * buffer get. + */ + rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; + if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { + rx_ring->pg_chunk.page = NULL; + lbq_desc->p.pg_chunk.last_flag = 1; + } else { + rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; + get_page(rx_ring->pg_chunk.page); + lbq_desc->p.pg_chunk.last_flag = 0; + } + return 0; +} +/* Process (refill) a large buffer queue. */ +static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + u32 clean_idx = rx_ring->lbq_clean_idx; + u32 start_idx = clean_idx; + struct bq_desc *lbq_desc; + u64 map; + int i; + + while (rx_ring->lbq_free_cnt > 32) { + for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "lbq: try cleaning clean_idx = %d.\n", + clean_idx); + lbq_desc = &rx_ring->lbq[clean_idx]; + if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { + rx_ring->lbq_clean_idx = clean_idx; + netif_err(qdev, ifup, qdev->ndev, + "Could not get a page chunk, i=%d, clean_idx =%d .\n", + i, clean_idx); + return; + } + + map = lbq_desc->p.pg_chunk.map + + lbq_desc->p.pg_chunk.offset; + dma_unmap_addr_set(lbq_desc, mapaddr, map); + dma_unmap_len_set(lbq_desc, maplen, + rx_ring->lbq_buf_size); + *lbq_desc->addr = cpu_to_le64(map); + + pci_dma_sync_single_for_device(qdev->pdev, map, + rx_ring->lbq_buf_size, + PCI_DMA_FROMDEVICE); + clean_idx++; + if (clean_idx == rx_ring->lbq_len) + clean_idx = 0; + } + + rx_ring->lbq_clean_idx = clean_idx; + rx_ring->lbq_prod_idx += 16; + if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) + rx_ring->lbq_prod_idx = 0; + rx_ring->lbq_free_cnt -= 16; + } + + if (start_idx != clean_idx) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "lbq: updating prod idx = %d.\n", + rx_ring->lbq_prod_idx); + ql_write_db_reg(rx_ring->lbq_prod_idx, + rx_ring->lbq_prod_idx_db_reg); + } +} + +/* Process (refill) a small buffer queue. 
*/ +static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + u32 clean_idx = rx_ring->sbq_clean_idx; + u32 start_idx = clean_idx; + struct bq_desc *sbq_desc; + u64 map; + int i; + + while (rx_ring->sbq_free_cnt > 16) { + for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) { + sbq_desc = &rx_ring->sbq[clean_idx]; + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "sbq: try cleaning clean_idx = %d.\n", + clean_idx); + if (sbq_desc->p.skb == NULL) { + netif_printk(qdev, rx_status, KERN_DEBUG, + qdev->ndev, + "sbq: getting new skb for index %d.\n", + sbq_desc->index); + sbq_desc->p.skb = + netdev_alloc_skb(qdev->ndev, + SMALL_BUFFER_SIZE); + if (sbq_desc->p.skb == NULL) { + rx_ring->sbq_clean_idx = clean_idx; + return; + } + skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); + map = pci_map_single(qdev->pdev, + sbq_desc->p.skb->data, + rx_ring->sbq_buf_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(qdev->pdev, map)) { + netif_err(qdev, ifup, qdev->ndev, + "PCI mapping failed.\n"); + rx_ring->sbq_clean_idx = clean_idx; + dev_kfree_skb_any(sbq_desc->p.skb); + sbq_desc->p.skb = NULL; + return; + } + dma_unmap_addr_set(sbq_desc, mapaddr, map); + dma_unmap_len_set(sbq_desc, maplen, + rx_ring->sbq_buf_size); + *sbq_desc->addr = cpu_to_le64(map); + } + + clean_idx++; + if (clean_idx == rx_ring->sbq_len) + clean_idx = 0; + } + rx_ring->sbq_clean_idx = clean_idx; + rx_ring->sbq_prod_idx += 16; + if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) + rx_ring->sbq_prod_idx = 0; + rx_ring->sbq_free_cnt -= 16; + } + + if (start_idx != clean_idx) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "sbq: updating prod idx = %d.\n", + rx_ring->sbq_prod_idx); + ql_write_db_reg(rx_ring->sbq_prod_idx, + rx_ring->sbq_prod_idx_db_reg); + } +} + +static void ql_update_buffer_queues(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + ql_update_sbq(qdev, rx_ring); + ql_update_lbq(qdev, rx_ring); +} + +/* Unmaps tx buffers. Can be called from send() if a pci mapping + * fails at some stage, or from the interrupt when a tx completes. + */ +static void ql_unmap_send(struct ql_adapter *qdev, + struct tx_ring_desc *tx_ring_desc, int mapped) +{ + int i; + for (i = 0; i < mapped; i++) { + if (i == 0 || (i == 7 && mapped > 7)) { + /* + * Unmap the skb->data area, or the + * external sglist (AKA the Outbound + * Address List (OAL)). + * If its the zeroeth element, then it's + * the skb->data area. If it's the 7th + * element and there is more than 6 frags, + * then its an OAL. + */ + if (i == 7) { + netif_printk(qdev, tx_done, KERN_DEBUG, + qdev->ndev, + "unmapping OAL area.\n"); + } + pci_unmap_single(qdev->pdev, + dma_unmap_addr(&tx_ring_desc->map[i], + mapaddr), + dma_unmap_len(&tx_ring_desc->map[i], + maplen), + PCI_DMA_TODEVICE); + } else { + netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, + "unmapping frag %d.\n", i); + pci_unmap_page(qdev->pdev, + dma_unmap_addr(&tx_ring_desc->map[i], + mapaddr), + dma_unmap_len(&tx_ring_desc->map[i], + maplen), PCI_DMA_TODEVICE); + } + } + +} + +/* Map the buffers for this transmit. This will return + * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 
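+ * The skb linear data fills the first of the eight descriptors in
+ * the IOCB and the fragments fill the rest. With more than seven
+ * fragments the eighth descriptor points at the external OAL, which
+ * holds the remaining fragments. Any mapping failure unwinds what
+ * was already mapped and returns NETDEV_TX_BUSY.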
+ */ +static int ql_map_send(struct ql_adapter *qdev, + struct ob_mac_iocb_req *mac_iocb_ptr, + struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) +{ + int len = skb_headlen(skb); + dma_addr_t map; + int frag_idx, err, map_idx = 0; + struct tx_buf_desc *tbd = mac_iocb_ptr->tbd; + int frag_cnt = skb_shinfo(skb)->nr_frags; + + if (frag_cnt) { + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "frag_cnt = %d.\n", frag_cnt); + } + /* + * Map the skb buffer first. + */ + map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); + + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netif_err(qdev, tx_queued, qdev->ndev, + "PCI mapping failed with error: %d\n", err); + + return NETDEV_TX_BUSY; + } + + tbd->len = cpu_to_le32(len); + tbd->addr = cpu_to_le64(map); + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); + map_idx++; + + /* + * This loop fills the remainder of the 8 address descriptors + * in the IOCB. If there are more than 7 fragments, then the + * eighth address desc will point to an external list (OAL). + * When this happens, the remainder of the frags will be stored + * in this list. + */ + for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; + tbd++; + if (frag_idx == 6 && frag_cnt > 7) { + /* Let's tack on an sglist. + * Our control block will now + * look like this: + * iocb->seg[0] = skb->data + * iocb->seg[1] = frag[0] + * iocb->seg[2] = frag[1] + * iocb->seg[3] = frag[2] + * iocb->seg[4] = frag[3] + * iocb->seg[5] = frag[4] + * iocb->seg[6] = frag[5] + * iocb->seg[7] = ptr to OAL (external sglist) + * oal->seg[0] = frag[6] + * oal->seg[1] = frag[7] + * oal->seg[2] = frag[8] + * oal->seg[3] = frag[9] + * oal->seg[4] = frag[10] + * etc... + */ + /* Tack on the OAL in the eighth segment of IOCB. */ + map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, + sizeof(struct oal), + PCI_DMA_TODEVICE); + err = pci_dma_mapping_error(qdev->pdev, map); + if (err) { + netif_err(qdev, tx_queued, qdev->ndev, + "PCI mapping outbound address list with error: %d\n", + err); + goto map_error; + } + + tbd->addr = cpu_to_le64(map); + /* + * The length is the number of fragments + * that remain to be mapped times the length + * of our sglist (OAL). + */ + tbd->len = + cpu_to_le32((sizeof(struct tx_buf_desc) * + (frag_cnt - frag_idx)) | TX_DESC_C); + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, + map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, + sizeof(struct oal)); + tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; + map_idx++; + } + + map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + err = dma_mapping_error(&qdev->pdev->dev, map); + if (err) { + netif_err(qdev, tx_queued, qdev->ndev, + "PCI mapping frags failed with error: %d.\n", + err); + goto map_error; + } + + tbd->addr = cpu_to_le64(map); + tbd->len = cpu_to_le32(skb_frag_size(frag)); + dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); + dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, + skb_frag_size(frag)); + + } + /* Save the number of segments we've mapped. */ + tx_ring_desc->map_cnt = map_idx; + /* Terminate the last segment. */ + tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); + return NETDEV_TX_OK; + +map_error: + /* + * If the first frag mapping failed, then i will be zero. + * This causes the unmap of the skb->data area. 
Otherwise + * we pass in the number of frags that mapped successfully + * so they can be umapped. + */ + ql_unmap_send(qdev, tx_ring_desc, map_idx); + return NETDEV_TX_BUSY; +} + +/* Categorizing receive firmware frame errors */ +static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, + struct rx_ring *rx_ring) +{ + struct nic_stats *stats = &qdev->nic_stats; + + stats->rx_err_count++; + rx_ring->rx_errors++; + + switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { + case IB_MAC_IOCB_RSP_ERR_CODE_ERR: + stats->rx_code_err++; + break; + case IB_MAC_IOCB_RSP_ERR_OVERSIZE: + stats->rx_oversize_err++; + break; + case IB_MAC_IOCB_RSP_ERR_UNDERSIZE: + stats->rx_undersize_err++; + break; + case IB_MAC_IOCB_RSP_ERR_PREAMBLE: + stats->rx_preamble_err++; + break; + case IB_MAC_IOCB_RSP_ERR_FRAME_LEN: + stats->rx_frame_len_err++; + break; + case IB_MAC_IOCB_RSP_ERR_CRC: + stats->rx_crc_err++; + default: + break; + } +} + +/** + * ql_update_mac_hdr_len - helper routine to update the mac header length + * based on vlan tags if present + */ +static void ql_update_mac_hdr_len(struct ql_adapter *qdev, + struct ib_mac_iocb_rsp *ib_mac_rsp, + void *page, size_t *len) +{ + u16 *tags; + + if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + return; + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) { + tags = (u16 *)page; + /* Look for stacked vlan tags in ethertype field */ + if (tags[6] == ETH_P_8021Q && + tags[8] == ETH_P_8021Q) + *len += 2 * VLAN_HLEN; + else + *len += VLAN_HLEN; + } +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u32 length, + u16 vlan_id) +{ + struct sk_buff *skb; + struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + struct napi_struct *napi = &rx_ring->napi; + + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + put_page(lbq_desc->p.pg_chunk.page); + return; + } + napi->dev = qdev->ndev; + + skb = napi_get_frags(napi); + if (!skb) { + netif_err(qdev, drv, qdev->ndev, + "Couldn't get an skb, exiting.\n"); + rx_ring->rx_dropped++; + put_page(lbq_desc->p.pg_chunk.page); + return; + } + prefetch(lbq_desc->p.pg_chunk.va); + __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); + + skb->len += length; + skb->data_len += length; + skb->truesize += length; + skb_shinfo(skb)->nr_frags++; + + rx_ring->rx_packets++; + rx_ring->rx_bytes += length; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rx_ring->cq_id); + if (vlan_id != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); + napi_gro_frags(napi); +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_rx_page(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u32 length, + u16 vlan_id) +{ + struct net_device *ndev = qdev->ndev; + struct sk_buff *skb = NULL; + void *addr; + struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + struct napi_struct *napi = &rx_ring->napi; + size_t hlen = ETH_HLEN; + + skb = netdev_alloc_skb(ndev, length); + if (!skb) { + rx_ring->rx_dropped++; + put_page(lbq_desc->p.pg_chunk.page); + return; + } + + addr = lbq_desc->p.pg_chunk.va; + prefetch(addr); + + /* Frame error, so drop the packet. 
*/ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + goto err_out; + } + + /* Update the MAC header length*/ + ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen); + + /* The max framesize filter on this chip is set higher than + * MTU since FCoE uses 2k frames. + */ + if (skb->len > ndev->mtu + hlen) { + netif_err(qdev, drv, qdev->ndev, + "Segment too small, dropping.\n"); + rx_ring->rx_dropped++; + goto err_out; + } + memcpy(skb_put(skb, hlen), addr, hlen); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", + length); + skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset + hlen, + length - hlen); + skb->len += length - hlen; + skb->data_len += length - hlen; + skb->truesize += length - hlen; + + rx_ring->rx_packets++; + rx_ring->rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + + if ((ndev->features & NETIF_F_RXCSUM) && + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { + /* TCP frame. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "TCP checksum done!\n"); + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { + /* Unfragmented ipv4 UDP frame. */ + struct iphdr *iph = + (struct iphdr *)((u8 *)addr + hlen); + if (!(iph->frag_off & + htons(IP_MF|IP_OFFSET))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_printk(qdev, rx_status, KERN_DEBUG, + qdev->ndev, + "UDP checksum done!\n"); + } + } + } + + skb_record_rx_queue(skb, rx_ring->cq_id); + if (vlan_id != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + napi_gro_receive(napi, skb); + else + netif_receive_skb(skb); + return; +err_out: + dev_kfree_skb_any(skb); + put_page(lbq_desc->p.pg_chunk.page); +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_rx_skb(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u32 length, + u16 vlan_id) +{ + struct net_device *ndev = qdev->ndev; + struct sk_buff *skb = NULL; + struct sk_buff *new_skb = NULL; + struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); + + skb = sbq_desc->p.skb; + /* Allocate new_skb and copy */ + new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); + if (new_skb == NULL) { + rx_ring->rx_dropped++; + return; + } + skb_reserve(new_skb, NET_IP_ALIGN); + memcpy(skb_put(new_skb, length), skb->data, length); + skb = new_skb; + + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + + /* loopback self test for ethtool */ + if (test_bit(QL_SELFTEST, &qdev->flags)) { + ql_check_lb_frame(qdev, skb); + dev_kfree_skb_any(skb); + return; + } + + /* The max framesize filter on this chip is set higher than + * MTU since FCoE uses 2k frames. + */ + if (skb->len > ndev->mtu + ETH_HLEN) { + dev_kfree_skb_any(skb); + rx_ring->rx_dropped++; + return; + } + + prefetch(skb->data); + if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%s Multicast.\n", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_HASH ? 
"Hash" : + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_REG ? "Registered" : + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); + } + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Promiscuous Packet.\n"); + + rx_ring->rx_packets++; + rx_ring->rx_bytes += skb->len; + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + + /* If rx checksum is on, and there are no + * csum or frame errors. + */ + if ((ndev->features & NETIF_F_RXCSUM) && + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { + /* TCP frame. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "TCP checksum done!\n"); + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { + /* Unfragmented ipv4 UDP frame. */ + struct iphdr *iph = (struct iphdr *) skb->data; + if (!(iph->frag_off & + htons(IP_MF|IP_OFFSET))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_printk(qdev, rx_status, KERN_DEBUG, + qdev->ndev, + "UDP checksum done!\n"); + } + } + } + + skb_record_rx_queue(skb, rx_ring->cq_id); + if (vlan_id != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + napi_gro_receive(&rx_ring->napi, skb); + else + netif_receive_skb(skb); +} + +static void ql_realign_skb(struct sk_buff *skb, int len) +{ + void *temp_addr = skb->data; + + /* Undo the skb_reserve(skb,32) we did before + * giving to hardware, and realign data on + * a 2-byte boundary. + */ + skb->data -= QLGE_SB_PAD - NET_IP_ALIGN; + skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN; + skb_copy_to_linear_data(skb, temp_addr, + (unsigned int)len); +} + +/* + * This function builds an skb for the given inbound + * completion. It will be rewritten for readability in the near + * future, but for not it works well. + */ +static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp) +{ + struct bq_desc *lbq_desc; + struct bq_desc *sbq_desc; + struct sk_buff *skb = NULL; + u32 length = le32_to_cpu(ib_mac_rsp->data_len); + u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); + size_t hlen = ETH_HLEN; + + /* + * Handle the header buffer if present. + */ + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && + ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Header of %d bytes in small buffer.\n", hdr_len); + /* + * Headers fit nicely into a small buffer. + */ + sbq_desc = ql_get_curr_sbuf(rx_ring); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + skb = sbq_desc->p.skb; + ql_realign_skb(skb, hdr_len); + skb_put(skb, hdr_len); + sbq_desc->p.skb = NULL; + } + + /* + * Handle the data buffer(s). + */ + if (unlikely(!length)) { /* Is there data too? */ + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "No Data buffer in this packet.\n"); + return skb; + } + + if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Headers in small, data of %d bytes in small, combine them.\n", + length); + /* + * Data is less than small buffer size so it's + * stuffed in a small buffer. 
+ * For this case we append the data + * from the "data" small buffer to the "header" small + * buffer. + */ + sbq_desc = ql_get_curr_sbuf(rx_ring); + pci_dma_sync_single_for_cpu(qdev->pdev, + dma_unmap_addr + (sbq_desc, mapaddr), + dma_unmap_len + (sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + memcpy(skb_put(skb, length), + sbq_desc->p.skb->data, length); + pci_dma_sync_single_for_device(qdev->pdev, + dma_unmap_addr + (sbq_desc, + mapaddr), + dma_unmap_len + (sbq_desc, + maplen), + PCI_DMA_FROMDEVICE); + } else { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes in a single small buffer.\n", + length); + sbq_desc = ql_get_curr_sbuf(rx_ring); + skb = sbq_desc->p.skb; + ql_realign_skb(skb, length); + skb_put(skb, length); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, + mapaddr), + dma_unmap_len(sbq_desc, + maplen), + PCI_DMA_FROMDEVICE); + sbq_desc->p.skb = NULL; + } + } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Header in small, %d bytes in large. Chain large to small!\n", + length); + /* + * The data is in a single large buffer. We + * chain it to the header buffer's skb and let + * it rip. + */ + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Chaining page at offset = %d, for %d bytes to skb.\n", + lbq_desc->p.pg_chunk.offset, length); + skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } else { + /* + * The headers and data are in a single large buffer. We + * copy it to a new skb and let it go. This can happen with + * jumbo mtu on a non-TCP/UDP frame. + */ + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + skb = netdev_alloc_skb(qdev->ndev, length); + if (skb == NULL) { + netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, + "No skb available, drop the packet.\n"); + return NULL; + } + pci_unmap_page(qdev->pdev, + dma_unmap_addr(lbq_desc, + mapaddr), + dma_unmap_len(lbq_desc, maplen), + PCI_DMA_FROMDEVICE); + skb_reserve(skb, NET_IP_ALIGN); + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", + length); + skb_fill_page_desc(skb, 0, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + length); + skb->len += length; + skb->data_len += length; + skb->truesize += length; + length -= length; + ql_update_mac_hdr_len(qdev, ib_mac_rsp, + lbq_desc->p.pg_chunk.va, + &hlen); + __pskb_pull_tail(skb, hlen); + } + } else { + /* + * The data is in a chain of large buffers + * pointed to by a small buffer. We loop + * thru and chain them to the our small header + * buffer's skb. + * frags: There are 18 max frags and our small + * buffer will hold 32 of them. The thing is, + * we'll use 3 max for our 9000 byte jumbo + * frames. If the MTU goes up we could + * eventually be in trouble. + */ + int size, i = 0; + sbq_desc = ql_get_curr_sbuf(rx_ring); + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { + /* + * This is an non TCP/UDP IP frame, so + * the headers aren't split into a small + * buffer. We have to use the small buffer + * that contains our sg list as our skb to + * send upstairs. 
Copy the sg list here to + * a local buffer and use it to find the + * pages to chain. + */ + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "%d bytes of headers & data in chain of large.\n", + length); + skb = sbq_desc->p.skb; + sbq_desc->p.skb = NULL; + skb_reserve(skb, NET_IP_ALIGN); + } + do { + lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); + size = (length < rx_ring->lbq_buf_size) ? length : + rx_ring->lbq_buf_size; + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Adding page %d to skb for %d bytes.\n", + i, size); + skb_fill_page_desc(skb, i, + lbq_desc->p.pg_chunk.page, + lbq_desc->p.pg_chunk.offset, + size); + skb->len += size; + skb->data_len += size; + skb->truesize += size; + length -= size; + i++; + } while (length > 0); + ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, + &hlen); + __pskb_pull_tail(skb, hlen); + } + return skb; +} + +/* Process an inbound completion from an rx ring. */ +static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp, + u16 vlan_id) +{ + struct net_device *ndev = qdev->ndev; + struct sk_buff *skb = NULL; + + QL_DUMP_IB_MAC_RSP(ib_mac_rsp); + + skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); + if (unlikely(!skb)) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "No skb available, drop packet.\n"); + rx_ring->rx_dropped++; + return; + } + + /* Frame error, so drop the packet. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { + ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); + dev_kfree_skb_any(skb); + return; + } + + /* The max framesize filter on this chip is set higher than + * MTU since FCoE uses 2k frames. + */ + if (skb->len > ndev->mtu + ETH_HLEN) { + dev_kfree_skb_any(skb); + rx_ring->rx_dropped++; + return; + } + + /* loopback self test for ethtool */ + if (test_bit(QL_SELFTEST, &qdev->flags)) { + ql_check_lb_frame(qdev, skb); + dev_kfree_skb_any(skb); + return; + } + + prefetch(skb->data); + if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_HASH ? "Hash" : + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_REG ? "Registered" : + (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == + IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); + rx_ring->rx_multicast++; + } + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Promiscuous Packet.\n"); + } + + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + + /* If rx checksum is on, and there are no + * csum or frame errors. + */ + if ((ndev->features & NETIF_F_RXCSUM) && + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { + /* TCP frame. */ + if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "TCP checksum done!\n"); + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { + /* Unfragmented ipv4 UDP frame. 
*/ + struct iphdr *iph = (struct iphdr *) skb->data; + if (!(iph->frag_off & + htons(IP_MF|IP_OFFSET))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "TCP checksum done!\n"); + } + } + } + + rx_ring->rx_packets++; + rx_ring->rx_bytes += skb->len; + skb_record_rx_queue(skb, rx_ring->cq_id); + if (vlan_id != 0xffff) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id); + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + napi_gro_receive(&rx_ring->napi, skb); + else + netif_receive_skb(skb); +} + +/* Process an inbound completion from an rx ring. */ +static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, + struct rx_ring *rx_ring, + struct ib_mac_iocb_rsp *ib_mac_rsp) +{ + u32 length = le32_to_cpu(ib_mac_rsp->data_len); + u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && + (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ? + ((le16_to_cpu(ib_mac_rsp->vlan_id) & + IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; + + QL_DUMP_IB_MAC_RSP(ib_mac_rsp); + + if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { + /* The data and headers are split into + * separate buffers. + */ + ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, + vlan_id); + } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { + /* The data fit in a single small buffer. + * Allocate a new skb, copy the data and + * return the buffer to the free pool. + */ + ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, + length, vlan_id); + } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) && + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) && + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) { + /* TCP packet in a page chunk that's been checksummed. + * Tack it on to our GRO skb and let it go. + */ + ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, + length, vlan_id); + } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { + /* Non-TCP packet in a page chunk. Allocate an + * skb, tack it on frags, and send it up. + */ + ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, + length, vlan_id); + } else { + /* Non-TCP/UDP large frames that span multiple buffers + * can be processed corrrectly by the split frame logic. + */ + ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, + vlan_id); + } + + return (unsigned long)length; +} + +/* Process an outbound completion from an rx ring. */ +static void ql_process_mac_tx_intr(struct ql_adapter *qdev, + struct ob_mac_iocb_rsp *mac_rsp) +{ + struct tx_ring *tx_ring; + struct tx_ring_desc *tx_ring_desc; + + QL_DUMP_OB_MAC_RSP(mac_rsp); + tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; + tx_ring_desc = &tx_ring->q[mac_rsp->tid]; + ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); + tx_ring->tx_bytes += (tx_ring_desc->skb)->len; + tx_ring->tx_packets++; + dev_kfree_skb(tx_ring_desc->skb); + tx_ring_desc->skb = NULL; + + if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | + OB_MAC_IOCB_RSP_S | + OB_MAC_IOCB_RSP_L | + OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { + netif_warn(qdev, tx_done, qdev->ndev, + "Total descriptor length did not match transfer length.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { + netif_warn(qdev, tx_done, qdev->ndev, + "Frame too short to be valid, not sent.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { + netif_warn(qdev, tx_done, qdev->ndev, + "Frame too long, but sent anyway.\n"); + } + if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { + netif_warn(qdev, tx_done, qdev->ndev, + "PCI backplane error. 
Frame not sent.\n"); + } + } + atomic_inc(&tx_ring->tx_count); +} + +/* Fire up a handler to reset the MPI processor. */ +void ql_queue_fw_error(struct ql_adapter *qdev) +{ + ql_link_off(qdev); + queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); +} + +void ql_queue_asic_error(struct ql_adapter *qdev) +{ + ql_link_off(qdev); + ql_disable_interrupts(qdev); + /* Clear adapter up bit to signal the recovery + * process that it shouldn't kill the reset worker + * thread + */ + clear_bit(QL_ADAPTER_UP, &qdev->flags); + /* Set asic recovery bit to indicate reset process that we are + * in fatal error recovery process rather than normal close + */ + set_bit(QL_ASIC_RECOVERY, &qdev->flags); + queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); +} + +static void ql_process_chip_ae_intr(struct ql_adapter *qdev, + struct ib_ae_iocb_rsp *ib_ae_rsp) +{ + switch (ib_ae_rsp->event) { + case MGMT_ERR_EVENT: + netif_err(qdev, rx_err, qdev->ndev, + "Management Processor Fatal Error.\n"); + ql_queue_fw_error(qdev); + return; + + case CAM_LOOKUP_ERR_EVENT: + netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); + netdev_err(qdev->ndev, "This event shouldn't occur.\n"); + ql_queue_asic_error(qdev); + return; + + case SOFT_ECC_ERROR_EVENT: + netdev_err(qdev->ndev, "Soft ECC error detected.\n"); + ql_queue_asic_error(qdev); + break; + + case PCI_ERR_ANON_BUF_RD: + netdev_err(qdev->ndev, "PCI error occurred when reading " + "anonymous buffers from rx_ring %d.\n", + ib_ae_rsp->q_id); + ql_queue_asic_error(qdev); + break; + + default: + netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", + ib_ae_rsp->event); + ql_queue_asic_error(qdev); + break; + } +} + +static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) +{ + struct ql_adapter *qdev = rx_ring->qdev; + u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + struct ob_mac_iocb_rsp *net_rsp = NULL; + int count = 0; + + struct tx_ring *tx_ring; + /* While there are entries in the completion queue. */ + while (prod != rx_ring->cnsmr_idx) { + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "cq_id = %d, prod = %d, cnsmr = %d.\n.", + rx_ring->cq_id, prod, rx_ring->cnsmr_idx); + + net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; + rmb(); + switch (net_rsp->opcode) { + + case OPCODE_OB_MAC_TSO_IOCB: + case OPCODE_OB_MAC_IOCB: + ql_process_mac_tx_intr(qdev, net_rsp); + break; + default: + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Hit default case, not handled! dropping the packet, opcode = %x.\n", + net_rsp->opcode); + } + count++; + ql_update_cq(rx_ring); + prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + } + if (!net_rsp) + return 0; + ql_write_cq_idx(rx_ring); + tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; + if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { + if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) + /* + * The queue got stopped because the tx_ring was full. + * Wake it up, because it's now at least 25% empty. + */ + netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); + } + + return count; +} + +static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) +{ + struct ql_adapter *qdev = rx_ring->qdev; + u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + struct ql_net_rsp_iocb *net_rsp; + int count = 0; + + /* While there are entries in the completion queue. 
*/ + while (prod != rx_ring->cnsmr_idx) { + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "cq_id = %d, prod = %d, cnsmr = %d.\n.", + rx_ring->cq_id, prod, rx_ring->cnsmr_idx); + + net_rsp = rx_ring->curr_entry; + rmb(); + switch (net_rsp->opcode) { + case OPCODE_IB_MAC_IOCB: + ql_process_mac_rx_intr(qdev, rx_ring, + (struct ib_mac_iocb_rsp *) + net_rsp); + break; + + case OPCODE_IB_AE_IOCB: + ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) + net_rsp); + break; + default: + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Hit default case, not handled! dropping the packet, opcode = %x.\n", + net_rsp->opcode); + break; + } + count++; + ql_update_cq(rx_ring); + prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); + if (count == budget) + break; + } + ql_update_buffer_queues(qdev, rx_ring); + ql_write_cq_idx(rx_ring); + return count; +} + +static int ql_napi_poll_msix(struct napi_struct *napi, int budget) +{ + struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); + struct ql_adapter *qdev = rx_ring->qdev; + struct rx_ring *trx_ring; + int i, work_done = 0; + struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; + + netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, + "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); + + /* Service the TX rings first. They start + * right after the RSS rings. */ + for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { + trx_ring = &qdev->rx_ring[i]; + /* If this TX completion ring belongs to this vector and + * it's not empty then service it. + */ + if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && + (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != + trx_ring->cnsmr_idx)) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "%s: Servicing TX completion ring %d.\n", + __func__, trx_ring->cq_id); + ql_clean_outbound_rx_ring(trx_ring); + } + } + + /* + * Now service the RSS ring if it's active. 
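+ * Only the RSS ring's completions count against the NAPI budget;
+ * the TX completion rings serviced above do not. The interrupt is
+ * re-enabled once work_done comes in under budget.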
+ */ + if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != + rx_ring->cnsmr_idx) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "%s: Servicing RX completion ring %d.\n", + __func__, rx_ring->cq_id); + work_done = ql_clean_inbound_rx_ring(rx_ring, budget); + } + + if (work_done < budget) { + napi_complete(napi); + ql_enable_completion_interrupt(qdev, rx_ring->irq); + } + return work_done; +} + +static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | + NIC_RCV_CFG_VLAN_MATCH_AND_NON); + } else { + ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); + } +} + +/** + * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter + * based on the features to enable/disable hardware vlan accel + */ +static int qlge_update_hw_vlan_features(struct net_device *ndev, + netdev_features_t features) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status = 0; + bool need_restart = netif_running(ndev); + + if (need_restart) { + status = ql_adapter_down(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring down the adapter\n"); + return status; + } + } + + /* update the features with resent change */ + ndev->features = features; + + if (need_restart) { + status = ql_adapter_up(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring up the adapter\n"); + return status; + } + } + + return status; +} + +static netdev_features_t qlge_fix_features(struct net_device *ndev, + netdev_features_t features) +{ + int err; + + /* Update the behavior of vlan accel in the adapter */ + err = qlge_update_hw_vlan_features(ndev, features); + if (err) + return err; + + return features; +} + +static int qlge_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + qlge_vlan_mode(ndev, features); + + return 0; +} + +static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) +{ + u32 enable_bit = MAC_ADDR_E; + int err; + + err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, + MAC_ADDR_TYPE_VLAN, vid); + if (err) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init vlan address.\n"); + return err; +} + +static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + int err; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + + err = __qlge_vlan_rx_add_vid(qdev, vid); + set_bit(vid, qdev->active_vlans); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + + return err; +} + +static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) +{ + u32 enable_bit = 0; + int err; + + err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, + MAC_ADDR_TYPE_VLAN, vid); + if (err) + netif_err(qdev, ifup, qdev->ndev, + "Failed to clear vlan address.\n"); + return err; +} + +static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + int err; + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + + err = __qlge_vlan_rx_kill_vid(qdev, vid); + clear_bit(vid, qdev->active_vlans); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + + return err; +} + +static void qlge_restore_vlan(struct ql_adapter *qdev) +{ + int status; + u16 vid; + + status = 
ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return; + + for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) + __qlge_vlan_rx_add_vid(qdev, vid); + + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); +} + +/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ +static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) +{ + struct rx_ring *rx_ring = dev_id; + napi_schedule(&rx_ring->napi); + return IRQ_HANDLED; +} + +/* This handles a fatal error, MPI activity, and the default + * rx_ring in an MSI-X multiple vector environment. + * In MSI/Legacy environment it also process the rest of + * the rx_rings. + */ +static irqreturn_t qlge_isr(int irq, void *dev_id) +{ + struct rx_ring *rx_ring = dev_id; + struct ql_adapter *qdev = rx_ring->qdev; + struct intr_context *intr_context = &qdev->intr_context[0]; + u32 var; + int work_done = 0; + + spin_lock(&qdev->hw_lock); + if (atomic_read(&qdev->intr_context[0].irq_cnt)) { + netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, + "Shared Interrupt, Not ours!\n"); + spin_unlock(&qdev->hw_lock); + return IRQ_NONE; + } + spin_unlock(&qdev->hw_lock); + + var = ql_disable_completion_interrupt(qdev, intr_context->intr); + + /* + * Check for fatal error. + */ + if (var & STS_FE) { + ql_queue_asic_error(qdev); + netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); + var = ql_read32(qdev, ERR_STS); + netdev_err(qdev->ndev, "Resetting chip. " + "Error Status Register = 0x%x\n", var); + return IRQ_HANDLED; + } + + /* + * Check MPI processor activity. + */ + if ((var & STS_PI) && + (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { + /* + * We've got an async event or mailbox completion. + * Handle it and clear the source of the interrupt. + */ + netif_err(qdev, intr, qdev->ndev, + "Got MPI processor interrupt.\n"); + ql_disable_completion_interrupt(qdev, intr_context->intr); + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work_on(smp_processor_id(), + qdev->workqueue, &qdev->mpi_work, 0); + work_done++; + } + + /* + * Get the bit-mask that shows the active queues for this + * pass. Compare it to the queues that this irq services + * and call napi if there's a match. + */ + var = ql_read32(qdev, ISR1); + if (var & intr_context->irq_mask) { + netif_info(qdev, intr, qdev->ndev, + "Waking handler for rx_ring[0].\n"); + ql_disable_completion_interrupt(qdev, intr_context->intr); + napi_schedule(&rx_ring->napi); + work_done++; + } + ql_enable_completion_interrupt(qdev, intr_context->intr); + return work_done ? 
IRQ_HANDLED : IRQ_NONE; +} + +static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) +{ + + if (skb_is_gso(skb)) { + int err; + __be16 l3_proto = vlan_get_protocol(skb); + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; + mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; + mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); + mac_iocb_ptr->total_hdrs_len = + cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); + mac_iocb_ptr->net_trans_offset = + cpu_to_le16(skb_network_offset(skb) | + skb_transport_offset(skb) + << OB_MAC_TRANSPORT_HDR_SHIFT); + mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); + mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; + if (likely(l3_proto == htons(ETH_P_IP))) { + struct iphdr *iph = ip_hdr(skb); + iph->check = 0; + mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else if (l3_proto == htons(ETH_P_IPV6)) { + mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + return 1; + } + return 0; +} + +static void ql_hw_csum_setup(struct sk_buff *skb, + struct ob_mac_tso_iocb_req *mac_iocb_ptr) +{ + int len; + struct iphdr *iph = ip_hdr(skb); + __sum16 *check; + mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; + mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); + mac_iocb_ptr->net_trans_offset = + cpu_to_le16(skb_network_offset(skb) | + skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); + + mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; + len = (ntohs(iph->tot_len) - (iph->ihl << 2)); + if (likely(iph->protocol == IPPROTO_TCP)) { + check = &(tcp_hdr(skb)->check); + mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; + mac_iocb_ptr->total_hdrs_len = + cpu_to_le16(skb_transport_offset(skb) + + (tcp_hdr(skb)->doff << 2)); + } else { + check = &(udp_hdr(skb)->check); + mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; + mac_iocb_ptr->total_hdrs_len = + cpu_to_le16(skb_transport_offset(skb) + + sizeof(struct udphdr)); + } + *check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, len, iph->protocol, 0); +} + +static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) +{ + struct tx_ring_desc *tx_ring_desc; + struct ob_mac_iocb_req *mac_iocb_ptr; + struct ql_adapter *qdev = netdev_priv(ndev); + int tso; + struct tx_ring *tx_ring; + u32 tx_ring_idx = (u32) skb->queue_mapping; + + tx_ring = &qdev->tx_ring[tx_ring_idx]; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { + netif_info(qdev, tx_queued, qdev->ndev, + "%s: BUG! shutting down tx queue %d due to lack of resources.\n", + __func__, tx_ring_idx); + netif_stop_subqueue(ndev, tx_ring->wq_id); + tx_ring->tx_errors++; + return NETDEV_TX_BUSY; + } + tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; + mac_iocb_ptr = tx_ring_desc->queue_entry; + memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); + + mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; + mac_iocb_ptr->tid = tx_ring_desc->index; + /* We use the upper 32-bits to store the tx queue for this IO. + * When we get the completion we can use it to establish the context. 
+ */ + mac_iocb_ptr->txq_idx = tx_ring_idx; + tx_ring_desc->skb = skb; + + mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); + + if (skb_vlan_tag_present(skb)) { + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb)); + mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; + mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); + } + tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { + ql_hw_csum_setup(skb, + (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); + } + if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != + NETDEV_TX_OK) { + netif_err(qdev, tx_queued, qdev->ndev, + "Could not map the segments.\n"); + tx_ring->tx_errors++; + return NETDEV_TX_BUSY; + } + QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); + tx_ring->prod_idx++; + if (tx_ring->prod_idx == tx_ring->wq_len) + tx_ring->prod_idx = 0; + wmb(); + + ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); + netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, + "tx queued, slot %d, len %d\n", + tx_ring->prod_idx, skb->len); + + atomic_dec(&tx_ring->tx_count); + + if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { + netif_stop_subqueue(ndev, tx_ring->wq_id); + if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) + /* + * The queue got stopped because the tx_ring was full. + * Wake it up, because it's now at least 25% empty. + */ + netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); + } + return NETDEV_TX_OK; +} + + +static void ql_free_shadow_space(struct ql_adapter *qdev) +{ + if (qdev->rx_ring_shadow_reg_area) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->rx_ring_shadow_reg_area, + qdev->rx_ring_shadow_reg_dma); + qdev->rx_ring_shadow_reg_area = NULL; + } + if (qdev->tx_ring_shadow_reg_area) { + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->tx_ring_shadow_reg_area, + qdev->tx_ring_shadow_reg_dma); + qdev->tx_ring_shadow_reg_area = NULL; + } +} + +static int ql_alloc_shadow_space(struct ql_adapter *qdev) +{ + qdev->rx_ring_shadow_reg_area = + pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, + &qdev->rx_ring_shadow_reg_dma); + if (qdev->rx_ring_shadow_reg_area == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Allocation of RX shadow space failed.\n"); + return -ENOMEM; + } + + qdev->tx_ring_shadow_reg_area = + pci_zalloc_consistent(qdev->pdev, PAGE_SIZE, + &qdev->tx_ring_shadow_reg_dma); + if (qdev->tx_ring_shadow_reg_area == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Allocation of TX shadow space failed.\n"); + goto err_wqp_sh_area; + } + return 0; + +err_wqp_sh_area: + pci_free_consistent(qdev->pdev, + PAGE_SIZE, + qdev->rx_ring_shadow_reg_area, + qdev->rx_ring_shadow_reg_dma); + return -ENOMEM; +} + +static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) +{ + struct tx_ring_desc *tx_ring_desc; + int i; + struct ob_mac_iocb_req *mac_iocb_ptr; + + mac_iocb_ptr = tx_ring->wq_base; + tx_ring_desc = tx_ring->q; + for (i = 0; i < tx_ring->wq_len; i++) { + tx_ring_desc->index = i; + tx_ring_desc->skb = NULL; + tx_ring_desc->queue_entry = mac_iocb_ptr; + mac_iocb_ptr++; + tx_ring_desc++; + } + atomic_set(&tx_ring->tx_count, tx_ring->wq_len); +} + +static void ql_free_tx_resources(struct ql_adapter *qdev, + struct tx_ring *tx_ring) +{ + if (tx_ring->wq_base) { + pci_free_consistent(qdev->pdev, tx_ring->wq_size, + tx_ring->wq_base, tx_ring->wq_base_dma); + tx_ring->wq_base = NULL; + } + 
kfree(tx_ring->q); + tx_ring->q = NULL; +} + +static int ql_alloc_tx_resources(struct ql_adapter *qdev, + struct tx_ring *tx_ring) +{ + tx_ring->wq_base = + pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, + &tx_ring->wq_base_dma); + + if ((tx_ring->wq_base == NULL) || + tx_ring->wq_base_dma & WQ_ADDR_ALIGN) + goto pci_alloc_err; + + tx_ring->q = + kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); + if (tx_ring->q == NULL) + goto err; + + return 0; +err: + pci_free_consistent(qdev->pdev, tx_ring->wq_size, + tx_ring->wq_base, tx_ring->wq_base_dma); + tx_ring->wq_base = NULL; +pci_alloc_err: + netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); + return -ENOMEM; +} + +static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + struct bq_desc *lbq_desc; + + uint32_t curr_idx, clean_idx; + + curr_idx = rx_ring->lbq_curr_idx; + clean_idx = rx_ring->lbq_clean_idx; + while (curr_idx != clean_idx) { + lbq_desc = &rx_ring->lbq[curr_idx]; + + if (lbq_desc->p.pg_chunk.last_flag) { + pci_unmap_page(qdev->pdev, + lbq_desc->p.pg_chunk.map, + ql_lbq_block_size(qdev), + PCI_DMA_FROMDEVICE); + lbq_desc->p.pg_chunk.last_flag = 0; + } + + put_page(lbq_desc->p.pg_chunk.page); + lbq_desc->p.pg_chunk.page = NULL; + + if (++curr_idx == rx_ring->lbq_len) + curr_idx = 0; + + } + if (rx_ring->pg_chunk.page) { + pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); + put_page(rx_ring->pg_chunk.page); + rx_ring->pg_chunk.page = NULL; + } +} + +static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *sbq_desc; + + for (i = 0; i < rx_ring->sbq_len; i++) { + sbq_desc = &rx_ring->sbq[i]; + if (sbq_desc == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "sbq_desc %d is NULL.\n", i); + return; + } + if (sbq_desc->p.skb) { + pci_unmap_single(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(sbq_desc->p.skb); + sbq_desc->p.skb = NULL; + } + } +} + +/* Free all large and small rx buffers associated + * with the completion queues for this device. 
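+ * They are re-populated by ql_alloc_rx_buffers() when the adapter is + * brought back up (see ql_adapter_up()).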
+ */ +static void ql_free_rx_buffers(struct ql_adapter *qdev) +{ + int i; + struct rx_ring *rx_ring; + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + if (rx_ring->lbq) + ql_free_lbq_buffers(qdev, rx_ring); + if (rx_ring->sbq) + ql_free_sbq_buffers(qdev, rx_ring); + } +} + +static void ql_alloc_rx_buffers(struct ql_adapter *qdev) +{ + struct rx_ring *rx_ring; + int i; + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + if (rx_ring->type != TX_Q) + ql_update_buffer_queues(qdev, rx_ring); + } +} + +static void ql_init_lbq_ring(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *lbq_desc; + __le64 *bq = rx_ring->lbq_base; + + memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); + for (i = 0; i < rx_ring->lbq_len; i++) { + lbq_desc = &rx_ring->lbq[i]; + memset(lbq_desc, 0, sizeof(*lbq_desc)); + lbq_desc->index = i; + lbq_desc->addr = bq; + bq++; + } +} + +static void ql_init_sbq_ring(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + int i; + struct bq_desc *sbq_desc; + __le64 *bq = rx_ring->sbq_base; + + memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); + for (i = 0; i < rx_ring->sbq_len; i++) { + sbq_desc = &rx_ring->sbq[i]; + memset(sbq_desc, 0, sizeof(*sbq_desc)); + sbq_desc->index = i; + sbq_desc->addr = bq; + bq++; + } +} + +static void ql_free_rx_resources(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + /* Free the small buffer queue. */ + if (rx_ring->sbq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->sbq_size, + rx_ring->sbq_base, rx_ring->sbq_base_dma); + rx_ring->sbq_base = NULL; + } + + /* Free the small buffer queue control blocks. */ + kfree(rx_ring->sbq); + rx_ring->sbq = NULL; + + /* Free the large buffer queue. */ + if (rx_ring->lbq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->lbq_size, + rx_ring->lbq_base, rx_ring->lbq_base_dma); + rx_ring->lbq_base = NULL; + } + + /* Free the large buffer queue control blocks. */ + kfree(rx_ring->lbq); + rx_ring->lbq = NULL; + + /* Free the rx queue. */ + if (rx_ring->cq_base) { + pci_free_consistent(qdev->pdev, + rx_ring->cq_size, + rx_ring->cq_base, rx_ring->cq_base_dma); + rx_ring->cq_base = NULL; + } +} + +/* Allocate queues and buffers for this completions queue based + * on the values in the parameter structure. */ +static int ql_alloc_rx_resources(struct ql_adapter *qdev, + struct rx_ring *rx_ring) +{ + + /* + * Allocate the completion queue for this rx_ring. + */ + rx_ring->cq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, + &rx_ring->cq_base_dma); + + if (rx_ring->cq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); + return -ENOMEM; + } + + if (rx_ring->sbq_len) { + /* + * Allocate small buffer queue. + */ + rx_ring->sbq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, + &rx_ring->sbq_base_dma); + + if (rx_ring->sbq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Small buffer queue allocation failed.\n"); + goto err_mem; + } + + /* + * Allocate small buffer queue control blocks. + */ + rx_ring->sbq = kmalloc_array(rx_ring->sbq_len, + sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->sbq == NULL) + goto err_mem; + + ql_init_sbq_ring(qdev, rx_ring); + } + + if (rx_ring->lbq_len) { + /* + * Allocate large buffer queue. 
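+ * The queue itself is an array of __le64 slots, one per large buffer + * (lbq_size = lbq_len * sizeof(__le64)); ql_init_lbq_ring() points each + * bq_desc->addr at its slot.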
+ */ + rx_ring->lbq_base = + pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, + &rx_ring->lbq_base_dma); + + if (rx_ring->lbq_base == NULL) { + netif_err(qdev, ifup, qdev->ndev, + "Large buffer queue allocation failed.\n"); + goto err_mem; + } + /* + * Allocate large buffer queue control blocks. + */ + rx_ring->lbq = kmalloc_array(rx_ring->lbq_len, + sizeof(struct bq_desc), + GFP_KERNEL); + if (rx_ring->lbq == NULL) + goto err_mem; + + ql_init_lbq_ring(qdev, rx_ring); + } + + return 0; + +err_mem: + ql_free_rx_resources(qdev, rx_ring); + return -ENOMEM; +} + +static void ql_tx_ring_clean(struct ql_adapter *qdev) +{ + struct tx_ring *tx_ring; + struct tx_ring_desc *tx_ring_desc; + int i, j; + + /* + * Loop through all queues and free + * any resources. + */ + for (j = 0; j < qdev->tx_ring_count; j++) { + tx_ring = &qdev->tx_ring[j]; + for (i = 0; i < tx_ring->wq_len; i++) { + tx_ring_desc = &tx_ring->q[i]; + if (tx_ring_desc && tx_ring_desc->skb) { + netif_err(qdev, ifdown, qdev->ndev, + "Freeing lost SKB %p, from queue %d, index %d.\n", + tx_ring_desc->skb, j, + tx_ring_desc->index); + ql_unmap_send(qdev, tx_ring_desc, + tx_ring_desc->map_cnt); + dev_kfree_skb(tx_ring_desc->skb); + tx_ring_desc->skb = NULL; + } + } + } +} + +static void ql_free_mem_resources(struct ql_adapter *qdev) +{ + int i; + + for (i = 0; i < qdev->tx_ring_count; i++) + ql_free_tx_resources(qdev, &qdev->tx_ring[i]); + for (i = 0; i < qdev->rx_ring_count; i++) + ql_free_rx_resources(qdev, &qdev->rx_ring[i]); + ql_free_shadow_space(qdev); +} + +static int ql_alloc_mem_resources(struct ql_adapter *qdev) +{ + int i; + + /* Allocate space for our shadow registers and such. */ + if (ql_alloc_shadow_space(qdev)) + return -ENOMEM; + + for (i = 0; i < qdev->rx_ring_count; i++) { + if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { + netif_err(qdev, ifup, qdev->ndev, + "RX resource allocation failed.\n"); + goto err_mem; + } + } + /* Allocate tx queue resources */ + for (i = 0; i < qdev->tx_ring_count; i++) { + if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { + netif_err(qdev, ifup, qdev->ndev, + "TX resource allocation failed.\n"); + goto err_mem; + } + } + return 0; + +err_mem: + ql_free_mem_resources(qdev); + return -ENOMEM; +} + +/* Set up the rx ring control block and pass it to the chip. + * The control block is defined as + * "Completion Queue Initialization Control Block", or cqicb. + */ +static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) +{ + struct cqicb *cqicb = &rx_ring->cqicb; + void *shadow_reg = qdev->rx_ring_shadow_reg_area + + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); + u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + + (rx_ring->cq_id * RX_RING_SHADOW_SPACE); + void __iomem *doorbell_area = + qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); + int err = 0; + u16 bq_len; + u64 tmp; + __le64 *base_indirect_ptr; + int page_entries; + + /* Set up the shadow registers for this ring. 
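+ * Each completion queue's shadow area holds one u64 that the chip + * updates with the CQ producer index, followed by the large buffer queue + * base-address indirection list and then the small buffer queue list.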
*/ + rx_ring->prod_idx_sh_reg = shadow_reg; + rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; + *rx_ring->prod_idx_sh_reg = 0; + shadow_reg += sizeof(u64); + shadow_reg_dma += sizeof(u64); + rx_ring->lbq_base_indirect = shadow_reg; + rx_ring->lbq_base_indirect_dma = shadow_reg_dma; + shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + rx_ring->sbq_base_indirect = shadow_reg; + rx_ring->sbq_base_indirect_dma = shadow_reg_dma; + + /* PCI doorbell mem area + 0x00 for consumer index register */ + rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; + rx_ring->cnsmr_idx = 0; + rx_ring->curr_entry = rx_ring->cq_base; + + /* PCI doorbell mem area + 0x04 for valid register */ + rx_ring->valid_db_reg = doorbell_area + 0x04; + + /* PCI doorbell mem area + 0x18 for large buffer consumer */ + rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); + + /* PCI doorbell mem area + 0x1c */ + rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); + + memset((void *)cqicb, 0, sizeof(struct cqicb)); + cqicb->msix_vect = rx_ring->irq; + + bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; + cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); + + cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); + + cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); + + /* + * Set up the control block load flags. + */ + cqicb->flags = FLAGS_LC | /* Load queue base address */ + FLAGS_LV | /* Load MSI-X vector */ + FLAGS_LI; /* Load irq delay values */ + if (rx_ring->lbq_len) { + cqicb->flags |= FLAGS_LL; /* Load lbq values */ + tmp = (u64)rx_ring->lbq_base_dma; + base_indirect_ptr = rx_ring->lbq_base_indirect; + page_entries = 0; + do { + *base_indirect_ptr = cpu_to_le64(tmp); + tmp += DB_PAGE_SIZE; + base_indirect_ptr++; + page_entries++; + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); + cqicb->lbq_addr = + cpu_to_le64(rx_ring->lbq_base_indirect_dma); + bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : + (u16) rx_ring->lbq_buf_size; + cqicb->lbq_buf_size = cpu_to_le16(bq_len); + bq_len = (rx_ring->lbq_len == 65536) ? 0 : + (u16) rx_ring->lbq_len; + cqicb->lbq_len = cpu_to_le16(bq_len); + rx_ring->lbq_prod_idx = 0; + rx_ring->lbq_curr_idx = 0; + rx_ring->lbq_clean_idx = 0; + rx_ring->lbq_free_cnt = rx_ring->lbq_len; + } + if (rx_ring->sbq_len) { + cqicb->flags |= FLAGS_LS; /* Load sbq values */ + tmp = (u64)rx_ring->sbq_base_dma; + base_indirect_ptr = rx_ring->sbq_base_indirect; + page_entries = 0; + do { + *base_indirect_ptr = cpu_to_le64(tmp); + tmp += DB_PAGE_SIZE; + base_indirect_ptr++; + page_entries++; + } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); + cqicb->sbq_addr = + cpu_to_le64(rx_ring->sbq_base_indirect_dma); + cqicb->sbq_buf_size = + cpu_to_le16((u16)(rx_ring->sbq_buf_size)); + bq_len = (rx_ring->sbq_len == 65536) ? 0 : + (u16) rx_ring->sbq_len; + cqicb->sbq_len = cpu_to_le16(bq_len); + rx_ring->sbq_prod_idx = 0; + rx_ring->sbq_curr_idx = 0; + rx_ring->sbq_clean_idx = 0; + rx_ring->sbq_free_cnt = rx_ring->sbq_len; + } + switch (rx_ring->type) { + case TX_Q: + cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); + cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); + break; + case RX_Q: + /* Inbound completion handling rx_rings run in + * separate NAPI contexts. 
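+ * Each one is registered below via netif_napi_add() with + * ql_napi_poll_msix() as its poll handler and the standard NAPI weight of 64.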
+ */ + netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, + 64); + cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); + cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); + break; + default: + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Invalid rx_ring->type = %d.\n", rx_ring->type); + } + err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), + CFG_LCQ, rx_ring->cq_id); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); + return err; + } + return err; +} + +static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) +{ + struct wqicb *wqicb = (struct wqicb *)tx_ring; + void __iomem *doorbell_area = + qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); + void *shadow_reg = qdev->tx_ring_shadow_reg_area + + (tx_ring->wq_id * sizeof(u64)); + u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + + (tx_ring->wq_id * sizeof(u64)); + int err = 0; + + /* + * Assign doorbell registers for this tx_ring. + */ + /* TX PCI doorbell mem area for tx producer index */ + tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; + tx_ring->prod_idx = 0; + /* TX PCI doorbell mem area + 0x04 */ + tx_ring->valid_db_reg = doorbell_area + 0x04; + + /* + * Assign shadow registers for this tx_ring. + */ + tx_ring->cnsmr_idx_sh_reg = shadow_reg; + tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; + + wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); + wqicb->flags = cpu_to_le16(Q_FLAGS_LC | + Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); + wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); + wqicb->rid = 0; + wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); + + wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); + + ql_init_tx_ring(qdev, tx_ring); + + err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, + (u16) tx_ring->wq_id); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); + return err; + } + return err; +} + +static void ql_disable_msix(struct ql_adapter *qdev) +{ + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + pci_disable_msix(qdev->pdev); + clear_bit(QL_MSIX_ENABLED, &qdev->flags); + kfree(qdev->msi_x_entry); + qdev->msi_x_entry = NULL; + } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { + pci_disable_msi(qdev->pdev); + clear_bit(QL_MSI_ENABLED, &qdev->flags); + } +} + +/* We start by trying to get the number of vectors + * stored in qdev->intr_count. If we don't get that + * many then we reduce the count and try again. + */ +static void ql_enable_msix(struct ql_adapter *qdev) +{ + int i, err; + + /* Get the MSIX vectors. */ + if (qlge_irq_type == MSIX_IRQ) { + /* Try to alloc space for the msix struct, + * if it fails then go to MSI/legacy. 
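+ * pci_enable_msix_range() returns the number of vectors actually granted + * (from 1 up to intr_count) or a negative errno, so intr_count is trimmed + * to whatever we are given.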
+ */ + qdev->msi_x_entry = kcalloc(qdev->intr_count, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!qdev->msi_x_entry) { + qlge_irq_type = MSI_IRQ; + goto msi; + } + + for (i = 0; i < qdev->intr_count; i++) + qdev->msi_x_entry[i].entry = i; + + err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry, + 1, qdev->intr_count); + if (err < 0) { + kfree(qdev->msi_x_entry); + qdev->msi_x_entry = NULL; + netif_warn(qdev, ifup, qdev->ndev, + "MSI-X Enable failed, trying MSI.\n"); + qlge_irq_type = MSI_IRQ; + } else { + qdev->intr_count = err; + set_bit(QL_MSIX_ENABLED, &qdev->flags); + netif_info(qdev, ifup, qdev->ndev, + "MSI-X Enabled, got %d vectors.\n", + qdev->intr_count); + return; + } + } +msi: + qdev->intr_count = 1; + if (qlge_irq_type == MSI_IRQ) { + if (!pci_enable_msi(qdev->pdev)) { + set_bit(QL_MSI_ENABLED, &qdev->flags); + netif_info(qdev, ifup, qdev->ndev, + "Running with MSI interrupts.\n"); + return; + } + } + qlge_irq_type = LEG_IRQ; + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "Running with legacy interrupts.\n"); +} + +/* Each vector services 1 RSS ring and and 1 or more + * TX completion rings. This function loops through + * the TX completion rings and assigns the vector that + * will service it. An example would be if there are + * 2 vectors (so 2 RSS rings) and 8 TX completion rings. + * This would mean that vector 0 would service RSS ring 0 + * and TX completion rings 0,1,2 and 3. Vector 1 would + * service RSS ring 1 and TX completion rings 4,5,6 and 7. + */ +static void ql_set_tx_vect(struct ql_adapter *qdev) +{ + int i, j, vect; + u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Assign irq vectors to TX rx_rings.*/ + for (vect = 0, j = 0, i = qdev->rss_ring_count; + i < qdev->rx_ring_count; i++) { + if (j == tx_rings_per_vector) { + vect++; + j = 0; + } + qdev->rx_ring[i].irq = vect; + j++; + } + } else { + /* For single vector all rings have an irq + * of zero. + */ + for (i = 0; i < qdev->rx_ring_count; i++) + qdev->rx_ring[i].irq = 0; + } +} + +/* Set the interrupt mask for this vector. Each vector + * will service 1 RSS ring and 1 or more TX completion + * rings. This function sets up a bit mask per vector + * that indicates which rings it services. + */ +static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) +{ + int j, vect = ctx->intr; + u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Add the RSS ring serviced by this vector + * to the mask. + */ + ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); + /* Add the TX ring(s) serviced by this vector + * to the mask. */ + for (j = 0; j < tx_rings_per_vector; j++) { + ctx->irq_mask |= + (1 << qdev->rx_ring[qdev->rss_ring_count + + (vect * tx_rings_per_vector) + j].cq_id); + } + } else { + /* For single vector we just shift each queue's + * ID into the mask. + */ + for (j = 0; j < qdev->rx_ring_count; j++) + ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); + } +} + +/* + * Here we build the intr_context structures based on + * our rx_ring count and intr vector count. + * The intr_context structure is used to hook each vector + * to possibly different handlers. 
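+ * Continuing the example above, with 2 MSI-X vectors and 8 TX completion + * rings (tx_rings_per_vector = 4): ql_set_irq_mask() leaves vector 0 with + * an irq_mask covering cq_ids 0 and 2-5 (0x3d) and vector 1 covering + * cq_ids 1 and 6-9 (0x3c2).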
+ */ +static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) +{ + int i = 0; + struct intr_context *intr_context = &qdev->intr_context[0]; + + if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { + /* Each rx_ring has it's + * own intr_context since we have separate + * vectors for each queue. + */ + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + qdev->rx_ring[i].irq = i; + intr_context->intr = i; + intr_context->qdev = qdev; + /* Set up this vector's bit-mask that indicates + * which queues it services. + */ + ql_set_irq_mask(qdev, intr_context); + /* + * We set up each vectors enable/disable/read bits so + * there's no bit/mask calculations in the critical path. + */ + intr_context->intr_en_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD + | i; + intr_context->intr_dis_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | + INTR_EN_IHD | i; + intr_context->intr_read_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | + i; + if (i == 0) { + /* The first vector/queue handles + * broadcast/multicast, fatal errors, + * and firmware events. This in addition + * to normal inbound NAPI processing. + */ + intr_context->handler = qlge_isr; + sprintf(intr_context->name, "%s-rx-%d", + qdev->ndev->name, i); + } else { + /* + * Inbound queues handle unicast frames only. + */ + intr_context->handler = qlge_msix_rx_isr; + sprintf(intr_context->name, "%s-rx-%d", + qdev->ndev->name, i); + } + } + } else { + /* + * All rx_rings use the same intr_context since + * there is only one vector. + */ + intr_context->intr = 0; + intr_context->qdev = qdev; + /* + * We set up each vectors enable/disable/read bits so + * there's no bit/mask calculations in the critical path. + */ + intr_context->intr_en_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; + intr_context->intr_dis_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | + INTR_EN_TYPE_DISABLE; + intr_context->intr_read_mask = + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; + /* + * Single interrupt means one handler for all rings. + */ + intr_context->handler = qlge_isr; + sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); + /* Set up this vector's bit-mask that indicates + * which queues it services. In this case there is + * a single vector so it will service all RSS and + * TX completion rings. + */ + ql_set_irq_mask(qdev, intr_context); + } + /* Tell the TX completion rings which MSIx vector + * they will be using. 
+ */ + ql_set_tx_vect(qdev); +} + +static void ql_free_irq(struct ql_adapter *qdev) +{ + int i; + struct intr_context *intr_context = &qdev->intr_context[0]; + + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + if (intr_context->hooked) { + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + free_irq(qdev->msi_x_entry[i].vector, + &qdev->rx_ring[i]); + } else { + free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); + } + } + } + ql_disable_msix(qdev); +} + +static int ql_request_irq(struct ql_adapter *qdev) +{ + int i; + int status = 0; + struct pci_dev *pdev = qdev->pdev; + struct intr_context *intr_context = &qdev->intr_context[0]; + + ql_resolve_queues_to_irqs(qdev); + + for (i = 0; i < qdev->intr_count; i++, intr_context++) { + atomic_set(&intr_context->irq_cnt, 0); + if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { + status = request_irq(qdev->msi_x_entry[i].vector, + intr_context->handler, + 0, + intr_context->name, + &qdev->rx_ring[i]); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed request for MSIX interrupt %d.\n", + i); + goto err_irq; + } + } else { + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "trying msi or legacy interrupts.\n"); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: irq = %d.\n", __func__, pdev->irq); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: context->name = %s.\n", __func__, + intr_context->name); + netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, + "%s: dev_id = 0x%p.\n", __func__, + &qdev->rx_ring[0]); + status = + request_irq(pdev->irq, qlge_isr, + test_bit(QL_MSI_ENABLED, + &qdev-> + flags) ? 0 : IRQF_SHARED, + intr_context->name, &qdev->rx_ring[0]); + if (status) + goto err_irq; + + netif_err(qdev, ifup, qdev->ndev, + "Hooked intr %d, queue type %s, with name %s.\n", + i, + qdev->rx_ring[0].type == DEFAULT_Q ? + "DEFAULT_Q" : + qdev->rx_ring[0].type == TX_Q ? "TX_Q" : + qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", + intr_context->name); + } + intr_context->hooked = 1; + } + return status; +err_irq: + netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n"); + ql_free_irq(qdev); + return status; +} + +static int ql_start_rss(struct ql_adapter *qdev) +{ + static const u8 init_hash_seed[] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa + }; + struct ricb *ricb = &qdev->ricb; + int status = 0; + int i; + u8 *hash_id = (u8 *) ricb->hash_cq_id; + + memset((void *)ricb, 0, sizeof(*ricb)); + + ricb->base_cq = RSS_L4K; + ricb->flags = + (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); + ricb->mask = cpu_to_le16((u16)(0x3ff)); + + /* + * Fill out the Indirection Table. + */ + for (i = 0; i < 1024; i++) + hash_id[i] = (i & (qdev->rss_ring_count - 1)); + + memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); + memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); + + status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); + return status; + } + return status; +} + +static int ql_clear_routing_entries(struct ql_adapter *qdev) +{ + int i, status = 0; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + /* Clear all the entries in the routing table. 
*/ + for (i = 0; i < 16; i++) { + status = ql_set_routing_reg(qdev, i, 0, 0); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for CAM packets.\n"); + break; + } + } + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +/* Initialize the frame-to-queue routing. */ +static int ql_route_initialize(struct ql_adapter *qdev) +{ + int status = 0; + + /* Clear all the entries in the routing table. */ + status = ql_clear_routing_entries(qdev); + if (status) + return status; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return status; + + status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, + RT_IDX_IP_CSUM_ERR, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register " + "for IP CSUM error packets.\n"); + goto exit; + } + status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, + RT_IDX_TU_CSUM_ERR, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register " + "for TCP/UDP CSUM error packets.\n"); + goto exit; + } + status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for broadcast packets.\n"); + goto exit; + } + /* If we have more than one inbound queue, then turn on RSS in the + * routing block. + */ + if (qdev->rss_ring_count > 1) { + status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, + RT_IDX_RSS_MATCH, 1); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for MATCH RSS packets.\n"); + goto exit; + } + } + + status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, + RT_IDX_CAM_HIT, 1); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init routing register for CAM packets.\n"); +exit: + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); + return status; +} + +int ql_cam_route_initialize(struct ql_adapter *qdev) +{ + int status, set; + + /* If check if the link is up and use to + * determine if we are setting or clearing + * the MAC address in the CAM. + */ + set = ql_read32(qdev, STS); + set &= qdev->port_link_up; + status = ql_set_mac_addr(qdev, set); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); + return status; + } + + status = ql_route_initialize(qdev); + if (status) + netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); + + return status; +} + +static int ql_adapter_initialize(struct ql_adapter *qdev) +{ + u32 value, mask; + int i; + int status = 0; + + /* + * Set up the System register to halt on errors. + */ + value = SYS_EFE | SYS_FAE; + mask = value << 16; + ql_write32(qdev, SYS, mask | value); + + /* Set the default queue, and VLAN behavior. */ + value = NIC_RCV_CFG_DFQ; + mask = NIC_RCV_CFG_DFQ_MASK; + if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { + value |= NIC_RCV_CFG_RV; + mask |= (NIC_RCV_CFG_RV << 16); + } + ql_write32(qdev, NIC_RCV_CFG, (mask | value)); + + /* Set the MPI interrupt to enabled. */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + + /* Enable the function, set pagesize, enable error checking. */ + value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | + FSC_EC | FSC_VM_PAGE_4K; + value |= SPLT_SETTING; + + /* Set/clear header splitting. 
*/ + mask = FSC_VM_PAGESIZE_MASK | + FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); + ql_write32(qdev, FSC, mask | value); + + ql_write32(qdev, SPLT_HDR, SPLT_LEN); + + /* Set RX packet routing to use port/pci function on which the + * packet arrived on in addition to usual frame routing. + * This is helpful on bonding where both interfaces can have + * the same MAC address. + */ + ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); + /* Reroute all packets to our Interface. + * They may have been routed to MPI firmware + * due to WOL. + */ + value = ql_read32(qdev, MGMT_RCV_CFG); + value &= ~MGMT_RCV_CFG_RM; + mask = 0xffff0000; + + /* Sticky reg needs clearing due to WOL. */ + ql_write32(qdev, MGMT_RCV_CFG, mask); + ql_write32(qdev, MGMT_RCV_CFG, mask | value); + + /* Default WOL is enable on Mezz cards */ + if (qdev->pdev->subsystem_device == 0x0068 || + qdev->pdev->subsystem_device == 0x0180) + qdev->wol = WAKE_MAGIC; + + /* Start up the rx queues. */ + for (i = 0; i < qdev->rx_ring_count; i++) { + status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to start rx ring[%d].\n", i); + return status; + } + } + + /* If there is more than one inbound completion queue + * then download a RICB to configure RSS. + */ + if (qdev->rss_ring_count > 1) { + status = ql_start_rss(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); + return status; + } + } + + /* Start up the tx queues. */ + for (i = 0; i < qdev->tx_ring_count; i++) { + status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to start tx ring[%d].\n", i); + return status; + } + } + + /* Initialize the port and set the max framesize. */ + status = qdev->nic_ops->port_initialize(qdev); + if (status) + netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); + + /* Set up the MAC address and frame routing filter. */ + status = ql_cam_route_initialize(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init CAM/Routing tables.\n"); + return status; + } + + /* Start NAPI for the RSS queues. */ + for (i = 0; i < qdev->rss_ring_count; i++) + napi_enable(&qdev->rx_ring[i].napi); + + return status; +} + +/* Issue soft reset to chip. */ +static int ql_adapter_reset(struct ql_adapter *qdev) +{ + u32 value; + int status = 0; + unsigned long end_jiffies; + + /* Clear all the entries in the routing table. */ + status = ql_clear_routing_entries(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); + return status; + } + + end_jiffies = jiffies + + max((unsigned long)1, usecs_to_jiffies(30)); + + /* Check if bit is set then skip the mailbox command and + * clear the bit, else we are in normal reset process. + */ + if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { + /* Stop management traffic. */ + ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); + + /* Wait for the NIC and MGMNT FIFOs to empty. */ + ql_wait_fifo_empty(qdev); + } else + clear_bit(QL_ASIC_RECOVERY, &qdev->flags); + + ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); + + do { + value = ql_read32(qdev, RST_FO); + if ((value & RST_FO_FR) == 0) + break; + cpu_relax(); + } while (time_before(jiffies, end_jiffies)); + + if (value & RST_FO_FR) { + netif_err(qdev, ifdown, qdev->ndev, + "ETIMEDOUT!!! errored out of resetting the chip!\n"); + status = -ETIMEDOUT; + } + + /* Resume management traffic. 
*/ + ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); + return status; +} + +static void ql_display_dev_info(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + netif_info(qdev, probe, qdev->ndev, + "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " + "XG Roll = %d, XG Rev = %d.\n", + qdev->func, + qdev->port, + qdev->chip_rev_id & 0x0000000f, + qdev->chip_rev_id >> 4 & 0x0000000f, + qdev->chip_rev_id >> 8 & 0x0000000f, + qdev->chip_rev_id >> 12 & 0x0000000f); + netif_info(qdev, probe, qdev->ndev, + "MAC address %pM\n", ndev->dev_addr); +} + +static int ql_wol(struct ql_adapter *qdev) +{ + int status = 0; + u32 wol = MB_WOL_DISABLE; + + /* The CAM is still intact after a reset, but if we + * are doing WOL, then we may need to program the + * routing regs. We would also need to issue the mailbox + * commands to instruct the MPI what to do per the ethtool + * settings. + */ + + if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | + WAKE_MCAST | WAKE_BCAST)) { + netif_err(qdev, ifdown, qdev->ndev, + "Unsupported WOL parameter. qdev->wol = 0x%x.\n", + qdev->wol); + return -EINVAL; + } + + if (qdev->wol & WAKE_MAGIC) { + status = ql_mb_wol_set_magic(qdev, 1); + if (status) { + netif_err(qdev, ifdown, qdev->ndev, + "Failed to set magic packet on %s.\n", + qdev->ndev->name); + return status; + } else + netif_info(qdev, drv, qdev->ndev, + "Enabled magic packet successfully on %s.\n", + qdev->ndev->name); + + wol |= MB_WOL_MAGIC_PKT; + } + + if (qdev->wol) { + wol |= MB_WOL_MODE_ON; + status = ql_mb_wol_mode(qdev, wol); + netif_err(qdev, drv, qdev->ndev, + "WOL %s (wol code 0x%x) on %s\n", + (status == 0) ? "Successfully set" : "Failed", + wol, qdev->ndev->name); + } + + return status; +} + +static void ql_cancel_all_work_sync(struct ql_adapter *qdev) +{ + + /* Don't kill the reset worker thread if we + * are in the process of recovery. + */ + if (test_bit(QL_ADAPTER_UP, &qdev->flags)) + cancel_delayed_work_sync(&qdev->asic_reset_work); + cancel_delayed_work_sync(&qdev->mpi_reset_work); + cancel_delayed_work_sync(&qdev->mpi_work); + cancel_delayed_work_sync(&qdev->mpi_idc_work); + cancel_delayed_work_sync(&qdev->mpi_core_to_log); + cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); +} + +static int ql_adapter_down(struct ql_adapter *qdev) +{ + int i, status = 0; + + ql_link_off(qdev); + + ql_cancel_all_work_sync(qdev); + + for (i = 0; i < qdev->rss_ring_count; i++) + napi_disable(&qdev->rx_ring[i].napi); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + + ql_disable_interrupts(qdev); + + ql_tx_ring_clean(qdev); + + /* Call netif_napi_del() from common point. + */ + for (i = 0; i < qdev->rss_ring_count; i++) + netif_napi_del(&qdev->rx_ring[i].napi); + + status = ql_adapter_reset(qdev); + if (status) + netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", + qdev->func); + ql_free_rx_buffers(qdev); + + return status; +} + +static int ql_adapter_up(struct ql_adapter *qdev) +{ + int err = 0; + + err = ql_adapter_initialize(qdev); + if (err) { + netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); + goto err_init; + } + set_bit(QL_ADAPTER_UP, &qdev->flags); + ql_alloc_rx_buffers(qdev); + /* If the port is initialized and the + * link is up the turn on the carrier. + */ + if ((ql_read32(qdev, STS) & qdev->port_init) && + (ql_read32(qdev, STS) & qdev->port_link_up)) + ql_link_on(qdev); + /* Restore rx mode. 
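+ * Clearing the ALLMULTI/PROMISCUOUS bits first forces + * qlge_set_multicast_list() to re-program the routing registers, since it + * only writes them on state transitions.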
*/ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + + /* Restore vlan setting. */ + qlge_restore_vlan(qdev); + + ql_enable_interrupts(qdev); + ql_enable_all_completion_interrupts(qdev); + netif_tx_start_all_queues(qdev->ndev); + + return 0; +err_init: + ql_adapter_reset(qdev); + return err; +} + +static void ql_release_adapter_resources(struct ql_adapter *qdev) +{ + ql_free_mem_resources(qdev); + ql_free_irq(qdev); +} + +static int ql_get_adapter_resources(struct ql_adapter *qdev) +{ + int status = 0; + + if (ql_alloc_mem_resources(qdev)) { + netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); + return -ENOMEM; + } + status = ql_request_irq(qdev); + return status; +} + +static int qlge_close(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + + /* If we hit pci_channel_io_perm_failure + * failure condition, then we already + * brought the adapter down. + */ + if (test_bit(QL_EEH_FATAL, &qdev->flags)) { + netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); + clear_bit(QL_EEH_FATAL, &qdev->flags); + return 0; + } + + /* + * Wait for device to recover from a reset. + * (Rarely happens, but possible.) + */ + while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) + msleep(1); + ql_adapter_down(qdev); + ql_release_adapter_resources(qdev); + return 0; +} + +static int ql_configure_rings(struct ql_adapter *qdev) +{ + int i; + struct rx_ring *rx_ring; + struct tx_ring *tx_ring; + int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); + unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + + qdev->lbq_buf_order = get_order(lbq_buf_len); + + /* In a perfect world we have one RSS ring for each CPU + * and each has it's own vector. To do that we ask for + * cpu_cnt vectors. ql_enable_msix() will adjust the + * vector count to what we actually get. We then + * allocate an RSS ring for each. + * Essentially, we are doing min(cpu_count, msix_vector_count). + */ + qdev->intr_count = cpu_cnt; + ql_enable_msix(qdev); + /* Adjust the RSS ring count to the actual vector count. */ + qdev->rss_ring_count = qdev->intr_count; + qdev->tx_ring_count = cpu_cnt; + qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; + + for (i = 0; i < qdev->tx_ring_count; i++) { + tx_ring = &qdev->tx_ring[i]; + memset((void *)tx_ring, 0, sizeof(*tx_ring)); + tx_ring->qdev = qdev; + tx_ring->wq_id = i; + tx_ring->wq_len = qdev->tx_ring_size; + tx_ring->wq_size = + tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); + + /* + * The completion queue ID for the tx rings start + * immediately after the rss rings. + */ + tx_ring->cq_id = qdev->rss_ring_count + i; + } + + for (i = 0; i < qdev->rx_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + memset((void *)rx_ring, 0, sizeof(*rx_ring)); + rx_ring->qdev = qdev; + rx_ring->cq_id = i; + rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ + if (i < qdev->rss_ring_count) { + /* + * Inbound (RSS) queues. + */ + rx_ring->cq_len = qdev->rx_ring_size; + rx_ring->cq_size = + rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); + rx_ring->lbq_len = NUM_LARGE_BUFFERS; + rx_ring->lbq_size = + rx_ring->lbq_len * sizeof(__le64); + rx_ring->lbq_buf_size = (u16)lbq_buf_len; + rx_ring->sbq_len = NUM_SMALL_BUFFERS; + rx_ring->sbq_size = + rx_ring->sbq_len * sizeof(__le64); + rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; + rx_ring->type = RX_Q; + } else { + /* + * Outbound queue handles outbound completions only. 
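+ * For example, with 4 CPUs granted 4 MSI-X vectors, cq_ids 0-3 are the + * inbound RSS rings and cq_ids 4-7 are these outbound TX completion rings.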
+ */ + /* outbound cq is same size as tx_ring it services. */ + rx_ring->cq_len = qdev->tx_ring_size; + rx_ring->cq_size = + rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); + rx_ring->lbq_len = 0; + rx_ring->lbq_size = 0; + rx_ring->lbq_buf_size = 0; + rx_ring->sbq_len = 0; + rx_ring->sbq_size = 0; + rx_ring->sbq_buf_size = 0; + rx_ring->type = TX_Q; + } + } + return 0; +} + +static int qlge_open(struct net_device *ndev) +{ + int err = 0; + struct ql_adapter *qdev = netdev_priv(ndev); + + err = ql_adapter_reset(qdev); + if (err) + return err; + + err = ql_configure_rings(qdev); + if (err) + return err; + + err = ql_get_adapter_resources(qdev); + if (err) + goto error_up; + + err = ql_adapter_up(qdev); + if (err) + goto error_up; + + return err; + +error_up: + ql_release_adapter_resources(qdev); + return err; +} + +static int ql_change_rx_buffers(struct ql_adapter *qdev) +{ + struct rx_ring *rx_ring; + int i, status; + u32 lbq_buf_len; + + /* Wait for an outstanding reset to complete. */ + if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { + int i = 3; + while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { + netif_err(qdev, ifup, qdev->ndev, + "Waiting for adapter UP...\n"); + ssleep(1); + } + + if (!i) { + netif_err(qdev, ifup, qdev->ndev, + "Timed out waiting for adapter UP\n"); + return -ETIMEDOUT; + } + } + + status = ql_adapter_down(qdev); + if (status) + goto error; + + /* Get the new rx buffer size. */ + lbq_buf_len = (qdev->ndev->mtu > 1500) ? + LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; + qdev->lbq_buf_order = get_order(lbq_buf_len); + + for (i = 0; i < qdev->rss_ring_count; i++) { + rx_ring = &qdev->rx_ring[i]; + /* Set the new size. */ + rx_ring->lbq_buf_size = lbq_buf_len; + } + + status = ql_adapter_up(qdev); + if (status) + goto error; + + return status; +error: + netif_alert(qdev, ifup, qdev->ndev, + "Driver up/down cycle failed, closing device.\n"); + set_bit(QL_ADAPTER_UP, &qdev->flags); + dev_close(qdev->ndev); + return status; +} + +static int qlge_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int status; + + if (ndev->mtu == 1500 && new_mtu == 9000) { + netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); + } else if (ndev->mtu == 9000 && new_mtu == 1500) { + netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); + } else + return -EINVAL; + + queue_delayed_work(qdev->workqueue, + &qdev->mpi_port_cfg_work, 3*HZ); + + ndev->mtu = new_mtu; + + if (!netif_running(qdev->ndev)) { + return 0; + } + + status = ql_change_rx_buffers(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Changing MTU failed.\n"); + } + + return status; +} + +static struct net_device_stats *qlge_get_stats(struct net_device + *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct rx_ring *rx_ring = &qdev->rx_ring[0]; + struct tx_ring *tx_ring = &qdev->tx_ring[0]; + unsigned long pkts, mcast, dropped, errors, bytes; + int i; + + /* Get RX stats. */ + pkts = mcast = dropped = errors = bytes = 0; + for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { + pkts += rx_ring->rx_packets; + bytes += rx_ring->rx_bytes; + dropped += rx_ring->rx_dropped; + errors += rx_ring->rx_errors; + mcast += rx_ring->rx_multicast; + } + ndev->stats.rx_packets = pkts; + ndev->stats.rx_bytes = bytes; + ndev->stats.rx_dropped = dropped; + ndev->stats.rx_errors = errors; + ndev->stats.multicast = mcast; + + /* Get TX stats. 
*/ + pkts = errors = bytes = 0; + for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { + pkts += tx_ring->tx_packets; + bytes += tx_ring->tx_bytes; + errors += tx_ring->tx_errors; + } + ndev->stats.tx_packets = pkts; + ndev->stats.tx_bytes = bytes; + ndev->stats.tx_errors = errors; + return &ndev->stats; +} + +static void qlge_set_multicast_list(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct netdev_hw_addr *ha; + int i, status; + + status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); + if (status) + return; + /* + * Set or clear promiscuous mode if a + * transition is taking place. + */ + if (ndev->flags & IFF_PROMISC) { + if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { + if (ql_set_routing_reg + (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { + netif_err(qdev, hw, qdev->ndev, + "Failed to set promiscuous mode.\n"); + } else { + set_bit(QL_PROMISCUOUS, &qdev->flags); + } + } + } else { + if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { + if (ql_set_routing_reg + (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { + netif_err(qdev, hw, qdev->ndev, + "Failed to clear promiscuous mode.\n"); + } else { + clear_bit(QL_PROMISCUOUS, &qdev->flags); + } + } + } + + /* + * Set or clear all multicast mode if a + * transition is taking place. + */ + if ((ndev->flags & IFF_ALLMULTI) || + (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { + if (!test_bit(QL_ALLMULTI, &qdev->flags)) { + if (ql_set_routing_reg + (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { + netif_err(qdev, hw, qdev->ndev, + "Failed to set all-multi mode.\n"); + } else { + set_bit(QL_ALLMULTI, &qdev->flags); + } + } + } else { + if (test_bit(QL_ALLMULTI, &qdev->flags)) { + if (ql_set_routing_reg + (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { + netif_err(qdev, hw, qdev->ndev, + "Failed to clear all-multi mode.\n"); + } else { + clear_bit(QL_ALLMULTI, &qdev->flags); + } + } + } + + if (!netdev_mc_empty(ndev)) { + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + goto exit; + i = 0; + netdev_for_each_mc_addr(ha, ndev) { + if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, + MAC_ADDR_TYPE_MULTI_MAC, i)) { + netif_err(qdev, hw, qdev->ndev, + "Failed to loadmulticast address.\n"); + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + goto exit; + } + i++; + } + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + if (ql_set_routing_reg + (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { + netif_err(qdev, hw, qdev->ndev, + "Failed to set multicast match mode.\n"); + } else { + set_bit(QL_ALLMULTI, &qdev->flags); + } + } +exit: + ql_sem_unlock(qdev, SEM_RT_IDX_MASK); +} + +static int qlge_set_mac_address(struct net_device *ndev, void *p) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + struct sockaddr *addr = p; + int status; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + /* Update local copy of current mac address. 
*/ + memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); + + status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); + if (status) + return status; + status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, + MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); + if (status) + netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); + ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); + return status; +} + +static void qlge_tx_timeout(struct net_device *ndev) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + ql_queue_asic_error(qdev); +} + +static void ql_asic_reset_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, asic_reset_work.work); + int status; + rtnl_lock(); + status = ql_adapter_down(qdev); + if (status) + goto error; + + status = ql_adapter_up(qdev); + if (status) + goto error; + + /* Restore rx mode. */ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + + rtnl_unlock(); + return; +error: + netif_alert(qdev, ifup, qdev->ndev, + "Driver up/down cycle failed, closing device\n"); + + set_bit(QL_ADAPTER_UP, &qdev->flags); + dev_close(qdev->ndev); + rtnl_unlock(); +} + +static const struct nic_operations qla8012_nic_ops = { + .get_flash = ql_get_8012_flash_params, + .port_initialize = ql_8012_port_initialize, +}; + +static const struct nic_operations qla8000_nic_ops = { + .get_flash = ql_get_8000_flash_params, + .port_initialize = ql_8000_port_initialize, +}; + +/* Find the pcie function number for the other NIC + * on this chip. Since both NIC functions share a + * common firmware we have the lowest enabled function + * do any common work. Examples would be resetting + * after a fatal firmware error, or doing a firmware + * coredump. + */ +static int ql_get_alt_pcie_func(struct ql_adapter *qdev) +{ + int status = 0; + u32 temp; + u32 nic_func1, nic_func2; + + status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, + &temp); + if (status) + return status; + + nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & + MPI_TEST_NIC_FUNC_MASK); + nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & + MPI_TEST_NIC_FUNC_MASK); + + if (qdev->func == nic_func1) + qdev->alt_func = nic_func2; + else if (qdev->func == nic_func2) + qdev->alt_func = nic_func1; + else + status = -EIO; + + return status; +} + +static int ql_get_board_info(struct ql_adapter *qdev) +{ + int status; + qdev->func = + (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; + if (qdev->func > 3) + return -EIO; + + status = ql_get_alt_pcie_func(qdev); + if (status) + return status; + + qdev->port = (qdev->func < qdev->alt_func) ? 
0 : 1; + if (qdev->port) { + qdev->xg_sem_mask = SEM_XGMAC1_MASK; + qdev->port_link_up = STS_PL1; + qdev->port_init = STS_PI1; + qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; + qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; + } else { + qdev->xg_sem_mask = SEM_XGMAC0_MASK; + qdev->port_link_up = STS_PL0; + qdev->port_init = STS_PI0; + qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; + qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; + } + qdev->chip_rev_id = ql_read32(qdev, REV_ID); + qdev->device_id = qdev->pdev->device; + if (qdev->device_id == QLGE_DEVICE_ID_8012) + qdev->nic_ops = &qla8012_nic_ops; + else if (qdev->device_id == QLGE_DEVICE_ID_8000) + qdev->nic_ops = &qla8000_nic_ops; + return status; +} + +static void ql_release_all(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + + if (qdev->workqueue) { + destroy_workqueue(qdev->workqueue); + qdev->workqueue = NULL; + } + + if (qdev->reg_base) + iounmap(qdev->reg_base); + if (qdev->doorbell_area) + iounmap(qdev->doorbell_area); + vfree(qdev->mpi_coredump); + pci_release_regions(pdev); +} + +static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, + int cards_found) +{ + struct ql_adapter *qdev = netdev_priv(ndev); + int err = 0; + + memset((void *)qdev, 0, sizeof(*qdev)); + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "PCI device enable failed.\n"); + return err; + } + + qdev->ndev = ndev; + qdev->pdev = pdev; + pci_set_drvdata(pdev, ndev); + + /* Set PCIe read request size */ + err = pcie_set_readrq(pdev, 4096); + if (err) { + dev_err(&pdev->dev, "Set readrq failed.\n"); + goto err_out1; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "PCI region request failed.\n"); + return err; + } + + pci_set_master(pdev); + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + set_bit(QL_DMA64, &qdev->flags); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration.\n"); + goto err_out2; + } + + /* Set PCIe reset type for EEH to fundamental. 
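+ * needs_freset tells the PCI error-recovery (EEH) core to use a + * fundamental reset rather than a hot reset when recovering this device.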
*/ + pdev->needs_freset = 1; + pci_save_state(pdev); + qdev->reg_base = + ioremap_nocache(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); + if (!qdev->reg_base) { + dev_err(&pdev->dev, "Register mapping failed.\n"); + err = -ENOMEM; + goto err_out2; + } + + qdev->doorbell_area_size = pci_resource_len(pdev, 3); + qdev->doorbell_area = + ioremap_nocache(pci_resource_start(pdev, 3), + pci_resource_len(pdev, 3)); + if (!qdev->doorbell_area) { + dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); + err = -ENOMEM; + goto err_out2; + } + + err = ql_get_board_info(qdev); + if (err) { + dev_err(&pdev->dev, "Register access failed.\n"); + err = -EIO; + goto err_out2; + } + qdev->msg_enable = netif_msg_init(debug, default_msg); + spin_lock_init(&qdev->hw_lock); + spin_lock_init(&qdev->stats_lock); + + if (qlge_mpi_coredump) { + qdev->mpi_coredump = + vmalloc(sizeof(struct ql_mpi_coredump)); + if (qdev->mpi_coredump == NULL) { + err = -ENOMEM; + goto err_out2; + } + if (qlge_force_coredump) + set_bit(QL_FRC_COREDUMP, &qdev->flags); + } + /* make sure the EEPROM is good */ + err = qdev->nic_ops->get_flash(qdev); + if (err) { + dev_err(&pdev->dev, "Invalid FLASH.\n"); + goto err_out2; + } + + /* Keep local copy of current mac address. */ + memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); + + /* Set up the default ring sizes. */ + qdev->tx_ring_size = NUM_TX_RING_ENTRIES; + qdev->rx_ring_size = NUM_RX_RING_ENTRIES; + + /* Set up the coalescing parameters. */ + qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; + qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; + qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; + qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; + + /* + * Set up the operating parameters. + */ + qdev->workqueue = create_singlethread_workqueue(ndev->name); + INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); + INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); + INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); + INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); + INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); + INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); + init_completion(&qdev->ide_completion); + mutex_init(&qdev->mpi_mutex); + + if (!cards_found) { + dev_info(&pdev->dev, "%s\n", DRV_STRING); + dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", + DRV_NAME, DRV_VERSION); + } + return 0; +err_out2: + ql_release_all(pdev); +err_out1: + pci_disable_device(pdev); + return err; +} + +static const struct net_device_ops qlge_netdev_ops = { + .ndo_open = qlge_open, + .ndo_stop = qlge_close, + .ndo_start_xmit = qlge_send, + .ndo_change_mtu = qlge_change_mtu, + .ndo_get_stats = qlge_get_stats, + .ndo_set_rx_mode = qlge_set_multicast_list, + .ndo_set_mac_address = qlge_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = qlge_tx_timeout, + .ndo_fix_features = qlge_fix_features, + .ndo_set_features = qlge_set_features, + .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, +}; + +static void ql_timer(unsigned long data) +{ + struct ql_adapter *qdev = (struct ql_adapter *)data; + u32 var = 0; + + var = ql_read32(qdev, STS); + if (pci_channel_offline(qdev->pdev)) { + netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); + return; + } + + mod_timer(&qdev->timer, jiffies + (5*HZ)); +} + +static int qlge_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_entry) +{ + struct net_device *ndev = NULL; + struct 
ql_adapter *qdev = NULL; + static int cards_found = 0; + int err = 0; + + ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), + min(MAX_CPUS, netif_get_num_default_rss_queues())); + if (!ndev) + return -ENOMEM; + + err = ql_init_device(pdev, ndev, cards_found); + if (err < 0) { + free_netdev(ndev); + return err; + } + + qdev = netdev_priv(ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + ndev->hw_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM; + ndev->features = ndev->hw_features; + ndev->vlan_features = ndev->hw_features; + /* vlan gets same features (except vlan filter) */ + ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); + + if (test_bit(QL_DMA64, &qdev->flags)) + ndev->features |= NETIF_F_HIGHDMA; + + /* + * Set up net_device structure. + */ + ndev->tx_queue_len = qdev->tx_ring_size; + ndev->irq = pdev->irq; + + ndev->netdev_ops = &qlge_netdev_ops; + ndev->ethtool_ops = &qlge_ethtool_ops; + ndev->watchdog_timeo = 10 * HZ; + + err = register_netdev(ndev); + if (err) { + dev_err(&pdev->dev, "net device registration failed.\n"); + ql_release_all(pdev); + pci_disable_device(pdev); + free_netdev(ndev); + return err; + } + /* Start up the timer to trigger EEH if + * the bus goes dead + */ + init_timer_deferrable(&qdev->timer); + qdev->timer.data = (unsigned long)qdev; + qdev->timer.function = ql_timer; + qdev->timer.expires = jiffies + (5*HZ); + add_timer(&qdev->timer); + ql_link_off(qdev); + ql_display_dev_info(ndev); + atomic_set(&qdev->lb_count, 0); + cards_found++; + return 0; +} + +netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) +{ + return qlge_send(skb, ndev); +} + +int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) +{ + return ql_clean_inbound_rx_ring(rx_ring, budget); +} + +static void qlge_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + del_timer_sync(&qdev->timer); + ql_cancel_all_work_sync(qdev); + unregister_netdev(ndev); + ql_release_all(pdev); + pci_disable_device(pdev); + free_netdev(ndev); +} + +/* Clean up resources without touching hardware. */ +static void ql_eeh_close(struct net_device *ndev) +{ + int i; + struct ql_adapter *qdev = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + netif_carrier_off(ndev); + netif_stop_queue(ndev); + } + + /* Disabling the timer */ + del_timer_sync(&qdev->timer); + ql_cancel_all_work_sync(qdev); + + for (i = 0; i < qdev->rss_ring_count; i++) + netif_napi_del(&qdev->rx_ring[i].napi); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + ql_tx_ring_clean(qdev); + ql_free_rx_buffers(qdev); + ql_release_adapter_resources(qdev); +} + +/* + * This callback is called by the PCI subsystem whenever + * a PCI bus error is detected. 
+ */ +static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + netif_device_detach(ndev); + if (netif_running(ndev)) + ql_eeh_close(ndev); + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + dev_err(&pdev->dev, + "%s: pci_channel_io_perm_failure.\n", __func__); + ql_eeh_close(ndev); + set_bit(QL_EEH_FATAL, &qdev->flags); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/* + * This callback is called after the PCI buss has been reset. + * Basically, this tries to restart the card from scratch. + * This is a shortened version of the device probe/discovery code, + * it resembles the first-half of the () routine. + */ +static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + + pdev->error_state = pci_channel_io_normal; + + pci_restore_state(pdev); + if (pci_enable_device(pdev)) { + netif_err(qdev, ifup, qdev->ndev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + if (ql_adapter_reset(qdev)) { + netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); + set_bit(QL_EEH_FATAL, &qdev->flags); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} + +static void qlge_io_resume(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + int err = 0; + + if (netif_running(ndev)) { + err = qlge_open(ndev); + if (err) { + netif_err(qdev, ifup, qdev->ndev, + "Device initialization failed after reset.\n"); + return; + } + } else { + netif_err(qdev, ifup, qdev->ndev, + "Device was not running prior to EEH.\n"); + } + mod_timer(&qdev->timer, jiffies + (5*HZ)); + netif_device_attach(ndev); +} + +static const struct pci_error_handlers qlge_err_handler = { + .error_detected = qlge_io_error_detected, + .slot_reset = qlge_io_slot_reset, + .resume = qlge_io_resume, +}; + +static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + int err; + + netif_device_detach(ndev); + del_timer_sync(&qdev->timer); + + if (netif_running(ndev)) { + err = ql_adapter_down(qdev); + if (!err) + return err; + } + + ql_wol(qdev); + err = pci_save_state(pdev); + if (err) + return err; + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int qlge_resume(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + struct ql_adapter *qdev = netdev_priv(ndev); + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { + netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(ndev)) { + err = ql_adapter_up(qdev); + if (err) + return err; + } + + mod_timer(&qdev->timer, jiffies + (5*HZ)); + netif_device_attach(ndev); + + return 0; +} +#endif /* CONFIG_PM */ + +static void 
qlge_shutdown(struct pci_dev *pdev) +{ + qlge_suspend(pdev, PMSG_SUSPEND); +} + +static struct pci_driver qlge_driver = { + .name = DRV_NAME, + .id_table = qlge_pci_tbl, + .probe = qlge_probe, + .remove = qlge_remove, +#ifdef CONFIG_PM + .suspend = qlge_suspend, + .resume = qlge_resume, +#endif + .shutdown = qlge_shutdown, + .err_handler = &qlge_err_handler +}; + +module_pci_driver(qlge_driver); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c new file mode 100644 index 000000000..7ad146080 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c @@ -0,0 +1,1284 @@ +#include "qlge.h" + +int ql_unpause_mpi_risc(struct ql_adapter *qdev) +{ + u32 tmp; + + /* Un-pause the RISC */ + tmp = ql_read32(qdev, CSR); + if (!(tmp & CSR_RP)) + return -EIO; + + ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE); + return 0; +} + +int ql_pause_mpi_risc(struct ql_adapter *qdev) +{ + u32 tmp; + int count = UDELAY_COUNT; + + /* Pause the RISC */ + ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE); + do { + tmp = ql_read32(qdev, CSR); + if (tmp & CSR_RP) + break; + mdelay(UDELAY_DELAY); + count--; + } while (count); + return (count == 0) ? -ETIMEDOUT : 0; +} + +int ql_hard_reset_mpi_risc(struct ql_adapter *qdev) +{ + u32 tmp; + int count = UDELAY_COUNT; + + /* Reset the RISC */ + ql_write32(qdev, CSR, CSR_CMD_SET_RST); + do { + tmp = ql_read32(qdev, CSR); + if (tmp & CSR_RR) { + ql_write32(qdev, CSR, CSR_CMD_CLR_RST); + break; + } + mdelay(UDELAY_DELAY); + count--; + } while (count); + return (count == 0) ? -ETIMEDOUT : 0; +} + +int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) +{ + int status; + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); + if (status) + goto exit; + /* set up for reg read */ + ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R); + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); + if (status) + goto exit; + /* get the data */ + *data = ql_read32(qdev, PROC_DATA); +exit: + return status; +} + +int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data) +{ + int status = 0; + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); + if (status) + goto exit; + /* write the data to the data reg */ + ql_write32(qdev, PROC_DATA, data); + /* trigger the write */ + ql_write32(qdev, PROC_ADDR, reg); + /* wait for reg to come ready */ + status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); + if (status) + goto exit; +exit: + return status; +} + +int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) +{ + int status; + status = ql_write_mpi_reg(qdev, 0x00001010, 1); + return status; +} + +/* Determine if we are in charge of the firwmare. If + * we are the lower of the 2 NIC pcie functions, or if + * we are the higher function and the lower function + * is not enabled. + */ +int ql_own_firmware(struct ql_adapter *qdev) +{ + u32 temp; + + /* If we are the lower of the 2 NIC functions + * on the chip the we are responsible for + * core dump and firmware reset after an error. + */ + if (qdev->func < qdev->alt_func) + return 1; + + /* If we are the higher of the 2 NIC functions + * on the chip and the lower function is not + * enabled, then we are responsible for + * core dump and firmware reset after an error. 
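+ * The check below reads the STS register and treats bit (8 + alt_func)
+ * as the enable bit for the other NIC function: if that bit is clear,
+ * the other function is not enabled and this function owns the firmware.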
+ */ + temp = ql_read32(qdev, STS); + if (!(temp & (1 << (8 + qdev->alt_func)))) + return 1; + + return 0; + +} + +static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int i, status; + + status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); + if (status) + return -EBUSY; + for (i = 0; i < mbcp->out_count; i++) { + status = + ql_read_mpi_reg(qdev, qdev->mailbox_out + i, + &mbcp->mbox_out[i]); + if (status) { + netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n"); + break; + } + } + ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ + return status; +} + +/* Wait for a single mailbox command to complete. + * Returns zero on success. + */ +static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev) +{ + int count = 100; + u32 value; + + do { + value = ql_read32(qdev, STS); + if (value & STS_PI) + return 0; + mdelay(UDELAY_DELAY); /* 100ms */ + } while (--count); + return -ETIMEDOUT; +} + +/* Execute a single mailbox command. + * Caller must hold PROC_ADDR semaphore. + */ +static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int i, status; + + /* + * Make sure there's nothing pending. + * This shouldn't happen. + */ + if (ql_read32(qdev, CSR) & CSR_HRI) + return -EIO; + + status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); + if (status) + return status; + + /* + * Fill the outbound mailboxes. + */ + for (i = 0; i < mbcp->in_count; i++) { + status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i, + mbcp->mbox_in[i]); + if (status) + goto end; + } + /* + * Wake up the MPI firmware. + */ + ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT); +end: + ql_sem_unlock(qdev, SEM_PROC_REG_MASK); + return status; +} + +/* We are being asked by firmware to accept + * a change to the port. This is only + * a change to max frame sizes (Tx/Rx), pause + * parameters, or loopback mode. We wake up a worker + * to handler processing this since a mailbox command + * will need to be sent to ACK the request. + */ +static int ql_idc_req_aen(struct ql_adapter *qdev) +{ + int status; + struct mbox_params *mbcp = &qdev->idc_mbc; + + netif_err(qdev, drv, qdev->ndev, "Enter!\n"); + /* Get the status data and start up a thread to + * handle the request. + */ + mbcp = &qdev->idc_mbc; + mbcp->out_count = 4; + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Could not read MPI, resetting ASIC!\n"); + ql_queue_asic_error(qdev); + } else { + /* Begin polled mode early so + * we don't get another interrupt + * when we leave mpi_worker. + */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0); + } + return status; +} + +/* Process an inter-device event completion. + * If good, signal the caller's completion. + */ +static int ql_idc_cmplt_aen(struct ql_adapter *qdev) +{ + int status; + struct mbox_params *mbcp = &qdev->idc_mbc; + mbcp->out_count = 4; + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Could not read MPI, resetting RISC!\n"); + ql_queue_fw_error(qdev); + } else + /* Wake up the sleeping mpi_idc_work thread that is + * waiting for this event. 
+ */ + complete(&qdev->ide_completion); + + return status; +} + +static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + mbcp->out_count = 2; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "%s: Could not get mailbox status.\n", __func__); + return; + } + + qdev->link_status = mbcp->mbox_out[1]; + netif_err(qdev, drv, qdev->ndev, "Link Up.\n"); + + /* If we're coming back from an IDC event + * then set up the CAM and frame routing. + */ + if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { + status = ql_cam_route_initialize(qdev); + if (status) { + netif_err(qdev, ifup, qdev->ndev, + "Failed to init CAM/Routing tables.\n"); + return; + } else + clear_bit(QL_CAM_RT_SET, &qdev->flags); + } + + /* Queue up a worker to check the frame + * size information, and fix it if it's not + * to our liking. + */ + if (!test_bit(QL_PORT_CFG, &qdev->flags)) { + netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n"); + set_bit(QL_PORT_CFG, &qdev->flags); + /* Begin polled mode early so + * we don't get another interrupt + * when we leave mpi_worker dpc. + */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + queue_delayed_work(qdev->workqueue, + &qdev->mpi_port_cfg_work, 0); + } + + ql_link_on(qdev); +} + +static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 3; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n"); + + ql_link_off(qdev); +} + +static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 5; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n"); + else + netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n"); + + return status; +} + +static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 1; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n"); + else + netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n"); + + return status; +} + +static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 6; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) + netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); + else { + int i; + netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n"); + for (i = 0; i < mbcp->out_count; i++) + netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n", + i, mbcp->mbox_out[i]); + + } + + return status; +} + +static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + + mbcp->out_count = 2; + + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n"); + } else { + netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n", + mbcp->mbox_out[1]); + qdev->fw_rev_id = mbcp->mbox_out[1]; + status = ql_cam_route_initialize(qdev); + if (status) + netif_err(qdev, ifup, qdev->ndev, + "Failed to init CAM/Routing tables.\n"); + } +} + +/* Process an async event and clear it unless it's an + * error condition. + * This can get called iteratively from the mpi_work thread + * when events arrive via an interrupt. + * It also gets called when a mailbox command is polling for + * it's completion. 
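+ * Asynchronous events (AEN_LINK_UP, AEN_SYS_ERR, etc.) are dispatched
+ * to their handlers here, while mailbox completion statuses
+ * (MB_CMD_STS_*) are copied back into mbcp->mbox_out[] for the
+ * polling caller.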
*/ +static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + int orig_count = mbcp->out_count; + + /* Just get mailbox zero for now. */ + mbcp->out_count = 1; + status = ql_get_mb_sts(qdev, mbcp); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Could not read MPI, resetting ASIC!\n"); + ql_queue_asic_error(qdev); + goto end; + } + + switch (mbcp->mbox_out[0]) { + + /* This case is only active when we arrive here + * as a result of issuing a mailbox command to + * the firmware. + */ + case MB_CMD_STS_INTRMDT: + case MB_CMD_STS_GOOD: + case MB_CMD_STS_INVLD_CMD: + case MB_CMD_STS_XFC_ERR: + case MB_CMD_STS_CSUM_ERR: + case MB_CMD_STS_ERR: + case MB_CMD_STS_PARAM_ERR: + /* We can only get mailbox status if we're polling from an + * unfinished command. Get the rest of the status data and + * return back to the caller. + * We only end up here when we're polling for a mailbox + * command completion. + */ + mbcp->out_count = orig_count; + status = ql_get_mb_sts(qdev, mbcp); + return status; + + /* We are being asked by firmware to accept + * a change to the port. This is only + * a change to max frame sizes (Tx/Rx), pause + * parameters, or loopback mode. + */ + case AEN_IDC_REQ: + status = ql_idc_req_aen(qdev); + break; + + /* Process and inbound IDC event. + * This will happen when we're trying to + * change tx/rx max frame size, change pause + * parameters or loopback mode. + */ + case AEN_IDC_CMPLT: + case AEN_IDC_EXT: + status = ql_idc_cmplt_aen(qdev); + break; + + case AEN_LINK_UP: + ql_link_up(qdev, mbcp); + break; + + case AEN_LINK_DOWN: + ql_link_down(qdev, mbcp); + break; + + case AEN_FW_INIT_DONE: + /* If we're in process on executing the firmware, + * then convert the status to normal mailbox status. + */ + if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { + mbcp->out_count = orig_count; + status = ql_get_mb_sts(qdev, mbcp); + mbcp->mbox_out[0] = MB_CMD_STS_GOOD; + return status; + } + ql_init_fw_done(qdev, mbcp); + break; + + case AEN_AEN_SFP_IN: + ql_sfp_in(qdev, mbcp); + break; + + case AEN_AEN_SFP_OUT: + ql_sfp_out(qdev, mbcp); + break; + + /* This event can arrive at boot time or after an + * MPI reset if the firmware failed to initialize. + */ + case AEN_FW_INIT_FAIL: + /* If we're in process on executing the firmware, + * then convert the status to normal mailbox status. + */ + if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { + mbcp->out_count = orig_count; + status = ql_get_mb_sts(qdev, mbcp); + mbcp->mbox_out[0] = MB_CMD_STS_ERR; + return status; + } + netif_err(qdev, drv, qdev->ndev, + "Firmware initialization failed.\n"); + status = -EIO; + ql_queue_fw_error(qdev); + break; + + case AEN_SYS_ERR: + netif_err(qdev, drv, qdev->ndev, "System Error.\n"); + ql_queue_fw_error(qdev); + status = -EIO; + break; + + case AEN_AEN_LOST: + ql_aen_lost(qdev, mbcp); + break; + + case AEN_DCBX_CHG: + /* Need to support AEN 8110 */ + break; + default: + netif_err(qdev, drv, qdev->ndev, + "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); + /* Clear the MPI firmware status. */ + } +end: + ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); + /* Restore the original mailbox count to + * what the caller asked for. This can get + * changed when a mailbox command is waiting + * for a response and an AEN arrives and + * is handled. + * */ + mbcp->out_count = orig_count; + return status; +} + +/* Execute a single mailbox command. + * mbcp is a pointer to an array of u32. Each + * element in the array contains the value for it's + * respective mailbox register. 
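+ * A typical caller, such as ql_mb_about_fw() below, zeroes the
+ * mbox_params structure, sets in_count/out_count, loads the command
+ * code into mbox_in[0] and then checks mbox_out[0] against
+ * MB_CMD_STS_GOOD when this routine returns.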
+ */ +static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) +{ + int status; + unsigned long count; + + mutex_lock(&qdev->mpi_mutex); + + /* Begin polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + + /* Load the mailbox registers and wake up MPI RISC. */ + status = ql_exec_mb_cmd(qdev, mbcp); + if (status) + goto end; + + + /* If we're generating a system error, then there's nothing + * to wait for. + */ + if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR) + goto end; + + /* Wait for the command to complete. We loop + * here because some AEN might arrive while + * we're waiting for the mailbox command to + * complete. If more than 5 seconds expire we can + * assume something is wrong. */ + count = jiffies + HZ * MAILBOX_TIMEOUT; + do { + /* Wait for the interrupt to come in. */ + status = ql_wait_mbx_cmd_cmplt(qdev); + if (status) + continue; + + /* Process the event. If it's an AEN, it + * will be handled in-line or a worker + * will be spawned. If it's our completion + * we will catch it below. + */ + status = ql_mpi_handler(qdev, mbcp); + if (status) + goto end; + + /* It's either the completion for our mailbox + * command complete or an AEN. If it's our + * completion then get out. + */ + if (((mbcp->mbox_out[0] & 0x0000f000) == + MB_CMD_STS_GOOD) || + ((mbcp->mbox_out[0] & 0x0000f000) == + MB_CMD_STS_INTRMDT)) + goto done; + } while (time_before(jiffies, count)); + + netif_err(qdev, drv, qdev->ndev, + "Timed out waiting for mailbox complete.\n"); + status = -ETIMEDOUT; + goto end; + +done: + + /* Now we can clear the interrupt condition + * and look at our status. + */ + ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); + + if (((mbcp->mbox_out[0] & 0x0000f000) != + MB_CMD_STS_GOOD) && + ((mbcp->mbox_out[0] & 0x0000f000) != + MB_CMD_STS_INTRMDT)) { + status = -EIO; + } +end: + /* End polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + mutex_unlock(&qdev->mpi_mutex); + return status; +} + +/* Get MPI firmware version. This will be used for + * driver banner and for ethtool info. + * Returns zero on success. + */ +int ql_mb_about_fw(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 3; + + mbcp->mbox_in[0] = MB_CMD_ABOUT_FW; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed about firmware command\n"); + status = -EIO; + } + + /* Store the firmware version */ + qdev->fw_rev_id = mbcp->mbox_out[1]; + + return status; +} + +/* Get functional state for MPI firmware. + * Returns zero on success. + */ +int ql_mb_get_fw_state(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed Get Firmware State.\n"); + status = -EIO; + } + + /* If bit zero is set in mbx 1 then the firmware is + * running, but not initialized. This should never + * happen. 
+ */ + if (mbcp->mbox_out[1] & 1) { + netif_err(qdev, drv, qdev->ndev, + "Firmware waiting for initialization.\n"); + status = -EIO; + } + + return status; +} + +/* Send and ACK mailbox command to the firmware to + * let it continue with the change. + */ +static int ql_mb_idc_ack(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 5; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_IDC_ACK; + mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1]; + mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2]; + mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3]; + mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4]; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n"); + status = -EIO; + } + return status; +} + +/* Get link settings and maximum frame size settings + * for the current port. + * Most likely will block. + */ +int ql_mb_set_port_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 3; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG; + mbcp->mbox_in[1] = qdev->link_config; + mbcp->mbox_in[2] = qdev->max_frame_size; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) { + netif_err(qdev, drv, qdev->ndev, + "Port Config sent, wait for IDC.\n"); + } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed Set Port Configuration.\n"); + status = -EIO; + } + return status; +} + +static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, + u32 size) +{ + int status = 0; + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 9; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM; + mbcp->mbox_in[1] = LSW(addr); + mbcp->mbox_in[2] = MSW(req_dma); + mbcp->mbox_in[3] = LSW(req_dma); + mbcp->mbox_in[4] = MSW(size); + mbcp->mbox_in[5] = LSW(size); + mbcp->mbox_in[6] = MSW(MSD(req_dma)); + mbcp->mbox_in[7] = LSW(MSD(req_dma)); + mbcp->mbox_in[8] = MSW(addr); + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n"); + status = -EIO; + } + return status; +} + +/* Issue a mailbox command to dump RISC RAM. */ +int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, + u32 ram_addr, int word_count) +{ + int status; + char *my_buf; + dma_addr_t buf_dma; + + my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32), + &buf_dma); + if (!my_buf) + return -EIO; + + status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); + if (!status) + memcpy(buf, my_buf, word_count * sizeof(u32)); + + pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf, + buf_dma); + return status; +} + +/* Get link settings and maximum frame size settings + * for the current port. + * Most likely will block. 
+ */ +int ql_mb_get_port_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status = 0; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 3; + + mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed Get Port Configuration.\n"); + status = -EIO; + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "Passed Get Port Configuration.\n"); + qdev->link_config = mbcp->mbox_out[1]; + qdev->max_frame_size = mbcp->mbox_out[2]; + } + return status; +} + +int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 2; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE; + mbcp->mbox_in[1] = wol; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); + status = -EIO; + } + return status; +} + +int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + u8 *addr = qdev->ndev->dev_addr; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 8; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC; + if (enable_wol) { + mbcp->mbox_in[1] = (u32)addr[0]; + mbcp->mbox_in[2] = (u32)addr[1]; + mbcp->mbox_in[3] = (u32)addr[2]; + mbcp->mbox_in[4] = (u32)addr[3]; + mbcp->mbox_in[5] = (u32)addr[4]; + mbcp->mbox_in[6] = (u32)addr[5]; + mbcp->mbox_in[7] = 0; + } else { + mbcp->mbox_in[1] = 0; + mbcp->mbox_in[2] = 1; + mbcp->mbox_in[3] = 1; + mbcp->mbox_in[4] = 1; + mbcp->mbox_in[5] = 1; + mbcp->mbox_in[6] = 1; + mbcp->mbox_in[7] = 0; + } + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n"); + status = -EIO; + } + return status; +} + +/* IDC - Inter Device Communication... + * Some firmware commands require consent of adjacent FCOE + * function. This function waits for the OK, or a + * counter-request for a little more time.i + * The firmware will complete the request if the other + * function doesn't respond. + */ +static int ql_idc_wait(struct ql_adapter *qdev) +{ + int status = -ETIMEDOUT; + long wait_time = 1 * HZ; + struct mbox_params *mbcp = &qdev->idc_mbc; + do { + /* Wait here for the command to complete + * via the IDC process. + */ + wait_time = + wait_for_completion_timeout(&qdev->ide_completion, + wait_time); + if (!wait_time) { + netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n"); + break; + } + /* Now examine the response from the IDC process. + * We might have a good completion or a request for + * more wait time. 
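+ * AEN_IDC_EXT asks us to extend the wait, AEN_IDC_CMPLT signals
+ * success, and any other response is treated as an error.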
+ */ + if (mbcp->mbox_out[0] == AEN_IDC_EXT) { + netif_err(qdev, drv, qdev->ndev, + "IDC Time Extension from function.\n"); + wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f; + } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) { + netif_err(qdev, drv, qdev->ndev, "IDC Success.\n"); + status = 0; + break; + } else { + netif_err(qdev, drv, qdev->ndev, + "IDC: Invalid State 0x%.04x.\n", + mbcp->mbox_out[0]); + status = -EIO; + break; + } + } while (wait_time); + + return status; +} + +int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 2; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; + mbcp->mbox_in[1] = led_config; + + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed to set LED Configuration.\n"); + status = -EIO; + } + + return status; +} + +int ql_mb_get_led_cfg(struct ql_adapter *qdev) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) { + netif_err(qdev, drv, qdev->ndev, + "Failed to get LED Configuration.\n"); + status = -EIO; + } else + qdev->led_config = mbcp->mbox_out[1]; + + return status; +} + +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; + mbcp->mbox_in[1] = control; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + netif_err(qdev, drv, qdev->ndev, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + /* This indicates that the firmware is + * already in the state we are trying to + * change it to. + */ + netif_err(qdev, drv, qdev->ndev, + "Command parameters make no change.\n"); + } + return status; +} + +/* Returns a negative error code or the mailbox command status. 
*/ +static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + *control = 0; + + mbcp->in_count = 1; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { + *control = mbcp->mbox_in[1]; + return status; + } + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + netif_err(qdev, drv, qdev->ndev, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + netif_err(qdev, drv, qdev->ndev, + "Failed to get MPI traffic control.\n"); + status = -EIO; + } + return status; +} + +int ql_wait_fifo_empty(struct ql_adapter *qdev) +{ + int count = 5; + u32 mgmnt_fifo_empty; + u32 nic_fifo_empty; + + do { + nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; + ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); + mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; + if (nic_fifo_empty && mgmnt_fifo_empty) + return 0; + msleep(100); + } while (count-- > 0); + return -ETIMEDOUT; +} + +/* API called in work thread context to set new TX/RX + * maximum frame size values to match MTU. + */ +static int ql_set_port_cfg(struct ql_adapter *qdev) +{ + int status; + status = ql_mb_set_port_cfg(qdev); + if (status) + return status; + status = ql_idc_wait(qdev); + return status; +} + +/* The following routines are worker threads that process + * events that may sleep waiting for completion. + */ + +/* This thread gets the maximum TX and RX frame size values + * from the firmware and, if necessary, changes them to match + * the MTU setting. + */ +void ql_mpi_port_cfg_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_port_cfg_work.work); + int status; + + status = ql_mb_get_port_cfg(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Bug: Failed to get port config data.\n"); + goto err; + } + + if (qdev->link_config & CFG_JUMBO_FRAME_SIZE && + qdev->max_frame_size == + CFG_DEFAULT_MAX_FRAME_SIZE) + goto end; + + qdev->link_config |= CFG_JUMBO_FRAME_SIZE; + qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; + status = ql_set_port_cfg(qdev); + if (status) { + netif_err(qdev, drv, qdev->ndev, + "Bug: Failed to set port config data.\n"); + goto err; + } +end: + clear_bit(QL_PORT_CFG, &qdev->flags); + return; +err: + ql_queue_fw_error(qdev); + goto end; +} + +/* Process an inter-device request. This is issues by + * the firmware in response to another function requesting + * a change to the port. We set a flag to indicate a change + * has been made and then send a mailbox command ACKing + * the change request. + */ +void ql_mpi_idc_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_idc_work.work); + int status; + struct mbox_params *mbcp = &qdev->idc_mbc; + u32 aen; + int timeout; + + aen = mbcp->mbox_out[1] >> 16; + timeout = (mbcp->mbox_out[1] >> 8) & 0xf; + + switch (aen) { + default: + netif_err(qdev, drv, qdev->ndev, + "Bug: Unhandled IDC action.\n"); + break; + case MB_CMD_PORT_RESET: + case MB_CMD_STOP_FW: + ql_link_off(qdev); + case MB_CMD_SET_PORT_CFG: + /* Signal the resulting link up AEN + * that the frame routing and mac addr + * needs to be set. 
+ * */ + set_bit(QL_CAM_RT_SET, &qdev->flags); + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + netif_err(qdev, drv, qdev->ndev, + "Bug: No pending IDC!\n"); + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "IDC ACK not required\n"); + status = 0; /* success */ + } + break; + + /* These sub-commands issued by another (FCoE) + * function are requesting to do an operation + * on the shared resource (MPI environment). + * We currently don't issue these so we just + * ACK the request. + */ + case MB_CMD_IOP_RESTART_MPI: + case MB_CMD_IOP_PREP_LINK_DOWN: + /* Drop the link, reload the routing + * table when link comes up. + */ + ql_link_off(qdev); + set_bit(QL_CAM_RT_SET, &qdev->flags); + /* Fall through. */ + case MB_CMD_IOP_DVR_START: + case MB_CMD_IOP_FLASH_ACC: + case MB_CMD_IOP_CORE_DUMP_MPI: + case MB_CMD_IOP_PREP_UPDATE_MPI: + case MB_CMD_IOP_COMP_UPDATE_MPI: + case MB_CMD_IOP_NONE: /* an IDC without params */ + /* Do ACK if required */ + if (timeout) { + status = ql_mb_idc_ack(qdev); + if (status) + netif_err(qdev, drv, qdev->ndev, + "Bug: No pending IDC!\n"); + } else { + netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev, + "IDC ACK not required\n"); + status = 0; /* success */ + } + break; + } +} + +void ql_mpi_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_work.work); + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int err = 0; + + mutex_lock(&qdev->mpi_mutex); + /* Begin polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); + + while (ql_read32(qdev, STS) & STS_PI) { + memset(mbcp, 0, sizeof(struct mbox_params)); + mbcp->out_count = 1; + /* Don't continue if an async event + * did not complete properly. + */ + err = ql_mpi_handler(qdev, mbcp); + if (err) + break; + } + + /* End polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + mutex_unlock(&qdev->mpi_mutex); + ql_enable_completion_interrupt(qdev, 0); +} + +void ql_mpi_reset_work(struct work_struct *work) +{ + struct ql_adapter *qdev = + container_of(work, struct ql_adapter, mpi_reset_work.work); + cancel_delayed_work_sync(&qdev->mpi_work); + cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); + cancel_delayed_work_sync(&qdev->mpi_idc_work); + /* If we're not the dominant NIC function, + * then there is nothing to do. + */ + if (!ql_own_firmware(qdev)) { + netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); + return; + } + + if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) { + netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); + qdev->core_is_dumped = 1; + queue_delayed_work(qdev->workqueue, + &qdev->mpi_core_to_log, 5 * HZ); + } + ql_soft_reset_mpi_risc(qdev); +} diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig new file mode 100644 index 000000000..9a49f42ac --- /dev/null +++ b/drivers/net/ethernet/qualcomm/Kconfig @@ -0,0 +1,29 @@ +# +# Qualcomm network device configuration +# + +config NET_VENDOR_QUALCOMM + bool "Qualcomm devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Qualcomm cards. If you say Y, you will be asked + for your specific card in the following questions. 
+ +if NET_VENDOR_QUALCOMM + +config QCA7000 + tristate "Qualcomm Atheros QCA7000 support" + depends on SPI_MASTER && OF + ---help--- + This SPI protocol driver supports the Qualcomm Atheros QCA7000. + + To compile this driver as a module, choose M here. The module + will be called qcaspi. + +endif # NET_VENDOR_QUALCOMM diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile new file mode 100644 index 000000000..9da2d75db --- /dev/null +++ b/drivers/net/ethernet/qualcomm/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Qualcomm network device drivers. +# + +obj-$(CONFIG_QCA7000) += qcaspi.o +qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c new file mode 100644 index 000000000..f0066fbb4 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_7k.c @@ -0,0 +1,149 @@ +/* + * + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* This module implements the Qualcomm Atheros SPI protocol for + * kernel-based SPI device. 
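+ * Register accesses are built from a 16-bit command word
+ * (QCA7K_SPI_READ or QCA7K_SPI_WRITE, ORed with QCA7K_SPI_INTERNAL
+ * and the register offset) followed by a 16-bit data transfer; see
+ * qcaspi_read_register() and qcaspi_write_register() below.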
+ */ + +#include +#include +#include +#include +#include + +#include "qca_7k.h" + +void +qcaspi_spi_error(struct qcaspi *qca) +{ + if (qca->sync != QCASPI_SYNC_READY) + return; + + netdev_err(qca->net_dev, "spi error\n"); + qca->sync = QCASPI_SYNC_UNKNOWN; + qca->stats.spi_err++; +} + +int +qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result) +{ + __be16 rx_data; + __be16 tx_data; + struct spi_transfer *transfer; + struct spi_message *msg; + int ret; + + tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); + + if (qca->legacy_mode) { + msg = &qca->spi_msg1; + transfer = &qca->spi_xfer1; + transfer->tx_buf = &tx_data; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + spi_sync(qca->spi_dev, msg); + } else { + msg = &qca->spi_msg2; + transfer = &qca->spi_xfer2[0]; + transfer->tx_buf = &tx_data; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + } + transfer->tx_buf = NULL; + transfer->rx_buf = &rx_data; + transfer->len = QCASPI_CMD_LEN; + ret = spi_sync(qca->spi_dev, msg); + + if (!ret) + ret = msg->status; + + if (ret) + qcaspi_spi_error(qca); + else + *result = be16_to_cpu(rx_data); + + return ret; +} + +int +qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) +{ + __be16 tx_data[2]; + struct spi_transfer *transfer; + struct spi_message *msg; + int ret; + + tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); + tx_data[1] = cpu_to_be16(value); + + if (qca->legacy_mode) { + msg = &qca->spi_msg1; + transfer = &qca->spi_xfer1; + transfer->tx_buf = &tx_data[0]; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + spi_sync(qca->spi_dev, msg); + } else { + msg = &qca->spi_msg2; + transfer = &qca->spi_xfer2[0]; + transfer->tx_buf = &tx_data[0]; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + } + transfer->tx_buf = &tx_data[1]; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + ret = spi_sync(qca->spi_dev, msg); + + if (!ret) + ret = msg->status; + + if (ret) + qcaspi_spi_error(qca); + + return ret; +} + +int +qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) +{ + __be16 tx_data; + struct spi_message *msg = &qca->spi_msg1; + struct spi_transfer *transfer = &qca->spi_xfer1; + int ret; + + tx_data = cpu_to_be16(cmd); + transfer->len = sizeof(tx_data); + transfer->tx_buf = &tx_data; + transfer->rx_buf = NULL; + + ret = spi_sync(qca->spi_dev, msg); + + if (!ret) + ret = msg->status; + + if (ret) + qcaspi_spi_error(qca); + + return ret; +} diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h new file mode 100644 index 000000000..1cad851ee --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_7k.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* Qualcomm Atheros SPI register definition. + * + * This module is designed to define the Qualcomm Atheros SPI + * register placeholders. + */ + +#ifndef _QCA_7K_H +#define _QCA_7K_H + +#include + +#include "qca_spi.h" + +#define QCA7K_SPI_READ (1 << 15) +#define QCA7K_SPI_WRITE (0 << 15) +#define QCA7K_SPI_INTERNAL (1 << 14) +#define QCA7K_SPI_EXTERNAL (0 << 14) + +#define QCASPI_CMD_LEN 2 +#define QCASPI_HW_PKT_LEN 4 +#define QCASPI_HW_BUF_LEN 0xC5B + +/* SPI registers; */ +#define SPI_REG_BFR_SIZE 0x0100 +#define SPI_REG_WRBUF_SPC_AVA 0x0200 +#define SPI_REG_RDBUF_BYTE_AVA 0x0300 +#define SPI_REG_SPI_CONFIG 0x0400 +#define SPI_REG_SPI_STATUS 0x0500 +#define SPI_REG_INTR_CAUSE 0x0C00 +#define SPI_REG_INTR_ENABLE 0x0D00 +#define SPI_REG_RDBUF_WATERMARK 0x1200 +#define SPI_REG_WRBUF_WATERMARK 0x1300 +#define SPI_REG_SIGNATURE 0x1A00 +#define SPI_REG_ACTION_CTRL 0x1B00 + +/* SPI_CONFIG register definition; */ +#define QCASPI_SLAVE_RESET_BIT (1 << 6) + +/* INTR_CAUSE/ENABLE register definition. */ +#define SPI_INT_WRBUF_BELOW_WM (1 << 10) +#define SPI_INT_CPU_ON (1 << 6) +#define SPI_INT_ADDR_ERR (1 << 3) +#define SPI_INT_WRBUF_ERR (1 << 2) +#define SPI_INT_RDBUF_ERR (1 << 1) +#define SPI_INT_PKT_AVLBL (1 << 0) + +void qcaspi_spi_error(struct qcaspi *qca); +int qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result); +int qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value); +int qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd); + +#endif /* _QCA_7K_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c new file mode 100644 index 000000000..8e28234dd --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This file contains debugging routines for use in the QCA7K driver. 
+ */ + +#include +#include +#include +#include + +#include "qca_7k.h" +#include "qca_debug.h" + +#define QCASPI_MAX_REGS 0x20 + +static const u16 qcaspi_spi_regs[] = { + SPI_REG_BFR_SIZE, + SPI_REG_WRBUF_SPC_AVA, + SPI_REG_RDBUF_BYTE_AVA, + SPI_REG_SPI_CONFIG, + SPI_REG_SPI_STATUS, + SPI_REG_INTR_CAUSE, + SPI_REG_INTR_ENABLE, + SPI_REG_RDBUF_WATERMARK, + SPI_REG_WRBUF_WATERMARK, + SPI_REG_SIGNATURE, + SPI_REG_ACTION_CTRL +}; + +/* The order of these strings must match the order of the fields in + * struct qcaspi_stats + * See qca_spi.h + */ +static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = { + "Triggered resets", + "Device resets", + "Reset timeouts", + "Read errors", + "Write errors", + "Read buffer errors", + "Write buffer errors", + "Out of memory", + "Write buffer misses", + "Transmit ring full", + "SPI errors", +}; + +#ifdef CONFIG_DEBUG_FS + +static int +qcaspi_info_show(struct seq_file *s, void *what) +{ + struct qcaspi *qca = s->private; + + seq_printf(s, "RX buffer size : %lu\n", + (unsigned long)qca->buffer_size); + + seq_puts(s, "TX ring state : "); + + if (qca->txr.skb[qca->txr.head] == NULL) + seq_puts(s, "empty"); + else if (qca->txr.skb[qca->txr.tail]) + seq_puts(s, "full"); + else + seq_puts(s, "in use"); + + seq_puts(s, "\n"); + + seq_printf(s, "TX ring size : %u\n", + qca->txr.size); + + seq_printf(s, "Sync state : %u (", + (unsigned int)qca->sync); + switch (qca->sync) { + case QCASPI_SYNC_UNKNOWN: + seq_puts(s, "QCASPI_SYNC_UNKNOWN"); + break; + case QCASPI_SYNC_RESET: + seq_puts(s, "QCASPI_SYNC_RESET"); + break; + case QCASPI_SYNC_READY: + seq_puts(s, "QCASPI_SYNC_READY"); + break; + default: + seq_puts(s, "INVALID"); + break; + } + seq_puts(s, ")\n"); + + seq_printf(s, "IRQ : %d\n", + qca->spi_dev->irq); + seq_printf(s, "INTR REQ : %u\n", + qca->intr_req); + seq_printf(s, "INTR SVC : %u\n", + qca->intr_svc); + + seq_printf(s, "SPI max speed : %lu\n", + (unsigned long)qca->spi_dev->max_speed_hz); + seq_printf(s, "SPI mode : %x\n", + qca->spi_dev->mode); + seq_printf(s, "SPI chip select : %u\n", + (unsigned int)qca->spi_dev->chip_select); + seq_printf(s, "SPI legacy mode : %u\n", + (unsigned int)qca->legacy_mode); + seq_printf(s, "SPI burst length : %u\n", + (unsigned int)qca->burst_len); + + return 0; +} + +static int +qcaspi_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, qcaspi_info_show, inode->i_private); +} + +static const struct file_operations qcaspi_info_ops = { + .open = qcaspi_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void +qcaspi_init_device_debugfs(struct qcaspi *qca) +{ + struct dentry *device_root; + + device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), NULL); + qca->device_root = device_root; + + if (IS_ERR(device_root) || !device_root) { + pr_warn("failed to create debugfs directory for %s\n", + dev_name(&qca->net_dev->dev)); + return; + } + debugfs_create_file("info", S_IFREG | S_IRUGO, device_root, qca, + &qcaspi_info_ops); +} + +void +qcaspi_remove_device_debugfs(struct qcaspi *qca) +{ + debugfs_remove_recursive(qca->device_root); +} + +#else /* CONFIG_DEBUG_FS */ + +void +qcaspi_init_device_debugfs(struct qcaspi *qca) +{ +} + +void +qcaspi_remove_device_debugfs(struct qcaspi *qca) +{ +} + +#endif + +static void +qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p) +{ + struct qcaspi *qca = netdev_priv(dev); + + strlcpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver)); + strlcpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version)); + 
strlcpy(p->fw_version, "QCA7000", sizeof(p->fw_version)); + strlcpy(p->bus_info, dev_name(&qca->spi_dev->dev), + sizeof(p->bus_info)); +} + +static int +qcaspi_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->transceiver = XCVR_INTERNAL; + cmd->supported = SUPPORTED_10baseT_Half; + ethtool_cmd_speed_set(cmd, SPEED_10); + cmd->duplex = DUPLEX_HALF; + cmd->port = PORT_OTHER; + cmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static void +qcaspi_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *data) +{ + struct qcaspi *qca = netdev_priv(dev); + struct qcaspi_stats *st = &qca->stats; + + memcpy(data, st, ARRAY_SIZE(qcaspi_gstrings_stats) * sizeof(u64)); +} + +static void +qcaspi_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &qcaspi_gstrings_stats, + sizeof(qcaspi_gstrings_stats)); + break; + default: + WARN_ON(1); + break; + } +} + +static int +qcaspi_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(qcaspi_gstrings_stats); + default: + return -EINVAL; + } +} + +static int +qcaspi_get_regs_len(struct net_device *dev) +{ + return sizeof(u32) * QCASPI_MAX_REGS; +} + +static void +qcaspi_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) +{ + struct qcaspi *qca = netdev_priv(dev); + u32 *regs_buff = p; + unsigned int i; + + regs->version = 1; + memset(regs_buff, 0, sizeof(u32) * QCASPI_MAX_REGS); + + for (i = 0; i < ARRAY_SIZE(qcaspi_spi_regs); i++) { + u16 offset, value; + + qcaspi_read_register(qca, qcaspi_spi_regs[i], &value); + offset = qcaspi_spi_regs[i] >> 8; + regs_buff[offset] = value; + } +} + +static void +qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) +{ + struct qcaspi *qca = netdev_priv(dev); + + ring->rx_max_pending = 4; + ring->tx_max_pending = TX_RING_MAX_LEN; + ring->rx_pending = 4; + ring->tx_pending = qca->txr.count; +} + +static int +qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) +{ + struct qcaspi *qca = netdev_priv(dev); + + if ((ring->rx_pending) || + (ring->rx_mini_pending) || + (ring->rx_jumbo_pending)) + return -EINVAL; + + if (netif_running(dev)) + qcaspi_netdev_close(dev); + + qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); + qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); + + if (netif_running(dev)) + qcaspi_netdev_open(dev); + + return 0; +} + +static const struct ethtool_ops qcaspi_ethtool_ops = { + .get_drvinfo = qcaspi_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_settings = qcaspi_get_settings, + .get_ethtool_stats = qcaspi_get_ethtool_stats, + .get_strings = qcaspi_get_strings, + .get_sset_count = qcaspi_get_sset_count, + .get_regs_len = qcaspi_get_regs_len, + .get_regs = qcaspi_get_regs, + .get_ringparam = qcaspi_get_ringparam, + .set_ringparam = qcaspi_set_ringparam, +}; + +void qcaspi_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &qcaspi_ethtool_ops; +} diff --git a/drivers/net/ethernet/qualcomm/qca_debug.h b/drivers/net/ethernet/qualcomm/qca_debug.h new file mode 100644 index 000000000..46a785844 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_debug.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. 
+ * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This file contains debugging routines for use in the QCA7K driver. + */ + +#ifndef _QCA_DEBUG_H +#define _QCA_DEBUG_H + +#include "qca_spi.h" + +void qcaspi_init_device_debugfs(struct qcaspi *qca); + +void qcaspi_remove_device_debugfs(struct qcaspi *qca); + +void qcaspi_set_ethtool_ops(struct net_device *dev); + +#endif /* _QCA_DEBUG_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_framing.c b/drivers/net/ethernet/qualcomm/qca_framing.c new file mode 100644 index 000000000..faa924c85 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_framing.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2011, 2012, Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* Atheros ethernet framing. Every Ethernet frame is surrounded + * by an atheros frame while transmitted over a serial channel; + */ + +#include + +#include "qca_framing.h" + +u16 +qcafrm_create_header(u8 *buf, u16 length) +{ + __le16 len; + + if (!buf) + return 0; + + len = cpu_to_le16(length); + + buf[0] = 0xAA; + buf[1] = 0xAA; + buf[2] = 0xAA; + buf[3] = 0xAA; + buf[4] = len & 0xff; + buf[5] = (len >> 8) & 0xff; + buf[6] = 0; + buf[7] = 0; + + return QCAFRM_HEADER_LEN; +} + +u16 +qcafrm_create_footer(u8 *buf) +{ + if (!buf) + return 0; + + buf[0] = 0x55; + buf[1] = 0x55; + return QCAFRM_FOOTER_LEN; +} + +/* Gather received bytes and try to extract a full ethernet frame by + * following a simple state machine. + * + * Return: QCAFRM_GATHER No ethernet frame fully received yet. + * QCAFRM_NOHEAD Header expected but not found. + * QCAFRM_INVLEN Atheros frame length is invalid + * QCAFRM_NOTAIL Footer expected but not found. 
+ * > 0 Number of byte in the fully received + * Ethernet frame + */ + +s32 +qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte) +{ + s32 ret = QCAFRM_GATHER; + u16 len; + + switch (handle->state) { + case QCAFRM_HW_LEN0: + case QCAFRM_HW_LEN1: + /* by default, just go to next state */ + handle->state--; + + if (recv_byte != 0x00) { + /* first two bytes of length must be 0 */ + handle->state = QCAFRM_HW_LEN0; + } + break; + case QCAFRM_HW_LEN2: + case QCAFRM_HW_LEN3: + handle->state--; + break; + /* 4 bytes header pattern */ + case QCAFRM_WAIT_AA1: + case QCAFRM_WAIT_AA2: + case QCAFRM_WAIT_AA3: + case QCAFRM_WAIT_AA4: + if (recv_byte != 0xAA) { + ret = QCAFRM_NOHEAD; + handle->state = QCAFRM_HW_LEN0; + } else { + handle->state--; + } + break; + /* 2 bytes length. */ + /* Borrow offset field to hold length for now. */ + case QCAFRM_WAIT_LEN_BYTE0: + handle->offset = recv_byte; + handle->state = QCAFRM_WAIT_LEN_BYTE1; + break; + case QCAFRM_WAIT_LEN_BYTE1: + handle->offset = handle->offset | (recv_byte << 8); + handle->state = QCAFRM_WAIT_RSVD_BYTE1; + break; + case QCAFRM_WAIT_RSVD_BYTE1: + handle->state = QCAFRM_WAIT_RSVD_BYTE2; + break; + case QCAFRM_WAIT_RSVD_BYTE2: + len = handle->offset; + if (len > buf_len || len < QCAFRM_ETHMINLEN) { + ret = QCAFRM_INVLEN; + handle->state = QCAFRM_HW_LEN0; + } else { + handle->state = (enum qcafrm_state)(len + 1); + /* Remaining number of bytes. */ + handle->offset = 0; + } + break; + default: + /* Receiving Ethernet frame itself. */ + buf[handle->offset] = recv_byte; + handle->offset++; + handle->state--; + break; + case QCAFRM_WAIT_551: + if (recv_byte != 0x55) { + ret = QCAFRM_NOTAIL; + handle->state = QCAFRM_HW_LEN0; + } else { + handle->state = QCAFRM_WAIT_552; + } + break; + case QCAFRM_WAIT_552: + if (recv_byte != 0x55) { + ret = QCAFRM_NOTAIL; + handle->state = QCAFRM_HW_LEN0; + } else { + ret = handle->offset; + /* Frame is fully received. */ + handle->state = QCAFRM_HW_LEN0; + } + break; + } + + return ret; +} diff --git a/drivers/net/ethernet/qualcomm/qca_framing.h b/drivers/net/ethernet/qualcomm/qca_framing.h new file mode 100644 index 000000000..5d965959c --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_framing.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2011, 2012, Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* Atheros Ethernet framing. Every Ethernet frame is surrounded by an atheros + * frame while transmitted over a serial channel. 
+ */
+
+#ifndef _QCA_FRAMING_H
+#define _QCA_FRAMING_H
+
+#include
+#include
+#include
+
+/* Frame is currently being received */
+#define QCAFRM_GATHER 0
+
+/* No header byte while expecting it */
+#define QCAFRM_NOHEAD (QCAFRM_ERR_BASE - 1)
+
+/* No trailer byte while expecting it */
+#define QCAFRM_NOTAIL (QCAFRM_ERR_BASE - 2)
+
+/* Frame length is invalid */
+#define QCAFRM_INVLEN (QCAFRM_ERR_BASE - 3)
+
+/* Frame is invalid */
+#define QCAFRM_INVFRAME (QCAFRM_ERR_BASE - 4)
+
+/* Min/Max Ethernet MTU */
+#define QCAFRM_ETHMINMTU 46
+#define QCAFRM_ETHMAXMTU 1500
+
+/* Min/Max frame lengths */
+#define QCAFRM_ETHMINLEN (QCAFRM_ETHMINMTU + ETH_HLEN)
+#define QCAFRM_ETHMAXLEN (QCAFRM_ETHMAXMTU + VLAN_ETH_HLEN)
+
+/* QCA7K header len */
+#define QCAFRM_HEADER_LEN 8
+
+/* QCA7K footer len */
+#define QCAFRM_FOOTER_LEN 2
+
+/* QCA7K Framing. */
+#define QCAFRM_ERR_BASE -1000
+
+enum qcafrm_state {
+	QCAFRM_HW_LEN0 = 0x8000,
+	QCAFRM_HW_LEN1 = QCAFRM_HW_LEN0 - 1,
+	QCAFRM_HW_LEN2 = QCAFRM_HW_LEN1 - 1,
+	QCAFRM_HW_LEN3 = QCAFRM_HW_LEN2 - 1,
+
+	/* Waiting for first 0xAA of header */
+	QCAFRM_WAIT_AA1 = QCAFRM_HW_LEN3 - 1,
+
+	/* Waiting for second 0xAA of header */
+	QCAFRM_WAIT_AA2 = QCAFRM_WAIT_AA1 - 1,
+
+	/* Waiting for third 0xAA of header */
+	QCAFRM_WAIT_AA3 = QCAFRM_WAIT_AA2 - 1,
+
+	/* Waiting for fourth 0xAA of header */
+	QCAFRM_WAIT_AA4 = QCAFRM_WAIT_AA3 - 1,
+
+	/* Waiting for bytes 0-1 of length (little endian) */
+	QCAFRM_WAIT_LEN_BYTE0 = QCAFRM_WAIT_AA4 - 1,
+	QCAFRM_WAIT_LEN_BYTE1 = QCAFRM_WAIT_AA4 - 2,
+
+	/* Reserved bytes */
+	QCAFRM_WAIT_RSVD_BYTE1 = QCAFRM_WAIT_AA4 - 3,
+	QCAFRM_WAIT_RSVD_BYTE2 = QCAFRM_WAIT_AA4 - 4,
+
+	/* The frame length is used as the state until
+	 * the end of the Ethernet frame
+	 * Waiting for first 0x55 of footer
+	 */
+	QCAFRM_WAIT_551 = 1,
+
+	/* Waiting for second 0x55 of footer */
+	QCAFRM_WAIT_552 = QCAFRM_WAIT_551 - 1
+};
+
+/* Structure to maintain the frame decoding during reception. */
+
+struct qcafrm_handle {
+	/* Current decoding state */
+	enum qcafrm_state state;
+
+	/* Offset in buffer (borrowed for length too) */
+	s16 offset;
+
+	/* Frame length as kept by this module */
+	u16 len;
+};
+
+u16 qcafrm_create_header(u8 *buf, u16 len);
+
+u16 qcafrm_create_footer(u8 *buf);
+
+static inline void qcafrm_fsm_init(struct qcafrm_handle *handle)
+{
+	handle->state = QCAFRM_HW_LEN0;
+}
+
+/* Gather received bytes and try to extract a full Ethernet frame
+ * by following a simple state machine.
+ *
+ * Return:   QCAFRM_GATHER   No Ethernet frame fully received yet.
+ *           QCAFRM_NOHEAD   Header expected but not found.
+ *           QCAFRM_INVLEN   QCA7K frame length is invalid
+ *           QCAFRM_NOTAIL   Footer expected but not found.
+ *           > 0             Number of bytes in the fully received
+ *                           Ethernet frame
+ */
+
+s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte);
+
+#endif /* _QCA_FRAMING_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_framing.h b/drivers/net/ethernet/qualcomm/qca_framing.h
new file mode 100644
index 000000000..6af028d5f
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
+ * Copyright (c) 2014, I2SE GmbH
+ *
+ * Permission to use, copy, modify, and/or distribute this software
+ * for any purpose with or without fee is hereby granted, provided
+ * that the above copyright notice and this permission notice appear
+ * in all copies.
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This module implements the Qualcomm Atheros SPI protocol for + * kernel-based SPI device; it is essentially an Ethernet-to-SPI + * serial converter; + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qca_7k.h" +#include "qca_debug.h" +#include "qca_framing.h" +#include "qca_spi.h" + +#define MAX_DMA_BURST_LEN 5000 + +/* Modules parameters */ +#define QCASPI_CLK_SPEED_MIN 1000000 +#define QCASPI_CLK_SPEED_MAX 16000000 +#define QCASPI_CLK_SPEED 8000000 +static int qcaspi_clkspeed; +module_param(qcaspi_clkspeed, int, 0); +MODULE_PARM_DESC(qcaspi_clkspeed, "SPI bus clock speed (Hz). Use 1000000-16000000."); + +#define QCASPI_BURST_LEN_MIN 1 +#define QCASPI_BURST_LEN_MAX MAX_DMA_BURST_LEN +static int qcaspi_burst_len = MAX_DMA_BURST_LEN; +module_param(qcaspi_burst_len, int, 0); +MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."); + +#define QCASPI_PLUGGABLE_MIN 0 +#define QCASPI_PLUGGABLE_MAX 1 +static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; +module_param(qcaspi_pluggable, int, 0); +MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); + +#define QCASPI_MTU QCAFRM_ETHMAXMTU +#define QCASPI_TX_TIMEOUT (1 * HZ) +#define QCASPI_QCA7K_REBOOT_TIME_MS 1000 + +static void +start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause) +{ + *intr_cause = 0; + + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause); + netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause); +} + +static void +end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause) +{ + u16 intr_enable = (SPI_INT_CPU_ON | + SPI_INT_PKT_AVLBL | + SPI_INT_RDBUF_ERR | + SPI_INT_WRBUF_ERR); + + qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause); + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable); + netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause); +} + +static u32 +qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) +{ + __be16 cmd; + struct spi_message *msg = &qca->spi_msg2; + struct spi_transfer *transfer = &qca->spi_xfer2[0]; + int ret; + + cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); + transfer->tx_buf = &cmd; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + transfer->tx_buf = src; + transfer->rx_buf = NULL; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static u32 +qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) +{ + struct spi_message *msg = &qca->spi_msg1; + struct spi_transfer *transfer = &qca->spi_xfer1; + int ret; + + transfer->tx_buf = src; + transfer->rx_buf = NULL; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != len)) { + 
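+		/* Short or failed transfer: flag an SPI error and report
+		 * that nothing was written.
+		 */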
qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static u32 +qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) +{ + struct spi_message *msg = &qca->spi_msg2; + __be16 cmd; + struct spi_transfer *transfer = &qca->spi_xfer2[0]; + int ret; + + cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); + transfer->tx_buf = &cmd; + transfer->rx_buf = NULL; + transfer->len = QCASPI_CMD_LEN; + transfer = &qca->spi_xfer2[1]; + transfer->tx_buf = NULL; + transfer->rx_buf = dst; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { + qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static u32 +qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) +{ + struct spi_message *msg = &qca->spi_msg1; + struct spi_transfer *transfer = &qca->spi_xfer1; + int ret; + + transfer->tx_buf = NULL; + transfer->rx_buf = dst; + transfer->len = len; + + ret = spi_sync(qca->spi_dev, msg); + + if (ret || (msg->actual_length != len)) { + qcaspi_spi_error(qca); + return 0; + } + + return len; +} + +static int +qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb) +{ + u32 count; + u32 written; + u32 offset; + u32 len; + + len = skb->len; + + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len); + if (qca->legacy_mode) + qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); + + offset = 0; + while (len) { + count = len; + if (count > qca->burst_len) + count = qca->burst_len; + + if (qca->legacy_mode) { + written = qcaspi_write_legacy(qca, + skb->data + offset, + count); + } else { + written = qcaspi_write_burst(qca, + skb->data + offset, + count); + } + + if (written != count) + return -1; + + offset += count; + len -= count; + } + + return 0; +} + +static int +qcaspi_transmit(struct qcaspi *qca) +{ + struct net_device_stats *n_stats = &qca->net_dev->stats; + u16 available = 0; + u32 pkt_len; + u16 new_head; + u16 packets = 0; + + if (qca->txr.skb[qca->txr.head] == NULL) + return 0; + + qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available); + + while (qca->txr.skb[qca->txr.head]) { + pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN; + + if (available < pkt_len) { + if (packets == 0) + qca->stats.write_buf_miss++; + break; + } + + if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) { + qca->stats.write_err++; + return -1; + } + + packets++; + n_stats->tx_packets++; + n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len; + available -= pkt_len; + + /* remove the skb from the queue */ + /* XXX After inconsistent lock states netif_tx_lock() + * has been replaced by netif_tx_lock_bh() and so on. + */ + netif_tx_lock_bh(qca->net_dev); + dev_kfree_skb(qca->txr.skb[qca->txr.head]); + qca->txr.skb[qca->txr.head] = NULL; + qca->txr.size -= pkt_len; + new_head = qca->txr.head + 1; + if (new_head >= qca->txr.count) + new_head = 0; + qca->txr.head = new_head; + if (netif_queue_stopped(qca->net_dev)) + netif_wake_queue(qca->net_dev); + netif_tx_unlock_bh(qca->net_dev); + } + + return 0; +} + +static int +qcaspi_receive(struct qcaspi *qca) +{ + struct net_device *net_dev = qca->net_dev; + struct net_device_stats *n_stats = &net_dev->stats; + u16 available = 0; + u32 bytes_read; + u8 *cp; + + /* Allocate rx SKB if we don't have one available. */ + if (!qca->rx_skb) { + qca->rx_skb = netdev_alloc_skb(net_dev, + net_dev->mtu + VLAN_ETH_HLEN); + if (!qca->rx_skb) { + netdev_dbg(net_dev, "out of RX resources\n"); + qca->stats.out_of_mem++; + return -1; + } + } + + /* Read the packet size. 
*/ + qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available); + netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", + available); + + if (available == 0) { + netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n"); + return -1; + } + + qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available); + + if (qca->legacy_mode) + qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); + + while (available) { + u32 count = available; + + if (count > qca->burst_len) + count = qca->burst_len; + + if (qca->legacy_mode) { + bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer, + count); + } else { + bytes_read = qcaspi_read_burst(qca, qca->rx_buffer, + count); + } + + netdev_dbg(net_dev, "available: %d, byte read: %d\n", + available, bytes_read); + + if (bytes_read) { + available -= bytes_read; + } else { + qca->stats.read_err++; + return -1; + } + + cp = qca->rx_buffer; + + while ((bytes_read--) && (qca->rx_skb)) { + s32 retcode; + + retcode = qcafrm_fsm_decode(&qca->frm_handle, + qca->rx_skb->data, + skb_tailroom(qca->rx_skb), + *cp); + cp++; + switch (retcode) { + case QCAFRM_GATHER: + case QCAFRM_NOHEAD: + break; + case QCAFRM_NOTAIL: + netdev_dbg(net_dev, "no RX tail\n"); + n_stats->rx_errors++; + n_stats->rx_dropped++; + break; + case QCAFRM_INVLEN: + netdev_dbg(net_dev, "invalid RX length\n"); + n_stats->rx_errors++; + n_stats->rx_dropped++; + break; + default: + qca->rx_skb->dev = qca->net_dev; + n_stats->rx_packets++; + n_stats->rx_bytes += retcode; + skb_put(qca->rx_skb, retcode); + qca->rx_skb->protocol = eth_type_trans( + qca->rx_skb, qca->rx_skb->dev); + qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + netif_rx_ni(qca->rx_skb); + qca->rx_skb = netdev_alloc_skb(net_dev, + net_dev->mtu + VLAN_ETH_HLEN); + if (!qca->rx_skb) { + netdev_dbg(net_dev, "out of RX resources\n"); + n_stats->rx_errors++; + qca->stats.out_of_mem++; + break; + } + } + } + } + + return 0; +} + +/* Check that tx ring stores only so much bytes + * that fit into the internal QCA buffer. + */ + +static int +qcaspi_tx_ring_has_space(struct tx_ring *txr) +{ + if (txr->skb[txr->tail]) + return 0; + + return (txr->size + QCAFRM_ETHMAXLEN < QCASPI_HW_BUF_LEN) ? 1 : 0; +} + +/* Flush the tx ring. This function is only safe to + * call from the qcaspi_spi_thread. + */ + +static void +qcaspi_flush_tx_ring(struct qcaspi *qca) +{ + int i; + + /* XXX After inconsistent lock states netif_tx_lock() + * has been replaced by netif_tx_lock_bh() and so on. + */ + netif_tx_lock_bh(qca->net_dev); + for (i = 0; i < TX_RING_MAX_LEN; i++) { + if (qca->txr.skb[i]) { + dev_kfree_skb(qca->txr.skb[i]); + qca->txr.skb[i] = NULL; + qca->net_dev->stats.tx_dropped++; + } + } + qca->txr.tail = 0; + qca->txr.head = 0; + qca->txr.size = 0; + netif_tx_unlock_bh(qca->net_dev); +} + +static void +qcaspi_qca7k_sync(struct qcaspi *qca, int event) +{ + u16 signature = 0; + u16 spi_config; + u16 wrbuf_space = 0; + static u16 reset_count; + + if (event == QCASPI_EVENT_CPUON) { + /* Read signature twice, if not valid + * go back to unknown state. 
+ */ + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + if (signature != QCASPI_GOOD_SIGNATURE) { + qca->sync = QCASPI_SYNC_UNKNOWN; + netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n"); + } else { + /* ensure that the WRBUF is empty */ + qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, + &wrbuf_space); + if (wrbuf_space != QCASPI_HW_BUF_LEN) { + netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n"); + qca->sync = QCASPI_SYNC_UNKNOWN; + } else { + netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n"); + qca->sync = QCASPI_SYNC_READY; + return; + } + } + } + + switch (qca->sync) { + case QCASPI_SYNC_READY: + /* Read signature, if not valid go to unknown state. */ + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + if (signature != QCASPI_GOOD_SIGNATURE) { + qca->sync = QCASPI_SYNC_UNKNOWN; + netdev_dbg(qca->net_dev, "sync: bad signature, restart\n"); + /* don't reset right away */ + return; + } + break; + case QCASPI_SYNC_UNKNOWN: + /* Read signature, if not valid stay in unknown state */ + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + if (signature != QCASPI_GOOD_SIGNATURE) { + netdev_dbg(qca->net_dev, "sync: could not read signature to reset device, retry.\n"); + return; + } + + /* TODO: use GPIO to reset QCA7000 in legacy mode*/ + netdev_dbg(qca->net_dev, "sync: resetting device.\n"); + qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config); + spi_config |= QCASPI_SLAVE_RESET_BIT; + qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config); + + qca->sync = QCASPI_SYNC_RESET; + qca->stats.trig_reset++; + reset_count = 0; + break; + case QCASPI_SYNC_RESET: + reset_count++; + netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n", + reset_count); + if (reset_count >= QCASPI_RESET_TIMEOUT) { + /* reset did not seem to take place, try again */ + qca->sync = QCASPI_SYNC_UNKNOWN; + qca->stats.reset_timeout++; + netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n"); + } + break; + } +} + +static int +qcaspi_spi_thread(void *data) +{ + struct qcaspi *qca = data; + u16 intr_cause = 0; + + netdev_info(qca->net_dev, "SPI thread created\n"); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + if ((qca->intr_req == qca->intr_svc) && + (qca->txr.skb[qca->txr.head] == NULL) && + (qca->sync == QCASPI_SYNC_READY)) + schedule(); + + set_current_state(TASK_RUNNING); + + netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n", + qca->intr_req - qca->intr_svc, + qca->txr.skb[qca->txr.head]); + + qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE); + + if (qca->sync != QCASPI_SYNC_READY) { + netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n", + (unsigned int)qca->sync); + netif_stop_queue(qca->net_dev); + netif_carrier_off(qca->net_dev); + qcaspi_flush_tx_ring(qca); + msleep(QCASPI_QCA7K_REBOOT_TIME_MS); + } + + if (qca->intr_svc != qca->intr_req) { + qca->intr_svc = qca->intr_req; + start_spi_intr_handling(qca, &intr_cause); + + if (intr_cause & SPI_INT_CPU_ON) { + qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON); + + /* not synced. 
*/ + if (qca->sync != QCASPI_SYNC_READY) + continue; + + qca->stats.device_reset++; + netif_wake_queue(qca->net_dev); + netif_carrier_on(qca->net_dev); + } + + if (intr_cause & SPI_INT_RDBUF_ERR) { + /* restart sync */ + netdev_dbg(qca->net_dev, "===> rdbuf error!\n"); + qca->stats.read_buf_err++; + qca->sync = QCASPI_SYNC_UNKNOWN; + continue; + } + + if (intr_cause & SPI_INT_WRBUF_ERR) { + /* restart sync */ + netdev_dbg(qca->net_dev, "===> wrbuf error!\n"); + qca->stats.write_buf_err++; + qca->sync = QCASPI_SYNC_UNKNOWN; + continue; + } + + /* can only handle other interrupts + * if sync has occurred + */ + if (qca->sync == QCASPI_SYNC_READY) { + if (intr_cause & SPI_INT_PKT_AVLBL) + qcaspi_receive(qca); + } + + end_spi_intr_handling(qca, intr_cause); + } + + if (qca->sync == QCASPI_SYNC_READY) + qcaspi_transmit(qca); + } + set_current_state(TASK_RUNNING); + netdev_info(qca->net_dev, "SPI thread exit\n"); + + return 0; +} + +static irqreturn_t +qcaspi_intr_handler(int irq, void *data) +{ + struct qcaspi *qca = data; + + qca->intr_req++; + if (qca->spi_thread && + qca->spi_thread->state != TASK_RUNNING) + wake_up_process(qca->spi_thread); + + return IRQ_HANDLED; +} + +int +qcaspi_netdev_open(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + int ret = 0; + + if (!qca) + return -EINVAL; + + qca->intr_req = 1; + qca->intr_svc = 0; + qca->sync = QCASPI_SYNC_UNKNOWN; + qcafrm_fsm_init(&qca->frm_handle); + + qca->spi_thread = kthread_run((void *)qcaspi_spi_thread, + qca, "%s", dev->name); + + if (IS_ERR(qca->spi_thread)) { + netdev_err(dev, "%s: unable to start kernel thread.\n", + QCASPI_DRV_NAME); + return PTR_ERR(qca->spi_thread); + } + + ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0, + dev->name, qca); + if (ret) { + netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n", + QCASPI_DRV_NAME, qca->spi_dev->irq, ret); + kthread_stop(qca->spi_thread); + return ret; + } + + netif_start_queue(qca->net_dev); + + return 0; +} + +int +qcaspi_netdev_close(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + netif_stop_queue(dev); + + qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0); + free_irq(qca->spi_dev->irq, qca); + + kthread_stop(qca->spi_thread); + qca->spi_thread = NULL; + qcaspi_flush_tx_ring(qca); + + return 0; +} + +static netdev_tx_t +qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 frame_len; + u8 *ptmp; + struct qcaspi *qca = netdev_priv(dev); + u16 new_tail; + struct sk_buff *tskb; + u8 pad_len = 0; + + if (skb->len < QCAFRM_ETHMINLEN) + pad_len = QCAFRM_ETHMINLEN - skb->len; + + if (qca->txr.skb[qca->txr.tail]) { + netdev_warn(qca->net_dev, "queue was unexpectedly full!\n"); + netif_stop_queue(qca->net_dev); + qca->stats.ring_full++; + return NETDEV_TX_BUSY; + } + + if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) || + (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) { + tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN, + QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC); + if (!tskb) { + netdev_dbg(qca->net_dev, "could not allocate tx_buff\n"); + qca->stats.out_of_mem++; + return NETDEV_TX_BUSY; + } + dev_kfree_skb(skb); + skb = tskb; + } + + frame_len = skb->len + pad_len; + + ptmp = skb_push(skb, QCAFRM_HEADER_LEN); + qcafrm_create_header(ptmp, frame_len); + + if (pad_len) { + ptmp = skb_put(skb, pad_len); + memset(ptmp, 0, pad_len); + } + + ptmp = skb_put(skb, QCAFRM_FOOTER_LEN); + qcafrm_create_footer(ptmp); + + netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n", + skb->len); + + qca->txr.size += skb->len + 
QCASPI_HW_PKT_LEN; + + new_tail = qca->txr.tail + 1; + if (new_tail >= qca->txr.count) + new_tail = 0; + + qca->txr.skb[qca->txr.tail] = skb; + qca->txr.tail = new_tail; + + if (!qcaspi_tx_ring_has_space(&qca->txr)) { + netif_stop_queue(qca->net_dev); + qca->stats.ring_full++; + } + + dev->trans_start = jiffies; + + if (qca->spi_thread && + qca->spi_thread->state != TASK_RUNNING) + wake_up_process(qca->spi_thread); + + return NETDEV_TX_OK; +} + +static void +qcaspi_netdev_tx_timeout(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", + jiffies, jiffies - dev->trans_start); + qca->net_dev->stats.tx_errors++; + /* wake the queue if there is room */ + if (qcaspi_tx_ring_has_space(&qca->txr)) + netif_wake_queue(dev); +} + +static int +qcaspi_netdev_init(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + dev->mtu = QCASPI_MTU; + dev->type = ARPHRD_ETHER; + qca->clkspeed = qcaspi_clkspeed; + qca->burst_len = qcaspi_burst_len; + qca->spi_thread = NULL; + qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + + QCAFRM_FOOTER_LEN + 4) * 4; + + memset(&qca->stats, 0, sizeof(struct qcaspi_stats)); + + qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL); + if (!qca->rx_buffer) + return -ENOBUFS; + + qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); + if (!qca->rx_skb) { + kfree(qca->rx_buffer); + netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); + return -ENOBUFS; + } + + return 0; +} + +static void +qcaspi_netdev_uninit(struct net_device *dev) +{ + struct qcaspi *qca = netdev_priv(dev); + + kfree(qca->rx_buffer); + qca->buffer_size = 0; + if (qca->rx_skb) + dev_kfree_skb(qca->rx_skb); +} + +static int +qcaspi_netdev_change_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < QCAFRM_ETHMINMTU) || (new_mtu > QCAFRM_ETHMAXMTU)) + return -EINVAL; + + dev->mtu = new_mtu; + + return 0; +} + +static const struct net_device_ops qcaspi_netdev_ops = { + .ndo_init = qcaspi_netdev_init, + .ndo_uninit = qcaspi_netdev_uninit, + .ndo_open = qcaspi_netdev_open, + .ndo_stop = qcaspi_netdev_close, + .ndo_start_xmit = qcaspi_netdev_xmit, + .ndo_change_mtu = qcaspi_netdev_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_tx_timeout = qcaspi_netdev_tx_timeout, + .ndo_validate_addr = eth_validate_addr, +}; + +static void +qcaspi_netdev_setup(struct net_device *dev) +{ + struct qcaspi *qca = NULL; + + dev->netdev_ops = &qcaspi_netdev_ops; + qcaspi_set_ethtool_ops(dev); + dev->watchdog_timeo = QCASPI_TX_TIMEOUT; + dev->flags = IFF_MULTICAST; + dev->tx_queue_len = 100; + + qca = netdev_priv(dev); + memset(qca, 0, sizeof(struct qcaspi)); + + memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer)); + memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2); + + spi_message_init(&qca->spi_msg1); + spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1); + + spi_message_init(&qca->spi_msg2); + spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2); + spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2); + + memset(&qca->txr, 0, sizeof(qca->txr)); + qca->txr.count = TX_RING_MAX_LEN; +} + +static const struct of_device_id qca_spi_of_match[] = { + { .compatible = "qca,qca7000" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, qca_spi_of_match); + +static int +qca_spi_probe(struct spi_device *spi_device) +{ + struct qcaspi *qca = NULL; + struct net_device *qcaspi_devs = NULL; + u8 legacy_mode = 0; + u16 signature; + const char *mac; + + if 
(!spi_device->dev.of_node) { + dev_err(&spi_device->dev, "Missing device tree\n"); + return -EINVAL; + } + + legacy_mode = of_property_read_bool(spi_device->dev.of_node, + "qca,legacy-mode"); + + if (qcaspi_clkspeed == 0) { + if (spi_device->max_speed_hz) + qcaspi_clkspeed = spi_device->max_speed_hz; + else + qcaspi_clkspeed = QCASPI_CLK_SPEED; + } + + if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || + (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { + dev_info(&spi_device->dev, "Invalid clkspeed: %d\n", + qcaspi_clkspeed); + return -EINVAL; + } + + if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || + (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { + dev_info(&spi_device->dev, "Invalid burst len: %d\n", + qcaspi_burst_len); + return -EINVAL; + } + + if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || + (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { + dev_info(&spi_device->dev, "Invalid pluggable: %d\n", + qcaspi_pluggable); + return -EINVAL; + } + + dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", + QCASPI_DRV_VERSION, + qcaspi_clkspeed, + qcaspi_burst_len, + qcaspi_pluggable); + + spi_device->mode = SPI_MODE_3; + spi_device->max_speed_hz = qcaspi_clkspeed; + if (spi_setup(spi_device) < 0) { + dev_err(&spi_device->dev, "Unable to setup SPI device\n"); + return -EFAULT; + } + + qcaspi_devs = alloc_etherdev(sizeof(struct qcaspi)); + if (!qcaspi_devs) + return -ENOMEM; + + qcaspi_netdev_setup(qcaspi_devs); + + qca = netdev_priv(qcaspi_devs); + if (!qca) { + free_netdev(qcaspi_devs); + dev_err(&spi_device->dev, "Fail to retrieve private structure\n"); + return -ENOMEM; + } + qca->net_dev = qcaspi_devs; + qca->spi_dev = spi_device; + qca->legacy_mode = legacy_mode; + + spi_set_drvdata(spi_device, qcaspi_devs); + + mac = of_get_mac_address(spi_device->dev.of_node); + + if (mac) + ether_addr_copy(qca->net_dev->dev_addr, mac); + + if (!is_valid_ether_addr(qca->net_dev->dev_addr)) { + eth_hw_addr_random(qca->net_dev); + dev_info(&spi_device->dev, "Using random MAC address: %pM\n", + qca->net_dev->dev_addr); + } + + netif_carrier_off(qca->net_dev); + + if (!qcaspi_pluggable) { + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); + + if (signature != QCASPI_GOOD_SIGNATURE) { + dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n", + signature); + free_netdev(qcaspi_devs); + return -EFAULT; + } + } + + if (register_netdev(qcaspi_devs)) { + dev_info(&spi_device->dev, "Unable to register net device %s\n", + qcaspi_devs->name); + free_netdev(qcaspi_devs); + return -EFAULT; + } + + qcaspi_init_device_debugfs(qca); + + return 0; +} + +static int +qca_spi_remove(struct spi_device *spi_device) +{ + struct net_device *qcaspi_devs = spi_get_drvdata(spi_device); + struct qcaspi *qca = netdev_priv(qcaspi_devs); + + qcaspi_remove_device_debugfs(qca); + + unregister_netdev(qcaspi_devs); + free_netdev(qcaspi_devs); + + return 0; +} + +static const struct spi_device_id qca_spi_id[] = { + { "qca7000", 0 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, qca_spi_id); + +static struct spi_driver qca_spi_driver = { + .driver = { + .name = QCASPI_DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = qca_spi_of_match, + }, + .id_table = qca_spi_id, + .probe = qca_spi_probe, + .remove = qca_spi_remove, +}; +module_spi_driver(qca_spi_driver); + +MODULE_DESCRIPTION("Qualcomm Atheros SPI Driver"); +MODULE_AUTHOR("Qualcomm Atheros Communications"); +MODULE_AUTHOR("Stefan Wahren "); +MODULE_LICENSE("Dual BSD/GPL"); 
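As a quick aside on the framing used by the transmit path above: qcaspi_netdev_xmit() reserves QCAFRM_HEADER_LEN and QCAFRM_FOOTER_LEN bytes around the (padded) Ethernet frame and fills them via qcafrm_create_header() and qcafrm_create_footer(). The sketch below rebuilds the same QCA7K envelope in plain user-space C so the on-wire layout is easy to inspect; wrap_frame(), the buffer names and main() are illustrative only and not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_LEN 8	/* mirrors QCAFRM_HEADER_LEN */
#define FTR_LEN 2	/* mirrors QCAFRM_FOOTER_LEN */

/* Wrap an Ethernet payload the way qcafrm_create_header()/footer() do. */
static size_t wrap_frame(uint8_t *out, const uint8_t *payload, uint16_t len)
{
	memset(out, 0xAA, 4);		/* start-of-frame marker */
	out[4] = len & 0xff;		/* frame length, little endian */
	out[5] = (len >> 8) & 0xff;
	out[6] = 0;			/* reserved */
	out[7] = 0;
	memcpy(out + HDR_LEN, payload, len);
	out[HDR_LEN + len] = 0x55;	/* end-of-frame marker */
	out[HDR_LEN + len + 1] = 0x55;
	return HDR_LEN + len + FTR_LEN;
}

int main(void)
{
	uint8_t payload[60] = { 0 };	/* minimum (padded) Ethernet frame */
	uint8_t wire[HDR_LEN + sizeof(payload) + FTR_LEN];
	size_t n = wrap_frame(wire, payload, sizeof(payload));

	printf("on-wire length: %zu bytes\n", n);	/* 8 + 60 + 2 = 70 */
	return 0;
}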
+MODULE_VERSION(QCASPI_DRV_VERSION); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h new file mode 100644 index 000000000..6e31a0e74 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. + * Copyright (c) 2014, I2SE GmbH + * + * Permission to use, copy, modify, and/or distribute this software + * for any purpose with or without fee is hereby granted, provided + * that the above copyright notice and this permission notice appear + * in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* Qualcomm Atheros SPI register definition. + * + * This module is designed to define the Qualcomm Atheros SPI register + * placeholders; + */ + +#ifndef _QCA_SPI_H +#define _QCA_SPI_H + +#include +#include +#include +#include +#include + +#include "qca_framing.h" + +#define QCASPI_DRV_VERSION "0.2.7-i" +#define QCASPI_DRV_NAME "qcaspi" + +#define QCASPI_GOOD_SIGNATURE 0xAA55 + +#define TX_RING_MAX_LEN 10 +#define TX_RING_MIN_LEN 2 + +/* sync related constants */ +#define QCASPI_SYNC_UNKNOWN 0 +#define QCASPI_SYNC_RESET 1 +#define QCASPI_SYNC_READY 2 + +#define QCASPI_RESET_TIMEOUT 10 + +/* sync events */ +#define QCASPI_EVENT_UPDATE 0 +#define QCASPI_EVENT_CPUON 1 + +struct tx_ring { + struct sk_buff *skb[TX_RING_MAX_LEN]; + u16 head; + u16 tail; + u16 size; + u16 count; +}; + +struct qcaspi_stats { + u64 trig_reset; + u64 device_reset; + u64 reset_timeout; + u64 read_err; + u64 write_err; + u64 read_buf_err; + u64 write_buf_err; + u64 out_of_mem; + u64 write_buf_miss; + u64 ring_full; + u64 spi_err; +}; + +struct qcaspi { + struct net_device *net_dev; + struct spi_device *spi_dev; + struct task_struct *spi_thread; + + struct tx_ring txr; + struct qcaspi_stats stats; + + struct spi_message spi_msg1; + struct spi_message spi_msg2; + struct spi_transfer spi_xfer1; + struct spi_transfer spi_xfer2[2]; + + u8 *rx_buffer; + u32 buffer_size; + u8 sync; + + struct qcafrm_handle frm_handle; + struct sk_buff *rx_skb; + + unsigned int intr_req; + unsigned int intr_svc; + +#ifdef CONFIG_DEBUG_FS + struct dentry *device_root; +#endif + + /* user configurable options */ + u32 clkspeed; + u8 legacy_mode; + u16 burst_len; +}; + +int qcaspi_netdev_open(struct net_device *dev); +int qcaspi_netdev_close(struct net_device *dev); + +#endif /* _QCA_SPI_H */ diff --git a/drivers/net/ethernet/rdc/Kconfig b/drivers/net/ethernet/rdc/Kconfig new file mode 100644 index 000000000..2055f7eb2 --- /dev/null +++ b/drivers/net/ethernet/rdc/Kconfig @@ -0,0 +1,34 @@ +# +# RDC network device configuration +# + +config NET_VENDOR_RDC + bool "RDC devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about RDC cards. 
If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_RDC + +config R6040 + tristate "RDC R6040 Fast Ethernet Adapter support" + depends on PCI + select CRC32 + select MII + select PHYLIB + ---help--- + This is a driver for the R6040 Fast Ethernet MACs found in the + the RDC R-321x System-on-chips. + + To compile this driver as a module, choose M here: the module + will be called r6040. This is recommended. + +endif # NET_VENDOR_RDC diff --git a/drivers/net/ethernet/rdc/Makefile b/drivers/net/ethernet/rdc/Makefile new file mode 100644 index 000000000..8d51fd2d0 --- /dev/null +++ b/drivers/net/ethernet/rdc/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the RDC network device drivers. +# + +obj-$(CONFIG_R6040) += r6040.o diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c new file mode 100644 index 000000000..9a37247cf --- /dev/null +++ b/drivers/net/ethernet/rdc/r6040.c @@ -0,0 +1,1270 @@ +/* + * RDC R6040 Fast Ethernet MAC support + * + * Copyright (C) 2004 Sten Wang + * Copyright (C) 2007 + * Daniel Gimpelevich + * Copyright (C) 2007-2012 Florian Fainelli + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRV_NAME "r6040" +#define DRV_VERSION "0.28" +#define DRV_RELDATE "07Oct2011" + +/* Time in jiffies before concluding the transmitter is hung. 
*/ +#define TX_TIMEOUT (6000 * HZ / 1000) + +/* RDC MAC I/O Size */ +#define R6040_IO_SIZE 256 + +/* MAX RDC MAC */ +#define MAX_MAC 2 + +/* MAC registers */ +#define MCR0 0x00 /* Control register 0 */ +#define MCR0_RCVEN 0x0002 /* Receive enable */ +#define MCR0_PROMISC 0x0020 /* Promiscuous mode */ +#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */ +#define MCR0_XMTEN 0x1000 /* Transmission enable */ +#define MCR0_FD 0x8000 /* Full/Half duplex */ +#define MCR1 0x04 /* Control register 1 */ +#define MAC_RST 0x0001 /* Reset the MAC */ +#define MBCR 0x08 /* Bus control */ +#define MT_ICR 0x0C /* TX interrupt control */ +#define MR_ICR 0x10 /* RX interrupt control */ +#define MTPR 0x14 /* TX poll command register */ +#define TM2TX 0x0001 /* Trigger MAC to transmit */ +#define MR_BSR 0x18 /* RX buffer size */ +#define MR_DCR 0x1A /* RX descriptor control */ +#define MLSR 0x1C /* Last status */ +#define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */ +#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */ +#define TX_LATEC 0x4000 /* Transmit late collision */ +#define MMDIO 0x20 /* MDIO control register */ +#define MDIO_WRITE 0x4000 /* MDIO write */ +#define MDIO_READ 0x2000 /* MDIO read */ +#define MMRD 0x24 /* MDIO read data register */ +#define MMWD 0x28 /* MDIO write data register */ +#define MTD_SA0 0x2C /* TX descriptor start address 0 */ +#define MTD_SA1 0x30 /* TX descriptor start address 1 */ +#define MRD_SA0 0x34 /* RX descriptor start address 0 */ +#define MRD_SA1 0x38 /* RX descriptor start address 1 */ +#define MISR 0x3C /* Status register */ +#define MIER 0x40 /* INT enable register */ +#define MSK_INT 0x0000 /* Mask off interrupts */ +#define RX_FINISH 0x0001 /* RX finished */ +#define RX_NO_DESC 0x0002 /* No RX descriptor available */ +#define RX_FIFO_FULL 0x0004 /* RX FIFO full */ +#define RX_EARLY 0x0008 /* RX early */ +#define TX_FINISH 0x0010 /* TX finished */ +#define TX_EARLY 0x0080 /* TX early */ +#define EVENT_OVRFL 0x0100 /* Event counter overflow */ +#define LINK_CHANGED 0x0200 /* PHY link changed */ +#define ME_CISR 0x44 /* Event counter INT status */ +#define ME_CIER 0x48 /* Event counter INT enable */ +#define MR_CNT 0x50 /* Successfully received packet counter */ +#define ME_CNT0 0x52 /* Event counter 0 */ +#define ME_CNT1 0x54 /* Event counter 1 */ +#define ME_CNT2 0x56 /* Event counter 2 */ +#define ME_CNT3 0x58 /* Event counter 3 */ +#define MT_CNT 0x5A /* Successfully transmit packet counter */ +#define ME_CNT4 0x5C /* Event counter 4 */ +#define MP_CNT 0x5E /* Pause frame counter register */ +#define MAR0 0x60 /* Hash table 0 */ +#define MAR1 0x62 /* Hash table 1 */ +#define MAR2 0x64 /* Hash table 2 */ +#define MAR3 0x66 /* Hash table 3 */ +#define MID_0L 0x68 /* Multicast address MID0 Low */ +#define MID_0M 0x6A /* Multicast address MID0 Medium */ +#define MID_0H 0x6C /* Multicast address MID0 High */ +#define MID_1L 0x70 /* MID1 Low */ +#define MID_1M 0x72 /* MID1 Medium */ +#define MID_1H 0x74 /* MID1 High */ +#define MID_2L 0x78 /* MID2 Low */ +#define MID_2M 0x7A /* MID2 Medium */ +#define MID_2H 0x7C /* MID2 High */ +#define MID_3L 0x80 /* MID3 Low */ +#define MID_3M 0x82 /* MID3 Medium */ +#define MID_3H 0x84 /* MID3 High */ +#define PHY_CC 0x88 /* PHY status change configuration register */ +#define SCEN 0x8000 /* PHY status change enable */ +#define PHYAD_SHIFT 8 /* PHY address shift */ +#define TMRDIV_SHIFT 0 /* Timer divider shift */ +#define PHY_ST 0x8A /* PHY status register */ +#define MAC_SM 0xAC /* MAC status machine */ 
+#define MAC_SM_RST 0x0002 /* MAC status machine reset */ +#define MAC_ID 0xBE /* Identifier register */ + +#define TX_DCNT 0x80 /* TX descriptor count */ +#define RX_DCNT 0x80 /* RX descriptor count */ +#define MAX_BUF_SIZE 0x600 +#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) +#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) +#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ +#define MCAST_MAX 3 /* Max number multicast addresses to filter */ + +#define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */ + +/* Descriptor status */ +#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ +#define DSC_RX_OK 0x4000 /* RX was successful */ +#define DSC_RX_ERR 0x0800 /* RX PHY error */ +#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */ +#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */ +#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */ +#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */ +#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */ +#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */ +#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */ +#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */ +#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */ +#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */ + +MODULE_AUTHOR("Sten Wang ," + "Daniel Gimpelevich ," + "Florian Fainelli "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); +MODULE_VERSION(DRV_VERSION " " DRV_RELDATE); + +/* RX and TX interrupts that we handle */ +#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH) +#define TX_INTS (TX_FINISH) +#define INT_MASK (RX_INTS | TX_INTS) + +struct r6040_descriptor { + u16 status, len; /* 0-3 */ + __le32 buf; /* 4-7 */ + __le32 ndesc; /* 8-B */ + u32 rev1; /* C-F */ + char *vbufp; /* 10-13 */ + struct r6040_descriptor *vndescp; /* 14-17 */ + struct sk_buff *skb_ptr; /* 18-1B */ + u32 rev2; /* 1C-1F */ +} __aligned(32); + +struct r6040_private { + spinlock_t lock; /* driver lock */ + struct pci_dev *pdev; + struct r6040_descriptor *rx_insert_ptr; + struct r6040_descriptor *rx_remove_ptr; + struct r6040_descriptor *tx_insert_ptr; + struct r6040_descriptor *tx_remove_ptr; + struct r6040_descriptor *rx_ring; + struct r6040_descriptor *tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + u16 tx_free_desc; + u16 mcr0; + struct net_device *dev; + struct mii_bus *mii_bus; + struct napi_struct napi; + void __iomem *base; + struct phy_device *phydev; + int old_link; + int old_duplex; +}; + +static char version[] = DRV_NAME + ": RDC R6040 NAPI net driver," + "version "DRV_VERSION " (" DRV_RELDATE ")"; + +/* Read a word data from PHY Chip */ +static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) +{ + int limit = MAC_DEF_TIMEOUT; + u16 cmd; + + iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO); + /* Wait for the read bit to be cleared */ + while (limit--) { + cmd = ioread16(ioaddr + MMDIO); + if (!(cmd & MDIO_READ)) + break; + udelay(1); + } + + if (limit < 0) + return -ETIMEDOUT; + + return ioread16(ioaddr + MMRD); +} + +/* Write a word data from PHY Chip */ +static int r6040_phy_write(void __iomem *ioaddr, + int phy_addr, int reg, u16 val) +{ + int limit = MAC_DEF_TIMEOUT; + u16 cmd; + + iowrite16(val, ioaddr + MMWD); + /* Write the command to the MDIO bus */ + iowrite16(MDIO_WRITE + reg + (phy_addr << 8), ioaddr + MMDIO); + /* Wait for 
the write bit to be cleared */ + while (limit--) { + cmd = ioread16(ioaddr + MMDIO); + if (!(cmd & MDIO_WRITE)) + break; + udelay(1); + } + + return (limit < 0) ? -ETIMEDOUT : 0; +} + +static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg) +{ + struct net_device *dev = bus->priv; + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + return r6040_phy_read(ioaddr, phy_addr, reg); +} + +static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr, + int reg, u16 value) +{ + struct net_device *dev = bus->priv; + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + return r6040_phy_write(ioaddr, phy_addr, reg, value); +} + +static void r6040_free_txbufs(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + int i; + + for (i = 0; i < TX_DCNT; i++) { + if (lp->tx_insert_ptr->skb_ptr) { + pci_unmap_single(lp->pdev, + le32_to_cpu(lp->tx_insert_ptr->buf), + MAX_BUF_SIZE, PCI_DMA_TODEVICE); + dev_kfree_skb(lp->tx_insert_ptr->skb_ptr); + lp->tx_insert_ptr->skb_ptr = NULL; + } + lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp; + } +} + +static void r6040_free_rxbufs(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + int i; + + for (i = 0; i < RX_DCNT; i++) { + if (lp->rx_insert_ptr->skb_ptr) { + pci_unmap_single(lp->pdev, + le32_to_cpu(lp->rx_insert_ptr->buf), + MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(lp->rx_insert_ptr->skb_ptr); + lp->rx_insert_ptr->skb_ptr = NULL; + } + lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp; + } +} + +static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring, + dma_addr_t desc_dma, int size) +{ + struct r6040_descriptor *desc = desc_ring; + dma_addr_t mapping = desc_dma; + + while (size-- > 0) { + mapping += sizeof(*desc); + desc->ndesc = cpu_to_le32(mapping); + desc->vndescp = desc + 1; + desc++; + } + desc--; + desc->ndesc = cpu_to_le32(desc_dma); + desc->vndescp = desc_ring; +} + +static void r6040_init_txbufs(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + + lp->tx_free_desc = TX_DCNT; + + lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring; + r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT); +} + +static int r6040_alloc_rxbufs(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + struct r6040_descriptor *desc; + struct sk_buff *skb; + int rc; + + lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring; + r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT); + + /* Allocate skbs for the rx descriptors */ + desc = lp->rx_ring; + do { + skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); + if (!skb) { + rc = -ENOMEM; + goto err_exit; + } + desc->skb_ptr = skb; + desc->buf = cpu_to_le32(pci_map_single(lp->pdev, + desc->skb_ptr->data, + MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); + desc->status = DSC_OWNER_MAC; + desc = desc->vndescp; + } while (desc != lp->rx_ring); + + return 0; + +err_exit: + /* Deallocate all previously allocated skbs */ + r6040_free_rxbufs(dev); + return rc; +} + +static void r6040_reset_mac(struct r6040_private *lp) +{ + void __iomem *ioaddr = lp->base; + int limit = MAC_DEF_TIMEOUT; + u16 cmd; + + iowrite16(MAC_RST, ioaddr + MCR1); + while (limit--) { + cmd = ioread16(ioaddr + MCR1); + if (cmd & MAC_RST) + break; + } + + /* Reset internal state machine */ + iowrite16(MAC_SM_RST, ioaddr + MAC_SM); + iowrite16(0, ioaddr + MAC_SM); + mdelay(5); +} + +static void r6040_init_mac_regs(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + 
void __iomem *ioaddr = lp->base; + + /* Mask Off Interrupt */ + iowrite16(MSK_INT, ioaddr + MIER); + + /* Reset RDC MAC */ + r6040_reset_mac(lp); + + /* MAC Bus Control Register */ + iowrite16(MBCR_DEFAULT, ioaddr + MBCR); + + /* Buffer Size Register */ + iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR); + + /* Write TX ring start address */ + iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0); + iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1); + + /* Write RX ring start address */ + iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0); + iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1); + + /* Set interrupt waiting time and packet numbers */ + iowrite16(0, ioaddr + MT_ICR); + iowrite16(0, ioaddr + MR_ICR); + + /* Enable interrupts */ + iowrite16(INT_MASK, ioaddr + MIER); + + /* Enable TX and RX */ + iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr); + + /* Let TX poll the descriptors + * we may got called by r6040_tx_timeout which has left + * some unsent tx buffers */ + iowrite16(TM2TX, ioaddr + MTPR); +} + +static void r6040_tx_timeout(struct net_device *dev) +{ + struct r6040_private *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + + netdev_warn(dev, "transmit timed out, int enable %4.4x " + "status %4.4x\n", + ioread16(ioaddr + MIER), + ioread16(ioaddr + MISR)); + + dev->stats.tx_errors++; + + /* Reset MAC and re-init all registers */ + r6040_init_mac_regs(dev); +} + +static struct net_device_stats *r6040_get_stats(struct net_device *dev) +{ + struct r6040_private *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->base; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1); + dev->stats.multicast += ioread8(ioaddr + ME_CNT0); + spin_unlock_irqrestore(&priv->lock, flags); + + return &dev->stats; +} + +/* Stop RDC MAC and Free the allocated resource */ +static void r6040_down(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + u16 *adrp; + + /* Stop MAC */ + iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */ + + /* Reset RDC MAC */ + r6040_reset_mac(lp); + + /* Restore MAC Address to MIDx */ + adrp = (u16 *) dev->dev_addr; + iowrite16(adrp[0], ioaddr + MID_0L); + iowrite16(adrp[1], ioaddr + MID_0M); + iowrite16(adrp[2], ioaddr + MID_0H); + + phy_stop(lp->phydev); +} + +static int r6040_close(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + struct pci_dev *pdev = lp->pdev; + + spin_lock_irq(&lp->lock); + napi_disable(&lp->napi); + netif_stop_queue(dev); + r6040_down(dev); + + free_irq(dev->irq, dev); + + /* Free RX buffer */ + r6040_free_rxbufs(dev); + + /* Free TX buffer */ + r6040_free_txbufs(dev); + + spin_unlock_irq(&lp->lock); + + /* Free Descriptor memory */ + if (lp->rx_ring) { + pci_free_consistent(pdev, + RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); + lp->rx_ring = NULL; + } + + if (lp->tx_ring) { + pci_free_consistent(pdev, + TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma); + lp->tx_ring = NULL; + } + + return 0; +} + +static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct r6040_private *lp = netdev_priv(dev); + + if (!lp->phydev) + return -EINVAL; + + return phy_mii_ioctl(lp->phydev, rq, cmd); +} + +static int r6040_rx(struct net_device *dev, int limit) +{ + struct r6040_private *priv = netdev_priv(dev); + struct r6040_descriptor *descptr = priv->rx_remove_ptr; + struct sk_buff *skb_ptr, *new_skb; + int count = 0; + u16 err; + + /* Limit not reached and the descriptor belongs to the CPU */ + 
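+	/* Ownership handshake: a descriptor whose status has DSC_OWNER_MAC set
+	 * still belongs to the hardware. The loop below only consumes entries
+	 * the MAC has handed back, and next_descr returns each one by setting
+	 * DSC_OWNER_MAC again before advancing.
+	 */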
while (count < limit && !(descptr->status & DSC_OWNER_MAC)) { + /* Read the descriptor status */ + err = descptr->status; + /* Global error status set */ + if (err & DSC_RX_ERR) { + /* RX dribble */ + if (err & DSC_RX_ERR_DRI) + dev->stats.rx_frame_errors++; + /* Buffer length exceeded */ + if (err & DSC_RX_ERR_BUF) + dev->stats.rx_length_errors++; + /* Packet too long */ + if (err & DSC_RX_ERR_LONG) + dev->stats.rx_length_errors++; + /* Packet < 64 bytes */ + if (err & DSC_RX_ERR_RUNT) + dev->stats.rx_length_errors++; + /* CRC error */ + if (err & DSC_RX_ERR_CRC) { + spin_lock(&priv->lock); + dev->stats.rx_crc_errors++; + spin_unlock(&priv->lock); + } + goto next_descr; + } + + /* Packet successfully received */ + new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); + if (!new_skb) { + dev->stats.rx_dropped++; + goto next_descr; + } + skb_ptr = descptr->skb_ptr; + skb_ptr->dev = priv->dev; + + /* Do not count the CRC */ + skb_put(skb_ptr, descptr->len - 4); + pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), + MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); + skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev); + + /* Send to upper layer */ + netif_receive_skb(skb_ptr); + dev->stats.rx_packets++; + dev->stats.rx_bytes += descptr->len - 4; + + /* put new skb into descriptor */ + descptr->skb_ptr = new_skb; + descptr->buf = cpu_to_le32(pci_map_single(priv->pdev, + descptr->skb_ptr->data, + MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); + +next_descr: + /* put the descriptor back to the MAC */ + descptr->status = DSC_OWNER_MAC; + descptr = descptr->vndescp; + count++; + } + priv->rx_remove_ptr = descptr; + + return count; +} + +static void r6040_tx(struct net_device *dev) +{ + struct r6040_private *priv = netdev_priv(dev); + struct r6040_descriptor *descptr; + void __iomem *ioaddr = priv->base; + struct sk_buff *skb_ptr; + u16 err; + + spin_lock(&priv->lock); + descptr = priv->tx_remove_ptr; + while (priv->tx_free_desc < TX_DCNT) { + /* Check for errors */ + err = ioread16(ioaddr + MLSR); + + if (err & TX_FIFO_UNDR) + dev->stats.tx_fifo_errors++; + if (err & (TX_EXCEEDC | TX_LATEC)) + dev->stats.tx_carrier_errors++; + + if (descptr->status & DSC_OWNER_MAC) + break; /* Not complete */ + skb_ptr = descptr->skb_ptr; + pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), + skb_ptr->len, PCI_DMA_TODEVICE); + /* Free buffer */ + dev_kfree_skb_irq(skb_ptr); + descptr->skb_ptr = NULL; + /* To next descriptor */ + descptr = descptr->vndescp; + priv->tx_free_desc++; + } + priv->tx_remove_ptr = descptr; + + if (priv->tx_free_desc) + netif_wake_queue(dev); + spin_unlock(&priv->lock); +} + +static int r6040_poll(struct napi_struct *napi, int budget) +{ + struct r6040_private *priv = + container_of(napi, struct r6040_private, napi); + struct net_device *dev = priv->dev; + void __iomem *ioaddr = priv->base; + int work_done; + + work_done = r6040_rx(dev, budget); + + if (work_done < budget) { + napi_complete(napi); + /* Enable RX interrupt */ + iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); + } + return work_done; +} + +/* The RDC interrupt handler. 
*/ +static irqreturn_t r6040_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + u16 misr, status; + + /* Save MIER */ + misr = ioread16(ioaddr + MIER); + /* Mask off RDC MAC interrupt */ + iowrite16(MSK_INT, ioaddr + MIER); + /* Read MISR status and clear */ + status = ioread16(ioaddr + MISR); + + if (status == 0x0000 || status == 0xffff) { + /* Restore RDC MAC interrupt */ + iowrite16(misr, ioaddr + MIER); + return IRQ_NONE; + } + + /* RX interrupt request */ + if (status & RX_INTS) { + if (status & RX_NO_DESC) { + /* RX descriptor unavailable */ + dev->stats.rx_dropped++; + dev->stats.rx_missed_errors++; + } + if (status & RX_FIFO_FULL) + dev->stats.rx_fifo_errors++; + + if (likely(napi_schedule_prep(&lp->napi))) { + /* Mask off RX interrupt */ + misr &= ~RX_INTS; + __napi_schedule(&lp->napi); + } + } + + /* TX interrupt request */ + if (status & TX_INTS) + r6040_tx(dev); + + /* Restore RDC MAC interrupt */ + iowrite16(misr, ioaddr + MIER); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void r6040_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + r6040_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* Init RDC MAC */ +static int r6040_up(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int ret; + + /* Initialise and alloc RX/TX buffers */ + r6040_init_txbufs(dev); + ret = r6040_alloc_rxbufs(dev); + if (ret) + return ret; + + /* improve performance (by RDC guys) */ + r6040_phy_write(ioaddr, 30, 17, + (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); + r6040_phy_write(ioaddr, 30, 17, + ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); + r6040_phy_write(ioaddr, 0, 19, 0x0000); + r6040_phy_write(ioaddr, 0, 30, 0x01F0); + + /* Initialize all MAC registers */ + r6040_init_mac_regs(dev); + + phy_start(lp->phydev); + + return 0; +} + + +/* Read/set MAC address routines */ +static void r6040_mac_address(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + u16 *adrp; + + /* Reset MAC */ + r6040_reset_mac(lp); + + /* Restore MAC Address */ + adrp = (u16 *) dev->dev_addr; + iowrite16(adrp[0], ioaddr + MID_0L); + iowrite16(adrp[1], ioaddr + MID_0M); + iowrite16(adrp[2], ioaddr + MID_0H); +} + +static int r6040_open(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + int ret; + + /* Request IRQ and Register interrupt handler */ + ret = request_irq(dev->irq, r6040_interrupt, + IRQF_SHARED, dev->name, dev); + if (ret) + goto out; + + /* Set MAC address */ + r6040_mac_address(dev); + + /* Allocate Descriptor memory */ + lp->rx_ring = + pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma); + if (!lp->rx_ring) { + ret = -ENOMEM; + goto err_free_irq; + } + + lp->tx_ring = + pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma); + if (!lp->tx_ring) { + ret = -ENOMEM; + goto err_free_rx_ring; + } + + ret = r6040_up(dev); + if (ret) + goto err_free_tx_ring; + + napi_enable(&lp->napi); + netif_start_queue(dev); + + return 0; + +err_free_tx_ring: + pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring, + lp->tx_ring_dma); +err_free_rx_ring: + pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring, + lp->rx_ring_dma); +err_free_irq: + free_irq(dev->irq, dev); +out: + return ret; +} + +static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct r6040_private *lp 
= netdev_priv(dev); + struct r6040_descriptor *descptr; + void __iomem *ioaddr = lp->base; + unsigned long flags; + + /* Critical Section */ + spin_lock_irqsave(&lp->lock, flags); + + /* TX resource check */ + if (!lp->tx_free_desc) { + spin_unlock_irqrestore(&lp->lock, flags); + netif_stop_queue(dev); + netdev_err(dev, ": no tx descriptor\n"); + return NETDEV_TX_BUSY; + } + + /* Statistic Counter */ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + /* Set TX descriptor & Transmit it */ + lp->tx_free_desc--; + descptr = lp->tx_insert_ptr; + if (skb->len < ETH_ZLEN) + descptr->len = ETH_ZLEN; + else + descptr->len = skb->len; + + descptr->skb_ptr = skb; + descptr->buf = cpu_to_le32(pci_map_single(lp->pdev, + skb->data, skb->len, PCI_DMA_TODEVICE)); + descptr->status = DSC_OWNER_MAC; + + skb_tx_timestamp(skb); + + /* Trigger the MAC to check the TX descriptor */ + iowrite16(TM2TX, ioaddr + MTPR); + lp->tx_insert_ptr = descptr->vndescp; + + /* If no tx resource, stop */ + if (!lp->tx_free_desc) + netif_stop_queue(dev); + + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_OK; +} + +static void r6040_multicast_list(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned long flags; + struct netdev_hw_addr *ha; + int i; + u16 *adrp; + u16 hash_table[4] = { 0 }; + + spin_lock_irqsave(&lp->lock, flags); + + /* Keep our MAC Address */ + adrp = (u16 *)dev->dev_addr; + iowrite16(adrp[0], ioaddr + MID_0L); + iowrite16(adrp[1], ioaddr + MID_0M); + iowrite16(adrp[2], ioaddr + MID_0H); + + /* Clear AMCP & PROM bits */ + lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN); + + /* Promiscuous mode */ + if (dev->flags & IFF_PROMISC) + lp->mcr0 |= MCR0_PROMISC; + + /* Enable multicast hash table function to + * receive all multicast packets. */ + else if (dev->flags & IFF_ALLMULTI) { + lp->mcr0 |= MCR0_HASH_EN; + + for (i = 0; i < MCAST_MAX ; i++) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + } + + for (i = 0; i < 4; i++) + hash_table[i] = 0xffff; + } + /* Use internal multicast address registers if the number of + * multicast addresses is not greater than MCAST_MAX. */ + else if (netdev_mc_count(dev) <= MCAST_MAX) { + i = 0; + netdev_for_each_mc_addr(ha, dev) { + u16 *adrp = (u16 *) ha->addr; + iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); + iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); + iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); + i++; + } + while (i < MCAST_MAX) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + i++; + } + } + /* Otherwise, Enable multicast hash table function. 
*/ + else { + u32 crc; + + lp->mcr0 |= MCR0_HASH_EN; + + for (i = 0; i < MCAST_MAX ; i++) { + iowrite16(0, ioaddr + MID_1L + 8 * i); + iowrite16(0, ioaddr + MID_1M + 8 * i); + iowrite16(0, ioaddr + MID_1H + 8 * i); + } + + /* Build multicast hash table */ + netdev_for_each_mc_addr(ha, dev) { + u8 *addrs = ha->addr; + + crc = ether_crc(ETH_ALEN, addrs); + crc >>= 26; + hash_table[crc >> 4] |= 1 << (crc & 0xf); + } + } + + iowrite16(lp->mcr0, ioaddr + MCR0); + + /* Fill the MAC hash tables with their values */ + if (lp->mcr0 & MCR0_HASH_EN) { + iowrite16(hash_table[0], ioaddr + MAR0); + iowrite16(hash_table[1], ioaddr + MAR1); + iowrite16(hash_table[2], ioaddr + MAR2); + iowrite16(hash_table[3], ioaddr + MAR3); + } + + spin_unlock_irqrestore(&lp->lock, flags); +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct r6040_private *rp = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct r6040_private *rp = netdev_priv(dev); + + return phy_ethtool_gset(rp->phydev, cmd); +} + +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct r6040_private *rp = netdev_priv(dev); + + return phy_ethtool_sset(rp->phydev, cmd); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct net_device_ops r6040_netdev_ops = { + .ndo_open = r6040_open, + .ndo_stop = r6040_close, + .ndo_start_xmit = r6040_start_xmit, + .ndo_get_stats = r6040_get_stats, + .ndo_set_rx_mode = r6040_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_do_ioctl = r6040_ioctl, + .ndo_tx_timeout = r6040_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = r6040_poll_controller, +#endif +}; + +static void r6040_adjust_link(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + struct phy_device *phydev = lp->phydev; + int status_changed = 0; + void __iomem *ioaddr = lp->base; + + BUG_ON(!phydev); + + if (lp->old_link != phydev->link) { + status_changed = 1; + lp->old_link = phydev->link; + } + + /* reflect duplex change */ + if (phydev->link && (lp->old_duplex != phydev->duplex)) { + lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0); + iowrite16(lp->mcr0, ioaddr); + + status_changed = 1; + lp->old_duplex = phydev->duplex; + } + + if (status_changed) { + pr_info("%s: link %s", dev->name, phydev->link ? + "UP" : "DOWN"); + if (phydev->link) + pr_cont(" - %d/%s", phydev->speed, + DUPLEX_FULL == phydev->duplex ? 
"full" : "half"); + pr_cont("\n"); + } +} + +static int r6040_mii_probe(struct net_device *dev) +{ + struct r6040_private *lp = netdev_priv(dev); + struct phy_device *phydev = NULL; + + phydev = phy_find_first(lp->mii_bus); + if (!phydev) { + dev_err(&lp->pdev->dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(&lp->pdev->dev, "could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + lp->phydev = phydev; + lp->old_link = 0; + lp->old_duplex = -1; + + dev_info(&lp->pdev->dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + + return 0; +} + +static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct r6040_private *lp; + void __iomem *ioaddr; + int err, io_size = R6040_IO_SIZE; + static int card_idx = -1; + int bar = 0; + u16 *adrp; + int i; + + pr_info("%s\n", version); + + err = pci_enable_device(pdev); + if (err) + goto err_out; + + /* this should always be supported */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "32-bit PCI DMA addresses" + "not supported by the card\n"); + goto err_out_disable_dev; + } + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "32-bit PCI DMA addresses" + "not supported by the card\n"); + goto err_out_disable_dev; + } + + /* IO Size check */ + if (pci_resource_len(pdev, bar) < io_size) { + dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); + err = -EIO; + goto err_out_disable_dev; + } + + pci_set_master(pdev); + + dev = alloc_etherdev(sizeof(struct r6040_private)); + if (!dev) { + err = -ENOMEM; + goto err_out_disable_dev; + } + SET_NETDEV_DEV(dev, &pdev->dev); + lp = netdev_priv(dev); + + err = pci_request_regions(pdev, DRV_NAME); + + if (err) { + dev_err(&pdev->dev, "Failed to request PCI regions\n"); + goto err_out_free_dev; + } + + ioaddr = pci_iomap(pdev, bar, io_size); + if (!ioaddr) { + dev_err(&pdev->dev, "ioremap failed for device\n"); + err = -EIO; + goto err_out_free_res; + } + + /* If PHY status change register is still set to zero it means the + * bootloader didn't initialize it, so we set it to: + * - enable phy status change + * - enable all phy addresses + * - set to lowest timer divider */ + if (ioread16(ioaddr + PHY_CC) == 0) + iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT | + 7 << TMRDIV_SHIFT, ioaddr + PHY_CC); + + /* Init system & device */ + lp->base = ioaddr; + dev->irq = pdev->irq; + + spin_lock_init(&lp->lock); + pci_set_drvdata(pdev, dev); + + /* Set MAC address */ + card_idx++; + + adrp = (u16 *)dev->dev_addr; + adrp[0] = ioread16(ioaddr + MID_0L); + adrp[1] = ioread16(ioaddr + MID_0M); + adrp[2] = ioread16(ioaddr + MID_0H); + + /* Some bootloader/BIOSes do not initialize + * MAC address, warn about that */ + if (!(adrp[0] || adrp[1] || adrp[2])) { + netdev_warn(dev, "MAC address not initialized, " + "generating random\n"); + eth_hw_addr_random(dev); + } + + /* Link new device into r6040_root_dev */ + lp->pdev = pdev; + lp->dev = dev; + + /* Init RDC private data */ + lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN; + + /* The 
RDC-specific entries in the device structure. */ + dev->netdev_ops = &r6040_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + netif_napi_add(dev, &lp->napi, r6040_poll, 64); + + lp->mii_bus = mdiobus_alloc(); + if (!lp->mii_bus) { + dev_err(&pdev->dev, "mdiobus_alloc() failed\n"); + err = -ENOMEM; + goto err_out_unmap; + } + + lp->mii_bus->priv = dev; + lp->mii_bus->read = r6040_mdiobus_read; + lp->mii_bus->write = r6040_mdiobus_write; + lp->mii_bus->name = "r6040_eth_mii"; + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + dev_name(&pdev->dev), card_idx); + lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (!lp->mii_bus->irq) { + err = -ENOMEM; + goto err_out_mdio; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + lp->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(lp->mii_bus); + if (err) { + dev_err(&pdev->dev, "failed to register MII bus\n"); + goto err_out_mdio_irq; + } + + err = r6040_mii_probe(dev); + if (err) { + dev_err(&pdev->dev, "failed to probe MII bus\n"); + goto err_out_mdio_unregister; + } + + /* Register net device. After this dev->name assign */ + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Failed to register net device\n"); + goto err_out_mdio_unregister; + } + return 0; + +err_out_mdio_unregister: + mdiobus_unregister(lp->mii_bus); +err_out_mdio_irq: + kfree(lp->mii_bus->irq); +err_out_mdio: + mdiobus_free(lp->mii_bus); +err_out_unmap: + netif_napi_del(&lp->napi); + pci_iounmap(pdev, ioaddr); +err_out_free_res: + pci_release_regions(pdev); +err_out_free_dev: + free_netdev(dev); +err_out_disable_dev: + pci_disable_device(pdev); +err_out: + return err; +} + +static void r6040_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct r6040_private *lp = netdev_priv(dev); + + unregister_netdev(dev); + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); + netif_napi_del(&lp->napi); + pci_iounmap(pdev, lp->base); + pci_release_regions(pdev); + free_netdev(dev); + pci_disable_device(pdev); +} + + +static const struct pci_device_id r6040_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, r6040_pci_tbl); + +static struct pci_driver r6040_driver = { + .name = DRV_NAME, + .id_table = r6040_pci_tbl, + .probe = r6040_init_one, + .remove = r6040_remove_one, +}; + +module_pci_driver(r6040_driver); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c new file mode 100644 index 000000000..d79e33b3c --- /dev/null +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -0,0 +1,2120 @@ +/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */ +/* + Copyright 2001-2004 Jeff Garzik + + Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c] + Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c] + Copyright 2001 Manfred Spraul [natsemi.c] + Copyright 1999-2001 by Donald Becker. [natsemi.c] + Written 1997-2001 by Donald Becker. [8139too.c] + Copyright 1998-2001 by Jes Sorensen, . [acenic.c] + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. 
+ + See the file COPYING in this distribution for more information. + + Contributors: + + Wake-on-LAN support - Felipe Damasio + PCI suspend/resume - Felipe Damasio + LinkChg interrupt - Felipe Damasio + + TODO: + * Test Tx checksumming thoroughly + + Low priority TODO: + * Complete reset on PciErr + * Consider Rx interrupt mitigation using TimerIntr + * Investigate using skb->priority with h/w VLAN priority + * Investigate using High Priority Tx Queue with skb->priority + * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error + * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error + * Implement Tx software interrupt mitigation via + Tx descriptor bit + * The real minimum of CP_MIN_MTU is 4 bytes. However, + for this to be supported, one must(?) turn on packet padding. + * Support external MII transceivers (patch available) + + NOTES: + * TX checksumming is considered experimental. It is off by + default, use ethtool to turn it on. + + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "8139cp" +#define DRV_VERSION "1.3" +#define DRV_RELDATE "Mar 22, 2004" + + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/compiler.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/gfp.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/cache.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/uaccess.h> + +/* These identify the driver base version and may not be removed. */ +static char version[] = +DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; + +MODULE_AUTHOR("Jeff Garzik "); +MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); + +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number"); + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +static int multicast_filter_limit = 32; +module_param(multicast_filter_limit, int, 0); +MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses"); + +#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) +#define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */ +#define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */ +#define CP_REGS_SIZE (0xff + 1) +#define CP_REGS_VER 1 /* version 1 */ +#define CP_RX_RING_SIZE 64 +#define CP_TX_RING_SIZE 64 +#define CP_RING_BYTES \ + ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \ + (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \ + CP_STATS_SIZE) +#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1)) +#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1)) +#define TX_BUFFS_AVAIL(CP) \ + (((CP)->tx_tail <= (CP)->tx_head) ? \ + (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \ + (CP)->tx_tail - (CP)->tx_head - 1) + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ +#define CP_INTERNAL_PHY 32 + +/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ +#define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */ +#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */ +#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */ + +/* Time in jiffies before concluding the transmitter is hung. 
*/ +#define TX_TIMEOUT (6*HZ) + +/* hardware minimum and maximum for a single frame's data payload */ +#define CP_MIN_MTU 60 /* TODO: allow lower, but pad */ +#define CP_MAX_MTU 4096 + +enum { + /* NIC register offsets */ + MAC0 = 0x00, /* Ethernet hardware address. */ + MAR0 = 0x08, /* Multicast filter. */ + StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */ + TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */ + HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */ + Cmd = 0x37, /* Command register */ + IntrMask = 0x3C, /* Interrupt mask */ + IntrStatus = 0x3E, /* Interrupt status */ + TxConfig = 0x40, /* Tx configuration */ + ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */ + RxConfig = 0x44, /* Rx configuration */ + RxMissed = 0x4C, /* 24 bits valid, write clears */ + Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */ + Config1 = 0x52, /* Config1 */ + Config3 = 0x59, /* Config3 */ + Config4 = 0x5A, /* Config4 */ + MultiIntr = 0x5C, /* Multiple interrupt select */ + BasicModeCtrl = 0x62, /* MII BMCR */ + BasicModeStatus = 0x64, /* MII BMSR */ + NWayAdvert = 0x66, /* MII ADVERTISE */ + NWayLPAR = 0x68, /* MII LPA */ + NWayExpansion = 0x6A, /* MII Expansion */ + Config5 = 0xD8, /* Config5 */ + TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ + RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ + CpCmd = 0xE0, /* C+ Command register (C+ mode only) */ + IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */ + RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */ + TxThresh = 0xEC, /* Early Tx threshold */ + OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */ + OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */ + + /* Tx and Rx status descriptors */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ + LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */ + MSSShift = 16, /* MSS value position */ + MSSMask = 0xfff, /* MSS value: 11 bits */ + TxError = (1 << 23), /* Tx error summary */ + RxError = (1 << 20), /* Rx error summary */ + IPCS = (1 << 18), /* Calculate IP checksum */ + UDPCS = (1 << 17), /* Calculate UDP/IP checksum */ + TCPCS = (1 << 16), /* Calculate TCP/IP checksum */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ + RxVlanTagged = (1 << 16), /* Rx VLAN tag available */ + IPFail = (1 << 15), /* IP checksum failed */ + UDPFail = (1 << 14), /* UDP/IP checksum failed */ + TCPFail = (1 << 13), /* TCP/IP checksum failed */ + NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */ + PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */ + PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */ + RxProtoTCP = 1, + RxProtoUDP = 2, + RxProtoIP = 3, + TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */ + TxOWC = (1 << 22), /* Tx Out-of-window collision */ + TxLinkFail = (1 << 21), /* Link failed during Tx of packet */ + TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */ + TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */ + TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */ + RxErrFrame = (1 << 27), /* Rx frame alignment error */ + RxMcast = (1 << 26), /* Rx multicast packet rcv'd */ + RxErrCRC = (1 << 18), /* Rx CRC error */ + RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */ + RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */ + RxErrFIFO = (1 
<< 22), /* Rx error, FIFO overflowed, pkt bad */ + + /* StatsAddr register */ + DumpStats = (1 << 3), /* Begin stats dump */ + + /* RxConfig register */ + RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */ + RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */ + AcceptErr = 0x20, /* Accept packets with CRC errors */ + AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */ + AcceptBroadcast = 0x08, /* Accept broadcast packets */ + AcceptMulticast = 0x04, /* Accept multicast packets */ + AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */ + AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */ + + /* IntrMask / IntrStatus registers */ + PciErr = (1 << 15), /* System error on the PCI bus */ + TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */ + LenChg = (1 << 13), /* Cable length change */ + SWInt = (1 << 8), /* Software-requested interrupt */ + TxEmpty = (1 << 7), /* No Tx descriptors available */ + RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */ + LinkChg = (1 << 5), /* Packet underrun, or link change */ + RxEmpty = (1 << 4), /* No Rx descriptors available */ + TxErr = (1 << 3), /* Tx error */ + TxOK = (1 << 2), /* Tx packet sent */ + RxErr = (1 << 1), /* Rx error */ + RxOK = (1 << 0), /* Rx packet received */ + IntrResvd = (1 << 10), /* reserved, according to RealTek engineers, + but hardware likes to raise it */ + + IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty | + RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK | + RxErr | RxOK | IntrResvd, + + /* C mode command register */ + CmdReset = (1 << 4), /* Enable to reset; self-clearing */ + RxOn = (1 << 3), /* Rx mode enable */ + TxOn = (1 << 2), /* Tx mode enable */ + + /* C+ mode command register */ + RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */ + RxChkSum = (1 << 5), /* Rx checksum offload enable */ + PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */ + PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */ + CpRxOn = (1 << 1), /* Rx mode enable */ + CpTxOn = (1 << 0), /* Tx mode enable */ + + /* Cfg9436 EEPROM control register */ + Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */ + Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */ + + /* TxConfig register */ + IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */ + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Early Tx Threshold register */ + TxThreshMask = 0x3f, /* Mask bits 5-0 */ + TxThreshMax = 2048, /* Max early Tx threshold */ + + /* Config1 register */ + DriverLoaded = (1 << 5), /* Software marker, driver is loaded */ + LWACT = (1 << 4), /* LWAKE active mode */ + PMEnable = (1 << 0), /* Enable various PM features of chip */ + + /* Config3 register */ + PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + + /* Config4 register */ + LWPTN = (1 << 1), /* LWAKE Pattern */ + LWPME = (1 << 4), /* LANWAKE vs PMEB */ + + /* Config5 register */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + LANWake = (1 << 1), /* Enable LANWake signal */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + + cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty, + cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr, + cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask, +}; + 
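The Tx and Rx status descriptor bits defined above (DescOwn, RingEnd, FirstFrag, LastFrag, and the length carried in the low bits of opts1) form the ownership contract between driver and NIC: the driver sets DescOwn when it hands a buffer to the hardware, and the hardware clears it once the descriptor has been consumed. The short standalone sketch below is not part of the driver; it only illustrates how such a status word is packed and decoded. The EX_* names are invented for the illustration, their bit positions mirror the enum above, and the 13-bit length mask matches the status & 0x1fff extraction used for Rx in cp_rx_poll() further down.

/* Standalone illustration (not driver code): packing and decoding a C+ style
 * descriptor status word.  Build with any C compiler and run directly. */
#include <stdint.h>
#include <stdio.h>

#define EX_DESC_OWN   (1u << 31)	/* descriptor currently owned by the NIC */
#define EX_RING_END   (1u << 30)	/* last descriptor in the ring */
#define EX_FIRST_FRAG (1u << 29)	/* first buffer of a frame */
#define EX_LAST_FRAG  (1u << 28)	/* last buffer of a frame */
#define EX_LEN_MASK   0x1fffu		/* low 13 bits: byte count */

static void ex_decode(uint32_t opts1)
{
	printf("owner=%s ring_end=%d first=%d last=%d len=%u\n",
	       (opts1 & EX_DESC_OWN) ? "NIC" : "CPU",
	       !!(opts1 & EX_RING_END),
	       !!(opts1 & EX_FIRST_FRAG),
	       !!(opts1 & EX_LAST_FRAG),
	       (unsigned)(opts1 & EX_LEN_MASK));
}

int main(void)
{
	/* A completed Rx descriptor: the NIC cleared the own bit and reported
	 * a 68-byte frame (64 bytes of payload plus the 4-byte CRC). */
	ex_decode(EX_FIRST_FRAG | EX_LAST_FRAG | 68);

	/* An empty 1536-byte buffer handed back to the NIC at the end of the
	 * ring, as the driver does for the last Rx slot. */
	ex_decode(EX_DESC_OWN | EX_RING_END | 1536);
	return 0;
}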
+static const unsigned int cp_rx_config = + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); + +struct cp_desc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct cp_dma_stats { + __le64 tx_ok; + __le64 rx_ok; + __le64 tx_err; + __le32 rx_err; + __le16 rx_fifo; + __le16 frame_align; + __le32 tx_ok_1col; + __le32 tx_ok_mcol; + __le64 rx_ok_phys; + __le64 rx_ok_bcast; + __le32 rx_ok_mcast; + __le16 tx_abort; + __le16 tx_underrun; +} __packed; + +struct cp_extra_stats { + unsigned long rx_frags; +}; + +struct cp_private { + void __iomem *regs; + struct net_device *dev; + spinlock_t lock; + u32 msg_enable; + + struct napi_struct napi; + + struct pci_dev *pdev; + u32 rx_config; + u16 cpcmd; + + struct cp_extra_stats cp_stats; + + unsigned rx_head ____cacheline_aligned; + unsigned rx_tail; + struct cp_desc *rx_ring; + struct sk_buff *rx_skb[CP_RX_RING_SIZE]; + + unsigned tx_head ____cacheline_aligned; + unsigned tx_tail; + struct cp_desc *tx_ring; + struct sk_buff *tx_skb[CP_TX_RING_SIZE]; + + unsigned rx_buf_sz; + unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */ + + dma_addr_t ring_dma; + + struct mii_if_info mii_if; +}; + +#define cpr8(reg) readb(cp->regs + (reg)) +#define cpr16(reg) readw(cp->regs + (reg)) +#define cpr32(reg) readl(cp->regs + (reg)) +#define cpw8(reg,val) writeb((val), cp->regs + (reg)) +#define cpw16(reg,val) writew((val), cp->regs + (reg)) +#define cpw32(reg,val) writel((val), cp->regs + (reg)) +#define cpw8_f(reg,val) do { \ + writeb((val), cp->regs + (reg)); \ + readb(cp->regs + (reg)); \ + } while (0) +#define cpw16_f(reg,val) do { \ + writew((val), cp->regs + (reg)); \ + readw(cp->regs + (reg)); \ + } while (0) +#define cpw32_f(reg,val) do { \ + writel((val), cp->regs + (reg)); \ + readl(cp->regs + (reg)); \ + } while (0) + + +static void __cp_set_rx_mode (struct net_device *dev); +static void cp_tx (struct cp_private *cp); +static void cp_clean_rings (struct cp_private *cp); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cp_poll_controller(struct net_device *dev); +#endif +static int cp_get_eeprom_len(struct net_device *dev); +static int cp_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data); +static int cp_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data); + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "tx_ok" }, + { "rx_ok" }, + { "tx_err" }, + { "rx_err" }, + { "rx_fifo" }, + { "frame_align" }, + { "tx_ok_1col" }, + { "tx_ok_mcol" }, + { "rx_ok_phys" }, + { "rx_ok_bcast" }, + { "rx_ok_mcast" }, + { "tx_abort" }, + { "tx_underrun" }, + { "rx_frags" }, +}; + + +static inline void cp_set_rxbufsize (struct cp_private *cp) +{ + unsigned int mtu = cp->dev->mtu; + + if (mtu > ETH_DATA_LEN) + /* MTU + ethernet header + FCS + optional VLAN tag */ + cp->rx_buf_sz = mtu + ETH_HLEN + 8; + else + cp->rx_buf_sz = PKT_BUF_SZ; +} + +static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, + struct cp_desc *desc) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + skb->protocol = eth_type_trans (skb, cp->dev); + + cp->dev->stats.rx_packets++; + cp->dev->stats.rx_bytes += skb->len; + + if (opts2 & RxVlanTagged) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); + + napi_gro_receive(&cp->napi, skb); +} + +static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, + u32 status, u32 len) +{ + netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n", + rx_tail, status, len); + 
cp->dev->stats.rx_errors++; + if (status & RxErrFrame) + cp->dev->stats.rx_frame_errors++; + if (status & RxErrCRC) + cp->dev->stats.rx_crc_errors++; + if ((status & RxErrRunt) || (status & RxErrLong)) + cp->dev->stats.rx_length_errors++; + if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) + cp->dev->stats.rx_length_errors++; + if (status & RxErrFIFO) + cp->dev->stats.rx_fifo_errors++; +} + +static inline unsigned int cp_rx_csum_ok (u32 status) +{ + unsigned int protocol = (status >> 16) & 0x3; + + if (((protocol == RxProtoTCP) && !(status & TCPFail)) || + ((protocol == RxProtoUDP) && !(status & UDPFail))) + return 1; + else + return 0; +} + +static int cp_rx_poll(struct napi_struct *napi, int budget) +{ + struct cp_private *cp = container_of(napi, struct cp_private, napi); + struct net_device *dev = cp->dev; + unsigned int rx_tail = cp->rx_tail; + int rx; + +rx_status_loop: + rx = 0; + cpw16(IntrStatus, cp_rx_intr_mask); + + while (rx < budget) { + u32 status, len; + dma_addr_t mapping, new_mapping; + struct sk_buff *skb, *new_skb; + struct cp_desc *desc; + const unsigned buflen = cp->rx_buf_sz; + + skb = cp->rx_skb[rx_tail]; + BUG_ON(!skb); + + desc = &cp->rx_ring[rx_tail]; + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + + len = (status & 0x1fff) - 4; + mapping = le64_to_cpu(desc->addr); + + if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) { + /* we don't support incoming fragmented frames. + * instead, we attempt to ensure that the + * pre-allocated RX skbs are properly sized such + * that RX fragments are never encountered + */ + cp_rx_err_acct(cp, rx_tail, status, len); + dev->stats.rx_dropped++; + cp->cp_stats.rx_frags++; + goto rx_next; + } + + if (status & (RxError | RxErrFIFO)) { + cp_rx_err_acct(cp, rx_tail, status, len); + goto rx_next; + } + + netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n", + rx_tail, status, len); + + new_skb = napi_alloc_skb(napi, buflen); + if (!new_skb) { + dev->stats.rx_dropped++; + goto rx_next; + } + + new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { + dev->stats.rx_dropped++; + kfree_skb(new_skb); + goto rx_next; + } + + dma_unmap_single(&cp->pdev->dev, mapping, + buflen, PCI_DMA_FROMDEVICE); + + /* Handle checksum offloading for incoming packets. 
*/ + if (cp_rx_csum_ok(status)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb_put(skb, len); + + cp->rx_skb[rx_tail] = new_skb; + + cp_rx_skb(cp, skb, desc); + rx++; + mapping = new_mapping; + +rx_next: + cp->rx_ring[rx_tail].opts2 = 0; + cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); + if (rx_tail == (CP_RX_RING_SIZE - 1)) + desc->opts1 = cpu_to_le32(DescOwn | RingEnd | + cp->rx_buf_sz); + else + desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); + rx_tail = NEXT_RX(rx_tail); + } + + cp->rx_tail = rx_tail; + + /* if we did not reach work limit, then we're done with + * this round of polling + */ + if (rx < budget) { + unsigned long flags; + + if (cpr16(IntrStatus) & cp_rx_intr_mask) + goto rx_status_loop; + + napi_gro_flush(napi, false); + spin_lock_irqsave(&cp->lock, flags); + __napi_complete(napi); + cpw16_f(IntrMask, cp_intr_mask); + spin_unlock_irqrestore(&cp->lock, flags); + } + + return rx; +} + +static irqreturn_t cp_interrupt (int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct cp_private *cp; + int handled = 0; + u16 status; + + if (unlikely(dev == NULL)) + return IRQ_NONE; + cp = netdev_priv(dev); + + spin_lock(&cp->lock); + + status = cpr16(IntrStatus); + if (!status || (status == 0xFFFF)) + goto out_unlock; + + handled = 1; + + netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n", + status, cpr8(Cmd), cpr16(CpCmd)); + + cpw16(IntrStatus, status & ~cp_rx_intr_mask); + + /* close possible race's with dev_close */ + if (unlikely(!netif_running(dev))) { + cpw16(IntrMask, 0); + goto out_unlock; + } + + if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) + if (napi_schedule_prep(&cp->napi)) { + cpw16_f(IntrMask, cp_norx_intr_mask); + __napi_schedule(&cp->napi); + } + + if (status & (TxOK | TxErr | TxEmpty | SWInt)) + cp_tx(cp); + if (status & LinkChg) + mii_check_media(&cp->mii_if, netif_msg_link(cp), false); + + + if (status & PciErr) { + u16 pci_status; + + pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); + netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n", + status, pci_status); + + /* TODO: reset hardware */ + } + +out_unlock: + spin_unlock(&cp->lock); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. 
+ */ +static void cp_poll_controller(struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + const int irq = cp->pdev->irq; + + disable_irq(irq); + cp_interrupt(irq, dev); + enable_irq(irq); +} +#endif + +static void cp_tx (struct cp_private *cp) +{ + unsigned tx_head = cp->tx_head; + unsigned tx_tail = cp->tx_tail; + unsigned bytes_compl = 0, pkts_compl = 0; + + while (tx_tail != tx_head) { + struct cp_desc *txd = cp->tx_ring + tx_tail; + struct sk_buff *skb; + u32 status; + + rmb(); + status = le32_to_cpu(txd->opts1); + if (status & DescOwn) + break; + + skb = cp->tx_skb[tx_tail]; + BUG_ON(!skb); + + dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), + le32_to_cpu(txd->opts1) & 0xffff, + PCI_DMA_TODEVICE); + + if (status & LastFrag) { + if (status & (TxError | TxFIFOUnder)) { + netif_dbg(cp, tx_err, cp->dev, + "tx err, status 0x%x\n", status); + cp->dev->stats.tx_errors++; + if (status & TxOWC) + cp->dev->stats.tx_window_errors++; + if (status & TxMaxCol) + cp->dev->stats.tx_aborted_errors++; + if (status & TxLinkFail) + cp->dev->stats.tx_carrier_errors++; + if (status & TxFIFOUnder) + cp->dev->stats.tx_fifo_errors++; + } else { + cp->dev->stats.collisions += + ((status >> TxColCntShift) & TxColCntMask); + cp->dev->stats.tx_packets++; + cp->dev->stats.tx_bytes += skb->len; + netif_dbg(cp, tx_done, cp->dev, + "tx done, slot %d\n", tx_tail); + } + bytes_compl += skb->len; + pkts_compl++; + dev_kfree_skb_irq(skb); + } + + cp->tx_skb[tx_tail] = NULL; + + tx_tail = NEXT_TX(tx_tail); + } + + cp->tx_tail = tx_tail; + + netdev_completed_queue(cp->dev, pkts_compl, bytes_compl); + if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) + netif_wake_queue(cp->dev); +} + +static inline u32 cp_tx_vlan_tag(struct sk_buff *skb) +{ + return skb_vlan_tag_present(skb) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, + int first, int entry_last) +{ + int frag, index; + struct cp_desc *txd; + skb_frag_t *this_frag; + for (frag = 0; frag+first < entry_last; frag++) { + index = first+frag; + cp->tx_skb[index] = NULL; + txd = &cp->tx_ring[index]; + this_frag = &skb_shinfo(skb)->frags[frag]; + dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), + skb_frag_size(this_frag), PCI_DMA_TODEVICE); + } +} + +static netdev_tx_t cp_start_xmit (struct sk_buff *skb, + struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned entry; + u32 eor, flags; + unsigned long intr_flags; + __le32 opts2; + int mss = 0; + + spin_lock_irqsave(&cp->lock, intr_flags); + + /* This is a hard error, log it. */ + if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&cp->lock, intr_flags); + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + entry = cp->tx_head; + eor = (entry == (CP_TX_RING_SIZE - 1)) ? 
RingEnd : 0; + mss = skb_shinfo(skb)->gso_size; + + opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); + + if (skb_shinfo(skb)->nr_frags == 0) { + struct cp_desc *txd = &cp->tx_ring[entry]; + u32 len; + dma_addr_t mapping; + + len = skb->len; + mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&cp->pdev->dev, mapping)) + goto out_dma_error; + + txd->opts2 = opts2; + txd->addr = cpu_to_le64(mapping); + wmb(); + + flags = eor | len | DescOwn | FirstFrag | LastFrag; + + if (mss) + flags |= LargeSend | ((mss & MSSMask) << MSSShift); + else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + if (ip->protocol == IPPROTO_TCP) + flags |= IPCS | TCPCS; + else if (ip->protocol == IPPROTO_UDP) + flags |= IPCS | UDPCS; + else + WARN_ON(1); /* we need a WARN() */ + } + + txd->opts1 = cpu_to_le32(flags); + wmb(); + + cp->tx_skb[entry] = skb; + entry = NEXT_TX(entry); + } else { + struct cp_desc *txd; + u32 first_len, first_eor; + dma_addr_t first_mapping; + int frag, first_entry = entry; + const struct iphdr *ip = ip_hdr(skb); + + /* We must give this initial chunk to the device last. + * Otherwise we could race with the device. + */ + first_eor = eor; + first_len = skb_headlen(skb); + first_mapping = dma_map_single(&cp->pdev->dev, skb->data, + first_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&cp->pdev->dev, first_mapping)) + goto out_dma_error; + + cp->tx_skb[entry] = skb; + entry = NEXT_TX(entry); + + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; + u32 len; + u32 ctrl; + dma_addr_t mapping; + + len = skb_frag_size(this_frag); + mapping = dma_map_single(&cp->pdev->dev, + skb_frag_address(this_frag), + len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&cp->pdev->dev, mapping)) { + unwind_tx_frag_mapping(cp, skb, first_entry, entry); + goto out_dma_error; + } + + eor = (entry == (CP_TX_RING_SIZE - 1)) ? 
RingEnd : 0; + + ctrl = eor | len | DescOwn; + + if (mss) + ctrl |= LargeSend | + ((mss & MSSMask) << MSSShift); + else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (ip->protocol == IPPROTO_TCP) + ctrl |= IPCS | TCPCS; + else if (ip->protocol == IPPROTO_UDP) + ctrl |= IPCS | UDPCS; + else + BUG(); + } + + if (frag == skb_shinfo(skb)->nr_frags - 1) + ctrl |= LastFrag; + + txd = &cp->tx_ring[entry]; + txd->opts2 = opts2; + txd->addr = cpu_to_le64(mapping); + wmb(); + + txd->opts1 = cpu_to_le32(ctrl); + wmb(); + + cp->tx_skb[entry] = skb; + entry = NEXT_TX(entry); + } + + txd = &cp->tx_ring[first_entry]; + txd->opts2 = opts2; + txd->addr = cpu_to_le64(first_mapping); + wmb(); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (ip->protocol == IPPROTO_TCP) + txd->opts1 = cpu_to_le32(first_eor | first_len | + FirstFrag | DescOwn | + IPCS | TCPCS); + else if (ip->protocol == IPPROTO_UDP) + txd->opts1 = cpu_to_le32(first_eor | first_len | + FirstFrag | DescOwn | + IPCS | UDPCS); + else + BUG(); + } else + txd->opts1 = cpu_to_le32(first_eor | first_len | + FirstFrag | DescOwn); + wmb(); + } + cp->tx_head = entry; + + netdev_sent_queue(dev, skb->len); + netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", + entry, skb->len); + if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) + netif_stop_queue(dev); + +out_unlock: + spin_unlock_irqrestore(&cp->lock, intr_flags); + + cpw8(TxPoll, NormalTxPoll); + + return NETDEV_TX_OK; +out_dma_error: + dev_kfree_skb_any(skb); + cp->dev->stats.tx_dropped++; + goto out_unlock; +} + +/* Set or clear the multicast filter for this adaptor. + This routine is not state sensitive and need not be SMP locked. */ + +static void __cp_set_rx_mode (struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + + /* Note: do not reorder, GCC is clever about common statements. */ + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + /* We can safely update without stopping the chip. */ + cp->rx_config = cp_rx_config | rx_mode; + cpw32_f(RxConfig, cp->rx_config); + + cpw32_f (MAR0 + 0, mc_filter[0]); + cpw32_f (MAR0 + 4, mc_filter[1]); +} + +static void cp_set_rx_mode (struct net_device *dev) +{ + unsigned long flags; + struct cp_private *cp = netdev_priv(dev); + + spin_lock_irqsave (&cp->lock, flags); + __cp_set_rx_mode(dev); + spin_unlock_irqrestore (&cp->lock, flags); +} + +static void __cp_get_stats(struct cp_private *cp) +{ + /* only lower 24 bits valid; write any value to clear */ + cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); + cpw32 (RxMissed, 0); +} + +static struct net_device_stats *cp_get_stats(struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + + /* The chip only need report frame silently dropped. 
*/ + spin_lock_irqsave(&cp->lock, flags); + if (netif_running(dev) && netif_device_present(dev)) + __cp_get_stats(cp); + spin_unlock_irqrestore(&cp->lock, flags); + + return &dev->stats; +} + +static void cp_stop_hw (struct cp_private *cp) +{ + cpw16(IntrStatus, ~(cpr16(IntrStatus))); + cpw16_f(IntrMask, 0); + cpw8(Cmd, 0); + cpw16_f(CpCmd, 0); + cpw16_f(IntrStatus, ~(cpr16(IntrStatus))); + + cp->rx_tail = 0; + cp->tx_head = cp->tx_tail = 0; + + netdev_reset_queue(cp->dev); +} + +static void cp_reset_hw (struct cp_private *cp) +{ + unsigned work = 1000; + + cpw8(Cmd, CmdReset); + + while (work--) { + if (!(cpr8(Cmd) & CmdReset)) + return; + + schedule_timeout_uninterruptible(10); + } + + netdev_err(cp->dev, "hardware reset timeout\n"); +} + +static inline void cp_start_hw (struct cp_private *cp) +{ + dma_addr_t ring_dma; + + cpw16(CpCmd, cp->cpcmd); + + /* + * These (at least TxRingAddr) need to be configured after the + * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33 + * (C+ Command Register) recommends that these and more be configured + * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware + * it's been observed that the TxRingAddr is actually reset to garbage + * when C+ mode Tx is enabled in CpCmd. + */ + cpw32_f(HiTxRingAddr, 0); + cpw32_f(HiTxRingAddr + 4, 0); + + ring_dma = cp->ring_dma; + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); + + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); + + /* + * Strictly speaking, the datasheet says this should be enabled + * *before* setting the descriptor addresses. But what, then, would + * prevent it from doing DMA to random unconfigured addresses? + * This variant appears to work fine. + */ + cpw8(Cmd, RxOn | TxOn); + + netdev_reset_queue(cp->dev); +} + +static void cp_enable_irq(struct cp_private *cp) +{ + cpw16_f(IntrMask, cp_intr_mask); +} + +static void cp_init_hw (struct cp_private *cp) +{ + struct net_device *dev = cp->dev; + + cp_reset_hw(cp); + + cpw8_f (Cfg9346, Cfg9346_Unlock); + + /* Restore our idea of the MAC address. */ + cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); + cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); + + cp_start_hw(cp); + cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ + + __cp_set_rx_mode(dev); + cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift)); + + cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable); + /* Disable Wake-on-LAN. 
Can be turned on with ETHTOOL_SWOL */ + cpw8(Config3, PARMEnable); + cp->wol_enabled = 0; + + cpw8(Config5, cpr8(Config5) & PMEStatus); + + cpw16(MultiIntr, 0); + + cpw8_f(Cfg9346, Cfg9346_Lock); +} + +static int cp_refill_rx(struct cp_private *cp) +{ + struct net_device *dev = cp->dev; + unsigned i; + + for (i = 0; i < CP_RX_RING_SIZE; i++) { + struct sk_buff *skb; + dma_addr_t mapping; + + skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); + if (!skb) + goto err_out; + + mapping = dma_map_single(&cp->pdev->dev, skb->data, + cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&cp->pdev->dev, mapping)) { + kfree_skb(skb); + goto err_out; + } + cp->rx_skb[i] = skb; + + cp->rx_ring[i].opts2 = 0; + cp->rx_ring[i].addr = cpu_to_le64(mapping); + if (i == (CP_RX_RING_SIZE - 1)) + cp->rx_ring[i].opts1 = + cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); + else + cp->rx_ring[i].opts1 = + cpu_to_le32(DescOwn | cp->rx_buf_sz); + } + + return 0; + +err_out: + cp_clean_rings(cp); + return -ENOMEM; +} + +static void cp_init_rings_index (struct cp_private *cp) +{ + cp->rx_tail = 0; + cp->tx_head = cp->tx_tail = 0; +} + +static int cp_init_rings (struct cp_private *cp) +{ + memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); + cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); + + cp_init_rings_index(cp); + + return cp_refill_rx (cp); +} + +static int cp_alloc_rings (struct cp_private *cp) +{ + struct device *d = &cp->pdev->dev; + void *mem; + int rc; + + mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + cp->rx_ring = mem; + cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; + + rc = cp_init_rings(cp); + if (rc < 0) + dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); + + return rc; +} + +static void cp_clean_rings (struct cp_private *cp) +{ + struct cp_desc *desc; + unsigned i; + + for (i = 0; i < CP_RX_RING_SIZE; i++) { + if (cp->rx_skb[i]) { + desc = cp->rx_ring + i; + dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), + cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(cp->rx_skb[i]); + } + } + + for (i = 0; i < CP_TX_RING_SIZE; i++) { + if (cp->tx_skb[i]) { + struct sk_buff *skb = cp->tx_skb[i]; + + desc = cp->tx_ring + i; + dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), + le32_to_cpu(desc->opts1) & 0xffff, + PCI_DMA_TODEVICE); + if (le32_to_cpu(desc->opts1) & LastFrag) + dev_kfree_skb(skb); + cp->dev->stats.tx_dropped++; + } + } + netdev_reset_queue(cp->dev); + + memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); + memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); + + memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); + memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); +} + +static void cp_free_rings (struct cp_private *cp) +{ + cp_clean_rings(cp); + dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, + cp->ring_dma); + cp->rx_ring = NULL; + cp->tx_ring = NULL; +} + +static int cp_open (struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + const int irq = cp->pdev->irq; + int rc; + + netif_dbg(cp, ifup, dev, "enabling interface\n"); + + rc = cp_alloc_rings(cp); + if (rc) + return rc; + + napi_enable(&cp->napi); + + cp_init_hw(cp); + + rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) + goto err_out_hw; + + cp_enable_irq(cp); + + netif_carrier_off(dev); + mii_check_media(&cp->mii_if, netif_msg_link(cp), true); + netif_start_queue(dev); + + return 0; + +err_out_hw: + 
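+ /* request_irq() failed: unwind cp_open() in reverse order -- quiesce NAPI,
+  * stop the hardware, then release the descriptor rings allocated above. */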
napi_disable(&cp->napi); + cp_stop_hw(cp); + cp_free_rings(cp); + return rc; +} + +static int cp_close (struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + + napi_disable(&cp->napi); + + netif_dbg(cp, ifdown, dev, "disabling interface\n"); + + spin_lock_irqsave(&cp->lock, flags); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + cp_stop_hw(cp); + + spin_unlock_irqrestore(&cp->lock, flags); + + free_irq(cp->pdev->irq, dev); + + cp_free_rings(cp); + return 0; +} + +static void cp_tx_timeout(struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + int rc; + + netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", + cpr8(Cmd), cpr16(CpCmd), + cpr16(IntrStatus), cpr16(IntrMask)); + + spin_lock_irqsave(&cp->lock, flags); + + cp_stop_hw(cp); + cp_clean_rings(cp); + rc = cp_init_rings(cp); + cp_start_hw(cp); + cp_enable_irq(cp); + + netif_wake_queue(dev); + + spin_unlock_irqrestore(&cp->lock, flags); +} + +static int cp_change_mtu(struct net_device *dev, int new_mtu) +{ + struct cp_private *cp = netdev_priv(dev); + + /* check for invalid MTU, according to hardware limits */ + if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU) + return -EINVAL; + + /* if network interface not up, no need for complexity */ + if (!netif_running(dev)) { + dev->mtu = new_mtu; + cp_set_rxbufsize(cp); /* set new rx buf size */ + return 0; + } + + /* network IS up, close it, reset MTU, and come up again. */ + cp_close(dev); + dev->mtu = new_mtu; + cp_set_rxbufsize(cp); + return cp_open(dev); +} + +static const char mii_2_8139_map[8] = { + BasicModeCtrl, + BasicModeStatus, + 0, + 0, + NWayAdvert, + NWayLPAR, + NWayExpansion, + 0 +}; + +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct cp_private *cp = netdev_priv(dev); + + return location < 8 && mii_2_8139_map[location] ? + readw(cp->regs + mii_2_8139_map[location]) : 0; +} + + +static void mdio_write(struct net_device *dev, int phy_id, int location, + int value) +{ + struct cp_private *cp = netdev_priv(dev); + + if (location == 0) { + cpw8(Cfg9346, Cfg9346_Unlock); + cpw16(BasicModeCtrl, value); + cpw8(Cfg9346, Cfg9346_Lock); + } else if (location < 8 && mii_2_8139_map[location]) + cpw16(mii_2_8139_map[location], value); +} + +/* Set the ethtool Wake-on-LAN settings */ +static int netdev_set_wol (struct cp_private *cp, + const struct ethtool_wolinfo *wol) +{ + u8 options; + + options = cpr8 (Config3) & ~(LinkUp | MagicPacket); + /* If WOL is being disabled, no need for complexity */ + if (wol->wolopts) { + if (wol->wolopts & WAKE_PHY) options |= LinkUp; + if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket; + } + + cpw8 (Cfg9346, Cfg9346_Unlock); + cpw8 (Config3, options); + cpw8 (Cfg9346, Cfg9346_Lock); + + options = 0; /* Paranoia setting */ + options = cpr8 (Config5) & ~(UWF | MWF | BWF); + /* If WOL is being disabled, no need for complexity */ + if (wol->wolopts) { + if (wol->wolopts & WAKE_UCAST) options |= UWF; + if (wol->wolopts & WAKE_BCAST) options |= BWF; + if (wol->wolopts & WAKE_MCAST) options |= MWF; + } + + cpw8 (Config5, options); + + cp->wol_enabled = (wol->wolopts) ? 
1 : 0; + + return 0; +} + +/* Get the ethtool Wake-on-LAN settings */ +static void netdev_get_wol (struct cp_private *cp, + struct ethtool_wolinfo *wol) +{ + u8 options; + + wol->wolopts = 0; /* Start from scratch */ + wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC | + WAKE_MCAST | WAKE_UCAST; + /* We don't need to go on if WOL is disabled */ + if (!cp->wol_enabled) return; + + options = cpr8 (Config3); + if (options & LinkUp) wol->wolopts |= WAKE_PHY; + if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC; + + options = 0; /* Paranoia setting */ + options = cpr8 (Config5); + if (options & UWF) wol->wolopts |= WAKE_UCAST; + if (options & BWF) wol->wolopts |= WAKE_BCAST; + if (options & MWF) wol->wolopts |= WAKE_MCAST; +} + +static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct cp_private *cp = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); +} + +static void cp_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + ring->rx_max_pending = CP_RX_RING_SIZE; + ring->tx_max_pending = CP_TX_RING_SIZE; + ring->rx_pending = CP_RX_RING_SIZE; + ring->tx_pending = CP_TX_RING_SIZE; +} + +static int cp_get_regs_len(struct net_device *dev) +{ + return CP_REGS_SIZE; +} + +static int cp_get_sset_count (struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return CP_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cp_private *cp = netdev_priv(dev); + int rc; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + rc = mii_ethtool_gset(&cp->mii_if, cmd); + spin_unlock_irqrestore(&cp->lock, flags); + + return rc; +} + +static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cp_private *cp = netdev_priv(dev); + int rc; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + rc = mii_ethtool_sset(&cp->mii_if, cmd); + spin_unlock_irqrestore(&cp->lock, flags); + + return rc; +} + +static int cp_nway_reset(struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + return mii_nway_restart(&cp->mii_if); +} + +static u32 cp_get_msglevel(struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + return cp->msg_enable; +} + +static void cp_set_msglevel(struct net_device *dev, u32 value) +{ + struct cp_private *cp = netdev_priv(dev); + cp->msg_enable = value; +} + +static int cp_set_features(struct net_device *dev, netdev_features_t features) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + + if (!((dev->features ^ features) & NETIF_F_RXCSUM)) + return 0; + + spin_lock_irqsave(&cp->lock, flags); + + if (features & NETIF_F_RXCSUM) + cp->cpcmd |= RxChkSum; + else + cp->cpcmd &= ~RxChkSum; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + cp->cpcmd |= RxVlanOn; + else + cp->cpcmd &= ~RxVlanOn; + + cpw16_f(CpCmd, cp->cpcmd); + spin_unlock_irqrestore(&cp->lock, flags); + + return 0; +} + +static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + + if (regs->len < CP_REGS_SIZE) + return /* -EINVAL */; + + regs->version = CP_REGS_VER; + + spin_lock_irqsave(&cp->lock, flags); + memcpy_fromio(p, cp->regs, CP_REGS_SIZE); + spin_unlock_irqrestore(&cp->lock, flags); +} + +static void cp_get_wol 
(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave (&cp->lock, flags); + netdev_get_wol (cp, wol); + spin_unlock_irqrestore (&cp->lock, flags); +} + +static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave (&cp->lock, flags); + rc = netdev_set_wol (cp, wol); + spin_unlock_irqrestore (&cp->lock, flags); + + return rc; +} + +static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); + break; + default: + BUG(); + break; + } +} + +static void cp_get_ethtool_stats (struct net_device *dev, + struct ethtool_stats *estats, u64 *tmp_stats) +{ + struct cp_private *cp = netdev_priv(dev); + struct cp_dma_stats *nic_stats; + dma_addr_t dma; + int i; + + nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), + &dma, GFP_KERNEL); + if (!nic_stats) + return; + + /* begin NIC statistics dump */ + cpw32(StatsAddr + 4, (u64)dma >> 32); + cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats); + cpr32(StatsAddr); + + for (i = 0; i < 1000; i++) { + if ((cpr32(StatsAddr) & DumpStats) == 0) + break; + udelay(10); + } + cpw32(StatsAddr, 0); + cpw32(StatsAddr + 4, 0); + cpr32(StatsAddr); + + i = 0; + tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok); + tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok); + tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err); + tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err); + tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo); + tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align); + tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col); + tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol); + tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys); + tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast); + tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast); + tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort); + tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun); + tmp_stats[i++] = cp->cp_stats.rx_frags; + BUG_ON(i != CP_NUM_STATS); + + dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma); +} + +static const struct ethtool_ops cp_ethtool_ops = { + .get_drvinfo = cp_get_drvinfo, + .get_regs_len = cp_get_regs_len, + .get_sset_count = cp_get_sset_count, + .get_settings = cp_get_settings, + .set_settings = cp_set_settings, + .nway_reset = cp_nway_reset, + .get_link = ethtool_op_get_link, + .get_msglevel = cp_get_msglevel, + .set_msglevel = cp_set_msglevel, + .get_regs = cp_get_regs, + .get_wol = cp_get_wol, + .set_wol = cp_set_wol, + .get_strings = cp_get_strings, + .get_ethtool_stats = cp_get_ethtool_stats, + .get_eeprom_len = cp_get_eeprom_len, + .get_eeprom = cp_get_eeprom, + .set_eeprom = cp_set_eeprom, + .get_ringparam = cp_get_ringparam, +}; + +static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct cp_private *cp = netdev_priv(dev); + int rc; + unsigned long flags; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irqsave(&cp->lock, flags); + rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL); + spin_unlock_irqrestore(&cp->lock, flags); + return rc; +} + +static int cp_set_mac_address(struct net_device *dev, void *p) +{ + struct cp_private *cp = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + 
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + spin_lock_irq(&cp->lock); + + cpw8_f(Cfg9346, Cfg9346_Unlock); + cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); + cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); + cpw8_f(Cfg9346, Cfg9346_Lock); + + spin_unlock_irq(&cp->lock); + + return 0; +} + +/* Serial EEPROM section. */ + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ +#define EE_CS 0x08 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */ +#define EE_WRITE_0 0x00 +#define EE_WRITE_1 0x02 +#define EE_DATA_READ 0x01 /* EEPROM chip data out. */ +#define EE_ENB (0x80 | EE_CS) + +/* Delay between EEPROM clock transitions. + No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. + */ + +#define eeprom_delay() readb(ee_addr) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_EXTEND_CMD (4) +#define EE_WRITE_CMD (5) +#define EE_READ_CMD (6) +#define EE_ERASE_CMD (7) + +#define EE_EWDS_ADDR (0) +#define EE_WRAL_ADDR (1) +#define EE_ERAL_ADDR (2) +#define EE_EWEN_ADDR (3) + +#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139 + +static void eeprom_cmd_start(void __iomem *ee_addr) +{ + writeb (EE_ENB & ~EE_CS, ee_addr); + writeb (EE_ENB, ee_addr); + eeprom_delay (); +} + +static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len) +{ + int i; + + /* Shift the command bits out. */ + for (i = cmd_len - 1; i >= 0; i--) { + int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0; + writeb (EE_ENB | dataval, ee_addr); + eeprom_delay (); + writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); + eeprom_delay (); + } + writeb (EE_ENB, ee_addr); + eeprom_delay (); +} + +static void eeprom_cmd_end(void __iomem *ee_addr) +{ + writeb(0, ee_addr); + eeprom_delay (); +} + +static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd, + int addr_len) +{ + int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2)); + + eeprom_cmd_start(ee_addr); + eeprom_cmd(ee_addr, cmd, 3 + addr_len); + eeprom_cmd_end(ee_addr); +} + +static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len) +{ + int i; + u16 retval = 0; + void __iomem *ee_addr = ioaddr + Cfg9346; + int read_cmd = location | (EE_READ_CMD << addr_len); + + eeprom_cmd_start(ee_addr); + eeprom_cmd(ee_addr, read_cmd, 3 + addr_len); + + for (i = 16; i > 0; i--) { + writeb (EE_ENB | EE_SHIFT_CLK, ee_addr); + eeprom_delay (); + retval = + (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 : + 0); + writeb (EE_ENB, ee_addr); + eeprom_delay (); + } + + eeprom_cmd_end(ee_addr); + + return retval; +} + +static void write_eeprom(void __iomem *ioaddr, int location, u16 val, + int addr_len) +{ + int i; + void __iomem *ee_addr = ioaddr + Cfg9346; + int write_cmd = location | (EE_WRITE_CMD << addr_len); + + eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len); + + eeprom_cmd_start(ee_addr); + eeprom_cmd(ee_addr, write_cmd, 3 + addr_len); + eeprom_cmd(ee_addr, val, 16); + eeprom_cmd_end(ee_addr); + + eeprom_cmd_start(ee_addr); + for (i = 0; i < 20000; i++) + if (readb(ee_addr) & EE_DATA_READ) + break; + eeprom_cmd_end(ee_addr); + + eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len); +} + +static int cp_get_eeprom_len(struct net_device *dev) +{ + struct cp_private *cp = netdev_priv(dev); + int size; + + spin_lock_irq(&cp->lock); + size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 
256 : 128; + spin_unlock_irq(&cp->lock); + + return size; +} + +static int cp_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned int addr_len; + u16 val; + u32 offset = eeprom->offset >> 1; + u32 len = eeprom->len; + u32 i = 0; + + eeprom->magic = CP_EEPROM_MAGIC; + + spin_lock_irq(&cp->lock); + + addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; + + if (eeprom->offset & 1) { + val = read_eeprom(cp->regs, offset, addr_len); + data[i++] = (u8)(val >> 8); + offset++; + } + + while (i < len - 1) { + val = read_eeprom(cp->regs, offset, addr_len); + data[i++] = (u8)val; + data[i++] = (u8)(val >> 8); + offset++; + } + + if (i < len) { + val = read_eeprom(cp->regs, offset, addr_len); + data[i] = (u8)val; + } + + spin_unlock_irq(&cp->lock); + return 0; +} + +static int cp_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct cp_private *cp = netdev_priv(dev); + unsigned int addr_len; + u16 val; + u32 offset = eeprom->offset >> 1; + u32 len = eeprom->len; + u32 i = 0; + + if (eeprom->magic != CP_EEPROM_MAGIC) + return -EINVAL; + + spin_lock_irq(&cp->lock); + + addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; + + if (eeprom->offset & 1) { + val = read_eeprom(cp->regs, offset, addr_len) & 0xff; + val |= (u16)data[i++] << 8; + write_eeprom(cp->regs, offset, val, addr_len); + offset++; + } + + while (i < len - 1) { + val = (u16)data[i++]; + val |= (u16)data[i++] << 8; + write_eeprom(cp->regs, offset, val, addr_len); + offset++; + } + + if (i < len) { + val = read_eeprom(cp->regs, offset, addr_len) & 0xff00; + val |= (u16)data[i]; + write_eeprom(cp->regs, offset, val, addr_len); + } + + spin_unlock_irq(&cp->lock); + return 0; +} + +/* Put the board into D3cold state and wait for WakeUp signal */ +static void cp_set_d3_state (struct cp_private *cp) +{ + pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ + pci_set_power_state (cp->pdev, PCI_D3hot); +} + +static const struct net_device_ops cp_netdev_ops = { + .ndo_open = cp_open, + .ndo_stop = cp_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = cp_set_mac_address, + .ndo_set_rx_mode = cp_set_rx_mode, + .ndo_get_stats = cp_get_stats, + .ndo_do_ioctl = cp_ioctl, + .ndo_start_xmit = cp_start_xmit, + .ndo_tx_timeout = cp_tx_timeout, + .ndo_set_features = cp_set_features, + .ndo_change_mtu = cp_change_mtu, + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cp_poll_controller, +#endif +}; + +static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev; + struct cp_private *cp; + int rc; + void __iomem *regs; + resource_size_t pciaddr; + unsigned int addr_len, i, pci_using_dac; + + pr_info_once("%s", version); + + if (pdev->vendor == PCI_VENDOR_ID_REALTEK && + pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) { + dev_info(&pdev->dev, + "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n", + pdev->vendor, pdev->device, pdev->revision); + return -ENODEV; + } + + dev = alloc_etherdev(sizeof(struct cp_private)); + if (!dev) + return -ENOMEM; + SET_NETDEV_DEV(dev, &pdev->dev); + + cp = netdev_priv(dev); + cp->pdev = pdev; + cp->dev = dev; + cp->msg_enable = (debug < 0 ? 
CP_DEF_MSG_ENABLE : debug); + spin_lock_init (&cp->lock); + cp->mii_if.dev = dev; + cp->mii_if.mdio_read = mdio_read; + cp->mii_if.mdio_write = mdio_write; + cp->mii_if.phy_id = CP_INTERNAL_PHY; + cp->mii_if.phy_id_mask = 0x1f; + cp->mii_if.reg_num_mask = 0x1f; + cp_set_rxbufsize(cp); + + rc = pci_enable_device(pdev); + if (rc) + goto err_out_free; + + rc = pci_set_mwi(pdev); + if (rc) + goto err_out_disable; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_mwi; + + pciaddr = pci_resource_start(pdev, 1); + if (!pciaddr) { + rc = -EIO; + dev_err(&pdev->dev, "no MMIO resource\n"); + goto err_out_res; + } + if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) { + rc = -EIO; + dev_err(&pdev->dev, "MMIO resource (%llx) too small\n", + (unsigned long long)pci_resource_len(pdev, 1)); + goto err_out_res; + } + + /* Configure DMA attributes. */ + if ((sizeof(dma_addr_t) > 4) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + pci_using_dac = 0; + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_out_res; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(&pdev->dev, + "No usable consistent DMA configuration, aborting\n"); + goto err_out_res; + } + } + + cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | + PCIMulRW | RxChkSum | CpRxOn | CpTxOn; + + dev->features |= NETIF_F_RXCSUM; + dev->hw_features |= NETIF_F_RXCSUM; + + regs = ioremap(pciaddr, CP_REGS_SIZE); + if (!regs) { + rc = -EIO; + dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n", + (unsigned long long)pci_resource_len(pdev, 1), + (unsigned long long)pciaddr); + goto err_out_res; + } + cp->regs = regs; + + cp_stop_hw(cp); + + /* read MAC address from EEPROM */ + addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 
8 : 6; + for (i = 0; i < 3; i++) + ((__le16 *) (dev->dev_addr))[i] = + cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); + + dev->netdev_ops = &cp_netdev_ops; + netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); + dev->ethtool_ops = &cp_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + if (pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + /* disabled by default until verified */ + dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + rc = register_netdev(dev); + if (rc) + goto err_out_iomap; + + netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n", + regs, dev->dev_addr, pdev->irq); + + pci_set_drvdata(pdev, dev); + + /* enable busmastering and memory-write-invalidate */ + pci_set_master(pdev); + + if (cp->wol_enabled) + cp_set_d3_state (cp); + + return 0; + +err_out_iomap: + iounmap(regs); +err_out_res: + pci_release_regions(pdev); +err_out_mwi: + pci_clear_mwi(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_free: + free_netdev(dev); + return rc; +} + +static void cp_remove_one (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cp_private *cp = netdev_priv(dev); + + unregister_netdev(dev); + iounmap(cp->regs); + if (cp->wol_enabled) + pci_set_power_state (pdev, PCI_D0); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +#ifdef CONFIG_PM +static int cp_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + + if (!netif_running(dev)) + return 0; + + netif_device_detach (dev); + netif_stop_queue (dev); + + spin_lock_irqsave (&cp->lock, flags); + + /* Disable Rx and Tx */ + cpw16 (IntrMask, 0); + cpw8 (Cmd, cpr8 (Cmd) & (~RxOn | ~TxOn)); + + spin_unlock_irqrestore (&cp->lock, flags); + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int cp_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct cp_private *cp = netdev_priv(dev); + unsigned long flags; + + if (!netif_running(dev)) + return 0; + + netif_device_attach (dev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_wake(pdev, PCI_D0, 0); + + /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ + cp_init_rings_index (cp); + cp_init_hw (cp); + cp_enable_irq(cp); + netif_start_queue (dev); + + spin_lock_irqsave (&cp->lock, flags); + + mii_check_media(&cp->mii_if, netif_msg_link(cp), false); + + spin_unlock_irqrestore (&cp->lock, flags); + + return 0; +} +#endif /* CONFIG_PM */ + +static const struct pci_device_id cp_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), }, + { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), }, + { }, +}; +MODULE_DEVICE_TABLE(pci, cp_pci_tbl); + +static struct pci_driver cp_driver = { + .name = DRV_NAME, + .id_table = cp_pci_tbl, + .probe = cp_init_one, + .remove = cp_remove_one, +#ifdef CONFIG_PM + .resume = cp_resume, + .suspend = cp_suspend, +#endif +}; + +module_pci_driver(cp_driver); diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c new file mode 100644 index 
000000000..78bb4ceb1 --- /dev/null +++ b/drivers/net/ethernet/realtek/8139too.c @@ -0,0 +1,2696 @@ +/* + + 8139too.c: A RealTek RTL-8139 Fast Ethernet driver for Linux. + + Maintained by Jeff Garzik + Copyright 2000-2002 Jeff Garzik + + Much code comes from Donald Becker's rtl8139.c driver, + versions 1.13 and older. This driver was originally based + on rtl8139.c version 1.07. Header of rtl8139.c version 1.13: + + ---------- + + Written 1997-2001 by Donald Becker. + This software may be used and distributed according to the + terms of the GNU General Public License (GPL), incorporated + herein by reference. Drivers based on or derived from this + code fall under the GPL and must retain the authorship, + copyright and license notice. This file is not a complete + program and may only be used when the entire operating + system is licensed under the GPL. + + This driver is for boards based on the RTL8129 and RTL8139 + PCI ethernet chips. + + The author may be reached as becker@scyld.com, or C/O Scyld + Computing Corporation 410 Severn Ave., Suite 210 Annapolis + MD 21403 + + Support and updates available at + http://www.scyld.com/network/rtl8139.html + + Twister-tuning table provided by Kinston + . + + ---------- + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + Contributors: + + Donald Becker - he wrote the original driver, kudos to him! + (but please don't e-mail him for support, this isn't his driver) + + Tigran Aivazian - bug fixes, skbuff free cleanup + + Martin Mares - suggestions for PCI cleanup + + David S. Miller - PCI DMA and softnet updates + + Ernst Gill - fixes ported from BSD driver + + Daniel Kobras - identified specific locations of + posted MMIO write bugginess + + Gerard Sharp - bug fix, testing and feedback + + David Ford - Rx ring wrap fix + + Dan DeMaggio - swapped RTL8139 cards with me, and allowed me + to find and fix a crucial bug on older chipsets. + + Donald Becker/Chris Butterworth/Marcus Westergren - + Noticed various Rx packet size-related buglets. + + Santiago Garcia Mantinan - testing and feedback + + Jens David - 2.2.x kernel backports + + Martin Dennett - incredibly helpful insight on undocumented + features of the 8139 chips + + Jean-Jacques Michel - bug fix + + Tobias Ringström - Rx interrupt status checking suggestion + + Andrew Morton - Clear blocked signals, avoid + buffer overrun setting current->comm. + + Kalle Olavi Niemitalo - Wake-on-LAN ioctls + + Robert Kuebel - Save kernel thread from dying on any signal. 
+ + Submitting bug reports: + + "rtl8139-diag -mmmaaavvveefN" output + enable RTL8139_DEBUG below, and look at 'dmesg' or kernel log + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "8139too" +#define DRV_VERSION "0.9.28" + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION + +/* Default Message level */ +#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + + +/* define to 1, 2 or 3 to enable copious debugging info */ +#define RTL8139_DEBUG 0 + +/* define to 1 to disable lightweight runtime debugging checks */ +#undef RTL8139_NDEBUG + + +#ifdef RTL8139_NDEBUG +# define assert(expr) do {} while (0) +#else +# define assert(expr) \ + if (unlikely(!(expr))) { \ + pr_err("Assertion failed! %s,%s,%s,line=%d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } +#endif + + +/* A few user-configurable values. */ +/* media options */ +#define MAX_UNITS 8 +static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/* Whether to use MMIO or PIO. Default to MMIO. */ +#ifdef CONFIG_8139TOO_PIO +static bool use_io = true; +#else +static bool use_io = false; +#endif + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +static int multicast_filter_limit = 32; + +/* bitmapped message enable number */ +static int debug = -1; + +/* + * Receive ring size + * Warning: 64K ring has hardware issues and may lock up. + */ +#if defined(CONFIG_SH_DREAMCAST) +#define RX_BUF_IDX 0 /* 8K ring */ +#else +#define RX_BUF_IDX 2 /* 32K ring */ +#endif +#define RX_BUF_LEN (8192 << RX_BUF_IDX) +#define RX_BUF_PAD 16 +#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */ + +#if RX_BUF_LEN == 65536 +#define RX_BUF_TOT_LEN RX_BUF_LEN +#else +#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD) +#endif + +/* Number of Tx descriptor registers. */ +#define NUM_TX_DESC 4 + +/* max supported ethernet frame size -- must be at least (dev->mtu+18+4).*/ +#define MAX_ETH_FRAME_SIZE 1792 + +/* max supported payload size */ +#define MAX_ETH_DATA_SIZE (MAX_ETH_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN) + +/* Size of the Tx bounce buffers -- must be at least (dev->mtu+18+4). */ +#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE +#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) + +/* PCI Tuning Parameters + Threshold is bytes transferred to chip before transmission starts. */ +#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */ + +/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ +#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */ +#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_RETRY 8 /* 0-15. retries = 16 + (TX_RETRY * 16) */ + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (6*HZ) + + +enum { + HAS_MII_XCVR = 0x010000, + HAS_CHIP_XCVR = 0x020000, + HAS_LNK_CHNG = 0x040000, +}; + +#define RTL_NUM_STATS 4 /* number of ETHTOOL_GSTATS u64's */ +#define RTL_REGS_VER 1 /* version of reg. 
data in ETHTOOL_GREGS */ +#define RTL_MIN_IO_SIZE 0x80 +#define RTL8139B_IO_SIZE 256 + +#define RTL8129_CAPS HAS_MII_XCVR +#define RTL8139_CAPS (HAS_CHIP_XCVR|HAS_LNK_CHNG) + +typedef enum { + RTL8139 = 0, + RTL8129, +} board_t; + + +/* indexed by board_t, above */ +static const struct { + const char *name; + u32 hw_flags; +} board_info[] = { + { "RealTek RTL8139", RTL8139_CAPS }, + { "RealTek RTL8129", RTL8129_CAPS }, +}; + + +static const struct pci_device_id rtl8139_pci_tbl[] = { + {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + +#ifdef CONFIG_SH_SECUREEDGE5410 + /* Bogus 8139 silicon reports 8129 without external PROM :-( */ + {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, +#endif +#ifdef CONFIG_8139TOO_8129 + {0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 }, +#endif + + /* some crazy cards report invalid vendor ids like + * 0x0001 here. The other ids are valid and constant, + * so we simply don't match on the main vendor id. + */ + {PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 }, + {PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 }, + + {0,} +}; +MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl); + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "early_rx" }, + { "tx_buf_mapped" }, + { "tx_timeouts" }, + { "rx_lost_in_ring" }, +}; + +/* The rest of these values should never change. */ + +/* Symbolic offsets to registers. */ +enum RTL8139_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAR0 = 8, /* Multicast filter. */ + TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ + TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ + RxBuf = 0x30, + ChipCmd = 0x37, + RxBufPtr = 0x38, + RxBufAddr = 0x3A, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + Timer = 0x48, /* A general-purpose counter. */ + RxMissed = 0x4C, /* 24 bits valid, write clears. */ + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + TimerInt = 0x54, + MediaStatus = 0x58, + Config3 = 0x59, + Config4 = 0x5A, /* absent on RTL-8139A */ + HltClk = 0x5B, + MultiIntr = 0x5C, + TxSummary = 0x60, + BasicModeCtrl = 0x62, + BasicModeStatus = 0x64, + NWayAdvert = 0x66, + NWayLPAR = 0x68, + NWayExpansion = 0x6A, + /* Undocumented registers, but required for proper operation. */ + FIFOTMS = 0x70, /* FIFO Control and test. */ + CSCR = 0x74, /* Chip Status and Configuration Register. 
*/ + PARA78 = 0x78, + FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */ + PARA7c = 0x7c, /* Magic transceiver parameter register. */ + Config5 = 0xD8, /* absent on RTL-8139A */ +}; + +enum ClearBitMasks { + MultiIntrClear = 0xF000, + ChipCmdClear = 0xE2, + Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), +}; + +enum ChipCmdBits { + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, +}; + +/* Interrupt register bits, using my own meaningful names. */ +enum IntrStatusBits { + PCIErr = 0x8000, + PCSTimeout = 0x4000, + RxFIFOOver = 0x40, + RxUnderrun = 0x20, + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + RxAckBits = RxFIFOOver | RxOverflow | RxOK, +}; + +enum TxStatusBits { + TxHostOwns = 0x2000, + TxUnderrun = 0x4000, + TxStatOK = 0x8000, + TxOutOfWindow = 0x20000000, + TxAborted = 0x40000000, + TxCarrierLost = 0x80000000, +}; +enum RxStatusBits { + RxMulticast = 0x8000, + RxPhysical = 0x4000, + RxBroadcast = 0x2000, + RxBadSymbol = 0x0020, + RxRunt = 0x0010, + RxTooLong = 0x0008, + RxCRCErr = 0x0004, + RxBadAlign = 0x0002, + RxStatusOK = 0x0001, +}; + +/* Bits in RxConfig. */ +enum rx_mode_bits { + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +}; + +/* Bits in TxConfig. */ +enum tx_config_bits { + /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */ + TxIFGShift = 24, + TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ + TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ + TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ + TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ + + TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ + TxCRC = (1 << 16), /* DISABLE Tx pkt CRC append */ + TxClearAbt = (1 << 0), /* Clear abort (WO) */ + TxDMAShift = 8, /* DMA burst value (0-7) is shifted X many bits */ + TxRetryShift = 4, /* TXRR value (0-15) is shifted X many bits */ + + TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ +}; + +/* Bits in Config1 */ +enum Config1Bits { + Cfg1_PM_Enable = 0x01, + Cfg1_VPD_Enable = 0x02, + Cfg1_PIO = 0x04, + Cfg1_MMIO = 0x08, + LWAKE = 0x10, /* not on 8139, 8139A */ + Cfg1_Driver_Load = 0x20, + Cfg1_LED0 = 0x40, + Cfg1_LED1 = 0x80, + SLEEP = (1 << 1), /* only on 8139, 8139A */ + PWRDN = (1 << 0), /* only on 8139, 8139A */ +}; + +/* Bits in Config3 */ +enum Config3Bits { + Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ + Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ + Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ + Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ + Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ + Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ + Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ + Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ +}; + +/* Bits in Config4 */ +enum Config4Bits { + LWPTN = (1 << 2), /* not on 8139, 8139A */ +}; + +/* Bits in Config5 */ +enum Config5Bits { + Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ + Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ + Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ + Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */ + Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ + Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ + Cfg5_BWF = (1 << 6), 
/* 1 = accept broadcast wakeup frame */ +}; + +enum RxConfigBits { + /* rx fifo threshold */ + RxCfgFIFOShift = 13, + RxCfgFIFONone = (7 << RxCfgFIFOShift), + + /* Max DMA burst */ + RxCfgDMAShift = 8, + RxCfgDMAUnlimited = (7 << RxCfgDMAShift), + + /* rx ring buffer length */ + RxCfgRcv8K = 0, + RxCfgRcv16K = (1 << 11), + RxCfgRcv32K = (1 << 12), + RxCfgRcv64K = (1 << 11) | (1 << 12), + + /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */ + RxNoWrap = (1 << 7), +}; + +/* Twister tuning parameters from RealTek. + Completely undocumented, but required to tune bad links on some boards. */ +enum CSCRBits { + CSCR_LinkOKBit = 0x0400, + CSCR_LinkChangeBit = 0x0800, + CSCR_LinkStatusBits = 0x0f000, + CSCR_LinkDownOffCmd = 0x003c0, + CSCR_LinkDownCmd = 0x0f3c0, +}; + +enum Cfg9346Bits { + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, +}; + +typedef enum { + CH_8139 = 0, + CH_8139_K, + CH_8139A, + CH_8139A_G, + CH_8139B, + CH_8130, + CH_8139C, + CH_8100, + CH_8100B_8139D, + CH_8101, +} chip_t; + +enum chip_flags { + HasHltClk = (1 << 0), + HasLWake = (1 << 1), +}; + +#define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \ + (b30<<30 | b29<<29 | b28<<28 | b27<<27 | b26<<26 | b23<<23 | b22<<22) +#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1) + +/* directly indexed by chip_t, above */ +static const struct { + const char *name; + u32 version; /* from RTL8139C/RTL8139D docs */ + u32 flags; +} rtl_chip_info[] = { + { "RTL-8139", + HW_REVID(1, 0, 0, 0, 0, 0, 0), + HasHltClk, + }, + + { "RTL-8139 rev K", + HW_REVID(1, 1, 0, 0, 0, 0, 0), + HasHltClk, + }, + + { "RTL-8139A", + HW_REVID(1, 1, 1, 0, 0, 0, 0), + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139A rev G", + HW_REVID(1, 1, 1, 0, 0, 1, 0), + HasHltClk, /* XXX undocumented? */ + }, + + { "RTL-8139B", + HW_REVID(1, 1, 1, 1, 0, 0, 0), + HasLWake, + }, + + { "RTL-8130", + HW_REVID(1, 1, 1, 1, 1, 0, 0), + HasLWake, + }, + + { "RTL-8139C", + HW_REVID(1, 1, 1, 0, 1, 0, 0), + HasLWake, + }, + + { "RTL-8100", + HW_REVID(1, 1, 1, 1, 0, 1, 0), + HasLWake, + }, + + { "RTL-8100B/8139D", + HW_REVID(1, 1, 1, 0, 1, 0, 1), + HasHltClk /* XXX undocumented? */ + | HasLWake, + }, + + { "RTL-8101", + HW_REVID(1, 1, 1, 0, 1, 1, 1), + HasLWake, + }, +}; + +struct rtl_extra_stats { + unsigned long early_rx; + unsigned long tx_buf_mapped; + unsigned long tx_timeouts; + unsigned long rx_lost_in_ring; +}; + +struct rtl8139_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8139_private { + void __iomem *mmio_addr; + int drv_flags; + struct pci_dev *pci_dev; + u32 msg_enable; + struct napi_struct napi; + struct net_device *dev; + + unsigned char *rx_ring; + unsigned int cur_rx; /* RX buf index of next pkt */ + struct rtl8139_stats rx_stats; + dma_addr_t rx_ring_dma; + + unsigned int tx_flag; + unsigned long cur_tx; + unsigned long dirty_tx; + struct rtl8139_stats tx_stats; + unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ + unsigned char *tx_bufs; /* Tx bounce buffer region. */ + dma_addr_t tx_bufs_dma; + + signed char phys[4]; /* MII device addresses. */ + + /* Twister tune state. */ + char twistie, twist_row, twist_col; + + unsigned int watchdog_fired : 1; + unsigned int default_port : 4; /* Last dev->if_port value. 
*/ + unsigned int have_thread : 1; + + spinlock_t lock; + spinlock_t rx_lock; + + chip_t chipset; + u32 rx_config; + struct rtl_extra_stats xstats; + + struct delayed_work thread; + + struct mii_if_info mii; + unsigned int regs_len; + unsigned long fifo_copy_timeout; +}; + +MODULE_AUTHOR ("Jeff Garzik "); +MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +module_param(use_io, bool, 0); +MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO"); +module_param(multicast_filter_limit, int, 0); +module_param_array(media, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +module_param(debug, int, 0); +MODULE_PARM_DESC (debug, "8139too bitmapped message enable number"); +MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses"); +MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps"); +MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)"); + +static int read_eeprom (void __iomem *ioaddr, int location, int addr_len); +static int rtl8139_open (struct net_device *dev); +static int mdio_read (struct net_device *dev, int phy_id, int location); +static void mdio_write (struct net_device *dev, int phy_id, int location, + int val); +static void rtl8139_start_thread(struct rtl8139_private *tp); +static void rtl8139_tx_timeout (struct net_device *dev); +static void rtl8139_init_ring (struct net_device *dev); +static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, + struct net_device *dev); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8139_poll_controller(struct net_device *dev); +#endif +static int rtl8139_set_mac_address(struct net_device *dev, void *p); +static int rtl8139_poll(struct napi_struct *napi, int budget); +static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); +static int rtl8139_close (struct net_device *dev); +static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); +static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 + *stats); +static void rtl8139_set_rx_mode (struct net_device *dev); +static void __set_rx_mode (struct net_device *dev); +static void rtl8139_hw_start (struct net_device *dev); +static void rtl8139_thread (struct work_struct *work); +static void rtl8139_tx_timeout_task(struct work_struct *work); +static const struct ethtool_ops rtl8139_ethtool_ops; + +/* write MMIO register, with flush */ +/* Flush avoids rtl8139 bug w/ posted MMIO writes */ +#define RTL_W8_F(reg, val8) do { iowrite8 ((val8), ioaddr + (reg)); ioread8 (ioaddr + (reg)); } while (0) +#define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0) +#define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0) + +/* write MMIO register */ +#define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg)) + +/* read MMIO register */ +#define RTL_R8(reg) ioread8 (ioaddr + (reg)) +#define RTL_R16(reg) ioread16 (ioaddr + (reg)) +#define RTL_R32(reg) ioread32 (ioaddr + (reg)) + + +static const u16 rtl8139_intr_mask = + PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver | + TxErr | TxOK | RxErr | RxOK; + +static const u16 rtl8139_norx_intr_mask = + PCIErr | PCSTimeout | RxUnderrun | + TxErr | TxOK | RxErr ; + +#if 
RX_BUF_IDX == 0 +static const unsigned int rtl8139_rx_config = + RxCfgRcv8K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 1 +static const unsigned int rtl8139_rx_config = + RxCfgRcv16K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 2 +static const unsigned int rtl8139_rx_config = + RxCfgRcv32K | RxNoWrap | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#elif RX_BUF_IDX == 3 +static const unsigned int rtl8139_rx_config = + RxCfgRcv64K | + (RX_FIFO_THRESH << RxCfgFIFOShift) | + (RX_DMA_BURST << RxCfgDMAShift); +#else +#error "Invalid configuration for 8139_RXBUF_IDX" +#endif + +static const unsigned int rtl8139_tx_config = + TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift); + +static void __rtl8139_cleanup_dev (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + struct pci_dev *pdev; + + assert (dev != NULL); + assert (tp->pci_dev != NULL); + pdev = tp->pci_dev; + + if (tp->mmio_addr) + pci_iounmap (pdev, tp->mmio_addr); + + /* it's ok to call this even if we have no regions to free */ + pci_release_regions (pdev); + + free_netdev(dev); +} + + +static void rtl8139_chip_reset (void __iomem *ioaddr) +{ + int i; + + /* Soft reset the chip. */ + RTL_W8 (ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. */ + for (i = 1000; i > 0; i--) { + barrier(); + if ((RTL_R8 (ChipCmd) & CmdReset) == 0) + break; + udelay (10); + } +} + + +static struct net_device *rtl8139_init_board(struct pci_dev *pdev) +{ + struct device *d = &pdev->dev; + void __iomem *ioaddr; + struct net_device *dev; + struct rtl8139_private *tp; + u8 tmp8; + int rc, disable_dev_on_err = 0; + unsigned int i, bar; + unsigned long io_len; + u32 version; + static const struct { + unsigned long mask; + char *type; + } res[] = { + { IORESOURCE_IO, "PIO" }, + { IORESOURCE_MEM, "MMIO" } + }; + + assert (pdev != NULL); + + /* dev and priv zeroed in alloc_etherdev */ + dev = alloc_etherdev (sizeof (*tp)); + if (dev == NULL) + return ERR_PTR(-ENOMEM); + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pci_dev = pdev; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device (pdev); + if (rc) + goto err_out; + + disable_dev_on_err = 1; + rc = pci_request_regions (pdev, DRV_NAME); + if (rc) + goto err_out; + + pci_set_master (pdev); + + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + +retry: + /* PIO bar register comes first. */ + bar = !use_io; + + io_len = pci_resource_len(pdev, bar); + + dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len); + + if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) { + dev_err(d, "region #%d not a %s resource, aborting\n", bar, + res[bar].type); + rc = -ENODEV; + goto err_out; + } + if (io_len < RTL_MIN_IO_SIZE) { + dev_err(d, "Invalid PCI %s region size(s), aborting\n", + res[bar].type); + rc = -ENODEV; + goto err_out; + } + + ioaddr = pci_iomap(pdev, bar, 0); + if (!ioaddr) { + dev_err(d, "cannot map %s\n", res[bar].type); + if (!use_io) { + use_io = true; + goto retry; + } + rc = -ENODEV; + goto err_out; + } + tp->regs_len = io_len; + tp->mmio_addr = ioaddr; + + /* Bring old chips out of low-power mode. 
*/ + RTL_W8 (HltClk, 'R'); + + /* check for missing/broken hardware */ + if (RTL_R32 (TxConfig) == 0xFFFFFFFF) { + dev_err(&pdev->dev, "Chip not responding, ignoring board\n"); + rc = -EIO; + goto err_out; + } + + /* identify chip attached to board */ + version = RTL_R32 (TxConfig) & HW_REVID_MASK; + for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++) + if (version == rtl_chip_info[i].version) { + tp->chipset = i; + goto match; + } + + /* if unknown chip, assume array element #0, original RTL-8139 in this case */ + i = 0; + dev_dbg(&pdev->dev, "unknown chip version, assuming RTL-8139\n"); + dev_dbg(&pdev->dev, "TxConfig = 0x%x\n", RTL_R32 (TxConfig)); + tp->chipset = 0; + +match: + pr_debug("chipset id (%d) == index %d, '%s'\n", + version, i, rtl_chip_info[i].name); + + if (tp->chipset >= CH_8139B) { + u8 new_tmp8 = tmp8 = RTL_R8 (Config1); + pr_debug("PCI PM wakeup\n"); + if ((rtl_chip_info[tp->chipset].flags & HasLWake) && + (tmp8 & LWAKE)) + new_tmp8 &= ~LWAKE; + new_tmp8 |= Cfg1_PM_Enable; + if (new_tmp8 != tmp8) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config1, tmp8); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + if (rtl_chip_info[tp->chipset].flags & HasLWake) { + tmp8 = RTL_R8 (Config4); + if (tmp8 & LWPTN) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config4, tmp8 & ~LWPTN); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } + } + } else { + pr_debug("Old chip wakeup\n"); + tmp8 = RTL_R8 (Config1); + tmp8 &= ~(SLEEP | PWRDN); + RTL_W8 (Config1, tmp8); + } + + rtl8139_chip_reset (ioaddr); + + return dev; + +err_out: + __rtl8139_cleanup_dev (dev); + if (disable_dev_on_err) + pci_disable_device (pdev); + return ERR_PTR(rc); +} + +static int rtl8139_set_features(struct net_device *dev, netdev_features_t features) +{ + struct rtl8139_private *tp = netdev_priv(dev); + unsigned long flags; + netdev_features_t changed = features ^ dev->features; + void __iomem *ioaddr = tp->mmio_addr; + + if (!(changed & (NETIF_F_RXALL))) + return 0; + + spin_lock_irqsave(&tp->lock, flags); + + if (changed & NETIF_F_RXALL) { + int rx_mode = tp->rx_config; + if (features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + else + rx_mode &= ~(AcceptErr | AcceptRunt); + tp->rx_config = rtl8139_rx_config | rx_mode; + RTL_W32_F(RxConfig, tp->rx_config); + } + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +static int rtl8139_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > MAX_ETH_DATA_SIZE) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops rtl8139_netdev_ops = { + .ndo_open = rtl8139_open, + .ndo_stop = rtl8139_close, + .ndo_get_stats64 = rtl8139_get_stats64, + .ndo_change_mtu = rtl8139_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = rtl8139_set_mac_address, + .ndo_start_xmit = rtl8139_start_xmit, + .ndo_set_rx_mode = rtl8139_set_rx_mode, + .ndo_do_ioctl = netdev_ioctl, + .ndo_tx_timeout = rtl8139_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8139_poll_controller, +#endif + .ndo_set_features = rtl8139_set_features, +}; + +static int rtl8139_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct rtl8139_private *tp; + int i, addr_len, option; + void __iomem *ioaddr; + static int board_idx = -1; + + assert (pdev != NULL); + assert (ent != NULL); + + board_idx++; + + /* when we're built into the kernel, the driver version message + * is only printed if at least one 8139 board has been found + */ +#ifndef MODULE + 
{ + static int printed_version; + if (!printed_version++) + pr_info(RTL8139_DRIVER_NAME "\n"); + } +#endif + + if (pdev->vendor == PCI_VENDOR_ID_REALTEK && + pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) { + dev_info(&pdev->dev, + "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n", + pdev->vendor, pdev->device, pdev->revision); + return -ENODEV; + } + + if (pdev->vendor == PCI_VENDOR_ID_REALTEK && + pdev->device == PCI_DEVICE_ID_REALTEK_8139 && + pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS && + pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) { + pr_info("OQO Model 2 detected. Forcing PIO\n"); + use_io = 1; + } + + dev = rtl8139_init_board (pdev); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + assert (dev != NULL); + tp = netdev_priv(dev); + tp->dev = dev; + + ioaddr = tp->mmio_addr; + assert (ioaddr != NULL); + + addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6; + for (i = 0; i < 3; i++) + ((__le16 *) (dev->dev_addr))[i] = + cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len)); + + /* The Rtl8139-specific entries in the device structure. */ + dev->netdev_ops = &rtl8139_netdev_ops; + dev->ethtool_ops = &rtl8139_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + netif_napi_add(dev, &tp->napi, rtl8139_poll, 64); + + /* note: the hardware is not capable of sg/csum/highdma, however + * through the use of skb_copy_and_csum_dev we enable these + * features + */ + dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; + dev->vlan_features = dev->features; + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + /* tp zeroed and aligned in alloc_etherdev */ + tp = netdev_priv(dev); + + /* note: tp->chipset set in rtl8139_init_board */ + tp->drv_flags = board_info[ent->driver_data].hw_flags; + tp->mmio_addr = ioaddr; + tp->msg_enable = + (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); + spin_lock_init (&tp->lock); + spin_lock_init (&tp->rx_lock); + INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); + tp->mii.dev = dev; + tp->mii.mdio_read = mdio_read; + tp->mii.mdio_write = mdio_write; + tp->mii.phy_id_mask = 0x3f; + tp->mii.reg_num_mask = 0x1f; + + /* dev is fully set up and ready to use now */ + pr_debug("about to register device named %s (%p)...\n", + dev->name, dev); + i = register_netdev (dev); + if (i) goto err_out; + + pci_set_drvdata (pdev, dev); + + netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n", + board_info[ent->driver_data].name, + ioaddr, dev->dev_addr, pdev->irq); + + netdev_dbg(dev, "Identified 8139 chip type '%s'\n", + rtl_chip_info[tp->chipset].name); + + /* Find the connected MII xcvrs. + Doing this in open() would allow detecting external xcvrs later, but + takes too much time. */ +#ifdef CONFIG_8139TOO_8129 + if (tp->drv_flags & HAS_MII_XCVR) { + int phy, phy_idx = 0; + for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) { + int mii_status = mdio_read(dev, phy, 1); + if (mii_status != 0xffff && mii_status != 0x0000) { + u16 advertising = mdio_read(dev, phy, 4); + tp->phys[phy_idx++] = phy; + netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n", + phy, mii_status, advertising); + } + } + if (phy_idx == 0) { + netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n"); + tp->phys[0] = 32; + } + } else +#endif + tp->phys[0] = 32; + tp->mii.phy_id = tp->phys[0]; + + /* The lower four bits are the media type. */ + option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx]; + if (option > 0) { + tp->mii.full_duplex = (option & 0x210) ? 
1 : 0; + tp->default_port = option & 0xFF; + if (tp->default_port) + tp->mii.force_media = 1; + } + if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0) + tp->mii.full_duplex = full_duplex[board_idx]; + if (tp->mii.full_duplex) { + netdev_info(dev, "Media type forced to Full Duplex\n"); + /* Changing the MII-advertised media because might prevent + re-connection. */ + tp->mii.force_media = 1; + } + if (tp->default_port) { + netdev_info(dev, " Forcing %dMbps %s-duplex operation\n", + (option & 0x20 ? 100 : 10), + (option & 0x10 ? "full" : "half")); + mdio_write(dev, tp->phys[0], 0, + ((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */ + ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */ + } + + /* Put the chip into low-power mode. */ + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ + + return 0; + +err_out: + netif_napi_del(&tp->napi); + __rtl8139_cleanup_dev (dev); + pci_disable_device (pdev); + return i; +} + + +static void rtl8139_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct rtl8139_private *tp = netdev_priv(dev); + + assert (dev != NULL); + + cancel_delayed_work_sync(&tp->thread); + netif_napi_del(&tp->napi); + + unregister_netdev (dev); + + __rtl8139_cleanup_dev (dev); + pci_disable_device (pdev); +} + + +/* Serial EEPROM section. */ + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ +#define EE_CS 0x08 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */ +#define EE_WRITE_0 0x00 +#define EE_WRITE_1 0x02 +#define EE_DATA_READ 0x01 /* EEPROM chip data out. */ +#define EE_ENB (0x80 | EE_CS) + +/* Delay between EEPROM clock transitions. + No extra delay is needed with 33Mhz PCI, but 66Mhz may change this. + */ + +#define eeprom_delay() (void)RTL_R8(Cfg9346) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_WRITE_CMD (5) +#define EE_READ_CMD (6) +#define EE_ERASE_CMD (7) + +static int read_eeprom(void __iomem *ioaddr, int location, int addr_len) +{ + int i; + unsigned retval = 0; + int read_cmd = location | (EE_READ_CMD << addr_len); + + RTL_W8 (Cfg9346, EE_ENB & ~EE_CS); + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + + /* Shift the read command bits out. */ + for (i = 4 + addr_len; i >= 0; i--) { + int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; + RTL_W8 (Cfg9346, EE_ENB | dataval); + eeprom_delay (); + RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK); + eeprom_delay (); + } + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + + for (i = 16; i > 0; i--) { + RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK); + eeprom_delay (); + retval = + (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 : + 0); + RTL_W8 (Cfg9346, EE_ENB); + eeprom_delay (); + } + + /* Terminate the EEPROM access. */ + RTL_W8(Cfg9346, 0); + eeprom_delay (); + + return retval; +} + +/* MII serial management: mostly bogus for now. */ +/* Read and write the MII management registers using software-generated + serial MDIO protocol. + The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back PCI I/O cycles, but we insert a delay to avoid + "overclocking" issues. 
*/ +#define MDIO_DIR 0x80 +#define MDIO_DATA_OUT 0x04 +#define MDIO_DATA_IN 0x02 +#define MDIO_CLK 0x01 +#define MDIO_WRITE0 (MDIO_DIR) +#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT) + +#define mdio_delay() RTL_R8(Config4) + + +static const char mii_2_8139_map[8] = { + BasicModeCtrl, + BasicModeStatus, + 0, + 0, + NWayAdvert, + NWayLPAR, + NWayExpansion, + 0 +}; + + +#ifdef CONFIG_8139TOO_8129 +/* Syncronize the MII management interface by shifting 32 one bits out. */ +static void mdio_sync (void __iomem *ioaddr) +{ + int i; + + for (i = 32; i >= 0; i--) { + RTL_W8 (Config4, MDIO_WRITE1); + mdio_delay (); + RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK); + mdio_delay (); + } +} +#endif + +static int mdio_read (struct net_device *dev, int phy_id, int location) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int retval = 0; +#ifdef CONFIG_8139TOO_8129 + void __iomem *ioaddr = tp->mmio_addr; + int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; + int i; +#endif + + if (phy_id > 31) { /* Really a 8139. Use internal registers. */ + void __iomem *ioaddr = tp->mmio_addr; + return location < 8 && mii_2_8139_map[location] ? + RTL_R16 (mii_2_8139_map[location]) : 0; + } + +#ifdef CONFIG_8139TOO_8129 + mdio_sync (ioaddr); + /* Shift the read command bits out. */ + for (i = 15; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0; + + RTL_W8 (Config4, MDIO_DIR | dataval); + mdio_delay (); + RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK); + mdio_delay (); + } + + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 19; i > 0; i--) { + RTL_W8 (Config4, 0); + mdio_delay (); + retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0); + RTL_W8 (Config4, MDIO_CLK); + mdio_delay (); + } +#endif + + return (retval >> 1) & 0xffff; +} + + +static void mdio_write (struct net_device *dev, int phy_id, int location, + int value) +{ + struct rtl8139_private *tp = netdev_priv(dev); +#ifdef CONFIG_8139TOO_8129 + void __iomem *ioaddr = tp->mmio_addr; + int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; + int i; +#endif + + if (phy_id > 31) { /* Really a 8139. Use internal registers. */ + void __iomem *ioaddr = tp->mmio_addr; + if (location == 0) { + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W16 (BasicModeCtrl, value); + RTL_W8 (Cfg9346, Cfg9346_Lock); + } else if (location < 8 && mii_2_8139_map[location]) + RTL_W16 (mii_2_8139_map[location], value); + return; + } + +#ifdef CONFIG_8139TOO_8129 + mdio_sync (ioaddr); + + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = + (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + RTL_W8 (Config4, dataval); + mdio_delay (); + RTL_W8 (Config4, dataval | MDIO_CLK); + mdio_delay (); + } + /* Clear out extra bits. 
*/ + for (i = 2; i > 0; i--) { + RTL_W8 (Config4, 0); + mdio_delay (); + RTL_W8 (Config4, MDIO_CLK); + mdio_delay (); + } +#endif +} + + +static int rtl8139_open (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + const int irq = tp->pci_dev->irq; + int retval; + + retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); + if (retval) + return retval; + + tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + &tp->tx_bufs_dma, GFP_KERNEL); + tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + &tp->rx_ring_dma, GFP_KERNEL); + if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { + free_irq(irq, dev); + + if (tp->tx_bufs) + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); + if (tp->rx_ring) + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + + return -ENOMEM; + + } + + napi_enable(&tp->napi); + + tp->mii.full_duplex = tp->mii.force_media; + tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; + + rtl8139_init_ring (dev); + rtl8139_hw_start (dev); + netif_start_queue (dev); + + netif_dbg(tp, ifup, dev, + "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", + __func__, + (unsigned long long)pci_resource_start (tp->pci_dev, 1), + irq, RTL_R8 (MediaStatus), + tp->mii.full_duplex ? "full" : "half"); + + rtl8139_start_thread(tp); + + return 0; +} + + +static void rtl_check_media (struct net_device *dev, unsigned int init_media) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + if (tp->phys[0] >= 0) { + mii_check_media(&tp->mii, netif_msg_link(tp), init_media); + } +} + +/* Start the hardware at open or resume. */ +static void rtl8139_hw_start (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 i; + u8 tmp; + + /* Bring old chips out of low-power mode. */ + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'R'); + + rtl8139_chip_reset (ioaddr); + + /* unlock Config[01234] and BMCR register writes */ + RTL_W8_F (Cfg9346, Cfg9346_Unlock); + /* Restore our idea of the MAC address. */ + RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); + RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4))); + + tp->cur_rx = 0; + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + + /* Must enable Tx/Rx before setting transfer thresholds! */ + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; + RTL_W32 (RxConfig, tp->rx_config); + RTL_W32 (TxConfig, rtl8139_tx_config); + + rtl_check_media (dev, 1); + + if (tp->chipset >= CH_8139B) { + /* Disable magic packet scanning, which is enabled + * when PM is enabled in Config1. It can be reenabled + * via ETHTOOL_SWOL if desired. 
*/ + RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic); + } + + netdev_dbg(dev, "init buffer addresses\n"); + + /* Lock Config[01234] and BMCR register writes */ + RTL_W8 (Cfg9346, Cfg9346_Lock); + + /* init Tx buffer DMA addresses */ + for (i = 0; i < NUM_TX_DESC; i++) + RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs)); + + RTL_W32 (RxMissed, 0); + + rtl8139_set_rx_mode (dev); + + /* no early-rx interrupts */ + RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear); + + /* make sure RxTx has started */ + tmp = RTL_R8 (ChipCmd); + if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb))) + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + /* Enable all known interrupts by setting the interrupt mask. */ + RTL_W16 (IntrMask, rtl8139_intr_mask); +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void rtl8139_init_ring (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int i; + + tp->cur_rx = 0; + tp->cur_tx = 0; + tp->dirty_tx = 0; + + for (i = 0; i < NUM_TX_DESC; i++) + tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE]; +} + + +/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */ +static int next_tick = 3 * HZ; + +#ifndef CONFIG_8139TOO_TUNE_TWISTER +static inline void rtl8139_tune_twister (struct net_device *dev, + struct rtl8139_private *tp) {} +#else +enum TwisterParamVals { + PARA78_default = 0x78fa8388, + PARA7c_default = 0xcb38de43, /* param[0][3] */ + PARA7c_xxx = 0xcb38de43, +}; + +static const unsigned long param[4][4] = { + {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, + {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83} +}; + +static void rtl8139_tune_twister (struct net_device *dev, + struct rtl8139_private *tp) +{ + int linkcase; + void __iomem *ioaddr = tp->mmio_addr; + + /* This is a complicated state machine to configure the "twister" for + impedance/echos based on the cable length. + All of this is magic and undocumented. + */ + switch (tp->twistie) { + case 1: + if (RTL_R16 (CSCR) & CSCR_LinkOKBit) { + /* We have link beat, let us tune the twister. */ + RTL_W16 (CSCR, CSCR_LinkDownOffCmd); + tp->twistie = 2; /* Change to state 2. */ + next_tick = HZ / 10; + } else { + /* Just put in some reasonable defaults for when beat returns. */ + RTL_W16 (CSCR, CSCR_LinkDownCmd); + RTL_W32 (FIFOTMS, 0x20); /* Turn on cable test mode. */ + RTL_W32 (PARA78, PARA78_default); + RTL_W32 (PARA7c, PARA7c_default); + tp->twistie = 0; /* Bail from future actions. */ + } + break; + case 2: + /* Read how long it took to hear the echo. */ + linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits; + if (linkcase == 0x7000) + tp->twist_row = 3; + else if (linkcase == 0x3000) + tp->twist_row = 2; + else if (linkcase == 0x1000) + tp->twist_row = 1; + else + tp->twist_row = 0; + tp->twist_col = 0; + tp->twistie = 3; /* Change to state 2. */ + next_tick = HZ / 10; + break; + case 3: + /* Put out four tuning parameters, one per 100msec. */ + if (tp->twist_col == 0) + RTL_W16 (FIFOTMS, 0); + RTL_W32 (PARA7c, param[(int) tp->twist_row] + [(int) tp->twist_col]); + next_tick = HZ / 10; + if (++tp->twist_col >= 4) { + /* For short cables we are done. + For long cables (row == 3) check for mistune. */ + tp->twistie = + (tp->twist_row == 3) ? 4 : 0; + } + break; + case 4: + /* Special case for long cables: check for mistune. 
*/ + if ((RTL_R16 (CSCR) & + CSCR_LinkStatusBits) == 0x7000) { + tp->twistie = 0; + break; + } else { + RTL_W32 (PARA7c, 0xfb38de03); + tp->twistie = 5; + next_tick = HZ / 10; + } + break; + case 5: + /* Retune for shorter cable (column 2). */ + RTL_W32 (FIFOTMS, 0x20); + RTL_W32 (PARA78, PARA78_default); + RTL_W32 (PARA7c, PARA7c_default); + RTL_W32 (FIFOTMS, 0x00); + tp->twist_row = 2; + tp->twist_col = 0; + tp->twistie = 3; + next_tick = HZ / 10; + break; + + default: + /* do nothing */ + break; + } +} +#endif /* CONFIG_8139TOO_TUNE_TWISTER */ + +static inline void rtl8139_thread_iter (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr) +{ + int mii_lpa; + + mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA); + + if (!tp->mii.force_media && mii_lpa != 0xffff) { + int duplex = ((mii_lpa & LPA_100FULL) || + (mii_lpa & 0x01C0) == 0x0040); + if (tp->mii.full_duplex != duplex) { + tp->mii.full_duplex = duplex; + + if (mii_lpa) { + netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n", + tp->mii.full_duplex ? "full" : "half", + tp->phys[0], mii_lpa); + } else { + netdev_info(dev, "media is unconnected, link down, or incompatible connection\n"); + } +#if 0 + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20); + RTL_W8 (Cfg9346, Cfg9346_Lock); +#endif + } + } + + next_tick = HZ * 60; + + rtl8139_tune_twister (dev, tp); + + netdev_dbg(dev, "Media selection tick, Link partner %04x\n", + RTL_R16(NWayLPAR)); + netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n", + RTL_R16(IntrMask), RTL_R16(IntrStatus)); + netdev_dbg(dev, "Chip config %02x %02x\n", + RTL_R8(Config0), RTL_R8(Config1)); +} + +static void rtl8139_thread (struct work_struct *work) +{ + struct rtl8139_private *tp = + container_of(work, struct rtl8139_private, thread.work); + struct net_device *dev = tp->mii.dev; + unsigned long thr_delay = next_tick; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + if (tp->watchdog_fired) { + tp->watchdog_fired = 0; + rtl8139_tx_timeout_task(work); + } else + rtl8139_thread_iter(dev, tp, tp->mmio_addr); + + if (tp->have_thread) + schedule_delayed_work(&tp->thread, thr_delay); +out_unlock: + rtnl_unlock (); +} + +static void rtl8139_start_thread(struct rtl8139_private *tp) +{ + tp->twistie = 0; + if (tp->chipset == CH_8139_K) + tp->twistie = 1; + else if (tp->drv_flags & HAS_LNK_CHNG) + return; + + tp->have_thread = 1; + tp->watchdog_fired = 0; + + schedule_delayed_work(&tp->thread, next_tick); +} + +static inline void rtl8139_tx_clear (struct rtl8139_private *tp) +{ + tp->cur_tx = 0; + tp->dirty_tx = 0; + + /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ +} + +static void rtl8139_tx_timeout_task (struct work_struct *work) +{ + struct rtl8139_private *tp = + container_of(work, struct rtl8139_private, thread.work); + struct net_device *dev = tp->mii.dev; + void __iomem *ioaddr = tp->mmio_addr; + int i; + u8 tmp8; + + netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", + RTL_R8(ChipCmd), RTL_R16(IntrStatus), + RTL_R16(IntrMask), RTL_R8(MediaStatus)); + /* Emit info to figure out what went wrong. */ + netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n", + tp->cur_tx, tp->dirty_tx); + for (i = 0; i < NUM_TX_DESC; i++) + netdev_dbg(dev, "Tx descriptor %d is %08x%s\n", + i, RTL_R32(TxStatus0 + (i * 4)), + i == tp->dirty_tx % NUM_TX_DESC ? 
+ " (queue head)" : ""); + + tp->xstats.tx_timeouts++; + + /* disable Tx ASAP, if not already */ + tmp8 = RTL_R8 (ChipCmd); + if (tmp8 & CmdTxEnb) + RTL_W8 (ChipCmd, CmdRxEnb); + + spin_lock_bh(&tp->rx_lock); + /* Disable interrupts by clearing the interrupt mask. */ + RTL_W16 (IntrMask, 0x0000); + + /* Stop a shared interrupt from scavenging while we are. */ + spin_lock_irq(&tp->lock); + rtl8139_tx_clear (tp); + spin_unlock_irq(&tp->lock); + + /* ...and finally, reset everything */ + if (netif_running(dev)) { + rtl8139_hw_start (dev); + netif_wake_queue (dev); + } + spin_unlock_bh(&tp->rx_lock); +} + +static void rtl8139_tx_timeout (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + tp->watchdog_fired = 1; + if (!tp->have_thread) { + INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); + schedule_delayed_work(&tp->thread, next_tick); + } +} + +static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int entry; + unsigned int len = skb->len; + unsigned long flags; + + /* Calculate the next Tx descriptor entry. */ + entry = tp->cur_tx % NUM_TX_DESC; + + /* Note: the chip doesn't have auto-pad! */ + if (likely(len < TX_BUF_SIZE)) { + if (len < ETH_ZLEN) + memset(tp->tx_buf[entry], 0, ETH_ZLEN); + skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); + dev_kfree_skb_any(skb); + } else { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + spin_lock_irqsave(&tp->lock, flags); + /* + * Writing to TxStatus triggers a DMA transfer of the data + * copied to tp->tx_buf[entry] above. Use a memory barrier + * to make sure that the device sees the updated data. + */ + wmb(); + RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), + tp->tx_flag | max(len, (unsigned int)ETH_ZLEN)); + + tp->cur_tx++; + + if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) + netif_stop_queue (dev); + spin_unlock_irqrestore(&tp->lock, flags); + + netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n", + len, entry); + + return NETDEV_TX_OK; +} + + +static void rtl8139_tx_interrupt (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr) +{ + unsigned long dirty_tx, tx_left; + + assert (dev != NULL); + assert (ioaddr != NULL); + + dirty_tx = tp->dirty_tx; + tx_left = tp->cur_tx - dirty_tx; + while (tx_left > 0) { + int entry = dirty_tx % NUM_TX_DESC; + int txstatus; + + txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32))); + + if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted))) + break; /* It still hasn't been Txed */ + + /* Note: TxCarrierLost is always asserted at 100mbps. */ + if (txstatus & (TxOutOfWindow | TxAborted)) { + /* There was an major error, log it. */ + netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n", + txstatus); + dev->stats.tx_errors++; + if (txstatus & TxAborted) { + dev->stats.tx_aborted_errors++; + RTL_W32 (TxConfig, TxClearAbt); + RTL_W16 (IntrStatus, TxErr); + wmb(); + } + if (txstatus & TxCarrierLost) + dev->stats.tx_carrier_errors++; + if (txstatus & TxOutOfWindow) + dev->stats.tx_window_errors++; + } else { + if (txstatus & TxUnderrun) { + /* Add 64 to the Tx FIFO threshold. 
*/ + if (tp->tx_flag < 0x00300000) + tp->tx_flag += 0x00020000; + dev->stats.tx_fifo_errors++; + } + dev->stats.collisions += (txstatus >> 24) & 15; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += txstatus & 0x7ff; + u64_stats_update_end(&tp->tx_stats.syncp); + } + + dirty_tx++; + tx_left--; + } + +#ifndef RTL8139_NDEBUG + if (tp->cur_tx - dirty_tx > NUM_TX_DESC) { + netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n", + dirty_tx, tp->cur_tx); + dirty_tx += NUM_TX_DESC; + } +#endif /* RTL8139_NDEBUG */ + + /* only wake the queue if we did work, and the queue is stopped */ + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + mb(); + netif_wake_queue (dev); + } +} + + +/* TODO: clean this up! Rx reset need not be this intensive */ +static void rtl8139_rx_err (u32 rx_status, struct net_device *dev, + struct rtl8139_private *tp, void __iomem *ioaddr) +{ + u8 tmp8; +#ifdef CONFIG_8139_OLD_RX_RESET + int tmp_work; +#endif + + netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n", + rx_status); + dev->stats.rx_errors++; + if (!(rx_status & RxStatusOK)) { + if (rx_status & RxTooLong) { + netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n", + rx_status); + /* A.C.: The chip hangs here. */ + } + if (rx_status & (RxBadSymbol | RxBadAlign)) + dev->stats.rx_frame_errors++; + if (rx_status & (RxRunt | RxTooLong)) + dev->stats.rx_length_errors++; + if (rx_status & RxCRCErr) + dev->stats.rx_crc_errors++; + } else { + tp->xstats.rx_lost_in_ring++; + } + +#ifndef CONFIG_8139_OLD_RX_RESET + tmp8 = RTL_R8 (ChipCmd); + RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb); + RTL_W8 (ChipCmd, tmp8); + RTL_W32 (RxConfig, tp->rx_config); + tp->cur_rx = 0; +#else + /* Reset the receiver, based on RealTek recommendation. (Bug?) */ + + /* disable receive */ + RTL_W8_F (ChipCmd, CmdTxEnb); + tmp_work = 200; + while (--tmp_work > 0) { + udelay(1); + tmp8 = RTL_R8 (ChipCmd); + if (!(tmp8 & CmdRxEnb)) + break; + } + if (tmp_work <= 0) + netdev_warn(dev, "rx stop wait too long\n"); + /* restart receive */ + tmp_work = 200; + while (--tmp_work > 0) { + RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb); + udelay(1); + tmp8 = RTL_R8 (ChipCmd); + if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb)) + break; + } + if (tmp_work <= 0) + netdev_warn(dev, "tx/rx enable wait too long\n"); + + /* and reinitialize all rx related registers */ + RTL_W8_F (Cfg9346, Cfg9346_Unlock); + /* Must enable Tx/Rx before setting transfer thresholds! */ + RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb); + + tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys; + RTL_W32 (RxConfig, tp->rx_config); + tp->cur_rx = 0; + + netdev_dbg(dev, "init buffer addresses\n"); + + /* Lock Config[01234] and BMCR register writes */ + RTL_W8 (Cfg9346, Cfg9346_Lock); + + /* init Rx ring buffer DMA address */ + RTL_W32_F (RxBuf, tp->rx_ring_dma); + + /* A.C.: Reset the multicast list. 
*/ + __set_rx_mode (dev); +#endif +} + +#if RX_BUF_IDX == 3 +static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring, + u32 offset, unsigned int size) +{ + u32 left = RX_BUF_LEN - offset; + + if (size > left) { + skb_copy_to_linear_data(skb, ring + offset, left); + skb_copy_to_linear_data_offset(skb, left, ring, size - left); + } else + skb_copy_to_linear_data(skb, ring + offset, size); +} +#endif + +static void rtl8139_isr_ack(struct rtl8139_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u16 status; + + status = RTL_R16 (IntrStatus) & RxAckBits; + + /* Clear out errors and receive interrupts */ + if (likely(status != 0)) { + if (unlikely(status & (RxFIFOOver | RxOverflow))) { + tp->dev->stats.rx_errors++; + if (status & RxFIFOOver) + tp->dev->stats.rx_fifo_errors++; + } + RTL_W16_F (IntrStatus, RxAckBits); + } +} + +static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp, + int budget) +{ + void __iomem *ioaddr = tp->mmio_addr; + int received = 0; + unsigned char *rx_ring = tp->rx_ring; + unsigned int cur_rx = tp->cur_rx; + unsigned int rx_size = 0; + + netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n", + __func__, (u16)cur_rx, + RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd)); + + while (netif_running(dev) && received < budget && + (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) { + u32 ring_offset = cur_rx % RX_BUF_LEN; + u32 rx_status; + unsigned int pkt_size; + struct sk_buff *skb; + + rmb(); + + /* read size+status of next frame from DMA ring buffer */ + rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset)); + rx_size = rx_status >> 16; + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = rx_size - 4; + else + pkt_size = rx_size; + + netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", + __func__, rx_status, rx_size, cur_rx); +#if RTL8139_DEBUG > 2 + print_hex_dump(KERN_DEBUG, "Frame contents: ", + DUMP_PREFIX_OFFSET, 16, 1, + &rx_ring[ring_offset], 70, true); +#endif + + /* Packet copy from FIFO still in progress. + * Theoretically, this should never happen + * since EarlyRx is disabled. + */ + if (unlikely(rx_size == 0xfff0)) { + if (!tp->fifo_copy_timeout) + tp->fifo_copy_timeout = jiffies + 2; + else if (time_after(jiffies, tp->fifo_copy_timeout)) { + netdev_dbg(dev, "hung FIFO. Reset\n"); + rx_size = 0; + goto no_early_rx; + } + netif_dbg(tp, intr, dev, "fifo copy in progress\n"); + tp->xstats.early_rx++; + break; + } + +no_early_rx: + tp->fifo_copy_timeout = 0; + + /* If Rx err or invalid rx_size/rx_status received + * (which happens if we get lost in the ring), + * Rx process gets reset, so we abort any further + * Rx processing. + */ + if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) || + (rx_size < 8) || + (!(rx_status & RxStatusOK)))) { + if ((dev->features & NETIF_F_RXALL) && + (rx_size <= (MAX_ETH_FRAME_SIZE + 4)) && + (rx_size >= 8) && + (!(rx_status & RxStatusOK))) { + /* Length is at least mostly OK, but pkt has + * error. I'm hoping we can handle some of these + * errors without resetting the chip. --Ben + */ + dev->stats.rx_errors++; + if (rx_status & RxCRCErr) { + dev->stats.rx_crc_errors++; + goto keep_pkt; + } + if (rx_status & RxRunt) { + dev->stats.rx_length_errors++; + goto keep_pkt; + } + } + rtl8139_rx_err (rx_status, dev, tp, ioaddr); + received = -1; + goto out; + } + +keep_pkt: + /* Malloc up new buffer, compatible with net-2e. */ + /* Omit the four octet CRC from the length. 
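+ rx_size as read from the ring header already includes the 4 byte FCS,
+ so pkt_size drops it unless NETIF_F_RXFCS is set.  A minimum 60 byte
+ frame therefore shows up as rx_size 64 and pkt_size 60, and cur_rx
+ later advances by 68 bytes (4 byte header plus frame, rounded up to
+ a dword boundary).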
*/ + + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (likely(skb)) { +#if RX_BUF_IDX == 3 + wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); +#else + skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size); +#endif + skb_put (skb, pkt_size); + + skb->protocol = eth_type_trans (skb, dev); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + + netif_receive_skb (skb); + } else { + dev->stats.rx_dropped++; + } + received++; + + cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; + RTL_W16 (RxBufPtr, (u16) (cur_rx - 16)); + + rtl8139_isr_ack(tp); + } + + if (unlikely(!received || rx_size == 0xfff0)) + rtl8139_isr_ack(tp); + + netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n", + __func__, cur_rx, + RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd)); + + tp->cur_rx = cur_rx; + + /* + * The receive buffer should be mostly empty. + * Tell NAPI to reenable the Rx irq. + */ + if (tp->fifo_copy_timeout) + received = budget; + +out: + return received; +} + + +static void rtl8139_weird_interrupt (struct net_device *dev, + struct rtl8139_private *tp, + void __iomem *ioaddr, + int status, int link_changed) +{ + netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status); + + assert (dev != NULL); + assert (tp != NULL); + assert (ioaddr != NULL); + + /* Update the error count. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + if ((status & RxUnderrun) && link_changed && + (tp->drv_flags & HAS_LNK_CHNG)) { + rtl_check_media(dev, 0); + status &= ~RxUnderrun; + } + + if (status & (RxUnderrun | RxErr)) + dev->stats.rx_errors++; + + if (status & PCSTimeout) + dev->stats.rx_length_errors++; + if (status & RxUnderrun) + dev->stats.rx_fifo_errors++; + if (status & PCIErr) { + u16 pci_cmd_status; + pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status); + pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status); + + netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status); + } +} + +static int rtl8139_poll(struct napi_struct *napi, int budget) +{ + struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi); + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->mmio_addr; + int work_done; + + spin_lock(&tp->rx_lock); + work_done = 0; + if (likely(RTL_R16(IntrStatus) & RxAckBits)) + work_done += rtl8139_rx(dev, tp, budget); + + if (work_done < budget) { + unsigned long flags; + /* + * Order is important since data can get interrupted + * again when we think we are done. + */ + spin_lock_irqsave(&tp->lock, flags); + __napi_complete(napi); + RTL_W16_F(IntrMask, rtl8139_intr_mask); + spin_unlock_irqrestore(&tp->lock, flags); + } + spin_unlock(&tp->rx_lock); + + return work_done; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u16 status, ackstat; + int link_changed = 0; /* avoid bogus "uninit" warning */ + int handled = 0; + + spin_lock (&tp->lock); + status = RTL_R16 (IntrStatus); + + /* shared irq? */ + if (unlikely((status & rtl8139_intr_mask) == 0)) + goto out; + + handled = 1; + + /* h/w no longer present (hotplug?) 
or major error, bail */ + if (unlikely(status == 0xFFFF)) + goto out; + + /* close possible race's with dev_close */ + if (unlikely(!netif_running(dev))) { + RTL_W16 (IntrMask, 0); + goto out; + } + + /* Acknowledge all of the current interrupt sources ASAP, but + an first get an additional status bit from CSCR. */ + if (unlikely(status & RxUnderrun)) + link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit; + + ackstat = status & ~(RxAckBits | TxErr); + if (ackstat) + RTL_W16 (IntrStatus, ackstat); + + /* Receive packets are processed by poll routine. + If not running start it now. */ + if (status & RxAckBits){ + if (napi_schedule_prep(&tp->napi)) { + RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); + __napi_schedule(&tp->napi); + } + } + + /* Check uncommon events with one test. */ + if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr))) + rtl8139_weird_interrupt (dev, tp, ioaddr, + status, link_changed); + + if (status & (TxOK | TxErr)) { + rtl8139_tx_interrupt (dev, tp, ioaddr); + if (status & TxErr) + RTL_W16 (IntrStatus, TxErr); + } + out: + spin_unlock (&tp->lock); + + netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n", + RTL_R16(IntrStatus)); + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void rtl8139_poll_controller(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + + disable_irq(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); +} +#endif + +static int rtl8139_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + spin_lock_irq(&tp->lock); + + RTL_W8_F(Cfg9346, Cfg9346_Unlock); + RTL_W32_F(MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); + RTL_W32_F(MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); + RTL_W8_F(Cfg9346, Cfg9346_Lock); + + spin_unlock_irq(&tp->lock); + + return 0; +} + +static int rtl8139_close (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + netif_stop_queue(dev); + napi_disable(&tp->napi); + + netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n", + RTL_R16(IntrStatus)); + + spin_lock_irqsave (&tp->lock, flags); + + /* Stop the chip's Tx and Rx DMA processes. */ + RTL_W8 (ChipCmd, 0); + + /* Disable interrupts by clearing the interrupt mask. */ + RTL_W16 (IntrMask, 0); + + /* Update the error counts. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + spin_unlock_irqrestore (&tp->lock, flags); + + free_irq(tp->pci_dev->irq, dev); + + rtl8139_tx_clear (tp); + + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); + tp->rx_ring = NULL; + tp->tx_bufs = NULL; + + /* Green! Put the chip in low-power mode. */ + RTL_W8 (Cfg9346, Cfg9346_Unlock); + + if (rtl_chip_info[tp->chipset].flags & HasHltClk) + RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */ + + return 0; +} + + +/* Get the ethtool Wake-on-LAN settings. 
Assumes that wol points to + kernel memory, *wol has been initialized as {ETHTOOL_GWOL}, and + other threads or interrupts aren't messing with the 8139. */ +static void rtl8139_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + spin_lock_irq(&tp->lock); + if (rtl_chip_info[tp->chipset].flags & HasLWake) { + u8 cfg3 = RTL_R8 (Config3); + u8 cfg5 = RTL_R8 (Config5); + + wol->supported = WAKE_PHY | WAKE_MAGIC + | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; + + wol->wolopts = 0; + if (cfg3 & Cfg3_LinkUp) + wol->wolopts |= WAKE_PHY; + if (cfg3 & Cfg3_Magic) + wol->wolopts |= WAKE_MAGIC; + /* (KON)FIXME: See how netdev_set_wol() handles the + following constants. */ + if (cfg5 & Cfg5_UWF) + wol->wolopts |= WAKE_UCAST; + if (cfg5 & Cfg5_MWF) + wol->wolopts |= WAKE_MCAST; + if (cfg5 & Cfg5_BWF) + wol->wolopts |= WAKE_BCAST; + } + spin_unlock_irq(&tp->lock); +} + + +/* Set the ethtool Wake-on-LAN settings. Return 0 or -errno. Assumes + that wol points to kernel memory and other threads or interrupts + aren't messing with the 8139. */ +static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 support; + u8 cfg3, cfg5; + + support = ((rtl_chip_info[tp->chipset].flags & HasLWake) + ? (WAKE_PHY | WAKE_MAGIC + | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST) + : 0); + if (wol->wolopts & ~support) + return -EINVAL; + + spin_lock_irq(&tp->lock); + cfg3 = RTL_R8 (Config3) & ~(Cfg3_LinkUp | Cfg3_Magic); + if (wol->wolopts & WAKE_PHY) + cfg3 |= Cfg3_LinkUp; + if (wol->wolopts & WAKE_MAGIC) + cfg3 |= Cfg3_Magic; + RTL_W8 (Cfg9346, Cfg9346_Unlock); + RTL_W8 (Config3, cfg3); + RTL_W8 (Cfg9346, Cfg9346_Lock); + + cfg5 = RTL_R8 (Config5) & ~(Cfg5_UWF | Cfg5_MWF | Cfg5_BWF); + /* (KON)FIXME: These are untested. We may have to set the + CRC0, Wakeup0 and LSBCRC0 registers too, but I have no + documentation. 
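+ As wired up below, Cfg5_UWF, Cfg5_MWF and Cfg5_BWF simply mirror
+ WAKE_UCAST, WAKE_MCAST and WAKE_BCAST; unlike Config3, the Config5
+ write does not need the Cfg9346 unlock.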
*/ + if (wol->wolopts & WAKE_UCAST) + cfg5 |= Cfg5_UWF; + if (wol->wolopts & WAKE_MCAST) + cfg5 |= Cfg5_MWF; + if (wol->wolopts & WAKE_BCAST) + cfg5 |= Cfg5_BWF; + RTL_W8 (Config5, cfg5); /* need not unlock via Cfg9346 */ + spin_unlock_irq(&tp->lock); + + return 0; +} + +static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct rtl8139_private *tp = netdev_priv(dev); + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + info->regdump_len = tp->regs_len; +} + +static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + spin_lock_irq(&tp->lock); + mii_ethtool_gset(&tp->mii, cmd); + spin_unlock_irq(&tp->lock); + return 0; +} + +static int rtl8139_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int rc; + spin_lock_irq(&tp->lock); + rc = mii_ethtool_sset(&tp->mii, cmd); + spin_unlock_irq(&tp->lock); + return rc; +} + +static int rtl8139_nway_reset(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return mii_nway_restart(&tp->mii); +} + +static u32 rtl8139_get_link(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return mii_link_ok(&tp->mii); +} + +static u32 rtl8139_get_msglevel(struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + return tp->msg_enable; +} + +static void rtl8139_set_msglevel(struct net_device *dev, u32 datum) +{ + struct rtl8139_private *tp = netdev_priv(dev); + tp->msg_enable = datum; +} + +static int rtl8139_get_regs_len(struct net_device *dev) +{ + struct rtl8139_private *tp; + /* TODO: we are too slack to do reg dumping for pio, for now */ + if (use_io) + return 0; + tp = netdev_priv(dev); + return tp->regs_len; +} + +static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) +{ + struct rtl8139_private *tp; + + /* TODO: we are too slack to do reg dumping for pio, for now */ + if (use_io) + return; + tp = netdev_priv(dev); + + regs->version = RTL_REGS_VER; + + spin_lock_irq(&tp->lock); + memcpy_fromio(regbuf, tp->mmio_addr, regs->len); + spin_unlock_irq(&tp->lock); +} + +static int rtl8139_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return RTL_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) +{ + struct rtl8139_private *tp = netdev_priv(dev); + + data[0] = tp->xstats.early_rx; + data[1] = tp->xstats.tx_buf_mapped; + data[2] = tp->xstats.tx_timeouts; + data[3] = tp->xstats.rx_lost_in_ring; +} + +static void rtl8139_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); +} + +static const struct ethtool_ops rtl8139_ethtool_ops = { + .get_drvinfo = rtl8139_get_drvinfo, + .get_settings = rtl8139_get_settings, + .set_settings = rtl8139_set_settings, + .get_regs_len = rtl8139_get_regs_len, + .get_regs = rtl8139_get_regs, + .nway_reset = rtl8139_nway_reset, + .get_link = rtl8139_get_link, + .get_msglevel = rtl8139_get_msglevel, + .set_msglevel = rtl8139_set_msglevel, + .get_wol = rtl8139_get_wol, + .set_wol = rtl8139_set_wol, + .get_strings = rtl8139_get_strings, + .get_sset_count = rtl8139_get_sset_count, + 
.get_ethtool_stats = rtl8139_get_ethtool_stats, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct rtl8139_private *tp = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irq(&tp->lock); + rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL); + spin_unlock_irq(&tp->lock); + + return rc; +} + + +static struct rtnl_link_stats64 * +rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + unsigned int start; + + if (netif_running(dev)) { + spin_lock_irqsave (&tp->lock, flags); + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + spin_unlock_irqrestore (&tp->lock, flags); + } + + netdev_stats_to_stats64(stats, &dev->stats); + + do { + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); + + return stats; +} + +/* Set or clear the multicast filter for this adaptor. + This routine is not state sensitive and need not be SMP locked. */ + +static void __set_rx_mode (struct net_device *dev) +{ + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp; + + netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08x\n", + dev->flags, RTL_R32(RxConfig)); + + /* Note: do not reorder, GCC is clever about common statements. */ + if (dev->flags & IFF_PROMISC) { + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + /* We can safely update without stopping the chip. 
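+ The hash loop above keeps the top six bits of the big-endian CRC of
+ each address: bit_nr >> 5 picks mc_filter[0] (MAR0-3) or mc_filter[1]
+ (MAR4-7), and 1 << (bit_nr & 31) sets the bit inside that half, so a
+ hash of, say, 0x2a lands in mc_filter[1] bit 10.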
*/ + tmp = rtl8139_rx_config | rx_mode; + if (tp->rx_config != tmp) { + RTL_W32_F (RxConfig, tmp); + tp->rx_config = tmp; + } + RTL_W32_F (MAR0 + 0, mc_filter[0]); + RTL_W32_F (MAR0 + 4, mc_filter[1]); +} + +static void rtl8139_set_rx_mode (struct net_device *dev) +{ + unsigned long flags; + struct rtl8139_private *tp = netdev_priv(dev); + + spin_lock_irqsave (&tp->lock, flags); + __set_rx_mode(dev); + spin_unlock_irqrestore (&tp->lock, flags); +} + +#ifdef CONFIG_PM + +static int rtl8139_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata (pdev); + struct rtl8139_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + pci_save_state (pdev); + + if (!netif_running (dev)) + return 0; + + netif_device_detach (dev); + + spin_lock_irqsave (&tp->lock, flags); + + /* Disable interrupts, stop Tx and Rx. */ + RTL_W16 (IntrMask, 0); + RTL_W8 (ChipCmd, 0); + + /* Update the error counts. */ + dev->stats.rx_missed_errors += RTL_R32 (RxMissed); + RTL_W32 (RxMissed, 0); + + spin_unlock_irqrestore (&tp->lock, flags); + + pci_set_power_state (pdev, PCI_D3hot); + + return 0; +} + + +static int rtl8139_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata (pdev); + + pci_restore_state (pdev); + if (!netif_running (dev)) + return 0; + pci_set_power_state (pdev, PCI_D0); + rtl8139_init_ring (dev); + rtl8139_hw_start (dev); + netif_device_attach (dev); + return 0; +} + +#endif /* CONFIG_PM */ + + +static struct pci_driver rtl8139_pci_driver = { + .name = DRV_NAME, + .id_table = rtl8139_pci_tbl, + .probe = rtl8139_init_one, + .remove = rtl8139_remove_one, +#ifdef CONFIG_PM + .suspend = rtl8139_suspend, + .resume = rtl8139_resume, +#endif /* CONFIG_PM */ +}; + + +static int __init rtl8139_init_module (void) +{ + /* when we're a module, we always print a version message, + * even if no 8139 board is found. + */ +#ifdef MODULE + pr_info(RTL8139_DRIVER_NAME "\n"); +#endif + + return pci_register_driver(&rtl8139_pci_driver); +} + + +static void __exit rtl8139_cleanup_module (void) +{ + pci_unregister_driver (&rtl8139_pci_driver); +} + + +module_init(rtl8139_init_module); +module_exit(rtl8139_cleanup_module); diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig new file mode 100644 index 000000000..ae5d02709 --- /dev/null +++ b/drivers/net/ethernet/realtek/Kconfig @@ -0,0 +1,115 @@ +# +# Realtek device configuration +# + +config NET_VENDOR_REALTEK + bool "Realtek devices" + default y + depends on PCI || (PARPORT && X86) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Realtek devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_REALTEK + +config ATP + tristate "AT-LAN-TEC/RealTek pocket adapter support" + depends on PARPORT && X86 + select CRC32 + ---help--- + This is a network (Ethernet) device which attaches to your parallel + port. Read as well as the + Ethernet-HOWTO, available from , + if you want to use this. If you intend to use this driver, you + should have said N to the "Parallel printer support", because the two + drivers don't like each other. + + To compile this driver as a module, choose M here: the module + will be called atp. 
+ +config 8139CP + tristate "RealTek RTL-8139 C+ PCI Fast Ethernet Adapter support" + depends on PCI + select CRC32 + select MII + ---help--- + This is a driver for the Fast Ethernet PCI network cards based on + the RTL8139C+ chips. If you have one of those, say Y and read + the Ethernet-HOWTO, available from + . + + To compile this driver as a module, choose M here: the module + will be called 8139cp. This is recommended. + +config 8139TOO + tristate "RealTek RTL-8129/8130/8139 PCI Fast Ethernet Adapter support" + depends on PCI + select CRC32 + select MII + ---help--- + This is a driver for the Fast Ethernet PCI network cards based on + the RTL 8129/8130/8139 chips. If you have one of those, say Y and + read the Ethernet-HOWTO . + + To compile this driver as a module, choose M here: the module + will be called 8139too. This is recommended. + +config 8139TOO_PIO + bool "Use PIO instead of MMIO" + default y + depends on 8139TOO + ---help--- + This instructs the driver to use programmed I/O ports (PIO) instead + of PCI shared memory (MMIO). This can possibly solve some problems + in case your mainboard has memory consistency issues. If unsure, + say N. + +config 8139TOO_TUNE_TWISTER + bool "Support for uncommon RTL-8139 rev. K (automatic channel equalization)" + depends on 8139TOO + ---help--- + This implements a function which might come in handy in case you + are using low quality on long cabling. It is required for RealTek + RTL-8139 revision K boards, and totally unused otherwise. It tries + to match the transceiver to the cable characteristics. This is + experimental since hardly documented by the manufacturer. + If unsure, say Y. + +config 8139TOO_8129 + bool "Support for older RTL-8129/8130 boards" + depends on 8139TOO + ---help--- + This enables support for the older and uncommon RTL-8129 and + RTL-8130 chips, which support MII via an external transceiver, + instead of an internal one. Disabling this option will save some + memory by making the code size smaller. If unsure, say Y. + +config 8139_OLD_RX_RESET + bool "Use older RX-reset method" + depends on 8139TOO + ---help--- + The 8139too driver was recently updated to contain a more rapid + reset sequence, in the face of severe receive errors. This "new" + RX-reset method should be adequate for all boards. But if you + experience problems, you can enable this option to restore the + old RX-reset behavior. If unsure, say N. + +config R8169 + tristate "Realtek 8169 gigabit ethernet support" + depends on PCI + select FW_LOADER + select CRC32 + select MII + ---help--- + Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. + + To compile this driver as a module, choose M here: the module + will be called r8169. This is recommended. + +endif # NET_VENDOR_REALTEK diff --git a/drivers/net/ethernet/realtek/Makefile b/drivers/net/ethernet/realtek/Makefile new file mode 100644 index 000000000..71b1da30e --- /dev/null +++ b/drivers/net/ethernet/realtek/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Realtek network device drivers. +# + +obj-$(CONFIG_8139CP) += 8139cp.o +obj-$(CONFIG_8139TOO) += 8139too.o +obj-$(CONFIG_ATP) += atp.o +obj-$(CONFIG_R8169) += r8169.o diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c new file mode 100644 index 000000000..d77d60ea8 --- /dev/null +++ b/drivers/net/ethernet/realtek/atp.c @@ -0,0 +1,883 @@ +/* atp.c: Attached (pocket) ethernet adapter driver for linux. 
*/ +/* + This is a driver for commonly OEM pocket (parallel port) + ethernet adapters based on the Realtek RTL8002 and RTL8012 chips. + + Written 1993-2000 by Donald Becker. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + Copyright 1993 United States Government as represented by the Director, + National Security Agency. Copyright 1994-2000 retained by the original + author, Donald Becker. The timer-based reset code was supplied in 1995 + by Bill Carlson, wwc@super.org. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Support information and updates available at + http://www.scyld.com/network/atp.html + + + Modular support/softnet added by Alan Cox. + _bit abuse fixed up by Alan Cox + +*/ + +static const char version[] = +"atp.c:v1.09=ac 2002/10/01 Donald Becker \n"; + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ +#define net_debug debug + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +static int max_interrupt_work = 15; + +#define NUM_UNITS 2 +/* The standard set of ISA module parameters. */ +static int io[NUM_UNITS]; +static int irq[NUM_UNITS]; +static int xcvr[NUM_UNITS]; /* The data transfer mode. */ + +/* Operational parameters that are set at compile time. */ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (400*HZ/1000) + +/* + This file is a device driver for the RealTek (aka AT-Lan-Tec) pocket + ethernet adapter. This is a common low-cost OEM pocket ethernet + adapter, sold under many names. + + Sources: + This driver was written from the packet driver assembly code provided by + Vincent Bono of AT-Lan-Tec. Ever try to figure out how a complicated + device works just from the assembly code? It ain't pretty. The following + description is written based on guesses and writing lots of special-purpose + code to test my theorized operation. + + In 1997 Realtek made available the documentation for the second generation + RTL8012 chip, which has lead to several driver improvements. + http://www.realtek.com.tw/ + + Theory of Operation + + The RTL8002 adapter seems to be built around a custom spin of the SEEQ + controller core. It probably has a 16K or 64K internal packet buffer, of + which the first 4K is devoted to transmit and the rest to receive. + The controller maintains the queue of received packet and the packet buffer + access pointer internally, with only 'reset to beginning' and 'skip to next + packet' commands visible. The transmit packet queue holds two (or more?) + packets: both 'retransmit this packet' (due to collision) and 'transmit next + packet' commands must be started by hand. + + The station address is stored in a standard bit-serial EEPROM which must be + read (ughh) by the device driver. (Provisions have been made for + substituting a 74S288 PROM, but I haven't gotten reports of any models + using it.) Unlike built-in devices, a pocket adapter can temporarily lose + power without indication to the device driver. 
The major effect is that + the station address, receive filter (promiscuous, etc.) and transceiver + must be reset. + + The controller itself has 16 registers, some of which use only the lower + bits. The registers are read and written 4 bits at a time. The four bit + register address is presented on the data lines along with a few additional + timing and control bits. The data is then read from status port or written + to the data port. + + Correction: the controller has two banks of 16 registers. The second + bank contains only the multicast filter table (now used) and the EEPROM + access registers. + + Since the bulk data transfer of the actual packets through the slow + parallel port dominates the driver's running time, four distinct data + (non-register) transfer modes are provided by the adapter, two in each + direction. In the first mode timing for the nibble transfers is + provided through the data port. In the second mode the same timing is + provided through the control port. In either case the data is read from + the status port and written to the data port, just as it is accessing + registers. + + In addition to the basic data transfer methods, several more are modes are + created by adding some delay by doing multiple reads of the data to allow + it to stabilize. This delay seems to be needed on most machines. + + The data transfer mode is stored in the 'dev->if_port' field. Its default + value is '4'. It may be overridden at boot-time using the third parameter + to the "ether=..." initialization. + + The header file provides inline functions that encapsulate the + register and data access methods. These functions are hand-tuned to + generate reasonable object code. This header file also documents my + interpretations of the device registers. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "atp.h" + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(max_interrupt_work, int, 0); +module_param(debug, int, 0); +module_param_array(io, int, NULL, 0); +module_param_array(irq, int, NULL, 0); +module_param_array(xcvr, int, NULL, 0); +MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt"); +MODULE_PARM_DESC(debug, "ATP debug level (0-7)"); +MODULE_PARM_DESC(io, "ATP I/O base address(es)"); +MODULE_PARM_DESC(irq, "ATP IRQ number(s)"); +MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)"); + +/* The number of low I/O ports used by the ethercard. */ +#define ETHERCARD_TOTAL_SIZE 3 + +/* Sequence to switch an 8012 from printer mux to ethernet mode. */ +static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,}; + +struct net_local { + spinlock_t lock; + struct net_device *next_module; + struct timer_list timer; /* Media selection timer. */ + long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ + int saved_tx_size; + unsigned int tx_unit_busy:1; + unsigned char re_tx, /* Number of packet retransmissions. */ + addr_mode, /* Current Rx filter e.g. promiscuous, etc. */ + pac_cnt_in_tx_buf; +}; + +/* This code, written by wwc@super.org, resets the adapter every + TIMED_CHECKER ticks. This recovers from an unknown error which + hangs the device. 
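+ With TIMED_CHECKER at HZ/4 the timer fires every 250 ms; if no frame
+ has been received for two seconds, atp_timed_checker() below rewrites
+ the PAR0..PAR5 station address registers and re-arms itself.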
*/ +#define TIMED_CHECKER (HZ/4) +#ifdef TIMED_CHECKER +#include +static void atp_timed_checker(unsigned long ignored); +#endif + +/* Index to functions, as function prototypes. */ + +static int atp_probe1(long ioaddr); +static void get_node_ID(struct net_device *dev); +static unsigned short eeprom_op(long ioaddr, unsigned int cmd); +static int net_open(struct net_device *dev); +static void hardware_init(struct net_device *dev); +static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode); +static void trigger_send(long ioaddr, int length); +static netdev_tx_t atp_send_packet(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t atp_interrupt(int irq, void *dev_id); +static void net_rx(struct net_device *dev); +static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode); +static int net_close(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static void tx_timeout(struct net_device *dev); + + +/* A list of all installed ATP devices, for removing the driver module. */ +static struct net_device *root_atp_dev; + +/* Check for a network adapter of this type, and return '0' iff one exists. + If dev->base_addr == 0, probe all likely locations. + If dev->base_addr == 1, always return failure. + If dev->base_addr == 2, allocate space for the device and return success + (detachable devices only). + + FIXME: we should use the parport layer for this + */ +static int __init atp_init(void) +{ + int *port, ports[] = {0x378, 0x278, 0x3bc, 0}; + int base_addr = io[0]; + + if (base_addr > 0x1ff) /* Check a single specified location. */ + return atp_probe1(base_addr); + else if (base_addr == 1) /* Don't probe at all. */ + return -ENXIO; + + for (port = ports; *port; port++) { + long ioaddr = *port; + outb(0x57, ioaddr + PAR_DATA); + if (inb(ioaddr + PAR_DATA) != 0x57) + continue; + if (atp_probe1(ioaddr) == 0) + return 0; + } + + return -ENODEV; +} + +static const struct net_device_ops atp_netdev_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_start_xmit = atp_send_packet, + .ndo_set_rx_mode = set_rx_mode, + .ndo_tx_timeout = tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int __init atp_probe1(long ioaddr) +{ + struct net_device *dev = NULL; + struct net_local *lp; + int saved_ctrl_reg, status, i; + int res; + + outb(0xff, ioaddr + PAR_DATA); + /* Save the original value of the Control register, in case we guessed + wrong. */ + saved_ctrl_reg = inb(ioaddr + PAR_CONTROL); + if (net_debug > 3) + printk("atp: Control register was %#2.2x.\n", saved_ctrl_reg); + /* IRQEN=0, SLCTB=high INITB=high, AUTOFDB=high, STBB=high. */ + outb(0x04, ioaddr + PAR_CONTROL); +#ifndef final_version + if (net_debug > 3) { + /* Turn off the printer multiplexer on the 8012. */ + for (i = 0; i < 8; i++) + outb(mux_8012[i], ioaddr + PAR_DATA); + write_reg(ioaddr, MODSEL, 0x00); + printk("atp: Registers are "); + for (i = 0; i < 32; i++) + printk(" %2.2x", read_nibble(ioaddr, i)); + printk(".\n"); + } +#endif + /* Turn off the printer multiplexer on the 8012. */ + for (i = 0; i < 8; i++) + outb(mux_8012[i], ioaddr + PAR_DATA); + write_reg_high(ioaddr, CMR1, CMR1h_RESET); + /* udelay() here? 
*/ + status = read_nibble(ioaddr, CMR1); + + if (net_debug > 3) { + printk(KERN_DEBUG "atp: Status nibble was %#2.2x..", status); + for (i = 0; i < 32; i++) + printk(" %2.2x", read_nibble(ioaddr, i)); + printk("\n"); + } + + if ((status & 0x78) != 0x08) { + /* The pocket adapter probe failed, restore the control register. */ + outb(saved_ctrl_reg, ioaddr + PAR_CONTROL); + return -ENODEV; + } + status = read_nibble(ioaddr, CMR2_h); + if ((status & 0x78) != 0x10) { + outb(saved_ctrl_reg, ioaddr + PAR_CONTROL); + return -ENODEV; + } + + dev = alloc_etherdev(sizeof(struct net_local)); + if (!dev) + return -ENOMEM; + + /* Find the IRQ used by triggering an interrupt. */ + write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. */ + write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable Tx and Rx. */ + + /* Omit autoIRQ routine for now. Use "table lookup" instead. Uhgggh. */ + if (irq[0]) + dev->irq = irq[0]; + else if (ioaddr == 0x378) + dev->irq = 7; + else + dev->irq = 5; + write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF); /* Disable Tx and Rx units. */ + write_reg(ioaddr, CMR2, CMR2_NULL); + + dev->base_addr = ioaddr; + + /* Read the station address PROM. */ + get_node_ID(dev); + +#ifndef MODULE + if (net_debug) + printk(KERN_INFO "%s", version); +#endif + + printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, " + "SAPROM %pM.\n", + dev->name, dev->base_addr, dev->irq, dev->dev_addr); + + /* Reset the ethernet hardware and activate the printer pass-through. */ + write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); + + lp = netdev_priv(dev); + lp->addr_mode = CMR2h_Normal; + spin_lock_init(&lp->lock); + + /* For the ATP adapter the "if_port" is really the data transfer mode. */ + if (xcvr[0]) + dev->if_port = xcvr[0]; + else + dev->if_port = (dev->mem_start & 0xf) ? (dev->mem_start & 0x7) : 4; + if (dev->mem_end & 0xf) + net_debug = dev->mem_end & 7; + + dev->netdev_ops = &atp_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + res = register_netdev(dev); + if (res) { + free_netdev(dev); + return res; + } + + lp->next_module = root_atp_dev; + root_atp_dev = dev; + + return 0; +} + +/* Read the station address PROM, usually a word-wide EEPROM. */ +static void __init get_node_ID(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + int sa_offset = 0; + int i; + + write_reg(ioaddr, CMR2, CMR2_EEPROM); /* Point to the EEPROM control registers. */ + + /* Some adapters have the station address at offset 15 instead of offset + zero. Check for it, and fix it if needed. */ + if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff) + sa_offset = 15; + + for (i = 0; i < 3; i++) + ((__be16 *)dev->dev_addr)[i] = + cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i))); + + write_reg(ioaddr, CMR2, CMR2_NULL); +} + +/* + An EEPROM read command starts by shifting out 0x60+address, and then + shifting in the serial data. See the NatSemi databook for details. 
+ * ________________ + * CS : __| + * ___ ___ + * CLK: ______| |___| | + * __ _______ _______ + * DI : __X_______X_______X + * DO : _________X_______X + */ + +static unsigned short __init eeprom_op(long ioaddr, u32 cmd) +{ + unsigned eedata_out = 0; + int num_bits = EE_CMD_SIZE; + + while (--num_bits >= 0) { + char outval = (cmd & (1<irq, atp_interrupt, 0, dev->name, dev); + if (ret) + return ret; + + hardware_init(dev); + + init_timer(&lp->timer); + lp->timer.expires = jiffies + TIMED_CHECKER; + lp->timer.data = (unsigned long)dev; + lp->timer.function = atp_timed_checker; /* timer handler */ + add_timer(&lp->timer); + + netif_start_queue(dev); + return 0; +} + +/* This routine resets the hardware. We initialize everything, assuming that + the hardware may have been temporarily detached. */ +static void hardware_init(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + long ioaddr = dev->base_addr; + int i; + + /* Turn off the printer multiplexer on the 8012. */ + for (i = 0; i < 8; i++) + outb(mux_8012[i], ioaddr + PAR_DATA); + write_reg_high(ioaddr, CMR1, CMR1h_RESET); + + for (i = 0; i < 6; i++) + write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]); + + write_reg_high(ioaddr, CMR2, lp->addr_mode); + + if (net_debug > 2) { + printk(KERN_DEBUG "%s: Reset: current Rx mode %d.\n", dev->name, + (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f); + } + + write_reg(ioaddr, CMR2, CMR2_IRQOUT); + write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); + + /* Enable the interrupt line from the serial port. */ + outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL); + + /* Unmask the interesting interrupts. */ + write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); + write_reg_high(ioaddr, IMR, ISRh_RxErr); + + lp->tx_unit_busy = 0; + lp->pac_cnt_in_tx_buf = 0; + lp->saved_tx_size = 0; +} + +static void trigger_send(long ioaddr, int length) +{ + write_reg_byte(ioaddr, TxCNT0, length & 0xff); + write_reg(ioaddr, TxCNT1, length >> 8); + write_reg(ioaddr, CMR1, CMR1_Xmit); +} + +static void write_packet(long ioaddr, int length, unsigned char *packet, int pad_len, int data_mode) +{ + if (length & 1) + { + length++; + pad_len++; + } + + outb(EOC+MAR, ioaddr + PAR_DATA); + if ((data_mode & 1) == 0) { + /* Write the packet out, starting with the write addr. */ + outb(WrAddr+MAR, ioaddr + PAR_DATA); + do { + write_byte_mode0(ioaddr, *packet++); + } while (--length > pad_len) ; + do { + write_byte_mode0(ioaddr, 0); + } while (--length > 0) ; + } else { + /* Write the packet out in slow mode. */ + unsigned char outbyte = *packet++; + + outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL); + outb(WrAddr+MAR, ioaddr + PAR_DATA); + + outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA); + outb(outbyte & 0x0f, ioaddr + PAR_DATA); + outbyte >>= 4; + outb(outbyte & 0x0f, ioaddr + PAR_DATA); + outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL); + while (--length > pad_len) + write_byte_mode1(ioaddr, *packet++); + while (--length > 0) + write_byte_mode1(ioaddr, 0); + } + /* Terminate the Tx frame. End of write: ECB. */ + outb(0xff, ioaddr + PAR_DATA); + outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL); +} + +static void tx_timeout(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + + printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name, + inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem" + : "IRQ conflict"); + dev->stats.tx_errors++; + /* Try to restart the adapter. 
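+ Restarting means a full hardware_init(): the station address and Rx
+ mode are reprogrammed and the interrupt sources re-enabled before the
+ queue is woken below.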
*/ + hardware_init(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + dev->stats.tx_errors++; +} + +static netdev_tx_t atp_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + long ioaddr = dev->base_addr; + int length; + unsigned long flags; + + length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + + netif_stop_queue(dev); + + /* Disable interrupts by writing 0x00 to the Interrupt Mask Register. + This sequence must not be interrupted by an incoming packet. */ + + spin_lock_irqsave(&lp->lock, flags); + write_reg(ioaddr, IMR, 0); + write_reg_high(ioaddr, IMR, 0); + spin_unlock_irqrestore(&lp->lock, flags); + + write_packet(ioaddr, length, skb->data, length-skb->len, dev->if_port); + + lp->pac_cnt_in_tx_buf++; + if (lp->tx_unit_busy == 0) { + trigger_send(ioaddr, length); + lp->saved_tx_size = 0; /* Redundant */ + lp->re_tx = 0; + lp->tx_unit_busy = 1; + } else + lp->saved_tx_size = length; + /* Re-enable the LPT interrupts. */ + write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); + write_reg_high(ioaddr, IMR, ISRh_RxErr); + + dev_kfree_skb (skb); + return NETDEV_TX_OK; +} + + +/* The typical workload of the driver: + Handle the network interface interrupts. */ +static irqreturn_t atp_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct net_local *lp; + long ioaddr; + static int num_tx_since_rx; + int boguscount = max_interrupt_work; + int handled = 0; + + ioaddr = dev->base_addr; + lp = netdev_priv(dev); + + spin_lock(&lp->lock); + + /* Disable additional spurious interrupts. */ + outb(Ctrl_SelData, ioaddr + PAR_CONTROL); + + /* The adapter's output is currently the IRQ line, switch it to data. */ + write_reg(ioaddr, CMR2, CMR2_NULL); + write_reg(ioaddr, IMR, 0); + + if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name); + while (--boguscount > 0) { + int status = read_nibble(ioaddr, ISR); + if (net_debug > 5) printk("loop status %02x..", status); + + if (status & (ISR_RxOK<<3)) { + handled = 1; + write_reg(ioaddr, ISR, ISR_RxOK); /* Clear the Rx interrupt. */ + do { + int read_status = read_nibble(ioaddr, CMR1); + if (net_debug > 6) + printk("handling Rx packet %02x..", read_status); + /* We acknowledged the normal Rx interrupt, so if the interrupt + is still outstanding we must have a Rx error. */ + if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */ + dev->stats.rx_over_errors++; + /* Set to no-accept mode long enough to remove a packet. */ + write_reg_high(ioaddr, CMR2, CMR2h_OFF); + net_rx(dev); + /* Clear the interrupt and return to normal Rx mode. */ + write_reg_high(ioaddr, ISR, ISRh_RxErr); + write_reg_high(ioaddr, CMR2, lp->addr_mode); + } else if ((read_status & (CMR1_BufEnb << 3)) == 0) { + net_rx(dev); + num_tx_since_rx = 0; + } else + break; + } while (--boguscount > 0); + } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) { + handled = 1; + if (net_debug > 6) printk("handling Tx done.."); + /* Clear the Tx interrupt. We should check for too many failures + and reinitialize the adapter. */ + write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK); + if (status & (ISR_TxErr<<3)) { + dev->stats.collisions++; + if (++lp->re_tx > 15) { + dev->stats.tx_aborted_errors++; + hardware_init(dev); + break; + } + /* Attempt to retransmit. */ + if (net_debug > 6) printk("attempting to ReTx"); + write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit); + } else { + /* Finish up the transmit. 
*/ + dev->stats.tx_packets++; + lp->pac_cnt_in_tx_buf--; + if ( lp->saved_tx_size) { + trigger_send(ioaddr, lp->saved_tx_size); + lp->saved_tx_size = 0; + lp->re_tx = 0; + } else + lp->tx_unit_busy = 0; + netif_wake_queue(dev); /* Inform upper layers. */ + } + num_tx_since_rx++; + } else if (num_tx_since_rx > 8 && + time_after(jiffies, dev->last_rx + HZ)) { + if (net_debug > 2) + printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and " + "%ld jiffies status %02x CMR1 %02x.\n", dev->name, + num_tx_since_rx, jiffies - dev->last_rx, status, + (read_nibble(ioaddr, CMR1) >> 3) & 15); + dev->stats.rx_missed_errors++; + hardware_init(dev); + num_tx_since_rx = 0; + break; + } else + break; + } + + /* This following code fixes a rare (and very difficult to track down) + problem where the adapter forgets its ethernet address. */ + { + int i; + for (i = 0; i < 6; i++) + write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]); +#if 0 && defined(TIMED_CHECKER) + mod_timer(&lp->timer, jiffies + TIMED_CHECKER); +#endif + } + + /* Tell the adapter that it can go back to using the output line as IRQ. */ + write_reg(ioaddr, CMR2, CMR2_IRQOUT); + /* Enable the physical interrupt line, which is sure to be low until.. */ + outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL); + /* .. we enable the interrupt sources. */ + write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); + write_reg_high(ioaddr, IMR, ISRh_RxErr); /* Hmmm, really needed? */ + + spin_unlock(&lp->lock); + + if (net_debug > 5) printk("exiting interrupt.\n"); + return IRQ_RETVAL(handled); +} + +#ifdef TIMED_CHECKER +/* This following code fixes a rare (and very difficult to track down) + problem where the adapter forgets its ethernet address. */ +static void atp_timed_checker(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + long ioaddr = dev->base_addr; + struct net_local *lp = netdev_priv(dev); + int tickssofar = jiffies - lp->last_rx_time; + int i; + + spin_lock(&lp->lock); + if (tickssofar > 2*HZ) { +#if 1 + for (i = 0; i < 6; i++) + write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]); + lp->last_rx_time = jiffies; +#else + for (i = 0; i < 6; i++) + if (read_cmd_byte(ioaddr, PAR0 + i) != atp_timed_dev->dev_addr[i]) + { + struct net_local *lp = netdev_priv(atp_timed_dev); + write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]); + if (i == 2) + dev->stats.tx_errors++; + else if (i == 3) + dev->stats.tx_dropped++; + else if (i == 4) + dev->stats.collisions++; + else + dev->stats.rx_errors++; + } +#endif + } + spin_unlock(&lp->lock); + lp->timer.expires = jiffies + TIMED_CHECKER; + add_timer(&lp->timer); +} +#endif + +/* We have a good packet(s), get it/them out of the buffers. */ +static void net_rx(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + long ioaddr = dev->base_addr; + struct rx_header rx_head; + + /* Process the received packet. 
*/ + outb(EOC+MAR, ioaddr + PAR_DATA); + read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port); + if (net_debug > 5) + printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad, + rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr); + if ((rx_head.rx_status & 0x77) != 0x01) { + dev->stats.rx_errors++; + if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++; + else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++; + if (net_debug > 3) + printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n", + dev->name, rx_head.rx_status); + if (rx_head.rx_status & 0x0020) { + dev->stats.rx_fifo_errors++; + write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE); + write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); + } else if (rx_head.rx_status & 0x0050) + hardware_init(dev); + return; + } else { + /* Malloc up new buffer. The "-4" omits the FCS (CRC). */ + int pkt_len = (rx_head.rx_count & 0x7ff) - 4; + struct sk_buff *skb; + + skb = netdev_alloc_skb(dev, pkt_len + 2); + if (skb == NULL) { + dev->stats.rx_dropped++; + goto done; + } + + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->last_rx = jiffies; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + done: + write_reg(ioaddr, CMR1, CMR1_NextPkt); + lp->last_rx_time = jiffies; +} + +static void read_block(long ioaddr, int length, unsigned char *p, int data_mode) +{ + if (data_mode <= 3) { /* Mode 0 or 1 */ + outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); + outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR, + ioaddr + PAR_DATA); + if (data_mode <= 1) { /* Mode 0 or 1 */ + do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0); + } else { /* Mode 2 or 3 */ + do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0); + } + } else if (data_mode <= 5) { + do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0); + } else { + do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0); + } + + outb(EOC+HNib+MAR, ioaddr + PAR_DATA); + outb(Ctrl_SelData, ioaddr + PAR_CONTROL); +} + +/* The inverse routine to net_open(). */ +static int +net_close(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + long ioaddr = dev->base_addr; + + netif_stop_queue(dev); + + del_timer_sync(&lp->timer); + + /* Flush the Tx and disable Rx here. */ + lp->addr_mode = CMR2h_OFF; + write_reg_high(ioaddr, CMR2, CMR2h_OFF); + + /* Free the IRQ line. */ + outb(0x00, ioaddr + PAR_CONTROL); + free_irq(dev->irq, dev); + + /* Reset the ethernet hardware and activate the printer pass-through. */ + write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); + return 0; +} + +/* + * Set or clear the multicast filter for this adapter. + */ + +static void set_rx_mode(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + long ioaddr = dev->base_addr; + + if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) + lp->addr_mode = CMR2h_PROMISC; + else + lp->addr_mode = CMR2h_Normal; + write_reg_high(ioaddr, CMR2, lp->addr_mode); +} + +static int __init atp_init_module(void) { + if (debug) /* Emit version even if no cards detected. 
*/ + printk(KERN_INFO "%s", version); + return atp_init(); +} + +static void __exit atp_cleanup_module(void) { + struct net_device *next_dev; + + while (root_atp_dev) { + struct net_local *atp_local = netdev_priv(root_atp_dev); + next_dev = atp_local->next_module; + unregister_netdev(root_atp_dev); + /* No need to release_region(), since we never snarf it. */ + free_netdev(root_atp_dev); + root_atp_dev = next_dev; + } +} + +module_init(atp_init_module); +module_exit(atp_cleanup_module); diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h new file mode 100644 index 000000000..32497f0e5 --- /dev/null +++ b/drivers/net/ethernet/realtek/atp.h @@ -0,0 +1,265 @@ +/* Linux header file for the ATP pocket ethernet adapter. */ +/* v1.09 8/9/2000 becker@scyld.com. */ + +#include +#include + +/* The header prepended to received packets. */ +struct rx_header { + ushort pad; /* Pad. */ + ushort rx_count; + ushort rx_status; /* Unknown bit assignments :-<. */ + ushort cur_addr; /* Apparently the current buffer address(?) */ +}; + +#define PAR_DATA 0 +#define PAR_STATUS 1 +#define PAR_CONTROL 2 + +#define Ctrl_LNibRead 0x08 /* LP_PSELECP */ +#define Ctrl_HNibRead 0 +#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */ +#define Ctrl_HNibWrite 0 +#define Ctrl_SelData 0x04 /* LP_PINITP */ +#define Ctrl_IRQEN 0x10 /* LP_PINTEN */ + +#define EOW 0xE0 +#define EOC 0xE0 +#define WrAddr 0x40 /* Set address of EPLC read, write register. */ +#define RdAddr 0xC0 +#define HNib 0x10 + +enum page0_regs { + /* The first six registers hold + * the ethernet physical station address. + */ + PAR0 = 0, PAR1 = 1, PAR2 = 2, PAR3 = 3, PAR4 = 4, PAR5 = 5, + TxCNT0 = 6, TxCNT1 = 7, /* The transmit byte count. */ + TxSTAT = 8, RxSTAT = 9, /* Tx and Rx status. */ + ISR = 10, IMR = 11, /* Interrupt status and mask. */ + CMR1 = 12, /* Command register 1. */ + CMR2 = 13, /* Command register 2. */ + MODSEL = 14, /* Mode select register. */ + MAR = 14, /* Memory address register (?). */ + CMR2_h = 0x1d, +}; + +enum eepage_regs { + PROM_CMD = 6, + PROM_DATA = 7 /* Note that PROM_CMD is in the "high" bits. */ +}; + +#define ISR_TxOK 0x01 +#define ISR_RxOK 0x04 +#define ISR_TxErr 0x02 +#define ISRh_RxErr 0x11 /* ISR, high nibble */ + +#define CMR1h_MUX 0x08 /* Select printer multiplexor on 8012. */ +#define CMR1h_RESET 0x04 /* Reset. */ +#define CMR1h_RxENABLE 0x02 /* Rx unit enable. */ +#define CMR1h_TxENABLE 0x01 /* Tx unit enable. */ +#define CMR1h_TxRxOFF 0x00 +#define CMR1_ReXmit 0x08 /* Trigger a retransmit. */ +#define CMR1_Xmit 0x04 /* Trigger a transmit. */ +#define CMR1_IRQ 0x02 /* Interrupt active. */ +#define CMR1_BufEnb 0x01 /* Enable the buffer(?). */ +#define CMR1_NextPkt 0x01 /* Enable the buffer(?). */ + +#define CMR2_NULL 8 +#define CMR2_IRQOUT 9 +#define CMR2_RAMTEST 10 +#define CMR2_EEPROM 12 /* Set to page 1, for reading the EEPROM. */ + +#define CMR2h_OFF 0 /* No accept mode. */ +#define CMR2h_Physical 1 /* Accept a physical address match only. */ +#define CMR2h_Normal 2 /* Accept physical and broadcast address. */ +#define CMR2h_PROMISC 3 /* Promiscuous mode. */ + +/* An inline function used below: it differs from inb() by explicitly + * return an unsigned char, saving a truncation. + */ +static inline unsigned char inbyte(unsigned short port) +{ + unsigned char _v; + + __asm__ __volatile__ ("inb %w1,%b0" : "=a" (_v) : "d" (port)); + return _v; +} + +/* Read register OFFSET. + * This command should always be terminated with read_end(). 
+ */ +static inline unsigned char read_nibble(short port, unsigned char offset) +{ + unsigned char retval; + + outb(EOC+offset, port + PAR_DATA); + outb(RdAddr+offset, port + PAR_DATA); + inbyte(port + PAR_STATUS); /* Settling time delay */ + retval = inbyte(port + PAR_STATUS); + outb(EOC+offset, port + PAR_DATA); + + return retval; +} + +/* Functions for bulk data read. The interrupt line is always disabled. */ +/* Get a byte using read mode 0, reading data from the control lines. */ +static inline unsigned char read_byte_mode0(short ioaddr) +{ + unsigned char low_nib; + + outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); +} + +/* The same as read_byte_mode0(), but does multiple inb()s for stability. */ +static inline unsigned char read_byte_mode2(short ioaddr) +{ + unsigned char low_nib; + + outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(Ctrl_HNibRead, ioaddr + PAR_CONTROL); + inbyte(ioaddr + PAR_STATUS); /* Settling time delay -- needed! */ + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); +} + +/* Read a byte through the data register. */ +static inline unsigned char read_byte_mode4(short ioaddr) +{ + unsigned char low_nib; + + outb(RdAddr | MAR, ioaddr + PAR_DATA); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); +} + +/* Read a byte through the data register, double reading to allow settling. */ +static inline unsigned char read_byte_mode6(short ioaddr) +{ + unsigned char low_nib; + + outb(RdAddr | MAR, ioaddr + PAR_DATA); + inbyte(ioaddr + PAR_STATUS); + low_nib = (inbyte(ioaddr + PAR_STATUS) >> 3) & 0x0f; + outb(RdAddr | HNib | MAR, ioaddr + PAR_DATA); + inbyte(ioaddr + PAR_STATUS); + return low_nib | ((inbyte(ioaddr + PAR_STATUS) << 1) & 0xf0); +} + +static inline void +write_reg(short port, unsigned char reg, unsigned char value) +{ + unsigned char outval; + + outb(EOC | reg, port + PAR_DATA); + outval = WrAddr | reg; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + + outval &= 0xf0; + outval |= value; + outb(outval, port + PAR_DATA); + outval &= 0x1f; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); + + outb(EOC | outval, port + PAR_DATA); +} + +static inline void +write_reg_high(short port, unsigned char reg, unsigned char value) +{ + unsigned char outval = EOC | HNib | reg; + + outb(outval, port + PAR_DATA); + outval &= WrAddr | HNib | 0x0f; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + + outval = WrAddr | HNib | value; + outb(outval, port + PAR_DATA); + outval &= HNib | 0x0f; /* HNib | value */ + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); + + outb(EOC | HNib | outval, port + PAR_DATA); +} + +/* Write a byte out using nibble mode. The low nibble is written first. */ +static inline void +write_reg_byte(short port, unsigned char reg, unsigned char value) +{ + unsigned char outval; + + outb(EOC | reg, port + PAR_DATA); /* Reset the address register. 
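The read_byte_mode0()/read_byte_mode2() helpers above rebuild each byte from two reads of the printer status port: the low nibble comes out of status bits 3-6, and a second read supplies the high nibble, shifted up by one and masked to 0xf0. A minimal userspace sketch of just that arithmetic (the sample status values are invented):

/* nibble_demo.c - recombine a byte from two status-port reads, mirroring
 * the (status >> 3) & 0x0f and (status << 1) & 0xf0 arithmetic used by
 * read_byte_mode0() above.  Sample values are made up for illustration. */
#include <stdio.h>

static unsigned char combine(unsigned char status_lo, unsigned char status_hi)
{
	unsigned char low_nib  = (status_lo >> 3) & 0x0f;  /* data on S3..S6 */
	unsigned char high_nib = (status_hi << 1) & 0xf0;  /* same bits, moved to 7..4 */
	return low_nib | high_nib;
}

int main(void)
{
	/* Pretend the adapter presented 0x5 and then 0xA on the status lines. */
	unsigned char lo = 0x5 << 3, hi = 0xA << 3;

	printf("rebuilt byte: 0x%02x\n", combine(lo, hi));  /* prints 0xa5 */
	return 0;
}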
*/ + outval = WrAddr | reg; + outb(outval, port + PAR_DATA); + outb(outval, port + PAR_DATA); /* Double write for PS/2. */ + + outb((outval & 0xf0) | (value & 0x0f), port + PAR_DATA); + outb(value & 0x0f, port + PAR_DATA); + value >>= 4; + outb(value, port + PAR_DATA); + outb(0x10 | value, port + PAR_DATA); + outb(0x10 | value, port + PAR_DATA); + + outb(EOC | value, port + PAR_DATA); /* Reset the address register. */ +} + +/* Bulk data writes to the packet buffer. The interrupt line remains enabled. + * The first, faster method uses only the dataport (data modes 0, 2 & 4). + * The second (backup) method uses data and control regs (modes 1, 3 & 5). + * It should only be needed when there is skew between the individual data + * lines. + */ +static inline void write_byte_mode0(short ioaddr, unsigned char value) +{ + outb(value & 0x0f, ioaddr + PAR_DATA); + outb((value>>4) | 0x10, ioaddr + PAR_DATA); +} + +static inline void write_byte_mode1(short ioaddr, unsigned char value) +{ + outb(value & 0x0f, ioaddr + PAR_DATA); + outb(Ctrl_IRQEN | Ctrl_LNibWrite, ioaddr + PAR_CONTROL); + outb((value>>4) | 0x10, ioaddr + PAR_DATA); + outb(Ctrl_IRQEN | Ctrl_HNibWrite, ioaddr + PAR_CONTROL); +} + +/* Write 16bit VALUE to the packet buffer: the same as above just doubled. */ +static inline void write_word_mode0(short ioaddr, unsigned short value) +{ + outb(value & 0x0f, ioaddr + PAR_DATA); + value >>= 4; + outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); + value >>= 4; + outb(value & 0x0f, ioaddr + PAR_DATA); + value >>= 4; + outb((value & 0x0f) | 0x10, ioaddr + PAR_DATA); +} + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ +#define EE_CS 0x02 /* EEPROM chip select. */ +#define EE_CLK_HIGH 0x12 +#define EE_CLK_LOW 0x16 +#define EE_DATA_WRITE 0x01 /* EEPROM chip data in. */ +#define EE_DATA_READ 0x08 /* EEPROM chip data out. */ + +/* Delay between EEPROM clock transitions. */ +#define eeprom_delay(ticks) \ +do { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; } } while (0) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_WRITE_CMD(offset) (((5 << 6) + (offset)) << 17) +#define EE_READ(offset) (((6 << 6) + (offset)) << 17) +#define EE_ERASE(offset) (((7 << 6) + (offset)) << 17) +#define EE_CMD_SIZE 27 /* The command+address+data size. */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c new file mode 100644 index 000000000..8267ffead --- /dev/null +++ b/drivers/net/ethernet/realtek/r8169.c @@ -0,0 +1,8355 @@ +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
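The EE_READ()/EE_WRITE_CMD()/EE_ERASE() macros above build the serial EEPROM command word: the always-set leading bit, a two-bit opcode and a six-bit offset, shifted up by 17 so the word can be clocked out one bit per cycle for EE_CMD_SIZE cycles. A standalone sketch that only prints the resulting bit stream for one read command; the offset is arbitrary and the loop is an illustration of the layout, not the driver's EEPROM routine:

/* ee_cmd_demo.c - show how EE_READ() lays out the serial EEPROM read
 * command in the upper bits of the command word.  The offset is arbitrary. */
#include <stdio.h>

#define EE_READ(offset)  (((6 << 6) + (offset)) << 17)
#define EE_CMD_SIZE      27	/* command + address + data bits */

int main(void)
{
	unsigned int cmd = EE_READ(0x12);
	int bit;

	printf("EE_READ(0x12) = 0x%08x, clocked out as: ", cmd);
	for (bit = EE_CMD_SIZE - 1; bit >= 0; bit--)
		putchar((cmd & (1u << bit)) ? '1' : '0');
	putchar('\n');
	return 0;
}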
*/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/firmware.h> +#include <linux/pci-aspm.h> +#include <linux/prefetch.h> +#include <linux/ipv6.h> +#include <net/ip6_checksum.h> + +#include <asm/io.h> +#include <asm/irq.h> + +#define RTL8169_VERSION "2.3LK-NAPI" +#define MODULENAME "r8169" +#define PFX MODULENAME ": " + +#define FIRMWARE_8168D_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168D_2 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168E_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168E_2 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168E_3 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168F_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168F_2 "/*(DEBLOBBED)*/" +#define FIRMWARE_8105E_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8402_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8411_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8411_2 "/*(DEBLOBBED)*/" +#define FIRMWARE_8106E_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8106E_2 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168G_2 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168G_3 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168H_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8168H_2 "/*(DEBLOBBED)*/" +#define FIRMWARE_8107E_1 "/*(DEBLOBBED)*/" +#define FIRMWARE_8107E_2 "/*(DEBLOBBED)*/" + +#ifdef RTL8169_DEBUG +#define assert(expr) \ + if (!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__func__,__LINE__); \ + } +#define dprintk(fmt, args...) \ + do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) +#else +#define assert(expr) do {} while (0) +#define dprintk(fmt, args...) do {} while (0) +#endif /* RTL8169_DEBUG */ + +#define R8169_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + +#define TX_SLOTS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) + +/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ +#define TX_FRAGS_READY_FOR(tp,nr_frags) \ + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC.
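TX_SLOTS_AVAIL() and TX_FRAGS_READY_FOR() above treat cur_tx and dirty_tx as free-running unsigned counters, so the subtraction stays correct even when the counters wrap. A small standalone illustration with made-up counter values:

/* tx_ring_demo.c - illustrate the TX_SLOTS_AVAIL()/TX_FRAGS_READY_FOR()
 * accounting: cur_tx and dirty_tx only ever increase, and unsigned
 * wrap-around keeps the difference meaningful.  Counter values invented. */
#include <stdio.h>
#include <stdint.h>

#define NUM_TX_DESC 64
#define TX_SLOTS_AVAIL(dirty, cur)   ((dirty) + NUM_TX_DESC - (cur))
#define TX_FRAGS_READY_FOR(d, c, n)  (TX_SLOTS_AVAIL(d, c) >= (uint32_t)((n) + 1))

int main(void)
{
	uint32_t dirty_tx = 0xfffffff0u;  /* about to wrap */
	uint32_t cur_tx   = 0x0000000au;  /* already wrapped past dirty_tx */

	printf("slots available: %u\n", TX_SLOTS_AVAIL(dirty_tx, cur_tx));
	printf("room for a 3-fragment skb (needs 4 slots): %s\n",
	       TX_FRAGS_READY_FOR(dirty_tx, cur_tx, 3) ? "yes" : "no");
	return 0;
}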
*/ +static const int multicast_filter_limit = 32; + +#define MAX_READ_REQUEST_SHIFT 12 +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_NAPI_WEIGHT 64 +#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define RTL8169_TX_TIMEOUT (6*HZ) +#define RTL8169_PHY_TIMEOUT (10*HZ) + +/* write/read MMIO register */ +#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) +#define RTL_R8(reg) readb (ioaddr + (reg)) +#define RTL_R16(reg) readw (ioaddr + (reg)) +#define RTL_R32(reg) readl (ioaddr + (reg)) + +enum mac_version { + RTL_GIGA_MAC_VER_01 = 0, + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_15, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_NONE = 0xff, +}; + +enum rtl_tx_desc_version { + RTL_TD_0 = 0, + RTL_TD_1 = 1, +}; + +#define JUMBO_1K ETH_DATA_LEN +#define JUMBO_4K (4*1024 - ETH_HLEN - 2) +#define JUMBO_6K (6*1024 - ETH_HLEN - 2) +#define JUMBO_7K (7*1024 - ETH_HLEN - 2) +#define JUMBO_9K (9*1024 - ETH_HLEN - 2) + +#define _R(NAME,TD,FW,SZ,B) { \ + .name = NAME, \ + .txd_version = TD, \ + .fw_name = FW, \ + .jumbo_max = SZ, \ + .jumbo_tx_csum = B \ +} + +static const struct { + const char *name; + enum rtl_tx_desc_version txd_version; + const char *fw_name; + u16 jumbo_max; + bool jumbo_tx_csum; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_02] = + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_03] = + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_04] = + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_05] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_06] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + /* PCI-E devices. 
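The multicast_filter_limit comment above refers to the usual Realtek scheme of hashing each address into a 64-bit filter (two 32-bit MAR registers) via the big-endian Ethernet CRC; the set_rx_mode path that does this is not part of this hunk, so the sketch below is only a hedged, self-contained illustration of that bin computation with an arbitrary MAC address:

/* mc_hash_demo.c - derive a 64-bin multicast filter index from the
 * big-endian Ethernet CRC-32 of a MAC address.  The CRC routine is a
 * local stand-in equivalent in spirit to the kernel's ether_crc(). */
#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc_be(int len, const unsigned char *p)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		int i;

		crc ^= (uint32_t)*p++ << 24;
		for (i = 0; i < 8; i++)
			crc = (crc & 0x80000000u) ?
			      (crc << 1) ^ 0x04c11db7 : crc << 1;
	}
	return crc;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t mc_filter[2] = { 0, 0 };
	int bit_nr = ether_crc_be(6, mac) >> 26;        /* top 6 bits -> 0..63 */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);  /* set one of 64 bits */
	printf("bin %d -> MAR regs %08x %08x\n", bit_nr,
	       mc_filter[0], mc_filter[1]);
	return 0;
}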
*/ + [RTL_GIGA_MAC_VER_07] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_08] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_09] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_10] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_11] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_12] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_13] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_14] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_15] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_16] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_17] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_18] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_19] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_20] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_21] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_22] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_23] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_24] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_25] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_26] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_27] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_28] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_29] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_30] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_31] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_32] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_33] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_34] = + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_35] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_36] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_37] = + _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_38] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_39] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_40] = + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_41] = + _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_42] = + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_43] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_44] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_45] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_46] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_47] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, + 
JUMBO_1K, false), + [RTL_GIGA_MAC_VER_48] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, + JUMBO_1K, false), + [RTL_GIGA_MAC_VER_49] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_50] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_51] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), +}; +#undef _R + +enum cfg_version { + RTL_CFG_0 = 0x00, + RTL_CFG_1, + RTL_CFG_2 +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, + PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, + { 0x0001, 0x8168, + PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 }, + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + +static int rx_buf_sz = 16383; +static int use_dac; +static struct { + u32 msg_enable; +} debug = { -1 }; + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + RxMissed = 0x4c, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5c, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. 
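The RxConfig #defines above (FIFO threshold at bit 13, DMA burst at bit 8, plus the low accept bits defined further down in rtl_register_content) all land in one 32-bit register. A tiny standalone sketch of how such a value is composed; the particular accept mask is only an example, not the driver's per-chip initialisation:

/* rxcfg_demo.c - compose an RxConfig-style value from the shifted FIFO
 * threshold and DMA burst fields plus a few accept bits.  Example only. */
#include <stdio.h>

#define RXCFG_FIFO_SHIFT 13
#define RX_FIFO_THRESH   (7 << RXCFG_FIFO_SHIFT)  /* no threshold */
#define RXCFG_DMA_SHIFT  8
#define RX_DMA_BURST     (7 << RXCFG_DMA_SHIFT)   /* unlimited burst */

#define AcceptBroadcast  0x08
#define AcceptMulticast  0x04
#define AcceptMyPhys     0x02

int main(void)
{
	unsigned int rx_config = RX_FIFO_THRESH | RX_DMA_BURST |
				 AcceptBroadcast | AcceptMulticast | AcceptMyPhys;

	printf("RxConfig = 0x%08x\n", rx_config);  /* 0x0000e70e */
	return 0;
}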
*/ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8110_registers { + TBICSR = 0x64, + TBI_ANAR = 0x68, + TBI_LPAR = 0x6a, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0f +#define CSIAR_BYTE_ENABLE_SHIFT 12 +#define CSIAR_ADDR_MASK 0x0fff +#define CSIAR_FUNC_CARD 0x00000000 +#define CSIAR_FUNC_SDIO 0x00010000 +#define CSIAR_FUNC_NIC 0x00020000 +#define CSIAR_FUNC_NIC2 0x00010000 + PMCH = 0x6f, + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. 
*/ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxBOVF = (1 << 24), + RxFOVF = (1 << 23), + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* TBICSR p.28 */ + TBIReset = 0x80000000, + TBILoopback = 0x40000000, + TBINwEnable = 0x20000000, + TBINwRestart = 0x10000000, + TBILinkOk = 0x02000000, + TBINwComplete = 0x01000000, + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), + INTT_0 = 0x0000, // 8168 + INTT_1 = 0x0001, // 8168 + INTT_2 = 0x0002, // 8168 + INTT_3 = 0x0003, // 8168 + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* _TBICSRBit */ + TBILinkOK = 0x02000000, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7fU + + /* Second doubleword. 
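The PHYstatus bit definitions above encode link state, speed, duplex and flow control in a single byte. A small self-contained decoder, fed with an invented register value:

/* phystatus_demo.c - decode the PHYstatus bits listed above into a
 * human-readable link description.  The sample value is fabricated. */
#include <stdio.h>

#define TxFlowCtrl 0x40
#define RxFlowCtrl 0x20
#define _1000bpsF  0x10
#define _100bps    0x08
#define _10bps     0x04
#define LinkStatus 0x02
#define FullDup    0x01

static void decode_phystatus(unsigned char st)
{
	if (!(st & LinkStatus)) {
		printf("link down\n");
		return;
	}
	printf("link up, %s, %s duplex, tx pause %s, rx pause %s\n",
	       (st & _1000bpsF) ? "1000 Mb/s" :
	       (st & _100bps)   ? "100 Mb/s"  :
	       (st & _10bps)    ? "10 Mb/s"   : "unknown speed",
	       (st & FullDup)    ? "full" : "half",
	       (st & TxFlowCtrl) ? "on" : "off",
	       (st & RxFlowCtrl) ? "on" : "off");
}

int main(void)
{
	decode_phystatus(_1000bpsF | FullDup | LinkStatus | TxFlowCtrl);
	return 0;
}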
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ffU +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RsvdMask 0x3fffc000 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; +}; + +enum features { + RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), + RTL_FEATURE_GMII = (1 << 2), +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED, + RTL_FLAG_TASK_SLOW_PENDING, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_PHY_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + u32 msg_enable; + u16 txd_version; + u16 mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
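The descriptor bit enums above (DescOwn, FirstFrag/LastFrag, the PID protocol bits and the *Fail checksum bits) describe the first doubleword of an Rx descriptor. A simplified, standalone decoder using the same bit values on a fabricated opts1 word:

/* rxdesc_demo.c - decode a few Rx descriptor opts1 bits named above:
 * ownership, fragment markers, protocol ID and checksum failures. */
#include <stdio.h>
#include <stdint.h>

#define DescOwn   (1u << 31)
#define FirstFrag (1u << 29)
#define LastFrag  (1u << 28)
#define PID1      (1u << 18)
#define PID0      (1u << 17)
#define IPFail    (1u << 16)
#define UDPFail   (1u << 15)
#define TCPFail   (1u << 14)

int main(void)
{
	uint32_t opts1 = FirstFrag | LastFrag | PID0 | PID1;  /* fabricated */
	uint32_t proto = opts1 & (PID1 | PID0);

	printf("owned by %s\n", (opts1 & DescOwn) ? "NIC" : "driver");
	printf("%s frame\n", (opts1 & FirstFrag) && (opts1 & LastFrag) ?
	       "single-descriptor" : "multi-descriptor");
	printf("protocol: %s\n", proto == (PID1 | PID0) ? "IP" :
	       proto == PID0 ? "TCP" : proto == PID1 ? "UDP" : "non-IP");
	printf("checksum trouble: %s\n",
	       (opts1 & (IPFail | UDPFail | TCPFail)) ? "yes" : "no");
	return 0;
}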
*/ + u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + struct timer_list timer; + u16 cp_cmd; + + u16 event_slow; + + struct mdio_ops { + void (*write)(struct rtl8169_private *, int, int); + int (*read)(struct rtl8169_private *, int); + } mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); + } pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + + struct csi_ops { + void (*write)(struct rtl8169_private *, int, int); + u32 (*read)(struct rtl8169_private *, int); + } csi_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + void (*phy_reset_enable)(struct rtl8169_private *tp); + void (*hw_start)(struct net_device *); + unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); + unsigned int (*link_ok)(void __iomem *); + int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); + bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct mutex mutex; + struct work_struct work; + } wk; + + unsigned features; + + struct mii_if_info mii; + struct rtl8169_counters counters; + u32 saved_wolopts; + u32 opts1_mask; + + struct rtl_fw { + const struct firmware *fw; + +#define RTL_VER_SIZE 32 + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; + } *rtl_fw; +#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) + + u32 ocp_base; +}; + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +module_param(use_dac, int, 0); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(RTL8169_VERSION); +/*(DEBLOBBED)*/ + +static void rtl_lock_work(struct rtl8169_private *tp) +{ + mutex_lock(&tp->wk.mutex); +} + +static void rtl_unlock_work(struct rtl8169_private *tp) +{ + mutex_unlock(&tp->wk.mutex); +} + +static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) +{ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, force); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static void rtl_udelay(unsigned int d) +{ + udelay(d); +} + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + void (*delay)(unsigned int), unsigned int d, int n, + bool high) +{ + int i; + + for (i = 0; i < n; i++) { + delay(d); + if (c->check(tp) == high) + return true; + } + netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n", + c->msg, !high, n, d); + return false; +} + +static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); +} + +static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); +} + +static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, true); +} + +static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) +{ + if (reg & 0xffff0001) { + netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg); + return true; + } + return false; +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(GPHY_OCP, reg << 15); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? 
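rtl_loop_wait() together with DECLARE_RTL_COND() above implements a generic "poll a condition up to N times with a delay between attempts" helper. A userspace sketch of the same pattern, with a fake flag standing in for the hardware condition and the delay call left as a comment:

/* poll_demo.c - the rtl_loop_wait() pattern: poll a condition callback
 * up to n times until it matches the level the caller asked for. */
#include <stdio.h>
#include <stdbool.h>

static int fake_hw_busy = 3;	/* pretend the flag clears after 3 polls */

static bool cond_check(void)
{
	return fake_hw_busy-- > 0;	/* true while "busy" */
}

static bool loop_wait(bool (*check)(void), int n, bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		/* a real caller would delay here (udelay/msleep) */
		if (check() == high)
			return true;
	}
	printf("condition never became %d after %d polls\n", high, n);
	return false;
}

int main(void)
{
	printf("wait for not-busy: %s\n",
	       loop_wait(cond_check, 10, false) ? "ok" : "timed out");
	return 0;
}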
+ (RTL_R32(GPHY_OCP) & 0xffff) : ~0; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(OCPDR, reg << 15); + + return RTL_R32(OCPDR); +} + +#define OCP_STD_PHY_BASE 0xa400 + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + int value; + + RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(PHYAR) & 0xffff : ~0; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? 
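r8169_mdio_write()/r8169_mdio_read() above pack each MDIO access into the PHYAR register: the command/flag bit in bit 31, the 5-bit PHY register number in bits 20:16 and the 16-bit data in the low half. A standalone pack/unpack sketch with arbitrary values:

/* phyar_demo.c - show the PHYAR word layout used by r8169_mdio_write(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int reg = 0x0d, value = 0xbeef;          /* arbitrary */
	uint32_t phyar = 0x80000000u | (reg & 0x1f) << 16 | (value & 0xffff);

	printf("PHYAR word   : 0x%08x\n", phyar);
	printf("unpacked reg : 0x%02x\n", (phyar >> 16) & 0x1f);
	printf("unpacked data: 0x%04x\n", phyar & 0xffff);
	return 0;
}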
+ RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168dp_2_mdio_start(ioaddr); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(ioaddr); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + int value; + + r8168dp_2_mdio_start(ioaddr); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(ioaddr); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) +{ + tp->mdio_ops.write(tp, location, val); +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + return tp->mdio_ops.read(tp, location); +} + +static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) +{ + rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); +} + +static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) +{ + int val; + + val = rtl_readphy(tp, reg_addr); + rtl_writephy(tp, reg_addr, (val & ~m) | p); +} + +static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, + int val) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_writephy(tp, location, val); +} + +static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return rtl_readphy(tp, location); +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(ERIAR) & ERIAR_FLAG; +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + void __iomem *ioaddr = tp->mmio_addr; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(ERIDR, val); + RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + + rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + + return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 
+ RTL_R32(ERIDR) : ~0; +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, + u32 m, int type) +{ + u32 val; + + val = rtl_eri_read(tp, addr, type); + rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? + RTL_R32(OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + return rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_ocp_read(tp, mask, reg); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_ocp_read(tp, mask, reg); + default: + BUG(); + return ~0; + } +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPDR, data); + RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_ocp_write(tp, mask, reg, data); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + r8168ep_ocp_write(tp, mask, reg, data); + break; + default: + BUG(); + break; + } +} + +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); + + ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
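rtl_w0w1_phy() and rtl_w0w1_eri() above are read-modify-write helpers: clear the bits in m, then set the bits in p. The one-liner below shows the same (val & ~m) | p step on an arbitrary register value:

/* w0w1_demo.c - the read-modify-write step used by rtl_w0w1_phy()/
 * rtl_w0w1_eri(): clear the m bits, then set the p bits. */
#include <stdio.h>
#include <stdint.h>

static uint32_t w0w1(uint32_t val, uint32_t p, uint32_t m)
{
	return (val & ~m) | p;
}

int main(void)
{
	uint32_t reg = 0x0000f0f0;	/* arbitrary starting value */

	/* set bit 0, clear bit 4 */
	printf("0x%08x -> 0x%08x\n", reg, w0w1(reg, 0x01, 0x10));
	return 0;
}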
0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return ocp_read(tp, 0x0f, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return ocp_read(tp, 0x0f, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(IBISR0) & 0x02; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000); + RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); + RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_start(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_start(tp); + break; + default: + BUG(); + break; + } +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_stop(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_stop(tp); + break; + default: + BUG(); + break; + } +} + +static int r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; +} + +static int r8168ep_check_dash(struct rtl8169_private *tp) +{ + return (ocp_read(tp, 0x0f, 0x128) & 0x00000001) ? 
1 : 0; +} + +static int r8168_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_check_dash(tp); + default: + return 0; + } +} + +struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; +}; + +static void rtl_write_exgmac_batch(struct rtl8169_private *tp, + const struct exgmac_reg *r, int len) +{ + while (len-- > 0) { + rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(EFUSEAR) & EFUSEAR_FLAG; +} + +static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u16 rtl_get_events(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R16(IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrStatus, bits); + mmiowb(); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, 0); + mmiowb(); +} + +static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, bits); +} + +#define RTL_EVENT_NAPI_RX (RxOK | RxErr) +#define RTL_EVENT_NAPI_TX (TxOK | TxErr) +#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) + +static void rtl_irq_enable_all(struct rtl8169_private *tp) +{ + rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_irq_disable(tp); + rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + RTL_R8(ChipCmd); +} + +static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TBICSR) & TBIReset; +} + +static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) +{ + return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; +} + +static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) +{ + return RTL_R32(TBICSR) & TBILinkOk; +} + +static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) +{ + return RTL_R8(PHYstatus) & LinkStatus; +} + +static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); +} + +static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) +{ + unsigned int val; + + val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; + rtl_writephy(tp, MII_BMCR, val & 0xffff); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct net_device *dev = tp->dev; + + if (!netif_running(dev)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else if (RTL_R8(PHYstatus) & _100bps) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 
0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + /* Reset packet filter */ + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + ERIAR_EXGMAC); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (RTL_R8(PHYstatus) & _10bps) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, + ERIAR_EXGMAC); + } + } +} + +static void __rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, bool pm) +{ + if (tp->link_ok(ioaddr)) { + rtl_link_chg_patch(tp); + /* This is to cancel a scheduled suspend if there's one. */ + if (pm) + pm_request_resume(&tp->pci_dev->dev); + netif_carrier_on(dev); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); + } else { + netif_carrier_off(dev); + netif_info(tp, ifdown, dev, "link down\n"); + if (pm) + pm_schedule_suspend(&tp->pci_dev->dev, 5000); + } +} + +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + __rtl8169_check_link_status(dev, tp, ioaddr, false); +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static u32 __rtl8169_get_wol(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 options; + u32 wolopts = 0; + + options = RTL_R8(Config1); + if (!(options & PMEnable)) + return 0; + + options = RTL_R8(Config3); + if (options & LinkUp) + wolopts |= WAKE_PHY; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + wolopts |= WAKE_MAGIC; + break; + default: + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + break; + } + + options = RTL_R8(Config5); + if (options & UWF) + wolopts |= WAKE_UCAST; + if (options & BWF) + wolopts |= WAKE_BCAST; + if (options & MWF) + wolopts |= WAKE_MCAST; + + return wolopts; +} + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + + wol->supported = WAKE_ANY; + wol->wolopts = __rtl8169_get_wol(tp); + + rtl_unlock_work(tp); +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i, tmp; + 
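__rtl8169_get_wol() above translates Config3/Config5 bits into the ethtool WAKE_* mask. A self-contained sketch of that mapping on fabricated register contents; the WAKE_* values mirror the standard ethtool flags but are defined locally here:

/* wol_demo.c - mirror the Config3/Config5 -> WAKE_* decoding done by
 * __rtl8169_get_wol(), on invented register values. */
#include <stdio.h>

#define WAKE_PHY   (1 << 0)
#define WAKE_UCAST (1 << 1)
#define WAKE_MCAST (1 << 2)
#define WAKE_BCAST (1 << 3)
#define WAKE_MAGIC (1 << 5)

#define LinkUp      (1 << 4)	/* Config3 */
#define MagicPacket (1 << 5)	/* Config3 */
#define UWF         (1 << 4)	/* Config5 */
#define MWF         (1 << 5)	/* Config5 */
#define BWF         (1 << 6)	/* Config5 */

int main(void)
{
	unsigned char config3 = MagicPacket;	/* pretend register reads */
	unsigned char config5 = BWF | MWF;
	unsigned int wolopts = 0;

	if (config3 & LinkUp)      wolopts |= WAKE_PHY;
	if (config3 & MagicPacket) wolopts |= WAKE_MAGIC;
	if (config5 & UWF)         wolopts |= WAKE_UCAST;
	if (config5 & BWF)         wolopts |= WAKE_BCAST;
	if (config5 & MWF)         wolopts |= WAKE_MCAST;

	printf("wolopts = 0x%02x\n", wolopts);	/* magic + bcast + mcast */
	return 0;
}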
static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + u8 options; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + tmp = ARRAY_SIZE(cfg) - 1; + if (wolopts & WAKE_MAGIC) + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + MagicPacket_v2, + 0x0000, + ERIAR_EXGMAC); + else + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + 0x0000, + MagicPacket_v2, + ERIAR_EXGMAC); + break; + default: + tmp = ARRAY_SIZE(cfg); + break; + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: + options = RTL_R8(Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(Config1, options); + break; + default: + options = RTL_R8(Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(Config2, options); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + + if (wol->wolopts) + tp->features |= RTL_FEATURE_WOL; + else + tp->features &= ~RTL_FEATURE_WOL; + __rtl8169_set_wol(tp, wol->wolopts); + + rtl_unlock_work(tp); + + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + + return 0; +} + +static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) +{ + return rtl_chip_infos[tp->mac_version].fw_name; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, RTL8169_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (!IS_ERR_OR_NULL(rtl_fw)) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static int rtl8169_set_speed_tbi(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 ignored) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int ret = 0; + u32 reg; + + reg = RTL_R32(TBICSR); + if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && + (duplex == DUPLEX_FULL)) { + RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + } else if (autoneg == AUTONEG_ENABLE) + RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + else { + netif_warn(tp, link, dev, + "incorrect speed setting refused in TBI mode\n"); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int rtl8169_set_speed_xmii(struct net_device *dev, + 
u8 autoneg, u16 speed, u8 duplex, u32 adv) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int giga_ctrl, bmcr; + int rc = -EINVAL; + + rtl_writephy(tp, 0x1f, 0x0000); + + if (autoneg == AUTONEG_ENABLE) { + int auto_nego; + + auto_nego = rtl_readphy(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + + auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + giga_ctrl = rtl_readphy(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + /* The 8100e/8101e/8102e do Fast Ethernet only. */ + if (tp->mii.supports_gmii) { + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + } else if (adv & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) { + netif_info(tp, link, dev, + "PHY does not support 1000Mbps\n"); + goto out; + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + rtl_writephy(tp, MII_ADVERTISE, auto_nego); + rtl_writephy(tp, MII_CTRL1000, giga_ctrl); + } else { + giga_ctrl = 0; + + if (speed == SPEED_10) + bmcr = 0; + else if (speed == SPEED_100) + bmcr = BMCR_SPEED100; + else + goto out; + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + rtl_writephy(tp, MII_BMCR, bmcr); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { + rtl_writephy(tp, 0x17, 0x2138); + rtl_writephy(tp, 0x0e, 0x0260); + } else { + rtl_writephy(tp, 0x17, 0x2108); + rtl_writephy(tp, 0x0e, 0x0000); + } + } + + rc = 0; +out: + return rc; +} + +static int rtl8169_set_speed(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 advertising) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); + if (ret < 0) + goto out; + + if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && + (advertising & ADVERTISED_1000baseT_Full)) { + mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); + } +out: + return ret; +} + +static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + del_timer_sync(&tp->timer); + + rtl_lock_work(tp); + ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), + cmd->duplex, cmd->advertising); + rtl_unlock_work(tp); + + return ret; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > JUMBO_1K && + !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + features &= ~NETIF_F_IP_CSUM; + + return features; +} + +static void __rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 rx_config; + + rx_config = RTL_R32(RxConfig); + if (features & NETIF_F_RXALL) + rx_config |= (AcceptErr | AcceptRunt); + else + rx_config &= ~(AcceptErr | AcceptRunt); + + RTL_W32(RxConfig, rx_config); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + 
else + tp->cp_cmd &= ~RxChkSum; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; + + rtl_lock_work(tp); + if (features ^ dev->features) + __rtl8169_set_features(dev, features); + rtl_unlock_work(tp); + + return 0; +} + + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 status; + + cmd->supported = + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + + status = RTL_R32(TBICSR); + cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; + cmd->autoneg = !!(status & TBINwEnable); + + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; /* Always set */ + + return 0; +} + +static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return mii_ethtool_gset(&tp->mii, cmd); +} + +static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int rc; + + rtl_lock_work(tp); + rc = tp->get_settings(dev, cmd); + rtl_unlock_work(tp); + + return rc; +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + rtl_lock_work(tp); + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); + rtl_unlock_work(tp); +} + +static u32 rtl8169_get_msglevel(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void rtl8169_set_msglevel(struct net_device *dev, u32 value) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CounterAddrLow) & CounterDump; +} + +static void rtl8169_update_counters(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + struct rtl8169_counters *counters; + dma_addr_t paddr; + u32 cmd; + + /* + * Some chips are unable to dump tally 
counters when the receiver + * is disabled. + */ + if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + return; + + counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL); + if (!counters) + return; + + RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(CounterAddrLow, cmd); + RTL_W32(CounterAddrLow, cmd | CounterDump); + + if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000)) + memcpy(&tp->counters, counters, sizeof(*counters)); + + RTL_W32(CounterAddrLow, 0); + RTL_W32(CounterAddrHigh, 0); + + dma_free_coherent(d, sizeof(*counters), counters, paddr); +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + ASSERT_RTNL(); + + rtl8169_update_counters(dev); + + data[0] = le64_to_cpu(tp->counters.tx_packets); + data[1] = le64_to_cpu(tp->counters.rx_packets); + data[2] = le64_to_cpu(tp->counters.tx_errors); + data[3] = le32_to_cpu(tp->counters.rx_errors); + data[4] = le16_to_cpu(tp->counters.rx_missed); + data[5] = le16_to_cpu(tp->counters.align_errors); + data[6] = le32_to_cpu(tp->counters.tx_one_collision); + data[7] = le32_to_cpu(tp->counters.tx_multi_collision); + data[8] = le64_to_cpu(tp->counters.rx_unicast); + data[9] = le64_to_cpu(tp->counters.rx_broadcast); + data[10] = le32_to_cpu(tp->counters.rx_multicast); + data[11] = le16_to_cpu(tp->counters.tx_aborted); + data[12] = le16_to_cpu(tp->counters.tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_settings = rtl8169_get_settings, + .set_settings = rtl8169_set_settings, + .get_msglevel = rtl8169_get_msglevel, + .set_msglevel = rtl8169_set_msglevel, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static void rtl8169_get_mac_version(struct rtl8169_private *tp, + struct net_device *dev, u8 default_version) +{ + void __iomem *ioaddr = tp->mmio_addr; + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u32 mask; + u32 val; + int mac_version; + } mac_info[] = { + /* 8168EP family. */ + { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 }, + { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 }, + { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 }, + { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, + { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, + { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, + { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. 
*/ + { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, + { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, + { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, + { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, + { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, + { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, + { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, + { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, + { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, + { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, + { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, + { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, + { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, + { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + + /* 8110 family. */ + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + + /* Catch-all */ + { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + u32 reg; + + reg = RTL_R32(TxConfig); + while ((reg & p->mask) != p->val) + p++; + tp->mac_version = p->mac_version; + + if (tp->mac_version == RTL_GIGA_MAC_NONE) { + netif_notice(tp, probe, dev, + "unknown MAC, using family default\n"); + tp->mac_version = default_version; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { + tp->mac_version = tp->mii.supports_gmii ? 
+ RTL_GIGA_MAC_VER_42 : + RTL_GIGA_MAC_VER_43; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_45 : + RTL_GIGA_MAC_VER_47; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_46 : + RTL_GIGA_MAC_VER_48; + } +} + +static void rtl8169_print_mac_version(struct rtl8169_private *tp) +{ + dprintk("mac_version = 0x%02x\n", tp->mac_version); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) +{ + while (len-- > 0) { + rtl_writephy(tp, regs->reg, regs->val); + regs++; + } +} + +#define PHY_READ 0x00000000 +#define PHY_DATA_OR 0x10000000 +#define PHY_DATA_AND 0x20000000 +#define PHY_BJMPN 0x30000000 +#define PHY_MDIO_CHG 0x40000000 +#define PHY_CLEAR_READCOUNT 0x70000000 +#define PHY_WRITE 0x80000000 +#define PHY_READCOUNT_EQ_SKIP 0x90000000 +#define PHY_COMP_EQ_SKIPN 0xa0000000 +#define PHY_COMP_NEQ_SKIPN 0xb0000000 +#define PHY_WRITE_PREVIOUS 0xc0000000 +#define PHY_SKIPN 0xd0000000 +#define PHY_DELAY_MS 0xe0000000 + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) + +static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + char *version = rtl_fw->version; + bool rc = false; + + if (fw->size < FW_OPCODE_SIZE) + goto out; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + goto out; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + goto out; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + goto out; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + goto out; + + memcpy(version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + goto out; + + strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + version[RTL_VER_SIZE - 1] = 0; + + rc = true; +out: + return rc; +} + +static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, + struct rtl_fw_phy_action *pa) +{ + bool rc = false; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 regno = (action & 0x0fff0000) >> 16; + + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_MDIO_CHG: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + + default: + netif_err(tp, ifup, tp->dev, + "Invalid action 0x%08x\n", action); 
+ goto out; + } + } + rc = true; +out: + return rc; +} + +static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct net_device *dev = tp->dev; + int rc = -EINVAL; + + if (!rtl_fw_format_ok(tp, rtl_fw)) { + netif_err(tp, ifup, dev, "invalid firmware\n"); + goto out; + } + + if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) + rc = 0; +out: + return rc; +} + +static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + struct mdio_ops org, *ops = &tp->mdio_ops; + u32 predata, count; + size_t index; + + predata = count = 0; + org.write = ops->write; + org.read = ops->read; + + for (index = 0; index < pa->size; ) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; + + switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_MDIO_CHG: + if (data == 0) { + ops->write = org.write; + ops->read = org.read; + } else if (data == 1) { + ops->write = mac_mcu_write; + ops->read = mac_mcu_read; + } + + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; + case PHY_WRITE: + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + index += (count == data) ? 2 : 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + default: + BUG(); + } + } + + ops->write = org.write; + ops->read = org.read; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + release_firmware(tp->rtl_fw->fw); + kfree(tp->rtl_fw); + } + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; +} + +static void rtl_apply_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw = tp->rtl_fw; + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ + if (!IS_ERR_OR_NULL(rtl_fw)) + rtl_phy_write_fw(tp, rtl_fw); +} + +static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) +{ + if (rtl_readphy(tp, reg) != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x01, 0x90d0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + + if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || + (pdev->subsystem_device != 0xe000)) + return; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_writephy(tp, 0x10, 0xf01b); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8169scd_hw_phy_config_quirk(tp); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 
0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_patchphy(tp, 0x16, 1 << 0); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0000 }, + { 0x1d, 0x0f00 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x1ec8 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void 
rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) +{ + rtl8168c_3_hw_phy_config(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef); + rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* RSET couple improve */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0d, 0x0300); + rtl_patchphy(tp, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + 
{ 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); + + /* Switching regulator Slew rate */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0f, 0x0017); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x002d }, + { 0x18, 0x0040 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_patchphy(tp, 0x0d, 1 << 5); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b80 }, + { 0x06, 0xc896 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + + /* Update PFM & 10M TX idle timer */ + { 0x1f, 0x0007 }, + { 0x1e, 0x002f }, + { 0x15, 0x1919 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* DCO enable for 10M IDLE Power */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0023); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + 
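+ /*
+  * Descriptive note (inferred from the access pattern): in the blocks
+  * below, the writes to PHY register 0x1f select an extension register
+  * page (0x0002, 0x0005, 0x0007, ...) before each group of tweaks, and
+  * writing 0x0000 drops back to the standard MII page afterwards;
+  * rtl_w0w1_phy() then sets/clears individual bits within the selected
+  * page.
+  */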
/* For impedance matching */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100); + rtl_writephy(tp, 0x1f, 0x0006); + rtl_writephy(tp, 0x00, 0x5a00); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) +{ + const u16 w[] = { + addr[0] | (addr[1] << 8), + addr[2] | (addr[3] << 8), + addr[4] | (addr[5] << 8) + }; + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) } + }; + + rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e)); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0004 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0002 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Green Setting */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b5b }, + { 0x06, 0x9222 }, + { 0x05, 0x8b6d }, + { 0x06, 0x8000 }, + { 0x05, 0x8b76 }, + { 0x06, 0x8000 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + /* improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* EEE setting */ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 
0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ + rtl_rar_exgmac_set(tp, tp->dev->dev_addr); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) +{ + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00fb }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00aa }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* Modify green table for giga */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b54); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 
0x8b5d); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8a7c); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a7f); + rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000); + rtl_writephy(tp, 0x05, 0x8a82); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a88); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + + /* uc same-seed solution */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* eee setting */ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x10) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000); + } else { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000); + } + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x13) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000); + } else { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002); + } + + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + + /* EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + + /* Improve SWR Efficiency */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x11, 0x5655); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) +{ + u16 dout_tapbin; + u32 data; + + rtl_apply_firmware(tp); + + /* CHN EST parameters adjust - giga master */ + rtl_writephy(tp, 
0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x809b); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800); + rtl_writephy(tp, 0x13, 0x80a2); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00); + rtl_writephy(tp, 0x13, 0x80a4); + rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00); + rtl_writephy(tp, 0x13, 0x809c); + rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - giga slave */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80ad); + rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800); + rtl_writephy(tp, 0x13, 0x80b4); + rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00); + rtl_writephy(tp, 0x13, 0x80ac); + rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - fnet */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808e); + rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00); + rtl_writephy(tp, 0x13, 0x8090); + rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00); + rtl_writephy(tp, 0x13, 0x8092); + rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + rtl_writephy(tp, 0x1f, 0x0a46); + data = rtl_readphy(tp, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = rtl_readphy(tp, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin^0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x827a); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827b); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827c); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827d); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* SAR ADC performance */ + rtl_writephy(tp, 0x1f, 0x0bca); + rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x803f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8047); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x804f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8057); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x805f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8067); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x806f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) +{ + u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + u16 rlen; + u32 data; + + rtl_apply_firmware(tp); + + /* CHIN EST parameter update */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808a); + rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f); + rtl_writephy(tp, 0x1f, 0x0000); + + /* 
enable R-tune & PGA-retune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data = r8168_mac_ocp_read(tp, 0xdd02); + ioffset_p3 = ((data & 0x80)>>7); + ioffset_p3 <<= 3; + + data = r8168_mac_ocp_read(tp, 0xdd00); + ioffset_p3 |= ((data & (0xe000))>>13); + ioffset_p2 = ((data & (0x1e00))>>9); + ioffset_p1 = ((data & (0x01e0))>>5); + ioffset_p0 = ((data & 0x0010)>>4); + ioffset_p0 <<= 3; + ioffset_p0 |= (data & (0x07)); + data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); + + if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || + (ioffset_p1 != 0x0f) || (ioffset_p0 == 0x0f)) { + rtl_writephy(tp, 0x1f, 0x0bcf); + rtl_writephy(tp, 0x16, data); + rtl_writephy(tp, 0x1f, 0x0000); + } + + /* Modify rlen (TX LPF corner frequency) level */ + rtl_writephy(tp, 0x1f, 0x0bcd); + data = rtl_readphy(tp, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); + rtl_writephy(tp, 0x17, data); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) +{ + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* patch 10M & ALDPS */ + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* set rg_sel_sdm_rate */ + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) +{ + /* patch 10M & ALDPS */ + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + 
rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Set rg_sel_sdm_rate */ + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Channel estimation parameters */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80f3); + rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff); + rtl_writephy(tp, 0x13, 0x80f0); + rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff); + rtl_writephy(tp, 0x13, 0x80ef); + rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff); + rtl_writephy(tp, 0x13, 0x80f6); + rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff); + rtl_writephy(tp, 0x13, 0x80ec); + rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff); + rtl_writephy(tp, 0x13, 0x80ed); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x80f2); + rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff); + rtl_writephy(tp, 0x13, 0x80f4); + rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8110); + rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff); + rtl_writephy(tp, 0x13, 0x810f); + rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff); + rtl_writephy(tp, 0x13, 0x8111); + rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff); + rtl_writephy(tp, 0x13, 0x8113); + rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff); + rtl_writephy(tp, 0x13, 0x8115); + rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff); + rtl_writephy(tp, 0x13, 0x810e); + rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff); + rtl_writephy(tp, 0x13, 0x810c); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x810b); + rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80d1); + rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff); + rtl_writephy(tp, 0x13, 0x80cd); + rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff); + rtl_writephy(tp, 0x13, 0x80d3); + rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff); + rtl_writephy(tp, 0x13, 0x80d5); + rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff); + rtl_writephy(tp, 0x13, 0x80d7); + rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff); + + /* Force PWM-mode */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x12, 0x00ed); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 }, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x11, 1 << 12); + rtl_patchphy(tp, 0x19, 1 << 13); + rtl_patchphy(tp, 0x10, 1 << 15); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0005 }, + { 0x1a, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0004 }, + { 0x1c, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x15, 0x7701 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp) +{ + /* 
Disable ALDPS before setting firmware */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(20); + + rtl_apply_firmware(tp); + + /* EEE setting */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x10, 0x401f); + rtl_writephy(tp, 0x19, 0x7030); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_phy_config(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_print_mac_version(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + break; + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + rtl8169s_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_04: + rtl8169sb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_05: + rtl8169scd_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_06: + rtl8169sce_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + rtl8102e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_11: + rtl8168bb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_12: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_17: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_18: + rtl8168cp_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_19: + rtl8168c_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_20: + rtl8168c_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_21: + rtl8168c_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_22: + rtl8168c_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + rtl8168cp_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_25: + rtl8168d_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_28: + rtl8168d_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + rtl8105e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_31: + /* None. 
*/ + break; + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl8168e_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl8168e_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_35: + rtl8168f_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_36: + rtl8168f_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl8402_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl8411_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl8106e_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_40: + rtl8168g_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + rtl8168g_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_47: + rtl8168h_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_48: + rtl8168h_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_49: + rtl8168ep_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_41: + default: + break; + } +} + +static void rtl_phy_work(struct rtl8169_private *tp) +{ + struct timer_list *timer = &tp->timer; + void __iomem *ioaddr = tp->mmio_addr; + unsigned long timeout = RTL8169_PHY_TIMEOUT; + + assert(tp->mac_version > RTL_GIGA_MAC_VER_01); + + if (tp->phy_reset_pending(tp)) { + /* + * A busy loop could burn quite a few cycles on nowadays CPU. + * Let's delay the execution of the timer for a few ticks. + */ + timeout = HZ/10; + goto out_mod_timer; + } + + if (tp->link_ok(ioaddr)) + return; + + netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); + + tp->phy_reset_enable(tp); + +out_mod_timer: + mod_timer(timer, jiffies + timeout); +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + if (!test_and_set_bit(flag, tp->wk.flags)) + schedule_work(&tp->wk.work); +} + +static void rtl8169_phy_timer(unsigned long __opaque) +{ + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); +} + +static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, + void __iomem *ioaddr) +{ + iounmap(ioaddr); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +DECLARE_RTL_COND(rtl_phy_reset_cond) +{ + return tp->phy_reset_pending(tp); +} + +static void rtl8169_phy_reset(struct net_device *dev, + struct rtl8169_private *tp) +{ + tp->phy_reset_enable(tp); + rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); +} + +static bool rtl_tbi_enabled(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return (tp->mac_version == RTL_GIGA_MAC_VER_01) && + (RTL_R8(PHYstatus) & TBI_Enable); +} + +static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_phy_config(dev); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + } + + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 + } + + 
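+ /* Descriptive note (editor, not upstream): the calls below reset the PHY, then restart autonegotiation advertising all supported modes; 1000BASE-T is only advertised on GMII-capable chips. */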
rtl8169_phy_reset(dev, tp); + + rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0)); + + if (rtl_tbi_enabled(tp)) + netif_info(tp, link, dev, "TBI auto-negotiating\n"); +} + +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_lock_work(tp); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W32(MAC4, addr[4] | addr[5] << 8); + RTL_R32(MAC4); + + RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + RTL_R32(MAC0); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + RTL_W8(Cfg9346, Cfg9346_Lock); + + rtl_unlock_work(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; +} + +static int rtl_xmii_ioctl(struct rtl8169_private *tp, + struct mii_ioctl_data *data, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 32; /* Internal PHY */ + return 0; + + case SIOCGMIIREG: + data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); + return 0; + + case SIOCSMIIREG: + rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) +{ + return -EOPNOTSUPP; +} + +static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) +{ + if (tp->features & RTL_FEATURE_MSI) { + pci_disable_msi(pdev); + tp->features &= ~RTL_FEATURE_MSI; + } +} + +static void rtl_init_mdio_ops(struct rtl8169_private *tp) +{ + struct mdio_ops *ops = &tp->mdio_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + ops->write = r8168dp_1_mdio_write; + ops->read = r8168dp_1_mdio_read; + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + ops->write = r8168dp_2_mdio_write; + ops->read = r8168dp_2_mdio_read; + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + ops->write = r8168g_mdio_write; + ops->read = r8168g_mdio_read; + break; + default: + ops->write = r8169_mdio_write; + ops->read = r8169_mdio_read; + break; + } +} + +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | 
ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W32(RxConfig, RTL_R32(RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) +{ + if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + return false; + + rtl_speed_down(tp); + rtl_wol_suspend_quirk(tp); + + return true; +} + +static void r810x_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +static void r810x_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r810x_pll_power_down(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_wol_pll_power_down(tp)) + return; + + r810x_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } +} + +static void r810x_pll_power_up(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r810x_phy_power_up(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + } +} + +static void r8168_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0000); + break; + default: + break; + } + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r8168_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + + case RTL_GIGA_MAC_VER_11: + case 
RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0200); + default: + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); + break; + } +} + +static void r8168_pll_power_down(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { + return; + } + + if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || + tp->mac_version == RTL_GIGA_MAC_VER_24) && + (RTL_R16(CPlusCmd) & ASF)) { + return; + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (rtl_wol_pll_power_down(tp)) + return; + + r8168_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, + 0xfc000000, ERIAR_EXGMAC); + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } +} + +static void r8168_pll_power_up(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, + 0x00000000, ERIAR_EXGMAC); + break; + } + + r8168_phy_power_up(tp); +} + +static void rtl_generic_op(struct rtl8169_private *tp, + void (*op)(struct rtl8169_private *)) +{ + if (op) + op(tp); +} + +static void rtl_pll_power_down(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.down); +} + +static void rtl_pll_power_up(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.up); +} + +static void rtl_init_pll_power_ops(struct rtl8169_private *tp) +{ + struct pll_power_ops *ops = &tp->pll_power_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + ops->down = 
r810x_pll_power_down; + ops->up = r810x_pll_power_up; + break; + + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + ops->down = r8168_pll_power_down; + ops->up = r8168_pll_power_up; + break; + + default: + ops->down = NULL; + ops->up = NULL; + break; + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + default: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + rtl_generic_op(tp, tp->jumbo_ops.enable); + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + rtl_generic_op(tp, tp->jumbo_ops.disable); + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + 
RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(MaxTxPacketSize, 0x3f); + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | 0x01); + rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(MaxTxPacketSize, 0x0c); + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~0x01); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_enable(tp); + + RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_disable(tp); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void rtl_init_jumbo_ops(struct rtl8169_private *tp) +{ + struct jumbo_ops *ops = &tp->jumbo_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + ops->disable = r8168b_0_hw_jumbo_disable; + ops->enable = r8168b_0_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + ops->disable = r8168b_1_hw_jumbo_disable; + ops->enable = r8168b_1_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + ops->disable = r8168c_hw_jumbo_disable; + ops->enable = r8168c_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + ops->disable = r8168dp_hw_jumbo_disable; + ops->enable = r8168dp_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + ops->disable = r8168e_hw_jumbo_disable; + ops->enable = r8168e_hw_jumbo_enable; + break; + + /* + * No action needed for jumbo frames with 8169. + * No jumbo for 810x at all. 
+ */ + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + default: + ops->disable = NULL; + ops->enable = NULL; + break; + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(ChipCmd, CmdReset); + + rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_uncached_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + const char *name; + int rc = -ENOMEM; + + name = rtl_lookup_firmware_name(tp); + if (!name) + goto out_no_firmware; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + goto err_warn; + + rc = reject_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev); + if (rc < 0) + goto err_free; + + rc = rtl_check_firmware(tp, rtl_fw); + if (rc < 0) + goto err_release_firmware; + + tp->rtl_fw = rtl_fw; +out: + return; + +err_release_firmware: + release_firmware(rtl_fw->fw); +err_free: + kfree(rtl_fw); +err_warn: + netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", + name, rc); +out_no_firmware: + tp->rtl_fw = NULL; + goto out; +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + if (IS_ERR(tp->rtl_fw)) + rtl_request_uncached_firmware(tp); +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TxConfig) & TXCFG_EMPTY; +} + +static void rtl8169_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + } else { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + udelay(100); + } + + rtl_hw_reset(tp); +} + +static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Set DMA burst size and Interframe Gap 
Time */ + RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); +} + +static void rtl_hw_start(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->hw_start(dev); + + rtl_irq_enable_all(tp); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. + */ + RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) +{ + u16 cmd; + + cmd = RTL_R16(CPlusCmd); + RTL_W16(CPlusCmd, cmd); + return cmd; +} + +static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(RxMaxSize, rx_buf_sz + 1); +} + +static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) +{ + static const struct rtl_cfg2_info { + u32 mac_version; + u32 clk; + u32 val; + } cfg2_info [] = { + { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd + { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, + { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe + { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } + }; + const struct rtl_cfg2_info *p = cfg2_info; + unsigned int i; + u32 clk; + + clk = RTL_R8(Config2) & PCI_Clock_66MHz; + for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { + if ((p->mac_version == mac_version) && (p->clk == clk)) { + RTL_W32(0x7c, p->val); + break; + } + } +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. 
*/ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + u32 data = mc_filter[0]; + + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(data); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + + RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); + + RTL_W32(RxConfig, tmp); +} + +static void rtl_hw_start_8169(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) { + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); + } + + RTL_W8(Cfg9346, Cfg9346_Unlock); + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_init_rxcfg(tp); + + RTL_W8(EarlyTxThres, NoEarlyTx); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + rtl_set_rx_tx_config_registers(tp); + + tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + dprintk("Set MAC Reg C+CR Offset 0xe0. " + "Bit-3 and bit-14 MUST be 1\n"); + tp->cp_cmd |= (1 << 14); + } + + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(ioaddr, tp->mac_version); + + /* + * Undocumented corner. Supposedly: + * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets + */ + RTL_W16(IntrMitigate, 0x0000); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + if (tp->mac_version != RTL_GIGA_MAC_VER_01 && + tp->mac_version != RTL_GIGA_MAC_VER_02 && + tp->mac_version != RTL_GIGA_MAC_VER_03 && + tp->mac_version != RTL_GIGA_MAC_VER_04) { + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + RTL_R8(IntrMask); + + RTL_W32(RxMissed, 0); + + rtl_set_rx_mode(dev); + + /* no early-rx interrupts */ + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + if (tp->csi_ops.write) + tp->csi_ops.write(tp, addr, value); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + return tp->csi_ops.read ? 
tp->csi_ops.read(tp, addr) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) +{ + u32 csi; + + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | bits); +} + +static void rtl_csi_access_enable_1(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x17000000); +} + +static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27000000); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CSIAR) & CSIAR_FLAG; +} + +static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(CSIDR) : ~0; +} + +static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(CSIDR) : ~0; +} + +static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC2); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
+ RTL_R32(CSIDR) : ~0; +} + +static void rtl_init_csi_ops(struct rtl8169_private *tp) +{ + struct csi_ops *ops = &tp->csi_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + ops->write = NULL; + ops->read = NULL; + break; + + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + ops->write = r8402_csi_write; + ops->read = r8402_csi_read; + break; + + case RTL_GIGA_MAC_VER_44: + ops->write = r8411_csi_write; + ops->read = r8411_csi_read; + break; + + default: + ops->write = r8169_csi_write; + ops->read = r8169_csi_read; + break; + } +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, + int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +static void rtl_disable_clock_request(struct pci_dev *pdev) +{ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct pci_dev *pdev) +{ + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 data; + + data = RTL_R8(Config3); + + if (enable) + data |= Rdy_to_L23; + else + data &= ~Rdy_to_L23; + + RTL_W8(Config3, data); +} + +#define R8168_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8168bb(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + + if (tp->dev->mtu <= ETH_DATA_LEN) { + rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | + PCI_EXP_DEVCTL_NOSNOOP_EN); + } +} + +static void rtl_hw_start_8168bef(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_start_8168bb(tp); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Config1, RTL_R8(Config1) | Speed_down); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_disable_clock_request(pdev); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + void 
__iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + /* Magic. */ + RTL_W8(DBG_REG, 0x20); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_csi_access_enable_2(tp); + + RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0220 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8168c_2(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_csi_access_enable_2(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_disable_clock_request(pdev); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168dp(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_1(tp); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, ~0, 0x48 }, + { 0x19, 0x20, 0x50 }, + { 0x0c, ~0, 0x20 } + }; + int i; + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) { + const struct ephy_info *e = e_info_8168d_4 + i; + u16 w; + + w = rtl_ephy_read(tp, e->offset); + rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits); + } + + rtl_enable_clock_request(pdev); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, 
+ { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); + + /* Reset tx FIFO pointer */ + RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST); + RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST); + + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_csi_access_enable_1(tp); + + rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + 
{ 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x1e, 0x0000, 0x4000 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); + + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20eb } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0020, 0x0000 }, + { 0x1e, 0x0000, 0x2000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + 
RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + u16 rg_saw_cnt; + u32 data; + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x154a }, + { 0x01, 0xffff, 0x068b } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_writephy(tp, 0x1f, 0x0c42); + rg_saw_cnt = rtl_readphy(tp, 0x13); + rtl_writephy(tp, 0x1f, 0x0000); + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + data = r8168_mac_ocp_read(tp, 0xd412); + data &= 0x0fff; + data |= sw_cnt_1ms_ini; + r8168_mac_ocp_write(tp, 0xd412, data); + } + + data = r8168_mac_ocp_read(tp, 0xe056); + data &= 0xf0; + data |= 0x07; + r8168_mac_ocp_write(tp, 0xe056, data); + + data = r8168_mac_ocp_read(tp, 0xe052); + data &= 0x8008; + data |= 0x6000; + r8168_mac_ocp_write(tp, 0xe052, data); + + data = r8168_mac_ocp_read(tp, 0xe0d6); + data &= 0x01ff; + data |= 0x017f; + r8168_mac_ocp_write(tp, 0xe0d6, data); + + data = r8168_mac_ocp_read(tp, 0xd420); + data &= 0x0fff; + data |= 0x047f; + r8168_mac_ocp_write(tp, 0xd420, data); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl8168ep_stop_cmac(tp); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, 
ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); + + rtl_hw_start_8168ep(tp); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 data; + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + + data = r8168_mac_ocp_read(tp, 0xd3e2); + data &= 0xf000; + data |= 0x0271; + r8168_mac_ocp_write(tp, 0xd3e2, data); + + data = r8168_mac_ocp_read(tp, 0xd3e4); + data &= 0xff00; + r8168_mac_ocp_write(tp, 0xd3e4, data); + + data = r8168_mac_ocp_read(tp, 0xe860); + data |= 0x0080; + r8168_mac_ocp_write(tp, 0xe860, data); +} + +static void rtl_hw_start_8168(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; + + RTL_W16(CPlusCmd, tp->cp_cmd); + + RTL_W16(IntrMitigate, 0x5151); + + /* Work around for RxFIFO overflow. 
*/ + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + tp->event_slow |= RxFIFOOver | PCSTimeout; + tp->event_slow &= ~RxOverflow; + } + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_tx_config_registers(tp); + + RTL_R8(IntrMask); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + rtl_hw_start_8168bb(tp); + break; + + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + rtl_hw_start_8168bef(tp); + break; + + case RTL_GIGA_MAC_VER_18: + rtl_hw_start_8168cp_1(tp); + break; + + case RTL_GIGA_MAC_VER_19: + rtl_hw_start_8168c_1(tp); + break; + + case RTL_GIGA_MAC_VER_20: + rtl_hw_start_8168c_2(tp); + break; + + case RTL_GIGA_MAC_VER_21: + rtl_hw_start_8168c_3(tp); + break; + + case RTL_GIGA_MAC_VER_22: + rtl_hw_start_8168c_4(tp); + break; + + case RTL_GIGA_MAC_VER_23: + rtl_hw_start_8168cp_2(tp); + break; + + case RTL_GIGA_MAC_VER_24: + rtl_hw_start_8168cp_3(tp); + break; + + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + rtl_hw_start_8168d(tp); + break; + + case RTL_GIGA_MAC_VER_28: + rtl_hw_start_8168d_4(tp); + break; + + case RTL_GIGA_MAC_VER_31: + rtl_hw_start_8168dp(tp); + break; + + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_hw_start_8168e_1(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl_hw_start_8168e_2(tp); + break; + + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + rtl_hw_start_8168f_1(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl_hw_start_8411(tp); + break; + + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_hw_start_8168g_1(tp); + break; + case RTL_GIGA_MAC_VER_42: + rtl_hw_start_8168g_2(tp); + break; + + case RTL_GIGA_MAC_VER_44: + rtl_hw_start_8411_2(tp); + break; + + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + rtl_hw_start_8168h_1(tp); + break; + + case RTL_GIGA_MAC_VER_49: + rtl_hw_start_8168ep_1(tp); + break; + + case RTL_GIGA_MAC_VER_50: + rtl_hw_start_8168ep_2(tp); + break; + + case RTL_GIGA_MAC_VER_51: + rtl_hw_start_8168ep_3(tp); + break; + + default: + printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", + dev->name, tp->mac_version); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_set_rx_mode(dev); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); +} + +#define R810X_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_csi_access_enable_2(tp); + + RTL_W8(DBG_REG, FIX_NAK_1); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + cfg1 = RTL_R8(Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << 
MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); + + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_csi_access_enable_2(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); + + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8101(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) + tp->event_slow &= ~RxFIFOOver; + + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || + tp->mac_version == RTL_GIGA_MAC_VER_16) + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_tx_config_registers(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + rtl_hw_start_8102e_1(tp); + break; + + 
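+ /* Note: the VER_08/VER_09 ordering below is intentional, not a typo:
+ * VER_08 takes the 8102e_3 path (8102e_2 plus one extra EPHY write),
+ * while VER_09 uses the plain 8102e_2 path.
+ */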
case RTL_GIGA_MAC_VER_08: + rtl_hw_start_8102e_3(tp); + break; + + case RTL_GIGA_MAC_VER_09: + rtl_hw_start_8102e_2(tp); + break; + + case RTL_GIGA_MAC_VER_29: + rtl_hw_start_8105e_1(tp); + break; + case RTL_GIGA_MAC_VER_30: + rtl_hw_start_8105e_2(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl_hw_start_8402(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl_hw_start_8106(tp); + break; + case RTL_GIGA_MAC_VER_43: + rtl_hw_start_8168g_2(tp); + break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + rtl_hw_start_8168h_1(tp); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W16(IntrMitigate, 0x0000); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_set_rx_mode(dev); + + RTL_R8(IntrMask); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (new_mtu < ETH_ZLEN || + new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max) + return -EINVAL; + + if (new_mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->addr = cpu_to_le64(0x0badbadbadbadbadull); + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); +} + +static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, + void **data_buff, struct RxDesc *desc) +{ + dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz, + DMA_FROM_DEVICE); + + kfree(*data_buff); + *data_buff = NULL; + rtl8169_make_unusable_by_asic(desc); +} + +static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); +} + +static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc, rx_buf_sz); +} + +static inline void *rtl8169_align(void *data) +{ + return (void *)ALIGN((long)data, 16); +} + +static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + void *data; + dma_addr_t mapping; + struct device *d = &tp->pci_dev->dev; + struct net_device *dev = tp->dev; + int node = dev->dev.parent ? 
dev_to_node(dev->dev.parent) : -1; + + data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!data) + return NULL; + + if (rtl8169_align(data) != data) { + kfree(data); + data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + if (!data) + return NULL; + } + + mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + rtl8169_map_to_asic(desc, mapping, rx_buf_sz); + return data; + +err_out: + kfree(data); + return NULL; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_databuff[i]) { + rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, + tp->RxDescArray + i); + } + } +} + +static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->opts1 |= cpu_to_le32(RingEnd); +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + void *data; + + if (tp->Rx_databuff[i]) + continue; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_make_unusable_by_asic(tp->RxDescArray + i); + goto err_out; + } + tp->Rx_databuff[i] = data; + } + + rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + return 0; + +err_out: + rtl8169_rx_clear(tp); + return -ENOMEM; +} + +static int rtl8169_init_ring(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); + memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb, + struct TxDesc *desc) +{ + unsigned int len = tx_skb->len; + + dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = 0x00; + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (skb) { + tp->dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + tx_skb->skb = NULL; + } + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + int i; + + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); + + rtl8169_hw_reset(tp); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); + + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); + + napi_enable(&tp->napi); + rtl_hw_start(dev); + netif_wake_queue(dev); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); +} + +static void rtl8169_tx_timeout(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + u32 *opts) +{ + struct skb_shared_info *info = skb_shinfo(skb); + 
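+ /* Each fragment below gets its own Tx descriptor carrying DescOwn and
+ * the fragment length (plus RingEnd on the wrap-around slot); only the
+ * last fragment is tagged LastFrag and keeps the skb pointer. On a DMA
+ * mapping error the descriptors filled so far are unwound and -EIO is
+ * returned to the caller.
+ */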
unsigned int cur_frag, entry; + struct TxDesc *uninitialized_var(txd); + struct device *d = &tp->pci_dev->dev; + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | + (RingEnd * !((entry + 1) % NUM_TX_DESC)); + + txd->opts1 = cpu_to_le32(status); + txd->opts2 = cpu_to_le32(opts[1]); + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + } + + if (cur_frag) { + tp->tx_skb[entry].skb = skb; + txd->opts1 |= cpu_to_le32(LastFrag); + } + + return cur_frag; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +{ + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev); +/* r8169_csum_workaround() + * The hw limites the value the transport offset. When the offset is out of the + * range, calculate the checksum by sw. + */ +static void r8169_csum_workaround(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + if (skb_shinfo(skb)->gso_size) { + netdev_features_t features = tp->dev->features; + struct sk_buff *segs, *nskb; + + features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); + segs = skb_gso_segment(skb, features); + if (IS_ERR(segs) || !segs) + goto drop; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + rtl8169_start_xmit(nskb, tp->dev); + } while (segs); + + dev_consume_skb_any(skb); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_help(skb) < 0) + goto drop; + + rtl8169_start_xmit(skb, tp->dev); + } else { + struct net_device_stats *stats; + +drop: + stats = &tp->dev->stats; + stats->tx_dropped++; + dev_kfree_skb_any(skb); + } +} + +/* msdn_giant_send_check() + * According to the document of microsoft, the TCP Pseudo Header excludes the + * packet length for IPv6 TCP large packets. 
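+ * Hence the pseudo-header checksum seeded below is computed over a zero
+ * length, i.e. ~tcp_v6_check(0, &saddr, &daddr, 0).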
+ */ +static int msdn_giant_send_check(struct sk_buff *skb) +{ + const struct ipv6hdr *ipv6h; + struct tcphdr *th; + int ret; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + ipv6h = ipv6_hdr(skb); + th = tcp_hdr(skb); + + th->check = 0; + th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); + + return ret; +} + +static inline __be16 get_protocol(struct sk_buff *skb) +{ + __be16 protocol; + + if (skb->protocol == htons(ETH_P_8021Q)) + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + return protocol; +} + +static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } + + return true; +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + if (transport_offset > GTTCPHO_MAX) { + netif_warn(tp, tx_err, tp->dev, + "Invalid transport offset 0x%x for TSO\n", + transport_offset); + return false; + } + + switch (get_protocol(skb)) { + case htons(ETH_P_IP): + opts[0] |= TD1_GTSENV4; + break; + + case htons(ETH_P_IPV6): + if (msdn_giant_send_check(skb)) + return false; + + opts[0] |= TD1_GTSENV6; + break; + + default: + WARN_ON_ONCE(1); + break; + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return !(skb_checksum_help(skb) || eth_skb_pad(skb)); + + if (transport_offset > TCPHO_MAX) { + netif_warn(tp, tx_err, tp->dev, + "Invalid transport offset 0x%x\n", + transport_offset); + return false; + } + + switch (get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return !eth_skb_pad(skb); + } + + return true; +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd = tp->TxDescArray + entry; + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + dma_addr_t mapping; + u32 status, len; + u32 opts[2]; + int frags; + + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { + netif_err(tp, drv, dev, "BUG! 
Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) + goto err_stop_0; + + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + if (!tp->tso_csum(tp, skb, opts)) { + r8169_csum_workaround(tp, skb); + return NETDEV_TX_OK; + } + + len = skb_headlen(skb); + mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_0; + } + + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + + frags = rtl8169_xmit_frags(tp, skb, opts); + if (frags < 0) + goto err_dma_1; + else if (frags) + opts[0] |= FirstFrag; + else { + opts[0] |= FirstFrag | LastFrag; + tp->tx_skb[entry].skb = skb; + } + + txd->opts2 = cpu_to_le32(opts[1]); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); + txd->opts1 = cpu_to_le32(status); + + /* Force all memory writes to complete before notifying device */ + wmb(); + + tp->cur_tx += frags + 1; + + RTL_W8(TxPoll, NPQ); + + mmiowb(); + + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. + */ + smp_mb(); + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); +err_dma_0: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n", + pci_cmd, pci_status); + + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
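+ * In short: adjust SERR/parity reporting in PCI_COMMAND according to
+ * broken_parity_status, write back the latched error bits in PCI_STATUS,
+ * drop PCI DAC if the error hit before any packet was received, then
+ * reset the chip and schedule the reset task.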
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + pci_write_config_word(pdev, PCI_STATUS, + pci_status & (PCI_STATUS_DETECTED_PARITY | + PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); + + /* The infamous DAC f*ckup only happens at boot time */ + if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { + void __iomem *ioaddr = tp->mmio_addr; + + netif_info(tp, intr, dev, "disabling PCI DAC\n"); + tp->cp_cmd &= ~PCIDAC; + RTL_W16(CPlusCmd, tp->cp_cmd); + dev->features &= ~NETIF_F_HIGHDMA; + } + + rtl8169_hw_reset(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) +{ + unsigned int dirty_tx, tx_left; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = tp->cur_tx - dirty_tx; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Tx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (status & LastFrag) { + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); + dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); + if (netif_queue_stopped(dev) && + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). 
-- FR + */ + if (tp->cur_tx != dirty_tx) { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(TxPoll, NPQ); + } + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static struct sk_buff *rtl8169_try_rx_copy(void *data, + struct rtl8169_private *tp, + int pkt_size, + dma_addr_t addr) +{ + struct sk_buff *skb; + struct device *d = &tp->pci_dev->dev; + + data = rtl8169_align(data); + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(data); + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (skb) + memcpy(skb->data, data, pkt_size); + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + return skb; +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) +{ + unsigned int cur_rx, rx_left; + unsigned int count; + + cur_rx = tp->cur_rx; + + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { + unsigned int entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + u32 status; + + status = le32_to_cpu(desc->opts1) & tp->opts1_mask; + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + if (status & RxFOVF) { + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + dev->stats.rx_fifo_errors++; + } + if ((status & (RxRUNT | RxCRC)) && + !(status & (RxRWT | RxFOVF)) && + (dev->features & NETIF_F_RXALL)) + goto process_pkt; + } else { + struct sk_buff *skb; + dma_addr_t addr; + int pkt_size; + +process_pkt: + addr = le64_to_cpu(desc->addr); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; + + /* + * The driver does not support incoming fragmented + * frames. They are seen as a symptom of over-mtu + * sized frames. 
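+ * Such frames are dropped below and accounted as rx_dropped and
+ * rx_length_errors.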
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], + tp, pkt_size, addr); + if (!skb) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + rtl8169_rx_csum(skb, status); + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + napi_gro_receive(&tp->napi, skb); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + } +release_descriptor: + desc->opts2 = 0; + rtl8169_mark_to_asic(desc, rx_buf_sz); + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct rtl8169_private *tp = netdev_priv(dev); + int handled = 0; + u16 status; + + status = rtl_get_events(tp); + if (status && status != 0xffff) { + status &= RTL_EVENT_NAPI | tp->event_slow; + if (status) { + handled = 1; + + rtl_irq_disable(tp); + napi_schedule(&tp->napi); + } + } + return IRQ_RETVAL(handled); +} + +/* + * Workqueue context. + */ +static void rtl_slow_event_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u16 status; + + status = rtl_get_events(tp) & tp->event_slow; + rtl_ack_events(tp, status); + + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + netif_stop_queue(dev); + /* XXX - Hack alert. See rtl_task(). */ + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + default: + break; + } + } + + if (unlikely(status & SYSErr)) + rtl8169_pcierr_interrupt(dev); + + if (status & LinkChg) + __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); + + rtl_irq_enable_all(tp); +} + +static void rtl_task(struct work_struct *work) +{ + static const struct { + int bitnr; + void (*action)(struct rtl8169_private *); + } rtl_work[] = { + /* XXX - keep rtl_slow_event_work() as first element. 
*/ + { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, + { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, + { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } + }; + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + struct net_device *dev = tp->dev; + int i; + + rtl_lock_work(tp); + + if (!netif_running(dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(rtl_work); i++) { + bool pending; + + pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags); + if (pending) + rtl_work[i].action(tp); + } + +out_unlock: + rtl_unlock_work(tp); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; + int work_done= 0; + u16 status; + + status = rtl_get_events(tp); + rtl_ack_events(tp, status & ~tp->event_slow); + + if (status & RTL_EVENT_NAPI_RX) + work_done = rtl_rx(dev, tp, (u32) budget); + + if (status & RTL_EVENT_NAPI_TX) + rtl_tx(dev, tp); + + if (status & tp->event_slow) { + enable_mask &= ~tp->event_slow; + + rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); + } + + if (work_done < budget) { + napi_complete(napi); + + rtl_irq_enable(tp, enable_mask); + mmiowb(); + } + + return work_done; +} + +static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) + return; + + dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff); + RTL_W32(RxMissed, 0); +} + +static void rtl8169_down(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + del_timer_sync(&tp->timer); + + napi_disable(&tp->napi); + netif_stop_queue(dev); + + rtl8169_hw_reset(tp); + /* + * At this point device interrupts can not be enabled in any function, + * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) + * and napi is disabled (rtl8169_poll). + */ + rtl8169_rx_missed(dev, ioaddr); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_sched(); + + rtl8169_tx_clear(tp); + + rtl8169_rx_clear(tp); + + rtl_pll_power_down(tp); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + /* Update counters before going down */ + rtl8169_update_counters(dev); + + rtl_lock_work(tp); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + rtl8169_down(dev); + rtl_unlock_work(tp); + + cancel_work_sync(&tp->wk.work); + + free_irq(pdev->irq, dev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(tp->pci_dev->irq, dev); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. 
+ * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(dev); + if (retval < 0) + goto err_free_rx_1; + + INIT_WORK(&tp->wk.work, rtl_task); + + smp_mb(); + + rtl_request_firmware(tp); + + retval = request_irq(pdev->irq, rtl8169_interrupt, + (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, + dev->name, dev); + if (retval < 0) + goto err_release_fw_2; + + rtl_lock_work(tp); + + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + napi_enable(&tp->napi); + + rtl8169_init_phy(dev, tp); + + __rtl8169_set_features(dev, dev->features); + + rtl_pll_power_up(tp); + + rtl_hw_start(dev); + + netif_start_queue(dev); + + rtl_unlock_work(tp); + + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + + rtl8169_check_link_status(dev, tp, ioaddr); +out: + return retval; + +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; +} + +static struct rtnl_link_stats64 * +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int start; + + if (netif_running(dev)) + rtl8169_rx_missed(dev, ioaddr); + + do { + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); + + + do { + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; + stats->rx_length_errors = dev->stats.rx_length_errors; + stats->rx_errors = dev->stats.rx_errors; + stats->rx_crc_errors = dev->stats.rx_crc_errors; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->rx_missed_errors = dev->stats.rx_missed_errors; + + return stats; +} + +static void rtl8169_net_suspend(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + netif_device_detach(dev); + netif_stop_queue(dev); + + rtl_lock_work(tp); + napi_disable(&tp->napi); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_pll_power_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + + rtl8169_net_suspend(dev); + + return 0; +} + +static void __rtl8169_resume(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + netif_device_attach(dev); + + rtl_pll_power_up(tp); + + rtl_lock_work(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int 
rtl8169_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_phy(dev, tp); + + if (netif_running(dev)) + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + rtl_lock_work(tp); + tp->saved_wolopts = __rtl8169_get_wol(tp); + __rtl8169_set_wol(tp, WAKE_ANY); + rtl_unlock_work(tp); + + rtl8169_net_suspend(dev); + + return 0; +} + +static int rtl8169_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + rtl_lock_work(tp); + __rtl8169_set_wol(tp, tp->saved_wolopts); + tp->saved_wolopts = 0; + rtl_unlock_work(tp); + + rtl8169_init_phy(dev, tp); + + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->TxDescArray ? -EBUSY : 0; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + .suspend = rtl8169_suspend, + .resume = rtl8169_resume, + .freeze = rtl8169_suspend, + .thaw = rtl8169_resume, + .poweroff = rtl8169_suspend, + .restore = rtl8169_resume, + .runtime_suspend = rtl8169_runtime_suspend, + .runtime_resume = rtl8169_runtime_resume, + .runtime_idle = rtl8169_runtime_idle, +}; + +#define RTL8169_PM_OPS (&rtl8169_pm_ops) + +#else /* !CONFIG_PM */ + +#define RTL8169_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* WoL fails with 8168b when the receiver is disabled. 
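+ * The quirk below therefore clears bus mastering but re-enables the
+ * receiver (CmdRxEnb) on the 8168b variants handled here (VER_11, 12
+ * and 17) before the chip is put into D3.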
*/ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(ChipCmd); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &pdev->dev; + + pm_runtime_get_sync(d); + + rtl8169_net_suspend(dev); + + /* Restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl8169_hw_reset(tp); + + if (system_state == SYSTEM_POWER_OFF) { + if (__rtl8169_get_wol(tp) & WAKE_ANY) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } + + pm_runtime_put_noidle(d); +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { + rtl8168_driver_stop(tp); + } + + netif_napi_del(&tp->napi); + + unregister_netdev(dev); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl_disable_msi(pdev, tp); + rtl8169_release_board(pdev, dev, tp->mmio_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static const struct rtl_cfg_info { + void (*hw_start)(struct net_device *); + unsigned int region; + unsigned int align; + u16 event_slow; + unsigned features; + u8 default_ver; +} rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .region = 1, + .align = 0, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, + .features = RTL_FEATURE_GMII, + .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow, + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | + PCSTimeout, + .features = RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_13, + } +}; + +/* Cfg9346_Unlock assumed. */ +static unsigned rtl_try_msi(struct rtl8169_private *tp, + const struct rtl_cfg_info *cfg) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned msi = 0; + u8 cfg2; + + cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (cfg->features & RTL_FEATURE_MSI) { + if (pci_enable_msi(tp->pci_dev)) { + netif_info(tp, hw, tp->dev, "no MSI. 
Back to INTx.\n"); + } else { + cfg2 |= MSIEnable; + msi = RTL_FEATURE_MSI; + } + } + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + RTL_W8(Config2, cfg2); + return msi; +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(MCU) & LINK_LIST_RDY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 data; + + tp->ocp_base = OCP_STD_PHY_BASE; + + RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42)) + return; + + if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) + return; + + RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + data = r8168_mac_ocp_read(tp, 0xe8de); + data &= ~(1 << 14); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; + + data = r8168_mac_ocp_read(tp, 0xe8de); + data |= (1 << 15); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; +} + +static void rtl_hw_init_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + rtl_hw_init_8168g(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl_hw_init_8168ep(tp); + break; + default: + break; + } +} + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + const unsigned int region = cfg->region; + struct rtl8169_private *tp; + struct mii_if_info *mii; + struct net_device *dev; + void __iomem *ioaddr; + int chipset, i; + int rc; + + if (netif_msg_drv(&debug)) { + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8169_VERSION); + } + + dev = alloc_etherdev(sizeof (*tp)); + if (!dev) { + rc = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + + mii = &tp->mii; + mii->dev = dev; + mii->mdio_read = rtl_mdio_read; + mii->mdio_write = rtl_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { + netif_err(tp, probe, dev, "enable failure\n"); + goto err_out_free_dev_1; + } + + if (pci_set_mwi(pdev) < 0) + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { + netif_err(tp, probe, dev, + "region #%d not an MMIO resource, aborting\n", + region); + rc = -ENODEV; + goto err_out_mwi_2; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + netif_err(tp, probe, dev, + "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out_mwi_2; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { + netif_err(tp, probe, dev, "could not request regions\n"); + goto err_out_mwi_2; + } + + tp->cp_cmd = 0; + + if ((sizeof(dma_addr_t) > 4) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_free_res_3; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + if (!ioaddr) { + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res_3; + } + tp->mmio_addr = ioaddr; + + if (!pci_is_pcie(pdev)) + netif_info(tp, probe, dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, dev, cfg->default_ver); + + rtl_init_rxcfg(tp); + + rtl_irq_disable(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rtl_ack_events(tp, 0xffff); + + pci_set_master(pdev); + + rtl_init_mdio_ops(tp); + rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); + rtl_init_csi_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + tp->txd_version = rtl_chip_infos[chipset].txd_version; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config3) & LinkUp) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + default: + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + } + if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) + tp->features |= RTL_FEATURE_WOL; + tp->features |= rtl_try_msi(tp, cfg); + RTL_W8(Cfg9346, Cfg9346_Lock); + + if (rtl_tbi_enabled(tp)) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + tp->do_ioctl = rtl_tbi_ioctl; + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = 
rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + tp->do_ioctl = rtl_xmii_ioctl; + } + + mutex_init(&tp->wk.mutex); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + + /* Get MAC address */ + if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) { + u16 mac_addr[3]; + + *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + + if (is_valid_ether_addr((u8 *)mac_addr)) + rtl_rar_set(tp, (u8 *)mac_addr); + } + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = RTL_R8(MAC0 + i); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + tp->cp_cmd |= RxChkSum | RxVlan; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (tp->txd_version == RTL_TD_0) + tp->tso_csum = rtl8169_tso_csum_v1; + else if (tp->txd_version == RTL_TD_1) { + tp->tso_csum = rtl8169_tso_csum_v2; + dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + } else + WARN_ON_ONCE(1); + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + tp->hw_start = cfg->hw_start; + tp->event_slow = cfg->event_slow; + + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) dev; + tp->timer.function = rtl8169_phy_timer; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + rc = register_netdev(dev); + if (rc < 0) + goto err_out_msi_4; + + pci_set_drvdata(pdev, dev); + + netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", + rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, + (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); + } + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { + rtl8168_driver_start(tp); + } + + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + netif_carrier_off(dev); + +out: + return rc; + +err_out_msi_4: + netif_napi_del(&tp->napi); + rtl_disable_msi(pdev, tp); + iounmap(ioaddr); +err_out_free_res_3: + pci_release_regions(pdev); +err_out_mwi_2: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev_1: + free_netdev(dev); + goto out; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = RTL8169_PM_OPS, +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig new file mode 100644 index 000000000..196e98a2d --- /dev/null +++ b/drivers/net/ethernet/renesas/Kconfig @@ -0,0 +1,17 @@ +# +# Renesas device configuration +# + +config SH_ETH + tristate "Renesas SuperH Ethernet support" + depends on HAS_DMA + depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST + select CRC32 + select MII + select MDIO_BITBANG + select PHYLIB + ---help--- + Renesas SuperH Ethernet device driver. + This driver supporting CPUs are: + - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, + R8A7740, R8A777x and R8A779x. diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile new file mode 100644 index 000000000..1c278a8e0 --- /dev/null +++ b/drivers/net/ethernet/renesas/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Renesas device drivers. +# + +obj-$(CONFIG_SH_ETH) += sh_eth.o diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c new file mode 100644 index 000000000..7fb244f56 --- /dev/null +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -0,0 +1,3332 @@ +/* SuperH Ethernet device driver + * + * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2006-2012 Nobuhiro Iwamatsu + * Copyright (C) 2008-2014 Renesas Solutions Corp. + * Copyright (C) 2013-2014 Cogent Embedded, Inc. + * Copyright (C) 2014 Codethink Limited + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sh_eth.h" + +#define SH_ETH_DEF_MSG_ENABLE \ + (NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_RX_ERR| \ + NETIF_MSG_TX_ERR) + +#define SH_ETH_OFFSET_DEFAULTS \ + [0 ... 
SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID + +static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDSR] = 0x0000, + [EDMR] = 0x0400, + [EDTRR] = 0x0408, + [EDRRR] = 0x0410, + [EESR] = 0x0428, + [EESIPR] = 0x0430, + [TDLAR] = 0x0010, + [TDFAR] = 0x0014, + [TDFXR] = 0x0018, + [TDFFR] = 0x001c, + [RDLAR] = 0x0030, + [RDFAR] = 0x0034, + [RDFXR] = 0x0038, + [RDFFR] = 0x003c, + [TRSCER] = 0x0438, + [RMFCR] = 0x0440, + [TFTR] = 0x0448, + [FDR] = 0x0450, + [RMCR] = 0x0458, + [RPADIR] = 0x0460, + [FCFTR] = 0x0468, + [CSMR] = 0x04E4, + + [ECMR] = 0x0500, + [ECSR] = 0x0510, + [ECSIPR] = 0x0518, + [PIR] = 0x0520, + [PSR] = 0x0528, + [PIPR] = 0x052c, + [RFLR] = 0x0508, + [APR] = 0x0554, + [MPR] = 0x0558, + [PFTCR] = 0x055c, + [PFRCR] = 0x0560, + [TPAUSER] = 0x0564, + [GECMR] = 0x05b0, + [BCULR] = 0x05b4, + [MAHR] = 0x05c0, + [MALR] = 0x05c8, + [TROCR] = 0x0700, + [CDCR] = 0x0708, + [LCCR] = 0x0710, + [CEFCR] = 0x0740, + [FRECR] = 0x0748, + [TSFRCR] = 0x0750, + [TLFRCR] = 0x0758, + [RFCR] = 0x0760, + [CERCR] = 0x0768, + [CEECR] = 0x0770, + [MAFCR] = 0x0778, + [RMII_MII] = 0x0790, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAG0] = 0x0040, + [TSU_QTAG1] = 0x0044, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_VTAG0] = 0x0058, + [TSU_VTAG1] = 0x005c, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + [TSU_ADRH0] = 0x0100, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a0, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, +}; + +static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDSR] = 0x0000, + [EDMR] = 0x0400, + [EDTRR] = 0x0408, + [EDRRR] = 0x0410, + [EESR] = 0x0428, + [EESIPR] = 0x0430, + [TDLAR] = 0x0010, + [TDFAR] = 0x0014, + [TDFXR] = 0x0018, + [TDFFR] = 0x001c, + [RDLAR] = 0x0030, + [RDFAR] = 0x0034, + [RDFXR] = 0x0038, + [RDFFR] = 0x003c, + [TRSCER] = 0x0438, + [RMFCR] = 0x0440, + [TFTR] = 0x0448, + [FDR] = 0x0450, + [RMCR] = 0x0458, + [RPADIR] = 0x0460, + [FCFTR] = 0x0468, + [CSMR] = 0x04E4, + + [ECMR] = 0x0500, + [RFLR] = 0x0508, + [ECSR] = 0x0510, + [ECSIPR] = 0x0518, + [PIR] = 0x0520, + [APR] = 0x0554, + [MPR] = 0x0558, + [PFTCR] = 0x055c, + [PFRCR] = 0x0560, + [TPAUSER] = 0x0564, + [MAHR] = 0x05c0, + [MALR] = 0x05c8, + [CEFCR] = 0x0740, + [FRECR] = 0x0748, + [TSFRCR] = 0x0750, + [TLFRCR] = 0x0758, + [RFCR] = 0x0760, + [MAFCR] = 0x0778, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_VTAG0] = 0x0058, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_ADRH0] = 0x0100, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008C, +}; + +static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [ECMR] = 0x0300, + [RFLR] = 0x0308, + [ECSR] = 0x0310, + [ECSIPR] = 0x0318, + [PIR] = 0x0320, + [PSR] = 0x0328, + [RDMLR] = 0x0340, + [IPGR] = 0x0350, + [APR] = 0x0354, + [MPR] = 0x0358, + [RFCF] = 0x0360, + [TPAUSER] = 0x0364, + [TPAUSECR] = 0x0368, + 
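+ /* Entries omitted from this table stay SH_ETH_OFFSET_INVALID via
+ * SH_ETH_OFFSET_DEFAULTS; the remaining R-Car offsets continue below.
+ */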
[MAHR] = 0x03c0, + [MALR] = 0x03c8, + [TROCR] = 0x03d0, + [CDCR] = 0x03d4, + [LCCR] = 0x03d8, + [CNDCR] = 0x03dc, + [CEFCR] = 0x03e4, + [FRECR] = 0x03e8, + [TSFRCR] = 0x03ec, + [TLFRCR] = 0x03f0, + [RFCR] = 0x03f4, + [MAFCR] = 0x03f8, + + [EDMR] = 0x0200, + [EDTRR] = 0x0208, + [EDRRR] = 0x0210, + [TDLAR] = 0x0218, + [RDLAR] = 0x0220, + [EESR] = 0x0228, + [EESIPR] = 0x0230, + [TRSCER] = 0x0238, + [RMFCR] = 0x0240, + [TFTR] = 0x0248, + [FDR] = 0x0250, + [RMCR] = 0x0258, + [TFUCR] = 0x0264, + [RFOCR] = 0x0268, + [RMIIMODE] = 0x026c, + [FCFTR] = 0x0270, + [TRIMD] = 0x027c, +}; + +static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [ECMR] = 0x0100, + [RFLR] = 0x0108, + [ECSR] = 0x0110, + [ECSIPR] = 0x0118, + [PIR] = 0x0120, + [PSR] = 0x0128, + [RDMLR] = 0x0140, + [IPGR] = 0x0150, + [APR] = 0x0154, + [MPR] = 0x0158, + [TPAUSER] = 0x0164, + [RFCF] = 0x0160, + [TPAUSECR] = 0x0168, + [BCFRR] = 0x016c, + [MAHR] = 0x01c0, + [MALR] = 0x01c8, + [TROCR] = 0x01d0, + [CDCR] = 0x01d4, + [LCCR] = 0x01d8, + [CNDCR] = 0x01dc, + [CEFCR] = 0x01e4, + [FRECR] = 0x01e8, + [TSFRCR] = 0x01ec, + [TLFRCR] = 0x01f0, + [RFCR] = 0x01f4, + [MAFCR] = 0x01f8, + [RTRATE] = 0x01fc, + + [EDMR] = 0x0000, + [EDTRR] = 0x0008, + [EDRRR] = 0x0010, + [TDLAR] = 0x0018, + [RDLAR] = 0x0020, + [EESR] = 0x0028, + [EESIPR] = 0x0030, + [TRSCER] = 0x0038, + [RMFCR] = 0x0040, + [TFTR] = 0x0048, + [FDR] = 0x0050, + [RMCR] = 0x0058, + [TFUCR] = 0x0064, + [RFOCR] = 0x0068, + [FCFTR] = 0x0070, + [RPADIR] = 0x0078, + [TRIMD] = 0x007c, + [RBWAR] = 0x00c8, + [RDFAR] = 0x00cc, + [TBRAR] = 0x00d4, + [TDFAR] = 0x00d8, +}; + +static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + SH_ETH_OFFSET_DEFAULTS, + + [EDMR] = 0x0000, + [EDTRR] = 0x0004, + [EDRRR] = 0x0008, + [TDLAR] = 0x000c, + [RDLAR] = 0x0010, + [EESR] = 0x0014, + [EESIPR] = 0x0018, + [TRSCER] = 0x001c, + [RMFCR] = 0x0020, + [TFTR] = 0x0024, + [FDR] = 0x0028, + [RMCR] = 0x002c, + [EDOCR] = 0x0030, + [FCFTR] = 0x0034, + [RPADIR] = 0x0038, + [TRIMD] = 0x003c, + [RBWAR] = 0x0040, + [RDFAR] = 0x0044, + [TBRAR] = 0x004c, + [TDFAR] = 0x0050, + + [ECMR] = 0x0160, + [ECSR] = 0x0164, + [ECSIPR] = 0x0168, + [PIR] = 0x016c, + [MAHR] = 0x0170, + [MALR] = 0x0174, + [RFLR] = 0x0178, + [PSR] = 0x017c, + [TROCR] = 0x0180, + [CDCR] = 0x0184, + [LCCR] = 0x0188, + [CNDCR] = 0x018c, + [CEFCR] = 0x0194, + [FRECR] = 0x0198, + [TSFRCR] = 0x019c, + [TLFRCR] = 0x01a0, + [RFCR] = 0x01a4, + [MAFCR] = 0x01a8, + [IPGR] = 0x01b4, + [APR] = 0x01b8, + [MPR] = 0x01bc, + [TPAUSER] = 0x01c4, + [BCFR] = 0x01cc, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a0, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, + + [TSU_ADRH0] = 0x0100, +}; + +static void sh_eth_rcv_snd_disable(struct net_device *ndev); +static struct 
net_device_stats *sh_eth_get_stats(struct net_device *ndev); + +static bool sh_eth_is_gether(struct sh_eth_private *mdp) +{ + return mdp->reg_offset == sh_eth_offset_gigabit; +} + +static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp) +{ + return mdp->reg_offset == sh_eth_offset_fast_rz; +} + +static void sh_eth_select_mii(struct net_device *ndev) +{ + u32 value = 0x0; + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->phy_interface) { + case PHY_INTERFACE_MODE_GMII: + value = 0x2; + break; + case PHY_INTERFACE_MODE_MII: + value = 0x1; + break; + case PHY_INTERFACE_MODE_RMII: + value = 0x0; + break; + default: + netdev_warn(ndev, + "PHY interface mode was not setup. Set to MII.\n"); + value = 0x1; + break; + } + + sh_eth_write(ndev, value, RMII_MII); +} + +static void sh_eth_set_duplex(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +/* There is CPU dependent code */ +static void sh_eth_set_rate_r8a777x(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR); + break; + default: + break; + } +} + +/* R8A7778/9 */ +static struct sh_eth_cpu_data r8a777x_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_r8a777x, + + .register_type = SH_ETH_REG_FAST_RCAR, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, + .fdr_value = 0x00000f0f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, +}; + +/* R8A7790/1 */ +static struct sh_eth_cpu_data r8a779x_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_r8a777x, + + .register_type = SH_ETH_REG_FAST_RCAR, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, + .fdr_value = 0x00000f0f, + + .trscer_err_mask = DESC_I_RINT8, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rmiimode = 1, +}; + +static void sh_eth_set_rate_sh7724(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); + break; + default: + break; + } +} + +/* SH7724 */ +static struct sh_eth_cpu_data sh7724_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7724, + + .register_type = SH_ETH_REG_FAST_SH4, + + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, + .eesipr_value = 0x01ff009f, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check 
= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ +}; + +static void sh_eth_set_rate_sh7757(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0, RTRATE); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 1, RTRATE); + break; + default: + break; + } +} + +/* SH7757 */ +static struct sh_eth_cpu_data sh7757_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7757, + + .register_type = SH_ETH_REG_FAST_SH4, + + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, + + .irq_flags = IRQF_SHARED, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .no_ade = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .rtrate = 1, +}; + +#define SH_GIGA_ETH_BASE 0xfee00000UL +#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) +#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) +static void sh_eth_chip_reset_giga(struct net_device *ndev) +{ + int i; + u32 mahr[2], malr[2]; + + /* save MAHR and MALR */ + for (i = 0; i < 2; i++) { + malr[i] = ioread32((void *)GIGA_MALR(i)); + mahr[i] = ioread32((void *)GIGA_MAHR(i)); + } + + /* reset device */ + iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); + mdelay(1); + + /* restore MAHR and MALR */ + for (i = 0; i < 2; i++) { + iowrite32(malr[i], (void *)GIGA_MALR(i)); + iowrite32(mahr[i], (void *)GIGA_MAHR(i)); + } +} + +static void sh_eth_set_rate_giga(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0x00000000, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 0x00000010, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, 0x00000020, GECMR); + break; + default: + break; + } +} + +/* SH7757(GETHERC) */ +static struct sh_eth_cpu_data sh7757_data_giga = { + .chip_reset = sh_eth_chip_reset_giga, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_giga, + + .register_type = SH_ETH_REG_GIGABIT, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000072f, + + .irq_flags = IRQF_SHARED, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, +}; + +static void sh_eth_chip_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); +} + +static void sh_eth_set_rate_gether(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, GECMR_10, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, GECMR_100, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, GECMR_1000, 
GECMR); + break; + default: + break; + } +} + +/* SH7734 */ +static struct sh_eth_cpu_data sh7734_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .hw_crc = 1, + .select_mii = 1, +}; + +/* SH7763 */ +static struct sh_eth_cpu_data sh7763_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .irq_flags = IRQF_SHARED, +}; + +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + /* reset device */ + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + mdelay(1); + + sh_eth_select_mii(ndev); +} + +/* R8A7740 */ +static struct sh_eth_cpu_data r8a7740_data = { + .chip_reset = sh_eth_chip_reset_r8a7740, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .select_mii = 1, + .shift_rd0 = 1, +}; + +/* R7S72100 */ +static struct sh_eth_cpu_data r7s72100_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + + .register_type = SH_ETH_REG_FAST_RZ, + + .ecsr_value = ECSR_ICD, + .ecsipr_value = ECSIPR_ICDIP, + .eesipr_value = 0xff7f009f, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .no_psr = 1, + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .hw_crc = 1, + .tsu = 1, + .shift_rd0 = 1, +}; + +static struct sh_eth_cpu_data sh7619_data = { + .register_type = SH_ETH_REG_FAST_SH3_SH2, + + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .hw_swap = 1, +}; + +static struct sh_eth_cpu_data sh771x_data = { + .register_type = SH_ETH_REG_FAST_SH3_SH2, + + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .tsu = 1, +}; + +static void 
sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) +{ + if (!cd->ecsr_value) + cd->ecsr_value = DEFAULT_ECSR_INIT; + + if (!cd->ecsipr_value) + cd->ecsipr_value = DEFAULT_ECSIPR_INIT; + + if (!cd->fcftr_value) + cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | + DEFAULT_FIFO_F_D_RFD; + + if (!cd->fdr_value) + cd->fdr_value = DEFAULT_FDR_INIT; + + if (!cd->tx_check) + cd->tx_check = DEFAULT_TX_CHECK; + + if (!cd->eesr_err_check) + cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; + + if (!cd->trscer_err_mask) + cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; +} + +static int sh_eth_check_reset(struct net_device *ndev) +{ + int ret = 0; + int cnt = 100; + + while (cnt > 0) { + if (!(sh_eth_read(ndev, EDMR) & 0x3)) + break; + mdelay(1); + cnt--; + } + if (cnt <= 0) { + netdev_err(ndev, "Device reset failed\n"); + ret = -ETIMEDOUT; + } + return ret; +} + +static int sh_eth_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret = 0; + + if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) { + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, + EDMR); + + ret = sh_eth_check_reset(ndev); + if (ret) + return ret; + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); + + /* Reset HW CRC register */ + if (mdp->cd->hw_crc) + sh_eth_write(ndev, 0x0, CSMR); + + /* Select MII mode */ + if (mdp->cd->select_mii) + sh_eth_select_mii(ndev); + } else { + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, + EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, + EDMR); + } + + return ret; +} + +static void sh_eth_set_receive_align(struct sk_buff *skb) +{ + uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); + + if (reserve) + skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); +} + + +/* CPU <-> EDMAC endian convert */ +static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) +{ + switch (mdp->edmac_endian) { + case EDMAC_LITTLE_ENDIAN: + return cpu_to_le32(x); + case EDMAC_BIG_ENDIAN: + return cpu_to_be32(x); + } + return x; +} + +static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) +{ + switch (mdp->edmac_endian) { + case EDMAC_LITTLE_ENDIAN: + return le32_to_cpu(x); + case EDMAC_BIG_ENDIAN: + return be32_to_cpu(x); + } + return x; +} + +/* Program the hardware MAC address from dev->dev_addr. */ +static void update_mac_address(struct net_device *ndev) +{ + sh_eth_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + sh_eth_write(ndev, + (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); +} + +/* Get MAC address from SuperH MAC address register + * + * SuperH's Ethernet device doesn't have 'ROM' to MAC address. + * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). + * When you want use this device, you must set MAC address in bootloader. 
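+ * If no valid address is passed in, the address already programmed into
+ * the MAHR/MALR registers (for example by the loader) is read back.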
+ * + */ +static void read_mac_address(struct net_device *ndev, unsigned char *mac) +{ + if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { + memcpy(ndev->dev_addr, mac, ETH_ALEN); + } else { + ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); + ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; + ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; + ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); + ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; + ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); + } +} + +static u32 sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) +{ + if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) + return EDTRR_TRNS_GETHER; + else + return EDTRR_TRNS_ETHER; +} + +struct bb_info { + void (*set_gate)(void *addr); + struct mdiobb_ctrl ctrl; + void *addr; + u32 mmd_msk;/* MMD */ + u32 mdo_msk; + u32 mdi_msk; + u32 mdc_msk; +}; + +/* PHY bit set */ +static void bb_set(void *addr, u32 msk) +{ + iowrite32(ioread32(addr) | msk, addr); +} + +/* PHY bit clear */ +static void bb_clr(void *addr, u32 msk) +{ + iowrite32((ioread32(addr) & ~msk), addr); +} + +/* PHY bit read */ +static int bb_read(void *addr, u32 msk) +{ + return (ioread32(addr) & msk) != 0; +} + +/* Data I/O pin control */ +static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + if (bit) + bb_set(bitbang->addr, bitbang->mmd_msk); + else + bb_clr(bitbang->addr, bitbang->mmd_msk); +} + +/* Set bit data*/ +static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + if (bit) + bb_set(bitbang->addr, bitbang->mdo_msk); + else + bb_clr(bitbang->addr, bitbang->mdo_msk); +} + +/* Get bit data*/ +static int sh_get_mdio(struct mdiobb_ctrl *ctrl) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + return bb_read(bitbang->addr, bitbang->mdi_msk); +} + +/* MDC pin control */ +static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) +{ + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + + if (bit) + bb_set(bitbang->addr, bitbang->mdc_msk); + else + bb_clr(bitbang->addr, bitbang->mdc_msk); +} + +/* mdio bus control struct */ +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = sh_mdc_ctrl, + .set_mdio_dir = sh_mmd_ctrl, + .set_mdio_data = sh_set_mdio, + .get_mdio_data = sh_get_mdio, +}; + +/* free skb and descriptor buffer */ +static void sh_eth_ring_free(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + + /* Free Rx skb ringbuffer */ + if (mdp->rx_skbuff) { + for (i = 0; i < mdp->num_rx_ring; i++) + dev_kfree_skb(mdp->rx_skbuff[i]); + } + kfree(mdp->rx_skbuff); + mdp->rx_skbuff = NULL; + + /* Free Tx skb ringbuffer */ + if (mdp->tx_skbuff) { + for (i = 0; i < mdp->num_tx_ring; i++) + dev_kfree_skb(mdp->tx_skbuff[i]); + } + kfree(mdp->tx_skbuff); + mdp->tx_skbuff = NULL; +} + +/* format skb and descriptor buffer */ +static void sh_eth_ring_format(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + struct sk_buff *skb; + struct sh_eth_rxdesc *rxdesc = NULL; + struct sh_eth_txdesc *txdesc = NULL; + int rx_ringsize = 
sizeof(*rxdesc) * mdp->num_rx_ring; + int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + dma_addr_t dma_addr; + + mdp->cur_rx = 0; + mdp->cur_tx = 0; + mdp->dirty_rx = 0; + mdp->dirty_tx = 0; + + memset(mdp->rx_ring, 0, rx_ringsize); + + /* build Rx ring buffer */ + for (i = 0; i < mdp->num_rx_ring; i++) { + /* skb */ + mdp->rx_skbuff[i] = NULL; + skb = netdev_alloc_skb(ndev, skbuff_size); + if (skb == NULL) + break; + sh_eth_set_receive_align(skb); + + /* RX descriptor */ + rxdesc = &mdp->rx_ring[i]; + /* The size of the buffer is a multiple of 16 bytes. */ + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); + dma_addr = dma_map_single(&ndev->dev, skb->data, + rxdesc->buffer_length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[i] = skb; + rxdesc->addr = dma_addr; + rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); + + /* Rx descriptor address set */ + if (i == 0) { + sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); + if (sh_eth_is_gether(mdp) || + sh_eth_is_rz_fast_ether(mdp)) + sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); + } + } + + mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); + + /* Mark the last entry as wrapping the ring. */ + rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); + + memset(mdp->tx_ring, 0, tx_ringsize); + + /* build Tx ring buffer */ + for (i = 0; i < mdp->num_tx_ring; i++) { + mdp->tx_skbuff[i] = NULL; + txdesc = &mdp->tx_ring[i]; + txdesc->status = cpu_to_edmac(mdp, TD_TFP); + txdesc->buffer_length = 0; + if (i == 0) { + /* Tx descriptor address set */ + sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); + if (sh_eth_is_gether(mdp) || + sh_eth_is_rz_fast_ether(mdp)) + sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); + } + } + + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); +} + +/* Get skb and descriptor buffer */ +static int sh_eth_ring_init(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int rx_ringsize, tx_ringsize, ret = 0; + + /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the + * card needs room to do 8 byte alignment, +2 so we can reserve + * the first 2 bytes, and +16 gets room for the status word from the + * card. + */ + mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : + (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); + if (mdp->cd->rpadir) + mdp->rx_buf_sz += NET_IP_ALIGN; + + /* Allocate RX and TX skb rings */ + mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring, + sizeof(*mdp->rx_skbuff), GFP_KERNEL); + if (!mdp->rx_skbuff) { + ret = -ENOMEM; + return ret; + } + + mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring, + sizeof(*mdp->tx_skbuff), GFP_KERNEL); + if (!mdp->tx_skbuff) { + ret = -ENOMEM; + goto skb_ring_free; + } + + /* Allocate all Rx descriptors. */ + rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; + mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, + GFP_KERNEL); + if (!mdp->rx_ring) { + ret = -ENOMEM; + goto desc_ring_free; + } + + mdp->dirty_rx = 0; + + /* Allocate all Tx descriptors. 
*/ + tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; + mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, + GFP_KERNEL); + if (!mdp->tx_ring) { + ret = -ENOMEM; + goto desc_ring_free; + } + return ret; + +desc_ring_free: + /* free DMA buffer */ + dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); + +skb_ring_free: + /* Free Rx and Tx skb ring buffer */ + sh_eth_ring_free(ndev); + mdp->tx_ring = NULL; + mdp->rx_ring = NULL; + + return ret; +} + +static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp) +{ + int ringsize; + + if (mdp->rx_ring) { + ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; + dma_free_coherent(NULL, ringsize, mdp->rx_ring, + mdp->rx_desc_dma); + mdp->rx_ring = NULL; + } + + if (mdp->tx_ring) { + ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; + dma_free_coherent(NULL, ringsize, mdp->tx_ring, + mdp->tx_desc_dma); + mdp->tx_ring = NULL; + } +} + +static int sh_eth_dev_init(struct net_device *ndev, bool start) +{ + int ret = 0; + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 val; + + /* Soft Reset */ + ret = sh_eth_reset(ndev); + if (ret) + return ret; + + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* Descriptor format */ + sh_eth_ring_format(ndev); + if (mdp->cd->rpadir) + sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); + + /* all sh_eth int mask */ + sh_eth_write(ndev, 0, EESIPR); + +#if defined(__LITTLE_ENDIAN) + if (mdp->cd->hw_swap) + sh_eth_write(ndev, EDMR_EL, EDMR); + else +#endif + sh_eth_write(ndev, 0, EDMR); + + /* FIFO size set */ + sh_eth_write(ndev, mdp->cd->fdr_value, FDR); + sh_eth_write(ndev, 0, TFTR); + + /* Frame recv control (enable multiple-packets per rx irq) */ + sh_eth_write(ndev, RMCR_RNC, RMCR); + + sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); + + if (mdp->cd->bculr) + sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ + + sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); + + if (!mdp->cd->no_trimd) + sh_eth_write(ndev, 0, TRIMD); + + /* Recv frame limit set register */ + sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, + RFLR); + + sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); + if (start) { + mdp->irq_enabled = true; + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + } + + /* PAUSE Prohibition */ + val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | + ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; + + sh_eth_write(ndev, val, ECMR); + + if (mdp->cd->set_rate) + mdp->cd->set_rate(ndev); + + /* E-MAC Status Register clear */ + sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); + + /* E-MAC Interrupt Enable register */ + if (start) + sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); + + /* Set MAC address */ + update_mac_address(ndev); + + /* mask reset */ + if (mdp->cd->apr) + sh_eth_write(ndev, APR_AP, APR); + if (mdp->cd->mpr) + sh_eth_write(ndev, MPR_MP, MPR); + if (mdp->cd->tpauser) + sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); + + if (start) { + /* Setting the Rx mode will start the Rx process. 
*/ + sh_eth_write(ndev, EDRRR_R, EDRRR); + + netif_start_queue(ndev); + } + + return ret; +} + +static void sh_eth_dev_exit(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i; + + /* Deactivate all TX descriptors, so DMA should stop at next + * packet boundary if it's currently running + */ + for (i = 0; i < mdp->num_tx_ring; i++) + mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); + + /* Disable TX FIFO egress to MAC */ + sh_eth_rcv_snd_disable(ndev); + + /* Stop RX DMA at next packet boundary */ + sh_eth_write(ndev, 0, EDRRR); + + /* Aside from TX DMA, we can't tell when the hardware is + * really stopped, so we need to reset to make sure. + * Before doing that, wait for long enough to *probably* + * finish transmitting the last packet and poll stats. + */ + msleep(2); /* max frame time at 10 Mbps < 1250 us */ + sh_eth_get_stats(ndev); + sh_eth_reset(ndev); + + /* Set MAC address again */ + update_mac_address(ndev); +} + +/* free Tx skb function */ +static int sh_eth_txfree(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_txdesc *txdesc; + int free_num = 0; + int entry = 0; + + for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { + entry = mdp->dirty_tx % mdp->num_tx_ring; + txdesc = &mdp->tx_ring[entry]; + if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) + break; + /* TACT bit must be checked before all the following reads */ + rmb(); + netif_info(mdp, tx_done, ndev, + "tx entry %d status 0x%08x\n", + entry, edmac_to_cpu(mdp, txdesc->status)); + /* Free the original skb. */ + if (mdp->tx_skbuff[entry]) { + dma_unmap_single(&ndev->dev, txdesc->addr, + txdesc->buffer_length, DMA_TO_DEVICE); + dev_kfree_skb_irq(mdp->tx_skbuff[entry]); + mdp->tx_skbuff[entry] = NULL; + free_num++; + } + txdesc->status = cpu_to_edmac(mdp, TD_TFP); + if (entry >= mdp->num_tx_ring - 1) + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += txdesc->buffer_length; + } + return free_num; +} + +/* Packet receive function */ +static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + + int entry = mdp->cur_rx % mdp->num_rx_ring; + int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; + int limit; + struct sk_buff *skb; + u16 pkt_len = 0; + u32 desc_status; + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; + dma_addr_t dma_addr; + + boguscnt = min(boguscnt, *quota); + limit = boguscnt; + rxdesc = &mdp->rx_ring[entry]; + while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + /* RACT bit must be checked before all the following reads */ + rmb(); + desc_status = edmac_to_cpu(mdp, rxdesc->status); + pkt_len = rxdesc->frame_length; + + if (--boguscnt < 0) + break; + + netif_info(mdp, rx_status, ndev, + "rx entry %d status 0x%08x len %d\n", + entry, desc_status, pkt_len); + + if (!(desc_status & RDFEND)) + ndev->stats.rx_length_errors++; + + /* In case of almost all GETHER/ETHERs, the Receive Frame State + * (RFS) bits in the Receive Descriptor 0 are from bit 9 to + * bit 0. However, in case of the R8A7740 and R7S72100 + * the RFS bits are from bit 25 to bit 16. So, the + * driver needs right shifting by 16. 
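+ * (The per-SoC shift_rd0 flag in struct sh_eth_cpu_data selects this
+ * 16-bit shift.)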
+ */ + if (mdp->cd->shift_rd0) + desc_status >>= 16; + + if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | + RD_RFS5 | RD_RFS6 | RD_RFS10)) { + ndev->stats.rx_errors++; + if (desc_status & RD_RFS1) + ndev->stats.rx_crc_errors++; + if (desc_status & RD_RFS2) + ndev->stats.rx_frame_errors++; + if (desc_status & RD_RFS3) + ndev->stats.rx_length_errors++; + if (desc_status & RD_RFS4) + ndev->stats.rx_length_errors++; + if (desc_status & RD_RFS6) + ndev->stats.rx_missed_errors++; + if (desc_status & RD_RFS10) + ndev->stats.rx_over_errors++; + } else { + if (!mdp->cd->hw_swap) + sh_eth_soft_swap( + phys_to_virt(ALIGN(rxdesc->addr, 4)), + pkt_len + 2); + skb = mdp->rx_skbuff[entry]; + mdp->rx_skbuff[entry] = NULL; + if (mdp->cd->rpadir) + skb_reserve(skb, NET_IP_ALIGN); + dma_unmap_single(&ndev->dev, rxdesc->addr, + ALIGN(mdp->rx_buf_sz, 16), + DMA_FROM_DEVICE); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += pkt_len; + if (desc_status & RD_RFS8) + ndev->stats.multicast++; + } + entry = (++mdp->cur_rx) % mdp->num_rx_ring; + rxdesc = &mdp->rx_ring[entry]; + } + + /* Refill the Rx ring buffers. */ + for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { + entry = mdp->dirty_rx % mdp->num_rx_ring; + rxdesc = &mdp->rx_ring[entry]; + /* The size of the buffer is 16 byte boundary. */ + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); + + if (mdp->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(ndev, skbuff_size); + if (skb == NULL) + break; /* Better luck next round. */ + sh_eth_set_receive_align(skb); + dma_addr = dma_map_single(&ndev->dev, skb->data, + rxdesc->buffer_length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { + kfree_skb(skb); + break; + } + mdp->rx_skbuff[entry] = skb; + + skb_checksum_none_assert(skb); + rxdesc->addr = dma_addr; + } + wmb(); /* RACT bit must be set after all the above writes */ + if (entry >= mdp->num_rx_ring - 1) + rxdesc->status |= + cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); + else + rxdesc->status |= + cpu_to_edmac(mdp, RD_RACT | RD_RFP); + } + + /* Restart Rx engine if stopped. */ + /* If we don't need to check status, don't. 
-KDU */ + if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { + /* fix the values for the next receiving if RDE is set */ + if (intr_status & EESR_RDE && + mdp->reg_offset[RDFAR] != SH_ETH_OFFSET_INVALID) { + u32 count = (sh_eth_read(ndev, RDFAR) - + sh_eth_read(ndev, RDLAR)) >> 4; + + mdp->cur_rx = count; + mdp->dirty_rx = count; + } + sh_eth_write(ndev, EDRRR_R, EDRRR); + } + + *quota -= limit - boguscnt - 1; + + return *quota <= 0; +} + +static void sh_eth_rcv_snd_disable(struct net_device *ndev) +{ + /* disable tx and rx */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & + ~(ECMR_RE | ECMR_TE), ECMR); +} + +static void sh_eth_rcv_snd_enable(struct net_device *ndev) +{ + /* enable tx and rx */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | + (ECMR_RE | ECMR_TE), ECMR); +} + +/* error control function */ +static void sh_eth_error(struct net_device *ndev, u32 intr_status) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 felic_stat; + u32 link_stat; + u32 mask; + + if (intr_status & EESR_ECI) { + felic_stat = sh_eth_read(ndev, ECSR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ + if (felic_stat & ECSR_ICD) + ndev->stats.tx_carrier_errors++; + if (felic_stat & ECSR_LCHNG) { + /* Link Changed */ + if (mdp->cd->no_psr || mdp->no_ether_link) { + goto ignore_link; + } else { + link_stat = (sh_eth_read(ndev, PSR)); + if (mdp->ether_link_active_low) + link_stat = ~link_stat; + } + if (!(link_stat & PHY_ST_LINK)) { + sh_eth_rcv_snd_disable(ndev); + } else { + /* Link Up */ + sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & + ~DMAC_M_ECI, EESIPR); + /* clear int */ + sh_eth_write(ndev, sh_eth_read(ndev, ECSR), + ECSR); + sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | + DMAC_M_ECI, EESIPR); + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); + } + } + } + +ignore_link: + if (intr_status & EESR_TWB) { + /* Unused write back interrupt */ + if (intr_status & EESR_TABT) { /* Transmit Abort int */ + ndev->stats.tx_aborted_errors++; + netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); + } + } + + if (intr_status & EESR_RABT) { + /* Receive Abort int */ + if (intr_status & EESR_RFRMER) { + /* Receive Frame Overflow int */ + ndev->stats.rx_frame_errors++; + } + } + + if (intr_status & EESR_TDE) { + /* Transmit Descriptor Empty int */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); + } + + if (intr_status & EESR_TFE) { + /* FIFO under flow */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); + } + + if (intr_status & EESR_RDE) { + /* Receive Descriptor Empty int */ + ndev->stats.rx_over_errors++; + } + + if (intr_status & EESR_RFE) { + /* Receive FIFO Overflow int */ + ndev->stats.rx_fifo_errors++; + } + + if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { + /* Address Error */ + ndev->stats.tx_fifo_errors++; + netif_err(mdp, tx_err, ndev, "Address Error\n"); + } + + mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; + if (mdp->cd->no_ade) + mask &= ~EESR_ADE; + if (intr_status & mask) { + /* Tx error */ + u32 edtrr = sh_eth_read(ndev, EDTRR); + + /* dmesg */ + netdev_err(ndev, "TX error. 
status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", + intr_status, mdp->cur_tx, mdp->dirty_tx, + (u32)ndev->state, edtrr); + /* dirty buffer free */ + sh_eth_txfree(ndev); + + /* SH7712 BUG */ + if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { + /* tx dma start */ + sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); + } + /* wakeup */ + netif_wake_queue(ndev); + } +} + +static irqreturn_t sh_eth_interrupt(int irq, void *netdev) +{ + struct net_device *ndev = netdev; + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + irqreturn_t ret = IRQ_NONE; + u32 intr_status, intr_enable; + + spin_lock(&mdp->lock); + + /* Get interrupt status */ + intr_status = sh_eth_read(ndev, EESR); + /* Mask it with the interrupt mask, forcing ECI interrupt to be always + * enabled since it's the one that comes thru regardless of the mask, + * and we need to fully handle it in sh_eth_error() in order to quench + * it as it doesn't get cleared by just writing 1 to the ECI bit... + */ + intr_enable = sh_eth_read(ndev, EESIPR); + intr_status &= intr_enable | DMAC_M_ECI; + if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) + ret = IRQ_HANDLED; + else + goto out; + + if (!likely(mdp->irq_enabled)) { + sh_eth_write(ndev, 0, EESIPR); + goto out; + } + + if (intr_status & EESR_RX_CHECK) { + if (napi_schedule_prep(&mdp->napi)) { + /* Mask Rx interrupts */ + sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK, + EESIPR); + __napi_schedule(&mdp->napi); + } else { + netdev_warn(ndev, + "ignoring interrupt, status 0x%08x, mask 0x%08x.\n", + intr_status, intr_enable); + } + } + + /* Tx Check */ + if (intr_status & cd->tx_check) { + /* Clear Tx interrupts */ + sh_eth_write(ndev, intr_status & cd->tx_check, EESR); + + sh_eth_txfree(ndev); + netif_wake_queue(ndev); + } + + if (intr_status & cd->eesr_err_check) { + /* Clear error interrupts */ + sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR); + + sh_eth_error(ndev, intr_status); + } + +out: + spin_unlock(&mdp->lock); + + return ret; +} + +static int sh_eth_poll(struct napi_struct *napi, int budget) +{ + struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, + napi); + struct net_device *ndev = napi->dev; + int quota = budget; + u32 intr_status; + + for (;;) { + intr_status = sh_eth_read(ndev, EESR); + if (!(intr_status & EESR_RX_CHECK)) + break; + /* Clear Rx interrupts */ + sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR); + + if (sh_eth_rx(ndev, intr_status, "a)) + goto out; + } + + napi_complete(napi); + + /* Reenable Rx interrupts */ + if (mdp->irq_enabled) + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); +out: + return budget - quota; +} + +/* PHY state control function */ +static void sh_eth_adjust_link(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev = mdp->phydev; + int new_state = 0; + + if (phydev->link) { + if (phydev->duplex != mdp->duplex) { + new_state = 1; + mdp->duplex = phydev->duplex; + if (mdp->cd->set_duplex) + mdp->cd->set_duplex(ndev); + } + + if (phydev->speed != mdp->speed) { + new_state = 1; + mdp->speed = phydev->speed; + if (mdp->cd->set_rate) + mdp->cd->set_rate(ndev); + } + if (!mdp->link) { + sh_eth_write(ndev, + sh_eth_read(ndev, ECMR) & ~ECMR_TXF, + ECMR); + new_state = 1; + mdp->link = phydev->link; + if (mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_enable(ndev); + } + } else if (mdp->link) { + new_state = 1; + mdp->link = 0; + mdp->speed = 0; + mdp->duplex = -1; + if 
(mdp->cd->no_psr || mdp->no_ether_link) + sh_eth_rcv_snd_disable(ndev); + } + + if (new_state && netif_msg_link(mdp)) + phy_print_status(phydev); +} + +/* PHY init function */ +static int sh_eth_phy_init(struct net_device *ndev) +{ + struct device_node *np = ndev->dev.parent->of_node; + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev = NULL; + + mdp->link = 0; + mdp->speed = 0; + mdp->duplex = -1; + + /* Try connect to PHY */ + if (np) { + struct device_node *pn; + + pn = of_parse_phandle(np, "phy-handle", 0); + phydev = of_phy_connect(ndev, pn, + sh_eth_adjust_link, 0, + mdp->phy_interface); + + if (!phydev) + phydev = ERR_PTR(-ENOENT); + } else { + char phy_id[MII_BUS_ID_SIZE + 3]; + + snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, + mdp->mii_bus->id, mdp->phy_id); + + phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, + mdp->phy_interface); + } + + if (IS_ERR(phydev)) { + netdev_err(ndev, "failed to connect PHY\n"); + return PTR_ERR(phydev); + } + + netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", + phydev->addr, phydev->irq, phydev->drv->name); + + mdp->phydev = phydev; + + return 0; +} + +/* PHY control start function */ +static int sh_eth_phy_start(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + ret = sh_eth_phy_init(ndev); + if (ret) + return ret; + + phy_start(mdp->phydev); + + return 0; +} + +static int sh_eth_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + if (!mdp->phydev) + return -ENODEV; + + spin_lock_irqsave(&mdp->lock, flags); + ret = phy_ethtool_gset(mdp->phydev, ecmd); + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static int sh_eth_set_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + if (!mdp->phydev) + return -ENODEV; + + spin_lock_irqsave(&mdp->lock, flags); + + /* disable tx and rx */ + sh_eth_rcv_snd_disable(ndev); + + ret = phy_ethtool_sset(mdp->phydev, ecmd); + if (ret) + goto error_exit; + + if (ecmd->duplex == DUPLEX_FULL) + mdp->duplex = 1; + else + mdp->duplex = 0; + + if (mdp->cd->set_duplex) + mdp->cd->set_duplex(ndev); + +error_exit: + mdelay(1); + + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the + * version must be bumped as well. Just adding registers up to that + * limit is fine, as long as the existing register indices don't + * change. + */ +#define SH_ETH_REG_DUMP_VERSION 1 +#define SH_ETH_REG_DUMP_MAX_REGS 256 + +static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_cpu_data *cd = mdp->cd; + u32 *valid_map; + size_t len; + + BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS); + + /* Dump starts with a bitmap that tells ethtool which + * registers are defined for this chip. + */ + len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32); + if (buf) { + valid_map = buf; + buf += len; + } else { + valid_map = NULL; + } + + /* Add a register to the dump, if it has a defined offset. + * This automatically skips most undefined registers, but for + * some it is also necessary to check a capability flag in + * struct sh_eth_cpu_data. 
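+ * (For example, RMIIMODE is only dumped when cd->rmiimode is set.)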
+ */ +#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32) +#define add_reg_from(reg, read_expr) do { \ + if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ + if (buf) { \ + mark_reg_valid(reg); \ + *buf++ = read_expr; \ + } \ + ++len; \ + } \ + } while (0) +#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg)) +#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) + + add_reg(EDSR); + add_reg(EDMR); + add_reg(EDTRR); + add_reg(EDRRR); + add_reg(EESR); + add_reg(EESIPR); + add_reg(TDLAR); + add_reg(TDFAR); + add_reg(TDFXR); + add_reg(TDFFR); + add_reg(RDLAR); + add_reg(RDFAR); + add_reg(RDFXR); + add_reg(RDFFR); + add_reg(TRSCER); + add_reg(RMFCR); + add_reg(TFTR); + add_reg(FDR); + add_reg(RMCR); + add_reg(TFUCR); + add_reg(RFOCR); + if (cd->rmiimode) + add_reg(RMIIMODE); + add_reg(FCFTR); + if (cd->rpadir) + add_reg(RPADIR); + if (!cd->no_trimd) + add_reg(TRIMD); + add_reg(ECMR); + add_reg(ECSR); + add_reg(ECSIPR); + add_reg(PIR); + if (!cd->no_psr) + add_reg(PSR); + add_reg(RDMLR); + add_reg(RFLR); + add_reg(IPGR); + if (cd->apr) + add_reg(APR); + if (cd->mpr) + add_reg(MPR); + add_reg(RFCR); + add_reg(RFCF); + if (cd->tpauser) + add_reg(TPAUSER); + add_reg(TPAUSECR); + add_reg(GECMR); + if (cd->bculr) + add_reg(BCULR); + add_reg(MAHR); + add_reg(MALR); + add_reg(TROCR); + add_reg(CDCR); + add_reg(LCCR); + add_reg(CNDCR); + add_reg(CEFCR); + add_reg(FRECR); + add_reg(TSFRCR); + add_reg(TLFRCR); + add_reg(CERCR); + add_reg(CEECR); + add_reg(MAFCR); + if (cd->rtrate) + add_reg(RTRATE); + if (cd->hw_crc) + add_reg(CSMR); + if (cd->select_mii) + add_reg(RMII_MII); + add_reg(ARSTR); + if (cd->tsu) { + add_tsu_reg(TSU_CTRST); + add_tsu_reg(TSU_FWEN0); + add_tsu_reg(TSU_FWEN1); + add_tsu_reg(TSU_FCM); + add_tsu_reg(TSU_BSYSL0); + add_tsu_reg(TSU_BSYSL1); + add_tsu_reg(TSU_PRISL0); + add_tsu_reg(TSU_PRISL1); + add_tsu_reg(TSU_FWSL0); + add_tsu_reg(TSU_FWSL1); + add_tsu_reg(TSU_FWSLC); + add_tsu_reg(TSU_QTAG0); + add_tsu_reg(TSU_QTAG1); + add_tsu_reg(TSU_QTAGM0); + add_tsu_reg(TSU_QTAGM1); + add_tsu_reg(TSU_FWSR); + add_tsu_reg(TSU_FWINMK); + add_tsu_reg(TSU_ADQT0); + add_tsu_reg(TSU_ADQT1); + add_tsu_reg(TSU_VTAG0); + add_tsu_reg(TSU_VTAG1); + add_tsu_reg(TSU_ADSBSY); + add_tsu_reg(TSU_TEN); + add_tsu_reg(TSU_POST1); + add_tsu_reg(TSU_POST2); + add_tsu_reg(TSU_POST3); + add_tsu_reg(TSU_POST4); + if (mdp->reg_offset[TSU_ADRH0] != SH_ETH_OFFSET_INVALID) { + /* This is the start of a table, not just a single + * register. 
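+ * Each CAM entry is a pair of 32-bit words (upper and lower halves of
+ * the MAC address), so SH_ETH_TSU_CAM_ENTRIES * 2 words are dumped.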
+ */ + if (buf) { + unsigned int i; + + mark_reg_valid(TSU_ADRH0); + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++) + *buf++ = ioread32( + mdp->tsu_addr + + mdp->reg_offset[TSU_ADRH0] + + i * 4); + } + len += SH_ETH_TSU_CAM_ENTRIES * 2; + } + } + +#undef mark_reg_valid +#undef add_reg_from +#undef add_reg +#undef add_tsu_reg + + return len * 4; +} + +static int sh_eth_get_regs_len(struct net_device *ndev) +{ + return __sh_eth_get_regs(ndev, NULL); +} + +static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *buf) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + regs->version = SH_ETH_REG_DUMP_VERSION; + + pm_runtime_get_sync(&mdp->pdev->dev); + __sh_eth_get_regs(ndev, buf); + pm_runtime_put_sync(&mdp->pdev->dev); +} + +static int sh_eth_nway_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + if (!mdp->phydev) + return -ENODEV; + + spin_lock_irqsave(&mdp->lock, flags); + ret = phy_start_aneg(mdp->phydev); + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static u32 sh_eth_get_msglevel(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + return mdp->msg_enable; +} + +static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + mdp->msg_enable = value; +} + +static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_current", "tx_current", + "rx_dirty", "tx_dirty", +}; +#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) + +static int sh_eth_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return SH_ETH_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void sh_eth_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i = 0; + + /* device-specific stats */ + data[i++] = mdp->cur_rx; + data[i++] = mdp->cur_tx; + data[i++] = mdp->dirty_rx; + data[i++] = mdp->dirty_tx; +} + +static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *sh_eth_gstrings_stats, + sizeof(sh_eth_gstrings_stats)); + break; + } +} + +static void sh_eth_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + ring->rx_max_pending = RX_RING_MAX; + ring->tx_max_pending = TX_RING_MAX; + ring->rx_pending = mdp->num_rx_ring; + ring->tx_pending = mdp->num_tx_ring; +} + +static int sh_eth_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; + + if (ring->tx_pending > TX_RING_MAX || + ring->rx_pending > RX_RING_MAX || + ring->tx_pending < TX_RING_MIN || + ring->rx_pending < RX_RING_MIN) + return -EINVAL; + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + netif_tx_disable(ndev); + + /* Serialise with the interrupt handler and NAPI, then + * disable interrupts. We have to clear the + * irq_enabled flag first to ensure that interrupts + * won't be re-enabled. + */ + mdp->irq_enabled = false; + synchronize_irq(ndev->irq); + napi_synchronize(&mdp->napi); + sh_eth_write(ndev, 0x0000, EESIPR); + + sh_eth_dev_exit(ndev); + + /* Free all the skbuffs in the Rx queue. 
*/ + sh_eth_ring_free(ndev); + /* Free DMA buffer */ + sh_eth_free_dma_buffer(mdp); + } + + /* Set new parameters */ + mdp->num_rx_ring = ring->rx_pending; + mdp->num_tx_ring = ring->tx_pending; + + if (netif_running(ndev)) { + ret = sh_eth_ring_init(ndev); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", + __func__); + return ret; + } + ret = sh_eth_dev_init(ndev, false); + if (ret < 0) { + netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", + __func__); + return ret; + } + + mdp->irq_enabled = true; + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); + /* Setting the Rx mode will start the Rx process. */ + sh_eth_write(ndev, EDRRR_R, EDRRR); + netif_device_attach(ndev); + } + + return 0; +} + +static const struct ethtool_ops sh_eth_ethtool_ops = { + .get_settings = sh_eth_get_settings, + .set_settings = sh_eth_set_settings, + .get_regs_len = sh_eth_get_regs_len, + .get_regs = sh_eth_get_regs, + .nway_reset = sh_eth_nway_reset, + .get_msglevel = sh_eth_get_msglevel, + .set_msglevel = sh_eth_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = sh_eth_get_strings, + .get_ethtool_stats = sh_eth_get_ethtool_stats, + .get_sset_count = sh_eth_get_sset_count, + .get_ringparam = sh_eth_get_ringparam, + .set_ringparam = sh_eth_set_ringparam, +}; + +/* network device open function */ +static int sh_eth_open(struct net_device *ndev) +{ + int ret = 0; + struct sh_eth_private *mdp = netdev_priv(ndev); + + pm_runtime_get_sync(&mdp->pdev->dev); + + napi_enable(&mdp->napi); + + ret = request_irq(ndev->irq, sh_eth_interrupt, + mdp->cd->irq_flags, ndev->name, ndev); + if (ret) { + netdev_err(ndev, "Can not assign IRQ number\n"); + goto out_napi_off; + } + + /* Descriptor set */ + ret = sh_eth_ring_init(ndev); + if (ret) + goto out_free_irq; + + /* device init */ + ret = sh_eth_dev_init(ndev, true); + if (ret) + goto out_free_irq; + + /* PHY control start*/ + ret = sh_eth_phy_start(ndev); + if (ret) + goto out_free_irq; + + mdp->is_opened = 1; + + return ret; + +out_free_irq: + free_irq(ndev->irq, ndev); +out_napi_off: + napi_disable(&mdp->napi); + pm_runtime_put_sync(&mdp->pdev->dev); + return ret; +} + +/* Timeout function */ +static void sh_eth_tx_timeout(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_rxdesc *rxdesc; + int i; + + netif_stop_queue(ndev); + + netif_err(mdp, timer, ndev, + "transmit timed out, status %8.8x, resetting...\n", + sh_eth_read(ndev, EESR)); + + /* tx_errors count up */ + ndev->stats.tx_errors++; + + /* Free all the skbuffs in the Rx queue. 
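+ * The descriptors are also cleared and their addresses poisoned so that
+ * no stale buffer is reused before sh_eth_dev_init() rebuilds the rings.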
*/ + for (i = 0; i < mdp->num_rx_ring; i++) { + rxdesc = &mdp->rx_ring[i]; + rxdesc->status = 0; + rxdesc->addr = 0xBADF00D0; + dev_kfree_skb(mdp->rx_skbuff[i]); + mdp->rx_skbuff[i] = NULL; + } + for (i = 0; i < mdp->num_tx_ring; i++) { + dev_kfree_skb(mdp->tx_skbuff[i]); + mdp->tx_skbuff[i] = NULL; + } + + /* device init */ + sh_eth_dev_init(ndev, true); +} + +/* Packet transmit function */ +static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct sh_eth_txdesc *txdesc; + u32 entry; + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { + if (!sh_eth_txfree(ndev)) { + netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); + netif_stop_queue(ndev); + spin_unlock_irqrestore(&mdp->lock, flags); + return NETDEV_TX_BUSY; + } + } + spin_unlock_irqrestore(&mdp->lock, flags); + + if (skb_put_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + entry = mdp->cur_tx % mdp->num_tx_ring; + mdp->tx_skbuff[entry] = skb; + txdesc = &mdp->tx_ring[entry]; + /* soft swap. */ + if (!mdp->cd->hw_swap) + sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), + skb->len + 2); + txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, txdesc->addr)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + txdesc->buffer_length = skb->len; + + wmb(); /* TACT bit must be set after all the above writes */ + if (entry >= mdp->num_tx_ring - 1) + txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); + else + txdesc->status |= cpu_to_edmac(mdp, TD_TACT); + + mdp->cur_tx++; + + if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) + sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); + + return NETDEV_TX_OK; +} + +/* The statistics registers have write-clear behaviour, which means we + * will lose any increment between the read and write. We mitigate + * this by only clearing when we read a non-zero value, so we will + * never falsely report a total of zero. + */ +static void +sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg) +{ + u32 delta = sh_eth_read(ndev, reg); + + if (delta) { + *stat += delta; + sh_eth_write(ndev, 0, reg); + } +} + +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (sh_eth_is_rz_fast_ether(mdp)) + return &ndev->stats; + + if (!mdp->is_opened) + return &ndev->stats; + + sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR); + sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR); + + if (sh_eth_is_gether(mdp)) { + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CERCR); + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CEECR); + } else { + sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, + CNDCR); + } + + return &ndev->stats; +} + +/* device close function */ +static int sh_eth_close(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + netif_stop_queue(ndev); + + /* Serialise with the interrupt handler and NAPI, then disable + * interrupts. We have to clear the irq_enabled flag first to + * ensure that interrupts won't be re-enabled. 
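+ * (If the interrupt handler still runs after this point, it sees
+ * irq_enabled == false and masks EESIPR itself.)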
+ */ + mdp->irq_enabled = false; + synchronize_irq(ndev->irq); + napi_disable(&mdp->napi); + sh_eth_write(ndev, 0x0000, EESIPR); + + sh_eth_dev_exit(ndev); + + /* PHY Disconnect */ + if (mdp->phydev) { + phy_stop(mdp->phydev); + phy_disconnect(mdp->phydev); + mdp->phydev = NULL; + } + + free_irq(ndev->irq, ndev); + + /* Free all the skbuffs in the Rx queue. */ + sh_eth_ring_free(ndev); + + /* free DMA buffer */ + sh_eth_free_dma_buffer(mdp); + + pm_runtime_put_sync(&mdp->pdev->dev); + + mdp->is_opened = 0; + + return 0; +} + +/* ioctl to device function */ +static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + struct phy_device *phydev = mdp->phydev; + + if (!netif_running(ndev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + return phy_mii_ioctl(phydev, rq, cmd); +} + +/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ +static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, + int entry) +{ + return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); +} + +static u32 sh_eth_tsu_get_post_mask(int entry) +{ + return 0x0f << (28 - ((entry % 8) * 4)); +} + +static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) +{ + return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); +} + +static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, + int entry) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 tmp; + void *reg_offset; + + reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); + tmp = ioread32(reg_offset); + iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); +} + +static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, + int entry) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 post_mask, ref_mask, tmp; + void *reg_offset; + + reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); + post_mask = sh_eth_tsu_get_post_mask(entry); + ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; + + tmp = ioread32(reg_offset); + iowrite32(tmp & ~post_mask, reg_offset); + + /* If other port enables, the function returns "true" */ + return tmp & ref_mask; +} + +static int sh_eth_tsu_busy(struct net_device *ndev) +{ + int timeout = SH_ETH_TSU_TIMEOUT_MS * 100; + struct sh_eth_private *mdp = netdev_priv(ndev); + + while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { + udelay(10); + timeout--; + if (timeout <= 0) { + netdev_err(ndev, "%s: timeout\n", __func__); + return -ETIMEDOUT; + } + } + + return 0; +} + +static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, + const u8 *addr) +{ + u32 val; + + val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; + iowrite32(val, reg); + if (sh_eth_tsu_busy(ndev) < 0) + return -EBUSY; + + val = addr[4] << 8 | addr[5]; + iowrite32(val, reg + 4); + if (sh_eth_tsu_busy(ndev) < 0) + return -EBUSY; + + return 0; +} + +static void sh_eth_tsu_read_entry(void *reg, u8 *addr) +{ + u32 val; + + val = ioread32(reg); + addr[0] = (val >> 24) & 0xff; + addr[1] = (val >> 16) & 0xff; + addr[2] = (val >> 8) & 0xff; + addr[3] = val & 0xff; + val = ioread32(reg + 4); + addr[4] = (val >> 8) & 0xff; + addr[5] = val & 0xff; +} + + +static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + int i; + u8 c_addr[ETH_ALEN]; + + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { + 
sh_eth_tsu_read_entry(reg_offset, c_addr); + if (ether_addr_equal(addr, c_addr)) + return i; + } + + return -ENOENT; +} + +static int sh_eth_tsu_find_empty(struct net_device *ndev) +{ + u8 blank[ETH_ALEN]; + int entry; + + memset(blank, 0, sizeof(blank)); + entry = sh_eth_tsu_find_entry(ndev, blank); + return (entry < 0) ? -ENOMEM : entry; +} + +static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, + int entry) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + int ret; + u8 blank[ETH_ALEN]; + + sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & + ~(1 << (31 - entry)), TSU_TEN); + + memset(blank, 0, sizeof(blank)); + ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); + if (ret < 0) + return ret; + return 0; +} + +static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + int i, ret; + + if (!mdp->cd->tsu) + return 0; + + i = sh_eth_tsu_find_entry(ndev, addr); + if (i < 0) { + /* No entry found, create one */ + i = sh_eth_tsu_find_empty(ndev); + if (i < 0) + return -ENOMEM; + ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); + if (ret < 0) + return ret; + + /* Enable the entry */ + sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | + (1 << (31 - i)), TSU_TEN); + } + + /* Entry found or created, enable POST */ + sh_eth_tsu_enable_cam_entry_post(ndev, i); + + return 0; +} + +static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i, ret; + + if (!mdp->cd->tsu) + return 0; + + i = sh_eth_tsu_find_entry(ndev, addr); + if (i) { + /* Entry found */ + if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) + goto done; + + /* Disable the entry if both ports was disabled */ + ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); + if (ret < 0) + return ret; + } +done: + return 0; +} + +static int sh_eth_tsu_purge_all(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i, ret; + + if (!mdp->cd->tsu) + return 0; + + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { + if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) + continue; + + /* Disable the entry if both ports was disabled */ + ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static void sh_eth_tsu_purge_mcast(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u8 addr[ETH_ALEN]; + void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); + int i; + + if (!mdp->cd->tsu) + return; + + for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { + sh_eth_tsu_read_entry(reg_offset, addr); + if (is_multicast_ether_addr(addr)) + sh_eth_tsu_del_entry(ndev, addr); + } +} + +/* Update promiscuous flag and multicast filter */ +static void sh_eth_set_rx_mode(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u32 ecmr_bits; + int mcast_all = 0; + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + /* Initial condition is MCT = 1, PRM = 0. 
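+ * (MCT selects TSU-based multicast filtering, PRM enables promiscuous
+ * reception.)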
+ * Depending on ndev->flags, set PRM or clear MCT + */ + ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM; + if (mdp->cd->tsu) + ecmr_bits |= ECMR_MCT; + + if (!(ndev->flags & IFF_MULTICAST)) { + sh_eth_tsu_purge_mcast(ndev); + mcast_all = 1; + } + if (ndev->flags & IFF_ALLMULTI) { + sh_eth_tsu_purge_mcast(ndev); + ecmr_bits &= ~ECMR_MCT; + mcast_all = 1; + } + + if (ndev->flags & IFF_PROMISC) { + sh_eth_tsu_purge_all(ndev); + ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM; + } else if (mdp->cd->tsu) { + struct netdev_hw_addr *ha; + netdev_for_each_mc_addr(ha, ndev) { + if (mcast_all && is_multicast_ether_addr(ha->addr)) + continue; + + if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) { + if (!mcast_all) { + sh_eth_tsu_purge_mcast(ndev); + ecmr_bits &= ~ECMR_MCT; + mcast_all = 1; + } + } + } + } + + /* update the ethernet mode */ + sh_eth_write(ndev, ecmr_bits, ECMR); + + spin_unlock_irqrestore(&mdp->lock, flags); +} + +static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) +{ + if (!mdp->port) + return TSU_VTAG0; + else + return TSU_VTAG1; +} + +static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int vtag_reg_index = sh_eth_get_vtag_index(mdp); + + if (unlikely(!mdp->cd->tsu)) + return -EPERM; + + /* No filtering if vid = 0 */ + if (!vid) + return 0; + + mdp->vlan_num_ids++; + + /* The controller has one VLAN tag HW filter. So, if the filter is + * already enabled, the driver disables it so that frames for all + * VLAN IDs are received. + */ + if (mdp->vlan_num_ids > 1) { + /* disable VLAN filter */ + sh_eth_tsu_write(mdp, 0, vtag_reg_index); + return 0; + } + + sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), + vtag_reg_index); + + return 0; +} + +static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int vtag_reg_index = sh_eth_get_vtag_index(mdp); + + if (unlikely(!mdp->cd->tsu)) + return -EPERM; + + /* No filtering if vid = 0 */ + if (!vid) + return 0; + + mdp->vlan_num_ids--; + sh_eth_tsu_write(mdp, 0, vtag_reg_index); + + return 0; +} + +/* SuperH's TSU register init function */ +static void sh_eth_tsu_init(struct sh_eth_private *mdp) +{ + if (sh_eth_is_rz_fast_ether(mdp)) { + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + return; + } + + sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ + sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); + sh_eth_tsu_write(mdp, 0, TSU_PRISL0); + sh_eth_tsu_write(mdp, 0, TSU_PRISL1); + sh_eth_tsu_write(mdp, 0, TSU_FWSL0); + sh_eth_tsu_write(mdp, 0, TSU_FWSL1); + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); + if (sh_eth_is_gether(mdp)) { + sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ + } else { + sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ + } + sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ + sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ + sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ + sh_eth_tsu_write(mdp, 0,
TSU_POST3); /* Disable CAM entry [16-23] */ + sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ +} + +/* MDIO bus release function */ +static int sh_mdio_release(struct sh_eth_private *mdp) +{ + /* unregister mdio bus */ + mdiobus_unregister(mdp->mii_bus); + + /* free bitbang info */ + free_mdio_bitbang(mdp->mii_bus); + + return 0; +} + +/* MDIO bus init function */ +static int sh_mdio_init(struct sh_eth_private *mdp, + struct sh_eth_plat_data *pd) +{ + int ret, i; + struct bb_info *bitbang; + struct platform_device *pdev = mdp->pdev; + struct device *dev = &mdp->pdev->dev; + + /* create bit control struct for PHY */ + bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) + return -ENOMEM; + + /* bitbang init */ + bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; + bitbang->set_gate = pd->set_mdio_gate; + bitbang->mdi_msk = PIR_MDI; + bitbang->mdo_msk = PIR_MDO; + bitbang->mmd_msk = PIR_MMD; + bitbang->mdc_msk = PIR_MDC; + bitbang->ctrl.ops = &bb_ops; + + /* MII controller setting */ + mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!mdp->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + mdp->mii_bus->name = "sh_mii"; + mdp->mii_bus->parent = dev; + snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* PHY IRQ */ + mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int), + GFP_KERNEL); + if (!mdp->mii_bus->irq) { + ret = -ENOMEM; + goto out_free_bus; + } + + /* register MDIO bus */ + if (dev->of_node) { + ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); + } else { + for (i = 0; i < PHY_MAX_ADDR; i++) + mdp->mii_bus->irq[i] = PHY_POLL; + if (pd->phy_irq > 0) + mdp->mii_bus->irq[pd->phy] = pd->phy_irq; + + ret = mdiobus_register(mdp->mii_bus); + } + + if (ret) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(mdp->mii_bus); + return ret; +} + +static const u16 *sh_eth_get_register_offset(int register_type) +{ + const u16 *reg_offset = NULL; + + switch (register_type) { + case SH_ETH_REG_GIGABIT: + reg_offset = sh_eth_offset_gigabit; + break; + case SH_ETH_REG_FAST_RZ: + reg_offset = sh_eth_offset_fast_rz; + break; + case SH_ETH_REG_FAST_RCAR: + reg_offset = sh_eth_offset_fast_rcar; + break; + case SH_ETH_REG_FAST_SH4: + reg_offset = sh_eth_offset_fast_sh4; + break; + case SH_ETH_REG_FAST_SH3_SH2: + reg_offset = sh_eth_offset_fast_sh3_sh2; + break; + default: + break; + } + + return reg_offset; +} + +static const struct net_device_ops sh_eth_netdev_ops = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static const struct net_device_ops sh_eth_netdev_ops_tsu = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, + .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +#ifdef CONFIG_OF +static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + struct 
device_node *np = dev->of_node; + struct sh_eth_plat_data *pdata; + const char *mac_addr; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + pdata->phy_interface = of_get_phy_mode(np); + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(pdata->mac_addr, mac_addr, ETH_ALEN); + + pdata->no_ether_link = + of_property_read_bool(np, "renesas,no-ether-link"); + pdata->ether_link_active_low = + of_property_read_bool(np, "renesas,ether-link-active-low"); + + return pdata; +} + +static const struct of_device_id sh_eth_match_table[] = { + { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data }, + { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data }, + { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data }, + { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, + { } +}; +MODULE_DEVICE_TABLE(of, sh_eth_match_table); +#else +static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) +{ + return NULL; +} +#endif + +static int sh_eth_drv_probe(struct platform_device *pdev) +{ + int ret, devno = 0; + struct resource *res; + struct net_device *ndev = NULL; + struct sh_eth_private *mdp = NULL; + struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); + const struct platform_device_id *id = platform_get_device_id(pdev); + + /* get base addr */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + ndev = alloc_etherdev(sizeof(struct sh_eth_private)); + if (!ndev) + return -ENOMEM; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + devno = pdev->id; + if (devno < 0) + devno = 0; + + ndev->dma = -1; + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + ret = -ENODEV; + goto out_release; + } + ndev->irq = ret; + + SET_NETDEV_DEV(ndev, &pdev->dev); + + mdp = netdev_priv(ndev); + mdp->num_tx_ring = TX_RING_SIZE; + mdp->num_rx_ring = RX_RING_SIZE; + mdp->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mdp->addr)) { + ret = PTR_ERR(mdp->addr); + goto out_release; + } + + ndev->base_addr = res->start; + + spin_lock_init(&mdp->lock); + mdp->pdev = pdev; + + if (pdev->dev.of_node) + pd = sh_eth_parse_dt(&pdev->dev); + if (!pd) { + dev_err(&pdev->dev, "no platform data\n"); + ret = -EINVAL; + goto out_release; + } + + /* get PHY ID */ + mdp->phy_id = pd->phy; + mdp->phy_interface = pd->phy_interface; + /* EDMAC endian */ + mdp->edmac_endian = pd->edmac_endian; + mdp->no_ether_link = pd->no_ether_link; + mdp->ether_link_active_low = pd->ether_link_active_low; + + /* set cpu data */ + if (id) { + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; + } else { + const struct of_device_id *match; + + match = of_match_device(of_match_ptr(sh_eth_match_table), + &pdev->dev); + mdp->cd = (struct sh_eth_cpu_data *)match->data; + } + mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); + if (!mdp->reg_offset) { + dev_err(&pdev->dev, "Unknown register type (%d)\n", + mdp->cd->register_type); + ret = -EINVAL; + goto out_release; + } + sh_eth_set_default_cpu_data(mdp->cd); + + /* set function */ + if (mdp->cd->tsu) + ndev->netdev_ops = &sh_eth_netdev_ops_tsu; + else + ndev->netdev_ops = &sh_eth_netdev_ops; + ndev->ethtool_ops = &sh_eth_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + + 
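For boards that boot without DT, the fields read from pd in sh_eth_drv_probe() and sh_mdio_init() (phy, phy_irq, phy_interface, edmac_endian, no_ether_link, ether_link_active_low, mac_addr, needs_init, set_mdio_gate) have to arrive as platform data attached to a platform device whose name matches an entry in sh_eth_id_table. A minimal sketch is shown below; every concrete value is invented for the example, and the EDMAC_LITTLE_ENDIAN constant is assumed to be the one provided by <linux/sh_eth.h>.

	/* Minimal, illustrative platform data for the non-DT probe path.
	 * Values are examples only; field names follow the pd->... accesses
	 * in sh_eth_drv_probe() and sh_mdio_init().
	 */
	#include <linux/sh_eth.h>
	#include <linux/phy.h>

	static struct sh_eth_plat_data example_sh_eth_pdata = {
		.phy			= 0x01,			/* PHY address on the MDIO bus */
		.phy_irq		= 0,			/* 0: fall back to PHY_POLL */
		.phy_interface		= PHY_INTERFACE_MODE_MII,
		.edmac_endian		= EDMAC_LITTLE_ENDIAN,	/* assumed constant name */
		.needs_init		= 1,			/* run chip_reset()/TSU init for this instance */
		.no_ether_link		= 0,
		.ether_link_active_low	= 0,
	};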
/* debug message level */ + mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; + + /* read and set MAC address */ + read_mac_address(ndev, pd->mac_addr); + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, + "no valid MAC address supplied, using a random one.\n"); + eth_hw_addr_random(ndev); + } + + /* ioremap the TSU registers */ + if (mdp->cd->tsu) { + struct resource *rtsu; + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); + mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); + if (IS_ERR(mdp->tsu_addr)) { + ret = PTR_ERR(mdp->tsu_addr); + goto out_release; + } + mdp->port = devno % 2; + ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; + } + + /* initialize first or needed device */ + if (!devno || pd->needs_init) { + if (mdp->cd->chip_reset) + mdp->cd->chip_reset(ndev); + + if (mdp->cd->tsu) { + /* TSU init (Init only)*/ + sh_eth_tsu_init(mdp); + } + } + + if (mdp->cd->rmiimode) + sh_eth_write(ndev, 0x1, RMIIMODE); + + /* MDIO bus init */ + ret = sh_mdio_init(mdp, pd); + if (ret) { + dev_err(&ndev->dev, "failed to initialise MDIO\n"); + goto out_release; + } + + netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); + + /* network device register */ + ret = register_netdev(ndev); + if (ret) + goto out_napi_del; + + /* print device information */ + netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n", + (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); + + pm_runtime_put(&pdev->dev); + platform_set_drvdata(pdev, ndev); + + return ret; + +out_napi_del: + netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); + +out_release: + /* net_dev free */ + if (ndev) + free_netdev(ndev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return ret; +} + +static int sh_eth_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_eth_private *mdp = netdev_priv(ndev); + + unregister_netdev(ndev); + netif_napi_del(&mdp->napi); + sh_mdio_release(mdp); + pm_runtime_disable(&pdev->dev); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int sh_eth_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + int ret = 0; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + ret = sh_eth_close(ndev); + } + + return ret; +} + +static int sh_eth_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + int ret = 0; + + if (netif_running(ndev)) { + ret = sh_eth_open(ndev); + if (ret < 0) + return ret; + netif_device_attach(ndev); + } + + return ret; +} +#endif + +static int sh_eth_runtime_nop(struct device *dev) +{ + /* Runtime PM callback shared between ->runtime_suspend() + * and ->runtime_resume(). Simply returns success. + * + * This driver re-initializes all registers after + * pm_runtime_get_sync() anyway so there is no need + * to save and restore registers here. 
+ */ + return 0; +} + +static const struct dev_pm_ops sh_eth_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume) + SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL) +}; +#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) +#else +#define SH_ETH_PM_OPS NULL +#endif + +static struct platform_device_id sh_eth_id_table[] = { + { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, + { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, + { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, + { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, + { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, + { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, + { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data }, + { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, + { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, + { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data }, + { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data }, + { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data }, + { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_eth_id_table); + +static struct platform_driver sh_eth_driver = { + .probe = sh_eth_drv_probe, + .remove = sh_eth_drv_remove, + .id_table = sh_eth_id_table, + .driver = { + .name = CARDNAME, + .pm = SH_ETH_PM_OPS, + .of_match_table = of_match_ptr(sh_eth_match_table), + }, +}; + +module_platform_driver(sh_eth_driver); + +MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); +MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h new file mode 100644 index 000000000..06dbbe520 --- /dev/null +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -0,0 +1,591 @@ +/* SuperH Ethernet device driver + * + * Copyright (C) 2006-2012 Nobuhiro Iwamatsu + * Copyright (C) 2008-2012 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + */ + +#ifndef __SH_ETH_H__ +#define __SH_ETH_H__ + +#define CARDNAME "sh-eth" +#define TX_TIMEOUT (5*HZ) +#define TX_RING_SIZE 64 /* Tx ring size */ +#define RX_RING_SIZE 64 /* Rx ring size */ +#define TX_RING_MIN 64 +#define RX_RING_MIN 64 +#define TX_RING_MAX 1024 +#define RX_RING_MAX 1024 +#define PKT_BUF_SZ 1538 +#define SH_ETH_TSU_TIMEOUT_MS 500 +#define SH_ETH_TSU_CAM_ENTRIES 32 + +enum { + /* IMPORTANT: To keep ethtool register dump working, add new + * register names immediately before SH_ETH_MAX_REGISTER_OFFSET. 
+ */ + + /* E-DMAC registers */ + EDSR = 0, + EDMR, + EDTRR, + EDRRR, + EESR, + EESIPR, + TDLAR, + TDFAR, + TDFXR, + TDFFR, + RDLAR, + RDFAR, + RDFXR, + RDFFR, + TRSCER, + RMFCR, + TFTR, + FDR, + RMCR, + EDOCR, + TFUCR, + RFOCR, + RMIIMODE, + FCFTR, + RPADIR, + TRIMD, + RBWAR, + TBRAR, + + /* Ether registers */ + ECMR, + ECSR, + ECSIPR, + PIR, + PSR, + RDMLR, + PIPR, + RFLR, + IPGR, + APR, + MPR, + PFTCR, + PFRCR, + RFCR, + RFCF, + TPAUSER, + TPAUSECR, + BCFR, + BCFRR, + GECMR, + BCULR, + MAHR, + MALR, + TROCR, + CDCR, + LCCR, + CNDCR, + CEFCR, + FRECR, + TSFRCR, + TLFRCR, + CERCR, + CEECR, + MAFCR, + RTRATE, + CSMR, + RMII_MII, + + /* TSU Absolute address */ + ARSTR, + TSU_CTRST, + TSU_FWEN0, + TSU_FWEN1, + TSU_FCM, + TSU_BSYSL0, + TSU_BSYSL1, + TSU_PRISL0, + TSU_PRISL1, + TSU_FWSL0, + TSU_FWSL1, + TSU_FWSLC, + TSU_QTAG0, + TSU_QTAG1, + TSU_QTAGM0, + TSU_QTAGM1, + TSU_FWSR, + TSU_FWINMK, + TSU_ADQT0, + TSU_ADQT1, + TSU_VTAG0, + TSU_VTAG1, + TSU_ADSBSY, + TSU_TEN, + TSU_POST1, + TSU_POST2, + TSU_POST3, + TSU_POST4, + TSU_ADRH0, + /* TSU_ADR{H,L}{0..31} are assumed to be contiguous */ + + TXNLCR0, + TXALCR0, + RXNLCR0, + RXALCR0, + FWNLCR0, + FWALCR0, + TXNLCR1, + TXALCR1, + RXNLCR1, + RXALCR1, + FWNLCR1, + FWALCR1, + + /* This value must be written at last. */ + SH_ETH_MAX_REGISTER_OFFSET, +}; + +enum { + SH_ETH_REG_GIGABIT, + SH_ETH_REG_FAST_RZ, + SH_ETH_REG_FAST_RCAR, + SH_ETH_REG_FAST_SH4, + SH_ETH_REG_FAST_SH3_SH2 +}; + +/* Driver's parameters */ +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) +#define SH_ETH_RX_ALIGN 32 +#else +#define SH_ETH_RX_ALIGN 2 +#endif + +/* Register's bits + */ +/* EDSR : sh7734, sh7757, sh7763, r8a7740, and r7s72100 only */ +enum EDSR_BIT { + EDSR_ENT = 0x01, EDSR_ENR = 0x02, +}; +#define EDSR_ENALL (EDSR_ENT|EDSR_ENR) + +/* GECMR : sh7734, sh7763 and r8a7740 only */ +enum GECMR_BIT { + GECMR_10 = 0x0, GECMR_100 = 0x04, GECMR_1000 = 0x01, +}; + +/* EDMR */ +enum DMAC_M_BIT { + EDMR_EL = 0x40, /* Litte endian */ + EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, + EDMR_SRST_GETHER = 0x03, + EDMR_SRST_ETHER = 0x01, +}; + +/* EDTRR */ +enum DMAC_T_BIT { + EDTRR_TRNS_GETHER = 0x03, + EDTRR_TRNS_ETHER = 0x01, +}; + +/* EDRRR */ +enum EDRRR_R_BIT { + EDRRR_R = 0x01, +}; + +/* TPAUSER */ +enum TPAUSER_BIT { + TPAUSER_TPAUSE = 0x0000ffff, + TPAUSER_UNLIMITED = 0, +}; + +/* BCFR */ +enum BCFR_BIT { + BCFR_RPAUSE = 0x0000ffff, + BCFR_UNLIMITED = 0, +}; + +/* PIR */ +enum PIR_BIT { + PIR_MDI = 0x08, PIR_MDO = 0x04, PIR_MMD = 0x02, PIR_MDC = 0x01, +}; + +/* PSR */ +enum PHY_STATUS_BIT { PHY_ST_LINK = 0x01, }; + +/* EESR */ +enum EESR_BIT { + EESR_TWB1 = 0x80000000, + EESR_TWB = 0x40000000, /* same as TWB0 */ + EESR_TC1 = 0x20000000, + EESR_TUC = 0x10000000, + EESR_ROC = 0x08000000, + EESR_TABT = 0x04000000, + EESR_RABT = 0x02000000, + EESR_RFRMER = 0x01000000, /* same as RFCOF */ + EESR_ADE = 0x00800000, + EESR_ECI = 0x00400000, + EESR_FTC = 0x00200000, /* same as TC or TC0 */ + EESR_TDE = 0x00100000, + EESR_TFE = 0x00080000, /* same as TFUF */ + EESR_FRC = 0x00040000, /* same as FR */ + EESR_RDE = 0x00020000, + EESR_RFE = 0x00010000, + EESR_CND = 0x00000800, + EESR_DLC = 0x00000400, + EESR_CD = 0x00000200, + EESR_RTO = 0x00000100, + EESR_RMAF = 0x00000080, + EESR_CEEF = 0x00000040, + EESR_CELF = 0x00000020, + EESR_RRF = 0x00000010, + EESR_RTLF = 0x00000008, + EESR_RTSF = 0x00000004, + EESR_PRE = 0x00000002, + EESR_CERF = 0x00000001, +}; + +#define EESR_RX_CHECK (EESR_FRC | /* Frame recv */ \ + EESR_RMAF | /* Multicast address recv */ \ + EESR_RRF | /* Bit frame recv */ 
\ + EESR_RTLF | /* Long frame recv */ \ + EESR_RTSF | /* Short frame recv */ \ + EESR_PRE | /* PHY-LSI recv error */ \ + EESR_CERF) /* Recv frame CRC error */ + +#define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ + EESR_RTO) +#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ + EESR_RDE | EESR_RFRMER | EESR_ADE | \ + EESR_TFE | EESR_TDE | EESR_ECI) + +/* EESIPR */ +enum DMAC_IM_BIT { + DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000, + DMAC_M_RABT = 0x02000000, + DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000, + DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000, + DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000, + DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000, + DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800, + DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200, + DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080, + DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008, + DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002, + DMAC_M_RINT1 = 0x00000001, +}; + +/* Receive descriptor bit */ +enum RD_STS_BIT { + RD_RACT = 0x80000000, RD_RDEL = 0x40000000, + RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000, + RD_RFE = 0x08000000, RD_RFS10 = 0x00000200, + RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080, + RD_RFS7 = 0x00000040, RD_RFS6 = 0x00000020, + RD_RFS5 = 0x00000010, RD_RFS4 = 0x00000008, + RD_RFS3 = 0x00000004, RD_RFS2 = 0x00000002, + RD_RFS1 = 0x00000001, +}; +#define RDF1ST RD_RFP1 +#define RDFEND RD_RFP0 +#define RD_RFP (RD_RFP1|RD_RFP0) + +/* FCFTR */ +enum FCFTR_BIT { + FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000, + FCFTR_RFF0 = 0x00010000, FCFTR_RFD2 = 0x00000004, + FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, +}; +#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0) +#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0) + +/* Transmit descriptor bit */ +enum TD_STS_BIT { + TD_TACT = 0x80000000, TD_TDLE = 0x40000000, + TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000, + TD_TFE = 0x08000000, TD_TWBI = 0x04000000, +}; +#define TDF1ST TD_TFP1 +#define TDFEND TD_TFP0 +#define TD_TFP (TD_TFP1|TD_TFP0) + +/* RMCR */ +enum RMCR_BIT { + RMCR_RNC = 0x00000001, +}; + +/* ECMR */ +enum FELIC_MODE_BIT { + ECMR_TRCCM = 0x04000000, ECMR_RCSC = 0x00800000, + ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000, + ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000, + ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000, + ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020, + ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004, + ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001, +}; + +/* ECSR */ +enum ECSR_STATUS_BIT { + ECSR_BRCRX = 0x20, ECSR_PSRTO = 0x10, + ECSR_LCHNG = 0x04, + ECSR_MPD = 0x02, ECSR_ICD = 0x01, +}; + +#define DEFAULT_ECSR_INIT (ECSR_BRCRX | ECSR_PSRTO | ECSR_LCHNG | \ + ECSR_ICD | ECSIPR_MPDIP) + +/* ECSIPR */ +enum ECSIPR_STATUS_MASK_BIT { + ECSIPR_BRCRXIP = 0x20, ECSIPR_PSRTOIP = 0x10, + ECSIPR_LCHNGIP = 0x04, + ECSIPR_MPDIP = 0x02, ECSIPR_ICDIP = 0x01, +}; + +#define DEFAULT_ECSIPR_INIT (ECSIPR_BRCRXIP | ECSIPR_PSRTOIP | \ + ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP) + +/* APR */ +enum APR_BIT { + APR_AP = 0x00000001, +}; + +/* MPR */ +enum MPR_BIT { + MPR_MP = 0x00000001, +}; + +/* TRSCER */ +enum DESC_I_BIT { + DESC_I_TINT4 = 0x0800, DESC_I_TINT3 = 0x0400, DESC_I_TINT2 = 0x0200, + DESC_I_TINT1 = 0x0100, DESC_I_RINT8 = 0x0080, DESC_I_RINT5 = 0x0010, + DESC_I_RINT4 = 0x0008, DESC_I_RINT3 = 0x0004, DESC_I_RINT2 = 0x0002, + 
DESC_I_RINT1 = 0x0001, +}; + +#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) + +/* RPADIR */ +enum RPADIR_BIT { + RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, + RPADIR_PADR = 0x0003f, +}; + +/* FDR */ +#define DEFAULT_FDR_INIT 0x00000707 + +/* ARSTR */ +enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, }; + +/* TSU_FWEN0 */ +enum TSU_FWEN0_BIT { + TSU_FWEN0_0 = 0x00000001, +}; + +/* TSU_ADSBSY */ +enum TSU_ADSBSY_BIT { + TSU_ADSBSY_0 = 0x00000001, +}; + +/* TSU_TEN */ +enum TSU_TEN_BIT { + TSU_TEN_0 = 0x80000000, +}; + +/* TSU_FWSL0 */ +enum TSU_FWSL0_BIT { + TSU_FWSL0_FW50 = 0x1000, TSU_FWSL0_FW40 = 0x0800, + TSU_FWSL0_FW30 = 0x0400, TSU_FWSL0_FW20 = 0x0200, + TSU_FWSL0_FW10 = 0x0100, TSU_FWSL0_RMSA0 = 0x0010, +}; + +/* TSU_FWSLC */ +enum TSU_FWSLC_BIT { + TSU_FWSLC_POSTENU = 0x2000, TSU_FWSLC_POSTENL = 0x1000, + TSU_FWSLC_CAMSEL03 = 0x0080, TSU_FWSLC_CAMSEL02 = 0x0040, + TSU_FWSLC_CAMSEL01 = 0x0020, TSU_FWSLC_CAMSEL00 = 0x0010, + TSU_FWSLC_CAMSEL13 = 0x0008, TSU_FWSLC_CAMSEL12 = 0x0004, + TSU_FWSLC_CAMSEL11 = 0x0002, TSU_FWSLC_CAMSEL10 = 0x0001, +}; + +/* TSU_VTAGn */ +#define TSU_VTAG_ENABLE 0x80000000 +#define TSU_VTAG_VID_MASK 0x00000fff + +/* The sh ether Tx buffer descriptors. + * This structure should be 20 bytes. + */ +struct sh_eth_txdesc { + u32 status; /* TD0 */ +#if defined(__LITTLE_ENDIAN) + u16 pad0; /* TD1 */ + u16 buffer_length; /* TD1 */ +#else + u16 buffer_length; /* TD1 */ + u16 pad0; /* TD1 */ +#endif + u32 addr; /* TD2 */ + u32 pad1; /* padding data */ +} __aligned(2) __packed; + +/* The sh ether Rx buffer descriptors. + * This structure should be 20 bytes. + */ +struct sh_eth_rxdesc { + u32 status; /* RD0 */ +#if defined(__LITTLE_ENDIAN) + u16 frame_length; /* RD1 */ + u16 buffer_length; /* RD1 */ +#else + u16 buffer_length; /* RD1 */ + u16 frame_length; /* RD1 */ +#endif + u32 addr; /* RD2 */ + u32 pad0; /* padding data */ +} __aligned(2) __packed; + +/* This structure is used by each CPU dependency handling. 
*/ +struct sh_eth_cpu_data { + /* optional functions */ + void (*chip_reset)(struct net_device *ndev); + void (*set_duplex)(struct net_device *ndev); + void (*set_rate)(struct net_device *ndev); + + /* mandatory initialize value */ + int register_type; + u32 eesipr_value; + + /* optional initialize value */ + u32 ecsr_value; + u32 ecsipr_value; + u32 fdr_value; + u32 fcftr_value; + u32 rpadir_value; + + /* interrupt checking mask */ + u32 tx_check; + u32 eesr_err_check; + + /* Error mask */ + u32 trscer_err_mask; + + /* hardware features */ + unsigned long irq_flags; /* IRQ configuration flags */ + unsigned no_psr:1; /* EtherC DO NOT have PSR */ + unsigned apr:1; /* EtherC have APR */ + unsigned mpr:1; /* EtherC have MPR */ + unsigned tpauser:1; /* EtherC have TPAUSER */ + unsigned bculr:1; /* EtherC have BCULR */ + unsigned tsu:1; /* EtherC have TSU */ + unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */ + unsigned rpadir:1; /* E-DMAC have RPADIR */ + unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ + unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ + unsigned hw_crc:1; /* E-DMAC have CSMR */ + unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ + unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */ + unsigned rmiimode:1; /* EtherC has RMIIMODE register */ + unsigned rtrate:1; /* EtherC has RTRATE register */ +}; + +struct sh_eth_private { + struct platform_device *pdev; + struct sh_eth_cpu_data *cd; + const u16 *reg_offset; + void __iomem *addr; + void __iomem *tsu_addr; + u32 num_rx_ring; + u32 num_tx_ring; + dma_addr_t rx_desc_dma; + dma_addr_t tx_desc_dma; + struct sh_eth_rxdesc *rx_ring; + struct sh_eth_txdesc *tx_ring; + struct sk_buff **rx_skbuff; + struct sk_buff **tx_skbuff; + spinlock_t lock; /* Register access lock */ + u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ + u32 cur_tx, dirty_tx; + u32 rx_buf_sz; /* Based on MTU+slack. */ + int edmac_endian; + struct napi_struct napi; + bool irq_enabled; + /* MII transceiver section. 
*/ + u32 phy_id; /* PHY ID */ + struct mii_bus *mii_bus; /* MDIO bus control */ + struct phy_device *phydev; /* PHY device control */ + int link; + phy_interface_t phy_interface; + int msg_enable; + int speed; + int duplex; + int port; /* for TSU */ + int vlan_num_ids; /* for VLAN tag filter */ + + unsigned no_ether_link:1; + unsigned ether_link_active_low:1; + unsigned is_opened:1; +}; + +static inline void sh_eth_soft_swap(char *src, int len) +{ +#ifdef __LITTLE_ENDIAN__ + u32 *p = (u32 *)src; + u32 *maxp; + maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32)); + + for (; p < maxp; p++) + *p = swab32(*p); +#endif +} + +#define SH_ETH_OFFSET_INVALID ((u16) ~0) + +static inline void sh_eth_write(struct net_device *ndev, u32 data, + int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->addr + offset); +} + +static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->addr + offset); +} + +static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, + int enum_index) +{ + return mdp->tsu_addr + mdp->reg_offset[enum_index]; +} + +static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, + int enum_index) +{ + iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + +static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) +{ + return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + +#endif /* #ifndef __SH_ETH_H__ */ diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig new file mode 100644 index 000000000..b9952ef04 --- /dev/null +++ b/drivers/net/ethernet/rocker/Kconfig @@ -0,0 +1,27 @@ +# +# Rocker device configuration +# + +config NET_VENDOR_ROCKER + bool "Rocker devices" + default y + ---help--- + If you have a network device belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Rocker devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_ROCKER + +config ROCKER + tristate "Rocker switch driver (EXPERIMENTAL)" + depends on PCI && NET_SWITCHDEV && BRIDGE + ---help--- + This driver supports Rocker switch device. + + To compile this driver as a module, choose M here: the + module will be called rocker. + +endif # NET_VENDOR_ROCKER diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile new file mode 100644 index 000000000..f85fb12f3 --- /dev/null +++ b/drivers/net/ethernet/rocker/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Rocker network device drivers. 
+# + +obj-$(CONFIG_ROCKER) += rocker.o diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c new file mode 100644 index 000000000..cf98cc9bb --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.c @@ -0,0 +1,5062 @@ +/* + * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rocker.h" + +static const char rocker_driver_name[] = "rocker"; + +static const struct pci_device_id rocker_pci_id_table[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, + {0, } +}; + +struct rocker_flow_tbl_key { + u32 priority; + enum rocker_of_dpa_table_id tbl_id; + union { + struct { + u32 in_pport; + u32 in_pport_mask; + enum rocker_of_dpa_table_id goto_tbl; + } ig_port; + struct { + u32 in_pport; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool untagged; + __be16 new_vlan_id; + } vlan; + struct { + u32 in_pport; + u32 in_pport_mask; + __be16 eth_type; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool copy_to_cpu; + } term_mac; + struct { + __be16 eth_type; + __be32 dst4; + __be32 dst4_mask; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + } ucast_routing; + struct { + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + int has_eth_dst; + int has_eth_dst_mask; + __be16 vlan_id; + u32 tunnel_id; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + bool copy_to_cpu; + } bridge; + struct { + u32 in_pport; + u32 in_pport_mask; + u8 eth_src[ETH_ALEN]; + u8 eth_src_mask[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 eth_type; + __be16 vlan_id; + __be16 vlan_id_mask; + u8 ip_proto; + u8 ip_proto_mask; + u8 ip_tos; + u8 ip_tos_mask; + u32 group_id; + } acl; + }; +}; + +struct rocker_flow_tbl_entry { + struct hlist_node entry; + u32 cmd; + u64 cookie; + struct rocker_flow_tbl_key key; + size_t key_len; + u32 key_crc32; /* key */ +}; + +struct rocker_group_tbl_entry { + struct hlist_node entry; + u32 cmd; + u32 group_id; /* key */ + u16 group_count; + u32 *group_ids; + union { + struct { + u8 pop_vlan; + } l2_interface; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + u32 group_id; + } l2_rewrite; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + bool ttl_check; + u32 group_id; + } l3_unicast; + }; +}; + +struct rocker_fdb_tbl_entry { + struct hlist_node entry; + u32 key_crc32; /* key */ + bool learned; + struct rocker_fdb_tbl_key { + u32 pport; + u8 addr[ETH_ALEN]; + __be16 vlan_id; + } key; +}; + +struct rocker_internal_vlan_tbl_entry { + struct hlist_node entry; + int ifindex; /* key */ + u32 ref_count; + __be16 vlan_id; +}; + +struct rocker_neigh_tbl_entry { + struct hlist_node entry; + __be32 ip_addr; /* key */ + struct net_device *dev; + u32 ref_count; + u32 index; + u8 eth_dst[ETH_ALEN]; + bool 
ttl_check; +}; + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + DEFINE_DMA_UNMAP_ADDR(mapaddr); +}; + +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +enum { + ROCKER_CTRL_LINK_LOCAL_MCAST, + ROCKER_CTRL_LOCAL_ARP, + ROCKER_CTRL_IPV4_MCAST, + ROCKER_CTRL_IPV6_MCAST, + ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_MAX, +}; + +#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 +#define ROCKER_N_INTERNAL_VLANS 255 +#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) + +struct rocker_port { + struct net_device *dev; + struct net_device *bridge_dev; + struct rocker *rocker; + unsigned int port_number; + u32 pport; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + bool ctrls[ROCKER_CTRL_MAX]; + unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; + unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; + u32 neigh_tbl_next_index; +}; + +static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; +static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; +static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; + +/* Rocker priority levels for flow table entries. Higher + * priority match takes precedence over lower priority match. 
+ */ + +enum { + ROCKER_PRIORITY_UNKNOWN = 0, + ROCKER_PRIORITY_IG_PORT = 1, + ROCKER_PRIORITY_VLAN = 1, + ROCKER_PRIORITY_TERM_MAC_UCAST = 0, + ROCKER_PRIORITY_TERM_MAC_MCAST = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_VLAN = 3, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_TENANT = 3, + ROCKER_PRIORITY_ACL_CTRL = 3, + ROCKER_PRIORITY_ACL_NORMAL = 2, + ROCKER_PRIORITY_ACL_DFLT = 1, +}; + +static bool rocker_vlan_id_is_internal(__be16 vlan_id) +{ + u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; + u16 end = 0xffe; + u16 _vlan_id = ntohs(vlan_id); + + return (_vlan_id >= start && _vlan_id <= end); +} + +static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port, + u16 vid, bool *pop_vlan) +{ + __be16 vlan_id; + + if (pop_vlan) + *pop_vlan = false; + vlan_id = htons(vid); + if (!vlan_id) { + vlan_id = rocker_port->internal_vlan_id; + if (pop_vlan) + *pop_vlan = true; + } + + return vlan_id; +} + +static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port, + __be16 vlan_id) +{ + if (rocker_vlan_id_is_internal(vlan_id)) + return 0; + + return ntohs(vlan_id); +} + +static bool rocker_port_is_bridged(struct rocker_port *rocker_port) +{ + return !!rocker_port->bridge_dev; +} + +struct rocker_wait { + wait_queue_head_t wait; + bool done; + bool nowait; +}; + +static void rocker_wait_reset(struct rocker_wait *wait) +{ + wait->done = false; + wait->nowait = false; +} + +static void rocker_wait_init(struct rocker_wait *wait) +{ + init_waitqueue_head(&wait->wait); + rocker_wait_reset(wait); +} + +static struct rocker_wait *rocker_wait_create(gfp_t gfp) +{ + struct rocker_wait *wait; + + wait = kmalloc(sizeof(*wait), gfp); + if (!wait) + return NULL; + rocker_wait_init(wait); + return wait; +} + +static void rocker_wait_destroy(struct rocker_wait *work) +{ + kfree(work); +} + +static bool rocker_wait_event_timeout(struct rocker_wait *wait, + unsigned long timeout) +{ + wait_event_timeout(wait->wait, wait->done, HZ / 10); + if (!wait->done) + return false; + return true; +} + +static void rocker_wait_wake_up(struct rocker_wait *wait) +{ + wait->done = true; + wake_up(&wait->wait); +} + +static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector) +{ + return rocker->msix_entries[vector].vector; +} + +static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_TX(rocker_port->port_number)); +} + +static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_RX(rocker_port->port_number)); +} + +#define rocker_write32(rocker, reg, val) \ + writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read32(rocker, reg) \ + readl((rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_write64(rocker, reg, val) \ + writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read64(rocker, reg) \ + readq((rocker)->hw_addr + (ROCKER_ ## reg)) + +/***************************** + * HW basic testing functions + *****************************/ + +static int rocker_reg_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + u64 test_reg; + u64 rnd; + + rnd = prandom_u32(); + rnd >>= 1; + rocker_write32(rocker, TEST_REG, rnd); + test_reg = rocker_read32(rocker, TEST_REG); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 32bit register value 
%08llx, expected %08llx\n", + test_reg, rnd * 2); + return -EIO; + } + + rnd = prandom_u32(); + rnd <<= 31; + rnd |= prandom_u32(); + rocker_write64(rocker, TEST_REG64, rnd); + test_reg = rocker_read64(rocker, TEST_REG64); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", + test_reg, rnd * 2); + return -EIO; + } + + return 0; +} + +static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait, + u32 test_type, dma_addr_t dma_handle, + unsigned char *buf, unsigned char *expect, + size_t size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + rocker_wait_reset(wait); + rocker_write32(rocker, TEST_DMA_CTRL, test_type); + + if (!rocker_wait_event_timeout(wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + return -EIO; + } + + for (i = 0; i < size; i++) { + if (buf[i] != expect[i]) { + dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", + buf[i], i, expect[i]); + return -EIO; + } + } + return 0; +} + +#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) +#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 + +static int rocker_dma_test_offset(struct rocker *rocker, + struct rocker_wait *wait, int offset) +{ + struct pci_dev *pdev = rocker->pdev; + unsigned char *alloc; + unsigned char *buf; + unsigned char *expect; + dma_addr_t dma_handle; + int i; + int err; + + alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, + GFP_KERNEL | GFP_DMA); + if (!alloc) + return -ENOMEM; + buf = alloc + offset; + expect = buf + ROCKER_TEST_DMA_BUF_SIZE; + + dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dma_handle)) { + err = -EIO; + goto free_alloc; + } + + rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); + rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); + + memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); + for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) + expect[i] = ~buf[i]; + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + +unmap: + pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); +free_alloc: + kfree(alloc); + + return err; +} + +static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait) +{ + int i; + int err; + + for (i = 0; i < 8; i++) { + err = rocker_dma_test_offset(rocker, wait, i); + if (err) + return err; + } + return 0; +} + +static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) +{ + struct rocker_wait *wait = dev_id; + + rocker_wait_wake_up(wait); + + return IRQ_HANDLED; +} + +static int rocker_basic_hw_test(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_wait wait; + int err; + + err = rocker_reg_test(rocker); + if (err) { + dev_err(&pdev->dev, "reg test failed\n"); + return err; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), + rocker_test_irq_handler, 0, + rocker_driver_name, &wait); + if (err) { + dev_err(&pdev->dev, 
"cannot assign test irq\n"); + return err; + } + + rocker_wait_init(&wait); + rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); + + if (!rocker_wait_event_timeout(&wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + err = -EIO; + goto free_irq; + } + + err = rocker_dma_test(rocker, &wait); + if (err) + dev_err(&pdev->dev, "dma test failed\n"); + +free_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); + return err; +} + +/****** + * TLV + ******/ + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static int rocker_tlv_type(const struct rocker_tlv *tlv) +{ + return tlv->type; +} + +static void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) +{ + return *(__be16 *) rocker_tlv_data(tlv); +} + +static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + +static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = (struct rocker_tlv *) tlv; + } +} + +static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype, + const struct rocker_tlv *tlv) +{ + 
rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype, + struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +static int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} + +static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, + int attrtype, __be16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); +} + +static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, + int attrtype, __be32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); +} + +static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + desc_info->tlv_size = (char *) start - desc_info->data; +} + +/****************************************** + * DMA rings and descriptors manipulations + ******************************************/ + +static u32 __pos_inc(u32 pos, size_t limit) +{ + return ++pos == limit ? 
0 : pos; +} + +static int rocker_desc_err(struct rocker_desc_info *desc_info) +{ + int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; + + switch (err) { + case ROCKER_OK: + return 0; + case -ROCKER_ENOENT: + return -ENOENT; + case -ROCKER_ENXIO: + return -ENXIO; + case -ROCKER_ENOMEM: + return -ENOMEM; + case -ROCKER_EEXIST: + return -EEXIST; + case -ROCKER_EINVAL: + return -EINVAL; + case -ROCKER_EMSGSIZE: + return -EMSGSIZE; + case -ROCKER_ENOTSUP: + return -EOPNOTSUPP; + case -ROCKER_ENOBUFS: + return -ENOBUFS; + } + + return -EINVAL; +} + +static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info) +{ + desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; +} + +static bool rocker_desc_gen(struct rocker_desc_info *desc_info) +{ + u32 comp_err = desc_info->desc->comp_err; + + return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false; +} + +static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info) +{ + return (void *)(uintptr_t)desc_info->desc->cookie; +} + +static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info, + void *ptr) +{ + desc_info->desc->cookie = (uintptr_t) ptr; +} + +static struct rocker_desc_info * +rocker_desc_head_get(struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + u32 head = __pos_inc(info->head, info->size); + + desc_info = &info->desc_info[info->head]; + if (head == info->tail) + return NULL; /* ring full */ + desc_info->tlv_size = 0; + return desc_info; +} + +static void rocker_desc_commit(struct rocker_desc_info *desc_info) +{ + desc_info->desc->buf_size = desc_info->data_size; + desc_info->desc->tlv_size = desc_info->tlv_size; +} + +static void rocker_desc_head_set(struct rocker *rocker, + struct rocker_dma_ring_info *info, + struct rocker_desc_info *desc_info) +{ + u32 head = __pos_inc(info->head, info->size); + + BUG_ON(head == info->tail); + rocker_desc_commit(desc_info); + info->head = head; + rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); +} + +static struct rocker_desc_info * +rocker_desc_tail_get(struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + + if (info->tail == info->head) + return NULL; /* nothing to be done between head and tail */ + desc_info = &info->desc_info[info->tail]; + if (!rocker_desc_gen(desc_info)) + return NULL; /* gen bit not set, desc is not ready yet */ + info->tail = __pos_inc(info->tail, info->size); + desc_info->tlv_size = desc_info->desc->tlv_size; + return desc_info; +} + +static void rocker_dma_ring_credits_set(struct rocker *rocker, + struct rocker_dma_ring_info *info, + u32 credits) +{ + if (credits) + rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); +} + +static unsigned long rocker_dma_ring_size_fix(size_t size) +{ + return max(ROCKER_DMA_SIZE_MIN, + min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); +} + +static int rocker_dma_ring_create(struct rocker *rocker, + unsigned int type, + size_t size, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(size != rocker_dma_ring_size_fix(size)); + info->size = size; + info->type = type; + info->head = 0; + info->tail = 0; + info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), + GFP_KERNEL); + if (!info->desc_info) + return -ENOMEM; + + info->desc = pci_alloc_consistent(rocker->pdev, + info->size * sizeof(*info->desc), + &info->mapaddr); + if (!info->desc) { + kfree(info->desc_info); + return -ENOMEM; + } + + for (i = 0; i < info->size; i++) + info->desc_info[i].desc = &info->desc[i]; + + 
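The head/tail bookkeeping above deliberately keeps one slot unused: rocker_desc_head_get() refuses to hand out a descriptor when advancing the head would land on the tail, which is its "ring full" condition. The stand-alone sketch below is not driver code; it just models that rule, with the same wrap logic as __pos_inc(), for a hypothetical four-slot ring.

	/* Illustrative only: the "one slot always empty" full test used by
	 * rocker_desc_head_get(), modelled for a four-slot ring.
	 */
	#include <stdio.h>

	static unsigned int pos_inc(unsigned int pos, size_t limit)
	{
		return ++pos == limit ? 0 : pos;	/* same wrap rule as __pos_inc() */
	}

	int main(void)
	{
		const size_t size = 4;			/* hypothetical ring size */
		unsigned int head = 0, tail = 0;	/* empty ring: head == tail */
		int produced = 0;

		/* Produce until advancing head would hit tail (the "full" test). */
		while (pos_inc(head, size) != tail) {
			head = pos_inc(head, size);
			produced++;
		}
		printf("a %zu-slot ring holds at most %d in-flight descriptors\n",
		       size, produced);
		return 0;
	}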
rocker_write32(rocker, DMA_DESC_CTRL(info->type), + ROCKER_DMA_DESC_CTRL_RESET); + rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); + rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); + + return 0; +} + +static void rocker_dma_ring_destroy(struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); + + pci_free_consistent(rocker->pdev, + info->size * sizeof(struct rocker_desc), + info->desc, info->mapaddr); + kfree(info->desc_info); +} + +static void rocker_dma_ring_pass_to_producer(struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(info->head || info->tail); + + /* When ring is consumer, we need to advance head for each desc. + * That tells hw that the desc is ready to be used by it. + */ + for (i = 0; i < info->size - 1; i++) + rocker_desc_head_set(rocker, info, &info->desc_info[i]); + rocker_desc_commit(&info->desc_info[i]); +} + +static int rocker_dma_ring_bufs_alloc(struct rocker *rocker, + struct rocker_dma_ring_info *info, + int direction, size_t buf_size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + int err; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + dma_addr_t dma_handle; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); + if (!buf) { + err = -ENOMEM; + goto rollback; + } + + dma_handle = pci_map_single(pdev, buf, buf_size, direction); + if (pci_dma_mapping_error(pdev, dma_handle)) { + kfree(buf); + err = -EIO; + goto rollback; + } + + desc_info->data = buf; + desc_info->data_size = buf_size; + dma_unmap_addr_set(desc_info, mapaddr, dma_handle); + + desc->buf_addr = dma_handle; + desc->buf_size = buf_size; + } + return 0; + +rollback: + for (i--; i >= 0; i--) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } + return err; +} + +static void rocker_dma_ring_bufs_free(struct rocker *rocker, + struct rocker_dma_ring_info *info, + int direction) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + + desc->buf_addr = 0; + desc->buf_size = 0; + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } +} + +static int rocker_dma_rings_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, + ROCKER_DMA_CMD_DEFAULT_SIZE, + &rocker->cmd_ring); + if (err) { + dev_err(&pdev->dev, "failed to create command dma ring\n"); + return err; + } + + spin_lock_init(&rocker->cmd_ring_lock); + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); + goto err_dma_cmd_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, + ROCKER_DMA_EVENT_DEFAULT_SIZE, + &rocker->event_ring); + if (err) { + dev_err(&pdev->dev, "failed to create event dma ring\n"); + goto err_dma_event_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, + PCI_DMA_FROMDEVICE, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); + goto 
err_dma_event_ring_bufs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); + return 0; + +err_dma_event_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->event_ring); +err_dma_event_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_cmd_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); + return err; +} + +static void rocker_dma_rings_fini(struct rocker *rocker) +{ + rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->event_ring); + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); +} + +static int rocker_dma_rx_ring_skb_map(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + struct sk_buff *skb, size_t buf_len) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + + dma_handle = pci_map_single(pdev, skb->data, buf_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, dma_handle)) + return -EIO; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) + goto tlv_put_failure; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) + goto tlv_put_failure; + return 0; + +tlv_put_failure: + pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); + desc_info->tlv_size = 0; + return -EMSGSIZE; +} + +static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port) +{ + return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +} + +static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct net_device *dev = rocker_port->dev; + struct sk_buff *skb; + size_t buf_len = rocker_port_rx_buf_len(rocker_port); + int err; + + /* Ensure that hw will see tlv_size zero in case of an error. + * That tells hw to use another descriptor. 
+ */ + rocker_desc_cookie_ptr_set(desc_info, NULL); + desc_info->tlv_size = 0; + + skb = netdev_alloc_skb_ip_align(dev, buf_len); + if (!skb) + return -ENOMEM; + err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info, + skb, buf_len); + if (err) { + dev_kfree_skb_any(skb); + return err; + } + rocker_desc_cookie_ptr_set(desc_info, skb); + return 0; +} + +static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker, + struct rocker_tlv **attrs) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + size_t len; + + if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || + !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) + return; + dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); + len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); +} + +static void rocker_dma_rx_ring_skb_free(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + + if (!skb) + return; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + dev_kfree_skb_any(skb); +} + +static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + int err; + + for (i = 0; i < rx_ring->size; i++) { + err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, + &rx_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); + return err; +} + +static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + int i; + + for (i = 0; i < rx_ring->size; i++) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); +} + +static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + int err; + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_TX(rocker_port->port_number), + ROCKER_DMA_TX_DEFAULT_SIZE, + &rocker_port->tx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); + return err; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE, + ROCKER_DMA_TX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); + goto err_dma_tx_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_RX(rocker_port->port_number), + ROCKER_DMA_RX_DEFAULT_SIZE, + &rocker_port->rx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); + goto err_dma_rx_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL, + ROCKER_DMA_RX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); + goto err_dma_rx_ring_bufs_alloc; + } + + err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); + goto err_dma_rx_ring_skbs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); + + return 0; + +err_dma_rx_ring_skbs_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_rx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); 
+err_dma_rx_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); +err_dma_tx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); + return err; +} + +static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + + rocker_dma_rx_ring_skbs_free(rocker, rocker_port); + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); +} + +static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) +{ + u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); + + if (enable) + val |= 1ULL << rocker_port->pport; + else + val &= ~(1ULL << rocker_port->pport); + rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); +} + +/******************************** + * Interrupt handler and helpers + ********************************/ + +static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + u32 credits = 0; + + spin_lock(&rocker->cmd_ring_lock); + while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { + wait = rocker_desc_cookie_ptr_get(desc_info); + if (wait->nowait) { + rocker_desc_gen_clear(desc_info); + rocker_wait_destroy(wait); + } else { + rocker_wait_wake_up(wait); + } + credits++; + } + spin_unlock(&rocker->cmd_ring_lock); + rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); + + return IRQ_HANDLED; +} + +static void rocker_port_link_up(struct rocker_port *rocker_port) +{ + netif_carrier_on(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is up\n"); +} + +static void rocker_port_link_down(struct rocker_port *rocker_port) +{ + netif_carrier_off(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is down\n"); +} + +static int rocker_event_link_change(struct rocker *rocker, + const struct rocker_tlv *info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; + unsigned int port_number; + bool link_up; + struct rocker_port *rocker_port; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || + !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; + link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + if (netif_carrier_ok(rocker_port->dev) != link_up) { + if (link_up) + rocker_port_link_up(rocker_port); + else + rocker_port_link_down(rocker_port); + } + + return 0; +} + +#define ROCKER_OP_FLAG_REMOVE BIT(0) +#define ROCKER_OP_FLAG_NOWAIT BIT(1) +#define ROCKER_OP_FLAG_LEARNED BIT(2) +#define ROCKER_OP_FLAG_REFRESH BIT(3) + +static int rocker_port_fdb(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id, int flags); + +static int rocker_event_mac_vlan_seen(struct rocker *rocker, + const struct rocker_tlv *info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; + unsigned int port_number; + struct rocker_port *rocker_port; + unsigned char *addr; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; + __be16 vlan_id; + + 
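+	/* The device saw a new source MAC/VLAN on a port; mirror the
+	 * learned address into our FDB (no-wait, marked as learned),
+	 * but only while the port's STP state allows learning.
+	 */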
rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; + addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); + vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + return 0; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_event_process(struct rocker *rocker, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; + struct rocker_tlv *info; + u16 type; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); + if (!attrs[ROCKER_TLV_EVENT_TYPE] || + !attrs[ROCKER_TLV_EVENT_INFO]) + return -EIO; + + type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); + info = attrs[ROCKER_TLV_EVENT_INFO]; + + switch (type) { + case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: + return rocker_event_link_change(rocker, info); + case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: + return rocker_event_mac_vlan_seen(rocker, info); + } + + return -EOPNOTSUPP; +} + +static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + struct pci_dev *pdev = rocker->pdev; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + dev_err(&pdev->dev, "event desc received with err %d\n", + err); + } else { + err = rocker_event_process(rocker, desc_info); + if (err) + dev_err(&pdev->dev, "event processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); + credits++; + } + rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); + + return IRQ_HANDLED; +} + +static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_tx); + return IRQ_HANDLED; +} + +static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_rx); + return IRQ_HANDLED; +} + +/******************** + * Command interface + ********************/ + +typedef int (*rocker_cmd_cb_t)(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +static int rocker_cmd_exec(struct rocker *rocker, + struct rocker_port *rocker_port, + rocker_cmd_cb_t prepare, void *prepare_priv, + rocker_cmd_cb_t process, void *process_priv, + bool nowait) +{ + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + unsigned long flags; + int err; + + wait = rocker_wait_create(nowait ? 
GFP_ATOMIC : GFP_KERNEL); + if (!wait) + return -ENOMEM; + wait->nowait = nowait; + + spin_lock_irqsave(&rocker->cmd_ring_lock, flags); + desc_info = rocker_desc_head_get(&rocker->cmd_ring); + if (!desc_info) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + err = -EAGAIN; + goto out; + } + err = prepare(rocker, rocker_port, desc_info, prepare_priv); + if (err) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + goto out; + } + rocker_desc_cookie_ptr_set(desc_info, wait); + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); + + if (nowait) + return 0; + + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; + + err = rocker_desc_err(desc_info); + if (err) + return err; + + if (process) + err = process(rocker, rocker_port, desc_info, process_priv); + + rocker_desc_gen_clear(desc_info); +out: + rocker_wait_destroy(wait); + return err; +} + +static int +rocker_cmd_get_port_settings_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + u32 speed; + u8 duplex; + u8 autoneg; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) + return -EIO; + + speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = SUPPORTED_TP; + ecmd->phy_address = 0xff; + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->autoneg = autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; + if (!attr) + return -EIO; + + if (rocker_tlv_len(attr) != ETH_ALEN) + return -EINVAL; + + ether_addr_copy(macaddr, rocker_tlv_data(attr)); + return 0; +} + +struct port_name { + char *buf; + size_t len; +}; + +static int +rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct port_name *name = priv; + struct rocker_tlv *attr; + size_t i, j, len; + char *str; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; + if (!attr) + return -EIO; + + len = min_t(size_t, rocker_tlv_len(attr), name->len); + str = rocker_tlv_data(attr); + + /* make sure name only contains alphanumeric characters */ + for (i = j = 0; i < len; ++i) { + if (isalnum(str[i])) { + name->buf[j] = str[i]; + j++; + } + } + + if (j == 0) + return -EIO; + + name->buf[j] = '\0'; + + return 0; +} + +static int +rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, + ethtool_cmd_speed(ecmd))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, + ecmd->duplex)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, + ecmd->autoneg)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + ETH_ALEN, 
macaddr)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_learning_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, + !!(rocker_port->brport_flags & BR_LEARNING))) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_ethtool_proc, + ecmd, false); +} + +static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_macaddr_proc, + macaddr, false); +} + +static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_settings_ethtool_prep, + ecmd, NULL, NULL, false); +} + +static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_settings_macaddr_prep, + macaddr, NULL, NULL, false); +} + +static int rocker_port_set_learning(struct rocker_port *rocker_port) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_set_port_learning_prep, + NULL, NULL, NULL, false); +} + +static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ig_port.goto_tbl)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.vlan.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.vlan.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.vlan.goto_tbl)) + return -EMSGSIZE; + if (entry->key.vlan.untagged && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, + entry->key.vlan.new_vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + 
entry->key.term_mac.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.term_mac.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.term_mac.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.term_mac.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.term_mac.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.term_mac.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.term_mac.goto_tbl)) + return -EMSGSIZE; + if (entry->key.term_mac.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.term_mac.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.ucast_routing.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, + entry->key.ucast_routing.dst4)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, + entry->key.ucast_routing.dst4_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ucast_routing.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.ucast_routing.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (entry->key.bridge.has_eth_dst && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.bridge.eth_dst)) + return -EMSGSIZE; + if (entry->key.bridge.has_eth_dst_mask && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.bridge.eth_dst_mask)) + return -EMSGSIZE; + if (entry->key.bridge.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.bridge.vlan_id)) + return -EMSGSIZE; + if (entry->key.bridge.tunnel_id && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, + entry->key.bridge.tunnel_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.bridge.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.bridge.group_id)) + return -EMSGSIZE; + if (entry->key.bridge.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.bridge.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, + struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->key.acl.eth_src)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, + 
ETH_ALEN, entry->key.acl.eth_src_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.acl.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.acl.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.acl.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.acl.vlan_id_mask)) + return -EMSGSIZE; + + switch (ntohs(entry->key.acl.eth_type)) { + case ETH_P_IP: + case ETH_P_IPV6: + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, + entry->key.acl.ip_proto)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, + entry->key.acl.ip_proto_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, + entry->key.acl.ip_tos & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, + entry->key.acl.ip_tos_mask & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, + (entry->key.acl.ip_tos & 0xc0) >> 6)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_ECN_MASK, + (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) + return -EMSGSIZE; + break; + } + + if (entry->key.acl.group_id != ROCKER_GROUP_NONE && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.acl.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, + entry->key.tbl_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, + entry->key.priority)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + + switch (entry->key.tbl_id) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_flow_tbl_del(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv 
*cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, + ROCKER_GROUP_PORT_GET(entry->group_id))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, + entry->l2_interface.pop_vlan)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l2_rewrite.group_id)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l2_rewrite.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l2_rewrite.eth_dst)) + return -EMSGSIZE; + if (entry->l2_rewrite.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l2_rewrite.vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + int i; + struct rocker_tlv *group_ids; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, + entry->group_count)) + return -EMSGSIZE; + + group_ids = rocker_tlv_nest_start(desc_info, + ROCKER_TLV_OF_DPA_GROUP_IDS); + if (!group_ids) + return -EMSGSIZE; + + for (i = 0; i < entry->group_count; i++) + /* Note TLV array is 1-based */ + if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, group_ids); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l3_unicast.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l3_unicast.eth_dst)) + return -EMSGSIZE; + if (entry->l3_unicast.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l3_unicast.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, + entry->l3_unicast.ttl_check)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l3_unicast.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_group_tbl_add(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return 
-EMSGSIZE; + + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_group_tbl_del(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +/*************************************************** + * Flow, group, FDB, internal VLAN and neigh tables + ***************************************************/ + +static int rocker_init_tbls(struct rocker *rocker) +{ + hash_init(rocker->flow_tbl); + spin_lock_init(&rocker->flow_tbl_lock); + + hash_init(rocker->group_tbl); + spin_lock_init(&rocker->group_tbl_lock); + + hash_init(rocker->fdb_tbl); + spin_lock_init(&rocker->fdb_tbl_lock); + + hash_init(rocker->internal_vlan_tbl); + spin_lock_init(&rocker->internal_vlan_tbl_lock); + + hash_init(rocker->neigh_tbl); + spin_lock_init(&rocker->neigh_tbl_lock); + + return 0; +} + +static void rocker_free_tbls(struct rocker *rocker) +{ + unsigned long flags; + struct rocker_flow_tbl_entry *flow_entry; + struct rocker_group_tbl_entry *group_entry; + struct rocker_fdb_tbl_entry *fdb_entry; + struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; + struct rocker_neigh_tbl_entry *neigh_entry; + struct hlist_node *tmp; + int bkt; + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) + hash_del(&flow_entry->entry); + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) + hash_del(&group_entry->entry); + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) + hash_del(&fdb_entry->entry); + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); + hash_for_each_safe(rocker->internal_vlan_tbl, bkt, + tmp, internal_vlan_entry, entry) + hash_del(&internal_vlan_entry->entry); + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); + + spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); + hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) + hash_del(&neigh_entry->entry); + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); +} + +static struct rocker_flow_tbl_entry * +rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match) +{ + struct 
rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + + hash_for_each_possible(rocker->flow_tbl, found, + entry, match->key_crc32) { + if (memcmp(&found->key, &match->key, key_len) == 0) + return found; + } + + return NULL; +} + +static int rocker_flow_tbl_add(struct rocker_port *rocker_port, + struct rocker_flow_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long flags; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + match->cookie = found->cookie; + hash_del(&found->entry); + kfree(found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; + } else { + found = match; + found->cookie = rocker->flow_tbl_next_cookie++; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; + } + + hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + return rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_add, + found, NULL, NULL, nowait); +} + +static int rocker_flow_tbl_del(struct rocker_port *rocker_port, + struct rocker_flow_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long flags; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; + } + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + kfree(match); + + if (found) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_flow_tbl_del, + found, NULL, NULL, nowait); + kfree(found); + } + + return err; +} + +static gfp_t rocker_op_flags_gfp(int flags) +{ + return flags & ROCKER_OP_FLAG_NOWAIT ? 
GFP_ATOMIC : GFP_KERNEL; +} + +static int rocker_flow_tbl_do(struct rocker_port *rocker_port, + int flags, struct rocker_flow_tbl_entry *entry) +{ + bool nowait = flags & ROCKER_OP_FLAG_NOWAIT; + + if (flags & ROCKER_OP_FLAG_REMOVE) + return rocker_flow_tbl_del(rocker_port, entry, nowait); + else + return rocker_flow_tbl_add(rocker_port, entry, nowait); +} + +static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, + int flags, u32 in_pport, u32 in_pport_mask, + enum rocker_of_dpa_table_id goto_tbl) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_IG_PORT; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + entry->key.ig_port.in_pport = in_pport; + entry->key.ig_port.in_pport_mask = in_pport_mask; + entry->key.ig_port.goto_tbl = goto_tbl; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, + int flags, u32 in_pport, + __be16 vlan_id, __be16 vlan_id_mask, + enum rocker_of_dpa_table_id goto_tbl, + bool untagged, __be16 new_vlan_id) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_VLAN; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + entry->key.vlan.in_pport = in_pport; + entry->key.vlan.vlan_id = vlan_id; + entry->key.vlan.vlan_id_mask = vlan_id_mask; + entry->key.vlan.goto_tbl = goto_tbl; + + entry->key.vlan.untagged = untagged; + entry->key.vlan.new_vlan_id = new_vlan_id; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, + u32 in_pport, u32 in_pport_mask, + __be16 eth_type, const u8 *eth_dst, + const u8 *eth_dst_mask, __be16 vlan_id, + __be16 vlan_id_mask, bool copy_to_cpu, + int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + if (is_multicast_ether_addr(eth_dst)) { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + } else { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + } + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; + entry->key.term_mac.eth_type = eth_type; + ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); + ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); + entry->key.term_mac.vlan_id = vlan_id; + entry->key.term_mac.vlan_id_mask = vlan_id_mask; + entry->key.term_mac.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, + int flags, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 vlan_id, u32 tunnel_id, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, bool copy_to_cpu) +{ + struct rocker_flow_tbl_entry *entry; + u32 priority; + bool vlan_bridging = !!vlan_id; + bool dflt = !eth_dst || (eth_dst && eth_dst_mask); + bool wild = false; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (eth_dst) { + 
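+		/* Record the requested dst MAC match; together with the
+		 * mask handled below it selects which bridging priority
+		 * this entry gets.
+		 */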
entry->key.bridge.has_eth_dst = 1; + ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); + } + if (eth_dst_mask) { + entry->key.bridge.has_eth_dst_mask = 1; + ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); + if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN)) + wild = true; + } + + priority = ROCKER_PRIORITY_UNKNOWN; + if (vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; + else if (vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; + else if (vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_VLAN; + else if (!vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; + else if (!vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; + else if (!vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_TENANT; + + entry->key.priority = priority; + entry->key.bridge.vlan_id = vlan_id; + entry->key.bridge.tunnel_id = tunnel_id; + entry->key.bridge.goto_tbl = goto_tbl; + entry->key.bridge.group_id = group_id; + entry->key.bridge.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, + __be16 eth_type, __be32 dst, + __be32 dst_mask, u32 priority, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + entry->key.priority = priority; + entry->key.ucast_routing.eth_type = eth_type; + entry->key.ucast_routing.dst4 = dst; + entry->key.ucast_routing.dst4_mask = dst_mask; + entry->key.ucast_routing.goto_tbl = goto_tbl; + entry->key.ucast_routing.group_id = group_id; + entry->key_len = offsetof(struct rocker_flow_tbl_key, + ucast_routing.group_id); + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, + int flags, u32 in_pport, + u32 in_pport_mask, + const u8 *eth_src, const u8 *eth_src_mask, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 eth_type, + __be16 vlan_id, __be16 vlan_id_mask, + u8 ip_proto, u8 ip_proto_mask, + u8 ip_tos, u8 ip_tos_mask, + u32 group_id) +{ + u32 priority; + struct rocker_flow_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + priority = ROCKER_PRIORITY_ACL_NORMAL; + if (eth_dst && eth_dst_mask) { + if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0) + priority = ROCKER_PRIORITY_ACL_DFLT; + else if (is_link_local_ether_addr(eth_dst)) + priority = ROCKER_PRIORITY_ACL_CTRL; + } + + entry->key.priority = priority; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; + + if (eth_src) + ether_addr_copy(entry->key.acl.eth_src, eth_src); + if (eth_src_mask) + ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); + if (eth_dst) + ether_addr_copy(entry->key.acl.eth_dst, eth_dst); + if (eth_dst_mask) + ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); + + entry->key.acl.eth_type = eth_type; + entry->key.acl.vlan_id = vlan_id; + entry->key.acl.vlan_id_mask = vlan_id_mask; + entry->key.acl.ip_proto = ip_proto; + entry->key.acl.ip_proto_mask = ip_proto_mask; + entry->key.acl.ip_tos = ip_tos; + entry->key.acl.ip_tos_mask = ip_tos_mask; + entry->key.acl.group_id = 
group_id; + + return rocker_flow_tbl_do(rocker_port, flags, entry); +} + +static struct rocker_group_tbl_entry * +rocker_group_tbl_find(struct rocker *rocker, + struct rocker_group_tbl_entry *match) +{ + struct rocker_group_tbl_entry *found; + + hash_for_each_possible(rocker->group_tbl, found, + entry, match->group_id) { + if (found->group_id == match->group_id) + return found; + } + + return NULL; +} + +static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry) +{ + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + kfree(entry->group_ids); + break; + default: + break; + } + kfree(entry); +} + +static int rocker_group_tbl_add(struct rocker_port *rocker_port, + struct rocker_group_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long flags; + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + hash_del(&found->entry); + rocker_group_tbl_entry_free(found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; + } else { + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; + } + + hash_add(rocker->group_tbl, &found->entry, found->group_id); + + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + return rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_add, + found, NULL, NULL, nowait); +} + +static int rocker_group_tbl_del(struct rocker_port *rocker_port, + struct rocker_group_tbl_entry *match, + bool nowait) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; + } + + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + rocker_group_tbl_entry_free(match); + + if (found) { + err = rocker_cmd_exec(rocker, rocker_port, + rocker_cmd_group_tbl_del, + found, NULL, NULL, nowait); + rocker_group_tbl_entry_free(found); + } + + return err; +} + +static int rocker_group_tbl_do(struct rocker_port *rocker_port, + int flags, struct rocker_group_tbl_entry *entry) +{ + bool nowait = flags & ROCKER_OP_FLAG_NOWAIT; + + if (flags & ROCKER_OP_FLAG_REMOVE) + return rocker_group_tbl_del(rocker_port, entry, nowait); + else + return rocker_group_tbl_add(rocker_port, entry, nowait); +} + +static int rocker_group_l2_interface(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + u32 out_pport, int pop_vlan) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + entry->l2_interface.pop_vlan = pop_vlan; + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, + int flags, u8 group_count, + u32 *group_ids, u32 group_id) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = group_id; + entry->group_count = group_count; + + entry->group_ids = kcalloc(group_count, sizeof(u32), + rocker_op_flags_gfp(flags)); + if (!entry->group_ids) { + kfree(entry); + return -ENOMEM; + } + 
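+	/* Keep a private copy of the caller's group ID array; the
+	 * entry owns its own allocation.
+	 */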
memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static int rocker_group_l2_flood(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + u8 group_count, u32 *group_ids, + u32 group_id) +{ + return rocker_group_l2_fan_out(rocker_port, flags, + group_count, group_ids, + group_id); +} + +static int rocker_group_l3_unicast(struct rocker_port *rocker_port, + int flags, u32 index, u8 *src_mac, + u8 *dst_mac, __be16 vlan_id, + bool ttl_check, u32 pport) +{ + struct rocker_group_tbl_entry *entry; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L3_UNICAST(index); + if (src_mac) + ether_addr_copy(entry->l3_unicast.eth_src, src_mac); + if (dst_mac) + ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); + entry->l3_unicast.vlan_id = vlan_id; + entry->l3_unicast.ttl_check = ttl_check; + entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); + + return rocker_group_tbl_do(rocker_port, flags, entry); +} + +static struct rocker_neigh_tbl_entry * + rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr) +{ + struct rocker_neigh_tbl_entry *found; + + hash_for_each_possible(rocker->neigh_tbl, found, + entry, be32_to_cpu(ip_addr)) + if (found->ip_addr == ip_addr) + return found; + + return NULL; +} + +static void _rocker_neigh_add(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry) +{ + entry->index = rocker->neigh_tbl_next_index++; + entry->ref_count++; + hash_add(rocker->neigh_tbl, &entry->entry, + be32_to_cpu(entry->ip_addr)); +} + +static void _rocker_neigh_del(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry) +{ + if (--entry->ref_count == 0) { + hash_del(&entry->entry); + kfree(entry); + } +} + +static void _rocker_neigh_update(struct rocker *rocker, + struct rocker_neigh_tbl_entry *entry, + u8 *eth_dst, bool ttl_check) +{ + if (eth_dst) { + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = ttl_check; + } else { + entry->ref_count++; + } +} + +static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, + int flags, __be32 ip_addr, u8 *eth_dst) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_neigh_tbl_entry *entry; + struct rocker_neigh_tbl_entry *found; + unsigned long lock_flags; + __be16 eth_type = htons(ETH_P_IP); + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + u32 priority = 0; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + bool updating; + bool removing; + int err = 0; + + entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); + + found = rocker_neigh_tbl_find(rocker, ip_addr); + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = rocker_port->dev; + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = true; + _rocker_neigh_add(rocker, entry); + } else if (removing) { + memcpy(entry, found, sizeof(*entry)); + _rocker_neigh_del(rocker, found); + } else if (updating) { + _rocker_neigh_update(rocker, found, eth_dst, true); + memcpy(entry, found, sizeof(*entry)); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); + + if (err) + goto err_out; + + /* For each active neighbor, we have an L3 unicast group and + * a /32 route to the neighbor, which uses 
the L3 unicast
+	 * group. The L3 unicast group can also be referred to by
+	 * other routes' nexthops.
+	 */
+
+	err = rocker_group_l3_unicast(rocker_port, flags,
+				      entry->index,
+				      rocker_port->dev->dev_addr,
+				      entry->eth_dst,
+				      rocker_port->internal_vlan_id,
+				      entry->ttl_check,
+				      rocker_port->pport);
+	if (err) {
+		netdev_err(rocker_port->dev,
+			   "Error (%d) L3 unicast group index %d\n",
+			   err, entry->index);
+		goto err_out;
+	}
+
+	if (adding || removing) {
+		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
+		err = rocker_flow_tbl_ucast4_routing(rocker_port,
+						     eth_type, ip_addr,
+						     inet_make_mask(32),
+						     priority, goto_tbl,
+						     group_id, flags);
+
+		if (err)
+			netdev_err(rocker_port->dev,
+				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
+				   err, &entry->ip_addr, group_id);
+	}
+
+err_out:
+	if (!adding)
+		kfree(entry);
+
+	return err;
+}
+
+static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
+				    __be32 ip_addr)
+{
+	struct net_device *dev = rocker_port->dev;
+	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
+	int err = 0;
+
+	if (!n) {
+		n = neigh_create(&arp_tbl, &ip_addr, dev);
+		if (IS_ERR(n))
+			return PTR_ERR(n);
+	}
+
+	/* If the neigh is already resolved, then go ahead and
+	 * install the entry, otherwise start the ARP process to
+	 * resolve the neigh.
+	 */
+
+	if (n->nud_state & NUD_VALID)
+		err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+	else
+		neigh_event_send(n, NULL);
+
+	neigh_release(n);
+	return err;
+}
+
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+			       __be32 ip_addr, u32 *index)
+{
+	struct rocker *rocker = rocker_port->rocker;
+	struct rocker_neigh_tbl_entry *entry;
+	struct rocker_neigh_tbl_entry *found;
+	unsigned long lock_flags;
+	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
+	bool updating;
+	bool removing;
+	bool resolved = true;
+	int err = 0;
+
+	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+	if (!entry)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
+
+	found = rocker_neigh_tbl_find(rocker, ip_addr);
+	if (found)
+		*index = found->index;
+
+	updating = found && adding;
+	removing = found && !adding;
+	adding = !found && adding;
+
+	if (adding) {
+		entry->ip_addr = ip_addr;
+		entry->dev = rocker_port->dev;
+		_rocker_neigh_add(rocker, entry);
+		*index = entry->index;
+		resolved = false;
+	} else if (removing) {
+		_rocker_neigh_del(rocker, found);
+	} else if (updating) {
+		_rocker_neigh_update(rocker, found, NULL, false);
+		resolved = !is_zero_ether_addr(found->eth_dst);
+	} else {
+		err = -ENOENT;
+	}
+
+	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
+
+	if (!adding)
+		kfree(entry);
+
+	if (err)
+		return err;
+
+	/* Resolved means neigh ip_addr is resolved to neigh mac. */
+
+	if (!resolved)
+		err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+
+	return err;
+}
+
+static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+					int flags, __be16 vlan_id)
+{
+	struct rocker_port *p;
+	struct rocker *rocker = rocker_port->rocker;
+	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+	u32 *group_ids;
+	u8 group_count = 0;
+	int err = 0;
+	int i;
+
+	group_ids = kcalloc(rocker->port_count, sizeof(u32),
+			    rocker_op_flags_gfp(flags));
+	if (!group_ids)
+		return -ENOMEM;
+
+	/* Adjust the flood group for this VLAN. The flood group
+	 * references an L2 interface group for each port in this
+	 * VLAN.
+ */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (!rocker_port_is_bridged(p)) + continue; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { + group_ids[group_count++] = + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); + } + } + + /* If there are no bridged ports in this VLAN, we're done */ + if (group_count == 0) + goto no_ports_in_vlan; + + err = rocker_group_l2_flood(rocker_port, flags, vlan_id, + group_count, group_ids, + group_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + +no_ports_in_vlan: + kfree(group_ids); + return err; +} + +static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, + int flags, __be16 vlan_id, + bool pop_vlan) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_port *p; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + u32 out_pport; + int ref = 0; + int err; + int i; + + /* An L2 interface group for this port in this VLAN, but + * only when port STP state is LEARNING|FORWARDING. + */ + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) { + out_pport = rocker_port->pport; + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_pport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + /* An L2 interface group for this VLAN to CPU port. + * Add when first port joins this VLAN and destroy when + * last port leaves this VLAN. + */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) + ref++; + } + + if ((!adding || ref != 1) && (adding || ref != 0)) + return 0; + + out_pport = 0; + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_pport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for CPU port\n", err); + return err; + } + + return 0; +} + +static struct rocker_ctrl { + const u8 *eth_dst; + const u8 *eth_dst_mask; + __be16 eth_type; + bool acl; + bool bridge; + bool term; + bool copy_to_cpu; +} rocker_ctrls[] = { + [ROCKER_CTRL_LINK_LOCAL_MCAST] = { + /* pass link local multicast pkts up to CPU for filtering */ + .eth_dst = ll_mac, + .eth_dst_mask = ll_mask, + .acl = true, + }, + [ROCKER_CTRL_LOCAL_ARP] = { + /* pass local ARP pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .eth_type = htons(ETH_P_ARP), + .acl = true, + }, + [ROCKER_CTRL_IPV4_MCAST] = { + /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ + .eth_dst = ipv4_mcast, + .eth_dst_mask = ipv4_mask, + .eth_type = htons(ETH_P_IP), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_IPV6_MCAST] = { + /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ + .eth_dst = ipv6_mcast, + .eth_dst_mask = ipv6_mask, + .eth_type = htons(ETH_P_IPV6), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_DFLT_BRIDGING] = { + /* flood any pkts on vlan */ + .bridge = true, + .copy_to_cpu = true, + }, +}; + +static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + u32 in_pport = rocker_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; + u8 *eth_src = NULL; + u8 *eth_src_mask = NULL; + __be16 vlan_id_mask = htons(0xffff); + u8 ip_proto = 0; + u8 ip_proto_mask = 0; + u8 ip_tos = 0; + u8 ip_tos_mask = 0; + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + int err; + + err = 
rocker_flow_tbl_acl(rocker_port, flags, + in_pport, in_pport_mask, + eth_src, eth_src_mask, + ctrl->eth_dst, ctrl->eth_dst_mask, + ctrl->eth_type, + vlan_id, vlan_id_mask, + ip_proto, ip_proto_mask, + ip_tos, ip_tos_mask, + group_id); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 tunnel_id = 0; + int err; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + err = rocker_flow_tbl_bridge(rocker_port, flags, + ctrl->eth_dst, ctrl->eth_dst_mask, + vlan_id, tunnel_id, + goto_tbl, group_id, ctrl->copy_to_cpu); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, + int flags, struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 vlan_id_mask = htons(0xffff); + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->pport, in_pport_mask, + ctrl->eth_type, ctrl->eth_dst, + ctrl->eth_dst_mask, vlan_id, + vlan_id_mask, ctrl->copy_to_cpu, + flags); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags, + struct rocker_ctrl *ctrl, __be16 vlan_id) +{ + if (ctrl->acl) + return rocker_port_ctrl_vlan_acl(rocker_port, flags, + ctrl, vlan_id); + if (ctrl->bridge) + return rocker_port_ctrl_vlan_bridge(rocker_port, flags, + ctrl, vlan_id); + + if (ctrl->term) + return rocker_port_ctrl_vlan_term(rocker_port, flags, + ctrl, vlan_id); + + return -EOPNOTSUPP; +} + +static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + int err = 0; + int i; + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (rocker_port->ctrls[i]) { + err = rocker_port_ctrl_vlan(rocker_port, flags, + &rocker_ctrls[i], vlan_id); + if (err) + return err; + } + } + + return err; +} + +static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags, + struct rocker_ctrl *ctrl) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + err = rocker_port_ctrl_vlan(rocker_port, flags, + ctrl, htons(vid)); + if (err) + break; + } + + return err; +} + +static int rocker_port_vlan(struct rocker_port *rocker_port, int flags, + u16 vid) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + u32 in_pport = rocker_port->pport; + __be16 vlan_id = htons(vid); + __be16 vlan_id_mask = htons(0xffff); + __be16 internal_vlan_id; + bool untagged; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + int err; + + internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); + + if (adding && test_and_set_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already added */ + else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already removed */ + + if (adding) { + err = rocker_port_ctrl_vlan_add(rocker_port, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port ctrl vlan add\n", err); + return err; + } + } + + 
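/* For both the add and the remove case, the remaining steps run in + * pipeline order: the port's L2 interface group(s), then the + * per-VLAN flood group, and finally the VLAN table entry that maps + * (in_pport, vid) to internal_vlan_id and continues to the + * TERMINATION_MAC table. + */ + 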
err = rocker_port_vlan_l2_groups(rocker_port, flags, + internal_vlan_id, untagged); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 groups\n", err); + return err; + } + + err = rocker_port_vlan_flood_group(rocker_port, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + return err; + } + + err = rocker_flow_tbl_vlan(rocker_port, flags, + in_pport, vlan_id, vlan_id_mask, + goto_tbl, untagged, internal_vlan_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN table\n", err); + + return err; +} + +static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags) +{ + enum rocker_of_dpa_table_id goto_tbl; + u32 in_pport; + u32 in_pport_mask; + int err; + + /* Normal Ethernet Frames. Matches pkts from any local physical + * ports. Goto VLAN tbl. + */ + + in_pport = 0; + in_pport_mask = 0xffff0000; + goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; + + err = rocker_flow_tbl_ig_port(rocker_port, flags, + in_pport, in_pport_mask, + goto_tbl); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) ingress port table entry\n", err); + + return err; +} + +struct rocker_fdb_learn_work { + struct work_struct work; + struct net_device *dev; + int flags; + u8 addr[ETH_ALEN]; + u16 vid; +}; + +static void rocker_port_fdb_learn_work(struct work_struct *work) +{ + struct rocker_fdb_learn_work *lw = + container_of(work, struct rocker_fdb_learn_work, work); + bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); + bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); + struct netdev_switch_notifier_fdb_info info; + + info.addr = lw->addr; + info.vid = lw->vid; + + if (learned && removing) + call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL, + lw->dev, &info.info); + else if (learned && !removing) + call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD, + lw->dev, &info.info); + + kfree(work); +} + +static int rocker_port_fdb_learn(struct rocker_port *rocker_port, + int flags, const u8 *addr, __be16 vlan_id) +{ + struct rocker_fdb_learn_work *lw; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 out_pport = rocker_port->pport; + u32 tunnel_id = 0; + u32 group_id = ROCKER_GROUP_NONE; + bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); + bool copy_to_cpu = false; + int err; + + if (rocker_port_is_bridged(rocker_port)) + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + + if (!(flags & ROCKER_OP_FLAG_REFRESH)) { + err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL, + vlan_id, tunnel_id, goto_tbl, + group_id, copy_to_cpu); + if (err) + return err; + } + + if (!syncing) + return 0; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags)); + if (!lw) + return -ENOMEM; + + INIT_WORK(&lw->work, rocker_port_fdb_learn_work); + + lw->dev = rocker_port->dev; + lw->flags = flags; + ether_addr_copy(lw->addr, addr); + lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); + + schedule_work(&lw->work); + + return 0; +} + +static struct rocker_fdb_tbl_entry * +rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match) +{ + struct rocker_fdb_tbl_entry *found; + + hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + + return NULL; +} + +static int rocker_port_fdb(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id, int flags) +{ + struct rocker 
*rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *fdb; + struct rocker_fdb_tbl_entry *found; + bool removing = (flags & ROCKER_OP_FLAG_REMOVE); + unsigned long lock_flags; + + fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags)); + if (!fdb) + return -ENOMEM; + + fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); + fdb->key.pport = rocker_port->pport; + ether_addr_copy(fdb->key.addr, addr); + fdb->key.vlan_id = vlan_id; + fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + found = rocker_fdb_tbl_find(rocker, fdb); + + if (removing && found) { + kfree(fdb); + hash_del(&found->entry); + } else if (!removing && !found) { + hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32); + } + + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + /* Check if adding and already exists, or removing and can't find */ + if (!found != !removing) { + kfree(fdb); + if (!found && removing) + return 0; + /* Refreshing existing to update aging timers */ + flags |= ROCKER_OP_FLAG_REFRESH; + } + + return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id); +} + +static int rocker_port_fdb_flush(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + unsigned long lock_flags; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; + struct hlist_node *tmp; + int bkt; + int err = 0; + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) + return 0; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.pport != rocker_port->pport) + continue; + if (!found->learned) + continue; + err = rocker_port_fdb_learn(rocker_port, flags, + found->key.addr, + found->key.vlan_id); + if (err) + goto err_out; + hash_del(&found->entry); + } + +err_out: + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + return err; +} + +static int rocker_port_router_mac(struct rocker_port *rocker_port, + int flags, __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 eth_type; + const u8 *dst_mac_mask = ff_mac; + __be16 vlan_id_mask = htons(0xffff); + bool copy_to_cpu = false; + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + eth_type = htons(ETH_P_IP); + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->pport, in_pport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + if (err) + return err; + + eth_type = htons(ETH_P_IPV6); + err = rocker_flow_tbl_term_mac(rocker_port, + rocker_port->pport, in_pport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + + return err; +} + +static int rocker_port_fwding(struct rocker_port *rocker_port) +{ + bool pop_vlan; + u32 out_pport; + __be16 vlan_id; + u16 vid; + int flags = ROCKER_OP_FLAG_NOWAIT; + int err; + + /* Port will be forwarding-enabled if its STP state is LEARNING + * or FORWARDING. Traffic from CPU can still egress, regardless of + * port STP state. Use L2 interface group on port VLANs as a way + * to toggle port forwarding: if forwarding is disabled, L2 + * interface group will not exist. 
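+ * Concretely, the loop below re-issues rocker_group_l2_interface() + * for every VID set in vlan_bitmap, with ROCKER_OP_FLAG_REMOVE added + * to flags when the port is not in LEARNING/FORWARDING, so the + * groups are deleted rather than (re)added.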
+ */ + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + flags |= ROCKER_OP_FLAG_REMOVE; + + out_pport = rocker_port->pport; + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + vlan_id = htons(vid); + pop_vlan = rocker_vlan_id_is_internal(vlan_id); + err = rocker_group_l2_interface(rocker_port, flags, + vlan_id, out_pport, + pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + return 0; +} + +static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state) +{ + bool want[ROCKER_CTRL_MAX] = { 0, }; + int flags; + int err; + int i; + + if (rocker_port->stp_state == state) + return 0; + + rocker_port->stp_state = state; + + switch (state) { + case BR_STATE_DISABLED: + /* port is completely disabled */ + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + want[ROCKER_CTRL_IPV4_MCAST] = true; + want[ROCKER_CTRL_IPV6_MCAST] = true; + if (rocker_port_is_bridged(rocker_port)) + want[ROCKER_CTRL_DFLT_BRIDGING] = true; + else + want[ROCKER_CTRL_LOCAL_ARP] = true; + break; + } + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (want[i] != rocker_port->ctrls[i]) { + flags = ROCKER_OP_FLAG_NOWAIT | + (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE); + err = rocker_port_ctrl(rocker_port, flags, + &rocker_ctrls[i]); + if (err) + return err; + rocker_port->ctrls[i] = want[i]; + } + } + + err = rocker_port_fdb_flush(rocker_port); + if (err) + return err; + + return rocker_port_fwding(rocker_port); +} + +static int rocker_port_fwd_enable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING); +} + +static int rocker_port_fwd_disable(struct rocker_port *rocker_port) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED); +} + +static struct rocker_internal_vlan_tbl_entry * +rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex) +{ + struct rocker_internal_vlan_tbl_entry *found; + + hash_for_each_possible(rocker->internal_vlan_tbl, found, + entry, ifindex) { + if (found->ifindex == ifindex) + return found; + } + + return NULL; +} + +static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *entry; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + entry->ifindex = ifindex; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (found) { + kfree(entry); + goto found; + } + + found = entry; + hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); + + for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { + if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) + continue; + found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); + goto found; + } + + 
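/* Reaching this point means every bit in internal_vlan_bitmap is + * already set. The entry stays in the table and found->vlan_id is + * still 0 from the kzalloc() above, so the caller ends up with + * VLAN ID 0. + */ + 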
netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); + +found: + found->ref_count++; + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); + + return found->vlan_id; +} + +static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + unsigned long bit; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (!found) { + netdev_err(rocker_port->dev, + "ifindex (%d) not found in internal VLAN tbl\n", + ifindex); + goto not_found; + } + + if (--found->ref_count <= 0) { + bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; + clear_bit(bit, rocker->internal_vlan_bitmap); + hash_del(&found->entry); + kfree(found); + } + +not_found: + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); +} + +static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst, + int dst_len, struct fib_info *fi, u32 tb_id, + int flags) +{ + struct fib_nh *nh; + __be16 eth_type = htons(ETH_P_IP); + __be32 dst_mask = inet_make_mask(dst_len); + __be16 internal_vlan_id = rocker_port->internal_vlan_id; + u32 priority = fi->fib_priority; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + bool nh_on_port; + bool has_gw; + u32 index; + int err; + + /* XXX support ECMP */ + + nh = fi->fib_nh; + nh_on_port = (fi->fib_dev == rocker_port->dev); + has_gw = !!nh->nh_gw; + + if (has_gw && nh_on_port) { + err = rocker_port_ipv4_nh(rocker_port, flags, + nh->nh_gw, &index); + if (err) + return err; + + group_id = ROCKER_GROUP_L3_UNICAST(index); + } else { + /* Send to CPU for processing */ + group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); + } + + err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst, + dst_mask, priority, goto_tbl, + group_id, flags); + if (err) + netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", + err, &dst); + + return err; +} + +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; + + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } + + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } + + err = rocker_port_fwd_enable(rocker_port); + if (err) + goto err_fwd_enable; + + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); + return 0; + +err_fwd_enable: + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; +} + +static int rocker_port_stop(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + 
napi_disable(&rocker_port->napi_tx); + rocker_port_fwd_disable(rocker_port); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); + + return 0; +} + +static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } +} + +static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) +{ + struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; + + dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; + } + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); + return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; +} + +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (unlikely(!desc_info)) { + if (net_ratelimit()) + netdev_err(dev, "tx ring full when queue awake\n"); + return NETDEV_TX_BUSY; + } + + rocker_desc_cookie_ptr_set(desc_info, skb); + + frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); + if (!frags) + goto out; + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb->data, skb_headlen(skb)); + if (err) + goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) + goto nest_cancel; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb_frag_address(frag), + skb_frag_size(frag)); + if (err) + goto unmap_frags; + } + rocker_tlv_nest_end(desc_info, frags); + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); + + desc_info = 
rocker_desc_head_get(&rocker_port->tx_ring); + if (!desc_info) + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +unmap_frags: + rocker_tx_desc_frags_unmap(rocker_port, desc_info); +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frags); +out: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +static int rocker_port_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int rocker_port_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_vlan(rocker_port, 0, vid); + if (err) + return err; + + return rocker_port_router_mac(rocker_port, 0, htons(vid)); +} + +static int rocker_port_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE, + htons(vid)); + if (err) + return err; + + return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid); +} + +static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 nlm_flags) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL); + int flags = 0; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL); + int flags = ROCKER_OP_FLAG_REMOVE; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, addr, vlan_id, flags); +} + +static int rocker_fdb_fill_info(struct sk_buff *skb, + struct rocker_port *rocker_port, + const unsigned char *addr, u16 vid, + u32 portid, u32 seq, int type, + unsigned int flags) +{ + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = rocker_port->dev->ifindex; + ndm->ndm_state = NUD_REACHABLE; + + if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + if (vid && nla_put_u16(skb, NDA_VLAN, vid)) + goto nla_put_failure; + + nlmsg_end(skb, nlh); + return 0; + +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; +} + +static int rocker_port_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, + int idx) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + struct hlist_node *tmp; + int bkt; + unsigned long lock_flags; + const unsigned char *addr; + u16 vid; + int err; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + 
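/* Walk the software FDB under fdb_tbl_lock. Entries with idx below + * cb->args[0] were already dumped on an earlier pass and are + * skipped; idx is returned so the next dump callback resumes where + * this one stopped. + */ + 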
hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.pport != rocker_port->pport) + continue; + if (idx < cb->args[0]) + goto skip; + addr = found->key.addr; + vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id); + err = rocker_fdb_fill_info(skb, rocker_port, addr, vid, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + RTM_NEWNEIGH, NLM_F_MULTI); + if (err < 0) + break; +skip: + ++idx; + } + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + return idx; +} + +static int rocker_port_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, u16 flags) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct nlattr *protinfo; + struct nlattr *attr; + int err; + + protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), + IFLA_PROTINFO); + if (protinfo) { + attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING); + if (attr) { + if (nla_len(attr) < sizeof(u8)) + return -EINVAL; + + if (nla_get_u8(attr)) + rocker_port->brport_flags |= BR_LEARNING; + else + rocker_port->brport_flags &= ~BR_LEARNING; + err = rocker_port_set_learning(rocker_port); + if (err) + return err; + } + attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC); + if (attr) { + if (nla_len(attr) < sizeof(u8)) + return -EINVAL; + + if (nla_get_u8(attr)) + rocker_port->brport_flags |= BR_LEARNING_SYNC; + else + rocker_port->brport_flags &= ~BR_LEARNING_SYNC; + } + } + + return 0; +} + +static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask, int nlflags) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + u16 mode = BRIDGE_MODE_UNDEF; + u32 mask = BR_LEARNING | BR_LEARNING_SYNC; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, + rocker_port->brport_flags, mask, + nlflags); +} + +static int rocker_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct port_name name = { .buf = buf, .len = len }; + int err; + + err = rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_phys_name_proc, + &name, false); + + return err ? 
-EOPNOTSUPP : 0; +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid, + .ndo_fdb_add = rocker_port_fdb_add, + .ndo_fdb_del = rocker_port_fdb_del, + .ndo_fdb_dump = rocker_port_fdb_dump, + .ndo_bridge_setlink = rocker_port_bridge_setlink, + .ndo_bridge_getlink = rocker_port_bridge_getlink, + .ndo_get_phys_port_name = rocker_port_get_phys_port_name, +}; + +/******************** + * swdev interface + ********************/ + +static int rocker_port_swdev_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + + psid->id_len = sizeof(rocker->hw.id); + memcpy(&psid->id, &rocker->hw.id, psid->id_len); + return 0; +} + +static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_port_stp_update(rocker_port, state); +} + +static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, + u32 nlflags, u32 tb_id) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = 0; + + return rocker_port_fib_ipv4(rocker_port, dst, dst_len, + fi, tb_id, flags); +} + +static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev, + __be32 dst, int dst_len, + struct fib_info *fi, + u8 tos, u8 type, u32 tb_id) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = ROCKER_OP_FLAG_REMOVE; + + return rocker_port_fib_ipv4(rocker_port, dst, dst_len, + fi, tb_id, flags); +} + +static const struct swdev_ops rocker_port_swdev_ops = { + .swdev_parent_id_get = rocker_port_swdev_parent_id_get, + .swdev_port_stp_update = rocker_port_swdev_port_stp_update, + .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add, + .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del, +}; + +/******************** + * ethtool interface + ********************/ + +static int rocker_port_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); +} + +static int rocker_port_set_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); +} + +static void rocker_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static struct rocker_port_stats { + char str[ETH_GSTRING_LEN]; + int type; +} rocker_port_stats[] = { + { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, + { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, + { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, + { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, + + { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, + { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, + { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, + { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, +}; + +#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) + +static 
void rocker_port_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int +rocker_cmd_get_port_stats_prep(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_stats; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) + return -EMSGSIZE; + + cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_stats) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, cmd_stats); + + return 0; +} + +static int +rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; + struct rocker_tlv *pattr; + u32 pport; + u64 *data = priv; + int i; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + + if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) + return -EIO; + + pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); + if (pport != rocker_port->pport) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + pattr = stats_attrs[rocker_port_stats[i].type]; + if (!pattr) + continue; + + data[i] = rocker_tlv_get_u64(pattr); + } + + return 0; +} + +static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, + void *priv) +{ + return rocker_cmd_exec(rocker_port->rocker, rocker_port, + rocker_cmd_get_port_stats_prep, NULL, + rocker_cmd_get_port_stats_ethtool_proc, + priv, false); +} + +static void rocker_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { + int i; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) + data[i] = 0; + } + + return; +} + +static int rocker_port_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ROCKER_PORT_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops rocker_port_ethtool_ops = { + .get_settings = rocker_port_get_settings, + .set_settings = rocker_port_set_settings, + .get_drvinfo = rocker_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = rocker_port_get_strings, + .get_ethtool_stats = rocker_port_get_stats, + .get_sset_count = rocker_port_get_sset_count, +}; + +/***************** + * NAPI interface + *****************/ + +static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_tx); +} + +static int rocker_port_poll_tx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Cleanup tx descriptors */ + while ((desc_info = 
rocker_desc_tail_get(&rocker_port->tx_ring))) { + struct sk_buff *skb; + + err = rocker_desc_err(desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "tx desc received with err %d\n", + err); + rocker_tx_desc_frags_unmap(rocker_port, desc_info); + + skb = rocker_desc_cookie_ptr_get(desc_info); + if (err == 0) { + rocker_port->dev->stats.tx_packets++; + rocker_port->dev->stats.tx_bytes += skb->len; + } else + rocker_port->dev->stats.tx_errors++; + + dev_kfree_skb_any(skb); + credits++; + } + + if (credits && netif_queue_stopped(rocker_port->dev)) + netif_wake_queue(rocker_port->dev); + + napi_complete(napi); + rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); + + return 0; +} + +static int rocker_port_rx_proc(struct rocker *rocker, + struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + size_t rx_len; + + if (!skb) + return -ENOENT; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) + return -EINVAL; + + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + + rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); + skb_put(skb, rx_len); + skb->protocol = eth_type_trans(skb, rocker_port->dev); + + rocker_port->dev->stats.rx_packets++; + rocker_port->dev->stats.rx_bytes += skb->len; + + netif_receive_skb(skb); + + return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); +} + +static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_rx); +} + +static int rocker_port_poll_rx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Process rx descriptors */ + while (credits < budget && + (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "rx desc received with err %d\n", + err); + } else { + err = rocker_port_rx_proc(rocker, rocker_port, + desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "rx processing failed with err %d\n", + err); + } + if (err) + rocker_port->dev->stats.rx_errors++; + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); + credits++; + } + + if (credits < budget) + napi_complete(napi); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + + return credits; +} + +/***************** + * PCI driver ops + *****************/ + +static void rocker_carrier_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); + bool link_up; + + link_up = link_status & (1 << rocker_port->pport); + if (link_up) + netif_carrier_on(rocker_port->dev); + else + netif_carrier_off(rocker_port->dev); +} + +static void rocker_remove_ports(struct rocker *rocker) +{ + struct rocker_port *rocker_port; + int i; + + for (i = 0; i < rocker->port_count; i++) { + rocker_port = rocker->ports[i]; + rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE); + unregister_netdev(rocker_port->dev); + } + kfree(rocker->ports); +} + +static void rocker_port_dev_addr_init(struct rocker *rocker, + struct rocker_port *rocker_port) +{ + struct pci_dev 
*pdev = rocker->pdev; + int err; + + err = rocker_cmd_get_port_settings_macaddr(rocker_port, + rocker_port->dev->dev_addr); + if (err) { + dev_warn(&pdev->dev, "failed to get mac address, using random\n"); + eth_hw_addr_random(rocker_port->dev); + } +} + +static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) +{ + struct pci_dev *pdev = rocker->pdev; + struct rocker_port *rocker_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct rocker_port)); + if (!dev) + return -ENOMEM; + rocker_port = netdev_priv(dev); + rocker_port->dev = dev; + rocker_port->rocker = rocker; + rocker_port->port_number = port_number; + rocker_port->pport = port_number + 1; + rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; + + rocker_port_dev_addr_init(rocker, rocker_port); + dev->netdev_ops = &rocker_port_netdev_ops; + dev->ethtool_ops = &rocker_port_ethtool_ops; + dev->swdev_ops = &rocker_port_swdev_ops; + netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + NAPI_POLL_WEIGHT); + netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, + NAPI_POLL_WEIGHT); + rocker_carrier_init(rocker_port); + + dev->features |= NETIF_F_NETNS_LOCAL | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_SWITCH_OFFLOAD; + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + rocker->ports[port_number] = rocker_port; + + rocker_port_set_learning(rocker_port); + + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); + err = rocker_port_ig_tbl(rocker_port, 0); + if (err) { + dev_err(&pdev->dev, "install ig port table failed\n"); + goto err_port_ig_tbl; + } + + return 0; + +err_port_ig_tbl: + unregister_netdev(dev); +err_register_netdev: + free_netdev(dev); + return err; +} + +static int rocker_probe_ports(struct rocker *rocker) +{ + int i; + size_t alloc_size; + int err; + + alloc_size = sizeof(struct rocker_port *) * rocker->port_count; + rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; + for (i = 0; i < rocker->port_count; i++) { + err = rocker_probe_port(rocker, i); + if (err) + goto remove_ports; + } + return 0; + +remove_ports: + rocker_remove_ports(rocker); + return err; +} + +static int rocker_msix_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int msix_entries; + int i; + int err; + + msix_entries = pci_msix_vec_count(pdev); + if (msix_entries < 0) + return msix_entries; + + if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) + return -EINVAL; + + rocker->msix_entries = kmalloc_array(msix_entries, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!rocker->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_entries; i++) + rocker->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); + if (err < 0) + goto err_enable_msix; + + return 0; + +err_enable_msix: + kfree(rocker->msix_entries); + return err; +} + +static void rocker_msix_fini(struct rocker *rocker) +{ + pci_disable_msix(rocker->pdev); + kfree(rocker->msix_entries); +} + +static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rocker *rocker; + int err; + + rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); + if (!rocker) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, rocker_driver_name); + 
if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; + goto err_pci_resource_len_check; + } + + rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rocker->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + + rocker->pdev = pdev; + pci_set_drvdata(pdev, rocker); + + rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); + + err = rocker_msix_init(rocker); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + err = rocker_basic_hw_test(rocker); + if (err) { + dev_err(&pdev->dev, "basic hw test failed\n"); + goto err_basic_hw_test; + } + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + + err = rocker_dma_rings_init(rocker); + if (err) + goto err_dma_rings_init; + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), + rocker_cmd_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign cmd irq\n"); + goto err_request_cmd_irq; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), + rocker_event_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign event irq\n"); + goto err_request_event_irq; + } + + rocker->hw.id = rocker_read64(rocker, SWITCH_ID); + + err = rocker_init_tbls(rocker); + if (err) { + dev_err(&pdev->dev, "cannot init rocker tables\n"); + goto err_init_tbls; + } + + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + + dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id); + + return 0; + +err_probe_ports: + rocker_free_tbls(rocker); +err_init_tbls: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); +err_request_event_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); +err_request_cmd_irq: + rocker_dma_rings_fini(rocker); +err_dma_rings_init: +err_basic_hw_test: + rocker_msix_fini(rocker); +err_msix_init: + iounmap(rocker->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(rocker); + return err; +} + +static void rocker_remove(struct pci_dev *pdev) +{ + struct rocker *rocker = pci_get_drvdata(pdev); + + rocker_free_tbls(rocker); + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + rocker_remove_ports(rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); + rocker_dma_rings_fini(rocker); + rocker_msix_fini(rocker); + iounmap(rocker->hw_addr); + pci_release_regions(rocker->pdev); + pci_disable_device(rocker->pdev); + kfree(rocker); +} + +static struct pci_driver rocker_pci_driver = { + .name = rocker_driver_name, + .id_table = rocker_pci_id_table, + .probe = rocker_probe, + 
.remove = rocker_remove, +}; + +/************************************ + * Net device notifier event handler + ************************************/ + +static bool rocker_port_dev_check(struct net_device *dev) +{ + return dev->netdev_ops == &rocker_port_netdev_ops; +} + +static int rocker_port_bridge_join(struct rocker_port *rocker_port, + struct net_device *bridge) +{ + int err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->dev->ifindex); + + rocker_port->bridge_dev = bridge; + + /* Use bridge internal VLAN ID for untagged pkts */ + err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0); + if (err) + return err; + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, + bridge->ifindex); + return rocker_port_vlan(rocker_port, 0, 0); +} + +static int rocker_port_bridge_leave(struct rocker_port *rocker_port) +{ + int err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->bridge_dev->ifindex); + + rocker_port->bridge_dev = NULL; + + /* Use port internal VLAN ID for untagged pkts */ + err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0); + if (err) + return err; + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, + rocker_port->dev->ifindex); + err = rocker_port_vlan(rocker_port, 0, 0); + if (err) + return err; + + if (rocker_port->dev->flags & IFF_UP) + err = rocker_port_fwd_enable(rocker_port); + + return err; +} + +static int rocker_port_master_changed(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct net_device *master = netdev_master_upper_dev_get(dev); + int err = 0; + + /* There are currently three cases handled here: + * 1. Joining a bridge + * 2. Leaving a previously joined bridge + * 3. Other, e.g. being added to or removed from a bond or openvswitch, + * in which case nothing is done + */ + if (master && master->rtnl_link_ops && + !strcmp(master->rtnl_link_ops->kind, "bridge")) + err = rocker_port_bridge_join(rocker_port, master); + else if (rocker_port_is_bridged(rocker_port)) + err = rocker_port_bridge_leave(rocker_port); + + return err; +} + +static int rocker_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + int err; + + switch (event) { + case NETDEV_CHANGEUPPER: + dev = netdev_notifier_info_to_dev(ptr); + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + err = rocker_port_master_changed(dev); + if (err) + netdev_warn(dev, + "failed to reflect master change (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netdevice_nb __read_mostly = { + .notifier_call = rocker_netdevice_event, +}; + +/************************************ + * Net event notifier event handler + ************************************/ + +static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = (n->nud_state & NUD_VALID) ? 
0 : ROCKER_OP_FLAG_REMOVE; + __be32 ip_addr = *(__be32 *)n->primary_key; + + return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha); +} + +static int rocker_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct neighbour *n = ptr; + int err; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + dev = n->dev; + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + err = rocker_neigh_update(dev, n); + if (err) + netdev_warn(dev, + "failed to handle neigh update (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netevent_nb __read_mostly = { + .notifier_call = rocker_netevent_event, +}; + +/*********************** + * Module init and exit + ***********************/ + +static int __init rocker_module_init(void) +{ + int err; + + register_netdevice_notifier(&rocker_netdevice_nb); + register_netevent_notifier(&rocker_netevent_nb); + err = pci_register_driver(&rocker_pci_driver); + if (err) + goto err_pci_register_driver; + return 0; + +err_pci_register_driver: + unregister_netdevice_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + return err; +} + +static void __exit rocker_module_exit(void) +{ + unregister_netevent_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + pci_unregister_driver(&rocker_pci_driver); +} + +module_init(rocker_module_init); +module_exit(rocker_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko "); +MODULE_AUTHOR("Scott Feldman "); +MODULE_DESCRIPTION("Rocker switch device driver"); +MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h new file mode 100644 index 000000000..a4e9591d7 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.h @@ -0,0 +1,465 @@ +/* + * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver + * Copyright (c) 2014 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ROCKER_H +#define _ROCKER_H + +#include + +/* Return codes */ +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + +#define ROCKER_FP_PORTS_MAX 62 + +#define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 + +#define ROCKER_PCI_BAR0_SIZE 0x2000 + +/* MSI-X vectors */ +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) +}; + +/* Rocker bogus registers */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* Rocker test registers */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* Rocker test register ctrl */ +#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0) +#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1) +#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2) + +/* Rocker DMA ring register offsets */ +#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) +#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) +#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) +#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) +#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) +#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) + +/* Rocker dma ctrl register bits */ +#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0) + +/* Rocker DMA ring types */ +enum rocker_dma_type { + ROCKER_DMA_CMD, + ROCKER_DMA_EVENT, + __ROCKER_DMA_TX, + __ROCKER_DMA_RX, +#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) +#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) +}; + +/* Rocker DMA ring size limits and default sizes */ +#define ROCKER_DMA_SIZE_MIN 2ul +#define ROCKER_DMA_SIZE_MAX 65536ul +#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul +#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul +#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_TX_DESC_SIZE 256 +#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_RX_DESC_SIZE 256 + +/* Rocker DMA descriptor struct */ +struct rocker_desc { + u64 buf_addr; + u64 cookie; + u16 buf_size; + u16 tlv_size; + u16 resv[5]; + u16 comp_err; +}; + +#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15) + +/* Rocker DMA TLV struct */ +struct rocker_tlv { + u32 type; + u16 len; +}; + +/* TLVs */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + 
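/* per-port statistics commands; ROCKER_TLV_CMD_TYPE_GET_PORT_STATS + * backs the ethtool stats path via rocker_cmd_get_port_stats_prep() + */ + 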
ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_STATS_UNSPEC, + ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ + + ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */ + + ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */ + + __ROCKER_TLV_CMD_PORT_STATS_MAX, + ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1, +}; + +enum rocker_port_mode { + ROCKER_PORT_MODE_OF_DPA, +}; + +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + __ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 (1 << 0) +#define ROCKER_RX_FLAGS_IPV6 (1 << 1) +#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3) +#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4) +#define ROCKER_RX_FLAGS_TCP (1 << 5) +#define ROCKER_RX_FLAGS_UDP (1 << 6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7) + +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define 
ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* cmd info nested for OF-DPA msgs */ +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* OF-DPA table IDs */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + 
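+
+	/* Flow entries are consulted in ascending table-ID order; a
+	 * flow's ROCKER_TLV_OF_DPA_GOTO_TABLE_ID attribute names the
+	 * next table in the pipeline.  For example (attribute values
+	 * only, encoding not shown), a VLAN-table entry matching
+	 * ROCKER_TLV_OF_DPA_IN_PPORT plus ROCKER_TLV_OF_DPA_VLAN_ID
+	 * would typically set GOTO_TABLE_ID to
+	 * ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC.
+	 */
+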
ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* OF-DPA flow stats */ +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* OF-DPA group types */ +enum rocker_of_dpa_group_type { + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* OF-DPA group L2 overlay types */ +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* OF-DPA group ID encoding */ +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_SHIFT 16 +#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) +#define ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | 
ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* Rocker general purpose registers */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* Rocker control bits */ +#define ROCKER_CONTROL_RESET (1 << 0) + +#endif diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig new file mode 100644 index 000000000..2360d8150 --- /dev/null +++ b/drivers/net/ethernet/samsung/Kconfig @@ -0,0 +1,32 @@ +# +# Samsung Ethernet device configuration +# + +config NET_VENDOR_SAMSUNG + bool "Samsung Ethernet devices" + default y + ---help--- + If you have a network (Ethernet) chipset belonging to this class, + say Y. + + Note that the answer to this question does not directly affect + the kernel: saying N will just case the configurator to skip all + the questions about Samsung chipsets. If you say Y, you will be asked + for your specific chipset/driver in the following questions. + +if NET_VENDOR_SAMSUNG + +config SXGBE_ETH + tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver" + depends on HAS_IOMEM && HAS_DMA + select PHYLIB + select CRC32 + select PTP_1588_CLOCK + ---help--- + This is the driver for the SXGBE 10G Ethernet IP block found on + Samsung platforms. + + To compile this driver as a module, choose M here: the module + will be called samsung-sxgbe. + +endif # NET_VENDOR_SAMSUNG diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile new file mode 100644 index 000000000..1773c29b8 --- /dev/null +++ b/drivers/net/ethernet/samsung/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Samsung Ethernet device drivers. +# + +obj-$(CONFIG_SXGBE_ETH) += sxgbe/ diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile new file mode 100644 index 000000000..dcc80b9d4 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o +samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \ + sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \ + sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h new file mode 100644 index 000000000..45019649b --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h @@ -0,0 +1,537 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __SXGBE_COMMON_H__ +#define __SXGBE_COMMON_H__ + +/* forward references */ +struct sxgbe_desc_ops; +struct sxgbe_dma_ops; +struct sxgbe_mtl_ops; + +#define SXGBE_RESOURCE_NAME "sam_sxgbeeth" +#define DRV_MODULE_VERSION "November_2013" + +/* MAX HW feature words */ +#define SXGBE_HW_WORDS 3 + +#define SXGBE_RX_COE_NONE 0 + +/* CSR Frequency Access Defines*/ +#define SXGBE_CSR_F_150M 150000000 +#define SXGBE_CSR_F_250M 250000000 +#define SXGBE_CSR_F_300M 300000000 +#define SXGBE_CSR_F_350M 350000000 +#define SXGBE_CSR_F_400M 400000000 +#define SXGBE_CSR_F_500M 500000000 + +/* pause time */ +#define SXGBE_PAUSE_TIME 0x200 + +/* tx queues */ +#define SXGBE_TX_QUEUES 8 +#define SXGBE_RX_QUEUES 16 + +/* Calculated based how much time does it take to fill 256KB Rx memory + * at 10Gb speed at 156MHz clock rate and considered little less then + * the actual value. + */ +#define SXGBE_MAX_DMA_RIWT 0x70 +#define SXGBE_MIN_DMA_RIWT 0x01 + +/* Tx coalesce parameters */ +#define SXGBE_COAL_TX_TIMER 40000 +#define SXGBE_MAX_COAL_TX_TICK 100000 +#define SXGBE_TX_MAX_FRAMES 512 +#define SXGBE_TX_FRAMES 128 + +/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */ +#define BUF_SIZE_16KiB 16384 +#define BUF_SIZE_8KiB 8192 +#define BUF_SIZE_4KiB 4096 +#define BUF_SIZE_2KiB 2048 + +#define SXGBE_DEFAULT_LIT_LS 0x3E8 +#define SXGBE_DEFAULT_TWT_LS 0x0 + +/* Flow Control defines */ +#define SXGBE_FLOW_OFF 0 +#define SXGBE_FLOW_RX 1 +#define SXGBE_FLOW_TX 2 +#define SXGBE_FLOW_AUTO (SXGBE_FLOW_TX | SXGBE_FLOW_RX) + +#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ + +/* errors */ +#define RX_GMII_ERR 0x01 +#define RX_WATCHDOG_ERR 0x02 +#define RX_CRC_ERR 0x03 +#define RX_GAINT_ERR 0x04 +#define RX_IP_HDR_ERR 0x05 +#define RX_PAYLOAD_ERR 0x06 +#define RX_OVERFLOW_ERR 0x07 + +/* pkt type */ +#define RX_LEN_PKT 0x00 +#define RX_MACCTL_PKT 0x01 +#define RX_DCBCTL_PKT 0x02 +#define RX_ARP_PKT 0x03 +#define RX_OAM_PKT 0x04 +#define RX_UNTAG_PKT 0x05 +#define RX_OTHER_PKT 0x07 +#define RX_SVLAN_PKT 0x08 +#define RX_CVLAN_PKT 0x09 +#define RX_DVLAN_OCVLAN_ICVLAN_PKT 0x0A +#define RX_DVLAN_OSVLAN_ISVLAN_PKT 0x0B +#define RX_DVLAN_OSVLAN_ICVLAN_PKT 0x0C +#define RX_DVLAN_OCVLAN_ISVLAN_PKT 0x0D + +#define RX_NOT_IP_PKT 0x00 +#define RX_IPV4_TCP_PKT 0x01 +#define RX_IPV4_UDP_PKT 0x02 +#define RX_IPV4_ICMP_PKT 0x03 +#define RX_IPV4_UNKNOWN_PKT 0x07 +#define RX_IPV6_TCP_PKT 0x09 +#define RX_IPV6_UDP_PKT 0x0A +#define RX_IPV6_ICMP_PKT 0x0B +#define RX_IPV6_UNKNOWN_PKT 0x0F + +#define RX_NO_PTP 0x00 +#define RX_PTP_SYNC 0x01 +#define RX_PTP_FOLLOW_UP 0x02 +#define RX_PTP_DELAY_REQ 0x03 +#define RX_PTP_DELAY_RESP 0x04 +#define RX_PTP_PDELAY_REQ 0x05 +#define RX_PTP_PDELAY_RESP 0x06 +#define RX_PTP_PDELAY_FOLLOW_UP 0x07 +#define RX_PTP_ANNOUNCE 0x08 +#define RX_PTP_MGMT 0x09 +#define RX_PTP_SIGNAL 0x0A +#define RX_PTP_RESV_MSG 0x0F + +/* EEE-LPI mode flags*/ +#define TX_ENTRY_LPI_MODE 0x10 +#define TX_EXIT_LPI_MODE 0x20 +#define RX_ENTRY_LPI_MODE 0x40 +#define RX_EXIT_LPI_MODE 0x80 + +/* EEE-LPI Interrupt status flag */ +#define LPI_INT_STATUS BIT(5) + +/* EEE-LPI Default timer values */ +#define LPI_LINK_STATUS_TIMER 0x3E8 +#define LPI_MAC_WAIT_TIMER 0x00 + +/* EEE-LPI Control and status definitions */ +#define LPI_CTRL_STATUS_TXA BIT(19) +#define LPI_CTRL_STATUS_PLSDIS BIT(18) +#define LPI_CTRL_STATUS_PLS BIT(17) +#define LPI_CTRL_STATUS_LPIEN BIT(16) +#define LPI_CTRL_STATUS_TXRSTP BIT(11) +#define LPI_CTRL_STATUS_RXRSTP BIT(10) +#define LPI_CTRL_STATUS_RLPIST BIT(9) +#define LPI_CTRL_STATUS_TLPIST BIT(8) 
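+
+/* The four LPI entry/exit bits below (TLPIEN, TLPIEX, RLPIEN, RLPIEX)
+ * are latched interrupt events; reading SXGBE_CORE_LPI_CTRL_STATUS
+ * clears them.  The core code folds them into the EEE-LPI mode flags
+ * defined above (see sxgbe_get_lpi_status() in sxgbe_core.c), for
+ * example:
+ *
+ *	lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+ *	if (lpi_status & LPI_CTRL_STATUS_TLPIEN)
+ *		status |= TX_ENTRY_LPI_MODE;
+ *	if (lpi_status & LPI_CTRL_STATUS_RLPIEX)
+ *		status |= RX_EXIT_LPI_MODE;
+ */
+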
+#define LPI_CTRL_STATUS_RLPIEX BIT(3) +#define LPI_CTRL_STATUS_RLPIEN BIT(2) +#define LPI_CTRL_STATUS_TLPIEX BIT(1) +#define LPI_CTRL_STATUS_TLPIEN BIT(0) + +enum dma_irq_status { + tx_hard_error = BIT(0), + tx_bump_tc = BIT(1), + handle_tx = BIT(2), + rx_hard_error = BIT(3), + rx_bump_tc = BIT(4), + handle_rx = BIT(5), +}; + +#define NETIF_F_HW_VLAN_ALL (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_TX | \ + NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +/* MMC control defines */ +#define SXGBE_MMC_CTRL_CNT_FRZ 0x00000008 + +/* SXGBE HW ADDR regs */ +#define SXGBE_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ + (reg * 8)) +#define SXGBE_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \ + (reg * 8)) +#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */ +#define SXGBE_FRAME_FILTER 0x00000004 /* Frame Filter */ + +/* SXGBE Frame Filter defines */ +#define SXGBE_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define SXGBE_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define SXGBE_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define SXGBE_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define SXGBE_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define SXGBE_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define SXGBE_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define SXGBE_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define SXGBE_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define SXGBE_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ + +#define SXGBE_HASH_TABLE_SIZE 64 +#define SXGBE_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define SXGBE_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ + +#define SXGBE_HI_REG_AE 0x80000000 + +/* Minimum and maximum MTU */ +#define MIN_MTU 68 +#define MAX_MTU 9000 + +#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \ + for (queue_num = 0; queue_num < max_queues; queue_num++) + +#define DRV_VERSION "1.0.0" + +#define SXGBE_MAX_RX_CHANNELS 16 +#define SXGBE_MAX_TX_CHANNELS 16 + +#define START_MAC_REG_OFFSET 0x0000 +#define MAX_MAC_REG_OFFSET 0x0DFC +#define START_MTL_REG_OFFSET 0x1000 +#define MAX_MTL_REG_OFFSET 0x18FC +#define START_DMA_REG_OFFSET 0x3000 +#define MAX_DMA_REG_OFFSET 0x38FC + +#define REG_SPACE_SIZE 0x2000 + +/* sxgbe statistics counters */ +struct sxgbe_extra_stats { + /* TX/RX IRQ events */ + unsigned long tx_underflow_irq; + unsigned long tx_process_stopped_irq; + unsigned long tx_ctxt_desc_err; + unsigned long tx_threshold; + unsigned long rx_threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long normal_irq_n; + unsigned long tx_normal_irq_n; + unsigned long rx_normal_irq_n; + unsigned long napi_poll; + unsigned long tx_clean; + unsigned long tx_reset_ic_bit; + unsigned long rx_process_stopped_irq; + unsigned long rx_underflow_irq; + + /* Bus access errors */ + unsigned long fatal_bus_error_irq; + unsigned long tx_read_transfer_err; + unsigned long tx_write_transfer_err; + unsigned long tx_desc_access_err; + unsigned long tx_buffer_access_err; + unsigned long tx_data_transfer_err; + unsigned long rx_read_transfer_err; + unsigned long rx_write_transfer_err; + unsigned long rx_desc_access_err; + unsigned long rx_buffer_access_err; + unsigned long rx_data_transfer_err; + + /* EEE-LPI stats */ + unsigned long tx_lpi_entry_n; + unsigned long tx_lpi_exit_n; + unsigned long rx_lpi_entry_n; + 
unsigned long rx_lpi_exit_n; + unsigned long eee_wakeup_error_n; + + /* RX specific */ + /* L2 error */ + unsigned long rx_code_gmii_err; + unsigned long rx_watchdog_err; + unsigned long rx_crc_err; + unsigned long rx_gaint_pkt_err; + unsigned long ip_hdr_err; + unsigned long ip_payload_err; + unsigned long overflow_error; + + /* L2 Pkt type */ + unsigned long len_pkt; + unsigned long mac_ctl_pkt; + unsigned long dcb_ctl_pkt; + unsigned long arp_pkt; + unsigned long oam_pkt; + unsigned long untag_okt; + unsigned long other_pkt; + unsigned long svlan_tag_pkt; + unsigned long cvlan_tag_pkt; + unsigned long dvlan_ocvlan_icvlan_pkt; + unsigned long dvlan_osvlan_isvlan_pkt; + unsigned long dvlan_osvlan_icvlan_pkt; + unsigned long dvan_ocvlan_icvlan_pkt; + + /* L3/L4 Pkt type */ + unsigned long not_ip_pkt; + unsigned long ip4_tcp_pkt; + unsigned long ip4_udp_pkt; + unsigned long ip4_icmp_pkt; + unsigned long ip4_unknown_pkt; + unsigned long ip6_tcp_pkt; + unsigned long ip6_udp_pkt; + unsigned long ip6_icmp_pkt; + unsigned long ip6_unknown_pkt; + + /* Filter specific */ + unsigned long vlan_filter_match; + unsigned long sa_filter_fail; + unsigned long da_filter_fail; + unsigned long hash_filter_pass; + unsigned long l3_filter_match; + unsigned long l4_filter_match; + + /* RX context specific */ + unsigned long timestamp_dropped; + unsigned long rx_msg_type_no_ptp; + unsigned long rx_ptp_type_sync; + unsigned long rx_ptp_type_follow_up; + unsigned long rx_ptp_type_delay_req; + unsigned long rx_ptp_type_delay_resp; + unsigned long rx_ptp_type_pdelay_req; + unsigned long rx_ptp_type_pdelay_resp; + unsigned long rx_ptp_type_pdelay_follow_up; + unsigned long rx_ptp_announce; + unsigned long rx_ptp_mgmt; + unsigned long rx_ptp_signal; + unsigned long rx_ptp_resv_msg_type; +}; + +struct mac_link { + int port; + int duplex; + int speed; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ +}; + +struct sxgbe_core_ops { + /* MAC core initialization */ + void (*core_init)(void __iomem *ioaddr); + /* Dump MAC registers */ + void (*dump_regs)(void __iomem *ioaddr); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(void __iomem *ioaddr, + struct sxgbe_extra_stats *x); + /* Set power management mode (e.g. 
magic frame) */ + void (*pmt)(void __iomem *ioaddr, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n); + void (*enable_rx)(void __iomem *ioaddr, bool enable); + void (*enable_tx)(void __iomem *ioaddr, bool enable); + + /* controller version specific operations */ + int (*get_controller_version)(void __iomem *ioaddr); + + /* If supported then get the optional core features */ + unsigned int (*get_hw_feature)(void __iomem *ioaddr, + unsigned char feature_index); + /* adjust SXGBE speed */ + void (*set_speed)(void __iomem *ioaddr, unsigned char speed); + + /* EEE-LPI specific operations */ + void (*set_eee_mode)(void __iomem *ioaddr); + void (*reset_eee_mode)(void __iomem *ioaddr); + void (*set_eee_timer)(void __iomem *ioaddr, const int ls, + const int tw); + void (*set_eee_pls)(void __iomem *ioaddr, const int link); + + /* Enable disable checksum offload operations */ + void (*enable_rx_csum)(void __iomem *ioaddr); + void (*disable_rx_csum)(void __iomem *ioaddr); + void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num); + void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num); +}; + +const struct sxgbe_core_ops *sxgbe_get_core_ops(void); + +struct sxgbe_ops { + const struct sxgbe_core_ops *mac; + const struct sxgbe_desc_ops *desc; + const struct sxgbe_dma_ops *dma; + const struct sxgbe_mtl_ops *mtl; + struct mii_regs mii; /* MII register Addresses */ + struct mac_link link; + unsigned int ctrl_uid; + unsigned int ctrl_id; +}; + +/* SXGBE private data structures */ +struct sxgbe_tx_queue { + unsigned int irq_no; + struct sxgbe_priv_data *priv_ptr; + struct sxgbe_tx_norm_desc *dma_tx; + dma_addr_t dma_tx_phy; + dma_addr_t *tx_skbuff_dma; + struct sk_buff **tx_skbuff; + struct timer_list txtimer; + spinlock_t tx_lock; /* lock for tx queues */ + unsigned int cur_tx; + unsigned int dirty_tx; + u32 tx_count_frames; + u32 tx_coal_frames; + u32 tx_coal_timer; + int hwts_tx_en; + u16 prev_mss; + u8 queue_no; +}; + +struct sxgbe_rx_queue { + struct sxgbe_priv_data *priv_ptr; + struct sxgbe_rx_norm_desc *dma_rx; + struct sk_buff **rx_skbuff; + unsigned int cur_rx; + unsigned int dirty_rx; + unsigned int irq_no; + u32 rx_riwt; + dma_addr_t *rx_skbuff_dma; + dma_addr_t dma_rx_phy; + u8 queue_no; +}; + +/* SXGBE HW capabilities */ +struct sxgbe_hw_features { + /****** CAP [0] *******/ + unsigned int pmt_remote_wake_up; + unsigned int pmt_magic_frame; + /* IEEE 1588-2008 */ + unsigned int atime_stamp; + + unsigned int eee; + + unsigned int tx_csum_offload; + unsigned int rx_csum_offload; + unsigned int multi_macaddr; + unsigned int tstamp_srcselect; + unsigned int sa_vlan_insert; + + /****** CAP [1] *******/ + unsigned int rxfifo_size; + unsigned int txfifo_size; + unsigned int atstmap_hword; + unsigned int dcb_enable; + unsigned int splithead_enable; + unsigned int tcpseg_offload; + unsigned int debug_mem; + unsigned int rss_enable; + unsigned int hash_tsize; + unsigned int l3l4_filer_size; + + /* This value is in bytes and + * as mentioned in HW features + * of SXGBE data book + */ + unsigned int rx_mtl_qsize; + unsigned int tx_mtl_qsize; + + /****** CAP [2] *******/ + /* TX and RX number of channels */ + unsigned int rx_mtl_queues; + unsigned int tx_mtl_queues; + unsigned int rx_dma_channels; + unsigned int tx_dma_channels; + unsigned int pps_output_count; + unsigned int aux_input_count; +}; + +struct 
sxgbe_priv_data { + /* DMA descriptos */ + struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES]; + struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES]; + u8 cur_rx_qnum; + + unsigned int dma_tx_size; + unsigned int dma_rx_size; + unsigned int dma_buf_sz; + u32 rx_riwt; + + struct napi_struct napi; + + void __iomem *ioaddr; + struct net_device *dev; + struct device *device; + struct sxgbe_ops *hw; /* sxgbe specific ops */ + int no_csum_insertion; + int irq; + int rxcsum_insertion; + spinlock_t stats_lock; /* lock for tx/rx statatics */ + + struct phy_device *phydev; + int oldlink; + int speed; + int oldduplex; + struct mii_bus *mii; + int mii_irq[PHY_MAX_ADDR]; + u8 rx_pause; + u8 tx_pause; + + struct sxgbe_extra_stats xstats; + struct sxgbe_plat_data *plat; + struct sxgbe_hw_features hw_cap; + + u32 msg_enable; + + struct clk *sxgbe_clk; + int clk_csr; + unsigned int mode; + unsigned int default_addend; + + /* advanced time stamp support */ + u32 adv_ts; + int use_riwt; + struct ptp_clock *ptp_clock; + + /* tc control */ + int tx_tc; + int rx_tc; + /* EEE-LPI specific members */ + struct timer_list eee_ctrl_timer; + bool tx_path_in_lpi_mode; + int lpi_irq; + int eee_enabled; + int eee_active; + int tx_lpi_timer; +}; + +/* Function prototypes */ +struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device, + struct sxgbe_plat_data *plat_dat, + void __iomem *addr); +int sxgbe_drv_remove(struct net_device *ndev); +void sxgbe_set_ethtool_ops(struct net_device *netdev); +int sxgbe_mdio_unregister(struct net_device *ndev); +int sxgbe_mdio_register(struct net_device *ndev); +int sxgbe_register_platform(void); +void sxgbe_unregister_platform(void); + +#ifdef CONFIG_PM +int sxgbe_suspend(struct net_device *ndev); +int sxgbe_resume(struct net_device *ndev); +int sxgbe_freeze(struct net_device *ndev); +int sxgbe_restore(struct net_device *ndev); +#endif /* CONFIG_PM */ + +const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void); + +void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv); +bool sxgbe_eee_init(struct sxgbe_priv_data * const priv); +#endif /* __SXGBE_COMMON_H__ */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c new file mode 100644 index 000000000..58c356925 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c @@ -0,0 +1,284 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include "sxgbe_common.h" +#include "sxgbe_reg.h" + +/* MAC core initialization */ +static void sxgbe_core_init(void __iomem *ioaddr) +{ + u32 regval; + + /* TX configuration */ + regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG); + /* Other configurable parameters IFP, IPG, ISR, ISM + * needs to be set if needed + */ + regval |= SXGBE_TX_JABBER_DISABLE; + writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG); + + /* RX configuration */ + regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + /* Other configurable parameters CST, SPEN, USP, GPSLCE + * WD, LM, S2KP, HDSMS, GPSL, ELEN, ARPEN needs to be + * set if needed + */ + regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE; + writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +/* Dump MAC registers */ +static void sxgbe_core_dump_regs(void __iomem *ioaddr) +{ +} + +static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status) +{ + int status = 0; + int lpi_status; + + /* Reading this register shall clear all the LPI status bits */ + lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + + if (lpi_status & LPI_CTRL_STATUS_TLPIEN) + status |= TX_ENTRY_LPI_MODE; + if (lpi_status & LPI_CTRL_STATUS_TLPIEX) + status |= TX_EXIT_LPI_MODE; + if (lpi_status & LPI_CTRL_STATUS_RLPIEN) + status |= RX_ENTRY_LPI_MODE; + if (lpi_status & LPI_CTRL_STATUS_RLPIEX) + status |= RX_EXIT_LPI_MODE; + + return status; +} + +/* Handle extra events on specific interrupts hw dependent */ +static int sxgbe_core_host_irq_status(void __iomem *ioaddr, + struct sxgbe_extra_stats *x) +{ + int irq_status, status = 0; + + irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG); + + if (unlikely(irq_status & LPI_INT_STATUS)) + status |= sxgbe_get_lpi_status(ioaddr, irq_status); + + return status; +} + +/* Set power management mode (e.g. 
magic frame) */ +static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode) +{ +} + +/* Set/Get Unicast MAC addresses */ +static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + u32 high_word, low_word; + + high_word = (addr[5] << 8) | (addr[4]); + low_word = (addr[3] << 24) | (addr[2] << 16) | + (addr[1] << 8) | (addr[0]); + writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n)); + writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n)); +} + +static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + u32 high_word, low_word; + + high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n)); + low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n)); + + /* extract and assign address */ + addr[5] = (high_word & 0x0000FF00) >> 8; + addr[4] = (high_word & 0x000000FF); + addr[3] = (low_word & 0xFF000000) >> 24; + addr[2] = (low_word & 0x00FF0000) >> 16; + addr[1] = (low_word & 0x0000FF00) >> 8; + addr[0] = (low_word & 0x000000FF); +} + +static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable) +{ + u32 tx_config; + + tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG); + tx_config &= ~SXGBE_TX_ENABLE; + + if (enable) + tx_config |= SXGBE_TX_ENABLE; + writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG); +} + +static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable) +{ + u32 rx_config; + + rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + rx_config &= ~SXGBE_RX_ENABLE; + + if (enable) + rx_config |= SXGBE_RX_ENABLE; + writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +static int sxgbe_get_controller_version(void __iomem *ioaddr) +{ + return readl(ioaddr + SXGBE_CORE_VERSION_REG); +} + +/* If supported then get the optional core features */ +static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr, + unsigned char feature_index) +{ + return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index))); +} + +static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed) +{ + u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG); + + /* clear the speed bits */ + tx_cfg &= ~0x60000000; + tx_cfg |= (speed << SXGBE_SPEED_LSHIFT); + + /* set the speed */ + writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG); +} + +static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG); + reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); + reg_val |= SXGBE_CORE_RXQ_ENABLE; + writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG); +} + +static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG); + reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); + reg_val |= SXGBE_CORE_RXQ_DISABLE; + writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG); +} + +static void sxgbe_set_eee_mode(void __iomem *ioaddr) +{ + u32 ctrl; + + /* Enable the LPI mode for transmit path with Tx automate bit set. + * When Tx Automate bit is set, MAC internally handles the entry + * to LPI mode after all outstanding and pending packets are + * transmitted. 
+ */ + ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA; + writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); +} + +static void sxgbe_reset_eee_mode(void __iomem *ioaddr) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA); + writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); +} + +static void sxgbe_set_eee_pls(void __iomem *ioaddr, const int link) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); + + /* If the PHY link status is UP then set PLS */ + if (link) + ctrl |= LPI_CTRL_STATUS_PLS; + else + ctrl &= ~LPI_CTRL_STATUS_PLS; + + writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS); +} + +static void sxgbe_set_eee_timer(void __iomem *ioaddr, + const int ls, const int tw) +{ + int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. + */ + writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL); +} + +static void sxgbe_enable_rx_csum(void __iomem *ioaddr) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE; + writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +static void sxgbe_disable_rx_csum(void __iomem *ioaddr) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG); + ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE; + writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG); +} + +static const struct sxgbe_core_ops core_ops = { + .core_init = sxgbe_core_init, + .dump_regs = sxgbe_core_dump_regs, + .host_irq_status = sxgbe_core_host_irq_status, + .pmt = sxgbe_core_pmt, + .set_umac_addr = sxgbe_core_set_umac_addr, + .get_umac_addr = sxgbe_core_get_umac_addr, + .enable_rx = sxgbe_enable_rx, + .enable_tx = sxgbe_enable_tx, + .get_controller_version = sxgbe_get_controller_version, + .get_hw_feature = sxgbe_get_hw_feature, + .set_speed = sxgbe_core_set_speed, + .set_eee_mode = sxgbe_set_eee_mode, + .reset_eee_mode = sxgbe_reset_eee_mode, + .set_eee_timer = sxgbe_set_eee_timer, + .set_eee_pls = sxgbe_set_eee_pls, + .enable_rx_csum = sxgbe_enable_rx_csum, + .disable_rx_csum = sxgbe_disable_rx_csum, + .enable_rxqueue = sxgbe_core_enable_rxqueue, + .disable_rxqueue = sxgbe_core_disable_rxqueue, +}; + +const struct sxgbe_core_ops *sxgbe_get_core_ops(void) +{ + return &core_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c new file mode 100644 index 000000000..2686bb5b6 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c @@ -0,0 +1,522 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include "sxgbe_common.h" +#include "sxgbe_dma.h" +#include "sxgbe_desc.h" + +/* DMA TX descriptor ring initialization */ +static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.own_bit = 0; +} + +static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse, + u32 total_hdr_len, u32 tcp_hdr_len, + u32 tcp_payload_len) +{ + p->tdes23.tx_rd_des23.tse_bit = is_tse; + p->tdes23.tx_rd_des23.buf1_size = total_hdr_len; + p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4; + p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len = tcp_payload_len; +} + +/* Assign buffer lengths for descriptor */ +static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd, + int buf1_len, int pkt_len, int cksum) +{ + p->tdes23.tx_rd_des23.first_desc = is_fd; + p->tdes23.tx_rd_des23.buf1_size = buf1_len; + + p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len; + + if (cksum) + p->tdes23.tx_rd_des23.cksum_ctl = cic_full; +} + +/* Set VLAN control information */ +static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl) +{ + p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl; +} + +/* Set the owner of Normal descriptor */ +static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.own_bit = 1; +} + +/* Get the owner of Normal descriptor */ +static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.own_bit; +} + +/* Invoked by the xmit function to close the tx descriptor */ +static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.last_desc = 1; + p->tdes23.tx_rd_des23.int_on_com = 1; +} + +/* Clean the tx descriptor as soon as the tx irq is received */ +static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p) +{ + memset(p, 0, sizeof(*p)); +} + +/* Clear interrupt on tx frame completion. 
When this bit is + * set an interrupt happens as soon as the frame is transmitted + */ +static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.int_on_com = 0; +} + +/* Last tx segment reports the transmit status */ +static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.last_desc; +} + +/* Get the buffer size from the descriptor */ +static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.buf1_size; +} + +/* Set tx timestamp enable bit */ +static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p) +{ + p->tdes23.tx_rd_des23.timestmp_enable = 1; +} + +/* get tx timestamp status */ +static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p) +{ + return p->tdes23.tx_rd_des23.timestmp_enable; +} + +/* TX Context Descripto Specific */ +static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p) +{ + p->ctxt_bit = 1; +} + +/* Set the owner of TX context descriptor */ +static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p) +{ + p->own_bit = 1; +} + +/* Get the owner of TX context descriptor */ +static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p) +{ + return p->own_bit; +} + +/* Set TX mss in TX context Descriptor */ +static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss) +{ + p->maxseg_size = mss; +} + +/* Get TX mss from TX context Descriptor */ +static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p) +{ + return p->maxseg_size; +} + +/* Set TX tcmssv in TX context Descriptor */ +static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p) +{ + p->tcmssv = 1; +} + +/* Reset TX ostc in TX context Descriptor */ +static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p) +{ + p->ostc = 0; +} + +/* Set IVLAN information */ +static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p, + int is_ivlanvalid, int ivlan_tag, + int ivlan_ctl) +{ + if (is_ivlanvalid) { + p->ivlan_tag_valid = is_ivlanvalid; + p->ivlan_tag = ivlan_tag; + p->ivlan_tag_ctl = ivlan_ctl; + } +} + +/* Return IVLAN Tag */ +static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p) +{ + return p->ivlan_tag; +} + +/* Set VLAN Tag */ +static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p, + int is_vlanvalid, int vlan_tag) +{ + if (is_vlanvalid) { + p->vltag_valid = is_vlanvalid; + p->vlan_tag = vlan_tag; + } +} + +/* Return VLAN Tag */ +static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p) +{ + return p->vlan_tag; +} + +/* Set Time stamp */ +static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p, + u8 ostc_enable, u64 tstamp) +{ + if (ostc_enable) { + p->ostc = ostc_enable; + p->tstamp_lo = (u32) tstamp; + p->tstamp_hi = (u32) (tstamp>>32); + } +} +/* Close TX context descriptor */ +static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p) +{ + p->own_bit = 1; +} + +/* WB status of context descriptor */ +static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p) +{ + return p->ctxt_desc_err; +} + +/* DMA RX descriptor ring initialization */ +static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic, + int mode, int end) +{ + p->rdes23.rx_rd_des23.own_bit = 1; + if (disable_rx_ic) + p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic; +} + +/* Get RX own bit */ +static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_rd_des23.own_bit; +} + +/* Set RX own bit */ +static void 
sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p) +{ + p->rdes23.rx_rd_des23.own_bit = 1; +} + +/* Set Interrupt on completion bit */ +static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p) +{ + p->rdes23.rx_rd_des23.int_on_com = 1; +} + +/* Get the receive frame size */ +static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_wb_des23.pkt_len; +} + +/* Return first Descriptor status */ +static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_wb_des23.first_desc; +} + +/* Return Last Descriptor status */ +static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p) +{ + return p->rdes23.rx_wb_des23.last_desc; +} + + +/* Return the RX status looking at the WB fields */ +static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p, + struct sxgbe_extra_stats *x, int *checksum) +{ + int status = 0; + + *checksum = CHECKSUM_UNNECESSARY; + if (p->rdes23.rx_wb_des23.err_summary) { + switch (p->rdes23.rx_wb_des23.err_l2_type) { + case RX_GMII_ERR: + status = -EINVAL; + x->rx_code_gmii_err++; + break; + case RX_WATCHDOG_ERR: + status = -EINVAL; + x->rx_watchdog_err++; + break; + case RX_CRC_ERR: + status = -EINVAL; + x->rx_crc_err++; + break; + case RX_GAINT_ERR: + status = -EINVAL; + x->rx_gaint_pkt_err++; + break; + case RX_IP_HDR_ERR: + *checksum = CHECKSUM_NONE; + x->ip_hdr_err++; + break; + case RX_PAYLOAD_ERR: + *checksum = CHECKSUM_NONE; + x->ip_payload_err++; + break; + case RX_OVERFLOW_ERR: + status = -EINVAL; + x->overflow_error++; + break; + default: + pr_err("Invalid Error type\n"); + break; + } + } else { + switch (p->rdes23.rx_wb_des23.err_l2_type) { + case RX_LEN_PKT: + x->len_pkt++; + break; + case RX_MACCTL_PKT: + x->mac_ctl_pkt++; + break; + case RX_DCBCTL_PKT: + x->dcb_ctl_pkt++; + break; + case RX_ARP_PKT: + x->arp_pkt++; + break; + case RX_OAM_PKT: + x->oam_pkt++; + break; + case RX_UNTAG_PKT: + x->untag_okt++; + break; + case RX_OTHER_PKT: + x->other_pkt++; + break; + case RX_SVLAN_PKT: + x->svlan_tag_pkt++; + break; + case RX_CVLAN_PKT: + x->cvlan_tag_pkt++; + break; + case RX_DVLAN_OCVLAN_ICVLAN_PKT: + x->dvlan_ocvlan_icvlan_pkt++; + break; + case RX_DVLAN_OSVLAN_ISVLAN_PKT: + x->dvlan_osvlan_isvlan_pkt++; + break; + case RX_DVLAN_OSVLAN_ICVLAN_PKT: + x->dvlan_osvlan_icvlan_pkt++; + break; + case RX_DVLAN_OCVLAN_ISVLAN_PKT: + x->dvlan_ocvlan_icvlan_pkt++; + break; + default: + pr_err("Invalid L2 Packet type\n"); + break; + } + } + + /* L3/L4 Pkt type */ + switch (p->rdes23.rx_wb_des23.layer34_pkt_type) { + case RX_NOT_IP_PKT: + x->not_ip_pkt++; + break; + case RX_IPV4_TCP_PKT: + x->ip4_tcp_pkt++; + break; + case RX_IPV4_UDP_PKT: + x->ip4_udp_pkt++; + break; + case RX_IPV4_ICMP_PKT: + x->ip4_icmp_pkt++; + break; + case RX_IPV4_UNKNOWN_PKT: + x->ip4_unknown_pkt++; + break; + case RX_IPV6_TCP_PKT: + x->ip6_tcp_pkt++; + break; + case RX_IPV6_UDP_PKT: + x->ip6_udp_pkt++; + break; + case RX_IPV6_ICMP_PKT: + x->ip6_icmp_pkt++; + break; + case RX_IPV6_UNKNOWN_PKT: + x->ip6_unknown_pkt++; + break; + default: + pr_err("Invalid L3/L4 Packet type\n"); + break; + } + + /* Filter */ + if (p->rdes23.rx_wb_des23.vlan_filter_match) + x->vlan_filter_match++; + + if (p->rdes23.rx_wb_des23.sa_filter_fail) { + status = -EINVAL; + x->sa_filter_fail++; + } + if (p->rdes23.rx_wb_des23.da_filter_fail) { + status = -EINVAL; + x->da_filter_fail++; + } + if (p->rdes23.rx_wb_des23.hash_filter_pass) + x->hash_filter_pass++; + + if (p->rdes23.rx_wb_des23.l3_filter_match) + x->l3_filter_match++; + + if 
(p->rdes23.rx_wb_des23.l4_filter_match) + x->l4_filter_match++; + + return status; +} + +/* Get own bit of context descriptor */ +static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p) +{ + return p->own_bit; +} + +/* Set own bit for context descriptor */ +static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p) +{ + p->own_bit = 1; +} + + +/* Return the reception status looking at Context control information */ +static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p, + struct sxgbe_extra_stats *x) +{ + if (p->tstamp_dropped) + x->timestamp_dropped++; + + /* ptp */ + if (p->ptp_msgtype == RX_NO_PTP) + x->rx_msg_type_no_ptp++; + else if (p->ptp_msgtype == RX_PTP_SYNC) + x->rx_ptp_type_sync++; + else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP) + x->rx_ptp_type_follow_up++; + else if (p->ptp_msgtype == RX_PTP_DELAY_REQ) + x->rx_ptp_type_delay_req++; + else if (p->ptp_msgtype == RX_PTP_DELAY_RESP) + x->rx_ptp_type_delay_resp++; + else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ) + x->rx_ptp_type_pdelay_req++; + else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP) + x->rx_ptp_type_pdelay_resp++; + else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP) + x->rx_ptp_type_pdelay_follow_up++; + else if (p->ptp_msgtype == RX_PTP_ANNOUNCE) + x->rx_ptp_announce++; + else if (p->ptp_msgtype == RX_PTP_MGMT) + x->rx_ptp_mgmt++; + else if (p->ptp_msgtype == RX_PTP_SIGNAL) + x->rx_ptp_signal++; + else if (p->ptp_msgtype == RX_PTP_RESV_MSG) + x->rx_ptp_resv_msg_type++; +} + +/* Get rx timestamp status */ +static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p) +{ + if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) { + pr_err("Time stamp corrupted\n"); + return 0; + } + + return p->tstamp_available; +} + + +static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p) +{ + u64 ns; + + ns = p->tstamp_lo; + ns |= ((u64)p->tstamp_hi) << 32; + + return ns; +} + +static const struct sxgbe_desc_ops desc_ops = { + .init_tx_desc = sxgbe_init_tx_desc, + .tx_desc_enable_tse = sxgbe_tx_desc_enable_tse, + .prepare_tx_desc = sxgbe_prepare_tx_desc, + .tx_vlanctl_desc = sxgbe_tx_vlanctl_desc, + .set_tx_owner = sxgbe_set_tx_owner, + .get_tx_owner = sxgbe_get_tx_owner, + .close_tx_desc = sxgbe_close_tx_desc, + .release_tx_desc = sxgbe_release_tx_desc, + .clear_tx_ic = sxgbe_clear_tx_ic, + .get_tx_ls = sxgbe_get_tx_ls, + .get_tx_len = sxgbe_get_tx_len, + .tx_enable_tstamp = sxgbe_tx_enable_tstamp, + .get_tx_timestamp_status = sxgbe_get_tx_timestamp_status, + .tx_ctxt_desc_set_ctxt = sxgbe_tx_ctxt_desc_set_ctxt, + .tx_ctxt_desc_set_owner = sxgbe_tx_ctxt_desc_set_owner, + .get_tx_ctxt_owner = sxgbe_tx_ctxt_desc_get_owner, + .tx_ctxt_desc_set_mss = sxgbe_tx_ctxt_desc_set_mss, + .tx_ctxt_desc_get_mss = sxgbe_tx_ctxt_desc_get_mss, + .tx_ctxt_desc_set_tcmssv = sxgbe_tx_ctxt_desc_set_tcmssv, + .tx_ctxt_desc_reset_ostc = sxgbe_tx_ctxt_desc_reset_ostc, + .tx_ctxt_desc_set_ivlantag = sxgbe_tx_ctxt_desc_set_ivlantag, + .tx_ctxt_desc_get_ivlantag = sxgbe_tx_ctxt_desc_get_ivlantag, + .tx_ctxt_desc_set_vlantag = sxgbe_tx_ctxt_desc_set_vlantag, + .tx_ctxt_desc_get_vlantag = sxgbe_tx_ctxt_desc_get_vlantag, + .tx_ctxt_set_tstamp = sxgbe_tx_ctxt_desc_set_tstamp, + .close_tx_ctxt_desc = sxgbe_tx_ctxt_desc_close, + .get_tx_ctxt_cde = sxgbe_tx_ctxt_desc_get_cde, + .init_rx_desc = sxgbe_init_rx_desc, + .get_rx_owner = sxgbe_get_rx_owner, + .set_rx_owner = sxgbe_set_rx_owner, + .set_rx_int_on_com = sxgbe_set_rx_int_on_com, + .get_rx_frame_len = sxgbe_get_rx_frame_len, + .get_rx_fd_status = 
sxgbe_get_rx_fd_status, + .get_rx_ld_status = sxgbe_get_rx_ld_status, + .rx_wbstatus = sxgbe_rx_wbstatus, + .get_rx_ctxt_owner = sxgbe_get_rx_ctxt_owner, + .set_rx_ctxt_owner = sxgbe_set_ctxt_rx_owner, + .rx_ctxt_wbstatus = sxgbe_rx_ctxt_wbstatus, + .get_rx_ctxt_tstamp_status = sxgbe_get_rx_ctxt_tstamp_status, + .get_timestamp = sxgbe_get_rx_timestamp, +}; + +const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void) +{ + return &desc_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h new file mode 100644 index 000000000..18609324d --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h @@ -0,0 +1,296 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __SXGBE_DESC_H__ +#define __SXGBE_DESC_H__ + +#define SXGBE_DESC_SIZE_BYTES 16 + +/* forward declaration */ +struct sxgbe_extra_stats; + +/* Transmit checksum insertion control */ +enum tdes_csum_insertion { + cic_disabled = 0, /* Checksum Insertion Control */ + cic_only_ip = 1, /* Only IP header */ + /* IP header but pseudoheader is not calculated */ + cic_no_pseudoheader = 2, + cic_full = 3, /* IP header and pseudoheader */ +}; + +struct sxgbe_tx_norm_desc { + u64 tdes01; /* buf1 address */ + union { + /* TX Read-Format Desc 2,3 */ + struct { + /* TDES2 */ + u32 buf1_size:14; + u32 vlan_tag_ctl:2; + u32 buf2_size:14; + u32 timestmp_enable:1; + u32 int_on_com:1; + /* TDES3 */ + union { + u16 tcp_payload_len; + struct { + u32 total_pkt_len:15; + u32 reserved1:1; + } pkt_len; + } tx_pkt_len; + + u16 cksum_ctl:2; + u16 tse_bit:1; + u16 tcp_hdr_len:4; + u16 sa_insert_ctl:3; + u16 crc_pad_ctl:2; + u16 last_desc:1; + u16 first_desc:1; + u16 ctxt_bit:1; + u16 own_bit:1; + } tx_rd_des23; + + /* tx write back Desc 2,3 */ + struct { + /* WB TES2 */ + u32 reserved1; + /* WB TES3 */ + u32 reserved2:31; + u32 own_bit:1; + } tx_wb_des23; + } tdes23; +}; + +struct sxgbe_rx_norm_desc { + union { + u64 rdes01; /* buf1 address */ + union { + u32 out_vlan_tag:16; + u32 in_vlan_tag:16; + u32 rss_hash; + } rx_wb_des01; + } rdes01; + + union { + /* RX Read format Desc 2,3 */ + struct{ + /* RDES2 */ + u64 buf2_addr:62; + /* RDES3 */ + u32 int_on_com:1; + u32 own_bit:1; + } rx_rd_des23; + + /* RX write back */ + struct{ + /* WB RDES2 */ + u32 hdr_len:10; + u32 rdes2_reserved:2; + u32 elrd_val:1; + u32 iovt_sel:1; + u32 res_pkt:1; + u32 vlan_filter_match:1; + u32 sa_filter_fail:1; + u32 da_filter_fail:1; + u32 hash_filter_pass:1; + u32 macaddr_filter_match:8; + u32 l3_filter_match:1; + u32 l4_filter_match:1; + u32 l34_filter_num:3; + + /* WB RDES3 */ + u32 pkt_len:14; + u32 rdes3_reserved:1; + u32 err_summary:1; + u32 err_l2_type:4; + u32 layer34_pkt_type:4; + u32 no_coagulation_pkt:1; + u32 in_seq_pkt:1; + u32 rss_valid:1; + u32 context_des_avail:1; + u32 last_desc:1; + u32 first_desc:1; + u32 recv_context_desc:1; + u32 own_bit:1; + } rx_wb_des23; + } rdes23; +}; + +/* Context descriptor structure */ +struct sxgbe_tx_ctxt_desc { + u32 tstamp_lo; + u32 tstamp_hi; + u32 maxseg_size:15; + u32 reserved1:1; + u32 ivlan_tag:16; + u32 vlan_tag:16; + u32 vltag_valid:1; + u32 ivlan_tag_valid:1; + u32 ivlan_tag_ctl:2; + u32 reserved2:3; + u32 ctxt_desc_err:1; + u32 reserved3:2; + u32 
ostc:1; + u32 tcmssv:1; + u32 reserved4:2; + u32 ctxt_bit:1; + u32 own_bit:1; +}; + +struct sxgbe_rx_ctxt_desc { + u32 tstamp_lo; + u32 tstamp_hi; + u32 reserved1; + u32 ptp_msgtype:4; + u32 tstamp_available:1; + u32 ptp_rsp_err:1; + u32 tstamp_dropped:1; + u32 reserved2:23; + u32 rx_ctxt_desc:1; + u32 own_bit:1; +}; + +struct sxgbe_desc_ops { + /* DMA TX descriptor ring initialization */ + void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p); + + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse, + u32 total_hdr_len, u32 tcp_hdr_len, + u32 tcp_payload_len); + + /* Assign buffer lengths for descriptor */ + void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd, + int buf1_len, int pkt_len, int cksum); + + /* Set VLAN control information */ + void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl); + + /* Set the owner of the descriptor */ + void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p); + + /* Get the owner of the descriptor */ + int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p); + + /* Invoked by the xmit function to close the tx descriptor */ + void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p); + + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p); + + /* Clear interrupt on tx frame completion. When this bit is + * set an interrupt happens as soon as the frame is transmitted + */ + void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p); + + /* Last tx segment reports the transmit status */ + int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p); + + /* Get the buffer size from the descriptor */ + int (*get_tx_len)(struct sxgbe_tx_norm_desc *p); + + /* Set tx timestamp enable bit */ + void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p); + + /* get tx timestamp status */ + int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p); + + /* TX Context Descripto Specific */ + void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p); + + /* Set the owner of the TX context descriptor */ + void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p); + + /* Get the owner of the TX context descriptor */ + int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p); + + /* Set TX mss */ + void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss); + + /* Set TX mss */ + int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p); + + /* Set TX tcmssv */ + void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p); + + /* Reset TX ostc */ + void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p); + + /* Set IVLAN information */ + void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p, + int is_ivlanvalid, int ivlan_tag, + int ivlan_ctl); + + /* Return IVLAN Tag */ + int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p); + + /* Set VLAN Tag */ + void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p, + int is_vlanvalid, int vlan_tag); + + /* Return VLAN Tag */ + int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p); + + /* Set Time stamp */ + void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p, + u8 ostc_enable, u64 tstamp); + + /* Close TX context descriptor */ + void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p); + + /* WB status of context descriptor */ + int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p); + + /* DMA RX descriptor ring initialization */ + void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic, + int mode, int end); + + /* Get own bit */ + 
int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p); + + /* Set own bit */ + void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p); + + /* Set Interrupt on completion bit */ + void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p); + + /* Get the receive frame size */ + int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p); + + /* Return first Descriptor status */ + int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p); + + /* Return first Descriptor status */ + int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p); + + /* Return the reception status looking at the RDES1 */ + int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p, + struct sxgbe_extra_stats *x, int *checksum); + + /* Get own bit */ + int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p); + + /* Set own bit */ + void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p); + + /* Return the reception status looking at Context control information */ + void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p, + struct sxgbe_extra_stats *x); + + /* Get rx timestamp status */ + int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p); + + /* Get timestamp value for rx, need to check this */ + u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p); +}; + +const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void); + +#endif /* __SXGBE_DESC_H__ */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c new file mode 100644 index 000000000..bb9b5b8af --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c @@ -0,0 +1,368 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +#include "sxgbe_common.h" +#include "sxgbe_dma.h" +#include "sxgbe_reg.h" +#include "sxgbe_desc.h" + +/* DMA core initialization */ +static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG); + + /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register. + * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register. + * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256]. + * Set burst_map irrespective of fix_burst value. 
+ */ + if (!fix_burst) + reg_val |= SXGBE_DMA_AXI_UNDEF_BURST; + + /* write burst len map */ + reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT); + + writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG); + + return 0; +} + +static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num, + int fix_burst, int pbl, dma_addr_t dma_tx, + dma_addr_t dma_rx, int t_rsize, int r_rsize) +{ + u32 reg_val; + dma_addr_t dma_addr; + + reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num)); + /* set the pbl */ + if (fix_burst) { + reg_val |= SXGBE_DMA_PBL_X8MODE; + writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num)); + /* program the TX pbl */ + reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num)); + reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT); + writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num)); + /* program the RX pbl */ + reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num)); + reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT); + writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num)); + } + + /* program desc registers */ + writel(upper_32_bits(dma_tx), + ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num)); + writel(lower_32_bits(dma_tx), + ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num)); + + writel(upper_32_bits(dma_rx), + ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num)); + writel(lower_32_bits(dma_rx), + ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num)); + + /* program tail pointers */ + /* assumption: upper 32 bits are constant and + * same as TX/RX desc list + */ + dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES); + writel(lower_32_bits(dma_addr), + ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num)); + + dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES); + writel(lower_32_bits(dma_addr), + ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num)); + /* program the ring sizes */ + writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num)); + writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num)); + + /* Enable TX/RX interrupts */ + writel(SXGBE_DMA_ENA_INT, + ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num)); +} + +static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num) +{ + u32 tx_config; + + tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num)); + tx_config |= SXGBE_TX_START_DMA; + writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num)); +} + +static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum) +{ + /* Enable TX/RX interrupts */ + writel(SXGBE_DMA_ENA_INT, + ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum)); +} + +static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum) +{ + /* Disable TX/RX interrupts */ + writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum)); +} + +static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels) +{ + int cnum; + u32 tx_ctl_reg; + + for (cnum = 0; cnum < tchannels; cnum++) { + tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum)); + tx_ctl_reg |= SXGBE_TX_ENABLE; + writel(tx_ctl_reg, + ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum)); + } +} + +static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum) +{ + u32 tx_ctl_reg; + + tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum)); + tx_ctl_reg |= SXGBE_TX_ENABLE; + writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum)); +} + +static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum) +{ + u32 tx_ctl_reg; + + tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum)); + tx_ctl_reg &= ~(SXGBE_TX_ENABLE); + writel(tx_ctl_reg, ioaddr + 
SXGBE_DMA_CHA_TXCTL_REG(dma_cnum)); +} + +static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels) +{ + int cnum; + u32 tx_ctl_reg; + + for (cnum = 0; cnum < tchannels; cnum++) { + tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum)); + tx_ctl_reg &= ~(SXGBE_TX_ENABLE); + writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum)); + } +} + +static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels) +{ + int cnum; + u32 rx_ctl_reg; + + for (cnum = 0; cnum < rchannels; cnum++) { + rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + rx_ctl_reg |= SXGBE_RX_ENABLE; + writel(rx_ctl_reg, + ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + } +} + +static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels) +{ + int cnum; + u32 rx_ctl_reg; + + for (cnum = 0; cnum < rchannels; cnum++) { + rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + rx_ctl_reg &= ~(SXGBE_RX_ENABLE); + writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum)); + } +} + +static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no, + struct sxgbe_extra_stats *x) +{ + u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + u32 clear_val = 0; + u32 ret_val = 0; + + /* TX Normal Interrupt Summary */ + if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) { + x->normal_irq_n++; + if (int_status & SXGBE_DMA_INT_STATUS_TI) { + ret_val |= handle_tx; + x->tx_normal_irq_n++; + clear_val |= SXGBE_DMA_INT_STATUS_TI; + } + + if (int_status & SXGBE_DMA_INT_STATUS_TBU) { + x->tx_underflow_irq++; + ret_val |= tx_bump_tc; + clear_val |= SXGBE_DMA_INT_STATUS_TBU; + } + } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) { + /* TX Abnormal Interrupt Summary */ + if (int_status & SXGBE_DMA_INT_STATUS_TPS) { + ret_val |= tx_hard_error; + clear_val |= SXGBE_DMA_INT_STATUS_TPS; + x->tx_process_stopped_irq++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_FBE) { + ret_val |= tx_hard_error; + x->fatal_bus_error_irq++; + + /* Assumption: FBE bit is the combination of + * all the bus access erros and cleared when + * the respective error bits cleared + */ + + /* check for actual cause */ + if (int_status & SXGBE_DMA_INT_STATUS_TEB0) { + x->tx_read_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_TEB0; + } else { + x->tx_write_transfer_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_TEB1) { + x->tx_desc_access_err++; + clear_val |= SXGBE_DMA_INT_STATUS_TEB1; + } else { + x->tx_buffer_access_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_TEB2) { + x->tx_data_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_TEB2; + } + } + + /* context descriptor error */ + if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) { + x->tx_ctxt_desc_err++; + clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR; + } + } + + /* clear the served bits */ + writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + + return ret_val; +} + +static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no, + struct sxgbe_extra_stats *x) +{ + u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + u32 clear_val = 0; + u32 ret_val = 0; + + /* RX Normal Interrupt Summary */ + if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) { + x->normal_irq_n++; + if (int_status & SXGBE_DMA_INT_STATUS_RI) { + ret_val |= handle_rx; + x->rx_normal_irq_n++; + clear_val |= SXGBE_DMA_INT_STATUS_RI; + } + } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) { + /* RX Abnormal Interrupt Summary */ + if (int_status & SXGBE_DMA_INT_STATUS_RBU) { + ret_val |= rx_bump_tc; + clear_val |= 
SXGBE_DMA_INT_STATUS_RBU; + x->rx_underflow_irq++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_RPS) { + ret_val |= rx_hard_error; + clear_val |= SXGBE_DMA_INT_STATUS_RPS; + x->rx_process_stopped_irq++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_FBE) { + ret_val |= rx_hard_error; + x->fatal_bus_error_irq++; + + /* Assumption: FBE bit is the combination of + * all the bus access erros and cleared when + * the respective error bits cleared + */ + + /* check for actual cause */ + if (int_status & SXGBE_DMA_INT_STATUS_REB0) { + x->rx_read_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_REB0; + } else { + x->rx_write_transfer_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_REB1) { + x->rx_desc_access_err++; + clear_val |= SXGBE_DMA_INT_STATUS_REB1; + } else { + x->rx_buffer_access_err++; + } + + if (int_status & SXGBE_DMA_INT_STATUS_REB2) { + x->rx_data_transfer_err++; + clear_val |= SXGBE_DMA_INT_STATUS_REB2; + } + } + } + + /* clear the served bits */ + writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no)); + + return ret_val; +} + +/* Program the HW RX Watchdog */ +static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt) +{ + u32 que_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) { + writel(riwt, + ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num)); + } +} + +static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num) +{ + u32 ctrl; + + ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num)); + ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE; + writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num)); +} + +static const struct sxgbe_dma_ops sxgbe_dma_ops = { + .init = sxgbe_dma_init, + .cha_init = sxgbe_dma_channel_init, + .enable_dma_transmission = sxgbe_enable_dma_transmission, + .enable_dma_irq = sxgbe_enable_dma_irq, + .disable_dma_irq = sxgbe_disable_dma_irq, + .start_tx = sxgbe_dma_start_tx, + .start_tx_queue = sxgbe_dma_start_tx_queue, + .stop_tx = sxgbe_dma_stop_tx, + .stop_tx_queue = sxgbe_dma_stop_tx_queue, + .start_rx = sxgbe_dma_start_rx, + .stop_rx = sxgbe_dma_stop_rx, + .tx_dma_int_status = sxgbe_tx_dma_int_status, + .rx_dma_int_status = sxgbe_rx_dma_int_status, + .rx_watchdog = sxgbe_dma_rx_watchdog, + .enable_tso = sxgbe_enable_tso, +}; + +const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void) +{ + return &sxgbe_dma_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h new file mode 100644 index 000000000..1607b54c9 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h @@ -0,0 +1,50 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __SXGBE_DMA_H__ +#define __SXGBE_DMA_H__ + +/* forward declaration */ +struct sxgbe_extra_stats; + +#define SXGBE_DMA_BLENMAP_LSHIFT 1 +#define SXGBE_DMA_TXPBL_LSHIFT 16 +#define SXGBE_DMA_RXPBL_LSHIFT 16 +#define DEFAULT_DMA_PBL 8 + +struct sxgbe_dma_ops { + /* DMA core initialization */ + int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map); + void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst, + int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx, + int t_rsize, int r_rsize); + void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum); + void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum); + void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum); + void (*start_tx)(void __iomem *ioaddr, int tchannels); + void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum); + void (*stop_tx)(void __iomem *ioaddr, int tchannels); + void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum); + void (*start_rx)(void __iomem *ioaddr, int rchannels); + void (*stop_rx)(void __iomem *ioaddr, int rchannels); + int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no, + struct sxgbe_extra_stats *x); + int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no, + struct sxgbe_extra_stats *x); + /* Program the HW RX Watchdog */ + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt); + /* Enable TSO for each DMA channel */ + void (*enable_tso)(void __iomem *ioaddr, u8 chan_num); +}; + +const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void); + +#endif /* __SXGBE_DMA_H__ */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c new file mode 100644 index 000000000..c0981ae45 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -0,0 +1,524 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sxgbe_common.h" +#include "sxgbe_reg.h" +#include "sxgbe_dma.h" + +struct sxgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define SXGBE_STAT(m) \ +{ \ + #m, \ + FIELD_SIZEOF(struct sxgbe_extra_stats, m), \ + offsetof(struct sxgbe_priv_data, xstats.m) \ +} + +static const struct sxgbe_stats sxgbe_gstrings_stats[] = { + /* TX/RX IRQ events */ + SXGBE_STAT(tx_process_stopped_irq), + SXGBE_STAT(tx_ctxt_desc_err), + SXGBE_STAT(tx_threshold), + SXGBE_STAT(rx_threshold), + SXGBE_STAT(tx_pkt_n), + SXGBE_STAT(rx_pkt_n), + SXGBE_STAT(normal_irq_n), + SXGBE_STAT(tx_normal_irq_n), + SXGBE_STAT(rx_normal_irq_n), + SXGBE_STAT(napi_poll), + SXGBE_STAT(tx_clean), + SXGBE_STAT(tx_reset_ic_bit), + SXGBE_STAT(rx_process_stopped_irq), + SXGBE_STAT(rx_underflow_irq), + + /* Bus access errors */ + SXGBE_STAT(fatal_bus_error_irq), + SXGBE_STAT(tx_read_transfer_err), + SXGBE_STAT(tx_write_transfer_err), + SXGBE_STAT(tx_desc_access_err), + SXGBE_STAT(tx_buffer_access_err), + SXGBE_STAT(tx_data_transfer_err), + SXGBE_STAT(rx_read_transfer_err), + SXGBE_STAT(rx_write_transfer_err), + SXGBE_STAT(rx_desc_access_err), + SXGBE_STAT(rx_buffer_access_err), + SXGBE_STAT(rx_data_transfer_err), + + /* EEE-LPI stats */ + SXGBE_STAT(tx_lpi_entry_n), + SXGBE_STAT(tx_lpi_exit_n), + SXGBE_STAT(rx_lpi_entry_n), + SXGBE_STAT(rx_lpi_exit_n), + SXGBE_STAT(eee_wakeup_error_n), + + /* RX specific */ + /* L2 error */ + SXGBE_STAT(rx_code_gmii_err), + SXGBE_STAT(rx_watchdog_err), + SXGBE_STAT(rx_crc_err), + SXGBE_STAT(rx_gaint_pkt_err), + SXGBE_STAT(ip_hdr_err), + SXGBE_STAT(ip_payload_err), + SXGBE_STAT(overflow_error), + + /* L2 Pkt type */ + SXGBE_STAT(len_pkt), + SXGBE_STAT(mac_ctl_pkt), + SXGBE_STAT(dcb_ctl_pkt), + SXGBE_STAT(arp_pkt), + SXGBE_STAT(oam_pkt), + SXGBE_STAT(untag_okt), + SXGBE_STAT(other_pkt), + SXGBE_STAT(svlan_tag_pkt), + SXGBE_STAT(cvlan_tag_pkt), + SXGBE_STAT(dvlan_ocvlan_icvlan_pkt), + SXGBE_STAT(dvlan_osvlan_isvlan_pkt), + SXGBE_STAT(dvlan_osvlan_icvlan_pkt), + SXGBE_STAT(dvan_ocvlan_icvlan_pkt), + + /* L3/L4 Pkt type */ + SXGBE_STAT(not_ip_pkt), + SXGBE_STAT(ip4_tcp_pkt), + SXGBE_STAT(ip4_udp_pkt), + SXGBE_STAT(ip4_icmp_pkt), + SXGBE_STAT(ip4_unknown_pkt), + SXGBE_STAT(ip6_tcp_pkt), + SXGBE_STAT(ip6_udp_pkt), + SXGBE_STAT(ip6_icmp_pkt), + SXGBE_STAT(ip6_unknown_pkt), + + /* Filter specific */ + SXGBE_STAT(vlan_filter_match), + SXGBE_STAT(sa_filter_fail), + SXGBE_STAT(da_filter_fail), + SXGBE_STAT(hash_filter_pass), + SXGBE_STAT(l3_filter_match), + SXGBE_STAT(l4_filter_match), + + /* RX context specific */ + SXGBE_STAT(timestamp_dropped), + SXGBE_STAT(rx_msg_type_no_ptp), + SXGBE_STAT(rx_ptp_type_sync), + SXGBE_STAT(rx_ptp_type_follow_up), + SXGBE_STAT(rx_ptp_type_delay_req), + SXGBE_STAT(rx_ptp_type_delay_resp), + SXGBE_STAT(rx_ptp_type_pdelay_req), + SXGBE_STAT(rx_ptp_type_pdelay_resp), + SXGBE_STAT(rx_ptp_type_pdelay_follow_up), + SXGBE_STAT(rx_ptp_announce), + SXGBE_STAT(rx_ptp_mgmt), + SXGBE_STAT(rx_ptp_signal), + SXGBE_STAT(rx_ptp_resv_msg_type), +}; +#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats) + +static int sxgbe_get_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (!priv->hw_cap.eee) + return -EOPNOTSUPP; + + edata->eee_enabled = priv->eee_enabled; + edata->eee_active = priv->eee_active; + edata->tx_lpi_timer = priv->tx_lpi_timer; + + return 
phy_ethtool_get_eee(priv->phydev, edata); +} + +static int sxgbe_set_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + priv->eee_enabled = edata->eee_enabled; + + if (!priv->eee_enabled) { + sxgbe_disable_eee_mode(priv); + } else { + /* We are asking for enabling the EEE but it is safe + * to verify all by invoking the eee_init function. + * In case of failure it will return an error. + */ + priv->eee_enabled = sxgbe_eee_init(priv); + if (!priv->eee_enabled) + return -EOPNOTSUPP; + + /* Do not change tx_lpi_timer in case of failure */ + priv->tx_lpi_timer = edata->tx_lpi_timer; + } + + return phy_ethtool_set_eee(priv->phydev, edata); +} + +static void sxgbe_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static int sxgbe_getsettings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->phydev) + return phy_ethtool_gset(priv->phydev, cmd); + + return -EOPNOTSUPP; +} + +static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->phydev) + return phy_ethtool_sset(priv->phydev, cmd); + + return -EOPNOTSUPP; +} + +static u32 sxgbe_getmsglevel(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void sxgbe_setmsglevel(struct net_device *dev, u32 level) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + priv->msg_enable = level; +} + +static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < SXGBE_STATS_LEN; i++) { + memcpy(p, sxgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } +} + +static int sxgbe_get_sset_count(struct net_device *netdev, int sset) +{ + int len; + + switch (sset) { + case ETH_SS_STATS: + len = SXGBE_STATS_LEN; + return len; + default: + return -EINVAL; + } +} + +static void sxgbe_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, u64 *data) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int i; + char *p; + + if (priv->eee_enabled) { + int val = phy_get_eee_err(priv->phydev); + + if (val) + priv->xstats.eee_wakeup_error_n = val; + } + + for (i = 0; i < SXGBE_STATS_LEN; i++) { + p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset; + data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) + ? 
(*(u64 *)p) : (*(u32 *)p); + } +} + +static void sxgbe_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + channel->max_rx = SXGBE_MAX_RX_CHANNELS; + channel->max_tx = SXGBE_MAX_TX_CHANNELS; + channel->rx_count = SXGBE_RX_QUEUES; + channel->tx_count = SXGBE_TX_QUEUES; +} + +static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv) +{ + unsigned long clk = clk_get_rate(priv->sxgbe_clk); + + if (!clk) + return 0; + + return (riwt * 256) / (clk / 1000000); +} + +static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv) +{ + unsigned long clk = clk_get_rate(priv->sxgbe_clk); + + if (!clk) + return 0; + + return (usec * (clk / 1000000)) / 256; +} + +static int sxgbe_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->use_riwt) + ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv); + + return 0; +} + +static int sxgbe_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + unsigned int rx_riwt; + + if (!ec->rx_coalesce_usecs) + return -EINVAL; + + rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv); + + if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT)) + return -EINVAL; + else if (!priv->use_riwt) + return -EOPNOTSUPP; + + priv->rx_riwt = rx_riwt; + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt); + + return 0; +} + +static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on sxgbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXFH: + ret = sxgbe_get_rss_hash_opts(priv, cmd); + break; + default: + break; + } + + return ret; +} + +static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv, + struct ethtool_rxnfc *cmd) +{ + u32 reg_val = 0; + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(cmd->data & RXH_IP_SRC) || + !(cmd->data & RXH_IP_DST) || + !(cmd->data & RXH_L4_B_0_1) || + !(cmd->data & RXH_L4_B_2_3)) + return -EINVAL; + reg_val = SXGBE_CORE_RSS_CTL_TCP4TE; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (!(cmd->data & RXH_IP_SRC) || + !(cmd->data & RXH_IP_DST) || + !(cmd->data & RXH_L4_B_0_1) || + !(cmd->data & RXH_L4_B_2_3)) + return -EINVAL; + reg_val = SXGBE_CORE_RSS_CTL_UDP4TE; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + if (!(cmd->data & RXH_IP_SRC) || 
+ !(cmd->data & RXH_IP_DST) || + (cmd->data & RXH_L4_B_0_1) || + (cmd->data & RXH_L4_B_2_3)) + return -EINVAL; + reg_val = SXGBE_CORE_RSS_CTL_IP2TE; + break; + default: + return -EINVAL; + } + + /* Read SXGBE RSS control register and update */ + reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG); + writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG); + readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG); + + return 0; +} + +static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + ret = sxgbe_set_rss_hash_opt(priv, cmd); + break; + default: + break; + } + + return ret; +} + +static void sxgbe_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + u32 *reg_space = (u32 *)space; + int reg_offset; + int reg_ix = 0; + void __iomem *ioaddr = priv->ioaddr; + + memset(reg_space, 0x0, REG_SPACE_SIZE); + + /* MAC registers */ + for (reg_offset = START_MAC_REG_OFFSET; + reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = readl(ioaddr + reg_offset); + reg_ix++; + } + + /* MTL registers */ + for (reg_offset = START_MTL_REG_OFFSET; + reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = readl(ioaddr + reg_offset); + reg_ix++; + } + + /* DMA registers */ + for (reg_offset = START_DMA_REG_OFFSET; + reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { + reg_space[reg_ix] = readl(ioaddr + reg_offset); + reg_ix++; + } + + BUG_ON(reg_ix * 4 > REG_SPACE_SIZE); +} + +static int sxgbe_get_regs_len(struct net_device *dev) +{ + return REG_SPACE_SIZE; +} + +static const struct ethtool_ops sxgbe_ethtool_ops = { + .get_drvinfo = sxgbe_getdrvinfo, + .get_settings = sxgbe_getsettings, + .set_settings = sxgbe_setsettings, + .get_msglevel = sxgbe_getmsglevel, + .set_msglevel = sxgbe_setmsglevel, + .get_link = ethtool_op_get_link, + .get_strings = sxgbe_get_strings, + .get_ethtool_stats = sxgbe_get_ethtool_stats, + .get_sset_count = sxgbe_get_sset_count, + .get_channels = sxgbe_get_channels, + .get_coalesce = sxgbe_get_coalesce, + .set_coalesce = sxgbe_set_coalesce, + .get_rxnfc = sxgbe_get_rxnfc, + .set_rxnfc = sxgbe_set_rxnfc, + .get_regs = sxgbe_get_regs, + .get_regs_len = sxgbe_get_regs_len, + .get_eee = sxgbe_get_eee, + .set_eee = sxgbe_set_eee, +}; + +void sxgbe_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &sxgbe_ethtool_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c new file mode 100644 index 000000000..413ea14ab --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -0,0 +1,2345 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxgbe_common.h" +#include "sxgbe_desc.h" +#include "sxgbe_dma.h" +#include "sxgbe_mtl.h" +#include "sxgbe_reg.h" + +#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x) +#define JUMBO_LEN 9000 + +/* Module parameters */ +#define TX_TIMEO 5000 +#define DMA_TX_SIZE 512 +#define DMA_RX_SIZE 1024 +#define TC_DEFAULT 64 +#define DMA_BUFFER_SIZE BUF_SIZE_2KiB +/* The default timer value as per the sxgbe specification 1 sec(1000 ms) */ +#define SXGBE_DEFAULT_LPI_TIMER 1000 + +static int debug = -1; +static int eee_timer = SXGBE_DEFAULT_LPI_TIMER; + +module_param(eee_timer, int, S_IRUGO | S_IWUSR); + +module_param(debug, int, S_IRUGO | S_IWUSR); +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); + +static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id); +static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id); +static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id); + +#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) + +#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) + +/** + * sxgbe_verify_args - verify the driver parameters. + * Description: it verifies if some wrong parameter is passed to the driver. + * Note that wrong parameters are replaced with the default values. + */ +static void sxgbe_verify_args(void) +{ + if (unlikely(eee_timer < 0)) + eee_timer = SXGBE_DEFAULT_LPI_TIMER; +} + +static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv) +{ + /* Check and enter in LPI mode */ + if (!priv->tx_path_in_lpi_mode) + priv->hw->mac->set_eee_mode(priv->ioaddr); +} + +void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv) +{ + /* Exit and disable EEE in case of we are are in LPI state. */ + priv->hw->mac->reset_eee_mode(priv->ioaddr); + del_timer_sync(&priv->eee_ctrl_timer); + priv->tx_path_in_lpi_mode = false; +} + +/** + * sxgbe_eee_ctrl_timer + * @arg : data hook + * Description: + * If there is no data transfer and if we are not in LPI state, + * then MAC Transmitter can be moved to LPI state. + */ +static void sxgbe_eee_ctrl_timer(unsigned long arg) +{ + struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg; + + sxgbe_enable_eee_mode(priv); + mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); +} + +/** + * sxgbe_eee_init + * @priv: private device pointer + * Description: + * If the EEE support has been enabled while configuring the driver, + * if the GMAC actually supports the EEE (from the HW cap reg) and the + * phy can also manage EEE, so enable the LPI state and start the timer + * to verify if the tx path can enter in LPI state. + */ +bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) +{ + bool ret = false; + + /* MAC core supports the EEE feature. 
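+ * If it does, phy_init_eee() checks that the attached PHY agrees; on success + * the eee_ctrl_timer armed below is used to move the TX path into LPI after + * eee_timer milliseconds (see sxgbe_eee_ctrl_timer above).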
*/ + if (priv->hw_cap.eee) { + /* Check if the PHY supports EEE */ + if (phy_init_eee(priv->phydev, 1)) + return false; + + priv->eee_active = 1; + setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, + (unsigned long)priv); + priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); + add_timer(&priv->eee_ctrl_timer); + + priv->hw->mac->set_eee_timer(priv->ioaddr, + SXGBE_DEFAULT_LPI_TIMER, + priv->tx_lpi_timer); + + pr_info("Energy-Efficient Ethernet initialized\n"); + + ret = true; + } + + return ret; +} + +static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv) +{ + /* When the EEE has been already initialised we have to + * modify the PLS bit in the LPI ctrl & status reg according + * to the PHY link status. For this reason. + */ + if (priv->eee_enabled) + priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); +} + +/** + * sxgbe_clk_csr_set - dynamically set the MDC clock + * @priv: driver private structure + * Description: this is to dynamically set the MDC clock according to the csr + * clock input. + */ +static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv) +{ + u32 clk_rate = clk_get_rate(priv->sxgbe_clk); + + /* assign the proper divider, this will be used during + * mdio communication + */ + if (clk_rate < SXGBE_CSR_F_150M) + priv->clk_csr = SXGBE_CSR_100_150M; + else if (clk_rate <= SXGBE_CSR_F_250M) + priv->clk_csr = SXGBE_CSR_150_250M; + else if (clk_rate <= SXGBE_CSR_F_300M) + priv->clk_csr = SXGBE_CSR_250_300M; + else if (clk_rate <= SXGBE_CSR_F_350M) + priv->clk_csr = SXGBE_CSR_300_350M; + else if (clk_rate <= SXGBE_CSR_F_400M) + priv->clk_csr = SXGBE_CSR_350_400M; + else if (clk_rate <= SXGBE_CSR_F_500M) + priv->clk_csr = SXGBE_CSR_400_500M; +} + +/* minimum number of free TX descriptors required to wake up TX process */ +#define SXGBE_TX_THRESH(x) (x->dma_tx_size/4) + +static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize) +{ + return queue->dirty_tx + tx_qsize - queue->cur_tx - 1; +} + +/** + * sxgbe_adjust_link + * @dev: net device structure + * Description: it adjusts the link parameters. + */ +static void sxgbe_adjust_link(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + u8 new_state = 0; + u8 speed = 0xff; + + if (!phydev) + return; + + /* SXGBE is not supporting auto-negotiation and + * half duplex mode. so, not handling duplex change + * in this function. only handling speed and link status + */ + if (phydev->link) { + if (phydev->speed != priv->speed) { + new_state = 1; + switch (phydev->speed) { + case SPEED_10000: + speed = SXGBE_SPEED_10G; + break; + case SPEED_2500: + speed = SXGBE_SPEED_2_5G; + break; + case SPEED_1000: + speed = SXGBE_SPEED_1G; + break; + default: + netif_err(priv, link, dev, + "Speed (%d) not supported\n", + phydev->speed); + } + + priv->speed = phydev->speed; + priv->hw->mac->set_speed(priv->ioaddr, speed); + } + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->speed = SPEED_UNKNOWN; + } + + if (new_state & netif_msg_link(priv)) + phy_print_status(phydev); + + /* Alter the MAC settings for EEE */ + sxgbe_eee_adjust(priv); +} + +/** + * sxgbe_init_phy - PHY initialization + * @dev: net device structure + * Description: it initializes the driver's PHY state, and attaches the PHY + * to the mac driver. 
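+ * The PHY is looked up by a bus id string built below; for example, a bus + * named "sxgbe-0" with phy_addr 0 would give an id of roughly "sxgbe-0:00", + * assuming the usual PHY_ID_FMT layout.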
+ * Return value: + * 0 on success + */ +static int sxgbe_init_phy(struct net_device *ndev) +{ + char phy_id_fmt[MII_BUS_ID_SIZE + 3]; + char bus_id[MII_BUS_ID_SIZE]; + struct phy_device *phydev; + struct sxgbe_priv_data *priv = netdev_priv(ndev); + int phy_iface = priv->plat->interface; + + /* assign default link status */ + priv->oldlink = 0; + priv->speed = SPEED_UNKNOWN; + priv->oldduplex = DUPLEX_UNKNOWN; + + if (priv->plat->phy_bus_name) + snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", + priv->plat->phy_bus_name, priv->plat->bus_id); + else + snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x", + priv->plat->bus_id); + + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, + priv->plat->phy_addr); + netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt); + + phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface); + + if (IS_ERR(phydev)) { + netdev_err(ndev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* Stop Advertising 1000BASE Capability if interface is not GMII */ + if ((phy_iface == PHY_INTERFACE_MODE_MII) || + (phy_iface == PHY_INTERFACE_MODE_RMII)) + phydev->advertising &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + if (phydev->phy_id == 0) { + phy_disconnect(phydev); + return -ENODEV; + } + + netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n", + __func__, phydev->phy_id, phydev->link); + + /* save phy device in private structure */ + priv->phydev = phydev; + + return 0; +} + +/** + * sxgbe_clear_descriptors: clear descriptors + * @priv: driver private structure + * Description: this function is called to clear the tx and rx descriptors + * in case of both basic and extended descriptors are used. + */ +static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv) +{ + int i, j; + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + + /* Clear the Rx/Tx descriptors */ + for (j = 0; j < SXGBE_RX_QUEUES; j++) { + for (i = 0; i < rxsize; i++) + priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i], + priv->use_riwt, priv->mode, + (i == rxsize - 1)); + } + + for (j = 0; j < SXGBE_TX_QUEUES; j++) { + for (i = 0; i < txsize; i++) + priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]); + } +} + +static int sxgbe_init_rx_buffers(struct net_device *dev, + struct sxgbe_rx_norm_desc *p, int i, + unsigned int dma_buf_sz, + struct sxgbe_rx_queue *rx_ring) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + struct sk_buff *skb; + + skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + rx_ring->rx_skbuff[i] = skb; + rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, + dma_buf_sz, DMA_FROM_DEVICE); + + if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) { + netdev_err(dev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i]; + + return 0; +} + +/** + * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated + * @dev: net device structure + * @rx_ring: ring to be freed + * @rx_rsize: ring size + * Description: this function initializes the DMA RX descriptor + */ +static void sxgbe_free_rx_buffers(struct net_device *dev, + struct sxgbe_rx_norm_desc *p, int i, + unsigned int dma_buf_sz, + struct sxgbe_rx_queue *rx_ring) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + kfree_skb(rx_ring->rx_skbuff[i]); + dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i], + dma_buf_sz, DMA_FROM_DEVICE); 
+} + +/** + * init_tx_ring - init the TX descriptor ring + * @dev: net device structure + * @tx_ring: ring to be intialised + * @tx_rsize: ring size + * Description: this function initializes the DMA TX descriptor + */ +static int init_tx_ring(struct device *dev, u8 queue_no, + struct sxgbe_tx_queue *tx_ring, int tx_rsize) +{ + /* TX ring is not allcoated */ + if (!tx_ring) { + dev_err(dev, "No memory for TX queue of SXGBE\n"); + return -ENOMEM; + } + + /* allocate memory for TX descriptors */ + tx_ring->dma_tx = dma_zalloc_coherent(dev, + tx_rsize * sizeof(struct sxgbe_tx_norm_desc), + &tx_ring->dma_tx_phy, GFP_KERNEL); + if (!tx_ring->dma_tx) + return -ENOMEM; + + /* allocate memory for TX skbuff array */ + tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize, + sizeof(dma_addr_t), GFP_KERNEL); + if (!tx_ring->tx_skbuff_dma) + goto dmamem_err; + + tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize, + sizeof(struct sk_buff *), GFP_KERNEL); + + if (!tx_ring->tx_skbuff) + goto dmamem_err; + + /* assign queue number */ + tx_ring->queue_no = queue_no; + + /* initialise counters */ + tx_ring->dirty_tx = 0; + tx_ring->cur_tx = 0; + + /* initialise TX queue lock */ + spin_lock_init(&tx_ring->tx_lock); + + return 0; + +dmamem_err: + dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc), + tx_ring->dma_tx, tx_ring->dma_tx_phy); + return -ENOMEM; +} + +/** + * free_rx_ring - free the RX descriptor ring + * @dev: net device structure + * @rx_ring: ring to be intialised + * @rx_rsize: ring size + * Description: this function initializes the DMA RX descriptor + */ +static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring, + int rx_rsize) +{ + dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc), + rx_ring->dma_rx, rx_ring->dma_rx_phy); + kfree(rx_ring->rx_skbuff_dma); + kfree(rx_ring->rx_skbuff); +} + +/** + * init_rx_ring - init the RX descriptor ring + * @dev: net device structure + * @rx_ring: ring to be intialised + * @rx_rsize: ring size + * Description: this function initializes the DMA RX descriptor + */ +static int init_rx_ring(struct net_device *dev, u8 queue_no, + struct sxgbe_rx_queue *rx_ring, int rx_rsize) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int desc_index; + unsigned int bfsize = 0; + unsigned int ret = 0; + + /* Set the max buffer size according to the MTU. 
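+ * For example, a 1500 byte MTU with the default NET_IP_ALIGN of 2 gives + * ALIGN(1500 + 14 + 4 + 2, 8) = 1520 bytes per receive buffer.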
*/ + bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8); + + netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize); + + /* RX ring is not allcoated */ + if (rx_ring == NULL) { + netdev_err(dev, "No memory for RX queue\n"); + return -ENOMEM; + } + + /* assign queue number */ + rx_ring->queue_no = queue_no; + + /* allocate memory for RX descriptors */ + rx_ring->dma_rx = dma_zalloc_coherent(priv->device, + rx_rsize * sizeof(struct sxgbe_rx_norm_desc), + &rx_ring->dma_rx_phy, GFP_KERNEL); + + if (rx_ring->dma_rx == NULL) + return -ENOMEM; + + /* allocate memory for RX skbuff array */ + rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, + sizeof(dma_addr_t), GFP_KERNEL); + if (!rx_ring->rx_skbuff_dma) { + ret = -ENOMEM; + goto err_free_dma_rx; + } + + rx_ring->rx_skbuff = kmalloc_array(rx_rsize, + sizeof(struct sk_buff *), GFP_KERNEL); + if (!rx_ring->rx_skbuff) { + ret = -ENOMEM; + goto err_free_skbuff_dma; + } + + /* initialise the buffers */ + for (desc_index = 0; desc_index < rx_rsize; desc_index++) { + struct sxgbe_rx_norm_desc *p; + p = rx_ring->dma_rx + desc_index; + ret = sxgbe_init_rx_buffers(dev, p, desc_index, + bfsize, rx_ring); + if (ret) + goto err_free_rx_buffers; + } + + /* initialise counters */ + rx_ring->cur_rx = 0; + rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize); + priv->dma_buf_sz = bfsize; + + return 0; + +err_free_rx_buffers: + while (--desc_index >= 0) { + struct sxgbe_rx_norm_desc *p; + + p = rx_ring->dma_rx + desc_index; + sxgbe_free_rx_buffers(dev, p, desc_index, bfsize, rx_ring); + } + kfree(rx_ring->rx_skbuff); +err_free_skbuff_dma: + kfree(rx_ring->rx_skbuff_dma); +err_free_dma_rx: + dma_free_coherent(priv->device, + rx_rsize * sizeof(struct sxgbe_rx_norm_desc), + rx_ring->dma_rx, rx_ring->dma_rx_phy); + + return ret; +} +/** + * free_tx_ring - free the TX descriptor ring + * @dev: net device structure + * @tx_ring: ring to be intialised + * @tx_rsize: ring size + * Description: this function initializes the DMA TX descriptor + */ +static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring, + int tx_rsize) +{ + dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc), + tx_ring->dma_tx, tx_ring->dma_tx_phy); +} + +/** + * init_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. It suppors the chained and ring + * modes. 
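+ * If a per-queue allocation fails, the queues already initialised for that + * direction are unwound via the txalloc_err/rxalloc_err labels below.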
+ */ +static int init_dma_desc_rings(struct net_device *netd) +{ + int queue_num, ret; + struct sxgbe_priv_data *priv = netdev_priv(netd); + int tx_rsize = priv->dma_tx_size; + int rx_rsize = priv->dma_rx_size; + + /* Allocate memory for queue structures and TX descs */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + ret = init_tx_ring(priv->device, queue_num, + priv->txq[queue_num], tx_rsize); + if (ret) { + dev_err(&netd->dev, "TX DMA ring allocation failed!\n"); + goto txalloc_err; + } + + /* save private pointer in each ring this + * pointer is needed during cleaing TX queue + */ + priv->txq[queue_num]->priv_ptr = priv; + } + + /* Allocate memory for queue structures and RX descs */ + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + ret = init_rx_ring(netd, queue_num, + priv->rxq[queue_num], rx_rsize); + if (ret) { + netdev_err(netd, "RX DMA ring allocation failed!!\n"); + goto rxalloc_err; + } + + /* save private pointer in each ring this + * pointer is needed during cleaing TX queue + */ + priv->rxq[queue_num]->priv_ptr = priv; + } + + sxgbe_clear_descriptors(priv); + + return 0; + +txalloc_err: + while (queue_num--) + free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); + return ret; + +rxalloc_err: + while (queue_num--) + free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); + return ret; +} + +static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue) +{ + int dma_desc; + struct sxgbe_priv_data *priv = txqueue->priv_ptr; + int tx_rsize = priv->dma_tx_size; + + for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) { + struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc; + + if (txqueue->tx_skbuff_dma[dma_desc]) + dma_unmap_single(priv->device, + txqueue->tx_skbuff_dma[dma_desc], + priv->hw->desc->get_tx_len(tdesc), + DMA_TO_DEVICE); + + dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]); + txqueue->tx_skbuff[dma_desc] = NULL; + txqueue->tx_skbuff_dma[dma_desc] = 0; + } +} + + +static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; + tx_free_ring_skbufs(tqueue); + } +} + +static void free_dma_desc_resources(struct sxgbe_priv_data *priv) +{ + int queue_num; + int tx_rsize = priv->dma_tx_size; + int rx_rsize = priv->dma_rx_size; + + /* Release the DMA TX buffers */ + dma_free_tx_skbufs(priv); + + /* Release the TX ring memory also */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); + } + + /* Release the RX ring memory also */ + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); + } +} + +static int txring_mem_alloc(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + priv->txq[queue_num] = devm_kmalloc(priv->device, + sizeof(struct sxgbe_tx_queue), GFP_KERNEL); + if (!priv->txq[queue_num]) + return -ENOMEM; + } + + return 0; +} + +static int rxring_mem_alloc(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + priv->rxq[queue_num] = devm_kmalloc(priv->device, + sizeof(struct sxgbe_rx_queue), GFP_KERNEL); + if (!priv->rxq[queue_num]) + return -ENOMEM; + } + + return 0; +} + +/** + * sxgbe_mtl_operation_mode - HW MTL operation mode + * @priv: driver private structure + * Description: it sets the MTL operation mode: tx/rx MTL thresholds + * or Store-And-Forward capability. 
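+ * With force_sf_dma_mode set in the platform data every TX/RX MTL queue is + * put into Store-And-Forward (SXGBE_MTL_SFMODE); with force_thresh_dma_mode + * the per-queue tx_tc/rx_tc threshold values are programmed instead.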
+ */ +static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv) +{ + int queue_num; + + /* TX/RX threshold control */ + if (likely(priv->plat->force_sf_dma_mode)) { + /* set TC mode for TX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) + priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, + SXGBE_MTL_SFMODE); + priv->tx_tc = SXGBE_MTL_SFMODE; + + /* set TC mode for RX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) + priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, + SXGBE_MTL_SFMODE); + priv->rx_tc = SXGBE_MTL_SFMODE; + } else if (unlikely(priv->plat->force_thresh_dma_mode)) { + /* set TC mode for TX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) + priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, + priv->tx_tc); + /* set TC mode for RX QUEUES */ + SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) + priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, + priv->rx_tc); + } else { + pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__); + } +} + +/** + * sxgbe_tx_queue_clean: + * @priv: driver private structure + * Description: it reclaims resources after transmission completes. + */ +static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue) +{ + struct sxgbe_priv_data *priv = tqueue->priv_ptr; + unsigned int tx_rsize = priv->dma_tx_size; + struct netdev_queue *dev_txq; + u8 queue_no = tqueue->queue_no; + + dev_txq = netdev_get_tx_queue(priv->dev, queue_no); + + spin_lock(&tqueue->tx_lock); + + priv->xstats.tx_clean++; + while (tqueue->dirty_tx != tqueue->cur_tx) { + unsigned int entry = tqueue->dirty_tx % tx_rsize; + struct sk_buff *skb = tqueue->tx_skbuff[entry]; + struct sxgbe_tx_norm_desc *p; + + p = tqueue->dma_tx + entry; + + /* Check if the descriptor is owned by the DMA. */ + if (priv->hw->desc->get_tx_owner(p)) + break; + + if (netif_msg_tx_done(priv)) + pr_debug("%s: curr %d, dirty %d\n", + __func__, tqueue->cur_tx, tqueue->dirty_tx); + + if (likely(tqueue->tx_skbuff_dma[entry])) { + dma_unmap_single(priv->device, + tqueue->tx_skbuff_dma[entry], + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + tqueue->tx_skbuff_dma[entry] = 0; + } + + if (likely(skb)) { + dev_kfree_skb(skb); + tqueue->tx_skbuff[entry] = NULL; + } + + priv->hw->desc->release_tx_desc(p); + + tqueue->dirty_tx++; + } + + /* wake up queue */ + if (unlikely(netif_tx_queue_stopped(dev_txq) && + sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_tx_queue_stopped(dev_txq) && + sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) { + if (netif_msg_tx_done(priv)) + pr_debug("%s: restart transmit\n", __func__); + netif_tx_wake_queue(dev_txq); + } + netif_tx_unlock(priv->dev); + } + + spin_unlock(&tqueue->tx_lock); +} + +/** + * sxgbe_tx_clean: + * @priv: driver private structure + * Description: it reclaims resources after transmission completes. + */ +static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv) +{ + u8 queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; + + sxgbe_tx_queue_clean(tqueue); + } + + if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { + sxgbe_enable_eee_mode(priv); + mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); + } +} + +/** + * sxgbe_restart_tx_queue: irq tx error mng function + * @priv: driver private structure + * Description: it cleans the descriptors and restarts the transmission + * in case of errors. 
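+ * The sequence is: stop the netdev queue, stop the TX DMA channel, free the + * queued skbs, reset the ring indices, restart the DMA and finally wake the + * netdev queue again.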
+ */ +static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num) +{ + struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; + struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev, + queue_num); + + /* stop the queue */ + netif_tx_stop_queue(dev_txq); + + /* stop the tx dma */ + priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num); + + /* free the skbuffs of the ring */ + tx_free_ring_skbufs(tx_ring); + + /* initialise counters */ + tx_ring->cur_tx = 0; + tx_ring->dirty_tx = 0; + + /* start the tx dma */ + priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num); + + priv->dev->stats.tx_errors++; + + /* wakeup the queue */ + netif_tx_wake_queue(dev_txq); +} + +/** + * sxgbe_reset_all_tx_queues: irq tx error mng function + * @priv: driver private structure + * Description: it cleans all the descriptors and + * restarts the transmission on all queues in case of errors. + */ +static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv) +{ + int queue_num; + + /* On TX timeout of net device, resetting of all queues + * may not be proper way, revisit this later if needed + */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) + sxgbe_restart_tx_queue(priv, queue_num); +} + +/** + * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register. + * @priv: driver private structure + * Description: + * new GMAC chip generations have a new register to indicate the + * presence of the optional feature/functions. + * This can be also used to override the value passed through the + * platform and necessary for old MAC10/100 and GMAC chips. + */ +static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv) +{ + int rval = 0; + struct sxgbe_hw_features *features = &priv->hw_cap; + + /* Read First Capability Register CAP[0] */ + rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0); + if (rval) { + features->pmt_remote_wake_up = + SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval); + features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval); + features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval); + features->tx_csum_offload = + SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval); + features->rx_csum_offload = + SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval); + features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval); + features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval); + features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval); + features->eee = SXGBE_HW_FEAT_EEE(rval); + } + + /* Read First Capability Register CAP[1] */ + rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1); + if (rval) { + features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval); + features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); + features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); + features->dcb_enable = SXGBE_HW_FEAT_DCB(rval); + features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval); + features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval); + features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval); + features->rss_enable = SXGBE_HW_FEAT_RSS(rval); + features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval); + features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval); + } + + /* Read First Capability Register CAP[2] */ + rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2); + if (rval) { + features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval); + features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval); + features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval); + features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval); + 
features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval); + features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval); + } + + return rval; +} + +/** + * sxgbe_check_ether_addr: check if the MAC addr is valid + * @priv: driver private structure + * Description: + * it is to verify if the MAC address is valid, in case of failures it + * generates a random MAC address + */ +static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv) +{ + if (!is_valid_ether_addr(priv->dev->dev_addr)) { + priv->hw->mac->get_umac_addr((void __iomem *) + priv->ioaddr, + priv->dev->dev_addr, 0); + if (!is_valid_ether_addr(priv->dev->dev_addr)) + eth_hw_addr_random(priv->dev); + } + dev_info(priv->device, "device MAC address %pM\n", + priv->dev->dev_addr); +} + +/** + * sxgbe_init_dma_engine: DMA init. + * @priv: driver private structure + * Description: + * It inits the DMA invoking the specific SXGBE callback. + * Some DMA parameters can be passed from the platform; + * in case of these are not passed a default is kept for the MAC or GMAC. + */ +static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv) +{ + int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0; + int queue_num; + + if (priv->plat->dma_cfg) { + pbl = priv->plat->dma_cfg->pbl; + fixed_burst = priv->plat->dma_cfg->fixed_burst; + burst_map = priv->plat->dma_cfg->burst_map; + } + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) + priv->hw->dma->cha_init(priv->ioaddr, queue_num, + fixed_burst, pbl, + (priv->txq[queue_num])->dma_tx_phy, + (priv->rxq[queue_num])->dma_rx_phy, + priv->dma_tx_size, priv->dma_rx_size); + + return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map); +} + +/** + * sxgbe_init_mtl_engine: MTL init. + * @priv: driver private structure + * Description: + * It inits the MTL invoking the specific SXGBE callback. + */ +static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num, + priv->hw_cap.tx_mtl_qsize); + priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num); + } +} + +/** + * sxgbe_disable_mtl_engine: MTL disable. + * @priv: driver private structure + * Description: + * It disables the MTL queues by invoking the specific SXGBE callback. + */ +static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv) +{ + int queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) + priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num); +} + + +/** + * sxgbe_tx_timer: mitigation sw timer for tx. + * @data: data pointer + * Description: + * This is the timer handler to directly invoke the sxgbe_tx_clean. + */ +static void sxgbe_tx_timer(unsigned long data) +{ + struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data; + sxgbe_tx_queue_clean(p); +} + +/** + * sxgbe_init_tx_coalesce: init tx mitigation options. + * @priv: driver private structure + * Description: + * This inits the transmit coalesce parameters: i.e. timer rate, + * timer handler and default threshold used for enabling the + * interrupt on completion bit. 
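+ * Each TX queue gets its own timer armed with SXGBE_COAL_TIMER(), so completed + * descriptors are still reclaimed by sxgbe_tx_queue_clean() even when the IC + * bit has been suppressed for coalescing.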
+ */ +static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv) +{ + u8 queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *p = priv->txq[queue_num]; + p->tx_coal_frames = SXGBE_TX_FRAMES; + p->tx_coal_timer = SXGBE_COAL_TX_TIMER; + setup_timer(&p->txtimer, sxgbe_tx_timer, + (unsigned long)&priv->txq[queue_num]); + p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); + add_timer(&p->txtimer); + } +} + +static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv) +{ + u8 queue_num; + + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + struct sxgbe_tx_queue *p = priv->txq[queue_num]; + del_timer_sync(&p->txtimer); + } +} + +/** + * sxgbe_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int sxgbe_open(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int ret, queue_num; + + clk_prepare_enable(priv->sxgbe_clk); + + sxgbe_check_ether_addr(priv); + + /* Init the phy */ + ret = sxgbe_init_phy(dev); + if (ret) { + netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n", + __func__, ret); + goto phy_error; + } + + /* Create and initialize the TX/RX descriptors chains. */ + priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE); + priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE); + priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE); + priv->tx_tc = TC_DEFAULT; + priv->rx_tc = TC_DEFAULT; + init_dma_desc_rings(dev); + + /* DMA initialization and SW reset */ + ret = sxgbe_init_dma_engine(priv); + if (ret < 0) { + netdev_err(dev, "%s: DMA initialization failed\n", __func__); + goto init_error; + } + + /* MTL initialization */ + sxgbe_init_mtl_engine(priv); + + /* Copy the MAC addr into the HW */ + priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); + + /* Initialize the MAC Core */ + priv->hw->mac->core_init(priv->ioaddr); + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num); + } + + /* Request the IRQ lines */ + ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, priv->irq, ret); + goto init_error; + } + + /* If the LPI irq is different from the mac irq + * register a dedicated handler + */ + if (priv->lpi_irq != dev->irq) { + ret = devm_request_irq(priv->device, priv->lpi_irq, + sxgbe_common_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); + goto init_error; + } + } + + /* Request TX DMA irq lines */ + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + ret = devm_request_irq(priv->device, + (priv->txq[queue_num])->irq_no, + sxgbe_tx_interrupt, 0, + dev->name, priv->txq[queue_num]); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n", + __func__, priv->irq, ret); + goto init_error; + } + } + + /* Request RX DMA irq lines */ + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + ret = devm_request_irq(priv->device, + (priv->rxq[queue_num])->irq_no, + sxgbe_rx_interrupt, 0, + dev->name, priv->rxq[queue_num]); + if (unlikely(ret < 0)) { + netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n", + __func__, priv->irq, ret); 
+ goto init_error; + } + } + + /* Enable the MAC Rx/Tx */ + priv->hw->mac->enable_tx(priv->ioaddr, true); + priv->hw->mac->enable_rx(priv->ioaddr, true); + + /* Set the HW DMA mode and the COE */ + sxgbe_mtl_operation_mode(priv); + + /* Extra statistics */ + memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats)); + + priv->xstats.tx_threshold = priv->tx_tc; + priv->xstats.rx_threshold = priv->rx_tc; + + /* Start the ball rolling... */ + netdev_dbg(dev, "DMA RX/TX processes started...\n"); + priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); + priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); + + if (priv->phydev) + phy_start(priv->phydev); + + /* initialise TX coalesce parameters */ + sxgbe_tx_init_coalesce(priv); + + if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { + priv->rx_riwt = SXGBE_MAX_DMA_RIWT; + priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT); + } + + priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER; + priv->eee_enabled = sxgbe_eee_init(priv); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + +init_error: + free_dma_desc_resources(priv); + if (priv->phydev) + phy_disconnect(priv->phydev); +phy_error: + clk_disable_unprepare(priv->sxgbe_clk); + + return ret; +} + +/** + * sxgbe_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. + */ +static int sxgbe_release(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + if (priv->eee_enabled) + del_timer_sync(&priv->eee_ctrl_timer); + + /* Stop and disconnect the PHY */ + if (priv->phydev) { + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + netif_tx_stop_all_queues(dev); + + napi_disable(&priv->napi); + + /* delete TX timers */ + sxgbe_tx_del_timer(priv); + + /* Stop TX/RX DMA and clear the descriptors */ + priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); + priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); + + /* disable MTL queue */ + sxgbe_disable_mtl_engine(priv); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC Rx/Tx */ + priv->hw->mac->enable_tx(priv->ioaddr, false); + priv->hw->mac->enable_rx(priv->ioaddr, false); + + clk_disable_unprepare(priv->sxgbe_clk); + + return 0; +} +/* Prepare first Tx descriptor for doing TSO operation */ +static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv, + struct sxgbe_tx_norm_desc *first_desc, + struct sk_buff *skb) +{ + unsigned int total_hdr_len, tcp_hdr_len; + + /* Write first Tx descriptor with appropriate value */ + tcp_hdr_len = tcp_hdrlen(skb); + total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len; + + first_desc->tdes01 = dma_map_single(priv->device, skb->data, + total_hdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, first_desc->tdes01)) + pr_err("%s: TX dma mapping failed!!\n", __func__); + + first_desc->tdes23.tx_rd_des23.first_desc = 1; + priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len, + tcp_hdr_len, + skb->len - total_hdr_len); +} + +/** + * sxgbe_xmit: Tx entry point of the driver + * @skb : the socket buffer + * @dev : device pointer + * Description : this is the tx entry point of the driver. + * It programs the chain or the ring and supports oversized frames + * and SG feature. 
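+ * For TSO frames a context descriptor carrying the new MSS is written first + * (when the MSS changed), the header is then mapped separately by + * sxgbe_tso_prepare() and the paged fragments follow as regular descriptors.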
+ */ +static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned int entry, frag_num; + int cksum_flag = 0; + struct netdev_queue *dev_txq; + unsigned txq_index = skb_get_queue_mapping(skb); + struct sxgbe_priv_data *priv = netdev_priv(dev); + unsigned int tx_rsize = priv->dma_tx_size; + struct sxgbe_tx_queue *tqueue = priv->txq[txq_index]; + struct sxgbe_tx_norm_desc *tx_desc, *first_desc; + struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL; + int nr_frags = skb_shinfo(skb)->nr_frags; + int no_pagedlen = skb_headlen(skb); + int is_jumbo = 0; + u16 cur_mss = skb_shinfo(skb)->gso_size; + u32 ctxt_desc_req = 0; + + /* get the TX queue handle */ + dev_txq = netdev_get_tx_queue(dev, txq_index); + + if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) + ctxt_desc_req = 1; + + if (unlikely(skb_vlan_tag_present(skb) || + ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + tqueue->hwts_tx_en))) + ctxt_desc_req = 1; + + /* get the spinlock */ + spin_lock(&tqueue->tx_lock); + + if (priv->tx_path_in_lpi_mode) + sxgbe_disable_eee_mode(priv); + + if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) { + if (!netif_tx_queue_stopped(dev_txq)) { + netif_tx_stop_queue(dev_txq); + netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n", + __func__, txq_index); + } + /* release the spin lock in case of BUSY */ + spin_unlock(&tqueue->tx_lock); + return NETDEV_TX_BUSY; + } + + entry = tqueue->cur_tx % tx_rsize; + tx_desc = tqueue->dma_tx + entry; + + first_desc = tx_desc; + if (ctxt_desc_req) + ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc; + + /* save the skb address */ + tqueue->tx_skbuff[entry] = skb; + + if (!is_jumbo) { + if (likely(skb_is_gso(skb))) { + /* TSO support */ + if (unlikely(tqueue->prev_mss != cur_mss)) { + priv->hw->desc->tx_ctxt_desc_set_mss( + ctxt_desc, cur_mss); + priv->hw->desc->tx_ctxt_desc_set_tcmssv( + ctxt_desc); + priv->hw->desc->tx_ctxt_desc_reset_ostc( + ctxt_desc); + priv->hw->desc->tx_ctxt_desc_set_ctxt( + ctxt_desc); + priv->hw->desc->tx_ctxt_desc_set_owner( + ctxt_desc); + + entry = (++tqueue->cur_tx) % tx_rsize; + first_desc = tqueue->dma_tx + entry; + + tqueue->prev_mss = cur_mss; + } + sxgbe_tso_prepare(priv, first_desc, skb); + } else { + tx_desc->tdes01 = dma_map_single(priv->device, + skb->data, no_pagedlen, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, tx_desc->tdes01)) + netdev_err(dev, "%s: TX dma mapping failed!!\n", + __func__); + + priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen, + no_pagedlen, cksum_flag); + } + } + + for (frag_num = 0; frag_num < nr_frags; frag_num++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; + int len = skb_frag_size(frag); + + entry = (++tqueue->cur_tx) % tx_rsize; + tx_desc = tqueue->dma_tx + entry; + tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + + tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01; + tqueue->tx_skbuff[entry] = NULL; + + /* prepare the descriptor */ + priv->hw->desc->prepare_tx_desc(tx_desc, 0, len, + len, cksum_flag); + /* memory barrier to flush descriptor */ + wmb(); + + /* set the owner */ + priv->hw->desc->set_tx_owner(tx_desc); + } + + /* close the descriptors */ + priv->hw->desc->close_tx_desc(tx_desc); + + /* memory barrier to flush descriptor */ + wmb(); + + tqueue->tx_count_frames += nr_frags + 1; + if (tqueue->tx_count_frames > tqueue->tx_coal_frames) { + priv->hw->desc->clear_tx_ic(tx_desc); + priv->xstats.tx_reset_ic_bit++; + mod_timer(&tqueue->txtimer, + 
SXGBE_COAL_TIMER(tqueue->tx_coal_timer)); + } else { + tqueue->tx_count_frames = 0; + } + + /* set owner for first desc */ + priv->hw->desc->set_tx_owner(first_desc); + + /* memory barrier to flush descriptor */ + wmb(); + + tqueue->cur_tx++; + + /* display current ring */ + netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n", + __func__, tqueue->cur_tx % tx_rsize, + tqueue->dirty_tx % tx_rsize, entry, + first_desc, nr_frags); + + if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) { + netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n", + __func__); + netif_tx_stop_queue(dev_txq); + } + + dev->stats.tx_bytes += skb->len; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + tqueue->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->hw->desc->tx_enable_tstamp(first_desc); + } + + if (!tqueue->hwts_tx_en) + skb_tx_timestamp(skb); + + priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index); + + spin_unlock(&tqueue->tx_lock); + + return NETDEV_TX_OK; +} + +/** + * sxgbe_rx_refill: refill used skb preallocated buffers + * @priv: driver private structure + * Description : this is to reallocate the skb for the reception process + * that is based on zero-copy. + */ +static void sxgbe_rx_refill(struct sxgbe_priv_data *priv) +{ + unsigned int rxsize = priv->dma_rx_size; + int bfsize = priv->dma_buf_sz; + u8 qnum = priv->cur_rx_qnum; + + for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0; + priv->rxq[qnum]->dirty_rx++) { + unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize; + struct sxgbe_rx_norm_desc *p; + + p = priv->rxq[qnum]->dma_rx + entry; + + if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) { + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); + + if (unlikely(skb == NULL)) + break; + + priv->rxq[qnum]->rx_skbuff[entry] = skb; + priv->rxq[qnum]->rx_skbuff_dma[entry] = + dma_map_single(priv->device, skb->data, bfsize, + DMA_FROM_DEVICE); + + p->rdes23.rx_rd_des23.buf2_addr = + priv->rxq[qnum]->rx_skbuff_dma[entry]; + } + + /* Added memory barrier for RX descriptor modification */ + wmb(); + priv->hw->desc->set_rx_owner(p); + priv->hw->desc->set_rx_int_on_com(p); + /* Added memory barrier for RX descriptor modification */ + wmb(); + } +} + +/** + * sxgbe_rx: receive the frames from the remote host + * @priv: driver private structure + * @limit: napi budget. + * Description : this is the function called by the napi poll method. + * It gets all the frames inside the ring. + */ +static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit) +{ + u8 qnum = priv->cur_rx_qnum; + unsigned int rxsize = priv->dma_rx_size; + unsigned int entry = priv->rxq[qnum]->cur_rx; + unsigned int next_entry = 0; + unsigned int count = 0; + int checksum; + int status; + + while (count < limit) { + struct sxgbe_rx_norm_desc *p; + struct sk_buff *skb; + int frame_len; + + p = priv->rxq[qnum]->dma_rx + entry; + + if (priv->hw->desc->get_rx_owner(p)) + break; + + count++; + + next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize; + prefetch(priv->rxq[qnum]->dma_rx + next_entry); + + /* Read the status of the incoming frame and also get checksum + * value based on whether it is enabled in SXGBE hardware or + * not.
+ */ + status = priv->hw->desc->rx_wbstatus(p, &priv->xstats, + &checksum); + if (unlikely(status < 0)) { + entry = next_entry; + continue; + } + if (unlikely(!priv->rxcsum_insertion)) + checksum = CHECKSUM_NONE; + + skb = priv->rxq[qnum]->rx_skbuff[entry]; + + if (unlikely(!skb)) + netdev_err(priv->dev, "rx descriptor is not consistent\n"); + + prefetch(skb->data - NET_IP_ALIGN); + priv->rxq[qnum]->rx_skbuff[entry] = NULL; + + frame_len = priv->hw->desc->get_rx_frame_len(p); + + skb_put(skb, frame_len); + + skb->ip_summed = checksum; + if (checksum == CHECKSUM_NONE) + netif_receive_skb(skb); + else + napi_gro_receive(&priv->napi, skb); + + entry = next_entry; + } + + sxgbe_rx_refill(priv); + + return count; +} + +/** + * sxgbe_poll - sxgbe poll method (NAPI) + * @napi : pointer to the napi structure. + * @budget : maximum number of packets that the current CPU can receive from + * all interfaces. + * Description : + * To look at the incoming frames and clear the tx resources. + */ +static int sxgbe_poll(struct napi_struct *napi, int budget) +{ + struct sxgbe_priv_data *priv = container_of(napi, + struct sxgbe_priv_data, napi); + int work_done = 0; + u8 qnum = priv->cur_rx_qnum; + + priv->xstats.napi_poll++; + /* first, clean the tx queues */ + sxgbe_tx_all_clean(priv); + + work_done = sxgbe_rx(priv, budget); + if (work_done < budget) { + napi_complete(napi); + priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); + } + + return work_done; +} + +/** + * sxgbe_tx_timeout + * @dev : Pointer to net device structure + * Description: this function is called when a packet transmission fails to + * complete within a reasonable time. The driver will mark the error in the + * netdev structure and arrange for the device to be reset to a sane state + * in order to transmit a new packet. + */ +static void sxgbe_tx_timeout(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + sxgbe_reset_all_tx_queues(priv); +} + +/** + * sxgbe_common_interrupt - main ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the main driver interrupt service routine. + * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI + * interrupts. + */ +static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = (struct net_device *)dev_id; + struct sxgbe_priv_data *priv = netdev_priv(netdev); + int status; + + status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats); + /* For LPI we need to save the tx status */ + if (status & TX_ENTRY_LPI_MODE) { + priv->xstats.tx_lpi_entry_n++; + priv->tx_path_in_lpi_mode = true; + } + if (status & TX_EXIT_LPI_MODE) { + priv->xstats.tx_lpi_exit_n++; + priv->tx_path_in_lpi_mode = false; + } + if (status & RX_ENTRY_LPI_MODE) + priv->xstats.rx_lpi_entry_n++; + if (status & RX_EXIT_LPI_MODE) + priv->xstats.rx_lpi_exit_n++; + + return IRQ_HANDLED; +} + +/** + * sxgbe_tx_interrupt - TX DMA ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the tx dma interrupt service routine. 
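+ *
+ * Besides scheduling NAPI, the handler below bumps the TX threshold when
+ * the DMA reports tx_bump_tc: while priv->tx_tc is under 128 it grows in
+ * steps of 32, afterwards in steps of 64, it is never bumped once it has
+ * reached 512, and not at all when the queue runs in store-and-forward
+ * mode (tx_tc == SXGBE_MTL_SFMODE).  Assuming a starting threshold of 64
+ * (the real start is TC_DEFAULT), the progression would be
+ *
+ *	64, 96, 128, 192, 256, 320, 384, 448, 512
+ *
+ * and every new value is programmed through set_tx_mtl_mode() for the
+ * reporting queue only, then mirrored into xstats.tx_threshold.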
+ */ +static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id) +{ + int status; + struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id; + struct sxgbe_priv_data *priv = txq->priv_ptr; + + /* get the channel status */ + status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no, + &priv->xstats); + /* check for normal path */ + if (likely((status & handle_tx))) + napi_schedule(&priv->napi); + + /* check for unrecoverable error */ + if (unlikely((status & tx_hard_error))) + sxgbe_restart_tx_queue(priv, txq->queue_no); + + /* check for TC configuration change */ + if (unlikely((status & tx_bump_tc) && + (priv->tx_tc != SXGBE_MTL_SFMODE) && + (priv->tx_tc < 512))) { + /* step of TX TC is 32 till 128, otherwise 64 */ + priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64; + priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, + txq->queue_no, priv->tx_tc); + priv->xstats.tx_threshold = priv->tx_tc; + } + + return IRQ_HANDLED; +} + +/** + * sxgbe_rx_interrupt - RX DMA ISR + * @irq: interrupt number. + * @dev_id: to pass the net device pointer. + * Description: this is the rx dma interrupt service routine. + */ +static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id) +{ + int status; + struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id; + struct sxgbe_priv_data *priv = rxq->priv_ptr; + + /* get the channel status */ + status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no, + &priv->xstats); + + if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) { + priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no); + __napi_schedule(&priv->napi); + } + + /* check for TC configuration change */ + if (unlikely((status & rx_bump_tc) && + (priv->rx_tc != SXGBE_MTL_SFMODE) && + (priv->rx_tc < 128))) { + /* step of TC is 32 */ + priv->rx_tc += 32; + priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, + rxq->queue_no, priv->rx_tc); + priv->xstats.rx_threshold = priv->rx_tc; + } + + return IRQ_HANDLED; +} + +static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi) +{ + u64 val = readl(ioaddr + reg_lo); + + val |= ((u64)readl(ioaddr + reg_hi)) << 32; + + return val; +} + + +/* sxgbe_get_stats64 - entry point to see statistical information of device + * @dev : device pointer. + * @stats : pointer to hold all the statistical information of device. + * Description: + * This function is a driver entry point whenever ifconfig command gets + * executed to see device statistics. Statistics are number of + * bytes sent or received, errors occurred etc. + * Return value: + * This function returns various statistical information of device. 
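+ *
+ * Each MMC counter is a LO/HI pair of 32-bit registers, so a 64-bit value
+ * is assembled from two reads while the counters are frozen; a minimal
+ * sketch of the pattern used below (see sxgbe_get_stat64() above):
+ *
+ *	u64 rx_bytes;
+ *
+ *	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
+ *	rx_bytes  = readl(ioaddr + SXGBE_MMC_RXOCTETLO_GCNT_REG);
+ *	rx_bytes |= (u64)readl(ioaddr + SXGBE_MMC_RXOCTETHI_GCNT_REG) << 32;
+ *	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
+ *
+ * The freeze/unfreeze writes keep the hardware from updating a pair
+ * between the two halves of a read.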
+ */ +static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + void __iomem *ioaddr = priv->ioaddr; + u64 count; + + spin_lock(&priv->stats_lock); + /* Freeze the counter registers before reading value otherwise it may + * get updated by hardware while we are reading them + */ + writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG); + + stats->rx_bytes = sxgbe_get_stat64(ioaddr, + SXGBE_MMC_RXOCTETLO_GCNT_REG, + SXGBE_MMC_RXOCTETHI_GCNT_REG); + + stats->rx_packets = sxgbe_get_stat64(ioaddr, + SXGBE_MMC_RXFRAMELO_GBCNT_REG, + SXGBE_MMC_RXFRAMEHI_GBCNT_REG); + + stats->multicast = sxgbe_get_stat64(ioaddr, + SXGBE_MMC_RXMULTILO_GCNT_REG, + SXGBE_MMC_RXMULTIHI_GCNT_REG); + + stats->rx_crc_errors = sxgbe_get_stat64(ioaddr, + SXGBE_MMC_RXCRCERRLO_REG, + SXGBE_MMC_RXCRCERRHI_REG); + + stats->rx_length_errors = sxgbe_get_stat64(ioaddr, + SXGBE_MMC_RXLENERRLO_REG, + SXGBE_MMC_RXLENERRHI_REG); + + stats->rx_missed_errors = sxgbe_get_stat64(ioaddr, + SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG, + SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG); + + stats->tx_bytes = sxgbe_get_stat64(ioaddr, + SXGBE_MMC_TXOCTETLO_GCNT_REG, + SXGBE_MMC_TXOCTETHI_GCNT_REG); + + count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG, + SXGBE_MMC_TXFRAMEHI_GBCNT_REG); + + stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG, + SXGBE_MMC_TXFRAMEHI_GCNT_REG); + stats->tx_errors = count - stats->tx_errors; + stats->tx_packets = count; + stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG, + SXGBE_MMC_TXUFLWHI_GBCNT_REG); + writel(0, ioaddr + SXGBE_MMC_CTL_REG); + spin_unlock(&priv->stats_lock); + + return stats; +} + +/* sxgbe_set_features - entry point to set offload features of the device. + * @dev : device pointer. + * @features : features which are required to be set. + * Description: + * This function is a driver entry point and called by Linux kernel whenever + * any device features are set or reset by user. + * Return value: + * This function returns 0 after setting or resetting device features. + */ +static int sxgbe_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + netdev_features_t changed = dev->features ^ features; + + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM) { + priv->hw->mac->enable_rx_csum(priv->ioaddr); + priv->rxcsum_insertion = true; + } else { + priv->hw->mac->disable_rx_csum(priv->ioaddr); + priv->rxcsum_insertion = false; + } + } + + return 0; +} + +/* sxgbe_change_mtu - entry point to change MTU size for the device. + * @dev : device pointer. + * @new_mtu : the new MTU size for the device. + * Description: the Maximum Transfer Unit (MTU) is used by the network layer + * to drive packet transmission. Ethernet has an MTU of 1500 octets + * (ETH_DATA_LEN). This value can be changed with ifconfig. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int sxgbe_change_mtu(struct net_device *dev, int new_mtu) +{ + /* RFC 791, page 25, "Every internet module must be able to forward + * a datagram of 68 octets without further fragmentation." 
+ */ + if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) { + netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n", + MIN_MTU, MAX_MTU); + return -EINVAL; + } + + /* Return if the buffer sizes will not change */ + if (dev->mtu == new_mtu) + return 0; + + dev->mtu = new_mtu; + + if (!netif_running(dev)) + return 0; + + /* Receive ring buffer size needs to be set based on the MTU. If the MTU + * is changed then the receive ring buffers need to be reinitialised. + * Hence bring the interface down and then back up + */ + sxgbe_release(dev); + return sxgbe_open(dev); +} + +static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + /* For MAC Addr registers we have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. + */ + writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n)); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n)); +} + +/** + * sxgbe_set_rx_mode - entry point for setting different receive modes of + * a device: unicast, multicast addressing + * @dev : pointer to the device structure + * Description: + * This function is a driver entry point which gets called by the kernel + * whenever different receive modes like unicast, multicast and promiscuous + * must be enabled/disabled. + * Return value: + * void. + */ +static void sxgbe_set_rx_mode(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + void __iomem *ioaddr = (void __iomem *)priv->ioaddr; + unsigned int value = 0; + u32 mc_filter[2]; + struct netdev_hw_addr *ha; + int reg = 1; + + netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n", + __func__, netdev_mc_count(dev), netdev_uc_count(dev)); + + if (dev->flags & IFF_PROMISC) { + value = SXGBE_FRAME_FILTER_PR; + + } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) || + (dev->flags & IFF_ALLMULTI)) { + value = SXGBE_FRAME_FILTER_PM; /* pass all multi */ + writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH); + writel(0xffffffff, ioaddr + SXGBE_HASH_LOW); + + } else if (!netdev_mc_empty(dev)) { + /* Hash filter for multicast */ + value = SXGBE_FRAME_FILTER_HMC; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The upper 6 bits of the calculated CRC are used to + * index the contents of the hash table + */ + int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; + + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register.
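+ *
+ * A short worked example (the value is made up for illustration): if
+ * bit_nr came out as 43 (binary 101011), then bit_nr >> 5 = 1 selects
+ * mc_filter[1], which is written to SXGBE_HASH_HIGH below, and
+ * bit_nr & 31 = 11 selects bit 11 inside that register:
+ *
+ *	mc_filter[1] |= 1 << 11;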
+ */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW); + writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH); + } + + /* Handle multiple unicast addresses (perfect filtering) */ + if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) + /* Switch to promiscuous mode if more than 16 addrs + * are required + */ + value |= SXGBE_FRAME_FILTER_PR; + else { + netdev_for_each_uc_addr(ha, dev) { + sxgbe_set_umac_addr(ioaddr, ha->addr, reg); + reg++; + } + } +#ifdef FRAME_FILTER_DEBUG + /* Enable Receive all mode (to debug filtering_fail errors) */ + value |= SXGBE_FRAME_FILTER_RA; +#endif + writel(value, ioaddr + SXGBE_FRAME_FILTER); + + netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n", + readl(ioaddr + SXGBE_FRAME_FILTER), + readl(ioaddr + SXGBE_HASH_HIGH), + readl(ioaddr + SXGBE_HASH_LOW)); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * sxgbe_poll_controller - entry point for polling receive by device + * @dev : pointer to the device structure + * Description: + * This function is used by NETCONSOLE and other diagnostic tools + * to allow network I/O with interrupts disabled. + * Return value: + * Void. + */ +static void sxgbe_poll_controller(struct net_device *dev) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + + disable_irq(priv->irq); + sxgbe_rx_interrupt(priv->irq, dev); + enable_irq(priv->irq); +} +#endif + +/* sxgbe_ioctl - Entry point for the Ioctl + * @dev: Device pointer. + * @rq: An IOCTL specefic structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * @cmd: IOCTL command + * Description: + * Currently it supports the phy_mii_ioctl(...) and HW time stamping. + */ +static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct sxgbe_priv_data *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + if (!priv->phydev) + return -EINVAL; + ret = phy_mii_ioctl(priv->phydev, rq, cmd); + break; + default: + break; + } + + return ret; +} + +static const struct net_device_ops sxgbe_netdev_ops = { + .ndo_open = sxgbe_open, + .ndo_start_xmit = sxgbe_xmit, + .ndo_stop = sxgbe_release, + .ndo_get_stats64 = sxgbe_get_stats64, + .ndo_change_mtu = sxgbe_change_mtu, + .ndo_set_features = sxgbe_set_features, + .ndo_set_rx_mode = sxgbe_set_rx_mode, + .ndo_tx_timeout = sxgbe_tx_timeout, + .ndo_do_ioctl = sxgbe_ioctl, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sxgbe_poll_controller, +#endif + .ndo_set_mac_address = eth_mac_addr, +}; + +/* Get the hardware ops */ +static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr) +{ + ops_ptr->mac = sxgbe_get_core_ops(); + ops_ptr->desc = sxgbe_get_desc_ops(); + ops_ptr->dma = sxgbe_get_dma_ops(); + ops_ptr->mtl = sxgbe_get_mtl_ops(); + + /* set the MDIO communication Address/Data regisers */ + ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG; + ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG; + + /* Assigning the default link settings + * no SXGBE defined default values to be set in registers, + * so assigning as 0 for port and duplex + */ + ops_ptr->link.port = 0; + ops_ptr->link.duplex = 0; + ops_ptr->link.speed = SXGBE_SPEED_10G; +} + +/** + * sxgbe_hw_init - Init the GMAC device + * @priv: driver private structure + * Description: this function checks the HW capability + * (if supported) and sets the driver's features. 
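+ *
+ * The controller version register packs two byte-wide fields which are
+ * split out below; a sketch with a made-up register value, for
+ * illustration only:
+ *
+ *	ctrl_ids = 0x00210130;
+ *	ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;    0x21, the user ID
+ *	ctrl_id  = (ctrl_ids & 0x000000ff);          0x30, the controller ID
+ *
+ * Only these two bytes are consumed; the remaining bits of the register
+ * are ignored by the driver.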
+ */ +static int sxgbe_hw_init(struct sxgbe_priv_data * const priv) +{ + u32 ctrl_ids; + + priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL); + if(!priv->hw) + return -ENOMEM; + + /* get the hardware ops */ + sxgbe_get_ops(priv->hw); + + /* get the controller id */ + ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr); + priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16; + priv->hw->ctrl_id = (ctrl_ids & 0x000000ff); + pr_info("user ID: 0x%x, Controller ID: 0x%x\n", + priv->hw->ctrl_uid, priv->hw->ctrl_id); + + /* get the H/W features */ + if (!sxgbe_get_hw_features(priv)) + pr_info("Hardware features not found\n"); + + if (priv->hw_cap.tx_csum_offload) + pr_info("TX Checksum offload supported\n"); + + if (priv->hw_cap.rx_csum_offload) + pr_info("RX Checksum offload supported\n"); + + return 0; +} + +static int sxgbe_sw_reset(void __iomem *addr) +{ + int retry_count = 10; + + writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG); + while (retry_count--) { + if (!(readl(addr + SXGBE_DMA_MODE_REG) & + SXGBE_DMA_SOFT_RESET)) + break; + mdelay(10); + } + + if (retry_count < 0) + return -EBUSY; + + return 0; +} + +/** + * sxgbe_drv_probe + * @device: device pointer + * @plat_dat: platform data pointer + * @addr: iobase memory address + * Description: this is the main probe function used to + * call the alloc_etherdev, allocate the priv structure. + */ +struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device, + struct sxgbe_plat_data *plat_dat, + void __iomem *addr) +{ + struct sxgbe_priv_data *priv; + struct net_device *ndev; + int ret; + u8 queue_num; + + ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data), + SXGBE_TX_QUEUES, SXGBE_RX_QUEUES); + if (!ndev) + return NULL; + + SET_NETDEV_DEV(ndev, device); + + priv = netdev_priv(ndev); + priv->device = device; + priv->dev = ndev; + + sxgbe_set_ethtool_ops(ndev); + priv->plat = plat_dat; + priv->ioaddr = addr; + + ret = sxgbe_sw_reset(priv->ioaddr); + if (ret) + goto error_free_netdev; + + /* Verify driver arguments */ + sxgbe_verify_args(); + + /* Init MAC and get the capabilities */ + ret = sxgbe_hw_init(priv); + if (ret) + goto error_free_netdev; + + /* allocate memory resources for Descriptor rings */ + ret = txring_mem_alloc(priv); + if (ret) + goto error_free_hw; + + ret = rxring_mem_alloc(priv); + if (ret) + goto error_free_hw; + + ndev->netdev_ops = &sxgbe_netdev_ops; + + ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GRO; + ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; + ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO); + + /* assign filtering support */ + ndev->priv_flags |= IFF_UNICAST_FLT; + + priv->msg_enable = netif_msg_init(debug, default_msg_level); + + /* Enable TCP segmentation offload for all DMA channels */ + if (priv->hw_cap.tcpseg_offload) { + SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) { + priv->hw->dma->enable_tso(priv->ioaddr, queue_num); + } + } + + /* Enable Rx checksum offload */ + if (priv->hw_cap.rx_csum_offload) { + priv->hw->mac->enable_rx_csum(priv->ioaddr); + priv->rxcsum_insertion = true; + } + + /* Initialise pause frame settings */ + priv->rx_pause = 1; + priv->tx_pause = 1; + + /* Rx Watchdog is available, enable depend on platform data */ + if (!priv->plat->riwt_off) { + priv->use_riwt = 1; + pr_info("Enable RX Mitigation via HW Watchdog Timer\n"); + } + + netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64); + + spin_lock_init(&priv->stats_lock); + + priv->sxgbe_clk = clk_get(priv->device, 
SXGBE_RESOURCE_NAME); + if (IS_ERR(priv->sxgbe_clk)) { + netdev_warn(ndev, "%s: warning: cannot get CSR clock\n", + __func__); + goto error_napi_del; + } + + /* If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be + * changed at run-time and it is fixed. Viceversa the driver'll try to + * set the MDC clock dynamically according to the csr actual + * clock input. + */ + if (!priv->plat->clk_csr) + sxgbe_clk_csr_set(priv); + else + priv->clk_csr = priv->plat->clk_csr; + + /* MDIO bus Registration */ + ret = sxgbe_mdio_register(ndev); + if (ret < 0) { + netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n", + __func__, priv->plat->bus_id); + goto error_clk_put; + } + + ret = register_netdev(ndev); + if (ret) { + pr_err("%s: ERROR %i registering the device\n", __func__, ret); + goto error_mdio_unregister; + } + + sxgbe_check_ether_addr(priv); + + return priv; + +error_mdio_unregister: + sxgbe_mdio_unregister(ndev); +error_clk_put: + clk_put(priv->sxgbe_clk); +error_napi_del: + netif_napi_del(&priv->napi); +error_free_hw: + kfree(priv->hw); +error_free_netdev: + free_netdev(ndev); + + return NULL; +} + +/** + * sxgbe_drv_remove + * @ndev: net device pointer + * Description: this function resets the TX/RX processes, disables the MAC RX/TX + * changes the link status, releases the DMA descriptor rings. + */ +int sxgbe_drv_remove(struct net_device *ndev) +{ + struct sxgbe_priv_data *priv = netdev_priv(ndev); + u8 queue_num; + + netdev_info(ndev, "%s: removing driver\n", __func__); + + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num); + } + + priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); + priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); + + priv->hw->mac->enable_tx(priv->ioaddr, false); + priv->hw->mac->enable_rx(priv->ioaddr, false); + + unregister_netdev(ndev); + + sxgbe_mdio_unregister(ndev); + + clk_put(priv->sxgbe_clk); + + netif_napi_del(&priv->napi); + + kfree(priv->hw); + + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +int sxgbe_suspend(struct net_device *ndev) +{ + return 0; +} + +int sxgbe_resume(struct net_device *ndev) +{ + return 0; +} + +int sxgbe_freeze(struct net_device *ndev) +{ + return -ENOSYS; +} + +int sxgbe_restore(struct net_device *ndev) +{ + return -ENOSYS; +} +#endif /* CONFIG_PM */ + +/* Driver is configured as Platform driver */ +static int __init sxgbe_init(void) +{ + int ret; + + ret = sxgbe_register_platform(); + if (ret) + goto err; + return 0; +err: + pr_err("driver registration failed\n"); + return ret; +} + +static void __exit sxgbe_exit(void) +{ + sxgbe_unregister_platform(); +} + +module_init(sxgbe_init); +module_exit(sxgbe_exit); + +#ifndef MODULE +static int __init sxgbe_cmdline_opt(char *str) +{ + char *opt; + + if (!str || !*str) + return -EINVAL; + while ((opt = strsep(&str, ",")) != NULL) { + if (!strncmp(opt, "eee_timer:", 6)) { + if (kstrtoint(opt + 10, 0, &eee_timer)) + goto err; + } + } + return 0; + +err: + pr_err("%s: ERROR broken module parameter conversion\n", __func__); + return -EINVAL; +} + +__setup("sxgbeeth=", sxgbe_cmdline_opt); +#endif /* MODULE */ + + + +MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver"); + +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); +MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value"); + +MODULE_AUTHOR("Siva Reddy Kallam "); +MODULE_AUTHOR("ByungHo An "); +MODULE_AUTHOR("Girish K S "); 
+MODULE_AUTHOR("Vipul Pandya "); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c new file mode 100644 index 000000000..43ccb4a6d --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c @@ -0,0 +1,254 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sxgbe_common.h" +#include "sxgbe_reg.h" + +#define SXGBE_SMA_WRITE_CMD 0x01 /* write command */ +#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */ +#define SXGBE_SMA_READ_CMD 0x03 /* read command */ +#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */ +#define SXGBE_MII_BUSY 0x00400000 /* mii busy */ + +static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data) +{ + unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */ + + while (!time_after(jiffies, fin_time)) { + if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY)) + return 0; + cpu_relax(); + } + + return -EBUSY; +} + +static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd, + u16 phydata) +{ + u32 reg = phydata; + + reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM | + ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY; + writel(reg, sp->ioaddr + sp->hw->mii.data); +} + +static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr, + int phyreg, u16 phydata) +{ + u32 reg; + + /* set mdio address register */ + reg = ((phyreg >> 16) & 0x1f) << 21; + reg |= (phyaddr << 16) | (phyreg & 0xffff); + writel(reg, sp->ioaddr + sp->hw->mii.addr); + + sxgbe_mdio_ctrl_data(sp, cmd, phydata); +} + +static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr, + int phyreg, u16 phydata) +{ + u32 reg; + + writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG); + + /* set mdio address register */ + reg = (phyaddr << 16) | (phyreg & 0x1f); + writel(reg, sp->ioaddr + sp->hw->mii.addr); + + sxgbe_mdio_ctrl_data(sp, cmd, phydata); +} + +static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr, + int phyreg, u16 phydata) +{ + const struct mii_regs *mii = &sp->hw->mii; + int rc; + + rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data); + if (rc < 0) + return rc; + + if (phyreg & MII_ADDR_C45) { + sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata); + } else { + /* Ports 0-3 only support C22. 
*/ + if (phyaddr >= 4) + return -ENODEV; + + sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata); + } + + return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data); +} + +/** + * sxgbe_mdio_read + * @bus: points to the mii_bus structure + * @phyaddr: address of phy port + * @phyreg: address of register with in phy register + * Description: this function used for C45 and C22 MDIO Read + */ +static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct sxgbe_priv_data *priv = netdev_priv(ndev); + int rc; + + rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0); + if (rc < 0) + return rc; + + return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff; +} + +/** + * sxgbe_mdio_write + * @bus: points to the mii_bus structure + * @phyaddr: address of phy port + * @phyreg: address of phy registers + * @phydata: data to be written into phy register + * Description: this function is used for C45 and C22 MDIO write + */ +static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct sxgbe_priv_data *priv = netdev_priv(ndev); + + return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg, + phydata); +} + +int sxgbe_mdio_register(struct net_device *ndev) +{ + struct mii_bus *mdio_bus; + struct sxgbe_priv_data *priv = netdev_priv(ndev); + struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data; + int err, phy_addr; + int *irqlist; + bool phy_found = false; + bool act; + + /* allocate the new mdio bus */ + mdio_bus = mdiobus_alloc(); + if (!mdio_bus) { + netdev_err(ndev, "%s: mii bus allocation failed\n", __func__); + return -ENOMEM; + } + + if (mdio_data->irqs) + irqlist = mdio_data->irqs; + else + irqlist = priv->mii_irq; + + /* assign mii bus fields */ + mdio_bus->name = "sxgbe"; + mdio_bus->read = &sxgbe_mdio_read; + mdio_bus->write = &sxgbe_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x", + mdio_bus->name, priv->plat->bus_id); + mdio_bus->priv = ndev; + mdio_bus->phy_mask = mdio_data->phy_mask; + mdio_bus->parent = priv->device; + + /* register with kernel subsystem */ + err = mdiobus_register(mdio_bus); + if (err != 0) { + netdev_err(ndev, "mdiobus register failed\n"); + goto mdiobus_err; + } + + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + struct phy_device *phy = mdio_bus->phy_map[phy_addr]; + + if (phy) { + char irq_num[4]; + char *irq_str; + /* If an IRQ was provided to be assigned after + * the bus probe, do it here. + */ + if ((mdio_data->irqs == NULL) && + (mdio_data->probed_phy_irq > 0)) { + irqlist[phy_addr] = mdio_data->probed_phy_irq; + phy->irq = mdio_data->probed_phy_irq; + } + + /* If we're going to bind the MAC to this PHY bus, + * and no PHY number was provided to the MAC, + * use the one probed here. + */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = phy_addr; + + act = (priv->plat->phy_addr == phy_addr); + switch (phy->irq) { + case PHY_POLL: + irq_str = "POLL"; + break; + case PHY_IGNORE_INTERRUPT: + irq_str = "IGNORE"; + break; + default: + sprintf(irq_num, "%d", phy->irq); + irq_str = irq_num; + break; + } + netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", + phy->phy_id, phy_addr, irq_str, + dev_name(&phy->dev), act ? 
" active" : ""); + phy_found = true; + } + } + + if (!phy_found) { + netdev_err(ndev, "PHY not found\n"); + goto phyfound_err; + } + + priv->mii = mdio_bus; + + return 0; + +phyfound_err: + err = -ENODEV; + mdiobus_unregister(mdio_bus); +mdiobus_err: + mdiobus_free(mdio_bus); + return err; +} + +int sxgbe_mdio_unregister(struct net_device *ndev) +{ + struct sxgbe_priv_data *priv = netdev_priv(ndev); + + if (!priv->mii) + return 0; + + mdiobus_unregister(priv->mii); + priv->mii->priv = NULL; + mdiobus_free(priv->mii); + priv->mii = NULL; + + return 0; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c new file mode 100644 index 000000000..324681c2b --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c @@ -0,0 +1,254 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include + +#include "sxgbe_mtl.h" +#include "sxgbe_reg.h" + +static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg, + unsigned int raa) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG); + reg_val &= ETS_RST; + + /* ETS Algorith */ + switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) { + case ETS_WRR: + reg_val &= ETS_WRR; + break; + case ETS_WFQ: + reg_val |= ETS_WFQ; + break; + case ETS_DWRR: + reg_val |= ETS_DWRR; + break; + } + writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG); + + switch (raa & SXGBE_MTL_OPMODE_RAAMASK) { + case RAA_SP: + reg_val &= RAA_SP; + break; + case RAA_WSP: + reg_val |= RAA_WSP; + break; + } + writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG); +} + +/* For Dynamic DMA channel mapping for Rx queue */ +static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr) +{ + writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG); + writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG); + writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG); +} + +static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num, + int queue_fifo) +{ + u32 fifo_bits, reg_val; + + /* 0 means 256 bytes */ + fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1; + reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); + reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT); + writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num, + int queue_fifo) +{ + u32 fifo_bits, reg_val; + + /* 0 means 256 bytes */ + fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV)-1; + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT); + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); + reg_val |= SXGBE_MTL_ENABLE_QUEUE; + writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); + reg_val &= ~SXGBE_MTL_ENABLE_QUEUE; + writel(reg_val, ioaddr + 
SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num, + int threshold) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE); + reg_val |= (threshold << RX_FC_ACTIVE); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val |= SXGBE_MTL_ENABLE_FC; + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num, + int threshold) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE); + reg_val |= (threshold << RX_FC_DEACTIVE); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val |= SXGBE_MTL_RXQ_OP_FEP; + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val |= SXGBE_MTL_RXQ_OP_FUP; + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP); + + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + + +static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num, + int tx_mode) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); + /* TX specific MTL mode settings */ + if (tx_mode == SXGBE_MTL_SFMODE) { + reg_val |= SXGBE_MTL_SFMODE; + } else { + /* set the TTC values */ + if (tx_mode <= 64) + reg_val |= MTL_CONTROL_TTC_64; + else if (tx_mode <= 96) + reg_val |= MTL_CONTROL_TTC_96; + else if (tx_mode <= 128) + reg_val |= MTL_CONTROL_TTC_128; + else if (tx_mode <= 192) + reg_val |= MTL_CONTROL_TTC_192; + else if (tx_mode <= 256) + reg_val |= MTL_CONTROL_TTC_256; + else if (tx_mode <= 384) + reg_val |= MTL_CONTROL_TTC_384; + else + reg_val |= MTL_CONTROL_TTC_512; + } + + /* write into TXQ operation register */ + writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num)); +} + +static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num, + int rx_mode) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); + /* RX specific MTL mode settings */ + if (rx_mode == SXGBE_RX_MTL_SFMODE) { + reg_val |= SXGBE_RX_MTL_SFMODE; + } else { + if (rx_mode <= 64) + reg_val |= MTL_CONTROL_RTC_64; + else if (rx_mode <= 96) + reg_val |= MTL_CONTROL_RTC_96; + else if (rx_mode <= 128) + reg_val |= MTL_CONTROL_RTC_128; + } + + /* write into RXQ operation register */ + writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num)); +} + +static const struct sxgbe_mtl_ops mtl_ops = { + .mtl_set_txfifosize = 
sxgbe_mtl_set_txfifosize, + .mtl_set_rxfifosize = sxgbe_mtl_set_rxfifosize, + .mtl_enable_txqueue = sxgbe_mtl_enable_txqueue, + .mtl_disable_txqueue = sxgbe_mtl_disable_txqueue, + .mtl_dynamic_dma_rxqueue = sxgbe_mtl_dma_dm_rxqueue, + .set_tx_mtl_mode = sxgbe_set_tx_mtl_mode, + .set_rx_mtl_mode = sxgbe_set_rx_mtl_mode, + .mtl_init = sxgbe_mtl_init, + .mtl_fc_active = sxgbe_mtl_fc_active, + .mtl_fc_deactive = sxgbe_mtl_fc_deactive, + .mtl_fc_enable = sxgbe_mtl_fc_enable, + .mtl_fep_enable = sxgbe_mtl_fep_enable, + .mtl_fep_disable = sxgbe_mtl_fep_disable, + .mtl_fup_enable = sxgbe_mtl_fup_enable, + .mtl_fup_disable = sxgbe_mtl_fup_disable +}; + +const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void) +{ + return &mtl_ops; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h new file mode 100644 index 000000000..7e4810c41 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h @@ -0,0 +1,104 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __SXGBE_MTL_H__ +#define __SXGBE_MTL_H__ + +#define SXGBE_MTL_OPMODE_ESTMASK 0x3 +#define SXGBE_MTL_OPMODE_RAAMASK 0x1 +#define SXGBE_MTL_FCMASK 0x7 +#define SXGBE_MTL_TX_FIFO_DIV 256 +#define SXGBE_MTL_RX_FIFO_DIV 256 + +#define SXGBE_MTL_RXQ_OP_FEP BIT(4) +#define SXGBE_MTL_RXQ_OP_FUP BIT(3) +#define SXGBE_MTL_ENABLE_FC 0x80 + +#define ETS_WRR 0xFFFFFF9F +#define ETS_RST 0xFFFFFF9F +#define ETS_WFQ 0x00000020 +#define ETS_DWRR 0x00000040 +#define RAA_SP 0xFFFFFFFB +#define RAA_WSP 0x00000004 + +#define RX_QUEUE_DYNAMIC 0x80808080 +#define RX_FC_ACTIVE 8 +#define RX_FC_DEACTIVE 13 + +enum ttc_control { + MTL_CONTROL_TTC_64 = 0x00000000, + MTL_CONTROL_TTC_96 = 0x00000020, + MTL_CONTROL_TTC_128 = 0x00000030, + MTL_CONTROL_TTC_192 = 0x00000040, + MTL_CONTROL_TTC_256 = 0x00000050, + MTL_CONTROL_TTC_384 = 0x00000060, + MTL_CONTROL_TTC_512 = 0x00000070, +}; + +enum rtc_control { + MTL_CONTROL_RTC_64 = 0x00000000, + MTL_CONTROL_RTC_96 = 0x00000002, + MTL_CONTROL_RTC_128 = 0x00000003, +}; + +enum flow_control_th { + MTL_FC_FULL_1K = 0x00000000, + MTL_FC_FULL_2K = 0x00000001, + MTL_FC_FULL_4K = 0x00000002, + MTL_FC_FULL_5K = 0x00000003, + MTL_FC_FULL_6K = 0x00000004, + MTL_FC_FULL_8K = 0x00000005, + MTL_FC_FULL_16K = 0x00000006, + MTL_FC_FULL_24K = 0x00000007, +}; + +struct sxgbe_mtl_ops { + void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg, + unsigned int raa); + + void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num, + int mtl_fifo); + + void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num, + int queue_fifo); + + void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num); + + void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num); + + void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num, + int tx_mode); + + void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num, + int rx_mode); + + void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr); + + void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num, + int threshold); + + void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num, + int threshold); + + void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num); + + void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num); + 
+ void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num); + + void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num); + + void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num); +}; + +const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void); + +#endif /* __SXGBE_MTL_H__ */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c new file mode 100644 index 000000000..b02eed12b --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -0,0 +1,255 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sxgbe_common.h" +#include "sxgbe_reg.h" + +#ifdef CONFIG_OF +static int sxgbe_probe_config_dt(struct platform_device *pdev, + struct sxgbe_plat_data *plat, + const char **mac) +{ + struct device_node *np = pdev->dev.of_node; + struct sxgbe_dma_cfg *dma_cfg; + + if (!np) + return -ENODEV; + + *mac = of_get_mac_address(np); + plat->interface = of_get_phy_mode(np); + + plat->bus_id = of_alias_get_id(np, "ethernet"); + if (plat->bus_id < 0) + plat->bus_id = 0; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); + if (!dma_cfg) + return -ENOMEM; + + plat->dma_cfg = dma_cfg; + of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl); + if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0) + dma_cfg->fixed_burst = true; + + return 0; +} +#else +static int sxgbe_probe_config_dt(struct platform_device *pdev, + struct sxgbe_plat_data *plat, + const char **mac) +{ + return -ENOSYS; +} +#endif /* CONFIG_OF */ + +/** + * sxgbe_platform_probe + * @pdev: platform device pointer + * Description: platform_device probe function. It allocates + * the necessary resources and invokes the main to init + * the net device, register the mdio bus etc. 
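+ *
+ * The interrupts are taken from the device tree strictly by index; a
+ * sketch of the ordering assumed by the code below (queue counts are the
+ * compile-time SXGBE_TX_QUEUES / SXGBE_RX_QUEUES):
+ *
+ *	index 0                                  common MAC/DMA interrupt
+ *	index 1 .. SXGBE_TX_QUEUES               one per TX DMA channel
+ *	next SXGBE_RX_QUEUES indices             one per RX DMA channel
+ *	last index                               LPI (EEE) interrupt
+ *
+ * so the interrupts property of the node is expected to list them in
+ * exactly that order for irq_of_parse_and_map() to pick them up.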
+ */ +static int sxgbe_platform_probe(struct platform_device *pdev) +{ + int ret; + int i, chan; + struct resource *res; + struct device *dev = &pdev->dev; + void __iomem *addr; + struct sxgbe_priv_data *priv = NULL; + struct sxgbe_plat_data *plat_dat = NULL; + const char *mac = NULL; + struct net_device *ndev = platform_get_drvdata(pdev); + struct device_node *node = dev->of_node; + + /* Get memory resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = devm_ioremap_resource(dev, res); + if (IS_ERR(addr)) + return PTR_ERR(addr); + + if (pdev->dev.of_node) { + plat_dat = devm_kzalloc(&pdev->dev, + sizeof(struct sxgbe_plat_data), + GFP_KERNEL); + if (!plat_dat) + return -ENOMEM; + + ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac); + if (ret) { + pr_err("%s: main dt probe failed\n", __func__); + return ret; + } + } + + priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr); + if (!priv) { + pr_err("%s: main driver probe failed\n", __func__); + goto err_out; + } + + /* Get the SXGBE common INT information */ + priv->irq = irq_of_parse_and_map(node, 0); + if (priv->irq <= 0) { + dev_err(dev, "sxgbe common irq parsing failed\n"); + goto err_drv_remove; + } + + /* Get MAC address if available (DT) */ + if (mac) + ether_addr_copy(priv->dev->dev_addr, mac); + + /* Get the TX/RX IRQ numbers */ + for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { + priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); + if (priv->txq[i]->irq_no <= 0) { + dev_err(dev, "sxgbe tx irq parsing failed\n"); + goto err_tx_irq_unmap; + } + } + + for (i = 0; i < SXGBE_RX_QUEUES; i++) { + priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++); + if (priv->rxq[i]->irq_no <= 0) { + dev_err(dev, "sxgbe rx irq parsing failed\n"); + goto err_rx_irq_unmap; + } + } + + priv->lpi_irq = irq_of_parse_and_map(node, chan); + if (priv->lpi_irq <= 0) { + dev_err(dev, "sxgbe lpi irq parsing failed\n"); + goto err_rx_irq_unmap; + } + + platform_set_drvdata(pdev, priv->dev); + + pr_debug("platform driver registration completed\n"); + + return 0; + +err_rx_irq_unmap: + while (--i) + irq_dispose_mapping(priv->rxq[i]->irq_no); + i = SXGBE_TX_QUEUES; +err_tx_irq_unmap: + while (--i) + irq_dispose_mapping(priv->txq[i]->irq_no); + irq_dispose_mapping(priv->irq); +err_drv_remove: + sxgbe_drv_remove(ndev); +err_out: + return -ENODEV; +} + +/** + * sxgbe_platform_remove + * @pdev: platform device pointer + * Description: this function calls the main to free the net resources + * and calls the platforms hook and release the resources (e.g. mem). 
+ */ +static int sxgbe_platform_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + int ret = sxgbe_drv_remove(ndev); + + return ret; +} + +#ifdef CONFIG_PM +static int sxgbe_platform_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_suspend(ndev); +} + +static int sxgbe_platform_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_resume(ndev); +} + +static int sxgbe_platform_freeze(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_freeze(ndev); +} + +static int sxgbe_platform_restore(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + return sxgbe_restore(ndev); +} + +static const struct dev_pm_ops sxgbe_platform_pm_ops = { + .suspend = sxgbe_platform_suspend, + .resume = sxgbe_platform_resume, + .freeze = sxgbe_platform_freeze, + .thaw = sxgbe_platform_restore, + .restore = sxgbe_platform_restore, +}; +#else +static const struct dev_pm_ops sxgbe_platform_pm_ops; +#endif /* CONFIG_PM */ + +static const struct of_device_id sxgbe_dt_ids[] = { + { .compatible = "samsung,sxgbe-v2.0a"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sxgbe_dt_ids); + +static struct platform_driver sxgbe_platform_driver = { + .probe = sxgbe_platform_probe, + .remove = sxgbe_platform_remove, + .driver = { + .name = SXGBE_RESOURCE_NAME, + .pm = &sxgbe_platform_pm_ops, + .of_match_table = of_match_ptr(sxgbe_dt_ids), + }, +}; + +int sxgbe_register_platform(void) +{ + int err; + + err = platform_driver_register(&sxgbe_platform_driver); + if (err) + pr_err("failed to register the platform driver\n"); + + return err; +} + +void sxgbe_unregister_platform(void) +{ + platform_driver_unregister(&sxgbe_platform_driver); +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h new file mode 100644 index 000000000..81437d91d --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h @@ -0,0 +1,491 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __SXGBE_REGMAP_H__ +#define __SXGBE_REGMAP_H__ + +/* SXGBE MAC Registers */ +#define SXGBE_CORE_TX_CONFIG_REG 0x0000 +#define SXGBE_CORE_RX_CONFIG_REG 0x0004 +#define SXGBE_CORE_PKT_FILTER_REG 0x0008 +#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C +#define SXGBE_CORE_HASH_TABLE_REG0 0x0010 +#define SXGBE_CORE_HASH_TABLE_REG1 0x0014 +#define SXGBE_CORE_HASH_TABLE_REG2 0x0018 +#define SXGBE_CORE_HASH_TABLE_REG3 0x001C +#define SXGBE_CORE_HASH_TABLE_REG4 0x0020 +#define SXGBE_CORE_HASH_TABLE_REG5 0x0024 +#define SXGBE_CORE_HASH_TABLE_REG6 0x0028 +#define SXGBE_CORE_HASH_TABLE_REG7 0x002C + +/* EEE-LPI Registers */ +#define SXGBE_CORE_LPI_CTRL_STATUS 0x00D0 +#define SXGBE_CORE_LPI_TIMER_CTRL 0x00D4 + +/* VLAN Specific Registers */ +#define SXGBE_CORE_VLAN_TAG_REG 0x0050 +#define SXGBE_CORE_VLAN_HASHTAB_REG 0x0058 +#define SXGBE_CORE_VLAN_INSCTL_REG 0x0060 +#define SXGBE_CORE_VLAN_INNERCTL_REG 0x0064 +#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C + +/* Flow Contol Registers */ +#define SXGBE_CORE_TX_Q0_FLOWCTL_REG 0x0070 +#define SXGBE_CORE_TX_Q1_FLOWCTL_REG 0x0074 +#define SXGBE_CORE_TX_Q2_FLOWCTL_REG 0x0078 +#define SXGBE_CORE_TX_Q3_FLOWCTL_REG 0x007C +#define SXGBE_CORE_TX_Q4_FLOWCTL_REG 0x0080 +#define SXGBE_CORE_TX_Q5_FLOWCTL_REG 0x0084 +#define SXGBE_CORE_TX_Q6_FLOWCTL_REG 0x0088 +#define SXGBE_CORE_TX_Q7_FLOWCTL_REG 0x008C +#define SXGBE_CORE_RX_FLOWCTL_REG 0x0090 +#define SXGBE_CORE_RX_CTL0_REG 0x00A0 +#define SXGBE_CORE_RX_CTL1_REG 0x00A4 +#define SXGBE_CORE_RX_CTL2_REG 0x00A8 +#define SXGBE_CORE_RX_CTL3_REG 0x00AC + +#define SXGBE_CORE_RXQ_ENABLE_MASK 0x0003 +#define SXGBE_CORE_RXQ_ENABLE 0x0002 +#define SXGBE_CORE_RXQ_DISABLE 0x0000 + +/* Interrupt Registers */ +#define SXGBE_CORE_INT_STATUS_REG 0x00B0 +#define SXGBE_CORE_INT_ENABLE_REG 0x00B4 +#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8 +#define SXGBE_CORE_PMT_CTL_STATUS_REG 0x00C0 +#define SXGBE_CORE_RWK_PKT_FILTER_REG 0x00C4 +#define SXGBE_CORE_VERSION_REG 0x0110 +#define SXGBE_CORE_DEBUG_REG 0x0114 +#define SXGBE_CORE_HW_FEA_REG(index) (0x011C + index * 4) + +/* SMA(MDIO) module registers */ +#define SXGBE_MDIO_SCMD_ADD_REG 0x0200 +#define SXGBE_MDIO_SCMD_DATA_REG 0x0204 +#define SXGBE_MDIO_CCMD_WADD_REG 0x0208 +#define SXGBE_MDIO_CCMD_WDATA_REG 0x020C +#define SXGBE_MDIO_CSCAN_PORT_REG 0x0210 +#define SXGBE_MDIO_INT_STATUS_REG 0x0214 +#define SXGBE_MDIO_INT_ENABLE_REG 0x0218 +#define SXGBE_MDIO_PORT_CONDCON_REG 0x021C +#define SXGBE_MDIO_CLAUSE22_PORT_REG 0x0220 + +/* port specific, addr = 0-3 */ +#define SXGBE_MDIO_DEV_BASE_REG 0x0230 +#define SXGBE_MDIO_PORT_DEV_REG(addr) \ + (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0) +#define SXGBE_MDIO_PORT_LSTATUS_REG(addr) \ + (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4) +#define SXGBE_MDIO_PORT_ALIVE_REG(addr) \ + (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8) + +#define SXGBE_CORE_GPIO_CTL_REG 0x0278 +#define SXGBE_CORE_GPIO_STATUS_REG 0x027C + +/* Address registers for filtering */ +#define SXGBE_CORE_ADD_BASE_REG 0x0300 + +/* addr = 0-31 */ +#define SXGBE_CORE_ADD_HIGHOFFSET(addr) \ + (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0) +#define SXGBE_CORE_ADD_LOWOFFSET(addr) \ + (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4) + +/* SXGBE MMC registers */ +#define SXGBE_MMC_CTL_REG 0x0800 +#define SXGBE_MMC_RXINT_STATUS_REG 0x0804 +#define SXGBE_MMC_TXINT_STATUS_REG 0x0808 +#define SXGBE_MMC_RXINT_ENABLE_REG 0x080C +#define SXGBE_MMC_TXINT_ENABLE_REG 0x0810 + +/* TX specific counters */ +#define SXGBE_MMC_TXOCTETHI_GBCNT_REG 0x0814 +#define 
SXGBE_MMC_TXOCTETLO_GBCNT_REG 0x0818 +#define SXGBE_MMC_TXFRAMELO_GBCNT_REG 0x081C +#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG 0x0820 +#define SXGBE_MMC_TXBROADLO_GCNT_REG 0x0824 +#define SXGBE_MMC_TXBROADHI_GCNT_REG 0x0828 +#define SXGBE_MMC_TXMULTILO_GCNT_REG 0x082C +#define SXGBE_MMC_TXMULTIHI_GCNT_REG 0x0830 +#define SXGBE_MMC_TX64LO_GBCNT_REG 0x0834 +#define SXGBE_MMC_TX64HI_GBCNT_REG 0x0838 +#define SXGBE_MMC_TX65TO127LO_GBCNT_REG 0x083C +#define SXGBE_MMC_TX65TO127HI_GBCNT_REG 0x0840 +#define SXGBE_MMC_TX128TO255LO_GBCNT_REG 0x0844 +#define SXGBE_MMC_TX128TO255HI_GBCNT_REG 0x0848 +#define SXGBE_MMC_TX256TO511LO_GBCNT_REG 0x084C +#define SXGBE_MMC_TX256TO511HI_GBCNT_REG 0x0850 +#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG 0x0854 +#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG 0x0858 +#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG 0x085C +#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG 0x0860 +#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG 0x0864 +#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG 0x0868 +#define SXGBE_MMC_TXMULTILO_GBCNT_REG 0x086C +#define SXGBE_MMC_TXMULTIHI_GBCNT_REG 0x0870 +#define SXGBE_MMC_TXBROADLO_GBCNT_REG 0x0874 +#define SXGBE_MMC_TXBROADHI_GBCNT_REG 0x0878 +#define SXGBE_MMC_TXUFLWLO_GBCNT_REG 0x087C +#define SXGBE_MMC_TXUFLWHI_GBCNT_REG 0x0880 +#define SXGBE_MMC_TXOCTETLO_GCNT_REG 0x0884 +#define SXGBE_MMC_TXOCTETHI_GCNT_REG 0x0888 +#define SXGBE_MMC_TXFRAMELO_GCNT_REG 0x088C +#define SXGBE_MMC_TXFRAMEHI_GCNT_REG 0x0890 +#define SXGBE_MMC_TXPAUSELO_CNT_REG 0x0894 +#define SXGBE_MMC_TXPAUSEHI_CNT_REG 0x0898 +#define SXGBE_MMC_TXVLANLO_GCNT_REG 0x089C +#define SXGBE_MMC_TXVLANHI_GCNT_REG 0x08A0 + +/* RX specific counters */ +#define SXGBE_MMC_RXFRAMELO_GBCNT_REG 0x0900 +#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG 0x0904 +#define SXGBE_MMC_RXOCTETLO_GBCNT_REG 0x0908 +#define SXGBE_MMC_RXOCTETHI_GBCNT_REG 0x090C +#define SXGBE_MMC_RXOCTETLO_GCNT_REG 0x0910 +#define SXGBE_MMC_RXOCTETHI_GCNT_REG 0x0914 +#define SXGBE_MMC_RXBROADLO_GCNT_REG 0x0918 +#define SXGBE_MMC_RXBROADHI_GCNT_REG 0x091C +#define SXGBE_MMC_RXMULTILO_GCNT_REG 0x0920 +#define SXGBE_MMC_RXMULTIHI_GCNT_REG 0x0924 +#define SXGBE_MMC_RXCRCERRLO_REG 0x0928 +#define SXGBE_MMC_RXCRCERRHI_REG 0x092C +#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG 0x0930 +#define SXGBE_MMC_RXJABBERERR_REG 0x0934 +#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG 0x0938 +#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG 0x093C +#define SXGBE_MMC_RX64LO_GBCNT_REG 0x0940 +#define SXGBE_MMC_RX64HI_GBCNT_REG 0x0944 +#define SXGBE_MMC_RX65TO127LO_GBCNT_REG 0x0948 +#define SXGBE_MMC_RX65TO127HI_GBCNT_REG 0x094C +#define SXGBE_MMC_RX128TO255LO_GBCNT_REG 0x0950 +#define SXGBE_MMC_RX128TO255HI_GBCNT_REG 0x0954 +#define SXGBE_MMC_RX256TO511LO_GBCNT_REG 0x0958 +#define SXGBE_MMC_RX256TO511HI_GBCNT_REG 0x095C +#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG 0x0960 +#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG 0x0964 +#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG 0x0968 +#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG 0x096C +#define SXGBE_MMC_RXUNICASTLO_GCNT_REG 0x0970 +#define SXGBE_MMC_RXUNICASTHI_GCNT_REG 0x0974 +#define SXGBE_MMC_RXLENERRLO_REG 0x0978 +#define SXGBE_MMC_RXLENERRHI_REG 0x097C +#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG 0x0980 +#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG 0x0984 +#define SXGBE_MMC_RXPAUSELO_CNT_REG 0x0988 +#define SXGBE_MMC_RXPAUSEHI_CNT_REG 0x098C +#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG 0x0990 +#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG 0x0994 +#define SXGBE_MMC_RXVLANLO_GBCNT_REG 0x0998 +#define SXGBE_MMC_RXVLANHI_GBCNT_REG 0x099C +#define SXGBE_MMC_RXWATCHDOG_ERR_REG 0x09A0 + 
+/* L3/L4 function registers */ +#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00 +#define SXGBE_CORE_L34_DATA_REG 0x0C04 + +/* ARP registers */ +#define SXGBE_CORE_ARP_ADD_REG 0x0C10 + +/* RSS registers */ +#define SXGBE_CORE_RSS_CTL_REG 0x0C80 +#define SXGBE_CORE_RSS_ADD_REG 0x0C88 +#define SXGBE_CORE_RSS_DATA_REG 0x0C8C + +/* RSS control register bits */ +#define SXGBE_CORE_RSS_CTL_UDP4TE BIT(3) +#define SXGBE_CORE_RSS_CTL_TCP4TE BIT(2) +#define SXGBE_CORE_RSS_CTL_IP2TE BIT(1) +#define SXGBE_CORE_RSS_CTL_RSSE BIT(0) + +/* IEEE 1588 registers */ +#define SXGBE_CORE_TSTAMP_CTL_REG 0x0D00 +#define SXGBE_CORE_SUBSEC_INC_REG 0x0D04 +#define SXGBE_CORE_SYSTIME_SEC_REG 0x0D0C +#define SXGBE_CORE_SYSTIME_NSEC_REG 0x0D10 +#define SXGBE_CORE_SYSTIME_SECUP_REG 0x0D14 +#define SXGBE_CORE_TSTAMP_ADD_REG 0x0D18 +#define SXGBE_CORE_SYSTIME_HWORD_REG 0x0D1C +#define SXGBE_CORE_TSTAMP_STATUS_REG 0x0D20 +#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30 +#define SXGBE_CORE_TXTIME_STATUSSEC_REG 0x0D34 + +/* Auxiliary registers */ +#define SXGBE_CORE_AUX_CTL_REG 0x0D40 +#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG 0x0D48 +#define SXGBE_CORE_AUX_TSTAMP_SEC_REG 0x0D4C +#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG 0x0D50 +#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG 0x0D54 +#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG 0x0D58 +#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C +#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG 0x0D60 +#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64 + +/* PPS registers */ +#define SXGBE_CORE_PPS_CTL_REG 0x0D70 +#define SXGBE_CORE_PPS_BASE 0x0D80 + +/* addr = 0 - 3 */ +#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0) +#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4) +#define SXGBE_CORE_PPS_INTERVAL_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8) +#define SXGBE_CORE_PPS_WIDTH_REG(addr) \ + (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC) +#define SXGBE_CORE_PTO_CTL_REG 0x0DC0 +#define SXGBE_CORE_SRCPORT_ITY0_REG 0x0DC4 +#define SXGBE_CORE_SRCPORT_ITY1_REG 0x0DC8 +#define SXGBE_CORE_SRCPORT_ITY2_REG 0x0DCC +#define SXGBE_CORE_LOGMSG_LEVEL_REG 0x0DD0 + +/* SXGBE MTL Registers */ +#define SXGBE_MTL_BASE_REG 0x1000 +#define SXGBE_MTL_OP_MODE_REG (SXGBE_MTL_BASE_REG + 0x0000) +#define SXGBE_MTL_DEBUG_CTL_REG (SXGBE_MTL_BASE_REG + 0x0008) +#define SXGBE_MTL_DEBUG_STATUS_REG (SXGBE_MTL_BASE_REG + 0x000C) +#define SXGBE_MTL_FIFO_DEBUGDATA_REG (SXGBE_MTL_BASE_REG + 0x0010) +#define SXGBE_MTL_INT_STATUS_REG (SXGBE_MTL_BASE_REG + 0x0020) +#define SXGBE_MTL_RXQ_DMAMAP0_REG (SXGBE_MTL_BASE_REG + 0x0030) +#define SXGBE_MTL_RXQ_DMAMAP1_REG (SXGBE_MTL_BASE_REG + 0x0034) +#define SXGBE_MTL_RXQ_DMAMAP2_REG (SXGBE_MTL_BASE_REG + 0x0038) +#define SXGBE_MTL_TX_PRTYMAP0_REG (SXGBE_MTL_BASE_REG + 0x0040) +#define SXGBE_MTL_TX_PRTYMAP1_REG (SXGBE_MTL_BASE_REG + 0x0044) + +/* TC/Queue registers, qnum=0-15 */ +#define SXGBE_MTL_TC_TXBASE_REG (SXGBE_MTL_BASE_REG + 0x0100) +#define SXGBE_MTL_TXQ_OPMODE_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00) +#define SXGBE_MTL_SFMODE BIT(1) +#define SXGBE_MTL_FIFO_LSHIFT 16 +#define SXGBE_MTL_ENABLE_QUEUE 0x00000008 +#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04) +#define SXGBE_MTL_TXQ_DEBUG_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08) +#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10) +#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 
0x80) + 0x14) +#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum) \ + (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x18) + +#define SXGBE_MTL_TC_RXBASE_REG 0x1140 +#define SXGBE_RX_MTL_SFMODE BIT(5) +#define SXGBE_MTL_RXQ_OPMODE_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00) +#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04) +#define SXGBE_MTL_RXQ_DEBUG_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08) +#define SXGBE_MTL_RXQ_CTL_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C) +#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30) +#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum) \ + (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34) + +/* SXGBE DMA Registers */ +#define SXGBE_DMA_BASE_REG 0x3000 +#define SXGBE_DMA_MODE_REG (SXGBE_DMA_BASE_REG + 0x0000) +#define SXGBE_DMA_SOFT_RESET BIT(0) +#define SXGBE_DMA_SYSBUS_MODE_REG (SXGBE_DMA_BASE_REG + 0x0004) +#define SXGBE_DMA_AXI_UNDEF_BURST BIT(0) +#define SXGBE_DMA_ENHACE_ADDR_MODE BIT(11) +#define SXGBE_DMA_INT_STATUS_REG (SXGBE_DMA_BASE_REG + 0x0008) +#define SXGBE_DMA_AXI_ARCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0010) +#define SXGBE_DMA_AXI_AWCACHECTL_REG (SXGBE_DMA_BASE_REG + 0x0018) +#define SXGBE_DMA_DEBUG_STATUS0_REG (SXGBE_DMA_BASE_REG + 0x0020) +#define SXGBE_DMA_DEBUG_STATUS1_REG (SXGBE_DMA_BASE_REG + 0x0024) +#define SXGBE_DMA_DEBUG_STATUS2_REG (SXGBE_DMA_BASE_REG + 0x0028) +#define SXGBE_DMA_DEBUG_STATUS3_REG (SXGBE_DMA_BASE_REG + 0x002C) +#define SXGBE_DMA_DEBUG_STATUS4_REG (SXGBE_DMA_BASE_REG + 0x0030) +#define SXGBE_DMA_DEBUG_STATUS5_REG (SXGBE_DMA_BASE_REG + 0x0034) + +/* Channel Registers, cha_num = 0-15 */ +#define SXGBE_DMA_CHA_BASE_REG \ + (SXGBE_DMA_BASE_REG + 0x0100) +#define SXGBE_DMA_CHA_CTL_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00) +#define SXGBE_DMA_PBL_X8MODE BIT(16) +#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE BIT(12) +#define SXGBE_DMA_CHA_TXCTL_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04) +#define SXGBE_DMA_CHA_RXCTL_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08) +#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10) +#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14) +#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18) +#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C) +#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24) +#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C) +#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30) +#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34) +#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38) +#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C) +#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44) +#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C) +#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50) +#define 
SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54) +#define SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58) +#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C) +#define SXGBE_DMA_CHA_STATUS_REG(cha_num) \ + (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60) + +/* TX DMA control register specific */ +#define SXGBE_TX_START_DMA BIT(0) + +/* sxgbe tx configuration register bitfields */ +#define SXGBE_SPEED_10G 0x0 +#define SXGBE_SPEED_2_5G 0x1 +#define SXGBE_SPEED_1G 0x2 +#define SXGBE_SPEED_LSHIFT 29 + +#define SXGBE_TX_ENABLE BIT(0) +#define SXGBE_TX_DISDIC_ALGO BIT(1) +#define SXGBE_TX_JABBER_DISABLE BIT(16) + +/* sxgbe rx configuration register bitfields */ +#define SXGBE_RX_ENABLE BIT(0) +#define SXGBE_RX_ACS_ENABLE BIT(1) +#define SXGBE_RX_WATCHDOG_DISABLE BIT(7) +#define SXGBE_RX_JUMBPKT_ENABLE BIT(8) +#define SXGBE_RX_CSUMOFFLOAD_ENABLE BIT(9) +#define SXGBE_RX_LOOPBACK_ENABLE BIT(10) +#define SXGBE_RX_ARPOFFLOAD_ENABLE BIT(31) + +/* sxgbe vlan Tag Register bitfields */ +#define SXGBE_VLAN_SVLAN_ENABLE BIT(18) +#define SXGBE_VLAN_DOUBLEVLAN_ENABLE BIT(26) +#define SXGBE_VLAN_INNERVLAN_ENABLE BIT(27) + +/* XMAC VLAN Tag Inclusion Register(0x0060) bitfields + * Below fields same for Inner VLAN Tag Inclusion + * Register(0x0064) register + */ +enum vlan_tag_ctl_tx { + VLAN_TAG_TX_NOP, + VLAN_TAG_TX_DEL, + VLAN_TAG_TX_INSERT, + VLAN_TAG_TX_REPLACE +}; +#define SXGBE_VLAN_PRTY_CTL BIT(18) +#define SXGBE_VLAN_CSVL_CTL BIT(19) + +/* SXGBE TX Q Flow Control Register bitfields */ +#define SXGBE_TX_FLOW_CTL_FCB BIT(0) +#define SXGBE_TX_FLOW_CTL_TFB BIT(1) + +/* SXGBE RX Q Flow Control Register bitfields */ +#define SXGBE_RX_FLOW_CTL_ENABLE BIT(0) +#define SXGBE_RX_UNICAST_DETECT BIT(1) +#define SXGBE_RX_PRTYFLOW_CTL_ENABLE BIT(8) + +/* sxgbe rx Q control0 register bitfields */ +#define SXGBE_RX_Q_ENABLE 0x2 + +/* SXGBE hardware features bitfield specific */ +/* Capability Register 0 */ +#define SXGBE_HW_FEAT_GMII(cap) ((cap & 0x00000002) >> 1) +#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap) ((cap & 0x00000010) >> 4) +#define SXGBE_HW_FEAT_SMA(cap) ((cap & 0x00000020) >> 5) +#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap) ((cap & 0x00000040) >> 6) +#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap) ((cap & 0x00000080) >> 7) +#define SXGBE_HW_FEAT_RMON(cap) ((cap & 0x00000100) >> 8) +#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap) ((cap & 0x00000200) >> 9) +#define SXGBE_HW_FEAT_IEEE1500_2008(cap) ((cap & 0x00001000) >> 12) +#define SXGBE_HW_FEAT_EEE(cap) ((cap & 0x00002000) >> 13) +#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap) ((cap & 0x00004000) >> 14) +#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap) ((cap & 0x00010000) >> 16) +#define SXGBE_HW_FEAT_MACADDR_COUNT(cap) ((cap & 0x007C0000) >> 18) +#define SXGBE_HW_FEAT_TSTMAP_SRC(cap) ((cap & 0x06000000) >> 25) +#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap) ((cap & 0x08000000) >> 27) + +/* Capability Register 1 */ +#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap) ((cap & 0x0000001F)) +#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap) ((cap & 0x000007C0) >> 6) +#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap) ((cap & 0x00002000) >> 13) +#define SXGBE_HW_FEAT_DCB(cap) ((cap & 0x00010000) >> 16) +#define SXGBE_HW_FEAT_SPLIT_HDR(cap) ((cap & 0x00020000) >> 17) +#define SXGBE_HW_FEAT_TSO(cap) ((cap & 0x00040000) >> 18) +#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap) ((cap & 0x00080000) >> 19) +#define SXGBE_HW_FEAT_RSS(cap) ((cap & 0x00100000) >> 20) +#define 
SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap) ((cap & 0x03000000) >> 24) +#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap) ((cap & 0x78000000) >> 27) + +/* Capability Register 2 */ +#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap) ((cap & 0x0000000F)) +#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap) ((cap & 0x000003C0) >> 6) +#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap) ((cap & 0x0000F000) >> 12) +#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap) ((cap & 0x003C0000) >> 18) +#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap) ((cap & 0x07000000) >> 24) +#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap) ((cap & 0x70000000) >> 28) + +/* DMAchannel interrupt enable specific */ +/* DMA Normal interrupt */ +#define SXGBE_DMA_INT_ENA_NIE BIT(16) /* Normal Summary */ +#define SXGBE_DMA_INT_ENA_TIE BIT(0) /* Transmit Interrupt */ +#define SXGBE_DMA_INT_ENA_TUE BIT(2) /* Transmit Buffer Unavailable */ +#define SXGBE_DMA_INT_ENA_RIE BIT(6) /* Receive Interrupt */ + +#define SXGBE_DMA_INT_NORMAL \ + (SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE | \ + SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE) + +/* DMA Abnormal interrupt */ +#define SXGBE_DMA_INT_ENA_AIE BIT(15) /* Abnormal Summary */ +#define SXGBE_DMA_INT_ENA_TSE BIT(1) /* Transmit Stopped */ +#define SXGBE_DMA_INT_ENA_RUE BIT(7) /* Receive Buffer Unavailable */ +#define SXGBE_DMA_INT_ENA_RSE BIT(8) /* Receive Stopped */ +#define SXGBE_DMA_INT_ENA_FBE BIT(12) /* Fatal Bus Error */ +#define SXGBE_DMA_INT_ENA_CDEE BIT(13) /* Context Descriptor Error */ + +#define SXGBE_DMA_INT_ABNORMAL \ + (SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE | \ + SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE | \ + SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE) + +#define SXGBE_DMA_ENA_INT (SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL) + +/* DMA channel interrupt status specific */ +#define SXGBE_DMA_INT_STATUS_REB2 BIT(21) +#define SXGBE_DMA_INT_STATUS_REB1 BIT(20) +#define SXGBE_DMA_INT_STATUS_REB0 BIT(19) +#define SXGBE_DMA_INT_STATUS_TEB2 BIT(18) +#define SXGBE_DMA_INT_STATUS_TEB1 BIT(17) +#define SXGBE_DMA_INT_STATUS_TEB0 BIT(16) +#define SXGBE_DMA_INT_STATUS_NIS BIT(15) +#define SXGBE_DMA_INT_STATUS_AIS BIT(14) +#define SXGBE_DMA_INT_STATUS_CTXTERR BIT(13) +#define SXGBE_DMA_INT_STATUS_FBE BIT(12) +#define SXGBE_DMA_INT_STATUS_RPS BIT(8) +#define SXGBE_DMA_INT_STATUS_RBU BIT(7) +#define SXGBE_DMA_INT_STATUS_RI BIT(6) +#define SXGBE_DMA_INT_STATUS_TBU BIT(2) +#define SXGBE_DMA_INT_STATUS_TPS BIT(1) +#define SXGBE_DMA_INT_STATUS_TI BIT(0) + +#endif /* __SXGBE_REGMAP_H__ */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c new file mode 100644 index 000000000..51c32194b --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c @@ -0,0 +1,91 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Siva Reddy Kallam + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include "sxgbe_common.h" +#include "sxgbe_xpcs.h" + +static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg) +{ + u32 value; + struct sxgbe_priv_data *priv = netdev_priv(ndev); + + value = readl(priv->ioaddr + XPCS_OFFSET + reg); + + return value; +} + +static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data) +{ + struct sxgbe_priv_data *priv = netdev_priv(ndev); + + writel(data, priv->ioaddr + XPCS_OFFSET + reg); + + return 0; +} + +int sxgbe_xpcs_init(struct net_device *ndev) +{ + u32 value; + + value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); + /* 10G XAUI mode */ + sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); + sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); + sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13)); + sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); + + do { + value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); + } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE); + + value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); + sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); + + do { + value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); + } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); + + return 0; +} + +int sxgbe_xpcs_init_1G(struct net_device *ndev) +{ + int value; + + /* 10GBASE-X PCS (1G) mode */ + sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); + sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); + value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); + sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13)); + + value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); + sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6)); + sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13)); + value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); + sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); + + do { + value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); + } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); + + value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); + sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); + + /* Auto Negotiation cluase 37 enable */ + value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); + sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12)); + + return 0; +} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h new file mode 100644 index 000000000..6b26a5072 --- /dev/null +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h @@ -0,0 +1,38 @@ +/* 10G controller driver for Samsung SoCs + * + * Copyright (C) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Byungho An + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef __SXGBE_XPCS_H__ +#define __SXGBE_XPCS_H__ + +/* XPCS Registers */ +#define XPCS_OFFSET 0x1A060000 +#define SR_PCS_MMD_CONTROL1 0x030000 +#define SR_PCS_CONTROL2 0x030007 +#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004 +#define VR_PCS_MMD_DIGITAL_STATUS 0x038010 +#define SR_MII_MMD_CONTROL 0x1F0000 +#define SR_MII_MMD_AN_ADV 0x1F0004 +#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005 +#define VR_MII_MMD_AN_CONTROL 0x1F8001 +#define VR_MII_MMD_AN_INT_STATUS 0x1F8002 + +#define XPCS_QSEQ_STATE_STABLE 0x10 +#define XPCS_QSEQ_STATE_MPLLOFF 0x1c +#define XPCS_TYPE_SEL_R 0x00 +#define XPCS_TYPE_SEL_X 0x01 +#define XPCS_TYPE_SEL_W 0x02 +#define XPCS_XAUI_MODE 0x00 +#define XPCS_RXAUI_MODE 0x01 + +int sxgbe_xpcs_init(struct net_device *ndev); +int sxgbe_xpcs_init_1G(struct net_device *ndev); + +#endif /* __SXGBE_XPCS_H__ */ diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig new file mode 100644 index 000000000..11f168e46 --- /dev/null +++ b/drivers/net/ethernet/seeq/Kconfig @@ -0,0 +1,35 @@ +# +# SEEQ device configuration +# + +config NET_VENDOR_SEEQ + bool "SEEQ devices" + default y + depends on HAS_IOMEM + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about SEEQ devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_SEEQ + +config ARM_ETHER3 + tristate "Acorn/ANT Ether3 support" + depends on ARM && ARCH_ACORN + ---help--- + If you have an Acorn system with one of these network cards, you + should say Y to this option if you wish to use it with Linux. + +config SGISEEQ + tristate "SGI Seeq ethernet controller support" + depends on SGI_HAS_SEEQ + ---help--- + Say Y here if you have an Seeq based Ethernet network card. This is + used in many Silicon Graphics machines. + +endif # NET_VENDOR_SEEQ diff --git a/drivers/net/ethernet/seeq/Makefile b/drivers/net/ethernet/seeq/Makefile new file mode 100644 index 000000000..0488e99b8 --- /dev/null +++ b/drivers/net/ethernet/seeq/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the SEEQ network device drivers +# + +obj-$(CONFIG_ARM_ETHER3) += ether3.o +obj-$(CONFIG_SGISEEQ) += sgiseeq.o diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c new file mode 100644 index 000000000..bdac936a6 --- /dev/null +++ b/drivers/net/ethernet/seeq/ether3.c @@ -0,0 +1,899 @@ +/* + * linux/drivers/acorn/net/ether3.c + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card + * for Acorn machines + * + * By Russell King, with some suggestions from borris@ant.co.uk + * + * Changelog: + * 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet + * address up to the higher levels - they're + * silently ignored. I/F can now be put into + * multicast mode. Receiver routine optimised. + * 1.05 RMK 30/02/1996 Now claims interrupt at open when part of + * the kernel rather than when a module. + * 1.06 RMK 02/03/1996 Various code cleanups + * 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit + * routines. 
+ * 1.08 RMK 14/10/1996 Fixed problem with too many packets, + * prevented the kernel message about dropped + * packets appearing too many times a second. + * Now does not disable all IRQs, only the IRQ + * used by this card. + * 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low, + * but we still service the TX queue if we get a + * RX interrupt. + * 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004. + * 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A. + * 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1. + * RMK 27/06/1998 Changed asm/delay.h to linux/delay.h. + * 1.13 RMK 29/06/1998 Fixed problem with transmission of packets. + * Chip seems to have a bug in, whereby if the + * packet starts two bytes from the end of the + * buffer, it corrupts the receiver chain, and + * never updates the transmit status correctly. + * 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing. + * 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy + * hardware. + * 1.16 RMK 10/02/2000 Updated for 2.3.43 + * 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +static char version[] = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; + +#include "ether3.h" + +static unsigned int net_debug = NET_DEBUG; + +static void ether3_setmulticastlist(struct net_device *dev); +static int ether3_rx(struct net_device *dev, unsigned int maxcnt); +static void ether3_tx(struct net_device *dev); +static int ether3_open (struct net_device *dev); +static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); +static irqreturn_t ether3_interrupt (int irq, void *dev_id); +static int ether3_close (struct net_device *dev); +static void ether3_setmulticastlist (struct net_device *dev); +static void ether3_timeout(struct net_device *dev); + +#define BUS_16 2 +#define BUS_8 1 +#define BUS_UNKNOWN 0 + +/* --------------------------------------------------------------------------- */ + +typedef enum { + buffer_write, + buffer_read +} buffer_rw_t; + +/* + * ether3 read/write. Slow things down a bit... + * The SEEQ8005 doesn't like us writing to its registers + * too quickly. 
+ */ +static inline void ether3_outb(int v, void __iomem *r) +{ + writeb(v, r); + udelay(1); +} + +static inline void ether3_outw(int v, void __iomem *r) +{ + writew(v, r); + udelay(1); +} +#define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; }) +#define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; }) + +static int +ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start) +{ + int timeout = 1000; + + ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); + ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); + + while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) { + if (!timeout--) { + printk("%s: setbuffer broken\n", dev->name); + priv(dev)->broken = 1; + return 1; + } + udelay(1); + } + + if (read == buffer_read) { + ether3_outw(start, REG_DMAADDR); + ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND); + } else { + ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND); + ether3_outw(start, REG_DMAADDR); + } + return 0; +} + +/* + * write data to the buffer memory + */ +#define ether3_writebuffer(dev,data,length) \ + writesw(REG_BUFWIN, (data), (length) >> 1) + +#define ether3_writeword(dev,data) \ + writew((data), REG_BUFWIN) + +#define ether3_writelong(dev,data) { \ + void __iomem *reg_bufwin = REG_BUFWIN; \ + writew((data), reg_bufwin); \ + writew((data) >> 16, reg_bufwin); \ +} + +/* + * read data from the buffer memory + */ +#define ether3_readbuffer(dev,data,length) \ + readsw(REG_BUFWIN, (data), (length) >> 1) + +#define ether3_readword(dev) \ + readw(REG_BUFWIN) + +#define ether3_readlong(dev) \ + readw(REG_BUFWIN) | (readw(REG_BUFWIN) << 16) + +/* + * Switch LED off... + */ +static void ether3_ledoff(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); +} + +/* + * switch LED on... + */ +static inline void ether3_ledon(struct net_device *dev) +{ + del_timer(&priv(dev)->timer); + priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */ + priv(dev)->timer.data = (unsigned long)dev; + priv(dev)->timer.function = ether3_ledoff; + add_timer(&priv(dev)->timer); + if (priv(dev)->regs.config2 & CFG2_CTRLO) + ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2); +} + +/* + * Read the ethernet address string from the on board rom. + * This is an ascii string!!! 
+ */ +static int +ether3_addr(char *addr, struct expansion_card *ec) +{ + struct in_chunk_dir cd; + char *s; + + if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) { + int i; + for (i = 0; i<6; i++) { + addr[i] = simple_strtoul(s + 1, &s, 0x10); + if (*s != (i==5?')' : ':' )) + break; + } + if (i == 6) + return 0; + } + /* I wonder if we should even let the user continue in this case + * - no, it would be better to disable the device + */ + printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n"); + return -ENODEV; +} + +/* --------------------------------------------------------------------------- */ + +static int +ether3_ramtest(struct net_device *dev, unsigned char byte) +{ + unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL); + int i,ret = 0; + int max_errors = 4; + int bad = -1; + + if (!buffer) + return 1; + + memset(buffer, byte, RX_END); + ether3_setbuffer(dev, buffer_write, 0); + ether3_writebuffer(dev, buffer, TX_END); + ether3_setbuffer(dev, buffer_write, RX_START); + ether3_writebuffer(dev, buffer + RX_START, RX_LEN); + memset(buffer, byte ^ 0xff, RX_END); + ether3_setbuffer(dev, buffer_read, 0); + ether3_readbuffer(dev, buffer, TX_END); + ether3_setbuffer(dev, buffer_read, RX_START); + ether3_readbuffer(dev, buffer + RX_START, RX_LEN); + + for (i = 0; i < RX_END; i++) { + if (buffer[i] != byte) { + if (max_errors > 0 && bad != buffer[i]) { + printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X", + dev->name, buffer[i], byte, i); + ret = 2; + max_errors--; + bad = i; + } + } else { + if (bad != -1) { + if (bad != i - 1) + printk(" - 0x%04X\n", i - 1); + printk("\n"); + bad = -1; + } + } + } + if (bad != -1) + printk(" - 0xffff\n"); + kfree(buffer); + + return ret; +} + +/* ------------------------------------------------------------------------------- */ + +static int ether3_init_2(struct net_device *dev) +{ + int i; + + priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8; + priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC; + priv(dev)->regs.command = 0; + + /* + * Set up our hardware address + */ + ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); + for (i = 0; i < 6; i++) + ether3_outb(dev->dev_addr[i], REG_BUFWIN); + + if (dev->flags & IFF_PROMISC) + priv(dev)->regs.config1 |= CFG1_RECVPROMISC; + else if (dev->flags & IFF_MULTICAST) + priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; + else + priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; + + /* + * There is a problem with the NQ8005 in that it occasionally loses the + * last two bytes. To get round this problem, we receive the CRC as + * well. That way, if we do lose the last two, then it doesn't matter. 
+ */ + ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); + ether3_outw((TX_END>>8) - 1, REG_BUFWIN); + ether3_outw(priv(dev)->rx_head, REG_RECVPTR); + ether3_outw(0, REG_TRANSMITPTR); + ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); + ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); + ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); + ether3_outw(priv(dev)->regs.command, REG_COMMAND); + + i = ether3_ramtest(dev, 0x5A); + if(i) + return i; + i = ether3_ramtest(dev, 0x1E); + if(i) + return i; + + ether3_setbuffer(dev, buffer_write, 0); + ether3_writelong(dev, 0); + return 0; +} + +static void +ether3_init_for_open(struct net_device *dev) +{ + int i; + + /* Reset the chip */ + ether3_outw(CFG2_RESET, REG_CONFIG2); + udelay(4); + + priv(dev)->regs.command = 0; + ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); + while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) + barrier(); + + ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1); + for (i = 0; i < 6; i++) + ether3_outb(dev->dev_addr[i], REG_BUFWIN); + + priv(dev)->tx_head = 0; + priv(dev)->tx_tail = 0; + priv(dev)->regs.config2 |= CFG2_CTRLO; + priv(dev)->rx_head = RX_START; + + ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1); + ether3_outw((TX_END>>8) - 1, REG_BUFWIN); + ether3_outw(priv(dev)->rx_head, REG_RECVPTR); + ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND); + ether3_outw(0, REG_TRANSMITPTR); + ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); + ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); + + ether3_setbuffer(dev, buffer_write, 0); + ether3_writelong(dev, 0); + + priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX; + ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); +} + +static inline int +ether3_probe_bus_8(struct net_device *dev, int val) +{ + int write_low, write_high, read_low, read_high; + + write_low = val & 255; + write_high = val >> 8; + + printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low); + + ether3_outb(write_low, REG_RECVPTR); + ether3_outb(write_high, REG_RECVPTR + 4); + + read_low = ether3_inb(REG_RECVPTR); + read_high = ether3_inb(REG_RECVPTR + 4); + + printk(", read8 [%02X:%02X]\n", read_high, read_low); + + return read_low == write_low && read_high == write_high; +} + +static inline int +ether3_probe_bus_16(struct net_device *dev, int val) +{ + int read_val; + + ether3_outw(val, REG_RECVPTR); + read_val = ether3_inw(REG_RECVPTR); + + printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val); + + return read_val == val; +} + +/* + * Open/initialize the board. This is called (in the current kernel) + * sometime after booting when the 'ifconfig' program is run. + * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. + */ +static int +ether3_open(struct net_device *dev) +{ + if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev)) + return -EAGAIN; + + ether3_init_for_open(dev); + + netif_start_queue(dev); + + return 0; +} + +/* + * The inverse routine to ether3_open(). 
+ */ +static int +ether3_close(struct net_device *dev) +{ + netif_stop_queue(dev); + + disable_irq(dev->irq); + + ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND); + priv(dev)->regs.command = 0; + while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON)) + barrier(); + ether3_outb(0x80, REG_CONFIG2 + 4); + ether3_outw(0, REG_COMMAND); + + free_irq(dev->irq, dev); + + return 0; +} + +/* + * Set or clear promiscuous/multicast mode filter for this adaptor. + * + * We don't attempt any packet filtering. The card may have a SEEQ 8004 + * in which does not have the other ethernet address registers present... + */ +static void ether3_setmulticastlist(struct net_device *dev) +{ + priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC; + + if (dev->flags & IFF_PROMISC) { + /* promiscuous mode */ + priv(dev)->regs.config1 |= CFG1_RECVPROMISC; + } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { + priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI; + } else + priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD; + + ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1); +} + +static void ether3_timeout(struct net_device *dev) +{ + unsigned long flags; + + del_timer(&priv(dev)->timer); + + local_irq_save(flags); + printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name); + printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name, + ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2)); + printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name, + ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR)); + printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name, + priv(dev)->tx_head, priv(dev)->tx_tail); + ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail); + printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev)); + local_irq_restore(flags); + + priv(dev)->regs.config2 |= CFG2_CTRLO; + dev->stats.tx_errors += 1; + ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); + priv(dev)->tx_head = priv(dev)->tx_tail = 0; + + netif_wake_queue(dev); +} + +/* + * Transmit a packet + */ +static int +ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) +{ + unsigned long flags; + unsigned int length = ETH_ZLEN < skb->len ? 
skb->len : ETH_ZLEN; + unsigned int ptr, next_ptr; + + if (priv(dev)->broken) { + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + netif_start_queue(dev); + return NETDEV_TX_OK; + } + + length = (length + 1) & ~1; + if (length != skb->len) { + if (skb_padto(skb, length)) + goto out; + } + + next_ptr = (priv(dev)->tx_head + 1) & 15; + + local_irq_save(flags); + + if (priv(dev)->tx_tail == next_ptr) { + local_irq_restore(flags); + return NETDEV_TX_BUSY; /* unable to queue */ + } + + ptr = 0x600 * priv(dev)->tx_head; + priv(dev)->tx_head = next_ptr; + next_ptr *= 0x600; + +#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS) + + ether3_setbuffer(dev, buffer_write, next_ptr); + ether3_writelong(dev, 0); + ether3_setbuffer(dev, buffer_write, ptr); + ether3_writelong(dev, 0); + ether3_writebuffer(dev, skb->data, length); + ether3_writeword(dev, htons(next_ptr)); + ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16); + ether3_setbuffer(dev, buffer_write, ptr); + ether3_writeword(dev, htons((ptr + length + 4))); + ether3_writeword(dev, TXHDR_FLAGS >> 16); + ether3_ledon(dev); + + if (!(ether3_inw(REG_STATUS) & STAT_TXON)) { + ether3_outw(ptr, REG_TRANSMITPTR); + ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND); + } + + next_ptr = (priv(dev)->tx_head + 1) & 15; + local_irq_restore(flags); + + dev_kfree_skb(skb); + + if (priv(dev)->tx_tail == next_ptr) + netif_stop_queue(dev); + + out: + return NETDEV_TX_OK; +} + +static irqreturn_t +ether3_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + unsigned int status, handled = IRQ_NONE; + +#if NET_DEBUG > 1 + if(net_debug & DEBUG_INT) + printk("eth3irq: %d ", irq); +#endif + + status = ether3_inw(REG_STATUS); + + if (status & STAT_INTRX) { + ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND); + ether3_rx(dev, 12); + handled = IRQ_HANDLED; + } + + if (status & STAT_INTTX) { + ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, REG_COMMAND); + ether3_tx(dev); + handled = IRQ_HANDLED; + } + +#if NET_DEBUG > 1 + if(net_debug & DEBUG_INT) + printk("done\n"); +#endif + return handled; +} + +/* + * If we have a good packet(s), get it/them out of the buffers. + */ +static int ether3_rx(struct net_device *dev, unsigned int maxcnt) +{ + unsigned int next_ptr = priv(dev)->rx_head, received = 0; + + ether3_ledon(dev); + + do { + unsigned int this_ptr, status; + unsigned char addrs[16]; + + /* + * read the first 16 bytes from the buffer. + * This contains the status bytes etc and ethernet addresses, + * and we also check the source ethernet address to see if + * it originated from us. + */ + { + unsigned int temp_ptr; + ether3_setbuffer(dev, buffer_read, next_ptr); + temp_ptr = ether3_readword(dev); + status = ether3_readword(dev); + if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) != + (RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr) + break; + + this_ptr = next_ptr + 4; + next_ptr = ntohs(temp_ptr); + } + ether3_setbuffer(dev, buffer_read, this_ptr); + ether3_readbuffer(dev, addrs+2, 12); + +if (next_ptr < RX_START || next_ptr >= RX_END) { + int i; + printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head); + printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8); + for (i = 2; i < 14; i++) + printk("%02X ", addrs[i]); + printk("\n"); + next_ptr = priv(dev)->rx_head; + break; +} + /* + * ignore our own packets... 
+ */ + if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) && + !(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) { + maxcnt ++; /* compensate for loopedback packet */ + ether3_outw(next_ptr >> 8, REG_RECVEND); + } else + if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) { + unsigned int length = next_ptr - this_ptr; + struct sk_buff *skb; + + if (next_ptr <= this_ptr) + length += RX_END - RX_START; + + skb = netdev_alloc_skb(dev, length + 2); + if (skb) { + unsigned char *buf; + + skb_reserve(skb, 2); + buf = skb_put(skb, length); + ether3_readbuffer(dev, buf + 12, length - 12); + ether3_outw(next_ptr >> 8, REG_RECVEND); + *(unsigned short *)(buf + 0) = *(unsigned short *)(addrs + 2); + *(unsigned long *)(buf + 2) = *(unsigned long *)(addrs + 4); + *(unsigned long *)(buf + 6) = *(unsigned long *)(addrs + 8); + *(unsigned short *)(buf + 10) = *(unsigned short *)(addrs + 12); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + received ++; + } else { + ether3_outw(next_ptr >> 8, REG_RECVEND); + dev->stats.rx_dropped++; + goto done; + } + } else { + struct net_device_stats *stats = &dev->stats; + ether3_outw(next_ptr >> 8, REG_RECVEND); + if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++; + if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++; + if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++; + if (status & RXSTAT_SHORTPACKET) stats->rx_length_errors ++; + stats->rx_errors++; + } + } + while (-- maxcnt); + +done: + dev->stats.rx_packets += received; + priv(dev)->rx_head = next_ptr; + /* + * If rx went off line, then that means that the buffer may be full. We + * have dropped at least one packet. + */ + if (!(ether3_inw(REG_STATUS) & STAT_RXON)) { + dev->stats.rx_dropped++; + ether3_outw(next_ptr, REG_RECVPTR); + ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); + } + + return maxcnt; +} + +/* + * Update stats for the transmitted packet(s) + */ +static void ether3_tx(struct net_device *dev) +{ + unsigned int tx_tail = priv(dev)->tx_tail; + int max_work = 14; + + do { + unsigned long status; + + /* + * Read the packet header + */ + ether3_setbuffer(dev, buffer_read, tx_tail * 0x600); + status = ether3_readlong(dev); + + /* + * Check to see if this packet has been transmitted + */ + if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) != + (TXSTAT_DONE | TXHDR_TRANSMIT)) + break; + + /* + * Update errors + */ + if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS))) + dev->stats.tx_packets++; + else { + dev->stats.tx_errors++; + if (status & TXSTAT_16COLLISIONS) + dev->stats.collisions += 16; + if (status & TXSTAT_BABBLED) + dev->stats.tx_fifo_errors++; + } + + tx_tail = (tx_tail + 1) & 15; + } while (--max_work); + + if (priv(dev)->tx_tail != tx_tail) { + priv(dev)->tx_tail = tx_tail; + netif_wake_queue(dev); + } +} + +static void ether3_banner(void) +{ + static unsigned version_printed = 0; + + if (net_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + +static const struct net_device_ops ether3_netdev_ops = { + .ndo_open = ether3_open, + .ndo_stop = ether3_close, + .ndo_start_xmit = ether3_sendpacket, + .ndo_set_rx_mode = ether3_setmulticastlist, + .ndo_tx_timeout = ether3_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int +ether3_probe(struct expansion_card *ec, const struct ecard_id *id) +{ + const struct ether3_data *data = id->data; + struct net_device 
*dev; + int bus_type, ret; + + ether3_banner(); + + ret = ecard_request_resources(ec); + if (ret) + goto out; + + dev = alloc_etherdev(sizeof(struct dev_priv)); + if (!dev) { + ret = -ENOMEM; + goto release; + } + + SET_NETDEV_DEV(dev, &ec->dev); + + priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); + if (!priv(dev)->base) { + ret = -ENOMEM; + goto free; + } + + ec->irqaddr = priv(dev)->base + data->base_offset; + ec->irqmask = 0xf0; + + priv(dev)->seeq = priv(dev)->base + data->base_offset; + dev->irq = ec->irq; + + ether3_addr(dev->dev_addr, ec); + + init_timer(&priv(dev)->timer); + + /* Reset card... + */ + ether3_outb(0x80, REG_CONFIG2 + 4); + bus_type = BUS_UNKNOWN; + udelay(4); + + /* Test using Receive Pointer (16-bit register) to find out + * how the ether3 is connected to the bus... + */ + if (ether3_probe_bus_8(dev, 0x100) && + ether3_probe_bus_8(dev, 0x201)) + bus_type = BUS_8; + + if (bus_type == BUS_UNKNOWN && + ether3_probe_bus_16(dev, 0x101) && + ether3_probe_bus_16(dev, 0x201)) + bus_type = BUS_16; + + switch (bus_type) { + case BUS_UNKNOWN: + printk(KERN_ERR "%s: unable to identify bus width\n", dev->name); + ret = -ENODEV; + goto free; + + case BUS_8: + printk(KERN_ERR "%s: %s found, but is an unsupported " + "8-bit card\n", dev->name, data->name); + ret = -ENODEV; + goto free; + + default: + break; + } + + if (ether3_init_2(dev)) { + ret = -ENODEV; + goto free; + } + + dev->netdev_ops = ðer3_netdev_ops; + dev->watchdog_timeo = 5 * HZ / 100; + + ret = register_netdev(dev); + if (ret) + goto free; + + printk("%s: %s in slot %d, %pM\n", + dev->name, data->name, ec->slot_no, dev->dev_addr); + + ecard_set_drvdata(ec, dev); + return 0; + + free: + free_netdev(dev); + release: + ecard_release_resources(ec); + out: + return ret; +} + +static void ether3_remove(struct expansion_card *ec) +{ + struct net_device *dev = ecard_get_drvdata(ec); + + ecard_set_drvdata(ec, NULL); + + unregister_netdev(dev); + free_netdev(dev); + ecard_release_resources(ec); +} + +static struct ether3_data ether3 = { + .name = "ether3", + .base_offset = 0, +}; + +static struct ether3_data etherb = { + .name = "etherb", + .base_offset = 0x800, +}; + +static const struct ecard_id ether3_ids[] = { + { MANU_ANT2, PROD_ANT_ETHER3, ðer3 }, + { MANU_ANT, PROD_ANT_ETHER3, ðer3 }, + { MANU_ANT, PROD_ANT_ETHERB, ðerb }, + { 0xffff, 0xffff } +}; + +static struct ecard_driver ether3_driver = { + .probe = ether3_probe, + .remove = ether3_remove, + .id_table = ether3_ids, + .drv = { + .name = "ether3", + }, +}; + +static int __init ether3_init(void) +{ + return ecard_register_driver(ðer3_driver); +} + +static void __exit ether3_exit(void) +{ + ecard_remove_driver(ðer3_driver); +} + +module_init(ether3_init); +module_exit(ether3_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/seeq/ether3.h b/drivers/net/ethernet/seeq/ether3.h new file mode 100644 index 000000000..2db63b08b --- /dev/null +++ b/drivers/net/ethernet/seeq/ether3.h @@ -0,0 +1,176 @@ +/* + * linux/drivers/acorn/net/ether3.h + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * network driver for Acorn/ANT Ether3 cards + */ + +#ifndef _LINUX_ether3_H +#define _LINUX_ether3_H + +/* use 0 for production, 1 for verification, >2 for debug. 
debug flags: */ +#define DEBUG_TX 2 +#define DEBUG_RX 4 +#define DEBUG_INT 8 +#define DEBUG_IC 16 +#ifndef NET_DEBUG +#define NET_DEBUG 0 +#endif + +#define priv(dev) ((struct dev_priv *)netdev_priv(dev)) + +/* Command register definitions & bits */ +#define REG_COMMAND (priv(dev)->seeq + 0x0000) +#define CMD_ENINTDMA 0x0001 +#define CMD_ENINTRX 0x0002 +#define CMD_ENINTTX 0x0004 +#define CMD_ENINTBUFWIN 0x0008 +#define CMD_ACKINTDMA 0x0010 +#define CMD_ACKINTRX 0x0020 +#define CMD_ACKINTTX 0x0040 +#define CMD_ACKINTBUFWIN 0x0080 +#define CMD_DMAON 0x0100 +#define CMD_RXON 0x0200 +#define CMD_TXON 0x0400 +#define CMD_DMAOFF 0x0800 +#define CMD_RXOFF 0x1000 +#define CMD_TXOFF 0x2000 +#define CMD_FIFOREAD 0x4000 +#define CMD_FIFOWRITE 0x8000 + +/* status register */ +#define REG_STATUS (priv(dev)->seeq + 0x0000) +#define STAT_ENINTSTAT 0x0001 +#define STAT_ENINTRX 0x0002 +#define STAT_ENINTTX 0x0004 +#define STAT_ENINTBUFWIN 0x0008 +#define STAT_INTDMA 0x0010 +#define STAT_INTRX 0x0020 +#define STAT_INTTX 0x0040 +#define STAT_INTBUFWIN 0x0080 +#define STAT_DMAON 0x0100 +#define STAT_RXON 0x0200 +#define STAT_TXON 0x0400 +#define STAT_FIFOFULL 0x2000 +#define STAT_FIFOEMPTY 0x4000 +#define STAT_FIFODIR 0x8000 + +/* configuration register 1 */ +#define REG_CONFIG1 (priv(dev)->seeq + 0x0040) +#define CFG1_BUFSELSTAT0 0x0000 +#define CFG1_BUFSELSTAT1 0x0001 +#define CFG1_BUFSELSTAT2 0x0002 +#define CFG1_BUFSELSTAT3 0x0003 +#define CFG1_BUFSELSTAT4 0x0004 +#define CFG1_BUFSELSTAT5 0x0005 +#define CFG1_ADDRPROM 0x0006 +#define CFG1_TRANSEND 0x0007 +#define CFG1_LOCBUFMEM 0x0008 +#define CFG1_INTVECTOR 0x0009 +#define CFG1_RECVSPECONLY 0x0000 +#define CFG1_RECVSPECBROAD 0x4000 +#define CFG1_RECVSPECBRMULTI 0x8000 +#define CFG1_RECVPROMISC 0xC000 + +/* The following aren't in 8004 */ +#define CFG1_DMABURSTCONT 0x0000 +#define CFG1_DMABURST800NS 0x0010 +#define CFG1_DMABURST1600NS 0x0020 +#define CFG1_DMABURST3200NS 0x0030 +#define CFG1_DMABURST1 0x0000 +#define CFG1_DMABURST4 0x0040 +#define CFG1_DMABURST8 0x0080 +#define CFG1_DMABURST16 0x00C0 +#define CFG1_RECVCOMPSTAT0 0x0100 +#define CFG1_RECVCOMPSTAT1 0x0200 +#define CFG1_RECVCOMPSTAT2 0x0400 +#define CFG1_RECVCOMPSTAT3 0x0800 +#define CFG1_RECVCOMPSTAT4 0x1000 +#define CFG1_RECVCOMPSTAT5 0x2000 + +/* configuration register 2 */ +#define REG_CONFIG2 (priv(dev)->seeq + 0x0080) +#define CFG2_BYTESWAP 0x0001 +#define CFG2_ERRENCRC 0x0008 +#define CFG2_ERRENDRIBBLE 0x0010 +#define CFG2_ERRSHORTFRAME 0x0020 +#define CFG2_SLOTSELECT 0x0040 +#define CFG2_PREAMSELECT 0x0080 +#define CFG2_ADDRLENGTH 0x0100 +#define CFG2_RECVCRC 0x0200 +#define CFG2_XMITNOCRC 0x0400 +#define CFG2_LOOPBACK 0x0800 +#define CFG2_CTRLO 0x1000 +#define CFG2_RESET 0x8000 + +#define REG_RECVEND (priv(dev)->seeq + 0x00c0) + +#define REG_BUFWIN (priv(dev)->seeq + 0x0100) + +#define REG_RECVPTR (priv(dev)->seeq + 0x0140) + +#define REG_TRANSMITPTR (priv(dev)->seeq + 0x0180) + +#define REG_DMAADDR (priv(dev)->seeq + 0x01c0) + +/* + * Cards transmit/receive headers + */ +#define TX_NEXT (0xffff) +#define TXHDR_ENBABBLEINT (1 << 16) +#define TXHDR_ENCOLLISIONINT (1 << 17) +#define TXHDR_EN16COLLISION (1 << 18) +#define TXHDR_ENSUCCESS (1 << 19) +#define TXHDR_DATAFOLLOWS (1 << 21) +#define TXHDR_CHAINCONTINUE (1 << 22) +#define TXHDR_TRANSMIT (1 << 23) +#define TXSTAT_BABBLED (1 << 24) +#define TXSTAT_COLLISION (1 << 25) +#define TXSTAT_16COLLISIONS (1 << 26) +#define TXSTAT_DONE (1 << 31) + +#define RX_NEXT (0xffff) +#define RXHDR_CHAINCONTINUE (1 << 6) +#define RXHDR_RECEIVE (1 << 
7) +#define RXSTAT_OVERSIZE (1 << 8) +#define RXSTAT_CRCERROR (1 << 9) +#define RXSTAT_DRIBBLEERROR (1 << 10) +#define RXSTAT_SHORTPACKET (1 << 11) +#define RXSTAT_DONE (1 << 15) + + +#define TX_START 0x0000 +#define TX_END 0x6000 +#define RX_START 0x6000 +#define RX_LEN 0xA000 +#define RX_END 0x10000 +/* must be a power of 2 and greater than MAX_TX_BUFFERED */ +#define MAX_TXED 16 +#define MAX_TX_BUFFERED 10 + +struct dev_priv { + void __iomem *base; + void __iomem *seeq; + struct { + unsigned int command; + unsigned int config1; + unsigned int config2; + } regs; + unsigned char tx_head; /* buffer nr to insert next packet */ + unsigned char tx_tail; /* buffer nr of transmitting packet */ + unsigned int rx_head; /* address to fetch next packet from */ + struct timer_list timer; + int broken; /* 0 = ok, 1 = something went wrong */ +}; + +struct ether3_data { + const char name[8]; + unsigned long base_offset; +}; + +#endif diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c new file mode 100644 index 000000000..ca7336605 --- /dev/null +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -0,0 +1,837 @@ +/* + * sgiseeq.c: Seeq8003 ethernet driver for SGI machines. + * + * Copyright (C) 1996 David S. Miller (davem@davemloft.net) + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sgiseeq.h" + +static char *sgiseeqstr = "SGI Seeq8003"; + +/* + * If you want speed, you do something silly, it always has worked for me. So, + * with that in mind, I've decided to make this driver look completely like a + * stupid Lance from a driver architecture perspective. Only difference is that + * here our "ring buffer" looks and acts like a real Lance one does but is + * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised + * how a stupid idea like this can pay off in performance, not to mention + * making this driver 2,000 times easier to write. ;-) + */ + +/* Tune these if we tend to run out often etc. */ +#define SEEQ_RX_BUFFERS 16 +#define SEEQ_TX_BUFFERS 16 + +#define PKT_BUF_SZ 1584 + +#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1)) +#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1)) +#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1)) +#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1)) + +#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \ + sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \ + sp->tx_old - sp->tx_new - 1) + +#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \ + (dma_addr_t)((unsigned long)(v) - \ + (unsigned long)((sp)->rx_desc))) + +/* Copy frames shorter than rx_copybreak, otherwise pass on up in + * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). + */ +static int rx_copybreak = 100; + +#define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *)) + +struct sgiseeq_rx_desc { + volatile struct hpc_dma_desc rdma; + u8 padding[PAD_SIZE]; + struct sk_buff *skb; +}; + +struct sgiseeq_tx_desc { + volatile struct hpc_dma_desc tdma; + u8 padding[PAD_SIZE]; + struct sk_buff *skb; +}; + +/* + * Warning: This structure is laid out in a certain way because HPC dma + * descriptors must be 8-byte aligned. So don't touch this without + * some care. 
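The PAD_SIZE padding above sizes both descriptor types to one fixed 128-byte slot each, which keeps the embedded HPC DMA descriptors aligned and lets VIRT_TO_DMA() turn a descriptor pointer into the DMA address the HPC expects by plain pointer arithmetic. A minimal sketch of that invariant, not part of the patch; it assumes the usual three-word struct hpc_dma_desc, no extra compiler padding, and BUILD_BUG_ON from <linux/build_bug.h>:

static inline void sgiseeq_desc_layout_check(void)	/* illustrative only */
{
	/* every descriptor occupies exactly one 128-byte slot */
	BUILD_BUG_ON(sizeof(struct sgiseeq_rx_desc) != 128);
	BUILD_BUG_ON(sizeof(struct sgiseeq_tx_desc) != 128);
}

/* hence the DMA address of, say, TX descriptor i handed to the HPC is just
 *	hregs->tx_ndptr = VIRT_TO_DMA(sp, &sp->tx_desc[i]);
 * as the driver does below in init_seeq() and kick_tx().
 */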
+ */ +struct sgiseeq_init_block { /* Note the name ;-) */ + struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS]; + struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS]; +}; + +struct sgiseeq_private { + struct sgiseeq_init_block *srings; + dma_addr_t srings_dma; + + /* Ptrs to the descriptors in uncached space. */ + struct sgiseeq_rx_desc *rx_desc; + struct sgiseeq_tx_desc *tx_desc; + + char *name; + struct hpc3_ethregs *hregs; + struct sgiseeq_regs *sregs; + + /* Ring entry counters. */ + unsigned int rx_new, tx_new; + unsigned int rx_old, tx_old; + + int is_edlc; + unsigned char control; + unsigned char mode; + + spinlock_t tx_lock; +}; + +static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr) +{ + dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), + DMA_FROM_DEVICE); +} + +static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) +{ + dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), + DMA_TO_DEVICE); +} + +static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs) +{ + hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ; + udelay(20); + hregs->reset = 0; +} + +static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs, + struct sgiseeq_regs *sregs) +{ + hregs->rx_ctrl = hregs->tx_ctrl = 0; + hpc3_eth_reset(hregs); +} + +#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \ + SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC) + +static inline void seeq_go(struct sgiseeq_private *sp, + struct hpc3_ethregs *hregs, + struct sgiseeq_regs *sregs) +{ + sregs->rstat = sp->mode | RSTAT_GO_BITS; + hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE; +} + +static inline void __sgiseeq_set_mac_address(struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + struct sgiseeq_regs *sregs = sp->sregs; + int i; + + sregs->tstat = SEEQ_TCMD_RB0; + for (i = 0; i < 6; i++) + sregs->rw.eth_addr[i] = dev->dev_addr[i]; +} + +static int sgiseeq_set_mac_address(struct net_device *dev, void *addr) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + struct sockaddr *sa = addr; + + memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + + spin_lock_irq(&sp->tx_lock); + __sgiseeq_set_mac_address(dev); + spin_unlock_irq(&sp->tx_lock); + + return 0; +} + +#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD) +#define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE) +#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT)) + +static int seeq_init_ring(struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + int i; + + netif_stop_queue(dev); + sp->rx_new = sp->tx_new = 0; + sp->rx_old = sp->tx_old = 0; + + __sgiseeq_set_mac_address(dev); + + /* Setup tx ring. */ + for(i = 0; i < SEEQ_TX_BUFFERS; i++) { + sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT; + dma_sync_desc_dev(dev, &sp->tx_desc[i]); + } + + /* And now the rx ring. 
*/ + for (i = 0; i < SEEQ_RX_BUFFERS; i++) { + if (!sp->rx_desc[i].skb) { + dma_addr_t dma_addr; + struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ); + + if (skb == NULL) + return -ENOMEM; + skb_reserve(skb, 2); + dma_addr = dma_map_single(dev->dev.parent, + skb->data - 2, + PKT_BUF_SZ, DMA_FROM_DEVICE); + sp->rx_desc[i].skb = skb; + sp->rx_desc[i].rdma.pbuf = dma_addr; + } + sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT; + dma_sync_desc_dev(dev, &sp->rx_desc[i]); + } + sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR; + dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]); + return 0; +} + +static void seeq_purge_ring(struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + int i; + + /* clear tx ring. */ + for (i = 0; i < SEEQ_TX_BUFFERS; i++) { + if (sp->tx_desc[i].skb) { + dev_kfree_skb(sp->tx_desc[i].skb); + sp->tx_desc[i].skb = NULL; + } + } + + /* And now the rx ring. */ + for (i = 0; i < SEEQ_RX_BUFFERS; i++) { + if (sp->rx_desc[i].skb) { + dev_kfree_skb(sp->rx_desc[i].skb); + sp->rx_desc[i].skb = NULL; + } + } +} + +#ifdef DEBUG +static struct sgiseeq_private *gpriv; +static struct net_device *gdev; + +static void sgiseeq_dump_rings(void) +{ + static int once; + struct sgiseeq_rx_desc *r = gpriv->rx_desc; + struct sgiseeq_tx_desc *t = gpriv->tx_desc; + struct hpc3_ethregs *hregs = gpriv->hregs; + int i; + + if (once) + return; + once++; + printk("RING DUMP:\n"); + for (i = 0; i < SEEQ_RX_BUFFERS; i++) { + printk("RX [%d]: @(%p) [%08x,%08x,%08x] ", + i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, + r[i].rdma.pnext); + i += 1; + printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", + i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo, + r[i].rdma.pnext); + } + for (i = 0; i < SEEQ_TX_BUFFERS; i++) { + printk("TX [%d]: @(%p) [%08x,%08x,%08x] ", + i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, + t[i].tdma.pnext); + i += 1; + printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n", + i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo, + t[i].tdma.pnext); + } + printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n", + gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old); + printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n", + hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl); + printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n", + hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl); +} +#endif + +#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF) +#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2) + +static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, + struct sgiseeq_regs *sregs) +{ + struct hpc3_ethregs *hregs = sp->hregs; + int err; + + reset_hpc3_and_seeq(hregs, sregs); + err = seeq_init_ring(dev); + if (err) + return err; + + /* Setup to field the proper interrupt types. 
*/ + if (sp->is_edlc) { + sregs->tstat = TSTAT_INIT_EDLC; + sregs->rw.wregs.control = sp->control; + sregs->rw.wregs.frame_gap = 0; + } else { + sregs->tstat = TSTAT_INIT_SEEQ; + } + + hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc); + hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc); + + seeq_go(sp, hregs, sregs); + return 0; +} + +static void record_rx_errors(struct net_device *dev, unsigned char status) +{ + if (status & SEEQ_RSTAT_OVERF || + status & SEEQ_RSTAT_SFRAME) + dev->stats.rx_over_errors++; + if (status & SEEQ_RSTAT_CERROR) + dev->stats.rx_crc_errors++; + if (status & SEEQ_RSTAT_DERROR) + dev->stats.rx_frame_errors++; + if (status & SEEQ_RSTAT_REOF) + dev->stats.rx_errors++; +} + +static inline void rx_maybe_restart(struct sgiseeq_private *sp, + struct hpc3_ethregs *hregs, + struct sgiseeq_regs *sregs) +{ + if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) { + hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new); + seeq_go(sp, hregs, sregs); + } +} + +static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp, + struct hpc3_ethregs *hregs, + struct sgiseeq_regs *sregs) +{ + struct sgiseeq_rx_desc *rd; + struct sk_buff *skb = NULL; + struct sk_buff *newskb; + unsigned char pkt_status; + int len = 0; + unsigned int orig_end = PREV_RX(sp->rx_new); + + /* Service every received packet. */ + rd = &sp->rx_desc[sp->rx_new]; + dma_sync_desc_cpu(dev, rd); + while (!(rd->rdma.cntinfo & HPCDMA_OWN)) { + len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3; + dma_unmap_single(dev->dev.parent, rd->rdma.pbuf, + PKT_BUF_SZ, DMA_FROM_DEVICE); + pkt_status = rd->skb->data[len]; + if (pkt_status & SEEQ_RSTAT_FIG) { + /* Packet is OK. */ + /* We don't want to receive our own packets */ + if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) { + if (len > rx_copybreak) { + skb = rd->skb; + newskb = netdev_alloc_skb(dev, PKT_BUF_SZ); + if (!newskb) { + newskb = skb; + skb = NULL; + goto memory_squeeze; + } + skb_reserve(newskb, 2); + } else { + skb = netdev_alloc_skb_ip_align(dev, len); + if (skb) + skb_copy_to_linear_data(skb, rd->skb->data, len); + + newskb = rd->skb; + } +memory_squeeze: + if (skb) { + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + } else { + dev->stats.rx_dropped++; + } + } else { + /* Silently drop my own packets */ + newskb = rd->skb; + } + } else { + record_rx_errors(dev, pkt_status); + newskb = rd->skb; + } + rd->skb = newskb; + rd->rdma.pbuf = dma_map_single(dev->dev.parent, + newskb->data - 2, + PKT_BUF_SZ, DMA_FROM_DEVICE); + + /* Return the entry to the ring pool. 
*/ + rd->rdma.cntinfo = RCNTINFO_INIT; + sp->rx_new = NEXT_RX(sp->rx_new); + dma_sync_desc_dev(dev, rd); + rd = &sp->rx_desc[sp->rx_new]; + dma_sync_desc_cpu(dev, rd); + } + dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]); + sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR); + dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]); + dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); + sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR; + dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]); + rx_maybe_restart(sp, hregs, sregs); +} + +static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp, + struct sgiseeq_regs *sregs) +{ + if (sp->is_edlc) { + sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT); + sregs->rw.wregs.control = sp->control; + } +} + +static inline void kick_tx(struct net_device *dev, + struct sgiseeq_private *sp, + struct hpc3_ethregs *hregs) +{ + struct sgiseeq_tx_desc *td; + int i = sp->tx_old; + + /* If the HPC aint doin nothin, and there are more packets + * with ETXD cleared and XIU set we must make very certain + * that we restart the HPC else we risk locking up the + * adapter. The following code is only safe iff the HPCDMA + * is not active! + */ + td = &sp->tx_desc[i]; + dma_sync_desc_cpu(dev, td); + while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) == + (HPCDMA_XIU | HPCDMA_ETXD)) { + i = NEXT_TX(i); + td = &sp->tx_desc[i]; + dma_sync_desc_cpu(dev, td); + } + if (td->tdma.cntinfo & HPCDMA_XIU) { + hregs->tx_ndptr = VIRT_TO_DMA(sp, td); + hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; + } +} + +static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp, + struct hpc3_ethregs *hregs, + struct sgiseeq_regs *sregs) +{ + struct sgiseeq_tx_desc *td; + unsigned long status = hregs->tx_ctrl; + int j; + + tx_maybe_reset_collisions(sp, sregs); + + if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { + /* Oops, HPC detected some sort of error. */ + if (status & SEEQ_TSTAT_R16) + dev->stats.tx_aborted_errors++; + if (status & SEEQ_TSTAT_UFLOW) + dev->stats.tx_fifo_errors++; + if (status & SEEQ_TSTAT_LCLS) + dev->stats.collisions++; + } + + /* Ack 'em... */ + for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) { + td = &sp->tx_desc[j]; + + dma_sync_desc_cpu(dev, td); + if (!(td->tdma.cntinfo & (HPCDMA_XIU))) + break; + if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) { + if (!(status & HPC3_ETXCTRL_ACTIVE)) { + hregs->tx_ndptr = VIRT_TO_DMA(sp, td); + hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE; + } + break; + } + dev->stats.tx_packets++; + sp->tx_old = NEXT_TX(sp->tx_old); + td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); + td->tdma.cntinfo |= HPCDMA_EOX; + if (td->skb) { + dev_kfree_skb_any(td->skb); + td->skb = NULL; + } + dma_sync_desc_dev(dev, td); + } +} + +static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct sgiseeq_private *sp = netdev_priv(dev); + struct hpc3_ethregs *hregs = sp->hregs; + struct sgiseeq_regs *sregs = sp->sregs; + + spin_lock(&sp->tx_lock); + + /* Ack the IRQ and set software state. */ + hregs->reset = HPC3_ERST_CLRIRQ; + + /* Always check for received packets. */ + sgiseeq_rx(dev, sp, hregs, sregs); + + /* Only check for tx acks if we have something queued. 
*/ + if (sp->tx_old != sp->tx_new) + sgiseeq_tx(dev, sp, hregs, sregs); + + if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) { + netif_wake_queue(dev); + } + spin_unlock(&sp->tx_lock); + + return IRQ_HANDLED; +} + +static int sgiseeq_open(struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + struct sgiseeq_regs *sregs = sp->sregs; + unsigned int irq = dev->irq; + int err; + + if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) { + printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq); + return -EAGAIN; + } + + err = init_seeq(dev, sp, sregs); + if (err) + goto out_free_irq; + + netif_start_queue(dev); + + return 0; + +out_free_irq: + free_irq(irq, dev); + + return err; +} + +static int sgiseeq_close(struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + struct sgiseeq_regs *sregs = sp->sregs; + unsigned int irq = dev->irq; + + netif_stop_queue(dev); + + /* Shutdown the Seeq. */ + reset_hpc3_and_seeq(sp->hregs, sregs); + free_irq(irq, dev); + seeq_purge_ring(dev); + + return 0; +} + +static inline int sgiseeq_reset(struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + struct sgiseeq_regs *sregs = sp->sregs; + int err; + + err = init_seeq(dev, sp, sregs); + if (err) + return err; + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + + return 0; +} + +static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + struct hpc3_ethregs *hregs = sp->hregs; + unsigned long flags; + struct sgiseeq_tx_desc *td; + int len, entry; + + spin_lock_irqsave(&sp->tx_lock, flags); + + /* Setup... */ + len = skb->len; + if (len < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) { + spin_unlock_irqrestore(&sp->tx_lock, flags); + return NETDEV_TX_OK; + } + len = ETH_ZLEN; + } + + dev->stats.tx_bytes += len; + entry = sp->tx_new; + td = &sp->tx_desc[entry]; + dma_sync_desc_cpu(dev, td); + + /* Create entry. There are so many races with adding a new + * descriptor to the chain: + * 1) Assume that the HPC is off processing a DMA chain while + * we are changing all of the following. + * 2) Do not allow the HPC to look at a new descriptor until + * we have completely set up its state. This means, do + * not clear HPCDMA_EOX in the current last descriptor + * until the one we are adding looks consistent and could + * be processed right now. + * 3) The tx interrupt code must notice when we've added a new + * entry and the HPC got to the end of the chain before we + * added this new entry and restarted it. + */ + td->skb = skb; + td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data, + len, DMA_TO_DEVICE); + td->tdma.cntinfo = (len & HPCDMA_BCNT) | + HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX; + dma_sync_desc_dev(dev, td); + if (sp->tx_old != sp->tx_new) { + struct sgiseeq_tx_desc *backend; + + backend = &sp->tx_desc[PREV_TX(sp->tx_new)]; + dma_sync_desc_cpu(dev, backend); + backend->tdma.cntinfo &= ~HPCDMA_EOX; + dma_sync_desc_dev(dev, backend); + } + sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */ + + /* Maybe kick the HPC back into motion.
*/ + if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE)) + kick_tx(dev, sp, hregs); + + if (!TX_BUFFS_AVAIL(sp)) + netif_stop_queue(dev); + spin_unlock_irqrestore(&sp->tx_lock, flags); + + return NETDEV_TX_OK; +} + +static void timeout(struct net_device *dev) +{ + printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name); + sgiseeq_reset(dev); + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +static void sgiseeq_set_multicast(struct net_device *dev) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + unsigned char oldmode = sp->mode; + + if(dev->flags & IFF_PROMISC) + sp->mode = SEEQ_RCMD_RANY; + else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) + sp->mode = SEEQ_RCMD_RBMCAST; + else + sp->mode = SEEQ_RCMD_RBCAST; + + /* XXX I know this sucks, but is there a better way to reprogram + * XXX the receiver? At least, this shouldn't happen too often. + */ + + if (oldmode != sp->mode) + sgiseeq_reset(dev); +} + +static inline void setup_tx_ring(struct net_device *dev, + struct sgiseeq_tx_desc *buf, + int nbufs) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + int i = 0; + + while (i < (nbufs - 1)) { + buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); + buf[i].tdma.pbuf = 0; + dma_sync_desc_dev(dev, &buf[i]); + i++; + } + buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf); + dma_sync_desc_dev(dev, &buf[i]); +} + +static inline void setup_rx_ring(struct net_device *dev, + struct sgiseeq_rx_desc *buf, + int nbufs) +{ + struct sgiseeq_private *sp = netdev_priv(dev); + int i = 0; + + while (i < (nbufs - 1)) { + buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1); + buf[i].rdma.pbuf = 0; + dma_sync_desc_dev(dev, &buf[i]); + i++; + } + buf[i].rdma.pbuf = 0; + buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf); + dma_sync_desc_dev(dev, &buf[i]); +} + +static const struct net_device_ops sgiseeq_netdev_ops = { + .ndo_open = sgiseeq_open, + .ndo_stop = sgiseeq_close, + .ndo_start_xmit = sgiseeq_start_xmit, + .ndo_tx_timeout = timeout, + .ndo_set_rx_mode = sgiseeq_set_multicast, + .ndo_set_mac_address = sgiseeq_set_mac_address, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static int sgiseeq_probe(struct platform_device *pdev) +{ + struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev); + struct hpc3_regs *hpcregs = pd->hpc; + struct sgiseeq_init_block *sr; + unsigned int irq = pd->irq; + struct sgiseeq_private *sp; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof (struct sgiseeq_private)); + if (!dev) { + err = -ENOMEM; + goto err_out; + } + + platform_set_drvdata(pdev, dev); + sp = netdev_priv(dev); + + /* Make private data page aligned */ + sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings), + &sp->srings_dma, GFP_KERNEL); + if (!sr) { + printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); + err = -ENOMEM; + goto err_out_free_dev; + } + sp->srings = sr; + sp->rx_desc = sp->srings->rxvector; + sp->tx_desc = sp->srings->txvector; + spin_lock_init(&sp->tx_lock); + + /* A couple calculations now, saves many cycles later. 
setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); + setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS); + + memcpy(dev->dev_addr, pd->mac, ETH_ALEN); + +#ifdef DEBUG + gpriv = sp; + gdev = dev; +#endif + sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0]; + sp->hregs = &hpcregs->ethregs; + sp->name = sgiseeqstr; + sp->mode = SEEQ_RCMD_RBCAST; + + /* Setup PIO and DMA transfer timing */ + sp->hregs->pconfig = 0x161; + sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | + HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; + + /* Setup PIO and DMA transfer timing */ + sp->hregs->pconfig = 0x161; + sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | + HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; + + /* Reset the chip. */ + hpc3_eth_reset(sp->hregs); + + sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff); + if (sp->is_edlc) + sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT | + SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT | + SEEQ_CTRL_ENCARR; + + dev->netdev_ops = &sgiseeq_netdev_ops; + dev->watchdog_timeo = (200 * HZ) / 1000; + dev->irq = irq; + + if (register_netdev(dev)) { + printk(KERN_ERR "Sgiseeq: Cannot register net device, " + "aborting.\n"); + err = -ENODEV; + goto err_out_free_page; + } + + printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); + + return 0; + +err_out_free_page: + free_page((unsigned long) sp->srings); +err_out_free_dev: + free_netdev(dev); + +err_out: + return err; +} + +static int __exit sgiseeq_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct sgiseeq_private *sp = netdev_priv(dev); + + unregister_netdev(dev); + dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma); + free_netdev(dev); + + return 0; +} + +static struct platform_driver sgiseeq_driver = { + .probe = sgiseeq_probe, + .remove = __exit_p(sgiseeq_remove), + .driver = { + .name = "sgiseeq", + } +}; + +module_platform_driver(sgiseeq_driver); + +MODULE_DESCRIPTION("SGI Seeq 8003 driver"); +MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:sgiseeq"); diff --git a/drivers/net/ethernet/seeq/sgiseeq.h b/drivers/net/ethernet/seeq/sgiseeq.h new file mode 100644 index 000000000..2211e2987 --- /dev/null +++ b/drivers/net/ethernet/seeq/sgiseeq.h @@ -0,0 +1,103 @@ +/* + * sgiseeq.h: Defines for the Seeq8003 ethernet controller. + * + * Copyright (C) 1996 David S.
Miller (davem@davemloft.net) + */ +#ifndef _SGISEEQ_H +#define _SGISEEQ_H + +struct sgiseeq_wregs { + volatile unsigned int multicase_high[2]; + volatile unsigned int frame_gap; + volatile unsigned int control; +}; + +struct sgiseeq_rregs { + volatile unsigned int collision_tx[2]; + volatile unsigned int collision_all[2]; + volatile unsigned int _unused0; + volatile unsigned int rflags; +}; + +struct sgiseeq_regs { + union { + volatile unsigned int eth_addr[6]; + volatile unsigned int multicast_low[6]; + struct sgiseeq_wregs wregs; + struct sgiseeq_rregs rregs; + } rw; + volatile unsigned int rstat; + volatile unsigned int tstat; +}; + +/* Seeq8003 receive status register */ +#define SEEQ_RSTAT_OVERF 0x001 /* Overflow */ +#define SEEQ_RSTAT_CERROR 0x002 /* CRC error */ +#define SEEQ_RSTAT_DERROR 0x004 /* Dribble error */ +#define SEEQ_RSTAT_SFRAME 0x008 /* Short frame */ +#define SEEQ_RSTAT_REOF 0x010 /* Received end of frame */ +#define SEEQ_RSTAT_FIG 0x020 /* Frame is good */ +#define SEEQ_RSTAT_TIMEO 0x040 /* Timeout, or late receive */ +#define SEEQ_RSTAT_WHICH 0x080 /* Which status, 1=old 0=new */ +#define SEEQ_RSTAT_LITTLE 0x100 /* DMA is done in little endian format */ +#define SEEQ_RSTAT_SDMA 0x200 /* DMA has started */ +#define SEEQ_RSTAT_ADMA 0x400 /* DMA is active */ +#define SEEQ_RSTAT_ROVERF 0x800 /* Receive buffer overflow */ + +/* Seeq8003 receive command register */ +#define SEEQ_RCMD_RDISAB 0x000 /* Disable receiver on the Seeq8003 */ +#define SEEQ_RCMD_IOVERF 0x001 /* IRQ on buffer overflows */ +#define SEEQ_RCMD_ICRC 0x002 /* IRQ on CRC errors */ +#define SEEQ_RCMD_IDRIB 0x004 /* IRQ on dribble errors */ +#define SEEQ_RCMD_ISHORT 0x008 /* IRQ on short frames */ +#define SEEQ_RCMD_IEOF 0x010 /* IRQ on end of frame */ +#define SEEQ_RCMD_IGOOD 0x020 /* IRQ on good frames */ +#define SEEQ_RCMD_RANY 0x040 /* Receive any frame */ +#define SEEQ_RCMD_RBCAST 0x080 /* Receive broadcasts */ +#define SEEQ_RCMD_RBMCAST 0x0c0 /* Receive broadcasts/multicasts */ + +/* Seeq8003 transmit status register */ +#define SEEQ_TSTAT_UFLOW 0x001 /* Transmit buffer underflow */ +#define SEEQ_TSTAT_CLS 0x002 /* Collision detected */ +#define SEEQ_TSTAT_R16 0x004 /* Did 16 retries to tx a frame */ +#define SEEQ_TSTAT_PTRANS 0x008 /* Packet was transmitted ok */ +#define SEEQ_TSTAT_LCLS 0x010 /* Late collision occurred */ +#define SEEQ_TSTAT_WHICH 0x080 /* Which status, 1=old 0=new */ +#define SEEQ_TSTAT_TLE 0x100 /* DMA is done in little endian format */ +#define SEEQ_TSTAT_SDMA 0x200 /* DMA has started */ +#define SEEQ_TSTAT_ADMA 0x400 /* DMA is active */ + +/* Seeq8003 transmit command register */ +#define SEEQ_TCMD_RB0 0x00 /* Register bank zero w/station addr */ +#define SEEQ_TCMD_IUF 0x01 /* IRQ on tx underflow */ +#define SEEQ_TCMD_IC 0x02 /* IRQ on collisions */ +#define SEEQ_TCMD_I16 0x04 /* IRQ after 16 failed attempts to tx frame */ +#define SEEQ_TCMD_IPT 0x08 /* IRQ when packet successfully transmitted */ +#define SEEQ_TCMD_RB1 0x20 /* Register bank one w/multi-cast low byte */ +#define SEEQ_TCMD_RB2 0x40 /* Register bank two w/multi-cast high byte */ + +/* Seeq8003 control register */ +#define SEEQ_CTRL_XCNT 0x01 +#define SEEQ_CTRL_ACCNT 0x02 +#define SEEQ_CTRL_SFLAG 0x04 +#define SEEQ_CTRL_EMULTI 0x08 +#define SEEQ_CTRL_ESHORT 0x10 +#define SEEQ_CTRL_ENCARR 0x20 + +/* Seeq8003 control registers on the SGI Hollywood HPC. 
*/ +#define SEEQ_HPIO_P1BITS 0x00000001 /* cycles to stay in P1 phase for PIO */ +#define SEEQ_HPIO_P2BITS 0x00000060 /* cycles to stay in P2 phase for PIO */ +#define SEEQ_HPIO_P3BITS 0x00000100 /* cycles to stay in P3 phase for PIO */ +#define SEEQ_HDMA_D1BITS 0x00000006 /* cycles to stay in D1 phase for DMA */ +#define SEEQ_HDMA_D2BITS 0x00000020 /* cycles to stay in D2 phase for DMA */ +#define SEEQ_HDMA_D3BITS 0x00000000 /* cycles to stay in D3 phase for DMA */ +#define SEEQ_HDMA_TIMEO 0x00030000 /* cycles for DMA timeout */ +#define SEEQ_HCTL_NORM 0x00000000 /* Normal operation mode */ +#define SEEQ_HCTL_RESET 0x00000001 /* Reset Seeq8003 and HPC interface */ +#define SEEQ_HCTL_IPEND 0x00000002 /* IRQ is pending for the chip */ +#define SEEQ_HCTL_IPG 0x00001000 /* Inter-packet gap */ +#define SEEQ_HCTL_RFIX 0x00002000 /* At rxdc, clear end-of-packet */ +#define SEEQ_HCTL_EFIX 0x00004000 /* fixes intr status bit settings */ +#define SEEQ_HCTL_IFIX 0x00008000 /* enable startup timeouts */ + +#endif /* !(_SGISEEQ_H) */ diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig new file mode 100644 index 000000000..088921294 --- /dev/null +++ b/drivers/net/ethernet/sfc/Kconfig @@ -0,0 +1,38 @@ +config SFC + tristate "Solarflare SFC4000/SFC9000/SFC9100-family support" + depends on PCI + select MDIO + select CRC32 + select I2C + select I2C_ALGOBIT + select PTP_1588_CLOCK + ---help--- + This driver supports 10/40-gigabit Ethernet cards based on + the Solarflare SFC4000, SFC9000-family and SFC9100-family + controllers. + + To compile this driver as a module, choose M here. The module + will be called sfc. +config SFC_MTD + bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support" + depends on SFC && MTD && !(SFC=y && MTD=m) + default y + ---help--- + This exposes the on-board flash and/or EEPROM as MTD devices + (e.g. /dev/mtd1). This is required to update the firmware or + the boot configuration under Linux. +config SFC_MCDI_MON + bool "Solarflare SFC9000/SFC9100-family hwmon support" + depends on SFC && HWMON && !(SFC=y && HWMON=m) + default y + ---help--- + This exposes the on-board firmware-managed sensors as a + hardware monitor device. +config SFC_SRIOV + bool "Solarflare SFC9000-family SR-IOV support" + depends on SFC && PCI_IOV + default y + ---help--- + This enables support for the SFC9000 I/O Virtualization + features, allowing accelerated network performance in + virtualized environments. diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile new file mode 100644 index 000000000..3a83c0dca --- /dev/null +++ b/drivers/net/ethernet/sfc/Makefile @@ -0,0 +1,8 @@ +sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \ + rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ + tenxpress.o txc43128_phy.o falcon_boards.o \ + mcdi.o mcdi_port.o mcdi_mon.o ptp.o +sfc-$(CONFIG_SFC_MTD) += mtd.o +sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o + +obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h new file mode 100644 index 000000000..17d83f37f --- /dev/null +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -0,0 +1,542 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_BITFIELD_H +#define EFX_BITFIELD_H + +/* + * Efx bitfield access + * + * Efx NICs make extensive use of bitfields up to 128 bits + * wide. Since there is no native 128-bit datatype on most systems, + * and since 64-bit datatypes are inefficient on 32-bit systems and + * vice versa, we wrap accesses in a way that uses the most efficient + * datatype. + * + * The NICs are PCI devices and therefore little-endian. Since most + * of the quantities that we deal with are DMAed to/from host memory, + * we define our datatypes (efx_oword_t, efx_qword_t and + * efx_dword_t) to be little-endian. + */ + +/* Lowest bit numbers and widths */ +#define EFX_DUMMY_FIELD_LBN 0 +#define EFX_DUMMY_FIELD_WIDTH 0 +#define EFX_WORD_0_LBN 0 +#define EFX_WORD_0_WIDTH 16 +#define EFX_WORD_1_LBN 16 +#define EFX_WORD_1_WIDTH 16 +#define EFX_DWORD_0_LBN 0 +#define EFX_DWORD_0_WIDTH 32 +#define EFX_DWORD_1_LBN 32 +#define EFX_DWORD_1_WIDTH 32 +#define EFX_DWORD_2_LBN 64 +#define EFX_DWORD_2_WIDTH 32 +#define EFX_DWORD_3_LBN 96 +#define EFX_DWORD_3_WIDTH 32 +#define EFX_QWORD_0_LBN 0 +#define EFX_QWORD_0_WIDTH 64 + +/* Specified attribute (e.g. LBN) of the specified field */ +#define EFX_VAL(field, attribute) field ## _ ## attribute +/* Low bit number of the specified field */ +#define EFX_LOW_BIT(field) EFX_VAL(field, LBN) +/* Bit width of the specified field */ +#define EFX_WIDTH(field) EFX_VAL(field, WIDTH) +/* High bit number of the specified field */ +#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1) +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 64 bits. + */ +#define EFX_MASK64(width) \ + ((width) == 64 ? ~((u64) 0) : \ + (((((u64) 1) << (width))) - 1)) + +/* Mask equal in width to the specified field. + * + * For example, a field with width 5 would have a mask of 0x1f. + * + * The maximum width mask that can be generated is 32 bits. Use + * EFX_MASK64 for higher width fields. + */ +#define EFX_MASK32(width) \ + ((width) == 32 ? ~((u32) 0) : \ + (((((u32) 1) << (width))) - 1)) + +/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ +typedef union efx_dword { + __le32 u32[1]; +} efx_dword_t; + +/* A quadword (i.e. 8 byte) datatype - little-endian in HW */ +typedef union efx_qword { + __le64 u64[1]; + __le32 u32[2]; + efx_dword_t dword[2]; +} efx_qword_t; + +/* An octword (eight-word, i.e. 
16 byte) datatype - little-endian in HW */ +typedef union efx_oword { + __le64 u64[2]; + efx_qword_t qword[2]; + __le32 u32[4]; + efx_dword_t dword[4]; +} efx_oword_t; + +/* Format string and value expanders for printk */ +#define EFX_DWORD_FMT "%08x" +#define EFX_QWORD_FMT "%08x:%08x" +#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x" +#define EFX_DWORD_VAL(dword) \ + ((unsigned int) le32_to_cpu((dword).u32[0])) +#define EFX_QWORD_VAL(qword) \ + ((unsigned int) le32_to_cpu((qword).u32[1])), \ + ((unsigned int) le32_to_cpu((qword).u32[0])) +#define EFX_OWORD_VAL(oword) \ + ((unsigned int) le32_to_cpu((oword).u32[3])), \ + ((unsigned int) le32_to_cpu((oword).u32[2])), \ + ((unsigned int) le32_to_cpu((oword).u32[1])), \ + ((unsigned int) le32_to_cpu((oword).u32[0])) + +/* + * Extract bit field portion [low,high) from the native-endian element + * which contains bits [min,max). + * + * For example, suppose "element" represents the high 32 bits of a + * 64-bit value, and we wish to extract the bits belonging to the bit + * field occupying bits 28-45 of this 64-bit value. + * + * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give + * + * ( element ) << 4 + * + * The result will contain the relevant bits filled in in the range + * [0,high-low), with garbage in bits [high-low+1,...). + */ +#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ + ((low) > (max) || (high) < (min) ? 0 : \ + (low) > (min) ? \ + (native_element) >> ((low) - (min)) : \ + (native_element) << ((min) - (low))) + +/* + * Extract bit field portion [low,high) from the 64-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT64(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high) + +/* + * Extract bit field portion [low,high) from the 32-bit little-endian + * element which contains bits [min,max) + */ +#define EFX_EXTRACT32(element, min, max, low, high) \ + EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) + +#define EFX_EXTRACT_OWORD64(oword, low, high) \ + ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ + EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD64(qword, low, high) \ + (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \ + EFX_MASK64((high) + 1 - (low))) + +#define EFX_EXTRACT_OWORD32(oword, low, high) \ + ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ + EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ + EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_QWORD32(qword, low, high) \ + ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ + EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_EXTRACT_DWORD(dword, low, high) \ + (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \ + EFX_MASK32((high) + 1 - (low))) + +#define EFX_OWORD_FIELD64(oword, field) \ + EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD64(qword, field) \ + EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_OWORD_FIELD32(oword, field) \ + EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_QWORD_FIELD32(qword, field) \ + EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field)) + +#define EFX_DWORD_FIELD(dword, field) \ + EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \ + 
EFX_HIGH_BIT(field)) + +#define EFX_OWORD_IS_ZERO64(oword) \ + (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) + +#define EFX_QWORD_IS_ZERO64(qword) \ + (((qword).u64[0]) == (__force __le64) 0) + +#define EFX_OWORD_IS_ZERO32(oword) \ + (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \ + == (__force __le32) 0) + +#define EFX_QWORD_IS_ZERO32(qword) \ + (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0) + +#define EFX_DWORD_IS_ZERO(dword) \ + (((dword).u32[0]) == (__force __le32) 0) + +#define EFX_OWORD_IS_ALL_ONES64(oword) \ + (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0)) + +#define EFX_QWORD_IS_ALL_ONES64(qword) \ + ((qword).u64[0] == ~((__force __le64) 0)) + +#define EFX_OWORD_IS_ALL_ONES32(oword) \ + (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \ + == ~((__force __le32) 0)) + +#define EFX_QWORD_IS_ALL_ONES32(qword) \ + (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0)) + +#define EFX_DWORD_IS_ALL_ONES(dword) \ + ((dword).u32[0] == ~((__force __le32) 0)) + +#if BITS_PER_LONG == 64 +#define EFX_OWORD_FIELD EFX_OWORD_FIELD64 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD64 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64 +#else +#define EFX_OWORD_FIELD EFX_OWORD_FIELD32 +#define EFX_QWORD_FIELD EFX_QWORD_FIELD32 +#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 +#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 +#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32 +#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32 +#endif + +/* + * Construct bit field portion + * + * Creates the portion of the bit field [low,high) that lies within + * the range [min,max). + */ +#define EFX_INSERT_NATIVE64(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u64) (value)) << (low - min)) : \ + (((u64) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE32(min, max, low, high, value) \ + (((low > max) || (high < min)) ? 0 : \ + ((low > min) ? \ + (((u32) (value)) << (low - min)) : \ + (((u32) (value)) >> (min - low)))) + +#define EFX_INSERT_NATIVE(min, max, low, high, value) \ + ((((max - min) >= 32) || ((high - low) >= 32)) ? \ + EFX_INSERT_NATIVE64(min, max, low, high, value) : \ + EFX_INSERT_NATIVE32(min, max, low, high, value)) + +/* + * Construct bit field portion + * + * Creates the portion of the named bit field that lies within the + * range [min,max). + */ +#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \ + EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +/* + * Construct bit field + * + * Creates the portion of the named bit fields that lie within the + * range [min,max). 
+ */ +#define EFX_INSERT_FIELDS_NATIVE(min, max, \ + field1, value1, \ + field2, value2, \ + field3, value3, \ + field4, value4, \ + field5, value5, \ + field6, value6, \ + field7, value7, \ + field8, value8, \ + field9, value9, \ + field10, value10) \ + (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10))) + +#define EFX_INSERT_FIELDS64(...) \ + cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_INSERT_FIELDS32(...) \ + cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) + +#define EFX_POPULATE_OWORD64(oword, ...) do { \ + (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD64(qword, ...) do { \ + (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_OWORD32(oword, ...) do { \ + (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \ + (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_QWORD32(qword, ...) do { \ + (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ + } while (0) + +#define EFX_POPULATE_DWORD(dword, ...) do { \ + (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ + } while (0) + +#if BITS_PER_LONG == 64 +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 +#else +#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 +#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 +#endif + +/* Populate an octword field with various numbers of arguments */ +#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_9(oword, ...) \ + EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_8(oword, ...) \ + EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_7(oword, ...) \ + EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_6(oword, ...) \ + EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_5(oword, ...) \ + EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_4(oword, ...) \ + EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_3(oword, ...) \ + EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_2(oword, ...) \ + EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_1(oword, ...) 
\ + EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_OWORD(oword) \ + EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_OWORD(oword) \ + EFX_POPULATE_OWORD_4(oword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff, \ + EFX_DWORD_2, 0xffffffff, \ + EFX_DWORD_3, 0xffffffff) + +/* Populate a quadword field with various numbers of arguments */ +#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_9(qword, ...) \ + EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_8(qword, ...) \ + EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_7(qword, ...) \ + EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_6(qword, ...) \ + EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_5(qword, ...) \ + EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_4(qword, ...) \ + EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_3(qword, ...) \ + EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_2(qword, ...) \ + EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_1(qword, ...) \ + EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_QWORD(qword) \ + EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_QWORD(qword) \ + EFX_POPULATE_QWORD_2(qword, \ + EFX_DWORD_0, 0xffffffff, \ + EFX_DWORD_1, 0xffffffff) + +/* Populate a dword field with various numbers of arguments */ +#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_9(dword, ...) \ + EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_8(dword, ...) \ + EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_7(dword, ...) \ + EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_6(dword, ...) \ + EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_5(dword, ...) \ + EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_4(dword, ...) \ + EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_3(dword, ...) \ + EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_2(dword, ...) \ + EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_1(dword, ...) \ + EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_ZERO_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0) +#define EFX_SET_DWORD(dword) \ + EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff) + +/* + * Modify a named field within an already-populated structure. Used + * for read-modify-write operations. 
+ * + */ +#define EFX_INVERT_OWORD(oword) do { \ + (oword).u64[0] = ~((oword).u64[0]); \ + (oword).u64[1] = ~((oword).u64[1]); \ + } while (0) + +#define EFX_AND_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ + } while (0) + +#define EFX_OR_OWORD(oword, from, mask) \ + do { \ + (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ + (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ + } while (0) + +#define EFX_INSERT64(min, max, low, high, value) \ + cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INSERT32(min, max, low, high, value) \ + cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value)) + +#define EFX_INPLACE_MASK64(min, max, low, high) \ + EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low))) + +#define EFX_INPLACE_MASK32(min, max, low, high) \ + EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low))) + +#define EFX_SET_OWORD64(oword, low, high, value) do { \ + (oword).u64[0] = (((oword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + (oword).u64[1] = (((oword).u64[1] \ + & ~EFX_INPLACE_MASK64(64, 127, low, high)) \ + | EFX_INSERT64(64, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD64(qword, low, high, value) do { \ + (qword).u64[0] = (((qword).u64[0] \ + & ~EFX_INPLACE_MASK64(0, 63, low, high)) \ + | EFX_INSERT64(0, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD32(oword, low, high, value) do { \ + (oword).u32[0] = (((oword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (oword).u32[1] = (((oword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + (oword).u32[2] = (((oword).u32[2] \ + & ~EFX_INPLACE_MASK32(64, 95, low, high)) \ + | EFX_INSERT32(64, 95, low, high, value)); \ + (oword).u32[3] = (((oword).u32[3] \ + & ~EFX_INPLACE_MASK32(96, 127, low, high)) \ + | EFX_INSERT32(96, 127, low, high, value)); \ + } while (0) + +#define EFX_SET_QWORD32(qword, low, high, value) do { \ + (qword).u32[0] = (((qword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + (qword).u32[1] = (((qword).u32[1] \ + & ~EFX_INPLACE_MASK32(32, 63, low, high)) \ + | EFX_INSERT32(32, 63, low, high, value)); \ + } while (0) + +#define EFX_SET_DWORD32(dword, low, high, value) do { \ + (dword).u32[0] = (((dword).u32[0] \ + & ~EFX_INPLACE_MASK32(0, 31, low, high)) \ + | EFX_INSERT32(0, 31, low, high, value)); \ + } while (0) + +#define EFX_SET_OWORD_FIELD64(oword, field, value) \ + EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD64(qword, field, value) \ + EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_OWORD_FIELD32(oword, field, value) \ + EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_QWORD_FIELD32(qword, field, value) \ + EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + +#define EFX_SET_DWORD_FIELD(dword, field, value) \ + EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \ + EFX_HIGH_BIT(field), value) + + + +#if BITS_PER_LONG == 64 +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 +#else +#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 +#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 +#endif + +/* Used 
to avoid compiler warnings about shift range exceeding width + * of the data types when dma_addr_t is only 32 bits wide. + */ +#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) +#define EFX_DMA_TYPE_WIDTH(width) \ + (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) + + +/* Static initialiser */ +#define EFX_OWORD32(a, b, c, d) \ + { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \ + cpu_to_le32(c), cpu_to_le32(d) } } + +#endif /* EFX_BITFIELD_H */ diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c new file mode 100644 index 000000000..fbb6cfa0f --- /dev/null +++ b/drivers/net/ethernet/sfc/ef10.c @@ -0,0 +1,3713 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2012-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "ef10_regs.h" +#include "io.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "nic.h" +#include "workarounds.h" +#include "selftest.h" +#include <linux/in.h> +#include <linux/jhash.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +/* Hardware control for EF10 architecture including 'Huntington'. */ + +#define EFX_EF10_DRVGEN_EV 7 +enum { + EFX_EF10_TEST = 1, + EFX_EF10_REFILL, +}; + +/* The reserved RSS context value */ +#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff + +/* The filter table(s) are managed by firmware and we have write-only + * access. When removing filters we must identify them to the + * firmware by a 64-bit handle, but this is too wide for Linux kernel + * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to + * be able to tell in advance whether a requested insertion will + * replace an existing filter. Therefore we maintain a software hash + * table, which should be at least as large as the hardware hash + * table. + * + * Huntington has a single 8K filter table shared between all filter + * types and both ports. + */ +#define HUNT_FILTER_TBL_ROWS 8192 + +struct efx_ef10_filter_table { +/* The RX match field masks supported by this fw & hw, in order of priority */ + enum efx_filter_match_flags rx_match_flags[ + MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; + unsigned int rx_match_count; + + struct { + unsigned long spec; /* pointer to spec plus flag bits */ +/* BUSY flag indicates that an update is in progress. AUTO_OLD is + * used to mark and sweep MAC filters for the device address lists.
+ */ +#define EFX_EF10_FILTER_FLAG_BUSY 1UL +#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL +#define EFX_EF10_FILTER_FLAGS 3UL + u64 handle; /* firmware handle */ + } *entry; + wait_queue_head_t waitq; +/* Shadow of net_device address lists, guarded by mac_lock */ +#define EFX_EF10_FILTER_DEV_UC_MAX 32 +#define EFX_EF10_FILTER_DEV_MC_MAX 256 + struct { + u8 addr[ETH_ALEN]; + u16 id; + } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX], + dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; + int dev_uc_count; /* negative for PROMISC */ + int dev_mc_count; /* negative for PROMISC/ALLMULTI */ +}; + +/* An arbitrary search limit for the software hash table */ +#define EFX_EF10_FILTER_SEARCH_LIMIT 200 + +static void efx_ef10_rx_push_rss_config(struct efx_nic *efx); +static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); +static void efx_ef10_filter_table_remove(struct efx_nic *efx); + +static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) +{ + efx_dword_t reg; + + efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); + return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? + EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; +} + +static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) +{ + return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); +} + +static int efx_ef10_init_datapath_caps(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) { + netif_err(efx, drv, efx->net_dev, + "unable to read datapath firmware capabilities\n"); + return -EIO; + } + + nic_data->datapath_caps = + MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { + netif_err(efx, drv, efx->net_dev, + "current firmware does not support TSO\n"); + return -ENODEV; + } + + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { + netif_err(efx, probe, efx->net_dev, + "current firmware does not support an RX prefix\n"); + return -ENODEV; + } + + return 0; +} + +static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); + return rc > 0 ? rc : -ERANGE; +} + +static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) + return -EIO; + + ether_addr_copy(mac_address, + MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); + return 0; +} + +static int efx_ef10_probe(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data; + int i, rc; + + /* We can have one VI for each 8K region. However, until we + * use TX option descriptors we need two TX queues per channel. 
+ */ + efx->max_channels = + min_t(unsigned int, + EFX_MAX_CHANNELS, + resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / + (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); + if (WARN_ON(efx->max_channels == 0)) + return -EIO; + + nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + efx->nic_data = nic_data; + + rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, + 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); + if (rc) + goto fail1; + + /* Get the MC's warm boot count. In case it's rebooting right + * now, be prepared to retry. + */ + i = 0; + for (;;) { + rc = efx_ef10_get_warm_boot_count(efx); + if (rc >= 0) + break; + if (++i == 5) + goto fail2; + ssleep(1); + } + nic_data->warm_boot_count = rc; + + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + + /* In case we're recovering from a crash (kexec), we want to + * cancel any outstanding request by the previous user of this + * function. We send a special message using the least + * significant bits of the 'high' (doorbell) register. + */ + _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); + + rc = efx_mcdi_init(efx); + if (rc) + goto fail2; + + /* Reset (most) configuration for this function */ + rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); + if (rc) + goto fail3; + + /* Enable event logging */ + rc = efx_mcdi_log_ctrl(efx, true, false, 0); + if (rc) + goto fail3; + + rc = efx_ef10_init_datapath_caps(efx); + if (rc < 0) + goto fail3; + + efx->rx_packet_len_offset = + ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; + + rc = efx_mcdi_port_get_number(efx); + if (rc < 0) + goto fail3; + efx->port_num = rc; + + rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr); + if (rc) + goto fail3; + + rc = efx_ef10_get_sysclk_freq(efx); + if (rc < 0) + goto fail3; + efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ + + /* Check whether firmware supports bug 35388 workaround */ + rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); + if (rc == 0) + nic_data->workaround_35388 = true; + else if (rc != -ENOSYS && rc != -ENOENT) + goto fail3; + netif_dbg(efx, probe, efx->net_dev, + "workaround for bug 35388 is %sabled\n", + nic_data->workaround_35388 ? 
"en" : "dis"); + + rc = efx_mcdi_mon_probe(efx); + if (rc) + goto fail3; + + efx_ptp_probe(efx, NULL); + + return 0; + +fail3: + efx_mcdi_fini(efx); +fail2: + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); +fail1: + kfree(nic_data); + efx->nic_data = NULL; + return rc; +} + +static int efx_ef10_free_vis(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + size_t outlen; + int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + + /* -EALREADY means nothing to free, so ignore */ + if (rc == -EALREADY) + rc = 0; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, + rc); + return rc; +} + +#ifdef EFX_USE_PIO + +static void efx_ef10_free_piobufs(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); + unsigned int i; + int rc; + + BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); + + for (i = 0; i < nic_data->n_piobufs; i++) { + MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[i]); + rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), + NULL, 0, NULL); + WARN_ON(rc); + } + + nic_data->n_piobufs = 0; +} + +static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); + unsigned int i; + size_t outlen; + int rc = 0; + + BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); + + for (i = 0; i < n; i++) { + rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + break; + if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { + rc = -EIO; + break; + } + nic_data->piobuf_handle[i] = + MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); + netif_dbg(efx, probe, efx->net_dev, + "allocated PIO buffer %u handle %x\n", i, + nic_data->piobuf_handle[i]); + } + + nic_data->n_piobufs = i; + if (rc) + efx_ef10_free_piobufs(efx); + return rc; +} + +static int efx_ef10_link_piobufs(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, + max(MC_CMD_LINK_PIOBUF_IN_LEN, + MC_CMD_UNLINK_PIOBUF_IN_LEN)); + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + unsigned int offset, index; + int rc; + + BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); + BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); + + /* Link a buffer to each VI in the write-combining mapping */ + for (index = 0; index < nic_data->n_piobufs; ++index) { + MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[index]); + MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, + nic_data->pio_write_vi_base + index); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, + inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to link VI %u to PIO buffer %u (%d)\n", + nic_data->pio_write_vi_base + index, index, + rc); + goto fail; + } + netif_dbg(efx, probe, efx->net_dev, + "linked VI %u to PIO buffer %u\n", + nic_data->pio_write_vi_base + index, index); + } + + /* Link a buffer to each TX queue */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + /* We assign the PIO buffers to queues in + * reverse order to allow for the following + * special case. 
+ */ + offset = ((efx->tx_channel_offset + efx->n_tx_channels - + tx_queue->channel->channel - 1) * + efx_piobuf_size); + index = offset / ER_DZ_TX_PIOBUF_SIZE; + offset = offset % ER_DZ_TX_PIOBUF_SIZE; + + /* When the host page size is 4K, the first + * host page in the WC mapping may be within + * the same VI page as the last TX queue. We + * can only link one buffer to each VI. + */ + if (tx_queue->queue == nic_data->pio_write_vi_base) { + BUG_ON(index != 0); + rc = 0; + } else { + MCDI_SET_DWORD(inbuf, + LINK_PIOBUF_IN_PIOBUF_HANDLE, + nic_data->piobuf_handle[index]); + MCDI_SET_DWORD(inbuf, + LINK_PIOBUF_IN_TXQ_INSTANCE, + tx_queue->queue); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, + inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + } + + if (rc) { + /* This is non-fatal; the TX path just + * won't use PIO for this queue + */ + netif_err(efx, drv, efx->net_dev, + "failed to link VI %u to PIO buffer %u (%d)\n", + tx_queue->queue, index, rc); + tx_queue->piobuf = NULL; + } else { + tx_queue->piobuf = + nic_data->pio_write_base + + index * EFX_VI_PAGE_SIZE + offset; + tx_queue->piobuf_offset = offset; + netif_dbg(efx, probe, efx->net_dev, + "linked VI %u to PIO buffer %u offset %x addr %p\n", + tx_queue->queue, index, + tx_queue->piobuf_offset, + tx_queue->piobuf); + } + } + } + + return 0; + +fail: + while (index--) { + MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, + nic_data->pio_write_vi_base + index); + efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, + inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, + NULL, 0, NULL); + } + return rc; +} + +#else /* !EFX_USE_PIO */ + +static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) +{ + return n == 0 ? 0 : -ENOBUFS; +} + +static int efx_ef10_link_piobufs(struct efx_nic *efx) +{ + return 0; +} + +static void efx_ef10_free_piobufs(struct efx_nic *efx) +{ +} + +#endif /* EFX_USE_PIO */ + +static void efx_ef10_remove(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + efx_ptp_remove(efx); + + efx_mcdi_mon_remove(efx); + + efx_ef10_rx_free_indir_table(efx); + + if (nic_data->wc_membase) + iounmap(nic_data->wc_membase); + + rc = efx_ef10_free_vis(efx); + WARN_ON(rc != 0); + + if (!nic_data->must_restore_piobufs) + efx_ef10_free_piobufs(efx); + + efx_mcdi_fini(efx); + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); + kfree(nic_data); +} + +static int efx_ef10_alloc_vis(struct efx_nic *efx, + unsigned int min_vis, unsigned int max_vis) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); + MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); + rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) + return -EIO; + + netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", + MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); + + nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); + nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); + return 0; +} + +/* Note that the failure path of this function does not free + * resources, as this will be done by efx_ef10_remove(). 
+ */ +static int efx_ef10_dimension_resources(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int uc_mem_map_size, wc_mem_map_size; + unsigned int min_vis, pio_write_vi_base, max_vis; + void __iomem *membase; + int rc; + + min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + +#ifdef EFX_USE_PIO + /* Try to allocate PIO buffers if wanted and if the full + * number of PIO buffers would be sufficient to allocate one + * copy-buffer per TX channel. Failure is non-fatal, as there + * are only a small number of PIO buffers shared between all + * functions of the controller. + */ + if (efx_piobuf_size != 0 && + ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= + efx->n_tx_channels) { + unsigned int n_piobufs = + DIV_ROUND_UP(efx->n_tx_channels, + ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); + + rc = efx_ef10_alloc_piobufs(efx, n_piobufs); + if (rc) + netif_err(efx, probe, efx->net_dev, + "failed to allocate PIO buffers (%d)\n", rc); + else + netif_dbg(efx, probe, efx->net_dev, + "allocated %u PIO buffers\n", n_piobufs); + } +#else + nic_data->n_piobufs = 0; +#endif + + /* PIO buffers should be mapped with write-combining enabled, + * and we want to make single UC and WC mappings rather than + * several of each (in fact that's the only option if host + * page size is >4K). So we may allocate some extra VIs just + * for writing PIO buffers through. + * + * The UC mapping contains (min_vis - 1) complete VIs and the + * first half of the next VI. Then the WC mapping begins with + * the second half of this last VI. + */ + uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + + ER_DZ_TX_PIOBUF); + if (nic_data->n_piobufs) { + /* pio_write_vi_base rounds down to give the number of complete + * VIs inside the UC mapping. 
+ */ + pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; + wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + + nic_data->n_piobufs) * + EFX_VI_PAGE_SIZE) - + uc_mem_map_size); + max_vis = pio_write_vi_base + nic_data->n_piobufs; + } else { + pio_write_vi_base = 0; + wc_mem_map_size = 0; + max_vis = min_vis; + } + + /* In case the last attached driver failed to free VIs, do it now */ + rc = efx_ef10_free_vis(efx); + if (rc != 0) + return rc; + + rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); + if (rc != 0) + return rc; + + /* If we didn't get enough VIs to map all the PIO buffers, free the + * PIO buffers + */ + if (nic_data->n_piobufs && + nic_data->n_allocated_vis < + pio_write_vi_base + nic_data->n_piobufs) { + netif_dbg(efx, probe, efx->net_dev, + "%u VIs are not sufficient to map %u PIO buffers\n", + nic_data->n_allocated_vis, nic_data->n_piobufs); + efx_ef10_free_piobufs(efx); + } + + /* Shrink the original UC mapping of the memory BAR */ + membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); + if (!membase) { + netif_err(efx, probe, efx->net_dev, + "could not shrink memory BAR to %x\n", + uc_mem_map_size); + return -ENOMEM; + } + iounmap(efx->membase); + efx->membase = membase; + + /* Set up the WC mapping if needed */ + if (wc_mem_map_size) { + nic_data->wc_membase = ioremap_wc(efx->membase_phys + + uc_mem_map_size, + wc_mem_map_size); + if (!nic_data->wc_membase) { + netif_err(efx, probe, efx->net_dev, + "could not allocate WC mapping of size %x\n", + wc_mem_map_size); + return -ENOMEM; + } + nic_data->pio_write_vi_base = pio_write_vi_base; + nic_data->pio_write_base = + nic_data->wc_membase + + (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - + uc_mem_map_size); + + rc = efx_ef10_link_piobufs(efx); + if (rc) + efx_ef10_free_piobufs(efx); + } + + netif_dbg(efx, probe, efx->net_dev, + "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", + &efx->membase_phys, efx->membase, uc_mem_map_size, + nic_data->wc_membase, wc_mem_map_size); + + return 0; +} + +static int efx_ef10_init_nic(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + if (nic_data->must_check_datapath_caps) { + rc = efx_ef10_init_datapath_caps(efx); + if (rc) + return rc; + nic_data->must_check_datapath_caps = false; + } + + if (nic_data->must_realloc_vis) { + /* We cannot let the number of VIs change now */ + rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, + nic_data->n_allocated_vis); + if (rc) + return rc; + nic_data->must_realloc_vis = false; + } + + if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { + rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); + if (rc == 0) { + rc = efx_ef10_link_piobufs(efx); + if (rc) + efx_ef10_free_piobufs(efx); + } + + /* Log an error on failure, but this is non-fatal */ + if (rc) + netif_err(efx, drv, efx->net_dev, + "failed to restore PIO buffers (%d)\n", rc); + nic_data->must_restore_piobufs = false; + } + + efx_ef10_rx_push_rss_config(efx); + return 0; +} + +static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* All our allocations have been reset */ + nic_data->must_realloc_vis = true; + nic_data->must_restore_filters = true; + nic_data->must_restore_piobufs = true; + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; +} + +static int efx_ef10_map_reset_flags(u32 *flags) +{ + enum { + EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << + ETH_RESET_SHARED_SHIFT), + EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | 
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC | + ETH_RESET_PHY | ETH_RESET_MGMT) << + ETH_RESET_SHARED_SHIFT) + }; + + /* We assume for now that our PCI function is permitted to + * reset everything. + */ + + if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { + *flags &= ~EF10_RESET_MC; + return RESET_TYPE_WORLD; + } + + if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { + *flags &= ~EF10_RESET_PORT; + return RESET_TYPE_ALL; + } + + /* no invisible reset implemented */ + + return -EINVAL; +} + +static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) +{ + int rc = efx_mcdi_reset(efx, reset_type); + + /* If it was a port reset, trigger reallocation of MC resources. + * Note that on an MC reset nothing needs to be done now because we'll + * detect the MC reset later and handle it then. + * For an FLR, we never get an MC reset event, but the MC has reset all + * resources assigned to us, so we have to trigger reallocation now. + */ + if ((reset_type == RESET_TYPE_ALL || + reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) + efx_ef10_reset_mc_allocations(efx); + return rc; +} + +#define EF10_DMA_STAT(ext_name, mcdi_name) \ + [EF10_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ + [EF10_STAT_ ## int_name] = \ + { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define EF10_OTHER_STAT(ext_name) \ + [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } +#define GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { + EF10_DMA_STAT(tx_bytes, TX_BYTES), + EF10_DMA_STAT(tx_packets, TX_PKTS), + EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), + EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), + EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), + EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), + EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), + EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), + EF10_DMA_STAT(tx_64, TX_64_PKTS), + EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), + EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), + EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), + EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), + EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(rx_bytes, RX_BYTES), + EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), + EF10_OTHER_STAT(rx_good_bytes), + EF10_OTHER_STAT(rx_bad_bytes), + EF10_DMA_STAT(rx_packets, RX_PKTS), + EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), + EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), + EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), + EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), + EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), + EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), + EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), + EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), + EF10_DMA_STAT(rx_64, RX_64_PKTS), + EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), + EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), + EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), + EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), + EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), + EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), + EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), + EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), + EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), + EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), + 
GENERIC_SW_STAT(rx_nodesc_trunc), + GENERIC_SW_STAT(rx_noskb_drops), + EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), + EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), + EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), + EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), + EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), + EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), + EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), + EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), + EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), + EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), + EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), + EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), +}; + +#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ + (1ULL << EF10_STAT_tx_packets) | \ + (1ULL << EF10_STAT_tx_pause) | \ + (1ULL << EF10_STAT_tx_unicast) | \ + (1ULL << EF10_STAT_tx_multicast) | \ + (1ULL << EF10_STAT_tx_broadcast) | \ + (1ULL << EF10_STAT_rx_bytes) | \ + (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ + (1ULL << EF10_STAT_rx_good_bytes) | \ + (1ULL << EF10_STAT_rx_bad_bytes) | \ + (1ULL << EF10_STAT_rx_packets) | \ + (1ULL << EF10_STAT_rx_good) | \ + (1ULL << EF10_STAT_rx_bad) | \ + (1ULL << EF10_STAT_rx_pause) | \ + (1ULL << EF10_STAT_rx_control) | \ + (1ULL << EF10_STAT_rx_unicast) | \ + (1ULL << EF10_STAT_rx_multicast) | \ + (1ULL << EF10_STAT_rx_broadcast) | \ + (1ULL << EF10_STAT_rx_lt64) | \ + (1ULL << EF10_STAT_rx_64) | \ + (1ULL << EF10_STAT_rx_65_to_127) | \ + (1ULL << EF10_STAT_rx_128_to_255) | \ + (1ULL << EF10_STAT_rx_256_to_511) | \ + (1ULL << EF10_STAT_rx_512_to_1023) | \ + (1ULL << EF10_STAT_rx_1024_to_15xx) | \ + (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ + (1ULL << EF10_STAT_rx_gtjumbo) | \ + (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ + (1ULL << EF10_STAT_rx_overflow) | \ + (1ULL << EF10_STAT_rx_nodesc_drops) | \ + (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ + (1ULL << GENERIC_STAT_rx_noskb_drops)) + +/* These statistics are only provided by the 10G MAC. For a 10G/40G + * switchable port we do not expose these because they might not + * include all the packets they should. + */ +#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ + (1ULL << EF10_STAT_tx_lt64) | \ + (1ULL << EF10_STAT_tx_64) | \ + (1ULL << EF10_STAT_tx_65_to_127) | \ + (1ULL << EF10_STAT_tx_128_to_255) | \ + (1ULL << EF10_STAT_tx_256_to_511) | \ + (1ULL << EF10_STAT_tx_512_to_1023) | \ + (1ULL << EF10_STAT_tx_1024_to_15xx) | \ + (1ULL << EF10_STAT_tx_15xx_to_jumbo)) + +/* These statistics are only provided by the 40G MAC. For a 10G/40G + * switchable port we do expose these because the errors will otherwise + * be silent. + */ +#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ + (1ULL << EF10_STAT_rx_length_error)) + +/* These statistics are only provided if the firmware supports the + * capability PM_AND_RXDP_COUNTERS. 
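The mask macros below gate which of these descriptors are exposed, and efx_ef10_get_stat_mask() further down has to hand the 64-bit raw mask to the generic bitmap helpers as an array of unsigned long. A minimal standalone C sketch of that 32-bit/64-bit split (illustrative only, not part of the patch; the demo bit positions are arbitrary):

#include <limits.h>	/* CHAR_BIT */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

/* Split a 64-bit raw statistic mask into an unsigned long array so it
 * can be used with bitmap-style helpers on either word size.
 */
static void mask_to_bitmap(uint64_t raw_mask, unsigned long *mask)
{
	if (BITS_PER_LONG == 64) {
		mask[0] = (unsigned long)raw_mask;
	} else {
		mask[0] = (unsigned long)(raw_mask & 0xffffffffu);
		mask[1] = (unsigned long)(raw_mask >> 32);
	}
}

int main(void)
{
	unsigned long mask[2] = { 0, 0 };

	/* arbitrary demo mask with bits 3 and 40 set */
	mask_to_bitmap((1ULL << 3) | (1ULL << 40), mask);
	printf("%lx %lx\n", mask[0], mask[1]);
	return 0;
}
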
+ */ +#define HUNT_PM_AND_RXDP_STAT_MASK ( \ + (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ + (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ + (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ + (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ + (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ + (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ + (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ + (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ + (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ + (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ + (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \ + (1ULL << EF10_STAT_rx_dp_hlb_wait)) + +static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) +{ + u64 raw_mask = HUNT_COMMON_STAT_MASK; + u32 port_caps = efx_mcdi_phy_get_caps(efx); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + raw_mask |= HUNT_40G_EXTRA_STAT_MASK; + else + raw_mask |= HUNT_10G_ONLY_STAT_MASK; + + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) + raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; + + return raw_mask; +} + +static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) +{ + u64 raw_mask = efx_ef10_raw_stat_mask(efx); + +#if BITS_PER_LONG == 64 + mask[0] = raw_mask; +#else + mask[0] = raw_mask & 0xffffffff; + mask[1] = raw_mask >> 32; +#endif +} + +static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + + efx_ef10_get_stat_mask(efx, mask); + return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, + mask, names); +} + +static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + __le64 generation_start, generation_end; + u64 *stats = nic_data->stats; + __le64 *dma_stats; + + efx_ef10_get_stat_mask(efx, mask); + + dma_stats = efx->stats_buffer.addr; + nic_data = efx->nic_data; + + generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + return 0; + rmb(); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, + stats, efx->stats_buffer.addr, false); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end != generation_start) + return -EAGAIN; + + /* Update derived statistics */ + efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]); + stats[EF10_STAT_rx_good_bytes] = + stats[EF10_STAT_rx_bytes] - + stats[EF10_STAT_rx_bytes_minus_good_bytes]; + efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], + stats[EF10_STAT_rx_bytes_minus_good_bytes]); + efx_update_sw_stats(efx, stats); + return 0; +} + + +static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + size_t stats_count = 0, index; + int retry; + + efx_ef10_get_stat_mask(efx, mask); + + /* If we're unlucky enough to read statistics during the DMA, wait + * up to 10ms for it to finish (typically takes <500us) + */ + for (retry = 0; retry < 100; ++retry) { + if (efx_ef10_try_update_nic_stats(efx) == 0) + break; + udelay(100); + } + + if (full_stats) { + for_each_set_bit(index, mask, EF10_STAT_COUNT) { + if (efx_ef10_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (core_stats) { + core_stats->rx_packets = 
stats[EF10_STAT_rx_packets]; + core_stats->tx_packets = stats[EF10_STAT_tx_packets]; + core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; + core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF10_STAT_rx_multicast]; + core_stats->rx_length_errors = + stats[EF10_STAT_rx_gtjumbo] + + stats[EF10_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors); + } + + return stats_count; +} + +static void efx_ef10_push_irq_moderation(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned int mode, value; + efx_dword_t timer_cmd; + + if (channel->irq_moderation) { + mode = 3; + value = channel->irq_moderation - 1; + } else { + mode = 0; + value = 0; + } + + if (EFX_EF10_WORKAROUND_35388(efx)) { + EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, + EFE_DD_EVQ_IND_TIMER_FLAGS, + ERF_DD_EVQ_IND_TIMER_MODE, mode, + ERF_DD_EVQ_IND_TIMER_VAL, value); + efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, + channel->channel); + } else { + EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, + ERF_DZ_TC_TIMER_VAL, value); + efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, + channel->channel); + } +} + +static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) +{ + if (type != 0) + return -EINVAL; + return 0; +} + +static void efx_ef10_mcdi_request(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 *pdu = nic_data->mcdi_buf.addr; + + memcpy(pdu, hdr, hdr_len); + memcpy(pdu + hdr_len, sdu, sdu_len); + wmb(); + + /* The hardware provides 'low' and 'high' (doorbell) registers + * for passing the 64-bit address of an MCDI request to + * firmware. However the dwords are swapped by firmware. The + * least significant bits of the doorbell are then 0 for all + * MCDI requests due to alignment. + */ + _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), + ER_DZ_MC_DB_LWRD); + _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), + ER_DZ_MC_DB_HWRD); +} + +static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; + + rmb(); + return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void +efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, + size_t offset, size_t outlen) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + const u8 *pdu = nic_data->mcdi_buf.addr; + + memcpy(outbuf, pdu + offset, outlen); +} + +static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_ef10_get_warm_boot_count(efx); + if (rc < 0) { + /* The firmware is presumably in the process of + * rebooting. 
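As the comment in efx_ef10_mcdi_request() above explains, the 64-bit address of the MCDI request buffer is delivered through two 32-bit doorbell registers whose dwords the firmware swaps, so the high half is written to the "low" register and the low half to the "high" register. A minimal sketch of that split (illustrative only, not part of the patch; the example address is arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit DMA address into the two 32-bit doorbell writes.
 * The firmware swaps the dwords, so the high half goes into the
 * "low" register and the low half into the "high" register.
 */
static void write_doorbell(uint64_t dma_addr,
			   uint32_t *db_lwrd, uint32_t *db_hwrd)
{
	*db_lwrd = (uint32_t)(dma_addr >> 32);
	*db_hwrd = (uint32_t)dma_addr;	/* low bits are 0 due to alignment */
}

int main(void)
{
	uint32_t lwrd, hwrd;

	/* arbitrary page-aligned demo address */
	write_doorbell(0x0000123456789000ULL, &lwrd, &hwrd);
	printf("LWRD=%08" PRIx32 " HWRD=%08" PRIx32 "\n", lwrd, hwrd);
	return 0;
}
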
However, we are supposed to report each + * reboot just once, so we must only do that once we + * can read and store the updated warm boot count. + */ + return 0; + } + + if (rc == nic_data->warm_boot_count) + return 0; + + nic_data->warm_boot_count = rc; + + /* All our allocations have been reset */ + efx_ef10_reset_mc_allocations(efx); + + /* The datapath firmware might have been changed */ + nic_data->must_check_datapath_caps = true; + + /* MAC statistics have been cleared on the NIC; clear the local + * statistic that we update with efx_update_diff_stat(). + */ + nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; + + return -EIO; +} + +/* Handle an MSI interrupt + * + * Handle an MSI hardware interrupt. This routine schedules event + * queue processing. No interrupt acknowledgement cycle is necessary. + * Also, we never need to check that the interrupt is for us, since + * MSI interrupts cannot be shared. + */ +static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); + + if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { + /* Note test interrupts */ + if (context->index == efx->irq_level) + efx->last_irq_cpu = raw_smp_processor_id(); + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(efx->channel[context->index]); + } + + return IRQ_HANDLED; +} + +static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) +{ + struct efx_nic *efx = dev_id; + bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + struct efx_channel *channel; + efx_dword_t reg; + u32 queues; + + /* Read the ISR which also ACKs the interrupts */ + efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); + queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); + + if (queues == 0) + return IRQ_NONE; + + if (likely(soft_enabled)) { + /* Note test interrupts */ + if (queues & (1U << efx->irq_level)) + efx->last_irq_cpu = raw_smp_processor_id(); + + efx_for_each_channel(channel, efx) { + if (queues & 1) + efx_schedule_channel_irq(channel); + queues >>= 1; + } + } + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); + + return IRQ_HANDLED; +} + +static void efx_ef10_irq_test_generate(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); + + BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); + (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) +{ + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, + (tx_queue->ptr_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +/* This writes to the TX_DESC_WPTR and also pushes data */ +static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, + const efx_qword_t *txd) +{ + unsigned int write_ptr; + efx_oword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); + reg.qword[0] = *txd; + efx_writeo_page(tx_queue->efx, ®, + ER_DZ_TX_DESC_UPD, tx_queue->queue); +} + +static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / + EFX_BUF_SIZE)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN); + bool csum_offload = tx_queue->queue & 
EFX_TXQ_TYPE_OFFLOAD; + size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; + struct efx_channel *channel = tx_queue->channel; + struct efx_nic *efx = tx_queue->efx; + size_t inlen, outlen; + dma_addr_t dma_addr; + efx_qword_t *txd; + int rc; + int i; + + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); + MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS, + INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, + INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + dma_addr = tx_queue->txd.buf.dma_addr; + + netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", + tx_queue->queue, entries, (u64)dma_addr); + + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); + + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + /* A previous user of this TX queue might have set us up the + * bomb by writing a descriptor to the TX push collector but + * not the doorbell. (Each collector belongs to a port, not a + * queue or function, so cannot easily be reset.) We must + * attempt to push a no-op descriptor in its place. + */ + tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; + tx_queue->insert_count = 1; + txd = efx_tx_desc(tx_queue, 0); + EFX_POPULATE_QWORD_4(*txd, + ESF_DZ_TX_DESC_IS_OPT, true, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_CRC_CSUM, + ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, + ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); + tx_queue->write_count = 1; + wmb(); + efx_ef10_push_tx_desc(tx_queue, txd); + + return; + +fail: + netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", + tx_queue->queue); +} + +static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN); + struct efx_nic *efx = tx_queue->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, + tx_queue->queue); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, + outbuf, outlen, rc); +} + +static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) +{ + efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); +} + +/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ +static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned int write_ptr; + efx_dword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(tx_queue->efx, ®, + ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); +} + +static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) +{ + unsigned int old_write_count = tx_queue->write_count; + struct efx_tx_buffer *buffer; + unsigned int write_ptr; + efx_qword_t *txd; + + BUG_ON(tx_queue->write_count == tx_queue->insert_count); + + do { + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + txd = 
efx_tx_desc(tx_queue, write_ptr); + ++tx_queue->write_count; + + /* Create TX descriptor ring entry */ + if (buffer->flags & EFX_TX_BUF_OPTION) { + *txd = buffer->option; + } else { + BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); + EFX_POPULATE_QWORD_3( + *txd, + ESF_DZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, + ESF_DZ_TX_KER_BYTE_CNT, buffer->len, + ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); + } + } while (tx_queue->write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { + txd = efx_tx_desc(tx_queue, + old_write_count & tx_queue->ptr_mask); + efx_ef10_push_tx_desc(tx_queue, txd); + ++tx_queue->pushes; + } else { + efx_ef10_notify_tx_desc(tx_queue); + } +} + +static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, + EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, + MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, + EFX_MAX_CHANNELS); + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) + return -EIO; + + *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); + + return 0; +} + +static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, + context); + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + WARN_ON(rc != 0); +} + +static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) +{ + MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); + MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); + int i, rc; + + MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, + context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) + MCDI_PTR(tablebuf, + RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = + (u8) efx->rx_indir_table[i]; + + rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, + sizeof(tablebuf), NULL, 0, NULL); + if (rc != 0) + return rc; + + MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, + context); + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != + MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); + for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) + MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = + efx->rx_hash_key[i]; + + return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, + sizeof(keybuf), NULL, 0, NULL); +} + +static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) + efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); + nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; +} + +static void efx_ef10_rx_push_rss_config(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n"); + + if 
(nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { + rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); + if (rc != 0) + goto fail; + } + + rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context); + if (rc != 0) + goto fail; + + return; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) +{ + return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, + (rx_queue->ptr_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) +{ + MCDI_DECLARE_BUF(inbuf, + MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / + EFX_BUF_SIZE)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN); + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; + struct efx_nic *efx = rx_queue->efx; + size_t inlen, outlen; + dma_addr_t dma_addr; + int rc; + int i; + + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, + efx_rx_queue_index(rx_queue)); + MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, + INIT_RXQ_IN_FLAG_PREFIX, 1, + INIT_RXQ_IN_FLAG_TIMESTAMP, 1); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + dma_addr = rx_queue->rxd.buf.dma_addr; + + netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", + efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); + + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); + + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + if (rc) + netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", + efx_rx_queue_index(rx_queue)); +} + +static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN); + struct efx_nic *efx = rx_queue->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, + efx_rx_queue_index(rx_queue)); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, + outbuf, outlen, rc); +} + +static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) +{ + efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); +} + +/* This creates an entry in the RX descriptor queue */ +static inline void +efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + struct efx_rx_buffer *rx_buf; + efx_qword_t *rxd; + + rxd = efx_rx_desc(rx_queue, index); + rx_buf = efx_rx_buffer(rx_queue, index); + EFX_POPULATE_QWORD_2(*rxd, + ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, + ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); +} + +static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int write_count; + efx_dword_t reg; + + /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ + write_count = rx_queue->added_count & ~7; + if (rx_queue->notified_count == 
write_count) + return; + + do + efx_ef10_build_rx_desc( + rx_queue, + rx_queue->notified_count & rx_queue->ptr_mask); + while (++rx_queue->notified_count != write_count); + + wmb(); + EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, + write_count & rx_queue->ptr_mask); + efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, + efx_rx_queue_index(rx_queue)); +} + +static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; + +static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + efx_qword_t event; + + EFX_POPULATE_QWORD_2(event, + ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, + ESF_DZ_EV_DATA, EFX_EF10_REFILL); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has + * already swapped the data to little-endian order. + */ + memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], + sizeof(efx_qword_t)); + + efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, + inbuf, sizeof(inbuf), 0, + efx_ef10_rx_defer_refill_complete, 0); +} + +static void +efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, + int rc, efx_dword_t *outbuf, + size_t outlen_actual) +{ + /* nothing to do */ +} + +static int efx_ef10_ev_probe(struct efx_channel *channel) +{ + return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, + (channel->eventq_mask + 1) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +static int efx_ef10_ev_init(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, + MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / + EFX_BUF_SIZE)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); + size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; + struct efx_nic *efx = channel->efx; + struct efx_ef10_nic_data *nic_data; + bool supports_rx_merge; + size_t inlen, outlen; + dma_addr_t dma_addr; + int rc; + int i; + + nic_data = efx->nic_data; + supports_rx_merge = + !!(nic_data->datapath_caps & + 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); + + /* Fill event queue with all ones (i.e. 
empty events) */ + memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); + + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); + /* INIT_EVQ expects index in vector table, not absolute */ + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); + MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, + INIT_EVQ_IN_FLAG_INTERRUPTING, 1, + INIT_EVQ_IN_FLAG_RX_MERGE, 1, + INIT_EVQ_IN_FLAG_TX_MERGE, 1, + INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, + MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, + MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); + MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); + + dma_addr = channel->eventq.buf.dma_addr; + for (i = 0; i < entries; ++i) { + MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); + dma_addr += EFX_BUF_SIZE; + } + + inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); + + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, + outbuf, sizeof(outbuf), &outlen); + /* IRQ return is ignored */ + return rc; +} + +static void efx_ef10_ev_fini(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN); + struct efx_nic *efx = channel->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, + outbuf, outlen, rc); +} + +static void efx_ef10_ev_remove(struct efx_channel *channel) +{ + efx_nic_free_buffer(channel->efx, &channel->eventq.buf); +} + +static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, + unsigned int rx_queue_label) +{ + struct efx_nic *efx = rx_queue->efx; + + netif_info(efx, hw, efx->net_dev, + "rx event arrived on queue %d labeled as queue %u\n", + efx_rx_queue_index(rx_queue), rx_queue_label); + + efx_schedule_reset(efx, RESET_TYPE_DISABLE); +} + +static void +efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, + unsigned int actual, unsigned int expected) +{ + unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; + struct efx_nic *efx = rx_queue->efx; + + netif_info(efx, hw, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, actual, expected); + + efx_schedule_reset(efx, RESET_TYPE_DISABLE); +} + +/* partially received RX was aborted. clean up. 
*/ +static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) +{ + unsigned int rx_desc_ptr; + + netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, + "scattered RX aborted (dropping %u buffers)\n", + rx_queue->scatter_n); + + rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; + + efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, + 0, EFX_RX_PKT_DISCARD); + + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; +} + +static int efx_ef10_handle_rx_event(struct efx_channel *channel, + const efx_qword_t *event) +{ + unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; + unsigned int n_descs, n_packets, i; + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue; + bool rx_cont; + u16 flags = 0; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return 0; + + /* Basic packet information */ + rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); + next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); + rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); + rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); + rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); + + if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) + netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" + EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*event)); + + rx_queue = efx_channel_get_rx_queue(channel); + + if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) + efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); + + n_descs = ((next_ptr_lbits - rx_queue->removed_count) & + ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); + + if (n_descs != rx_queue->scatter_n + 1) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* detect rx abort */ + if (unlikely(n_descs == rx_queue->scatter_n)) { + if (rx_queue->scatter_n == 0 || rx_bytes != 0) + netdev_WARN(efx->net_dev, + "invalid RX abort: scatter_n=%u event=" + EFX_QWORD_FMT "\n", + rx_queue->scatter_n, + EFX_QWORD_VAL(*event)); + efx_ef10_handle_rx_abort(rx_queue); + return 0; + } + + /* Check that RX completion merging is valid, i.e. + * the current firmware supports it and this is a + * non-scattered packet. 
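The RX event handled above only carries the low ESF_DZ_RX_DSC_PTR_LBITS_WIDTH bits of the descriptor pointer, so n_descs is recovered by subtracting in that width and masking. A minimal sketch of the wrap-around arithmetic (illustrative only, not part of the patch; a 4-bit field width is assumed for the demo):

#include <stdio.h>

#define PTR_LBITS_WIDTH 4	/* assumed width for the demo */

/* Number of descriptors consumed since removed_count, recovered from
 * the truncated pointer field using modular arithmetic in that width.
 */
static unsigned int descs_consumed(unsigned int next_ptr_lbits,
				   unsigned int removed_count)
{
	return (next_ptr_lbits - removed_count) &
	       ((1u << PTR_LBITS_WIDTH) - 1);
}

int main(void)
{
	/* removed_count = 30 has low bits 0xe; an event reporting low
	 * bits 0x2 therefore means 4 descriptors were consumed.
	 */
	printf("%u\n", descs_consumed(0x2, 30));
	return 0;
}
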
+ */ + if (!(nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || + rx_queue->scatter_n != 0 || rx_cont) { + efx_ef10_handle_rx_bad_lbits( + rx_queue, next_ptr_lbits, + (rx_queue->removed_count + + rx_queue->scatter_n + 1) & + ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); + return 0; + } + + /* Merged completion for multiple non-scattered packets */ + rx_queue->scatter_n = 1; + rx_queue->scatter_len = 0; + n_packets = n_descs; + ++channel->n_rx_merge_events; + channel->n_rx_merge_packets += n_packets; + flags |= EFX_RX_PKT_PREFIX_LEN; + } else { + ++rx_queue->scatter_n; + rx_queue->scatter_len += rx_bytes; + if (rx_cont) + return 0; + n_packets = 1; + } + + if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) + flags |= EFX_RX_PKT_DISCARD; + + if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { + channel->n_rx_ip_hdr_chksum_err += n_packets; + } else if (unlikely(EFX_QWORD_FIELD(*event, + ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { + channel->n_rx_tcp_udp_chksum_err += n_packets; + } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || + rx_l4_class == ESE_DZ_L4_CLASS_UDP) { + flags |= EFX_RX_PKT_CSUMMED; + } + + if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) + flags |= EFX_RX_PKT_TCP; + + channel->irq_mod_score += 2 * n_packets; + + /* Handle received packet(s) */ + for (i = 0; i < n_packets; i++) { + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_queue->scatter_len, + flags); + rx_queue->removed_count += rx_queue->scatter_n; + } + + rx_queue->scatter_n = 0; + rx_queue->scatter_len = 0; + + return n_packets; +} + +static int +efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_tx_queue *tx_queue; + unsigned int tx_ev_desc_ptr; + unsigned int tx_ev_q_label; + int tx_descs = 0; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return 0; + + if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) + return 0; + + /* Transmit completion */ + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); + tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); + tx_queue = efx_channel_get_tx_queue(channel, + tx_ev_q_label % EFX_TXQ_TYPES); + tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & + tx_queue->ptr_mask); + efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); + + return tx_descs; +} + +static void +efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + int subcode; + + subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); + + switch (subcode) { + case ESE_DZ_DRV_TIMER_EV: + case ESE_DZ_DRV_WAKE_UP_EV: + break; + case ESE_DZ_DRV_START_UP_EV: + /* event queue init complete. ok. */ + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown driver event type %d" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, subcode, + EFX_QWORD_VAL(*event)); + + } +} + +static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + u32 subcode; + + subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); + + switch (subcode) { + case EFX_EF10_TEST: + channel->event_test_cpu = raw_smp_processor_id(); + break; + case EFX_EF10_REFILL: + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. 
Refill it here + */ + efx_fast_push_rx_descriptors(&channel->rx_queue, true); + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown driver event type %u" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, (unsigned) subcode, + EFX_QWORD_VAL(*event)); + } +} + +static int efx_ef10_ev_process(struct efx_channel *channel, int quota) +{ + struct efx_nic *efx = channel->efx; + efx_qword_t event, *p_event; + unsigned int read_ptr; + int ev_code; + int tx_descs = 0; + int spent = 0; + + if (quota <= 0) + return spent; + + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr); + event = *p_event; + + if (!efx_event_present(&event)) + break; + + EFX_SET_QWORD(*p_event); + + ++read_ptr; + + ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); + + netif_vdbg(efx, drv, efx->net_dev, + "processing event on %d " EFX_QWORD_FMT "\n", + channel->channel, EFX_QWORD_VAL(event)); + + switch (ev_code) { + case ESE_DZ_EV_CODE_MCDI_EV: + efx_mcdi_process_event(channel, &event); + break; + case ESE_DZ_EV_CODE_RX_EV: + spent += efx_ef10_handle_rx_event(channel, &event); + if (spent >= quota) { + /* XXX can we split a merged event to + * avoid going over-quota? + */ + spent = quota; + goto out; + } + break; + case ESE_DZ_EV_CODE_TX_EV: + tx_descs += efx_ef10_handle_tx_event(channel, &event); + if (tx_descs > efx->txq_entries) { + spent = quota; + goto out; + } else if (++spent == quota) { + goto out; + } + break; + case ESE_DZ_EV_CODE_DRIVER_EV: + efx_ef10_handle_driver_event(channel, &event); + if (++spent == quota) + goto out; + break; + case EFX_EF10_DRVGEN_EV: + efx_ef10_handle_driver_generated_event(channel, &event); + break; + default: + netif_err(efx, hw, efx->net_dev, + "channel %d unknown event type %d" + " (data " EFX_QWORD_FMT ")\n", + channel->channel, ev_code, + EFX_QWORD_VAL(event)); + } + } + +out: + channel->eventq_read_ptr = read_ptr; + return spent; +} + +static void efx_ef10_ev_read_ack(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + efx_dword_t rptr; + + if (EFX_EF10_WORKAROUND_35388(efx)) { + BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < + (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); + BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > + (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); + + EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, + ERF_DD_EVQ_IND_RPTR, + (channel->eventq_read_ptr & + channel->eventq_mask) >> + ERF_DD_EVQ_IND_RPTR_WIDTH); + efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, + channel->channel); + EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, + EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, + ERF_DD_EVQ_IND_RPTR, + channel->eventq_read_ptr & + ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); + efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, + channel->channel); + } else { + EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, + channel->eventq_read_ptr & + channel->eventq_mask); + efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); + } +} + +static void efx_ef10_ev_test_generate(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + struct efx_nic *efx = channel->efx; + efx_qword_t event; + int rc; + + EFX_POPULATE_QWORD_2(event, + ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, + ESF_DZ_EV_DATA, EFX_EF10_TEST); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has + * already swapped the data to little-endian order. 
+ */ + memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], + sizeof(efx_qword_t)); + + rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc != 0) + goto fail; + + return; + +fail: + WARN_ON(true); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + +void efx_ef10_handle_drain_event(struct efx_nic *efx) +{ + if (atomic_dec_and_test(&efx->active_queues)) + wake_up(&efx->flush_wq); + + WARN_ON(atomic_read(&efx->active_queues) < 0); +} + +static int efx_ef10_fini_dmaq(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int pending; + + /* If the MC has just rebooted, the TX/RX queues will have already been + * torn down, but efx->active_queues needs to be set to zero. + */ + if (nic_data->must_realloc_vis) { + atomic_set(&efx->active_queues, 0); + return 0; + } + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_ef10_rx_fini(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_ef10_tx_fini(tx_queue); + } + + wait_event_timeout(efx->flush_wq, + atomic_read(&efx->active_queues) == 0, + msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); + pending = atomic_read(&efx->active_queues); + if (pending) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", + pending); + return -ETIMEDOUT; + } + } + + return 0; +} + +static void efx_ef10_prepare_flr(struct efx_nic *efx) +{ + atomic_set(&efx->active_queues, 0); +} + +static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); + /* XXX should we randomise the initval? */ +} + +/* Decide whether a filter should be exclusive or else should allow + * delivery to additional recipients. Currently we decide that + * filters for specific local unicast MAC and IP addresses are + * exclusive. 
+ */ +static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) +{ + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && + !is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + !ipv4_is_multicast(spec->loc_host[0])) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] != 0xff) + return true; + } + + return false; +} + +static struct efx_filter_spec * +efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, + unsigned int filter_idx) +{ + return (struct efx_filter_spec *)(table->entry[filter_idx].spec & + ~EFX_EF10_FILTER_FLAGS); +} + +static unsigned int +efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, + unsigned int filter_idx) +{ + return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; +} + +static void +efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, + unsigned int filter_idx, + const struct efx_filter_spec *spec, + unsigned int flags) +{ + table->entry[filter_idx].spec = (unsigned long)spec | flags; +} + +static void efx_ef10_filter_push_prep(struct efx_nic *efx, + const struct efx_filter_spec *spec, + efx_dword_t *inbuf, u64 handle, + bool replacing) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); + + if (replacing) { + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_REPLACE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); + } else { + u32 match_fields = 0; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_INSERT : + MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); + + /* Convert match flags and values. Unlike almost + * everything else in MCDI, these fields are in + * network byte order. + */ + if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) + match_fields |= + is_multicast_ether_addr(spec->loc_mac) ? + 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : + 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; +#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ + if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ + match_fields |= \ + 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN; \ + BUILD_BUG_ON( \ + MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ + sizeof(spec->gen_field)); \ + memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ + &spec->gen_field, sizeof(spec->gen_field)); \ + } + COPY_FIELD(REM_HOST, rem_host, SRC_IP); + COPY_FIELD(LOC_HOST, loc_host, DST_IP); + COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); + COPY_FIELD(REM_PORT, rem_port, SRC_PORT); + COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); + COPY_FIELD(LOC_PORT, loc_port, DST_PORT); + COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); + COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); + COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); + COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); +#undef COPY_FIELD + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, + match_fields); + } + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP : + MC_CMD_FILTER_OP_IN_RX_DEST_HOST); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, + MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? + 0 : spec->dmaq_id); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, + (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? + MC_CMD_FILTER_OP_IN_RX_MODE_RSS : + MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); + if (spec->flags & EFX_FILTER_FLAG_RX_RSS) + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, + spec->rss_context != + EFX_FILTER_RSS_CONTEXT_DEFAULT ? + spec->rss_context : nic_data->rx_rss_context); +} + +static int efx_ef10_filter_push(struct efx_nic *efx, + const struct efx_filter_spec *spec, + u64 *handle, bool replacing) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); + int rc; + + efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc == 0) + *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); + if (rc == -ENOSPC) + rc = -EBUSY; /* to match efx_farch_filter_insert() */ + return rc; +} + +static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, + enum efx_filter_match_flags match_flags) +{ + unsigned int match_pri; + + for (match_pri = 0; + match_pri < table->rx_match_count; + match_pri++) + if (table->rx_match_flags[match_pri] == match_flags) + return match_pri; + + return -EPROTONOSUPPORT; +} + +static s32 efx_ef10_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + struct efx_filter_spec *saved_spec; + unsigned int match_pri, hash; + unsigned int priv_flags; + bool replacing = false; + int ins_index = -1; + DEFINE_WAIT(wait); + bool is_mc_recip; + s32 rc; + + /* For now, only support RX filters */ + if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != + EFX_FILTER_FLAG_RX) + return -EINVAL; + + rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); + if (rc < 0) + return rc; + match_pri = rc; + + hash = efx_ef10_filter_hash(spec); + is_mc_recip = efx_filter_is_mc_recipient(spec); + if (is_mc_recip) + bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + + /* Find any existing filters with the same match tuple or + * else a free slot to insert at. If any of them are busy, + * we have to wait and retry. 
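The insertion path below probes at most EFX_EF10_FILTER_SEARCH_LIMIT slots after the hash, remembering the first free slot and stopping early on a matching entry. A minimal standalone sketch of that bounded scan (illustrative only, not part of the patch; the table size, search limit, and int-keyed table are assumptions for the demo):

#include <stdio.h>

#define TBL_ROWS	8	/* assumed power-of-two table size */
#define SEARCH_LIMIT	4	/* assumed probe limit */

/* Probe up to SEARCH_LIMIT slots after the hash: return a matching
 * slot if found, otherwise the first free slot, otherwise -1.
 */
static int find_slot(const int *table, unsigned int hash, int key)
{
	int ins_index = -1;
	unsigned int depth, i;

	for (depth = 1; depth <= SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (TBL_ROWS - 1);
		if (table[i] == 0) {		/* empty slot */
			if (ins_index < 0)
				ins_index = i;
		} else if (table[i] == key) {	/* existing entry */
			return i;
		}
	}
	return ins_index;			/* -1 means no slot found */
}

int main(void)
{
	int table[TBL_ROWS] = { 0, 7, 0, 0, 9, 0, 0, 0 };

	printf("existing: %d\n", find_slot(table, 3, 9)); /* slot 4 */
	printf("free:     %d\n", find_slot(table, 3, 5)); /* slot 5 */
	return 0;
}
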
+ */ + for (;;) { + unsigned int depth = 1; + unsigned int i; + + spin_lock_bh(&efx->filter_lock); + + for (;;) { + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + saved_spec = efx_ef10_filter_entry_spec(table, i); + + if (!saved_spec) { + if (ins_index < 0) + ins_index = i; + } else if (efx_ef10_filter_equal(spec, saved_spec)) { + if (table->entry[i].spec & + EFX_EF10_FILTER_FLAG_BUSY) + break; + if (spec->priority < saved_spec->priority && + spec->priority != EFX_FILTER_PRI_AUTO) { + rc = -EPERM; + goto out_unlock; + } + if (!is_mc_recip) { + /* This is the only one */ + if (spec->priority == + saved_spec->priority && + !replace_equal) { + rc = -EEXIST; + goto out_unlock; + } + ins_index = i; + goto found; + } else if (spec->priority > + saved_spec->priority || + (spec->priority == + saved_spec->priority && + replace_equal)) { + if (ins_index < 0) + ins_index = i; + else + __set_bit(depth, mc_rem_map); + } + } + + /* Once we reach the maximum search depth, use + * the first suitable slot or return -EBUSY if + * there was none + */ + if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { + if (ins_index < 0) { + rc = -EBUSY; + goto out_unlock; + } + goto found; + } + + ++depth; + } + + prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_bh(&efx->filter_lock); + schedule(); + } + +found: + /* Create a software table entry if necessary, and mark it + * busy. We might yet fail to insert, but any attempt to + * insert a conflicting filter while we're waiting for the + * firmware must find the busy entry. + */ + saved_spec = efx_ef10_filter_entry_spec(table, ins_index); + if (saved_spec) { + if (spec->priority == EFX_FILTER_PRI_AUTO && + saved_spec->priority >= EFX_FILTER_PRI_AUTO) { + /* Just make sure it won't be removed */ + if (saved_spec->priority > EFX_FILTER_PRI_AUTO) + saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; + table->entry[ins_index].spec &= + ~EFX_EF10_FILTER_FLAG_AUTO_OLD; + rc = ins_index; + goto out_unlock; + } + replacing = true; + priv_flags = efx_ef10_filter_entry_flags(table, ins_index); + } else { + saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); + if (!saved_spec) { + rc = -ENOMEM; + goto out_unlock; + } + *saved_spec = *spec; + priv_flags = 0; + } + efx_ef10_filter_set_entry(table, ins_index, saved_spec, + priv_flags | EFX_EF10_FILTER_FLAG_BUSY); + + /* Mark lower-priority multicast recipients busy prior to removal */ + if (is_mc_recip) { + unsigned int depth, i; + + for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + if (test_bit(depth, mc_rem_map)) + table->entry[i].spec |= + EFX_EF10_FILTER_FLAG_BUSY; + } + } + + spin_unlock_bh(&efx->filter_lock); + + rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, + replacing); + + /* Finalise the software table entry */ + spin_lock_bh(&efx->filter_lock); + if (rc == 0) { + if (replacing) { + /* Update the fields that may differ */ + if (saved_spec->priority == EFX_FILTER_PRI_AUTO) + saved_spec->flags |= + EFX_FILTER_FLAG_RX_OVER_AUTO; + saved_spec->priority = spec->priority; + saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; + saved_spec->flags |= spec->flags; + saved_spec->rss_context = spec->rss_context; + saved_spec->dmaq_id = spec->dmaq_id; + } + } else if (!replacing) { + kfree(saved_spec); + saved_spec = NULL; + } + efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); + + /* Remove and finalise entries for lower-priority multicast + * recipients + */ + if (is_mc_recip) { + MCDI_DECLARE_BUF(inbuf, 
MC_CMD_FILTER_OP_IN_LEN); + unsigned int depth, i; + + memset(inbuf, 0, sizeof(inbuf)); + + for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { + if (!test_bit(depth, mc_rem_map)) + continue; + + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + saved_spec = efx_ef10_filter_entry_spec(table, i); + priv_flags = efx_ef10_filter_entry_flags(table, i); + + if (rc == 0) { + spin_unlock_bh(&efx->filter_lock); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[i].handle); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), + NULL, 0, NULL); + spin_lock_bh(&efx->filter_lock); + } + + if (rc == 0) { + kfree(saved_spec); + saved_spec = NULL; + priv_flags = 0; + } else { + priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; + } + efx_ef10_filter_set_entry(table, i, saved_spec, + priv_flags); + } + } + + /* If successful, return the inserted filter ID */ + if (rc == 0) + rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; + + wake_up_all(&table->waitq); +out_unlock: + spin_unlock_bh(&efx->filter_lock); + finish_wait(&table->waitq, &wait); + return rc; +} + +static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) +{ + /* no need to do anything here on EF10 */ +} + +/* Remove a filter. + * If !by_index, remove by ID + * If by_index, remove by index + * Filter ID may come from userland and must be range-checked. + */ +static int efx_ef10_filter_remove_internal(struct efx_nic *efx, + unsigned int priority_mask, + u32 filter_id, bool by_index) +{ + unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + struct efx_ef10_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FILTER_OP_IN_HANDLE_OFST + + MC_CMD_FILTER_OP_IN_HANDLE_LEN); + struct efx_filter_spec *spec; + DEFINE_WAIT(wait); + int rc; + + /* Find the software table entry and mark it busy. Don't + * remove it yet; any attempt to update while we're waiting + * for the firmware must find the busy entry. 
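efx_ef10_filter_remove_internal() below follows the same discipline as the insertion path: mark the software entry busy under the lock, drop the lock for the slow firmware call, then re-take it, clear the flag and wake any waiters. A minimal userspace analogue using pthreads (illustrative only, not part of the patch; the mutex/condvar pair stands in for the filter spinlock and wait queue):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static bool entry_busy;

static void slow_firmware_op(void)
{
	usleep(1000);		/* stands in for an MCDI round trip */
}

static void update_entry(const char *who)
{
	pthread_mutex_lock(&filter_lock);
	while (entry_busy)		/* someone else owns the entry */
		pthread_cond_wait(&waitq, &filter_lock);
	entry_busy = true;		/* claim it before dropping the lock */
	pthread_mutex_unlock(&filter_lock);

	slow_firmware_op();		/* no lock held across the slow part */

	pthread_mutex_lock(&filter_lock);
	entry_busy = false;
	pthread_cond_broadcast(&waitq);	/* wake_up_all() analogue */
	pthread_mutex_unlock(&filter_lock);
	printf("%s: done\n", who);
}

static void *worker(void *arg)
{
	update_entry(arg);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, "worker 1");
	pthread_create(&t2, NULL, worker, "worker 2");
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
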
+ */ + for (;;) { + spin_lock_bh(&efx->filter_lock); + if (!(table->entry[filter_idx].spec & + EFX_EF10_FILTER_FLAG_BUSY)) + break; + prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_bh(&efx->filter_lock); + schedule(); + } + + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (!spec || + (!by_index && + efx_ef10_filter_rx_match_pri(table, spec->match_flags) != + filter_id / HUNT_FILTER_TBL_ROWS)) { + rc = -ENOENT; + goto out_unlock; + } + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && + priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { + /* Just remove flags */ + spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; + table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; + rc = 0; + goto out_unlock; + } + + if (!(priority_mask & (1U << spec->priority))) { + rc = -ENOENT; + goto out_unlock; + } + + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; + spin_unlock_bh(&efx->filter_lock); + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { + /* Reset to an automatic filter */ + + struct efx_filter_spec new_spec = *spec; + + new_spec.priority = EFX_FILTER_PRI_AUTO; + new_spec.flags = (EFX_FILTER_FLAG_RX | + EFX_FILTER_FLAG_RX_RSS); + new_spec.dmaq_id = 0; + new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; + rc = efx_ef10_filter_push(efx, &new_spec, + &table->entry[filter_idx].handle, + true); + + spin_lock_bh(&efx->filter_lock); + if (rc == 0) + *spec = new_spec; + } else { + /* Really remove the filter */ + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_REMOVE : + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + + spin_lock_bh(&efx->filter_lock); + if (rc == 0) { + kfree(spec); + efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } + } + + table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; + wake_up_all(&table->waitq); +out_unlock: + spin_unlock_bh(&efx->filter_lock); + finish_wait(&table->waitq, &wait); + return rc; +} + +static int efx_ef10_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx_ef10_filter_remove_internal(efx, 1U << priority, + filter_id, false); +} + +static int efx_ef10_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; + struct efx_ef10_filter_table *table = efx->filter_state; + const struct efx_filter_spec *saved_spec; + int rc; + + spin_lock_bh(&efx->filter_lock); + saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (saved_spec && saved_spec->priority == priority && + efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == + filter_id / HUNT_FILTER_TBL_ROWS) { + *spec = *saved_spec; + rc = 0; + } else { + rc = -ENOENT; + } + spin_unlock_bh(&efx->filter_lock); + return rc; +} + +static int efx_ef10_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + unsigned int priority_mask; + unsigned int i; + int rc; + + priority_mask = (((1U << (priority + 1)) - 1) & + ~(1U << EFX_FILTER_PRI_AUTO)); + + for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { + rc = efx_ef10_filter_remove_internal(efx, priority_mask, + i, true); + if (rc && rc != -ENOENT) + return rc; + } + + return 0; +} + +static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority 
priority) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + unsigned int filter_idx; + s32 count = 0; + + spin_lock_bh(&efx->filter_lock); + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + if (table->entry[filter_idx].spec && + efx_ef10_filter_entry_spec(table, filter_idx)->priority == + priority) + ++count; + } + spin_unlock_bh(&efx->filter_lock); + return count; +} + +static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + + return table->rx_match_count * HUNT_FILTER_TBL_ROWS; +} + +static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_filter_spec *spec; + unsigned int filter_idx; + s32 count = 0; + + spin_lock_bh(&efx->filter_lock); + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (spec && spec->priority == priority) { + if (count == size) { + count = -EMSGSIZE; + break; + } + buf[count++] = (efx_ef10_filter_rx_match_pri( + table, spec->match_flags) * + HUNT_FILTER_TBL_ROWS + + filter_idx); + } + } + spin_unlock_bh(&efx->filter_lock); + return count; +} + +#ifdef CONFIG_RFS_ACCEL + +static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; + +static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, + struct efx_filter_spec *spec) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + struct efx_filter_spec *saved_spec; + unsigned int hash, i, depth = 1; + bool replacing = false; + int ins_index = -1; + u64 cookie; + s32 rc; + + /* Must be an RX filter without RSS and not for a multicast + * destination address (RFS only works for connected sockets). + * These restrictions allow us to pass only a tiny amount of + * data through to the completion function. + */ + EFX_WARN_ON_PARANOID(spec->flags != + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); + EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); + EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); + + hash = efx_ef10_filter_hash(spec); + + spin_lock_bh(&efx->filter_lock); + + /* Find any existing filter with the same match tuple or else + * a free slot to insert at. If an existing filter is busy, + * we have to give up. + */ + for (;;) { + i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); + saved_spec = efx_ef10_filter_entry_spec(table, i); + + if (!saved_spec) { + if (ins_index < 0) + ins_index = i; + } else if (efx_ef10_filter_equal(spec, saved_spec)) { + if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { + rc = -EBUSY; + goto fail_unlock; + } + if (spec->priority < saved_spec->priority) { + rc = -EPERM; + goto fail_unlock; + } + ins_index = i; + break; + } + + /* Once we reach the maximum search depth, use the + * first suitable slot or return -EBUSY if there was + * none + */ + if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { + if (ins_index < 0) { + rc = -EBUSY; + goto fail_unlock; + } + break; + } + + ++depth; + } + + /* Create a software table entry if necessary, and mark it + * busy. We might yet fail to insert, but any attempt to + * insert a conflicting filter while we're waiting for the + * firmware must find the busy entry. 
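A note on the ID arithmetic used by the lookup and removal paths above: a filter ID packs two values, the row inside the HUNT_FILTER_TBL_ROWS-entry hardware filter table (8192 rows here) and the priority index of the match type that the spec maps to, so _get_safe() recovers the row with a modulo and _remove_internal() cross-checks the match priority with a division. The standalone sketch below replays that arithmetic outside the driver; the helper names, the main() harness and the sample numbers are invented for illustration.

/* Standalone sketch (not driver code): how an EF10 RX filter ID is
 * composed from a match-priority index and a table row, mirroring the
 * "match_pri * HUNT_FILTER_TBL_ROWS + filter_idx" arithmetic used above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HUNT_FILTER_TBL_ROWS 8192	/* rows in the EF10 filter table */

static uint32_t filter_id_encode(unsigned int match_pri, unsigned int row)
{
	return match_pri * HUNT_FILTER_TBL_ROWS + row;
}

static unsigned int filter_id_row(uint32_t id)
{
	return id % HUNT_FILTER_TBL_ROWS;	/* table row, as in _get_safe() */
}

static unsigned int filter_id_match_pri(uint32_t id)
{
	return id / HUNT_FILTER_TBL_ROWS;	/* checked on removal */
}

int main(void)
{
	uint32_t id = filter_id_encode(3, 1234);	/* illustrative values */

	assert(filter_id_row(id) == 1234);
	assert(filter_id_match_pri(id) == 3);
	printf("id=%u row=%u match_pri=%u\n",
	       id, filter_id_row(id), filter_id_match_pri(id));
	return 0;
}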
+ */ + saved_spec = efx_ef10_filter_entry_spec(table, ins_index); + if (saved_spec) { + replacing = true; + } else { + saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); + if (!saved_spec) { + rc = -ENOMEM; + goto fail_unlock; + } + *saved_spec = *spec; + } + efx_ef10_filter_set_entry(table, ins_index, saved_spec, + EFX_EF10_FILTER_FLAG_BUSY); + + spin_unlock_bh(&efx->filter_lock); + + /* Pack up the variables needed on completion */ + cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; + + efx_ef10_filter_push_prep(efx, spec, inbuf, + table->entry[ins_index].handle, replacing); + efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + MC_CMD_FILTER_OP_OUT_LEN, + efx_ef10_filter_rfs_insert_complete, cookie); + + return ins_index; + +fail_unlock: + spin_unlock_bh(&efx->filter_lock); + return rc; +} + +static void +efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, + int rc, efx_dword_t *outbuf, + size_t outlen_actual) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + unsigned int ins_index, dmaq_id; + struct efx_filter_spec *spec; + bool replacing; + + /* Unpack the cookie */ + replacing = cookie >> 31; + ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); + dmaq_id = cookie & 0xffff; + + spin_lock_bh(&efx->filter_lock); + spec = efx_ef10_filter_entry_spec(table, ins_index); + if (rc == 0) { + table->entry[ins_index].handle = + MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); + if (replacing) + spec->dmaq_id = dmaq_id; + } else if (!replacing) { + kfree(spec); + spec = NULL; + } + efx_ef10_filter_set_entry(table, ins_index, spec, 0); + spin_unlock_bh(&efx->filter_lock); + + wake_up_all(&table->waitq); +} + +static void +efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, + unsigned long filter_idx, + int rc, efx_dword_t *outbuf, + size_t outlen_actual); + +static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int filter_idx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_filter_spec *spec = + efx_ef10_filter_entry_spec(table, filter_idx); + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FILTER_OP_IN_HANDLE_OFST + + MC_CMD_FILTER_OP_IN_HANDLE_LEN); + + if (!spec || + (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || + spec->priority != EFX_FILTER_PRI_HINT || + !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, + flow_id, filter_idx)) + return false; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + MC_CMD_FILTER_OP_IN_OP_REMOVE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, + efx_ef10_filter_rfs_expire_complete, filter_idx)) + return false; + + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; + return true; +} + +static void +efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, + unsigned long filter_idx, + int rc, efx_dword_t *outbuf, + size_t outlen_actual) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_filter_spec *spec = + efx_ef10_filter_entry_spec(table, filter_idx); + + spin_lock_bh(&efx->filter_lock); + if (rc == 0) { + kfree(spec); + efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } + table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; + wake_up_all(&table->waitq); + spin_unlock_bh(&efx->filter_lock); +} + +#endif /* CONFIG_RFS_ACCEL */ + +static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) +{ + int match_flags = 0; + +#define MAP_FLAG(gen_flag, mcdi_field) { \ + u32 old_mcdi_flags = 
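The asynchronous ARFS insert above cannot hand a pointer to its MCDI completion, so it packs everything the completion needs into the 64-bit cookie: the "replacing an existing entry" flag in bit 31, the insertion index starting at bit 16, and the DMA queue ID in the low 16 bits, exactly as unpacked again in efx_ef10_filter_rfs_insert_complete(). The standalone sketch below replays that pack/unpack; the function names and test values are hypothetical, only the bit layout is taken from the code above.

/* Standalone sketch (not driver code): the cookie layout used by the
 * asynchronous RFS filter insert above.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define HUNT_FILTER_TBL_ROWS 8192	/* table size used by the driver */

static uint64_t rfs_cookie_pack(bool replacing, unsigned int ins_index,
				unsigned int dmaq_id)
{
	return (uint64_t)replacing << 31 | ins_index << 16 | dmaq_id;
}

static void rfs_cookie_unpack(uint64_t cookie, bool *replacing,
			      unsigned int *ins_index, unsigned int *dmaq_id)
{
	*replacing = cookie >> 31;
	*ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
	*dmaq_id = cookie & 0xffff;
}

int main(void)
{
	bool replacing;
	unsigned int ins_index, dmaq_id;

	/* Illustrative values: replacing entry 4095, steering to queue 7 */
	rfs_cookie_unpack(rfs_cookie_pack(true, 4095, 7),
			  &replacing, &ins_index, &dmaq_id);
	assert(replacing && ins_index == 4095 && dmaq_id == 7);
	return 0;
}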
mcdi_flags; \ + mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ + if (mcdi_flags != old_mcdi_flags) \ + match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ + } + MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); + MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); + MAP_FLAG(REM_HOST, SRC_IP); + MAP_FLAG(LOC_HOST, DST_IP); + MAP_FLAG(REM_MAC, SRC_MAC); + MAP_FLAG(REM_PORT, SRC_PORT); + MAP_FLAG(LOC_MAC, DST_MAC); + MAP_FLAG(LOC_PORT, DST_PORT); + MAP_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FLAG(INNER_VID, INNER_VLAN); + MAP_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FLAG(IP_PROTO, IP_PROTO); +#undef MAP_FLAG + + /* Did we map them all? */ + if (mcdi_flags) + return -EINVAL; + + return match_flags; +} + +static int efx_ef10_filter_table_probe(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); + unsigned int pd_match_pri, pd_match_count; + struct efx_ef10_filter_table *table; + size_t outlen; + int rc; + + table = kzalloc(sizeof(*table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + /* Find out which RX filter types are supported, and their priorities */ + MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, + MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, + inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + if (rc) + goto fail; + pd_match_count = MCDI_VAR_ARRAY_LEN( + outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); + table->rx_match_count = 0; + + for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { + u32 mcdi_flags = + MCDI_ARRAY_DWORD( + outbuf, + GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, + pd_match_pri); + rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); + if (rc < 0) { + netif_dbg(efx, probe, efx->net_dev, + "%s: fw flags %#x pri %u not supported in driver\n", + __func__, mcdi_flags, pd_match_pri); + } else { + netif_dbg(efx, probe, efx->net_dev, + "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", + __func__, mcdi_flags, pd_match_pri, + rc, table->rx_match_count); + table->rx_match_flags[table->rx_match_count++] = rc; + } + } + + table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); + if (!table->entry) { + rc = -ENOMEM; + goto fail; + } + + efx->filter_state = table; + init_waitqueue_head(&table->waitq); + return 0; + +fail: + kfree(table); + return rc; +} + +static void efx_ef10_filter_table_restore(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_filter_spec *spec; + unsigned int filter_idx; + bool failed = false; + int rc; + + if (!nic_data->must_restore_filters) + return; + + spin_lock_bh(&efx->filter_lock); + + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (!spec) + continue; + + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; + spin_unlock_bh(&efx->filter_lock); + + rc = efx_ef10_filter_push(efx, spec, + &table->entry[filter_idx].handle, + false); + if (rc) + failed = true; + + spin_lock_bh(&efx->filter_lock); + if (rc) { + kfree(spec); + efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); + } else { + table->entry[filter_idx].spec &= + ~EFX_EF10_FILTER_FLAG_BUSY; + } + } + + spin_unlock_bh(&efx->filter_lock); + + if (failed) + netif_err(efx, hw, efx->net_dev, + "unable to restore all filters\n"); + else + nic_data->must_restore_filters = 
false; +} + +static void efx_ef10_filter_table_remove(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); + struct efx_filter_spec *spec; + unsigned int filter_idx; + int rc; + + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { + spec = efx_ef10_filter_entry_spec(table, filter_idx); + if (!spec) + continue; + + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, + efx_ef10_filter_is_exclusive(spec) ? + MC_CMD_FILTER_OP_IN_OP_REMOVE : + MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); + MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, + table->entry[filter_idx].handle); + rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc) + netdev_WARN(efx->net_dev, + "filter_idx=%#x handle=%#llx\n", + filter_idx, + table->entry[filter_idx].handle); + kfree(spec); + } + + vfree(table->entry); + kfree(table); +} + +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct efx_filter_spec spec; + bool remove_failed = false; + struct netdev_hw_addr *uc; + struct netdev_hw_addr *mc; + unsigned int filter_idx; + int i, n, rc; + + if (!efx_dev_registered(efx)) + return; + + /* Mark old filters that may need to be removed */ + spin_lock_bh(&efx->filter_lock); + n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; + for (i = 0; i < n; i++) { + filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS; + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; + } + n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count; + for (i = 0; i < n; i++) { + filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS; + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; + } + spin_unlock_bh(&efx->filter_lock); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + if (net_dev->flags & IFF_PROMISC || + netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) { + table->dev_uc_count = -1; + } else { + table->dev_uc_count = 1 + netdev_uc_count(net_dev); + ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); + i = 1; + netdev_for_each_uc_addr(uc, net_dev) { + ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); + i++; + } + } + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || + netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) { + table->dev_mc_count = -1; + } else { + table->dev_mc_count = 1 + netdev_mc_count(net_dev); + eth_broadcast_addr(table->dev_mc_list[0].addr); + i = 1; + netdev_for_each_mc_addr(mc, net_dev) { + ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); + i++; + } + } + netif_addr_unlock_bh(net_dev); + + /* Insert/renew unicast filters */ + if (table->dev_uc_count >= 0) { + for (i = 0; i < table->dev_uc_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + table->dev_uc_list[i].addr); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + /* Fall back to unicast-promisc */ + while (i--) + efx_ef10_filter_remove_safe( + efx, EFX_FILTER_PRI_AUTO, + table->dev_uc_list[i].id); + table->dev_uc_count = -1; + break; + } + table->dev_uc_list[i].id = rc; + } + } + if (table->dev_uc_count < 0) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + efx_filter_set_uc_def(&spec); + rc = efx_ef10_filter_insert(efx, &spec, 
true); + if (rc < 0) { + WARN_ON(1); + table->dev_uc_count = 0; + } else { + table->dev_uc_list[0].id = rc; + } + } + + /* Insert/renew multicast filters */ + if (table->dev_mc_count >= 0) { + for (i = 0; i < table->dev_mc_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + table->dev_mc_list[i].addr); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + /* Fall back to multicast-promisc */ + while (i--) + efx_ef10_filter_remove_safe( + efx, EFX_FILTER_PRI_AUTO, + table->dev_mc_list[i].id); + table->dev_mc_count = -1; + break; + } + table->dev_mc_list[i].id = rc; + } + } + if (table->dev_mc_count < 0) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + efx_filter_set_mc_def(&spec); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + WARN_ON(1); + table->dev_mc_count = 0; + } else { + table->dev_mc_list[0].id = rc; + } + } + + /* Remove filters that weren't renewed. Since nothing else + * changes the AUTO_OLD flag or removes these filters, we + * don't need to hold the filter_lock while scanning for + * these filters. + */ + for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { + if (ACCESS_ONCE(table->entry[i].spec) & + EFX_EF10_FILTER_FLAG_AUTO_OLD) { + if (efx_ef10_filter_remove_internal( + efx, 1U << EFX_FILTER_PRI_AUTO, + i, true) < 0) + remove_failed = true; + } + } + WARN_ON(remove_failed); +} + +static int efx_ef10_mac_reconfigure(struct efx_nic *efx) +{ + efx_ef10_filter_sync_rx_mode(efx); + + return efx_mcdi_set_mac(efx); +} + +static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); + return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +/* MC BISTs follow a different poll mechanism to phy BISTs. + * The BIST is done in the poll handler on the MC, and the MCDI command + * will block until the BIST is done. + */ +static int efx_ef10_poll_bist(struct efx_nic *efx) +{ + int rc; + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); + size_t outlen; + u32 result; + + rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc != 0) + return rc; + + if (outlen < MC_CMD_POLL_BIST_OUT_LEN) + return -EIO; + + result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + switch (result) { + case MC_CMD_POLL_BIST_PASSED: + netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); + return 0; + case MC_CMD_POLL_BIST_TIMEOUT: + netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); + return -EIO; + case MC_CMD_POLL_BIST_FAILED: + netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); + return -EIO; + default: + netif_err(efx, hw, efx->net_dev, + "BIST returned unknown result %u", result); + return -EIO; + } +} + +static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) +{ + int rc; + + netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); + + rc = efx_ef10_start_bist(efx, bist_type); + if (rc != 0) + return rc; + + return efx_ef10_poll_bist(efx); +} + +static int +efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc, rc2; + + efx_reset_down(efx, RESET_TYPE_WORLD); + + rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, + NULL, 0, NULL, 0, NULL); + if (rc != 0) + goto out; + + tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; + tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? 
-1 : 1; + + rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); + +out: + rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); + return rc ? rc : rc2; +} + +#ifdef CONFIG_SFC_MTD + +struct efx_ef10_nvram_type_info { + u16 type, type_mask; + u8 port; + const char *name; +}; + +static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { + { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, + { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, + { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, + { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, + { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, + { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, + { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, +}; + +static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *part, + unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); + const struct efx_ef10_nvram_type_info *info; + size_t size, erase_size, outlen; + bool protected; + int rc; + + for (info = efx_ef10_nvram_types; ; info++) { + if (info == + efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) + return -ENODEV; + if ((type & ~info->type_mask) == info->type) + break; + } + if (info->port != efx_port_num(efx)) + return -ENODEV; + + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); + if (rc) + return rc; + if (protected) + return -ENODEV; /* hide it */ + + part->nvram_type = type; + + MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) + return -EIO; + if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & + (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) + part->fw_subtype = MCDI_DWORD(outbuf, + NVRAM_METADATA_OUT_SUBTYPE); + + part->common.dev_type_name = "EF10 NVRAM manager"; + part->common.type_name = info->name; + + part->common.mtd.type = MTD_NORFLASH; + part->common.mtd.flags = MTD_CAP_NORFLASH; + part->common.mtd.size = size; + part->common.mtd.erasesize = erase_size; + + return 0; +} + +static int efx_ef10_mtd_probe(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); + struct efx_mcdi_mtd_partition *parts; + size_t outlen, n_parts_total, i, n_parts; + unsigned int type; + int rc; + + ASSERT_RTNL(); + + BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) + return -EIO; + + n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); + if (n_parts_total > + MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) + return -EIO; + + parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + n_parts = 0; + for (i = 0; i < n_parts_total; i++) { + type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, + i); + rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); + if (rc == 0) + n_parts++; + 
else if (rc != -ENODEV) + goto fail; + } + + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); +fail: + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + +static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); +} + +static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, + bool temp) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); + int rc; + + if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || + channel->sync_events_state == SYNC_EVENTS_VALID || + (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) + return 0; + channel->sync_events_state = SYNC_EVENTS_REQUESTED; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, + channel->channel); + + rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + + if (rc != 0) + channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : + SYNC_EVENTS_DISABLED; + + return rc; +} + +static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, + bool temp) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); + int rc; + + if (channel->sync_events_state == SYNC_EVENTS_DISABLED || + (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) + return 0; + if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { + channel->sync_events_state = SYNC_EVENTS_DISABLED; + return 0; + } + channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : + SYNC_EVENTS_DISABLED; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, + MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); + MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, + channel->channel); + + rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, + inbuf, sizeof(inbuf), NULL, 0, NULL); + + return rc; +} + +static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, + bool temp) +{ + int (*set)(struct efx_channel *channel, bool temp); + struct efx_channel *channel; + + set = en ? 
+ efx_ef10_rx_enable_timestamping : + efx_ef10_rx_disable_timestamping; + + efx_for_each_channel(channel, efx) { + int rc = set(channel, temp); + if (en && rc != 0) { + efx_ef10_ptp_set_ts_sync_events(efx, false, temp); + return rc; + } + } + + return 0; +} + +static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, + struct hwtstamp_config *init) +{ + int rc; + + switch (init->rx_filter) { + case HWTSTAMP_FILTER_NONE: + efx_ef10_ptp_set_ts_sync_events(efx, false, false); + /* if TX timestamping is still requested then leave PTP on */ + return efx_ptp_change_mode(efx, + init->tx_type != HWTSTAMP_TX_OFF, 0); + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_ALL; + rc = efx_ptp_change_mode(efx, true, 0); + if (!rc) + rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); + if (rc) + efx_ptp_change_mode(efx, false, 0); + return rc; + default: + return -ERANGE; + } +} + +const struct efx_nic_type efx_hunt_a0_nic_type = { + .mem_map_size = efx_ef10_mem_map_size, + .probe = efx_ef10_probe, + .remove = efx_ef10_remove, + .dimension_resources = efx_ef10_dimension_resources, + .init = efx_ef10_init_nic, + .fini = efx_port_dummy_op_void, + .map_reset_reason = efx_mcdi_map_reset_reason, + .map_reset_flags = efx_ef10_map_reset_flags, + .reset = efx_ef10_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_ef10_fini_dmaq, + .prepare_flr = efx_ef10_prepare_flr, + .finish_flr = efx_port_dummy_op_void, + .describe_stats = efx_ef10_describe_stats, + .update_stats = efx_ef10_update_stats, + .start_stats = efx_mcdi_mac_start_stats, + .pull_stats = efx_mcdi_mac_pull_stats, + .stop_stats = efx_mcdi_mac_stop_stats, + .set_id_led = efx_mcdi_set_id_led, + .push_irq_moderation = efx_ef10_push_irq_moderation, + .reconfigure_mac = efx_ef10_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = efx_ef10_get_wol, + .set_wol = efx_ef10_set_wol, + .resume_wol = efx_port_dummy_op_void, + .test_chip = efx_ef10_test_chip, + .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = efx_ef10_mcdi_request, + .mcdi_poll_response = efx_ef10_mcdi_poll_response, + .mcdi_read_response = efx_ef10_mcdi_read_response, + .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef10_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .irq_handle_msi = efx_ef10_msi_interrupt, + .irq_handle_legacy = efx_ef10_legacy_interrupt, + .tx_probe = efx_ef10_tx_probe, + .tx_init = efx_ef10_tx_init, + .tx_remove = efx_ef10_tx_remove, + .tx_write = efx_ef10_tx_write, + .rx_push_rss_config = efx_ef10_rx_push_rss_config, + .rx_probe = efx_ef10_rx_probe, + .rx_init = efx_ef10_rx_init, + .rx_remove = efx_ef10_rx_remove, + .rx_write = efx_ef10_rx_write, + .rx_defer_refill = efx_ef10_rx_defer_refill, + .ev_probe = efx_ef10_ev_probe, + .ev_init = efx_ef10_ev_init, + .ev_fini = efx_ef10_ev_fini, + .ev_remove = efx_ef10_ev_remove, + .ev_process = efx_ef10_ev_process, + 
.ev_read_ack = efx_ef10_ev_read_ack, + .ev_test_generate = efx_ef10_ev_test_generate, + .filter_table_probe = efx_ef10_filter_table_probe, + .filter_table_restore = efx_ef10_filter_table_restore, + .filter_table_remove = efx_ef10_filter_table_remove, + .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, + .filter_insert = efx_ef10_filter_insert, + .filter_remove_safe = efx_ef10_filter_remove_safe, + .filter_get_safe = efx_ef10_filter_get_safe, + .filter_clear_rx = efx_ef10_filter_clear_rx, + .filter_count_rx_used = efx_ef10_filter_count_rx_used, + .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_insert = efx_ef10_filter_rfs_insert, + .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = efx_ef10_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif + .ptp_write_host_time = efx_ef10_ptp_write_host_time, + .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, + .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, + .sriov_init = efx_ef10_sriov_init, + .sriov_fini = efx_ef10_sriov_fini, + .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed, + .sriov_wanted = efx_ef10_sriov_wanted, + .sriov_reset = efx_ef10_sriov_reset, + + .revision = EFX_REV_HUNT_A0, + .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, + .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, + .can_rx_scatter = true, + .always_rx_scatter = true, + .max_interrupt_mode = EFX_INT_MODE_MSIX, + .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, + .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXHASH | NETIF_F_NTUPLE), + .mcdi_max_ver = 2, + .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, + .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_ALL, +}; diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h new file mode 100644 index 000000000..62a55dde6 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef10_regs.h @@ -0,0 +1,355 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2012-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EF10_REGS_H +#define EFX_EF10_REGS_H + +/* EF10 hardware architecture definitions have a name prefix following + * the format: + * + * E<type>_<min_rev><max_rev>_<name> + * + * The following <type> strings are used: + * + * MMIO register Host memory structure + * ------------------------------------------------------------- + * Address R + * Bitfield RF SF + * Enumerator FE SE + * + * <min_rev> is the first revision to which the definition applies: + * + * D: Huntington A0 + * + * If the definition has been changed or removed in later revisions + * then <max_rev> is the last revision to which the definition applies; + * otherwise it is "Z". 
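Every register and descriptor field defined below is described by an _LBN (lowest bit number) and _WIDTH pair rather than by a ready-made mask. The sketch below shows how such a pair turns into mask/extract/insert operations on a 32-bit dword; the field_* helpers are simplified stand-ins for the driver's own bitfield macros, and the EVQ_RPTR example values are purely illustrative.

/* Standalone sketch (not driver code): using _LBN/_WIDTH pairs such as
 * the ones defined below to build and pick apart register dwords.
 */
#include <assert.h>
#include <stdint.h>

#define ERF_DZ_EVQ_RPTR_LBN 0		/* copied from the definitions below */
#define ERF_DZ_EVQ_RPTR_WIDTH 15
#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1

static uint32_t field_mask(unsigned int lbn, unsigned int width)
{
	return (width == 32 ? 0xffffffffu : (1u << width) - 1) << lbn;
}

static uint32_t field_get(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword & field_mask(lbn, width)) >> lbn;
}

static uint32_t field_set(uint32_t dword, unsigned int lbn, unsigned int width,
			  uint32_t value)
{
	return (dword & ~field_mask(lbn, width)) | (value << lbn);
}

int main(void)
{
	/* Compose an EVQ read-pointer doorbell value: VLD=1, RPTR=0x123 */
	uint32_t dword = 0;

	dword = field_set(dword, ERF_DZ_EVQ_RPTR_VLD_LBN,
			  ERF_DZ_EVQ_RPTR_VLD_WIDTH, 1);
	dword = field_set(dword, ERF_DZ_EVQ_RPTR_LBN,
			  ERF_DZ_EVQ_RPTR_WIDTH, 0x123);
	assert(field_get(dword, ERF_DZ_EVQ_RPTR_LBN,
			 ERF_DZ_EVQ_RPTR_WIDTH) == 0x123);
	return 0;
}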
+ */ + +/************************************************************************** + * + * EF10 registers and descriptors + * + ************************************************************************** + */ + +/* BIU_HW_REV_ID_REG: */ +#define ER_DZ_BIU_HW_REV_ID 0x00000000 +#define ERF_DZ_HW_REV_ID_LBN 0 +#define ERF_DZ_HW_REV_ID_WIDTH 32 + +/* BIU_MC_SFT_STATUS_REG: */ +#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010 +#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4 +#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8 +#define ERF_DZ_MC_SFT_STATUS_LBN 0 +#define ERF_DZ_MC_SFT_STATUS_WIDTH 32 + +/* BIU_INT_ISR_REG: */ +#define ER_DZ_BIU_INT_ISR 0x00000090 +#define ERF_DZ_ISR_REG_LBN 0 +#define ERF_DZ_ISR_REG_WIDTH 32 + +/* MC_DB_LWRD_REG: */ +#define ER_DZ_MC_DB_LWRD 0x00000200 +#define ERF_DZ_MC_DOORBELL_L_LBN 0 +#define ERF_DZ_MC_DOORBELL_L_WIDTH 32 + +/* MC_DB_HWRD_REG: */ +#define ER_DZ_MC_DB_HWRD 0x00000204 +#define ERF_DZ_MC_DOORBELL_H_LBN 0 +#define ERF_DZ_MC_DOORBELL_H_WIDTH 32 + +/* EVQ_RPTR_REG: */ +#define ER_DZ_EVQ_RPTR 0x00000400 +#define ER_DZ_EVQ_RPTR_STEP 8192 +#define ER_DZ_EVQ_RPTR_ROWS 2048 +#define ERF_DZ_EVQ_RPTR_VLD_LBN 15 +#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1 +#define ERF_DZ_EVQ_RPTR_LBN 0 +#define ERF_DZ_EVQ_RPTR_WIDTH 15 + +/* EVQ_TMR_REG: */ +#define ER_DZ_EVQ_TMR 0x00000420 +#define ER_DZ_EVQ_TMR_STEP 8192 +#define ER_DZ_EVQ_TMR_ROWS 2048 +#define ERF_DZ_TC_TIMER_MODE_LBN 14 +#define ERF_DZ_TC_TIMER_MODE_WIDTH 2 +#define ERF_DZ_TC_TIMER_VAL_LBN 0 +#define ERF_DZ_TC_TIMER_VAL_WIDTH 14 + +/* RX_DESC_UPD_REG: */ +#define ER_DZ_RX_DESC_UPD 0x00000830 +#define ER_DZ_RX_DESC_UPD_STEP 8192 +#define ER_DZ_RX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RX_DESC_WPTR_LBN 0 +#define ERF_DZ_RX_DESC_WPTR_WIDTH 12 + +/* TX_DESC_UPD_REG: */ +#define ER_DZ_TX_DESC_UPD 0x00000a10 +#define ER_DZ_TX_DESC_UPD_STEP 8192 +#define ER_DZ_TX_DESC_UPD_ROWS 2048 +#define ERF_DZ_RSVD_LBN 76 +#define ERF_DZ_RSVD_WIDTH 20 +#define ERF_DZ_TX_DESC_WPTR_LBN 64 +#define ERF_DZ_TX_DESC_WPTR_WIDTH 12 +#define ERF_DZ_TX_DESC_HWORD_LBN 32 +#define ERF_DZ_TX_DESC_HWORD_WIDTH 32 +#define ERF_DZ_TX_DESC_LWORD_LBN 0 +#define ERF_DZ_TX_DESC_LWORD_WIDTH 32 + +/* DRIVER_EV */ +#define ESF_DZ_DRV_CODE_LBN 60 +#define ESF_DZ_DRV_CODE_WIDTH 4 +#define ESF_DZ_DRV_SUB_CODE_LBN 56 +#define ESF_DZ_DRV_SUB_CODE_WIDTH 4 +#define ESE_DZ_DRV_TIMER_EV 3 +#define ESE_DZ_DRV_START_UP_EV 2 +#define ESE_DZ_DRV_WAKE_UP_EV 1 +#define ESF_DZ_DRV_SUB_DATA_LBN 0 +#define ESF_DZ_DRV_SUB_DATA_WIDTH 56 +#define ESF_DZ_DRV_EVQ_ID_LBN 0 +#define ESF_DZ_DRV_EVQ_ID_WIDTH 14 +#define ESF_DZ_DRV_TMR_ID_LBN 0 +#define ESF_DZ_DRV_TMR_ID_WIDTH 14 + +/* EVENT_ENTRY */ +#define ESF_DZ_EV_CODE_LBN 60 +#define ESF_DZ_EV_CODE_WIDTH 4 +#define ESE_DZ_EV_CODE_MCDI_EV 12 +#define ESE_DZ_EV_CODE_DRIVER_EV 5 +#define ESE_DZ_EV_CODE_TX_EV 2 +#define ESE_DZ_EV_CODE_RX_EV 0 +#define ESE_DZ_OTHER other +#define ESF_DZ_EV_DATA_LBN 0 +#define ESF_DZ_EV_DATA_WIDTH 60 + +/* MC_EVENT */ +#define ESF_DZ_MC_CODE_LBN 60 +#define ESF_DZ_MC_CODE_WIDTH 4 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_MC_DROP_EVENT_LBN 58 +#define ESF_DZ_MC_DROP_EVENT_WIDTH 1 +#define ESF_DZ_MC_SOFT_LBN 0 +#define ESF_DZ_MC_SOFT_WIDTH 58 + +/* RX_EVENT */ +#define ESF_DZ_RX_CODE_LBN 60 +#define ESF_DZ_RX_CODE_WIDTH 4 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_RX_DROP_EVENT_LBN 58 +#define ESF_DZ_RX_DROP_EVENT_WIDTH 1 +#define ESF_DZ_RX_EV_RSVD2_LBN 54 +#define ESF_DZ_RX_EV_RSVD2_WIDTH 4 +#define 
ESF_DZ_RX_EV_SOFT2_LBN 52 +#define ESF_DZ_RX_EV_SOFT2_WIDTH 2 +#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48 +#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4 +#define ESF_DZ_RX_L4_CLASS_LBN 45 +#define ESF_DZ_RX_L4_CLASS_WIDTH 3 +#define ESE_DZ_L4_CLASS_RSVD7 7 +#define ESE_DZ_L4_CLASS_RSVD6 6 +#define ESE_DZ_L4_CLASS_RSVD5 5 +#define ESE_DZ_L4_CLASS_RSVD4 4 +#define ESE_DZ_L4_CLASS_RSVD3 3 +#define ESE_DZ_L4_CLASS_UDP 2 +#define ESE_DZ_L4_CLASS_TCP 1 +#define ESE_DZ_L4_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_L3_CLASS_LBN 42 +#define ESF_DZ_RX_L3_CLASS_WIDTH 3 +#define ESE_DZ_L3_CLASS_RSVD7 7 +#define ESE_DZ_L3_CLASS_IP6_FRAG 6 +#define ESE_DZ_L3_CLASS_ARP 5 +#define ESE_DZ_L3_CLASS_IP4_FRAG 4 +#define ESE_DZ_L3_CLASS_FCOE 3 +#define ESE_DZ_L3_CLASS_IP6 2 +#define ESE_DZ_L3_CLASS_IP4 1 +#define ESE_DZ_L3_CLASS_UNKNOWN 0 +#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39 +#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3 +#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7 +#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6 +#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5 +#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4 +#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3 +#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2 +#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1 +#define ESE_DZ_ETH_TAG_CLASS_NONE 0 +#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36 +#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3 +#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2 +#define ESE_DZ_ETH_BASE_CLASS_LLC 1 +#define ESE_DZ_ETH_BASE_CLASS_ETH2 0 +#define ESF_DZ_RX_MAC_CLASS_LBN 35 +#define ESF_DZ_RX_MAC_CLASS_WIDTH 1 +#define ESE_DZ_MAC_CLASS_MCAST 1 +#define ESE_DZ_MAC_CLASS_UCAST 0 +#define ESF_DZ_RX_EV_SOFT1_LBN 32 +#define ESF_DZ_RX_EV_SOFT1_WIDTH 3 +#define ESF_DZ_RX_EV_RSVD1_LBN 31 +#define ESF_DZ_RX_EV_RSVD1_WIDTH 1 +#define ESF_DZ_RX_ABORT_LBN 30 +#define ESF_DZ_RX_ABORT_WIDTH 1 +#define ESF_DZ_RX_ECC_ERR_LBN 29 +#define ESF_DZ_RX_ECC_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC1_ERR_LBN 28 +#define ESF_DZ_RX_CRC1_ERR_WIDTH 1 +#define ESF_DZ_RX_CRC0_ERR_LBN 27 +#define ESF_DZ_RX_CRC0_ERR_WIDTH 1 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26 +#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25 +#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1 +#define ESF_DZ_RX_ECRC_ERR_LBN 24 +#define ESF_DZ_RX_ECRC_ERR_WIDTH 1 +#define ESF_DZ_RX_QLABEL_LBN 16 +#define ESF_DZ_RX_QLABEL_WIDTH 5 +#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15 +#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1 +#define ESF_DZ_RX_CONT_LBN 14 +#define ESF_DZ_RX_CONT_WIDTH 1 +#define ESF_DZ_RX_BYTES_LBN 0 +#define ESF_DZ_RX_BYTES_WIDTH 14 + +/* RX_KER_DESC */ +#define ESF_DZ_RX_KER_RESERVED_LBN 62 +#define ESF_DZ_RX_KER_RESERVED_WIDTH 2 +#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 + +/* TX_CSUM_TSTAMP_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TIMESTAMP_LBN 5 +#define ESF_DZ_TX_TIMESTAMP_WIDTH 1 +#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2 +#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5 +#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3 +#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2 +#define ESE_DZ_TX_OPTION_CRC_FCOE 1 +#define ESE_DZ_TX_OPTION_CRC_OFF 0 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1 +#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1 +#define 
ESF_DZ_TX_OPTION_IP_CSUM_LBN 0 +#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1 + +/* TX_EVENT */ +#define ESF_DZ_TX_CODE_LBN 60 +#define ESF_DZ_TX_CODE_WIDTH 4 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59 +#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1 +#define ESF_DZ_TX_DROP_EVENT_LBN 58 +#define ESF_DZ_TX_DROP_EVENT_WIDTH 1 +#define ESF_DZ_TX_EV_RSVD_LBN 48 +#define ESF_DZ_TX_EV_RSVD_WIDTH 10 +#define ESF_DZ_TX_SOFT2_LBN 32 +#define ESF_DZ_TX_SOFT2_WIDTH 16 +#define ESF_DZ_TX_CAN_MERGE_LBN 31 +#define ESF_DZ_TX_CAN_MERGE_WIDTH 1 +#define ESF_DZ_TX_SOFT1_LBN 24 +#define ESF_DZ_TX_SOFT1_WIDTH 7 +#define ESF_DZ_TX_QLABEL_LBN 16 +#define ESF_DZ_TX_QLABEL_WIDTH 5 +#define ESF_DZ_TX_DESCR_INDX_LBN 0 +#define ESF_DZ_TX_DESCR_INDX_WIDTH 16 + +/* TX_KER_DESC */ +#define ESF_DZ_TX_KER_TYPE_LBN 63 +#define ESF_DZ_TX_KER_TYPE_WIDTH 1 +#define ESF_DZ_TX_KER_CONT_LBN 62 +#define ESF_DZ_TX_KER_CONT_WIDTH 1 +#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48 +#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14 +#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48 + +/* TX_PIO_DESC */ +#define ESF_DZ_TX_PIO_TYPE_LBN 63 +#define ESF_DZ_TX_PIO_TYPE_WIDTH 1 +#define ESF_DZ_TX_PIO_OPT_LBN 60 +#define ESF_DZ_TX_PIO_OPT_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_PIO 1 +#define ESF_DZ_TX_PIO_CONT_LBN 59 +#define ESF_DZ_TX_PIO_CONT_WIDTH 1 +#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32 +#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12 +#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0 +#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12 + +/* TX_TSO_DESC */ +#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 +#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 +#define ESF_DZ_TX_OPTION_TYPE_LBN 60 +#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3 +#define ESE_DZ_TX_OPTION_DESC_TSO 7 +#define ESE_DZ_TX_OPTION_DESC_VLAN 6 +#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0 +#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48 +#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8 +#define ESF_DZ_TX_TSO_IP_ID_LBN 32 +#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16 +#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 +#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 + +/*************************************************************************/ + +/* TX_DESC_UPD_REG: Transmit descriptor update register. + * We may write just one dword of these registers. + */ +#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4) +#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32) +#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH + +/* The workaround for bug 35388 requires multiplexing writes through + * the TX_DESC_UPD_DWORD address. 
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost) + * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits) + * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost) + */ +#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD +#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8 +#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8 +#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9 +#define ERF_DD_EVQ_IND_RPTR_LBN 0 +#define ERF_DD_EVQ_IND_RPTR_WIDTH 8 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10 +#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2 +#define EFE_DD_EVQ_IND_TIMER_FLAGS 3 +#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8 +#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2 +#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0 +#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8 + +/* TX_PIOBUF + * PIO buffer aperture (paged) + */ +#define ER_DZ_TX_PIOBUF 4096 +#define ER_DZ_TX_PIOBUF_SIZE 2048 + +/* RX packet prefix */ +#define ES_DZ_RX_PREFIX_HASH_OFST 0 +#define ES_DZ_RX_PREFIX_VLAN1_OFST 4 +#define ES_DZ_RX_PREFIX_VLAN2_OFST 6 +#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8 +#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10 +#define ES_DZ_RX_PREFIX_SIZE 14 + +#endif /* EFX_EF10_REGS_H */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c new file mode 100644 index 000000000..4b00545a3 --- /dev/null +++ b/drivers/net/ethernet/sfc/efx.c @@ -0,0 +1,3349 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
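The ES_DZ_RX_PREFIX_* offsets above describe the 14-byte metadata prefix that the EF10 datapath places in front of every received frame; the efx_hunt_a0_nic_type table earlier points rx_hash_offset and rx_ts_offset at two of these fields. Below is a minimal standalone sketch of decoding such a prefix, assuming little-endian fields; the helpers and the sample bytes are made up for the example and are not the driver's RX path.

/* Standalone sketch (not driver code): pulling the RSS hash, packet
 * length and timestamp out of a 14-byte EF10 RX packet prefix.
 */
#include <stdint.h>
#include <stdio.h>

#define ES_DZ_RX_PREFIX_HASH_OFST 0
#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
#define ES_DZ_RX_PREFIX_SIZE 14

static uint32_t le32_at(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint16_t le16_at(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

int main(void)
{
	/* A made-up prefix: RSS hash 0xdeadbeef, packet length 60 bytes */
	uint8_t prefix[ES_DZ_RX_PREFIX_SIZE] = { 0 };

	prefix[0] = 0xef; prefix[1] = 0xbe; prefix[2] = 0xad; prefix[3] = 0xde;
	prefix[ES_DZ_RX_PREFIX_PKTLEN_OFST] = 60;

	printf("hash=%#x len=%u tstamp=%u\n",
	       le32_at(prefix + ES_DZ_RX_PREFIX_HASH_OFST),
	       le16_at(prefix + ES_DZ_RX_PREFIX_PKTLEN_OFST),
	       le32_at(prefix + ES_DZ_RX_PREFIX_TSTAMP_OFST));
	return 0;
}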
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "nic.h" +#include "selftest.h" + +#include "mcdi.h" +#include "workarounds.h" + +/************************************************************************** + * + * Type name strings + * + ************************************************************************** + */ + +/* Loopback mode names (see LOOPBACK_MODE()) */ +const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; +const char *const efx_loopback_mode_names[] = { + [LOOPBACK_NONE] = "NONE", + [LOOPBACK_DATA] = "DATAPATH", + [LOOPBACK_GMAC] = "GMAC", + [LOOPBACK_XGMII] = "XGMII", + [LOOPBACK_XGXS] = "XGXS", + [LOOPBACK_XAUI] = "XAUI", + [LOOPBACK_GMII] = "GMII", + [LOOPBACK_SGMII] = "SGMII", + [LOOPBACK_XGBR] = "XGBR", + [LOOPBACK_XFI] = "XFI", + [LOOPBACK_XAUI_FAR] = "XAUI_FAR", + [LOOPBACK_GMII_FAR] = "GMII_FAR", + [LOOPBACK_SGMII_FAR] = "SGMII_FAR", + [LOOPBACK_XFI_FAR] = "XFI_FAR", + [LOOPBACK_GPHY] = "GPHY", + [LOOPBACK_PHYXS] = "PHYXS", + [LOOPBACK_PCS] = "PCS", + [LOOPBACK_PMAPMD] = "PMA/PMD", + [LOOPBACK_XPORT] = "XPORT", + [LOOPBACK_XGMII_WS] = "XGMII_WS", + [LOOPBACK_XAUI_WS] = "XAUI_WS", + [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", + [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", + [LOOPBACK_GMII_WS] = "GMII_WS", + [LOOPBACK_XFI_WS] = "XFI_WS", + [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", + [LOOPBACK_PHYXS_WS] = "PHYXS_WS", +}; + +const unsigned int efx_reset_type_max = RESET_TYPE_MAX; +const char *const efx_reset_type_names[] = { + [RESET_TYPE_INVISIBLE] = "INVISIBLE", + [RESET_TYPE_ALL] = "ALL", + [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", + [RESET_TYPE_WORLD] = "WORLD", + [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_MC_BIST] = "MC_BIST", + [RESET_TYPE_DISABLE] = "DISABLE", + [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", + [RESET_TYPE_INT_ERROR] = "INT_ERROR", + [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", + [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", + [RESET_TYPE_TX_SKIP] = "TX_SKIP", + [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", + [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)", +}; + +/* Reset workqueue. If any NIC has a hardware failure then a reset will be + * queued onto this work queue. This is not a per-nic work queue, because + * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. + */ +static struct workqueue_struct *reset_workqueue; + +/* How often and how many times to poll for a reset while waiting for a + * BIST that another function started to complete. + */ +#define BIST_WAIT_DELAY_MS 100 +#define BIST_WAIT_DELAY_COUNT 100 + +/************************************************************************** + * + * Configurable values + * + *************************************************************************/ + +/* + * Use separate channels for TX and RX events + * + * Set this to 1 to use separate channels for TX and RX. It allows us + * to control interrupt affinity separately for TX and RX. + * + * This is only used in MSI-X interrupt mode + */ +static bool separate_tx_channels; +module_param(separate_tx_channels, bool, 0444); +MODULE_PARM_DESC(separate_tx_channels, + "Use separate channels for TX and RX"); + +/* This is the weight assigned to each of the (per-channel) virtual + * NAPI devices. + */ +static int napi_weight = 64; + +/* This is the time (in jiffies) between invocations of the hardware + * monitor. 
+ * On Falcon-based NICs, this will: + * - Check the on-board hardware monitor; + * - Poll the link state and reconfigure the hardware as necessary. + * On Siena-based NICs for power systems with EEH support, this will give EEH a + * chance to start. + */ +static unsigned int efx_monitor_interval = 1 * HZ; + +/* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * The default for RX should strike a balance between increasing the + * round-trip latency and reducing overhead. + */ +static unsigned int rx_irq_mod_usec = 60; + +/* Initial interrupt moderation settings. They can be modified after + * module load with ethtool. + * + * This default is chosen to ensure that a 10G link does not go idle + * while a TX queue is stopped after it has become full. A queue is + * restarted when it drops below half full. The time this takes (assuming + * worst case 3 descriptors per packet and 1024 descriptors) is + * 512 / 3 * 1.2 = 205 usec. + */ +static unsigned int tx_irq_mod_usec = 150; + +/* This is the first interrupt mode to try out of: + * 0 => MSI-X + * 1 => MSI + * 2 => legacy + */ +static unsigned int interrupt_mode; + +/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), + * i.e. the number of CPUs among which we may distribute simultaneous + * interrupt handling. + * + * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. + * The default (0) means to assign an interrupt to each core. + */ +static unsigned int rss_cpus; +module_param(rss_cpus, uint, 0444); +MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); + +static bool phy_flash_cfg; +module_param(phy_flash_cfg, bool, 0644); +MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); + +static unsigned irq_adapt_low_thresh = 8000; +module_param(irq_adapt_low_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_low_thresh, + "Threshold score for reducing IRQ moderation"); + +static unsigned irq_adapt_high_thresh = 16000; +module_param(irq_adapt_high_thresh, uint, 0644); +MODULE_PARM_DESC(irq_adapt_high_thresh, + "Threshold score for increasing IRQ moderation"); + +static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFDOWN | + NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | + NETIF_MSG_TX_ERR | NETIF_MSG_HW); +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); + +/************************************************************************** + * + * Utility functions and prototypes + * + *************************************************************************/ + +static int efx_soft_enable_interrupts(struct efx_nic *efx); +static void efx_soft_disable_interrupts(struct efx_nic *efx); +static void efx_remove_channel(struct efx_channel *channel); +static void efx_remove_channels(struct efx_nic *efx); +static const struct efx_channel_type efx_default_channel_type; +static void efx_remove_port(struct efx_nic *efx); +static void efx_init_napi_channel(struct efx_channel *channel); +static void efx_fini_napi(struct efx_nic *efx); +static void efx_fini_napi_channel(struct efx_channel *channel); +static void efx_fini_struct(struct efx_nic *efx); +static void efx_start_all(struct efx_nic *efx); +static void efx_stop_all(struct efx_nic *efx); + +#define EFX_ASSERT_RESET_SERIALISED(efx) \ + do { \ + if ((efx->state == STATE_READY) || \ + (efx->state == STATE_RECOVERY) || \ + (efx->state == STATE_DISABLED)) \ + ASSERT_RTNL(); \ + } while (0) + +static 
int efx_check_disabled(struct efx_nic *efx) +{ + if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { + netif_err(efx, drv, efx->net_dev, + "device is disabled due to earlier errors\n"); + return -EIO; + } + return 0; +} + +/************************************************************************** + * + * Event queue processing + * + *************************************************************************/ + +/* Process channel's event queue + * + * This function is responsible for processing the event queue of a + * single channel. The caller must guarantee that this function will + * never be concurrently called more than once on the same channel, + * though different channels may be being processed concurrently. + */ +static int efx_process_channel(struct efx_channel *channel, int budget) +{ + int spent; + + if (unlikely(!channel->enabled)) + return 0; + + spent = efx_nic_process_eventq(channel, budget); + if (spent && efx_channel_has_rx_queue(channel)) { + struct efx_rx_queue *rx_queue = + efx_channel_get_rx_queue(channel); + + efx_rx_flush_packet(channel); + efx_fast_push_rx_descriptors(rx_queue, true); + } + + return spent; +} + +/* NAPI poll handler + * + * NAPI guarantees serialisation of polls of the same device, which + * provides the guarantee required by efx_process_channel(). + */ +static int efx_poll(struct napi_struct *napi, int budget) +{ + struct efx_channel *channel = + container_of(napi, struct efx_channel, napi_str); + struct efx_nic *efx = channel->efx; + int spent; + + if (!efx_channel_lock_napi(channel)) + return budget; + + netif_vdbg(efx, intr, efx->net_dev, + "channel %d NAPI poll executing on CPU %d\n", + channel->channel, raw_smp_processor_id()); + + spent = efx_process_channel(channel, budget); + + if (spent < budget) { + if (efx_channel_has_rx_queue(channel) && + efx->irq_rx_adaptive && + unlikely(++channel->irq_count == 1000)) { + if (unlikely(channel->irq_mod_score < + irq_adapt_low_thresh)) { + if (channel->irq_moderation > 1) { + channel->irq_moderation -= 1; + efx->type->push_irq_moderation(channel); + } + } else if (unlikely(channel->irq_mod_score > + irq_adapt_high_thresh)) { + if (channel->irq_moderation < + efx->irq_rx_moderation) { + channel->irq_moderation += 1; + efx->type->push_irq_moderation(channel); + } + } + channel->irq_count = 0; + channel->irq_mod_score = 0; + } + + efx_filter_rfs_expire(channel); + + /* There is no race here; although napi_disable() will + * only wait for napi_complete(), this isn't a problem + * since efx_nic_eventq_read_ack() will have no effect if + * interrupts have already been disabled. + */ + napi_complete(napi); + efx_nic_eventq_read_ack(channel); + } + + efx_channel_unlock_napi(channel); + return spent; +} + +/* Create event queue + * Event queue memory allocations are done only once. If the channel + * is reset, the memory buffer will be reused; this guards against + * errors during channel reset and also simplifies interrupt handling. + */ +static int efx_probe_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned long entries; + + netif_dbg(efx, probe, efx->net_dev, + "chan %d create event queue\n", channel->channel); + + /* Build an event queue with room for one event per tx and rx buffer, + * plus some extra for link state events and MCDI completions. 
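The NAPI poll handler above also performs adaptive interrupt moderation: every 1000 interrupts it compares the channel's accumulated work score against the irq_adapt_low_thresh and irq_adapt_high_thresh module parameters and steps the moderation value down or up, bounded below by 1 and above by the configured RX moderation. The sketch below isolates that hysteresis with plain integers; the struct, the function name and the ceiling of 240 are invented for the example, while the 8000/16000 defaults match the module parameters declared earlier in this file.

/* Standalone sketch (not driver code): the adaptive IRQ moderation step
 * taken by the NAPI poll handler above, reduced to plain integers.
 */
#include <stdio.h>

struct chan_stats {
	unsigned int irq_count;		/* interrupts since last adjustment */
	unsigned int irq_mod_score;	/* work done in that window */
	unsigned int irq_moderation;	/* current moderation value */
};

static void maybe_adapt_moderation(struct chan_stats *c,
				   unsigned int low_thresh,
				   unsigned int high_thresh,
				   unsigned int moderation_max)
{
	if (++c->irq_count < 1000)
		return;			/* only re-evaluate every 1000 IRQs */

	if (c->irq_mod_score < low_thresh && c->irq_moderation > 1)
		c->irq_moderation--;	/* quiet: interrupt sooner */
	else if (c->irq_mod_score > high_thresh &&
		 c->irq_moderation < moderation_max)
		c->irq_moderation++;	/* busy: batch more work per IRQ */

	c->irq_count = 0;
	c->irq_mod_score = 0;
}

int main(void)
{
	struct chan_stats c = { .irq_moderation = 60 };
	unsigned int i;

	c.irq_mod_score = 20000;	/* a busy window, above the high threshold */
	for (i = 0; i < 1000; i++)
		maybe_adapt_moderation(&c, 8000, 16000, 240);
	printf("moderation after busy window: %u\n", c.irq_moderation);
	return 0;
}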
*/ + entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); + EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); + channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; + + return efx_nic_probe_eventq(channel); +} + +/* Prepare channel's event queue */ +static int efx_init_eventq(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + int rc; + + EFX_WARN_ON_PARANOID(channel->eventq_init); + + netif_dbg(efx, drv, efx->net_dev, + "chan %d init event queue\n", channel->channel); + + rc = efx_nic_init_eventq(channel); + if (rc == 0) { + efx->type->push_irq_moderation(channel); + channel->eventq_read_ptr = 0; + channel->eventq_init = true; + } + return rc; +} + +/* Enable event queue processing and NAPI */ +void efx_start_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, ifup, channel->efx->net_dev, + "chan %d start event queue\n", channel->channel); + + /* Make sure the NAPI handler sees the enabled flag set */ + channel->enabled = true; + smp_wmb(); + + efx_channel_enable(channel); + napi_enable(&channel->napi_str); + efx_nic_eventq_read_ack(channel); +} + +/* Disable event queue processing and NAPI */ +void efx_stop_eventq(struct efx_channel *channel) +{ + if (!channel->enabled) + return; + + napi_disable(&channel->napi_str); + while (!efx_channel_disable(channel)) + usleep_range(1000, 20000); + channel->enabled = false; +} + +static void efx_fini_eventq(struct efx_channel *channel) +{ + if (!channel->eventq_init) + return; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d fini event queue\n", channel->channel); + + efx_nic_fini_eventq(channel); + channel->eventq_init = false; +} + +static void efx_remove_eventq(struct efx_channel *channel) +{ + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "chan %d remove event queue\n", channel->channel); + + efx_nic_remove_eventq(channel); +} + +/************************************************************************** + * + * Channel handling + * + *************************************************************************/ + +/* Allocate and initialise a channel structure. */ +static struct efx_channel * +efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int j; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->efx = efx; + channel->channel = i; + channel->type = &efx_default_channel_type; + + for (j = 0; j < EFX_TXQ_TYPES; j++) { + tx_queue = &channel->tx_queue[j]; + tx_queue->efx = efx; + tx_queue->queue = i * EFX_TXQ_TYPES + j; + tx_queue->channel = channel; + } + + rx_queue = &channel->rx_queue; + rx_queue->efx = efx; + setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, + (unsigned long)rx_queue); + + return channel; +} + +/* Allocate and initialise a channel structure, copying parameters + * (but not resources) from an old channel structure. 
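efx_probe_eventq() above sizes each event queue to hold one event per RX and TX descriptor plus 128 spare entries for link-state and MCDI events, rounds that up to a power of two, sanity-checks it against the maximum supported size, floors it at the minimum, and stores size minus one as a wrap mask. The standalone sketch below reproduces that arithmetic; the 512/32768 limits are illustrative placeholders rather than the driver's EFX_MIN_EVQ_SIZE/EFX_MAX_EVQ_SIZE values.

/* Standalone sketch (not driver code): event queue sizing as done in
 * efx_probe_eventq() -- rounded to a power of two so that (size - 1)
 * can serve as a ring wrap mask.
 */
#include <stdio.h>

#define MIN_EVQ_SIZE 512	/* illustrative limits, not the driver's */
#define MAX_EVQ_SIZE 32768	/* EFX_MIN/MAX_EVQ_SIZE macros */

static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned long evq_mask(unsigned long rxq_entries,
			      unsigned long txq_entries)
{
	unsigned long entries =
		roundup_pow_of_two_ul(rxq_entries + txq_entries + 128);

	if (entries > MAX_EVQ_SIZE)
		return 0;		/* the driver treats this as a bug */
	if (entries < MIN_EVQ_SIZE)
		entries = MIN_EVQ_SIZE;
	return entries - 1;		/* index & mask wraps the ring */
}

int main(void)
{
	/* e.g. 1024 RX + 1024 TX descriptors -> 4096-entry EVQ, mask 0xfff */
	printf("eventq_mask = %#lx\n", evq_mask(1024, 1024));
	return 0;
}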
+ */ +static struct efx_channel * +efx_copy_channel(const struct efx_channel *old_channel) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int j; + + channel = kmalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + *channel = *old_channel; + + channel->napi_dev = NULL; + memset(&channel->eventq, 0, sizeof(channel->eventq)); + + for (j = 0; j < EFX_TXQ_TYPES; j++) { + tx_queue = &channel->tx_queue[j]; + if (tx_queue->channel) + tx_queue->channel = channel; + tx_queue->buffer = NULL; + memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); + } + + rx_queue = &channel->rx_queue; + rx_queue->buffer = NULL; + memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); + setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, + (unsigned long)rx_queue); + + return channel; +} + +static int efx_probe_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc; + + netif_dbg(channel->efx, probe, channel->efx->net_dev, + "creating channel %d\n", channel->channel); + + rc = channel->type->pre_probe(channel); + if (rc) + goto fail; + + rc = efx_probe_eventq(channel); + if (rc) + goto fail; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + rc = efx_probe_tx_queue(tx_queue); + if (rc) + goto fail; + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + rc = efx_probe_rx_queue(rx_queue); + if (rc) + goto fail; + } + + return 0; + +fail: + efx_remove_channel(channel); + return rc; +} + +static void +efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) +{ + struct efx_nic *efx = channel->efx; + const char *type; + int number; + + number = channel->channel; + if (efx->tx_channel_offset == 0) { + type = ""; + } else if (channel->channel < efx->tx_channel_offset) { + type = "-rx"; + } else { + type = "-tx"; + number -= efx->tx_channel_offset; + } + snprintf(buf, len, "%s%s-%d", efx->name, type, number); +} + +static void efx_set_channel_names(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + channel->type->get_name(channel, + efx->msi_context[channel->channel].name, + sizeof(efx->msi_context[0].name)); +} + +static int efx_probe_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + int rc; + + /* Restart special buffer allocation */ + efx->next_buffer_table = 0; + + /* Probe channels in reverse, so that any 'extra' channels + * use the start of the buffer table. This allows the traffic + * channels to be resized without moving them or wasting the + * entries before them. + */ + efx_for_each_channel_rev(channel, efx) { + rc = efx_probe_channel(channel); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create channel %d\n", + channel->channel); + goto fail; + } + } + efx_set_channel_names(efx); + + return 0; + +fail: + efx_remove_channels(efx); + return rc; +} + +/* Channels are shutdown and reinitialised whilst the NIC is running + * to propagate configuration changes (mtu, checksum offload), or + * to clear hardware error conditions + */ +static void efx_start_datapath(struct efx_nic *efx) +{ + bool old_rx_scatter = efx->rx_scatter; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + size_t rx_buf_len; + + /* Calculate the rx buffer allocation parameters required to + * support the current MTU, including padding for header + * alignment and overruns. 
+ */ + efx->rx_dma_len = (efx->rx_prefix_size + + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + + efx->type->rx_buffer_padding); + rx_buf_len = (sizeof(struct efx_rx_page_state) + + efx->rx_ip_align + efx->rx_dma_len); + if (rx_buf_len <= PAGE_SIZE) { + efx->rx_scatter = efx->type->always_rx_scatter; + efx->rx_buffer_order = 0; + } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); + BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + + 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, + EFX_RX_BUF_ALIGNMENT) > + PAGE_SIZE); + efx->rx_scatter = true; + efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; + efx->rx_buffer_order = 0; + } else { + efx->rx_scatter = false; + efx->rx_buffer_order = get_order(rx_buf_len); + } + + efx_rx_config_page_split(efx); + if (efx->rx_buffer_order) + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u; page order=%u batch=%u\n", + efx->rx_dma_len, efx->rx_buffer_order, + efx->rx_pages_per_batch); + else + netif_dbg(efx, drv, efx->net_dev, + "RX buf len=%u step=%u bpp=%u; page batch=%u\n", + efx->rx_dma_len, efx->rx_page_buf_step, + efx->rx_bufs_per_page, efx->rx_pages_per_batch); + + /* RX filters may also have scatter-enabled flags */ + if (efx->rx_scatter != old_rx_scatter) + efx->type->filter_update_rx_scatter(efx); + + /* We must keep at least one descriptor in a TX ring empty. + * We could avoid this when the queue size does not exactly + * match the hardware ring size, but it's not that important. + * Therefore we stop the queue when one more skb might fill + * the ring completely. We wake it when half way back to + * empty. + */ + efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); + efx->txq_wake_thresh = efx->txq_stop_thresh / 2; + + /* Initialise the channels */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_init_tx_queue(tx_queue); + atomic_inc(&efx->active_queues); + } + + efx_for_each_channel_rx_queue(rx_queue, channel) { + efx_init_rx_queue(rx_queue); + atomic_inc(&efx->active_queues); + efx_stop_eventq(channel); + efx_fast_push_rx_descriptors(rx_queue, false); + efx_start_eventq(channel); + } + + WARN_ON(channel->rx_pkt_n_frags); + } + + efx_ptp_start_datapath(efx); + + if (netif_device_present(efx->net_dev)) + netif_tx_wake_all_queues(efx->net_dev); +} + +static void efx_stop_datapath(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->port_enabled); + + efx_ptp_stop_datapath(efx); + + /* Stop RX refill */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + rx_queue->refill_enabled = false; + } + + efx_for_each_channel(channel, efx) { + /* RX packet processing is pipelined, so wait for the + * NAPI handler to complete. At least event queue 0 + * might be kept active by non-data events, so don't + * use napi_synchronize() but actually disable NAPI + * temporarily. + */ + if (efx_channel_has_rx_queue(channel)) { + efx_stop_eventq(channel); + efx_start_eventq(channel); + } + } + + rc = efx->type->fini_dmaq(efx); + if (rc && EFX_WORKAROUND_7803(efx)) { + /* Schedule a reset to recover from the flush failure. The + * descriptor caches reference memory we're about to free, + * but falcon_reconfigure_mac_wrapper() won't reconnect + * the MACs because of the pending reset. 
+ */ + netif_err(efx, drv, efx->net_dev, + "Resetting to recover from flush failure\n"); + efx_schedule_reset(efx, RESET_TYPE_ALL); + } else if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_fini_rx_queue(rx_queue); + efx_for_each_possible_channel_tx_queue(tx_queue, channel) + efx_fini_tx_queue(tx_queue); + } +} + +static void efx_remove_channel(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + + netif_dbg(channel->efx, drv, channel->efx->net_dev, + "destroy chan %d\n", channel->channel); + + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_remove_rx_queue(rx_queue); + efx_for_each_possible_channel_tx_queue(tx_queue, channel) + efx_remove_tx_queue(tx_queue); + efx_remove_eventq(channel); + channel->type->post_remove(channel); +} + +static void efx_remove_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_remove_channel(channel); +} + +int +efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) +{ + struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; + u32 old_rxq_entries, old_txq_entries; + unsigned i, next_buffer_table = 0; + int rc, rc2; + + rc = efx_check_disabled(efx); + if (rc) + return rc; + + /* Not all channels should be reallocated. We must avoid + * reallocating their buffer table entries. + */ + efx_for_each_channel(channel, efx) { + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + + if (channel->type->copy) + continue; + next_buffer_table = max(next_buffer_table, + channel->eventq.index + + channel->eventq.entries); + efx_for_each_channel_rx_queue(rx_queue, channel) + next_buffer_table = max(next_buffer_table, + rx_queue->rxd.index + + rx_queue->rxd.entries); + efx_for_each_channel_tx_queue(tx_queue, channel) + next_buffer_table = max(next_buffer_table, + tx_queue->txd.index + + tx_queue->txd.entries); + } + + efx_device_detach_sync(efx); + efx_stop_all(efx); + efx_soft_disable_interrupts(efx); + + /* Clone channels (where possible) */ + memset(other_channel, 0, sizeof(other_channel)); + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + if (channel->type->copy) + channel = channel->type->copy(channel); + if (!channel) { + rc = -ENOMEM; + goto out; + } + other_channel[i] = channel; + } + + /* Swap entry counts and channel pointers */ + old_rxq_entries = efx->rxq_entries; + old_txq_entries = efx->txq_entries; + efx->rxq_entries = rxq_entries; + efx->txq_entries = txq_entries; + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + efx->channel[i] = other_channel[i]; + other_channel[i] = channel; + } + + /* Restart buffer table allocation */ + efx->next_buffer_table = next_buffer_table; + + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + if (!channel->type->copy) + continue; + rc = efx_probe_channel(channel); + if (rc) + goto rollback; + efx_init_napi_channel(efx->channel[i]); + } + +out: + /* Destroy unused channel structures */ + for (i = 0; i < efx->n_channels; i++) { + channel = other_channel[i]; + if (channel && channel->type->copy) { + efx_fini_napi_channel(channel); + efx_remove_channel(channel); + kfree(channel); + } + } + + rc2 = efx_soft_enable_interrupts(efx); + if (rc2) { + rc = rc ? 
rc : rc2; + netif_err(efx, drv, efx->net_dev, + "unable to restart interrupts on channel reallocation\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + } else { + efx_start_all(efx); + netif_device_attach(efx->net_dev); + } + return rc; + +rollback: + /* Swap back */ + efx->rxq_entries = old_rxq_entries; + efx->txq_entries = old_txq_entries; + for (i = 0; i < efx->n_channels; i++) { + channel = efx->channel[i]; + efx->channel[i] = other_channel[i]; + other_channel[i] = channel; + } + goto out; +} + +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) +{ + mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); +} + +static const struct efx_channel_type efx_default_channel_type = { + .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_channel_dummy_op_void, + .get_name = efx_get_channel_name, + .copy = efx_copy_channel, + .keep_eventq = false, +}; + +int efx_channel_dummy_op_int(struct efx_channel *channel) +{ + return 0; +} + +void efx_channel_dummy_op_void(struct efx_channel *channel) +{ +} + +/************************************************************************** + * + * Port handling + * + **************************************************************************/ + +/* This ensures that the kernel is kept informed (via + * netif_carrier_on/off) of the link status, and also maintains the + * link status's stop on the port's TX queue. + */ +void efx_link_status_changed(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + + /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure + * that no events are triggered between unregister_netdev() and the + * driver unloading. A more general condition is that NETDEV_CHANGE + * can only be generated between NETDEV_UP and NETDEV_DOWN */ + if (!netif_running(efx->net_dev)) + return; + + if (link_state->up != netif_carrier_ok(efx->net_dev)) { + efx->n_link_state_changes++; + + if (link_state->up) + netif_carrier_on(efx->net_dev); + else + netif_carrier_off(efx->net_dev); + } + + /* Status message for kernel log */ + if (link_state->up) + netif_info(efx, link, efx->net_dev, + "link up at %uMbps %s-duplex (MTU %d)\n", + link_state->speed, link_state->fd ? "full" : "half", + efx->net_dev->mtu); + else + netif_info(efx, link, efx->net_dev, "link down\n"); +} + +void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) +{ + efx->link_advertising = advertising; + if (advertising) { + if (advertising & ADVERTISED_Pause) + efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); + else + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); + if (advertising & ADVERTISED_Asym_Pause) + efx->wanted_fc ^= EFX_FC_TX; + } +} + +void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) +{ + efx->wanted_fc = wanted_fc; + if (efx->link_advertising) { + if (wanted_fc & EFX_FC_RX) + efx->link_advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else + efx->link_advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + if (wanted_fc & EFX_FC_TX) + efx->link_advertising ^= ADVERTISED_Asym_Pause; + } +} + +static void efx_fini_port(struct efx_nic *efx); + +/* Push loopback/power/transmit disable settings to the PHY, and reconfigure + * the MAC appropriately. All other PHY configuration changes are pushed + * through phy_op->set_settings(), and pushed asynchronously to the MAC + * through efx_monitor(). 
+ * + * Callers must hold the mac_lock + */ +int __efx_reconfigure_port(struct efx_nic *efx) +{ + enum efx_phy_mode phy_mode; + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + /* Disable PHY transmit in mac level loopbacks */ + phy_mode = efx->phy_mode; + if (LOOPBACK_INTERNAL(efx)) + efx->phy_mode |= PHY_MODE_TX_DISABLED; + else + efx->phy_mode &= ~PHY_MODE_TX_DISABLED; + + rc = efx->type->reconfigure_port(efx); + + if (rc) + efx->phy_mode = phy_mode; + + return rc; +} + +/* Reinitialise the MAC to pick up new PHY settings, even if the port is + * disabled. */ +int efx_reconfigure_port(struct efx_nic *efx) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + rc = __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/* Asynchronous work item for changing MAC promiscuity and multicast + * hash. Avoid a drain/rx_ingress enable by reconfiguring the current + * MAC directly. */ +static void efx_mac_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); + + mutex_lock(&efx->mac_lock); + if (efx->port_enabled) + efx->type->reconfigure_mac(efx); + mutex_unlock(&efx->mac_lock); +} + +static int efx_probe_port(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, probe, efx->net_dev, "create port\n"); + + if (phy_flash_cfg) + efx->phy_mode = PHY_MODE_SPECIAL; + + /* Connect up MAC/PHY operations table */ + rc = efx->type->probe_port(efx); + if (rc) + return rc; + + /* Initialise MAC address to permanent address */ + ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr); + + return 0; +} + +static int efx_init_port(struct efx_nic *efx) +{ + int rc; + + netif_dbg(efx, drv, efx->net_dev, "init port\n"); + + mutex_lock(&efx->mac_lock); + + rc = efx->phy_op->init(efx); + if (rc) + goto fail1; + + efx->port_initialized = true; + + /* Reconfigure the MAC before creating dma queues (required for + * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ + efx->type->reconfigure_mac(efx); + + /* Ensure the PHY advertises the correct flow control settings */ + rc = efx->phy_op->reconfigure(efx); + if (rc) + goto fail2; + + mutex_unlock(&efx->mac_lock); + return 0; + +fail2: + efx->phy_op->fini(efx); +fail1: + mutex_unlock(&efx->mac_lock); + return rc; +} + +static void efx_start_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifup, efx->net_dev, "start port\n"); + BUG_ON(efx->port_enabled); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = true; + + /* Ensure MAC ingress/egress is enabled */ + efx->type->reconfigure_mac(efx); + + mutex_unlock(&efx->mac_lock); +} + +/* Cancel work for MAC reconfiguration, periodic hardware monitoring + * and the async self-test, wait for them to finish and prevent them + * being scheduled again. This doesn't cover online resets, which + * should only be cancelled when removing the device. 
+ */ +static void efx_stop_port(struct efx_nic *efx) +{ + netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); + + EFX_ASSERT_RESET_SERIALISED(efx); + + mutex_lock(&efx->mac_lock); + efx->port_enabled = false; + mutex_unlock(&efx->mac_lock); + + /* Serialise against efx_set_multicast_list() */ + netif_addr_lock_bh(efx->net_dev); + netif_addr_unlock_bh(efx->net_dev); + + cancel_delayed_work_sync(&efx->monitor_work); + efx_selftest_async_cancel(efx); + cancel_work_sync(&efx->mac_work); +} + +static void efx_fini_port(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); + + if (!efx->port_initialized) + return; + + efx->phy_op->fini(efx); + efx->port_initialized = false; + + efx->link_state.up = false; + efx_link_status_changed(efx); +} + +static void efx_remove_port(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); + + efx->type->remove_port(efx); +} + +/************************************************************************** + * + * NIC handling + * + **************************************************************************/ + +static LIST_HEAD(efx_primary_list); +static LIST_HEAD(efx_unassociated_list); + +static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right) +{ + return left->type == right->type && + left->vpd_sn && right->vpd_sn && + !strcmp(left->vpd_sn, right->vpd_sn); +} + +static void efx_associate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + if (efx->primary == efx) { + /* Adding primary function; look for secondaries */ + + netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n"); + list_add_tail(&efx->node, &efx_primary_list); + + list_for_each_entry_safe(other, next, &efx_unassociated_list, + node) { + if (efx_same_controller(efx, other)) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to secondary list of %s %s\n", + pci_name(efx->pci_dev), + efx->net_dev->name); + list_add_tail(&other->node, + &efx->secondary_list); + other->primary = efx; + } + } + } else { + /* Adding secondary function; look for primary */ + + list_for_each_entry(other, &efx_primary_list, node) { + if (efx_same_controller(efx, other)) { + netif_dbg(efx, probe, efx->net_dev, + "adding to secondary list of %s %s\n", + pci_name(other->pci_dev), + other->net_dev->name); + list_add_tail(&efx->node, + &other->secondary_list); + efx->primary = other; + return; + } + } + + netif_dbg(efx, probe, efx->net_dev, + "adding to unassociated list\n"); + list_add_tail(&efx->node, &efx_unassociated_list); + } +} + +static void efx_dissociate(struct efx_nic *efx) +{ + struct efx_nic *other, *next; + + list_del(&efx->node); + efx->primary = NULL; + + list_for_each_entry_safe(other, next, &efx->secondary_list, node) { + list_del(&other->node); + netif_dbg(other, probe, other->net_dev, + "moving to unassociated list\n"); + list_add_tail(&other->node, &efx_unassociated_list); + other->primary = NULL; + } +} + +/* This configures the PCI device to enable I/O and DMA. */ +static int efx_init_io(struct efx_nic *efx) +{ + struct pci_dev *pci_dev = efx->pci_dev; + dma_addr_t dma_mask = efx->type->max_dma_mask; + unsigned int mem_map_size = efx->type->mem_map_size(efx); + int rc; + + netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); + + rc = pci_enable_device(pci_dev); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to enable PCI device\n"); + goto fail1; + } + + pci_set_master(pci_dev); + + /* Set the PCI DMA mask. 
Try all possibilities from our + * genuine mask down to 32 bits, because some architectures + * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit + * masks even though they reject 46 bit masks. + */ + while (dma_mask > 0x7fffffffUL) { + if (dma_supported(&pci_dev->dev, dma_mask)) { + rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); + if (rc == 0) + break; + } + dma_mask >>= 1; + } + if (rc) { + netif_err(efx, probe, efx->net_dev, + "could not find a suitable DMA mask\n"); + goto fail2; + } + netif_dbg(efx, probe, efx->net_dev, + "using DMA mask %llx\n", (unsigned long long) dma_mask); + + efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); + rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "request for memory BAR failed\n"); + rc = -EIO; + goto fail3; + } + efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size); + if (!efx->membase) { + netif_err(efx, probe, efx->net_dev, + "could not map memory BAR at %llx+%x\n", + (unsigned long long)efx->membase_phys, mem_map_size); + rc = -ENOMEM; + goto fail4; + } + netif_dbg(efx, probe, efx->net_dev, + "memory BAR at %llx+%x (virtual %p)\n", + (unsigned long long)efx->membase_phys, mem_map_size, + efx->membase); + + return 0; + + fail4: + pci_release_region(efx->pci_dev, EFX_MEM_BAR); + fail3: + efx->membase_phys = 0; + fail2: + pci_disable_device(efx->pci_dev); + fail1: + return rc; +} + +static void efx_fini_io(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); + + if (efx->membase) { + iounmap(efx->membase); + efx->membase = NULL; + } + + if (efx->membase_phys) { + pci_release_region(efx->pci_dev, EFX_MEM_BAR); + efx->membase_phys = 0; + } + + pci_disable_device(efx->pci_dev); +} + +static unsigned int efx_wanted_parallelism(struct efx_nic *efx) +{ + cpumask_var_t thread_mask; + unsigned int count; + int cpu; + + if (rss_cpus) { + count = rss_cpus; + } else { + if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) { + netif_warn(efx, probe, efx->net_dev, + "RSS disabled due to allocation failure\n"); + return 1; + } + + count = 0; + for_each_online_cpu(cpu) { + if (!cpumask_test_cpu(cpu, thread_mask)) { + ++count; + cpumask_or(thread_mask, thread_mask, + topology_thread_cpumask(cpu)); + } + } + + free_cpumask_var(thread_mask); + } + + /* If RSS is requested for the PF *and* VFs then we can't write RSS + * table entries that are inaccessible to VFs + */ + if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && + count > efx_vf_size(efx)) { + netif_warn(efx, probe, efx->net_dev, + "Reducing number of RSS channels from %u to %u for " + "VF support. Increase vf-msix-limit to use more " + "channels on the PF.\n", + count, efx_vf_size(efx)); + count = efx_vf_size(efx); + } + + return count; +} + +/* Probe the number and type of interrupts we are able to obtain, and + * the resulting numbers of channels and RX queues.
+ */ +static int efx_probe_interrupts(struct efx_nic *efx) +{ + unsigned int extra_channels = 0; + unsigned int i, j; + int rc; + + for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) + if (efx->extra_channel_type[i]) + ++extra_channels; + + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + struct msix_entry xentries[EFX_MAX_CHANNELS]; + unsigned int n_channels; + + n_channels = efx_wanted_parallelism(efx); + if (separate_tx_channels) + n_channels *= 2; + n_channels += extra_channels; + n_channels = min(n_channels, efx->max_channels); + + for (i = 0; i < n_channels; i++) + xentries[i].entry = i; + rc = pci_enable_msix_range(efx->pci_dev, + xentries, 1, n_channels); + if (rc < 0) { + /* Fall back to single channel MSI */ + efx->interrupt_mode = EFX_INT_MODE_MSI; + netif_err(efx, drv, efx->net_dev, + "could not enable MSI-X\n"); + } else if (rc < n_channels) { + netif_err(efx, drv, efx->net_dev, + "WARNING: Insufficient MSI-X vectors" + " available (%d < %u).\n", rc, n_channels); + netif_err(efx, drv, efx->net_dev, + "WARNING: Performance may be reduced.\n"); + n_channels = rc; + } + + if (rc > 0) { + efx->n_channels = n_channels; + if (n_channels > extra_channels) + n_channels -= extra_channels; + if (separate_tx_channels) { + efx->n_tx_channels = max(n_channels / 2, 1U); + efx->n_rx_channels = max(n_channels - + efx->n_tx_channels, + 1U); + } else { + efx->n_tx_channels = n_channels; + efx->n_rx_channels = n_channels; + } + for (i = 0; i < efx->n_channels; i++) + efx_get_channel(efx, i)->irq = + xentries[i].vector; + } + } + + /* Try single interrupt MSI */ + if (efx->interrupt_mode == EFX_INT_MODE_MSI) { + efx->n_channels = 1; + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + rc = pci_enable_msi(efx->pci_dev); + if (rc == 0) { + efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; + } else { + netif_err(efx, drv, efx->net_dev, + "could not enable MSI\n"); + efx->interrupt_mode = EFX_INT_MODE_LEGACY; + } + } + + /* Assume legacy interrupts */ + if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { + efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); + efx->n_rx_channels = 1; + efx->n_tx_channels = 1; + efx->legacy_irq = efx->pci_dev->irq; + } + + /* Assign extra channels if possible */ + j = efx->n_channels; + for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) { + if (!efx->extra_channel_type[i]) + continue; + if (efx->interrupt_mode != EFX_INT_MODE_MSIX || + efx->n_channels <= extra_channels) { + efx->extra_channel_type[i]->handle_no_channel(efx); + } else { + --j; + efx_get_channel(efx, j)->type = + efx->extra_channel_type[i]; + } + } + + /* RSS might be usable on VFs even if it is disabled on the PF */ + + efx->rss_spread = ((efx->n_rx_channels > 1 || + !efx->type->sriov_wanted(efx)) ? 
+ efx->n_rx_channels : efx_vf_size(efx)); + + return 0; +} + +static int efx_soft_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel, *end_channel; + int rc; + + BUG_ON(efx->state == STATE_DISABLED); + + efx->irq_soft_enabled = true; + smp_wmb(); + + efx_for_each_channel(channel, efx) { + if (!channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + efx_start_eventq(channel); + } + + efx_mcdi_mode_event(efx); + + return 0; +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + return rc; +} + +static void efx_soft_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + if (efx->state == STATE_DISABLED) + return; + + efx_mcdi_mode_poll(efx); + + efx->irq_soft_enabled = false; + smp_wmb(); + + if (efx->legacy_irq) + synchronize_irq(efx->legacy_irq); + + efx_for_each_channel(channel, efx) { + if (channel->irq) + synchronize_irq(channel->irq); + + efx_stop_eventq(channel); + if (!channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + /* Flush the asynchronous MCDI request queue */ + efx_mcdi_flush_async(efx); +} + +static int efx_enable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel, *end_channel; + int rc; + + BUG_ON(efx->state == STATE_DISABLED); + + if (efx->eeh_disabled_legacy_irq) { + enable_irq(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = false; + } + + efx->type->irq_enable_master(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) { + rc = efx_init_eventq(channel); + if (rc) + goto fail; + } + } + + rc = efx_soft_enable_interrupts(efx); + if (rc) + goto fail; + + return 0; + +fail: + end_channel = channel; + efx_for_each_channel(channel, efx) { + if (channel == end_channel) + break; + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); + + return rc; +} + +static void efx_disable_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_soft_disable_interrupts(efx); + + efx_for_each_channel(channel, efx) { + if (channel->type->keep_eventq) + efx_fini_eventq(channel); + } + + efx->type->irq_disable_non_ev(efx); +} + +static void efx_remove_interrupts(struct efx_nic *efx) +{ + struct efx_channel *channel; + + /* Remove MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + channel->irq = 0; + pci_disable_msi(efx->pci_dev); + pci_disable_msix(efx->pci_dev); + + /* Remove legacy interrupt */ + efx->legacy_irq = 0; +} + +static void efx_set_channels(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + efx->tx_channel_offset = + separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; + + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate + * RX-only and TX-only channels. 
+ */ + efx_for_each_channel(channel, efx) { + if (channel->channel < efx->n_rx_channels) + channel->rx_queue.core_index = channel->channel; + else + channel->rx_queue.core_index = -1; + + efx_for_each_channel_tx_queue(tx_queue, channel) + tx_queue->queue -= (efx->tx_channel_offset * + EFX_TXQ_TYPES); + } +} + +static int efx_probe_nic(struct efx_nic *efx) +{ + size_t i; + int rc; + + netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); + + /* Carry out hardware-type specific initialisation */ + rc = efx->type->probe(efx); + if (rc) + return rc; + + /* Determine the number of channels and queues by trying to hook + * in MSI-X interrupts. */ + rc = efx_probe_interrupts(efx); + if (rc) + goto fail1; + + efx_set_channels(efx); + + rc = efx->type->dimension_resources(efx); + if (rc) + goto fail2; + + if (efx->n_channels > 1) + netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = + ethtool_rxfh_indir_default(i, efx->rss_spread); + + netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); + netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); + + /* Initialise the interrupt moderation settings */ + efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, + true); + + return 0; + +fail2: + efx_remove_interrupts(efx); +fail1: + efx->type->remove(efx); + return rc; +} + +static void efx_remove_nic(struct efx_nic *efx) +{ + netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); + + efx_remove_interrupts(efx); + efx->type->remove(efx); +} + +static int efx_probe_filters(struct efx_nic *efx) +{ + int rc; + + spin_lock_init(&efx->filter_lock); + + rc = efx->type->filter_table_probe(efx); + if (rc) + return rc; + +#ifdef CONFIG_RFS_ACCEL + if (efx->type->offload_features & NETIF_F_NTUPLE) { + efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, + sizeof(*efx->rps_flow_id), + GFP_KERNEL); + if (!efx->rps_flow_id) { + efx->type->filter_table_remove(efx); + return -ENOMEM; + } + } +#endif + + return 0; +} + +static void efx_remove_filters(struct efx_nic *efx) +{ +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_flow_id); +#endif + efx->type->filter_table_remove(efx); +} + +static void efx_restore_filters(struct efx_nic *efx) +{ + efx->type->filter_table_restore(efx); +} + +/************************************************************************** + * + * NIC startup/shutdown + * + *************************************************************************/ + +static int efx_probe_all(struct efx_nic *efx) +{ + int rc; + + rc = efx_probe_nic(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); + goto fail1; + } + + rc = efx_probe_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to create port\n"); + goto fail2; + } + + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } + efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; + + rc = efx_probe_filters(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to create filter tables\n"); + goto fail3; + } + + rc = efx_probe_channels(efx); + if (rc) + goto fail4; + + return 0; + + fail4: + efx_remove_filters(efx); + fail3: + efx_remove_port(efx); + fail2: + efx_remove_nic(efx); + fail1: + return rc; +} + +/* If the interface is supposed to be running but is not, start + * the hardware and software data path, regular activity for the port + * (MAC statistics, link polling, etc.) 
and schedule the port to be + * reconfigured. Interrupts must already be enabled. This function + * is safe to call multiple times, so long as the NIC is not disabled. + * Requires the RTNL lock. + */ +static void efx_start_all(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->state == STATE_DISABLED); + + /* Check that it is appropriate to restart the interface. All + * of these flags are safe to read under just the rtnl lock */ + if (efx->port_enabled || !netif_running(efx->net_dev) || + efx->reset_pending) + return; + + efx_start_port(efx); + efx_start_datapath(efx); + + /* Start the hardware monitor if there is one */ + if (efx->type->monitor != NULL) + queue_delayed_work(efx->workqueue, &efx->monitor_work, + efx_monitor_interval); + + /* If link state detection is normally event-driven, we have + * to poll now because we could have missed a change + */ + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { + mutex_lock(&efx->mac_lock); + if (efx->phy_op->poll(efx)) + efx_link_status_changed(efx); + mutex_unlock(&efx->mac_lock); + } + + efx->type->start_stats(efx); + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); +} + +/* Quiesce the hardware and software data path, and regular activity + * for the port without bringing the link down. Safe to call multiple + * times with the NIC in almost any state, but interrupts should be + * enabled. Requires the RTNL lock. + */ +static void efx_stop_all(struct efx_nic *efx) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + /* port_enabled can be read safely under the rtnl lock */ + if (!efx->port_enabled) + return; + + /* update stats before we go down so we can accurately count + * rx_nodesc_drops + */ + efx->type->pull_stats(efx); + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, NULL); + spin_unlock_bh(&efx->stats_lock); + efx->type->stop_stats(efx); + efx_stop_port(efx); + + /* Stop the kernel transmit interface. This is only valid if + * the device is stopped or detached; otherwise the watchdog + * may fire immediately. 
+ */ + WARN_ON(netif_running(efx->net_dev) && + netif_device_present(efx->net_dev)); + netif_tx_disable(efx->net_dev); + + efx_stop_datapath(efx); +} + +static void efx_remove_all(struct efx_nic *efx) +{ + efx_remove_channels(efx); + efx_remove_filters(efx); + efx_remove_port(efx); + efx_remove_nic(efx); +} + +/************************************************************************** + * + * Interrupt moderation + * + **************************************************************************/ + +static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns) +{ + if (usecs == 0) + return 0; + if (usecs * 1000 < quantum_ns) + return 1; /* never round down to 0 */ + return usecs * 1000 / quantum_ns; +} + +/* Set interrupt moderation parameters */ +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx) +{ + struct efx_channel *channel; + unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max * + efx->timer_quantum_ns, + 1000); + unsigned int tx_ticks; + unsigned int rx_ticks; + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max) + return -EINVAL; + + tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns); + rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns); + + if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 && + !rx_may_override_tx) { + netif_err(efx, drv, efx->net_dev, "Channels are shared. " + "RX and TX IRQ moderation must be equal\n"); + return -EINVAL; + } + + efx->irq_rx_adaptive = rx_adaptive; + efx->irq_rx_moderation = rx_ticks; + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) + channel->irq_moderation = rx_ticks; + else if (efx_channel_has_tx_queues(channel)) + channel->irq_moderation = tx_ticks; + } + + return 0; +} + +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive) +{ + /* We must round up when converting ticks to microseconds + * because we round down when converting the other way. + */ + + *rx_adaptive = efx->irq_rx_adaptive; + *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation * + efx->timer_quantum_ns, + 1000); + + /* If channels are shared between RX and TX, so is IRQ + * moderation. Otherwise, IRQ moderation is the same for all + * TX channels and is not adaptive. + */ + if (efx->tx_channel_offset == 0) + *tx_usecs = *rx_usecs; + else + *tx_usecs = DIV_ROUND_UP( + efx->channel[efx->tx_channel_offset]->irq_moderation * + efx->timer_quantum_ns, + 1000); +} + +/************************************************************************** + * + * Hardware monitor + * + **************************************************************************/ + +/* Run periodically off the general workqueue */ +static void efx_monitor(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + monitor_work.work); + + netif_vdbg(efx, timer, efx->net_dev, + "hardware monitor executing on CPU %d\n", + raw_smp_processor_id()); + BUG_ON(efx->type->monitor == NULL); + + /* If the mac_lock is already held then it is likely a port + * reconfiguration is already in place, which will likely do + * most of the work of monitor() anyway. 
*/ + if (mutex_trylock(&efx->mac_lock)) { + if (efx->port_enabled) + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + queue_delayed_work(efx->workqueue, &efx->monitor_work, + efx_monitor_interval); +} + +/************************************************************************** + * + * ioctls + * + *************************************************************************/ + +/* Net device ioctl + * Context: process, rtnl_lock() held. + */ +static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (cmd == SIOCSHWTSTAMP) + return efx_ptp_set_ts_config(efx, ifr); + if (cmd == SIOCGHWTSTAMP) + return efx_ptp_get_ts_config(efx, ifr); + + /* Convert phy_id from older PRTAD/DEVAD format */ + if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && + (data->phy_id & 0xfc00) == 0x0400) + data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; + + return mdio_mii_ioctl(&efx->mdio, data, cmd); +} + +/************************************************************************** + * + * NAPI interface + * + **************************************************************************/ + +static void efx_init_napi_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + channel->napi_dev = efx->net_dev; + netif_napi_add(channel->napi_dev, &channel->napi_str, + efx_poll, napi_weight); + napi_hash_add(&channel->napi_str); + efx_channel_init_lock(channel); +} + +static void efx_init_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_init_napi_channel(channel); +} + +static void efx_fini_napi_channel(struct efx_channel *channel) +{ + if (channel->napi_dev) { + netif_napi_del(&channel->napi_str); + napi_hash_del(&channel->napi_str); + } + channel->napi_dev = NULL; +} + +static void efx_fini_napi(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_fini_napi_channel(channel); +} + +/************************************************************************** + * + * Kernel netpoll interface + * + *************************************************************************/ + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* Although in the common case interrupts will be disabled, this is not + * guaranteed. However, all our work happens inside the NAPI callback, + * so no locking is required. + */ +static void efx_netpoll(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_schedule_channel(channel); +} + +#endif + +#ifdef CONFIG_NET_RX_BUSY_POLL +static int efx_busy_poll(struct napi_struct *napi) +{ + struct efx_channel *channel = + container_of(napi, struct efx_channel, napi_str); + struct efx_nic *efx = channel->efx; + int budget = 4; + int old_rx_packets, rx_packets; + + if (!netif_running(efx->net_dev)) + return LL_FLUSH_FAILED; + + if (!efx_channel_lock_poll(channel)) + return LL_FLUSH_BUSY; + + old_rx_packets = channel->rx_queue.rx_packets; + efx_process_channel(channel, budget); + + rx_packets = channel->rx_queue.rx_packets - old_rx_packets; + + /* There is no race condition with NAPI here. + * NAPI will automatically be rescheduled if it yielded during busy + * polling, because it was not able to take the lock and thus returned + * the full budget. 
+ */ + efx_channel_unlock_poll(channel); + + return rx_packets; +} +#endif + +/************************************************************************** + * + * Kernel net device interface + * + *************************************************************************/ + +/* Context: process, rtnl_lock() held. */ +static int efx_net_open(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); + + rc = efx_check_disabled(efx); + if (rc) + return rc; + if (efx->phy_mode & PHY_MODE_SPECIAL) + return -EBUSY; + if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) + return -EIO; + + /* Notify the kernel of the link state polled during driver load, + * before the monitor starts running */ + efx_link_status_changed(efx); + + efx_start_all(efx); + efx_selftest_async_start(efx); + return 0; +} + +/* Context: process, rtnl_lock() held. + * Note that the kernel will ignore our return code; this method + * should really be a void. + */ +static int efx_net_stop(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", + raw_smp_processor_id()); + + /* Stop the device and flush all the channels */ + efx_stop_all(efx); + + return 0; +} + +/* Context: process, dev_base_lock or RTNL held, non-blocking. */ +static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + spin_lock_bh(&efx->stats_lock); + efx->type->update_stats(efx, NULL, stats); + spin_unlock_bh(&efx->stats_lock); + + return stats; +} + +/* Context: netif_tx_lock held, BHs disabled. */ +static void efx_watchdog(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_err(efx, tx_err, efx->net_dev, + "TX stuck with port_enabled=%d: resetting channels\n", + efx->port_enabled); + + efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); +} + + +/* Context: process, rtnl_lock() held. */ +static int efx_change_mtu(struct net_device *net_dev, int new_mtu) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx_check_disabled(efx); + if (rc) + return rc; + if (new_mtu > EFX_MAX_MTU) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + + efx_device_detach_sync(efx); + efx_stop_all(efx); + + mutex_lock(&efx->mac_lock); + net_dev->mtu = new_mtu; + efx->type->reconfigure_mac(efx); + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + netif_device_attach(efx->net_dev); + return 0; +} + +static int efx_set_mac_address(struct net_device *net_dev, void *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct sockaddr *addr = data; + u8 *new_addr = addr->sa_data; + + if (!is_valid_ether_addr(new_addr)) { + netif_err(efx, drv, efx->net_dev, + "invalid ethernet MAC address requested: %pM\n", + new_addr); + return -EADDRNOTAVAIL; + } + + ether_addr_copy(net_dev->dev_addr, new_addr); + efx->type->sriov_mac_address_changed(efx); + + /* Reconfigure the MAC */ + mutex_lock(&efx->mac_lock); + efx->type->reconfigure_mac(efx); + mutex_unlock(&efx->mac_lock); + + return 0; +} + +/* Context: netif_addr_lock held, BHs disabled. 
*/ +static void efx_set_rx_mode(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->port_enabled) + queue_work(efx->workqueue, &efx->mac_work); + /* Otherwise efx_start_port() will do this */ +} + +static int efx_set_features(struct net_device *net_dev, netdev_features_t data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* If disabling RX n-tuple filtering, clear existing filters */ + if (net_dev->features & ~data & NETIF_F_NTUPLE) + return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + + return 0; +} + +static const struct net_device_ops efx_farch_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, + .ndo_get_stats64 = efx_net_stats, + .ndo_tx_timeout = efx_watchdog, + .ndo_start_xmit = efx_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = efx_ioctl, + .ndo_change_mtu = efx_change_mtu, + .ndo_set_mac_address = efx_set_mac_address, + .ndo_set_rx_mode = efx_set_rx_mode, + .ndo_set_features = efx_set_features, +#ifdef CONFIG_SFC_SRIOV + .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac, + .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, + .ndo_get_vf_config = efx_siena_sriov_get_vf_config, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = efx_netpoll, +#endif + .ndo_setup_tc = efx_setup_tc, +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = efx_busy_poll, +#endif +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif +}; + +static const struct net_device_ops efx_ef10_netdev_ops = { + .ndo_open = efx_net_open, + .ndo_stop = efx_net_stop, + .ndo_get_stats64 = efx_net_stats, + .ndo_tx_timeout = efx_watchdog, + .ndo_start_xmit = efx_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = efx_ioctl, + .ndo_change_mtu = efx_change_mtu, + .ndo_set_mac_address = efx_set_mac_address, + .ndo_set_rx_mode = efx_set_rx_mode, + .ndo_set_features = efx_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = efx_netpoll, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = efx_busy_poll, +#endif +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif +}; + +static void efx_update_name(struct efx_nic *efx) +{ + strcpy(efx->name, efx->net_dev->name); + efx_mtd_rename(efx); + efx_set_channel_names(efx); +} + +static int efx_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); + + if ((net_dev->netdev_ops == &efx_farch_netdev_ops || + net_dev->netdev_ops == &efx_ef10_netdev_ops) && + event == NETDEV_CHANGENAME) + efx_update_name(netdev_priv(net_dev)); + + return NOTIFY_DONE; +} + +static struct notifier_block efx_netdev_notifier = { + .notifier_call = efx_netdev_event, +}; + +static ssize_t +show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + return sprintf(buf, "%d\n", efx->phy_type); +} +static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); + +static int efx_register_netdev(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct efx_channel *channel; + int rc; + + net_dev->watchdog_timeo = 5 * HZ; + net_dev->irq = efx->pci_dev->irq; + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { + net_dev->netdev_ops = &efx_ef10_netdev_ops; + net_dev->priv_flags |= IFF_UNICAST_FLT; + } else { + net_dev->netdev_ops = &efx_farch_netdev_ops; + } + net_dev->ethtool_ops = 
&efx_ethtool_ops; + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; + + rtnl_lock(); + + /* Enable resets to be scheduled and check whether any were + * already requested. If so, the NIC is probably hosed so we + * abort. + */ + efx->state = STATE_READY; + smp_mb(); /* ensure we change state before checking reset_pending */ + if (efx->reset_pending) { + netif_err(efx, probe, efx->net_dev, + "aborting probe due to scheduled reset\n"); + rc = -EIO; + goto fail_locked; + } + + rc = dev_alloc_name(net_dev, net_dev->name); + if (rc < 0) + goto fail_locked; + efx_update_name(efx); + + /* Always start with carrier off; PHY events will detect the link */ + netif_carrier_off(net_dev); + + rc = register_netdevice(net_dev); + if (rc) + goto fail_locked; + + efx_for_each_channel(channel, efx) { + struct efx_tx_queue *tx_queue; + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_init_tx_queue_core_txq(tx_queue); + } + + efx_associate(efx); + + rtnl_unlock(); + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail_registered; + } + + return 0; + +fail_registered: + rtnl_lock(); + efx_dissociate(efx); + unregister_netdevice(net_dev); +fail_locked: + efx->state = STATE_UNINIT; + rtnl_unlock(); + netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); + return rc; +} + +static void efx_unregister_netdev(struct efx_nic *efx) +{ + if (!efx->net_dev) + return; + + BUG_ON(netdev_priv(efx->net_dev) != efx); + + strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + + rtnl_lock(); + unregister_netdevice(efx->net_dev); + efx->state = STATE_UNINIT; + rtnl_unlock(); +} + +/************************************************************************** + * + * Device reset and suspend + * + **************************************************************************/ + +/* Tears down the entire software state and most of the hardware state + * before reset. */ +void efx_reset_down(struct efx_nic *efx, enum reset_type method) +{ + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->prepare_flr(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + + mutex_lock(&efx->mac_lock); + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) + efx->phy_op->fini(efx); + efx->type->fini(efx); +} + +/* This function will always ensure that the locks acquired in + * efx_reset_down() are released. A failure return code indicates + * that we were unable to reinitialise the hardware, and the + * driver should be disabled. If ok is false, then the rx and tx + * engines are not restarted, pending a RESET_DISABLE. 
*/ +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) +{ + int rc; + + EFX_ASSERT_RESET_SERIALISED(efx); + + if (method == RESET_TYPE_MCDI_TIMEOUT) + efx->type->finish_flr(efx); + + /* Ensure that SRAM is initialised even if we're disabling the device */ + rc = efx->type->init(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); + goto fail; + } + + if (!ok) + goto fail; + + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { + rc = efx->phy_op->init(efx); + if (rc) + goto fail; + if (efx->phy_op->reconfigure(efx)) + netif_err(efx, drv, efx->net_dev, + "could not restore PHY settings\n"); + } + + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; + efx_restore_filters(efx); + efx->type->sriov_reset(efx); + + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + + return 0; + +fail: + efx->port_initialized = false; + + mutex_unlock(&efx->mac_lock); + + return rc; +} + +/* Reset the NIC using the specified method. Note that the reset may + * fail, in which case the card will be left in an unusable state. + * + * Caller must hold the rtnl_lock. + */ +int efx_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc, rc2; + bool disabled; + + netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", + RESET_TYPE(method)); + + efx_device_detach_sync(efx); + efx_reset_down(efx, method); + + rc = efx->type->reset(efx, method); + if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); + goto out; + } + + /* Clear flags for the scopes we covered. We assume the NIC and + * driver are now quiescent so that there is no race here. + */ + if (method < RESET_TYPE_MAX_METHOD) + efx->reset_pending &= -(1 << (method + 1)); + else /* it doesn't fit into the well-ordered scope hierarchy */ + __clear_bit(method, &efx->reset_pending); + + /* Reinitialise bus-mastering, which may have been turned off before + * the reset was scheduled. This is still appropriate, even in the + * RESET_TYPE_DISABLE since this driver generally assumes the hardware + * can respond to requests. */ + pci_set_master(efx->pci_dev); + +out: + /* Leave device stopped if necessary */ + disabled = rc || + method == RESET_TYPE_DISABLE || + method == RESET_TYPE_RECOVER_OR_DISABLE; + rc2 = efx_reset_up(efx, method, !disabled); + if (rc2) { + disabled = true; + if (!rc) + rc = rc2; + } + + if (disabled) { + dev_close(efx->net_dev); + netif_err(efx, drv, efx->net_dev, "has been disabled\n"); + efx->state = STATE_DISABLED; + } else { + netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); + netif_device_attach(efx->net_dev); + } + return rc; +} + +/* Try recovery mechanisms. + * For now only EEH is supported. + * Returns 0 if the recovery mechanisms are unsuccessful. + * Returns a non-zero value otherwise. + */ +int efx_try_recovery(struct efx_nic *efx) +{ +#ifdef CONFIG_EEH + /* A PCI error can occur and not be seen by EEH because nothing + * happens on the PCI bus. In this case the driver may fail and + * schedule a 'recover or reset', leading to this recovery handler. + * Manually call the eeh failure check function. + */ + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + if (eeh_dev_check_failure(eehdev)) { + /* The EEH mechanisms will handle the error and reset the + * device if necessary. 
+ */ + return 1; + } +#endif + return 0; +} + +static void efx_wait_for_bist_end(struct efx_nic *efx) +{ + int i; + + for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) { + if (efx_mcdi_poll_reboot(efx)) + goto out; + msleep(BIST_WAIT_DELAY_MS); + } + + netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n"); +out: + /* Either way unset the BIST flag. If we found no reboot we probably + * won't recover, but we should try. + */ + efx->mc_bist_for_other_fn = false; +} + +/* The worker thread exists so that code that cannot sleep can + * schedule a reset for later. + */ +static void efx_reset_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); + unsigned long pending; + enum reset_type method; + + pending = ACCESS_ONCE(efx->reset_pending); + method = fls(pending) - 1; + + if (method == RESET_TYPE_MC_BIST) + efx_wait_for_bist_end(efx); + + if ((method == RESET_TYPE_RECOVER_OR_DISABLE || + method == RESET_TYPE_RECOVER_OR_ALL) && + efx_try_recovery(efx)) + return; + + if (!pending) + return; + + rtnl_lock(); + + /* We checked the state in efx_schedule_reset() but it may + * have changed by now. Now that we have the RTNL lock, + * it cannot change again. + */ + if (efx->state == STATE_READY) + (void)efx_reset(efx, method); + + rtnl_unlock(); +} + +void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) +{ + enum reset_type method; + + if (efx->state == STATE_RECOVERY) { + netif_dbg(efx, drv, efx->net_dev, + "recovering: skip scheduling %s reset\n", + RESET_TYPE(type)); + return; + } + + switch (type) { + case RESET_TYPE_INVISIBLE: + case RESET_TYPE_ALL: + case RESET_TYPE_RECOVER_OR_ALL: + case RESET_TYPE_WORLD: + case RESET_TYPE_DISABLE: + case RESET_TYPE_RECOVER_OR_DISABLE: + case RESET_TYPE_MC_BIST: + case RESET_TYPE_MCDI_TIMEOUT: + method = type; + netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", + RESET_TYPE(method)); + break; + default: + method = efx->type->map_reset_reason(type); + netif_dbg(efx, drv, efx->net_dev, + "scheduling %s reset for %s\n", + RESET_TYPE(method), RESET_TYPE(type)); + break; + } + + set_bit(method, &efx->reset_pending); + smp_mb(); /* ensure we change reset_pending before checking state */ + + /* If we're not READY then just leave the flags set as the cue + * to abort probing or reschedule the reset later. + */ + if (ACCESS_ONCE(efx->state) != STATE_READY) + return; + + /* efx_process_channel() will no longer read events once a + * reset is scheduled. So switch back to poll'd MCDI completions. 
*/ + efx_mcdi_mode_poll(efx); + + queue_work(reset_workqueue, &efx->reset_work); +} + +/************************************************************************** + * + * List of NICs we support + * + **************************************************************************/ + +/* PCI device ID table */ +static const struct pci_device_id efx_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0), + .driver_data = (unsigned long) &falcon_a1_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000B), + .driver_data = (unsigned long) &falcon_b0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */ + .driver_data = (unsigned long) &siena_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */ + .driver_data = (unsigned long) &siena_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ + .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {0} /* end of list */ +}; + +/************************************************************************** + * + * Dummy PHY/MAC operations + * + * Can be used for some unimplemented operations + * Needed so all function pointers are valid and do not have to be tested + * before use + * + **************************************************************************/ +int efx_port_dummy_op_int(struct efx_nic *efx) +{ + return 0; +} +void efx_port_dummy_op_void(struct efx_nic *efx) {} + +static bool efx_port_dummy_op_poll(struct efx_nic *efx) +{ + return false; +} + +static const struct efx_phy_operations efx_dummy_phy_operations = { + .init = efx_port_dummy_op_int, + .reconfigure = efx_port_dummy_op_int, + .poll = efx_port_dummy_op_poll, + .fini = efx_port_dummy_op_void, +}; + +/************************************************************************** + * + * Data housekeeping + * + **************************************************************************/ + +/* This zeroes out and then fills in the invariants in a struct + * efx_nic (including all sub-structures). + */ +static int efx_init_struct(struct efx_nic *efx, + struct pci_dev *pci_dev, struct net_device *net_dev) +{ + int i; + + /* Initialise common structures */ + INIT_LIST_HEAD(&efx->node); + INIT_LIST_HEAD(&efx->secondary_list); + spin_lock_init(&efx->biu_lock); +#ifdef CONFIG_SFC_MTD + INIT_LIST_HEAD(&efx->mtd_list); +#endif + INIT_WORK(&efx->reset_work, efx_reset_work); + INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); + INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); + efx->pci_dev = pci_dev; + efx->msg_enable = debug; + efx->state = STATE_UNINIT; + strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); + + efx->net_dev = net_dev; + efx->rx_prefix_size = efx->type->rx_prefix_size; + efx->rx_ip_align = + NET_IP_ALIGN ? 
(efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0; + efx->rx_packet_hash_offset = + efx->type->rx_hash_offset - efx->type->rx_prefix_size; + efx->rx_packet_ts_offset = + efx->type->rx_ts_offset - efx->type->rx_prefix_size; + spin_lock_init(&efx->stats_lock); + mutex_init(&efx->mac_lock); + efx->phy_op = &efx_dummy_phy_operations; + efx->mdio.dev = net_dev; + INIT_WORK(&efx->mac_work, efx_mac_work); + init_waitqueue_head(&efx->flush_wq); + + for (i = 0; i < EFX_MAX_CHANNELS; i++) { + efx->channel[i] = efx_alloc_channel(efx, i, NULL); + if (!efx->channel[i]) + goto fail; + efx->msi_context[i].efx = efx; + efx->msi_context[i].index = i; + } + + /* Higher numbered interrupt modes are less capable! */ + efx->interrupt_mode = max(efx->type->max_interrupt_mode, + interrupt_mode); + + /* Would be good to use the net_dev name, but we're too early */ + snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", + pci_name(pci_dev)); + efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); + if (!efx->workqueue) + goto fail; + + return 0; + +fail: + efx_fini_struct(efx); + return -ENOMEM; +} + +static void efx_fini_struct(struct efx_nic *efx) +{ + int i; + + for (i = 0; i < EFX_MAX_CHANNELS; i++) + kfree(efx->channel[i]); + + kfree(efx->vpd_sn); + + if (efx->workqueue) { + destroy_workqueue(efx->workqueue); + efx->workqueue = NULL; + } +} + +void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) +{ + u64 n_rx_nodesc_trunc = 0; + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc; + stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); +} + +/************************************************************************** + * + * PCI interface + * + **************************************************************************/ + +/* Main body of final NIC shutdown code + * This is called only at module unload (or hotplug removal). + */ +static void efx_pci_remove_main(struct efx_nic *efx) +{ + /* Flush reset_work. It can no longer be scheduled since we + * are not READY. + */ + BUG_ON(efx->state == STATE_READY); + cancel_work_sync(&efx->reset_work); + + efx_disable_interrupts(efx); + efx_nic_fini_interrupt(efx); + efx_fini_port(efx); + efx->type->fini(efx); + efx_fini_napi(efx); + efx_remove_all(efx); +} + +/* Final NIC shutdown + * This is called only at module unload (or hotplug removal). + */ +static void efx_pci_remove(struct pci_dev *pci_dev) +{ + struct efx_nic *efx; + + efx = pci_get_drvdata(pci_dev); + if (!efx) + return; + + /* Mark the NIC as fini, then stop the interface */ + rtnl_lock(); + efx_dissociate(efx); + dev_close(efx->net_dev); + efx_disable_interrupts(efx); + rtnl_unlock(); + + efx->type->sriov_fini(efx); + efx_unregister_netdev(efx); + + efx_mtd_remove(efx); + + efx_pci_remove_main(efx); + + efx_fini_io(efx); + netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); + + efx_fini_struct(efx); + free_netdev(efx->net_dev); + + pci_disable_pcie_error_reporting(pci_dev); +}; + +/* NIC VPD information + * Called during probe to display the part number of the + * installed NIC. VPD is potentially very large but this should + * always appear within the first 512 bytes. 
+ */ +#define SFC_VPD_LEN 512 +static void efx_probe_vpd_strings(struct efx_nic *efx) +{ + struct pci_dev *dev = efx->pci_dev; + char vpd_data[SFC_VPD_LEN]; + ssize_t vpd_size; + int ro_start, ro_size, i, j; + + /* Get the vpd data from the device */ + vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); + if (vpd_size <= 0) { + netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); + return; + } + + /* Get the Read only section */ + ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA); + if (ro_start < 0) { + netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); + return; + } + + ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); + j = ro_size; + i = ro_start + PCI_VPD_LRDT_TAG_SIZE; + if (i + j > vpd_size) + j = vpd_size - i; + + /* Get the Part number */ + i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); + if (i < 0) { + netif_err(efx, drv, efx->net_dev, "Part number not found\n"); + return; + } + + j = pci_vpd_info_field_size(&vpd_data[i]); + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (i + j > vpd_size) { + netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); + return; + } + + netif_info(efx, drv, efx->net_dev, + "Part Number : %.*s\n", j, &vpd_data[i]); + + i = ro_start + PCI_VPD_LRDT_TAG_SIZE; + j = ro_size; + i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); + if (i < 0) { + netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); + return; + } + + j = pci_vpd_info_field_size(&vpd_data[i]); + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (i + j > vpd_size) { + netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); + return; + } + + efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); + if (!efx->vpd_sn) + return; + + snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); +} + + +/* Main body of NIC initialisation + * This is called at module load (or hotplug insertion, theoretically). + */ +static int efx_pci_probe_main(struct efx_nic *efx) +{ + int rc; + + /* Do start-of-day initialisation */ + rc = efx_probe_all(efx); + if (rc) + goto fail1; + + efx_init_napi(efx); + + rc = efx->type->init(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise NIC\n"); + goto fail3; + } + + rc = efx_init_port(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise port\n"); + goto fail4; + } + + rc = efx_nic_init_interrupt(efx); + if (rc) + goto fail5; + rc = efx_enable_interrupts(efx); + if (rc) + goto fail6; + + return 0; + + fail6: + efx_nic_fini_interrupt(efx); + fail5: + efx_fini_port(efx); + fail4: + efx->type->fini(efx); + fail3: + efx_fini_napi(efx); + efx_remove_all(efx); + fail1: + return rc; +} + +/* NIC initialisation + * + * This is called at module load (or hotplug insertion, + * theoretically). It sets up PCI mappings, resets the NIC, + * sets up and registers the network devices with the kernel and hooks + * the interrupt service routine. It does not prepare the device for + * transmission; this is left to the first time one of the network + * interfaces is brought up (i.e. efx_net_open). 
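+ *
+ * The probe sequence below is: allocate the net_device and efx_nic,
+ * fill in the invariants (efx_init_struct), read the VPD strings, map
+ * the PCI BARs (efx_init_io), run start-of-day initialisation
+ * (efx_pci_probe_main), register the net_device, and then optionally
+ * enable SR-IOV, create MTD partitions and turn on PCIe AER reporting.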
+ */ +static int efx_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) +{ + struct net_device *net_dev; + struct efx_nic *efx; + int rc; + + /* Allocate and initialise a struct net_device and struct efx_nic */ + net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, + EFX_MAX_RX_QUEUES); + if (!net_dev) + return -ENOMEM; + efx = netdev_priv(net_dev); + efx->type = (const struct efx_nic_type *) entry->driver_data; + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_TSO | + NETIF_F_RXCSUM); + if (efx->type->offload_features & NETIF_F_V6_CSUM) + net_dev->features |= NETIF_F_TSO6; + /* Mask for features that also apply to VLAN devices */ + net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | + NETIF_F_RXCSUM); + /* All offloads can be toggled */ + net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; + pci_set_drvdata(pci_dev, efx); + SET_NETDEV_DEV(net_dev, &pci_dev->dev); + rc = efx_init_struct(efx, pci_dev, net_dev); + if (rc) + goto fail1; + + netif_info(efx, probe, efx->net_dev, + "Solarflare NIC detected\n"); + + efx_probe_vpd_strings(efx); + + /* Set up basic I/O (BAR mappings etc) */ + rc = efx_init_io(efx); + if (rc) + goto fail2; + + rc = efx_pci_probe_main(efx); + if (rc) + goto fail3; + + rc = efx_register_netdev(efx); + if (rc) + goto fail4; + + rc = efx->type->sriov_init(efx); + if (rc) + netif_err(efx, probe, efx->net_dev, + "SR-IOV can't be enabled rc %d\n", rc); + + netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); + + /* Try to create MTDs, but allow this to fail */ + rtnl_lock(); + rc = efx_mtd_probe(efx); + rtnl_unlock(); + if (rc) + netif_warn(efx, probe, efx->net_dev, + "failed to create MTDs (%d)\n", rc); + + rc = pci_enable_pcie_error_reporting(pci_dev); + if (rc && rc != -EINVAL) + netif_warn(efx, probe, efx->net_dev, + "pci_enable_pcie_error_reporting failed (%d)\n", rc); + + return 0; + + fail4: + efx_pci_remove_main(efx); + fail3: + efx_fini_io(efx); + fail2: + efx_fini_struct(efx); + fail1: + WARN_ON(rc > 0); + netif_dbg(efx, drv, efx->net_dev, "initialisation failed. 
rc=%d\n", rc); + free_netdev(net_dev); + return rc; +} + +static int efx_pm_freeze(struct device *dev) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_UNINIT; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + } + + rtnl_unlock(); + + return 0; +} + +static int efx_pm_thaw(struct device *dev) +{ + int rc; + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; + + mutex_lock(&efx->mac_lock); + efx->phy_op->reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + efx_start_all(efx); + + netif_device_attach(efx->net_dev); + + efx->state = STATE_READY; + + efx->type->resume_wol(efx); + } + + rtnl_unlock(); + + /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ + queue_work(reset_workqueue, &efx->reset_work); + + return 0; + +fail: + rtnl_unlock(); + + return rc; +} + +static int efx_pm_poweroff(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + + efx->type->fini(efx); + + efx->reset_pending = 0; + + pci_save_state(pci_dev); + return pci_set_power_state(pci_dev, PCI_D3hot); +} + +/* Used for both resume and restore */ +static int efx_pm_resume(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct efx_nic *efx = pci_get_drvdata(pci_dev); + int rc; + + rc = pci_set_power_state(pci_dev, PCI_D0); + if (rc) + return rc; + pci_restore_state(pci_dev); + rc = pci_enable_device(pci_dev); + if (rc) + return rc; + pci_set_master(efx->pci_dev); + rc = efx->type->reset(efx, RESET_TYPE_ALL); + if (rc) + return rc; + rc = efx->type->init(efx); + if (rc) + return rc; + rc = efx_pm_thaw(dev); + return rc; +} + +static int efx_pm_suspend(struct device *dev) +{ + int rc; + + efx_pm_freeze(dev); + rc = efx_pm_poweroff(dev); + if (rc) + efx_pm_resume(dev); + return rc; +} + +static const struct dev_pm_ops efx_pm_ops = { + .suspend = efx_pm_suspend, + .resume = efx_pm_resume, + .freeze = efx_pm_freeze, + .thaw = efx_pm_thaw, + .poweroff = efx_pm_poweroff, + .restore = efx_pm_resume, +}; + +/* A PCI error affecting this device was detected. + * At this point MMIO and DMA may be disabled. + * Stop the software path and request a slot reset. + */ +static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + struct efx_nic *efx = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_RECOVERY; + efx->reset_pending = 0; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + + status = PCI_ERS_RESULT_NEED_RESET; + } else { + /* If the interface is disabled we don't want to do anything + * with it. + */ + status = PCI_ERS_RESULT_RECOVERED; + } + + rtnl_unlock(); + + pci_disable_device(pdev); + + return status; +} + +/* Fake a successful reset, which will be performed later in efx_io_resume. 
*/ +static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + int rc; + + if (pci_enable_device(pdev)) { + netif_err(efx, hw, efx->net_dev, + "Cannot re-enable PCI device after reset.\n"); + status = PCI_ERS_RESULT_DISCONNECT; + } + + rc = pci_cleanup_aer_uncorrect_error_status(pdev); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc); + /* Non-fatal error. Continue. */ + } + + return status; +} + +/* Perform the actual reset and resume I/O operations. */ +static void efx_io_resume(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + int rc; + + rtnl_lock(); + + if (efx->state == STATE_DISABLED) + goto out; + + rc = efx_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "efx_reset failed after PCI error (%d)\n", rc); + } else { + efx->state = STATE_READY; + netif_dbg(efx, hw, efx->net_dev, + "Done resetting and resuming IO after PCI error.\n"); + } + +out: + rtnl_unlock(); +} + +/* For simplicity and reliability, we always require a slot reset and try to + * reset the hardware when a pci error affecting the device is detected. + * We leave both the link_reset and mmio_enabled callback unimplemented: + * with our request for slot reset the mmio_enabled callback will never be + * called, and the link_reset callback is not used by AER or EEH mechanisms. + */ +static struct pci_error_handlers efx_err_handlers = { + .error_detected = efx_io_error_detected, + .slot_reset = efx_io_slot_reset, + .resume = efx_io_resume, +}; + +static struct pci_driver efx_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = efx_pci_table, + .probe = efx_pci_probe, + .remove = efx_pci_remove, + .driver.pm = &efx_pm_ops, + .err_handler = &efx_err_handlers, +}; + +/************************************************************************** + * + * Kernel module interface + * + *************************************************************************/ + +module_param(interrupt_mode, uint, 0444); +MODULE_PARM_DESC(interrupt_mode, + "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); + +static int __init efx_init_module(void) +{ + int rc; + + printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); + + rc = register_netdevice_notifier(&efx_netdev_notifier); + if (rc) + goto err_notifier; + + rc = efx_init_sriov(); + if (rc) + goto err_sriov; + + reset_workqueue = create_singlethread_workqueue("sfc_reset"); + if (!reset_workqueue) { + rc = -ENOMEM; + goto err_reset; + } + + rc = pci_register_driver(&efx_pci_driver); + if (rc < 0) + goto err_pci; + + return 0; + + err_pci: + destroy_workqueue(reset_workqueue); + err_reset: + efx_fini_sriov(); + err_sriov: + unregister_netdevice_notifier(&efx_netdev_notifier); + err_notifier: + return rc; +} + +static void __exit efx_exit_module(void) +{ + printk(KERN_INFO "Solarflare NET driver unloading\n"); + + pci_unregister_driver(&efx_pci_driver); + destroy_workqueue(reset_workqueue); + efx_fini_sriov(); + unregister_netdevice_notifier(&efx_netdev_notifier); + +} + +module_init(efx_init_module); +module_exit(efx_exit_module); + +MODULE_AUTHOR("Solarflare Communications and " + "Michael Brown "); +MODULE_DESCRIPTION("Solarflare network driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, efx_pci_table); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h new file mode 100644 index 000000000..2587c582a --- /dev/null +++ 
b/drivers/net/ethernet/sfc/efx.h @@ -0,0 +1,255 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EFX_H +#define EFX_EFX_H + +#include "net_driver.h" +#include "filter.h" + +/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ +#define EFX_MEM_BAR 2 + +/* TX */ +int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); +void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); +void efx_init_tx_queue(struct efx_tx_queue *tx_queue); +void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); +netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev); +netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); +extern unsigned int efx_piobuf_size; + +/* RX */ +void efx_rx_config_page_split(struct efx_nic *efx); +int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); +void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); +void efx_init_rx_queue(struct efx_rx_queue *rx_queue); +void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); +void efx_rx_slow_fill(unsigned long context); +void __efx_rx_packet(struct efx_channel *channel); +void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags); +static inline void efx_rx_flush_packet(struct efx_channel *channel) +{ + if (channel->rx_pkt_n_frags) + __efx_rx_packet(channel); +} +void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); + +#define EFX_MAX_DMAQ_SIZE 4096UL +#define EFX_DEFAULT_DMAQ_SIZE 1024UL +#define EFX_MIN_DMAQ_SIZE 512UL + +#define EFX_MAX_EVQ_SIZE 16384UL +#define EFX_MIN_EVQ_SIZE 512UL + +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. + */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) + +#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ + EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) + +/* Filters */ + +/** + * efx_filter_insert_filter - add or replace a filter + * @efx: NIC in which to insert the filter + * @spec: Specification for the filter + * @replace_equal: Flag for whether the specified filter may replace an + * existing filter with equal priority + * + * On success, return the filter ID. + * On failure, return a negative error code. + * + * If existing filters have equal match values to the new filter spec, + * then the new filter might replace them or the function might fail, + * as follows. + * + * 1. If the existing filters have lower priority, or @replace_equal + * is set and they have equal priority, replace them. + * + * 2. If the existing filters have higher priority, return -%EPERM. + * + * 3. 
If !efx_filter_is_mc_recipient(@spec), or the NIC does not + * support delivery to multiple recipients, return -%EEXIST. + * + * This implies that filters for multiple multicast recipients must + * all be inserted with the same priority and @replace_equal = %false. + */ +static inline s32 efx_filter_insert_filter(struct efx_nic *efx, + struct efx_filter_spec *spec, + bool replace_equal) +{ + return efx->type->filter_insert(efx, spec, replace_equal); +} + +/** + * efx_filter_remove_id_safe - remove a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + */ +static inline int efx_filter_remove_id_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx->type->filter_remove_safe(efx, priority, filter_id); +} + +/** + * efx_filter_get_filter_safe - retrieve a filter by ID, carefully + * @efx: NIC from which to remove the filter + * @priority: Priority of filter, as passed to @efx_filter_insert_filter + * @filter_id: ID of filter, as returned by @efx_filter_insert_filter + * @spec: Buffer in which to store filter specification + * + * This function will range-check @filter_id, so it is safe to call + * with a value passed from userland. + */ +static inline int +efx_filter_get_filter_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec) +{ + return efx->type->filter_get_safe(efx, priority, filter_id, spec); +} + +static inline u32 efx_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + return efx->type->filter_count_rx_used(efx, priority); +} +static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) +{ + return efx->type->filter_get_rx_id_limit(efx); +} +static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + return efx->type->filter_get_rx_ids(efx, priority, buf, size); +} +#ifdef CONFIG_RFS_ACCEL +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); +static inline void efx_filter_rfs_expire(struct efx_channel *channel) +{ + if (channel->rfs_filters_added >= 60 && + __efx_filter_rfs_expire(channel->efx, 100)) + channel->rfs_filters_added -= 60; +} +#define efx_filter_rfs_enabled() 1 +#else +static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} +#define efx_filter_rfs_enabled() 0 +#endif +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); + +/* Channels */ +int efx_channel_dummy_op_int(struct efx_channel *channel); +void efx_channel_dummy_op_void(struct efx_channel *channel); +int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); + +/* Ports */ +int efx_reconfigure_port(struct efx_nic *efx); +int __efx_reconfigure_port(struct efx_nic *efx); + +/* Ethtool support */ +extern const struct ethtool_ops efx_ethtool_ops; + +/* Reset handling */ +int efx_reset(struct efx_nic *efx, enum reset_type method); +void efx_reset_down(struct efx_nic *efx, enum reset_type method); +int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); +int efx_try_recovery(struct efx_nic *efx); + +/* Global */ +void efx_schedule_reset(struct efx_nic *efx, enum 
reset_type type); +int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, + unsigned int rx_usecs, bool rx_adaptive, + bool rx_may_override_tx); +void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, + unsigned int *rx_usecs, bool *rx_adaptive); +void efx_stop_eventq(struct efx_channel *channel); +void efx_start_eventq(struct efx_channel *channel); + +/* Dummy PHY ops for PHY drivers */ +int efx_port_dummy_op_int(struct efx_nic *efx); +void efx_port_dummy_op_void(struct efx_nic *efx); + +/* Update the generic software stats in the passed stats array */ +void efx_update_sw_stats(struct efx_nic *efx, u64 *stats); + +/* MTD */ +#ifdef CONFIG_SFC_MTD +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part); +static inline int efx_mtd_probe(struct efx_nic *efx) +{ + return efx->type->mtd_probe(efx); +} +void efx_mtd_rename(struct efx_nic *efx); +void efx_mtd_remove(struct efx_nic *efx); +#else +static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } +static inline void efx_mtd_rename(struct efx_nic *efx) {} +static inline void efx_mtd_remove(struct efx_nic *efx) {} +#endif + +static inline void efx_schedule_channel(struct efx_channel *channel) +{ + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d scheduling NAPI poll on CPU%d\n", + channel->channel, raw_smp_processor_id()); + + napi_schedule(&channel->napi_str); +} + +static inline void efx_schedule_channel_irq(struct efx_channel *channel) +{ + channel->event_test_cpu = raw_smp_processor_id(); + efx_schedule_channel(channel); +} + +void efx_link_status_changed(struct efx_nic *efx); +void efx_link_set_advertising(struct efx_nic *efx, u32); +void efx_link_set_wanted_fc(struct efx_nic *efx, u8); + +static inline void efx_device_detach_sync(struct efx_nic *efx) +{ + struct net_device *dev = efx->net_dev; + + /* Lock/freeze all TX queues so that we can be sure the + * TX scheduler is stopped when we're done and before + * netif_device_present() becomes false. + */ + netif_tx_lock_bh(dev); + netif_device_detach(dev); + netif_tx_unlock_bh(dev); +} + +#endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h new file mode 100644 index 000000000..d1dbb5fb3 --- /dev/null +++ b/drivers/net/ethernet/sfc/enum.h @@ -0,0 +1,181 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2007-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_ENUM_H +#define EFX_ENUM_H + +/** + * enum efx_loopback_mode - loopback modes + * @LOOPBACK_NONE: no loopback + * @LOOPBACK_DATA: data path loopback + * @LOOPBACK_GMAC: loopback within GMAC + * @LOOPBACK_XGMII: loopback after XMAC + * @LOOPBACK_XGXS: loopback within BPX after XGXS + * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes + * @LOOPBACK_GMII: loopback within BPX after GMAC + * @LOOPBACK_SGMII: loopback within BPX within SGMII + * @LOOPBACK_XGBR: loopback within BPX within XGBR + * @LOOPBACK_XFI: loopback within BPX before XFI serdes + * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes + * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII + * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII + * @LOOPBACK_XFI_FAR: loopback after XFI serdes + * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level + * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level + * @LOOPBACK_PCS: loopback within 10G PHY at PCS level + * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level + * @LOOPBACK_XPORT: cross port loopback + * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC + * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes + * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes + * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes + * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC + * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes + * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes + * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level + */ +/* Please keep up-to-date w.r.t the following two #defines */ +enum efx_loopback_mode { + LOOPBACK_NONE = 0, + LOOPBACK_DATA = 1, + LOOPBACK_GMAC = 2, + LOOPBACK_XGMII = 3, + LOOPBACK_XGXS = 4, + LOOPBACK_XAUI = 5, + LOOPBACK_GMII = 6, + LOOPBACK_SGMII = 7, + LOOPBACK_XGBR = 8, + LOOPBACK_XFI = 9, + LOOPBACK_XAUI_FAR = 10, + LOOPBACK_GMII_FAR = 11, + LOOPBACK_SGMII_FAR = 12, + LOOPBACK_XFI_FAR = 13, + LOOPBACK_GPHY = 14, + LOOPBACK_PHYXS = 15, + LOOPBACK_PCS = 16, + LOOPBACK_PMAPMD = 17, + LOOPBACK_XPORT = 18, + LOOPBACK_XGMII_WS = 19, + LOOPBACK_XAUI_WS = 20, + LOOPBACK_XAUI_WS_FAR = 21, + LOOPBACK_XAUI_WS_NEAR = 22, + LOOPBACK_GMII_WS = 23, + LOOPBACK_XFI_WS = 24, + LOOPBACK_XFI_WS_FAR = 25, + LOOPBACK_PHYXS_WS = 26, + LOOPBACK_MAX +}; +#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD + +/* These loopbacks occur within the controller */ +#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \ + (1 << LOOPBACK_GMAC) | \ + (1 << LOOPBACK_XGMII)| \ + (1 << LOOPBACK_XGXS) | \ + (1 << LOOPBACK_XAUI) | \ + (1 << LOOPBACK_GMII) | \ + (1 << LOOPBACK_SGMII) | \ + (1 << LOOPBACK_SGMII) | \ + (1 << LOOPBACK_XGBR) | \ + (1 << LOOPBACK_XFI) | \ + (1 << LOOPBACK_XAUI_FAR) | \ + (1 << LOOPBACK_GMII_FAR) | \ + (1 << LOOPBACK_SGMII_FAR) | \ + (1 << LOOPBACK_XFI_FAR) | \ + (1 << LOOPBACK_XGMII_WS) | \ + (1 << LOOPBACK_XAUI_WS) | \ + (1 << LOOPBACK_XAUI_WS_FAR) | \ + (1 << LOOPBACK_XAUI_WS_NEAR) | \ + (1 << LOOPBACK_GMII_WS) | \ + (1 << LOOPBACK_XFI_WS) | \ + (1 << LOOPBACK_XFI_WS_FAR)) + +#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \ + (1 << LOOPBACK_XAUI_WS) | \ + (1 << LOOPBACK_XAUI_WS_FAR) | \ + (1 << LOOPBACK_XAUI_WS_NEAR) | \ + (1 << LOOPBACK_GMII_WS) | \ + (1 << LOOPBACK_XFI_WS) | \ + (1 << LOOPBACK_XFI_WS_FAR) | \ + (1 << LOOPBACK_PHYXS_WS)) + +#define LOOPBACKS_EXTERNAL(_efx) \ + ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \ + ~(1 << LOOPBACK_NONE)) + +#define LOOPBACK_MASK(_efx) \ + (1 << 
(_efx)->loopback_mode) + +#define LOOPBACK_INTERNAL(_efx) \ + (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx))) + +#define LOOPBACK_EXTERNAL(_efx) \ + (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx))) + +#define LOOPBACK_CHANGED(_from, _to, _mask) \ + (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask))) + +#define LOOPBACK_OUT_OF(_from, _to, _mask) \ + ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask))) + +/*****************************************************************************/ + +/** + * enum reset_type - reset types + * + * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and + * %RESET_TYPE_DISABLE specify the method/scope of the reset. The + * other values specify reasons, which efx_schedule_reset() will choose + * a method for. + * + * Reset methods are numbered in order of increasing scope. + * + * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) + * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL + * if unsuccessful. + * @RESET_TYPE_ALL: Reset datapath, MAC and PHY + * @RESET_TYPE_WORLD: Reset as much as possible + * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if + * unsuccessful. + * @RESET_TYPE_MC_BIST: MC entering BIST mode. + * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled + * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog + * @RESET_TYPE_INT_ERROR: reset due to internal error + * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors + * @RESET_TYPE_DMA_ERROR: DMA error + * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors + * @RESET_TYPE_MC_FAILURE: MC reboot/assertion + * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout. + */ +enum reset_type { + RESET_TYPE_INVISIBLE, + RESET_TYPE_RECOVER_OR_ALL, + RESET_TYPE_ALL, + RESET_TYPE_WORLD, + RESET_TYPE_RECOVER_OR_DISABLE, + RESET_TYPE_MC_BIST, + RESET_TYPE_DISABLE, + RESET_TYPE_MAX_METHOD, + RESET_TYPE_TX_WATCHDOG, + RESET_TYPE_INT_ERROR, + RESET_TYPE_RX_RECOVERY, + RESET_TYPE_DMA_ERROR, + RESET_TYPE_TX_SKIP, + RESET_TYPE_MC_FAILURE, + /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but + * it doesn't fit the scope hierarchy (not well-ordered by inclusion). + * We encode this by having its enum value be greater than + * RESET_TYPE_MAX_METHOD. This also prevents issuing it with + * efx_ioctl_reset. + */ + RESET_TYPE_MCDI_TIMEOUT, + RESET_TYPE_MAX, +}; + +#endif /* EFX_ENUM_H */ diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c new file mode 100644 index 000000000..4835bc0d0 --- /dev/null +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -0,0 +1,1196 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference.
+ */ + +#include +#include +#include +#include +#include "net_driver.h" +#include "workarounds.h" +#include "selftest.h" +#include "efx.h" +#include "filter.h" +#include "nic.h" + +struct efx_sw_stat_desc { + const char *name; + enum { + EFX_ETHTOOL_STAT_SOURCE_nic, + EFX_ETHTOOL_STAT_SOURCE_channel, + EFX_ETHTOOL_STAT_SOURCE_tx_queue + } source; + unsigned offset; + u64(*get_stat) (void *field); /* Reader function */ +}; + +/* Initialiser for a struct efx_sw_stat_desc with type-checking */ +#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ + get_stat_function) { \ + .name = #stat_name, \ + .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ + .offset = ((((field_type *) 0) == \ + &((struct efx_##source_name *)0)->field) ? \ + offsetof(struct efx_##source_name, field) : \ + offsetof(struct efx_##source_name, field)), \ + .get_stat = get_stat_function, \ +} + +static u64 efx_get_uint_stat(void *field) +{ + return *(unsigned int *)field; +} + +static u64 efx_get_atomic_stat(void *field) +{ + return atomic_read((atomic_t *) field); +} + +#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ + EFX_ETHTOOL_STAT(field, nic, field, \ + atomic_t, efx_get_atomic_stat) + +#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ + EFX_ETHTOOL_STAT(field, channel, n_##field, \ + unsigned int, efx_get_uint_stat) + +#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ + EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ + unsigned int, efx_get_uint_stat) + +static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { + EFX_ETHTOOL_UINT_TXQ_STAT(merge_events), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), + EFX_ETHTOOL_UINT_TXQ_STAT(pushes), + EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets), + EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events), + EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets), +}; + +#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) + +#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB + +/************************************************************************** + * + * Ethtool operations + * + ************************************************************************** + */ + +/* Identify device by flashing LEDs */ +static int efx_ethtool_phys_id(struct net_device *net_dev, + enum ethtool_phys_id_state state) +{ + struct efx_nic *efx = netdev_priv(net_dev); + enum efx_led_mode mode = EFX_LED_DEFAULT; + + switch (state) { + case ETHTOOL_ID_ON: + mode = EFX_LED_ON; + break; + case ETHTOOL_ID_OFF: + mode = EFX_LED_OFF; + break; + case ETHTOOL_ID_INACTIVE: + mode = EFX_LED_DEFAULT; + break; + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + } + + efx->type->set_id_led(efx, mode); + return 0; +} + +/* This must be called with rtnl_lock held. 
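+ * (The ethtool core takes the RTNL lock around every ethtool_ops call,
+ * so this requirement is met for all of the handlers in this file.)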
*/ +static int efx_ethtool_get_settings(struct net_device *net_dev, + struct ethtool_cmd *ecmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_link_state *link_state = &efx->link_state; + + mutex_lock(&efx->mac_lock); + efx->phy_op->get_settings(efx, ecmd); + mutex_unlock(&efx->mac_lock); + + /* Both MACs support pause frames (bidirectional and respond-only) */ + ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + + if (LOOPBACK_INTERNAL(efx)) { + ethtool_cmd_speed_set(ecmd, link_state->speed); + ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + } + + return 0; +} + +/* This must be called with rtnl_lock held. */ +static int efx_ethtool_set_settings(struct net_device *net_dev, + struct ethtool_cmd *ecmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + /* GMAC does not support 1000Mbps HD */ + if ((ethtool_cmd_speed(ecmd) == SPEED_1000) && + (ecmd->duplex != DUPLEX_FULL)) { + netif_dbg(efx, drv, efx->net_dev, + "rejecting unsupported 1000Mbps HD setting\n"); + return -EINVAL; + } + + mutex_lock(&efx->mac_lock); + rc = efx->phy_op->set_settings(efx, ecmd); + mutex_unlock(&efx->mac_lock); + return rc; +} + +static void efx_ethtool_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) + efx_mcdi_print_fwver(efx, info->fw_version, + sizeof(info->fw_version)); + strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); +} + +static int efx_ethtool_get_regs_len(struct net_device *net_dev) +{ + return efx_nic_get_regs_len(netdev_priv(net_dev)); +} + +static void efx_ethtool_get_regs(struct net_device *net_dev, + struct ethtool_regs *regs, void *buf) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + regs->version = efx->type->revision; + efx_nic_get_regs(efx, buf); +} + +static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->msg_enable; +} + +static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) +{ + struct efx_nic *efx = netdev_priv(net_dev); + efx->msg_enable = msg_enable; +} + +/** + * efx_fill_test - fill in an individual self-test entry + * @test_index: Index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * @test: Pointer to test result (used only if data != %NULL) + * @unit_format: Unit name format (e.g. "chan\%d") + * @unit_id: Unit id (e.g. 0 for "chan0") + * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") + * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") + * + * Fill in an individual self-test entry. 
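+ *
+ * For example, unit_format "chan%d" with unit_id 2 and test_format
+ * "eventq.dma" (no test_id) yields the entry "chan2  eventq.dma"; the
+ * "%-6s %-24s" layout used below keeps the columns aligned in
+ * ethtool's self-test output.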
+ */ +static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, + int *test, const char *unit_format, int unit_id, + const char *test_format, const char *test_id) +{ + char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN]; + + /* Fill data value, if applicable */ + if (data) + data[test_index] = *test; + + /* Fill string, if applicable */ + if (strings) { + if (strchr(unit_format, '%')) + snprintf(unit_str, sizeof(unit_str), + unit_format, unit_id); + else + strcpy(unit_str, unit_format); + snprintf(test_str, sizeof(test_str), test_format, test_id); + snprintf(strings + test_index * ETH_GSTRING_LEN, + ETH_GSTRING_LEN, + "%-6s %-24s", unit_str, test_str); + } +} + +#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel +#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue +#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue +#define EFX_LOOPBACK_NAME(_mode, _counter) \ + "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) + +/** + * efx_fill_loopback_test - fill in a block of loopback self-test entries + * @efx: Efx NIC + * @lb_tests: Efx loopback self-test results structure + * @mode: Loopback test mode + * @test_index: Starting index of the test + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Fill in a block of loopback self-test entries. Return new test + * index. + */ +static int efx_fill_loopback_test(struct efx_nic *efx, + struct efx_loopback_self_tests *lb_tests, + enum efx_loopback_mode mode, + unsigned int test_index, + u8 *strings, u64 *data) +{ + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_sent[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_sent")); + efx_fill_test(test_index++, strings, data, + &lb_tests->tx_done[tx_queue->queue], + EFX_TX_QUEUE_NAME(tx_queue), + EFX_LOOPBACK_NAME(mode, "tx_done")); + } + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_good, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_good")); + efx_fill_test(test_index++, strings, data, + &lb_tests->rx_bad, + "rx", 0, + EFX_LOOPBACK_NAME(mode, "rx_bad")); + + return test_index; +} + +/** + * efx_ethtool_fill_self_tests - get self-test details + * @efx: Efx NIC + * @tests: Efx self-test results structure, or %NULL + * @strings: Ethtool strings, or %NULL + * @data: Ethtool test results, or %NULL + * + * Get self-test number of strings, strings, and/or test results. + * Return number of strings (== number of test results). + * + * The reason for merging these three functions is to make sure that + * they can never be inconsistent. 
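+ *
+ * The three callers below rely on this: get_sset_count() passes
+ * all-NULL arguments just to count the entries, get_strings() passes
+ * only @strings, and self_test() passes @tests and @data, so the
+ * counts, names and results always line up.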
+ */ +static int efx_ethtool_fill_self_tests(struct efx_nic *efx, + struct efx_self_tests *tests, + u8 *strings, u64 *data) +{ + struct efx_channel *channel; + unsigned int n = 0, i; + enum efx_loopback_mode mode; + + efx_fill_test(n++, strings, data, &tests->phy_alive, + "phy", 0, "alive", NULL); + efx_fill_test(n++, strings, data, &tests->nvram, + "core", 0, "nvram", NULL); + efx_fill_test(n++, strings, data, &tests->interrupt, + "core", 0, "interrupt", NULL); + + /* Event queues */ + efx_for_each_channel(channel, efx) { + efx_fill_test(n++, strings, data, + &tests->eventq_dma[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.dma", NULL); + efx_fill_test(n++, strings, data, + &tests->eventq_int[channel->channel], + EFX_CHANNEL_NAME(channel), + "eventq.int", NULL); + } + + efx_fill_test(n++, strings, data, &tests->memory, + "core", 0, "memory", NULL); + efx_fill_test(n++, strings, data, &tests->registers, + "core", 0, "registers", NULL); + + if (efx->phy_op->run_tests != NULL) { + EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); + + for (i = 0; true; ++i) { + const char *name; + + EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); + name = efx->phy_op->test_name(efx, i); + if (name == NULL) + break; + + efx_fill_test(n++, strings, data, &tests->phy_ext[i], + "phy", 0, name, NULL); + } + } + + /* Loopback tests */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(efx->loopback_modes & (1 << mode))) + continue; + n = efx_fill_loopback_test(efx, + &tests->loopback[mode], mode, n, + strings, data); + } + + return n; +} + +static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings) +{ + size_t n_stats = 0; + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { + n_stats++; + if (strings != NULL) { + snprintf(strings, ETH_GSTRING_LEN, + "tx-%u.tx_packets", + channel->tx_queue[0].queue / + EFX_TXQ_TYPES); + + strings += ETH_GSTRING_LEN; + } + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + n_stats++; + if (strings != NULL) { + snprintf(strings, ETH_GSTRING_LEN, + "rx-%d.rx_packets", channel->channel); + strings += ETH_GSTRING_LEN; + } + } + } + return n_stats; +} + +static int efx_ethtool_get_sset_count(struct net_device *net_dev, + int string_set) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + switch (string_set) { + case ETH_SS_STATS: + return efx->type->describe_stats(efx, NULL) + + EFX_ETHTOOL_SW_STAT_COUNT + + efx_describe_per_queue_stats(efx, NULL) + + efx_ptp_describe_stats(efx, NULL); + case ETH_SS_TEST: + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); + default: + return -EINVAL; + } +} + +static void efx_ethtool_get_strings(struct net_device *net_dev, + u32 string_set, u8 *strings) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int i; + + switch (string_set) { + case ETH_SS_STATS: + strings += (efx->type->describe_stats(efx, strings) * + ETH_GSTRING_LEN); + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) + strlcpy(strings + i * ETH_GSTRING_LEN, + efx_sw_stat_desc[i].name, ETH_GSTRING_LEN); + strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN; + strings += (efx_describe_per_queue_stats(efx, strings) * + ETH_GSTRING_LEN); + efx_ptp_describe_stats(efx, strings); + break; + case ETH_SS_TEST: + efx_ethtool_fill_self_tests(efx, NULL, strings, NULL); + break; + default: + /* No other string sets */ + break; + } +} + +static void efx_ethtool_get_stats(struct net_device *net_dev, + struct ethtool_stats *stats, + u64 
*data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + const struct efx_sw_stat_desc *stat; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int i; + + spin_lock_bh(&efx->stats_lock); + + /* Get NIC statistics */ + data += efx->type->update_stats(efx, data, NULL); + + /* Get software statistics */ + for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) { + stat = &efx_sw_stat_desc[i]; + switch (stat->source) { + case EFX_ETHTOOL_STAT_SOURCE_nic: + data[i] = stat->get_stat((void *)efx + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_channel: + data[i] = 0; + efx_for_each_channel(channel, efx) + data[i] += stat->get_stat((void *)channel + + stat->offset); + break; + case EFX_ETHTOOL_STAT_SOURCE_tx_queue: + data[i] = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + data[i] += + stat->get_stat((void *)tx_queue + + stat->offset); + } + break; + } + } + data += EFX_ETHTOOL_SW_STAT_COUNT; + + spin_unlock_bh(&efx->stats_lock); + + efx_for_each_channel(channel, efx) { + if (efx_channel_has_tx_queues(channel)) { + *data = 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + *data += tx_queue->tx_packets; + } + data++; + } + } + efx_for_each_channel(channel, efx) { + if (efx_channel_has_rx_queue(channel)) { + *data = 0; + efx_for_each_channel_rx_queue(rx_queue, channel) { + *data += rx_queue->rx_packets; + } + data++; + } + } + + efx_ptp_update_stats(efx, data); +} + +static void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_self_tests *efx_tests; + bool already_up; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + + if (efx->state != STATE_READY) { + rc = -EBUSY; + goto out; + } + + netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up) { + rc = dev_open(efx->net_dev); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed opening device.\n"); + goto out; + } + } + + rc = efx_selftest(efx, efx_tests, test->flags); + + if (!already_up) + dev_close(efx->net_dev); + + netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", + rc == 0 ? "passed" : "failed", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + +out: + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + +/* Restart autonegotiation */ +static int efx_ethtool_nway_reset(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return mdio45_nway_restart(&efx->mdio); +} + +/* + * Each channel has a single IRQ and moderation timer, started by any + * completion (or other event). Unless the module parameter + * separate_tx_channels is set, IRQs and moderation are therefore + * shared between RX and TX completions. In this case, when RX IRQ + * moderation is explicitly changed then TX IRQ moderation is + * automatically changed too, but otherwise we fail if the two values + * are requested to be different. + * + * The hardware does not support a limit on the number of completions + * before an IRQ, so we do not use the max_frames fields. We should + * report and require that max_frames == (usecs != 0), but this would + * invalidate existing user documentation. 
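+ *
+ * As an illustration, with shared channels a plain
+ *	ethtool -C ethX rx-usecs 50
+ * also moves TX moderation to 50 microseconds, because the TX value was
+ * not explicitly changed in the same request, whereas asking for
+ * different RX and TX values in one request is rejected.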
+ * + * The hardware does not have distinct settings for interrupt + * moderation while the previous IRQ is being handled, so we should + * not use the 'irq' fields. However, an earlier developer + * misunderstood the meaning of the 'irq' fields and the driver did + * not support the standard fields. To avoid invalidating existing + * user documentation, we report and accept changes through either the + * standard or 'irq' fields. If both are changed at the same time, we + * prefer the standard field. + * + * We implement adaptive IRQ moderation, but use a different algorithm + * from that assumed in the definition of struct ethtool_coalesce. + * Therefore we do not use any of the adaptive moderation parameters + * in it. + */ + +static int efx_ethtool_get_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce) +{ + struct efx_nic *efx = netdev_priv(net_dev); + unsigned int tx_usecs, rx_usecs; + bool rx_adaptive; + + efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); + + coalesce->tx_coalesce_usecs = tx_usecs; + coalesce->tx_coalesce_usecs_irq = tx_usecs; + coalesce->rx_coalesce_usecs = rx_usecs; + coalesce->rx_coalesce_usecs_irq = rx_usecs; + coalesce->use_adaptive_rx_coalesce = rx_adaptive; + + return 0; +} + +static int efx_ethtool_set_coalesce(struct net_device *net_dev, + struct ethtool_coalesce *coalesce) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + unsigned int tx_usecs, rx_usecs; + bool adaptive, rx_may_override_tx; + int rc; + + if (coalesce->use_adaptive_tx_coalesce) + return -EINVAL; + + efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); + + if (coalesce->rx_coalesce_usecs != rx_usecs) + rx_usecs = coalesce->rx_coalesce_usecs; + else + rx_usecs = coalesce->rx_coalesce_usecs_irq; + + adaptive = coalesce->use_adaptive_rx_coalesce; + + /* If channels are shared, TX IRQ moderation can be quietly + * overridden unless it is changed from its old value. 
+ */ + rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs && + coalesce->tx_coalesce_usecs_irq == tx_usecs); + if (coalesce->tx_coalesce_usecs != tx_usecs) + tx_usecs = coalesce->tx_coalesce_usecs; + else + tx_usecs = coalesce->tx_coalesce_usecs_irq; + + rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, + rx_may_override_tx); + if (rc != 0) + return rc; + + efx_for_each_channel(channel, efx) + efx->type->push_irq_moderation(channel); + + return 0; +} + +static void efx_ethtool_get_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; + ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx); + ring->rx_pending = efx->rxq_entries; + ring->tx_pending = efx->txq_entries; +} + +static int efx_ethtool_set_ringparam(struct net_device *net_dev, + struct ethtool_ringparam *ring) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending || + ring->rx_pending > EFX_MAX_DMAQ_SIZE || + ring->tx_pending > EFX_TXQ_MAX_ENT(efx)) + return -EINVAL; + + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { + netif_err(efx, drv, efx->net_dev, + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); + return -EINVAL; + } + + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); +} + +static int efx_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u8 wanted_fc, old_fc; + u32 old_adv; + int rc = 0; + + mutex_lock(&efx->mac_lock); + + wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | + (pause->tx_pause ? EFX_FC_TX : 0) | + (pause->autoneg ? EFX_FC_AUTO : 0)); + + if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { + netif_dbg(efx, drv, efx->net_dev, + "Flow control unsupported: tx ON rx OFF\n"); + rc = -EINVAL; + goto out; + } + + if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { + netif_dbg(efx, drv, efx->net_dev, + "Autonegotiation is disabled\n"); + rc = -EINVAL; + goto out; + } + + /* Hook for Falcon bug 11482 workaround */ + if (efx->type->prepare_enable_fc_tx && + (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) + efx->type->prepare_enable_fc_tx(efx); + + old_adv = efx->link_advertising; + old_fc = efx->wanted_fc; + efx_link_set_wanted_fc(efx, wanted_fc); + if (efx->link_advertising != old_adv || + (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { + rc = efx->phy_op->reconfigure(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Unable to advertise requested flow " + "control setting\n"); + goto out; + } + } + + /* Reconfigure the MAC. 
The PHY *may* generate a link state change event + * if the user just changed the advertised capabilities, but there's no + * harm doing this twice */ + efx->type->reconfigure_mac(efx); + +out: + mutex_unlock(&efx->mac_lock); + + return rc; +} + +static void efx_ethtool_get_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); + pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); + pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); +} + +static void efx_ethtool_get_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->type->get_wol(efx, wol); +} + + +static int efx_ethtool_set_wol(struct net_device *net_dev, + struct ethtool_wolinfo *wol) +{ + struct efx_nic *efx = netdev_priv(net_dev); + return efx->type->set_wol(efx, wol->wolopts); +} + +static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->map_reset_flags(flags); + if (rc < 0) + return rc; + + return efx_reset(efx, rc); +} + +/* MAC address mask including only I/G bit */ +static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; + +#define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define PORT_FULL_MASK ((__force __be16)~0) +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) + +static int efx_ethtool_get_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + struct efx_filter_spec spec; + int rc; + + rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, + rule->location, &spec); + if (rc) + return rc; + + if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + rule->ring_cookie = RX_CLS_FLOW_DISC; + else + rule->ring_cookie = spec.dmaq_id; + + if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IP) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
+ TCP_V4_FLOW : UDP_V4_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + ip_entry->ip4dst = spec.loc_host[0]; + ip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + ip_entry->ip4src = spec.rem_host[0]; + ip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip_entry->pdst = spec.loc_port; + ip_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip_entry->psrc = spec.rem_port; + ip_mask->psrc = PORT_FULL_MASK; + } + } else if (!(spec.match_flags & + ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | + EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_OUTER_VID))) { + rule->flow_type = ETHER_FLOW; + if (spec.match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { + ether_addr_copy(mac_entry->h_dest, spec.loc_mac); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) + eth_broadcast_addr(mac_mask->h_dest); + else + ether_addr_copy(mac_mask->h_dest, + mac_addr_ig_mask); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { + ether_addr_copy(mac_entry->h_source, spec.rem_mac); + eth_broadcast_addr(mac_mask->h_source); + } + if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + mac_entry->h_proto = spec.ether_type; + mac_mask->h_proto = ETHER_TYPE_FULL_MASK; + } + } else { + /* The above should handle all filters that we insert */ + WARN_ON(1); + return -EINVAL; + } + + if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { + rule->flow_type |= FLOW_EXT; + rule->h_ext.vlan_tci = spec.outer_vid; + rule->m_ext.vlan_tci = htons(0xfff); + } + + return rc; +} + +static int +efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = efx->n_rx_channels; + return 0; + + case ETHTOOL_GRXFH: { + unsigned min_revision = 0; + + info->data = 0; + switch (info->flow_type) { + case TCP_V4_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + min_revision = EFX_REV_FALCON_B0; + break; + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + min_revision = EFX_REV_SIENA_A0; + break; + default: + break; + } + if (efx_nic_rev(efx) < min_revision) + info->data = 0; + return 0; + } + + case ETHTOOL_GRXCLSRLCNT: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + info->data |= RX_CLS_LOC_SPECIAL; + info->rule_cnt = + efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); + return 0; + + case ETHTOOL_GRXCLSRULE: + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + return efx_ethtool_get_class_rule(efx, &info->fs); + + case ETHTOOL_GRXCLSRLALL: { + s32 rc; + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, + rule_locs, info->rule_cnt); + if (rc < 0) + return rc; + info->rule_cnt = rc; + return 0; + } + + default: + return -EOPNOTSUPP; + } +} + +static int efx_ethtool_set_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec 
*ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + struct efx_filter_spec spec; + int rc; + + /* Check that user wants us to choose the location */ + if (rule->location != RX_CLS_LOC_ANY) + return -EINVAL; + + /* Range-check ring_cookie */ + if (rule->ring_cookie >= efx->n_rx_channels && + rule->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + /* Check for unsupported extensions */ + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_etype || rule->m_ext.data[0] || + rule->m_ext.data[1])) + return -EINVAL; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, + efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, + (rule->ring_cookie == RX_CLS_FLOW_DISC) ? + EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); + + switch (rule->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IP); + spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ? + IPPROTO_TCP : IPPROTO_UDP); + if (ip_mask->ip4dst) { + if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = ip_entry->ip4dst; + } + if (ip_mask->ip4src) { + if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = ip_entry->ip4src; + } + if (ip_mask->pdst) { + if (ip_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip_entry->pdst; + } + if (ip_mask->psrc) { + if (ip_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip_entry->psrc; + } + if (ip_mask->tos) + return -EINVAL; + break; + + case ETHER_FLOW: + if (!is_zero_ether_addr(mac_mask->h_dest)) { + if (ether_addr_equal(mac_mask->h_dest, + mac_addr_ig_mask)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + else if (is_broadcast_ether_addr(mac_mask->h_dest)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; + else + return -EINVAL; + ether_addr_copy(spec.loc_mac, mac_entry->h_dest); + } + if (!is_zero_ether_addr(mac_mask->h_source)) { + if (!is_broadcast_ether_addr(mac_mask->h_source)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; + ether_addr_copy(spec.rem_mac, mac_entry->h_source); + } + if (mac_mask->h_proto) { + if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = mac_entry->h_proto; + } + break; + + default: + return -EINVAL; + } + + if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { + if (rule->m_ext.vlan_tci != htons(0xfff)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec.outer_vid = rule->h_ext.vlan_tci; + } + + rc = efx_filter_insert_filter(efx, &spec, true); + if (rc < 0) + return rc; + + rule->location = rc; + return 0; +} + +static int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXCLSRLINS: + return efx_ethtool_set_class_rule(efx, &info->fs); + + case ETHTOOL_SRXCLSRLDEL: + return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, + info->fs.location); + + default: + return -EOPNOTSUPP; + } +} + +static u32 efx_ethtool_get_rxfh_indir_size(struct net_device 
*net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 || + efx->n_rx_channels == 1) ? + 0 : ARRAY_SIZE(efx->rx_indir_table)); +} + +static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table)); + return 0; +} + +static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); + efx->type->rx_push_rss_config(efx); + return 0; +} + +static int efx_ethtool_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Software capabilities */ + ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE); + ts_info->phc_index = -1; + + efx_ptp_get_ts_info(efx, ts_info); + return 0; +} + +static int efx_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + if (!efx->phy_op || !efx->phy_op->get_module_eeprom) + return -EOPNOTSUPP; + + mutex_lock(&efx->mac_lock); + ret = efx->phy_op->get_module_eeprom(efx, ee, data); + mutex_unlock(&efx->mac_lock); + + return ret; +} + +static int efx_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + if (!efx->phy_op || !efx->phy_op->get_module_info) + return -EOPNOTSUPP; + + mutex_lock(&efx->mac_lock); + ret = efx->phy_op->get_module_info(efx, modinfo); + mutex_unlock(&efx->mac_lock); + + return ret; +} + +const struct ethtool_ops efx_ethtool_ops = { + .get_settings = efx_ethtool_get_settings, + .set_settings = efx_ethtool_set_settings, + .get_drvinfo = efx_ethtool_get_drvinfo, + .get_regs_len = efx_ethtool_get_regs_len, + .get_regs = efx_ethtool_get_regs, + .get_msglevel = efx_ethtool_get_msglevel, + .set_msglevel = efx_ethtool_set_msglevel, + .nway_reset = efx_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_coalesce = efx_ethtool_get_coalesce, + .set_coalesce = efx_ethtool_set_coalesce, + .get_ringparam = efx_ethtool_get_ringparam, + .set_ringparam = efx_ethtool_set_ringparam, + .get_pauseparam = efx_ethtool_get_pauseparam, + .set_pauseparam = efx_ethtool_set_pauseparam, + .get_sset_count = efx_ethtool_get_sset_count, + .self_test = efx_ethtool_self_test, + .get_strings = efx_ethtool_get_strings, + .set_phys_id = efx_ethtool_phys_id, + .get_ethtool_stats = efx_ethtool_get_stats, + .get_wol = efx_ethtool_get_wol, + .set_wol = efx_ethtool_set_wol, + .reset = efx_ethtool_reset, + .get_rxnfc = efx_ethtool_get_rxnfc, + .set_rxnfc = efx_ethtool_set_rxnfc, + .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, + .get_rxfh = efx_ethtool_get_rxfh, + .set_rxfh = efx_ethtool_set_rxfh, + .get_ts_info = efx_ethtool_get_ts_info, + .get_module_info = efx_ethtool_get_module_info, + .get_module_eeprom = efx_ethtool_get_module_eeprom, +}; diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c new file mode 100644 index 000000000..f166c8ef3 
--- /dev/null
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -0,0 +1,2892 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <linux/mii.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "phy.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "mdio_10g.h"
+
+/* Hardware control for SFC4000 (aka Falcon). */
+
+/**************************************************************************
+ *
+ * NIC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset
0xA4 +#define XgTxPkts64Octets_WIDTH 32 +#define XgTxPkts65to127Octets_offset 0xA8 +#define XgTxPkts65to127Octets_WIDTH 32 +#define XgTxPkts128to255Octets_offset 0xAC +#define XgTxPkts128to255Octets_WIDTH 32 +#define XgTxPkts256to511Octets_offset 0xB0 +#define XgTxPkts256to511Octets_WIDTH 32 +#define XgTxPkts512to1023Octets_offset 0xB4 +#define XgTxPkts512to1023Octets_WIDTH 32 +#define XgTxPkts1024to15xxOctets_offset 0xB8 +#define XgTxPkts1024to15xxOctets_WIDTH 32 +#define XgTxPkts1519toMaxOctets_offset 0xBC +#define XgTxPkts1519toMaxOctets_WIDTH 32 +#define XgTxUndersizePkts_offset 0xC0 +#define XgTxUndersizePkts_WIDTH 32 +#define XgTxOversizePkts_offset 0xC4 +#define XgTxOversizePkts_WIDTH 32 +#define XgTxNonTcpUdpPkt_offset 0xC8 +#define XgTxNonTcpUdpPkt_WIDTH 16 +#define XgTxMacSrcErrPkt_offset 0xCC +#define XgTxMacSrcErrPkt_WIDTH 16 +#define XgTxIpSrcErrPkt_offset 0xD0 +#define XgTxIpSrcErrPkt_WIDTH 16 +#define XgDmaDone_offset 0xD4 +#define XgDmaDone_WIDTH 32 + +#define FALCON_XMAC_STATS_DMA_FLAG(efx) \ + (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset)) + +#define FALCON_DMA_STAT(ext_name, hw_name) \ + [FALCON_STAT_ ## ext_name] = \ + { #ext_name, \ + /* 48-bit stats are zero-padded to 64 on DMA */ \ + hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \ + hw_name ## _ ## offset } +#define FALCON_OTHER_STAT(ext_name) \ + [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 } +#define GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = { + FALCON_DMA_STAT(tx_bytes, XgTxOctets), + FALCON_DMA_STAT(tx_packets, XgTxPkts), + FALCON_DMA_STAT(tx_pause, XgTxPausePkts), + FALCON_DMA_STAT(tx_control, XgTxControlPkts), + FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts), + FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts), + FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts), + FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts), + FALCON_DMA_STAT(tx_64, XgTxPkts64Octets), + FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets), + FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets), + FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets), + FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets), + FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets), + FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets), + FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts), + FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt), + FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt), + FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt), + FALCON_DMA_STAT(rx_bytes, XgRxOctets), + FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK), + FALCON_OTHER_STAT(rx_bad_bytes), + FALCON_DMA_STAT(rx_packets, XgRxPkts), + FALCON_DMA_STAT(rx_good, XgRxPktsOK), + FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts), + FALCON_DMA_STAT(rx_pause, XgRxPausePkts), + FALCON_DMA_STAT(rx_control, XgRxControlPkts), + FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts), + FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts), + FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts), + FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts), + FALCON_DMA_STAT(rx_64, XgRxPkts64Octets), + FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets), + FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets), + FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets), + FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets), + FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets), + FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets), + FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts), + 
FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts), + FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts), + FALCON_DMA_STAT(rx_overflow, XgRxDropEvents), + FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError), + FALCON_DMA_STAT(rx_align_error, XgRxAlignError), + FALCON_DMA_STAT(rx_length_error, XgRxLengthError), + FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError), + FALCON_OTHER_STAT(rx_nodesc_drop_cnt), + GENERIC_SW_STAT(rx_nodesc_trunc), + GENERIC_SW_STAT(rx_noskb_drops), +}; +static const unsigned long falcon_stat_mask[] = { + [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL, +}; + +/************************************************************************** + * + * Basic SPI command set and bit definitions + * + *************************************************************************/ + +#define SPI_WRSR 0x01 /* Write status register */ +#define SPI_WRITE 0x02 /* Write data to memory array */ +#define SPI_READ 0x03 /* Read data from memory array */ +#define SPI_WRDI 0x04 /* Reset write enable latch */ +#define SPI_RDSR 0x05 /* Read status register */ +#define SPI_WREN 0x06 /* Set write enable latch */ +#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */ + +#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */ +#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */ +#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */ +#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */ +#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */ +#define SPI_STATUS_NRDY 0x01 /* Device busy flag */ + +/************************************************************************** + * + * Non-volatile memory layout + * + ************************************************************************** + */ + +/* SFC4000 flash is partitioned into: + * 0-0x400 chip and board config (see struct falcon_nvconfig) + * 0x400-0x8000 unused (or may contain VPD if EEPROM not present) + * 0x8000-end boot code (mapped to PCI expansion ROM) + * SFC4000 small EEPROM (size < 0x400) is used for VPD only. + * SFC4000 large EEPROM (size >= 0x400) is partitioned into: + * 0-0x400 chip and board config + * configurable VPD + * 0x800-0x1800 boot config + * Aside from the chip and board config, all of these are optional and may + * be absent or truncated depending on the devices used. 
+ */ +#define FALCON_NVCONFIG_END 0x400U +#define FALCON_FLASH_BOOTCODE_START 0x8000U +#define FALCON_EEPROM_BOOTCONFIG_START 0x800U +#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U + +/* Board configuration v2 (v1 is obsolete; later versions are compatible) */ +struct falcon_nvconfig_board_v2 { + __le16 nports; + u8 port0_phy_addr; + u8 port0_phy_type; + u8 port1_phy_addr; + u8 port1_phy_type; + __le16 asic_sub_revision; + __le16 board_revision; +} __packed; + +/* Board configuration v3 extra information */ +struct falcon_nvconfig_board_v3 { + __le32 spi_device_type[2]; +} __packed; + +/* Bit numbers for spi_device_type */ +#define SPI_DEV_TYPE_SIZE_LBN 0 +#define SPI_DEV_TYPE_SIZE_WIDTH 5 +#define SPI_DEV_TYPE_ADDR_LEN_LBN 6 +#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2 +#define SPI_DEV_TYPE_ERASE_CMD_LBN 8 +#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8 +#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16 +#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5 +#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24 +#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5 +#define SPI_DEV_TYPE_FIELD(type, field) \ + (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field))) + +#define FALCON_NVCONFIG_OFFSET 0x300 + +#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C +struct falcon_nvconfig { + efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ + u8 mac_address[2][8]; /* 0x310 */ + efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */ + efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */ + efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */ + efx_oword_t hw_init_reg; /* 0x350 */ + efx_oword_t nic_stat_reg; /* 0x360 */ + efx_oword_t glb_ctl_reg; /* 0x370 */ + efx_oword_t srm_cfg_reg; /* 0x380 */ + efx_oword_t spare_reg; /* 0x390 */ + __le16 board_magic_num; /* 0x3A0 */ + __le16 board_struct_ver; + __le16 board_checksum; + struct falcon_nvconfig_board_v2 board_v2; + efx_oword_t ee_base_page_reg; /* 0x3B0 */ + struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */ +} __packed; + +/*************************************************************************/ + +static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); +static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); + +static const unsigned int +/* "Large" EEPROM device: Atmel AT25640 or similar + * 8 KB, 16-bit address, 32 B write block */ +large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN) + | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN) + | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)), +/* Default flash device: Atmel AT25F1024 + * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */ +default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN) + | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN) + | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN) + | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) + | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)); + +/************************************************************************** + * + * I2C bus - this is a bit-bashing interface using GPIO pins + * Note that it uses the output enables to tristate the outputs + * SDA is the data pin and SCL is the clock + * + ************************************************************************** + */ +static void falcon_setsda(void *data, int state) +{ + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state); + efx_writeo(efx, ®, FR_AB_GPIO_CTL); +} + +static void falcon_setscl(void *data, int state) +{ + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state); + efx_writeo(efx, ®, FR_AB_GPIO_CTL); +} + 
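The SPI_DEV_TYPE_* fields and the SPI_DEV_TYPE_FIELD() macro above pack a small SPI device description into a single 32-bit word; large_eeprom_type and default_flash_type are encoded this way, and falcon_spi_device_init() later in this file decodes the same layout when the NVRAM devices are probed. The following standalone sketch, which is not part of the patch, shows the decode for the default_flash_type value; the extract() helper is a hypothetical stand-in for SPI_DEV_TYPE_FIELD(), whose real definition is built from EFX_LOW_BIT()/EFX_MASK32() in bitfield.h.

/* Illustrative only: decode a spi_device_type word laid out with the
 * SPI_DEV_TYPE_* fields defined in falcon.c.
 */
#include <stdio.h>

#define SPI_DEV_TYPE_SIZE_LBN 0
#define SPI_DEV_TYPE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5

/* Stand-in for SPI_DEV_TYPE_FIELD(): shift down to the field's LBN and
 * mask off its WIDTH bits. */
static unsigned int extract(unsigned int type, unsigned int lbn,
			    unsigned int width)
{
	return (type >> lbn) & ((1u << width) - 1);
}

int main(void)
{
	/* Same encoding as default_flash_type (AT25F1024-class flash) */
	unsigned int type = (17 << SPI_DEV_TYPE_SIZE_LBN)
			  | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
			  | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
			  | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
			  | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);

	printf("size       %u bytes\n",
	       1u << extract(type, SPI_DEV_TYPE_SIZE_LBN,
			     SPI_DEV_TYPE_SIZE_WIDTH));
	printf("addr_len   %u bytes\n",
	       extract(type, SPI_DEV_TYPE_ADDR_LEN_LBN,
		       SPI_DEV_TYPE_ADDR_LEN_WIDTH));
	printf("erase cmd  0x%02x\n",
	       extract(type, SPI_DEV_TYPE_ERASE_CMD_LBN,
		       SPI_DEV_TYPE_ERASE_CMD_WIDTH));
	printf("erase size %u bytes\n",
	       1u << extract(type, SPI_DEV_TYPE_ERASE_SIZE_LBN,
			     SPI_DEV_TYPE_ERASE_SIZE_WIDTH));
	printf("block size %u bytes\n",
	       1u << extract(type, SPI_DEV_TYPE_BLOCK_SIZE_LBN,
			     SPI_DEV_TYPE_BLOCK_SIZE_WIDTH));
	return 0;
}

Running this prints a 131072-byte (128 KB) device with a 3-byte (24-bit) address, erase command 0x52, 32 KB erase blocks and 256-byte write blocks, matching the comment next to default_flash_type.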
+static int falcon_getsda(void *data) +{ + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN); +} + +static int falcon_getscl(void *data) +{ + struct efx_nic *efx = (struct efx_nic *)data; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AB_GPIO_CTL); + return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); +} + +static const struct i2c_algo_bit_data falcon_i2c_bit_operations = { + .setsda = falcon_setsda, + .setscl = falcon_setscl, + .getsda = falcon_getsda, + .getscl = falcon_getscl, + .udelay = 5, + /* Wait up to 50 ms for slave to let us pull SCL high */ + .timeout = DIV_ROUND_UP(HZ, 20), +}; + +static void falcon_push_irq_moderation(struct efx_channel *channel) +{ + efx_dword_t timer_cmd; + struct efx_nic *efx = channel->efx; + + /* Set timer register */ + if (channel->irq_moderation) { + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_AB_TC_TIMER_MODE, + FFE_BB_TIMER_MODE_INT_HLDOFF, + FRF_AB_TC_TIMER_VAL, + channel->irq_moderation - 1); + } else { + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_AB_TC_TIMER_MODE, + FFE_BB_TIMER_MODE_DIS, + FRF_AB_TC_TIMER_VAL, 0); + } + BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0); + efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, + channel->channel); +} + +static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); + +static void falcon_prepare_flush(struct efx_nic *efx) +{ + falcon_deconfigure_mac_wrapper(efx); + + /* Wait for the tx and rx fifo's to get to the next packet boundary + * (~1ms without back-pressure), then to drain the remainder of the + * fifo's at data path speeds (negligible), with a healthy margin. */ + msleep(10); +} + +/* Acknowledge a legacy interrupt from Falcon + * + * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG. + * + * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the + * BIU. Interrupt acknowledge is read sensitive so must write instead + * (then read to ensure the BIU collector is flushed) + * + * NB most hardware supports MSI interrupts + */ +static inline void falcon_irq_ack_a1(struct efx_nic *efx) +{ + efx_dword_t reg; + + EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e); + efx_writed(efx, ®, FR_AA_INT_ACK_KER); + efx_readd(efx, ®, FR_AA_WORK_AROUND_BROKEN_PCI_READS); +} + +static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) +{ + struct efx_nic *efx = dev_id; + efx_oword_t *int_ker = efx->irq_status.addr; + int syserr; + int queues; + + /* Check to see if this is our interrupt. If it isn't, we + * exit without having touched the hardware. + */ + if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d not for me\n", irq, + raw_smp_processor_id()); + return IRQ_NONE; + } + efx->last_irq_cpu = raw_smp_processor_id(); + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + + if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + return IRQ_HANDLED; + + /* Check to see if we have a serious error condition */ + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + + /* Determine interrupting queues, clear interrupt status + * register and acknowledge the device interrupt. 
+ */ + BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); + queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); + EFX_ZERO_OWORD(*int_ker); + wmb(); /* Ensure the vector is cleared before interrupt ack */ + falcon_irq_ack_a1(efx); + + if (queues & 1) + efx_schedule_channel_irq(efx_get_channel(efx, 0)); + if (queues & 2) + efx_schedule_channel_irq(efx_get_channel(efx, 1)); + return IRQ_HANDLED; +} + +/************************************************************************** + * + * RSS + * + ************************************************************************** + */ + +static void falcon_b0_rx_push_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Set hash key for IPv4 */ + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + efx_farch_rx_push_indir_table(efx); +} + +/************************************************************************** + * + * EEPROM/flash + * + ************************************************************************** + */ + +#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) + +static int falcon_spi_poll(struct efx_nic *efx) +{ + efx_oword_t reg; + efx_reado(efx, ®, FR_AB_EE_SPI_HCMD); + return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; +} + +/* Wait for SPI command completion */ +static int falcon_spi_wait(struct efx_nic *efx) +{ + /* Most commands will finish quickly, so we start polling at + * very short intervals. Sometimes the command may have to + * wait for VPD or expansion ROM access outside of our + * control, so we allow up to 100 ms. */ + unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10); + int i; + + for (i = 0; i < 10; i++) { + if (!falcon_spi_poll(efx)) + return 0; + udelay(10); + } + + for (;;) { + if (!falcon_spi_poll(efx)) + return 0; + if (time_after_eq(jiffies, timeout)) { + netif_err(efx, hw, efx->net_dev, + "timed out waiting for SPI\n"); + return -ETIMEDOUT; + } + schedule_timeout_uninterruptible(1); + } +} + +static int +falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi, + unsigned int command, int address, + const void *in, void *out, size_t len) +{ + bool addressed = (address >= 0); + bool reading = (out != NULL); + efx_oword_t reg; + int rc; + + /* Input validation */ + if (len > FALCON_SPI_MAX_LEN) + return -EINVAL; + + /* Check that previous command is not still running */ + rc = falcon_spi_poll(efx); + if (rc) + return rc; + + /* Program address register, if we have an address */ + if (addressed) { + EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address); + efx_writeo(efx, ®, FR_AB_EE_SPI_HADR); + } + + /* Program data register, if we have data */ + if (in != NULL) { + memcpy(®, in, len); + efx_writeo(efx, ®, FR_AB_EE_SPI_HDATA); + } + + /* Issue read/write command */ + EFX_POPULATE_OWORD_7(reg, + FRF_AB_EE_SPI_HCMD_CMD_EN, 1, + FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id, + FRF_AB_EE_SPI_HCMD_DABCNT, len, + FRF_AB_EE_SPI_HCMD_READ, reading, + FRF_AB_EE_SPI_HCMD_DUBCNT, 0, + FRF_AB_EE_SPI_HCMD_ADBCNT, + (addressed ? 
spi->addr_len : 0), + FRF_AB_EE_SPI_HCMD_ENC, command); + efx_writeo(efx, ®, FR_AB_EE_SPI_HCMD); + + /* Wait for read/write to complete */ + rc = falcon_spi_wait(efx); + if (rc) + return rc; + + /* Read data */ + if (out != NULL) { + efx_reado(efx, ®, FR_AB_EE_SPI_HDATA); + memcpy(out, ®, len); + } + + return 0; +} + +static inline u8 +falcon_spi_munge_command(const struct falcon_spi_device *spi, + const u8 command, const unsigned int address) +{ + return command | (((address >> 8) & spi->munge_address) << 3); +} + +static int +falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi, + loff_t start, size_t len, size_t *retlen, u8 *buffer) +{ + size_t block_len, pos = 0; + unsigned int command; + int rc = 0; + + while (pos < len) { + block_len = min(len - pos, FALCON_SPI_MAX_LEN); + + command = falcon_spi_munge_command(spi, SPI_READ, start + pos); + rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, + buffer + pos, block_len); + if (rc) + break; + pos += block_len; + + /* Avoid locking up the system */ + cond_resched(); + if (signal_pending(current)) { + rc = -EINTR; + break; + } + } + + if (retlen) + *retlen = pos; + return rc; +} + +#ifdef CONFIG_SFC_MTD + +struct falcon_mtd_partition { + struct efx_mtd_partition common; + const struct falcon_spi_device *spi; + size_t offset; +}; + +#define to_falcon_mtd_partition(mtd) \ + container_of(mtd, struct falcon_mtd_partition, common.mtd) + +static size_t +falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start) +{ + return min(FALCON_SPI_MAX_LEN, + (spi->block_size - (start & (spi->block_size - 1)))); +} + +/* Wait up to 10 ms for buffered write completion */ +static int +falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi) +{ + unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); + u8 status; + int rc; + + for (;;) { + rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, + &status, sizeof(status)); + if (rc) + return rc; + if (!(status & SPI_STATUS_NRDY)) + return 0; + if (time_after_eq(jiffies, timeout)) { + netif_err(efx, hw, efx->net_dev, + "SPI write timeout on device %d" + " last status=0x%02x\n", + spi->device_id, status); + return -ETIMEDOUT; + } + schedule_timeout_uninterruptible(1); + } +} + +static int +falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi, + loff_t start, size_t len, size_t *retlen, const u8 *buffer) +{ + u8 verify_buffer[FALCON_SPI_MAX_LEN]; + size_t block_len, pos = 0; + unsigned int command; + int rc = 0; + + while (pos < len) { + rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); + if (rc) + break; + + block_len = min(len - pos, + falcon_spi_write_limit(spi, start + pos)); + command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos); + rc = falcon_spi_cmd(efx, spi, command, start + pos, + buffer + pos, NULL, block_len); + if (rc) + break; + + rc = falcon_spi_wait_write(efx, spi); + if (rc) + break; + + command = falcon_spi_munge_command(spi, SPI_READ, start + pos); + rc = falcon_spi_cmd(efx, spi, command, start + pos, + NULL, verify_buffer, block_len); + if (memcmp(verify_buffer, buffer + pos, block_len)) { + rc = -EIO; + break; + } + + pos += block_len; + + /* Avoid locking up the system */ + cond_resched(); + if (signal_pending(current)) { + rc = -EINTR; + break; + } + } + + if (retlen) + *retlen = pos; + return rc; +} + +static int +falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible) +{ + const struct falcon_spi_device *spi = part->spi; + struct efx_nic *efx = 
part->common.mtd.priv; + u8 status; + int rc, i; + + /* Wait up to 4s for flash/EEPROM to finish a slow operation. */ + for (i = 0; i < 40; i++) { + __set_current_state(uninterruptible ? + TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); + schedule_timeout(HZ / 10); + rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, + &status, sizeof(status)); + if (rc) + return rc; + if (!(status & SPI_STATUS_NRDY)) + return 0; + if (signal_pending(current)) + return -EINTR; + } + pr_err("%s: timed out waiting for %s\n", + part->common.name, part->common.dev_type_name); + return -ETIMEDOUT; +} + +static int +falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi) +{ + const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 | + SPI_STATUS_BP0); + u8 status; + int rc; + + rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, + &status, sizeof(status)); + if (rc) + return rc; + + if (!(status & unlock_mask)) + return 0; /* already unlocked */ + + rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); + if (rc) + return rc; + rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0); + if (rc) + return rc; + + status &= ~unlock_mask; + rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status, + NULL, sizeof(status)); + if (rc) + return rc; + rc = falcon_spi_wait_write(efx, spi); + if (rc) + return rc; + + return 0; +} + +#define FALCON_SPI_VERIFY_BUF_LEN 16 + +static int +falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len) +{ + const struct falcon_spi_device *spi = part->spi; + struct efx_nic *efx = part->common.mtd.priv; + unsigned pos, block_len; + u8 empty[FALCON_SPI_VERIFY_BUF_LEN]; + u8 buffer[FALCON_SPI_VERIFY_BUF_LEN]; + int rc; + + if (len != spi->erase_size) + return -EINVAL; + + if (spi->erase_command == 0) + return -EOPNOTSUPP; + + rc = falcon_spi_unlock(efx, spi); + if (rc) + return rc; + rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); + if (rc) + return rc; + rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL, + NULL, 0); + if (rc) + return rc; + rc = falcon_spi_slow_wait(part, false); + + /* Verify the entire region has been wiped */ + memset(empty, 0xff, sizeof(empty)); + for (pos = 0; pos < len; pos += block_len) { + block_len = min(len - pos, sizeof(buffer)); + rc = falcon_spi_read(efx, spi, start + pos, block_len, + NULL, buffer); + if (rc) + return rc; + if (memcmp(empty, buffer, block_len)) + return -EIO; + + /* Avoid locking up the system */ + cond_resched(); + if (signal_pending(current)) + return -EINTR; + } + + return rc; +} + +static void falcon_mtd_rename(struct efx_mtd_partition *part) +{ + struct efx_nic *efx = part->mtd.priv; + + snprintf(part->name, sizeof(part->name), "%s %s", + efx->name, part->type_name); +} + +static int falcon_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + rc = mutex_lock_interruptible(&nic_data->spi_lock); + if (rc) + return rc; + rc = falcon_spi_read(efx, part->spi, part->offset + start, + len, retlen, buffer); + mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + rc = mutex_lock_interruptible(&nic_data->spi_lock); + if (rc) + return rc; 
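+	/* MTD offsets are relative to the partition, so bias by part->offset
+	 * to get the absolute address within the SPI device.  spi_lock also
+	 * serializes this against falcon_read_nvram() and falcon_reset_hw(),
+	 * which use the same SPI host interface.
+	 */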
+ rc = falcon_spi_erase(part, part->offset + start, len); + mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_write(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, const u8 *buffer) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + rc = mutex_lock_interruptible(&nic_data->spi_lock); + if (rc) + return rc; + rc = falcon_spi_write(efx, part->spi, part->offset + start, + len, retlen, buffer); + mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_sync(struct mtd_info *mtd) +{ + struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + mutex_lock(&nic_data->spi_lock); + rc = falcon_spi_slow_wait(part, true); + mutex_unlock(&nic_data->spi_lock); + return rc; +} + +static int falcon_mtd_probe(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_mtd_partition *parts; + struct falcon_spi_device *spi; + size_t n_parts; + int rc = -ENODEV; + + ASSERT_RTNL(); + + /* Allocate space for maximum number of partitions */ + parts = kcalloc(2, sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + n_parts = 0; + + spi = &nic_data->spi_flash; + if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) { + parts[n_parts].spi = spi; + parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START; + parts[n_parts].common.dev_type_name = "flash"; + parts[n_parts].common.type_name = "sfc_flash_bootrom"; + parts[n_parts].common.mtd.type = MTD_NORFLASH; + parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH; + parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; + parts[n_parts].common.mtd.erasesize = spi->erase_size; + n_parts++; + } + + spi = &nic_data->spi_eeprom; + if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) { + parts[n_parts].spi = spi; + parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START; + parts[n_parts].common.dev_type_name = "EEPROM"; + parts[n_parts].common.type_name = "sfc_bootconfig"; + parts[n_parts].common.mtd.type = MTD_RAM; + parts[n_parts].common.mtd.flags = MTD_CAP_RAM; + parts[n_parts].common.mtd.size = + min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) - + FALCON_EEPROM_BOOTCONFIG_START; + parts[n_parts].common.mtd.erasesize = spi->erase_size; + n_parts++; + } + + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + +/************************************************************************** + * + * XMAC operations + * + ************************************************************************** + */ + +/* Configure the XAUI driver that is an output from Falcon */ +static void falcon_setup_xaui(struct efx_nic *efx) +{ + efx_oword_t sdctl, txdrv; + + /* Move the XAUI into low power, unless there is no PHY, in + * which case the XAUI will have to drive a cable. 
*/ + if (efx->phy_type == PHY_TYPE_NONE) + return; + + efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF); + EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF); + efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL); + + EFX_POPULATE_OWORD_8(txdrv, + FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF, + FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF, + FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF, + FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF, + FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF); + efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL); +} + +int falcon_reset_xaui(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + int count; + + /* Don't fetch MAC statistics over an XMAC reset */ + WARN_ON(nic_data->stats_disable_count == 0); + + /* Start reset sequence */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1); + efx_writeo(efx, ®, FR_AB_XX_PWR_RST); + + /* Wait up to 10 ms for completion, then reinitialise */ + for (count = 0; count < 1000; count++) { + efx_reado(efx, ®, FR_AB_XX_PWR_RST); + if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 && + EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) { + falcon_setup_xaui(efx); + return 0; + } + udelay(10); + } + netif_err(efx, hw, efx->net_dev, + "timed out waiting for XAUI/XGXS reset\n"); + return -ETIMEDOUT; +} + +static void falcon_ack_status_intr(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + + if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) + return; + + /* We expect xgmii faults if the wireside link is down */ + if (!efx->link_state.up) + return; + + /* We can only use this interrupt to signal the negative edge of + * xaui_align [we have to poll the positive edge]. */ + if (nic_data->xmac_poll_required) + return; + + efx_reado(efx, ®, FR_AB_XM_MGT_INT_MSK); +} + +static bool falcon_xgxs_link_ok(struct efx_nic *efx) +{ + efx_oword_t reg; + bool align_done, link_ok = false; + int sync_status; + + /* Read link status */ + efx_reado(efx, ®, FR_AB_XX_CORE_STAT); + + align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); + sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); + if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) + link_ok = true; + + /* Clear link status ready for next read */ + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); + efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); + + return link_ok; +} + +static bool falcon_xmac_link_ok(struct efx_nic *efx) +{ + /* + * Check MAC's XGXS link status except when using XGMII loopback + * which bypasses the XGXS block. + * If possible, check PHY's XGXS link status except when using + * MAC loopback. 
+ */ + return (efx->loopback_mode == LOOPBACK_XGMII || + falcon_xgxs_link_ok(efx)) && + (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || + LOOPBACK_INTERNAL(efx) || + efx_mdio_phyxgxs_lane_sync(efx)); +} + +static void falcon_reconfigure_xmac_core(struct efx_nic *efx) +{ + unsigned int max_frame_len; + efx_oword_t reg; + bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX); + bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX); + + /* Configure MAC - cut-thru mode is hard wired on */ + EFX_POPULATE_OWORD_3(reg, + FRF_AB_XM_RX_JUMBO_MODE, 1, + FRF_AB_XM_TX_STAT_EN, 1, + FRF_AB_XM_RX_STAT_EN, 1); + efx_writeo(efx, ®, FR_AB_XM_GLB_CFG); + + /* Configure TX */ + EFX_POPULATE_OWORD_6(reg, + FRF_AB_XM_TXEN, 1, + FRF_AB_XM_TX_PRMBL, 1, + FRF_AB_XM_AUTO_PAD, 1, + FRF_AB_XM_TXCRC, 1, + FRF_AB_XM_FCNTL, tx_fc, + FRF_AB_XM_IPG, 0x3); + efx_writeo(efx, ®, FR_AB_XM_TX_CFG); + + /* Configure RX */ + EFX_POPULATE_OWORD_5(reg, + FRF_AB_XM_RXEN, 1, + FRF_AB_XM_AUTO_DEPAD, 0, + FRF_AB_XM_ACPT_ALL_MCAST, 1, + FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter, + FRF_AB_XM_PASS_CRC_ERR, 1); + efx_writeo(efx, ®, FR_AB_XM_RX_CFG); + + /* Set frame length */ + max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); + EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len); + efx_writeo(efx, ®, FR_AB_XM_RX_PARAM); + EFX_POPULATE_OWORD_2(reg, + FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len, + FRF_AB_XM_TX_JUMBO_MODE, 1); + efx_writeo(efx, ®, FR_AB_XM_TX_PARAM); + + EFX_POPULATE_OWORD_2(reg, + FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ + FRF_AB_XM_DIS_FCNTL, !rx_fc); + efx_writeo(efx, ®, FR_AB_XM_FC); + + /* Set MAC address */ + memcpy(®, &efx->net_dev->dev_addr[0], 4); + efx_writeo(efx, ®, FR_AB_XM_ADR_LO); + memcpy(®, &efx->net_dev->dev_addr[4], 2); + efx_writeo(efx, ®, FR_AB_XM_ADR_HI); +} + +static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) +{ + efx_oword_t reg; + bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS); + bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI); + bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII); + bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; + + /* XGXS block is flaky and will need to be reset if moving + * into our out of XGMII, XGXS or XAUI loopbacks. */ + efx_reado(efx, ®, FR_AB_XX_CORE_STAT); + old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); + old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); + + efx_reado(efx, ®, FR_AB_XX_SD_CTL); + old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); + + /* The PHY driver may have turned XAUI off */ + if ((xgxs_loopback != old_xgxs_loopback) || + (xaui_loopback != old_xaui_loopback) || + (xgmii_loopback != old_xgmii_loopback)) + falcon_reset_xaui(efx); + + efx_reado(efx, ®, FR_AB_XX_CORE_STAT); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG, + (xgxs_loopback || xaui_loopback) ? 
+ FFE_AB_XX_FORCE_SIG_ALL_LANES : 0); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback); + efx_writeo(efx, ®, FR_AB_XX_CORE_STAT); + + efx_reado(efx, ®, FR_AB_XX_SD_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback); + EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback); + efx_writeo(efx, ®, FR_AB_XX_SD_CTL); +} + + +/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ +static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) +{ + bool mac_up = falcon_xmac_link_ok(efx); + + if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || + efx_phy_mode_disabled(efx->phy_mode)) + /* XAUI link is expected to be down */ + return mac_up; + + falcon_stop_nic_stats(efx); + + while (!mac_up && tries) { + netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); + falcon_reset_xaui(efx); + udelay(200); + + mac_up = falcon_xmac_link_ok(efx); + --tries; + } + + falcon_start_nic_stats(efx); + + return mac_up; +} + +static bool falcon_xmac_check_fault(struct efx_nic *efx) +{ + return !falcon_xmac_link_ok_retry(efx, 5); +} + +static int falcon_reconfigure_xmac(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + + efx_farch_filter_sync_rx_mode(efx); + + falcon_reconfigure_xgxs_core(efx); + falcon_reconfigure_xmac_core(efx); + + falcon_reconfigure_mac_wrapper(efx); + + nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); + falcon_ack_status_intr(efx); + + return 0; +} + +static void falcon_poll_xmac(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + + /* We expect xgmii faults if the wireside link is down */ + if (!efx->link_state.up || !nic_data->xmac_poll_required) + return; + + nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); + falcon_ack_status_intr(efx); +} + +/************************************************************************** + * + * MAC wrapper + * + ************************************************************************** + */ + +static void falcon_push_multicast_hash(struct efx_nic *efx) +{ + union efx_multicast_hash *mc_hash = &efx->multicast_hash; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0); + efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1); +} + +static void falcon_reset_macs(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg, mac_ctrl; + int count; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { + /* It's not safe to use GLB_CTL_REG to reset the + * macs, so instead use the internal MAC resets + */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1); + efx_writeo(efx, ®, FR_AB_XM_GLB_CFG); + + for (count = 0; count < 10000; count++) { + efx_reado(efx, ®, FR_AB_XM_GLB_CFG); + if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == + 0) + return; + udelay(10); + } + + netif_err(efx, hw, efx->net_dev, + "timed out waiting for XMAC core reset\n"); + } + + /* Mac stats will fail whist the TX fifo is draining */ + WARN_ON(nic_data->stats_disable_count == 0); + + efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL); + EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1); + efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); + + efx_reado(efx, ®, FR_AB_GLB_CTL); + EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1); + EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1); + 
EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1); + efx_writeo(efx, ®, FR_AB_GLB_CTL); + + count = 0; + while (1) { + efx_reado(efx, ®, FR_AB_GLB_CTL); + if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && + !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && + !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { + netif_dbg(efx, hw, efx->net_dev, + "Completed MAC reset after %d loops\n", + count); + break; + } + if (count > 20) { + netif_err(efx, hw, efx->net_dev, "MAC reset failed\n"); + break; + } + count++; + udelay(10); + } + + /* Ensure the correct MAC is selected before statistics + * are re-enabled by the caller */ + efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); + + falcon_setup_xaui(efx); +} + +static void falcon_drain_tx_fifo(struct efx_nic *efx) +{ + efx_oword_t reg; + + if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) || + (efx->loopback_mode != LOOPBACK_NONE)) + return; + + efx_reado(efx, ®, FR_AB_MAC_CTRL); + /* There is no point in draining more than once */ + if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN)) + return; + + falcon_reset_macs(efx); +} + +static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) +{ + efx_oword_t reg; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) + return; + + /* Isolate the MAC -> RX */ + efx_reado(efx, ®, FR_AZ_RX_CFG); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0); + efx_writeo(efx, ®, FR_AZ_RX_CFG); + + /* Isolate TX -> MAC */ + falcon_drain_tx_fifo(efx); +} + +static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + efx_oword_t reg; + int link_speed, isolate; + + isolate = !!ACCESS_ONCE(efx->reset_pending); + + switch (link_state->speed) { + case 10000: link_speed = 3; break; + case 1000: link_speed = 2; break; + case 100: link_speed = 1; break; + default: link_speed = 0; break; + } + + /* MAC_LINK_STATUS controls MAC backpressure but doesn't work + * as advertised. Disable to ensure packets are not + * indefinitely held and TX queue can be flushed at any point + * while the link is down. */ + EFX_POPULATE_OWORD_5(reg, + FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */, + FRF_AB_MAC_BCAD_ACPT, 1, + FRF_AB_MAC_UC_PROM, !efx->unicast_filter, + FRF_AB_MAC_LINK_STATUS, 1, /* always set */ + FRF_AB_MAC_SPEED, link_speed); + /* On B0, MAC backpressure can be disabled and packets get + * discarded. */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, + !link_state->up || isolate); + } + + efx_writeo(efx, ®, FR_AB_MAC_CTRL); + + /* Restore the multicast hash registers. 
*/ + falcon_push_multicast_hash(efx); + + efx_reado(efx, ®, FR_AZ_RX_CFG); + /* Enable XOFF signal from RX FIFO (we enabled it during NIC + * initialisation but it may read back as 0) */ + EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); + /* Unisolate the MAC -> RX */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate); + efx_writeo(efx, ®, FR_AZ_RX_CFG); +} + +static void falcon_stats_request(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + + WARN_ON(nic_data->stats_pending); + WARN_ON(nic_data->stats_disable_count); + + FALCON_XMAC_STATS_DMA_FLAG(efx) = 0; + nic_data->stats_pending = true; + wmb(); /* ensure done flag is clear */ + + /* Initiate DMA transfer of stats */ + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MAC_STAT_DMA_CMD, 1, + FRF_AB_MAC_STAT_DMA_ADR, + efx->stats_buffer.dma_addr); + efx_writeo(efx, ®, FR_AB_MAC_STAT_DMA); + + mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2)); +} + +static void falcon_stats_complete(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + + if (!nic_data->stats_pending) + return; + + nic_data->stats_pending = false; + if (FALCON_XMAC_STATS_DMA_FLAG(efx)) { + rmb(); /* read the done flag before the stats */ + efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT, + falcon_stat_mask, nic_data->stats, + efx->stats_buffer.addr, true); + } else { + netif_err(efx, hw, efx->net_dev, + "timed out waiting for statistics\n"); + } +} + +static void falcon_stats_timer_func(unsigned long context) +{ + struct efx_nic *efx = (struct efx_nic *)context; + struct falcon_nic_data *nic_data = efx->nic_data; + + spin_lock(&efx->stats_lock); + + falcon_stats_complete(efx); + if (nic_data->stats_disable_count == 0) + falcon_stats_request(efx); + + spin_unlock(&efx->stats_lock); +} + +static bool falcon_loopback_link_poll(struct efx_nic *efx) +{ + struct efx_link_state old_state = efx->link_state; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + WARN_ON(!LOOPBACK_INTERNAL(efx)); + + efx->link_state.fd = true; + efx->link_state.fc = efx->wanted_fc; + efx->link_state.up = true; + efx->link_state.speed = 10000; + + return !efx_link_state_equal(&efx->link_state, &old_state); +} + +static int falcon_reconfigure_port(struct efx_nic *efx) +{ + int rc; + + WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0); + + /* Poll the PHY link state *before* reconfiguring it. This means we + * will pick up the correct speed (in loopback) to select the correct + * MAC. + */ + if (LOOPBACK_INTERNAL(efx)) + falcon_loopback_link_poll(efx); + else + efx->phy_op->poll(efx); + + falcon_stop_nic_stats(efx); + falcon_deconfigure_mac_wrapper(efx); + + falcon_reset_macs(efx); + + efx->phy_op->reconfigure(efx); + rc = falcon_reconfigure_xmac(efx); + BUG_ON(rc); + + falcon_start_nic_stats(efx); + + /* Synchronise efx->link_state with the kernel */ + efx_link_status_changed(efx); + + return 0; +} + +/* TX flow control may automatically turn itself off if the link + * partner (intermittently) stops responding to pause frames. There + * isn't any indication that this has happened, so the best we do is + * leave it up to the user to spot this and fix it by cycling transmit + * flow control on this end. 
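+ * (e.g. "ethtool -A <iface> tx off" followed by "ethtool -A <iface> tx on").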
+ */ + +static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx) +{ + /* Schedule a reset to recover */ + efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); +} + +static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx) +{ + /* Recover by resetting the EM block */ + falcon_stop_nic_stats(efx); + falcon_drain_tx_fifo(efx); + falcon_reconfigure_xmac(efx); + falcon_start_nic_stats(efx); +} + +/************************************************************************** + * + * PHY access via GMII + * + ************************************************************************** + */ + +/* Wait for GMII access to complete */ +static int falcon_gmii_wait(struct efx_nic *efx) +{ + efx_oword_t md_stat; + int count; + + /* wait up to 50ms - taken max from datasheet */ + for (count = 0; count < 5000; count++) { + efx_reado(efx, &md_stat, FR_AB_MD_STAT); + if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { + if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || + EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { + netif_err(efx, hw, efx->net_dev, + "error from GMII access " + EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(md_stat)); + return -EIO; + } + return 0; + } + udelay(10); + } + netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); + return -ETIMEDOUT; +} + +/* Write an MDIO register of a PHY connected to Falcon. */ +static int falcon_mdio_write(struct net_device *net_dev, + int prtad, int devad, u16 addr, u16 value) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + int rc; + + netif_vdbg(efx, hw, efx->net_dev, + "writing MDIO %d register %d.%d with 0x%04x\n", + prtad, devad, addr, value); + + mutex_lock(&nic_data->mdio_lock); + + /* Check MDIO not currently being accessed */ + rc = falcon_gmii_wait(efx); + if (rc) + goto out; + + /* Write the address/ID register */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); + efx_writeo(efx, ®, FR_AB_MD_PHY_ADR); + + EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, + FRF_AB_MD_DEV_ADR, devad); + efx_writeo(efx, ®, FR_AB_MD_ID); + + /* Write data */ + EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value); + efx_writeo(efx, ®, FR_AB_MD_TXD); + + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MD_WRC, 1, + FRF_AB_MD_GC, 0); + efx_writeo(efx, ®, FR_AB_MD_CS); + + /* Wait for data to be written */ + rc = falcon_gmii_wait(efx); + if (rc) { + /* Abort the write operation */ + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MD_WRC, 0, + FRF_AB_MD_GC, 1); + efx_writeo(efx, ®, FR_AB_MD_CS); + udelay(10); + } + +out: + mutex_unlock(&nic_data->mdio_lock); + return rc; +} + +/* Read an MDIO register of a PHY connected to Falcon. 
*/ +static int falcon_mdio_read(struct net_device *net_dev, + int prtad, int devad, u16 addr) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + int rc; + + mutex_lock(&nic_data->mdio_lock); + + /* Check MDIO not currently being accessed */ + rc = falcon_gmii_wait(efx); + if (rc) + goto out; + + EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); + efx_writeo(efx, ®, FR_AB_MD_PHY_ADR); + + EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, + FRF_AB_MD_DEV_ADR, devad); + efx_writeo(efx, ®, FR_AB_MD_ID); + + /* Request data to be read */ + EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0); + efx_writeo(efx, ®, FR_AB_MD_CS); + + /* Wait for data to become available */ + rc = falcon_gmii_wait(efx); + if (rc == 0) { + efx_reado(efx, ®, FR_AB_MD_RXD); + rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); + netif_vdbg(efx, hw, efx->net_dev, + "read from MDIO %d register %d.%d, got %04x\n", + prtad, devad, addr, rc); + } else { + /* Abort the read operation */ + EFX_POPULATE_OWORD_2(reg, + FRF_AB_MD_RIC, 0, + FRF_AB_MD_GC, 1); + efx_writeo(efx, ®, FR_AB_MD_CS); + + netif_dbg(efx, hw, efx->net_dev, + "read from MDIO %d register %d.%d, got error %d\n", + prtad, devad, addr, rc); + } + +out: + mutex_unlock(&nic_data->mdio_lock); + return rc; +} + +/* This call is responsible for hooking in the MAC and PHY operations */ +static int falcon_probe_port(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + switch (efx->phy_type) { + case PHY_TYPE_SFX7101: + efx->phy_op = &falcon_sfx7101_phy_ops; + break; + case PHY_TYPE_QT2022C2: + case PHY_TYPE_QT2025C: + efx->phy_op = &falcon_qt202x_phy_ops; + break; + case PHY_TYPE_TXC43128: + efx->phy_op = &falcon_txc_phy_ops; + break; + default: + netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", + efx->phy_type); + return -ENODEV; + } + + /* Fill out MDIO structure and loopback modes */ + mutex_init(&nic_data->mdio_lock); + efx->mdio.mdio_read = falcon_mdio_read; + efx->mdio.mdio_write = falcon_mdio_write; + rc = efx->phy_op->probe(efx); + if (rc != 0) + return rc; + + /* Initial assumption */ + efx->link_state.speed = 10000; + efx->link_state.fd = true; + + /* Hardware flow ctrl. 
FalconA RX FIFO too small for pause generation */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + else + efx->wanted_fc = EFX_FC_RX; + if (efx->mdio.mmds & MDIO_DEVS_AN) + efx->wanted_fc |= EFX_FC_AUTO; + + /* Allocate buffer for stats */ + rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, + FALCON_MAC_STATS_SIZE, GFP_KERNEL); + if (rc) + return rc; + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64)efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64)virt_to_phys(efx->stats_buffer.addr)); + + return 0; +} + +static void falcon_remove_port(struct efx_nic *efx) +{ + efx->phy_op->remove(efx); + efx_nic_free_buffer(efx, &efx->stats_buffer); +} + +/* Global events are basically PHY events */ +static bool +falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct falcon_nic_data *nic_data = efx->nic_data; + + if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || + EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || + EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) + /* Ignored */ + return true; + + if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) && + EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { + nic_data->xmac_poll_required = true; + return true; + } + + if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? + EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : + EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen global RX_RESET event. Resetting.\n", + channel->channel); + + atomic_inc(&efx->rx_reset); + efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? + RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); + return true; + } + + return false; +} + +/************************************************************************** + * + * Falcon test code + * + **************************************************************************/ + +static int +falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_nvconfig *nvconfig; + struct falcon_spi_device *spi; + void *region; + int rc, magic_num, struct_ver; + __le16 *word, *limit; + u32 csum; + + if (falcon_spi_present(&nic_data->spi_flash)) + spi = &nic_data->spi_flash; + else if (falcon_spi_present(&nic_data->spi_eeprom)) + spi = &nic_data->spi_eeprom; + else + return -EINVAL; + + region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); + if (!region) + return -ENOMEM; + nvconfig = region + FALCON_NVCONFIG_OFFSET; + + mutex_lock(&nic_data->spi_lock); + rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); + mutex_unlock(&nic_data->spi_lock); + if (rc) { + netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", + falcon_spi_present(&nic_data->spi_flash) ? 
+ "flash" : "EEPROM"); + rc = -EIO; + goto out; + } + + magic_num = le16_to_cpu(nvconfig->board_magic_num); + struct_ver = le16_to_cpu(nvconfig->board_struct_ver); + + rc = -EINVAL; + if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { + netif_err(efx, hw, efx->net_dev, + "NVRAM bad magic 0x%x\n", magic_num); + goto out; + } + if (struct_ver < 2) { + netif_err(efx, hw, efx->net_dev, + "NVRAM has ancient version 0x%x\n", struct_ver); + goto out; + } else if (struct_ver < 4) { + word = &nvconfig->board_magic_num; + limit = (__le16 *) (nvconfig + 1); + } else { + word = region; + limit = region + FALCON_NVCONFIG_END; + } + for (csum = 0; word < limit; ++word) + csum += le16_to_cpu(*word); + + if (~csum & 0xffff) { + netif_err(efx, hw, efx->net_dev, + "NVRAM has incorrect checksum\n"); + goto out; + } + + rc = 0; + if (nvconfig_out) + memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig)); + + out: + kfree(region); + return rc; +} + +static int falcon_test_nvram(struct efx_nic *efx) +{ + return falcon_read_nvram(efx, NULL); +} + +static const struct efx_farch_register_test falcon_b0_register_tests[] = { + { FR_AZ_ADR_REGION, + EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, + { FR_AZ_RX_CFG, + EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, + { FR_AZ_TX_CFG, + EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_TX_RESERVED, + EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, + { FR_AB_MAC_CTRL, + EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_SRM_TX_DC_CFG, + EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_CFG, + EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_PF_WM, + EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_BZ_DP_CTRL, + EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_GM_CFG2, + EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_GMF_CFG0, + EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_GLB_CFG, + EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_TX_CFG, + EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_RX_CFG, + EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_RX_PARAM, + EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_FC, + EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XM_ADR_LO, + EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AB_XX_SD_CTL, + EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, +}; + +static int +falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) +{ + enum reset_type reset_method = RESET_TYPE_INVISIBLE; + int rc, rc2; + + mutex_lock(&efx->mac_lock); + if (efx->loopback_modes) { + /* We need the 312 clock from the PHY to test the XMAC + * registers, so move into XGMII loopback if available */ + if (efx->loopback_modes & (1 << LOOPBACK_XGMII)) + efx->loopback_mode = LOOPBACK_XGMII; + else + efx->loopback_mode = __ffs(efx->loopback_modes); + } + __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + efx_reset_down(efx, reset_method); + + tests->registers = + efx_farch_test_registers(efx, falcon_b0_register_tests, + ARRAY_SIZE(falcon_b0_register_tests)) + ? -1 : 1; + + rc = falcon_reset_hw(efx, reset_method); + rc2 = efx_reset_up(efx, reset_method, rc == 0); + return rc ? 
rc : rc2; +} + +/************************************************************************** + * + * Device reset + * + ************************************************************************** + */ + +static enum reset_type falcon_map_reset_reason(enum reset_type reason) +{ + switch (reason) { + case RESET_TYPE_RX_RECOVERY: + case RESET_TYPE_DMA_ERROR: + case RESET_TYPE_TX_SKIP: + /* These can occasionally occur due to hardware bugs. + * We try to reset without disrupting the link. + */ + return RESET_TYPE_INVISIBLE; + default: + return RESET_TYPE_ALL; + } +} + +static int falcon_map_reset_flags(u32 *flags) +{ + enum { + FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | + ETH_RESET_OFFLOAD | ETH_RESET_MAC), + FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY, + FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ, + }; + + if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) { + *flags &= ~FALCON_RESET_WORLD; + return RESET_TYPE_WORLD; + } + + if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) { + *flags &= ~FALCON_RESET_ALL; + return RESET_TYPE_ALL; + } + + if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) { + *flags &= ~FALCON_RESET_INVISIBLE; + return RESET_TYPE_INVISIBLE; + } + + return -EINVAL; +} + +/* Resets NIC to known state. This routine must be called in process + * context and is allowed to sleep. */ +static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t glb_ctl_reg_ker; + int rc; + + netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n", + RESET_TYPE(method)); + + /* Initiate device reset */ + if (method == RESET_TYPE_WORLD) { + rc = pci_save_state(efx->pci_dev); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to backup PCI state of primary " + "function prior to hardware reset\n"); + goto fail1; + } + if (efx_nic_is_dual_func(efx)) { + rc = pci_save_state(nic_data->pci_dev2); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to backup PCI state of " + "secondary function prior to " + "hardware reset\n"); + goto fail2; + } + } + + EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, + FRF_AB_EXT_PHY_RST_DUR, + FFE_AB_EXT_PHY_RST_DUR_10240US, + FRF_AB_SWRST, 1); + } else { + EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, + /* exclude PHY from "invisible" reset */ + FRF_AB_EXT_PHY_RST_CTL, + method == RESET_TYPE_INVISIBLE, + /* exclude EEPROM/flash and PCIe */ + FRF_AB_PCIE_CORE_RST_CTL, 1, + FRF_AB_PCIE_NSTKY_RST_CTL, 1, + FRF_AB_PCIE_SD_RST_CTL, 1, + FRF_AB_EE_RST_CTL, 1, + FRF_AB_EXT_PHY_RST_DUR, + FFE_AB_EXT_PHY_RST_DUR_10240US, + FRF_AB_SWRST, 1); + } + efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); + + netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n"); + schedule_timeout_uninterruptible(HZ / 20); + + /* Restore PCI configuration if needed */ + if (method == RESET_TYPE_WORLD) { + if (efx_nic_is_dual_func(efx)) + pci_restore_state(nic_data->pci_dev2); + pci_restore_state(efx->pci_dev); + netif_dbg(efx, drv, efx->net_dev, + "successfully restored PCI config\n"); + } + + /* Assert that reset complete */ + efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); + if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { + rc = -ETIMEDOUT; + netif_err(efx, hw, efx->net_dev, + "timed out waiting for hardware reset\n"); + goto fail3; + } + netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); + + return 0; + + /* pci_save_state() and pci_restore_state() MUST be called in pairs */ +fail2: + pci_restore_state(efx->pci_dev); 
+fail1: +fail3: + return rc; +} + +static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + int rc; + + mutex_lock(&nic_data->spi_lock); + rc = __falcon_reset_hw(efx, method); + mutex_unlock(&nic_data->spi_lock); + + return rc; +} + +static void falcon_monitor(struct efx_nic *efx) +{ + bool link_changed; + int rc; + + BUG_ON(!mutex_is_locked(&efx->mac_lock)); + + rc = falcon_board(efx)->type->monitor(efx); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "Board sensor %s; shutting down PHY\n", + (rc == -ERANGE) ? "reported fault" : "failed"); + efx->phy_mode |= PHY_MODE_LOW_POWER; + rc = __efx_reconfigure_port(efx); + WARN_ON(rc); + } + + if (LOOPBACK_INTERNAL(efx)) + link_changed = falcon_loopback_link_poll(efx); + else + link_changed = efx->phy_op->poll(efx); + + if (link_changed) { + falcon_stop_nic_stats(efx); + falcon_deconfigure_mac_wrapper(efx); + + falcon_reset_macs(efx); + rc = falcon_reconfigure_xmac(efx); + BUG_ON(rc); + + falcon_start_nic_stats(efx); + + efx_link_status_changed(efx); + } + + falcon_poll_xmac(efx); +} + +/* Zeroes out the SRAM contents. This routine must be called in + * process context and is allowed to sleep. + */ +static int falcon_reset_sram(struct efx_nic *efx) +{ + efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; + int count; + + /* Set the SRAM wake/sleep GPIO appropriately. */ + efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); + EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); + EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); + efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); + + /* Initiate SRAM reset */ + EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, + FRF_AZ_SRM_INIT_EN, 1, + FRF_AZ_SRM_NB_SZ, 0); + efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); + + /* Wait for SRAM reset to complete */ + count = 0; + do { + netif_dbg(efx, hw, efx->net_dev, + "waiting for SRAM reset (attempt %d)...\n", count); + + /* SRAM reset is slow; expect around 16ms */ + schedule_timeout_uninterruptible(HZ / 50); + + /* Check for reset complete */ + efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); + if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { + netif_dbg(efx, hw, efx->net_dev, + "SRAM reset complete\n"); + + return 0; + } + } while (++count < 20); /* wait up to 0.4 sec */ + + netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); + return -ETIMEDOUT; +} + +static void falcon_spi_device_init(struct efx_nic *efx, + struct falcon_spi_device *spi_device, + unsigned int device_id, u32 device_type) +{ + if (device_type != 0) { + spi_device->device_id = device_id; + spi_device->size = + 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); + spi_device->addr_len = + SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN); + spi_device->munge_address = (spi_device->size == 1 << 9 && + spi_device->addr_len == 1); + spi_device->erase_command = + SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD); + spi_device->erase_size = + 1 << SPI_DEV_TYPE_FIELD(device_type, + SPI_DEV_TYPE_ERASE_SIZE); + spi_device->block_size = + 1 << SPI_DEV_TYPE_FIELD(device_type, + SPI_DEV_TYPE_BLOCK_SIZE); + } else { + spi_device->size = 0; + } +} + +/* Extract non-volatile configuration */ +static int falcon_probe_nvconfig(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_nvconfig *nvconfig; + int rc; + + nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); + if (!nvconfig) + return -ENOMEM; + + rc = falcon_read_nvram(efx, nvconfig); + if 
(rc) + goto out; + + efx->phy_type = nvconfig->board_v2.port0_phy_type; + efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr; + + if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { + falcon_spi_device_init( + efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH, + le32_to_cpu(nvconfig->board_v3 + .spi_device_type[FFE_AB_SPI_DEVICE_FLASH])); + falcon_spi_device_init( + efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM, + le32_to_cpu(nvconfig->board_v3 + .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM])); + } + + /* Read the MAC addresses */ + ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]); + + netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", + efx->phy_type, efx->mdio.prtad); + + rc = falcon_probe_board(efx, + le16_to_cpu(nvconfig->board_v2.board_revision)); +out: + kfree(nvconfig); + return rc; +} + +static int falcon_dimension_resources(struct efx_nic *efx) +{ + efx->rx_dc_base = 0x20000; + efx->tx_dc_base = 0x26000; + return 0; +} + +/* Probe all SPI devices on the NIC */ +static void falcon_probe_spi_devices(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; + int boot_dev; + + efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL); + efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); + efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); + + if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { + boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? + FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); + netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n", + boot_dev == FFE_AB_SPI_DEVICE_FLASH ? + "flash" : "EEPROM"); + } else { + /* Disable VPD and set clock dividers to safe + * values for initial programming. */ + boot_dev = -1; + netif_dbg(efx, probe, efx->net_dev, + "Booted from internal ASIC settings;" + " setting SPI config\n"); + EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, + /* 125 MHz / 7 ~= 20 MHz */ + FRF_AB_EE_SF_CLOCK_DIV, 7, + /* 125 MHz / 63 ~= 2 MHz */ + FRF_AB_EE_EE_CLOCK_DIV, 63); + efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); + } + + mutex_init(&nic_data->spi_lock); + + if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) + falcon_spi_device_init(efx, &nic_data->spi_flash, + FFE_AB_SPI_DEVICE_FLASH, + default_flash_type); + if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) + falcon_spi_device_init(efx, &nic_data->spi_eeprom, + FFE_AB_SPI_DEVICE_EEPROM, + large_eeprom_type); +} + +static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx) +{ + return 0x20000; +} + +static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx) +{ + /* Map everything up to and including the RSS indirection table. + * The PCI core takes care of mapping the MSI-X tables. 
+ */ + return FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS; +} + +static int falcon_probe_nic(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data; + struct falcon_board *board; + int rc; + + efx->primary = efx; /* only one usable function per controller */ + + /* Allocate storage for hardware specific data */ + nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + efx->nic_data = nic_data; + + rc = -ENODEV; + + if (efx_farch_fpga_ver(efx) != 0) { + netif_err(efx, probe, efx->net_dev, + "Falcon FPGA not supported\n"); + goto fail1; + } + + if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { + efx_oword_t nic_stat; + struct pci_dev *dev; + u8 pci_rev = efx->pci_dev->revision; + + if ((pci_rev == 0xff) || (pci_rev == 0)) { + netif_err(efx, probe, efx->net_dev, + "Falcon rev A0 not supported\n"); + goto fail1; + } + efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); + if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { + netif_err(efx, probe, efx->net_dev, + "Falcon rev A1 1G not supported\n"); + goto fail1; + } + if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { + netif_err(efx, probe, efx->net_dev, + "Falcon rev A1 PCI-X not supported\n"); + goto fail1; + } + + dev = pci_dev_get(efx->pci_dev); + while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, + dev))) { + if (dev->bus == efx->pci_dev->bus && + dev->devfn == efx->pci_dev->devfn + 1) { + nic_data->pci_dev2 = dev; + break; + } + } + if (!nic_data->pci_dev2) { + netif_err(efx, probe, efx->net_dev, + "failed to find secondary function\n"); + rc = -ENODEV; + goto fail2; + } + } + + /* Now we can reset the NIC */ + rc = __falcon_reset_hw(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); + goto fail3; + } + + /* Allocate memory for INT_KER */ + rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), + GFP_KERNEL); + if (rc) + goto fail4; + BUG_ON(efx->irq_status.dma_addr & 0x0f); + + netif_dbg(efx, probe, efx->net_dev, + "INT_KER at %llx (virt %p phys %llx)\n", + (u64)efx->irq_status.dma_addr, + efx->irq_status.addr, + (u64)virt_to_phys(efx->irq_status.addr)); + + falcon_probe_spi_devices(efx); + + /* Read in the non-volatile configuration */ + rc = falcon_probe_nvconfig(efx); + if (rc) { + if (rc == -EINVAL) + netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n"); + goto fail5; + } + + efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 
4 : + EFX_MAX_CHANNELS); + efx->timer_quantum_ns = 4968; /* 621 cycles */ + + /* Initialise I2C adapter */ + board = falcon_board(efx); + board->i2c_adap.owner = THIS_MODULE; + board->i2c_data = falcon_i2c_bit_operations; + board->i2c_data.data = efx; + board->i2c_adap.algo_data = &board->i2c_data; + board->i2c_adap.dev.parent = &efx->pci_dev->dev; + strlcpy(board->i2c_adap.name, "SFC4000 GPIO", + sizeof(board->i2c_adap.name)); + rc = i2c_bit_add_bus(&board->i2c_adap); + if (rc) + goto fail5; + + rc = falcon_board(efx)->type->init(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "failed to initialise board\n"); + goto fail6; + } + + nic_data->stats_disable_count = 1; + setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func, + (unsigned long)efx); + + return 0; + + fail6: + i2c_del_adapter(&board->i2c_adap); + memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); + fail5: + efx_nic_free_buffer(efx, &efx->irq_status); + fail4: + fail3: + if (nic_data->pci_dev2) { + pci_dev_put(nic_data->pci_dev2); + nic_data->pci_dev2 = NULL; + } + fail2: + fail1: + kfree(efx->nic_data); + return rc; +} + +static void falcon_init_rx_cfg(struct efx_nic *efx) +{ + /* RX control FIFO thresholds (32 entries) */ + const unsigned ctrl_xon_thr = 20; + const unsigned ctrl_xoff_thr = 25; + efx_oword_t reg; + + efx_reado(efx, ®, FR_AZ_RX_CFG); + if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { + /* Data FIFO size is 5.5K. The RX DMA engine only + * supports scattering for user-mode queues, but will + * split DMA writes at intervals of RX_USR_BUF_SIZE + * (32-byte units) even for kernel-mode queues. We + * set it to be so large that that never happens. + */ + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, + (3 * 4096) >> 5); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); + } else { + /* Data FIFO size is 80K; register fields moved */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, + EFX_RX_USR_BUF_SIZE >> 5); + /* Send XON and XOFF at ~3 * max MTU away from empty/full */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); + + /* Enable hash insertion. This is broken for the + * 'Falcon' hash so also select Toeplitz TCP/IPv4 and + * IPv4 hashes. */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1); + } + /* Always enable XOFF signal from RX FIFO. We enable + * or disable transmission of pause frames at the MAC. */ + EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); + efx_writeo(efx, ®, FR_AZ_RX_CFG); +} + +/* This call performs hardware-specific global initialisation, such as + * defining the descriptor cache sizes and number of RSS channels. + * It does not set up any buffers, descriptor rings or event queues. 
+ */ +static int falcon_init_nic(struct efx_nic *efx) +{ + efx_oword_t temp; + int rc; + + /* Use on-chip SRAM */ + efx_reado(efx, &temp, FR_AB_NIC_STAT); + EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); + efx_writeo(efx, &temp, FR_AB_NIC_STAT); + + rc = falcon_reset_sram(efx); + if (rc) + return rc; + + /* Clear the parity enables on the TX data fifos as + * they produce false parity errors because of timing issues + */ + if (EFX_WORKAROUND_5129(efx)) { + efx_reado(efx, &temp, FR_AZ_CSR_SPARE); + EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0); + efx_writeo(efx, &temp, FR_AZ_CSR_SPARE); + } + + if (EFX_WORKAROUND_7244(efx)) { + efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8); + efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL); + } + + /* XXX This is documented only for Falcon A0/A1 */ + /* Setup RX. Wait for descriptor is broken and must + * be disabled. RXDP recovery shouldn't be needed, but is. + */ + efx_reado(efx, &temp, FR_AA_RX_SELF_RST); + EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1); + if (EFX_WORKAROUND_5583(efx)) + EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1); + efx_writeo(efx, &temp, FR_AA_RX_SELF_RST); + + /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 + * descriptors (which is bad). + */ + efx_reado(efx, &temp, FR_AZ_TX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); + efx_writeo(efx, &temp, FR_AZ_TX_CFG); + + falcon_init_rx_cfg(efx); + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + falcon_b0_rx_push_rss_config(efx); + + /* Set destination of both TX and RX Flush events */ + EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); + efx_writeo(efx, &temp, FR_BZ_DP_CTRL); + } + + efx_farch_init_common(efx); + + return 0; +} + +static void falcon_remove_nic(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_board *board = falcon_board(efx); + + board->type->fini(efx); + + /* Remove I2C adapter and clear it in preparation for a retry */ + i2c_del_adapter(&board->i2c_adap); + memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); + + efx_nic_free_buffer(efx, &efx->irq_status); + + __falcon_reset_hw(efx, RESET_TYPE_ALL); + + /* Release the second function after the reset */ + if (nic_data->pci_dev2) { + pci_dev_put(nic_data->pci_dev2); + nic_data->pci_dev2 = NULL; + } + + /* Tear down the private nic state */ + kfree(efx->nic_data); + efx->nic_data = NULL; +} + +static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names) +{ + return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT, + falcon_stat_mask, names); +} + +static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + efx_oword_t cnt; + + if (!nic_data->stats_disable_count) { + efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP); + stats[FALCON_STAT_rx_nodesc_drop_cnt] += + EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); + + if (nic_data->stats_pending && + FALCON_XMAC_STATS_DMA_FLAG(efx)) { + nic_data->stats_pending = false; + rmb(); /* read the done flag before the stats */ + efx_nic_update_stats( + falcon_stat_desc, FALCON_STAT_COUNT, + falcon_stat_mask, + stats, efx->stats_buffer.addr, 
true); + } + + /* Update derived statistic */ + efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes], + stats[FALCON_STAT_rx_bytes] - + stats[FALCON_STAT_rx_good_bytes] - + stats[FALCON_STAT_rx_control] * 64); + efx_update_sw_stats(efx, stats); + } + + if (full_stats) + memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT); + + if (core_stats) { + core_stats->rx_packets = stats[FALCON_STAT_rx_packets]; + core_stats->tx_packets = stats[FALCON_STAT_tx_packets]; + core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes]; + core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes]; + core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[FALCON_STAT_rx_multicast]; + core_stats->rx_length_errors = + stats[FALCON_STAT_rx_gtjumbo] + + stats[FALCON_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow]; + + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors + + stats[FALCON_STAT_rx_symbol_error]); + } + + return FALCON_STAT_COUNT; +} + +void falcon_start_nic_stats(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + + spin_lock_bh(&efx->stats_lock); + if (--nic_data->stats_disable_count == 0) + falcon_stats_request(efx); + spin_unlock_bh(&efx->stats_lock); +} + +/* We don't actually pull stats on Falcon. Wait 10ms so that + * they arrive when we call this just after start_stats. + */ +static void falcon_pull_nic_stats(struct efx_nic *efx) +{ + msleep(10); +} + +void falcon_stop_nic_stats(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + int i; + + might_sleep(); + + spin_lock_bh(&efx->stats_lock); + ++nic_data->stats_disable_count; + spin_unlock_bh(&efx->stats_lock); + + del_timer_sync(&nic_data->stats_timer); + + /* Wait enough time for the most recent transfer to + * complete.
*/ + for (i = 0; i < 4 && nic_data->stats_pending; i++) { + if (FALCON_XMAC_STATS_DMA_FLAG(efx)) + break; + msleep(1); + } + + spin_lock_bh(&efx->stats_lock); + falcon_stats_complete(efx); + spin_unlock_bh(&efx->stats_lock); +} + +static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + falcon_board(efx)->type->set_id_led(efx, mode); +} + +/************************************************************************** + * + * Wake on LAN + * + ************************************************************************** + */ + +static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int falcon_set_wol(struct efx_nic *efx, u32 type) +{ + if (type != 0) + return -EINVAL; + return 0; +} + +/************************************************************************** + * + * Revision-dependent attributes used by efx.c and nic.c + * + ************************************************************************** + */ + +const struct efx_nic_type falcon_a1_nic_type = { + .mem_map_size = falcon_a1_mem_map_size, + .probe = falcon_probe_nic, + .remove = falcon_remove_nic, + .init = falcon_init_nic, + .dimension_resources = falcon_dimension_resources, + .fini = falcon_irq_ack_a1, + .monitor = falcon_monitor, + .map_reset_reason = falcon_map_reset_reason, + .map_reset_flags = falcon_map_reset_flags, + .reset = falcon_reset_hw, + .probe_port = falcon_probe_port, + .remove_port = falcon_remove_port, + .handle_global_event = falcon_handle_global_event, + .fini_dmaq = efx_farch_fini_dmaq, + .prepare_flush = falcon_prepare_flush, + .finish_flush = efx_port_dummy_op_void, + .prepare_flr = efx_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, + .describe_stats = falcon_describe_nic_stats, + .update_stats = falcon_update_nic_stats, + .start_stats = falcon_start_nic_stats, + .pull_stats = falcon_pull_nic_stats, + .stop_stats = falcon_stop_nic_stats, + .set_id_led = falcon_set_id_led, + .push_irq_moderation = falcon_push_irq_moderation, + .reconfigure_port = falcon_reconfigure_port, + .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx, + .reconfigure_mac = falcon_reconfigure_xmac, + .check_mac_fault = falcon_xmac_check_fault, + .get_wol = falcon_get_wol, + .set_wol = falcon_set_wol, + .resume_wol = efx_port_dummy_op_void, + .test_nvram = falcon_test_nvram, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = falcon_legacy_interrupt_a1, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .rx_push_rss_config = efx_port_dummy_op_void, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = efx_farch_rx_defer_refill, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, + + /* We don't expose the filter table on Falcon A1 as it is not + * mapped into function 0, but these implementations still + * work with a degenerate case of all tables set to size 0. 
+ */ + .filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx = efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, + +#ifdef CONFIG_SFC_MTD + .mtd_probe = falcon_mtd_probe, + .mtd_rename = falcon_mtd_rename, + .mtd_read = falcon_mtd_read, + .mtd_erase = falcon_mtd_erase, + .mtd_write = falcon_mtd_write, + .mtd_sync = falcon_mtd_sync, +#endif + .sriov_init = efx_falcon_sriov_init, + .sriov_fini = efx_falcon_sriov_fini, + .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, + .sriov_wanted = efx_falcon_sriov_wanted, + .sriov_reset = efx_falcon_sriov_reset, + + .revision = EFX_REV_FALCON_A1, + .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, + .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, + .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, + .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER, + .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, + .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_buffer_padding = 0x24, + .can_rx_scatter = false, + .max_interrupt_mode = EFX_INT_MODE_MSI, + .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, + .offload_features = NETIF_F_IP_CSUM, + .mcdi_max_ver = -1, +}; + +const struct efx_nic_type falcon_b0_nic_type = { + .mem_map_size = falcon_b0_mem_map_size, + .probe = falcon_probe_nic, + .remove = falcon_remove_nic, + .init = falcon_init_nic, + .dimension_resources = falcon_dimension_resources, + .fini = efx_port_dummy_op_void, + .monitor = falcon_monitor, + .map_reset_reason = falcon_map_reset_reason, + .map_reset_flags = falcon_map_reset_flags, + .reset = falcon_reset_hw, + .probe_port = falcon_probe_port, + .remove_port = falcon_remove_port, + .handle_global_event = falcon_handle_global_event, + .fini_dmaq = efx_farch_fini_dmaq, + .prepare_flush = falcon_prepare_flush, + .finish_flush = efx_port_dummy_op_void, + .prepare_flr = efx_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, + .describe_stats = falcon_describe_nic_stats, + .update_stats = falcon_update_nic_stats, + .start_stats = falcon_start_nic_stats, + .pull_stats = falcon_pull_nic_stats, + .stop_stats = falcon_stop_nic_stats, + .set_id_led = falcon_set_id_led, + .push_irq_moderation = falcon_push_irq_moderation, + .reconfigure_port = falcon_reconfigure_port, + .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx, + .reconfigure_mac = falcon_reconfigure_xmac, + .check_mac_fault = falcon_xmac_check_fault, + .get_wol = falcon_get_wol, + .set_wol = falcon_set_wol, + .resume_wol = efx_port_dummy_op_void, + .test_chip = falcon_b0_test_chip, + .test_nvram = falcon_test_nvram, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = efx_farch_legacy_interrupt, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .rx_push_rss_config = falcon_b0_rx_push_rss_config, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = 
efx_farch_rx_defer_refill, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, + .filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx = efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_insert = efx_farch_filter_rfs_insert, + .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = falcon_mtd_probe, + .mtd_rename = falcon_mtd_rename, + .mtd_read = falcon_mtd_read, + .mtd_erase = falcon_mtd_erase, + .mtd_write = falcon_mtd_write, + .mtd_sync = falcon_mtd_sync, +#endif + .sriov_init = efx_falcon_sriov_init, + .sriov_fini = efx_falcon_sriov_fini, + .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, + .sriov_wanted = efx_falcon_sriov_wanted, + .sriov_reset = efx_falcon_sriov_reset, + + .revision = EFX_REV_FALCON_B0, + .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, + .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, + .buf_tbl_base = FR_BZ_BUF_FULL_TBL, + .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, + .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, + .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE, + .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, + .rx_buffer_padding = 0, + .can_rx_scatter = true, + .max_interrupt_mode = EFX_INT_MODE_MSIX, + .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, + .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, + .mcdi_max_ver = -1, + .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, +}; diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c new file mode 100644 index 000000000..1736f4b80 --- /dev/null +++ b/drivers/net/ethernet/sfc/falcon_boards.c @@ -0,0 +1,764 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2007-2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include + +#include "net_driver.h" +#include "phy.h" +#include "efx.h" +#include "nic.h" +#include "workarounds.h" + +/* Macros for unpacking the board revision */ +/* The revision info is in host byte order. */ +#define FALCON_BOARD_TYPE(_rev) (_rev >> 8) +#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf) +#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf) + +/* Board types */ +#define FALCON_BOARD_SFE4001 0x01 +#define FALCON_BOARD_SFE4002 0x02 +#define FALCON_BOARD_SFE4003 0x03 +#define FALCON_BOARD_SFN4112F 0x52 + +/* Board temperature is about 15°C above ambient when air flow is + * limited. 
The maximum acceptable ambient temperature varies + * depending on the PHY specifications but the critical temperature + * above which we should shut down to avoid damage is 80°C. */ +#define FALCON_BOARD_TEMP_BIAS 15 +#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS) + +/* SFC4000 datasheet says: 'The maximum permitted junction temperature + * is 125°C; the thermal design of the environment for the SFC4000 + * should aim to keep this well below 100°C.' */ +#define FALCON_JUNC_TEMP_MIN 0 +#define FALCON_JUNC_TEMP_MAX 90 +#define FALCON_JUNC_TEMP_CRIT 125 + +/***************************************************************************** + * Support for LM87 sensor chip used on several boards + */ +#define LM87_REG_TEMP_HW_INT_LOCK 0x13 +#define LM87_REG_TEMP_HW_EXT_LOCK 0x14 +#define LM87_REG_TEMP_HW_INT 0x17 +#define LM87_REG_TEMP_HW_EXT 0x18 +#define LM87_REG_TEMP_EXT1 0x26 +#define LM87_REG_TEMP_INT 0x27 +#define LM87_REG_ALARMS1 0x41 +#define LM87_REG_ALARMS2 0x42 +#define LM87_IN_LIMITS(nr, _min, _max) \ + 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min +#define LM87_AIN_LIMITS(nr, _min, _max) \ + 0x3B + (nr), _max, 0x1A + (nr), _min +#define LM87_TEMP_INT_LIMITS(_min, _max) \ + 0x39, _max, 0x3A, _min +#define LM87_TEMP_EXT1_LIMITS(_min, _max) \ + 0x37, _max, 0x38, _min + +#define LM87_ALARM_TEMP_INT 0x10 +#define LM87_ALARM_TEMP_EXT1 0x20 + +#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) + +static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values) +{ + while (*reg_values) { + u8 reg = *reg_values++; + u8 value = *reg_values++; + int rc = i2c_smbus_write_byte_data(client, reg, value); + if (rc) + return rc; + } + return 0; +} + +static const u8 falcon_lm87_common_regs[] = { + LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT, + LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT, + LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX), + LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT, + LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT, + 0 +}; + +static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info, + const u8 *reg_values) +{ + struct falcon_board *board = falcon_board(efx); + struct i2c_client *client = i2c_new_device(&board->i2c_adap, info); + int rc; + + if (!client) + return -EIO; + + /* Read-to-clear alarm/interrupt status */ + i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); + i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); + + rc = efx_poke_lm87(client, reg_values); + if (rc) + goto err; + rc = efx_poke_lm87(client, falcon_lm87_common_regs); + if (rc) + goto err; + + board->hwmon_client = client; + return 0; + +err: + i2c_unregister_device(client); + return rc; +} + +static void efx_fini_lm87(struct efx_nic *efx) +{ + i2c_unregister_device(falcon_board(efx)->hwmon_client); +} + +static int efx_check_lm87(struct efx_nic *efx, unsigned mask) +{ + struct i2c_client *client = falcon_board(efx)->hwmon_client; + bool temp_crit, elec_fault, is_failure; + u16 alarms; + s32 reg; + + /* If link is up then do not monitor temperature */ + if (EFX_WORKAROUND_7884(efx) && efx->link_state.up) + return 0; + + reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); + if (reg < 0) + return reg; + alarms = reg; + reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); + if (reg < 0) + return reg; + alarms |= reg << 8; + alarms &= mask; + + temp_crit = false; + if (alarms & LM87_ALARM_TEMP_INT) { + reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT); + if (reg < 0) + return reg; + if (reg > 
FALCON_BOARD_TEMP_CRIT) + temp_crit = true; + } + if (alarms & LM87_ALARM_TEMP_EXT1) { + reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1); + if (reg < 0) + return reg; + if (reg > FALCON_JUNC_TEMP_CRIT) + temp_crit = true; + } + elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1); + is_failure = temp_crit || elec_fault; + + if (alarms) + netif_err(efx, hw, efx->net_dev, + "LM87 detected a hardware %s (status %02x:%02x)" + "%s%s%s%s\n", + is_failure ? "failure" : "problem", + alarms & 0xff, alarms >> 8, + (alarms & LM87_ALARM_TEMP_INT) ? + "; board is overheating" : "", + (alarms & LM87_ALARM_TEMP_EXT1) ? + "; controller is overheating" : "", + temp_crit ? "; reached critical temperature" : "", + elec_fault ? "; electrical fault" : ""); + + return is_failure ? -ERANGE : 0; +} + +#else /* !CONFIG_SENSORS_LM87 */ + +static inline int +efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info, + const u8 *reg_values) +{ + return 0; +} +static inline void efx_fini_lm87(struct efx_nic *efx) +{ +} +static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask) +{ + return 0; +} + +#endif /* CONFIG_SENSORS_LM87 */ + +/***************************************************************************** + * Support for the SFE4001 NIC. + * + * The SFE4001 does not power-up fully at reset due to its high power + * consumption. We control its power via a PCA9539 I/O expander. + * It also has a MAX6647 temperature monitor which we expose to + * the lm90 driver. + * + * This also provides minimal support for reflashing the PHY, which is + * initiated by resetting it with the FLASH_CFG_1 pin pulled down. + * On SFE4001 rev A2 and later this is connected to the 3V3X output of + * the IO-expander. + * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually + * exclusive with the network device being open. 
+ */ + +/************************************************************************** + * Support for I2C IO Expander device on SFE4001 + */ +#define PCA9539 0x74 + +#define P0_IN 0x00 +#define P0_OUT 0x02 +#define P0_INVERT 0x04 +#define P0_CONFIG 0x06 + +#define P0_EN_1V0X_LBN 0 +#define P0_EN_1V0X_WIDTH 1 +#define P0_EN_1V2_LBN 1 +#define P0_EN_1V2_WIDTH 1 +#define P0_EN_2V5_LBN 2 +#define P0_EN_2V5_WIDTH 1 +#define P0_EN_3V3X_LBN 3 +#define P0_EN_3V3X_WIDTH 1 +#define P0_EN_5V_LBN 4 +#define P0_EN_5V_WIDTH 1 +#define P0_SHORTEN_JTAG_LBN 5 +#define P0_SHORTEN_JTAG_WIDTH 1 +#define P0_X_TRST_LBN 6 +#define P0_X_TRST_WIDTH 1 +#define P0_DSP_RESET_LBN 7 +#define P0_DSP_RESET_WIDTH 1 + +#define P1_IN 0x01 +#define P1_OUT 0x03 +#define P1_INVERT 0x05 +#define P1_CONFIG 0x07 + +#define P1_AFE_PWD_LBN 0 +#define P1_AFE_PWD_WIDTH 1 +#define P1_DSP_PWD25_LBN 1 +#define P1_DSP_PWD25_WIDTH 1 +#define P1_RESERVED_LBN 2 +#define P1_RESERVED_WIDTH 2 +#define P1_SPARE_LBN 4 +#define P1_SPARE_WIDTH 4 + +/* Temperature Sensor */ +#define MAX664X_REG_RSL 0x02 +#define MAX664X_REG_WLHO 0x0B + +static void sfe4001_poweroff(struct efx_nic *efx) +{ + struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; + struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; + + /* Turn off all power rails and disable outputs */ + i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff); + i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff); + i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff); + + /* Clear any over-temperature alert */ + i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); +} + +static int sfe4001_poweron(struct efx_nic *efx) +{ + struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; + struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; + unsigned int i, j; + int rc; + u8 out; + + /* Clear any previous over-temperature alert */ + rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); + if (rc < 0) + return rc; + + /* Enable port 0 and port 1 outputs on IO expander */ + rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00); + if (rc) + return rc; + rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, + 0xff & ~(1 << P1_SPARE_LBN)); + if (rc) + goto fail_on; + + /* If PHY power is on, turn it all off and wait 1 second to + * ensure a full reset. + */ + rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT); + if (rc < 0) + goto fail_on; + out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | + (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | + (0 << P0_EN_1V0X_LBN)); + if (rc != out) { + netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n"); + rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); + if (rc) + goto fail_on; + schedule_timeout_uninterruptible(HZ); + } + + for (i = 0; i < 20; ++i) { + /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */ + out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | + (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | + (1 << P0_X_TRST_LBN)); + if (efx->phy_mode & PHY_MODE_SPECIAL) + out |= 1 << P0_EN_3V3X_LBN; + + rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); + if (rc) + goto fail_on; + msleep(10); + + /* Turn on 1V power rail */ + out &= ~(1 << P0_EN_1V0X_LBN); + rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); + if (rc) + goto fail_on; + + netif_info(efx, hw, efx->net_dev, + "waiting for DSP boot (attempt %d)...\n", i); + + /* In flash config mode, DSP does not turn on AFE, so + * just wait 1 second. 
+ */ + if (efx->phy_mode & PHY_MODE_SPECIAL) { + schedule_timeout_uninterruptible(HZ); + return 0; + } + + for (j = 0; j < 10; ++j) { + msleep(100); + + /* Check DSP has asserted AFE power line */ + rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN); + if (rc < 0) + goto fail_on; + if (rc & (1 << P1_AFE_PWD_LBN)) + return 0; + } + } + + netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n"); + rc = -ETIMEDOUT; +fail_on: + sfe4001_poweroff(efx); + return rc; +} + +static ssize_t show_phy_flash_cfg(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); +} + +static ssize_t set_phy_flash_cfg(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + enum efx_phy_mode old_mode, new_mode; + int err; + + rtnl_lock(); + old_mode = efx->phy_mode; + if (count == 0 || *buf == '0') + new_mode = old_mode & ~PHY_MODE_SPECIAL; + else + new_mode = PHY_MODE_SPECIAL; + if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { + err = 0; + } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) { + err = -EBUSY; + } else { + /* Reset the PHY, reconfigure the MAC and enable/disable + * MAC stats accordingly. */ + efx->phy_mode = new_mode; + if (new_mode & PHY_MODE_SPECIAL) + falcon_stop_nic_stats(efx); + err = sfe4001_poweron(efx); + if (!err) + err = efx_reconfigure_port(efx); + if (!(new_mode & PHY_MODE_SPECIAL)) + falcon_start_nic_stats(efx); + } + rtnl_unlock(); + + return err ? err : count; +} + +static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg); + +static void sfe4001_fini(struct efx_nic *efx) +{ + struct falcon_board *board = falcon_board(efx); + + netif_info(efx, drv, efx->net_dev, "%s\n", __func__); + + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); + sfe4001_poweroff(efx); + i2c_unregister_device(board->ioexp_client); + i2c_unregister_device(board->hwmon_client); +} + +static int sfe4001_check_hw(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + s32 status; + + /* If XAUI link is up then do not monitor */ + if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required) + return 0; + + /* Check the powered status of the PHY. Lack of power implies that + * the MAX6647 has shut down power to it, probably due to a temp. + * alarm. Reading the power status rather than the MAX6647 status + * directly because the latter is read-to-clear and would thus + * start to power up the PHY again when polled, causing us to blip + * the power undesirably. + * We know we can read from the IO expander because we did + * it during power-on. Assume failure now is bad news. */ + status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN); + if (status >= 0 && + (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0) + return 0; + + /* Use board power control, not PHY power control */ + sfe4001_poweroff(efx); + efx->phy_mode = PHY_MODE_OFF; + + return (status < 0) ? -EIO : -ERANGE; +} + +static const struct i2c_board_info sfe4001_hwmon_info = { + I2C_BOARD_INFO("max6647", 0x4e), +}; + +/* This board uses an I2C expander to provide power to the PHY, which needs to + * be turned on before the PHY can be used.
+ * Context: Process context, rtnl lock held + */ +static int sfe4001_init(struct efx_nic *efx) +{ + struct falcon_board *board = falcon_board(efx); + int rc; + +#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE) + board->hwmon_client = + i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info); +#else + board->hwmon_client = + i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr); +#endif + if (!board->hwmon_client) + return -EIO; + + /* Raise board/PHY high limit from 85 to 90 degrees Celsius */ + rc = i2c_smbus_write_byte_data(board->hwmon_client, + MAX664X_REG_WLHO, 90); + if (rc) + goto fail_hwmon; + + board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539); + if (!board->ioexp_client) { + rc = -EIO; + goto fail_hwmon; + } + + if (efx->phy_mode & PHY_MODE_SPECIAL) { + /* PHY won't generate a 156.25 MHz clock and MAC stats fetch + * will fail. */ + falcon_stop_nic_stats(efx); + } + rc = sfe4001_poweron(efx); + if (rc) + goto fail_ioexp; + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); + if (rc) + goto fail_on; + + netif_info(efx, hw, efx->net_dev, "PHY is powered on\n"); + return 0; + +fail_on: + sfe4001_poweroff(efx); +fail_ioexp: + i2c_unregister_device(board->ioexp_client); +fail_hwmon: + i2c_unregister_device(board->hwmon_client); + return rc; +} + +/***************************************************************************** + * Support for the SFE4002 + * + */ +static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ + +static const u8 sfe4002_lm87_regs[] = { + LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ + LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ + LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ + LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */ + LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ + LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ + LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */ + LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ + LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS), + LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), + 0 +}; + +static const struct i2c_board_info sfe4002_hwmon_info = { + I2C_BOARD_INFO("lm87", 0x2e), + .platform_data = &sfe4002_lm87_channel, +}; + +/****************************************************************************/ +/* LED allocations. Note that on rev A0 boards the schematic and the reality + * differ: red and green are swapped. Below is the fixed (A1) layout (there + * are only 3 A0 boards in existence, so no real reason to make this + * conditional). + */ +#define SFE4002_FAULT_LED (2) /* Red */ +#define SFE4002_RX_LED (0) /* Green */ +#define SFE4002_TX_LED (1) /* Amber */ + +static void sfe4002_init_phy(struct efx_nic *efx) +{ + /* Set the TX and RX LEDs to reflect status and activity, and the + * fault LED off */ + falcon_qt202x_set_led(efx, SFE4002_TX_LED, + QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT); + falcon_qt202x_set_led(efx, SFE4002_RX_LED, + QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT); + falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF); +} + +static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + falcon_qt202x_set_led( + efx, SFE4002_FAULT_LED, + (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF); +} + +static int sfe4002_check_hw(struct efx_nic *efx) +{ + struct falcon_board *board = falcon_board(efx); + + /* A0 board rev. 4002s report a temperature fault the whole time + * (bad sensor) so we mask it out. 
*/ + unsigned alarm_mask = + (board->major == 0 && board->minor == 0) ? + ~LM87_ALARM_TEMP_EXT1 : ~0; + + return efx_check_lm87(efx, alarm_mask); +} + +static int sfe4002_init(struct efx_nic *efx) +{ + return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs); +} + +/***************************************************************************** + * Support for the SFN4112F + * + */ +static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ + +static const u8 sfn4112f_lm87_regs[] = { + LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ + LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ + LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ + LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ + LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ + LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ + LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS), + LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), + 0 +}; + +static const struct i2c_board_info sfn4112f_hwmon_info = { + I2C_BOARD_INFO("lm87", 0x2e), + .platform_data = &sfn4112f_lm87_channel, +}; + +#define SFN4112F_ACT_LED 0 +#define SFN4112F_LINK_LED 1 + +static void sfn4112f_init_phy(struct efx_nic *efx) +{ + falcon_qt202x_set_led(efx, SFN4112F_ACT_LED, + QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT); + falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, + QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT); +} + +static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + int reg; + + switch (mode) { + case EFX_LED_OFF: + reg = QUAKE_LED_OFF; + break; + case EFX_LED_ON: + reg = QUAKE_LED_ON; + break; + default: + reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT; + break; + } + + falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg); +} + +static int sfn4112f_check_hw(struct efx_nic *efx) +{ + /* Mask out unused sensors */ + return efx_check_lm87(efx, ~0x48); +} + +static int sfn4112f_init(struct efx_nic *efx) +{ + return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); +} + +/***************************************************************************** + * Support for the SFE4003 + * + */ +static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */ + +static const u8 sfe4003_lm87_regs[] = { + LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */ + LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ + LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ + LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ + LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ + LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS), + 0 +}; + +static const struct i2c_board_info sfe4003_hwmon_info = { + I2C_BOARD_INFO("lm87", 0x2e), + .platform_data = &sfe4003_lm87_channel, +}; + +/* Board-specific LED info. */ +#define SFE4003_RED_LED_GPIO 11 +#define SFE4003_LED_ON 1 +#define SFE4003_LED_OFF 0 + +static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + struct falcon_board *board = falcon_board(efx); + + /* The LEDs were not wired to GPIOs before A3 */ + if (board->minor < 3 && board->major == 0) + return; + + falcon_txc_set_gpio_val( + efx, SFE4003_RED_LED_GPIO, + (mode == EFX_LED_ON) ? 
SFE4003_LED_ON : SFE4003_LED_OFF); +} + +static void sfe4003_init_phy(struct efx_nic *efx) +{ + struct falcon_board *board = falcon_board(efx); + + /* The LEDs were not wired to GPIOs before A3 */ + if (board->minor < 3 && board->major == 0) + return; + + falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT); + falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF); +} + +static int sfe4003_check_hw(struct efx_nic *efx) +{ + struct falcon_board *board = falcon_board(efx); + + /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time + * (bad sensor) so we mask it out. */ + unsigned alarm_mask = + (board->major == 0 && board->minor <= 2) ? + ~LM87_ALARM_TEMP_EXT1 : ~0; + + return efx_check_lm87(efx, alarm_mask); +} + +static int sfe4003_init(struct efx_nic *efx) +{ + return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs); +} + +static const struct falcon_board_type board_types[] = { + { + .id = FALCON_BOARD_SFE4001, + .init = sfe4001_init, + .init_phy = efx_port_dummy_op_void, + .fini = sfe4001_fini, + .set_id_led = tenxpress_set_id_led, + .monitor = sfe4001_check_hw, + }, + { + .id = FALCON_BOARD_SFE4002, + .init = sfe4002_init, + .init_phy = sfe4002_init_phy, + .fini = efx_fini_lm87, + .set_id_led = sfe4002_set_id_led, + .monitor = sfe4002_check_hw, + }, + { + .id = FALCON_BOARD_SFE4003, + .init = sfe4003_init, + .init_phy = sfe4003_init_phy, + .fini = efx_fini_lm87, + .set_id_led = sfe4003_set_id_led, + .monitor = sfe4003_check_hw, + }, + { + .id = FALCON_BOARD_SFN4112F, + .init = sfn4112f_init, + .init_phy = sfn4112f_init_phy, + .fini = efx_fini_lm87, + .set_id_led = sfn4112f_set_id_led, + .monitor = sfn4112f_check_hw, + }, +}; + +int falcon_probe_board(struct efx_nic *efx, u16 revision_info) +{ + struct falcon_board *board = falcon_board(efx); + u8 type_id = FALCON_BOARD_TYPE(revision_info); + int i; + + board->major = FALCON_BOARD_MAJOR(revision_info); + board->minor = FALCON_BOARD_MINOR(revision_info); + + for (i = 0; i < ARRAY_SIZE(board_types); i++) + if (board_types[i].id == type_id) + board->type = &board_types[i]; + + if (board->type) { + return 0; + } else { + netif_err(efx, probe, efx->net_dev, "unknown board type %d\n", + type_id); + return -ENODEV; + } +} diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c new file mode 100644 index 000000000..bb89e96a1 --- /dev/null +++ b/drivers/net/ethernet/sfc/farch.c @@ -0,0 +1,2969 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "nic.h" +#include "farch_regs.h" +#include "io.h" +#include "workarounds.h" + +/* Falcon-architecture (SFC4000 and SFC9000-family) support */ + +/************************************************************************** + * + * Configurable values + * + ************************************************************************** + */ + +/* This is set to 16 for a good reason. 
In summary, if larger than + * 16, the descriptor cache holds more than a default socket + * buffer's worth of packets (for UDP we can only have at most one + * socket buffer's worth outstanding). This combined with the fact + * that we only get 1 TX event per descriptor cache means the NIC + * goes idle. + */ +#define TX_DC_ENTRIES 16 +#define TX_DC_ENTRIES_ORDER 1 + +#define RX_DC_ENTRIES 64 +#define RX_DC_ENTRIES_ORDER 3 + +/* If EFX_MAX_INT_ERRORS internal errors occur within + * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and + * disable it. + */ +#define EFX_INT_ERROR_EXPIRE 3600 +#define EFX_MAX_INT_ERRORS 5 + +/* Depth of RX flush request fifo */ +#define EFX_RX_FLUSH_COUNT 4 + +/* Driver generated events */ +#define _EFX_CHANNEL_MAGIC_TEST 0x000101 +#define _EFX_CHANNEL_MAGIC_FILL 0x000102 +#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 +#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 + +#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) +#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) + +#define EFX_CHANNEL_MAGIC_TEST(_channel) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) +#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ + efx_rx_queue_index(_rx_queue)) +#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ + efx_rx_queue_index(_rx_queue)) +#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ + _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ + (_tx_queue)->queue) + +static void efx_farch_magic_event(struct efx_channel *channel, u32 magic); + +/************************************************************************** + * + * Hardware access + * + **************************************************************************/ + +static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, + unsigned int index) +{ + efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, + value, index); +} + +static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, + const efx_oword_t *mask) +{ + return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || + ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); +} + +int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs) +{ + unsigned address = 0, i, j; + efx_oword_t mask, imask, original, reg, buf; + + for (i = 0; i < n_regs; ++i) { + address = regs[i].address; + mask = imask = regs[i].mask; + EFX_INVERT_OWORD(imask); + + efx_reado(efx, &original, address); + + /* bit sweep on and off */ + for (j = 0; j < 128; j++) { + if (!EFX_EXTRACT_OWORD32(mask, j, j)) + continue; + + /* Test this testable bit can be set in isolation */ + EFX_AND_OWORD(reg, original, mask); + EFX_SET_OWORD32(reg, j, j, 1); + + efx_writeo(efx, ®, address); + efx_reado(efx, &buf, address); + + if (efx_masked_compare_oword(®, &buf, &mask)) + goto fail; + + /* Test this testable bit can be cleared in isolation */ + EFX_OR_OWORD(reg, original, mask); + EFX_SET_OWORD32(reg, j, j, 0); + + efx_writeo(efx, ®, address); + efx_reado(efx, &buf, address); + + if (efx_masked_compare_oword(®, &buf, &mask)) + goto fail; + } + + efx_writeo(efx, &original, address); + } + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, + "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT + " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), + EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); + return -EIO; +} + 
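The register walk above exercises every maskable bit of each register both set and cleared in isolation, comparing the readback under the same mask. The short standalone sketch below restates that pattern on a plain 64-bit register; it is not part of the driver or the patch, and fake_readq(), fake_writeq() and masked_differ() are invented stand-ins for the efx_reado()/efx_writeo() accessors and efx_masked_compare_oword().

/* Illustrative sketch only: the bit-sweep register test reduced to a
 * single 64-bit register with an in-memory backend.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_reg;                       /* stands in for NIC register space */
static void fake_writeq(uint64_t v) { fake_reg = v; }
static uint64_t fake_readq(void)    { return fake_reg; }

/* True if a and b differ in any bit covered by mask */
static bool masked_differ(uint64_t a, uint64_t b, uint64_t mask)
{
	return ((a ^ b) & mask) != 0;
}

/* Sweep every testable bit on and off in isolation, checking readback */
static int test_register(uint64_t mask)
{
	uint64_t original = fake_readq();
	int bit;

	for (bit = 0; bit < 64; bit++) {
		uint64_t reg;

		if (!(mask & (1ULL << bit)))
			continue;               /* bit is not testable */

		/* Test that this bit can be set in isolation */
		reg = (original & mask) | (1ULL << bit);
		fake_writeq(reg);
		if (masked_differ(reg, fake_readq(), mask))
			return -1;

		/* Test that this bit can be cleared in isolation */
		reg = (original | mask) & ~(1ULL << bit);
		fake_writeq(reg);
		if (masked_differ(reg, fake_readq(), mask))
			return -1;
	}
	fake_writeq(original);                  /* restore original value */
	return 0;
}

int main(void)
{
	printf("register test: %s\n", test_register(0x0003FFFF) ? "FAIL" : "ok");
	return 0;
}

Restoring the original value at the end of the loop mirrors the driver's final efx_writeo(efx, &original, address) before moving on to the next register in the table.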
+/************************************************************************** + * + * Special buffer handling + * Special buffers are used for event queues and the TX and RX + * descriptor rings. + * + *************************************************************************/ + +/* + * Initialise a special buffer + * + * This will define a buffer (previously allocated via + * efx_alloc_special_buffer()) in the buffer table, allowing + * it to be used for event queues, descriptor rings etc. + */ +static void +efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_qword_t buf_desc; + unsigned int index; + dma_addr_t dma_addr; + int i; + + EFX_BUG_ON_PARANOID(!buffer->buf.addr); + + /* Write buffer descriptors to NIC */ + for (i = 0; i < buffer->entries; i++) { + index = buffer->index + i; + dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); + netif_dbg(efx, probe, efx->net_dev, + "mapping special buffer %d at %llx\n", + index, (unsigned long long)dma_addr); + EFX_POPULATE_QWORD_3(buf_desc, + FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + efx_write_buf_tbl(efx, &buf_desc, index); + } +} + +/* Unmaps a buffer and clears the buffer table entries */ +static void +efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + efx_oword_t buf_tbl_upd; + unsigned int start = buffer->index; + unsigned int end = (buffer->index + buffer->entries - 1); + + if (!buffer->entries) + return; + + netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", + buffer->index, buffer->index + buffer->entries - 1); + + EFX_POPULATE_OWORD_4(buf_tbl_upd, + FRF_AZ_BUF_UPD_CMD, 0, + FRF_AZ_BUF_CLR_CMD, 1, + FRF_AZ_BUF_CLR_END_ID, end, + FRF_AZ_BUF_CLR_START_ID, start); + efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); +} + +/* + * Allocate a new special buffer + * + * This allocates memory for a new buffer, clears it and allocates a + * new buffer ID range. It does not write into the buffer table. + * + * This call will allocate 4KB buffers, since 8KB buffers can't be + * used for event queues and descriptor rings. 
+ */ +static int efx_alloc_special_buffer(struct efx_nic *efx, + struct efx_special_buffer *buffer, + unsigned int len) +{ +#ifdef CONFIG_SFC_SRIOV + struct siena_nic_data *nic_data = efx->nic_data; +#endif + len = ALIGN(len, EFX_BUF_SIZE); + + if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) + return -ENOMEM; + buffer->entries = len / EFX_BUF_SIZE; + BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); + + /* Select new buffer ID */ + buffer->index = efx->next_buffer_table; + efx->next_buffer_table += buffer->entries; +#ifdef CONFIG_SFC_SRIOV + BUG_ON(efx_siena_sriov_enabled(efx) && + nic_data->vf_buftbl_base < efx->next_buffer_table); +#endif + + netif_dbg(efx, probe, efx->net_dev, + "allocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->buf.dma_addr, len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); + + return 0; +} + +static void +efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) +{ + if (!buffer->buf.addr) + return; + + netif_dbg(efx, hw, efx->net_dev, + "deallocating special buffers %d-%d at %llx+%x " + "(virt %p phys %llx)\n", buffer->index, + buffer->index + buffer->entries - 1, + (u64)buffer->buf.dma_addr, buffer->buf.len, + buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); + + efx_nic_free_buffer(efx, &buffer->buf); + buffer->entries = 0; +} + +/************************************************************************** + * + * TX path + * + **************************************************************************/ + +/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ +static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned write_ptr; + efx_dword_t reg; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(tx_queue->efx, ®, + FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); +} + +/* Write pointer and first descriptor for TX descriptor ring */ +static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, + const efx_qword_t *txd) +{ + unsigned write_ptr; + efx_oword_t reg; + + BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); + BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, + FRF_AZ_TX_DESC_WPTR, write_ptr); + reg.qword[0] = *txd; + efx_writeo_page(tx_queue->efx, ®, + FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); +} + + +/* For each entry inserted into the software descriptor ring, create a + * descriptor in the hardware TX descriptor ring (in host memory), and + * write a doorbell. 
+ */ +void efx_farch_tx_write(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + efx_qword_t *txd; + unsigned write_ptr; + unsigned old_write_count = tx_queue->write_count; + + BUG_ON(tx_queue->write_count == tx_queue->insert_count); + + do { + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + txd = efx_tx_desc(tx_queue, write_ptr); + ++tx_queue->write_count; + + EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION); + + /* Create TX descriptor ring entry */ + BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); + EFX_POPULATE_QWORD_4(*txd, + FSF_AZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, + FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, + FSF_AZ_TX_KER_BUF_REGION, 0, + FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); + } while (tx_queue->write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { + txd = efx_tx_desc(tx_queue, + old_write_count & tx_queue->ptr_mask); + efx_farch_push_tx_desc(tx_queue, txd); + ++tx_queue->pushes; + } else { + efx_farch_notify_tx_desc(tx_queue); + } +} + +/* Allocate hardware resources for a TX queue */ +int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned entries; + + entries = tx_queue->ptr_mask + 1; + return efx_alloc_special_buffer(efx, &tx_queue->txd, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_tx_init(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t reg; + + /* Pin TX descriptor ring */ + efx_init_special_buffer(efx, &tx_queue->txd); + + /* Push TX descriptor ring to card */ + EFX_POPULATE_OWORD_10(reg, + FRF_AZ_TX_DESCQ_EN, 1, + FRF_AZ_TX_ISCSI_DDIG_EN, 0, + FRF_AZ_TX_ISCSI_HDIG_EN, 0, + FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, + FRF_AZ_TX_DESCQ_EVQ_ID, + tx_queue->channel->channel, + FRF_AZ_TX_DESCQ_OWNER_ID, 0, + FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, + FRF_AZ_TX_DESCQ_SIZE, + __ffs(tx_queue->txd.entries), + FRF_AZ_TX_DESCQ_TYPE, 0, + FRF_BZ_TX_NON_IP_DROP_DIS, 1); + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, + !csum); + } + + efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, + tx_queue->queue); + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { + /* Only 128 bits in this register */ + BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); + + efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + __clear_bit_le(tx_queue->queue, ®); + else + __set_bit_le(tx_queue->queue, ®); + efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); + } + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_POPULATE_OWORD_1(reg, + FRF_BZ_TX_PACE, + (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 
+ FFE_BZ_TX_PACE_OFF : + FFE_BZ_TX_PACE_RESERVED); + efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, + tx_queue->queue); + } +} + +static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_flush_descq; + + WARN_ON(atomic_read(&tx_queue->flush_outstanding)); + atomic_set(&tx_queue->flush_outstanding, 1); + + EFX_POPULATE_OWORD_2(tx_flush_descq, + FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); + efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); +} + +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + efx_oword_t tx_desc_ptr; + + /* Remove TX descriptor ring from card */ + EFX_ZERO_OWORD(tx_desc_ptr); + efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, + tx_queue->queue); + + /* Unpin TX descriptor ring */ + efx_fini_special_buffer(efx, &tx_queue->txd); +} + +/* Free buffers backing TX queue */ +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue) +{ + efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); +} + +/************************************************************************** + * + * RX path + * + **************************************************************************/ + +/* This creates an entry in the RX descriptor queue */ +static inline void +efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_rx_buffer *rx_buf; + efx_qword_t *rxd; + + rxd = efx_rx_desc(rx_queue, index); + rx_buf = efx_rx_buffer(rx_queue, index); + EFX_POPULATE_QWORD_3(*rxd, + FSF_AZ_RX_KER_BUF_SIZE, + rx_buf->len - + rx_queue->efx->type->rx_buffer_padding, + FSF_AZ_RX_KER_BUF_REGION, 0, + FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); +} + +/* This writes to the RX_DESC_WPTR register for the specified receive + * descriptor ring. + */ +void efx_farch_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_dword_t reg; + unsigned write_ptr; + + while (rx_queue->notified_count != rx_queue->added_count) { + efx_farch_build_rx_desc( + rx_queue, + rx_queue->notified_count & rx_queue->ptr_mask); + ++rx_queue->notified_count; + } + + wmb(); + write_ptr = rx_queue->added_count & rx_queue->ptr_mask; + EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); + efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, + efx_rx_queue_index(rx_queue)); +} + +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned entries; + + entries = rx_queue->ptr_mask + 1; + return efx_alloc_special_buffer(efx, &rx_queue->rxd, + entries * sizeof(efx_qword_t)); +} + +void efx_farch_rx_init(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; + bool iscsi_digest_en = is_b0; + bool jumbo_en; + + /* For kernel-mode queues in Falcon A1, the JUMBO flag enables + * DMA to continue after a PCIe page boundary (and scattering + * is not possible). In Falcon B0 and Siena, it enables + * scatter. 
+ */ + jumbo_en = !is_b0 || efx->rx_scatter; + + netif_dbg(efx, hw, efx->net_dev, + "RX queue %d ring in special buffers %d-%d\n", + efx_rx_queue_index(rx_queue), rx_queue->rxd.index, + rx_queue->rxd.index + rx_queue->rxd.entries - 1); + + rx_queue->scatter_n = 0; + + /* Pin RX descriptor ring */ + efx_init_special_buffer(efx, &rx_queue->rxd); + + /* Push RX descriptor ring to card */ + EFX_POPULATE_OWORD_10(rx_desc_ptr, + FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, + FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, + FRF_AZ_RX_DESCQ_EVQ_ID, + efx_rx_queue_channel(rx_queue)->channel, + FRF_AZ_RX_DESCQ_OWNER_ID, 0, + FRF_AZ_RX_DESCQ_LABEL, + efx_rx_queue_index(rx_queue), + FRF_AZ_RX_DESCQ_SIZE, + __ffs(rx_queue->rxd.entries), + FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , + FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, + FRF_AZ_RX_DESCQ_EN, 1); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); +} + +static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + efx_oword_t rx_flush_descq; + + EFX_POPULATE_OWORD_2(rx_flush_descq, + FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_RX_FLUSH_DESCQ, + efx_rx_queue_index(rx_queue)); + efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); +} + +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue) +{ + efx_oword_t rx_desc_ptr; + struct efx_nic *efx = rx_queue->efx; + + /* Remove RX descriptor ring from card */ + EFX_ZERO_OWORD(rx_desc_ptr); + efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, + efx_rx_queue_index(rx_queue)); + + /* Unpin RX descriptor ring */ + efx_fini_special_buffer(efx, &rx_queue->rxd); +} + +/* Free buffers backing RX queue */ +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue) +{ + efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); +} + +/************************************************************************** + * + * Flush handling + * + **************************************************************************/ + +/* efx_farch_flush_queues() must be woken up when all flushes are completed, + * or more RX flushes can be kicked off. 
+ */ +static bool efx_farch_flush_wake(struct efx_nic *efx) +{ + /* Ensure that all updates are visible to efx_farch_flush_queues() */ + smp_mb(); + + return (atomic_read(&efx->active_queues) == 0 || + (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT + && atomic_read(&efx->rxq_flush_pending) > 0)); +} + +static bool efx_check_tx_flush_complete(struct efx_nic *efx) +{ + bool i = true; + efx_oword_t txd_ptr_tbl; + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_reado_table(efx, &txd_ptr_tbl, + FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); + if (EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_FLUSH) || + EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_EN)) { + netif_dbg(efx, hw, efx->net_dev, + "flush did not complete on TXQ %d\n", + tx_queue->queue); + i = false; + } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, + 1, 0)) { + /* The flush is complete, but we didn't + * receive a flush completion event + */ + netif_dbg(efx, hw, efx->net_dev, + "flush complete on TXQ %d, so drain " + "the queue\n", tx_queue->queue); + /* Don't need to increment active_queues as it + * has already been incremented for the queues + * which did not drain + */ + efx_farch_magic_event(channel, + EFX_CHANNEL_MAGIC_TX_DRAIN( + tx_queue)); + } + } + } + + return i; +} + +/* Flush all the transmit queues, and continue flushing receive queues until + * they're all flushed. Wait for the DRAIN events to be received so that there + * are no more RX and TX events left on any channel. */ +static int efx_farch_do_flush(struct efx_nic *efx) +{ + unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + struct efx_tx_queue *tx_queue; + int rc = 0; + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + efx_farch_flush_tx_queue(tx_queue); + } + efx_for_each_channel_rx_queue(rx_queue, channel) { + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } + } + + while (timeout && atomic_read(&efx->active_queues) > 0) { + /* If SRIOV is enabled, then offload receive queue flushing to + * the firmware (though we will still have to poll for + * completion). If that fails, fall back to the old scheme. 
+ */ + if (efx_siena_sriov_enabled(efx)) { + rc = efx_mcdi_flush_rxqs(efx); + if (!rc) + goto wait; + } + + /* The hardware supports four concurrent rx flushes, each of + * which may need to be retried if there is an outstanding + * descriptor fetch + */ + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + if (atomic_read(&efx->rxq_flush_outstanding) >= + EFX_RX_FLUSH_COUNT) + break; + + if (rx_queue->flush_pending) { + rx_queue->flush_pending = false; + atomic_dec(&efx->rxq_flush_pending); + atomic_inc(&efx->rxq_flush_outstanding); + efx_farch_flush_rx_queue(rx_queue); + } + } + } + + wait: + timeout = wait_event_timeout(efx->flush_wq, + efx_farch_flush_wake(efx), + timeout); + } + + if (atomic_read(&efx->active_queues) && + !efx_check_tx_flush_complete(efx)) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " + "(rx %d+%d)\n", atomic_read(&efx->active_queues), + atomic_read(&efx->rxq_flush_outstanding), + atomic_read(&efx->rxq_flush_pending)); + rc = -ETIMEDOUT; + + atomic_set(&efx->active_queues, 0); + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + } + + return rc; +} + +int efx_farch_fini_dmaq(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + int rc = 0; + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + /* Only perform flush if DMA is enabled */ + if (efx->pci_dev->is_busmaster) { + efx->type->prepare_flush(efx); + rc = efx_farch_do_flush(efx); + efx->type->finish_flush(efx); + } + + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_farch_rx_fini(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_farch_tx_fini(tx_queue); + } + } + + return rc; +} + +/* Reset queue and flush accounting after FLR + * + * One possible cause of FLR recovery is that DMA may be failing (eg. if bus + * mastering was disabled), in which case we don't receive (RXQ) flush + * completion events. This means that efx->rxq_flush_outstanding remained at 4 + * after the FLR; also, efx->active_queues was non-zero (as no flush completion + * events were received, and we didn't go through efx_check_tx_flush_complete()) + * If we don't fix this up, on the next call to efx_realloc_channels() we won't + * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 + * for batched flush requests; and the efx->active_queues gets messed up because + * we keep incrementing for the newly initialised queues, but it never went to + * zero previously. Then we get a timeout every time we try to restart the + * queues, as it doesn't go back to zero when we should be flushing the queues. + */ +void efx_farch_finish_flr(struct efx_nic *efx) +{ + atomic_set(&efx->rxq_flush_pending, 0); + atomic_set(&efx->rxq_flush_outstanding, 0); + atomic_set(&efx->active_queues, 0); +} + + +/************************************************************************** + * + * Event queue processing + * Event queues are processed by per-channel tasklets. + * + **************************************************************************/ + +/* Update a channel's event queue's read pointer (RPTR) register + * + * This writes the EVQ_RPTR_REG register for the specified channel's + * event queue. 
+ */ +void efx_farch_ev_read_ack(struct efx_channel *channel) +{ + efx_dword_t reg; + struct efx_nic *efx = channel->efx; + + EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, + channel->eventq_read_ptr & channel->eventq_mask); + + /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size + * of 4 bytes, but it is really 16 bytes just like later revisions. + */ + efx_writed(efx, ®, + efx->type->evq_rptr_tbl_base + + FR_BZ_EVQ_RPTR_STEP * channel->channel); +} + +/* Use HW to insert a SW defined event */ +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event) +{ + efx_oword_t drv_ev_reg; + + BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || + FRF_AZ_DRV_EV_DATA_WIDTH != 64); + drv_ev_reg.u32[0] = event->u32[0]; + drv_ev_reg.u32[1] = event->u32[1]; + drv_ev_reg.u32[2] = 0; + drv_ev_reg.u32[3] = 0; + EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); + efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); +} + +static void efx_farch_magic_event(struct efx_channel *channel, u32 magic) +{ + efx_qword_t event; + + EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, + FSE_AZ_EV_CODE_DRV_GEN_EV, + FSF_AZ_DRV_GEN_EV_MAGIC, magic); + efx_farch_generate_event(channel->efx, channel->channel, &event); +} + +/* Handle a transmit completion event + * + * The NIC batches TX completion events; the message we receive is of + * the form "complete all TX events up to this index". + */ +static int +efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) +{ + unsigned int tx_ev_desc_ptr; + unsigned int tx_ev_q_label; + struct efx_tx_queue *tx_queue; + struct efx_nic *efx = channel->efx; + int tx_packets = 0; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return 0; + + if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { + /* Transmit completion */ + tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = efx_channel_get_tx_queue( + channel, tx_ev_q_label % EFX_TXQ_TYPES); + tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & + tx_queue->ptr_mask); + efx_xmit_done(tx_queue, tx_ev_desc_ptr); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { + /* Rewrite the FIFO write pointer */ + tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); + tx_queue = efx_channel_get_tx_queue( + channel, tx_ev_q_label % EFX_TXQ_TYPES); + + netif_tx_lock(efx->net_dev); + efx_farch_notify_tx_desc(tx_queue); + netif_tx_unlock(efx->net_dev); + } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } else { + netif_err(efx, tx_err, efx->net_dev, + "channel %d unexpected TX event " + EFX_QWORD_FMT"\n", channel->channel, + EFX_QWORD_VAL(*event)); + } + + return tx_packets; +} + +/* Detect errors included in the rx_evt_pkt_ok bit. 
*/ +static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, + const efx_qword_t *event) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; + bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; + bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; + bool rx_ev_other_err, rx_ev_pause_frm; + bool rx_ev_hdr_type, rx_ev_mcast_pkt; + unsigned rx_ev_pkt_type; + + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); + rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); + rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); + rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); + rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); + rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); + rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, + FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); + rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); + rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); + rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? + 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); + rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); + + /* Every error apart from tobe_disc and pause_frm */ + rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | + rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | + rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); + + /* Count errors that are not in MAC stats. Ignore expected + * checksum errors during self-test. */ + if (rx_ev_frm_trunc) + ++channel->n_rx_frm_trunc; + else if (rx_ev_tobe_disc) + ++channel->n_rx_tobe_disc; + else if (!efx->loopback_selftest) { + if (rx_ev_ip_hdr_chksum_err) + ++channel->n_rx_ip_hdr_chksum_err; + else if (rx_ev_tcp_udp_chksum_err) + ++channel->n_rx_tcp_udp_chksum_err; + } + + /* TOBE_DISC is expected on unicast mismatches; don't print out an + * error message. FRM_TRUNC indicates RXDP dropped the packet due + * to a FIFO overflow. + */ +#ifdef DEBUG + if (rx_ev_other_err && net_ratelimit()) { + netif_dbg(efx, rx_err, efx->net_dev, + " RX queue %d unexpected RX event " + EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", + efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), + rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", + rx_ev_ip_hdr_chksum_err ? + " [IP_HDR_CHKSUM_ERR]" : "", + rx_ev_tcp_udp_chksum_err ? + " [TCP_UDP_CHKSUM_ERR]" : "", + rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", + rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", + rx_ev_drib_nib ? " [DRIB_NIB]" : "", + rx_ev_tobe_disc ? " [TOBE_DISC]" : "", + rx_ev_pause_frm ? " [PAUSE]" : ""); + } +#endif + + /* The frame must be discarded if any of these are true. */ + return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | + rx_ev_tobe_disc | rx_ev_pause_frm) ? + EFX_RX_PKT_DISCARD : 0; +} + +/* Handle receive events that are not in-order. Return true if this + * can be handled as a partial packet discard, false if it's more + * serious. 
+ */ +static bool +efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) +{ + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + unsigned expected, dropped; + + if (rx_queue->scatter_n && + index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & + rx_queue->ptr_mask)) { + ++channel->n_rx_nodesc_trunc; + return true; + } + + expected = rx_queue->removed_count & rx_queue->ptr_mask; + dropped = (index - expected) & rx_queue->ptr_mask; + netif_info(efx, rx_err, efx->net_dev, + "dropped %d events (index=%d expected=%d)\n", + dropped, index, expected); + + efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? + RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); + return false; +} + +/* Handle a packet received event + * + * The NIC gives a "discard" flag if it's a unicast packet with the + * wrong destination address + * Also "is multicast" and "matches multicast filter" flags can be used to + * discard non-matching multicast packets. + */ +static void +efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) +{ + unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; + unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; + unsigned expected_ptr; + bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; + u16 flags; + struct efx_rx_queue *rx_queue; + struct efx_nic *efx = channel->efx; + + if (unlikely(ACCESS_ONCE(efx->reset_pending))) + return; + + rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); + rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); + WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != + channel->channel); + + rx_queue = efx_channel_get_rx_queue(channel); + + rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); + expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & + rx_queue->ptr_mask); + + /* Check for partial drops and other errors */ + if (unlikely(rx_ev_desc_ptr != expected_ptr) || + unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { + if (rx_ev_desc_ptr != expected_ptr && + !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) + return; + + /* Discard all pending fragments */ + if (rx_queue->scatter_n) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; + } + + /* Return if there is no new fragment */ + if (rx_ev_desc_ptr != expected_ptr) + return; + + /* Discard new fragment if not SOP */ + if (!rx_ev_sop) { + efx_rx_packet( + rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + 1, 0, EFX_RX_PKT_DISCARD); + ++rx_queue->removed_count; + return; + } + } + + ++rx_queue->scatter_n; + if (rx_ev_cont) + return; + + rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); + rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); + rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); + + if (likely(rx_ev_pkt_ok)) { + /* If packet is marked as OK then we can rely on the + * hardware checksum and classification. 
+ */ + flags = 0; + switch (rx_ev_hdr_type) { + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: + flags |= EFX_RX_PKT_TCP; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: + flags |= EFX_RX_PKT_CSUMMED; + /* fall through */ + case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: + case FSE_AZ_RX_EV_HDR_TYPE_OTHER: + break; + } + } else { + flags = efx_farch_handle_rx_not_ok(rx_queue, event); + } + + /* Detect multicast packets that didn't match the filter */ + rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); + if (rx_ev_mcast_pkt) { + unsigned int rx_ev_mcast_hash_match = + EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); + + if (unlikely(!rx_ev_mcast_hash_match)) { + ++channel->n_rx_mcast_mismatch; + flags |= EFX_RX_PKT_DISCARD; + } + } + + channel->irq_mod_score += 2; + + /* Handle received packet */ + efx_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask, + rx_queue->scatter_n, rx_ev_byte_cnt, flags); + rx_queue->removed_count += rx_queue->scatter_n; + rx_queue->scatter_n = 0; +} + +/* If this flush done event corresponds to a &struct efx_tx_queue, then + * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue + * of all transmit completions. + */ +static void +efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_tx_queue *tx_queue; + int qid; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { + tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, + qid % EFX_TXQ_TYPES); + if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { + efx_farch_magic_event(tx_queue->channel, + EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); + } + } +} + +/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush + * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add + * the RX queue back to the mask of RX queues in need of flushing. + */ +static void +efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + int qid; + bool failed; + + qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + if (qid >= efx->n_channels) + return; + channel = efx_get_channel(efx, qid); + if (!efx_channel_has_rx_queue(channel)) + return; + rx_queue = efx_channel_get_rx_queue(channel); + + if (failed) { + netif_info(efx, hw, efx->net_dev, + "RXQ %d flush retry\n", qid); + rx_queue->flush_pending = true; + atomic_inc(&efx->rxq_flush_pending); + } else { + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); + } + atomic_dec(&efx->rxq_flush_outstanding); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void +efx_farch_handle_drain_event(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + WARN_ON(atomic_read(&efx->active_queues) == 0); + atomic_dec(&efx->active_queues); + if (efx_farch_flush_wake(efx)) + wake_up(&efx->flush_wq); +} + +static void efx_farch_handle_generated_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_queue *rx_queue = + efx_channel_has_rx_queue(channel) ? 
+ efx_channel_get_rx_queue(channel) : NULL; + unsigned magic, code; + + magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); + code = _EFX_CHANNEL_MAGIC_CODE(magic); + + if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { + channel->event_test_cpu = raw_smp_processor_id(); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { + /* The queue must be empty, so we won't receive any rx + * events, so efx_process_channel() won't refill the + * queue. Refill it here */ + efx_fast_push_rx_descriptors(rx_queue, true); + } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { + efx_farch_handle_drain_event(channel); + } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { + efx_farch_handle_drain_event(channel); + } else { + netif_dbg(efx, hw, efx->net_dev, "channel %d received " + "generated event "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(*event)); + } +} + +static void +efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + unsigned int ev_sub_code; + unsigned int ev_sub_data; + + ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); + ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + + switch (ev_sub_code) { + case FSE_AZ_TX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_tx_flush_done(efx, event); + efx_siena_sriov_tx_flush_done(efx, event); + break; + case FSE_AZ_RX_DESCQ_FLS_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", + channel->channel, ev_sub_data); + efx_farch_handle_rx_flush_done(efx, event); + efx_siena_sriov_rx_flush_done(efx, event); + break; + case FSE_AZ_EVQ_INIT_DONE_EV: + netif_dbg(efx, hw, efx->net_dev, + "channel %d EVQ %d initialised\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_SRM_UPD_DONE_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d SRAM update done\n", channel->channel); + break; + case FSE_AZ_WAKE_UP_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RXQ %d wakeup event\n", + channel->channel, ev_sub_data); + break; + case FSE_AZ_TIMER_EV: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d RX queue %d timer expired\n", + channel->channel, ev_sub_data); + break; + case FSE_AA_RX_RECOVER_EV: + netif_err(efx, rx_err, efx->net_dev, + "channel %d seen DRIVER RX_RESET event. " + "Resetting.\n", channel->channel); + atomic_inc(&efx->rx_reset); + efx_schedule_reset(efx, + EFX_WORKAROUND_6555(efx) ? + RESET_TYPE_RX_RECOVERY : + RESET_TYPE_DISABLE); + break; + case FSE_BZ_RX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, rx_err, efx->net_dev, + "RX DMA Q %d reports descriptor fetch error." + " RX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } else + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); + break; + case FSE_BZ_TX_DSC_ERROR_EV: + if (ev_sub_data < EFX_VI_BASE) { + netif_err(efx, tx_err, efx->net_dev, + "TX DMA Q %d reports descriptor fetch error." 
+ " TX Q %d is disabled.\n", ev_sub_data, + ev_sub_data); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + } else + efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); + break; + default: + netif_vdbg(efx, hw, efx->net_dev, + "channel %d unknown driver event code %d " + "data %04x\n", channel->channel, ev_sub_code, + ev_sub_data); + break; + } +} + +int efx_farch_ev_process(struct efx_channel *channel, int budget) +{ + struct efx_nic *efx = channel->efx; + unsigned int read_ptr; + efx_qword_t event, *p_event; + int ev_code; + int tx_packets = 0; + int spent = 0; + + if (budget <= 0) + return spent; + + read_ptr = channel->eventq_read_ptr; + + for (;;) { + p_event = efx_event(channel, read_ptr); + event = *p_event; + + if (!efx_event_present(&event)) + /* End of events */ + break; + + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d event is "EFX_QWORD_FMT"\n", + channel->channel, EFX_QWORD_VAL(event)); + + /* Clear this event by marking it all ones */ + EFX_SET_QWORD(*p_event); + + ++read_ptr; + + ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); + + switch (ev_code) { + case FSE_AZ_EV_CODE_RX_EV: + efx_farch_handle_rx_event(channel, &event); + if (++spent == budget) + goto out; + break; + case FSE_AZ_EV_CODE_TX_EV: + tx_packets += efx_farch_handle_tx_event(channel, + &event); + if (tx_packets > efx->txq_entries) { + spent = budget; + goto out; + } + break; + case FSE_AZ_EV_CODE_DRV_GEN_EV: + efx_farch_handle_generated_event(channel, &event); + break; + case FSE_AZ_EV_CODE_DRIVER_EV: + efx_farch_handle_driver_event(channel, &event); + break; + case FSE_CZ_EV_CODE_USER_EV: + efx_siena_sriov_event(channel, &event); + break; + case FSE_CZ_EV_CODE_MCDI_EV: + efx_mcdi_process_event(channel, &event); + break; + case FSE_AZ_EV_CODE_GLOBAL_EV: + if (efx->type->handle_global_event && + efx->type->handle_global_event(channel, &event)) + break; + /* else fall through */ + default: + netif_err(channel->efx, hw, channel->efx->net_dev, + "channel %d unknown event type %d (data " + EFX_QWORD_FMT ")\n", channel->channel, + ev_code, EFX_QWORD_VAL(event)); + } + } + +out: + channel->eventq_read_ptr = read_ptr; + return spent; +} + +/* Allocate buffer table entries for event queue */ +int efx_farch_ev_probe(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + unsigned entries; + + entries = channel->eventq_mask + 1; + return efx_alloc_special_buffer(efx, &channel->eventq, + entries * sizeof(efx_qword_t)); +} + +int efx_farch_ev_init(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + netif_dbg(efx, hw, efx->net_dev, + "channel %d event queue in special buffers %d-%d\n", + channel->channel, channel->eventq.index, + channel->eventq.index + channel->eventq.entries - 1); + + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); + } + + /* Pin event queue buffer */ + efx_init_special_buffer(efx, &channel->eventq); + + /* Fill event queue with all ones (i.e. 
empty events) */ + memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); + + /* Push event queue to card */ + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), + FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); + efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, + channel->channel); + + return 0; +} + +void efx_farch_ev_fini(struct efx_channel *channel) +{ + efx_oword_t reg; + struct efx_nic *efx = channel->efx; + + /* Remove event queue from card */ + EFX_ZERO_OWORD(reg); + efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, + channel->channel); + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) + efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); + + /* Unpin event queue */ + efx_fini_special_buffer(efx, &channel->eventq); +} + +/* Free buffers backing event queue */ +void efx_farch_ev_remove(struct efx_channel *channel) +{ + efx_free_special_buffer(channel->efx, &channel->eventq); +} + + +void efx_farch_ev_test_generate(struct efx_channel *channel) +{ + efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); +} + +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue) +{ + efx_farch_magic_event(efx_rx_queue_channel(rx_queue), + EFX_CHANNEL_MAGIC_FILL(rx_queue)); +} + +/************************************************************************** + * + * Hardware interrupts + * The hardware interrupt handler does very little work; all the event + * queue processing is carried out by per-channel tasklets. + * + **************************************************************************/ + +/* Enable/disable/generate interrupts */ +static inline void efx_farch_interrupts(struct efx_nic *efx, + bool enabled, bool force) +{ + efx_oword_t int_en_reg_ker; + + EFX_POPULATE_OWORD_3(int_en_reg_ker, + FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, + FRF_AZ_KER_INT_KER, force, + FRF_AZ_DRV_INT_EN_KER, enabled); + efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); +} + +void efx_farch_irq_enable_master(struct efx_nic *efx) +{ + EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); + wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ + + efx_farch_interrupts(efx, true, false); +} + +void efx_farch_irq_disable_master(struct efx_nic *efx) +{ + /* Disable interrupts */ + efx_farch_interrupts(efx, false, false); +} + +/* Generate a test interrupt + * Interrupt must already have been enabled, otherwise nasty things + * may happen. + */ +void efx_farch_irq_test_generate(struct efx_nic *efx) +{ + efx_farch_interrupts(efx, true, true); +} + +/* Process a fatal interrupt + * Disable bus mastering ASAP and schedule a reset + */ +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) +{ + struct falcon_nic_data *nic_data = efx->nic_data; + efx_oword_t *int_ker = efx->irq_status.addr; + efx_oword_t fatal_intr; + int error, mem_perr; + + efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); + error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); + + netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " + EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), + EFX_OWORD_VAL(fatal_intr), + error ? 
"disabling bus mastering" : "no recognised error"); + + /* If this is a memory parity error dump which blocks are offending */ + mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || + EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); + if (mem_perr) { + efx_oword_t reg; + efx_reado(efx, ®, FR_AZ_MEM_STAT); + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", + EFX_OWORD_VAL(reg)); + } + + /* Disable both devices */ + pci_clear_master(efx->pci_dev); + if (efx_nic_is_dual_func(efx)) + pci_clear_master(nic_data->pci_dev2); + efx_farch_irq_disable_master(efx); + + /* Count errors and reset or disable the NIC accordingly */ + if (efx->int_error_count == 0 || + time_after(jiffies, efx->int_error_expire)) { + efx->int_error_count = 0; + efx->int_error_expire = + jiffies + EFX_INT_ERROR_EXPIRE * HZ; + } + if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - reset scheduled\n"); + efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); + } else { + netif_err(efx, hw, efx->net_dev, + "SYSTEM ERROR - max number of errors seen." + "NIC will be disabled\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + } + + return IRQ_HANDLED; +} + +/* Handle a legacy interrupt + * Acknowledges the interrupt and schedule event queue processing. + */ +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) +{ + struct efx_nic *efx = dev_id; + bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + efx_oword_t *int_ker = efx->irq_status.addr; + irqreturn_t result = IRQ_NONE; + struct efx_channel *channel; + efx_dword_t reg; + u32 queues; + int syserr; + + /* Read the ISR which also ACKs the interrupts */ + efx_readd(efx, ®, FR_BZ_INT_ISR0); + queues = EFX_EXTRACT_DWORD(reg, 0, 31); + + /* Legacy interrupts are disabled too late by the EEH kernel + * code. Disable them earlier. + * If an EEH error occurred, the read will have returned all ones. + */ + if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && + !efx->eeh_disabled_legacy_irq) { + disable_irq_nosync(efx->legacy_irq); + efx->eeh_disabled_legacy_irq = true; + } + + /* Handle non-event-queue sources */ + if (queues & (1U << efx->irq_level) && soft_enabled) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + if (queues != 0) { + efx->irq_zero_count = 0; + + /* Schedule processing of any interrupting queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + if (queues & 1) + efx_schedule_channel_irq(channel); + queues >>= 1; + } + } + result = IRQ_HANDLED; + + } else { + efx_qword_t *event; + + /* Legacy ISR read can return zero once (SF bug 15783) */ + + /* We can't return IRQ_HANDLED more than once on seeing ISR=0 + * because this might be a shared interrupt. */ + if (efx->irq_zero_count++ == 0) + result = IRQ_HANDLED; + + /* Ensure we schedule or rearm all event queues */ + if (likely(soft_enabled)) { + efx_for_each_channel(channel, efx) { + event = efx_event(channel, + channel->eventq_read_ptr); + if (efx_event_present(event)) + efx_schedule_channel_irq(channel); + else + efx_farch_ev_read_ack(channel); + } + } + } + + if (result == IRQ_HANDLED) + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); + + return result; +} + +/* Handle an MSI interrupt + * + * Handle an MSI hardware interrupt. 
This routine schedules event + * queue processing. No interrupt acknowledgement cycle is necessary. + * Also, we never need to check that the interrupt is for us, since + * MSI interrupts cannot be shared. + */ +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + efx_oword_t *int_ker = efx->irq_status.addr; + int syserr; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", + irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); + + if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + return IRQ_HANDLED; + + /* Handle non-event-queue sources */ + if (context->index == efx->irq_level) { + syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); + if (unlikely(syserr)) + return efx_farch_fatal_interrupt(efx); + efx->last_irq_cpu = raw_smp_processor_id(); + } + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(efx->channel[context->index]); + + return IRQ_HANDLED; +} + +/* Setup RSS indirection table. + * This maps from the hash value of the packet to RXQ + */ +void efx_farch_rx_push_indir_table(struct efx_nic *efx) +{ + size_t i = 0; + efx_dword_t dword; + + BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0); + + BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != + FR_BZ_RX_INDIRECTION_TBL_ROWS); + + for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { + EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, + efx->rx_indir_table[i]); + efx_writed(efx, &dword, + FR_BZ_RX_INDIRECTION_TBL + + FR_BZ_RX_INDIRECTION_TBL_STEP * i); + } +} + +/* Looks at available SRAM resources and works out how many queues we + * can support, and where things like descriptor caches should live. + * + * SRAM is split up as follows: + * 0 buftbl entries for channels + * efx->vf_buftbl_base buftbl entries for SR-IOV + * efx->rx_dc_base RX descriptor caches + * efx->tx_dc_base TX descriptor caches + */ +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) +{ + unsigned vi_count, buftbl_min; + +#ifdef CONFIG_SFC_SRIOV + struct siena_nic_data *nic_data = efx->nic_data; +#endif + + /* Account for the buffer table entries backing the datapath channels + * and the descriptor caches for those channels. 
+ */ + buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + + efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + + efx->n_channels * EFX_MAX_EVQ_SIZE) + * sizeof(efx_qword_t) / EFX_BUF_SIZE); + vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + +#ifdef CONFIG_SFC_SRIOV + if (efx->type->sriov_wanted(efx)) { + unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; + + nic_data->vf_buftbl_base = buftbl_min; + + vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; + vi_count = max(vi_count, EFX_VI_BASE); + buftbl_free = (sram_lim_qw - buftbl_min - + vi_count * vi_dc_entries); + + entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * + efx_vf_size(efx)); + vf_limit = min(buftbl_free / entries_per_vf, + (1024U - EFX_VI_BASE) >> efx->vi_scale); + + if (efx->vf_count > vf_limit) { + netif_err(efx, probe, efx->net_dev, + "Reducing VF count from from %d to %d\n", + efx->vf_count, vf_limit); + efx->vf_count = vf_limit; + } + vi_count += efx->vf_count * efx_vf_size(efx); + } +#endif + + efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; + efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; +} + +u32 efx_farch_fpga_ver(struct efx_nic *efx) +{ + efx_oword_t altera_build; + efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); + return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); +} + +void efx_farch_init_common(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Set positions of descriptor caches in SRAM. */ + EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); + efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); + efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); + + /* Set TX descriptor cache size. */ + BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); + efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); + + /* Set RX descriptor cache size. Set low watermark to size-8, as + * this allows most efficient prefetching. + */ + BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); + efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); + EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); + efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); + + /* Program INT_KER address */ + EFX_POPULATE_OWORD_2(temp, + FRF_AZ_NORM_INT_VEC_DIS_KER, + EFX_INT_MODE_USE_MSI(efx), + FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); + efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); + + if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) + /* Use an interrupt level unused by event queues */ + efx->irq_level = 0x1f; + else + /* Use a valid MSI-X vector */ + efx->irq_level = 0; + + /* Enable all the genuinely fatal interrupts. (They are still + * masked by the overall interrupt mask, controlled by + * falcon_interrupts()). + * + * Note: All other fatal interrupts are enabled + */ + EFX_POPULATE_OWORD_3(temp, + FRF_AZ_ILL_ADR_INT_KER_EN, 1, + FRF_AZ_RBUF_OWN_INT_KER_EN, 1, + FRF_AZ_TBUF_OWN_INT_KER_EN, 1); + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) + EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); + EFX_INVERT_OWORD(temp); + efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); + + /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be + * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
+ */ + efx_reado(efx, &temp, FR_AZ_TX_RESERVED); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); + /* Enable SW_EV to inherit in char driver - assume harmless here */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); + /* Prefetch threshold 2 => fetch when descriptor cache half empty */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); + /* Disable hardware watchdog which can misfire */ + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); + /* Squash TX of packets of 16 bytes or less */ + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_POPULATE_OWORD_4(temp, + /* Default values */ + FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, + FRF_BZ_TX_PACE_SB_AF, 0xb, + FRF_BZ_TX_PACE_FB_BASE, 0, + /* Allow large pace values in the + * fast bin. */ + FRF_BZ_TX_PACE_BIN_TH, + FFE_BZ_TX_PACE_RESERVED); + efx_writeo(efx, &temp, FR_BZ_TX_PACE); + } +} + +/************************************************************************** + * + * Filter tables + * + ************************************************************************** + */ + +/* "Fudge factors" - difference between programmed value and actual depth. + * Due to pipelined implementation we need to program H/W with a value that + * is larger than the hop limit we want. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3 +#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1 + +/* Hard maximum search limit. Hardware will time-out beyond 200-something. + * We also need to avoid infinite loops in efx_farch_filter_search() when the + * table is full. + */ +#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200 + +/* Don't try very hard to find space for performance hints, as this is + * counter-productive. 
*/ +#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5 + +enum efx_farch_filter_type { + EFX_FARCH_FILTER_TCP_FULL = 0, + EFX_FARCH_FILTER_TCP_WILD, + EFX_FARCH_FILTER_UDP_FULL, + EFX_FARCH_FILTER_UDP_WILD, + EFX_FARCH_FILTER_MAC_FULL = 4, + EFX_FARCH_FILTER_MAC_WILD, + EFX_FARCH_FILTER_UC_DEF = 8, + EFX_FARCH_FILTER_MC_DEF, + EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */ +}; + +enum efx_farch_filter_table_id { + EFX_FARCH_FILTER_TABLE_RX_IP = 0, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, + EFX_FARCH_FILTER_TABLE_TX_MAC, + EFX_FARCH_FILTER_TABLE_COUNT, +}; + +enum efx_farch_filter_index { + EFX_FARCH_FILTER_INDEX_UC_DEF, + EFX_FARCH_FILTER_INDEX_MC_DEF, + EFX_FARCH_FILTER_SIZE_RX_DEF, +}; + +struct efx_farch_filter_spec { + u8 type:4; + u8 priority:4; + u8 flags; + u16 dmaq_id; + u32 data[3]; +}; + +struct efx_farch_filter_table { + enum efx_farch_filter_table_id id; + u32 offset; /* address of table relative to BAR */ + unsigned size; /* number of entries */ + unsigned step; /* step between entries */ + unsigned used; /* number currently used */ + unsigned long *used_bitmap; + struct efx_farch_filter_spec *spec; + unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT]; +}; + +struct efx_farch_filter_state { + struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT]; +}; + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx); + +/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit + * key derived from the n-tuple. The initial LFSR state is 0xffff. */ +static u16 efx_farch_filter_hash(u32 key) +{ + u16 tmp; + + /* First 16 rounds */ + tmp = 0x1fff ^ key >> 16; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + tmp = tmp ^ tmp >> 9; + /* Last 16 rounds */ + tmp = tmp ^ tmp << 13 ^ key; + tmp = tmp ^ tmp >> 3 ^ tmp >> 6; + return tmp ^ tmp >> 9; +} + +/* To allow for hash collisions, filter search continues at these + * increments from the first possible entry selected by the hash. */ +static u16 efx_farch_filter_increment(u32 key) +{ + return key * 2 - 1; +} + +static enum efx_farch_filter_table_id +efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec) +{ + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_TCP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP != + (EFX_FARCH_FILTER_UDP_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_FULL >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC != + (EFX_FARCH_FILTER_MAC_WILD >> 2)); + BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC != + EFX_FARCH_FILTER_TABLE_RX_MAC + 2); + return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 
2 : 0); +} + +static void efx_farch_filter_push_rx_config(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t filter_ctl; + + efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_TCP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_UDP_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID, + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id); + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, + !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_RSS)); + + /* There is a single bit to enable RX scatter for all + * unmatched packets. Only set it if scatter is + * enabled in both filter specs. + */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags & + table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags & + EFX_FILTER_FLAG_RX_SCATTER)); + } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + /* We don't expose 'default' filters because unmatched + * packets always go to the queue number found in the + * RSS table. But we still need to set the RX scatter + * bit here. 
+ */ + EFX_SET_OWORD_FIELD( + filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, + efx->rx_scatter); + } + + efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); +} + +static void efx_farch_filter_push_tx_limits(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + efx_oword_t tx_cfg; + + efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG); + + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + if (table->size) { + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_FULL] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL); + EFX_SET_OWORD_FIELD( + tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE, + table->search_limit[EFX_FARCH_FILTER_MAC_WILD] + + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD); + } + + efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG); +} + +static int +efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec, + const struct efx_filter_spec *gen_spec) +{ + bool is_full = false; + + if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && + gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT) + return -EINVAL; + + spec->priority = gen_spec->priority; + spec->flags = gen_spec->flags; + spec->dmaq_id = gen_spec->dmaq_id; + + switch (gen_spec->match_flags) { + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT): + is_full = true; + /* fall through */ + case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): { + __be32 rhost, host1, host2; + __be16 rport, port1, port2; + + EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX)); + + if (gen_spec->ether_type != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + if (gen_spec->loc_port == 0 || + (is_full && gen_spec->rem_port == 0)) + return -EADDRNOTAVAIL; + switch (gen_spec->ip_proto) { + case IPPROTO_TCP: + spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL : + EFX_FARCH_FILTER_TCP_WILD); + break; + case IPPROTO_UDP: + spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL : + EFX_FARCH_FILTER_UDP_WILD); + break; + default: + return -EPROTONOSUPPORT; + } + + /* Filter is constructed in terms of source and destination, + * with the odd wrinkle that the ports are swapped in a UDP + * wildcard filter. We need to convert from local and remote + * (= zero for wildcard) addresses. + */ + rhost = is_full ? gen_spec->rem_host[0] : 0; + rport = is_full ? gen_spec->rem_port : 0; + host1 = rhost; + host2 = gen_spec->loc_host[0]; + if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) { + port1 = gen_spec->loc_port; + port2 = rport; + } else { + port1 = rport; + port2 = gen_spec->loc_port; + } + spec->data[0] = ntohl(host1) << 16 | ntohs(port1); + spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16; + spec->data[2] = ntohl(host2); + + break; + } + + case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID: + is_full = true; + /* fall through */ + case EFX_FILTER_MATCH_LOC_MAC: + spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL : + EFX_FARCH_FILTER_MAC_WILD); + spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0; + spec->data[1] = (gen_spec->loc_mac[2] << 24 | + gen_spec->loc_mac[3] << 16 | + gen_spec->loc_mac[4] << 8 | + gen_spec->loc_mac[5]); + spec->data[2] = (gen_spec->loc_mac[0] << 8 | + gen_spec->loc_mac[1]); + break; + + case EFX_FILTER_MATCH_LOC_MAC_IG: + spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ? 
+ EFX_FARCH_FILTER_MC_DEF : + EFX_FARCH_FILTER_UC_DEF); + memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */ + break; + + default: + return -EPROTONOSUPPORT; + } + + return 0; +} + +static void +efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec, + const struct efx_farch_filter_spec *spec) +{ + bool is_full = false; + + /* *gen_spec should be completely initialised, to be consistent + * with efx_filter_init_{rx,tx}() and in case we want to copy + * it back to userland. + */ + memset(gen_spec, 0, sizeof(*gen_spec)); + + gen_spec->priority = spec->priority; + gen_spec->flags = spec->flags; + gen_spec->dmaq_id = spec->dmaq_id; + + switch (spec->type) { + case EFX_FARCH_FILTER_TCP_FULL: + case EFX_FARCH_FILTER_UDP_FULL: + is_full = true; + /* fall through */ + case EFX_FARCH_FILTER_TCP_WILD: + case EFX_FARCH_FILTER_UDP_WILD: { + __be32 host1, host2; + __be16 port1, port2; + + gen_spec->match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + if (is_full) + gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_REM_PORT); + gen_spec->ether_type = htons(ETH_P_IP); + gen_spec->ip_proto = + (spec->type == EFX_FARCH_FILTER_TCP_FULL || + spec->type == EFX_FARCH_FILTER_TCP_WILD) ? + IPPROTO_TCP : IPPROTO_UDP; + + host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16); + port1 = htons(spec->data[0]); + host2 = htonl(spec->data[2]); + port2 = htons(spec->data[1] >> 16); + if (spec->flags & EFX_FILTER_FLAG_TX) { + gen_spec->loc_host[0] = host1; + gen_spec->rem_host[0] = host2; + } else { + gen_spec->loc_host[0] = host2; + gen_spec->rem_host[0] = host1; + } + if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^ + (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) { + gen_spec->loc_port = port1; + gen_spec->rem_port = port2; + } else { + gen_spec->loc_port = port2; + gen_spec->rem_port = port1; + } + + break; + } + + case EFX_FARCH_FILTER_MAC_FULL: + is_full = true; + /* fall through */ + case EFX_FARCH_FILTER_MAC_WILD: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC; + if (is_full) + gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + gen_spec->loc_mac[0] = spec->data[2] >> 8; + gen_spec->loc_mac[1] = spec->data[2]; + gen_spec->loc_mac[2] = spec->data[1] >> 24; + gen_spec->loc_mac[3] = spec->data[1] >> 16; + gen_spec->loc_mac[4] = spec->data[1] >> 8; + gen_spec->loc_mac[5] = spec->data[1]; + gen_spec->outer_vid = htons(spec->data[0]); + break; + + case EFX_FARCH_FILTER_UC_DEF: + case EFX_FARCH_FILTER_MC_DEF: + gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG; + gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF; + break; + + default: + WARN_ON(1); + break; + } +} + +static void +efx_farch_filter_init_rx_auto(struct efx_nic *efx, + struct efx_farch_filter_spec *spec) +{ + /* If there's only one channel then disable RSS for non VF + * traffic, thereby allowing VFs to use RSS when the PF can't. + */ + spec->priority = EFX_FILTER_PRI_AUTO; + spec->flags = (EFX_FILTER_FLAG_RX | + (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); + spec->dmaq_id = 0; +} + +/* Build a filter entry and return its n-tuple key. 
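Before the hardware entry is built below, note how the conversion helpers above pack an IPv4 4-tuple into the three 32-bit words of spec->data[] and later recover it. A minimal standalone round trip of that packing, using host-order integers in place of the __be32/__be16 values the driver converts with ntohl()/ntohs(), could look like this (illustrative sketch only, not driver code):

/* Round-trip check of the data[] layout used by
 * efx_farch_filter_from_gen_spec()/..._to_gen_spec() for an RX TCP
 * full-match filter.  Host-order stand-ins for the big-endian values. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rem_ip = 0xc0a80001, loc_ip = 0xc0a80002; /* 192.168.0.1/2 */
        uint16_t rem_port = 12345, loc_port = 80;
        uint32_t data[3];

        /* Pack: remote host/port into data[0..1], local host into data[2] */
        data[0] = rem_ip << 16 | rem_port;
        data[1] = (uint32_t)loc_port << 16 | rem_ip >> 16;
        data[2] = loc_ip;

        /* Unpack, mirroring the TCP_FULL branch of ..._to_gen_spec() */
        uint32_t host1 = data[0] >> 16 | data[1] << 16;    /* remote host */
        uint16_t port1 = (uint16_t)data[0];                /* remote port */
        uint32_t host2 = data[2];                          /* local host  */
        uint16_t port2 = (uint16_t)(data[1] >> 16);        /* local port  */

        assert(host1 == rem_ip && port1 == rem_port);
        assert(host2 == loc_ip && port2 == loc_port);
        printf("round trip ok: %x:%u -> %x:%u\n",
               (unsigned)host1, (unsigned)port1,
               (unsigned)host2, (unsigned)port2);
        return 0;
}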
*/ +static u32 efx_farch_filter_build(efx_oword_t *filter, + struct efx_farch_filter_spec *spec) +{ + u32 data3; + + switch (efx_farch_filter_spec_table_id(spec)) { + case EFX_FARCH_FILTER_TABLE_RX_IP: { + bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL || + spec->type == EFX_FARCH_FILTER_UDP_WILD); + EFX_POPULATE_OWORD_7( + *filter, + FRF_BZ_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_BZ_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_BZ_TCP_UDP, is_udp, + FRF_BZ_RXQ_ID, spec->dmaq_id, + EFX_DWORD_2, spec->data[2], + EFX_DWORD_1, spec->data[1], + EFX_DWORD_0, spec->data[0]); + data3 = is_udp; + break; + } + + case EFX_FARCH_FILTER_TABLE_RX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_7( + *filter, + FRF_CZ_RMFT_RSS_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_RSS), + FRF_CZ_RMFT_SCATTER_EN, + !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER), + FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id, + FRF_CZ_RMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2], + FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1], + FRF_CZ_RMFT_VLAN_ID, spec->data[0]); + data3 = is_wild; + break; + } + + case EFX_FARCH_FILTER_TABLE_TX_MAC: { + bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD; + EFX_POPULATE_OWORD_5(*filter, + FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id, + FRF_CZ_TMFT_WILDCARD_MATCH, is_wild, + FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2], + FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1], + FRF_CZ_TMFT_VLAN_ID, spec->data[0]); + data3 = is_wild | spec->dmaq_id << 1; + break; + } + + default: + BUG(); + } + + return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3; +} + +static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left, + const struct efx_farch_filter_spec *right) +{ + if (left->type != right->type || + memcmp(left->data, right->data, sizeof(left->data))) + return false; + + if (left->flags & EFX_FILTER_FLAG_TX && + left->dmaq_id != right->dmaq_id) + return false; + + return true; +} + +/* + * Construct/deconstruct external filter IDs. At least the RX filter + * IDs must be ordered by matching priority, for RX NFC semantics. + * + * Deconstruction needs to be robust against invalid IDs so that + * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can + * accept user-provided IDs. 
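The external filter IDs described in the comment above are simply a match-priority range packed above a 13-bit table index, so RX IDs sort by matching priority and a bogus ID decodes to an out-of-range value that the *_safe() lookups below can reject. A standalone sketch of that layout (not driver code; the range value and the seven-entry range table are taken from the definitions that follow):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INDEX_WIDTH 13                          /* EFX_FARCH_FILTER_INDEX_WIDTH */
#define INDEX_MASK  ((1u << INDEX_WIDTH) - 1)

int main(void)
{
        /* An RX TCP wildcard filter has match-priority range 1; say it
         * sits at slot 0x123 of the RX_IP table. */
        uint32_t id = 1u << INDEX_WIDTH | 0x123;

        assert(id == 0x2123);
        assert((id >> INDEX_WIDTH) == 1);       /* range 1 -> RX_IP table    */
        assert((id & INDEX_MASK) == 0x123);     /* index back into the table */

        /* A garbage ID decodes to a range past the 7 listed table entries
         * and is therefore treated as invalid. */
        assert((0xffffffffu >> INDEX_WIDTH) >= 7);

        printf("id=%#x range=%u index=%#x\n", (unsigned)id,
               (unsigned)(id >> INDEX_WIDTH), (unsigned)(id & INDEX_MASK));
        return 0;
}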
+ */ + +#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 + +static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { + [EFX_FARCH_FILTER_TCP_FULL] = 0, + [EFX_FARCH_FILTER_UDP_FULL] = 0, + [EFX_FARCH_FILTER_TCP_WILD] = 1, + [EFX_FARCH_FILTER_UDP_WILD] = 1, + [EFX_FARCH_FILTER_MAC_FULL] = 2, + [EFX_FARCH_FILTER_MAC_WILD] = 3, + [EFX_FARCH_FILTER_UC_DEF] = 4, + [EFX_FARCH_FILTER_MC_DEF] = 4, +}; + +static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { + EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ + EFX_FARCH_FILTER_TABLE_RX_IP, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_MAC, + EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ + EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ +}; + +#define EFX_FARCH_FILTER_INDEX_WIDTH 13 +#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) + +static inline u32 +efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, + unsigned int index) +{ + unsigned int range; + + range = efx_farch_filter_type_match_pri[spec->type]; + if (!(spec->flags & EFX_FILTER_FLAG_RX)) + range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; + + return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; +} + +static inline enum efx_farch_filter_table_id +efx_farch_filter_id_table_id(u32 id) +{ + unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; + + if (range < ARRAY_SIZE(efx_farch_filter_range_table)) + return efx_farch_filter_range_table[range]; + else + return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ +} + +static inline unsigned int efx_farch_filter_id_index(u32 id) +{ + return id & EFX_FARCH_FILTER_INDEX_MASK; +} + +u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; + enum efx_farch_filter_table_id table_id; + + do { + table_id = efx_farch_filter_range_table[range]; + if (state->table[table_id].size != 0) + return range << EFX_FARCH_FILTER_INDEX_WIDTH | + state->table[table_id].size; + } while (range--); + + return 0; +} + +s32 efx_farch_filter_insert(struct efx_nic *efx, + struct efx_filter_spec *gen_spec, + bool replace_equal) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec spec; + efx_oword_t filter; + int rep_index, ins_index; + unsigned int depth = 0; + int rc; + + rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); + if (rc) + return rc; + + table = &state->table[efx_farch_filter_spec_table_id(&spec)]; + if (table->size == 0) + return -EINVAL; + + netif_vdbg(efx, hw, efx->net_dev, + "%s: type %d search_limit=%d", __func__, spec.type, + table->search_limit[spec.type]); + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + /* One filter spec per type */ + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); + BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != + EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); + rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; + ins_index = rep_index; + + spin_lock_bh(&efx->filter_lock); + } else { + /* Search concurrently for + * (1) a filter to be replaced (rep_index): any filter + * with the same match values, up to the current + * search depth for this type, and + * (2) the insertion point (ins_index): (1) or any + * free slot before it or up to the maximum search + * depth for this priority + * We fail if we cannot find (2). 
+ * + * We can stop once either + * (a) we find (1), in which case we have definitely + * found (2) as well; or + * (b) we have searched exhaustively for (1), and have + * either found (2) or searched exhaustively for it + */ + u32 key = efx_farch_filter_build(&filter, &spec); + unsigned int hash = efx_farch_filter_hash(key); + unsigned int incr = efx_farch_filter_increment(key); + unsigned int max_rep_depth = table->search_limit[spec.type]; + unsigned int max_ins_depth = + spec.priority <= EFX_FILTER_PRI_HINT ? + EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX : + EFX_FARCH_FILTER_CTL_SRCH_MAX; + unsigned int i = hash & (table->size - 1); + + ins_index = -1; + depth = 1; + + spin_lock_bh(&efx->filter_lock); + + for (;;) { + if (!test_bit(i, table->used_bitmap)) { + if (ins_index < 0) + ins_index = i; + } else if (efx_farch_filter_equal(&spec, + &table->spec[i])) { + /* Case (a) */ + if (ins_index < 0) + ins_index = i; + rep_index = i; + break; + } + + if (depth >= max_rep_depth && + (ins_index >= 0 || depth >= max_ins_depth)) { + /* Case (b) */ + if (ins_index < 0) { + rc = -EBUSY; + goto out; + } + rep_index = -1; + break; + } + + i = (i + incr) & (table->size - 1); + ++depth; + } + } + + /* If we found a filter to be replaced, check whether we + * should do so + */ + if (rep_index >= 0) { + struct efx_farch_filter_spec *saved_spec = + &table->spec[rep_index]; + + if (spec.priority == saved_spec->priority && !replace_equal) { + rc = -EEXIST; + goto out; + } + if (spec.priority < saved_spec->priority) { + rc = -EPERM; + goto out; + } + if (saved_spec->priority == EFX_FILTER_PRI_AUTO || + saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) + spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; + } + + /* Insert the filter */ + if (ins_index != rep_index) { + __set_bit(ins_index, table->used_bitmap); + ++table->used; + } + table->spec[ins_index] = spec; + + if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { + efx_farch_filter_push_rx_config(efx); + } else { + if (table->search_limit[spec.type] < depth) { + table->search_limit[spec.type] = depth; + if (spec.flags & EFX_FILTER_FLAG_TX) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } + + efx_writeo(efx, &filter, + table->offset + table->step * ins_index); + + /* If we were able to replace a filter by inserting + * at a lower depth, clear the replaced filter + */ + if (ins_index != rep_index && rep_index >= 0) + efx_farch_filter_table_clear_entry(efx, table, + rep_index); + } + + netif_vdbg(efx, hw, efx->net_dev, + "%s: filter type %d index %d rxq %u set", + __func__, spec.type, ins_index, spec.dmaq_id); + rc = efx_farch_filter_make_id(&spec, ins_index); + +out: + spin_unlock_bh(&efx->filter_lock); + return rc; +} + +static void +efx_farch_filter_table_clear_entry(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx) +{ + static efx_oword_t filter; + + EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap)); + BUG_ON(table->offset == 0); /* can't clear MAC default filters */ + + __clear_bit(filter_idx, table->used_bitmap); + --table->used; + memset(&table->spec[filter_idx], 0, sizeof(table->spec[0])); + + efx_writeo(efx, &filter, table->offset + table->step * filter_idx); + + /* If this filter required a greater search depth than + * any other, the search limit for its type can now be + * decreased. However, it is hard to determine that + * unless the table has become completely empty - in + * which case, all its search limits can be set to 0. 
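The probe step used by the search in efx_farch_filter_insert() above, incr = key * 2 - 1, is always odd and therefore coprime with the power-of-two table size, so the sequence (hash + n * incr) mod size visits every slot before repeating; that is what makes a bounded search depth, and the per-type search limits pushed to the hardware, safe. A standalone check of this property (sketch only, not driver code; the hash here is a stand-in, not the LFSR hash above):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { TABLE_SIZE = 256 };                      /* toy power-of-two table  */

int main(void)
{
        uint32_t key = 0xdeadbeef;              /* any 32-bit n-tuple key  */
        uint32_t hash = key ^ key >> 13;        /* stand-in hash function  */
        uint32_t incr = key * 2 - 1;            /* always odd              */
        bool seen[TABLE_SIZE] = { false };
        unsigned i = hash & (TABLE_SIZE - 1);
        unsigned depth, slot;

        for (depth = 0; depth < TABLE_SIZE; depth++) {
                assert(!seen[i]);               /* no slot revisited early */
                seen[i] = true;
                i = (i + incr) & (TABLE_SIZE - 1);
        }
        for (slot = 0; slot < TABLE_SIZE; slot++)
                assert(seen[slot]);             /* every slot was reached  */

        printf("odd increment %#x covers all %u slots\n",
               (unsigned)incr, (unsigned)TABLE_SIZE);
        return 0;
}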
+ */ + if (unlikely(table->used == 0)) { + memset(table->search_limit, 0, sizeof(table->search_limit)); + if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC) + efx_farch_filter_push_tx_limits(efx); + else + efx_farch_filter_push_rx_config(efx); + } +} + +static int efx_farch_filter_remove(struct efx_nic *efx, + struct efx_farch_filter_table *table, + unsigned int filter_idx, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_spec *spec = &table->spec[filter_idx]; + + if (!test_bit(filter_idx, table->used_bitmap) || + spec->priority != priority) + return -ENOENT; + + if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { + efx_farch_filter_init_rx_auto(efx, spec); + efx_farch_filter_push_rx_config(efx); + } else { + efx_farch_filter_table_clear_entry(efx, table, filter_idx); + } + + return 0; +} + +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + struct efx_farch_filter_spec *spec; + int rc; + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + spec = &table->spec[filter_idx]; + + spin_lock_bh(&efx->filter_lock); + rc = efx_farch_filter_remove(efx, table, filter_idx, priority); + spin_unlock_bh(&efx->filter_lock); + + return rc; +} + +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *spec_buf) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + struct efx_farch_filter_spec *spec; + unsigned int filter_idx; + int rc; + + table_id = efx_farch_filter_id_table_id(filter_id); + if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT) + return -ENOENT; + table = &state->table[table_id]; + + filter_idx = efx_farch_filter_id_index(filter_id); + if (filter_idx >= table->size) + return -ENOENT; + spec = &table->spec[filter_idx]; + + spin_lock_bh(&efx->filter_lock); + + if (test_bit(filter_idx, table->used_bitmap) && + spec->priority == priority) { + efx_farch_filter_to_gen_spec(spec_buf, spec); + rc = 0; + } else { + rc = -ENOENT; + } + + spin_unlock_bh(&efx->filter_lock); + + return rc; +} + +static void +efx_farch_filter_table_clear(struct efx_nic *efx, + enum efx_farch_filter_table_id table_id, + enum efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table = &state->table[table_id]; + unsigned int filter_idx; + + spin_lock_bh(&efx->filter_lock); + for (filter_idx = 0; filter_idx < table->size; ++filter_idx) { + if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO) + efx_farch_filter_remove(efx, table, + filter_idx, priority); + } + spin_unlock_bh(&efx->filter_lock); +} + +int efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority) +{ + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC, + priority); + efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF, + priority); + return 0; +} + +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum 
efx_filter_priority priority) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + u32 count = 0; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (test_bit(filter_idx, table->used_bitmap) && + table->spec[filter_idx].priority == priority) + ++count; + } + } + + spin_unlock_bh(&efx->filter_lock); + + return count; +} + +s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + unsigned int filter_idx; + s32 count = 0; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (test_bit(filter_idx, table->used_bitmap) && + table->spec[filter_idx].priority == priority) { + if (count == size) { + count = -EMSGSIZE; + goto out; + } + buf[count++] = efx_farch_filter_make_id( + &table->spec[filter_idx], filter_idx); + } + } + } +out: + spin_unlock_bh(&efx->filter_lock); + + return count; +} + +/* Restore filter state after reset */ +void efx_farch_filter_table_restore(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + table = &state->table[table_id]; + + /* Check whether this is a regular register table */ + if (table->step == 0) + continue; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap)) + continue; + efx_farch_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_farch_filter_push_rx_config(efx); + efx_farch_filter_push_tx_limits(efx); + + spin_unlock_bh(&efx->filter_lock); +} + +void efx_farch_filter_table_remove(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + kfree(state->table[table_id].used_bitmap); + vfree(state->table[table_id].spec); + } + kfree(state); +} + +int efx_farch_filter_table_probe(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state; + struct efx_farch_filter_table *table; + unsigned table_id; + + state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL); + if (!state) + return -ENOMEM; + efx->filter_state = state; + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + table->id = EFX_FARCH_FILTER_TABLE_RX_IP; + table->offset = FR_BZ_RX_FILTER_TBL0; + table->size = FR_BZ_RX_FILTER_TBL0_ROWS; + table->step = FR_BZ_RX_FILTER_TBL0_STEP; + } + + if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_RX_MAC; + table->offset =
FR_CZ_RX_MAC_FILTER_TBL0; + table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; + table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + table->id = EFX_FARCH_FILTER_TABLE_RX_DEF; + table->size = EFX_FARCH_FILTER_SIZE_RX_DEF; + + table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC]; + table->id = EFX_FARCH_FILTER_TABLE_TX_MAC; + table->offset = FR_CZ_TX_MAC_FILTER_TBL0; + table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS; + table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP; + } + + for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) { + table = &state->table[table_id]; + if (table->size == 0) + continue; + table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size), + sizeof(unsigned long), + GFP_KERNEL); + if (!table->used_bitmap) + goto fail; + table->spec = vzalloc(table->size * sizeof(*table->spec)); + if (!table->spec) + goto fail; + } + + table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF]; + if (table->size) { + /* RX default filters must always exist */ + struct efx_farch_filter_spec *spec; + unsigned i; + + for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) { + spec = &table->spec[i]; + spec->type = EFX_FARCH_FILTER_UC_DEF + i; + efx_farch_filter_init_rx_auto(efx, spec); + __set_bit(i, table->used_bitmap); + } + } + + efx_farch_filter_push_rx_config(efx); + + return 0; + +fail: + efx_farch_filter_table_remove(efx); + return -ENOMEM; +} + +/* Update scatter enable flags for filters pointing to our own RX queues */ +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx) +{ + struct efx_farch_filter_state *state = efx->filter_state; + enum efx_farch_filter_table_id table_id; + struct efx_farch_filter_table *table; + efx_oword_t filter; + unsigned int filter_idx; + + spin_lock_bh(&efx->filter_lock); + + for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP; + table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF; + table_id++) { + table = &state->table[table_id]; + + for (filter_idx = 0; filter_idx < table->size; filter_idx++) { + if (!test_bit(filter_idx, table->used_bitmap) || + table->spec[filter_idx].dmaq_id >= + efx->n_rx_channels) + continue; + + if (efx->rx_scatter) + table->spec[filter_idx].flags |= + EFX_FILTER_FLAG_RX_SCATTER; + else + table->spec[filter_idx].flags &= + ~EFX_FILTER_FLAG_RX_SCATTER; + + if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF) + /* Pushed by efx_farch_filter_push_rx_config() */ + continue; + + efx_farch_filter_build(&filter, &table->spec[filter_idx]); + efx_writeo(efx, &filter, + table->offset + table->step * filter_idx); + } + } + + efx_farch_filter_push_rx_config(efx); + + spin_unlock_bh(&efx->filter_lock); +} + +#ifdef CONFIG_RFS_ACCEL + +s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, + struct efx_filter_spec *gen_spec) +{ + return efx_farch_filter_insert(efx, gen_spec, true); +} + +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index) +{ + struct efx_farch_filter_state *state = efx->filter_state; + struct efx_farch_filter_table *table = + &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; + + if (test_bit(index, table->used_bitmap) && + table->spec[index].priority == EFX_FILTER_PRI_HINT && + rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, + flow_id, index)) { + efx_farch_filter_table_clear_entry(efx, table, index); + return true; + } + + return false; +} + +#endif /* CONFIG_RFS_ACCEL */ + +void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *ha; + union efx_multicast_hash *mc_hash = 
&efx->multicast_hash; + u32 crc; + int bit; + + if (!efx_dev_registered(efx)) + return; + + netif_addr_lock_bh(net_dev); + + efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); + + /* Build multicast hash table */ + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + memset(mc_hash, 0xff, sizeof(*mc_hash)); + } else { + memset(mc_hash, 0x00, sizeof(*mc_hash)); + netdev_for_each_mc_addr(ha, net_dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); + bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); + __set_bit_le(bit, mc_hash); + } + + /* Broadcast packets go through the multicast hash filter. + * ether_crc_le() of the broadcast address is 0xbe2612ff + * so we always add bit 0xff to the mask. + */ + __set_bit_le(0xff, mc_hash); + } + + netif_addr_unlock_bh(net_dev); +} diff --git a/drivers/net/ethernet/sfc/farch_regs.h b/drivers/net/ethernet/sfc/farch_regs.h new file mode 100644 index 000000000..7019a712e --- /dev/null +++ b/drivers/net/ethernet/sfc/farch_regs.h @@ -0,0 +1,2932 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_FARCH_REGS_H +#define EFX_FARCH_REGS_H + +/* + * Falcon hardware architecture definitions have a name prefix following + * the format: + * + * F<type>_<min_rev><max_rev>_<name> + * + * The following strings are used: + * + * MMIO register MC register Host memory structure + * ------------------------------------------------------------- + * Address R MCR + * Bitfield RF MCRF SF + * Enumerator FE MCFE SE + * + * <min_rev> is the first revision to which the definition applies: + * + * A: Falcon A1 (SFC4000AB) + * B: Falcon B0 (SFC4000BA) + * C: Siena A0 (SFL9021AA) + * + * If the definition has been changed or removed in later revisions + * then <max_rev> is the last revision to which the definition applies; + * otherwise it is "Z".
+ */ + +/************************************************************************** + * + * Falcon/Siena registers and descriptors + * + ************************************************************************** + */ + +/* ADR_REGION_REG: Address region register */ +#define FR_AZ_ADR_REGION 0x00000000 +#define FRF_AZ_ADR_REGION3_LBN 96 +#define FRF_AZ_ADR_REGION3_WIDTH 18 +#define FRF_AZ_ADR_REGION2_LBN 64 +#define FRF_AZ_ADR_REGION2_WIDTH 18 +#define FRF_AZ_ADR_REGION1_LBN 32 +#define FRF_AZ_ADR_REGION1_WIDTH 18 +#define FRF_AZ_ADR_REGION0_LBN 0 +#define FRF_AZ_ADR_REGION0_WIDTH 18 + +/* INT_EN_REG_KER: Kernel driver Interrupt enable register */ +#define FR_AZ_INT_EN_KER 0x00000010 +#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8 +#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6 +#define FRF_AZ_KER_INT_CHAR_LBN 4 +#define FRF_AZ_KER_INT_CHAR_WIDTH 1 +#define FRF_AZ_KER_INT_KER_LBN 3 +#define FRF_AZ_KER_INT_KER_WIDTH 1 +#define FRF_AZ_DRV_INT_EN_KER_LBN 0 +#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1 + +/* INT_EN_REG_CHAR: Char Driver interrupt enable register */ +#define FR_BZ_INT_EN_CHAR 0x00000020 +#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8 +#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6 +#define FRF_BZ_CHAR_INT_CHAR_LBN 4 +#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1 +#define FRF_BZ_CHAR_INT_KER_LBN 3 +#define FRF_BZ_CHAR_INT_KER_WIDTH 1 +#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0 +#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1 + +/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */ +#define FR_AZ_INT_ADR_KER 0x00000030 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64 +#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1 +#define FRF_AZ_INT_ADR_KER_LBN 0 +#define FRF_AZ_INT_ADR_KER_WIDTH 64 + +/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */ +#define FR_BZ_INT_ADR_CHAR 0x00000040 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64 +#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1 +#define FRF_BZ_INT_ADR_CHAR_LBN 0 +#define FRF_BZ_INT_ADR_CHAR_WIDTH 64 + +/* INT_ACK_KER: Kernel interrupt acknowledge register */ +#define FR_AA_INT_ACK_KER 0x00000050 +#define FRF_AA_INT_ACK_KER_FIELD_LBN 0 +#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32 + +/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */ +#define FR_BZ_INT_ISR0 0x00000090 +#define FRF_BZ_INT_ISR_REG_LBN 0 +#define FRF_BZ_INT_ISR_REG_WIDTH 64 + +/* HW_INIT_REG: Hardware initialization register */ +#define FR_AZ_HW_INIT 0x000000c0 +#define FRF_BB_BDMRD_CPLF_FULL_LBN 124 +#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121 +#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3 +#define FRF_CZ_TX_MRG_TAGS_LBN 120 +#define FRF_CZ_TX_MRG_TAGS_WIDTH 1 +#define FRF_AB_TRGT_MASK_ALL_LBN 100 +#define FRF_AB_TRGT_MASK_ALL_WIDTH 1 +#define FRF_AZ_DOORBELL_DROP_LBN 92 +#define FRF_AZ_DOORBELL_DROP_WIDTH 8 +#define FRF_AB_TX_RREQ_MASK_EN_LBN 76 +#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1 +#define FRF_AB_PE_EIDLE_DIS_LBN 75 +#define FRF_AB_PE_EIDLE_DIS_WIDTH 1 +#define FRF_AA_FC_BLOCKING_EN_LBN 45 +#define FRF_AA_FC_BLOCKING_EN_WIDTH 1 +#define FRF_BZ_B2B_REQ_EN_LBN 45 +#define FRF_BZ_B2B_REQ_EN_WIDTH 1 +#define FRF_AA_B2B_REQ_EN_LBN 44 +#define FRF_AA_B2B_REQ_EN_WIDTH 1 +#define FRF_BB_FC_BLOCKING_EN_LBN 44 +#define FRF_BB_FC_BLOCKING_EN_WIDTH 1 +#define FRF_AZ_POST_WR_MASK_LBN 40 +#define FRF_AZ_POST_WR_MASK_WIDTH 4 +#define FRF_AZ_TLP_TC_LBN 34 +#define FRF_AZ_TLP_TC_WIDTH 3 +#define FRF_AZ_TLP_ATTR_LBN 32 +#define FRF_AZ_TLP_ATTR_WIDTH 2 +#define FRF_AB_INTB_VEC_LBN 24 +#define FRF_AB_INTB_VEC_WIDTH 5 +#define FRF_AB_INTA_VEC_LBN 16 +#define FRF_AB_INTA_VEC_WIDTH 5 
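All of the definitions that follow use the pattern shown above: an FR_* constant is the register's address, and each FRF_*_LBN/FRF_*_WIDTH pair gives the position of a field's least significant bit within the (up to 128-bit) register and the field's width in bits. The standalone sketch below shows how such a pair selects a bit range; a plain array of four 32-bit words, least-significant word first, stands in here for the driver's efx_oword_t (an assumption for illustration, not the driver's own bitfield helpers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FRF_AZ_ADR_REGION1_LBN   32             /* copied from the defines above */
#define FRF_AZ_ADR_REGION1_WIDTH 18

/* Read a field of fewer than 32 bits starting at bit 'lbn' of a 128-bit
 * register image held in reg[0..3], least-significant word first. */
static uint32_t read_field(const uint32_t reg[4], unsigned lbn, unsigned width)
{
        uint64_t lo = (uint64_t)reg[lbn / 32];

        if (lbn / 32 + 1 < 4)                   /* field may spill into next word */
                lo |= (uint64_t)reg[lbn / 32 + 1] << 32;
        return (uint32_t)(lo >> (lbn % 32)) & ((1u << width) - 1);
}

int main(void)
{
        uint32_t adr_region[4] = { 0, 0x2a5a5, 0, 0 };  /* REGION1 = 0x2a5a5 */

        assert(read_field(adr_region, FRF_AZ_ADR_REGION1_LBN,
                          FRF_AZ_ADR_REGION1_WIDTH) == 0x2a5a5);
        printf("ADR_REGION1 = %#x\n",
               (unsigned)read_field(adr_region, FRF_AZ_ADR_REGION1_LBN,
                                    FRF_AZ_ADR_REGION1_WIDTH));
        return 0;
}

The EFX_SET_OWORD_FIELD() calls in the farch.c code earlier rely on the same _LBN/_WIDTH pairs to place values such as the filter search limits into these registers.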
+#define FRF_AZ_WD_TIMER_LBN 8 +#define FRF_AZ_WD_TIMER_WIDTH 8 +#define FRF_AZ_US_DISABLE_LBN 5 +#define FRF_AZ_US_DISABLE_WIDTH 1 +#define FRF_AZ_TLP_EP_LBN 4 +#define FRF_AZ_TLP_EP_WIDTH 1 +#define FRF_AZ_ATTR_SEL_LBN 3 +#define FRF_AZ_ATTR_SEL_WIDTH 1 +#define FRF_AZ_TD_SEL_LBN 1 +#define FRF_AZ_TD_SEL_WIDTH 1 +#define FRF_AZ_TLP_TD_LBN 0 +#define FRF_AZ_TLP_TD_WIDTH 1 + +/* EE_SPI_HCMD_REG: SPI host command register */ +#define FR_AB_EE_SPI_HCMD 0x00000100 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31 +#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1 +#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28 +#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24 +#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16 +#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5 +#define FRF_AB_EE_SPI_HCMD_READ_LBN 15 +#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12 +#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8 +#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2 +#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0 +#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8 + +/* USR_EV_CFG: User Level Event Configuration register */ +#define FR_CZ_USR_EV_CFG 0x00000100 +#define FRF_CZ_USREV_DIS_LBN 16 +#define FRF_CZ_USREV_DIS_WIDTH 1 +#define FRF_CZ_DFLT_EVQ_LBN 0 +#define FRF_CZ_DFLT_EVQ_WIDTH 10 + +/* EE_SPI_HADR_REG: SPI host address register */ +#define FR_AB_EE_SPI_HADR 0x00000110 +#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24 +#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8 +#define FRF_AB_EE_SPI_HADR_ADR_LBN 0 +#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24 + +/* EE_SPI_HDATA_REG: SPI host data register */ +#define FR_AB_EE_SPI_HDATA 0x00000120 +#define FRF_AB_EE_SPI_HDATA3_LBN 96 +#define FRF_AB_EE_SPI_HDATA3_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA2_LBN 64 +#define FRF_AB_EE_SPI_HDATA2_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA1_LBN 32 +#define FRF_AB_EE_SPI_HDATA1_WIDTH 32 +#define FRF_AB_EE_SPI_HDATA0_LBN 0 +#define FRF_AB_EE_SPI_HDATA0_WIDTH 32 + +/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */ +#define FR_AB_EE_BASE_PAGE 0x00000130 +#define FRF_AB_EE_EXPROM_MASK_LBN 16 +#define FRF_AB_EE_EXPROM_MASK_WIDTH 13 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0 +#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13 + +/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */ +#define FR_AB_EE_VPD_CFG0 0x00000140 +#define FRF_AB_EE_SF_FASTRD_EN_LBN 127 +#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1 +#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120 +#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_VPD_WIP_POLL_LBN 119 +#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1 +#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112 +#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7 +#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96 +#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16 +#define FRF_AB_EE_VPDW_LENGTH_LBN 80 +#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPDW_BASE_LBN 64 +#define FRF_AB_EE_VPDW_BASE_WIDTH 15 +#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56 +#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8 +#define FRF_AB_EE_VPD_BASE_LBN 32 +#define FRF_AB_EE_VPD_BASE_WIDTH 24 +#define FRF_AB_EE_VPD_LENGTH_LBN 16 +#define FRF_AB_EE_VPD_LENGTH_WIDTH 15 +#define FRF_AB_EE_VPD_AD_SIZE_LBN 8 +#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5 +#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5 +#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4 +#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1 +#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2 +#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1 +#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1 +#define 
FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1 +#define FRF_AB_EE_VPD_EN_LBN 0 +#define FRF_AB_EE_VPD_EN_WIDTH 1 + +/* EE_VPD_SW_CNTL_REG: VPD access SW control register */ +#define FR_AB_EE_VPD_SW_CNTL 0x00000150 +#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31 +#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28 +#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1 +#define FRF_AB_EE_VPD_CYC_ADR_LBN 0 +#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15 + +/* EE_VPD_SW_DATA_REG: VPD access SW data register */ +#define FR_AB_EE_VPD_SW_DATA 0x00000160 +#define FRF_AB_EE_VPD_CYC_DAT_LBN 0 +#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32 + +/* PBMX_DBG_IADDR_REG: Capture Module address register */ +#define FR_CZ_PBMX_DBG_IADDR 0x000001f0 +#define FRF_CZ_PBMX_DBG_IADDR_LBN 0 +#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32 + +/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */ +#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0 +#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32 +#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15 +#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0 +#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12 + +/* PBMX_DBG_IDATA_REG: Capture Module data register */ +#define FR_CZ_PBMX_DBG_IDATA 0x000001f8 +#define FRF_CZ_PBMX_DBG_IDATA_LBN 0 +#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64 + +/* NIC_STAT_REG: NIC status register */ +#define FR_AB_NIC_STAT 0x00000200 +#define FRF_BB_AER_DIS_LBN 34 +#define FRF_BB_AER_DIS_WIDTH 1 +#define FRF_BB_EE_STRAP_EN_LBN 31 +#define FRF_BB_EE_STRAP_EN_WIDTH 1 +#define FRF_BB_EE_STRAP_LBN 24 +#define FRF_BB_EE_STRAP_WIDTH 4 +#define FRF_BB_REVISION_ID_LBN 17 +#define FRF_BB_REVISION_ID_WIDTH 7 +#define FRF_AB_ONCHIP_SRAM_LBN 16 +#define FRF_AB_ONCHIP_SRAM_WIDTH 1 +#define FRF_AB_SF_PRST_LBN 9 +#define FRF_AB_SF_PRST_WIDTH 1 +#define FRF_AB_EE_PRST_LBN 8 +#define FRF_AB_EE_PRST_WIDTH 1 +#define FRF_AB_ATE_MODE_LBN 3 +#define FRF_AB_ATE_MODE_WIDTH 1 +#define FRF_AB_STRAP_PINS_LBN 0 +#define FRF_AB_STRAP_PINS_WIDTH 3 + +/* GPIO_CTL_REG: GPIO control register */ +#define FR_AB_GPIO_CTL 0x00000210 +#define FRF_AB_GPIO_OUT3_LBN 112 +#define FRF_AB_GPIO_OUT3_WIDTH 16 +#define FRF_AB_GPIO_IN3_LBN 104 +#define FRF_AB_GPIO_IN3_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96 +#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8 +#define FRF_AB_GPIO_OUT2_LBN 80 +#define FRF_AB_GPIO_OUT2_WIDTH 16 +#define FRF_AB_GPIO_IN2_LBN 72 +#define FRF_AB_GPIO_IN2_WIDTH 8 +#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64 +#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8 +#define FRF_AB_GPIO15_OEN_LBN 63 +#define FRF_AB_GPIO15_OEN_WIDTH 1 +#define FRF_AB_GPIO14_OEN_LBN 62 +#define FRF_AB_GPIO14_OEN_WIDTH 1 +#define FRF_AB_GPIO13_OEN_LBN 61 +#define FRF_AB_GPIO13_OEN_WIDTH 1 +#define FRF_AB_GPIO12_OEN_LBN 60 +#define FRF_AB_GPIO12_OEN_WIDTH 1 +#define FRF_AB_GPIO11_OEN_LBN 59 +#define FRF_AB_GPIO11_OEN_WIDTH 1 +#define FRF_AB_GPIO10_OEN_LBN 58 +#define FRF_AB_GPIO10_OEN_WIDTH 1 +#define FRF_AB_GPIO9_OEN_LBN 57 +#define FRF_AB_GPIO9_OEN_WIDTH 1 +#define FRF_AB_GPIO8_OEN_LBN 56 +#define FRF_AB_GPIO8_OEN_WIDTH 1 +#define FRF_AB_GPIO15_OUT_LBN 55 +#define FRF_AB_GPIO15_OUT_WIDTH 1 +#define FRF_AB_GPIO14_OUT_LBN 54 +#define FRF_AB_GPIO14_OUT_WIDTH 1 +#define FRF_AB_GPIO13_OUT_LBN 53 +#define FRF_AB_GPIO13_OUT_WIDTH 1 +#define FRF_AB_GPIO12_OUT_LBN 52 +#define FRF_AB_GPIO12_OUT_WIDTH 1 +#define FRF_AB_GPIO11_OUT_LBN 51 +#define FRF_AB_GPIO11_OUT_WIDTH 1 +#define FRF_AB_GPIO10_OUT_LBN 50 +#define FRF_AB_GPIO10_OUT_WIDTH 1 +#define 
FRF_AB_GPIO9_OUT_LBN 49 +#define FRF_AB_GPIO9_OUT_WIDTH 1 +#define FRF_AB_GPIO8_OUT_LBN 48 +#define FRF_AB_GPIO8_OUT_WIDTH 1 +#define FRF_AB_GPIO15_IN_LBN 47 +#define FRF_AB_GPIO15_IN_WIDTH 1 +#define FRF_AB_GPIO14_IN_LBN 46 +#define FRF_AB_GPIO14_IN_WIDTH 1 +#define FRF_AB_GPIO13_IN_LBN 45 +#define FRF_AB_GPIO13_IN_WIDTH 1 +#define FRF_AB_GPIO12_IN_LBN 44 +#define FRF_AB_GPIO12_IN_WIDTH 1 +#define FRF_AB_GPIO11_IN_LBN 43 +#define FRF_AB_GPIO11_IN_WIDTH 1 +#define FRF_AB_GPIO10_IN_LBN 42 +#define FRF_AB_GPIO10_IN_WIDTH 1 +#define FRF_AB_GPIO9_IN_LBN 41 +#define FRF_AB_GPIO9_IN_WIDTH 1 +#define FRF_AB_GPIO8_IN_LBN 40 +#define FRF_AB_GPIO8_IN_WIDTH 1 +#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39 +#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38 +#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37 +#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36 +#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35 +#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34 +#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33 +#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32 +#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_CLK156_OUT_EN_LBN 31 +#define FRF_AB_CLK156_OUT_EN_WIDTH 1 +#define FRF_AB_USE_NIC_CLK_LBN 30 +#define FRF_AB_USE_NIC_CLK_WIDTH 1 +#define FRF_AB_GPIO5_OEN_LBN 29 +#define FRF_AB_GPIO5_OEN_WIDTH 1 +#define FRF_AB_GPIO4_OEN_LBN 28 +#define FRF_AB_GPIO4_OEN_WIDTH 1 +#define FRF_AB_GPIO3_OEN_LBN 27 +#define FRF_AB_GPIO3_OEN_WIDTH 1 +#define FRF_AB_GPIO2_OEN_LBN 26 +#define FRF_AB_GPIO2_OEN_WIDTH 1 +#define FRF_AB_GPIO1_OEN_LBN 25 +#define FRF_AB_GPIO1_OEN_WIDTH 1 +#define FRF_AB_GPIO0_OEN_LBN 24 +#define FRF_AB_GPIO0_OEN_WIDTH 1 +#define FRF_AB_GPIO7_OUT_LBN 23 +#define FRF_AB_GPIO7_OUT_WIDTH 1 +#define FRF_AB_GPIO6_OUT_LBN 22 +#define FRF_AB_GPIO6_OUT_WIDTH 1 +#define FRF_AB_GPIO5_OUT_LBN 21 +#define FRF_AB_GPIO5_OUT_WIDTH 1 +#define FRF_AB_GPIO4_OUT_LBN 20 +#define FRF_AB_GPIO4_OUT_WIDTH 1 +#define FRF_AB_GPIO3_OUT_LBN 19 +#define FRF_AB_GPIO3_OUT_WIDTH 1 +#define FRF_AB_GPIO2_OUT_LBN 18 +#define FRF_AB_GPIO2_OUT_WIDTH 1 +#define FRF_AB_GPIO1_OUT_LBN 17 +#define FRF_AB_GPIO1_OUT_WIDTH 1 +#define FRF_AB_GPIO0_OUT_LBN 16 +#define FRF_AB_GPIO0_OUT_WIDTH 1 +#define FRF_AB_GPIO7_IN_LBN 15 +#define FRF_AB_GPIO7_IN_WIDTH 1 +#define FRF_AB_GPIO6_IN_LBN 14 +#define FRF_AB_GPIO6_IN_WIDTH 1 +#define FRF_AB_GPIO5_IN_LBN 13 +#define FRF_AB_GPIO5_IN_WIDTH 1 +#define FRF_AB_GPIO4_IN_LBN 12 +#define FRF_AB_GPIO4_IN_WIDTH 1 +#define FRF_AB_GPIO3_IN_LBN 11 +#define FRF_AB_GPIO3_IN_WIDTH 1 +#define FRF_AB_GPIO2_IN_LBN 10 +#define FRF_AB_GPIO2_IN_WIDTH 1 +#define FRF_AB_GPIO1_IN_LBN 9 +#define FRF_AB_GPIO1_IN_WIDTH 1 +#define FRF_AB_GPIO0_IN_LBN 8 +#define FRF_AB_GPIO0_IN_WIDTH 1 +#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7 +#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6 +#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5 +#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4 +#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3 +#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2 +#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1 +#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1 +#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0 
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1 + +/* GLB_CTL_REG: Global control register */ +#define FR_AB_GLB_CTL 0x00000220 +#define FRF_AB_EXT_PHY_RST_CTL_LBN 63 +#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1 +#define FRF_AB_XAUI_SD_RST_CTL_LBN 62 +#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_SD_RST_CTL_LBN 61 +#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1 +#define FRF_AA_PCIX_RST_CTL_LBN 60 +#define FRF_AA_PCIX_RST_CTL_WIDTH 1 +#define FRF_BB_BIU_RST_CTL_LBN 60 +#define FRF_BB_BIU_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59 +#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58 +#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1 +#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57 +#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1 +#define FRF_AB_XGRX_RST_CTL_LBN 56 +#define FRF_AB_XGRX_RST_CTL_WIDTH 1 +#define FRF_AB_XGTX_RST_CTL_LBN 55 +#define FRF_AB_XGTX_RST_CTL_WIDTH 1 +#define FRF_AB_EM_RST_CTL_LBN 54 +#define FRF_AB_EM_RST_CTL_WIDTH 1 +#define FRF_AB_EV_RST_CTL_LBN 53 +#define FRF_AB_EV_RST_CTL_WIDTH 1 +#define FRF_AB_SR_RST_CTL_LBN 52 +#define FRF_AB_SR_RST_CTL_WIDTH 1 +#define FRF_AB_RX_RST_CTL_LBN 51 +#define FRF_AB_RX_RST_CTL_WIDTH 1 +#define FRF_AB_TX_RST_CTL_LBN 50 +#define FRF_AB_TX_RST_CTL_WIDTH 1 +#define FRF_AB_EE_RST_CTL_LBN 49 +#define FRF_AB_EE_RST_CTL_WIDTH 1 +#define FRF_AB_CS_RST_CTL_LBN 48 +#define FRF_AB_CS_RST_CTL_WIDTH 1 +#define FRF_AB_HOT_RST_CTL_LBN 40 +#define FRF_AB_HOT_RST_CTL_WIDTH 2 +#define FRF_AB_RST_EXT_PHY_LBN 31 +#define FRF_AB_RST_EXT_PHY_WIDTH 1 +#define FRF_AB_RST_XAUI_SD_LBN 30 +#define FRF_AB_RST_XAUI_SD_WIDTH 1 +#define FRF_AB_RST_PCIE_SD_LBN 29 +#define FRF_AB_RST_PCIE_SD_WIDTH 1 +#define FRF_AA_RST_PCIX_LBN 28 +#define FRF_AA_RST_PCIX_WIDTH 1 +#define FRF_BB_RST_BIU_LBN 28 +#define FRF_BB_RST_BIU_WIDTH 1 +#define FRF_AB_RST_PCIE_STKY_LBN 27 +#define FRF_AB_RST_PCIE_STKY_WIDTH 1 +#define FRF_AB_RST_PCIE_NSTKY_LBN 26 +#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1 +#define FRF_AB_RST_PCIE_CORE_LBN 25 +#define FRF_AB_RST_PCIE_CORE_WIDTH 1 +#define FRF_AB_RST_XGRX_LBN 24 +#define FRF_AB_RST_XGRX_WIDTH 1 +#define FRF_AB_RST_XGTX_LBN 23 +#define FRF_AB_RST_XGTX_WIDTH 1 +#define FRF_AB_RST_EM_LBN 22 +#define FRF_AB_RST_EM_WIDTH 1 +#define FRF_AB_RST_EV_LBN 21 +#define FRF_AB_RST_EV_WIDTH 1 +#define FRF_AB_RST_SR_LBN 20 +#define FRF_AB_RST_SR_WIDTH 1 +#define FRF_AB_RST_RX_LBN 19 +#define FRF_AB_RST_RX_WIDTH 1 +#define FRF_AB_RST_TX_LBN 18 +#define FRF_AB_RST_TX_WIDTH 1 +#define FRF_AB_RST_SF_LBN 17 +#define FRF_AB_RST_SF_WIDTH 1 +#define FRF_AB_RST_CS_LBN 16 +#define FRF_AB_RST_CS_WIDTH 1 +#define FRF_AB_INT_RST_DUR_LBN 4 +#define FRF_AB_INT_RST_DUR_WIDTH 3 +#define FRF_AB_EXT_PHY_RST_DUR_LBN 1 +#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3 +#define FFE_AB_EXT_PHY_RST_DUR_10240US 7 +#define FFE_AB_EXT_PHY_RST_DUR_5120US 6 +#define FFE_AB_EXT_PHY_RST_DUR_2560US 5 +#define FFE_AB_EXT_PHY_RST_DUR_1280US 4 +#define FFE_AB_EXT_PHY_RST_DUR_640US 3 +#define FFE_AB_EXT_PHY_RST_DUR_320US 2 +#define FFE_AB_EXT_PHY_RST_DUR_160US 1 +#define FFE_AB_EXT_PHY_RST_DUR_80US 0 +#define FRF_AB_SWRST_LBN 0 +#define FRF_AB_SWRST_WIDTH 1 + +/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ +#define FR_AZ_FATAL_INTR_KER 0x00000230 +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43 +#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42 +#define 
FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41 +#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40 +#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39 +#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38 +#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37 +#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36 +#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35 +#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34 +#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33 +#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32 +#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1 +#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11 +#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_KER_LBN 11 +#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10 +#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9 +#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1 +#define FRF_AZ_MEM_PERR_INT_KER_LBN 8 +#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1 +#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7 +#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6 +#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5 +#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4 +#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3 +#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1 +#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2 +#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1 +#define FRF_AZ_ILL_ADR_INT_KER_LBN 1 +#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1 +#define FRF_AZ_SRM_PERR_INT_KER_LBN 0 +#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1 + +/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */ +#define FR_BZ_FATAL_INTR_CHAR 0x00000240 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1 +#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43 +#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43 +#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42 +#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41 +#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40 +#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39 +#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38 +#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35 +#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34 +#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33 +#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1 +#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32 +#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12 +#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1 +#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 
11 +#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1 +#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11 +#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1 +#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10 +#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1 +#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9 +#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1 +#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8 +#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1 +#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7 +#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6 +#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5 +#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4 +#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3 +#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1 +#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2 +#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1 +#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1 +#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0 +#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1 + +/* DP_CTRL_REG: Datapath control register */ +#define FR_BZ_DP_CTRL 0x00000250 +#define FRF_BZ_FLS_EVQ_ID_LBN 0 +#define FRF_BZ_FLS_EVQ_ID_WIDTH 12 + +/* MEM_STAT_REG: Memory status register */ +#define FR_AZ_MEM_STAT 0x00000260 +#define FRF_AB_MEM_PERR_VEC_LBN 53 +#define FRF_AB_MEM_PERR_VEC_WIDTH 38 +#define FRF_AB_MBIST_CORR_LBN 38 +#define FRF_AB_MBIST_CORR_WIDTH 15 +#define FRF_AB_MBIST_ERR_LBN 0 +#define FRF_AB_MBIST_ERR_WIDTH 40 +#define FRF_CZ_MEM_PERR_VEC_LBN 0 +#define FRF_CZ_MEM_PERR_VEC_WIDTH 35 + +/* CS_DEBUG_REG: Debug register */ +#define FR_AZ_CS_DEBUG 0x00000270 +#define FRF_AB_GLB_DEBUG2_SEL_LBN 50 +#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL2_LBN 47 +#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL1_LBN 44 +#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3 +#define FRF_AB_DEBUG_BLK_SEL0_LBN 41 +#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3 +#define FRF_CZ_CS_PORT_NUM_LBN 40 +#define FRF_CZ_CS_PORT_NUM_WIDTH 2 +#define FRF_AB_MISC_DEBUG_ADDR_LBN 36 +#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31 +#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5 +#define FRF_CZ_CS_PORT_FPE_LBN 1 +#define FRF_CZ_CS_PORT_FPE_WIDTH 35 +#define FRF_AB_EM_DEBUG_ADDR_LBN 26 +#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_SR_DEBUG_ADDR_LBN 21 +#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_EV_DEBUG_ADDR_LBN 16 +#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_RX_DEBUG_ADDR_LBN 11 +#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_TX_DEBUG_ADDR_LBN 6 +#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5 +#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1 +#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5 +#define FRF_AZ_CS_DEBUG_EN_LBN 0 +#define FRF_AZ_CS_DEBUG_EN_WIDTH 1 + +/* DRIVER_REG: Driver scratch register [0-7] */ +#define FR_AZ_DRIVER 0x00000280 +#define FR_AZ_DRIVER_STEP 16 +#define FR_AZ_DRIVER_ROWS 8 +#define FRF_AZ_DRIVER_DW0_LBN 0 +#define FRF_AZ_DRIVER_DW0_WIDTH 32 + +/* ALTERA_BUILD_REG: Altera build register */ +#define FR_AZ_ALTERA_BUILD 0x00000300 +#define FRF_AZ_ALTERA_BUILD_VER_LBN 0 +#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32 + +/* CSR_SPARE_REG: Spare register */ +#define FR_AZ_CSR_SPARE 0x00000310 +#define FRF_AB_MEM_PERR_EN_LBN 64 +#define FRF_AB_MEM_PERR_EN_WIDTH 38 +#define FRF_CZ_MEM_PERR_EN_LBN 64 +#define FRF_CZ_MEM_PERR_EN_WIDTH 35 +#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72 +#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2 +#define FRF_AZ_CSR_SPARE_BITS_LBN 0 +#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32 + +/* 
PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */ +#define FR_AB_PCIE_SD_CTL0123 0x00000320 +#define FRF_AB_PCIE_TESTSIG_H_LBN 96 +#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19 +#define FRF_AB_PCIE_TESTSIG_L_LBN 64 +#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19 +#define FRF_AB_PCIE_OFFSET_LBN 56 +#define FRF_AB_PCIE_OFFSET_WIDTH 8 +#define FRF_AB_PCIE_OFFSETEN_H_LBN 55 +#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1 +#define FRF_AB_PCIE_OFFSETEN_L_LBN 54 +#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_H_LBN 53 +#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1 +#define FRF_AB_PCIE_HIVMODE_L_LBN 52 +#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_H_LBN 51 +#define FRF_AB_PCIE_PARRESET_H_WIDTH 1 +#define FRF_AB_PCIE_PARRESET_L_LBN 50 +#define FRF_AB_PCIE_PARRESET_L_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49 +#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1 +#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48 +#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1 +#define FRF_AB_PCIE_LPBK_LBN 40 +#define FRF_AB_PCIE_LPBK_WIDTH 8 +#define FRF_AB_PCIE_PARLPBK_LBN 32 +#define FRF_AB_PCIE_PARLPBK_WIDTH 8 +#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30 +#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28 +#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3 +#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26 +#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2 +#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24 +#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3 +#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2 +#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1 +#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0 +#define FRF_AB_PCIE_RXEQCTL_H_LBN 18 +#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2 +#define FRF_AB_PCIE_RXEQCTL_L_LBN 16 +#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2 +#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3 +#define FFE_AB_PCIE_RXEQCTL_OFF 2 +#define FFE_AB_PCIE_RXEQCTL_MIN 1 +#define FFE_AB_PCIE_RXEQCTL_MAX 0 +#define FRF_AB_PCIE_HIDRV_LBN 8 +#define FRF_AB_PCIE_HIDRV_WIDTH 8 +#define FRF_AB_PCIE_LODRV_LBN 0 +#define FRF_AB_PCIE_LODRV_WIDTH 8 + +/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */ +#define FR_AB_PCIE_SD_CTL45 0x00000330 +#define FRF_AB_PCIE_DTX7_LBN 60 +#define FRF_AB_PCIE_DTX7_WIDTH 4 +#define FRF_AB_PCIE_DTX6_LBN 56 +#define FRF_AB_PCIE_DTX6_WIDTH 4 +#define FRF_AB_PCIE_DTX5_LBN 52 +#define FRF_AB_PCIE_DTX5_WIDTH 4 +#define FRF_AB_PCIE_DTX4_LBN 48 +#define FRF_AB_PCIE_DTX4_WIDTH 4 +#define FRF_AB_PCIE_DTX3_LBN 44 +#define FRF_AB_PCIE_DTX3_WIDTH 4 +#define FRF_AB_PCIE_DTX2_LBN 40 +#define FRF_AB_PCIE_DTX2_WIDTH 4 +#define FRF_AB_PCIE_DTX1_LBN 36 +#define FRF_AB_PCIE_DTX1_WIDTH 4 +#define FRF_AB_PCIE_DTX0_LBN 32 +#define FRF_AB_PCIE_DTX0_WIDTH 4 +#define FRF_AB_PCIE_DEQ7_LBN 28 +#define FRF_AB_PCIE_DEQ7_WIDTH 4 +#define FRF_AB_PCIE_DEQ6_LBN 24 +#define FRF_AB_PCIE_DEQ6_WIDTH 4 +#define FRF_AB_PCIE_DEQ5_LBN 20 +#define FRF_AB_PCIE_DEQ5_WIDTH 4 +#define FRF_AB_PCIE_DEQ4_LBN 16 +#define FRF_AB_PCIE_DEQ4_WIDTH 4 +#define FRF_AB_PCIE_DEQ3_LBN 12 +#define FRF_AB_PCIE_DEQ3_WIDTH 4 +#define FRF_AB_PCIE_DEQ2_LBN 8 +#define FRF_AB_PCIE_DEQ2_WIDTH 4 +#define FRF_AB_PCIE_DEQ1_LBN 4 +#define FRF_AB_PCIE_DEQ1_WIDTH 4 +#define FRF_AB_PCIE_DEQ0_LBN 0 +#define FRF_AB_PCIE_DEQ0_WIDTH 4 + +/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */ +#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340 +#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52 +#define 
FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48 +#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4 +#define FRF_AB_PCIE_PRBSERR_LBN 40 +#define FRF_AB_PCIE_PRBSERR_WIDTH 8 +#define FRF_AB_PCIE_PRBSERRH0_LBN 32 +#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8 +#define FRF_AB_PCIE_FASTINIT_H_LBN 15 +#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1 +#define FRF_AB_PCIE_FASTINIT_L_LBN 14 +#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13 +#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1 +#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12 +#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11 +#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10 +#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9 +#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1 +#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8 +#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1 +#define FRF_AB_PCIE_PRBSSEL_LBN 0 +#define FRF_AB_PCIE_PRBSSEL_WIDTH 8 + +/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */ +#define FR_BB_DEBUG_DATA_OUT 0x00000350 +#define FRF_BB_DEBUG2_PORT_LBN 25 +#define FRF_BB_DEBUG2_PORT_WIDTH 15 +#define FRF_BB_DEBUG1_PORT_LBN 0 +#define FRF_BB_DEBUG1_PORT_WIDTH 25 + +/* EVQ_RPTR_REGP0: Event queue read pointer register */ +#define FR_BZ_EVQ_RPTR_P0 0x00000400 +#define FR_BZ_EVQ_RPTR_P0_STEP 8192 +#define FR_BZ_EVQ_RPTR_P0_ROWS 1024 +/* EVQ_RPTR_REG_KER: Event queue read pointer register */ +#define FR_AA_EVQ_RPTR_KER 0x00011b00 +#define FR_AA_EVQ_RPTR_KER_STEP 4 +#define FR_AA_EVQ_RPTR_KER_ROWS 4 +/* EVQ_RPTR_REG: Event queue read pointer register */ +#define FR_BZ_EVQ_RPTR 0x00fa0000 +#define FR_BZ_EVQ_RPTR_STEP 16 +#define FR_BB_EVQ_RPTR_ROWS 4096 +#define FR_CZ_EVQ_RPTR_ROWS 1024 +/* EVQ_RPTR_REGP123: Event queue read pointer register */ +#define FR_BB_EVQ_RPTR_P123 0x01000400 +#define FR_BB_EVQ_RPTR_P123_STEP 8192 +#define FR_BB_EVQ_RPTR_P123_ROWS 3072 +#define FRF_AZ_EVQ_RPTR_VLD_LBN 15 +#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1 +#define FRF_AZ_EVQ_RPTR_LBN 0 +#define FRF_AZ_EVQ_RPTR_WIDTH 15 + +/* TIMER_COMMAND_REGP0: Timer Command Registers */ +#define FR_BZ_TIMER_COMMAND_P0 0x00000420 +#define FR_BZ_TIMER_COMMAND_P0_STEP 8192 +#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024 +/* TIMER_COMMAND_REG_KER: Timer Command Registers */ +#define FR_AA_TIMER_COMMAND_KER 0x00000420 +#define FR_AA_TIMER_COMMAND_KER_STEP 8192 +#define FR_AA_TIMER_COMMAND_KER_ROWS 4 +/* TIMER_COMMAND_REGP123: Timer Command Registers */ +#define FR_BB_TIMER_COMMAND_P123 0x01000420 +#define FR_BB_TIMER_COMMAND_P123_STEP 8192 +#define FR_BB_TIMER_COMMAND_P123_ROWS 3072 +#define FRF_CZ_TC_TIMER_MODE_LBN 14 +#define FRF_CZ_TC_TIMER_MODE_WIDTH 2 +#define FRF_AB_TC_TIMER_MODE_LBN 12 +#define FRF_AB_TC_TIMER_MODE_WIDTH 2 +#define FRF_CZ_TC_TIMER_VAL_LBN 0 +#define FRF_CZ_TC_TIMER_VAL_WIDTH 14 +#define FRF_AB_TC_TIMER_VAL_LBN 0 +#define FRF_AB_TC_TIMER_VAL_WIDTH 12 + +/* DRV_EV_REG: Driver generated event register */ +#define FR_AZ_DRV_EV 0x00000440 +#define FRF_AZ_DRV_EV_QID_LBN 64 +#define FRF_AZ_DRV_EV_QID_WIDTH 12 +#define FRF_AZ_DRV_EV_DATA_LBN 0 +#define FRF_AZ_DRV_EV_DATA_WIDTH 64 + +/* EVQ_CTL_REG: Event queue control register */ +#define FR_AZ_EVQ_CTL 0x00000450 +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15 +#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6 +#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14 +#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1 +#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7 +#define 
FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0 +#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7 + +/* EVQ_CNT1_REG: Event counter 1 register */ +#define FR_AZ_EVQ_CNT1 0x00000460 +#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120 +#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7 +#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100 +#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20 +#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80 +#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20 + +/* EVQ_CNT2_REG: Event counter 2 register */ +#define FR_AZ_EVQ_CNT2 0x00000470 +#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104 +#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84 +#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_RDY_CNT_LBN 80 +#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4 +#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60 +#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40 +#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20 +#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20 +#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0 +#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20 + +/* USR_EV_REG: Event mailbox register */ +#define FR_CZ_USR_EV 0x00000540 +#define FR_CZ_USR_EV_STEP 8192 +#define FR_CZ_USR_EV_ROWS 1024 +#define FRF_CZ_USR_EV_DATA_LBN 0 +#define FRF_CZ_USR_EV_DATA_WIDTH 32 + +/* BUF_TBL_CFG_REG: Buffer table configuration register */ +#define FR_AZ_BUF_TBL_CFG 0x00000600 +#define FRF_AZ_BUF_TBL_MODE_LBN 3 +#define FRF_AZ_BUF_TBL_MODE_WIDTH 1 + +/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */ +#define FR_AZ_SRM_RX_DC_CFG 0x00000610 +#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21 +#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21 + +/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */ +#define FR_AZ_SRM_TX_DC_CFG 0x00000620 +#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0 +#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21 + +/* SRM_CFG_REG: SRAM configuration register */ +#define FR_AZ_SRM_CFG 0x00000630 +#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5 +#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1 +#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4 +#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1 +#define FRF_AZ_SRM_INIT_EN_LBN 3 +#define FRF_AZ_SRM_INIT_EN_WIDTH 1 +#define FRF_AZ_SRM_NUM_BANK_LBN 2 +#define FRF_AZ_SRM_NUM_BANK_WIDTH 1 +#define FRF_AZ_SRM_BANK_SIZE_LBN 0 +#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2 + +/* BUF_TBL_UPD_REG: Buffer table update register */ +#define FR_AZ_BUF_TBL_UPD 0x00000650 +#define FRF_AZ_BUF_UPD_CMD_LBN 63 +#define FRF_AZ_BUF_UPD_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_CMD_LBN 62 +#define FRF_AZ_BUF_CLR_CMD_WIDTH 1 +#define FRF_AZ_BUF_CLR_END_ID_LBN 32 +#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20 +#define FRF_AZ_BUF_CLR_START_ID_LBN 0 +#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20 + +/* SRM_UPD_EVQ_REG: Buffer table update register */ +#define FR_AZ_SRM_UPD_EVQ 0x00000660 +#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0 +#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12 + +/* SRAM_PARITY_REG: SRAM parity register. 
*/ +#define FR_AZ_SRAM_PARITY 0x00000670 +#define FRF_CZ_BYPASS_ECC_LBN 3 +#define FRF_CZ_BYPASS_ECC_WIDTH 1 +#define FRF_CZ_SEC_INT_LBN 2 +#define FRF_CZ_SEC_INT_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1 +#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1 +#define FRF_AB_FORCE_SRAM_PERR_LBN 0 +#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0 +#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1 + +/* RX_CFG_REG: Receive configuration register */ +#define FR_AZ_RX_CFG 0x00000800 +#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72 +#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14 +#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71 +#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62 +#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53 +#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9 +#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49 +#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4 +#define FRF_BZ_RX_TCP_SUP_LBN 48 +#define FRF_BZ_RX_TCP_SUP_WIDTH 1 +#define FRF_BZ_RX_INGR_EN_LBN 47 +#define FRF_BZ_RX_INGR_EN_WIDTH 1 +#define FRF_BZ_RX_IP_HASH_LBN 46 +#define FRF_BZ_RX_IP_HASH_WIDTH 1 +#define FRF_BZ_RX_HASH_ALG_LBN 45 +#define FRF_BZ_RX_HASH_ALG_WIDTH 1 +#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44 +#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1 +#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43 +#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42 +#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39 +#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_OWNERR_CTL_LBN 38 +#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1 +#define FRF_BZ_RX_XON_TX_TH_LBN 33 +#define FRF_BZ_RX_XON_TX_TH_WIDTH 5 +#define FRF_AA_RX_DESC_PUSH_EN_LBN 35 +#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1 +#define FRF_AA_RX_RDW_PATCH_EN_LBN 34 +#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1 +#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31 +#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3 +#define FRF_BZ_RX_XOFF_TX_TH_LBN 28 +#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_OWNERR_CTL_LBN 30 +#define FRF_AA_RX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_RX_XON_TX_TH_LBN 25 +#define FRF_AA_RX_XON_TX_TH_WIDTH 5 +#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19 +#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_AA_RX_XOFF_TX_TH_LBN 20 +#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5 +#define FRF_AA_RX_USR_BUF_SIZE_LBN 11 +#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9 +#define FRF_BZ_RX_XON_MAC_TH_LBN 10 +#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XON_MAC_TH_LBN 6 +#define FRF_AA_RX_XON_MAC_TH_WIDTH 5 +#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1 +#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9 +#define FRF_AA_RX_XOFF_MAC_TH_LBN 1 +#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5 +#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0 +#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1 + +/* RX_FILTER_CTL_REG: Receive filter control registers */ +#define FR_BZ_RX_FILTER_CTL 0x00000810 +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94 +#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86 +#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85 +#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69 +#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16 +#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57 +#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56 +#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55 +#define 
FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43 +#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42 +#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41 +#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40 +#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1 +#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32 +#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_NUM_KER_LBN 24 +#define FRF_BZ_NUM_KER_WIDTH 2 +#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16 +#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8 +#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8 +#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0 +#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8 + +/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */ +#define FR_AZ_RX_FLUSH_DESCQ 0x00000820 +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24 +#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0 +#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12 + +/* RX_DESC_UPD_REGP0: Receive descriptor update register. */ +#define FR_BZ_RX_DESC_UPD_P0 0x00000830 +#define FR_BZ_RX_DESC_UPD_P0_STEP 8192 +#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024 +/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */ +#define FR_AA_RX_DESC_UPD_KER 0x00000830 +#define FR_AA_RX_DESC_UPD_KER_STEP 8192 +#define FR_AA_RX_DESC_UPD_KER_ROWS 4 +/* RX_DESC_UPD_REGP123: Receive descriptor update register. */ +#define FR_BB_RX_DESC_UPD_P123 0x01000830 +#define FR_BB_RX_DESC_UPD_P123_STEP 8192 +#define FR_BB_RX_DESC_UPD_P123_ROWS 3072 +#define FRF_AZ_RX_DESC_WPTR_LBN 96 +#define FRF_AZ_RX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_RX_DESC_LBN 0 +#define FRF_AZ_RX_DESC_WIDTH 64 + +/* RX_DC_CFG_REG: Receive descriptor cache configuration register */ +#define FR_AZ_RX_DC_CFG 0x00000840 +#define FRF_AB_RX_MAX_PF_LBN 2 +#define FRF_AB_RX_MAX_PF_WIDTH 2 +#define FRF_AZ_RX_DC_SIZE_LBN 0 +#define FRF_AZ_RX_DC_SIZE_WIDTH 2 +#define FFE_AZ_RX_DC_SIZE_64 3 +#define FFE_AZ_RX_DC_SIZE_32 2 +#define FFE_AZ_RX_DC_SIZE_16 1 +#define FFE_AZ_RX_DC_SIZE_8 0 + +/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */ +#define FR_AZ_RX_DC_PF_WM 0x00000850 +#define FRF_AZ_RX_DC_PF_HWM_LBN 6 +#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6 +#define FRF_AZ_RX_DC_PF_LWM_LBN 0 +#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6 + +/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */ +#define FR_BZ_RX_RSS_TKEY 0x00000860 +#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64 +#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64 +#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0 +#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64 + +/* RX_NODESC_DROP_REG: Receive dropped packet counter register */ +#define FR_AZ_RX_NODESC_DROP 0x00000880 +#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0 +#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32 +#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0 +#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16 + +/* RX_SELF_RST_REG: Receive self reset register */ +#define FR_AA_RX_SELF_RST 0x00000890 +#define FRF_AA_RX_ISCSI_DIS_LBN 17 +#define FRF_AA_RX_ISCSI_DIS_WIDTH 1 +#define FRF_AA_RX_SW_RST_REG_LBN 16 +#define FRF_AA_RX_SW_RST_REG_WIDTH 1 +#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9 +#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1 +#define FRF_AA_RX_SELF_RST_EN_LBN 8 +#define FRF_AA_RX_SELF_RST_EN_WIDTH 1 +#define FRF_AA_RX_MAX_PF_LAT_LBN 4 +#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4 +#define 
FRF_AA_RX_MAX_LU_LAT_LBN 0 +#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4 + +/* RX_DEBUG_REG: undocumented register */ +#define FR_AZ_RX_DEBUG 0x000008a0 +#define FRF_AZ_RX_DEBUG_LBN 0 +#define FRF_AZ_RX_DEBUG_WIDTH 64 + +/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */ +#define FR_AZ_RX_PUSH_DROP 0x000008b0 +#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32 + +/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */ +#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128 + +/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */ +#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128 + +/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */ +#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0 +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66 +#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65 +#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64 +#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0 +#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64 + +/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */ +#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00 +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12 +#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1 +#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0 +#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12 + +/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ +#define FR_BZ_TX_DESC_UPD_P0 0x00000a10 +#define FR_BZ_TX_DESC_UPD_P0_STEP 8192 +#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024 +/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */ +#define FR_AA_TX_DESC_UPD_KER 0x00000a10 +#define FR_AA_TX_DESC_UPD_KER_STEP 8192 +#define FR_AA_TX_DESC_UPD_KER_ROWS 8 +/* TX_DESC_UPD_REGP123: Transmit descriptor update register. 
*/ +#define FR_BB_TX_DESC_UPD_P123 0x01000a10 +#define FR_BB_TX_DESC_UPD_P123_STEP 8192 +#define FR_BB_TX_DESC_UPD_P123_ROWS 3072 +#define FRF_AZ_TX_DESC_WPTR_LBN 96 +#define FRF_AZ_TX_DESC_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95 +#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1 +#define FRF_AZ_TX_DESC_LBN 0 +#define FRF_AZ_TX_DESC_WIDTH 95 + +/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */ +#define FR_AZ_TX_DC_CFG 0x00000a20 +#define FRF_AZ_TX_DC_SIZE_LBN 0 +#define FRF_AZ_TX_DC_SIZE_WIDTH 2 +#define FFE_AZ_TX_DC_SIZE_32 2 +#define FFE_AZ_TX_DC_SIZE_16 1 +#define FFE_AZ_TX_DC_SIZE_8 0 + +/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */ +#define FR_AA_TX_CHKSM_CFG 0x00000a30 +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96 +#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64 +#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32 +#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0 +#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32 + +/* TX_CFG_REG: Transmit configuration register */ +#define FR_AZ_TX_CFG 0x00000a50 +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114 +#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113 +#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105 +#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97 +#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89 +#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81 +#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73 +#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65 +#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64 +#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48 +#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16 +#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47 +#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1 +#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16 +#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15 +#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5 +#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1 +#define FRF_AZ_TX_P1_PRI_EN_LBN 4 +#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1 +#define FRF_AZ_TX_OWNERR_CTL_LBN 2 +#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1 +#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0 +#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1 + +/* TX_PUSH_DROP_REG: Transmit push dropped register */ +#define FR_AZ_TX_PUSH_DROP 0x00000a60 +#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0 +#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32 + +/* TX_RESERVED_REG: Transmit configuration register */ +#define FR_AZ_TX_RESERVED 0x00000a80 +#define FRF_AZ_TX_EVT_CNT_LBN 121 +#define FRF_AZ_TX_EVT_CNT_WIDTH 7 +#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119 +#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2 +#define FRF_AZ_TX_RD_COMP_TMR_LBN 96 +#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23 +#define FRF_AZ_TX_PUSH_EN_LBN 89 +#define FRF_AZ_TX_PUSH_EN_WIDTH 1 +#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88 +#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1 +#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85 +#define 
FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1 +#define FRF_AZ_TX_DMAR_ST_P0_LBN 81 +#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1 +#define FRF_AZ_TX_DMAQ_ST_LBN 78 +#define FRF_AZ_TX_DMAQ_ST_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_LBN 64 +#define FRF_AZ_TX_RX_SPACER_WIDTH 8 +#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60 +#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1 +#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59 +#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1 +#define FRF_AZ_TX_PS_EVT_DIS_LBN 58 +#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1 +#define FRF_AZ_TX_RX_SPACER_EN_LBN 57 +#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1 +#define FRF_AZ_TX_XP_TIMER_LBN 52 +#define FRF_AZ_TX_XP_TIMER_WIDTH 5 +#define FRF_AZ_TX_PREF_SPACER_LBN 44 +#define FRF_AZ_TX_PREF_SPACER_WIDTH 8 +#define FRF_AZ_TX_PREF_WD_TMR_LBN 22 +#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22 +#define FRF_AZ_TX_ONLY1TAG_LBN 21 +#define FRF_AZ_TX_ONLY1TAG_WIDTH 1 +#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19 +#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2 +#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18 +#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1 +#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17 +#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1 +#define FRF_AA_TX_DMA_FF_THR_LBN 16 +#define FRF_AA_TX_DMA_FF_THR_WIDTH 1 +#define FRF_AZ_TX_DMA_SPACER_LBN 8 +#define FRF_AZ_TX_DMA_SPACER_WIDTH 8 +#define FRF_AA_TX_TCP_DIS_LBN 7 +#define FRF_AA_TX_TCP_DIS_WIDTH 1 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7 +#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1 +#define FRF_AA_TX_IP_DIS_LBN 6 +#define FRF_AA_TX_IP_DIS_WIDTH 1 +#define FRF_AZ_TX_MAX_CPL_LBN 2 +#define FRF_AZ_TX_MAX_CPL_WIDTH 2 +#define FFE_AZ_TX_MAX_CPL_16 3 +#define FFE_AZ_TX_MAX_CPL_8 2 +#define FFE_AZ_TX_MAX_CPL_4 1 +#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0 +#define FRF_AZ_TX_MAX_PREF_LBN 0 +#define FRF_AZ_TX_MAX_PREF_WIDTH 2 +#define FFE_AZ_TX_MAX_PREF_32 3 +#define FFE_AZ_TX_MAX_PREF_16 2 +#define FFE_AZ_TX_MAX_PREF_8 1 +#define FFE_AZ_TX_MAX_PREF_OFF 0 + +/* TX_PACE_REG: Transmit pace control register */ +#define FR_BZ_TX_PACE 0x00000a90 +#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19 +#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10 +#define FRF_BZ_TX_PACE_SB_AF_LBN 9 +#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10 +#define FRF_BZ_TX_PACE_FB_BASE_LBN 5 +#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4 +#define FRF_BZ_TX_PACE_BIN_TH_LBN 0 +#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5 + +/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */ +#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0 +#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0 +#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16 + +/* TX_VLAN_REG: Transmit VLAN tag register */ +#define FR_BB_TX_VLAN 0x00000ae0 +#define FRF_BB_TX_VLAN_EN_LBN 127 +#define FRF_BB_TX_VLAN_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125 +#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124 +#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN7_LBN 112 +#define FRF_BB_TX_VLAN7_WIDTH 12 +#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109 +#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108 +#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN6_LBN 96 +#define FRF_BB_TX_VLAN6_WIDTH 12 +#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93 +#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92 +#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN5_LBN 80 +#define FRF_BB_TX_VLAN5_WIDTH 12 +#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77 +#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1 +#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76 +#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1 +#define FRF_BB_TX_VLAN4_LBN 64 +#define FRF_BB_TX_VLAN4_WIDTH 12 +#define 
FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_LBN 48
+#define FRF_BB_TX_VLAN3_WIDTH 12
+#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_LBN 32
+#define FRF_BB_TX_VLAN2_WIDTH 12
+#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_LBN 16
+#define FRF_BB_TX_VLAN1_WIDTH 12
+#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_LBN 0
+#define FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
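(Editorial aside, not part of the imported header: every field in this file is described by an *_LBN macro giving its lowest bit number and an *_WIDTH macro giving its width in bits. The sketch below shows, under that assumption only, how such a pair can be folded into a shift-and-mask update; it uses the FR_BZ_TX_PACE fields defined earlier purely as an illustration, assumes the field lies entirely within the 64-bit word being accessed, and stands in for the driver's own multi-word register accessors rather than reproducing them. The helper names reg_field_set and tx_pace_reg_value are hypothetical.)

#include <stdint.h>

/* Mask covering a field of the given width (assumes width < 64). */
#define REG_FIELD_MASK(width)  ((((uint64_t)1) << (width)) - 1)

/* Replace the field described by (lbn, width) inside @word with @val. */
static inline uint64_t reg_field_set(uint64_t word, unsigned int lbn,
				     unsigned int width, uint64_t val)
{
	word &= ~(REG_FIELD_MASK(width) << lbn);
	word |= (val & REG_FIELD_MASK(width)) << lbn;
	return word;
}

/* Illustration only: build a TX_PACE_REG value from two of its fields,
 * using the FRF_BZ_TX_PACE_* LBN/WIDTH pairs defined above. */
static inline uint64_t tx_pace_reg_value(unsigned int bin_th,
					 unsigned int sb_not_af)
{
	uint64_t word = 0;

	word = reg_field_set(word, FRF_BZ_TX_PACE_BIN_TH_LBN,
			     FRF_BZ_TX_PACE_BIN_TH_WIDTH, bin_th);
	word = reg_field_set(word, FRF_BZ_TX_PACE_SB_NOT_AF_LBN,
			     FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH, sb_not_af);
	return word;
}

(End of aside; the TX_IPFIL_PORTEN_REG field definitions continue below.)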
+#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4 +#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2 +#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1 +#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0 +#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1 + +/* TX_IPFIL_TBL: Transmit IP source address filter table */ +#define FR_BB_TX_IPFIL_TBL 0x00000b00 +#define FR_BB_TX_IPFIL_TBL_STEP 16 +#define FR_BB_TX_IPFIL_TBL_ROWS 16 +#define FRF_BB_TX_IPFIL_MASK_1_LBN 96 +#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32 +#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64 +#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32 +#define FRF_BB_TX_IPFIL_MASK_0_LBN 32 +#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32 +#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0 +#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32 + +/* MD_TXD_REG: PHY management transmit data register */ +#define FR_AB_MD_TXD 0x00000c00 +#define FRF_AB_MD_TXD_LBN 0 +#define FRF_AB_MD_TXD_WIDTH 16 + +/* MD_RXD_REG: PHY management receive data register */ +#define FR_AB_MD_RXD 0x00000c10 +#define FRF_AB_MD_RXD_LBN 0 +#define FRF_AB_MD_RXD_WIDTH 16 + +/* MD_CS_REG: PHY management configuration & status register */ +#define FR_AB_MD_CS 0x00000c20 +#define FRF_AB_MD_RD_EN_CMD_LBN 15 +#define FRF_AB_MD_RD_EN_CMD_WIDTH 1 +#define FRF_AB_MD_WR_EN_CMD_LBN 14 +#define FRF_AB_MD_WR_EN_CMD_WIDTH 1 +#define FRF_AB_MD_ADDR_CMD_LBN 13 +#define FRF_AB_MD_ADDR_CMD_WIDTH 1 +#define FRF_AB_MD_PT_LBN 7 +#define FRF_AB_MD_PT_WIDTH 3 +#define FRF_AB_MD_PL_LBN 6 +#define FRF_AB_MD_PL_WIDTH 1 +#define FRF_AB_MD_INT_CLR_LBN 5 +#define FRF_AB_MD_INT_CLR_WIDTH 1 +#define FRF_AB_MD_GC_LBN 4 +#define FRF_AB_MD_GC_WIDTH 1 +#define FRF_AB_MD_PRSP_LBN 3 +#define FRF_AB_MD_PRSP_WIDTH 1 +#define FRF_AB_MD_RIC_LBN 2 +#define FRF_AB_MD_RIC_WIDTH 1 +#define FRF_AB_MD_RDC_LBN 1 +#define FRF_AB_MD_RDC_WIDTH 1 +#define FRF_AB_MD_WRC_LBN 0 +#define FRF_AB_MD_WRC_WIDTH 1 + +/* MD_PHY_ADR_REG: PHY management PHY address register */ +#define FR_AB_MD_PHY_ADR 0x00000c30 +#define FRF_AB_MD_PHY_ADR_LBN 0 +#define FRF_AB_MD_PHY_ADR_WIDTH 16 + +/* MD_ID_REG: PHY management ID register */ +#define FR_AB_MD_ID 0x00000c40 +#define FRF_AB_MD_PRT_ADR_LBN 11 +#define FRF_AB_MD_PRT_ADR_WIDTH 5 +#define FRF_AB_MD_DEV_ADR_LBN 6 +#define FRF_AB_MD_DEV_ADR_WIDTH 5 + +/* MD_STAT_REG: PHY management status & mask register */ +#define FR_AB_MD_STAT 0x00000c50 +#define FRF_AB_MD_PINT_LBN 4 +#define FRF_AB_MD_PINT_WIDTH 1 +#define FRF_AB_MD_DONE_LBN 3 +#define FRF_AB_MD_DONE_WIDTH 1 +#define FRF_AB_MD_BSERR_LBN 2 +#define FRF_AB_MD_BSERR_WIDTH 1 +#define FRF_AB_MD_LNFL_LBN 1 +#define FRF_AB_MD_LNFL_WIDTH 1 +#define FRF_AB_MD_BSY_LBN 0 +#define FRF_AB_MD_BSY_WIDTH 1 + +/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */ +#define FR_AB_MAC_STAT_DMA 0x00000c60 +#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48 +#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1 +#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0 +#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48 + +/* MAC_CTRL_REG: Port MAC control register */ +#define FR_AB_MAC_CTRL 0x00000c80 +#define FRF_AB_MAC_XOFF_VAL_LBN 16 +#define FRF_AB_MAC_XOFF_VAL_WIDTH 16 +#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7 +#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1 +#define FRF_AB_MAC_XG_DISTXCRC_LBN 5 +#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1 +#define FRF_AB_MAC_BCAD_ACPT_LBN 4 +#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1 +#define FRF_AB_MAC_UC_PROM_LBN 3 +#define FRF_AB_MAC_UC_PROM_WIDTH 1 +#define FRF_AB_MAC_LINK_STATUS_LBN 2 +#define FRF_AB_MAC_LINK_STATUS_WIDTH 1 +#define FRF_AB_MAC_SPEED_LBN 0 +#define FRF_AB_MAC_SPEED_WIDTH 2 +#define FFE_AB_MAC_SPEED_10G 3 +#define 
FFE_AB_MAC_SPEED_1G 2 +#define FFE_AB_MAC_SPEED_100M 1 +#define FFE_AB_MAC_SPEED_10M 0 + +/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */ +#define FR_BB_GEN_MODE 0x00000c90 +#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3 +#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2 +#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1 +#define FRF_BB_XFP_PHY_INT_MASK_LBN 1 +#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1 +#define FRF_BB_XG_PHY_INT_MASK_LBN 0 +#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1 + +/* MAC_MC_HASH_REG0: Multicast address hash table */ +#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0 +#define FRF_AB_MAC_MCAST_HASH0_LBN 0 +#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128 + +/* MAC_MC_HASH_REG1: Multicast address hash table */ +#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0 +#define FRF_AB_MAC_MCAST_HASH1_LBN 0 +#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128 + +/* GM_CFG1_REG: GMAC configuration register 1 */ +#define FR_AB_GM_CFG1 0x00000e00 +#define FRF_AB_GM_SW_RST_LBN 31 +#define FRF_AB_GM_SW_RST_WIDTH 1 +#define FRF_AB_GM_SIM_RST_LBN 30 +#define FRF_AB_GM_SIM_RST_WIDTH 1 +#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19 +#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18 +#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1 +#define FRF_AB_GM_RST_RX_FUNC_LBN 17 +#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1 +#define FRF_AB_GM_RST_TX_FUNC_LBN 16 +#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1 +#define FRF_AB_GM_LOOP_LBN 8 +#define FRF_AB_GM_LOOP_WIDTH 1 +#define FRF_AB_GM_RX_FC_EN_LBN 5 +#define FRF_AB_GM_RX_FC_EN_WIDTH 1 +#define FRF_AB_GM_TX_FC_EN_LBN 4 +#define FRF_AB_GM_TX_FC_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_RXEN_LBN 3 +#define FRF_AB_GM_SYNC_RXEN_WIDTH 1 +#define FRF_AB_GM_RX_EN_LBN 2 +#define FRF_AB_GM_RX_EN_WIDTH 1 +#define FRF_AB_GM_SYNC_TXEN_LBN 1 +#define FRF_AB_GM_SYNC_TXEN_WIDTH 1 +#define FRF_AB_GM_TX_EN_LBN 0 +#define FRF_AB_GM_TX_EN_WIDTH 1 + +/* GM_CFG2_REG: GMAC configuration register 2 */ +#define FR_AB_GM_CFG2 0x00000e10 +#define FRF_AB_GM_PAMBL_LEN_LBN 12 +#define FRF_AB_GM_PAMBL_LEN_WIDTH 4 +#define FRF_AB_GM_IF_MODE_LBN 8 +#define FRF_AB_GM_IF_MODE_WIDTH 2 +#define FFE_AB_IF_MODE_BYTE_MODE 2 +#define FFE_AB_IF_MODE_NIBBLE_MODE 1 +#define FRF_AB_GM_HUGE_FRM_EN_LBN 5 +#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1 +#define FRF_AB_GM_LEN_CHK_LBN 4 +#define FRF_AB_GM_LEN_CHK_WIDTH 1 +#define FRF_AB_GM_PAD_CRC_EN_LBN 2 +#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1 +#define FRF_AB_GM_CRC_EN_LBN 1 +#define FRF_AB_GM_CRC_EN_WIDTH 1 +#define FRF_AB_GM_FD_LBN 0 +#define FRF_AB_GM_FD_WIDTH 1 + +/* GM_IPG_REG: GMAC IPG register */ +#define FR_AB_GM_IPG 0x00000e20 +#define FRF_AB_GM_NONB2B_IPG1_LBN 24 +#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7 +#define FRF_AB_GM_NONB2B_IPG2_LBN 16 +#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7 +#define FRF_AB_GM_MIN_IPG_ENF_LBN 8 +#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8 +#define FRF_AB_GM_B2B_IPG_LBN 0 +#define FRF_AB_GM_B2B_IPG_WIDTH 7 + +/* GM_HD_REG: GMAC half duplex register */ +#define FR_AB_GM_HD 0x00000e30 +#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20 +#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4 +#define FRF_AB_GM_ALT_BOFF_EN_LBN 19 +#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1 +#define FRF_AB_GM_BP_NO_BOFF_LBN 18 +#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1 +#define FRF_AB_GM_DIS_BOFF_LBN 17 +#define FRF_AB_GM_DIS_BOFF_WIDTH 1 +#define FRF_AB_GM_EXDEF_TX_EN_LBN 16 +#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1 +#define FRF_AB_GM_RTRY_LIMIT_LBN 12 +#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4 +#define FRF_AB_GM_COL_WIN_LBN 0 +#define FRF_AB_GM_COL_WIN_WIDTH 10 + +/* GM_MAX_FLEN_REG: GMAC maximum 
frame length register */ +#define FR_AB_GM_MAX_FLEN 0x00000e40 +#define FRF_AB_GM_MAX_FLEN_LBN 0 +#define FRF_AB_GM_MAX_FLEN_WIDTH 16 + +/* GM_TEST_REG: GMAC test register */ +#define FR_AB_GM_TEST 0x00000e70 +#define FRF_AB_GM_MAX_BOFF_LBN 3 +#define FRF_AB_GM_MAX_BOFF_WIDTH 1 +#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2 +#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1 +#define FRF_AB_GM_TEST_PAUSE_LBN 1 +#define FRF_AB_GM_TEST_PAUSE_WIDTH 1 +#define FRF_AB_GM_SHORT_SLOT_LBN 0 +#define FRF_AB_GM_SHORT_SLOT_WIDTH 1 + +/* GM_ADR1_REG: GMAC station address register 1 */ +#define FR_AB_GM_ADR1 0x00000f00 +#define FRF_AB_GM_ADR_B0_LBN 24 +#define FRF_AB_GM_ADR_B0_WIDTH 8 +#define FRF_AB_GM_ADR_B1_LBN 16 +#define FRF_AB_GM_ADR_B1_WIDTH 8 +#define FRF_AB_GM_ADR_B2_LBN 8 +#define FRF_AB_GM_ADR_B2_WIDTH 8 +#define FRF_AB_GM_ADR_B3_LBN 0 +#define FRF_AB_GM_ADR_B3_WIDTH 8 + +/* GM_ADR2_REG: GMAC station address register 2 */ +#define FR_AB_GM_ADR2 0x00000f10 +#define FRF_AB_GM_ADR_B4_LBN 24 +#define FRF_AB_GM_ADR_B4_WIDTH 8 +#define FRF_AB_GM_ADR_B5_LBN 16 +#define FRF_AB_GM_ADR_B5_WIDTH 8 + +/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */ +#define FR_AB_GMF_CFG0 0x00000f20 +#define FRF_AB_GMF_FTFENRPLY_LBN 20 +#define FRF_AB_GMF_FTFENRPLY_WIDTH 1 +#define FRF_AB_GMF_STFENRPLY_LBN 19 +#define FRF_AB_GMF_STFENRPLY_WIDTH 1 +#define FRF_AB_GMF_FRFENRPLY_LBN 18 +#define FRF_AB_GMF_FRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_SRFENRPLY_LBN 17 +#define FRF_AB_GMF_SRFENRPLY_WIDTH 1 +#define FRF_AB_GMF_WTMENRPLY_LBN 16 +#define FRF_AB_GMF_WTMENRPLY_WIDTH 1 +#define FRF_AB_GMF_FTFENREQ_LBN 12 +#define FRF_AB_GMF_FTFENREQ_WIDTH 1 +#define FRF_AB_GMF_STFENREQ_LBN 11 +#define FRF_AB_GMF_STFENREQ_WIDTH 1 +#define FRF_AB_GMF_FRFENREQ_LBN 10 +#define FRF_AB_GMF_FRFENREQ_WIDTH 1 +#define FRF_AB_GMF_SRFENREQ_LBN 9 +#define FRF_AB_GMF_SRFENREQ_WIDTH 1 +#define FRF_AB_GMF_WTMENREQ_LBN 8 +#define FRF_AB_GMF_WTMENREQ_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFT_LBN 4 +#define FRF_AB_GMF_HSTRSTFT_WIDTH 1 +#define FRF_AB_GMF_HSTRSTST_LBN 3 +#define FRF_AB_GMF_HSTRSTST_WIDTH 1 +#define FRF_AB_GMF_HSTRSTFR_LBN 2 +#define FRF_AB_GMF_HSTRSTFR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTSR_LBN 1 +#define FRF_AB_GMF_HSTRSTSR_WIDTH 1 +#define FRF_AB_GMF_HSTRSTWT_LBN 0 +#define FRF_AB_GMF_HSTRSTWT_WIDTH 1 + +/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */ +#define FR_AB_GMF_CFG1 0x00000f30 +#define FRF_AB_GMF_CFGFRTH_LBN 16 +#define FRF_AB_GMF_CFGFRTH_WIDTH 5 +#define FRF_AB_GMF_CFGXOFFRTX_LBN 0 +#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16 + +/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */ +#define FR_AB_GMF_CFG2 0x00000f40 +#define FRF_AB_GMF_CFGHWM_LBN 16 +#define FRF_AB_GMF_CFGHWM_WIDTH 6 +#define FRF_AB_GMF_CFGLWM_LBN 0 +#define FRF_AB_GMF_CFGLWM_WIDTH 6 + +/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */ +#define FR_AB_GMF_CFG3 0x00000f50 +#define FRF_AB_GMF_CFGHWMFT_LBN 16 +#define FRF_AB_GMF_CFGHWMFT_WIDTH 6 +#define FRF_AB_GMF_CFGFTTH_LBN 0 +#define FRF_AB_GMF_CFGFTTH_WIDTH 6 + +/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ +#define FR_AB_GMF_CFG4 0x00000f60 +#define FRF_AB_GMF_HSTFLTRFRM_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18 + +/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ +#define FR_AB_GMF_CFG5 0x00000f70 +#define FRF_AB_GMF_CFGHDPLX_LBN 22 +#define FRF_AB_GMF_CFGHDPLX_WIDTH 1 +#define FRF_AB_GMF_SRFULL_LBN 21 +#define FRF_AB_GMF_SRFULL_WIDTH 1 +#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20 +#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1 +#define FRF_AB_GMF_CFGBYTMODE_LBN 19 +#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1 +#define 
FRF_AB_GMF_HSTDRPLT64_LBN 18 +#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1 +#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0 +#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18 + +/* TX_SRC_MAC_TBL: Transmit IP source address filter table */ +#define FR_BB_TX_SRC_MAC_TBL 0x00001000 +#define FR_BB_TX_SRC_MAC_TBL_STEP 16 +#define FR_BB_TX_SRC_MAC_TBL_ROWS 16 +#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64 +#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48 +#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0 +#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48 + +/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */ +#define FR_BB_TX_SRC_MAC_CTL 0x00001100 +#define FRF_BB_TX_SRC_DROP_CTR_LBN 16 +#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16 +#define FRF_BB_TX_SRC_FLTR_EN_LBN 15 +#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1 +#define FRF_BB_TX_DROP_CTR_CLR_LBN 12 +#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1 +#define FRF_BB_TX_MAC_QID_SEL_LBN 0 +#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3 + +/* XM_ADR_LO_REG: XGMAC address register low */ +#define FR_AB_XM_ADR_LO 0x00001200 +#define FRF_AB_XM_ADR_LO_LBN 0 +#define FRF_AB_XM_ADR_LO_WIDTH 32 + +/* XM_ADR_HI_REG: XGMAC address register high */ +#define FR_AB_XM_ADR_HI 0x00001210 +#define FRF_AB_XM_ADR_HI_LBN 0 +#define FRF_AB_XM_ADR_HI_WIDTH 16 + +/* XM_GLB_CFG_REG: XGMAC global configuration */ +#define FR_AB_XM_GLB_CFG 0x00001220 +#define FRF_AB_XM_RMTFLT_GEN_LBN 17 +#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1 +#define FRF_AB_XM_DEBUG_MODE_LBN 16 +#define FRF_AB_XM_DEBUG_MODE_WIDTH 1 +#define FRF_AB_XM_RX_STAT_EN_LBN 11 +#define FRF_AB_XM_RX_STAT_EN_WIDTH 1 +#define FRF_AB_XM_TX_STAT_EN_LBN 10 +#define FRF_AB_XM_TX_STAT_EN_WIDTH 1 +#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6 +#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_WAN_MODE_LBN 5 +#define FRF_AB_XM_WAN_MODE_WIDTH 1 +#define FRF_AB_XM_INTCLR_MODE_LBN 3 +#define FRF_AB_XM_INTCLR_MODE_WIDTH 1 +#define FRF_AB_XM_CORE_RST_LBN 0 +#define FRF_AB_XM_CORE_RST_WIDTH 1 + +/* XM_TX_CFG_REG: XGMAC transmit configuration */ +#define FR_AB_XM_TX_CFG 0x00001230 +#define FRF_AB_XM_TX_PROG_LBN 24 +#define FRF_AB_XM_TX_PROG_WIDTH 1 +#define FRF_AB_XM_IPG_LBN 16 +#define FRF_AB_XM_IPG_WIDTH 4 +#define FRF_AB_XM_FCNTL_LBN 10 +#define FRF_AB_XM_FCNTL_WIDTH 1 +#define FRF_AB_XM_TXCRC_LBN 8 +#define FRF_AB_XM_TXCRC_WIDTH 1 +#define FRF_AB_XM_EDRC_LBN 6 +#define FRF_AB_XM_EDRC_WIDTH 1 +#define FRF_AB_XM_AUTO_PAD_LBN 5 +#define FRF_AB_XM_AUTO_PAD_WIDTH 1 +#define FRF_AB_XM_TX_PRMBL_LBN 2 +#define FRF_AB_XM_TX_PRMBL_WIDTH 1 +#define FRF_AB_XM_TXEN_LBN 1 +#define FRF_AB_XM_TXEN_WIDTH 1 +#define FRF_AB_XM_TX_RST_LBN 0 +#define FRF_AB_XM_TX_RST_WIDTH 1 + +/* XM_RX_CFG_REG: XGMAC receive configuration */ +#define FR_AB_XM_RX_CFG 0x00001240 +#define FRF_AB_XM_PASS_LENERR_LBN 26 +#define FRF_AB_XM_PASS_LENERR_WIDTH 1 +#define FRF_AB_XM_PASS_CRC_ERR_LBN 25 +#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1 +#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24 +#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_REJ_BCAST_LBN 20 +#define FRF_AB_XM_REJ_BCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11 +#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1 +#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9 +#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1 +#define FRF_AB_XM_AUTO_DEPAD_LBN 8 +#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1 +#define FRF_AB_XM_RXCRC_LBN 3 +#define FRF_AB_XM_RXCRC_WIDTH 1 +#define FRF_AB_XM_RX_PRMBL_LBN 2 +#define FRF_AB_XM_RX_PRMBL_WIDTH 1 +#define FRF_AB_XM_RXEN_LBN 1 +#define FRF_AB_XM_RXEN_WIDTH 1 +#define FRF_AB_XM_RX_RST_LBN 0 +#define FRF_AB_XM_RX_RST_WIDTH 1 + +/* XM_MGT_INT_MASK: documentation to be written for 
sum_XM_MGT_INT_MASK */ +#define FR_AB_XM_MGT_INT_MASK 0x00001250 +#define FRF_AB_XM_MSK_STA_INTR_LBN 16 +#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9 +#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8 +#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_MSK_RMTFLT_LBN 1 +#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1 +#define FRF_AB_XM_MSK_LCLFLT_LBN 0 +#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1 + +/* XM_FC_REG: XGMAC flow control register */ +#define FR_AB_XM_FC 0x00001270 +#define FRF_AB_XM_PAUSE_TIME_LBN 16 +#define FRF_AB_XM_PAUSE_TIME_WIDTH 16 +#define FRF_AB_XM_RX_MAC_STAT_LBN 11 +#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_TX_MAC_STAT_LBN 10 +#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1 +#define FRF_AB_XM_MCNTL_PASS_LBN 8 +#define FRF_AB_XM_MCNTL_PASS_WIDTH 2 +#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6 +#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1 +#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5 +#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1 +#define FRF_AB_XM_ZPAUSE_LBN 2 +#define FRF_AB_XM_ZPAUSE_WIDTH 1 +#define FRF_AB_XM_XMIT_PAUSE_LBN 1 +#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1 +#define FRF_AB_XM_DIS_FCNTL_LBN 0 +#define FRF_AB_XM_DIS_FCNTL_WIDTH 1 + +/* XM_PAUSE_TIME_REG: XGMAC pause time register */ +#define FR_AB_XM_PAUSE_TIME 0x00001290 +#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16 +#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16 +#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0 +#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16 + +/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ +#define FR_AB_XM_TX_PARAM 0x000012d0 +#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31 +#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16 +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3 +#define FRF_AB_XM_PAD_CHAR_LBN 0 +#define FRF_AB_XM_PAD_CHAR_WIDTH 8 + +/* XM_RX_PARAM_REG: XGMAC receive parameter register */ +#define FR_AB_XM_RX_PARAM 0x000012e0 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0 +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3 + +/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */ +#define FR_AB_XM_MGT_INT_MSK 0x000012f0 +#define FRF_AB_XM_STAT_CNTR_OF_LBN 9 +#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1 +#define FRF_AB_XM_STAT_CNTR_HF_LBN 8 +#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1 +#define FRF_AB_XM_PRMBLE_ERR_LBN 2 +#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1 +#define FRF_AB_XM_RMTFLT_LBN 1 +#define FRF_AB_XM_RMTFLT_WIDTH 1 +#define FRF_AB_XM_LCLFLT_LBN 0 +#define FRF_AB_XM_LCLFLT_WIDTH 1 + +/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */ +#define FR_AB_XX_PWR_RST 0x00001300 +#define FRF_AB_XX_PWRDND_SIG_LBN 31 +#define FRF_AB_XX_PWRDND_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNC_SIG_LBN 30 +#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNB_SIG_LBN 29 +#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1 +#define FRF_AB_XX_PWRDNA_SIG_LBN 28 +#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1 +#define FRF_AB_XX_SIM_MODE_LBN 27 +#define FRF_AB_XX_SIM_MODE_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25 +#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1 +#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24 +#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETD_SIG_LBN 23 +#define FRF_AB_XX_RESETD_SIG_WIDTH 1 +#define FRF_AB_XX_RESETC_SIG_LBN 22 +#define FRF_AB_XX_RESETC_SIG_WIDTH 1 +#define FRF_AB_XX_RESETB_SIG_LBN 21 
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1 +#define FRF_AB_XX_RESETA_SIG_LBN 20 +#define FRF_AB_XX_RESETA_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18 +#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17 +#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1 +#define FRF_AB_XX_SD_RST_ACT_LBN 16 +#define FRF_AB_XX_SD_RST_ACT_WIDTH 1 +#define FRF_AB_XX_PWRDND_EN_LBN 15 +#define FRF_AB_XX_PWRDND_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNC_EN_LBN 14 +#define FRF_AB_XX_PWRDNC_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNB_EN_LBN 13 +#define FRF_AB_XX_PWRDNB_EN_WIDTH 1 +#define FRF_AB_XX_PWRDNA_EN_LBN 12 +#define FRF_AB_XX_PWRDNA_EN_WIDTH 1 +#define FRF_AB_XX_RSTPLLCD_EN_LBN 9 +#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1 +#define FRF_AB_XX_RSTPLLAB_EN_LBN 8 +#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1 +#define FRF_AB_XX_RESETD_EN_LBN 7 +#define FRF_AB_XX_RESETD_EN_WIDTH 1 +#define FRF_AB_XX_RESETC_EN_LBN 6 +#define FRF_AB_XX_RESETC_EN_WIDTH 1 +#define FRF_AB_XX_RESETB_EN_LBN 5 +#define FRF_AB_XX_RESETB_EN_WIDTH 1 +#define FRF_AB_XX_RESETA_EN_LBN 4 +#define FRF_AB_XX_RESETA_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2 +#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1 +#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1 +#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1 +#define FRF_AB_XX_RST_XX_EN_LBN 0 +#define FRF_AB_XX_RST_XX_EN_WIDTH 1 + +/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */ +#define FR_AB_XX_SD_CTL 0x00001310 +#define FRF_AB_XX_TERMADJ1_LBN 17 +#define FRF_AB_XX_TERMADJ1_WIDTH 1 +#define FRF_AB_XX_TERMADJ0_LBN 16 +#define FRF_AB_XX_TERMADJ0_WIDTH 1 +#define FRF_AB_XX_HIDRVD_LBN 15 +#define FRF_AB_XX_HIDRVD_WIDTH 1 +#define FRF_AB_XX_LODRVD_LBN 14 +#define FRF_AB_XX_LODRVD_WIDTH 1 +#define FRF_AB_XX_HIDRVC_LBN 13 +#define FRF_AB_XX_HIDRVC_WIDTH 1 +#define FRF_AB_XX_LODRVC_LBN 12 +#define FRF_AB_XX_LODRVC_WIDTH 1 +#define FRF_AB_XX_HIDRVB_LBN 11 +#define FRF_AB_XX_HIDRVB_WIDTH 1 +#define FRF_AB_XX_LODRVB_LBN 10 +#define FRF_AB_XX_LODRVB_WIDTH 1 +#define FRF_AB_XX_HIDRVA_LBN 9 +#define FRF_AB_XX_HIDRVA_WIDTH 1 +#define FRF_AB_XX_LODRVA_LBN 8 +#define FRF_AB_XX_LODRVA_WIDTH 1 +#define FRF_AB_XX_LPBKD_LBN 3 +#define FRF_AB_XX_LPBKD_WIDTH 1 +#define FRF_AB_XX_LPBKC_LBN 2 +#define FRF_AB_XX_LPBKC_WIDTH 1 +#define FRF_AB_XX_LPBKB_LBN 1 +#define FRF_AB_XX_LPBKB_WIDTH 1 +#define FRF_AB_XX_LPBKA_LBN 0 +#define FRF_AB_XX_LPBKA_WIDTH 1 + +/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ +#define FR_AB_XX_TXDRV_CTL 0x00001320 +#define FRF_AB_XX_DEQD_LBN 28 +#define FRF_AB_XX_DEQD_WIDTH 4 +#define FRF_AB_XX_DEQC_LBN 24 +#define FRF_AB_XX_DEQC_WIDTH 4 +#define FRF_AB_XX_DEQB_LBN 20 +#define FRF_AB_XX_DEQB_WIDTH 4 +#define FRF_AB_XX_DEQA_LBN 16 +#define FRF_AB_XX_DEQA_WIDTH 4 +#define FRF_AB_XX_DTXD_LBN 12 +#define FRF_AB_XX_DTXD_WIDTH 4 +#define FRF_AB_XX_DTXC_LBN 8 +#define FRF_AB_XX_DTXC_WIDTH 4 +#define FRF_AB_XX_DTXB_LBN 4 +#define FRF_AB_XX_DTXB_WIDTH 4 +#define FRF_AB_XX_DTXA_LBN 0 +#define FRF_AB_XX_DTXA_WIDTH 4 + +/* XX_PRBS_CTL_REG: documentation to be written for sum_XX_PRBS_CTL_REG */ +#define FR_AB_XX_PRBS_CTL 0x00001330 +#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30 +#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29 +#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28 +#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26 +#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25 +#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24 +#define 
FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22 +#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21 +#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20 +#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18 +#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17 +#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16 +#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14 +#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13 +#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12 +#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10 +#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9 +#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8 +#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6 +#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5 +#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4 +#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2 +#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2 +#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1 +#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0 +#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1 + +/* XX_PRBS_CHK_REG: documentation to be written for sum_XX_PRBS_CHK_REG */ +#define FR_AB_XX_PRBS_CHK 0x00001340 +#define FRF_AB_XX_REV_LB_EN_LBN 16 +#define FRF_AB_XX_REV_LB_EN_WIDTH 1 +#define FRF_AB_XX_CH3_DEG_DET_LBN 15 +#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14 +#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13 +#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH3_ERR_CHK_LBN 12 +#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH2_DEG_DET_LBN 11 +#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10 +#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9 +#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH2_ERR_CHK_LBN 8 +#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH1_DEG_DET_LBN 7 +#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6 +#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5 +#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH1_ERR_CHK_LBN 4 +#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1 +#define FRF_AB_XX_CH0_DEG_DET_LBN 3 +#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2 +#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1 +#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1 +#define FRF_AB_XX_CH0_ERR_CHK_LBN 0 +#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1 + +/* XX_PRBS_ERR_REG: documentation to be written for sum_XX_PRBS_ERR_REG */ +#define FR_AB_XX_PRBS_ERR 0x00001350 +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24 +#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16 +#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8 +#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0 +#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8 + +/* XX_CORE_STAT_REG: XAUI XGXS 
core status register */ +#define FR_AB_XX_CORE_STAT 0x00001360 +#define FRF_AB_XX_FORCE_SIG3_LBN 31 +#define FRF_AB_XX_FORCE_SIG3_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30 +#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_LBN 29 +#define FRF_AB_XX_FORCE_SIG2_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28 +#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_LBN 27 +#define FRF_AB_XX_FORCE_SIG1_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26 +#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_LBN 25 +#define FRF_AB_XX_FORCE_SIG0_WIDTH 1 +#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24 +#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1 +#define FRF_AB_XX_XGXS_LB_EN_LBN 23 +#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1 +#define FRF_AB_XX_XGMII_LB_EN_LBN 22 +#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1 +#define FRF_AB_XX_MATCH_FAULT_LBN 21 +#define FRF_AB_XX_MATCH_FAULT_WIDTH 1 +#define FRF_AB_XX_ALIGN_DONE_LBN 20 +#define FRF_AB_XX_ALIGN_DONE_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT3_LBN 19 +#define FRF_AB_XX_SYNC_STAT3_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT2_LBN 18 +#define FRF_AB_XX_SYNC_STAT2_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT1_LBN 17 +#define FRF_AB_XX_SYNC_STAT1_WIDTH 1 +#define FRF_AB_XX_SYNC_STAT0_LBN 16 +#define FRF_AB_XX_SYNC_STAT0_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH3_LBN 15 +#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH2_LBN 14 +#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH1_LBN 13 +#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1 +#define FRF_AB_XX_COMMA_DET_CH0_LBN 12 +#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11 +#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10 +#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9 +#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1 +#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8 +#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7 +#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6 +#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5 +#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1 +#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4 +#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH3_LBN 3 +#define FRF_AB_XX_DISPERR_CH3_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH2_LBN 2 +#define FRF_AB_XX_DISPERR_CH2_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH1_LBN 1 +#define FRF_AB_XX_DISPERR_CH1_WIDTH 1 +#define FRF_AB_XX_DISPERR_CH0_LBN 0 +#define FRF_AB_XX_DISPERR_CH0_WIDTH 1 + +/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */ +#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800 +#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4 +/* RX_DESC_PTR_TBL: Receive descriptor pointer table */ +#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000 +#define FR_BZ_RX_DESC_PTR_TBL_STEP 16 +#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024 +#define FRF_CZ_RX_HDR_SPLIT_LBN 90 +#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1 +#define FRF_AA_RX_RESET_LBN 89 +#define FRF_AA_RX_RESET_WIDTH 1 +#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88 +#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87 +#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86 +#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1 +#define FRF_AZ_RX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12 +#define 
FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_RX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_RX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_RX_DESCQ_SIZE_4K 3 +#define FFE_AZ_RX_DESCQ_SIZE_2K 2 +#define FFE_AZ_RX_DESCQ_SIZE_1K 1 +#define FFE_AZ_RX_DESCQ_SIZE_512 0 +#define FRF_AZ_RX_DESCQ_TYPE_LBN 2 +#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1 +#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1 +#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1 +#define FRF_AZ_RX_DESCQ_EN_LBN 0 +#define FRF_AZ_RX_DESCQ_EN_WIDTH 1 + +/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */ +#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900 +#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16 +#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8 +/* TX_DESC_PTR_TBL: Transmit descriptor pointer */ +#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000 +#define FR_BZ_TX_DESC_PTR_TBL_STEP 16 +#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096 +#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94 +#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93 +#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1 +#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92 +#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1 +#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91 +#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1 +#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90 +#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1 +#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89 +#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1 +#define FRF_AZ_TX_DESCQ_EN_LBN 88 +#define FRF_AZ_TX_DESCQ_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87 +#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1 +#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86 +#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1 +#define FRF_AZ_TX_DC_HW_RPTR_LBN 80 +#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6 +#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68 +#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56 +#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36 +#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20 +#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24 +#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12 +#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10 +#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14 +#define FRF_AZ_TX_DESCQ_LABEL_LBN 5 +#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5 +#define FRF_AZ_TX_DESCQ_SIZE_LBN 3 +#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2 +#define FFE_AZ_TX_DESCQ_SIZE_4K 3 +#define FFE_AZ_TX_DESCQ_SIZE_2K 2 +#define FFE_AZ_TX_DESCQ_SIZE_1K 1 +#define FFE_AZ_TX_DESCQ_SIZE_512 0 +#define FRF_AZ_TX_DESCQ_TYPE_LBN 1 +#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2 +#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0 +#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1 + +/* EVQ_PTR_TBL_KER: Event queue pointer table */ +#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00 +#define FR_AA_EVQ_PTR_TBL_KER_STEP 16 +#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4 +/* EVQ_PTR_TBL: Event queue pointer table */ +#define FR_BZ_EVQ_PTR_TBL 0x00f60000 +#define FR_BZ_EVQ_PTR_TBL_STEP 16 +#define FR_CZ_EVQ_PTR_TBL_ROWS 1024 +#define FR_BB_EVQ_PTR_TBL_ROWS 4096 +#define FRF_BZ_EVQ_RPTR_IGN_LBN 40 +#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39 +#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39 +#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1 +#define FRF_AZ_EVQ_NXT_WPTR_LBN 24 
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15 +#define FRF_AZ_EVQ_EN_LBN 23 +#define FRF_AZ_EVQ_EN_WIDTH 1 +#define FRF_AZ_EVQ_SIZE_LBN 20 +#define FRF_AZ_EVQ_SIZE_WIDTH 3 +#define FFE_AZ_EVQ_SIZE_32K 6 +#define FFE_AZ_EVQ_SIZE_16K 5 +#define FFE_AZ_EVQ_SIZE_8K 4 +#define FFE_AZ_EVQ_SIZE_4K 3 +#define FFE_AZ_EVQ_SIZE_2K 2 +#define FFE_AZ_EVQ_SIZE_1K 1 +#define FFE_AZ_EVQ_SIZE_512 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0 +#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20 + +/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */ +#define FR_AA_BUF_HALF_TBL_KER 0x00018000 +#define FR_AA_BUF_HALF_TBL_KER_STEP 8 +#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096 +/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */ +#define FR_BZ_BUF_HALF_TBL 0x00800000 +#define FR_BZ_BUF_HALF_TBL_STEP 8 +#define FR_CZ_BUF_HALF_TBL_ROWS 147456 +#define FR_BB_BUF_HALF_TBL_ROWS 524288 +#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44 +#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32 +#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12 +#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 + +/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */ +#define FR_AA_BUF_FULL_TBL_KER 0x00018000 +#define FR_AA_BUF_FULL_TBL_KER_STEP 8 +#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096 +/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */ +#define FR_BZ_BUF_FULL_TBL 0x00800000 +#define FR_BZ_BUF_FULL_TBL_STEP 8 +#define FR_CZ_BUF_FULL_TBL_ROWS 147456 +#define FR_BB_BUF_FULL_TBL_ROWS 917504 +#define FRF_AZ_BUF_FULL_UNUSED_LBN 51 +#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13 +#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50 +#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1 +#define FRF_AZ_BUF_ADR_REGION_LBN 48 +#define FRF_AZ_BUF_ADR_REGION_WIDTH 2 +#define FFE_AZ_BUF_ADR_REGN3 3 +#define FFE_AZ_BUF_ADR_REGN2 2 +#define FFE_AZ_BUF_ADR_REGN1 1 +#define FFE_AZ_BUF_ADR_REGN0 0 +#define FRF_AZ_BUF_ADR_FBUF_LBN 14 +#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34 +#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0 +#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14 + +/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */ +#define FR_BZ_RX_FILTER_TBL0 0x00f00000 +#define FR_BZ_RX_FILTER_TBL0_STEP 32 +#define FR_BZ_RX_FILTER_TBL0_ROWS 8192 +/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */ +#define FR_BB_RX_FILTER_TBL1 0x00f00010 +#define FR_BB_RX_FILTER_TBL1_STEP 32 +#define FR_BB_RX_FILTER_TBL1_ROWS 8192 +#define FRF_BZ_RSS_EN_LBN 110 +#define FRF_BZ_RSS_EN_WIDTH 1 +#define FRF_BZ_SCATTER_EN_LBN 109 +#define FRF_BZ_SCATTER_EN_WIDTH 1 +#define FRF_BZ_TCP_UDP_LBN 108 +#define FRF_BZ_TCP_UDP_WIDTH 1 +#define FRF_BZ_RXQ_ID_LBN 96 +#define FRF_BZ_RXQ_ID_WIDTH 12 +#define FRF_BZ_DEST_IP_LBN 64 +#define FRF_BZ_DEST_IP_WIDTH 32 +#define FRF_BZ_DEST_PORT_TCP_LBN 48 +#define FRF_BZ_DEST_PORT_TCP_WIDTH 16 +#define FRF_BZ_SRC_IP_LBN 16 +#define FRF_BZ_SRC_IP_WIDTH 32 +#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16 + +/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */ +#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010 +#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32 +#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_RMFT_RSS_EN_LBN 75 +#define FRF_CZ_RMFT_RSS_EN_WIDTH 1 +#define FRF_CZ_RMFT_SCATTER_EN_LBN 74 +#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1 +#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73 +#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1 +#define 
FRF_CZ_RMFT_RXQ_ID_LBN 61 +#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12 +#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_RMFT_DEST_MAC_LBN 12 +#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48 +#define FRF_CZ_RMFT_VLAN_ID_LBN 0 +#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12 + +/* TIMER_TBL: Timer table */ +#define FR_BZ_TIMER_TBL 0x00f70000 +#define FR_BZ_TIMER_TBL_STEP 16 +#define FR_CZ_TIMER_TBL_ROWS 1024 +#define FR_BB_TIMER_TBL_ROWS 4096 +#define FRF_CZ_TIMER_Q_EN_LBN 33 +#define FRF_CZ_TIMER_Q_EN_WIDTH 1 +#define FRF_CZ_INT_ARMD_LBN 32 +#define FRF_CZ_INT_ARMD_WIDTH 1 +#define FRF_CZ_INT_PEND_LBN 31 +#define FRF_CZ_INT_PEND_WIDTH 1 +#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30 +#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1 +#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16 +#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14 +#define FRF_CZ_TIMER_MODE_LBN 14 +#define FRF_CZ_TIMER_MODE_WIDTH 2 +#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3 +#define FFE_CZ_TIMER_MODE_TRIG_START 2 +#define FFE_CZ_TIMER_MODE_IMMED_START 1 +#define FFE_CZ_TIMER_MODE_DIS 0 +#define FRF_BB_TIMER_MODE_LBN 12 +#define FRF_BB_TIMER_MODE_WIDTH 2 +#define FFE_BB_TIMER_MODE_INT_HLDOFF 2 +#define FFE_BB_TIMER_MODE_TRIG_START 2 +#define FFE_BB_TIMER_MODE_IMMED_START 1 +#define FFE_BB_TIMER_MODE_DIS 0 +#define FRF_CZ_TIMER_VAL_LBN 0 +#define FRF_CZ_TIMER_VAL_WIDTH 14 +#define FRF_BB_TIMER_VAL_LBN 0 +#define FRF_BB_TIMER_VAL_WIDTH 12 + +/* TX_PACE_TBL: Transmit pacing table */ +#define FR_BZ_TX_PACE_TBL 0x00f80000 +#define FR_BZ_TX_PACE_TBL_STEP 16 +#define FR_CZ_TX_PACE_TBL_ROWS 1024 +#define FR_BB_TX_PACE_TBL_ROWS 4096 +#define FRF_BZ_TX_PACE_LBN 0 +#define FRF_BZ_TX_PACE_WIDTH 5 + +/* RX_INDIRECTION_TBL: RX Indirection Table */ +#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000 +#define FR_BZ_RX_INDIRECTION_TBL_STEP 16 +#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128 +#define FRF_BZ_IT_QUEUE_LBN 0 +#define FRF_BZ_IT_QUEUE_WIDTH 6 + +/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */ +#define FR_CZ_TX_FILTER_TBL0 0x00fc0000 +#define FR_CZ_TX_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_FILTER_TBL0_ROWS 8192 +#define FRF_CZ_TIFT_TCP_UDP_LBN 108 +#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1 +#define FRF_CZ_TIFT_TXQ_ID_LBN 96 +#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TIFT_DEST_IP_LBN 64 +#define FRF_CZ_TIFT_DEST_IP_WIDTH 32 +#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48 +#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16 +#define FRF_CZ_TIFT_SRC_IP_LBN 16 +#define FRF_CZ_TIFT_SRC_IP_WIDTH 32 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0 +#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16 + +/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */ +#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000 +#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16 +#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512 +#define FRF_CZ_TMFT_TXQ_ID_LBN 61 +#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12 +#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60 +#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1 +#define FRF_CZ_TMFT_SRC_MAC_LBN 12 +#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48 +#define FRF_CZ_TMFT_VLAN_ID_LBN 0 +#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12 + +/* MC_TREG_SMEM: MC Shared Memory */ +#define FR_CZ_MC_TREG_SMEM 0x00ff0000 +#define FR_CZ_MC_TREG_SMEM_STEP 4 +#define FR_CZ_MC_TREG_SMEM_ROWS 512 +#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0 +#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32 + +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000 +#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16 +#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64 +/* MSIX_VECTOR_TABLE: MSIX Vector Table */ +#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000 +/* 
FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */ +#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024 +#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97 +#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31 +#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96 +#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1 +#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64 +#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0 +#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32 + +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_BB_MSIX_PBA_TABLE 0x00ff2000 +#define FR_BZ_MSIX_PBA_TABLE_STEP 4 +#define FR_BB_MSIX_PBA_TABLE_ROWS 2 +/* MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_MSIX_PBA_TABLE 0x00008000 +/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */ +#define FR_CZ_MSIX_PBA_TABLE_ROWS 32 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* SRM_DBG_REG: SRAM debug access */ +#define FR_BZ_SRM_DBG 0x03000000 +#define FR_BZ_SRM_DBG_STEP 8 +#define FR_CZ_SRM_DBG_ROWS 262144 +#define FR_BB_SRM_DBG_ROWS 2097152 +#define FRF_BZ_SRM_DBG_LBN 0 +#define FRF_BZ_SRM_DBG_WIDTH 64 + +/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */ +#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000 +#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4 +#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0 +#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32 + +/* DRIVER_EV */ +#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56 +#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4 +#define FSE_BZ_TX_DSC_ERROR_EV 15 +#define FSE_BZ_RX_DSC_ERROR_EV 14 +#define FSE_AA_RX_RECOVER_EV 11 +#define FSE_AZ_TIMER_EV 10 +#define FSE_AZ_TX_PKT_NON_TCP_UDP 9 +#define FSE_AZ_WAKE_UP_EV 6 +#define FSE_AZ_SRM_UPD_DONE_EV 5 +#define FSE_AB_EVQ_NOT_EN_EV 3 +#define FSE_AZ_EVQ_INIT_DONE_EV 2 +#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1 +#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0 +#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14 + +/* EVENT_ENTRY */ +#define FSF_AZ_EV_CODE_LBN 60 +#define FSF_AZ_EV_CODE_WIDTH 4 +#define FSE_CZ_EV_CODE_MCDI_EV 12 +#define FSE_CZ_EV_CODE_USER_EV 8 +#define FSE_AZ_EV_CODE_DRV_GEN_EV 7 +#define FSE_AZ_EV_CODE_GLOBAL_EV 6 +#define FSE_AZ_EV_CODE_DRIVER_EV 5 +#define FSE_AZ_EV_CODE_TX_EV 2 +#define FSE_AZ_EV_CODE_RX_EV 0 +#define FSF_AZ_EV_DATA_LBN 0 +#define FSF_AZ_EV_DATA_WIDTH 60 + +/* GLOBAL_EV */ +#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12 +#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11 +#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1 +#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11 +#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10 +#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9 +#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1 +#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7 +#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1 + +/* LEGACY_INT_VEC */ +#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64 +#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1 +#define FSF_AZ_NET_IVEC_INT_Q_LBN 40 +#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4 +#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32 +#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0 +#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1 + +/* MC_XGMAC_FLTR_RULE_DEF */ +#define FSF_CZ_MC_XFRC_MODE_LBN 416 +#define FSF_CZ_MC_XFRC_MODE_WIDTH 1 +#define FSE_CZ_MC_XFRC_MODE_LAYERED 1 +#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0 
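A minimal decoding sketch (not part of the imported file), assuming an efx_qword_t *event read from an event queue and the driver's EFX_QWORD_FIELD helper from bitfield.h: the FSF_AZ_EV_CODE field selects which of the event layouts defined here applies.

	switch (EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE)) {
	case FSE_AZ_EV_CODE_RX_EV:
		/* receive completion; decode with the FSF_AZ_RX_EV_* fields below */
		break;
	case FSE_AZ_EV_CODE_TX_EV:
		/* transmit completion; decode with the FSF_AZ_TX_EV_* fields below */
		break;
	case FSE_AZ_EV_CODE_DRIVER_EV:
		/* FSF_AZ_DRIVER_EV_SUBCODE selects e.g. FSE_AZ_TX_DESCQ_FLS_DONE_EV */
		break;
	}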
+#define FSF_CZ_MC_XFRC_HASH_LBN 384 +#define FSF_CZ_MC_XFRC_HASH_WIDTH 32 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256 +#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128 +#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0 +#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128 + +/* RX_EV */ +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58 +#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1 +#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57 +#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_OK_LBN 56 +#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55 +#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54 +#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53 +#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 +#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 +#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50 +#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49 +#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1 +#define FSF_AA_RX_EV_DRIB_NIB_LBN 49 +#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1 +#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47 +#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1 +#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44 +#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4 +#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3 +#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2 +#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1 +#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0 +#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42 +#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2 +#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1 +#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0 +#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41 +#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40 +#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1 +#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39 +#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1 +#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37 +#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1 +#define FSF_AZ_RX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31 +#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1 +#define FSF_AZ_RX_EV_PORT_LBN 30 +#define FSF_AZ_RX_EV_PORT_WIDTH 1 +#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16 +#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14 +#define FSF_AZ_RX_EV_SOP_LBN 15 +#define FSF_AZ_RX_EV_SOP_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14 +#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13 +#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12 +#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1 +#define FSF_AZ_RX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12 + +/* RX_KER_DESC */ +#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48 +#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14 +#define FSF_AZ_RX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46 + +/* RX_USER_DESC */ +#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20 +#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12 +#define 
FSF_AZ_RX_USER_BUF_ID_LBN 0 +#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20 + +/* TX_EV */ +#define FSF_AZ_TX_EV_PKT_ERR_LBN 38 +#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37 +#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1 +#define FSF_AZ_TX_EV_Q_LABEL_LBN 32 +#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5 +#define FSF_AZ_TX_EV_PORT_LBN 16 +#define FSF_AZ_TX_EV_PORT_WIDTH 1 +#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15 +#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14 +#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 +#define FSF_AZ_TX_EV_COMP_LBN 12 +#define FSF_AZ_TX_EV_COMP_WIDTH 1 +#define FSF_AZ_TX_EV_DESC_PTR_LBN 0 +#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12 + +/* TX_KER_DESC */ +#define FSF_AZ_TX_KER_CONT_LBN 62 +#define FSF_AZ_TX_KER_CONT_WIDTH 1 +#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48 +#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14 +#define FSF_AZ_TX_KER_BUF_REGION_LBN 46 +#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2 +#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0 +#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46 + +/* TX_USER_DESC */ +#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48 +#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1 +#define FSF_AZ_TX_USER_CONT_LBN 46 +#define FSF_AZ_TX_USER_CONT_WIDTH 1 +#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33 +#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13 +#define FSF_AZ_TX_USER_BUF_ID_LBN 13 +#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20 +#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0 +#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13 + +/* USER_EV */ +#define FSF_CZ_USER_QID_LBN 32 +#define FSF_CZ_USER_QID_WIDTH 10 +#define FSF_CZ_USER_EV_REG_VALUE_LBN 0 +#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32 + +/************************************************************************** + * + * Falcon B0 PCIe core indirect registers + * + ************************************************************************** + */ + +#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68 + +#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70 + +#define FPCR_BB_ACK_RPL_TIMER 0x700 +#define FPCRF_BB_ACK_TL_LBN 0 +#define FPCRF_BB_ACK_TL_WIDTH 16 +#define FPCRF_BB_RPL_TL_LBN 16 +#define FPCRF_BB_RPL_TL_WIDTH 16 + +#define FPCR_BB_ACK_FREQ 0x70C +#define FPCRF_BB_ACK_FREQ_LBN 0 +#define FPCRF_BB_ACK_FREQ_WIDTH 7 + +/************************************************************************** + * + * Pseudo-registers and fields + * + ************************************************************************** + */ + +/* Interrupt acknowledge work-around register (A0/A1 only) */ +#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070 + +/* EE_SPI_HCMD_REG: SPI host command register */ +/* Values for the EE_SPI_HCMD_SF_SEL register field */ +#define FFE_AB_SPI_DEVICE_EEPROM 0 +#define FFE_AB_SPI_DEVICE_FLASH 1 + +/* NIC_STAT_REG: NIC status register */ +#define FRF_AB_STRAP_10G_LBN 2 +#define FRF_AB_STRAP_10G_WIDTH 1 +#define FRF_AA_STRAP_PCIE_LBN 0 +#define FRF_AA_STRAP_PCIE_WIDTH 1 + +/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */ +#define FRF_AZ_FATAL_INTR_LBN 0 +#define FRF_AZ_FATAL_INTR_WIDTH 12 + +/* SRM_CFG_REG: SRAM configuration register */ +/* We treat the number of SRAM banks and bank size as a single field */ +#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN +#define FRF_AZ_SRM_NB_SZ_WIDTH \ + (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH) +#define FFE_AB_SRM_NB1_SZ2M 0 +#define FFE_AB_SRM_NB1_SZ4M 1 +#define FFE_AB_SRM_NB1_SZ8M 2 +#define FFE_AB_SRM_NB_SZ_DEF 3 +#define FFE_AB_SRM_NB2_SZ4M 4 +#define FFE_AB_SRM_NB2_SZ8M 5 +#define FFE_AB_SRM_NB2_SZ16M 6 +#define FFE_AB_SRM_NB_SZ_RES 7 + +/* RX_DESC_UPD_REGP0: 
Receive descriptor update register. */ +/* We write just the last dword of these registers */ +#define FR_AZ_RX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \ + FR_BZ_RX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH + +/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */ +#define FR_AZ_TX_DESC_UPD_DWORD_P0 \ + (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \ + FR_BZ_TX_DESC_UPD_P0 + 3 * 4) +#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32) +#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH + +/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */ +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1 + +/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */ +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12 +#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1 + +/* XM_TX_PARAM_REG: XGMAC transmit parameter register */ +#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH) + +/* XM_RX_PARAM_REG: XGMAC receive parameter register */ +#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN +#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \ + FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH) + +/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */ +/* Default values */ +#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */ +#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */ +#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */ + +/* XX_CORE_STAT_REG: XAUI XGXS core status register */ +/* XGXS all-lanes status fields */ +#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN +#define FRF_AB_XX_SYNC_STAT_WIDTH 4 +#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN +#define FRF_AB_XX_COMMA_DET_WIDTH 4 +#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN +#define FRF_AB_XX_CHAR_ERR_WIDTH 4 +#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN +#define FRF_AB_XX_DISPERR_WIDTH 4 +#define FFE_AB_XX_STAT_ALL_LANES 0xf +#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN +#define FRF_AB_XX_FORCE_SIG_WIDTH 8 +#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff + +/* RX_MAC_FILTER_TBL0 */ +/* RMFT_DEST_MAC is wider than 32 bits */ +#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN +#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32 +#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32) +#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32) + +/* TX_MAC_FILTER_TBL0 */ +/* TMFT_SRC_MAC is wider than 32 bits */ +#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN +#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32 +#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32) +#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32) + +/* TX_PACE_TBL */ +/* Values >20 are documented as reserved, but will result in a queue going + * into the fast bin with a pace value of zero. 
*/ +#define FFE_BZ_TX_PACE_OFF 0 +#define FFE_BZ_TX_PACE_RESERVED 21 + +/* DRIVER_EV */ +/* Sub-fields of an RX flush completion event */ +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 +#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0 +#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12 + +/* EVENT_ENTRY */ +/* Magic number field for event test */ +#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 +#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 + +/* RX packet prefix */ +#define FS_BZ_RX_PREFIX_HASH_OFST 12 +#define FS_BZ_RX_PREFIX_SIZE 16 + +#endif /* EFX_FARCH_REGS_H */ diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h new file mode 100644 index 000000000..d0ed7f71e --- /dev/null +++ b/drivers/net/ethernet/sfc/filter.h @@ -0,0 +1,272 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_FILTER_H +#define EFX_FILTER_H + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <asm/byteorder.h> + +/** + * enum efx_filter_match_flags - Flags for hardware filter match type + * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address + * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address + * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address + * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port + * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address + * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port + * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type + * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID + * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID + * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol + * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit. + * Used for RX default unicast and multicast/broadcast filters. + * + * Only some combinations are supported, depending on NIC type: + * + * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or + * local 2-tuple (only implemented for Falcon B0) + * + * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple + * or local 2-tuple, or local MAC with or without outer VID, and RX + * default filters + * + * - Huntington supports filter matching controlled by firmware, potentially + * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit, + * with or without outer and inner VID + */ +enum efx_filter_match_flags { + EFX_FILTER_MATCH_REM_HOST = 0x0001, + EFX_FILTER_MATCH_LOC_HOST = 0x0002, + EFX_FILTER_MATCH_REM_MAC = 0x0004, + EFX_FILTER_MATCH_REM_PORT = 0x0008, + EFX_FILTER_MATCH_LOC_MAC = 0x0010, + EFX_FILTER_MATCH_LOC_PORT = 0x0020, + EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, + EFX_FILTER_MATCH_INNER_VID = 0x0080, + EFX_FILTER_MATCH_OUTER_VID = 0x0100, + EFX_FILTER_MATCH_IP_PROTO = 0x0200, + EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, +}; + +/** + * enum efx_filter_priority - priority of a hardware filter specification + * @EFX_FILTER_PRI_HINT: Performance hint + * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list + * or hardware requirements. This may only be used by the filter + * implementation for each NIC type.
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter + * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level + * networking and SR-IOV) + */ +enum efx_filter_priority { + EFX_FILTER_PRI_HINT = 0, + EFX_FILTER_PRI_AUTO, + EFX_FILTER_PRI_MANUAL, + EFX_FILTER_PRI_REQUIRED, +}; + +/** + * enum efx_filter_flags - flags for hardware filter specifications + * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues. + * By default, matching packets will be delivered only to the + * specified queue. If this flag is set, they will be delivered + * to a range of queues offset from the specified queue number + * according to the indirection table. + * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving + * queue. + * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is + * overriding an automatic filter (priority + * %EFX_FILTER_PRI_AUTO). This may only be set by the filter + * implementation for each type. A removal request will restore + * the automatic filter in its place. + * @EFX_FILTER_FLAG_RX: Filter is for RX + * @EFX_FILTER_FLAG_TX: Filter is for TX + */ +enum efx_filter_flags { + EFX_FILTER_FLAG_RX_RSS = 0x01, + EFX_FILTER_FLAG_RX_SCATTER = 0x02, + EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04, + EFX_FILTER_FLAG_RX = 0x08, + EFX_FILTER_FLAG_TX = 0x10, +}; + +/** + * struct efx_filter_spec - specification for a hardware filter + * @match_flags: Match type flags, from &enum efx_filter_match_flags + * @priority: Priority of the filter, from &enum efx_filter_priority + * @flags: Miscellaneous flags, from &enum efx_filter_flags + * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set + * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for + * an RX drop filter + * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set + * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set + * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or + * %EFX_FILTER_MATCH_LOC_MAC_IG is set + * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set + * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set + * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO + * is set + * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set + * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set + * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set + * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set + * + * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be + * used to initialise the structure. The efx_filter_set_*() functions + * may then be used to set @rss_context, @match_flags and related + * fields. + * + * The @priority field is used by software to determine whether a new + * filter may replace an old one. The hardware priority of a filter + * depends on which fields are matched. 
+ */ +struct efx_filter_spec { + u32 match_flags:12; + u32 priority:2; + u32 flags:6; + u32 dmaq_id:12; + u32 rss_context; + __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + __be16 inner_vid; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + __be16 ether_type; + u8 ip_proto; + __be32 loc_host[4]; + __be32 rem_host[4]; + __be16 loc_port; + __be16 rem_port; + /* total 64 bytes */ +}; + +enum { + EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff, + EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff +}; + +static inline void efx_filter_init_rx(struct efx_filter_spec *spec, + enum efx_filter_priority priority, + enum efx_filter_flags flags, + unsigned rxq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = priority; + spec->flags = EFX_FILTER_FLAG_RX | flags; + spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; + spec->dmaq_id = rxq_id; +} + +static inline void efx_filter_init_tx(struct efx_filter_spec *spec, + unsigned txq_id) +{ + memset(spec, 0, sizeof(*spec)); + spec->priority = EFX_FILTER_PRI_REQUIRED; + spec->flags = EFX_FILTER_FLAG_TX; + spec->dmaq_id = txq_id; +} + +/** + * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @host: Local host address (network byte order) + * @port: Local port (network byte order) + */ +static inline int +efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto, + __be32 host, __be16 port) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = host; + spec->loc_port = port; + return 0; +} + +/** + * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports + * @spec: Specification to initialise + * @proto: Transport layer protocol number + * @lhost: Local host address (network byte order) + * @lport: Local port (network byte order) + * @rhost: Remote host address (network byte order) + * @rport: Remote port (network byte order) + */ +static inline int +efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto, + __be32 lhost, __be16 lport, + __be32 rhost, __be16 rport) +{ + spec->match_flags |= + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec->ether_type = htons(ETH_P_IP); + spec->ip_proto = proto; + spec->loc_host[0] = lhost; + spec->loc_port = lport; + spec->rem_host[0] = rhost; + spec->rem_port = rport; + return 0; +} + +enum { + EFX_FILTER_VID_UNSPEC = 0xffff, +}; + +/** + * efx_filter_set_eth_local - specify local Ethernet address and/or VID + * @spec: Specification to initialise + * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC + * @addr: Local Ethernet MAC address, or %NULL + */ +static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec, + u16 vid, const u8 *addr) +{ + if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL) + return -EINVAL; + + if (vid != EFX_FILTER_VID_UNSPEC) { + spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec->outer_vid = htons(vid); + } + if (addr != NULL) { + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; + ether_addr_copy(spec->loc_mac, addr); + } + return 0; +} + +/** + * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast + * @spec: Specification to initialise + */ +static inline int efx_filter_set_uc_def(struct efx_filter_spec 
*spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + return 0; +} + +/** + * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast + * @spec: Specification to initialise + */ +static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) +{ + spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + spec->loc_mac[0] = 1; + return 0; +} + +#endif /* EFX_FILTER_H */ diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h new file mode 100644 index 000000000..afb94aa2c --- /dev/null +++ b/drivers/net/ethernet/sfc/io.h @@ -0,0 +1,302 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_IO_H +#define EFX_IO_H + +#include <linux/io.h> +#include <linux/spinlock.h> + +/************************************************************************** + * + * NIC register I/O + * + ************************************************************************** + * + * Notes on locking strategy for the Falcon architecture: + * + * Many CSRs are very wide and cannot be read or written atomically. + * Writes from the host are buffered by the Bus Interface Unit (BIU) + * up to 128 bits. Whenever the host writes part of such a register, + * the BIU collects the written value and does not write to the + * underlying register until all 4 dwords have been written. A + * similar buffering scheme applies to host access to the NIC's 64-bit + * SRAM. + * + * Writes to different CSRs and 64-bit SRAM words must be serialised, + * since interleaved access can result in lost writes. We use + * efx_nic::biu_lock for this. + * + * We also serialise reads from 128-bit CSRs and SRAM with the same + * spinlock. This may not be necessary, but it doesn't really matter + * as there are no such reads on the fast path. + * + * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are + * 128-bit but are special-cased in the BIU to avoid the need for + * locking in the host: + * + * - They are write-only. + * - The semantics of writing to these registers are such that + * replacing the low 96 bits with zero does not affect functionality. + * - If the host writes to the last dword address of such a register + * (i.e. the high 32 bits) the underlying register will always be + * written. If the collector and the current write together do not + * provide values for all 128 bits of the register, the low 96 bits + * will be written as zero. + * - If the host writes to the address of any other part of such a + * register while the collector already holds values for some other + * register, the write is discarded and the collector maintains its + * current state. + * + * The EF10 architecture exposes very few registers to the host and + * most of them are only 32 bits wide. The only exceptions are the MC + * doorbell register pair, which has its own latching, and + * TX_DESC_UPD, which works in a similar way to the Falcon + * architecture. + */ + +#if BITS_PER_LONG == 64 +#define EFX_USE_QWORD_IO 1 +#endif + +/* Hardware issue requires that only 64-bit naturally aligned writes + * are seen by hardware.
Its not strictly necessary to restrict to + * x86_64 arch, but done for safety since unusual write combining behaviour + * can break PIO. + */ +#ifdef CONFIG_X86_64 +/* PIO is a win only if write-combining is possible */ +#ifdef ARCH_HAS_IOREMAP_WC +#define EFX_USE_PIO 1 +#endif +#endif + +#ifdef EFX_USE_QWORD_IO +static inline void _efx_writeq(struct efx_nic *efx, __le64 value, + unsigned int reg) +{ + __raw_writeq((__force u64)value, efx->membase + reg); +} +static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg) +{ + return (__force __le64)__raw_readq(efx->membase + reg); +} +#endif + +static inline void _efx_writed(struct efx_nic *efx, __le32 value, + unsigned int reg) +{ + __raw_writel((__force u32)value, efx->membase + reg); +} +static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg) +{ + return (__force __le32)__raw_readl(efx->membase + reg); +} + +/* Write a normal 128-bit CSR, locking as appropriate. */ +static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value, + unsigned int reg) +{ + unsigned long flags __attribute__ ((unused)); + + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + _efx_writeq(efx, value->u64[0], reg + 0); + _efx_writeq(efx, value->u64[1], reg + 8); +#else + _efx_writed(efx, value->u32[0], reg + 0); + _efx_writed(efx, value->u32[1], reg + 4); + _efx_writed(efx, value->u32[2], reg + 8); + _efx_writed(efx, value->u32[3], reg + 12); +#endif + mmiowb(); + spin_unlock_irqrestore(&efx->biu_lock, flags); +} + +/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ +static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, + const efx_qword_t *value, unsigned int index) +{ + unsigned int addr = index * sizeof(*value); + unsigned long flags __attribute__ ((unused)); + + netif_vdbg(efx, hw, efx->net_dev, + "writing SRAM address %x with " EFX_QWORD_FMT "\n", + addr, EFX_QWORD_VAL(*value)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + __raw_writeq((__force u64)value->u64[0], membase + addr); +#else + __raw_writel((__force u32)value->u32[0], membase + addr); + __raw_writel((__force u32)value->u32[1], membase + addr + 4); +#endif + mmiowb(); + spin_unlock_irqrestore(&efx->biu_lock, flags); +} + +/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ +static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg) +{ + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); + + /* No lock required */ + _efx_writed(efx, value->u32[0], reg); +} + +/* Read a 128-bit CSR, locking as appropriate. */ +static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg) +{ + unsigned long flags __attribute__ ((unused)); + + spin_lock_irqsave(&efx->biu_lock, flags); + value->u32[0] = _efx_readd(efx, reg + 0); + value->u32[1] = _efx_readd(efx, reg + 4); + value->u32[2] = _efx_readd(efx, reg + 8); + value->u32[3] = _efx_readd(efx, reg + 12); + spin_unlock_irqrestore(&efx->biu_lock, flags); + + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); +} + +/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. 
*/ +static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, + efx_qword_t *value, unsigned int index) +{ + unsigned int addr = index * sizeof(*value); + unsigned long flags __attribute__ ((unused)); + + spin_lock_irqsave(&efx->biu_lock, flags); +#ifdef EFX_USE_QWORD_IO + value->u64[0] = (__force __le64)__raw_readq(membase + addr); +#else + value->u32[0] = (__force __le32)__raw_readl(membase + addr); + value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); +#endif + spin_unlock_irqrestore(&efx->biu_lock, flags); + + netif_vdbg(efx, hw, efx->net_dev, + "read from SRAM address %x, got "EFX_QWORD_FMT"\n", + addr, EFX_QWORD_VAL(*value)); +} + +/* Read a 32-bit CSR or SRAM */ +static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, + unsigned int reg) +{ + value->u32[0] = _efx_readd(efx, reg); + netif_vdbg(efx, hw, efx->net_dev, + "read from register %x, got "EFX_DWORD_FMT"\n", + reg, EFX_DWORD_VAL(*value)); +} + +/* Write a 128-bit CSR forming part of a table */ +static inline void +efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* Read a 128-bit CSR forming part of a table */ +static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int index) +{ + efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); +} + +/* Page size used as step between per-VI registers */ +#define EFX_VI_PAGE_SIZE 0x2000 + +/* Calculate offset to page-mapped register */ +#define EFX_PAGED_REG(page, reg) \ + ((page) * EFX_VI_PAGE_SIZE + (reg)) + +/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ +static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, + unsigned int reg, unsigned int page) +{ + reg = EFX_PAGED_REG(page, reg); + + netif_vdbg(efx, hw, efx->net_dev, + "writing register %x with " EFX_OWORD_FMT "\n", reg, + EFX_OWORD_VAL(*value)); + +#ifdef EFX_USE_QWORD_IO + _efx_writeq(efx, value->u64[0], reg + 0); + _efx_writeq(efx, value->u64[1], reg + 8); +#else + _efx_writed(efx, value->u32[0], reg + 0); + _efx_writed(efx, value->u32[1], reg + 4); + _efx_writed(efx, value->u32[2], reg + 8); + _efx_writed(efx, value->u32[3], reg + 12); +#endif +} +#define efx_writeo_page(efx, value, reg, page) \ + _efx_writeo_page(efx, value, \ + reg + \ + BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ + page) + +/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the + * high bits of RX_DESC_UPD or TX_DESC_UPD) + */ +static inline void +_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, + unsigned int reg, unsigned int page) +{ + efx_writed(efx, value, EFX_PAGED_REG(page, reg)); +} +#define efx_writed_page(efx, value, reg, page) \ + _efx_writed_page(efx, value, \ + reg + \ + BUILD_BUG_ON_ZERO((reg) != 0x400 && \ + (reg) != 0x420 && \ + (reg) != 0x830 && \ + (reg) != 0x83c && \ + (reg) != 0xa18 && \ + (reg) != 0xa1c), \ + page) + +/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug + * in the BIU means that writes to TIMER_COMMAND[0] invalidate the + * collector register. 
+ */ +static inline void _efx_writed_page_locked(struct efx_nic *efx, + const efx_dword_t *value, + unsigned int reg, + unsigned int page) +{ + unsigned long flags __attribute__ ((unused)); + + if (page == 0) { + spin_lock_irqsave(&efx->biu_lock, flags); + efx_writed(efx, value, EFX_PAGED_REG(page, reg)); + spin_unlock_irqrestore(&efx->biu_lock, flags); + } else { + efx_writed(efx, value, EFX_PAGED_REG(page, reg)); + } +} +#define efx_writed_page_locked(efx, value, reg, page) \ + _efx_writed_page_locked(efx, value, \ + reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \ + page) + +#endif /* EFX_IO_H */ diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c new file mode 100644 index 000000000..d37928f01 --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -0,0 +1,1891 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include "net_driver.h" +#include "nic.h" +#include "io.h" +#include "farch_regs.h" +#include "mcdi_pcol.h" +#include "phy.h" + +/************************************************************************** + * + * Management-Controller-to-Driver Interface + * + ************************************************************************** + */ + +#define MCDI_RPC_TIMEOUT (10 * HZ) + +/* A reboot/assertion causes the MCDI status word to be set after the + * command word is set or a REBOOT event is sent. If we notice a reboot + * via these mechanisms then wait 250ms for the status word to be set. + */ +#define MCDI_STATUS_DELAY_US 100 +#define MCDI_STATUS_DELAY_COUNT 2500 +#define MCDI_STATUS_SLEEP_MS \ + (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) + +#define SEQ_MASK \ + EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) + +struct efx_mcdi_async_param { + struct list_head list; + unsigned int cmd; + size_t inlen; + size_t outlen; + bool quiet; + efx_mcdi_async_completer *complete; + unsigned long cookie; + /* followed by request/response buffer */ +}; + +static void efx_mcdi_timeout_async(unsigned long context); +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached_out); +static bool efx_mcdi_poll_once(struct efx_nic *efx); +static void efx_mcdi_abandon(struct efx_nic *efx); + +int efx_mcdi_init(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + bool already_attached; + int rc; + + efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); + if (!efx->mcdi) + return -ENOMEM; + + mcdi = efx_mcdi(efx); + mcdi->efx = efx; + init_waitqueue_head(&mcdi->wq); + spin_lock_init(&mcdi->iface_lock); + mcdi->state = MCDI_STATE_QUIESCENT; + mcdi->mode = MCDI_MODE_POLL; + spin_lock_init(&mcdi->async_lock); + INIT_LIST_HEAD(&mcdi->async_list); + setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async, + (unsigned long)mcdi); + + (void) efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + + /* Recover from a failed assertion before probing */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + /* Let the MC (and BMC, if this is a LOM) know that the driver + * is loaded. We should do this before we reset the NIC. 
+ */ + rc = efx_mcdi_drv_attach(efx, true, &already_attached); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Unable to register driver with MCPU\n"); + return rc; + } + if (already_attached) + /* Not a fatal error */ + netif_err(efx, probe, efx->net_dev, + "Host already registered with MCPU\n"); + + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + efx->primary = efx; + + return 0; +} + +void efx_mcdi_fini(struct efx_nic *efx) +{ + if (!efx->mcdi) + return; + + BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); + + /* Relinquish the device (back to the BMC, if this is a LOM) */ + efx_mcdi_drv_attach(efx, false, NULL); + + kfree(efx->mcdi); +} + +static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + efx_dword_t hdr[2]; + size_t hdr_len; + u32 xflags, seqno; + + BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT); + + /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + spin_unlock_bh(&mcdi->iface_lock); + + seqno = mcdi->seqno & SEQ_MASK; + xflags = 0; + if (mcdi->mode == MCDI_MODE_EVENTS) + xflags |= MCDI_HEADER_XFLAGS_EVREQ; + + if (efx->type->mcdi_max_ver == 1) { + /* MCDI v1 */ + EFX_POPULATE_DWORD_7(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, cmd, + MCDI_HEADER_DATALEN, inlen, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); + hdr_len = 4; + } else { + /* MCDI v2 */ + BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); + EFX_POPULATE_DWORD_7(hdr[0], + MCDI_HEADER_RESPONSE, 0, + MCDI_HEADER_RESYNC, 1, + MCDI_HEADER_CODE, MC_CMD_V2_EXTN, + MCDI_HEADER_DATALEN, 0, + MCDI_HEADER_SEQ, seqno, + MCDI_HEADER_XFLAGS, xflags, + MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); + EFX_POPULATE_DWORD_2(hdr[1], + MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, + MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); + hdr_len = 8; + } + + efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); + + mcdi->new_epoch = false; +} + +static int efx_mcdi_errno(unsigned int mcdi_err) +{ + switch (mcdi_err) { + case 0: + return 0; +#define TRANSLATE_ERROR(name) \ + case MC_CMD_ERR_ ## name: \ + return -name; + TRANSLATE_ERROR(EPERM); + TRANSLATE_ERROR(ENOENT); + TRANSLATE_ERROR(EINTR); + TRANSLATE_ERROR(EAGAIN); + TRANSLATE_ERROR(EACCES); + TRANSLATE_ERROR(EBUSY); + TRANSLATE_ERROR(EINVAL); + TRANSLATE_ERROR(EDEADLK); + TRANSLATE_ERROR(ENOSYS); + TRANSLATE_ERROR(ETIME); + TRANSLATE_ERROR(EALREADY); + TRANSLATE_ERROR(ENOSPC); +#undef TRANSLATE_ERROR + case MC_CMD_ERR_ENOTSUP: + return -EOPNOTSUPP; + case MC_CMD_ERR_ALLOC_FAIL: + return -ENOBUFS; + case MC_CMD_ERR_MAC_EXIST: + return -EADDRINUSE; + default: + return -EPROTO; + } +} + +static void efx_mcdi_read_response_header(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + unsigned int respseq, respcmd, error; + efx_dword_t hdr; + + efx->type->mcdi_read_response(efx, &hdr, 0, 4); + respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ); + respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); + error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); + + if (respcmd != MC_CMD_V2_EXTN) { + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); + } else { + efx->type->mcdi_read_response(efx, &hdr, 4, 4); + mcdi->resp_hdr_len = 8; + mcdi->resp_data_len = + EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); + } + + if (error && mcdi->resp_data_len == 0) { + netif_err(efx, hw, 
efx->net_dev, "MC rebooted\n"); + mcdi->resprc = -EIO; + } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx seq 0x%x\n", + respseq, mcdi->seqno); + mcdi->resprc = -EIO; + } else if (error) { + efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); + mcdi->resprc = + efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0)); + } else { + mcdi->resprc = 0; + } +} + +static bool efx_mcdi_poll_once(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + rmb(); + if (!efx->type->mcdi_poll_response(efx)) + return false; + + spin_lock_bh(&mcdi->iface_lock); + efx_mcdi_read_response_header(efx); + spin_unlock_bh(&mcdi->iface_lock); + + return true; +} + +static int efx_mcdi_poll(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + unsigned long time, finish; + unsigned int spins; + int rc; + + /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ + rc = efx_mcdi_poll_reboot(efx); + if (rc) { + spin_lock_bh(&mcdi->iface_lock); + mcdi->resprc = rc; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + spin_unlock_bh(&mcdi->iface_lock); + return 0; + } + + /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, + * because generally mcdi responses are fast. After that, back off + * and poll once a jiffy (approximately) + */ + spins = TICK_USEC; + finish = jiffies + MCDI_RPC_TIMEOUT; + + while (1) { + if (spins != 0) { + --spins; + udelay(1); + } else { + schedule_timeout_uninterruptible(1); + } + + time = jiffies; + + if (efx_mcdi_poll_once(efx)) + break; + + if (time_after(time, finish)) + return -ETIMEDOUT; + } + + /* Return rc=0 like wait_event_timeout() */ + return 0; +} + +/* Test and clear MC-rebooted flag for this port/function; reset + * software state as necessary. + */ +int efx_mcdi_poll_reboot(struct efx_nic *efx) +{ + if (!efx->mcdi) + return 0; + + return efx->type->mcdi_poll_reboot(efx); +} + +static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi) +{ + return cmpxchg(&mcdi->state, + MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) == + MCDI_STATE_QUIESCENT; +} + +static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi) +{ + /* Wait until the interface becomes QUIESCENT and we win the race + * to mark it RUNNING_SYNC. + */ + wait_event(mcdi->wq, + cmpxchg(&mcdi->state, + MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) == + MCDI_STATE_QUIESCENT); +} + +static int efx_mcdi_await_completion(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED, + MCDI_RPC_TIMEOUT) == 0) + return -ETIMEDOUT; + + /* Check if efx_mcdi_set_mode() switched us back to polled completions. + * In which case, poll for completions directly. If efx_mcdi_ev_cpl() + * completed the request first, then we'll just end up completing the + * request again, which is safe. + * + * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which + * wait_event_timeout() implicitly provides. + */ + if (mcdi->mode == MCDI_MODE_POLL) + return efx_mcdi_poll(efx); + + return 0; +} + +/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the + * requester. Return whether this was done. Does not take any locks. 
+ */ +static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi) +{ + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) == + MCDI_STATE_RUNNING_SYNC) { + wake_up(&mcdi->wq); + return true; + } + + return false; +} + +static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) +{ + if (mcdi->mode == MCDI_MODE_EVENTS) { + struct efx_mcdi_async_param *async; + struct efx_nic *efx = mcdi->efx; + + /* Process the asynchronous request queue */ + spin_lock_bh(&mcdi->async_lock); + async = list_first_entry_or_null( + &mcdi->async_list, struct efx_mcdi_async_param, list); + if (async) { + mcdi->state = MCDI_STATE_RUNNING_ASYNC; + efx_mcdi_send_request(efx, async->cmd, + (const efx_dword_t *)(async + 1), + async->inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + spin_unlock_bh(&mcdi->async_lock); + + if (async) + return; + } + + mcdi->state = MCDI_STATE_QUIESCENT; + wake_up(&mcdi->wq); +} + +/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the + * asynchronous completion function, and release the interface. + * Return whether this was done. Must be called in bh-disabled + * context. Will take iface_lock and async_lock. + */ +static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) +{ + struct efx_nic *efx = mcdi->efx; + struct efx_mcdi_async_param *async; + size_t hdr_len, data_len, err_len; + efx_dword_t *outbuf; + MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0); + int rc; + + if (cmpxchg(&mcdi->state, + MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) != + MCDI_STATE_RUNNING_ASYNC) + return false; + + spin_lock(&mcdi->iface_lock); + if (timeout) { + /* Ensure that if the completion event arrives later, + * the seqno check in efx_mcdi_ev_cpl() will fail + */ + ++mcdi->seqno; + ++mcdi->credits; + rc = -ETIMEDOUT; + hdr_len = 0; + data_len = 0; + } else { + rc = mcdi->resprc; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + } + spin_unlock(&mcdi->iface_lock); + + /* Stop the timer. In case the timer function is running, we + * must wait for it to return so that there is no possibility + * of it aborting the next request. 
+ */ + if (!timeout) + del_timer_sync(&mcdi->async_timer); + + spin_lock(&mcdi->async_lock); + async = list_first_entry(&mcdi->async_list, + struct efx_mcdi_async_param, list); + list_del(&async->list); + spin_unlock(&mcdi->async_lock); + + outbuf = (efx_dword_t *)(async + 1); + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(async->outlen, data_len)); + if (!timeout && rc && !async->quiet) { + err_len = min(sizeof(errbuf), data_len); + efx->type->mcdi_read_response(efx, errbuf, hdr_len, + sizeof(errbuf)); + efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf, + err_len, rc); + } + async->complete(efx, async->cookie, rc, outbuf, data_len); + kfree(async); + + efx_mcdi_release(mcdi); + + return true; +} + +static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, + unsigned int datalen, unsigned int mcdi_err) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool wake = false; + + spin_lock(&mcdi->iface_lock); + + if ((seqno ^ mcdi->seqno) & SEQ_MASK) { + if (mcdi->credits) + /* The request has been cancelled */ + --mcdi->credits; + else + netif_err(efx, hw, efx->net_dev, + "MC response mismatch tx seq 0x%x rx " + "seq 0x%x\n", seqno, mcdi->seqno); + } else { + if (efx->type->mcdi_max_ver >= 2) { + /* MCDI v2 responses don't fit in an event */ + efx_mcdi_read_response_header(efx); + } else { + mcdi->resprc = efx_mcdi_errno(mcdi_err); + mcdi->resp_hdr_len = 4; + mcdi->resp_data_len = datalen; + } + + wake = true; + } + + spin_unlock(&mcdi->iface_lock); + + if (wake) { + if (!efx_mcdi_complete_async(mcdi, false)) + (void) efx_mcdi_complete_sync(mcdi); + + /* If the interface isn't RUNNING_ASYNC or + * RUNNING_SYNC then we've received a duplicate + * completion after we've already transitioned back to + * QUIESCENT. [A subsequent invocation would increment + * seqno, so would have failed the seqno check]. + */ + } +} + +static void efx_mcdi_timeout_async(unsigned long context) +{ + struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context; + + efx_mcdi_complete_async(mcdi, true); +} + +static int +efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen) +{ + if (efx->type->mcdi_max_ver < 0 || + (efx->type->mcdi_max_ver < 2 && + cmd > MC_CMD_CMD_SPACE_ESCAPE_7)) + return -EINVAL; + + if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 || + (efx->type->mcdi_max_ver < 2 && + inlen > MCDI_CTL_SDU_LEN_MAX_V1)) + return -EMSGSIZE; + + return 0; +} + +static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0); + int rc; + + if (mcdi->mode == MCDI_MODE_POLL) + rc = efx_mcdi_poll(efx); + else + rc = efx_mcdi_await_completion(efx); + + if (rc != 0) { + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d mode %d timed out\n", + cmd, (int)inlen, mcdi->mode); + + if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) { + netif_err(efx, hw, efx->net_dev, + "MCDI request was completed without an event\n"); + rc = 0; + } + + efx_mcdi_abandon(efx); + + /* Close the race with efx_mcdi_ev_cpl() executing just too late + * and completing a request we've just cancelled, by ensuring + * that the seqno check therein fails. 
+ */ + spin_lock_bh(&mcdi->iface_lock); + ++mcdi->seqno; + ++mcdi->credits; + spin_unlock_bh(&mcdi->iface_lock); + } + + if (rc != 0) { + if (outlen_actual) + *outlen_actual = 0; + } else { + size_t hdr_len, data_len, err_len; + + /* At the very least we need a memory barrier here to ensure + * we pick up changes from efx_mcdi_ev_cpl(). Protect against + * a spurious efx_mcdi_ev_cpl() running concurrently by + * acquiring the iface_lock. */ + spin_lock_bh(&mcdi->iface_lock); + rc = mcdi->resprc; + hdr_len = mcdi->resp_hdr_len; + data_len = mcdi->resp_data_len; + err_len = min(sizeof(errbuf), data_len); + spin_unlock_bh(&mcdi->iface_lock); + + BUG_ON(rc > 0); + + efx->type->mcdi_read_response(efx, outbuf, hdr_len, + min(outlen, data_len)); + if (outlen_actual) + *outlen_actual = data_len; + + efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len); + + if (cmd == MC_CMD_REBOOT && rc == -EIO) { + /* Don't reset if MC_CMD_REBOOT returns EIO */ + } else if (rc == -EIO || rc == -EINTR) { + netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", + -rc); + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } else if (rc && !quiet) { + efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len, + rc); + } + + if (rc == -EIO || rc == -EINTR) { + msleep(MCDI_STATUS_SLEEP_MS); + efx_mcdi_poll_reboot(efx); + mcdi->new_epoch = true; + } + } + + efx_mcdi_release(mcdi); + return rc; +} + +static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual, bool quiet) +{ + int rc; + + rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); + if (rc) { + if (outlen_actual) + *outlen_actual = 0; + return rc; + } + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, quiet); +} + +int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, false); +} + +/* Normally, on receiving an error code in the MCDI response, + * efx_mcdi_rpc will log an error message containing (among other + * things) the raw error code, by means of efx_mcdi_display_error. + * This _quiet version suppresses that; if the caller wishes to log + * the error conditionally on the return code, it should call this + * function and is then responsible for calling efx_mcdi_display_error + * as needed. 
+ */ +int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen, + outlen_actual, true); +} + +int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; + + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + + if (mcdi->mode == MCDI_MODE_FAIL) + return -ENETDOWN; + + efx_mcdi_acquire_sync(mcdi); + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + return 0; +} + +static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie, bool quiet) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + struct efx_mcdi_async_param *async; + int rc; + + rc = efx_mcdi_check_supported(efx, cmd, inlen); + if (rc) + return rc; + + if (efx->mc_bist_for_other_fn) + return -ENETDOWN; + + async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4), + GFP_ATOMIC); + if (!async) + return -ENOMEM; + + async->cmd = cmd; + async->inlen = inlen; + async->outlen = outlen; + async->quiet = quiet; + async->complete = complete; + async->cookie = cookie; + memcpy(async + 1, inbuf, inlen); + + spin_lock_bh(&mcdi->async_lock); + + if (mcdi->mode == MCDI_MODE_EVENTS) { + list_add_tail(&async->list, &mcdi->async_list); + + /* If this is at the front of the queue, try to start it + * immediately + */ + if (mcdi->async_list.next == &async->list && + efx_mcdi_acquire_async(mcdi)) { + efx_mcdi_send_request(efx, cmd, inbuf, inlen); + mod_timer(&mcdi->async_timer, + jiffies + MCDI_RPC_TIMEOUT); + } + } else { + kfree(async); + rc = -ENETDOWN; + } + + spin_unlock_bh(&mcdi->async_lock); + + return rc; +} + +/** + * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously + * @efx: NIC through which to issue the command + * @cmd: Command type number + * @inbuf: Command parameters + * @inlen: Length of command parameters, in bytes + * @outlen: Length to allocate for response buffer, in bytes + * @complete: Function to be called on completion or cancellation. + * @cookie: Arbitrary value to be passed to @complete. + * + * This function does not sleep and therefore may be called in atomic + * context. It will fail if event queues are disabled or if MCDI + * event completions have been disabled due to an error. 
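+ *
+ * For illustration only (the function name is hypothetical, not part of
+ * the original kernel-doc): @complete must match the
+ * efx_mcdi_async_completer typedef, e.g.
+ *
+ *	static void example_completer(struct efx_nic *efx,
+ *				      unsigned long cookie, int rc,
+ *				      efx_dword_t *outbuf,
+ *				      size_t outlen_actual)
+ *	{
+ *		if (rc)
+ *			netif_err(efx, hw, efx->net_dev,
+ *				  "async MCDI failed rc=%d\n", rc);
+ *	}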
+ * + * If it succeeds, the @complete function will be called exactly once + * in atomic context, when one of the following occurs: + * (a) the completion event is received (in NAPI context) + * (b) event queues are disabled (in the process that disables them) + * (c) the request times-out (in timer context) + */ +int +efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, false); +} + +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, efx_mcdi_async_completer *complete, + unsigned long cookie) +{ + return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete, + cookie, true); +} + +int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, false); +} + +int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual) +{ + return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen, + outlen_actual, true); +} + +void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc) +{ + int code = 0, err_arg = 0; + + if (outlen >= MC_CMD_ERR_CODE_OFST + 4) + code = MCDI_DWORD(outbuf, ERR_CODE); + if (outlen >= MC_CMD_ERR_ARG_OFST + 4) + err_arg = MCDI_DWORD(outbuf, ERR_ARG); + netif_err(efx, hw, efx->net_dev, + "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n", + cmd, (int)inlen, rc, code, err_arg); +} + +/* Switch to polled MCDI completions. This can be called in various + * error conditions with various locks held, so it must be lockless. + * Caller is responsible for flushing asynchronous requests later. + */ +void efx_mcdi_mode_poll(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + /* If already in polling mode, nothing to do. + * If in fail-fast state, don't switch to polled completion. + * FLR recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL) + return; + + /* We can switch from event completion to polled completion, because + * mcdi requests are always completed in shared memory. We do this by + * switching the mode to POLL'd then completing the request. + * efx_mcdi_await_completion() will then call efx_mcdi_poll(). + * + * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), + * which efx_mcdi_complete_sync() provides for us. + */ + mcdi->mode = MCDI_MODE_POLL; + + efx_mcdi_complete_sync(mcdi); +} + +/* Flush any running or queued asynchronous requests, after event processing + * is stopped + */ +void efx_mcdi_flush_async(struct efx_nic *efx) +{ + struct efx_mcdi_async_param *async, *next; + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + + /* We must be in poll or fail mode so no more requests can be queued */ + BUG_ON(mcdi->mode == MCDI_MODE_EVENTS); + + del_timer_sync(&mcdi->async_timer); + + /* If a request is still running, make sure we give the MC + * time to complete it so that the response won't overwrite our + * next request. 
+ */ + if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) { + efx_mcdi_poll(efx); + mcdi->state = MCDI_STATE_QUIESCENT; + } + + /* Nothing else will access the async list now, so it is safe + * to walk it without holding async_lock. If we hold it while + * calling a completer then lockdep may warn that we have + * acquired locks in the wrong order. + */ + list_for_each_entry_safe(async, next, &mcdi->async_list, list) { + async->complete(efx, async->cookie, -ENETDOWN, NULL, 0); + list_del(&async->list); + kfree(async); + } +} + +void efx_mcdi_mode_event(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi; + + if (!efx->mcdi) + return; + + mcdi = efx_mcdi(efx); + /* If already in event completion mode, nothing to do. + * If in fail-fast state, don't switch to event completion. FLR + * recovery will do that later. + */ + if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL) + return; + + /* We can't switch from polled to event completion in the middle of a + * request, because the completion method is specified in the request. + * So acquire the interface to serialise the requestors. We don't need + * to acquire the iface_lock to change the mode here, but we do need a + * write memory barrier ensure that efx_mcdi_rpc() sees it, which + * efx_mcdi_acquire() provides. + */ + efx_mcdi_acquire_sync(mcdi); + mcdi->mode = MCDI_MODE_EVENTS; + efx_mcdi_release(mcdi); +} + +static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + /* If there is an outstanding MCDI request, it has been terminated + * either by a BADASSERT or REBOOT event. If the mcdi interface is + * in polled mode, then do nothing because the MC reboot handler will + * set the header correctly. However, if the mcdi interface is waiting + * for a CMDDONE event it won't receive it [and since all MCDI events + * are sent to the same queue, we can't be racing with + * efx_mcdi_ev_cpl()] + * + * If there is an outstanding asynchronous request, we can't + * complete it now (efx_mcdi_complete() would deadlock). The + * reset process will take care of this. + * + * There's a race here with efx_mcdi_send_request(), because + * we might receive a REBOOT event *before* the request has + * been copied out. In polled mode (during startup) this is + * irrelevant, because efx_mcdi_complete_sync() is ignored. In + * event mode, this condition is just an edge-case of + * receiving a REBOOT event after posting the MCDI + * request. Did the mc reboot before or after the copyout? The + * best we can do always is just return failure. + */ + spin_lock(&mcdi->iface_lock); + if (efx_mcdi_complete_sync(mcdi)) { + if (mcdi->mode == MCDI_MODE_EVENTS) { + mcdi->resprc = rc; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + ++mcdi->credits; + } + } else { + int count; + + /* Consume the status word since efx_mcdi_rpc_finish() won't */ + for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { + if (efx_mcdi_poll_reboot(efx)) + break; + udelay(MCDI_STATUS_DELAY_US); + } + mcdi->new_epoch = true; + + /* Nobody was waiting for an MCDI request, so trigger a reset */ + efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); + } + + spin_unlock(&mcdi->iface_lock); +} + +/* The MC is going down in to BIST mode. set the BIST flag to block + * new MCDI, cancel any outstanding MCDI and and schedule a BIST-type reset + * (which doesn't actually execute a reset, it waits for the controlling + * function to reset it). 
+ */ +static void efx_mcdi_ev_bist(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + spin_lock(&mcdi->iface_lock); + efx->mc_bist_for_other_fn = true; + if (efx_mcdi_complete_sync(mcdi)) { + if (mcdi->mode == MCDI_MODE_EVENTS) { + mcdi->resprc = -EIO; + mcdi->resp_hdr_len = 0; + mcdi->resp_data_len = 0; + ++mcdi->credits; + } + } + mcdi->new_epoch = true; + efx_schedule_reset(efx, RESET_TYPE_MC_BIST); + spin_unlock(&mcdi->iface_lock); +} + +/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try + * to recover. + */ +static void efx_mcdi_abandon(struct efx_nic *efx) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL) + return; /* it had already been done */ + netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n"); + efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT); +} + +/* Called from falcon_process_eventq for MCDI events */ +void efx_mcdi_process_event(struct efx_channel *channel, + efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); + u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); + + switch (code) { + case MCDI_EVENT_CODE_BADSSERT: + netif_err(efx, hw, efx->net_dev, + "MC watchdog or assertion failure at 0x%x\n", data); + efx_mcdi_ev_death(efx, -EINTR); + break; + + case MCDI_EVENT_CODE_PMNOTICE: + netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); + break; + + case MCDI_EVENT_CODE_CMDDONE: + efx_mcdi_ev_cpl(efx, + MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), + MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), + MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); + break; + + case MCDI_EVENT_CODE_LINKCHANGE: + efx_mcdi_process_link_change(efx, event); + break; + case MCDI_EVENT_CODE_SENSOREVT: + efx_mcdi_sensor_event(efx, event); + break; + case MCDI_EVENT_CODE_SCHEDERR: + netif_dbg(efx, hw, efx->net_dev, + "MC Scheduler alert (0x%x)\n", data); + break; + case MCDI_EVENT_CODE_REBOOT: + case MCDI_EVENT_CODE_MC_REBOOT: + netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); + efx_mcdi_ev_death(efx, -EIO); + break; + case MCDI_EVENT_CODE_MC_BIST: + netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n"); + efx_mcdi_ev_bist(efx); + break; + case MCDI_EVENT_CODE_MAC_STATS_DMA: + /* MAC stats are gather lazily. We can ignore this. */ + break; + case MCDI_EVENT_CODE_FLR: + efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); + break; + case MCDI_EVENT_CODE_PTP_RX: + case MCDI_EVENT_CODE_PTP_FAULT: + case MCDI_EVENT_CODE_PTP_PPS: + efx_ptp_event(efx, event); + break; + case MCDI_EVENT_CODE_PTP_TIME: + efx_time_sync_event(channel, event); + break; + case MCDI_EVENT_CODE_TX_FLUSH: + case MCDI_EVENT_CODE_RX_FLUSH: + /* Two flush events will be sent: one to the same event + * queue as completions, and one to event queue 0. + * In the latter case the {RX,TX}_FLUSH_TO_DRIVER + * flag will be set, and we should ignore the event + * because we want to wait for all completions. + */ + BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != + MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); + if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) + efx_ef10_handle_drain_event(efx); + break; + case MCDI_EVENT_CODE_TX_ERR: + case MCDI_EVENT_CODE_RX_ERR: + netif_err(efx, hw, efx->net_dev, + "%s DMA error (event: "EFX_QWORD_FMT")\n", + code == MCDI_EVENT_CODE_TX_ERR ? 
"TX" : "RX", + EFX_QWORD_VAL(*event)); + efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); + break; + default: + netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", + code); + } +} + +/************************************************************************** + * + * Specific request functions + * + ************************************************************************** + */ + +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) +{ + MCDI_DECLARE_BUF(outbuf, + max(MC_CMD_GET_VERSION_OUT_LEN, + MC_CMD_GET_CAPABILITIES_OUT_LEN)); + size_t outlength; + const __le16 *ver_words; + size_t offset; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc) + goto fail; + if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { + rc = -EIO; + goto fail; + } + + ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); + offset = snprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); + + /* EF10 may have multiple datapath firmware variants within a + * single version. Report which variants are running. + */ + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN) + offset += snprintf( + buf + offset, len - offset, " rx? tx?"); + else + offset += snprintf( + buf + offset, len - offset, " rx%x tx%x", + MCDI_WORD(outbuf, + GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID), + MCDI_WORD(outbuf, + GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID)); + + /* It's theoretically possible for the string to exceed 31 + * characters, though in practice the first three version + * components are short enough that this doesn't happen. + */ + if (WARN_ON(offset >= len)) + buf[0] = 0; + } + + return; + +fail: + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + buf[0] = 0; +} + +static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, + bool *was_attached) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, + driver_operating ? 1 : 0); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); + + rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { + rc = -EIO; + goto fail; + } + + if (driver_operating) { + if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) { + efx->mcdi->fn_flags = + MCDI_DWORD(outbuf, + DRV_ATTACH_EXT_OUT_FUNC_FLAGS); + } else { + /* Synthesise flags for Siena */ + efx->mcdi->fn_flags = + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED | + (efx_port_num(efx) == 0) << + MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY; + } + } + + /* We currently assume we have control of the external link + * and are completely trusted by firmware. Abort probing + * if that's not true for this function. 
+ */ + if (driver_operating && + (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) != + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) { + netif_err(efx, probe, efx->net_dev, + "This driver version only supports one function per port\n"); + return -ENODEV; + } + + if (was_attached != NULL) + *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); + return 0; + +fail: + netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX); + size_t outlen, i; + int port_num = efx_port_num(efx); + int rc; + + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); + /* we need __aligned(2) for ether_addr_copy */ + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1); + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + if (mac_address) + ether_addr_copy(mac_address, + port_num ? + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : + MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0)); + if (fw_subtype_list) { + for (i = 0; + i < MCDI_VAR_ARRAY_LEN(outlen, + GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST); + i++) + fw_subtype_list[i] = MCDI_ARRAY_WORD( + outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i); + for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++) + fw_subtype_list[i] = 0; + } + if (capabilities) { + if (port_num) + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); + else + *capabilities = MCDI_DWORD(outbuf, + GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); + } + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", + __func__, rc, (int)outlen); + + return rc; +} + +int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN); + u32 dest = 0; + int rc; + + if (uart) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; + if (evq) + dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; + + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); + MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); + + BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", + __func__, rc); + return rc; +} + +int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); + + rc = 
efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); + *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); + *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & + (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { + case MC_CMD_NVRAM_TEST_PASS: + case MC_CMD_NVRAM_TEST_NOTSUPP: + return 0; + default: + return -EIO; + } +} + +int efx_mcdi_nvram_test_all(struct efx_nic *efx) +{ + u32 nvram_types; + unsigned int type; + int rc; + + rc = efx_mcdi_nvram_types(efx, &nvram_types); + if (rc) + goto fail1; + + type = 0; + while (nvram_types != 0) { + if (nvram_types & 1) { + rc = efx_mcdi_nvram_test(efx, type); + if (rc) + goto fail2; + } + type++; + nvram_types >>= 1; + } + + return 0; + +fail2: + netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", + __func__, type); +fail1: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_read_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); + unsigned int flags, index; + const char *reason; + size_t outlen; + int retry; + int rc; + + /* Attempt to read any stored assertion state before we reboot + * the mcfw out of the assertion handler. Retry twice, once + * because a boot-time assertion might cause this command to fail + * with EINTR. And once again because GET_ASSERTS can race with + * MC_CMD_REBOOT running on the other port. */ + retry = 2; + do { + MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, + inbuf, MC_CMD_GET_ASSERTS_IN_LEN, + outbuf, sizeof(outbuf), &outlen); + } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); + + if (rc) { + efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS, + MC_CMD_GET_ASSERTS_IN_LEN, outbuf, + outlen, rc); + return rc; + } + if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) + return -EIO; + + /* Print out any recorded assertion state */ + flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); + if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) + return 0; + + reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) + ? "system-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) + ? "thread-level assertion" + : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) + ? 
"watchdog reset" + : "unknown assertion"; + netif_err(efx, hw, efx->net_dev, + "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), + MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); + + /* Print out the registers */ + for (index = 0; + index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; + index++) + netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", + 1 + index, + MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, + index)); + + return 0; +} + +static void efx_mcdi_exit_assertion(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + + /* If the MC is running debug firmware, it might now be + * waiting for a debugger to attach, but we just want it to + * reboot. We set a flag that makes the command a no-op if it + * has already done so. We don't know what return code to + * expect (0 or -EIO), so ignore it. + */ + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, + MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); + (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, + NULL, 0, NULL); +} + +int efx_mcdi_handle_assertion(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_read_assertion(efx); + if (rc) + return rc; + + efx_mcdi_exit_assertion(efx); + + return 0; +} + +void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); + int rc; + + BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); + BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); + BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); + + BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); + + rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_mcdi_reset_func(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0); + MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG, + ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_reset_mc(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* White is black, and up is down */ + if (rc == -EIO) + return 0; + if (rc == 0) + rc = -EIO; + return rc; +} + +enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason) +{ + return RESET_TYPE_RECOVER_OR_ALL; +} + +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) +{ + int rc; + + /* If MCDI is down, we can't handle_assertion */ + if (method == RESET_TYPE_MCDI_TIMEOUT) { + rc = pci_reset_function(efx->pci_dev); + if (rc) + return rc; + /* Re-enable polled MCDI completion */ + if (efx->mcdi) { + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + mcdi->mode = MCDI_MODE_POLL; + } + return 0; + } + + /* Recover from a failed assertion pre-reset */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + if (method == RESET_TYPE_WORLD) + return efx_mcdi_reset_mc(efx); + else + return efx_mcdi_reset_func(efx); +} + +static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, + const u8 *mac, int *id_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); + size_t outlen; + int rc; + + 
MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); + MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, + MC_CMD_FILTER_MODE_SIMPLE); + ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac); + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; + +} + + +int +efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) +{ + return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); +} + + +int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { + rc = -EIO; + goto fail; + } + + *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); + + return 0; + +fail: + *id_out = -1; + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + + +int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_flush_rxqs(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_rx_queue *rx_queue; + MCDI_DECLARE_BUF(inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); + int rc, count; + + BUILD_BUG_ON(EFX_MAX_CHANNELS > + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); + + count = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) { + if (rx_queue->flush_pending) { + rx_queue->flush_pending = false; + atomic_dec(&efx->rxq_flush_pending); + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + count, efx_rx_queue_index(rx_queue)); + count++; + } + } + } + + rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); + WARN_ON(rc < 0); + + return rc; +} + +int efx_mcdi_wol_filter_reset(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); + return rc; +} + +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + + BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); + return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +#ifdef CONFIG_SFC_MTD + +#define EFX_MCDI_NVRAM_LEN_MAX 128 + +static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); + + BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, + loff_t offset, u8 *buffer, size_t length) +{ + 
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, + MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); + return 0; +} + +static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, + MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); + memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, + ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, + loff_t offset, size_t length) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); + MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); + + BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); + + BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, u8 *buffer) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, + buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk = part->common.mtd.erasesize; + int rc = 0; + + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); + if (rc) + goto out; + part->updating = true; + } + + /* The MCDI interface can in fact do multiple erase blocks at once; + * but erasing may be slow, so we make multiple calls here to avoid + * tripping the MCDI RPC timeout. 
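+	 *
+	 * (Illustrative numbers, not from the original comment: with a 4 KB
+	 * erase block and a 12 KB request, the loop below issues three
+	 * separate MC_CMD_NVRAM_ERASE calls of one erasesize each.)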
*/ + while (offset < end) { + rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, + chunk); + if (rc) + goto out; + offset += chunk; + } +out: + return rc; +} + +int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, + size_t len, size_t *retlen, const u8 *buffer) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + loff_t offset = start; + loff_t end = min_t(loff_t, start + len, mtd->size); + size_t chunk; + int rc = 0; + + if (!part->updating) { + rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); + if (rc) + goto out; + part->updating = true; + } + + while (offset < end) { + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); + rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, + buffer, chunk); + if (rc) + goto out; + offset += chunk; + buffer += chunk; + } +out: + *retlen = offset - start; + return rc; +} + +int efx_mcdi_mtd_sync(struct mtd_info *mtd) +{ + struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + int rc = 0; + + if (part->updating) { + part->updating = false; + rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); + } + + return rc; +} + +void efx_mcdi_mtd_rename(struct efx_mtd_partition *part) +{ + struct efx_mcdi_mtd_partition *mcdi_part = + container_of(part, struct efx_mcdi_mtd_partition, common); + struct efx_nic *efx = part->mtd.priv; + + snprintf(part->name, sizeof(part->name), "%s %s:%02x", + efx->name, part->type_name, mcdi_part->fw_subtype); +} + +#endif /* CONFIG_SFC_MTD */ diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h new file mode 100644 index 000000000..56465f746 --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -0,0 +1,361 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_MCDI_H +#define EFX_MCDI_H + +/** + * enum efx_mcdi_state - MCDI request handling state + * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the + * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING + * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending. + * Only the thread that moved into this state is allowed to move out of it. + * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending. + * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread + * has not yet consumed the result. For all other threads, equivalent to + * %MCDI_STATE_RUNNING. + */ +enum efx_mcdi_state { + MCDI_STATE_QUIESCENT, + MCDI_STATE_RUNNING_SYNC, + MCDI_STATE_RUNNING_ASYNC, + MCDI_STATE_COMPLETED, +}; + +/** + * enum efx_mcdi_mode - MCDI transaction mode + * @MCDI_MODE_POLL: poll for MCDI completion, until timeout + * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once + * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls + */ +enum efx_mcdi_mode { + MCDI_MODE_POLL, + MCDI_MODE_EVENTS, + MCDI_MODE_FAIL, +}; + +/** + * struct efx_mcdi_iface - MCDI protocol context + * @efx: The associated NIC. + * @state: Request handling state. Waited for by @wq. + * @mode: Poll for mcdi completion, or wait for an mcdi_event. 
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING + * @new_epoch: Indicates start of day or start of MC reboot recovery + * @iface_lock: Serialises access to @seqno, @credits and response metadata + * @seqno: The next sequence number to use for mcdi requests. + * @credits: Number of spurious MCDI completion events allowed before we + * trigger a fatal error + * @resprc: Response error/success code (Linux numbering) + * @resp_hdr_len: Response header length + * @resp_data_len: Response data (SDU or error) length + * @async_lock: Serialises access to @async_list while event processing is + * enabled + * @async_list: Queue of asynchronous requests + * @async_timer: Timer for asynchronous request timeout + */ +struct efx_mcdi_iface { + struct efx_nic *efx; + enum efx_mcdi_state state; + enum efx_mcdi_mode mode; + wait_queue_head_t wq; + spinlock_t iface_lock; + bool new_epoch; + unsigned int credits; + unsigned int seqno; + int resprc; + size_t resp_hdr_len; + size_t resp_data_len; + spinlock_t async_lock; + struct list_head async_list; + struct timer_list async_timer; +}; + +struct efx_mcdi_mon { + struct efx_buffer dma_buf; + struct mutex update_lock; + unsigned long last_update; + struct device *device; + struct efx_mcdi_mon_attribute *attrs; + struct attribute_group group; + const struct attribute_group *groups[2]; + unsigned int n_attrs; +}; + +struct efx_mcdi_mtd_partition { + struct efx_mtd_partition common; + bool updating; + u16 nvram_type; + u16 fw_subtype; +}; + +#define to_efx_mcdi_mtd_partition(mtd) \ + container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd) + +/** + * struct efx_mcdi_data - extra state for NICs that implement MCDI + * @iface: Interface/protocol state + * @hwmon: Hardware monitor state + * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH. 
+ */ +struct efx_mcdi_data { + struct efx_mcdi_iface iface; +#ifdef CONFIG_SFC_MCDI_MON + struct efx_mcdi_mon hwmon; +#endif + u32 fn_flags; +}; + +static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) +{ + EFX_BUG_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->iface; +} + +#ifdef CONFIG_SFC_MCDI_MON +static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) +{ + EFX_BUG_ON_PARANOID(!efx->mcdi); + return &efx->mcdi->hwmon; +} +#endif + +int efx_mcdi_init(struct efx_nic *efx); +void efx_mcdi_fini(struct efx_nic *efx); + +int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, + size_t inlen, efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); +int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); + +int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const efx_dword_t *inbuf, size_t inlen); +int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + efx_dword_t *outbuf, size_t outlen, + size_t *outlen_actual); +int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, size_t *outlen_actual); + +typedef void efx_mcdi_async_completer(struct efx_nic *efx, + unsigned long cookie, int rc, + efx_dword_t *outbuf, + size_t outlen_actual); +int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); +int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd, + const efx_dword_t *inbuf, size_t inlen, + size_t outlen, + efx_mcdi_async_completer *complete, + unsigned long cookie); + +void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd, + size_t inlen, efx_dword_t *outbuf, + size_t outlen, int rc); + +int efx_mcdi_poll_reboot(struct efx_nic *efx); +void efx_mcdi_mode_poll(struct efx_nic *efx); +void efx_mcdi_mode_event(struct efx_nic *efx); +void efx_mcdi_flush_async(struct efx_nic *efx); + +void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event); +void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); + +/* We expect that 16- and 32-bit fields in MCDI requests and responses + * are appropriately aligned, but 64-bit fields are only + * 32-bit-aligned. Also, on Siena we must copy to the MC shared + * memory strictly 32 bits at a time, so add any necessary padding. 
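+ *
+ * Typical use of the macros below, shown for illustration only (this
+ * sketch mirrors efx_mcdi_nvram_info() in mcdi.c and assumes efx and
+ * type are already in scope):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ *	size_t outlen, size;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_NVRAM_INFO_OUT_LEN)
+ *		size = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);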
+ */ +#define MCDI_DECLARE_BUF(_name, _len) \ + efx_dword_t _name[DIV_ROUND_UP(_len, 4)] +#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \ + MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8)) +#define _MCDI_PTR(_buf, _offset) \ + ((u8 *)(_buf) + (_offset)) +#define MCDI_PTR(_buf, _field) \ + _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST) +#define _MCDI_CHECK_ALIGN(_ofst, _align) \ + ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1))) +#define _MCDI_DWORD(_buf, _field) \ + ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) + +#define MCDI_WORD(_buf, _field) \ + ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) +#define MCDI_SET_DWORD(_buf, _field, _value) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) +#define MCDI_DWORD(_buf, _field) \ + EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) +#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \ + EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1) +#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \ + _name2, _value2) \ + EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2) +#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3) \ + EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3) +#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4) \ + EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4) +#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5) \ + EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5) +#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6) \ + EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6) +#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \ + _name2, _value2, _name3, _value3, \ + _name4, _value4, _name5, _value5, \ + _name6, _value6, _name7, _value7) \ + EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \ + MC_CMD_ ## _name1, _value1, \ + MC_CMD_ ## _name2, _value2, \ + MC_CMD_ ## _name3, _value3, \ + MC_CMD_ ## _name4, _value4, \ + MC_CMD_ ## _name5, _value5, \ + MC_CMD_ ## _name6, _value6, \ + MC_CMD_ ## _name7, _value7) +#define MCDI_SET_QWORD(_buf, _field, _value) \ + do { \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ + EFX_DWORD_0, (u32)(_value)); \ + EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_QWORD(_buf, _field) \ + (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \ + (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32) +#define MCDI_FIELD(_ptr, _type, _field) \ + EFX_EXTRACT_DWORD( \ + *(efx_dword_t *) \ + 
_MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\ + MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \ + (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \ + MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1) + +#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \ + (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\ + + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align)) +#define MCDI_DECLARE_STRUCT_PTR(_name) \ + efx_dword_t *_name +#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \ + ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_VAR_ARRAY_LEN(_len, _field) \ + min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \ + ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN) +#define MCDI_ARRAY_WORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \ + le16_to_cpu(*(__force const __le16 *) \ + _MCDI_ARRAY_PTR(_buf, _field, _index, 2))) +#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \ + EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \ + EFX_DWORD_0, _value) +#define MCDI_ARRAY_DWORD(_buf, _field, _index) \ + EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0) +#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \ + (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \ + (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4)) +#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \ + do { \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\ + EFX_DWORD_0, (u32)(_value)); \ + EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\ + EFX_DWORD_0, (u64)(_value) >> 32); \ + } while (0) +#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \ + MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \ + _type ## _TYPEDEF, _field2) + +#define MCDI_EVENT_FIELD(_ev, _field) \ + EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) + +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); +int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, + u16 *fw_subtype_list, u32 *capabilities); +int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq); +int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); +int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, + size_t *size_out, size_t *erase_size_out, + bool *protected_out); +int efx_mcdi_nvram_test_all(struct efx_nic *efx); +int efx_mcdi_handle_assertion(struct efx_nic *efx); +void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); +int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, + int *id_out); +int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); +int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); +int efx_mcdi_wol_filter_reset(struct efx_nic *efx); +int efx_mcdi_flush_rxqs(struct efx_nic *efx); +int efx_mcdi_port_probe(struct efx_nic *efx); +void efx_mcdi_port_remove(struct efx_nic *efx); +int efx_mcdi_port_reconfigure(struct efx_nic *efx); +int efx_mcdi_port_get_number(struct efx_nic *efx); +u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); +int efx_mcdi_set_mac(struct efx_nic *efx); +#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) +void efx_mcdi_mac_start_stats(struct efx_nic *efx); +void 
efx_mcdi_mac_stop_stats(struct efx_nic *efx); +void efx_mcdi_mac_pull_stats(struct efx_nic *efx); +bool efx_mcdi_mac_check_fault(struct efx_nic *efx); +enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); +int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); + +#ifdef CONFIG_SFC_MCDI_MON +int efx_mcdi_mon_probe(struct efx_nic *efx); +void efx_mcdi_mon_remove(struct efx_nic *efx); +#else +static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } +static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} +#endif + +#ifdef CONFIG_SFC_MTD +int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); +int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len); +int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); +int efx_mcdi_mtd_sync(struct mtd_info *mtd); +void efx_mcdi_mtd_rename(struct efx_mtd_partition *part); +#endif + +#endif /* EFX_MCDI_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c new file mode 100644 index 000000000..bc27d5b58 --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi_mon.c @@ -0,0 +1,534 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2011-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include + +#include "net_driver.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "nic.h" + +enum efx_hwmon_type { + EFX_HWMON_UNKNOWN, + EFX_HWMON_TEMP, /* temperature */ + EFX_HWMON_COOL, /* cooling device, probably a heatsink */ + EFX_HWMON_IN, /* voltage */ + EFX_HWMON_CURR, /* current */ + EFX_HWMON_POWER, /* power */ + EFX_HWMON_TYPES_COUNT +}; + +static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = { + [EFX_HWMON_TEMP] = " degC", + [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */ + [EFX_HWMON_IN] = " mV", + [EFX_HWMON_CURR] = " mA", + [EFX_HWMON_POWER] = " W", +}; + +static const struct { + const char *label; + enum efx_hwmon_type hwmon_type; + int port; +} efx_mcdi_sensor_type[] = { +#define SENSOR(name, label, hwmon_type, port) \ + [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port } + SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1), + SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1), + SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1), + SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0), + SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0), + SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1), + SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1), + SENSOR(IN_1V0, "1.0V supply", IN, -1), + SENSOR(IN_1V2, "1.2V supply", IN, -1), + SENSOR(IN_1V8, "1.8V supply", IN, -1), + SENSOR(IN_2V5, "2.5V supply", IN, -1), + SENSOR(IN_3V3, "3.3V supply", IN, -1), + SENSOR(IN_12V0, "12.0V supply", IN, -1), + SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1), + SENSOR(IN_VREF, "Ref. 
voltage", IN, -1), + SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1), + SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1), + SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1), + SENSOR(PSU_TEMP, "Controller regulator temp.", + TEMP, -1), + SENSOR(FAN_0, "Fan 0", COOL, -1), + SENSOR(FAN_1, "Fan 1", COOL, -1), + SENSOR(FAN_2, "Fan 2", COOL, -1), + SENSOR(FAN_3, "Fan 3", COOL, -1), + SENSOR(FAN_4, "Fan 4", COOL, -1), + SENSOR(IN_VAOE, "AOE input supply", IN, -1), + SENSOR(OUT_IAOE, "AOE output current", CURR, -1), + SENSOR(IN_IAOE, "AOE input current", CURR, -1), + SENSOR(NIC_POWER, "Board power use", POWER, -1), + SENSOR(IN_0V9, "0.9V supply", IN, -1), + SENSOR(IN_I0V9, "0.9V supply current", CURR, -1), + SENSOR(IN_I1V2, "1.2V supply current", CURR, -1), + SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1), + SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1), + SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1), + SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1), + SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1), + SENSOR(CONTROLLER_VPTAT, + "Controller PTAT voltage (int. ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP, + "Controller die temp. (int. ADC)", TEMP, -1), + SENSOR(CONTROLLER_VPTAT_EXTADC, + "Controller PTAT voltage (ext. ADC)", IN, -1), + SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC, + "Controller die temp. (ext. ADC)", TEMP, -1), + SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1), + SENSOR(AIRFLOW, "Air flow raw", IN, -1), + SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1), + SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1), + SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1), +#undef SENSOR +}; + +static const char *const sensor_status_names[] = { + [MC_CMD_SENSOR_STATE_OK] = "OK", + [MC_CMD_SENSOR_STATE_WARNING] = "Warning", + [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", + [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", + [MC_CMD_SENSOR_STATE_NO_READING] = "No reading", +}; + +void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ + unsigned int type, state, value; + enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN; + const char *name = NULL, *state_txt, *unit; + + type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); + state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); + value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); + + /* Deal gracefully with the board having more drivers than we + * know about, but do not expect new sensor states. 
*/ + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { + name = efx_mcdi_sensor_type[type].label; + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + } + if (!name) + name = "No sensor name available"; + EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); + state_txt = sensor_status_names[state]; + EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT); + unit = efx_hwmon_unit[hwmon_type]; + if (!unit) + unit = ""; + + netif_err(efx, hw, efx->net_dev, + "Sensor %d (%s) reports condition '%s' for value %d%s\n", + type, name, state_txt, value, unit); +} + +#ifdef CONFIG_SFC_MCDI_MON + +struct efx_mcdi_mon_attribute { + struct device_attribute dev_attr; + unsigned int index; + unsigned int type; + enum efx_hwmon_type hwmon_type; + unsigned int limit_value; + char name[12]; +}; + +static int efx_mcdi_mon_update(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN); + int rc; + + MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR, + hwmon->dma_buf.dma_addr); + MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len); + + rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, + inbuf, sizeof(inbuf), NULL, 0, NULL); + if (rc == 0) + hwmon->last_update = jiffies; + return rc; +} + +static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index, + efx_dword_t *entry) +{ + struct efx_nic *efx = dev_get_drvdata(dev->parent); + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + int rc; + + BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0); + + mutex_lock(&hwmon->update_lock); + + /* Use cached value if last update was < 1 s ago */ + if (time_before(jiffies, hwmon->last_update + HZ)) + rc = 0; + else + rc = efx_mcdi_mon_update(efx); + + /* Copy out the requested entry */ + *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index]; + + mutex_unlock(&hwmon->update_lock); + + return rc; +} + +static ssize_t efx_mcdi_mon_show_value(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + efx_dword_t entry; + unsigned int value, state; + int rc; + + rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); + if (rc) + return rc; + + state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + if (state == MC_CMD_SENSOR_STATE_NO_READING) + return -EBUSY; + + value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE); + + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ + value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + + return sprintf(buf, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_limit(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + unsigned int value; + + value = mon_attr->limit_value; + + switch (mon_attr->hwmon_type) { + case EFX_HWMON_TEMP: + /* Convert temperature from degrees to milli-degrees Celsius */ + value *= 1000; + break; + case EFX_HWMON_POWER: + /* Convert power from watts to microwatts */ + value *= 1000000; + break; + default: + /* No conversion needed */ + break; + } + + return sprintf(buf, "%u\n", value); +} + +static ssize_t efx_mcdi_mon_show_alarm(struct device *dev, + struct device_attribute *attr, + char 
*buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + efx_dword_t entry; + int state; + int rc; + + rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry); + if (rc) + return rc; + + state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE); + return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK); +} + +static ssize_t efx_mcdi_mon_show_label(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_mcdi_mon_attribute *mon_attr = + container_of(attr, struct efx_mcdi_mon_attribute, dev_attr); + return sprintf(buf, "%s\n", + efx_mcdi_sensor_type[mon_attr->type].label); +} + +static void +efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name, + ssize_t (*reader)(struct device *, + struct device_attribute *, char *), + unsigned int index, unsigned int type, + unsigned int limit_value) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs]; + + strlcpy(attr->name, name, sizeof(attr->name)); + attr->index = index; + attr->type = type; + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) + attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + else + attr->hwmon_type = EFX_HWMON_UNKNOWN; + attr->limit_value = limit_value; + sysfs_attr_init(&attr->dev_attr.attr); + attr->dev_attr.attr.name = attr->name; + attr->dev_attr.attr.mode = S_IRUGO; + attr->dev_attr.show = reader; + hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr; +} + +int efx_mcdi_mon_probe(struct efx_nic *efx) +{ + unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0; + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX); + unsigned int n_pages, n_sensors, n_attrs, page; + size_t outlen; + char name[12]; + u32 mask; + int rc, i, j, type; + + /* Find out how many sensors are present */ + n_sensors = 0; + page = 0; + do { + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page); + + rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) + return -EIO; + + mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK); + n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + ++page; + } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT)); + n_pages = page; + + /* Don't create a device if there are none */ + if (n_sensors == 0) + return 0; + + rc = efx_nic_alloc_buffer( + efx, &hwmon->dma_buf, + n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN, + GFP_KERNEL); + if (rc) + return rc; + + mutex_init(&hwmon->update_lock); + efx_mcdi_mon_update(efx); + + /* Allocate space for the maximum possible number of + * attributes for this set of sensors: + * value, min, max, crit, alarm and label for each sensor. 
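For each sensor the code below creates at most the six attributes counted here, named with the standard hwmon "<prefix><index>_<suffix>" pattern. A small illustrative sketch (not part of the driver) that prints the names the first temperature sensor would get, assuming the "temp" prefix and the 1-based index used further down; example_attr_names() is a hypothetical helper.

/* Sketch only: the sysfs attribute names generated for one sensor,
 * e.g. the first temperature sensor ("temp", index 1).  The probe code
 * below builds the same strings with snprintf() before calling
 * efx_mcdi_mon_add_attr().
 */
static void example_attr_names(void)
{
	static const char *const suffixes[] = {
		"input", "min", "max", "crit", "alarm", "label",
	};
	char name[12];
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(suffixes); i++) {
		snprintf(name, sizeof(name), "temp1_%s", suffixes[i]);
		pr_info("%s\n", name);	/* temp1_input, temp1_min, ... */
	}
}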
+ */ + n_attrs = 6 * n_sensors; + hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL); + if (!hwmon->attrs) { + rc = -ENOMEM; + goto fail; + } + hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *), + GFP_KERNEL); + if (!hwmon->group.attrs) { + rc = -ENOMEM; + goto fail; + } + + for (i = 0, j = -1, type = -1; ; i++) { + enum efx_hwmon_type hwmon_type; + const char *hwmon_prefix; + unsigned hwmon_index; + u16 min1, max1, min2, max2; + + /* Find next sensor type or exit if there is none */ + do { + type++; + + if ((type % 32) == 0) { + page = type / 32; + j = -1; + if (page == n_pages) + goto hwmon_register; + + MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, + page); + rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + if (rc) + goto fail; + if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) { + rc = -EIO; + goto fail; + } + + mask = (MCDI_DWORD(outbuf, + SENSOR_INFO_OUT_MASK) & + ~(1 << MC_CMD_SENSOR_PAGE0_NEXT)); + + /* Check again for short response */ + if (outlen < + MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) { + rc = -EIO; + goto fail; + } + } + } while (!(mask & (1 << type % 32))); + j++; + + if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) { + hwmon_type = efx_mcdi_sensor_type[type].hwmon_type; + + /* Skip sensors specific to a different port */ + if (hwmon_type != EFX_HWMON_UNKNOWN && + efx_mcdi_sensor_type[type].port >= 0 && + efx_mcdi_sensor_type[type].port != + efx_port_num(efx)) + continue; + } else { + hwmon_type = EFX_HWMON_UNKNOWN; + } + + switch (hwmon_type) { + case EFX_HWMON_TEMP: + hwmon_prefix = "temp"; + hwmon_index = ++n_temp; /* 1-based */ + break; + case EFX_HWMON_COOL: + /* This is likely to be a heatsink, but there + * is no convention for representing cooling + * devices other than fans. + */ + hwmon_prefix = "fan"; + hwmon_index = ++n_cool; /* 1-based */ + break; + default: + hwmon_prefix = "in"; + hwmon_index = n_in++; /* 0-based */ + break; + case EFX_HWMON_CURR: + hwmon_prefix = "curr"; + hwmon_index = ++n_curr; /* 1-based */ + break; + case EFX_HWMON_POWER: + hwmon_prefix = "power"; + hwmon_index = ++n_power; /* 1-based */ + break; + } + + min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MIN1); + max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MAX1); + min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MIN2); + max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY, + SENSOR_INFO_ENTRY, j, MAX2); + + if (min1 != max1) { + snprintf(name, sizeof(name), "%s%u_input", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_value, i, type, 0); + + if (hwmon_type != EFX_HWMON_POWER) { + snprintf(name, sizeof(name), "%s%u_min", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, min1); + } + + snprintf(name, sizeof(name), "%s%u_max", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, max1); + + if (min2 != max2) { + /* Assume max2 is critical value. + * But we have no good way to expose min2. 
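Once the hwmon device is registered (further down in this function), the attributes created here can be read from userspace like any other hwmon sensor. A hedged userspace sketch follows; the hwmon instance ("hwmon0") and attribute name ("temp1_input") are examples only and depend on the system and on which sensors the MC reports.

/* Userspace sketch: read one sensor exported by this driver.  Paths are
 * illustrative.  Temperatures come back in millidegrees Celsius, as
 * converted in efx_mcdi_mon_show_value() above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long millideg;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("controller temperature: %ld.%03ld C\n",
	       millideg / 1000, millideg % 1000);
	return 0;
}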
+ */ + snprintf(name, sizeof(name), "%s%u_crit", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_limit, + i, type, max2); + } + } + + snprintf(name, sizeof(name), "%s%u_alarm", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_alarm, i, type, 0); + + if (type < ARRAY_SIZE(efx_mcdi_sensor_type) && + efx_mcdi_sensor_type[type].label) { + snprintf(name, sizeof(name), "%s%u_label", + hwmon_prefix, hwmon_index); + efx_mcdi_mon_add_attr( + efx, name, efx_mcdi_mon_show_label, i, type, 0); + } + } + +hwmon_register: + hwmon->groups[0] = &hwmon->group; + hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev, + KBUILD_MODNAME, NULL, + hwmon->groups); + if (IS_ERR(hwmon->device)) { + rc = PTR_ERR(hwmon->device); + goto fail; + } + + return 0; + +fail: + efx_mcdi_mon_remove(efx); + return rc; +} + +void efx_mcdi_mon_remove(struct efx_nic *efx) +{ + struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx); + + if (hwmon->device) + hwmon_device_unregister(hwmon->device); + kfree(hwmon->attrs); + kfree(hwmon->group.attrs); + efx_nic_free_buffer(efx, &hwmon->dma_buf); +} + +#endif /* CONFIG_SFC_MCDI_MON */ diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h new file mode 100644 index 000000000..e028de10e --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -0,0 +1,7907 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2009-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + + +#ifndef MCDI_PCOL_H +#define MCDI_PCOL_H + +/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */ +/* Power-on reset state */ +#define MC_FW_STATE_POR (1) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. */ +#define MC_FW_WARM_BOOT_OK (2) +/* The MC main image has started to boot. */ +#define MC_FW_STATE_BOOTING (4) +/* The Scheduler has started. */ +#define MC_FW_STATE_SCHED (8) +/* If this is set in MC_RESET_STATE_REG then it should be + * possible to jump into IMEM without loading code from flash. + * Unlike a warm boot, assume DMEM has been reloaded, so that + * the MC persistent data must be reinitialised. */ +#define MC_FW_TEPID_BOOT_OK (16) +/* BIST state has been initialized */ +#define MC_FW_BIST_INIT_OK (128) + +/* Siena MC shared memmory offsets */ +/* The 'doorbell' addresses are hard-wired to alert the MC when written */ +#define MC_SMEM_P0_DOORBELL_OFST 0x000 +#define MC_SMEM_P1_DOORBELL_OFST 0x004 +/* The rest of these are firmware-defined */ +#define MC_SMEM_P0_PDU_OFST 0x008 +#define MC_SMEM_P1_PDU_OFST 0x108 +#define MC_SMEM_PDU_LEN 0x100 +#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0 +#define MC_SMEM_P0_STATUS_OFST 0x7f8 +#define MC_SMEM_P1_STATUS_OFST 0x7fc + +/* Values to be written to the per-port status dword in shared + * memory on reboot and assert */ +#define MC_STATUS_DWORD_REBOOT (0xb007b007) +#define MC_STATUS_DWORD_ASSERT (0xdeaddead) + +/* Check whether an mcfw version (in host order) belongs to a bootloader */ +#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007) + +/* The current version of the MCDI protocol. 
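The boot-state and status-dword constants defined just above lend themselves to a couple of one-line checks. A minimal sketch, assuming the firmware version (host byte order) and the per-port status dword have already been read from the MC elsewhere; example_mc_state() is a hypothetical helper, not a driver function.

/* Sketch: interpret the constants defined above. */
static bool example_mc_state(u32 fw_version, u32 status)
{
	if (MC_FW_VERSION_IS_BOOTLOADER(fw_version))
		pr_info("MC is running a bootloader image\n");

	if (status == MC_STATUS_DWORD_REBOOT)
		pr_info("MC reported a reboot\n");
	else if (status == MC_STATUS_DWORD_ASSERT)
		pr_info("MC reported an assertion failure\n");

	return status == MC_STATUS_DWORD_REBOOT ||
	       status == MC_STATUS_DWORD_ASSERT;
}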
+ * + * Note that the ROM burnt into the card only talks V0, so at the very + * least every driver must support version 0 and MCDI_PCOL_VERSION + */ +#define MCDI_PCOL_VERSION 2 + +/* Unused commands: 0x23, 0x27, 0x30, 0x31 */ + +/* MCDI version 1 + * + * Each MCDI request starts with an MCDI_HEADER, which is a 32bit + * structure, filled in by the client. + * + * 0 7 8 16 20 22 23 24 31 + * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS | + * | | | + * | | \--- Response + * | \------- Error + * \------------------------------ Resync (always set) + * + * The client writes it's request into MC shared memory, and rings the + * doorbell. Each request is completed by either by the MC writting + * back into shared memory, or by writting out an event. + * + * All MCDI commands support completion by shared memory response. Each + * request may also contain additional data (accounted for by HEADER.LEN), + * and some response's may also contain additional data (again, accounted + * for by HEADER.LEN). + * + * Some MCDI commands support completion by event, in which any associated + * response data is included in the event. + * + * The protocol requires one response to be delivered for every request, a + * request should not be sent unless the response for the previous request + * has been received (either by polling shared memory, or by receiving + * an event). + */ + +/** Request/Response structure */ +#define MCDI_HEADER_OFST 0 +#define MCDI_HEADER_CODE_LBN 0 +#define MCDI_HEADER_CODE_WIDTH 7 +#define MCDI_HEADER_RESYNC_LBN 7 +#define MCDI_HEADER_RESYNC_WIDTH 1 +#define MCDI_HEADER_DATALEN_LBN 8 +#define MCDI_HEADER_DATALEN_WIDTH 8 +#define MCDI_HEADER_SEQ_LBN 16 +#define MCDI_HEADER_SEQ_WIDTH 4 +#define MCDI_HEADER_RSVD_LBN 20 +#define MCDI_HEADER_RSVD_WIDTH 1 +#define MCDI_HEADER_NOT_EPOCH_LBN 21 +#define MCDI_HEADER_NOT_EPOCH_WIDTH 1 +#define MCDI_HEADER_ERROR_LBN 22 +#define MCDI_HEADER_ERROR_WIDTH 1 +#define MCDI_HEADER_RESPONSE_LBN 23 +#define MCDI_HEADER_RESPONSE_WIDTH 1 +#define MCDI_HEADER_XFLAGS_LBN 24 +#define MCDI_HEADER_XFLAGS_WIDTH 8 +/* Request response using event */ +#define MCDI_HEADER_XFLAGS_EVREQ 0x01 + +/* Maximum number of payload bytes */ +#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc +#define MCDI_CTL_SDU_LEN_MAX_V2 0x400 + +#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2 + + +/* The MC can generate events for two reasons: + * - To complete a shared memory request if XFLAGS_EVREQ was set + * - As a notification (link state, i2c event), controlled + * via MC_CMD_LOG_CTRL + * + * Both events share a common structure: + * + * 0 32 33 36 44 52 60 + * | Data | Cont | Level | Src | Code | Rsvd | + * | + * \ There is another event pending in this notification + * + * If Code==CMDDONE, then the fields are further interpreted as: + * + * - LEVEL==INFO Command succeeded + * - LEVEL==ERR Command failed + * + * 0 8 16 24 32 + * | Seq | Datalen | Errno | Rsvd | + * + * These fields are taken directly out of the standard MCDI header, i.e., + * LEVEL==ERR, Datalen == 0 => Reboot + * + * Events can be squirted out of the UART (using LOG_CTRL) without a + * MCDI header. An event can be distinguished from a MCDI response by + * examining the first byte which is 0xc0. This corresponds to the + * non-existent MCDI command MC_CMD_DEBUG_LOG. + * + * 0 7 8 + * | command | Resync | = 0xc0 + * + * Since the event is written in big-endian byte order, this works + * providing bits 56-63 of the event are 0xc0. 
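The request header layout described above can be assembled with ordinary shifts from the MCDI_HEADER_*_LBN/_WIDTH values. A minimal sketch that builds a version-1 header dword for a polled request with a given command code, payload length and sequence number (RESYNC always set, no event completion requested); the function name and the u32 return convention are assumptions for illustration.

/* Sketch: build an MCDI v1 request header from the field definitions
 * above.  Only CODE, RESYNC, DATALEN and SEQ are filled in; the other
 * fields stay zero for a plain polled request.
 */
static u32 example_mcdi_v1_header(unsigned int code, size_t datalen,
				  unsigned int seq)
{
	u32 hdr = 0;

	hdr |= (code & 0x7f) << MCDI_HEADER_CODE_LBN;	/* 7-bit command code */
	hdr |= 1 << MCDI_HEADER_RESYNC_LBN;		/* always set         */
	hdr |= ((u32)(datalen & 0xff)) << MCDI_HEADER_DATALEN_LBN;
	hdr |= (seq & 0xf) << MCDI_HEADER_SEQ_LBN;	/* 4-bit sequence     */
	return hdr;
}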
+ * + * 56 60 63 + * | Rsvd | Code | = 0xc0 + * + * Which means for convenience the event code is 0xc for all MC + * generated events. + */ +#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc + + +/* Operation not permitted. */ +#define MC_CMD_ERR_EPERM 1 +/* Non-existent command target */ +#define MC_CMD_ERR_ENOENT 2 +/* assert() has killed the MC */ +#define MC_CMD_ERR_EINTR 4 +/* I/O failure */ +#define MC_CMD_ERR_EIO 5 +/* Try again */ +#define MC_CMD_ERR_EAGAIN 11 +/* Out of memory */ +#define MC_CMD_ERR_ENOMEM 12 +/* Caller does not hold required locks */ +#define MC_CMD_ERR_EACCES 13 +/* Resource is currently unavailable (e.g. lock contention) */ +#define MC_CMD_ERR_EBUSY 16 +/* No such device */ +#define MC_CMD_ERR_ENODEV 19 +/* Invalid argument to target */ +#define MC_CMD_ERR_EINVAL 22 +/* Out of range */ +#define MC_CMD_ERR_ERANGE 34 +/* Non-recursive resource is already acquired */ +#define MC_CMD_ERR_EDEADLK 35 +/* Operation not implemented */ +#define MC_CMD_ERR_ENOSYS 38 +/* Operation timed out */ +#define MC_CMD_ERR_ETIME 62 +/* Link has been severed */ +#define MC_CMD_ERR_ENOLINK 67 +/* Protocol error */ +#define MC_CMD_ERR_EPROTO 71 +/* Operation not supported */ +#define MC_CMD_ERR_ENOTSUP 95 +/* Address not available */ +#define MC_CMD_ERR_EADDRNOTAVAIL 99 +/* Not connected */ +#define MC_CMD_ERR_ENOTCONN 107 +/* Operation already in progress */ +#define MC_CMD_ERR_EALREADY 114 + +/* Resource allocation failed. */ +#define MC_CMD_ERR_ALLOC_FAIL 0x1000 +/* V-adaptor not found. */ +#define MC_CMD_ERR_NO_VADAPTOR 0x1001 +/* EVB port not found. */ +#define MC_CMD_ERR_NO_EVB_PORT 0x1002 +/* V-switch not found. */ +#define MC_CMD_ERR_NO_VSWITCH 0x1003 +/* Too many VLAN tags. */ +#define MC_CMD_ERR_VLAN_LIMIT 0x1004 +/* Bad PCI function number. */ +#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005 +/* Invalid VLAN mode. */ +#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006 +/* Invalid v-switch type. */ +#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007 +/* Invalid v-port type. */ +#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008 +/* MAC address exists. */ +#define MC_CMD_ERR_MAC_EXIST 0x1009 +/* Slave core not present */ +#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a +/* The datapath is disabled. */ +#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b + +#define MC_CMD_ERR_CODE_OFST 0 + +/* We define 8 "escape" commands to allow + for command number space extension */ + +#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78 +#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79 +#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A +#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B +#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C +#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D +#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E +#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F + +/* Vectors in the boot ROM */ +/* Point to the copycode entry point. */ +#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) +#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) +/* Points to the recovery mode entry point. 
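The low-numbered MC_CMD_ERR_* codes above follow the usual Linux errno numbering, so most can be negated directly; the 0x1000-range codes have no errno equivalent. A hedged sketch of turning a firmware error field into a negative errno; this is an illustration, not the driver's own translation routine, and the fallback to -EPROTO is an assumption.

/* Sketch: map an MCDI error code (MC_CMD_ERR_*) to a negative errno. */
static int example_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:				return 0;
	case MC_CMD_ERR_EPERM:		return -EPERM;
	case MC_CMD_ERR_ENOENT:		return -ENOENT;
	case MC_CMD_ERR_EIO:		return -EIO;
	case MC_CMD_ERR_EAGAIN:		return -EAGAIN;
	case MC_CMD_ERR_ENOMEM:		return -ENOMEM;
	case MC_CMD_ERR_EBUSY:		return -EBUSY;
	case MC_CMD_ERR_EINVAL:		return -EINVAL;
	case MC_CMD_ERR_ENOTSUP:	return -EOPNOTSUPP;
	case MC_CMD_ERR_ETIME:		return -ETIME;
	default:
		return -EPROTO;		/* including the 0x1000-range codes */
	}
}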
*/ +#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) +#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) + +/* The command set exported by the boot ROM (MCDI v0) */ +#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ + (1 << MC_CMD_READ32) | \ + (1 << MC_CMD_WRITE32) | \ + (1 << MC_CMD_COPYCODE) | \ + (1 << MC_CMD_GET_VERSION), \ + 0, 0, 0 } + +#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \ + (MC_CMD_SENSOR_ENTRY_OFST + (_x)) + +#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + +#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \ + (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \ + MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ + (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) + + +/* Version 2 adds an optional argument to error returns: the errno value + * may be followed by the (0-based) number of the first argument that + * could not be processed. + */ +#define MC_CMD_ERR_ARG_OFST 4 + +/* No space */ +#define MC_CMD_ERR_ENOSPC 28 + +/* MCDI_EVENT structuredef */ +#define MCDI_EVENT_LEN 8 +#define MCDI_EVENT_CONT_LBN 32 +#define MCDI_EVENT_CONT_WIDTH 1 +#define MCDI_EVENT_LEVEL_LBN 33 +#define MCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MCDI_EVENT_LEVEL_FATAL 0x3 +#define MCDI_EVENT_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_SEQ_LBN 0 +#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 +#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 +#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 +#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 +#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 +/* enum: 100Mbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 +/* enum: 1Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 +/* enum: 10Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 +/* enum: 40Gbs */ +#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4 +#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 +#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 +#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_STATE_LBN 8 +#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 +#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 +#define MCDI_EVENT_FWALERT_DATA_LBN 8 +#define MCDI_EVENT_FWALERT_DATA_WIDTH 24 +#define MCDI_EVENT_FWALERT_REASON_LBN 0 +#define MCDI_EVENT_FWALERT_REASON_WIDTH 8 +/* enum: SRAM Access. 
*/ +#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 +#define MCDI_EVENT_FLR_VF_LBN 0 +#define MCDI_EVENT_FLR_VF_WIDTH 8 +#define MCDI_EVENT_TX_ERR_TXQ_LBN 0 +#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 +#define MCDI_EVENT_TX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 +/* enum: Descriptor loader reported failure */ +#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 +/* enum: Descriptor ring empty and no EOP seen for packet */ +#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 +/* enum: Overlength packet */ +#define MCDI_EVENT_TX_ERR_2BIG 0x3 +/* enum: Malformed option descriptor */ +#define MCDI_EVENT_TX_BAD_OPTDESC 0x5 +/* enum: Option descriptor part way through a packet */ +#define MCDI_EVENT_TX_OPT_IN_PKT 0x8 +/* enum: DMA or PIO data access error */ +#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 +#define MCDI_EVENT_TX_ERR_INFO_LBN 16 +#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 +#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 +#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 +#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 +/* enum: PLL lost lock */ +#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 +/* enum: Filter overflow (PDMA) */ +#define MCDI_EVENT_PTP_ERR_FILTER 0x2 +/* enum: FIFO overflow (FPGA) */ +#define MCDI_EVENT_PTP_ERR_FIFO 0x3 +/* enum: Merge queue overflow */ +#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 +#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 +/* enum: AOE failed to load - no valid image? */ +#define MCDI_EVENT_AOE_NO_LOAD 0x1 +/* enum: AOE FC reported an exception */ +#define MCDI_EVENT_AOE_FC_ASSERT 0x2 +/* enum: AOE FC watchdogged */ +#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3 +/* enum: AOE FC failed to start */ +#define MCDI_EVENT_AOE_FC_NO_START 0x4 +/* enum: Generic AOE fault - likely to have been reported via other means too + * but intended for use by aoex driver. + */ +#define MCDI_EVENT_AOE_FAULT 0x5 +/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6 +/* enum: AOE loaded successfully */ +#define MCDI_EVENT_AOE_LOAD 0x7 +/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */ +#define MCDI_EVENT_AOE_DMA 0x8 +/* enum: AOE byteblaster connected/disconnected (Connection status in + * AOE_ERR_DATA) + */ +#define MCDI_EVENT_AOE_BYTEBLASTER 0x9 +/* enum: DDR ECC status update */ +#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +#define MCDI_EVENT_AOE_ERR_DATA_LBN 8 +#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_LBN 0 +#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_LBN 12 +#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_LBN 16 +#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 +#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 +#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_DATA_LBN 0 +#define MCDI_EVENT_DATA_WIDTH 32 +#define MCDI_EVENT_SRC_LBN 36 +#define MCDI_EVENT_SRC_WIDTH 8 +#define MCDI_EVENT_EV_CODE_LBN 60 +#define MCDI_EVENT_EV_CODE_WIDTH 4 +#define MCDI_EVENT_CODE_LBN 44 +#define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Bad assert. */ +#define MCDI_EVENT_CODE_BADSSERT 0x1 +/* enum: PM Notice. */ +#define MCDI_EVENT_CODE_PMNOTICE 0x2 +/* enum: Command done. */ +#define MCDI_EVENT_CODE_CMDDONE 0x3 +/* enum: Link change. 
*/ +#define MCDI_EVENT_CODE_LINKCHANGE 0x4 +/* enum: Sensor Event. */ +#define MCDI_EVENT_CODE_SENSOREVT 0x5 +/* enum: Schedule error. */ +#define MCDI_EVENT_CODE_SCHEDERR 0x6 +/* enum: Reboot. */ +#define MCDI_EVENT_CODE_REBOOT 0x7 +/* enum: Mac stats DMA. */ +#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 +/* enum: Firmware alert. */ +#define MCDI_EVENT_CODE_FWALERT 0x9 +/* enum: Function level reset. */ +#define MCDI_EVENT_CODE_FLR 0xa +/* enum: Transmit error */ +#define MCDI_EVENT_CODE_TX_ERR 0xb +/* enum: Tx flush has completed */ +#define MCDI_EVENT_CODE_TX_FLUSH 0xc +/* enum: PTP packet received timestamp */ +#define MCDI_EVENT_CODE_PTP_RX 0xd +/* enum: PTP NIC failure */ +#define MCDI_EVENT_CODE_PTP_FAULT 0xe +/* enum: PTP PPS event */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf +/* enum: Rx flush has completed */ +#define MCDI_EVENT_CODE_RX_FLUSH 0x10 +/* enum: Receive error */ +#define MCDI_EVENT_CODE_RX_ERR 0x11 +/* enum: AOE fault */ +#define MCDI_EVENT_CODE_AOE 0x12 +/* enum: Network port calibration failed (VCAL). */ +#define MCDI_EVENT_CODE_VCAL_FAIL 0x13 +/* enum: HW PPS event */ +#define MCDI_EVENT_CODE_HW_PPS 0x14 +/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and + * a different format) + */ +#define MCDI_EVENT_CODE_MC_REBOOT 0x15 +/* enum: the MC has detected a parity error */ +#define MCDI_EVENT_CODE_PAR_ERR 0x16 +/* enum: the MC has detected a correctable error */ +#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17 +/* enum: the MC has detected an uncorrectable error */ +#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18 +/* enum: The MC has entered offline BIST mode */ +#define MCDI_EVENT_CODE_MC_BIST 0x19 +/* enum: PTP tick event providing current NIC time */ +#define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: Artificial event generated by host and posted via MC for test + * purposes. 
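The DATA/CONT/LEVEL/SRC/CODE positions given in this structuredef are enough to pick an event apart by hand. A minimal sketch, treating the event as a plain 64-bit value in host order; the function name and the pr_info reporting are illustrative.

/* Sketch: split an MCDI event into its fields using the
 * MCDI_EVENT_*_LBN/_WIDTH positions above.
 */
static void example_decode_event(u64 ev)
{
	u32 data	  = ev & 0xffffffff;	/* DATA:  bits 0-31  */
	unsigned int cont = (ev >> 32) & 0x1;	/* CONT:  bit 32     */
	unsigned int lvl  = (ev >> 33) & 0x7;	/* LEVEL: bits 33-35 */
	unsigned int src  = (ev >> 36) & 0xff;	/* SRC:   bits 36-43 */
	unsigned int code = (ev >> 44) & 0xff;	/* CODE:  bits 44-51 */

	if (code == MCDI_EVENT_CODE_CMDDONE)
		pr_info("command done: seq %u, errno %u\n",
			data & 0xff, (data >> 16) & 0xff);
	else
		pr_info("event code %#x level %u src %u cont %u data %#x\n",
			code, lvl, src, cont, data);
}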
+ */ +#define MCDI_EVENT_CODE_TESTGEN 0xfa +#define MCDI_EVENT_CMDDONE_DATA_OFST 0 +#define MCDI_EVENT_CMDDONE_DATA_LBN 0 +#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_SENSOREVT_DATA_OFST 0 +#define MCDI_EVENT_SENSOREVT_DATA_LBN 0 +#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0 +#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32 +#define MCDI_EVENT_TX_ERR_DATA_OFST 0 +#define MCDI_EVENT_TX_ERR_DATA_LBN 0 +#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of + * timestamp + */ +#define MCDI_EVENT_PTP_SECONDS_OFST 0 +#define MCDI_EVENT_PTP_SECONDS_LBN 0 +#define MCDI_EVENT_PTP_SECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of + * timestamp + */ +#define MCDI_EVENT_PTP_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_MAJOR_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field + * of timestamp + */ +#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0 +#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0 +#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32 +/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of + * timestamp + */ +#define MCDI_EVENT_PTP_MINOR_OFST 0 +#define MCDI_EVENT_PTP_MINOR_LBN 0 +#define MCDI_EVENT_PTP_MINOR_WIDTH 32 +/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet + */ +#define MCDI_EVENT_PTP_UUID_OFST 0 +#define MCDI_EVENT_PTP_UUID_LBN 0 +#define MCDI_EVENT_PTP_UUID_WIDTH 32 +#define MCDI_EVENT_RX_ERR_DATA_OFST 0 +#define MCDI_EVENT_RX_ERR_DATA_LBN 0 +#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_PAR_ERR_DATA_OFST 0 +#define MCDI_EVENT_PAR_ERR_DATA_LBN 0 +#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0 +#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32 +/* For CODE_PTP_TIME events, the major value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0 +#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32 +/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 +#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 + +/* FCDI_EVENT structuredef */ +#define FCDI_EVENT_LEN 8 +#define FCDI_EVENT_CONT_LBN 32 +#define FCDI_EVENT_CONT_WIDTH 1 +#define FCDI_EVENT_LEVEL_LBN 33 +#define FCDI_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define FCDI_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define FCDI_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define FCDI_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define FCDI_EVENT_LEVEL_FATAL 0x3 +#define FCDI_EVENT_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 +#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 +#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ +#define FCDI_EVENT_LINK_UP 0x1 /* enum */ +#define FCDI_EVENT_DATA_LBN 0 +#define FCDI_EVENT_DATA_WIDTH 32 +#define FCDI_EVENT_SRC_LBN 36 +#define FCDI_EVENT_SRC_WIDTH 8 +#define FCDI_EVENT_EV_CODE_LBN 60 +#define FCDI_EVENT_EV_CODE_WIDTH 4 +#define FCDI_EVENT_CODE_LBN 44 +#define FCDI_EVENT_CODE_WIDTH 8 +/* enum: The FC was rebooted. 
*/ +#define FCDI_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define FCDI_EVENT_CODE_ASSERT 0x2 +/* enum: DDR3 test result. */ +#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3 +/* enum: Link status. */ +#define FCDI_EVENT_CODE_LINK_STATE 0x4 +/* enum: A timed read is ready to be serviced. */ +#define FCDI_EVENT_CODE_TIMED_READ 0x5 +/* enum: One or more PPS IN events */ +#define FCDI_EVENT_CODE_PPS_IN 0x6 +/* enum: Tick event from PTP clock */ +#define FCDI_EVENT_CODE_PTP_TICK 0x7 +/* enum: ECC error counters */ +#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 +#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 +#define FCDI_EVENT_ASSERT_TYPE_LBN 36 +#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 +#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 +#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 +#define FCDI_EVENT_LINK_STATE_DATA_OFST 0 +#define FCDI_EVENT_LINK_STATE_DATA_LBN 0 +#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 +#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 +#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 + +/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events + * to the MC. Note that this structure | is overlayed over a normal FCDI event + * such that bits 32-63 containing | event code, level, source etc remain the + * same. In this case the data | field of the header is defined to be the + * number of timestamps + */ +#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16 +#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248 +#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) +/* Number of timestamps following */ +#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 +#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 +/* Seconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 +/* Nanoseconds field of a timestamp record */ +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 +#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 +/* Timestamp records comprising the event */ +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 + + +/***********************************/ +/* MC_CMD_READ32 + * Read multiple 32byte words from MC memory. 
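A READ32 request is just two dwords, the start address and the word count, and the response is the requested dwords back to back, as the definitions just below describe. A minimal sketch of laying out the request payload by hand; MCDI payload fields are little-endian, and the driver normally does this through the MCDI_SET_DWORD() helpers rather than memcpy(), so example_build_read32() is purely illustrative.

/* Sketch: fill a caller-provided buffer with an MC_CMD_READ32 request
 * (ADDR at byte 0, NUMWORDS at byte 4, total MC_CMD_READ32_IN_LEN bytes).
 */
static void example_build_read32(u8 *buf, u32 addr, u32 numwords)
{
	__le32 le_addr = cpu_to_le32(addr);
	__le32 le_num = cpu_to_le32(numwords);

	memcpy(buf + MC_CMD_READ32_IN_ADDR_OFST, &le_addr, sizeof(le_addr));
	memcpy(buf + MC_CMD_READ32_IN_NUMWORDS_OFST, &le_num, sizeof(le_num));
	/* Response: "numwords" little-endian dwords starting at
	 * MC_CMD_READ32_OUT_BUFFER_OFST, at most BUFFER_MAXNUM (63) per call.
	 */
}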
+ */ +#define MC_CMD_READ32 0x1 + +/* MC_CMD_READ32_IN msgrequest */ +#define MC_CMD_READ32_IN_LEN 8 +#define MC_CMD_READ32_IN_ADDR_OFST 0 +#define MC_CMD_READ32_IN_NUMWORDS_OFST 4 + +/* MC_CMD_READ32_OUT msgresponse */ +#define MC_CMD_READ32_OUT_LENMIN 4 +#define MC_CMD_READ32_OUT_LENMAX 252 +#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_WRITE32 + * Write multiple 32byte words to MC memory. + */ +#define MC_CMD_WRITE32 0x2 + +/* MC_CMD_WRITE32_IN msgrequest */ +#define MC_CMD_WRITE32_IN_LENMIN 8 +#define MC_CMD_WRITE32_IN_LENMAX 252 +#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) +#define MC_CMD_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_WRITE32_IN_BUFFER_OFST 4 +#define MC_CMD_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62 + +/* MC_CMD_WRITE32_OUT msgresponse */ +#define MC_CMD_WRITE32_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_COPYCODE + * Copy MC code between two locations and jump. + */ +#define MC_CMD_COPYCODE 0x3 + +/* MC_CMD_COPYCODE_IN msgrequest */ +#define MC_CMD_COPYCODE_IN_LEN 16 +/* Source address */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +/* enum: The main image should be entered via a copy of a single word from and + * to this address when none of the other magic behaviours are required. + */ +#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 +/* enum: Entering the main image via a copy of a single word from and to this + * address indicates that it should not attempt to start the datapath CPUs. + * This is useful for certain soft rebooting scenarios. (Huntington only) + */ +#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 +/* enum: Entering the main image via a copy of a single word from and to this + * address indicates that it should not attempt to parse any configuration from + * flash. (In addition, the datapath CPUs will not be started, as for + * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for + * certain soft rebooting scenarios. (Huntington only) + */ +#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +/* Destination address */ +#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 +#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 +/* Address of where to jump after copy. */ +#define MC_CMD_COPYCODE_IN_JUMP_OFST 12 +/* enum: Control should return to the caller rather than jumping */ +#define MC_CMD_COPYCODE_JUMP_NONE 0x1 + +/* MC_CMD_COPYCODE_OUT msgresponse */ +#define MC_CMD_COPYCODE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_FUNC + * Select function for function-specific commands. + */ +#define MC_CMD_SET_FUNC 0x4 + +/* MC_CMD_SET_FUNC_IN msgrequest */ +#define MC_CMD_SET_FUNC_IN_LEN 4 +/* Set function */ +#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 + +/* MC_CMD_SET_FUNC_OUT msgresponse */ +#define MC_CMD_SET_FUNC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_BOOT_STATUS + * Get the instruction address from which the MC booted. + */ +#define MC_CMD_GET_BOOT_STATUS 0x5 + +/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ +#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 + +/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */ +#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8 +/* ?? 
*/ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0 +/* enum: indicates that the MC wasn't flash booted */ +#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1 + + +/***********************************/ +/* MC_CMD_GET_ASSERTS + * Get (and optionally clear) the current assertion status. Only + * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other + * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS + */ +#define MC_CMD_GET_ASSERTS 0x6 + +/* MC_CMD_GET_ASSERTS_IN msgrequest */ +#define MC_CMD_GET_ASSERTS_IN_LEN 4 +/* Set to clear assertion */ +#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0 + +/* MC_CMD_GET_ASSERTS_OUT msgresponse */ +#define MC_CMD_GET_ASSERTS_OUT_LEN 140 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0 +/* enum: No assertions have failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 +/* enum: A system-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 +/* enum: A thread-level assertion has failed. */ +#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 +/* enum: The system was reset by the watchdog. */ +#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 +/* enum: An illegal address trap stopped the system (huntington and later) */ +#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 + + +/***********************************/ +/* MC_CMD_LOG_CTRL + * Configure the output stream for various events and messages. + */ +#define MC_CMD_LOG_CTRL 0x7 + +/* MC_CMD_LOG_CTRL_IN msgrequest */ +#define MC_CMD_LOG_CTRL_IN_LEN 8 +/* Log destination */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 +/* enum: UART. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 +/* enum: Event queue. */ +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 + +/* MC_CMD_LOG_CTRL_OUT msgresponse */ +#define MC_CMD_LOG_CTRL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VERSION + * Get version information about the MC firmware. + */ +#define MC_CMD_GET_VERSION 0x8 + +/* MC_CMD_GET_VERSION_IN msgrequest */ +#define MC_CMD_GET_VERSION_IN_LEN 0 + +/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */ +#define MC_CMD_GET_VERSION_EXT_IN_LEN 4 +/* placeholder, set to 0 */ +#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0 + +/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */ +#define MC_CMD_GET_VERSION_V0_OUT_LEN 4 +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 +/* enum: Reserved version number to indicate "any" version. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff +/* enum: Bootrom version value for Siena. */ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000 +/* enum: Bootrom version value for Huntington. 
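A minimal sketch that classifies the FIRMWARE word of a GET_VERSION response against the enum values here and reports the MCDI protocol revision from the PCOL word. The internal layout of the 64-bit VERSION field is not spelled out in this header, so the sketch stops at those two dwords; example_parse_version() and the assumption that "outbuf" already holds the copied-out response are illustrative.

/* Sketch: interpret the first words of a MC_CMD_GET_VERSION response.
 * "outbuf" is assumed to hold at least MC_CMD_GET_VERSION_OUT_LEN bytes;
 * fields are little-endian.
 */
static void example_parse_version(const u8 *outbuf)
{
	u32 firmware = get_unaligned_le32(outbuf +
					  MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST);
	u32 pcol = get_unaligned_le32(outbuf + MC_CMD_GET_VERSION_OUT_PCOL_OFST);

	if (firmware == MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM ||
	    firmware == MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM ||
	    MC_FW_VERSION_IS_BOOTLOADER(firmware))
		pr_info("MC is in its boot ROM/bootloader\n");
	else
		pr_info("MC firmware %#x, MCDI protocol v%u\n", firmware, pcol);
}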
*/ +#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001 + +/* MC_CMD_GET_VERSION_OUT msgresponse */ +#define MC_CMD_GET_VERSION_OUT_LEN 32 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28 + +/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */ +#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 + + +/***********************************/ +/* MC_CMD_PTP + * Perform PTP operation + */ +#define MC_CMD_PTP 0xb + +/* MC_CMD_PTP_IN msgrequest */ +#define MC_CMD_PTP_IN_LEN 1 +/* PTP operation code */ +#define MC_CMD_PTP_IN_OP_OFST 0 +#define MC_CMD_PTP_IN_OP_LEN 1 +/* enum: Enable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_ENABLE 0x1 +/* enum: Disable PTP packet timestamping operation. */ +#define MC_CMD_PTP_OP_DISABLE 0x2 +/* enum: Send a PTP packet. */ +#define MC_CMD_PTP_OP_TRANSMIT 0x3 +/* enum: Read the current NIC time. */ +#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 +/* enum: Get the current PTP status. */ +#define MC_CMD_PTP_OP_STATUS 0x5 +/* enum: Adjust the PTP NIC's time. */ +#define MC_CMD_PTP_OP_ADJUST 0x6 +/* enum: Synchronize host and NIC time. */ +#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 +/* enum: Basic manufacturing tests. */ +#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 +/* enum: Packet based manufacturing tests. */ +#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 +/* enum: Reset some of the PTP related statistics */ +#define MC_CMD_PTP_OP_RESET_STATS 0xa +/* enum: Debug operations to MC. */ +#define MC_CMD_PTP_OP_DEBUG 0xb +/* enum: Read an FPGA register */ +#define MC_CMD_PTP_OP_FPGAREAD 0xc +/* enum: Write an FPGA register */ +#define MC_CMD_PTP_OP_FPGAWRITE 0xd +/* enum: Apply an offset to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe +/* enum: Change Apply an offset to the NIC clock */ +#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf +/* enum: Set the MC packet filter VLAN tags for received PTP packets */ +#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10 +/* enum: Set the MC packet filter UUID for received PTP packets */ +#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11 +/* enum: Set the MC packet filter Domain for received PTP packets */ +#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12 +/* enum: Set the clock source */ +#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13 +/* enum: Reset value of Timer Reg. 
*/ +#define MC_CMD_PTP_OP_RST_CLK 0x14 +/* enum: Enable the forwarding of PPS events to the host */ +#define MC_CMD_PTP_OP_PPS_ENABLE 0x15 +/* enum: Get the time format used by this NIC for PTP operations */ +#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16 +/* enum: Get the clock attributes. NOTE- extended version of + * MC_CMD_PTP_OP_GET_TIME_FORMAT + */ +#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16 +/* enum: Get corrections that should be applied to the various different + * timestamps + */ +#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17 +/* enum: Subscribe to receive periodic time events indicating the current NIC + * time + */ +#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18 +/* enum: Unsubscribe to stop receiving time events */ +#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19 +/* enum: PPS based manfacturing tests. Requires PPS output to be looped to PPS + * input on the same NIC. + */ +#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Above this for future use. */ +#define MC_CMD_PTP_OP_MAX 0x1b + +/* MC_CMD_PTP_IN_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_ENABLE_LEN 16 +#define MC_CMD_PTP_IN_CMD_OFST 0 +#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4 +/* Event queue for PTP events */ +#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8 +/* PTP timestamping mode */ +#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12 +/* enum: PTP, version 1 */ +#define MC_CMD_PTP_MODE_V1 0x0 +/* enum: PTP, version 1, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V1_VLAN 0x1 +/* enum: PTP, version 2 */ +#define MC_CMD_PTP_MODE_V2 0x2 +/* enum: PTP, version 2, with VLAN headers - deprecated */ +#define MC_CMD_PTP_MODE_V2_VLAN 0x3 +/* enum: PTP, version 2, with improved UUID filtering */ +#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 +/* enum: FCoE (seconds and microseconds) */ +#define MC_CMD_PTP_MODE_FCOE 0x5 + +/* MC_CMD_PTP_IN_DISABLE msgrequest */ +#define MC_CMD_PTP_IN_DISABLE_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_TRANSMIT msgrequest */ +#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 +#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Transmit packet length */ +#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8 +/* Transmit packet data */ +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240 + +/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ +#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_STATUS msgrequest */ +#define MC_CMD_PTP_IN_STATUS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_ADJUST_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20 +/* Time adjustment 
minor value */ +#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20 + +/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Number of time readings to capture */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8 +/* Host address in which to write "synchronization started" indication (64 + * bits) + */ +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12 +#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16 + +/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Enable or disable packet testing */ +#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8 + +/* MC_CMD_PTP_IN_RESET_STATS msgrequest */ +#define MC_CMD_PTP_IN_RESET_STATS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* Reset PTP statistics */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_DEBUG msgrequest */ +#define MC_CMD_PTP_IN_DEBUG_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Debug operations */ +#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8 + +/* MC_CMD_PTP_IN_FPGAREAD msgrequest */ +#define MC_CMD_PTP_IN_FPGAREAD_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12 + +/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ +#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 +#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240 + +/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Time adjustment in seconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8 +/* Time adjustment major value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8 +/* Time adjustment in nanoseconds */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12 +/* Time adjustment minor value */ +#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12 + +/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Frequency adjustment 40 bit fixed point ns */ +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8 +#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12 +/* enum: Number of fractional bits in frequency adjustment */ +/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */ + +/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Number of VLAN tags, 0 if not VLAN */ +#define 
MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8 +/* Set of VLAN tags to filter against */ +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4 +#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3 + +/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* 1 to enable UUID filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8 +/* UUID to filter against */ +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12 +#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16 + +/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* 1 to enable Domain filtering, 0 to disable */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8 +/* Domain number to filter against */ +#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12 + +/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Set the clock source. */ +#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8 +/* enum: Internal. */ +#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1 + +/* MC_CMD_PTP_IN_RST_CLK msgrequest */ +#define MC_CMD_PTP_IN_RST_CLK_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* Reset value of Timer Reg. */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */ +#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* Enable or disable */ +#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4 +/* enum: Enable */ +#define MC_CMD_PTP_ENABLE_PPS 0x0 +/* enum: Disable */ +#define MC_CMD_PTP_DISABLE_PPS 0x1 +/* Queue id to send events back */ +#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8 + +/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */ +#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */ +#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */ +#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ + +/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Event queue to send PTP time events to */ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 + +/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* Unsubscribe options */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8 +/* enum: Unsubscribe a single queue */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0 +/* enum: Unsubscribe all queues */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1 +/* Event queue ID */ +#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12 + +/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 +/* MC_CMD_PTP_IN_CMD_OFST 
0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* 1 to enable PPS test mode, 0 to disable and return result. */ +#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 + +/* MC_CMD_PTP_OUT msgresponse */ +#define MC_CMD_PTP_OUT_LEN 0 + +/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */ +#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4 + +/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */ +#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0 + +/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4 + +/* MC_CMD_PTP_OUT_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_STATUS_LEN 64 +/* Frequency of NIC's hardware clock */ +#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0 +/* Number of packets transmitted and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4 +/* Number of packets received and timestamped */ +#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8 +/* Number of packets timestamped by the FPGA */ +#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12 +/* Number of packets filter matched */ +#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16 +/* Number of packets not filter matched */ +#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20 +/* Number of PPS overflows (noise on input?) 
*/ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24 +/* Number of PPS bad periods */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28 +/* Minimum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32 +/* Maximum period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36 +/* Last period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40 +/* Mean period of PPS pulse in nanoseconds */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44 +/* Minimum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48 +/* Maximum offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52 +/* Last offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56 +/* Mean offset of PPS pulse in nanoseconds (signed) */ +#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60 + +/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) +/* A set of host and NIC times */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 +/* Host time immediately before NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 +/* Value of seconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4 +/* Timestamp major value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4 +/* Value of nanoseconds timestamp */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8 +/* Timestamp minor value */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8 +/* Host time immediately after NIC's hardware clock read */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12 +/* Number of nanoseconds waited after reading NIC's hardware clock */ +#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16 + +/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0 +/* enum: Successful test */ +#define MC_CMD_PTP_MANF_SUCCESS 0x0 +/* enum: FPGA load failed */ +#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 +/* enum: FPGA version invalid */ +#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 +/* enum: FPGA registers incorrect */ +#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 +/* enum: Oscillator possibly not working? */ +#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 +/* enum: Timestamps not increasing */ +#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 +/* enum: Mismatched packet count */ +#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 +/* enum: Mismatched packet count (Siena filter and FPGA) */ +#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 +/* enum: Not enough packets to perform timestamp check */ +#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 +/* enum: Timestamp trigger GPIO not working */ +#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 +/* enum: Insufficient PPS events to perform checks */ +#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa +/* enum: PPS time event period not sufficiently close to 1s. */ +#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb +/* enum: PPS time event nS reading not sufficiently close to zero. 
*/ +#define MC_CMD_PTP_MANF_PPS_NS 0xc +/* enum: PTP peripheral registers incorrect */ +#define MC_CMD_PTP_MANF_REGISTERS 0xd +/* enum: Failed to read time from PTP peripheral */ +#define MC_CMD_PTP_MANF_CLOCK_READ 0xe +/* Presence of external oscillator */ +#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4 + +/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0 +/* Number of packets received by FPGA */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4 +/* Number of packets received by Siena filters */ +#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8 + +/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ +#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252 +#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num)) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252 + +/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2 + +/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8 +/* Time format required/used by for this NIC. Applies to all PTP MCDI + * operations that pass times between the host and firmware. If this operation + * is not supported (older firmware) a format of seconds and nanoseconds should + * be assumed. + */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0 +/* enum: Times are in seconds and nanoseconds */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0 +/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1 +/* enum: Major register has units of seconds, minor 2^-27s per tick */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2 +/* Minimum acceptable value for a corrected synchronization timeset. When + * comparing host and NIC clock times, the MC returns a set of samples that + * contain the host start and end time, the MC time when the host start was + * detected and the time the MC waited between reading the time and detecting + * the host end. The corrected sync window is the difference between the host + * end and start times minus the time that the MC waited for host end. 
+ */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 + +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 +/* Uncorrected error on transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 +/* Uncorrected error on receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 + +/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 +/* Results of testing */ +#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ + + +/***********************************/ +/* MC_CMD_CSR_READ32 + * Read 32bit words from the indirect memory map. + */ +#define MC_CMD_CSR_READ32 0xc + +/* MC_CMD_CSR_READ32_IN msgrequest */ +#define MC_CMD_CSR_READ32_IN_LEN 12 +/* Address */ +#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_READ32_IN_STEP_OFST 4 +#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 + +/* MC_CMD_CSR_READ32_OUT msgresponse */ +#define MC_CMD_CSR_READ32_OUT_LENMIN 4 +#define MC_CMD_CSR_READ32_OUT_LENMAX 252 +#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) +/* The last dword is the status, not a value read */ +#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 +#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_CSR_WRITE32 + * Write 32bit dwords to the indirect memory map. + */ +#define MC_CMD_CSR_WRITE32 0xd + +/* MC_CMD_CSR_WRITE32_IN msgrequest */ +#define MC_CMD_CSR_WRITE32_IN_LENMIN 12 +#define MC_CMD_CSR_WRITE32_IN_LENMAX 252 +#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) +/* Address */ +#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 +#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61 + +/* MC_CMD_CSR_WRITE32_OUT msgresponse */ +#define MC_CMD_CSR_WRITE32_OUT_LEN 4 +#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 + + +/***********************************/ +/* MC_CMD_HP + * These commands are used for HP related features. They are grouped under one + * MCDI command to avoid creating too many MCDI commands. + */ +#define MC_CMD_HP 0x54 + +/* MC_CMD_HP_IN msgrequest */ +#define MC_CMD_HP_IN_LEN 16 +/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at + * the specified address with the specified interval.When address is NULL, + * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current + * state / 2: (debug) Show temperature reported by one of the supported + * sensors. + */ +#define MC_CMD_HP_IN_SUBCMD_OFST 0 +/* enum: OCSD (Option Card Sensor Data) sub-command. */ +#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 +/* enum: Last known valid HP sub-command. */ +#define MC_CMD_HP_IN_LAST_SUBCMD 0x0 +/* The address to the array of sensor fields. (Or NULL to use a sub-command.) 
+ */ +#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8 +#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4 +#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8 +/* The requested update interval, in seconds. (Or the sub-command if ADDR is + * NULL.) + */ +#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 + +/* MC_CMD_HP_OUT msgresponse */ +#define MC_CMD_HP_OUT_LEN 4 +#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 +/* enum: OCSD stopped for this card. */ +#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 +/* enum: OCSD was successfully started with the address provided. */ +#define MC_CMD_HP_OUT_OCSD_STARTED 0x2 +/* enum: OCSD was already started for this card. */ +#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3 + + +/***********************************/ +/* MC_CMD_STACKINFO + * Get stack information. + */ +#define MC_CMD_STACKINFO 0xf + +/* MC_CMD_STACKINFO_IN msgrequest */ +#define MC_CMD_STACKINFO_IN_LEN 0 + +/* MC_CMD_STACKINFO_OUT msgresponse */ +#define MC_CMD_STACKINFO_OUT_LENMIN 12 +#define MC_CMD_STACKINFO_OUT_LENMAX 252 +#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) +/* (thread ptr, stack size, free space) for each thread in system */ +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21 + + +/***********************************/ +/* MC_CMD_MDIO_READ + * MDIO register read. + */ +#define MC_CMD_MDIO_READ 0x10 + +/* MC_CMD_MDIO_READ_IN msgrequest */ +#define MC_CMD_MDIO_READ_IN_LEN 16 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_READ_IN_BUS_OFST 0 +/* enum: Internal. */ +#define MC_CMD_MDIO_BUS_INTERNAL 0x0 +/* enum: External. */ +#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 +/* Port address */ +#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. + */ +#define MC_CMD_MDIO_CLAUSE22 0x20 +/* Address */ +#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 + +/* MC_CMD_MDIO_READ_OUT msgresponse */ +#define MC_CMD_MDIO_READ_OUT_LEN 8 +/* Value */ +#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 +/* Status the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 +/* enum: Good. */ +#define MC_CMD_MDIO_STATUS_GOOD 0x8 + + +/***********************************/ +/* MC_CMD_MDIO_WRITE + * MDIO register write. + */ +#define MC_CMD_MDIO_WRITE 0x11 + +/* MC_CMD_MDIO_WRITE_IN msgrequest */ +#define MC_CMD_MDIO_WRITE_IN_LEN 20 +/* Bus number; there are two MDIO buses: one for the internal PHY, and one for + * external devices. + */ +#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 +/* enum: Internal. */ +/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ +/* enum: External. */ +/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ +/* Port address */ +#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 +/* Device Address or clause 22. */ +#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 +/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you + * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. 
+ */ +/* MC_CMD_MDIO_CLAUSE22 0x20 */ +/* Address */ +#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 +/* Value */ +#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 + +/* MC_CMD_MDIO_WRITE_OUT msgresponse */ +#define MC_CMD_MDIO_WRITE_OUT_LEN 4 +/* Status; the MDIO commands return the raw status bits from the MDIO block. A + * "good" transaction should have the DONE bit set and all other bits clear. + */ +#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 +/* enum: Good. */ +/* MC_CMD_MDIO_STATUS_GOOD 0x8 */ + + +/***********************************/ +/* MC_CMD_DBI_WRITE + * Write DBI register(s). + */ +#define MC_CMD_DBI_WRITE 0x12 + +/* MC_CMD_DBI_WRITE_IN msgrequest */ +#define MC_CMD_DBI_WRITE_IN_LENMIN 12 +#define MC_CMD_DBI_WRITE_IN_LENMAX 252 +#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) +/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset + * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF. + */ +#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21 + +/* MC_CMD_DBI_WRITE_OUT msgresponse */ +#define MC_CMD_DBI_WRITE_OUT_LEN 0 + +/* MC_CMD_DBIWROP_TYPEDEF structuredef */ +#define MC_CMD_DBIWROP_TYPEDEF_LEN 12 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 +#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PORT_READ32 + * Read a 32-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_READ32 0x14 + +/* MC_CMD_PORT_READ32_IN msgrequest */ +#define MC_CMD_PORT_READ32_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 + +/* MC_CMD_PORT_READ32_OUT msgresponse */ +#define MC_CMD_PORT_READ32_OUT_LEN 8 +/* Value */ +#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 +/* Status */ +#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 + + +/***********************************/ +/* MC_CMD_PORT_WRITE32 + * Write a 32-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE32 0x15 + +/* MC_CMD_PORT_WRITE32_IN msgrequest */ +#define MC_CMD_PORT_WRITE32_IN_LEN 8 +/* Address */ +#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 +/* Value */ +#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 + +/* MC_CMD_PORT_WRITE32_OUT msgresponse */ +#define MC_CMD_PORT_WRITE32_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 + + +/***********************************/ +/* MC_CMD_PORT_READ128 + * Read a 128-bit register from the indirect port register map. The port to + * access is implied by the Shared memory channel used. 
+ */ +#define MC_CMD_PORT_READ128 0x16 + +/* MC_CMD_PORT_READ128_IN msgrequest */ +#define MC_CMD_PORT_READ128_IN_LEN 4 +/* Address */ +#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 + +/* MC_CMD_PORT_READ128_OUT msgresponse */ +#define MC_CMD_PORT_READ128_OUT_LEN 20 +/* Value */ +#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 +#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 +/* Status */ +#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 + + +/***********************************/ +/* MC_CMD_PORT_WRITE128 + * Write a 128-bit register to the indirect port register map. The port to + * access is implied by the Shared memory channel used. + */ +#define MC_CMD_PORT_WRITE128 0x17 + +/* MC_CMD_PORT_WRITE128_IN msgrequest */ +#define MC_CMD_PORT_WRITE128_IN_LEN 20 +/* Address */ +#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 +/* Value */ +#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 +#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 + +/* MC_CMD_PORT_WRITE128_OUT msgresponse */ +#define MC_CMD_PORT_WRITE128_OUT_LEN 4 +/* Status */ +#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 + +/* MC_CMD_CAPABILITIES structuredef */ +#define MC_CMD_CAPABILITIES_LEN 4 +/* Small buf table. */ +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0 +#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1 +/* Turbo mode (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_LBN 1 +#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1 +/* Turbo mode active (for Maranello). */ +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2 +#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1 +/* PTP offload. */ +#define MC_CMD_CAPABILITIES_PTP_LBN 3 +#define MC_CMD_CAPABILITIES_PTP_WIDTH 1 +/* AOE mode. */ +#define MC_CMD_CAPABILITIES_AOE_LBN 4 +#define MC_CMD_CAPABILITIES_AOE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5 +#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1 +/* AOE mode active. */ +#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6 +#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1 +#define MC_CMD_CAPABILITIES_RESERVED_LBN 7 +#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25 + + +/***********************************/ +/* MC_CMD_GET_BOARD_CFG + * Returns the MC firmware configuration structure. + */ +#define MC_CMD_GET_BOARD_CFG 0x18 + +/* MC_CMD_GET_BOARD_CFG_IN msgrequest */ +#define MC_CMD_GET_BOARD_CFG_IN_LEN 0 + +/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */ +#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 +#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32 +/* See MC_CMD_CAPABILITIES */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36 +/* See MC_CMD_CAPABILITIES */ +#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64 +#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68 +/* This field contains a 16-bit value for each of the types of NVRAM area. 
The + * values are defined in the firmware/mc/platform/.c file for a specific board + * type, but otherwise have no meaning to the MC; they are used by the driver + * to manage selection of appropriate firmware updates. + */ +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32 + + +/***********************************/ +/* MC_CMD_DBI_READX + * Read DBI register(s) -- extended functionality + */ +#define MC_CMD_DBI_READX 0x19 + +/* MC_CMD_DBI_READX_IN msgrequest */ +#define MC_CMD_DBI_READX_IN_LENMIN 8 +#define MC_CMD_DBI_READX_IN_LENMAX 248 +#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) +/* Each Read op consists of an address (offset 0), VF/CS2) */ +#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 +#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0 +#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31 + +/* MC_CMD_DBI_READX_OUT msgresponse */ +#define MC_CMD_DBI_READX_OUT_LENMIN 4 +#define MC_CMD_DBI_READX_OUT_LENMAX 252 +#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) +/* Value */ +#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 +#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 +#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 + +/* MC_CMD_DBIRDOP_TYPEDEF structuredef */ +#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 +#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32 +#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32 + + +/***********************************/ +/* MC_CMD_SET_RAND_SEED + * Set the 16byte seed for the MC pseudo-random generator. + */ +#define MC_CMD_SET_RAND_SEED 0x1a + +/* MC_CMD_SET_RAND_SEED_IN msgrequest */ +#define MC_CMD_SET_RAND_SEED_IN_LEN 16 +/* Seed value. */ +#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 +#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16 + +/* MC_CMD_SET_RAND_SEED_OUT msgresponse */ +#define MC_CMD_SET_RAND_SEED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LTSSM_HIST + * Retrieve the history of the LTSSM, if the build supports it. + */ +#define MC_CMD_LTSSM_HIST 0x1b + +/* MC_CMD_LTSSM_HIST_IN msgrequest */ +#define MC_CMD_LTSSM_HIST_IN_LEN 0 + +/* MC_CMD_LTSSM_HIST_OUT msgresponse */ +#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 +#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) +/* variable number of LTSSM values, as bytes. The history is read-to-clear. */ +#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_DRV_ATTACH + * Inform MCPU that this port is managed on the host (i.e. driver active). 
For + * Huntington, also request the preferred datapath firmware to use if possible + * (it may not be possible for this request to be fulfilled; the driver must + * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which + * features are actually available). The FIRMWARE_ID field is ignored by older + * platforms. + */ +#define MC_CMD_DRV_ATTACH 0x1c + +/* MC_CMD_DRV_ATTACH_IN msgrequest */ +#define MC_CMD_DRV_ATTACH_IN_LEN 12 +/* new state (0=detached, 1=attached) to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8 +/* enum: Prefer to use full featured firmware */ +#define MC_CMD_FW_FULL_FEATURED 0x0 +/* enum: Prefer to use firmware with fewer features but lower latency */ +#define MC_CMD_FW_LOW_LATENCY 0x1 + +/* MC_CMD_DRV_ATTACH_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_OUT_LEN 4 +/* previous or existing state (0=detached, 1=attached) */ +#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 + +/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 +/* previous or existing state (0=detached, 1=attached) */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 +/* Flags associated with this function */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 +/* enum: Labels the lowest-numbered function visible to the OS */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 +/* enum: The function can control the link state of the physical port it is + * bound to. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 +/* enum: The function can perform privileged operations */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 + + +/***********************************/ +/* MC_CMD_SHMUART + * Route UART output to circular buffer in shared memory instead. + */ +#define MC_CMD_SHMUART 0x1f + +/* MC_CMD_SHMUART_IN msgrequest */ +#define MC_CMD_SHMUART_IN_LEN 4 +/* ??? */ +#define MC_CMD_SHMUART_IN_FLAG_OFST 0 + +/* MC_CMD_SHMUART_OUT msgresponse */ +#define MC_CMD_SHMUART_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PORT_RESET + * Generic per-port reset. There is no equivalent for per-board reset. Locks + * required: None; Return code: 0, ETIME. NOTE: This command is deprecated - + * use MC_CMD_ENTITY_RESET instead. + */ +#define MC_CMD_PORT_RESET 0x20 + +/* MC_CMD_PORT_RESET_IN msgrequest */ +#define MC_CMD_PORT_RESET_IN_LEN 0 + +/* MC_CMD_PORT_RESET_OUT msgresponse */ +#define MC_CMD_PORT_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ENTITY_RESET + * Generic per-resource reset. There is no equivalent for per-board reset. + * Locks required: None; Return code: 0, ETIME. NOTE: This command is an + * extended version of the deprecated MC_CMD_PORT_RESET with added fields. + */ +#define MC_CMD_ENTITY_RESET 0x20 + +/* MC_CMD_ENTITY_RESET_IN msgrequest */ +#define MC_CMD_ENTITY_RESET_IN_LEN 4 +/* Optional flags field. Omitting this will perform a "legacy" reset action + * (TBD). + */ +#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 + +/* MC_CMD_ENTITY_RESET_OUT msgresponse */ +#define MC_CMD_ENTITY_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PCIE_CREDITS + * Read instantaneous and minimum flow control thresholds. 
+ */ +#define MC_CMD_PCIE_CREDITS 0x21 + +/* MC_CMD_PCIE_CREDITS_IN msgrequest */ +#define MC_CMD_PCIE_CREDITS_IN_LEN 8 +/* poll period. 0 is disabled */ +#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 +/* wipe statistics */ +#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 + +/* MC_CMD_PCIE_CREDITS_OUT msgresponse */ +#define MC_CMD_PCIE_CREDITS_OUT_LEN 16 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6 +#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14 +#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2 + + +/***********************************/ +/* MC_CMD_RXD_MONITOR + * Get histogram of RX queue fill level. + */ +#define MC_CMD_RXD_MONITOR 0x22 + +/* MC_CMD_RXD_MONITOR_IN msgrequest */ +#define MC_CMD_RXD_MONITOR_IN_LEN 12 +#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 +#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 + +/* MC_CMD_RXD_MONITOR_OUT msgresponse */ +#define MC_CMD_RXD_MONITOR_OUT_LEN 80 +#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 +#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 +#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 +#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 +#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 + + +/***********************************/ +/* MC_CMD_PUTS + * Copy the given ASCII string out onto UART and/or out of the network port. 
+ */ +#define MC_CMD_PUTS 0x23 + +/* MC_CMD_PUTS_IN msgrequest */ +#define MC_CMD_PUTS_IN_LENMIN 13 +#define MC_CMD_PUTS_IN_LENMAX 252 +#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) +#define MC_CMD_PUTS_IN_DEST_OFST 0 +#define MC_CMD_PUTS_IN_UART_LBN 0 +#define MC_CMD_PUTS_IN_UART_WIDTH 1 +#define MC_CMD_PUTS_IN_PORT_LBN 1 +#define MC_CMD_PUTS_IN_PORT_WIDTH 1 +#define MC_CMD_PUTS_IN_DHOST_OFST 4 +#define MC_CMD_PUTS_IN_DHOST_LEN 6 +#define MC_CMD_PUTS_IN_STRING_OFST 12 +#define MC_CMD_PUTS_IN_STRING_LEN 1 +#define MC_CMD_PUTS_IN_STRING_MINNUM 1 +#define MC_CMD_PUTS_IN_STRING_MAXNUM 240 + +/* MC_CMD_PUTS_OUT msgresponse */ +#define MC_CMD_PUTS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PHY_CFG + * Report PHY configuration. This guarantees to succeed even if the PHY is in a + * 'zombie' state. Locks required: None + */ +#define MC_CMD_GET_PHY_CFG 0x24 + +/* MC_CMD_GET_PHY_CFG_IN msgrequest */ +#define MC_CMD_GET_PHY_CFG_IN_LEN 0 + +/* MC_CMD_GET_PHY_CFG_OUT msgresponse */ +#define MC_CMD_GET_PHY_CFG_OUT_LEN 72 +/* flags */ +#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4 +/* Bitmask of supported capabilities */ +#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 +#define MC_CMD_PHY_CAP_10HDX_LBN 1 +#define MC_CMD_PHY_CAP_10HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10FDX_LBN 2 +#define MC_CMD_PHY_CAP_10FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100HDX_LBN 3 +#define MC_CMD_PHY_CAP_100HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100FDX_LBN 4 +#define MC_CMD_PHY_CAP_100FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000HDX_LBN 5 +#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000FDX_LBN 6 +#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10000FDX_LBN 7 +#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_PAUSE_LBN 8 +#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 +#define MC_CMD_PHY_CAP_ASYM_LBN 9 +#define MC_CMD_PHY_CAP_ASYM_WIDTH 1 +#define MC_CMD_PHY_CAP_AN_LBN 10 +#define MC_CMD_PHY_CAP_AN_WIDTH 1 +#define MC_CMD_PHY_CAP_40000FDX_LBN 11 +#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_DDM_LBN 12 +#define MC_CMD_PHY_CAP_DDM_WIDTH 1 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24 +#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20 +/* ?? */ +#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44 +/* enum: Xaui. */ +#define MC_CMD_MEDIA_XAUI 0x1 +/* enum: CX4. */ +#define MC_CMD_MEDIA_CX4 0x2 +/* enum: KX4. */ +#define MC_CMD_MEDIA_KX4 0x3 +/* enum: XFP Far. */ +#define MC_CMD_MEDIA_XFP 0x4 +/* enum: SFP+. */ +#define MC_CMD_MEDIA_SFP_PLUS 0x5 +/* enum: 10GBaseT. */ +#define MC_CMD_MEDIA_BASE_T 0x6 +/* enum: QSFP+. 
*/ +#define MC_CMD_MEDIA_QSFP_PLUS 0x7 +#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 +/* enum: Native clause 22 */ +#define MC_CMD_MMD_CLAUSE22 0x0 +#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ +#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */ +#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */ +#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */ +#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */ +#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */ +/* enum: Clause22 proxied over clause45 by PHY. */ +#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d +#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */ +#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */ +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52 +#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20 + + +/***********************************/ +/* MC_CMD_START_BIST + * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST + * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held) + */ +#define MC_CMD_START_BIST 0x25 + +/* MC_CMD_START_BIST_IN msgrequest */ +#define MC_CMD_START_BIST_IN_LEN 4 +/* Type of test. */ +#define MC_CMD_START_BIST_IN_TYPE_OFST 0 +/* enum: Run the PHY's short cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 +/* enum: Run the PHY's long cable BIST. */ +#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 +/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */ +#define MC_CMD_BPX_SERDES_BIST 0x3 +/* enum: Run the MC loopback tests. */ +#define MC_CMD_MC_LOOPBACK_BIST 0x4 +/* enum: Run the PHY's standard BIST. */ +#define MC_CMD_PHY_BIST 0x5 +/* enum: Run MC RAM test. */ +#define MC_CMD_MC_MEM_BIST 0x6 +/* enum: Run Port RAM test. */ +#define MC_CMD_PORT_MEM_BIST 0x7 +/* enum: Run register test. */ +#define MC_CMD_REG_BIST 0x8 + +/* MC_CMD_START_BIST_OUT msgresponse */ +#define MC_CMD_START_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_POLL_BIST + * Poll for BIST completion. Returns a single status code, and optionally some + * PHY specific bist output. The driver should only consume the BIST output + * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't + * successfully parse the BIST output, it should still respect the pass/Fail in + * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0, + * EACCES (if PHY_LOCK is not held). + */ +#define MC_CMD_POLL_BIST 0x26 + +/* MC_CMD_POLL_BIST_IN msgrequest */ +#define MC_CMD_POLL_BIST_IN_LEN 0 + +/* MC_CMD_POLL_BIST_OUT msgresponse */ +#define MC_CMD_POLL_BIST_OUT_LEN 8 +/* result */ +#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 +/* enum: Running. */ +#define MC_CMD_POLL_BIST_RUNNING 0x1 +/* enum: Passed. */ +#define MC_CMD_POLL_BIST_PASSED 0x2 +/* enum: Failed. */ +#define MC_CMD_POLL_BIST_FAILED 0x3 +/* enum: Timed-out. */ +#define MC_CMD_POLL_BIST_TIMEOUT 0x4 +#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 + +/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12 +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16 +/* Status of each channel A */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20 +/* enum: Ok. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 +/* enum: Open. 
*/ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 +/* enum: Intra-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 +/* enum: Inter-pair short. */ +#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 +/* enum: Busy. */ +#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 +/* Status of each channel B */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel C */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ +/* Status of each channel D */ +#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32 +/* Enum values, see field(s): */ +/* CABLE_STATUS_A */ + +/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4 +/* enum: Complete. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 +/* enum: Bus switch off I2C write. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 +/* enum: Bus switch off I2C no access IO exp. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 +/* enum: Bus switch off I2C no access module. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 +/* enum: IO exp I2C configure. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 +/* enum: Bus switch I2C no cross talk. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 +/* enum: Module presence. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 +/* enum: Module ID I2C access. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 +/* enum: Module ID sane value. */ +#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 + +/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */ +#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36 +/* result */ +/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */ +/* Enum values, see field(s): */ +/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */ +#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4 +/* enum: Test has completed. */ +#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0 +/* enum: RAM test - walk ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1 +/* enum: RAM test - walk zeros. */ +#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2 +/* enum: RAM test - walking inversions zeros/ones. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3 +/* enum: RAM test - walking inversions checkerboard. */ +#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4 +/* enum: Register test - set / clear individual bits. */ +#define MC_CMD_POLL_BIST_MEM_REG 0x5 +/* enum: ECC error detected. */ +#define MC_CMD_POLL_BIST_MEM_ECC 0x6 +/* Failure address, only valid if result is POLL_BIST_FAILED */ +#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8 +/* Bus or address space to which the failure address corresponds */ +#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12 +/* enum: MC MIPS bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0 +/* enum: CSR IREG bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1 +/* enum: RX DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2 +/* enum: TX0 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3 +/* enum: TX1 DPCPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4 +/* enum: RX DICPU bus. */ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5 +/* enum: TX DICPU bus. 
*/ +#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6 +/* Pattern written to RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16 +/* Actual value read from RAM / register */ +#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20 +/* ECC error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24 +/* ECC parity error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28 +/* ECC fatal error mask */ +#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32 + + +/***********************************/ +/* MC_CMD_FLUSH_RX_QUEUES + * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ + * flushes should be initiated via this MCDI operation, rather than via + * directly writing FLUSH_CMD. + * + * The flush is completed (either done/fail) asynchronously (after this command + * returns). The driver must still wait for flush done/failure events as usual. + */ +#define MC_CMD_FLUSH_RX_QUEUES 0x27 + +/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */ +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num)) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63 + +/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */ +#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_LOOPBACK_MODES + * Returns a bitmask of loopback modes available at each speed. + */ +#define MC_CMD_GET_LOOPBACK_MODES 0x28 + +/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 + +/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4 +/* enum: None. */ +#define MC_CMD_LOOPBACK_NONE 0x0 +/* enum: Data. */ +#define MC_CMD_LOOPBACK_DATA 0x1 +/* enum: GMAC. */ +#define MC_CMD_LOOPBACK_GMAC 0x2 +/* enum: XGMII. */ +#define MC_CMD_LOOPBACK_XGMII 0x3 +/* enum: XGXS. */ +#define MC_CMD_LOOPBACK_XGXS 0x4 +/* enum: XAUI. */ +#define MC_CMD_LOOPBACK_XAUI 0x5 +/* enum: GMII. */ +#define MC_CMD_LOOPBACK_GMII 0x6 +/* enum: SGMII. */ +#define MC_CMD_LOOPBACK_SGMII 0x7 +/* enum: XGBR. */ +#define MC_CMD_LOOPBACK_XGBR 0x8 +/* enum: XFI. */ +#define MC_CMD_LOOPBACK_XFI 0x9 +/* enum: XAUI Far. */ +#define MC_CMD_LOOPBACK_XAUI_FAR 0xa +/* enum: GMII Far. */ +#define MC_CMD_LOOPBACK_GMII_FAR 0xb +/* enum: SGMII Far. */ +#define MC_CMD_LOOPBACK_SGMII_FAR 0xc +/* enum: XFI Far. */ +#define MC_CMD_LOOPBACK_XFI_FAR 0xd +/* enum: GPhy. */ +#define MC_CMD_LOOPBACK_GPHY 0xe +/* enum: PhyXS. */ +#define MC_CMD_LOOPBACK_PHYXS 0xf +/* enum: PCS. */ +#define MC_CMD_LOOPBACK_PCS 0x10 +/* enum: PMA-PMD. */ +#define MC_CMD_LOOPBACK_PMAPMD 0x11 +/* enum: Cross-Port. */ +#define MC_CMD_LOOPBACK_XPORT 0x12 +/* enum: XGMII-Wireside. */ +#define MC_CMD_LOOPBACK_XGMII_WS 0x13 +/* enum: XAUI Wireside. */ +#define MC_CMD_LOOPBACK_XAUI_WS 0x14 +/* enum: XAUI Wireside Far. */ +#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 +/* enum: XAUI Wireside near. */ +#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 +/* enum: GMII Wireside. */ +#define MC_CMD_LOOPBACK_GMII_WS 0x17 +/* enum: XFI Wireside. */ +#define MC_CMD_LOOPBACK_XFI_WS 0x18 +/* enum: XFI Wireside Far. 
*/ +#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 +/* enum: PhyXS Wireside. */ +#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a +/* enum: PMA lanes MAC-Serdes. */ +#define MC_CMD_LOOPBACK_PMA_INT 0x1b +/* enum: KR Serdes Parallel (Encoder). */ +#define MC_CMD_LOOPBACK_SD_NEAR 0x1c +/* enum: KR Serdes Serial. */ +#define MC_CMD_LOOPBACK_SD_FAR 0x1d +/* enum: PMA lanes MAC-Serdes Wireside. */ +#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21 +/* enum: KR Serdes Serial Wireside. */ +#define MC_CMD_LOOPBACK_SD_FES_WS 0x22 +/* enum: Near side of AOE Siena side port */ +#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28 +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36 +/* Enum values, see field(s): */ +/* 100M */ + + +/***********************************/ +/* MC_CMD_GET_LINK + * Read the unified MAC/PHY link state. Locks required: None Return code: 0, + * ETIME. + */ +#define MC_CMD_GET_LINK 0x29 + +/* MC_CMD_GET_LINK_IN msgrequest */ +#define MC_CMD_GET_LINK_IN_LEN 0 + +/* MC_CMD_GET_LINK_OUT msgresponse */ +#define MC_CMD_GET_LINK_OUT_LEN 28 +/* near-side advertised capabilities */ +#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +/* link-partner advertised capabilities */ +#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +/* Current loopback setting. */ +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 +/* This returns the negotiated flow control value. 
*/ +#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +/* enum: Flow control is off. */ +#define MC_CMD_FCNTL_OFF 0x0 +/* enum: Respond to flow control. */ +#define MC_CMD_FCNTL_RESPOND 0x1 +/* enum: Respond to and Issue flow control. */ +#define MC_CMD_FCNTL_BIDIR 0x2 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 + + +/***********************************/ +/* MC_CMD_SET_LINK + * Write the unified MAC/PHY link configuration. Locks required: None. Return + * code: 0, EINVAL, ETIME + */ +#define MC_CMD_SET_LINK 0x2a + +/* MC_CMD_SET_LINK_IN msgrequest */ +#define MC_CMD_SET_LINK_IN_LEN 16 +/* ??? */ +#define MC_CMD_SET_LINK_IN_CAP_OFST 0 +/* Flags */ +#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 +/* Loopback mode. */ +#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 + +/* MC_CMD_SET_LINK_OUT msgresponse */ +#define MC_CMD_SET_LINK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_ID_LED + * Set identification LED state. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_ID_LED 0x2b + +/* MC_CMD_SET_ID_LED_IN msgrequest */ +#define MC_CMD_SET_ID_LED_IN_LEN 4 +/* Set LED state. */ +#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0 +#define MC_CMD_LED_OFF 0x0 /* enum */ +#define MC_CMD_LED_ON 0x1 /* enum */ +#define MC_CMD_LED_DEFAULT 0x2 /* enum */ + +/* MC_CMD_SET_ID_LED_OUT msgresponse */ +#define MC_CMD_SET_ID_LED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MAC + * Set MAC configuration. Locks required: None. Return code: 0, EINVAL + */ +#define MC_CMD_SET_MAC 0x2c + +/* MC_CMD_SET_MAC_IN msgrequest */ +#define MC_CMD_SET_MAC_IN_LEN 24 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. 
*/ +#define MC_CMD_FCNTL_AUTO 0x3 + +/* MC_CMD_SET_MAC_OUT msgresponse */ +#define MC_CMD_SET_MAC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PHY_STATS + * Get generic PHY statistics. This call returns the statistics for a generic + * PHY in a sparse array (indexed by the enumerate). Each value is represented + * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the + * statistics may be read from the message response. If DMA_ADDR != 0, then the + * statistics are dmad to that (page-aligned location). Locks required: None. + * Returns: 0, ETIME + */ +#define MC_CMD_PHY_STATS 0x2d + +/* MC_CMD_PHY_STATS_IN msgrequest */ +#define MC_CMD_PHY_STATS_IN_LEN 8 +/* ??? */ +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 + +/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3) +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4 +#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS +/* enum: OUI. */ +#define MC_CMD_OUI 0x0 +/* enum: PMA-PMD Link Up. */ +#define MC_CMD_PMA_PMD_LINK_UP 0x1 +/* enum: PMA-PMD RX Fault. */ +#define MC_CMD_PMA_PMD_RX_FAULT 0x2 +/* enum: PMA-PMD TX Fault. */ +#define MC_CMD_PMA_PMD_TX_FAULT 0x3 +/* enum: PMA-PMD Signal */ +#define MC_CMD_PMA_PMD_SIGNAL 0x4 +/* enum: PMA-PMD SNR A. */ +#define MC_CMD_PMA_PMD_SNR_A 0x5 +/* enum: PMA-PMD SNR B. */ +#define MC_CMD_PMA_PMD_SNR_B 0x6 +/* enum: PMA-PMD SNR C. */ +#define MC_CMD_PMA_PMD_SNR_C 0x7 +/* enum: PMA-PMD SNR D. */ +#define MC_CMD_PMA_PMD_SNR_D 0x8 +/* enum: PCS Link Up. */ +#define MC_CMD_PCS_LINK_UP 0x9 +/* enum: PCS RX Fault. */ +#define MC_CMD_PCS_RX_FAULT 0xa +/* enum: PCS TX Fault. */ +#define MC_CMD_PCS_TX_FAULT 0xb +/* enum: PCS BER. */ +#define MC_CMD_PCS_BER 0xc +/* enum: PCS Block Errors. */ +#define MC_CMD_PCS_BLOCK_ERRORS 0xd +/* enum: PhyXS Link Up. */ +#define MC_CMD_PHYXS_LINK_UP 0xe +/* enum: PhyXS RX Fault. */ +#define MC_CMD_PHYXS_RX_FAULT 0xf +/* enum: PhyXS TX Fault. */ +#define MC_CMD_PHYXS_TX_FAULT 0x10 +/* enum: PhyXS Align. */ +#define MC_CMD_PHYXS_ALIGN 0x11 +/* enum: PhyXS Sync. */ +#define MC_CMD_PHYXS_SYNC 0x12 +/* enum: AN link-up. */ +#define MC_CMD_AN_LINK_UP 0x13 +/* enum: AN Complete. */ +#define MC_CMD_AN_COMPLETE 0x14 +/* enum: AN 10GBaseT Status. */ +#define MC_CMD_AN_10GBT_STATUS 0x15 +/* enum: Clause 22 Link-Up. */ +#define MC_CMD_CL22_LINK_UP 0x16 +/* enum: (Last entry) */ +#define MC_CMD_PHY_NSTATS 0x17 + + +/***********************************/ +/* MC_CMD_MAC_STATS + * Get generic MAC statistics. This call returns unified statistics maintained + * by the MC as it switches between the GMAC and XMAC. The MC will write out + * all supported stats. The driver should zero initialise the buffer to + * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is + * performed, and the statistics may be read from the message response. If + * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location). + * Locks required: None. Returns: 0, ETIME + */ +#define MC_CMD_MAC_STATS 0x2e + +/* MC_CMD_MAC_STATS_IN msgrequest */ +#define MC_CMD_MAC_STATS_IN_LEN 16 +/* ??? 
*/ +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 +#define MC_CMD_MAC_STATS_IN_CMD_OFST 8 +#define MC_CMD_MAC_STATS_IN_DMA_LBN 0 +#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 +#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 + +/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3) +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS +#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ +#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ +#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ +#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */ +#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */ +#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */ +#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */ +#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */ +#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */ +#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */ +#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */ +#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */ +#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */ +#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */ +#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */ +#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */ +#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */ +#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */ +#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */ +#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */ +#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */ +#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */ +#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */ +#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */ +#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */ +#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */ +#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */ +#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */ +#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */ +#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */ +#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */ +#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */ +#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */ +#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */ +#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */ +#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */ +#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */ +#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */ 
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */ +#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */ +#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */ +#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */ +#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */ +#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */ +#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */ +#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */ +#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */ +#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */ +#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */ +#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */ +#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */ +#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */ +#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */ +#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */ +#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */ +#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */ +#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */ +#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */ +/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c +/* enum: PM discard_bb_overflow counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d +/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e +/* enum: PM discard_vfifo_full counter. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f +/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_TRUNC_QBB 0x40 +/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_QBB 0x41 +/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS + * capability only. + */ +#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42 +/* enum: RXDP counter: Number of packets dropped due to the queue being + * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43 +/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10 + * with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45 +/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with + * PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 +/* enum: RXDP counter: Number of times an emergency descriptor fetch was + * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47 +/* enum: RXDP counter: Number of times the DPCPU waited for an existing + * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. + */ +#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48 +/* enum: Start of GMAC stats buffer space, for Siena only. */ +#define MC_CMD_GMAC_DMABUF_START 0x40 +/* enum: End of GMAC stats buffer space, for Siena only. 
*/ +#define MC_CMD_GMAC_DMABUF_END 0x5f +#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */ +#define MC_CMD_MAC_NSTATS 0x61 /* enum */ + + +/***********************************/ +/* MC_CMD_SRIOV + * to be documented + */ +#define MC_CMD_SRIOV 0x30 + +/* MC_CMD_SRIOV_IN msgrequest */ +#define MC_CMD_SRIOV_IN_LEN 12 +#define MC_CMD_SRIOV_IN_ENABLE_OFST 0 +#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 +#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 + +/* MC_CMD_SRIOV_OUT msgresponse */ +#define MC_CMD_SRIOV_OUT_LEN 8 +#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 +#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 + +/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 +/* this is only used for the first record */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 +#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 + + +/***********************************/ +/* MC_CMD_MEMCPY + * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data + * embedded directly in the command. + * + * A common pattern is for a client to use generation counts to signal a dma + * update of a datastructure. To facilitate this, this MCDI operation can + * contain multiple requests which are executed in strict order. Requests take + * the form of duplicating the entire MCDI request continuously (including the + * requests record, which is ignored in all but the first structure) + * + * The source data can either come from a DMA from the host, or it can be + * embedded within the request directly, thereby eliminating a DMA read. To + * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and + * ADDR_LO=offset, and inserts the data at %offset from the start of the + * payload. It's the callers responsibility to ensure that the embedded data + * doesn't overlap the records. 
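As a worked illustration of the inline-data convention just described, the sketch below packs one MC_CMD_MEMCPY record whose source bytes are embedded in the request rather than DMA'd from the host. It is not part of the patch or the driver: put_le32() and memcpy_record_inline() are invented names, the definitions in this header are assumed to be in scope, and MCDI payload fields are assumed little-endian (real drivers normally go through their own MCDI accessor macros).

#include <stdint.h>

/* Illustrative helper: store a 32-bit value little-endian. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/*
 * Pack record 'idx' of an MC_CMD_MEMCPY request whose source data is embedded
 * inline, 'inline_ofst' bytes from the start of the MCDI payload.  NUM_RECORDS
 * is written into every record here, but the firmware only reads it from the
 * first one.
 */
static void memcpy_record_inline(uint8_t *payload, unsigned int idx,
				 uint32_t num_records, uint32_t to_rid,
				 uint64_t to_addr, uint32_t inline_ofst,
				 uint32_t length)
{
	uint8_t *rec = payload + idx * MC_CMD_MEMCPY_IN_RECORD_LEN;

	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST, num_records);
	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST, to_rid);
	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST, (uint32_t)to_addr);
	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST, (uint32_t)(to_addr >> 32));
	/* FROM_RID = RID_INLINE, ADDR_HI = 0, ADDR_LO = offset of embedded data */
	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST,
		 MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST, inline_ofst);
	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST, 0);
	put_le32(rec + MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST, length);
}

The caller is still responsible for placing the embedded bytes at inline_ofst and for keeping them clear of the record array, as the description above requires.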
+ * + * Returns: 0, EINVAL (invalid RID) + */ +#define MC_CMD_MEMCPY 0x31 + +/* MC_CMD_MEMCPY_IN msgrequest */ +#define MC_CMD_MEMCPY_IN_LENMIN 32 +#define MC_CMD_MEMCPY_IN_LENMAX 224 +#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) +/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ +#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 +#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 +#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7 + +/* MC_CMD_MEMCPY_OUT msgresponse */ +#define MC_CMD_MEMCPY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_SET + * Set a WoL filter. + */ +#define MC_CMD_WOL_FILTER_SET 0x32 + +/* MC_CMD_WOL_FILTER_SET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LEN 192 +#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 +#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */ +#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */ +/* A type value of 1 is unused. */ +#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 +/* enum: Magic */ +#define MC_CMD_WOL_TYPE_MAGIC 0x0 +/* enum: MS Windows Magic */ +#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 +/* enum: IPv4 Syn */ +#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 +/* enum: IPv6 Syn */ +#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 +/* enum: Bitmap */ +#define MC_CMD_WOL_TYPE_BITMAP 0x5 +/* enum: Link */ +#define MC_CMD_WOL_TYPE_LINK 0x6 +/* enum: (Above this for future use) */ +#define MC_CMD_WOL_TYPE_MAX 0x7 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46 + +/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18 +#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42 +#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2 + +/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56 +#define 
MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186 +#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1 + +/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12 +/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */ +/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */ +#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 + +/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_REMOVE + * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS + */ +#define MC_CMD_WOL_FILTER_REMOVE 0x33 + +/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ +#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 +#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 + +/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_RESET + * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0, + * ENOSYS + */ +#define MC_CMD_WOL_FILTER_RESET 0x34 + +/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 +#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 +#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ +#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ + +/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_MCAST_HASH + * Set the MCAST hash value without otherwise reconfiguring the MAC + */ +#define MC_CMD_SET_MCAST_HASH 0x35 + +/* MC_CMD_SET_MCAST_HASH_IN msgrequest */ +#define MC_CMD_SET_MCAST_HASH_IN_LEN 32 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 +#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 +#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16 + +/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */ +#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_TYPES + * Return bitfield indicating available types of virtual NVRAM partitions. + * Locks required: none. Returns: 0 + */ +#define MC_CMD_NVRAM_TYPES 0x36 + +/* MC_CMD_NVRAM_TYPES_IN msgrequest */ +#define MC_CMD_NVRAM_TYPES_IN_LEN 0 + +/* MC_CMD_NVRAM_TYPES_OUT msgresponse */ +#define MC_CMD_NVRAM_TYPES_OUT_LEN 4 +/* Bit mask of supported types. */ +#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 +/* enum: Disabled callisto. */ +#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 +/* enum: MC firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 +/* enum: MC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 +/* enum: Static configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 +/* enum: Static configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 +/* enum: Dynamic configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 +/* enum: Dynamic configuration Port1. 
*/ +#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 +/* enum: Expansion Rom. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 +/* enum: Expansion Rom Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 +/* enum: Expansion Rom Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 +/* enum: Phy Configuration Port0. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa +/* enum: Phy Configuration Port1. */ +#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb +/* enum: Log. */ +#define MC_CMD_NVRAM_TYPE_LOG 0xc +/* enum: FPGA image. */ +#define MC_CMD_NVRAM_TYPE_FPGA 0xd +/* enum: FPGA backup image */ +#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe +/* enum: FC firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW 0xf +/* enum: FC backup firmware. */ +#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10 +/* enum: CPLD image. */ +#define MC_CMD_NVRAM_TYPE_CPLD 0x11 +/* enum: Licensing information. */ +#define MC_CMD_NVRAM_TYPE_LICENSE 0x12 +/* enum: FC Log. */ +#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 + + +/***********************************/ +/* MC_CMD_NVRAM_INFO + * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0, + * EINVAL (bad type). + */ +#define MC_CMD_NVRAM_INFO 0x37 + +/* MC_CMD_NVRAM_INFO_IN msgrequest */ +#define MC_CMD_NVRAM_INFO_IN_LEN 4 +#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_INFO_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_OUT_LEN 24 +#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_START + * Start a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if + * PHY_LOCK required and not held). + */ +#define MC_CMD_NVRAM_UPDATE_START 0x38 + +/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */ +#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */ +#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_READ + * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. 
Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_READ 0x39 + +/* MC_CMD_NVRAM_READ_IN msgrequest */ +#define MC_CMD_NVRAM_READ_IN_LEN 12 +#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 + +/* MC_CMD_NVRAM_READ_OUT msgresponse */ +#define MC_CMD_NVRAM_READ_OUT_LENMIN 1 +#define MC_CMD_NVRAM_READ_OUT_LENMAX 252 +#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252 + + +/***********************************/ +/* MC_CMD_NVRAM_WRITE + * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_WRITE 0x3a + +/* MC_CMD_NVRAM_WRITE_IN msgrequest */ +#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 +#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) +#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240 + +/* MC_CMD_NVRAM_WRITE_OUT msgresponse */ +#define MC_CMD_NVRAM_WRITE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_ERASE + * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if + * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if + * PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_ERASE 0x3b + +/* MC_CMD_NVRAM_ERASE_IN msgrequest */ +#define MC_CMD_NVRAM_ERASE_IN_LEN 12 +#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4 +#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8 + +/* MC_CMD_NVRAM_ERASE_OUT msgresponse */ +#define MC_CMD_NVRAM_ERASE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_NVRAM_UPDATE_FINISH + * Finish a group of update operations on a virtual NVRAM partition. Locks + * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad + * type/offset/length), EACCES (if PHY_LOCK required and not held) + */ +#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c + +/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4 + +/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */ +#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_REBOOT + * Reboot the MC. + * + * The AFTER_ASSERTION flag is intended to be used when the driver notices an + * assertion failure (at which point it is expected to perform a complete tear + * down and reinitialise), to allow both ports to reset the MC once in an + * atomic fashion. 
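A minimal sketch of how a driver might request that kind of post-assertion reset, assuming the definitions in this header are in scope; build_reboot_after_assertion() is an invented name and the little-endian store is written out by hand for clarity.

#include <stdint.h>

/* Fill in the MC_CMD_REBOOT_IN_LEN-byte request with the AFTER_ASSERTION
 * flag set, so both ports can issue it and only the first takes effect. */
static void build_reboot_after_assertion(uint8_t *buf)
{
	uint32_t flags = MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION;

	buf[MC_CMD_REBOOT_IN_FLAGS_OFST + 0] = (uint8_t)flags;
	buf[MC_CMD_REBOOT_IN_FLAGS_OFST + 1] = (uint8_t)(flags >> 8);
	buf[MC_CMD_REBOOT_IN_FLAGS_OFST + 2] = (uint8_t)(flags >> 16);
	buf[MC_CMD_REBOOT_IN_FLAGS_OFST + 3] = (uint8_t)(flags >> 24);
}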
+ * + * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1, + * which means that they will automatically reboot out of the assertion + * handler, so this is in practise an optional operation. It is still + * recommended that drivers execute this to support custom firmwares with + * REBOOT_ON_ASSERT=0. + * + * Locks required: NONE Returns: Nothing. You get back a response with ERR=1, + * DATALEN=0 + */ +#define MC_CMD_REBOOT 0x3d + +/* MC_CMD_REBOOT_IN msgrequest */ +#define MC_CMD_REBOOT_IN_LEN 4 +#define MC_CMD_REBOOT_IN_FLAGS_OFST 0 +#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */ + +/* MC_CMD_REBOOT_OUT msgresponse */ +#define MC_CMD_REBOOT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SCHEDINFO + * Request scheduler info. Locks required: NONE. Returns: An array of + * (timeslice,maximum overrun), one for each thread, in ascending order of + * thread address. + */ +#define MC_CMD_SCHEDINFO 0x3e + +/* MC_CMD_SCHEDINFO_IN msgrequest */ +#define MC_CMD_SCHEDINFO_IN_LEN 0 + +/* MC_CMD_SCHEDINFO_OUT msgresponse */ +#define MC_CMD_SCHEDINFO_OUT_LENMIN 4 +#define MC_CMD_SCHEDINFO_OUT_LENMAX 252 +#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0 +#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4 +#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1 +#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_REBOOT_MODE + * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot + * mode to the specified value. Returns the old mode. + */ +#define MC_CMD_REBOOT_MODE 0x3f + +/* MC_CMD_REBOOT_MODE_IN msgrequest */ +#define MC_CMD_REBOOT_MODE_IN_LEN 4 +#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 +/* enum: Normal. */ +#define MC_CMD_REBOOT_MODE_NORMAL 0x0 +/* enum: Power-on Reset. */ +#define MC_CMD_REBOOT_MODE_POR 0x2 +/* enum: Snapper. */ +#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 +/* enum: snapper fake POR */ +#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4 +#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7 +#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1 + +/* MC_CMD_REBOOT_MODE_OUT msgresponse */ +#define MC_CMD_REBOOT_MODE_OUT_LEN 4 +#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 + + +/***********************************/ +/* MC_CMD_SENSOR_INFO + * Returns information about every available sensor. + * + * Each sensor has a single (16bit) value, and a corresponding state. The + * mapping between value and state is nominally determined by the MC, but may + * be implemented using up to 2 ranges per sensor. + * + * This call returns a mask (32bit) of the sensors that are supported by this + * platform, then an array of sensor information structures, in order of sensor + * type (but without gaps for unimplemented sensors). Each structure defines + * the ranges for the corresponding sensor. An unused range is indicated by + * equal limit values. If one range is used, a value outside that range results + * in STATE_FATAL. If two ranges are used, a value outside the second range + * results in STATE_FATAL while a value outside the first and inside the second + * range results in STATE_WARNING. + * + * Sensor masks and sensor information arrays are organised into pages. For + * backward compatibility, older host software can only use sensors in page 0. + * Bit 32 in the sensor mask was previously unused, and is no reserved for use + * as the next page flag. 
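To make the two-range rule above concrete, here is a small host-side sketch that classifies a reading the same way. The MC performs this mapping itself; classify_sensor_reading() is an invented name, the MC_CMD_SENSOR_STATE_* values it returns are defined further down in this header, and the case where the first range is unused is ignored for brevity.

#include <stdint.h>

/* Return MC_CMD_SENSOR_STATE_OK, _WARNING or _FATAL for a 16-bit reading,
 * given the two (min, max) ranges from MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF.
 * A range with equal limits counts as unused. */
static unsigned int classify_sensor_reading(uint16_t value,
					    uint16_t min1, uint16_t max1,
					    uint16_t min2, uint16_t max2)
{
	int in_range1 = (value >= min1 && value <= max1);
	int in_range2 = (value >= min2 && value <= max2);

	if (min2 == max2)	/* only the first range is in use */
		return in_range1 ? MC_CMD_SENSOR_STATE_OK
				 : MC_CMD_SENSOR_STATE_FATAL;
	if (!in_range2)
		return MC_CMD_SENSOR_STATE_FATAL;
	return in_range1 ? MC_CMD_SENSOR_STATE_OK
			 : MC_CMD_SENSOR_STATE_WARNING;
}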
+ * + * If the request does not contain a PAGE value then firmware will only return + * page 0 of sensor information, with bit 31 in the sensor mask cleared. + * + * If the request contains a PAGE value then firmware responds with the sensor + * mask and sensor information array for that page of sensors. In this case bit + * 31 in the mask is set if another page exists. + * + * Locks required: None Returns: 0 + */ +#define MC_CMD_SENSOR_INFO 0x41 + +/* MC_CMD_SENSOR_INFO_IN msgrequest */ +#define MC_CMD_SENSOR_INFO_IN_LEN 0 + +/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */ +#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4 +/* Which page of sensors to report. + * + * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit). + * + * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. + */ +#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 + +/* MC_CMD_SENSOR_INFO_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 +/* enum: Controller temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 +/* enum: Phy common temperature: degC */ +#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 +/* enum: Controller cooling: bool */ +#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 +/* enum: Phy 0 temperature: degC */ +#define MC_CMD_SENSOR_PHY0_TEMP 0x3 +/* enum: Phy 0 cooling: bool */ +#define MC_CMD_SENSOR_PHY0_COOLING 0x4 +/* enum: Phy 1 temperature: degC */ +#define MC_CMD_SENSOR_PHY1_TEMP 0x5 +/* enum: Phy 1 cooling: bool */ +#define MC_CMD_SENSOR_PHY1_COOLING 0x6 +/* enum: 1.0v power: mV */ +#define MC_CMD_SENSOR_IN_1V0 0x7 +/* enum: 1.2v power: mV */ +#define MC_CMD_SENSOR_IN_1V2 0x8 +/* enum: 1.8v power: mV */ +#define MC_CMD_SENSOR_IN_1V8 0x9 +/* enum: 2.5v power: mV */ +#define MC_CMD_SENSOR_IN_2V5 0xa +/* enum: 3.3v power: mV */ +#define MC_CMD_SENSOR_IN_3V3 0xb +/* enum: 12v power: mV */ +#define MC_CMD_SENSOR_IN_12V0 0xc +/* enum: 1.2v analogue power: mV */ +#define MC_CMD_SENSOR_IN_1V2A 0xd +/* enum: reference voltage: mV */ +#define MC_CMD_SENSOR_IN_VREF 0xe +/* enum: AOE FPGA power: mV */ +#define MC_CMD_SENSOR_OUT_VAOE 0xf +/* enum: AOE FPGA temperature: degC */ +#define MC_CMD_SENSOR_AOE_TEMP 0x10 +/* enum: AOE FPGA PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11 +/* enum: AOE PSU temperature: degC */ +#define MC_CMD_SENSOR_PSU_TEMP 0x12 +/* enum: Fan 0 speed: RPM */ +#define MC_CMD_SENSOR_FAN_0 0x13 +/* enum: Fan 1 speed: RPM */ +#define MC_CMD_SENSOR_FAN_1 0x14 +/* enum: Fan 2 speed: RPM */ +#define MC_CMD_SENSOR_FAN_2 0x15 +/* enum: Fan 3 speed: RPM */ +#define MC_CMD_SENSOR_FAN_3 0x16 +/* enum: Fan 4 speed: RPM */ +#define MC_CMD_SENSOR_FAN_4 0x17 +/* enum: AOE FPGA input power: mV */ +#define MC_CMD_SENSOR_IN_VAOE 0x18 +/* enum: AOE FPGA current: mA */ +#define MC_CMD_SENSOR_OUT_IAOE 0x19 +/* enum: AOE FPGA input current: mA */ +#define MC_CMD_SENSOR_IN_IAOE 0x1a +/* enum: NIC power consumption: W */ +#define MC_CMD_SENSOR_NIC_POWER 0x1b +/* enum: 0.9v power voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9 0x1c +/* enum: 0.9v power current: mA */ +#define MC_CMD_SENSOR_IN_I0V9 0x1d +/* enum: 1.2v power current: mA */ +#define MC_CMD_SENSOR_IN_I1V2 0x1e +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f +/* enum: 0.9v power voltage (at ADC): mV */ +#define MC_CMD_SENSOR_IN_0V9_ADC 0x20 +/* enum: Controller temperature 2: degC */ +#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 
0x21 +/* enum: Voltage regulator internal temperature: degC */ +#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22 +/* enum: 0.9V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23 +/* enum: 1.2V voltage regulator temperature: degC */ +#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24 +/* enum: controller internal temperature sensor voltage (internal ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25 +/* enum: controller internal temperature (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26 +/* enum: controller internal temperature sensor voltage (external ADC): mV */ +#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27 +/* enum: controller internal temperature (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28 +/* enum: ambient temperature: degC */ +#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29 +/* enum: air flow: bool */ +#define MC_CMD_SENSOR_AIRFLOW 0x2a +/* enum: voltage between VSS08D and VSS08D at CSR: mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b +/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */ +#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c +/* enum: Hotpoint temperature: degC */ +#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +#define MC_CMD_SENSOR_ENTRY_OFST 4 +#define MC_CMD_SENSOR_ENTRY_LEN 8 +#define MC_CMD_SENSOR_ENTRY_LO_OFST 4 +#define MC_CMD_SENSOR_ENTRY_HI_OFST 8 +#define MC_CMD_SENSOR_ENTRY_MINNUM 0 +#define MC_CMD_SENSOR_ENTRY_MAXNUM 31 + +/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO_OUT */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1 +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ +/* MC_CMD_SENSOR_ENTRY_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_LEN 8 */ +/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */ +/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */ +/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */ +/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */ + +/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48 +#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16 + + +/***********************************/ +/* MC_CMD_READ_SENSORS + * Returns the current reading from each sensor. DMAs an array of sensor + * readings, in order of sensor type (but without gaps for unimplemented + * sensors), into host memory. Each array element is a + * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword. 
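As a sketch of what that DMA'd array looks like to the host, the snippet below decodes one entry using the MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF field positions defined further down. The struct and function names are invented for this example, and each entry is assumed to be a little-endian dword in host memory.

#include <stdint.h>

struct sensor_reading {
	uint16_t value;
	uint8_t state;	/* MC_CMD_SENSOR_STATE_* */
	uint8_t type;	/* sensor type, as enumerated in MC_CMD_SENSOR_INFO_OUT/MASK */
};

/* Decode entry 'idx' of the array that MC_CMD_READ_SENSORS DMAs to the host. */
static struct sensor_reading decode_sensor_entry(const uint8_t *buf,
						 unsigned int idx)
{
	const uint8_t *p = buf + idx * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN;
	uint32_t dword = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
			 ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	struct sensor_reading r;

	r.value = (uint16_t)(dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN);
	r.state = (uint8_t)(dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN);
	r.type = (uint8_t)(dword >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN);
	return r;
}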
+ * + * If the request does not contain the LENGTH field then only sensors 0 to 30 + * are reported, to avoid DMA buffer overflow in older host software. If the + * sensor reading require more space than the LENGTH allows, then return + * EINVAL. + * + * The MC will send a SENSOREVT event every time any sensor changes state. The + * driver is responsible for ensuring that it doesn't miss any events. The + * board will function normally if all sensors are in STATE_OK or + * STATE_WARNING. Otherwise the board should not be expected to function. + */ +#define MC_CMD_READ_SENSORS 0x42 + +/* MC_CMD_READ_SENSORS_IN msgrequest */ +#define MC_CMD_READ_SENSORS_IN_LEN 8 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4 + +/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 +/* DMA address of host buffer for sensor readings */ +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. */ +#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 + +/* MC_CMD_READ_SENSORS_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_OUT_LEN 0 + +/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */ +#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0 + +/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1 +/* enum: Ok. */ +#define MC_CMD_SENSOR_STATE_OK 0x0 +/* enum: Breached warning threshold. */ +#define MC_CMD_SENSOR_STATE_WARNING 0x1 +/* enum: Breached fatal threshold. */ +#define MC_CMD_SENSOR_STATE_FATAL 0x2 +/* enum: Fault with sensor. */ +#define MC_CMD_SENSOR_STATE_BROKEN 0x3 +/* enum: Sensor is working but does not currently have a reading. */ +#define MC_CMD_SENSOR_STATE_NO_READING 0x4 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24 +#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8 + + +/***********************************/ +/* MC_CMD_GET_PHY_STATE + * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot + * (e.g. due to missing or corrupted firmware). Locks required: None. Return + * code: 0 + */ +#define MC_CMD_GET_PHY_STATE 0x43 + +/* MC_CMD_GET_PHY_STATE_IN msgrequest */ +#define MC_CMD_GET_PHY_STATE_IN_LEN 0 + +/* MC_CMD_GET_PHY_STATE_OUT msgresponse */ +#define MC_CMD_GET_PHY_STATE_OUT_LEN 4 +#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 +/* enum: Ok. */ +#define MC_CMD_PHY_STATE_OK 0x1 +/* enum: Faulty. */ +#define MC_CMD_PHY_STATE_ZOMBIE 0x2 + + +/***********************************/ +/* MC_CMD_SETUP_8021QBB + * 802.1Qbb control. 
8 Tx queues that map to priorities 0 - 7. Use all 1s to + * disable 802.Qbb for a given priority. + */ +#define MC_CMD_SETUP_8021QBB 0x44 + +/* MC_CMD_SETUP_8021QBB_IN msgrequest */ +#define MC_CMD_SETUP_8021QBB_IN_LEN 32 +#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0 +#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32 + +/* MC_CMD_SETUP_8021QBB_OUT msgresponse */ +#define MC_CMD_SETUP_8021QBB_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WOL_FILTER_GET + * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS + */ +#define MC_CMD_WOL_FILTER_GET 0x45 + +/* MC_CMD_WOL_FILTER_GET_IN msgrequest */ +#define MC_CMD_WOL_FILTER_GET_IN_LEN 0 + +/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */ +#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4 +#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0 + + +/***********************************/ +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD + * Add a protocol offload to NIC for lights-out state. Locks required: None. + * Returns: 0, ENOSYS + */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ +#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16 + +/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 + + +/***********************************/ +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD + * Remove a protocol offload from NIC for lights-out state. Locks required: + * None. Returns: 0, ENOSYS + */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 + +/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ +#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAC_RESET_RESTORE + * Restore MAC after block reset. Locks required: None. Returns: 0. 
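The ARP variant of MC_CMD_ADD_LIGHTSOUT_OFFLOAD defined above is compact enough to show as a sketch. Nothing here is part of the driver: the helper names are invented, the definitions in this header are assumed to be in scope, and the byte order expected for the IP field is not spelled out by these definitions, so the bytes are simply copied through as supplied.

#include <stdint.h>
#include <string.h>

/* Illustrative helper: store a 32-bit value little-endian. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

/* Build the 14-byte ARP offload request: the MC answers ARP queries for
 * 'ipv4' on behalf of 'mac' while the host is in a lights-out state. */
static void build_arp_offload(uint8_t *buf, const uint8_t mac[6],
			      const uint8_t ipv4[4])
{
	put_le32(buf + MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST,
		 MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
	memcpy(buf + MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST, mac,
	       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN);
	memcpy(buf + MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST, ipv4, 4);
}

The returned FILTER_ID is what a later MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD would pass back.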
+ */ +#define MC_CMD_MAC_RESET_RESTORE 0x48 + +/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */ +#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 + +/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */ +#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TESTASSERT + * Deliberately trigger an assert-detonation in the firmware for testing + * purposes (i.e. to allow tests that the driver copes gracefully). Locks + * required: None Returns: 0 + */ +#define MC_CMD_TESTASSERT 0x49 + +/* MC_CMD_TESTASSERT_IN msgrequest */ +#define MC_CMD_TESTASSERT_IN_LEN 0 + +/* MC_CMD_TESTASSERT_OUT msgresponse */ +#define MC_CMD_TESTASSERT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_WORKAROUND + * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't + * understand the given workaround number - which should not be treated as a + * hard error by client code. This op does not imply any semantics about each + * workaround, that's between the driver and the mcfw on a per-workaround + * basis. Locks required: None. Returns: 0, EINVAL . + */ +#define MC_CMD_WORKAROUND 0x4a + +/* MC_CMD_WORKAROUND_IN msgrequest */ +#define MC_CMD_WORKAROUND_IN_LEN 8 +#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 +/* enum: Bug 17230 work around. */ +#define MC_CMD_WORKAROUND_BUG17230 0x1 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_WORKAROUND_BUG35388 0x2 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_WORKAROUND_BUG35017 0x3 +#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 + +/* MC_CMD_WORKAROUND_OUT msgresponse */ +#define MC_CMD_WORKAROUND_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PHY_MEDIA_INFO + * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for + * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG + * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the + * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1 + * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80. + * Anything else: currently undefined. Locks required: None. Return code: 0. + */ +#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b + +/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 + +/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) +/* in bytes */ +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248 + + +/***********************************/ +/* MC_CMD_NVRAM_TEST + * Test a particular NVRAM partition for valid contents (where "valid" depends + * on the type of partition). + */ +#define MC_CMD_NVRAM_TEST 0x4c + +/* MC_CMD_NVRAM_TEST_IN msgrequest */ +#define MC_CMD_NVRAM_TEST_IN_LEN 4 +#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ + +/* MC_CMD_NVRAM_TEST_OUT msgresponse */ +#define MC_CMD_NVRAM_TEST_OUT_LEN 4 +#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0 +/* enum: Passed. */ +#define MC_CMD_NVRAM_TEST_PASS 0x0 +/* enum: Failed. */ +#define MC_CMD_NVRAM_TEST_FAIL 0x1 +/* enum: Not supported. 
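To show how the GET_PHY_MEDIA_INFO paging described above might be used, the sketch below reads the two 128-byte SFP+ module ID blocks (pages 0 and 1) into one 256-byte buffer. Nothing here is part of the driver: mcdi_rpc_fn is a stand-in for whatever RPC transport the driver provides, read_sfp_module_id(), put_le32() and get_le32() are invented names, and the definitions in this header are assumed to be in scope.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stand-in transport hook: sends one MCDI command, fills the response buffer
 * and returns 0 or a negative error. */
typedef int (*mcdi_rpc_fn)(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
			   uint8_t *outbuf, size_t outlen);

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)v;
	p[1] = (uint8_t)(v >> 8);
	p[2] = (uint8_t)(v >> 16);
	p[3] = (uint8_t)(v >> 24);
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Read pages 0 and 1 (module I2C address 0xA0, offsets 0 and 0x80) into 'id'. */
static int read_sfp_module_id(mcdi_rpc_fn rpc, uint8_t id[256])
{
	uint8_t inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
	uint8_t outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
	unsigned int page;

	for (page = 0; page < 2; page++) {
		int rc;

		put_le32(inbuf + MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST, page);
		rc = rpc(MC_CMD_GET_PHY_MEDIA_INFO, inbuf, sizeof(inbuf),
			 outbuf, sizeof(outbuf));
		if (rc < 0)
			return rc;
		/* DATALEN reports how many data bytes follow; expect 128 here */
		if (get_le32(outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST) < 128)
			return -1;
		memcpy(id + page * 128,
		       outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, 128);
	}
	return 0;
}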
*/ +#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 + + +/***********************************/ +/* MC_CMD_MRSFP_TWEAK + * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds. + * I2C I/O expander bits are always read; if equaliser parameters are supplied, + * they are configured first. Locks required: None. Return code: 0, EINVAL. + */ +#define MC_CMD_MRSFP_TWEAK 0x4d + +/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 +/* 0-6 low->high de-emph. */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 +/* 0-8 low->high ref.V */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 +/* 0-8 0-8 low->high boost */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 +/* 0-8 low->high ref.V */ +#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 + +/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ +#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0 + +/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */ +#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 +/* input bits */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 +/* output bits */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 +/* direction */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 +/* enum: Out. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 +/* enum: In. */ +#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 + + +/***********************************/ +/* MC_CMD_SENSOR_SET_LIMS + * Adjusts the sensor limits. This is a warranty-voiding operation. Returns: + * ENOENT if the sensor specified does not exist, EINVAL if the limits are out + * of range. + */ +#define MC_CMD_SENSOR_SET_LIMS 0x4e + +/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 +#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +/* interpretation is is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 +/* interpretation is is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 +/* interpretation is is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 +/* interpretation is is sensor-specific. */ +#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 + +/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ +#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_RESOURCE_LIMITS + */ +#define MC_CMD_GET_RESOURCE_LIMITS 0x4f + +/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */ +#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0 + +/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 +#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 + + +/***********************************/ +/* MC_CMD_NVRAM_PARTITIONS + * Reads the list of available virtual NVRAM partition types. Locks required: + * none. Returns: 0, EINVAL (bad type). 
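Since the response uses the variable-length layout defined just below (a 32-bit count followed by one 32-bit type ID per partition), a host-side parse might look like this sketch; parse_nvram_partitions() and get_le32() are invented names, and the length checks simply mirror the LENMIN/LEN(num)/MAXNUM definitions.

#include <stddef.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Copy up to 'max_types' partition type IDs out of an MC_CMD_NVRAM_PARTITIONS
 * response of 'resp_len' bytes.  Returns the number copied, or -1 if the
 * response is malformed. */
static int parse_nvram_partitions(const uint8_t *resp, size_t resp_len,
				  uint32_t *types, unsigned int max_types)
{
	uint32_t num;
	unsigned int i;

	if (resp_len < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -1;
	num = get_le32(resp + MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST);
	if (num > MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM ||
	    resp_len < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num))
		return -1;
	for (i = 0; i < num && i < max_types; i++)
		types[i] = get_le32(resp + MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST +
				    i * MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN);
	return (int)i;
}

The IDs returned are the NVRAM_PARTITION_TYPE_* values defined further down in this header.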
+ */ +#define MC_CMD_NVRAM_PARTITIONS 0x51 + +/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */ +#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0 + +/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) +/* total number of partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 +/* type ID code for each of NUM_PARTITIONS partitions */ +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62 + + +/***********************************/ +/* MC_CMD_NVRAM_METADATA + * Reads soft metadata for a virtual NVRAM partition type. Locks required: + * none. Returns: 0, EINVAL (bad type). + */ +#define MC_CMD_NVRAM_METADATA 0x52 + +/* MC_CMD_NVRAM_METADATA_IN msgrequest */ +#define MC_CMD_NVRAM_METADATA_IN_LEN 4 +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0 + +/* MC_CMD_NVRAM_METADATA_OUT msgresponse */ +#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252 +#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) +/* Partition type ID code */ +#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 +#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 +/* Subtype ID code for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8 +/* 1st component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2 +/* 2nd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2 +/* 3rd component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2 +/* 4th component of W.X.Y.Z version number for content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2 +/* Zero-terminated string describing the content of this partition */ +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232 + + +/***********************************/ +/* MC_CMD_GET_MAC_ADDRESSES + * Returns the base MAC, count and stride for the requestiong function + */ +#define MC_CMD_GET_MAC_ADDRESSES 0x55 + +/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */ +#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0 + +/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16 +/* Base MAC address */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6 +/* Padding */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6 +#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2 +/* Number of allocated MAC addresses */ +#define 
MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8 +/* Spacing of allocated MAC addresses */ +#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 + +/* MC_CMD_RESOURCE_SPECIFIER enum */ +/* enum: Any */ +#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff +/* enum: None */ +#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe + +/* EVB_PORT_ID structuredef */ +#define EVB_PORT_ID_LEN 4 +#define EVB_PORT_ID_PORT_ID_OFST 0 +/* enum: An invalid port handle. */ +#define EVB_PORT_ID_NULL 0x0 +/* enum: The port assigned to this function.. */ +#define EVB_PORT_ID_ASSIGNED 0x1000000 +/* enum: External network port 0 */ +#define EVB_PORT_ID_MAC0 0x2000000 +/* enum: External network port 1 */ +#define EVB_PORT_ID_MAC1 0x2000001 +/* enum: External network port 2 */ +#define EVB_PORT_ID_MAC2 0x2000002 +/* enum: External network port 3 */ +#define EVB_PORT_ID_MAC3 0x2000003 +#define EVB_PORT_ID_PORT_ID_LBN 0 +#define EVB_PORT_ID_PORT_ID_WIDTH 32 + +/* EVB_VLAN_TAG structuredef */ +#define EVB_VLAN_TAG_LEN 2 +/* The VLAN tag value */ +#define EVB_VLAN_TAG_VLAN_ID_LBN 0 +#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12 +#define EVB_VLAN_TAG_MODE_LBN 12 +#define EVB_VLAN_TAG_MODE_WIDTH 4 +/* enum: Insert the VLAN. */ +#define EVB_VLAN_TAG_INSERT 0x0 +/* enum: Replace the VLAN if already present. */ +#define EVB_VLAN_TAG_REPLACE 0x1 + +/* BUFTBL_ENTRY structuredef */ +#define BUFTBL_ENTRY_LEN 12 +/* the owner ID */ +#define BUFTBL_ENTRY_OID_OFST 0 +#define BUFTBL_ENTRY_OID_LEN 2 +#define BUFTBL_ENTRY_OID_LBN 0 +#define BUFTBL_ENTRY_OID_WIDTH 16 +/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */ +#define BUFTBL_ENTRY_PGSZ_OFST 2 +#define BUFTBL_ENTRY_PGSZ_LEN 2 +#define BUFTBL_ENTRY_PGSZ_LBN 16 +#define BUFTBL_ENTRY_PGSZ_WIDTH 16 +/* the raw 64-bit address field from the SMC, not adjusted for page size */ +#define BUFTBL_ENTRY_RAWADDR_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_LEN 8 +#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4 +#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8 +#define BUFTBL_ENTRY_RAWADDR_LBN 32 +#define BUFTBL_ENTRY_RAWADDR_WIDTH 64 + +/* NVRAM_PARTITION_TYPE structuredef */ +#define NVRAM_PARTITION_TYPE_LEN 2 +#define NVRAM_PARTITION_TYPE_ID_OFST 0 +#define NVRAM_PARTITION_TYPE_ID_LEN 2 +/* enum: Primary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100 +/* enum: Secondary MC firmware partition */ +#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200 +/* enum: Expansion ROM partition */ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300 +/* enum: Static configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400 +/* enum: Dynamic configuration TLV partition */ +#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 +/* enum: Expansion ROM configuration data for port 0 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +/* enum: Expansion ROM configuration data for port 1 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 +/* enum: Expansion ROM configuration data for port 2 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602 +/* enum: Expansion ROM configuration data for port 3 */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 +/* enum: Non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_LOG 0x700 +/* enum: Device state dump output partition */ +#define NVRAM_PARTITION_TYPE_DUMP 0x800 +/* enum: Application license key storage partition */ +#define NVRAM_PARTITION_TYPE_LICENSE 0x900 +/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 +/* enum: End of range used for 
PHY partitions (low 8 bits are the PHY ID) */ +#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Start of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 +/* enum: End of reserved value range (firmware may use for any purpose) */ +#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd +/* enum: Recovery partition map (provided if real map is missing or corrupt) */ +#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe +/* enum: Partition map (real map as stored in flash) */ +#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff +#define NVRAM_PARTITION_TYPE_ID_LBN 0 +#define NVRAM_PARTITION_TYPE_ID_WIDTH 16 + +/* LICENSED_APP_ID structuredef */ +#define LICENSED_APP_ID_LEN 4 +#define LICENSED_APP_ID_ID_OFST 0 +/* enum: OpenOnload */ +#define LICENSED_APP_ID_ONLOAD 0x1 +/* enum: PTP timestamping */ +#define LICENSED_APP_ID_PTP 0x2 +/* enum: SolarCapture Pro */ +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_ID_LBN 0 +#define LICENSED_APP_ID_ID_WIDTH 32 + + +/***********************************/ +/* MC_CMD_READ_REGS + * Get a dump of the MCPU registers + */ +#define MC_CMD_READ_REGS 0x50 + +/* MC_CMD_READ_REGS_IN msgrequest */ +#define MC_CMD_READ_REGS_IN_LEN 0 + +/* MC_CMD_READ_REGS_OUT msgresponse */ +#define MC_CMD_READ_REGS_OUT_LEN 308 +/* Whether the corresponding register entry contains a valid value */ +#define MC_CMD_READ_REGS_OUT_MASK_OFST 0 +#define MC_CMD_READ_REGS_OUT_MASK_LEN 16 +/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr, + * fir, fp) + */ +#define MC_CMD_READ_REGS_OUT_REGS_OFST 16 +#define MC_CMD_READ_REGS_OUT_REGS_LEN 4 +#define MC_CMD_READ_REGS_OUT_REGS_NUM 73 + + +/***********************************/ +/* MC_CMD_INIT_EVQ + * Set up an event queue according to the supplied parameters. The IN arguments + * end with an address for each 4k of host memory required to back the EVQ. + */ +#define MC_CMD_INIT_EVQ 0x80 + +/* MC_CMD_INIT_EVQ_IN msgrequest */ +#define MC_CMD_INIT_EVQ_IN_LENMIN 44 +#define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4 +/* The initial timer value. The load value is ignored if the timer mode is DIS. 
+ */ +#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8 +/* The reload value is ignored in one-shot modes */ +#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12 +/* tbd */ +#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 +/* enum: Immediate */ +#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1 +/* enum: Triggered */ +#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2 +/* enum: Hold-off */ +#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3 +/* Target EVQ for wakeups if in wakeup mode. */ +#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24 +/* Target interrupt if in interrupting mode (note union with target EVQ). Use + * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test + * purposes. + */ +#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24 +/* Event Counter Mode. */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2 +/* enum: Disabled */ +#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3 +/* Event queue packet count threshold. */ +#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64 + +/* MC_CMD_INIT_EVQ_OUT msgresponse */ +#define MC_CMD_INIT_EVQ_OUT_LEN 4 +/* Only valid if INTRFLAG was true */ +#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0 + +/* QUEUE_CRC_MODE structuredef */ +#define QUEUE_CRC_MODE_LEN 1 +#define QUEUE_CRC_MODE_MODE_LBN 0 +#define QUEUE_CRC_MODE_MODE_WIDTH 4 +/* enum: No CRC. */ +#define QUEUE_CRC_MODE_NONE 0x0 +/* enum: CRC Fiber channel over ethernet. */ +#define QUEUE_CRC_MODE_FCOE 0x1 +/* enum: CRC (digest) iSCSI header only. */ +#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 +/* enum: CRC (digest) iSCSI header and payload. */ +#define QUEUE_CRC_MODE_ISCSI 0x3 +/* enum: CRC Fiber channel over IP over ethernet. */ +#define QUEUE_CRC_MODE_FCOIPOE 0x4 +/* enum: CRC MPA. */ +#define QUEUE_CRC_MODE_MPA 0x5 +#define QUEUE_CRC_MODE_SPARE_LBN 4 +#define QUEUE_CRC_MODE_SPARE_WIDTH 4 + + +/***********************************/ +/* MC_CMD_INIT_RXQ + * set up a receive queue according to the supplied parameters. The IN + * arguments end with an address for each 4k of host memory required to back + * the RXQ. + */ +#define MC_CMD_INIT_RXQ 0x81 + +/* MC_CMD_INIT_RXQ_IN msgrequest */ +#define MC_CMD_INIT_RXQ_IN_LENMIN 36 +#define MC_CMD_INIT_RXQ_IN_LENMAX 252 +#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 +/* The EVQ to send events to. 
This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 + +/* MC_CMD_INIT_RXQ_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_INIT_TXQ + */ +#define MC_CMD_INIT_TXQ 0x82 + +/* MC_CMD_INIT_TXQ_IN msgrequest */ +#define MC_CMD_INIT_TXQ_IN_LENMIN 36 +#define MC_CMD_INIT_TXQ_IN_LENMAX 252 +#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 + +/* MC_CMD_INIT_TXQ_OUT msgresponse */ +#define MC_CMD_INIT_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_EVQ + * Teardown an EVQ. + * + * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first + * or the operation will fail with EBUSY + */ +#define MC_CMD_FINI_EVQ 0x83 + +/* MC_CMD_FINI_EVQ_IN msgrequest */ +#define MC_CMD_FINI_EVQ_IN_LEN 4 +/* Instance of EVQ to destroy. Should be the same instance as that previously + * passed to INIT_EVQ + */ +#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0 + +/* MC_CMD_FINI_EVQ_OUT msgresponse */ +#define MC_CMD_FINI_EVQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_RXQ + * Teardown a RXQ. + */ +#define MC_CMD_FINI_RXQ 0x84 + +/* MC_CMD_FINI_RXQ_IN msgrequest */ +#define MC_CMD_FINI_RXQ_IN_LEN 4 +/* Instance of RXQ to destroy */ +#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0 + +/* MC_CMD_FINI_RXQ_OUT msgresponse */ +#define MC_CMD_FINI_RXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FINI_TXQ + * Teardown a TXQ. + */ +#define MC_CMD_FINI_TXQ 0x85 + +/* MC_CMD_FINI_TXQ_IN msgrequest */ +#define MC_CMD_FINI_TXQ_IN_LEN 4 +/* Instance of TXQ to destroy */ +#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0 + +/* MC_CMD_FINI_TXQ_OUT msgresponse */ +#define MC_CMD_FINI_TXQ_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DRIVER_EVENT + * Generate an event on an EVQ belonging to the function issuing the command. + */ +#define MC_CMD_DRIVER_EVENT 0x86 + +/* MC_CMD_DRIVER_EVENT_IN msgrequest */ +#define MC_CMD_DRIVER_EVENT_IN_LEN 12 +/* Handle of target EVQ */ +#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0 +/* Bits 0 - 63 of event */ +#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4 +#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8 +#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4 +#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8 + +/* MC_CMD_DRIVER_EVENT_OUT msgresponse */ +#define MC_CMD_DRIVER_EVENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PROXY_CMD + * Execute an arbitrary MCDI command on behalf of a different function, subject + * to security restrictions. The command to be proxied follows immediately + * afterward in the host buffer (or on the UART). This command supercedes + * MC_CMD_SET_FUNC, which remains available for Siena but now deprecated. + */ +#define MC_CMD_PROXY_CMD 0x5b + +/* MC_CMD_PROXY_CMD_IN msgrequest */ +#define MC_CMD_PROXY_CMD_IN_LEN 4 +/* The handle of the target function. */ +#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0 +#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0 +#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16 +#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16 +#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16 +#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ + +/* MC_CMD_PROXY_CMD_OUT msgresponse */ +#define MC_CMD_PROXY_CMD_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_BUFTBL_CHUNK + * Allocate a set of buffer table entries using the specified owner ID. This + * operation allocates the required buffer table entries (and fails if it + * cannot do so). The buffer table entries will initially be zeroed. 
+ */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87 + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 +/* Owner ID to use */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 +/* Size of buffer table pages to use, in bytes (note that only a few values are + * legal on any specific hardware). + */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 + +/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 +/* Buffer table IDs for use in DMA descriptors. */ +#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 + + +/***********************************/ +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES + * Reprogram a set of buffer table entries in the specified chunk. + */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 +/* ID */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 +/* Num entries */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 +/* Buffer table entry address */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32 + +/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FREE_BUFTBL_CHUNK + */ +#define MC_CMD_FREE_BUFTBL_CHUNK 0x89 + +/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 +#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 + +/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ +#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_FILTER_OP + * Multiplexed MCDI call for filter operations + */ +#define MC_CMD_FILTER_OP 0x8a + +/* MC_CMD_FILTER_OP_IN msgrequest */ +#define MC_CMD_FILTER_OP_IN_LEN 108 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_IN_OP_OFST 0 +/* enum: single-recipient filter insert */ +#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0 +/* enum: single-recipient filter remove */ +#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1 +/* enum: multi-recipient filter subscribe */ +#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2 +/* enum: multi-recipient filter unsubscribe */ +#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3 +/* enum: replace one recipient with another (warning - the filter handle may + * change) + */ +#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4 +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. 
+ */ +#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 +/* enum: loop back to port 0 TX MAC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 +/* enum: loop back to port 1 TX MAC */ +#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered + * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be + * a valid handle. 
+ */ +#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68 +/* Firmware defined register 1 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 +#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 + +/* MC_CMD_FILTER_OP_OUT msgresponse */ +#define MC_CMD_FILTER_OP_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
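The match-field bits above combine into the MATCH_FIELDS word at offset 16. Below is a hedged sketch of the values a single-recipient OP_INSERT would carry for a destination-MAC-plus-outer-VLAN filter delivered to one host RX queue; the filter_spec struct and helper are hypothetical, not the driver's own representation, and the numeric constants simply restate the LBN/enum definitions above.

#include <stdint.h>

#define FILTER_MATCH_DST_MAC_LBN    4   // MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN
#define FILTER_MATCH_OUTER_VLAN_LBN 8   // MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN

struct filter_spec {                    // hypothetical holding struct
        uint32_t op;         // offset 0:  0x0 = OP_INSERT
        uint32_t match;      // offset 16: MATCH_FIELDS bitmask
        uint32_t rx_dest;    // offset 20: 0x1 = RX_DEST_HOST
        uint32_t rx_queue;   // offset 24: receive queue handle
        uint32_t rx_mode;    // offset 28: 0x0 = RX_MODE_SIMPLE
};

static struct filter_spec make_mac_vlan_filter(uint32_t rxq)
{
        struct filter_spec spec = {
                .op       = 0x0,
                .match    = (1u << FILTER_MATCH_DST_MAC_LBN) |
                            (1u << FILTER_MATCH_OUTER_VLAN_LBN),
                .rx_dest  = 0x1,
                .rx_queue = rxq,
                .rx_mode  = 0x0,
        };
        return spec;
}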
+ */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_INFO + * Get information related to the parser-dispatcher subsystem + */ +#define MC_CMD_GET_PARSER_DISP_INFO 0xe4 + +/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 +/* enum: read the list of supported RX filter matches */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 + +/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +/* array of supported match types (valid MATCH_FIELDS values for + * MC_CMD_FILTER_OP) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 + + +/***********************************/ +/* MC_CMD_PARSER_DISP_RW + * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging + */ +#define MC_CMD_PARSER_DISP_RW 0xe5 + +/* MC_CMD_PARSER_DISP_RW_IN msgrequest */ +#define MC_CMD_PARSER_DISP_RW_IN_LEN 32 +/* identifies the target of the operation */ +#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 +/* enum: RX dispatcher CPU */ +#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 +/* enum: TX dispatcher CPU */ +#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 +/* enum: Lookup engine */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +/* identifies the type of operation requested */ +#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 +/* enum: read a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 +/* enum: write a word of DICPU DMEM or a LUE entry */ +#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 +/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ +#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 +/* data memory address or LUE index */ +#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +/* value to write (for DMEM writes) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 +/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 +/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ +#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +/* value to write (for LUE writes) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 +#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 + +/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */ +#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52 +/* value read (for DMEM reads) */ +#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0 +/* value read (for LUE reads) */ +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20 +/* up to 8 
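The DMEM read-modify-write defined above computes new = (old & AND_MASK) ^ XOR_VALUE, so ordinary bit operations map onto (mask, value) pairs. The helpers below are purely illustrative of that mapping; the struct and function names are not part of the interface.

#include <stdint.h>

struct dicpu_rmw { uint32_t and_mask; uint32_t xor_value; };

// Force bits to 1: clear them via the AND mask, then XOR them back in.
static struct dicpu_rmw rmw_set_bits(uint32_t bits)
{ return (struct dicpu_rmw){ .and_mask = ~bits, .xor_value = bits }; }

// Force bits to 0: clear them via the AND mask, XOR in nothing.
static struct dicpu_rmw rmw_clear_bits(uint32_t bits)
{ return (struct dicpu_rmw){ .and_mask = ~bits, .xor_value = 0 }; }

// Flip bits: keep everything via the AND mask, XOR the bits.
static struct dicpu_rmw rmw_toggle_bits(uint32_t bits)
{ return (struct dicpu_rmw){ .and_mask = 0xffffffffu, .xor_value = bits }; }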
32-bit words of additional soft state from the LUE manager (the + * exact content is firmware-dependent and intended only for debug use) + */ +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20 +#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32 + + +/***********************************/ +/* MC_CMD_GET_PF_COUNT + * Get number of PFs on the device. + */ +#define MC_CMD_GET_PF_COUNT 0xb6 + +/* MC_CMD_GET_PF_COUNT_IN msgrequest */ +#define MC_CMD_GET_PF_COUNT_IN_LEN 0 + +/* MC_CMD_GET_PF_COUNT_OUT msgresponse */ +#define MC_CMD_GET_PF_COUNT_OUT_LEN 1 +/* Identifies the number of PFs on the device. */ +#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0 +#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1 + + +/***********************************/ +/* MC_CMD_SET_PF_COUNT + * Set number of PFs on the device. + */ +#define MC_CMD_SET_PF_COUNT 0xb7 + +/* MC_CMD_SET_PF_COUNT_IN msgrequest */ +#define MC_CMD_SET_PF_COUNT_IN_LEN 4 +/* New number of PFs on the device. */ +#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0 + +/* MC_CMD_SET_PF_COUNT_OUT msgresponse */ +#define MC_CMD_SET_PF_COUNT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_ASSIGNMENT + * Get port assignment for current PCI function. + */ +#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 + +/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0 + +/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0 + + +/***********************************/ +/* MC_CMD_SET_PORT_ASSIGNMENT + * Set port assignment for current PCI function. + */ +#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 + +/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 +/* Identifies the port assignment for this function. */ +#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 + +/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ +#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_ALLOC_VIS + * Allocate VIs for current PCI function. + */ +#define MC_CMD_ALLOC_VIS 0x8b + +/* MC_CMD_ALLOC_VIS_IN msgrequest */ +#define MC_CMD_ALLOC_VIS_IN_LEN 8 +/* The minimum number of VIs that is acceptable */ +#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0 +/* The maximum number of VIs that would be useful */ +#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 + +/* MC_CMD_ALLOC_VIS_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 + + +/***********************************/ +/* MC_CMD_FREE_VIS + * Free VIs for current PCI function. Any linked PIO buffers will be unlinked, + * but not freed. + */ +#define MC_CMD_FREE_VIS 0x8c + +/* MC_CMD_FREE_VIS_IN msgrequest */ +#define MC_CMD_FREE_VIS_IN_LEN 0 + +/* MC_CMD_FREE_VIS_OUT msgresponse */ +#define MC_CMD_FREE_VIS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_SRIOV_CFG + * Get SRIOV config for this PF. + */ +#define MC_CMD_GET_SRIOV_CFG 0xba + +/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0 + +/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20 +/* Number of VFs currently enabled. 
*/ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12 +/* RID offset of each subsequent VF from the previous. */ +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16 + + +/***********************************/ +/* MC_CMD_SET_SRIOV_CFG + * Set SRIOV config for this PF. + */ +#define MC_CMD_SET_SRIOV_CFG 0xbb + +/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */ +#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 +/* Number of VFs currently enabled. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 +/* Max number of VFs before sriov stride and offset may need to be changed. */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 +#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 +/* RID offset of first VF from PF, or 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 +/* RID offset of each subsequent VF from the previous, 0 for no change, or + * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. + */ +#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 + +/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ +#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VI_ALLOC_INFO + * Get information about number of VI's and base VI number allocated to this + * function. + */ +#define MC_CMD_GET_VI_ALLOC_INFO 0x8d + +/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */ +#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 + +/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8 +/* The number of VIs allocated on this function */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 + + +/***********************************/ +/* MC_CMD_DUMP_VI_STATE + * For CmdClient use. Dump pertinent information on a specific absolute VI. + */ +#define MC_CMD_DUMP_VI_STATE 0x8e + +/* MC_CMD_DUMP_VI_STATE_IN msgrequest */ +#define MC_CMD_DUMP_VI_STATE_IN_LEN 4 +/* The VI number to query. */ +#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 + +/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ +#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96 +/* The PF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2 +/* The VF part of the function owning this VI. */ +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2 +#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2 +/* Base of VIs allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2 +/* Count of VIs allocated to the owner function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2 +/* Base interrupt vector allocated to this function. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2 +/* Number of interrupt vectors allocated to this function. */ +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10 +#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2 +/* Raw evq ptr table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16 +/* Raw evq timer table data. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44 +/* TXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24 +/* RXDPCPU raw table data for queue. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68 +/* RXDPCPU raw table data for queue. 
*/ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76 +/* Reserved, currently 0. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84 +/* Combined metadata field. */ +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8 + + +/***********************************/ +/* MC_CMD_ALLOC_PIOBUF + * Allocate a push I/O buffer for later use with a tx queue. + */ +#define MC_CMD_ALLOC_PIOBUF 0x8f + +/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */ +#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0 + +/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */ +#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0 + + +/***********************************/ +/* MC_CMD_FREE_PIOBUF + * Free a push I/O buffer. + */ +#define MC_CMD_FREE_PIOBUF 0x90 + +/* MC_CMD_FREE_PIOBUF_IN msgrequest */ +#define MC_CMD_FREE_PIOBUF_IN_LEN 4 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 + +/* MC_CMD_FREE_PIOBUF_OUT msgresponse */ +#define MC_CMD_FREE_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VI_TLP_PROCESSING + * Get TLP steering and ordering information for a VI. + */ +#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0 + +/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */ +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 +/* VI number to get information for. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 + +/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4 +/* Transaction processing steering hint 1 for use with the Rx Queue. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1 +/* Transaction processing steering hint 2 for use with the Ev Queue. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1 +/* Use Relaxed ordering model for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1 +/* Use ID based ordering for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1 +/* Set no snoop bit for TLPs on this VI. */ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1 +/* Enable TPH for TLPs on this VI. 
*/ +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0 + + +/***********************************/ +/* MC_CMD_SET_VI_TLP_PROCESSING + * Set TLP steering and ordering information for a VI. + */ +#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1 + +/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 +/* VI number to set information for. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 +/* Transaction processing steering hint 1 for use with the Rx Queue. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1 +/* Transaction processing steering hint 2 for use with the Ev Queue. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1 +/* Use Relaxed ordering model for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1 +/* Use ID based ordering for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1 +/* Set the no snoop bit for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1 +/* Enable TPH for TLPs on this VI. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4 + +/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */ +#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS + * Get global PCIe steering and transaction processing configuration. + */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc + +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +/* enum: MISC. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 +/* enum: IDO. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 +/* enum: RO. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 +/* enum: TPH Type. */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 + +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ +/* Amalgamated TLP info word. 
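Throughout this header an LBN counts bits from the start of the little-endian message, so the NO_SNOOP flag at LBN 50 in MC_CMD_SET_VI_TLP_PROCESSING_IN lands in bit 2 of byte 6, i.e. bit 18 of the dword at offset 4 (matching DATA_OFST 4 above). A minimal sketch of that convention, using a hypothetical mcdi_set_flag() helper rather than the driver's MCDI macros:

#include <stdint.h>
#include <stddef.h>

// Set a WIDTH=1 flag in a little-endian MCDI payload given its LBN.
static void mcdi_set_flag(uint8_t *payload, unsigned int lbn)
{
        size_t byte = lbn / 8;          // LBN counts bits from payload start
        unsigned int bit = lbn % 8;
        payload[byte] |= (uint8_t)(1u << bit);
}

// Example: request no-snoop TLPs for a VI in MC_CMD_SET_VI_TLP_PROCESSING_IN
// (LEN 8): INSTANCE dword at offset 0, NO_SNOOP at LBN 50 == byte 6, bit 2.
static void set_vi_no_snoop(uint8_t payload[8], uint32_t vi)
{
        payload[0] = (uint8_t)(vi & 0xff);        // little-endian INSTANCE dword
        payload[1] = (uint8_t)((vi >> 8) & 0xff);
        payload[2] = (uint8_t)((vi >> 16) & 0xff);
        payload[3] = (uint8_t)((vi >> 24) & 0xff);
        mcdi_set_flag(payload, 50);               // ..._IN_NO_SNOOP_LBN
}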
*/ +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23 + + +/***********************************/ +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS + * Set global PCIe steering and transaction processing configuration. + */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd + +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ +/* Amalgamated TLP info word. 
*/ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22 + +/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SATELLITE_DOWNLOAD + * Download a new set of images to the satellite CPUs from the host. + */ +#define MC_CMD_SATELLITE_DOWNLOAD 0x91 + +/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs + * are subtle, and so downloads must proceed in a number of phases. + * + * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0. + * + * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download + * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should + * be a checksum (a simple 32-bit sum) of the transferred data. An individual + * download may be aborted using CHUNK_ID_ABORT. + * + * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15), + * similar to PHASE_IMEMS. + * + * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0. + * + * After any error (a requested abort is not considered to be an error) the + * sequence must be restarted from PHASE_RESET. + */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num)) +/* Download phase. (Note: the IDLE phase is used internally and is never valid + * in a command from the host.) 
+ */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ +/* Target for download. (These match the blob numbers defined in + * mc_flash_layout.h.) + */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa +/* enum: Valid in phase 2 (PHASE_IMEMS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe +/* enum: Valid in phase 3 (PHASE_VECTORS) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf +/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff +/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 +/* enum: Last chunk, containing checksum rather than data */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff +/* enum: Abort download of this item */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe +/* Length of this chunk in bytes */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 +/* Data for this chunk */ +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59 + +/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 +/* Same as MC_CMD_ERR field, but included as 0 in success cases */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0 +/* Extra status information */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 +/* enum: Code download OK, completed. 
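For the download sequence described above (PHASE_RESET, then PHASE_IMEMS per target, then PHASE_VECTORS, then PHASE_READY), the final CHUNK_ID_LAST chunk carries "a simple 32-bit sum" of the transferred data. The sketch below assumes that means an unsigned dword-wise sum over the little-endian data words already sent for the target; the exact convention should be confirmed against the firmware.

#include <stdint.h>
#include <stddef.h>

static uint32_t satellite_download_csum(const uint32_t *words, size_t nwords)
{
        uint32_t sum = 0;
        for (size_t i = 0; i < nwords; i++)
                sum += words[i];        // wraps naturally modulo 2^32
        return sum;                     // payload of the CHUNK_ID_LAST chunk
}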
*/ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 +/* enum: Code download aborted as requested. */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 +/* enum: Code download OK so far, send next chunk. */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 +/* enum: Download phases out of sequence */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 +/* enum: Bad target for this phase */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 +/* enum: Chunk ID out of sequence */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 +/* enum: Chunk length zero or too large */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 +/* enum: Checksum was incorrect */ +#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 + + +/***********************************/ +/* MC_CMD_GET_CAPABILITIES + * Get device capabilities. + * + * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to + * reference inherent device capabilities as opposed to current NVRAM config. + */ +#define MC_CMD_GET_CAPABILITIES 0xbe + +/* MC_CMD_GET_CAPABILITIES_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* TxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 + + +/***********************************/ +/* MC_CMD_V2_EXTN + * Encapsulation for a v2 extended 
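A small illustrative check of the FLAGS1 capability bits defined above; it assumes the first response dword has already been converted from little-endian wire order to host order, and the local constants simply restate the LBN values given earlier.

#include <stdint.h>
#include <stdbool.h>

#define CAP_OUT_TX_TSO_LBN        21   // MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN
#define CAP_OUT_RX_TIMESTAMP_LBN  24   // MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN

static bool cap_flag_set(uint32_t flags1, unsigned int lbn)
{
        return (flags1 >> lbn) & 1u;
}

// e.g. cap_flag_set(flags1, CAP_OUT_TX_TSO_LBN) reports whether the datapath
// firmware advertises TX TSO support.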
command + */ +#define MC_CMD_V2_EXTN 0x7f + +/* MC_CMD_V2_EXTN_IN msgrequest */ +#define MC_CMD_V2_EXTN_IN_LEN 4 +/* the extended command number */ +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0 +#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15 +#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1 +/* the actual length of the encapsulated command (which is not in the v1 + * header) + */ +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16 +#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10 +#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26 +#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_ALLOC + * Allocate a pacer bucket (for qau rp or a snapper test) + */ +#define MC_CMD_TCM_BUCKET_ALLOC 0xb2 + +/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0 + +/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_FREE + * Free a pacer bucket + */ +#define MC_CMD_TCM_BUCKET_FREE 0xb3 + +/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 + +/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_BUCKET_INIT + * Initialise pacer bucket with a given rate + */ +#define MC_CMD_TCM_BUCKET_INIT 0xb4 + +/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 +/* the rate in mbps */ +#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 + +/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ +#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_TCM_TXQ_INIT + * Initialise txq in pacer with given options or set options + */ +#define MC_CMD_TCM_TXQ_INIT 0xb5 + +/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 +/* bitmask of the priority queues this txq is inserted into */ +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 + +/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ +#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LINK_PIOBUF + * Link a push I/O buffer to a TxQ + */ +#define MC_CMD_LINK_PIOBUF 0x92 + +/* MC_CMD_LINK_PIOBUF_IN msgrequest */ +#define MC_CMD_LINK_PIOBUF_IN_LEN 8 +/* Handle for allocated push I/O buffer. */ +#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0 +/* Function Local Instance (VI) number. 
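A sketch of packing the MC_CMD_V2_EXTN_IN dword defined earlier in this block (extended command number in bits 0-14, actual encapsulated length in bits 16-25). The helper name is hypothetical; this only illustrates the bitfield layout, not the driver's header-encoding path.

#include <stdint.h>

static uint32_t mcdi_v2_extn_header(uint32_t extended_cmd, uint32_t actual_len)
{
        uint32_t hdr = 0;
        hdr |= (extended_cmd & 0x7fffu) << 0;   // EXTENDED_CMD: LBN 0, WIDTH 15
        hdr |= (actual_len & 0x3ffu) << 16;     // ACTUAL_LEN: LBN 16, WIDTH 10
        return hdr;                             // written little-endian at offset 0
}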
*/ +#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4 + +/* MC_CMD_LINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_LINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UNLINK_PIOBUF + * Unlink a push I/O buffer from a TxQ + */ +#define MC_CMD_UNLINK_PIOBUF 0x93 + +/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */ +#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 +/* Function Local Instance (VI) number. */ +#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0 + +/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */ +#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_ALLOC + * allocate and initialise a v-switch. + */ +#define MC_CMD_VSWITCH_ALLOC 0x94 + +/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */ +#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 +/* The port to connect to the v-switch's upstream port. */ +#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +/* The type of v-switch to create. */ +#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4 +/* enum: VLAN */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 +/* enum: VEB */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 +/* enum: VEPA */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +/* Flags controlling v-port creation */ +#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 +/* The number of VLAN tags to support. */ +#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 + +/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */ +#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VSWITCH_FREE + * de-allocate a v-switch. + */ +#define MC_CMD_VSWITCH_FREE 0x95 + +/* MC_CMD_VSWITCH_FREE_IN msgrequest */ +#define MC_CMD_VSWITCH_FREE_IN_LEN 4 +/* The port to which the v-switch is connected. */ +#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0 + +/* MC_CMD_VSWITCH_FREE_OUT msgresponse */ +#define MC_CMD_VSWITCH_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VPORT_ALLOC + * allocate a v-port. + */ +#define MC_CMD_VPORT_ALLOC 0x96 + +/* MC_CMD_VPORT_ALLOC_IN msgrequest */ +#define MC_CMD_VPORT_ALLOC_IN_LEN 20 +/* The port to which the v-switch is connected. */ +#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +/* The type of the new v-port. */ +#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4 +/* enum: VLAN (obsolete) */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1 +/* enum: VEB (obsolete) */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2 +/* enum: VEPA (obsolete) */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3 +/* enum: A normal v-port receives packets which match a specified MAC and/or + * VLAN. + */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4 +/* enum: An expansion v-port packets traffic which don't match any other + * v-port. + */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5 +/* enum: An test v-port receives packets which match any filters installed by + * its downstream components. + */ +#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6 +/* Flags controlling v-port creation */ +#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 +/* The number of VLAN tags to insert/remove. 
*/ +#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16 + +/* MC_CMD_VPORT_ALLOC_OUT msgresponse */ +#define MC_CMD_VPORT_ALLOC_OUT_LEN 4 +/* The handle of the new v-port */ +#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0 + + +/***********************************/ +/* MC_CMD_VPORT_FREE + * de-allocate a v-port. + */ +#define MC_CMD_VPORT_FREE 0x97 + +/* MC_CMD_VPORT_FREE_IN msgrequest */ +#define MC_CMD_VPORT_FREE_IN_LEN 4 +/* The handle of the v-port */ +#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0 + +/* MC_CMD_VPORT_FREE_OUT msgresponse */ +#define MC_CMD_VPORT_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_ALLOC + * allocate a v-adaptor. + */ +#define MC_CMD_VADAPTOR_ALLOC 0x98 + +/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */ +#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16 +/* The port to connect to the v-adaptor's port. */ +#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +/* Flags controlling v-adaptor creation */ +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 +/* The number of VLAN tags to strip on receive */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 + +/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_FREE + * de-allocate a v-adaptor. + */ +#define MC_CMD_VADAPTOR_FREE 0x99 + +/* MC_CMD_VADAPTOR_FREE_IN msgrequest */ +#define MC_CMD_VADAPTOR_FREE_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0 + +/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */ +#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_EVB_PORT_ASSIGN + * assign a port to a PCI function. + */ +#define MC_CMD_EVB_PORT_ASSIGN 0x9a + +/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 +/* The port to assign. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0 +/* The target function to modify. */ +#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16 + +/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */ +#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RDWR_A64_REGIONS + * Assign the 64 bit region addresses. + */ +#define MC_CMD_RDWR_A64_REGIONS 0x9b + +/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ +#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 +/* Write enable bits 0-3, set to write, clear to read. 
*/ +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16 +#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1 + +/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included + * regardless of state of write bits in the request. + */ +#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8 +#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12 + + +/***********************************/ +/* MC_CMD_ONLOAD_STACK_ALLOC + * Allocate an Onload stack ID. + */ +#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c + +/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */ +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4 +/* The handle of the owning upstream port */ +#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 + +/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */ +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4 +/* The handle of the new Onload stack */ +#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0 + + +/***********************************/ +/* MC_CMD_ONLOAD_STACK_FREE + * Free an Onload stack ID. + */ +#define MC_CMD_ONLOAD_STACK_FREE 0x9d + +/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */ +#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4 +/* The handle of the Onload stack */ +#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0 + +/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */ +#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_ALLOC + * Allocate an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e + +/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12 +/* The handle of the owning upstream port */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +/* The type of context to allocate */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4 +/* enum: Allocate a context for exclusive use. The key and indirection table + * must be explicitly configured. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0 +/* enum: Allocate a context for shared use; this will spread across a range of + * queues, but the key and indirection table are pre-configured and may not be + * changed. For this mode, NUM_QUEUES must 2, 4, 8, 16, 32 or 64. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 +/* Number of queues spanned by this context, in the range 1-64; valid offsets + * in the indirection table will be in the range 0 to NUM_QUEUES-1. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8 + +/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 +/* The handle of the new RSS context */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_FREE + * Free an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_FREE 0x9f + +/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0 + +/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_KEY + * Set the Toeplitz hash key for an RSS context. 
+ */ +#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0 + +/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40 + +/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_KEY + * Get the Toeplitz hash key for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1 + +/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0 + +/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44 +/* The 40-byte Toeplitz hash key (TBD endianness issues?) */ +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_TABLE + * Set the indirection table for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128 + +/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_TABLE + * Get the indirection table for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0 + +/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132 +/* The 128-byte indirection table (1 byte per entry) */ +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_SET_FLAGS + * Set various control flags for an RSS context. 
+ */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 +/* Hash control flags */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 + +/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_GET_FLAGS + * Get various control flags for an RSS context. + */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2 + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 + +/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 +/* Hash control flags */ +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_ALLOC + * Allocate a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4 + +/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 +/* The handle of the owning upstream port */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 +/* Number of queues spanned by this mapping, in the range 1-64; valid fixed + * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and + * referenced RSS contexts must span no more than this number. + */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4 + +/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 +/* The handle of the new .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_FREE + * Free a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_FREE 0xa5 + +/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0 + +/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_SET_TABLE + * Set the mapping table for a .1p mapping. 
+ */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6 + +/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 +/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context + * handle) + */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4 +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32 + +/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DOT1P_MAPPING_GET_TABLE + * Get the mapping table for a .1p mapping. + */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7 + +/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 +/* The handle of the .1p mapping */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 + +/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36 +/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context + * handle) + */ +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4 +#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32 + + +/***********************************/ +/* MC_CMD_GET_VECTOR_CFG + * Get Interrupt Vector config for this PF. + */ +#define MC_CMD_GET_VECTOR_CFG 0xbf + +/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */ +#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0 + +/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */ +#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12 +/* Base absolute interrupt vector number. */ +#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0 +/* Number of interrupt vectors allocate to this PF. */ +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4 +/* Number of interrupt vectors to allocate per VF. */ +#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8 + + +/***********************************/ +/* MC_CMD_SET_VECTOR_CFG + * Set Interrupt Vector config for this PF. + */ +#define MC_CMD_SET_VECTOR_CFG 0xc0 + +/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */ +#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12 +/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to + * let the system find a suitable base. + */ +#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0 +/* Number of interrupt vectors allocate to this PF. */ +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4 +/* Number of interrupt vectors to allocate per VF. 
*/ +#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8 + +/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */ +#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_RX_CLASS_STATS + * Retrieve rmon rx class statistics + */ +#define MC_CMD_RMON_RX_CLASS_STATS 0xc3 + +/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_CLASS_STATS + * Retrieve rmon tx class statistics + */ +#define MC_CMD_RMON_TX_CLASS_STATS 0xc4 + +/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_SUPER_CLASS_STATS + * Retrieve rmon rx super_class statistics + */ +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5 + +/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_SUPER_CLASS_STATS + * Retrieve rmon tx super_class statistics + */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6 + +/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 
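The RMON *_STATS responses above all follow the same variable-length convention: _OUT_LENMIN/_OUT_LENMAX bound the payload, _OUT_LEN(num) gives the length for num entries, and _BUFFER_OFST/_BUFFER_LEN/_BUFFER_MAXNUM describe an array of 32-bit little-endian counters. As an editor's illustrative sketch (not part of the generated header), the snippet below shows how a caller might bound-check and decode such an array, using MC_CMD_RMON_RX_CLASS_STATS as the example; it assumes the definitions above are in scope, and that the response buffer and its actual length were obtained from whatever MCDI transport is in use (in the sfc driver, its MCDI RPC helper). The function names here are illustrative only.

#include <stddef.h>
#include <stdint.h>

/* Decode one little-endian 32-bit MCDI dword from a response buffer. */
static inline uint32_t mcdi_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/*
 * Copy the stats dwords out of an MC_CMD_RMON_RX_CLASS_STATS response of
 * resp_len bytes into stats[]. Returns the number of valid entries.
 */
static size_t rmon_rx_class_stats_decode(const uint8_t *resp, size_t resp_len,
					 uint32_t *stats, size_t max_stats)
{
	size_t num, i;

	if (resp_len < MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN)
		return 0;
	num = resp_len / MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN;
	if (num > MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM)
		num = MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM;
	if (num > max_stats)
		num = max_stats;
	for (i = 0; i < num; i++)
		stats[i] = mcdi_le32(resp +
				     MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST +
				     i * MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN);
	return num;
}

The same pattern applies unchanged to the other *_STATS_OUT messages in this family, since they share identical LENMIN/BUFFER layouts.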
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS + * Add qid to class for statistics collection + */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7 + +/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12 +/* class */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 +/* qid */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4 +/* flags */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 + +/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS + * Add qid to class for statistics collection + */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8 + +/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12 +/* class */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 +/* qid */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4 +/* flags */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 + +/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS + * Add qid to class for statistics collection + */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9 + +/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12 +/* class */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 +/* qid */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4 +/* flags */ +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8 +#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 + +/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */ 
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_ALLOC_CLASS + * Allocate an rmon class + */ +#define MC_CMD_RMON_ALLOC_CLASS 0xca + +/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */ +#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0 + +/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4 +/* class */ +#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0 + + +/***********************************/ +/* MC_CMD_RMON_DEALLOC_CLASS + * Deallocate an rmon class + */ +#define MC_CMD_RMON_DEALLOC_CLASS 0xcb + +/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */ +#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4 +/* class */ +#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0 + +/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_ALLOC_SUPER_CLASS + * Allocate an rmon super_class + */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc + +/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0 + +/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4 +/* super_class */ +#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0 + + +/***********************************/ +/* MC_CMD_RMON_DEALLOC_SUPER_CLASS + * Deallocate an rmon tx super_class + */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd + +/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4 +/* super_class */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0 + +/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */ +#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_RMON_RX_UP_CONV_STATS + * Retrieve up converter statistics + */ +#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce + +/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_IPI_STATS + * Retrieve rx ipi stats + */ +#define MC_CMD_RMON_RX_IPI_STATS 0xcf + +/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0 +#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5 +#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5 +#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0 +#define 
MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS + * Retrieve rx ipsec cntxt_ptr indexed stats + */ +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0 + +/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_IPSEC_PORT_STATS + * Retrieve rx ipsec port indexed stats + */ +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1 + +/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS + * Retrieve tx ipsec overflow + */ +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2 + +/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_VPORT_ADD_MAC_ADDRESS + * Add a MAC address to a 
v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses on a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries and does not use chunk handles. All entries must be in
+ * range and used for queue page mapping, although the latter restriction may
+ * be lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY.
*/ +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21 + + +/***********************************/ +/* MC_CMD_SET_RXDP_CONFIG + * Set global RXDP configuration settings + */ +#define MC_CMD_SET_RXDP_CONFIG 0xc1 + +/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 +#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 + +/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_RXDP_CONFIG + * Get global RXDP configuration settings + */ +#define MC_CMD_GET_RXDP_CONFIG 0xc2 + +/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4 +#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 + + +/***********************************/ +/* MC_CMD_RMON_RX_CLASS_DROPS_STATS + * Retrieve rx class drop stats + */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3 + +/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS + * Retrieve rx super class drop stats + */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4 + +/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_ERRORS_STATS + * 
Retrieve rxdp errors + */ +#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5 + +/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11 +#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_RX_OVERFLOW_STATS + * Retrieve rxdp overflow + */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6 + +/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPI_STATS + * Retrieve tx ipi stats + */ +#define MC_CMD_RMON_TX_IPI_STATS 0xd7 + +/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0 +#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5 +#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5 +#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS + * Retrieve tx ipsec counters by cntxt_ptr + */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8 + +/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ +#define 
MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPSEC_PORT_STATS + * Retrieve tx ipsec counters by port + */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9 + +/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS + * Retrieve tx ipsec overflow + */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda + +/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_NOWHERE_STATS + * Retrieve tx nowhere stats + */ +#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb + +/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1 +#define 
MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS + * Retrieve tx nowhere qbb stats + */ +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc + +/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_ERRORS_STATS + * Retrieve rxdp errors + */ +#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd + +/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0 +#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11 +#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11 +#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_TX_OVERFLOW_STATS + * Retrieve rxdp overflow + */ +#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde + +/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */ +#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4 +/* flags */ +#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */ +#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) +/* Array of stats */ +#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 +#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_RMON_COLLECT_CLASS_STATS + * Explicitly collect class stats at the specified evb port + */ +#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf + +/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */ +#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4 +/* The port id associated with the vport/pport at which to collect class stats + */ +#define 
MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
+ * Explicitly collect super class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect super class
+ * stats
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 12
+/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 12
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */ +#define MC_CMD_DPCPU_RPC 0xae + +/* MC_CMD_DPCPU_RPC_IN msgrequest */ +#define MC_CMD_DPCPU_RPC_IN_LEN 36 +#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 +/* enum: RxDPCPU */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0 +/* enum: TxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 +/* enum: TxDPCPU1 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be + * initialised to zero + */ +#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 +/* Register data to write. Only valid in write/write-read. */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 +/* Register address. 
*/ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 + +/* MC_CMD_DPCPU_RPC_OUT msgresponse */ +#define MC_CMD_DPCPU_RPC_OUT_LEN 36 +#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 +/* DATA */ +#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 +#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 + + +/***********************************/ +/* MC_CMD_TRIGGER_INTERRUPT + * Trigger an interrupt by prodding the BIU. + */ +#define MC_CMD_TRIGGER_INTERRUPT 0xe3 + +/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 +/* Interrupt level relative to base for function. */ +#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0 + +/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */ +#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_CAP_BLK_READ + * Read multiple 64bit words from capture block memory + */ +#define MC_CMD_CAP_BLK_READ 0xe7 + +/* MC_CMD_CAP_BLK_READ_IN msgrequest */ +#define MC_CMD_CAP_BLK_READ_IN_LEN 12 +#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 +#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 +#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 + +/* MC_CMD_CAP_BLK_READ_OUT msgresponse */ +#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 +#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248 +#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num)) +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31 + + +/***********************************/ +/* MC_CMD_DUMP_DO + * Take a dump of the DUT state + */ +#define MC_CMD_DUMP_DO 0xe8 + +/* MC_CMD_DUMP_DO_IN msgrequest */ +#define MC_CMD_DUMP_DO_IN_LEN 52 +#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 
0x2 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +/* enum: The uart port this command was received over (if using a uart + * transport) + */ +#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff +#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 + +/* MC_CMD_DUMP_DO_OUT msgresponse */ +#define MC_CMD_DUMP_DO_OUT_LEN 4 +#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0 + + +/***********************************/ +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED + * Configure unsolicited dumps + */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 + +/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32 +/* Enum values, see field(s): */ +/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */ +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36 +#define 
MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36 +#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48 + + +/***********************************/ +/* MC_CMD_SET_PSU + * Adjusts power supply parameters. This is a warranty-voiding operation. + * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if + * the parameter is out of range. + */ +#define MC_CMD_SET_PSU 0xea + +/* MC_CMD_SET_PSU_IN msgrequest */ +#define MC_CMD_SET_PSU_IN_LEN 12 +#define MC_CMD_SET_PSU_IN_PARAM_OFST 0 +#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_OFST 4 +#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ +#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ +/* desired value, eg voltage in mV */ +#define MC_CMD_SET_PSU_IN_VALUE_OFST 8 + +/* MC_CMD_SET_PSU_OUT msgresponse */ +#define MC_CMD_SET_PSU_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_FUNCTION_INFO + * Get function information. PF and VF number. + */ +#define MC_CMD_GET_FUNCTION_INFO 0xec + +/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */ +#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0 + +/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */ +#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8 +#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0 +#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4 + + +/***********************************/ +/* MC_CMD_ENABLE_OFFLINE_BIST + * Enters offline BIST mode. All queues are torn down, chip enters quiescent + * mode, calling function gets exclusive MCDI ownership. The only way out is + * reboot. + */ +#define MC_CMD_ENABLE_OFFLINE_BIST 0xed + +/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ +#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 + +/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */ +#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_UART_SEND_DATA + * Send checksummed[sic] block of data over the uart. Response is a placeholder + * should we wish to make this reliable; currently requests are fire-and- + * forget. + */ +#define MC_CMD_UART_SEND_DATA 0xee + +/* MC_CMD_UART_SEND_DATA_OUT msgrequest */ +#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16 +#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252 +#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) +/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ +#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 +/* Offset at which to write the data */ +#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 +/* Length of data */ +#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 +/* Reserved for future use */ +#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236 + +/* MC_CMD_UART_SEND_DATA_IN msgresponse */ +#define MC_CMD_UART_SEND_DATA_IN_LEN 0 + + +/***********************************/ +/* MC_CMD_UART_RECV_DATA + * Request checksummed[sic] block of data over the uart. Only a placeholder, + * subject to change and not currently implemented. 
+ */ +#define MC_CMD_UART_RECV_DATA 0xef + +/* MC_CMD_UART_RECV_DATA_OUT msgrequest */ +#define MC_CMD_UART_RECV_DATA_OUT_LEN 16 +/* CRC32 over OFFSET, LENGTH, RESERVED */ +#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0 +/* Offset from which to read the data */ +#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4 +/* Length of data */ +#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8 +/* Reserved for future use */ +#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12 + +/* MC_CMD_UART_RECV_DATA_IN msgresponse */ +#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 +#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252 +#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) +/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ +#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 +/* Offset at which to write the data */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4 +/* Length of data */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8 +/* Reserved for future use */ +#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12 +#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16 +#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236 + + +/***********************************/ +/* MC_CMD_READ_FUSES + * Read data programmed into the device One-Time-Programmable (OTP) Fuses + */ +#define MC_CMD_READ_FUSES 0xf0 + +/* MC_CMD_READ_FUSES_IN msgrequest */ +#define MC_CMD_READ_FUSES_IN_LEN 8 +/* Offset in OTP to read */ +#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 +/* Length of data to read in bytes */ +#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 + +/* MC_CMD_READ_FUSES_OUT msgresponse */ +#define MC_CMD_READ_FUSES_OUT_LENMIN 4 +#define MC_CMD_READ_FUSES_OUT_LENMAX 252 +#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) +/* Length of returned OTP data in bytes */ +#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 +/* Returned data */ +#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 +#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 +#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0 +#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248 + + +/***********************************/ +/* MC_CMD_KR_TUNE + * Get or set KR Serdes RXEQ and TX Driver settings + */ +#define MC_CMD_KR_TUNE 0xf1 + +/* MC_CMD_KR_TUNE_IN msgrequest */ +#define MC_CMD_KR_TUNE_IN_LENMIN 4 +#define MC_CMD_KR_TUNE_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 +/* enum: Get current RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0 +/* enum: Override RXEQ settings */ +#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1 +/* enum: Get current TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2 +/* enum: Override TX Driver settings */ +#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3 +/* enum: Force KR Serdes reset / recalibration */ +#define MC_CMD_KR_TUNE_IN_RECAL 0x4 +/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid + * signal. + */ +#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5 +/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The + * caller should call this command repeatedly after starting eye plot, until no + * more data is returned. 
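+ *
+ * A minimal polling sketch (illustrative only, not taken from the MCDI
+ * specification; it assumes a request buffer "inbuf" already carrying the
+ * POLL_EYE_PLOT op in its first dword, and uses the efx_mcdi_rpc() helper
+ * as seen in mcdi_port.c below):
+ *
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	do {
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *	} while (rc == 0 && outlen != 0);
+ *
+ * The loop ends once a zero-length response indicates that no more BER
+ * samples are available.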
+ */ +#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62 + +/* MC_CMD_KR_TUNE_OUT msgresponse */ +#define MC_CMD_KR_TUNE_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* RXEQ Parameter */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 +/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 +/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 +/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 +/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* RXEQ Parameter */ 
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: TX Amplitude */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 +/* enum: De-Emphasis Tap1 Magnitude (0-7) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 +/* enum: De-Emphasis Tap1 Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 +/* enum: De-Emphasis Tap2 Magnitude (0-6) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 +/* enum: De-Emphasis Tap2 Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 +/* enum: Pre-Emphasis Magnitude */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 +/* enum: Pre-Emphasis Fine */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 +/* enum: TX Slew Rate Coarse control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 +/* enum: TX Slew Rate Fine control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8 + +/* 
MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3 +/* TXEQ Parameter */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */ +#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */ +#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */ +#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 + +/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3 + +/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 + + +/***********************************/ +/* MC_CMD_PCIE_TUNE + * Get or set PCIE Serdes RXEQ and TX Driver settings + */ +#define MC_CMD_PCIE_TUNE 0xf2 + +/* MC_CMD_PCIE_TUNE_IN msgrequest */ +#define 
MC_CMD_PCIE_TUNE_IN_LENMIN 4 +#define MC_CMD_PCIE_TUNE_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 +/* enum: Get current RXEQ settings */ +#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 +/* enum: Override RXEQ settings */ +#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 +/* enum: Get current TX Driver settings */ +#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 +/* enum: Override TX Driver settings */ +#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 +/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */ +#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 +/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The + * caller should call this command repeatedly after starting eye plot, until no + * more data is returned. + */ +#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 +/* Arguments specific to the operation */ +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62 + +/* MC_CMD_PCIE_TUNE_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: Attenuation (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 +/* enum: CTLE Boost (0-15) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 +/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 +/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 +/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 +/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 +/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ +#define 
MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 +/* enum: TxMargin (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 +/* enum: TxSwing (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 +/* enum: De-emphasis coefficient C(-1) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 +/* enum: De-emphasis coefficient C(0) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 +/* enum: De-emphasis coefficient C(+1) (PIPE) */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 + +/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0 + +/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4 +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 + +/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define 
MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ */
+#define MC_CMD_LICENSING 0xf3
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
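+ *
+ * A hypothetical query sketch (illustrative only; "app_id" stands for a
+ * LICENSED_APP_ID_xxx value chosen by the caller, and the MCDI helpers are
+ * the ones used in mcdi_port.c below):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, app_id);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE,
+ *			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), NULL);
+ *	if (rc == 0)
+ *		licensed = (MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE)
+ *			    == MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED);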
+ */ +#define MC_CMD_GET_LICENSED_APP_STATE 0xf5 + +/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4 +/* application ID to query (LICENSED_APP_ID_xxx) */ +#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0 + +/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4 +/* state of this application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0 +/* enum: no (or invalid) license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0 +/* enum: a valid license is present for the application */ +#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 + + +/***********************************/ +/* MC_CMD_LICENSED_APP_OP + * Perform an action for an individual licensed application. + */ +#define MC_CMD_LICENSED_APP_OP 0xf6 + +/* MC_CMD_LICENSED_APP_OP_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8 +#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 +/* enum: validate application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* arguments specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61 + +/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0 +#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num)) +/* result specific to this particular operation */ +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4 +/* validation challenge */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64 + +/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68 +/* feature expiry (time_t) */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0 +/* validation response */ +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 +#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 + + +/***********************************/ +/* MC_CMD_SET_PORT_SNIFF_CONFIG + * Configure port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic delivered to the host (non-promiscuous + * mode) or all traffic arriving at the port (promiscuous mode) may be + * delivered to a specific queue, or a set of queues with RSS. 
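+ *
+ * A hedged configuration sketch (illustrative only; "rxq" is assumed to be a
+ * receive queue handle owned by the caller, RX_CONTEXT is set to the
+ * never-valid 0xffffffff handle on the assumption that it is ignored in
+ * RX_MODE_SIMPLE, and the MCDI helpers are the ones used in mcdi_port.c
+ * below):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
+ *		       1 << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rxq);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
+ *		       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
+ *	MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT, 0xffffffff);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG,
+ *			  inbuf, sizeof(inbuf), NULL, 0, NULL);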
+ */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7 + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 + +/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PORT_SNIFF_CONFIG + * Obtain the current port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. + */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 + + +#endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c new file mode 100644 index 000000000..fb19b70ea --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -0,0 +1,1035 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2009-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* + * Driver for PHY related operations via MCDI. 
+ */ + +#include +#include "efx.h" +#include "phy.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "nic.h" +#include "selftest.h" + +struct efx_mcdi_phy_data { + u32 flags; + u32 type; + u32 supported_cap; + u32 channel; + u32 port; + u32 stats_mask; + u8 name[20]; + u32 media; + u32 mmd_mask; + u8 revision[20]; + u32 forced_cap; +}; + +static int +efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); + BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { + rc = -EIO; + goto fail; + } + + cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); + cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); + cfg->supported_cap = + MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); + cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); + cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); + cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); + memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), + sizeof(cfg->name)); + cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); + cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); + memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), + sizeof(cfg->revision)); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, + u32 flags, u32 loopback_mode, + u32 loopback_speed) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); + MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); + + rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST + + MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) { + rc = -EIO; + goto fail; + } + + *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED); + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_mcdi_mdio_read(struct net_device *net_dev, + int prtad, int devad, u16 addr) +{ + struct efx_nic *efx = netdev_priv(net_dev); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); + MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); + + rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != + MC_CMD_MDIO_STATUS_GOOD) + return -EIO; + + return 
(u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); +} + +static int efx_mcdi_mdio_write(struct net_device *net_dev, + int prtad, int devad, u16 addr, u16 value) +{ + struct efx_nic *efx = netdev_priv(net_dev); + MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); + MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); + + rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) != + MC_CMD_MDIO_STATUS_GOOD) + return -EIO; + + return 0; +} + +static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) +{ + u32 result = 0; + + switch (media) { + case MC_CMD_MEDIA_KX4: + result |= SUPPORTED_Backplane; + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + result |= SUPPORTED_1000baseKX_Full; + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + result |= SUPPORTED_10000baseKX4_Full; + if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + result |= SUPPORTED_40000baseKR4_Full; + break; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + result |= SUPPORTED_FIBRE; + break; + + case MC_CMD_MEDIA_QSFP_PLUS: + result |= SUPPORTED_FIBRE; + if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) + result |= SUPPORTED_40000baseCR4_Full; + break; + + case MC_CMD_MEDIA_BASE_T: + result |= SUPPORTED_TP; + if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) + result |= SUPPORTED_10baseT_Half; + if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) + result |= SUPPORTED_10baseT_Full; + if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) + result |= SUPPORTED_100baseT_Half; + if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) + result |= SUPPORTED_100baseT_Full; + if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) + result |= SUPPORTED_1000baseT_Half; + if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) + result |= SUPPORTED_1000baseT_Full; + if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) + result |= SUPPORTED_10000baseT_Full; + break; + } + + if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + result |= SUPPORTED_Pause; + if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + result |= SUPPORTED_Asym_Pause; + if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + result |= SUPPORTED_Autoneg; + + return result; +} + +static u32 ethtool_to_mcdi_cap(u32 cap) +{ + u32 result = 0; + + if (cap & SUPPORTED_10baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); + if (cap & SUPPORTED_10baseT_Full) + result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); + if (cap & SUPPORTED_100baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); + if (cap & SUPPORTED_100baseT_Full) + result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); + if (cap & SUPPORTED_1000baseT_Half) + result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); + if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) + result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); + if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) + result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); + if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); + if (cap & SUPPORTED_Pause) + result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); + if (cap & SUPPORTED_Asym_Pause) + result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); + if (cap & SUPPORTED_Autoneg) + result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + + return result; +} + +static u32 
efx_get_mcdi_phy_flags(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + enum efx_phy_mode mode, supported; + u32 flags; + + /* TODO: Advertise the capabilities supported by this PHY */ + supported = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN)) + supported |= PHY_MODE_TX_DISABLED; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN)) + supported |= PHY_MODE_LOW_POWER; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN)) + supported |= PHY_MODE_OFF; + + mode = efx->phy_mode & supported; + + flags = 0; + if (mode & PHY_MODE_TX_DISABLED) + flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN); + if (mode & PHY_MODE_LOW_POWER) + flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN); + if (mode & PHY_MODE_OFF) + flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN); + + return flags; +} + +static u32 mcdi_to_ethtool_media(u32 media) +{ + switch (media) { + case MC_CMD_MEDIA_XAUI: + case MC_CMD_MEDIA_CX4: + case MC_CMD_MEDIA_KX4: + return PORT_OTHER; + + case MC_CMD_MEDIA_XFP: + case MC_CMD_MEDIA_SFP_PLUS: + case MC_CMD_MEDIA_QSFP_PLUS: + return PORT_FIBRE; + + case MC_CMD_MEDIA_BASE_T: + return PORT_TP; + + default: + return PORT_OTHER; + } +} + +static void efx_mcdi_phy_decode_link(struct efx_nic *efx, + struct efx_link_state *link_state, + u32 speed, u32 flags, u32 fcntl) +{ + switch (fcntl) { + case MC_CMD_FCNTL_AUTO: + WARN_ON(1); /* This is not a link mode */ + link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_BIDIR: + link_state->fc = EFX_FC_TX | EFX_FC_RX; + break; + case MC_CMD_FCNTL_RESPOND: + link_state->fc = EFX_FC_RX; + break; + default: + WARN_ON(1); + case MC_CMD_FCNTL_OFF: + link_state->fc = 0; + break; + } + + link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN)); + link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN)); + link_state->speed = speed; +} + +static int efx_mcdi_phy_probe(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + u32 caps; + int rc; + + /* Initialise and populate phy_data */ + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (phy_data == NULL) + return -ENOMEM; + + rc = efx_mcdi_get_phy_cfg(efx, phy_data); + if (rc != 0) + goto fail; + + /* Read initial link advertisement */ + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + goto fail; + + /* Fill out nic state */ + efx->phy_data = phy_data; + efx->phy_type = phy_data->type; + + efx->mdio_bus = phy_data->channel; + efx->mdio.prtad = phy_data->port; + efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); + efx->mdio.mode_support = 0; + if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C22; + if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) + efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); + if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->link_advertising = + mcdi_to_ethtool_cap(phy_data->media, caps); + else + phy_data->forced_cap = caps; + + /* Assert that we can map efx -> mcdi loopback modes */ + BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); + BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); + BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); + BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); + BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); + BUILD_BUG_ON(LOOPBACK_XAUI != 
MC_CMD_LOOPBACK_XAUI); + BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); + BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); + BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); + BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); + BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); + BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); + BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); + BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); + BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); + BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); + BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); + BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); + BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); + BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); + BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); + BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); + BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); + + rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); + if (rc != 0) + goto fail; + /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, + * but by convention we don't */ + efx->loopback_modes &= ~(1 << LOOPBACK_NONE); + + /* Set the initial link mode */ + efx_mcdi_phy_decode_link( + efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + + /* Default to Autonegotiated flow control if the PHY supports it */ + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->wanted_fc |= EFX_FC_AUTO; + efx_link_set_wanted_fc(efx, efx->wanted_fc); + + return 0; + +fail: + kfree(phy_data); + return rc; +} + +int efx_mcdi_port_reconfigure(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps = (efx->link_advertising ? + ethtool_to_mcdi_cap(efx->link_advertising) : + phy_cfg->forced_cap); + + return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); +} + +/* Verify that the forced flow control settings (!EFX_FC_AUTO) are + * supported by the link partner. 
Warn the user if this isn't the case + */ +static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 rmtadv; + + /* The link partner capabilities are only relevant if the + * link supports flow control autonegotiation */ + if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + return; + + /* If flow control autoneg is supported and enabled, then fine */ + if (efx->wanted_fc & EFX_FC_AUTO) + return; + + rmtadv = 0; + if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) + rmtadv |= ADVERTISED_Pause; + if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) + rmtadv |= ADVERTISED_Asym_Pause; + + if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) + netif_err(efx, link, efx->net_dev, + "warning: link partner doesn't support pause frames"); +} + +static bool efx_mcdi_phy_poll(struct efx_nic *efx) +{ + struct efx_link_state old_state = efx->link_state; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + efx->link_state.up = false; + else + efx_mcdi_phy_decode_link( + efx, &efx->link_state, + MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), + MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), + MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); + + return !efx_link_state_equal(&efx->link_state, &old_state); +} + +static void efx_mcdi_phy_remove(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + efx->phy_data = NULL; + kfree(phy_data); +} + +static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + int rc; + + ecmd->supported = + mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); + ecmd->advertising = efx->link_advertising; + ethtool_cmd_speed_set(ecmd, efx->link_state.speed); + ecmd->duplex = efx->link_state.fd; + ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); + ecmd->phy_address = phy_cfg->port; + ecmd->transceiver = XCVR_INTERNAL; + ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); + ecmd->mdio_support = (efx->mdio.mode_support & + (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return; + ecmd->lp_advertising = + mcdi_to_ethtool_cap(phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); +} + +static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps; + int rc; + + if (ecmd->autoneg) { + caps = (ethtool_to_mcdi_cap(ecmd->advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); + } else if (ecmd->duplex) { + switch (ethtool_cmd_speed(ecmd)) { + case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; + case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; + case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; + default: return -EINVAL; + } + } else { + switch (ethtool_cmd_speed(ecmd)) { + case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; + default: return -EINVAL; + } + } + + rc = 
efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); + if (rc) + return rc; + + if (ecmd->autoneg) { + efx_link_set_advertising( + efx, ecmd->advertising | ADVERTISED_Autoneg); + phy_cfg->forced_cap = 0; + } else { + efx_link_set_advertising(efx, 0); + phy_cfg->forced_cap = caps; + } + return 0; +} + +static int efx_mcdi_phy_test_alive(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) + return -EIO; + if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK) + return -EINVAL; + + return 0; +} + +static const char *const mcdi_sft9001_cable_diag_names[] = { + "cable.pairA.length", + "cable.pairB.length", + "cable.pairC.length", + "cable.pairD.length", + "cable.pairA.status", + "cable.pairB.status", + "cable.pairC.status", + "cable.pairD.status", +}; + +static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, + int *results) +{ + unsigned int retry, i, count = 0; + size_t outlen; + u32 status; + MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN); + u8 *ptr; + int rc; + + BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); + MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode); + rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, + inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL); + if (rc) + goto out; + + /* Wait up to 10s for BIST to finish */ + for (retry = 0; retry < 100; ++retry) { + BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); + rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto out; + + status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); + if (status != MC_CMD_POLL_BIST_RUNNING) + goto finished; + + msleep(100); + } + + rc = -ETIMEDOUT; + goto out; + +finished: + results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1; + + /* SFT9001 specific cable diagnostics output */ + if (efx->phy_type == PHY_TYPE_SFT9001B && + (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || + bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { + ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); + if (status == MC_CMD_POLL_BIST_PASSED && + outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { + for (i = 0; i < 8; i++) { + results[count + i] = + EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i], + EFX_DWORD_0); + } + } + count += 8; + } + rc = count; + +out: + return rc; +} + +static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, + unsigned flags) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 mode; + int rc; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); + if (rc < 0) + return rc; + + results += rc; + } + + /* If we support both LONG and SHORT, then run each in response to + * break or not. 
Otherwise, run the one we support */ + mode = 0; + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) { + if ((flags & ETH_TEST_FL_OFFLINE) && + (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + else + mode = MC_CMD_PHY_BIST_CABLE_SHORT; + } else if (phy_cfg->flags & + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)) + mode = MC_CMD_PHY_BIST_CABLE_LONG; + + if (mode != 0) { + rc = efx_mcdi_bist(efx, mode, results); + if (rc < 0) + return rc; + results += rc; + } + + return 0; +} + +static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, + unsigned int index) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) { + if (index == 0) + return "bist"; + --index; + } + + if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) | + (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) { + if (index == 0) + return "cable"; + --index; + + if (efx->phy_type == PHY_TYPE_SFT9001B) { + if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) + return mcdi_sft9001_cable_diag_names[index]; + index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); + } + } + + return NULL; +} + +#define SFP_PAGE_SIZE 128 +#define SFP_NUM_PAGES 2 +static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); + MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); + size_t outlen; + int rc; + unsigned int payload_len; + unsigned int space_remaining = ee->len; + unsigned int page; + unsigned int page_off; + unsigned int to_copy; + u8 *user_data = data; + + BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN); + + page_off = ee->offset % SFP_PAGE_SIZE; + page = ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < SFP_NUM_PAGES)) { + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + return -EIO; + + payload_len = MCDI_DWORD(outbuf, + GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; + + /* Copy as much as we can into data */ + payload_len -= page_off; + to_copy = (space_remaining < payload_len) ? 
+ space_remaining : payload_len; + + memcpy(user_data, + MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off, + to_copy); + + space_remaining -= to_copy; + user_data += to_copy; + page_off = 0; + page++; + } + + return 0; +} + +static int efx_mcdi_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + + switch (phy_cfg->media) { + case MC_CMD_MEDIA_SFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct efx_phy_operations efx_mcdi_phy_ops = { + .probe = efx_mcdi_phy_probe, + .init = efx_port_dummy_op_int, + .reconfigure = efx_mcdi_port_reconfigure, + .poll = efx_mcdi_phy_poll, + .fini = efx_port_dummy_op_void, + .remove = efx_mcdi_phy_remove, + .get_settings = efx_mcdi_phy_get_settings, + .set_settings = efx_mcdi_phy_set_settings, + .test_alive = efx_mcdi_phy_test_alive, + .run_tests = efx_mcdi_phy_run_tests, + .test_name = efx_mcdi_phy_test_name, + .get_module_eeprom = efx_mcdi_phy_get_module_eeprom, + .get_module_info = efx_mcdi_phy_get_module_info, +}; + +u32 efx_mcdi_phy_get_caps(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + return phy_data->supported_cap; +} + +static unsigned int efx_mcdi_event_link_speed[] = { + [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, + [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, + [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, + [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000, +}; + +void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) +{ + u32 flags, fcntl, speed, lpa; + + speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); + EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); + speed = efx_mcdi_event_link_speed[speed]; + + flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); + fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); + lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); + + /* efx->link_state is only modified by efx_mcdi_phy_get_link(), + * which is only run after flushing the event queues. Therefore, it + * is safe to modify the link state outside of the mac_lock here. 
+ */ + efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); + + efx_mcdi_phy_check_fcntl(efx, lpa); + + efx_link_status_changed(efx); +} + +int efx_mcdi_set_mac(struct efx_nic *efx) +{ + u32 fcntl; + MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), + efx->net_dev->dev_addr); + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, + EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); + + /* Set simple MAC filter for Siena */ + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT, + SET_MAC_IN_REJECT_UNCST, efx->unicast_filter); + + switch (efx->wanted_fc) { + case EFX_FC_RX | EFX_FC_TX: + fcntl = MC_CMD_FCNTL_BIDIR; + break; + case EFX_FC_RX: + fcntl = MC_CMD_FCNTL_RESPOND; + break; + default: + fcntl = MC_CMD_FCNTL_OFF; + break; + } + if (efx->wanted_fc & EFX_FC_AUTO) + fcntl = MC_CMD_FCNTL_AUTO; + if (efx->fc_disable) + fcntl = MC_CMD_FCNTL_OFF; + + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); + + return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes), + NULL, 0, NULL); +} + +bool efx_mcdi_mac_check_fault(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); + size_t outlength; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, + outbuf, sizeof(outbuf), &outlength); + if (rc) + return true; + + return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0; +} + +enum efx_stats_action { + EFX_STATS_ENABLE, + EFX_STATS_DISABLE, + EFX_STATS_PULL, +}; + +static int efx_mcdi_mac_stats(struct efx_nic *efx, + enum efx_stats_action action, int clear) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + int rc; + int change = action == EFX_STATS_PULL ? 0 : 1; + int enable = action == EFX_STATS_ENABLE ? 1 : 0; + int period = action == EFX_STATS_ENABLE ? 1000 : 0; + dma_addr_t dma_addr = efx->stats_buffer.dma_addr; + u32 dma_len = action != EFX_STATS_DISABLE ? 
+ MC_CMD_MAC_NSTATS * sizeof(u64) : 0; + + BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); + MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, !!enable, + MAC_STATS_IN_CLEAR, clear, + MAC_STATS_IN_PERIODIC_CHANGE, change, + MAC_STATS_IN_PERIODIC_ENABLE, enable, + MAC_STATS_IN_PERIODIC_CLEAR, 0, + MAC_STATS_IN_PERIODIC_NOEVENT, 1, + MAC_STATS_IN_PERIOD_MS, period); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + + rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + return rc; +} + +void efx_mcdi_mac_start_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + + dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + + efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); +} + +void efx_mcdi_mac_stop_stats(struct efx_nic *efx) +{ + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); +} + +#define EFX_MAC_STATS_WAIT_US 100 +#define EFX_MAC_STATS_WAIT_ATTEMPTS 10 + +void efx_mcdi_mac_pull_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; + + dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); + + while (dma_stats[MC_CMD_MAC_GENERATION_END] == + EFX_MC_STATS_GENERATION_INVALID && + attempts-- != 0) + udelay(EFX_MAC_STATS_WAIT_US); +} + +int efx_mcdi_port_probe(struct efx_nic *efx) +{ + int rc; + + /* Hook in PHY operations table */ + efx->phy_op = &efx_mcdi_phy_ops; + + /* Set up MDIO structure for PHY */ + efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + efx->mdio.mdio_read = efx_mcdi_mdio_read; + efx->mdio.mdio_write = efx_mcdi_mdio_write; + + /* Fill out MDIO structure, loopback modes, and initial link state */ + rc = efx->phy_op->probe(efx); + if (rc != 0) + return rc; + + /* Allocate buffer for stats */ + rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, + MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL); + if (rc) + return rc; + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64)efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64)virt_to_phys(efx->stats_buffer.addr)); + + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1); + + return 0; +} + +void efx_mcdi_port_remove(struct efx_nic *efx) +{ + efx->phy_op->remove(efx); + efx_nic_free_buffer(efx, &efx->stats_buffer); +} + +/* Get physical port number (EF10 only; on Siena it is same as PF number) */ +int efx_mcdi_port_get_number(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0, + outbuf, sizeof(outbuf), NULL); + if (rc) + return rc; + + return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT); +} diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c new file mode 100644 index 000000000..8ff954c59 --- /dev/null +++ b/drivers/net/ethernet/sfc/mdio_10g.c @@ -0,0 +1,323 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2006-2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ +/* + * Useful functions for working with MDIO clause 45 PHYs + */ +#include +#include +#include +#include "net_driver.h" +#include "mdio_10g.h" +#include "workarounds.h" + +unsigned efx_mdio_id_oui(u32 id) +{ + unsigned oui = 0; + int i; + + /* The bits of the OUI are designated a..x, with a=0 and b variable. + * In the id register c is the MSB but the OUI is conventionally + * written as bytes h..a, p..i, x..q. Reorder the bits accordingly. */ + for (i = 0; i < 22; ++i) + if (id & (1 << (i + 10))) + oui |= 1 << (i ^ 7); + + return oui; +} + +int efx_mdio_reset_mmd(struct efx_nic *port, int mmd, + int spins, int spintime) +{ + u32 ctrl; + + /* Catch callers passing values in the wrong units (or just silly) */ + EFX_BUG_ON_PARANOID(spins * spintime >= 5000); + + efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET); + /* Wait for the reset bit to clear. */ + do { + msleep(spintime); + ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1); + spins--; + + } while (spins && (ctrl & MDIO_CTRL1_RESET)); + + return spins ? spins : -ETIMEDOUT; +} + +static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd) +{ + int status; + + if (mmd != MDIO_MMD_AN) { + /* Read MMD STATUS2 to check it is responding. */ + status = efx_mdio_read(efx, mmd, MDIO_STAT2); + if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) { + netif_err(efx, hw, efx->net_dev, + "PHY MMD %d not responding.\n", mmd); + return -EIO; + } + } + + return 0; +} + +/* This ought to be ridiculous overkill. We expect it to fail rarely */ +#define MDIO45_RESET_TIME 1000 /* ms */ +#define MDIO45_RESET_ITERS 100 + +int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask) +{ + const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS; + int tries = MDIO45_RESET_ITERS; + int rc = 0; + int in_reset; + + while (tries) { + int mask = mmd_mask; + int mmd = 0; + int stat; + in_reset = 0; + while (mask) { + if (mask & 1) { + stat = efx_mdio_read(efx, mmd, MDIO_CTRL1); + if (stat < 0) { + netif_err(efx, hw, efx->net_dev, + "failed to read status of" + " MMD %d\n", mmd); + return -EIO; + } + if (stat & MDIO_CTRL1_RESET) + in_reset |= (1 << mmd); + } + mask = mask >> 1; + mmd++; + } + if (!in_reset) + break; + tries--; + msleep(spintime); + } + if (in_reset != 0) { + netif_err(efx, hw, efx->net_dev, + "not all MMDs came out of reset in time." + " MMDs still in reset: %x\n", in_reset); + rc = -ETIMEDOUT; + } + return rc; +} + +int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask) +{ + int mmd = 0, probe_mmd, devs1, devs2; + u32 devices; + + /* Historically we have probed the PHYXS to find out what devices are + * present,but that doesn't work so well if the PHYXS isn't expected + * to exist, if so just find the first item in the list supplied. */ + probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS : + __ffs(mmd_mask); + + /* Check all the expected MMDs are present */ + devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1); + devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2); + if (devs1 < 0 || devs2 < 0) { + netif_err(efx, hw, efx->net_dev, + "failed to read devices present\n"); + return -EIO; + } + devices = devs1 | (devs2 << 16); + if ((devices & mmd_mask) != mmd_mask) { + netif_err(efx, hw, efx->net_dev, + "required MMDs not present: got %x, wanted %x\n", + devices, mmd_mask); + return -ENODEV; + } + netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices); + + /* Check all required MMDs are responding and happy. 
*/ + while (mmd_mask) { + if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd)) + return -EIO; + mmd_mask = mmd_mask >> 1; + mmd++; + } + + return 0; +} + +bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask) +{ + /* If the port is in loopback, then we should only consider a subset + * of mmd's */ + if (LOOPBACK_INTERNAL(efx)) + return true; + else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS) + return false; + else if (efx_phy_mode_disabled(efx->phy_mode)) + return false; + else if (efx->loopback_mode == LOOPBACK_PHYXS) + mmd_mask &= ~(MDIO_DEVS_PHYXS | + MDIO_DEVS_PCS | + MDIO_DEVS_PMAPMD | + MDIO_DEVS_AN); + else if (efx->loopback_mode == LOOPBACK_PCS) + mmd_mask &= ~(MDIO_DEVS_PCS | + MDIO_DEVS_PMAPMD | + MDIO_DEVS_AN); + else if (efx->loopback_mode == LOOPBACK_PMAPMD) + mmd_mask &= ~(MDIO_DEVS_PMAPMD | + MDIO_DEVS_AN); + + return mdio45_links_ok(&efx->mdio, mmd_mask); +} + +void efx_mdio_transmit_disable(struct efx_nic *efx) +{ + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, + MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL, + efx->phy_mode & PHY_MODE_TX_DISABLED); +} + +void efx_mdio_phy_reconfigure(struct efx_nic *efx) +{ + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, + MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK, + efx->loopback_mode == LOOPBACK_PMAPMD); + efx_mdio_set_flag(efx, MDIO_MMD_PCS, + MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK, + efx->loopback_mode == LOOPBACK_PCS); + efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, + MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK, + efx->loopback_mode == LOOPBACK_PHYXS_WS); +} + +static void efx_mdio_set_mmd_lpower(struct efx_nic *efx, + int lpower, int mmd) +{ + int stat = efx_mdio_read(efx, mmd, MDIO_STAT1); + + netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n", + mmd, lpower); + + if (stat & MDIO_STAT1_LPOWERABLE) { + efx_mdio_set_flag(efx, mmd, MDIO_CTRL1, + MDIO_CTRL1_LPOWER, lpower); + } +} + +void efx_mdio_set_mmds_lpower(struct efx_nic *efx, + int low_power, unsigned int mmd_mask) +{ + int mmd = 0; + mmd_mask &= ~MDIO_DEVS_AN; + while (mmd_mask) { + if (mmd_mask & 1) + efx_mdio_set_mmd_lpower(efx, low_power, mmd); + mmd_mask = (mmd_mask >> 1); + mmd++; + } +} + +/** + * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO. 
+ * @efx: Efx NIC + * @ecmd: New settings + */ +int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET }; + + efx->phy_op->get_settings(efx, &prev); + + if (ecmd->advertising == prev.advertising && + ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) && + ecmd->duplex == prev.duplex && + ecmd->port == prev.port && + ecmd->autoneg == prev.autoneg) + return 0; + + /* We can only change these settings for -T PHYs */ + if (prev.port != PORT_TP || ecmd->port != PORT_TP) + return -EINVAL; + + /* Check that PHY supports these settings */ + if (!ecmd->autoneg || + (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported) + return -EINVAL; + + efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg); + efx_mdio_an_reconfigure(efx); + return 0; +} + +/** + * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation + * @efx: Efx NIC + */ +void efx_mdio_an_reconfigure(struct efx_nic *efx) +{ + int reg; + + WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN)); + + /* Set up the base page */ + reg = ADVERTISE_CSMA | ADVERTISE_RESV; + if (efx->link_advertising & ADVERTISED_Pause) + reg |= ADVERTISE_PAUSE_CAP; + if (efx->link_advertising & ADVERTISED_Asym_Pause) + reg |= ADVERTISE_PAUSE_ASYM; + efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); + + /* Set up the (extended) next page */ + efx->phy_op->set_npage_adv(efx, efx->link_advertising); + + /* Enable and restart AN */ + reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); + reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP; + efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg); +} + +u8 efx_mdio_get_pause(struct efx_nic *efx) +{ + BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX)); + + if (!(efx->wanted_fc & EFX_FC_AUTO)) + return efx->wanted_fc; + + WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN)); + + return mii_resolve_flowctrl_fdx( + mii_advertise_flowctrl(efx->wanted_fc), + efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA)); +} + +int efx_mdio_test_alive(struct efx_nic *efx) +{ + int rc; + int devad = __ffs(efx->mdio.mmds); + u16 physid1, physid2; + + mutex_lock(&efx->mac_lock); + + physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1); + physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2); + + if ((physid1 == 0x0000) || (physid1 == 0xffff) || + (physid2 == 0x0000) || (physid2 == 0xffff)) { + netif_err(efx, hw, efx->net_dev, + "no MDIO PHY present with ID %d\n", efx->mdio.prtad); + rc = -EINVAL; + } else { + rc = efx_mdio_check_mmds(efx, efx->mdio.mmds); + } + + mutex_unlock(&efx->mac_lock); + return rc; +} diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h new file mode 100644 index 000000000..4a2dc4c28 --- /dev/null +++ b/drivers/net/ethernet/sfc/mdio_10g.h @@ -0,0 +1,110 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2006-2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_MDIO_10G_H +#define EFX_MDIO_10G_H + +#include + +/* + * Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45. 
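+ *
+ * A minimal usage sketch, assuming "efx" is a valid, probed &struct efx_nic
+ * (the helpers below are declared in this header; the MMD constant comes
+ * from the clause-45 MDIO definitions):
+ *
+ *	u32 id = efx_mdio_read_id(efx, MDIO_MMD_PMAPMD);
+ *	unsigned oui = efx_mdio_id_oui(id);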
+ */ + +#include "efx.h" + +static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } +static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } +unsigned efx_mdio_id_oui(u32 id); + +static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr) +{ + return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr); +} + +static inline void +efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value) +{ + efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value); +} + +static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd) +{ + u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2); + u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1); + return (id_hi << 16) | (id_low); +} + +static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx) +{ + int i, lane_status; + bool sync; + + for (i = 0; i < 2; ++i) + lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS, + MDIO_PHYXS_LNSTAT); + + sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN); + if (!sync) + netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n", + lane_status); + return sync; +} + +const char *efx_mdio_mmd_name(int mmd); + +/* + * Reset a specific MMD and wait for reset to clear. + * Return number of spins left (>0) on success, -%ETIMEDOUT on failure. + * + * This function will sleep + */ +int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime); + +/* As efx_mdio_check_mmd but for multiple MMDs */ +int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask); + +/* Check the link status of specified mmds in bit mask */ +bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); + +/* Generic transmit disable support though PMAPMD */ +void efx_mdio_transmit_disable(struct efx_nic *efx); + +/* Generic part of reconfigure: set/clear loopback bits */ +void efx_mdio_phy_reconfigure(struct efx_nic *efx); + +/* Set the power state of the specified MMDs */ +void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power, + unsigned int mmd_mask); + +/* Set (some of) the PHY settings over MDIO */ +int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); + +/* Push advertising flags and restart autonegotiation */ +void efx_mdio_an_reconfigure(struct efx_nic *efx); + +/* Get pause parameters from AN if available (otherwise return + * requested pause parameters) + */ +u8 efx_mdio_get_pause(struct efx_nic *efx); + +/* Wait for specified MMDs to exit reset within a timeout */ +int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask); + +/* Set or clear flag, debouncing */ +static inline void +efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr, + int mask, bool state) +{ + mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state); +} + +/* Liveness self-test for MDIO PHYs */ +int efx_mdio_test_alive(struct efx_nic *efx); + +#endif /* EFX_MDIO_10G_H */ diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c new file mode 100644 index 000000000..a77a8bd2d --- /dev/null +++ b/drivers/net/ethernet/sfc/mtd.c @@ -0,0 +1,133 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include + +#include "net_driver.h" +#include "efx.h" + +#define to_efx_mtd_partition(mtd) \ + container_of(mtd, struct efx_mtd_partition, mtd) + +/* MTD interface */ + +static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) +{ + struct efx_nic *efx = mtd->priv; + int rc; + + rc = efx->type->mtd_erase(mtd, erase->addr, erase->len); + if (rc == 0) { + erase->state = MTD_ERASE_DONE; + } else { + erase->state = MTD_ERASE_FAILED; + erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN; + } + mtd_erase_callback(erase); + return rc; +} + +static void efx_mtd_sync(struct mtd_info *mtd) +{ + struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); + struct efx_nic *efx = mtd->priv; + int rc; + + rc = efx->type->mtd_sync(mtd); + if (rc) + pr_err("%s: %s sync failed (%d)\n", + part->name, part->dev_type_name, rc); +} + +static void efx_mtd_remove_partition(struct efx_mtd_partition *part) +{ + int rc; + + for (;;) { + rc = mtd_device_unregister(&part->mtd); + if (rc != -EBUSY) + break; + ssleep(1); + } + WARN_ON(rc); + list_del(&part->node); +} + +int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, + size_t n_parts, size_t sizeof_part) +{ + struct efx_mtd_partition *part; + size_t i; + + for (i = 0; i < n_parts; i++) { + part = (struct efx_mtd_partition *)((char *)parts + + i * sizeof_part); + + part->mtd.writesize = 1; + + part->mtd.owner = THIS_MODULE; + part->mtd.priv = efx; + part->mtd.name = part->name; + part->mtd._erase = efx_mtd_erase; + part->mtd._read = efx->type->mtd_read; + part->mtd._write = efx->type->mtd_write; + part->mtd._sync = efx_mtd_sync; + + efx->type->mtd_rename(part); + + if (mtd_device_register(&part->mtd, NULL, 0)) + goto fail; + + /* Add to list in order - efx_mtd_remove() depends on this */ + list_add_tail(&part->node, &efx->mtd_list); + } + + return 0; + +fail: + while (i--) { + part = (struct efx_mtd_partition *)((char *)parts + + i * sizeof_part); + efx_mtd_remove_partition(part); + } + /* Failure is unlikely here, but probably means we're out of memory */ + return -ENOMEM; +} + +void efx_mtd_remove(struct efx_nic *efx) +{ + struct efx_mtd_partition *parts, *part, *next; + + WARN_ON(efx_dev_registered(efx)); + + if (list_empty(&efx->mtd_list)) + return; + + parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition, + node); + + list_for_each_entry_safe(part, next, &efx->mtd_list, node) + efx_mtd_remove_partition(part); + + kfree(parts); +} + +void efx_mtd_rename(struct efx_nic *efx) +{ + struct efx_mtd_partition *part; + + ASSERT_RTNL(); + + list_for_each_entry(part, &efx->mtd_list, node) + efx->type->mtd_rename(part); +} diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h new file mode 100644 index 000000000..325dd94bc --- /dev/null +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -0,0 +1,1504 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* Common definitions for all Efx net driver code */ + +#ifndef EFX_NET_DRIVER_H +#define EFX_NET_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "enum.h" +#include "bitfield.h" +#include "filter.h" + +/************************************************************************** + * + * Build definitions + * + **************************************************************************/ + +#define EFX_DRIVER_VERSION "4.0" + +#ifdef DEBUG +#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) +#define EFX_WARN_ON_PARANOID(x) WARN_ON(x) +#else +#define EFX_BUG_ON_PARANOID(x) do {} while (0) +#define EFX_WARN_ON_PARANOID(x) do {} while (0) +#endif + +/************************************************************************** + * + * Efx data structures + * + **************************************************************************/ + +#define EFX_MAX_CHANNELS 32U +#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS +#define EFX_EXTRA_CHANNEL_IOV 0 +#define EFX_EXTRA_CHANNEL_PTP 1 +#define EFX_MAX_EXTRA_CHANNELS 2U + +/* Checksum generation is a per-queue option in hardware, so each + * queue visible to the networking core is backed by two hardware TX + * queues. */ +#define EFX_MAX_TX_TC 2 +#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) +#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */ +#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */ +#define EFX_TXQ_TYPES 4 +#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) + +/* Maximum possible MTU the driver supports */ +#define EFX_MAX_MTU (9 * 1024) + +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, + * and should be a multiple of the cache line size. + */ +#define EFX_RX_USR_BUF_SIZE (2048 - 256) + +/* If possible, we should ensure cache line alignment at start and end + * of every buffer. Otherwise, we just need to ensure 4-byte + * alignment of the network header. + */ +#if NET_IP_ALIGN == 0 +#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES +#else +#define EFX_RX_BUF_ALIGNMENT 4 +#endif + +/* Forward declare Precision Time Protocol (PTP) support structure. */ +struct efx_ptp_data; +struct hwtstamp_config; + +struct efx_self_tests; + +/** + * struct efx_buffer - A general-purpose DMA buffer + * @addr: host base address of the buffer + * @dma_addr: DMA base address of the buffer + * @len: Buffer length, in bytes + * + * The NIC uses these buffers for its interrupt status registers and + * MAC stats dumps. + */ +struct efx_buffer { + void *addr; + dma_addr_t dma_addr; + unsigned int len; +}; + +/** + * struct efx_special_buffer - DMA buffer entered into buffer table + * @buf: Standard &struct efx_buffer + * @index: Buffer index within controller;s buffer table + * @entries: Number of buffer table entries + * + * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE. + * Event and descriptor rings are addressed via one or more buffer + * table entries (and so can be physically non-contiguous, although we + * currently do not take advantage of that). On Falcon and Siena we + * have to take care of allocating and initialising the entries + * ourselves. On later hardware this is managed by the firmware and + * @index and @entries are left as 0. 
+ */ +struct efx_special_buffer { + struct efx_buffer buf; + unsigned int index; + unsigned int entries; +}; + +/** + * struct efx_tx_buffer - buffer state for a TX descriptor + * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be + * freed when descriptor completes + * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be + * freed when descriptor completes. + * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor. + * @dma_addr: DMA address of the fragment. + * @flags: Flags for allocation and DMA mapping type + * @len: Length of this fragment. + * This field is zero when the queue slot is empty. + * @unmap_len: Length of this fragment to unmap + * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping. + * Only valid if @unmap_len != 0. + */ +struct efx_tx_buffer { + union { + const struct sk_buff *skb; + void *heap_buf; + }; + union { + efx_qword_t option; + dma_addr_t dma_addr; + }; + unsigned short flags; + unsigned short len; + unsigned short unmap_len; + unsigned short dma_offset; +}; +#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */ +#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ +#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */ +#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ +#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */ + +/** + * struct efx_tx_queue - An Efx TX queue + * + * This is a ring buffer of TX fragments. + * Since the TX completion path always executes on the same + * CPU and the xmit path can operate on different CPUs, + * performance is increased by ensuring that the completion + * path and the xmit path operate on different cache lines. + * This is particularly important if the xmit path is always + * executing on one CPU which is different from the completion + * path. There is also a cache line for members which are + * read but not written on the fast path. + * + * @efx: The associated Efx NIC + * @queue: DMA queue number + * @channel: The associated channel + * @core_txq: The networking core TX queue structure + * @buffer: The software buffer ring + * @tsoh_page: Array of pages of TSO header buffers + * @txd: The hardware descriptor ring + * @ptr_mask: The size of the ring minus 1. + * @piobuf: PIO buffer region for this TX queue (shared with its partner). + * Size of the region is efx_piobuf_size. + * @piobuf_offset: Buffer offset to be specified in PIO descriptors + * @initialised: Has hardware queue been initialised? + * @read_count: Current read pointer. + * This is the number of buffers that have been removed from both rings. + * @old_write_count: The value of @write_count when last checked. + * This is here for performance reasons. The xmit path will + * only get the up-to-date value of @write_count if this + * variable indicates that the queue is empty. This is to + * avoid cache-line ping-pong between the xmit path and the + * completion path. + * @merge_events: Number of TX merged completion events + * @insert_count: Current insert pointer + * This is the number of buffers that have been added to the + * software ring. + * @write_count: Current write pointer + * This is the number of buffers that have been added to the + * hardware ring. + * @old_read_count: The value of read_count when last checked. + * This is here for performance reasons. The xmit path will + * only get the up-to-date value of read_count if this + * variable indicates that the queue is full. 
This is to + * avoid cache-line ping-pong between the xmit path and the + * completion path. + * @tso_bursts: Number of times TSO xmit invoked by kernel + * @tso_long_headers: Number of packets with headers too long for standard + * blocks + * @tso_packets: Number of packets via the TSO xmit path + * @pushes: Number of times the TX push feature has been used + * @pio_packets: Number of times the TX PIO feature has been used + * @empty_read_count: If the completion path has seen the queue as empty + * and the transmission path has not yet checked this, the value of + * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. + */ +struct efx_tx_queue { + /* Members which don't change on the fast path */ + struct efx_nic *efx ____cacheline_aligned_in_smp; + unsigned queue; + struct efx_channel *channel; + struct netdev_queue *core_txq; + struct efx_tx_buffer *buffer; + struct efx_buffer *tsoh_page; + struct efx_special_buffer txd; + unsigned int ptr_mask; + void __iomem *piobuf; + unsigned int piobuf_offset; + bool initialised; + + /* Members used mainly on the completion path */ + unsigned int read_count ____cacheline_aligned_in_smp; + unsigned int old_write_count; + unsigned int merge_events; + + /* Members used only on the xmit path */ + unsigned int insert_count ____cacheline_aligned_in_smp; + unsigned int write_count; + unsigned int old_read_count; + unsigned int tso_bursts; + unsigned int tso_long_headers; + unsigned int tso_packets; + unsigned int pushes; + unsigned int pio_packets; + /* Statistics to supplement MAC stats */ + unsigned long tx_packets; + + /* Members shared between paths and sometimes updated */ + unsigned int empty_read_count ____cacheline_aligned_in_smp; +#define EFX_EMPTY_COUNT_VALID 0x80000000 + atomic_t flush_outstanding; +}; + +/** + * struct efx_rx_buffer - An Efx RX data buffer + * @dma_addr: DMA base address of the buffer + * @page: The associated page buffer. + * Will be %NULL if the buffer slot is currently free. + * @page_offset: If pending: offset in @page of DMA base address. + * If completed: offset in @page of Ethernet header. + * @len: If pending: length for DMA descriptor. + * If completed: received length, excluding hash prefix. + * @flags: Flags for buffer and packet state. These are only set on the + * first buffer of a scattered packet. + */ +struct efx_rx_buffer { + dma_addr_t dma_addr; + struct page *page; + u16 page_offset; + u16 len; + u16 flags; +}; +#define EFX_RX_BUF_LAST_IN_PAGE 0x0001 +#define EFX_RX_PKT_CSUMMED 0x0002 +#define EFX_RX_PKT_DISCARD 0x0004 +#define EFX_RX_PKT_TCP 0x0040 +#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */ + +/** + * struct efx_rx_page_state - Page-based rx buffer state + * + * Inserted at the start of every page allocated for receive buffers. + * Used to facilitate sharing dma mappings between recycled rx buffers + * and those passed up to the kernel. + * + * @dma_addr: The dma address of this page. + */ +struct efx_rx_page_state { + dma_addr_t dma_addr; + + unsigned int __pad[0] ____cacheline_aligned; +}; + +/** + * struct efx_rx_queue - An Efx RX queue + * @efx: The associated Efx NIC + * @core_index: Index of network core RX queue. Will be >= 0 iff this + * is associated with a real RX queue. + * @buffer: The software buffer ring + * @rxd: The hardware descriptor ring + * @ptr_mask: The size of the ring minus 1. + * @refill_enabled: Enable refill whenever fill level is low + * @flush_pending: Set when a RX flush is pending. Has the same lifetime as + * @rxq_flush_pending. 
+ * @added_count: Number of buffers added to the receive queue. + * @notified_count: Number of buffers given to NIC (<= @added_count). + * @removed_count: Number of buffers removed from the receive queue. + * @scatter_n: Used by NIC specific receive code. + * @scatter_len: Used by NIC specific receive code. + * @page_ring: The ring to store DMA mapped pages for reuse. + * @page_add: Counter to calculate the write pointer for the recycle ring. + * @page_remove: Counter to calculate the read pointer for the recycle ring. + * @page_recycle_count: The number of pages that have been recycled. + * @page_recycle_failed: The number of pages that couldn't be recycled because + * the kernel still held a reference to them. + * @page_recycle_full: The number of pages that were released because the + * recycle ring was full. + * @page_ptr_mask: The number of pages in the RX recycle ring minus 1. + * @max_fill: RX descriptor maximum fill level (<= ring size) + * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill + * (<= @max_fill) + * @min_fill: RX descriptor minimum non-zero fill level. + * This records the minimum fill level observed when a ring + * refill was triggered. + * @recycle_count: RX buffer recycle counter. + * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). + */ +struct efx_rx_queue { + struct efx_nic *efx; + int core_index; + struct efx_rx_buffer *buffer; + struct efx_special_buffer rxd; + unsigned int ptr_mask; + bool refill_enabled; + bool flush_pending; + + unsigned int added_count; + unsigned int notified_count; + unsigned int removed_count; + unsigned int scatter_n; + unsigned int scatter_len; + struct page **page_ring; + unsigned int page_add; + unsigned int page_remove; + unsigned int page_recycle_count; + unsigned int page_recycle_failed; + unsigned int page_recycle_full; + unsigned int page_ptr_mask; + unsigned int max_fill; + unsigned int fast_fill_trigger; + unsigned int min_fill; + unsigned int min_overfill; + unsigned int recycle_count; + struct timer_list slow_fill; + unsigned int slow_fill_count; + /* Statistics to supplement MAC stats */ + unsigned long rx_packets; +}; + +enum efx_sync_events_state { + SYNC_EVENTS_DISABLED = 0, + SYNC_EVENTS_QUIESCENT, + SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID, +}; + +/** + * struct efx_channel - An Efx channel + * + * A channel comprises an event queue, at least one TX queue, at least + * one RX queue, and an associated tasklet for processing the event + * queue. 
+ * + * @efx: Associated Efx NIC + * @channel: Channel instance number + * @type: Channel type definition + * @eventq_init: Event queue initialised flag + * @enabled: Channel enabled indicator + * @irq: IRQ number (MSI and MSI-X only) + * @irq_moderation: IRQ moderation value (in hardware ticks) + * @napi_dev: Net device used with NAPI + * @napi_str: NAPI control structure + * @state: state for NAPI vs busy polling + * @state_lock: lock protecting @state + * @eventq: Event queue buffer + * @eventq_mask: Event queue pointer mask + * @eventq_read_ptr: Event queue read pointer + * @event_test_cpu: Last CPU to handle interrupt or test event for this channel + * @irq_count: Number of IRQs since last adaptive moderation decision + * @irq_mod_score: IRQ moderation score + * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors + * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors + * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors + * @n_rx_mcast_mismatch: Count of unmatched multicast frames + * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors + * @n_rx_overlength: Count of RX_OVERLENGTH errors + * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun + * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to + * lack of descriptors + * @n_rx_merge_events: Number of RX merged completion events + * @n_rx_merge_packets: Number of RX packets completed by merged events + * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by + * __efx_rx_packet(), or zero if there is none + * @rx_pkt_index: Ring index of first buffer for next packet to be delivered + * by __efx_rx_packet(), if @rx_pkt_n_frags != 0 + * @rx_queue: RX queue for this channel + * @tx_queue: TX queues for this channel + * @sync_events_state: Current state of sync events on this channel + * @sync_timestamp_major: Major part of the last ptp sync event + * @sync_timestamp_minor: Minor part of the last ptp sync event + */ +struct efx_channel { + struct efx_nic *efx; + int channel; + const struct efx_channel_type *type; + bool eventq_init; + bool enabled; + int irq; + unsigned int irq_moderation; + struct net_device *napi_dev; + struct napi_struct napi_str; +#ifdef CONFIG_NET_RX_BUSY_POLL + unsigned int state; + spinlock_t state_lock; +#define EFX_CHANNEL_STATE_IDLE 0 +#define EFX_CHANNEL_STATE_NAPI (1 << 0) /* NAPI owns this channel */ +#define EFX_CHANNEL_STATE_POLL (1 << 1) /* poll owns this channel */ +#define EFX_CHANNEL_STATE_DISABLED (1 << 2) /* channel is disabled */ +#define EFX_CHANNEL_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this channel */ +#define EFX_CHANNEL_STATE_POLL_YIELD (1 << 4) /* poll yielded this channel */ +#define EFX_CHANNEL_OWNED \ + (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL) +#define EFX_CHANNEL_LOCKED \ + (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED) +#define EFX_CHANNEL_USER_PEND \ + (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD) +#endif /* CONFIG_NET_RX_BUSY_POLL */ + struct efx_special_buffer eventq; + unsigned int eventq_mask; + unsigned int eventq_read_ptr; + int event_test_cpu; + + unsigned int irq_count; + unsigned int irq_mod_score; +#ifdef CONFIG_RFS_ACCEL + unsigned int rfs_filters_added; +#endif + + unsigned n_rx_tobe_disc; + unsigned n_rx_ip_hdr_chksum_err; + unsigned n_rx_tcp_udp_chksum_err; + unsigned n_rx_mcast_mismatch; + unsigned n_rx_frm_trunc; + unsigned n_rx_overlength; + unsigned n_skbuff_leaks; + unsigned int n_rx_nodesc_trunc; + unsigned int n_rx_merge_events; + unsigned int n_rx_merge_packets; + + unsigned int 
rx_pkt_n_frags; + unsigned int rx_pkt_index; + + struct efx_rx_queue rx_queue; + struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; + + enum efx_sync_events_state sync_events_state; + u32 sync_timestamp_major; + u32 sync_timestamp_minor; +}; + +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void efx_channel_init_lock(struct efx_channel *channel) +{ + spin_lock_init(&channel->state_lock); +} + +/* Called from the device poll routine to get ownership of a channel. */ +static inline bool efx_channel_lock_napi(struct efx_channel *channel) +{ + bool rc = true; + + spin_lock_bh(&channel->state_lock); + if (channel->state & EFX_CHANNEL_LOCKED) { + WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI); + channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD; + rc = false; + } else { + /* we don't care if someone yielded */ + channel->state = EFX_CHANNEL_STATE_NAPI; + } + spin_unlock_bh(&channel->state_lock); + return rc; +} + +static inline void efx_channel_unlock_napi(struct efx_channel *channel) +{ + spin_lock_bh(&channel->state_lock); + WARN_ON(channel->state & + (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD)); + + channel->state &= EFX_CHANNEL_STATE_DISABLED; + spin_unlock_bh(&channel->state_lock); +} + +/* Called from efx_busy_poll(). */ +static inline bool efx_channel_lock_poll(struct efx_channel *channel) +{ + bool rc = true; + + spin_lock_bh(&channel->state_lock); + if ((channel->state & EFX_CHANNEL_LOCKED)) { + channel->state |= EFX_CHANNEL_STATE_POLL_YIELD; + rc = false; + } else { + /* preserve yield marks */ + channel->state |= EFX_CHANNEL_STATE_POLL; + } + spin_unlock_bh(&channel->state_lock); + return rc; +} + +/* Returns true if NAPI tried to get the channel while it was locked. */ +static inline void efx_channel_unlock_poll(struct efx_channel *channel) +{ + spin_lock_bh(&channel->state_lock); + WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI); + + /* will reset state to idle, unless channel is disabled */ + channel->state &= EFX_CHANNEL_STATE_DISABLED; + spin_unlock_bh(&channel->state_lock); +} + +/* True if a socket is polling, even if it did not get the lock. */ +static inline bool efx_channel_busy_polling(struct efx_channel *channel) +{ + WARN_ON(!(channel->state & EFX_CHANNEL_OWNED)); + return channel->state & EFX_CHANNEL_USER_PEND; +} + +static inline void efx_channel_enable(struct efx_channel *channel) +{ + spin_lock_bh(&channel->state_lock); + channel->state = EFX_CHANNEL_STATE_IDLE; + spin_unlock_bh(&channel->state_lock); +} + +/* False if the channel is currently owned. 
*/ +static inline bool efx_channel_disable(struct efx_channel *channel) +{ + bool rc = true; + + spin_lock_bh(&channel->state_lock); + if (channel->state & EFX_CHANNEL_OWNED) + rc = false; + channel->state |= EFX_CHANNEL_STATE_DISABLED; + spin_unlock_bh(&channel->state_lock); + + return rc; +} + +#else /* CONFIG_NET_RX_BUSY_POLL */ + +static inline void efx_channel_init_lock(struct efx_channel *channel) +{ +} + +static inline bool efx_channel_lock_napi(struct efx_channel *channel) +{ + return true; +} + +static inline void efx_channel_unlock_napi(struct efx_channel *channel) +{ +} + +static inline bool efx_channel_lock_poll(struct efx_channel *channel) +{ + return false; +} + +static inline void efx_channel_unlock_poll(struct efx_channel *channel) +{ +} + +static inline bool efx_channel_busy_polling(struct efx_channel *channel) +{ + return false; +} + +static inline void efx_channel_enable(struct efx_channel *channel) +{ +} + +static inline bool efx_channel_disable(struct efx_channel *channel) +{ + return true; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +/** + * struct efx_msi_context - Context for each MSI + * @efx: The associated NIC + * @index: Index of the channel/IRQ + * @name: Name of the channel/IRQ + * + * Unlike &struct efx_channel, this is never reallocated and is always + * safe for the IRQ handler to access. + */ +struct efx_msi_context { + struct efx_nic *efx; + unsigned int index; + char name[IFNAMSIZ + 6]; +}; + +/** + * struct efx_channel_type - distinguishes traffic and extra channels + * @handle_no_channel: Handle failure to allocate an extra channel + * @pre_probe: Set up extra state prior to initialisation + * @post_remove: Tear down extra state after finalisation, if allocated. + * May be called on channels that have not been probed. + * @get_name: Generate the channel's name (used for its IRQ handler) + * @copy: Copy the channel state prior to reallocation. May be %NULL if + * reallocation is not supported. + * @receive_skb: Handle an skb ready to be passed to netif_receive_skb() + * @keep_eventq: Flag for whether event queue should be kept initialised + * while the device is stopped + */ +struct efx_channel_type { + void (*handle_no_channel)(struct efx_nic *); + int (*pre_probe)(struct efx_channel *); + void (*post_remove)(struct efx_channel *); + void (*get_name)(struct efx_channel *, char *buf, size_t len); + struct efx_channel *(*copy)(const struct efx_channel *); + bool (*receive_skb)(struct efx_channel *, struct sk_buff *); + bool keep_eventq; +}; + +enum efx_led_mode { + EFX_LED_OFF = 0, + EFX_LED_ON = 1, + EFX_LED_DEFAULT = 2 +}; + +#define STRING_TABLE_LOOKUP(val, member) \ + ((val) < member ## _max) ? 
member ## _names[val] : "(invalid)" + +extern const char *const efx_loopback_mode_names[]; +extern const unsigned int efx_loopback_mode_max; +#define LOOPBACK_MODE(efx) \ + STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) + +extern const char *const efx_reset_type_names[]; +extern const unsigned int efx_reset_type_max; +#define RESET_TYPE(type) \ + STRING_TABLE_LOOKUP(type, efx_reset_type) + +enum efx_int_mode { + /* Be careful if altering to correct macro below */ + EFX_INT_MODE_MSIX = 0, + EFX_INT_MODE_MSI = 1, + EFX_INT_MODE_LEGACY = 2, + EFX_INT_MODE_MAX /* Insert any new items before this */ +}; +#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) + +enum nic_state { + STATE_UNINIT = 0, /* device being probed/removed or is frozen */ + STATE_READY = 1, /* hardware ready and netdev registered */ + STATE_DISABLED = 2, /* device disabled due to hardware errors */ + STATE_RECOVERY = 3, /* device recovering from PCI error */ +}; + +/* Forward declaration */ +struct efx_nic; + +/* Pseudo bit-mask flow control field */ +#define EFX_FC_RX FLOW_CTRL_RX +#define EFX_FC_TX FLOW_CTRL_TX +#define EFX_FC_AUTO 4 + +/** + * struct efx_link_state - Current state of the link + * @up: Link is up + * @fd: Link is full-duplex + * @fc: Actual flow control flags + * @speed: Link speed (Mbps) + */ +struct efx_link_state { + bool up; + bool fd; + u8 fc; + unsigned int speed; +}; + +static inline bool efx_link_state_equal(const struct efx_link_state *left, + const struct efx_link_state *right) +{ + return left->up == right->up && left->fd == right->fd && + left->fc == right->fc && left->speed == right->speed; +} + +/** + * struct efx_phy_operations - Efx PHY operations table + * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, + * efx->loopback_modes. + * @init: Initialise PHY + * @fini: Shut down PHY + * @reconfigure: Reconfigure PHY (e.g. for new link parameters) + * @poll: Update @link_state and report whether it changed. + * Serialised by the mac_lock. + * @get_settings: Get ethtool settings. Serialised by the mac_lock. + * @set_settings: Set ethtool settings. Serialised by the mac_lock. + * @set_npage_adv: Set abilities advertised in (Extended) Next Page + * (only needed where AN bit is set in mmds) + * @test_alive: Test that PHY is 'alive' (online) + * @test_name: Get the name of a PHY-specific test/result + * @run_tests: Run tests and record results as appropriate (offline). + * Flags are the ethtool tests flags. 
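+ *
+ * A minimal call-pattern sketch, assuming "efx" is a probed NIC: the hooks
+ * are reached through efx->phy_op, and the reconfigure path typically runs
+ * with the mac_lock held, e.g.
+ *
+ *	mutex_lock(&efx->mac_lock);
+ *	rc = efx->phy_op->reconfigure(efx);
+ *	mutex_unlock(&efx->mac_lock);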
+ */ +struct efx_phy_operations { + int (*probe) (struct efx_nic *efx); + int (*init) (struct efx_nic *efx); + void (*fini) (struct efx_nic *efx); + void (*remove) (struct efx_nic *efx); + int (*reconfigure) (struct efx_nic *efx); + bool (*poll) (struct efx_nic *efx); + void (*get_settings) (struct efx_nic *efx, + struct ethtool_cmd *ecmd); + int (*set_settings) (struct efx_nic *efx, + struct ethtool_cmd *ecmd); + void (*set_npage_adv) (struct efx_nic *efx, u32); + int (*test_alive) (struct efx_nic *efx); + const char *(*test_name) (struct efx_nic *efx, unsigned int index); + int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); + int (*get_module_eeprom) (struct efx_nic *efx, + struct ethtool_eeprom *ee, + u8 *data); + int (*get_module_info) (struct efx_nic *efx, + struct ethtool_modinfo *modinfo); +}; + +/** + * enum efx_phy_mode - PHY operating mode flags + * @PHY_MODE_NORMAL: on and should pass traffic + * @PHY_MODE_TX_DISABLED: on with TX disabled + * @PHY_MODE_LOW_POWER: set to low power through MDIO + * @PHY_MODE_OFF: switched off through external control + * @PHY_MODE_SPECIAL: on but will not pass traffic + */ +enum efx_phy_mode { + PHY_MODE_NORMAL = 0, + PHY_MODE_TX_DISABLED = 1, + PHY_MODE_LOW_POWER = 2, + PHY_MODE_OFF = 4, + PHY_MODE_SPECIAL = 8, +}; + +static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) +{ + return !!(mode & ~PHY_MODE_TX_DISABLED); +} + +/** + * struct efx_hw_stat_desc - Description of a hardware statistic + * @name: Name of the statistic as visible through ethtool, or %NULL if + * it should not be exposed + * @dma_width: Width in bits (0 for non-DMA statistics) + * @offset: Offset within stats (ignored for non-DMA statistics) + */ +struct efx_hw_stat_desc { + const char *name; + u16 dma_width; + u16 offset; +}; + +/* Number of bits used in a multicast filter hash address */ +#define EFX_MCAST_HASH_BITS 8 + +/* Number of (single-bit) entries in a multicast filter hash */ +#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) + +/* An Efx multicast filter hash */ +union efx_multicast_hash { + u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; + efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; +}; + +struct efx_vf; +struct vfdi_status; + +/** + * struct efx_nic - an Efx NIC + * @name: Device name (net device name or bus id before net device registered) + * @pci_dev: The PCI device + * @node: List node for maintaning primary/secondary function lists + * @primary: &struct efx_nic instance for the primary function of this + * controller. May be the same structure, and may be %NULL if no + * primary function is bound. Serialised by rtnl_lock. + * @secondary_list: List of &struct efx_nic instances for the secondary PCI + * functions of the controller, if this is for the primary function. + * Serialised by rtnl_lock. + * @type: Controller type attributes + * @legacy_irq: IRQ number + * @workqueue: Workqueue for port reconfigures and the HW monitor. + * Work items do not hold and must not acquire RTNL. + * @workqueue_name: Name of workqueue + * @reset_work: Scheduled reset workitem + * @membase_phys: Memory BAR value as physical address + * @membase: Memory BAR value + * @interrupt_mode: Interrupt mode + * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds + * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues + * @irq_rx_moderation: IRQ moderation time for RX event queues + * @msg_enable: Log message enable flags + * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. 
+ * @reset_pending: Bitmask for pending resets + * @tx_queue: TX DMA queues + * @rx_queue: RX DMA queues + * @channel: Channels + * @msi_context: Context for each MSI + * @extra_channel_types: Types of extra (non-traffic) channels that + * should be allocated for this NIC + * @rxq_entries: Size of receive queues requested by user. + * @txq_entries: Size of transmit queues requested by user. + * @txq_stop_thresh: TX queue fill level at or above which we stop it. + * @txq_wake_thresh: TX queue fill level at or below which we wake it. + * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches + * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches + * @sram_lim_qw: Qword address limit of SRAM + * @next_buffer_table: First available buffer table id + * @n_channels: Number of channels in use + * @n_rx_channels: Number of channels used for RX (= number of RX queues) + * @n_tx_channels: Number of channels used for TX + * @rx_ip_align: RX DMA address offset to have IP header aligned in + * in accordance with NET_IP_ALIGN + * @rx_dma_len: Current maximum RX DMA length + * @rx_buffer_order: Order (log2) of number of pages for each RX buffer + * @rx_buffer_truesize: Amortised allocation size of an RX buffer, + * for use in sk_buff::truesize + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data + * (valid only if @rx_prefix_size != 0; always negative) + * @rx_packet_len_offset: Offset of RX packet length from start of packet data + * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative) + * @rx_packet_ts_offset: Offset of timestamp from start of packet data + * (valid only if channel->sync_timestamps_enabled; always negative) + * @rx_hash_key: Toeplitz hash key for RSS + * @rx_indir_table: Indirection table for RSS + * @rx_scatter: Scatter mode enabled for receives + * @int_error_count: Number of internal errors seen recently + * @int_error_expire: Time at which error count will be expired + * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will + * acknowledge but do nothing else. + * @irq_status: Interrupt status buffer + * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 + * @irq_level: IRQ level/index for IRQs not triggered by an event queue + * @selftest_work: Work item for asynchronous self-test + * @mtd_list: List of MTDs attached to the NIC + * @nic_data: Hardware dependent state + * @mcdi: Management-Controller-to-Driver Interface state + * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, + * efx_monitor() and efx_reconfigure_port() + * @port_enabled: Port enabled indicator. + * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and + * efx_mac_work() with kernel interfaces. Safe to read under any + * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must + * be held to modify it. + * @port_initialized: Port initialized? + * @net_dev: Operating system network device. Consider holding the rtnl lock + * @stats_buffer: DMA buffer for statistics + * @phy_type: PHY type + * @phy_op: PHY interface + * @phy_data: PHY private data (including PHY-specific stats) + * @mdio: PHY MDIO interface + * @mdio_bus: PHY MDIO bus ID (only used by Siena) + * @phy_mode: PHY operating mode. Serialised by @mac_lock. 
+ * @link_advertising: Autonegotiation advertising flags + * @link_state: Current state of the link + * @n_link_state_changes: Number of times the link has changed state + * @unicast_filter: Flag for Falcon-arch simple unicast filter. + * Protected by @mac_lock. + * @multicast_hash: Multicast hash table for Falcon-arch. + * Protected by @mac_lock. + * @wanted_fc: Wanted flow control flags + * @fc_disable: When non-zero flow control is disabled. Typically used to + * ensure that network back pressure doesn't delay dma queue flushes. + * Serialised by the rtnl lock. + * @mac_work: Work item for changing MAC promiscuity and multicast hash + * @loopback_mode: Loopback status + * @loopback_modes: Supported loopback mode bitmask + * @loopback_selftest: Offline self-test private state + * @filter_lock: Filter table lock + * @filter_state: Architecture-dependent filter table state + * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, + * indexed by filter ID + * @rps_expire_index: Next index to check for expiry in @rps_flow_id + * @active_queues: Count of RX and TX queues that haven't been flushed and drained. + * @rxq_flush_pending: Count of number of receive queues that need to be flushed. + * Decremented when the efx_flush_rx_queue() is called. + * @rxq_flush_outstanding: Count of number of RX flushes started but not yet + * completed (either success or failure). Not used when MCDI is used to + * flush receive queues. + * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions. + * @vf: Array of &struct efx_vf objects. + * @vf_count: Number of VFs intended to be enabled. + * @vf_init_count: Number of VFs that have been fully initialised. + * @vi_scale: log2 number of vnics per VF. + * @ptp_data: PTP state data + * @vpd_sn: Serial number read from VPD + * @monitor_work: Hardware monitor workitem + * @biu_lock: BIU (bus interface unit) lock + * @last_irq_cpu: Last CPU to handle a possible test interrupt. This + * field is used by efx_test_interrupts() to verify that an + * interrupt has occurred. + * @stats_lock: Statistics update lock. Must be held when calling + * efx_nic_type::{update,start,stop}_stats. + * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb + * + * This is stored in the private area of the &struct net_device. 
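+ *
+ * Since the structure lives in the net_device private area, a minimal
+ * sketch of recovering it (assuming "net_dev" was allocated for this
+ * driver) is:
+ *
+ *	struct efx_nic *efx = netdev_priv(net_dev);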
+ */ +struct efx_nic { + /* The following fields should be written very rarely */ + + char name[IFNAMSIZ]; + struct list_head node; + struct efx_nic *primary; + struct list_head secondary_list; + struct pci_dev *pci_dev; + unsigned int port_num; + const struct efx_nic_type *type; + int legacy_irq; + bool eeh_disabled_legacy_irq; + struct workqueue_struct *workqueue; + char workqueue_name[16]; + struct work_struct reset_work; + resource_size_t membase_phys; + void __iomem *membase; + + enum efx_int_mode interrupt_mode; + unsigned int timer_quantum_ns; + bool irq_rx_adaptive; + unsigned int irq_rx_moderation; + u32 msg_enable; + + enum nic_state state; + unsigned long reset_pending; + + struct efx_channel *channel[EFX_MAX_CHANNELS]; + struct efx_msi_context msi_context[EFX_MAX_CHANNELS]; + const struct efx_channel_type * + extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; + + unsigned rxq_entries; + unsigned txq_entries; + unsigned int txq_stop_thresh; + unsigned int txq_wake_thresh; + + unsigned tx_dc_base; + unsigned rx_dc_base; + unsigned sram_lim_qw; + unsigned next_buffer_table; + + unsigned int max_channels; + unsigned n_channels; + unsigned n_rx_channels; + unsigned rss_spread; + unsigned tx_channel_offset; + unsigned n_tx_channels; + unsigned int rx_ip_align; + unsigned int rx_dma_len; + unsigned int rx_buffer_order; + unsigned int rx_buffer_truesize; + unsigned int rx_page_buf_step; + unsigned int rx_bufs_per_page; + unsigned int rx_pages_per_batch; + unsigned int rx_prefix_size; + int rx_packet_hash_offset; + int rx_packet_len_offset; + int rx_packet_ts_offset; + u8 rx_hash_key[40]; + u32 rx_indir_table[128]; + bool rx_scatter; + + unsigned int_error_count; + unsigned long int_error_expire; + + bool irq_soft_enabled; + struct efx_buffer irq_status; + unsigned irq_zero_count; + unsigned irq_level; + struct delayed_work selftest_work; + +#ifdef CONFIG_SFC_MTD + struct list_head mtd_list; +#endif + + void *nic_data; + struct efx_mcdi_data *mcdi; + + struct mutex mac_lock; + struct work_struct mac_work; + bool port_enabled; + + bool mc_bist_for_other_fn; + bool port_initialized; + struct net_device *net_dev; + + struct efx_buffer stats_buffer; + u64 rx_nodesc_drops_total; + u64 rx_nodesc_drops_while_down; + bool rx_nodesc_drops_prev_state; + + unsigned int phy_type; + const struct efx_phy_operations *phy_op; + void *phy_data; + struct mdio_if_info mdio; + unsigned int mdio_bus; + enum efx_phy_mode phy_mode; + + u32 link_advertising; + struct efx_link_state link_state; + unsigned int n_link_state_changes; + + bool unicast_filter; + union efx_multicast_hash multicast_hash; + u8 wanted_fc; + unsigned fc_disable; + + atomic_t rx_reset; + enum efx_loopback_mode loopback_mode; + u64 loopback_modes; + + void *loopback_selftest; + + spinlock_t filter_lock; + void *filter_state; +#ifdef CONFIG_RFS_ACCEL + u32 *rps_flow_id; + unsigned int rps_expire_index; +#endif + + atomic_t active_queues; + atomic_t rxq_flush_pending; + atomic_t rxq_flush_outstanding; + wait_queue_head_t flush_wq; + +#ifdef CONFIG_SFC_SRIOV + struct efx_vf *vf; + unsigned vf_count; + unsigned vf_init_count; + unsigned vi_scale; +#endif + + struct efx_ptp_data *ptp_data; + + char *vpd_sn; + + /* The following fields may be written more often */ + + struct delayed_work monitor_work ____cacheline_aligned_in_smp; + spinlock_t biu_lock; + int last_irq_cpu; + spinlock_t stats_lock; + atomic_t n_rx_noskb_drops; +}; + +static inline int efx_dev_registered(struct efx_nic *efx) +{ + return efx->net_dev->reg_state == NETREG_REGISTERED; +} + 
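+/* A minimal sketch of a guarded call into the networking core: the helper
+ * name below is hypothetical and used for illustration only; it assumes
+ * efx->net_dev is valid and relies on efx_dev_registered() above.
+ */
+static inline void efx_example_carrier_off(struct efx_nic *efx)
+{
+	if (efx_dev_registered(efx))
+		netif_carrier_off(efx->net_dev);
+}
+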
+static inline unsigned int efx_port_num(struct efx_nic *efx) +{ + return efx->port_num; +} + +struct efx_mtd_partition { + struct list_head node; + struct mtd_info mtd; + const char *dev_type_name; + const char *type_name; + char name[IFNAMSIZ + 20]; +}; + +/** + * struct efx_nic_type - Efx device type definition + * @mem_map_size: Get memory BAR mapped size + * @probe: Probe the controller + * @remove: Free resources allocated by probe() + * @init: Initialise the controller + * @dimension_resources: Dimension controller resources (buffer table, + * and VIs once the available interrupt resources are clear) + * @fini: Shut down the controller + * @monitor: Periodic function for polling link state and hardware monitor + * @map_reset_reason: Map ethtool reset reason to a reset method + * @map_reset_flags: Map ethtool reset flags to a reset method, if possible + * @reset: Reset the controller hardware and possibly the PHY. This will + * be called while the controller is uninitialised. + * @probe_port: Probe the MAC and PHY + * @remove_port: Free resources allocated by probe_port() + * @handle_global_event: Handle a "global" event (may be %NULL) + * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues) + * @prepare_flush: Prepare the hardware for flushing the DMA queues + * (for Falcon architecture) + * @finish_flush: Clean up after flushing the DMA queues (for Falcon + * architecture) + * @prepare_flr: Prepare for an FLR + * @finish_flr: Clean up after an FLR + * @describe_stats: Describe statistics for ethtool + * @update_stats: Update statistics not provided by event handling. + * Either argument may be %NULL. + * @start_stats: Start the regular fetching of statistics + * @pull_stats: Pull stats from the NIC and wait until they arrive. + * @stop_stats: Stop the regular fetching of statistics + * @set_id_led: Set state of identifying LED or revert to automatic function + * @push_irq_moderation: Apply interrupt moderation value + * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY + * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL) + * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings + * to the hardware. Serialised by the mac_lock. + * @check_mac_fault: Check MAC fault state. True if fault present. + * @get_wol: Get WoL configuration from driver state + * @set_wol: Push WoL configuration to the NIC + * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) + * @test_chip: Test registers. May use efx_farch_test_registers(), and is + * expected to reset the NIC. + * @test_nvram: Test validity of NVRAM contents + * @mcdi_request: Send an MCDI request with the given header and SDU. + * The SDU length may be any value from 0 up to the protocol- + * defined maximum, but its buffer will be padded to a multiple + * of 4 bytes. + * @mcdi_poll_response: Test whether an MCDI response is available. + * @mcdi_read_response: Read the MCDI response PDU. The offset will + * be a multiple of 4. The length may not be, but the buffer + * will be padded so it is safe to round up. + * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so, + * return an appropriate error code for aborting any current + * request; otherwise return 0. + * @irq_enable_master: Enable IRQs on the NIC. Each event queue must + * be separately enabled after this. + * @irq_test_generate: Generate a test IRQ + * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event + * queue must be separately disabled before this. 
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is + * a pointer to the &struct efx_msi_context for the channel. + * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument + * is a pointer to the &struct efx_nic. + * @tx_probe: Allocate resources for TX queue + * @tx_init: Initialise TX queue on the NIC + * @tx_remove: Free resources for TX queue + * @tx_write: Write TX descriptors and doorbell + * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC + * @rx_probe: Allocate resources for RX queue + * @rx_init: Initialise RX queue on the NIC + * @rx_remove: Free resources for RX queue + * @rx_write: Write RX descriptors and doorbell + * @rx_defer_refill: Generate a refill reminder event + * @ev_probe: Allocate resources for event queue + * @ev_init: Initialise event queue on the NIC + * @ev_fini: Deinitialise event queue on the NIC + * @ev_remove: Free resources for event queue + * @ev_process: Process events for a queue, up to the given NAPI quota + * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ + * @ev_test_generate: Generate a test event + * @filter_table_probe: Probe filter capabilities and set up filter software state + * @filter_table_restore: Restore filters removed from hardware + * @filter_table_remove: Remove filters from hardware and tear down software state + * @filter_update_rx_scatter: Update filters after change to rx scatter setting + * @filter_insert: add or replace a filter + * @filter_remove_safe: remove a filter by ID, carefully + * @filter_get_safe: retrieve a filter by ID, carefully + * @filter_clear_rx: Remove all RX filters whose priority is less than or + * equal to the given priority and is not %EFX_FILTER_PRI_AUTO + * @filter_count_rx_used: Get the number of filters in use at a given priority + * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1 + * @filter_get_rx_ids: Get list of RX filters at a given priority + * @filter_rfs_insert: Add or replace a filter for RFS. This must be + * atomic. The hardware change may be asynchronous but should + * not be delayed for long. It may fail if this can't be done + * atomically. + * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS. + * This must check whether the specified table entry is used by RFS + * and that rps_may_expire_flow() returns true for it. + * @mtd_probe: Probe and add MTD partitions associated with this net device, + * using efx_mtd_add() + * @mtd_rename: Set an MTD partition name using the net device name + * @mtd_read: Read from an MTD partition + * @mtd_erase: Erase part of an MTD partition + * @mtd_write: Write to an MTD partition + * @mtd_sync: Wait for write-back to complete on MTD partition. This + * also notifies the driver that a writer has finished using this + * partition. + * @ptp_write_host_time: Send host time to MC as part of sync protocol + * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX + * timestamping, possibly only temporarily for the purposes of a reset. + * @ptp_set_ts_config: Set hardware timestamp configuration. The flags + * and tx_type will already have been validated but this operation + * must validate and update rx_filter. 
+ * @revision: Hardware architecture revision + * @txd_ptr_tbl_base: TX descriptor ring base address + * @rxd_ptr_tbl_base: RX descriptor ring base address + * @buf_tbl_base: Buffer table base address + * @evq_ptr_tbl_base: Event queue pointer table base address + * @evq_rptr_tbl_base: Event queue read-pointer table base address + * @max_dma_mask: Maximum possible DMA mask + * @rx_prefix_size: Size of RX prefix before packet data + * @rx_hash_offset: Offset of RX flow hash within prefix + * @rx_ts_offset: Offset of timestamp within prefix + * @rx_buffer_padding: Size of padding at end of RX packet + * @can_rx_scatter: NIC is able to scatter packets to multiple buffers + * @always_rx_scatter: NIC will always scatter packets to multiple buffers + * @max_interrupt_mode: Highest capability interrupt mode supported + * from &enum efx_init_mode. + * @timer_period_max: Maximum period of interrupt timer (in ticks) + * @offload_features: net_device feature flags for protocol offload + * features implemented in hardware + * @mcdi_max_ver: Maximum MCDI version supported + * @hwtstamp_filters: Mask of hardware timestamp filter types supported + */ +struct efx_nic_type { + unsigned int (*mem_map_size)(struct efx_nic *efx); + int (*probe)(struct efx_nic *efx); + void (*remove)(struct efx_nic *efx); + int (*init)(struct efx_nic *efx); + int (*dimension_resources)(struct efx_nic *efx); + void (*fini)(struct efx_nic *efx); + void (*monitor)(struct efx_nic *efx); + enum reset_type (*map_reset_reason)(enum reset_type reason); + int (*map_reset_flags)(u32 *flags); + int (*reset)(struct efx_nic *efx, enum reset_type method); + int (*probe_port)(struct efx_nic *efx); + void (*remove_port)(struct efx_nic *efx); + bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); + int (*fini_dmaq)(struct efx_nic *efx); + void (*prepare_flush)(struct efx_nic *efx); + void (*finish_flush)(struct efx_nic *efx); + void (*prepare_flr)(struct efx_nic *efx); + void (*finish_flr)(struct efx_nic *efx); + size_t (*describe_stats)(struct efx_nic *efx, u8 *names); + size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats); + void (*start_stats)(struct efx_nic *efx); + void (*pull_stats)(struct efx_nic *efx); + void (*stop_stats)(struct efx_nic *efx); + void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); + void (*push_irq_moderation)(struct efx_channel *channel); + int (*reconfigure_port)(struct efx_nic *efx); + void (*prepare_enable_fc_tx)(struct efx_nic *efx); + int (*reconfigure_mac)(struct efx_nic *efx); + bool (*check_mac_fault)(struct efx_nic *efx); + void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); + int (*set_wol)(struct efx_nic *efx, u32 type); + void (*resume_wol)(struct efx_nic *efx); + int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); + int (*test_nvram)(struct efx_nic *efx); + void (*mcdi_request)(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len); + bool (*mcdi_poll_response)(struct efx_nic *efx); + void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, + size_t pdu_offset, size_t pdu_len); + int (*mcdi_poll_reboot)(struct efx_nic *efx); + void (*irq_enable_master)(struct efx_nic *efx); + void (*irq_test_generate)(struct efx_nic *efx); + void (*irq_disable_non_ev)(struct efx_nic *efx); + irqreturn_t (*irq_handle_msi)(int irq, void *dev_id); + irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id); + int (*tx_probe)(struct efx_tx_queue *tx_queue); + 
void (*tx_init)(struct efx_tx_queue *tx_queue); + void (*tx_remove)(struct efx_tx_queue *tx_queue); + void (*tx_write)(struct efx_tx_queue *tx_queue); + void (*rx_push_rss_config)(struct efx_nic *efx); + int (*rx_probe)(struct efx_rx_queue *rx_queue); + void (*rx_init)(struct efx_rx_queue *rx_queue); + void (*rx_remove)(struct efx_rx_queue *rx_queue); + void (*rx_write)(struct efx_rx_queue *rx_queue); + void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); + int (*ev_probe)(struct efx_channel *channel); + int (*ev_init)(struct efx_channel *channel); + void (*ev_fini)(struct efx_channel *channel); + void (*ev_remove)(struct efx_channel *channel); + int (*ev_process)(struct efx_channel *channel, int quota); + void (*ev_read_ack)(struct efx_channel *channel); + void (*ev_test_generate)(struct efx_channel *channel); + int (*filter_table_probe)(struct efx_nic *efx); + void (*filter_table_restore)(struct efx_nic *efx); + void (*filter_table_remove)(struct efx_nic *efx); + void (*filter_update_rx_scatter)(struct efx_nic *efx); + s32 (*filter_insert)(struct efx_nic *efx, + struct efx_filter_spec *spec, bool replace); + int (*filter_remove_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); + int (*filter_get_safe)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id, struct efx_filter_spec *); + int (*filter_clear_rx)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_count_rx_used)(struct efx_nic *efx, + enum efx_filter_priority priority); + u32 (*filter_get_rx_id_limit)(struct efx_nic *efx); + s32 (*filter_get_rx_ids)(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 *buf, u32 size); +#ifdef CONFIG_RFS_ACCEL + s32 (*filter_rfs_insert)(struct efx_nic *efx, + struct efx_filter_spec *spec); + bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif +#ifdef CONFIG_SFC_MTD + int (*mtd_probe)(struct efx_nic *efx); + void (*mtd_rename)(struct efx_mtd_partition *part); + int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, u8 *buffer); + int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len); + int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len, + size_t *retlen, const u8 *buffer); + int (*mtd_sync)(struct mtd_info *mtd); +#endif + void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time); + int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); + int (*ptp_set_ts_config)(struct efx_nic *efx, + struct hwtstamp_config *init); + int (*sriov_init)(struct efx_nic *efx); + void (*sriov_fini)(struct efx_nic *efx); + void (*sriov_mac_address_changed)(struct efx_nic *efx); + bool (*sriov_wanted)(struct efx_nic *efx); + void (*sriov_reset)(struct efx_nic *efx); + + int revision; + unsigned int txd_ptr_tbl_base; + unsigned int rxd_ptr_tbl_base; + unsigned int buf_tbl_base; + unsigned int evq_ptr_tbl_base; + unsigned int evq_rptr_tbl_base; + u64 max_dma_mask; + unsigned int rx_prefix_size; + unsigned int rx_hash_offset; + unsigned int rx_ts_offset; + unsigned int rx_buffer_padding; + bool can_rx_scatter; + bool always_rx_scatter; + unsigned int max_interrupt_mode; + unsigned int timer_period_max; + netdev_features_t offload_features; + int mcdi_max_ver; + unsigned int max_rx_ip_filters; + u32 hwtstamp_filters; +}; + +/************************************************************************** + * + * Prototypes and inline functions + * + *************************************************************************/ + 
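+/* Illustrative sketch: generic driver code is expected to reach the
+ * hardware only through the efx_nic_type method table above, rather than
+ * calling a Falcon/Siena/EF10 routine directly.  The wrapper below shows
+ * the dispatch pattern only; the name efx_example_reset() is hypothetical
+ * and is not defined anywhere in this driver.
+ *
+ *	static int efx_example_reset(struct efx_nic *efx,
+ *				     enum reset_type reason)
+ *	{
+ *		enum reset_type method =
+ *			efx->type->map_reset_reason(reason);
+ *
+ *		return efx->type->reset(efx, method);
+ *	}
+ */
+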
+static inline struct efx_channel * +efx_get_channel(struct efx_nic *efx, unsigned index) +{ + EFX_BUG_ON_PARANOID(index >= efx->n_channels); + return efx->channel[index]; +} + +/* Iterate over all used channels */ +#define efx_for_each_channel(_channel, _efx) \ + for (_channel = (_efx)->channel[0]; \ + _channel; \ + _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ + (_efx)->channel[_channel->channel + 1] : NULL) + +/* Iterate over all used channels in reverse */ +#define efx_for_each_channel_rev(_channel, _efx) \ + for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \ + _channel; \ + _channel = _channel->channel ? \ + (_efx)->channel[_channel->channel - 1] : NULL) + +static inline struct efx_tx_queue * +efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) +{ + EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || + type >= EFX_TXQ_TYPES); + return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; +} + +static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) +{ + return channel->channel - channel->efx->tx_channel_offset < + channel->efx->n_tx_channels; +} + +static inline struct efx_tx_queue * +efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) +{ + EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) || + type >= EFX_TXQ_TYPES); + return &channel->tx_queue[type]; +} + +static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) +{ + return !(tx_queue->efx->net_dev->num_tc < 2 && + tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); +} + +/* Iterate over all TX queues belonging to a channel */ +#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ + if (!efx_channel_has_tx_queues(_channel)) \ + ; \ + else \ + for (_tx_queue = (_channel)->tx_queue; \ + _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \ + efx_tx_queue_used(_tx_queue); \ + _tx_queue++) + +/* Iterate over all possible TX queues belonging to a channel */ +#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ + if (!efx_channel_has_tx_queues(_channel)) \ + ; \ + else \ + for (_tx_queue = (_channel)->tx_queue; \ + _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ + _tx_queue++) + +static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) +{ + return channel->rx_queue.core_index >= 0; +} + +static inline struct efx_rx_queue * +efx_channel_get_rx_queue(struct efx_channel *channel) +{ + EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel)); + return &channel->rx_queue; +} + +/* Iterate over all RX queues belonging to a channel */ +#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ + if (!efx_channel_has_rx_queue(_channel)) \ + ; \ + else \ + for (_rx_queue = &(_channel)->rx_queue; \ + _rx_queue; \ + _rx_queue = NULL) + +static inline struct efx_channel * +efx_rx_queue_channel(struct efx_rx_queue *rx_queue) +{ + return container_of(rx_queue, struct efx_channel, rx_queue); +} + +static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) +{ + return efx_rx_queue_channel(rx_queue)->channel; +} + +/* Returns a pointer to the specified receive buffer in the RX + * descriptor queue. + */ +static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, + unsigned int index) +{ + return &rx_queue->buffer[index]; +} + +/** + * EFX_MAX_FRAME_LEN - calculate maximum frame length + * + * This calculates the maximum frame length that will be used for a + * given MTU. The frame length will be equal to the MTU plus a + * constant amount of header space and padding. 
This is the quantity + * that the net driver will program into the MAC as the maximum frame + * length. + * + * The 10G MAC requires 8-byte alignment on the frame + * length, so we round up to the nearest 8. + * + * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an + * XGMII cycle). If the frame length reaches the maximum value in the + * same cycle, the XMAC can miss the IPG altogether. We work around + * this by adding a further 16 bytes. + */ +#define EFX_MAX_FRAME_LEN(mtu) \ + ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) + +static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb) +{ + return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; +} +static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) +{ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +} + +#endif /* EFX_NET_DRIVER_H */ diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c new file mode 100644 index 000000000..89b83e59e --- /dev/null +++ b/drivers/net/ethernet/sfc/nic.c @@ -0,0 +1,534 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "nic.h" +#include "ef10_regs.h" +#include "farch_regs.h" +#include "io.h" +#include "workarounds.h" + +/************************************************************************** + * + * Generic buffer handling + * These buffers are used for interrupt status, MAC stats, etc. + * + **************************************************************************/ + +int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags) +{ + buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, + &buffer->dma_addr, gfp_flags); + if (!buffer->addr) + return -ENOMEM; + buffer->len = len; + return 0; +} + +void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) +{ + if (buffer->addr) { + dma_free_coherent(&efx->pci_dev->dev, buffer->len, + buffer->addr, buffer->dma_addr); + buffer->addr = NULL; + } +} + +/* Check whether an event is present in the eventq at the current + * read pointer. Only useful for self-test. + */ +bool efx_nic_event_present(struct efx_channel *channel) +{ + return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); +} + +void efx_nic_event_test_start(struct efx_channel *channel) +{ + channel->event_test_cpu = -1; + smp_wmb(); + channel->efx->type->ev_test_generate(channel); +} + +void efx_nic_irq_test_start(struct efx_nic *efx) +{ + efx->last_irq_cpu = -1; + smp_wmb(); + efx->type->irq_test_generate(efx); +} + +/* Hook interrupt handler(s) + * Try MSI and then legacy interrupts. 
+ */ +int efx_nic_init_interrupt(struct efx_nic *efx) +{ + struct efx_channel *channel; + unsigned int n_irqs; + int rc; + + if (!EFX_INT_MODE_USE_MSI(efx)) { + rc = request_irq(efx->legacy_irq, + efx->type->irq_handle_legacy, IRQF_SHARED, + efx->name, efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to hook legacy IRQ %d\n", + efx->pci_dev->irq); + goto fail1; + } + return 0; + } + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { + efx->net_dev->rx_cpu_rmap = + alloc_irq_cpu_rmap(efx->n_rx_channels); + if (!efx->net_dev->rx_cpu_rmap) { + rc = -ENOMEM; + goto fail1; + } + } +#endif + + /* Hook MSI or MSI-X interrupt */ + n_irqs = 0; + efx_for_each_channel(channel, efx) { + rc = request_irq(channel->irq, efx->type->irq_handle_msi, + IRQF_PROBE_SHARED, /* Not shared */ + efx->msi_context[channel->channel].name, + &efx->msi_context[channel->channel]); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to hook IRQ %d\n", channel->irq); + goto fail2; + } + ++n_irqs; + +#ifdef CONFIG_RFS_ACCEL + if (efx->interrupt_mode == EFX_INT_MODE_MSIX && + channel->channel < efx->n_rx_channels) { + rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, + channel->irq); + if (rc) + goto fail2; + } +#endif + } + + return 0; + + fail2: +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + efx_for_each_channel(channel, efx) { + if (n_irqs-- == 0) + break; + free_irq(channel->irq, &efx->msi_context[channel->channel]); + } + fail1: + return rc; +} + +void efx_nic_fini_interrupt(struct efx_nic *efx) +{ + struct efx_channel *channel; + +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif + + if (EFX_INT_MODE_USE_MSI(efx)) { + /* Disable MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + free_irq(channel->irq, + &efx->msi_context[channel->channel]); + } else { + /* Disable legacy interrupt */ + free_irq(efx->legacy_irq, efx); + } +} + +/* Register dump */ + +#define REGISTER_REVISION_FA 1 +#define REGISTER_REVISION_FB 2 +#define REGISTER_REVISION_FC 3 +#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */ +#define REGISTER_REVISION_ED 4 +#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */ + +struct efx_nic_reg { + u32 offset:24; + u32 min_revision:3, max_revision:3; +}; + +#define REGISTER(name, arch, min_rev, max_rev) { \ + arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev \ +} +#define REGISTER_AA(name) REGISTER(name, F, A, A) +#define REGISTER_AB(name) REGISTER(name, F, A, B) +#define REGISTER_AZ(name) REGISTER(name, F, A, Z) +#define REGISTER_BB(name) REGISTER(name, F, B, B) +#define REGISTER_BZ(name) REGISTER(name, F, B, Z) +#define REGISTER_CZ(name) REGISTER(name, F, C, Z) +#define REGISTER_DZ(name) REGISTER(name, E, D, Z) + +static const struct efx_nic_reg efx_nic_regs[] = { + REGISTER_AZ(ADR_REGION), + REGISTER_AZ(INT_EN_KER), + REGISTER_BZ(INT_EN_CHAR), + REGISTER_AZ(INT_ADR_KER), + REGISTER_BZ(INT_ADR_CHAR), + /* INT_ACK_KER is WO */ + /* INT_ISR0 is RC */ + REGISTER_AZ(HW_INIT), + REGISTER_CZ(USR_EV_CFG), + REGISTER_AB(EE_SPI_HCMD), + REGISTER_AB(EE_SPI_HADR), + REGISTER_AB(EE_SPI_HDATA), + REGISTER_AB(EE_BASE_PAGE), + REGISTER_AB(EE_VPD_CFG0), + /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ + /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ + /* PCIE_CORE_INDIRECT is indirect */ + REGISTER_AB(NIC_STAT), + 
REGISTER_AB(GPIO_CTL), + REGISTER_AB(GLB_CTL), + /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ + REGISTER_BZ(DP_CTRL), + REGISTER_AZ(MEM_STAT), + REGISTER_AZ(CS_DEBUG), + REGISTER_AZ(ALTERA_BUILD), + REGISTER_AZ(CSR_SPARE), + REGISTER_AB(PCIE_SD_CTL0123), + REGISTER_AB(PCIE_SD_CTL45), + REGISTER_AB(PCIE_PCS_CTL_STAT), + /* DEBUG_DATA_OUT is not used */ + /* DRV_EV is WO */ + REGISTER_AZ(EVQ_CTL), + REGISTER_AZ(EVQ_CNT1), + REGISTER_AZ(EVQ_CNT2), + REGISTER_AZ(BUF_TBL_CFG), + REGISTER_AZ(SRM_RX_DC_CFG), + REGISTER_AZ(SRM_TX_DC_CFG), + REGISTER_AZ(SRM_CFG), + /* BUF_TBL_UPD is WO */ + REGISTER_AZ(SRM_UPD_EVQ), + REGISTER_AZ(SRAM_PARITY), + REGISTER_AZ(RX_CFG), + REGISTER_BZ(RX_FILTER_CTL), + /* RX_FLUSH_DESCQ is WO */ + REGISTER_AZ(RX_DC_CFG), + REGISTER_AZ(RX_DC_PF_WM), + REGISTER_BZ(RX_RSS_TKEY), + /* RX_NODESC_DROP is RC */ + REGISTER_AA(RX_SELF_RST), + /* RX_DEBUG, RX_PUSH_DROP are not used */ + REGISTER_CZ(RX_RSS_IPV6_REG1), + REGISTER_CZ(RX_RSS_IPV6_REG2), + REGISTER_CZ(RX_RSS_IPV6_REG3), + /* TX_FLUSH_DESCQ is WO */ + REGISTER_AZ(TX_DC_CFG), + REGISTER_AA(TX_CHKSM_CFG), + REGISTER_AZ(TX_CFG), + /* TX_PUSH_DROP is not used */ + REGISTER_AZ(TX_RESERVED), + REGISTER_BZ(TX_PACE), + /* TX_PACE_DROP_QID is RC */ + REGISTER_BB(TX_VLAN), + REGISTER_BZ(TX_IPFIL_PORTEN), + REGISTER_AB(MD_TXD), + REGISTER_AB(MD_RXD), + REGISTER_AB(MD_CS), + REGISTER_AB(MD_PHY_ADR), + REGISTER_AB(MD_ID), + /* MD_STAT is RC */ + REGISTER_AB(MAC_STAT_DMA), + REGISTER_AB(MAC_CTRL), + REGISTER_BB(GEN_MODE), + REGISTER_AB(MAC_MC_HASH_REG0), + REGISTER_AB(MAC_MC_HASH_REG1), + REGISTER_AB(GM_CFG1), + REGISTER_AB(GM_CFG2), + /* GM_IPG and GM_HD are not used */ + REGISTER_AB(GM_MAX_FLEN), + /* GM_TEST is not used */ + REGISTER_AB(GM_ADR1), + REGISTER_AB(GM_ADR2), + REGISTER_AB(GMF_CFG0), + REGISTER_AB(GMF_CFG1), + REGISTER_AB(GMF_CFG2), + REGISTER_AB(GMF_CFG3), + REGISTER_AB(GMF_CFG4), + REGISTER_AB(GMF_CFG5), + REGISTER_BB(TX_SRC_MAC_CTL), + REGISTER_AB(XM_ADR_LO), + REGISTER_AB(XM_ADR_HI), + REGISTER_AB(XM_GLB_CFG), + REGISTER_AB(XM_TX_CFG), + REGISTER_AB(XM_RX_CFG), + REGISTER_AB(XM_MGT_INT_MASK), + REGISTER_AB(XM_FC), + REGISTER_AB(XM_PAUSE_TIME), + REGISTER_AB(XM_TX_PARAM), + REGISTER_AB(XM_RX_PARAM), + /* XM_MGT_INT_MSK (note no 'A') is RC */ + REGISTER_AB(XX_PWR_RST), + REGISTER_AB(XX_SD_CTL), + REGISTER_AB(XX_TXDRV_CTL), + /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ + /* XX_CORE_STAT is partly RC */ + REGISTER_DZ(BIU_HW_REV_ID), + REGISTER_DZ(MC_DB_LWRD), + REGISTER_DZ(MC_DB_HWRD), +}; + +struct efx_nic_reg_table { + u32 offset:24; + u32 min_revision:3, max_revision:3; + u32 step:6, rows:21; +}; + +#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \ + offset, \ + REGISTER_REVISION_ ## arch ## min_rev, \ + REGISTER_REVISION_ ## arch ## max_rev, \ + step, rows \ +} +#define REGISTER_TABLE(name, arch, min_rev, max_rev) \ + REGISTER_TABLE_DIMENSIONS( \ + name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \ + arch, min_rev, max_rev, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ + arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) +#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A) +#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z) +#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B) +#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z) +#define REGISTER_TABLE_BB_CZ(name) \ + REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \ + FR_BZ_ ## name ## _STEP, \ + FR_BB_ ## name ## _ROWS), \ + 
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \ + FR_BZ_ ## name ## _STEP, \ + FR_CZ_ ## name ## _ROWS) +#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z) +#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z) + +static const struct efx_nic_reg_table efx_nic_reg_tables[] = { + /* DRIVER is not used */ + /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ + REGISTER_TABLE_BB(TX_IPFIL_TBL), + REGISTER_TABLE_BB(TX_SRC_MAC_TBL), + REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), + REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), + REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), + REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), + /* We can't reasonably read all of the buffer table (up to 8MB!). + * However this driver will only use a few entries. Reading + * 1K entries allows for some expansion of queue count and + * size before we need to change the version. */ + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, + F, A, A, 8, 1024), + REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, + F, B, Z, 8, 1024), + REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), + REGISTER_TABLE_BB_CZ(TIMER_TBL), + REGISTER_TABLE_BB_CZ(TX_PACE_TBL), + REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), + /* TX_FILTER_TBL0 is huge and not used by this driver */ + REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), + REGISTER_TABLE_CZ(MC_TREG_SMEM), + /* MSIX_PBA_TABLE is not mapped */ + /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ + REGISTER_TABLE_BZ(RX_FILTER_TBL0), + REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS), +}; + +size_t efx_nic_get_regs_len(struct efx_nic *efx) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + size_t len = 0; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) + len += sizeof(efx_oword_t); + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) + if (efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision) + len += table->rows * min_t(size_t, table->step, 16); + + return len; +} + +void efx_nic_get_regs(struct efx_nic *efx, void *buf) +{ + const struct efx_nic_reg *reg; + const struct efx_nic_reg_table *table; + + for (reg = efx_nic_regs; + reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); + reg++) { + if (efx->type->revision >= reg->min_revision && + efx->type->revision <= reg->max_revision) { + efx_reado(efx, (efx_oword_t *)buf, reg->offset); + buf += sizeof(efx_oword_t); + } + } + + for (table = efx_nic_reg_tables; + table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); + table++) { + size_t size, i; + + if (!(efx->type->revision >= table->min_revision && + efx->type->revision <= table->max_revision)) + continue; + + size = min_t(size_t, table->step, 16); + + for (i = 0; i < table->rows; i++) { + switch (table->step) { + case 4: /* 32-bit SRAM */ + efx_readd(efx, buf, table->offset + 4 * i); + break; + case 8: /* 64-bit SRAM */ + efx_sram_readq(efx, + efx->membase + table->offset, + buf, i); + break; + case 16: /* 128-bit-readable register */ + efx_reado_table(efx, buf, table->offset, i); + break; + case 32: /* 128-bit register, interleaved */ + efx_reado_table(efx, buf, table->offset, 2 * i); + break; + default: + WARN_ON(1); + return; + } + buf += size; + } + } +} + +/** + * efx_nic_describe_stats - Describe supported statistics for ethtool + * @desc: Array 
of &struct efx_hw_stat_desc describing the statistics + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @names: Buffer to copy names to, or %NULL. The names are copied + * starting at intervals of %ETH_GSTRING_LEN bytes. + * + * Returns the number of visible statistics, i.e. the number of set + * bits in the first @count bits of @mask for which a name is defined. + */ +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names) +{ + size_t visible = 0; + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].name) { + if (names) { + strlcpy(names, desc[index].name, + ETH_GSTRING_LEN); + names += ETH_GSTRING_LEN; + } + ++visible; + } + } + + return visible; +} + +/** + * efx_nic_update_stats - Convert statistics DMA buffer to array of u64 + * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer + * layout. DMA widths of 0, 16, 32 and 64 are supported; where + * the width is specified as 0 the corresponding element of + * @stats is not updated. + * @count: Length of the @desc array + * @mask: Bitmask of which elements of @desc are enabled + * @stats: Buffer to update with the converted statistics. The length + * of this array must be at least @count. + * @dma_buf: DMA buffer containing hardware statistics + * @accumulate: If set, the converted values will be added rather than + * directly stored to the corresponding elements of @stats + */ +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, + u64 *stats, const void *dma_buf, bool accumulate) +{ + size_t index; + + for_each_set_bit(index, mask, count) { + if (desc[index].dma_width) { + const void *addr = dma_buf + desc[index].offset; + u64 val; + + switch (desc[index].dma_width) { + case 16: + val = le16_to_cpup((__le16 *)addr); + break; + case 32: + val = le32_to_cpup((__le32 *)addr); + break; + case 64: + val = le64_to_cpup((__le64 *)addr); + break; + default: + WARN_ON(1); + val = 0; + break; + } + + if (accumulate) + stats[index] += val; + else + stats[index] = val; + } + } +} + +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops) +{ + /* if down, or this is the first update after coming up */ + if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state) + efx->rx_nodesc_drops_while_down += + *rx_nodesc_drops - efx->rx_nodesc_drops_total; + efx->rx_nodesc_drops_total = *rx_nodesc_drops; + efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); + *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; +} diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h new file mode 100644 index 000000000..93d10cbbd --- /dev/null +++ b/drivers/net/ethernet/sfc/nic.h @@ -0,0 +1,868 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_NIC_H +#define EFX_NIC_H + +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "mcdi.h" + +enum { + EFX_REV_FALCON_A0 = 0, + EFX_REV_FALCON_A1 = 1, + EFX_REV_FALCON_B0 = 2, + EFX_REV_SIENA_A0 = 3, + EFX_REV_HUNT_A0 = 4, +}; + +static inline int efx_nic_rev(struct efx_nic *efx) +{ + return efx->type->revision; +} + +u32 efx_farch_fpga_ver(struct efx_nic *efx); + +/* NIC has two interlinked PCI functions for the same port. */ +static inline bool efx_nic_is_dual_func(struct efx_nic *efx) +{ + return efx_nic_rev(efx) < EFX_REV_FALCON_B0; +} + +/* Read the current event from the event queue */ +static inline efx_qword_t *efx_event(struct efx_channel *channel, + unsigned int index) +{ + return ((efx_qword_t *) (channel->eventq.buf.addr)) + + (index & channel->eventq_mask); +} + +/* See if an event is present + * + * We check both the high and low dword of the event for all ones. We + * wrote all ones when we cleared the event, and no valid event can + * have all ones in either its high or low dwords. This approach is + * robust against reordering. + * + * Note that using a single 64-bit comparison is incorrect; even + * though the CPU read will be atomic, the DMA write may not be. + */ +static inline int efx_event_present(efx_qword_t *event) +{ + return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | + EFX_DWORD_IS_ALL_ONES(event->dword[1])); +} + +/* Returns a pointer to the specified transmit descriptor in the TX + * descriptor queue belonging to the specified channel. + */ +static inline efx_qword_t * +efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) +{ + return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; +} + +/* Get partner of a TX queue, seen as part of the same net core queue */ +static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) +{ + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + return tx_queue - EFX_TXQ_TYPE_OFFLOAD; + else + return tx_queue + EFX_TXQ_TYPE_OFFLOAD; +} + +/* Report whether this TX queue would be empty for the given write_count. + * May return false negative. + */ +static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); + + if (empty_read_count == 0) + return false; + + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; +} + +/* Decide whether we can use TX PIO, ie. write packet data directly into + * a buffer on the device. This can reduce latency at the expense of + * throughput, so we only do this if both hardware and software TX rings + * are empty. This also ensures that only one packet at a time can be + * using the PIO buffer. + */ +static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); + return tx_queue->piobuf && + __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) && + __efx_nic_tx_is_empty(partner, partner->insert_count); +} + +/* Decide whether to push a TX descriptor to the NIC vs merely writing + * the doorbell. This can reduce latency when we are adding a single + * descriptor to an empty queue, but is otherwise pointless. Further, + * Falcon and Siena have hardware bugs (SF bug 33851) that may be + * triggered if we don't check this. + * We use the write_count used for the last doorbell push, to get the + * NIC's view of the tx queue. 
+ */ +static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count); + + tx_queue->empty_read_count = 0; + return was_empty && tx_queue->write_count - write_count == 1; +} + +/* Returns a pointer to the specified descriptor in the RX descriptor queue */ +static inline efx_qword_t * +efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; +} + +enum { + PHY_TYPE_NONE = 0, + PHY_TYPE_TXC43128 = 1, + PHY_TYPE_88E1111 = 2, + PHY_TYPE_SFX7101 = 3, + PHY_TYPE_QT2022C2 = 4, + PHY_TYPE_PM8358 = 6, + PHY_TYPE_SFT9001A = 8, + PHY_TYPE_QT2025C = 9, + PHY_TYPE_SFT9001B = 10, +}; + +#define FALCON_XMAC_LOOPBACKS \ + ((1 << LOOPBACK_XGMII) | \ + (1 << LOOPBACK_XGXS) | \ + (1 << LOOPBACK_XAUI)) + +/* Alignment of PCIe DMA boundaries (4KB) */ +#define EFX_PAGE_SIZE 4096 +/* Size and alignment of buffer table entries (same) */ +#define EFX_BUF_SIZE EFX_PAGE_SIZE + +/* NIC-generic software stats */ +enum { + GENERIC_STAT_rx_noskb_drops, + GENERIC_STAT_rx_nodesc_trunc, + GENERIC_STAT_COUNT +}; + +/** + * struct falcon_board_type - board operations and type information + * @id: Board type id, as found in NVRAM + * @init: Allocate resources and initialise peripheral hardware + * @init_phy: Do board-specific PHY initialisation + * @fini: Shut down hardware and free resources + * @set_id_led: Set state of identifying LED or revert to automatic function + * @monitor: Board-specific health check function + */ +struct falcon_board_type { + u8 id; + int (*init) (struct efx_nic *nic); + void (*init_phy) (struct efx_nic *efx); + void (*fini) (struct efx_nic *nic); + void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode); + int (*monitor) (struct efx_nic *nic); +}; + +/** + * struct falcon_board - board information + * @type: Type of board + * @major: Major rev. ('A', 'B' ...) + * @minor: Minor rev. (0, 1, ...) + * @i2c_adap: I2C adapter for on-board peripherals + * @i2c_data: Data for bit-banging algorithm + * @hwmon_client: I2C client for hardware monitor + * @ioexp_client: I2C client for power/port control + */ +struct falcon_board { + const struct falcon_board_type *type; + int major; + int minor; + struct i2c_adapter i2c_adap; + struct i2c_algo_bit_data i2c_data; + struct i2c_client *hwmon_client, *ioexp_client; +}; + +/** + * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device + * @device_id: Controller's id for the device + * @size: Size (in bytes) + * @addr_len: Number of address bytes in read/write commands + * @munge_address: Flag whether addresses should be munged. + * Some devices with 9-bit addresses (e.g. AT25040A EEPROM) + * use bit 3 of the command byte as address bit A8, rather + * than having a two-byte address. If this flag is set, then + * commands should be munged in this way. + * @erase_command: Erase command (or 0 if sector erase not needed). + * @erase_size: Erase sector size (in bytes) + * Erase commands affect sectors with this size and alignment. + * This must be a power of two. + * @block_size: Write block size (in bytes). + * Write commands are limited to blocks with this size and alignment. 
+ */ +struct falcon_spi_device { + int device_id; + unsigned int size; + unsigned int addr_len; + unsigned int munge_address:1; + u8 erase_command; + unsigned int erase_size; + unsigned int block_size; +}; + +static inline bool falcon_spi_present(const struct falcon_spi_device *spi) +{ + return spi->size != 0; +} + +enum { + FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT, + FALCON_STAT_tx_packets, + FALCON_STAT_tx_pause, + FALCON_STAT_tx_control, + FALCON_STAT_tx_unicast, + FALCON_STAT_tx_multicast, + FALCON_STAT_tx_broadcast, + FALCON_STAT_tx_lt64, + FALCON_STAT_tx_64, + FALCON_STAT_tx_65_to_127, + FALCON_STAT_tx_128_to_255, + FALCON_STAT_tx_256_to_511, + FALCON_STAT_tx_512_to_1023, + FALCON_STAT_tx_1024_to_15xx, + FALCON_STAT_tx_15xx_to_jumbo, + FALCON_STAT_tx_gtjumbo, + FALCON_STAT_tx_non_tcpudp, + FALCON_STAT_tx_mac_src_error, + FALCON_STAT_tx_ip_src_error, + FALCON_STAT_rx_bytes, + FALCON_STAT_rx_good_bytes, + FALCON_STAT_rx_bad_bytes, + FALCON_STAT_rx_packets, + FALCON_STAT_rx_good, + FALCON_STAT_rx_bad, + FALCON_STAT_rx_pause, + FALCON_STAT_rx_control, + FALCON_STAT_rx_unicast, + FALCON_STAT_rx_multicast, + FALCON_STAT_rx_broadcast, + FALCON_STAT_rx_lt64, + FALCON_STAT_rx_64, + FALCON_STAT_rx_65_to_127, + FALCON_STAT_rx_128_to_255, + FALCON_STAT_rx_256_to_511, + FALCON_STAT_rx_512_to_1023, + FALCON_STAT_rx_1024_to_15xx, + FALCON_STAT_rx_15xx_to_jumbo, + FALCON_STAT_rx_gtjumbo, + FALCON_STAT_rx_bad_lt64, + FALCON_STAT_rx_bad_gtjumbo, + FALCON_STAT_rx_overflow, + FALCON_STAT_rx_symbol_error, + FALCON_STAT_rx_align_error, + FALCON_STAT_rx_length_error, + FALCON_STAT_rx_internal_error, + FALCON_STAT_rx_nodesc_drop_cnt, + FALCON_STAT_COUNT +}; + +/** + * struct falcon_nic_data - Falcon NIC state + * @pci_dev2: Secondary function of Falcon A + * @board: Board state and functions + * @stats: Hardware statistics + * @stats_disable_count: Nest count for disabling statistics fetches + * @stats_pending: Is there a pending DMA of MAC statistics. + * @stats_timer: A timer for regularly fetching MAC statistics. 
+ * @spi_flash: SPI flash device + * @spi_eeprom: SPI EEPROM device + * @spi_lock: SPI bus lock + * @mdio_lock: MDIO bus lock + * @xmac_poll_required: XMAC link state needs polling + */ +struct falcon_nic_data { + struct pci_dev *pci_dev2; + struct falcon_board board; + u64 stats[FALCON_STAT_COUNT]; + unsigned int stats_disable_count; + bool stats_pending; + struct timer_list stats_timer; + struct falcon_spi_device spi_flash; + struct falcon_spi_device spi_eeprom; + struct mutex spi_lock; + struct mutex mdio_lock; + bool xmac_poll_required; +}; + +static inline struct falcon_board *falcon_board(struct efx_nic *efx) +{ + struct falcon_nic_data *data = efx->nic_data; + return &data->board; +} + +enum { + SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT, + SIENA_STAT_tx_good_bytes, + SIENA_STAT_tx_bad_bytes, + SIENA_STAT_tx_packets, + SIENA_STAT_tx_bad, + SIENA_STAT_tx_pause, + SIENA_STAT_tx_control, + SIENA_STAT_tx_unicast, + SIENA_STAT_tx_multicast, + SIENA_STAT_tx_broadcast, + SIENA_STAT_tx_lt64, + SIENA_STAT_tx_64, + SIENA_STAT_tx_65_to_127, + SIENA_STAT_tx_128_to_255, + SIENA_STAT_tx_256_to_511, + SIENA_STAT_tx_512_to_1023, + SIENA_STAT_tx_1024_to_15xx, + SIENA_STAT_tx_15xx_to_jumbo, + SIENA_STAT_tx_gtjumbo, + SIENA_STAT_tx_collision, + SIENA_STAT_tx_single_collision, + SIENA_STAT_tx_multiple_collision, + SIENA_STAT_tx_excessive_collision, + SIENA_STAT_tx_deferred, + SIENA_STAT_tx_late_collision, + SIENA_STAT_tx_excessive_deferred, + SIENA_STAT_tx_non_tcpudp, + SIENA_STAT_tx_mac_src_error, + SIENA_STAT_tx_ip_src_error, + SIENA_STAT_rx_bytes, + SIENA_STAT_rx_good_bytes, + SIENA_STAT_rx_bad_bytes, + SIENA_STAT_rx_packets, + SIENA_STAT_rx_good, + SIENA_STAT_rx_bad, + SIENA_STAT_rx_pause, + SIENA_STAT_rx_control, + SIENA_STAT_rx_unicast, + SIENA_STAT_rx_multicast, + SIENA_STAT_rx_broadcast, + SIENA_STAT_rx_lt64, + SIENA_STAT_rx_64, + SIENA_STAT_rx_65_to_127, + SIENA_STAT_rx_128_to_255, + SIENA_STAT_rx_256_to_511, + SIENA_STAT_rx_512_to_1023, + SIENA_STAT_rx_1024_to_15xx, + SIENA_STAT_rx_15xx_to_jumbo, + SIENA_STAT_rx_gtjumbo, + SIENA_STAT_rx_bad_gtjumbo, + SIENA_STAT_rx_overflow, + SIENA_STAT_rx_false_carrier, + SIENA_STAT_rx_symbol_error, + SIENA_STAT_rx_align_error, + SIENA_STAT_rx_length_error, + SIENA_STAT_rx_internal_error, + SIENA_STAT_rx_nodesc_drop_cnt, + SIENA_STAT_COUNT +}; + +/** + * struct siena_nic_data - Siena NIC state + * @efx: Pointer back to main interface structure + * @wol_filter_id: Wake-on-LAN packet filter id + * @stats: Hardware statistics + * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. + * @vfdi_status: Common VFDI status page to be dmad to VF address space. + * @local_addr_list: List of local addresses. Protected by %local_lock. + * @local_page_list: List of DMA addressable pages used to broadcast + * %local_addr_list. Protected by %local_lock. + * @local_lock: Mutex protecting %local_addr_list and %local_page_list. + * @peer_work: Work item to broadcast peer addresses to VMs. 
+ */ +struct siena_nic_data { + struct efx_nic *efx; + int wol_filter_id; + u64 stats[SIENA_STAT_COUNT]; +#ifdef CONFIG_SFC_SRIOV + struct efx_channel *vfdi_channel; + unsigned vf_buftbl_base; + struct efx_buffer vfdi_status; + struct list_head local_addr_list; + struct list_head local_page_list; + struct mutex local_lock; + struct work_struct peer_work; +#endif +}; + +enum { + EF10_STAT_tx_bytes = GENERIC_STAT_COUNT, + EF10_STAT_tx_packets, + EF10_STAT_tx_pause, + EF10_STAT_tx_control, + EF10_STAT_tx_unicast, + EF10_STAT_tx_multicast, + EF10_STAT_tx_broadcast, + EF10_STAT_tx_lt64, + EF10_STAT_tx_64, + EF10_STAT_tx_65_to_127, + EF10_STAT_tx_128_to_255, + EF10_STAT_tx_256_to_511, + EF10_STAT_tx_512_to_1023, + EF10_STAT_tx_1024_to_15xx, + EF10_STAT_tx_15xx_to_jumbo, + EF10_STAT_rx_bytes, + EF10_STAT_rx_bytes_minus_good_bytes, + EF10_STAT_rx_good_bytes, + EF10_STAT_rx_bad_bytes, + EF10_STAT_rx_packets, + EF10_STAT_rx_good, + EF10_STAT_rx_bad, + EF10_STAT_rx_pause, + EF10_STAT_rx_control, + EF10_STAT_rx_unicast, + EF10_STAT_rx_multicast, + EF10_STAT_rx_broadcast, + EF10_STAT_rx_lt64, + EF10_STAT_rx_64, + EF10_STAT_rx_65_to_127, + EF10_STAT_rx_128_to_255, + EF10_STAT_rx_256_to_511, + EF10_STAT_rx_512_to_1023, + EF10_STAT_rx_1024_to_15xx, + EF10_STAT_rx_15xx_to_jumbo, + EF10_STAT_rx_gtjumbo, + EF10_STAT_rx_bad_gtjumbo, + EF10_STAT_rx_overflow, + EF10_STAT_rx_align_error, + EF10_STAT_rx_length_error, + EF10_STAT_rx_nodesc_drops, + EF10_STAT_rx_pm_trunc_bb_overflow, + EF10_STAT_rx_pm_discard_bb_overflow, + EF10_STAT_rx_pm_trunc_vfifo_full, + EF10_STAT_rx_pm_discard_vfifo_full, + EF10_STAT_rx_pm_trunc_qbb, + EF10_STAT_rx_pm_discard_qbb, + EF10_STAT_rx_pm_discard_mapping, + EF10_STAT_rx_dp_q_disabled_packets, + EF10_STAT_rx_dp_di_dropped_packets, + EF10_STAT_rx_dp_streaming_packets, + EF10_STAT_rx_dp_hlb_fetch, + EF10_STAT_rx_dp_hlb_wait, + EF10_STAT_COUNT +}; + +/* Maximum number of TX PIO buffers we may allocate to a function. + * This matches the total number of buffers on each SFC9100-family + * controller. 
+ */ +#define EF10_TX_PIOBUF_COUNT 16 + +/** + * struct efx_ef10_nic_data - EF10 architecture NIC state + * @mcdi_buf: DMA buffer for MCDI + * @warm_boot_count: Last seen MC warm boot count + * @vi_base: Absolute index of first VI in this function + * @n_allocated_vis: Number of VIs allocated to this function + * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot + * @must_restore_filters: Flag: filters have yet to be restored after MC reboot + * @n_piobufs: Number of PIO buffers allocated to this function + * @wc_membase: Base address of write-combining mapping of the memory BAR + * @pio_write_base: Base address for writing PIO buffers + * @pio_write_vi_base: Relative VI number for @pio_write_base + * @piobuf_handle: Handle of each PIO buffer allocated + * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC + * reboot + * @rx_rss_context: Firmware handle for our RSS context + * @stats: Hardware statistics + * @workaround_35388: Flag: firmware supports workaround for bug 35388 + * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated + * after MC reboot + * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of + * %MC_CMD_GET_CAPABILITIES response) + */ +struct efx_ef10_nic_data { + struct efx_buffer mcdi_buf; + u16 warm_boot_count; + unsigned int vi_base; + unsigned int n_allocated_vis; + bool must_realloc_vis; + bool must_restore_filters; + unsigned int n_piobufs; + void __iomem *wc_membase, *pio_write_base; + unsigned int pio_write_vi_base; + unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; + bool must_restore_piobufs; + u32 rx_rss_context; + u64 stats[EF10_STAT_COUNT]; + bool workaround_35388; + bool must_check_datapath_caps; + u32 datapath_caps; +}; + +/* + * On the SFC9000 family each port is associated with 1 PCI physical + * function (PF) handled by sfc and a configurable number of virtual + * functions (VFs) that may be handled by some other driver, often in + * a VM guest. The queue pointer registers are mapped in both PF and + * VF BARs such that an 8K region provides access to a single RX, TX + * and event queue (collectively a Virtual Interface, VI or VNIC). + * + * The PF has access to all 1024 VIs while VFs are mapped to VIs + * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered + * in range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)). + * For example, with VI_BASE = 128 and VI_SCALE = 6, VF 0 uses VIs + * 128-191 and VF 1 uses VIs 192-255. + * The number of VIs and the VI_SCALE value are configurable but must + * be established at boot time by firmware. + */ + +/* Maximum VI_SCALE parameter supported by Siena */ +#define EFX_VI_SCALE_MAX 6 +/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX), + * so this is the smallest allowed value. 
*/ +#define EFX_VI_BASE 128U +/* Maximum number of VFs allowed */ +#define EFX_VF_COUNT_MAX 127 +/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */ +#define EFX_MAX_VF_EVQ_SIZE 8192UL +/* The number of buffer table entries reserved for each VI on a VF */ +#define EFX_VF_BUFTBL_PER_VI \ + ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \ + sizeof(efx_qword_t) / EFX_BUF_SIZE) + +#ifdef CONFIG_SFC_SRIOV + +/* SIENA */ +static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) +{ + return efx->vf_count != 0; +} + +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) +{ + return efx->vf_init_count != 0; +} + +static inline unsigned int efx_vf_size(struct efx_nic *efx) +{ + return 1 << efx->vi_scale; +} + +int efx_init_sriov(void); +void efx_siena_sriov_probe(struct efx_nic *efx); +int efx_siena_sriov_init(struct efx_nic *efx); +void efx_siena_sriov_mac_address_changed(struct efx_nic *efx); +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); +void efx_siena_sriov_reset(struct efx_nic *efx); +void efx_siena_sriov_fini(struct efx_nic *efx); +void efx_fini_sriov(void); + +/* EF10 */ +static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} + +#else + +/* SIENA */ +static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; } +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; } +static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; } +static inline int efx_init_sriov(void) { return 0; } +static inline void efx_siena_sriov_probe(struct efx_nic *efx) {} +static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, + efx_qword_t *event) {} +static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, + efx_qword_t *event) {} +static inline void efx_siena_sriov_event(struct efx_channel *channel, + efx_qword_t *event) {} +static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, + unsigned dmaq) {} +static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {} +static inline void efx_siena_sriov_reset(struct efx_nic *efx) {} +static inline void efx_siena_sriov_fini(struct efx_nic *efx) {} +static inline void efx_fini_sriov(void) {} + +/* EF10 */ +static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } +static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} +static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} + +#endif + +/* FALCON */ +static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; } +static inline int efx_falcon_sriov_init(struct efx_nic 
*efx) { return -EOPNOTSUPP; } +static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {} +static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {} +static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {} + +int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); +int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf, + u16 vlan, u8 qos); +int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf, + struct ifla_vf_info *ivf); +int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, + bool spoofchk); + +struct ethtool_ts_info; +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); +void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); +void efx_ptp_remove(struct efx_nic *efx); +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +int efx_ptp_get_mode(struct efx_nic *efx); +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode); +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb); +static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + if (channel->sync_events_state == SYNC_EVENTS_VALID) + __efx_rx_skb_attach_timestamp(channel, skb); +} +void efx_ptp_start_datapath(struct efx_nic *efx); +void efx_ptp_stop_datapath(struct efx_nic *efx); + +extern const struct efx_nic_type falcon_a1_nic_type; +extern const struct efx_nic_type falcon_b0_nic_type; +extern const struct efx_nic_type siena_a0_nic_type; +extern const struct efx_nic_type efx_hunt_a0_nic_type; + +/************************************************************************** + * + * Externs + * + ************************************************************************** + */ + +int falcon_probe_board(struct efx_nic *efx, u16 revision_info); + +/* TX data path */ +static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) +{ + return tx_queue->efx->type->tx_probe(tx_queue); +} +static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_init(tx_queue); +} +static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_remove(tx_queue); +} +static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_write(tx_queue); +} + +/* RX data path */ +static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_probe(rx_queue); +} +static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_init(rx_queue); +} +static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_remove(rx_queue); +} +static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_write(rx_queue); +} +static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_defer_refill(rx_queue); +} + +/* Event data 
path */ +static inline int efx_nic_probe_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_probe(channel); +} +static inline int efx_nic_init_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_init(channel); +} +static inline void efx_nic_fini_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_fini(channel); +} +static inline void efx_nic_remove_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_remove(channel); +} +static inline int +efx_nic_process_eventq(struct efx_channel *channel, int quota) +{ + return channel->efx->type->ev_process(channel, quota); +} +static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) +{ + channel->efx->type->ev_read_ack(channel); +} +void efx_nic_event_test_start(struct efx_channel *channel); + +/* Falcon/Siena queue operations */ +int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); +void efx_farch_tx_init(struct efx_tx_queue *tx_queue); +void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); +void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); +void efx_farch_tx_write(struct efx_tx_queue *tx_queue); +int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); +void efx_farch_rx_init(struct efx_rx_queue *rx_queue); +void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); +void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); +void efx_farch_rx_write(struct efx_rx_queue *rx_queue); +void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); +int efx_farch_ev_probe(struct efx_channel *channel); +int efx_farch_ev_init(struct efx_channel *channel); +void efx_farch_ev_fini(struct efx_channel *channel); +void efx_farch_ev_remove(struct efx_channel *channel); +int efx_farch_ev_process(struct efx_channel *channel, int quota); +void efx_farch_ev_read_ack(struct efx_channel *channel); +void efx_farch_ev_test_generate(struct efx_channel *channel); + +/* Falcon/Siena filter operations */ +int efx_farch_filter_table_probe(struct efx_nic *efx); +void efx_farch_filter_table_restore(struct efx_nic *efx); +void efx_farch_filter_table_remove(struct efx_nic *efx); +void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); +s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, + bool replace); +int efx_farch_filter_remove_safe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id); +int efx_farch_filter_get_safe(struct efx_nic *efx, + enum efx_filter_priority priority, u32 filter_id, + struct efx_filter_spec *); +int efx_farch_filter_clear_rx(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, + enum efx_filter_priority priority); +u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); +s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, + enum efx_filter_priority priority, u32 *buf, + u32 size); +#ifdef CONFIG_RFS_ACCEL +s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, + struct efx_filter_spec *spec); +bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, + unsigned int index); +#endif +void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); + +bool efx_nic_event_present(struct efx_channel *channel); + +/* Some statistics are computed as A - B where A and B each increase + * linearly with some hardware counter(s) and the counters are read + * asynchronously. 
If the counters contributing to B are always read + * after those contributing to A, the computed value may be lower than + * the true value by some variable amount, and may decrease between + * subsequent computations. + * + * We should never allow statistics to decrease or to exceed the true + * value. Since the computed value will never be greater than the + * true value, we can achieve this by only storing the computed value + * when it increases. + */ +static inline void efx_update_diff_stat(u64 *stat, u64 diff) +{ + if ((s64)(diff - *stat) > 0) + *stat = diff; +} + +/* Interrupts */ +int efx_nic_init_interrupt(struct efx_nic *efx); +void efx_nic_irq_test_start(struct efx_nic *efx); +void efx_nic_fini_interrupt(struct efx_nic *efx); + +/* Falcon/Siena interrupts */ +void efx_farch_irq_enable_master(struct efx_nic *efx); +void efx_farch_irq_test_generate(struct efx_nic *efx); +void efx_farch_irq_disable_master(struct efx_nic *efx); +irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); +irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); + +static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) +{ + return ACCESS_ONCE(channel->event_test_cpu); +} +static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) +{ + return ACCESS_ONCE(efx->last_irq_cpu); +} + +/* Global Resources */ +int efx_nic_flush_queues(struct efx_nic *efx); +void siena_prepare_flush(struct efx_nic *efx); +int efx_farch_fini_dmaq(struct efx_nic *efx); +void efx_farch_finish_flr(struct efx_nic *efx); +void siena_finish_flush(struct efx_nic *efx); +void falcon_start_nic_stats(struct efx_nic *efx); +void falcon_stop_nic_stats(struct efx_nic *efx); +int falcon_reset_xaui(struct efx_nic *efx); +void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); +void efx_farch_init_common(struct efx_nic *efx); +void efx_ef10_handle_drain_event(struct efx_nic *efx); +void efx_farch_rx_push_indir_table(struct efx_nic *efx); + +int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags); +void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); + +/* Tests */ +struct efx_farch_register_test { + unsigned address; + efx_oword_t mask; +}; +int efx_farch_test_registers(struct efx_nic *efx, + const struct efx_farch_register_test *regs, + size_t n_regs); + +size_t efx_nic_get_regs_len(struct efx_nic *efx); +void efx_nic_get_regs(struct efx_nic *efx, void *buf); + +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names); +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u64 *stats, + const void *dma_buf, bool accumulate); +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); + +#define EFX_MAX_FLUSH_TIME 5000 + +void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, + efx_qword_t *event); + +#endif /* EFX_NIC_H */ diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h new file mode 100644 index 000000000..803bf445c --- /dev/null +++ b/drivers/net/ethernet/sfc/phy.h @@ -0,0 +1,50 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2007-2010 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_PHY_H +#define EFX_PHY_H + +/**************************************************************************** + * 10Xpress (SFX7101) PHY + */ +extern const struct efx_phy_operations falcon_sfx7101_phy_ops; + +void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); + +/**************************************************************************** + * AMCC/Quake QT202x PHYs + */ +extern const struct efx_phy_operations falcon_qt202x_phy_ops; + +/* These PHYs provide various H/W control states for LEDs */ +#define QUAKE_LED_LINK_INVAL (0) +#define QUAKE_LED_LINK_STAT (1) +#define QUAKE_LED_LINK_ACT (2) +#define QUAKE_LED_LINK_ACTSTAT (3) +#define QUAKE_LED_OFF (4) +#define QUAKE_LED_ON (5) +#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */ +/* What link the LED tracks */ +#define QUAKE_LED_TXLINK (0) +#define QUAKE_LED_RXLINK (8) + +void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); + +/**************************************************************************** +* Transwitch CX4 retimer +*/ +extern const struct efx_phy_operations falcon_txc_phy_ops; + +#define TXC_GPIO_DIR_INPUT 0 +#define TXC_GPIO_DIR_OUTPUT 1 + +void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir); +void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val); + +#endif diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c new file mode 100644 index 000000000..a2e9aee05 --- /dev/null +++ b/drivers/net/ethernet/sfc/ptp.c @@ -0,0 +1,1939 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2011-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* Theory of operation: + * + * PTP support is assisted by firmware running on the MC, which provides + * the hardware timestamping capabilities. Both transmitted and received + * PTP event packets are queued onto internal queues for subsequent processing; + * this is because the MC operations are relatively long and would block + * block NAPI/interrupt operation. + * + * Receive event processing: + * The event contains the packet's UUID and sequence number, together + * with the hardware timestamp. The PTP receive packet queue is searched + * for this UUID/sequence number and, if found, put on a pending queue. + * Packets not matching are delivered without timestamps (MCDI events will + * always arrive after the actual packet). + * It is important for the operation of the PTP protocol that the ordering + * of packets between the event and general port is maintained. + * + * Work queue processing: + * If work waiting, synchronise host/hardware time + * + * Transmit: send packet through MC, which returns the transmission time + * that is converted to an appropriate timestamp. + * + * Receive: the packet's reception time is converted to an appropriate + * timestamp. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "io.h" +#include "farch_regs.h" +#include "nic.h" + +/* Maximum number of events expected to make up a PTP event */ +#define MAX_EVENT_FRAGS 3 + +/* Maximum delay, ms, to begin synchronisation */ +#define MAX_SYNCHRONISE_WAIT_MS 2 + +/* How long, at most, to spend synchronising */ +#define SYNCHRONISE_PERIOD_NS 250000 + +/* How often to update the shared memory time */ +#define SYNCHRONISATION_GRANULARITY_NS 200 + +/* Minimum permitted length of a (corrected) synchronisation time */ +#define DEFAULT_MIN_SYNCHRONISATION_NS 120 + +/* Maximum permitted length of a (corrected) synchronisation time */ +#define MAX_SYNCHRONISATION_NS 1000 + +/* How many (MC) receive events that can be queued */ +#define MAX_RECEIVE_EVENTS 8 + +/* Length of (modified) moving average. */ +#define AVERAGE_LENGTH 16 + +/* How long an unmatched event or packet can be held */ +#define PKT_EVENT_LIFETIME_MS 10 + +/* Offsets into PTP packet for identification. These offsets are from the + * start of the IP header, not the MAC header. Note that neither PTP V1 nor + * PTP V2 permit the use of IPV4 options. + */ +#define PTP_DPORT_OFFSET 22 + +#define PTP_V1_VERSION_LENGTH 2 +#define PTP_V1_VERSION_OFFSET 28 + +#define PTP_V1_UUID_LENGTH 6 +#define PTP_V1_UUID_OFFSET 50 + +#define PTP_V1_SEQUENCE_LENGTH 2 +#define PTP_V1_SEQUENCE_OFFSET 58 + +/* The minimum length of a PTP V1 packet for offsets, etc. to be valid: + * includes IP header. + */ +#define PTP_V1_MIN_LENGTH 64 + +#define PTP_V2_VERSION_LENGTH 1 +#define PTP_V2_VERSION_OFFSET 29 + +#define PTP_V2_UUID_LENGTH 8 +#define PTP_V2_UUID_OFFSET 48 + +/* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2), + * the MC only captures the last six bytes of the clock identity. These values + * reflect those, not the ones used in the standard. The standard permits + * mapping of V1 UUIDs to V2 UUIDs with these same values. + */ +#define PTP_V2_MC_UUID_LENGTH 6 +#define PTP_V2_MC_UUID_OFFSET 50 + +#define PTP_V2_SEQUENCE_LENGTH 2 +#define PTP_V2_SEQUENCE_OFFSET 58 + +/* The minimum length of a PTP V2 packet for offsets, etc. to be valid: + * includes IP header. + */ +#define PTP_V2_MIN_LENGTH 63 + +#define PTP_MIN_LENGTH 63 + +#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */ +#define PTP_EVENT_PORT 319 +#define PTP_GENERAL_PORT 320 + +/* Annoyingly the format of the version numbers are different between + * versions 1 and 2 so it isn't possible to simply look for 1 or 2. + */ +#define PTP_VERSION_V1 1 + +#define PTP_VERSION_V2 2 +#define PTP_VERSION_V2_MASK 0x0f + +enum ptp_packet_state { + PTP_PACKET_STATE_UNMATCHED = 0, + PTP_PACKET_STATE_MATCHED, + PTP_PACKET_STATE_TIMED_OUT, + PTP_PACKET_STATE_MATCH_UNWANTED +}; + +/* NIC synchronised with single word of time only comprising + * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds. + */ +#define MC_NANOSECOND_BITS 30 +#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1) +#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1) + +/* Maximum parts-per-billion adjustment that is acceptable */ +#define MAX_PPB 1000000 + +/* Number of bits required to hold the above */ +#define MAX_PPB_BITS 20 + +/* Number of extra bits allowed when calculating fractional ns. + * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should + * be less than 63. 
+ */ +#define PPB_EXTRA_BITS 2 + +/* Precalculate scale word to avoid long long division at runtime */ +#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\ + MAX_PPB_BITS)) / 1000000000LL) + +#define PTP_SYNC_ATTEMPTS 4 + +/** + * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area. + * @words: UUID and (partial) sequence number + * @expiry: Time after which the packet should be delivered irrespective of + * event arrival. + * @state: The state of the packet - whether it is ready for processing or + * whether that is of no interest. + */ +struct efx_ptp_match { + u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)]; + unsigned long expiry; + enum ptp_packet_state state; +}; + +/** + * struct efx_ptp_event_rx - A PTP receive event (from MC) + * @seq0: First part of (PTP) UUID + * @seq1: Second part of (PTP) UUID and sequence number + * @hwtimestamp: Event timestamp + */ +struct efx_ptp_event_rx { + struct list_head link; + u32 seq0; + u32 seq1; + ktime_t hwtimestamp; + unsigned long expiry; +}; + +/** + * struct efx_ptp_timeset - Synchronisation between host and MC + * @host_start: Host time immediately before hardware timestamp taken + * @major: Hardware timestamp, major + * @minor: Hardware timestamp, minor + * @host_end: Host time immediately after hardware timestamp taken + * @wait: Number of NIC clock ticks between hardware timestamp being read and + * host end time being seen + * @window: Difference of host_end and host_start + * @valid: Whether this timeset is valid + */ +struct efx_ptp_timeset { + u32 host_start; + u32 major; + u32 minor; + u32 host_end; + u32 wait; + u32 window; /* Derived: end - start, allowing for wrap */ +}; + +/** + * struct efx_ptp_data - Precision Time Protocol (PTP) state + * @efx: The NIC context + * @channel: The PTP channel (Siena only) + * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are + * separate events) + * @rxq: Receive queue (awaiting timestamps) + * @txq: Transmit queue + * @evt_list: List of MC receive events awaiting packets + * @evt_free_list: List of free events + * @evt_lock: Lock for manipulating evt_list and evt_free_list + * @rx_evts: Instantiated events (on evt_list and evt_free_list) + * @workwq: Work queue for processing pending PTP operations + * @work: Work task + * @reset_required: A serious error has occurred and the PTP task needs to be + * reset (disable, enable). + * @rxfilter_event: Receive filter when operating + * @rxfilter_general: Receive filter when operating + * @config: Current timestamp configuration + * @enabled: PTP operation enabled + * @mode: Mode in which PTP operating (PTP version) + * @time_format: Time format supported by this NIC + * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time + * @nic_to_kernel_time: Function to convert from NIC to kernel time + * @min_synchronisation_ns: Minimum acceptable corrected sync window + * @ts_corrections.tx: Required driver correction of transmit timestamps + * @ts_corrections.rx: Required driver correction of receive timestamps + * @ts_corrections.pps_out: PPS output error (information only) + * @ts_corrections.pps_in: Required driver correction of PPS input timestamps + * @evt_frags: Partly assembled PTP events + * @evt_frag_idx: Current fragment number + * @evt_code: Last event code + * @start: Address at which MC indicates ready for synchronisation + * @host_time_pps: Host time at last PPS + * @current_adjfreq: Current ppb adjustment. 
+ * @phc_clock: Pointer to registered phc device (if primary function) + * @phc_clock_info: Registration structure for phc device + * @pps_work: pps work task for handling pps events + * @pps_workwq: pps work queue + * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled + * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids + * allocations in main data path). + * @good_syncs: Number of successful synchronisations. + * @fast_syncs: Number of synchronisations requiring short delay + * @bad_syncs: Number of failed synchronisations. + * @sync_timeouts: Number of synchronisation timeouts + * @no_time_syncs: Number of synchronisations with no good times. + * @invalid_sync_windows: Number of sync windows with bad durations. + * @undersize_sync_windows: Number of corrected sync windows that are too small + * @oversize_sync_windows: Number of corrected sync windows that are too large + * @rx_no_timestamp: Number of packets received without a timestamp. + * @timeset: Last set of synchronisation statistics. + */ +struct efx_ptp_data { + struct efx_nic *efx; + struct efx_channel *channel; + bool rx_ts_inline; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct list_head evt_list; + struct list_head evt_free_list; + spinlock_t evt_lock; + struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; + struct workqueue_struct *workwq; + struct work_struct work; + bool reset_required; + u32 rxfilter_event; + u32 rxfilter_general; + bool rxfilter_installed; + struct hwtstamp_config config; + bool enabled; + unsigned int mode; + unsigned int time_format; + void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor); + ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor, + s32 correction); + unsigned int min_synchronisation_ns; + struct { + s32 tx; + s32 rx; + s32 pps_out; + s32 pps_in; + } ts_corrections; + efx_qword_t evt_frags[MAX_EVENT_FRAGS]; + int evt_frag_idx; + int evt_code; + struct efx_buffer start; + struct pps_event_time host_time_pps; + s64 current_adjfreq; + struct ptp_clock *phc_clock; + struct ptp_clock_info phc_clock_info; + struct work_struct pps_work; + struct workqueue_struct *pps_workwq; + bool nic_ts_enabled; + MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX); + + unsigned int good_syncs; + unsigned int fast_syncs; + unsigned int bad_syncs; + unsigned int sync_timeouts; + unsigned int no_time_syncs; + unsigned int invalid_sync_windows; + unsigned int undersize_sync_windows; + unsigned int oversize_sync_windows; + unsigned int rx_no_timestamp; + struct efx_ptp_timeset + timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; +}; + +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts); +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); + +#define PTP_SW_STAT(ext_name, field_name) \ + { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) } +#define PTP_MC_STAT(ext_name, mcdi_name) \ + { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST } +static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = { + PTP_SW_STAT(ptp_good_syncs, good_syncs), + PTP_SW_STAT(ptp_fast_syncs, fast_syncs), + PTP_SW_STAT(ptp_bad_syncs, bad_syncs), + PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts), + PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs), + 
PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows), + PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows), + PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows), + PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp), + PTP_MC_STAT(ptp_tx_timestamp_packets, TX), + PTP_MC_STAT(ptp_rx_timestamp_packets, RX), + PTP_MC_STAT(ptp_timestamp_packets, TS), + PTP_MC_STAT(ptp_filter_matches, FM), + PTP_MC_STAT(ptp_non_filter_matches, NFM), +}; +#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc) +static const unsigned long efx_ptp_stat_mask[] = { + [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL, +}; + +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings) +{ + if (!efx->ptp_data) + return 0; + + return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, strings); +} + +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN); + size_t i; + int rc; + + if (!efx->ptp_data) + return 0; + + /* Copy software statistics */ + for (i = 0; i < PTP_STAT_COUNT; i++) { + if (efx_ptp_stat_desc[i].dma_width) + continue; + stats[i] = *(unsigned int *)((char *)efx->ptp_data + + efx_ptp_stat_desc[i].offset); + } + + /* Fetch MC statistics. We *must* fill in all statistics or + * risk leaking kernel memory to userland, so if the MCDI + * request fails we pretend we got zeroes. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc); + memset(outbuf, 0, sizeof(outbuf)); + } + efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, + efx_ptp_stat_mask, + stats, _MCDI_PTR(outbuf, 0), false); + + return PTP_STAT_COUNT; +} + +/* For Siena platforms NIC time is s and ns */ +static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec ts = ns_to_timespec(ns); + *nic_major = ts.tv_sec; + *nic_minor = ts.tv_nsec; +} + +static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + ktime_t kt = ktime_set(nic_major, nic_minor); + if (correction >= 0) + kt = ktime_add_ns(kt, (u64)correction); + else + kt = ktime_sub_ns(kt, (u64)-correction); + return kt; +} + +/* To convert from s27 format to ns we multiply then divide by a power of 2. + * For the conversion from ns to s27, the operation is also converted to a + * multiply and shift. + */ +#define S27_TO_NS_SHIFT (27) +#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC) +#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT) +#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT) + +/* For Huntington platforms NIC time is in seconds and fractions of a second + * where the minor register only uses 27 bits in units of 2^-27s. + */ +static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) +{ + struct timespec ts = ns_to_timespec(ns); + u32 maj = ts.tv_sec; + u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + + (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); + + /* The conversion can result in the minor value exceeding the maximum. + * In this case, round up to the next second. 
+ */ + if (min >= S27_MINOR_MAX) { + min -= S27_MINOR_MAX; + maj++; + } + + *nic_major = maj; + *nic_minor = min; +} + +static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor) +{ + u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC + + (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT); + return ktime_set(nic_major, ns); +} + +static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor, + s32 correction) +{ + /* Apply the correction and deal with carry */ + nic_minor += correction; + if ((s32)nic_minor < 0) { + nic_minor += S27_MINOR_MAX; + nic_major--; + } else if (nic_minor >= S27_MINOR_MAX) { + nic_minor -= S27_MINOR_MAX; + nic_major++; + } + + return efx_ptp_s27_to_ktime(nic_major, nic_minor); +} + +/* Get PTP attributes and set up time conversions */ +static int efx_ptp_get_attributes(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN); + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + u32 fmt; + size_t out_len; + + /* Get the PTP attributes. If the NIC doesn't support the operation we + * use the default format for compatibility with older NICs i.e. + * seconds and nanoseconds. + */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) + fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT); + else if (rc == -EINVAL) + fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS; + else + return rc; + + if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) { + ptp->ns_to_nic_time = efx_ptp_ns_to_s27; + ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction; + } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) { + ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns; + ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction; + } else { + return -ERANGE; + } + + ptp->time_format = fmt; + + /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older + * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value + * to use for the minimum acceptable corrected synchronization window. + * If we have the extra information store it. For older firmware that + * does not implement the extended command use the default value. + */ + if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN) + ptp->min_synchronisation_ns = + MCDI_DWORD(outbuf, + PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN); + else + ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS; + + return 0; +} + +/* Get PTP timestamp corrections */ +static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN); + int rc; + + /* Get the timestamp corrections from the NIC. If this operation is + * not supported (older NICs) then no correction is required. 
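/* Illustrative sketch (not part of the driver sources): round-trip of the
 * Huntington 2^-27 s ("s27") minor-time format handled by
 * efx_ptp_ns_to_s27() and efx_ptp_s27_to_ktime() above.  One tick is
 * roughly 7.45 ns, so the conversion error is at most half a tick.
 * Standalone C with hypothetical names.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define S27_TO_NS_SHIFT	27
#define NS_TO_S27_MULT	(((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
#define NS_TO_S27_SHIFT	(63 - S27_TO_NS_SHIFT)

static uint32_t ns_to_s27(uint32_t ns)
{
	/* minor = round(ns * 2^27 / 10^9), done as a multiply and shift */
	return (uint32_t)(((uint64_t)ns * NS_TO_S27_MULT +
			   (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
}

static uint32_t s27_to_ns(uint32_t minor)
{
	/* ns = round(minor * 10^9 / 2^27) */
	return (uint32_t)(((uint64_t)minor * NSEC_PER_SEC +
			   (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
}

int main(void)
{
	uint32_t ns = 123456789;
	uint32_t minor = ns_to_s27(ns);

	printf("%u ns -> %u ticks -> %u ns\n", ns, minor, s27_to_ns(minor));
	return 0;
}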
+ */ + MCDI_SET_DWORD(inbuf, PTP_IN_OP, + MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc == 0) { + efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); + efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE); + efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT); + efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf, + PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN); + } else if (rc == -EINVAL) { + efx->ptp_data->ts_corrections.tx = 0; + efx->ptp_data->ts_corrections.rx = 0; + efx->ptp_data->ts_corrections.pps_out = 0; + efx->ptp_data->ts_corrections.pps_in = 0; + } else { + return rc; + } + + return 0; +} + +/* Enable MCDI PTP support. */ +static int efx_ptp_enable(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, + efx->ptp_data->channel ? + efx->ptp_data->channel->channel : 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 0 : rc; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_ENABLE_LEN, + outbuf, sizeof(outbuf), rc); + return rc; +} + +/* Disable MCDI PTP support. + * + * Note that this function should never rely on the presence of ptp_data - + * may be called before that exists. + */ +static int efx_ptp_disable(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); + MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + rc = (rc == -EALREADY) ? 0 : rc; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_DISABLE_LEN, + outbuf, sizeof(outbuf), rc); + return rc; +} + +static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(q))) { + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); + } +} + +static void efx_ptp_handle_no_channel(struct efx_nic *efx) +{ + netif_err(efx, drv, efx->net_dev, + "ERROR: PTP requires MSI-X and 1 additional interrupt" + "vector. PTP disabled\n"); +} + +/* Repeatedly send the host time to the MC which will capture the hardware + * time. 
+ */ +static void efx_ptp_send_times(struct efx_nic *efx, + struct pps_event_time *last_time) +{ + struct pps_event_time now; + struct timespec limit; + struct efx_ptp_data *ptp = efx->ptp_data; + struct timespec start; + int *mc_running = ptp->start.addr; + + pps_get_ts(&now); + start = now.ts_real; + limit = now.ts_real; + timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); + + /* Write host time for specified period or until MC is done */ + while ((timespec_compare(&now.ts_real, &limit) < 0) && + ACCESS_ONCE(*mc_running)) { + struct timespec update_time; + unsigned int host_time; + + /* Don't update continuously to avoid saturating the PCIe bus */ + update_time = now.ts_real; + timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); + do { + pps_get_ts(&now); + } while ((timespec_compare(&now.ts_real, &update_time) < 0) && + ACCESS_ONCE(*mc_running)); + + /* Synchronise NIC with single word of time only */ + host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | + now.ts_real.tv_nsec); + /* Update host time in NIC memory */ + efx->type->ptp_write_host_time(efx, host_time); + } + *last_time = now; +} + +/* Read a timeset from the MC's results and partial process. */ +static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data), + struct efx_ptp_timeset *timeset) +{ + unsigned start_ns, end_ns; + + timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); + timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR); + timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR); + timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND), + timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); + + /* Ignore seconds */ + start_ns = timeset->host_start & MC_NANOSECOND_MASK; + end_ns = timeset->host_end & MC_NANOSECOND_MASK; + /* Allow for rollover */ + if (end_ns < start_ns) + end_ns += NSEC_PER_SEC; + /* Determine duration of operation */ + timeset->window = end_ns - start_ns; +} + +/* Process times received from MC. + * + * Extract times from returned results, and establish the minimum value + * seen. The minimum value represents the "best" possible time and events + * too much greater than this are rejected - the machine is, perhaps, too + * busy. A number of readings are taken so that, hopefully, at least one good + * synchronisation will be seen in the results. + */ +static int +efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), + size_t response_length, + const struct pps_event_time *last_time) +{ + unsigned number_readings = + MCDI_VAR_ARRAY_LEN(response_length, + PTP_OUT_SYNCHRONIZE_TIMESET); + unsigned i; + unsigned ngood = 0; + unsigned last_good = 0; + struct efx_ptp_data *ptp = efx->ptp_data; + u32 last_sec; + u32 start_sec; + struct timespec delta; + ktime_t mc_time; + + if (number_readings == 0) + return -EAGAIN; + + /* Read the set of results and find the last good host-MC + * synchronization result. The MC times when it finishes reading the + * host time so the corrected window time should be fairly constant + * for a given platform. Increment stats for any results that appear + * to be erroneous. 
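/* Illustrative sketch (not part of the driver sources): the single 32-bit
 * host-time word written by efx_ptp_send_times() above packs the two
 * low-order bits of the seconds count above a 30-bit nanosecond field
 * (10^9 < 2^30), matching MC_NANOSECOND_BITS/MC_NANOSECOND_MASK.  The
 * truncated seconds are still enough to detect a wrap of at most one
 * second between nearby readings.  Standalone C, names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define NANOSECOND_BITS	30
#define NANOSECOND_MASK	((1U << NANOSECOND_BITS) - 1)

static uint32_t pack_host_time(uint64_t sec, uint32_t nsec)
{
	/* Only sec % 4 survives in the top two bits of the word */
	return ((uint32_t)sec << NANOSECOND_BITS) | nsec;
}

int main(void)
{
	uint32_t word = pack_host_time(1438790645ULL, 123456789);

	printf("sec mod 4 = %u, nsec = %u\n",
	       word >> NANOSECOND_BITS, word & NANOSECOND_MASK);
	return 0;	/* 1438790645 % 4 == 1 */
}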
+ */ + for (i = 0; i < number_readings; i++) { + s32 window, corrected; + struct timespec wait; + + efx_ptp_read_timeset( + MCDI_ARRAY_STRUCT_PTR(synch_buf, + PTP_OUT_SYNCHRONIZE_TIMESET, i), + &ptp->timeset[i]); + + wait = ktime_to_timespec( + ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0)); + window = ptp->timeset[i].window; + corrected = window - wait.tv_nsec; + + /* We expect the uncorrected synchronization window to be at + * least as large as the interval between host start and end + * times. If it is smaller than this then this is mostly likely + * to be a consequence of the host's time being adjusted. + * Check that the corrected sync window is in a reasonable + * range. If it is out of range it is likely to be because an + * interrupt or other delay occurred between reading the system + * time and writing it to MC memory. + */ + if (window < SYNCHRONISATION_GRANULARITY_NS) { + ++ptp->invalid_sync_windows; + } else if (corrected >= MAX_SYNCHRONISATION_NS) { + ++ptp->oversize_sync_windows; + } else if (corrected < ptp->min_synchronisation_ns) { + ++ptp->undersize_sync_windows; + } else { + ngood++; + last_good = i; + } + } + + if (ngood == 0) { + netif_warn(efx, drv, efx->net_dev, + "PTP no suitable synchronisations\n"); + return -EAGAIN; + } + + /* Calculate delay from last good sync (host time) to last_time. + * It is possible that the seconds rolled over between taking + * the start reading and the last value written by the host. The + * timescales are such that a gap of more than one second is never + * expected. delta is *not* normalised. + */ + start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS; + last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK; + if (start_sec != last_sec && + ((start_sec + 1) & MC_SECOND_MASK) != last_sec) { + netif_warn(efx, hw, efx->net_dev, + "PTP bad synchronisation seconds\n"); + return -EAGAIN; + } + delta.tv_sec = (last_sec - start_sec) & 1; + delta.tv_nsec = + last_time->ts_real.tv_nsec - + (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK); + + /* Convert the NIC time at last good sync into kernel time. + * No correction is required - this time is the output of a + * firmware process. 
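/* Illustrative sketch (not part of the driver sources): the per-reading
 * quality checks applied in the loop above, restated as a pure function.
 * The numeric thresholds mirror SYNCHRONISATION_GRANULARITY_NS and
 * MAX_SYNCHRONISATION_NS; min_ns is the firmware-reported minimum.
 * Names are illustrative.
 */
#include <stdint.h>

enum sync_window_class {
	SYNC_WINDOW_INVALID,	/* shorter than one host-time update */
	SYNC_WINDOW_OVERSIZE,	/* likely interrupted mid-synchronisation */
	SYNC_WINDOW_UNDERSIZE,	/* corrected window below the firmware minimum */
	SYNC_WINDOW_GOOD,
};

enum sync_window_class
classify_sync_window(int32_t window_ns, int32_t wait_ns, int32_t min_ns)
{
	int32_t corrected = window_ns - wait_ns;

	if (window_ns < 200)		/* SYNCHRONISATION_GRANULARITY_NS */
		return SYNC_WINDOW_INVALID;
	if (corrected >= 1000)		/* MAX_SYNCHRONISATION_NS */
		return SYNC_WINDOW_OVERSIZE;
	if (corrected < min_ns)
		return SYNC_WINDOW_UNDERSIZE;
	return SYNC_WINDOW_GOOD;
}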
+ */ + mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major, + ptp->timeset[last_good].minor, 0); + + /* Calculate delay from NIC top of second to last_time */ + delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec; + + /* Set PPS timestamp to match NIC top of second */ + ptp->host_time_pps = *last_time; + pps_sub_ts(&ptp->host_time_pps, delta); + + return 0; +} + +/* Synchronize times between the host and the MC */ +static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX); + size_t response_length; + int rc; + unsigned long timeout; + struct pps_event_time last_time = {}; + unsigned int loops = 0; + int *start = ptp->start.addr; + + MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); + MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, + num_readings); + MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR, + ptp->start.dma_addr); + + /* Clear flag that signals MC ready */ + ACCESS_ONCE(*start) = 0; + rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + EFX_BUG_ON_PARANOID(rc); + + /* Wait for start from MCDI (or timeout) */ + timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); + while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) { + udelay(20); /* Usually start MCDI execution quickly */ + loops++; + } + + if (loops <= 1) + ++ptp->fast_syncs; + if (!time_before(jiffies, timeout)) + ++ptp->sync_timeouts; + + if (ACCESS_ONCE(*start)) + efx_ptp_send_times(efx, &last_time); + + /* Collect results */ + rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN, + synch_buf, sizeof(synch_buf), + &response_length); + if (rc == 0) { + rc = efx_ptp_process_times(efx, synch_buf, response_length, + &last_time); + if (rc == 0) + ++ptp->good_syncs; + else + ++ptp->no_time_syncs; + } + + /* Increment the bad syncs counter if the synchronize fails, whatever + * the reason. + */ + if (rc != 0) + ++ptp->bad_syncs; + + return rc; +} + +/* Transmit a PTP packet, via the MCDI interface, to the wire. 
*/ +static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp_data = efx->ptp_data; + struct skb_shared_hwtstamps timestamps; + int rc = -EIO; + MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN); + size_t len; + + MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); + MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); + if (skb_shinfo(skb)->nr_frags != 0) { + rc = skb_linearize(skb); + if (rc != 0) + goto fail; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + rc = skb_checksum_help(skb); + if (rc != 0) + goto fail; + } + skb_copy_from_linear_data(skb, + MCDI_PTR(ptp_data->txbuf, + PTP_IN_TRANSMIT_PACKET), + skb->len); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, + ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), + txtime, sizeof(txtime), &len); + if (rc != 0) + goto fail; + + memset(×tamps, 0, sizeof(timestamps)); + timestamps.hwtstamp = ptp_data->nic_to_kernel_time( + MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR), + MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR), + ptp_data->ts_corrections.tx); + + skb_tstamp_tx(skb, ×tamps); + + rc = 0; + +fail: + dev_kfree_skb(skb); + + return rc; +} + +static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct list_head *cursor; + struct list_head *next; + + if (ptp->rx_ts_inline) + return; + + /* Drop time-expired events */ + spin_lock_bh(&ptp->evt_lock); + if (!list_empty(&ptp->evt_list)) { + list_for_each_safe(cursor, next, &ptp->evt_list) { + struct efx_ptp_event_rx *evt; + + evt = list_entry(cursor, struct efx_ptp_event_rx, + link); + if (time_after(jiffies, evt->expiry)) { + list_move(&evt->link, &ptp->evt_free_list); + netif_warn(efx, hw, efx->net_dev, + "PTP rx event dropped\n"); + } + } + } + spin_unlock_bh(&ptp->evt_lock); +} + +static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, + struct sk_buff *skb) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + bool evts_waiting; + struct list_head *cursor; + struct list_head *next; + struct efx_ptp_match *match; + enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED; + + WARN_ON_ONCE(ptp->rx_ts_inline); + + spin_lock_bh(&ptp->evt_lock); + evts_waiting = !list_empty(&ptp->evt_list); + spin_unlock_bh(&ptp->evt_lock); + + if (!evts_waiting) + return PTP_PACKET_STATE_UNMATCHED; + + match = (struct efx_ptp_match *)skb->cb; + /* Look for a matching timestamp in the event queue */ + spin_lock_bh(&ptp->evt_lock); + list_for_each_safe(cursor, next, &ptp->evt_list) { + struct efx_ptp_event_rx *evt; + + evt = list_entry(cursor, struct efx_ptp_event_rx, link); + if ((evt->seq0 == match->words[0]) && + (evt->seq1 == match->words[1])) { + struct skb_shared_hwtstamps *timestamps; + + /* Match - add in hardware timestamp */ + timestamps = skb_hwtstamps(skb); + timestamps->hwtstamp = evt->hwtimestamp; + + match->state = PTP_PACKET_STATE_MATCHED; + rc = PTP_PACKET_STATE_MATCHED; + list_move(&evt->link, &ptp->evt_free_list); + break; + } + } + spin_unlock_bh(&ptp->evt_lock); + + return rc; +} + +/* Process any queued receive events and corresponding packets + * + * q is returned with all the packets that are ready for delivery. 
+ */ +static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&ptp->rxq))) { + struct efx_ptp_match *match; + + match = (struct efx_ptp_match *)skb->cb; + if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) { + __skb_queue_tail(q, skb); + } else if (efx_ptp_match_rx(efx, skb) == + PTP_PACKET_STATE_MATCHED) { + __skb_queue_tail(q, skb); + } else if (time_after(jiffies, match->expiry)) { + match->state = PTP_PACKET_STATE_TIMED_OUT; + ++ptp->rx_no_timestamp; + __skb_queue_tail(q, skb); + } else { + /* Replace unprocessed entry and stop */ + skb_queue_head(&ptp->rxq, skb); + break; + } + } +} + +/* Complete processing of a received packet */ +static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb) +{ + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); +} + +static void efx_ptp_remove_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + if (ptp->rxfilter_installed) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_general); + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + ptp->rxfilter_installed = false; + } +} + +static int efx_ptp_insert_multicast_filters(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_filter_spec rxfilter; + int rc; + + if (!ptp->channel || ptp->rxfilter_installed) + return 0; + + /* Must filter on both event and general ports to ensure + * that there is no packet re-ordering. + */ + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_EVENT_PORT)); + if (rc != 0) + return rc; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + return rc; + ptp->rxfilter_event = rc; + + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_GENERAL_PORT)); + if (rc != 0) + goto fail; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + goto fail; + ptp->rxfilter_general = rc; + + ptp->rxfilter_installed = true; + return 0; + +fail: + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + return rc; +} + +static int efx_ptp_start(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc; + + ptp->reset_required = false; + + rc = efx_ptp_insert_multicast_filters(efx); + if (rc) + return rc; + + rc = efx_ptp_enable(efx); + if (rc != 0) + goto fail; + + ptp->evt_frag_idx = 0; + ptp->current_adjfreq = 0; + + return 0; + +fail: + efx_ptp_remove_multicast_filters(efx); + return rc; +} + +static int efx_ptp_stop(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct list_head *cursor; + struct list_head *next; + int rc; + + if (ptp == NULL) + return 0; + + rc = efx_ptp_disable(efx); + + efx_ptp_remove_multicast_filters(efx); + + /* Make sure RX packets are really delivered */ + efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); + + /* Drop any pending receive events */ + spin_lock_bh(&efx->ptp_data->evt_lock); + list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { + list_move(cursor, &efx->ptp_data->evt_free_list); + } + 
spin_unlock_bh(&efx->ptp_data->evt_lock); + + return rc; +} + +static int efx_ptp_restart(struct efx_nic *efx) +{ + if (efx->ptp_data && efx->ptp_data->enabled) + return efx_ptp_start(efx); + return 0; +} + +static void efx_ptp_pps_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp = + container_of(work, struct efx_ptp_data, pps_work); + struct efx_nic *efx = ptp->efx; + struct ptp_clock_event ptp_evt; + + if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) + return; + + ptp_evt.type = PTP_CLOCK_PPSUSR; + ptp_evt.pps_times = ptp->host_time_pps; + ptp_clock_event(ptp->phc_clock, &ptp_evt); +} + +static void efx_ptp_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp_data = + container_of(work, struct efx_ptp_data, work); + struct efx_nic *efx = ptp_data->efx; + struct sk_buff *skb; + struct sk_buff_head tempq; + + if (ptp_data->reset_required) { + efx_ptp_stop(efx); + efx_ptp_start(efx); + return; + } + + efx_ptp_drop_time_expired_events(efx); + + __skb_queue_head_init(&tempq); + efx_ptp_process_events(efx, &tempq); + + while ((skb = skb_dequeue(&ptp_data->txq))) + efx_ptp_xmit_skb(efx, skb); + + while ((skb = __skb_dequeue(&tempq))) + efx_ptp_process_rx(efx, skb); +} + +static const struct ptp_clock_info efx_phc_clock_info = { + .owner = THIS_MODULE, + .name = "sfc", + .max_adj = MAX_PPB, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfreq = efx_phc_adjfreq, + .adjtime = efx_phc_adjtime, + .gettime64 = efx_phc_gettime, + .settime64 = efx_phc_settime, + .enable = efx_phc_enable, +}; + +/* Initialise PTP state. */ +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel) +{ + struct efx_ptp_data *ptp; + int rc = 0; + unsigned int pos; + + ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL); + efx->ptp_data = ptp; + if (!efx->ptp_data) + return -ENOMEM; + + ptp->efx = efx; + ptp->channel = channel; + ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; + + rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL); + if (rc != 0) + goto fail1; + + skb_queue_head_init(&ptp->rxq); + skb_queue_head_init(&ptp->txq); + ptp->workwq = create_singlethread_workqueue("sfc_ptp"); + if (!ptp->workwq) { + rc = -ENOMEM; + goto fail2; + } + + INIT_WORK(&ptp->work, efx_ptp_worker); + ptp->config.flags = 0; + ptp->config.tx_type = HWTSTAMP_TX_OFF; + ptp->config.rx_filter = HWTSTAMP_FILTER_NONE; + INIT_LIST_HEAD(&ptp->evt_list); + INIT_LIST_HEAD(&ptp->evt_free_list); + spin_lock_init(&ptp->evt_lock); + for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) + list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); + + /* Get the NIC PTP attributes and set up time conversions */ + rc = efx_ptp_get_attributes(efx); + if (rc < 0) + goto fail3; + + /* Get the timestamp corrections */ + rc = efx_ptp_get_timestamp_corrections(efx); + if (rc < 0) + goto fail3; + + if (efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) { + ptp->phc_clock_info = efx_phc_clock_info; + ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, + &efx->pci_dev->dev); + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); + goto fail3; + } + + INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); + ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); + if (!ptp->pps_workwq) { + rc = -ENOMEM; + goto fail4; + } + } + ptp->nic_ts_enabled = false; + + return 0; +fail4: + ptp_clock_unregister(efx->ptp_data->phc_clock); + +fail3: + destroy_workqueue(efx->ptp_data->workwq); + +fail2: + efx_nic_free_buffer(efx, &ptp->start); + +fail1: + 
kfree(efx->ptp_data); + efx->ptp_data = NULL; + + return rc; +} + +/* Initialise PTP channel. + * + * Setting core_index to zero causes the queue to be initialised and doesn't + * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue. + */ +static int efx_ptp_probe_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + channel->irq_moderation = 0; + channel->rx_queue.core_index = 0; + + return efx_ptp_probe(efx, channel); +} + +void efx_ptp_remove(struct efx_nic *efx) +{ + if (!efx->ptp_data) + return; + + (void)efx_ptp_disable(efx); + + cancel_work_sync(&efx->ptp_data->work); + cancel_work_sync(&efx->ptp_data->pps_work); + + skb_queue_purge(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); + + if (efx->ptp_data->phc_clock) { + destroy_workqueue(efx->ptp_data->pps_workwq); + ptp_clock_unregister(efx->ptp_data->phc_clock); + } + + destroy_workqueue(efx->ptp_data->workwq); + + efx_nic_free_buffer(efx, &efx->ptp_data->start); + kfree(efx->ptp_data); +} + +static void efx_ptp_remove_channel(struct efx_channel *channel) +{ + efx_ptp_remove(channel->efx); +} + +static void efx_ptp_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-ptp", channel->efx->name); +} + +/* Determine whether this packet should be processed by the PTP module + * or transmitted conventionally. + */ +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + return efx->ptp_data && + efx->ptp_data->enabled && + skb->len >= PTP_MIN_LENGTH && + skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && + likely(skb->protocol == htons(ETH_P_IP)) && + skb_transport_header_was_set(skb) && + skb_network_header_len(skb) >= sizeof(struct iphdr) && + ip_hdr(skb)->protocol == IPPROTO_UDP && + skb_headlen(skb) >= + skb_transport_offset(skb) + sizeof(struct udphdr) && + udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); +} + +/* Receive a PTP packet. Packets are queued until the arrival of + * the receive timestamp from the MC - this will probably occur after the + * packet arrival because of the processing in the MC. + */ +static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; + u8 *match_data_012, *match_data_345; + unsigned int version; + u8 *data; + + match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + + /* Correct version? */ + if (ptp->mode == MC_CMD_PTP_MODE_V1) { + if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { + return false; + } + data = skb->data; + version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]); + if (version != PTP_VERSION_V1) { + return false; + } + + /* PTP V1 uses all six bytes of the UUID to match the packet + * to the timestamp + */ + match_data_012 = data + PTP_V1_UUID_OFFSET; + match_data_345 = data + PTP_V1_UUID_OFFSET + 3; + } else { + if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { + return false; + } + data = skb->data; + version = data[PTP_V2_VERSION_OFFSET]; + if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { + return false; + } + + /* The original V2 implementation uses bytes 2-7 of + * the UUID to match the packet to the timestamp. This + * discards two of the bytes of the MAC address used + * to create the UUID (SF bug 33070). The PTP V2 + * enhanced mode fixes this issue and uses bytes 0-2 + * and byte 5-7 of the UUID. 
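/* Illustrative sketch (not part of the driver sources): the kind of test
 * efx_ptp_is_ptp_tx() above performs, restated over a raw Ethernet frame:
 * IPv4, UDP, destination port 319 (the PTP event port).  Standalone C with
 * hypothetical names; no VLAN or IPv6 handling.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PTP_EVENT_UDP_PORT	319

bool is_ptp_event_frame(const uint8_t *frame, size_t len)
{
	size_t ihl;

	if (len < 14 + 20 + 8)				/* eth + IP + UDP */
		return false;
	if (frame[12] != 0x08 || frame[13] != 0x00)	/* EtherType IPv4 */
		return false;
	ihl = (frame[14] & 0x0f) * 4;			/* IP header length */
	if (ihl < 20 || len < 14 + ihl + 8)
		return false;
	if (frame[23] != 17)				/* IPPROTO_UDP */
		return false;
	/* UDP destination port, big endian */
	return ((frame[14 + ihl + 2] << 8) | frame[14 + ihl + 3]) ==
	       PTP_EVENT_UDP_PORT;
}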
+ */ + match_data_345 = data + PTP_V2_UUID_OFFSET + 5; + if (ptp->mode == MC_CMD_PTP_MODE_V2) { + match_data_012 = data + PTP_V2_UUID_OFFSET + 2; + } else { + match_data_012 = data + PTP_V2_UUID_OFFSET + 0; + BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); + } + } + + /* Does this packet require timestamping? */ + if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { + match->state = PTP_PACKET_STATE_UNMATCHED; + + /* We expect the sequence number to be in the same position in + * the packet for PTP V1 and V2 + */ + BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); + BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); + + /* Extract UUID/Sequence information */ + match->words[0] = (match_data_012[0] | + (match_data_012[1] << 8) | + (match_data_012[2] << 16) | + (match_data_345[0] << 24)); + match->words[1] = (match_data_345[1] | + (match_data_345[2] << 8) | + (data[PTP_V1_SEQUENCE_OFFSET + + PTP_V1_SEQUENCE_LENGTH - 1] << + 16)); + } else { + match->state = PTP_PACKET_STATE_MATCH_UNWANTED; + } + + skb_queue_tail(&ptp->rxq, skb); + queue_work(ptp->workwq, &ptp->work); + + return true; +} + +/* Transmit a PTP packet. This has to be transmitted by the MC + * itself, through an MCDI call. MCDI calls aren't permitted + * in the transmit path so defer the actual transmission to a suitable worker. + */ +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + skb_queue_tail(&ptp->txq, skb); + + if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) && + (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM)) + efx_xmit_hwtstamp_pending(skb); + queue_work(ptp->workwq, &ptp->work); + + return NETDEV_TX_OK; +} + +int efx_ptp_get_mode(struct efx_nic *efx) +{ + return efx->ptp_data->mode; +} + +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode) +{ + if ((enable_wanted != efx->ptp_data->enabled) || + (enable_wanted && (efx->ptp_data->mode != new_mode))) { + int rc = 0; + + if (enable_wanted) { + /* Change of mode requires disable */ + if (efx->ptp_data->enabled && + (efx->ptp_data->mode != new_mode)) { + efx->ptp_data->enabled = false; + rc = efx_ptp_stop(efx); + if (rc != 0) + return rc; + } + + /* Set new operating mode and establish + * baseline synchronisation, which must + * succeed. 
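/* Illustrative sketch (not part of the driver sources): how efx_ptp_rx()
 * above condenses the six captured UUID bytes plus the low byte of the
 * sequence number into the two 32-bit words that are later compared
 * against the seq0/seq1 fields of the MC timestamp event.  Standalone C,
 * function and parameter names are illustrative.
 */
#include <stdint.h>

void pack_ptp_match_words(const uint8_t uuid[6], uint8_t seq_lsb,
			  uint32_t words[2])
{
	words[0] = (uint32_t)uuid[0] |
		   (uint32_t)uuid[1] << 8 |
		   (uint32_t)uuid[2] << 16 |
		   (uint32_t)uuid[3] << 24;
	words[1] = (uint32_t)uuid[4] |
		   (uint32_t)uuid[5] << 8 |
		   (uint32_t)seq_lsb << 16;
}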
+ */ + efx->ptp_data->mode = new_mode; + if (netif_running(efx->net_dev)) + rc = efx_ptp_start(efx); + if (rc == 0) { + rc = efx_ptp_synchronize(efx, + PTP_SYNC_ATTEMPTS * 2); + if (rc != 0) + efx_ptp_stop(efx); + } + } else { + rc = efx_ptp_stop(efx); + } + + if (rc != 0) + return rc; + + efx->ptp_data->enabled = enable_wanted; + } + + return 0; +} + +static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) +{ + int rc; + + if (init->flags) + return -EINVAL; + + if ((init->tx_type != HWTSTAMP_TX_OFF) && + (init->tx_type != HWTSTAMP_TX_ON)) + return -ERANGE; + + rc = efx->type->ptp_set_ts_config(efx, init); + if (rc) + return rc; + + efx->ptp_data->config = *init; + return 0; +} + +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_nic *primary = efx->primary; + + ASSERT_RTNL(); + + if (!ptp) + return; + + ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); + if (primary && primary->ptp_data && primary->ptp_data->phc_clock) + ts_info->phc_index = + ptp_clock_index(primary->ptp_data->phc_clock); + ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; + ts_info->rx_filters = ptp->efx->type->hwtstamp_filters; +} + +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int rc; + + /* Not a PTP enabled port */ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + rc = efx_ptp_ts_init(efx, &config); + if (rc != 0) + return rc; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) + ? -EFAULT : 0; +} + +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr) +{ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, &efx->ptp_data->config, + sizeof(efx->ptp_data->config)) ? -EFAULT : 0; +} + +static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + netif_err(efx, hw, efx->net_dev, + "PTP unexpected event length: got %d expected %d\n", + ptp->evt_frag_idx, expected_frag_len); + ptp->reset_required = true; + queue_work(ptp->workwq, &ptp->work); +} + +/* Process a completed receive event. Put it on the event queue and + * start worker thread. This is required because event and their + * correspoding packets may come in either order. 
+ */ +static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + struct efx_ptp_event_rx *evt = NULL; + + if (WARN_ON_ONCE(ptp->rx_ts_inline)) + return; + + if (ptp->evt_frag_idx != 3) { + ptp_event_failure(efx, 3); + return; + } + + spin_lock_bh(&ptp->evt_lock); + if (!list_empty(&ptp->evt_free_list)) { + evt = list_first_entry(&ptp->evt_free_list, + struct efx_ptp_event_rx, link); + list_del(&evt->link); + + evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA); + evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2], + MCDI_EVENT_SRC) | + (EFX_QWORD_FIELD(ptp->evt_frags[1], + MCDI_EVENT_SRC) << 8) | + (EFX_QWORD_FIELD(ptp->evt_frags[0], + MCDI_EVENT_SRC) << 16)); + evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time( + EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), + EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA), + ptp->ts_corrections.rx); + evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + list_add_tail(&evt->link, &ptp->evt_list); + + queue_work(ptp->workwq, &ptp->work); + } else if (net_ratelimit()) { + /* Log a rate-limited warning message. */ + netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); + } + spin_unlock_bh(&ptp->evt_lock); +} + +static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA); + if (ptp->evt_frag_idx != 1) { + ptp_event_failure(efx, 1); + return; + } + + netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code); +} + +static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + if (ptp->nic_ts_enabled) + queue_work(ptp->pps_workwq, &ptp->pps_work); +} + +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); + + if (!ptp) { + if (net_ratelimit()) + netif_warn(efx, drv, efx->net_dev, + "Received PTP event but PTP not set up\n"); + return; + } + + if (!ptp->enabled) + return; + + if (ptp->evt_frag_idx == 0) { + ptp->evt_code = code; + } else if (ptp->evt_code != code) { + netif_err(efx, hw, efx->net_dev, + "PTP out of sequence event %d\n", code); + ptp->evt_frag_idx = 0; + } + + ptp->evt_frags[ptp->evt_frag_idx++] = *ev; + if (!MCDI_EVENT_FIELD(*ev, CONT)) { + /* Process resulting event */ + switch (code) { + case MCDI_EVENT_CODE_PTP_RX: + ptp_event_rx(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_FAULT: + ptp_event_fault(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_PPS: + ptp_event_pps(efx, ptp); + break; + default: + netif_err(efx, hw, efx->net_dev, + "PTP unknown event %d\n", code); + break; + } + ptp->evt_frag_idx = 0; + } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) { + netif_err(efx, hw, efx->net_dev, + "PTP too many event fragments\n"); + ptp->evt_frag_idx = 0; + } +} + +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev) +{ + channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR); + channel->sync_timestamp_minor = + MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19; + /* if sync events have been disabled then we want to silently ignore + * this event, so throw away result. + */ + (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED, + SYNC_EVENTS_VALID); +} + +/* make some assumptions about the time representation rather than abstract it, + * since we currently only support one type of inline timestamping and only on + * EF10. 
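/* Illustrative sketch (not part of the driver sources): the companion of
 * the packet-side packing shown earlier.  ptp_event_rx() above rebuilds
 * the same compare words from the three MCDI event fragments: seq0 is the
 * 32-bit DATA field of the last fragment, and seq1 combines the 8-bit SRC
 * fields of the three fragments as below.  Names are illustrative.
 */
#include <stdint.h>

uint32_t pack_event_seq1(uint8_t src_frag2, uint8_t src_frag1,
			 uint8_t src_frag0)
{
	return (uint32_t)src_frag2 |
	       (uint32_t)src_frag1 << 8 |
	       (uint32_t)src_frag0 << 16;
}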
+ */ +#define MINOR_TICKS_PER_SECOND 0x8000000 +/* Fuzz factor for sync events to be out of order with RX events */ +#define FUZZ (MINOR_TICKS_PER_SECOND / 10) +#define EXPECTED_SYNC_EVENTS_PER_SECOND 4 + +static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset)); +#else + const u8 *data = eh + efx->rx_packet_ts_offset; + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + u32 pkt_timestamp_major, pkt_timestamp_minor; + u32 diff, carry; + struct skb_shared_hwtstamps *timestamps; + + pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx, + skb_mac_header(skb)) + + (u32) efx->ptp_data->ts_corrections.rx) & + (MINOR_TICKS_PER_SECOND - 1); + + /* get the difference between the packet and sync timestamps, + * modulo one second + */ + diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) & + (MINOR_TICKS_PER_SECOND - 1); + /* do we roll over a second boundary and need to carry the one? */ + carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ? + 1 : 0; + + if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND + + FUZZ) { + /* packet is ahead of the sync event by a quarter of a second or + * less (allowing for fuzz) + */ + pkt_timestamp_major = channel->sync_timestamp_major + carry; + } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) { + /* packet is behind the sync event but within the fuzz factor. + * This means the RX packet and sync event crossed as they were + * placed on the event queue, which can sometimes happen. + */ + pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry; + } else { + /* it's outside tolerance in both directions. this might be + * indicative of us missing sync events for some reason, so + * we'll call it an error rather than risk giving a bogus + * timestamp. + */ + netif_vdbg(efx, drv, efx->net_dev, + "packet timestamp %x too far from sync event %x:%x\n", + pkt_timestamp_minor, channel->sync_timestamp_major, + channel->sync_timestamp_minor); + return; + } + + /* attach the timestamps to the skb */ + timestamps = skb_hwtstamps(skb); + timestamps->hwtstamp = + efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor); +} + +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); + s64 adjustment_ns; + int rc; + + if (delta > MAX_PPB) + delta = MAX_PPB; + else if (delta < -MAX_PPB) + delta = -MAX_PPB; + + /* Convert ppb to fixed point ns. 
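/* Illustrative sketch (not part of the driver sources): the major-value
 * reconstruction performed by __efx_rx_skb_attach_timestamp() above for
 * inline RX timestamps.  Both minor values are in 2^-27 s ticks; a packet
 * is accepted if it falls within a quarter second after the last sync
 * event, or just before it (the two crossed on the event queue), with a
 * 10% fuzz factor.  Returns false when the packet is out of tolerance.
 * Standalone C with hypothetical names.
 */
#include <stdbool.h>
#include <stdint.h>

#define TICKS_PER_SECOND	0x8000000U	/* 2^27 */
#define SYNC_FUZZ		(TICKS_PER_SECOND / 10)
#define SYNCS_PER_SECOND	4

bool reconstruct_major(uint32_t sync_major, uint32_t sync_minor,
		       uint32_t pkt_minor, uint32_t *pkt_major)
{
	uint32_t diff = (pkt_minor - sync_minor) & (TICKS_PER_SECOND - 1);
	uint32_t carry = sync_minor + diff > TICKS_PER_SECOND ? 1 : 0;

	if (diff <= TICKS_PER_SECOND / SYNCS_PER_SECOND + SYNC_FUZZ)
		*pkt_major = sync_major + carry;	/* packet after sync */
	else if (diff >= TICKS_PER_SECOND - SYNC_FUZZ)
		*pkt_major = sync_major - 1 + carry;	/* crossed on the EVQ */
	else
		return false;				/* missed sync events? */
	return true;
}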
*/ + adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >> + (PPB_EXTRA_BITS + MAX_PPB_BITS)); + + MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), + NULL, 0, NULL); + if (rc != 0) + return rc; + + ptp_data->current_adjfreq = adjustment_ns; + return 0; +} + +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + u32 nic_major, nic_minor; + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN); + + efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor); + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor); + return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->efx; + MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN); + int rc; + ktime_t kt; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc != 0) + return rc; + + kt = ptp_data->nic_to_kernel_time( + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR), + MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0); + *ts = ktime_to_timespec64(kt); + return 0; +} + +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *e_ts) +{ + /* Get the current NIC time, efx_phc_gettime. + * Subtract from the desired time to get the offset + * call efx_phc_adjtime with the offset + */ + int rc; + struct timespec64 time_now; + struct timespec64 delta; + + rc = efx_phc_gettime(ptp, &time_now); + if (rc != 0) + return rc; + + delta = timespec64_sub(*e_ts, time_now); + + rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta)); + if (rc != 0) + return rc; + + return 0; +} + +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, + int enable) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + if (request->type != PTP_CLK_REQ_PPS) + return -EOPNOTSUPP; + + ptp_data->nic_ts_enabled = !!enable; + return 0; +} + +static const struct efx_channel_type efx_ptp_channel_type = { + .handle_no_channel = efx_ptp_handle_no_channel, + .pre_probe = efx_ptp_probe_channel, + .post_remove = efx_ptp_remove_channel, + .get_name = efx_ptp_get_channel_name, + /* no copy operation; there is no need to reallocate this channel */ + .receive_skb = efx_ptp_rx, + .keep_eventq = false, +}; + +void efx_ptp_defer_probe_with_channel(struct efx_nic *efx) +{ + /* Check whether PTP is implemented on this NIC. The DISABLE + * operation will succeed if and only if it is implemented. 
+ */ + if (efx_ptp_disable(efx) == 0) + efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = + &efx_ptp_channel_type; +} + +void efx_ptp_start_datapath(struct efx_nic *efx) +{ + if (efx_ptp_restart(efx)) + netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n"); + /* re-enable timestamping if it was previously enabled */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, true, true); +} + +void efx_ptp_stop_datapath(struct efx_nic *efx) +{ + /* temporarily disable timestamping */ + if (efx->type->ptp_set_ts_sync_events) + efx->type->ptp_set_ts_sync_events(efx, false, true); + efx_ptp_stop(efx); +} diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c new file mode 100644 index 000000000..efa3612af --- /dev/null +++ b/drivers/net/ethernet/sfc/qt202x_phy.c @@ -0,0 +1,495 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2006-2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +/* + * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details + */ + +#include <linux/slab.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include "efx.h" +#include "mdio_10g.h" +#include "phy.h" +#include "nic.h" + +#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \ + MDIO_DEVS_PMAPMD | \ + MDIO_DEVS_PHYXS) + +#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \ + (1 << LOOPBACK_PMAPMD) | \ + (1 << LOOPBACK_PHYXS_WS)) + +/****************************************************************************/ +/* Quake-specific MDIO registers */ +#define MDIO_QUAKE_LED0_REG (0xD006) + +/* QT2025C only */ +#define PCS_FW_HEARTBEAT_REG 0xd7ee +#define PCS_FW_HEARTB_LBN 0 +#define PCS_FW_HEARTB_WIDTH 8 +#define PCS_FW_PRODUCT_CODE_1 0xd7f0 +#define PCS_FW_VERSION_1 0xd7f3 +#define PCS_FW_BUILD_1 0xd7f6 +#define PCS_UC8051_STATUS_REG 0xd7fd +#define PCS_UC_STATUS_LBN 0 +#define PCS_UC_STATUS_WIDTH 8 +#define PCS_UC_STATUS_FW_SAVE 0x20 +#define PMA_PMD_MODE_REG 0xc301 +#define PMA_PMD_RXIN_SEL_LBN 6 +#define PMA_PMD_FTX_CTRL2_REG 0xc309 +#define PMA_PMD_FTX_STATIC_LBN 13 +#define PMA_PMD_VEND1_REG 0xc001 +#define PMA_PMD_VEND1_LBTXD_LBN 15 +#define PCS_VEND1_REG 0xc000 +#define PCS_VEND1_LBTXD_LBN 5 + +void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode) +{ + int addr = MDIO_QUAKE_LED0_REG + led; + efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); +} + +struct qt202x_phy_data { + enum efx_phy_mode phy_mode; + bool bug17190_in_bad_state; + unsigned long bug17190_timer; + u32 firmware_ver; +}; + +#define QT2022C2_MAX_RESET_TIME 500 +#define QT2022C2_RESET_WAIT 10 + +#define QT2025C_MAX_HEARTB_TIME (5 * HZ) +#define QT2025C_HEARTB_WAIT 100 +#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10) +#define QT2025C_FWSTART_WAIT 100 + +#define BUG17190_INTERVAL (2 * HZ) + +static int qt2025c_wait_heartbeat(struct efx_nic *efx) +{ + unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME; + int reg, old_counter = 0; + + /* Wait for firmware heartbeat to start */ + for (;;) { + int counter; + reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG); + if (reg < 0) + return reg; + counter = ((reg >> PCS_FW_HEARTB_LBN) & + ((1 << PCS_FW_HEARTB_WIDTH) - 1)); + if (old_counter == 0) + old_counter = counter; + else if (counter != old_counter) + break; + if (time_after(jiffies,
timeout)) { + /* Some cables have EEPROMs that conflict with the + * PHY's on-board EEPROM so it cannot load firmware */ + netif_err(efx, hw, efx->net_dev, + "If an SFP+ direct attach cable is" + " connected, please check that it complies" + " with the SFP+ specification\n"); + return -ETIMEDOUT; + } + msleep(QT2025C_HEARTB_WAIT); + } + + return 0; +} + +static int qt2025c_wait_fw_status_good(struct efx_nic *efx) +{ + unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME; + int reg; + + /* Wait for firmware status to look good */ + for (;;) { + reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG); + if (reg < 0) + return reg; + if ((reg & + ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >= + PCS_UC_STATUS_FW_SAVE) + break; + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + msleep(QT2025C_FWSTART_WAIT); + } + + return 0; +} + +static void qt2025c_restart_firmware(struct efx_nic *efx) +{ + /* Restart microcontroller execution of firmware from RAM */ + efx_mdio_write(efx, 3, 0xe854, 0x00c0); + efx_mdio_write(efx, 3, 0xe854, 0x0040); + msleep(50); +} + +static int qt2025c_wait_reset(struct efx_nic *efx) +{ + int rc; + + rc = qt2025c_wait_heartbeat(efx); + if (rc != 0) + return rc; + + rc = qt2025c_wait_fw_status_good(efx); + if (rc == -ETIMEDOUT) { + /* Bug 17689: occasionally heartbeat starts but firmware status + * code never progresses beyond 0x00. Try again, once, after + * restarting execution of the firmware image. */ + netif_dbg(efx, hw, efx->net_dev, + "bashing QT2025C microcontroller\n"); + qt2025c_restart_firmware(efx); + rc = qt2025c_wait_heartbeat(efx); + if (rc != 0) + return rc; + rc = qt2025c_wait_fw_status_good(efx); + } + + return rc; +} + +static void qt2025c_firmware_id(struct efx_nic *efx) +{ + struct qt202x_phy_data *phy_data = efx->phy_data; + u8 firmware_id[9]; + size_t i; + + for (i = 0; i < sizeof(firmware_id); i++) + firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, + PCS_FW_PRODUCT_CODE_1 + i); + netif_info(efx, probe, efx->net_dev, + "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", + (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], + firmware_id[3] >> 4, firmware_id[3] & 0xf, + firmware_id[4], firmware_id[5], + firmware_id[6], firmware_id[7], firmware_id[8]); + phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | + ((firmware_id[3] & 0x0f) << 16) | + (firmware_id[4] << 8) | firmware_id[5]; +} + +static void qt2025c_bug17190_workaround(struct efx_nic *efx) +{ + struct qt202x_phy_data *phy_data = efx->phy_data; + + /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD + * layers up, but PCS down (no block_lock). If we notice this state + * persisting for a couple of seconds, we switch PMA/PMD loopback + * briefly on and then off again, which is normally sufficient to + * recover it. 
+ */ + if (efx->link_state.up || + !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) { + phy_data->bug17190_in_bad_state = false; + return; + } + + if (!phy_data->bug17190_in_bad_state) { + phy_data->bug17190_in_bad_state = true; + phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; + return; + } + + if (time_after_eq(jiffies, phy_data->bug17190_timer)) { + netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n"); + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_PMA_CTRL1_LOOPBACK, true); + msleep(100); + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, + MDIO_PMA_CTRL1_LOOPBACK, false); + phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; + } +} + +static int qt2025c_select_phy_mode(struct efx_nic *efx) +{ + struct qt202x_phy_data *phy_data = efx->phy_data; + struct falcon_board *board = falcon_board(efx); + int reg, rc, i; + uint16_t phy_op_mode; + + /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+ + * Self-Configure mode. Don't attempt any switching if we encounter + * older firmware. */ + if (phy_data->firmware_ver < 0x02000100) + return 0; + + /* In general we will get optimal behaviour in "SFP+ Self-Configure" + * mode; however, that powers down most of the PHY when no module is + * present, so we must use a different mode (any fixed mode will do) + * to be sure that loopbacks will work. */ + phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020; + + /* Only change mode if really necessary */ + reg = efx_mdio_read(efx, 1, 0xc319); + if ((reg & 0x0038) == phy_op_mode) + return 0; + netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n", + phy_op_mode); + + /* This sequence replicates the register writes configured in the boot + * EEPROM (including the differences between board revisions), except + * that the operating mode is changed, and the PHY is prevented from + * unnecessarily reloading the main firmware image again. */ + efx_mdio_write(efx, 1, 0xc300, 0x0000); + /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9 + * STOPs onto the firmware/module I2C bus to reset it, varies across + * board revisions, as the bus is connected to different GPIO/LED + * outputs on the PHY.) */ + if (board->major == 0 && board->minor < 2) { + efx_mdio_write(efx, 1, 0xc303, 0x4498); + for (i = 0; i < 9; i++) { + efx_mdio_write(efx, 1, 0xc303, 0x4488); + efx_mdio_write(efx, 1, 0xc303, 0x4480); + efx_mdio_write(efx, 1, 0xc303, 0x4490); + efx_mdio_write(efx, 1, 0xc303, 0x4498); + } + } else { + efx_mdio_write(efx, 1, 0xc303, 0x0920); + efx_mdio_write(efx, 1, 0xd008, 0x0004); + for (i = 0; i < 9; i++) { + efx_mdio_write(efx, 1, 0xc303, 0x0900); + efx_mdio_write(efx, 1, 0xd008, 0x0005); + efx_mdio_write(efx, 1, 0xc303, 0x0920); + efx_mdio_write(efx, 1, 0xd008, 0x0004); + } + efx_mdio_write(efx, 1, 0xc303, 0x4900); + } + efx_mdio_write(efx, 1, 0xc303, 0x4900); + efx_mdio_write(efx, 1, 0xc302, 0x0004); + efx_mdio_write(efx, 1, 0xc316, 0x0013); + efx_mdio_write(efx, 1, 0xc318, 0x0054); + efx_mdio_write(efx, 1, 0xc319, phy_op_mode); + efx_mdio_write(efx, 1, 0xc31a, 0x0098); + efx_mdio_write(efx, 3, 0x0026, 0x0e00); + efx_mdio_write(efx, 3, 0x0027, 0x0013); + efx_mdio_write(efx, 3, 0x0028, 0xa528); + efx_mdio_write(efx, 1, 0xd006, 0x000a); + efx_mdio_write(efx, 1, 0xd007, 0x0009); + efx_mdio_write(efx, 1, 0xd008, 0x0004); + /* This additional write is not present in the boot EEPROM. 
It + * prevents the PHY's internal boot ROM doing another pointless (and + * slow) reload of the firmware image (the microcontroller's code + * memory is not affected by the microcontroller reset). */ + efx_mdio_write(efx, 1, 0xc317, 0x00ff); + /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware + * restart doesn't reset it. We need to do that ourselves. */ + efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG, + 1 << PMA_PMD_RXIN_SEL_LBN, false); + efx_mdio_write(efx, 1, 0xc300, 0x0002); + msleep(20); + + /* Restart microcontroller execution of firmware from RAM */ + qt2025c_restart_firmware(efx); + + /* Wait for the microcontroller to be ready again */ + rc = qt2025c_wait_reset(efx); + if (rc < 0) { + netif_err(efx, hw, efx->net_dev, + "PHY microcontroller reset during mode switch " + "timed out\n"); + return rc; + } + + return 0; +} + +static int qt202x_reset_phy(struct efx_nic *efx) +{ + int rc; + + if (efx->phy_type == PHY_TYPE_QT2025C) { + /* Wait for the reset triggered by falcon_reset_hw() + * to complete */ + rc = qt2025c_wait_reset(efx); + if (rc < 0) + goto fail; + } else { + /* Reset the PHYXS MMD. This is documented as doing + * a complete soft reset. */ + rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, + QT2022C2_MAX_RESET_TIME / + QT2022C2_RESET_WAIT, + QT2022C2_RESET_WAIT); + if (rc < 0) + goto fail; + } + + /* Wait 250ms for the PHY to complete bootup */ + msleep(250); + + falcon_board(efx)->type->init_phy(efx); + + return 0; + + fail: + netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n"); + return rc; +} + +static int qt202x_phy_probe(struct efx_nic *efx) +{ + struct qt202x_phy_data *phy_data; + + phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL); + if (!phy_data) + return -ENOMEM; + efx->phy_data = phy_data; + phy_data->phy_mode = efx->phy_mode; + phy_data->bug17190_in_bad_state = false; + phy_data->bug17190_timer = 0; + + efx->mdio.mmds = QT202X_REQUIRED_DEVS; + efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; + return 0; +} + +static int qt202x_phy_init(struct efx_nic *efx) +{ + u32 devid; + int rc; + + rc = qt202x_reset_phy(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, "PHY init failed\n"); + return rc; + } + + devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); + netif_info(efx, probe, efx->net_dev, + "PHY ID reg %x (OUI %06x model %02x revision %x)\n", + devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), + efx_mdio_id_rev(devid)); + + if (efx->phy_type == PHY_TYPE_QT2025C) + qt2025c_firmware_id(efx); + + return 0; +} + +static int qt202x_link_ok(struct efx_nic *efx) +{ + return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS); +} + +static bool qt202x_phy_poll(struct efx_nic *efx) +{ + bool was_up = efx->link_state.up; + + efx->link_state.up = qt202x_link_ok(efx); + efx->link_state.speed = 10000; + efx->link_state.fd = true; + efx->link_state.fc = efx->wanted_fc; + + if (efx->phy_type == PHY_TYPE_QT2025C) + qt2025c_bug17190_workaround(efx); + + return efx->link_state.up != was_up; +} + +static int qt202x_phy_reconfigure(struct efx_nic *efx) +{ + struct qt202x_phy_data *phy_data = efx->phy_data; + + if (efx->phy_type == PHY_TYPE_QT2025C) { + int rc = qt2025c_select_phy_mode(efx); + if (rc) + return rc; + + /* There are several different register bits which can + * disable TX (and save power) on direct-attach cables + * or optical transceivers, varying somewhat between + * firmware versions. Only 'static mode' appears to + * cover everything. 
*/ + mdio_set_flag( + &efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD, + PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN, + efx->phy_mode & PHY_MODE_TX_DISABLED || + efx->phy_mode & PHY_MODE_LOW_POWER || + efx->loopback_mode == LOOPBACK_PCS || + efx->loopback_mode == LOOPBACK_PMAPMD); + } else { + /* Reset the PHY when moving from tx off to tx on */ + if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) && + (phy_data->phy_mode & PHY_MODE_TX_DISABLED)) + qt202x_reset_phy(efx); + + efx_mdio_transmit_disable(efx); + } + + efx_mdio_phy_reconfigure(efx); + + phy_data->phy_mode = efx->phy_mode; + + return 0; +} + +static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + mdio45_ethtool_gset(&efx->mdio, ecmd); +} + +static void qt202x_phy_remove(struct efx_nic *efx) +{ + /* Free the context block */ + kfree(efx->phy_data); + efx->phy_data = NULL; +} + +static int qt202x_phy_get_module_info(struct efx_nic *efx, + struct ethtool_modinfo *modinfo) +{ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + return 0; +} + +static int qt202x_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + int mmd, reg_base, rc, i; + + if (efx->phy_type == PHY_TYPE_QT2025C) { + mmd = MDIO_MMD_PCS; + reg_base = 0xd000; + } else { + mmd = MDIO_MMD_PMAPMD; + reg_base = 0x8007; + } + + for (i = 0; i < ee->len; i++) { + rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i); + if (rc < 0) + return rc; + data[i] = rc; + } + + return 0; +} + +const struct efx_phy_operations falcon_qt202x_phy_ops = { + .probe = qt202x_phy_probe, + .init = qt202x_phy_init, + .reconfigure = qt202x_phy_reconfigure, + .poll = qt202x_phy_poll, + .fini = efx_port_dummy_op_void, + .remove = qt202x_phy_remove, + .get_settings = qt202x_phy_get_settings, + .set_settings = efx_mdio_set_settings, + .test_alive = efx_mdio_test_alive, + .get_module_eeprom = qt202x_phy_get_module_eeprom, + .get_module_info = qt202x_phy_get_module_info, +}; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c new file mode 100644 index 000000000..809ea4610 --- /dev/null +++ b/drivers/net/ethernet/sfc/rx.c @@ -0,0 +1,997 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "filter.h" +#include "nic.h" +#include "selftest.h" +#include "workarounds.h" + +/* Preferred number of descriptors to fill at once */ +#define EFX_RX_PREFERRED_BATCH 8U + +/* Number of RX buffers to recycle pages for. When creating the RX page recycle + * ring, this number is divided by the number of buffers per page to calculate + * the number of pages to store in the RX page recycle ring. + */ +#define EFX_RECYCLE_RING_SIZE_IOMMU 4096 +#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH) + +/* Size of buffer allocated for skb header area. */ +#define EFX_SKB_HEADERS 128u + +/* This is the percentage fill level below which new RX descriptors + * will be added to the RX descriptor ring. 
+ */ +static unsigned int rx_refill_threshold; + +/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */ +#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ + EFX_RX_USR_BUF_SIZE) + +/* + * RX maximum head room required. + * + * This must be at least 1 to prevent overflow, plus one packet-worth + * to allow pipelined receives. + */ +#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS) + +static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf) +{ + return page_address(buf->page) + buf->page_offset; +} + +static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset)); +#else + const u8 *data = eh + efx->rx_packet_hash_offset; + return (u32)data[0] | + (u32)data[1] << 8 | + (u32)data[2] << 16 | + (u32)data[3] << 24; +#endif +} + +static inline struct efx_rx_buffer * +efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) +{ + if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask))) + return efx_rx_buffer(rx_queue, 0); + else + return rx_buf + 1; +} + +static inline void efx_sync_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf, + unsigned int len) +{ + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, + DMA_FROM_DEVICE); +} + +void efx_rx_config_page_split(struct efx_nic *efx) +{ + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align, + EFX_RX_BUF_ALIGNMENT); + efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : + ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / + efx->rx_page_buf_step); + efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) / + efx->rx_bufs_per_page; + efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH, + efx->rx_bufs_per_page); +} + +/* Check the RX page recycle ring for a page that can be reused. */ +static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + struct page *page; + struct efx_rx_page_state *state; + unsigned index; + + index = rx_queue->page_remove & rx_queue->page_ptr_mask; + page = rx_queue->page_ring[index]; + if (page == NULL) + return NULL; + + rx_queue->page_ring[index] = NULL; + /* page_remove cannot exceed page_add. */ + if (rx_queue->page_remove != rx_queue->page_add) + ++rx_queue->page_remove; + + /* If page_count is 1 then we hold the only reference to this page. */ + if (page_count(page) == 1) { + ++rx_queue->page_recycle_count; + return page; + } else { + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + ++rx_queue->page_recycle_failed; + } + + return NULL; +} + +/** + * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers + * + * @rx_queue: Efx RX queue + * + * This allocates a batch of pages, maps them for DMA, and populates + * struct efx_rx_buffers for each one. Return a negative error code or + * 0 on success. If a single page can be used for multiple buffers, + * then the page will either be inserted fully, or not at all. 
+ */ +static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic) +{ + struct efx_nic *efx = rx_queue->efx; + struct efx_rx_buffer *rx_buf; + struct page *page; + unsigned int page_offset; + struct efx_rx_page_state *state; + dma_addr_t dma_addr; + unsigned index, count; + + count = 0; + do { + page = efx_reuse_page(rx_queue); + if (page == NULL) { + page = alloc_pages(__GFP_COLD | __GFP_COMP | + (atomic ? GFP_ATOMIC : GFP_KERNEL), + efx->rx_buffer_order); + if (unlikely(page == NULL)) + return -ENOMEM; + dma_addr = + dma_map_page(&efx->pci_dev->dev, page, 0, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&efx->pci_dev->dev, + dma_addr))) { + __free_pages(page, efx->rx_buffer_order); + return -EIO; + } + state = page_address(page); + state->dma_addr = dma_addr; + } else { + state = page_address(page); + dma_addr = state->dma_addr; + } + + dma_addr += sizeof(struct efx_rx_page_state); + page_offset = sizeof(struct efx_rx_page_state); + + do { + index = rx_queue->added_count & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->dma_addr = dma_addr + efx->rx_ip_align; + rx_buf->page = page; + rx_buf->page_offset = page_offset + efx->rx_ip_align; + rx_buf->len = efx->rx_dma_len; + rx_buf->flags = 0; + ++rx_queue->added_count; + get_page(page); + dma_addr += efx->rx_page_buf_step; + page_offset += efx->rx_page_buf_step; + } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE); + + rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE; + } while (++count < efx->rx_pages_per_batch); + + return 0; +} + +/* Unmap a DMA-mapped page. This function is only called for the final RX + * buffer in a page. + */ +static void efx_unmap_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf) +{ + struct page *page = rx_buf->page; + + if (page) { + struct efx_rx_page_state *state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, + state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + } +} + +static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs) +{ + do { + if (rx_buf->page) { + put_page(rx_buf->page); + rx_buf->page = NULL; + } + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--num_bufs); +} + +/* Attempt to recycle the page if there is an RX recycle ring; the page can + * only be added if this is the final RX buffer, to prevent pages being used in + * the descriptor ring and appearing in the recycle ring simultaneously. + */ +static void efx_recycle_rx_page(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf) +{ + struct page *page = rx_buf->page; + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_nic *efx = rx_queue->efx; + unsigned index; + + /* Only recycle the page after processing the final buffer. */ + if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE)) + return; + + index = rx_queue->page_add & rx_queue->page_ptr_mask; + if (rx_queue->page_ring[index] == NULL) { + unsigned read_index = rx_queue->page_remove & + rx_queue->page_ptr_mask; + + /* The next slot in the recycle ring is available, but + * increment page_remove if the read pointer currently + * points here. 
+ */ + if (read_index == index) + ++rx_queue->page_remove; + rx_queue->page_ring[index] = page; + ++rx_queue->page_add; + return; + } + ++rx_queue->page_recycle_full; + efx_unmap_rx_buffer(efx, rx_buf); + put_page(rx_buf->page); +} + +static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf) +{ + /* Release the page reference we hold for the buffer. */ + if (rx_buf->page) + put_page(rx_buf->page); + + /* If this is the last buffer in a page, unmap and free it. */ + if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { + efx_unmap_rx_buffer(rx_queue->efx, rx_buf); + efx_free_rx_buffers(rx_queue, rx_buf, 1); + } + rx_buf->page = NULL; +} + +/* Recycle the pages that are used by buffers that have just been received. */ +static void efx_recycle_rx_pages(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + do { + efx_recycle_rx_page(channel, rx_buf); + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--n_frags); +} + +static void efx_discard_rx_packet(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + efx_recycle_rx_pages(channel, rx_buf, n_frags); + + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); +} + +/** + * efx_fast_push_rx_descriptors - push new RX descriptors quickly + * @rx_queue: RX descriptor queue + * + * This will aim to fill the RX descriptor queue up to + * @rx_queue->max_fill. If there is insufficient atomic + * memory to do so, a slow fill will be scheduled. + * + * The caller must provide serialisation (none is used here). In practice, + * this means this function must run from the NAPI handler, or be called + * when NAPI is disabled.
+ */ +void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int fill_level, batch_size; + int space, rc = 0; + + if (!rx_queue->refill_enabled) + return; + + /* Calculate current fill level, and exit if we don't need to fill */ + fill_level = (rx_queue->added_count - rx_queue->removed_count); + EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); + if (fill_level >= rx_queue->fast_fill_trigger) + goto out; + + /* Record minimum fill level */ + if (unlikely(fill_level < rx_queue->min_fill)) { + if (fill_level) + rx_queue->min_fill = fill_level; + } + + batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page; + space = rx_queue->max_fill - fill_level; + EFX_BUG_ON_PARANOID(space < batch_size); + + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filling descriptor ring from" + " level %d to level %d\n", + efx_rx_queue_index(rx_queue), fill_level, + rx_queue->max_fill); + + + do { + rc = efx_init_rx_buffers(rx_queue, atomic); + if (unlikely(rc)) { + /* Ensure that we don't leave the rx queue empty */ + if (rx_queue->added_count == rx_queue->removed_count) + efx_schedule_slow_fill(rx_queue); + goto out; + } + } while ((space -= batch_size) >= batch_size); + + netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, + "RX queue %d fast-filled descriptor ring " + "to level %d\n", efx_rx_queue_index(rx_queue), + rx_queue->added_count - rx_queue->removed_count); + + out: + if (rx_queue->notified_count != rx_queue->added_count) + efx_nic_notify_rx_desc(rx_queue); +} + +void efx_rx_slow_fill(unsigned long context) +{ + struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; + + /* Post an event to cause NAPI to run and refill the queue */ + efx_nic_generate_fill_event(rx_queue); + ++rx_queue->slow_fill_count; +} + +static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + int len) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; + + if (likely(len <= max_len)) + return; + + /* The packet must be discarded, but this is only a fatal error + * if the caller indicated it was + */ + rx_buf->flags |= EFX_RX_PKT_DISCARD; + + if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + " RX queue %d seriously overlength " + "RX event (0x%x > 0x%x+0x%x). Leaking\n", + efx_rx_queue_index(rx_queue), len, max_len, + efx->type->rx_buffer_padding); + efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); + } else { + if (net_ratelimit()) + netif_err(efx, rx_err, efx->net_dev, + " RX queue %d overlength RX event " + "(0x%x > 0x%x)\n", + efx_rx_queue_index(rx_queue), len, max_len); + } + + efx_rx_queue_channel(rx_queue)->n_rx_overlength++; +} + +/* Pass a received packet up through GRO. GRO can handle pages + * regardless of checksum state and skbs with a good checksum. 
+ */ +static void +efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, + unsigned int n_frags, u8 *eh) +{ + struct napi_struct *napi = &channel->napi_str; + gro_result_t gro_result; + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; + + skb = napi_get_frags(napi); + if (unlikely(!skb)) { + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + return; + } + + if (efx->net_dev->features & NETIF_F_RXHASH) + skb_set_hash(skb, efx_rx_buf_hash(efx, eh), + PKT_HASH_TYPE_L3); + skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? + CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + + skb->data_len = skb->len; + skb->truesize += n_frags * efx->rx_buffer_truesize; + + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + skb_mark_napi_id(skb, &channel->napi_str); + gro_result = napi_gro_frags(napi); + if (gro_result != GRO_DROP) + channel->irq_mod_score += 2; +} + +/* Allocate and construct an SKB around page fragments */ +static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags, + u8 *eh, int hdr_len) +{ + struct efx_nic *efx = channel->efx; + struct sk_buff *skb; + + /* Allocate an SKB to store the headers */ + skb = netdev_alloc_skb(efx->net_dev, + efx->rx_ip_align + efx->rx_prefix_size + + hdr_len); + if (unlikely(skb == NULL)) { + atomic_inc(&efx->n_rx_noskb_drops); + return NULL; + } + + EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len); + + memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size, + efx->rx_prefix_size + hdr_len); + skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size); + __skb_put(skb, hdr_len); + + /* Append the remaining page(s) onto the frag list */ + if (rx_buf->len > hdr_len) { + rx_buf->page_offset += hdr_len; + rx_buf->len -= hdr_len; + + for (;;) { + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buf->page, rx_buf->page_offset, + rx_buf->len); + rx_buf->page = NULL; + skb->len += rx_buf->len; + skb->data_len += rx_buf->len; + if (skb_shinfo(skb)->nr_frags == n_frags) + break; + + rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); + } + } else { + __free_pages(rx_buf->page, efx->rx_buffer_order); + rx_buf->page = NULL; + n_frags = 0; + } + + skb->truesize += n_frags * efx->rx_buffer_truesize; + + /* Move past the ethernet header */ + skb->protocol = eth_type_trans(skb, efx->net_dev); + + skb_mark_napi_id(skb, &channel->napi_str); + + return skb; +} + +void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, + unsigned int n_frags, unsigned int len, u16 flags) +{ + struct efx_nic *efx = rx_queue->efx; + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_rx_buffer *rx_buf; + + rx_queue->rx_packets++; + + rx_buf = efx_rx_buffer(rx_queue, index); + rx_buf->flags |= flags; + + /* Validate the number of fragments and completed length */ + if (n_frags == 1) { + if (!(flags & EFX_RX_PKT_PREFIX_LEN)) + efx_rx_packet__check_len(rx_queue, rx_buf, len); + } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) || + unlikely(len <= (n_frags - 1) * efx->rx_dma_len) || + unlikely(len > n_frags * efx->rx_dma_len) || + unlikely(!efx->rx_scatter)) { + /* If this isn't an explicit discard 
request, either + * the hardware or the driver is broken. + */ + WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD)); + rx_buf->flags |= EFX_RX_PKT_DISCARD; + } + + netif_vdbg(efx, rx_status, efx->net_dev, + "RX queue %d received ids %x-%x len %d %s%s\n", + efx_rx_queue_index(rx_queue), index, + (index + n_frags - 1) & rx_queue->ptr_mask, len, + (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", + (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); + + /* Discard packet, if instructed to do so. Process the + * previous receive first. + */ + if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { + efx_rx_flush_packet(channel); + efx_discard_rx_packet(channel, rx_buf, n_frags); + return; + } + + if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN)) + rx_buf->len = len; + + /* Release and/or sync the DMA mapping - assumes all RX buffers + * consumed in-order per RX queue. + */ + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + + /* Prefetch nice and early so data will (hopefully) be in cache by + * the time we look at it. + */ + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->rx_prefix_size; + rx_buf->len -= efx->rx_prefix_size; + + if (n_frags > 1) { + /* Release/sync DMA mapping for additional fragments. + * Fix length for last fragment. + */ + unsigned int tail_frags = n_frags - 1; + + for (;;) { + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + if (--tail_frags == 0) + break; + efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); + } + rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len; + efx_sync_rx_buffer(efx, rx_buf, rx_buf->len); + } + + /* All fragments have been DMA-synced, so recycle pages. */ + rx_buf = efx_rx_buffer(rx_queue, index); + efx_recycle_rx_pages(channel, rx_buf, n_frags); + + /* Pipeline receives so that we give time for packet headers to be + * prefetched into cache. + */ + efx_rx_flush_packet(channel); + channel->rx_pkt_n_frags = n_frags; + channel->rx_pkt_index = index; +} + +static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, + struct efx_rx_buffer *rx_buf, + unsigned int n_frags) +{ + struct sk_buff *skb; + u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS); + + skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); + if (unlikely(skb == NULL)) { + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); + return; + } + skb_record_rx_queue(skb, channel->rx_queue.core_index); + + /* Set the SKB flags */ + skb_checksum_none_assert(skb); + if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + efx_rx_skb_attach_timestamp(channel, skb); + + if (channel->type->receive_skb) + if (channel->type->receive_skb(channel, skb)) + return; + + /* Pass the packet up */ + netif_receive_skb(skb); +} + +/* Handle a received packet. Second half: Touches packet payload. */ +void __efx_rx_packet(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + struct efx_rx_buffer *rx_buf = + efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); + u8 *eh = efx_rx_buf_va(rx_buf); + + /* Read length from the prefix if necessary. This already + * excludes the length of the prefix itself. 
+ */ + if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) + rx_buf->len = le16_to_cpup((__le16 *) + (eh + efx->rx_packet_len_offset)); + + /* If we're in loopback test, then pass the packet directly to the + * loopback layer, and free the rx_buf here + */ + if (unlikely(efx->loopback_selftest)) { + struct efx_rx_queue *rx_queue; + + efx_loopback_rx_packet(efx, eh, rx_buf->len); + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); + goto out; + } + + if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) + rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; + + if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb && + !efx_channel_busy_polling(channel)) + efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); + else + efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); +out: + channel->rx_pkt_n_frags = 0; +} + +int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int entries; + int rc; + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); + EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); + rx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating RX queue %d size %#x mask %#x\n", + efx_rx_queue_index(rx_queue), efx->rxq_entries, + rx_queue->ptr_mask); + + /* Allocate RX buffers */ + rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), + GFP_KERNEL); + if (!rx_queue->buffer) + return -ENOMEM; + + rc = efx_nic_probe_rx(rx_queue); + if (rc) { + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; + } + + return rc; +} + +static void efx_init_rx_recycle_ring(struct efx_nic *efx, + struct efx_rx_queue *rx_queue) +{ + unsigned int bufs_in_recycle_ring, page_ring_size; + + /* Set the RX recycle ring size */ +#ifdef CONFIG_PPC64 + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; +#else + if (iommu_present(&pci_bus_type)) + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU; + else + bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU; +#endif /* CONFIG_PPC64 */ + + page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring / + efx->rx_bufs_per_page); + rx_queue->page_ring = kcalloc(page_ring_size, + sizeof(*rx_queue->page_ring), GFP_KERNEL); + rx_queue->page_ptr_mask = page_ring_size - 1; +} + +void efx_init_rx_queue(struct efx_rx_queue *rx_queue) +{ + struct efx_nic *efx = rx_queue->efx; + unsigned int max_fill, trigger, max_trigger; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "initialising RX queue %d\n", efx_rx_queue_index(rx_queue)); + + /* Initialise ptr fields */ + rx_queue->added_count = 0; + rx_queue->notified_count = 0; + rx_queue->removed_count = 0; + rx_queue->min_fill = -1U; + efx_init_rx_recycle_ring(efx, rx_queue); + + rx_queue->page_remove = 0; + rx_queue->page_add = rx_queue->page_ptr_mask + 1; + rx_queue->page_recycle_count = 0; + rx_queue->page_recycle_failed = 0; + rx_queue->page_recycle_full = 0; + + /* Initialise limit fields */ + max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; + max_trigger = + max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page; + if (rx_refill_threshold != 0) { + trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; + if (trigger > max_trigger) + trigger = max_trigger; + } else { + trigger = max_trigger; + } + + rx_queue->max_fill = max_fill; + rx_queue->fast_fill_trigger = trigger; + rx_queue->refill_enabled = true; + + /* Set up RX descriptor ring */ + efx_nic_init_rx(rx_queue); +} 
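The refill arithmetic above is easier to see with concrete numbers. The user-space sketch below mirrors the max_fill / fast_fill_trigger calculation from efx_init_rx_queue() and the page-ring sizing from efx_init_rx_recycle_ring(); the ring size, buffers per page, batch size and threshold used here are illustrative assumptions, not values taken from real hardware or from this driver.

#include <stdio.h>

/* Smallest power of two >= x, for the small non-zero values used here
 * (stands in for the kernel's roundup_pow_of_two()). */
static unsigned int roundup_pow2(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	/* Illustrative values only (assumptions for this example). */
	unsigned int rxq_entries = 512;         /* RX descriptor ring size */
	unsigned int rx_bufs_per_page = 2;      /* buffers carved from each page */
	unsigned int rx_pages_per_batch = 4;    /* pages filled per refill batch */
	unsigned int rx_refill_threshold = 90;  /* module parameter, percent; 0 = use max */
	unsigned int max_frags = 4;             /* stands in for EFX_RX_MAX_FRAGS */

	unsigned int head_room = 1 + max_frags; /* EFX_RXD_HEAD_ROOM */
	unsigned int max_fill = rxq_entries - head_room;
	unsigned int batch_size = rx_pages_per_batch * rx_bufs_per_page;
	unsigned int max_trigger = max_fill - batch_size;
	unsigned int trigger;

	if (rx_refill_threshold != 0) {
		unsigned int pct = rx_refill_threshold < 100 ? rx_refill_threshold : 100;

		trigger = max_fill * pct / 100;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	/* Recycle-ring sizing, as in efx_init_rx_recycle_ring() with an IOMMU. */
	unsigned int bufs_in_recycle_ring = 4096; /* EFX_RECYCLE_RING_SIZE_IOMMU */
	unsigned int page_ring_size =
		roundup_pow2(bufs_in_recycle_ring / rx_bufs_per_page);

	printf("max_fill=%u fast_fill_trigger=%u page_ring_size=%u (ptr mask %#x)\n",
	       max_fill, trigger, page_ring_size, page_ring_size - 1);
	return 0;
}

With these example numbers, max_fill comes out at 507, the fast-fill trigger at 456 (90% of max_fill, below the 499 ceiling), and the recycle ring gets 2048 page slots (pointer mask 0x7ff).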
+ +void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) +{ + int i; + struct efx_nic *efx = rx_queue->efx; + struct efx_rx_buffer *rx_buf; + + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); + + del_timer_sync(&rx_queue->slow_fill); + + /* Release RX buffers from the current read ptr to the write ptr */ + if (rx_queue->buffer) { + for (i = rx_queue->removed_count; i < rx_queue->added_count; + i++) { + unsigned index = i & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, index); + efx_fini_rx_buffer(rx_queue, rx_buf); + } + } + + /* Unmap and release the pages in the recycle ring. Remove the ring. */ + for (i = 0; i <= rx_queue->page_ptr_mask; i++) { + struct page *page = rx_queue->page_ring[i]; + struct efx_rx_page_state *state; + + if (page == NULL) + continue; + + state = page_address(page); + dma_unmap_page(&efx->pci_dev->dev, state->dma_addr, + PAGE_SIZE << efx->rx_buffer_order, + DMA_FROM_DEVICE); + put_page(page); + } + kfree(rx_queue->page_ring); + rx_queue->page_ring = NULL; +} + +void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) +{ + netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, + "destroying RX queue %d\n", efx_rx_queue_index(rx_queue)); + + efx_nic_remove_rx(rx_queue); + + kfree(rx_queue->buffer); + rx_queue->buffer = NULL; +} + + +module_param(rx_refill_threshold, uint, 0444); +MODULE_PARM_DESC(rx_refill_threshold, + "RX descriptor ring refill threshold (%)"); + +#ifdef CONFIG_RFS_ACCEL + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + struct efx_filter_spec spec; + const __be16 *ports; + __be16 ether_type; + int nhoff; + int rc; + + /* The core RPS/RFS code has already parsed and validated + * VLAN, IP and transport headers. We assume they are in the + * header area. + */ + + if (skb->protocol == htons(ETH_P_8021Q)) { + const struct vlan_hdr *vh = + (const struct vlan_hdr *)skb->data; + + /* We can't filter on the IP 5-tuple and the vlan + * together, so just strip the vlan header and filter + * on the IP part. + */ + EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh)); + ether_type = vh->h_vlan_encapsulated_proto; + nhoff = sizeof(struct vlan_hdr); + } else { + ether_type = skb->protocol; + nhoff = 0; + } + + if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, + efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, + rxq_index); + spec.match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + spec.ether_type = ether_type; + + if (ether_type == htons(ETH_P_IP)) { + const struct iphdr *ip = + (const struct iphdr *)(skb->data + nhoff); + + EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); + if (ip_is_fragment(ip)) + return -EPROTONOSUPPORT; + spec.ip_proto = ip->protocol; + spec.rem_host[0] = ip->saddr; + spec.loc_host[0] = ip->daddr; + EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); + ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + } else { + const struct ipv6hdr *ip6 = + (const struct ipv6hdr *)(skb->data + nhoff); + + EFX_BUG_ON_PARANOID(skb_headlen(skb) < + nhoff + sizeof(*ip6) + 4); + spec.ip_proto = ip6->nexthdr; + memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr)); + memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr)); + ports = (const __be16 *)(ip6 + 1); + } + + spec.rem_port = ports[0]; + spec.loc_port = ports[1]; + + rc = efx->type->filter_rfs_insert(efx, &spec); + if (rc < 0) + return rc; + + /* Remember this so we can check whether to expire the filter later */ + efx->rps_flow_id[rc] = flow_id; + channel = efx_get_channel(efx, skb_get_rx_queue(skb)); + ++channel->rfs_filters_added; + + if (ether_type == htons(ETH_P_IP)) + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + spec.rem_host, ntohs(ports[0]), spec.loc_host, + ntohs(ports[1]), rxq_index, flow_id, rc); + else + netif_info(efx, rx_status, efx->net_dev, + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", + (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + spec.rem_host, ntohs(ports[0]), spec.loc_host, + ntohs(ports[1]), rxq_index, flow_id, rc); + + return rc; +} + +bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) +{ + bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); + unsigned int index, size; + u32 flow_id; + + if (!spin_trylock_bh(&efx->filter_lock)) + return false; + + expire_one = efx->type->filter_rfs_expire_one; + index = efx->rps_expire_index; + size = efx->type->max_rx_ip_filters; + while (quota--) { + flow_id = efx->rps_flow_id[index]; + if (expire_one(efx, flow_id, index)) + netif_info(efx, rx_status, efx->net_dev, + "expired filter %d [flow %u]\n", + index, flow_id); + if (++index == size) + index = 0; + } + efx->rps_expire_index = index; + + spin_unlock_bh(&efx->filter_lock); + return true; +} + +#endif /* CONFIG_RFS_ACCEL */ + +/** + * efx_filter_is_mc_recipient - test whether spec is a multicast recipient + * @spec: Specification to test + * + * Return: %true if the specification is a non-drop RX filter that + * matches a local MAC address I/G bit value of 1 or matches a local + * IPv4 or IPv6 address value in the respective multicast address + * range. Otherwise %false. 
+ */ +bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec) +{ + if (!(spec->flags & EFX_FILTER_FLAG_RX) || + spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + return false; + + if (spec->match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) && + is_multicast_ether_addr(spec->loc_mac)) + return true; + + if ((spec->match_flags & + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == + (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { + if (spec->ether_type == htons(ETH_P_IP) && + ipv4_is_multicast(spec->loc_host[0])) + return true; + if (spec->ether_type == htons(ETH_P_IPV6) && + ((const u8 *)spec->loc_host)[0] == 0xff) + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c new file mode 100644 index 000000000..b605dfd5c --- /dev/null +++ b/drivers/net/ethernet/sfc/selftest.c @@ -0,0 +1,788 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include <linux/netdevice.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/kernel_stat.h> +#include <linux/pci.h> +#include <linux/ethtool.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/udp.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include "net_driver.h" +#include "efx.h" +#include "nic.h" +#include "selftest.h" +#include "workarounds.h" + +/* IRQ latency can be enormous because: + * - All IRQs may be disabled on a CPU for a *long* time by e.g. a + * slow serial console or an old IDE driver doing error recovery + * - The PREEMPT_RT patches mostly deal with this, but also allow a + * tasklet or normal task to be given higher priority than our IRQ + * threads + * Try to avoid blaming the hardware for this. + */ +#define IRQ_TIMEOUT HZ + +/* + * Loopback test packet structure + * + * The self-test should stress every RSS vector, and unfortunately + * Falcon only performs RSS on TCP/UDP packets. + */ +struct efx_loopback_payload { + struct ethhdr header; + struct iphdr ip; + struct udphdr udp; + __be16 iteration; + char msg[64]; +} __packed; + +/* Loopback test source MAC address */ +static const u8 payload_source[ETH_ALEN] __aligned(2) = { + 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, +}; + +static const char payload_msg[] = + "Hello world!
This is an Efx loopback test in progress!"; + +/* Interrupt mode names */ +static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; +static const char *const efx_interrupt_mode_names[] = { + [EFX_INT_MODE_MSIX] = "MSI-X", + [EFX_INT_MODE_MSI] = "MSI", + [EFX_INT_MODE_LEGACY] = "legacy", +}; +#define INT_MODE(efx) \ + STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) + +/** + * efx_loopback_state - persistent state during a loopback selftest + * @flush: Drop all packets in efx_loopback_rx_packet + * @packet_count: Number of packets being used in this test + * @skbs: An array of skbs transmitted + * @offload_csum: Checksums are being offloaded + * @rx_good: RX good packet count + * @rx_bad: RX bad packet count + * @payload: Payload used in tests + */ +struct efx_loopback_state { + bool flush; + int packet_count; + struct sk_buff **skbs; + bool offload_csum; + atomic_t rx_good; + atomic_t rx_bad; + struct efx_loopback_payload payload; +}; + +/* How long to wait for all the packets to arrive (in ms) */ +#define LOOPBACK_TIMEOUT_MS 1000 + +/************************************************************************** + * + * MII, NVRAM and register tests + * + **************************************************************************/ + +static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + if (efx->phy_op->test_alive) { + rc = efx->phy_op->test_alive(efx); + tests->phy_alive = rc ? -1 : 1; + } + + return rc; +} + +static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) +{ + int rc = 0; + + if (efx->type->test_nvram) { + rc = efx->type->test_nvram(efx); + tests->nvram = rc ? -1 : 1; + } + + return rc; +} + +/************************************************************************** + * + * Interrupt and event queue testing + * + **************************************************************************/ + +/* Test generation and receipt of interrupts */ +static int efx_test_interrupts(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + unsigned long timeout, wait; + int cpu; + + netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); + tests->interrupt = -1; + + efx_nic_irq_test_start(efx); + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of test interrupt. */ + netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); + do { + schedule_timeout_uninterruptible(wait); + cpu = efx_nic_irq_test_irq_cpu(efx); + if (cpu >= 0) + goto success; + wait *= 2; + } while (time_before(jiffies, timeout)); + + netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); + return -ETIMEDOUT; + + success: + netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", + INT_MODE(efx), cpu); + tests->interrupt = 1; + return 0; +} + +/* Test generation and receipt of interrupting events */ +static int efx_test_eventq_irq(struct efx_nic *efx, + struct efx_self_tests *tests) +{ + struct efx_channel *channel; + unsigned int read_ptr[EFX_MAX_CHANNELS]; + unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0; + unsigned long timeout, wait; + + BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG); + + efx_for_each_channel(channel, efx) { + read_ptr[channel->channel] = channel->eventq_read_ptr; + set_bit(channel->channel, &dma_pend); + set_bit(channel->channel, &int_pend); + efx_nic_event_test_start(channel); + } + + timeout = jiffies + IRQ_TIMEOUT; + wait = 1; + + /* Wait for arrival of interrupts. 
NAPI processing may or may + * not complete in time, but we can cope in any case. + */ + do { + schedule_timeout_uninterruptible(wait); + + efx_for_each_channel(channel, efx) { + efx_stop_eventq(channel); + if (channel->eventq_read_ptr != + read_ptr[channel->channel]) { + set_bit(channel->channel, &napi_ran); + clear_bit(channel->channel, &dma_pend); + clear_bit(channel->channel, &int_pend); + } else { + if (efx_nic_event_present(channel)) + clear_bit(channel->channel, &dma_pend); + if (efx_nic_event_test_irq_cpu(channel) >= 0) + clear_bit(channel->channel, &int_pend); + } + efx_start_eventq(channel); + } + + wait *= 2; + } while ((dma_pend || int_pend) && time_before(jiffies, timeout)); + + efx_for_each_channel(channel, efx) { + bool dma_seen = !test_bit(channel->channel, &dma_pend); + bool int_seen = !test_bit(channel->channel, &int_pend); + + tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1; + tests->eventq_int[channel->channel] = int_seen ? 1 : -1; + + if (dma_seen && int_seen) { + netif_dbg(efx, drv, efx->net_dev, + "channel %d event queue passed (with%s NAPI)\n", + channel->channel, + test_bit(channel->channel, &napi_ran) ? + "" : "out"); + } else { + /* Report failure and whether either interrupt or DMA + * worked + */ + netif_err(efx, drv, efx->net_dev, + "channel %d timed out waiting for event queue\n", + channel->channel); + if (int_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d saw interrupt " + "during event queue test\n", + channel->channel); + if (dma_seen) + netif_err(efx, drv, efx->net_dev, + "channel %d event was generated, but " + "failed to trigger an interrupt\n", + channel->channel); + } + } + + return (dma_pend || int_pend) ? -ETIMEDOUT : 0; +} + +static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags) +{ + int rc; + + if (!efx->phy_op->run_tests) + return 0; + + mutex_lock(&efx->mac_lock); + rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); + mutex_unlock(&efx->mac_lock); + return rc; +} + +/************************************************************************** + * + * Loopback testing + * NB Only one loopback test can be executing concurrently. + * + **************************************************************************/ + +/* Loopback test RX callback + * This is called for each received packet during loopback testing. 
+ */ +void efx_loopback_rx_packet(struct efx_nic *efx, + const char *buf_ptr, int pkt_len) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *received; + struct efx_loopback_payload *payload; + + BUG_ON(!buf_ptr); + + /* If we are just flushing, then drop the packet */ + if ((state == NULL) || state->flush) + return; + + payload = &state->payload; + + received = (struct efx_loopback_payload *) buf_ptr; + received->ip.saddr = payload->ip.saddr; + if (state->offload_csum) + received->ip.check = payload->ip.check; + + /* Check that header exists */ + if (pkt_len < sizeof(received->header)) { + netif_err(efx, drv, efx->net_dev, + "saw runt RX packet (length %d) in %s loopback " + "test\n", pkt_len, LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that the ethernet header exists */ + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw non-loopback RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check packet length */ + if (pkt_len != sizeof(*payload)) { + netif_err(efx, drv, efx->net_dev, + "saw incorrect RX packet length %d (wanted %d) in " + "%s loopback test\n", pkt_len, (int)sizeof(*payload), + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that IP header matches */ + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted IP header in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that msg and padding matches */ + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { + netif_err(efx, drv, efx->net_dev, + "saw corrupted RX packet in %s loopback test\n", + LOOPBACK_MODE(efx)); + goto err; + } + + /* Check that iteration matches */ + if (received->iteration != payload->iteration) { + netif_err(efx, drv, efx->net_dev, + "saw RX packet from iteration %d (wanted %d) in " + "%s loopback test\n", ntohs(received->iteration), + ntohs(payload->iteration), LOOPBACK_MODE(efx)); + goto err; + } + + /* Increase correct RX count */ + netif_vdbg(efx, drv, efx->net_dev, + "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); + + atomic_inc(&state->rx_good); + return; + + err: +#ifdef DEBUG + if (atomic_read(&state->rx_bad) == 0) { + netif_err(efx, drv, efx->net_dev, "received packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + buf_ptr, pkt_len, 0); + netif_err(efx, drv, efx->net_dev, "expected packet:\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, + &state->payload, sizeof(state->payload), 0); + } +#endif + atomic_inc(&state->rx_bad); +} + +/* Initialise an efx_selftest_state for a new iteration */ +static void efx_iterate_state(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + struct net_device *net_dev = efx->net_dev; + struct efx_loopback_payload *payload = &state->payload; + + /* Initialise the layerII header */ + ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr); + ether_addr_copy((u8 *)&payload->header.h_source, payload_source); + payload->header.h_proto = htons(ETH_P_IP); + + /* saddr set later and used as incrementing count */ + payload->ip.daddr = htonl(INADDR_LOOPBACK); + payload->ip.ihl = 5; + payload->ip.check = (__force __sum16) htons(0xdead); + payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); + payload->ip.version = IPVERSION; + payload->ip.protocol = IPPROTO_UDP; + + /* Initialise udp header */ + payload->udp.source = 0; + 
payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - + sizeof(struct iphdr)); + payload->udp.check = 0; /* checksum ignored */ + + /* Fill out payload */ + payload->iteration = htons(ntohs(payload->iteration) + 1); + memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); + + /* Fill out remaining state members */ + atomic_set(&state->rx_good, 0); + atomic_set(&state->rx_bad, 0); + smp_wmb(); +} + +static int efx_begin_loopback(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct efx_loopback_payload *payload; + struct sk_buff *skb; + int i; + netdev_tx_t rc; + + /* Transmit N copies of buffer */ + for (i = 0; i < state->packet_count; i++) { + /* Allocate an skb, holding an extra reference for + * transmit completion counting */ + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); + if (!skb) + return -ENOMEM; + state->skbs[i] = skb; + skb_get(skb); + + /* Copy the payload in, incrementing the source address to + * exercise the rss vectors */ + payload = ((struct efx_loopback_payload *) + skb_put(skb, sizeof(state->payload))); + memcpy(payload, &state->payload, sizeof(state->payload)); + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); + + /* Ensure everything we've written is visible to the + * interrupt handler. */ + smp_wmb(); + + netif_tx_lock_bh(efx->net_dev); + rc = efx_enqueue_skb(tx_queue, skb); + netif_tx_unlock_bh(efx->net_dev); + + if (rc != NETDEV_TX_OK) { + netif_err(efx, drv, efx->net_dev, + "TX queue %d could not transmit packet %d of " + "%d in %s loopback test\n", tx_queue->queue, + i + 1, state->packet_count, + LOOPBACK_MODE(efx)); + + /* Defer cleaning up the other skbs for the caller */ + kfree_skb(skb); + return -EPIPE; + } + } + + return 0; +} + +static int efx_poll_loopback(struct efx_nic *efx) +{ + struct efx_loopback_state *state = efx->loopback_selftest; + + return atomic_read(&state->rx_good) == state->packet_count; +} + +static int efx_end_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + struct sk_buff *skb; + int tx_done = 0, rx_good, rx_bad; + int i, rc = 0; + + netif_tx_lock_bh(efx->net_dev); + + /* Count the number of tx completions, and decrement the refcnt. Any + * skbs not already completed will be free'd when the queue is flushed */ + for (i = 0; i < state->packet_count; i++) { + skb = state->skbs[i]; + if (skb && !skb_shared(skb)) + ++tx_done; + dev_kfree_skb(skb); + } + + netif_tx_unlock_bh(efx->net_dev); + + /* Check TX completion and received packet counts */ + rx_good = atomic_read(&state->rx_good); + rx_bad = atomic_read(&state->rx_bad); + if (tx_done != state->packet_count) { + /* Don't free the skbs; they will be picked up on TX + * overflow or channel teardown. 
+ */ + netif_err(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "TX completion events in %s loopback test\n", + tx_queue->queue, tx_done, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Allow to fall through so we see the RX errors as well */ + } + + /* We may always be up to a flush away from our desired packet total */ + if (rx_good != state->packet_count) { + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d saw only %d out of an expected %d " + "received packets in %s loopback test\n", + tx_queue->queue, rx_good, state->packet_count, + LOOPBACK_MODE(efx)); + rc = -ETIMEDOUT; + /* Fall through */ + } + + /* Update loopback test structure */ + lb_tests->tx_sent[tx_queue->queue] += state->packet_count; + lb_tests->tx_done[tx_queue->queue] += tx_done; + lb_tests->rx_good += rx_good; + lb_tests->rx_bad += rx_bad; + + return rc; +} + +static int +efx_test_loopback(struct efx_tx_queue *tx_queue, + struct efx_loopback_self_tests *lb_tests) +{ + struct efx_nic *efx = tx_queue->efx; + struct efx_loopback_state *state = efx->loopback_selftest; + int i, begin_rc, end_rc; + + for (i = 0; i < 3; i++) { + /* Determine how many packets to send */ + state->packet_count = efx->txq_entries / 3; + state->packet_count = min(1 << (i << 2), state->packet_count); + state->skbs = kcalloc(state->packet_count, + sizeof(state->skbs[0]), GFP_KERNEL); + if (!state->skbs) + return -ENOMEM; + state->flush = false; + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d testing %s loopback with %d packets\n", + tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + efx_iterate_state(efx); + begin_rc = efx_begin_loopback(tx_queue); + + /* This will normally complete very quickly, but be + * prepared to wait much longer. */ + msleep(1); + if (!efx_poll_loopback(efx)) { + msleep(LOOPBACK_TIMEOUT_MS); + efx_poll_loopback(efx); + } + + end_rc = efx_end_loopback(tx_queue, lb_tests); + kfree(state->skbs); + + if (begin_rc || end_rc) { + /* Wait a while to ensure there are no packets + * floating around after a failure. */ + schedule_timeout_uninterruptible(HZ / 10); + return begin_rc ? begin_rc : end_rc; + } + } + + netif_dbg(efx, drv, efx->net_dev, + "TX queue %d passed %s loopback test with a burst length " + "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + state->packet_count); + + return 0; +} + +/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but + * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it + * to delay and retry. Therefore, it's safer to just poll directly. Wait + * for link up and any faults to dissipate. 
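 *
 * The loop below samples the link roughly every 100 ms (HZ / 10), runs the
 * NIC's ->monitor hook if one is provided, and only reports success after
 * two consecutive samples with the link up and no MAC fault; it gives up
 * after about four seconds (40 iterations).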
*/ +static int efx_wait_for_link(struct efx_nic *efx) +{ + struct efx_link_state *link_state = &efx->link_state; + int count, link_up_count = 0; + bool link_up; + + for (count = 0; count < 40; count++) { + schedule_timeout_uninterruptible(HZ / 10); + + if (efx->type->monitor != NULL) { + mutex_lock(&efx->mac_lock); + efx->type->monitor(efx); + mutex_unlock(&efx->mac_lock); + } + + mutex_lock(&efx->mac_lock); + link_up = link_state->up; + if (link_up) + link_up = !efx->type->check_mac_fault(efx); + mutex_unlock(&efx->mac_lock); + + if (link_up) { + if (++link_up_count == 2) + return 0; + } else { + link_up_count = 0; + } + } + + return -ETIMEDOUT; +} + +static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned int loopback_modes) +{ + enum efx_loopback_mode mode; + struct efx_loopback_state *state; + struct efx_channel *channel = + efx_get_channel(efx, efx->tx_channel_offset); + struct efx_tx_queue *tx_queue; + int rc = 0; + + /* Set the port loopback_selftest member. From this point on + * all received packets will be dropped. Mark the state as + * "flushing" so all inflight packets are dropped */ + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) + return -ENOMEM; + BUG_ON(efx->loopback_selftest); + state->flush = true; + efx->loopback_selftest = state; + + /* Test all supported loopback modes */ + for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { + if (!(loopback_modes & (1 << mode))) + continue; + + /* Move the port into the specified loopback mode. */ + state->flush = true; + mutex_lock(&efx->mac_lock); + efx->loopback_mode = mode; + rc = __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "unable to move into %s loopback\n", + LOOPBACK_MODE(efx)); + goto out; + } + + rc = efx_wait_for_link(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "loopback %s never came up\n", + LOOPBACK_MODE(efx)); + goto out; + } + + /* Test all enabled types of TX queue */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + state->offload_csum = (tx_queue->queue & + EFX_TXQ_TYPE_OFFLOAD); + rc = efx_test_loopback(tx_queue, + &tests->loopback[mode]); + if (rc) + goto out; + } + } + + out: + /* Remove the flush. The caller will remove the loopback setting */ + state->flush = true; + efx->loopback_selftest = NULL; + wmb(); + kfree(state); + + return rc; +} + +/************************************************************************** + * + * Entry point + * + *************************************************************************/ + +int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags) +{ + enum efx_loopback_mode loopback_mode = efx->loopback_mode; + int phy_mode = efx->phy_mode; + int rc_test = 0, rc_reset, rc; + + efx_selftest_async_cancel(efx); + + /* Online (i.e. non-disruptive) testing + * This checks interrupt generation, event delivery and PHY presence. */ + + rc = efx_test_phy_alive(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_nvram(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_interrupts(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_eventq_irq(efx, tests); + if (rc && !rc_test) + rc_test = rc; + + if (rc_test) + return rc_test; + + if (!(flags & ETH_TEST_FL_OFFLINE)) + return efx_test_phy(efx, tests, flags); + + /* Offline (i.e. disruptive) testing + * This checks MAC and PHY loopback on the specified port. 
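 * The sequence is: detach the net device, run the chip/register tests,
 * take the PHY out of low-power and loopback modes, run the PHY and
 * loopback tests, then restore the original PHY and loopback settings
 * and re-attach the device.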
*/ + + /* Detach the device so the kernel doesn't transmit during the + * loopback test and the watchdog timeout doesn't fire. + */ + efx_device_detach_sync(efx); + + if (efx->type->test_chip) { + rc_reset = efx->type->test_chip(efx, tests); + if (rc_reset) { + netif_err(efx, hw, efx->net_dev, + "Unable to recover from chip test\n"); + efx_schedule_reset(efx, RESET_TYPE_DISABLE); + return rc_reset; + } + + if ((tests->memory < 0 || tests->registers < 0) && !rc_test) + rc_test = -EIO; + } + + /* Ensure that the phy is powered and out of loopback + * for the bist and loopback tests */ + mutex_lock(&efx->mac_lock); + efx->phy_mode &= ~PHY_MODE_LOW_POWER; + efx->loopback_mode = LOOPBACK_NONE; + __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + rc = efx_test_phy(efx, tests, flags); + if (rc && !rc_test) + rc_test = rc; + + rc = efx_test_loopbacks(efx, tests, efx->loopback_modes); + if (rc && !rc_test) + rc_test = rc; + + /* restore the PHY to the previous state */ + mutex_lock(&efx->mac_lock); + efx->phy_mode = phy_mode; + efx->loopback_mode = loopback_mode; + __efx_reconfigure_port(efx); + mutex_unlock(&efx->mac_lock); + + netif_device_attach(efx->net_dev); + + return rc_test; +} + +void efx_selftest_async_start(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + efx_nic_event_test_start(channel); + schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT); +} + +void efx_selftest_async_cancel(struct efx_nic *efx) +{ + cancel_delayed_work_sync(&efx->selftest_work); +} + +void efx_selftest_async_work(struct work_struct *data) +{ + struct efx_nic *efx = container_of(data, struct efx_nic, + selftest_work.work); + struct efx_channel *channel; + int cpu; + + efx_for_each_channel(channel, efx) { + cpu = efx_nic_event_test_irq_cpu(channel); + if (cpu < 0) + netif_err(efx, ifup, efx->net_dev, + "channel %d failed to trigger an interrupt\n", + channel->channel); + else + netif_dbg(efx, ifup, efx->net_dev, + "channel %d triggered interrupt on CPU %d\n", + channel->channel, cpu); + } +} diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h new file mode 100644 index 000000000..009dbe88f --- /dev/null +++ b/drivers/net/ethernet/sfc/selftest.h @@ -0,0 +1,55 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_SELFTEST_H +#define EFX_SELFTEST_H + +#include "net_driver.h" + +/* + * Self tests + */ + +struct efx_loopback_self_tests { + int tx_sent[EFX_TXQ_TYPES]; + int tx_done[EFX_TXQ_TYPES]; + int rx_good; + int rx_bad; +}; + +#define EFX_MAX_PHY_TESTS 20 + +/* Efx self test results + * For fields which are not counters, 1 indicates success and -1 + * indicates failure. 
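 * A field left at zero means the corresponding test was not run.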
+ */ +struct efx_self_tests { + /* online tests */ + int phy_alive; + int nvram; + int interrupt; + int eventq_dma[EFX_MAX_CHANNELS]; + int eventq_int[EFX_MAX_CHANNELS]; + /* offline tests */ + int memory; + int registers; + int phy_ext[EFX_MAX_PHY_TESTS]; + struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; +}; + +void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr, + int pkt_len); +int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, + unsigned flags); +void efx_selftest_async_start(struct efx_nic *efx); +void efx_selftest_async_cancel(struct efx_nic *efx); +void efx_selftest_async_work(struct work_struct *data); + +#endif /* EFX_SELFTEST_H */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c new file mode 100644 index 000000000..f12c81193 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena.c @@ -0,0 +1,1029 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "bitfield.h" +#include "efx.h" +#include "nic.h" +#include "farch_regs.h" +#include "io.h" +#include "phy.h" +#include "workarounds.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "selftest.h" + +/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ + +static void siena_init_wol(struct efx_nic *efx); + + +static void siena_push_irq_moderation(struct efx_channel *channel) +{ + efx_dword_t timer_cmd; + + if (channel->irq_moderation) + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_CZ_TC_TIMER_MODE, + FFE_CZ_TIMER_MODE_INT_HLDOFF, + FRF_CZ_TC_TIMER_VAL, + channel->irq_moderation - 1); + else + EFX_POPULATE_DWORD_2(timer_cmd, + FRF_CZ_TC_TIMER_MODE, + FFE_CZ_TIMER_MODE_DIS, + FRF_CZ_TC_TIMER_VAL, 0); + efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, + channel->channel); +} + +void siena_prepare_flush(struct efx_nic *efx) +{ + if (efx->fc_disable++ == 0) + efx_mcdi_set_mac(efx); +} + +void siena_finish_flush(struct efx_nic *efx) +{ + if (--efx->fc_disable == 0) + efx_mcdi_set_mac(efx); +} + +static const struct efx_farch_register_test siena_register_tests[] = { + { FR_AZ_ADR_REGION, + EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, + { FR_CZ_USR_EV_CFG, + EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_CFG, + EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, + { FR_AZ_TX_CFG, + EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, + { FR_AZ_TX_RESERVED, + EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, + { FR_AZ_SRM_TX_DC_CFG, + EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_CFG, + EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, + { FR_AZ_RX_DC_PF_WM, + EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_BZ_DP_CTRL, + EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, + { FR_BZ_RX_RSS_TKEY, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG1, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { 
FR_CZ_RX_RSS_IPV6_REG2, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, + { FR_CZ_RX_RSS_IPV6_REG3, + EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, +}; + +static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) +{ + enum reset_type reset_method = RESET_TYPE_ALL; + int rc, rc2; + + efx_reset_down(efx, reset_method); + + /* Reset the chip immediately so that it is completely + * quiescent regardless of what any VF driver does. + */ + rc = efx_mcdi_reset(efx, reset_method); + if (rc) + goto out; + + tests->registers = + efx_farch_test_registers(efx, siena_register_tests, + ARRAY_SIZE(siena_register_tests)) + ? -1 : 1; + + rc = efx_mcdi_reset(efx, reset_method); +out: + rc2 = efx_reset_up(efx, reset_method, rc == 0); + return rc ? rc : rc2; +} + +/************************************************************************** + * + * PTP + * + ************************************************************************** + */ + +static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time) +{ + _efx_writed(efx, cpu_to_le32(host_time), + FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); +} + +static int siena_ptp_set_ts_config(struct efx_nic *efx, + struct hwtstamp_config *init) +{ + int rc; + + switch (init->rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* if TX timestamping is still requested then leave PTP on */ + return efx_ptp_change_mode(efx, + init->tx_type != HWTSTAMP_TX_OFF, + efx_ptp_get_mode(efx)); + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1); + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + rc = efx_ptp_change_mode(efx, true, + MC_CMD_PTP_MODE_V2_ENHANCED); + /* bug 33070 - old versions of the firmware do not support the + * improved UUID filtering option. Similarly old versions of the + * application do not expect it to be enabled. If the firmware + * does not accept the enhanced mode, fall back to the standard + * PTP v2 UUID filtering. */ + if (rc != 0) + rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2); + return rc; + default: + return -ERANGE; + } +} + +/************************************************************************** + * + * Device reset + * + ************************************************************************** + */ + +static int siena_map_reset_flags(u32 *flags) +{ + enum { + SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER | + ETH_RESET_OFFLOAD | ETH_RESET_MAC | + ETH_RESET_PHY), + SIENA_RESET_MC = (SIENA_RESET_PORT | + ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT), + }; + + if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) { + *flags &= ~SIENA_RESET_MC; + return RESET_TYPE_WORLD; + } + + if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) { + *flags &= ~SIENA_RESET_PORT; + return RESET_TYPE_ALL; + } + + /* no invisible reset implemented */ + + return -EINVAL; +} + +#ifdef CONFIG_EEH +/* When a PCI device is isolated from the bus, a subsequent MMIO read is + * required for the kernel EEH mechanisms to notice. As the Solarflare driver + * was written to minimise MMIO read (for latency) then a periodic call to check + * the EEH status of the device is required so that device recovery can happen + * in a timely fashion. 
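 * siena_monitor() is only wired up as the ->monitor hook when CONFIG_EEH
 * is enabled; see the efx_nic_type definition at the end of this file.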
+ */ +static void siena_monitor(struct efx_nic *efx) +{ + struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev); + + eeh_dev_check_failure(eehdev); +} +#endif + +static int siena_probe_nvconfig(struct efx_nic *efx) +{ + u32 caps = 0; + int rc; + + rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps); + + efx->timer_quantum_ns = + (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? + 3072 : 6144; /* 768 cycles */ + return rc; +} + +static int siena_dimension_resources(struct efx_nic *efx) +{ + /* Each port has a small block of internal SRAM dedicated to + * the buffer table and descriptor caches. In theory we can + * map both blocks to one port, but we don't. + */ + efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); + return 0; +} + +static unsigned int siena_mem_map_size(struct efx_nic *efx) +{ + return FR_CZ_MC_TREG_SMEM + + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS; +} + +static int siena_probe_nic(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data; + efx_oword_t reg; + int rc; + + /* Allocate storage for hardware specific data */ + nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + nic_data->efx = efx; + efx->nic_data = nic_data; + + if (efx_farch_fpga_ver(efx) != 0) { + netif_err(efx, probe, efx->net_dev, + "Siena FPGA not supported\n"); + rc = -ENODEV; + goto fail1; + } + + efx->max_channels = EFX_MAX_CHANNELS; + + efx_reado(efx, ®, FR_AZ_CS_DEBUG); + efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; + + rc = efx_mcdi_init(efx); + if (rc) + goto fail1; + + /* Now we can reset the NIC */ + rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); + goto fail3; + } + + siena_init_wol(efx); + + /* Allocate memory for INT_KER */ + rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), + GFP_KERNEL); + if (rc) + goto fail4; + BUG_ON(efx->irq_status.dma_addr & 0x0f); + + netif_dbg(efx, probe, efx->net_dev, + "INT_KER at %llx (virt %p phys %llx)\n", + (unsigned long long)efx->irq_status.dma_addr, + efx->irq_status.addr, + (unsigned long long)virt_to_phys(efx->irq_status.addr)); + + /* Read in the non-volatile configuration */ + rc = siena_probe_nvconfig(efx); + if (rc == -EINVAL) { + netif_err(efx, probe, efx->net_dev, + "NVRAM is invalid therefore using defaults\n"); + efx->phy_type = PHY_TYPE_NONE; + efx->mdio.prtad = MDIO_PRTAD_NONE; + } else if (rc) { + goto fail5; + } + + rc = efx_mcdi_mon_probe(efx); + if (rc) + goto fail5; + + efx_siena_sriov_probe(efx); + efx_ptp_defer_probe_with_channel(efx); + + return 0; + +fail5: + efx_nic_free_buffer(efx, &efx->irq_status); +fail4: +fail3: + efx_mcdi_fini(efx); +fail1: + kfree(efx->nic_data); + return rc; +} + +static void siena_rx_push_rss_config(struct efx_nic *efx) +{ + efx_oword_t temp; + + /* Set hash key for IPv4 */ + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + + /* Enable IPv6 RSS */ + BUILD_BUG_ON(sizeof(efx->rx_hash_key) < + 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || + FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); + memcpy(&temp, efx->rx_hash_key, sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); + memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); + EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, + FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); + memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), + 
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); + efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + + efx_farch_rx_push_indir_table(efx); +} + +/* This call performs hardware-specific global initialisation, such as + * defining the descriptor cache sizes and number of RSS channels. + * It does not set up any buffers, descriptor rings or event queues. + */ +static int siena_init_nic(struct efx_nic *efx) +{ + efx_oword_t temp; + int rc; + + /* Recover from a failed assertion post-reset */ + rc = efx_mcdi_handle_assertion(efx); + if (rc) + return rc; + + /* Squash TX of packets of 16 bytes or less */ + efx_reado(efx, &temp, FR_AZ_TX_RESERVED); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); + efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 + * descriptors (which is bad). + */ + efx_reado(efx, &temp, FR_AZ_TX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); + EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); + efx_writeo(efx, &temp, FR_AZ_TX_CFG); + + efx_reado(efx, &temp, FR_AZ_RX_CFG); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); + /* Enable hash insertion. This is broken for the 'Falcon' hash + * if IPv6 hashing is also enabled, so also select Toeplitz + * TCP/IPv4 and IPv4 hashes. */ + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); + EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE, + EFX_RX_USR_BUF_SIZE >> 5); + efx_writeo(efx, &temp, FR_AZ_RX_CFG); + + siena_rx_push_rss_config(efx); + + /* Enable event logging */ + rc = efx_mcdi_log_ctrl(efx, true, false, 0); + if (rc) + return rc; + + /* Set destination of both TX and RX Flush events */ + EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); + efx_writeo(efx, &temp, FR_BZ_DP_CTRL); + + EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); + efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); + + efx_farch_init_common(efx); + return 0; +} + +static void siena_remove_nic(struct efx_nic *efx) +{ + efx_mcdi_mon_remove(efx); + + efx_nic_free_buffer(efx, &efx->irq_status); + + efx_mcdi_reset(efx, RESET_TYPE_ALL); + + efx_mcdi_fini(efx); + + /* Tear down the private nic state */ + kfree(efx->nic_data); + efx->nic_data = NULL; +} + +#define SIENA_DMA_STAT(ext_name, mcdi_name) \ + [SIENA_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } +#define SIENA_OTHER_STAT(ext_name) \ + [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 } +#define GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = { + SIENA_DMA_STAT(tx_bytes, TX_BYTES), + SIENA_OTHER_STAT(tx_good_bytes), + SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES), + SIENA_DMA_STAT(tx_packets, TX_PKTS), + SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS), + SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS), + SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS), + SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), + SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), + SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), + SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS), + SIENA_DMA_STAT(tx_64, TX_64_PKTS), + SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), + SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), + SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), + SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), + SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + 
SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS), + SIENA_OTHER_STAT(tx_collision), + SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS), + SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS), + SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS), + SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS), + SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS), + SIENA_DMA_STAT(rx_bytes, RX_BYTES), + SIENA_OTHER_STAT(rx_good_bytes), + SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES), + SIENA_DMA_STAT(rx_packets, RX_PKTS), + SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS), + SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), + SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS), + SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS), + SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), + SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), + SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), + SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), + SIENA_DMA_STAT(rx_64, RX_64_PKTS), + SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), + SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), + SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), + SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), + SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), + SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), + SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), + SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS), + SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS), + SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), + SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), + SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS), + SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS), + GENERIC_SW_STAT(rx_nodesc_trunc), + GENERIC_SW_STAT(rx_noskb_drops), +}; +static const unsigned long siena_stat_mask[] = { + [0 ... 
BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL, +}; + +static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names) +{ + return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT, + siena_stat_mask, names); +} + +static int siena_try_update_nic_stats(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + __le64 *dma_stats; + __le64 generation_start, generation_end; + + dma_stats = efx->stats_buffer.addr; + + generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + return 0; + rmb(); + efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask, + stats, efx->stats_buffer.addr, false); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end != generation_start) + return -EAGAIN; + + /* Update derived statistics */ + efx_nic_fix_nodesc_drop_stat(efx, + &stats[SIENA_STAT_rx_nodesc_drop_cnt]); + efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes], + stats[SIENA_STAT_tx_bytes] - + stats[SIENA_STAT_tx_bad_bytes]); + stats[SIENA_STAT_tx_collision] = + stats[SIENA_STAT_tx_single_collision] + + stats[SIENA_STAT_tx_multiple_collision] + + stats[SIENA_STAT_tx_excessive_collision] + + stats[SIENA_STAT_tx_late_collision]; + efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes], + stats[SIENA_STAT_rx_bytes] - + stats[SIENA_STAT_rx_bad_bytes]); + efx_update_sw_stats(efx, stats); + return 0; +} + +static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + struct siena_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + int retry; + + /* If we're unlucky enough to read statistics wduring the DMA, wait + * up to 10ms for it to finish (typically takes <500us) */ + for (retry = 0; retry < 100; ++retry) { + if (siena_try_update_nic_stats(efx) == 0) + break; + udelay(100); + } + + if (full_stats) + memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT); + + if (core_stats) { + core_stats->rx_packets = stats[SIENA_STAT_rx_packets]; + core_stats->tx_packets = stats[SIENA_STAT_tx_packets]; + core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes]; + core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes]; + core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[SIENA_STAT_rx_multicast]; + core_stats->collisions = stats[SIENA_STAT_tx_collision]; + core_stats->rx_length_errors = + stats[SIENA_STAT_rx_gtjumbo] + + stats[SIENA_STAT_rx_length_error]; + core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad]; + core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error]; + core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow]; + core_stats->tx_window_errors = + stats[SIENA_STAT_tx_late_collision]; + + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors + + stats[SIENA_STAT_rx_symbol_error]); + core_stats->tx_errors = (core_stats->tx_window_errors + + stats[SIENA_STAT_tx_bad]); + } + + return SIENA_STAT_COUNT; +} + +static int siena_mac_reconfigure(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); + int rc; + + BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN != + MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + + sizeof(efx->multicast_hash)); + + efx_farch_filter_sync_rx_mode(efx); + + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + rc = efx_mcdi_set_mac(efx); + if (rc != 0) + return 
rc; + + memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0), + efx->multicast_hash.byte, sizeof(efx->multicast_hash)); + return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +/************************************************************************** + * + * Wake on LAN + * + ************************************************************************** + */ + +static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) +{ + struct siena_nic_data *nic_data = efx->nic_data; + + wol->supported = WAKE_MAGIC; + if (nic_data->wol_filter_id != -1) + wol->wolopts = WAKE_MAGIC; + else + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + + +static int siena_set_wol(struct efx_nic *efx, u32 type) +{ + struct siena_nic_data *nic_data = efx->nic_data; + int rc; + + if (type & ~WAKE_MAGIC) + return -EINVAL; + + if (type & WAKE_MAGIC) { + if (nic_data->wol_filter_id != -1) + efx_mcdi_wol_filter_remove(efx, + nic_data->wol_filter_id); + rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, + &nic_data->wol_filter_id); + if (rc) + goto fail; + + pci_wake_from_d3(efx->pci_dev, true); + } else { + rc = efx_mcdi_wol_filter_reset(efx); + nic_data->wol_filter_id = -1; + pci_wake_from_d3(efx->pci_dev, false); + if (rc) + goto fail; + } + + return 0; + fail: + netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", + __func__, type, rc); + return rc; +} + + +static void siena_init_wol(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + int rc; + + rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); + + if (rc != 0) { + /* If it failed, attempt to get into a synchronised + * state with MC by resetting any set WoL filters */ + efx_mcdi_wol_filter_reset(efx); + nic_data->wol_filter_id = -1; + } else if (nic_data->wol_filter_id != -1) { + pci_wake_from_d3(efx->pci_dev, true); + } +} + +/************************************************************************** + * + * MCDI + * + ************************************************************************** + */ + +#define MCDI_PDU(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) +#define MCDI_DOORBELL(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) +#define MCDI_STATUS(efx) \ + (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST) + +static void siena_mcdi_request(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); + unsigned int i; + unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4); + + EFX_BUG_ON_PARANOID(hdr_len != 4); + + efx_writed(efx, hdr, pdu); + + for (i = 0; i < inlen_dw; i++) + efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i); + + /* Ensure the request is written out before the doorbell */ + wmb(); + + /* ring the doorbell with a distinctive value */ + _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); +} + +static bool siena_mcdi_poll_response(struct efx_nic *efx) +{ + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + efx_dword_t hdr; + + efx_readd(efx, &hdr, pdu); + + /* All 1's indicates that shared memory is in reset (and is + * not a valid hdr). 
Wait for it to come out reset before + * completing the command + */ + return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff && + EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, + size_t offset, size_t outlen) +{ + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4); + int i; + + for (i = 0; i < outlen_dw; i++) + efx_readd(efx, &outbuf[i], pdu + offset + 4 * i); +} + +static int siena_mcdi_poll_reboot(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); + efx_dword_t reg; + u32 value; + + efx_readd(efx, ®, addr); + value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + + if (value == 0) + return 0; + + EFX_ZERO_DWORD(reg); + efx_writed(efx, ®, addr); + + /* MAC statistics have been cleared on the NIC; clear the local + * copies that we update with efx_update_diff_stat(). + */ + nic_data->stats[SIENA_STAT_tx_good_bytes] = 0; + nic_data->stats[SIENA_STAT_rx_good_bytes] = 0; + + if (value == MC_STATUS_DWORD_ASSERT) + return -EINTR; + else + return -EIO; +} + +/************************************************************************** + * + * MTD + * + ************************************************************************** + */ + +#ifdef CONFIG_SFC_MTD + +struct siena_nvram_type_info { + int port; + const char *name; +}; + +static const struct siena_nvram_type_info siena_nvram_types[] = { + [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" }, + [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" }, + [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" }, +}; + +static int siena_mtd_probe_partition(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *part, + unsigned int type) +{ + const struct siena_nvram_type_info *info; + size_t size, erase_size; + bool protected; + int rc; + + if (type >= ARRAY_SIZE(siena_nvram_types) || + siena_nvram_types[type].name == NULL) + return -ENODEV; + + info = &siena_nvram_types[type]; + + if (info->port != efx_port_num(efx)) + return -ENODEV; + + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); + if (rc) + return rc; + if (protected) + return -ENODEV; /* hide it */ + + part->nvram_type = type; + part->common.dev_type_name = "Siena NVRAM manager"; + part->common.type_name = info->name; + + part->common.mtd.type = MTD_NORFLASH; + part->common.mtd.flags = MTD_CAP_NORFLASH; + part->common.mtd.size = size; + part->common.mtd.erasesize = erase_size; + + return 0; +} + +static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, + struct efx_mcdi_mtd_partition *parts, + size_t n_parts) +{ + uint16_t fw_subtype_list[ + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; + size_t i; + int rc; + + rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); + if (rc) + return rc; + + for (i = 0; i < n_parts; i++) + 
parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type]; + + return 0; +} + +static int siena_mtd_probe(struct efx_nic *efx) +{ + struct efx_mcdi_mtd_partition *parts; + u32 nvram_types; + unsigned int type; + size_t n_parts; + int rc; + + ASSERT_RTNL(); + + rc = efx_mcdi_nvram_types(efx, &nvram_types); + if (rc) + return rc; + + parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL); + if (!parts) + return -ENOMEM; + + type = 0; + n_parts = 0; + + while (nvram_types != 0) { + if (nvram_types & 1) { + rc = siena_mtd_probe_partition(efx, &parts[n_parts], + type); + if (rc == 0) + n_parts++; + else if (rc != -ENODEV) + goto fail; + } + type++; + nvram_types >>= 1; + } + + rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts); + if (rc) + goto fail; + + rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); +fail: + if (rc) + kfree(parts); + return rc; +} + +#endif /* CONFIG_SFC_MTD */ + +/************************************************************************** + * + * Revision-dependent attributes used by efx.c and nic.c + * + ************************************************************************** + */ + +const struct efx_nic_type siena_a0_nic_type = { + .mem_map_size = siena_mem_map_size, + .probe = siena_probe_nic, + .remove = siena_remove_nic, + .init = siena_init_nic, + .dimension_resources = siena_dimension_resources, + .fini = efx_port_dummy_op_void, +#ifdef CONFIG_EEH + .monitor = siena_monitor, +#else + .monitor = NULL, +#endif + .map_reset_reason = efx_mcdi_map_reset_reason, + .map_reset_flags = siena_map_reset_flags, + .reset = efx_mcdi_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_farch_fini_dmaq, + .prepare_flush = siena_prepare_flush, + .finish_flush = siena_finish_flush, + .prepare_flr = efx_port_dummy_op_void, + .finish_flr = efx_farch_finish_flr, + .describe_stats = siena_describe_nic_stats, + .update_stats = siena_update_nic_stats, + .start_stats = efx_mcdi_mac_start_stats, + .pull_stats = efx_mcdi_mac_pull_stats, + .stop_stats = efx_mcdi_mac_stop_stats, + .set_id_led = efx_mcdi_set_id_led, + .push_irq_moderation = siena_push_irq_moderation, + .reconfigure_mac = siena_mac_reconfigure, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = siena_get_wol, + .set_wol = siena_set_wol, + .resume_wol = siena_init_wol, + .test_chip = siena_test_chip, + .test_nvram = efx_mcdi_nvram_test_all, + .mcdi_request = siena_mcdi_request, + .mcdi_poll_response = siena_mcdi_poll_response, + .mcdi_read_response = siena_mcdi_read_response, + .mcdi_poll_reboot = siena_mcdi_poll_reboot, + .irq_enable_master = efx_farch_irq_enable_master, + .irq_test_generate = efx_farch_irq_test_generate, + .irq_disable_non_ev = efx_farch_irq_disable_master, + .irq_handle_msi = efx_farch_msi_interrupt, + .irq_handle_legacy = efx_farch_legacy_interrupt, + .tx_probe = efx_farch_tx_probe, + .tx_init = efx_farch_tx_init, + .tx_remove = efx_farch_tx_remove, + .tx_write = efx_farch_tx_write, + .rx_push_rss_config = siena_rx_push_rss_config, + .rx_probe = efx_farch_rx_probe, + .rx_init = efx_farch_rx_init, + .rx_remove = efx_farch_rx_remove, + .rx_write = efx_farch_rx_write, + .rx_defer_refill = efx_farch_rx_defer_refill, + .ev_probe = efx_farch_ev_probe, + .ev_init = efx_farch_ev_init, + .ev_fini = efx_farch_ev_fini, + .ev_remove = efx_farch_ev_remove, + .ev_process = efx_farch_ev_process, + .ev_read_ack = efx_farch_ev_read_ack, + .ev_test_generate = efx_farch_ev_test_generate, + 
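	/* Filtering on Siena is delegated to the shared Falcon-architecture
	 * (farch) filter table implementation. */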
.filter_table_probe = efx_farch_filter_table_probe, + .filter_table_restore = efx_farch_filter_table_restore, + .filter_table_remove = efx_farch_filter_table_remove, + .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter, + .filter_insert = efx_farch_filter_insert, + .filter_remove_safe = efx_farch_filter_remove_safe, + .filter_get_safe = efx_farch_filter_get_safe, + .filter_clear_rx = efx_farch_filter_clear_rx, + .filter_count_rx_used = efx_farch_filter_count_rx_used, + .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_farch_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_insert = efx_farch_filter_rfs_insert, + .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = siena_mtd_probe, + .mtd_rename = efx_mcdi_mtd_rename, + .mtd_read = efx_mcdi_mtd_read, + .mtd_erase = efx_mcdi_mtd_erase, + .mtd_write = efx_mcdi_mtd_write, + .mtd_sync = efx_mcdi_mtd_sync, +#endif + .ptp_write_host_time = siena_ptp_write_host_time, + .ptp_set_ts_config = siena_ptp_set_ts_config, + .sriov_init = efx_siena_sriov_init, + .sriov_fini = efx_siena_sriov_fini, + .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed, + .sriov_wanted = efx_siena_sriov_wanted, + .sriov_reset = efx_siena_sriov_reset, + + .revision = EFX_REV_SIENA_A0, + .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, + .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, + .buf_tbl_base = FR_BZ_BUF_FULL_TBL, + .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, + .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, + .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE, + .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST, + .rx_buffer_padding = 0, + .can_rx_scatter = true, + .max_interrupt_mode = EFX_INT_MODE_MSIX, + .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, + .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXHASH | NETIF_F_NTUPLE), + .mcdi_max_ver = 1, + .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, + .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ), +}; diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c new file mode 100644 index 000000000..fe8343079 --- /dev/null +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -0,0 +1,1668 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2010-2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "nic.h" +#include "io.h" +#include "mcdi.h" +#include "filter.h" +#include "mcdi_pcol.h" +#include "farch_regs.h" +#include "vfdi.h" + +/* Number of longs required to track all the VIs in a VF */ +#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX) + +/* Maximum number of RX queues supported */ +#define VF_MAX_RX_QUEUES 63 + +/** + * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour + * @VF_TX_FILTER_OFF: Disabled + * @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only + * 2 TX queues allowed per VF. + * @VF_TX_FILTER_ON: Enabled + */ +enum efx_vf_tx_filter_mode { + VF_TX_FILTER_OFF, + VF_TX_FILTER_AUTO, + VF_TX_FILTER_ON, +}; + +/** + * struct efx_vf - Back-end resource and protocol state for a PCI VF + * @efx: The Efx NIC owning this VF + * @pci_rid: The PCI requester ID for this VF + * @pci_name: The PCI name (formatted address) of this VF + * @index: Index of VF within its port and PF. + * @req: VFDI incoming request work item. Incoming USR_EV events are received + * by the NAPI handler, but must be handled by executing MCDI requests + * inside a work item. + * @req_addr: VFDI incoming request DMA address (in VF's PCI address space). + * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member. + * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member. + * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by + * @status_lock + * @busy: VFDI request queued to be processed or being processed. Receiving + * a VFDI request when @busy is set is an error condition. + * @buf: Incoming VFDI requests are DMA from the VF into this buffer. + * @buftbl_base: Buffer table entries for this VF start at this index. + * @rx_filtering: Receive filtering has been requested by the VF driver. + * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request. + * @rx_filter_qid: VF relative qid for RX filter requested by VF. + * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported. + * @tx_filter_mode: Transmit MAC filtering mode. + * @tx_filter_id: Transmit MAC filter ID. + * @addr: The MAC address and outer vlan tag of the VF. + * @status_addr: VF DMA address of page for &struct vfdi_status updates. + * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr, + * @peer_page_addrs and @peer_page_count from simultaneous + * updates by the VM and consumption by + * efx_siena_sriov_update_vf_addr() + * @peer_page_addrs: Pointer to an array of guest pages for local addresses. + * @peer_page_count: Number of entries in @peer_page_count. + * @evq0_addrs: Array of guest pages backing evq0. + * @evq0_count: Number of entries in @evq0_addrs. + * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler + * to wait for flush completions. + * @txq_lock: Mutex for TX queue allocation. + * @txq_mask: Mask of initialized transmit queues. + * @txq_count: Number of initialized transmit queues. + * @rxq_mask: Mask of initialized receive queues. + * @rxq_count: Number of initialized receive queues. + * @rxq_retry_mask: Mask or receive queues that need to be flushed again + * due to flush failure. + * @rxq_retry_count: Number of receive queues in @rxq_retry_mask. + * @reset_work: Work item to schedule a VF reset. 
+ */ +struct efx_vf { + struct efx_nic *efx; + unsigned int pci_rid; + char pci_name[13]; /* dddd:bb:dd.f */ + unsigned int index; + struct work_struct req; + u64 req_addr; + int req_type; + unsigned req_seqno; + unsigned msg_seqno; + bool busy; + struct efx_buffer buf; + unsigned buftbl_base; + bool rx_filtering; + enum efx_filter_flags rx_filter_flags; + unsigned rx_filter_qid; + int rx_filter_id; + enum efx_vf_tx_filter_mode tx_filter_mode; + int tx_filter_id; + struct vfdi_endpoint addr; + u64 status_addr; + struct mutex status_lock; + u64 *peer_page_addrs; + unsigned peer_page_count; + u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) / + EFX_BUF_SIZE]; + unsigned evq0_count; + wait_queue_head_t flush_waitq; + struct mutex txq_lock; + unsigned long txq_mask[VI_MASK_LENGTH]; + unsigned txq_count; + unsigned long rxq_mask[VI_MASK_LENGTH]; + unsigned rxq_count; + unsigned long rxq_retry_mask[VI_MASK_LENGTH]; + atomic_t rxq_retry_count; + struct work_struct reset_work; +}; + +struct efx_memcpy_req { + unsigned int from_rid; + void *from_buf; + u64 from_addr; + unsigned int to_rid; + u64 to_addr; + unsigned length; +}; + +/** + * struct efx_local_addr - A MAC address on the vswitch without a VF. + * + * Siena does not have a switch, so VFs can't transmit data to each + * other. Instead the VFs must be made aware of the local addresses + * on the vswitch, so that they can arrange for an alternative + * software datapath to be used. + * + * @link: List head for insertion into efx->local_addr_list. + * @addr: Ethernet address + */ +struct efx_local_addr { + struct list_head link; + u8 addr[ETH_ALEN]; +}; + +/** + * struct efx_endpoint_page - Page of vfdi_endpoint structures + * + * @link: List head for insertion into efx->local_page_list. + * @ptr: Pointer to page. + * @addr: DMA address of page. + */ +struct efx_endpoint_page { + struct list_head link; + void *ptr; + dma_addr_t addr; +}; + +/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */ +#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \ + ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid)) +#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \ + (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ + (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) +#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \ + (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \ + (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE)) + +#define EFX_FIELD_MASK(_field) \ + ((1 << _field ## _WIDTH) - 1) + +/* VFs can only use this many transmit channels */ +static unsigned int vf_max_tx_channels = 2; +module_param(vf_max_tx_channels, uint, 0444); +MODULE_PARM_DESC(vf_max_tx_channels, + "Limit the number of TX channels VFs can use"); + +static int max_vfs = -1; +module_param(max_vfs, int, 0444); +MODULE_PARM_DESC(max_vfs, + "Reduce the number of VFs initialized by the driver"); + +/* Workqueue used by VFDI communication. We can't use the global + * workqueue because it may be running the VF driver's probe() + * routine, which will be blocked there waiting for a VFDI response. + */ +static struct workqueue_struct *vfdi_workqueue; + +static unsigned abs_index(struct efx_vf *vf, unsigned index) +{ + return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; +} + +static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, + unsigned *vi_scale_out, unsigned *vf_total_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN); + unsigned vi_scale, vf_total; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 
1 : 0); + MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE); + MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count); + + rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN, + outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_SRIOV_OUT_LEN) + return -EIO; + + vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL); + vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE); + if (vi_scale > EFX_VI_SCALE_MAX) + return -EOPNOTSUPP; + + if (vi_scale_out) + *vi_scale_out = vi_scale; + if (vf_total_out) + *vf_total_out = vf_total; + + return 0; +} + +static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled) +{ + struct siena_nic_data *nic_data = efx->nic_data; + efx_oword_t reg; + + EFX_POPULATE_OWORD_2(reg, + FRF_CZ_USREV_DIS, enabled ? 0 : 1, + FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel); + efx_writeo(efx, ®, FR_CZ_USR_EV_CFG); +} + +static int efx_siena_sriov_memcpy(struct efx_nic *efx, + struct efx_memcpy_req *req, + unsigned int count) +{ + MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1); + MCDI_DECLARE_STRUCT_PTR(record); + unsigned int index, used; + u64 from_addr; + u32 from_rid; + int rc; + + mb(); /* Finish writing source/reading dest before DMA starts */ + + if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM)) + return -ENOBUFS; + used = MC_CMD_MEMCPY_IN_LEN(count); + + for (index = 0; index < count; index++) { + record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS, + count); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID, + req->to_rid); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR, + req->to_addr); + if (req->from_buf == NULL) { + from_rid = req->from_rid; + from_addr = req->from_addr; + } else { + if (WARN_ON(used + req->length > + MCDI_CTL_SDU_LEN_MAX_V1)) { + rc = -ENOBUFS; + goto out; + } + + from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE; + from_addr = used; + memcpy(_MCDI_PTR(inbuf, used), req->from_buf, + req->length); + used += req->length; + } + + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid); + MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR, + from_addr); + MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH, + req->length); + + ++req; + } + + rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL); +out: + mb(); /* Don't write source/read dest before DMA is complete */ + + return rc; +} + +/* The TX filter is entirely controlled by this driver, and is modified + * underneath the feet of the VF + */ +static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct efx_filter_spec filter; + u16 vlan; + int rc; + + if (vf->tx_filter_id != -1) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + vf->tx_filter_id); + netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n", + vf->pci_name, vf->tx_filter_id); + vf->tx_filter_id = -1; + } + + if (is_zero_ether_addr(vf->addr.mac_addr)) + return; + + /* Turn on TX filtering automatically if not explicitly + * enabled or disabled. + */ + if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2) + vf->tx_filter_mode = VF_TX_FILTER_ON; + + vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; + efx_filter_init_tx(&filter, abs_index(vf, 0)); + rc = efx_filter_set_eth_local(&filter, + vlan ? 
vlan : EFX_FILTER_VID_UNSPEC, + vf->addr.mac_addr); + BUG_ON(rc); + + rc = efx_filter_insert_filter(efx, &filter, true); + if (rc < 0) { + netif_warn(efx, hw, efx->net_dev, + "Unable to migrate tx filter for vf %s\n", + vf->pci_name); + } else { + netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n", + vf->pci_name, rc); + vf->tx_filter_id = rc; + } +} + +/* The RX filter is managed here on behalf of the VF driver */ +static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct efx_filter_spec filter; + u16 vlan; + int rc; + + if (vf->rx_filter_id != -1) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + vf->rx_filter_id); + netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n", + vf->pci_name, vf->rx_filter_id); + vf->rx_filter_id = -1; + } + + if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr)) + return; + + vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK; + efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED, + vf->rx_filter_flags, + abs_index(vf, vf->rx_filter_qid)); + rc = efx_filter_set_eth_local(&filter, + vlan ? vlan : EFX_FILTER_VID_UNSPEC, + vf->addr.mac_addr); + BUG_ON(rc); + + rc = efx_filter_insert_filter(efx, &filter, true); + if (rc < 0) { + netif_warn(efx, hw, efx->net_dev, + "Unable to insert rx filter for vf %s\n", + vf->pci_name); + } else { + netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n", + vf->pci_name, rc); + vf->rx_filter_id = rc; + } +} + +static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + + efx_siena_sriov_reset_tx_filter(vf); + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); +} + +/* Push the peer list to this VF. The caller must hold status_lock to interlock + * with VFDI requests, and they must be serialised against manipulation of + * local_page_list, either by acquiring local_lock or by running from + * efx_siena_sriov_peer_work() + */ +static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *status = nic_data->vfdi_status.addr; + struct efx_memcpy_req copy[4]; + struct efx_endpoint_page *epp; + unsigned int pos, count; + unsigned data_offset; + efx_qword_t event; + + WARN_ON(!mutex_is_locked(&vf->status_lock)); + WARN_ON(!vf->status_addr); + + status->local = vf->addr; + status->generation_end = ++status->generation_start; + + memset(copy, '\0', sizeof(copy)); + /* Write generation_start */ + copy[0].from_buf = &status->generation_start; + copy[0].to_rid = vf->pci_rid; + copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status, + generation_start); + copy[0].length = sizeof(status->generation_start); + /* DMA the rest of the structure (excluding the generations). This + * assumes that the non-generation portion of vfdi_status is in + * one chunk starting at the version member. 
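 * generation_start is written first and generation_end last, so the VF
 * driver can detect a torn snapshot by checking that the two generation
 * counts match after reading the status page.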
+ */ + data_offset = offsetof(struct vfdi_status, version); + copy[1].from_rid = efx->pci_dev->devfn; + copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset; + copy[1].to_rid = vf->pci_rid; + copy[1].to_addr = vf->status_addr + data_offset; + copy[1].length = status->length - data_offset; + + /* Copy the peer pages */ + pos = 2; + count = 0; + list_for_each_entry(epp, &nic_data->local_page_list, link) { + if (count == vf->peer_page_count) { + /* The VF driver will know they need to provide more + * pages because peer_addr_count is too large. + */ + break; + } + copy[pos].from_buf = NULL; + copy[pos].from_rid = efx->pci_dev->devfn; + copy[pos].from_addr = epp->addr; + copy[pos].to_rid = vf->pci_rid; + copy[pos].to_addr = vf->peer_page_addrs[count]; + copy[pos].length = EFX_PAGE_SIZE; + + if (++pos == ARRAY_SIZE(copy)) { + efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); + pos = 0; + } + ++count; + } + + /* Write generation_end */ + copy[pos].from_buf = &status->generation_end; + copy[pos].to_rid = vf->pci_rid; + copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status, + generation_end); + copy[pos].length = sizeof(status->generation_end); + efx_siena_sriov_memcpy(efx, copy, pos + 1); + + /* Notify the guest */ + EFX_POPULATE_QWORD_3(event, + FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, + VFDI_EV_SEQ, (vf->msg_seqno & 0xff), + VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); + ++vf->msg_seqno; + efx_farch_generate_event(efx, + EFX_VI_BASE + vf->index * efx_vf_size(efx), + &event); +} + +static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset, + u64 *addr, unsigned count) +{ + efx_qword_t buf; + unsigned pos; + + for (pos = 0; pos < count; ++pos) { + EFX_POPULATE_QWORD_3(buf, + FRF_AZ_BUF_ADR_REGION, 0, + FRF_AZ_BUF_ADR_FBUF, + addr ? addr[pos] >> 12 : 0, + FRF_AZ_BUF_OWNER_ID_FBUF, 0); + efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL, + &buf, offset + pos); + } +} + +static bool bad_vf_index(struct efx_nic *efx, unsigned index) +{ + return index >= efx_vf_size(efx); +} + +static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count) +{ + unsigned max_buf_count = max_entry_count * + sizeof(efx_qword_t) / EFX_BUF_SIZE; + + return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count); +} + +/* Check that VI specified by per-port index belongs to a VF. + * Optionally set VF index and VI index within the VF. 
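 * Returns true if the VI does not map to any VF.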
+ */ +static bool map_vi_index(struct efx_nic *efx, unsigned abs_index, + struct efx_vf **vf_out, unsigned *rel_index_out) +{ + unsigned vf_i; + + if (abs_index < EFX_VI_BASE) + return true; + vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx); + if (vf_i >= efx->vf_init_count) + return true; + + if (vf_out) + *vf_out = efx->vf + vf_i; + if (rel_index_out) + *rel_index_out = abs_index % efx_vf_size(efx); + return false; +} + +static int efx_vfdi_init_evq(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_evq = req->u.init_evq.index; + unsigned buf_count = req->u.init_evq.buf_count; + unsigned abs_evq = abs_index(vf, vf_evq); + unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq); + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || + bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n", + vf->pci_name, vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + + efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count); + + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq); + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(buf_count), + FRF_AZ_EVQ_BUF_BASE_ID, buftbl); + efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq); + + if (vf_evq == 0) { + memcpy(vf->evq0_addrs, req->u.init_evq.addr, + buf_count * sizeof(u64)); + vf->evq0_count = buf_count; + } + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_init_rxq(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_rxq = req->u.init_rxq.index; + unsigned vf_evq = req->u.init_rxq.evq; + unsigned buf_count = req->u.init_rxq.buf_count; + unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq); + unsigned label; + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || + vf_rxq >= VF_MAX_RX_QUEUES || + bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d " + "buf_count %d\n", vf->pci_name, vf_rxq, + vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask)) + ++vf->rxq_count; + efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count); + + label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL); + EFX_POPULATE_OWORD_6(reg, + FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl, + FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), + FRF_AZ_RX_DESCQ_LABEL, label, + FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count), + FRF_AZ_RX_DESCQ_JUMBO, + !!(req->u.init_rxq.flags & + VFDI_RXQ_FLAG_SCATTER_EN), + FRF_AZ_RX_DESCQ_EN, 1); + efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL, + abs_index(vf, vf_rxq)); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_init_txq(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_txq = req->u.init_txq.index; + unsigned vf_evq = req->u.init_txq.evq; + unsigned buf_count = req->u.init_txq.buf_count; + unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq); + unsigned label, eth_filt_en; + efx_oword_t reg; + + if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) || + vf_txq >= vf_max_tx_channels || + bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INIT_TXQ 
from %s: txq %d evq %d " + "buf_count %d\n", vf->pci_name, vf_txq, + vf_evq, buf_count); + return VFDI_RC_EINVAL; + } + + mutex_lock(&vf->txq_lock); + if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask)) + ++vf->txq_count; + mutex_unlock(&vf->txq_lock); + efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count); + + eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON; + + label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL); + EFX_POPULATE_OWORD_8(reg, + FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U), + FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en, + FRF_AZ_TX_DESCQ_EN, 1, + FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl, + FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq), + FRF_AZ_TX_DESCQ_LABEL, label, + FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count), + FRF_BZ_TX_NON_IP_DROP_DIS, 1); + efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL, + abs_index(vf, vf_txq)); + + return VFDI_RC_SUCCESS; +} + +/* Returns true when efx_vfdi_fini_all_queues should wake */ +static bool efx_vfdi_flush_wake(struct efx_vf *vf) +{ + /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */ + smp_mb(); + + return (!vf->txq_count && !vf->rxq_count) || + atomic_read(&vf->rxq_retry_count); +} + +static void efx_vfdi_flush_clear(struct efx_vf *vf) +{ + memset(vf->txq_mask, 0, sizeof(vf->txq_mask)); + vf->txq_count = 0; + memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask)); + vf->rxq_count = 0; + memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask)); + atomic_set(&vf->rxq_retry_count, 0); +} + +static int efx_vfdi_fini_all_queues(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + efx_oword_t reg; + unsigned count = efx_vf_size(efx); + unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx); + unsigned timeout = HZ; + unsigned index, rxqs_count; + MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX); + int rc; + + BUILD_BUG_ON(VF_MAX_RX_QUEUES > + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); + + rtnl_lock(); + siena_prepare_flush(efx); + rtnl_unlock(); + + /* Flush all the initialized queues */ + rxqs_count = 0; + for (index = 0; index < count; ++index) { + if (test_bit(index, vf->txq_mask)) { + EFX_POPULATE_OWORD_2(reg, + FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, + FRF_AZ_TX_FLUSH_DESCQ, + vf_offset + index); + efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ); + } + if (test_bit(index, vf->rxq_mask)) { + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + rxqs_count, vf_offset + index); + rxqs_count++; + } + } + + atomic_set(&vf->rxq_retry_count, 0); + while (timeout && (vf->rxq_count || vf->txq_count)) { + rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, + MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count), + NULL, 0, NULL); + WARN_ON(rc < 0); + + timeout = wait_event_timeout(vf->flush_waitq, + efx_vfdi_flush_wake(vf), + timeout); + rxqs_count = 0; + for (index = 0; index < count; ++index) { + if (test_and_clear_bit(index, vf->rxq_retry_mask)) { + atomic_dec(&vf->rxq_retry_count); + MCDI_SET_ARRAY_DWORD( + inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, + rxqs_count, vf_offset + index); + rxqs_count++; + } + } + } + + rtnl_lock(); + siena_finish_flush(efx); + rtnl_unlock(); + + /* Irrespective of success/failure, fini the queues */ + EFX_ZERO_OWORD(reg); + for (index = 0; index < count; ++index) { + efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL, + vf_offset + index); + efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL, + vf_offset + index); + efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, + vf_offset + index); + efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, + vf_offset + index); + } + efx_siena_sriov_bufs(efx, 
vf->buftbl_base, NULL, + EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx)); + efx_vfdi_flush_clear(vf); + + vf->evq0_count = 0; + + return timeout ? 0 : VFDI_RC_ETIMEDOUT; +} + +static int efx_vfdi_insert_filter(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_req *req = vf->buf.addr; + unsigned vf_rxq = req->u.mac_filter.rxq; + unsigned flags; + + if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid INSERT_FILTER from %s: rxq %d " + "flags 0x%x\n", vf->pci_name, vf_rxq, + req->u.mac_filter.flags); + return VFDI_RC_EINVAL; + } + + flags = 0; + if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS) + flags |= EFX_FILTER_FLAG_RX_RSS; + if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + vf->rx_filter_flags = flags; + vf->rx_filter_qid = vf_rxq; + vf->rx_filtering = true; + + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_remove_all_filters(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + + vf->rx_filtering = false; + efx_siena_sriov_reset_rx_filter(vf); + queue_work(vfdi_workqueue, &nic_data->peer_work); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_set_status_page(struct efx_vf *vf) +{ + struct efx_nic *efx = vf->efx; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_req *req = vf->buf.addr; + u64 page_count = req->u.set_status_page.peer_page_count; + u64 max_page_count = + (EFX_PAGE_SIZE - + offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0])) + / sizeof(req->u.set_status_page.peer_page_addr[0]); + + if (!req->u.set_status_page.dma_addr || page_count > max_page_count) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Invalid SET_STATUS_PAGE from %s\n", + vf->pci_name); + return VFDI_RC_EINVAL; + } + + mutex_lock(&nic_data->local_lock); + mutex_lock(&vf->status_lock); + vf->status_addr = req->u.set_status_page.dma_addr; + + kfree(vf->peer_page_addrs); + vf->peer_page_addrs = NULL; + vf->peer_page_count = 0; + + if (page_count) { + vf->peer_page_addrs = kcalloc(page_count, sizeof(u64), + GFP_KERNEL); + if (vf->peer_page_addrs) { + memcpy(vf->peer_page_addrs, + req->u.set_status_page.peer_page_addr, + page_count * sizeof(u64)); + vf->peer_page_count = page_count; + } + } + + __efx_siena_sriov_push_vf_status(vf); + mutex_unlock(&vf->status_lock); + mutex_unlock(&nic_data->local_lock); + + return VFDI_RC_SUCCESS; +} + +static int efx_vfdi_clear_status_page(struct efx_vf *vf) +{ + mutex_lock(&vf->status_lock); + vf->status_addr = 0; + mutex_unlock(&vf->status_lock); + + return VFDI_RC_SUCCESS; +} + +typedef int (*efx_vfdi_op_t)(struct efx_vf *vf); + +static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { + [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq, + [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq, + [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq, + [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues, + [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter, + [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters, + [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page, + [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page, +}; + +static void efx_siena_sriov_vfdi(struct work_struct *work) +{ + struct efx_vf *vf = container_of(work, struct efx_vf, req); + struct efx_nic *efx = vf->efx; + struct vfdi_req *req = 
vf->buf.addr; + struct efx_memcpy_req copy[2]; + int rc; + + /* Copy this page into the local address space */ + memset(copy, '\0', sizeof(copy)); + copy[0].from_rid = vf->pci_rid; + copy[0].from_addr = vf->req_addr; + copy[0].to_rid = efx->pci_dev->devfn; + copy[0].to_addr = vf->buf.dma_addr; + copy[0].length = EFX_PAGE_SIZE; + rc = efx_siena_sriov_memcpy(efx, copy, 1); + if (rc) { + /* If we can't get the request, we can't reply to the caller */ + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Unable to fetch VFDI request from %s rc %d\n", + vf->pci_name, -rc); + vf->busy = false; + return; + } + + if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) { + rc = vfdi_ops[req->op](vf); + if (rc == 0) { + netif_dbg(efx, hw, efx->net_dev, + "vfdi request %d from %s ok\n", + req->op, vf->pci_name); + } + } else { + netif_dbg(efx, hw, efx->net_dev, + "ERROR: Unrecognised request %d from VF %s addr " + "%llx\n", req->op, vf->pci_name, + (unsigned long long)vf->req_addr); + rc = VFDI_RC_EOPNOTSUPP; + } + + /* Allow subsequent VF requests */ + vf->busy = false; + smp_wmb(); + + /* Respond to the request */ + req->rc = rc; + req->op = VFDI_OP_RESPONSE; + + memset(copy, '\0', sizeof(copy)); + copy[0].from_buf = &req->rc; + copy[0].to_rid = vf->pci_rid; + copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc); + copy[0].length = sizeof(req->rc); + copy[1].from_buf = &req->op; + copy[1].to_rid = vf->pci_rid; + copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op); + copy[1].length = sizeof(req->op); + + (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy)); +} + + + +/* After a reset the event queues inside the guests no longer exist. Fill the + * event ring in guest memory with VFDI reset events, then (re-initialise) the + * event queue to raise an interrupt. The guest driver will then recover. 
+ */ +static void efx_siena_sriov_reset_vf(struct efx_vf *vf, + struct efx_buffer *buffer) +{ + struct efx_nic *efx = vf->efx; + struct efx_memcpy_req copy_req[4]; + efx_qword_t event; + unsigned int pos, count, k, buftbl, abs_evq; + efx_oword_t reg; + efx_dword_t ptr; + int rc; + + BUG_ON(buffer->len != EFX_PAGE_SIZE); + + if (!vf->evq0_count) + return; + BUG_ON(vf->evq0_count & (vf->evq0_count - 1)); + + mutex_lock(&vf->status_lock); + EFX_POPULATE_QWORD_3(event, + FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV, + VFDI_EV_SEQ, vf->msg_seqno, + VFDI_EV_TYPE, VFDI_EV_TYPE_RESET); + vf->msg_seqno++; + for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event)) + memcpy(buffer->addr + pos, &event, sizeof(event)); + + for (pos = 0; pos < vf->evq0_count; pos += count) { + count = min_t(unsigned, vf->evq0_count - pos, + ARRAY_SIZE(copy_req)); + for (k = 0; k < count; k++) { + copy_req[k].from_buf = NULL; + copy_req[k].from_rid = efx->pci_dev->devfn; + copy_req[k].from_addr = buffer->dma_addr; + copy_req[k].to_rid = vf->pci_rid; + copy_req[k].to_addr = vf->evq0_addrs[pos + k]; + copy_req[k].length = EFX_PAGE_SIZE; + } + rc = efx_siena_sriov_memcpy(efx, copy_req, count); + if (rc) { + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Unable to notify %s of reset" + ": %d\n", vf->pci_name, -rc); + break; + } + } + + /* Reinitialise, arm and trigger evq0 */ + abs_evq = abs_index(vf, 0); + buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0); + efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count); + + EFX_POPULATE_OWORD_3(reg, + FRF_CZ_TIMER_Q_EN, 1, + FRF_CZ_HOST_NOTIFY_MODE, 0, + FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); + efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq); + EFX_POPULATE_OWORD_3(reg, + FRF_AZ_EVQ_EN, 1, + FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count), + FRF_AZ_EVQ_BUF_BASE_ID, buftbl); + efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq); + EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0); + efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq); + + mutex_unlock(&vf->status_lock); +} + +static void efx_siena_sriov_reset_vf_work(struct work_struct *work) +{ + struct efx_vf *vf = container_of(work, struct efx_vf, req); + struct efx_nic *efx = vf->efx; + struct efx_buffer buf; + + if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) { + efx_siena_sriov_reset_vf(vf, &buf); + efx_nic_free_buffer(efx, &buf); + } +} + +static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx) +{ + netif_err(efx, drv, efx->net_dev, + "ERROR: IOV requires MSI-X and 1 additional interrupt " + "vector. 
IOV disabled\n"); + efx->vf_count = 0; +} + +static int efx_siena_sriov_probe_channel(struct efx_channel *channel) +{ + struct siena_nic_data *nic_data = channel->efx->nic_data; + nic_data->vfdi_channel = channel; + + return 0; +} + +static void +efx_siena_sriov_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-iov", channel->efx->name); +} + +static const struct efx_channel_type efx_siena_sriov_channel_type = { + .handle_no_channel = efx_siena_sriov_handle_no_channel, + .pre_probe = efx_siena_sriov_probe_channel, + .post_remove = efx_channel_dummy_op_void, + .get_name = efx_siena_sriov_get_channel_name, + /* no copy operation; channel must not be reallocated */ + .keep_eventq = true, +}; + +void efx_siena_sriov_probe(struct efx_nic *efx) +{ + unsigned count; + + if (!max_vfs) + return; + + if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) + return; + if (count > 0 && count > max_vfs) + count = max_vfs; + + /* efx_nic_dimension_resources() will reduce vf_count as appopriate */ + efx->vf_count = count; + + efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type; +} + +/* Copy the list of individual addresses into the vfdi_status.peers + * array and auxiliary pages, protected by %local_lock. Drop that lock + * and then broadcast the address list to every VF. + */ +static void efx_siena_sriov_peer_work(struct work_struct *data) +{ + struct siena_nic_data *nic_data = container_of(data, + struct siena_nic_data, + peer_work); + struct efx_nic *efx = nic_data->efx; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; + struct efx_vf *vf; + struct efx_local_addr *local_addr; + struct vfdi_endpoint *peer; + struct efx_endpoint_page *epp; + struct list_head pages; + unsigned int peer_space; + unsigned int peer_count; + unsigned int pos; + + mutex_lock(&nic_data->local_lock); + + /* Move the existing peer pages off %local_page_list */ + INIT_LIST_HEAD(&pages); + list_splice_tail_init(&nic_data->local_page_list, &pages); + + /* Populate the VF addresses starting from entry 1 (entry 0 is + * the PF address) + */ + peer = vfdi_status->peers + 1; + peer_space = ARRAY_SIZE(vfdi_status->peers) - 1; + peer_count = 1; + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = efx->vf + pos; + + mutex_lock(&vf->status_lock); + if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) { + *peer++ = vf->addr; + ++peer_count; + --peer_space; + BUG_ON(peer_space == 0); + } + mutex_unlock(&vf->status_lock); + } + + /* Fill the remaining addresses */ + list_for_each_entry(local_addr, &nic_data->local_addr_list, link) { + ether_addr_copy(peer->mac_addr, local_addr->addr); + peer->tci = 0; + ++peer; + ++peer_count; + if (--peer_space == 0) { + if (list_empty(&pages)) { + epp = kmalloc(sizeof(*epp), GFP_KERNEL); + if (!epp) + break; + epp->ptr = dma_alloc_coherent( + &efx->pci_dev->dev, EFX_PAGE_SIZE, + &epp->addr, GFP_KERNEL); + if (!epp->ptr) { + kfree(epp); + break; + } + } else { + epp = list_first_entry( + &pages, struct efx_endpoint_page, link); + list_del(&epp->link); + } + + list_add_tail(&epp->link, &nic_data->local_page_list); + peer = (struct vfdi_endpoint *)epp->ptr; + peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint); + } + } + vfdi_status->peer_count = peer_count; + mutex_unlock(&nic_data->local_lock); + + /* Free any now unused endpoint pages */ + while (!list_empty(&pages)) { + epp = list_first_entry( + &pages, struct efx_endpoint_page, link); + list_del(&epp->link); + 
dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, + epp->ptr, epp->addr); + kfree(epp); + } + + /* Finally, push the pages */ + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = efx->vf + pos; + + mutex_lock(&vf->status_lock); + if (vf->status_addr) + __efx_siena_sriov_push_vf_status(vf); + mutex_unlock(&vf->status_lock); + } +} + +static void efx_siena_sriov_free_local(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct efx_local_addr *local_addr; + struct efx_endpoint_page *epp; + + while (!list_empty(&nic_data->local_addr_list)) { + local_addr = list_first_entry(&nic_data->local_addr_list, + struct efx_local_addr, link); + list_del(&local_addr->link); + kfree(local_addr); + } + + while (!list_empty(&nic_data->local_page_list)) { + epp = list_first_entry(&nic_data->local_page_list, + struct efx_endpoint_page, link); + list_del(&epp->link); + dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE, + epp->ptr, epp->addr); + kfree(epp); + } +} + +static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) +{ + unsigned index; + struct efx_vf *vf; + + efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL); + if (!efx->vf) + return -ENOMEM; + + for (index = 0; index < efx->vf_count; ++index) { + vf = efx->vf + index; + + vf->efx = efx; + vf->index = index; + vf->rx_filter_id = -1; + vf->tx_filter_mode = VF_TX_FILTER_AUTO; + vf->tx_filter_id = -1; + INIT_WORK(&vf->req, efx_siena_sriov_vfdi); + INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work); + init_waitqueue_head(&vf->flush_waitq); + mutex_init(&vf->status_lock); + mutex_init(&vf->txq_lock); + } + + return 0; +} + +static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) +{ + struct efx_vf *vf; + unsigned int pos; + + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = efx->vf + pos; + + efx_nic_free_buffer(efx, &vf->buf); + kfree(vf->peer_page_addrs); + vf->peer_page_addrs = NULL; + vf->peer_page_count = 0; + + vf->evq0_count = 0; + } +} + +static int efx_siena_sriov_vfs_init(struct efx_nic *efx) +{ + struct pci_dev *pci_dev = efx->pci_dev; + struct siena_nic_data *nic_data = efx->nic_data; + unsigned index, devfn, sriov, buftbl_base; + u16 offset, stride; + struct efx_vf *vf; + int rc; + + sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV); + if (!sriov) + return -ENOENT; + + pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride); + + buftbl_base = nic_data->vf_buftbl_base; + devfn = pci_dev->devfn + offset; + for (index = 0; index < efx->vf_count; ++index) { + vf = efx->vf + index; + + /* Reserve buffer entries */ + vf->buftbl_base = buftbl_base; + buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx); + + vf->pci_rid = devfn; + snprintf(vf->pci_name, sizeof(vf->pci_name), + "%04x:%02x:%02x.%d", + pci_domain_nr(pci_dev->bus), pci_dev->bus->number, + PCI_SLOT(devfn), PCI_FUNC(devfn)); + + rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE, + GFP_KERNEL); + if (rc) + goto fail; + + devfn += stride; + } + + return 0; + +fail: + efx_siena_sriov_vfs_fini(efx); + return rc; +} + +int efx_siena_sriov_init(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status; + int rc; + + /* Ensure there's room for vf_channel */ + BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE); + /* Ensure that VI_BASE is aligned on VI_SCALE */ + BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1)); + + if (efx->vf_count 
== 0) + return 0; + + rc = efx_siena_sriov_cmd(efx, true, NULL, NULL); + if (rc) + goto fail_cmd; + + rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status, + sizeof(*vfdi_status), GFP_KERNEL); + if (rc) + goto fail_status; + vfdi_status = nic_data->vfdi_status.addr; + memset(vfdi_status, 0, sizeof(*vfdi_status)); + vfdi_status->version = 1; + vfdi_status->length = sizeof(*vfdi_status); + vfdi_status->max_tx_channels = vf_max_tx_channels; + vfdi_status->vi_scale = efx->vi_scale; + vfdi_status->rss_rxq_count = efx->rss_spread; + vfdi_status->peer_count = 1 + efx->vf_count; + vfdi_status->timer_quantum_ns = efx->timer_quantum_ns; + + rc = efx_siena_sriov_vf_alloc(efx); + if (rc) + goto fail_alloc; + + mutex_init(&nic_data->local_lock); + INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work); + INIT_LIST_HEAD(&nic_data->local_addr_list); + INIT_LIST_HEAD(&nic_data->local_page_list); + + rc = efx_siena_sriov_vfs_init(efx); + if (rc) + goto fail_vfs; + + rtnl_lock(); + ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr); + efx->vf_init_count = efx->vf_count; + rtnl_unlock(); + + efx_siena_sriov_usrev(efx, true); + + /* At this point we must be ready to accept VFDI requests */ + + rc = pci_enable_sriov(efx->pci_dev, efx->vf_count); + if (rc) + goto fail_pci; + + netif_info(efx, probe, net_dev, + "enabled SR-IOV for %d VFs, %d VI per VF\n", + efx->vf_count, efx_vf_size(efx)); + return 0; + +fail_pci: + efx_siena_sriov_usrev(efx, false); + rtnl_lock(); + efx->vf_init_count = 0; + rtnl_unlock(); + efx_siena_sriov_vfs_fini(efx); +fail_vfs: + cancel_work_sync(&nic_data->peer_work); + efx_siena_sriov_free_local(efx); + kfree(efx->vf); +fail_alloc: + efx_nic_free_buffer(efx, &nic_data->vfdi_status); +fail_status: + efx_siena_sriov_cmd(efx, false, NULL, NULL); +fail_cmd: + return rc; +} + +void efx_siena_sriov_fini(struct efx_nic *efx) +{ + struct efx_vf *vf; + unsigned int pos; + struct siena_nic_data *nic_data = efx->nic_data; + + if (efx->vf_init_count == 0) + return; + + /* Disable all interfaces to reconfiguration */ + BUG_ON(nic_data->vfdi_channel->enabled); + efx_siena_sriov_usrev(efx, false); + rtnl_lock(); + efx->vf_init_count = 0; + rtnl_unlock(); + + /* Flush all reconfiguration work */ + for (pos = 0; pos < efx->vf_count; ++pos) { + vf = efx->vf + pos; + cancel_work_sync(&vf->req); + cancel_work_sync(&vf->reset_work); + } + cancel_work_sync(&nic_data->peer_work); + + pci_disable_sriov(efx->pci_dev); + + /* Tear down back-end state */ + efx_siena_sriov_vfs_fini(efx); + efx_siena_sriov_free_local(efx); + kfree(efx->vf); + efx_nic_free_buffer(efx, &nic_data->vfdi_status); + efx_siena_sriov_cmd(efx, false, NULL, NULL); +} + +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) +{ + struct efx_nic *efx = channel->efx; + struct efx_vf *vf; + unsigned qid, seq, type, data; + + qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); + + /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */ + BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0); + seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ); + type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE); + data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA); + + netif_vdbg(efx, hw, efx->net_dev, + "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n", + qid, seq, type, data); + + if (map_vi_index(efx, qid, &vf, NULL)) + return; + if (vf->busy) + goto error; + + if (type == VFDI_EV_TYPE_REQ_WORD0) { + /* Resynchronise */ + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->req_seqno = seq + 1; + vf->req_addr = 0; + } else if (seq 
!= (vf->req_seqno++ & 0xff) || type != vf->req_type) + goto error; + + switch (vf->req_type) { + case VFDI_EV_TYPE_REQ_WORD0: + case VFDI_EV_TYPE_REQ_WORD1: + case VFDI_EV_TYPE_REQ_WORD2: + vf->req_addr |= (u64)data << (vf->req_type << 4); + ++vf->req_type; + return; + + case VFDI_EV_TYPE_REQ_WORD3: + vf->req_addr |= (u64)data << 48; + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->busy = true; + queue_work(vfdi_workqueue, &vf->req); + return; + } + +error: + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "ERROR: Screaming VFDI request from %s\n", + vf->pci_name); + /* Reset the request and sequence number */ + vf->req_type = VFDI_EV_TYPE_REQ_WORD0; + vf->req_seqno = seq + 1; +} + +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) +{ + struct efx_vf *vf; + + if (vf_i > efx->vf_init_count) + return; + vf = efx->vf + vf_i; + netif_info(efx, hw, efx->net_dev, + "FLR on VF %s\n", vf->pci_name); + + vf->status_addr = 0; + efx_vfdi_remove_all_filters(vf); + efx_vfdi_flush_clear(vf); + + vf->evq0_count = 0; +} + +void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) +{ + struct siena_nic_data *nic_data = efx->nic_data; + struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; + + if (!efx->vf_init_count) + return; + ether_addr_copy(vfdi_status->peers[0].mac_addr, + efx->net_dev->dev_addr); + queue_work(vfdi_workqueue, &nic_data->peer_work); +} + +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_vf *vf; + unsigned queue, qid; + + queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); + if (map_vi_index(efx, queue, &vf, &qid)) + return; + /* Ignore flush completions triggered by an FLR */ + if (!test_bit(qid, vf->txq_mask)) + return; + + __clear_bit(qid, vf->txq_mask); + --vf->txq_count; + + if (efx_vfdi_flush_wake(vf)) + wake_up(&vf->flush_waitq); +} + +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) +{ + struct efx_vf *vf; + unsigned ev_failed, queue, qid; + + queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); + ev_failed = EFX_QWORD_FIELD(*event, + FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); + if (map_vi_index(efx, queue, &vf, &qid)) + return; + if (!test_bit(qid, vf->rxq_mask)) + return; + + if (ev_failed) { + set_bit(qid, vf->rxq_retry_mask); + atomic_inc(&vf->rxq_retry_count); + } else { + __clear_bit(qid, vf->rxq_mask); + --vf->rxq_count; + } + if (efx_vfdi_flush_wake(vf)) + wake_up(&vf->flush_waitq); +} + +/* Called from napi. Schedule the reset work item */ +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) +{ + struct efx_vf *vf; + unsigned int rel; + + if (map_vi_index(efx, dmaq, &vf, &rel)) + return; + + if (net_ratelimit()) + netif_err(efx, hw, efx->net_dev, + "VF %d DMA Q %d reports descriptor fetch error.\n", + vf->index, rel); + queue_work(vfdi_workqueue, &vf->reset_work); +} + +/* Reset all VFs */ +void efx_siena_sriov_reset(struct efx_nic *efx) +{ + unsigned int vf_i; + struct efx_buffer buf; + struct efx_vf *vf; + + ASSERT_RTNL(); + + if (efx->vf_init_count == 0) + return; + + efx_siena_sriov_usrev(efx, true); + (void)efx_siena_sriov_cmd(efx, true, NULL, NULL); + + if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) + return; + + for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { + vf = efx->vf + vf_i; + efx_siena_sriov_reset_vf(vf, &buf); + } + + efx_nic_free_buffer(efx, &buf); +} + +int efx_init_sriov(void) +{ + /* A single threaded workqueue is sufficient. 
efx_siena_sriov_vfdi() and + * efx_siena_sriov_peer_work() spend almost all their time sleeping for + * MCDI to complete anyway + */ + vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); + if (!vfdi_workqueue) + return -ENOMEM; + + return 0; +} + +void efx_fini_sriov(void) +{ + destroy_workqueue(vfdi_workqueue); +} + +int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_vf *vf; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = efx->vf + vf_i; + + mutex_lock(&vf->status_lock); + ether_addr_copy(vf->addr.mac_addr, mac); + __efx_siena_sriov_update_vf_addr(vf); + mutex_unlock(&vf->status_lock); + + return 0; +} + +int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, + u16 vlan, u8 qos) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_vf *vf; + u16 tci; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = efx->vf + vf_i; + + mutex_lock(&vf->status_lock); + tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); + vf->addr.tci = htons(tci); + __efx_siena_sriov_update_vf_addr(vf); + mutex_unlock(&vf->status_lock); + + return 0; +} + +int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_vf *vf; + int rc; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = efx->vf + vf_i; + + mutex_lock(&vf->txq_lock); + if (vf->txq_count == 0) { + vf->tx_filter_mode = + spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF; + rc = 0; + } else { + /* This cannot be changed while TX queues are running */ + rc = -EBUSY; + } + mutex_unlock(&vf->txq_lock); + return rc; +} + +int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_vf *vf; + u16 tci; + + if (vf_i >= efx->vf_init_count) + return -EINVAL; + vf = efx->vf + vf_i; + + ivi->vf = vf_i; + ether_addr_copy(ivi->mac, vf->addr.mac_addr); + ivi->max_tx_rate = 0; + ivi->min_tx_rate = 0; + tci = ntohs(vf->addr.tci); + ivi->vlan = tci & VLAN_VID_MASK; + ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7; + ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON; + + return 0; +} + diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c new file mode 100644 index 000000000..2c90e6b31 --- /dev/null +++ b/drivers/net/ethernet/sfc/tenxpress.c @@ -0,0 +1,494 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2007-2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include "efx.h" +#include "mdio_10g.h" +#include "nic.h" +#include "phy.h" +#include "workarounds.h" + +/* We expect these MMDs to be in the package. */ +#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ + MDIO_DEVS_PCS | \ + MDIO_DEVS_PHYXS | \ + MDIO_DEVS_AN) + +#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ + (1 << LOOPBACK_PCS) | \ + (1 << LOOPBACK_PMAPMD) | \ + (1 << LOOPBACK_PHYXS_WS)) + +/* We complain if we fail to see the link partner as 10G capable this many + * times in a row (must be > 1 as sampling the autoneg. 
registers is racy) + */ +#define MAX_BAD_LP_TRIES (5) + +/* Extended control register */ +#define PMA_PMD_XCONTROL_REG 49152 +#define PMA_PMD_EXT_GMII_EN_LBN 1 +#define PMA_PMD_EXT_GMII_EN_WIDTH 1 +#define PMA_PMD_EXT_CLK_OUT_LBN 2 +#define PMA_PMD_EXT_CLK_OUT_WIDTH 1 +#define PMA_PMD_LNPGA_POWERDOWN_LBN 8 +#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1 +#define PMA_PMD_EXT_CLK312_WIDTH 1 +#define PMA_PMD_EXT_LPOWER_LBN 12 +#define PMA_PMD_EXT_LPOWER_WIDTH 1 +#define PMA_PMD_EXT_ROBUST_LBN 14 +#define PMA_PMD_EXT_ROBUST_WIDTH 1 +#define PMA_PMD_EXT_SSR_LBN 15 +#define PMA_PMD_EXT_SSR_WIDTH 1 + +/* extended status register */ +#define PMA_PMD_XSTATUS_REG 49153 +#define PMA_PMD_XSTAT_MDIX_LBN 14 +#define PMA_PMD_XSTAT_FLP_LBN (12) + +/* LED control register */ +#define PMA_PMD_LED_CTRL_REG 49159 +#define PMA_PMA_LED_ACTIVITY_LBN (3) + +/* LED function override register */ +#define PMA_PMD_LED_OVERR_REG 49161 +/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/ +#define PMA_PMD_LED_LINK_LBN (0) +#define PMA_PMD_LED_SPEED_LBN (2) +#define PMA_PMD_LED_TX_LBN (4) +#define PMA_PMD_LED_RX_LBN (6) +/* Override settings */ +#define PMA_PMD_LED_AUTO (0) /* H/W control */ +#define PMA_PMD_LED_ON (1) +#define PMA_PMD_LED_OFF (2) +#define PMA_PMD_LED_FLASH (3) +#define PMA_PMD_LED_MASK 3 +/* All LEDs under hardware control */ +/* Green and Amber under hardware control, Red off */ +#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) + +#define PMA_PMD_SPEED_ENABLE_REG 49192 +#define PMA_PMD_100TX_ADV_LBN 1 +#define PMA_PMD_100TX_ADV_WIDTH 1 +#define PMA_PMD_1000T_ADV_LBN 2 +#define PMA_PMD_1000T_ADV_WIDTH 1 +#define PMA_PMD_10000T_ADV_LBN 3 +#define PMA_PMD_10000T_ADV_WIDTH 1 +#define PMA_PMD_SPEED_LBN 4 +#define PMA_PMD_SPEED_WIDTH 4 + +/* Misc register defines */ +#define PCS_CLOCK_CTRL_REG 55297 +#define PLL312_RST_N_LBN 2 + +#define PCS_SOFT_RST2_REG 55302 +#define SERDES_RST_N_LBN 13 +#define XGXS_RST_N_LBN 12 + +#define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */ +#define CLK312_EN_LBN 3 + +/* PHYXS registers */ +#define PHYXS_XCONTROL_REG 49152 +#define PHYXS_RESET_LBN 15 +#define PHYXS_RESET_WIDTH 1 + +#define PHYXS_TEST1 (49162) +#define LOOPBACK_NEAR_LBN (8) +#define LOOPBACK_NEAR_WIDTH (1) + +/* Boot status register */ +#define PCS_BOOT_STATUS_REG 53248 +#define PCS_BOOT_FATAL_ERROR_LBN 0 +#define PCS_BOOT_PROGRESS_LBN 1 +#define PCS_BOOT_PROGRESS_WIDTH 2 +#define PCS_BOOT_PROGRESS_INIT 0 +#define PCS_BOOT_PROGRESS_WAIT_MDIO 1 +#define PCS_BOOT_PROGRESS_CHECKSUM 2 +#define PCS_BOOT_PROGRESS_JUMP 3 +#define PCS_BOOT_DOWNLOAD_WAIT_LBN 3 +#define PCS_BOOT_CODE_STARTED_LBN 4 + +/* 100M/1G PHY registers */ +#define GPHY_XCONTROL_REG 49152 +#define GPHY_ISOLATE_LBN 10 +#define GPHY_ISOLATE_WIDTH 1 +#define GPHY_DUPLEX_LBN 8 +#define GPHY_DUPLEX_WIDTH 1 +#define GPHY_LOOPBACK_NEAR_LBN 14 +#define GPHY_LOOPBACK_NEAR_WIDTH 1 + +#define C22EXT_STATUS_REG 49153 +#define C22EXT_STATUS_LINK_LBN 2 +#define C22EXT_STATUS_LINK_WIDTH 1 + +#define C22EXT_MSTSLV_CTRL 49161 +#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8 +#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9 + +#define C22EXT_MSTSLV_STATUS 49162 +#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10 +#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11 + +/* Time to wait between powering down the LNPGA and turning off the power + * rails */ +#define LNPGA_PDOWN_WAIT (HZ / 5) + +struct tenxpress_phy_data { + enum efx_loopback_mode loopback_mode; + enum efx_phy_mode phy_mode; + int bad_lp_tries; +}; + +static int 
tenxpress_init(struct efx_nic *efx) +{ + /* Enable 312.5 MHz clock */ + efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG, + 1 << CLK312_EN_LBN); + + /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */ + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, + 1 << PMA_PMA_LED_ACTIVITY_LBN, true); + efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, + SFX7101_PMA_PMD_LED_DEFAULT); + + return 0; +} + +static int tenxpress_phy_probe(struct efx_nic *efx) +{ + struct tenxpress_phy_data *phy_data; + + /* Allocate phy private storage */ + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (!phy_data) + return -ENOMEM; + efx->phy_data = phy_data; + phy_data->phy_mode = efx->phy_mode; + + efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; + efx->mdio.mode_support = MDIO_SUPPORTS_C45; + + efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; + + efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full); + + return 0; +} + +static int tenxpress_phy_init(struct efx_nic *efx) +{ + int rc; + + falcon_board(efx)->type->init_phy(efx); + + if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { + rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); + if (rc < 0) + return rc; + + rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS); + if (rc < 0) + return rc; + } + + rc = tenxpress_init(efx); + if (rc < 0) + return rc; + + /* Reinitialise flow control settings */ + efx_link_set_wanted_fc(efx, efx->wanted_fc); + efx_mdio_an_reconfigure(efx); + + schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ + + /* Let XGXS and SerDes out of reset */ + falcon_reset_xaui(efx); + + return 0; +} + +/* Perform a "special software reset" on the PHY. The caller is + * responsible for saving and restoring the PHY hardware registers + * properly, and masking/unmasking LASI */ +static int tenxpress_special_reset(struct efx_nic *efx) +{ + int rc, reg; + + /* The XGMAC clock is driven from the SFX7101 312MHz clock, so + * a special software reset can glitch the XGMAC sufficiently for stats + * requests to fail. */ + falcon_stop_nic_stats(efx); + + /* Initiate reset */ + reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG); + reg |= (1 << PMA_PMD_EXT_SSR_LBN); + efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); + + mdelay(200); + + /* Wait for the blocks to come out of reset */ + rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); + if (rc < 0) + goto out; + + /* Try and reconfigure the device */ + rc = tenxpress_init(efx); + if (rc < 0) + goto out; + + /* Wait for the XGXS state machine to churn */ + mdelay(10); +out: + falcon_start_nic_stats(efx); + return rc; +} + +static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok) +{ + struct tenxpress_phy_data *pd = efx->phy_data; + bool bad_lp; + int reg; + + if (link_ok) { + bad_lp = false; + } else { + /* Check that AN has started but not completed. */ + reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1); + if (!(reg & MDIO_AN_STAT1_LPABLE)) + return; /* LP status is unknown */ + bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE); + if (bad_lp) + pd->bad_lp_tries++; + } + + /* Nothing to do if all is well and was previously so. */ + if (!pd->bad_lp_tries) + return; + + /* Use the RX (red) LED as an error indicator once we've seen AN + * failure several times in a row, and also log a message. 
*/ + if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) { + reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, + PMA_PMD_LED_OVERR_REG); + reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN); + if (!bad_lp) { + reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN; + } else { + reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN; + netif_err(efx, link, efx->net_dev, + "appears to be plugged into a port" + " that is not 10GBASE-T capable. The PHY" + " supports 10GBASE-T ONLY, so no link can" + " be established\n"); + } + efx_mdio_write(efx, MDIO_MMD_PMAPMD, + PMA_PMD_LED_OVERR_REG, reg); + pd->bad_lp_tries = bad_lp; + } +} + +static bool sfx7101_link_ok(struct efx_nic *efx) +{ + return efx_mdio_links_ok(efx, + MDIO_DEVS_PMAPMD | + MDIO_DEVS_PCS | + MDIO_DEVS_PHYXS); +} + +static void tenxpress_ext_loopback(struct efx_nic *efx) +{ + efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1, + 1 << LOOPBACK_NEAR_LBN, + efx->loopback_mode == LOOPBACK_PHYXS); +} + +static void tenxpress_low_power(struct efx_nic *efx) +{ + efx_mdio_set_mmds_lpower( + efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER), + TENXPRESS_REQUIRED_DEVS); +} + +static int tenxpress_phy_reconfigure(struct efx_nic *efx) +{ + struct tenxpress_phy_data *phy_data = efx->phy_data; + bool phy_mode_change, loop_reset; + + if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) { + phy_data->phy_mode = efx->phy_mode; + return 0; + } + + phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL && + phy_data->phy_mode != PHY_MODE_NORMAL); + loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) || + LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY)); + + if (loop_reset || phy_mode_change) { + tenxpress_special_reset(efx); + falcon_reset_xaui(efx); + } + + tenxpress_low_power(efx); + efx_mdio_transmit_disable(efx); + efx_mdio_phy_reconfigure(efx); + tenxpress_ext_loopback(efx); + efx_mdio_an_reconfigure(efx); + + phy_data->loopback_mode = efx->loopback_mode; + phy_data->phy_mode = efx->phy_mode; + + return 0; +} + +static void +tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd); + +/* Poll for link state changes */ +static bool tenxpress_phy_poll(struct efx_nic *efx) +{ + struct efx_link_state old_state = efx->link_state; + + efx->link_state.up = sfx7101_link_ok(efx); + efx->link_state.speed = 10000; + efx->link_state.fd = true; + efx->link_state.fc = efx_mdio_get_pause(efx); + + sfx7101_check_bad_lp(efx, efx->link_state.up); + + return !efx_link_state_equal(&efx->link_state, &old_state); +} + +static void sfx7101_phy_fini(struct efx_nic *efx) +{ + int reg; + + /* Power down the LNPGA */ + reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); + efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); + + /* Waiting here ensures that the board fini, which can turn + * off the power to the PHY, won't get run until the LNPGA + * powerdown has been given long enough to complete. 
*/ + schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ +} + +static void tenxpress_phy_remove(struct efx_nic *efx) +{ + kfree(efx->phy_data); + efx->phy_data = NULL; +} + + +/* Override the RX, TX and link LEDs */ +void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) +{ + int reg; + + switch (mode) { + case EFX_LED_OFF: + reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) | + (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) | + (PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN); + break; + case EFX_LED_ON: + reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) | + (PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) | + (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN); + break; + default: + reg = SFX7101_PMA_PMD_LED_DEFAULT; + break; + } + + efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg); +} + +static const char *const sfx7101_test_names[] = { + "bist" +}; + +static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index) +{ + if (index < ARRAY_SIZE(sfx7101_test_names)) + return sfx7101_test_names[index]; + return NULL; +} + +static int +sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags) +{ + int rc; + + if (!(flags & ETH_TEST_FL_OFFLINE)) + return 0; + + /* BIST is automatically run after a special software reset */ + rc = tenxpress_special_reset(efx); + results[0] = rc ? -1 : 1; + + efx_mdio_an_reconfigure(efx); + + return rc; +} + +static void +tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + u32 adv = 0, lpa = 0; + int reg; + + reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL); + if (reg & MDIO_AN_10GBT_CTRL_ADV10G) + adv |= ADVERTISED_10000baseT_Full; + reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT); + if (reg & MDIO_AN_10GBT_STAT_LP10G) + lpa |= ADVERTISED_10000baseT_Full; + + mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); + + /* In loopback, the PHY automatically brings up the correct interface, + * but doesn't advertise the correct speed. So override it */ + if (LOOPBACK_EXTERNAL(efx)) + ethtool_cmd_speed_set(ecmd, SPEED_10000); +} + +static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + if (!ecmd->autoneg) + return -EINVAL; + + return efx_mdio_set_settings(efx, ecmd); +} + +static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising) +{ + efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV10G, + advertising & ADVERTISED_10000baseT_Full); +} + +const struct efx_phy_operations falcon_sfx7101_phy_ops = { + .probe = tenxpress_phy_probe, + .init = tenxpress_phy_init, + .reconfigure = tenxpress_phy_reconfigure, + .poll = tenxpress_phy_poll, + .fini = sfx7101_phy_fini, + .remove = tenxpress_phy_remove, + .get_settings = tenxpress_get_settings, + .set_settings = tenxpress_set_settings, + .set_npage_adv = sfx7101_set_npage_adv, + .test_alive = efx_mdio_test_alive, + .test_name = sfx7101_test_name, + .run_tests = sfx7101_run_tests, +}; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c new file mode 100644 index 000000000..aaf298751 --- /dev/null +++ b/drivers/net/ethernet/sfc/tx.c @@ -0,0 +1,1332 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2005-2013 Solarflare Communications Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "io.h" +#include "nic.h" +#include "workarounds.h" +#include "ef10_regs.h" + +#ifdef EFX_USE_PIO + +#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE +#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES) +unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF; + +#endif /* EFX_USE_PIO */ + +static inline unsigned int +efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue) +{ + return tx_queue->insert_count & tx_queue->ptr_mask; +} + +static inline struct efx_tx_buffer * +__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) +{ + return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)]; +} + +static inline struct efx_tx_buffer * +efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer = + __efx_tx_queue_get_insert_buffer(tx_queue); + + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->flags); + EFX_BUG_ON_PARANOID(buffer->unmap_len); + + return buffer; +} + +static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + if (buffer->unmap_len) { + struct device *dma_dev = &tx_queue->efx->pci_dev->dev; + dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset; + if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) + dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, + DMA_TO_DEVICE); + buffer->unmap_len = 0; + } + + if (buffer->flags & EFX_TX_BUF_SKB) { + (*pkts_compl)++; + (*bytes_compl) += buffer->skb->len; + dev_consume_skb_any((struct sk_buff *)buffer->skb); + netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, + "TX queue %d transmission id %x complete\n", + tx_queue->queue, tx_queue->read_count); + } else if (buffer->flags & EFX_TX_BUF_HEAP) { + kfree(buffer->heap_buf); + } + + buffer->len = 0; + buffer->flags = 0; +} + +static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, + struct sk_buff *skb); + +static inline unsigned +efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) +{ + /* Depending on the NIC revision, we can use descriptor + * lengths up to 8K or 8K-1. However, since PCI Express + * devices must split read requests at 4K boundaries, there is + * little benefit from using descriptors that cross those + * boundaries and we keep things simple by not doing so. + */ + unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1; + + /* Work around hardware bug for unaligned buffers. 
*/ + if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) + len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); + + return len; +} + +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround, + * or for option descriptors + */ + if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + +static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) +{ + /* We need to consider both queues that the net core sees as one */ + struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1); + struct efx_nic *efx = txq1->efx; + unsigned int fill_level; + + fill_level = max(txq1->insert_count - txq1->old_read_count, + txq2->insert_count - txq2->old_read_count); + if (likely(fill_level < efx->txq_stop_thresh)) + return; + + /* We used the stale old_read_count above, which gives us a + * pessimistic estimate of the fill level (which may even + * validly be >= efx->txq_entries). Now try again using + * read_count (more likely to be a cache miss). + * + * If we read read_count and then conditionally stop the + * queue, it is possible for the completion path to race with + * us and complete all outstanding descriptors in the middle, + * after which there will be no more completions to wake it. + * Therefore we stop the queue first, then read read_count + * (with a memory barrier to ensure the ordering), then + * restart the queue if the fill level turns out to be low + * enough. + */ + netif_tx_stop_queue(txq1->core_txq); + smp_mb(); + txq1->old_read_count = ACCESS_ONCE(txq1->read_count); + txq2->old_read_count = ACCESS_ONCE(txq2->read_count); + + fill_level = max(txq1->insert_count - txq1->old_read_count, + txq2->insert_count - txq2->old_read_count); + EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries); + if (likely(fill_level < efx->txq_stop_thresh)) { + smp_mb(); + if (likely(!efx->loopback_selftest)) + netif_tx_start_queue(txq1->core_txq); + } +} + +#ifdef EFX_USE_PIO + +struct efx_short_copy_buffer { + int used; + u8 buf[L1_CACHE_BYTES]; +}; + +/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. + * Advances piobuf pointer. Leaves additional data in the copy buffer. + */ +static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + int block_len = len & ~(sizeof(copy_buf->buf) - 1); + + __iowrite64_copy(*piobuf, data, block_len >> 3); + *piobuf += block_len; + len -= block_len; + + if (len) { + data += block_len; + BUG_ON(copy_buf->used); + BUG_ON(len > sizeof(copy_buf->buf)); + memcpy(copy_buf->buf, data, len); + copy_buf->used = len; + } +} + +/* Copy to PIO, respecting dword alignment, popping data from copy buffer first. + * Advances piobuf pointer. Leaves additional data in the copy buffer. 
+ */ +static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf, + u8 *data, int len, + struct efx_short_copy_buffer *copy_buf) +{ + if (copy_buf->used) { + /* if the copy buffer is partially full, fill it up and write */ + int copy_to_buf = + min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len); + + memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf); + copy_buf->used += copy_to_buf; + + /* if we didn't fill it up then we're done for now */ + if (copy_buf->used < sizeof(copy_buf->buf)) + return; + + __iowrite64_copy(*piobuf, copy_buf->buf, + sizeof(copy_buf->buf) >> 3); + *piobuf += sizeof(copy_buf->buf); + data += copy_to_buf; + len -= copy_to_buf; + copy_buf->used = 0; + } + + efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf); +} + +static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + /* if there's anything in it, write the whole buffer, including junk */ + if (copy_buf->used) + __iowrite64_copy(piobuf, copy_buf->buf, + sizeof(copy_buf->buf) >> 3); +} + +/* Traverse skb structure and copy fragments in to PIO buffer. + * Advances piobuf pointer. + */ +static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb, + u8 __iomem **piobuf, + struct efx_short_copy_buffer *copy_buf) +{ + int i; + + efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb), + copy_buf); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + vaddr = kmap_atomic(skb_frag_page(f)); + + efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset, + skb_frag_size(f), copy_buf); + kunmap_atomic(vaddr); + } + + EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list); +} + +static struct efx_tx_buffer * +efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct efx_tx_buffer *buffer = + efx_tx_queue_get_insert_buffer(tx_queue); + u8 __iomem *piobuf = tx_queue->piobuf; + + /* Copy to PIO buffer. Ensure the writes are padded to the end + * of a cache line, as this is required for write-combining to be + * effective on at least x86. + */ + + if (skb_shinfo(skb)->nr_frags) { + /* The size of the copy buffer will ensure all writes + * are the size of a cache line. + */ + struct efx_short_copy_buffer copy_buf; + + copy_buf.used = 0; + + efx_skb_copy_bits_to_pio(tx_queue->efx, skb, + &piobuf, &copy_buf); + efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); + } else { + /* Pad the write to the size of a cache line. + * We can do this because we know the skb_shared_info struct is + * after the source, and the destination buffer is big enough. + */ + BUILD_BUG_ON(L1_CACHE_BYTES > + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + __iowrite64_copy(tx_queue->piobuf, skb->data, + ALIGN(skb->len, L1_CACHE_BYTES) >> 3); + } + + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO, + ESF_DZ_TX_PIO_CONT, 0, + ESF_DZ_TX_PIO_BYTE_CNT, skb->len, + ESF_DZ_TX_PIO_BUF_ADDR, + tx_queue->piobuf_offset); + ++tx_queue->pio_packets; + ++tx_queue->insert_count; + return buffer; +} +#endif /* EFX_USE_PIO */ + +/* + * Add a socket buffer to a TX queue + * + * This maps all fragments of a socket buffer for DMA and adds them to + * the TX queue. The queue's insert pointer will be incremented by + * the number of fragments in the socket buffer. 
+ * + * If any DMA mapping fails, any mapped fragments will be unmapped, + * the queue's insert pointer will be restored to its original value. + * + * This function is split out from efx_hard_start_xmit to allow the + * loopback test to direct packets via specific TX queues. + * + * Returns NETDEV_TX_OK. + * You must hold netif_tx_lock() to call this function. + */ +netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct efx_nic *efx = tx_queue->efx; + struct device *dma_dev = &efx->pci_dev->dev; + struct efx_tx_buffer *buffer; + unsigned int old_insert_count = tx_queue->insert_count; + skb_frag_t *fragment; + unsigned int len, unmap_len = 0; + dma_addr_t dma_addr, unmap_addr = 0; + unsigned int dma_len; + unsigned short dma_flags; + int i = 0; + + if (skb_shinfo(skb)->gso_size) + return efx_enqueue_skb_tso(tx_queue, skb); + + /* Get size of the initial fragment */ + len = skb_headlen(skb); + + /* Pad if necessary */ + if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { + EFX_BUG_ON_PARANOID(skb->data_len); + len = 32 + 1; + if (skb_pad(skb, len - skb->len)) + return NETDEV_TX_OK; + } + + /* Consider using PIO for short packets */ +#ifdef EFX_USE_PIO + if (skb->len <= efx_piobuf_size && !skb->xmit_more && + efx_nic_may_tx_pio(tx_queue)) { + buffer = efx_enqueue_skb_pio(tx_queue, skb); + dma_flags = EFX_TX_BUF_OPTION; + goto finish_packet; + } +#endif + + /* Map for DMA. Use dma_map_single rather than dma_map_page + * since this is more efficient on machines with sparse + * memory. + */ + dma_flags = EFX_TX_BUF_MAP_SINGLE; + dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); + + /* Process all fragments */ + while (1) { + if (unlikely(dma_mapping_error(dma_dev, dma_addr))) + goto dma_err; + + /* Store fields for marking in the per-fragment final + * descriptor */ + unmap_len = len; + unmap_addr = dma_addr; + + /* Add to TX queue, splitting across DMA boundaries */ + do { + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + + dma_len = efx_max_tx_len(efx, dma_addr); + if (likely(dma_len >= len)) + dma_len = len; + + /* Fill out per descriptor fields */ + buffer->len = dma_len; + buffer->dma_addr = dma_addr; + buffer->flags = EFX_TX_BUF_CONT; + len -= dma_len; + dma_addr += dma_len; + ++tx_queue->insert_count; + } while (len); + + /* Transfer ownership of the unmapping to the final buffer */ + buffer->flags = EFX_TX_BUF_CONT | dma_flags; + buffer->unmap_len = unmap_len; + buffer->dma_offset = buffer->dma_addr - unmap_addr; + unmap_len = 0; + + /* Get address and size of next fragment */ + if (i >= skb_shinfo(skb)->nr_frags) + break; + fragment = &skb_shinfo(skb)->frags[i]; + len = skb_frag_size(fragment); + i++; + /* Map for DMA */ + dma_flags = 0; + dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, + DMA_TO_DEVICE); + } + + /* Transfer ownership of the skb to the final buffer */ +#ifdef EFX_USE_PIO +finish_packet: +#endif + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB | dma_flags; + + netdev_tx_sent_queue(tx_queue->core_txq, skb->len); + + efx_tx_maybe_stop_queue(tx_queue); + + /* Pass off to hardware */ + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + efx_nic_push_buffers(tx_queue); + + tx_queue->tx_packets++; + + return NETDEV_TX_OK; + + dma_err: + netif_err(efx, tx_err, efx->net_dev, + " TX queue %d could not map skb with %d bytes %d " + "fragments for DMA\n", tx_queue->queue, skb->len, + skb_shinfo(skb)->nr_frags + 1); + + /* Mark the packet as transmitted, and free the SKB ourselves */ + 
dev_kfree_skb_any(skb); + + /* Work backwards until we hit the original insert pointer value */ + while (tx_queue->insert_count != old_insert_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; + --tx_queue->insert_count; + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + } + + /* Free the fragment we were mid-way through pushing */ + if (unmap_len) { + if (dma_flags & EFX_TX_BUF_MAP_SINGLE) + dma_unmap_single(dma_dev, unmap_addr, unmap_len, + DMA_TO_DEVICE); + else + dma_unmap_page(dma_dev, unmap_addr, unmap_len, + DMA_TO_DEVICE); + } + + return NETDEV_TX_OK; +} + +/* Remove packets from the TX queue + * + * This removes packets from the TX queue, up to and including the + * specified index. + */ +static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, + unsigned int index, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int stop_index, read_ptr; + + stop_index = (index + 1) & tx_queue->ptr_mask; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + + while (read_ptr != stop_index) { + struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; + + if (!(buffer->flags & EFX_TX_BUF_OPTION) && + unlikely(buffer->len == 0)) { + netif_err(efx, tx_err, efx->net_dev, + "TX queue %d spurious TX completion id %x\n", + tx_queue->queue, read_ptr); + efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); + return; + } + + efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); + + ++tx_queue->read_count; + read_ptr = tx_queue->read_count & tx_queue->ptr_mask; + } +} + +/* Initiate a packet transmission. We use one channel per CPU + * (sharing when we have more CPUs than channels). On Falcon, the TX + * completion events will be directed back to the CPU that transmitted + * the packet, which should be cache-efficient. + * + * Context: non-blocking. + * Note that returning anything other than NETDEV_TX_OK will cause the + * OS to free the skb. + */ +netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + unsigned index, type; + + EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); + + /* PTP "event" packet */ + if (unlikely(efx_xmit_with_hwtstamp(skb)) && + unlikely(efx_ptp_is_ptp_tx(efx, skb))) { + return efx_ptp_tx(efx, skb); + } + + index = skb_get_queue_mapping(skb); + type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; + if (index >= efx->n_tx_channels) { + index -= efx->n_tx_channels; + type |= EFX_TXQ_TYPE_HIGHPRI; + } + tx_queue = efx_get_tx_queue(efx, index, type); + + return efx_enqueue_skb(tx_queue, skb); +} + +void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + + /* Must be inverse of queue lookup in efx_hard_start_xmit() */ + tx_queue->core_txq = + netdev_get_tx_queue(efx->net_dev, + tx_queue->queue / EFX_TXQ_TYPES + + ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 
+ efx->n_tx_channels : 0)); +} + +int efx_setup_tc(struct net_device *net_dev, u8 num_tc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + unsigned tc; + int rc; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) + return -EINVAL; + + if (num_tc == net_dev->num_tc) + return 0; + + for (tc = 0; tc < num_tc; tc++) { + net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; + net_dev->tc_to_txq[tc].count = efx->n_tx_channels; + } + + if (num_tc > net_dev->num_tc) { + /* Initialise high-priority queues as necessary */ + efx_for_each_channel(channel, efx) { + efx_for_each_possible_channel_tx_queue(tx_queue, + channel) { + if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) + continue; + if (!tx_queue->buffer) { + rc = efx_probe_tx_queue(tx_queue); + if (rc) + return rc; + } + if (!tx_queue->initialised) + efx_init_tx_queue(tx_queue); + efx_init_tx_queue_core_txq(tx_queue); + } + } + } else { + /* Reduce number of classes before number of queues */ + net_dev->num_tc = num_tc; + } + + rc = netif_set_real_num_tx_queues(net_dev, + max_t(int, num_tc, 1) * + efx->n_tx_channels); + if (rc) + return rc; + + /* Do not destroy high-priority queues when they become + * unused. We would have to flush them first, and it is + * fairly difficult to flush a subset of TX queues. Leave + * it to efx_fini_channels(). + */ + + net_dev->num_tc = num_tc; + return 0; +} + +void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) +{ + unsigned fill_level; + struct efx_nic *efx = tx_queue->efx; + struct efx_tx_queue *txq2; + unsigned int pkts_compl = 0, bytes_compl = 0; + + EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); + + efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); + netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); + + if (pkts_compl > 1) + ++tx_queue->merge_events; + + /* See if we need to restart the netif queue. This memory + * barrier ensures that we write read_count (inside + * efx_dequeue_buffers()) before reading the queue status. + */ + smp_mb(); + if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && + likely(efx->port_enabled) && + likely(netif_device_present(efx->net_dev))) { + txq2 = efx_tx_queue_partner(tx_queue); + fill_level = max(tx_queue->insert_count - tx_queue->read_count, + txq2->insert_count - txq2->read_count); + if (fill_level <= efx->txq_wake_thresh) + netif_tx_wake_queue(tx_queue->core_txq); + } + + /* Check whether the hardware queue is now empty */ + if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { + tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); + if (tx_queue->read_count == tx_queue->old_write_count) { + smp_mb(); + tx_queue->empty_read_count = + tx_queue->read_count | EFX_EMPTY_COUNT_VALID; + } + } +} + +/* Size of page-based TSO header buffers. Larger blocks must be + * allocated from the heap. + */ +#define TSOH_STD_SIZE 128 +#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) + +/* At most half the descriptors in the queue at any time will refer to + * a TSO header buffer, since they must always be followed by a + * payload descriptor referring to an skb. 
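+ * This bounds the number of header buffers in use at once to half the ring size, which is what the page count computed below allows for.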
+ */ +static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue) +{ + return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE); +} + +int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int entries; + int rc; + + /* Create the smallest power-of-two aligned ring */ + entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); + EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); + tx_queue->ptr_mask = entries - 1; + + netif_dbg(efx, probe, efx->net_dev, + "creating TX queue %d size %#x mask %#x\n", + tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); + + /* Allocate software ring */ + tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), + GFP_KERNEL); + if (!tx_queue->buffer) + return -ENOMEM; + + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { + tx_queue->tsoh_page = + kcalloc(efx_tsoh_page_count(tx_queue), + sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL); + if (!tx_queue->tsoh_page) { + rc = -ENOMEM; + goto fail1; + } + } + + /* Allocate hardware ring */ + rc = efx_nic_probe_tx(tx_queue); + if (rc) + goto fail2; + + return 0; + +fail2: + kfree(tx_queue->tsoh_page); + tx_queue->tsoh_page = NULL; +fail1: + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; + return rc; +} + +void efx_init_tx_queue(struct efx_tx_queue *tx_queue) +{ + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "initialising TX queue %d\n", tx_queue->queue); + + tx_queue->insert_count = 0; + tx_queue->write_count = 0; + tx_queue->old_write_count = 0; + tx_queue->read_count = 0; + tx_queue->old_read_count = 0; + tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; + + /* Set up TX descriptor ring */ + efx_nic_init_tx(tx_queue); + + tx_queue->initialised = true; +} + +void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_buffer *buffer; + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "shutting down TX queue %d\n", tx_queue->queue); + + if (!tx_queue->buffer) + return; + + /* Free any buffers left in the ring */ + while (tx_queue->read_count != tx_queue->write_count) { + unsigned int pkts_compl = 0, bytes_compl = 0; + buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; + efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); + + ++tx_queue->read_count; + } + netdev_tx_reset_queue(tx_queue->core_txq); +} + +void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) +{ + int i; + + if (!tx_queue->buffer) + return; + + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, + "destroying TX queue %d\n", tx_queue->queue); + efx_nic_remove_tx(tx_queue); + + if (tx_queue->tsoh_page) { + for (i = 0; i < efx_tsoh_page_count(tx_queue); i++) + efx_nic_free_buffer(tx_queue->efx, + &tx_queue->tsoh_page[i]); + kfree(tx_queue->tsoh_page); + tx_queue->tsoh_page = NULL; + } + + kfree(tx_queue->buffer); + tx_queue->buffer = NULL; +} + + +/* Efx TCP segmentation acceleration. + * + * Why? Because by doing it here in the driver we can go significantly + * faster than the GSO. + * + * Requires TX checksum offload support. 
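+ * + * The payload is DMA-mapped once and shared between all segments; only the headers are rewritten for each segment (or, on EF10, described by a TSO option descriptor placed in front of the original headers).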
+ */ + +#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) + +/** + * struct tso_state - TSO state for an SKB + * @out_len: Remaining length in current segment + * @seqnum: Current sequence number + * @ipv4_id: Current IPv4 ID, host endian + * @packet_space: Remaining space in current packet + * @dma_addr: DMA address of current position + * @in_len: Remaining length in current SKB fragment + * @unmap_len: Length of SKB fragment + * @unmap_addr: DMA address of SKB fragment + * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0 + * @protocol: Network protocol (after any VLAN header) + * @ip_off: Offset of IP header + * @tcp_off: Offset of TCP header + * @header_len: Number of bytes of header + * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload + * @header_dma_addr: Header DMA address, when using option descriptors + * @header_unmap_len: Header DMA mapped length, or 0 if not using option + * descriptors + * + * The state used during segmentation. It is put into this data structure + * just to make it easy to pass into inline functions. + */ +struct tso_state { + /* Output position */ + unsigned out_len; + unsigned seqnum; + u16 ipv4_id; + unsigned packet_space; + + /* Input position */ + dma_addr_t dma_addr; + unsigned in_len; + unsigned unmap_len; + dma_addr_t unmap_addr; + unsigned short dma_flags; + + __be16 protocol; + unsigned int ip_off; + unsigned int tcp_off; + unsigned header_len; + unsigned int ip_base_len; + dma_addr_t header_dma_addr; + unsigned int header_unmap_len; +}; + + +/* + * Verify that our various assumptions about sk_buffs and the conditions + * under which TSO will be attempted hold true. Return the protocol number. + */ +static __be16 efx_tso_check_protocol(struct sk_buff *skb) +{ + __be16 protocol = skb->protocol; + + EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != + protocol); + if (protocol == htons(ETH_P_8021Q)) { + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + protocol = veh->h_vlan_encapsulated_proto; + } + + if (protocol == htons(ETH_P_IP)) { + EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); + } else { + EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); + EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); + } + EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + + (tcp_hdr(skb)->doff << 2u)) > + skb_headlen(skb)); + + return protocol; +} + +static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, unsigned int len) +{ + u8 *result; + + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->flags); + EFX_BUG_ON_PARANOID(buffer->unmap_len); + + if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) { + unsigned index = + (tx_queue->insert_count & tx_queue->ptr_mask) / 2; + struct efx_buffer *page_buf = + &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; + unsigned offset = + TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN; + + if (unlikely(!page_buf->addr) && + efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, + GFP_ATOMIC)) + return NULL; + + result = (u8 *)page_buf->addr + offset; + buffer->dma_addr = page_buf->dma_addr + offset; + buffer->flags = EFX_TX_BUF_CONT; + } else { + tx_queue->tso_long_headers++; + + buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC); + if (unlikely(!buffer->heap_buf)) + return NULL; + result = (u8 *)buffer->heap_buf + NET_IP_ALIGN; + buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; + } + + buffer->len = len; + + return result; +} + +/** + * efx_tx_queue_insert - push descriptors 
onto the TX queue + * @tx_queue: Efx TX queue + * @dma_addr: DMA address of fragment + * @len: Length of fragment + * @final_buffer: The final buffer inserted into the queue + * + * Push descriptors onto the TX queue. + */ +static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned len, + struct efx_tx_buffer **final_buffer) +{ + struct efx_tx_buffer *buffer; + struct efx_nic *efx = tx_queue->efx; + unsigned dma_len; + + EFX_BUG_ON_PARANOID(len <= 0); + + while (1) { + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + ++tx_queue->insert_count; + + EFX_BUG_ON_PARANOID(tx_queue->insert_count - + tx_queue->read_count >= + efx->txq_entries); + + buffer->dma_addr = dma_addr; + + dma_len = efx_max_tx_len(efx, dma_addr); + + /* If there is enough space to send then do so */ + if (dma_len >= len) + break; + + buffer->len = dma_len; + buffer->flags = EFX_TX_BUF_CONT; + dma_addr += dma_len; + len -= dma_len; + } + + EFX_BUG_ON_PARANOID(!len); + buffer->len = len; + *final_buffer = buffer; +} + + +/* + * Put a TSO header into the TX queue. + * + * This is special-cased because we know that it is small enough to fit in + * a single fragment, and we know it doesn't cross a page boundary. It + * also allows us to not worry about end-of-packet etc. + */ +static int efx_tso_put_header(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, u8 *header) +{ + if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { + buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, + header, buffer->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, + buffer->dma_addr))) { + kfree(buffer->heap_buf); + buffer->len = 0; + buffer->flags = 0; + return -ENOMEM; + } + buffer->unmap_len = buffer->len; + buffer->dma_offset = 0; + buffer->flags |= EFX_TX_BUF_MAP_SINGLE; + } + + ++tx_queue->insert_count; + return 0; +} + + +/* Remove buffers put into a tx_queue. None of the buffers must have + * an skb attached. + */ +static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count) +{ + struct efx_tx_buffer *buffer; + + /* Work backwards until we hit the original insert pointer value */ + while (tx_queue->insert_count != insert_count) { + --tx_queue->insert_count; + buffer = __efx_tx_queue_get_insert_buffer(tx_queue); + efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); + } +} + + +/* Parse the SKB header and initialise state. 
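+ * Pre-EF10 NICs need a fresh copy of the headers for every segment, so only the payload part of the linear area is DMA-mapped here; EF10 NICs reuse the original headers behind a TSO option descriptor, so the whole linear area is mapped instead.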
*/ +static int tso_start(struct tso_state *st, struct efx_nic *efx, + const struct sk_buff *skb) +{ + bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; + struct device *dma_dev = &efx->pci_dev->dev; + unsigned int header_len, in_len; + dma_addr_t dma_addr; + + st->ip_off = skb_network_header(skb) - skb->data; + st->tcp_off = skb_transport_header(skb) - skb->data; + header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); + in_len = skb_headlen(skb) - header_len; + st->header_len = header_len; + st->in_len = in_len; + if (st->protocol == htons(ETH_P_IP)) { + st->ip_base_len = st->header_len - st->ip_off; + st->ipv4_id = ntohs(ip_hdr(skb)->id); + } else { + st->ip_base_len = st->header_len - st->tcp_off; + st->ipv4_id = 0; + } + st->seqnum = ntohl(tcp_hdr(skb)->seq); + + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); + + st->out_len = skb->len - header_len; + + if (!use_opt_desc) { + st->header_unmap_len = 0; + + if (likely(in_len == 0)) { + st->dma_flags = 0; + st->unmap_len = 0; + return 0; + } + + dma_addr = dma_map_single(dma_dev, skb->data + header_len, + in_len, DMA_TO_DEVICE); + st->dma_flags = EFX_TX_BUF_MAP_SINGLE; + st->dma_addr = dma_addr; + st->unmap_addr = dma_addr; + st->unmap_len = in_len; + } else { + dma_addr = dma_map_single(dma_dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + st->header_dma_addr = dma_addr; + st->header_unmap_len = skb_headlen(skb); + st->dma_flags = 0; + st->dma_addr = dma_addr + header_len; + st->unmap_len = 0; + } + + return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0; +} + +static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, + skb_frag_t *frag) +{ + st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { + st->dma_flags = 0; + st->unmap_len = skb_frag_size(frag); + st->in_len = skb_frag_size(frag); + st->dma_addr = st->unmap_addr; + return 0; + } + return -ENOMEM; +} + + +/** + * tso_fill_packet_with_fragment - form descriptors for the current fragment + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * + * Form descriptors for the current fragment, until we reach the end + * of fragment or end-of-packet. + */ +static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) +{ + struct efx_tx_buffer *buffer; + int n; + + if (st->in_len == 0) + return; + if (st->packet_space == 0) + return; + + EFX_BUG_ON_PARANOID(st->in_len <= 0); + EFX_BUG_ON_PARANOID(st->packet_space <= 0); + + n = min(st->in_len, st->packet_space); + + st->packet_space -= n; + st->out_len -= n; + st->in_len -= n; + + efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); + + if (st->out_len == 0) { + /* Transfer ownership of the skb */ + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB; + } else if (st->packet_space != 0) { + buffer->flags = EFX_TX_BUF_CONT; + } + + if (st->in_len == 0) { + /* Transfer ownership of the DMA mapping */ + buffer->unmap_len = st->unmap_len; + buffer->dma_offset = buffer->unmap_len - buffer->len; + buffer->flags |= st->dma_flags; + st->unmap_len = 0; + } + + st->dma_addr += n; +} + + +/** + * tso_start_new_packet - generate a new header and prepare for the new packet + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * @st: TSO state + * + * Generate a new header and prepare for the new packet. 
Return 0 on + * success, or -%ENOMEM if failed to alloc header. + */ +static int tso_start_new_packet(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) +{ + struct efx_tx_buffer *buffer = + efx_tx_queue_get_insert_buffer(tx_queue); + bool is_last = st->out_len <= skb_shinfo(skb)->gso_size; + u8 tcp_flags_clear; + + if (!is_last) { + st->packet_space = skb_shinfo(skb)->gso_size; + tcp_flags_clear = 0x09; /* mask out FIN and PSH */ + } else { + st->packet_space = st->out_len; + tcp_flags_clear = 0x00; + } + + if (!st->header_unmap_len) { + /* Allocate and insert a DMA-mapped header buffer. */ + struct tcphdr *tsoh_th; + unsigned ip_length; + u8 *header; + int rc; + + header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); + if (!header) + return -ENOMEM; + + tsoh_th = (struct tcphdr *)(header + st->tcp_off); + + /* Copy and update the headers. */ + memcpy(header, skb->data, st->header_len); + + tsoh_th->seq = htonl(st->seqnum); + ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear; + + ip_length = st->ip_base_len + st->packet_space; + + if (st->protocol == htons(ETH_P_IP)) { + struct iphdr *tsoh_iph = + (struct iphdr *)(header + st->ip_off); + + tsoh_iph->tot_len = htons(ip_length); + tsoh_iph->id = htons(st->ipv4_id); + } else { + struct ipv6hdr *tsoh_iph = + (struct ipv6hdr *)(header + st->ip_off); + + tsoh_iph->payload_len = htons(ip_length); + } + + rc = efx_tso_put_header(tx_queue, buffer, header); + if (unlikely(rc)) + return rc; + } else { + /* Send the original headers with a TSO option descriptor + * in front + */ + u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear; + + buffer->flags = EFX_TX_BUF_OPTION; + buffer->len = 0; + buffer->unmap_len = 0; + EFX_POPULATE_QWORD_5(buffer->option, + ESF_DZ_TX_DESC_IS_OPT, 1, + ESF_DZ_TX_OPTION_TYPE, + ESE_DZ_TX_OPTION_DESC_TSO, + ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags, + ESF_DZ_TX_TSO_IP_ID, st->ipv4_id, + ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum); + ++tx_queue->insert_count; + + /* We mapped the headers in tso_start(). Unmap them + * when the last segment is completed. + */ + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + buffer->dma_addr = st->header_dma_addr; + buffer->len = st->header_len; + if (is_last) { + buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE; + buffer->unmap_len = st->header_unmap_len; + buffer->dma_offset = 0; + /* Ensure we only unmap them once in case of a + * later DMA mapping error and rollback + */ + st->header_unmap_len = 0; + } else { + buffer->flags = EFX_TX_BUF_CONT; + buffer->unmap_len = 0; + } + ++tx_queue->insert_count; + } + + st->seqnum += skb_shinfo(skb)->gso_size; + + /* Linux leaves suitable gaps in the IP ID space for us to fill. */ + ++st->ipv4_id; + + ++tx_queue->tso_packets; + + ++tx_queue->tx_packets; + + return 0; +} + + +/** + * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer + * @tx_queue: Efx TX queue + * @skb: Socket buffer + * + * Context: You must hold netif_tx_lock() to call this function. + * + * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if + * @skb was not enqueued. In all cases @skb is consumed. Return + * %NETDEV_TX_OK. 
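+ * On a header-allocation or DMA-mapping failure the partially built descriptors are unwound, any outstanding mappings are released and the skb is freed.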
+ */ +static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, + struct sk_buff *skb) +{ + struct efx_nic *efx = tx_queue->efx; + unsigned int old_insert_count = tx_queue->insert_count; + int frag_i, rc; + struct tso_state state; + + /* Find the packet protocol and sanity-check it */ + state.protocol = efx_tso_check_protocol(skb); + + rc = tso_start(&state, efx, skb); + if (rc) + goto mem_err; + + if (likely(state.in_len == 0)) { + /* Grab the first payload fragment. */ + EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); + frag_i = 0; + rc = tso_get_fragment(&state, efx, + skb_shinfo(skb)->frags + frag_i); + if (rc) + goto mem_err; + } else { + /* Payload starts in the header area. */ + frag_i = -1; + } + + if (tso_start_new_packet(tx_queue, skb, &state) < 0) + goto mem_err; + + while (1) { + tso_fill_packet_with_fragment(tx_queue, skb, &state); + + /* Move onto the next fragment? */ + if (state.in_len == 0) { + if (++frag_i >= skb_shinfo(skb)->nr_frags) + /* End of payload reached. */ + break; + rc = tso_get_fragment(&state, efx, + skb_shinfo(skb)->frags + frag_i); + if (rc) + goto mem_err; + } + + /* Start at new packet? */ + if (state.packet_space == 0 && + tso_start_new_packet(tx_queue, skb, &state) < 0) + goto mem_err; + } + + netdev_tx_sent_queue(tx_queue->core_txq, skb->len); + + efx_tx_maybe_stop_queue(tx_queue); + + /* Pass off to hardware */ + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + efx_nic_push_buffers(tx_queue); + + tx_queue->tso_bursts++; + return NETDEV_TX_OK; + + mem_err: + netif_err(efx, tx_err, efx->net_dev, + "Out of memory for TSO headers, or DMA mapping error\n"); + dev_kfree_skb_any(skb); + + /* Free the DMA mapping we were in the process of writing out */ + if (state.unmap_len) { + if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) + dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, + state.unmap_len, DMA_TO_DEVICE); + else + dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr, + state.unmap_len, DMA_TO_DEVICE); + } + + /* Free the header DMA mapping, if using option descriptors */ + if (state.header_unmap_len) + dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr, + state.header_unmap_len, DMA_TO_DEVICE); + + efx_enqueue_unwind(tx_queue, old_insert_count); + return NETDEV_TX_OK; +} diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c new file mode 100644 index 000000000..3d5ee3259 --- /dev/null +++ b/drivers/net/ethernet/sfc/txc43128_phy.c @@ -0,0 +1,560 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2006-2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +/* + * Driver for Transwitch/Mysticom CX4 retimer + * see www.transwitch.com, part is TXC-43128 + */ + +#include <linux/delay.h> +#include <linux/slab.h> +#include "efx.h" +#include "mdio_10g.h" +#include "phy.h" +#include "nic.h" + +/* We expect these MMDs to be in the package */ +#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \ + MDIO_DEVS_PMAPMD | \ + MDIO_DEVS_PHYXS) + +#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \ + (1 << LOOPBACK_PMAPMD) | \ + (1 << LOOPBACK_PHYXS_WS)) + +/************************************************************************** + * + * Compile-time config + * + ************************************************************************** + */ +#define TXCNAME "TXC43128" +/* Total length of time we'll wait for the PHY to come out of reset (ms) */ +#define TXC_MAX_RESET_TIME 500 +/* Interval between checks (ms) */ +#define TXC_RESET_WAIT 10 +/* How long to run BIST (us) */ +#define TXC_BIST_DURATION 50 + +/************************************************************************** + * + * Register definitions + * + ************************************************************************** + */ + +/* Command register */ +#define TXC_GLRGS_GLCMD 0xc004 +/* Useful bits in command register */ +/* Lane power-down */ +#define TXC_GLCMD_L01PD_LBN 5 +#define TXC_GLCMD_L23PD_LBN 6 +/* Limited SW reset: preserves configuration but + * initiates a logic reset. Self-clearing */ +#define TXC_GLCMD_LMTSWRST_LBN 14 + +/* Signal Quality Control */ +#define TXC_GLRGS_GSGQLCTL 0xc01a +/* Enable bit */ +#define TXC_GSGQLCT_SGQLEN_LBN 15 +/* Lane selection */ +#define TXC_GSGQLCT_LNSL_LBN 13 +#define TXC_GSGQLCT_LNSL_WIDTH 2 + +/* Analog TX control */ +#define TXC_ALRGS_ATXCTL 0xc040 +/* Lane power-down */ +#define TXC_ATXCTL_TXPD3_LBN 15 +#define TXC_ATXCTL_TXPD2_LBN 14 +#define TXC_ATXCTL_TXPD1_LBN 13 +#define TXC_ATXCTL_TXPD0_LBN 12 + +/* Amplitude on lanes 0, 1 */ +#define TXC_ALRGS_ATXAMP0 0xc041 +/* Amplitude on lanes 2, 3 */ +#define TXC_ALRGS_ATXAMP1 0xc042 +/* Bit position of value for lane 0 (or 2) */ +#define TXC_ATXAMP_LANE02_LBN 3 +/* Bit position of value for lane 1 (or 3) */ +#define TXC_ATXAMP_LANE13_LBN 11 + +#define TXC_ATXAMP_1280_mV 0 +#define TXC_ATXAMP_1200_mV 8 +#define TXC_ATXAMP_1120_mV 12 +#define TXC_ATXAMP_1060_mV 14 +#define TXC_ATXAMP_0820_mV 25 +#define TXC_ATXAMP_0720_mV 26 +#define TXC_ATXAMP_0580_mV 27 +#define TXC_ATXAMP_0440_mV 28 + +#define TXC_ATXAMP_0820_BOTH \ + ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \ + | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN)) + +#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */ + +/* Preemphasis on lanes 0, 1 */ +#define TXC_ALRGS_ATXPRE0 0xc043 +/* Preemphasis on lanes 2, 3 */ +#define TXC_ALRGS_ATXPRE1 0xc044 + +#define TXC_ATXPRE_NONE 0 +#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */ + +#define TXC_ALRGS_ARXCTL 0xc045 +/* Lane power-down */ +#define TXC_ARXCTL_RXPD3_LBN 15 +#define TXC_ARXCTL_RXPD2_LBN 14 +#define TXC_ARXCTL_RXPD1_LBN 13 +#define TXC_ARXCTL_RXPD0_LBN 12 + +/* Main control */ +#define TXC_MRGS_CTL 0xc340 +/* Bits in main control */ +#define TXC_MCTL_RESET_LBN 15 /* Self clear */ +#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */ +#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */ + +/* GPIO output */ +#define TXC_GPIO_OUTPUT 0xc346 +#define TXC_GPIO_DIR 0xc348 + +/* Vendor-specific BIST registers */ +#define TXC_BIST_CTL 0xc280 +#define TXC_BIST_TXFRMCNT 0xc281 +#define TXC_BIST_RX0FRMCNT 0xc282 +#define TXC_BIST_RX1FRMCNT 0xc283 +#define TXC_BIST_RX2FRMCNT 0xc284 +#define TXC_BIST_RX3FRMCNT
0xc285 +#define TXC_BIST_RX0ERRCNT 0xc286 +#define TXC_BIST_RX1ERRCNT 0xc287 +#define TXC_BIST_RX2ERRCNT 0xc288 +#define TXC_BIST_RX3ERRCNT 0xc289 + +/* BIST type (controls bit patter in test) */ +#define TXC_BIST_CTRL_TYPE_LBN 10 +#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */ +#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */ +#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */ +#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */ +/* Set this to 1 for 10 bit and 0 for 8 bit */ +#define TXC_BIST_CTRL_B10EN_LBN 12 +/* Enable BIST (write 0 to disable) */ +#define TXC_BIST_CTRL_ENAB_LBN 13 +/* Stop BIST (self-clears when stop complete) */ +#define TXC_BIST_CTRL_STOP_LBN 14 +/* Start BIST (cleared by writing 1 to STOP) */ +#define TXC_BIST_CTRL_STRT_LBN 15 + +/* Mt. Diablo test configuration */ +#define TXC_MTDIABLO_CTRL 0xc34f +#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10 + +struct txc43128_data { + unsigned long bug10934_timer; + enum efx_phy_mode phy_mode; + enum efx_loopback_mode loopback_mode; +}; + +/* The PHY sometimes needs a reset to bring the link back up. So long as + * it reports link down, we reset it every 5 seconds. + */ +#define BUG10934_RESET_INTERVAL (5 * HZ) + +/* Perform a reset that doesn't clear configuration changes */ +static void txc_reset_logic(struct efx_nic *efx); + +/* Set the output value of a gpio */ +void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on) +{ + efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on); +} + +/* Set up the GPIO direction register */ +void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir) +{ + efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir); +} + +/* Reset the PMA/PMD MMD. The documentation is explicit that this does a + * global reset (it's less clear what reset of other MMDs does).*/ +static int txc_reset_phy(struct efx_nic *efx) +{ + int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD, + TXC_MAX_RESET_TIME / TXC_RESET_WAIT, + TXC_RESET_WAIT); + if (rc < 0) + goto fail; + + /* Check that all the MMDs we expect are present and responding. */ + rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS); + if (rc < 0) + goto fail; + + return 0; + +fail: + netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n"); + return rc; +} + +/* Run a single BIST on one MMD */ +static int txc_bist_one(struct efx_nic *efx, int mmd, int test) +{ + int ctrl, bctl; + int lane; + int rc = 0; + + /* Set PMA to test into loopback using Mt Diablo reg as per app note */ + ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL); + ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN); + efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl); + + /* The BIST app. note lists these as 3 distinct steps. */ + /* Set the BIST type */ + bctl = (test << TXC_BIST_CTRL_TYPE_LBN); + efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl); + + /* Set the BSTEN bit in the BIST Control register to enable */ + bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN); + efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl); + + /* Set the BSTRT bit in the BIST Control register */ + efx_mdio_write(efx, mmd, TXC_BIST_CTL, + bctl | (1 << TXC_BIST_CTRL_STRT_LBN)); + + /* Wait. 
*/ + udelay(TXC_BIST_DURATION); + + /* Set the BSTOP bit in the BIST Control register */ + bctl |= (1 << TXC_BIST_CTRL_STOP_LBN); + efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl); + + /* The STOP bit should go off when things have stopped */ + while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN)) + bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL); + + /* Check all the error counts are 0 and all the frame counts are + non-zero */ + for (lane = 0; lane < 4; lane++) { + int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane); + if (count != 0) { + netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. " + "Lane %d had %d errs\n", lane, count); + rc = -EIO; + } + count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane); + if (count == 0) { + netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. " + "Lane %d got 0 frames\n", lane); + rc = -EIO; + } + } + + if (rc == 0) + netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n"); + + /* Disable BIST */ + efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0); + + /* Turn off loopback */ + ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN); + efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl); + + return rc; +} + +static int txc_bist(struct efx_nic *efx) +{ + return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD); +} + +/* Push the non-configurable defaults into the PHY. This must be + * done after every full reset */ +static void txc_apply_defaults(struct efx_nic *efx) +{ + int mctrl; + + /* Turn amplitude down and preemphasis off on the host side + * (PHY<->MAC) as this is believed less likely to upset Falcon + * and no adverse effects have been noted. It probably also + * saves a picowatt or two */ + + /* Turn off preemphasis */ + efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE); + efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE); + + /* Turn down the amplitude */ + efx_mdio_write(efx, MDIO_MMD_PHYXS, + TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH); + efx_mdio_write(efx, MDIO_MMD_PHYXS, + TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH); + + /* Set the line side amplitude and preemphasis to the databook + * defaults as an erratum causes them to be 0 on at least some + * PHY rev.s */ + efx_mdio_write(efx, MDIO_MMD_PMAPMD, + TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT); + efx_mdio_write(efx, MDIO_MMD_PMAPMD, + TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT); + efx_mdio_write(efx, MDIO_MMD_PMAPMD, + TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT); + efx_mdio_write(efx, MDIO_MMD_PMAPMD, + TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT); + + /* Set up the LEDs */ + mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL); + + /* Set the Green and Red LEDs to their default modes */ + mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN)); + efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl); + + /* Databook recommends doing this after configuration changes */ + txc_reset_logic(efx); + + falcon_board(efx)->type->init_phy(efx); +} + +static int txc43128_phy_probe(struct efx_nic *efx) +{ + struct txc43128_data *phy_data; + + /* Allocate phy private storage */ + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); + if (!phy_data) + return -ENOMEM; + efx->phy_data = phy_data; + phy_data->phy_mode = efx->phy_mode; + + efx->mdio.mmds = TXC_REQUIRED_DEVS; + efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; + + efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS; + + return 0; +} + +/* Initialisation entry point for this PHY driver */ +static int txc43128_phy_init(struct efx_nic *efx) +{ + int rc; + + rc = txc_reset_phy(efx); + 
if (rc < 0) + return rc; + + rc = txc_bist(efx); + if (rc < 0) + return rc; + + txc_apply_defaults(efx); + + return 0; +} + +/* Set the lane power down state in the global registers */ +static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd) +{ + int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN); + int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); + + if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) + ctl &= ~pd; + else + ctl |= pd; + + efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl); +} + +/* Set the lane power down state in the analog control registers */ +static void txc_analog_lane_power(struct efx_nic *efx, int mmd) +{ + int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN) + | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN); + int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN) + | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN); + int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL); + int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL); + + if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) { + txctl &= ~txpd; + rxctl &= ~rxpd; + } else { + txctl |= txpd; + rxctl |= rxpd; + } + + efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl); + efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl); +} + +static void txc_set_power(struct efx_nic *efx) +{ + /* According to the data book, all the MMDs can do low power */ + efx_mdio_set_mmds_lpower(efx, + !!(efx->phy_mode & PHY_MODE_LOW_POWER), + TXC_REQUIRED_DEVS); + + /* Global register bank is in PCS, PHY XS. These control the host + * side and line side settings respectively. */ + txc_glrgs_lane_power(efx, MDIO_MMD_PCS); + txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS); + + /* Analog register bank in PMA/PMD, PHY XS */ + txc_analog_lane_power(efx, MDIO_MMD_PMAPMD); + txc_analog_lane_power(efx, MDIO_MMD_PHYXS); +} + +static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd) +{ + int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); + int tries = 50; + + val |= (1 << TXC_GLCMD_LMTSWRST_LBN); + efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); + while (tries--) { + val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); + if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) + break; + udelay(1); + } + if (!tries) + netif_info(efx, hw, efx->net_dev, + TXCNAME " Logic reset timed out!\n"); +} + +/* Perform a logic reset. This preserves the configuration registers + * and is needed for some configuration changes to take effect */ +static void txc_reset_logic(struct efx_nic *efx) +{ + /* The data sheet claims we can do the logic reset on either the + * PCS or the PHYXS and the result is a reset of both host- and + * line-side logic. 
*/ + txc_reset_logic_mmd(efx, MDIO_MMD_PCS); +} + +static bool txc43128_phy_read_link(struct efx_nic *efx) +{ + return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS); +} + +static int txc43128_phy_reconfigure(struct efx_nic *efx) +{ + struct txc43128_data *phy_data = efx->phy_data; + enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode; + bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS); + + if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) { + txc_reset_phy(efx); + txc_apply_defaults(efx); + falcon_reset_xaui(efx); + mode_change &= ~PHY_MODE_TX_DISABLED; + } + + efx_mdio_transmit_disable(efx); + efx_mdio_phy_reconfigure(efx); + if (mode_change & PHY_MODE_LOW_POWER) + txc_set_power(efx); + + /* The data sheet claims this is required after every reconfiguration + * (note at end of 7.1), but we mustn't do it when nothing changes as + * it glitches the link, and reconfigure gets called on link change, + * so we get an IRQ storm on link up. */ + if (loop_change || mode_change) + txc_reset_logic(efx); + + phy_data->phy_mode = efx->phy_mode; + phy_data->loopback_mode = efx->loopback_mode; + + return 0; +} + +static void txc43128_phy_fini(struct efx_nic *efx) +{ + /* Disable link events */ + efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0); +} + +static void txc43128_phy_remove(struct efx_nic *efx) +{ + kfree(efx->phy_data); + efx->phy_data = NULL; +} + +/* Periodic callback: this exists mainly to poll link status as we + * don't use LASI interrupts */ +static bool txc43128_phy_poll(struct efx_nic *efx) +{ + struct txc43128_data *data = efx->phy_data; + bool was_up = efx->link_state.up; + + efx->link_state.up = txc43128_phy_read_link(efx); + efx->link_state.speed = 10000; + efx->link_state.fd = true; + efx->link_state.fc = efx->wanted_fc; + + if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) { + data->bug10934_timer = jiffies; + } else { + if (time_after_eq(jiffies, (data->bug10934_timer + + BUG10934_RESET_INTERVAL))) { + data->bug10934_timer = jiffies; + txc_reset_logic(efx); + } + } + + return efx->link_state.up != was_up; +} + +static const char *const txc43128_test_names[] = { + "bist" +}; + +static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index) +{ + if (index < ARRAY_SIZE(txc43128_test_names)) + return txc43128_test_names[index]; + return NULL; +} + +static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags) +{ + int rc; + + if (!(flags & ETH_TEST_FL_OFFLINE)) + return 0; + + rc = txc_reset_phy(efx); + if (rc < 0) + return rc; + + rc = txc_bist(efx); + txc_apply_defaults(efx); + results[0] = rc ? 
-1 : 1; + return rc; +} + +static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) +{ + mdio45_ethtool_gset(&efx->mdio, ecmd); +} + +const struct efx_phy_operations falcon_txc_phy_ops = { + .probe = txc43128_phy_probe, + .init = txc43128_phy_init, + .reconfigure = txc43128_phy_reconfigure, + .poll = txc43128_phy_poll, + .fini = txc43128_phy_fini, + .remove = txc43128_phy_remove, + .get_settings = txc43128_get_settings, + .set_settings = efx_mdio_set_settings, + .test_alive = efx_mdio_test_alive, + .run_tests = txc43128_run_tests, + .test_name = txc43128_test_name, +}; diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h new file mode 100644 index 000000000..f62901d4c --- /dev/null +++ b/drivers/net/ethernet/sfc/vfdi.h @@ -0,0 +1,255 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2010-2012 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#ifndef _VFDI_H +#define _VFDI_H + +/** + * DOC: Virtual Function Driver Interface + * + * This file contains software structures used to form a two way + * communication channel between the VF driver and the PF driver, + * named Virtual Function Driver Interface (VFDI). + * + * For the purposes of VFDI, a page is a memory region with size and + * alignment of 4K. All addresses are DMA addresses to be used within + * the domain of the relevant VF. + * + * The only hardware-defined channels for a VF driver to communicate + * with the PF driver are the event mailboxes (%FR_CZ_USR_EV + * registers). Writing to these registers generates an event with + * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox + * and USER_EV_REG_VALUE set to the value written. The PF driver may + * direct or disable delivery of these events by setting + * %FR_CZ_USR_EV_CFG. + * + * The PF driver can send arbitrary events to arbitrary event queues. + * However, for consistency, VFDI events from the PF are defined to + * follow the same form and be sent to the first event queue assigned + * to the VF while that queue is enabled by the VF driver. + * + * The general form of the variable bits of VFDI events is: + * + * 0 16 24 31 + * | DATA | TYPE | SEQ | + * + * SEQ is a sequence number which should be incremented by 1 (modulo + * 256) for each event. The sequence numbers used in each direction + * are independent. + * + * The VF submits requests of type &struct vfdi_req by sending the + * address of the request (ADDR) in a series of 4 events: + * + * 0 16 24 31 + * | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ | + * | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 | + * | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 | + * | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 | + * + * The address must be page-aligned. After receiving such a valid + * series of events, the PF driver will attempt to read the request + * and write a response to the same address. In case of an invalid + * sequence of events or a DMA error, there will be no response. + * + * The VF driver may request that the PF driver writes status + * information into its domain asynchronously. 
After writing the + * status, the PF driver will send an event of the form: + * + * 0 16 24 31 + * | reserved | VFDI_EV_TYPE_STATUS | SEQ | + * + * In case the VF must be reset for any reason, the PF driver will + * send an event of the form: + * + * 0 16 24 31 + * | reserved | VFDI_EV_TYPE_RESET | SEQ | + * + * It is then the responsibility of the VF driver to request + * reinitialisation of its queues. + */ +#define VFDI_EV_SEQ_LBN 24 +#define VFDI_EV_SEQ_WIDTH 8 +#define VFDI_EV_TYPE_LBN 16 +#define VFDI_EV_TYPE_WIDTH 8 +#define VFDI_EV_TYPE_REQ_WORD0 0 +#define VFDI_EV_TYPE_REQ_WORD1 1 +#define VFDI_EV_TYPE_REQ_WORD2 2 +#define VFDI_EV_TYPE_REQ_WORD3 3 +#define VFDI_EV_TYPE_STATUS 4 +#define VFDI_EV_TYPE_RESET 5 +#define VFDI_EV_DATA_LBN 0 +#define VFDI_EV_DATA_WIDTH 16 + +struct vfdi_endpoint { + u8 mac_addr[ETH_ALEN]; + __be16 tci; +}; + +/** + * enum vfdi_op - VFDI operation enumeration + * @VFDI_OP_RESPONSE: Indicates a response to the request. + * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ. + * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ. + * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ. + * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then + * finalize the SRAM entries. + * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ. + * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters. + * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates + * from PF and write the initial status. + * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status + * updates from PF. + */ +enum vfdi_op { + VFDI_OP_RESPONSE = 0, + VFDI_OP_INIT_EVQ = 1, + VFDI_OP_INIT_RXQ = 2, + VFDI_OP_INIT_TXQ = 3, + VFDI_OP_FINI_ALL_QUEUES = 4, + VFDI_OP_INSERT_FILTER = 5, + VFDI_OP_REMOVE_ALL_FILTERS = 6, + VFDI_OP_SET_STATUS_PAGE = 7, + VFDI_OP_CLEAR_STATUS_PAGE = 8, + VFDI_OP_LIMIT, +}; + +/* Response codes for VFDI operations. Other values may be used in future. */ +#define VFDI_RC_SUCCESS 0 +#define VFDI_RC_ENOMEM (-12) +#define VFDI_RC_EINVAL (-22) +#define VFDI_RC_EOPNOTSUPP (-95) +#define VFDI_RC_ETIMEDOUT (-110) + +/** + * struct vfdi_req - Request from VF driver to PF driver + * @op: Operation code or response indicator, taken from &enum vfdi_op. + * @rc: Response code. Set to 0 on success or a negative error code on failure. + * @u.init_evq.index: Index of event queue to create. + * @u.init_evq.buf_count: Number of 4k buffers backing event queue. + * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA + * address of each page backing the event queue. + * @u.init_rxq.index: Index of receive queue to create. + * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue. + * @u.init_rxq.evq: Instance of event queue to target receive events at. + * @u.init_rxq.label: Label used in receive events. + * @u.init_rxq.flags: Unused. + * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA + * address of each page backing the receive queue. + * @u.init_txq.index: Index of transmit queue to create. + * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue. + * @u.init_txq.evq: Instance of event queue to target transmit completion + * events at. + * @u.init_txq.label: Label used in transmit completion events. + * @u.init_txq.flags: Checksum offload flags. + * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA + * address of each page backing the transmit queue. 
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting + * all traffic at this receive queue. + * @u.mac_filter.flags: MAC filter flags. + * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status. + * This address must be page-aligned and the PF may write up to a + * whole page (allowing for extension of the structure). + * @u.set_status_page.peer_page_count: Number of additional pages the VF + * has provided into which peer addresses may be DMAd. + * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages. + * If the number of peers exceeds 256, then the VF must provide + * additional pages in this array. The PF will then DMA up to + * 512 vfdi_endpoint structures into each page. These addresses + * must be page-aligned. + */ +struct vfdi_req { + u32 op; + u32 reserved1; + s32 rc; + u32 reserved2; + union { + struct { + u32 index; + u32 buf_count; + u64 addr[]; + } init_evq; + struct { + u32 index; + u32 buf_count; + u32 evq; + u32 label; + u32 flags; +#define VFDI_RXQ_FLAG_SCATTER_EN 1 + u32 reserved; + u64 addr[]; + } init_rxq; + struct { + u32 index; + u32 buf_count; + u32 evq; + u32 label; + u32 flags; +#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1 +#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2 + u32 reserved; + u64 addr[]; + } init_txq; + struct { + u32 rxq; + u32 flags; +#define VFDI_MAC_FILTER_FLAG_RSS 1 +#define VFDI_MAC_FILTER_FLAG_SCATTER 2 + } mac_filter; + struct { + u64 dma_addr; + u64 peer_page_count; + u64 peer_page_addr[]; + } set_status_page; + } u; +}; + +/** + * struct vfdi_status - Status provided by PF driver to VF driver + * @generation_start: A generation count DMA'd to VF *before* the + * rest of the structure. + * @generation_end: A generation count DMA'd to VF *after* the + * rest of the structure. + * @version: Version of this structure; currently set to 1. Later + * versions must either be layout-compatible or only be sent to VFs + * that specifically request them. + * @length: Total length of this structure including embedded tables + * @vi_scale: log2 the number of VIs available on this VF. This quantity + * is used by the hardware for register decoding. + * @max_tx_channels: The maximum number of transmit queues the VF can use. + * @rss_rxq_count: The number of receive queues present in the shared RSS + * indirection table. + * @peer_count: Total number of peers in the complete peer list. If larger + * than ARRAY_SIZE(%peers), then the VF must provide sufficient + * additional pages each of which is filled with vfdi_endpoint structures. + * @local: The MAC address and outer VLAN tag of *this* VF + * @peers: Table of peer addresses. The @tci fields in these structures + * are currently unused and must be ignored. Additional peers are + * written into any additional pages provided by the VF. + * @timer_quantum_ns: Timer quantum (nominal period between timer ticks) + * for interrupt moderation timers, in nanoseconds. This member is only + * present if @length is sufficiently large. 
+ */ +struct vfdi_status { + u32 generation_start; + u32 generation_end; + u32 version; + u32 length; + u8 vi_scale; + u8 max_tx_channels; + u8 rss_rxq_count; + u8 reserved1; + u16 peer_count; + u16 reserved2; + struct vfdi_endpoint local; + struct vfdi_endpoint peers[256]; + + /* Members below here extend version 1 of this structure */ + u32 timer_quantum_ns; +}; + +#endif diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h new file mode 100644 index 000000000..2310b75d4 --- /dev/null +++ b/drivers/net/ethernet/sfc/workarounds.h @@ -0,0 +1,53 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2006-2013 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_WORKAROUNDS_H +#define EFX_WORKAROUNDS_H + +/* + * Hardware workarounds. + * Bug numbers are from Solarflare's Bugzilla. + */ + +#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) +#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) +#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) +#define EFX_WORKAROUND_10G(efx) 1 + +/* Bit-bashed I2C reads cause performance drop */ +#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G +/* Truncated IPv4 packets can confuse the TX packet parser */ +#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB +/* Legacy interrupt storm when interrupt fifo fills */ +#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA + +/* Spurious parity errors in TSORT buffers */ +#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A +/* Unaligned read request >512 bytes after aligning may break TSORT */ +#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A +/* iSCSI parsing errors */ +#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A +/* RX events go missing */ +#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A +/* RX_RESET on A1 */ +#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A +/* Increase filter depth to avoid RX_RESET */ +#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A +/* Flushes may never complete */ +#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB +/* Leak overlength packets rather than free */ +#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A + +/* Lockup when writing event block registers at gen2/gen3 */ +#define EFX_EF10_WORKAROUND_35388(efx) \ + (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388) +#define EFX_WORKAROUND_35388(efx) \ + (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx)) + +#endif /* EFX_WORKAROUNDS_H */ diff --git a/drivers/net/ethernet/sgi/Kconfig b/drivers/net/ethernet/sgi/Kconfig new file mode 100644 index 000000000..e832f4666 --- /dev/null +++ b/drivers/net/ethernet/sgi/Kconfig @@ -0,0 +1,35 @@ +# +# SGI device configuration +# + +config NET_VENDOR_SGI + bool "SGI devices" + default y + depends on (PCI && SGI_IP27) || SGI_IP32 + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about SGI devices. If you say Y, you will be asked for + your specific card in the following questions. 
+ +if NET_VENDOR_SGI + +config SGI_IOC3_ETH + bool "SGI IOC3 Ethernet" + depends on PCI && SGI_IP27 + select CRC32 + select MII + ---help--- + If you have a network (Ethernet) card of this type, say Y and read + the Ethernet-HOWTO, available from + . + +config SGI_O2MACE_ETH + tristate "SGI O2 MACE Fast Ethernet support" + depends on SGI_IP32=y + +endif # NET_VENDOR_SGI diff --git a/drivers/net/ethernet/sgi/Makefile b/drivers/net/ethernet/sgi/Makefile new file mode 100644 index 000000000..e5bedd271 --- /dev/null +++ b/drivers/net/ethernet/sgi/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the SGI device drivers. +# + +obj-$(CONFIG_SGI_O2MACE_ETH) += meth.o +obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c new file mode 100644 index 000000000..7a254da85 --- /dev/null +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -0,0 +1,1672 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. + * + * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle + * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. + * + * References: + * o IOC3 ASIC specification 4.51, 1996-04-18 + * o IEEE 802.3 specification, 2000 edition + * o DP38840A Specification, National Semiconductor, March 1997 + * + * To do: + * + * o Handle allocation failures in ioc3_alloc_skb() more gracefully. + * o Handle allocation failures in ioc3_init_rings(). + * o Use prefetching for large packets. What is a good lower limit for + * prefetching? + * o We're probably allocating a bit too much memory. + * o Use hardware checksums. + * o Convert to using a IOC3 meta driver. + * o Which PHYs might possibly be attached to the IOC3 in real live, + * which workarounds are required for them? Do we ever have Lucent's? + * o For the 2.5 branch kill the mii-tool ioctls. + */ + +#define IOC3_NAME "ioc3-eth" +#define IOC3_VERSION "2.6.3-4" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SERIAL_8250 +#include +#include +#include +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The + * value must be a power of two. + */ +#define RX_BUFFS 64 + +#define ETCSR_FD ((17<data); + if (offset) + skb_reserve(skb, offset); + } + + return skb; +} + +static inline unsigned long ioc3_map(void *ptr, unsigned long vdev) +{ +#ifdef CONFIG_SGI_IP27 + vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */ + + return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF | + ((unsigned long)ptr & TO_PHYS_MASK); +#else + return virt_to_bus(ptr); +#endif +} + +/* BEWARE: The IOC3 documentation documents the size of rx buffers as + 1644 while it's actually 1664. This one was nasty to track down ... */ +#define RX_OFFSET 10 +#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE) + +/* DMA barrier to separate cached and uncached accesses. 
*/ +#define BARRIER() \ + __asm__("sync" ::: "memory") + + +#define IOC3_SIZE 0x100000 + +/* + * IOC3 is a big endian device + * + * Unorthodox but makes the users of these macros more readable - the pointer + * to the IOC3's memory mapped registers is expected as struct ioc3 * ioc3 + * in the environment. + */ +#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr) +#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0) +#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0) +#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr) +#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0) +#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr) +#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0) +#define ioc3_r_eier() be32_to_cpu(ioc3->eier) +#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0) +#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr) +#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0) +#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h) +#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0) +#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l) +#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0) +#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar) +#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0) +#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir) +#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0) +#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir) +#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0) +#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr) +#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0) +#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr) +#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0) +#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr) +#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0) +#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc) +#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0) +#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir) +#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0) +#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h) +#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0) +#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l) +#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0) +#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir) +#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0) +#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir) +#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0) +#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h) +#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0) +#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l) +#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0) +#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h) +#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0) +#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l) +#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0) +#define ioc3_r_micr() be32_to_cpu(ioc3->micr) +#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0) +#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r) +#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0) +#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w) +#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0) + +static inline 
u32 mcr_pack(u32 pulse, u32 sample) +{ + return (pulse << 10) | (sample << 2); +} + +static int nic_wait(struct ioc3 *ioc3) +{ + u32 mcr; + + do { + mcr = ioc3_r_mcr(); + } while (!(mcr & 2)); + + return mcr & 1; +} + +static int nic_reset(struct ioc3 *ioc3) +{ + int presence; + + ioc3_w_mcr(mcr_pack(500, 65)); + presence = nic_wait(ioc3); + + ioc3_w_mcr(mcr_pack(0, 500)); + nic_wait(ioc3); + + return presence; +} + +static inline int nic_read_bit(struct ioc3 *ioc3) +{ + int result; + + ioc3_w_mcr(mcr_pack(6, 13)); + result = nic_wait(ioc3); + ioc3_w_mcr(mcr_pack(0, 100)); + nic_wait(ioc3); + + return result; +} + +static inline void nic_write_bit(struct ioc3 *ioc3, int bit) +{ + if (bit) + ioc3_w_mcr(mcr_pack(6, 110)); + else + ioc3_w_mcr(mcr_pack(80, 30)); + + nic_wait(ioc3); +} + +/* + * Read a byte from an iButton device + */ +static u32 nic_read_byte(struct ioc3 *ioc3) +{ + u32 result = 0; + int i; + + for (i = 0; i < 8; i++) + result = (result >> 1) | (nic_read_bit(ioc3) << 7); + + return result; +} + +/* + * Write a byte to an iButton device + */ +static void nic_write_byte(struct ioc3 *ioc3, int byte) +{ + int i, bit; + + for (i = 8; i; i--) { + bit = byte & 1; + byte >>= 1; + + nic_write_bit(ioc3, bit); + } +} + +static u64 nic_find(struct ioc3 *ioc3, int *last) +{ + int a, b, index, disc; + u64 address = 0; + + nic_reset(ioc3); + /* Search ROM. */ + nic_write_byte(ioc3, 0xf0); + + /* Algorithm from ``Book of iButton Standards''. */ + for (index = 0, disc = 0; index < 64; index++) { + a = nic_read_bit(ioc3); + b = nic_read_bit(ioc3); + + if (a && b) { + printk("NIC search failed (not fatal).\n"); + *last = 0; + return 0; + } + + if (!a && !b) { + if (index == *last) { + address |= 1UL << index; + } else if (index > *last) { + address &= ~(1UL << index); + disc = index; + } else if ((address & (1UL << index)) == 0) + disc = index; + nic_write_bit(ioc3, address & (1UL << index)); + continue; + } else { + if (a) + address |= 1UL << index; + else + address &= ~(1UL << index); + nic_write_bit(ioc3, a); + continue; + } + } + + *last = disc; + + return address; +} + +static int nic_init(struct ioc3 *ioc3) +{ + const char *unknown = "unknown"; + const char *type = unknown; + u8 crc; + u8 serial[6]; + int save = 0, i; + + while (1) { + u64 reg; + reg = nic_find(ioc3, &save); + + switch (reg & 0xff) { + case 0x91: + type = "DS1981U"; + break; + default: + if (save == 0) { + /* Let the caller try again. */ + return -1; + } + continue; + } + + nic_reset(ioc3); + + /* Match ROM. */ + nic_write_byte(ioc3, 0x55); + for (i = 0; i < 8; i++) + nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff); + + reg >>= 8; /* Shift out type. */ + for (i = 0; i < 6; i++) { + serial[i] = reg & 0xff; + reg >>= 8; + } + crc = reg & 0xff; + break; + } + + printk("Found %s NIC", type); + if (type != unknown) + printk (" registration number %pM, CRC %02x", serial, crc); + printk(".\n"); + + return 0; +} + +/* + * Read the NIC (Number-In-a-Can) device used to store the MAC address on + * SN0 / SN00 nodeboards and PCI cards. + */ +static void ioc3_get_eaddr_nic(struct ioc3_private *ip) +{ + struct ioc3 *ioc3 = ip->regs; + u8 nic[14]; + int tries = 2; /* There may be some problem with the battery? */ + int i; + + ioc3_w_gpcr_s(1 << 21); + + while (tries--) { + if (!nic_init(ioc3)) + break; + udelay(500); + } + + if (tries < 0) { + printk("Failed to read MAC address\n"); + return; + } + + /* Read Memory. 
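+ * The sequence below is the one-wire READ MEMORY transaction: command
+ * byte 0xf0 followed by a two-byte start address (0x0000), after which
+ * the 14 data bytes are clocked out via nic_read_byte().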
*/ + nic_write_byte(ioc3, 0xf0); + nic_write_byte(ioc3, 0x00); + nic_write_byte(ioc3, 0x00); + + for (i = 13; i >= 0; i--) + nic[i] = nic_read_byte(ioc3); + + for (i = 2; i < 8; i++) + priv_netdev(ip)->dev_addr[i - 2] = nic[i]; +} + +/* + * Ok, this is hosed by design. It's necessary to know what machine the + * NIC is in in order to know how to read the NIC address. We also have + * to know if it's a PCI card or a NIC in on the node board ... + */ +static void ioc3_get_eaddr(struct ioc3_private *ip) +{ + ioc3_get_eaddr_nic(ip); + + printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr); +} + +static void __ioc3_set_mac_address(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + + ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]); + ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | + (dev->dev_addr[1] << 8) | dev->dev_addr[0]); +} + +static int ioc3_set_mac_address(struct net_device *dev, void *addr) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct sockaddr *sa = addr; + + memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); + + spin_lock_irq(&ip->ioc3_lock); + __ioc3_set_mac_address(dev); + spin_unlock_irq(&ip->ioc3_lock); + + return 0; +} + +/* + * Caller must hold the ioc3_lock ever for MII readers. This is also + * used to protect the transmitter side but it's low contention. + */ +static int ioc3_mdio_read(struct net_device *dev, int phy, int reg) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + + while (ioc3_r_micr() & MICR_BUSY); + ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG); + while (ioc3_r_micr() & MICR_BUSY); + + return ioc3_r_midr_r() & MIDR_DATA_MASK; +} + +static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + + while (ioc3_r_micr() & MICR_BUSY); + ioc3_w_midr_w(data); + ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg); + while (ioc3_r_micr() & MICR_BUSY); +} + +static int ioc3_mii_init(struct ioc3_private *ip); + +static struct net_device_stats *ioc3_get_stats(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + + dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK); + return &dev->stats; +} + +static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) +{ + struct ethhdr *eh = eth_hdr(skb); + uint32_t csum, ehsum; + unsigned int proto; + struct iphdr *ih; + uint16_t *ew; + unsigned char *cp; + + /* + * Did hardware handle the checksum at all? The cases we can handle + * are: + * + * - TCP and UDP checksums of IPv4 only. + * - IPv6 would be doable but we keep that for later ... + * - Only unfragmented packets. Did somebody already tell you + * fragmentation is evil? + * - don't care about packet size. Worst case when processing a + * malformed packet we'll try to access the packet at ip header + + * 64 bytes which is still inside the skb. Even in the unlikely + * case where the checksum is right the higher layers will still + * drop the packet as appropriate. 
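+ *
+ * Illustrative sketch of the arithmetic below (not taken from the IOC3
+ * documentation): the hardware sum appears to cover the whole frame, so
+ * we add the TCP/UDP pseudo header and then add 0xffff ^ x for the
+ * Ethernet header words and the trailing CRC words -- in one's
+ * complement arithmetic adding 0xffff ^ x is the same as subtracting x.
+ * After folding, a result of 0xffff means the checksum verified.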
+ */ + if (eh->h_proto != htons(ETH_P_IP)) + return; + + ih = (struct iphdr *) ((char *)eh + ETH_HLEN); + if (ip_is_fragment(ih)) + return; + + proto = ih->protocol; + if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) + return; + + /* Same as tx - compute csum of pseudo header */ + csum = hwsum + + (ih->tot_len - (ih->ihl << 2)) + + htons((uint16_t)ih->protocol) + + (ih->saddr >> 16) + (ih->saddr & 0xffff) + + (ih->daddr >> 16) + (ih->daddr & 0xffff); + + /* Sum up ethernet dest addr, src addr and protocol */ + ew = (uint16_t *) eh; + ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6]; + + ehsum = (ehsum & 0xffff) + (ehsum >> 16); + ehsum = (ehsum & 0xffff) + (ehsum >> 16); + + csum += 0xffff ^ ehsum; + + /* In the next step we also subtract the 1's complement + checksum of the trailing ethernet CRC. */ + cp = (char *)eh + len; /* points at trailing CRC */ + if (len & 1) { + csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]); + csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]); + } else { + csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]); + csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]); + } + + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + + if (csum == 0xffff) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +static inline void ioc3_rx(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct sk_buff *skb, *new_skb; + struct ioc3 *ioc3 = ip->regs; + int rx_entry, n_entry, len; + struct ioc3_erxbuf *rxb; + unsigned long *rxr; + u32 w0, err; + + rxr = ip->rxr; /* Ring base */ + rx_entry = ip->rx_ci; /* RX consume index */ + n_entry = ip->rx_pi; + + skb = ip->rx_skbs[rx_entry]; + rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); + w0 = be32_to_cpu(rxb->w0); + + while (w0 & ERXBUF_V) { + err = be32_to_cpu(rxb->err); /* It's valid ... */ + if (err & ERXBUF_GOODPKT) { + len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; + skb_trim(skb, len); + skb->protocol = eth_type_trans(skb, dev); + + new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); + if (!new_skb) { + /* Ouch, drop packet and just recycle packet + to keep the ring filled. */ + dev->stats.rx_dropped++; + new_skb = skb; + goto next; + } + + if (likely(dev->features & NETIF_F_RXCSUM)) + ioc3_tcpudp_checksum(skb, + w0 & ERXBUF_IPCKSUM_MASK, len); + + netif_rx(skb); + + ip->rx_skbs[rx_entry] = NULL; /* Poison */ + + /* Because we reserve afterwards. */ + skb_put(new_skb, (1664 + RX_OFFSET)); + rxb = (struct ioc3_erxbuf *) new_skb->data; + skb_reserve(new_skb, RX_OFFSET); + + dev->stats.rx_packets++; /* Statistics */ + dev->stats.rx_bytes += len; + } else { + /* The frame is invalid and the skb never + reached the network layer so we can just + recycle it. */ + new_skb = skb; + dev->stats.rx_errors++; + } + if (err & ERXBUF_CRCERR) /* Statistics */ + dev->stats.rx_crc_errors++; + if (err & ERXBUF_FRAMERR) + dev->stats.rx_frame_errors++; +next: + ip->rx_skbs[n_entry] = new_skb; + rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1)); + rxb->w0 = 0; /* Clear valid flag */ + n_entry = (n_entry + 1) & 511; /* Update erpir */ + + /* Now go on to the next ring entry. 
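+ * (The RX ring holds 512 entries, hence the "& 511" wrap of both the
+ * consume and produce indices.)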
*/ + rx_entry = (rx_entry + 1) & 511; + skb = ip->rx_skbs[rx_entry]; + rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); + w0 = be32_to_cpu(rxb->w0); + } + ioc3_w_erpir((n_entry << 3) | ERPIR_ARM); + ip->rx_pi = n_entry; + ip->rx_ci = rx_entry; +} + +static inline void ioc3_tx(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + unsigned long packets, bytes; + struct ioc3 *ioc3 = ip->regs; + int tx_entry, o_entry; + struct sk_buff *skb; + u32 etcir; + + spin_lock(&ip->ioc3_lock); + etcir = ioc3_r_etcir(); + + tx_entry = (etcir >> 7) & 127; + o_entry = ip->tx_ci; + packets = 0; + bytes = 0; + + while (o_entry != tx_entry) { + packets++; + skb = ip->tx_skbs[o_entry]; + bytes += skb->len; + dev_kfree_skb_irq(skb); + ip->tx_skbs[o_entry] = NULL; + + o_entry = (o_entry + 1) & 127; /* Next */ + + etcir = ioc3_r_etcir(); /* More pkts sent? */ + tx_entry = (etcir >> 7) & 127; + } + + dev->stats.tx_packets += packets; + dev->stats.tx_bytes += bytes; + ip->txqlen -= packets; + + if (ip->txqlen < 128) + netif_wake_queue(dev); + + ip->tx_ci = o_entry; + spin_unlock(&ip->ioc3_lock); +} + +/* + * Deal with fatal IOC3 errors. This condition might be caused by a hard or + * software problems, so we should try to recover + * more gracefully if this ever happens. In theory we might be flooded + * with such error interrupts if something really goes wrong, so we might + * also consider to take the interface down. + */ +static void ioc3_error(struct net_device *dev, u32 eisr) +{ + struct ioc3_private *ip = netdev_priv(dev); + unsigned char *iface = dev->name; + + spin_lock(&ip->ioc3_lock); + + if (eisr & EISR_RXOFLO) + printk(KERN_ERR "%s: RX overflow.\n", iface); + if (eisr & EISR_RXBUFOFLO) + printk(KERN_ERR "%s: RX buffer overflow.\n", iface); + if (eisr & EISR_RXMEMERR) + printk(KERN_ERR "%s: RX PCI error.\n", iface); + if (eisr & EISR_RXPARERR) + printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface); + if (eisr & EISR_TXBUFUFLO) + printk(KERN_ERR "%s: TX buffer underflow.\n", iface); + if (eisr & EISR_TXMEMERR) + printk(KERN_ERR "%s: TX PCI error.\n", iface); + + ioc3_stop(ip); + ioc3_init(dev); + ioc3_mii_init(ip); + + netif_wake_queue(dev); + + spin_unlock(&ip->ioc3_lock); +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. 
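+ * EISR_RXTIMERINT is dispatched to ioc3_rx(), EISR_TXEXPLICIT to
+ * ioc3_tx(), and the overflow/underflow, parity and PCI error bits to
+ * ioc3_error().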
*/ +static irqreturn_t ioc3_interrupt(int irq, void *_dev) +{ + struct net_device *dev = (struct net_device *)_dev; + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | + EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | + EISR_TXEXPLICIT | EISR_TXMEMERR; + u32 eisr; + + eisr = ioc3_r_eisr() & enabled; + + ioc3_w_eisr(eisr); + (void) ioc3_r_eisr(); /* Flush */ + + if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR | + EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR)) + ioc3_error(dev, eisr); + if (eisr & EISR_RXTIMERINT) + ioc3_rx(dev); + if (eisr & EISR_TXEXPLICIT) + ioc3_tx(dev); + + return IRQ_HANDLED; +} + +static inline void ioc3_setup_duplex(struct ioc3_private *ip) +{ + struct ioc3 *ioc3 = ip->regs; + + if (ip->mii.full_duplex) { + ioc3_w_etcsr(ETCSR_FD); + ip->emcr |= EMCR_DUPLEX; + } else { + ioc3_w_etcsr(ETCSR_HD); + ip->emcr &= ~EMCR_DUPLEX; + } + ioc3_w_emcr(ip->emcr); +} + +static void ioc3_timer(unsigned long data) +{ + struct ioc3_private *ip = (struct ioc3_private *) data; + + /* Print the link status if it has changed */ + mii_check_media(&ip->mii, 1, 0); + ioc3_setup_duplex(ip); + + ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */ + add_timer(&ip->ioc3_timer); +} + +/* + * Try to find a PHY. There is no apparent relation between the MII addresses + * in the SGI documentation and what we find in reality, so we simply probe + * for the PHY. It seems IOC3 PHYs usually live on address 31. One of my + * onboard IOC3s has the special oddity that probing doesn't seem to find it + * yet the interface seems to work fine, so if probing fails we for now will + * simply default to PHY 31 instead of bailing out. + */ +static int ioc3_mii_init(struct ioc3_private *ip) +{ + struct net_device *dev = priv_netdev(ip); + int i, found = 0, res = 0; + int ioc3_phy_workaround = 1; + u16 word; + + for (i = 0; i < 32; i++) { + word = ioc3_mdio_read(dev, i, MII_PHYSID1); + + if (word != 0xffff && word != 0x0000) { + found = 1; + break; /* Found a PHY */ + } + } + + if (!found) { + if (ioc3_phy_workaround) + i = 31; + else { + ip->mii.phy_id = -1; + res = -ENODEV; + goto out; + } + } + + ip->mii.phy_id = i; + +out: + return res; +} + +static void ioc3_mii_start(struct ioc3_private *ip) +{ + ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. 
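+ * The timer re-arms itself from ioc3_timer(), which polls the link via
+ * mii_check_media() and updates the duplex setting.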
*/ + ip->ioc3_timer.data = (unsigned long) ip; + ip->ioc3_timer.function = ioc3_timer; + add_timer(&ip->ioc3_timer); +} + +static inline void ioc3_clean_rx_ring(struct ioc3_private *ip) +{ + struct sk_buff *skb; + int i; + + for (i = ip->rx_ci; i & 15; i++) { + ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci]; + ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++]; + } + ip->rx_pi &= 511; + ip->rx_ci &= 511; + + for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) { + struct ioc3_erxbuf *rxb; + skb = ip->rx_skbs[i]; + rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET); + rxb->w0 = 0; + } +} + +static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) +{ + struct sk_buff *skb; + int i; + + for (i=0; i < 128; i++) { + skb = ip->tx_skbs[i]; + if (skb) { + ip->tx_skbs[i] = NULL; + dev_kfree_skb_any(skb); + } + ip->txr[i].cmd = 0; + } + ip->tx_pi = 0; + ip->tx_ci = 0; +} + +static void ioc3_free_rings(struct ioc3_private *ip) +{ + struct sk_buff *skb; + int rx_entry, n_entry; + + if (ip->txr) { + ioc3_clean_tx_ring(ip); + free_pages((unsigned long)ip->txr, 2); + ip->txr = NULL; + } + + if (ip->rxr) { + n_entry = ip->rx_ci; + rx_entry = ip->rx_pi; + + while (n_entry != rx_entry) { + skb = ip->rx_skbs[n_entry]; + if (skb) + dev_kfree_skb_any(skb); + + n_entry = (n_entry + 1) & 511; + } + free_page((unsigned long)ip->rxr); + ip->rxr = NULL; + } +} + +static void ioc3_alloc_rings(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3_erxbuf *rxb; + unsigned long *rxr; + int i; + + if (ip->rxr == NULL) { + /* Allocate and initialize rx ring. 4kb = 512 entries */ + ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC); + rxr = ip->rxr; + if (!rxr) + printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n"); + + /* Now the rx buffers. The RX ring may be larger but + we only allocate 16 buffers for now. Need to tune + this for performance and memory later. */ + for (i = 0; i < RX_BUFFS; i++) { + struct sk_buff *skb; + + skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); + if (!skb) { + show_free_areas(0); + continue; + } + + ip->rx_skbs[i] = skb; + + /* Because we reserve afterwards. */ + skb_put(skb, (1664 + RX_OFFSET)); + rxb = (struct ioc3_erxbuf *) skb->data; + rxr[i] = cpu_to_be64(ioc3_map(rxb, 1)); + skb_reserve(skb, RX_OFFSET); + } + ip->rx_ci = 0; + ip->rx_pi = RX_BUFFS; + } + + if (ip->txr == NULL) { + /* Allocate and initialize tx rings. 16kb = 128 bufs. */ + ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2); + if (!ip->txr) + printk("ioc3_alloc_rings(): __get_free_pages() failed!\n"); + ip->tx_pi = 0; + ip->tx_ci = 0; + } +} + +static void ioc3_init_rings(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + unsigned long ring; + + ioc3_free_rings(ip); + ioc3_alloc_rings(dev); + + ioc3_clean_rx_ring(ip); + ioc3_clean_tx_ring(ip); + + /* Now the rx ring base, consume & produce registers. */ + ring = ioc3_map(ip->rxr, 0); + ioc3_w_erbr_h(ring >> 32); + ioc3_w_erbr_l(ring & 0xffffffff); + ioc3_w_ercir(ip->rx_ci << 3); + ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM); + + ring = ioc3_map(ip->txr, 0); + + ip->txqlen = 0; /* nothing queued */ + + /* Now the tx ring base, consume & produce registers. 
*/ + ioc3_w_etbr_h(ring >> 32); + ioc3_w_etbr_l(ring & 0xffffffff); + ioc3_w_etpir(ip->tx_pi << 7); + ioc3_w_etcir(ip->tx_ci << 7); + (void) ioc3_r_etcir(); /* Flush */ +} + +static inline void ioc3_ssram_disc(struct ioc3_private *ip) +{ + struct ioc3 *ioc3 = ip->regs; + volatile u32 *ssram0 = &ioc3->ssram[0x0000]; + volatile u32 *ssram1 = &ioc3->ssram[0x4000]; + unsigned int pattern = 0x5555; + + /* Assume the larger size SSRAM and enable parity checking */ + ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR)); + + *ssram0 = pattern; + *ssram1 = ~pattern & IOC3_SSRAM_DM; + + if ((*ssram0 & IOC3_SSRAM_DM) != pattern || + (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) { + /* set ssram size to 64 KB */ + ip->emcr = EMCR_RAMPAR; + ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ); + } else + ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR; +} + +static void ioc3_init(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + + del_timer_sync(&ip->ioc3_timer); /* Kill if running */ + + ioc3_w_emcr(EMCR_RST); /* Reset */ + (void) ioc3_r_emcr(); /* Flush WB */ + udelay(4); /* Give it time ... */ + ioc3_w_emcr(0); + (void) ioc3_r_emcr(); + + /* Misc registers */ +#ifdef CONFIG_SGI_IP27 + ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */ +#else + ioc3_w_erbar(0); /* Let PCI API get it right */ +#endif + (void) ioc3_r_etcdc(); /* Clear on read */ + ioc3_w_ercsr(15); /* RX low watermark */ + ioc3_w_ertr(0); /* Interrupt immediately */ + __ioc3_set_mac_address(dev); + ioc3_w_ehar_h(ip->ehar_h); + ioc3_w_ehar_l(ip->ehar_l); + ioc3_w_ersr(42); /* XXX should be random */ + + ioc3_init_rings(dev); + + ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | + EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN; + ioc3_w_emcr(ip->emcr); + ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO | + EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO | + EISR_TXEXPLICIT | EISR_TXMEMERR); + (void) ioc3_r_eier(); +} + +static inline void ioc3_stop(struct ioc3_private *ip) +{ + struct ioc3 *ioc3 = ip->regs; + + ioc3_w_emcr(0); /* Shutup */ + ioc3_w_eier(0); /* Disable interrupts */ + (void) ioc3_r_eier(); /* Flush */ +} + +static int ioc3_open(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) { + printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); + + return -EAGAIN; + } + + ip->ehar_h = 0; + ip->ehar_l = 0; + ioc3_init(dev); + ioc3_mii_start(ip); + + netif_start_queue(dev); + return 0; +} + +static int ioc3_close(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + del_timer_sync(&ip->ioc3_timer); + + netif_stop_queue(dev); + + ioc3_stop(ip); + free_irq(dev->irq, dev); + + ioc3_free_rings(ip); + return 0; +} + +/* + * MENET cards have four IOC3 chips, which are attached to two sets of + * PCI slot resources each: the primary connections are on slots + * 0..3 and the secondaries are on 4..7 + * + * All four ethernets are brought out to connectors; six serial ports + * (a pair from each of the first three IOC3s) are brought out to + * MiniDINs; all other subdevices are left swinging in the wind, leave + * them disabled. 
+ */ + +static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot) +{ + struct pci_dev *dev = pci_get_slot(pdev->bus, PCI_DEVFN(slot, 0)); + int ret = 0; + + if (dev) { + if (dev->vendor == PCI_VENDOR_ID_SGI && + dev->device == PCI_DEVICE_ID_SGI_IOC3) + ret = 1; + pci_dev_put(dev); + } + + return ret; +} + +static int ioc3_is_menet(struct pci_dev *pdev) +{ + return pdev->bus->parent == NULL && + ioc3_adjacent_is_ioc3(pdev, 0) && + ioc3_adjacent_is_ioc3(pdev, 1) && + ioc3_adjacent_is_ioc3(pdev, 2); +} + +#ifdef CONFIG_SERIAL_8250 +/* + * Note about serial ports and consoles: + * For console output, everyone uses the IOC3 UARTA (offset 0x178) + * connected to the master node (look in ip27_setup_console() and + * ip27prom_console_write()). + * + * For serial (/dev/ttyS0 etc), we can not have hardcoded serial port + * addresses on a partitioned machine. Since we currently use the ioc3 + * serial ports, we use dynamic serial port discovery that the serial.c + * driver uses for pci/pnp ports (there is an entry for the SGI ioc3 + * boards in pci_boards[]). Unfortunately, UARTA's pio address is greater + * than UARTB's, although UARTA on o200s has traditionally been known as + * port 0. So, we just use one serial port from each ioc3 (since the + * serial driver adds addresses to get to higher ports). + * + * The first one to do a register_console becomes the preferred console + * (if there is no kernel command line console= directive). /dev/console + * (ie 5, 1) is then "aliased" into the device number returned by the + * "device" routine referred to in this console structure + * (ip27prom_console_dev). + * + * Also look in ip27-pci.c:pci_fixup_ioc3() for some comments on working + * around ioc3 oddities in this respect. + * + * The IOC3 serials use a 22MHz clock rate with an additional divider which + * can be programmed in the SCR register if the DLAB bit is set. + * + * Register to interrupt zero because we share the interrupt with + * the serial driver which we don't properly support yet. + * + * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been + * registered. + */ +static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart) +{ +#define COSMISC_CONSTANT 6 + + struct uart_8250_port port = { + .port = { + .irq = 0, + .flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF, + .iotype = UPIO_MEM, + .regshift = 0, + .uartclk = (22000000 << 1) / COSMISC_CONSTANT, + + .membase = (unsigned char __iomem *) uart, + .mapbase = (unsigned long) uart, + } + }; + unsigned char lcr; + + lcr = uart->iu_lcr; + uart->iu_lcr = lcr | UART_LCR_DLAB; + uart->iu_scr = COSMISC_CONSTANT, + uart->iu_lcr = lcr; + uart->iu_lcr; + serial8250_register_8250_port(&port); +} + +static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) +{ + /* + * We need to recognice and treat the fourth MENET serial as it + * does not have an SuperIO chip attached to it, therefore attempting + * to access it will result in bus errors. We call something an + * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3 + * in it. This is paranoid but we want to avoid blowing up on a + * showhorn PCI box that happens to have 4 IOC3 cards in it so it's + * not paranoid enough ... + */ + if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3) + return; + + /* + * Switch IOC3 to PIO mode. 
It probably already was but let's be + * paranoid + */ + ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL; + ioc3->gpcr_s; + ioc3->gppr_6 = 0; + ioc3->gppr_6; + ioc3->gppr_7 = 0; + ioc3->gppr_7; + ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN; + ioc3->sscr_a; + ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN; + ioc3->sscr_b; + /* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */ + ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | + SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | + SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | + SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR); + ioc3->sio_iec |= SIO_IR_SA_INT; + ioc3->sscr_a = 0; + ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | + SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | + SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | + SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR); + ioc3->sio_iec |= SIO_IR_SB_INT; + ioc3->sscr_b = 0; + + ioc3_8250_register(&ioc3->sregs.uarta); + ioc3_8250_register(&ioc3->sregs.uartb); +} +#endif + +static const struct net_device_ops ioc3_netdev_ops = { + .ndo_open = ioc3_open, + .ndo_stop = ioc3_close, + .ndo_start_xmit = ioc3_start_xmit, + .ndo_tx_timeout = ioc3_timeout, + .ndo_get_stats = ioc3_get_stats, + .ndo_set_rx_mode = ioc3_set_multicast_list, + .ndo_do_ioctl = ioc3_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ioc3_set_mac_address, + .ndo_change_mtu = eth_change_mtu, +}; + +static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int sw_physid1, sw_physid2; + struct net_device *dev = NULL; + struct ioc3_private *ip; + struct ioc3 *ioc3; + unsigned long ioc3_base, ioc3_size; + u32 vendor, model, rev; + int err, pci_using_dac; + + /* Configure DMA attributes. */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err < 0) { + printk(KERN_ERR "%s: Unable to obtain 64 bit DMA " + "for consistent allocations\n", pci_name(pdev)); + goto out; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + printk(KERN_ERR "%s: No usable DMA configuration, " + "aborting.\n", pci_name(pdev)); + goto out; + } + pci_using_dac = 0; + } + + if (pci_enable_device(pdev)) + return -ENODEV; + + dev = alloc_etherdev(sizeof(struct ioc3_private)); + if (!dev) { + err = -ENOMEM; + goto out_disable; + } + + if (pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + err = pci_request_regions(pdev, "ioc3"); + if (err) + goto out_free; + + SET_NETDEV_DEV(dev, &pdev->dev); + + ip = netdev_priv(dev); + + dev->irq = pdev->irq; + + ioc3_base = pci_resource_start(pdev, 0); + ioc3_size = pci_resource_len(pdev, 0); + ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size); + if (!ioc3) { + printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n", + pci_name(pdev)); + err = -ENOMEM; + goto out_res; + } + ip->regs = ioc3; + +#ifdef CONFIG_SERIAL_8250 + ioc3_serial_probe(pdev, ioc3); +#endif + + spin_lock_init(&ip->ioc3_lock); + init_timer(&ip->ioc3_timer); + + ioc3_stop(ip); + ioc3_init(dev); + + ip->pdev = pdev; + + ip->mii.phy_id_mask = 0x1f; + ip->mii.reg_num_mask = 0x1f; + ip->mii.dev = dev; + ip->mii.mdio_read = ioc3_mdio_read; + ip->mii.mdio_write = ioc3_mdio_write; + + ioc3_mii_init(ip); + + if (ip->mii.phy_id == -1) { + printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n", + pci_name(pdev)); + err = -ENODEV; + goto out_stop; + } + + ioc3_mii_start(ip); + ioc3_ssram_disc(ip); + ioc3_get_eaddr(ip); + + /* The IOC3-specific entries in the device 
structure. */ + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &ioc3_netdev_ops; + dev->ethtool_ops = &ioc3_ethtool_ops; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + dev->features = NETIF_F_IP_CSUM; + + sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); + sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); + + err = register_netdev(dev); + if (err) + goto out_stop; + + mii_check_media(&ip->mii, 1, 1); + ioc3_setup_duplex(ip); + + vendor = (sw_physid1 << 12) | (sw_physid2 >> 4); + model = (sw_physid2 >> 4) & 0x3f; + rev = sw_physid2 & 0xf; + printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, " + "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev); + printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name, + ip->emcr & EMCR_BUFSIZ ? 128 : 64); + + return 0; + +out_stop: + ioc3_stop(ip); + del_timer_sync(&ip->ioc3_timer); + ioc3_free_rings(ip); +out_res: + pci_release_regions(pdev); +out_free: + free_netdev(dev); +out_disable: + /* + * We should call pci_disable_device(pdev); here if the IOC3 wasn't + * such a weird device ... + */ +out: + return err; +} + +static void ioc3_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + + unregister_netdev(dev); + del_timer_sync(&ip->ioc3_timer); + + iounmap(ioc3); + pci_release_regions(pdev); + free_netdev(dev); + /* + * We should call pci_disable_device(pdev); here if the IOC3 wasn't + * such a weird device ... + */ +} + +static const struct pci_device_id ioc3_pci_tbl[] = { + { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl); + +static struct pci_driver ioc3_driver = { + .name = "ioc3-eth", + .id_table = ioc3_pci_tbl, + .probe = ioc3_probe, + .remove = ioc3_remove_one, +}; + +static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + unsigned long data; + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + unsigned int len; + struct ioc3_etxd *desc; + uint32_t w0 = 0; + int produce; + + /* + * IOC3 has a fairly simple minded checksumming hardware which simply + * adds up the 1's complement checksum for the entire packet and + * inserts it at an offset which can be specified in the descriptor + * into the transmit packet. This means we have to compensate for the + * MAC header which should not be summed and the TCP/UDP pseudo headers + * manually. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ih = ip_hdr(skb); + const int proto = ntohs(ih->protocol); + unsigned int csoff; + uint32_t csum, ehsum; + uint16_t *eh; + + /* The MAC header. skb->mac seem the logic approach + to find the MAC header - except it's a NULL pointer ... */ + eh = (uint16_t *) skb->data; + + /* Sum up dest addr, src addr and protocol */ + ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6]; + + /* Fold ehsum. can't use csum_fold which negates also ... 
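+ * e.g. (illustrative) ehsum = 0x1f0ea folds to (0x1f0ea & 0xffff) +
+ * (0x1f0ea >> 16) = 0xf0ea + 0x1 = 0xf0eb; the second fold below picks
+ * up any carry produced by the first.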
*/ + ehsum = (ehsum & 0xffff) + (ehsum >> 16); + ehsum = (ehsum & 0xffff) + (ehsum >> 16); + + /* Skip IP header; it's sum is always zero and was + already filled in by ip_output.c */ + csum = csum_tcpudp_nofold(ih->saddr, ih->daddr, + ih->tot_len - (ih->ihl << 2), + proto, 0xffff ^ ehsum); + + csum = (csum & 0xffff) + (csum >> 16); /* Fold again */ + csum = (csum & 0xffff) + (csum >> 16); + + csoff = ETH_HLEN + (ih->ihl << 2); + if (proto == IPPROTO_UDP) { + csoff += offsetof(struct udphdr, check); + udp_hdr(skb)->check = csum; + } + if (proto == IPPROTO_TCP) { + csoff += offsetof(struct tcphdr, check); + tcp_hdr(skb)->check = csum; + } + + w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); + } + + spin_lock_irq(&ip->ioc3_lock); + + data = (unsigned long) skb->data; + len = skb->len; + + produce = ip->tx_pi; + desc = &ip->txr[produce]; + + if (len <= 104) { + /* Short packet, let's copy it directly into the ring. */ + skb_copy_from_linear_data(skb, desc->data, skb->len); + if (len < ETH_ZLEN) { + /* Very short packet, pad with zeros at the end. */ + memset(desc->data + len, 0, ETH_ZLEN - len); + len = ETH_ZLEN; + } + desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0); + desc->bufcnt = cpu_to_be32(len); + } else if ((data ^ (data + len - 1)) & 0x4000) { + unsigned long b2 = (data | 0x3fffUL) + 1UL; + unsigned long s1 = b2 - data; + unsigned long s2 = data + len - b2; + + desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | + ETXD_B1V | ETXD_B2V | w0); + desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | + (s2 << ETXD_B2CNT_SHIFT)); + desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1)); + desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1)); + } else { + /* Normal sized packet that doesn't cross a page boundary. */ + desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); + desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); + desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1)); + } + + BARRIER(); + + ip->tx_skbs[produce] = skb; /* Remember skb */ + produce = (produce + 1) & 127; + ip->tx_pi = produce; + ioc3_w_etpir(produce << 7); /* Fire ... 
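+ * (The producer index is written shifted by 7 since each TX descriptor
+ * is 128 bytes; presumably the register takes a byte offset into the
+ * ring.)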
*/ + + ip->txqlen++; + + if (ip->txqlen >= 127) + netif_stop_queue(dev); + + spin_unlock_irq(&ip->ioc3_lock); + + return NETDEV_TX_OK; +} + +static void ioc3_timeout(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + + spin_lock_irq(&ip->ioc3_lock); + + ioc3_stop(ip); + ioc3_init(dev); + ioc3_mii_init(ip); + ioc3_mii_start(ip); + + spin_unlock_irq(&ip->ioc3_lock); + + netif_wake_queue(dev); +} + +/* + * Given a multicast ethernet address, this routine calculates the + * address's bit index in the logical address filter mask + */ + +static inline unsigned int ioc3_hash(const unsigned char *addr) +{ + unsigned int temp = 0; + u32 crc; + int bits; + + crc = ether_crc_le(ETH_ALEN, addr); + + crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */ + for (bits = 6; --bits >= 0; ) { + temp <<= 1; + temp |= (crc & 0x1); + crc >>= 1; + } + + return temp; +} + +static void ioc3_get_drvinfo (struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct ioc3_private *ip = netdev_priv(dev); + + strlcpy(info->driver, IOC3_NAME, sizeof(info->driver)); + strlcpy(info->version, IOC3_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info)); +} + +static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = mii_ethtool_gset(&ip->mii, cmd); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static int ioc3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = mii_ethtool_sset(&ip->mii, cmd); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static int ioc3_nway_reset(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = mii_nway_restart(&ip->mii); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static u32 ioc3_get_link(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = mii_link_ok(&ip->mii); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static const struct ethtool_ops ioc3_ethtool_ops = { + .get_drvinfo = ioc3_get_drvinfo, + .get_settings = ioc3_get_settings, + .set_settings = ioc3_set_settings, + .nway_reset = ioc3_nway_reset, + .get_link = ioc3_get_link, +}; + +static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct ioc3_private *ip = netdev_priv(dev); + int rc; + + spin_lock_irq(&ip->ioc3_lock); + rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL); + spin_unlock_irq(&ip->ioc3_lock); + + return rc; +} + +static void ioc3_set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + struct ioc3_private *ip = netdev_priv(dev); + struct ioc3 *ioc3 = ip->regs; + u64 ehar = 0; + + netif_stop_queue(dev); /* Lock out others. */ + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + ip->emcr |= EMCR_PROMISC; + ioc3_w_emcr(ip->emcr); + (void) ioc3_r_emcr(); + } else { + ip->emcr &= ~EMCR_PROMISC; + ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */ + (void) ioc3_r_emcr(); + + if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > 64)) { + /* Too many for hashing to make sense or we want all + multicast packets anyway, so skip computing all the + hashes and just accept all packets. 
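+ * (Accomplished by setting both halves of EHAR to all ones so every
+ * hash bucket matches.)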
*/ + ip->ehar_h = 0xffffffff; + ip->ehar_l = 0xffffffff; + } else { + netdev_for_each_mc_addr(ha, dev) { + ehar |= (1UL << ioc3_hash(ha->addr)); + } + ip->ehar_h = ehar >> 32; + ip->ehar_l = ehar & 0xffffffff; + } + ioc3_w_ehar_h(ip->ehar_h); + ioc3_w_ehar_l(ip->ehar_l); + } + + netif_wake_queue(dev); /* Let us get going again. */ +} + +module_pci_driver(ioc3_driver); +MODULE_AUTHOR("Ralf Baechle "); +MODULE_DESCRIPTION("SGI IOC3 Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c new file mode 100644 index 000000000..5eac523b4 --- /dev/null +++ b/drivers/net/ethernet/sgi/meth.c @@ -0,0 +1,881 @@ +/* + * meth.c -- O2 Builtin 10/100 Ethernet driver + * + * Copyright (C) 2001-2003 Ilya Volynets + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include /* struct device, et al */ +#include /* struct device, and other headers */ +#include /* eth_type_trans */ +#include /* struct iphdr */ +#include /* struct tcphdr */ +#include +#include /* MII definitions */ +#include + +#include +#include + +#include + +#include "meth.h" + +#ifndef MFE_DEBUG +#define MFE_DEBUG 0 +#endif + +#if MFE_DEBUG>=1 +#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args) +#define MFE_RX_DEBUG 2 +#else +#define DPRINTK(str,args...) +#define MFE_RX_DEBUG 0 +#endif + + +static const char *meth_str="SGI O2 Fast Ethernet"; + +/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */ +#define TX_TIMEOUT (400*HZ/1000) + +static int timeout = TX_TIMEOUT; +module_param(timeout, int, 0); + +/* + * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC. + */ +#define METH_MCF_LIMIT 32 + +/* + * This structure is private to each device. It is used to pass + * packets in and out, so there is place for a packet + */ +struct meth_private { + /* in-memory copy of MAC Control register */ + u64 mac_ctrl; + + /* in-memory copy of DMA Control register */ + unsigned long dma_ctrl; + /* address of PHY, used by mdio_* functions, initialized in mdio_probe */ + unsigned long phy_addr; + tx_packet *tx_ring; + dma_addr_t tx_ring_dma; + struct sk_buff *tx_skbs[TX_RING_ENTRIES]; + dma_addr_t tx_skb_dmas[TX_RING_ENTRIES]; + unsigned long tx_read, tx_write, tx_count; + + rx_packet *rx_ring[RX_RING_ENTRIES]; + dma_addr_t rx_ring_dmas[RX_RING_ENTRIES]; + struct sk_buff *rx_skbs[RX_RING_ENTRIES]; + unsigned long rx_write; + + /* Multicast filter. 
*/ + u64 mcast_filter; + + spinlock_t meth_lock; +}; + +static void meth_tx_timeout(struct net_device *dev); +static irqreturn_t meth_interrupt(int irq, void *dev_id); + +/* global, initialized in ip32-setup.c */ +char o2meth_eaddr[8]={0,0,0,0,0,0,0,0}; + +static inline void load_eaddr(struct net_device *dev) +{ + int i; + u64 macaddr; + + DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr); + macaddr = 0; + for (i = 0; i < 6; i++) + macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); + + mace->eth.mac_addr = macaddr; +} + +/* + * Waits for BUSY status of mdio bus to clear + */ +#define WAIT_FOR_PHY(___rval) \ + while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \ + udelay(25); \ + } +/*read phy register, return value read */ +static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg) +{ + unsigned long rval; + WAIT_FOR_PHY(rval); + mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f); + udelay(25); + mace->eth.phy_trans_go = 1; + udelay(25); + WAIT_FOR_PHY(rval); + return rval & MDIO_DATA_MASK; +} + +static int mdio_probe(struct meth_private *priv) +{ + int i; + unsigned long p2, p3, flags; + /* check if phy is detected already */ + if(priv->phy_addr>=0&&priv->phy_addr<32) + return 0; + spin_lock_irqsave(&priv->meth_lock, flags); + for (i=0;i<32;++i){ + priv->phy_addr=i; + p2=mdio_read(priv,2); + p3=mdio_read(priv,3); +#if MFE_DEBUG>=2 + switch ((p2<<12)|(p3>>4)){ + case PHY_QS6612X: + DPRINTK("PHY is QS6612X\n"); + break; + case PHY_ICS1889: + DPRINTK("PHY is ICS1889\n"); + break; + case PHY_ICS1890: + DPRINTK("PHY is ICS1890\n"); + break; + case PHY_DP83840: + DPRINTK("PHY is DP83840\n"); + break; + } +#endif + if(p2!=0xffff&&p2!=0x0000){ + DPRINTK("PHY code: %x\n",(p2<<12)|(p3>>4)); + break; + } + } + spin_unlock_irqrestore(&priv->meth_lock, flags); + if(priv->phy_addr<32) { + return 0; + } + DPRINTK("Oopsie! PHY is not known!\n"); + priv->phy_addr=-1; + return -ENODEV; +} + +static void meth_check_link(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long mii_advertising = mdio_read(priv, 4); + unsigned long mii_partner = mdio_read(priv, 5); + unsigned long negotiated = mii_advertising & mii_partner; + unsigned long duplex, speed; + + if (mii_partner == 0xffff) + return; + + speed = (negotiated & 0x0380) ? METH_100MBIT : 0; + duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ? + METH_PHY_FDX : 0; + + if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) { + DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half"); + if (duplex) + priv->mac_ctrl |= METH_PHY_FDX; + else + priv->mac_ctrl &= ~METH_PHY_FDX; + mace->eth.mac_ctrl = priv->mac_ctrl; + } + + if ((priv->mac_ctrl & METH_100MBIT) ^ speed) { + DPRINTK("Setting %dMbs mode\n", speed ? 
100 : 10); + if (duplex) + priv->mac_ctrl |= METH_100MBIT; + else + priv->mac_ctrl &= ~METH_100MBIT; + mace->eth.mac_ctrl = priv->mac_ctrl; + } +} + + +static int meth_init_tx_ring(struct meth_private *priv) +{ + /* Init TX ring */ + priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, + &priv->tx_ring_dma, GFP_ATOMIC); + if (!priv->tx_ring) + return -ENOMEM; + + priv->tx_count = priv->tx_read = priv->tx_write = 0; + mace->eth.tx_ring_base = priv->tx_ring_dma; + /* Now init skb save area */ + memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs)); + memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas)); + return 0; +} + +static int meth_init_rx_ring(struct meth_private *priv) +{ + int i; + + for (i = 0; i < RX_RING_ENTRIES; i++) { + priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0); + /* 8byte status vector + 3quad padding + 2byte padding, + * to put data on 64bit aligned boundary */ + skb_reserve(priv->rx_skbs[i],METH_RX_HEAD); + priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); + /* I'll need to re-sync it after each RX */ + priv->rx_ring_dmas[i] = + dma_map_single(NULL, priv->rx_ring[i], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + mace->eth.rx_fifo = priv->rx_ring_dmas[i]; + } + priv->rx_write = 0; + return 0; +} +static void meth_free_tx_ring(struct meth_private *priv) +{ + int i; + + /* Remove any pending skb */ + for (i = 0; i < TX_RING_ENTRIES; i++) { + if (priv->tx_skbs[i]) + dev_kfree_skb(priv->tx_skbs[i]); + priv->tx_skbs[i] = NULL; + } + dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring, + priv->tx_ring_dma); +} + +/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */ +static void meth_free_rx_ring(struct meth_private *priv) +{ + int i; + + for (i = 0; i < RX_RING_ENTRIES; i++) { + dma_unmap_single(NULL, priv->rx_ring_dmas[i], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + priv->rx_ring[i] = 0; + priv->rx_ring_dmas[i] = 0; + kfree_skb(priv->rx_skbs[i]); + } +} + +int meth_reset(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + + /* Reset card */ + mace->eth.mac_ctrl = SGI_MAC_RESET; + udelay(1); + mace->eth.mac_ctrl = 0; + udelay(25); + + /* Load ethernet address */ + load_eaddr(dev); + /* Should load some "errata", but later */ + + /* Check for device */ + if (mdio_probe(priv) < 0) { + DPRINTK("Unable to find PHY\n"); + return -ENODEV; + } + + /* Initial mode: 10 | Half-duplex | Accept normal packets */ + priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG; + if (dev->flags & IFF_PROMISC) + priv->mac_ctrl |= METH_PROMISC; + mace->eth.mac_ctrl = priv->mac_ctrl; + + /* Autonegotiate speed and duplex mode */ + meth_check_link(dev); + + /* Now set dma control, but don't enable DMA, yet */ + priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) | + (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT); + mace->eth.dma_ctrl = priv->dma_ctrl; + + return 0; +} + +/*============End Helper Routines=====================*/ + +/* + * Open and close + */ +static int meth_open(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + int ret; + + priv->phy_addr = -1; /* No PHY is known yet... 
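+ * mdio_probe(), reached via meth_reset() just below, scans MII
+ * addresses 0-31 and fills in priv->phy_addr on success.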
*/ + + /* Initialize the hardware */ + ret = meth_reset(dev); + if (ret < 0) + return ret; + + /* Allocate the ring buffers */ + ret = meth_init_tx_ring(priv); + if (ret < 0) + return ret; + ret = meth_init_rx_ring(priv); + if (ret < 0) + goto out_free_tx_ring; + + ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev); + if (ret) { + printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); + goto out_free_rx_ring; + } + + /* Start DMA */ + priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/ + METH_DMA_RX_EN | METH_DMA_RX_INT_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + + DPRINTK("About to start queue\n"); + netif_start_queue(dev); + + return 0; + +out_free_rx_ring: + meth_free_rx_ring(priv); +out_free_tx_ring: + meth_free_tx_ring(priv); + + return ret; +} + +static int meth_release(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + + DPRINTK("Stopping queue\n"); + netif_stop_queue(dev); /* can't transmit any more */ + /* shut down DMA */ + priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN | + METH_DMA_RX_EN | METH_DMA_RX_INT_EN); + mace->eth.dma_ctrl = priv->dma_ctrl; + free_irq(dev->irq, dev); + meth_free_tx_ring(priv); + meth_free_rx_ring(priv); + + return 0; +} + +/* + * Receive a packet: retrieve, encapsulate and pass over to upper levels + */ +static void meth_rx(struct net_device* dev, unsigned long int_status) +{ + struct sk_buff *skb; + unsigned long status, flags; + struct meth_private *priv = netdev_priv(dev); + unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8; + + spin_lock_irqsave(&priv->meth_lock, flags); + priv->dma_ctrl &= ~METH_DMA_RX_INT_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + spin_unlock_irqrestore(&priv->meth_lock, flags); + + if (int_status & METH_INT_RX_UNDERFLOW) { + fifo_rptr = (fifo_rptr - 1) & 0x0f; + } + while (priv->rx_write != fifo_rptr) { + dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + status = priv->rx_ring[priv->rx_write]->status.raw; +#if MFE_DEBUG + if (!(status & METH_RX_ST_VALID)) { + DPRINTK("Not received? status=%016lx\n",status); + } +#endif + if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) { + int len = (status & 0xffff) - 4; /* omit CRC */ + /* length sanity check */ + if (len < 60 || len > 1518) { + printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2Lx.\n", + dev->name, priv->rx_write, + priv->rx_ring[priv->rx_write]->status.raw); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + skb = priv->rx_skbs[priv->rx_write]; + } else { + skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); + if (!skb) { + /* Ouch! No memory! 
Drop packet on the floor */ + DPRINTK("No mem: dropping packet\n"); + dev->stats.rx_dropped++; + skb = priv->rx_skbs[priv->rx_write]; + } else { + struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; + /* 8byte status vector + 3quad padding + 2byte padding, + * to put data on 64bit aligned boundary */ + skb_reserve(skb, METH_RX_HEAD); + /* Write metadata, and then pass to the receive level */ + skb_put(skb_c, len); + priv->rx_skbs[priv->rx_write] = skb; + skb_c->protocol = eth_type_trans(skb_c, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + netif_rx(skb_c); + } + } + } else { + dev->stats.rx_errors++; + skb=priv->rx_skbs[priv->rx_write]; +#if MFE_DEBUG>0 + printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); + if(status&METH_RX_ST_RCV_CODE_VIOLATION) + printk(KERN_WARNING "Receive Code Violation\n"); + if(status&METH_RX_ST_CRC_ERR) + printk(KERN_WARNING "CRC error\n"); + if(status&METH_RX_ST_INV_PREAMBLE_CTX) + printk(KERN_WARNING "Invalid Preamble Context\n"); + if(status&METH_RX_ST_LONG_EVT_SEEN) + printk(KERN_WARNING "Long Event Seen...\n"); + if(status&METH_RX_ST_BAD_PACKET) + printk(KERN_WARNING "Bad Packet\n"); + if(status&METH_RX_ST_CARRIER_EVT_SEEN) + printk(KERN_WARNING "Carrier Event Seen\n"); +#endif + } + priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; + priv->rx_ring[priv->rx_write]->status.raw = 0; + priv->rx_ring_dmas[priv->rx_write] = + dma_map_single(NULL, priv->rx_ring[priv->rx_write], + METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); + mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; + ADVANCE_RX_PTR(priv->rx_write); + } + spin_lock_irqsave(&priv->meth_lock, flags); + /* In case there was underflow, and Rx DMA was disabled */ + priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + mace->eth.int_stat = METH_INT_RX_THRESHOLD; + spin_unlock_irqrestore(&priv->meth_lock, flags); +} + +static int meth_tx_full(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + + return priv->tx_count >= TX_RING_ENTRIES - 1; +} + +static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long status, flags; + struct sk_buff *skb; + unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16; + + spin_lock_irqsave(&priv->meth_lock, flags); + + /* Stop DMA notification */ + priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); + mace->eth.dma_ctrl = priv->dma_ctrl; + + while (priv->tx_read != rptr) { + skb = priv->tx_skbs[priv->tx_read]; + status = priv->tx_ring[priv->tx_read].header.raw; +#if MFE_DEBUG>=1 + if (priv->tx_read == priv->tx_write) + DPRINTK("Auchi! 
tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr); +#endif + if (status & METH_TX_ST_DONE) { + if (status & METH_TX_ST_SUCCESS){ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + } else { + dev->stats.tx_errors++; +#if MFE_DEBUG>=1 + DPRINTK("TX error: status=%016lx <",status); + if(status & METH_TX_ST_SUCCESS) + printk(" SUCCESS"); + if(status & METH_TX_ST_TOOLONG) + printk(" TOOLONG"); + if(status & METH_TX_ST_UNDERRUN) + printk(" UNDERRUN"); + if(status & METH_TX_ST_EXCCOLL) + printk(" EXCCOLL"); + if(status & METH_TX_ST_DEFER) + printk(" DEFER"); + if(status & METH_TX_ST_LATECOLL) + printk(" LATECOLL"); + printk(" >\n"); +#endif + } + } else { + DPRINTK("RPTR points us here, but packet not done?\n"); + break; + } + dev_kfree_skb_irq(skb); + priv->tx_skbs[priv->tx_read] = NULL; + priv->tx_ring[priv->tx_read].header.raw = 0; + priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1); + priv->tx_count--; + } + + /* wake up queue if it was stopped */ + if (netif_queue_stopped(dev) && !meth_tx_full(dev)) { + netif_wake_queue(dev); + } + + mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT; + spin_unlock_irqrestore(&priv->meth_lock, flags); +} + +static void meth_error(struct net_device* dev, unsigned status) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long flags; + + printk(KERN_WARNING "meth: error status: 0x%08x\n",status); + /* check for errors too... */ + if (status & (METH_INT_TX_LINK_FAIL)) + printk(KERN_WARNING "meth: link failure\n"); + /* Should I do full reset in this case? */ + if (status & (METH_INT_MEM_ERROR)) + printk(KERN_WARNING "meth: memory error\n"); + if (status & (METH_INT_TX_ABORT)) + printk(KERN_WARNING "meth: aborted\n"); + if (status & (METH_INT_RX_OVERFLOW)) + printk(KERN_WARNING "meth: Rx overflow\n"); + if (status & (METH_INT_RX_UNDERFLOW)) { + printk(KERN_WARNING "meth: Rx underflow\n"); + spin_lock_irqsave(&priv->meth_lock, flags); + mace->eth.int_stat = METH_INT_RX_UNDERFLOW; + /* more underflow interrupts will be delivered, + * effectively throwing us into an infinite loop. + * Thus I stop processing Rx in this case. */ + priv->dma_ctrl &= ~METH_DMA_RX_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + DPRINTK("Disabled meth Rx DMA temporarily\n"); + spin_unlock_irqrestore(&priv->meth_lock, flags); + } + mace->eth.int_stat = METH_INT_ERROR; +} + +/* + * The typical interrupt entry point + */ +static irqreturn_t meth_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct meth_private *priv = netdev_priv(dev); + unsigned long status; + + status = mace->eth.int_stat; + while (status & 0xff) { + /* First handle errors - if we get Rx underflow, + * Rx DMA will be disabled, and Rx handler will reenable + * it. I don't think it's possible to get Rx underflow, + * without getting Rx interrupt */ + if (status & METH_INT_ERROR) { + meth_error(dev, status); + } + if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) { + /* a transmission is over: free the skb */ + meth_tx_cleanup(dev, status); + } + if (status & METH_INT_RX_THRESHOLD) { + if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN)) + break; + /* send it to meth_rx for handling */ + meth_rx(dev, status); + } + status = mace->eth.int_stat; + } + + return IRQ_HANDLED; +} + +/* + * Transmits packets that fit into TX descriptor (are <=120B) + */ +static void meth_tx_short_prepare(struct meth_private *priv, + struct sk_buff *skb) +{ + tx_packet *desc = &priv->tx_ring[priv->tx_write]; + int len = (skb->len < ETH_ZLEN) ? 
ETH_ZLEN : skb->len; + + desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); + /* maybe I should set whole thing to 0 first... */ + skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); + if (skb->len < len) + memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); +} +#define TX_CATBUF1 BIT(25) +static void meth_tx_1page_prepare(struct meth_private *priv, + struct sk_buff *skb) +{ + tx_packet *desc = &priv->tx_ring[priv->tx_write]; + void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7); + int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data); + int buffer_len = skb->len - unaligned_len; + dma_addr_t catbuf; + + desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1); + + /* unaligned part */ + if (unaligned_len) { + skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), + unaligned_len); + desc->header.raw |= (128 - unaligned_len) << 16; + } + + /* first page */ + catbuf = dma_map_single(NULL, buffer_data, buffer_len, + DMA_TO_DEVICE); + desc->data.cat_buf[0].form.start_addr = catbuf >> 3; + desc->data.cat_buf[0].form.len = buffer_len - 1; +} +#define TX_CATBUF2 BIT(26) +static void meth_tx_2page_prepare(struct meth_private *priv, + struct sk_buff *skb) +{ + tx_packet *desc = &priv->tx_ring[priv->tx_write]; + void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7); + void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data); + int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data); + int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data); + int buffer2_len = skb->len - buffer1_len - unaligned_len; + dma_addr_t catbuf1, catbuf2; + + desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); + /* unaligned part */ + if (unaligned_len){ + skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), + unaligned_len); + desc->header.raw |= (128 - unaligned_len) << 16; + } + + /* first page */ + catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len, + DMA_TO_DEVICE); + desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3; + desc->data.cat_buf[0].form.len = buffer1_len - 1; + /* second page */ + catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len, + DMA_TO_DEVICE); + desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3; + desc->data.cat_buf[1].form.len = buffer2_len - 1; +} + +static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) +{ + /* Remember the skb, so we can free it at interrupt time */ + priv->tx_skbs[priv->tx_write] = skb; + if (skb->len <= 120) { + /* Whole packet fits into descriptor */ + meth_tx_short_prepare(priv, skb); + } else if (PAGE_ALIGN((unsigned long)skb->data) != + PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) { + /* Packet crosses page boundary */ + meth_tx_2page_prepare(priv, skb); + } else { + /* Packet is in one page */ + meth_tx_1page_prepare(priv, skb); + } + priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1); + mace->eth.tx_info = priv->tx_write; + priv->tx_count++; +} + +/* + * Transmit a packet (called by the kernel) + */ +static int meth_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->meth_lock, flags); + /* Stop DMA notification */ + priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); + mace->eth.dma_ctrl = priv->dma_ctrl; + + meth_add_to_tx_ring(priv, skb); + dev->trans_start = jiffies; /* save the timestamp */ 
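+	/* A rough worked example of the descriptor encoding built above
+	 * (illustration only, not driver code): for a minimum-size 60-byte
+	 * frame the "short" path copies the payload so that it ends at
+	 * byte 128 of the 128-byte descriptor, i.e. at offset
+	 * 120 - 60 == 60 of the data area, and the header word becomes
+	 *
+	 *	METH_TX_CMD_INT_EN | (60 - 1) | ((128 - 60) << 16)
+	 *
+	 * so the length-minus-one (59) sits in the low 16 bits and the
+	 * data offset of 68, measured from the start of the descriptor,
+	 * in bits 16..22.  Longer frames instead point cat_buf[0] (and,
+	 * across a page boundary, cat_buf[1]) at the skb data, with the
+	 * DMA addresses stored right-shifted by 3, i.e. in 8-byte units.
+	 */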
+ + /* If TX ring is full, tell the upper layer to stop sending packets */ + if (meth_tx_full(dev)) { + printk(KERN_DEBUG "TX full: stopping\n"); + netif_stop_queue(dev); + } + + /* Restart DMA notification */ + priv->dma_ctrl |= METH_DMA_TX_INT_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + + spin_unlock_irqrestore(&priv->meth_lock, flags); + + return NETDEV_TX_OK; +} + +/* + * Deal with a transmit timeout. + */ +static void meth_tx_timeout(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long flags; + + printk(KERN_WARNING "%s: transmit timed out\n", dev->name); + + /* Protect against concurrent rx interrupts */ + spin_lock_irqsave(&priv->meth_lock,flags); + + /* Try to reset the interface. */ + meth_reset(dev); + + dev->stats.tx_errors++; + + /* Clear all rings */ + meth_free_tx_ring(priv); + meth_free_rx_ring(priv); + meth_init_tx_ring(priv); + meth_init_rx_ring(priv); + + /* Restart dma */ + priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN; + mace->eth.dma_ctrl = priv->dma_ctrl; + + /* Enable interrupt */ + spin_unlock_irqrestore(&priv->meth_lock, flags); + + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * Ioctl commands + */ +static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + /* XXX Not yet implemented */ + switch(cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } +} + +static void meth_set_rx_mode(struct net_device *dev) +{ + struct meth_private *priv = netdev_priv(dev); + unsigned long flags; + + netif_stop_queue(dev); + spin_lock_irqsave(&priv->meth_lock, flags); + priv->mac_ctrl &= ~METH_PROMISC; + + if (dev->flags & IFF_PROMISC) { + priv->mac_ctrl |= METH_PROMISC; + priv->mcast_filter = 0xffffffffffffffffUL; + } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) || + (dev->flags & IFF_ALLMULTI)) { + priv->mac_ctrl |= METH_ACCEPT_AMCAST; + priv->mcast_filter = 0xffffffffffffffffUL; + } else { + struct netdev_hw_addr *ha; + priv->mac_ctrl |= METH_ACCEPT_MCAST; + + netdev_for_each_mc_addr(ha, dev) + set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26), + (volatile unsigned long *)&priv->mcast_filter); + } + + /* Write the changes to the chip registers. */ + mace->eth.mac_ctrl = priv->mac_ctrl; + mace->eth.mcast_filter = priv->mcast_filter; + + /* Done! */ + spin_unlock_irqrestore(&priv->meth_lock, flags); + netif_wake_queue(dev); +} + +static const struct net_device_ops meth_netdev_ops = { + .ndo_open = meth_open, + .ndo_stop = meth_release, + .ndo_start_xmit = meth_tx, + .ndo_do_ioctl = meth_ioctl, + .ndo_tx_timeout = meth_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = meth_set_rx_mode, +}; + +/* + * The init function. + */ +static int meth_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct meth_private *priv; + int err; + + dev = alloc_etherdev(sizeof(struct meth_private)); + if (!dev) + return -ENOMEM; + + dev->netdev_ops = &meth_netdev_ops; + dev->watchdog_timeo = timeout; + dev->irq = MACE_ETHERNET_IRQ; + dev->base_addr = (unsigned long)&mace->eth; + memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN); + + priv = netdev_priv(dev); + spin_lock_init(&priv->meth_lock); + SET_NETDEV_DEV(dev, &pdev->dev); + + err = register_netdev(dev); + if (err) { + free_netdev(dev); + return err; + } + + printk(KERN_INFO "%s: SGI MACE Ethernet rev. 
%d\n", + dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29)); + return 0; +} + +static int __exit meth_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + unregister_netdev(dev); + free_netdev(dev); + + return 0; +} + +static struct platform_driver meth_driver = { + .probe = meth_probe, + .remove = __exit_p(meth_remove), + .driver = { + .name = "meth", + } +}; + +module_platform_driver(meth_driver); + +MODULE_AUTHOR("Ilya Volynets "); +MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:meth"); diff --git a/drivers/net/ethernet/sgi/meth.h b/drivers/net/ethernet/sgi/meth.h new file mode 100644 index 000000000..5b145c6ba --- /dev/null +++ b/drivers/net/ethernet/sgi/meth.h @@ -0,0 +1,243 @@ + +/* + * snull.h -- definitions for the network module + * + * Copyright (C) 2001 Alessandro Rubini and Jonathan Corbet + * Copyright (C) 2001 O'Reilly & Associates + * + * The source code in this file can be freely used, adapted, + * and redistributed in source or binary form, so long as an + * acknowledgment appears in derived source files. The citation + * should list that the code comes from the book "Linux Device + * Drivers" by Alessandro Rubini and Jonathan Corbet, published + * by O'Reilly & Associates. No warranty is attached; + * we cannot take responsibility for errors or fitness for use. + */ + +/* version dependencies have been confined to a separate file */ + +/* Tunable parameters */ +#define TX_RING_ENTRIES 64 /* 64-512?*/ + +#define RX_RING_ENTRIES 16 /* Do not change */ +/* Internal constants */ +#define TX_RING_BUFFER_SIZE (TX_RING_ENTRIES*sizeof(tx_packet)) +#define RX_BUFFER_SIZE 1546 /* ethenet packet size */ +#define METH_RX_BUFF_SIZE 4096 +#define METH_RX_HEAD 34 /* status + 3 quad garbage-fill + 2 byte zero-pad */ +#define RX_BUFFER_OFFSET (sizeof(rx_status_vector)+2) /* staus vector + 2 bytes of padding */ +#define RX_BUCKET_SIZE 256 + +/* For more detailed explanations of what each field menas, + see Nick's great comments to #defines below (or docs, if + you are lucky enough toget hold of them :)*/ + +/* tx status vector is written over tx command header upon + dma completion. */ + +typedef struct tx_status_vector { + u64 sent:1; /* always set to 1...*/ + u64 pad0:34;/* always set to 0 */ + u64 flags:9; /*I'm too lazy to specify each one separately at the moment*/ + u64 col_retry_cnt:4; /*collision retry count*/ + u64 len:16; /*Transmit length in bytes*/ +} tx_status_vector; + +/* + * Each packet is 128 bytes long. + * It consists of header, 0-3 concatination + * buffer pointers and up to 120 data bytes. 
+ */ +typedef struct tx_packet_hdr { + u64 pad1:36; /*should be filled with 0 */ + u64 cat_ptr3_valid:1, /*Concatination pointer valid flags*/ + cat_ptr2_valid:1, + cat_ptr1_valid:1; + u64 tx_int_flag:1; /*Generate TX intrrupt when packet has been sent*/ + u64 term_dma_flag:1; /*Terminate transmit DMA on transmit abort conditions*/ + u64 data_offset:7; /*Starting byte offset in ring data block*/ + u64 data_len:16; /*Length of valid data in bytes-1*/ +} tx_packet_hdr; +typedef union tx_cat_ptr { + struct { + u64 pad2:16; /* should be 0 */ + u64 len:16; /*length of buffer data - 1*/ + u64 start_addr:29; /*Physical starting address*/ + u64 pad1:3; /* should be zero */ + } form; + u64 raw; +} tx_cat_ptr; + +typedef struct tx_packet { + union { + tx_packet_hdr header; + tx_status_vector res; + u64 raw; + }header; + union { + tx_cat_ptr cat_buf[3]; + char dt[120]; + } data; +} tx_packet; + +typedef union rx_status_vector { + volatile struct { + u64 pad1:1;/*fill it with ones*/ + u64 pad2:15;/*fill with 0*/ + u64 ip_chk_sum:16; + u64 seq_num:5; + u64 mac_addr_match:1; + u64 mcast_addr_match:1; + u64 carrier_event_seen:1; + u64 bad_packet:1; + u64 long_event_seen:1; + u64 invalid_preamble:1; + u64 broadcast:1; + u64 multicast:1; + u64 crc_error:1; + u64 huh:1;/*???*/ + u64 rx_code_violation:1; + u64 rx_len:16; + } parsed; + volatile u64 raw; +} rx_status_vector; + +typedef struct rx_packet { + rx_status_vector status; + u64 pad[3]; /* For whatever reason, there needs to be 4 double-word offset */ + u16 pad2; + char buf[METH_RX_BUFF_SIZE-sizeof(rx_status_vector)-3*sizeof(u64)-sizeof(u16)];/* data */ +} rx_packet; + +#define TX_INFO_RPTR 0x00FF0000 +#define TX_INFO_WPTR 0x000000FF + + /* Bits in METH_MAC */ + +#define SGI_MAC_RESET BIT(0) /* 0: MAC110 active in run mode, 1: Global reset signal to MAC110 core is active */ +#define METH_PHY_FDX BIT(1) /* 0: Disable full duplex, 1: Enable full duplex */ +#define METH_PHY_LOOP BIT(2) /* 0: Normal operation, follows 10/100mbit and M10T/MII select, 1: loops internal MII bus */ + /* selects ignored */ +#define METH_100MBIT BIT(3) /* 0: 10meg mode, 1: 100meg mode */ +#define METH_PHY_MII BIT(4) /* 0: MII selected, 1: SIA selected */ + /* Note: when loopback is set this bit becomes collision control. Setting this bit will */ + /* cause a collision to be reported. 
*/ + + /* Bits 5 and 6 are used to determine the Destination address filter mode */ +#define METH_ACCEPT_MY 0 /* 00: Accept PHY address only */ +#define METH_ACCEPT_MCAST 0x20 /* 01: Accept physical, broadcast, and multicast filter matches only */ +#define METH_ACCEPT_AMCAST 0x40 /* 10: Accept physical, broadcast, and all multicast packets */ +#define METH_PROMISC 0x60 /* 11: Promiscious mode */ + +#define METH_PHY_LINK_FAIL BIT(7) /* 0: Link failure detection disabled, 1: Hardware scans for link failure in PHY */ + +#define METH_MAC_IPG 0x1ffff00 + +#define METH_DEFAULT_IPG ((17<<15) | (11<<22) | (21<<8)) + /* 0x172e5c00 */ /* 23, 23, 23 */ /*0x54A9500 *//*21,21,21*/ + /* Bits 8 through 14 are used to determine Inter-Packet Gap between "Back to Back" packets */ + /* The gap depends on the clock speed of the link, 80ns per increment for 100baseT, 800ns */ + /* per increment for 10BaseT */ + + /* Bits 15 through 21 are used to determine IPGR1 */ + + /* Bits 22 through 28 are used to determine IPGR2 */ + +#define METH_REV_SHIFT 29 /* Bits 29 through 31 are used to determine the revision */ + /* 000: Initial revision */ + /* 001: First revision, Improved TX concatenation */ + + +/* DMA control bits */ +#define METH_RX_OFFSET_SHIFT 12 /* Bits 12:14 of DMA control register indicate starting offset of packet data for RX operation */ +#define METH_RX_DEPTH_SHIFT 4 /* Bits 8:4 define RX fifo depth -- when # of RX fifo entries != depth, interrupt is generted */ + +#define METH_DMA_TX_EN BIT(1) /* enable TX DMA */ +#define METH_DMA_TX_INT_EN BIT(0) /* enable TX Buffer Empty interrupt */ +#define METH_DMA_RX_EN BIT(15) /* Enable RX */ +#define METH_DMA_RX_INT_EN BIT(9) /* Enable interrupt on RX packet */ + +/* RX FIFO MCL Info bits */ +#define METH_RX_FIFO_WPTR(x) (((x)>>16)&0xf) +#define METH_RX_FIFO_RPTR(x) (((x)>>8)&0xf) +#define METH_RX_FIFO_DEPTH(x) ((x)&0x1f) + +/* RX status bits */ + +#define METH_RX_ST_VALID BIT(63) +#define METH_RX_ST_RCV_CODE_VIOLATION BIT(16) +#define METH_RX_ST_DRBL_NBL BIT(17) +#define METH_RX_ST_CRC_ERR BIT(18) +#define METH_RX_ST_MCAST_PKT BIT(19) +#define METH_RX_ST_BCAST_PKT BIT(20) +#define METH_RX_ST_INV_PREAMBLE_CTX BIT(21) +#define METH_RX_ST_LONG_EVT_SEEN BIT(22) +#define METH_RX_ST_BAD_PACKET BIT(23) +#define METH_RX_ST_CARRIER_EVT_SEEN BIT(24) +#define METH_RX_ST_MCAST_FILTER_MATCH BIT(25) +#define METH_RX_ST_PHYS_ADDR_MATCH BIT(26) + +#define METH_RX_STATUS_ERRORS \ + ( \ + METH_RX_ST_RCV_CODE_VIOLATION| \ + METH_RX_ST_CRC_ERR| \ + METH_RX_ST_INV_PREAMBLE_CTX| \ + METH_RX_ST_LONG_EVT_SEEN| \ + METH_RX_ST_BAD_PACKET| \ + METH_RX_ST_CARRIER_EVT_SEEN \ + ) + /* Bits in METH_INT */ + /* Write _1_ to corresponding bit to clear */ +#define METH_INT_TX_EMPTY BIT(0) /* 0: No interrupt pending, 1: The TX ring buffer is empty */ +#define METH_INT_TX_PKT BIT(1) /* 0: No interrupt pending */ + /* 1: A TX message had the INT request bit set, the packet has been sent. 
*/ +#define METH_INT_TX_LINK_FAIL BIT(2) /* 0: No interrupt pending, 1: PHY has reported a link failure */ +#define METH_INT_MEM_ERROR BIT(3) /* 0: No interrupt pending */ + /* 1: A memory error occurred during DMA, DMA stopped, Fatal */ +#define METH_INT_TX_ABORT BIT(4) /* 0: No interrupt pending, 1: The TX aborted operation, DMA stopped, FATAL */ +#define METH_INT_RX_THRESHOLD BIT(5) /* 0: No interrupt pending, 1: Selected receive threshold condition Valid */ +#define METH_INT_RX_UNDERFLOW BIT(6) /* 0: No interrupt pending, 1: FIFO was empty, packet could not be queued */ +#define METH_INT_RX_OVERFLOW BIT(7) /* 0: No interrupt pending, 1: DMA FIFO Overflow, DMA stopped, FATAL */ + +/*#define METH_INT_RX_RPTR_MASK 0x0001F00*/ /* Bits 8 through 12 alias of RX read-pointer */ +#define METH_INT_RX_RPTR_MASK 0x0000F00 /* Bits 8 through 11 alias of RX read-pointer - so, is Rx FIFO 16 or 32 entry?*/ + + /* Bits 13 through 15 are always 0. */ + +#define METH_INT_TX_RPTR_MASK 0x1FF0000 /* Bits 16 through 24 alias of TX read-pointer */ + +#define METH_INT_RX_SEQ_MASK 0x2E000000 /* Bits 25 through 29 are the starting seq number for the message at the */ + + /* top of the queue */ + +#define METH_INT_ERROR (METH_INT_TX_LINK_FAIL| \ + METH_INT_MEM_ERROR| \ + METH_INT_TX_ABORT| \ + METH_INT_RX_OVERFLOW| \ + METH_INT_RX_UNDERFLOW) + +#define METH_INT_MCAST_HASH BIT(30) /* If RX DMA is enabled the hash select logic output is latched here */ + +/* TX status bits */ +#define METH_TX_ST_DONE BIT(63) /* TX complete */ +#define METH_TX_ST_SUCCESS BIT(23) /* Packet was transmitted successfully */ +#define METH_TX_ST_TOOLONG BIT(24) /* TX abort due to excessive length */ +#define METH_TX_ST_UNDERRUN BIT(25) /* TX abort due to underrun (?) */ +#define METH_TX_ST_EXCCOLL BIT(26) /* TX abort due to excess collisions */ +#define METH_TX_ST_DEFER BIT(27) /* TX abort due to excess deferals */ +#define METH_TX_ST_LATECOLL BIT(28) /* TX abort due to late collision */ + + +/* Tx command header bits */ +#define METH_TX_CMD_INT_EN BIT(24) /* Generate TX interrupt when packet is sent */ + +/* Phy MDIO interface busy flag */ +#define MDIO_BUSY BIT(16) +#define MDIO_DATA_MASK 0xFFFF +/* PHY defines */ +#define PHY_QS6612X 0x0181441 /* Quality TX */ +#define PHY_ICS1889 0x0015F41 /* ICS FX */ +#define PHY_ICS1890 0x0015F42 /* ICS TX */ +#define PHY_DP83840 0x20005C0 /* National TX */ + +#define ADVANCE_RX_PTR(x) x=(x+1)&(RX_RING_ENTRIES-1) diff --git a/drivers/net/ethernet/silan/Kconfig b/drivers/net/ethernet/silan/Kconfig new file mode 100644 index 000000000..3409b3f97 --- /dev/null +++ b/drivers/net/ethernet/silan/Kconfig @@ -0,0 +1,33 @@ +# +# Silan device configuration +# + +config NET_VENDOR_SILAN + bool "Silan devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Silan devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_SILAN + +config SC92031 + tristate "Silan SC92031 PCI Fast Ethernet Adapter driver" + depends on PCI + select CRC32 + ---help--- + This is a driver for the Fast Ethernet PCI network cards based on + the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you + have one of these, say Y here. 
+ + To compile this driver as a module, choose M here: the module + will be called sc92031. This is recommended. + +endif # NET_VENDOR_SILAN diff --git a/drivers/net/ethernet/silan/Makefile b/drivers/net/ethernet/silan/Makefile new file mode 100644 index 000000000..4ad3523dc --- /dev/null +++ b/drivers/net/ethernet/silan/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Silan network device drivers. +# + +obj-$(CONFIG_SC92031) += sc92031.o diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c new file mode 100644 index 000000000..7426f8b21 --- /dev/null +++ b/drivers/net/ethernet/silan/sc92031.c @@ -0,0 +1,1584 @@ +/* Silan SC92031 PCI Fast Ethernet Adapter driver + * + * Based on vendor drivers: + * Silan Fast Ethernet Netcard Driver: + * MODULE_AUTHOR ("gaoyonghong"); + * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver"); + * MODULE_LICENSE("GPL"); + * 8139D Fast Ethernet driver: + * (C) 2002 by gaoyonghong + * MODULE_AUTHOR ("gaoyonghong"); + * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver"); + * MODULE_LICENSE("GPL"); + * Both are almost identical and seem to be based on pci-skeleton.c + * + * Rewritten for 2.6 by Cesar Eduardo Barros + * + * A datasheet for this chip can be found at + * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf + */ + +/* Note about set_mac_address: I don't know how to change the hardware + * matching, so you need to enable IFF_PROMISC when using it. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define SC92031_NAME "sc92031" + +/* BAR 0 is MMIO, BAR 1 is PIO */ +#define SC92031_USE_PIO 0 + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */ +static int multicast_filter_limit = 64; +module_param(multicast_filter_limit, int, 0); +MODULE_PARM_DESC(multicast_filter_limit, + "Maximum number of filtered multicast addresses"); + +static int media; +module_param(media, int, 0); +MODULE_PARM_DESC(media, "Media type (0x00 = autodetect," + " 0x01 = 10M half, 0x02 = 10M full," + " 0x04 = 100M half, 0x08 = 100M full)"); + +/* Size of the in-memory receive ring. */ +#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K ,4==128K*/ +#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX) + +/* Number of Tx descriptor registers. */ +#define NUM_TX_DESC 4 + +/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/ +#define MAX_ETH_FRAME_SIZE 1536 + +/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */ +#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE +#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC) + +/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ +#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (4*HZ) + +#define SILAN_STATS_NUM 2 /* number of ETHTOOL_GSTATS */ + +/* media options */ +#define AUTOSELECT 0x00 +#define M10_HALF 0x01 +#define M10_FULL 0x02 +#define M100_HALF 0x04 +#define M100_FULL 0x08 + + /* Symbolic offsets to registers. 
*/ +enum silan_registers { + Config0 = 0x00, // Config0 + Config1 = 0x04, // Config1 + RxBufWPtr = 0x08, // Rx buffer writer poiter + IntrStatus = 0x0C, // Interrupt status + IntrMask = 0x10, // Interrupt mask + RxbufAddr = 0x14, // Rx buffer start address + RxBufRPtr = 0x18, // Rx buffer read pointer + Txstatusall = 0x1C, // Transmit status of all descriptors + TxStatus0 = 0x20, // Transmit status (Four 32bit registers). + TxAddr0 = 0x30, // Tx descriptors (also four 32bit). + RxConfig = 0x40, // Rx configuration + MAC0 = 0x44, // Ethernet hardware address. + MAR0 = 0x4C, // Multicast filter. + RxStatus0 = 0x54, // Rx status + TxConfig = 0x5C, // Tx configuration + PhyCtrl = 0x60, // physical control + FlowCtrlConfig = 0x64, // flow control + Miicmd0 = 0x68, // Mii command0 register + Miicmd1 = 0x6C, // Mii command1 register + Miistatus = 0x70, // Mii status register + Timercnt = 0x74, // Timer counter register + TimerIntr = 0x78, // Timer interrupt register + PMConfig = 0x7C, // Power Manager configuration + CRC0 = 0x80, // Power Manager CRC ( Two 32bit regisers) + Wakeup0 = 0x88, // power Manager wakeup( Eight 64bit regiser) + LSBCRC0 = 0xC8, // power Manager LSBCRC(Two 32bit regiser) + TestD0 = 0xD0, + TestD4 = 0xD4, + TestD8 = 0xD8, +}; + +#define MII_JAB 16 +#define MII_OutputStatus 24 + +#define PHY_16_JAB_ENB 0x1000 +#define PHY_16_PORT_ENB 0x1 + +enum IntrStatusBits { + LinkFail = 0x80000000, + LinkOK = 0x40000000, + TimeOut = 0x20000000, + RxOverflow = 0x0040, + RxOK = 0x0020, + TxOK = 0x0001, + IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK, +}; + +enum TxStatusBits { + TxCarrierLost = 0x20000000, + TxAborted = 0x10000000, + TxOutOfWindow = 0x08000000, + TxNccShift = 22, + EarlyTxThresShift = 16, + TxStatOK = 0x8000, + TxUnderrun = 0x4000, + TxOwn = 0x2000, +}; + +enum RxStatusBits { + RxStatesOK = 0x80000, + RxBadAlign = 0x40000, + RxHugeFrame = 0x20000, + RxSmallFrame = 0x10000, + RxCRCOK = 0x8000, + RxCrlFrame = 0x4000, + Rx_Broadcast = 0x2000, + Rx_Multicast = 0x1000, + RxAddrMatch = 0x0800, + MiiErr = 0x0400, +}; + +enum RxConfigBits { + RxFullDx = 0x80000000, + RxEnb = 0x40000000, + RxSmall = 0x20000000, + RxHuge = 0x10000000, + RxErr = 0x08000000, + RxAllphys = 0x04000000, + RxMulticast = 0x02000000, + RxBroadcast = 0x01000000, + RxLoopBack = (1 << 23) | (1 << 22), + LowThresholdShift = 12, + HighThresholdShift = 2, +}; + +enum TxConfigBits { + TxFullDx = 0x80000000, + TxEnb = 0x40000000, + TxEnbPad = 0x20000000, + TxEnbHuge = 0x10000000, + TxEnbFCS = 0x08000000, + TxNoBackOff = 0x04000000, + TxEnbPrem = 0x02000000, + TxCareLostCrs = 0x1000000, + TxExdCollNum = 0xf00000, + TxDataRate = 0x80000, +}; + +enum PhyCtrlconfigbits { + PhyCtrlAne = 0x80000000, + PhyCtrlSpd100 = 0x40000000, + PhyCtrlSpd10 = 0x20000000, + PhyCtrlPhyBaseAddr = 0x1f000000, + PhyCtrlDux = 0x800000, + PhyCtrlReset = 0x400000, +}; + +enum FlowCtrlConfigBits { + FlowCtrlFullDX = 0x80000000, + FlowCtrlEnb = 0x40000000, +}; + +enum Config0Bits { + Cfg0_Reset = 0x80000000, + Cfg0_Anaoff = 0x40000000, + Cfg0_LDPS = 0x20000000, +}; + +enum Config1Bits { + Cfg1_EarlyRx = 1 << 31, + Cfg1_EarlyTx = 1 << 30, + + //rx buffer size + Cfg1_Rcv8K = 0x0, + Cfg1_Rcv16K = 0x1, + Cfg1_Rcv32K = 0x3, + Cfg1_Rcv64K = 0x7, + Cfg1_Rcv128K = 0xf, +}; + +enum MiiCmd0Bits { + Mii_Divider = 0x20000000, + Mii_WRITE = 0x400000, + Mii_READ = 0x200000, + Mii_SCAN = 0x100000, + Mii_Tamod = 0x80000, + Mii_Drvmod = 0x40000, + Mii_mdc = 0x20000, + Mii_mdoen = 0x10000, + Mii_mdo = 0x8000, + Mii_mdi = 0x4000, +}; + +enum 
MiiStatusBits { + Mii_StatusBusy = 0x80000000, +}; + +enum PMConfigBits { + PM_Enable = 1 << 31, + PM_LongWF = 1 << 30, + PM_Magic = 1 << 29, + PM_LANWake = 1 << 28, + PM_LWPTN = (1 << 27 | 1<< 26), + PM_LinkUp = 1 << 25, + PM_WakeUp = 1 << 24, +}; + +/* Locking rules: + * priv->lock protects most of the fields of priv and most of the + * hardware registers. It does not have to protect against softirqs + * between sc92031_disable_interrupts and sc92031_enable_interrupts; + * it also does not need to be used in ->open and ->stop while the + * device interrupts are off. + * Not having to protect against softirqs is very useful due to heavy + * use of mdelay() at _sc92031_reset. + * Functions prefixed with _sc92031_ must be called with the lock held; + * functions prefixed with sc92031_ must be called without the lock held. + * Use mmiowb() before unlocking if the hardware was written to. + */ + +/* Locking rules for the interrupt: + * - the interrupt and the tasklet never run at the same time + * - neither run between sc92031_disable_interrupts and + * sc92031_enable_interrupt + */ + +struct sc92031_priv { + spinlock_t lock; + /* iomap.h cookie */ + void __iomem *port_base; + /* pci device structure */ + struct pci_dev *pdev; + /* tasklet */ + struct tasklet_struct tasklet; + + /* CPU address of rx ring */ + void *rx_ring; + /* PCI address of rx ring */ + dma_addr_t rx_ring_dma_addr; + /* PCI address of rx ring read pointer */ + dma_addr_t rx_ring_tail; + + /* tx ring write index */ + unsigned tx_head; + /* tx ring read index */ + unsigned tx_tail; + /* CPU address of tx bounce buffer */ + void *tx_bufs; + /* PCI address of tx bounce buffer */ + dma_addr_t tx_bufs_dma_addr; + + /* copies of some hardware registers */ + u32 intr_status; + atomic_t intr_mask; + u32 rx_config; + u32 tx_config; + u32 pm_config; + + /* copy of some flags from dev->flags */ + unsigned int mc_flags; + + /* for ETHTOOL_GSTATS */ + u64 tx_timeouts; + u64 rx_loss; + + /* for dev->get_stats */ + long rx_value; +}; + +/* I don't know which registers can be safely read; however, I can guess + * MAC0 is one of them. 
*/ +static inline void _sc92031_dummy_read(void __iomem *port_base) +{ + ioread32(port_base + MAC0); +} + +static u32 _sc92031_mii_wait(void __iomem *port_base) +{ + u32 mii_status; + + do { + udelay(10); + mii_status = ioread32(port_base + Miistatus); + } while (mii_status & Mii_StatusBusy); + + return mii_status; +} + +static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1) +{ + iowrite32(Mii_Divider, port_base + Miicmd0); + + _sc92031_mii_wait(port_base); + + iowrite32(cmd1, port_base + Miicmd1); + iowrite32(Mii_Divider | cmd0, port_base + Miicmd0); + + return _sc92031_mii_wait(port_base); +} + +static void _sc92031_mii_scan(void __iomem *port_base) +{ + _sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6); +} + +static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg) +{ + return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13; +} + +static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val) +{ + _sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11)); +} + +static void sc92031_disable_interrupts(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + /* tell the tasklet/interrupt not to enable interrupts */ + atomic_set(&priv->intr_mask, 0); + wmb(); + + /* stop interrupts */ + iowrite32(0, port_base + IntrMask); + _sc92031_dummy_read(port_base); + mmiowb(); + + /* wait for any concurrent interrupt/tasklet to finish */ + synchronize_irq(priv->pdev->irq); + tasklet_disable(&priv->tasklet); +} + +static void sc92031_enable_interrupts(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + tasklet_enable(&priv->tasklet); + + atomic_set(&priv->intr_mask, IntrBits); + wmb(); + + iowrite32(IntrBits, port_base + IntrMask); + mmiowb(); +} + +static void _sc92031_disable_tx_rx(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + priv->rx_config &= ~RxEnb; + priv->tx_config &= ~TxEnb; + iowrite32(priv->rx_config, port_base + RxConfig); + iowrite32(priv->tx_config, port_base + TxConfig); +} + +static void _sc92031_enable_tx_rx(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + priv->rx_config |= RxEnb; + priv->tx_config |= TxEnb; + iowrite32(priv->rx_config, port_base + RxConfig); + iowrite32(priv->tx_config, port_base + TxConfig); +} + +static void _sc92031_tx_clear(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + + while (priv->tx_head - priv->tx_tail > 0) { + priv->tx_tail++; + dev->stats.tx_dropped++; + } + priv->tx_head = priv->tx_tail = 0; +} + +static void _sc92031_set_mar(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u32 mar0 = 0, mar1 = 0; + + if ((dev->flags & IFF_PROMISC) || + netdev_mc_count(dev) > multicast_filter_limit || + (dev->flags & IFF_ALLMULTI)) + mar0 = mar1 = 0xffffffff; + else if (dev->flags & IFF_MULTICAST) { + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + u32 crc; + unsigned bit = 0; + + crc = ~ether_crc(ETH_ALEN, ha->addr); + crc >>= 24; + + if (crc & 0x01) bit |= 0x02; + if (crc & 0x02) bit |= 0x01; + if (crc & 0x10) bit |= 0x20; + if (crc & 0x20) bit |= 0x10; + if (crc & 0x40) bit |= 0x08; + if (crc & 0x80) bit |= 0x04; + + if (bit > 31) + mar0 |= 0x1 << (bit - 32); + else + mar1 |= 0x1 << bit; + } + } + + 
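+	/* The loop above builds a 6-bit hash index from the top byte of the
+	 * inverted CRC-32 of each multicast address.  Written out as a
+	 * sketch (a hypothetical equivalent, not the driver's own code),
+	 * with c = (~ether_crc(ETH_ALEN, addr)) >> 24:
+	 *
+	 *	bit = ((c & 0x01) << 1) | ((c & 0x02) >> 1) |
+	 *	      ((c & 0x10) << 1) | ((c & 0x20) >> 1) |
+	 *	      ((c & 0x40) >> 3) | ((c & 0x80) >> 5);
+	 *
+	 * CRC bits 2 and 3 are ignored; indices 32..63 select a bit in the
+	 * word written to MAR0 below, indices 0..31 a bit in the word
+	 * written to MAR0 + 4.
+	 */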
iowrite32(mar0, port_base + MAR0); + iowrite32(mar1, port_base + MAR0 + 4); +} + +static void _sc92031_set_rx_config(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + unsigned int old_mc_flags; + u32 rx_config_bits = 0; + + old_mc_flags = priv->mc_flags; + + if (dev->flags & IFF_PROMISC) + rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast + | RxMulticast | RxAllphys; + + if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) + rx_config_bits |= RxMulticast; + + if (dev->flags & IFF_BROADCAST) + rx_config_bits |= RxBroadcast; + + priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast + | RxMulticast | RxAllphys); + priv->rx_config |= rx_config_bits; + + priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI + | IFF_MULTICAST | IFF_BROADCAST); + + if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags) + iowrite32(priv->rx_config, port_base + RxConfig); +} + +static bool _sc92031_check_media(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u16 bmsr; + + bmsr = _sc92031_mii_read(port_base, MII_BMSR); + rmb(); + if (bmsr & BMSR_LSTATUS) { + bool speed_100, duplex_full; + u32 flow_ctrl_config = 0; + u16 output_status = _sc92031_mii_read(port_base, + MII_OutputStatus); + _sc92031_mii_scan(port_base); + + speed_100 = output_status & 0x2; + duplex_full = output_status & 0x4; + + /* Initial Tx/Rx configuration */ + priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift); + priv->tx_config = 0x48800000; + + /* NOTE: vendor driver had dead code here to enable tx padding */ + + if (!speed_100) + priv->tx_config |= 0x80000; + + // configure rx mode + _sc92031_set_rx_config(dev); + + if (duplex_full) { + priv->rx_config |= RxFullDx; + priv->tx_config |= TxFullDx; + flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb; + } else { + priv->rx_config &= ~RxFullDx; + priv->tx_config &= ~TxFullDx; + } + + _sc92031_set_mar(dev); + _sc92031_set_rx_config(dev); + _sc92031_enable_tx_rx(dev); + iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig); + + netif_carrier_on(dev); + + if (printk_ratelimit()) + printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n", + dev->name, + speed_100 ? "100" : "10", + duplex_full ? 
"full" : "half"); + return true; + } else { + _sc92031_mii_scan(port_base); + + netif_carrier_off(dev); + + _sc92031_disable_tx_rx(dev); + + if (printk_ratelimit()) + printk(KERN_INFO "%s: link down\n", dev->name); + return false; + } +} + +static void _sc92031_phy_reset(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u32 phy_ctrl; + + phy_ctrl = ioread32(port_base + PhyCtrl); + phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10); + phy_ctrl |= PhyCtrlAne | PhyCtrlReset; + + switch (media) { + default: + case AUTOSELECT: + phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; + break; + case M10_HALF: + phy_ctrl |= PhyCtrlSpd10; + break; + case M10_FULL: + phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10; + break; + case M100_HALF: + phy_ctrl |= PhyCtrlSpd100; + break; + case M100_FULL: + phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; + break; + } + + iowrite32(phy_ctrl, port_base + PhyCtrl); + mdelay(10); + + phy_ctrl &= ~PhyCtrlReset; + iowrite32(phy_ctrl, port_base + PhyCtrl); + mdelay(1); + + _sc92031_mii_write(port_base, MII_JAB, + PHY_16_JAB_ENB | PHY_16_PORT_ENB); + _sc92031_mii_scan(port_base); + + netif_carrier_off(dev); + netif_stop_queue(dev); +} + +static void _sc92031_reset(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + /* disable PM */ + iowrite32(0, port_base + PMConfig); + + /* soft reset the chip */ + iowrite32(Cfg0_Reset, port_base + Config0); + mdelay(200); + + iowrite32(0, port_base + Config0); + mdelay(10); + + /* disable interrupts */ + iowrite32(0, port_base + IntrMask); + + /* clear multicast address */ + iowrite32(0, port_base + MAR0); + iowrite32(0, port_base + MAR0 + 4); + + /* init rx ring */ + iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr); + priv->rx_ring_tail = priv->rx_ring_dma_addr; + + /* init tx ring */ + _sc92031_tx_clear(dev); + + /* clear old register values */ + priv->intr_status = 0; + atomic_set(&priv->intr_mask, 0); + priv->rx_config = 0; + priv->tx_config = 0; + priv->mc_flags = 0; + + /* configure rx buffer size */ + /* NOTE: vendor driver had dead code here to enable early tx/rx */ + iowrite32(Cfg1_Rcv64K, port_base + Config1); + + _sc92031_phy_reset(dev); + _sc92031_check_media(dev); + + /* calculate rx fifo overflow */ + priv->rx_value = 0; + + /* enable PM */ + iowrite32(priv->pm_config, port_base + PMConfig); + + /* clear intr register */ + ioread32(port_base + IntrStatus); +} + +static void _sc92031_tx_tasklet(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + unsigned old_tx_tail; + unsigned entry; + u32 tx_status; + + old_tx_tail = priv->tx_tail; + while (priv->tx_head - priv->tx_tail > 0) { + entry = priv->tx_tail % NUM_TX_DESC; + tx_status = ioread32(port_base + TxStatus0 + entry * 4); + + if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted))) + break; + + priv->tx_tail++; + + if (tx_status & TxStatOK) { + dev->stats.tx_bytes += tx_status & 0x1fff; + dev->stats.tx_packets++; + /* Note: TxCarrierLost is always asserted at 100mbps. 
*/ + dev->stats.collisions += (tx_status >> 22) & 0xf; + } + + if (tx_status & (TxOutOfWindow | TxAborted)) { + dev->stats.tx_errors++; + + if (tx_status & TxAborted) + dev->stats.tx_aborted_errors++; + + if (tx_status & TxCarrierLost) + dev->stats.tx_carrier_errors++; + + if (tx_status & TxOutOfWindow) + dev->stats.tx_window_errors++; + } + + if (tx_status & TxUnderrun) + dev->stats.tx_fifo_errors++; + } + + if (priv->tx_tail != old_tx_tail) + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); +} + +static void _sc92031_rx_tasklet_error(struct net_device *dev, + u32 rx_status, unsigned rx_size) +{ + if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) { + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + } + + if (!(rx_status & RxStatesOK)) { + dev->stats.rx_errors++; + + if (rx_status & (RxHugeFrame | RxSmallFrame)) + dev->stats.rx_length_errors++; + + if (rx_status & RxBadAlign) + dev->stats.rx_frame_errors++; + + if (!(rx_status & RxCRCOK)) + dev->stats.rx_crc_errors++; + } else { + struct sc92031_priv *priv = netdev_priv(dev); + priv->rx_loss++; + } +} + +static void _sc92031_rx_tasklet(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + dma_addr_t rx_ring_head; + unsigned rx_len; + unsigned rx_ring_offset; + void *rx_ring = priv->rx_ring; + + rx_ring_head = ioread32(port_base + RxBufWPtr); + rmb(); + + /* rx_ring_head is only 17 bits in the RxBufWPtr register. + * we need to change it to 32 bits physical address + */ + rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1); + rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1); + if (rx_ring_head < priv->rx_ring_dma_addr) + rx_ring_head += RX_BUF_LEN; + + if (rx_ring_head >= priv->rx_ring_tail) + rx_len = rx_ring_head - priv->rx_ring_tail; + else + rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head); + + if (!rx_len) + return; + + if (unlikely(rx_len > RX_BUF_LEN)) { + if (printk_ratelimit()) + printk(KERN_ERR "%s: rx packets length > rx buffer\n", + dev->name); + return; + } + + rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN; + + while (rx_len) { + u32 rx_status; + unsigned rx_size, rx_size_align, pkt_size; + struct sk_buff *skb; + + rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset)); + rmb(); + + rx_size = rx_status >> 20; + rx_size_align = (rx_size + 3) & ~3; // for 4 bytes aligned + pkt_size = rx_size - 4; // Omit the four octet CRC from the length. 
+ + rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN; + + if (unlikely(rx_status == 0 || + rx_size > (MAX_ETH_FRAME_SIZE + 4) || + rx_size < 16 || + !(rx_status & RxStatesOK))) { + _sc92031_rx_tasklet_error(dev, rx_status, rx_size); + break; + } + + if (unlikely(rx_size_align + 4 > rx_len)) { + if (printk_ratelimit()) + printk(KERN_ERR "%s: rx_len is too small\n", dev->name); + break; + } + + rx_len -= rx_size_align + 4; + + skb = netdev_alloc_skb_ip_align(dev, pkt_size); + if (unlikely(!skb)) { + if (printk_ratelimit()) + printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", + dev->name, pkt_size); + goto next; + } + + if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) { + memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset), + rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset); + memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)), + rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset)); + } else { + memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size); + } + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + + dev->stats.rx_bytes += pkt_size; + dev->stats.rx_packets++; + + if (rx_status & Rx_Multicast) + dev->stats.multicast++; + + next: + rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN; + } + mb(); + + priv->rx_ring_tail = rx_ring_head; + iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr); +} + +static void _sc92031_link_tasklet(struct net_device *dev) +{ + if (_sc92031_check_media(dev)) + netif_wake_queue(dev); + else { + netif_stop_queue(dev); + dev->stats.tx_carrier_errors++; + } +} + +static void sc92031_tasklet(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u32 intr_status, intr_mask; + + intr_status = priv->intr_status; + + spin_lock(&priv->lock); + + if (unlikely(!netif_running(dev))) + goto out; + + if (intr_status & TxOK) + _sc92031_tx_tasklet(dev); + + if (intr_status & RxOK) + _sc92031_rx_tasklet(dev); + + if (intr_status & RxOverflow) + dev->stats.rx_errors++; + + if (intr_status & TimeOut) { + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + } + + if (intr_status & (LinkFail | LinkOK)) + _sc92031_link_tasklet(dev); + +out: + intr_mask = atomic_read(&priv->intr_mask); + rmb(); + + iowrite32(intr_mask, port_base + IntrMask); + mmiowb(); + + spin_unlock(&priv->lock); +} + +static irqreturn_t sc92031_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u32 intr_status, intr_mask; + + /* mask interrupts before clearing IntrStatus */ + iowrite32(0, port_base + IntrMask); + _sc92031_dummy_read(port_base); + + intr_status = ioread32(port_base + IntrStatus); + if (unlikely(intr_status == 0xffffffff)) + return IRQ_NONE; // hardware has gone missing + + intr_status &= IntrBits; + if (!intr_status) + goto out_none; + + priv->intr_status = intr_status; + tasklet_schedule(&priv->tasklet); + + return IRQ_HANDLED; + +out_none: + intr_mask = atomic_read(&priv->intr_mask); + rmb(); + + iowrite32(intr_mask, port_base + IntrMask); + mmiowb(); + + return IRQ_NONE; +} + +static struct net_device_stats *sc92031_get_stats(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + + // FIXME I do not understand what is this trying to do. 
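+	/* One plausible reading of the code below (an interpretation, not
+	 * documented behaviour): the upper half of RxStatus0 appears to be
+	 * a hardware drop counter that saturates at 0xffff.  While it has
+	 * not saturated, the reported rx_fifo_errors is the live reading
+	 * plus the accumulated rx_value; once it reads 0xffff, that value
+	 * is folded into rx_value so the reported total keeps growing
+	 * instead of sticking at the saturation point.
+	 */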
+ if (netif_running(dev)) { + int temp; + + spin_lock_bh(&priv->lock); + + /* Update the error count. */ + temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff; + + if (temp == 0xffff) { + priv->rx_value += temp; + dev->stats.rx_fifo_errors = priv->rx_value; + } else + dev->stats.rx_fifo_errors = temp + priv->rx_value; + + spin_unlock_bh(&priv->lock); + } + + return &dev->stats; +} + +static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + unsigned len; + unsigned entry; + u32 tx_status; + + if (unlikely(skb->len > TX_BUF_SIZE)) { + dev->stats.tx_dropped++; + goto out; + } + + spin_lock(&priv->lock); + + if (unlikely(!netif_carrier_ok(dev))) { + dev->stats.tx_dropped++; + goto out_unlock; + } + + BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC); + + entry = priv->tx_head++ % NUM_TX_DESC; + + skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); + + len = skb->len; + if (len < ETH_ZLEN) { + memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, + 0, ETH_ZLEN - len); + len = ETH_ZLEN; + } + + wmb(); + + if (len < 100) + tx_status = len; + else if (len < 300) + tx_status = 0x30000 | len; + else + tx_status = 0x50000 | len; + + iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE, + port_base + TxAddr0 + entry * 4); + iowrite32(tx_status, port_base + TxStatus0 + entry * 4); + mmiowb(); + + if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC) + netif_stop_queue(dev); + +out_unlock: + spin_unlock(&priv->lock); + +out: + dev_consume_skb_any(skb); + + return NETDEV_TX_OK; +} + +static int sc92031_open(struct net_device *dev) +{ + int err; + struct sc92031_priv *priv = netdev_priv(dev); + struct pci_dev *pdev = priv->pdev; + + priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN, + &priv->rx_ring_dma_addr); + if (unlikely(!priv->rx_ring)) { + err = -ENOMEM; + goto out_alloc_rx_ring; + } + + priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN, + &priv->tx_bufs_dma_addr); + if (unlikely(!priv->tx_bufs)) { + err = -ENOMEM; + goto out_alloc_tx_bufs; + } + priv->tx_head = priv->tx_tail = 0; + + err = request_irq(pdev->irq, sc92031_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(err < 0)) + goto out_request_irq; + + priv->pm_config = 0; + + /* Interrupts already disabled by sc92031_stop or sc92031_probe */ + spin_lock_bh(&priv->lock); + + _sc92031_reset(dev); + mmiowb(); + + spin_unlock_bh(&priv->lock); + sc92031_enable_interrupts(dev); + + if (netif_carrier_ok(dev)) + netif_start_queue(dev); + else + netif_tx_disable(dev); + + return 0; + +out_request_irq: + pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs, + priv->tx_bufs_dma_addr); +out_alloc_tx_bufs: + pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, + priv->rx_ring_dma_addr); +out_alloc_rx_ring: + return err; +} + +static int sc92031_stop(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + struct pci_dev *pdev = priv->pdev; + + netif_tx_disable(dev); + + /* Disable interrupts, stop Tx and Rx. 
*/ + sc92031_disable_interrupts(dev); + + spin_lock_bh(&priv->lock); + + _sc92031_disable_tx_rx(dev); + _sc92031_tx_clear(dev); + mmiowb(); + + spin_unlock_bh(&priv->lock); + + free_irq(pdev->irq, dev); + pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs, + priv->tx_bufs_dma_addr); + pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring, + priv->rx_ring_dma_addr); + + return 0; +} + +static void sc92031_set_multicast_list(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + + spin_lock_bh(&priv->lock); + + _sc92031_set_mar(dev); + _sc92031_set_rx_config(dev); + mmiowb(); + + spin_unlock_bh(&priv->lock); +} + +static void sc92031_tx_timeout(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + + /* Disable interrupts by clearing the interrupt mask.*/ + sc92031_disable_interrupts(dev); + + spin_lock(&priv->lock); + + priv->tx_timeouts++; + + _sc92031_reset(dev); + mmiowb(); + + spin_unlock(&priv->lock); + + /* enable interrupts */ + sc92031_enable_interrupts(dev); + + if (netif_carrier_ok(dev)) + netif_wake_queue(dev); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sc92031_poll_controller(struct net_device *dev) +{ + struct sc92031_priv *priv = netdev_priv(dev); + const int irq = priv->pdev->irq; + + disable_irq(irq); + if (sc92031_interrupt(irq, dev) != IRQ_NONE) + sc92031_tasklet((unsigned long)dev); + enable_irq(irq); +} +#endif + +static int sc92031_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u8 phy_address; + u32 phy_ctrl; + u16 output_status; + + spin_lock_bh(&priv->lock); + + phy_address = ioread32(port_base + Miicmd1) >> 27; + phy_ctrl = ioread32(port_base + PhyCtrl); + + output_status = _sc92031_mii_read(port_base, MII_OutputStatus); + _sc92031_mii_scan(port_base); + mmiowb(); + + spin_unlock_bh(&priv->lock); + + cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII; + + cmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + + if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) + == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10)) + cmd->advertising |= ADVERTISED_Autoneg; + + if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10) + cmd->advertising |= ADVERTISED_10baseT_Half; + + if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux)) + == (PhyCtrlSpd10 | PhyCtrlDux)) + cmd->advertising |= ADVERTISED_10baseT_Full; + + if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100) + cmd->advertising |= ADVERTISED_100baseT_Half; + + if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux)) + == (PhyCtrlSpd100 | PhyCtrlDux)) + cmd->advertising |= ADVERTISED_100baseT_Full; + + if (phy_ctrl & PhyCtrlAne) + cmd->advertising |= ADVERTISED_Autoneg; + + ethtool_cmd_speed_set(cmd, + (output_status & 0x2) ? SPEED_100 : SPEED_10); + cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF; + cmd->port = PORT_MII; + cmd->phy_address = phy_address; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int sc92031_ethtool_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u32 speed = ethtool_cmd_speed(cmd); + u32 phy_ctrl; + u32 old_phy_ctrl; + + if (!(speed == SPEED_10 || speed == SPEED_100)) + return -EINVAL; + if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL)) + return -EINVAL; + if (!(cmd->port == PORT_MII)) + return -EINVAL; + if (!(cmd->phy_address == 0x1f)) + return -EINVAL; + if (!(cmd->transceiver == XCVR_INTERNAL)) + return -EINVAL; + if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE)) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + if (!(cmd->advertising & (ADVERTISED_Autoneg + | ADVERTISED_100baseT_Full + | ADVERTISED_100baseT_Half + | ADVERTISED_10baseT_Full + | ADVERTISED_10baseT_Half))) + return -EINVAL; + + phy_ctrl = PhyCtrlAne; + + // FIXME: I'm not sure what the original code was trying to do + if (cmd->advertising & ADVERTISED_Autoneg) + phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10; + if (cmd->advertising & ADVERTISED_100baseT_Full) + phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100; + if (cmd->advertising & ADVERTISED_100baseT_Half) + phy_ctrl |= PhyCtrlSpd100; + if (cmd->advertising & ADVERTISED_10baseT_Full) + phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux; + if (cmd->advertising & ADVERTISED_10baseT_Half) + phy_ctrl |= PhyCtrlSpd10; + } else { + // FIXME: Whole branch guessed + phy_ctrl = 0; + + if (speed == SPEED_10) + phy_ctrl |= PhyCtrlSpd10; + else /* cmd->speed == SPEED_100 */ + phy_ctrl |= PhyCtrlSpd100; + + if (cmd->duplex == DUPLEX_FULL) + phy_ctrl |= PhyCtrlDux; + } + + spin_lock_bh(&priv->lock); + + old_phy_ctrl = ioread32(port_base + PhyCtrl); + phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux + | PhyCtrlSpd100 | PhyCtrlSpd10); + if (phy_ctrl != old_phy_ctrl) + iowrite32(phy_ctrl, port_base + PhyCtrl); + + spin_unlock_bh(&priv->lock); + + return 0; +} + +static void sc92031_ethtool_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u32 pm_config; + + spin_lock_bh(&priv->lock); + pm_config = ioread32(port_base + PMConfig); + spin_unlock_bh(&priv->lock); + + // FIXME: Guessed + wolinfo->supported = WAKE_PHY | WAKE_MAGIC + | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; + wolinfo->wolopts = 0; + + if (pm_config & PM_LinkUp) + wolinfo->wolopts |= WAKE_PHY; + + if (pm_config & PM_Magic) + wolinfo->wolopts |= WAKE_MAGIC; + + if (pm_config & PM_WakeUp) + // FIXME: Guessed + wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; +} + +static int sc92031_ethtool_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wolinfo) +{ + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u32 pm_config; + + spin_lock_bh(&priv->lock); + + pm_config = ioread32(port_base + PMConfig) + & ~(PM_LinkUp | PM_Magic | PM_WakeUp); + + if (wolinfo->wolopts & WAKE_PHY) + pm_config |= PM_LinkUp; + + if (wolinfo->wolopts & WAKE_MAGIC) + pm_config |= PM_Magic; + + // FIXME: Guessed + if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) + pm_config |= PM_WakeUp; + + priv->pm_config = pm_config; + iowrite32(pm_config, port_base + PMConfig); + mmiowb(); + + spin_unlock_bh(&priv->lock); + + return 0; +} + +static int sc92031_ethtool_nway_reset(struct net_device *dev) +{ + int err = 0; + struct sc92031_priv *priv = 
netdev_priv(dev); + void __iomem *port_base = priv->port_base; + u16 bmcr; + + spin_lock_bh(&priv->lock); + + bmcr = _sc92031_mii_read(port_base, MII_BMCR); + if (!(bmcr & BMCR_ANENABLE)) { + err = -EINVAL; + goto out; + } + + _sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART); + +out: + _sc92031_mii_scan(port_base); + mmiowb(); + + spin_unlock_bh(&priv->lock); + + return err; +} + +static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = { + "tx_timeout", + "rx_loss", +}; + +static void sc92031_ethtool_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, sc92031_ethtool_stats_strings, + SILAN_STATS_NUM * ETH_GSTRING_LEN); +} + +static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return SILAN_STATS_NUM; + default: + return -EOPNOTSUPP; + } +} + +static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct sc92031_priv *priv = netdev_priv(dev); + + spin_lock_bh(&priv->lock); + data[0] = priv->tx_timeouts; + data[1] = priv->rx_loss; + spin_unlock_bh(&priv->lock); +} + +static const struct ethtool_ops sc92031_ethtool_ops = { + .get_settings = sc92031_ethtool_get_settings, + .set_settings = sc92031_ethtool_set_settings, + .get_wol = sc92031_ethtool_get_wol, + .set_wol = sc92031_ethtool_set_wol, + .nway_reset = sc92031_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_strings = sc92031_ethtool_get_strings, + .get_sset_count = sc92031_ethtool_get_sset_count, + .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, +}; + + +static const struct net_device_ops sc92031_netdev_ops = { + .ndo_get_stats = sc92031_get_stats, + .ndo_start_xmit = sc92031_start_xmit, + .ndo_open = sc92031_open, + .ndo_stop = sc92031_stop, + .ndo_set_rx_mode = sc92031_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_tx_timeout = sc92031_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sc92031_poll_controller, +#endif +}; + +static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int err; + void __iomem* port_base; + struct net_device *dev; + struct sc92031_priv *priv; + u32 mac0, mac1; + + err = pci_enable_device(pdev); + if (unlikely(err < 0)) + goto out_enable_device; + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (unlikely(err < 0)) + goto out_set_dma_mask; + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (unlikely(err < 0)) + goto out_set_dma_mask; + + err = pci_request_regions(pdev, SC92031_NAME); + if (unlikely(err < 0)) + goto out_request_regions; + + port_base = pci_iomap(pdev, SC92031_USE_PIO, 0); + if (unlikely(!port_base)) { + err = -EIO; + goto out_iomap; + } + + dev = alloc_etherdev(sizeof(struct sc92031_priv)); + if (unlikely(!dev)) { + err = -ENOMEM; + goto out_alloc_etherdev; + } + + pci_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + /* faked with skb_copy_and_csum_dev */ + dev->features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + + dev->netdev_ops = &sc92031_netdev_ops; + dev->watchdog_timeo = TX_TIMEOUT; + dev->ethtool_ops = &sc92031_ethtool_ops; + + priv = netdev_priv(dev); + spin_lock_init(&priv->lock); + priv->port_base = port_base; + priv->pdev = pdev; + tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev); 
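+	/* The old-style tasklet API passes an opaque unsigned long; the
+	 * handler, sc92031_tasklet(), casts it back to the net_device
+	 * pointer.  The tasklet_disable_nosync() just below is balanced by
+	 * the tasklet_enable() inside sc92031_enable_interrupts(), so the
+	 * tasklet cannot run until interrupts are armed (normally from
+	 * sc92031_open()).
+	 */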
+ /* Fudge tasklet count so the call to sc92031_enable_interrupts at + * sc92031_open will work correctly */ + tasklet_disable_nosync(&priv->tasklet); + + /* PCI PM Wakeup */ + iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig); + + mac0 = ioread32(port_base + MAC0); + mac1 = ioread32(port_base + MAC0 + 4); + dev->dev_addr[0] = mac0 >> 24; + dev->dev_addr[1] = mac0 >> 16; + dev->dev_addr[2] = mac0 >> 8; + dev->dev_addr[3] = mac0; + dev->dev_addr[4] = mac1 >> 8; + dev->dev_addr[5] = mac1; + + err = register_netdev(dev); + if (err < 0) + goto out_register_netdev; + + printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, + (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr, + pdev->irq); + + return 0; + +out_register_netdev: + free_netdev(dev); +out_alloc_etherdev: + pci_iounmap(pdev, port_base); +out_iomap: + pci_release_regions(pdev); +out_request_regions: +out_set_dma_mask: + pci_disable_device(pdev); +out_enable_device: + return err; +} + +static void sc92031_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct sc92031_priv *priv = netdev_priv(dev); + void __iomem* port_base = priv->port_base; + + unregister_netdev(dev); + free_netdev(dev); + pci_iounmap(pdev, port_base); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct sc92031_priv *priv = netdev_priv(dev); + + pci_save_state(pdev); + + if (!netif_running(dev)) + goto out; + + netif_device_detach(dev); + + /* Disable interrupts, stop Tx and Rx. */ + sc92031_disable_interrupts(dev); + + spin_lock_bh(&priv->lock); + + _sc92031_disable_tx_rx(dev); + _sc92031_tx_clear(dev); + mmiowb(); + + spin_unlock_bh(&priv->lock); + +out: + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int sc92031_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct sc92031_priv *priv = netdev_priv(dev); + + pci_restore_state(pdev); + pci_set_power_state(pdev, PCI_D0); + + if (!netif_running(dev)) + goto out; + + /* Interrupts already disabled by sc92031_suspend */ + spin_lock_bh(&priv->lock); + + _sc92031_reset(dev); + mmiowb(); + + spin_unlock_bh(&priv->lock); + sc92031_enable_interrupts(dev); + + netif_device_attach(dev); + + if (netif_carrier_ok(dev)) + netif_wake_queue(dev); + else + netif_tx_disable(dev); + +out: + return 0; +} + +static const struct pci_device_id sc92031_pci_device_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) }, + { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) }, + { PCI_DEVICE(0x1088, 0x2031) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); + +static struct pci_driver sc92031_pci_driver = { + .name = SC92031_NAME, + .id_table = sc92031_pci_device_id_table, + .probe = sc92031_probe, + .remove = sc92031_remove, + .suspend = sc92031_suspend, + .resume = sc92031_resume, +}; + +module_pci_driver(sc92031_pci_driver); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cesar Eduardo Barros "); +MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver"); diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig new file mode 100644 index 000000000..68d052b09 --- /dev/null +++ b/drivers/net/ethernet/sis/Kconfig @@ -0,0 +1,51 @@ +# +# Silicon Integrated Systems (SiS) device configuration +# + +config NET_VENDOR_SIS + bool "Silicon Integrated Systems (SiS) devices" + default y + depends on PCI + ---help--- 
+ If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about SiS devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_SIS + +config SIS900 + tristate "SiS 900/7016 PCI Fast Ethernet Adapter support" + depends on PCI + select CRC32 + select MII + ---help--- + This is a driver for the Fast Ethernet PCI network cards based on + the SiS 900 and SiS 7016 chips. The SiS 900 core is also embedded in + SiS 630 and SiS 540 chipsets. + + This driver also supports AMD 79C901 HomePNA so that you can use + your phone line as a network cable. + + To compile this driver as a module, choose M here: the module + will be called sis900. This is recommended. + +config SIS190 + tristate "SiS190/SiS191 gigabit ethernet support" + depends on PCI + select CRC32 + select MII + ---help--- + Say Y here if you have a SiS 190 PCI Fast Ethernet adapter or + a SiS 191 PCI Gigabit Ethernet adapter. Both are expected to + appear in lan on motherboard designs which are based on SiS 965 + and SiS 966 south bridge. + + To compile this driver as a module, choose M here: the module + will be called sis190. This is recommended. + +endif # NET_VENDOR_SIS diff --git a/drivers/net/ethernet/sis/Makefile b/drivers/net/ethernet/sis/Makefile new file mode 100644 index 000000000..58d3ac198 --- /dev/null +++ b/drivers/net/ethernet/sis/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for Silicon Integrated Systems (SiS) network device drivers. +# + +obj-$(CONFIG_SIS190) += sis190.o +obj-$(CONFIG_SIS900) += sis900.o diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c new file mode 100644 index 000000000..27be6c869 --- /dev/null +++ b/drivers/net/ethernet/sis/sis190.c @@ -0,0 +1,1933 @@ +/* + sis190.c: Silicon Integrated Systems SiS190 ethernet driver + + Copyright (c) 2003 K.M. Liu + Copyright (c) 2003, 2004 Jeff Garzik + Copyright (c) 2003, 2004, 2005 Francois Romieu + + Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191 + genuine driver. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + See the file COPYING in this distribution for more information. 
+ +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PHY_MAX_ADDR 32 +#define PHY_ID_ANY 0x1f +#define MII_REG_ANY 0x1f + +#define DRV_VERSION "1.4" +#define DRV_NAME "sis190" +#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION + +#define sis190_rx_skb netif_rx +#define sis190_rx_quota(count, quota) count + +#define NUM_TX_DESC 64 /* [8..1024] */ +#define NUM_RX_DESC 64 /* [8..8192] */ +#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) +#define RX_BUF_SIZE 1536 +#define RX_BUF_MASK 0xfff8 + +#define SIS190_REGS_SIZE 0x80 +#define SIS190_TX_TIMEOUT (6*HZ) +#define SIS190_PHY_TIMEOUT (10*HZ) +#define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | NETIF_MSG_IFUP | \ + NETIF_MSG_IFDOWN) + +/* Enhanced PHY access register bit definitions */ +#define EhnMIIread 0x0000 +#define EhnMIIwrite 0x0020 +#define EhnMIIdataShift 16 +#define EhnMIIpmdShift 6 /* 7016 only */ +#define EhnMIIregShift 11 +#define EhnMIIreq 0x0010 +#define EhnMIInotDone 0x0010 + +/* Write/read MMIO register */ +#define SIS_W8(reg, val) writeb ((val), ioaddr + (reg)) +#define SIS_W16(reg, val) writew ((val), ioaddr + (reg)) +#define SIS_W32(reg, val) writel ((val), ioaddr + (reg)) +#define SIS_R8(reg) readb (ioaddr + (reg)) +#define SIS_R16(reg) readw (ioaddr + (reg)) +#define SIS_R32(reg) readl (ioaddr + (reg)) + +#define SIS_PCI_COMMIT() SIS_R32(IntrControl) + +enum sis190_registers { + TxControl = 0x00, + TxDescStartAddr = 0x04, + rsv0 = 0x08, // reserved + TxSts = 0x0c, // unused (Control/Status) + RxControl = 0x10, + RxDescStartAddr = 0x14, + rsv1 = 0x18, // reserved + RxSts = 0x1c, // unused + IntrStatus = 0x20, + IntrMask = 0x24, + IntrControl = 0x28, + IntrTimer = 0x2c, // unused (Interrupt Timer) + PMControl = 0x30, // unused (Power Mgmt Control/Status) + rsv2 = 0x34, // reserved + ROMControl = 0x38, + ROMInterface = 0x3c, + StationControl = 0x40, + GMIIControl = 0x44, + GIoCR = 0x48, // unused (GMAC IO Compensation) + GIoCtrl = 0x4c, // unused (GMAC IO Control) + TxMacControl = 0x50, + TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit) + RGDelay = 0x58, // unused (RGMII Tx Internal Delay) + rsv3 = 0x5c, // reserved + RxMacControl = 0x60, + RxMacAddr = 0x62, + RxHashTable = 0x68, + // Undocumented = 0x6c, + RxWolCtrl = 0x70, + RxWolData = 0x74, // unused (Rx WOL Data Access) + RxMPSControl = 0x78, // unused (Rx MPS Control) + rsv4 = 0x7c, // reserved +}; + +enum sis190_register_content { + /* IntrStatus */ + SoftInt = 0x40000000, // unused + Timeup = 0x20000000, // unused + PauseFrame = 0x00080000, // unused + MagicPacket = 0x00040000, // unused + WakeupFrame = 0x00020000, // unused + LinkChange = 0x00010000, + RxQEmpty = 0x00000080, + RxQInt = 0x00000040, + TxQ1Empty = 0x00000020, // unused + TxQ1Int = 0x00000010, + TxQ0Empty = 0x00000008, // unused + TxQ0Int = 0x00000004, + RxHalt = 0x00000002, + TxHalt = 0x00000001, + + /* {Rx/Tx}CmdBits */ + CmdReset = 0x10, + CmdRxEnb = 0x08, // unused + CmdTxEnb = 0x01, + RxBufEmpty = 0x01, // unused + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, // unused + Cfg9346_Unlock = 0xc0, // unused + + /* RxMacControl */ + AcceptErr = 0x20, // unused + AcceptRunt = 0x10, // unused + AcceptBroadcast = 0x0800, + AcceptMulticast = 0x0400, + AcceptMyPhys = 0x0200, + AcceptAllPhys = 0x0100, + + /* RxConfigBits */ + 
RxCfgFIFOShift = 13, + RxCfgDMAShift = 8, // 0x1a in RxControl ? + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + LinkStatus = 0x02, // unused + FullDup = 0x01, // unused + + /* TBICSRBit */ + TBILinkOK = 0x02000000, // unused +}; + +struct TxDesc { + __le32 PSize; + __le32 status; + __le32 addr; + __le32 size; +}; + +struct RxDesc { + __le32 PSize; + __le32 status; + __le32 addr; + __le32 size; +}; + +enum _DescStatusBit { + /* _Desc.status */ + OWNbit = 0x80000000, // RXOWN/TXOWN + INTbit = 0x40000000, // RXINT/TXINT + CRCbit = 0x00020000, // CRCOFF/CRCEN + PADbit = 0x00010000, // PREADD/PADEN + /* _Desc.size */ + RingEnd = 0x80000000, + /* TxDesc.status */ + LSEN = 0x08000000, // TSO ? -- FR + IPCS = 0x04000000, + TCPCS = 0x02000000, + UDPCS = 0x01000000, + BSTEN = 0x00800000, + EXTEN = 0x00400000, + DEFEN = 0x00200000, + BKFEN = 0x00100000, + CRSEN = 0x00080000, + COLEN = 0x00040000, + THOL3 = 0x30000000, + THOL2 = 0x20000000, + THOL1 = 0x10000000, + THOL0 = 0x00000000, + + WND = 0x00080000, + TABRT = 0x00040000, + FIFO = 0x00020000, + LINK = 0x00010000, + ColCountMask = 0x0000ffff, + /* RxDesc.status */ + IPON = 0x20000000, + TCPON = 0x10000000, + UDPON = 0x08000000, + Wakup = 0x00400000, + Magic = 0x00200000, + Pause = 0x00100000, + DEFbit = 0x00200000, + BCAST = 0x000c0000, + MCAST = 0x00080000, + UCAST = 0x00040000, + /* RxDesc.PSize */ + TAGON = 0x80000000, + RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR + ABORT = 0x00800000, + SHORT = 0x00400000, + LIMIT = 0x00200000, + MIIER = 0x00100000, + OVRUN = 0x00080000, + NIBON = 0x00040000, + COLON = 0x00020000, + CRCOK = 0x00010000, + RxSizeMask = 0x0000ffff + /* + * The asic could apparently do vlan, TSO, jumbo (sis191 only) and + * provide two (unused with Linux) Tx queues. No publicly + * available documentation alas. 
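+ * The Tx path in this file only ever sets OWN/INT/DEF/CRC/PAD plus the
+ * half-duplex bits, so the checksum bits (IPCS/TCPCS/UDPCS) and LSEN
+ * above are defined but never used here.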
+ */ +}; + +enum sis190_eeprom_access_register_bits { + EECS = 0x00000001, // unused + EECLK = 0x00000002, // unused + EEDO = 0x00000008, // unused + EEDI = 0x00000004, // unused + EEREQ = 0x00000080, + EEROP = 0x00000200, + EEWOP = 0x00000100 // unused +}; + +/* EEPROM Addresses */ +enum sis190_eeprom_address { + EEPROMSignature = 0x00, + EEPROMCLK = 0x01, // unused + EEPROMInfo = 0x02, + EEPROMMACAddr = 0x03 +}; + +enum sis190_feature { + F_HAS_RGMII = 1, + F_PHY_88E1111 = 2, + F_PHY_BCM5461 = 4 +}; + +struct sis190_private { + void __iomem *mmio_addr; + struct pci_dev *pci_dev; + struct net_device *dev; + spinlock_t lock; + u32 rx_buf_sz; + u32 cur_rx; + u32 cur_tx; + u32 dirty_rx; + u32 dirty_tx; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + struct RxDesc *RxDescRing; + struct TxDesc *TxDescRing; + struct sk_buff *Rx_skbuff[NUM_RX_DESC]; + struct sk_buff *Tx_skbuff[NUM_TX_DESC]; + struct work_struct phy_task; + struct timer_list timer; + u32 msg_enable; + struct mii_if_info mii_if; + struct list_head first_phy; + u32 features; + u32 negotiated_lpa; + enum { + LNK_OFF, + LNK_ON, + LNK_AUTONEG, + } link_status; +}; + +struct sis190_phy { + struct list_head list; + int phy_id; + u16 id[2]; + u16 status; + u8 type; +}; + +enum sis190_phy_type { + UNKNOWN = 0x00, + HOME = 0x01, + LAN = 0x02, + MIX = 0x03 +}; + +static struct mii_chip_info { + const char *name; + u16 id[2]; + unsigned int type; + u32 feature; +} mii_chip_table[] = { + { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 }, + { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 }, + { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, + { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, + { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 }, + { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 }, + { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 }, + { NULL, } +}; + +static const struct { + const char *name; +} sis_chip_info[] = { + { "SiS 190 PCI Fast Ethernet adapter" }, + { "SiS 191 PCI Gigabit Ethernet adapter" }, +}; + +static const struct pci_device_id sis190_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, + { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, + { 0, }, +}; + +MODULE_DEVICE_TABLE(pci, sis190_pci_tbl); + +static int rx_copybreak = 200; + +static struct { + u32 msg_enable; +} debug = { -1 }; + +MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver"); +module_param(rx_copybreak, int, 0); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_AUTHOR("K.M. Liu , Ueimor "); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); + +static const u32 sis190_intr_mask = + RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange; + +/* + * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + * The chips use a 64 element hash table based on the Ethernet CRC. 
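+ * sis190_set_rx_mode() below keeps the low six bits of ether_crc() as
+ * the hash bit number, spread over the two 32-bit RxHashTable words.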
+ */ +static const int multicast_filter_limit = 32; + +static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) +{ + unsigned int i; + + SIS_W32(GMIIControl, ctl); + + msleep(1); + + for (i = 0; i < 100; i++) { + if (!(SIS_R32(GMIIControl) & EhnMIInotDone)) + break; + msleep(1); + } + + if (i > 99) + pr_err("PHY command failed !\n"); +} + +static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val) +{ + __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite | + (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) | + (((u32) val) << EhnMIIdataShift)); +} + +static int mdio_read(void __iomem *ioaddr, int phy_id, int reg) +{ + __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread | + (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift)); + + return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift); +} + +static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val) +{ + struct sis190_private *tp = netdev_priv(dev); + + mdio_write(tp->mmio_addr, phy_id, reg, val); +} + +static int __mdio_read(struct net_device *dev, int phy_id, int reg) +{ + struct sis190_private *tp = netdev_priv(dev); + + return mdio_read(tp->mmio_addr, phy_id, reg); +} + +static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg) +{ + mdio_read(ioaddr, phy_id, reg); + return mdio_read(ioaddr, phy_id, reg); +} + +static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg) +{ + u16 data = 0xffff; + unsigned int i; + + if (!(SIS_R32(ROMControl) & 0x0002)) + return 0; + + SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10)); + + for (i = 0; i < 200; i++) { + if (!(SIS_R32(ROMInterface) & EEREQ)) { + data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16; + break; + } + msleep(1); + } + + return data; +} + +static void sis190_irq_mask_and_ack(void __iomem *ioaddr) +{ + SIS_W32(IntrMask, 0x00); + SIS_W32(IntrStatus, 0xffffffff); + SIS_PCI_COMMIT(); +} + +static void sis190_asic_down(void __iomem *ioaddr) +{ + /* Stop the chip's Tx and Rx DMA processes. 
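+ * 0x1a00 looks like the burst setup that sis190_hw_start() programs,
+ * with the low enable bits cleared, so both DMA engines stop.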
*/ + + SIS_W32(TxControl, 0x1a00); + SIS_W32(RxControl, 0x1a00); + + sis190_irq_mask_and_ack(ioaddr); +} + +static void sis190_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->size |= cpu_to_le32(RingEnd); +} + +static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->size) & RingEnd; + + desc->PSize = 0x0; + desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor); + wmb(); + desc->status = cpu_to_le32(OWNbit | INTbit); +} + +static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->addr = cpu_to_le32(mapping); + sis190_give_to_asic(desc, rx_buf_sz); +} + +static inline void sis190_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->PSize = 0x0; + desc->addr = cpu_to_le32(0xdeadbeef); + desc->size &= cpu_to_le32(RingEnd); + wmb(); + desc->status = 0x0; +} + +static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp, + struct RxDesc *desc) +{ + u32 rx_buf_sz = tp->rx_buf_sz; + struct sk_buff *skb; + dma_addr_t mapping; + + skb = netdev_alloc_skb(tp->dev, rx_buf_sz); + if (unlikely(!skb)) + goto skb_alloc_failed; + mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(tp->pci_dev, mapping)) + goto out; + sis190_map_to_asic(desc, mapping, rx_buf_sz); + + return skb; + +out: + dev_kfree_skb_any(skb); +skb_alloc_failed: + sis190_make_unusable_by_asic(desc); + return NULL; +} + +static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, + u32 start, u32 end) +{ + u32 cur; + + for (cur = start; cur < end; cur++) { + unsigned int i = cur % NUM_RX_DESC; + + if (tp->Rx_skbuff[i]) + continue; + + tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i); + + if (!tp->Rx_skbuff[i]) + break; + } + return cur - start; +} + +static bool sis190_try_rx_copy(struct sis190_private *tp, + struct sk_buff **sk_buff, int pkt_size, + dma_addr_t addr) +{ + struct sk_buff *skb; + bool done = false; + + if (pkt_size >= rx_copybreak) + goto out; + + skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); + if (!skb) + goto out; + + pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); + *sk_buff = skb; + done = true; +out: + return done; +} + +static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) +{ +#define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT) + + if ((status & CRCOK) && !(status & ErrMask)) + return 0; + + if (!(status & CRCOK)) + stats->rx_crc_errors++; + else if (status & OVRUN) + stats->rx_over_errors++; + else if (status & (SHORT | LIMIT)) + stats->rx_length_errors++; + else if (status & (MIIER | NIBON | COLON)) + stats->rx_frame_errors++; + + stats->rx_errors++; + return -1; +} + +static int sis190_rx_interrupt(struct net_device *dev, + struct sis190_private *tp, void __iomem *ioaddr) +{ + struct net_device_stats *stats = &dev->stats; + u32 rx_left, cur_rx = tp->cur_rx; + u32 delta, count; + + rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; + rx_left = sis190_rx_quota(rx_left, (u32) dev->quota); + + for (; rx_left > 0; rx_left--, cur_rx++) { + unsigned int entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescRing + entry; + u32 status; + + if (le32_to_cpu(desc->status) & OWNbit) + break; + + status = le32_to_cpu(desc->PSize); + + //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status); + + if (sis190_rx_pkt_err(status, stats) < 0) + sis190_give_to_asic(desc, 
tp->rx_buf_sz); + else { + struct sk_buff *skb = tp->Rx_skbuff[entry]; + dma_addr_t addr = le32_to_cpu(desc->addr); + int pkt_size = (status & RxSizeMask) - 4; + struct pci_dev *pdev = tp->pci_dev; + + if (unlikely(pkt_size > tp->rx_buf_sz)) { + netif_info(tp, intr, dev, + "(frag) status = %08x\n", status); + stats->rx_dropped++; + stats->rx_length_errors++; + sis190_give_to_asic(desc, tp->rx_buf_sz); + continue; + } + + + if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) { + pci_dma_sync_single_for_device(pdev, addr, + tp->rx_buf_sz, PCI_DMA_FROMDEVICE); + sis190_give_to_asic(desc, tp->rx_buf_sz); + } else { + pci_unmap_single(pdev, addr, tp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + tp->Rx_skbuff[entry] = NULL; + sis190_make_unusable_by_asic(desc); + } + + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + sis190_rx_skb(skb); + + stats->rx_packets++; + stats->rx_bytes += pkt_size; + if ((status & BCAST) == MCAST) + stats->multicast++; + } + } + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); + if (!delta && count) + netif_info(tp, intr, dev, "no Rx buffer allocated\n"); + tp->dirty_rx += delta; + + if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) + netif_emerg(tp, intr, dev, "Rx buffers exhausted\n"); + + return count; +} + +static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, + struct TxDesc *desc) +{ + unsigned int len; + + len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; + + pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); + + memset(desc, 0x00, sizeof(*desc)); +} + +static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats) +{ +#define TxErrMask (WND | TABRT | FIFO | LINK) + + if (!unlikely(status & TxErrMask)) + return 0; + + if (status & WND) + stats->tx_window_errors++; + if (status & TABRT) + stats->tx_aborted_errors++; + if (status & FIFO) + stats->tx_fifo_errors++; + if (status & LINK) + stats->tx_carrier_errors++; + + stats->tx_errors++; + + return -1; +} + +static void sis190_tx_interrupt(struct net_device *dev, + struct sis190_private *tp, void __iomem *ioaddr) +{ + struct net_device_stats *stats = &dev->stats; + u32 pending, dirty_tx = tp->dirty_tx; + /* + * It would not be needed if queueing was allowed to be enabled + * again too early (hint: think preempt and unclocked smp systems). + */ + unsigned int queue_stopped; + + smp_rmb(); + pending = tp->cur_tx - dirty_tx; + queue_stopped = (pending == NUM_TX_DESC); + + for (; pending; pending--, dirty_tx++) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct TxDesc *txd = tp->TxDescRing + entry; + u32 status = le32_to_cpu(txd->status); + struct sk_buff *skb; + + if (status & OWNbit) + break; + + skb = tp->Tx_skbuff[entry]; + + if (likely(sis190_tx_pkt_err(status, stats) == 0)) { + stats->tx_packets++; + stats->tx_bytes += skb->len; + stats->collisions += ((status & ColCountMask) - 1); + } + + sis190_unmap_tx_skb(tp->pci_dev, skb, txd); + tp->Tx_skbuff[entry] = NULL; + dev_kfree_skb_irq(skb); + } + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + smp_wmb(); + if (queue_stopped) + netif_wake_queue(dev); + } +} + +/* + * The interrupt handler does all of the Rx thread work and cleans up after + * the Tx thread. 
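+ * Status bits are acknowledged by writing them back to IntrStatus and
+ * LinkChange events are deferred to the phy_task work item.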
+ */ +static irqreturn_t sis190_irq(int irq, void *__dev) +{ + struct net_device *dev = __dev; + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int handled = 0; + u32 status; + + status = SIS_R32(IntrStatus); + + if ((status == 0xffffffff) || !status) + goto out; + + handled = 1; + + if (unlikely(!netif_running(dev))) { + sis190_asic_down(ioaddr); + goto out; + } + + SIS_W32(IntrStatus, status); + +// netif_info(tp, intr, dev, "status = %08x\n", status); + + if (status & LinkChange) { + netif_info(tp, intr, dev, "link change\n"); + del_timer(&tp->timer); + schedule_work(&tp->phy_task); + } + + if (status & RxQInt) + sis190_rx_interrupt(dev, tp, ioaddr); + + if (status & TxQ0Int) + sis190_tx_interrupt(dev, tp, ioaddr); +out: + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sis190_netpoll(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + + disable_irq(irq); + sis190_irq(irq, dev); + enable_irq(irq); +} +#endif + +static void sis190_free_rx_skb(struct sis190_private *tp, + struct sk_buff **sk_buff, struct RxDesc *desc) +{ + struct pci_dev *pdev = tp->pci_dev; + + pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(*sk_buff); + *sk_buff = NULL; + sis190_make_unusable_by_asic(desc); +} + +static void sis190_rx_clear(struct sis190_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (!tp->Rx_skbuff[i]) + continue; + sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i); + } +} + +static void sis190_init_ring_indexes(struct sis190_private *tp) +{ + tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; +} + +static int sis190_init_ring(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + + sis190_init_ring_indexes(tp); + + memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *)); + memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); + + if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) + goto err_rx_clear; + + sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1); + + return 0; + +err_rx_clear: + sis190_rx_clear(tp); + return -ENOMEM; +} + +static void sis190_set_rx_mode(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + u32 mc_filter[2]; /* Multicast hash filter */ + u16 rx_mode; + + if (dev->flags & IFF_PROMISC) { + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. 
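+ * Both RxHashTable words are set to all ones so every hash bucket hits.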
*/ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = + ether_crc(ETH_ALEN, ha->addr) & 0x3f; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + spin_lock_irqsave(&tp->lock, flags); + + SIS_W16(RxMacControl, rx_mode | 0x2); + SIS_W32(RxHashTable, mc_filter[0]); + SIS_W32(RxHashTable + 4, mc_filter[1]); + + spin_unlock_irqrestore(&tp->lock, flags); +} + +static void sis190_soft_reset(void __iomem *ioaddr) +{ + SIS_W32(IntrControl, 0x8000); + SIS_PCI_COMMIT(); + SIS_W32(IntrControl, 0x0); + sis190_asic_down(ioaddr); +} + +static void sis190_hw_start(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + sis190_soft_reset(ioaddr); + + SIS_W32(TxDescStartAddr, tp->tx_dma); + SIS_W32(RxDescStartAddr, tp->rx_dma); + + SIS_W32(IntrStatus, 0xffffffff); + SIS_W32(IntrMask, 0x0); + SIS_W32(GMIIControl, 0x0); + SIS_W32(TxMacControl, 0x60); + SIS_W16(RxMacControl, 0x02); + SIS_W32(RxHashTable, 0x0); + SIS_W32(0x6c, 0x0); + SIS_W32(RxWolCtrl, 0x0); + SIS_W32(RxWolData, 0x0); + + SIS_PCI_COMMIT(); + + sis190_set_rx_mode(dev); + + /* Enable all known interrupts by setting the interrupt mask. */ + SIS_W32(IntrMask, sis190_intr_mask); + + SIS_W32(TxControl, 0x1a00 | CmdTxEnb); + SIS_W32(RxControl, 0x1a1d); + + netif_start_queue(dev); +} + +static void sis190_phy_task(struct work_struct *work) +{ + struct sis190_private *tp = + container_of(work, struct sis190_private, phy_task); + struct net_device *dev = tp->dev; + void __iomem *ioaddr = tp->mmio_addr; + int phy_id = tp->mii_if.phy_id; + u16 val; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + val = mdio_read(ioaddr, phy_id, MII_BMCR); + if (val & BMCR_RESET) { + // FIXME: needlessly high ? -- FR 02/07/2005 + mod_timer(&tp->timer, jiffies + HZ/10); + goto out_unlock; + } + + val = mdio_read_latched(ioaddr, phy_id, MII_BMSR); + if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) { + netif_carrier_off(dev); + netif_warn(tp, link, dev, "auto-negotiating...\n"); + tp->link_status = LNK_AUTONEG; + } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) { + /* Rejoice ! 
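+ * Map the negotiated LPA bits to a StationControl value through the
+ * reg31[] table below before reporting the link up.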
*/ + struct { + int val; + u32 ctl; + const char *msg; + } reg31[] = { + { LPA_1000FULL, 0x07000c00 | 0x00001000, + "1000 Mbps Full Duplex" }, + { LPA_1000HALF, 0x07000c00, + "1000 Mbps Half Duplex" }, + { LPA_100FULL, 0x04000800 | 0x00001000, + "100 Mbps Full Duplex" }, + { LPA_100HALF, 0x04000800, + "100 Mbps Half Duplex" }, + { LPA_10FULL, 0x04000400 | 0x00001000, + "10 Mbps Full Duplex" }, + { LPA_10HALF, 0x04000400, + "10 Mbps Half Duplex" }, + { 0, 0x04000400, "unknown" } + }, *p = NULL; + u16 adv, autoexp, gigadv, gigrec; + + val = mdio_read(ioaddr, phy_id, 0x1f); + netif_info(tp, link, dev, "mii ext = %04x\n", val); + + val = mdio_read(ioaddr, phy_id, MII_LPA); + adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE); + autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION); + netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n", + val, adv, autoexp); + + if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) { + /* check for gigabit speed */ + gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000); + gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000); + val = (gigadv & (gigrec >> 2)); + if (val & ADVERTISE_1000FULL) + p = reg31; + else if (val & ADVERTISE_1000HALF) + p = reg31 + 1; + } + if (!p) { + val &= adv; + + for (p = reg31; p->val; p++) { + if ((val & p->val) == p->val) + break; + } + } + + p->ctl |= SIS_R32(StationControl) & ~0x0f001c00; + + if ((tp->features & F_HAS_RGMII) && + (tp->features & F_PHY_BCM5461)) { + // Set Tx Delay in RGMII mode. + mdio_write(ioaddr, phy_id, 0x18, 0xf1c7); + udelay(200); + mdio_write(ioaddr, phy_id, 0x1c, 0x8c00); + p->ctl |= 0x03000000; + } + + SIS_W32(StationControl, p->ctl); + + if (tp->features & F_HAS_RGMII) { + SIS_W32(RGDelay, 0x0441); + SIS_W32(RGDelay, 0x0440); + } + + tp->negotiated_lpa = p->val; + + netif_info(tp, link, dev, "link on %s mode\n", p->msg); + netif_carrier_on(dev); + tp->link_status = LNK_ON; + } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG) + tp->link_status = LNK_OFF; + mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); + +out_unlock: + rtnl_unlock(); +} + +static void sis190_phy_timer(unsigned long __opaque) +{ + struct net_device *dev = (struct net_device *)__opaque; + struct sis190_private *tp = netdev_priv(dev); + + if (likely(netif_running(dev))) + schedule_work(&tp->phy_task); +} + +static inline void sis190_delete_timer(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + + del_timer_sync(&tp->timer); +} + +static inline void sis190_request_timer(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->timer; + + init_timer(timer); + timer->expires = jiffies + SIS190_PHY_TIMEOUT; + timer->data = (unsigned long)dev; + timer->function = sis190_phy_timer; + add_timer(timer); +} + +static void sis190_set_rxbufsize(struct sis190_private *tp, + struct net_device *dev) +{ + unsigned int mtu = dev->mtu; + + tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; + /* RxDesc->size has a licence to kill the lower bits */ + if (tp->rx_buf_sz & 0x07) { + tp->rx_buf_sz += 8; + tp->rx_buf_sz &= RX_BUF_MASK; + } +} + +static int sis190_open(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + int rc = -ENOMEM; + + sis190_set_rxbufsize(tp, dev); + + /* + * Rx and Tx descriptors need 256 bytes alignment. + * pci_alloc_consistent() guarantees a stronger alignment. 
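+ * (the allocation comes back at least page aligned, which more than
+ * covers the 256 byte requirement)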
+ */ + tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma); + if (!tp->TxDescRing) + goto out; + + tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma); + if (!tp->RxDescRing) + goto err_free_tx_0; + + rc = sis190_init_ring(dev); + if (rc < 0) + goto err_free_rx_1; + + sis190_request_timer(dev); + + rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev); + if (rc < 0) + goto err_release_timer_2; + + sis190_hw_start(dev); +out: + return rc; + +err_release_timer_2: + sis190_delete_timer(dev); + sis190_rx_clear(tp); +err_free_rx_1: + pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); +err_free_tx_0: + pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); + goto out; +} + +static void sis190_tx_clear(struct sis190_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_TX_DESC; i++) { + struct sk_buff *skb = tp->Tx_skbuff[i]; + + if (!skb) + continue; + + sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i); + tp->Tx_skbuff[i] = NULL; + dev_kfree_skb(skb); + + tp->dev->stats.tx_dropped++; + } + tp->cur_tx = tp->dirty_tx = 0; +} + +static void sis190_down(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int poll_locked = 0; + + sis190_delete_timer(dev); + + netif_stop_queue(dev); + + do { + spin_lock_irq(&tp->lock); + + sis190_asic_down(ioaddr); + + spin_unlock_irq(&tp->lock); + + synchronize_irq(tp->pci_dev->irq); + + if (!poll_locked) + poll_locked++; + + synchronize_sched(); + + } while (SIS_R32(IntrMask)); + + sis190_tx_clear(tp); + sis190_rx_clear(tp); +} + +static int sis190_close(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + sis190_down(dev); + + free_irq(pdev->irq, dev); + + pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); + pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); + + tp->TxDescRing = NULL; + tp->RxDescRing = NULL; + + return 0; +} + +static netdev_tx_t sis190_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 len, entry, dirty_tx; + struct TxDesc *desc; + dma_addr_t mapping; + + if (unlikely(skb->len < ETH_ZLEN)) { + if (skb_padto(skb, ETH_ZLEN)) { + dev->stats.tx_dropped++; + goto out; + } + len = ETH_ZLEN; + } else { + len = skb->len; + } + + entry = tp->cur_tx % NUM_TX_DESC; + desc = tp->TxDescRing + entry; + + if (unlikely(le32_to_cpu(desc->status) & OWNbit)) { + netif_stop_queue(dev); + netif_err(tp, tx_err, dev, + "BUG! 
Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pci_dev, mapping)) { + netif_err(tp, tx_err, dev, + "PCI mapping failed, dropping packet"); + return NETDEV_TX_BUSY; + } + + tp->Tx_skbuff[entry] = skb; + + desc->PSize = cpu_to_le32(len); + desc->addr = cpu_to_le32(mapping); + + desc->size = cpu_to_le32(len); + if (entry == (NUM_TX_DESC - 1)) + desc->size |= cpu_to_le32(RingEnd); + + wmb(); + + desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit); + if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) { + /* Half Duplex */ + desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN); + if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL)) + desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */ + } + + tp->cur_tx++; + + smp_wmb(); + + SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb); + + dirty_tx = tp->dirty_tx; + if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) { + netif_stop_queue(dev); + smp_rmb(); + if (dirty_tx != tp->dirty_tx) + netif_wake_queue(dev); + } +out: + return NETDEV_TX_OK; +} + +static void sis190_free_phy(struct list_head *first_phy) +{ + struct sis190_phy *cur, *next; + + list_for_each_entry_safe(cur, next, first_phy, list) { + kfree(cur); + } +} + +/** + * sis190_default_phy - Select default PHY for sis190 mac. + * @dev: the net device to probe for + * + * Select first detected PHY with link as default. + * If no one is link on, select PHY whose types is HOME as default. + * If HOME doesn't exist, select LAN. + */ +static u16 sis190_default_phy(struct net_device *dev) +{ + struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan; + struct sis190_private *tp = netdev_priv(dev); + struct mii_if_info *mii_if = &tp->mii_if; + void __iomem *ioaddr = tp->mmio_addr; + u16 status; + + phy_home = phy_default = phy_lan = NULL; + + list_for_each_entry(phy, &tp->first_phy, list) { + status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR); + + // Link ON & Not select default PHY & not ghost PHY. 
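+ // PHYs that are not selected get isolated with autoneg left enabled.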
+ if ((status & BMSR_LSTATUS) && + !phy_default && + (phy->type != UNKNOWN)) { + phy_default = phy; + } else { + status = mdio_read(ioaddr, phy->phy_id, MII_BMCR); + mdio_write(ioaddr, phy->phy_id, MII_BMCR, + status | BMCR_ANENABLE | BMCR_ISOLATE); + if (phy->type == HOME) + phy_home = phy; + else if (phy->type == LAN) + phy_lan = phy; + } + } + + if (!phy_default) { + if (phy_home) + phy_default = phy_home; + else if (phy_lan) + phy_default = phy_lan; + else + phy_default = list_first_entry(&tp->first_phy, + struct sis190_phy, list); + } + + if (mii_if->phy_id != phy_default->phy_id) { + mii_if->phy_id = phy_default->phy_id; + if (netif_msg_probe(tp)) + pr_info("%s: Using transceiver at address %d as default\n", + pci_name(tp->pci_dev), mii_if->phy_id); + } + + status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR); + status &= (~BMCR_ISOLATE); + + mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status); + status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR); + + return status; +} + +static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp, + struct sis190_phy *phy, unsigned int phy_id, + u16 mii_status) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct mii_chip_info *p; + + INIT_LIST_HEAD(&phy->list); + phy->status = mii_status; + phy->phy_id = phy_id; + + phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1); + phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2); + + for (p = mii_chip_table; p->type; p++) { + if ((p->id[0] == phy->id[0]) && + (p->id[1] == (phy->id[1] & 0xfff0))) { + break; + } + } + + if (p->id[1]) { + phy->type = (p->type == MIX) ? + ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ? + LAN : HOME) : p->type; + tp->features |= p->feature; + if (netif_msg_probe(tp)) + pr_info("%s: %s transceiver at address %d\n", + pci_name(tp->pci_dev), p->name, phy_id); + } else { + phy->type = UNKNOWN; + if (netif_msg_probe(tp)) + pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n", + pci_name(tp->pci_dev), + phy->id[0], (phy->id[1] & 0xfff0), phy_id); + } +} + +static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp) +{ + if (tp->features & F_PHY_88E1111) { + void __iomem *ioaddr = tp->mmio_addr; + int phy_id = tp->mii_if.phy_id; + u16 reg[2][2] = { + { 0x808b, 0x0ce1 }, + { 0x808f, 0x0c60 } + }, *p; + + p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1]; + + mdio_write(ioaddr, phy_id, 0x1b, p[0]); + udelay(200); + mdio_write(ioaddr, phy_id, 0x14, p[1]); + udelay(200); + } +} + +/** + * sis190_mii_probe - Probe MII PHY for sis190 + * @dev: the net device to probe for + * + * Search for total of 32 possible mii phy addresses. + * Identify and set current phy if found one, + * return error if it failed to found. + */ +static int sis190_mii_probe(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + struct mii_if_info *mii_if = &tp->mii_if; + void __iomem *ioaddr = tp->mmio_addr; + int phy_id; + int rc = 0; + + INIT_LIST_HEAD(&tp->first_phy); + + for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { + struct sis190_phy *phy; + u16 status; + + status = mdio_read_latched(ioaddr, phy_id, MII_BMSR); + + // Try next mii if the current one is not accessible. 
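+ // (a missing PHY reads BMSR back as all ones or all zeroes)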
+ if (status == 0xffff || status == 0x0000) + continue; + + phy = kmalloc(sizeof(*phy), GFP_KERNEL); + if (!phy) { + sis190_free_phy(&tp->first_phy); + rc = -ENOMEM; + goto out; + } + + sis190_init_phy(dev, tp, phy, phy_id, status); + + list_add(&tp->first_phy, &phy->list); + } + + if (list_empty(&tp->first_phy)) { + if (netif_msg_probe(tp)) + pr_info("%s: No MII transceivers found!\n", + pci_name(tp->pci_dev)); + rc = -EIO; + goto out; + } + + /* Select default PHY for mac */ + sis190_default_phy(dev); + + sis190_mii_probe_88e1111_fixup(tp); + + mii_if->dev = dev; + mii_if->mdio_read = __mdio_read; + mii_if->mdio_write = __mdio_write; + mii_if->phy_id_mask = PHY_ID_ANY; + mii_if->reg_num_mask = MII_REG_ANY; +out: + return rc; +} + +static void sis190_mii_remove(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + + sis190_free_phy(&tp->first_phy); +} + +static void sis190_release_board(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct sis190_private *tp = netdev_priv(dev); + + iounmap(tp->mmio_addr); + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +static struct net_device *sis190_init_board(struct pci_dev *pdev) +{ + struct sis190_private *tp; + struct net_device *dev; + void __iomem *ioaddr; + int rc; + + dev = alloc_etherdev(sizeof(*tp)); + if (!dev) { + rc = -ENOMEM; + goto err_out_0; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->dev = dev; + tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); + + rc = pci_enable_device(pdev); + if (rc < 0) { + if (netif_msg_probe(tp)) + pr_err("%s: enable failure\n", pci_name(pdev)); + goto err_free_dev_1; + } + + rc = -ENODEV; + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + if (netif_msg_probe(tp)) + pr_err("%s: region #0 is no MMIO resource\n", + pci_name(pdev)); + goto err_pci_disable_2; + } + if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) { + if (netif_msg_probe(tp)) + pr_err("%s: invalid PCI region size(s)\n", + pci_name(pdev)); + goto err_pci_disable_2; + } + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc < 0) { + if (netif_msg_probe(tp)) + pr_err("%s: could not request regions\n", + pci_name(pdev)); + goto err_pci_disable_2; + } + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + if (netif_msg_probe(tp)) + pr_err("%s: DMA configuration failed\n", + pci_name(pdev)); + goto err_free_res_3; + } + + pci_set_master(pdev); + + ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE); + if (!ioaddr) { + if (netif_msg_probe(tp)) + pr_err("%s: cannot remap MMIO, aborting\n", + pci_name(pdev)); + rc = -EIO; + goto err_free_res_3; + } + + tp->pci_dev = pdev; + tp->mmio_addr = ioaddr; + tp->link_status = LNK_OFF; + + sis190_irq_mask_and_ack(ioaddr); + + sis190_soft_reset(ioaddr); +out: + return dev; + +err_free_res_3: + pci_release_regions(pdev); +err_pci_disable_2: + pci_disable_device(pdev); +err_free_dev_1: + free_netdev(dev); +err_out_0: + dev = ERR_PTR(rc); + goto out; +} + +static void sis190_tx_timeout(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u8 tmp8; + + /* Disable Tx, if not already */ + tmp8 = SIS_R8(TxControl); + if (tmp8 & CmdTxEnb) + SIS_W8(TxControl, tmp8 & ~CmdTxEnb); + + netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n", + SIS_R32(TxControl), SIS_R32(TxSts)); + + /* Disable interrupts by clearing the interrupt mask. 
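+ * sis190_hw_start() re-enables them once the Tx ring has been cleared.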
*/ + SIS_W32(IntrMask, 0x0000); + + /* Stop a shared interrupt from scavenging while we are. */ + spin_lock_irq(&tp->lock); + sis190_tx_clear(tp); + spin_unlock_irq(&tp->lock); + + /* ...and finally, reset everything. */ + sis190_hw_start(dev); + + netif_wake_queue(dev); +} + +static void sis190_set_rgmii(struct sis190_private *tp, u8 reg) +{ + tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0; +} + +static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, + struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u16 sig; + int i; + + if (netif_msg_probe(tp)) + pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev)); + + /* Check to see if there is a sane EEPROM */ + sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature); + + if ((sig == 0xffff) || (sig == 0x0000)) { + if (netif_msg_probe(tp)) + pr_info("%s: Error EEPROM read %x\n", + pci_name(pdev), sig); + return -EIO; + } + + /* Get MAC address from EEPROM */ + for (i = 0; i < ETH_ALEN / 2; i++) { + u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); + + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w); + } + + sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo)); + + return 0; +} + +/** + * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model + * @pdev: PCI device + * @dev: network device to get address for + * + * SiS96x model, use APC CMOS RAM to store MAC address. + * APC CMOS RAM is accessed through ISA bridge. + * MAC address is read into @net_dev->dev_addr. + */ +static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev, + struct net_device *dev) +{ + static const u16 ids[] = { 0x0965, 0x0966, 0x0968 }; + struct sis190_private *tp = netdev_priv(dev); + struct pci_dev *isa_bridge; + u8 reg, tmp8; + unsigned int i; + + if (netif_msg_probe(tp)) + pr_info("%s: Read MAC address from APC\n", pci_name(pdev)); + + for (i = 0; i < ARRAY_SIZE(ids); i++) { + isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL); + if (isa_bridge) + break; + } + + if (!isa_bridge) { + if (netif_msg_probe(tp)) + pr_info("%s: Can not find ISA bridge\n", + pci_name(pdev)); + return -EIO; + } + + /* Enable port 78h & 79h to access APC Registers. */ + pci_read_config_byte(isa_bridge, 0x48, &tmp8); + reg = (tmp8 & ~0x02); + pci_write_config_byte(isa_bridge, 0x48, reg); + udelay(50); + pci_read_config_byte(isa_bridge, 0x48, ®); + + for (i = 0; i < ETH_ALEN; i++) { + outb(0x9 + i, 0x78); + dev->dev_addr[i] = inb(0x79); + } + + outb(0x12, 0x78); + reg = inb(0x79); + + sis190_set_rgmii(tp, reg); + + /* Restore the value to ISA Bridge */ + pci_write_config_byte(isa_bridge, 0x48, tmp8); + pci_dev_put(isa_bridge); + + return 0; +} + +/** + * sis190_init_rxfilter - Initialize the Rx filter + * @dev: network device to initialize + * + * Set receive filter address to our MAC address + * and enable packet filtering. + */ +static inline void sis190_init_rxfilter(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u16 ctl; + int i; + + ctl = SIS_R16(RxMacControl); + /* + * Disable packet filtering before setting filter. + * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits + * only and followed by RxMacAddr (6 bytes). Strange. 
-- FR + */ + SIS_W16(RxMacControl, ctl & ~0x0f00); + + for (i = 0; i < ETH_ALEN; i++) + SIS_W8(RxMacAddr + i, dev->dev_addr[i]); + + SIS_W16(RxMacControl, ctl); + SIS_PCI_COMMIT(); +} + +static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev) +{ + int rc; + + rc = sis190_get_mac_addr_from_eeprom(pdev, dev); + if (rc < 0) { + u8 reg; + + pci_read_config_byte(pdev, 0x73, ®); + + if (reg & 0x00000001) + rc = sis190_get_mac_addr_from_apc(pdev, dev); + } + return rc; +} + +static void sis190_set_speed_auto(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int phy_id = tp->mii_if.phy_id; + int val; + + netif_info(tp, link, dev, "Enabling Auto-negotiation\n"); + + val = mdio_read(ioaddr, phy_id, MII_ADVERTISE); + + // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0 + // unchanged. + mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) | + ADVERTISE_100FULL | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_10HALF); + + // Enable 1000 Full Mode. + mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL); + + // Enable auto-negotiation and restart auto-negotiation. + mdio_write(ioaddr, phy_id, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); +} + +static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct sis190_private *tp = netdev_priv(dev); + + return mii_ethtool_gset(&tp->mii_if, cmd); +} + +static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct sis190_private *tp = netdev_priv(dev); + + return mii_ethtool_sset(&tp->mii_if, cmd); +} + +static void sis190_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct sis190_private *tp = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), + sizeof(info->bus_info)); +} + +static int sis190_get_regs_len(struct net_device *dev) +{ + return SIS190_REGS_SIZE; +} + +static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct sis190_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + memcpy_fromio(p, tp->mmio_addr, regs->len); + spin_unlock_irqrestore(&tp->lock, flags); +} + +static int sis190_nway_reset(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + + return mii_nway_restart(&tp->mii_if); +} + +static u32 sis190_get_msglevel(struct net_device *dev) +{ + struct sis190_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void sis190_set_msglevel(struct net_device *dev, u32 value) +{ + struct sis190_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const struct ethtool_ops sis190_ethtool_ops = { + .get_settings = sis190_get_settings, + .set_settings = sis190_set_settings, + .get_drvinfo = sis190_get_drvinfo, + .get_regs_len = sis190_get_regs_len, + .get_regs = sis190_get_regs, + .get_link = ethtool_op_get_link, + .get_msglevel = sis190_get_msglevel, + .set_msglevel = sis190_set_msglevel, + .nway_reset = sis190_nway_reset, +}; + +static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct sis190_private *tp = netdev_priv(dev); + + return !netif_running(dev) ? 
-EINVAL : + generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL); +} + +static int sis190_mac_addr(struct net_device *dev, void *p) +{ + int rc; + + rc = eth_mac_addr(dev, p); + if (!rc) + sis190_init_rxfilter(dev); + return rc; +} + +static const struct net_device_ops sis190_netdev_ops = { + .ndo_open = sis190_open, + .ndo_stop = sis190_close, + .ndo_do_ioctl = sis190_ioctl, + .ndo_start_xmit = sis190_start_xmit, + .ndo_tx_timeout = sis190_tx_timeout, + .ndo_set_rx_mode = sis190_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = sis190_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sis190_netpoll, +#endif +}; + +static int sis190_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int printed_version = 0; + struct sis190_private *tp; + struct net_device *dev; + void __iomem *ioaddr; + int rc; + + if (!printed_version) { + if (netif_msg_drv(&debug)) + pr_info(SIS190_DRIVER_NAME " loaded\n"); + printed_version = 1; + } + + dev = sis190_init_board(pdev); + if (IS_ERR(dev)) { + rc = PTR_ERR(dev); + goto out; + } + + pci_set_drvdata(pdev, dev); + + tp = netdev_priv(dev); + ioaddr = tp->mmio_addr; + + rc = sis190_get_mac_addr(pdev, dev); + if (rc < 0) + goto err_release_board; + + sis190_init_rxfilter(dev); + + INIT_WORK(&tp->phy_task, sis190_phy_task); + + dev->netdev_ops = &sis190_netdev_ops; + + dev->ethtool_ops = &sis190_ethtool_ops; + dev->watchdog_timeo = SIS190_TX_TIMEOUT; + + spin_lock_init(&tp->lock); + + rc = sis190_mii_probe(dev); + if (rc < 0) + goto err_release_board; + + rc = register_netdev(dev); + if (rc < 0) + goto err_remove_mii; + + if (netif_msg_probe(tp)) { + netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n", + pci_name(pdev), + sis_chip_info[ent->driver_data].name, + ioaddr, pdev->irq, dev->dev_addr); + netdev_info(dev, "%s mode.\n", + (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); + } + + netif_carrier_off(dev); + + sis190_set_speed_auto(dev); +out: + return rc; + +err_remove_mii: + sis190_mii_remove(dev); +err_release_board: + sis190_release_board(pdev); + goto out; +} + +static void sis190_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct sis190_private *tp = netdev_priv(dev); + + sis190_mii_remove(dev); + cancel_work_sync(&tp->phy_task); + unregister_netdev(dev); + sis190_release_board(pdev); +} + +static struct pci_driver sis190_pci_driver = { + .name = DRV_NAME, + .id_table = sis190_pci_tbl, + .probe = sis190_init_one, + .remove = sis190_remove_one, +}; + +module_pci_driver(sis190_pci_driver); diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c new file mode 100644 index 000000000..fd812d2e5 --- /dev/null +++ b/drivers/net/ethernet/sis/sis900.c @@ -0,0 +1,2517 @@ +/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux. + Copyright 1999 Silicon Integrated System Corporation + Revision: 1.08.10 Apr. 2 2006 + + Modified from the driver which is originally written by Donald Becker. + + This software may be used and distributed according to the terms + of the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on this skeleton fall under the GPL and must retain + the authorship (implicit copyright) notice. + + References: + SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support, + preliminary Rev. 1.0 Jan. 14, 1998 + SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support, + preliminary Rev. 1.0 Nov. 
10, 1998 + SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution, + preliminary Rev. 1.0 Jan. 18, 1998 + + Rev 1.08.10 Apr. 2 2006 Daniele Venzano add vlan (jumbo packets) support + Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support + Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages + Rev 1.08.07 Nov. 2 2003 Daniele Venzano add suspend/resume support + Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support + Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary + Rev 1.08.04 Apr. 25 2002 Mufasa Yang added SiS962 support + Rev 1.08.03 Feb. 1 2002 Matt Domsch update to use library crc32 function + Rev 1.08.02 Nov. 30 2001 Hui-Fen Hsu workaround for EDB & bug fix for dhcp problem + Rev 1.08.01 Aug. 25 2001 Hui-Fen Hsu update for 630ET & workaround for ICS1893 PHY + Rev 1.08.00 Jun. 11 2001 Hui-Fen Hsu workaround for RTL8201 PHY and some bug fix + Rev 1.07.11 Apr. 2 2001 Hui-Fen Hsu updates PCI drivers to use the new pci_set_dma_mask for kernel 2.4.3 + Rev 1.07.10 Mar. 1 2001 Hui-Fen Hsu some bug fix & 635M/B support + Rev 1.07.09 Feb. 9 2001 Dave Jones PCI enable cleanup + Rev 1.07.08 Jan. 8 2001 Lei-Chun Chang added RTL8201 PHY support + Rev 1.07.07 Nov. 29 2000 Lei-Chun Chang added kernel-doc extractable documentation and 630 workaround fix + Rev 1.07.06 Nov. 7 2000 Jeff Garzik some bug fix and cleaning + Rev 1.07.05 Nov. 6 2000 metapirat contribute media type select by ifconfig + Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support + Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule + Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1 + Rev 1.07 Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring + Rev 1.06.04 Feb. 11 2000 Jeff Garzik softnet and init for kernel 2.4 + Rev 1.06.03 Dec. 23 1999 Ollie Lho Third release + Rev 1.06.02 Nov. 23 1999 Ollie Lho bug in mac probing fixed + Rev 1.06.01 Nov. 16 1999 Ollie Lho CRC calculation provide by Joseph Zbiciak (im14u2c@primenet.com) + Rev 1.06 Nov. 4 1999 Ollie Lho (ollie@sis.com.tw) Second release + Rev 1.05.05 Oct. 29 1999 Ollie Lho (ollie@sis.com.tw) Single buffer Tx/Rx + Chin-Shan Li (lcs@sis.com.tw) Added AMD Am79c901 HomePNA PHY support + Rev 1.05 Aug. 7 1999 Jim Huang (cmhuang@sis.com.tw) Initial release +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* Processor type for cache alignment. */ +#include +#include +#include /* User space memory access functions */ + +#include "sis900.h" + +#define SIS900_MODULE_NAME "sis900" +#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006" + +static const char version[] = + KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n"; + +static int max_interrupt_work = 40; +static int multicast_filter_limit = 128; + +static int sis900_debug = -1; /* Use SIS900_DEF_MSG as value */ + +#define SIS900_DEF_MSG \ + (NETIF_MSG_DRV | \ + NETIF_MSG_LINK | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* Time in jiffies before concluding the transmitter is hung. 
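+ * The netdev watchdog then invokes sis900_tx_timeout() via .ndo_tx_timeout.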
*/ +#define TX_TIMEOUT (4*HZ) + +enum { + SIS_900 = 0, + SIS_7016 +}; +static const char * card_names[] = { + "SiS 900 PCI Fast Ethernet", + "SiS 7016 PCI Fast Ethernet" +}; + +static const struct pci_device_id sis900_pci_tbl[] = { + {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900}, + {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7016}, + {0,} +}; +MODULE_DEVICE_TABLE (pci, sis900_pci_tbl); + +static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex); + +static const struct mii_chip_info { + const char * name; + u16 phy_id0; + u16 phy_id1; + u8 phy_types; +#define HOME 0x0001 +#define LAN 0x0002 +#define MIX 0x0003 +#define UNKNOWN 0x0 +} mii_chip_table[] = { + { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, + { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, + { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN }, + { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, + { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, + { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, + { "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME}, + { "ICS LAN PHY", 0x0015, 0xF440, LAN }, + { "ICS LAN PHY", 0x0143, 0xBC70, LAN }, + { "NS 83851 PHY", 0x2000, 0x5C20, MIX }, + { "NS 83847 PHY", 0x2000, 0x5C30, MIX }, + { "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN }, + { "VIA 6103 PHY", 0x0101, 0x8f20, LAN }, + {NULL,}, +}; + +struct mii_phy { + struct mii_phy * next; + int phy_addr; + u16 phy_id0; + u16 phy_id1; + u16 status; + u8 phy_types; +}; + +typedef struct _BufferDesc { + u32 link; + u32 cmdsts; + u32 bufptr; +} BufferDesc; + +struct sis900_private { + struct pci_dev * pci_dev; + + spinlock_t lock; + + struct mii_phy * mii; + struct mii_phy * first_mii; /* record the first mii structure */ + unsigned int cur_phy; + struct mii_if_info mii_info; + + void __iomem *ioaddr; + + struct timer_list timer; /* Link status detection timer. */ + u8 autong_complete; /* 1: auto-negotiate complete */ + + u32 msg_enable; + + unsigned int cur_rx, dirty_rx; /* producer/comsumer pointers for Tx/Rx ring */ + unsigned int cur_tx, dirty_tx; + + /* The saved address of a sent/receive-in-place packet buffer */ + struct sk_buff *tx_skbuff[NUM_TX_DESC]; + struct sk_buff *rx_skbuff[NUM_RX_DESC]; + BufferDesc *tx_ring; + BufferDesc *rx_ring; + + dma_addr_t tx_ring_dma; + dma_addr_t rx_ring_dma; + + unsigned int tx_full; /* The Tx queue is full. 
*/ + u8 host_bridge_rev; + u8 chipset_rev; +}; + +MODULE_AUTHOR("Jim Huang , Ollie Lho "); +MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(multicast_filter_limit, int, 0444); +module_param(max_interrupt_work, int, 0444); +module_param(sis900_debug, int, 0444); +MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtered multicast addresses"); +MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt"); +MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level"); + +#define sw32(reg, val) iowrite32(val, ioaddr + (reg)) +#define sw8(reg, val) iowrite8(val, ioaddr + (reg)) +#define sr32(reg) ioread32(ioaddr + (reg)) +#define sr16(reg) ioread16(ioaddr + (reg)) + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void sis900_poll(struct net_device *dev); +#endif +static int sis900_open(struct net_device *net_dev); +static int sis900_mii_probe (struct net_device * net_dev); +static void sis900_init_rxfilter (struct net_device * net_dev); +static u16 read_eeprom(void __iomem *ioaddr, int location); +static int mdio_read(struct net_device *net_dev, int phy_id, int location); +static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val); +static void sis900_timer(unsigned long data); +static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy); +static void sis900_tx_timeout(struct net_device *net_dev); +static void sis900_init_tx_ring(struct net_device *net_dev); +static void sis900_init_rx_ring(struct net_device *net_dev); +static netdev_tx_t sis900_start_xmit(struct sk_buff *skb, + struct net_device *net_dev); +static int sis900_rx(struct net_device *net_dev); +static void sis900_finish_xmit (struct net_device *net_dev); +static irqreturn_t sis900_interrupt(int irq, void *dev_instance); +static int sis900_close(struct net_device *net_dev); +static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd); +static u16 sis900_mcast_bitnr(u8 *addr, u8 revision); +static void set_rx_mode(struct net_device *net_dev); +static void sis900_reset(struct net_device *net_dev); +static void sis630_set_eq(struct net_device *net_dev, u8 revision); +static int sis900_set_config(struct net_device *dev, struct ifmap *map); +static u16 sis900_default_phy(struct net_device * net_dev); +static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy); +static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr); +static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr); +static void sis900_set_mode(struct sis900_private *, int speed, int duplex); +static const struct ethtool_ops sis900_ethtool_ops; + +/** + * sis900_get_mac_addr - Get MAC address for stand alone SiS900 model + * @pci_dev: the sis900 pci device + * @net_dev: the net device to get address for + * + * Older SiS900 and friends, use EEPROM to store MAC address. + * MAC address is read from read_eeprom() into @net_dev->dev_addr. 
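+ * Returns 1 on success, 0 if the EEPROM signature looks invalid.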
+ */
+
+static int sis900_get_mac_addr(struct pci_dev *pci_dev,
+				struct net_device *net_dev)
+{
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
+	u16 signature;
+	int i;
+
+	/* check to see if we have sane EEPROM */
+	signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
+	if (signature == 0xffff || signature == 0x0000) {
+		printk(KERN_WARNING "%s: Error EEPROM read %x\n",
+		       pci_name(pci_dev), signature);
+		return 0;
+	}
+
+	/* get MAC address from EEPROM */
+	for (i = 0; i < 3; i++)
+		((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+
+	return 1;
+}
+
+/**
+ * sis630e_get_mac_addr - Get MAC address for SiS630E model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * The SiS630E model stores the MAC address in APC CMOS RAM, which is
+ * accessed through the ISA bridge.
+ * MAC address is read into @net_dev->dev_addr.
+ */
+
+static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
+				struct net_device *net_dev)
+{
+	struct pci_dev *isa_bridge = NULL;
+	u8 reg;
+	int i;
+
+	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, isa_bridge);
+	if (!isa_bridge)
+		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0018, isa_bridge);
+	if (!isa_bridge) {
+		printk(KERN_WARNING "%s: Cannot find ISA bridge\n",
+		       pci_name(pci_dev));
+		return 0;
+	}
+	pci_read_config_byte(isa_bridge, 0x48, &reg);
+	pci_write_config_byte(isa_bridge, 0x48, reg | 0x40);
+
+	for (i = 0; i < 6; i++) {
+		outb(0x09 + i, 0x70);
+		((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+	}
+
+	pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
+	pci_dev_put(isa_bridge);
+
+	return 1;
+}
+
+
+/**
+ * sis635_get_mac_addr - Get MAC address for SiS635 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * On the SiS635 model, set the MAC reload bit to load the MAC address
+ * from the APC into rfdr; rfdr is accessed through rfcr. The MAC
+ * address is read into @net_dev->dev_addr.
+ */
+
+static int sis635_get_mac_addr(struct pci_dev *pci_dev,
+			       struct net_device *net_dev)
+{
+	struct sis900_private *sis_priv = netdev_priv(net_dev);
+	void __iomem *ioaddr = sis_priv->ioaddr;
+	u32 rfcrSave;
+	u32 i;
+
+	rfcrSave = sr32(rfcr);
+
+	sw32(cr, rfcrSave | RELOAD);
+	sw32(cr, 0);
+
+	/* disable packet filtering before setting filter */
+	sw32(rfcr, rfcrSave & ~RFEN);
+
+	/* load MAC addr to filter data register */
+	for (i = 0 ; i < 3 ; i++) {
+		sw32(rfcr, (i << RFADDR_shift));
+		*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
+	}
+
+	/* enable packet filtering */
+	sw32(rfcr, rfcrSave | RFEN);
+
+	return 1;
+}
+
+/**
+ * sis96x_get_mac_addr - Get MAC address for SiS962 or SiS963 model
+ * @pci_dev: the sis900 pci device
+ * @net_dev: the net device to get address for
+ *
+ * The SiS962 and SiS963 models store the MAC address in an EEPROM that
+ * is shared between the LAN and 1394 controllers. To access it, the LAN
+ * side first sends the EEREQ signal to the hardware and waits for EEGNT.
+ * While EEGNT is asserted the LAN controller may access the EEPROM;
+ * afterwards it sends EEDONE to release the EEPROM again.
+ * The EEPROM map of the SiS962/963 differs from the SiS900, and its
+ * signature field is meaningless.
+ * MAC address is read into @net_dev->dev_addr.
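+ *
+ * In outline (an editorial sketch, not additional driver code), the
+ * arbitration performed below is:
+ *
+ *	sw32(mear, EEREQ);		request EEPROM access
+ *	while (!(sr32(mear) & EEGNT))	wait (bounded) for the grant
+ *		udelay(1);
+ *	... read_eeprom(ioaddr, i + EEPROMMACAddr) ...
+ *	sw32(mear, EEDONE);		release the EEPROM again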
+ */ + +static int sis96x_get_mac_addr(struct pci_dev *pci_dev, + struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int wait, rc = 0; + + sw32(mear, EEREQ); + for (wait = 0; wait < 2000; wait++) { + if (sr32(mear) & EEGNT) { + u16 *mac = (u16 *)net_dev->dev_addr; + int i; + + /* get MAC address from EEPROM */ + for (i = 0; i < 3; i++) + mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr); + + rc = 1; + break; + } + udelay(1); + } + sw32(mear, EEDONE); + return rc; +} + +static const struct net_device_ops sis900_netdev_ops = { + .ndo_open = sis900_open, + .ndo_stop = sis900_close, + .ndo_start_xmit = sis900_start_xmit, + .ndo_set_config = sis900_set_config, + .ndo_set_rx_mode = set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_do_ioctl = mii_ioctl, + .ndo_tx_timeout = sis900_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = sis900_poll, +#endif +}; + +/** + * sis900_probe - Probe for sis900 device + * @pci_dev: the sis900 pci device + * @pci_id: the pci device ID + * + * Check and probe sis900 net device for @pci_dev. + * Get mac address according to the chip revision, + * and assign SiS900-specific entries in the device structure. + * ie: sis900_open(), sis900_start_xmit(), sis900_close(), etc. + */ + +static int sis900_probe(struct pci_dev *pci_dev, + const struct pci_device_id *pci_id) +{ + struct sis900_private *sis_priv; + struct net_device *net_dev; + struct pci_dev *dev; + dma_addr_t ring_dma; + void *ring_space; + void __iomem *ioaddr; + int i, ret; + const char *card_name = card_names[pci_id->driver_data]; + const char *dev_name = pci_name(pci_dev); + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + static int printed_version; + if (!printed_version++) + printk(version); +#endif + + /* setup various bits in PCI command register */ + ret = pci_enable_device(pci_dev); + if(ret) return ret; + + i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + if(i){ + printk(KERN_ERR "sis900.c: architecture does not support " + "32bit PCI busmaster DMA\n"); + return i; + } + + pci_set_master(pci_dev); + + net_dev = alloc_etherdev(sizeof(struct sis900_private)); + if (!net_dev) + return -ENOMEM; + SET_NETDEV_DEV(net_dev, &pci_dev->dev); + + /* We do a request_region() to register /proc/ioports info. */ + ret = pci_request_regions(pci_dev, "sis900"); + if (ret) + goto err_out; + + /* IO region. */ + ioaddr = pci_iomap(pci_dev, 0, 0); + if (!ioaddr) { + ret = -ENOMEM; + goto err_out_cleardev; + } + + sis_priv = netdev_priv(net_dev); + sis_priv->ioaddr = ioaddr; + sis_priv->pci_dev = pci_dev; + spin_lock_init(&sis_priv->lock); + + pci_set_drvdata(pci_dev, net_dev); + + ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); + if (!ring_space) { + ret = -ENOMEM; + goto err_out_unmap; + } + sis_priv->tx_ring = ring_space; + sis_priv->tx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma); + if (!ring_space) { + ret = -ENOMEM; + goto err_unmap_tx; + } + sis_priv->rx_ring = ring_space; + sis_priv->rx_ring_dma = ring_dma; + + /* The SiS900-specific entries in the device structure. 
*/ + net_dev->netdev_ops = &sis900_netdev_ops; + net_dev->watchdog_timeo = TX_TIMEOUT; + net_dev->ethtool_ops = &sis900_ethtool_ops; + + if (sis900_debug > 0) + sis_priv->msg_enable = sis900_debug; + else + sis_priv->msg_enable = SIS900_DEF_MSG; + + sis_priv->mii_info.dev = net_dev; + sis_priv->mii_info.mdio_read = mdio_read; + sis_priv->mii_info.mdio_write = mdio_write; + sis_priv->mii_info.phy_id_mask = 0x1f; + sis_priv->mii_info.reg_num_mask = 0x1f; + + /* Get Mac address according to the chip revision */ + sis_priv->chipset_rev = pci_dev->revision; + if(netif_msg_probe(sis_priv)) + printk(KERN_DEBUG "%s: detected revision %2.2x, " + "trying to get MAC address...\n", + dev_name, sis_priv->chipset_rev); + + ret = 0; + if (sis_priv->chipset_rev == SIS630E_900_REV) + ret = sis630e_get_mac_addr(pci_dev, net_dev); + else if ((sis_priv->chipset_rev > 0x81) && (sis_priv->chipset_rev <= 0x90) ) + ret = sis635_get_mac_addr(pci_dev, net_dev); + else if (sis_priv->chipset_rev == SIS96x_900_REV) + ret = sis96x_get_mac_addr(pci_dev, net_dev); + else + ret = sis900_get_mac_addr(pci_dev, net_dev); + + if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) { + eth_hw_addr_random(net_dev); + printk(KERN_WARNING "%s: Unreadable or invalid MAC address," + "using random generated one\n", dev_name); + } + + /* 630ET : set the mii access mode as software-mode */ + if (sis_priv->chipset_rev == SIS630ET_900_REV) + sw32(cr, ACCESSMODE | sr32(cr)); + + /* probe for mii transceiver */ + if (sis900_mii_probe(net_dev) == 0) { + printk(KERN_WARNING "%s: Error probing MII device.\n", + dev_name); + ret = -ENODEV; + goto err_unmap_rx; + } + + /* save our host bridge revision */ + dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL); + if (dev) { + sis_priv->host_bridge_rev = dev->revision; + pci_dev_put(dev); + } + + ret = register_netdev(net_dev); + if (ret) + goto err_unmap_rx; + + /* print some information about our NIC */ + printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n", + net_dev->name, card_name, ioaddr, pci_dev->irq, + net_dev->dev_addr); + + /* Detect Wake on Lan support */ + ret = (sr32(CFGPMC) & PMESP) >> 27; + if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) + printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); + + return 0; + +err_unmap_rx: + pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, + sis_priv->rx_ring_dma); +err_unmap_tx: + pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, + sis_priv->tx_ring_dma); +err_out_unmap: + pci_iounmap(pci_dev, ioaddr); +err_out_cleardev: + pci_release_regions(pci_dev); + err_out: + free_netdev(net_dev); + return ret; +} + +/** + * sis900_mii_probe - Probe MII PHY for sis900 + * @net_dev: the net device to probe for + * + * Search for total of 32 possible mii phy addresses. + * Identify and set current phy if found one, + * return error if it failed to found. 
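+ *
+ * As a rough illustration (editorial, not additional driver code), each
+ * candidate address is identified by its two MII ID registers; the low
+ * four bits of ID register 1 carry the silicon revision, which is why
+ * the lookup below masks them off:
+ *
+ *	u16 id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
+ *	u16 id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
+ *
+ *	if (id0 == mii_chip_table[i].phy_id0 &&
+ *	    (id1 & 0xFFF0) == mii_chip_table[i].phy_id1)
+ *		the transceiver matches table entry i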
+ */ + +static int sis900_mii_probe(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + const char *dev_name = pci_name(sis_priv->pci_dev); + u16 poll_bit = MII_STAT_LINK, status = 0; + unsigned long timeout = jiffies + 5 * HZ; + int phy_addr; + + sis_priv->mii = NULL; + + /* search for total of 32 possible mii phy addresses */ + for (phy_addr = 0; phy_addr < 32; phy_addr++) { + struct mii_phy * mii_phy = NULL; + u16 mii_status; + int i; + + mii_phy = NULL; + for(i = 0; i < 2; i++) + mii_status = mdio_read(net_dev, phy_addr, MII_STATUS); + + if (mii_status == 0xffff || mii_status == 0x0000) { + if (netif_msg_probe(sis_priv)) + printk(KERN_DEBUG "%s: MII at address %d" + " not accessible\n", + dev_name, phy_addr); + continue; + } + + if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) { + mii_phy = sis_priv->first_mii; + while (mii_phy) { + struct mii_phy *phy; + phy = mii_phy; + mii_phy = mii_phy->next; + kfree(phy); + } + return 0; + } + + mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0); + mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1); + mii_phy->phy_addr = phy_addr; + mii_phy->status = mii_status; + mii_phy->next = sis_priv->mii; + sis_priv->mii = mii_phy; + sis_priv->first_mii = mii_phy; + + for (i = 0; mii_chip_table[i].phy_id1; i++) + if ((mii_phy->phy_id0 == mii_chip_table[i].phy_id0 ) && + ((mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1)){ + mii_phy->phy_types = mii_chip_table[i].phy_types; + if (mii_chip_table[i].phy_types == MIX) + mii_phy->phy_types = + (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX)) ? LAN : HOME; + printk(KERN_INFO "%s: %s transceiver found " + "at address %d.\n", + dev_name, + mii_chip_table[i].name, + phy_addr); + break; + } + + if( !mii_chip_table[i].phy_id1 ) { + printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n", + dev_name, phy_addr); + mii_phy->phy_types = UNKNOWN; + } + } + + if (sis_priv->mii == NULL) { + printk(KERN_INFO "%s: No MII transceivers found!\n", dev_name); + return 0; + } + + /* select default PHY for mac */ + sis_priv->mii = NULL; + sis900_default_phy( net_dev ); + + /* Reset phy if default phy is internal sis900 */ + if ((sis_priv->mii->phy_id0 == 0x001D) && + ((sis_priv->mii->phy_id1&0xFFF0) == 0x8000)) + status = sis900_reset_phy(net_dev, sis_priv->cur_phy); + + /* workaround for ICS1893 PHY */ + if ((sis_priv->mii->phy_id0 == 0x0015) && + ((sis_priv->mii->phy_id1&0xFFF0) == 0xF440)) + mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200); + + if(status & MII_STAT_LINK){ + while (poll_bit) { + yield(); + + poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit); + if (time_after_eq(jiffies, timeout)) { + printk(KERN_WARNING "%s: reset phy and link down now\n", + dev_name); + return -ETIME; + } + } + } + + if (sis_priv->chipset_rev == SIS630E_900_REV) { + /* SiS 630E has some bugs on default value of PHY registers */ + mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1); + mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22); + mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00); + mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0); + //mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, 0x1000); + } + + if (sis_priv->mii->status & MII_STAT_LINK) + netif_carrier_on(net_dev); + else + netif_carrier_off(net_dev); + + return 1; +} + +/** + * sis900_default_phy - Select default PHY for sis900 mac. 
+ * @net_dev: the net device to probe for + * + * Select first detected PHY with link as default. + * If no one is link on, select PHY whose types is HOME as default. + * If HOME doesn't exist, select LAN. + */ + +static u16 sis900_default_phy(struct net_device * net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + struct mii_phy *phy = NULL, *phy_home = NULL, + *default_phy = NULL, *phy_lan = NULL; + u16 status; + + for (phy=sis_priv->first_mii; phy; phy=phy->next) { + status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); + status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); + + /* Link ON & Not select default PHY & not ghost PHY */ + if ((status & MII_STAT_LINK) && !default_phy && + (phy->phy_types != UNKNOWN)) + default_phy = phy; + else { + status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL); + mdio_write(net_dev, phy->phy_addr, MII_CONTROL, + status | MII_CNTL_AUTO | MII_CNTL_ISOLATE); + if (phy->phy_types == HOME) + phy_home = phy; + else if(phy->phy_types == LAN) + phy_lan = phy; + } + } + + if (!default_phy && phy_home) + default_phy = phy_home; + else if (!default_phy && phy_lan) + default_phy = phy_lan; + else if (!default_phy) + default_phy = sis_priv->first_mii; + + if (sis_priv->mii != default_phy) { + sis_priv->mii = default_phy; + sis_priv->cur_phy = default_phy->phy_addr; + printk(KERN_INFO "%s: Using transceiver found at address %d as default\n", + pci_name(sis_priv->pci_dev), sis_priv->cur_phy); + } + + sis_priv->mii_info.phy_id = sis_priv->cur_phy; + + status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL); + status &= (~MII_CNTL_ISOLATE); + + mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status); + status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); + status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); + + return status; +} + + +/** + * sis900_set_capability - set the media capability of network adapter. + * @net_dev : the net device to probe for + * @phy : default PHY + * + * Set the media capability of network adapter according to + * mii status register. It's necessary before auto-negotiate. + */ + +static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy) +{ + u16 cap; + u16 status; + + status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); + status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); + + cap = MII_NWAY_CSMA_CD | + ((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) | + ((phy->status & MII_STAT_CAN_TX) ? MII_NWAY_TX:0) | + ((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)| + ((phy->status & MII_STAT_CAN_T) ? MII_NWAY_T:0); + + mdio_write(net_dev, phy->phy_addr, MII_ANADV, cap); +} + + +/* Delay between EEPROM clock transitions. */ +#define eeprom_delay() sr32(mear) + +/** + * read_eeprom - Read Serial EEPROM + * @ioaddr: base i/o address + * @location: the EEPROM location to read + * + * Read Serial EEPROM through EEPROM Access Register. + * Note that location is in word (16 bits) unit + */ + +static u16 read_eeprom(void __iomem *ioaddr, int location) +{ + u32 read_cmd = location | EEread; + int i; + u16 retval = 0; + + sw32(mear, 0); + eeprom_delay(); + sw32(mear, EECS); + eeprom_delay(); + + /* Shift the read command (9) bits out. */ + for (i = 8; i >= 0; i--) { + u32 dataval = (read_cmd & (1 << i)) ? 
EEDI | EECS : EECS;
+
+		sw32(mear, dataval);
+		eeprom_delay();
+		sw32(mear, dataval | EECLK);
+		eeprom_delay();
+	}
+	sw32(mear, EECS);
+	eeprom_delay();
+
+	/* read the 16-bits data in */
+	for (i = 16; i > 0; i--) {
+		sw32(mear, EECS);
+		eeprom_delay();
+		sw32(mear, EECS | EECLK);
+		eeprom_delay();
+		retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
+		eeprom_delay();
+	}
+
+	/* Terminate the EEPROM access. */
+	sw32(mear, 0);
+	eeprom_delay();
+
+	return retval;
+}
+
+/* Read and write the MII management registers using software-generated
+   serial MDIO protocol. Note that the command bits and data bits are
+   sent out separately. */
+#define mdio_delay()	sr32(mear)
+
+static void mdio_idle(struct sis900_private *sp)
+{
+	void __iomem *ioaddr = sp->ioaddr;
+
+	sw32(mear, MDIO | MDDIR);
+	mdio_delay();
+	sw32(mear, MDIO | MDDIR | MDC);
+}
+
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_reset(struct sis900_private *sp)
+{
+	void __iomem *ioaddr = sp->ioaddr;
+	int i;
+
+	for (i = 31; i >= 0; i--) {
+		sw32(mear, MDDIR | MDIO);
+		mdio_delay();
+		sw32(mear, MDDIR | MDIO | MDC);
+		mdio_delay();
+	}
+}
+
+/**
+ * mdio_read - read MII PHY register
+ * @net_dev: the net device to read
+ * @phy_id: the phy address to read
+ * @location: the phy register id to read
+ *
+ * Read MII registers through MDIO and MDC
+ * using MDIO management frame structure and protocol (defined by ISO/IEC).
+ * Please see SiS7014 or ICS spec.
+ */
+
+static int mdio_read(struct net_device *net_dev, int phy_id, int location)
+{
+	int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
+	u16 retval = 0;
+	int i;
+
+	mdio_reset(sp);
+	mdio_idle(sp);
+
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+
+		sw32(mear, dataval);
+		mdio_delay();
+		sw32(mear, dataval | MDC);
+		mdio_delay();
+	}
+
+	/* Read the 16 data bits. */
+	for (i = 16; i > 0; i--) {
+		sw32(mear, 0);
+		mdio_delay();
+		retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
+		sw32(mear, MDC);
+		mdio_delay();
+	}
+	sw32(mear, 0x00);
+
+	return retval;
+}
+
+/**
+ * mdio_write - write MII PHY register
+ * @net_dev: the net device to write
+ * @phy_id: the phy address to write
+ * @location: the phy register id to write
+ * @value: the register value to write with
+ *
+ * Write MII registers with @value through MDIO and MDC
+ * using MDIO management frame structure and protocol (defined by ISO/IEC).
+ * Please see SiS7014 or ICS spec.
+ */
+
+static void mdio_write(struct net_device *net_dev, int phy_id, int location,
+			int value)
+{
+	int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
+	struct sis900_private *sp = netdev_priv(net_dev);
+	void __iomem *ioaddr = sp->ioaddr;
+	int i;
+
+	mdio_reset(sp);
+	mdio_idle(sp);
+
+	/* Shift the command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
+
+		sw8(mear, dataval);
+		mdio_delay();
+		sw8(mear, dataval | MDC);
+		mdio_delay();
+	}
+	mdio_delay();
+
+	/* Shift the value bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
+
+		sw32(mear, dataval);
+		mdio_delay();
+		sw32(mear, dataval | MDC);
+		mdio_delay();
+	}
+	mdio_delay();
+
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		sw8(mear, 0);
+		mdio_delay();
+		sw8(mear, MDC);
+		mdio_delay();
+	}
+	sw32(mear, 0x00);
+}
+
+
+/**
+ * sis900_reset_phy - reset sis900 mii phy.
+ * @net_dev: the net device to write
+ * @phy_addr: default phy address
+ *
+ * Some specific PHYs can't work properly without a reset.
+ * This function will be called during initialization and + * link status change from ON to DOWN. + */ + +static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr) +{ + int i; + u16 status; + + for (i = 0; i < 2; i++) + status = mdio_read(net_dev, phy_addr, MII_STATUS); + + mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET ); + + return status; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. +*/ +static void sis900_poll(struct net_device *dev) +{ + struct sis900_private *sp = netdev_priv(dev); + const int irq = sp->pci_dev->irq; + + disable_irq(irq); + sis900_interrupt(irq, dev); + enable_irq(irq); +} +#endif + +/** + * sis900_open - open sis900 device + * @net_dev: the net device to open + * + * Do some initialization and start net interface. + * enable interrupts and set sis900 timer. + */ + +static int +sis900_open(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int ret; + + /* Soft reset the chip. */ + sis900_reset(net_dev); + + /* Equalizer workaround Rule */ + sis630_set_eq(net_dev, sis_priv->chipset_rev); + + ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED, + net_dev->name, net_dev); + if (ret) + return ret; + + sis900_init_rxfilter(net_dev); + + sis900_init_tx_ring(net_dev); + sis900_init_rx_ring(net_dev); + + set_rx_mode(net_dev); + + netif_start_queue(net_dev); + + /* Workaround for EDB */ + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); + + /* Enable all known interrupts by setting the interrupt mask. */ + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(cr, RxENA | sr32(cr)); + sw32(ier, IE); + + sis900_check_mode(net_dev, sis_priv->mii); + + /* Set the timer to switch to check for link beat and perhaps switch + to an alternate media type. */ + init_timer(&sis_priv->timer); + sis_priv->timer.expires = jiffies + HZ; + sis_priv->timer.data = (unsigned long)net_dev; + sis_priv->timer.function = sis900_timer; + add_timer(&sis_priv->timer); + + return 0; +} + +/** + * sis900_init_rxfilter - Initialize the Rx filter + * @net_dev: the net device to initialize for + * + * Set receive filter address to our MAC address + * and enable packet filtering. 
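+ *
+ * Sketch of one load step (for illustration only, not additional driver
+ * code): for i = 0, 1, 2 the loop below selects filter address word i
+ * through rfcr and writes the matching 16 bits of the MAC address
+ * through rfdr:
+ *
+ *	sw32(rfcr, i << RFADDR_shift);
+ *	sw32(rfdr, ((u16 *)net_dev->dev_addr)[i]);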
+ */ + +static void +sis900_init_rxfilter (struct net_device * net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + u32 rfcrSave; + u32 i; + + rfcrSave = sr32(rfcr); + + /* disable packet filtering before setting filter */ + sw32(rfcr, rfcrSave & ~RFEN); + + /* load MAC addr to filter data register */ + for (i = 0 ; i < 3 ; i++) { + u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i); + + sw32(rfcr, i << RFADDR_shift); + sw32(rfdr, w); + + if (netif_msg_hw(sis_priv)) { + printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n", + net_dev->name, i, sr32(rfdr)); + } + } + + /* enable packet filtering */ + sw32(rfcr, rfcrSave | RFEN); +} + +/** + * sis900_init_tx_ring - Initialize the Tx descriptor ring + * @net_dev: the net device to initialize for + * + * Initialize the Tx descriptor ring, + */ + +static void +sis900_init_tx_ring(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int i; + + sis_priv->tx_full = 0; + sis_priv->dirty_tx = sis_priv->cur_tx = 0; + + for (i = 0; i < NUM_TX_DESC; i++) { + sis_priv->tx_skbuff[i] = NULL; + + sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma + + ((i+1)%NUM_TX_DESC)*sizeof(BufferDesc); + sis_priv->tx_ring[i].cmdsts = 0; + sis_priv->tx_ring[i].bufptr = 0; + } + + /* load Transmit Descriptor Register */ + sw32(txdp, sis_priv->tx_ring_dma); + if (netif_msg_hw(sis_priv)) + printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n", + net_dev->name, sr32(txdp)); +} + +/** + * sis900_init_rx_ring - Initialize the Rx descriptor ring + * @net_dev: the net device to initialize for + * + * Initialize the Rx descriptor ring, + * and pre-allocate recevie buffers (socket buffer) + */ + +static void +sis900_init_rx_ring(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int i; + + sis_priv->cur_rx = 0; + sis_priv->dirty_rx = 0; + + /* init RX descriptor */ + for (i = 0; i < NUM_RX_DESC; i++) { + sis_priv->rx_skbuff[i] = NULL; + + sis_priv->rx_ring[i].link = sis_priv->rx_ring_dma + + ((i+1)%NUM_RX_DESC)*sizeof(BufferDesc); + sis_priv->rx_ring[i].cmdsts = 0; + sis_priv->rx_ring[i].bufptr = 0; + } + + /* allocate sock buffers */ + for (i = 0; i < NUM_RX_DESC; i++) { + struct sk_buff *skb; + + if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) { + /* not enough memory for skbuff, this makes a "hole" + on the buffer ring, it is not clear how the + hardware will react to this kind of degenerated + buffer */ + break; + } + sis_priv->rx_skbuff[i] = skb; + sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; + sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, + skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, + sis_priv->rx_ring[i].bufptr))) { + dev_kfree_skb(skb); + sis_priv->rx_skbuff[i] = NULL; + break; + } + } + sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC); + + /* load Receive Descriptor Register */ + sw32(rxdp, sis_priv->rx_ring_dma); + if (netif_msg_hw(sis_priv)) + printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n", + net_dev->name, sr32(rxdp)); +} + +/** + * sis630_set_eq - set phy equalizer value for 630 LAN + * @net_dev: the net device to set equalizer value + * @revision: 630 LAN revision number + * + * 630E equalizer workaround rule(Cyrus Huang 08/15) + * PHY register 14h(Test) + * Bit 14: 0 -- Automatically detect (default) + * 1 -- Manually set 
Equalizer filter + * Bit 13: 0 -- (Default) + * 1 -- Speed up convergence of equalizer setting + * Bit 9 : 0 -- (Default) + * 1 -- Disable Baseline Wander + * Bit 3~7 -- Equalizer filter setting + * Link ON: Set Bit 9, 13 to 1, Bit 14 to 0 + * Then calculate equalizer value + * Then set equalizer value, and set Bit 14 to 1, Bit 9 to 0 + * Link Off:Set Bit 13 to 1, Bit 14 to 0 + * Calculate Equalizer value: + * When Link is ON and Bit 14 is 0, SIS900PHY will auto-detect proper equalizer value. + * When the equalizer is stable, this value is not a fixed value. It will be within + * a small range(eg. 7~9). Then we get a minimum and a maximum value(eg. min=7, max=9) + * 0 <= max <= 4 --> set equalizer to max + * 5 <= max <= 14 --> set equalizer to max+1 or set equalizer to max+2 if max == min + * max >= 15 --> set equalizer to max+5 or set equalizer to max+6 if max == min + */ + +static void sis630_set_eq(struct net_device *net_dev, u8 revision) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + u16 reg14h, eq_value=0, max_value=0, min_value=0; + int i, maxcount=10; + + if ( !(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV || + revision == SIS630A_900_REV || revision == SIS630ET_900_REV) ) + return; + + if (netif_carrier_ok(net_dev)) { + reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV); + mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, + (0x2200 | reg14h) & 0xBFFF); + for (i=0; i < maxcount; i++) { + eq_value = (0x00F8 & mdio_read(net_dev, + sis_priv->cur_phy, MII_RESV)) >> 3; + if (i == 0) + max_value=min_value=eq_value; + max_value = (eq_value > max_value) ? + eq_value : max_value; + min_value = (eq_value < min_value) ? + eq_value : min_value; + } + /* 630E rule to determine the equalizer value */ + if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV || + revision == SIS630ET_900_REV) { + if (max_value < 5) + eq_value = max_value; + else if (max_value >= 5 && max_value < 15) + eq_value = (max_value == min_value) ? + max_value+2 : max_value+1; + else if (max_value >= 15) + eq_value=(max_value == min_value) ? 
+ max_value+6 : max_value+5; + } + /* 630B0&B1 rule to determine the equalizer value */ + if (revision == SIS630A_900_REV && + (sis_priv->host_bridge_rev == SIS630B0 || + sis_priv->host_bridge_rev == SIS630B1)) { + if (max_value == 0) + eq_value = 3; + else + eq_value = (max_value + min_value + 1)/2; + } + /* write equalizer value and setting */ + reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV); + reg14h = (reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8); + reg14h = (reg14h | 0x6000) & 0xFDFF; + mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h); + } else { + reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV); + if (revision == SIS630A_900_REV && + (sis_priv->host_bridge_rev == SIS630B0 || + sis_priv->host_bridge_rev == SIS630B1)) + mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, + (reg14h | 0x2200) & 0xBFFF); + else + mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, + (reg14h | 0x2000) & 0xBFFF); + } +} + +/** + * sis900_timer - sis900 timer routine + * @data: pointer to sis900 net device + * + * On each timer ticks we check two things, + * link status (ON/OFF) and link mode (10/100/Full/Half) + */ + +static void sis900_timer(unsigned long data) +{ + struct net_device *net_dev = (struct net_device *)data; + struct sis900_private *sis_priv = netdev_priv(net_dev); + struct mii_phy *mii_phy = sis_priv->mii; + static const int next_tick = 5*HZ; + int speed = 0, duplex = 0; + u16 status; + + status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); + status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS); + + /* Link OFF -> ON */ + if (!netif_carrier_ok(net_dev)) { + LookForLink: + /* Search for new PHY */ + status = sis900_default_phy(net_dev); + mii_phy = sis_priv->mii; + + if (status & MII_STAT_LINK) { + WARN_ON(!(status & MII_STAT_AUTO_DONE)); + + sis900_read_mode(net_dev, &speed, &duplex); + if (duplex) { + sis900_set_mode(sis_priv, speed, duplex); + sis630_set_eq(net_dev, sis_priv->chipset_rev); + netif_carrier_on(net_dev); + } + } + } else { + /* Link ON -> OFF */ + if (!(status & MII_STAT_LINK)){ + netif_carrier_off(net_dev); + if(netif_msg_link(sis_priv)) + printk(KERN_INFO "%s: Media Link Off\n", net_dev->name); + + /* Change mode issue */ + if ((mii_phy->phy_id0 == 0x001D) && + ((mii_phy->phy_id1 & 0xFFF0) == 0x8000)) + sis900_reset_phy(net_dev, sis_priv->cur_phy); + + sis630_set_eq(net_dev, sis_priv->chipset_rev); + + goto LookForLink; + } + } + + sis_priv->timer.expires = jiffies + next_tick; + add_timer(&sis_priv->timer); +} + +/** + * sis900_check_mode - check the media mode for sis900 + * @net_dev: the net device to be checked + * @mii_phy: the mii phy + * + * Older driver gets the media mode from mii status output + * register. Now we set our media capability and auto-negotiate + * to get the upper bound of speed and duplex between two ends. + * If the types of mii phy is HOME, it doesn't need to auto-negotiate + * and autong_complete should be set to 1. 
+ */ + +static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + int speed, duplex; + + if (mii_phy->phy_types == LAN) { + sw32(cfg, ~EXD & sr32(cfg)); + sis900_set_capability(net_dev , mii_phy); + sis900_auto_negotiate(net_dev, sis_priv->cur_phy); + } else { + sw32(cfg, EXD | sr32(cfg)); + speed = HW_SPEED_HOME; + duplex = FDX_CAPABLE_HALF_SELECTED; + sis900_set_mode(sis_priv, speed, duplex); + sis_priv->autong_complete = 1; + } +} + +/** + * sis900_set_mode - Set the media mode of mac register. + * @sp: the device private data + * @speed : the transmit speed to be determined + * @duplex: the duplex mode to be determined + * + * Set the media mode of mac register txcfg/rxcfg according to + * speed and duplex of phy. Bit EDB_MASTER_EN indicates the EDB + * bus is used instead of PCI bus. When this bit is set 1, the + * Max DMA Burst Size for TX/RX DMA should be no larger than 16 + * double words. + */ + +static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex) +{ + void __iomem *ioaddr = sp->ioaddr; + u32 tx_flags = 0, rx_flags = 0; + + if (sr32( cfg) & EDB_MASTER_EN) { + tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) | + (TX_FILL_THRESH << TxFILLT_shift); + rx_flags = DMA_BURST_64 << RxMXDMA_shift; + } else { + tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) | + (TX_FILL_THRESH << TxFILLT_shift); + rx_flags = DMA_BURST_512 << RxMXDMA_shift; + } + + if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS) { + rx_flags |= (RxDRNT_10 << RxDRNT_shift); + tx_flags |= (TxDRNT_10 << TxDRNT_shift); + } else { + rx_flags |= (RxDRNT_100 << RxDRNT_shift); + tx_flags |= (TxDRNT_100 << TxDRNT_shift); + } + + if (duplex == FDX_CAPABLE_FULL_SELECTED) { + tx_flags |= (TxCSI | TxHBI); + rx_flags |= RxATX; + } + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + /* Can accept Jumbo packet */ + rx_flags |= RxAJAB; +#endif + + sw32(txcfg, tx_flags); + sw32(rxcfg, rx_flags); +} + +/** + * sis900_auto_negotiate - Set the Auto-Negotiation Enable/Reset bit. + * @net_dev: the net device to read mode for + * @phy_addr: mii phy address + * + * If the adapter is link-on, set the auto-negotiate enable/reset bit. + * autong_complete should be set to 0 when starting auto-negotiation. + * autong_complete should be set to 1 if we didn't start auto-negotiation. + * sis900_timer will wait for link on again if autong_complete = 0. + */ + +static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + int i = 0; + u32 status; + + for (i = 0; i < 2; i++) + status = mdio_read(net_dev, phy_addr, MII_STATUS); + + if (!(status & MII_STAT_LINK)){ + if(netif_msg_link(sis_priv)) + printk(KERN_INFO "%s: Media Link Off\n", net_dev->name); + sis_priv->autong_complete = 1; + netif_carrier_off(net_dev); + return; + } + + /* (Re)start AutoNegotiate */ + mdio_write(net_dev, phy_addr, MII_CONTROL, + MII_CNTL_AUTO | MII_CNTL_RST_AUTO); + sis_priv->autong_complete = 0; +} + + +/** + * sis900_read_mode - read media mode for sis900 internal phy + * @net_dev: the net device to read mode for + * @speed : the transmit speed to be determined + * @duplex : the duplex mode to be determined + * + * The capability of remote end will be put in mii register autorec + * after auto-negotiation. Use AND operation to get the upper bound + * of speed and duplex between two ends. 
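+ *
+ * For example (an editorial sketch, not additional driver code), the
+ * resolution below boils down to intersecting what we advertised with
+ * what the partner reported and keeping the best remaining mode:
+ *
+ *	u16 ability = mdio_read(net_dev, phy_addr, MII_ANADV) &
+ *		      mdio_read(net_dev, phy_addr, MII_ANLPAR);
+ *
+ *	speed  = (ability & (MII_NWAY_TX | MII_NWAY_TX_FDX)) ?
+ *			HW_SPEED_100_MBPS : HW_SPEED_10_MBPS;
+ *	duplex = (ability & (MII_NWAY_TX_FDX | MII_NWAY_T_FDX)) ?
+ *			FDX_CAPABLE_FULL_SELECTED : FDX_CAPABLE_HALF_SELECTED;
+ *
+ * so a 100/full local side talking to a 100/half-only partner ends up
+ * at 100 Mbps half duplex.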
+ */ + +static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + struct mii_phy *phy = sis_priv->mii; + int phy_addr = sis_priv->cur_phy; + u32 status; + u16 autoadv, autorec; + int i; + + for (i = 0; i < 2; i++) + status = mdio_read(net_dev, phy_addr, MII_STATUS); + + if (!(status & MII_STAT_LINK)) + return; + + /* AutoNegotiate completed */ + autoadv = mdio_read(net_dev, phy_addr, MII_ANADV); + autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR); + status = autoadv & autorec; + + *speed = HW_SPEED_10_MBPS; + *duplex = FDX_CAPABLE_HALF_SELECTED; + + if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX)) + *speed = HW_SPEED_100_MBPS; + if (status & ( MII_NWAY_TX_FDX | MII_NWAY_T_FDX)) + *duplex = FDX_CAPABLE_FULL_SELECTED; + + sis_priv->autong_complete = 1; + + /* Workaround for Realtek RTL8201 PHY issue */ + if ((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)) { + if (mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX) + *duplex = FDX_CAPABLE_FULL_SELECTED; + if (mdio_read(net_dev, phy_addr, 0x0019) & 0x01) + *speed = HW_SPEED_100_MBPS; + } + + if(netif_msg_link(sis_priv)) + printk(KERN_INFO "%s: Media Link On %s %s-duplex\n", + net_dev->name, + *speed == HW_SPEED_100_MBPS ? + "100mbps" : "10mbps", + *duplex == FDX_CAPABLE_FULL_SELECTED ? + "full" : "half"); +} + +/** + * sis900_tx_timeout - sis900 transmit timeout routine + * @net_dev: the net device to transmit + * + * print transmit timeout status + * disable interrupts and do some tasks + */ + +static void sis900_tx_timeout(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + unsigned long flags; + int i; + + if (netif_msg_tx_err(sis_priv)) { + printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n", + net_dev->name, sr32(cr), sr32(isr)); + } + + /* Disable interrupts by clearing the interrupt mask. */ + sw32(imr, 0x0000); + + /* use spinlock to prevent interrupt handler accessing buffer ring */ + spin_lock_irqsave(&sis_priv->lock, flags); + + /* discard unsent packets */ + sis_priv->dirty_tx = sis_priv->cur_tx = 0; + for (i = 0; i < NUM_TX_DESC; i++) { + struct sk_buff *skb = sis_priv->tx_skbuff[i]; + + if (skb) { + pci_unmap_single(sis_priv->pci_dev, + sis_priv->tx_ring[i].bufptr, skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + sis_priv->tx_skbuff[i] = NULL; + sis_priv->tx_ring[i].cmdsts = 0; + sis_priv->tx_ring[i].bufptr = 0; + net_dev->stats.tx_dropped++; + } + } + sis_priv->tx_full = 0; + netif_wake_queue(net_dev); + + spin_unlock_irqrestore(&sis_priv->lock, flags); + + net_dev->trans_start = jiffies; /* prevent tx timeout */ + + /* load Transmit Descriptor Register */ + sw32(txdp, sis_priv->tx_ring_dma); + + /* Enable all known interrupts by setting the interrupt mask. */ + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); +} + +/** + * sis900_start_xmit - sis900 start transmit routine + * @skb: socket buffer pointer to put the data being transmitted + * @net_dev: the net device to transmit with + * + * Set the transmit buffer descriptor, + * and write TxENA to enable transmit state machine. 
+ * tell upper layer if the buffer is full + */ + +static netdev_tx_t +sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + unsigned int entry; + unsigned long flags; + unsigned int index_cur_tx, index_dirty_tx; + unsigned int count_dirty_tx; + + spin_lock_irqsave(&sis_priv->lock, flags); + + /* Calculate the next Tx descriptor entry. */ + entry = sis_priv->cur_tx % NUM_TX_DESC; + sis_priv->tx_skbuff[entry] = skb; + + /* set the transmit buffer descriptor and enable Transmit State Machine */ + sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, + skb->data, skb->len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, + sis_priv->tx_ring[entry].bufptr))) { + dev_kfree_skb_any(skb); + sis_priv->tx_skbuff[entry] = NULL; + net_dev->stats.tx_dropped++; + spin_unlock_irqrestore(&sis_priv->lock, flags); + return NETDEV_TX_OK; + } + sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); + sw32(cr, TxENA | sr32(cr)); + + sis_priv->cur_tx ++; + index_cur_tx = sis_priv->cur_tx; + index_dirty_tx = sis_priv->dirty_tx; + + for (count_dirty_tx = 0; index_cur_tx != index_dirty_tx; index_dirty_tx++) + count_dirty_tx ++; + + if (index_cur_tx == index_dirty_tx) { + /* dirty_tx is met in the cycle of cur_tx, buffer full */ + sis_priv->tx_full = 1; + netif_stop_queue(net_dev); + } else if (count_dirty_tx < NUM_TX_DESC) { + /* Typical path, tell upper layer that more transmission is possible */ + netif_start_queue(net_dev); + } else { + /* buffer full, tell upper layer no more transmission */ + sis_priv->tx_full = 1; + netif_stop_queue(net_dev); + } + + spin_unlock_irqrestore(&sis_priv->lock, flags); + + if (netif_msg_tx_queued(sis_priv)) + printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d " + "to slot %d.\n", + net_dev->name, skb->data, (int)skb->len, entry); + + return NETDEV_TX_OK; +} + +/** + * sis900_interrupt - sis900 interrupt handler + * @irq: the irq number + * @dev_instance: the client data object + * + * The interrupt handler does all of the Rx thread work, + * and cleans up after the Tx thread + */ + +static irqreturn_t sis900_interrupt(int irq, void *dev_instance) +{ + struct net_device *net_dev = dev_instance; + struct sis900_private *sis_priv = netdev_priv(net_dev); + int boguscnt = max_interrupt_work; + void __iomem *ioaddr = sis_priv->ioaddr; + u32 status; + unsigned int handled = 0; + + spin_lock (&sis_priv->lock); + + do { + status = sr32(isr); + + if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) + /* nothing intresting happened */ + break; + handled = 1; + + /* why dow't we break after Tx/Rx case ?? keyword: full-duplex */ + if (status & (RxORN | RxERR | RxOK)) + /* Rx interrupt */ + sis900_rx(net_dev); + + if (status & (TxURN | TxERR | TxIDLE)) + /* Tx interrupt */ + sis900_finish_xmit(net_dev); + + /* something strange happened !!! 
*/ + if (status & HIBERR) { + if(netif_msg_intr(sis_priv)) + printk(KERN_INFO "%s: Abnormal interrupt, " + "status %#8.8x.\n", net_dev->name, status); + break; + } + if (--boguscnt < 0) { + if(netif_msg_intr(sis_priv)) + printk(KERN_INFO "%s: Too much work at interrupt, " + "interrupt status = %#8.8x.\n", + net_dev->name, status); + break; + } + } while (1); + + if(netif_msg_intr(sis_priv)) + printk(KERN_DEBUG "%s: exiting interrupt, " + "interrupt status = %#8.8x\n", + net_dev->name, sr32(isr)); + + spin_unlock (&sis_priv->lock); + return IRQ_RETVAL(handled); +} + +/** + * sis900_rx - sis900 receive routine + * @net_dev: the net device which receives data + * + * Process receive interrupt events, + * put buffer to higher layer and refill buffer pool + * Note: This function is called by interrupt handler, + * don't do "too much" work here + */ + +static int sis900_rx(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; + u32 rx_status = sis_priv->rx_ring[entry].cmdsts; + int rx_work_limit; + + if (netif_msg_rx_status(sis_priv)) + printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d " + "status:0x%8.8x\n", + sis_priv->cur_rx, sis_priv->dirty_rx, rx_status); + rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx; + + while (rx_status & OWN) { + unsigned int rx_size; + unsigned int data_size; + + if (--rx_work_limit < 0) + break; + + data_size = rx_status & DSIZE; + rx_size = data_size - CRC_SIZE; + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + /* ``TOOLONG'' flag means jumbo packet received. */ + if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE) + rx_status &= (~ ((unsigned int)TOOLONG)); +#endif + + if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) { + /* corrupted packet received */ + if (netif_msg_rx_err(sis_priv)) + printk(KERN_DEBUG "%s: Corrupted packet " + "received, buffer status = 0x%8.8x/%d.\n", + net_dev->name, rx_status, data_size); + net_dev->stats.rx_errors++; + if (rx_status & OVERRUN) + net_dev->stats.rx_over_errors++; + if (rx_status & (TOOLONG|RUNT)) + net_dev->stats.rx_length_errors++; + if (rx_status & (RXISERR | FAERR)) + net_dev->stats.rx_frame_errors++; + if (rx_status & CRCERR) + net_dev->stats.rx_crc_errors++; + /* reset buffer descriptor state */ + sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; + } else { + struct sk_buff * skb; + struct sk_buff * rx_skb; + + pci_unmap_single(sis_priv->pci_dev, + sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + + /* refill the Rx buffer, what if there is not enough + * memory for new socket buffer ?? 
*/ + if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) { + /* + * Not enough memory to refill the buffer + * so we need to recycle the old one so + * as to avoid creating a memory hole + * in the rx ring + */ + skb = sis_priv->rx_skbuff[entry]; + net_dev->stats.rx_dropped++; + goto refill_rx_ring; + } + + /* This situation should never happen, but due to + some unknown bugs, it is possible that + we are working on NULL sk_buff :-( */ + if (sis_priv->rx_skbuff[entry] == NULL) { + if (netif_msg_rx_err(sis_priv)) + printk(KERN_WARNING "%s: NULL pointer " + "encountered in Rx ring\n" + "cur_rx:%4.4d, dirty_rx:%4.4d\n", + net_dev->name, sis_priv->cur_rx, + sis_priv->dirty_rx); + dev_kfree_skb(skb); + break; + } + + /* give the socket buffer to upper layers */ + rx_skb = sis_priv->rx_skbuff[entry]; + skb_put(rx_skb, rx_size); + rx_skb->protocol = eth_type_trans(rx_skb, net_dev); + netif_rx(rx_skb); + + /* some network statistics */ + if ((rx_status & BCAST) == MCAST) + net_dev->stats.multicast++; + net_dev->stats.rx_bytes += rx_size; + net_dev->stats.rx_packets++; + sis_priv->dirty_rx++; +refill_rx_ring: + sis_priv->rx_skbuff[entry] = skb; + sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; + sis_priv->rx_ring[entry].bufptr = + pci_map_single(sis_priv->pci_dev, skb->data, + RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, + sis_priv->rx_ring[entry].bufptr))) { + dev_kfree_skb_irq(skb); + sis_priv->rx_skbuff[entry] = NULL; + break; + } + } + sis_priv->cur_rx++; + entry = sis_priv->cur_rx % NUM_RX_DESC; + rx_status = sis_priv->rx_ring[entry].cmdsts; + } // while + + /* refill the Rx buffer, what if the rate of refilling is slower + * than consuming ?? */ + for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) { + struct sk_buff *skb; + + entry = sis_priv->dirty_rx % NUM_RX_DESC; + + if (sis_priv->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE); + if (skb == NULL) { + /* not enough memory for skbuff, this makes a + * "hole" on the buffer ring, it is not clear + * how the hardware will react to this kind + * of degenerated buffer */ + net_dev->stats.rx_dropped++; + break; + } + sis_priv->rx_skbuff[entry] = skb; + sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; + sis_priv->rx_ring[entry].bufptr = + pci_map_single(sis_priv->pci_dev, skb->data, + RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, + sis_priv->rx_ring[entry].bufptr))) { + dev_kfree_skb_irq(skb); + sis_priv->rx_skbuff[entry] = NULL; + break; + } + } + } + /* re-enable the potentially idle receive state matchine */ + sw32(cr , RxENA | sr32(cr)); + + return 0; +} + +/** + * sis900_finish_xmit - finish up transmission of packets + * @net_dev: the net device to be transmitted on + * + * Check for error condition and free socket buffer etc + * schedule for more transmission as needed + * Note: This function is called by interrupt handler, + * don't do "too much" work here + */ + +static void sis900_finish_xmit (struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + + for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) { + struct sk_buff *skb; + unsigned int entry; + u32 tx_status; + + entry = sis_priv->dirty_tx % NUM_TX_DESC; + tx_status = sis_priv->tx_ring[entry].cmdsts; + + if (tx_status & OWN) { + /* The packet is not transmitted yet (owned by hardware) ! 
+ * Note: the interrupt is generated only when Tx Machine + * is idle, so this is an almost impossible case */ + break; + } + + if (tx_status & (ABORT | UNDERRUN | OWCOLL)) { + /* packet unsuccessfully transmitted */ + if (netif_msg_tx_err(sis_priv)) + printk(KERN_DEBUG "%s: Transmit " + "error, Tx status %8.8x.\n", + net_dev->name, tx_status); + net_dev->stats.tx_errors++; + if (tx_status & UNDERRUN) + net_dev->stats.tx_fifo_errors++; + if (tx_status & ABORT) + net_dev->stats.tx_aborted_errors++; + if (tx_status & NOCARRIER) + net_dev->stats.tx_carrier_errors++; + if (tx_status & OWCOLL) + net_dev->stats.tx_window_errors++; + } else { + /* packet successfully transmitted */ + net_dev->stats.collisions += (tx_status & COLCNT) >> 16; + net_dev->stats.tx_bytes += tx_status & DSIZE; + net_dev->stats.tx_packets++; + } + /* Free the original skb. */ + skb = sis_priv->tx_skbuff[entry]; + pci_unmap_single(sis_priv->pci_dev, + sis_priv->tx_ring[entry].bufptr, skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + sis_priv->tx_skbuff[entry] = NULL; + sis_priv->tx_ring[entry].bufptr = 0; + sis_priv->tx_ring[entry].cmdsts = 0; + } + + if (sis_priv->tx_full && netif_queue_stopped(net_dev) && + sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) { + /* The ring is no longer full, clear tx_full and schedule + * more transmission by netif_wake_queue(net_dev) */ + sis_priv->tx_full = 0; + netif_wake_queue (net_dev); + } +} + +/** + * sis900_close - close sis900 device + * @net_dev: the net device to be closed + * + * Disable interrupts, stop the Tx and Rx Status Machine + * free Tx and RX socket buffer + */ + +static int sis900_close(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + struct pci_dev *pdev = sis_priv->pci_dev; + void __iomem *ioaddr = sis_priv->ioaddr; + struct sk_buff *skb; + int i; + + netif_stop_queue(net_dev); + + /* Disable interrupts by clearing the interrupt mask. */ + sw32(imr, 0x0000); + sw32(ier, 0x0000); + + /* Stop the chip's Tx and Rx Status Machine */ + sw32(cr, RxDIS | TxDIS | sr32(cr)); + + del_timer(&sis_priv->timer); + + free_irq(pdev->irq, net_dev); + + /* Free Tx and RX skbuff */ + for (i = 0; i < NUM_RX_DESC; i++) { + skb = sis_priv->rx_skbuff[i]; + if (skb) { + pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr, + RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + sis_priv->rx_skbuff[i] = NULL; + } + } + for (i = 0; i < NUM_TX_DESC; i++) { + skb = sis_priv->tx_skbuff[i]; + if (skb) { + pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + sis_priv->tx_skbuff[i] = NULL; + } + } + + /* Green! Put the chip in low-power mode. 
*/ + + return 0; +} + +/** + * sis900_get_drvinfo - Return information about driver + * @net_dev: the net device to probe + * @info: container for info returned + * + * Process ethtool command such as "ehtool -i" to show information + */ + +static void sis900_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *info) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + + strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(sis_priv->pci_dev), + sizeof(info->bus_info)); +} + +static u32 sis900_get_msglevel(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + return sis_priv->msg_enable; +} + +static void sis900_set_msglevel(struct net_device *net_dev, u32 value) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + sis_priv->msg_enable = value; +} + +static u32 sis900_get_link(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + return mii_link_ok(&sis_priv->mii_info); +} + +static int sis900_get_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + spin_lock_irq(&sis_priv->lock); + mii_ethtool_gset(&sis_priv->mii_info, cmd); + spin_unlock_irq(&sis_priv->lock); + return 0; +} + +static int sis900_set_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + int rt; + spin_lock_irq(&sis_priv->lock); + rt = mii_ethtool_sset(&sis_priv->mii_info, cmd); + spin_unlock_irq(&sis_priv->lock); + return rt; +} + +static int sis900_nway_reset(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + return mii_nway_restart(&sis_priv->mii_info); +} + +/** + * sis900_set_wol - Set up Wake on Lan registers + * @net_dev: the net device to probe + * @wol: container for info passed to the driver + * + * Process ethtool command "wol" to setup wake on lan features. + * SiS900 supports sending WoL events if a correct packet is received, + * but there is no simple way to filter them to only a subset (broadcast, + * multicast, unicast or arp). 
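+ *
+ * In short (illustrative, not additional driver code), the supported
+ * options map onto two pmctrl bits plus PME_EN in PCI config space:
+ *
+ *	if (wol->wolopts & WAKE_MAGIC)
+ *		pmctrl_bits |= MAGICPKT;
+ *	if (wol->wolopts & WAKE_PHY)
+ *		pmctrl_bits |= LINKON;
+ *	sw32(pmctrl, pmctrl_bits);
+ *
+ * Any other wake flag is rejected with -EINVAL, so for instance
+ * "ethtool -s ethX wol g" (Magic Packet) works while "wol u" does not.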
+ */ + +static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + u32 cfgpmcsr = 0, pmctrl_bits = 0; + + if (wol->wolopts == 0) { + pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); + cfgpmcsr &= ~PME_EN; + pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); + sw32(pmctrl, pmctrl_bits); + if (netif_msg_wol(sis_priv)) + printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name); + return 0; + } + + if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST + | WAKE_BCAST | WAKE_ARP)) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGIC) + pmctrl_bits |= MAGICPKT; + if (wol->wolopts & WAKE_PHY) + pmctrl_bits |= LINKON; + + sw32(pmctrl, pmctrl_bits); + + pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); + cfgpmcsr |= PME_EN; + pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); + if (netif_msg_wol(sis_priv)) + printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name); + + return 0; +} + +static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) +{ + struct sis900_private *sp = netdev_priv(net_dev); + void __iomem *ioaddr = sp->ioaddr; + u32 pmctrl_bits; + + pmctrl_bits = sr32(pmctrl); + if (pmctrl_bits & MAGICPKT) + wol->wolopts |= WAKE_MAGIC; + if (pmctrl_bits & LINKON) + wol->wolopts |= WAKE_PHY; + + wol->supported = (WAKE_PHY | WAKE_MAGIC); +} + +static const struct ethtool_ops sis900_ethtool_ops = { + .get_drvinfo = sis900_get_drvinfo, + .get_msglevel = sis900_get_msglevel, + .set_msglevel = sis900_set_msglevel, + .get_link = sis900_get_link, + .get_settings = sis900_get_settings, + .set_settings = sis900_set_settings, + .nway_reset = sis900_nway_reset, + .get_wol = sis900_get_wol, + .set_wol = sis900_set_wol +}; + +/** + * mii_ioctl - process MII i/o control command + * @net_dev: the net device to command for + * @rq: parameter for command + * @cmd: the i/o command + * + * Process MII command like read/write MII register + */ + +static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + struct mii_ioctl_data *data = if_mii(rq); + + switch(cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = sis_priv->mii->phy_addr; + /* Fall Through */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f); + return 0; + + case SIOCSMIIREG: /* Write MII PHY register. */ + mdio_write(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); + return 0; + default: + return -EOPNOTSUPP; + } +} + +/** + * sis900_set_config - Set media type by net_device.set_config + * @dev: the net device for media type change + * @map: ifmap passed by ifconfig + * + * Set media type to 10baseT, 100baseT or 0(for auto) by ifconfig + * we support only port changes. All other runtime configuration + * changes will be ignored + */ + +static int sis900_set_config(struct net_device *dev, struct ifmap *map) +{ + struct sis900_private *sis_priv = netdev_priv(dev); + struct mii_phy *mii_phy = sis_priv->mii; + + u16 status; + + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + /* we switch on the ifmap->port field. I couldn't find anything + * like a definition or standard for the values of that field. + * I think the meaning of those values is device specific. 
But + * since I would like to change the media type via the ifconfig + * command I use the definition from linux/netdevice.h + * (which seems to be different from the ifport(pcmcia) definition) */ + switch(map->port){ + case IF_PORT_UNKNOWN: /* use auto here */ + dev->if_port = map->port; + /* we are going to change the media type, so the Link + * will be temporary down and we need to reflect that + * here. When the Link comes up again, it will be + * sensed by the sis_timer procedure, which also does + * all the rest for us */ + netif_carrier_off(dev); + + /* read current state */ + status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL); + + /* enable auto negotiation and reset the negotioation + * (I don't really know what the auto negatiotiation + * reset really means, but it sounds for me right to + * do one here) */ + mdio_write(dev, mii_phy->phy_addr, + MII_CONTROL, status | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); + + break; + + case IF_PORT_10BASET: /* 10BaseT */ + dev->if_port = map->port; + + /* we are going to change the media type, so the Link + * will be temporary down and we need to reflect that + * here. When the Link comes up again, it will be + * sensed by the sis_timer procedure, which also does + * all the rest for us */ + netif_carrier_off(dev); + + /* set Speed to 10Mbps */ + /* read current state */ + status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL); + + /* disable auto negotiation and force 10MBit mode*/ + mdio_write(dev, mii_phy->phy_addr, + MII_CONTROL, status & ~(MII_CNTL_SPEED | + MII_CNTL_AUTO)); + break; + + case IF_PORT_100BASET: /* 100BaseT */ + case IF_PORT_100BASETX: /* 100BaseTx */ + dev->if_port = map->port; + + /* we are going to change the media type, so the Link + * will be temporary down and we need to reflect that + * here. When the Link comes up again, it will be + * sensed by the sis_timer procedure, which also does + * all the rest for us */ + netif_carrier_off(dev); + + /* set Speed to 100Mbps */ + /* disable auto negotiation and enable 100MBit Mode */ + status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL); + mdio_write(dev, mii_phy->phy_addr, + MII_CONTROL, (status & ~MII_CNTL_SPEED) | + MII_CNTL_SPEED); + + break; + + case IF_PORT_10BASE2: /* 10Base2 */ + case IF_PORT_AUI: /* AUI */ + case IF_PORT_100BASEFX: /* 100BaseFx */ + /* These Modes are not supported (are they?)*/ + return -EOPNOTSUPP; + + default: + return -EINVAL; + } + } + return 0; +} + +/** + * sis900_mcast_bitnr - compute hashtable index + * @addr: multicast address + * @revision: revision id of chip + * + * SiS 900 uses the most sigificant 7 bits to index a 128 bits multicast + * hash table, which makes this function a little bit different from other drivers + * SiS 900 B0 & 635 M/B uses the most significat 8 bits to index 256 bits + * multicast hash table. + */ + +static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision) +{ + + u32 crc = ether_crc(6, addr); + + /* leave 8 or 7 most siginifant bits */ + if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV)) + return (int)(crc >> 24); + else + return (int)(crc >> 25); +} + +/** + * set_rx_mode - Set SiS900 receive mode + * @net_dev: the net device to be set + * + * Set SiS900 receive mode for promiscuous, multicast, or broadcast mode. + * And set the appropriate multicast filter. + * Multicast hash table changes from 128 to 256 bits for 635M/B & 900B0. 
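+ *
+ * Worked example (editorial, not additional driver code): for each
+ * multicast address the filter bit is derived from the Ethernet CRC,
+ * using its top 7 bits for the 128-bit table (top 8 on 635M/B and
+ * 900B0), and then folded into the 16-bit filter words:
+ *
+ *	u32 crc = ether_crc(6, ha->addr);
+ *	u16 bit_nr = crc >> 25;		(crc >> 24 on 635M/B & 900B0)
+ *	mc_filter[bit_nr >> 4] |= 1 << (bit_nr & 0xf);
+ *
+ * i.e. bit_nr selects both the filter word (bit_nr >> 4) and the bit
+ * within that word (bit_nr & 0xf).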
+ */ + +static void set_rx_mode(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */ + int i, table_entries; + u32 rx_mode; + + /* 635 Hash Table entries = 256(2^16) */ + if((sis_priv->chipset_rev >= SIS635A_900_REV) || + (sis_priv->chipset_rev == SIS900B_900_REV)) + table_entries = 16; + else + table_entries = 8; + + if (net_dev->flags & IFF_PROMISC) { + /* Accept any kinds of packets */ + rx_mode = RFPromiscuous; + for (i = 0; i < table_entries; i++) + mc_filter[i] = 0xffff; + } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) || + (net_dev->flags & IFF_ALLMULTI)) { + /* too many multicast addresses or accept all multicast packet */ + rx_mode = RFAAB | RFAAM; + for (i = 0; i < table_entries; i++) + mc_filter[i] = 0xffff; + } else { + /* Accept Broadcast packet, destination address matchs our + * MAC address, use Receive Filter to reject unwanted MCAST + * packets */ + struct netdev_hw_addr *ha; + rx_mode = RFAAB; + + netdev_for_each_mc_addr(ha, net_dev) { + unsigned int bit_nr; + + bit_nr = sis900_mcast_bitnr(ha->addr, + sis_priv->chipset_rev); + mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf)); + } + } + + /* update Multicast Hash Table in Receive Filter */ + for (i = 0; i < table_entries; i++) { + /* why plus 0x04 ??, That makes the correct value for hash table. */ + sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift); + sw32(rfdr, mc_filter[i]); + } + + sw32(rfcr, RFEN | rx_mode); + + /* sis900 is capable of looping back packets at MAC level for + * debugging purpose */ + if (net_dev->flags & IFF_LOOPBACK) { + u32 cr_saved; + /* We must disable Tx/Rx before setting loopback mode */ + cr_saved = sr32(cr); + sw32(cr, cr_saved | TxDIS | RxDIS); + /* enable loopback */ + sw32(txcfg, sr32(txcfg) | TxMLB); + sw32(rxcfg, sr32(rxcfg) | RxATX); + /* restore cr */ + sw32(cr, cr_saved); + } +} + +/** + * sis900_reset - Reset sis900 MAC + * @net_dev: the net device to reset + * + * reset sis900 MAC and wait until finished + * reset through command register + * change backoff algorithm for 900B0 & 635 M/B + */ + +static void sis900_reset(struct net_device *net_dev) +{ + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + u32 status = TxRCMP | RxRCMP; + int i; + + sw32(ier, 0); + sw32(imr, 0); + sw32(rfcr, 0); + + sw32(cr, RxRESET | TxRESET | RESET | sr32(cr)); + + /* Check that the chip has finished the reset. 
*/ + for (i = 0; status && (i < 1000); i++) + status ^= sr32(isr) & status; + + if (sis_priv->chipset_rev >= SIS635A_900_REV || + sis_priv->chipset_rev == SIS900B_900_REV) + sw32(cfg, PESEL | RND_CNT); + else + sw32(cfg, PESEL); +} + +/** + * sis900_remove - Remove sis900 device + * @pci_dev: the pci device to be removed + * + * remove and release SiS900 net device + */ + +static void sis900_remove(struct pci_dev *pci_dev) +{ + struct net_device *net_dev = pci_get_drvdata(pci_dev); + struct sis900_private *sis_priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); + + while (sis_priv->first_mii) { + struct mii_phy *phy = sis_priv->first_mii; + + sis_priv->first_mii = phy->next; + kfree(phy); + } + + pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, + sis_priv->rx_ring_dma); + pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, + sis_priv->tx_ring_dma); + pci_iounmap(pci_dev, sis_priv->ioaddr); + free_netdev(net_dev); + pci_release_regions(pci_dev); +} + +#ifdef CONFIG_PM + +static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct net_device *net_dev = pci_get_drvdata(pci_dev); + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + + if(!netif_running(net_dev)) + return 0; + + netif_stop_queue(net_dev); + netif_device_detach(net_dev); + + /* Stop the chip's Tx and Rx Status Machine */ + sw32(cr, RxDIS | TxDIS | sr32(cr)); + + pci_set_power_state(pci_dev, PCI_D3hot); + pci_save_state(pci_dev); + + return 0; +} + +static int sis900_resume(struct pci_dev *pci_dev) +{ + struct net_device *net_dev = pci_get_drvdata(pci_dev); + struct sis900_private *sis_priv = netdev_priv(net_dev); + void __iomem *ioaddr = sis_priv->ioaddr; + + if(!netif_running(net_dev)) + return 0; + pci_restore_state(pci_dev); + pci_set_power_state(pci_dev, PCI_D0); + + sis900_init_rxfilter(net_dev); + + sis900_init_tx_ring(net_dev); + sis900_init_rx_ring(net_dev); + + set_rx_mode(net_dev); + + netif_device_attach(net_dev); + netif_start_queue(net_dev); + + /* Workaround for EDB */ + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); + + /* Enable all known interrupts by setting the interrupt mask. 
*/ + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); + sw32(cr, RxENA | sr32(cr)); + sw32(ier, IE); + + sis900_check_mode(net_dev, sis_priv->mii); + + return 0; +} +#endif /* CONFIG_PM */ + +static struct pci_driver sis900_pci_driver = { + .name = SIS900_MODULE_NAME, + .id_table = sis900_pci_tbl, + .probe = sis900_probe, + .remove = sis900_remove, +#ifdef CONFIG_PM + .suspend = sis900_suspend, + .resume = sis900_resume, +#endif /* CONFIG_PM */ +}; + +static int __init sis900_init_module(void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + printk(version); +#endif + + return pci_register_driver(&sis900_pci_driver); +} + +static void __exit sis900_cleanup_module(void) +{ + pci_unregister_driver(&sis900_pci_driver); +} + +module_init(sis900_init_module); +module_exit(sis900_cleanup_module); + diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h new file mode 100644 index 000000000..1341f33e6 --- /dev/null +++ b/drivers/net/ethernet/sis/sis900.h @@ -0,0 +1,329 @@ +/* sis900.h Definitions for SiS ethernet controllers including 7014/7016 and 900 + * Copyright 1999 Silicon Integrated System Corporation + * References: + * SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support, + * preliminary Rev. 1.0 Jan. 14, 1998 + * SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support, + * preliminary Rev. 1.0 Nov. 10, 1998 + * SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution, + * preliminary Rev. 1.0 Jan. 18, 1998 + * http://www.sis.com.tw/support/databook.htm + */ + +/* + * SiS 7016 and SiS 900 ethernet controller registers + */ + +/* The I/O extent, SiS 900 needs 256 bytes of io address */ +#define SIS900_TOTAL_SIZE 0x100 + +/* Symbolic offsets to registers. 
*/ +enum sis900_registers { + cr=0x0, //Command Register + cfg=0x4, //Configuration Register + mear=0x8, //EEPROM Access Register + ptscr=0xc, //PCI Test Control Register + isr=0x10, //Interrupt Status Register + imr=0x14, //Interrupt Mask Register + ier=0x18, //Interrupt Enable Register + epar=0x18, //Enhanced PHY Access Register + txdp=0x20, //Transmit Descriptor Pointer Register + txcfg=0x24, //Transmit Configuration Register + rxdp=0x30, //Receive Descriptor Pointer Register + rxcfg=0x34, //Receive Configuration Register + flctrl=0x38, //Flow Control Register + rxlen=0x3c, //Receive Packet Length Register + rfcr=0x48, //Receive Filter Control Register + rfdr=0x4C, //Receive Filter Data Register + pmctrl=0xB0, //Power Management Control Register + pmer=0xB4 //Power Management Wake-up Event Register +}; + +/* Symbolic names for bits in various registers */ +enum sis900_command_register_bits { + RELOAD = 0x00000400, ACCESSMODE = 0x00000200,/* ET */ + RESET = 0x00000100, SWI = 0x00000080, RxRESET = 0x00000020, + TxRESET = 0x00000010, RxDIS = 0x00000008, RxENA = 0x00000004, + TxDIS = 0x00000002, TxENA = 0x00000001 +}; + +enum sis900_configuration_register_bits { + DESCRFMT = 0x00000100 /* 7016 specific */, REQALG = 0x00000080, + SB = 0x00000040, POW = 0x00000020, EXD = 0x00000010, + PESEL = 0x00000008, LPM = 0x00000004, BEM = 0x00000001, + /* 635 & 900B Specific */ + RND_CNT = 0x00000400, FAIR_BACKOFF = 0x00000200, + EDB_MASTER_EN = 0x00002000 +}; + +enum sis900_eeprom_access_reigster_bits { + MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */ + EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002, + EEDI = 0x00000001 +}; + +enum sis900_interrupt_register_bits { + WKEVT = 0x10000000, TxPAUSEEND = 0x08000000, TxPAUSE = 0x04000000, + TxRCMP = 0x02000000, RxRCMP = 0x01000000, DPERR = 0x00800000, + SSERR = 0x00400000, RMABT = 0x00200000, RTABT = 0x00100000, + RxSOVR = 0x00010000, HIBERR = 0x00008000, SWINT = 0x00001000, + MIBINT = 0x00000800, TxURN = 0x00000400, TxIDLE = 0x00000200, + TxERR = 0x00000100, TxDESC = 0x00000080, TxOK = 0x00000040, + RxORN = 0x00000020, RxIDLE = 0x00000010, RxEARLY = 0x00000008, + RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001 +}; + +enum sis900_interrupt_enable_reigster_bits { + IE = 0x00000001 +}; + +/* maximum dma burst for transmission and receive */ +#define MAX_DMA_RANGE 7 /* actually 0 means MAXIMUM !! 
*/ +#define TxMXDMA_shift 20 +#define RxMXDMA_shift 20 + +enum sis900_tx_rx_dma{ + DMA_BURST_512 = 0, DMA_BURST_64 = 5 +}; + +/* transmit FIFO thresholds */ +#define TX_FILL_THRESH 16 /* 1/4 FIFO size */ +#define TxFILLT_shift 8 +#define TxDRNT_shift 0 +#define TxDRNT_100 48 /* 3/4 FIFO size */ +#define TxDRNT_10 16 /* 1/2 FIFO size */ + +enum sis900_transmit_config_register_bits { + TxCSI = 0x80000000, TxHBI = 0x40000000, TxMLB = 0x20000000, + TxATP = 0x10000000, TxIFG = 0x0C000000, TxFILLT = 0x00003F00, + TxDRNT = 0x0000003F +}; + +/* recevie FIFO thresholds */ +#define RxDRNT_shift 1 +#define RxDRNT_100 16 /* 1/2 FIFO size */ +#define RxDRNT_10 24 /* 3/4 FIFO size */ + +enum sis900_reveive_config_register_bits { + RxAEP = 0x80000000, RxARP = 0x40000000, RxATX = 0x10000000, + RxAJAB = 0x08000000, RxDRNT = 0x0000007F +}; + +#define RFAA_shift 28 +#define RFADDR_shift 16 + +enum sis900_receive_filter_control_register_bits { + RFEN = 0x80000000, RFAAB = 0x40000000, RFAAM = 0x20000000, + RFAAP = 0x10000000, RFPromiscuous = (RFAAB|RFAAM|RFAAP) +}; + +enum sis900_reveive_filter_data_mask { + RFDAT = 0x0000FFFF +}; + +/* EEPROM Addresses */ +enum sis900_eeprom_address { + EEPROMSignature = 0x00, EEPROMVendorID = 0x02, EEPROMDeviceID = 0x03, + EEPROMMACAddr = 0x08, EEPROMChecksum = 0x0b +}; + +/* The EEPROM commands include the alway-set leading bit. Refer to NM93Cxx datasheet */ +enum sis900_eeprom_command { + EEread = 0x0180, EEwrite = 0x0140, EEerase = 0x01C0, + EEwriteEnable = 0x0130, EEwriteDisable = 0x0100, + EEeraseAll = 0x0120, EEwriteAll = 0x0110, + EEaddrMask = 0x013F, EEcmdShift = 16 +}; + +/* For SiS962 or SiS963, request the eeprom software access */ +enum sis96x_eeprom_command { + EEREQ = 0x00000400, EEDONE = 0x00000200, EEGNT = 0x00000100 +}; + +/* PCI Registers */ +enum sis900_pci_registers { + CFGPMC = 0x40, + CFGPMCSR = 0x44 +}; + +/* Power management capabilities bits */ +enum sis900_cfgpmc_register_bits { + PMVER = 0x00070000, + DSI = 0x00100000, + PMESP = 0xf8000000 +}; + +enum sis900_pmesp_bits { + PME_D0 = 0x1, + PME_D1 = 0x2, + PME_D2 = 0x4, + PME_D3H = 0x8, + PME_D3C = 0x10 +}; + +/* Power management control/status bits */ +enum sis900_cfgpmcsr_register_bits { + PMESTS = 0x00004000, + PME_EN = 0x00000100, // Power management enable + PWR_STA = 0x00000003 // Current power state +}; + +/* Wake-on-LAN support. 
*/ +enum sis900_power_management_control_register_bits { + LINKLOSS = 0x00000001, + LINKON = 0x00000002, + MAGICPKT = 0x00000400, + ALGORITHM = 0x00000800, + FRM1EN = 0x00100000, + FRM2EN = 0x00200000, + FRM3EN = 0x00400000, + FRM1ACS = 0x01000000, + FRM2ACS = 0x02000000, + FRM3ACS = 0x04000000, + WAKEALL = 0x40000000, + GATECLK = 0x80000000 +}; + +/* Management Data I/O (mdio) frame */ +#define MIIread 0x6000 +#define MIIwrite 0x5002 +#define MIIpmdShift 7 +#define MIIregShift 2 +#define MIIcmdLen 16 +#define MIIcmdShift 16 + +/* Buffer Descriptor Status*/ +enum sis900_buffer_status { + OWN = 0x80000000, MORE = 0x40000000, INTR = 0x20000000, + SUPCRC = 0x10000000, INCCRC = 0x10000000, + OK = 0x08000000, DSIZE = 0x00000FFF +}; +/* Status for TX Buffers */ +enum sis900_tx_buffer_status { + ABORT = 0x04000000, UNDERRUN = 0x02000000, NOCARRIER = 0x01000000, + DEFERD = 0x00800000, EXCDEFER = 0x00400000, OWCOLL = 0x00200000, + EXCCOLL = 0x00100000, COLCNT = 0x000F0000 +}; + +enum sis900_rx_buffer_status { + OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000, + MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000, + RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000, + FAERR = 0x00040000, LOOPBK = 0x00020000, RXCOL = 0x00010000 +}; + +/* MII register offsets */ +enum mii_registers { + MII_CONTROL = 0x0000, MII_STATUS = 0x0001, MII_PHY_ID0 = 0x0002, + MII_PHY_ID1 = 0x0003, MII_ANADV = 0x0004, MII_ANLPAR = 0x0005, + MII_ANEXT = 0x0006 +}; + +/* mii registers specific to SiS 900 */ +enum sis_mii_registers { + MII_CONFIG1 = 0x0010, MII_CONFIG2 = 0x0011, MII_STSOUT = 0x0012, + MII_MASK = 0x0013, MII_RESV = 0x0014 +}; + +/* mii registers specific to ICS 1893 */ +enum ics_mii_registers { + MII_EXTCTRL = 0x0010, MII_QPDSTS = 0x0011, MII_10BTOP = 0x0012, + MII_EXTCTRL2 = 0x0013 +}; + +/* mii registers specific to AMD 79C901 */ +enum amd_mii_registers { + MII_STATUS_SUMMARY = 0x0018 +}; + +/* MII Control register bit definitions. */ +enum mii_control_register_bits { + MII_CNTL_FDX = 0x0100, MII_CNTL_RST_AUTO = 0x0200, + MII_CNTL_ISOLATE = 0x0400, MII_CNTL_PWRDWN = 0x0800, + MII_CNTL_AUTO = 0x1000, MII_CNTL_SPEED = 0x2000, + MII_CNTL_LPBK = 0x4000, MII_CNTL_RESET = 0x8000 +}; + +/* MII Status register bit */ +enum mii_status_register_bits { + MII_STAT_EXT = 0x0001, MII_STAT_JAB = 0x0002, + MII_STAT_LINK = 0x0004, MII_STAT_CAN_AUTO = 0x0008, + MII_STAT_FAULT = 0x0010, MII_STAT_AUTO_DONE = 0x0020, + MII_STAT_CAN_T = 0x0800, MII_STAT_CAN_T_FDX = 0x1000, + MII_STAT_CAN_TX = 0x2000, MII_STAT_CAN_TX_FDX = 0x4000, + MII_STAT_CAN_T4 = 0x8000 +}; + +#define MII_ID1_OUI_LO 0xFC00 /* low bits of OUI mask */ +#define MII_ID1_MODEL 0x03F0 /* model number */ +#define MII_ID1_REV 0x000F /* model number */ + +/* MII NWAY Register Bits ... 
+ valid for the ANAR (Auto-Negotiation Advertisement) and + ANLPAR (Auto-Negotiation Link Partner) registers */ +enum mii_nway_register_bits { + MII_NWAY_NODE_SEL = 0x001f, MII_NWAY_CSMA_CD = 0x0001, + MII_NWAY_T = 0x0020, MII_NWAY_T_FDX = 0x0040, + MII_NWAY_TX = 0x0080, MII_NWAY_TX_FDX = 0x0100, + MII_NWAY_T4 = 0x0200, MII_NWAY_PAUSE = 0x0400, + MII_NWAY_RF = 0x2000, MII_NWAY_ACK = 0x4000, + MII_NWAY_NP = 0x8000 +}; + +enum mii_stsout_register_bits { + MII_STSOUT_LINK_FAIL = 0x4000, + MII_STSOUT_SPD = 0x0080, MII_STSOUT_DPLX = 0x0040 +}; + +enum mii_stsics_register_bits { + MII_STSICS_SPD = 0x8000, MII_STSICS_DPLX = 0x4000, + MII_STSICS_LINKSTS = 0x0001 +}; + +enum mii_stssum_register_bits { + MII_STSSUM_LINK = 0x0008, MII_STSSUM_DPLX = 0x0004, + MII_STSSUM_AUTO = 0x0002, MII_STSSUM_SPD = 0x0001 +}; + +enum sis900_revision_id { + SIS630A_900_REV = 0x80, SIS630E_900_REV = 0x81, + SIS630S_900_REV = 0x82, SIS630EA1_900_REV = 0x83, + SIS630ET_900_REV = 0x84, SIS635A_900_REV = 0x90, + SIS96x_900_REV = 0X91, SIS900B_900_REV = 0x03 +}; + +enum sis630_revision_id { + SIS630A0 = 0x00, SIS630A1 = 0x01, + SIS630B0 = 0x10, SIS630B1 = 0x11 +}; + +#define FDX_CAPABLE_DUPLEX_UNKNOWN 0 +#define FDX_CAPABLE_HALF_SELECTED 1 +#define FDX_CAPABLE_FULL_SELECTED 2 + +#define HW_SPEED_UNCONFIG 0 +#define HW_SPEED_HOME 1 +#define HW_SPEED_10_MBPS 10 +#define HW_SPEED_100_MBPS 100 +#define HW_SPEED_DEFAULT (HW_SPEED_100_MBPS) + +#define CRC_SIZE 4 +#define MAC_HEADER_SIZE 14 + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define MAX_FRAME_SIZE (1518 + 4) +#else +#define MAX_FRAME_SIZE 1518 +#endif /* CONFIG_VLAN_802_1Q */ + +#define TX_BUF_SIZE (MAX_FRAME_SIZE+18) +#define RX_BUF_SIZE (MAX_FRAME_SIZE+18) + +#define NUM_TX_DESC 16 /* Number of Tx descriptor registers. */ +#define NUM_RX_DESC 16 /* Number of Rx descriptor registers. */ +#define TX_TOTAL_SIZE NUM_TX_DESC*sizeof(BufferDesc) +#define RX_TOTAL_SIZE NUM_RX_DESC*sizeof(BufferDesc) + +/* PCI stuff, should be move to pci.h */ +#define SIS630_VENDOR_ID 0x1039 +#define SIS630_DEVICE_ID 0x0630 diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig new file mode 100644 index 000000000..3e97a8b43 --- /dev/null +++ b/drivers/net/ethernet/smsc/Kconfig @@ -0,0 +1,133 @@ +# +# Western Digital/SMC network device configuration +# + +config NET_VENDOR_SMSC + bool "SMC (SMSC)/Western Digital devices" + default y + depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ + ISA || M32R || MAC || MIPS || MN10300 || NIOS2 || PCI || \ + PCMCIA || SUPERH || XTENSA + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about SMC/Western Digital cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_SMSC + +config SMC9194 + tristate "SMC 9194 support" + depends on (ISA || MAC && BROKEN) + select CRC32 + ---help--- + This is support for the SMC9xxx based Ethernet cards. Choose this + option if you have a DELL laptop with the docking station, or + another SMC9192/9194 based chipset. Say Y if you want it compiled + into the kernel, and read the file + and the Ethernet-HOWTO, + available from . + + To compile this driver as a module, choose M here. The module + will be called smc9194. 
+ +config SMC91X + tristate "SMC 91C9x/91C1xxx support" + select CRC32 + select MII + depends on !OF || GPIOLIB + depends on ARM || ARM64 || ATARI_ETHERNAT || BLACKFIN || COLDFIRE || \ + M32R || MIPS || MN10300 || NIOS2 || SUPERH || XTENSA + ---help--- + This is a driver for SMC's 91x series of Ethernet chipsets, + including the SMC91C94 and the SMC91C111. Say Y if you want it + compiled into the kernel, and read the file + and the Ethernet-HOWTO, + available from . + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called smc91x. If you want to compile it as a + module, say M here and read . + +config PCMCIA_SMC91C92 + tristate "SMC 91Cxx PCMCIA support" + depends on PCMCIA + select CRC32 + select MII + ---help--- + Say Y here if you intend to attach an SMC 91Cxx compatible PCMCIA + (PC-card) Ethernet or Fast Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called smc91c92_cs. If unsure, say N. + +config EPIC100 + tristate "SMC EtherPower II" + depends on PCI + select CRC32 + select MII + ---help--- + This driver is for the SMC EtherPower II 9432 PCI Ethernet NIC, + which is based on the SMC83c17x (EPIC/100). + More specific information and updates are available from + . + +config SMC911X + tristate "SMSC LAN911[5678] support" + select CRC32 + select MII + depends on (ARM || SUPERH || MN10300) + ---help--- + This is a driver for SMSC's LAN911x series of Ethernet chipsets + including the new LAN9115, LAN9116, LAN9117, and LAN9118. + Say Y if you want it compiled into the kernel, + and read the Ethernet-HOWTO, available from + . + + This driver is also available as a module. The module will be + called smc911x. If you want to compile it as a module, say M + here and read + +config SMSC911X + tristate "SMSC LAN911x/LAN921x families embedded ethernet support" + depends on HAS_IOMEM + select CRC32 + select MII + select PHYLIB + ---help--- + Say Y here if you want support for SMSC LAN911x and LAN921x families + of ethernet controllers. + + To compile this driver as a module, choose M here. The module + will be called smsc911x. + +config SMSC911X_ARCH_HOOKS + def_bool n + depends on SMSC911X + ---help--- + If the arch enables this, it allows the arch to implement various + hooks for more comprehensive interrupt control and also to override + the source of the MAC address. + +config SMSC9420 + tristate "SMSC LAN9420 PCI ethernet adapter support" + depends on PCI + select CRC32 + select PHYLIB + select SMSC_PHY + ---help--- + This is a driver for SMSC's LAN9420 PCI ethernet adapter. + Say Y if you want it compiled into the kernel, + and read the Ethernet-HOWTO, available from + . + + This driver is also available as a module. The module will be + called smsc9420. If you want to compile it as a module, say M + here and read + +endif # NET_VENDOR_SMSC diff --git a/drivers/net/ethernet/smsc/Makefile b/drivers/net/ethernet/smsc/Makefile new file mode 100644 index 000000000..f3438dec9 --- /dev/null +++ b/drivers/net/ethernet/smsc/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the SMSC network device drivers. 
+# + +obj-$(CONFIG_SMC9194) += smc9194.o +obj-$(CONFIG_SMC91X) += smc91x.o +obj-$(CONFIG_PCMCIA_SMC91C92) += smc91c92_cs.o +obj-$(CONFIG_EPIC100) += epic100.o +obj-$(CONFIG_SMSC9420) += smsc9420.o +obj-$(CONFIG_SMC911X) += smc911x.o +obj-$(CONFIG_SMSC911X) += smsc911x.o diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c new file mode 100644 index 000000000..443f1da9f --- /dev/null +++ b/drivers/net/ethernet/smsc/epic100.c @@ -0,0 +1,1596 @@ +/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */ +/* + Written/copyright 1997-2001 by Donald Becker. + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + This driver is for the SMC83c170/175 "EPIC" series, as used on the + SMC EtherPower II 9432 PCI adapter, and several CardBus cards. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + Information and updates available at + http://www.scyld.com/network/epic100.html + [this link no longer provides anything useful -jgarzik] + + --------------------------------------------------------------------- + +*/ + +#define DRV_NAME "epic100" +#define DRV_VERSION "2.1" +#define DRV_RELDATE "Sept 11, 2006" + +/* The user-configurable values. + These may be modified when a driver module is loaded.*/ + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ + +/* Used to pass the full-duplex flag, etc. */ +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1518 effectively disables this feature. */ +static int rx_copybreak; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for operational efficiency. + The compiler will convert '%'<2^N> into a bit mask. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings. */ +#define TX_RING_SIZE 256 +#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */ +#define RX_RING_SIZE 256 +#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc) +#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc) + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +/* Bytes transferred to chip before transmission starts. */ +/* Initial threshold, increased on underflow, rounded down to 4 byte units. */ +#define TX_FIFO_THRESH 256 +#define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* These identify the driver base version and may not be removed. 
*/ +static char version[] = +DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker "; +static char version2[] = +" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param_array(options, int, NULL, 0); +module_param_array(full_duplex, int, NULL, 0); +MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)"); +MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex"); +MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)"); + +/* + Theory of Operation + +I. Board Compatibility + +This device driver is designed for the SMC "EPIC/100", the SMC +single-chip Ethernet controllers for PCI. This chip is used on +the SMC EtherPower II boards. + +II. Board-specific settings + +PCI bus devices are configured by the system at boot time, so no jumpers +need to be set on the board. The system BIOS will assign the +PCI INTA signal to a (preferably otherwise unused) system IRQ line. +Note: Kernel versions earlier than 1.3.73 do not support shared PCI +interrupt lines. + +III. Driver operation + +IIIa. Ring buffers + +IVb. References + +http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf +http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf +http://scyld.com/expert/NWay.html +http://www.national.com/pf/DP/DP83840A.html + +IVc. Errata + +*/ + + +enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 }; + +#define EPIC_TOTAL_SIZE 0x100 +#define USE_IO_OPS 1 + +#ifdef USE_IO_OPS +#define EPIC_BAR 0 +#else +#define EPIC_BAR 1 +#endif + +typedef enum { + SMSC_83C170_0, + SMSC_83C170, + SMSC_83C175, +} chip_t; + + +struct epic_chip_info { + const char *name; + int drv_flags; /* Driver use, intended as capability flags. */ +}; + + +/* indexed by chip_t */ +static const struct epic_chip_info pci_id_tbl[] = { + { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN }, + { "SMSC EPIC/100 83c170", TYPE2_INTR }, + { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN }, +}; + + +static const struct pci_device_id epic_pci_tbl[] = { + { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 }, + { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 }, + { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 }, + { 0,} +}; +MODULE_DEVICE_TABLE (pci, epic_pci_tbl); + +#define ew16(reg, val) iowrite16(val, ioaddr + (reg)) +#define ew32(reg, val) iowrite32(val, ioaddr + (reg)) +#define er8(reg) ioread8(ioaddr + (reg)) +#define er16(reg) ioread16(ioaddr + (reg)) +#define er32(reg) ioread32(ioaddr + (reg)) + +/* Offsets to registers, using the (ugh) SMC names. */ +enum epic_registers { + COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14, + PCIBurstCnt=0x18, + TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */ + MIICtrl=0x30, MIIData=0x34, MIICfg=0x38, + LAN0=64, /* MAC address. */ + MC0=80, /* Multicast filter table. */ + RxCtrl=96, TxCtrl=112, TxSTAT=0x74, + PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC, +}; + +/* Interrupt register bits, using my own meaningful names. 
*/ +enum IntrStatus { + TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000, + PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000, + RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100, + TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010, + RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001, +}; +enum CommandBits { + StopRx=1, StartRx=2, TxQueued=4, RxQueued=8, + StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80, +}; + +#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */ + +#define EpicNapiEvent (TxEmpty | TxDone | \ + RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull) +#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent) + +static const u16 media2miictl[16] = { + 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* + * The EPIC100 Rx and Tx buffer descriptors. Note that these + * really ARE host-endian; it's not a misannotation. We tell + * the card to byteswap them internally on big-endian hosts - + * look for #ifdef __BIG_ENDIAN in epic_open(). + */ + +struct epic_tx_desc { + u32 txstatus; + u32 bufaddr; + u32 buflength; + u32 next; +}; + +struct epic_rx_desc { + u32 rxstatus; + u32 bufaddr; + u32 buflength; + u32 next; +}; + +enum desc_status_bits { + DescOwn=0x8000, +}; + +#define PRIV_ALIGN 15 /* Required alignment mask */ +struct epic_private { + struct epic_rx_desc *rx_ring; + struct epic_tx_desc *tx_ring; + /* The saved address of a sent-in-place packet/buffer, for skfree(). */ + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + /* The addresses of receive-in-place skbuffs. */ + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + + dma_addr_t tx_ring_dma; + dma_addr_t rx_ring_dma; + + /* Ring pointers. */ + spinlock_t lock; /* Group with Tx control cache line. */ + spinlock_t napi_lock; + struct napi_struct napi; + unsigned int reschedule_in_poll; + unsigned int cur_tx, dirty_tx; + + unsigned int cur_rx, dirty_rx; + u32 irq_mask; + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + + void __iomem *ioaddr; + struct pci_dev *pci_dev; /* PCI bus location. */ + int chip_id, chip_flags; + + struct timer_list timer; /* Media selection timer. */ + int tx_threshold; + unsigned char mc_filter[8]; + signed char phys[4]; /* MII device addresses. */ + u16 advertising; /* NWay media advertisement */ + int mii_phy_cnt; + struct mii_if_info mii; + unsigned int tx_full:1; /* The Tx queue is full. */ + unsigned int default_port:4; /* Last dev->if_port value. 
*/ +}; + +static int epic_open(struct net_device *dev); +static int read_eeprom(struct epic_private *, int); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); +static void epic_restart(struct net_device *dev); +static void epic_timer(unsigned long data); +static void epic_tx_timeout(struct net_device *dev); +static void epic_init_ring(struct net_device *dev); +static netdev_tx_t epic_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static int epic_rx(struct net_device *dev, int budget); +static int epic_poll(struct napi_struct *napi, int budget); +static irqreturn_t epic_interrupt(int irq, void *dev_instance); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static const struct ethtool_ops netdev_ethtool_ops; +static int epic_close(struct net_device *dev); +static struct net_device_stats *epic_get_stats(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); + +static const struct net_device_ops epic_netdev_ops = { + .ndo_open = epic_open, + .ndo_stop = epic_close, + .ndo_start_xmit = epic_start_xmit, + .ndo_tx_timeout = epic_tx_timeout, + .ndo_get_stats = epic_get_stats, + .ndo_set_rx_mode = set_rx_mode, + .ndo_do_ioctl = netdev_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int card_idx = -1; + void __iomem *ioaddr; + int chip_idx = (int) ent->driver_data; + int irq; + struct net_device *dev; + struct epic_private *ep; + int i, ret, option = 0, duplex = 0; + void *ring_space; + dma_addr_t ring_dma; + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + pr_info_once("%s%s\n", version, version2); +#endif + + card_idx++; + + ret = pci_enable_device(pdev); + if (ret) + goto out; + irq = pdev->irq; + + if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) { + dev_err(&pdev->dev, "no PCI region space\n"); + ret = -ENODEV; + goto err_out_disable; + } + + pci_set_master(pdev); + + ret = pci_request_regions(pdev, DRV_NAME); + if (ret < 0) + goto err_out_disable; + + ret = -ENOMEM; + + dev = alloc_etherdev(sizeof (*ep)); + if (!dev) + goto err_out_free_res; + + SET_NETDEV_DEV(dev, &pdev->dev); + + ioaddr = pci_iomap(pdev, EPIC_BAR, 0); + if (!ioaddr) { + dev_err(&pdev->dev, "ioremap failed\n"); + goto err_out_free_netdev; + } + + pci_set_drvdata(pdev, dev); + ep = netdev_priv(dev); + ep->ioaddr = ioaddr; + ep->mii.dev = dev; + ep->mii.mdio_read = mdio_read; + ep->mii.mdio_write = mdio_write; + ep->mii.phy_id_mask = 0x1f; + ep->mii.reg_num_mask = 0x1f; + + ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_iounmap; + ep->tx_ring = ring_space; + ep->tx_ring_dma = ring_dma; + + ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + if (!ring_space) + goto err_out_unmap_tx; + ep->rx_ring = ring_space; + ep->rx_ring_dma = ring_dma; + + if (dev->mem_start) { + option = dev->mem_start; + duplex = (dev->mem_start & 16) ? 1 : 0; + } else if (card_idx >= 0 && card_idx < MAX_UNITS) { + if (options[card_idx] >= 0) + option = options[card_idx]; + if (full_duplex[card_idx] >= 0) + duplex = full_duplex[card_idx]; + } + + spin_lock_init(&ep->lock); + spin_lock_init(&ep->napi_lock); + ep->reschedule_in_poll = 0; + + /* Bring the chip out of low-power mode. 
*/ + ew32(GENCTL, 0x4200); + /* Magic?! If we don't set this bit the MII interface won't work. */ + /* This magic is documented in SMSC app note 7.15 */ + for (i = 16; i > 0; i--) + ew32(TEST1, 0x0008); + + /* Turn on the MII transceiver. */ + ew32(MIICfg, 0x12); + if (chip_idx == 1) + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); + ew32(GENCTL, 0x0200); + + /* Note: the '175 does not have a serial EEPROM. */ + for (i = 0; i < 3; i++) + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4)); + + if (debug > 2) { + dev_dbg(&pdev->dev, "EEPROM contents:\n"); + for (i = 0; i < 64; i++) + pr_cont(" %4.4x%s", read_eeprom(ep, i), + i % 16 == 15 ? "\n" : ""); + } + + ep->pci_dev = pdev; + ep->chip_id = chip_idx; + ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; + ep->irq_mask = + (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) + | CntFull | TxUnderrun | EpicNapiEvent; + + /* Find the connected MII xcvrs. + Doing this in open() would allow detecting external xcvrs later, but + takes much time and no cards have external MII. */ + { + int phy, phy_idx = 0; + for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) { + int mii_status = mdio_read(dev, phy, MII_BMSR); + if (mii_status != 0xffff && mii_status != 0x0000) { + ep->phys[phy_idx++] = phy; + dev_info(&pdev->dev, + "MII transceiver #%d control " + "%4.4x status %4.4x.\n", + phy, mdio_read(dev, phy, 0), mii_status); + } + } + ep->mii_phy_cnt = phy_idx; + if (phy_idx != 0) { + phy = ep->phys[0]; + ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); + dev_info(&pdev->dev, + "Autonegotiation advertising %4.4x link " + "partner %4.4x.\n", + ep->mii.advertising, mdio_read(dev, phy, 5)); + } else if ( ! (ep->chip_flags & NO_MII)) { + dev_warn(&pdev->dev, + "***WARNING***: No MII transceiver found!\n"); + /* Use the known PHY address of the EPII. */ + ep->phys[0] = 3; + } + ep->mii.phy_id = ep->phys[0]; + } + + /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ + if (ep->chip_flags & MII_PWRDWN) + ew32(NVCTL, er32(NVCTL) & ~0x483c); + ew32(GENCTL, 0x0008); + + /* The lower four bits are the media type. */ + if (duplex) { + ep->mii.force_media = ep->mii.full_duplex = 1; + dev_info(&pdev->dev, "Forced full duplex requested.\n"); + } + dev->if_port = ep->default_port = option; + + /* The Epic-specific entries in the device structure. */ + dev->netdev_ops = &epic_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + netif_napi_add(dev, &ep->napi, epic_poll, 64); + + ret = register_netdev(dev); + if (ret < 0) + goto err_out_unmap_rx; + + netdev_info(dev, "%s at %lx, IRQ %d, %pM\n", + pci_id_tbl[chip_idx].name, + (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq, + dev->dev_addr); + +out: + return ret; + +err_out_unmap_rx: + pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); +err_out_unmap_tx: + pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); +err_out_iounmap: + pci_iounmap(pdev, ioaddr); +err_out_free_netdev: + free_netdev(dev); +err_out_free_res: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); + goto out; +} + +/* Serial EEPROM section. */ + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */ +#define EE_CS 0x02 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */ +#define EE_WRITE_0 0x01 +#define EE_WRITE_1 0x09 +#define EE_DATA_READ 0x10 /* EEPROM chip data out. 
*/ +#define EE_ENB (0x0001 | EE_CS) + +/* Delay between EEPROM clock transitions. + This serves to flush the operation to the PCI bus. + */ + +#define eeprom_delay() er32(EECTL) + +/* The EEPROM commands include the alway-set leading bit. */ +#define EE_WRITE_CMD (5 << 6) +#define EE_READ64_CMD (6 << 6) +#define EE_READ256_CMD (6 << 8) +#define EE_ERASE_CMD (7 << 6) + +static void epic_disable_int(struct net_device *dev, struct epic_private *ep) +{ + void __iomem *ioaddr = ep->ioaddr; + + ew32(INTMASK, 0x00000000); +} + +static inline void __epic_pci_commit(void __iomem *ioaddr) +{ +#ifndef USE_IO_OPS + er32(INTMASK); +#endif +} + +static inline void epic_napi_irq_off(struct net_device *dev, + struct epic_private *ep) +{ + void __iomem *ioaddr = ep->ioaddr; + + ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent); + __epic_pci_commit(ioaddr); +} + +static inline void epic_napi_irq_on(struct net_device *dev, + struct epic_private *ep) +{ + void __iomem *ioaddr = ep->ioaddr; + + /* No need to commit possible posted write */ + ew32(INTMASK, ep->irq_mask | EpicNapiEvent); +} + +static int read_eeprom(struct epic_private *ep, int location) +{ + void __iomem *ioaddr = ep->ioaddr; + int i; + int retval = 0; + int read_cmd = location | + (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); + + ew32(EECTL, EE_ENB & ~EE_CS); + ew32(EECTL, EE_ENB); + + /* Shift the read command bits out. */ + for (i = 12; i >= 0; i--) { + short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0; + ew32(EECTL, EE_ENB | dataval); + eeprom_delay(); + ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK); + eeprom_delay(); + } + ew32(EECTL, EE_ENB); + + for (i = 16; i > 0; i--) { + ew32(EECTL, EE_ENB | EE_SHIFT_CLK); + eeprom_delay(); + retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0); + ew32(EECTL, EE_ENB); + eeprom_delay(); + } + + /* Terminate the EEPROM access. */ + ew32(EECTL, EE_ENB & ~EE_CS); + return retval; +} + +#define MII_READOP 1 +#define MII_WRITEOP 2 +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP; + int i; + + ew32(MIICtrl, read_cmd); + /* Typical operation takes 25 loops. */ + for (i = 400; i > 0; i--) { + barrier(); + if ((er32(MIICtrl) & MII_READOP) == 0) { + /* Work around read failure bug. */ + if (phy_id == 1 && location < 6 && + er16(MIIData) == 0xffff) { + ew32(MIICtrl, read_cmd); + continue; + } + return er16(MIIData); + } + } + return 0xffff; +} + +static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + int i; + + ew16(MIIData, value); + ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP); + for (i = 10000; i > 0; i--) { + barrier(); + if ((er32(MIICtrl) & MII_WRITEOP) == 0) + break; + } +} + + +static int epic_open(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + const int irq = ep->pci_dev->irq; + int rc, i; + + /* Soft reset the chip. 
*/ + ew32(GENCTL, 0x4001); + + napi_enable(&ep->napi); + rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) { + napi_disable(&ep->napi); + return rc; + } + + epic_init_ring(dev); + + ew32(GENCTL, 0x4000); + /* This magic is documented in SMSC app note 7.15 */ + for (i = 16; i > 0; i--) + ew32(TEST1, 0x0008); + + /* Pull the chip out of low-power mode, enable interrupts, and set for + PCI read multiple. The MIIcfg setting and strange write order are + required by the details of which bits are reset and the transceiver + wiring on the Ositech CardBus card. + */ +#if 0 + ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12); +#endif + if (ep->chip_flags & MII_PWRDWN) + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); + + /* Tell the chip to byteswap descriptors on big-endian hosts */ +#ifdef __BIG_ENDIAN + ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8)); + er32(GENCTL); + ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8)); +#else + ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8)); + er32(GENCTL); + ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8)); +#endif + + udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ + + for (i = 0; i < 3; i++) + ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i])); + + ep->tx_threshold = TX_FIFO_THRESH; + ew32(TxThresh, ep->tx_threshold); + + if (media2miictl[dev->if_port & 15]) { + if (ep->mii_phy_cnt) + mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); + if (dev->if_port == 1) { + if (debug > 1) + netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n", + mdio_read(dev, ep->phys[0], MII_BMSR)); + } + } else { + int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); + if (mii_lpa != 0xffff) { + if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL) + ep->mii.full_duplex = 1; + else if (! (mii_lpa & LPA_LPACK)) + mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); + if (debug > 1) + netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n", + ep->mii.full_duplex ? "full" + : "half", + ep->phys[0], mii_lpa); + } + } + + ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); + ew32(PRxCDAR, ep->rx_ring_dma); + ew32(PTxCDAR, ep->tx_ring_dma); + + /* Start the chip's Rx process. */ + set_rx_mode(dev); + ew32(COMMAND, StartRx | RxQueued); + + netif_start_queue(dev); + + /* Enable interrupts by setting the interrupt mask. */ + ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull | + ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | + TxUnderrun); + + if (debug > 1) { + netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n", + ioaddr, irq, er32(GENCTL), + ep->mii.full_duplex ? "full" : "half"); + } + + /* Set the timer to switch to check for link beat and perhaps switch + to an alternate media type. */ + init_timer(&ep->timer); + ep->timer.expires = jiffies + 3*HZ; + ep->timer.data = (unsigned long)dev; + ep->timer.function = epic_timer; /* timer handler */ + add_timer(&ep->timer); + + return rc; +} + +/* Reset the chip to recover from a PCI transaction error. + This may occur at interrupt time. */ +static void epic_pause(struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + + netif_stop_queue (dev); + + /* Disable interrupts by clearing the interrupt mask. */ + ew32(INTMASK, 0x00000000); + /* Stop the chip's Tx and Rx DMA processes. 
*/ + ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA); + + /* Update the error counts. */ + if (er16(COMMAND) != 0xffff) { + stats->rx_missed_errors += er8(MPCNT); + stats->rx_frame_errors += er8(ALICNT); + stats->rx_crc_errors += er8(CRCCNT); + } + + /* Remove the packets on the Rx queue. */ + epic_rx(dev, RX_RING_SIZE); +} + +static void epic_restart(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + int i; + + /* Soft reset the chip. */ + ew32(GENCTL, 0x4001); + + netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", + ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); + udelay(1); + + /* This magic is documented in SMSC app note 7.15 */ + for (i = 16; i > 0; i--) + ew32(TEST1, 0x0008); + +#ifdef __BIG_ENDIAN + ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8)); +#else + ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8)); +#endif + ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12); + if (ep->chip_flags & MII_PWRDWN) + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); + + for (i = 0; i < 3; i++) + ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i])); + + ep->tx_threshold = TX_FIFO_THRESH; + ew32(TxThresh, ep->tx_threshold); + ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); + ew32(PRxCDAR, ep->rx_ring_dma + + (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc)); + ew32(PTxCDAR, ep->tx_ring_dma + + (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc)); + + /* Start the chip's Rx process. */ + set_rx_mode(dev); + ew32(COMMAND, StartRx | RxQueued); + + /* Enable interrupts by setting the interrupt mask. */ + ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull | + ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | + TxUnderrun); + + netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n", + er32(COMMAND), er32(GENCTL), er32(INTSTAT)); +} + +static void check_media(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; + int negotiated = mii_lpa & ep->mii.advertising; + int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; + + if (ep->mii.force_media) + return; + if (mii_lpa == 0xffff) /* Bogus read */ + return; + if (ep->mii.full_duplex != duplex) { + ep->mii.full_duplex = duplex; + netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n", + ep->mii.full_duplex ? "full" : "half", + ep->phys[0], mii_lpa); + ew32(TxCtrl, ep->mii.full_duplex ? 
0x7F : 0x79); + } +} + +static void epic_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + int next_tick = 5*HZ; + + if (debug > 3) { + netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n", + er32(TxSTAT)); + netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n", + er32(INTMASK), er32(INTSTAT), er32(RxSTAT)); + } + + check_media(dev); + + ep->timer.expires = jiffies + next_tick; + add_timer(&ep->timer); +} + +static void epic_tx_timeout(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + + if (debug > 0) { + netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n", + er16(TxSTAT)); + if (debug > 1) { + netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n", + ep->dirty_tx, ep->cur_tx); + } + } + if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */ + dev->stats.tx_fifo_errors++; + ew32(COMMAND, RestartTx); + } else { + epic_restart(dev); + ew32(COMMAND, TxQueued); + } + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + if (!ep->tx_full) + netif_wake_queue(dev); +} + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void epic_init_ring(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + int i; + + ep->tx_full = 0; + ep->dirty_tx = ep->cur_tx = 0; + ep->cur_rx = ep->dirty_rx = 0; + ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + + /* Initialize all Rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + ep->rx_ring[i].rxstatus = 0; + ep->rx_ring[i].buflength = ep->rx_buf_sz; + ep->rx_ring[i].next = ep->rx_ring_dma + + (i+1)*sizeof(struct epic_rx_desc); + ep->rx_skbuff[i] = NULL; + } + /* Mark the last entry as wrapping the ring. */ + ep->rx_ring[i-1].next = ep->rx_ring_dma; + + /* Fill in the Rx buffers. Handle allocation failure gracefully. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); + ep->rx_skbuff[i] = skb; + if (skb == NULL) + break; + skb_reserve(skb, 2); /* 16 byte align the IP header. */ + ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, + skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + ep->rx_ring[i].rxstatus = DescOwn; + } + ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + + /* The Tx buffer descriptor is filled in as needed, but we + do need to clear the ownership bit. */ + for (i = 0; i < TX_RING_SIZE; i++) { + ep->tx_skbuff[i] = NULL; + ep->tx_ring[i].txstatus = 0x0000; + ep->tx_ring[i].next = ep->tx_ring_dma + + (i+1)*sizeof(struct epic_tx_desc); + } + ep->tx_ring[i-1].next = ep->tx_ring_dma; +} + +static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + int entry, free_count; + u32 ctrl_word; + unsigned long flags; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + /* Caution: the write order is important here, set the field with the + "ownership" bit last. */ + + /* Calculate the next Tx descriptor entry. 
*/ + spin_lock_irqsave(&ep->lock, flags); + free_count = ep->cur_tx - ep->dirty_tx; + entry = ep->cur_tx % TX_RING_SIZE; + + ep->tx_skbuff[entry] = skb; + ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, + skb->len, PCI_DMA_TODEVICE); + if (free_count < TX_QUEUE_LEN/2) {/* Typical path */ + ctrl_word = 0x100000; /* No interrupt */ + } else if (free_count == TX_QUEUE_LEN/2) { + ctrl_word = 0x140000; /* Tx-done intr. */ + } else if (free_count < TX_QUEUE_LEN - 1) { + ctrl_word = 0x100000; /* No Tx-done intr. */ + } else { + /* Leave room for an additional entry. */ + ctrl_word = 0x140000; /* Tx-done intr. */ + ep->tx_full = 1; + } + ep->tx_ring[entry].buflength = ctrl_word | skb->len; + ep->tx_ring[entry].txstatus = + ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16) + | DescOwn; + + ep->cur_tx++; + if (ep->tx_full) + netif_stop_queue(dev); + + spin_unlock_irqrestore(&ep->lock, flags); + /* Trigger an immediate transmit demand. */ + ew32(COMMAND, TxQueued); + + if (debug > 4) + netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n", + skb->len, entry, ctrl_word, er32(TxSTAT)); + + return NETDEV_TX_OK; +} + +static void epic_tx_error(struct net_device *dev, struct epic_private *ep, + int status) +{ + struct net_device_stats *stats = &dev->stats; + +#ifndef final_version + /* There was an major error, log it. */ + if (debug > 1) + netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n", + status); +#endif + stats->tx_errors++; + if (status & 0x1050) + stats->tx_aborted_errors++; + if (status & 0x0008) + stats->tx_carrier_errors++; + if (status & 0x0040) + stats->tx_window_errors++; + if (status & 0x0010) + stats->tx_fifo_errors++; +} + +static void epic_tx(struct net_device *dev, struct epic_private *ep) +{ + unsigned int dirty_tx, cur_tx; + + /* + * Note: if this lock becomes a problem we can narrow the locked + * region at the cost of occasionally grabbing the lock more times. + */ + cur_tx = ep->cur_tx; + for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { + struct sk_buff *skb; + int entry = dirty_tx % TX_RING_SIZE; + int txstatus = ep->tx_ring[entry].txstatus; + + if (txstatus & DescOwn) + break; /* It still hasn't been Txed */ + + if (likely(txstatus & 0x0001)) { + dev->stats.collisions += (txstatus >> 8) & 15; + dev->stats.tx_packets++; + dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; + } else + epic_tx_error(dev, ep, txstatus); + + /* Free the original skb. */ + skb = ep->tx_skbuff[entry]; + pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + ep->tx_skbuff[entry] = NULL; + } + +#ifndef final_version + if (cur_tx - dirty_tx > TX_RING_SIZE) { + netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", + dirty_tx, cur_tx, ep->tx_full); + dirty_tx += TX_RING_SIZE; + } +#endif + ep->dirty_tx = dirty_tx; + if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { + /* The ring is no longer full, allow new TX entries. */ + ep->tx_full = 0; + netif_wake_queue(dev); + } +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t epic_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + unsigned int handled = 0; + int status; + + status = er32(INTSTAT); + /* Acknowledge all of the current interrupt sources ASAP. 
*/ + ew32(INTSTAT, status & EpicNormalEvent); + + if (debug > 4) { + netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n", + status, er32(INTSTAT)); + } + + if ((status & IntrSummary) == 0) + goto out; + + handled = 1; + + if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { + spin_lock(&ep->napi_lock); + if (napi_schedule_prep(&ep->napi)) { + epic_napi_irq_off(dev, ep); + __napi_schedule(&ep->napi); + } else + ep->reschedule_in_poll++; + spin_unlock(&ep->napi_lock); + } + status &= ~EpicNapiEvent; + + /* Check uncommon events all at once. */ + if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { + struct net_device_stats *stats = &dev->stats; + + if (status == EpicRemoved) + goto out; + + /* Always update the error counts to avoid overhead later. */ + stats->rx_missed_errors += er8(MPCNT); + stats->rx_frame_errors += er8(ALICNT); + stats->rx_crc_errors += er8(CRCCNT); + + if (status & TxUnderrun) { /* Tx FIFO underflow. */ + stats->tx_fifo_errors++; + ew32(TxThresh, ep->tx_threshold += 128); + /* Restart the transmit process. */ + ew32(COMMAND, RestartTx); + } + if (status & PCIBusErr170) { + netdev_err(dev, "PCI Bus Error! status %4.4x.\n", + status); + epic_pause(dev); + epic_restart(dev); + } + /* Clear all error sources. */ + ew32(INTSTAT, status & 0x7f18); + } + +out: + if (debug > 3) { + netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n", + status); + } + + return IRQ_RETVAL(handled); +} + +static int epic_rx(struct net_device *dev, int budget) +{ + struct epic_private *ep = netdev_priv(dev); + int entry = ep->cur_rx % RX_RING_SIZE; + int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; + int work_done = 0; + + if (debug > 4) + netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry, + ep->rx_ring[entry].rxstatus); + + if (rx_work_limit > budget) + rx_work_limit = budget; + + /* If we own the next entry, it's a new packet. Send it up. */ + while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { + int status = ep->rx_ring[entry].rxstatus; + + if (debug > 4) + netdev_dbg(dev, " epic_rx() status was %8.8x.\n", + status); + if (--rx_work_limit < 0) + break; + if (status & 0x2006) { + if (debug > 2) + netdev_dbg(dev, "epic_rx() error status was %8.8x.\n", + status); + if (status & 0x2000) { + netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n", + status); + dev->stats.rx_length_errors++; + } else if (status & 0x0006) + /* Rx Frame errors are counted in hardware. */ + dev->stats.rx_errors++; + } else { + /* Malloc up new buffer, compatible with net-2e. */ + /* Omit the four octet CRC from the length. */ + short pkt_len = (status >> 16) - 4; + struct sk_buff *skb; + + if (pkt_len > PKT_BUF_SZ - 4) { + netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n", + status, pkt_len); + pkt_len = 1514; + } + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. 
*/ + if (pkt_len < rx_copybreak && + (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { + skb_reserve(skb, 2); /* 16 byte align the IP header */ + pci_dma_sync_single_for_cpu(ep->pci_dev, + ep->rx_ring[entry].bufaddr, + ep->rx_buf_sz, + PCI_DMA_FROMDEVICE); + skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); + skb_put(skb, pkt_len); + pci_dma_sync_single_for_device(ep->pci_dev, + ep->rx_ring[entry].bufaddr, + ep->rx_buf_sz, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(ep->pci_dev, + ep->rx_ring[entry].bufaddr, + ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + skb_put(skb = ep->rx_skbuff[entry], pkt_len); + ep->rx_skbuff[entry] = NULL; + } + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } + work_done++; + entry = (++ep->cur_rx) % RX_RING_SIZE; + } + + /* Refill the Rx ring buffers. */ + for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) { + entry = ep->dirty_rx % RX_RING_SIZE; + if (ep->rx_skbuff[entry] == NULL) { + struct sk_buff *skb; + skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); + if (skb == NULL) + break; + skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, + skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + work_done++; + } + /* AV: shouldn't we add a barrier here? */ + ep->rx_ring[entry].rxstatus = DescOwn; + } + return work_done; +} + +static void epic_rx_err(struct net_device *dev, struct epic_private *ep) +{ + void __iomem *ioaddr = ep->ioaddr; + int status; + + status = er32(INTSTAT); + + if (status == EpicRemoved) + return; + if (status & RxOverflow) /* Missed a Rx frame. */ + dev->stats.rx_errors++; + if (status & (RxOverflow | RxFull)) + ew16(COMMAND, RxQueued); +} + +static int epic_poll(struct napi_struct *napi, int budget) +{ + struct epic_private *ep = container_of(napi, struct epic_private, napi); + struct net_device *dev = ep->mii.dev; + int work_done = 0; + void __iomem *ioaddr = ep->ioaddr; + +rx_action: + + epic_tx(dev, ep); + + work_done += epic_rx(dev, budget); + + epic_rx_err(dev, ep); + + if (work_done < budget) { + unsigned long flags; + int more; + + /* A bit baroque but it avoids a (space hungry) spin_unlock */ + + spin_lock_irqsave(&ep->napi_lock, flags); + + more = ep->reschedule_in_poll; + if (!more) { + __napi_complete(napi); + ew32(INTSTAT, EpicNapiEvent); + epic_napi_irq_on(dev, ep); + } else + ep->reschedule_in_poll--; + + spin_unlock_irqrestore(&ep->napi_lock, flags); + + if (more) + goto rx_action; + } + + return work_done; +} + +static int epic_close(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + struct pci_dev *pdev = ep->pci_dev; + void __iomem *ioaddr = ep->ioaddr; + struct sk_buff *skb; + int i; + + netif_stop_queue(dev); + napi_disable(&ep->napi); + + if (debug > 1) + netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n", + er32(INTSTAT)); + + del_timer_sync(&ep->timer); + + epic_disable_int(dev, ep); + + free_irq(pdev->irq, dev); + + epic_pause(dev); + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + skb = ep->rx_skbuff[i]; + ep->rx_skbuff[i] = NULL; + ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ + ep->rx_ring[i].buflength = 0; + if (skb) { + pci_unmap_single(pdev, ep->rx_ring[i].bufaddr, + ep->rx_buf_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb(skb); + } + ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. 
*/ + } + for (i = 0; i < TX_RING_SIZE; i++) { + skb = ep->tx_skbuff[i]; + ep->tx_skbuff[i] = NULL; + if (!skb) + continue; + pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len, + PCI_DMA_TODEVICE); + dev_kfree_skb(skb); + } + + /* Green! Leave the chip in low-power mode. */ + ew32(GENCTL, 0x0008); + + return 0; +} + +static struct net_device_stats *epic_get_stats(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + + if (netif_running(dev)) { + struct net_device_stats *stats = &dev->stats; + + stats->rx_missed_errors += er8(MPCNT); + stats->rx_frame_errors += er8(ALICNT); + stats->rx_crc_errors += er8(CRCCNT); + } + + return &dev->stats; +} + +/* Set or clear the multicast filter for this adaptor. + Note that we only use exclusion around actually queueing the + new frame, not around filling ep->setup_frame. This is non-deterministic + when re-entered but still correct. */ + +static void set_rx_mode(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + unsigned char mc_filter[8]; /* Multicast hash filter */ + int i; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + ew32(RxCtrl, 0x002c); + /* Unconditionally log net taps. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { + /* There is apparently a chip bug, so the multicast filter + is never enabled. */ + /* Too many to filter perfectly -- accept all multicasts. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + ew32(RxCtrl, 0x000c); + } else if (netdev_mc_empty(dev)) { + ew32(RxCtrl, 0x0004); + return; + } else { /* Never executed, for now. */ + struct netdev_hw_addr *ha; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + unsigned int bit_nr = + ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; + mc_filter[bit_nr >> 3] |= (1 << bit_nr); + } + } + /* ToDo: perhaps we need to stop the Tx and Rx process here? 
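+	   (Illustrative note, added for clarity: the filter is only
+	   rewritten when it actually changes; the memcmp() below compares
+	   the freshly built 64-bit hash table against the cached copy in
+	   ep->mc_filter and, on a mismatch, writes it to the chip as four
+	   16-bit values starting at register MC0.)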
*/ + if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { + for (i = 0; i < 4; i++) + ew16(MC0 + i*4, ((u16 *)mc_filter)[i]); + memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); + } +} + +static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct epic_private *np = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct epic_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_gset(&np->mii, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct epic_private *np = netdev_priv(dev); + int rc; + + spin_lock_irq(&np->lock); + rc = mii_ethtool_sset(&np->mii, cmd); + spin_unlock_irq(&np->lock); + + return rc; +} + +static int netdev_nway_reset(struct net_device *dev) +{ + struct epic_private *np = netdev_priv(dev); + return mii_nway_restart(&np->mii); +} + +static u32 netdev_get_link(struct net_device *dev) +{ + struct epic_private *np = netdev_priv(dev); + return mii_link_ok(&np->mii); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + return debug; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 value) +{ + debug = value; +} + +static int ethtool_begin(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + + /* power-up, if interface is down */ + if (!netif_running(dev)) { + ew32(GENCTL, 0x0200); + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); + } + return 0; +} + +static void ethtool_complete(struct net_device *dev) +{ + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + + /* power-down, if interface is down */ + if (!netif_running(dev)) { + ew32(GENCTL, 0x0008); + ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000); + } +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .nway_reset = netdev_nway_reset, + .get_link = netdev_get_link, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = netdev_set_msglevel, + .begin = ethtool_begin, + .complete = ethtool_complete +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct epic_private *np = netdev_priv(dev); + void __iomem *ioaddr = np->ioaddr; + struct mii_ioctl_data *data = if_mii(rq); + int rc; + + /* power-up, if interface is down */ + if (! netif_running(dev)) { + ew32(GENCTL, 0x0200); + ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800); + } + + /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii, data, cmd, NULL); + spin_unlock_irq(&np->lock); + + /* power-down, if interface is down */ + if (! 
netif_running(dev)) { + ew32(GENCTL, 0x0008); + ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000); + } + return rc; +} + + +static void epic_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct epic_private *ep = netdev_priv(dev); + + pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); + pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); + unregister_netdev(dev); + pci_iounmap(pdev, ep->ioaddr); + pci_release_regions(pdev); + free_netdev(dev); + pci_disable_device(pdev); + /* pci_power_off(pdev, -1); */ +} + + +#ifdef CONFIG_PM + +static int epic_suspend (struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct epic_private *ep = netdev_priv(dev); + void __iomem *ioaddr = ep->ioaddr; + + if (!netif_running(dev)) + return 0; + epic_pause(dev); + /* Put the chip into low-power mode. */ + ew32(GENCTL, 0x0008); + /* pci_power_off(pdev, -1); */ + return 0; +} + + +static int epic_resume (struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (!netif_running(dev)) + return 0; + epic_restart(dev); + /* pci_power_on(pdev); */ + return 0; +} + +#endif /* CONFIG_PM */ + + +static struct pci_driver epic_driver = { + .name = DRV_NAME, + .id_table = epic_pci_tbl, + .probe = epic_init_one, + .remove = epic_remove_one, +#ifdef CONFIG_PM + .suspend = epic_suspend, + .resume = epic_resume, +#endif /* CONFIG_PM */ +}; + + +static int __init epic_init (void) +{ +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + pr_info("%s%s\n", version, version2); +#endif + + return pci_register_driver(&epic_driver); +} + + +static void __exit epic_cleanup (void) +{ + pci_unregister_driver (&epic_driver); +} + + +module_init(epic_init); +module_exit(epic_cleanup); diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c new file mode 100644 index 000000000..bd64eb982 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -0,0 +1,2179 @@ +/* + * smc911x.c + * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices. + * + * Copyright (C) 2005 Sensoria Corp + * Derived from the unified SMC91x driver by Nicolas Pitre + * and the smsc911x.c reference driver by SMSC + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * Arguments: + * watchdog = TX watchdog timeout + * tx_fifo_kb = Size of TX FIFO in KB + * + * History: + * 04/16/05 Dustin McIntire Initial version + */ +static const char version[] = + "smc911x.c: v1.0 04-16-2005 by Dustin McIntire \n"; + +/* Debugging options */ +#define ENABLE_SMC_DEBUG_RX 0 +#define ENABLE_SMC_DEBUG_TX 0 +#define ENABLE_SMC_DEBUG_DMA 0 +#define ENABLE_SMC_DEBUG_PKTS 0 +#define ENABLE_SMC_DEBUG_MISC 0 +#define ENABLE_SMC_DEBUG_FUNC 0 + +#define SMC_DEBUG_RX ((ENABLE_SMC_DEBUG_RX ? 1 : 0) << 0) +#define SMC_DEBUG_TX ((ENABLE_SMC_DEBUG_TX ? 
1 : 0) << 1) +#define SMC_DEBUG_DMA ((ENABLE_SMC_DEBUG_DMA ? 1 : 0) << 2) +#define SMC_DEBUG_PKTS ((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3) +#define SMC_DEBUG_MISC ((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4) +#define SMC_DEBUG_FUNC ((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5) + +#ifndef SMC_DEBUG +#define SMC_DEBUG ( SMC_DEBUG_RX | \ + SMC_DEBUG_TX | \ + SMC_DEBUG_DMA | \ + SMC_DEBUG_PKTS | \ + SMC_DEBUG_MISC | \ + SMC_DEBUG_FUNC \ + ) +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "smc911x.h" + +/* + * Transmit timeout, default 5 seconds. + */ +static int watchdog = 5000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +static int tx_fifo_kb=8; +module_param(tx_fifo_kb, int, 0400); +MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1 0 +#define DBG(n, dev, args...) \ + do { \ + if (SMC_DEBUG & (n)) \ + netdev_dbg(dev, args); \ + } while (0) + +#define PRINTK(dev, args...) netdev_info(dev, args) +#else +#define DBG(n, dev, args...) do { } while (0) +#define PRINTK(dev, args...) netdev_dbg(dev, args) +#endif + +#if SMC_DEBUG_PKTS > 0 +static void PRINT_PKT(u_char *buf, int length) +{ + int i; + int remainder; + int lines; + + lines = length / 16; + remainder = length % 16; + + for (i = 0; i < lines ; i ++) { + int cur; + printk(KERN_DEBUG); + for (cur = 0; cur < 8; cur++) { + u_char a, b; + a = *buf++; + b = *buf++; + pr_cont("%02x%02x ", a, b); + } + pr_cont("\n"); + } + printk(KERN_DEBUG); + for (i = 0; i < remainder/2 ; i++) { + u_char a, b; + a = *buf++; + b = *buf++; + pr_cont("%02x%02x ", a, b); + } + pr_cont("\n"); +} +#else +#define PRINT_PKT(x...) 
do { } while (0) +#endif + + +/* this enables an interrupt in the interrupt mask register */ +#define SMC_ENABLE_INT(lp, x) do { \ + unsigned int __mask; \ + __mask = SMC_GET_INT_EN((lp)); \ + __mask |= (x); \ + SMC_SET_INT_EN((lp), __mask); \ +} while (0) + +/* this disables an interrupt from the interrupt mask register */ +#define SMC_DISABLE_INT(lp, x) do { \ + unsigned int __mask; \ + __mask = SMC_GET_INT_EN((lp)); \ + __mask &= ~(x); \ + SMC_SET_INT_EN((lp), __mask); \ +} while (0) + +/* + * this does a soft reset on the device + */ +static void smc911x_reset(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int reg, timeout=0, resets=1, irq_cfg; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + /* Take out of PM setting first */ + if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { + /* Write to the bytetest will take out of powerdown */ + SMC_SET_BYTE_TEST(lp, 0); + timeout=10; + do { + udelay(10); + reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_; + } while (--timeout && !reg); + if (timeout == 0) { + PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n"); + return; + } + } + + /* Disable all interrupts */ + spin_lock_irqsave(&lp->lock, flags); + SMC_SET_INT_EN(lp, 0); + spin_unlock_irqrestore(&lp->lock, flags); + + while (resets--) { + SMC_SET_HW_CFG(lp, HW_CFG_SRST_); + timeout=10; + do { + udelay(10); + reg = SMC_GET_HW_CFG(lp); + /* If chip indicates reset timeout then try again */ + if (reg & HW_CFG_SRST_TO_) { + PRINTK(dev, "chip reset timeout, retrying...\n"); + resets++; + break; + } + } while (--timeout && (reg & HW_CFG_SRST_)); + } + if (timeout == 0) { + PRINTK(dev, "smc911x_reset timeout waiting for reset\n"); + return; + } + + /* make sure EEPROM has finished loading before setting GPIO_CFG */ + timeout=1000; + while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_)) + udelay(10); + + if (timeout == 0){ + PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n"); + return; + } + + /* Initialize interrupts */ + SMC_SET_INT_EN(lp, 0); + SMC_ACK_INT(lp, -1); + + /* Reset the FIFO level and flow control settings */ + SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16); +//TODO: Figure out what appropriate pause time is + SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_); + SMC_SET_AFC_CFG(lp, lp->afc_cfg); + + + /* Set to LED outputs */ + SMC_SET_GPIO_CFG(lp, 0x70070000); + + /* + * Deassert IRQ for 1*10us for edge type interrupts + * and drive IRQ pin push-pull + */ + irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_; +#ifdef SMC_DYNAMIC_BUS_CONFIG + if (lp->cfg.irq_polarity) + irq_cfg |= INT_CFG_IRQ_POL_; +#endif + SMC_SET_IRQ_CFG(lp, irq_cfg); + + /* clear anything saved */ + if (lp->pending_tx_skb != NULL) { + dev_kfree_skb (lp->pending_tx_skb); + lp->pending_tx_skb = NULL; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + } +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static void smc911x_enable(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned mask, cfg, cr; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + spin_lock_irqsave(&lp->lock, flags); + + SMC_SET_MAC_ADDR(lp, dev->dev_addr); + + /* Enable TX */ + cfg = SMC_GET_HW_CFG(lp); + cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF; + cfg |= HW_CFG_SF_; + SMC_SET_HW_CFG(lp, cfg); + SMC_SET_FIFO_TDA(lp, 0xFF); + /* Update TX stats on every 64 packets received or every 1 sec */ + SMC_SET_FIFO_TSL(lp, 64); + SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); + + 
SMC_GET_MAC_CR(lp, cr); + cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_; + SMC_SET_MAC_CR(lp, cr); + SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_); + + /* Add 2 byte padding to start of packets */ + SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_); + + /* Turn on receiver and enable RX */ + if (cr & MAC_CR_RXEN_) + DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n"); + + SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_); + + /* Interrupt on every received packet */ + SMC_SET_FIFO_RSA(lp, 0x01); + SMC_SET_FIFO_RSL(lp, 0x00); + + /* now, enable interrupts */ + mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ | + INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ | + INT_EN_PHY_INT_EN_; + if (IS_REV_A(lp->revision)) + mask|=INT_EN_RDFL_EN_; + else { + mask|=INT_EN_RDFO_EN_; + } + SMC_ENABLE_INT(lp, mask); + + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* + * this puts the device in an inactive state + */ +static void smc911x_shutdown(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned cr; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__); + + /* Disable IRQ's */ + SMC_SET_INT_EN(lp, 0); + + /* Turn of Rx and TX */ + spin_lock_irqsave(&lp->lock, flags); + SMC_GET_MAC_CR(lp, cr); + cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); + SMC_SET_MAC_CR(lp, cr); + SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static inline void smc911x_drop_pkt(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int fifo_count, timeout, reg; + + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n", + CARDNAME, __func__); + fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; + if (fifo_count <= 4) { + /* Manually dump the packet data */ + while (fifo_count--) + SMC_GET_RX_FIFO(lp); + } else { + /* Fast forward through the bad packet */ + SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_); + timeout=50; + do { + udelay(10); + reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_; + } while (--timeout && reg); + if (timeout == 0) { + PRINTK(dev, "timeout waiting for RX fast forward\n"); + } + } +} + +/* + * This is the procedure to handle the receipt of a packet. + * It should be called after checking for packet presence in + * the RX status FIFO. It must be called with the spin lock + * already held. 
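+ * (Worked example, added for illustration: a status word of
+ * 0x004A0000 decodes to a 74-byte frame including the 4-byte FCS, so
+ * skb_put() below keeps 74 - 4 = 70 bytes of payload and the PIO path
+ * pulls the frame plus the 2-byte RX offset from the data FIFO,
+ * padded up to a 4-byte boundary.)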
+ */ +static inline void smc911x_rcv(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int pkt_len, status; + struct sk_buff *skb; + unsigned char *data; + + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n", + __func__); + status = SMC_GET_RX_STS_FIFO(lp); + DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n", + (status & 0x3fff0000) >> 16, status & 0xc000ffff); + pkt_len = (status & RX_STS_PKT_LEN_) >> 16; + if (status & RX_STS_ES_) { + /* Deal with a bad packet */ + dev->stats.rx_errors++; + if (status & RX_STS_CRC_ERR_) + dev->stats.rx_crc_errors++; + else { + if (status & RX_STS_LEN_ERR_) + dev->stats.rx_length_errors++; + if (status & RX_STS_MCAST_) + dev->stats.multicast++; + } + /* Remove the bad packet data from the RX FIFO */ + smc911x_drop_pkt(dev); + } else { + /* Receive a valid packet */ + /* Alloc a buffer with extra room for DMA alignment */ + skb = netdev_alloc_skb(dev, pkt_len+32); + if (unlikely(skb == NULL)) { + PRINTK(dev, "Low memory, rcvd packet dropped.\n"); + dev->stats.rx_dropped++; + smc911x_drop_pkt(dev); + return; + } + /* Align IP header to 32 bits + * Note that the device is configured to add a 2 + * byte padding to the packet start, so we really + * want to write to the orignal data pointer */ + data = skb->data; + skb_reserve(skb, 2); + skb_put(skb,pkt_len-4); +#ifdef SMC_USE_DMA + { + unsigned int fifo; + /* Lower the FIFO threshold if possible */ + fifo = SMC_GET_FIFO_INT(lp); + if (fifo & 0xFF) fifo--; + DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n", + fifo & 0xff); + SMC_SET_FIFO_INT(lp, fifo); + /* Setup RX DMA */ + SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_)); + lp->rxdma_active = 1; + lp->current_rx_skb = skb; + SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15); + /* Packet processing deferred to DMA RX interrupt */ + } +#else + SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_)); + SMC_PULL_DATA(lp, data, pkt_len+2+3); + + DBG(SMC_DEBUG_PKTS, dev, "Received packet\n"); + PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len-4; +#endif + } +} + +/* + * This is called to actually send a packet to the chip. 
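+ * (Worked example, added for illustration: in the PIO case a 60-byte
+ * skb whose data pointer has its low two address bits equal to 2
+ * gives buf rounded down to a 4-byte boundary, len =
+ * (60 + 3 + 2) & ~0x3 = 64, cmdA = (2 << 16) |
+ * TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | 60 and
+ * cmdB = (60 << 16) | 60, i.e. the tag field simply repeats the
+ * packet length.)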
+ */ +static void smc911x_hardware_send_pkt(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + struct sk_buff *skb; + unsigned int cmdA, cmdB, len; + unsigned char *buf; + + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__); + BUG_ON(lp->pending_tx_skb == NULL); + + skb = lp->pending_tx_skb; + lp->pending_tx_skb = NULL; + + /* cmdA {25:24] data alignment [20:16] start offset [10:0] buffer length */ + /* cmdB {31:16] pkt tag [10:0] length */ +#ifdef SMC_USE_DMA + /* 16 byte buffer alignment mode */ + buf = (char*)((u32)(skb->data) & ~0xF); + len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF; + cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) | + TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | + skb->len; +#else + buf = (char*)((u32)skb->data & ~0x3); + len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3; + cmdA = (((u32)skb->data & 0x3) << 16) | + TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | + skb->len; +#endif + /* tag is packet length so we can use this in stats update later */ + cmdB = (skb->len << 16) | (skb->len & 0x7FF); + + DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n", + len, len, buf, cmdA, cmdB); + SMC_SET_TX_FIFO(lp, cmdA); + SMC_SET_TX_FIFO(lp, cmdB); + + DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n"); + PRINT_PKT(buf, len <= 64 ? len : 64); + + /* Send pkt via PIO or DMA */ +#ifdef SMC_USE_DMA + lp->current_tx_skb = skb; + SMC_PUSH_DATA(lp, buf, len); + /* DMA complete IRQ will free buffer and set jiffies */ +#else + SMC_PUSH_DATA(lp, buf, len); + dev->trans_start = jiffies; + dev_kfree_skb_irq(skb); +#endif + if (!lp->tx_throttle) { + netif_wake_queue(dev); + } + SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_); +} + +/* + * Since I am not sure if I will have enough room in the chip's ram + * to store the packet, I call this routine which either sends it + * now, or set the card to generates an interrupt when ready + * for the packet. 
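+ * (Worked example, added for illustration: a full-sized 1514-byte
+ * frame needs 1514 + 8 bytes of TX command words + 15 bytes of start
+ * offset + 15 bytes of end padding = 1552 bytes of free TX data FIFO
+ * space; with less than that available the packet is dropped and
+ * counted below.)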
+ */ +static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int free; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", + __func__); + + spin_lock_irqsave(&lp->lock, flags); + + BUG_ON(lp->pending_tx_skb != NULL); + + free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_; + DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free); + + /* Turn off the flow when running out of space in FIFO */ + if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) { + DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n", + free); + /* Reenable when at least 1 packet of size MTU present */ + SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64); + lp->tx_throttle = 1; + netif_stop_queue(dev); + } + + /* Drop packets when we run out of space in TX FIFO + * Account for overhead required for: + * + * Tx command words 8 bytes + * Start offset 15 bytes + * End padding 15 bytes + */ + if (unlikely(free < (skb->len + 8 + 15 + 15))) { + netdev_warn(dev, "No Tx free space %d < %d\n", + free, skb->len); + lp->pending_tx_skb = NULL; + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + spin_unlock_irqrestore(&lp->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + +#ifdef SMC_USE_DMA + { + /* If the DMA is already running then defer this packet Tx until + * the DMA IRQ starts it + */ + if (lp->txdma_active) { + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n"); + lp->pending_tx_skb = skb; + netif_stop_queue(dev); + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; + } else { + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n"); + lp->txdma_active = 1; + } + } +#endif + lp->pending_tx_skb = skb; + smc911x_hardware_send_pkt(dev); + spin_unlock_irqrestore(&lp->lock, flags); + + return NETDEV_TX_OK; +} + +/* + * This handles a TX status interrupt, which is only called when: + * - a TX error occurred, or + * - TX of a packet completed. 
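+ * (Illustrative note, added for clarity: since the tag field of cmdB
+ * was set to the packet length on transmit, the "tx_status >> 16"
+ * below recovers the byte count for the tx_bytes statistic; a tag of
+ * 0x05EA, for example, accounts for a 1514-byte frame.)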
+ */ +static void smc911x_tx(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int tx_status; + + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", + __func__); + + /* Collect the TX status */ + while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { + DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n", + (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16); + tx_status = SMC_GET_TX_STS_FIFO(lp); + dev->stats.tx_packets++; + dev->stats.tx_bytes+=tx_status>>16; + DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n", + (tx_status & 0xffff0000) >> 16, + tx_status & 0x0000ffff); + /* count Tx errors, but ignore lost carrier errors when in + * full-duplex mode */ + if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && + !(tx_status & 0x00000306))) { + dev->stats.tx_errors++; + } + if (tx_status & TX_STS_MANY_COLL_) { + dev->stats.collisions+=16; + dev->stats.tx_aborted_errors++; + } else { + dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; + } + /* carrier error only has meaning for half-duplex communication */ + if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && + !lp->ctl_rfduplx) { + dev->stats.tx_carrier_errors++; + } + if (tx_status & TX_STS_LATE_COLL_) { + dev->stats.collisions++; + dev->stats.tx_aborted_errors++; + } + } +} + + +/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ +/* + * Reads a register from the MII Management serial interface + */ + +static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int phydata; + + SMC_GET_MII(lp, phyreg, phyaddr, phydata); + + DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", + __func__, phyaddr, phyreg, phydata); + return phydata; +} + + +/* + * Writes a register to the MII Management serial interface + */ +static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg, + int phydata) +{ + struct smc911x_local *lp = netdev_priv(dev); + + DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); + + SMC_SET_MII(lp, phyreg, phyaddr, phydata); +} + +/* + * Finds and reports the PHY address (115 and 117 have external + * PHY interface 118 has internal only + */ +static void smc911x_phy_detect(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int phyaddr; + unsigned int cfg, id1, id2; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + lp->phy_type = 0; + + /* + * Scan all 32 PHY addresses if necessary, starting at + * PHY#1 to PHY#31, and then PHY#0 last. 
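+ * (Illustrative note, added for clarity: an address answering with ID
+ * words of 0x0000, 0xffff or 0x8000 is treated as empty; the first
+ * address that returns a plausible pair is recorded in lp->mii.phy_id
+ * and the combined value (id1 << 16) | id2 is kept in lp->phy_type.)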
+ */ + switch(lp->version) { + case CHIP_9115: + case CHIP_9117: + case CHIP_9215: + case CHIP_9217: + cfg = SMC_GET_HW_CFG(lp); + if (cfg & HW_CFG_EXT_PHY_DET_) { + cfg &= ~HW_CFG_PHY_CLK_SEL_; + cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; + SMC_SET_HW_CFG(lp, cfg); + udelay(10); /* Wait for clocks to stop */ + + cfg |= HW_CFG_EXT_PHY_EN_; + SMC_SET_HW_CFG(lp, cfg); + udelay(10); /* Wait for clocks to stop */ + + cfg &= ~HW_CFG_PHY_CLK_SEL_; + cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; + SMC_SET_HW_CFG(lp, cfg); + udelay(10); /* Wait for clocks to stop */ + + cfg |= HW_CFG_SMI_SEL_; + SMC_SET_HW_CFG(lp, cfg); + + for (phyaddr = 1; phyaddr < 32; ++phyaddr) { + + /* Read the PHY identifiers */ + SMC_GET_PHY_ID1(lp, phyaddr & 31, id1); + SMC_GET_PHY_ID2(lp, phyaddr & 31, id2); + + /* Make sure it is a valid identifier */ + if (id1 != 0x0000 && id1 != 0xffff && + id1 != 0x8000 && id2 != 0x0000 && + id2 != 0xffff && id2 != 0x8000) { + /* Save the PHY's address */ + lp->mii.phy_id = phyaddr & 31; + lp->phy_type = id1 << 16 | id2; + break; + } + } + if (phyaddr < 32) + /* Found an external PHY */ + break; + } + default: + /* Internal media only */ + SMC_GET_PHY_ID1(lp, 1, id1); + SMC_GET_PHY_ID2(lp, 1, id2); + /* Save the PHY's address */ + lp->mii.phy_id = 1; + lp->phy_type = id1 << 16 | id2; + } + + DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%x\n", + id1, id2, lp->mii.phy_id); +} + +/* + * Sets the PHY to a configuration as determined by the user. + * Called with spin_lock held. + */ +static int smc911x_phy_fixed(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + int bmcr; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + /* Enter Link Disable state */ + SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); + bmcr |= BMCR_PDOWN; + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + + /* + * Set our fixed capabilities + * Disable auto-negotiation + */ + bmcr &= ~BMCR_ANENABLE; + if (lp->ctl_rfduplx) + bmcr |= BMCR_FULLDPLX; + + if (lp->ctl_rspeed == 100) + bmcr |= BMCR_SPEED100; + + /* Write our capabilities to the phy control register */ + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + + /* Re-Configure the Receive/Phy Control register */ + bmcr &= ~BMCR_PDOWN; + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + + return 1; +} + +/** + * smc911x_phy_reset - reset the phy + * @dev: net device + * @phy: phy address + * + * Issue a software reset for the specified PHY and + * wait up to 100ms for the reset to complete. We should + * not access the PHY for 50ms after issuing the reset. + * + * The time to wait appears to be dependent on the PHY. + * + */ +static int smc911x_phy_reset(struct net_device *dev, int phy) +{ + struct smc911x_local *lp = netdev_priv(dev); + int timeout; + unsigned long flags; + unsigned int reg; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); + + spin_lock_irqsave(&lp->lock, flags); + reg = SMC_GET_PMT_CTRL(lp); + reg &= ~0xfffff030; + reg |= PMT_CTRL_PHY_RST_; + SMC_SET_PMT_CTRL(lp, reg); + spin_unlock_irqrestore(&lp->lock, flags); + for (timeout = 2; timeout; timeout--) { + msleep(50); + spin_lock_irqsave(&lp->lock, flags); + reg = SMC_GET_PMT_CTRL(lp); + spin_unlock_irqrestore(&lp->lock, flags); + if (!(reg & PMT_CTRL_PHY_RST_)) { + /* extra delay required because the phy may + * not be completed with its reset + * when PHY_BCR_RESET_ is cleared. 
256us + * should suffice, but use 500us to be safe + */ + udelay(500); + break; + } + } + + return reg & PMT_CTRL_PHY_RST_; +} + +/** + * smc911x_phy_powerdown - powerdown phy + * @dev: net device + * @phy: phy address + * + * Power down the specified PHY + */ +static void smc911x_phy_powerdown(struct net_device *dev, int phy) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int bmcr; + + /* Enter Link Disable state */ + SMC_GET_PHY_BMCR(lp, phy, bmcr); + bmcr |= BMCR_PDOWN; + SMC_SET_PHY_BMCR(lp, phy, bmcr); +} + +/** + * smc911x_phy_check_media - check the media status and adjust BMCR + * @dev: net device + * @init: set true for initialisation + * + * Select duplex mode depending on negotiation state. This + * also updates our carrier state. + */ +static void smc911x_phy_check_media(struct net_device *dev, int init) +{ + struct smc911x_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + unsigned int bmcr, cr; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { + /* duplex state has changed */ + SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); + SMC_GET_MAC_CR(lp, cr); + if (lp->mii.full_duplex) { + DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n"); + bmcr |= BMCR_FULLDPLX; + cr |= MAC_CR_RCVOWN_; + } else { + DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n"); + bmcr &= ~BMCR_FULLDPLX; + cr &= ~MAC_CR_RCVOWN_; + } + SMC_SET_PHY_BMCR(lp, phyaddr, bmcr); + SMC_SET_MAC_CR(lp, cr); + } +} + +/* + * Configures the specified PHY through the MII management interface + * using Autonegotiation. + * Calls smc911x_phy_fixed() if the user has requested a certain config. + * If RPC ANEG bit is set, the media selection is dependent purely on + * the selection by the MII (either in the MII BMCR reg or the result + * of autonegotiation.) If the RPC ANEG bit is cleared, the selection + * is controlled by the RPC SPEED and RPC DPLX bits. + */ +static void smc911x_phy_configure(struct work_struct *work) +{ + struct smc911x_local *lp = container_of(work, struct smc911x_local, + phy_configure); + struct net_device *dev = lp->netdev; + int phyaddr = lp->mii.phy_id; + int my_phy_caps; /* My PHY capabilities */ + int my_ad_caps; /* My Advertised capabilities */ + int status; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); + + /* + * We should not be called if phy_type is zero. 
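+ * (Illustrative note, added for clarity: with the defaults set in
+ * smc911x_probe() -- ctl_rspeed = 100 and ctl_rfduplx = 1 -- every
+ * 10/100 half/full capability the PHY reports in BMSR stays
+ * advertised; forcing ctl_rspeed to 10 would strip the 100BASE bits
+ * from the advertisement below.)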
+ */ + if (lp->phy_type == 0) + return; + + if (smc911x_phy_reset(dev, phyaddr)) { + netdev_info(dev, "PHY reset timed out\n"); + return; + } + spin_lock_irqsave(&lp->lock, flags); + + /* + * Enable PHY Interrupts (for register 18) + * Interrupts listed here are enabled + */ + SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ | + PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ | + PHY_INT_MASK_LINK_DOWN_); + + /* If the user requested no auto neg, then go set his request */ + if (lp->mii.force_media) { + smc911x_phy_fixed(dev); + goto smc911x_phy_configure_exit; + } + + /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ + SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps); + if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { + netdev_info(dev, "Auto negotiation NOT supported\n"); + smc911x_phy_fixed(dev); + goto smc911x_phy_configure_exit; + } + + /* CSMA capable w/ both pauses */ + my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + if (my_phy_caps & BMSR_100BASE4) + my_ad_caps |= ADVERTISE_100BASE4; + if (my_phy_caps & BMSR_100FULL) + my_ad_caps |= ADVERTISE_100FULL; + if (my_phy_caps & BMSR_100HALF) + my_ad_caps |= ADVERTISE_100HALF; + if (my_phy_caps & BMSR_10FULL) + my_ad_caps |= ADVERTISE_10FULL; + if (my_phy_caps & BMSR_10HALF) + my_ad_caps |= ADVERTISE_10HALF; + + /* Disable capabilities not selected by our user */ + if (lp->ctl_rspeed != 100) + my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); + + if (!lp->ctl_rfduplx) + my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); + + /* Update our Auto-Neg Advertisement Register */ + SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps); + lp->mii.advertising = my_ad_caps; + + /* + * Read the register back. Without this, it appears that when + * auto-negotiation is restarted, sometimes it isn't ready and + * the link does not come up. + */ + udelay(10); + SMC_GET_PHY_MII_ADV(lp, phyaddr, status); + + DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps); + DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps); + + /* Restart auto-negotiation process in order to advertise my caps */ + SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART); + + smc911x_phy_check_media(dev, 1); + +smc911x_phy_configure_exit: + spin_unlock_irqrestore(&lp->lock, flags); +} + +/* + * smc911x_phy_interrupt + * + * Purpose: Handle interrupts relating to PHY register 18. This is + * called from the "hard" interrupt handler under our private spinlock. + */ +static void smc911x_phy_interrupt(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + int status; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + if (lp->phy_type == 0) + return; + + smc911x_phy_check_media(dev, 0); + /* read to clear status bits */ + SMC_GET_PHY_INT_SRC(lp, phyaddr,status); + DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n", + status & 0xffff); + DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n", + SMC_GET_AFC_CFG(lp)); +} + +/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ + +/* + * This is the main routine of the driver, to handle the device when + * it needs some attention. 
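+ * (Illustrative note, added for clarity: the handler first checks
+ * IRQ_CFG to reject interrupts that are not both asserted and
+ * enabled, then masks all sources, services up to eight rounds of
+ * "status & mask" work -- receive, transmit, PHY and error
+ * conditions -- and finally restores the saved interrupt mask.)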
+ */ +static irqreturn_t smc911x_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smc911x_local *lp = netdev_priv(dev); + unsigned int status, mask, timeout; + unsigned int rx_overrun=0, cr, pkts; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + spin_lock_irqsave(&lp->lock, flags); + + /* Spurious interrupt check */ + if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != + (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { + spin_unlock_irqrestore(&lp->lock, flags); + return IRQ_NONE; + } + + mask = SMC_GET_INT_EN(lp); + SMC_SET_INT_EN(lp, 0); + + /* set a timeout value, so I don't stay here forever */ + timeout = 8; + + + do { + status = SMC_GET_INT(lp); + + DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n", + status, mask, status & ~mask); + + status &= mask; + if (!status) + break; + + /* Handle SW interrupt condition */ + if (status & INT_STS_SW_INT_) { + SMC_ACK_INT(lp, INT_STS_SW_INT_); + mask &= ~INT_EN_SW_INT_EN_; + } + /* Handle various error conditions */ + if (status & INT_STS_RXE_) { + SMC_ACK_INT(lp, INT_STS_RXE_); + dev->stats.rx_errors++; + } + if (status & INT_STS_RXDFH_INT_) { + SMC_ACK_INT(lp, INT_STS_RXDFH_INT_); + dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp); + } + /* Undocumented interrupt-what is the right thing to do here? */ + if (status & INT_STS_RXDF_INT_) { + SMC_ACK_INT(lp, INT_STS_RXDF_INT_); + } + + /* Rx Data FIFO exceeds set level */ + if (status & INT_STS_RDFL_) { + if (IS_REV_A(lp->revision)) { + rx_overrun=1; + SMC_GET_MAC_CR(lp, cr); + cr &= ~MAC_CR_RXEN_; + SMC_SET_MAC_CR(lp, cr); + DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + } + SMC_ACK_INT(lp, INT_STS_RDFL_); + } + if (status & INT_STS_RDFO_) { + if (!IS_REV_A(lp->revision)) { + SMC_GET_MAC_CR(lp, cr); + cr &= ~MAC_CR_RXEN_; + SMC_SET_MAC_CR(lp, cr); + rx_overrun=1; + DBG(SMC_DEBUG_RX, dev, "RX overrun\n"); + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + } + SMC_ACK_INT(lp, INT_STS_RDFO_); + } + /* Handle receive condition */ + if ((status & INT_STS_RSFL_) || rx_overrun) { + unsigned int fifo; + DBG(SMC_DEBUG_RX, dev, "RX irq\n"); + fifo = SMC_GET_RX_FIFO_INF(lp); + pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16; + DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n", + pkts, fifo & 0xFFFF); + if (pkts != 0) { +#ifdef SMC_USE_DMA + unsigned int fifo; + if (lp->rxdma_active){ + DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, + "RX DMA active\n"); + /* The DMA is already running so up the IRQ threshold */ + fifo = SMC_GET_FIFO_INT(lp) & ~0xFF; + fifo |= pkts & 0xFF; + DBG(SMC_DEBUG_RX, dev, + "Setting RX stat FIFO threshold to %d\n", + fifo & 0xff); + SMC_SET_FIFO_INT(lp, fifo); + } else +#endif + smc911x_rcv(dev); + } + SMC_ACK_INT(lp, INT_STS_RSFL_); + } + /* Handle transmit FIFO available */ + if (status & INT_STS_TDFA_) { + DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n"); + SMC_SET_FIFO_TDA(lp, 0xFF); + lp->tx_throttle = 0; +#ifdef SMC_USE_DMA + if (!lp->txdma_active) +#endif + netif_wake_queue(dev); + SMC_ACK_INT(lp, INT_STS_TDFA_); + } + /* Handle transmit done condition */ +#if 1 + if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) { + DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev, + "Tx stat FIFO limit (%d) /GPT irq\n", + (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16); + smc911x_tx(dev); + SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); + SMC_ACK_INT(lp, INT_STS_TSFL_); + SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_); + } +#else + if (status & 
INT_STS_TSFL_) { + DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?); + smc911x_tx(dev); + SMC_ACK_INT(lp, INT_STS_TSFL_); + } + + if (status & INT_STS_GPT_INT_) { + DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n", + SMC_GET_IRQ_CFG(lp), + SMC_GET_FIFO_INT(lp), + SMC_GET_RX_CFG(lp)); + DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n", + (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16, + SMC_GET_RX_FIFO_INF(lp) & 0xffff, + SMC_GET_RX_STS_FIFO_PEEK(lp)); + SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000); + SMC_ACK_INT(lp, INT_STS_GPT_INT_); + } +#endif + + /* Handle PHY interrupt condition */ + if (status & INT_STS_PHY_INT_) { + DBG(SMC_DEBUG_MISC, dev, "PHY irq\n"); + smc911x_phy_interrupt(dev); + SMC_ACK_INT(lp, INT_STS_PHY_INT_); + } + } while (--timeout); + + /* restore mask state */ + SMC_SET_INT_EN(lp, mask); + + DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n", + 8-timeout); + + spin_unlock_irqrestore(&lp->lock, flags); + + return IRQ_HANDLED; +} + +#ifdef SMC_USE_DMA +static void +smc911x_tx_dma_irq(int dma, void *data) +{ + struct net_device *dev = (struct net_device *)data; + struct smc911x_local *lp = netdev_priv(dev); + struct sk_buff *skb = lp->current_tx_skb; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); + /* Clear the DMA interrupt sources */ + SMC_DMA_ACK_IRQ(dev, dma); + BUG_ON(skb == NULL); + dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); + dev->trans_start = jiffies; + dev_kfree_skb_irq(skb); + lp->current_tx_skb = NULL; + if (lp->pending_tx_skb != NULL) + smc911x_hardware_send_pkt(dev); + else { + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, + "No pending Tx packets. DMA disabled\n"); + spin_lock_irqsave(&lp->lock, flags); + lp->txdma_active = 0; + if (!lp->tx_throttle) { + netif_wake_queue(dev); + } + spin_unlock_irqrestore(&lp->lock, flags); + } + + DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, + "TX DMA irq completed\n"); +} +static void +smc911x_rx_dma_irq(int dma, void *data) +{ + struct net_device *dev = (struct net_device *)data; + struct smc911x_local *lp = netdev_priv(dev); + struct sk_buff *skb = lp->current_rx_skb; + unsigned long flags; + unsigned int pkts; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); + /* Clear the DMA interrupt sources */ + SMC_DMA_ACK_IRQ(dev, dma); + dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); + BUG_ON(skb == NULL); + lp->current_rx_skb = NULL; + PRINT_PKT(skb->data, skb->len); + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + netif_rx(skb); + + spin_lock_irqsave(&lp->lock, flags); + pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16; + if (pkts != 0) { + smc911x_rcv(dev); + }else { + lp->rxdma_active = 0; + } + spin_unlock_irqrestore(&lp->lock, flags); + DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, + "RX DMA irq completed. DMA RX FIFO PKTS %d\n", + pkts); +} +#endif /* SMC_USE_DMA */ + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void smc911x_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + smc911x_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* Our watchdog timed out. 
Called by the networking layer */ +static void smc911x_timeout(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int status, mask; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + spin_lock_irqsave(&lp->lock, flags); + status = SMC_GET_INT(lp); + mask = SMC_GET_INT_EN(lp); + spin_unlock_irqrestore(&lp->lock, flags); + DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n", + status, mask); + + /* Dump the current TX FIFO contents and restart */ + mask = SMC_GET_TX_CFG(lp); + SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_); + /* + * Reconfiguring the PHY doesn't seem like a bad idea here, but + * smc911x_phy_configure() calls msleep() which calls schedule_timeout() + * which calls schedule(). Hence we use a work queue. + */ + if (lp->phy_type != 0) + schedule_work(&lp->phy_configure); + + /* We can accept TX packets again */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * This routine will, depending on the values passed to it, + * either make it accept multicast packets, go into + * promiscuous mode (for TCPDUMP and cousins) or accept + * a select set of multicast packets + */ +static void smc911x_set_multicast_list(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int multicast_table[2]; + unsigned int mcr, update_multicast = 0; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + spin_lock_irqsave(&lp->lock, flags); + SMC_GET_MAC_CR(lp, mcr); + spin_unlock_irqrestore(&lp->lock, flags); + + if (dev->flags & IFF_PROMISC) { + + DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n"); + mcr |= MAC_CR_PRMS_; + } + /* + * Here, I am setting this to accept all multicast packets. + * I don't need to zero the multicast table, because the flag is + * checked before the table is + */ + else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) { + DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n"); + mcr |= MAC_CR_MCPAS_; + } + + /* + * This sets the internal hardware table to filter out unwanted + * multicast packets before they take up memory. + * + * The SMC chip uses a hash table where the high 6 bits of the CRC of + * address are the offset into the table. If that bit is 1, then the + * multicast packet is accepted. Otherwise, it's dropped silently. + * + * To use the 6 bits as an offset into the table, the high 1 bit is + * the number of the 32 bit register, while the low 5 bits are the bit + * within that register. 
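+ * (Worked example, added for illustration: an address whose
+ * ether_crc() comes out as 0x9abcdef0 gives position =
+ * 0x9abcdef0 >> 26 = 38, so bit 38 & 0x1f = 6 of
+ * multicast_table[38 >> 5] = multicast_table[1] is set, and that
+ * word is later written to the HASHH register.)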
+ */ + else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + /* Set the Hash perfec mode */ + mcr |= MAC_CR_HPFILT_; + + /* start with a table of all zeros: reject all */ + memset(multicast_table, 0, sizeof(multicast_table)); + + netdev_for_each_mc_addr(ha, dev) { + u32 position; + + /* upper 6 bits are used as hash index */ + position = ether_crc(ETH_ALEN, ha->addr)>>26; + + multicast_table[position>>5] |= 1 << (position&0x1f); + } + + /* be sure I get rid of flags I might have set */ + mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); + + /* now, the table can be loaded into the chipset */ + update_multicast = 1; + } else { + DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n"); + mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); + + /* + * since I'm disabling all multicast entirely, I need to + * clear the multicast list + */ + memset(multicast_table, 0, sizeof(multicast_table)); + update_multicast = 1; + } + + spin_lock_irqsave(&lp->lock, flags); + SMC_SET_MAC_CR(lp, mcr); + if (update_multicast) { + DBG(SMC_DEBUG_MISC, dev, + "update mcast hash table 0x%08x 0x%08x\n", + multicast_table[0], multicast_table[1]); + SMC_SET_HASHL(lp, multicast_table[0]); + SMC_SET_HASHH(lp, multicast_table[1]); + } + spin_unlock_irqrestore(&lp->lock, flags); +} + + +/* + * Open and Initialize the board + * + * Set up everything, reset the card, etc.. + */ +static int +smc911x_open(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + /* reset the hardware */ + smc911x_reset(dev); + + /* Configure the PHY, initialize the link state */ + smc911x_phy_configure(&lp->phy_configure); + + /* Turn on Tx + Rx */ + smc911x_enable(dev); + + netif_start_queue(dev); + + return 0; +} + +/* + * smc911x_close + * + * this makes the board clean up everything that it can + * and not talk to the outside world. Caused by + * an 'ifconfig ethX down' + */ +static int smc911x_close(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + /* clear everything */ + smc911x_shutdown(dev); + + if (lp->phy_type != 0) { + /* We need to ensure that no calls to + * smc911x_phy_configure are pending. + */ + cancel_work_sync(&lp->phy_configure); + smc911x_phy_powerdown(dev, lp->mii.phy_id); + } + + if (lp->pending_tx_skb) { + dev_kfree_skb(lp->pending_tx_skb); + lp->pending_tx_skb = NULL; + } + + return 0; +} + +/* + * Ethtool support + */ +static int +smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret, status; + unsigned long flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + if (lp->phy_type != 0) { + spin_lock_irqsave(&lp->lock, flags); + ret = mii_ethtool_gset(&lp->mii, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP | SUPPORTED_AUI; + + if (lp->ctl_rspeed == 10) + ethtool_cmd_speed_set(cmd, SPEED_10); + else if (lp->ctl_rspeed == 100) + ethtool_cmd_speed_set(cmd, SPEED_100); + + cmd->autoneg = AUTONEG_DISABLE; + if (lp->mii.phy_id==1) + cmd->transceiver = XCVR_INTERNAL; + else + cmd->transceiver = XCVR_EXTERNAL; + cmd->port = 0; + SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status); + cmd->duplex = + (status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + ret = 0; + } + + return ret; +} + +static int +smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret; + unsigned long flags; + + if (lp->phy_type != 0) { + spin_lock_irqsave(&lp->lock, flags); + ret = mii_ethtool_sset(&lp->mii, cmd); + spin_unlock_irqrestore(&lp->lock, flags); + } else { + if (cmd->autoneg != AUTONEG_DISABLE || + cmd->speed != SPEED_10 || + (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || + (cmd->port != PORT_TP && cmd->port != PORT_AUI)) + return -EINVAL; + + lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; + + ret = 0; + } + + return ret; +} + +static void +smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, CARDNAME, sizeof(info->driver)); + strlcpy(info->version, version, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int smc911x_ethtool_nwayreset(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret = -EINVAL; + unsigned long flags; + + if (lp->phy_type != 0) { + spin_lock_irqsave(&lp->lock, flags); + ret = mii_nway_restart(&lp->mii); + spin_unlock_irqrestore(&lp->lock, flags); + } + + return ret; +} + +static u32 smc911x_ethtool_getmsglevel(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smc911x_local *lp = netdev_priv(dev); + lp->msg_enable = level; +} + +static int smc911x_ethtool_getregslen(struct net_device *dev) +{ + /* System regs + MAC regs + PHY regs */ + return (((E2P_CMD - ID_REV)/4 + 1) + + (WUCSR - MAC_CR)+1 + 32) * sizeof(u32); +} + +static void smc911x_ethtool_getregs(struct net_device *dev, + struct ethtool_regs* regs, void *buf) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned long flags; + u32 reg,i,j=0; + u32 *data = (u32*)buf; + + regs->version = lp->version; + for(i=ID_REV;i<=E2P_CMD;i+=4) { + data[j++] = SMC_inl(lp, i); + } + for(i=MAC_CR;i<=WUCSR;i++) { + spin_lock_irqsave(&lp->lock, flags); + SMC_GET_MAC_CSR(lp, i, reg); + spin_unlock_irqrestore(&lp->lock, flags); + data[j++] = reg; + } + for(i=0;i<=31;i++) { + spin_lock_irqsave(&lp->lock, flags); + SMC_GET_MII(lp, i, lp->mii.phy_id, reg); + spin_unlock_irqrestore(&lp->lock, flags); + data[j++] = reg & 0xFFFF; + } +} + +static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + unsigned int timeout; + int e2p_cmd; + + e2p_cmd = SMC_GET_E2P_CMD(lp); + for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { + if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { + PRINTK(dev, "%s timeout waiting for EEPROM to respond\n", + __func__); + return -EFAULT; + } + mdelay(1); + e2p_cmd = SMC_GET_E2P_CMD(lp); + } + if (timeout == 0) { + PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n", + __func__); + return -ETIMEDOUT; + } + return 0; +} + +static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev, + int cmd, int addr) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret; + + if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) + return ret; + SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ | + ((cmd) & (0x7<<28)) | + ((addr) & 0xFF)); + return 0; +} + +static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev, + u8 *data) +{ + struct smc911x_local *lp = netdev_priv(dev); + int 
ret; + + if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) + return ret; + *data = SMC_GET_E2P_DATA(lp); + return 0; +} + +static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev, + u8 data) +{ + struct smc911x_local *lp = netdev_priv(dev); + int ret; + + if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0) + return ret; + SMC_SET_E2P_DATA(lp, data); + return 0; +} + +static int smc911x_ethtool_geteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + u8 eebuf[SMC911X_EEPROM_LEN]; + int i, ret; + + for(i=0;ioffset, eeprom->len); + return 0; +} + +static int smc911x_ethtool_seteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i, ret; + + /* Enable erase */ + if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0) + return ret; + for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) { + /* erase byte */ + if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0) + return ret; + /* write byte */ + if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0) + return ret; + if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0) + return ret; + } + return 0; +} + +static int smc911x_ethtool_geteeprom_len(struct net_device *dev) +{ + return SMC911X_EEPROM_LEN; +} + +static const struct ethtool_ops smc911x_ethtool_ops = { + .get_settings = smc911x_ethtool_getsettings, + .set_settings = smc911x_ethtool_setsettings, + .get_drvinfo = smc911x_ethtool_getdrvinfo, + .get_msglevel = smc911x_ethtool_getmsglevel, + .set_msglevel = smc911x_ethtool_setmsglevel, + .nway_reset = smc911x_ethtool_nwayreset, + .get_link = ethtool_op_get_link, + .get_regs_len = smc911x_ethtool_getregslen, + .get_regs = smc911x_ethtool_getregs, + .get_eeprom_len = smc911x_ethtool_geteeprom_len, + .get_eeprom = smc911x_ethtool_geteeprom, + .set_eeprom = smc911x_ethtool_seteeprom, +}; + +/* + * smc911x_findirq + * + * This routine has a simple purpose -- make the SMC chip generate an + * interrupt, so an auto-detect routine can detect it, and find the IRQ, + */ +static int smc911x_findirq(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int timeout = 20; + unsigned long cookie; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + cookie = probe_irq_on(); + + /* + * Force a SW interrupt + */ + + SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_); + + /* + * Wait until positive that the interrupt has been generated + */ + do { + int int_status; + udelay(10); + int_status = SMC_GET_INT_EN(lp); + if (int_status & INT_EN_SW_INT_EN_) + break; /* got the interrupt */ + } while (--timeout); + + /* + * there is really nothing that I can do here if timeout fails, + * as autoirq_report will return a 0 anyway, which is what I + * want in this case. Plus, the clean up is needed in both + * cases. 
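+ * (Illustrative note, added for clarity: probe_irq_on() arms the
+ * kernel's IRQ autoprobe machinery before the software interrupt is
+ * forced above, and probe_irq_off(cookie) below reports the line that
+ * fired, or 0 if none was detected, after up to roughly 200us of
+ * polling in 10us steps.)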
+ */ + + /* and disable all interrupts again */ + SMC_SET_INT_EN(lp, 0); + + /* and return what I found */ + return probe_irq_off(cookie); +} + +static const struct net_device_ops smc911x_netdev_ops = { + .ndo_open = smc911x_open, + .ndo_stop = smc911x_close, + .ndo_start_xmit = smc911x_hard_start_xmit, + .ndo_tx_timeout = smc911x_timeout, + .ndo_set_rx_mode = smc911x_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smc911x_poll_controller, +#endif +}; + +/* + * Function: smc911x_probe(unsigned long ioaddr) + * + * Purpose: + * Tests to see if a given ioaddr points to an SMC911x chip. + * Returns a 0 on success + * + * Algorithm: + * (1) see if the endian word is OK + * (1) see if I recognize the chip ID in the appropriate register + * + * Here I do typical initialization tasks. + * + * o Initialize the structure if needed + * o print out my vanity message if not done so already + * o print out what type of hardware is detected + * o print out the ethernet address + * o find the IRQ + * o set up my private data + * o configure the dev structure with my subroutines + * o actually GRAB the irq. + * o GRAB the region + */ +static int smc911x_probe(struct net_device *dev) +{ + struct smc911x_local *lp = netdev_priv(dev); + int i, retval; + unsigned int val, chip_id, revision; + const char *version_string; + unsigned long irq_flags; + + DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); + + /* First, see if the endian word is recognized */ + val = SMC_GET_BYTE_TEST(lp); + DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n", + CARDNAME, val); + if (val != 0x87654321) { + netdev_err(dev, "Invalid chip endian 0x%08x\n", val); + retval = -ENODEV; + goto err_out; + } + + /* + * check if the revision register is something that I + * recognize. These might need to be added to later, + * as future revisions could be added. + */ + chip_id = SMC_GET_PN(lp); + DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n", + CARDNAME, chip_id); + for(i=0;chip_ids[i].id != 0; i++) { + if (chip_ids[i].id == chip_id) break; + } + if (!chip_ids[i].id) { + netdev_err(dev, "Unknown chip ID %04x\n", chip_id); + retval = -ENODEV; + goto err_out; + } + version_string = chip_ids[i].name; + + revision = SMC_GET_REV(lp); + DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision); + + /* At this point I'll assume that the chip is an SMC911x. 
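+ * (Worked example, added for illustration: with the default
+ * tx_fifo_kb = 8 the code below computes tx_fifo_size =
+ * (8 << 10) - 512 = 7680 bytes and rx_fifo_size =
+ * ((0x4000 - 512 - 7680) / 16) * 15 = 7680 bytes, which matches the
+ * "7680 Rx Data Fifo Size" entry in the AFC table.)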
*/ + DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n", + CARDNAME, chip_ids[i].name); + + /* Validate the TX FIFO size requested */ + if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) { + netdev_err(dev, "Invalid TX FIFO size requested %d\n", + tx_fifo_kb); + retval = -EINVAL; + goto err_out; + } + + /* fill in some of the fields */ + lp->version = chip_ids[i].id; + lp->revision = revision; + lp->tx_fifo_kb = tx_fifo_kb; + /* Reverse calculate the RX FIFO size from the TX */ + lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512; + lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15; + + /* Set the automatic flow control values */ + switch(lp->tx_fifo_kb) { + /* + * AFC_HI is about ((Rx Data Fifo Size)*2/3)/64 + * AFC_LO is AFC_HI/2 + * BACK_DUR is about 5uS*(AFC_LO) rounded down + */ + case 2:/* 13440 Rx Data Fifo Size */ + lp->afc_cfg=0x008C46AF;break; + case 3:/* 12480 Rx Data Fifo Size */ + lp->afc_cfg=0x0082419F;break; + case 4:/* 11520 Rx Data Fifo Size */ + lp->afc_cfg=0x00783C9F;break; + case 5:/* 10560 Rx Data Fifo Size */ + lp->afc_cfg=0x006E374F;break; + case 6:/* 9600 Rx Data Fifo Size */ + lp->afc_cfg=0x0064328F;break; + case 7:/* 8640 Rx Data Fifo Size */ + lp->afc_cfg=0x005A2D7F;break; + case 8:/* 7680 Rx Data Fifo Size */ + lp->afc_cfg=0x0050287F;break; + case 9:/* 6720 Rx Data Fifo Size */ + lp->afc_cfg=0x0046236F;break; + case 10:/* 5760 Rx Data Fifo Size */ + lp->afc_cfg=0x003C1E6F;break; + case 11:/* 4800 Rx Data Fifo Size */ + lp->afc_cfg=0x0032195F;break; + /* + * AFC_HI is ~1520 bytes less than RX Data Fifo Size + * AFC_LO is AFC_HI/2 + * BACK_DUR is about 5uS*(AFC_LO) rounded down + */ + case 12:/* 3840 Rx Data Fifo Size */ + lp->afc_cfg=0x0024124F;break; + case 13:/* 2880 Rx Data Fifo Size */ + lp->afc_cfg=0x0015073F;break; + case 14:/* 1920 Rx Data Fifo Size */ + lp->afc_cfg=0x0006032F;break; + default: + PRINTK(dev, "ERROR -- no AFC_CFG setting found"); + break; + } + + DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev, + "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME, + lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg); + + spin_lock_init(&lp->lock); + + /* Get the MAC address */ + SMC_GET_MAC_ADDR(lp, dev->dev_addr); + + /* now, reset the chip, and put it into a known state */ + smc911x_reset(dev); + + /* + * If dev->irq is 0, then the device has to be banged on to see + * what the IRQ is. + * + * Specifying an IRQ is done with the assumption that the user knows + * what (s)he is doing. No checking is done!!!! + */ + if (dev->irq < 1) { + int trials; + + trials = 3; + while (trials--) { + dev->irq = smc911x_findirq(dev); + if (dev->irq) + break; + /* kick the card and try again */ + smc911x_reset(dev); + } + } + if (dev->irq == 0) { + netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n"); + retval = -ENODEV; + goto err_out; + } + dev->irq = irq_canonicalize(dev->irq); + + dev->netdev_ops = &smc911x_netdev_ops; + dev->watchdog_timeo = msecs_to_jiffies(watchdog); + dev->ethtool_ops = &smc911x_ethtool_ops; + + INIT_WORK(&lp->phy_configure, smc911x_phy_configure); + lp->mii.phy_id_mask = 0x1f; + lp->mii.reg_num_mask = 0x1f; + lp->mii.force_media = 0; + lp->mii.full_duplex = 0; + lp->mii.dev = dev; + lp->mii.mdio_read = smc911x_phy_read; + lp->mii.mdio_write = smc911x_phy_write; + + /* + * Locate the phy, if any. 
+ */ + smc911x_phy_detect(dev); + + /* Set default parameters */ + lp->msg_enable = NETIF_MSG_LINK; + lp->ctl_rfduplx = 1; + lp->ctl_rspeed = 100; + +#ifdef SMC_DYNAMIC_BUS_CONFIG + irq_flags = lp->cfg.irq_flags; +#else + irq_flags = IRQF_SHARED | SMC_IRQ_SENSE; +#endif + + /* Grab the IRQ */ + retval = request_irq(dev->irq, smc911x_interrupt, + irq_flags, dev->name, dev); + if (retval) + goto err_out; + +#ifdef SMC_USE_DMA + lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); + lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); + lp->rxdma_active = 0; + lp->txdma_active = 0; + dev->dma = lp->rxdma; +#endif + + retval = register_netdev(dev); + if (retval == 0) { + /* now, print out the card info, in a short format.. */ + netdev_info(dev, "%s (rev %d) at %#lx IRQ %d", + version_string, lp->revision, + dev->base_addr, dev->irq); + +#ifdef SMC_USE_DMA + if (lp->rxdma != -1) + pr_cont(" RXDMA %d", lp->rxdma); + + if (lp->txdma != -1) + pr_cont(" TXDMA %d", lp->txdma); +#endif + pr_cont("\n"); + if (!is_valid_ether_addr(dev->dev_addr)) { + netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n"); + } else { + /* Print the Ethernet address */ + netdev_info(dev, "Ethernet addr: %pM\n", + dev->dev_addr); + } + + if (lp->phy_type == 0) { + PRINTK(dev, "No PHY found\n"); + } else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) { + PRINTK(dev, "LAN911x Internal PHY\n"); + } else { + PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type); + } + } + +err_out: +#ifdef SMC_USE_DMA + if (retval) { + if (lp->rxdma != -1) { + SMC_DMA_FREE(dev, lp->rxdma); + } + if (lp->txdma != -1) { + SMC_DMA_FREE(dev, lp->txdma); + } + } +#endif + return retval; +} + +/* + * smc911x_drv_probe(void) + * + * Output: + * 0 --> there is a device + * anything else, error + */ +static int smc911x_drv_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + struct resource *res; + struct smc911x_local *lp; + void __iomem *addr; + int ret; + + /* ndev is not valid yet, so avoid passing it in. */ + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto out; + } + + /* + * Request the regions. 
+ */ + if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) { + ret = -EBUSY; + goto out; + } + + ndev = alloc_etherdev(sizeof(struct smc911x_local)); + if (!ndev) { + ret = -ENOMEM; + goto release_1; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->dma = (unsigned char)-1; + ndev->irq = platform_get_irq(pdev, 0); + lp = netdev_priv(ndev); + lp->netdev = ndev; +#ifdef SMC_DYNAMIC_BUS_CONFIG + { + struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev); + if (!pd) { + ret = -EINVAL; + goto release_both; + } + memcpy(&lp->cfg, pd, sizeof(lp->cfg)); + } +#endif + + addr = ioremap(res->start, SMC911X_IO_EXTENT); + if (!addr) { + ret = -ENOMEM; + goto release_both; + } + + platform_set_drvdata(pdev, ndev); + lp->base = addr; + ndev->base_addr = res->start; + ret = smc911x_probe(ndev); + if (ret != 0) { + iounmap(addr); +release_both: + free_netdev(ndev); +release_1: + release_mem_region(res->start, SMC911X_IO_EXTENT); +out: + pr_info("%s: not found (%d).\n", CARDNAME, ret); + } +#ifdef SMC_USE_DMA + else { + lp->physaddr = res->start; + lp->dev = &pdev->dev; + } +#endif + + return ret; +} + +static int smc911x_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smc911x_local *lp = netdev_priv(ndev); + struct resource *res; + + DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); + + unregister_netdev(ndev); + + free_irq(ndev->irq, ndev); + +#ifdef SMC_USE_DMA + { + if (lp->rxdma != -1) { + SMC_DMA_FREE(dev, lp->rxdma); + } + if (lp->txdma != -1) { + SMC_DMA_FREE(dev, lp->txdma); + } + } +#endif + iounmap(lp->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, SMC911X_IO_EXTENT); + + free_netdev(ndev); + return 0; +} + +static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(dev); + struct smc911x_local *lp = netdev_priv(ndev); + + DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); + if (ndev) { + if (netif_running(ndev)) { + netif_device_detach(ndev); + smc911x_shutdown(ndev); +#if POWER_DOWN + /* Set D2 - Energy detect only setting */ + SMC_SET_PMT_CTRL(lp, 2<<12); +#endif + } + } + return 0; +} + +static int smc911x_drv_resume(struct platform_device *dev) +{ + struct net_device *ndev = platform_get_drvdata(dev); + + DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__); + if (ndev) { + struct smc911x_local *lp = netdev_priv(ndev); + + if (netif_running(ndev)) { + smc911x_reset(ndev); + if (lp->phy_type != 0) + smc911x_phy_configure(&lp->phy_configure); + smc911x_enable(ndev); + netif_device_attach(ndev); + } + } + return 0; +} + +static struct platform_driver smc911x_driver = { + .probe = smc911x_drv_probe, + .remove = smc911x_drv_remove, + .suspend = smc911x_drv_suspend, + .resume = smc911x_drv_resume, + .driver = { + .name = CARDNAME, + }, +}; + +module_platform_driver(smc911x_driver); diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h new file mode 100644 index 000000000..04b35f55d --- /dev/null +++ b/drivers/net/ethernet/smsc/smc911x.h @@ -0,0 +1,923 @@ +/*------------------------------------------------------------------------ + . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device. + . + . Copyright (C) 2005 Sensoria Corp. + . Derived from the unified SMC91x driver by Nicolas Pitre + . + . This program is free software; you can redistribute it and/or modify + . it under the terms of the GNU General Public License as published by + . 
the Free Software Foundation; either version 2 of the License, or + . (at your option) any later version. + . + . This program is distributed in the hope that it will be useful, + . but WITHOUT ANY WARRANTY; without even the implied warranty of + . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + . GNU General Public License for more details. + . + . You should have received a copy of the GNU General Public License + . along with this program; if not, see . + . + . Information contained in this file was obtained from the LAN9118 + . manual from SMC. To get a copy, if you really want one, you can find + . information under www.smsc.com. + . + . Authors + . Dustin McIntire + . + ---------------------------------------------------------------------------*/ +#ifndef _SMC911X_H_ +#define _SMC911X_H_ + +#include +/* + * Use the DMA feature on PXA chips + */ +#ifdef CONFIG_ARCH_PXA + #define SMC_USE_PXA_DMA 1 + #define SMC_USE_16BIT 0 + #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING +#elif defined(CONFIG_SH_MAGIC_PANEL_R2) + #define SMC_USE_16BIT 0 + #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW +#elif defined(CONFIG_ARCH_OMAP3) + #define SMC_USE_16BIT 0 + #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW + #define SMC_MEM_RESERVED 1 +#elif defined(CONFIG_ARCH_OMAP2) + #define SMC_USE_16BIT 0 + #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW + #define SMC_MEM_RESERVED 1 +#else +/* + * Default configuration + */ + +#define SMC_DYNAMIC_BUS_CONFIG +#endif + +#ifdef SMC_USE_PXA_DMA +#define SMC_USE_DMA +#endif + +/* store this information for the driver.. */ +struct smc911x_local { + /* + * If I have to wait until the DMA is finished and ready to reload a + * packet, I will store the skbuff here. Then, the DMA will send it + * out and free it. 
+ */ + struct sk_buff *pending_tx_skb; + + /* version/revision of the SMC911x chip */ + u16 version; + u16 revision; + + /* FIFO sizes */ + int tx_fifo_kb; + int tx_fifo_size; + int rx_fifo_size; + int afc_cfg; + + /* Contains the current active receive/phy mode */ + int ctl_rfduplx; + int ctl_rspeed; + + u32 msg_enable; + u32 phy_type; + struct mii_if_info mii; + + /* work queue */ + struct work_struct phy_configure; + + int tx_throttle; + spinlock_t lock; + + struct net_device *netdev; + +#ifdef SMC_USE_DMA + /* DMA needs the physical address of the chip */ + u_long physaddr; + int rxdma; + int txdma; + int rxdma_active; + int txdma_active; + struct sk_buff *current_rx_skb; + struct sk_buff *current_tx_skb; + struct device *dev; +#endif + void __iomem *base; +#ifdef SMC_DYNAMIC_BUS_CONFIG + struct smc911x_platdata cfg; +#endif +}; + +/* + * Define the bus width specific IO macros + */ + +#ifdef SMC_DYNAMIC_BUS_CONFIG +static inline unsigned int SMC_inl(struct smc911x_local *lp, int reg) +{ + void __iomem *ioaddr = lp->base + reg; + + if (lp->cfg.flags & SMC911X_USE_32BIT) + return readl(ioaddr); + + if (lp->cfg.flags & SMC911X_USE_16BIT) + return readw(ioaddr) | (readw(ioaddr + 2) << 16); + + BUG(); +} + +static inline void SMC_outl(unsigned int value, struct smc911x_local *lp, + int reg) +{ + void __iomem *ioaddr = lp->base + reg; + + if (lp->cfg.flags & SMC911X_USE_32BIT) { + writel(value, ioaddr); + return; + } + + if (lp->cfg.flags & SMC911X_USE_16BIT) { + writew(value & 0xffff, ioaddr); + writew(value >> 16, ioaddr + 2); + return; + } + + BUG(); +} + +static inline void SMC_insl(struct smc911x_local *lp, int reg, + void *addr, unsigned int count) +{ + void __iomem *ioaddr = lp->base + reg; + + if (lp->cfg.flags & SMC911X_USE_32BIT) { + ioread32_rep(ioaddr, addr, count); + return; + } + + if (lp->cfg.flags & SMC911X_USE_16BIT) { + ioread16_rep(ioaddr, addr, count * 2); + return; + } + + BUG(); +} + +static inline void SMC_outsl(struct smc911x_local *lp, int reg, + void *addr, unsigned int count) +{ + void __iomem *ioaddr = lp->base + reg; + + if (lp->cfg.flags & SMC911X_USE_32BIT) { + iowrite32_rep(ioaddr, addr, count); + return; + } + + if (lp->cfg.flags & SMC911X_USE_16BIT) { + iowrite16_rep(ioaddr, addr, count * 2); + return; + } + + BUG(); +} +#else +#if SMC_USE_16BIT +#define SMC_inl(lp, r) ((readw((lp)->base + (r)) & 0xFFFF) + (readw((lp)->base + (r) + 2) << 16)) +#define SMC_outl(v, lp, r) \ + do{ \ + writew(v & 0xFFFF, (lp)->base + (r)); \ + writew(v >> 16, (lp)->base + (r) + 2); \ + } while (0) +#define SMC_insl(lp, r, p, l) ioread16_rep((short*)((lp)->base + (r)), p, l*2) +#define SMC_outsl(lp, r, p, l) iowrite16_rep((short*)((lp)->base + (r)), p, l*2) + +#elif SMC_USE_32BIT +#define SMC_inl(lp, r) readl((lp)->base + (r)) +#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r)) +#define SMC_insl(lp, r, p, l) ioread32_rep((int*)((lp)->base + (r)), p, l) +#define SMC_outsl(lp, r, p, l) iowrite32_rep((int*)((lp)->base + (r)), p, l) + +#endif /* SMC_USE_16BIT */ +#endif /* SMC_DYNAMIC_BUS_CONFIG */ + + +#ifdef SMC_USE_PXA_DMA + +#include + +/* + * Define the request and free functions + * These are unfortunately architecture specific as no generic allocation + * mechanism exits + */ +#define SMC_DMA_REQUEST(dev, handler) \ + pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev) + +#define SMC_DMA_FREE(dev, dma) \ + pxa_free_dma(dma) + +#define SMC_DMA_ACK_IRQ(dev, dma) \ +{ \ + if (DCSR(dma) & DCSR_BUSERR) { \ + netdev_err(dev, "DMA %d bus error!\n", dma); \ + } \ + DCSR(dma) = 
DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \ +} + +/* + * Use a DMA for RX and TX packets. + */ +#include + +static dma_addr_t rx_dmabuf, tx_dmabuf; +static int rx_dmalen, tx_dmalen; + +#ifdef SMC_insl +#undef SMC_insl +#define SMC_insl(lp, r, p, l) \ + smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l) + +static inline void +smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, + int reg, int dma, u_char *buf, int len) +{ + /* 64 bit alignment is required for memory to memory DMA */ + if ((long)buf & 4) { + *((u32 *)buf) = SMC_inl(lp, reg); + buf += 4; + len--; + } + + len *= 4; + rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE); + rx_dmalen = len; + DCSR(dma) = DCSR_NODESC; + DTADR(dma) = rx_dmabuf; + DSADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | + DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; +} +#endif + +#ifdef SMC_outsl +#undef SMC_outsl +#define SMC_outsl(lp, r, p, l) \ + smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l) + +static inline void +smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr, + int reg, int dma, u_char *buf, int len) +{ + /* 64 bit alignment is required for memory to memory DMA */ + if ((long)buf & 4) { + SMC_outl(*((u32 *)buf), lp, reg); + buf += 4; + len--; + } + + len *= 4; + tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE); + tx_dmalen = len; + DCSR(dma) = DCSR_NODESC; + DSADR(dma) = tx_dmabuf; + DTADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 | + DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; +} +#endif +#endif /* SMC_USE_PXA_DMA */ + + +/* Chip Parameters and Register Definitions */ + +#define SMC911X_TX_FIFO_LOW_THRESHOLD (1536*2) + +#define SMC911X_IO_EXTENT 0x100 + +#define SMC911X_EEPROM_LEN 7 + +/* Below are the register offsets and bit definitions + * of the Lan911x memory space + */ +#define RX_DATA_FIFO (0x00) + +#define TX_DATA_FIFO (0x20) +#define TX_CMD_A_INT_ON_COMP_ (0x80000000) +#define TX_CMD_A_INT_BUF_END_ALGN_ (0x03000000) +#define TX_CMD_A_INT_4_BYTE_ALGN_ (0x00000000) +#define TX_CMD_A_INT_16_BYTE_ALGN_ (0x01000000) +#define TX_CMD_A_INT_32_BYTE_ALGN_ (0x02000000) +#define TX_CMD_A_INT_DATA_OFFSET_ (0x001F0000) +#define TX_CMD_A_INT_FIRST_SEG_ (0x00002000) +#define TX_CMD_A_INT_LAST_SEG_ (0x00001000) +#define TX_CMD_A_BUF_SIZE_ (0x000007FF) +#define TX_CMD_B_PKT_TAG_ (0xFFFF0000) +#define TX_CMD_B_ADD_CRC_DISABLE_ (0x00002000) +#define TX_CMD_B_DISABLE_PADDING_ (0x00001000) +#define TX_CMD_B_PKT_BYTE_LENGTH_ (0x000007FF) + +#define RX_STATUS_FIFO (0x40) +#define RX_STS_PKT_LEN_ (0x3FFF0000) +#define RX_STS_ES_ (0x00008000) +#define RX_STS_BCST_ (0x00002000) +#define RX_STS_LEN_ERR_ (0x00001000) +#define RX_STS_RUNT_ERR_ (0x00000800) +#define RX_STS_MCAST_ (0x00000400) +#define RX_STS_TOO_LONG_ (0x00000080) +#define RX_STS_COLL_ (0x00000040) +#define RX_STS_ETH_TYPE_ (0x00000020) +#define RX_STS_WDOG_TMT_ (0x00000010) +#define RX_STS_MII_ERR_ (0x00000008) +#define RX_STS_DRIBBLING_ (0x00000004) +#define RX_STS_CRC_ERR_ (0x00000002) +#define RX_STATUS_FIFO_PEEK (0x44) +#define TX_STATUS_FIFO (0x48) +#define TX_STS_TAG_ (0xFFFF0000) +#define TX_STS_ES_ (0x00008000) +#define TX_STS_LOC_ (0x00000800) +#define TX_STS_NO_CARR_ (0x00000400) +#define TX_STS_LATE_COLL_ (0x00000200) +#define TX_STS_MANY_COLL_ (0x00000100) +#define TX_STS_COLL_CNT_ (0x00000078) +#define TX_STS_MANY_DEFER_ (0x00000004) +#define TX_STS_UNDERRUN_ (0x00000002) +#define 
TX_STS_DEFERRED_ (0x00000001) +#define TX_STATUS_FIFO_PEEK (0x4C) +#define ID_REV (0x50) +#define ID_REV_CHIP_ID_ (0xFFFF0000) /* RO */ +#define ID_REV_REV_ID_ (0x0000FFFF) /* RO */ + +#define INT_CFG (0x54) +#define INT_CFG_INT_DEAS_ (0xFF000000) /* R/W */ +#define INT_CFG_INT_DEAS_CLR_ (0x00004000) +#define INT_CFG_INT_DEAS_STS_ (0x00002000) +#define INT_CFG_IRQ_INT_ (0x00001000) /* RO */ +#define INT_CFG_IRQ_EN_ (0x00000100) /* R/W */ +#define INT_CFG_IRQ_POL_ (0x00000010) /* R/W Not Affected by SW Reset */ +#define INT_CFG_IRQ_TYPE_ (0x00000001) /* R/W Not Affected by SW Reset */ + +#define INT_STS (0x58) +#define INT_STS_SW_INT_ (0x80000000) /* R/WC */ +#define INT_STS_TXSTOP_INT_ (0x02000000) /* R/WC */ +#define INT_STS_RXSTOP_INT_ (0x01000000) /* R/WC */ +#define INT_STS_RXDFH_INT_ (0x00800000) /* R/WC */ +#define INT_STS_RXDF_INT_ (0x00400000) /* R/WC */ +#define INT_STS_TX_IOC_ (0x00200000) /* R/WC */ +#define INT_STS_RXD_INT_ (0x00100000) /* R/WC */ +#define INT_STS_GPT_INT_ (0x00080000) /* R/WC */ +#define INT_STS_PHY_INT_ (0x00040000) /* RO */ +#define INT_STS_PME_INT_ (0x00020000) /* R/WC */ +#define INT_STS_TXSO_ (0x00010000) /* R/WC */ +#define INT_STS_RWT_ (0x00008000) /* R/WC */ +#define INT_STS_RXE_ (0x00004000) /* R/WC */ +#define INT_STS_TXE_ (0x00002000) /* R/WC */ +//#define INT_STS_ERX_ (0x00001000) /* R/WC */ +#define INT_STS_TDFU_ (0x00000800) /* R/WC */ +#define INT_STS_TDFO_ (0x00000400) /* R/WC */ +#define INT_STS_TDFA_ (0x00000200) /* R/WC */ +#define INT_STS_TSFF_ (0x00000100) /* R/WC */ +#define INT_STS_TSFL_ (0x00000080) /* R/WC */ +//#define INT_STS_RXDF_ (0x00000040) /* R/WC */ +#define INT_STS_RDFO_ (0x00000040) /* R/WC */ +#define INT_STS_RDFL_ (0x00000020) /* R/WC */ +#define INT_STS_RSFF_ (0x00000010) /* R/WC */ +#define INT_STS_RSFL_ (0x00000008) /* R/WC */ +#define INT_STS_GPIO2_INT_ (0x00000004) /* R/WC */ +#define INT_STS_GPIO1_INT_ (0x00000002) /* R/WC */ +#define INT_STS_GPIO0_INT_ (0x00000001) /* R/WC */ + +#define INT_EN (0x5C) +#define INT_EN_SW_INT_EN_ (0x80000000) /* R/W */ +#define INT_EN_TXSTOP_INT_EN_ (0x02000000) /* R/W */ +#define INT_EN_RXSTOP_INT_EN_ (0x01000000) /* R/W */ +#define INT_EN_RXDFH_INT_EN_ (0x00800000) /* R/W */ +//#define INT_EN_RXDF_INT_EN_ (0x00400000) /* R/W */ +#define INT_EN_TIOC_INT_EN_ (0x00200000) /* R/W */ +#define INT_EN_RXD_INT_EN_ (0x00100000) /* R/W */ +#define INT_EN_GPT_INT_EN_ (0x00080000) /* R/W */ +#define INT_EN_PHY_INT_EN_ (0x00040000) /* R/W */ +#define INT_EN_PME_INT_EN_ (0x00020000) /* R/W */ +#define INT_EN_TXSO_EN_ (0x00010000) /* R/W */ +#define INT_EN_RWT_EN_ (0x00008000) /* R/W */ +#define INT_EN_RXE_EN_ (0x00004000) /* R/W */ +#define INT_EN_TXE_EN_ (0x00002000) /* R/W */ +//#define INT_EN_ERX_EN_ (0x00001000) /* R/W */ +#define INT_EN_TDFU_EN_ (0x00000800) /* R/W */ +#define INT_EN_TDFO_EN_ (0x00000400) /* R/W */ +#define INT_EN_TDFA_EN_ (0x00000200) /* R/W */ +#define INT_EN_TSFF_EN_ (0x00000100) /* R/W */ +#define INT_EN_TSFL_EN_ (0x00000080) /* R/W */ +//#define INT_EN_RXDF_EN_ (0x00000040) /* R/W */ +#define INT_EN_RDFO_EN_ (0x00000040) /* R/W */ +#define INT_EN_RDFL_EN_ (0x00000020) /* R/W */ +#define INT_EN_RSFF_EN_ (0x00000010) /* R/W */ +#define INT_EN_RSFL_EN_ (0x00000008) /* R/W */ +#define INT_EN_GPIO2_INT_ (0x00000004) /* R/W */ +#define INT_EN_GPIO1_INT_ (0x00000002) /* R/W */ +#define INT_EN_GPIO0_INT_ (0x00000001) /* R/W */ + +#define BYTE_TEST (0x64) +#define FIFO_INT (0x68) +#define FIFO_INT_TX_AVAIL_LEVEL_ (0xFF000000) /* R/W */ +#define FIFO_INT_TX_STS_LEVEL_ (0x00FF0000) 
/* R/W */ +#define FIFO_INT_RX_AVAIL_LEVEL_ (0x0000FF00) /* R/W */ +#define FIFO_INT_RX_STS_LEVEL_ (0x000000FF) /* R/W */ + +#define RX_CFG (0x6C) +#define RX_CFG_RX_END_ALGN_ (0xC0000000) /* R/W */ +#define RX_CFG_RX_END_ALGN4_ (0x00000000) /* R/W */ +#define RX_CFG_RX_END_ALGN16_ (0x40000000) /* R/W */ +#define RX_CFG_RX_END_ALGN32_ (0x80000000) /* R/W */ +#define RX_CFG_RX_DMA_CNT_ (0x0FFF0000) /* R/W */ +#define RX_CFG_RX_DUMP_ (0x00008000) /* R/W */ +#define RX_CFG_RXDOFF_ (0x00001F00) /* R/W */ +//#define RX_CFG_RXBAD_ (0x00000001) /* R/W */ + +#define TX_CFG (0x70) +//#define TX_CFG_TX_DMA_LVL_ (0xE0000000) /* R/W */ +//#define TX_CFG_TX_DMA_CNT_ (0x0FFF0000) /* R/W Self Clearing */ +#define TX_CFG_TXS_DUMP_ (0x00008000) /* Self Clearing */ +#define TX_CFG_TXD_DUMP_ (0x00004000) /* Self Clearing */ +#define TX_CFG_TXSAO_ (0x00000004) /* R/W */ +#define TX_CFG_TX_ON_ (0x00000002) /* R/W */ +#define TX_CFG_STOP_TX_ (0x00000001) /* Self Clearing */ + +#define HW_CFG (0x74) +#define HW_CFG_TTM_ (0x00200000) /* R/W */ +#define HW_CFG_SF_ (0x00100000) /* R/W */ +#define HW_CFG_TX_FIF_SZ_ (0x000F0000) /* R/W */ +#define HW_CFG_TR_ (0x00003000) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_ (0x00000060) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_INT_PHY_ (0x00000000) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ (0x00000020) /* R/W */ +#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ (0x00000040) /* R/W */ +#define HW_CFG_SMI_SEL_ (0x00000010) /* R/W */ +#define HW_CFG_EXT_PHY_DET_ (0x00000008) /* RO */ +#define HW_CFG_EXT_PHY_EN_ (0x00000004) /* R/W */ +#define HW_CFG_32_16_BIT_MODE_ (0x00000004) /* RO */ +#define HW_CFG_SRST_TO_ (0x00000002) /* RO */ +#define HW_CFG_SRST_ (0x00000001) /* Self Clearing */ + +#define RX_DP_CTRL (0x78) +#define RX_DP_CTRL_RX_FFWD_ (0x80000000) /* R/W */ +#define RX_DP_CTRL_FFWD_BUSY_ (0x80000000) /* RO */ + +#define RX_FIFO_INF (0x7C) +#define RX_FIFO_INF_RXSUSED_ (0x00FF0000) /* RO */ +#define RX_FIFO_INF_RXDUSED_ (0x0000FFFF) /* RO */ + +#define TX_FIFO_INF (0x80) +#define TX_FIFO_INF_TSUSED_ (0x00FF0000) /* RO */ +#define TX_FIFO_INF_TDFREE_ (0x0000FFFF) /* RO */ + +#define PMT_CTRL (0x84) +#define PMT_CTRL_PM_MODE_ (0x00003000) /* Self Clearing */ +#define PMT_CTRL_PHY_RST_ (0x00000400) /* Self Clearing */ +#define PMT_CTRL_WOL_EN_ (0x00000200) /* R/W */ +#define PMT_CTRL_ED_EN_ (0x00000100) /* R/W */ +#define PMT_CTRL_PME_TYPE_ (0x00000040) /* R/W Not Affected by SW Reset */ +#define PMT_CTRL_WUPS_ (0x00000030) /* R/WC */ +#define PMT_CTRL_WUPS_NOWAKE_ (0x00000000) /* R/WC */ +#define PMT_CTRL_WUPS_ED_ (0x00000010) /* R/WC */ +#define PMT_CTRL_WUPS_WOL_ (0x00000020) /* R/WC */ +#define PMT_CTRL_WUPS_MULTI_ (0x00000030) /* R/WC */ +#define PMT_CTRL_PME_IND_ (0x00000008) /* R/W */ +#define PMT_CTRL_PME_POL_ (0x00000004) /* R/W */ +#define PMT_CTRL_PME_EN_ (0x00000002) /* R/W Not Affected by SW Reset */ +#define PMT_CTRL_READY_ (0x00000001) /* RO */ + +#define GPIO_CFG (0x88) +#define GPIO_CFG_LED3_EN_ (0x40000000) /* R/W */ +#define GPIO_CFG_LED2_EN_ (0x20000000) /* R/W */ +#define GPIO_CFG_LED1_EN_ (0x10000000) /* R/W */ +#define GPIO_CFG_GPIO2_INT_POL_ (0x04000000) /* R/W */ +#define GPIO_CFG_GPIO1_INT_POL_ (0x02000000) /* R/W */ +#define GPIO_CFG_GPIO0_INT_POL_ (0x01000000) /* R/W */ +#define GPIO_CFG_EEPR_EN_ (0x00700000) /* R/W */ +#define GPIO_CFG_GPIOBUF2_ (0x00040000) /* R/W */ +#define GPIO_CFG_GPIOBUF1_ (0x00020000) /* R/W */ +#define GPIO_CFG_GPIOBUF0_ (0x00010000) /* R/W */ +#define GPIO_CFG_GPIODIR2_ (0x00000400) /* R/W */ +#define GPIO_CFG_GPIODIR1_ (0x00000200) /* 
R/W */ +#define GPIO_CFG_GPIODIR0_ (0x00000100) /* R/W */ +#define GPIO_CFG_GPIOD4_ (0x00000010) /* R/W */ +#define GPIO_CFG_GPIOD3_ (0x00000008) /* R/W */ +#define GPIO_CFG_GPIOD2_ (0x00000004) /* R/W */ +#define GPIO_CFG_GPIOD1_ (0x00000002) /* R/W */ +#define GPIO_CFG_GPIOD0_ (0x00000001) /* R/W */ + +#define GPT_CFG (0x8C) +#define GPT_CFG_TIMER_EN_ (0x20000000) /* R/W */ +#define GPT_CFG_GPT_LOAD_ (0x0000FFFF) /* R/W */ + +#define GPT_CNT (0x90) +#define GPT_CNT_GPT_CNT_ (0x0000FFFF) /* RO */ + +#define ENDIAN (0x98) +#define FREE_RUN (0x9C) +#define RX_DROP (0xA0) +#define MAC_CSR_CMD (0xA4) +#define MAC_CSR_CMD_CSR_BUSY_ (0x80000000) /* Self Clearing */ +#define MAC_CSR_CMD_R_NOT_W_ (0x40000000) /* R/W */ +#define MAC_CSR_CMD_CSR_ADDR_ (0x000000FF) /* R/W */ + +#define MAC_CSR_DATA (0xA8) +#define AFC_CFG (0xAC) +#define AFC_CFG_AFC_HI_ (0x00FF0000) /* R/W */ +#define AFC_CFG_AFC_LO_ (0x0000FF00) /* R/W */ +#define AFC_CFG_BACK_DUR_ (0x000000F0) /* R/W */ +#define AFC_CFG_FCMULT_ (0x00000008) /* R/W */ +#define AFC_CFG_FCBRD_ (0x00000004) /* R/W */ +#define AFC_CFG_FCADD_ (0x00000002) /* R/W */ +#define AFC_CFG_FCANY_ (0x00000001) /* R/W */ + +#define E2P_CMD (0xB0) +#define E2P_CMD_EPC_BUSY_ (0x80000000) /* Self Clearing */ +#define E2P_CMD_EPC_CMD_ (0x70000000) /* R/W */ +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) /* R/W */ +#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) /* R/W */ +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) /* R/W */ +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) /* R/W */ +#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) /* R/W */ +#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) /* R/W */ +#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) /* R/W */ +#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) /* R/W */ +#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) /* RO */ +#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) /* RO */ +#define E2P_CMD_EPC_ADDR_ (0x000000FF) /* R/W */ + +#define E2P_DATA (0xB4) +#define E2P_DATA_EEPROM_DATA_ (0x000000FF) /* R/W */ +/* end of LAN register offsets and bit definitions */ + +/* + **************************************************************************** + **************************************************************************** + * MAC Control and Status Register (Indirect Address) + * Offset (through the MAC_CSR CMD and DATA port) + **************************************************************************** + **************************************************************************** + * + */ +#define MAC_CR (0x01) /* R/W */ + +/* MAC_CR - MAC Control Register */ +#define MAC_CR_RXALL_ (0x80000000) +// TODO: delete this bit? It is not described in the data sheet. 
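+/*
+ * Editorial sketch (not part of the original driver): the MAC registers in
+ * this section are only reachable indirectly, through the MAC_CSR_CMD and
+ * MAC_CSR_DATA ports defined above.  The helper below illustrates the
+ * busy-wait handshake for a single indirect read; its name is invented for
+ * this illustration, and the driver itself uses the equivalent
+ * SMC_GET_MAC_CSR() macro defined later in this header.
+ */
+static inline u32 smc911x_example_mac_csr_read(struct smc911x_local *lp, int csr)
+{
+	/* wait for any previous CSR command to complete */
+	while (SMC_inl(lp, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
+		;
+	/* issue a read (R_NOT_W) of the requested CSR address */
+	SMC_outl(MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_ | csr,
+		 lp, MAC_CSR_CMD);
+	/* wait for the read to finish, then pick up the data */
+	while (SMC_inl(lp, MAC_CSR_CMD) & MAC_CSR_CMD_CSR_BUSY_)
+		;
+	return SMC_inl(lp, MAC_CSR_DATA);
+}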
+#define MAC_CR_HBDIS_ (0x10000000) +#define MAC_CR_RCVOWN_ (0x00800000) +#define MAC_CR_LOOPBK_ (0x00200000) +#define MAC_CR_FDPX_ (0x00100000) +#define MAC_CR_MCPAS_ (0x00080000) +#define MAC_CR_PRMS_ (0x00040000) +#define MAC_CR_INVFILT_ (0x00020000) +#define MAC_CR_PASSBAD_ (0x00010000) +#define MAC_CR_HFILT_ (0x00008000) +#define MAC_CR_HPFILT_ (0x00002000) +#define MAC_CR_LCOLL_ (0x00001000) +#define MAC_CR_BCAST_ (0x00000800) +#define MAC_CR_DISRTY_ (0x00000400) +#define MAC_CR_PADSTR_ (0x00000100) +#define MAC_CR_BOLMT_MASK_ (0x000000C0) +#define MAC_CR_DFCHK_ (0x00000020) +#define MAC_CR_TXEN_ (0x00000008) +#define MAC_CR_RXEN_ (0x00000004) + +#define ADDRH (0x02) /* R/W mask 0x0000FFFFUL */ +#define ADDRL (0x03) /* R/W mask 0xFFFFFFFFUL */ +#define HASHH (0x04) /* R/W */ +#define HASHL (0x05) /* R/W */ + +#define MII_ACC (0x06) /* R/W */ +#define MII_ACC_PHY_ADDR_ (0x0000F800) +#define MII_ACC_MIIRINDA_ (0x000007C0) +#define MII_ACC_MII_WRITE_ (0x00000002) +#define MII_ACC_MII_BUSY_ (0x00000001) + +#define MII_DATA (0x07) /* R/W mask 0x0000FFFFUL */ + +#define FLOW (0x08) /* R/W */ +#define FLOW_FCPT_ (0xFFFF0000) +#define FLOW_FCPASS_ (0x00000004) +#define FLOW_FCEN_ (0x00000002) +#define FLOW_FCBSY_ (0x00000001) + +#define VLAN1 (0x09) /* R/W mask 0x0000FFFFUL */ +#define VLAN1_VTI1_ (0x0000ffff) + +#define VLAN2 (0x0A) /* R/W mask 0x0000FFFFUL */ +#define VLAN2_VTI2_ (0x0000ffff) + +#define WUFF (0x0B) /* WO */ + +#define WUCSR (0x0C) /* R/W */ +#define WUCSR_GUE_ (0x00000200) +#define WUCSR_WUFR_ (0x00000040) +#define WUCSR_MPR_ (0x00000020) +#define WUCSR_WAKE_EN_ (0x00000004) +#define WUCSR_MPEN_ (0x00000002) + +/* + **************************************************************************** + * Chip Specific MII Defines + **************************************************************************** + * + * Phy register offsets and bit definitions + * + */ + +#define PHY_MODE_CTRL_STS ((u32)17) /* Mode Control/Status Register */ +//#define MODE_CTRL_STS_FASTRIP_ ((u16)0x4000) +#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000) +//#define MODE_CTRL_STS_LOWSQEN_ ((u16)0x0800) +//#define MODE_CTRL_STS_MDPREBP_ ((u16)0x0400) +//#define MODE_CTRL_STS_FARLOOPBACK_ ((u16)0x0200) +//#define MODE_CTRL_STS_FASTEST_ ((u16)0x0100) +//#define MODE_CTRL_STS_REFCLKEN_ ((u16)0x0010) +//#define MODE_CTRL_STS_PHYADBP_ ((u16)0x0008) +//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004) +#define MODE_CTRL_STS_ENERGYON_ ((u16)0x0002) + +#define PHY_INT_SRC ((u32)29) +#define PHY_INT_SRC_ENERGY_ON_ ((u16)0x0080) +#define PHY_INT_SRC_ANEG_COMP_ ((u16)0x0040) +#define PHY_INT_SRC_REMOTE_FAULT_ ((u16)0x0020) +#define PHY_INT_SRC_LINK_DOWN_ ((u16)0x0010) +#define PHY_INT_SRC_ANEG_LP_ACK_ ((u16)0x0008) +#define PHY_INT_SRC_PAR_DET_FAULT_ ((u16)0x0004) +#define PHY_INT_SRC_ANEG_PGRX_ ((u16)0x0002) + +#define PHY_INT_MASK ((u32)30) +#define PHY_INT_MASK_ENERGY_ON_ ((u16)0x0080) +#define PHY_INT_MASK_ANEG_COMP_ ((u16)0x0040) +#define PHY_INT_MASK_REMOTE_FAULT_ ((u16)0x0020) +#define PHY_INT_MASK_LINK_DOWN_ ((u16)0x0010) +#define PHY_INT_MASK_ANEG_LP_ACK_ ((u16)0x0008) +#define PHY_INT_MASK_PAR_DET_FAULT_ ((u16)0x0004) +#define PHY_INT_MASK_ANEG_PGRX_ ((u16)0x0002) + +#define PHY_SPECIAL ((u32)31) +#define PHY_SPECIAL_ANEG_DONE_ ((u16)0x1000) +#define PHY_SPECIAL_RES_ ((u16)0x0040) +#define PHY_SPECIAL_RES_MASK_ ((u16)0x0FE1) +#define PHY_SPECIAL_SPD_ ((u16)0x001C) +#define PHY_SPECIAL_SPD_10HALF_ ((u16)0x0004) +#define PHY_SPECIAL_SPD_10FULL_ ((u16)0x0014) +#define PHY_SPECIAL_SPD_100HALF_ ((u16)0x0008) 
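+/*
+ * Editorial sketch (not part of the original driver): the PHY_INT_SRC bits
+ * above are plain flag bits read back from the PHY.  The helper below shows
+ * how such a value is typically tested; the function name is invented for
+ * illustration only, and the driver consumes these bits directly in its PHY
+ * interrupt handling.
+ */
+static inline int smc911x_example_phy_link_lost(u16 int_src)
+{
+	/* treat an explicit link-down or a remote-fault event as link loss */
+	return (int_src & (PHY_INT_SRC_LINK_DOWN_ |
+			   PHY_INT_SRC_REMOTE_FAULT_)) != 0;
+}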
+#define PHY_SPECIAL_SPD_100FULL_ ((u16)0x0018) + +#define LAN911X_INTERNAL_PHY_ID (0x0007C000) + +/* Chip ID values */ +#define CHIP_9115 0x0115 +#define CHIP_9116 0x0116 +#define CHIP_9117 0x0117 +#define CHIP_9118 0x0118 +#define CHIP_9211 0x9211 +#define CHIP_9215 0x115A +#define CHIP_9217 0x117A +#define CHIP_9218 0x118A + +struct chip_id { + u16 id; + char *name; +}; + +static const struct chip_id chip_ids[] = { + { CHIP_9115, "LAN9115" }, + { CHIP_9116, "LAN9116" }, + { CHIP_9117, "LAN9117" }, + { CHIP_9118, "LAN9118" }, + { CHIP_9211, "LAN9211" }, + { CHIP_9215, "LAN9215" }, + { CHIP_9217, "LAN9217" }, + { CHIP_9218, "LAN9218" }, + { 0, NULL }, +}; + +#define IS_REV_A(x) ((x & 0xFFFF)==0) + +/* + * Macros to abstract register access according to the data bus + * capabilities. Please use those and not the in/out primitives. + */ +/* FIFO read/write macros */ +#define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 ) +#define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 ) +#define SMC_SET_TX_FIFO(lp, x) SMC_outl( x, lp, TX_DATA_FIFO ) +#define SMC_GET_RX_FIFO(lp) SMC_inl( lp, RX_DATA_FIFO ) + + +/* I/O mapped register read/write macros */ +#define SMC_GET_TX_STS_FIFO(lp) SMC_inl( lp, TX_STATUS_FIFO ) +#define SMC_GET_RX_STS_FIFO(lp) SMC_inl( lp, RX_STATUS_FIFO ) +#define SMC_GET_RX_STS_FIFO_PEEK(lp) SMC_inl( lp, RX_STATUS_FIFO_PEEK ) +#define SMC_GET_PN(lp) (SMC_inl( lp, ID_REV ) >> 16) +#define SMC_GET_REV(lp) (SMC_inl( lp, ID_REV ) & 0xFFFF) +#define SMC_GET_IRQ_CFG(lp) SMC_inl( lp, INT_CFG ) +#define SMC_SET_IRQ_CFG(lp, x) SMC_outl( x, lp, INT_CFG ) +#define SMC_GET_INT(lp) SMC_inl( lp, INT_STS ) +#define SMC_ACK_INT(lp, x) SMC_outl( x, lp, INT_STS ) +#define SMC_GET_INT_EN(lp) SMC_inl( lp, INT_EN ) +#define SMC_SET_INT_EN(lp, x) SMC_outl( x, lp, INT_EN ) +#define SMC_GET_BYTE_TEST(lp) SMC_inl( lp, BYTE_TEST ) +#define SMC_SET_BYTE_TEST(lp, x) SMC_outl( x, lp, BYTE_TEST ) +#define SMC_GET_FIFO_INT(lp) SMC_inl( lp, FIFO_INT ) +#define SMC_SET_FIFO_INT(lp, x) SMC_outl( x, lp, FIFO_INT ) +#define SMC_SET_FIFO_TDA(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<24); \ + SMC_SET_FIFO_INT( (lp), __mask | (x)<<24 ); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_SET_FIFO_TSL(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<16); \ + SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<16)); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_SET_FIFO_RSA(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~(0xFF<<8); \ + SMC_SET_FIFO_INT( (lp), __mask | (((x) & 0xFF)<<8)); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_SET_FIFO_RSL(lp, x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_GET_FIFO_INT((lp)) & ~0xFF; \ + SMC_SET_FIFO_INT( (lp),__mask | ((x) & 0xFF)); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_GET_RX_CFG(lp) SMC_inl( lp, RX_CFG ) +#define SMC_SET_RX_CFG(lp, x) SMC_outl( x, lp, RX_CFG ) +#define SMC_GET_TX_CFG(lp) SMC_inl( lp, TX_CFG ) +#define SMC_SET_TX_CFG(lp, x) SMC_outl( x, lp, TX_CFG ) +#define SMC_GET_HW_CFG(lp) SMC_inl( lp, HW_CFG ) +#define SMC_SET_HW_CFG(lp, x) SMC_outl( x, lp, HW_CFG ) +#define SMC_GET_RX_DP_CTRL(lp) SMC_inl( lp, RX_DP_CTRL ) +#define SMC_SET_RX_DP_CTRL(lp, x) SMC_outl( x, 
lp, RX_DP_CTRL ) +#define SMC_GET_PMT_CTRL(lp) SMC_inl( lp, PMT_CTRL ) +#define SMC_SET_PMT_CTRL(lp, x) SMC_outl( x, lp, PMT_CTRL ) +#define SMC_GET_GPIO_CFG(lp) SMC_inl( lp, GPIO_CFG ) +#define SMC_SET_GPIO_CFG(lp, x) SMC_outl( x, lp, GPIO_CFG ) +#define SMC_GET_RX_FIFO_INF(lp) SMC_inl( lp, RX_FIFO_INF ) +#define SMC_SET_RX_FIFO_INF(lp, x) SMC_outl( x, lp, RX_FIFO_INF ) +#define SMC_GET_TX_FIFO_INF(lp) SMC_inl( lp, TX_FIFO_INF ) +#define SMC_SET_TX_FIFO_INF(lp, x) SMC_outl( x, lp, TX_FIFO_INF ) +#define SMC_GET_GPT_CFG(lp) SMC_inl( lp, GPT_CFG ) +#define SMC_SET_GPT_CFG(lp, x) SMC_outl( x, lp, GPT_CFG ) +#define SMC_GET_RX_DROP(lp) SMC_inl( lp, RX_DROP ) +#define SMC_SET_RX_DROP(lp, x) SMC_outl( x, lp, RX_DROP ) +#define SMC_GET_MAC_CMD(lp) SMC_inl( lp, MAC_CSR_CMD ) +#define SMC_SET_MAC_CMD(lp, x) SMC_outl( x, lp, MAC_CSR_CMD ) +#define SMC_GET_MAC_DATA(lp) SMC_inl( lp, MAC_CSR_DATA ) +#define SMC_SET_MAC_DATA(lp, x) SMC_outl( x, lp, MAC_CSR_DATA ) +#define SMC_GET_AFC_CFG(lp) SMC_inl( lp, AFC_CFG ) +#define SMC_SET_AFC_CFG(lp, x) SMC_outl( x, lp, AFC_CFG ) +#define SMC_GET_E2P_CMD(lp) SMC_inl( lp, E2P_CMD ) +#define SMC_SET_E2P_CMD(lp, x) SMC_outl( x, lp, E2P_CMD ) +#define SMC_GET_E2P_DATA(lp) SMC_inl( lp, E2P_DATA ) +#define SMC_SET_E2P_DATA(lp, x) SMC_outl( x, lp, E2P_DATA ) + +/* MAC register read/write macros */ +#define SMC_GET_MAC_CSR(lp,a,v) \ + do { \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + SMC_SET_MAC_CMD((lp),MAC_CSR_CMD_CSR_BUSY_ | \ + MAC_CSR_CMD_R_NOT_W_ | (a) ); \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + v = SMC_GET_MAC_DATA((lp)); \ + } while (0) +#define SMC_SET_MAC_CSR(lp,a,v) \ + do { \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + SMC_SET_MAC_DATA((lp), v); \ + SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_CSR_BUSY_ | (a) ); \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + } while (0) +#define SMC_GET_MAC_CR(lp, x) SMC_GET_MAC_CSR( (lp), MAC_CR, x ) +#define SMC_SET_MAC_CR(lp, x) SMC_SET_MAC_CSR( (lp), MAC_CR, x ) +#define SMC_GET_ADDRH(lp, x) SMC_GET_MAC_CSR( (lp), ADDRH, x ) +#define SMC_SET_ADDRH(lp, x) SMC_SET_MAC_CSR( (lp), ADDRH, x ) +#define SMC_GET_ADDRL(lp, x) SMC_GET_MAC_CSR( (lp), ADDRL, x ) +#define SMC_SET_ADDRL(lp, x) SMC_SET_MAC_CSR( (lp), ADDRL, x ) +#define SMC_GET_HASHH(lp, x) SMC_GET_MAC_CSR( (lp), HASHH, x ) +#define SMC_SET_HASHH(lp, x) SMC_SET_MAC_CSR( (lp), HASHH, x ) +#define SMC_GET_HASHL(lp, x) SMC_GET_MAC_CSR( (lp), HASHL, x ) +#define SMC_SET_HASHL(lp, x) SMC_SET_MAC_CSR( (lp), HASHL, x ) +#define SMC_GET_MII_ACC(lp, x) SMC_GET_MAC_CSR( (lp), MII_ACC, x ) +#define SMC_SET_MII_ACC(lp, x) SMC_SET_MAC_CSR( (lp), MII_ACC, x ) +#define SMC_GET_MII_DATA(lp, x) SMC_GET_MAC_CSR( (lp), MII_DATA, x ) +#define SMC_SET_MII_DATA(lp, x) SMC_SET_MAC_CSR( (lp), MII_DATA, x ) +#define SMC_GET_FLOW(lp, x) SMC_GET_MAC_CSR( (lp), FLOW, x ) +#define SMC_SET_FLOW(lp, x) SMC_SET_MAC_CSR( (lp), FLOW, x ) +#define SMC_GET_VLAN1(lp, x) SMC_GET_MAC_CSR( (lp), VLAN1, x ) +#define SMC_SET_VLAN1(lp, x) SMC_SET_MAC_CSR( (lp), VLAN1, x ) +#define SMC_GET_VLAN2(lp, x) SMC_GET_MAC_CSR( (lp), VLAN2, x ) +#define SMC_SET_VLAN2(lp, x) SMC_SET_MAC_CSR( (lp), VLAN2, x ) +#define SMC_SET_WUFF(lp, x) SMC_SET_MAC_CSR( (lp), WUFF, x ) +#define SMC_GET_WUCSR(lp, x) SMC_GET_MAC_CSR( (lp), WUCSR, x ) +#define SMC_SET_WUCSR(lp, x) SMC_SET_MAC_CSR( (lp), WUCSR, x ) + +/* PHY register read/write macros */ +#define SMC_GET_MII(lp,a,phy,v) \ + do { \ + u32 __v; \ + do { \ + SMC_GET_MII_ACC((lp), __v); \ + } while ( __v & 
MII_ACC_MII_BUSY_ ); \ + SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ + MII_ACC_MII_BUSY_); \ + do { \ + SMC_GET_MII_ACC( (lp), __v); \ + } while ( __v & MII_ACC_MII_BUSY_ ); \ + SMC_GET_MII_DATA((lp), v); \ + } while (0) +#define SMC_SET_MII(lp,a,phy,v) \ + do { \ + u32 __v; \ + do { \ + SMC_GET_MII_ACC((lp), __v); \ + } while ( __v & MII_ACC_MII_BUSY_ ); \ + SMC_SET_MII_DATA((lp), v); \ + SMC_SET_MII_ACC( (lp), ((phy)<<11) | ((a)<<6) | \ + MII_ACC_MII_BUSY_ | \ + MII_ACC_MII_WRITE_ ); \ + do { \ + SMC_GET_MII_ACC((lp), __v); \ + } while ( __v & MII_ACC_MII_BUSY_ ); \ + } while (0) +#define SMC_GET_PHY_BMCR(lp,phy,x) SMC_GET_MII( (lp), MII_BMCR, phy, x ) +#define SMC_SET_PHY_BMCR(lp,phy,x) SMC_SET_MII( (lp), MII_BMCR, phy, x ) +#define SMC_GET_PHY_BMSR(lp,phy,x) SMC_GET_MII( (lp), MII_BMSR, phy, x ) +#define SMC_GET_PHY_ID1(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID1, phy, x ) +#define SMC_GET_PHY_ID2(lp,phy,x) SMC_GET_MII( (lp), MII_PHYSID2, phy, x ) +#define SMC_GET_PHY_MII_ADV(lp,phy,x) SMC_GET_MII( (lp), MII_ADVERTISE, phy, x ) +#define SMC_SET_PHY_MII_ADV(lp,phy,x) SMC_SET_MII( (lp), MII_ADVERTISE, phy, x ) +#define SMC_GET_PHY_MII_LPA(lp,phy,x) SMC_GET_MII( (lp), MII_LPA, phy, x ) +#define SMC_SET_PHY_MII_LPA(lp,phy,x) SMC_SET_MII( (lp), MII_LPA, phy, x ) +#define SMC_GET_PHY_CTRL_STS(lp,phy,x) SMC_GET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) +#define SMC_SET_PHY_CTRL_STS(lp,phy,x) SMC_SET_MII( (lp), PHY_MODE_CTRL_STS, phy, x ) +#define SMC_GET_PHY_INT_SRC(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_SRC, phy, x ) +#define SMC_SET_PHY_INT_SRC(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_SRC, phy, x ) +#define SMC_GET_PHY_INT_MASK(lp,phy,x) SMC_GET_MII( (lp), PHY_INT_MASK, phy, x ) +#define SMC_SET_PHY_INT_MASK(lp,phy,x) SMC_SET_MII( (lp), PHY_INT_MASK, phy, x ) +#define SMC_GET_PHY_SPECIAL(lp,phy,x) SMC_GET_MII( (lp), PHY_SPECIAL, phy, x ) + + + +/* Misc read/write macros */ + +#ifndef SMC_GET_MAC_ADDR +#define SMC_GET_MAC_ADDR(lp, addr) \ + do { \ + unsigned int __v; \ + \ + SMC_GET_MAC_CSR((lp), ADDRL, __v); \ + addr[0] = __v; addr[1] = __v >> 8; \ + addr[2] = __v >> 16; addr[3] = __v >> 24; \ + SMC_GET_MAC_CSR((lp), ADDRH, __v); \ + addr[4] = __v; addr[5] = __v >> 8; \ + } while (0) +#endif + +#define SMC_SET_MAC_ADDR(lp, addr) \ + do { \ + SMC_SET_MAC_CSR((lp), ADDRL, \ + addr[0] | \ + (addr[1] << 8) | \ + (addr[2] << 16) | \ + (addr[3] << 24)); \ + SMC_SET_MAC_CSR((lp), ADDRH, addr[4]|(addr[5] << 8));\ + } while (0) + + +#define SMC_WRITE_EEPROM_CMD(lp, cmd, addr) \ + do { \ + while (SMC_GET_E2P_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + SMC_SET_MAC_CMD((lp), MAC_CSR_CMD_R_NOT_W_ | a ); \ + while (SMC_GET_MAC_CMD((lp)) & MAC_CSR_CMD_CSR_BUSY_); \ + } while (0) + +#endif /* _SMC911X_H_ */ diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c new file mode 100644 index 000000000..67d9fdeed --- /dev/null +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -0,0 +1,1559 @@ +/*------------------------------------------------------------------------ + . smc9194.c + . This is a driver for SMC's 9000 series of Ethernet cards. + . + . Copyright (C) 1996 by Erik Stahlman + . This software may be used and distributed according to the terms + . of the GNU General Public License, incorporated herein by reference. + . + . "Features" of the SMC chip: + . 4608 byte packet memory. ( for the 91C92. Others have more ) + . EEPROM for configuration + . AUI/TP selection ( mine has 10Base2/10BaseT select ) + . + . Arguments: + . io = for the base address + . irq = for the IRQ + . 
ifport = 0 for autodetect, 1 for TP, 2 for AUI ( or 10base2 ) + . + . author: + . Erik Stahlman ( erik@vt.edu ) + . contributors: + . Arnaldo Carvalho de Melo + . + . Hardware multicast code from Peter Cammaert ( pc@denkart.be ) + . + . Sources: + . o SMC databook + . o skeleton.c by Donald Becker ( becker@scyld.com ) + . o ( a LOT of advice from Becker as well ) + . + . History: + . 12/07/95 Erik Stahlman written, got receive/xmit handled + . 01/03/96 Erik Stahlman worked out some bugs, actually usable!!! :-) + . 01/06/96 Erik Stahlman cleaned up some, better testing, etc + . 01/29/96 Erik Stahlman fixed autoirq, added multicast + . 02/01/96 Erik Stahlman 1. disabled all interrupts in smc_reset + . 2. got rid of post-decrementing bug -- UGH. + . 02/13/96 Erik Stahlman Tried to fix autoirq failure. Added more + . descriptive error messages. + . 02/15/96 Erik Stahlman Fixed typo that caused detection failure + . 02/23/96 Erik Stahlman Modified it to fit into kernel tree + . Added support to change hardware address + . Cleared stats on opens + . 02/26/96 Erik Stahlman Trial support for Kernel 1.2.13 + . Kludge for automatic IRQ detection + . 03/04/96 Erik Stahlman Fixed kernel 1.3.70 + + . Fixed bug reported by Gardner Buchanan in + . smc_enable, with outw instead of outb + . 03/06/96 Erik Stahlman Added hardware multicast from Peter Cammaert + . 04/14/00 Heiko Pruessing (SMA Regelsysteme) Fixed bug in chip memory + . allocation + . 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet + . 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ" + . 11/08/01 Matt Domsch Use common crc32 function + ----------------------------------------------------------------------------*/ + +static const char version[] = + "smc9194.c:v0.14 12/15/00 by Erik Stahlman (erik@vt.edu)"; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "smc9194.h" + +#define DRV_NAME "smc9194" + +/*------------------------------------------------------------------------ + . + . Configuration options, for the experienced user to change. + . + -------------------------------------------------------------------------*/ + +/* + . Do you want to use 32 bit xfers? This should work on all chips, as + . the chipset is designed to accommodate them. +*/ +#ifdef __sh__ +#undef USE_32_BIT +#else +#define USE_32_BIT 1 +#endif + +/* + .the SMC9194 can be at any of the following port addresses. To change, + .for a slightly different card, you can add it to the array. Keep in + .mind that the array must end in zero. +*/ + +struct devlist { + unsigned int port; + unsigned int irq; +}; + +static struct devlist smc_devlist[] __initdata = { + {.port = 0x200, .irq = 0}, + {.port = 0x220, .irq = 0}, + {.port = 0x240, .irq = 0}, + {.port = 0x260, .irq = 0}, + {.port = 0x280, .irq = 0}, + {.port = 0x2A0, .irq = 0}, + {.port = 0x2C0, .irq = 0}, + {.port = 0x2E0, .irq = 0}, + {.port = 0x300, .irq = 0}, + {.port = 0x320, .irq = 0}, + {.port = 0x340, .irq = 0}, + {.port = 0x360, .irq = 0}, + {.port = 0x380, .irq = 0}, + {.port = 0x3A0, .irq = 0}, + {.port = 0x3C0, .irq = 0}, + {.port = 0x3E0, .irq = 0}, + {.port = 0, .irq = 0}, +}; +/* + . Wait time for memory to be free. This probably shouldn't be + . tuned that much, as waiting for this means nothing else happens + . in the system +*/ +#define MEMORY_WAIT_TIME 16 + +/* + . DEBUGGING LEVELS + . + . 0 for normal operation + . 1 for slightly more details + . 
>2 for various levels of increasingly useless information + . 2 for interrupt tracking, status flags + . 3 for packet dumps, etc. +*/ +#define SMC_DEBUG 0 + +#if (SMC_DEBUG > 2 ) +#define PRINTK3(x) printk x +#else +#define PRINTK3(x) +#endif + +#if SMC_DEBUG > 1 +#define PRINTK2(x) printk x +#else +#define PRINTK2(x) +#endif + +#ifdef SMC_DEBUG +#define PRINTK(x) printk x +#else +#define PRINTK(x) +#endif + + +/*------------------------------------------------------------------------ + . + . The internal workings of the driver. If you are changing anything + . here with the SMC stuff, you should have the datasheet and known + . what you are doing. + . + -------------------------------------------------------------------------*/ +#define CARDNAME "SMC9194" + + +/* store this information for the driver.. */ +struct smc_local { + /* + If I have to wait until memory is available to send + a packet, I will store the skbuff here, until I get the + desired memory. Then, I'll send it out and free it. + */ + struct sk_buff * saved_skb; + + /* + . This keeps track of how many packets that I have + . sent out. When an TX_EMPTY interrupt comes, I know + . that all of these have been sent. + */ + int packets_waiting; +}; + + +/*----------------------------------------------------------------- + . + . The driver can be entered at any of the following entry points. + . + .------------------------------------------------------------------ */ + +/* + . This is called by register_netdev(). It is responsible for + . checking the portlist for the SMC9000 series chipset. If it finds + . one, then it will initialize the device, find the hardware information, + . and sets up the appropriate device parameters. + . NOTE: Interrupts are *OFF* when this procedure is called. + . + . NB:This shouldn't be static since it is referred to externally. +*/ +struct net_device *smc_init(int unit); + +/* + . The kernel calls this function when someone wants to use the device, + . typically 'ifconfig ethX up'. +*/ +static int smc_open(struct net_device *dev); + +/* + . Our watchdog timed out. Called by the networking layer +*/ +static void smc_timeout(struct net_device *dev); + +/* + . This is called by the kernel in response to 'ifconfig ethX down'. It + . is responsible for cleaning up everything that the open routine + . does, and maybe putting the card into a powerdown state. +*/ +static int smc_close(struct net_device *dev); + +/* + . Finally, a call to set promiscuous mode ( for TCPDUMP and related + . programs ) and multicast modes. +*/ +static void smc_set_multicast_list(struct net_device *dev); + + +/*--------------------------------------------------------------- + . + . Interrupt level calls.. + . + ----------------------------------------------------------------*/ + +/* + . Handles the actual interrupt +*/ +static irqreturn_t smc_interrupt(int irq, void *); +/* + . This is a separate procedure to handle the receipt of a packet, to + . leave the interrupt code looking slightly cleaner +*/ +static inline void smc_rcv( struct net_device *dev ); +/* + . This handles a TX interrupt, which is only called when an error + . relating to a packet is sent. +*/ +static inline void smc_tx( struct net_device * dev ); + +/* + ------------------------------------------------------------ + . + . Internal routines + . + ------------------------------------------------------------ +*/ + +/* + . Test if a given location contains a chip, trying to cause as + . little damage as possible if it's not a SMC chip. 
+*/ +static int smc_probe(struct net_device *dev, int ioaddr); + +/* + . A rather simple routine to print out a packet for debugging purposes. +*/ +#if SMC_DEBUG > 2 +static void print_packet( byte *, int ); +#endif + +#define tx_done(dev) 1 + +/* this is called to actually send the packet to the chip */ +static void smc_hardware_send_packet( struct net_device * dev ); + +/* Since I am not sure if I will have enough room in the chip's ram + . to store the packet, I call this routine, which either sends it + . now, or generates an interrupt when the card is ready for the + . packet */ +static netdev_tx_t smc_wait_to_send_packet( struct sk_buff * skb, + struct net_device *dev ); + +/* this does a soft reset on the device */ +static void smc_reset( int ioaddr ); + +/* Enable Interrupts, Receive, and Transmit */ +static void smc_enable( int ioaddr ); + +/* this puts the device in an inactive state */ +static void smc_shutdown( int ioaddr ); + +/* This routine will find the IRQ of the driver if one is not + . specified in the input to the device. */ +static int smc_findirq( int ioaddr ); + +/* + . Function: smc_reset( int ioaddr ) + . Purpose: + . This sets the SMC91xx chip to its normal state, hopefully from whatever + . mess that any other DOS driver has put it in. + . + . Maybe I should reset more registers to defaults in here? SOFTRESET should + . do that for me. + . + . Method: + . 1. send a SOFT RESET + . 2. wait for it to finish + . 3. enable autorelease mode + . 4. reset the memory management unit + . 5. clear all interrupts + . +*/ +static void smc_reset( int ioaddr ) +{ + /* This resets the registers mostly to defaults, but doesn't + affect EEPROM. That seems unnecessary */ + SMC_SELECT_BANK( 0 ); + outw( RCR_SOFTRESET, ioaddr + RCR ); + + /* this should pause enough for the chip to be happy */ + SMC_DELAY( ); + + /* Set the transmit and receive configuration registers to + default values */ + outw( RCR_CLEAR, ioaddr + RCR ); + outw( TCR_CLEAR, ioaddr + TCR ); + + /* set the control register to automatically + release successfully transmitted packets, to make the best + use out of our limited memory */ + SMC_SELECT_BANK( 1 ); + outw( inw( ioaddr + CONTROL ) | CTL_AUTO_RELEASE , ioaddr + CONTROL ); + + /* Reset the MMU */ + SMC_SELECT_BANK( 2 ); + outw( MC_RESET, ioaddr + MMU_CMD ); + + /* Note: It doesn't seem that waiting for the MMU busy is needed here, + but this is a place where future chipsets _COULD_ break. Be wary + of issuing another MMU command right after this */ + + outb( 0, ioaddr + INT_MASK ); +} + +/* + . Function: smc_enable + . Purpose: let the chip talk to the outside work + . Method: + . 1. Enable the transmitter + . 2. Enable the receiver + . 3. Enable interrupts +*/ +static void smc_enable( int ioaddr ) +{ + SMC_SELECT_BANK( 0 ); + /* see the header file for options in TCR/RCR NORMAL*/ + outw( TCR_NORMAL, ioaddr + TCR ); + outw( RCR_NORMAL, ioaddr + RCR ); + + /* now, enable interrupts */ + SMC_SELECT_BANK( 2 ); + outb( SMC_INTERRUPT_MASK, ioaddr + INT_MASK ); +} + +/* + . Function: smc_shutdown + . Purpose: closes down the SMC91xxx chip. + . Method: + . 1. zero the interrupt mask + . 2. clear the enable receive flag + . 3. clear the enable xmit flags + . + . TODO: + . (1) maybe utilize power down mode. + . Why not yet? Because while the chip will go into power down mode, + . the manual says that it will wake up in response to any I/O requests + . in the register space. Empirical results do not show this working. 
+*/ +static void smc_shutdown( int ioaddr ) +{ + /* no more interrupts for me */ + SMC_SELECT_BANK( 2 ); + outb( 0, ioaddr + INT_MASK ); + + /* and tell the card to stay away from that nasty outside world */ + SMC_SELECT_BANK( 0 ); + outb( RCR_CLEAR, ioaddr + RCR ); + outb( TCR_CLEAR, ioaddr + TCR ); +#if 0 + /* finally, shut the chip down */ + SMC_SELECT_BANK( 1 ); + outw( inw( ioaddr + CONTROL ), CTL_POWERDOWN, ioaddr + CONTROL ); +#endif +} + + +/* + . Function: smc_setmulticast( int ioaddr, struct net_device *dev ) + . Purpose: + . This sets the internal hardware table to filter out unwanted multicast + . packets before they take up memory. + . + . The SMC chip uses a hash table where the high 6 bits of the CRC of + . address are the offset into the table. If that bit is 1, then the + . multicast packet is accepted. Otherwise, it's dropped silently. + . + . To use the 6 bits as an offset into the table, the high 3 bits are the + . number of the 8 bit register, while the low 3 bits are the bit within + . that register. + . + . This routine is based very heavily on the one provided by Peter Cammaert. +*/ + + +static void smc_setmulticast(int ioaddr, struct net_device *dev) +{ + int i; + unsigned char multicast_table[ 8 ]; + struct netdev_hw_addr *ha; + /* table for flipping the order of 3 bits */ + unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; + + /* start with a table of all zeros: reject all */ + memset( multicast_table, 0, sizeof( multicast_table ) ); + + netdev_for_each_mc_addr(ha, dev) { + int position; + + /* only use the low order bits */ + position = ether_crc_le(6, ha->addr) & 0x3f; + + /* do some messy swapping to put the bit in the right spot */ + multicast_table[invert3[position&7]] |= + (1<>3)&7]); + + } + /* now, the table can be loaded into the chipset */ + SMC_SELECT_BANK( 3 ); + + for ( i = 0; i < 8 ; i++ ) { + outb( multicast_table[i], ioaddr + MULTICAST1 + i ); + } +} + +/* + . Function: smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * ) + . Purpose: + . Attempt to allocate memory for a packet, if chip-memory is not + . available, then tell the card to generate an interrupt when it + . is available. + . + . Algorithm: + . + . o if the saved_skb is not currently null, then drop this packet + . on the floor. This should never happen, because of TBUSY. + . o if the saved_skb is null, then replace it with the current packet, + . o See if I can sending it now. + . o (NO): Enable interrupts and let the interrupt handler deal with it. + . o (YES):Send it now. +*/ +static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb, + struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + word length; + unsigned short numPages; + word time_out; + + netif_stop_queue(dev); + /* Well, I want to send the packet.. but I don't know + if I can send it right now... */ + + if ( lp->saved_skb) { + /* THIS SHOULD NEVER HAPPEN. */ + dev->stats.tx_aborted_errors++; + printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); + return NETDEV_TX_BUSY; + } + lp->saved_skb = skb; + + length = skb->len; + + if (length < ETH_ZLEN) { + if (skb_padto(skb, ETH_ZLEN)) { + netif_wake_queue(dev); + return NETDEV_TX_OK; + } + length = ETH_ZLEN; + } + + /* + ** The MMU wants the number of pages to be the number of 256 bytes + ** 'pages', minus 1 ( since a packet can't ever have 0 pages :) ) + ** + ** Pkt size for allocating is data length +6 (for additional status words, + ** length and ctl!) 
If odd size last byte is included in this header. + */ + numPages = ((length & 0xfffe) + 6) / 256; + + if (numPages > 7 ) { + printk(CARDNAME": Far too big packet error.\n"); + /* freeing the packet is a good thing here... but should + . any packets of this size get down here? */ + dev_kfree_skb (skb); + lp->saved_skb = NULL; + /* this IS an error, but, i don't want the skb saved */ + netif_wake_queue(dev); + return NETDEV_TX_OK; + } + /* either way, a packet is waiting now */ + lp->packets_waiting++; + + /* now, try to allocate the memory */ + SMC_SELECT_BANK( 2 ); + outw( MC_ALLOC | numPages, ioaddr + MMU_CMD ); + /* + . Performance Hack + . + . wait a short amount of time.. if I can send a packet now, I send + . it now. Otherwise, I enable an interrupt and wait for one to be + . available. + . + . I could have handled this a slightly different way, by checking to + . see if any memory was available in the FREE MEMORY register. However, + . either way, I need to generate an allocation, and the allocation works + . no matter what, so I saw no point in checking free memory. + */ + time_out = MEMORY_WAIT_TIME; + do { + word status; + + status = inb( ioaddr + INTERRUPT ); + if ( status & IM_ALLOC_INT ) { + /* acknowledge the interrupt */ + outb( IM_ALLOC_INT, ioaddr + INTERRUPT ); + break; + } + } while ( -- time_out ); + + if ( !time_out ) { + /* oh well, wait until the chip finds memory later */ + SMC_ENABLE_INT( IM_ALLOC_INT ); + PRINTK2((CARDNAME": memory allocation deferred.\n")); + /* it's deferred, but I'll handle it later */ + return NETDEV_TX_OK; + } + /* or YES! I can send the packet now.. */ + smc_hardware_send_packet(dev); + netif_wake_queue(dev); + return NETDEV_TX_OK; +} + +/* + . Function: smc_hardware_send_packet(struct net_device * ) + . Purpose: + . This sends the actual packet to the SMC9xxx chip. + . + . Algorithm: + . First, see if a saved_skb is available. + . ( this should NOT be called if there is no 'saved_skb' + . Now, find the packet number that the chip allocated + . Point the data pointers at it in memory + . Set the length word in the chip's memory + . Dump the packet to chip memory + . Check if a last byte is needed ( odd length packet ) + . if so, set the control flag right + . Tell the card to send it + . Enable the transmit interrupt, so I know if it failed + . Free the kernel data if I actually sent it. +*/ +static void smc_hardware_send_packet( struct net_device * dev ) +{ + struct smc_local *lp = netdev_priv(dev); + byte packet_no; + struct sk_buff * skb = lp->saved_skb; + word length; + unsigned int ioaddr; + byte * buf; + + ioaddr = dev->base_addr; + + if ( !skb ) { + PRINTK((CARDNAME": In XMIT with no packet to send\n")); + return; + } + length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; + buf = skb->data; + + /* If I get here, I _know_ there is a packet slot waiting for me */ + packet_no = inb( ioaddr + PNR_ARR + 1 ); + if ( packet_no & 0x80 ) { + /* or isn't there? BAD CHIP! 
*/ + netdev_dbg(dev, CARDNAME": Memory allocation failed.\n"); + dev_kfree_skb_any(skb); + lp->saved_skb = NULL; + netif_wake_queue(dev); + return; + } + + /* we have a packet address, so tell the card to use it */ + outb( packet_no, ioaddr + PNR_ARR ); + + /* point to the beginning of the packet */ + outw( PTR_AUTOINC , ioaddr + POINTER ); + + PRINTK3((CARDNAME": Trying to xmit packet of length %x\n", length)); +#if SMC_DEBUG > 2 + print_packet( buf, length ); +#endif + + /* send the packet length ( +6 for status, length and ctl byte ) + and the status word ( set to zeros ) */ +#ifdef USE_32_BIT + outl( (length +6 ) << 16 , ioaddr + DATA_1 ); +#else + outw( 0, ioaddr + DATA_1 ); + /* send the packet length ( +6 for status words, length, and ctl*/ + outb( (length+6) & 0xFF,ioaddr + DATA_1 ); + outb( (length+6) >> 8 , ioaddr + DATA_1 ); +#endif + + /* send the actual data + . I _think_ it's faster to send the longs first, and then + . mop up by sending the last word. It depends heavily + . on alignment, at least on the 486. Maybe it would be + . a good idea to check which is optimal? But that could take + . almost as much time as is saved? + */ +#ifdef USE_32_BIT + if ( length & 0x2 ) { + outsl(ioaddr + DATA_1, buf, length >> 2 ); + outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1); + } + else + outsl(ioaddr + DATA_1, buf, length >> 2 ); +#else + outsw(ioaddr + DATA_1 , buf, (length ) >> 1); +#endif + /* Send the last byte, if there is one. */ + + if ( (length & 1) == 0 ) { + outw( 0, ioaddr + DATA_1 ); + } else { + outb( buf[length -1 ], ioaddr + DATA_1 ); + outb( 0x20, ioaddr + DATA_1); + } + + /* enable the interrupts */ + SMC_ENABLE_INT( (IM_TX_INT | IM_TX_EMPTY_INT) ); + + /* and let the chipset deal with it */ + outw( MC_ENQUEUE , ioaddr + MMU_CMD ); + + PRINTK2((CARDNAME": Sent packet of length %d\n", length)); + + lp->saved_skb = NULL; + dev_kfree_skb_any (skb); + + dev->trans_start = jiffies; + + /* we can send another packet */ + netif_wake_queue(dev); +} + +/*------------------------------------------------------------------------- + | + | smc_init(int unit) + | Input parameters: + | dev->base_addr == 0, try to find all possible locations + | dev->base_addr == 1, return failure code + | dev->base_addr == 2, always allocate space, and return success + | dev->base_addr == this is the address to check + | + | Output: + | pointer to net_device or ERR_PTR(error) + | + --------------------------------------------------------------------------- +*/ +static int io; +static int irq; +static int ifport; + +struct net_device * __init smc_init(int unit) +{ + struct net_device *dev = alloc_etherdev(sizeof(struct smc_local)); + struct devlist *smcdev = smc_devlist; + int err = 0; + + if (!dev) + return ERR_PTR(-ENODEV); + + if (unit >= 0) { + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + io = dev->base_addr; + irq = dev->irq; + } + + if (io > 0x1ff) { /* Check a single specified location. */ + err = smc_probe(dev, io); + } else if (io != 0) { /* Don't probe at all. */ + err = -ENXIO; + } else { + for (;smcdev->port; smcdev++) { + if (smc_probe(dev, smcdev->port) == 0) + break; + } + if (!smcdev->port) + err = -ENODEV; + } + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + free_irq(dev->irq, dev); + release_region(dev->base_addr, SMC_IO_EXTENT); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +/*---------------------------------------------------------------------- + . smc_findirq + . + . 
This routine has a simple purpose -- make the SMC chip generate an + . interrupt, so an auto-detect routine can detect it, and find the IRQ, + ------------------------------------------------------------------------ +*/ +static int __init smc_findirq(int ioaddr) +{ +#ifndef NO_AUTOPROBE + int timeout = 20; + unsigned long cookie; + + + cookie = probe_irq_on(); + + /* + * What I try to do here is trigger an ALLOC_INT. This is done + * by allocating a small chunk of memory, which will give an interrupt + * when done. + */ + + + SMC_SELECT_BANK(2); + /* enable ALLOCation interrupts ONLY */ + outb( IM_ALLOC_INT, ioaddr + INT_MASK ); + + /* + . Allocate 512 bytes of memory. Note that the chip was just + . reset so all the memory is available + */ + outw( MC_ALLOC | 1, ioaddr + MMU_CMD ); + + /* + . Wait until positive that the interrupt has been generated + */ + while ( timeout ) { + byte int_status; + + int_status = inb( ioaddr + INTERRUPT ); + + if ( int_status & IM_ALLOC_INT ) + break; /* got the interrupt */ + timeout--; + } + /* there is really nothing that I can do here if timeout fails, + as probe_irq_off will return a 0 anyway, which is what I + want in this case. Plus, the clean up is needed in both + cases. */ + + /* DELAY HERE! + On a fast machine, the status might change before the interrupt + is given to the processor. This means that the interrupt was + never detected, and probe_irq_off fails to report anything. + This should fix probe_irq_* problems. + */ + SMC_DELAY(); + SMC_DELAY(); + + /* and disable all interrupts again */ + outb( 0, ioaddr + INT_MASK ); + + /* and return what I found */ + return probe_irq_off(cookie); +#else /* NO_AUTOPROBE */ + struct devlist *smcdev; + for (smcdev = smc_devlist; smcdev->port; smcdev++) { + if (smcdev->port == ioaddr) + return smcdev->irq; + } + return 0; +#endif +} + +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_wait_to_send_packet, + .ndo_tx_timeout = smc_timeout, + .ndo_set_rx_mode = smc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +/*---------------------------------------------------------------------- + . Function: smc_probe( int ioaddr ) + . + . Purpose: + . Tests to see if a given ioaddr points to an SMC9xxx chip. + . Returns a 0 on success + . + . Algorithm: + . (1) see if the high byte of BANK_SELECT is 0x33 + . (2) compare the ioaddr with the base register's address + . (3) see if I recognize the chip ID in the appropriate register + . + .--------------------------------------------------------------------- + */ + +/*--------------------------------------------------------------- + . Here I do typical initialization tasks. + . + . o Initialize the structure if needed + . o print out my vanity message if not done so already + . o print out what type of hardware is detected + . o print out the ethernet address + . o find the IRQ + . o set up my private data + . o configure the dev structure with my subroutines + . o actually GRAB the irq. + . 
o GRAB the region + .----------------------------------------------------------------- +*/ +static int __init smc_probe(struct net_device *dev, int ioaddr) +{ + int i, memory, retval; + unsigned int bank; + + const char *version_string; + const char *if_string; + + /* registers */ + word revision_register; + word base_address_register; + word configuration_register; + word memory_info_register; + word memory_cfg_register; + + /* Grab the region so that no one else tries to probe our ioports. */ + if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME)) + return -EBUSY; + + dev->irq = irq; + dev->if_port = ifport; + + /* First, see if the high byte is 0x33 */ + bank = inw( ioaddr + BANK_SELECT ); + if ( (bank & 0xFF00) != 0x3300 ) { + retval = -ENODEV; + goto err_out; + } + /* The above MIGHT indicate a device, but I need to write to further + test this. */ + outw( 0x0, ioaddr + BANK_SELECT ); + bank = inw( ioaddr + BANK_SELECT ); + if ( (bank & 0xFF00 ) != 0x3300 ) { + retval = -ENODEV; + goto err_out; + } + /* well, we've already written once, so hopefully another time won't + hurt. This time, I need to switch the bank register to bank 1, + so I can access the base address register */ + SMC_SELECT_BANK(1); + base_address_register = inw( ioaddr + BASE ); + if ( ioaddr != ( base_address_register >> 3 & 0x3E0 ) ) { + printk(CARDNAME ": IOADDR %x doesn't match configuration (%x). " + "Probably not a SMC chip\n", + ioaddr, base_address_register >> 3 & 0x3E0 ); + /* well, the base address register didn't match. Must not have + been a SMC chip after all. */ + retval = -ENODEV; + goto err_out; + } + + /* check if the revision register is something that I recognize. + These might need to be added to later, as future revisions + could be added. */ + SMC_SELECT_BANK(3); + revision_register = inw( ioaddr + REVISION ); + if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) { + /* I don't recognize this chip, so... */ + printk(CARDNAME ": IO %x: Unrecognized revision register:" + " %x, Contact author.\n", ioaddr, revision_register); + + retval = -ENODEV; + goto err_out; + } + + /* at this point I'll assume that the chip is an SMC9xxx. + It might be prudent to check a listing of MAC addresses + against the hardware address, or do some other tests. */ + + pr_info_once("%s\n", version); + + /* fill in some of the fields */ + dev->base_addr = ioaddr; + + /* + . Get the MAC address ( bank 1, regs 4 - 9 ) + */ + SMC_SELECT_BANK( 1 ); + for ( i = 0; i < 6; i += 2 ) { + word address; + + address = inw( ioaddr + ADDR0 + i ); + dev->dev_addr[ i + 1] = address >> 8; + dev->dev_addr[ i ] = address & 0xFF; + } + + /* get the memory information */ + + SMC_SELECT_BANK( 0 ); + memory_info_register = inw( ioaddr + MIR ); + memory_cfg_register = inw( ioaddr + MCR ); + memory = ( memory_cfg_register >> 9 ) & 0x7; /* multiplier */ + memory *= 256 * ( memory_info_register & 0xFF ); + + /* + Now, I want to find out more about the chip. This is sort of + redundant, but it's cleaner to have it in both, rather than having + one VERY long probe procedure. + */ + SMC_SELECT_BANK(3); + revision_register = inw( ioaddr + REVISION ); + version_string = chip_ids[ ( revision_register >> 4 ) & 0xF ]; + if ( !version_string ) { + /* I shouldn't get here because this call was done before.... */ + retval = -ENODEV; + goto err_out; + } + + /* is it using AUI or 10BaseT ? 
*/ + if ( dev->if_port == 0 ) { + SMC_SELECT_BANK(1); + configuration_register = inw( ioaddr + CONFIG ); + if ( configuration_register & CFG_AUI_SELECT ) + dev->if_port = 2; + else + dev->if_port = 1; + } + if_string = interfaces[ dev->if_port - 1 ]; + + /* now, reset the chip, and put it into a known state */ + smc_reset( ioaddr ); + + /* + . If dev->irq is 0, then the device has to be banged on to see + . what the IRQ is. + . + . This banging doesn't always detect the IRQ, for unknown reasons. + . a workaround is to reset the chip and try again. + . + . Interestingly, the DOS packet driver *SETS* the IRQ on the card to + . be what is requested on the command line. I don't do that, mostly + . because the card that I have uses a non-standard method of accessing + . the IRQs, and because this _should_ work in most configurations. + . + . Specifying an IRQ is done with the assumption that the user knows + . what (s)he is doing. No checking is done!!!! + . + */ + if ( dev->irq < 2 ) { + int trials; + + trials = 3; + while ( trials-- ) { + dev->irq = smc_findirq( ioaddr ); + if ( dev->irq ) + break; + /* kick the card and try again */ + smc_reset( ioaddr ); + } + } + if (dev->irq == 0 ) { + printk(CARDNAME": Couldn't autodetect your IRQ. Use irq=xx.\n"); + retval = -ENODEV; + goto err_out; + } + + /* now, print out the card info, in a short format.. */ + + netdev_info(dev, "%s(r:%d) at %#3x IRQ:%d INTF:%s MEM:%db ", + version_string, revision_register & 0xF, ioaddr, dev->irq, + if_string, memory); + /* + . Print the Ethernet address + */ + netdev_info(dev, "ADDR: %pM\n", dev->dev_addr); + + /* Grab the IRQ */ + retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev); + if (retval) { + netdev_warn(dev, "%s: unable to get IRQ %d (irqval=%d).\n", + DRV_NAME, dev->irq, retval); + goto err_out; + } + + dev->netdev_ops = &smc_netdev_ops; + dev->watchdog_timeo = HZ/20; + + return 0; + +err_out: + release_region(ioaddr, SMC_IO_EXTENT); + return retval; +} + +#if SMC_DEBUG > 2 +static void print_packet( byte * buf, int length ) +{ +#if 0 + int i; + int remainder; + int lines; + + pr_dbg("Packet of length %d\n", length); + lines = length / 16; + remainder = length % 16; + + for ( i = 0; i < lines ; i ++ ) { + int cur; + + printk(KERN_DEBUG); + for ( cur = 0; cur < 8; cur ++ ) { + byte a, b; + + a = *(buf ++ ); + b = *(buf ++ ); + pr_cont("%02x%02x ", a, b); + } + pr_cont("\n"); + } + printk(KERN_DEBUG); + for ( i = 0; i < remainder/2 ; i++ ) { + byte a, b; + + a = *(buf ++ ); + b = *(buf ++ ); + pr_cont("%02x%02x ", a, b); + } + pr_cont("\n"); +#endif +} +#endif + + +/* + * Open and Initialize the board + * + * Set up everything, reset the card, etc .. + * + */ +static int smc_open(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + + int i; /* used to set hw ethernet address */ + + /* clear out all the junk that was put here before... */ + memset(netdev_priv(dev), 0, sizeof(struct smc_local)); + + /* reset the hardware */ + + smc_reset( ioaddr ); + smc_enable( ioaddr ); + + /* Select which interface to use */ + + SMC_SELECT_BANK( 1 ); + if ( dev->if_port == 1 ) { + outw( inw( ioaddr + CONFIG ) & ~CFG_AUI_SELECT, + ioaddr + CONFIG ); + } + else if ( dev->if_port == 2 ) { + outw( inw( ioaddr + CONFIG ) | CFG_AUI_SELECT, + ioaddr + CONFIG ); + } + + /* + According to Becker, I have to set the hardware address + at this point, because the (l)user can set it with an + ioctl. Easily done... 
+ */ + SMC_SELECT_BANK( 1 ); + for ( i = 0; i < 6; i += 2 ) { + word address; + + address = dev->dev_addr[ i + 1 ] << 8 ; + address |= dev->dev_addr[ i ]; + outw( address, ioaddr + ADDR0 + i ); + } + + netif_start_queue(dev); + return 0; +} + +/*-------------------------------------------------------- + . Called by the kernel to send a packet out into the void + . of the net. This routine is largely based on + . skeleton.c, from Becker. + .-------------------------------------------------------- +*/ + +static void smc_timeout(struct net_device *dev) +{ + /* If we get here, some higher level has decided we are broken. + There should really be a "kick me" function call instead. */ + netdev_warn(dev, CARDNAME": transmit timed out, %s?\n", + tx_done(dev) ? "IRQ conflict" : "network cable problem"); + /* "kick" the adaptor */ + smc_reset( dev->base_addr ); + smc_enable( dev->base_addr ); + dev->trans_start = jiffies; /* prevent tx timeout */ + /* clear anything saved */ + ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL; + netif_wake_queue(dev); +} + +/*------------------------------------------------------------- + . + . smc_rcv - receive a packet from the card + . + . There is ( at least ) a packet waiting to be read from + . chip-memory. + . + . o Read the status + . o If an error, record it + . o otherwise, read in the packet + -------------------------------------------------------------- +*/ +static void smc_rcv(struct net_device *dev) +{ + int ioaddr = dev->base_addr; + int packet_number; + word status; + word packet_length; + + /* assume bank 2 */ + + packet_number = inw( ioaddr + FIFO_PORTS ); + + if ( packet_number & FP_RXEMPTY ) { + /* we got called , but nothing was on the FIFO */ + PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n")); + /* don't need to restore anything */ + return; + } + + /* start reading from the start of the packet */ + outw( PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER ); + + /* First two words are status and packet_length */ + status = inw( ioaddr + DATA_1 ); + packet_length = inw( ioaddr + DATA_1 ); + + packet_length &= 0x07ff; /* mask off top bits */ + + PRINTK2(("RCV: STATUS %4x LENGTH %4x\n", status, packet_length )); + /* + . the packet length contains 3 extra words : + . status, length, and an extra word with an odd byte . + */ + packet_length -= 6; + + if ( !(status & RS_ERRORS ) ){ + /* do stuff to make a new packet */ + struct sk_buff * skb; + byte * data; + + /* read one extra byte */ + if ( status & RS_ODDFRAME ) + packet_length++; + + /* set multicast stats */ + if ( status & RS_MULTICAST ) + dev->stats.multicast++; + + skb = netdev_alloc_skb(dev, packet_length + 5); + if ( skb == NULL ) { + dev->stats.rx_dropped++; + goto done; + } + + /* + ! This should work without alignment, but it could be + ! in the worse case + */ + + skb_reserve( skb, 2 ); /* 16 bit alignment */ + + data = skb_put( skb, packet_length); + +#ifdef USE_32_BIT + /* QUESTION: Like in the TX routine, do I want + to send the DWORDs or the bytes first, or some + mixture. 
A mixture might improve already slow PIO + performance */ + PRINTK3((" Reading %d dwords (and %d bytes)\n", + packet_length >> 2, packet_length & 3 )); + insl(ioaddr + DATA_1 , data, packet_length >> 2 ); + /* read the left over bytes */ + insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC), + packet_length & 0x3 ); +#else + PRINTK3((" Reading %d words and %d byte(s)\n", + (packet_length >> 1 ), packet_length & 1 )); + insw(ioaddr + DATA_1 , data, packet_length >> 1); + if ( packet_length & 1 ) { + data += packet_length & ~1; + *(data++) = inb( ioaddr + DATA_1 ); + } +#endif +#if SMC_DEBUG > 2 + print_packet( data, packet_length ); +#endif + + skb->protocol = eth_type_trans(skb, dev ); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; + } else { + /* error ... */ + dev->stats.rx_errors++; + + if ( status & RS_ALGNERR ) dev->stats.rx_frame_errors++; + if ( status & (RS_TOOSHORT | RS_TOOLONG ) ) + dev->stats.rx_length_errors++; + if ( status & RS_BADCRC) dev->stats.rx_crc_errors++; + } + +done: + /* error or good, tell the card to get rid of this packet */ + outw( MC_RELEASE, ioaddr + MMU_CMD ); +} + + +/************************************************************************* + . smc_tx + . + . Purpose: Handle a transmit error message. This will only be called + . when an error, because of the AUTO_RELEASE mode. + . + . Algorithm: + . Save pointer and packet no + . Get the packet no from the top of the queue + . check if it's valid ( if not, is this an error??? ) + . read the status word + . record the error + . ( resend? Not really, since we don't want old packets around ) + . Restore saved values + ************************************************************************/ +static void smc_tx( struct net_device * dev ) +{ + int ioaddr = dev->base_addr; + struct smc_local *lp = netdev_priv(dev); + byte saved_packet; + byte packet_no; + word tx_status; + + + /* assume bank 2 */ + + saved_packet = inb( ioaddr + PNR_ARR ); + packet_no = inw( ioaddr + FIFO_PORTS ); + packet_no &= 0x7F; + + /* select this as the packet to read from */ + outb( packet_no, ioaddr + PNR_ARR ); + + /* read the first word from this packet */ + outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER ); + + tx_status = inw( ioaddr + DATA_1 ); + PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status)); + + dev->stats.tx_errors++; + if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; + if ( tx_status & TS_LATCOL ) { + netdev_dbg(dev, CARDNAME": Late collision occurred on last xmit.\n"); + dev->stats.tx_window_errors++; + } +#if 0 + if ( tx_status & TS_16COL ) { ... } +#endif + + if ( tx_status & TS_SUCCESS ) { + netdev_info(dev, CARDNAME": Successful packet caused interrupt\n"); + } + /* re-enable transmit */ + SMC_SELECT_BANK( 0 ); + outw( inw( ioaddr + TCR ) | TCR_ENABLE, ioaddr + TCR ); + + /* kill the packet */ + SMC_SELECT_BANK( 2 ); + outw( MC_FREEPKT, ioaddr + MMU_CMD ); + + /* one less packet waiting for me */ + lp->packets_waiting--; + + outb( saved_packet, ioaddr + PNR_ARR ); +} + +/*-------------------------------------------------------------------- + . + . This is the main routine of the driver, to handle the device when + . it needs some attention. + . + . So: + . first, save state of the chipset + . branch off into routines to handle each case, and acknowledge + . each to the interrupt register + . and finally restore state. + . 
+ ---------------------------------------------------------------------*/ + +static irqreturn_t smc_interrupt(int irq, void * dev_id) +{ + struct net_device *dev = dev_id; + int ioaddr = dev->base_addr; + struct smc_local *lp = netdev_priv(dev); + + byte status; + word card_stats; + byte mask; + int timeout; + /* state registers */ + word saved_bank; + word saved_pointer; + int handled = 0; + + + PRINTK3((CARDNAME": SMC interrupt started\n")); + + saved_bank = inw( ioaddr + BANK_SELECT ); + + SMC_SELECT_BANK(2); + saved_pointer = inw( ioaddr + POINTER ); + + mask = inb( ioaddr + INT_MASK ); + /* clear all interrupts */ + outb( 0, ioaddr + INT_MASK ); + + + /* set a timeout value, so I don't stay here forever */ + timeout = 4; + + PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask)); + do { + /* read the status flag, and mask it */ + status = inb( ioaddr + INTERRUPT ) & mask; + if (!status ) + break; + + handled = 1; + + PRINTK3((KERN_WARNING CARDNAME + ": Handling interrupt status %x\n", status)); + + if (status & IM_RCV_INT) { + /* Got a packet(s). */ + PRINTK2((KERN_WARNING CARDNAME + ": Receive Interrupt\n")); + smc_rcv(dev); + } else if (status & IM_TX_INT ) { + PRINTK2((KERN_WARNING CARDNAME + ": TX ERROR handled\n")); + smc_tx(dev); + outb(IM_TX_INT, ioaddr + INTERRUPT ); + } else if (status & IM_TX_EMPTY_INT ) { + /* update stats */ + SMC_SELECT_BANK( 0 ); + card_stats = inw( ioaddr + COUNTER ); + /* single collisions */ + dev->stats.collisions += card_stats & 0xF; + card_stats >>= 4; + /* multiple collisions */ + dev->stats.collisions += card_stats & 0xF; + + /* these are for when linux supports these statistics */ + + SMC_SELECT_BANK( 2 ); + PRINTK2((KERN_WARNING CARDNAME + ": TX_BUFFER_EMPTY handled\n")); + outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT ); + mask &= ~IM_TX_EMPTY_INT; + dev->stats.tx_packets += lp->packets_waiting; + lp->packets_waiting = 0; + + } else if (status & IM_ALLOC_INT ) { + PRINTK2((KERN_DEBUG CARDNAME + ": Allocation interrupt\n")); + /* clear this interrupt so it doesn't happen again */ + mask &= ~IM_ALLOC_INT; + + smc_hardware_send_packet( dev ); + + /* enable xmit interrupts based on this */ + mask |= ( IM_TX_EMPTY_INT | IM_TX_INT ); + + /* and let the card send more packets to me */ + netif_wake_queue(dev); + + PRINTK2((CARDNAME": Handoff done successfully.\n")); + } else if (status & IM_RX_OVRN_INT ) { + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); + } else if (status & IM_EPH_INT ) { + PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n")); + } else if (status & IM_ERCV_INT ) { + PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n")); + outb( IM_ERCV_INT, ioaddr + INTERRUPT ); + } + } while ( timeout -- ); + + + /* restore state register */ + SMC_SELECT_BANK( 2 ); + outb( mask, ioaddr + INT_MASK ); + + PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask)); + outw( saved_pointer, ioaddr + POINTER ); + + SMC_SELECT_BANK( saved_bank ); + + PRINTK3((CARDNAME ": Interrupt done\n")); + return IRQ_RETVAL(handled); +} + + +/*---------------------------------------------------- + . smc_close + . + . this makes the board clean up everything that it can + . and not talk to the outside world. Caused by + . an 'ifconfig ethX down' + . + -----------------------------------------------------*/ +static int smc_close(struct net_device *dev) +{ + netif_stop_queue(dev); + /* clear everything */ + smc_shutdown( dev->base_addr ); + + /* Update the statistics here. 
*/ + return 0; +} + +/*----------------------------------------------------------- + . smc_set_multicast_list + . + . This routine will, depending on the values passed to it, + . either make it accept multicast packets, go into + . promiscuous mode ( for TCPDUMP and cousins ) or accept + . a select set of multicast packets +*/ +static void smc_set_multicast_list(struct net_device *dev) +{ + short ioaddr = dev->base_addr; + + SMC_SELECT_BANK(0); + if ( dev->flags & IFF_PROMISC ) + outw( inw(ioaddr + RCR ) | RCR_PROMISC, ioaddr + RCR ); + +/* BUG? I never disable promiscuous mode if multicasting was turned on. + Now, I turn off promiscuous mode, but I don't do anything to multicasting + when promiscuous mode is turned on. +*/ + + /* Here, I am setting this to accept all multicast packets. + I don't need to zero the multicast table, because the flag is + checked before the table is + */ + else if (dev->flags & IFF_ALLMULTI) + outw( inw(ioaddr + RCR ) | RCR_ALMUL, ioaddr + RCR ); + + /* We just get all multicast packets even if we only want them + . from one source. This will be changed at some future + . point. */ + else if (!netdev_mc_empty(dev)) { + /* support hardware multicasting */ + + /* be sure I get rid of flags I might have set */ + outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL), + ioaddr + RCR ); + /* NOTE: this has to set the bank, so make sure it is the + last thing called. The bank is set to zero at the top */ + smc_setmulticast(ioaddr, dev); + } + else { + outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL), + ioaddr + RCR ); + + /* + since I'm disabling all multicast entirely, I need to + clear the multicast list + */ + SMC_SELECT_BANK( 3 ); + outw( 0, ioaddr + MULTICAST1 ); + outw( 0, ioaddr + MULTICAST2 ); + outw( 0, ioaddr + MULTICAST3 ); + outw( 0, ioaddr + MULTICAST4 ); + } +} + +#ifdef MODULE + +static struct net_device *devSMC9194; +MODULE_LICENSE("GPL"); + +module_param(io, int, 0); +module_param(irq, int, 0); +module_param(ifport, int, 0); +MODULE_PARM_DESC(io, "SMC 99194 I/O base address"); +MODULE_PARM_DESC(irq, "SMC 99194 IRQ number"); +MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)"); + +int __init init_module(void) +{ + if (io == 0) + printk(KERN_WARNING + CARDNAME": You shouldn't use auto-probing with insmod!\n" ); + + /* copy the parameters from insmod into the device structure */ + devSMC9194 = smc_init(-1); + return PTR_ERR_OR_ZERO(devSMC9194); +} + +void __exit cleanup_module(void) +{ + unregister_netdev(devSMC9194); + free_irq(devSMC9194->irq, devSMC9194); + release_region(devSMC9194->base_addr, SMC_IO_EXTENT); + free_netdev(devSMC9194); +} + +#endif /* MODULE */ diff --git a/drivers/net/ethernet/smsc/smc9194.h b/drivers/net/ethernet/smsc/smc9194.h new file mode 100644 index 000000000..cf69d0a5a --- /dev/null +++ b/drivers/net/ethernet/smsc/smc9194.h @@ -0,0 +1,241 @@ +/*------------------------------------------------------------------------ + . smc9194.h + . Copyright (C) 1996 by Erik Stahlman + . + . This software may be used and distributed according to the terms + . of the GNU General Public License, incorporated herein by reference. + . + . This file contains register information and access macros for + . the SMC91xxx chipset. + . + . Information contained in this file was obtained from the SMC91C94 + . manual from SMC. To get a copy, if you really want one, you can find + . information under www.smc.com in the components division. + . ( this thanks to advice from Donald Becker ). + . + . Authors + . 
Erik Stahlman ( erik@vt.edu ) + . + . History + . 01/06/96 Erik Stahlman moved definitions here from main .c file + . 01/19/96 Erik Stahlman polished this up some, and added better + . error handling + . + ---------------------------------------------------------------------------*/ +#ifndef _SMC9194_H_ +#define _SMC9194_H_ + +/* I want some simple types */ + +typedef unsigned char byte; +typedef unsigned short word; +typedef unsigned long int dword; + + +/* Because of bank switching, the SMC91xxx uses only 16 I/O ports */ + +#define SMC_IO_EXTENT 16 + + +/*--------------------------------------------------------------- + . + . A description of the SMC registers is probably in order here, + . although for details, the SMC datasheet is invaluable. + . + . Basically, the chip has 4 banks of registers ( 0 to 3 ), which + . are accessed by writing a number into the BANK_SELECT register + . ( I also use a SMC_SELECT_BANK macro for this ). + . + . The banks are configured so that for most purposes, bank 2 is all + . that is needed for simple run time tasks. + -----------------------------------------------------------------------*/ + +/* + . Bank Select Register: + . + . yyyy yyyy 0000 00xx + . xx = bank number + . yyyy yyyy = 0x33, for identification purposes. +*/ +#define BANK_SELECT 14 + +/* BANK 0 */ + +#define TCR 0 /* transmit control register */ +#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */ +#define TCR_FDUPLX 0x0800 /* receive packets sent out */ +#define TCR_STP_SQET 0x1000 /* stop transmitting if Signal quality error */ +#define TCR_MON_CNS 0x0400 /* monitors the carrier status */ +#define TCR_PAD_ENABLE 0x0080 /* pads short packets to 64 bytes */ + +#define TCR_CLEAR 0 /* do NOTHING */ +/* the normal settings for the TCR register : */ +/* QUESTION: do I want to enable padding of short packets ? */ +#define TCR_NORMAL TCR_ENABLE + + +#define EPH_STATUS 2 +#define ES_LINK_OK 0x4000 /* is the link integrity ok ? 
*/ + +#define RCR 4 +#define RCR_SOFTRESET 0x8000 /* resets the chip */ +#define RCR_STRIP_CRC 0x200 /* strips CRC */ +#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */ +#define RCR_ALMUL 0x4 /* receive all multicast packets */ +#define RCR_PROMISC 0x2 /* enable promiscuous mode */ + +/* the normal settings for the RCR register : */ +#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE) +#define RCR_CLEAR 0x0 /* set it to a base state */ + +#define COUNTER 6 +#define MIR 8 +#define MCR 10 +/* 12 is reserved */ + +/* BANK 1 */ +#define CONFIG 0 +#define CFG_AUI_SELECT 0x100 +#define BASE 2 +#define ADDR0 4 +#define ADDR1 6 +#define ADDR2 8 +#define GENERAL 10 +#define CONTROL 12 +#define CTL_POWERDOWN 0x2000 +#define CTL_LE_ENABLE 0x80 +#define CTL_CR_ENABLE 0x40 +#define CTL_TE_ENABLE 0x0020 +#define CTL_AUTO_RELEASE 0x0800 +#define CTL_EPROM_ACCESS 0x0003 /* high if Eprom is being read */ + +/* BANK 2 */ +#define MMU_CMD 0 +#define MC_BUSY 1 /* only readable bit in the register */ +#define MC_NOP 0 +#define MC_ALLOC 0x20 /* or with number of 256 byte packets */ +#define MC_RESET 0x40 +#define MC_REMOVE 0x60 /* remove the current rx packet */ +#define MC_RELEASE 0x80 /* remove and release the current rx packet */ +#define MC_FREEPKT 0xA0 /* Release packet in PNR register */ +#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */ + +#define PNR_ARR 2 +#define FIFO_PORTS 4 + +#define FP_RXEMPTY 0x8000 +#define FP_TXEMPTY 0x80 + +#define POINTER 6 +#define PTR_READ 0x2000 +#define PTR_RCV 0x8000 +#define PTR_AUTOINC 0x4000 +#define PTR_AUTO_INC 0x0040 + +#define DATA_1 8 +#define DATA_2 10 +#define INTERRUPT 12 + +#define INT_MASK 13 +#define IM_RCV_INT 0x1 +#define IM_TX_INT 0x2 +#define IM_TX_EMPTY_INT 0x4 +#define IM_ALLOC_INT 0x8 +#define IM_RX_OVRN_INT 0x10 +#define IM_EPH_INT 0x20 +#define IM_ERCV_INT 0x40 /* not on SMC9192 */ + +/* BANK 3 */ +#define MULTICAST1 0 +#define MULTICAST2 2 +#define MULTICAST3 4 +#define MULTICAST4 6 +#define MGMT 8 +#define REVISION 10 /* ( hi: chip id low: rev # ) */ + + +/* this is NOT on SMC9192 */ +#define ERCV 12 + +#define CHIP_9190 3 +#define CHIP_9194 4 +#define CHIP_9195 5 +#define CHIP_91100 7 + +static const char * chip_ids[ 15 ] = { + NULL, NULL, NULL, + /* 3 */ "SMC91C90/91C92", + /* 4 */ "SMC91C94", + /* 5 */ "SMC91C95", + NULL, + /* 7 */ "SMC91C100", + /* 8 */ "SMC91C100FD", + NULL, NULL, NULL, + NULL, NULL, NULL}; + +/* + . Transmit status bits +*/ +#define TS_SUCCESS 0x0001 +#define TS_LOSTCAR 0x0400 +#define TS_LATCOL 0x0200 +#define TS_16COL 0x0010 + +/* + . Receive status bits +*/ +#define RS_ALGNERR 0x8000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 +#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) + +static const char * interfaces[ 2 ] = { "TP", "AUI" }; + +/*------------------------------------------------------------------------- + . I define some macros to make it easier to do somewhat common + . or slightly complicated, repeated tasks. 
+ --------------------------------------------------------------------------*/ + +/* select a register bank, 0 to 3 */ + +#define SMC_SELECT_BANK(x) { outw( x, ioaddr + BANK_SELECT ); } + +/* define a small delay for the reset */ +#define SMC_DELAY() { inw( ioaddr + RCR );\ + inw( ioaddr + RCR );\ + inw( ioaddr + RCR ); } + +/* this enables an interrupt in the interrupt mask register */ +#define SMC_ENABLE_INT(x) {\ + unsigned char mask;\ + SMC_SELECT_BANK(2);\ + mask = inb( ioaddr + INT_MASK );\ + mask |= (x);\ + outb( mask, ioaddr + INT_MASK ); \ +} + +/* this disables an interrupt from the interrupt mask register */ + +#define SMC_DISABLE_INT(x) {\ + unsigned char mask;\ + SMC_SELECT_BANK(2);\ + mask = inb( ioaddr + INT_MASK );\ + mask &= ~(x);\ + outb( mask, ioaddr + INT_MASK ); \ +} + +/*---------------------------------------------------------------------- + . Define the interrupts that I want to receive from the card + . + . I want: + . IM_EPH_INT, for nasty errors + . IM_RCV_INT, for happy received packets + . IM_RX_OVRN_INT, because I have to kick the receiver + --------------------------------------------------------------------------*/ +#define SMC_INTERRUPT_MASK (IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) + +#endif /* _SMC_9194_H_ */ + diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c new file mode 100644 index 000000000..e841e1eb8 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -0,0 +1,2052 @@ +/*====================================================================== + + A PCMCIA ethernet driver for SMC91c92-based cards. + + This driver supports Megahertz PCMCIA ethernet cards; and + Megahertz, Motorola, Ositech, and Psion Dacom ethernet/modem + multifunction cards. + + Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net + + smc91c92_cs.c 1.122 2002/10/25 06:26:39 + + This driver contains code written by Donald Becker + (becker@scyld.com), Rowan Hughes (x-csrdh@jcu.edu.au), + David Hinds (dahinds@users.sourceforge.net), and Erik Stahlman + (erik@vt.edu). Donald wrote the SMC 91c92 code using parts of + Erik's SMC 91c94 driver. Rowan wrote a similar driver, and I've + incorporated some parts of his driver here. I (Dave) wrote most + of the PCMCIA glue code, and the Ositech support code. Kelly + Stephens (kstephen@holli.com) added support for the Motorola + Mariner, with help from Allen Brost. + + This software may be used and distributed according to the terms of + the GNU General Public License, incorporated herein by reference. + +======================================================================*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +/*====================================================================*/ + +static const char *if_names[] = { "auto", "10baseT", "10base2"}; + +/* Firmware name */ +#define FIRMWARE_NAME "/*(DEBLOBBED)*/" + +/* Module parameters */ + +MODULE_DESCRIPTION("SMC 91c92 series PCMCIA ethernet driver"); +MODULE_LICENSE("GPL"); +/*(DEBLOBBED)*/ + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +/* + Transceiver/media type. 
+ 0 = auto + 1 = 10baseT (and autoselect if #define AUTOSELECT), + 2 = AUI/10base2, +*/ +INT_MODULE_PARM(if_port, 0); + + +#define DRV_NAME "smc91c92_cs" +#define DRV_VERSION "1.123" + +/*====================================================================*/ + +/* Operational parameter that usually are not changed. */ + +/* Time in jiffies before concluding Tx hung */ +#define TX_TIMEOUT ((400*HZ)/1000) + +/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ +#define INTR_WORK 4 + +/* Times to check the check the chip before concluding that it doesn't + currently have room for another Tx packet. */ +#define MEMORY_WAIT_TIME 8 + +struct smc_private { + struct pcmcia_device *p_dev; + spinlock_t lock; + u_short manfid; + u_short cardid; + + struct sk_buff *saved_skb; + int packets_waiting; + void __iomem *base; + u_short cfg; + struct timer_list media; + int watchdog, tx_err; + u_short media_status; + u_short fast_poll; + u_short link_status; + struct mii_if_info mii_if; + int duplex; + int rx_ovrn; +}; + +/* Special definitions for Megahertz multifunction cards */ +#define MEGAHERTZ_ISR 0x0380 + +/* Special function registers for Motorola Mariner */ +#define MOT_LAN 0x0000 +#define MOT_UART 0x0020 +#define MOT_EEPROM 0x20 + +#define MOT_NORMAL \ +(COR_LEVEL_REQ | COR_FUNC_ENA | COR_ADDR_DECODE | COR_IREQ_ENA) + +/* Special function registers for Ositech cards */ +#define OSITECH_AUI_CTL 0x0c +#define OSITECH_PWRDOWN 0x0d +#define OSITECH_RESET 0x0e +#define OSITECH_ISR 0x0f +#define OSITECH_AUI_PWR 0x0c +#define OSITECH_RESET_ISR 0x0e + +#define OSI_AUI_PWR 0x40 +#define OSI_LAN_PWRDOWN 0x02 +#define OSI_MODEM_PWRDOWN 0x01 +#define OSI_LAN_RESET 0x02 +#define OSI_MODEM_RESET 0x01 + +/* Symbolic constants for the SMC91c9* series chips, from Erik Stahlman. */ +#define BANK_SELECT 14 /* Window select register. */ +#define SMC_SELECT_BANK(x) { outw(x, ioaddr + BANK_SELECT); } + +/* Bank 0 registers. */ +#define TCR 0 /* transmit control register */ +#define TCR_CLEAR 0 /* do NOTHING */ +#define TCR_ENABLE 0x0001 /* if this is 1, we can transmit */ +#define TCR_PAD_EN 0x0080 /* pads short packets to 64 bytes */ +#define TCR_MONCSN 0x0400 /* Monitor Carrier. */ +#define TCR_FDUPLX 0x0800 /* Full duplex mode. */ +#define TCR_NORMAL TCR_ENABLE | TCR_PAD_EN + +#define EPH 2 /* Ethernet Protocol Handler report. */ +#define EPH_TX_SUC 0x0001 +#define EPH_SNGLCOL 0x0002 +#define EPH_MULCOL 0x0004 +#define EPH_LTX_MULT 0x0008 +#define EPH_16COL 0x0010 +#define EPH_SQET 0x0020 +#define EPH_LTX_BRD 0x0040 +#define EPH_TX_DEFR 0x0080 +#define EPH_LAT_COL 0x0200 +#define EPH_LOST_CAR 0x0400 +#define EPH_EXC_DEF 0x0800 +#define EPH_CTR_ROL 0x1000 +#define EPH_RX_OVRN 0x2000 +#define EPH_LINK_OK 0x4000 +#define EPH_TX_UNRN 0x8000 +#define MEMINFO 8 /* Memory Information Register */ +#define MEMCFG 10 /* Memory Configuration Register */ + +/* Bank 1 registers. 
*/ +#define CONFIG 0 +#define CFG_MII_SELECT 0x8000 /* 91C100 only */ +#define CFG_NO_WAIT 0x1000 +#define CFG_FULL_STEP 0x0400 +#define CFG_SET_SQLCH 0x0200 +#define CFG_AUI_SELECT 0x0100 +#define CFG_16BIT 0x0080 +#define CFG_DIS_LINK 0x0040 +#define CFG_STATIC 0x0030 +#define CFG_IRQ_SEL_1 0x0004 +#define CFG_IRQ_SEL_0 0x0002 +#define BASE_ADDR 2 +#define ADDR0 4 +#define GENERAL 10 +#define CONTROL 12 +#define CTL_STORE 0x0001 +#define CTL_RELOAD 0x0002 +#define CTL_EE_SELECT 0x0004 +#define CTL_TE_ENABLE 0x0020 +#define CTL_CR_ENABLE 0x0040 +#define CTL_LE_ENABLE 0x0080 +#define CTL_AUTO_RELEASE 0x0800 +#define CTL_POWERDOWN 0x2000 + +/* Bank 2 registers. */ +#define MMU_CMD 0 +#define MC_ALLOC 0x20 /* or with number of 256 byte packets */ +#define MC_RESET 0x40 +#define MC_RELEASE 0x80 /* remove and release the current rx packet */ +#define MC_FREEPKT 0xA0 /* Release packet in PNR register */ +#define MC_ENQUEUE 0xC0 /* Enqueue the packet for transmit */ +#define PNR_ARR 2 +#define FIFO_PORTS 4 +#define FP_RXEMPTY 0x8000 +#define POINTER 6 +#define PTR_AUTO_INC 0x0040 +#define PTR_READ 0x2000 +#define PTR_AUTOINC 0x4000 +#define PTR_RCV 0x8000 +#define DATA_1 8 +#define INTERRUPT 12 +#define IM_RCV_INT 0x1 +#define IM_TX_INT 0x2 +#define IM_TX_EMPTY_INT 0x4 +#define IM_ALLOC_INT 0x8 +#define IM_RX_OVRN_INT 0x10 +#define IM_EPH_INT 0x20 + +#define RCR 4 +enum RxCfg { RxAllMulti = 0x0004, RxPromisc = 0x0002, + RxEnable = 0x0100, RxStripCRC = 0x0200}; +#define RCR_SOFTRESET 0x8000 /* resets the chip */ +#define RCR_STRIP_CRC 0x200 /* strips CRC */ +#define RCR_ENABLE 0x100 /* IFF this is set, we can receive packets */ +#define RCR_ALMUL 0x4 /* receive all multicast packets */ +#define RCR_PROMISC 0x2 /* enable promiscuous mode */ + +/* the normal settings for the RCR register : */ +#define RCR_NORMAL (RCR_STRIP_CRC | RCR_ENABLE) +#define RCR_CLEAR 0x0 /* set it to a base state */ +#define COUNTER 6 + +/* BANK 3 -- not the same values as in smc9194! */ +#define MULTICAST0 0 +#define MULTICAST2 2 +#define MULTICAST4 4 +#define MULTICAST6 6 +#define MGMT 8 +#define REVISION 0x0a + +/* Transmit status bits. */ +#define TS_SUCCESS 0x0001 +#define TS_16COL 0x0010 +#define TS_LATCOL 0x0200 +#define TS_LOSTCAR 0x0400 + +/* Receive status bits. 
*/ +#define RS_ALGNERR 0x8000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 +#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) + +#define set_bits(v, p) outw(inw(p)|(v), (p)) +#define mask_bits(v, p) outw(inw(p)&(v), (p)) + +/*====================================================================*/ + +static void smc91c92_detach(struct pcmcia_device *p_dev); +static int smc91c92_config(struct pcmcia_device *link); +static void smc91c92_release(struct pcmcia_device *link); + +static int smc_open(struct net_device *dev); +static int smc_close(struct net_device *dev); +static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static void smc_tx_timeout(struct net_device *dev); +static netdev_tx_t smc_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t smc_interrupt(int irq, void *dev_id); +static void smc_rx(struct net_device *dev); +static void set_rx_mode(struct net_device *dev); +static int s9k_config(struct net_device *dev, struct ifmap *map); +static void smc_set_xcvr(struct net_device *dev, int if_port); +static void smc_reset(struct net_device *dev); +static void media_check(u_long arg); +static void mdio_sync(unsigned int addr); +static int mdio_read(struct net_device *dev, int phy_id, int loc); +static void mdio_write(struct net_device *dev, int phy_id, int loc, int value); +static int smc_link_ok(struct net_device *dev); +static const struct ethtool_ops ethtool_ops; + +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_start_xmit, + .ndo_tx_timeout = smc_tx_timeout, + .ndo_set_config = s9k_config, + .ndo_set_rx_mode = set_rx_mode, + .ndo_do_ioctl = smc_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int smc91c92_probe(struct pcmcia_device *link) +{ + struct smc_private *smc; + struct net_device *dev; + + dev_dbg(&link->dev, "smc91c92_attach()\n"); + + /* Create new ethernet device */ + dev = alloc_etherdev(sizeof(struct smc_private)); + if (!dev) + return -ENOMEM; + smc = netdev_priv(dev); + smc->p_dev = link; + link->priv = dev; + + spin_lock_init(&smc->lock); + + /* The SMC91c92-specific entries in the device structure. */ + dev->netdev_ops = &smc_netdev_ops; + dev->ethtool_ops = ðtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + smc->mii_if.dev = dev; + smc->mii_if.mdio_read = mdio_read; + smc->mii_if.mdio_write = mdio_write; + smc->mii_if.phy_id_mask = 0x1f; + smc->mii_if.reg_num_mask = 0x1f; + + return smc91c92_config(link); +} /* smc91c92_attach */ + +static void smc91c92_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "smc91c92_detach\n"); + + unregister_netdev(dev); + + smc91c92_release(link); + + free_netdev(dev); +} /* smc91c92_detach */ + +/*====================================================================*/ + +static int cvt_ascii_address(struct net_device *dev, char *s) +{ + int i, j, da, c; + + if (strlen(s) != 12) + return -1; + for (i = 0; i < 6; i++) { + da = 0; + for (j = 0; j < 2; j++) { + c = *s++; + da <<= 4; + da += ((c >= '0') && (c <= '9')) ? 
+ (c - '0') : ((c & 0x0f) + 9); + } + dev->dev_addr[i] = da; + } + return 0; +} + +/*==================================================================== + + Configuration stuff for Megahertz cards + + mhz_3288_power() is used to power up a 3288's ethernet chip. + mhz_mfc_config() handles socket setup for multifunction (1144 + and 3288) cards. mhz_setup() gets a card's hardware ethernet + address. + +======================================================================*/ + +static int mhz_3288_power(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + u_char tmp; + + /* Read the ISR twice... */ + readb(smc->base+MEGAHERTZ_ISR); + udelay(5); + readb(smc->base+MEGAHERTZ_ISR); + + /* Pause 200ms... */ + mdelay(200); + + /* Now read and write the COR... */ + tmp = readb(smc->base + link->config_base + CISREG_COR); + udelay(5); + writeb(tmp, smc->base + link->config_base + CISREG_COR); + + return 0; +} + +static int mhz_mfc_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + int k; + p_dev->io_lines = 16; + p_dev->resource[1]->start = p_dev->resource[0]->start; + p_dev->resource[1]->end = 8; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + for (k = 0; k < 0x400; k += 0x10) { + if (k & 0x80) + continue; + p_dev->resource[0]->start = k ^ 0x300; + if (!pcmcia_request_io(p_dev)) + return 0; + } + return -ENODEV; +} + +static int mhz_mfc_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + unsigned int offset; + int i; + + link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ | + CONF_AUTO_SET_IO; + + /* The Megahertz combo cards have modem-like CIS entries, so + we have to explicitly try a bunch of port combinations. */ + if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL)) + return -ENODEV; + + dev->base_addr = link->resource[0]->start; + + /* Allocate a memory window, for accessing the ISR */ + link->resource[2]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); + if (i != 0) + return -ENODEV; + + smc->base = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); + offset = (smc->manfid == MANFID_MOTOROLA) ? link->config_base : 0; + i = pcmcia_map_mem_page(link, link->resource[2], offset); + if ((i == 0) && + (smc->manfid == MANFID_MEGAHERTZ) && + (smc->cardid == PRODID_MEGAHERTZ_EM3288)) + mhz_3288_power(link); + + return 0; +} + +static int pcmcia_get_versmac(struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv) +{ + struct net_device *dev = priv; + cisparse_t parse; + u8 *buf; + + if (pcmcia_parse_tuple(tuple, &parse)) + return -EINVAL; + + buf = parse.version_1.str + parse.version_1.ofs[3]; + + if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0)) + return 0; + + return -EINVAL; +}; + +static int mhz_setup(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + size_t len; + u8 *buf; + int rc; + + /* Read the station address from the CIS. It is stored as the last + (fourth) string in the Version 1 Version/ID tuple. */ + if ((link->prod_id[3]) && + (cvt_ascii_address(dev, link->prod_id[3]) == 0)) + return 0; + + /* Workarounds for broken cards start here. 
*/ + /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ + if (!pcmcia_loop_tuple(link, CISTPL_VERS_1, pcmcia_get_versmac, dev)) + return 0; + + /* Another possibility: for the EM3288, in a special tuple */ + rc = -1; + len = pcmcia_get_tuple(link, 0x81, &buf); + if (buf && len >= 13) { + buf[12] = '\0'; + if (cvt_ascii_address(dev, buf) == 0) + rc = 0; + } + kfree(buf); + + return rc; +}; + +/*====================================================================== + + Configuration stuff for the Motorola Mariner + + mot_config() writes directly to the Mariner configuration + registers because the CIS is just bogus. + +======================================================================*/ + +static void mot_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned int iouart = link->resource[1]->start; + + /* Set UART base address and force map with COR bit 1 */ + writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0); + writeb((iouart >> 8) & 0xff, smc->base + MOT_UART + CISREG_IOBASE_1); + writeb(MOT_NORMAL, smc->base + MOT_UART + CISREG_COR); + + /* Set SMC base address and force map with COR bit 1 */ + writeb(ioaddr & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_0); + writeb((ioaddr >> 8) & 0xff, smc->base + MOT_LAN + CISREG_IOBASE_1); + writeb(MOT_NORMAL, smc->base + MOT_LAN + CISREG_COR); + + /* Wait for things to settle down */ + mdelay(100); +} + +static int mot_setup(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int i, wait, loop; + u_int addr; + + /* Read Ethernet address from Serial EEPROM */ + + for (i = 0; i < 3; i++) { + SMC_SELECT_BANK(2); + outw(MOT_EEPROM + i, ioaddr + POINTER); + SMC_SELECT_BANK(1); + outw((CTL_RELOAD | CTL_EE_SELECT), ioaddr + CONTROL); + + for (loop = wait = 0; loop < 200; loop++) { + udelay(10); + wait = ((CTL_RELOAD | CTL_STORE) & inw(ioaddr + CONTROL)); + if (wait == 0) break; + } + + if (wait) + return -1; + + addr = inw(ioaddr + GENERAL); + dev->dev_addr[2*i] = addr & 0xff; + dev->dev_addr[2*i+1] = (addr >> 8) & 0xff; + } + + return 0; +} + +/*====================================================================*/ + +static int smc_configcheck(struct pcmcia_device *p_dev, void *priv_data) +{ + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + return pcmcia_request_io(p_dev); +} + +static int smc_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + int i; + + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + + i = pcmcia_loop_config(link, smc_configcheck, NULL); + if (!i) + dev->base_addr = link->resource[0]->start; + + return i; +} + + +static int smc_setup(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + /* Check for a LAN function extension tuple */ + if (!pcmcia_get_mac_from_cis(link, dev)) + return 0; + + /* Try the third string in the Version 1 Version/ID tuple. 
*/ + if (link->prod_id[2]) { + if (cvt_ascii_address(dev, link->prod_id[2]) == 0) + return 0; + } + return -1; +} + +/*====================================================================*/ + +static int osi_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; + int i, j; + + link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ; + link->resource[0]->end = 64; + link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + link->resource[1]->end = 8; + + /* Enable Hard Decode, LAN, Modem */ + link->io_lines = 16; + link->config_index = 0x23; + + for (i = j = 0; j < 4; j++) { + link->resource[1]->start = com[j]; + i = pcmcia_request_io(link); + if (i == 0) + break; + } + if (i != 0) { + /* Fallback: turn off hard decode */ + link->config_index = 0x03; + link->resource[1]->end = 0; + i = pcmcia_request_io(link); + } + dev->base_addr = link->resource[0]->start + 0x10; + return i; +} + +static int osi_load_firmware(struct pcmcia_device *link) +{ + const struct firmware *fw; + int i, err; + + err = reject_firmware(&fw, FIRMWARE_NAME, &link->dev); + if (err) { + pr_err("Failed to load firmware \"%s\"\n", FIRMWARE_NAME); + return err; + } + + /* Download the Seven of Diamonds firmware */ + for (i = 0; i < fw->size; i++) { + outb(fw->data[i], link->resource[0]->start + 2); + udelay(50); + } + release_firmware(fw); + return err; +} + +static int pcmcia_osi_mac(struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv) +{ + struct net_device *dev = priv; + int i; + + if (tuple->TupleDataLen < 8) + return -EINVAL; + if (tuple->TupleData[0] != 0x04) + return -EINVAL; + for (i = 0; i < 6; i++) + dev->dev_addr[i] = tuple->TupleData[i+2]; + return 0; +}; + + +static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) +{ + struct net_device *dev = link->priv; + int rc; + + /* Read the station address from tuple 0x90, subtuple 0x04 */ + if (pcmcia_loop_tuple(link, 0x90, pcmcia_osi_mac, dev)) + return -1; + + if (((manfid == MANFID_OSITECH) && + (cardid == PRODID_OSITECH_SEVEN)) || + ((manfid == MANFID_PSION) && + (cardid == PRODID_PSION_NET100))) { + rc = osi_load_firmware(link); + if (rc) + return rc; + } else if (manfid == MANFID_OSITECH) { + /* Make sure both functions are powered up */ + set_bits(0x300, link->resource[0]->start + OSITECH_AUI_PWR); + /* Now, turn on the interrupt for both card functions */ + set_bits(0x300, link->resource[0]->start + OSITECH_RESET_ISR); + dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n", + inw(link->resource[0]->start + OSITECH_AUI_PWR), + inw(link->resource[0]->start + OSITECH_RESET_ISR)); + } + return 0; +} + +static int smc91c92_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) + netif_device_detach(dev); + + return 0; +} + +static int smc91c92_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + int i; + + if ((smc->manfid == MANFID_MEGAHERTZ) && + (smc->cardid == PRODID_MEGAHERTZ_EM3288)) + mhz_3288_power(link); + if (smc->manfid == MANFID_MOTOROLA) + mot_config(link); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) { + /* Power up the card and enable interrupts */ + set_bits(0x0300, dev->base_addr-0x10+OSITECH_AUI_PWR); + set_bits(0x0300, dev->base_addr-0x10+OSITECH_RESET_ISR); + } + if (((smc->manfid == MANFID_OSITECH) && + (smc->cardid == PRODID_OSITECH_SEVEN)) || + ((smc->manfid == MANFID_PSION) 
&& + (smc->cardid == PRODID_PSION_NET100))) { + i = osi_load_firmware(link); + if (i) { + netdev_err(dev, "Failed to load firmware\n"); + return i; + } + } + if (link->open) { + smc_reset(dev); + netif_device_attach(dev); + } + + return 0; +} + + +/*====================================================================== + + This verifies that the chip is some SMC91cXX variant, and returns + the revision code if successful. Otherwise, it returns -ENODEV. + +======================================================================*/ + +static int check_sig(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + unsigned int ioaddr = dev->base_addr; + int width; + u_short s; + + SMC_SELECT_BANK(1); + if (inw(ioaddr + BANK_SELECT) >> 8 != 0x33) { + /* Try powering up the chip */ + outw(0, ioaddr + CONTROL); + mdelay(55); + } + + /* Try setting bus width */ + width = (link->resource[0]->flags == IO_DATA_PATH_WIDTH_AUTO); + s = inb(ioaddr + CONFIG); + if (width) + s |= CFG_16BIT; + else + s &= ~CFG_16BIT; + outb(s, ioaddr + CONFIG); + + /* Check Base Address Register to make sure bus width is OK */ + s = inw(ioaddr + BASE_ADDR); + if ((inw(ioaddr + BANK_SELECT) >> 8 == 0x33) && + ((s >> 8) != (s & 0xff))) { + SMC_SELECT_BANK(3); + s = inw(ioaddr + REVISION); + return s & 0xff; + } + + if (width) { + netdev_info(dev, "using 8-bit IO window\n"); + + smc91c92_suspend(link); + pcmcia_fixup_iowidth(link); + smc91c92_resume(link); + return check_sig(link); + } + return -ENODEV; +} + +static int smc91c92_config(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + char *name; + int i, rev, j = 0; + unsigned int ioaddr; + u_long mir; + + dev_dbg(&link->dev, "smc91c92_config\n"); + + smc->manfid = link->manf_id; + smc->cardid = link->card_id; + + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) { + i = osi_config(link); + } else if ((smc->manfid == MANFID_MOTOROLA) || + ((smc->manfid == MANFID_MEGAHERTZ) && + ((smc->cardid == PRODID_MEGAHERTZ_VARIOUS) || + (smc->cardid == PRODID_MEGAHERTZ_EM3288)))) { + i = mhz_mfc_config(link); + } else { + i = smc_config(link); + } + if (i) + goto config_failed; + + i = pcmcia_request_irq(link, smc_interrupt); + if (i) + goto config_failed; + i = pcmcia_enable_device(link); + if (i) + goto config_failed; + + if (smc->manfid == MANFID_MOTOROLA) + mot_config(link); + + dev->irq = link->irq; + + if ((if_port >= 0) && (if_port <= 2)) + dev->if_port = if_port; + else + dev_notice(&link->dev, "invalid if_port requested\n"); + + switch (smc->manfid) { + case MANFID_OSITECH: + case MANFID_PSION: + i = osi_setup(link, smc->manfid, smc->cardid); break; + case MANFID_SMC: + case MANFID_NEW_MEDIA: + i = smc_setup(link); break; + case 0x128: /* For broken Megahertz cards */ + case MANFID_MEGAHERTZ: + i = mhz_setup(link); break; + case MANFID_MOTOROLA: + default: /* get the hw address from EEPROM */ + i = mot_setup(link); break; + } + + if (i != 0) { + dev_notice(&link->dev, "Unable to find hardware address.\n"); + goto config_failed; + } + + smc->duplex = 0; + smc->rx_ovrn = 0; + + rev = check_sig(link); + name = "???"; + if (rev > 0) + switch (rev >> 4) { + case 3: name = "92"; break; + case 4: name = ((rev & 15) >= 6) ? 
"96" : "94"; break; + case 5: name = "95"; break; + case 7: name = "100"; break; + case 8: name = "100-FD"; break; + case 9: name = "110"; break; + } + + ioaddr = dev->base_addr; + if (rev > 0) { + u_long mcr; + SMC_SELECT_BANK(0); + mir = inw(ioaddr + MEMINFO) & 0xff; + if (mir == 0xff) mir++; + /* Get scale factor for memory size */ + mcr = ((rev >> 4) > 3) ? inw(ioaddr + MEMCFG) : 0x0200; + mir *= 128 * (1<<((mcr >> 9) & 7)); + SMC_SELECT_BANK(1); + smc->cfg = inw(ioaddr + CONFIG) & ~CFG_AUI_SELECT; + smc->cfg |= CFG_NO_WAIT | CFG_16BIT | CFG_STATIC; + if (smc->manfid == MANFID_OSITECH) + smc->cfg |= CFG_IRQ_SEL_1 | CFG_IRQ_SEL_0; + if ((rev >> 4) >= 7) + smc->cfg |= CFG_MII_SELECT; + } else + mir = 0; + + if (smc->cfg & CFG_MII_SELECT) { + SMC_SELECT_BANK(3); + + for (i = 0; i < 32; i++) { + j = mdio_read(dev, i, 1); + if ((j != 0) && (j != 0xffff)) break; + } + smc->mii_if.phy_id = (i < 32) ? i : -1; + + SMC_SELECT_BANK(0); + } + + SET_NETDEV_DEV(dev, &link->dev); + + if (register_netdev(dev) != 0) { + dev_err(&link->dev, "register_netdev() failed\n"); + goto config_undo; + } + + netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n", + name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr); + + if (rev > 0) { + if (mir & 0x3ff) + netdev_info(dev, " %lu byte", mir); + else + netdev_info(dev, " %lu kb", mir>>10); + pr_cont(" buffer, %s xcvr\n", + (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]); + } + + if (smc->cfg & CFG_MII_SELECT) { + if (smc->mii_if.phy_id != -1) { + netdev_dbg(dev, " MII transceiver at index %d, status %x\n", + smc->mii_if.phy_id, j); + } else { + netdev_notice(dev, " No MII transceivers found!\n"); + } + } + return 0; + +config_undo: + unregister_netdev(dev); +config_failed: + smc91c92_release(link); + free_netdev(dev); + return -ENODEV; +} /* smc91c92_config */ + +static void smc91c92_release(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "smc91c92_release\n"); + if (link->resource[2]->end) { + struct net_device *dev = link->priv; + struct smc_private *smc = netdev_priv(dev); + iounmap(smc->base); + } + pcmcia_disable_device(link); +} + +/*====================================================================== + + MII interface support for SMC91cXX based cards +======================================================================*/ + +#define MDIO_SHIFT_CLK 0x04 +#define MDIO_DATA_OUT 0x01 +#define MDIO_DIR_WRITE 0x08 +#define MDIO_DATA_WRITE0 (MDIO_DIR_WRITE) +#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT) +#define MDIO_DATA_READ 0x02 + +static void mdio_sync(unsigned int addr) +{ + int bits; + for (bits = 0; bits < 32; bits++) { + outb(MDIO_DATA_WRITE1, addr); + outb(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, addr); + } +} + +static int mdio_read(struct net_device *dev, int phy_id, int loc) +{ + unsigned int addr = dev->base_addr + MGMT; + u_int cmd = (0x06<<10)|(phy_id<<5)|loc; + int i, retval = 0; + + mdio_sync(addr); + for (i = 13; i >= 0; i--) { + int dat = (cmd&(1< 0; i--) { + outb(0, addr); + retval = (retval << 1) | ((inb(addr) & MDIO_DATA_READ) != 0); + outb(MDIO_SHIFT_CLK, addr); + } + return (retval>>1) & 0xffff; +} + +static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) +{ + unsigned int addr = dev->base_addr + MGMT; + u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value; + int i; + + mdio_sync(addr); + for (i = 31; i >= 0; i--) { + int dat = (cmd&(1<= 0; i--) { + outb(0, addr); + outb(MDIO_SHIFT_CLK, addr); + } +} + 
+/*====================================================================== + + The driver core code, most of which should be common with a + non-PCMCIA implementation. + +======================================================================*/ + +#ifdef PCMCIA_DEBUG +static void smc_dump(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + u_short i, w, save; + save = inw(ioaddr + BANK_SELECT); + for (w = 0; w < 4; w++) { + SMC_SELECT_BANK(w); + netdev_dbg(dev, "bank %d: ", w); + for (i = 0; i < 14; i += 2) + pr_cont(" %04x", inw(ioaddr + i)); + pr_cont("\n"); + } + outw(save, ioaddr + BANK_SELECT); +} +#endif + +static int smc_open(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + struct pcmcia_device *link = smc->p_dev; + + dev_dbg(&link->dev, "%s: smc_open(%p), ID/Window %4.4x.\n", + dev->name, dev, inw(dev->base_addr + BANK_SELECT)); +#ifdef PCMCIA_DEBUG + smc_dump(dev); +#endif + + /* Check that the PCMCIA card is still here. */ + if (!pcmcia_dev_present(link)) + return -ENODEV; + /* Physical device present signature. */ + if (check_sig(link) < 0) { + netdev_info(dev, "Yikes! Bad chip signature!\n"); + return -ENODEV; + } + link->open++; + + netif_start_queue(dev); + smc->saved_skb = NULL; + smc->packets_waiting = 0; + + smc_reset(dev); + setup_timer(&smc->media, media_check, (u_long)dev); + mod_timer(&smc->media, jiffies + HZ); + + return 0; +} /* smc_open */ + +/*====================================================================*/ + +static int smc_close(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + struct pcmcia_device *link = smc->p_dev; + unsigned int ioaddr = dev->base_addr; + + dev_dbg(&link->dev, "%s: smc_close(), status %4.4x.\n", + dev->name, inw(ioaddr + BANK_SELECT)); + + netif_stop_queue(dev); + + /* Shut off all interrupts, and turn off the Tx and Rx sections. + Don't bother to check for chip present. */ + SMC_SELECT_BANK(2); /* Nominally paranoia, but do no assume... */ + outw(0, ioaddr + INTERRUPT); + SMC_SELECT_BANK(0); + mask_bits(0xff00, ioaddr + RCR); + mask_bits(0xff00, ioaddr + TCR); + + /* Put the chip into power-down mode. */ + SMC_SELECT_BANK(1); + outw(CTL_POWERDOWN, ioaddr + CONTROL ); + + link->open--; + del_timer_sync(&smc->media); + + return 0; +} /* smc_close */ + +/*====================================================================== + + Transfer a packet to the hardware and trigger the packet send. + This may be called at either from either the Tx queue code + or the interrupt handler. + +======================================================================*/ + +static void smc_hardware_send_packet(struct net_device * dev) +{ + struct smc_private *smc = netdev_priv(dev); + struct sk_buff *skb = smc->saved_skb; + unsigned int ioaddr = dev->base_addr; + u_char packet_no; + + if (!skb) { + netdev_err(dev, "In XMIT with no packet to send\n"); + return; + } + + /* There should be a packet slot waiting. */ + packet_no = inw(ioaddr + PNR_ARR) >> 8; + if (packet_no & 0x80) { + /* If not, there is a hardware problem! Likely an ejected card. */ + netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n", + packet_no); + dev_kfree_skb_irq(skb); + smc->saved_skb = NULL; + netif_start_queue(dev); + return; + } + + dev->stats.tx_bytes += skb->len; + /* The card should use the just-allocated buffer. 
*/ + outw(packet_no, ioaddr + PNR_ARR); + /* point to the beginning of the packet */ + outw(PTR_AUTOINC , ioaddr + POINTER); + + /* Send the packet length (+6 for status, length and ctl byte) + and the status word (set to zeros). */ + { + u_char *buf = skb->data; + u_int length = skb->len; /* The chip will pad to ethernet min. */ + + netdev_dbg(dev, "Trying to xmit packet of length %d\n", length); + + /* send the packet length: +6 for status word, length, and ctl */ + outw(0, ioaddr + DATA_1); + outw(length + 6, ioaddr + DATA_1); + outsw(ioaddr + DATA_1, buf, length >> 1); + + /* The odd last byte, if there is one, goes in the control word. */ + outw((length & 1) ? 0x2000 | buf[length-1] : 0, ioaddr + DATA_1); + } + + /* Enable the Tx interrupts, both Tx (TxErr) and TxEmpty. */ + outw(((IM_TX_INT|IM_TX_EMPTY_INT)<<8) | + (inw(ioaddr + INTERRUPT) & 0xff00), + ioaddr + INTERRUPT); + + /* The chip does the rest of the work. */ + outw(MC_ENQUEUE , ioaddr + MMU_CMD); + + smc->saved_skb = NULL; + dev_kfree_skb_irq(skb); + dev->trans_start = jiffies; + netif_start_queue(dev); +} + +/*====================================================================*/ + +static void smc_tx_timeout(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n", + inw(ioaddr)&0xff, inw(ioaddr + 2)); + dev->stats.tx_errors++; + smc_reset(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + smc->saved_skb = NULL; + netif_wake_queue(dev); +} + +static netdev_tx_t smc_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short num_pages; + short time_out, ir; + unsigned long flags; + + netif_stop_queue(dev); + + netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n", + skb->len, inw(ioaddr + 2)); + + if (smc->saved_skb) { + /* THIS SHOULD NEVER HAPPEN. */ + dev->stats.tx_aborted_errors++; + netdev_dbg(dev, "Internal error -- sent packet while busy\n"); + return NETDEV_TX_BUSY; + } + smc->saved_skb = skb; + + num_pages = skb->len >> 8; + + if (num_pages > 7) { + netdev_err(dev, "Far too big packet error: %d pages\n", num_pages); + dev_kfree_skb (skb); + smc->saved_skb = NULL; + dev->stats.tx_dropped++; + return NETDEV_TX_OK; /* Do not re-queue this packet. */ + } + /* A packet is now waiting. */ + smc->packets_waiting++; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */ + + /* need MC_RESET to keep the memory consistent. errata? */ + if (smc->rx_ovrn) { + outw(MC_RESET, ioaddr + MMU_CMD); + smc->rx_ovrn = 0; + } + + /* Allocate the memory; send the packet now if we win. */ + outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD); + for (time_out = MEMORY_WAIT_TIME; time_out >= 0; time_out--) { + ir = inw(ioaddr+INTERRUPT); + if (ir & IM_ALLOC_INT) { + /* Acknowledge the interrupt, send the packet. */ + outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT); + smc_hardware_send_packet(dev); /* Send the packet now.. */ + spin_unlock_irqrestore(&smc->lock, flags); + return NETDEV_TX_OK; + } + } + + /* Otherwise defer until the Tx-space-allocated interrupt. 
*/ + netdev_dbg(dev, "memory allocation deferred.\n"); + outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); + spin_unlock_irqrestore(&smc->lock, flags); + + return NETDEV_TX_OK; +} + +/*====================================================================== + + Handle a Tx anomalous event. Entered while in Window 2. + +======================================================================*/ + +static void smc_tx_err(struct net_device * dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + int saved_packet = inw(ioaddr + PNR_ARR) & 0xff; + int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f; + int tx_status; + + /* select this as the packet to read from */ + outw(packet_no, ioaddr + PNR_ARR); + + /* read the first word from this packet */ + outw(PTR_AUTOINC | PTR_READ | 0, ioaddr + POINTER); + + tx_status = inw(ioaddr + DATA_1); + + dev->stats.tx_errors++; + if (tx_status & TS_LOSTCAR) dev->stats.tx_carrier_errors++; + if (tx_status & TS_LATCOL) dev->stats.tx_window_errors++; + if (tx_status & TS_16COL) { + dev->stats.tx_aborted_errors++; + smc->tx_err++; + } + + if (tx_status & TS_SUCCESS) { + netdev_notice(dev, "Successful packet caused error interrupt?\n"); + } + /* re-enable transmit */ + SMC_SELECT_BANK(0); + outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR); + SMC_SELECT_BANK(2); + + outw(MC_FREEPKT, ioaddr + MMU_CMD); /* Free the packet memory. */ + + /* one less packet waiting for me */ + smc->packets_waiting--; + + outw(saved_packet, ioaddr + PNR_ARR); +} + +/*====================================================================*/ + +static void smc_eph_irq(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short card_stats, ephs; + + SMC_SELECT_BANK(0); + ephs = inw(ioaddr + EPH); + netdev_dbg(dev, "Ethernet protocol handler interrupt, status %4.4x.\n", + ephs); + /* Could be a counter roll-over warning: update stats. */ + card_stats = inw(ioaddr + COUNTER); + /* single collisions */ + dev->stats.collisions += card_stats & 0xF; + card_stats >>= 4; + /* multiple collisions */ + dev->stats.collisions += card_stats & 0xF; +#if 0 /* These are for when linux supports these statistics */ + card_stats >>= 4; /* deferred */ + card_stats >>= 4; /* excess deferred */ +#endif + /* If we had a transmit error we must re-enable the transmitter. */ + outw(inw(ioaddr + TCR) | TCR_ENABLE | smc->duplex, ioaddr + TCR); + + /* Clear a link error interrupt. */ + SMC_SELECT_BANK(1); + outw(CTL_AUTO_RELEASE | 0x0000, ioaddr + CONTROL); + outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE, + ioaddr + CONTROL); + SMC_SELECT_BANK(2); +} + +/*====================================================================*/ + +static irqreturn_t smc_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr; + u_short saved_bank, saved_pointer, mask, status; + unsigned int handled = 1; + char bogus_cnt = INTR_WORK; /* Work we are willing to do. */ + + if (!netif_device_present(dev)) + return IRQ_NONE; + + ioaddr = dev->base_addr; + + netdev_dbg(dev, "SMC91c92 interrupt %d at %#x.\n", + irq, ioaddr); + + spin_lock(&smc->lock); + smc->watchdog = 0; + saved_bank = inw(ioaddr + BANK_SELECT); + if ((saved_bank & 0xff00) != 0x3300) { + /* The device does not exist -- the card could be off-line, or + maybe it has been ejected. 
*/ + netdev_dbg(dev, "SMC91c92 interrupt %d for non-existent/ejected device.\n", + irq); + handled = 0; + goto irq_done; + } + + SMC_SELECT_BANK(2); + saved_pointer = inw(ioaddr + POINTER); + mask = inw(ioaddr + INTERRUPT) >> 8; + /* clear all interrupts */ + outw(0, ioaddr + INTERRUPT); + + do { /* read the status flag, and mask it */ + status = inw(ioaddr + INTERRUPT) & 0xff; + netdev_dbg(dev, "Status is %#2.2x (mask %#2.2x).\n", + status, mask); + if ((status & mask) == 0) { + if (bogus_cnt == INTR_WORK) + handled = 0; + break; + } + if (status & IM_RCV_INT) { + /* Got a packet(s). */ + smc_rx(dev); + } + if (status & IM_TX_INT) { + smc_tx_err(dev); + outw(IM_TX_INT, ioaddr + INTERRUPT); + } + status &= mask; + if (status & IM_TX_EMPTY_INT) { + outw(IM_TX_EMPTY_INT, ioaddr + INTERRUPT); + mask &= ~IM_TX_EMPTY_INT; + dev->stats.tx_packets += smc->packets_waiting; + smc->packets_waiting = 0; + } + if (status & IM_ALLOC_INT) { + /* Clear this interrupt so it doesn't happen again */ + mask &= ~IM_ALLOC_INT; + + smc_hardware_send_packet(dev); + + /* enable xmit interrupts based on this */ + mask |= (IM_TX_EMPTY_INT | IM_TX_INT); + + /* and let the card send more packets to me */ + netif_wake_queue(dev); + } + if (status & IM_RX_OVRN_INT) { + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + if (smc->duplex) + smc->rx_ovrn = 1; /* need MC_RESET outside smc_interrupt */ + outw(IM_RX_OVRN_INT, ioaddr + INTERRUPT); + } + if (status & IM_EPH_INT) + smc_eph_irq(dev); + } while (--bogus_cnt); + + netdev_dbg(dev, " Restoring saved registers mask %2.2x bank %4.4x pointer %4.4x.\n", + mask, saved_bank, saved_pointer); + + /* restore state register */ + outw((mask<<8), ioaddr + INTERRUPT); + outw(saved_pointer, ioaddr + POINTER); + SMC_SELECT_BANK(saved_bank); + + netdev_dbg(dev, "Exiting interrupt IRQ%d.\n", irq); + +irq_done: + + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) { + /* Retrigger interrupt if needed */ + mask_bits(0x00ff, ioaddr-0x10+OSITECH_RESET_ISR); + set_bits(0x0300, ioaddr-0x10+OSITECH_RESET_ISR); + } + if (smc->manfid == MANFID_MOTOROLA) { + u_char cor; + cor = readb(smc->base + MOT_UART + CISREG_COR); + writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_UART + CISREG_COR); + writeb(cor, smc->base + MOT_UART + CISREG_COR); + cor = readb(smc->base + MOT_LAN + CISREG_COR); + writeb(cor & ~COR_IREQ_ENA, smc->base + MOT_LAN + CISREG_COR); + writeb(cor, smc->base + MOT_LAN + CISREG_COR); + } + + if ((smc->base != NULL) && /* Megahertz MFC's */ + (smc->manfid == MANFID_MEGAHERTZ) && + (smc->cardid == PRODID_MEGAHERTZ_EM3288)) { + + u_char tmp; + tmp = readb(smc->base+MEGAHERTZ_ISR); + tmp = readb(smc->base+MEGAHERTZ_ISR); + + /* Retrigger interrupt if needed */ + writeb(tmp, smc->base + MEGAHERTZ_ISR); + writeb(tmp, smc->base + MEGAHERTZ_ISR); + } + + spin_unlock(&smc->lock); + return IRQ_RETVAL(handled); +} + +/*====================================================================*/ + +static void smc_rx(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + int rx_status; + int packet_length; /* Caution: not frame length, rather words + to transfer from the chip. */ + + /* Assertion: we are in Window 2. */ + + if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) { + netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n"); + return; + } + + /* Reset the read pointer, and read the status and packet length. 
*/ + outw(PTR_READ | PTR_RCV | PTR_AUTOINC, ioaddr + POINTER); + rx_status = inw(ioaddr + DATA_1); + packet_length = inw(ioaddr + DATA_1) & 0x07ff; + + netdev_dbg(dev, "Receive status %4.4x length %d.\n", + rx_status, packet_length); + + if (!(rx_status & RS_ERRORS)) { + /* do stuff to make a new packet */ + struct sk_buff *skb; + + /* Note: packet_length adds 5 or 6 extra bytes here! */ + skb = netdev_alloc_skb(dev, packet_length+2); + + if (skb == NULL) { + netdev_dbg(dev, "Low memory, packet dropped.\n"); + dev->stats.rx_dropped++; + outw(MC_RELEASE, ioaddr + MMU_CMD); + return; + } + + packet_length -= (rx_status & RS_ODDFRAME ? 5 : 6); + skb_reserve(skb, 2); + insw(ioaddr+DATA_1, skb_put(skb, packet_length), + (packet_length+1)>>1); + skb->protocol = eth_type_trans(skb, dev); + + netif_rx(skb); + dev->last_rx = jiffies; + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; + if (rx_status & RS_MULTICAST) + dev->stats.multicast++; + } else { + /* error ... */ + dev->stats.rx_errors++; + + if (rx_status & RS_ALGNERR) dev->stats.rx_frame_errors++; + if (rx_status & (RS_TOOSHORT | RS_TOOLONG)) + dev->stats.rx_length_errors++; + if (rx_status & RS_BADCRC) dev->stats.rx_crc_errors++; + } + /* Let the MMU free the memory of this packet. */ + outw(MC_RELEASE, ioaddr + MMU_CMD); +} + +/*====================================================================== + + Set the receive mode. + + This routine is used by both the protocol level to notify us of + promiscuous/multicast mode changes, and by the open/reset code to + initialize the Rx registers. We always set the multicast list and + leave the receiver running. + +======================================================================*/ + +static void set_rx_mode(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct smc_private *smc = netdev_priv(dev); + unsigned char multicast_table[8]; + unsigned long flags; + u_short rx_cfg_setting; + int i; + + memset(multicast_table, 0, sizeof(multicast_table)); + + if (dev->flags & IFF_PROMISC) { + rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; + } else if (dev->flags & IFF_ALLMULTI) + rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; + else { + if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + u_int position = ether_crc(6, ha->addr); + multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); + } + } + rx_cfg_setting = RxStripCRC | RxEnable; + } + + /* Load MC table and Rx setting into the chip without interrupts. */ + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + for (i = 0; i < 8; i++) + outb(multicast_table[i], ioaddr + MULTICAST0 + i); + SMC_SELECT_BANK(0); + outw(rx_cfg_setting, ioaddr + RCR); + SMC_SELECT_BANK(2); + spin_unlock_irqrestore(&smc->lock, flags); +} + +/*====================================================================== + + Senses when a card's config changes. Here, it's coax or TP. 
+ +======================================================================*/ + +static int s9k_config(struct net_device *dev, struct ifmap *map) +{ + struct smc_private *smc = netdev_priv(dev); + if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { + if (smc->cfg & CFG_MII_SELECT) + return -EOPNOTSUPP; + else if (map->port > 2) + return -EINVAL; + dev->if_port = map->port; + netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); + smc_reset(dev); + } + return 0; +} + +/*====================================================================== + + Reset the chip, reloading every register that might be corrupted. + +======================================================================*/ + +/* + Set transceiver type, perhaps to something other than what the user + specified in dev->if_port. +*/ +static void smc_set_xcvr(struct net_device *dev, int if_port) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short saved_bank; + + saved_bank = inw(ioaddr + BANK_SELECT); + SMC_SELECT_BANK(1); + if (if_port == 2) { + outw(smc->cfg | CFG_AUI_SELECT, ioaddr + CONFIG); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) + set_bits(OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR); + smc->media_status = ((dev->if_port == 0) ? 0x0001 : 0x0002); + } else { + outw(smc->cfg, ioaddr + CONFIG); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) + mask_bits(~OSI_AUI_PWR, ioaddr - 0x10 + OSITECH_AUI_PWR); + smc->media_status = ((dev->if_port == 0) ? 0x0012 : 0x4001); + } + SMC_SELECT_BANK(saved_bank); +} + +static void smc_reset(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct smc_private *smc = netdev_priv(dev); + int i; + + netdev_dbg(dev, "smc91c92 reset called.\n"); + + /* The first interaction must be a write to bring the chip out + of sleep mode. */ + SMC_SELECT_BANK(0); + /* Reset the chip. */ + outw(RCR_SOFTRESET, ioaddr + RCR); + udelay(10); + + /* Clear the transmit and receive configuration registers. */ + outw(RCR_CLEAR, ioaddr + RCR); + outw(TCR_CLEAR, ioaddr + TCR); + + /* Set the Window 1 control, configuration and station addr registers. + No point in writing the I/O base register ;-> */ + SMC_SELECT_BANK(1); + /* Automatically release successfully transmitted packets, + Accept link errors, counter and Tx error interrupts. */ + outw(CTL_AUTO_RELEASE | CTL_TE_ENABLE | CTL_CR_ENABLE, + ioaddr + CONTROL); + smc_set_xcvr(dev, dev->if_port); + if ((smc->manfid == MANFID_OSITECH) && + (smc->cardid != PRODID_OSITECH_SEVEN)) + outw((dev->if_port == 2 ? OSI_AUI_PWR : 0) | + (inw(ioaddr-0x10+OSITECH_AUI_PWR) & 0xff00), + ioaddr - 0x10 + OSITECH_AUI_PWR); + + /* Fill in the physical address. The databook is wrong about the order! */ + for (i = 0; i < 6; i += 2) + outw((dev->dev_addr[i+1]<<8)+dev->dev_addr[i], + ioaddr + ADDR0 + i); + + /* Reset the MMU */ + SMC_SELECT_BANK(2); + outw(MC_RESET, ioaddr + MMU_CMD); + outw(0, ioaddr + INTERRUPT); + + /* Re-enable the chip. */ + SMC_SELECT_BANK(0); + outw(((smc->cfg & CFG_MII_SELECT) ? 
0 : TCR_MONCSN) | + TCR_ENABLE | TCR_PAD_EN | smc->duplex, ioaddr + TCR); + set_rx_mode(dev); + + if (smc->cfg & CFG_MII_SELECT) { + SMC_SELECT_BANK(3); + + /* Reset MII */ + mdio_write(dev, smc->mii_if.phy_id, 0, 0x8000); + + /* Advertise 100F, 100H, 10F, 10H */ + mdio_write(dev, smc->mii_if.phy_id, 4, 0x01e1); + + /* Restart MII autonegotiation */ + mdio_write(dev, smc->mii_if.phy_id, 0, 0x0000); + mdio_write(dev, smc->mii_if.phy_id, 0, 0x1200); + } + + /* Enable interrupts. */ + SMC_SELECT_BANK(2); + outw((IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT) << 8, + ioaddr + INTERRUPT); +} + +/*====================================================================== + + Media selection timer routine + +======================================================================*/ + +static void media_check(u_long arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u_short i, media, saved_bank; + u_short link; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + + saved_bank = inw(ioaddr + BANK_SELECT); + + if (!netif_device_present(dev)) + goto reschedule; + + SMC_SELECT_BANK(2); + + /* need MC_RESET to keep the memory consistent. errata? */ + if (smc->rx_ovrn) { + outw(MC_RESET, ioaddr + MMU_CMD); + smc->rx_ovrn = 0; + } + i = inw(ioaddr + INTERRUPT); + SMC_SELECT_BANK(0); + media = inw(ioaddr + EPH) & EPH_LINK_OK; + SMC_SELECT_BANK(1); + media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; + + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + + /* Check for pending interrupt with watchdog flag set: with + this, we can limp along even if the interrupt is blocked */ + if (smc->watchdog++ && ((i>>8) & i)) { + if (!smc->fast_poll) + netdev_info(dev, "interrupt(s) dropped!\n"); + local_irq_save(flags); + smc_interrupt(dev->irq, dev); + local_irq_restore(flags); + smc->fast_poll = HZ; + } + if (smc->fast_poll) { + smc->fast_poll--; + smc->media.expires = jiffies + HZ/100; + add_timer(&smc->media); + return; + } + + spin_lock_irqsave(&smc->lock, flags); + + saved_bank = inw(ioaddr + BANK_SELECT); + + if (smc->cfg & CFG_MII_SELECT) { + if (smc->mii_if.phy_id < 0) + goto reschedule; + + SMC_SELECT_BANK(3); + link = mdio_read(dev, smc->mii_if.phy_id, 1); + if (!link || (link == 0xffff)) { + netdev_info(dev, "MII is missing!\n"); + smc->mii_if.phy_id = -1; + goto reschedule; + } + + link &= 0x0004; + if (link != smc->link_status) { + u_short p = mdio_read(dev, smc->mii_if.phy_id, 5); + netdev_info(dev, "%s link beat\n", link ? "found" : "lost"); + smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40)) + ? TCR_FDUPLX : 0); + if (link) { + netdev_info(dev, "autonegotiation complete: " + "%dbaseT-%cD selected\n", + (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H'); + } + SMC_SELECT_BANK(0); + outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR); + smc->link_status = link; + } + goto reschedule; + } + + /* Ignore collisions unless we've had no rx's recently */ + if (time_after(jiffies, dev->last_rx + HZ)) { + if (smc->tx_err || (smc->media_status & EPH_16COL)) + media |= EPH_16COL; + } + smc->tx_err = 0; + + if (media != smc->media_status) { + if ((media & smc->media_status & 1) && + ((smc->media_status ^ media) & EPH_LINK_OK)) + netdev_info(dev, "%s link beat\n", + smc->media_status & EPH_LINK_OK ? "lost" : "found"); + else if ((media & smc->media_status & 2) && + ((smc->media_status ^ media) & EPH_16COL)) + netdev_info(dev, "coax cable %s\n", + media & EPH_16COL ? 
"problem" : "ok"); + if (dev->if_port == 0) { + if (media & 1) { + if (media & EPH_LINK_OK) + netdev_info(dev, "flipped to 10baseT\n"); + else + smc_set_xcvr(dev, 2); + } else { + if (media & EPH_16COL) + smc_set_xcvr(dev, 1); + else + netdev_info(dev, "flipped to 10base2\n"); + } + } + smc->media_status = media; + } + +reschedule: + smc->media.expires = jiffies + HZ; + add_timer(&smc->media); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); +} + +static int smc_link_ok(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct smc_private *smc = netdev_priv(dev); + + if (smc->cfg & CFG_MII_SELECT) { + return mii_link_ok(&smc->mii_if); + } else { + SMC_SELECT_BANK(0); + return inw(ioaddr + EPH) & EPH_LINK_OK; + } +} + +static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + unsigned int ioaddr = dev->base_addr; + + ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); + + SMC_SELECT_BANK(1); + tmp = inw(ioaddr + CONFIG); + ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->phy_address = ioaddr + MGMT; + + SMC_SELECT_BANK(0); + tmp = inw(ioaddr + TCR); + ecmd->duplex = (tmp & TCR_FDUPLX) ? DUPLEX_FULL : DUPLEX_HALF; + + return 0; +} + +static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + u16 tmp; + unsigned int ioaddr = dev->base_addr; + + if (ethtool_cmd_speed(ecmd) != SPEED_10) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + + if (ecmd->port == PORT_AUI) + smc_set_xcvr(dev, 1); + else + smc_set_xcvr(dev, 0); + + SMC_SELECT_BANK(0); + tmp = inw(ioaddr + TCR); + if (ecmd->duplex == DUPLEX_FULL) + tmp |= TCR_FDUPLX; + else + tmp &= ~TCR_FDUPLX; + outw(tmp, ioaddr + TCR); + + return 0; +} + +static int check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EINVAL; + return 0; +} + +static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 saved_bank = inw(ioaddr + BANK_SELECT); + int ret; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + if (smc->cfg & CFG_MII_SELECT) + ret = mii_ethtool_gset(&smc->mii_if, ecmd); + else + ret = smc_netdev_get_ecmd(dev, ecmd); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return ret; +} + +static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct smc_private *smc = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + u16 saved_bank = inw(ioaddr + BANK_SELECT); + int ret; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + if (smc->cfg & CFG_MII_SELECT) + ret = mii_ethtool_sset(&smc->mii_if, ecmd); + else + ret = smc_netdev_set_ecmd(dev, ecmd); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return ret; +} + +static u32 smc_get_link(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); 
+ unsigned int ioaddr = dev->base_addr; + u16 saved_bank = inw(ioaddr + BANK_SELECT); + u32 ret; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); + SMC_SELECT_BANK(3); + ret = smc_link_ok(dev); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return ret; +} + +static int smc_nway_reset(struct net_device *dev) +{ + struct smc_private *smc = netdev_priv(dev); + if (smc->cfg & CFG_MII_SELECT) { + unsigned int ioaddr = dev->base_addr; + u16 saved_bank = inw(ioaddr + BANK_SELECT); + int res; + + SMC_SELECT_BANK(3); + res = mii_nway_restart(&smc->mii_if); + SMC_SELECT_BANK(saved_bank); + + return res; + } else + return -EOPNOTSUPP; +} + +static const struct ethtool_ops ethtool_ops = { + .begin = check_if_running, + .get_drvinfo = smc_get_drvinfo, + .get_settings = smc_get_settings, + .set_settings = smc_set_settings, + .get_link = smc_get_link, + .nway_reset = smc_nway_reset, +}; + +static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct smc_private *smc = netdev_priv(dev); + struct mii_ioctl_data *mii = if_mii(rq); + int rc = 0; + u16 saved_bank; + unsigned int ioaddr = dev->base_addr; + unsigned long flags; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irqsave(&smc->lock, flags); + saved_bank = inw(ioaddr + BANK_SELECT); + SMC_SELECT_BANK(3); + rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); + SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); + return rc; +} + +static const struct pcmcia_device_id smc91c92_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0109, 0x0501), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0140, 0x000a), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3288", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x04cd2988, 0x46a52d63), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "CC/XJEM3336", "DATA/FAX/CELL ETHERNET MODEM", 0xf510db04, 0x0143b773, 0x46a52d63), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "EM1144T", "PCMCIA MODEM", 0xf510db04, 0x856d66c8, 0xbd6c43ef), + PCMCIA_PFC_DEVICE_PROD_ID123(0, "MEGAHERTZ", "XJEM1144/CCEM1144", "PCMCIA MODEM", 0xf510db04, 0x52d21e1e, 0xbd6c43ef), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed), + PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x016c, 0x0020), + PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0023), + PCMCIA_DEVICE_PROD_ID123("BASICS by New Media Corporation", "Ethernet", "SMC91C94", 0x23c78a9d, 0x00b2e941, 0xcef397fb), + PCMCIA_DEVICE_PROD_ID12("ARGOSY", "Fast Ethernet PCCard", 0x78f308dc, 0xdcea68bc), + PCMCIA_DEVICE_PROD_ID12("dit Co., Ltd.", "PC Card-10/100BTX", 0xe59365c8, 0x6a2161d1), + PCMCIA_DEVICE_PROD_ID12("DYNALINK", "L100C", 0x6a26d1cf, 0xc16ce9c5), + PCMCIA_DEVICE_PROD_ID12("Farallon", "Farallon Enet", 0x58d93fc4, 0x244734e9), + PCMCIA_DEVICE_PROD_ID12("Megahertz", "CC10BT/2", 0x33234748, 0x3c95b953), + PCMCIA_DEVICE_PROD_ID12("MELCO/SMC", "LPC-TX", 0xa2cd8e6d, 0x42da662a), + PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Four of Diamonds Ethernet", 0xc2f80cd, 0xb3466314), + PCMCIA_DEVICE_PROD_ID12("Ositech", "Trumpcard:Seven of Diamonds Ethernet", 0xc2f80cd, 0x194b650a), + PCMCIA_DEVICE_PROD_ID12("PCMCIA", "Fast Ethernet PCCard", 0x281f1c5d, 0xdcea68bc), + 
PCMCIA_DEVICE_PROD_ID12("Psion", "10Mb Ethernet", 0x4ef00b21, 0x844be9e9), + PCMCIA_DEVICE_PROD_ID12("SMC", "EtherEZ Ethernet 8020", 0xc4f8b18b, 0x4a0eeb2d), + /* These conflict with other cards! */ + /* PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0100), */ + /* PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), */ + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, smc91c92_ids); + +static struct pcmcia_driver smc91c92_cs_driver = { + .owner = THIS_MODULE, + .name = "smc91c92_cs", + .probe = smc91c92_probe, + .remove = smc91c92_detach, + .id_table = smc91c92_ids, + .suspend = smc91c92_suspend, + .resume = smc91c92_resume, +}; +module_pcmcia_driver(smc91c92_cs_driver); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c new file mode 100644 index 000000000..630f0b780 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -0,0 +1,2476 @@ +/* + * smc91x.c + * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices. + * + * Copyright (C) 1996 by Erik Stahlman + * Copyright (C) 2001 Standard Microsystems Corporation + * Developed by Simple Network Magic Corporation + * Copyright (C) 2003 Monta Vista Software, Inc. + * Unified SMC91x driver by Nicolas Pitre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * Arguments: + * io = for the base address + * irq = for the IRQ + * nowait = 0 for normal wait states, 1 eliminates additional wait states + * + * original author: + * Erik Stahlman + * + * hardware multicast code: + * Peter Cammaert + * + * contributors: + * Daris A Nevil + * Nicolas Pitre + * Russell King + * + * History: + * 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet + * 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ" + * 03/16/01 Daris A Nevil modified smc9194.c for use with LAN91C111 + * 08/22/01 Scott Anderson merge changes from smc9194 to smc91111 + * 08/21/01 Pramod B Bhardwaj added support for RevB of LAN91C111 + * 12/20/01 Jeff Sutherland initial port to Xscale PXA with DMA support + * 04/07/03 Nicolas Pitre unified SMC91x driver, killed irq races, + * more bus abstraction, big cleanup, etc. 
+ * 29/09/03 Russell King - add driver model support + * - ethtool support + * - convert to use generic MII interface + * - add link up/down notification + * - don't try to handle full negotiation in + * smc_phy_configure + * - clean up (and fix stack overrun) in PHY + * MII read/write functions + * 22/09/04 Nicolas Pitre big update (see commit log for details) + */ +static const char version[] = + "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre "; + +/* Debugging level */ +#ifndef SMC_DEBUG +#define SMC_DEBUG 0 +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "smc91x.h" + +#if defined(CONFIG_ASSABET_NEPONSET) +#include +#include +#endif + +#ifndef SMC_NOWAIT +# define SMC_NOWAIT 0 +#endif +static int nowait = SMC_NOWAIT; +module_param(nowait, int, 0400); +MODULE_PARM_DESC(nowait, "set to 1 for no wait state"); + +/* + * Transmit timeout, default 5 seconds. + */ +static int watchdog = 1000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:smc91x"); + +/* + * The internal workings of the driver. If you are changing anything + * here with the SMC stuff, you should have the datasheet and know + * what you are doing. + */ +#define CARDNAME "smc91x" + +/* + * Use power-down feature of the chip + */ +#define POWER_DOWN 1 + +/* + * Wait time for memory to be free. This probably shouldn't be + * tuned that much, as waiting for this means nothing else happens + * in the system + */ +#define MEMORY_WAIT_TIME 16 + +/* + * The maximum number of processing loops allowed for each call to the + * IRQ handler. + */ +#define MAX_IRQ_LOOPS 8 + +/* + * This selects whether TX packets are sent one by one to the SMC91x internal + * memory and throttled until transmission completes. This may prevent + * RX overruns a litle by keeping much of the memory free for RX packets + * but to the expense of reduced TX throughput and increased IRQ overhead. + * Note this is not a cure for a too slow data bus or too high IRQ latency. + */ +#define THROTTLE_TX_PKTS 0 + +/* + * The MII clock high/low times. 2x this number gives the MII clock period + * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!) + */ +#define MII_DELAY 1 + +#define DBG(n, dev, fmt, ...) \ + do { \ + if (SMC_DEBUG >= (n)) \ + netdev_dbg(dev, fmt, ##__VA_ARGS__); \ + } while (0) + +#define PRINTK(dev, fmt, ...) 
\ + do { \ + if (SMC_DEBUG > 0) \ + netdev_info(dev, fmt, ##__VA_ARGS__); \ + else \ + netdev_dbg(dev, fmt, ##__VA_ARGS__); \ + } while (0) + +#if SMC_DEBUG > 3 +static void PRINT_PKT(u_char *buf, int length) +{ + int i; + int remainder; + int lines; + + lines = length / 16; + remainder = length % 16; + + for (i = 0; i < lines ; i ++) { + int cur; + printk(KERN_DEBUG); + for (cur = 0; cur < 8; cur++) { + u_char a, b; + a = *buf++; + b = *buf++; + pr_cont("%02x%02x ", a, b); + } + pr_cont("\n"); + } + printk(KERN_DEBUG); + for (i = 0; i < remainder/2 ; i++) { + u_char a, b; + a = *buf++; + b = *buf++; + pr_cont("%02x%02x ", a, b); + } + pr_cont("\n"); +} +#else +static inline void PRINT_PKT(u_char *buf, int length) { } +#endif + + +/* this enables an interrupt in the interrupt mask register */ +#define SMC_ENABLE_INT(lp, x) do { \ + unsigned char mask; \ + unsigned long smc_enable_flags; \ + spin_lock_irqsave(&lp->lock, smc_enable_flags); \ + mask = SMC_GET_INT_MASK(lp); \ + mask |= (x); \ + SMC_SET_INT_MASK(lp, mask); \ + spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \ +} while (0) + +/* this disables an interrupt from the interrupt mask register */ +#define SMC_DISABLE_INT(lp, x) do { \ + unsigned char mask; \ + unsigned long smc_disable_flags; \ + spin_lock_irqsave(&lp->lock, smc_disable_flags); \ + mask = SMC_GET_INT_MASK(lp); \ + mask &= ~(x); \ + SMC_SET_INT_MASK(lp, mask); \ + spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \ +} while (0) + +/* + * Wait while MMU is busy. This is usually in the order of a few nanosecs + * if at all, but let's avoid deadlocking the system if the hardware + * decides to go south. + */ +#define SMC_WAIT_MMU_BUSY(lp) do { \ + if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) { \ + unsigned long timeout = jiffies + 2; \ + while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \ + if (time_after(jiffies, timeout)) { \ + netdev_dbg(dev, "timeout %s line %d\n", \ + __FILE__, __LINE__); \ + break; \ + } \ + cpu_relax(); \ + } \ + } \ +} while (0) + + +/* + * this does a soft reset on the device + */ +static void smc_reset(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int ctl, cfg; + struct sk_buff *pending_skb; + + DBG(2, dev, "%s\n", __func__); + + /* Disable all interrupts, block TX tasklet */ + spin_lock_irq(&lp->lock); + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, 0); + pending_skb = lp->pending_tx_skb; + lp->pending_tx_skb = NULL; + spin_unlock_irq(&lp->lock); + + /* free any pending tx skb */ + if (pending_skb) { + dev_kfree_skb(pending_skb); + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + } + + /* + * This resets the registers mostly to defaults, but doesn't + * affect EEPROM. That seems unnecessary + */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, RCR_SOFTRST); + + /* + * Setup the Configuration Register + * This is necessary because the CONFIG_REG is not affected + * by a soft reset + */ + SMC_SELECT_BANK(lp, 1); + + cfg = CONFIG_DEFAULT; + + /* + * Setup for fast accesses if requested. If the card/system + * can't handle it then there will be no recovery except for + * a hard reset or power cycle + */ + if (lp->cfg.flags & SMC91X_NOWAIT) + cfg |= CONFIG_NO_WAIT; + + /* + * Release from possible power-down state + * Configuration register is not affected by Soft Reset + */ + cfg |= CONFIG_EPH_POWER_EN; + + SMC_SET_CONFIG(lp, cfg); + + /* this should pause enough for the chip to be happy */ + /* + * elaborate? What does the chip _need_? 
--jgarzik + * + * This seems to be undocumented, but something the original + * driver(s) have always done. Suspect undocumented timing + * info/determined empirically. --rmk + */ + udelay(1); + + /* Disable transmit and receive functionality */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, RCR_CLEAR); + SMC_SET_TCR(lp, TCR_CLEAR); + + SMC_SELECT_BANK(lp, 1); + ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE; + + /* + * Set the control register to automatically release successfully + * transmitted packets, to make the best use out of our limited + * memory + */ + if(!THROTTLE_TX_PKTS) + ctl |= CTL_AUTO_RELEASE; + else + ctl &= ~CTL_AUTO_RELEASE; + SMC_SET_CTL(lp, ctl); + + /* Reset the MMU */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_MMU_CMD(lp, MC_RESET); + SMC_WAIT_MMU_BUSY(lp); +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static void smc_enable(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int mask; + + DBG(2, dev, "%s\n", __func__); + + /* see the header file for options in TCR/RCR DEFAULT */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_TCR(lp, lp->tcr_cur_mode); + SMC_SET_RCR(lp, lp->rcr_cur_mode); + + SMC_SELECT_BANK(lp, 1); + SMC_SET_MAC_ADDR(lp, dev->dev_addr); + + /* now, enable interrupts */ + mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT; + if (lp->version >= (CHIP_91100 << 4)) + mask |= IM_MDINT; + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, mask); + + /* + * From this point the register bank must _NOT_ be switched away + * to something else than bank 2 without proper locking against + * races with any tasklet or interrupt handlers until smc_shutdown() + * or smc_reset() is called. + */ +} + +/* + * this puts the device in an inactive state + */ +static void smc_shutdown(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + struct sk_buff *pending_skb; + + DBG(2, dev, "%s: %s\n", CARDNAME, __func__); + + /* no more interrupts for me */ + spin_lock_irq(&lp->lock); + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, 0); + pending_skb = lp->pending_tx_skb; + lp->pending_tx_skb = NULL; + spin_unlock_irq(&lp->lock); + if (pending_skb) + dev_kfree_skb(pending_skb); + + /* and tell the card to stay away from that nasty outside world */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, RCR_CLEAR); + SMC_SET_TCR(lp, TCR_CLEAR); + +#ifdef POWER_DOWN + /* finally, shut the chip down */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN); +#endif +} + +/* + * This is the procedure to handle the receipt of a packet. 
+ */ +static inline void smc_rcv(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int packet_number, status, packet_len; + + DBG(3, dev, "%s\n", __func__); + + packet_number = SMC_GET_RXFIFO(lp); + if (unlikely(packet_number & RXFIFO_REMPTY)) { + PRINTK(dev, "smc_rcv with nothing on FIFO.\n"); + return; + } + + /* read from start of packet */ + SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC); + + /* First two words are status and packet length */ + SMC_GET_PKT_HDR(lp, status, packet_len); + packet_len &= 0x07ff; /* mask off top bits */ + DBG(2, dev, "RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n", + packet_number, status, packet_len, packet_len); + + back: + if (unlikely(packet_len < 6 || status & RS_ERRORS)) { + if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) { + /* accept VLAN packets */ + status &= ~RS_TOOLONG; + goto back; + } + if (packet_len < 6) { + /* bloody hardware */ + netdev_err(dev, "fubar (rxlen %u status %x\n", + packet_len, status); + status |= RS_TOOSHORT; + } + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_RELEASE); + dev->stats.rx_errors++; + if (status & RS_ALGNERR) + dev->stats.rx_frame_errors++; + if (status & (RS_TOOSHORT | RS_TOOLONG)) + dev->stats.rx_length_errors++; + if (status & RS_BADCRC) + dev->stats.rx_crc_errors++; + } else { + struct sk_buff *skb; + unsigned char *data; + unsigned int data_len; + + /* set multicast stats */ + if (status & RS_MULTICAST) + dev->stats.multicast++; + + /* + * Actual payload is packet_len - 6 (or 5 if odd byte). + * We want skb_reserve(2) and the final ctrl word + * (2 bytes, possibly containing the payload odd byte). + * Furthermore, we add 2 bytes to allow rounding up to + * multiple of 4 bytes on 32 bit buses. + * Hence packet_len - 6 + 2 + 2 + 2. + */ + skb = netdev_alloc_skb(dev, packet_len); + if (unlikely(skb == NULL)) { + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_RELEASE); + dev->stats.rx_dropped++; + return; + } + + /* Align IP header to 32 bits */ + skb_reserve(skb, 2); + + /* BUG: the LAN91C111 rev A never sets this bit. Force it. */ + if (lp->version == 0x90) + status |= RS_ODDFRAME; + + /* + * If odd length: packet_len - 5, + * otherwise packet_len - 6. + * With the trailing ctrl byte it's packet_len - 4. + */ + data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6); + data = skb_put(skb, data_len); + SMC_PULL_DATA(lp, data, packet_len - 4); + + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_RELEASE); + + PRINT_PKT(data, packet_len - 4); + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += data_len; + } +} + +#ifdef CONFIG_SMP +/* + * On SMP we have the following problem: + * + * A = smc_hardware_send_pkt() + * B = smc_hard_start_xmit() + * C = smc_interrupt() + * + * A and B can never be executed simultaneously. However, at least on UP, + * it is possible (and even desirable) for C to interrupt execution of + * A or B in order to have better RX reliability and avoid overruns. + * C, just like A and B, must have exclusive access to the chip and + * each of them must lock against any other concurrent access. + * Unfortunately this is not possible to have C suspend execution of A or + * B taking place on another CPU. On UP this is no an issue since A and B + * are run from softirq context and C from hard IRQ context, and there is + * no other CPU where concurrent access can happen. 
+ * If ever there is a way to force at least B and C to always be executed + * on the same CPU then we could use read/write locks to protect against + * any other concurrent access and C would always interrupt B. But life + * isn't that easy in a SMP world... + */ +#define smc_special_trylock(lock, flags) \ +({ \ + int __ret; \ + local_irq_save(flags); \ + __ret = spin_trylock(lock); \ + if (!__ret) \ + local_irq_restore(flags); \ + __ret; \ +}) +#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) +#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) +#else +#define smc_special_trylock(lock, flags) (flags == flags) +#define smc_special_lock(lock, flags) do { flags = 0; } while (0) +#define smc_special_unlock(lock, flags) do { flags = 0; } while (0) +#endif + +/* + * This is called to actually send a packet to the chip. + */ +static void smc_hardware_send_pkt(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + struct sk_buff *skb; + unsigned int packet_no, len; + unsigned char *buf; + unsigned long flags; + + DBG(3, dev, "%s\n", __func__); + + if (!smc_special_trylock(&lp->lock, flags)) { + netif_stop_queue(dev); + tasklet_schedule(&lp->tx_task); + return; + } + + skb = lp->pending_tx_skb; + if (unlikely(!skb)) { + smc_special_unlock(&lp->lock, flags); + return; + } + lp->pending_tx_skb = NULL; + + packet_no = SMC_GET_AR(lp); + if (unlikely(packet_no & AR_FAILED)) { + netdev_err(dev, "Memory allocation failed.\n"); + dev->stats.tx_errors++; + dev->stats.tx_fifo_errors++; + smc_special_unlock(&lp->lock, flags); + goto done; + } + + /* point to the beginning of the packet */ + SMC_SET_PN(lp, packet_no); + SMC_SET_PTR(lp, PTR_AUTOINC); + + buf = skb->data; + len = skb->len; + DBG(2, dev, "TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n", + packet_no, len, len, buf); + PRINT_PKT(buf, len); + + /* + * Send the packet length (+6 for status words, length, and ctl. + * The card will pad to 64 bytes with zeroes if packet is too small. + */ + SMC_PUT_PKT_HDR(lp, 0, len + 6); + + /* send the actual data */ + SMC_PUSH_DATA(lp, buf, len & ~1); + + /* Send final ctl word with the last byte if there is one */ + SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp)); + + /* + * If THROTTLE_TX_PKTS is set, we stop the queue here. This will + * have the effect of having at most one packet queued for TX + * in the chip's memory at all time. + * + * If THROTTLE_TX_PKTS is not set then the queue is stopped only + * when memory allocation (MC_ALLOC) does not succeed right away. + */ + if (THROTTLE_TX_PKTS) + netif_stop_queue(dev); + + /* queue the packet for TX */ + SMC_SET_MMU_CMD(lp, MC_ENQUEUE); + smc_special_unlock(&lp->lock, flags); + + dev->trans_start = jiffies; + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; + + SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT); + +done: if (!THROTTLE_TX_PKTS) + netif_wake_queue(dev); + + dev_consume_skb_any(skb); +} + +/* + * Since I am not sure if I will have enough room in the chip's ram + * to store the packet, I call this routine which either sends it + * now, or set the card to generates an interrupt when ready + * for the packet. 
+ */ +static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int numPages, poll_count, status; + unsigned long flags; + + DBG(3, dev, "%s\n", __func__); + + BUG_ON(lp->pending_tx_skb != NULL); + + /* + * The MMU wants the number of pages to be the number of 256 bytes + * 'pages', minus 1 (since a packet can't ever have 0 pages :)) + * + * The 91C111 ignores the size bits, but earlier models don't. + * + * Pkt size for allocating is data length +6 (for additional status + * words, length and ctl) + * + * If odd size then last byte is included in ctl word. + */ + numPages = ((skb->len & ~1) + (6 - 1)) >> 8; + if (unlikely(numPages > 7)) { + netdev_warn(dev, "Far too big packet error.\n"); + dev->stats.tx_errors++; + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + smc_special_lock(&lp->lock, flags); + + /* now, try to allocate the memory */ + SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages); + + /* + * Poll the chip for a short amount of time in case the + * allocation succeeds quickly. + */ + poll_count = MEMORY_WAIT_TIME; + do { + status = SMC_GET_INT(lp); + if (status & IM_ALLOC_INT) { + SMC_ACK_INT(lp, IM_ALLOC_INT); + break; + } + } while (--poll_count); + + smc_special_unlock(&lp->lock, flags); + + lp->pending_tx_skb = skb; + if (!poll_count) { + /* oh well, wait until the chip finds memory later */ + netif_stop_queue(dev); + DBG(2, dev, "TX memory allocation deferred.\n"); + SMC_ENABLE_INT(lp, IM_ALLOC_INT); + } else { + /* + * Allocation succeeded: push packet to the chip's own memory + * immediately. + */ + smc_hardware_send_pkt((unsigned long)dev); + } + + return NETDEV_TX_OK; +} + +/* + * This handles a TX interrupt, which is only called when: + * - a TX error occurred, or + * - CTL_AUTO_RELEASE is not set and TX of a packet completed. + */ +static void smc_tx(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int saved_packet, packet_no, tx_status, pkt_len; + + DBG(3, dev, "%s\n", __func__); + + /* If the TX FIFO is empty then nothing to do */ + packet_no = SMC_GET_TXFIFO(lp); + if (unlikely(packet_no & TXFIFO_TEMPTY)) { + PRINTK(dev, "smc_tx with nothing on FIFO.\n"); + return; + } + + /* select packet to read from */ + saved_packet = SMC_GET_PN(lp); + SMC_SET_PN(lp, packet_no); + + /* read the first word (status word) from this packet */ + SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ); + SMC_GET_PKT_HDR(lp, tx_status, pkt_len); + DBG(2, dev, "TX STATUS 0x%04x PNR 0x%02x\n", + tx_status, packet_no); + + if (!(tx_status & ES_TX_SUC)) + dev->stats.tx_errors++; + + if (tx_status & ES_LOSTCARR) + dev->stats.tx_carrier_errors++; + + if (tx_status & (ES_LATCOL | ES_16COL)) { + PRINTK(dev, "%s occurred on last xmit\n", + (tx_status & ES_LATCOL) ? + "late collision" : "too many collisions"); + dev->stats.tx_window_errors++; + if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) { + netdev_info(dev, "unexpectedly large number of bad collisions. 
Please check duplex setting.\n"); + } + } + + /* kill the packet */ + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_MMU_CMD(lp, MC_FREEPKT); + + /* Don't restore Packet Number Reg until busy bit is cleared */ + SMC_WAIT_MMU_BUSY(lp); + SMC_SET_PN(lp, saved_packet); + + /* re-enable transmit */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_TCR(lp, lp->tcr_cur_mode); + SMC_SELECT_BANK(lp, 2); +} + + +/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ + +static void smc_mii_out(struct net_device *dev, unsigned int val, int bits) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int mii_reg, mask; + + mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO); + mii_reg |= MII_MDOE; + + for (mask = 1 << (bits - 1); mask; mask >>= 1) { + if (val & mask) + mii_reg |= MII_MDO; + else + mii_reg &= ~MII_MDO; + + SMC_SET_MII(lp, mii_reg); + udelay(MII_DELAY); + SMC_SET_MII(lp, mii_reg | MII_MCLK); + udelay(MII_DELAY); + } +} + +static unsigned int smc_mii_in(struct net_device *dev, int bits) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int mii_reg, mask, val; + + mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO); + SMC_SET_MII(lp, mii_reg); + + for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) { + if (SMC_GET_MII(lp) & MII_MDI) + val |= mask; + + SMC_SET_MII(lp, mii_reg); + udelay(MII_DELAY); + SMC_SET_MII(lp, mii_reg | MII_MCLK); + udelay(MII_DELAY); + } + + return val; +} + +/* + * Reads a register from the MII Management serial interface + */ +static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int phydata; + + SMC_SELECT_BANK(lp, 3); + + /* Idle - 32 ones */ + smc_mii_out(dev, 0xffffffff, 32); + + /* Start code (01) + read (10) + phyaddr + phyreg */ + smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14); + + /* Turnaround (2bits) + phydata */ + phydata = smc_mii_in(dev, 18); + + /* Return to idle state */ + SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); + + DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); + + SMC_SELECT_BANK(lp, 2); + return phydata; +} + +/* + * Writes a register to the MII Management serial interface + */ +static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg, + int phydata) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + SMC_SELECT_BANK(lp, 3); + + /* Idle - 32 ones */ + smc_mii_out(dev, 0xffffffff, 32); + + /* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */ + smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32); + + /* Return to idle state */ + SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); + + DBG(3, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __func__, phyaddr, phyreg, phydata); + + SMC_SELECT_BANK(lp, 2); +} + +/* + * Finds and reports the PHY address + */ +static void smc_phy_detect(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int phyaddr; + + DBG(2, dev, "%s\n", __func__); + + lp->phy_type = 0; + + /* + * Scan all 32 PHY addresses if necessary, starting at + * PHY#1 to PHY#31, and then PHY#0 last. 
+ */ + for (phyaddr = 1; phyaddr < 33; ++phyaddr) { + unsigned int id1, id2; + + /* Read the PHY identifiers */ + id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1); + id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2); + + DBG(3, dev, "phy_id1=0x%x, phy_id2=0x%x\n", + id1, id2); + + /* Make sure it is a valid identifier */ + if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 && + id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) { + /* Save the PHY's address */ + lp->mii.phy_id = phyaddr & 31; + lp->phy_type = id1 << 16 | id2; + break; + } + } +} + +/* + * Sets the PHY to a configuration as determined by the user + */ +static int smc_phy_fixed(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int phyaddr = lp->mii.phy_id; + int bmcr, cfg1; + + DBG(3, dev, "%s\n", __func__); + + /* Enter Link Disable state */ + cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); + cfg1 |= PHY_CFG1_LNKDIS; + smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1); + + /* + * Set our fixed capabilities + * Disable auto-negotiation + */ + bmcr = 0; + + if (lp->ctl_rfduplx) + bmcr |= BMCR_FULLDPLX; + + if (lp->ctl_rspeed == 100) + bmcr |= BMCR_SPEED100; + + /* Write our capabilities to the phy control register */ + smc_phy_write(dev, phyaddr, MII_BMCR, bmcr); + + /* Re-Configure the Receive/Phy Control register */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RPC(lp, lp->rpc_cur_mode); + SMC_SELECT_BANK(lp, 2); + + return 1; +} + +/** + * smc_phy_reset - reset the phy + * @dev: net device + * @phy: phy address + * + * Issue a software reset for the specified PHY and + * wait up to 100ms for the reset to complete. We should + * not access the PHY for 50ms after issuing the reset. + * + * The time to wait appears to be dependent on the PHY. + * + * Must be called with lp->lock locked. + */ +static int smc_phy_reset(struct net_device *dev, int phy) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int bmcr; + int timeout; + + smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET); + + for (timeout = 2; timeout; timeout--) { + spin_unlock_irq(&lp->lock); + msleep(50); + spin_lock_irq(&lp->lock); + + bmcr = smc_phy_read(dev, phy, MII_BMCR); + if (!(bmcr & BMCR_RESET)) + break; + } + + return bmcr & BMCR_RESET; +} + +/** + * smc_phy_powerdown - powerdown phy + * @dev: net device + * + * Power down the specified PHY + */ +static void smc_phy_powerdown(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int bmcr; + int phy = lp->mii.phy_id; + + if (lp->phy_type == 0) + return; + + /* We need to ensure that no calls to smc_phy_configure are + pending. + */ + cancel_work_sync(&lp->phy_configure); + + bmcr = smc_phy_read(dev, phy, MII_BMCR); + smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); +} + +/** + * smc_phy_check_media - check the media status and adjust TCR + * @dev: net device + * @init: set true for initialisation + * + * Select duplex mode depending on negotiation state. This + * also updates our carrier state. + */ +static void smc_phy_check_media(struct net_device *dev, int init) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { + /* duplex state has changed */ + if (lp->mii.full_duplex) { + lp->tcr_cur_mode |= TCR_SWFDUP; + } else { + lp->tcr_cur_mode &= ~TCR_SWFDUP; + } + + SMC_SELECT_BANK(lp, 0); + SMC_SET_TCR(lp, lp->tcr_cur_mode); + } +} + +/* + * Configures the specified PHY through the MII management interface + * using Autonegotiation. 
+ * Calls smc_phy_fixed() if the user has requested a certain config. + * If RPC ANEG bit is set, the media selection is dependent purely on + * the selection by the MII (either in the MII BMCR reg or the result + * of autonegotiation.) If the RPC ANEG bit is cleared, the selection + * is controlled by the RPC SPEED and RPC DPLX bits. + */ +static void smc_phy_configure(struct work_struct *work) +{ + struct smc_local *lp = + container_of(work, struct smc_local, phy_configure); + struct net_device *dev = lp->dev; + void __iomem *ioaddr = lp->base; + int phyaddr = lp->mii.phy_id; + int my_phy_caps; /* My PHY capabilities */ + int my_ad_caps; /* My Advertised capabilities */ + int status; + + DBG(3, dev, "smc_program_phy()\n"); + + spin_lock_irq(&lp->lock); + + /* + * We should not be called if phy_type is zero. + */ + if (lp->phy_type == 0) + goto smc_phy_configure_exit; + + if (smc_phy_reset(dev, phyaddr)) { + netdev_info(dev, "PHY reset timed out\n"); + goto smc_phy_configure_exit; + } + + /* + * Enable PHY Interrupts (for register 18) + * Interrupts listed here are disabled + */ + smc_phy_write(dev, phyaddr, PHY_MASK_REG, + PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD | + PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB | + PHY_INT_SPDDET | PHY_INT_DPLXDET); + + /* Configure the Receive/Phy Control register */ + SMC_SELECT_BANK(lp, 0); + SMC_SET_RPC(lp, lp->rpc_cur_mode); + + /* If the user requested no auto neg, then go set his request */ + if (lp->mii.force_media) { + smc_phy_fixed(dev); + goto smc_phy_configure_exit; + } + + /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ + my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR); + + if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { + netdev_info(dev, "Auto negotiation NOT supported\n"); + smc_phy_fixed(dev); + goto smc_phy_configure_exit; + } + + my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */ + + if (my_phy_caps & BMSR_100BASE4) + my_ad_caps |= ADVERTISE_100BASE4; + if (my_phy_caps & BMSR_100FULL) + my_ad_caps |= ADVERTISE_100FULL; + if (my_phy_caps & BMSR_100HALF) + my_ad_caps |= ADVERTISE_100HALF; + if (my_phy_caps & BMSR_10FULL) + my_ad_caps |= ADVERTISE_10FULL; + if (my_phy_caps & BMSR_10HALF) + my_ad_caps |= ADVERTISE_10HALF; + + /* Disable capabilities not selected by our user */ + if (lp->ctl_rspeed != 100) + my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); + + if (!lp->ctl_rfduplx) + my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); + + /* Update our Auto-Neg Advertisement Register */ + smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps); + lp->mii.advertising = my_ad_caps; + + /* + * Read the register back. Without this, it appears that when + * auto-negotiation is restarted, sometimes it isn't ready and + * the link does not come up. + */ + status = smc_phy_read(dev, phyaddr, MII_ADVERTISE); + + DBG(2, dev, "phy caps=%x\n", my_phy_caps); + DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps); + + /* Restart auto-negotiation process in order to advertise my caps */ + smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART); + + smc_phy_check_media(dev, 1); + +smc_phy_configure_exit: + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); +} + +/* + * smc_phy_interrupt + * + * Purpose: Handle interrupts relating to PHY register 18. This is + * called from the "hard" interrupt handler under our private spinlock. 
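The advertisement mask built in smc_phy_configure() is just the PHY's BMSR capability bits translated to MII_ADVERTISE bits and then clipped by the user's speed/duplex limits. A sketch of that translation; the bit values below are the standard MII register definitions (as in <linux/mii.h>) and build_ad_caps() is our illustrative name:

#include <stdint.h>
#include <stdio.h>

#define BMSR_10HALF             0x0800
#define BMSR_10FULL             0x1000
#define BMSR_100HALF            0x2000
#define BMSR_100FULL            0x4000
#define BMSR_100BASE4           0x8000

#define ADVERTISE_CSMA          0x0001
#define ADVERTISE_10HALF        0x0020
#define ADVERTISE_10FULL        0x0040
#define ADVERTISE_100HALF       0x0080
#define ADVERTISE_100FULL       0x0100
#define ADVERTISE_100BASE4      0x0200

/* Mirror of the mask built in smc_phy_configure(): start from what the
 * PHY reports in BMSR, then strip whatever ctl_rspeed/ctl_rfduplx do
 * not allow.
 */
static uint16_t build_ad_caps(uint16_t bmsr, int ctl_rspeed, int ctl_rfduplx)
{
        uint16_t ad = ADVERTISE_CSMA;

        if (bmsr & BMSR_100BASE4)
                ad |= ADVERTISE_100BASE4;
        if (bmsr & BMSR_100FULL)
                ad |= ADVERTISE_100FULL;
        if (bmsr & BMSR_100HALF)
                ad |= ADVERTISE_100HALF;
        if (bmsr & BMSR_10FULL)
                ad |= ADVERTISE_10FULL;
        if (bmsr & BMSR_10HALF)
                ad |= ADVERTISE_10HALF;

        if (ctl_rspeed != 100)
                ad &= ~(ADVERTISE_100BASE4 | ADVERTISE_100FULL | ADVERTISE_100HALF);
        if (!ctl_rfduplx)
                ad &= ~(ADVERTISE_100FULL | ADVERTISE_10FULL);
        return ad;
}

int main(void)
{
        /* a 10/100 PHY, user restricted to 10 Mbit/s half duplex */
        uint16_t bmsr = BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF;

        printf("advertised: 0x%04x\n", build_ad_caps(bmsr, 10, 0)); /* 0x0021 */
        return 0;
}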
+ */ +static void smc_phy_interrupt(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + int phy18; + + DBG(2, dev, "%s\n", __func__); + + if (lp->phy_type == 0) + return; + + for(;;) { + smc_phy_check_media(dev, 0); + + /* Read PHY Register 18, Status Output */ + phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG); + if ((phy18 & PHY_INT_INT) == 0) + break; + } +} + +/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ + +static void smc_10bt_check_media(struct net_device *dev, int init) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int old_carrier, new_carrier; + + old_carrier = netif_carrier_ok(dev) ? 1 : 0; + + SMC_SELECT_BANK(lp, 0); + new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0; + SMC_SELECT_BANK(lp, 2); + + if (init || (old_carrier != new_carrier)) { + if (!new_carrier) { + netif_carrier_off(dev); + } else { + netif_carrier_on(dev); + } + if (netif_msg_link(lp)) + netdev_info(dev, "link %s\n", + new_carrier ? "up" : "down"); + } +} + +static void smc_eph_interrupt(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned int ctl; + + smc_10bt_check_media(dev, 0); + + SMC_SELECT_BANK(lp, 1); + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE); + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); +} + +/* + * This is the main routine of the driver, to handle the device when + * it needs some attention. + */ +static irqreturn_t smc_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int status, mask, timeout, card_stats; + int saved_pointer; + + DBG(3, dev, "%s\n", __func__); + + spin_lock(&lp->lock); + + /* A preamble may be used when there is a potential race + * between the interruptible transmit functions and this + * ISR. 
*/ + SMC_INTERRUPT_PREAMBLE; + + saved_pointer = SMC_GET_PTR(lp); + mask = SMC_GET_INT_MASK(lp); + SMC_SET_INT_MASK(lp, 0); + + /* set a timeout value, so I don't stay here forever */ + timeout = MAX_IRQ_LOOPS; + + do { + status = SMC_GET_INT(lp); + + DBG(2, dev, "INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", + status, mask, + ({ int meminfo; SMC_SELECT_BANK(lp, 0); + meminfo = SMC_GET_MIR(lp); + SMC_SELECT_BANK(lp, 2); meminfo; }), + SMC_GET_FIFO(lp)); + + status &= mask; + if (!status) + break; + + if (status & IM_TX_INT) { + /* do this before RX as it will free memory quickly */ + DBG(3, dev, "TX int\n"); + smc_tx(dev); + SMC_ACK_INT(lp, IM_TX_INT); + if (THROTTLE_TX_PKTS) + netif_wake_queue(dev); + } else if (status & IM_RCV_INT) { + DBG(3, dev, "RX irq\n"); + smc_rcv(dev); + } else if (status & IM_ALLOC_INT) { + DBG(3, dev, "Allocation irq\n"); + tasklet_hi_schedule(&lp->tx_task); + mask &= ~IM_ALLOC_INT; + } else if (status & IM_TX_EMPTY_INT) { + DBG(3, dev, "TX empty\n"); + mask &= ~IM_TX_EMPTY_INT; + + /* update stats */ + SMC_SELECT_BANK(lp, 0); + card_stats = SMC_GET_COUNTER(lp); + SMC_SELECT_BANK(lp, 2); + + /* single collisions */ + dev->stats.collisions += card_stats & 0xF; + card_stats >>= 4; + + /* multiple collisions */ + dev->stats.collisions += card_stats & 0xF; + } else if (status & IM_RX_OVRN_INT) { + DBG(1, dev, "RX overrun (EPH_ST 0x%04x)\n", + ({ int eph_st; SMC_SELECT_BANK(lp, 0); + eph_st = SMC_GET_EPH_STATUS(lp); + SMC_SELECT_BANK(lp, 2); eph_st; })); + SMC_ACK_INT(lp, IM_RX_OVRN_INT); + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; + } else if (status & IM_EPH_INT) { + smc_eph_interrupt(dev); + } else if (status & IM_MDINT) { + SMC_ACK_INT(lp, IM_MDINT); + smc_phy_interrupt(dev); + } else if (status & IM_ERCV_INT) { + SMC_ACK_INT(lp, IM_ERCV_INT); + PRINTK(dev, "UNSUPPORTED: ERCV INTERRUPT\n"); + } + } while (--timeout); + + /* restore register states */ + SMC_SET_PTR(lp, saved_pointer); + SMC_SET_INT_MASK(lp, mask); + spin_unlock(&lp->lock); + +#ifndef CONFIG_NET_POLL_CONTROLLER + if (timeout == MAX_IRQ_LOOPS) + PRINTK(dev, "spurious interrupt (mask = 0x%02x)\n", + mask); +#endif + DBG(3, dev, "Interrupt done (%d loops)\n", + MAX_IRQ_LOOPS - timeout); + + /* + * We return IRQ_HANDLED unconditionally here even if there was + * nothing to do. There is a possibility that a packet might + * get enqueued into the chip right after TX_EMPTY_INT is raised + * but just before the CPU acknowledges the IRQ. + * Better take an unneeded IRQ in some occasions than complexifying + * the code for all cases. + */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling receive - used by netconsole and other diagnostic tools + * to allow network i/o with interrupts disabled. + */ +static void smc_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + smc_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* Our watchdog timed out. 
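In the IM_TX_EMPTY_INT branch above, the chip's counter register is read once and its two low nibbles (single and multiple collisions) are both folded into dev->stats.collisions. A tiny sketch of that unpacking, assuming only the nibble layout the driver itself relies on (the full field layout is in the chip datasheet):

#include <stdio.h>

/* Fold the two low nibbles of the counter register value into one
 * collision count, as the IM_TX_EMPTY_INT handler does.
 */
static unsigned int collisions_from_counter(unsigned int card_stats)
{
        unsigned int collisions;

        collisions = card_stats & 0xF;  /* single collisions */
        card_stats >>= 4;
        collisions += card_stats & 0xF; /* multiple collisions */
        return collisions;
}

int main(void)
{
        /* e.g. 3 single + 2 multiple collisions -> 5 */
        printf("%u\n", collisions_from_counter(0x23));
        return 0;
}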
Called by the networking layer */ +static void smc_timeout(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + int status, mask, eph_st, meminfo, fifo; + + DBG(2, dev, "%s\n", __func__); + + spin_lock_irq(&lp->lock); + status = SMC_GET_INT(lp); + mask = SMC_GET_INT_MASK(lp); + fifo = SMC_GET_FIFO(lp); + SMC_SELECT_BANK(lp, 0); + eph_st = SMC_GET_EPH_STATUS(lp); + meminfo = SMC_GET_MIR(lp); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + PRINTK(dev, "TX timeout (INT 0x%02x INTMASK 0x%02x MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n", + status, mask, meminfo, fifo, eph_st); + + smc_reset(dev); + smc_enable(dev); + + /* + * Reconfiguring the PHY doesn't seem like a bad idea here, but + * smc_phy_configure() calls msleep() which calls schedule_timeout() + * which calls schedule(). Hence we use a work queue. + */ + if (lp->phy_type != 0) + schedule_work(&lp->phy_configure); + + /* We can accept TX packets again */ + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +/* + * This routine will, depending on the values passed to it, + * either make it accept multicast packets, go into + * promiscuous mode (for TCPDUMP and cousins) or accept + * a select set of multicast packets + */ +static void smc_set_multicast_list(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + unsigned char multicast_table[8]; + int update_multicast = 0; + + DBG(2, dev, "%s\n", __func__); + + if (dev->flags & IFF_PROMISC) { + DBG(2, dev, "RCR_PRMS\n"); + lp->rcr_cur_mode |= RCR_PRMS; + } + +/* BUG? I never disable promiscuous mode if multicasting was turned on. + Now, I turn off promiscuous mode, but I don't do anything to multicasting + when promiscuous mode is turned on. +*/ + + /* + * Here, I am setting this to accept all multicast packets. + * I don't need to zero the multicast table, because the flag is + * checked before the table is + */ + else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) { + DBG(2, dev, "RCR_ALMUL\n"); + lp->rcr_cur_mode |= RCR_ALMUL; + } + + /* + * This sets the internal hardware table to filter out unwanted + * multicast packets before they take up memory. + * + * The SMC chip uses a hash table where the high 6 bits of the CRC of + * address are the offset into the table. If that bit is 1, then the + * multicast packet is accepted. Otherwise, it's dropped silently. + * + * To use the 6 bits as an offset into the table, the high 3 bits are + * the number of the 8 bit register, while the low 3 bits are the bit + * within that register. 
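The multicast hash indexing described above can be exercised on the host. The sketch below uses a plain bitwise CRC-32 as a stand-in for the kernel's crc32_le() and mirrors the index/bit computation used in smc_set_multicast_list(), i.e. multicast_table[invert3[position & 7]] |= 1 << invert3[(position >> 3) & 7]; the hash_set() helper and the sample address are ours:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Host-side stand-in for the kernel's crc32_le(): reflected CRC-32,
 * polynomial 0xEDB88320, no final inversion.
 */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
        return crc;
}

/* table for flipping the order of 3 bits, as in the driver */
static const unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };

static void hash_set(uint8_t table[8], const uint8_t *addr)
{
        /* low 6 bits of the CRC select one of the 64 filter bits */
        int position = crc32_le(~0u, addr, 6) & 0x3f;

        table[invert3[position & 7]] |= 1 << invert3[(position >> 3) & 7];
}

int main(void)
{
        uint8_t table[8];
        const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        memset(table, 0, sizeof(table));
        hash_set(table, mcast);
        for (int i = 0; i < 8; i++)
                printf("%02x ", table[i]);
        printf("\n");
        return 0;
}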
+ */ + else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + /* table for flipping the order of 3 bits */ + static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7}; + + /* start with a table of all zeros: reject all */ + memset(multicast_table, 0, sizeof(multicast_table)); + + netdev_for_each_mc_addr(ha, dev) { + int position; + + /* only use the low order bits */ + position = crc32_le(~0, ha->addr, 6) & 0x3f; + + /* do some messy swapping to put the bit in the right spot */ + multicast_table[invert3[position&7]] |= + (1<>3)&7]); + } + + /* be sure I get rid of flags I might have set */ + lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL); + + /* now, the table can be loaded into the chipset */ + update_multicast = 1; + } else { + DBG(2, dev, "~(RCR_PRMS|RCR_ALMUL)\n"); + lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL); + + /* + * since I'm disabling all multicast entirely, I need to + * clear the multicast list + */ + memset(multicast_table, 0, sizeof(multicast_table)); + update_multicast = 1; + } + + spin_lock_irq(&lp->lock); + SMC_SELECT_BANK(lp, 0); + SMC_SET_RCR(lp, lp->rcr_cur_mode); + if (update_multicast) { + SMC_SELECT_BANK(lp, 3); + SMC_SET_MCAST(lp, multicast_table); + } + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); +} + + +/* + * Open and Initialize the board + * + * Set up everything, reset the card, etc.. + */ +static int +smc_open(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + + DBG(2, dev, "%s\n", __func__); + + /* Setup the default Register Modes */ + lp->tcr_cur_mode = TCR_DEFAULT; + lp->rcr_cur_mode = RCR_DEFAULT; + lp->rpc_cur_mode = RPC_DEFAULT | + lp->cfg.leda << RPC_LSXA_SHFT | + lp->cfg.ledb << RPC_LSXB_SHFT; + + /* + * If we are not using a MII interface, we need to + * monitor our own carrier signal to detect faults. + */ + if (lp->phy_type == 0) + lp->tcr_cur_mode |= TCR_MON_CSN; + + /* reset the hardware */ + smc_reset(dev); + smc_enable(dev); + + /* Configure the PHY, initialize the link state */ + if (lp->phy_type != 0) + smc_phy_configure(&lp->phy_configure); + else { + spin_lock_irq(&lp->lock); + smc_10bt_check_media(dev, 1); + spin_unlock_irq(&lp->lock); + } + + netif_start_queue(dev); + return 0; +} + +/* + * smc_close + * + * this makes the board clean up everything that it can + * and not talk to the outside world. Caused by + * an 'ifconfig ethX down' + */ +static int smc_close(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + + DBG(2, dev, "%s\n", __func__); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + /* clear everything */ + smc_shutdown(dev); + tasklet_kill(&lp->tx_task); + smc_phy_powerdown(dev); + return 0; +} + +/* + * Ethtool support + */ +static int +smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc_local *lp = netdev_priv(dev); + int ret; + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_ethtool_gset(&lp->mii, cmd); + spin_unlock_irq(&lp->lock); + } else { + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP | SUPPORTED_AUI; + + if (lp->ctl_rspeed == 10) + ethtool_cmd_speed_set(cmd, SPEED_10); + else if (lp->ctl_rspeed == 100) + ethtool_cmd_speed_set(cmd, SPEED_100); + + cmd->autoneg = AUTONEG_DISABLE; + cmd->transceiver = XCVR_INTERNAL; + cmd->port = 0; + cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? 
DUPLEX_FULL : DUPLEX_HALF; + + ret = 0; + } + + return ret; +} + +static int +smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc_local *lp = netdev_priv(dev); + int ret; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_ethtool_sset(&lp->mii, cmd); + spin_unlock_irq(&lp->lock); + } else { + if (cmd->autoneg != AUTONEG_DISABLE || + cmd->speed != SPEED_10 || + (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || + (cmd->port != PORT_TP && cmd->port != PORT_AUI)) + return -EINVAL; + +// lp->port = cmd->port; + lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; + +// if (netif_running(dev)) +// smc_set_port(dev); + + ret = 0; + } + + return ret; +} + +static void +smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, CARDNAME, sizeof(info->driver)); + strlcpy(info->version, version, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int smc_ethtool_nwayreset(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int ret = -EINVAL; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_nway_restart(&lp->mii); + spin_unlock_irq(&lp->lock); + } + + return ret; +} + +static u32 smc_ethtool_getmsglevel(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smc_local *lp = netdev_priv(dev); + lp->msg_enable = level; +} + +static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word) +{ + u16 ctl; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + spin_lock_irq(&lp->lock); + /* load word into GP register */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_GP(lp, word); + /* set the address to put the data in EEPROM */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_PTR(lp, addr); + /* tell it to write */ + SMC_SELECT_BANK(lp, 1); + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE)); + /* wait for it to finish */ + do { + udelay(1); + } while (SMC_GET_CTL(lp) & CTL_STORE); + /* clean up */ + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word) +{ + u16 ctl; + struct smc_local *lp = netdev_priv(dev); + void __iomem *ioaddr = lp->base; + + spin_lock_irq(&lp->lock); + /* set the EEPROM address to get the data from */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_PTR(lp, addr | PTR_READ); + /* tell it to load */ + SMC_SELECT_BANK(lp, 1); + SMC_SET_GP(lp, 0xffff); /* init to known */ + ctl = SMC_GET_CTL(lp); + SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD)); + /* wait for it to finish */ + do { + udelay(1); + } while (SMC_GET_CTL(lp) & CTL_RELOAD); + /* read word from GP register */ + *word = SMC_GET_GP(lp); + /* clean up */ + SMC_SET_CTL(lp, ctl); + SMC_SELECT_BANK(lp, 2); + spin_unlock_irq(&lp->lock); + return 0; +} + +static int smc_ethtool_geteeprom_len(struct net_device *dev) +{ + return 0x23 * 2; +} + +static int smc_ethtool_geteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i; + int imax; + + DBG(1, dev, "Reading %d bytes at %d(0x%x)\n", + eeprom->len, eeprom->offset, eeprom->offset); + imax = smc_ethtool_geteeprom_len(dev); + for (i = 0; i < eeprom->len; i += 2) { + int ret; + u16 wbuf; + int offset = i + eeprom->offset; + if (offset > imax) + break; + ret = 
smc_read_eeprom_word(dev, offset >> 1, &wbuf); + if (ret != 0) + return ret; + DBG(2, dev, "Read 0x%x from 0x%x\n", wbuf, offset >> 1); + data[i] = (wbuf >> 8) & 0xff; + data[i+1] = wbuf & 0xff; + } + return 0; +} + +static int smc_ethtool_seteeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i; + int imax; + + DBG(1, dev, "Writing %d bytes to %d(0x%x)\n", + eeprom->len, eeprom->offset, eeprom->offset); + imax = smc_ethtool_geteeprom_len(dev); + for (i = 0; i < eeprom->len; i += 2) { + int ret; + u16 wbuf; + int offset = i + eeprom->offset; + if (offset > imax) + break; + wbuf = (data[i] << 8) | data[i + 1]; + DBG(2, dev, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1); + ret = smc_write_eeprom_word(dev, offset >> 1, wbuf); + if (ret != 0) + return ret; + } + return 0; +} + + +static const struct ethtool_ops smc_ethtool_ops = { + .get_settings = smc_ethtool_getsettings, + .set_settings = smc_ethtool_setsettings, + .get_drvinfo = smc_ethtool_getdrvinfo, + + .get_msglevel = smc_ethtool_getmsglevel, + .set_msglevel = smc_ethtool_setmsglevel, + .nway_reset = smc_ethtool_nwayreset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = smc_ethtool_geteeprom_len, + .get_eeprom = smc_ethtool_geteeprom, + .set_eeprom = smc_ethtool_seteeprom, +}; + +static const struct net_device_ops smc_netdev_ops = { + .ndo_open = smc_open, + .ndo_stop = smc_close, + .ndo_start_xmit = smc_hard_start_xmit, + .ndo_tx_timeout = smc_timeout, + .ndo_set_rx_mode = smc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smc_poll_controller, +#endif +}; + +/* + * smc_findirq + * + * This routine has a simple purpose -- make the SMC chip generate an + * interrupt, so an auto-detect routine can detect it, and find the IRQ, + */ +/* + * does this still work? + * + * I just deleted auto_irq.c, since it was never built... + * --jgarzik + */ +static int smc_findirq(struct smc_local *lp) +{ + void __iomem *ioaddr = lp->base; + int timeout = 20; + unsigned long cookie; + + DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__); + + cookie = probe_irq_on(); + + /* + * What I try to do here is trigger an ALLOC_INT. This is done + * by allocating a small chunk of memory, which will give an interrupt + * when done. + */ + /* enable ALLOCation interrupts ONLY */ + SMC_SELECT_BANK(lp, 2); + SMC_SET_INT_MASK(lp, IM_ALLOC_INT); + + /* + * Allocate 512 bytes of memory. Note that the chip was just + * reset so all the memory is available + */ + SMC_SET_MMU_CMD(lp, MC_ALLOC | 1); + + /* + * Wait until positive that the interrupt has been generated + */ + do { + int int_status; + udelay(10); + int_status = SMC_GET_INT(lp); + if (int_status & IM_ALLOC_INT) + break; /* got the interrupt */ + } while (--timeout); + + /* + * there is really nothing that I can do here if timeout fails, + * as autoirq_report will return a 0 anyway, which is what I + * want in this case. Plus, the clean up is needed in both + * cases. + */ + + /* and disable all interrupts again */ + SMC_SET_INT_MASK(lp, 0); + + /* and return what I found */ + return probe_irq_off(cookie); +} + +/* + * Function: smc_probe(unsigned long ioaddr) + * + * Purpose: + * Tests to see if a given ioaddr points to an SMC91x chip. 
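The two ethtool EEPROM paths above address the EEPROM by 16-bit word (byte offset >> 1) and split each word big-endian into the user buffer. A short round-trip sketch of just that packing; fake_eeprom and the helper names are ours and stand in for the device:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the device EEPROM: 0x23 16-bit words, matching the size
 * reported by smc_ethtool_geteeprom_len() (0x23 * 2 bytes).
 */
static uint16_t fake_eeprom[0x23];

static void eeprom_read_bytes(uint8_t *data, int offset, int len)
{
        for (int i = 0; i < len; i += 2) {
                uint16_t wbuf = fake_eeprom[(offset + i) >> 1];

                data[i]     = (wbuf >> 8) & 0xff;       /* high byte first */
                data[i + 1] = wbuf & 0xff;
        }
}

static void eeprom_write_bytes(const uint8_t *data, int offset, int len)
{
        for (int i = 0; i < len; i += 2)
                fake_eeprom[(offset + i) >> 1] = (data[i] << 8) | data[i + 1];
}

int main(void)
{
        uint8_t out[4], in[4] = { 0xde, 0xad, 0xbe, 0xef };

        eeprom_write_bytes(in, 0, sizeof(in));
        eeprom_read_bytes(out, 0, sizeof(out));
        assert(out[0] == 0xde && out[3] == 0xef);
        printf("words: 0x%04x 0x%04x\n", fake_eeprom[0], fake_eeprom[1]);
        return 0;
}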
+ * Returns a 0 on success + * + * Algorithm: + * (1) see if the high byte of BANK_SELECT is 0x33 + * (2) compare the ioaddr with the base register's address + * (3) see if I recognize the chip ID in the appropriate register + * + * Here I do typical initialization tasks. + * + * o Initialize the structure if needed + * o print out my vanity message if not done so already + * o print out what type of hardware is detected + * o print out the ethernet address + * o find the IRQ + * o set up my private data + * o configure the dev structure with my subroutines + * o actually GRAB the irq. + * o GRAB the region + */ +static int smc_probe(struct net_device *dev, void __iomem *ioaddr, + unsigned long irq_flags) +{ + struct smc_local *lp = netdev_priv(dev); + int retval; + unsigned int val, revision_register; + const char *version_string; + + DBG(2, dev, "%s: %s\n", CARDNAME, __func__); + + /* First, see if the high byte is 0x33 */ + val = SMC_CURRENT_BANK(lp); + DBG(2, dev, "%s: bank signature probe returned 0x%04x\n", + CARDNAME, val); + if ((val & 0xFF00) != 0x3300) { + if ((val & 0xFF) == 0x33) { + netdev_warn(dev, + "%s: Detected possible byte-swapped interface at IOADDR %p\n", + CARDNAME, ioaddr); + } + retval = -ENODEV; + goto err_out; + } + + /* + * The above MIGHT indicate a device, but I need to write to + * further test this. + */ + SMC_SELECT_BANK(lp, 0); + val = SMC_CURRENT_BANK(lp); + if ((val & 0xFF00) != 0x3300) { + retval = -ENODEV; + goto err_out; + } + + /* + * well, we've already written once, so hopefully another + * time won't hurt. This time, I need to switch the bank + * register to bank 1, so I can access the base address + * register + */ + SMC_SELECT_BANK(lp, 1); + val = SMC_GET_BASE(lp); + val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; + if (((unsigned long)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { + netdev_warn(dev, "%s: IOADDR %p doesn't match configuration (%x).\n", + CARDNAME, ioaddr, val); + } + + /* + * check if the revision register is something that I + * recognize. These might need to be added to later, + * as future revisions could be added. + */ + SMC_SELECT_BANK(lp, 3); + revision_register = SMC_GET_REV(lp); + DBG(2, dev, "%s: revision = 0x%04x\n", CARDNAME, revision_register); + version_string = chip_ids[ (revision_register >> 4) & 0xF]; + if (!version_string || (revision_register & 0xff00) != 0x3300) { + /* I don't recognize this chip, so... */ + netdev_warn(dev, "%s: IO %p: Unrecognized revision register 0x%04x, Contact author.\n", + CARDNAME, ioaddr, revision_register); + + retval = -ENODEV; + goto err_out; + } + + /* At this point I'll assume that the chip is an SMC91x. */ + pr_info_once("%s\n", version); + + /* fill in some of the fields */ + dev->base_addr = (unsigned long)ioaddr; + lp->base = ioaddr; + lp->version = revision_register & 0xff; + spin_lock_init(&lp->lock); + + /* Get the MAC address */ + SMC_SELECT_BANK(lp, 1); + SMC_GET_MAC_ADDR(lp, dev->dev_addr); + + /* now, reset the chip, and put it into a known state */ + smc_reset(dev); + + /* + * If dev->irq is 0, then the device has to be banged on to see + * what the IRQ is. + * + * This banging doesn't always detect the IRQ, for unknown reasons. + * a workaround is to reset the chip and try again. + * + * Interestingly, the DOS packet driver *SETS* the IRQ on the card to + * be what is requested on the command line. I don't do that, mostly + * because the card that I have uses a non-standard method of accessing + * the IRQs, and because this _should_ work in most configurations. 
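The identification logic above boils down to two 16-bit register checks: the bank-select register must read 0x33 in its high byte, and the revision register must do the same, with its low byte split into a chip-ID nibble (index into chip_ids[]) and a revision nibble. A sketch of just those checks; the helper names and the 0x3392 sample value are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool bank_signature_ok(uint16_t bank_select)
{
        /* same test smc_probe() applies to BANK_SELECT */
        return (bank_select & 0xff00) == 0x3300;
}

static void decode_revision(uint16_t rev, int *chip, int *revision)
{
        *chip = (rev >> 4) & 0xf;       /* index into chip_ids[] */
        *revision = rev & 0xf;
}

int main(void)
{
        int chip, revision;

        printf("signature ok: %d\n", bank_signature_ok(0x3392));
        decode_revision(0x3392, &chip, &revision);
        /* chip index 9 corresponds to "SMC91C11xFD" in chip_ids[] */
        printf("chip index %d, revision %d\n", chip, revision);
        return 0;
}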
+ * + * Specifying an IRQ is done with the assumption that the user knows + * what (s)he is doing. No checking is done!!!! + */ + if (dev->irq < 1) { + int trials; + + trials = 3; + while (trials--) { + dev->irq = smc_findirq(lp); + if (dev->irq) + break; + /* kick the card and try again */ + smc_reset(dev); + } + } + if (dev->irq == 0) { + netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n"); + retval = -ENODEV; + goto err_out; + } + dev->irq = irq_canonicalize(dev->irq); + + dev->watchdog_timeo = msecs_to_jiffies(watchdog); + dev->netdev_ops = &smc_netdev_ops; + dev->ethtool_ops = &smc_ethtool_ops; + + tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); + INIT_WORK(&lp->phy_configure, smc_phy_configure); + lp->dev = dev; + lp->mii.phy_id_mask = 0x1f; + lp->mii.reg_num_mask = 0x1f; + lp->mii.force_media = 0; + lp->mii.full_duplex = 0; + lp->mii.dev = dev; + lp->mii.mdio_read = smc_phy_read; + lp->mii.mdio_write = smc_phy_write; + + /* + * Locate the phy, if any. + */ + if (lp->version >= (CHIP_91100 << 4)) + smc_phy_detect(dev); + + /* then shut everything down to save power */ + smc_shutdown(dev); + smc_phy_powerdown(dev); + + /* Set default parameters */ + lp->msg_enable = NETIF_MSG_LINK; + lp->ctl_rfduplx = 0; + lp->ctl_rspeed = 10; + + if (lp->version >= (CHIP_91100 << 4)) { + lp->ctl_rfduplx = 1; + lp->ctl_rspeed = 100; + } + + /* Grab the IRQ */ + retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev); + if (retval) + goto err_out; + +#ifdef CONFIG_ARCH_PXA +# ifdef SMC_USE_PXA_DMA + lp->cfg.flags |= SMC91X_USE_DMA; +# endif + if (lp->cfg.flags & SMC91X_USE_DMA) { + int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, + smc_pxa_dma_irq, NULL); + if (dma >= 0) + dev->dma = dma; + } +#endif + + retval = register_netdev(dev); + if (retval == 0) { + /* now, print out the card info, in a short format.. */ + netdev_info(dev, "%s (rev %d) at %p IRQ %d", + version_string, revision_register & 0x0f, + lp->base, dev->irq); + + if (dev->dma != (unsigned char)-1) + pr_cont(" DMA %d", dev->dma); + + pr_cont("%s%s\n", + lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "", + THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); + + if (!is_valid_ether_addr(dev->dev_addr)) { + netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n"); + } else { + /* Print the Ethernet address */ + netdev_info(dev, "Ethernet addr: %pM\n", + dev->dev_addr); + } + + if (lp->phy_type == 0) { + PRINTK(dev, "No PHY found\n"); + } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) { + PRINTK(dev, "PHY LAN83C183 (LAN91C111 Internal)\n"); + } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) { + PRINTK(dev, "PHY LAN83C180\n"); + } + } + +err_out: +#ifdef CONFIG_ARCH_PXA + if (retval && dev->dma != (unsigned char)-1) + pxa_free_dma(dev->dma); +#endif + return retval; +} + +static int smc_enable_device(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smc_local *lp = netdev_priv(ndev); + unsigned long flags; + unsigned char ecor, ecsr; + void __iomem *addr; + struct resource * res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); + if (!res) + return 0; + + /* + * Map the attribute space. This is overkill, but clean. + */ + addr = ioremap(res->start, ATTRIB_SIZE); + if (!addr) + return -ENOMEM; + + /* + * Reset the device. We must disable IRQs around this + * since a reset causes the IRQ line become active. 
+ */ + local_irq_save(flags); + ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET; + writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT)); + readb(addr + (ECOR << SMC_IO_SHIFT)); + + /* + * Wait 100us for the chip to reset. + */ + udelay(100); + + /* + * The device will ignore all writes to the enable bit while + * reset is asserted, even if the reset bit is cleared in the + * same write. Must clear reset first, then enable the device. + */ + writeb(ecor, addr + (ECOR << SMC_IO_SHIFT)); + writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT)); + + /* + * Set the appropriate byte/word mode. + */ + ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; + if (!SMC_16BIT(lp)) + ecsr |= ECSR_IOIS8; + writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); + local_irq_restore(flags); + + iounmap(addr); + + /* + * Wait for the chip to wake up. We could poll the control + * register in the main register space, but that isn't mapped + * yet. We know this is going to take 750us. + */ + msleep(1); + + return 0; +} + +static int smc_request_attrib(struct platform_device *pdev, + struct net_device *ndev) +{ + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); + struct smc_local *lp __maybe_unused = netdev_priv(ndev); + + if (!res) + return 0; + + if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME)) + return -EBUSY; + + return 0; +} + +static void smc_release_attrib(struct platform_device *pdev, + struct net_device *ndev) +{ + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib"); + struct smc_local *lp __maybe_unused = netdev_priv(ndev); + + if (res) + release_mem_region(res->start, ATTRIB_SIZE); +} + +static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) +{ + if (SMC_CAN_USE_DATACS) { + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); + struct smc_local *lp = netdev_priv(ndev); + + if (!res) + return; + + if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { + netdev_info(ndev, "%s: failed to request datacs memory region.\n", + CARDNAME); + return; + } + + lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); + } +} + +static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) +{ + if (SMC_CAN_USE_DATACS) { + struct smc_local *lp = netdev_priv(ndev); + struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); + + if (lp->datacs) + iounmap(lp->datacs); + + lp->datacs = NULL; + + if (res) + release_mem_region(res->start, SMC_DATA_EXTENT); + } +} + +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id smc91x_match[] = { + { .compatible = "smsc,lan91c94", }, + { .compatible = "smsc,lan91c111", }, + {}, +}; +MODULE_DEVICE_TABLE(of, smc91x_match); + +/** + * of_try_set_control_gpio - configure a gpio if it exists + */ +static int try_toggle_control_gpio(struct device *dev, + struct gpio_desc **desc, + const char *name, int index, + int value, unsigned int nsdelay) +{ + struct gpio_desc *gpio = *desc; + enum gpiod_flags flags = value ? 
GPIOD_OUT_LOW : GPIOD_OUT_HIGH; + + gpio = devm_gpiod_get_index_optional(dev, name, index, flags); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + if (gpio) { + if (nsdelay) + usleep_range(nsdelay, 2 * nsdelay); + gpiod_set_value_cansleep(gpio, value); + } + *desc = gpio; + + return 0; +} +#endif + +/* + * smc_init(void) + * Input parameters: + * dev->base_addr == 0, try to find all possible locations + * dev->base_addr > 0x1ff, this is the address to check + * dev->base_addr == , return failure code + * + * Output: + * 0 --> there is a device + * anything else, error + */ +static int smc_drv_probe(struct platform_device *pdev) +{ + struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev); + const struct of_device_id *match = NULL; + struct smc_local *lp; + struct net_device *ndev; + struct resource *res; + unsigned int __iomem *addr; + unsigned long irq_flags = SMC_IRQ_FLAGS; + unsigned long irq_resflags; + int ret; + + ndev = alloc_etherdev(sizeof(struct smc_local)); + if (!ndev) { + ret = -ENOMEM; + goto out; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* get configuration from platform data, only allow use of + * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set. + */ + + lp = netdev_priv(ndev); + lp->cfg.flags = 0; + + if (pd) { + memcpy(&lp->cfg, pd, sizeof(lp->cfg)); + lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags); + } + +#if IS_BUILTIN(CONFIG_OF) + match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); + if (match) { + struct device_node *np = pdev->dev.of_node; + u32 val; + + /* Optional pwrdwn GPIO configured? */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, + "power", 0, 0, 100); + if (ret) + return ret; + + /* + * Optional reset GPIO configured? Minimum 100 ns reset needed + * according to LAN91C96 datasheet page 14. + */ + ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, + "reset", 0, 0, 100); + if (ret) + return ret; + + /* + * Need to wait for optional EEPROM to load, max 750 us according + * to LAN91C96 datasheet page 55. + */ + if (lp->reset_gpio) + usleep_range(750, 1000); + + /* Combination of IO widths supported, default to 16-bit */ + if (!of_property_read_u32(np, "reg-io-width", &val)) { + if (val & 1) + lp->cfg.flags |= SMC91X_USE_8BIT; + if ((val == 0) || (val & 2)) + lp->cfg.flags |= SMC91X_USE_16BIT; + if (val & 4) + lp->cfg.flags |= SMC91X_USE_32BIT; + } else { + lp->cfg.flags |= SMC91X_USE_16BIT; + } + } +#endif + + if (!pd && !match) { + lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0; + lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0; + lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0; + lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0; + } + + if (!lp->cfg.leda && !lp->cfg.ledb) { + lp->cfg.leda = RPC_LSA_DEFAULT; + lp->cfg.ledb = RPC_LSB_DEFAULT; + } + + ndev->dma = (unsigned char)-1; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto out_free_netdev; + } + + + if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) { + ret = -EBUSY; + goto out_free_netdev; + } + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { + ret = -ENODEV; + goto out_release_io; + } + /* + * If this platform does not specify any special irqflags, or if + * the resource supplies a trigger, override the irqflags with + * the trigger flags from the resource. 
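The device-tree "reg-io-width" handling above treats the property as a bitmask of allowed bus widths, with 16-bit as the default when the property is absent or zero. A sketch of that mapping; the SMC91X_USE_* bit positions below are illustrative only (the real values come from <linux/smc91x.h>) and io_width_to_flags() is our name:

#include <stdint.h>
#include <stdio.h>

#define SMC91X_USE_8BIT         (1 << 0)
#define SMC91X_USE_16BIT        (1 << 1)
#define SMC91X_USE_32BIT        (1 << 2)

/* Mirror of the "reg-io-width" logic in smc_drv_probe(): bit 0 requests
 * 8-bit, bit 1 (or an explicit 0) requests 16-bit, bit 2 requests 32-bit
 * access; a missing property defaults to 16-bit.
 */
static uint32_t io_width_to_flags(int have_prop, uint32_t val)
{
        uint32_t flags = 0;

        if (!have_prop)
                return SMC91X_USE_16BIT;

        if (val & 1)
                flags |= SMC91X_USE_8BIT;
        if (val == 0 || (val & 2))
                flags |= SMC91X_USE_16BIT;
        if (val & 4)
                flags |= SMC91X_USE_32BIT;
        return flags;
}

int main(void)
{
        printf("reg-io-width = <4>: 0x%x\n", io_width_to_flags(1, 4));
        printf("property absent   : 0x%x\n", io_width_to_flags(0, 0));
        return 0;
}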
+ */ + irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); + if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) + irq_flags = irq_resflags & IRQF_TRIGGER_MASK; + + ret = smc_request_attrib(pdev, ndev); + if (ret) + goto out_release_io; +#if defined(CONFIG_ASSABET_NEPONSET) + if (machine_is_assabet() && machine_has_neponset()) + neponset_ncr_set(NCR_ENET_OSC_EN); +#endif + platform_set_drvdata(pdev, ndev); + ret = smc_enable_device(pdev); + if (ret) + goto out_release_attrib; + + addr = ioremap(res->start, SMC_IO_EXTENT); + if (!addr) { + ret = -ENOMEM; + goto out_release_attrib; + } + +#ifdef CONFIG_ARCH_PXA + { + struct smc_local *lp = netdev_priv(ndev); + lp->device = &pdev->dev; + lp->physaddr = res->start; + } +#endif + + ret = smc_probe(ndev, addr, irq_flags); + if (ret != 0) + goto out_iounmap; + + smc_request_datacs(pdev, ndev); + + return 0; + + out_iounmap: + iounmap(addr); + out_release_attrib: + smc_release_attrib(pdev, ndev); + out_release_io: + release_mem_region(res->start, SMC_IO_EXTENT); + out_free_netdev: + free_netdev(ndev); + out: + pr_info("%s: not found (%d).\n", CARDNAME, ret); + + return ret; +} + +static int smc_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smc_local *lp = netdev_priv(ndev); + struct resource *res; + + unregister_netdev(ndev); + + free_irq(ndev->irq, ndev); + +#ifdef CONFIG_ARCH_PXA + if (ndev->dma != (unsigned char)-1) + pxa_free_dma(ndev->dma); +#endif + iounmap(lp->base); + + smc_release_datacs(pdev,ndev); + smc_release_attrib(pdev,ndev); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, SMC_IO_EXTENT); + + free_netdev(ndev); + + return 0; +} + +static int smc_drv_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + + if (ndev) { + if (netif_running(ndev)) { + netif_device_detach(ndev); + smc_shutdown(ndev); + smc_phy_powerdown(ndev); + } + } + return 0; +} + +static int smc_drv_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + + if (ndev) { + struct smc_local *lp = netdev_priv(ndev); + smc_enable_device(pdev); + if (netif_running(ndev)) { + smc_reset(ndev); + smc_enable(ndev); + if (lp->phy_type != 0) + smc_phy_configure(&lp->phy_configure); + netif_device_attach(ndev); + } + } + return 0; +} + +static struct dev_pm_ops smc_drv_pm_ops = { + .suspend = smc_drv_suspend, + .resume = smc_drv_resume, +}; + +static struct platform_driver smc_driver = { + .probe = smc_drv_probe, + .remove = smc_drv_remove, + .driver = { + .name = CARDNAME, + .pm = &smc_drv_pm_ops, + .of_match_table = of_match_ptr(smc91x_match), + }, +}; + +module_platform_driver(smc_driver); diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h new file mode 100644 index 000000000..3a18501d1 --- /dev/null +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -0,0 +1,1079 @@ +/*------------------------------------------------------------------------ + . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device. + . + . Copyright (C) 1996 by Erik Stahlman + . Copyright (C) 2001 Standard Microsystems Corporation + . Developed by Simple Network Magic Corporation + . Copyright (C) 2003 Monta Vista Software, Inc. + . Unified SMC91x driver by Nicolas Pitre + . + . 
This program is free software; you can redistribute it and/or modify + . it under the terms of the GNU General Public License as published by + . the Free Software Foundation; either version 2 of the License, or + . (at your option) any later version. + . + . This program is distributed in the hope that it will be useful, + . but WITHOUT ANY WARRANTY; without even the implied warranty of + . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + . GNU General Public License for more details. + . + . You should have received a copy of the GNU General Public License + . along with this program; if not, see . + . + . Information contained in this file was obtained from the LAN91C111 + . manual from SMC. To get a copy, if you really want one, you can find + . information under www.smsc.com. + . + . Authors + . Erik Stahlman + . Daris A Nevil + . Nicolas Pitre + . + ---------------------------------------------------------------------------*/ +#ifndef _SMC91X_H_ +#define _SMC91X_H_ + +#include + +/* + * Define your architecture specific bus configuration parameters here. + */ + +#if defined(CONFIG_ARM) + +#include + +/* Now the bus width is specified in the platform data + * pretend here to support all I/O access types + */ +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_IO_SHIFT (lp->io_shift) + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) +#define SMC_IRQ_FLAGS (-1) /* from resource */ + +/* We actually can't write halfwords properly if not word aligned */ +static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) +{ + if ((machine_is_mainstone() || machine_is_stargate2() || + machine_is_pxa_idp()) && reg & 2) { + unsigned int v = val << 16; + v |= readl(ioaddr + (reg & ~2)) & 0xffff; + writel(v, ioaddr + (reg & ~2)); + } else { + writew(val, ioaddr + reg); + } +} + +#elif defined(CONFIG_SH_SH4202_MICRODEV) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 + +#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000) +#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) +#define SMC_inl(a, r) inl((a) + (r) - 0xa0000000) +#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) +#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000) +#define SMC_outl(v, a, r) outl(v, (a) + (r) - 0xa0000000) +#define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l) +#define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l) +#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) +#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) + +#define SMC_IRQ_FLAGS (0) + +#elif defined(CONFIG_M32R) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 + +#define SMC_inb(a, r) inb(((u32)a) + (r)) +#define SMC_inw(a, r) inw(((u32)a) + (r)) +#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r)) +#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r)) +#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l) +#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l) + +#define SMC_IRQ_FLAGS (0) + +#define RPC_LSA_DEFAULT RPC_LED_TX_RX +#define RPC_LSB_DEFAULT 
RPC_LED_100_10 + +#elif defined(CONFIG_MN10300) + +/* + * MN10300/AM33 configuration + */ + +#include + +#elif defined(CONFIG_ATARI) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) + +#define RPC_LSA_DEFAULT RPC_LED_100_10 +#define RPC_LSB_DEFAULT RPC_LED_TX_RX + +#elif defined(CONFIG_COLDFIRE) + +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +static inline void mcf_insw(void *a, unsigned char *p, int l) +{ + u16 *wp = (u16 *) p; + while (l-- > 0) + *wp++ = readw(a); +} + +static inline void mcf_outsw(void *a, unsigned char *p, int l) +{ + u16 *wp = (u16 *) p; + while (l-- > 0) + writew(*wp++, a); +} + +#define SMC_inw(a, r) _swapw(readw((a) + (r))) +#define SMC_outw(v, a, r) writew(_swapw(v), (a) + (r)) +#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) +#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) + +#define SMC_IRQ_FLAGS 0 + +#else + +/* + * Default configuration + */ + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_IO_SHIFT (lp->io_shift) + +#define SMC_inb(a, r) ioread8((a) + (r)) +#define SMC_inw(a, r) ioread16((a) + (r)) +#define SMC_inl(a, r) ioread32((a) + (r)) +#define SMC_outb(v, a, r) iowrite8(v, (a) + (r)) +#define SMC_outw(v, a, r) iowrite16(v, (a) + (r)) +#define SMC_outl(v, a, r) iowrite32(v, (a) + (r)) +#define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l) +#define SMC_insl(a, r, p, l) ioread32_rep((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) iowrite32_rep((a) + (r), p, l) + +#define RPC_LSA_DEFAULT RPC_LED_100_10 +#define RPC_LSB_DEFAULT RPC_LED_TX_RX + +#endif + + +/* store this information for the driver.. */ +struct smc_local { + /* + * If I have to wait until memory is available to send a + * packet, I will store the skbuff here, until I get the + * desired memory. Then, I'll send it out and free it. + */ + struct sk_buff *pending_tx_skb; + struct tasklet_struct tx_task; + + struct gpio_desc *power_gpio; + struct gpio_desc *reset_gpio; + + /* version/revision of the SMC91x chip */ + int version; + + /* Contains the current active transmission mode */ + int tcr_cur_mode; + + /* Contains the current active receive mode */ + int rcr_cur_mode; + + /* Contains the current active receive/phy mode */ + int rpc_cur_mode; + int ctl_rfduplx; + int ctl_rspeed; + + u32 msg_enable; + u32 phy_type; + struct mii_if_info mii; + + /* work queue */ + struct work_struct phy_configure; + struct net_device *dev; + int work_pending; + + spinlock_t lock; + +#ifdef CONFIG_ARCH_PXA + /* DMA needs the physical address of the chip */ + u_long physaddr; + struct device *device; +#endif + void __iomem *base; + void __iomem *datacs; + + /* the low address lines on some platforms aren't connected... 
*/ + int io_shift; + + struct smc91x_platdata cfg; +}; + +#define SMC_8BIT(p) ((p)->cfg.flags & SMC91X_USE_8BIT) +#define SMC_16BIT(p) ((p)->cfg.flags & SMC91X_USE_16BIT) +#define SMC_32BIT(p) ((p)->cfg.flags & SMC91X_USE_32BIT) + +#ifdef CONFIG_ARCH_PXA +/* + * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is + * always happening in irq context so no need to worry about races. TX is + * different and probably not worth it for that reason, and not as critical + * as RX which can overrun memory and lose packets. + */ +#include +#include + +#ifdef SMC_insl +#undef SMC_insl +#define SMC_insl(a, r, p, l) \ + smc_pxa_dma_insl(a, lp, r, dev->dma, p, l) +static inline void +smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, + u_char *buf, int len) +{ + u_long physaddr = lp->physaddr; + dma_addr_t dmabuf; + + /* fallback if no DMA available */ + if (dma == (unsigned char)-1) { + readsl(ioaddr + reg, buf, len); + return; + } + + /* 64 bit alignment is required for memory to memory DMA */ + if ((long)buf & 4) { + *((u32 *)buf) = SMC_inl(ioaddr, reg); + buf += 4; + len--; + } + + len *= 4; + dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); + DCSR(dma) = DCSR_NODESC; + DTADR(dma) = dmabuf; + DSADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | + DCMD_WIDTH4 | (DCMD_LENGTH & len)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; + while (!(DCSR(dma) & DCSR_STOPSTATE)) + cpu_relax(); + DCSR(dma) = 0; + dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); +} +#endif + +#ifdef SMC_insw +#undef SMC_insw +#define SMC_insw(a, r, p, l) \ + smc_pxa_dma_insw(a, lp, r, dev->dma, p, l) +static inline void +smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, + u_char *buf, int len) +{ + u_long physaddr = lp->physaddr; + dma_addr_t dmabuf; + + /* fallback if no DMA available */ + if (dma == (unsigned char)-1) { + readsw(ioaddr + reg, buf, len); + return; + } + + /* 64 bit alignment is required for memory to memory DMA */ + while ((long)buf & 6) { + *((u16 *)buf) = SMC_inw(ioaddr, reg); + buf += 2; + len--; + } + + len *= 2; + dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); + DCSR(dma) = DCSR_NODESC; + DTADR(dma) = dmabuf; + DSADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | + DCMD_WIDTH2 | (DCMD_LENGTH & len)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; + while (!(DCSR(dma) & DCSR_STOPSTATE)) + cpu_relax(); + DCSR(dma) = 0; + dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); +} +#endif + +static void +smc_pxa_dma_irq(int dma, void *dummy) +{ + DCSR(dma) = 0; +} +#endif /* CONFIG_ARCH_PXA */ + + +/* + * Everything a particular hardware setup needs should have been defined + * at this point. Add stubs for the undefined cases, mainly to avoid + * compilation warnings since they'll be optimized away, or to prevent buggy + * use of them. + */ + +#if ! SMC_CAN_USE_32BIT +#define SMC_inl(ioaddr, reg) ({ BUG(); 0; }) +#define SMC_outl(x, ioaddr, reg) BUG() +#define SMC_insl(a, r, p, l) BUG() +#define SMC_outsl(a, r, p, l) BUG() +#endif + +#if !defined(SMC_insl) || !defined(SMC_outsl) +#define SMC_insl(a, r, p, l) BUG() +#define SMC_outsl(a, r, p, l) BUG() +#endif + +#if ! SMC_CAN_USE_16BIT + +/* + * Any 16-bit access is performed with two 8-bit accesses if the hardware + * can't do it directly. Most registers are 16-bit so those are mandatory. 
+ */ +#define SMC_outw(x, ioaddr, reg) \ + do { \ + unsigned int __val16 = (x); \ + SMC_outb( __val16, ioaddr, reg ); \ + SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ + } while (0) +#define SMC_inw(ioaddr, reg) \ + ({ \ + unsigned int __val16; \ + __val16 = SMC_inb( ioaddr, reg ); \ + __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ + __val16; \ + }) + +#define SMC_insw(a, r, p, l) BUG() +#define SMC_outsw(a, r, p, l) BUG() + +#endif + +#if !defined(SMC_insw) || !defined(SMC_outsw) +#define SMC_insw(a, r, p, l) BUG() +#define SMC_outsw(a, r, p, l) BUG() +#endif + +#if ! SMC_CAN_USE_8BIT +#define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) +#define SMC_outb(x, ioaddr, reg) BUG() +#define SMC_insb(a, r, p, l) BUG() +#define SMC_outsb(a, r, p, l) BUG() +#endif + +#if !defined(SMC_insb) || !defined(SMC_outsb) +#define SMC_insb(a, r, p, l) BUG() +#define SMC_outsb(a, r, p, l) BUG() +#endif + +#ifndef SMC_CAN_USE_DATACS +#define SMC_CAN_USE_DATACS 0 +#endif + +#ifndef SMC_IO_SHIFT +#define SMC_IO_SHIFT 0 +#endif + +#ifndef SMC_IRQ_FLAGS +#define SMC_IRQ_FLAGS IRQF_TRIGGER_RISING +#endif + +#ifndef SMC_INTERRUPT_PREAMBLE +#define SMC_INTERRUPT_PREAMBLE +#endif + + +/* Because of bank switching, the LAN91x uses only 16 I/O ports */ +#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) +#define SMC_DATA_EXTENT (4) + +/* + . Bank Select Register: + . + . yyyy yyyy 0000 00xx + . xx = bank number + . yyyy yyyy = 0x33, for identification purposes. +*/ +#define BANK_SELECT (14 << SMC_IO_SHIFT) + + +// Transmit Control Register +/* BANK 0 */ +#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0) +#define TCR_ENABLE 0x0001 // When 1 we can transmit +#define TCR_LOOP 0x0002 // Controls output pin LBK +#define TCR_FORCOL 0x0004 // When 1 will force a collision +#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0 +#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames +#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier +#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation +#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error +#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback +#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode + +#define TCR_CLEAR 0 /* do NOTHING */ +/* the default settings for the TCR register : */ +#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN) + + +// EPH Status Register +/* BANK 0 */ +#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0) +#define ES_TX_SUC 0x0001 // Last TX was successful +#define ES_SNGL_COL 0x0002 // Single collision detected for last tx +#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx +#define ES_LTX_MULT 0x0008 // Last tx was a multicast +#define ES_16COL 0x0010 // 16 Collisions Reached +#define ES_SQET 0x0020 // Signal Quality Error Test +#define ES_LTXBRD 0x0040 // Last tx was a broadcast +#define ES_TXDEFR 0x0080 // Transmit Deferred +#define ES_LATCOL 0x0200 // Late collision detected on last tx +#define ES_LOSTCARR 0x0400 // Lost Carrier Sense +#define ES_EXC_DEF 0x0800 // Excessive Deferral +#define ES_CTR_ROL 0x1000 // Counter Roll Over indication +#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin +#define ES_TXUNRN 0x8000 // Tx Underrun + + +// Receive Control Register +/* BANK 0 */ +#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0) +#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted +#define RCR_PRMS 0x0002 // Enable promiscuous mode +#define RCR_ALMUL 0x0004 // When set accepts all multicast frames +#define RCR_RXEN 
0x0100 // IFF this is set, we can receive packets +#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets +#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision +#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bit s of carrier +#define RCR_SOFTRST 0x8000 // resets the chip + +/* the normal settings for the RCR register : */ +#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN) +#define RCR_CLEAR 0x0 // set it to a base state + + +// Counter Register +/* BANK 0 */ +#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0) + + +// Memory Information Register +/* BANK 0 */ +#define MIR_REG(lp) SMC_REG(lp, 0x0008, 0) + + +// Receive/Phy Control Register +/* BANK 0 */ +#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0) +#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode. +#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode +#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode +#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb +#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb + +#ifndef RPC_LSA_DEFAULT +#define RPC_LSA_DEFAULT RPC_LED_100 +#endif +#ifndef RPC_LSB_DEFAULT +#define RPC_LSB_DEFAULT RPC_LED_FD +#endif + +#define RPC_DEFAULT (RPC_ANEG | RPC_SPEED | RPC_DPLX) + + +/* Bank 0 0x0C is reserved */ + +// Bank Select Register +/* All Banks */ +#define BSR_REG 0x000E + + +// Configuration Reg +/* BANK 1 */ +#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1) +#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy +#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL +#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus +#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode. + +// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low +#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN) + + +// Base Address Register +/* BANK 1 */ +#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1) + + +// Individual Address Registers +/* BANK 1 */ +#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1) +#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1) +#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1) + + +// General Purpose Register +/* BANK 1 */ +#define GP_REG(lp) SMC_REG(lp, 0x000A, 1) + + +// Control Register +/* BANK 1 */ +#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1) +#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received +#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically +#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt +#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt +#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt +#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store +#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers +#define CTL_STORE 0x0001 // When set stores registers into EEPROM + + +// MMU Command Register +/* BANK 2 */ +#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2) +#define MC_BUSY 1 // When 1 the last release has not completed +#define MC_NOP (0<<5) // No Op +#define MC_ALLOC (1<<5) // OR with number of 256 byte packets +#define MC_RESET (2<<5) // Reset MMU to initial state +#define MC_REMOVE (3<<5) // Remove the current rx packet +#define MC_RELEASE (4<<5) // Remove and release the current rx packet +#define MC_FREEPKT (5<<5) // Release packet in PNR register +#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit +#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs + + +// Packet Number Register +/* BANK 2 */ +#define PN_REG(lp) SMC_REG(lp, 0x0002, 
2) + + +// Allocation Result Register +/* BANK 2 */ +#define AR_REG(lp) SMC_REG(lp, 0x0003, 2) +#define AR_FAILED 0x80 // Alocation Failed + + +// TX FIFO Ports Register +/* BANK 2 */ +#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2) +#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty + +// RX FIFO Ports Register +/* BANK 2 */ +#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2) +#define RXFIFO_REMPTY 0x80 // RX FIFO Empty + +#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2) + +// Pointer Register +/* BANK 2 */ +#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2) +#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area +#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access +#define PTR_READ 0x2000 // When 1 the operation is a read + + +// Data Register +/* BANK 2 */ +#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2) + + +// Interrupt Status/Acknowledge Register +/* BANK 2 */ +#define INT_REG(lp) SMC_REG(lp, 0x000C, 2) + + +// Interrupt Mask Register +/* BANK 2 */ +#define IM_REG(lp) SMC_REG(lp, 0x000D, 2) +#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt +#define IM_ERCV_INT 0x40 // Early Receive Interrupt +#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section +#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns +#define IM_ALLOC_INT 0x08 // Set when allocation request is completed +#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty +#define IM_TX_INT 0x02 // Transmit Interrupt +#define IM_RCV_INT 0x01 // Receive Interrupt + + +// Multicast Table Registers +/* BANK 3 */ +#define MCAST_REG1(lp) SMC_REG(lp, 0x0000, 3) +#define MCAST_REG2(lp) SMC_REG(lp, 0x0002, 3) +#define MCAST_REG3(lp) SMC_REG(lp, 0x0004, 3) +#define MCAST_REG4(lp) SMC_REG(lp, 0x0006, 3) + + +// Management Interface Register (MII) +/* BANK 3 */ +#define MII_REG(lp) SMC_REG(lp, 0x0008, 3) +#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup +#define MII_MDOE 0x0008 // MII Output Enable +#define MII_MCLK 0x0004 // MII Clock, pin MDCLK +#define MII_MDI 0x0002 // MII Input, pin MDI +#define MII_MDO 0x0001 // MII Output, pin MDO + + +// Revision Register +/* BANK 3 */ +/* ( hi: chip id low: rev # ) */ +#define REV_REG(lp) SMC_REG(lp, 0x000A, 3) + + +// Early RCV Register +/* BANK 3 */ +/* this is NOT on SMC9192 */ +#define ERCV_REG(lp) SMC_REG(lp, 0x000C, 3) +#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received +#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask + + +// External Register +/* BANK 7 */ +#define EXT_REG(lp) SMC_REG(lp, 0x0000, 7) + + +#define CHIP_9192 3 +#define CHIP_9194 4 +#define CHIP_9195 5 +#define CHIP_9196 6 +#define CHIP_91100 7 +#define CHIP_91100FD 8 +#define CHIP_91111FD 9 + +static const char * chip_ids[ 16 ] = { + NULL, NULL, NULL, + /* 3 */ "SMC91C90/91C92", + /* 4 */ "SMC91C94", + /* 5 */ "SMC91C95", + /* 6 */ "SMC91C96", + /* 7 */ "SMC91C100", + /* 8 */ "SMC91C100FD", + /* 9 */ "SMC91C11xFD", + NULL, NULL, NULL, + NULL, NULL, NULL}; + + +/* + . Receive status bits +*/ +#define RS_ALGNERR 0x8000 +#define RS_BRODCAST 0x4000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 +#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) + + +/* + * PHY IDs + * LAN83C183 == LAN91C111 Internal PHY + */ +#define PHY_LAN83C183 0x0016f840 +#define PHY_LAN83C180 0x02821c50 + +/* + * PHY Register Addresses (LAN91C111 Internal PHY) + * + * Generic PHY registers can be found in + * + * These phy registers are specific to our on-board phy. 
+ */ + +// PHY Configuration Register 1 +#define PHY_CFG1_REG 0x10 +#define PHY_CFG1_LNKDIS 0x8000 // 1=Rx Link Detect Function disabled +#define PHY_CFG1_XMTDIS 0x4000 // 1=TP Transmitter Disabled +#define PHY_CFG1_XMTPDN 0x2000 // 1=TP Transmitter Powered Down +#define PHY_CFG1_BYPSCR 0x0400 // 1=Bypass scrambler/descrambler +#define PHY_CFG1_UNSCDS 0x0200 // 1=Unscramble Idle Reception Disable +#define PHY_CFG1_EQLZR 0x0100 // 1=Rx Equalizer Disabled +#define PHY_CFG1_CABLE 0x0080 // 1=STP(150ohm), 0=UTP(100ohm) +#define PHY_CFG1_RLVL0 0x0040 // 1=Rx Squelch level reduced by 4.5db +#define PHY_CFG1_TLVL_SHIFT 2 // Transmit Output Level Adjust +#define PHY_CFG1_TLVL_MASK 0x003C +#define PHY_CFG1_TRF_MASK 0x0003 // Transmitter Rise/Fall time + + +// PHY Configuration Register 2 +#define PHY_CFG2_REG 0x11 +#define PHY_CFG2_APOLDIS 0x0020 // 1=Auto Polarity Correction disabled +#define PHY_CFG2_JABDIS 0x0010 // 1=Jabber disabled +#define PHY_CFG2_MREG 0x0008 // 1=Multiple register access (MII mgt) +#define PHY_CFG2_INTMDIO 0x0004 // 1=Interrupt signaled with MDIO pulseo + +// PHY Status Output (and Interrupt status) Register +#define PHY_INT_REG 0x12 // Status Output (Interrupt Status) +#define PHY_INT_INT 0x8000 // 1=bits have changed since last read +#define PHY_INT_LNKFAIL 0x4000 // 1=Link Not detected +#define PHY_INT_LOSSSYNC 0x2000 // 1=Descrambler has lost sync +#define PHY_INT_CWRD 0x1000 // 1=Invalid 4B5B code detected on rx +#define PHY_INT_SSD 0x0800 // 1=No Start Of Stream detected on rx +#define PHY_INT_ESD 0x0400 // 1=No End Of Stream detected on rx +#define PHY_INT_RPOL 0x0200 // 1=Reverse Polarity detected +#define PHY_INT_JAB 0x0100 // 1=Jabber detected +#define PHY_INT_SPDDET 0x0080 // 1=100Base-TX mode, 0=10Base-T mode +#define PHY_INT_DPLXDET 0x0040 // 1=Device in Full Duplex + +// PHY Interrupt/Status Mask Register +#define PHY_MASK_REG 0x13 // Interrupt Mask +// Uses the same bit definitions as PHY_INT_REG + + +/* + * SMC91C96 ethernet config and status registers. + * These are in the "attribute" space. + */ +#define ECOR 0x8000 +#define ECOR_RESET 0x80 +#define ECOR_LEVEL_IRQ 0x40 +#define ECOR_WR_ATTRIB 0x04 +#define ECOR_ENABLE 0x01 + +#define ECSR 0x8002 +#define ECSR_IOIS8 0x20 +#define ECSR_PWRDWN 0x04 +#define ECSR_INT 0x02 + +#define ATTRIB_SIZE ((64*1024) << SMC_IO_SHIFT) + + +/* + * Macros to abstract register access according to the data bus + * capabilities. Please use those and not the in/out primitives. + * Note: the following macros do *not* select the bank -- this must + * be done separately as needed in the main code. The SMC_REG() macro + * only uses the bank argument for debugging purposes (when enabled). + * + * Note: despite inline functions being safer, everything leading to this + * should preferably be macros to let BUG() display the line number in + * the core source code since we're interested in the top call site + * not in any inline function location. + */ + +#if SMC_DEBUG > 0 +#define SMC_REG(lp, reg, bank) \ + ({ \ + int __b = SMC_CURRENT_BANK(lp); \ + if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \ + pr_err("%s: bank reg screwed (0x%04x)\n", \ + CARDNAME, __b); \ + BUG(); \ + } \ + reg<> 8)) + +#define SMC_GET_TXFIFO(lp) \ + (SMC_8BIT(lp) ? (SMC_inb(ioaddr, TXFIFO_REG(lp))) \ + : (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF)) + +#define SMC_GET_RXFIFO(lp) \ + (SMC_8BIT(lp) ? (SMC_inb(ioaddr, RXFIFO_REG(lp))) \ + : (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8)) + +#define SMC_GET_INT(lp) \ + (SMC_8BIT(lp) ? 
(SMC_inb(ioaddr, INT_REG(lp))) \ + : (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF)) + +#define SMC_ACK_INT(lp, x) \ + do { \ + if (SMC_8BIT(lp)) \ + SMC_outb(x, ioaddr, INT_REG(lp)); \ + else { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \ + SMC_outw(__mask | (x), ioaddr, INT_REG(lp)); \ + local_irq_restore(__flags); \ + } \ + } while (0) + +#define SMC_GET_INT_MASK(lp) \ + (SMC_8BIT(lp) ? (SMC_inb(ioaddr, IM_REG(lp))) \ + : (SMC_inw(ioaddr, INT_REG(lp)) >> 8)) + +#define SMC_SET_INT_MASK(lp, x) \ + do { \ + if (SMC_8BIT(lp)) \ + SMC_outb(x, ioaddr, IM_REG(lp)); \ + else \ + SMC_outw((x) << 8, ioaddr, INT_REG(lp)); \ + } while (0) + +#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT) + +#define SMC_SELECT_BANK(lp, x) \ + do { \ + if (SMC_MUST_ALIGN_WRITE(lp)) \ + SMC_outl((x)<<16, ioaddr, 12<> 8; \ + __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \ + addr[2] = __v; addr[3] = __v >> 8; \ + __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \ + addr[4] = __v; addr[5] = __v >> 8; \ + } while (0) +#endif + +#define SMC_SET_MAC_ADDR(lp, addr) \ + do { \ + SMC_outw(addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG(lp)); \ + SMC_outw(addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG(lp)); \ + SMC_outw(addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG(lp)); \ + } while (0) + +#define SMC_SET_MCAST(lp, x) \ + do { \ + const unsigned char *mt = (x); \ + SMC_outw(mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \ + SMC_outw(mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \ + SMC_outw(mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \ + SMC_outw(mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \ + } while (0) + +#define SMC_PUT_PKT_HDR(lp, status, length) \ + do { \ + if (SMC_32BIT(lp)) \ + SMC_outl((status) | (length)<<16, ioaddr, \ + DATA_REG(lp)); \ + else { \ + SMC_outw(status, ioaddr, DATA_REG(lp)); \ + SMC_outw(length, ioaddr, DATA_REG(lp)); \ + } \ + } while (0) + +#define SMC_GET_PKT_HDR(lp, status, length) \ + do { \ + if (SMC_32BIT(lp)) { \ + unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \ + (status) = __val & 0xffff; \ + (length) = __val >> 16; \ + } else { \ + (status) = SMC_inw(ioaddr, DATA_REG(lp)); \ + (length) = SMC_inw(ioaddr, DATA_REG(lp)); \ + } \ + } while (0) + +#define SMC_PUSH_DATA(lp, p, l) \ + do { \ + if (SMC_32BIT(lp)) { \ + void *__ptr = (p); \ + int __len = (l); \ + void __iomem *__ioaddr = ioaddr; \ + if (__len >= 2 && (unsigned long)__ptr & 2) { \ + __len -= 2; \ + SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \ + __ptr += 2; \ + } \ + if (SMC_CAN_USE_DATACS && lp->datacs) \ + __ioaddr = lp->datacs; \ + SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ + if (__len & 2) { \ + __ptr += (__len & ~3); \ + SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \ + } \ + } else if (SMC_16BIT(lp)) \ + SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ + else if (SMC_8BIT(lp)) \ + SMC_outsb(ioaddr, DATA_REG(lp), p, l); \ + } while (0) + +#define SMC_PULL_DATA(lp, p, l) \ + do { \ + if (SMC_32BIT(lp)) { \ + void *__ptr = (p); \ + int __len = (l); \ + void __iomem *__ioaddr = ioaddr; \ + if ((unsigned long)__ptr & 2) { \ + /* \ + * We want 32bit alignment here. \ + * Since some buses perform a full \ + * 32bit fetch even for 16bit data \ + * we can't use SMC_inw() here. \ + * Back both source (on-chip) and \ + * destination pointers of 2 bytes. \ + * This is possible since the call to \ + * SMC_GET_PKT_HDR() already advanced \ + * the source pointer of 4 bytes, and \ + * the skb_reserve(skb, 2) advanced \ + * the destination pointer of 2 bytes. 
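+ * Net effect: the two re-read header bytes land in the \
+ * headroom left by skb_reserve(), and the payload that \
+ * follows is copied with fully aligned 32-bit accesses. \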
\ + */ \ + __ptr -= 2; \ + __len += 2; \ + SMC_SET_PTR(lp, \ + 2|PTR_READ|PTR_RCV|PTR_AUTOINC); \ + } \ + if (SMC_CAN_USE_DATACS && lp->datacs) \ + __ioaddr = lp->datacs; \ + __len += 2; \ + SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \ + } else if (SMC_16BIT(lp)) \ + SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ + else if (SMC_8BIT(lp)) \ + SMC_insb(ioaddr, DATA_REG(lp), p, l); \ + } while (0) + +#endif /* _SMC91X_H_ */ diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c new file mode 100644 index 000000000..959aeeade --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -0,0 +1,2681 @@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + *************************************************************************** + * Rewritten, heavily based on smsc911x simple driver by SMSC. + * Partly uses io macros from smc91x.c by Nicolas Pitre + * + * Supported devices: + * LAN9115, LAN9116, LAN9117, LAN9118 + * LAN9215, LAN9216, LAN9217, LAN9218 + * LAN9210, LAN9211 + * LAN9220, LAN9221 + * LAN89218 + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "smsc911x.h" + +#define SMSC_CHIPNAME "smsc911x" +#define SMSC_MDIONAME "smsc911x-mdio" +#define SMSC_DRV_VERSION "2008-10-21" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(SMSC_DRV_VERSION); +MODULE_ALIAS("platform:smsc911x"); + +#if USE_DEBUG > 0 +static int debug = 16; +#else +static int debug = 3; +#endif + +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct smsc911x_data; + +struct smsc911x_ops { + u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg); + void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val); + void (*rx_readfifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); + void (*tx_writefifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); +}; + +#define SMSC911X_NUM_SUPPLIES 2 + +struct smsc911x_data { + void __iomem *ioaddr; + + unsigned int idrev; + + /* used to decide which workarounds apply */ + unsigned int generation; + + /* device configuration (copied from platform_data during probe) */ + struct smsc911x_platform_config config; + + /* This needs to be acquired before calling any of below: + * smsc911x_mac_read(), smsc911x_mac_write() + */ + spinlock_t mac_lock; + + /* spinlock to ensure register accesses are serialised */ + spinlock_t dev_lock; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + int phy_irq[PHY_MAX_ADDR]; + unsigned int 
using_extphy; + int last_duplex; + int last_carrier; + + u32 msg_enable; + unsigned int gpio_setting; + unsigned int gpio_orig_setting; + struct net_device *dev; + struct napi_struct napi; + + unsigned int software_irq_signal; + +#ifdef USE_PHY_WORK_AROUND +#define MIN_PACKET_SIZE (64) + char loopback_tx_pkt[MIN_PACKET_SIZE]; + char loopback_rx_pkt[MIN_PACKET_SIZE]; + unsigned int resetcount; +#endif + + /* Members for Multicast filter workaround */ + unsigned int multicast_update_pending; + unsigned int set_bits_mask; + unsigned int clear_bits_mask; + unsigned int hashhi; + unsigned int hashlo; + + /* register access functions */ + const struct smsc911x_ops *ops; + + /* regulators */ + struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES]; + + /* clock */ + struct clk *clk; +}; + +/* Easy access to information */ +#define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift)) + +static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + reg); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return ((readw(pdata->ioaddr + reg) & 0xFFFF) | + ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); + + BUG(); + return 0; +} + +static inline u32 +__smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + __smsc_shift(pdata, reg)); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return (readw(pdata->ioaddr + + __smsc_shift(pdata, reg)) & 0xFFFF) | + ((readw(pdata->ioaddr + + __smsc_shift(pdata, reg + 2)) & 0xFFFF) << 16); + + BUG(); + return 0; +} + +static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + u32 data; + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + data = pdata->ops->reg_read(pdata, reg); + spin_unlock_irqrestore(&pdata->dev_lock, flags); + + return data; +} + +static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + reg); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, pdata->ioaddr + reg); + writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); + return; + } + + BUG(); +} + +static inline void +__smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + __smsc_shift(pdata, reg)); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg)); + writew((val >> 16) & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg + 2)); + return; + } + + BUG(); +} + +static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + pdata->ops->reg_write(pdata, reg, val); + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Writes a packet to the TX_DATA_FIFO */ +static inline void +smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + iowrite32_rep(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); + goto out; + } + + 
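+ /* 16-bit bus: __smsc911x_reg_write() turns each 32-bit FIFO
+ * word into two 16-bit writes, so just feed it word by word. */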
if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Writes a packet to the TX_DATA_FIFO - shifted version */ +static inline void +smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + iowrite32_rep(pdata->ioaddr + __smsc_shift(pdata, + TX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, + TX_DATA_FIFO, *buf++); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Reads a packet out of the RX_DATA_FIFO */ +static inline void +smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + ioread32_rep(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Reads a packet out of the RX_DATA_FIFO - shifted version */ +static inline void +smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + ioread32_rep(pdata->ioaddr + __smsc_shift(pdata, + RX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* + * enable regulator and clock resources. + */ +static int smsc911x_enable_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + int ret = 0; + + ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies), + pdata->supplies); + if (ret) + netdev_err(ndev, "failed to enable regulators %d\n", + ret); + + if (!IS_ERR(pdata->clk)) { + ret = clk_prepare_enable(pdata->clk); + if (ret < 0) + netdev_err(ndev, "failed to enable clock %d\n", ret); + } + + return ret; +} + +/* + * disable resources, currently just regulators. 
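+ * The clock obtained in smsc911x_request_resources(), if any, is
+ * disabled and unprepared here as well.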
+ */ +static int smsc911x_disable_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + int ret = 0; + + ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies), + pdata->supplies); + + if (!IS_ERR(pdata->clk)) + clk_disable_unprepare(pdata->clk); + + return ret; +} + +/* + * Request resources, currently just regulators. + * + * The SMSC911x has two power pins: vddvario and vdd33a, in designs where + * these are not always-on we need to request regulators to be turned on + * before we can try to access the device registers. + */ +static int smsc911x_request_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + int ret = 0; + + /* Request regulators */ + pdata->supplies[0].supply = "vdd33a"; + pdata->supplies[1].supply = "vddvario"; + ret = regulator_bulk_get(&pdev->dev, + ARRAY_SIZE(pdata->supplies), + pdata->supplies); + if (ret) + netdev_err(ndev, "couldn't get regulators %d\n", + ret); + + /* Request clock */ + pdata->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(pdata->clk)) + dev_dbg(&pdev->dev, "couldn't get clock %li\n", + PTR_ERR(pdata->clk)); + + return ret; +} + +/* + * Free resources, currently just regulators. + * + */ +static void smsc911x_free_resources(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct smsc911x_data *pdata = netdev_priv(ndev); + + /* Free regulators */ + regulator_bulk_free(ARRAY_SIZE(pdata->supplies), + pdata->supplies); + + /* Free clock */ + if (!IS_ERR(pdata->clk)) { + clk_put(pdata->clk); + pdata->clk = NULL; + } +} + +/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read + * and smsc911x_mac_write, so assumes mac_lock is held */ +static int smsc911x_mac_complete(struct smsc911x_data *pdata) +{ + int i; + u32 val; + + SMSC_ASSERT_MAC_LOCK(pdata); + + for (i = 0; i < 40; i++) { + val = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (!(val & MAC_CSR_CMD_CSR_BUSY_)) + return 0; + } + SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. " + "MAC_CSR_CMD: 0x%08X", val); + return -EIO; +} + +/* Fetches a MAC register value. 
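+ * The access is indirect; the sequence below is an outline of what
+ * this helper does:
+ *
+ *   write (offset & 0xFF) | MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_
+ *     to MAC_CSR_CMD;
+ *   poll until MAC_CSR_CMD_CSR_BUSY_ clears (smsc911x_mac_complete());
+ *   read the result from MAC_CSR_DATA.
+ *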
Assumes mac_lock is acquired */ +static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset) +{ + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, "MAC busy at entry"); + return 0xFFFFFFFF; + } + + /* Send the MAC cmd */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the read to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return smsc911x_reg_read(pdata, MAC_CSR_DATA); + + SMSC_WARN(pdata, hw, "MAC busy after read"); + return 0xFFFFFFFF; +} + +/* Set a mac register, mac_lock must be acquired before calling */ +static void smsc911x_mac_write(struct smsc911x_data *pdata, + unsigned int offset, u32 val) +{ + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, + "smsc911x_mac_write failed, MAC busy at entry"); + return; + } + + /* Send data to write */ + smsc911x_reg_write(pdata, MAC_CSR_DATA, val); + + /* Write the actual data */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the write to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return; + + SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write"); +} + +/* Get a phy register */ +static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) +{ + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???"); + reg = -EIO; + goto out; + } + + /* Set the address, index & direction (read from PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6); + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for read to complete w/ timeout */ + for (i = 0; i < 100; i++) + if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = smsc911x_mac_read(pdata, MII_DATA); + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish"); + reg = -EIO; + +out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; +} + +/* Set a phy register */ +static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val) +{ + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???"); + reg = -EIO; + goto out; + } + + /* Put the data to write in the MAC */ + smsc911x_mac_write(pdata, MII_DATA, val); + + /* Set the address, index & direction (write to PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACC_MII_WRITE_; + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for write to complete w/ timeout */ + for (i = 0; i < 100; i++) + if 
(!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = 0; + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish"); + reg = -EIO; + +out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; +} + +/* Switch to external phy. Assumes tx and rx are stopped. */ +static void smsc911x_phy_enable_external(struct smsc911x_data *pdata) +{ + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + /* Disable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to stop */ + + /* Switch to external phy */ + hwcfg |= HW_CFG_EXT_PHY_EN_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + + /* Enable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to restart */ + + hwcfg |= HW_CFG_SMI_SEL_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); +} + +/* Autodetects and enables external phy if present on supported chips. + * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY + * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. */ +static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata) +{ + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing internal PHY"); + pdata->using_extphy = 0; + } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET set, using external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET clear, using internal PHY"); + pdata->using_extphy = 0; + } +} + +/* Fetches a tx status out of the status fifo */ +static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata) +{ + unsigned int result = + smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, TX_STATUS_FIFO); + + return result; +} + +/* Fetches the next rx status */ +static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata) +{ + unsigned int result = + smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, RX_STATUS_FIFO); + + return result; +} + +#ifdef USE_PHY_WORK_AROUND +static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata) +{ + unsigned int tries; + u32 wrsz; + u32 rdsz; + ulong bufp; + + for (tries = 0; tries < 10; tries++) { + unsigned int txcmd_a; + unsigned int txcmd_b; + unsigned int status; + unsigned int pktlength; + unsigned int i; + + /* Zero-out rx packet memory */ + memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE); + + /* Write tx packet to 118 */ + txcmd_a = (u32)((ulong)pdata->loopback_tx_pkt & 0x03) << 16; + txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + txcmd_a |= MIN_PACKET_SIZE; + + txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_b); + + bufp = (ulong)pdata->loopback_tx_pkt & (~0x3); + wrsz = MIN_PACKET_SIZE + 3; + wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3); + wrsz >>= 2; + + 
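+ /* wrsz is the byte count rounded up to whole 32-bit words,
+ * including the buffer's unaligned lead-in bytes */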
pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + + /* Wait till transmit is done */ + i = 60; + do { + udelay(5); + status = smsc911x_tx_get_txstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to transmit during loopback test"); + continue; + } + if (status & TX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Transmit encountered errors during loopback test"); + continue; + } + + /* Wait till receive is done */ + i = 60; + do { + udelay(5); + status = smsc911x_rx_get_rxstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to receive during loopback test"); + continue; + } + if (status & RX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Receive encountered errors during loopback test"); + continue; + } + + pktlength = ((status & 0x3FFF0000UL) >> 16); + bufp = (ulong)pdata->loopback_rx_pkt; + rdsz = pktlength + 3; + rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3); + rdsz >>= 2; + + pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz); + + if (pktlength != (MIN_PACKET_SIZE + 4)) { + SMSC_WARN(pdata, hw, "Unexpected packet size " + "during loop back test, size=%d, will retry", + pktlength); + } else { + unsigned int j; + int mismatch = 0; + for (j = 0; j < MIN_PACKET_SIZE; j++) { + if (pdata->loopback_tx_pkt[j] + != pdata->loopback_rx_pkt[j]) { + mismatch = 1; + break; + } + } + if (!mismatch) { + SMSC_TRACE(pdata, hw, "Successfully verified " + "loopback packet"); + return 0; + } else { + SMSC_WARN(pdata, hw, "Data mismatch " + "during loop back test, will retry"); + } + } + } + + return -EIO; +} + +static int smsc911x_phy_reset(struct smsc911x_data *pdata) +{ + struct phy_device *phy_dev = pdata->phy_dev; + unsigned int temp; + unsigned int i = 100000; + + BUG_ON(!phy_dev); + BUG_ON(!phy_dev->bus); + + SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset"); + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); + do { + msleep(1); + temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, + MII_BMCR); + } while ((i--) && (temp & BMCR_RESET)); + + if (temp & BMCR_RESET) { + SMSC_WARN(pdata, hw, "PHY reset failed to complete"); + return -EIO; + } + /* Extra delay required because the phy may not be completed with + * its reset when BMCR_RESET is cleared. 
Specs say 256 uS is + * enough delay but using 1ms here to be safe */ + msleep(1); + + return 0; +} + +static int smsc911x_phy_loopbacktest(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + int result = -EIO; + unsigned int i, val; + unsigned long flags; + + /* Initialise tx packet using broadcast destination address */ + eth_broadcast_addr(pdata->loopback_tx_pkt); + + /* Use incrementing source address */ + for (i = 6; i < 12; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + /* Set length type field */ + pdata->loopback_tx_pkt[12] = 0x00; + pdata->loopback_tx_pkt[13] = 0x00; + + for (i = 14; i < MIN_PACKET_SIZE; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + val = smsc911x_reg_read(pdata, HW_CFG); + val &= HW_CFG_TX_FIF_SZ_; + val |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, val); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + smsc911x_reg_write(pdata, RX_CFG, + (u32)((ulong)pdata->loopback_rx_pkt & 0x03) << 8); + + for (i = 0; i < 10; i++) { + /* Set PHY to 10/FD, no ANEG, and loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, + BMCR_LOOPBACK | BMCR_FULLDPLX); + + /* Enable MAC tx/rx, FD */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_ + | MAC_CR_TXEN_ | MAC_CR_RXEN_); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + if (smsc911x_phy_check_loopbackpkt(pdata) == 0) { + result = 0; + break; + } + pdata->resetcount++; + + /* Disable MAC rx */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_reset(pdata); + } + + /* Disable MAC */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + /* Cancel PHY loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); + + smsc911x_reg_write(pdata, TX_CFG, 0); + smsc911x_reg_write(pdata, RX_CFG, 0); + + return result; +} +#endif /* USE_PHY_WORK_AROUND */ + +static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) +{ + struct phy_device *phy_dev = pdata->phy_dev; + u32 afc = smsc911x_reg_read(pdata, AFC_CFG); + u32 flow; + unsigned long flags; + + if (phy_dev->duplex == DUPLEX_FULL) { + u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); + u16 rmtadv = phy_read(phy_dev, MII_LPA); + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_RX) + flow = 0xFFFF0002; + else + flow = 0; + + if (cap & FLOW_CTRL_TX) + afc |= 0xF; + else + afc &= ~0xF; + + SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); + } else { + SMSC_TRACE(pdata, hw, "half duplex"); + flow = 0; + afc |= 0xF; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, FLOW, flow); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_reg_write(pdata, AFC_CFG, afc); +} + +/* Update link mode if anything has changed. Called periodically when the + * PHY is in polling mode, even if nothing has changed. 
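+ * On a duplex change the MAC_CR full duplex bit and the flow control
+ * settings are refreshed; on a carrier change the LED1 GPIO workaround
+ * (internal PHY only) saves or restores the original GPIO_CFG value.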
*/ +static void smsc911x_phy_adjust_link(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + int carrier; + + if (phy_dev->duplex != pdata->last_duplex) { + unsigned int mac_cr; + SMSC_TRACE(pdata, hw, "duplex state has changed"); + + spin_lock_irqsave(&pdata->mac_lock, flags); + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + if (phy_dev->duplex) { + SMSC_TRACE(pdata, hw, + "configuring for full duplex mode"); + mac_cr |= MAC_CR_FDPX_; + } else { + SMSC_TRACE(pdata, hw, + "configuring for half duplex mode"); + mac_cr &= ~MAC_CR_FDPX_; + } + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_update_flowcontrol(pdata); + pdata->last_duplex = phy_dev->duplex; + } + + carrier = netif_carrier_ok(dev); + if (carrier != pdata->last_carrier) { + SMSC_TRACE(pdata, hw, "carrier state has changed"); + if (carrier) { + SMSC_TRACE(pdata, hw, "configuring for carrier OK"); + if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Restore original GPIO configuration */ + pdata->gpio_setting = pdata->gpio_orig_setting; + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } else { + SMSC_TRACE(pdata, hw, "configuring for no carrier"); + /* Check global setting that LED1 + * usage is 10/100 indicator */ + pdata->gpio_setting = smsc911x_reg_read(pdata, + GPIO_CFG); + if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Force 10/100 LED off, after saving + * original GPIO configuration */ + pdata->gpio_orig_setting = pdata->gpio_setting; + + pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_; + pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_ + | GPIO_CFG_GPIODIR0_ + | GPIO_CFG_GPIOD0_); + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } + pdata->last_carrier = carrier; + } +} + +static int smsc911x_mii_probe(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phydev = NULL; + int ret; + + /* find the first phy */ + phydev = phy_find_first(pdata->mii_bus); + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", + phydev->addr, phydev->phy_id); + + ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link, + pdata->config.phy_interface); + + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + /* mask with MAC supported features */ + phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phydev->advertising = phydev->supported; + + pdata->phy_dev = phydev; + pdata->last_duplex = -1; + pdata->last_carrier = -1; + +#ifdef USE_PHY_WORK_AROUND + if (smsc911x_phy_loopbacktest(dev) < 0) { + SMSC_WARN(pdata, hw, "Failed Loop Back Test"); + return -ENODEV; + } + SMSC_TRACE(pdata, hw, "Passed Loop Back Test"); +#endif /* USE_PHY_WORK_AROUND */ + + SMSC_TRACE(pdata, hw, "phy initialised successfully"); + return 0; +} + +static int smsc911x_mii_init(struct platform_device *pdev, + struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + int err = -ENXIO, i; + + pdata->mii_bus = mdiobus_alloc(); + if (!pdata->mii_bus) { + err = -ENOMEM; + goto err_out_1; + } + + pdata->mii_bus->name = SMSC_MDIONAME; + snprintf(pdata->mii_bus->id, 
MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + pdata->mii_bus->priv = pdata; + pdata->mii_bus->read = smsc911x_mii_read; + pdata->mii_bus->write = smsc911x_mii_write; + pdata->mii_bus->irq = pdata->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + pdata->mii_bus->irq[i] = PHY_POLL; + + pdata->mii_bus->parent = &pdev->dev; + + switch (pdata->idrev & 0xFFFF0000) { + case 0x01170000: + case 0x01150000: + case 0x117A0000: + case 0x115A0000: + /* External PHY supported, try to autodetect */ + smsc911x_phy_initialise_external(pdata); + break; + default: + SMSC_TRACE(pdata, hw, "External PHY is not supported, " + "using internal PHY"); + pdata->using_extphy = 0; + break; + } + + if (!pdata->using_extphy) { + /* Mask all PHYs except ID 1 (internal) */ + pdata->mii_bus->phy_mask = ~(1 << 1); + } + + if (mdiobus_register(pdata->mii_bus)) { + SMSC_WARN(pdata, probe, "Error registering mii bus"); + goto err_out_free_bus_2; + } + + if (smsc911x_mii_probe(dev) < 0) { + SMSC_WARN(pdata, probe, "Error registering mii bus"); + goto err_out_unregister_bus_3; + } + + return 0; + +err_out_unregister_bus_3: + mdiobus_unregister(pdata->mii_bus); +err_out_free_bus_2: + mdiobus_free(pdata->mii_bus); +err_out_1: + return err; +} + +/* Gets the number of tx statuses in the fifo */ +static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata) +{ + return (smsc911x_reg_read(pdata, TX_FIFO_INF) + & TX_FIFO_INF_TSUSED_) >> 16; +} + +/* Reads tx statuses and increments counters where necessary */ +static void smsc911x_tx_update_txcounters(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int tx_stat; + + while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) { + if (unlikely(tx_stat & 0x80000000)) { + /* In this driver the packet tag is used as the packet + * length. Since a packet length can never reach the + * size of 0x8000, this bit is reserved. It is worth + * noting that the "reserved bit" in the warning above + * does not reference a hardware defined reserved bit + * but rather a driver defined one. 
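+ * The tag itself is assigned in smsc911x_hard_start_xmit(), which
+ * places skb->len in the upper 16 bits of TX_CMD_B.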
+ */ + SMSC_WARN(pdata, hw, "Packet tag reserved bit is high"); + } else { + if (unlikely(tx_stat & TX_STS_ES_)) { + dev->stats.tx_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += (tx_stat >> 16); + } + if (unlikely(tx_stat & TX_STS_EXCESS_COL_)) { + dev->stats.collisions += 16; + dev->stats.tx_aborted_errors += 1; + } else { + dev->stats.collisions += + ((tx_stat >> 3) & 0xF); + } + if (unlikely(tx_stat & TX_STS_LOST_CARRIER_)) + dev->stats.tx_carrier_errors += 1; + if (unlikely(tx_stat & TX_STS_LATE_COL_)) { + dev->stats.collisions++; + dev->stats.tx_aborted_errors++; + } + } + } +} + +/* Increments the Rx error counters */ +static void +smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) +{ + int crc_err = 0; + + if (unlikely(rxstat & RX_STS_ES_)) { + dev->stats.rx_errors++; + if (unlikely(rxstat & RX_STS_CRC_ERR_)) { + dev->stats.rx_crc_errors++; + crc_err = 1; + } + } + if (likely(!crc_err)) { + if (unlikely((rxstat & RX_STS_FRAME_TYPE_) && + (rxstat & RX_STS_LENGTH_ERR_))) + dev->stats.rx_length_errors++; + if (rxstat & RX_STS_MCAST_) + dev->stats.multicast++; + } +} + +/* Quickly dumps bad packets */ +static void +smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords) +{ + if (likely(pktwords >= 4)) { + unsigned int timeout = 500; + unsigned int val; + smsc911x_reg_write(pdata, RX_DP_CTRL, RX_DP_CTRL_RX_FFWD_); + do { + udelay(1); + val = smsc911x_reg_read(pdata, RX_DP_CTRL); + } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout); + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, hw, "Timed out waiting for " + "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val); + } else { + unsigned int temp; + while (pktwords--) + temp = smsc911x_reg_read(pdata, RX_DATA_FIFO); + } +} + +/* NAPI poll function */ +static int smsc911x_poll(struct napi_struct *napi, int budget) +{ + struct smsc911x_data *pdata = + container_of(napi, struct smsc911x_data, napi); + struct net_device *dev = pdata->dev; + int npackets = 0; + + while (npackets < budget) { + unsigned int pktlength; + unsigned int pktwords; + struct sk_buff *skb; + unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata); + + if (!rxstat) { + unsigned int temp; + /* We processed all packets available. Tell NAPI it can + * stop polling then re-enable rx interrupts */ + smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); + napi_complete(napi); + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_RSFL_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + break; + } + + /* Count packet for NAPI scheduling, even if it has an error. 
+ * Error packets still require cycles to discard */ + npackets++; + + pktlength = ((rxstat & 0x3FFF0000) >> 16); + pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2; + smsc911x_rx_counterrors(dev, rxstat); + + if (unlikely(rxstat & RX_STS_ES_)) { + SMSC_WARN(pdata, rx_err, + "Discarding packet with error bit set"); + /* Packet has an error, discard it and continue with + * the next */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + continue; + } + + skb = netdev_alloc_skb(dev, pktwords << 2); + if (unlikely(!skb)) { + SMSC_WARN(pdata, rx_err, + "Unable to allocate skb for rx packet"); + /* Drop the packet and stop this polling iteration */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + break; + } + + pdata->ops->rx_readfifo(pdata, + (unsigned int *)skb->data, pktwords); + + /* Align IP on 16B boundary */ + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pktlength - 4); + skb->protocol = eth_type_trans(skb, dev); + skb_checksum_none_assert(skb); + netif_receive_skb(skb); + + /* Update counters */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += (pktlength - 4); + } + + /* Return total received packets */ + return npackets; +} + +/* Returns hash bit number for given MAC address + * Example: + * 01 00 5E 00 00 01 -> returns bit number 31 */ +static unsigned int smsc911x_hash(char addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; +} + +static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata) +{ + /* Performs the multicast & mac_cr update. This is called when + * safe on the current hardware, and with the mac_lock held */ + unsigned int mac_cr; + + SMSC_ASSERT_MAC_LOCK(pdata); + + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= pdata->set_bits_mask; + mac_cr &= ~(pdata->clear_bits_mask); + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + smsc911x_mac_write(pdata, HASHH, pdata->hashhi); + smsc911x_mac_write(pdata, HASHL, pdata->hashlo); + SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", + mac_cr, pdata->hashhi, pdata->hashlo); +} + +static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) +{ + unsigned int mac_cr; + + /* This function is only called for older LAN911x devices + * (revA or revB), where MAC_CR, HASHH and HASHL should not + * be modified during Rx - newer devices immediately update the + * registers. + * + * This is called from interrupt context */ + + spin_lock(&pdata->mac_lock); + + /* Check Rx has stopped */ + if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_) + SMSC_WARN(pdata, drv, "Rx not stopped"); + + /* Perform the update - safe to do now Rx has stopped */ + smsc911x_rx_multicast_update(pdata); + + /* Re-enable Rx */ + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= MAC_CR_RXEN_; + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + + pdata->multicast_update_pending = 0; + + spin_unlock(&pdata->mac_lock); +} + +static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + /* If the internal PHY is in General Power-Down mode, all, except the + * management interface, is powered-down and stays in that condition as + * long as Phy register bit 0.11 is HIGH. + * + * In that case, clear the bit 0.11, so the PHY powers up and we can + * access to the phy registers. 
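+ * (Register 0 bit 11 is the standard BMCR power-down bit, BMCR_PDOWN,
+ * which is what is tested and cleared below.)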
+ */ + rc = phy_read(pdata->phy_dev, MII_BMCR); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* If the PHY general power-down bit is not set is not necessary to + * disable the general power down-mode. + */ + if (rc & BMCR_PDOWN) { + rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + + usleep_range(1000, 1500); + } + + return 0; +} + +static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* Only disable if energy detect mode is already enabled */ + if (rc & MII_LAN83C185_EDPWRDOWN) { + /* Disable energy detect mode for this SMSC Transceivers */ + rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc & (~MII_LAN83C185_EDPWRDOWN)); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + /* Allow PHY to wakeup */ + mdelay(2); + } + + return 0; +} + +static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* Only enable if energy detect mode is already disabled */ + if (!(rc & MII_LAN83C185_EDPWRDOWN)) { + /* Enable energy detect mode for this SMSC Transceivers */ + rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc | MII_LAN83C185_EDPWRDOWN); + + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + } + return 0; +} + +static int smsc911x_soft_reset(struct smsc911x_data *pdata) +{ + unsigned int timeout; + unsigned int temp; + int ret; + + /* + * Make sure to power-up the PHY chip before doing a reset, otherwise + * the reset fails. + */ + ret = smsc911x_phy_general_power_up(pdata); + if (ret) { + SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip"); + return ret; + } + + /* + * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that + * are initialized in a Energy Detect Power-Down mode that prevents + * the MAC chip to be software reseted. So we have to wakeup the PHY + * before. 
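+ * These are the generation 4 parts, hence the generation checks
+ * around the energy detect calls below.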
+ */ + if (pdata->generation == 4) { + ret = smsc911x_phy_disable_energy_detect(pdata); + + if (ret) { + SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip"); + return ret; + } + } + + /* Reset the LAN911x */ + smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_); + timeout = 10; + do { + udelay(10); + temp = smsc911x_reg_read(pdata, HW_CFG); + } while ((--timeout) && (temp & HW_CFG_SRST_)); + + if (unlikely(temp & HW_CFG_SRST_)) { + SMSC_WARN(pdata, drv, "Failed to complete reset"); + return -EIO; + } + + if (pdata->generation == 4) { + ret = smsc911x_phy_enable_energy_detect(pdata); + + if (ret) { + SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip"); + return ret; + } + } + + return 0; +} + +/* Sets the device MAC address to dev_addr, called with mac_lock held */ +static void +smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) +{ + u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; + u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + + SMSC_ASSERT_MAC_LOCK(pdata); + + smsc911x_mac_write(pdata, ADDRH, mac_high16); + smsc911x_mac_write(pdata, ADDRL, mac_low32); +} + +static void smsc911x_disable_irq_chip(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); +} + +static int smsc911x_open(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int timeout; + unsigned int temp; + unsigned int intcfg; + + /* if the phy is not yet registered, retry later*/ + if (!pdata->phy_dev) { + SMSC_WARN(pdata, hw, "phy_dev is NULL"); + return -EAGAIN; + } + + /* Reset the LAN911x */ + if (smsc911x_soft_reset(pdata)) { + SMSC_WARN(pdata, hw, "soft reset failed"); + return -EIO; + } + + smsc911x_reg_write(pdata, HW_CFG, 0x00050000); + smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); + + /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); + spin_unlock_irq(&pdata->mac_lock); + + /* Make sure EEPROM has finished loading before setting GPIO_CFG */ + timeout = 50; + while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && + --timeout) { + udelay(10); + } + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, ifup, + "Timed out waiting for EEPROM busy bit to clear"); + + smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000); + + /* The soft reset above cleared the device's MAC address, + * restore it from local copy (set in probe) */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + /* Initialise irqs, but leave all sources disabled */ + smsc911x_disable_irq_chip(dev); + + /* Set interrupt deassertion to 100uS */ + intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); + + if (pdata->config.irq_polarity) { + SMSC_TRACE(pdata, ifup, "irq polarity: active high"); + intcfg |= INT_CFG_IRQ_POL_; + } else { + SMSC_TRACE(pdata, ifup, "irq polarity: active low"); + } + + if (pdata->config.irq_type) { + SMSC_TRACE(pdata, ifup, "irq type: push-pull"); + intcfg |= INT_CFG_IRQ_TYPE_; + } else { + SMSC_TRACE(pdata, ifup, "irq type: open drain"); + } + + smsc911x_reg_write(pdata, INT_CFG, intcfg); + + SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq); + pdata->software_irq_signal = 0; + smp_wmb(); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_SW_INT_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + + timeout = 
1000; + while (timeout--) { + if (pdata->software_irq_signal) + break; + msleep(1); + } + + if (!pdata->software_irq_signal) { + netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", + dev->irq); + return -ENODEV; + } + SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", + dev->irq); + + netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", + (unsigned long)pdata->ioaddr, dev->irq); + + /* Reset the last known duplex and carrier */ + pdata->last_duplex = -1; + pdata->last_carrier = -1; + + /* Bring the PHY up */ + phy_start(pdata->phy_dev); + + temp = smsc911x_reg_read(pdata, HW_CFG); + /* Preserve TX FIFO size and external PHY configuration */ + temp &= (HW_CFG_TX_FIF_SZ_|0x00000FFF); + temp |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, temp); + + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + temp &= ~(FIFO_INT_RX_STS_LEVEL_); + smsc911x_reg_write(pdata, FIFO_INT, temp); + + /* set RX Data offset to 2 bytes for alignment */ + smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8)); + + /* enable NAPI polling before enabling RX interrupts */ + napi_enable(&pdata->napi); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + + spin_lock_irq(&pdata->mac_lock); + temp = smsc911x_mac_read(pdata, MAC_CR); + temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); + smsc911x_mac_write(pdata, MAC_CR, temp); + spin_unlock_irq(&pdata->mac_lock); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + + netif_start_queue(dev); + return 0; +} + +/* Entry point for stopping the interface */ +static int smsc911x_stop(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int temp; + + /* Disable all device interrupts */ + temp = smsc911x_reg_read(pdata, INT_CFG); + temp &= ~INT_CFG_IRQ_EN_; + smsc911x_reg_write(pdata, INT_CFG, temp); + + /* Stop Tx and Rx polling */ + netif_stop_queue(dev); + napi_disable(&pdata->napi); + + /* At this point all Rx and Tx activity is stopped */ + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + smsc911x_tx_update_txcounters(dev); + + /* Bring the PHY down */ + if (pdata->phy_dev) + phy_stop(pdata->phy_dev); + + SMSC_TRACE(pdata, ifdown, "Interface stopped"); + return 0; +} + +/* Entry point for transmitting a packet */ +static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int freespace; + unsigned int tx_cmd_a; + unsigned int tx_cmd_b; + unsigned int temp; + u32 wrsz; + ulong bufp; + + freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_; + + if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD)) + SMSC_WARN(pdata, tx_err, + "Tx data fifo low, space available: %d", freespace); + + /* Word alignment adjustment */ + tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16; + tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + tx_cmd_a |= (unsigned int)skb->len; + + tx_cmd_b = ((unsigned int)skb->len) << 16; + tx_cmd_b |= (unsigned int)skb->len; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_b); + + bufp = (ulong)skb->data & (~0x3); + wrsz = (u32)skb->len + 3; + wrsz += (u32)((ulong)skb->data & 0x3); + wrsz >>= 2; + + pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + freespace -= (skb->len + 32); + skb_tx_timestamp(skb); + dev_consume_skb_any(skb); + + if 
(unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) + smsc911x_tx_update_txcounters(dev); + + if (freespace < TX_FIFO_LOW_THRESHOLD) { + netif_stop_queue(dev); + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp &= 0x00FFFFFF; + temp |= 0x32000000; + smsc911x_reg_write(pdata, FIFO_INT, temp); + } + + return NETDEV_TX_OK; +} + +/* Entry point for getting status counters */ +static struct net_device_stats *smsc911x_get_stats(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + smsc911x_tx_update_txcounters(dev); + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + return &dev->stats; +} + +/* Entry point for setting addressing modes */ +static void smsc911x_set_multicast_list(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned long flags; + + if (dev->flags & IFF_PROMISC) { + /* Enabling promiscuous mode */ + pdata->set_bits_mask = MAC_CR_PRMS_; + pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (dev->flags & IFF_ALLMULTI) { + /* Enabling all multicast mode */ + pdata->set_bits_mask = MAC_CR_MCPAS_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (!netdev_mc_empty(dev)) { + /* Enabling specific multicast addresses */ + unsigned int hash_high = 0; + unsigned int hash_low = 0; + struct netdev_hw_addr *ha; + + pdata->set_bits_mask = MAC_CR_HPFILT_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); + + netdev_for_each_mc_addr(ha, dev) { + unsigned int bitnum = smsc911x_hash(ha->addr); + unsigned int mask = 0x01 << (bitnum & 0x1F); + + if (bitnum & 0x20) + hash_high |= mask; + else + hash_low |= mask; + } + + pdata->hashhi = hash_high; + pdata->hashlo = hash_low; + } else { + /* Enabling local MAC address only */ + pdata->set_bits_mask = 0; + pdata->clear_bits_mask = + (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + + if (pdata->generation <= 1) { + /* Older hardware revision - cannot change these flags while + * receiving data */ + if (!pdata->multicast_update_pending) { + unsigned int temp; + SMSC_TRACE(pdata, hw, "scheduling mcast update"); + pdata->multicast_update_pending = 1; + + /* Request the hardware to stop, then perform the + * update when we get an RX_STOP interrupt */ + temp = smsc911x_mac_read(pdata, MAC_CR); + temp &= ~(MAC_CR_RXEN_); + smsc911x_mac_write(pdata, MAC_CR, temp); + } else { + /* There is another update pending, this should now + * use the newer values */ + } + } else { + /* Newer hardware revision - can write immediately */ + smsc911x_rx_multicast_update(pdata); + } + + spin_unlock_irqrestore(&pdata->mac_lock, flags); +} + +static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smsc911x_data *pdata = netdev_priv(dev); + u32 intsts = smsc911x_reg_read(pdata, INT_STS); + u32 inten = smsc911x_reg_read(pdata, INT_EN); + int serviced = IRQ_NONE; + u32 temp; + + if (unlikely(intsts & inten & INT_STS_SW_INT_)) { + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_SW_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); + pdata->software_irq_signal = 1; + smp_wmb(); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { + /* Called when there is a multicast update scheduled and + * it is now safe to complete the update */ + 
SMSC_TRACE(pdata, intr, "RX Stop interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); + if (pdata->multicast_update_pending) + smsc911x_rx_multicast_update_workaround(pdata); + serviced = IRQ_HANDLED; + } + + if (intsts & inten & INT_STS_TDFA_) { + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + smsc911x_reg_write(pdata, FIFO_INT, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); + netif_wake_queue(dev); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXE_)) { + SMSC_TRACE(pdata, intr, "RX Error interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); + serviced = IRQ_HANDLED; + } + + if (likely(intsts & inten & INT_STS_RSFL_)) { + if (likely(napi_schedule_prep(&pdata->napi))) { + /* Disable Rx interrupts */ + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_RSFL_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + /* Schedule a NAPI poll */ + __napi_schedule(&pdata->napi); + } else { + SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); + } + serviced = IRQ_HANDLED; + } + + return serviced; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void smsc911x_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + smsc911x_irqhandler(0, dev); + enable_irq(dev->irq); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int smsc911x_set_mac_address(struct net_device *dev, void *p) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct sockaddr *addr = p; + + /* On older hardware revisions we cannot change the mac address + * registers while receiving data. Newer devices can safely change + * this at any time. */ + if (pdata->generation <= 1 && netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; +} + +/* Standard ioctls for mii-tool */ +static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + if (!netif_running(dev) || !pdata->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); +} + +static int +smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + return phy_ethtool_gset(pdata->phy_dev, cmd); +} + +static int +smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_ethtool_sset(pdata->phy_dev, cmd); +} + +static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); + strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int smsc911x_ethtool_nwayreset(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_start_aneg(pdata->phy_dev); +} + +static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + return pdata->msg_enable; +} + +static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + 
pdata->msg_enable = level; +} + +static int smsc911x_ethtool_getregslen(struct net_device *dev) +{ + return (((E2P_DATA - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) * + sizeof(u32); +} + +static void +smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + unsigned int i; + unsigned int j = 0; + u32 *data = buf; + + regs->version = pdata->idrev; + for (i = ID_REV; i <= E2P_DATA; i += (sizeof(u32))) + data[j++] = smsc911x_reg_read(pdata, i); + + for (i = MAC_CR; i <= WUCSR; i++) { + spin_lock_irqsave(&pdata->mac_lock, flags); + data[j++] = smsc911x_mac_read(pdata, i); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + } + + for (i = 0; i <= 31; i++) + data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); +} + +static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) +{ + unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG); + temp &= ~GPIO_CFG_EEPR_EN_; + smsc911x_reg_write(pdata, GPIO_CFG, temp); + msleep(1); +} + +static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op) +{ + int timeout = 100; + u32 e2cmd; + + SMSC_TRACE(pdata, drv, "op 0x%08x", op); + if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + SMSC_WARN(pdata, drv, "Busy at start"); + return -EBUSY; + } + + e2cmd = op | E2P_CMD_EPC_BUSY_; + smsc911x_reg_write(pdata, E2P_CMD, e2cmd); + + do { + msleep(1); + e2cmd = smsc911x_reg_read(pdata, E2P_CMD); + } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); + + if (!timeout) { + SMSC_TRACE(pdata, drv, "TIMED OUT"); + return -EAGAIN; + } + + if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { + SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation"); + return -EINVAL; + } + + return 0; +} + +static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata, + u8 address, u8 *data) +{ + u32 op = E2P_CMD_EPC_CMD_READ_ | address; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x", address); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) + data[address] = smsc911x_reg_read(pdata, E2P_DATA); + + return ret; +} + +static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata, + u8 address, u8 data) +{ + u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; + u32 temp; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) { + op = E2P_CMD_EPC_CMD_WRITE_ | address; + smsc911x_reg_write(pdata, E2P_DATA, (u32)data); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + ret = smsc911x_eeprom_send_cmd(pdata, op); + } + + return ret; +} + +static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev) +{ + return SMSC911X_EEPROM_SIZE; +} + +static int smsc911x_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + u8 eeprom_data[SMSC911X_EEPROM_SIZE]; + int len; + int i; + + smsc911x_eeprom_enable_access(pdata); + + len = min(eeprom->len, SMSC911X_EEPROM_SIZE); + for (i = 0; i < len; i++) { + int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data); + if (ret < 0) { + eeprom->len = 0; + return ret; + } + } + + memcpy(data, &eeprom_data[eeprom->offset], len); + eeprom->len = len; + return 0; +} + +static int smsc911x_ethtool_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int ret; + struct smsc911x_data 
*pdata = netdev_priv(dev); + + smsc911x_eeprom_enable_access(pdata); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_); + ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_); + + /* Single byte write, according to man page */ + eeprom->len = 1; + + return ret; +} + +static const struct ethtool_ops smsc911x_ethtool_ops = { + .get_settings = smsc911x_ethtool_getsettings, + .set_settings = smsc911x_ethtool_setsettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = smsc911x_ethtool_getdrvinfo, + .nway_reset = smsc911x_ethtool_nwayreset, + .get_msglevel = smsc911x_ethtool_getmsglevel, + .set_msglevel = smsc911x_ethtool_setmsglevel, + .get_regs_len = smsc911x_ethtool_getregslen, + .get_regs = smsc911x_ethtool_getregs, + .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, + .get_eeprom = smsc911x_ethtool_get_eeprom, + .set_eeprom = smsc911x_ethtool_set_eeprom, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static const struct net_device_ops smsc911x_netdev_ops = { + .ndo_open = smsc911x_open, + .ndo_stop = smsc911x_stop, + .ndo_start_xmit = smsc911x_hard_start_xmit, + .ndo_get_stats = smsc911x_get_stats, + .ndo_set_rx_mode = smsc911x_set_multicast_list, + .ndo_do_ioctl = smsc911x_do_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = smsc911x_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smsc911x_poll_controller, +#endif +}; + +/* copies the current mac address from hardware to dev->dev_addr */ +static void smsc911x_read_mac_address(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); + u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); + + dev->dev_addr[0] = (u8)(mac_low32); + dev->dev_addr[1] = (u8)(mac_low32 >> 8); + dev->dev_addr[2] = (u8)(mac_low32 >> 16); + dev->dev_addr[3] = (u8)(mac_low32 >> 24); + dev->dev_addr[4] = (u8)(mac_high16); + dev->dev_addr[5] = (u8)(mac_high16 >> 8); +} + +/* Initializing private device structures, only called from probe */ +static int smsc911x_init(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int byte_test, mask; + unsigned int to = 100; + + SMSC_TRACE(pdata, probe, "Driver Parameters:"); + SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", + (unsigned long)pdata->ioaddr); + SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq); + SMSC_TRACE(pdata, probe, "PHY will be autodetected."); + + spin_lock_init(&pdata->dev_lock); + spin_lock_init(&pdata->mac_lock); + + if (pdata->ioaddr == NULL) { + SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000"); + return -ENODEV; + } + + /* + * poll the READY bit in PMT_CTRL. Any other access to the device is + * forbidden while this bit isn't set. Try for 100ms + * + * Note that this test is done before the WORD_SWAP register is + * programmed. So in some configurations the READY bit is at 16 before + * WORD_SWAP is written to. This issue is worked around by waiting + * until either bit 0 or bit 16 gets set in PMT_CTRL. + * + * SMSC has confirmed that checking bit 16 (marked as reserved in + * the datasheet) is fine since these bits "will either never be set + * or can only go high after READY does (so also indicate the device + * is ready)". 
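+ *
+ * Concretely: PMT_CTRL_READY_ is 0x00000001 and swahw32() swaps the two
+ * 16-bit halfwords of a 32-bit word, so swahw32(PMT_CTRL_READY_) is
+ * 0x00010000 and the mask built below is 0x00010001, meaning the poll
+ * succeeds as soon as either bit 0 or bit 16 reads back as set.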
+ */ + + mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_); + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to) + udelay(1000); + + if (to == 0) { + netdev_err(dev, "Device not READY in 100ms aborting\n"); + return -ENODEV; + } + + /* Check byte ordering */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); + if (byte_test == 0x43218765) { + SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, " + "applying WORD_SWAP"); + smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff); + + /* 1 dummy read of BYTE_TEST is needed after a write to + * WORD_SWAP before its contents are valid */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + } + + if (byte_test != 0x87654321) { + SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test); + if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) { + SMSC_WARN(pdata, probe, + "top 16 bits equal to bottom 16 bits"); + SMSC_TRACE(pdata, probe, + "This may mean the chip is set " + "for 32 bit while the bus is reading 16 bit"); + } + return -ENODEV; + } + + /* Default generation to zero (all workarounds apply) */ + pdata->generation = 0; + + pdata->idrev = smsc911x_reg_read(pdata, ID_REV); + switch (pdata->idrev & 0xFFFF0000) { + case 0x01180000: + case 0x01170000: + case 0x01160000: + case 0x01150000: + case 0x218A0000: + /* LAN911[5678] family */ + pdata->generation = pdata->idrev & 0x0000FFFF; + break; + + case 0x118A0000: + case 0x117A0000: + case 0x116A0000: + case 0x115A0000: + /* LAN921[5678] family */ + pdata->generation = 3; + break; + + case 0x92100000: + case 0x92110000: + case 0x92200000: + case 0x92210000: + /* LAN9210/LAN9211/LAN9220/LAN9221 */ + pdata->generation = 4; + break; + + default: + SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X", + pdata->idrev); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, + "LAN911x identified, idrev: 0x%08X, generation: %d", + pdata->idrev, pdata->generation); + + if (pdata->generation == 0) + SMSC_WARN(pdata, probe, + "This driver is not intended for this chip revision"); + + /* workaround for platforms without an eeprom, where the mac address + * is stored elsewhere and set by the bootloader. 
This saves the + * mac address before resetting the device */ + if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) { + spin_lock_irq(&pdata->mac_lock); + smsc911x_read_mac_address(dev); + spin_unlock_irq(&pdata->mac_lock); + } + + /* Reset the LAN911x */ + if (smsc911x_soft_reset(pdata)) + return -ENODEV; + + dev->flags |= IFF_MULTICAST; + netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); + dev->netdev_ops = &smsc911x_netdev_ops; + dev->ethtool_ops = &smsc911x_ethtool_ops; + + return 0; +} + +static int smsc911x_drv_remove(struct platform_device *pdev) +{ + struct net_device *dev; + struct smsc911x_data *pdata; + struct resource *res; + + dev = platform_get_drvdata(pdev); + BUG_ON(!dev); + pdata = netdev_priv(dev); + BUG_ON(!pdata); + BUG_ON(!pdata->ioaddr); + BUG_ON(!pdata->phy_dev); + + SMSC_TRACE(pdata, ifdown, "Stopping driver"); + + phy_disconnect(pdata->phy_dev); + pdata->phy_dev = NULL; + mdiobus_unregister(pdata->mii_bus); + mdiobus_free(pdata->mii_bus); + + unregister_netdev(dev); + free_irq(dev->irq, dev); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smsc911x-memory"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + release_mem_region(res->start, resource_size(res)); + + iounmap(pdata->ioaddr); + + (void)smsc911x_disable_resources(pdev); + smsc911x_free_resources(pdev); + + free_netdev(dev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +/* standard register acces */ +static const struct smsc911x_ops standard_smsc911x_ops = { + .reg_read = __smsc911x_reg_read, + .reg_write = __smsc911x_reg_write, + .rx_readfifo = smsc911x_rx_readfifo, + .tx_writefifo = smsc911x_tx_writefifo, +}; + +/* shifted register access */ +static const struct smsc911x_ops shifted_smsc911x_ops = { + .reg_read = __smsc911x_reg_read_shift, + .reg_write = __smsc911x_reg_write_shift, + .rx_readfifo = smsc911x_rx_readfifo_shift, + .tx_writefifo = smsc911x_tx_writefifo_shift, +}; + +#ifdef CONFIG_OF +static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config, + struct device_node *np) +{ + const char *mac; + u32 width = 0; + + if (!np) + return -ENODEV; + + config->phy_interface = of_get_phy_mode(np); + + mac = of_get_mac_address(np); + if (mac) + memcpy(config->mac, mac, ETH_ALEN); + + of_property_read_u32(np, "reg-shift", &config->shift); + + of_property_read_u32(np, "reg-io-width", &width); + if (width == 4) + config->flags |= SMSC911X_USE_32BIT; + else + config->flags |= SMSC911X_USE_16BIT; + + if (of_get_property(np, "smsc,irq-active-high", NULL)) + config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH; + + if (of_get_property(np, "smsc,irq-push-pull", NULL)) + config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL; + + if (of_get_property(np, "smsc,force-internal-phy", NULL)) + config->flags |= SMSC911X_FORCE_INTERNAL_PHY; + + if (of_get_property(np, "smsc,force-external-phy", NULL)) + config->flags |= SMSC911X_FORCE_EXTERNAL_PHY; + + if (of_get_property(np, "smsc,save-mac-address", NULL)) + config->flags |= SMSC911X_SAVE_MAC_ADDRESS; + + return 0; +} +#else +static inline int smsc911x_probe_config_dt( + struct smsc911x_platform_config *config, + struct device_node *np) +{ + return -ENODEV; +} +#endif /* CONFIG_OF */ + +static int smsc911x_drv_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct net_device *dev; + struct smsc911x_data *pdata; + struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); + struct resource *res; + unsigned int 
intcfg = 0; + int res_size, irq, irq_flags; + int retval; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smsc911x-memory"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + pr_warn("Could not allocate resource\n"); + retval = -ENODEV; + goto out_0; + } + res_size = resource_size(res); + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + pr_warn("Could not allocate irq resource\n"); + retval = -ENODEV; + goto out_0; + } + + if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { + retval = -EBUSY; + goto out_0; + } + + dev = alloc_etherdev(sizeof(struct smsc911x_data)); + if (!dev) { + retval = -ENOMEM; + goto out_release_io_1; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + pdata = netdev_priv(dev); + dev->irq = irq; + irq_flags = irq_get_trigger_type(irq); + pdata->ioaddr = ioremap_nocache(res->start, res_size); + + pdata->dev = dev; + pdata->msg_enable = ((1 << debug) - 1); + + platform_set_drvdata(pdev, dev); + + retval = smsc911x_request_resources(pdev); + if (retval) + goto out_request_resources_fail; + + retval = smsc911x_enable_resources(pdev); + if (retval) + goto out_enable_resources_fail; + + if (pdata->ioaddr == NULL) { + SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); + retval = -ENOMEM; + goto out_disable_resources; + } + + retval = smsc911x_probe_config_dt(&pdata->config, np); + if (retval && config) { + /* copy config parameters across to pdata */ + memcpy(&pdata->config, config, sizeof(pdata->config)); + retval = 0; + } + + if (retval) { + SMSC_WARN(pdata, probe, "Error smsc911x config not found"); + goto out_disable_resources; + } + + /* assume standard, non-shifted, access to HW registers */ + pdata->ops = &standard_smsc911x_ops; + /* apply the right access if shifting is needed */ + if (pdata->config.shift) + pdata->ops = &shifted_smsc911x_ops; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + retval = smsc911x_init(dev); + if (retval < 0) + goto out_disable_resources; + + /* configure irq polarity and type before connecting isr */ + if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) + intcfg |= INT_CFG_IRQ_POL_; + + if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) + intcfg |= INT_CFG_IRQ_TYPE_; + + smsc911x_reg_write(pdata, INT_CFG, intcfg); + + /* Ensure interrupts are globally disabled before connecting ISR */ + smsc911x_disable_irq_chip(dev); + + retval = request_irq(dev->irq, smsc911x_irqhandler, + irq_flags | IRQF_SHARED, dev->name, dev); + if (retval) { + SMSC_WARN(pdata, probe, + "Unable to claim requested irq: %d", dev->irq); + goto out_disable_resources; + } + + netif_carrier_off(dev); + + retval = register_netdev(dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i registering device", retval); + goto out_free_irq; + } else { + SMSC_TRACE(pdata, probe, + "Network interface: \"%s\"", dev->name); + } + + retval = smsc911x_mii_init(pdev, dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); + goto out_unregister_netdev_5; + } + + spin_lock_irq(&pdata->mac_lock); + + /* Check if mac address has been specified when bringing interface up */ + if (is_valid_ether_addr(dev->dev_addr)) { + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + SMSC_TRACE(pdata, probe, + "MAC Address is specified by configuration"); + } else if (is_valid_ether_addr(pdata->config.mac)) { + memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN); + SMSC_TRACE(pdata, probe, + "MAC Address specified by platform data"); + } else { + /* 
Try reading mac address from device. if EEPROM is present + * it will already have been set */ + smsc_get_mac(dev); + + if (is_valid_ether_addr(dev->dev_addr)) { + /* eeprom values are valid so use them */ + SMSC_TRACE(pdata, probe, + "Mac Address is read from LAN911x EEPROM"); + } else { + /* eeprom values are invalid, generate random MAC */ + eth_hw_addr_random(dev); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + SMSC_TRACE(pdata, probe, + "MAC Address is set to eth_random_addr"); + } + } + + spin_unlock_irq(&pdata->mac_lock); + + netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; + +out_unregister_netdev_5: + unregister_netdev(dev); +out_free_irq: + free_irq(dev->irq, dev); +out_disable_resources: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + (void)smsc911x_disable_resources(pdev); +out_enable_resources_fail: + smsc911x_free_resources(pdev); +out_request_resources_fail: + iounmap(pdata->ioaddr); + free_netdev(dev); +out_release_io_1: + release_mem_region(res->start, resource_size(res)); +out_0: + return retval; +} + +#ifdef CONFIG_PM +/* This implementation assumes the devices remains powered on its VDDVARIO + * pins during suspend. */ + +/* TODO: implement freeze/thaw callbacks for hibernation.*/ + +static int smsc911x_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct smsc911x_data *pdata = netdev_priv(ndev); + + /* enable wake on LAN, energy detection and the external PME + * signal. */ + smsc911x_reg_write(pdata, PMT_CTRL, + PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ | + PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_); + + return 0; +} + +static int smsc911x_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct smsc911x_data *pdata = netdev_priv(ndev); + unsigned int to = 100; + + /* Note 3.11 from the datasheet: + * "When the LAN9220 is in a power saving state, a write of any + * data to the BYTE_TEST register will wake-up the device." + */ + smsc911x_reg_write(pdata, BYTE_TEST, 0); + + /* poll the READY bit in PMT_CTRL. Any other access to the device is + * forbidden while this bit isn't set. Try for 100ms and return -EIO + * if it failed. */ + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) + udelay(1000); + + return (to == 0) ? 
-EIO : 0; +} + +static const struct dev_pm_ops smsc911x_pm_ops = { + .suspend = smsc911x_suspend, + .resume = smsc911x_resume, +}; + +#define SMSC911X_PM_OPS (&smsc911x_pm_ops) + +#else +#define SMSC911X_PM_OPS NULL +#endif + +#ifdef CONFIG_OF +static const struct of_device_id smsc911x_dt_ids[] = { + { .compatible = "smsc,lan9115", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); +#endif + +static struct platform_driver smsc911x_driver = { + .probe = smsc911x_drv_probe, + .remove = smsc911x_drv_remove, + .driver = { + .name = SMSC_CHIPNAME, + .pm = SMSC911X_PM_OPS, + .of_match_table = of_match_ptr(smsc911x_dt_ids), + }, +}; + +/* Entry point for loading the module */ +static int __init smsc911x_init_module(void) +{ + SMSC_INITIALIZE(); + return platform_driver_register(&smsc911x_driver); +} + +/* entry point for unloading the module */ +static void __exit smsc911x_cleanup_module(void) +{ + platform_driver_unregister(&smsc911x_driver); +} + +module_init(smsc911x_init_module); +module_exit(smsc911x_cleanup_module); diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h new file mode 100644 index 000000000..54d648920 --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc911x.h @@ -0,0 +1,405 @@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + ***************************************************************************/ +#ifndef __SMSC911X_H__ +#define __SMSC911X_H__ + +#define TX_FIFO_LOW_THRESHOLD ((u32)1600) +#define SMSC911X_EEPROM_SIZE ((u32)128) +#define USE_DEBUG 0 + +/* This is the maximum number of packets to be received every + * NAPI poll */ +#define SMSC_NAPI_WEIGHT 16 + +/* implements a PHY loopback test at initialisation time, to ensure a packet + * can be successfully looped back */ +#define USE_PHY_WORK_AROUND + +#if USE_DEBUG >= 1 +#define SMSC_WARN(pdata, nlevel, fmt, args...) \ + netif_warn(pdata, nlevel, (pdata)->dev, \ + "%s: " fmt "\n", __func__, ##args) +#else +#define SMSC_WARN(pdata, nlevel, fmt, args...) \ + no_printk(fmt "\n", ##args) +#endif + +#if USE_DEBUG >= 2 +#define SMSC_TRACE(pdata, nlevel, fmt, args...) \ + netif_info(pdata, nlevel, pdata->dev, fmt "\n", ##args) +#else +#define SMSC_TRACE(pdata, nlevel, fmt, args...) 
\ + no_printk(fmt "\n", ##args) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +#define SMSC_ASSERT_MAC_LOCK(pdata) \ + WARN_ON_SMP(!spin_is_locked(&pdata->mac_lock)) +#else +#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0) +#endif /* CONFIG_DEBUG_SPINLOCK */ + +/* SMSC911x registers and bitfields */ +#define RX_DATA_FIFO 0x00 + +#define TX_DATA_FIFO 0x20 +#define TX_CMD_A_ON_COMP_ 0x80000000 +#define TX_CMD_A_BUF_END_ALGN_ 0x03000000 +#define TX_CMD_A_4_BYTE_ALGN_ 0x00000000 +#define TX_CMD_A_16_BYTE_ALGN_ 0x01000000 +#define TX_CMD_A_32_BYTE_ALGN_ 0x02000000 +#define TX_CMD_A_DATA_OFFSET_ 0x001F0000 +#define TX_CMD_A_FIRST_SEG_ 0x00002000 +#define TX_CMD_A_LAST_SEG_ 0x00001000 +#define TX_CMD_A_BUF_SIZE_ 0x000007FF +#define TX_CMD_B_PKT_TAG_ 0xFFFF0000 +#define TX_CMD_B_ADD_CRC_DISABLE_ 0x00002000 +#define TX_CMD_B_DISABLE_PADDING_ 0x00001000 +#define TX_CMD_B_PKT_BYTE_LENGTH_ 0x000007FF + +#define RX_STATUS_FIFO 0x40 +#define RX_STS_ES_ 0x00008000 +#define RX_STS_LENGTH_ERR_ 0x00001000 +#define RX_STS_MCAST_ 0x00000400 +#define RX_STS_FRAME_TYPE_ 0x00000020 +#define RX_STS_CRC_ERR_ 0x00000002 + +#define RX_STATUS_FIFO_PEEK 0x44 + +#define TX_STATUS_FIFO 0x48 +#define TX_STS_ES_ 0x00008000 +#define TX_STS_LOST_CARRIER_ 0x00000800 +#define TX_STS_NO_CARRIER_ 0x00000400 +#define TX_STS_LATE_COL_ 0x00000200 +#define TX_STS_EXCESS_COL_ 0x00000100 + +#define TX_STATUS_FIFO_PEEK 0x4C + +#define ID_REV 0x50 +#define ID_REV_CHIP_ID_ 0xFFFF0000 +#define ID_REV_REV_ID_ 0x0000FFFF + +#define INT_CFG 0x54 +#define INT_CFG_INT_DEAS_ 0xFF000000 +#define INT_CFG_INT_DEAS_CLR_ 0x00004000 +#define INT_CFG_INT_DEAS_STS_ 0x00002000 +#define INT_CFG_IRQ_INT_ 0x00001000 +#define INT_CFG_IRQ_EN_ 0x00000100 +#define INT_CFG_IRQ_POL_ 0x00000010 +#define INT_CFG_IRQ_TYPE_ 0x00000001 + +#define INT_STS 0x58 +#define INT_STS_SW_INT_ 0x80000000 +#define INT_STS_TXSTOP_INT_ 0x02000000 +#define INT_STS_RXSTOP_INT_ 0x01000000 +#define INT_STS_RXDFH_INT_ 0x00800000 +#define INT_STS_RXDF_INT_ 0x00400000 +#define INT_STS_TX_IOC_ 0x00200000 +#define INT_STS_RXD_INT_ 0x00100000 +#define INT_STS_GPT_INT_ 0x00080000 +#define INT_STS_PHY_INT_ 0x00040000 +#define INT_STS_PME_INT_ 0x00020000 +#define INT_STS_TXSO_ 0x00010000 +#define INT_STS_RWT_ 0x00008000 +#define INT_STS_RXE_ 0x00004000 +#define INT_STS_TXE_ 0x00002000 +#define INT_STS_TDFU_ 0x00000800 +#define INT_STS_TDFO_ 0x00000400 +#define INT_STS_TDFA_ 0x00000200 +#define INT_STS_TSFF_ 0x00000100 +#define INT_STS_TSFL_ 0x00000080 +#define INT_STS_RXDF_ 0x00000040 +#define INT_STS_RDFL_ 0x00000020 +#define INT_STS_RSFF_ 0x00000010 +#define INT_STS_RSFL_ 0x00000008 +#define INT_STS_GPIO2_INT_ 0x00000004 +#define INT_STS_GPIO1_INT_ 0x00000002 +#define INT_STS_GPIO0_INT_ 0x00000001 + +#define INT_EN 0x5C +#define INT_EN_SW_INT_EN_ 0x80000000 +#define INT_EN_TXSTOP_INT_EN_ 0x02000000 +#define INT_EN_RXSTOP_INT_EN_ 0x01000000 +#define INT_EN_RXDFH_INT_EN_ 0x00800000 +#define INT_EN_TIOC_INT_EN_ 0x00200000 +#define INT_EN_RXD_INT_EN_ 0x00100000 +#define INT_EN_GPT_INT_EN_ 0x00080000 +#define INT_EN_PHY_INT_EN_ 0x00040000 +#define INT_EN_PME_INT_EN_ 0x00020000 +#define INT_EN_TXSO_EN_ 0x00010000 +#define INT_EN_RWT_EN_ 0x00008000 +#define INT_EN_RXE_EN_ 0x00004000 +#define INT_EN_TXE_EN_ 0x00002000 +#define INT_EN_TDFU_EN_ 0x00000800 +#define INT_EN_TDFO_EN_ 0x00000400 +#define INT_EN_TDFA_EN_ 0x00000200 +#define INT_EN_TSFF_EN_ 0x00000100 +#define INT_EN_TSFL_EN_ 0x00000080 +#define INT_EN_RXDF_EN_ 0x00000040 +#define INT_EN_RDFL_EN_ 0x00000020 +#define INT_EN_RSFF_EN_ 0x00000010 
+#define INT_EN_RSFL_EN_ 0x00000008 +#define INT_EN_GPIO2_INT_ 0x00000004 +#define INT_EN_GPIO1_INT_ 0x00000002 +#define INT_EN_GPIO0_INT_ 0x00000001 + +#define BYTE_TEST 0x64 + +#define FIFO_INT 0x68 +#define FIFO_INT_TX_AVAIL_LEVEL_ 0xFF000000 +#define FIFO_INT_TX_STS_LEVEL_ 0x00FF0000 +#define FIFO_INT_RX_AVAIL_LEVEL_ 0x0000FF00 +#define FIFO_INT_RX_STS_LEVEL_ 0x000000FF + +#define RX_CFG 0x6C +#define RX_CFG_RX_END_ALGN_ 0xC0000000 +#define RX_CFG_RX_END_ALGN4_ 0x00000000 +#define RX_CFG_RX_END_ALGN16_ 0x40000000 +#define RX_CFG_RX_END_ALGN32_ 0x80000000 +#define RX_CFG_RX_DMA_CNT_ 0x0FFF0000 +#define RX_CFG_RX_DUMP_ 0x00008000 +#define RX_CFG_RXDOFF_ 0x00001F00 + +#define TX_CFG 0x70 +#define TX_CFG_TXS_DUMP_ 0x00008000 +#define TX_CFG_TXD_DUMP_ 0x00004000 +#define TX_CFG_TXSAO_ 0x00000004 +#define TX_CFG_TX_ON_ 0x00000002 +#define TX_CFG_STOP_TX_ 0x00000001 + +#define HW_CFG 0x74 +#define HW_CFG_TTM_ 0x00200000 +#define HW_CFG_SF_ 0x00100000 +#define HW_CFG_TX_FIF_SZ_ 0x000F0000 +#define HW_CFG_TR_ 0x00003000 +#define HW_CFG_SRST_ 0x00000001 + +/* only available on 115/117 */ +#define HW_CFG_PHY_CLK_SEL_ 0x00000060 +#define HW_CFG_PHY_CLK_SEL_INT_PHY_ 0x00000000 +#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ 0x00000020 +#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ 0x00000040 +#define HW_CFG_SMI_SEL_ 0x00000010 +#define HW_CFG_EXT_PHY_DET_ 0x00000008 +#define HW_CFG_EXT_PHY_EN_ 0x00000004 +#define HW_CFG_SRST_TO_ 0x00000002 + +/* only available on 116/118 */ +#define HW_CFG_32_16_BIT_MODE_ 0x00000004 + +#define RX_DP_CTRL 0x78 +#define RX_DP_CTRL_RX_FFWD_ 0x80000000 + +#define RX_FIFO_INF 0x7C +#define RX_FIFO_INF_RXSUSED_ 0x00FF0000 +#define RX_FIFO_INF_RXDUSED_ 0x0000FFFF + +#define TX_FIFO_INF 0x80 +#define TX_FIFO_INF_TSUSED_ 0x00FF0000 +#define TX_FIFO_INF_TDFREE_ 0x0000FFFF + +#define PMT_CTRL 0x84 +#define PMT_CTRL_PM_MODE_ 0x00003000 +#define PMT_CTRL_PM_MODE_D0_ 0x00000000 +#define PMT_CTRL_PM_MODE_D1_ 0x00001000 +#define PMT_CTRL_PM_MODE_D2_ 0x00002000 +#define PMT_CTRL_PM_MODE_D3_ 0x00003000 +#define PMT_CTRL_PHY_RST_ 0x00000400 +#define PMT_CTRL_WOL_EN_ 0x00000200 +#define PMT_CTRL_ED_EN_ 0x00000100 +#define PMT_CTRL_PME_TYPE_ 0x00000040 +#define PMT_CTRL_WUPS_ 0x00000030 +#define PMT_CTRL_WUPS_NOWAKE_ 0x00000000 +#define PMT_CTRL_WUPS_ED_ 0x00000010 +#define PMT_CTRL_WUPS_WOL_ 0x00000020 +#define PMT_CTRL_WUPS_MULTI_ 0x00000030 +#define PMT_CTRL_PME_IND_ 0x00000008 +#define PMT_CTRL_PME_POL_ 0x00000004 +#define PMT_CTRL_PME_EN_ 0x00000002 +#define PMT_CTRL_READY_ 0x00000001 + +#define GPIO_CFG 0x88 +#define GPIO_CFG_LED3_EN_ 0x40000000 +#define GPIO_CFG_LED2_EN_ 0x20000000 +#define GPIO_CFG_LED1_EN_ 0x10000000 +#define GPIO_CFG_GPIO2_INT_POL_ 0x04000000 +#define GPIO_CFG_GPIO1_INT_POL_ 0x02000000 +#define GPIO_CFG_GPIO0_INT_POL_ 0x01000000 +#define GPIO_CFG_EEPR_EN_ 0x00700000 +#define GPIO_CFG_GPIOBUF2_ 0x00040000 +#define GPIO_CFG_GPIOBUF1_ 0x00020000 +#define GPIO_CFG_GPIOBUF0_ 0x00010000 +#define GPIO_CFG_GPIODIR2_ 0x00000400 +#define GPIO_CFG_GPIODIR1_ 0x00000200 +#define GPIO_CFG_GPIODIR0_ 0x00000100 +#define GPIO_CFG_GPIOD4_ 0x00000020 +#define GPIO_CFG_GPIOD3_ 0x00000010 +#define GPIO_CFG_GPIOD2_ 0x00000004 +#define GPIO_CFG_GPIOD1_ 0x00000002 +#define GPIO_CFG_GPIOD0_ 0x00000001 + +#define GPT_CFG 0x8C +#define GPT_CFG_TIMER_EN_ 0x20000000 +#define GPT_CFG_GPT_LOAD_ 0x0000FFFF + +#define GPT_CNT 0x90 +#define GPT_CNT_GPT_CNT_ 0x0000FFFF + +#define WORD_SWAP 0x98 + +#define FREE_RUN 0x9C + +#define RX_DROP 0xA0 + +#define MAC_CSR_CMD 0xA4 +#define MAC_CSR_CMD_CSR_BUSY_ 0x80000000 
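+
+/* Illustrative sketch only: this helper is not part of the driver and the
+ * name is made up.  It shows how one of the byte-wide FIFO_INT level fields
+ * defined above is updated by read-modify-write; smsc911x_hard_start_xmit()
+ * open-codes the same operation when it masks FIFO_INT with 0x00FFFFFF and
+ * ORs in 0x32000000, i.e. programs the TX data available level to 0x32.
+ * Assumes the usual kernel u8/u32 types are in scope. */
+static inline u32 smsc911x_fifo_int_set_tx_avail(u32 fifo_int, u8 level)
+{
+	/* clear the old TX_AVAIL byte, then place the new level in bits 31:24 */
+	return (fifo_int & ~FIFO_INT_TX_AVAIL_LEVEL_) | ((u32)level << 24);
+}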
+#define MAC_CSR_CMD_R_NOT_W_ 0x40000000 +#define MAC_CSR_CMD_CSR_ADDR_ 0x000000FF + +#define MAC_CSR_DATA 0xA8 + +#define AFC_CFG 0xAC +#define AFC_CFG_AFC_HI_ 0x00FF0000 +#define AFC_CFG_AFC_LO_ 0x0000FF00 +#define AFC_CFG_BACK_DUR_ 0x000000F0 +#define AFC_CFG_FCMULT_ 0x00000008 +#define AFC_CFG_FCBRD_ 0x00000004 +#define AFC_CFG_FCADD_ 0x00000002 +#define AFC_CFG_FCANY_ 0x00000001 + +#define E2P_CMD 0xB0 +#define E2P_CMD_EPC_BUSY_ 0x80000000 +#define E2P_CMD_EPC_CMD_ 0x70000000 +#define E2P_CMD_EPC_CMD_READ_ 0x00000000 +#define E2P_CMD_EPC_CMD_EWDS_ 0x10000000 +#define E2P_CMD_EPC_CMD_EWEN_ 0x20000000 +#define E2P_CMD_EPC_CMD_WRITE_ 0x30000000 +#define E2P_CMD_EPC_CMD_WRAL_ 0x40000000 +#define E2P_CMD_EPC_CMD_ERASE_ 0x50000000 +#define E2P_CMD_EPC_CMD_ERAL_ 0x60000000 +#define E2P_CMD_EPC_CMD_RELOAD_ 0x70000000 +#define E2P_CMD_EPC_TIMEOUT_ 0x00000200 +#define E2P_CMD_MAC_ADDR_LOADED_ 0x00000100 +#define E2P_CMD_EPC_ADDR_ 0x000000FF + +#define E2P_DATA 0xB4 +#define E2P_DATA_EEPROM_DATA_ 0x000000FF +#define LAN_REGISTER_EXTENT 0x00000100 + +/* + * MAC Control and Status Register (Indirect Address) + * Offset (through the MAC_CSR CMD and DATA port) + */ +#define MAC_CR 0x01 +#define MAC_CR_RXALL_ 0x80000000 +#define MAC_CR_HBDIS_ 0x10000000 +#define MAC_CR_RCVOWN_ 0x00800000 +#define MAC_CR_LOOPBK_ 0x00200000 +#define MAC_CR_FDPX_ 0x00100000 +#define MAC_CR_MCPAS_ 0x00080000 +#define MAC_CR_PRMS_ 0x00040000 +#define MAC_CR_INVFILT_ 0x00020000 +#define MAC_CR_PASSBAD_ 0x00010000 +#define MAC_CR_HFILT_ 0x00008000 +#define MAC_CR_HPFILT_ 0x00002000 +#define MAC_CR_LCOLL_ 0x00001000 +#define MAC_CR_BCAST_ 0x00000800 +#define MAC_CR_DISRTY_ 0x00000400 +#define MAC_CR_PADSTR_ 0x00000100 +#define MAC_CR_BOLMT_MASK_ 0x000000C0 +#define MAC_CR_DFCHK_ 0x00000020 +#define MAC_CR_TXEN_ 0x00000008 +#define MAC_CR_RXEN_ 0x00000004 + +#define ADDRH 0x02 + +#define ADDRL 0x03 + +#define HASHH 0x04 + +#define HASHL 0x05 + +#define MII_ACC 0x06 +#define MII_ACC_PHY_ADDR_ 0x0000F800 +#define MII_ACC_MIIRINDA_ 0x000007C0 +#define MII_ACC_MII_WRITE_ 0x00000002 +#define MII_ACC_MII_BUSY_ 0x00000001 + +#define MII_DATA 0x07 + +#define FLOW 0x08 +#define FLOW_FCPT_ 0xFFFF0000 +#define FLOW_FCPASS_ 0x00000004 +#define FLOW_FCEN_ 0x00000002 +#define FLOW_FCBSY_ 0x00000001 + +#define VLAN1 0x09 + +#define VLAN2 0x0A + +#define WUFF 0x0B + +#define WUCSR 0x0C +#define WUCSR_GUE_ 0x00000200 +#define WUCSR_WUFR_ 0x00000040 +#define WUCSR_MPR_ 0x00000020 +#define WUCSR_WAKE_EN_ 0x00000004 +#define WUCSR_MPEN_ 0x00000002 + +/* + * Phy definitions (vendor-specific) + */ +#define LAN9118_PHY_ID 0x00C0001C + +#define MII_INTSTS 0x1D + +#define MII_INTMSK 0x1E +#define PHY_INTMSK_AN_RCV_ (1 << 1) +#define PHY_INTMSK_PDFAULT_ (1 << 2) +#define PHY_INTMSK_AN_ACK_ (1 << 3) +#define PHY_INTMSK_LNKDOWN_ (1 << 4) +#define PHY_INTMSK_RFAULT_ (1 << 5) +#define PHY_INTMSK_AN_COMP_ (1 << 6) +#define PHY_INTMSK_ENERGYON_ (1 << 7) +#define PHY_INTMSK_DEFAULT_ (PHY_INTMSK_ENERGYON_ | \ + PHY_INTMSK_AN_COMP_ | \ + PHY_INTMSK_RFAULT_ | \ + PHY_INTMSK_LNKDOWN_) + +#define ADVERTISE_PAUSE_ALL (ADVERTISE_PAUSE_CAP | \ + ADVERTISE_PAUSE_ASYM) + +#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \ + LPA_PAUSE_ASYM) + +/* + * Provide hooks to let the arch add to the initialisation procedure + * and to override the source of the MAC address. 
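+ *
+ * By default SMSC_INITIALIZE() below expands to nothing and smsc_get_mac()
+ * simply reads back whatever is already programmed in the ADDRH/ADDRL
+ * registers via smsc911x_read_mac_address().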
+ */ +#define SMSC_INITIALIZE() do {} while (0) +#define smsc_get_mac(dev) smsc911x_read_mac_address((dev)) + +#ifdef CONFIG_SMSC911X_ARCH_HOOKS +#include +#endif + +#include + +#endif /* __SMSC911X_H__ */ diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c new file mode 100644 index 000000000..4a90cdae5 --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -0,0 +1,1751 @@ + /*************************************************************************** + * + * Copyright (C) 2007,2008 SMSC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + *************************************************************************** + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smsc9420.h" + +#define DRV_NAME "smsc9420" +#define DRV_MDIONAME "smsc9420-mdio" +#define DRV_DESCRIPTION "SMSC LAN9420 driver" +#define DRV_VERSION "1.01" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +struct smsc9420_dma_desc { + u32 status; + u32 length; + u32 buffer1; + u32 buffer2; +}; + +struct smsc9420_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +struct smsc9420_pdata { + void __iomem *ioaddr; + struct pci_dev *pdev; + struct net_device *dev; + + struct smsc9420_dma_desc *rx_ring; + struct smsc9420_dma_desc *tx_ring; + struct smsc9420_ring_info *tx_buffers; + struct smsc9420_ring_info *rx_buffers; + dma_addr_t rx_dma_addr; + dma_addr_t tx_dma_addr; + int tx_ring_head, tx_ring_tail; + int rx_ring_head, rx_ring_tail; + + spinlock_t int_lock; + spinlock_t phy_lock; + + struct napi_struct napi; + + bool software_irq_signal; + bool rx_csum; + u32 msg_enable; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + int phy_irq[PHY_MAX_ADDR]; + int last_duplex; + int last_carrier; +}; + +static const struct pci_device_id smsc9420_id_table[] = { + { PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, smsc9420_id_table); + +#define SMSC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static uint smsc_debug; +static uint debug = -1; +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "debug level"); + +static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) +{ + return ioread32(pd->ioaddr + offset); +} + +static inline void +smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) +{ + iowrite32(value, pd->ioaddr + offset); +} + +static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) +{ + /* to ensure PCI write completion, we must perform a PCI read */ + smsc9420_reg_read(pd, ID_REV); +} + +static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx) +{ + struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; + unsigned long flags; + u32 addr; + int i, reg = -EIO; + + spin_lock_irqsave(&pd->phy_lock, flags); + + /* 
confirm MII not busy */ + if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { + netif_warn(pd, drv, pd->dev, "MII is busy???\n"); + goto out; + } + + /* set the address, index & direction (read from PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACCESS_MII_READ_; + smsc9420_reg_write(pd, MII_ACCESS, addr); + + /* wait for read to complete with 50us timeout */ + for (i = 0; i < 5; i++) { + if (!(smsc9420_reg_read(pd, MII_ACCESS) & + MII_ACCESS_MII_BUSY_)) { + reg = (u16)smsc9420_reg_read(pd, MII_DATA); + goto out; + } + udelay(10); + } + + netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); + +out: + spin_unlock_irqrestore(&pd->phy_lock, flags); + return reg; +} + +static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val) +{ + struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv; + unsigned long flags; + u32 addr; + int i, reg = -EIO; + + spin_lock_irqsave(&pd->phy_lock, flags); + + /* confirm MII not busy */ + if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) { + netif_warn(pd, drv, pd->dev, "MII is busy???\n"); + goto out; + } + + /* put the data to write in the MAC */ + smsc9420_reg_write(pd, MII_DATA, (u32)val); + + /* set the address, index & direction (write to PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACCESS_MII_WRITE_; + smsc9420_reg_write(pd, MII_ACCESS, addr); + + /* wait for write to complete with 50us timeout */ + for (i = 0; i < 5; i++) { + if (!(smsc9420_reg_read(pd, MII_ACCESS) & + MII_ACCESS_MII_BUSY_)) { + reg = 0; + goto out; + } + udelay(10); + } + + netif_warn(pd, drv, pd->dev, "MII busy timeout!\n"); + +out: + spin_unlock_irqrestore(&pd->phy_lock, flags); + return reg; +} + +/* Returns hash bit number for given MAC address + * Example: + * 01 00 5E 00 00 01 -> returns bit number 31 */ +static u32 smsc9420_hash(u8 addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; +} + +static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) +{ + int timeout = 100000; + + BUG_ON(!pd); + + if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__); + return -EIO; + } + + smsc9420_reg_write(pd, E2P_CMD, + (E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_RELOAD_)); + + do { + udelay(10); + if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_)) + return 0; + } while (timeout--); + + netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__); + return -EIO; +} + +/* Standard ioctls for mii-tool */ +static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + if (!netif_running(dev) || !pd->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(pd->phy_dev, ifr, cmd); +} + +static int smsc9420_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + if (!pd->phy_dev) + return -ENODEV; + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + return phy_ethtool_gset(pd->phy_dev, cmd); +} + +static int smsc9420_ethtool_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + if (!pd->phy_dev) + return -ENODEV; + + return phy_ethtool_sset(pd->phy_dev, cmd); +} + +static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + + strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + 
strlcpy(drvinfo->bus_info, pci_name(pd->pdev), + sizeof(drvinfo->bus_info)); + strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); +} + +static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + return pd->msg_enable; +} + +static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + pd->msg_enable = data; +} + +static int smsc9420_ethtool_nway_reset(struct net_device *netdev) +{ + struct smsc9420_pdata *pd = netdev_priv(netdev); + + if (!pd->phy_dev) + return -ENODEV; + + return phy_start_aneg(pd->phy_dev); +} + +static int smsc9420_ethtool_getregslen(struct net_device *dev) +{ + /* all smsc9420 registers plus all phy registers */ + return 0x100 + (32 * sizeof(u32)); +} + +static void +smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + struct phy_device *phy_dev = pd->phy_dev; + unsigned int i, j = 0; + u32 *data = buf; + + regs->version = smsc9420_reg_read(pd, ID_REV); + for (i = 0; i < 0x100; i += (sizeof(u32))) + data[j++] = smsc9420_reg_read(pd, i); + + // cannot read phy registers if the net device is down + if (!phy_dev) + return; + + for (i = 0; i <= 31; i++) + data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); +} + +static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd) +{ + unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG); + temp &= ~GPIO_CFG_EEPR_EN_; + smsc9420_reg_write(pd, GPIO_CFG, temp); + msleep(1); +} + +static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op) +{ + int timeout = 100; + u32 e2cmd; + + netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op); + if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + netif_warn(pd, hw, pd->dev, "Busy at start\n"); + return -EBUSY; + } + + e2cmd = op | E2P_CMD_EPC_BUSY_; + smsc9420_reg_write(pd, E2P_CMD, e2cmd); + + do { + msleep(1); + e2cmd = smsc9420_reg_read(pd, E2P_CMD); + } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); + + if (!timeout) { + netif_info(pd, hw, pd->dev, "TIMED OUT\n"); + return -EAGAIN; + } + + if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { + netif_info(pd, hw, pd->dev, + "Error occurred during eeprom operation\n"); + return -EINVAL; + } + + return 0; +} + +static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd, + u8 address, u8 *data) +{ + u32 op = E2P_CMD_EPC_CMD_READ_ | address; + int ret; + + netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address); + ret = smsc9420_eeprom_send_cmd(pd, op); + + if (!ret) + data[address] = smsc9420_reg_read(pd, E2P_DATA); + + return ret; +} + +static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd, + u8 address, u8 data) +{ + u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; + int ret; + + netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data); + ret = smsc9420_eeprom_send_cmd(pd, op); + + if (!ret) { + op = E2P_CMD_EPC_CMD_WRITE_ | address; + smsc9420_reg_write(pd, E2P_DATA, (u32)data); + ret = smsc9420_eeprom_send_cmd(pd, op); + } + + return ret; +} + +static int smsc9420_ethtool_get_eeprom_len(struct net_device *dev) +{ + return SMSC9420_EEPROM_SIZE; +} + +static int smsc9420_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u8 eeprom_data[SMSC9420_EEPROM_SIZE]; + int len, i; + + smsc9420_eeprom_enable_access(pd); + + len = min(eeprom->len, SMSC9420_EEPROM_SIZE); + for (i = 0; i 
< len; i++) { + int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data); + if (ret < 0) { + eeprom->len = 0; + return ret; + } + } + + memcpy(data, &eeprom_data[eeprom->offset], len); + eeprom->magic = SMSC9420_EEPROM_MAGIC; + eeprom->len = len; + return 0; +} + +static int smsc9420_ethtool_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + int ret; + + if (eeprom->magic != SMSC9420_EEPROM_MAGIC) + return -EINVAL; + + smsc9420_eeprom_enable_access(pd); + smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_); + ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data); + smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_); + + /* Single byte write, according to man page */ + eeprom->len = 1; + + return ret; +} + +static const struct ethtool_ops smsc9420_ethtool_ops = { + .get_settings = smsc9420_ethtool_get_settings, + .set_settings = smsc9420_ethtool_set_settings, + .get_drvinfo = smsc9420_ethtool_get_drvinfo, + .get_msglevel = smsc9420_ethtool_get_msglevel, + .set_msglevel = smsc9420_ethtool_set_msglevel, + .nway_reset = smsc9420_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = smsc9420_ethtool_get_eeprom_len, + .get_eeprom = smsc9420_ethtool_get_eeprom, + .set_eeprom = smsc9420_ethtool_set_eeprom, + .get_regs_len = smsc9420_ethtool_getregslen, + .get_regs = smsc9420_ethtool_getregs, + .get_ts_info = ethtool_op_get_ts_info, +}; + +/* Sets the device MAC address to dev_addr */ +static void smsc9420_set_mac_address(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u8 *dev_addr = dev->dev_addr; + u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; + u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + + smsc9420_reg_write(pd, ADDRH, mac_high16); + smsc9420_reg_write(pd, ADDRL, mac_low32); +} + +static void smsc9420_check_mac_address(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + /* Check if mac address has been specified when bringing interface up */ + if (is_valid_ether_addr(dev->dev_addr)) { + smsc9420_set_mac_address(dev); + netif_dbg(pd, probe, pd->dev, + "MAC Address is specified by configuration\n"); + } else { + /* Try reading mac address from device. 
if EEPROM is present + * it will already have been set */ + u32 mac_high16 = smsc9420_reg_read(pd, ADDRH); + u32 mac_low32 = smsc9420_reg_read(pd, ADDRL); + dev->dev_addr[0] = (u8)(mac_low32); + dev->dev_addr[1] = (u8)(mac_low32 >> 8); + dev->dev_addr[2] = (u8)(mac_low32 >> 16); + dev->dev_addr[3] = (u8)(mac_low32 >> 24); + dev->dev_addr[4] = (u8)(mac_high16); + dev->dev_addr[5] = (u8)(mac_high16 >> 8); + + if (is_valid_ether_addr(dev->dev_addr)) { + /* eeprom values are valid so use them */ + netif_dbg(pd, probe, pd->dev, + "Mac Address is read from EEPROM\n"); + } else { + /* eeprom values are invalid, generate random MAC */ + eth_hw_addr_random(dev); + smsc9420_set_mac_address(dev); + netif_dbg(pd, probe, pd->dev, + "MAC Address is set to random\n"); + } + } +} + +static void smsc9420_stop_tx(struct smsc9420_pdata *pd) +{ + u32 dmac_control, mac_cr, dma_intr_ena; + int timeout = 1000; + + /* disable TX DMAC */ + dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); + dmac_control &= (~DMAC_CONTROL_ST_); + smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); + + /* Wait max 10ms for transmit process to stop */ + while (--timeout) { + if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_) + break; + udelay(10); + } + + if (!timeout) + netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n"); + + /* ACK Tx DMAC stop bit */ + smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_); + + /* mask TX DMAC interrupts */ + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena &= ~(DMAC_INTR_ENA_TX_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + /* stop MAC TX */ + mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_); + smsc9420_reg_write(pd, MAC_CR, mac_cr); + smsc9420_pci_flush_write(pd); +} + +static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->tx_ring); + + if (!pd->tx_buffers) + return; + + for (i = 0; i < TX_RING_SIZE; i++) { + struct sk_buff *skb = pd->tx_buffers[i].skb; + + if (skb) { + BUG_ON(!pd->tx_buffers[i].mapping); + pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping, + skb->len, PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + } + + pd->tx_ring[i].status = 0; + pd->tx_ring[i].length = 0; + pd->tx_ring[i].buffer1 = 0; + pd->tx_ring[i].buffer2 = 0; + } + wmb(); + + kfree(pd->tx_buffers); + pd->tx_buffers = NULL; + + pd->tx_ring_head = 0; + pd->tx_ring_tail = 0; +} + +static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->rx_ring); + + if (!pd->rx_buffers) + return; + + for (i = 0; i < RX_RING_SIZE; i++) { + if (pd->rx_buffers[i].skb) + dev_kfree_skb_any(pd->rx_buffers[i].skb); + + if (pd->rx_buffers[i].mapping) + pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + + pd->rx_ring[i].status = 0; + pd->rx_ring[i].length = 0; + pd->rx_ring[i].buffer1 = 0; + pd->rx_ring[i].buffer2 = 0; + } + wmb(); + + kfree(pd->rx_buffers); + pd->rx_buffers = NULL; + + pd->rx_ring_head = 0; + pd->rx_ring_tail = 0; +} + +static void smsc9420_stop_rx(struct smsc9420_pdata *pd) +{ + int timeout = 1000; + u32 mac_cr, dmac_control, dma_intr_ena; + + /* mask RX DMAC interrupts */ + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena &= (~DMAC_INTR_ENA_RX_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + /* stop RX MAC prior to stoping DMA */ + mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_); + smsc9420_reg_write(pd, MAC_CR, mac_cr); + smsc9420_pci_flush_write(pd); + + /* stop 
RX DMAC */ + dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); + dmac_control &= (~DMAC_CONTROL_SR_); + smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); + smsc9420_pci_flush_write(pd); + + /* wait up to 10ms for receive to stop */ + while (--timeout) { + if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_) + break; + udelay(10); + } + + if (!timeout) + netif_warn(pd, ifdown, pd->dev, + "RX DMAC did not stop! timeout\n"); + + /* ACK the Rx DMAC stop bit */ + smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_); +} + +static irqreturn_t smsc9420_isr(int irq, void *dev_id) +{ + struct smsc9420_pdata *pd = dev_id; + u32 int_cfg, int_sts, int_ctl; + irqreturn_t ret = IRQ_NONE; + ulong flags; + + BUG_ON(!pd); + BUG_ON(!pd->ioaddr); + + int_cfg = smsc9420_reg_read(pd, INT_CFG); + + /* check if it's our interrupt */ + if ((int_cfg & (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) != + (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) + return IRQ_NONE; + + int_sts = smsc9420_reg_read(pd, INT_STAT); + + if (likely(INT_STAT_DMAC_INT_ & int_sts)) { + u32 status = smsc9420_reg_read(pd, DMAC_STATUS); + u32 ints_to_clear = 0; + + if (status & DMAC_STS_TX_) { + ints_to_clear |= (DMAC_STS_TX_ | DMAC_STS_NIS_); + netif_wake_queue(pd->dev); + } + + if (status & DMAC_STS_RX_) { + /* mask RX DMAC interrupts */ + u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena &= (~DMAC_INTR_ENA_RX_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_); + napi_schedule(&pd->napi); + } + + if (ints_to_clear) + smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear); + + ret = IRQ_HANDLED; + } + + if (unlikely(INT_STAT_SW_INT_ & int_sts)) { + /* mask software interrupt */ + spin_lock_irqsave(&pd->int_lock, flags); + int_ctl = smsc9420_reg_read(pd, INT_CTL); + int_ctl &= (~INT_CTL_SW_INT_EN_); + smsc9420_reg_write(pd, INT_CTL, int_ctl); + spin_unlock_irqrestore(&pd->int_lock, flags); + + smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_); + pd->software_irq_signal = true; + smp_wmb(); + + ret = IRQ_HANDLED; + } + + /* to ensure PCI write completion, we must perform a PCI read */ + smsc9420_pci_flush_write(pd); + + return ret; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void smsc9420_poll_controller(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + const int irq = pd->pdev->irq; + + disable_irq(irq); + smsc9420_isr(0, dev); + enable_irq(irq); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd) +{ + smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_); + smsc9420_reg_read(pd, BUS_MODE); + udelay(2); + if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_) + netif_warn(pd, drv, pd->dev, "Software reset not cleared\n"); +} + +static int smsc9420_stop(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 int_cfg; + ulong flags; + + BUG_ON(!pd); + BUG_ON(!pd->phy_dev); + + /* disable master interrupt */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + netif_tx_disable(dev); + napi_disable(&pd->napi); + + smsc9420_stop_tx(pd); + smsc9420_free_tx_ring(pd); + + smsc9420_stop_rx(pd); + smsc9420_free_rx_ring(pd); + + free_irq(pd->pdev->irq, pd); + + smsc9420_dmac_soft_reset(pd); + + phy_stop(pd->phy_dev); + + phy_disconnect(pd->phy_dev); + pd->phy_dev = NULL; + 
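+ /* The PHY has been disconnected above, so it is now safe to unregister
+  * and free the MDIO bus it was attached to. */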
mdiobus_unregister(pd->mii_bus); + mdiobus_free(pd->mii_bus); + + return 0; +} + +static void smsc9420_rx_count_stats(struct net_device *dev, u32 desc_status) +{ + if (unlikely(desc_status & RDES0_ERROR_SUMMARY_)) { + dev->stats.rx_errors++; + if (desc_status & RDES0_DESCRIPTOR_ERROR_) + dev->stats.rx_over_errors++; + else if (desc_status & (RDES0_FRAME_TOO_LONG_ | + RDES0_RUNT_FRAME_ | RDES0_COLLISION_SEEN_)) + dev->stats.rx_frame_errors++; + else if (desc_status & RDES0_CRC_ERROR_) + dev->stats.rx_crc_errors++; + } + + if (unlikely(desc_status & RDES0_LENGTH_ERROR_)) + dev->stats.rx_length_errors++; + + if (unlikely(!((desc_status & RDES0_LAST_DESCRIPTOR_) && + (desc_status & RDES0_FIRST_DESCRIPTOR_)))) + dev->stats.rx_length_errors++; + + if (desc_status & RDES0_MULTICAST_FRAME_) + dev->stats.multicast++; +} + +static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index, + const u32 status) +{ + struct net_device *dev = pd->dev; + struct sk_buff *skb; + u16 packet_length = (status & RDES0_FRAME_LENGTH_MASK_) + >> RDES0_FRAME_LENGTH_SHFT_; + + /* remove crc from packet lendth */ + packet_length -= 4; + + if (pd->rx_csum) + packet_length -= 2; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; + + pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + pd->rx_buffers[index].mapping = 0; + + skb = pd->rx_buffers[index].skb; + pd->rx_buffers[index].skb = NULL; + + if (pd->rx_csum) { + u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) + + NET_IP_ALIGN + packet_length + 4); + put_unaligned_le16(hw_csum, &skb->csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, packet_length); + + skb->protocol = eth_type_trans(skb, dev); + + netif_receive_skb(skb); +} + +static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) +{ + struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ); + dma_addr_t mapping; + + BUG_ON(pd->rx_buffers[index].skb); + BUG_ON(pd->rx_buffers[index].mapping); + + if (unlikely(!skb)) + return -ENOMEM; + + mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pd->pdev, mapping)) { + dev_kfree_skb_any(skb); + netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n"); + return -ENOMEM; + } + + pd->rx_buffers[index].skb = skb; + pd->rx_buffers[index].mapping = mapping; + pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN; + pd->rx_ring[index].status = RDES0_OWN_; + wmb(); + + return 0; +} + +static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd) +{ + while (pd->rx_ring_tail != pd->rx_ring_head) { + if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail)) + break; + + pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE; + } +} + +static int smsc9420_rx_poll(struct napi_struct *napi, int budget) +{ + struct smsc9420_pdata *pd = + container_of(napi, struct smsc9420_pdata, napi); + struct net_device *dev = pd->dev; + u32 drop_frame_cnt, dma_intr_ena, status; + int work_done; + + for (work_done = 0; work_done < budget; work_done++) { + rmb(); + status = pd->rx_ring[pd->rx_ring_head].status; + + /* stop if DMAC owns this dma descriptor */ + if (status & RDES0_OWN_) + break; + + smsc9420_rx_count_stats(dev, status); + smsc9420_rx_handoff(pd, pd->rx_ring_head, status); + pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE; + smsc9420_alloc_new_rx_buffers(pd); + } + + drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR); + dev->stats.rx_dropped += + 
(drop_frame_cnt & 0xFFFF) + ((drop_frame_cnt >> 17) & 0x3FF); + + /* Kick RXDMA */ + smsc9420_reg_write(pd, RX_POLL_DEMAND, 1); + smsc9420_pci_flush_write(pd); + + if (work_done < budget) { + napi_complete(&pd->napi); + + /* re-enable RX DMA interrupts */ + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena |= (DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + } + return work_done; +} + +static void +smsc9420_tx_update_stats(struct net_device *dev, u32 status, u32 length) +{ + if (unlikely(status & TDES0_ERROR_SUMMARY_)) { + dev->stats.tx_errors++; + if (status & (TDES0_EXCESSIVE_DEFERRAL_ | + TDES0_EXCESSIVE_COLLISIONS_)) + dev->stats.tx_aborted_errors++; + + if (status & (TDES0_LOSS_OF_CARRIER_ | TDES0_NO_CARRIER_)) + dev->stats.tx_carrier_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += (length & 0x7FF); + } + + if (unlikely(status & TDES0_EXCESSIVE_COLLISIONS_)) { + dev->stats.collisions += 16; + } else { + dev->stats.collisions += + (status & TDES0_COLLISION_COUNT_MASK_) >> + TDES0_COLLISION_COUNT_SHFT_; + } + + if (unlikely(status & TDES0_HEARTBEAT_FAIL_)) + dev->stats.tx_heartbeat_errors++; +} + +/* Check for completed dma transfers, update stats and free skbs */ +static void smsc9420_complete_tx(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + + while (pd->tx_ring_tail != pd->tx_ring_head) { + int index = pd->tx_ring_tail; + u32 status, length; + + rmb(); + status = pd->tx_ring[index].status; + length = pd->tx_ring[index].length; + + /* Check if DMA still owns this descriptor */ + if (unlikely(TDES0_OWN_ & status)) + break; + + smsc9420_tx_update_stats(dev, status, length); + + BUG_ON(!pd->tx_buffers[index].skb); + BUG_ON(!pd->tx_buffers[index].mapping); + + pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping, + pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE); + pd->tx_buffers[index].mapping = 0; + + dev_kfree_skb_any(pd->tx_buffers[index].skb); + pd->tx_buffers[index].skb = NULL; + + pd->tx_ring[index].buffer1 = 0; + wmb(); + + pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE; + } +} + +static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + dma_addr_t mapping; + int index = pd->tx_ring_head; + u32 tmp_desc1; + bool about_to_take_last_desc = + (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail); + + smsc9420_complete_tx(dev); + + rmb(); + BUG_ON(pd->tx_ring[index].status & TDES0_OWN_); + BUG_ON(pd->tx_buffers[index].skb); + BUG_ON(pd->tx_buffers[index].mapping); + + mapping = pci_map_single(pd->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pd->pdev, mapping)) { + netif_warn(pd, tx_err, pd->dev, + "pci_map_single failed, dropping packet\n"); + return NETDEV_TX_BUSY; + } + + pd->tx_buffers[index].skb = skb; + pd->tx_buffers[index].mapping = mapping; + + tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF)); + if (unlikely(about_to_take_last_desc)) { + tmp_desc1 |= TDES1_IC_; + netif_stop_queue(pd->dev); + } + + /* check if we are at the last descriptor and need to set EOR */ + if (unlikely(index == (TX_RING_SIZE - 1))) + tmp_desc1 |= TDES1_TER_; + + pd->tx_ring[index].buffer1 = mapping; + pd->tx_ring[index].length = tmp_desc1; + wmb(); + + /* increment head */ + pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE; + + /* assign ownership to DMAC */ + pd->tx_ring[index].status = TDES0_OWN_; + 
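+ /* Writing TDES0_OWN_ hands the descriptor to the DMA engine.  It must be
+  * the last descriptor field written: the earlier wmb() ordered the buffer1
+  * and length writes before it, and the wmb() below orders it ahead of the
+  * TX_POLL_DEMAND doorbell.  The queue was already stopped further up (and
+  * TDES1_IC_ requested) when ((tx_ring_head + 2) % TX_RING_SIZE) ==
+  * tx_ring_tail, so the TX-completion interrupt can wake it again. */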
wmb(); + + skb_tx_timestamp(skb); + + /* kick the DMA */ + smsc9420_reg_write(pd, TX_POLL_DEMAND, 1); + smsc9420_pci_flush_write(pd); + + return NETDEV_TX_OK; +} + +static struct net_device_stats *smsc9420_get_stats(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR); + dev->stats.rx_dropped += + (counter & 0x0000FFFF) + ((counter >> 17) & 0x000003FF); + return &dev->stats; +} + +static void smsc9420_set_multicast_list(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); + + if (dev->flags & IFF_PROMISC) { + netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n"); + mac_cr |= MAC_CR_PRMS_; + mac_cr &= (~MAC_CR_MCPAS_); + mac_cr &= (~MAC_CR_HPFILT_); + } else if (dev->flags & IFF_ALLMULTI) { + netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n"); + mac_cr &= (~MAC_CR_PRMS_); + mac_cr |= MAC_CR_MCPAS_; + mac_cr &= (~MAC_CR_HPFILT_); + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + u32 hash_lo = 0, hash_hi = 0; + + netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n"); + netdev_for_each_mc_addr(ha, dev) { + u32 bit_num = smsc9420_hash(ha->addr); + u32 mask = 1 << (bit_num & 0x1F); + + if (bit_num & 0x20) + hash_hi |= mask; + else + hash_lo |= mask; + + } + smsc9420_reg_write(pd, HASHH, hash_hi); + smsc9420_reg_write(pd, HASHL, hash_lo); + + mac_cr &= (~MAC_CR_PRMS_); + mac_cr &= (~MAC_CR_MCPAS_); + mac_cr |= MAC_CR_HPFILT_; + } else { + netif_dbg(pd, hw, pd->dev, "Receive own packets only\n"); + smsc9420_reg_write(pd, HASHH, 0); + smsc9420_reg_write(pd, HASHL, 0); + + mac_cr &= (~MAC_CR_PRMS_); + mac_cr &= (~MAC_CR_MCPAS_); + mac_cr &= (~MAC_CR_HPFILT_); + } + + smsc9420_reg_write(pd, MAC_CR, mac_cr); + smsc9420_pci_flush_write(pd); +} + +static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) +{ + struct phy_device *phy_dev = pd->phy_dev; + u32 flow; + + if (phy_dev->duplex == DUPLEX_FULL) { + u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); + u16 rmtadv = phy_read(phy_dev, MII_LPA); + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & FLOW_CTRL_RX) + flow = 0xFFFF0002; + else + flow = 0; + + netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n", + cap & FLOW_CTRL_RX ? "enabled" : "disabled", + cap & FLOW_CTRL_TX ? "enabled" : "disabled"); + } else { + netif_info(pd, link, pd->dev, "half duplex\n"); + flow = 0; + } + + smsc9420_reg_write(pd, FLOW, flow); +} + +/* Update link mode if anything has changed. Called periodically when the + * PHY is in polling mode, even if nothing has changed. 
*/ +static void smsc9420_phy_adjust_link(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + struct phy_device *phy_dev = pd->phy_dev; + int carrier; + + if (phy_dev->duplex != pd->last_duplex) { + u32 mac_cr = smsc9420_reg_read(pd, MAC_CR); + if (phy_dev->duplex) { + netif_dbg(pd, link, pd->dev, "full duplex mode\n"); + mac_cr |= MAC_CR_FDPX_; + } else { + netif_dbg(pd, link, pd->dev, "half duplex mode\n"); + mac_cr &= ~MAC_CR_FDPX_; + } + smsc9420_reg_write(pd, MAC_CR, mac_cr); + + smsc9420_phy_update_flowcontrol(pd); + pd->last_duplex = phy_dev->duplex; + } + + carrier = netif_carrier_ok(dev); + if (carrier != pd->last_carrier) { + if (carrier) + netif_dbg(pd, link, pd->dev, "carrier OK\n"); + else + netif_dbg(pd, link, pd->dev, "no carrier\n"); + pd->last_carrier = carrier; + } +} + +static int smsc9420_mii_probe(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + struct phy_device *phydev = NULL; + + BUG_ON(pd->phy_dev); + + /* Device only supports internal PHY at address 1 */ + if (!pd->mii_bus->phy_map[1]) { + netdev_err(dev, "no PHY found at address 1\n"); + return -ENODEV; + } + + phydev = pd->mii_bus->phy_map[1]; + netif_info(pd, probe, pd->dev, "PHY addr %d, phy_id 0x%08X\n", + phydev->addr, phydev->phy_id); + + phydev = phy_connect(dev, dev_name(&phydev->dev), + smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + /* mask with MAC supported features */ + phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phydev->advertising = phydev->supported; + + pd->phy_dev = phydev; + pd->last_duplex = -1; + pd->last_carrier = -1; + + return 0; +} + +static int smsc9420_mii_init(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + int err = -ENXIO, i; + + pd->mii_bus = mdiobus_alloc(); + if (!pd->mii_bus) { + err = -ENOMEM; + goto err_out_1; + } + pd->mii_bus->name = DRV_MDIONAME; + snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x", + (pd->pdev->bus->number << 8) | pd->pdev->devfn); + pd->mii_bus->priv = pd; + pd->mii_bus->read = smsc9420_mii_read; + pd->mii_bus->write = smsc9420_mii_write; + pd->mii_bus->irq = pd->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + pd->mii_bus->irq[i] = PHY_POLL; + + /* Mask all PHYs except ID 1 (internal) */ + pd->mii_bus->phy_mask = ~(1 << 1); + + if (mdiobus_register(pd->mii_bus)) { + netif_warn(pd, probe, pd->dev, "Error registering mii bus\n"); + goto err_out_free_bus_2; + } + + if (smsc9420_mii_probe(dev) < 0) { + netif_warn(pd, probe, pd->dev, "Error probing mii bus\n"); + goto err_out_unregister_bus_3; + } + + return 0; + +err_out_unregister_bus_3: + mdiobus_unregister(pd->mii_bus); +err_out_free_bus_2: + mdiobus_free(pd->mii_bus); +err_out_1: + return err; +} + +static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->tx_ring); + + pd->tx_buffers = kmalloc_array(TX_RING_SIZE, + sizeof(struct smsc9420_ring_info), + GFP_KERNEL); + if (!pd->tx_buffers) + return -ENOMEM; + + /* Initialize the TX Ring */ + for (i = 0; i < TX_RING_SIZE; i++) { + pd->tx_buffers[i].skb = NULL; + pd->tx_buffers[i].mapping = 0; + pd->tx_ring[i].status = 0; + pd->tx_ring[i].length = 0; + pd->tx_ring[i].buffer1 = 0; + pd->tx_ring[i].buffer2 = 0; + } + pd->tx_ring[TX_RING_SIZE - 1].length = 
TDES1_TER_; + wmb(); + + pd->tx_ring_head = 0; + pd->tx_ring_tail = 0; + + smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr); + smsc9420_pci_flush_write(pd); + + return 0; +} + +static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd) +{ + int i; + + BUG_ON(!pd->rx_ring); + + pd->rx_buffers = kmalloc_array(RX_RING_SIZE, + sizeof(struct smsc9420_ring_info), + GFP_KERNEL); + if (pd->rx_buffers == NULL) + goto out; + + /* initialize the rx ring */ + for (i = 0; i < RX_RING_SIZE; i++) { + pd->rx_ring[i].status = 0; + pd->rx_ring[i].length = PKT_BUF_SZ; + pd->rx_ring[i].buffer2 = 0; + pd->rx_buffers[i].skb = NULL; + pd->rx_buffers[i].mapping = 0; + } + pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_); + + /* now allocate the entire ring of skbs */ + for (i = 0; i < RX_RING_SIZE; i++) { + if (smsc9420_alloc_rx_buffer(pd, i)) { + netif_warn(pd, ifup, pd->dev, + "failed to allocate rx skb %d\n", i); + goto out_free_rx_skbs; + } + } + + pd->rx_ring_head = 0; + pd->rx_ring_tail = 0; + + smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q); + netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n", + smsc9420_reg_read(pd, VLAN1)); + + if (pd->rx_csum) { + /* Enable RX COE */ + u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN; + smsc9420_reg_write(pd, COE_CR, coe); + netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe); + } + + smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr); + smsc9420_pci_flush_write(pd); + + return 0; + +out_free_rx_skbs: + smsc9420_free_rx_ring(pd); +out: + return -ENOMEM; +} + +static int smsc9420_open(struct net_device *dev) +{ + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl; + const int irq = pd->pdev->irq; + unsigned long flags; + int result = 0, timeout; + + if (!is_valid_ether_addr(dev->dev_addr)) { + netif_warn(pd, ifup, pd->dev, + "dev_addr is not a valid MAC address\n"); + result = -EADDRNOTAVAIL; + goto out_0; + } + + netif_carrier_off(dev); + + /* disable, mask and acknowledge all interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + smsc9420_reg_write(pd, INT_CTL, 0); + spin_unlock_irqrestore(&pd->int_lock, flags); + smsc9420_reg_write(pd, DMAC_INTR_ENA, 0); + smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); + smsc9420_pci_flush_write(pd); + + result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd); + if (result) { + netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq); + result = -ENODEV; + goto out_0; + } + + smsc9420_dmac_soft_reset(pd); + + /* make sure MAC_CR is sane */ + smsc9420_reg_write(pd, MAC_CR, 0); + + smsc9420_set_mac_address(dev); + + /* Configure GPIO pins to drive LEDs */ + smsc9420_reg_write(pd, GPIO_CFG, + (GPIO_CFG_LED_3_ | GPIO_CFG_LED_2_ | GPIO_CFG_LED_1_)); + + bus_mode = BUS_MODE_DMA_BURST_LENGTH_16; + +#ifdef __BIG_ENDIAN + bus_mode |= BUS_MODE_DBO_; +#endif + + smsc9420_reg_write(pd, BUS_MODE, bus_mode); + + smsc9420_pci_flush_write(pd); + + /* set bus master bridge arbitration priority for Rx and TX DMA */ + smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1); + + smsc9420_reg_write(pd, DMAC_CONTROL, + (DMAC_CONTROL_SF_ | DMAC_CONTROL_OSF_)); + + smsc9420_pci_flush_write(pd); + + /* test the IRQ connection to the ISR */ + netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq); + pd->software_irq_signal = false; + + spin_lock_irqsave(&pd->int_lock, flags); + /* configure interrupt deassertion timer and enable 
interrupts */ + int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; + int_cfg &= ~(INT_CFG_INT_DEAS_MASK); + int_cfg |= (INT_DEAS_TIME & INT_CFG_INT_DEAS_MASK); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + + /* unmask software interrupt */ + int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_; + smsc9420_reg_write(pd, INT_CTL, int_ctl); + spin_unlock_irqrestore(&pd->int_lock, flags); + smsc9420_pci_flush_write(pd); + + timeout = 1000; + while (timeout--) { + if (pd->software_irq_signal) + break; + msleep(1); + } + + /* disable interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + if (!pd->software_irq_signal) { + netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n"); + result = -ENODEV; + goto out_free_irq_1; + } + + netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq); + + result = smsc9420_alloc_tx_ring(pd); + if (result) { + netif_warn(pd, ifup, pd->dev, + "Failed to Initialize tx dma ring\n"); + result = -ENOMEM; + goto out_free_irq_1; + } + + result = smsc9420_alloc_rx_ring(pd); + if (result) { + netif_warn(pd, ifup, pd->dev, + "Failed to Initialize rx dma ring\n"); + result = -ENOMEM; + goto out_free_tx_ring_2; + } + + result = smsc9420_mii_init(dev); + if (result) { + netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n"); + result = -ENODEV; + goto out_free_rx_ring_3; + } + + /* Bring the PHY up */ + phy_start(pd->phy_dev); + + napi_enable(&pd->napi); + + /* start tx and rx */ + mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_; + smsc9420_reg_write(pd, MAC_CR, mac_cr); + + dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL); + dmac_control |= DMAC_CONTROL_ST_ | DMAC_CONTROL_SR_; + smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control); + smsc9420_pci_flush_write(pd); + + dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA); + dma_intr_ena |= + (DMAC_INTR_ENA_TX_ | DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_); + smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena); + smsc9420_pci_flush_write(pd); + + netif_wake_queue(dev); + + smsc9420_reg_write(pd, RX_POLL_DEMAND, 1); + + /* enable interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_; + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + return 0; + +out_free_rx_ring_3: + smsc9420_free_rx_ring(pd); +out_free_tx_ring_2: + smsc9420_free_tx_ring(pd); +out_free_irq_1: + free_irq(irq, pd); +out_0: + return result; +} + +#ifdef CONFIG_PM + +static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct smsc9420_pdata *pd = netdev_priv(dev); + u32 int_cfg; + ulong flags; + + /* disable interrupts */ + spin_lock_irqsave(&pd->int_lock, flags); + int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_); + smsc9420_reg_write(pd, INT_CFG, int_cfg); + spin_unlock_irqrestore(&pd->int_lock, flags); + + if (netif_running(dev)) { + netif_tx_disable(dev); + smsc9420_stop_tx(pd); + smsc9420_free_tx_ring(pd); + + napi_disable(&pd->napi); + smsc9420_stop_rx(pd); + smsc9420_free_rx_ring(pd); + + free_irq(pd->pdev->irq, pd); + + netif_device_detach(dev); + } + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int 
smsc9420_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct smsc9420_pdata *pd = netdev_priv(dev); + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_master(pdev); + + err = pci_enable_wake(pdev, PCI_D0, 0); + if (err) + netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n", + err); + + if (netif_running(dev)) { + /* FIXME: gross. It looks like ancient PM relic.*/ + err = smsc9420_open(dev); + netif_device_attach(dev); + } + return err; +} + +#endif /* CONFIG_PM */ + +static const struct net_device_ops smsc9420_netdev_ops = { + .ndo_open = smsc9420_open, + .ndo_stop = smsc9420_stop, + .ndo_start_xmit = smsc9420_hard_start_xmit, + .ndo_get_stats = smsc9420_get_stats, + .ndo_set_rx_mode = smsc9420_set_multicast_list, + .ndo_do_ioctl = smsc9420_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smsc9420_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ +}; + +static int +smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct net_device *dev; + struct smsc9420_pdata *pd; + void __iomem *virt_addr; + int result = 0; + u32 id_rev; + + pr_info("%s version %s\n", DRV_DESCRIPTION, DRV_VERSION); + + /* First do the PCI initialisation */ + result = pci_enable_device(pdev); + if (unlikely(result)) { + pr_err("Cannot enable smsc9420\n"); + goto out_0; + } + + pci_set_master(pdev); + + dev = alloc_etherdev(sizeof(*pd)); + if (!dev) + goto out_disable_pci_device_1; + + SET_NETDEV_DEV(dev, &pdev->dev); + + if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) { + netdev_err(dev, "Cannot find PCI device base address\n"); + goto out_free_netdev_2; + } + + if ((pci_request_regions(pdev, DRV_NAME))) { + netdev_err(dev, "Cannot obtain PCI resources, aborting\n"); + goto out_free_netdev_2; + } + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + netdev_err(dev, "No usable DMA configuration, aborting\n"); + goto out_free_regions_3; + } + + virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR), + pci_resource_len(pdev, SMSC_BAR)); + if (!virt_addr) { + netdev_err(dev, "Cannot map device registers, aborting\n"); + goto out_free_regions_3; + } + + /* registers are double mapped with 0 offset for LE and 0x200 for BE */ + virt_addr += LAN9420_CPSR_ENDIAN_OFFSET; + + pd = netdev_priv(dev); + + /* pci descriptors are created in the PCI consistent area */ + pd->rx_ring = pci_alloc_consistent(pdev, + sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE + + sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE, + &pd->rx_dma_addr); + + if (!pd->rx_ring) + goto out_free_io_4; + + /* descriptors are aligned due to the nature of pci_alloc_consistent */ + pd->tx_ring = (pd->rx_ring + RX_RING_SIZE); + pd->tx_dma_addr = pd->rx_dma_addr + + sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; + + pd->pdev = pdev; + pd->dev = dev; + pd->ioaddr = virt_addr; + pd->msg_enable = smsc_debug; + pd->rx_csum = true; + + netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr); + + id_rev = smsc9420_reg_read(pd, ID_REV); + switch (id_rev & 0xFFFF0000) { + case 0x94200000: + netif_info(pd, probe, pd->dev, + "LAN9420 identified, ID_REV=0x%08X\n", id_rev); + break; + default: + netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n"); + netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev); + goto out_free_dmadesc_5; + } + + smsc9420_dmac_soft_reset(pd); + 
smsc9420_eeprom_reload(pd); + smsc9420_check_mac_address(dev); + + dev->netdev_ops = &smsc9420_netdev_ops; + dev->ethtool_ops = &smsc9420_ethtool_ops; + + netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); + + result = register_netdev(dev); + if (result) { + netif_warn(pd, probe, pd->dev, "error %i registering device\n", + result); + goto out_free_dmadesc_5; + } + + pci_set_drvdata(pdev, dev); + + spin_lock_init(&pd->int_lock); + spin_lock_init(&pd->phy_lock); + + dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; + +out_free_dmadesc_5: + pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * + (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); +out_free_io_4: + iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET); +out_free_regions_3: + pci_release_regions(pdev); +out_free_netdev_2: + free_netdev(dev); +out_disable_pci_device_1: + pci_disable_device(pdev); +out_0: + return -ENODEV; +} + +static void smsc9420_remove(struct pci_dev *pdev) +{ + struct net_device *dev; + struct smsc9420_pdata *pd; + + dev = pci_get_drvdata(pdev); + if (!dev) + return; + + pd = netdev_priv(dev); + unregister_netdev(dev); + + /* tx_buffers and rx_buffers are freed in stop */ + BUG_ON(pd->tx_buffers); + BUG_ON(pd->rx_buffers); + + BUG_ON(!pd->tx_ring); + BUG_ON(!pd->rx_ring); + + pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * + (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); + + iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET); + pci_release_regions(pdev); + free_netdev(dev); + pci_disable_device(pdev); +} + +static struct pci_driver smsc9420_driver = { + .name = DRV_NAME, + .id_table = smsc9420_id_table, + .probe = smsc9420_probe, + .remove = smsc9420_remove, +#ifdef CONFIG_PM + .suspend = smsc9420_suspend, + .resume = smsc9420_resume, +#endif /* CONFIG_PM */ +}; + +static int __init smsc9420_init_module(void) +{ + smsc_debug = netif_msg_init(debug, SMSC_MSG_DEFAULT); + + return pci_register_driver(&smsc9420_driver); +} + +static void __exit smsc9420_exit_module(void) +{ + pci_unregister_driver(&smsc9420_driver); +} + +module_init(smsc9420_init_module); +module_exit(smsc9420_exit_module); diff --git a/drivers/net/ethernet/smsc/smsc9420.h b/drivers/net/ethernet/smsc/smsc9420.h new file mode 100644 index 000000000..c63c76381 --- /dev/null +++ b/drivers/net/ethernet/smsc/smsc9420.h @@ -0,0 +1,275 @@ + /*************************************************************************** + * + * Copyright (C) 2007,2008 SMSC + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ * + *************************************************************************** + */ + +#ifndef _SMSC9420_H +#define _SMSC9420_H + +#define TX_RING_SIZE (32) +#define RX_RING_SIZE (128) + +/* interrupt deassertion in multiples of 10us */ +#define INT_DEAS_TIME (50) + +#define NAPI_WEIGHT (64) +#define SMSC_BAR (3) + +#ifdef __BIG_ENDIAN +/* Register set is duplicated for BE at an offset of 0x200 */ +#define LAN9420_CPSR_ENDIAN_OFFSET (0x200) +#else +#define LAN9420_CPSR_ENDIAN_OFFSET (0) +#endif + +#define PCI_VENDOR_ID_9420 (0x1055) +#define PCI_DEVICE_ID_9420 (0xE420) + +#define LAN_REGISTER_EXTENT (0x400) + +#define SMSC9420_EEPROM_SIZE ((u32)11) +#define SMSC9420_EEPROM_MAGIC (0x9420) + +#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4) + +/***********************************************/ +/* DMA Controller Control and Status Registers */ +/***********************************************/ +#define BUS_MODE (0x00) +#define BUS_MODE_SWR_ (BIT(0)) +#define BUS_MODE_DMA_BURST_LENGTH_1 (BIT(8)) +#define BUS_MODE_DMA_BURST_LENGTH_2 (BIT(9)) +#define BUS_MODE_DMA_BURST_LENGTH_4 (BIT(10)) +#define BUS_MODE_DMA_BURST_LENGTH_8 (BIT(11)) +#define BUS_MODE_DMA_BURST_LENGTH_16 (BIT(12)) +#define BUS_MODE_DMA_BURST_LENGTH_32 (BIT(13)) +#define BUS_MODE_DBO_ (BIT(20)) + +#define TX_POLL_DEMAND (0x04) + +#define RX_POLL_DEMAND (0x08) + +#define RX_BASE_ADDR (0x0C) + +#define TX_BASE_ADDR (0x10) + +#define DMAC_STATUS (0x14) +#define DMAC_STS_TS_ (7 << 20) +#define DMAC_STS_RS_ (7 << 17) +#define DMAC_STS_NIS_ (BIT(16)) +#define DMAC_STS_AIS_ (BIT(15)) +#define DMAC_STS_RWT_ (BIT(9)) +#define DMAC_STS_RXPS_ (BIT(8)) +#define DMAC_STS_RXBU_ (BIT(7)) +#define DMAC_STS_RX_ (BIT(6)) +#define DMAC_STS_TXUNF_ (BIT(5)) +#define DMAC_STS_TXBU_ (BIT(2)) +#define DMAC_STS_TXPS_ (BIT(1)) +#define DMAC_STS_TX_ (BIT(0)) + +#define DMAC_CONTROL (0x18) +#define DMAC_CONTROL_TTM_ (BIT(22)) +#define DMAC_CONTROL_SF_ (BIT(21)) +#define DMAC_CONTROL_ST_ (BIT(13)) +#define DMAC_CONTROL_OSF_ (BIT(2)) +#define DMAC_CONTROL_SR_ (BIT(1)) + +#define DMAC_INTR_ENA (0x1C) +#define DMAC_INTR_ENA_NIS_ (BIT(16)) +#define DMAC_INTR_ENA_AIS_ (BIT(15)) +#define DMAC_INTR_ENA_RWT_ (BIT(9)) +#define DMAC_INTR_ENA_RXPS_ (BIT(8)) +#define DMAC_INTR_ENA_RXBU_ (BIT(7)) +#define DMAC_INTR_ENA_RX_ (BIT(6)) +#define DMAC_INTR_ENA_TXBU_ (BIT(2)) +#define DMAC_INTR_ENA_TXPS_ (BIT(1)) +#define DMAC_INTR_ENA_TX_ (BIT(0)) + +#define MISS_FRAME_CNTR (0x20) + +#define TX_BUFF_ADDR (0x50) + +#define RX_BUFF_ADDR (0x54) + +/* Transmit Descriptor Bit Defs */ +#define TDES0_OWN_ (0x80000000) +#define TDES0_ERROR_SUMMARY_ (0x00008000) +#define TDES0_LOSS_OF_CARRIER_ (0x00000800) +#define TDES0_NO_CARRIER_ (0x00000400) +#define TDES0_LATE_COLLISION_ (0x00000200) +#define TDES0_EXCESSIVE_COLLISIONS_ (0x00000100) +#define TDES0_HEARTBEAT_FAIL_ (0x00000080) +#define TDES0_COLLISION_COUNT_MASK_ (0x00000078) +#define TDES0_COLLISION_COUNT_SHFT_ (3) +#define TDES0_EXCESSIVE_DEFERRAL_ (0x00000004) +#define TDES0_DEFERRED_ (0x00000001) + +#define TDES1_IC_ 0x80000000 +#define TDES1_LS_ 0x40000000 +#define TDES1_FS_ 0x20000000 +#define TDES1_TXCSEN_ 0x08000000 +#define TDES1_TER_ (BIT(25)) +#define TDES1_TCH_ 0x01000000 + +/* Receive Descriptor 0 Bit Defs */ +#define RDES0_OWN_ (0x80000000) +#define RDES0_FRAME_LENGTH_MASK_ (0x07FF0000) +#define RDES0_FRAME_LENGTH_SHFT_ (16) +#define RDES0_ERROR_SUMMARY_ (0x00008000) +#define RDES0_DESCRIPTOR_ERROR_ (0x00004000) +#define RDES0_LENGTH_ERROR_ (0x00001000) +#define RDES0_RUNT_FRAME_ (0x00000800) +#define 
RDES0_MULTICAST_FRAME_ (0x00000400) +#define RDES0_FIRST_DESCRIPTOR_ (0x00000200) +#define RDES0_LAST_DESCRIPTOR_ (0x00000100) +#define RDES0_FRAME_TOO_LONG_ (0x00000080) +#define RDES0_COLLISION_SEEN_ (0x00000040) +#define RDES0_FRAME_TYPE_ (0x00000020) +#define RDES0_WATCHDOG_TIMEOUT_ (0x00000010) +#define RDES0_MII_ERROR_ (0x00000008) +#define RDES0_DRIBBLING_BIT_ (0x00000004) +#define RDES0_CRC_ERROR_ (0x00000002) + +/* Receive Descriptor 1 Bit Defs */ +#define RDES1_RER_ (0x02000000) + +/***********************************************/ +/* MAC Control and Status Registers */ +/***********************************************/ +#define MAC_CR (0x80) +#define MAC_CR_RXALL_ (0x80000000) +#define MAC_CR_DIS_RXOWN_ (0x00800000) +#define MAC_CR_LOOPBK_ (0x00200000) +#define MAC_CR_FDPX_ (0x00100000) +#define MAC_CR_MCPAS_ (0x00080000) +#define MAC_CR_PRMS_ (0x00040000) +#define MAC_CR_INVFILT_ (0x00020000) +#define MAC_CR_PASSBAD_ (0x00010000) +#define MAC_CR_HFILT_ (0x00008000) +#define MAC_CR_HPFILT_ (0x00002000) +#define MAC_CR_LCOLL_ (0x00001000) +#define MAC_CR_DIS_BCAST_ (0x00000800) +#define MAC_CR_DIS_RTRY_ (0x00000400) +#define MAC_CR_PADSTR_ (0x00000100) +#define MAC_CR_BOLMT_MSK (0x000000C0) +#define MAC_CR_MFCHK_ (0x00000020) +#define MAC_CR_TXEN_ (0x00000008) +#define MAC_CR_RXEN_ (0x00000004) + +#define ADDRH (0x84) + +#define ADDRL (0x88) + +#define HASHH (0x8C) + +#define HASHL (0x90) + +#define MII_ACCESS (0x94) +#define MII_ACCESS_MII_BUSY_ (0x00000001) +#define MII_ACCESS_MII_WRITE_ (0x00000002) +#define MII_ACCESS_MII_READ_ (0x00000000) +#define MII_ACCESS_INDX_MSK_ (0x000007C0) +#define MII_ACCESS_PHYADDR_MSK_ (0x0000F8C0) +#define MII_ACCESS_INDX_SHFT_CNT (6) +#define MII_ACCESS_PHYADDR_SHFT_CNT (11) + +#define MII_DATA (0x98) + +#define FLOW (0x9C) + +#define VLAN1 (0xA0) + +#define VLAN2 (0xA4) + +#define WUFF (0xA8) + +#define WUCSR (0xAC) + +#define COE_CR (0xB0) +#define TX_COE_EN (0x00010000) +#define RX_COE_MODE (0x00000002) +#define RX_COE_EN (0x00000001) + +/***********************************************/ +/* System Control and Status Registers */ +/***********************************************/ +#define ID_REV (0xC0) + +#define INT_CTL (0xC4) +#define INT_CTL_SW_INT_EN_ (0x00008000) +#define INT_CTL_SBERR_INT_EN_ (1 << 12) +#define INT_CTL_MBERR_INT_EN_ (1 << 13) +#define INT_CTL_GPT_INT_EN_ (0x00000008) +#define INT_CTL_PHY_INT_EN_ (0x00000004) +#define INT_CTL_WAKE_INT_EN_ (0x00000002) + +#define INT_STAT (0xC8) +#define INT_STAT_SW_INT_ (1 << 15) +#define INT_STAT_MBERR_INT_ (1 << 13) +#define INT_STAT_SBERR_INT_ (1 << 12) +#define INT_STAT_GPT_INT_ (1 << 3) +#define INT_STAT_PHY_INT_ (0x00000004) +#define INT_STAT_WAKE_INT_ (0x00000002) +#define INT_STAT_DMAC_INT_ (0x00000001) + +#define INT_CFG (0xCC) +#define INT_CFG_IRQ_INT_ (0x00080000) +#define INT_CFG_IRQ_EN_ (0x00040000) +#define INT_CFG_INT_DEAS_CLR_ (0x00000200) +#define INT_CFG_INT_DEAS_MASK (0x000000FF) + +#define GPIO_CFG (0xD0) +#define GPIO_CFG_LED_3_ (0x40000000) +#define GPIO_CFG_LED_2_ (0x20000000) +#define GPIO_CFG_LED_1_ (0x10000000) +#define GPIO_CFG_EEPR_EN_ (0x00700000) + +#define GPT_CFG (0xD4) +#define GPT_CFG_TIMER_EN_ (0x20000000) + +#define GPT_CNT (0xD8) + +#define BUS_CFG (0xDC) +#define BUS_CFG_RXTXWEIGHT_1_1 (0 << 25) +#define BUS_CFG_RXTXWEIGHT_2_1 (1 << 25) +#define BUS_CFG_RXTXWEIGHT_3_1 (2 << 25) +#define BUS_CFG_RXTXWEIGHT_4_1 (3 << 25) + +#define PMT_CTRL (0xE0) + +#define FREE_RUN (0xF4) + +#define E2P_CMD (0xF8) +#define E2P_CMD_EPC_BUSY_ (0x80000000) +#define 
E2P_CMD_EPC_CMD_ (0x70000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) +#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) +#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) +#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000) +#define E2P_CMD_EPC_TIMEOUT_ (0x00000200) +#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100) +#define E2P_CMD_EPC_ADDR_ (0x000000FF) + +#define E2P_DATA (0xFC) +#define E2P_DATA_EEPROM_DATA_ (0x000000FF) + +#endif /* _SMSC9420_H */ diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig new file mode 100644 index 000000000..f4a80da00 --- /dev/null +++ b/drivers/net/ethernet/stmicro/Kconfig @@ -0,0 +1,23 @@ +# +# STMicroelectronics device configuration +# + +config NET_VENDOR_STMICRO + bool "STMicroelectronics devices" + default y + depends on HAS_IOMEM + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about STMicroelectronics cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_STMICRO + +source "drivers/net/ethernet/stmicro/stmmac/Kconfig" + +endif # NET_VENDOR_STMICRO diff --git a/drivers/net/ethernet/stmicro/Makefile b/drivers/net/ethernet/stmicro/Makefile new file mode 100644 index 000000000..9b3bfddda --- /dev/null +++ b/drivers/net/ethernet/stmicro/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the STMicroelectronics device drivers. +# + +obj-$(CONFIG_STMMAC_ETH) += stmmac/ diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig new file mode 100644 index 000000000..7d3af190b --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -0,0 +1,40 @@ +config STMMAC_ETH + tristate "STMicroelectronics 10/100/1000 Ethernet driver" + depends on HAS_IOMEM && HAS_DMA + select MII + select PHYLIB + select CRC32 + select PTP_1588_CLOCK + select RESET_CONTROLLER + ---help--- + This is the driver for the Ethernet IPs built around a + Synopsys IP Core and only tested on the STMicroelectronics + platforms. + +if STMMAC_ETH + +config STMMAC_PLATFORM + tristate "STMMAC Platform bus support" + depends on STMMAC_ETH + default y + ---help--- + This selects the platform specific bus support for the stmmac driver. + This is the driver used on several SoCs: + STi, Allwinner, Amlogic Meson, Altera SOCFPGA. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + +config STMMAC_PCI + tristate "STMMAC PCI bus support" + depends on STMMAC_ETH && PCI + ---help--- + This is to select the Synopsys DWMAC available on PCI devices. + If you have a controller with this interface, say Y or M here. + + This PCI support is tested on Xilinx XC2V3000 FF1152AMT0221 + D1215994A VIRTEX FPGA board. + + If unsure, say N.
+endif diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile new file mode 100644 index 000000000..73c2715a2 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -0,0 +1,12 @@ +obj-$(CONFIG_STMMAC_ETH) += stmmac.o +stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ + chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ + dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ + mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) + +obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \ + dwmac-sti.o dwmac-socfpga.o dwmac-rk.o + +obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o +stmmac-pci-objs:= stmmac_pci.o diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c new file mode 100644 index 000000000..cf28daba4 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -0,0 +1,166 @@ +/******************************************************************************* + Specialised functions for managing Chained mode + + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + descriptors in case the DMA is configured to work in chained or + in ring mode. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING".
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include "stmmac.h" + +static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)p; + unsigned int txsize = priv->dma_tx_size; + unsigned int entry = priv->cur_tx % txsize; + struct dma_desc *desc = priv->dma_tx + entry; + unsigned int nopaged_len = skb_headlen(skb); + unsigned int bmax; + unsigned int i = 1, len; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; + else + bmax = BUF_SIZE_2KiB; + + len = nopaged_len - bmax; + + desc->des2 = dma_map_single(priv->device, skb->data, + bmax, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); + + while (len != 0) { + priv->tx_skbuff[entry] = NULL; + entry = (++priv->cur_tx) % txsize; + desc = priv->dma_tx + entry; + + if (len > bmax) { + desc->des2 = dma_map_single(priv->device, + (skb->data + bmax * i), + bmax, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, + STMMAC_CHAIN_MODE); + priv->hw->desc->set_tx_owner(desc); + len -= bmax; + i++; + } else { + desc->des2 = dma_map_single(priv->device, + (skb->data + bmax * i), len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, + STMMAC_CHAIN_MODE); + priv->hw->desc->set_tx_owner(desc); + len = 0; + } + } + return entry; +} + +static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) +{ + unsigned int ret = 0; + + if ((enh_desc && (len > BUF_SIZE_8KiB)) || + (!enh_desc && (len > BUF_SIZE_2KiB))) { + ret = 1; + } + + return ret; +} + +static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, + unsigned int size, unsigned int extend_desc) +{ + /* + * In chained mode the des3 points to the next element in the ring. + * The latest element has to point to the head. + */ + int i; + dma_addr_t dma_phy = phy_addr; + + if (extend_desc) { + struct dma_extended_desc *p = (struct dma_extended_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_extended_desc); + p->basic.des3 = (unsigned int)dma_phy; + p++; + } + p->basic.des3 = (unsigned int)phy_addr; + + } else { + struct dma_desc *p = (struct dma_desc *)des; + for (i = 0; i < (size - 1); i++) { + dma_phy += sizeof(struct dma_desc); + p->des3 = (unsigned int)dma_phy; + p++; + } + p->des3 = (unsigned int)phy_addr; + } +} + +static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + + if (priv->hwts_rx_en && !priv->extend_desc) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. 
+ */ + p->des3 = (unsigned int)(priv->dma_rx_phy + + (((priv->dirty_rx) + 1) % + priv->dma_rx_size) * + sizeof(struct dma_desc)); +} + +static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + + if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc) + /* NOTE: Device will overwrite des3 with timestamp value if + * 1588-2002 time stamping is enabled, hence reinitialize it + * to keep explicit chaining in the descriptor. + */ + p->des3 = (unsigned int)(priv->dma_tx_phy + + (((priv->dirty_tx + 1) % + priv->dma_tx_size) * + sizeof(struct dma_desc))); +} + +const struct stmmac_mode_ops chain_mode_ops = { + .init = stmmac_init_dma_chain, + .is_jumbo_frm = stmmac_is_jumbo_frm, + .jumbo_frm = stmmac_jumbo_frm, + .refill_desc3 = stmmac_refill_desc3, + .clean_desc3 = stmmac_clean_desc3, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h new file mode 100644 index 000000000..623c6ed87 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -0,0 +1,480 @@ +/******************************************************************************* + STMMAC Common Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __COMMON_H__ +#define __COMMON_H__ + +#include +#include +#include +#include +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define STMMAC_VLAN_TAG_USED +#include +#endif + +#include "descs.h" +#include "mmc.h" + +/* Synopsys Core versions */ +#define DWMAC_CORE_3_40 0x34 +#define DWMAC_CORE_3_50 0x35 + +#undef FRAME_FILTER_DEBUG +/* #define FRAME_FILTER_DEBUG */ + +/* Extra statistic and debug information exposed by ethtool */ +struct stmmac_extra_stats { + /* Transmit errors */ + unsigned long tx_underflow ____cacheline_aligned; + unsigned long tx_carrier; + unsigned long tx_losscarrier; + unsigned long vlan_tag; + unsigned long tx_deferred; + unsigned long tx_vlan; + unsigned long tx_jabber; + unsigned long tx_frame_flushed; + unsigned long tx_payload_error; + unsigned long tx_ip_header_error; + /* Receive errors */ + unsigned long rx_desc; + unsigned long sa_filter_fail; + unsigned long overflow_error; + unsigned long ipc_csum_error; + unsigned long rx_collision; + unsigned long rx_crc; + unsigned long dribbling_bit; + unsigned long rx_length; + unsigned long rx_mii; + unsigned long rx_multicast; + unsigned long rx_gmac_overflow; + unsigned long rx_watchdog; + unsigned long da_rx_filter_fail; + unsigned long sa_rx_filter_fail; + unsigned long rx_missed_cntr; + unsigned long rx_overflow_cntr; + unsigned long rx_vlan; + /* Tx/Rx IRQ error info */ + unsigned long tx_undeflow_irq; + unsigned long tx_process_stopped_irq; + unsigned long tx_jabber_irq; + unsigned long rx_overflow_irq; + unsigned long rx_buf_unav_irq; + unsigned long rx_process_stopped_irq; + unsigned long rx_watchdog_irq; + unsigned long tx_early_irq; + unsigned long fatal_bus_error_irq; + /* Tx/Rx IRQ Events */ + unsigned long rx_early_irq; + unsigned long threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long normal_irq_n; + unsigned long rx_normal_irq_n; + unsigned long napi_poll; + unsigned long tx_normal_irq_n; + unsigned long tx_clean; + unsigned long tx_reset_ic_bit; + unsigned long irq_receive_pmt_irq_n; + /* MMC info */ + unsigned long mmc_tx_irq_n; + unsigned long mmc_rx_irq_n; + unsigned long mmc_rx_csum_offload_irq_n; + /* EEE */ + unsigned long irq_tx_path_in_lpi_mode_n; + unsigned long irq_tx_path_exit_lpi_mode_n; + unsigned long irq_rx_path_in_lpi_mode_n; + unsigned long irq_rx_path_exit_lpi_mode_n; + unsigned long phy_eee_wakeup_error_n; + /* Extended RDES status */ + unsigned long ip_hdr_err; + unsigned long ip_payload_err; + unsigned long ip_csum_bypassed; + unsigned long ipv4_pkt_rcvd; + unsigned long ipv6_pkt_rcvd; + unsigned long rx_msg_type_ext_no_ptp; + unsigned long rx_msg_type_sync; + unsigned long rx_msg_type_follow_up; + unsigned long rx_msg_type_delay_req; + unsigned long rx_msg_type_delay_resp; + unsigned long rx_msg_type_pdelay_req; + unsigned long rx_msg_type_pdelay_resp; + unsigned long rx_msg_type_pdelay_follow_up; + unsigned long ptp_frame_type; + unsigned long ptp_ver; + unsigned long timestamp_dropped; + unsigned long av_pkt_rcvd; + unsigned long av_tagged_pkt_rcvd; + unsigned long vlan_tag_priority_val; + unsigned long l3_filter_match; + unsigned long l4_filter_match; + unsigned long l3_l4_filter_no_match; + /* PCS */ + unsigned long irq_pcs_ane_n; + unsigned long irq_pcs_link_n; + unsigned long irq_rgmii_n; + unsigned long pcs_link; + unsigned long pcs_duplex; + unsigned long pcs_speed; +}; + +/* CSR Frequency Access 
Defines*/ +#define CSR_F_35M 35000000 +#define CSR_F_60M 60000000 +#define CSR_F_100M 100000000 +#define CSR_F_150M 150000000 +#define CSR_F_250M 250000000 +#define CSR_F_300M 300000000 + +#define MAC_CSR_H_FRQ_MASK 0x20 + +#define HASH_TABLE_SIZE 64 +#define PAUSE_TIME 0xffff + +/* Flow Control defines */ +#define FLOW_OFF 0 +#define FLOW_RX 1 +#define FLOW_TX 2 +#define FLOW_AUTO (FLOW_TX | FLOW_RX) + +/* PCS defines */ +#define STMMAC_PCS_RGMII (1 << 0) +#define STMMAC_PCS_SGMII (1 << 1) +#define STMMAC_PCS_TBI (1 << 2) +#define STMMAC_PCS_RTBI (1 << 3) + +#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ + +/* DAM HW feature register fields */ +#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */ +#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */ +#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */ +#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */ +#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */ +#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */ +#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */ +#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */ +#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */ +#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */ +#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */ +#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */ +#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */ +#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */ +#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */ +#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */ +#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */ +#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */ +#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */ +#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */ +#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */ +#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. 
additional Tx Channels */ +#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */ +/* Timestamping with Internal System Time */ +#define DMA_HW_FEAT_INTTSEN 0x02000000 +#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ +#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */ +#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ +#define DEFAULT_DMA_PBL 8 + +/* Max/Min RI Watchdog Timer count value */ +#define MAX_DMA_RIWT 0xff +#define MIN_DMA_RIWT 0x20 +/* Tx coalesce parameters */ +#define STMMAC_COAL_TX_TIMER 40000 +#define STMMAC_MAX_COAL_TX_TICK 100000 +#define STMMAC_TX_MAX_FRAMES 256 +#define STMMAC_TX_FRAMES 64 + +/* Rx IPC status */ +enum rx_frame_status { + good_frame = 0, + discard_frame = 1, + csum_none = 2, + llc_snap = 4, +}; + +enum dma_irq_status { + tx_hard_error = 0x1, + tx_hard_error_bump_tc = 0x2, + handle_rx = 0x4, + handle_tx = 0x8, +}; + +/* EEE and LPI defines */ +#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) +#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) +#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) +#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) + +#define CORE_PCS_ANE_COMPLETE (1 << 5) +#define CORE_PCS_LINK_STATUS (1 << 6) +#define CORE_RGMII_IRQ (1 << 7) + +/* Physical Coding Sublayer */ +struct rgmii_adv { + unsigned int pause; + unsigned int duplex; + unsigned int lp_pause; + unsigned int lp_duplex; +}; + +#define STMMAC_PCS_PAUSE 1 +#define STMMAC_PCS_ASYM_PAUSE 2 + +/* DMA HW capabilities */ +struct dma_features { + unsigned int mbps_10_100; + unsigned int mbps_1000; + unsigned int half_duplex; + unsigned int hash_filter; + unsigned int multi_addr; + unsigned int pcs; + unsigned int sma_mdio; + unsigned int pmt_remote_wake_up; + unsigned int pmt_magic_frame; + unsigned int rmon; + /* IEEE 1588-2002 */ + unsigned int time_stamp; + /* IEEE 1588-2008 */ + unsigned int atime_stamp; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + unsigned int eee; + unsigned int av; + /* TX and RX csum */ + unsigned int tx_coe; + unsigned int rx_coe_type1; + unsigned int rx_coe_type2; + unsigned int rxfifo_over_2048; + /* TX and RX number of channels */ + unsigned int number_rx_channel; + unsigned int number_tx_channel; + /* Alternate (enhanced) DESC mode */ + unsigned int enh_desc; +}; + +/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ +#define BUF_SIZE_16KiB 16384 +#define BUF_SIZE_8KiB 8192 +#define BUF_SIZE_4KiB 4096 +#define BUF_SIZE_2KiB 2048 + +/* Power Down and WOL */ +#define PMT_NOT_SUPPORTED 0 +#define PMT_SUPPORTED 1 + +/* Common MAC defines */ +#define MAC_CTRL_REG 0x00000000 /* MAC Control */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ + +/* Default LPI timers */ +#define STMMAC_DEFAULT_LIT_LS 0x3E8 +#define STMMAC_DEFAULT_TWT_LS 0x1E + +#define STMMAC_CHAIN_MODE 0x1 +#define STMMAC_RING_MODE 0x2 + +#define JUMBO_LEN 9000 + +/* Descriptors helpers */ +struct stmmac_desc_ops { + /* DMA RX descriptor ring initialization */ + void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode, + int end); + /* DMA TX descriptor ring initialization */ + void (*init_tx_desc) (struct dma_desc *p, int mode, int end); + + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, + int csum_flag, int mode); + /* Set/get the owner of the descriptor */ + void (*set_tx_owner) (struct dma_desc *p); + int (*get_tx_owner) (struct dma_desc *p); + /* Invoked by the xmit 
function to close the tx descriptor */ + void (*close_tx_desc) (struct dma_desc *p); + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc) (struct dma_desc *p, int mode); + /* Clear interrupt on tx frame completion. When this bit is + * set an interrupt happens as soon as the frame is transmitted */ + void (*clear_tx_ic) (struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls) (struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status) (void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr); + /* Get the buffer size from the descriptor */ + int (*get_tx_len) (struct dma_desc *p); + /* Handle extra events on specific interrupts hw dependent */ + int (*get_rx_owner) (struct dma_desc *p); + void (*set_rx_owner) (struct dma_desc *p); + /* Get the receive frame size */ + int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type); + /* Return the reception status looking at the RDES1 */ + int (*rx_status) (void *data, struct stmmac_extra_stats *x, + struct dma_desc *p); + void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p); + /* Set tx timestamp enable bit */ + void (*enable_tx_timestamp) (struct dma_desc *p); + /* get tx timestamp status */ + int (*get_tx_timestamp_status) (struct dma_desc *p); + /* get timestamp value */ + u64(*get_timestamp) (void *desc, u32 ats); + /* get rx timestamp status */ + int (*get_rx_timestamp_status) (void *desc, u32 ats); +}; + +extern const struct stmmac_desc_ops enh_desc_ops; +extern const struct stmmac_desc_ops ndesc_ops; + +/* Specific DMA helpers */ +struct stmmac_dma_ops { + /* DMA core initialization */ + int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, + int burst_len, u32 dma_tx, u32 dma_rx, int atds); + /* Dump DMA registers */ + void (*dump_regs) (void __iomem *ioaddr); + /* Set tx/rx threshold in the csr6 register + * An invalid value enables the store-and-forward mode */ + void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, + int rxfifosz); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr); + void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_irq) (void __iomem *ioaddr); + void (*disable_dma_irq) (void __iomem *ioaddr); + void (*start_tx) (void __iomem *ioaddr); + void (*stop_tx) (void __iomem *ioaddr); + void (*start_rx) (void __iomem *ioaddr); + void (*stop_rx) (void __iomem *ioaddr); + int (*dma_interrupt) (void __iomem *ioaddr, + struct stmmac_extra_stats *x); + /* If supported then get the optional core features */ + unsigned int (*get_hw_feature) (void __iomem *ioaddr); + /* Program the HW RX Watchdog */ + void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); +}; + +struct mac_device_info; + +/* Helpers to program the MAC core */ +struct stmmac_ops { + /* MAC core initialization */ + void (*core_init)(struct mac_device_info *hw, int mtu); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc)(struct mac_device_info *hw); + /* Dump MAC registers */ + void (*dump_regs)(struct mac_device_info *hw); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(struct mac_device_info *hw, + struct stmmac_extra_stats *x); + /* Multicast filter setting */ + void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); + /* Flow control setting */ + void 
(*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time); + /* Set power management mode (e.g. magic frame) */ + void (*pmt)(struct mac_device_info *hw, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*set_eee_mode)(struct mac_device_info *hw); + void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); + void (*set_eee_pls)(struct mac_device_info *hw, int link); + void (*ctrl_ane)(struct mac_device_info *hw, bool restart); + void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv); +}; + +/* PTP and HW Timer helpers */ +struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment) (void __iomem *ioaddr); + int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend) (void __iomem *ioaddr, u32 addend); + int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub); + u64(*get_systime) (void __iomem *ioaddr); +}; + +extern const struct stmmac_hwtimestamp stmmac_ptp; + +struct mac_link { + int port; + int duplex; + int speed; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ +}; + +/* Helpers to manage the descriptors for chain and ring modes */ +struct stmmac_mode_ops { + void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, + unsigned int extend_desc); + unsigned int (*is_jumbo_frm) (int len, int ehn_desc); + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); + int (*set_16kib_bfsize)(int mtu); + void (*init_desc3)(struct dma_desc *p); + void (*refill_desc3) (void *priv, struct dma_desc *p); + void (*clean_desc3) (void *priv, struct dma_desc *p); +}; + +struct mac_device_info { + const struct stmmac_ops *mac; + const struct stmmac_desc_ops *desc; + const struct stmmac_dma_ops *dma; + const struct stmmac_mode_ops *mode; + const struct stmmac_hwtimestamp *ptp; + struct mii_regs mii; /* MII register Addresses */ + struct mac_link link; + unsigned int synopsys_uid; + void __iomem *pcsr; /* vpointer to device CSRs */ + int multicast_filter_bins; + int unicast_filter_entries; + int mcast_bits_log2; + unsigned int rx_csum; +}; + +struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, + int perfect_uc_entries); +struct mac_device_info *dwmac100_setup(void __iomem *ioaddr); + +void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); + +void stmmac_set_mac(void __iomem *ioaddr, bool enable); + +void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); +extern const struct stmmac_mode_ops ring_mode_ops; +extern const struct stmmac_mode_ops chain_mode_ops; + +#endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h new file mode 100644 index 000000000..ad3996038 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -0,0 +1,219 @@ +/******************************************************************************* + Header File to describe the DMA descriptors. + Enhanced descriptors have been in case of DWMAC1000 Cores. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DESCS_H__ +#define __DESCS_H__ + +/* Basic descriptor structure for normal and alternate descriptors */ +struct dma_desc { + /* Receive descriptor */ + union { + struct { + /* RDES0 */ + u32 payload_csum_error:1; + u32 crc_error:1; + u32 dribbling:1; + u32 mii_error:1; + u32 receive_watchdog:1; + u32 frame_type:1; + u32 collision:1; + u32 ipc_csum_error:1; + u32 last_descriptor:1; + u32 first_descriptor:1; + u32 vlan_tag:1; + u32 overflow_error:1; + u32 length_error:1; + u32 sa_filter_fail:1; + u32 descriptor_error:1; + u32 error_summary:1; + u32 frame_length:14; + u32 da_filter_fail:1; + u32 own:1; + /* RDES1 */ + u32 buffer1_size:11; + u32 buffer2_size:11; + u32 reserved1:2; + u32 second_address_chained:1; + u32 end_ring:1; + u32 reserved2:5; + u32 disable_ic:1; + + } rx; + struct { + /* RDES0 */ + u32 rx_mac_addr:1; + u32 crc_error:1; + u32 dribbling:1; + u32 error_gmii:1; + u32 receive_watchdog:1; + u32 frame_type:1; + u32 late_collision:1; + u32 ipc_csum_error:1; + u32 last_descriptor:1; + u32 first_descriptor:1; + u32 vlan_tag:1; + u32 overflow_error:1; + u32 length_error:1; + u32 sa_filter_fail:1; + u32 descriptor_error:1; + u32 error_summary:1; + u32 frame_length:14; + u32 da_filter_fail:1; + u32 own:1; + /* RDES1 */ + u32 buffer1_size:13; + u32 reserved1:1; + u32 second_address_chained:1; + u32 end_ring:1; + u32 buffer2_size:13; + u32 reserved2:2; + u32 disable_ic:1; + } erx; /* -- enhanced -- */ + + /* Transmit descriptor */ + struct { + /* TDES0 */ + u32 deferred:1; + u32 underflow_error:1; + u32 excessive_deferral:1; + u32 collision_count:4; + u32 vlan_frame:1; + u32 excessive_collisions:1; + u32 late_collision:1; + u32 no_carrier:1; + u32 loss_carrier:1; + u32 payload_error:1; + u32 frame_flushed:1; + u32 jabber_timeout:1; + u32 error_summary:1; + u32 ip_header_error:1; + u32 time_stamp_status:1; + u32 reserved1:13; + u32 own:1; + /* TDES1 */ + u32 buffer1_size:11; + u32 buffer2_size:11; + u32 time_stamp_enable:1; + u32 disable_padding:1; + u32 second_address_chained:1; + u32 end_ring:1; + u32 crc_disable:1; + u32 checksum_insertion:2; + u32 first_segment:1; + u32 last_segment:1; + u32 interrupt:1; + } tx; + struct { + /* TDES0 */ + u32 deferred:1; + u32 underflow_error:1; + u32 excessive_deferral:1; + u32 collision_count:4; + u32 vlan_frame:1; + u32 excessive_collisions:1; + u32 late_collision:1; + u32 no_carrier:1; + u32 loss_carrier:1; + u32 payload_error:1; + u32 frame_flushed:1; + u32 jabber_timeout:1; + u32 error_summary:1; + u32 ip_header_error:1; + u32 time_stamp_status:1; + u32 reserved1:2; + u32 second_address_chained:1; + u32 end_ring:1; + u32 checksum_insertion:2; + u32 reserved2:1; + u32 
time_stamp_enable:1; + u32 disable_padding:1; + u32 crc_disable:1; + u32 first_segment:1; + u32 last_segment:1; + u32 interrupt:1; + u32 own:1; + /* TDES1 */ + u32 buffer1_size:13; + u32 reserved3:3; + u32 buffer2_size:13; + u32 reserved4:3; + } etx; /* -- enhanced -- */ + } des01; + unsigned int des2; + unsigned int des3; +}; + +/* Extended descriptor structure (supported by new SYNP GMAC generations) */ +struct dma_extended_desc { + struct dma_desc basic; + union { + struct { + u32 ip_payload_type:3; + u32 ip_hdr_err:1; + u32 ip_payload_err:1; + u32 ip_csum_bypassed:1; + u32 ipv4_pkt_rcvd:1; + u32 ipv6_pkt_rcvd:1; + u32 msg_type:4; + u32 ptp_frame_type:1; + u32 ptp_ver:1; + u32 timestamp_dropped:1; + u32 reserved:1; + u32 av_pkt_rcvd:1; + u32 av_tagged_pkt_rcvd:1; + u32 vlan_tag_priority_val:3; + u32 reserved3:3; + u32 l3_filter_match:1; + u32 l4_filter_match:1; + u32 l3_l4_filter_no_match:2; + u32 reserved4:4; + } erx; + struct { + u32 reserved; + } etx; + } des4; + unsigned int des5; /* Reserved */ + unsigned int des6; /* Tx/Rx Timestamp Low */ + unsigned int des7; /* Tx/Rx Timestamp High */ +}; + +/* Transmit checksum insertion control */ +enum tdes_csum_insertion { + cic_disabled = 0, /* Checksum Insertion Control */ + cic_only_ip = 1, /* Only IP header */ + /* IP header but pseudoheader is not calculated */ + cic_no_pseudoheader = 2, + cic_full = 3, /* IP header and pseudoheader */ +}; + +/* Extended RDES4 definitions */ +#define RDES_EXT_NO_PTP 0 +#define RDES_EXT_SYNC 0x1 +#define RDES_EXT_FOLLOW_UP 0x2 +#define RDES_EXT_DELAY_REQ 0x3 +#define RDES_EXT_DELAY_RESP 0x4 +#define RDES_EXT_PDELAY_REQ 0x5 +#define RDES_EXT_PDELAY_RESP 0x6 +#define RDES_EXT_PDELAY_FOLLOW_UP 0x7 + +#endif /* __DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h new file mode 100644 index 000000000..6f2cc78c5 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -0,0 +1,134 @@ +/******************************************************************************* + Header File to describe Normal/enhanced descriptor functions used for RING + and CHAINED modes. + + Copyright(C) 2011 STMicroelectronics Ltd + + It defines all the functions used to handle the normal/enhanced + descriptors in case of the DMA is configured to work in chained or + in ring mode. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
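
The bitfield layout above is easiest to read from the transmit path. The fragment below fills one enhanced descriptor for a single-segment frame with full checksum offload; it is a sketch against the fields declared here (the real driver does this through the desc_ops callbacks), example_fill_etx_desc is hypothetical and dma_addr is assumed to come from dma_map_single().

/* Illustrative only: one-buffer enhanced TX descriptor with HW checksum */
static void example_fill_etx_desc(struct dma_desc *p, dma_addr_t dma_addr,
				  unsigned int len)
{
	p->des2 = dma_addr;				/* buffer 1 address */
	p->des01.etx.buffer1_size = len;
	p->des01.etx.first_segment = 1;
	p->des01.etx.last_segment = 1;
	p->des01.etx.checksum_insertion = cic_full;	/* IP header + pseudoheader */
	p->des01.etx.interrupt = 1;			/* raise TX-complete interrupt */
	wmb();						/* descriptor fields before ownership */
	p->des01.etx.own = 1;				/* hand the descriptor to the DMA */
}
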
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DESC_COM_H__ +#define __DESC_COM_H__ + +/* Specific functions used for Ring mode */ + +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) +{ + p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; + if (end) + p->des01.erx.end_ring = 1; +} + +static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end) +{ + if (end) + p->des01.etx.end_ring = 1; +} + +static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter) +{ + p->des01.etx.end_ring = ter; +} + +static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) +{ + if (unlikely(len > BUF_SIZE_4KiB)) { + p->des01.etx.buffer1_size = BUF_SIZE_4KiB; + p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; + } else + p->des01.etx.buffer1_size = len; +} + +/* Normal descriptors */ +static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) +{ + p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1; + if (end) + p->des01.rx.end_ring = 1; +} + +static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end) +{ + if (end) + p->des01.tx.end_ring = 1; +} + +static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter) +{ + p->des01.tx.end_ring = ter; +} + +static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len) +{ + if (unlikely(len > BUF_SIZE_2KiB)) { + p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1; + p->des01.etx.buffer2_size = len - p->des01.etx.buffer1_size; + } else + p->des01.tx.buffer1_size = len; +} + +/* Specific functions used for Chain mode */ + +/* Enhanced descriptors */ +static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end) +{ + p->des01.erx.second_address_chained = 1; +} + +static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end) +{ + p->des01.etx.second_address_chained = 1; +} + +static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter) +{ + p->des01.etx.second_address_chained = 1; +} + +static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len) +{ + p->des01.etx.buffer1_size = len; +} + +/* Normal descriptors */ +static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end) +{ + p->des01.rx.second_address_chained = 1; +} + +static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size) +{ + p->des01.tx.second_address_chained = 1; +} + +static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter) +{ + p->des01.tx.second_address_chained = 1; +} + +static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len) +{ + p->des01.tx.buffer1_size = len; +} +#endif /* __DESC_COM_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c new file mode 100644 index 000000000..cca028d63 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -0,0 +1,69 @@ +/* + * Amlogic Meson DWMAC glue layer + * + * Copyright (C) 2014 Beniamino Galvani + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
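
The inline helpers above capture the one real difference between the two DMA layouts: ring mode marks the wrap point with end_ring and can spill a large payload into buffer 2, while chain mode only flags that the second address is a pointer to the next descriptor. A short sketch; example_init_tx_descs is hypothetical and BUF_SIZE_4KiB is assumed to be defined in common.h.

/* Illustrative only: preparing `size` enhanced TX descriptors */
static void example_init_tx_descs(struct dma_desc *ring, int size, bool chained)
{
	int i;

	for (i = 0; i < size; i++) {
		if (chained)
			ehn_desc_tx_set_on_chain(&ring[i], 0);
		else
			ehn_desc_tx_set_on_ring(&ring[i], i == size - 1);
	}

	/* In ring mode a 6 KiB payload is split across the two buffers:
	 * buffer1_size = 4 KiB, buffer2_size = 2 KiB.
	 */
	if (!chained)
		enh_set_tx_desc_len_on_ring(&ring[0], 6 * 1024);
}
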
+ */ + +#include +#include +#include +#include +#include +#include + +#include "stmmac_platform.h" + +#define ETHMAC_SPEED_100 BIT(1) + +struct meson_dwmac { + struct device *dev; + void __iomem *reg; +}; + +static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct meson_dwmac *dwmac = priv; + unsigned int val; + + val = readl(dwmac->reg); + + switch (speed) { + case SPEED_10: + val &= ~ETHMAC_SPEED_100; + break; + case SPEED_100: + val |= ETHMAC_SPEED_100; + break; + } + + writel(val, dwmac->reg); +} + +static void *meson6_dwmac_setup(struct platform_device *pdev) +{ + struct meson_dwmac *dwmac; + struct resource *res; + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return ERR_PTR(-ENOMEM); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + dwmac->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dwmac->reg)) + return ERR_CAST(dwmac->reg); + + return dwmac; +} + +const struct stmmac_of_data meson6_dwmac_data = { + .setup = meson6_dwmac_setup, + .fix_mac_speed = meson6_dwmac_fix_mac_speed, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c new file mode 100644 index 000000000..6249a4ec0 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -0,0 +1,437 @@ +/** + * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer + * + * Copyright (C) 2014 Chen-Zhi (Roger Chen) + * + * Chen-Zhi (Roger Chen) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
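
The Meson glue above is the smallest instance of the pattern the following files repeat: setup() allocates SoC-private state, fix_mac_speed() pokes a SoC register on link changes, and the whole bundle is exported as a const stmmac_of_data. How that struct is picked up is not part of this hunk; the sketch below shows the presumed wiring, where the match-table entry, the compatible string and the call sites are assumptions about stmmac_platform.c rather than code from this patch.

/* Presumed consumer side, for orientation only */
static const struct of_device_id stmmac_dwmac_match[] = {
	{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data },
	{ }
};

static int example_probe(struct platform_device *pdev,
			 const struct stmmac_of_data *data)
{
	void *glue_priv = data->setup(pdev);	/* e.g. meson6_dwmac_setup() */

	if (IS_ERR(glue_priv))
		return PTR_ERR(glue_priv);

	if (data->fix_mac_speed)		/* later, on every link speed change */
		data->fix_mac_speed(glue_priv, SPEED_100);

	return 0;
}
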
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rk_priv_data { + struct platform_device *pdev; + int phy_iface; + struct regulator *regulator; + + bool clk_enabled; + bool clock_input; + + struct clk *clk_mac; + struct clk *clk_mac_pll; + struct clk *gmac_clkin; + struct clk *mac_clk_rx; + struct clk *mac_clk_tx; + struct clk *clk_mac_ref; + struct clk *clk_mac_refout; + struct clk *aclk_mac; + struct clk *pclk_mac; + + int tx_delay; + int rx_delay; + + struct regmap *grf; +}; + +#define HIWORD_UPDATE(val, mask, shift) \ + ((val) << (shift) | (mask) << ((shift) + 16)) + +#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) +#define GRF_CLR_BIT(nr) (BIT(nr+16)) + +#define RK3288_GRF_SOC_CON1 0x0248 +#define RK3288_GRF_SOC_CON3 0x0250 +#define RK3288_GRF_GPIO3D_E 0x01ec +#define RK3288_GRF_GPIO4A_E 0x01f0 +#define RK3288_GRF_GPIO4B_E 0x01f4 + +/*RK3288_GRF_SOC_CON1*/ +#define GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8)) +#define GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8)) +#define GMAC_FLOW_CTRL GRF_BIT(9) +#define GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9) +#define GMAC_SPEED_10M GRF_CLR_BIT(10) +#define GMAC_SPEED_100M GRF_BIT(10) +#define GMAC_RMII_CLK_25M GRF_BIT(11) +#define GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11) +#define GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13)) +#define GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13)) +#define GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13)) +#define GMAC_RMII_MODE GRF_BIT(14) +#define GMAC_RMII_MODE_CLR GRF_CLR_BIT(14) + +/*RK3288_GRF_SOC_CON3*/ +#define GMAC_TXCLK_DLY_ENABLE GRF_BIT(14) +#define GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14) +#define GMAC_RXCLK_DLY_ENABLE GRF_BIT(15) +#define GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15) +#define GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +static void set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, + GMAC_PHY_INTF_SEL_RGMII | GMAC_RMII_MODE_CLR); + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3, + GMAC_RXCLK_DLY_ENABLE | GMAC_TXCLK_DLY_ENABLE | + GMAC_CLK_RX_DL_CFG(rx_delay) | + GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, + GMAC_PHY_INTF_SEL_RMII | GMAC_RMII_MODE); +} + +static void set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! 
speed=%d", speed); +} + +static void set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, + GMAC_RMII_CLK_2_5M | GMAC_SPEED_10M); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, + GMAC_RMII_CLK_25M | GMAC_SPEED_100M); + } else { + dev_err(dev, "unknown speed value for RMII! speed=%d", speed); + } +} + +static int gmac_clk_init(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + bsp_priv->clk_enabled = false; + + bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx"); + if (IS_ERR(bsp_priv->mac_clk_rx)) + dev_err(dev, "%s: cannot get clock %s\n", + __func__, "mac_clk_rx"); + + bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx"); + if (IS_ERR(bsp_priv->mac_clk_tx)) + dev_err(dev, "%s: cannot get clock %s\n", + __func__, "mac_clk_tx"); + + bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac"); + if (IS_ERR(bsp_priv->aclk_mac)) + dev_err(dev, "%s: cannot get clock %s\n", + __func__, "aclk_mac"); + + bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac"); + if (IS_ERR(bsp_priv->pclk_mac)) + dev_err(dev, "%s: cannot get clock %s\n", + __func__, "pclk_mac"); + + bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth"); + if (IS_ERR(bsp_priv->clk_mac)) + dev_err(dev, "%s: cannot get clock %s\n", + __func__, "stmmaceth"); + + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) { + bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref"); + if (IS_ERR(bsp_priv->clk_mac_ref)) + dev_err(dev, "%s: cannot get clock %s\n", + __func__, "clk_mac_ref"); + + if (!bsp_priv->clock_input) { + bsp_priv->clk_mac_refout = + devm_clk_get(dev, "clk_mac_refout"); + if (IS_ERR(bsp_priv->clk_mac_refout)) + dev_err(dev, "%s: cannot get clock %s\n", + __func__, "clk_mac_refout"); + } + } + + if (bsp_priv->clock_input) { + dev_info(dev, "%s: clock input from PHY\n", __func__); + } else { + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) + clk_set_rate(bsp_priv->clk_mac_pll, 50000000); + } + + return 0; +} + +static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) +{ + int phy_iface = phy_iface = bsp_priv->phy_iface; + + if (enable) { + if (!bsp_priv->clk_enabled) { + if (phy_iface == PHY_INTERFACE_MODE_RMII) { + if (!IS_ERR(bsp_priv->mac_clk_rx)) + clk_prepare_enable( + bsp_priv->mac_clk_rx); + + if (!IS_ERR(bsp_priv->clk_mac_ref)) + clk_prepare_enable( + bsp_priv->clk_mac_ref); + + if (!IS_ERR(bsp_priv->clk_mac_refout)) + clk_prepare_enable( + bsp_priv->clk_mac_refout); + } + + if (!IS_ERR(bsp_priv->aclk_mac)) + clk_prepare_enable(bsp_priv->aclk_mac); + + if (!IS_ERR(bsp_priv->pclk_mac)) + clk_prepare_enable(bsp_priv->pclk_mac); + + if (!IS_ERR(bsp_priv->mac_clk_tx)) + clk_prepare_enable(bsp_priv->mac_clk_tx); + + /** + * if (!IS_ERR(bsp_priv->clk_mac)) + * clk_prepare_enable(bsp_priv->clk_mac); + */ + mdelay(5); + bsp_priv->clk_enabled = true; + } + } else { + if (bsp_priv->clk_enabled) { + if (phy_iface == PHY_INTERFACE_MODE_RMII) { + if (!IS_ERR(bsp_priv->mac_clk_rx)) + clk_disable_unprepare( + bsp_priv->mac_clk_rx); + + if (!IS_ERR(bsp_priv->clk_mac_ref)) + clk_disable_unprepare( + bsp_priv->clk_mac_ref); + + if (!IS_ERR(bsp_priv->clk_mac_refout)) + clk_disable_unprepare( + bsp_priv->clk_mac_refout); + } + + if (!IS_ERR(bsp_priv->aclk_mac)) + clk_disable_unprepare(bsp_priv->aclk_mac); + + if 
(!IS_ERR(bsp_priv->pclk_mac)) + clk_disable_unprepare(bsp_priv->pclk_mac); + + if (!IS_ERR(bsp_priv->mac_clk_tx)) + clk_disable_unprepare(bsp_priv->mac_clk_tx); + /** + * if (!IS_ERR(bsp_priv->clk_mac)) + * clk_disable_unprepare(bsp_priv->clk_mac); + */ + bsp_priv->clk_enabled = false; + } + } + + return 0; +} + +static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) +{ + struct regulator *ldo = bsp_priv->regulator; + int ret; + struct device *dev = &bsp_priv->pdev->dev; + + if (!ldo) { + dev_err(dev, "%s: no regulator found\n", __func__); + return -1; + } + + if (enable) { + ret = regulator_enable(ldo); + if (ret) + dev_err(dev, "%s: fail to enable phy-supply\n", + __func__); + } else { + ret = regulator_disable(ldo); + if (ret) + dev_err(dev, "%s: fail to disable phy-supply\n", + __func__); + } + + return 0; +} + +static void *rk_gmac_setup(struct platform_device *pdev) +{ + struct rk_priv_data *bsp_priv; + struct device *dev = &pdev->dev; + int ret; + const char *strings = NULL; + int value; + + bsp_priv = devm_kzalloc(dev, sizeof(*bsp_priv), GFP_KERNEL); + if (!bsp_priv) + return ERR_PTR(-ENOMEM); + + bsp_priv->phy_iface = of_get_phy_mode(dev->of_node); + + bsp_priv->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(bsp_priv->regulator)) { + if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) { + dev_err(dev, "phy regulator is not available yet, deferred probing\n"); + return ERR_PTR(-EPROBE_DEFER); + } + dev_err(dev, "no regulator found\n"); + bsp_priv->regulator = NULL; + } + + ret = of_property_read_string(dev->of_node, "clock_in_out", &strings); + if (ret) { + dev_err(dev, "%s: Can not read property: clock_in_out.\n", + __func__); + bsp_priv->clock_input = true; + } else { + dev_info(dev, "%s: clock input or output? 
(%s).\n", + __func__, strings); + if (!strcmp(strings, "input")) + bsp_priv->clock_input = true; + else + bsp_priv->clock_input = false; + } + + ret = of_property_read_u32(dev->of_node, "tx_delay", &value); + if (ret) { + bsp_priv->tx_delay = 0x30; + dev_err(dev, "%s: Can not read property: tx_delay.", __func__); + dev_err(dev, "%s: set tx_delay to 0x%x\n", + __func__, bsp_priv->tx_delay); + } else { + dev_info(dev, "%s: TX delay(0x%x).\n", __func__, value); + bsp_priv->tx_delay = value; + } + + ret = of_property_read_u32(dev->of_node, "rx_delay", &value); + if (ret) { + bsp_priv->rx_delay = 0x10; + dev_err(dev, "%s: Can not read property: rx_delay.", __func__); + dev_err(dev, "%s: set rx_delay to 0x%x\n", + __func__, bsp_priv->rx_delay); + } else { + dev_info(dev, "%s: RX delay(0x%x).\n", __func__, value); + bsp_priv->rx_delay = value; + } + + bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,grf"); + bsp_priv->pdev = pdev; + + /*rmii or rgmii*/ + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) { + dev_info(dev, "%s: init for RGMII\n", __func__); + set_to_rgmii(bsp_priv, bsp_priv->tx_delay, bsp_priv->rx_delay); + } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) { + dev_info(dev, "%s: init for RMII\n", __func__); + set_to_rmii(bsp_priv); + } else { + dev_err(dev, "%s: NO interface defined!\n", __func__); + } + + gmac_clk_init(bsp_priv); + + return bsp_priv; +} + +static int rk_gmac_init(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + int ret; + + ret = phy_power_on(bsp_priv, true); + if (ret) + return ret; + + ret = gmac_clk_enable(bsp_priv, true); + if (ret) + return ret; + + return 0; +} + +static void rk_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *gmac = priv; + + phy_power_on(gmac, false); + gmac_clk_enable(gmac, false); +} + +static void rk_fix_speed(void *priv, unsigned int speed) +{ + struct rk_priv_data *bsp_priv = priv; + struct device *dev = &bsp_priv->pdev->dev; + + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) + set_rgmii_speed(bsp_priv, speed); + else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) + set_rmii_speed(bsp_priv, speed); + else + dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); +} + +const struct stmmac_of_data rk3288_gmac_data = { + .has_gmac = 1, + .fix_mac_speed = rk_fix_speed, + .setup = rk_gmac_setup, + .init = rk_gmac_init, + .exit = rk_gmac_exit, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c new file mode 100644 index 000000000..5a36bd2c7 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -0,0 +1,265 @@ +/* Copyright Altera Corporation (C) 2014. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
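
The Rockchip GRF writes above never read-modify-write: in each 32-bit write the upper 16 bits are a write-enable mask for the lower 16, which is exactly what HIWORD_UPDATE(), GRF_BIT() and GRF_CLR_BIT() encode. Worked values for illustration, followed by the kind of single-register write this permits (fragment reusing bsp_priv from the code above):

/* GRF_BIT(9)     == BIT(9)  | BIT(25)  -> write 1 to bit 9, leave the rest alone
 * GRF_CLR_BIT(9) == BIT(25)            -> write 0 to bit 9, leave the rest alone
 * GMAC_CLK_RX_DL_CFG(0x10) == HIWORD_UPDATE(0x10, 0x7F, 7)
 *                          == (0x10 << 7) | (0x7F << 23)
 *
 * so flipping one field needs only one write, no regmap_update_bits():
 */
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, GMAC_SPEED_100M);
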
+ * + * Adopted from dwmac-sti.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 + +#define EMAC_SPLITTER_CTRL_REG 0x0 +#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 +#define EMAC_SPLITTER_CTRL_SPEED_10 0x2 +#define EMAC_SPLITTER_CTRL_SPEED_100 0x3 +#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0 + +struct socfpga_dwmac { + int interface; + u32 reg_offset; + u32 reg_shift; + struct device *dev; + struct regmap *sys_mgr_base_addr; + struct reset_control *stmmac_rst; + void __iomem *splitter_base; +}; + +static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; + void __iomem *splitter_base = dwmac->splitter_base; + u32 val; + + if (!splitter_base) + return; + + val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); + val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK; + + switch (speed) { + case 1000: + val |= EMAC_SPLITTER_CTRL_SPEED_1000; + break; + case 100: + val |= EMAC_SPLITTER_CTRL_SPEED_100; + break; + case 10: + val |= EMAC_SPLITTER_CTRL_SPEED_10; + break; + default: + return; + } + + writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); +} + +static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) +{ + struct device_node *np = dev->of_node; + struct regmap *sys_mgr_base_addr; + u32 reg_offset, reg_shift; + int ret; + struct device_node *np_splitter; + struct resource res_splitter; + + dwmac->stmmac_rst = devm_reset_control_get(dev, + STMMAC_RESOURCE_NAME); + if (IS_ERR(dwmac->stmmac_rst)) { + dev_info(dev, "Could not get reset control!\n"); + if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dwmac->stmmac_rst = NULL; + } + + dwmac->interface = of_get_phy_mode(np); + + sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); + if (IS_ERR(sys_mgr_base_addr)) { + dev_info(dev, "No sysmgr-syscon node found\n"); + return PTR_ERR(sys_mgr_base_addr); + } + + ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, ®_offset); + if (ret) { + dev_info(dev, "Could not read reg_offset from sysmgr-syscon!\n"); + return -EINVAL; + } + + ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, ®_shift); + if (ret) { + dev_info(dev, "Could not read reg_shift from sysmgr-syscon!\n"); + return -EINVAL; + } + + np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); + if (np_splitter) { + if (of_address_to_resource(np_splitter, 0, &res_splitter)) { + dev_info(dev, "Missing emac splitter address\n"); + return -EINVAL; + } + + dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter); + if (IS_ERR(dwmac->splitter_base)) { + dev_info(dev, "Failed to mapping emac splitter\n"); + return PTR_ERR(dwmac->splitter_base); + } + } + + dwmac->reg_offset = reg_offset; + dwmac->reg_shift = reg_shift; + dwmac->sys_mgr_base_addr = sys_mgr_base_addr; + dwmac->dev = dev; + + return 0; +} + +static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) +{ + struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; + int phymode = dwmac->interface; + u32 reg_offset = dwmac->reg_offset; + u32 reg_shift = dwmac->reg_shift; + u32 ctrl, val; + + switch (phymode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + 
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; + break; + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; + break; + default: + dev_err(dwmac->dev, "bad phy mode %d\n", phymode); + return -EINVAL; + } + + /* Overwrite val to GMII if splitter core is enabled. The phymode here + * is the actual phy mode on phy hardware, but phy interface from + * EMAC core is GMII. + */ + if (dwmac->splitter_base) + val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; + + regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); + ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); + ctrl |= val << reg_shift; + + regmap_write(sys_mgr_base_addr, reg_offset, ctrl); + return 0; +} + +static void *socfpga_dwmac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + struct socfpga_dwmac *dwmac; + + dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return ERR_PTR(-ENOMEM); + + ret = socfpga_dwmac_parse_data(dwmac, dev); + if (ret) { + dev_err(dev, "Unable to parse OF data\n"); + return ERR_PTR(ret); + } + + ret = socfpga_dwmac_setup(dwmac); + if (ret) { + dev_err(dev, "couldn't setup SoC glue (%d)\n", ret); + return ERR_PTR(ret); + } + + return dwmac; +} + +static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv) +{ + struct socfpga_dwmac *dwmac = priv; + + /* On socfpga platform exit, assert and hold reset to the + * enet controller - the default state after a hard reset. + */ + if (dwmac->stmmac_rst) + reset_control_assert(dwmac->stmmac_rst); +} + +static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) +{ + struct socfpga_dwmac *dwmac = priv; + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *stpriv = NULL; + int ret = 0; + + if (ndev) + stpriv = netdev_priv(ndev); + + /* Assert reset to the enet controller before changing the phy mode */ + if (dwmac->stmmac_rst) + reset_control_assert(dwmac->stmmac_rst); + + /* Setup the phy mode in the system manager registers according to + * devicetree configuration + */ + ret = socfpga_dwmac_setup(dwmac); + + /* Deassert reset for the phy configuration to be sampled by + * the enet controller, and operation to start in requested mode + */ + if (dwmac->stmmac_rst) + reset_control_deassert(dwmac->stmmac_rst); + + /* Before the enet controller is suspended, the phy is suspended. + * This causes the phy clock to be gated. The enet controller is + * resumed before the phy, so the clock is still gated "off" when + * the enet controller is resumed. This code makes sure the phy + * is "resumed" before reinitializing the enet controller since + * the enet controller depends on an active phy clock to complete + * a DMA reset. A DMA reset will "time out" if executed + * with no phy clock input on the Synopsys enet controller. + * Verified through Synopsys Case #8000711656. + * + * Note that the phy clock is also gated when the phy is isolated. + * Phy "suspend" and "isolate" controls are located in phy basic + * control register 0, and can be modified by the phy driver + * framework. 
+ */ + if (stpriv && stpriv->phydev) + phy_resume(stpriv->phydev); + + return ret; +} + +const struct stmmac_of_data socfpga_gmac_data = { + .setup = socfpga_dwmac_probe, + .init = socfpga_dwmac_init, + .exit = socfpga_dwmac_exit, + .fix_mac_speed = socfpga_dwmac_fix_mac_speed, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c new file mode 100644 index 000000000..bb6e2dc61 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -0,0 +1,366 @@ +/* + * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer + * + * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited + * Author: Srinivas Kandagatla + * Contributors: Giuseppe Cavallaro + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac_platform.h" + +#define DWMAC_125MHZ 125000000 +#define DWMAC_50MHZ 50000000 +#define DWMAC_25MHZ 25000000 +#define DWMAC_2_5MHZ 2500000 + +#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \ + iface == PHY_INTERFACE_MODE_RGMII_ID || \ + iface == PHY_INTERFACE_MODE_RGMII_RXID || \ + iface == PHY_INTERFACE_MODE_RGMII_TXID) + +#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \ + iface == PHY_INTERFACE_MODE_GMII) + +/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families) + * + * Below table summarizes the clock requirement and clock sources for + * supported phy interface modes with link speeds. + * ________________________________________________ + *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link | + * ------------------------------------------------ + *| MII | n/a | 25Mhz | + *| | | txclk | + * ------------------------------------------------ + *| GMII | 125Mhz | 25Mhz | + *| | clk-125/txclk | txclk | + * ------------------------------------------------ + *| RGMII | 125Mhz | 25Mhz | + *| | clk-125/txclk | clkgen | + *| | clkgen | | + * ------------------------------------------------ + *| RMII | n/a | 25Mhz | + *| | |clkgen/phyclk-in | + * ------------------------------------------------ + * + * Register Configuration + *------------------------------- + * src |BIT(8)| BIT(7)| BIT(6)| + *------------------------------- + * txclk | 0 | n/a | 1 | + *------------------------------- + * ck_125| 0 | n/a | 0 | + *------------------------------- + * phyclk| 1 | 0 | n/a | + *------------------------------- + * clkgen| 1 | 1 | n/a | + *------------------------------- + */ + +#define STIH4XX_RETIME_SRC_MASK GENMASK(8, 6) +#define STIH4XX_ETH_SEL_TX_RETIME_CLK BIT(8) +#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) +#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6) + +/* STiD127 register definitions + *----------------------- + * src |BIT(6)| BIT(7)| + *----------------------- + * MII | 1 | n/a | + *----------------------- + * RMII | n/a | 1 | + * clkgen| | | + *----------------------- + * RMII | n/a | 0 | + * phyclk| | | + *----------------------- + * RGMII | 1 | n/a | + * clkgen| | | + *----------------------- + */ + +#define STID127_RETIME_SRC_MASK GENMASK(7, 6) +#define STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7) +#define STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK BIT(6) + +#define ENMII_MASK GENMASK(5, 5) +#define ENMII BIT(5) +#define EN_MASK 
GENMASK(1, 1) +#define EN BIT(1) + +/* + * 3 bits [4:2] + * 000-GMII/MII + * 001-RGMII + * 010-SGMII + * 100-RMII + */ +#define MII_PHY_SEL_MASK GENMASK(4, 2) +#define ETH_PHY_SEL_RMII BIT(4) +#define ETH_PHY_SEL_SGMII BIT(3) +#define ETH_PHY_SEL_RGMII BIT(2) +#define ETH_PHY_SEL_GMII 0x0 +#define ETH_PHY_SEL_MII 0x0 + +struct sti_dwmac { + int interface; /* MII interface */ + bool ext_phyclk; /* Clock from external PHY */ + u32 tx_retime_src; /* TXCLK Retiming*/ + struct clk *clk; /* PHY clock */ + u32 ctrl_reg; /* GMAC glue-logic control register */ + int clk_sel_reg; /* GMAC ext clk selection register */ + struct device *dev; + struct regmap *regmap; + u32 speed; +}; + +static u32 phy_intf_sels[] = { + [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII, + [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII, + [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII, + [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII, + [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII, + [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII, +}; + +enum { + TX_RETIME_SRC_NA = 0, + TX_RETIME_SRC_TXCLK = 1, + TX_RETIME_SRC_CLK_125, + TX_RETIME_SRC_PHYCLK, + TX_RETIME_SRC_CLKGEN, +}; + +static u32 stih4xx_tx_retime_val[] = { + [TX_RETIME_SRC_TXCLK] = STIH4XX_ETH_SEL_TXCLK_NOT_CLK125, + [TX_RETIME_SRC_CLK_125] = 0x0, + [TX_RETIME_SRC_PHYCLK] = STIH4XX_ETH_SEL_TX_RETIME_CLK, + [TX_RETIME_SRC_CLKGEN] = STIH4XX_ETH_SEL_TX_RETIME_CLK + | STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK, +}; + +static void stih4xx_fix_retime_src(void *priv, u32 spd) +{ + struct sti_dwmac *dwmac = priv; + u32 src = dwmac->tx_retime_src; + u32 reg = dwmac->ctrl_reg; + u32 freq = 0; + + if (dwmac->interface == PHY_INTERFACE_MODE_MII) { + src = TX_RETIME_SRC_TXCLK; + } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + if (dwmac->ext_phyclk) { + src = TX_RETIME_SRC_PHYCLK; + } else { + src = TX_RETIME_SRC_CLKGEN; + freq = DWMAC_50MHZ; + } + } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { + /* On GiGa clk source can be either ext or from clkgen */ + if (spd == SPEED_1000) { + freq = DWMAC_125MHZ; + } else { + /* Switch to clkgen for these speeds */ + src = TX_RETIME_SRC_CLKGEN; + if (spd == SPEED_100) + freq = DWMAC_25MHZ; + else if (spd == SPEED_10) + freq = DWMAC_2_5MHZ; + } + } + + if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq) + clk_set_rate(dwmac->clk, freq); + + regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK, + stih4xx_tx_retime_val[src]); +} + +static void stid127_fix_retime_src(void *priv, u32 spd) +{ + struct sti_dwmac *dwmac = priv; + u32 reg = dwmac->ctrl_reg; + u32 freq = 0; + u32 val = 0; + + if (dwmac->interface == PHY_INTERFACE_MODE_MII) { + val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK; + } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + if (!dwmac->ext_phyclk) { + val = STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK; + freq = DWMAC_50MHZ; + } + } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) { + val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK; + if (spd == SPEED_1000) + freq = DWMAC_125MHZ; + else if (spd == SPEED_100) + freq = DWMAC_25MHZ; + else if (spd == SPEED_10) + freq = DWMAC_2_5MHZ; + } + + if (dwmac->clk && freq) + clk_set_rate(dwmac->clk, freq); + + regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val); +} + +static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac) +{ + struct regmap *regmap = dwmac->regmap; + int iface = dwmac->interface; + struct device *dev = dwmac->dev; + struct device_node *np = dev->of_node; + u32 reg = dwmac->ctrl_reg; + u32 val; + + if (dwmac->clk) + 
clk_prepare_enable(dwmac->clk); + + if (of_property_read_bool(np, "st,gmac_en")) + regmap_update_bits(regmap, reg, EN_MASK, EN); + + regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]); + + val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII; + regmap_update_bits(regmap, reg, ENMII_MASK, val); +} + +static int stix4xx_init(struct platform_device *pdev, void *priv) +{ + struct sti_dwmac *dwmac = priv; + u32 spd = dwmac->speed; + + sti_dwmac_ctrl_init(dwmac); + + stih4xx_fix_retime_src(priv, spd); + + return 0; +} + +static int stid127_init(struct platform_device *pdev, void *priv) +{ + struct sti_dwmac *dwmac = priv; + u32 spd = dwmac->speed; + + sti_dwmac_ctrl_init(dwmac); + + stid127_fix_retime_src(priv, spd); + + return 0; +} + +static void sti_dwmac_exit(struct platform_device *pdev, void *priv) +{ + struct sti_dwmac *dwmac = priv; + + if (dwmac->clk) + clk_disable_unprepare(dwmac->clk); +} +static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, + struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct regmap *regmap; + int err; + + if (!np) + return -EINVAL; + + /* clk selection from extra syscfg register */ + dwmac->clk_sel_reg = -ENXIO; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf"); + if (res) + dwmac->clk_sel_reg = res->start; + + regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg); + if (err) { + dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err); + return err; + } + + dwmac->dev = dev; + dwmac->interface = of_get_phy_mode(np); + dwmac->regmap = regmap; + dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); + dwmac->tx_retime_src = TX_RETIME_SRC_NA; + dwmac->speed = SPEED_100; + + if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { + const char *rs; + + err = of_property_read_string(np, "st,tx-retime-src", &rs); + if (err < 0) { + dev_warn(dev, "Use internal clock source\n"); + dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; + } else if (!strcasecmp(rs, "clk_125")) { + dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; + } else if (!strcasecmp(rs, "txclk")) { + dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; + } + + dwmac->speed = SPEED_1000; + } + + dwmac->clk = devm_clk_get(dev, "sti-ethclk"); + if (IS_ERR(dwmac->clk)) { + dev_warn(dev, "No phy clock provided...\n"); + dwmac->clk = NULL; + } + + return 0; +} + +static void *sti_dwmac_setup(struct platform_device *pdev) +{ + struct sti_dwmac *dwmac; + int ret; + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return ERR_PTR(-ENOMEM); + + ret = sti_dwmac_parse_data(dwmac, pdev); + if (ret) { + dev_err(&pdev->dev, "Unable to parse OF data\n"); + return ERR_PTR(ret); + } + + return dwmac; +} + +const struct stmmac_of_data stih4xx_dwmac_data = { + .fix_mac_speed = stih4xx_fix_retime_src, + .setup = sti_dwmac_setup, + .init = stix4xx_init, + .exit = sti_dwmac_exit, +}; + +const struct stmmac_of_data stid127_dwmac_data = { + .fix_mac_speed = stid127_fix_retime_src, + .setup = sti_dwmac_setup, + .init = stid127_init, + .exit = sti_dwmac_exit, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c new file mode 100644 index 000000000..c5ea9ab75 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -0,0 +1,142 @@ +/* + * dwmac-sunxi.c - Allwinner sunxi DWMAC 
specific glue layer + * + * Copyright (C) 2013 Chen-Yu Tsai + * + * Chen-Yu Tsai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#include "stmmac_platform.h" + +struct sunxi_priv_data { + int interface; + int clk_enabled; + struct clk *tx_clk; + struct regulator *regulator; +}; + +static void *sun7i_gmac_setup(struct platform_device *pdev) +{ + struct sunxi_priv_data *gmac; + struct device *dev = &pdev->dev; + + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return ERR_PTR(-ENOMEM); + + gmac->interface = of_get_phy_mode(dev->of_node); + + gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); + if (IS_ERR(gmac->tx_clk)) { + dev_err(dev, "could not get tx clock\n"); + return gmac->tx_clk; + } + + /* Optional regulator for PHY */ + gmac->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(gmac->regulator)) { + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) + return ERR_PTR(-EPROBE_DEFER); + dev_info(dev, "no regulator found\n"); + gmac->regulator = NULL; + } + + return gmac; +} + +#define SUN7I_GMAC_GMII_RGMII_RATE 125000000 +#define SUN7I_GMAC_MII_RATE 25000000 + +static int sun7i_gmac_init(struct platform_device *pdev, void *priv) +{ + struct sunxi_priv_data *gmac = priv; + int ret; + + if (gmac->regulator) { + ret = regulator_enable(gmac->regulator); + if (ret) + return ret; + } + + /* Set GMAC interface port mode + * + * The GMAC TX clock lines are configured by setting the clock + * rate, which then uses the auto-reparenting feature of the + * clock driver, and enabling/disabling the clock. + */ + if (gmac->interface == PHY_INTERFACE_MODE_RGMII) { + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); + clk_prepare_enable(gmac->tx_clk); + gmac->clk_enabled = 1; + } else { + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE); + clk_prepare(gmac->tx_clk); + } + + return 0; +} + +static void sun7i_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct sunxi_priv_data *gmac = priv; + + if (gmac->clk_enabled) { + clk_disable(gmac->tx_clk); + gmac->clk_enabled = 0; + } + clk_unprepare(gmac->tx_clk); + + if (gmac->regulator) + regulator_disable(gmac->regulator); +} + +static void sun7i_fix_speed(void *priv, unsigned int speed) +{ + struct sunxi_priv_data *gmac = priv; + + /* only GMII mode requires us to reconfigure the clock lines */ + if (gmac->interface != PHY_INTERFACE_MODE_GMII) + return; + + if (gmac->clk_enabled) { + clk_disable(gmac->tx_clk); + gmac->clk_enabled = 0; + } + clk_unprepare(gmac->tx_clk); + + if (speed == 1000) { + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE); + clk_prepare_enable(gmac->tx_clk); + gmac->clk_enabled = 1; + } else { + clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE); + clk_prepare(gmac->tx_clk); + } +} + +/* of_data specifying hardware features and callbacks. + * hardware features were copied from Allwinner drivers. 
*/ +const struct stmmac_of_data sun7i_gmac_data = { + .has_gmac = 1, + .tx_coe = 1, + .fix_mac_speed = sun7i_fix_speed, + .setup = sun7i_gmac_setup, + .init = sun7i_gmac_init, + .exit = sun7i_gmac_exit, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h new file mode 100644 index 000000000..2ec6aeae3 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -0,0 +1,126 @@ +/******************************************************************************* + MAC 10/100 Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DWMAC100_H__ +#define __DWMAC100_H__ + +#include +#include "common.h" + +/*---------------------------------------------------------------------------- + * MAC BLOCK defines + *---------------------------------------------------------------------------*/ +/* MAC CSR offset */ +#define MAC_CONTROL 0x00000000 /* MAC Control */ +#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */ +#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */ +#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */ +#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */ +#define MAC_MII_ADDR 0x00000014 /* MII Address */ +#define MAC_MII_DATA 0x00000018 /* MII Data */ +#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */ +#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */ +#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */ + +/* MAC CTRL defines */ +#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */ +#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */ +#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */ +#define MAC_CONTROL_PS 0x08000000 /* Port Select */ +#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */ +#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) 
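
The address registers above pair with the generic helpers declared in common.h; a one-line sketch of how a 10/100 core would be expected to program its station address (illustrative only, dev->dev_addr assumed from the caller):

/* write addr[0..5] into the registers at offsets 0x04 / 0x08 */
stmmac_set_mac_addr(ioaddr, dev->dev_addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
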
*/ +#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */ +#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */ +#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */ +#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */ +#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */ +#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */ +#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */ +#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */ +#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */ +#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */ +#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */ +#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */ +#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */ +#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */ +#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */ +#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */ +#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */ +#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP) + +/* MAC FLOW CTRL defines */ +#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define MAC_FLOW_CTRL_PT_SHIFT 16 +#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */ +#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */ +#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */ + +/* MII ADDR defines */ +#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ + +/*---------------------------------------------------------------------------- + * DMA BLOCK defines + *---------------------------------------------------------------------------*/ + +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */ +#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DEFAULT 0x00000000 + +/* DMA Control register defines */ +#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */ + +/* Transmit Threshold Control */ +enum ttc_control { + DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */ + DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */ + DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */ + DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */ + DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */ + DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */ + DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */ + DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */ +}; + +/* STMAC110 DMA Missed Frame Counter register defines */ +#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Overflow */ +#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ +#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ +#define 
DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ + +extern const struct stmmac_dma_ops dwmac100_dma_ops; + +#endif /* __DWMAC100_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h new file mode 100644 index 000000000..b3fe0575f --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -0,0 +1,318 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ +#ifndef __DWMAC1000_H__ +#define __DWMAC1000_H__ + +#include +#include "common.h" + +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ +#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ +#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ + +#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ +enum dwmac1000_irq_status { + lpiis_irq = 0x400, + time_stamp_irq = 0x0200, + mmc_rx_csum_offload_irq = 0x0080, + mmc_tx_irq = 0x0040, + mmc_rx_irq = 0x0020, + mmc_irq = 0x0010, + pmt_irq = 0x0008, + pcs_ane_irq = 0x0004, + pcs_link_irq = 0x0002, + rgmii_irq = 0x0001, +}; +#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */ + +/* PMT Control and Status */ +#define GMAC_PMT 0x0000002c +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* Energy Efficient Ethernet (EEE) + * + * LPI status, timer and control register offset + */ +#define LPI_CTRL_STATUS 0x0030 +#define LPI_TIMER_CTRL 0x0034 + +/* LPI control and status defines */ +#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */ +#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */ +#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */ +#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */ +#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */ +#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */ +#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */ +#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */ +#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */ 
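
The power_event bits above drive the wake-on-LAN (PMT) support; the sketch below shows how a pmt() callback for this core might arm magic-packet and unicast wake-up. The WAKE_* flags are the standard ethtool ones; the exact policy is an assumption, not taken from this patch.

/* Illustrative only */
static void example_gmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC)
		pmt |= power_down | magic_pkt_en;
	if (mode & WAKE_UCAST)
		pmt |= power_down | global_unicast;

	writel(pmt, ioaddr + GMAC_PMT);
}
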
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ + +/* GMAC HW ADDR regs */ +#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ + (reg * 8)) +#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \ + (reg * 8)) +#define GMAC_MAX_PERFECT_ADDRESSES 1 + +/* PCS registers (AN/TBI/SGMII/RGMII) offset */ +#define GMAC_AN_CTRL 0x000000c0 /* AN control */ +#define GMAC_AN_STATUS 0x000000c4 /* AN status */ +#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ +#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */ +#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ +#define GMAC_TBI 0x000000d4 /* TBI extend status */ +#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */ + +/* AN Configuration defines */ +#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */ +#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */ +#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */ +#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */ +#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */ +#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */ + +/* AN Status defines */ +#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */ +#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */ +#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */ +#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */ + +/* Register 54 (SGMII/RGMII status register) */ +#define GMAC_S_R_GMII_LINK 0x8 +#define GMAC_S_R_GMII_SPEED 0x5 +#define GMAC_S_R_GMII_SPEED_SHIFT 0x1 +#define GMAC_S_R_GMII_MODE 0x1 +#define GMAC_S_R_GMII_SPEED_125 2 +#define GMAC_S_R_GMII_SPEED_25 1 + +/* Common ADV and LPA defines */ +#define GMAC_ANE_FD (1 << 5) +#define GMAC_ANE_HD (1 << 6) +#define GMAC_ANE_PSE (3 << 7) +#define GMAC_ANE_PSE_SHIFT 7 + +/* GMAC Configuration defines */ +#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ +#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. 
in RGMII/SGMII */ +#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ +#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */ +#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */ +#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ +enum inter_frame_gap { + GMAC_CONTROL_IFG_88 = 0x00040000, + GMAC_CONTROL_IFG_80 = 0x00020000, + GMAC_CONTROL_IFG_40 = 0x000e0000, +}; +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ + GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) + +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
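
These flow-control fields map directly onto the flow_ctrl() callback declared in common.h; roughly what an implementation writes is sketched below (FLOW_RX/FLOW_TX assumed from common.h, function name hypothetical).

/* Illustrative only */
static void example_gmac_flow_ctrl(void __iomem *ioaddr, unsigned int fc,
				   unsigned int pause_time)
{
	unsigned int flow = 0;

	if (fc & FLOW_RX)
		flow |= GMAC_FLOW_CTRL_RFE;	/* honour received pause frames */
	if (fc & FLOW_TX)
		flow |= GMAC_FLOW_CTRL_TFE |	/* emit pause frames ... */
			(pause_time << GMAC_FLOW_CTRL_PT_SHIFT); /* ... with this quanta */

	writel(flow, ioaddr + GMAC_FLOW_CTRL);
}
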
*/ + +/*--- DMA BLOCK defines ---*/ +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +/* Programmable burst length (passed thorugh platform)*/ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ + +enum rx_tx_priority_ratio { + double_ratio = 0x00004000, /* 2:1 */ + triple_ratio = 0x00008000, /* 3:1 */ + quadruple_ratio = 0x0000c000, /* 4:1 */ +}; + +#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */ +#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ +#define DMA_BUS_MODE_RPBL_SHIFT 17 +#define DMA_BUS_MODE_USP 0x00800000 +#define DMA_BUS_MODE_PBL 0x01000000 +#define DMA_BUS_MODE_AAL 0x02000000 + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */ +#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */ +/* DMA Bus Mode register defines */ +#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ +#define DMA_BUS_PR_RATIO_SHIFT 14 +#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ + +/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ +/* Disable Drop TCP/IP csum error */ +#define DMA_CONTROL_DT 0x04000000 +#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ +#define DMA_CONTROL_DFF 0x01000000 /* Disaable flushing */ +/* Threshold for Activating the FC */ +enum rfa { + act_full_minus_1 = 0x00800000, + act_full_minus_2 = 0x00800200, + act_full_minus_3 = 0x00800400, + act_full_minus_4 = 0x00800600, +}; +/* Threshold for Deactivating the FC */ +enum rfd { + deac_full_minus_1 = 0x00400000, + deac_full_minus_2 = 0x00400800, + deac_full_minus_3 = 0x00401000, + deac_full_minus_4 = 0x00401800, +}; +#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ + +enum ttc_control { + DMA_CONTROL_TTC_64 = 0x00000000, + DMA_CONTROL_TTC_128 = 0x00004000, + DMA_CONTROL_TTC_192 = 0x00008000, + DMA_CONTROL_TTC_256 = 0x0000c000, + DMA_CONTROL_TTC_40 = 0x00010000, + DMA_CONTROL_TTC_32 = 0x00014000, + DMA_CONTROL_TTC_24 = 0x00018000, + DMA_CONTROL_TTC_16 = 0x0001c000, +}; +#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff + +#define DMA_CONTROL_EFC 0x00000100 +#define DMA_CONTROL_FEF 0x00000080 +#define DMA_CONTROL_FUF 0x00000040 + +/* Receive flow control activation field + * RFA field in DMA control register, bits 23,10:9 + */ +#define DMA_CONTROL_RFA_MASK 0x00800600 + +/* Receive flow control deactivation field + * RFD field in DMA control register, bits 22,12:11 + */ +#define DMA_CONTROL_RFD_MASK 0x00401800 + +/* RFD and RFA fields are encoded as follows + * + * Bit Field + * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled) + * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled) + * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled) + * 1,11 - Reserved + * + * RFD should always be > RFA for a given 
FIFO size. RFD == RFA may work, + * but packet throughput performance may not be as expected. + * + * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame + * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause + * Description). + * + * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6), + * is set to 0. This allows pause frames with a quanta of 0 to be sent + * as an XOFF message to the link peer. + */ + +#define RFA_FULL_MINUS_1K 0x00000000 +#define RFA_FULL_MINUS_2K 0x00000200 +#define RFA_FULL_MINUS_3K 0x00000400 +#define RFA_FULL_MINUS_4K 0x00000600 +#define RFA_FULL_MINUS_5K 0x00800000 +#define RFA_FULL_MINUS_6K 0x00800200 +#define RFA_FULL_MINUS_7K 0x00800400 + +#define RFD_FULL_MINUS_1K 0x00000000 +#define RFD_FULL_MINUS_2K 0x00000800 +#define RFD_FULL_MINUS_3K 0x00001000 +#define RFD_FULL_MINUS_4K 0x00001800 +#define RFD_FULL_MINUS_5K 0x00400000 +#define RFD_FULL_MINUS_6K 0x00400800 +#define RFD_FULL_MINUS_7K 0x00401000 + +enum rtc_control { + DMA_CONTROL_RTC_64 = 0x00000000, + DMA_CONTROL_RTC_32 = 0x00000008, + DMA_CONTROL_RTC_96 = 0x00000010, + DMA_CONTROL_RTC_128 = 0x00000018, +}; +#define DMA_CONTROL_TC_RX_MASK 0xffffffe7 + +#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */ + +/* MMC registers offset */ +#define GMAC_MMC_CTRL 0x100 +#define GMAC_MMC_RX_INTR 0x104 +#define GMAC_MMC_TX_INTR 0x108 +#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 +#define GMAC_EXTHASH_BASE 0x500 + +extern const struct stmmac_dma_ops dwmac1000_dma_ops; +#endif /* __DWMAC1000_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c new file mode 100644 index 000000000..371a669d6 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -0,0 +1,447 @@ +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + This only implements the mac core functions for this chip. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include "dwmac1000.h" + +static void dwmac1000_core_init(struct mac_device_info *hw, int mtu) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONTROL); + value |= GMAC_CORE_INIT; + if (mtu > 1500) + value |= GMAC_CONTROL_2K; + if (mtu > 2000) + value |= GMAC_CONTROL_JE; + + writel(value, ioaddr + GMAC_CONTROL); + + /* Mask GMAC interrupts */ + writel(0x207, ioaddr + GMAC_INT_MASK); + +#ifdef STMMAC_VLAN_TAG_USED + /* Tag detection without filtering */ + writel(0x0, ioaddr + GMAC_VLAN_TAG); +#endif +} + +static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONTROL); + + if (hw->rx_csum) + value |= GMAC_CONTROL_IPC; + else + value &= ~GMAC_CONTROL_IPC; + + writel(value, ioaddr + GMAC_CONTROL); + + value = readl(ioaddr + GMAC_CONTROL); + + return !!(value & GMAC_CONTROL_IPC); +} + +static void dwmac1000_dump_regs(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr); + + for (i = 0; i < 55; i++) { + int offset = i * 4; + pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, + offset, readl(ioaddr + offset)); + } +} + +static void dwmac1000_set_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac1000_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, + int mcbitslog2) +{ + int numhashregs, regs; + + switch (mcbitslog2) { + case 6: + writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW); + writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH); + return; + break; + case 7: + numhashregs = 4; + break; + case 8: + numhashregs = 8; + break; + default: + pr_debug("STMMAC: err in setting mulitcast filter\n"); + return; + break; + } + for (regs = 0; regs < numhashregs; regs++) + writel(mcfilterbits[regs], + ioaddr + GMAC_EXTHASH_BASE + regs * 4); +} + +static void dwmac1000_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + unsigned int value = 0; + unsigned int perfect_addr_number = hw->unicast_filter_entries; + u32 mc_filter[8]; + int mcbitslog2 = hw->mcast_bits_log2; + + pr_debug("%s: # mcasts %d, # unicast %d\n", __func__, + netdev_mc_count(dev), netdev_uc_count(dev)); + + memset(mc_filter, 0, sizeof(mc_filter)); + + if (dev->flags & IFF_PROMISC) { + value = GMAC_FRAME_FILTER_PR; + } else if (dev->flags & IFF_ALLMULTI) { + value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ + value = GMAC_FRAME_FILTER_HMC; + + netdev_for_each_mc_addr(ha, dev) { + /* The upper n bits of the calculated CRC are used to + * index the contents of the hash table. The number of + * bits used depends on the hardware configuration + * selected at core configuration time. 
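+ * Added illustrative note (not in the original comment): with
+ * mcbitslog2 = 8, the upper eight bits of the bit-reversed CRC yield
+ * bit_nr in the range 0..255, which dwmac1000_set_mchash() above
+ * spreads across eight 32-bit extended hash registers starting at
+ * GMAC_EXTHASH_BASE.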
+ */ + int bit_nr = bitrev32(~crc32_le(~0, ha->addr, + ETH_ALEN)) >> + (32 - mcbitslog2); + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. + */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + } + + dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2); + + /* Handle multiple unicast addresses (perfect filtering) */ + if (netdev_uc_count(dev) > perfect_addr_number) + /* Switch to promiscuous mode if more than unicast + * addresses are requested than supported by hardware. + */ + value |= GMAC_FRAME_FILTER_PR; + else { + int reg = 1; + struct netdev_hw_addr *ha; + + netdev_for_each_uc_addr(ha, dev) { + stmmac_set_mac_addr(ioaddr, ha->addr, + GMAC_ADDR_HIGH(reg), + GMAC_ADDR_LOW(reg)); + reg++; + } + } + +#ifdef FRAME_FILTER_DEBUG + /* Enable Receive all mode (to debug filtering_fail errors) */ + value |= GMAC_FRAME_FILTER_RA; +#endif + writel(value, ioaddr + GMAC_FRAME_FILTER); +} + + +static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + void __iomem *ioaddr = hw->pcsr; + /* Set flow such that DZPQ in Mac Register 6 is 0, + * and unicast pause detect is enabled. + */ + unsigned int flow = GMAC_FLOW_CTRL_UP; + + pr_debug("GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + pr_debug("\tReceive Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_RFE; + } + if (fc & FLOW_TX) { + pr_debug("\tTransmit Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_TFE; + } + + if (duplex) { + pr_debug("\tduplex mode: PAUSE %d\n", pause_time); + flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); + } + + writel(flow, ioaddr + GMAC_FLOW_CTRL); +} + +static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int pmt = 0; + + if (mode & WAKE_MAGIC) { + pr_debug("GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } + if (mode & WAKE_UCAST) { + pr_debug("GMAC: WOL on global unicast\n"); + pmt |= global_unicast; + } + + writel(pmt, ioaddr + GMAC_PMT); +} + +static int dwmac1000_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + void __iomem *ioaddr = hw->pcsr; + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + int ret = 0; + + /* Not used events (e.g. MMC interrupts) are not handled. */ + if ((intr_status & mmc_tx_irq)) + x->mmc_tx_irq_n++; + if (unlikely(intr_status & mmc_rx_irq)) + x->mmc_rx_irq_n++; + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + x->mmc_rx_csum_offload_irq_n++; + if (unlikely(intr_status & pmt_irq)) { + /* clear the PMT bits 5 and 6 by reading the PMT status reg */ + readl(ioaddr + GMAC_PMT); + x->irq_receive_pmt_irq_n++; + } + /* MAC trx/rx EEE LPI entry/exit interrupts */ + if (intr_status & lpiis_irq) { + /* Clean LPI interrupt by reading the Reg 12 */ + ret = readl(ioaddr + LPI_CTRL_STATUS); + + if (ret & LPI_CTRL_STATUS_TLPIEN) + x->irq_tx_path_in_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_TLPIEX) + x->irq_tx_path_exit_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (ret & LPI_CTRL_STATUS_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + + if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { + readl(ioaddr + GMAC_AN_STATUS); + x->irq_pcs_ane_n++; + } + if (intr_status & rgmii_irq) { + u32 status = readl(ioaddr + GMAC_S_R_GMII); + x->irq_rgmii_n++; + + /* Save and dump the link status. 
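+ * Added note (editorial interpretation, not stated in the source):
+ * the GMAC_S_R_GMII_SPEED field is assumed to report the interface
+ * clock, so 125 MHz is decoded below as 1000 Mb/s, 25 MHz as
+ * 100 Mb/s, and anything else as 10 Mb/s.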
*/ + if (status & GMAC_S_R_GMII_LINK) { + int speed_value = (status & GMAC_S_R_GMII_SPEED) >> + GMAC_S_R_GMII_SPEED_SHIFT; + x->pcs_duplex = (status & GMAC_S_R_GMII_MODE); + + if (speed_value == GMAC_S_R_GMII_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_S_R_GMII_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_link = 1; + pr_debug("%s: Link is Up - %d/%s\n", __func__, + (int)x->pcs_speed, + x->pcs_duplex ? "Full" : "Half"); + } else { + x->pcs_link = 0; + pr_debug("%s: Link is Down\n", __func__); + } + } + + return ret; +} + +static void dwmac1000_set_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + /* Enable the link status receive on RGMII, SGMII ore SMII + * receive path and instruct the transmit to enter in LPI + * state. + */ + value = readl(ioaddr + LPI_CTRL_STATUS); + value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_reset_eee_mode(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + LPI_CTRL_STATUS); + value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA); + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + value = readl(ioaddr + LPI_CTRL_STATUS); + + if (link) + value |= LPI_CTRL_STATUS_PLS; + else + value &= ~LPI_CTRL_STATUS_PLS; + + writel(value, ioaddr + LPI_CTRL_STATUS); +} + +static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) +{ + void __iomem *ioaddr = hw->pcsr; + int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); + + /* Program the timers in the LPI timer control register: + * LS: minimum time (ms) for which the link + * status from PHY should be ok before transmitting + * the LPI pattern. + * TW: minimum time (us) for which the core waits + * after it has stopped transmitting the LPI pattern. 
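+ * Added worked example (values are illustrative only): ls = 1000 ms
+ * and tw = 30 us pack as (1000 << 16) | 30 = 0x03e8001e, within the
+ * 11-bit LS and 16-bit TW fields masked above.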
+ */ + writel(value, ioaddr + LPI_TIMER_CTRL); +} + +static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart) +{ + void __iomem *ioaddr = hw->pcsr; + /* auto negotiation enable and External Loopback enable */ + u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; + + if (restart) + value |= GMAC_AN_CTRL_RAN; + + writel(value, ioaddr + GMAC_AN_CTRL); +} + +static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_ANE_ADV); + + if (value & GMAC_ANE_FD) + adv->duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv->duplex |= DUPLEX_HALF; + + adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; + + value = readl(ioaddr + GMAC_ANE_LPA); + + if (value & GMAC_ANE_FD) + adv->lp_duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv->lp_duplex = DUPLEX_HALF; + + adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +} + +static const struct stmmac_ops dwmac1000_ops = { + .core_init = dwmac1000_core_init, + .rx_ipc = dwmac1000_rx_ipc_enable, + .dump_regs = dwmac1000_dump_regs, + .host_irq_status = dwmac1000_irq_status, + .set_filter = dwmac1000_set_filter, + .flow_ctrl = dwmac1000_flow_ctrl, + .pmt = dwmac1000_pmt, + .set_umac_addr = dwmac1000_set_umac_addr, + .get_umac_addr = dwmac1000_get_umac_addr, + .set_eee_mode = dwmac1000_set_eee_mode, + .reset_eee_mode = dwmac1000_reset_eee_mode, + .set_eee_timer = dwmac1000_set_eee_timer, + .set_eee_pls = dwmac1000_set_eee_pls, + .ctrl_ane = dwmac1000_ctrl_ane, + .get_adv = dwmac1000_get_adv, +}; + +struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, + int perfect_uc_entries) +{ + struct mac_device_info *mac; + u32 hwid = readl(ioaddr + GMAC_VERSION); + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + if (!mac) + return NULL; + + mac->pcsr = ioaddr; + mac->multicast_filter_bins = mcbins; + mac->unicast_filter_entries = perfect_uc_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->mac = &dwmac1000_ops; + mac->dma = &dwmac1000_dma_ops; + + mac->link.port = GMAC_CONTROL_PS; + mac->link.duplex = GMAC_CONTROL_DM; + mac->link.speed = GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; + mac->synopsys_uid = hwid; + + return mac; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c new file mode 100644 index 000000000..0e8937c11 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -0,0 +1,221 @@ +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + This contains the functions to handle the dma. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "dwmac1000.h" +#include "dwmac_dma.h" + +static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int burst_len, u32 dma_tx, u32 dma_rx, int atds) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int limit; + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + limit = 10; + while (limit--) { + if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + /* + * Set the DMA PBL (Programmable Burst Length) mode + * Before stmmac core 3.50 this mode bit was 4xPBL, and + * post 3.5 mode bit acts as 8*PBL. + * For core rev < 3.5, when the core is set for 4xPBL mode, the + * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats + * depending on pbl value. + * For core rev > 3.5, when the core is set for 8xPBL mode, the + * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats + * depending on pbl value. + */ + value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) | + (pbl << DMA_BUS_MODE_RPBL_SHIFT)); + + /* Set the Fixed burst mode */ + if (fb) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ + if (mb) + value |= DMA_BUS_MODE_MB; + + if (atds) + value |= DMA_BUS_MODE_ATDS; + + writel(value, ioaddr + DMA_BUS_MODE); + + /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE + * for supported bursts. + * + * Note: This is applicable only for revision GMACv3.61a. For + * older version this register is reserved and shall have no + * effect. + * + * Note: + * For Fixed Burst Mode: if we directly write 0xFF to this + * register using the configurations pass from platform code, + * this would ensure that all bursts supported by core are set + * and those which are not supported would remain ineffective. + * + * For Non Fixed Burst Mode: provide the maximum value of the + * burst length. Any burst equal or below the provided burst + * length would be allowed to perform. + */ + writel(burst_len, ioaddr + DMA_AXI_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + + /* RX/TX descriptor base address lists must be written into + * DMA CSR3 and CSR4, respectively + */ + writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); + writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); + + return 0; +} + +static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) +{ + csr6 &= ~DMA_CONTROL_RFA_MASK; + csr6 &= ~DMA_CONTROL_RFD_MASK; + + /* Leave flow control disabled if receive fifo size is less than + * 4K or 0. Otherwise, send XOFF when fifo is 1K less than full, + * and send XON when 2K less than full. 
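+ * Added note (editorial, derived from the defines above): this is
+ * done by setting DMA_CONTROL_EFC together with RFA_FULL_MINUS_1K
+ * (assert flow control with 1 KiB of space left) and
+ * RFD_FULL_MINUS_2K (release it once 2 KiB are free again); FIFOs
+ * smaller than 4 KiB leave EFC disabled below.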
+ */ + if (rxfifosz < 4096) { + csr6 &= ~DMA_CONTROL_EFC; + pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n", + rxfifosz); + } else { + csr6 |= DMA_CONTROL_EFC; + csr6 |= RFA_FULL_MINUS_1K; + csr6 |= RFD_FULL_MINUS_2K; + } + return csr6; +} + +static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, + int rxmode, int rxfifosz) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (txmode == SF_DMA_MODE) { + pr_debug("GMAC: enable TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. */ + csr6 |= DMA_CONTROL_TSF; + /* Operating on second frame increase the performance + * especially when transmit store-and-forward is used. + */ + csr6 |= DMA_CONTROL_OSF; + } else { + pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); + csr6 &= ~DMA_CONTROL_TSF; + csr6 &= DMA_CONTROL_TC_TX_MASK; + /* Set the transmit threshold */ + if (txmode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (txmode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else if (txmode <= 128) + csr6 |= DMA_CONTROL_TTC_128; + else if (txmode <= 192) + csr6 |= DMA_CONTROL_TTC_192; + else + csr6 |= DMA_CONTROL_TTC_256; + } + + if (rxmode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + csr6 |= DMA_CONTROL_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); + csr6 &= ~DMA_CONTROL_RSF; + csr6 &= DMA_CONTROL_TC_RX_MASK; + if (rxmode <= 32) + csr6 |= DMA_CONTROL_RTC_32; + else if (rxmode <= 64) + csr6 |= DMA_CONTROL_RTC_64; + else if (rxmode <= 96) + csr6 |= DMA_CONTROL_RTC_96; + else + csr6 |= DMA_CONTROL_RTC_128; + } + + /* Configure flow control based on rx fifo size */ + csr6 = dwmac1000_configure_fc(csr6, rxfifosz); + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) +{ + int i; + pr_info(" DMA registers\n"); + for (i = 0; i < 22; i++) { + if ((i < 9) || (i > 17)) { + int offset = i * 4; + pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i, + (DMA_BUS_MODE + offset), + readl(ioaddr + DMA_BUS_MODE + offset)); + } + } +} + +static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr) +{ + return readl(ioaddr + DMA_HW_FEATURE); +} + +static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) +{ + writel(riwt, ioaddr + DMA_RX_WATCHDOG); +} + +const struct stmmac_dma_ops dwmac1000_dma_ops = { + .init = dwmac1000_dma_init, + .dump_regs = dwmac1000_dump_dma_regs, + .dma_mode = dwmac1000_dma_operation_mode, + .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_irq = dwmac_enable_dma_irq, + .disable_dma_irq = dwmac_disable_dma_irq, + .start_tx = dwmac_dma_start_tx, + .stop_tx = dwmac_dma_stop_tx, + .start_rx = dwmac_dma_start_rx, + .stop_rx = dwmac_dma_stop_rx, + .dma_interrupt = dwmac_dma_interrupt, + .get_hw_feature = dwmac1000_get_hw_feature, + .rx_watchdog = dwmac1000_rx_watchdog, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c new file mode 100644 index 000000000..f8dd773f2 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -0,0 +1,198 @@ +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This only implements the mac core functions for this chip. 
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "dwmac100.h" + +static void dwmac100_core_init(struct mac_device_info *hw, int mtu) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + MAC_CONTROL); + + writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL); + +#ifdef STMMAC_VLAN_TAG_USED + writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); +#endif +} + +static void dwmac100_dump_mac_regs(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + pr_info("\t----------------------------------------------\n" + "\t DWMAC 100 CSR (base addr = 0x%p)\n" + "\t----------------------------------------------\n", ioaddr); + pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, + readl(ioaddr + MAC_CONTROL)); + pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, + readl(ioaddr + MAC_ADDR_HIGH)); + pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, + readl(ioaddr + MAC_ADDR_LOW)); + pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", + MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); + pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", + MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); + pr_info("\tflow control (offset 0x%x): 0x%08x\n", + MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); + pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, + readl(ioaddr + MAC_VLAN1)); + pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, + readl(ioaddr + MAC_VLAN2)); +} + +static int dwmac100_rx_ipc_enable(struct mac_device_info *hw) +{ + return 0; +} + +static int dwmac100_irq_status(struct mac_device_info *hw, + struct stmmac_extra_stats *x) +{ + return 0; +} + +static void dwmac100_set_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, + unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void dwmac100_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + u32 value = readl(ioaddr + MAC_CONTROL); + + if (dev->flags & IFF_PROMISC) { + value |= MAC_CONTROL_PR; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | + MAC_CONTROL_HP); + } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value |= MAC_CONTROL_PM; + value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); + writel(0xffffffff, ioaddr + MAC_HASH_HIGH); + writel(0xffffffff, ioaddr + 
MAC_HASH_LOW); + } else if (netdev_mc_empty(dev)) { /* no multicast */ + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | + MAC_CONTROL_HO | MAC_CONTROL_HP); + } else { + u32 mc_filter[2]; + struct netdev_hw_addr *ha; + + /* Perfect filter mode for physical address and Hash + * filter for multicast + */ + value |= MAC_CONTROL_HP; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | + MAC_CONTROL_IF | MAC_CONTROL_HO); + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The upper 6 bits of the calculated CRC are used to + * index the contens of the hash table + */ + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. + */ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + MAC_HASH_LOW); + writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); + } + + writel(value, ioaddr + MAC_CONTROL); +} + +static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int flow = MAC_FLOW_CTRL_ENABLE; + + if (duplex) + flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); + writel(flow, ioaddr + MAC_FLOW_CTRL); +} + +/* No PMT module supported on ST boards with this Eth chip. */ +static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode) +{ + return; +} + +static const struct stmmac_ops dwmac100_ops = { + .core_init = dwmac100_core_init, + .rx_ipc = dwmac100_rx_ipc_enable, + .dump_regs = dwmac100_dump_mac_regs, + .host_irq_status = dwmac100_irq_status, + .set_filter = dwmac100_set_filter, + .flow_ctrl = dwmac100_flow_ctrl, + .pmt = dwmac100_pmt, + .set_umac_addr = dwmac100_set_umac_addr, + .get_umac_addr = dwmac100_get_umac_addr, +}; + +struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) +{ + struct mac_device_info *mac; + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + if (!mac) + return NULL; + + pr_info("\tDWMAC100\n"); + + mac->pcsr = ioaddr; + mac->mac = &dwmac100_ops; + mac->dma = &dwmac100_dma_ops; + + mac->link.port = MAC_CONTROL_PS; + mac->link.duplex = MAC_CONTROL_F; + mac->link.speed = 0; + mac->mii.addr = MAC_MII_ADDR; + mac->mii.data = MAC_MII_DATA; + mac->synopsys_uid = 0; + + return mac; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c new file mode 100644 index 000000000..9d0971c1c --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -0,0 +1,146 @@ +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code. + + This contains the functions to handle the dma. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "dwmac100.h" +#include "dwmac_dma.h" + +static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int burst_len, u32 dma_tx, u32 dma_rx, int atds) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int limit; + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + limit = 10; + while (limit--) { + if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + /* Enable Application Access by writing to DMA CSR0 */ + writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), + ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + + /* RX/TX descriptor base addr lists must be written into + * DMA CSR3 and CSR4, respectively + */ + writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); + writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); + + return 0; +} + +/* Store and Forward capability is not used at all. + * + * The transmit threshold can be programmed by setting the TTC bits in the DMA + * control register. + */ +static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, + int rxmode, int rxfifosz) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (txmode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (txmode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else + csr6 |= DMA_CONTROL_TTC_128; + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac100_dump_dma_regs(void __iomem *ioaddr) +{ + int i; + + pr_debug("DWMAC 100 DMA CSR\n"); + for (i = 0; i < 9; i++) + pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, + (DMA_BUS_MODE + i * 4), + readl(ioaddr + DMA_BUS_MODE + i * 4)); + + pr_debug("\tCSR20 (0x%x): 0x%08x, CSR21 (0x%x): 0x%08x\n", + DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR), + DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); +} + +/* DMA controller has two counters to track the number of the missed frames. 
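+ * Added note (inferred from the handling below): the low bits of
+ * CSR8 count frames missed for lack of host receive buffers
+ * (saturated to 0xffff when DMA_MISSED_FRAME_OVE_M reports counter
+ * overflow), while the field starting at bit 17 counts frames
+ * dropped on RX FIFO overflow (0x800 is added when
+ * DMA_MISSED_FRAME_OVE flags that counter as overflowed).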
*/ +static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); + + if (unlikely(csr8)) { + if (csr8 & DMA_MISSED_FRAME_OVE) { + stats->rx_over_errors += 0x800; + x->rx_overflow_cntr += 0x800; + } else { + unsigned int ove_cntr; + ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); + stats->rx_over_errors += ove_cntr; + x->rx_overflow_cntr += ove_cntr; + } + + if (csr8 & DMA_MISSED_FRAME_OVE_M) { + stats->rx_missed_errors += 0xffff; + x->rx_missed_cntr += 0xffff; + } else { + unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); + stats->rx_missed_errors += miss_f; + x->rx_missed_cntr += miss_f; + } + } +} + +const struct stmmac_dma_ops dwmac100_dma_ops = { + .init = dwmac100_dma_init, + .dump_regs = dwmac100_dump_dma_regs, + .dma_mode = dwmac100_dma_operation_mode, + .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, + .enable_dma_transmission = dwmac_enable_dma_transmission, + .enable_dma_irq = dwmac_enable_dma_irq, + .disable_dma_irq = dwmac_disable_dma_irq, + .start_tx = dwmac_dma_start_tx, + .stop_tx = dwmac_dma_stop_tx, + .start_rx = dwmac_dma_start_rx, + .stop_rx = dwmac_dma_stop_rx, + .dma_interrupt = dwmac_dma_interrupt, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h new file mode 100644 index 000000000..def266da5 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -0,0 +1,116 @@ +/******************************************************************************* + DWMAC DMA Header file. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __DWMAC_DMA_H__ +#define __DWMAC_DMA_H__ + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ +#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ +#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */ +#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */ +#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */ +#define DMA_STATUS 0x00001014 /* Status Register */ +#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ +#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ +#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ +/* Rx watchdog register */ +#define DMA_RX_WATCHDOG 0x00001024 +/* AXI Bus Mode */ +#define DMA_AXI_BUS_MODE 0x00001028 +#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ +#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ +#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ + +/* DMA Control register defines */ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ + +/* DMA Normal interrupt */ +#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ +#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ +#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ +#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ + +#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ + DMA_INTR_ENA_TIE) + +/* DMA Abnormal interrupt */ +#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ +#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ +#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ +#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ +#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ +#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ +#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ +#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ +#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ + +#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ + DMA_INTR_ENA_UNE) + +/* DMA default interrupt mask */ +#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) + +/* DMA Status register defines */ +#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */ +#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 0x00000200 /* Receive 
Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ +#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + +void dwmac_enable_dma_transmission(void __iomem *ioaddr); +void dwmac_enable_dma_irq(void __iomem *ioaddr); +void dwmac_disable_dma_irq(void __iomem *ioaddr); +void dwmac_dma_start_tx(void __iomem *ioaddr); +void dwmac_dma_stop_tx(void __iomem *ioaddr); +void dwmac_dma_start_rx(void __iomem *ioaddr); +void dwmac_dma_stop_rx(void __iomem *ioaddr); +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); + +#endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c new file mode 100644 index 000000000..484e3cf9c --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -0,0 +1,267 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "common.h" +#include "dwmac_dma.h" + +#define GMAC_HI_REG_AE 0x80000000 + +/* CSR1 enables the transmit DMA to check for new descriptor */ +void dwmac_enable_dma_transmission(void __iomem *ioaddr) +{ + writel(1, ioaddr + DMA_XMT_POLL_DEMAND); +} + +void dwmac_enable_dma_irq(void __iomem *ioaddr) +{ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} + +void dwmac_disable_dma_irq(void __iomem *ioaddr) +{ + writel(0, ioaddr + DMA_INTR_ENA); +} + +void dwmac_dma_start_tx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_stop_tx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_start_rx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); +} + +void dwmac_dma_stop_rx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); +} + +#ifdef DWMAC_DMA_DEBUG +static void show_tx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT; + + switch (state) { + case 0: + pr_debug("- TX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_debug("- TX (Running):Fetching the Tx desc\n"); + break; + case 2: + pr_debug("- TX (Running): Waiting for end of tx\n"); + break; + case 3: + pr_debug("- TX (Running): Reading the data " + "and queuing the data into the Tx buf\n"); + break; + case 6: + pr_debug("- TX (Suspended): Tx Buff Underflow " + "or an unavailable Transmit descriptor\n"); + break; + case 7: + pr_debug("- TX (Running): Closing Tx descriptor\n"); + break; + default: + break; + } +} + +static void show_rx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT; + + switch (state) { + case 0: + pr_debug("- RX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_debug("- RX (Running): Fetching the Rx desc\n"); + break; + case 2: + pr_debug("- RX (Running):Checking for end of pkt\n"); + break; + case 3: + pr_debug("- RX (Running): Waiting for Rx pkt\n"); + break; + case 4: + pr_debug("- RX (Suspended): Unavailable Rx buf\n"); + break; + case 5: + pr_debug("- RX (Running): Closing Rx descriptor\n"); + break; + case 6: + pr_debug("- RX(Running): Flushing the current frame" + " from the Rx buf\n"); + break; + case 7: + pr_debug("- RX (Running): Queuing the Rx frame" + " from the Rx buf into memory\n"); + break; + default: + break; + } +} +#endif + +int dwmac_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x) +{ + int ret = 0; + /* read the status register (CSR5) */ + u32 intr_status = readl(ioaddr + DMA_STATUS); + +#ifdef DWMAC_DMA_DEBUG + /* Enable it to monitor DMA rx/tx status in case of critical problems */ + pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status); + show_tx_process_state(intr_status); + show_rx_process_state(intr_status); +#endif + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + if (unlikely(intr_status & DMA_STATUS_UNF)) { + ret = tx_hard_error_bump_tc; + x->tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) + x->tx_jabber_irq++; + + if (unlikely(intr_status & 
DMA_STATUS_OVF)) + x->rx_overflow_irq++; + + if (unlikely(intr_status & DMA_STATUS_RU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & DMA_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(intr_status & DMA_STATUS_FBI)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(intr_status & DMA_STATUS_NIS)) { + x->normal_irq_n++; + if (likely(intr_status & DMA_STATUS_RI)) { + u32 value = readl(ioaddr + DMA_INTR_ENA); + /* to schedule NAPI on real RIE event. */ + if (likely(value & DMA_INTR_ENA_RIE)) { + x->rx_normal_irq_n++; + ret |= handle_rx; + } + } + if (likely(intr_status & DMA_STATUS_TI)) { + x->tx_normal_irq_n++; + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_STATUS_ERI)) + x->rx_early_irq++; + } + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ + writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); + + return ret; +} + +void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); + + do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); +} + +void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + /* For MAC Addr registers se have to set the Address Enable (AE) + * bit that has no effect on the High Reg 0 where the bit 31 (MO) + * is RO. + */ + writel(data | GMAC_HI_REG_AE, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); +} + +/* Enable disable MAC RX/TX */ +void stmmac_set_mac(void __iomem *ioaddr, bool enable) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + + if (enable) + value |= MAC_RNABLE_RX | MAC_ENABLE_TX; + else + value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); + + writel(value, ioaddr + MAC_CTRL_REG); +} + +void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; +} + diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c new file mode 100644 index 000000000..1e2bcf5f8 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -0,0 +1,402 @@ +/******************************************************************************* + This contains the functions to handle the enhanced descriptors. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "common.h" +#include "descs_com.h" + +static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.etx.error_summary)) { + if (unlikely(p->des01.etx.jabber_timeout)) + x->tx_jabber++; + + if (unlikely(p->des01.etx.frame_flushed)) { + x->tx_frame_flushed++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + if (unlikely(p->des01.etx.loss_carrier)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.no_carrier)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.late_collision)) + stats->collisions += p->des01.etx.collision_count; + + if (unlikely(p->des01.etx.excessive_collisions)) + stats->collisions += p->des01.etx.collision_count; + + if (unlikely(p->des01.etx.excessive_deferral)) + x->tx_deferred++; + + if (unlikely(p->des01.etx.underflow_error)) { + dwmac_dma_flush_tx_fifo(ioaddr); + x->tx_underflow++; + } + + if (unlikely(p->des01.etx.ip_header_error)) + x->tx_ip_header_error++; + + if (unlikely(p->des01.etx.payload_error)) { + x->tx_payload_error++; + dwmac_dma_flush_tx_fifo(ioaddr); + } + + ret = -1; + } + + if (unlikely(p->des01.etx.deferred)) + x->tx_deferred++; + +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.etx.vlan_frame) + x->tx_vlan++; +#endif + + return ret; +} + +static int enh_desc_get_tx_len(struct dma_desc *p) +{ + return p->des01.etx.buffer1_size; +} + +static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) +{ + int ret = good_frame; + u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; + + /* bits 5 7 0 | Frame status + * ---------------------------------------------------------- + * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects) + * 1 0 0 | IPv4/6 No CSUM errorS. + * 1 0 1 | IPv4/6 CSUM PAYLOAD error + * 1 1 0 | IPv4/6 CSUM IP HR error + * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS + * 0 0 1 | IPv4/6 unsupported IP PAYLOAD + * 0 1 1 | COE bypassed.. no IPv4/6 frame + * 0 1 0 | Reserved. 
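+ * Added summary of the mapping implemented below: status 0 returns
+ * llc_snap, 4 good_frame, 5/6/7 csum_none, 1/3 discard_frame; any
+ * other encoding keeps the good_frame default.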
+ */ + if (status == 0x0) + ret = llc_snap; + else if (status == 0x4) + ret = good_frame; + else if (status == 0x5) + ret = csum_none; + else if (status == 0x6) + ret = csum_none; + else if (status == 0x7) + ret = csum_none; + else if (status == 0x1) + ret = discard_frame; + else if (status == 0x3) + ret = discard_frame; + return ret; +} + +static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p) +{ + if (unlikely(p->basic.des01.erx.rx_mac_addr)) { + if (p->des4.erx.ip_hdr_err) + x->ip_hdr_err++; + if (p->des4.erx.ip_payload_err) + x->ip_payload_err++; + if (p->des4.erx.ip_csum_bypassed) + x->ip_csum_bypassed++; + if (p->des4.erx.ipv4_pkt_rcvd) + x->ipv4_pkt_rcvd++; + if (p->des4.erx.ipv6_pkt_rcvd) + x->ipv6_pkt_rcvd++; + if (p->des4.erx.msg_type == RDES_EXT_SYNC) + x->rx_msg_type_sync++; + else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP) + x->rx_msg_type_follow_up++; + else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) + x->rx_msg_type_delay_req++; + else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) + x->rx_msg_type_delay_resp++; + else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ) + x->rx_msg_type_pdelay_req++; + else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) + x->rx_msg_type_pdelay_resp++; + else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP) + x->rx_msg_type_pdelay_follow_up++; + else + x->rx_msg_type_ext_no_ptp++; + if (p->des4.erx.ptp_frame_type) + x->ptp_frame_type++; + if (p->des4.erx.ptp_ver) + x->ptp_ver++; + if (p->des4.erx.timestamp_dropped) + x->timestamp_dropped++; + if (p->des4.erx.av_pkt_rcvd) + x->av_pkt_rcvd++; + if (p->des4.erx.av_tagged_pkt_rcvd) + x->av_tagged_pkt_rcvd++; + if (p->des4.erx.vlan_tag_priority_val) + x->vlan_tag_priority_val++; + if (p->des4.erx.l3_filter_match) + x->l3_filter_match++; + if (p->des4.erx.l4_filter_match) + x->l4_filter_match++; + if (p->des4.erx.l3_l4_filter_no_match) + x->l3_l4_filter_no_match++; + } +} + +static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = good_frame; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.erx.error_summary)) { + if (unlikely(p->des01.erx.descriptor_error)) { + x->rx_desc++; + stats->rx_length_errors++; + } + if (unlikely(p->des01.erx.overflow_error)) + x->rx_gmac_overflow++; + + if (unlikely(p->des01.erx.ipc_csum_error)) + pr_err("\tIPC Csum Error/Giant frame\n"); + + if (unlikely(p->des01.erx.late_collision)) { + stats->collisions++; + } + if (unlikely(p->des01.erx.receive_watchdog)) + x->rx_watchdog++; + + if (unlikely(p->des01.erx.error_gmii)) + x->rx_mii++; + + if (unlikely(p->des01.erx.crc_error)) { + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + + /* After a payload csum error, the ES bit is set. + * It doesn't match with the information reported into the databook. + * At any rate, we need to understand if the CSUM hw computation is ok + * and report this info to the upper layers. 
*/ + ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, + p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); + + if (unlikely(p->des01.erx.dribbling)) + x->dribbling_bit++; + + if (unlikely(p->des01.erx.sa_filter_fail)) { + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.da_filter_fail)) { + x->da_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.length_error)) { + x->rx_length++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.erx.vlan_tag) + x->rx_vlan++; +#endif + + return ret; +} + +static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) +{ + p->des01.erx.own = 1; + p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_rx_set_on_chain(p, end); + else + ehn_desc_rx_set_on_ring(p, end); + + if (disable_rx_ic) + p->des01.erx.disable_ic = 1; +} + +static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des01.etx.own = 0; + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_tx_set_on_chain(p, end); + else + ehn_desc_tx_set_on_ring(p, end); +} + +static int enh_desc_get_tx_owner(struct dma_desc *p) +{ + return p->des01.etx.own; +} + +static int enh_desc_get_rx_owner(struct dma_desc *p) +{ + return p->des01.erx.own; +} + +static void enh_desc_set_tx_owner(struct dma_desc *p) +{ + p->des01.etx.own = 1; +} + +static void enh_desc_set_rx_owner(struct dma_desc *p) +{ + p->des01.erx.own = 1; +} + +static int enh_desc_get_tx_ls(struct dma_desc *p) +{ + return p->des01.etx.last_segment; +} + +static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) +{ + int ter = p->des01.etx.end_ring; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) + enh_desc_end_tx_desc_on_chain(p, ter); + else + enh_desc_end_tx_desc_on_ring(p, ter); +} + +static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag, int mode) +{ + p->des01.etx.first_segment = is_fs; + + if (mode == STMMAC_CHAIN_MODE) + enh_set_tx_desc_len_on_chain(p, len); + else + enh_set_tx_desc_len_on_ring(p, len); + + if (likely(csum_flag)) + p->des01.etx.checksum_insertion = cic_full; +} + +static void enh_desc_clear_tx_ic(struct dma_desc *p) +{ + p->des01.etx.interrupt = 0; +} + +static void enh_desc_close_tx_desc(struct dma_desc *p) +{ + p->des01.etx.last_segment = 1; + p->des01.etx.interrupt = 1; +} + +static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) +{ + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines. 
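+ * Added example (illustrative): a 64-byte frame received through a
+ * type-1 engine is reported with frame_length = 66, so 2 is
+ * subtracted below before the length is returned.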
*/ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + return p->des01.erx.frame_length - 2; + else + return p->des01.erx.frame_length; +} + +static void enh_desc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des01.etx.time_stamp_enable = 1; +} + +static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) +{ + return p->des01.etx.time_stamp_status; +} + +static u64 enh_desc_get_timestamp(void *desc, u32 ats) +{ + u64 ns; + + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + ns = p->des6; + /* convert high/sec time stamp value to nanosecond */ + ns += p->des7 * 1000000000ULL; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + ns = p->des2; + ns += p->des3 * 1000000000ULL; + } + + return ns; +} + +static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) +{ + if (ats) { + struct dma_extended_desc *p = (struct dma_extended_desc *)desc; + return p->basic.des01.erx.ipc_csum_error; + } else { + struct dma_desc *p = (struct dma_desc *)desc; + if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; + } +} + +const struct stmmac_desc_ops enh_desc_ops = { + .tx_status = enh_desc_get_tx_status, + .rx_status = enh_desc_get_rx_status, + .get_tx_len = enh_desc_get_tx_len, + .init_rx_desc = enh_desc_init_rx_desc, + .init_tx_desc = enh_desc_init_tx_desc, + .get_tx_owner = enh_desc_get_tx_owner, + .get_rx_owner = enh_desc_get_rx_owner, + .release_tx_desc = enh_desc_release_tx_desc, + .prepare_tx_desc = enh_desc_prepare_tx_desc, + .clear_tx_ic = enh_desc_clear_tx_ic, + .close_tx_desc = enh_desc_close_tx_desc, + .get_tx_ls = enh_desc_get_tx_ls, + .set_tx_owner = enh_desc_set_tx_owner, + .set_rx_owner = enh_desc_set_rx_owner, + .get_rx_frame_len = enh_desc_get_rx_frame_len, + .rx_extended_status = enh_desc_get_ext_status, + .enable_tx_timestamp = enh_desc_enable_tx_timestamp, + .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, + .get_timestamp = enh_desc_get_timestamp, + .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h new file mode 100644 index 000000000..192c24913 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -0,0 +1,135 @@ +/******************************************************************************* + MMC Header file + + Copyright (C) 2011 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __MMC_H__ +#define __MMC_H__ + +/* MMC control register */ +/* When set, all counter are reset */ +#define MMC_CNTRL_COUNTER_RESET 0x1 +/* When set, do not roll over zero after reaching the max value*/ +#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2 +#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */ +#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the + * current value.*/ +#define MMC_CNTRL_PRESET 0x10 +#define MMC_CNTRL_FULL_HALF_PRESET 0x20 +struct stmmac_counters { + unsigned int mmc_tx_octetcount_gb; + unsigned int mmc_tx_framecount_gb; + unsigned int mmc_tx_broadcastframe_g; + unsigned int mmc_tx_multicastframe_g; + unsigned int mmc_tx_64_octets_gb; + unsigned int mmc_tx_65_to_127_octets_gb; + unsigned int mmc_tx_128_to_255_octets_gb; + unsigned int mmc_tx_256_to_511_octets_gb; + unsigned int mmc_tx_512_to_1023_octets_gb; + unsigned int mmc_tx_1024_to_max_octets_gb; + unsigned int mmc_tx_unicast_gb; + unsigned int mmc_tx_multicast_gb; + unsigned int mmc_tx_broadcast_gb; + unsigned int mmc_tx_underflow_error; + unsigned int mmc_tx_singlecol_g; + unsigned int mmc_tx_multicol_g; + unsigned int mmc_tx_deferred; + unsigned int mmc_tx_latecol; + unsigned int mmc_tx_exesscol; + unsigned int mmc_tx_carrier_error; + unsigned int mmc_tx_octetcount_g; + unsigned int mmc_tx_framecount_g; + unsigned int mmc_tx_excessdef; + unsigned int mmc_tx_pause_frame; + unsigned int mmc_tx_vlan_frame_g; + + /* MMC RX counter registers */ + unsigned int mmc_rx_framecount_gb; + unsigned int mmc_rx_octetcount_gb; + unsigned int mmc_rx_octetcount_g; + unsigned int mmc_rx_broadcastframe_g; + unsigned int mmc_rx_multicastframe_g; + unsigned int mmc_rx_crc_error; + unsigned int mmc_rx_align_error; + unsigned int mmc_rx_run_error; + unsigned int mmc_rx_jabber_error; + unsigned int mmc_rx_undersize_g; + unsigned int mmc_rx_oversize_g; + unsigned int mmc_rx_64_octets_gb; + unsigned int mmc_rx_65_to_127_octets_gb; + unsigned int mmc_rx_128_to_255_octets_gb; + unsigned int mmc_rx_256_to_511_octets_gb; + unsigned int mmc_rx_512_to_1023_octets_gb; + unsigned int mmc_rx_1024_to_max_octets_gb; + unsigned int mmc_rx_unicast_g; + unsigned int mmc_rx_length_error; + unsigned int mmc_rx_autofrangetype; + unsigned int mmc_rx_pause_frames; + unsigned int mmc_rx_fifo_overflow; + unsigned int mmc_rx_vlan_frames_gb; + unsigned int mmc_rx_watchdog_error; + /* IPC */ + unsigned int mmc_rx_ipc_intr_mask; + unsigned int mmc_rx_ipc_intr; + /* IPv4 */ + unsigned int mmc_rx_ipv4_gd; + unsigned int mmc_rx_ipv4_hderr; + unsigned int mmc_rx_ipv4_nopay; + unsigned int mmc_rx_ipv4_frag; + unsigned int mmc_rx_ipv4_udsbl; + + unsigned int mmc_rx_ipv4_gd_octets; + unsigned int mmc_rx_ipv4_hderr_octets; + unsigned int mmc_rx_ipv4_nopay_octets; + unsigned int mmc_rx_ipv4_frag_octets; + unsigned int mmc_rx_ipv4_udsbl_octets; + + /* IPV6 */ + unsigned int mmc_rx_ipv6_gd_octets; + unsigned int mmc_rx_ipv6_hderr_octets; + unsigned int mmc_rx_ipv6_nopay_octets; + + unsigned int mmc_rx_ipv6_gd; + unsigned int mmc_rx_ipv6_hderr; + unsigned int mmc_rx_ipv6_nopay; + + /* Protocols */ + unsigned int mmc_rx_udp_gd; + unsigned int mmc_rx_udp_err; + unsigned int mmc_rx_tcp_gd; + unsigned int mmc_rx_tcp_err; + unsigned int mmc_rx_icmp_gd; + unsigned int mmc_rx_icmp_err; + + unsigned int mmc_rx_udp_gd_octets; + unsigned int mmc_rx_udp_err_octets; + unsigned int mmc_rx_tcp_gd_octets; + unsigned int 
mmc_rx_tcp_err_octets; + unsigned int mmc_rx_icmp_gd_octets; + unsigned int mmc_rx_icmp_err_octets; +}; + +void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); +void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); +void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); + +#endif /* __MMC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c new file mode 100644 index 000000000..08c483bd2 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -0,0 +1,267 @@ +/******************************************************************************* + DWMAC Management Counters + + Copyright (C) 2011 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "mmc.h" + +/* MAC Management Counters register offset */ + +#define MMC_CNTRL 0x00000100 /* MMC Control */ +#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */ +#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ +#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ +#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ +#define MMC_DEFAULT_MASK 0xffffffff + +/* MMC TX counter registers */ + +/* Note: + * _GB register stands for good and bad frames + * _G is for good only. 
+ */ +#define MMC_TX_OCTETCOUNT_GB 0x00000114 +#define MMC_TX_FRAMECOUNT_GB 0x00000118 +#define MMC_TX_BROADCASTFRAME_G 0x0000011c +#define MMC_TX_MULTICASTFRAME_G 0x00000120 +#define MMC_TX_64_OCTETS_GB 0x00000124 +#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128 +#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c +#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130 +#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134 +#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138 +#define MMC_TX_UNICAST_GB 0x0000013c +#define MMC_TX_MULTICAST_GB 0x00000140 +#define MMC_TX_BROADCAST_GB 0x00000144 +#define MMC_TX_UNDERFLOW_ERROR 0x00000148 +#define MMC_TX_SINGLECOL_G 0x0000014c +#define MMC_TX_MULTICOL_G 0x00000150 +#define MMC_TX_DEFERRED 0x00000154 +#define MMC_TX_LATECOL 0x00000158 +#define MMC_TX_EXESSCOL 0x0000015c +#define MMC_TX_CARRIER_ERROR 0x00000160 +#define MMC_TX_OCTETCOUNT_G 0x00000164 +#define MMC_TX_FRAMECOUNT_G 0x00000168 +#define MMC_TX_EXCESSDEF 0x0000016c +#define MMC_TX_PAUSE_FRAME 0x00000170 +#define MMC_TX_VLAN_FRAME_G 0x00000174 + +/* MMC RX counter registers */ +#define MMC_RX_FRAMECOUNT_GB 0x00000180 +#define MMC_RX_OCTETCOUNT_GB 0x00000184 +#define MMC_RX_OCTETCOUNT_G 0x00000188 +#define MMC_RX_BROADCASTFRAME_G 0x0000018c +#define MMC_RX_MULTICASTFRAME_G 0x00000190 +#define MMC_RX_CRC_ERRROR 0x00000194 +#define MMC_RX_ALIGN_ERROR 0x00000198 +#define MMC_RX_RUN_ERROR 0x0000019C +#define MMC_RX_JABBER_ERROR 0x000001A0 +#define MMC_RX_UNDERSIZE_G 0x000001A4 +#define MMC_RX_OVERSIZE_G 0x000001A8 +#define MMC_RX_64_OCTETS_GB 0x000001AC +#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0 +#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4 +#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8 +#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc +#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0 +#define MMC_RX_UNICAST_G 0x000001c4 +#define MMC_RX_LENGTH_ERROR 0x000001c8 +#define MMC_RX_AUTOFRANGETYPE 0x000001cc +#define MMC_RX_PAUSE_FRAMES 0x000001d0 +#define MMC_RX_FIFO_OVERFLOW 0x000001d4 +#define MMC_RX_VLAN_FRAMES_GB 0x000001d8 +#define MMC_RX_WATCHDOG_ERROR 0x000001dc +/* IPC*/ +#define MMC_RX_IPC_INTR_MASK 0x00000200 +#define MMC_RX_IPC_INTR 0x00000208 +/* IPv4*/ +#define MMC_RX_IPV4_GD 0x00000210 +#define MMC_RX_IPV4_HDERR 0x00000214 +#define MMC_RX_IPV4_NOPAY 0x00000218 +#define MMC_RX_IPV4_FRAG 0x0000021C +#define MMC_RX_IPV4_UDSBL 0x00000220 + +#define MMC_RX_IPV4_GD_OCTETS 0x00000250 +#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254 +#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258 +#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c +#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260 + +/* IPV6*/ +#define MMC_RX_IPV6_GD_OCTETS 0x00000264 +#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268 +#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c + +#define MMC_RX_IPV6_GD 0x00000224 +#define MMC_RX_IPV6_HDERR 0x00000228 +#define MMC_RX_IPV6_NOPAY 0x0000022c + +/* Protocols*/ +#define MMC_RX_UDP_GD 0x00000230 +#define MMC_RX_UDP_ERR 0x00000234 +#define MMC_RX_TCP_GD 0x00000238 +#define MMC_RX_TCP_ERR 0x0000023c +#define MMC_RX_ICMP_GD 0x00000240 +#define MMC_RX_ICMP_ERR 0x00000244 + +#define MMC_RX_UDP_GD_OCTETS 0x00000270 +#define MMC_RX_UDP_ERR_OCTETS 0x00000274 +#define MMC_RX_TCP_GD_OCTETS 0x00000278 +#define MMC_RX_TCP_ERR_OCTETS 0x0000027c +#define MMC_RX_ICMP_GD_OCTETS 0x00000280 +#define MMC_RX_ICMP_ERR_OCTETS 0x00000284 + +void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) +{ + u32 value = readl(ioaddr + MMC_CNTRL); + + value |= (mode & 0x3F); + + writel(value, ioaddr + MMC_CNTRL); + + pr_debug("stmmac: MMC ctrl register (offset 0x%x): 
0x%08x\n", + MMC_CNTRL, value); +} + +/* To mask all all interrupts.*/ +void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) +{ + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); +} + +/* This reads the MAC core counters (if actaully supported). + * by default the MMC core is programmed to reset each + * counter after a read. So all the field of the mmc struct + * have to be incremented. + */ +void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) +{ + mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB); + mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB); + mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G); + mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G); + mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB); + mmc->mmc_tx_65_to_127_octets_gb += + readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB); + mmc->mmc_tx_128_to_255_octets_gb += + readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB); + mmc->mmc_tx_256_to_511_octets_gb += + readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB); + mmc->mmc_tx_512_to_1023_octets_gb += + readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB); + mmc->mmc_tx_1024_to_max_octets_gb += + readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB); + mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB); + mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB); + mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR); + mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G); + mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G); + mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED); + mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL); + mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL); + mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR); + mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G); + mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G); + mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF); + mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME); + mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G); + + /* MMC RX counter registers */ + mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB); + mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB); + mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); + mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); + mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); + mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR); + mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); + mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); + mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); + mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G); + mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G); + mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB); + mmc->mmc_rx_65_to_127_octets_gb += + readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB); + mmc->mmc_rx_128_to_255_octets_gb += + readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB); + mmc->mmc_rx_256_to_511_octets_gb += + readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB); + mmc->mmc_rx_512_to_1023_octets_gb += + readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB); + mmc->mmc_rx_1024_to_max_octets_gb 
+= + readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G); + mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR); + mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE); + mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES); + mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW); + mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB); + mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR); + /* IPC */ + mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK); + mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR); + /* IPv4 */ + mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD); + mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR); + mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY); + mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG); + mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL); + + mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS); + mmc->mmc_rx_ipv4_hderr_octets += + readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS); + mmc->mmc_rx_ipv4_nopay_octets += + readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS); + mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS); + mmc->mmc_rx_ipv4_udsbl_octets += + readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS); + + /* IPV6 */ + mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS); + mmc->mmc_rx_ipv6_hderr_octets += + readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS); + mmc->mmc_rx_ipv6_nopay_octets += + readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS); + + mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD); + mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR); + mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY); + + /* Protocols */ + mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD); + mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR); + mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD); + mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR); + mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD); + mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR); + + mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS); + mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS); + mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS); + mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS); + mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS); + mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS); +} diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c new file mode 100644 index 000000000..35ad4f427 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -0,0 +1,273 @@ +/******************************************************************************* + This contains the functions to handle the normal descriptors. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
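As the comment above dwmac_mmc_read() notes, the MMC block is normally programmed to reset each counter after it is read, which is why every field of struct stmmac_counters is accumulated with "+=" rather than overwritten. A small userspace sketch of that accumulate-on-read pattern; the register access is stubbed out and all names here are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for readl() on a clear-on-read counter register: each call
 * returns only the events counted since the previous read. */
static uint32_t read_and_clear_counter(void)
{
	static const uint32_t deltas[] = { 10, 0, 7 };
	static unsigned int call;

	return deltas[call++ % 3];
}

int main(void)
{
	uint64_t total = 0;
	int i;

	/* Software keeps the running total, just like the "+=" lines in
	 * dwmac_mmc_read() above. */
	for (i = 0; i < 3; i++)
		total += read_and_clear_counter();

	printf("accumulated count: %llu\n", (unsigned long long)total);
	return 0;
}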
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include "common.h" +#include "descs_com.h" + +static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.tx.error_summary)) { + if (unlikely(p->des01.tx.underflow_error)) { + x->tx_underflow++; + stats->tx_fifo_errors++; + } + if (unlikely(p->des01.tx.no_carrier)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.tx.loss_carrier)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely((p->des01.tx.excessive_deferral) || + (p->des01.tx.excessive_collisions) || + (p->des01.tx.late_collision))) + stats->collisions += p->des01.tx.collision_count; + ret = -1; + } + + if (p->des01.etx.vlan_frame) + x->tx_vlan++; + + if (unlikely(p->des01.tx.deferred)) + x->tx_deferred++; + + return ret; +} + +static int ndesc_get_tx_len(struct dma_desc *p) +{ + return p->des01.tx.buffer1_size; +} + +/* This function verifies if each incoming frame has some errors + * and, if required, updates the multicast statistics. + * In case of success, it returns good_frame because the GMAC device + * is supposed to be able to compute the csum in HW. */ +static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = good_frame; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.rx.last_descriptor == 0)) { + pr_warn("%s: Oversized frame spanned multiple buffers\n", + __func__); + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(p->des01.rx.error_summary)) { + if (unlikely(p->des01.rx.descriptor_error)) + x->rx_desc++; + if (unlikely(p->des01.rx.sa_filter_fail)) + x->sa_filter_fail++; + if (unlikely(p->des01.rx.overflow_error)) + x->overflow_error++; + if (unlikely(p->des01.rx.ipc_csum_error)) + x->ipc_csum_error++; + if (unlikely(p->des01.rx.collision)) { + x->rx_collision++; + stats->collisions++; + } + if (unlikely(p->des01.rx.crc_error)) { + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + if (unlikely(p->des01.rx.dribbling)) + x->dribbling_bit++; + + if (unlikely(p->des01.rx.length_error)) { + x->rx_length++; + ret = discard_frame; + } + if (unlikely(p->des01.rx.mii_error)) { + x->rx_mii++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.rx.vlan_tag) + x->vlan_tag++; +#endif + return ret; +} + +static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, + int end) +{ + p->des01.rx.own = 1; + p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + + if (mode == STMMAC_CHAIN_MODE) + ndesc_rx_set_on_chain(p, end); + else + ndesc_rx_set_on_ring(p, end); + + if (disable_rx_ic) + p->des01.rx.disable_ic = 1; +} + +static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) +{ + p->des01.tx.own = 0; + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p, end); + else + ndesc_tx_set_on_ring(p, end); +} + +static int ndesc_get_tx_owner(struct dma_desc *p) +{ + return p->des01.tx.own; +} + +static int 
ndesc_get_rx_owner(struct dma_desc *p) +{ + return p->des01.rx.own; +} + +static void ndesc_set_tx_owner(struct dma_desc *p) +{ + p->des01.tx.own = 1; +} + +static void ndesc_set_rx_owner(struct dma_desc *p) +{ + p->des01.rx.own = 1; +} + +static int ndesc_get_tx_ls(struct dma_desc *p) +{ + return p->des01.tx.last_segment; +} + +static void ndesc_release_tx_desc(struct dma_desc *p, int mode) +{ + int ter = p->des01.tx.end_ring; + + memset(p, 0, offsetof(struct dma_desc, des2)); + if (mode == STMMAC_CHAIN_MODE) + ndesc_end_tx_desc_on_chain(p, ter); + else + ndesc_end_tx_desc_on_ring(p, ter); +} + +static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag, int mode) +{ + p->des01.tx.first_segment = is_fs; + if (mode == STMMAC_CHAIN_MODE) + norm_set_tx_desc_len_on_chain(p, len); + else + norm_set_tx_desc_len_on_ring(p, len); + + if (likely(csum_flag)) + p->des01.tx.checksum_insertion = cic_full; +} + +static void ndesc_clear_tx_ic(struct dma_desc *p) +{ + p->des01.tx.interrupt = 0; +} + +static void ndesc_close_tx_desc(struct dma_desc *p) +{ + p->des01.tx.last_segment = 1; + p->des01.tx.interrupt = 1; +} + +static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) +{ + /* The type-1 checksum offload engines append the checksum at + * the end of frame and the two bytes of checksum are added in + * the length. + * Adjust for that in the framelen for type-1 checksum offload + * engines. */ + if (rx_coe_type == STMMAC_RX_COE_TYPE1) + return p->des01.rx.frame_length - 2; + else + return p->des01.rx.frame_length; +} + +static void ndesc_enable_tx_timestamp(struct dma_desc *p) +{ + p->des01.tx.time_stamp_enable = 1; +} + +static int ndesc_get_tx_timestamp_status(struct dma_desc *p) +{ + return p->des01.tx.time_stamp_status; +} + +static u64 ndesc_get_timestamp(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = p->des2; + /* convert high/sec time stamp value to nanosecond */ + ns += p->des3 * 1000000000ULL; + + return ns; +} + +static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + + if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) + /* timestamp is corrupted, hence don't store it */ + return 0; + else + return 1; +} + +const struct stmmac_desc_ops ndesc_ops = { + .tx_status = ndesc_get_tx_status, + .rx_status = ndesc_get_rx_status, + .get_tx_len = ndesc_get_tx_len, + .init_rx_desc = ndesc_init_rx_desc, + .init_tx_desc = ndesc_init_tx_desc, + .get_tx_owner = ndesc_get_tx_owner, + .get_rx_owner = ndesc_get_rx_owner, + .release_tx_desc = ndesc_release_tx_desc, + .prepare_tx_desc = ndesc_prepare_tx_desc, + .clear_tx_ic = ndesc_clear_tx_ic, + .close_tx_desc = ndesc_close_tx_desc, + .get_tx_ls = ndesc_get_tx_ls, + .set_tx_owner = ndesc_set_tx_owner, + .set_rx_owner = ndesc_set_rx_owner, + .get_rx_frame_len = ndesc_get_rx_frame_len, + .enable_tx_timestamp = ndesc_enable_tx_timestamp, + .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, + .get_timestamp = ndesc_get_timestamp, + .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c new file mode 100644 index 000000000..5dd50c6cd --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -0,0 +1,142 @@ +/******************************************************************************* + Specialised functions for managing Ring mode + + Copyright(C) 2011 STMicroelectronics Ltd + 
+ It defines all the functions used to handle the normal/enhanced + descriptors in case of the DMA is configured to work in chained or + in ring mode. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include "stmmac.h" + +static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)p; + unsigned int txsize = priv->dma_tx_size; + unsigned int entry = priv->cur_tx % txsize; + struct dma_desc *desc; + unsigned int nopaged_len = skb_headlen(skb); + unsigned int bmax, len; + + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + + if (priv->plat->enh_desc) + bmax = BUF_SIZE_8KiB; + else + bmax = BUF_SIZE_2KiB; + + len = nopaged_len - bmax; + + if (nopaged_len > BUF_SIZE_8KiB) { + + desc->des2 = dma_map_single(priv->device, skb->data, + bmax, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + + priv->tx_skbuff_dma[entry].buf = desc->des2; + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, + STMMAC_RING_MODE); + wmb(); + priv->tx_skbuff[entry] = NULL; + entry = (++priv->cur_tx) % txsize; + + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + + desc->des2 = dma_map_single(priv->device, skb->data + bmax, + len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, + STMMAC_RING_MODE); + wmb(); + priv->hw->desc->set_tx_owner(desc); + } else { + desc->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + return -1; + priv->tx_skbuff_dma[entry].buf = desc->des2; + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, + STMMAC_RING_MODE); + } + + return entry; +} + +static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) +{ + unsigned int ret = 0; + + if (len >= BUF_SIZE_4KiB) + ret = 1; + + return ret; +} + +static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + + /* Fill DES3 in case of RING mode */ + if (priv->dma_buf_sz >= BUF_SIZE_8KiB) + p->des3 = p->des2 + BUF_SIZE_8KiB; +} + +/* In ring mode we need to fill the desc3 because it is used as buffer */ +static void stmmac_init_desc3(struct dma_desc *p) +{ + p->des3 = p->des2 + BUF_SIZE_8KiB; +} + +static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +{ + if (unlikely(p->des3)) + p->des3 = 0; 
+} + +static int stmmac_set_16kib_bfsize(int mtu) +{ + int ret = 0; + if (unlikely(mtu >= BUF_SIZE_8KiB)) + ret = BUF_SIZE_16KiB; + return ret; +} + +const struct stmmac_mode_ops ring_mode_ops = { + .is_jumbo_frm = stmmac_is_jumbo_frm, + .jumbo_frm = stmmac_jumbo_frm, + .refill_desc3 = stmmac_refill_desc3, + .init_desc3 = stmmac_init_desc3, + .clean_desc3 = stmmac_clean_desc3, + .set_16kib_bfsize = stmmac_set_16kib_bfsize, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h new file mode 100644 index 000000000..73bab983e --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -0,0 +1,144 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __STMMAC_H__ +#define __STMMAC_H__ + +#define STMMAC_RESOURCE_NAME "stmmaceth" +#define DRV_MODULE_VERSION "March_2013" + +#include +#include +#include +#include +#include "common.h" +#include +#include + +struct stmmac_tx_info { + dma_addr_t buf; + bool map_as_page; +}; + +struct stmmac_priv { + /* Frequently used values are kept adjacent for cache effect */ + struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; + struct dma_desc *dma_tx; + struct sk_buff **tx_skbuff; + unsigned int cur_tx; + unsigned int dirty_tx; + unsigned int dma_tx_size; + u32 tx_count_frames; + u32 tx_coal_frames; + u32 tx_coal_timer; + struct stmmac_tx_info *tx_skbuff_dma; + dma_addr_t dma_tx_phy; + int tx_coalesce; + int hwts_tx_en; + spinlock_t tx_lock; + bool tx_path_in_lpi_mode; + struct timer_list txtimer; + + struct dma_desc *dma_rx ____cacheline_aligned_in_smp; + struct dma_extended_desc *dma_erx; + struct sk_buff **rx_skbuff; + unsigned int cur_rx; + unsigned int dirty_rx; + unsigned int dma_rx_size; + unsigned int dma_buf_sz; + u32 rx_riwt; + int hwts_rx_en; + dma_addr_t *rx_skbuff_dma; + dma_addr_t dma_rx_phy; + + struct napi_struct napi ____cacheline_aligned_in_smp; + + void __iomem *ioaddr; + struct net_device *dev; + struct device *device; + struct mac_device_info *hw; + spinlock_t lock; + + struct phy_device *phydev ____cacheline_aligned_in_smp; + int oldlink; + int speed; + int oldduplex; + unsigned int flow_ctrl; + unsigned int pause; + struct mii_bus *mii; + int mii_irq[PHY_MAX_ADDR]; + + struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp; + struct plat_stmmacenet_data *plat; + struct dma_features dma_cap; + struct stmmac_counters mmc; + int hw_cap_support; + int synopsys_id; + u32 msg_enable; + int wolopts; + int wol_irq; + struct clk *stmmac_clk; + struct clk *pclk; + struct reset_control *stmmac_rst; + int clk_csr; + struct 
timer_list eee_ctrl_timer; + int lpi_irq; + int eee_enabled; + int eee_active; + int tx_lpi_timer; + int pcs; + unsigned int mode; + int extend_desc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_ops; + unsigned int default_addend; + struct clk *clk_ptp_ref; + unsigned int clk_ptp_rate; + u32 adv_ts; + int use_riwt; + int irq_wake; + spinlock_t ptp_lock; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; + struct dentry *dbgfs_rings_status; + struct dentry *dbgfs_dma_cap; +#endif +}; + +int stmmac_mdio_unregister(struct net_device *ndev); +int stmmac_mdio_register(struct net_device *ndev); +int stmmac_mdio_reset(struct mii_bus *mii); +void stmmac_set_ethtool_ops(struct net_device *netdev); + +int stmmac_ptp_register(struct stmmac_priv *priv); +void stmmac_ptp_unregister(struct stmmac_priv *priv); +int stmmac_resume(struct net_device *ndev); +int stmmac_suspend(struct net_device *ndev); +int stmmac_dvr_remove(struct net_device *ndev); +struct stmmac_priv *stmmac_dvr_probe(struct device *device, + struct plat_stmmacenet_data *plat_dat, + void __iomem *addr); +void stmmac_disable_eee_mode(struct stmmac_priv *priv); +bool stmmac_eee_init(struct stmmac_priv *priv); + +#endif /* __STMMAC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c new file mode 100644 index 000000000..771cda2a4 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -0,0 +1,778 @@ +/******************************************************************************* + STMMAC Ethtool support + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
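Looking back at stmmac_jumbo_frm() in ring_mode.c above: when the linear part of the skb does not fit in one descriptor, it is mapped as two segments, the first of bmax bytes and the second of the remaining nopaged_len - bmax bytes, with des3 used as the extra buffer pointer. A standalone sketch of just the length split; it deliberately treats bmax as the split threshold, which is a simplification, and every name in it is local to the example:

#include <stdio.h>

#define EX_BUF_SIZE_8KiB 8192	/* per-descriptor limit with enhanced descriptors */

/* Split a linear skb length across (at most) two descriptors: the first
 * segment carries up to bmax bytes, the second carries the remainder. */
static void split_jumbo(unsigned int head_len, unsigned int bmax,
			unsigned int *seg1, unsigned int *seg2)
{
	if (head_len > bmax) {
		*seg1 = bmax;
		*seg2 = head_len - bmax;
	} else {
		*seg1 = head_len;
		*seg2 = 0;
	}
}

int main(void)
{
	unsigned int s1, s2;

	split_jumbo(9000, EX_BUF_SIZE_8KiB, &s1, &s2);
	printf("first desc: %u bytes, second desc: %u bytes\n", s1, s2);
	return 0;
}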
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "dwmac_dma.h" + +#define REG_SPACE_SIZE 0x1054 +#define MAC100_ETHTOOL_NAME "st_mac100" +#define GMAC_ETHTOOL_NAME "st_gmac" + +struct stmmac_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define STMMAC_STAT(m) \ + { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ + offsetof(struct stmmac_priv, xstats.m)} + +static const struct stmmac_stats stmmac_gstrings_stats[] = { + /* Transmit errors */ + STMMAC_STAT(tx_underflow), + STMMAC_STAT(tx_carrier), + STMMAC_STAT(tx_losscarrier), + STMMAC_STAT(vlan_tag), + STMMAC_STAT(tx_deferred), + STMMAC_STAT(tx_vlan), + STMMAC_STAT(tx_jabber), + STMMAC_STAT(tx_frame_flushed), + STMMAC_STAT(tx_payload_error), + STMMAC_STAT(tx_ip_header_error), + /* Receive errors */ + STMMAC_STAT(rx_desc), + STMMAC_STAT(sa_filter_fail), + STMMAC_STAT(overflow_error), + STMMAC_STAT(ipc_csum_error), + STMMAC_STAT(rx_collision), + STMMAC_STAT(rx_crc), + STMMAC_STAT(dribbling_bit), + STMMAC_STAT(rx_length), + STMMAC_STAT(rx_mii), + STMMAC_STAT(rx_multicast), + STMMAC_STAT(rx_gmac_overflow), + STMMAC_STAT(rx_watchdog), + STMMAC_STAT(da_rx_filter_fail), + STMMAC_STAT(sa_rx_filter_fail), + STMMAC_STAT(rx_missed_cntr), + STMMAC_STAT(rx_overflow_cntr), + STMMAC_STAT(rx_vlan), + /* Tx/Rx IRQ error info */ + STMMAC_STAT(tx_undeflow_irq), + STMMAC_STAT(tx_process_stopped_irq), + STMMAC_STAT(tx_jabber_irq), + STMMAC_STAT(rx_overflow_irq), + STMMAC_STAT(rx_buf_unav_irq), + STMMAC_STAT(rx_process_stopped_irq), + STMMAC_STAT(rx_watchdog_irq), + STMMAC_STAT(tx_early_irq), + STMMAC_STAT(fatal_bus_error_irq), + /* Tx/Rx IRQ Events */ + STMMAC_STAT(rx_early_irq), + STMMAC_STAT(threshold), + STMMAC_STAT(tx_pkt_n), + STMMAC_STAT(rx_pkt_n), + STMMAC_STAT(normal_irq_n), + STMMAC_STAT(rx_normal_irq_n), + STMMAC_STAT(napi_poll), + STMMAC_STAT(tx_normal_irq_n), + STMMAC_STAT(tx_clean), + STMMAC_STAT(tx_reset_ic_bit), + STMMAC_STAT(irq_receive_pmt_irq_n), + /* MMC info */ + STMMAC_STAT(mmc_tx_irq_n), + STMMAC_STAT(mmc_rx_irq_n), + STMMAC_STAT(mmc_rx_csum_offload_irq_n), + /* EEE */ + STMMAC_STAT(irq_tx_path_in_lpi_mode_n), + STMMAC_STAT(irq_tx_path_exit_lpi_mode_n), + STMMAC_STAT(irq_rx_path_in_lpi_mode_n), + STMMAC_STAT(irq_rx_path_exit_lpi_mode_n), + STMMAC_STAT(phy_eee_wakeup_error_n), + /* Extended RDES status */ + STMMAC_STAT(ip_hdr_err), + STMMAC_STAT(ip_payload_err), + STMMAC_STAT(ip_csum_bypassed), + STMMAC_STAT(ipv4_pkt_rcvd), + STMMAC_STAT(ipv6_pkt_rcvd), + STMMAC_STAT(rx_msg_type_ext_no_ptp), + STMMAC_STAT(rx_msg_type_sync), + STMMAC_STAT(rx_msg_type_follow_up), + STMMAC_STAT(rx_msg_type_delay_req), + STMMAC_STAT(rx_msg_type_delay_resp), + STMMAC_STAT(rx_msg_type_pdelay_req), + STMMAC_STAT(rx_msg_type_pdelay_resp), + STMMAC_STAT(rx_msg_type_pdelay_follow_up), + STMMAC_STAT(ptp_frame_type), + STMMAC_STAT(ptp_ver), + STMMAC_STAT(timestamp_dropped), + STMMAC_STAT(av_pkt_rcvd), + STMMAC_STAT(av_tagged_pkt_rcvd), + STMMAC_STAT(vlan_tag_priority_val), + STMMAC_STAT(l3_filter_match), + STMMAC_STAT(l4_filter_match), + STMMAC_STAT(l3_l4_filter_no_match), + /* PCS */ + STMMAC_STAT(irq_pcs_ane_n), + STMMAC_STAT(irq_pcs_link_n), + STMMAC_STAT(irq_rgmii_n), +}; +#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) + +/* HW MAC Management counters (if supported) */ +#define STMMAC_MMC_STAT(m) \ + { #m, FIELD_SIZEOF(struct stmmac_counters, 
m), \ + offsetof(struct stmmac_priv, mmc.m)} + +static const struct stmmac_stats stmmac_mmc[] = { + STMMAC_MMC_STAT(mmc_tx_octetcount_gb), + STMMAC_MMC_STAT(mmc_tx_framecount_gb), + STMMAC_MMC_STAT(mmc_tx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_tx_multicastframe_g), + STMMAC_MMC_STAT(mmc_tx_64_octets_gb), + STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_tx_unicast_gb), + STMMAC_MMC_STAT(mmc_tx_multicast_gb), + STMMAC_MMC_STAT(mmc_tx_broadcast_gb), + STMMAC_MMC_STAT(mmc_tx_underflow_error), + STMMAC_MMC_STAT(mmc_tx_singlecol_g), + STMMAC_MMC_STAT(mmc_tx_multicol_g), + STMMAC_MMC_STAT(mmc_tx_deferred), + STMMAC_MMC_STAT(mmc_tx_latecol), + STMMAC_MMC_STAT(mmc_tx_exesscol), + STMMAC_MMC_STAT(mmc_tx_carrier_error), + STMMAC_MMC_STAT(mmc_tx_octetcount_g), + STMMAC_MMC_STAT(mmc_tx_framecount_g), + STMMAC_MMC_STAT(mmc_tx_excessdef), + STMMAC_MMC_STAT(mmc_tx_pause_frame), + STMMAC_MMC_STAT(mmc_tx_vlan_frame_g), + STMMAC_MMC_STAT(mmc_rx_framecount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_gb), + STMMAC_MMC_STAT(mmc_rx_octetcount_g), + STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), + STMMAC_MMC_STAT(mmc_rx_multicastframe_g), + STMMAC_MMC_STAT(mmc_rx_crc_error), + STMMAC_MMC_STAT(mmc_rx_align_error), + STMMAC_MMC_STAT(mmc_rx_run_error), + STMMAC_MMC_STAT(mmc_rx_jabber_error), + STMMAC_MMC_STAT(mmc_rx_undersize_g), + STMMAC_MMC_STAT(mmc_rx_oversize_g), + STMMAC_MMC_STAT(mmc_rx_64_octets_gb), + STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb), + STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb), + STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb), + STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb), + STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb), + STMMAC_MMC_STAT(mmc_rx_unicast_g), + STMMAC_MMC_STAT(mmc_rx_length_error), + STMMAC_MMC_STAT(mmc_rx_autofrangetype), + STMMAC_MMC_STAT(mmc_rx_pause_frames), + STMMAC_MMC_STAT(mmc_rx_fifo_overflow), + STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb), + STMMAC_MMC_STAT(mmc_rx_watchdog_error), + STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask), + STMMAC_MMC_STAT(mmc_rx_ipc_intr), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag), + STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl), + STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets), + STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets), + STMMAC_MMC_STAT(mmc_rx_ipv6_gd), + STMMAC_MMC_STAT(mmc_rx_ipv6_hderr), + STMMAC_MMC_STAT(mmc_rx_ipv6_nopay), + STMMAC_MMC_STAT(mmc_rx_udp_gd), + STMMAC_MMC_STAT(mmc_rx_udp_err), + STMMAC_MMC_STAT(mmc_rx_tcp_gd), + STMMAC_MMC_STAT(mmc_rx_tcp_err), + STMMAC_MMC_STAT(mmc_rx_icmp_gd), + STMMAC_MMC_STAT(mmc_rx_icmp_err), + STMMAC_MMC_STAT(mmc_rx_udp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_udp_err_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_tcp_err_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets), + STMMAC_MMC_STAT(mmc_rx_icmp_err_octets), +}; +#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc) + +static void stmmac_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if 
(priv->plat->has_gmac) + strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver)); + else + strlcpy(info->driver, MAC100_ETHTOOL_NAME, + sizeof(info->driver)); + + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static int stmmac_ethtool_getsettings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phy = priv->phydev; + int rc; + + if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + struct rgmii_adv adv; + + if (!priv->xstats.pcs_link) { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + return 0; + } + cmd->duplex = priv->xstats.pcs_duplex; + + ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); + + /* Get and convert ADV/LP_ADV from the HW AN registers */ + if (!priv->hw->mac->get_adv) + return -EOPNOTSUPP; /* should never happen indeed */ + + priv->hw->mac->get_adv(priv->hw, &adv); + + /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ + + if (adv.pause & STMMAC_PCS_PAUSE) + cmd->advertising |= ADVERTISED_Pause; + if (adv.pause & STMMAC_PCS_ASYM_PAUSE) + cmd->advertising |= ADVERTISED_Asym_Pause; + if (adv.lp_pause & STMMAC_PCS_PAUSE) + cmd->lp_advertising |= ADVERTISED_Pause; + if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE) + cmd->lp_advertising |= ADVERTISED_Asym_Pause; + + /* Reg49[3] always set because ANE is always supported */ + cmd->autoneg = ADVERTISED_Autoneg; + cmd->supported |= SUPPORTED_Autoneg; + cmd->advertising |= ADVERTISED_Autoneg; + cmd->lp_advertising |= ADVERTISED_Autoneg; + + if (adv.duplex) { + cmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Full); + cmd->advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + } else { + cmd->supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_100baseT_Half | + SUPPORTED_10baseT_Half); + cmd->advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + } + if (adv.lp_duplex) + cmd->lp_advertising |= (ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full); + else + cmd->lp_advertising |= (ADVERTISED_1000baseT_Half | + ADVERTISED_100baseT_Half | + ADVERTISED_10baseT_Half); + cmd->port = PORT_OTHER; + + return 0; + } + + if (phy == NULL) { + pr_err("%s: %s: PHY is not registered\n", + __func__, dev->name); + return -ENODEV; + } + if (!netif_running(dev)) { + pr_err("%s: interface is disabled: we cannot track " + "link speed / duplex setting\n", dev->name); + return -EBUSY; + } + cmd->transceiver = XCVR_INTERNAL; + rc = phy_ethtool_gset(phy, cmd); + return rc; +} + +static int stmmac_ethtool_setsettings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phy = priv->phydev; + int rc; + + if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; + + /* Only support ANE */ + if (cmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + spin_lock(&priv->lock); + if (priv->hw->mac->ctrl_ane) + priv->hw->mac->ctrl_ane(priv->hw, 1); + spin_unlock(&priv->lock); + + return 0; + } + + spin_lock(&priv->lock); + rc = phy_ethtool_sset(phy, cmd); + spin_unlock(&priv->lock); + + return rc; +} + +static u32 
stmmac_ethtool_getmsglevel(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + return priv->msg_enable; +} + +static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct stmmac_priv *priv = netdev_priv(dev); + priv->msg_enable = level; + +} + +static int stmmac_check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EBUSY; + return 0; +} + +static int stmmac_ethtool_get_regs_len(struct net_device *dev) +{ + return REG_SPACE_SIZE; +} + +static void stmmac_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + int i; + u32 *reg_space = (u32 *) space; + + struct stmmac_priv *priv = netdev_priv(dev); + + memset(reg_space, 0x0, REG_SPACE_SIZE); + + if (!priv->plat->has_gmac) { + /* MAC registers */ + for (i = 0; i < 12; i++) + reg_space[i] = readl(priv->ioaddr + (i * 4)); + /* DMA registers */ + for (i = 0; i < 9; i++) + reg_space[i + 12] = + readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); + reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR); + reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR); + } else { + /* MAC registers */ + for (i = 0; i < 55; i++) + reg_space[i] = readl(priv->ioaddr + (i * 4)); + /* DMA registers */ + for (i = 0; i < 22; i++) + reg_space[i + 55] = + readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4))); + } +} + +static void +stmmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + if (priv->pcs) /* FIXME */ + return; + + pause->rx_pause = 0; + pause->tx_pause = 0; + pause->autoneg = priv->phydev->autoneg; + + if (priv->flow_ctrl & FLOW_RX) + pause->rx_pause = 1; + if (priv->flow_ctrl & FLOW_TX) + pause->tx_pause = 1; + +} + +static int +stmmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + struct phy_device *phy = priv->phydev; + int new_pause = FLOW_OFF; + int ret = 0; + + if (priv->pcs) /* FIXME */ + return -EOPNOTSUPP; + + if (pause->rx_pause) + new_pause |= FLOW_RX; + if (pause->tx_pause) + new_pause |= FLOW_TX; + + priv->flow_ctrl = new_pause; + phy->autoneg = pause->autoneg; + + if (phy->autoneg) { + if (netif_running(netdev)) + ret = phy_start_aneg(phy); + } else + priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, + priv->flow_ctrl, priv->pause); + return ret; +} + +static void stmmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, u64 *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int i, j = 0; + + /* Update the DMA HW counters for dwmac10/100 */ + if (!priv->plat->has_gmac) + priv->hw->dma->dma_diagnostic_fr(&dev->stats, + (void *) &priv->xstats, + priv->ioaddr); + else { + /* If supported, for new GMAC chips expose the MMC counters */ + if (priv->dma_cap.rmon) { + dwmac_mmc_read(priv->ioaddr, &priv->mmc); + + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + char *p; + p = (char *)priv + stmmac_mmc[i].stat_offset; + + data[j++] = (stmmac_mmc[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p) : + (*(u32 *)p); + } + } + if (priv->eee_enabled) { + int val = phy_get_eee_err(priv->phydev); + if (val) + priv->xstats.phy_eee_wakeup_error_n = val; + } + } + for (i = 0; i < STMMAC_STATS_LEN; i++) { + char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; + data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
(*(u64 *)p) : (*(u32 *)p); + } +} + +static int stmmac_get_sset_count(struct net_device *netdev, int sset) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + int len; + + switch (sset) { + case ETH_SS_STATS: + len = STMMAC_STATS_LEN; + + if (priv->dma_cap.rmon) + len += STMMAC_MMC_STATS_LEN; + + return len; + default: + return -EOPNOTSUPP; + } +} + +static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + struct stmmac_priv *priv = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_STATS: + if (priv->dma_cap.rmon) + for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { + memcpy(p, stmmac_mmc[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < STMMAC_STATS_LEN; i++) { + memcpy(p, stmmac_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } +} + +/* Currently only support WOL through Magic packet. */ +static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + spin_lock_irq(&priv->lock); + if (device_can_wakeup(priv->device)) { + wol->supported = WAKE_MAGIC | WAKE_UCAST; + wol->wolopts = priv->wolopts; + } + spin_unlock_irq(&priv->lock); +} + +static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 support = WAKE_MAGIC | WAKE_UCAST; + + /* By default almost all GMAC devices support the WoL via + * magic frame but we can disable it if the HW capability + * register shows no support for pmt_magic_frame. */ + if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame)) + wol->wolopts &= ~WAKE_MAGIC; + + if (!device_can_wakeup(priv->device)) + return -EINVAL; + + if (wol->wolopts & ~support) + return -EINVAL; + + if (wol->wolopts) { + pr_info("stmmac: wakeup enable\n"); + device_set_wakeup_enable(priv->device, 1); + enable_irq_wake(priv->wol_irq); + } else { + device_set_wakeup_enable(priv->device, 0); + disable_irq_wake(priv->wol_irq); + } + + spin_lock_irq(&priv->lock); + priv->wolopts = wol->wolopts; + spin_unlock_irq(&priv->lock); + + return 0; +} + +static int stmmac_ethtool_op_get_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + edata->eee_enabled = priv->eee_enabled; + edata->eee_active = priv->eee_active; + edata->tx_lpi_timer = priv->tx_lpi_timer; + + return phy_ethtool_get_eee(priv->phydev, edata); +} + +static int stmmac_ethtool_op_set_eee(struct net_device *dev, + struct ethtool_eee *edata) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + priv->eee_enabled = edata->eee_enabled; + + if (!priv->eee_enabled) + stmmac_disable_eee_mode(priv); + else { + /* We are asking for enabling the EEE but it is safe + * to verify all by invoking the eee_init function. + * In case of failure it will return an error. 
+ */ + priv->eee_enabled = stmmac_eee_init(priv); + if (!priv->eee_enabled) + return -EOPNOTSUPP; + + /* Do not change tx_lpi_timer in case of failure */ + priv->tx_lpi_timer = edata->tx_lpi_timer; + } + + return phy_ethtool_set_eee(priv->phydev, edata); +} + +static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) +{ + unsigned long clk = clk_get_rate(priv->stmmac_clk); + + if (!clk) + return 0; + + return (usec * (clk / 1000000)) / 256; +} + +static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) +{ + unsigned long clk = clk_get_rate(priv->stmmac_clk); + + if (!clk) + return 0; + + return (riwt * 256) / (clk / 1000000); +} + +static int stmmac_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + ec->tx_coalesce_usecs = priv->tx_coal_timer; + ec->tx_max_coalesced_frames = priv->tx_coal_frames; + + if (priv->use_riwt) + ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv); + + return 0; +} + +static int stmmac_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int rx_riwt; + + /* Check not supported parameters */ + if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) || + (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) || + (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) || + (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) || + (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) || + (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) || + (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || + (ec->tx_max_coalesced_frames_irq) || + (ec->stats_block_coalesce_usecs) || + (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval)) + return -EOPNOTSUPP; + + if (ec->rx_coalesce_usecs == 0) + return -EINVAL; + + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) || + (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES)) + return -EINVAL; + + rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv); + + if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT)) + return -EINVAL; + else if (!priv->use_riwt) + return -EOPNOTSUPP; + + /* Only copy relevant parameters, ignore all others. 
*/ + priv->tx_coal_frames = ec->tx_max_coalesced_frames; + priv->tx_coal_timer = ec->tx_coalesce_usecs; + priv->rx_riwt = rx_riwt; + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt); + + return 0; +} + +static int stmmac_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) { + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (priv->ptp_clock) + info->phc_index = ptp_clock_index(priv->ptp_clock); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + return 0; + } else + return ethtool_op_get_ts_info(dev, info); +} + +static const struct ethtool_ops stmmac_ethtool_ops = { + .begin = stmmac_check_if_running, + .get_drvinfo = stmmac_ethtool_getdrvinfo, + .get_settings = stmmac_ethtool_getsettings, + .set_settings = stmmac_ethtool_setsettings, + .get_msglevel = stmmac_ethtool_getmsglevel, + .set_msglevel = stmmac_ethtool_setmsglevel, + .get_regs = stmmac_ethtool_gregs, + .get_regs_len = stmmac_ethtool_get_regs_len, + .get_link = ethtool_op_get_link, + .get_pauseparam = stmmac_get_pauseparam, + .set_pauseparam = stmmac_set_pauseparam, + .get_ethtool_stats = stmmac_get_ethtool_stats, + .get_strings = stmmac_get_strings, + .get_wol = stmmac_get_wol, + .set_wol = stmmac_set_wol, + .get_eee = stmmac_ethtool_op_get_eee, + .set_eee = stmmac_ethtool_op_set_eee, + .get_sset_count = stmmac_get_sset_count, + .get_ts_info = stmmac_get_ts_info, + .get_coalesce = stmmac_get_coalesce, + .set_coalesce = stmmac_set_coalesce, +}; + +void stmmac_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &stmmac_ethtool_ops; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c new file mode 100644 index 000000000..76ad214b4 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -0,0 +1,148 @@ +/******************************************************************************* + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This implements all the API for managing HW timestamp & PTP. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
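The two helpers above, stmmac_usec2riwt() and stmmac_riwt2usec(), convert between microseconds and the RX watchdog (RIWT) unit used by stmmac_set_coalesce(), where one RIWT count corresponds to 256 cycles of the CSR clock. A standalone restatement of the same integer arithmetic; the 250 MHz clock rate is only an example value:

#include <stdio.h>

/* RIWT counts RX-watchdog time in units of 256 CSR clock cycles:
 *   riwt = usec * (clk_hz / 1000000) / 256
 *   usec = riwt * 256 / (clk_hz / 1000000)
 */
static unsigned int usec2riwt(unsigned int usec, unsigned long clk_hz)
{
	return (usec * (clk_hz / 1000000)) / 256;
}

static unsigned int riwt2usec(unsigned int riwt, unsigned long clk_hz)
{
	return (riwt * 256) / (clk_hz / 1000000);
}

int main(void)
{
	unsigned long clk_hz = 250000000UL;	/* example CSR clock: 250 MHz */
	unsigned int riwt = usec2riwt(100, clk_hz);

	/* The round trip is lossy because of the integer divisions. */
	printf("100 us -> riwt %u -> %u us\n", riwt, riwt2usec(riwt, clk_hz));
	return 0;
}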
+ + Author: Rayagond Kokatanur + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include "common.h" +#include "stmmac_ptp.h" + +static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) +{ + writel(data, ioaddr + PTP_TCR); +} + +static void stmmac_config_sub_second_increment(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + PTP_TCR); + unsigned long data; + + /* Convert the ptp_clock to nano second + * formula = (1/ptp_clock) * 1000000000 + * where, ptp_clock = 50MHz. + */ + data = (1000000000ULL / 50000000); + + /* 0.465ns accuracy */ + if (!(value & PTP_TCR_TSCTRLSSR)) + data = (data * 1000) / 465; + + writel(data, ioaddr + PTP_SSIR); +} + +static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +{ + int limit; + u32 value; + + writel(sec, ioaddr + PTP_STSUR); + writel(nsec, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSINIT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time initialize to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) +{ + u32 value; + int limit; + + writel(addend, ioaddr + PTP_TAR); + /* issue command to update the addend value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSADDREG; + writel(value, ioaddr + PTP_TCR); + + /* wait for present addend update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub) +{ + u32 value; + int limit; + + writel(sec, ioaddr + PTP_STSUR); + writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec), + ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= PTP_TCR_TSUPDT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time adjust/update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static u64 stmmac_get_systime(void __iomem *ioaddr) +{ + u64 ns; + + ns = readl(ioaddr + PTP_STNSR); + /* convert sec time value to nanosecond */ + ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + + return ns; +} + +const struct stmmac_hwtimestamp stmmac_ptp = { + .config_hw_tstamping = stmmac_config_hw_tstamping, + .init_systime = stmmac_init_systime, + .config_sub_second_increment = stmmac_config_sub_second_increment, + .config_addend = stmmac_config_addend, + .adjust_systime = stmmac_adjust_systime, + .get_systime = stmmac_get_systime, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c new file mode 100644 index 000000000..2c5ce2bac --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -0,0 +1,3184 @@ +/******************************************************************************* + This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. + ST Ethernet IPs are built around a Synopsys IP Core. 
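stmmac_config_sub_second_increment() above derives the value written to PTP_SSIR from an assumed 50 MHz PTP reference: the increment is the clock period in nanoseconds (20 ns), rescaled by 1000/465 when the sub-second register uses binary rollover (PTP_TCR_TSCTRLSSR cleared), since its LSB is then worth roughly 0.465 ns. A minimal restatement of that arithmetic; the rollover mode is passed as a plain flag purely for illustration:

#include <stdio.h>

/* Sub-second increment, following the computation in
 * stmmac_config_sub_second_increment() above. */
static unsigned long sub_second_increment(unsigned long ptp_clock_hz,
					  int digital_rollover)
{
	unsigned long data = 1000000000UL / ptp_clock_hz;	/* period in ns */

	if (!digital_rollover)
		data = (data * 1000) / 465;	/* LSB is ~0.465 ns */

	return data;
}

int main(void)
{
	printf("digital rollover: %lu\n", sub_second_increment(50000000UL, 1));
	printf("binary rollover:  %lu\n", sub_second_increment(50000000UL, 0));
	return 0;
}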
+ + Copyright(C) 2007-2011 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro + + Documentation available at: + http://www.stlinux.com + Support available at: + https://bugzilla.stlinux.com/ +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DEBUG_FS +#include +#include +#endif /* CONFIG_DEBUG_FS */ +#include +#include "stmmac_ptp.h" +#include "stmmac.h" +#include + +#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) + +/* Module parameters */ +#define TX_TIMEO 5000 +static int watchdog = TX_TIMEO; +module_param(watchdog, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); + +static int debug = -1; +module_param(debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); + +static int phyaddr = -1; +module_param(phyaddr, int, S_IRUGO); +MODULE_PARM_DESC(phyaddr, "Physical device address"); + +#define DMA_TX_SIZE 256 +static int dma_txsize = DMA_TX_SIZE; +module_param(dma_txsize, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); + +#define DMA_RX_SIZE 256 +static int dma_rxsize = DMA_RX_SIZE; +module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); + +static int flow_ctrl = FLOW_OFF; +module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); + +static int pause = PAUSE_TIME; +module_param(pause, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(pause, "Flow Control Pause Time"); + +#define TC_DEFAULT 64 +static int tc = TC_DEFAULT; +module_param(tc, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tc, "DMA threshold control value"); + +#define DEFAULT_BUFSIZE 1536 +static int buf_sz = DEFAULT_BUFSIZE; +module_param(buf_sz, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(buf_sz, "DMA buffer size"); + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); + +#define STMMAC_DEFAULT_LPI_TIMER 1000 +static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; +module_param(eee_timer, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); +#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) + +/* By default the driver will use the ring mode to manage tx and rx descriptors + * but passing this value so user can force to use the chain instead of the ring + */ +static unsigned int chain_mode; +module_param(chain_mode, int, S_IRUGO); +MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); 
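STMMAC_LPI_T() above (and the microsecond-based STMMAC_COAL_TIMER() defined a little further down) only turn a relative delay into an absolute jiffies deadline. A rough user-space illustration of that conversion, assuming HZ=250 purely to get concrete numbers; the helpers are simplified stand-ins for msecs_to_jiffies()/usecs_to_jiffies(), not the kernel implementations:

#include <stdio.h>

#define HZ 250	/* assumed tick rate, for illustration only */

/* Round up so a non-zero delay never becomes a zero-jiffy timeout. */
static unsigned long ms_to_jiffies(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;
}

static unsigned long us_to_jiffies(unsigned long us)
{
	return (us * HZ + 999999) / 1000000;
}

int main(void)
{
	/* STMMAC_LPI_T(eee_timer): a 1000 ms LPI timer is 250 ticks at HZ=250 */
	printf("eee_timer: %lu jiffies\n", ms_to_jiffies(1000));
	/* a short (e.g. 40 us) TX coalesce timer still needs one full tick */
	printf("tx coalesce: %lu jiffies\n", us_to_jiffies(40));
	return 0;
}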
+ +static irqreturn_t stmmac_interrupt(int irq, void *dev_id); + +#ifdef CONFIG_DEBUG_FS +static int stmmac_init_fs(struct net_device *dev); +static void stmmac_exit_fs(struct net_device *dev); +#endif + +#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) + +/** + * stmmac_verify_args - verify the driver parameters. + * Description: it checks the driver parameters and set a default in case of + * errors. + */ +static void stmmac_verify_args(void) +{ + if (unlikely(watchdog < 0)) + watchdog = TX_TIMEO; + if (unlikely(dma_rxsize < 0)) + dma_rxsize = DMA_RX_SIZE; + if (unlikely(dma_txsize < 0)) + dma_txsize = DMA_TX_SIZE; + if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) + buf_sz = DEFAULT_BUFSIZE; + if (unlikely(flow_ctrl > 1)) + flow_ctrl = FLOW_AUTO; + else if (likely(flow_ctrl < 0)) + flow_ctrl = FLOW_OFF; + if (unlikely((pause < 0) || (pause > 0xffff))) + pause = PAUSE_TIME; + if (eee_timer < 0) + eee_timer = STMMAC_DEFAULT_LPI_TIMER; +} + +/** + * stmmac_clk_csr_set - dynamically set the MDC clock + * @priv: driver private structure + * Description: this is to dynamically set the MDC clock according to the csr + * clock input. + * Note: + * If a specific clk_csr value is passed from the platform + * this means that the CSR Clock Range selection cannot be + * changed at run-time and it is fixed (as reported in the driver + * documentation). Viceversa the driver will try to set the MDC + * clock dynamically according to the actual clock input. + */ +static void stmmac_clk_csr_set(struct stmmac_priv *priv) +{ + u32 clk_rate; + + clk_rate = clk_get_rate(priv->stmmac_clk); + + /* Platform provided default clk_csr would be assumed valid + * for all other cases except for the below mentioned ones. + * For values higher than the IEEE 802.3 specified frequency + * we can not estimate the proper divider as it is not known + * the frequency of clk_csr_i. So we do not change the default + * divider. + */ + if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { + if (clk_rate < CSR_F_35M) + priv->clk_csr = STMMAC_CSR_20_35M; + else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) + priv->clk_csr = STMMAC_CSR_35_60M; + else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) + priv->clk_csr = STMMAC_CSR_60_100M; + else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) + priv->clk_csr = STMMAC_CSR_100_150M; + else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) + priv->clk_csr = STMMAC_CSR_150_250M; + else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + priv->clk_csr = STMMAC_CSR_250_300M; + } +} + +static void print_pkt(unsigned char *buf, int len) +{ + pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); +} + +/* minimum number of free TX descriptors required to wake up TX process */ +#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) + +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) +{ + return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; +} + +/** + * stmmac_hw_fix_mac_speed - callback for speed selection + * @priv: driver private structure + * Description: on some platforms (e.g. ST), some HW system configuraton + * registers have to be set according to the link speed negotiated. 
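stmmac_tx_avail() above relies on cur_tx and dirty_tx being free-running counters, so the subtraction stays correct across ring wrap without any modulo on the counters themselves. A worked example of the arithmetic with a hypothetical 256-entry ring:

#include <stdio.h>

/* Free TX descriptors for a free-running producer/consumer pair,
 * mirroring stmmac_tx_avail(); the 256-entry ring is an example. */
static unsigned int tx_avail(unsigned int dirty_tx, unsigned int cur_tx,
			     unsigned int ring_size)
{
	return dirty_tx + ring_size - cur_tx - 1;
}

int main(void)
{
	/* 10 descriptors in flight: 256 - 10 - 1 = 245 still free */
	printf("%u\n", tx_avail(290, 300, 256));
	/* an empty ring keeps one slot unused: 255 free */
	printf("%u\n", tx_avail(300, 300, 256));
	return 0;
}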
+ */ +static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) +{ + struct phy_device *phydev = priv->phydev; + + if (likely(priv->plat->fix_mac_speed)) + priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); +} + +/** + * stmmac_enable_eee_mode - check and enter in LPI mode + * @priv: driver private structure + * Description: this function is to verify and enter in LPI mode in case of + * EEE. + */ +static void stmmac_enable_eee_mode(struct stmmac_priv *priv) +{ + /* Check and enter in LPI mode */ + if ((priv->dirty_tx == priv->cur_tx) && + (priv->tx_path_in_lpi_mode == false)) + priv->hw->mac->set_eee_mode(priv->hw); +} + +/** + * stmmac_disable_eee_mode - disable and exit from LPI mode + * @priv: driver private structure + * Description: this function is to exit and disable EEE in case of + * LPI state is true. This is called by the xmit. + */ +void stmmac_disable_eee_mode(struct stmmac_priv *priv) +{ + priv->hw->mac->reset_eee_mode(priv->hw); + del_timer_sync(&priv->eee_ctrl_timer); + priv->tx_path_in_lpi_mode = false; +} + +/** + * stmmac_eee_ctrl_timer - EEE TX SW timer. + * @arg : data hook + * Description: + * if there is no data transfer and if we are not in LPI state, + * then MAC Transmitter can be moved to LPI state. + */ +static void stmmac_eee_ctrl_timer(unsigned long arg) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)arg; + + stmmac_enable_eee_mode(priv); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); +} + +/** + * stmmac_eee_init - init EEE + * @priv: driver private structure + * Description: + * if the GMAC supports the EEE (from the HW cap reg) and the phy device + * can also manage EEE, this function enable the LPI state and start related + * timer. + */ +bool stmmac_eee_init(struct stmmac_priv *priv) +{ + char *phy_bus_name = priv->plat->phy_bus_name; + unsigned long flags; + bool ret = false; + + /* Using PCS we cannot dial with the phy registers at this stage + * so we do not support extra feature like EEE. + */ + if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) || + (priv->pcs == STMMAC_PCS_RTBI)) + goto out; + + /* Never init EEE in case of a switch is attached */ + if (phy_bus_name && (!strcmp(phy_bus_name, "fixed"))) + goto out; + + /* MAC core supports the EEE feature. */ + if (priv->dma_cap.eee) { + int tx_lpi_timer = priv->tx_lpi_timer; + + /* Check if the PHY supports EEE */ + if (phy_init_eee(priv->phydev, 1)) { + /* To manage at run-time if the EEE cannot be supported + * anymore (for example because the lp caps have been + * changed). + * In that case the driver disable own timers. 
+ */ + spin_lock_irqsave(&priv->lock, flags); + if (priv->eee_active) { + pr_debug("stmmac: disable EEE\n"); + del_timer_sync(&priv->eee_ctrl_timer); + priv->hw->mac->set_eee_timer(priv->hw, 0, + tx_lpi_timer); + } + priv->eee_active = 0; + spin_unlock_irqrestore(&priv->lock, flags); + goto out; + } + /* Activate the EEE and start timers */ + spin_lock_irqsave(&priv->lock, flags); + if (!priv->eee_active) { + priv->eee_active = 1; + setup_timer(&priv->eee_ctrl_timer, + stmmac_eee_ctrl_timer, + (unsigned long)priv); + mod_timer(&priv->eee_ctrl_timer, + STMMAC_LPI_T(eee_timer)); + + priv->hw->mac->set_eee_timer(priv->hw, + STMMAC_DEFAULT_LIT_LS, + tx_lpi_timer); + } + /* Set HW EEE according to the speed */ + priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); + + ret = true; + spin_unlock_irqrestore(&priv->lock, flags); + + pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); + } +out: + return ret; +} + +/* stmmac_get_tx_hwtstamp - get HW TX timestamps + * @priv: driver private structure + * @entry : descriptor index to be used. + * @skb : the socket buffer + * Description : + * This function will read timestamp from the descriptor & pass it to stack. + * and also perform some sanity checks. + */ +static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, + unsigned int entry, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps shhwtstamp; + u64 ns; + void *desc = NULL; + + if (!priv->hwts_tx_en) + return; + + /* exit if skb doesn't support hw tstamp */ + if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) + return; + + if (priv->adv_ts) + desc = (priv->dma_etx + entry); + else + desc = (priv->dma_tx + entry); + + /* check tx tstamp status */ + if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) + return; + + /* get the valid tstamp */ + ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns); + /* pass tstamp to stack */ + skb_tstamp_tx(skb, &shhwtstamp); + + return; +} + +/* stmmac_get_rx_hwtstamp - get HW RX timestamps + * @priv: driver private structure + * @entry : descriptor index to be used. + * @skb : the socket buffer + * Description : + * This function will read received packet's timestamp from the descriptor + * and pass it to stack. It also perform some sanity checks. + */ +static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, + unsigned int entry, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamp = NULL; + u64 ns; + void *desc = NULL; + + if (!priv->hwts_rx_en) + return; + + if (priv->adv_ts) + desc = (priv->dma_erx + entry); + else + desc = (priv->dma_rx + entry); + + /* exit if rx tstamp is not valid */ + if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) + return; + + /* get valid tstamp */ + ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + shhwtstamp = skb_hwtstamps(skb); + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * stmmac_hwtstamp_ioctl - control hardware timestamping. + * @dev: device pointer. + * @ifr: An IOCTL specefic structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * Description: + * This function configures the MAC to enable/disable both outgoing(TX) + * and incoming(RX) packets time stamping based on user input. + * Return Value: + * 0 on success and an appropriate -ve integer on failure. 
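The ioctl handler that follows is the kernel side of the standard SIOCSHWTSTAMP interface. A minimal user-space counterpart is sketched below, assuming an interface called eth0 and requesting TX timestamps plus PTPv2 event RX filtering with the uapi structures from <linux/net_tstamp.h>; it is not taken from the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (fd < 0 || ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}
	/* the driver may have adjusted rx_filter; report what is in effect */
	printf("rx_filter in effect: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}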
+ */ +static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + struct timespec now; + u64 temp = 0; + u32 ptp_v2 = 0; + u32 tstamp_all = 0; + u32 ptp_over_ipv4_udp = 0; + u32 ptp_over_ipv6_udp = 0; + u32 ptp_over_ethernet = 0; + u32 snap_type_sel = 0; + u32 ts_master_en = 0; + u32 ts_event_en = 0; + u32 value = 0; + + if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { + netdev_alert(priv->dev, "No support for HW time stamping\n"); + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return -EOPNOTSUPP; + } + + if (copy_from_user(&config, ifr->ifr_data, + sizeof(struct hwtstamp_config))) + return -EFAULT; + + pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + if (priv->adv_ts) { + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* time stamp no incoming packet at all */ + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* take time stamp for all event messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + /* PTP v1, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + /* PTP v1, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + /* PTP v2, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* PTP v2, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* PTP v2, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + /* PTP v2/802.AS1 any layer, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for all event messages */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = 
PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* PTP v2/802.AS1, any layer, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* PTP v2/802.AS1, any layer, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + ptp_v2 = PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = PTP_TCR_TSMSTRENA; + ts_event_en = PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_ALL: + /* time stamp any incoming packet */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_all = PTP_TCR_TSENALL; + break; + + default: + return -ERANGE; + } + } else { + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + default: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + } + } + priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); + priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; + + if (!priv->hwts_tx_en && !priv->hwts_rx_en) + priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); + else { + value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | + tstamp_all | ptp_v2 | ptp_over_ethernet | + ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | + ts_master_en | snap_type_sel); + + priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); + + /* program Sub Second Increment reg */ + priv->hw->ptp->config_sub_second_increment(priv->ioaddr); + + /* calculate default added value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = clk_ptp_ref_i/50MHz + * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; + * NOTE: clk_ptp_ref_i should be >= 50MHz to + * achieve 20ns accuracy. + * + * 2^x * y == (y << x), hence + * 2^32 * 50000000 ==> (50000000 << 32) + */ + temp = (u64) (50000000ULL << 32); + priv->default_addend = div_u64(temp, priv->clk_ptp_rate); + priv->hw->ptp->config_addend(priv->ioaddr, + priv->default_addend); + + /* initialize system time */ + getnstimeofday(&now); + priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec, + now.tv_nsec); + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(struct hwtstamp_config)) ? -EFAULT : 0; +} + +/** + * stmmac_init_ptp - init PTP + * @priv: driver private structure + * Description: this is to verify if the HW supports the PTPv1 or PTPv2. + * This is done by looking at the HW cap. register. + * This function also registers the ptp driver. 
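The addend formula quoted in the hwtstamp ioctl above is easy to check with concrete numbers. With the nominal 50 MHz target and, say, a hypothetical 62.5 MHz clk_ptp_ref_i, the accumulator has to advance at 50/62.5 = 0.8 of full rate, so the addend comes out close to 0.8 * 2^32:

#include <stdio.h>
#include <stdint.h>

/* addend = (2^32 * 50 MHz) / clk_ptp_rate, as in the comment above.
 * 62.5 MHz is only an example reference-clock rate. */
int main(void)
{
	uint64_t temp = 50000000ULL << 32;
	uint32_t clk_ptp_rate = 62500000;
	uint32_t addend = (uint32_t)(temp / clk_ptp_rate);

	printf("addend = 0x%08x (%.3f of 2^32)\n",
	       addend, (double)addend / 4294967296.0);
	return 0;
}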
+ */ +static int stmmac_init_ptp(struct stmmac_priv *priv) +{ + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + /* Fall-back to main clock in case of no PTP ref is passed */ + priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); + if (IS_ERR(priv->clk_ptp_ref)) { + priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); + priv->clk_ptp_ref = NULL; + } else { + clk_prepare_enable(priv->clk_ptp_ref); + priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); + } + + priv->adv_ts = 0; + if (priv->dma_cap.atime_stamp && priv->extend_desc) + priv->adv_ts = 1; + + if (netif_msg_hw(priv) && priv->dma_cap.time_stamp) + pr_debug("IEEE 1588-2002 Time Stamp supported\n"); + + if (netif_msg_hw(priv) && priv->adv_ts) + pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n"); + + priv->hw->ptp = &stmmac_ptp; + priv->hwts_tx_en = 0; + priv->hwts_rx_en = 0; + + return stmmac_ptp_register(priv); +} + +static void stmmac_release_ptp(struct stmmac_priv *priv) +{ + if (priv->clk_ptp_ref) + clk_disable_unprepare(priv->clk_ptp_ref); + stmmac_ptp_unregister(priv); +} + +/** + * stmmac_adjust_link - adjusts the link parameters + * @dev: net device structure + * Description: this is the helper called by the physical abstraction layer + * drivers to communicate the phy link status. According the speed and duplex + * this driver can invoke registered glue-logic as well. + * It also invoke the eee initialization because it could happen when switch + * on different networks (that are eee capable). + */ +static void stmmac_adjust_link(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + unsigned long flags; + int new_state = 0; + unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; + + if (phydev == NULL) + return; + + spin_lock_irqsave(&priv->lock, flags); + + if (phydev->link) { + u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); + + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != priv->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) + ctrl &= ~priv->hw->link.duplex; + else + ctrl |= priv->hw->link.duplex; + priv->oldduplex = phydev->duplex; + } + /* Flow Control operation */ + if (phydev->pause) + priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex, + fc, pause_time); + + if (phydev->speed != priv->speed) { + new_state = 1; + switch (phydev->speed) { + case 1000: + if (likely(priv->plat->has_gmac)) + ctrl &= ~priv->hw->link.port; + stmmac_hw_fix_mac_speed(priv); + break; + case 100: + case 10: + if (priv->plat->has_gmac) { + ctrl |= priv->hw->link.port; + if (phydev->speed == SPEED_100) { + ctrl |= priv->hw->link.speed; + } else { + ctrl &= ~(priv->hw->link.speed); + } + } else { + ctrl &= ~priv->hw->link.port; + } + stmmac_hw_fix_mac_speed(priv); + break; + default: + if (netif_msg_link(priv)) + pr_warn("%s: Speed (%d) not 10/100\n", + dev->name, phydev->speed); + break; + } + + priv->speed = phydev->speed; + } + + writel(ctrl, priv->ioaddr + MAC_CTRL_REG); + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->speed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* At this stage, it could be needed to setup the EEE or adjust some + * MAC related HW registers. 
+ */ + priv->eee_enabled = stmmac_eee_init(priv); +} + +/** + * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported + * @priv: driver private structure + * Description: this is to verify if the HW supports the PCS. + * Physical Coding Sublayer (PCS) interface that can be used when the MAC is + * configured for the TBI, RTBI, or SGMII PHY interface. + */ +static void stmmac_check_pcs_mode(struct stmmac_priv *priv) +{ + int interface = priv->plat->interface; + + if (priv->dma_cap.pcs) { + if ((interface == PHY_INTERFACE_MODE_RGMII) || + (interface == PHY_INTERFACE_MODE_RGMII_ID) || + (interface == PHY_INTERFACE_MODE_RGMII_RXID) || + (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { + pr_debug("STMMAC: PCS RGMII support enable\n"); + priv->pcs = STMMAC_PCS_RGMII; + } else if (interface == PHY_INTERFACE_MODE_SGMII) { + pr_debug("STMMAC: PCS SGMII support enable\n"); + priv->pcs = STMMAC_PCS_SGMII; + } + } +} + +/** + * stmmac_init_phy - PHY initialization + * @dev: net device structure + * Description: it initializes the driver's PHY state, and attaches the PHY + * to the mac driver. + * Return value: + * 0 on success + */ +static int stmmac_init_phy(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phydev; + char phy_id_fmt[MII_BUS_ID_SIZE + 3]; + char bus_id[MII_BUS_ID_SIZE]; + int interface = priv->plat->interface; + int max_speed = priv->plat->max_speed; + priv->oldlink = 0; + priv->speed = 0; + priv->oldduplex = -1; + + if (priv->plat->phy_bus_name) + snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", + priv->plat->phy_bus_name, priv->plat->bus_id); + else + snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", + priv->plat->bus_id); + + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, + priv->plat->phy_addr); + pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt); + + phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface); + + if (IS_ERR(phydev)) { + pr_err("%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* Stop Advertising 1000BASE Capability if interface is not GMII */ + if ((interface == PHY_INTERFACE_MODE_MII) || + (interface == PHY_INTERFACE_MODE_RMII) || + (max_speed < 1000 && max_speed > 0)) + phydev->advertising &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + /* + * Broken HW is sometimes missing the pull-up resistor on the + * MDIO line, which results in reads to non-existent devices returning + * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent + * device as well. + * Note: phydev->phy_id is the result of reading the UID PHY registers. + */ + if (phydev->phy_id == 0) { + phy_disconnect(phydev); + return -ENODEV; + } + pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" + " Link = %d\n", dev->name, phydev->phy_id, phydev->link); + + priv->phydev = phydev; + + return 0; +} + +/** + * stmmac_display_ring - display ring + * @head: pointer to the head of the ring passed. + * @size: size of the ring. + * @extend_desc: to verify if extended descriptors are used. + * Description: display the control/status and buffer descriptors. 
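stmmac_init_phy() above builds the phylib device name from the MDIO bus id plus the PHY address via PHY_ID_FMT, which in <linux/phy.h> expands to "%s:%02x". For example, platform bus_id 0 with phy_addr 1 should give "stmmac-0:01"; a quick stand-alone check of that formatting (buffer sizes and values are illustrative):

#include <stdio.h>

/* Same shape as stmmac_init_phy(): "<mdio bus id>:<addr>". */
int main(void)
{
	char bus_id[32];
	char phy_id[48];
	int plat_bus_id = 0, phy_addr = 1;

	snprintf(bus_id, sizeof(bus_id), "stmmac-%x", plat_bus_id);
	snprintf(phy_id, sizeof(phy_id), "%s:%02x", bus_id, phy_addr);
	printf("%s\n", phy_id);	/* stmmac-0:01 */
	return 0;
}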
+ */ +static void stmmac_display_ring(void *head, int size, int extend_desc) +{ + int i; + struct dma_extended_desc *ep = (struct dma_extended_desc *)head; + struct dma_desc *p = (struct dma_desc *)head; + + for (i = 0; i < size; i++) { + u64 x; + if (extend_desc) { + x = *(u64 *) ep; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), + (unsigned int)x, (unsigned int)(x >> 32), + ep->basic.des2, ep->basic.des3); + ep++; + } else { + x = *(u64 *) p; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", + i, (unsigned int)virt_to_phys(p), + (unsigned int)x, (unsigned int)(x >> 32), + p->des2, p->des3); + p++; + } + pr_info("\n"); + } +} + +static void stmmac_display_rings(struct stmmac_priv *priv) +{ + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + + if (priv->extend_desc) { + pr_info("Extended RX descriptor ring:\n"); + stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + pr_info("Extended TX descriptor ring:\n"); + stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + } else { + pr_info("RX descriptor ring:\n"); + stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + pr_info("TX descriptor ring:\n"); + stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + } +} + +static int stmmac_set_bfsize(int mtu, int bufsize) +{ + int ret = bufsize; + + if (mtu >= BUF_SIZE_4KiB) + ret = BUF_SIZE_8KiB; + else if (mtu >= BUF_SIZE_2KiB) + ret = BUF_SIZE_4KiB; + else if (mtu > DEFAULT_BUFSIZE) + ret = BUF_SIZE_2KiB; + else + ret = DEFAULT_BUFSIZE; + + return ret; +} + +/** + * stmmac_clear_descriptors - clear descriptors + * @priv: driver private structure + * Description: this function is called to clear the tx and rx descriptors + * in case of both basic and extended descriptors are used. + */ +static void stmmac_clear_descriptors(struct stmmac_priv *priv) +{ + int i; + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + + /* Clear the Rx/Tx descriptors */ + for (i = 0; i < rxsize; i++) + if (priv->extend_desc) + priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, + priv->use_riwt, priv->mode, + (i == rxsize - 1)); + else + priv->hw->desc->init_rx_desc(&priv->dma_rx[i], + priv->use_riwt, priv->mode, + (i == rxsize - 1)); + for (i = 0; i < txsize; i++) + if (priv->extend_desc) + priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, + priv->mode, + (i == txsize - 1)); + else + priv->hw->desc->init_tx_desc(&priv->dma_tx[i], + priv->mode, + (i == txsize - 1)); +} + +/** + * stmmac_init_rx_buffers - init the RX descriptor buffer. + * @priv: driver private structure + * @p: descriptor pointer + * @i: descriptor index + * @flags: gfp flag. + * Description: this function is called to allocate a receive buffer, perform + * the DMA mapping and init the descriptor. 
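stmmac_set_bfsize() above quantizes the MTU into one of four DMA buffer sizes. Assuming the usual BUF_SIZE_* values of 2048/4096/8192 bytes and the 1536-byte default, the tiers are: mtu <= 1536 keeps 1536, 1537..2047 gets 2 KiB, 2048..4095 gets 4 KiB, and anything from 4096 up gets 8 KiB. A quick stand-alone check:

#include <stdio.h>

#define DEFAULT_BUFSIZE	1536
#define BUF_SIZE_2KiB	2048
#define BUF_SIZE_4KiB	4096
#define BUF_SIZE_8KiB	8192

/* Same tiering as stmmac_set_bfsize(); sizes above are assumed values. */
static int set_bfsize(int mtu)
{
	if (mtu >= BUF_SIZE_4KiB)
		return BUF_SIZE_8KiB;
	if (mtu >= BUF_SIZE_2KiB)
		return BUF_SIZE_4KiB;
	if (mtu > DEFAULT_BUFSIZE)
		return BUF_SIZE_2KiB;
	return DEFAULT_BUFSIZE;
}

int main(void)
{
	int mtus[] = { 1500, 1600, 3000, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		printf("mtu %4d -> %d byte buffers\n", mtus[i], set_bfsize(mtus[i]));
	return 0;	/* 1536, 2048, 4096, 8192 */
}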
+ */ +static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, + int i, gfp_t flags) +{ + struct sk_buff *skb; + + skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, + flags); + if (!skb) { + pr_err("%s: Rx init fails; skb is NULL\n", __func__); + return -ENOMEM; + } + skb_reserve(skb, NET_IP_ALIGN); + priv->rx_skbuff[i] = skb; + priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, + priv->dma_buf_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { + pr_err("%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + p->des2 = priv->rx_skbuff_dma[i]; + + if ((priv->hw->mode->init_desc3) && + (priv->dma_buf_sz == BUF_SIZE_16KiB)) + priv->hw->mode->init_desc3(p); + + return 0; +} + +static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) +{ + if (priv->rx_skbuff[i]) { + dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], + priv->dma_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx_skbuff[i]); + } + priv->rx_skbuff[i] = NULL; +} + +/** + * init_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * @flags: gfp flag. + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. It suppors the chained and ring + * modes. + */ +static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) +{ + int i; + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + unsigned int bfsize = 0; + int ret = -ENOMEM; + + if (priv->hw->mode->set_16kib_bfsize) + bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); + + if (bfsize < BUF_SIZE_16KiB) + bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); + + priv->dma_buf_sz = bfsize; + + if (netif_msg_probe(priv)) + pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__, + txsize, rxsize, bfsize); + + if (netif_msg_probe(priv)) { + pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, + (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); + + /* RX INITIALIZATION */ + pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); + } + for (i = 0; i < rxsize; i++) { + struct dma_desc *p; + if (priv->extend_desc) + p = &((priv->dma_erx + i)->basic); + else + p = priv->dma_rx + i; + + ret = stmmac_init_rx_buffers(priv, p, i, flags); + if (ret) + goto err_init_rx_buffers; + + if (netif_msg_probe(priv)) + pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], + priv->rx_skbuff[i]->data, + (unsigned int)priv->rx_skbuff_dma[i]); + } + priv->cur_rx = 0; + priv->dirty_rx = (unsigned int)(i - rxsize); + buf_sz = bfsize; + + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) { + priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, + rxsize, 1); + priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, + txsize, 1); + } else { + priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, + rxsize, 0); + priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, + txsize, 0); + } + } + + /* TX INITIALIZATION */ + for (i = 0; i < txsize; i++) { + struct dma_desc *p; + if (priv->extend_desc) + p = &((priv->dma_etx + i)->basic); + else + p = priv->dma_tx + i; + p->des2 = 0; + priv->tx_skbuff_dma[i].buf = 0; + priv->tx_skbuff_dma[i].map_as_page = false; + priv->tx_skbuff[i] = NULL; + } + + priv->dirty_tx = 0; + priv->cur_tx = 0; + netdev_reset_queue(priv->dev); + + stmmac_clear_descriptors(priv); + + if (netif_msg_hw(priv)) + 
stmmac_display_rings(priv); + + return 0; +err_init_rx_buffers: + while (--i >= 0) + stmmac_free_rx_buffers(priv, i); + return ret; +} + +static void dma_free_rx_skbufs(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_rx_size; i++) + stmmac_free_rx_buffers(priv, i); +} + +static void dma_free_tx_skbufs(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_tx_size; i++) { + struct dma_desc *p; + + if (priv->extend_desc) + p = &((priv->dma_etx + i)->basic); + else + p = priv->dma_tx + i; + + if (priv->tx_skbuff_dma[i].buf) { + if (priv->tx_skbuff_dma[i].map_as_page) + dma_unmap_page(priv->device, + priv->tx_skbuff_dma[i].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[i].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + } + + if (priv->tx_skbuff[i] != NULL) { + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + priv->tx_skbuff_dma[i].buf = 0; + priv->tx_skbuff_dma[i].map_as_page = false; + } + } +} + +/** + * alloc_dma_desc_resources - alloc TX/RX resources. + * @priv: private structure + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. + */ +static int alloc_dma_desc_resources(struct stmmac_priv *priv) +{ + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + int ret = -ENOMEM; + + priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), + GFP_KERNEL); + if (!priv->rx_skbuff_dma) + return -ENOMEM; + + priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!priv->rx_skbuff) + goto err_rx_skbuff; + + priv->tx_skbuff_dma = kmalloc_array(txsize, + sizeof(*priv->tx_skbuff_dma), + GFP_KERNEL); + if (!priv->tx_skbuff_dma) + goto err_tx_skbuff_dma; + + priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!priv->tx_skbuff) + goto err_tx_skbuff; + + if (priv->extend_desc) { + priv->dma_erx = dma_alloc_coherent(priv->device, rxsize * + sizeof(struct + dma_extended_desc), + &priv->dma_rx_phy, + GFP_KERNEL); + if (!priv->dma_erx) + goto err_dma; + + priv->dma_etx = dma_alloc_coherent(priv->device, txsize * + sizeof(struct + dma_extended_desc), + &priv->dma_tx_phy, + GFP_KERNEL); + if (!priv->dma_etx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + goto err_dma; + } + } else { + priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * + sizeof(struct dma_desc), + &priv->dma_rx_phy, + GFP_KERNEL); + if (!priv->dma_rx) + goto err_dma; + + priv->dma_tx = dma_alloc_coherent(priv->device, txsize * + sizeof(struct dma_desc), + &priv->dma_tx_phy, + GFP_KERNEL); + if (!priv->dma_tx) { + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + goto err_dma; + } + } + + return 0; + +err_dma: + kfree(priv->tx_skbuff); +err_tx_skbuff: + kfree(priv->tx_skbuff_dma); +err_tx_skbuff_dma: + kfree(priv->rx_skbuff); +err_rx_skbuff: + kfree(priv->rx_skbuff_dma); + return ret; +} + +static void free_dma_desc_resources(struct stmmac_priv *priv) +{ + /* Release the DMA TX/RX socket buffers */ + dma_free_rx_skbufs(priv); + dma_free_tx_skbufs(priv); + + /* Free DMA regions of consistent memory previously allocated */ + if (!priv->extend_desc) { + 
dma_free_coherent(priv->device, + priv->dma_tx_size * sizeof(struct dma_desc), + priv->dma_tx, priv->dma_tx_phy); + dma_free_coherent(priv->device, + priv->dma_rx_size * sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + } else { + dma_free_coherent(priv->device, priv->dma_tx_size * + sizeof(struct dma_extended_desc), + priv->dma_etx, priv->dma_tx_phy); + dma_free_coherent(priv->device, priv->dma_rx_size * + sizeof(struct dma_extended_desc), + priv->dma_erx, priv->dma_rx_phy); + } + kfree(priv->rx_skbuff_dma); + kfree(priv->rx_skbuff); + kfree(priv->tx_skbuff_dma); + kfree(priv->tx_skbuff); +} + +/** + * stmmac_dma_operation_mode - HW DMA operation mode + * @priv: driver private structure + * Description: it is used for configuring the DMA operation mode register in + * order to program the tx/rx DMA thresholds or Store-And-Forward mode. + */ +static void stmmac_dma_operation_mode(struct stmmac_priv *priv) +{ + int rxfifosz = priv->plat->rx_fifo_size; + + if (priv->plat->force_thresh_dma_mode) + priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz); + else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { + /* + * In case of GMAC, SF mode can be enabled + * to perform the TX COE in HW. This depends on: + * 1) TX COE if actually supported + * 2) There is no bugged Jumbo frame support + * that needs to not insert csum in the TDES. + */ + priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE, + rxfifosz); + priv->xstats.threshold = SF_DMA_MODE; + } else + priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE, + rxfifosz); +} + +/** + * stmmac_tx_clean - to manage the transmission completion + * @priv: driver private structure + * Description: it reclaims the transmit resources after transmission completes. + */ +static void stmmac_tx_clean(struct stmmac_priv *priv) +{ + unsigned int txsize = priv->dma_tx_size; + unsigned int bytes_compl = 0, pkts_compl = 0; + + spin_lock(&priv->tx_lock); + + priv->xstats.tx_clean++; + + while (priv->dirty_tx != priv->cur_tx) { + int last; + unsigned int entry = priv->dirty_tx % txsize; + struct sk_buff *skb = priv->tx_skbuff[entry]; + struct dma_desc *p; + + if (priv->extend_desc) + p = (struct dma_desc *)(priv->dma_etx + entry); + else + p = priv->dma_tx + entry; + + /* Check if the descriptor is owned by the DMA. */ + if (priv->hw->desc->get_tx_owner(p)) + break; + + /* Verify tx error by looking at the last segment. 
*/ + last = priv->hw->desc->get_tx_ls(p); + if (likely(last)) { + int tx_error = + priv->hw->desc->tx_status(&priv->dev->stats, + &priv->xstats, p, + priv->ioaddr); + if (likely(tx_error == 0)) { + priv->dev->stats.tx_packets++; + priv->xstats.tx_pkt_n++; + } else + priv->dev->stats.tx_errors++; + + stmmac_get_tx_hwtstamp(priv, entry, skb); + } + if (netif_msg_tx_done(priv)) + pr_debug("%s: curr %d, dirty %d\n", __func__, + priv->cur_tx, priv->dirty_tx); + + if (likely(priv->tx_skbuff_dma[entry].buf)) { + if (priv->tx_skbuff_dma[entry].map_as_page) + dma_unmap_page(priv->device, + priv->tx_skbuff_dma[entry].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + else + dma_unmap_single(priv->device, + priv->tx_skbuff_dma[entry].buf, + priv->hw->desc->get_tx_len(p), + DMA_TO_DEVICE); + priv->tx_skbuff_dma[entry].buf = 0; + priv->tx_skbuff_dma[entry].map_as_page = false; + } + priv->hw->mode->clean_desc3(priv, p); + + if (likely(skb != NULL)) { + pkts_compl++; + bytes_compl += skb->len; + dev_consume_skb_any(skb); + priv->tx_skbuff[entry] = NULL; + } + + priv->hw->desc->release_tx_desc(p, priv->mode); + + priv->dirty_tx++; + } + + netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); + + if (unlikely(netif_queue_stopped(priv->dev) && + stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_queue_stopped(priv->dev) && + stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { + if (netif_msg_tx_done(priv)) + pr_debug("%s: restart transmit\n", __func__); + netif_wake_queue(priv->dev); + } + netif_tx_unlock(priv->dev); + } + + if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { + stmmac_enable_eee_mode(priv); + mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); + } + spin_unlock(&priv->tx_lock); +} + +static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) +{ + priv->hw->dma->enable_dma_irq(priv->ioaddr); +} + +static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) +{ + priv->hw->dma->disable_dma_irq(priv->ioaddr); +} + +/** + * stmmac_tx_err - to manage the tx error + * @priv: driver private structure + * Description: it cleans the descriptors and restarts the transmission + * in case of transmission errors. + */ +static void stmmac_tx_err(struct stmmac_priv *priv) +{ + int i; + int txsize = priv->dma_tx_size; + netif_stop_queue(priv->dev); + + priv->hw->dma->stop_tx(priv->ioaddr); + dma_free_tx_skbufs(priv); + for (i = 0; i < txsize; i++) + if (priv->extend_desc) + priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, + priv->mode, + (i == txsize - 1)); + else + priv->hw->desc->init_tx_desc(&priv->dma_tx[i], + priv->mode, + (i == txsize - 1)); + priv->dirty_tx = 0; + priv->cur_tx = 0; + netdev_reset_queue(priv->dev); + priv->hw->dma->start_tx(priv->ioaddr); + + priv->dev->stats.tx_errors++; + netif_wake_queue(priv->dev); +} + +/** + * stmmac_dma_interrupt - DMA ISR + * @priv: driver private structure + * Description: this is the DMA ISR. It is called by the main ISR. + * It calls the dwmac dma routine and schedule poll method in case of some + * work can be done. 
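stmmac_dma_interrupt() below follows the usual NAPI hand-off: the ISR masks the DMA interrupt and schedules the poll routine, which does the real work and re-enables the interrupt once it runs out of packets. A generic sketch of that contract, using made-up mydev_* names rather than the driver's own helpers:

/* Generic NAPI hand-off pattern; mydev_* helpers are placeholders and the
 * private struct layout is assumed, not taken from this driver. */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct mydev_priv {
	struct napi_struct napi;
	void __iomem *ioaddr;
};

void mydev_mask_irq(void __iomem *ioaddr);	/* placeholder */
void mydev_unmask_irq(void __iomem *ioaddr);	/* placeholder */
int mydev_rx(struct mydev_priv *p, int budget);	/* placeholder */

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	struct mydev_priv *p = dev_id;

	if (napi_schedule_prep(&p->napi)) {
		mydev_mask_irq(p->ioaddr);	/* quiet until poll finishes */
		__napi_schedule(&p->napi);
	}
	return IRQ_HANDLED;
}

static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev_priv *p = container_of(napi, struct mydev_priv, napi);
	int work = mydev_rx(p, budget);

	if (work < budget) {
		napi_complete(napi);		/* done for now */
		mydev_unmask_irq(p->ioaddr);	/* next packet interrupts again */
	}
	return work;
}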
+ */ +static void stmmac_dma_interrupt(struct stmmac_priv *priv) +{ + int status; + int rxfifosz = priv->plat->rx_fifo_size; + + status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); + if (likely((status & handle_rx)) || (status & handle_tx)) { + if (likely(napi_schedule_prep(&priv->napi))) { + stmmac_disable_dma_irq(priv); + __napi_schedule(&priv->napi); + } + } + if (unlikely(status & tx_hard_error_bump_tc)) { + /* Try to bump up the dma threshold on this failure */ + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && + (tc <= 256)) { + tc += 64; + if (priv->plat->force_thresh_dma_mode) + priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, + rxfifosz); + else + priv->hw->dma->dma_mode(priv->ioaddr, tc, + SF_DMA_MODE, rxfifosz); + priv->xstats.threshold = tc; + } + } else if (unlikely(status == tx_hard_error)) + stmmac_tx_err(priv); +} + +/** + * stmmac_mmc_setup: setup the Mac Management Counters (MMC) + * @priv: driver private structure + * Description: this masks the MMC irq, in fact, the counters are managed in SW. + */ +static void stmmac_mmc_setup(struct stmmac_priv *priv) +{ + unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | + MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; + + dwmac_mmc_intr_all_mask(priv->ioaddr); + + if (priv->dma_cap.rmon) { + dwmac_mmc_ctrl(priv->ioaddr, mode); + memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + } else + pr_info(" No MAC Management Counters available\n"); +} + +/** + * stmmac_get_synopsys_id - return the SYINID. + * @priv: driver private structure + * Description: this simple function is to decode and return the SYINID + * starting from the HW core register. + */ +static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) +{ + u32 hwid = priv->hw->synopsys_uid; + + /* Check Synopsys Id (not available on old chips) */ + if (likely(hwid)) { + u32 uid = ((hwid & 0x0000ff00) >> 8); + u32 synid = (hwid & 0x000000ff); + + pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n", + uid, synid); + + return synid; + } + return 0; +} + +/** + * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors + * @priv: driver private structure + * Description: select the Enhanced/Alternate or Normal descriptors. + * In case of Enhanced/Alternate, it checks if the extended descriptors are + * supported by the HW capability register. + */ +static void stmmac_selec_desc_mode(struct stmmac_priv *priv) +{ + if (priv->plat->enh_desc) { + pr_info(" Enhanced/Alternate descriptors\n"); + + /* GMAC older than 3.50 has no extended descriptors */ + if (priv->synopsys_id >= DWMAC_CORE_3_50) { + pr_info("\tEnabled extended descriptors\n"); + priv->extend_desc = 1; + } else + pr_warn("Extended descriptors not supported\n"); + + priv->hw->desc = &enh_desc_ops; + } else { + pr_info(" Normal descriptors\n"); + priv->hw->desc = &ndesc_ops; + } +} + +/** + * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. + * @priv: driver private structure + * Description: + * new GMAC chip generations have a new register to indicate the + * presence of the optional feature/functions. + * This can be also used to override the value passed through the + * platform and necessary for old MAC10/100 and GMAC chips. 
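stmmac_get_synopsys_id() above simply splits the hardware version register into a user id (bits 15:8) and the Synopsys core id (bits 7:0). For instance, a hypothetical register value of 0x1037 decodes as user ID 0x10 and core ID 0x37, i.e. roughly a 3.7x GMAC core:

#include <stdio.h>
#include <stdint.h>

/* Same decode as stmmac_get_synopsys_id(); 0x1037 is an example value. */
int main(void)
{
	uint32_t hwid = 0x1037;
	uint32_t uid = (hwid & 0x0000ff00) >> 8;
	uint32_t synid = hwid & 0x000000ff;

	printf("user ID: 0x%x, Synopsys ID: 0x%x\n", uid, synid);
	return 0;
}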
+ */ +static int stmmac_get_hw_features(struct stmmac_priv *priv) +{ + u32 hw_cap = 0; + + if (priv->hw->dma->get_hw_feature) { + hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr); + + priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); + priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; + priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; + priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; + priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5; + priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; + priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; + priv->dma_cap.pmt_remote_wake_up = + (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; + priv->dma_cap.pmt_magic_frame = + (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; + /* MMC */ + priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; + /* IEEE 1588-2002 */ + priv->dma_cap.time_stamp = + (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; + /* IEEE 1588-2008 */ + priv->dma_cap.atime_stamp = + (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; + priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; + /* TX and RX csum */ + priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; + priv->dma_cap.rx_coe_type1 = + (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17; + priv->dma_cap.rx_coe_type2 = + (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; + priv->dma_cap.rxfifo_over_2048 = + (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; + /* TX and RX number of channels */ + priv->dma_cap.number_rx_channel = + (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; + priv->dma_cap.number_tx_channel = + (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; + /* Alternate (enhanced) DESC mode */ + priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; + } + + return hw_cap; +} + +/** + * stmmac_check_ether_addr - check if the MAC addr is valid + * @priv: driver private structure + * Description: + * it is to verify if the MAC address is valid, in case of failures it + * generates a random MAC address + */ +static void stmmac_check_ether_addr(struct stmmac_priv *priv) +{ + if (!is_valid_ether_addr(priv->dev->dev_addr)) { + priv->hw->mac->get_umac_addr(priv->hw, + priv->dev->dev_addr, 0); + if (!is_valid_ether_addr(priv->dev->dev_addr)) + eth_hw_addr_random(priv->dev); + pr_info("%s: device MAC address %pM\n", priv->dev->name, + priv->dev->dev_addr); + } +} + +/** + * stmmac_init_dma_engine - DMA init. + * @priv: driver private structure + * Description: + * It inits the DMA invoking the specific MAC/GMAC callback. + * Some DMA parameters can be passed from the platform; + * in case of these are not passed a default is kept for the MAC or GMAC. + */ +static int stmmac_init_dma_engine(struct stmmac_priv *priv) +{ + int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; + int mixed_burst = 0; + int atds = 0; + + if (priv->plat->dma_cfg) { + pbl = priv->plat->dma_cfg->pbl; + fixed_burst = priv->plat->dma_cfg->fixed_burst; + mixed_burst = priv->plat->dma_cfg->mixed_burst; + burst_len = priv->plat->dma_cfg->burst_len; + } + + if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) + atds = 1; + + return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, + burst_len, priv->dma_tx_phy, + priv->dma_rx_phy, atds); +} + +/** + * stmmac_tx_timer - mitigation sw timer for tx. + * @data: data pointer + * Description: + * This is the timer handler to directly invoke the stmmac_tx_clean. 
+ */ +static void stmmac_tx_timer(unsigned long data) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)data; + + stmmac_tx_clean(priv); +} + +/** + * stmmac_init_tx_coalesce - init tx mitigation options. + * @priv: driver private structure + * Description: + * This inits the transmit coalesce parameters: i.e. timer rate, + * timer handler and default threshold used for enabling the + * interrupt on completion bit. + */ +static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) +{ + priv->tx_coal_frames = STMMAC_TX_FRAMES; + priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; + init_timer(&priv->txtimer); + priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); + priv->txtimer.data = (unsigned long)priv; + priv->txtimer.function = stmmac_tx_timer; + add_timer(&priv->txtimer); +} + +/** + * stmmac_hw_setup - setup mac in a usable state. + * @dev : pointer to the device structure. + * Description: + * this is the main function to setup the HW in a usable state because the + * dma engine is reset, the core registers are configured (e.g. AXI, + * Checksum features, timers). The DMA is ready to start receiving and + * transmitting. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + /* DMA initialization and SW reset */ + ret = stmmac_init_dma_engine(priv); + if (ret < 0) { + pr_err("%s: DMA engine initialization failed\n", __func__); + return ret; + } + + /* Copy the MAC addr into the HW */ + priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); + + /* If required, perform hw setup of the bus. */ + if (priv->plat->bus_setup) + priv->plat->bus_setup(priv->ioaddr); + + /* Initialize the MAC Core */ + priv->hw->mac->core_init(priv->hw, dev->mtu); + + ret = priv->hw->mac->rx_ipc(priv->hw); + if (!ret) { + pr_warn(" RX IPC Checksum Offload disabled\n"); + priv->plat->rx_coe = STMMAC_RX_COE_NONE; + priv->hw->rx_csum = 0; + } + + /* Enable the MAC Rx/Tx */ + stmmac_set_mac(priv->ioaddr, true); + + /* Set the HW DMA mode and the COE */ + stmmac_dma_operation_mode(priv); + + stmmac_mmc_setup(priv); + + if (init_ptp) { + ret = stmmac_init_ptp(priv); + if (ret && ret != -EOPNOTSUPP) + pr_warn("%s: failed PTP initialisation\n", __func__); + } + +#ifdef CONFIG_DEBUG_FS + ret = stmmac_init_fs(dev); + if (ret < 0) + pr_warn("%s: failed debugFS registration\n", __func__); +#endif + /* Start the ball rolling... */ + pr_debug("%s: DMA RX/TX processes started...\n", dev->name); + priv->hw->dma->start_tx(priv->ioaddr); + priv->hw->dma->start_rx(priv->ioaddr); + + /* Dump DMA/MAC registers */ + if (netif_msg_hw(priv)) { + priv->hw->mac->dump_regs(priv->hw); + priv->hw->dma->dump_regs(priv->ioaddr); + } + priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; + + if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { + priv->rx_riwt = MAX_DMA_RIWT; + priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); + } + + if (priv->pcs && priv->hw->mac->ctrl_ane) + priv->hw->mac->ctrl_ane(priv->hw, 0); + + return 0; +} + +/** + * stmmac_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. 
+ */ +static int stmmac_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret; + + stmmac_check_ether_addr(priv); + + if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && + priv->pcs != STMMAC_PCS_RTBI) { + ret = stmmac_init_phy(dev); + if (ret) { + pr_err("%s: Cannot attach to PHY (error: %d)\n", + __func__, ret); + return ret; + } + } + + /* Extra statistics */ + memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); + priv->xstats.threshold = tc; + + /* Create and initialize the TX/RX descriptors chains. */ + priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); + priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); + priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + + ret = alloc_dma_desc_resources(priv); + if (ret < 0) { + pr_err("%s: DMA descriptors allocation failed\n", __func__); + goto dma_desc_error; + } + + ret = init_dma_desc_rings(dev, GFP_KERNEL); + if (ret < 0) { + pr_err("%s: DMA descriptors initialization failed\n", __func__); + goto init_error; + } + + ret = stmmac_hw_setup(dev, true); + if (ret < 0) { + pr_err("%s: Hw setup failed\n", __func__); + goto init_error; + } + + stmmac_init_tx_coalesce(priv); + + if (priv->phydev) + phy_start(priv->phydev); + + /* Request the IRQ lines */ + ret = request_irq(dev->irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); + goto init_error; + } + + /* Request the Wake IRQ in case of another line is used for WoL */ + if (priv->wol_irq != dev->irq) { + ret = request_irq(priv->wol_irq, stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n", + __func__, priv->wol_irq, ret); + goto wolirq_error; + } + } + + /* Request the IRQ lines */ + if (priv->lpi_irq > 0) { + ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, + dev->name, dev); + if (unlikely(ret < 0)) { + pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n", + __func__, priv->lpi_irq, ret); + goto lpiirq_error; + } + } + + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + +lpiirq_error: + if (priv->wol_irq != dev->irq) + free_irq(priv->wol_irq, dev); +wolirq_error: + free_irq(dev->irq, dev); + +init_error: + free_dma_desc_resources(priv); +dma_desc_error: + if (priv->phydev) + phy_disconnect(priv->phydev); + + return ret; +} + +/** + * stmmac_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. 
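stmmac_open() above acquires its resources in order (PHY, descriptor memory, rings, hardware, IRQ lines) and unwinds them in reverse through the goto labels at the bottom. The same shape in miniature, with placeholder acquire/release pairs standing in for the driver's real steps:

#include <stdio.h>

/* Placeholder acquire/release pairs; the labels mirror the reverse-order
 * unwinding used by stmmac_open(). */
static int get_a(void) { return 0; }
static int get_b(void) { return 0; }
static int get_c(void) { return -1; }	/* pretend the last step fails */
static void put_a(void) { puts("release a"); }
static void put_b(void) { puts("release b"); }

static int open_example(void)
{
	int ret;

	ret = get_a();
	if (ret)
		goto err_a;
	ret = get_b();
	if (ret)
		goto err_b;
	ret = get_c();
	if (ret)
		goto err_c;
	return 0;

err_c:
	put_b();
err_b:
	put_a();
err_a:
	return ret;
}

int main(void)
{
	printf("open_example() = %d\n", open_example());
	return 0;
}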
+ */ +static int stmmac_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (priv->eee_enabled) + del_timer_sync(&priv->eee_ctrl_timer); + + /* Stop and disconnect the PHY */ + if (priv->phydev) { + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + netif_stop_queue(dev); + + napi_disable(&priv->napi); + + del_timer_sync(&priv->txtimer); + + /* Free the IRQ lines */ + free_irq(dev->irq, dev); + if (priv->wol_irq != dev->irq) + free_irq(priv->wol_irq, dev); + if (priv->lpi_irq > 0) + free_irq(priv->lpi_irq, dev); + + /* Stop TX/RX DMA and clear the descriptors */ + priv->hw->dma->stop_tx(priv->ioaddr); + priv->hw->dma->stop_rx(priv->ioaddr); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC Rx/Tx */ + stmmac_set_mac(priv->ioaddr, false); + + netif_carrier_off(dev); + +#ifdef CONFIG_DEBUG_FS + stmmac_exit_fs(dev); +#endif + + stmmac_release_ptp(priv); + + return 0; +} + +/** + * stmmac_xmit - Tx entry point of the driver + * @skb : the socket buffer + * @dev : device pointer + * Description : this is the tx entry point of the driver. + * It programs the chain or the ring and supports oversized frames + * and SG feature. + */ +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_tx_size; + unsigned int entry; + int i, csum_insertion = 0, is_jumbo = 0; + int nfrags = skb_shinfo(skb)->nr_frags; + struct dma_desc *desc, *first; + unsigned int nopaged_len = skb_headlen(skb); + unsigned int enh_desc = priv->plat->enh_desc; + + spin_lock(&priv->tx_lock); + + if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { + spin_unlock(&priv->tx_lock); + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. 
*/ + pr_err("%s: Tx Ring full when queue awake\n", __func__); + } + return NETDEV_TX_BUSY; + } + + if (priv->tx_path_in_lpi_mode) + stmmac_disable_eee_mode(priv); + + entry = priv->cur_tx % txsize; + + csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); + + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + + first = desc; + + /* To program the descriptors according to the size of the frame */ + if (enh_desc) + is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); + + if (likely(!is_jumbo)) { + desc->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + goto dma_map_err; + priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, + csum_insertion, priv->mode); + } else { + desc = first; + entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); + if (unlikely(entry < 0)) + goto dma_map_err; + } + + for (i = 0; i < nfrags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int len = skb_frag_size(frag); + + priv->tx_skbuff[entry] = NULL; + entry = (++priv->cur_tx) % txsize; + if (priv->extend_desc) + desc = (struct dma_desc *)(priv->dma_etx + entry); + else + desc = priv->dma_tx + entry; + + desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, desc->des2)) + goto dma_map_err; /* should reuse desc w/o issues */ + + priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].map_as_page = true; + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, + priv->mode); + wmb(); + priv->hw->desc->set_tx_owner(desc); + wmb(); + } + + priv->tx_skbuff[entry] = skb; + + /* Finalize the latest segment. */ + priv->hw->desc->close_tx_desc(desc); + + wmb(); + /* According to the coalesce parameter the IC bit for the latest + * segment could be reset and the timer re-started to invoke the + * stmmac_tx function. This approach takes care about the fragments. 
+ */
+	priv->tx_count_frames += nfrags + 1;
+	if (priv->tx_coal_frames > priv->tx_count_frames) {
+		priv->hw->desc->clear_tx_ic(desc);
+		priv->xstats.tx_reset_ic_bit++;
+		mod_timer(&priv->txtimer,
+			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
+	} else
+		priv->tx_count_frames = 0;
+
+	/* To avoid race condition */
+	priv->hw->desc->set_tx_owner(first);
+	wmb();
+
+	priv->cur_tx++;
+
+	if (netif_msg_pktdata(priv)) {
+		pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
+			 __func__, (priv->cur_tx % txsize),
+			 (priv->dirty_tx % txsize), entry, first, nfrags);
+
+		if (priv->extend_desc)
+			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+		else
+			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+
+		pr_debug(">>> frame to be transmitted: ");
+		print_pkt(skb->data, skb->len);
+	}
+	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+		if (netif_msg_hw(priv))
+			pr_debug("%s: stop transmitted packets\n", __func__);
+		netif_stop_queue(dev);
+	}
+
+	dev->stats.tx_bytes += skb->len;
+
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		     priv->hwts_tx_en)) {
+		/* declare that device is doing timestamping */
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		priv->hw->desc->enable_tx_timestamp(first);
+	}
+
+	if (!priv->hwts_tx_en)
+		skb_tx_timestamp(skb);
+
+	netdev_sent_queue(dev, skb->len);
+	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+	spin_unlock(&priv->tx_lock);
+	return NETDEV_TX_OK;
+
+dma_map_err:
+	spin_unlock(&priv->tx_lock);
+	dev_err(priv->device, "Tx dma map failed\n");
+	dev_kfree_skb(skb);
+	priv->dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
+}
+
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+	struct ethhdr *ehdr;
+	u16 vlanid;
+
+	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
+	    NETIF_F_HW_VLAN_CTAG_RX &&
+	    !__vlan_get_tag(skb, &vlanid)) {
+		/* pop the vlan tag */
+		ehdr = (struct ethhdr *)skb->data;
+		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
+		skb_pull(skb, VLAN_HLEN);
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+	}
+}
+
+
+/**
+ * stmmac_rx_refill - refill used skb preallocated buffers
+ * @priv: driver private structure
+ * Description : this is to reallocate the skb for the reception process
+ * that is based on zero-copy.
+ */
+static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+{
+	unsigned int rxsize = priv->dma_rx_size;
+	int bfsize = priv->dma_buf_sz;
+
+	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+		unsigned int entry = priv->dirty_rx % rxsize;
+		struct dma_desc *p;
+
+		if (priv->extend_desc)
+			p = (struct dma_desc *)(priv->dma_erx + entry);
+		else
+			p = priv->dma_rx + entry;
+
+		if (likely(priv->rx_skbuff[entry] == NULL)) {
+			struct sk_buff *skb;
+
+			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
+
+			if (unlikely(skb == NULL))
+				break;
+
+			priv->rx_skbuff[entry] = skb;
+			priv->rx_skbuff_dma[entry] =
+			    dma_map_single(priv->device, skb->data, bfsize,
+					   DMA_FROM_DEVICE);
+			if (dma_mapping_error(priv->device,
+					      priv->rx_skbuff_dma[entry])) {
+				dev_err(priv->device, "Rx dma map failed\n");
+				dev_kfree_skb(skb);
+				break;
+			}
+			p->des2 = priv->rx_skbuff_dma[entry];
+
+			priv->hw->mode->refill_desc3(priv, p);
+
+			if (netif_msg_rx_status(priv))
+				pr_debug("\trefill entry #%d\n", entry);
+		}
+		wmb();
+		priv->hw->desc->set_rx_owner(p);
+		wmb();
+	}
+}
+
+/**
+ * stmmac_rx - manage the receive process
+ * @priv: driver private structure
+ * @limit: napi budget.
+ * Description : this is the function called by the napi poll method.
+ * It gets all the frames inside the ring. + */ +static int stmmac_rx(struct stmmac_priv *priv, int limit) +{ + unsigned int rxsize = priv->dma_rx_size; + unsigned int entry = priv->cur_rx % rxsize; + unsigned int next_entry; + unsigned int count = 0; + int coe = priv->hw->rx_csum; + + if (netif_msg_rx_status(priv)) { + pr_debug("%s: descriptor ring:\n", __func__); + if (priv->extend_desc) + stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + else + stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + } + while (count < limit) { + int status; + struct dma_desc *p; + + if (priv->extend_desc) + p = (struct dma_desc *)(priv->dma_erx + entry); + else + p = priv->dma_rx + entry; + + if (priv->hw->desc->get_rx_owner(p)) + break; + + count++; + + next_entry = (++priv->cur_rx) % rxsize; + if (priv->extend_desc) + prefetch(priv->dma_erx + next_entry); + else + prefetch(priv->dma_rx + next_entry); + + /* read the status of the incoming frame */ + status = priv->hw->desc->rx_status(&priv->dev->stats, + &priv->xstats, p); + if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) + priv->hw->desc->rx_extended_status(&priv->dev->stats, + &priv->xstats, + priv->dma_erx + + entry); + if (unlikely(status == discard_frame)) { + priv->dev->stats.rx_errors++; + if (priv->hwts_rx_en && !priv->extend_desc) { + /* DESC2 & DESC3 will be overwitten by device + * with timestamp value, hence reinitialize + * them in stmmac_rx_refill() function so that + * device can reuse it. + */ + priv->rx_skbuff[entry] = NULL; + dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, + DMA_FROM_DEVICE); + } + } else { + struct sk_buff *skb; + int frame_len; + + frame_len = priv->hw->desc->get_rx_frame_len(p, coe); + + /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 + * Type frames (LLC/LLC-SNAP) + */ + if (unlikely(status != llc_snap)) + frame_len -= ETH_FCS_LEN; + + if (netif_msg_rx_status(priv)) { + pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", + p, entry, p->des2); + if (frame_len > ETH_FRAME_LEN) + pr_debug("\tframe size %d, COE: %d\n", + frame_len, status); + } + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + pr_err("%s: Inconsistent Rx descriptor chain\n", + priv->dev->name); + priv->dev->stats.rx_dropped++; + break; + } + prefetch(skb->data - NET_IP_ALIGN); + priv->rx_skbuff[entry] = NULL; + + stmmac_get_rx_hwtstamp(priv, entry, skb); + + skb_put(skb, frame_len); + dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, DMA_FROM_DEVICE); + + if (netif_msg_pktdata(priv)) { + pr_debug("frame received (%dbytes)", frame_len); + print_pkt(skb->data, frame_len); + } + + stmmac_rx_vlan(priv->dev, skb); + + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(!coe)) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + napi_gro_receive(&priv->napi, skb); + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += frame_len; + } + entry = next_entry; + } + + stmmac_rx_refill(priv); + + priv->xstats.rx_pkt_n += count; + + return count; +} + +/** + * stmmac_poll - stmmac poll method (NAPI) + * @napi : pointer to the napi structure. + * @budget : maximum number of packets that the current CPU can receive from + * all interfaces. + * Description : + * To look at the incoming frames and clear the tx resources. 
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+	int work_done = 0;
+
+	priv->xstats.napi_poll++;
+	stmmac_tx_clean(priv);
+
+	work_done = stmmac_rx(priv, budget);
+	if (work_done < budget) {
+		napi_complete(napi);
+		stmmac_enable_dma_irq(priv);
+	}
+	return work_done;
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	/* Clear Tx resources and restart transmitting again */
+	stmmac_tx_err(priv);
+}
+
+/**
+ * stmmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_set_rx_mode(struct net_device *dev)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	priv->hw->mac->set_filter(priv->hw, dev);
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and a negative errno value on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int max_mtu;
+
+	if (netif_running(dev)) {
+		pr_err("%s: must be stopped to change its MTU\n", dev->name);
+		return -EBUSY;
+	}
+
+	if (priv->plat->enh_desc)
+		max_mtu = JUMBO_LEN;
+	else
+		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+
+	if (priv->plat->maxmtu < max_mtu)
+		max_mtu = priv->plat->maxmtu;
+
+	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+		return -EINVAL;
+	}
+
+	dev->mtu = new_mtu;
+	netdev_update_features(dev);
+
+	return 0;
+}
+
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+					     netdev_features_t features)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
+		features &= ~NETIF_F_RXCSUM;
+
+	if (!priv->plat->tx_coe)
+		features &= ~NETIF_F_ALL_CSUM;
+
+	/* Some GMAC devices have a bugged Jumbo frame support that
+	 * needs to have the Tx COE disabled for oversized frames
+	 * (due to limited buffer sizes). In this case we disable
+	 * the TX csum insertion in the TDES and do not use SF.
+	 */
+	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+		features &= ~NETIF_F_ALL_CSUM;
+
+	return features;
+}
+
+static int stmmac_set_features(struct net_device *netdev,
+			       netdev_features_t features)
+{
+	struct stmmac_priv *priv = netdev_priv(netdev);
+
+	/* Keep the COE Type if RX csum is supported */
+	if (features & NETIF_F_RXCSUM)
+		priv->hw->rx_csum = priv->plat->rx_coe;
+	else
+		priv->hw->rx_csum = 0;
+	/* No check needed because rx_coe has been set before and it will be
+	 * fixed in case of issue.
+ */
+	priv->hw->mac->rx_ipc(priv->hw);
+
+	return 0;
+}
+
+/**
+ * stmmac_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ *   status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ *   interrupts.
+ */
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (priv->irq_wake)
+		pm_wakeup_event(priv->device, 0);
+
+	if (unlikely(!dev)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	/* To handle GMAC own interrupts */
+	if (priv->plat->has_gmac) {
+		int status = priv->hw->mac->host_irq_status(priv->hw,
+							    &priv->xstats);
+		if (unlikely(status)) {
+			/* For LPI we need to save the tx status */
+			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+				priv->tx_path_in_lpi_mode = true;
+			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+				priv->tx_path_in_lpi_mode = false;
+		}
+	}
+
+	/* To handle DMA interrupts */
+	stmmac_dma_interrupt(priv);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	stmmac_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (!priv->phydev)
+			return -EINVAL;
+		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+		break;
+	case SIOCSHWTSTAMP:
+		ret = stmmac_hwtstamp_ioctl(dev, rq);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+			       struct seq_file *seq)
+{
+	int i;
+	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+	struct dma_desc *p = (struct dma_desc *)head;
+
+	for (i = 0; i < size; i++) {
+		u64 x;
+		if (extend_desc) {
+			x = *(u64 *) ep;
+			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int)virt_to_phys(ep),
+				   (unsigned int)x, (unsigned int)(x >> 32),
+				   ep->basic.des2, ep->basic.des3);
+			ep++;
+		} else {
+			x = *(u64 *) p;
+			/* print the address of the normal descriptor itself */
+			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int)virt_to_phys(p),
+				   (unsigned int)x, (unsigned int)(x >> 32),
+				   p->des2, p->des3);
+			p++;
+		}
+		seq_printf(seq, "\n");
+	}
+}
+
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+	struct net_device *dev = seq->private;
+	struct stmmac_priv *priv = netdev_priv(dev);
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int rxsize = priv->dma_rx_size;
+
+	if (priv->extend_desc) {
+		seq_printf(seq, "Extended RX descriptor ring:\n");
+		sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
+		seq_printf(seq, "Extended TX descriptor ring:\n");
+		sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
+	} else {
+		seq_printf(seq, "RX descriptor ring:\n");
+		sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
+		seq_printf(seq, "TX descriptor ring:\n");
+		sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
+	}
+
+	return 0;
+}
+
+static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_rings_status_fops = {
+	.owner = THIS_MODULE,
+	.open = stmmac_sysfs_ring_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+{
+	struct net_device *dev = seq->private;
+	struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (!priv->hw_cap_support) {
+		seq_printf(seq, "DMA HW features not supported\n");
+		return 0;
+	}
+
+	seq_printf(seq, "==============================\n");
+	seq_printf(seq, "\tDMA HW features\n");
+	seq_printf(seq, "==============================\n");
+
+	seq_printf(seq, "\t10/100 Mbps %s\n",
+		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+	seq_printf(seq, "\t1000 Mbps %s\n",
+		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
+	seq_printf(seq, "\tHalf duplex %s\n",
+		   (priv->dma_cap.half_duplex) ? "Y" : "N");
+	seq_printf(seq, "\tHash Filter: %s\n",
+		   (priv->dma_cap.hash_filter) ? "Y" : "N");
+	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+		   (priv->dma_cap.multi_addr) ? "Y" : "N");
+	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
+		   (priv->dma_cap.pcs) ? "Y" : "N");
+	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
+	seq_printf(seq, "\tPMT Remote wake up: %s\n",
+		   (priv->dma_cap.pmt_remote_wake_up) ?
"Y" : "N"); + seq_printf(seq, "\tPMT Magic Frame: %s\n", + (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); + seq_printf(seq, "\tRMON module: %s\n", + (priv->dma_cap.rmon) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", + (priv->dma_cap.time_stamp) ? "Y" : "N"); + seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n", + (priv->dma_cap.atime_stamp) ? "Y" : "N"); + seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n", + (priv->dma_cap.eee) ? "Y" : "N"); + seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); + seq_printf(seq, "\tChecksum Offload in TX: %s\n", + (priv->dma_cap.tx_coe) ? "Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", + (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", + (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", + (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); + seq_printf(seq, "\tNumber of Additional RX channel: %d\n", + priv->dma_cap.number_rx_channel); + seq_printf(seq, "\tNumber of Additional TX channel: %d\n", + priv->dma_cap.number_tx_channel); + seq_printf(seq, "\tEnhanced descriptors: %s\n", + (priv->dma_cap.enh_desc) ? "Y" : "N"); + + return 0; +} + +static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) +{ + return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); +} + +static const struct file_operations stmmac_dma_cap_fops = { + .owner = THIS_MODULE, + .open = stmmac_sysfs_dma_cap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int stmmac_init_fs(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + /* Create per netdev entries */ + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); + + if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { + pr_err("ERROR %s/%s, debugfs create directory failed\n", + STMMAC_RESOURCE_NAME, dev->name); + + return -ENOMEM; + } + + /* Entry to report DMA RX/TX rings */ + priv->dbgfs_rings_status = + debugfs_create_file("descriptors_status", S_IRUGO, + priv->dbgfs_dir, dev, + &stmmac_rings_status_fops); + + if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { + pr_info("ERROR creating stmmac ring debugfs file\n"); + debugfs_remove_recursive(priv->dbgfs_dir); + + return -ENOMEM; + } + + /* Entry to report the DMA HW features */ + priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, + priv->dbgfs_dir, + dev, &stmmac_dma_cap_fops); + + if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { + pr_info("ERROR creating stmmac MMC debugfs file\n"); + debugfs_remove_recursive(priv->dbgfs_dir); + + return -ENOMEM; + } + + return 0; +} + +static void stmmac_exit_fs(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + debugfs_remove_recursive(priv->dbgfs_dir); +} +#endif /* CONFIG_DEBUG_FS */ + +static const struct net_device_ops stmmac_netdev_ops = { + .ndo_open = stmmac_open, + .ndo_start_xmit = stmmac_xmit, + .ndo_stop = stmmac_release, + .ndo_change_mtu = stmmac_change_mtu, + .ndo_fix_features = stmmac_fix_features, + .ndo_set_features = stmmac_set_features, + .ndo_set_rx_mode = stmmac_set_rx_mode, + .ndo_tx_timeout = stmmac_tx_timeout, + .ndo_do_ioctl = stmmac_ioctl, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = stmmac_poll_controller, +#endif + .ndo_set_mac_address = eth_mac_addr, +}; + +/** + * stmmac_hw_init - Init the MAC device + * @priv: driver private structure + * 
Description: this function is to configure the MAC device according to + * some platform parameters or the HW capability register. It prepares the + * driver to use either ring or chain modes and to setup either enhanced or + * normal descriptors. + */ +static int stmmac_hw_init(struct stmmac_priv *priv) +{ + struct mac_device_info *mac; + + /* Identify the MAC HW device */ + if (priv->plat->has_gmac) { + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac = dwmac1000_setup(priv->ioaddr, + priv->plat->multicast_filter_bins, + priv->plat->unicast_filter_entries); + } else { + mac = dwmac100_setup(priv->ioaddr); + } + if (!mac) + return -ENOMEM; + + priv->hw = mac; + + /* Get and dump the chip ID */ + priv->synopsys_id = stmmac_get_synopsys_id(priv); + + /* To use the chained or ring mode */ + if (chain_mode) { + priv->hw->mode = &chain_mode_ops; + pr_info(" Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + } else { + priv->hw->mode = &ring_mode_ops; + pr_info(" Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + } + + /* Get the HW capability (new GMAC newer than 3.50a) */ + priv->hw_cap_support = stmmac_get_hw_features(priv); + if (priv->hw_cap_support) { + pr_info(" DMA HW capability register supported"); + + /* We can override some gmac/dma configuration fields: e.g. + * enh_desc, tx_coe (e.g. that are passed through the + * platform) with the values from the HW capability + * register (if supported). + */ + priv->plat->enh_desc = priv->dma_cap.enh_desc; + priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; + + /* TXCOE doesn't work in thresh DMA mode */ + if (priv->plat->force_thresh_dma_mode) + priv->plat->tx_coe = 0; + else + priv->plat->tx_coe = priv->dma_cap.tx_coe; + + if (priv->dma_cap.rx_coe_type2) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; + else if (priv->dma_cap.rx_coe_type1) + priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; + + } else + pr_info(" No HW DMA feature register supported"); + + /* To use alternate (extended) or normal descriptor structures */ + stmmac_selec_desc_mode(priv); + + if (priv->plat->rx_coe) { + priv->hw->rx_csum = priv->plat->rx_coe; + pr_info(" RX Checksum Offload Engine supported (type %d)\n", + priv->plat->rx_coe); + } + if (priv->plat->tx_coe) + pr_info(" TX Checksum insertion supported\n"); + + if (priv->plat->pmt) { + pr_info(" Wake-Up On Lan supported\n"); + device_set_wakeup_capable(priv->device, 1); + } + + return 0; +} + +/** + * stmmac_dvr_probe + * @device: device pointer + * @plat_dat: platform data pointer + * @addr: iobase memory address + * Description: this is the main probe function used to + * call the alloc_etherdev, allocate the priv structure. + * Return: + * on success the new private structure is returned, otherwise the error + * pointer. 
+ */
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+				     struct plat_stmmacenet_data *plat_dat,
+				     void __iomem *addr)
+{
+	int ret = 0;
+	struct net_device *ndev = NULL;
+	struct stmmac_priv *priv;
+
+	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+	if (!ndev)
+		return ERR_PTR(-ENOMEM);
+
+	SET_NETDEV_DEV(ndev, device);
+
+	priv = netdev_priv(ndev);
+	priv->device = device;
+	priv->dev = ndev;
+
+	stmmac_set_ethtool_ops(ndev);
+	priv->pause = pause;
+	priv->plat = plat_dat;
+	priv->ioaddr = addr;
+	priv->dev->base_addr = (unsigned long)addr;
+
+	/* Verify driver arguments */
+	stmmac_verify_args();
+
+	/* Override with kernel parameters if supplied XXX CRS XXX
+	 * this needs to have multiple instances
+	 */
+	if ((phyaddr >= 0) && (phyaddr <= 31))
+		priv->plat->phy_addr = phyaddr;
+
+	priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
+	if (IS_ERR(priv->stmmac_clk)) {
+		dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
+			 __func__);
+		/* If we failed to obtain stmmac_clk and a specific clk_csr
+		 * value is NOT passed from the platform, the probe will fail.
+		 */
+		if (!priv->plat->clk_csr) {
+			ret = PTR_ERR(priv->stmmac_clk);
+			goto error_clk_get;
+		} else {
+			priv->stmmac_clk = NULL;
+		}
+	}
+	clk_prepare_enable(priv->stmmac_clk);
+
+	priv->pclk = devm_clk_get(priv->device, "pclk");
+	if (IS_ERR(priv->pclk)) {
+		if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto error_pclk_get;
+		}
+		priv->pclk = NULL;
+	}
+	clk_prepare_enable(priv->pclk);
+
+	priv->stmmac_rst = devm_reset_control_get(priv->device,
+						  STMMAC_RESOURCE_NAME);
+	if (IS_ERR(priv->stmmac_rst)) {
+		if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto error_hw_init;
+		}
+		dev_info(priv->device, "no reset control found\n");
+		priv->stmmac_rst = NULL;
+	}
+	if (priv->stmmac_rst)
+		reset_control_deassert(priv->stmmac_rst);
+
+	/* Init MAC and get the capabilities */
+	ret = stmmac_hw_init(priv);
+	if (ret)
+		goto error_hw_init;
+
+	ndev->netdev_ops = &stmmac_netdev_ops;
+
+	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			    NETIF_F_RXCSUM;
+	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+	/* Both mac100 and gmac support receive VLAN tag detection */
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+#endif
+	priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+	if (flow_ctrl)
+		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
+
+	/* Rx Watchdog is available in the COREs newer than the 3.40.
+	 * In some cases, for example on bugged HW, this feature
+	 * has to be disabled and this can be done by passing the
+	 * riwt_off field from the platform.
+	 */
+	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+		priv->use_riwt = 1;
+		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
+	}
+
+	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+
+	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->tx_lock);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+		goto error_netdev_register;
+	}
+
+	/* If a specific clk_csr value is passed from the platform
+	 * this means that the CSR Clock Range selection cannot be
+	 * changed at run-time and it is fixed. Otherwise the driver will try
+	 * to set the MDC clock dynamically according to the actual
+	 * csr clock input.
+ */ + if (!priv->plat->clk_csr) + stmmac_clk_csr_set(priv); + else + priv->clk_csr = priv->plat->clk_csr; + + stmmac_check_pcs_mode(priv); + + if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && + priv->pcs != STMMAC_PCS_RTBI) { + /* MDIO bus Registration */ + ret = stmmac_mdio_register(ndev); + if (ret < 0) { + pr_debug("%s: MDIO bus (id: %d) registration failed", + __func__, priv->plat->bus_id); + goto error_mdio_register; + } + } + + return priv; + +error_mdio_register: + unregister_netdev(ndev); +error_netdev_register: + netif_napi_del(&priv->napi); +error_hw_init: + clk_disable_unprepare(priv->pclk); +error_pclk_get: + clk_disable_unprepare(priv->stmmac_clk); +error_clk_get: + free_netdev(ndev); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(stmmac_dvr_probe); + +/** + * stmmac_dvr_remove + * @ndev: net device pointer + * Description: this function resets the TX/RX processes, disables the MAC RX/TX + * changes the link status, releases the DMA descriptor rings. + */ +int stmmac_dvr_remove(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + pr_info("%s:\n\tremoving driver", __func__); + + priv->hw->dma->stop_rx(priv->ioaddr); + priv->hw->dma->stop_tx(priv->ioaddr); + + stmmac_set_mac(priv->ioaddr, false); + netif_carrier_off(ndev); + unregister_netdev(ndev); + if (priv->stmmac_rst) + reset_control_assert(priv->stmmac_rst); + clk_disable_unprepare(priv->pclk); + clk_disable_unprepare(priv->stmmac_clk); + if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && + priv->pcs != STMMAC_PCS_RTBI) + stmmac_mdio_unregister(ndev); + free_netdev(ndev); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_dvr_remove); + +/** + * stmmac_suspend - suspend callback + * @ndev: net device pointer + * Description: this is the function to suspend the device and it is called + * by the platform driver to stop the network queue, release the resources, + * program the PMT register (for WoL), clean and release driver resources. + */ +int stmmac_suspend(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long flags; + + if (!ndev || !netif_running(ndev)) + return 0; + + if (priv->phydev) + phy_stop(priv->phydev); + + spin_lock_irqsave(&priv->lock, flags); + + netif_device_detach(ndev); + netif_stop_queue(ndev); + + napi_disable(&priv->napi); + + /* Stop TX/RX DMA */ + priv->hw->dma->stop_tx(priv->ioaddr); + priv->hw->dma->stop_rx(priv->ioaddr); + + stmmac_clear_descriptors(priv); + + /* Enable Power down mode by programming the PMT regs */ + if (device_may_wakeup(priv->device)) { + priv->hw->mac->pmt(priv->hw, priv->wolopts); + priv->irq_wake = 1; + } else { + stmmac_set_mac(priv->ioaddr, false); + pinctrl_pm_select_sleep_state(priv->device); + /* Disable clock in case of PWM is off */ + clk_disable(priv->pclk); + clk_disable(priv->stmmac_clk); + } + spin_unlock_irqrestore(&priv->lock, flags); + + priv->oldlink = 0; + priv->speed = 0; + priv->oldduplex = -1; + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_suspend); + +/** + * stmmac_resume - resume callback + * @ndev: net device pointer + * Description: when resume this function is invoked to setup the DMA and CORE + * in a usable state. + */ +int stmmac_resume(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long flags; + + if (!netif_running(ndev)) + return 0; + + spin_lock_irqsave(&priv->lock, flags); + + /* Power Down bit, into the PM register, is cleared + * automatically as soon as a magic packet or a Wake-up frame + * is received. 
Anyway, it's better to manually clear + * this bit because it can generate problems while resuming + * from another devices (e.g. serial console). + */ + if (device_may_wakeup(priv->device)) { + priv->hw->mac->pmt(priv->hw, 0); + priv->irq_wake = 0; + } else { + pinctrl_pm_select_default_state(priv->device); + /* enable the clk prevously disabled */ + clk_enable(priv->stmmac_clk); + clk_enable(priv->pclk); + /* reset the phy so that it's ready */ + if (priv->mii) + stmmac_mdio_reset(priv->mii); + } + + netif_device_attach(ndev); + + init_dma_desc_rings(ndev, GFP_ATOMIC); + stmmac_hw_setup(ndev, false); + stmmac_init_tx_coalesce(priv); + + napi_enable(&priv->napi); + + netif_start_queue(ndev); + + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->phydev) + phy_start(priv->phydev); + + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_resume); + +#ifndef MODULE +static int __init stmmac_cmdline_opt(char *str) +{ + char *opt; + + if (!str || !*str) + return -EINVAL; + while ((opt = strsep(&str, ",")) != NULL) { + if (!strncmp(opt, "debug:", 6)) { + if (kstrtoint(opt + 6, 0, &debug)) + goto err; + } else if (!strncmp(opt, "phyaddr:", 8)) { + if (kstrtoint(opt + 8, 0, &phyaddr)) + goto err; + } else if (!strncmp(opt, "dma_txsize:", 11)) { + if (kstrtoint(opt + 11, 0, &dma_txsize)) + goto err; + } else if (!strncmp(opt, "dma_rxsize:", 11)) { + if (kstrtoint(opt + 11, 0, &dma_rxsize)) + goto err; + } else if (!strncmp(opt, "buf_sz:", 7)) { + if (kstrtoint(opt + 7, 0, &buf_sz)) + goto err; + } else if (!strncmp(opt, "tc:", 3)) { + if (kstrtoint(opt + 3, 0, &tc)) + goto err; + } else if (!strncmp(opt, "watchdog:", 9)) { + if (kstrtoint(opt + 9, 0, &watchdog)) + goto err; + } else if (!strncmp(opt, "flow_ctrl:", 10)) { + if (kstrtoint(opt + 10, 0, &flow_ctrl)) + goto err; + } else if (!strncmp(opt, "pause:", 6)) { + if (kstrtoint(opt + 6, 0, &pause)) + goto err; + } else if (!strncmp(opt, "eee_timer:", 10)) { + if (kstrtoint(opt + 10, 0, &eee_timer)) + goto err; + } else if (!strncmp(opt, "chain_mode:", 11)) { + if (kstrtoint(opt + 11, 0, &chain_mode)) + goto err; + } + } + return 0; + +err: + pr_err("%s: ERROR broken module parameter conversion", __func__); + return -EINVAL; +} + +__setup("stmmaceth=", stmmac_cmdline_opt); +#endif /* MODULE */ + +static int __init stmmac_init(void) +{ +#ifdef CONFIG_DEBUG_FS + /* Create debugfs main directory if it doesn't exist yet */ + if (!stmmac_fs_dir) { + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + + if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { + pr_err("ERROR %s, debugfs create directory failed\n", + STMMAC_RESOURCE_NAME); + + return -ENOMEM; + } + } +#endif + + return 0; +} + +static void __exit stmmac_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(stmmac_fs_dir); +#endif +} + +module_init(stmmac_init) +module_exit(stmmac_exit) + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); +MODULE_AUTHOR("Giuseppe Cavallaro "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c new file mode 100644 index 000000000..b735fa22a --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -0,0 +1,318 @@ +/******************************************************************************* + STMMAC Ethernet Driver -- MDIO bus implementation + Provides Bus interface for MII registers + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms 
and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Carl Shaw + Maintainer: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include + +#include + +#include "stmmac.h" + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 + +static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) +{ + unsigned long curr; + unsigned long finish = jiffies + 3 * HZ; + + do { + curr = jiffies; + if (readl(ioaddr + mii_addr) & MII_BUSY) + cpu_relax(); + else + return 0; + } while (!time_after_eq(curr, finish)); + + return -EBUSY; +} + +/** + * stmmac_mdio_read + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 15-11 + * @phyreg: MII addr reg bits 10-6 + * Description: it reads data from the MII register from within the phy device. + * For the 7111 GMAC, we must set the bit 0 in the MII address register while + * accessing the PHY registers. + * Fortunately, it seems this has no drawback for the 7109 MAC. + */ +static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + + int data; + u16 regValue = (((phyaddr << 11) & (0x0000F800)) | + ((phyreg << 6) & (0x000007C0))); + regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + writel(regValue, priv->ioaddr + mii_address); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + /* Read the data from the MII data register */ + data = (int)readl(priv->ioaddr + mii_data); + + return data; +} + +/** + * stmmac_mdio_write + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 15-11 + * @phyreg: MII addr reg bits 10-6 + * @phydata: phy data + * Description: it writes the data into the MII register from within the device. 
+ */ +static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + + u16 value = + (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) + | MII_WRITE; + + value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2); + + /* Wait until any existing MII operation is complete */ + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + /* Set the MII address register to write */ + writel(phydata, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); +} + +/** + * stmmac_mdio_reset + * @bus: points to the mii_bus structure + * Description: reset the MII bus + */ +int stmmac_mdio_reset(struct mii_bus *bus) +{ +#if defined(CONFIG_STMMAC_PLATFORM) + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + struct stmmac_mdio_bus_data *data = priv->plat->mdio_bus_data; + +#ifdef CONFIG_OF + if (priv->device->of_node) { + int reset_gpio, active_low; + + if (data->reset_gpio < 0) { + struct device_node *np = priv->device->of_node; + if (!np) + return 0; + + data->reset_gpio = of_get_named_gpio(np, + "snps,reset-gpio", 0); + if (data->reset_gpio < 0) + return 0; + + data->active_low = of_property_read_bool(np, + "snps,reset-active-low"); + of_property_read_u32_array(np, + "snps,reset-delays-us", data->delays, 3); + } + + reset_gpio = data->reset_gpio; + active_low = data->active_low; + + if (!gpio_request(reset_gpio, "mdio-reset")) { + gpio_direction_output(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[0]); + gpio_set_value(reset_gpio, active_low ? 0 : 1); + udelay(data->delays[1]); + gpio_set_value(reset_gpio, active_low ? 1 : 0); + udelay(data->delays[2]); + } + } +#endif + + if (data->phy_reset) { + pr_debug("stmmac_mdio_reset: calling phy_reset\n"); + data->phy_reset(priv->plat->bsp_priv); + } + + /* This is a workaround for problems with the STE101P PHY. + * It doesn't complete its reset until at least one clock cycle + * on MDC, so perform a dummy mdio read. 
+ */ + writel(0, priv->ioaddr + mii_address); +#endif + return 0; +} + +/** + * stmmac_mdio_register + * @ndev: net device structure + * Description: it registers the MII bus + */ +int stmmac_mdio_register(struct net_device *ndev) +{ + int err = 0; + struct mii_bus *new_bus; + int *irqlist; + struct stmmac_priv *priv = netdev_priv(ndev); + struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; + int addr, found; + + if (!mdio_bus_data) + return 0; + + new_bus = mdiobus_alloc(); + if (new_bus == NULL) + return -ENOMEM; + + if (mdio_bus_data->irqs) { + irqlist = mdio_bus_data->irqs; + } else { + for (addr = 0; addr < PHY_MAX_ADDR; addr++) + priv->mii_irq[addr] = PHY_POLL; + irqlist = priv->mii_irq; + } + +#ifdef CONFIG_OF + if (priv->device->of_node) + mdio_bus_data->reset_gpio = -1; +#endif + + new_bus->name = "stmmac"; + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + new_bus->reset = &stmmac_mdio_reset; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", + new_bus->name, priv->plat->bus_id); + new_bus->priv = ndev; + new_bus->irq = irqlist; + new_bus->phy_mask = mdio_bus_data->phy_mask; + new_bus->parent = priv->device; + err = mdiobus_register(new_bus); + if (err != 0) { + pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); + goto bus_register_fail; + } + + found = 0; + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + struct phy_device *phydev = new_bus->phy_map[addr]; + if (phydev) { + int act = 0; + char irq_num[4]; + char *irq_str; + + /* + * If an IRQ was provided to be assigned after + * the bus probe, do it here. + */ + if ((mdio_bus_data->irqs == NULL) && + (mdio_bus_data->probed_phy_irq > 0)) { + irqlist[addr] = mdio_bus_data->probed_phy_irq; + phydev->irq = mdio_bus_data->probed_phy_irq; + } + + /* + * If we're going to bind the MAC to this PHY bus, + * and no PHY number was provided to the MAC, + * use the one probed here. + */ + if (priv->plat->phy_addr == -1) + priv->plat->phy_addr = addr; + + act = (priv->plat->phy_addr == addr); + switch (phydev->irq) { + case PHY_POLL: + irq_str = "POLL"; + break; + case PHY_IGNORE_INTERRUPT: + irq_str = "IGNORE"; + break; + default: + sprintf(irq_num, "%d", phydev->irq); + irq_str = irq_num; + break; + } + pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n", + ndev->name, phydev->phy_id, addr, + irq_str, dev_name(&phydev->dev), + act ? " active" : ""); + found = 1; + } + } + + if (!found) { + pr_warn("%s: No PHY found\n", ndev->name); + mdiobus_unregister(new_bus); + mdiobus_free(new_bus); + return -ENODEV; + } + + priv->mii = new_bus; + + return 0; + +bus_register_fail: + mdiobus_free(new_bus); + return err; +} + +/** + * stmmac_mdio_unregister + * @ndev: net device structure + * Description: it unregisters the MII bus + */ +int stmmac_mdio_unregister(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + if (!priv->mii) + return 0; + + mdiobus_unregister(priv->mii); + priv->mii->priv = NULL; + mdiobus_free(priv->mii); + priv->mii = NULL; + + return 0; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c new file mode 100644 index 000000000..3bca90871 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -0,0 +1,294 @@ +/******************************************************************************* + This contains the functions to handle the pci driver. 
+ + Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Rayagond Kokatanur + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include + +#include "stmmac.h" + +/* + * This struct is used to associate PCI Function of MAC controller on a board, + * discovered via DMI, with the address of PHY connected to the MAC. The + * negative value of the address means that MAC controller is not connected + * with PHY. + */ +struct stmmac_pci_dmi_data { + const char *name; + unsigned int func; + int phy_addr; +}; + +struct stmmac_pci_info { + struct pci_dev *pdev; + int (*setup)(struct plat_stmmacenet_data *plat, + struct stmmac_pci_info *info); + struct stmmac_pci_dmi_data *dmi; +}; + +static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info) +{ + const char *name = dmi_get_system_info(DMI_BOARD_NAME); + unsigned int func = PCI_FUNC(info->pdev->devfn); + struct stmmac_pci_dmi_data *dmi; + + /* + * Galileo boards with old firmware don't support DMI. We always return + * 1 here, so at least first found MAC controller would be probed. + */ + if (!name) + return 1; + + for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) { + if (!strcmp(dmi->name, name) && dmi->func == func) + return dmi->phy_addr; + } + + return -ENODEV; +} + +static void stmmac_default_data(struct plat_stmmacenet_data *plat) +{ + plat->bus_id = 1; + plat->phy_addr = 0; + plat->interface = PHY_INTERFACE_MODE_GMII; + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; + + plat->mdio_bus_data->phy_reset = NULL; + plat->mdio_bus_data->phy_mask = 0; + + plat->dma_cfg->pbl = 32; + plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; +} + +static int quark_default_data(struct plat_stmmacenet_data *plat, + struct stmmac_pci_info *info) +{ + struct pci_dev *pdev = info->pdev; + int ret; + + /* + * Refuse to load the driver and register net device if MAC controller + * does not connect to any PHY interface. 
+ */ + ret = stmmac_pci_find_phy_addr(info); + if (ret < 0) + return ret; + + plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn); + plat->phy_addr = ret; + plat->interface = PHY_INTERFACE_MODE_RMII; + plat->clk_csr = 2; + plat->has_gmac = 1; + plat->force_sf_dma_mode = 1; + + plat->mdio_bus_data->phy_reset = NULL; + plat->mdio_bus_data->phy_mask = 0; + + plat->dma_cfg->pbl = 16; + plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; + plat->dma_cfg->fixed_burst = 1; + + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + + return 0; +} + +static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = { + { + .name = "Galileo", + .func = 6, + .phy_addr = 1, + }, + { + .name = "GalileoGen2", + .func = 6, + .phy_addr = 1, + }, + {} +}; + +static struct stmmac_pci_info quark_pci_info = { + .setup = quark_default_data, + .dmi = quark_pci_dmi_data, +}; + +/** + * stmmac_pci_probe + * + * @pdev: pci device pointer + * @id: pointer to table of device id/id's. + * + * Description: This probing function gets called for all PCI devices which + * match the ID table and are not "owned" by other driver yet. This function + * gets passed a "struct pci_dev *" for each device whose entry in the ID table + * matches the device. The probe functions returns zero when the driver choose + * to take "ownership" of the device or an error code(-ve no) otherwise. + */ +static int stmmac_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data; + struct plat_stmmacenet_data *plat; + struct stmmac_priv *priv; + int i; + int ret; + + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + + plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), + GFP_KERNEL); + if (!plat->dma_cfg) + return -ENOMEM; + + /* Enable pci device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", + __func__); + return ret; + } + + /* Get the base address of device */ + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); + if (ret) + return ret; + break; + } + + pci_set_master(pdev); + + if (info) { + info->pdev = pdev; + if (info->setup) { + ret = info->setup(plat, info); + if (ret) + return ret; + } + } else + stmmac_default_data(plat); + + pci_enable_msi(pdev); + + priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]); + if (IS_ERR(priv)) { + dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__); + return PTR_ERR(priv); + } + priv->dev->irq = pdev->irq; + priv->wol_irq = pdev->irq; + + pci_set_drvdata(pdev, priv->dev); + + dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n"); + + return 0; +} + +/** + * stmmac_pci_remove + * + * @pdev: platform device pointer + * Description: this function calls the main to free the net resources + * and releases the PCI resources. 
+ */ +static void stmmac_pci_remove(struct pci_dev *pdev) +{ + struct net_device *ndev = pci_get_drvdata(pdev); + + stmmac_dvr_remove(ndev); +} + +#ifdef CONFIG_PM_SLEEP +static int stmmac_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *ndev = pci_get_drvdata(pdev); + + return stmmac_suspend(ndev); +} + +static int stmmac_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *ndev = pci_get_drvdata(pdev); + + return stmmac_resume(ndev); +} +#endif + +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); + +#define STMMAC_VENDOR_ID 0x700 +#define STMMAC_QUARK_ID 0x0937 +#define STMMAC_DEVICE_ID 0x1108 + +static const struct pci_device_id stmmac_id_table[] = { + {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, + {PCI_VDEVICE(INTEL, STMMAC_QUARK_ID), (kernel_ulong_t)&quark_pci_info}, + {} +}; + +MODULE_DEVICE_TABLE(pci, stmmac_id_table); + +static struct pci_driver stmmac_pci_driver = { + .name = STMMAC_RESOURCE_NAME, + .id_table = stmmac_id_table, + .probe = stmmac_pci_probe, + .remove = stmmac_pci_remove, + .driver = { + .pm = &stmmac_pm_ops, + }, +}; + +module_pci_driver(stmmac_pci_driver); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); +MODULE_AUTHOR("Rayagond Kokatanur "); +MODULE_AUTHOR("Giuseppe Cavallaro "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c new file mode 100644 index 000000000..68aec5c46 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -0,0 +1,458 @@ +/******************************************************************************* + This contains the functions to handle the platform driver. + + Copyright (C) 2007-2011 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +static const struct of_device_id stmmac_dt_ids[] = { + /* SoC specific glue layers should come before generic bindings */ + { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data}, + { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, + { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data}, + { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data}, + { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data}, + { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data}, + { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data}, + { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data }, + { .compatible = "st,spear600-gmac"}, + { .compatible = "snps,dwmac-3.610"}, + { .compatible = "snps,dwmac-3.70a"}, + { .compatible = "snps,dwmac-3.710"}, + { .compatible = "snps,dwmac"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, stmmac_dt_ids); + +#ifdef CONFIG_OF + +/** + * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins + * @mcast_bins: Multicast filtering bins + * Description: + * this function validates the number of Multicast filtering bins specified + * by the configuration through the device tree. The Synopsys GMAC supports + * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC + * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds + * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is + * invalid and will cause the filtering algorithm to use Multicast + * promiscuous mode. + */ +static int dwmac1000_validate_mcast_bins(int mcast_bins) +{ + int x = mcast_bins; + + switch (x) { + case HASH_TABLE_SIZE: + case 128: + case 256: + break; + default: + x = 0; + pr_info("Hash table entries set to unexpected value %d", + mcast_bins); + break; + } + return x; +} + +/** + * dwmac1000_validate_ucast_entries - validate the Unicast address entries + * @ucast_entries: number of Unicast address entries + * Description: + * This function validates the number of Unicast address entries supported + * by a particular Synopsys 10/100/1000 controller. The Synopsys controller + * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter + * logic. This function validates a valid, supported configuration is + * selected, and defaults to 1 Unicast address if an unsupported + * configuration is selected. + */ +static int dwmac1000_validate_ucast_entries(int ucast_entries) +{ + int x = ucast_entries; + + switch (x) { + case 1: + case 32: + case 64: + case 128: + break; + default: + x = 1; + pr_info("Unicast table entries set to unexpected value %d\n", + ucast_entries); + break; + } + return x; +} + +/** + * stmmac_probe_config_dt - parse device-tree driver parameters + * @pdev: platform_device structure + * @plat: driver data platform structure + * @mac: MAC address to use + * Description: + * this function is to read the driver parameters from device-tree and + * set some private fields that will be used by the main at runtime. 
+ */ +static int stmmac_probe_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat, + const char **mac) +{ + struct device_node *np = pdev->dev.of_node; + struct stmmac_dma_cfg *dma_cfg; + const struct of_device_id *device; + + if (!np) + return -ENODEV; + + device = of_match_device(stmmac_dt_ids, &pdev->dev); + if (!device) + return -ENODEV; + + if (device->data) { + const struct stmmac_of_data *data = device->data; + plat->has_gmac = data->has_gmac; + plat->enh_desc = data->enh_desc; + plat->tx_coe = data->tx_coe; + plat->rx_coe = data->rx_coe; + plat->bugged_jumbo = data->bugged_jumbo; + plat->pmt = data->pmt; + plat->riwt_off = data->riwt_off; + plat->fix_mac_speed = data->fix_mac_speed; + plat->bus_setup = data->bus_setup; + plat->setup = data->setup; + plat->free = data->free; + plat->init = data->init; + plat->exit = data->exit; + } + + *mac = of_get_mac_address(np); + plat->interface = of_get_phy_mode(np); + + /* Get max speed of operation from device tree */ + if (of_property_read_u32(np, "max-speed", &plat->max_speed)) + plat->max_speed = -1; + + plat->bus_id = of_alias_get_id(np, "ethernet"); + if (plat->bus_id < 0) + plat->bus_id = 0; + + /* Default to phy auto-detection */ + plat->phy_addr = -1; + + /* "snps,phy-addr" is not a standard property. Mark it as deprecated + * and warn of its use. Remove this when phy node support is added. + */ + if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) + dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); + + if (plat->phy_bus_name) + plat->mdio_bus_data = NULL; + else + plat->mdio_bus_data = + devm_kzalloc(&pdev->dev, + sizeof(struct stmmac_mdio_bus_data), + GFP_KERNEL); + + of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); + + of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size); + + plat->force_sf_dma_mode = + of_property_read_bool(np, "snps,force_sf_dma_mode"); + + /* Set the maxmtu to a default of JUMBO_LEN in case the + * parameter is not present in the device tree. + */ + plat->maxmtu = JUMBO_LEN; + + /* + * Currently only the properties needed on SPEAr600 + * are provided. All other properties should be added + * once needed on other platforms. + */ + if (of_device_is_compatible(np, "st,spear600-gmac") || + of_device_is_compatible(np, "snps,dwmac-3.70a") || + of_device_is_compatible(np, "snps,dwmac")) { + /* Note that the max-frame-size parameter as defined in the + * ePAPR v1.1 spec is defined as max-frame-size, it's + * actually used as the IEEE definition of MAC Client + * data, or MTU. 
The ePAPR specification is confusing as + * the definition is max-frame-size, but usage examples + * are clearly MTUs + */ + of_property_read_u32(np, "max-frame-size", &plat->maxmtu); + of_property_read_u32(np, "snps,multicast-filter-bins", + &plat->multicast_filter_bins); + of_property_read_u32(np, "snps,perfect-filter-entries", + &plat->unicast_filter_entries); + plat->unicast_filter_entries = dwmac1000_validate_ucast_entries( + plat->unicast_filter_entries); + plat->multicast_filter_bins = dwmac1000_validate_mcast_bins( + plat->multicast_filter_bins); + plat->has_gmac = 1; + plat->pmt = 1; + } + + if (of_device_is_compatible(np, "snps,dwmac-3.610") || + of_device_is_compatible(np, "snps,dwmac-3.710")) { + plat->enh_desc = 1; + plat->bugged_jumbo = 1; + plat->force_sf_dma_mode = 1; + } + + if (of_find_property(np, "snps,pbl", NULL)) { + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), + GFP_KERNEL); + if (!dma_cfg) + return -ENOMEM; + plat->dma_cfg = dma_cfg; + of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + dma_cfg->fixed_burst = + of_property_read_bool(np, "snps,fixed-burst"); + dma_cfg->mixed_burst = + of_property_read_bool(np, "snps,mixed-burst"); + of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len); + if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256) + dma_cfg->burst_len = 0; + } + plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); + if (plat->force_thresh_dma_mode) { + plat->force_sf_dma_mode = 0; + pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); + } + + return 0; +} +#else +static int stmmac_probe_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat, + const char **mac) +{ + return -ENOSYS; +} +#endif /* CONFIG_OF */ + +/** + * stmmac_pltfr_probe - platform driver probe. + * @pdev: platform device pointer + * Description: platform_device probe function. It is to allocate + * the necessary platform resources, invoke custom helper (if required) and + * invoke the main probe function. + */ +static int stmmac_pltfr_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + struct device *dev = &pdev->dev; + void __iomem *addr = NULL; + struct stmmac_priv *priv = NULL; + struct plat_stmmacenet_data *plat_dat = NULL; + const char *mac = NULL; + int irq, wol_irq, lpi_irq; + + /* Get IRQ information early to have an ability to ask for deferred + * probe if needed before we went too far with resource allocation. + */ + irq = platform_get_irq_byname(pdev, "macirq"); + if (irq < 0) { + if (irq != -EPROBE_DEFER) { + dev_err(dev, + "MAC IRQ configuration information not found\n"); + } + return irq; + } + + /* On some platforms e.g. 
SPEAr, the wake up irq differs from the mac irq. + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq". + * + * If the wake up interrupt is not passed from the platform, + * the driver will continue to use the mac irq (ndev->irq). + */ + wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (wol_irq < 0) { + if (wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + wol_irq = irq; + } + + lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = devm_ioremap_resource(dev, res); + if (IS_ERR(addr)) + return PTR_ERR(addr); + + plat_dat = dev_get_platdata(&pdev->dev); + + if (!plat_dat) + plat_dat = devm_kzalloc(&pdev->dev, + sizeof(struct plat_stmmacenet_data), + GFP_KERNEL); + if (!plat_dat) { + pr_err("%s: ERROR: no memory", __func__); + return -ENOMEM; + } + + /* Set default value for multicast hash bins */ + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; + + if (pdev->dev.of_node) { + ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); + if (ret) { + pr_err("%s: main dt probe failed", __func__); + return ret; + } + } + + /* Custom setup (if needed) */ + if (plat_dat->setup) { + plat_dat->bsp_priv = plat_dat->setup(pdev); + if (IS_ERR(plat_dat->bsp_priv)) + return PTR_ERR(plat_dat->bsp_priv); + } + + /* Custom initialisation (if needed) */ + if (plat_dat->init) { + ret = plat_dat->init(pdev, plat_dat->bsp_priv); + if (unlikely(ret)) + return ret; + } + + priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); + if (IS_ERR(priv)) { + pr_err("%s: main driver probe failed", __func__); + return PTR_ERR(priv); + } + + /* Copy IRQ values to priv structure which is now available */ + priv->dev->irq = irq; + priv->wol_irq = wol_irq; + priv->lpi_irq = lpi_irq; + + /* Get MAC address if available (DT) */ + if (mac) + memcpy(priv->dev->dev_addr, mac, ETH_ALEN); + + platform_set_drvdata(pdev, priv->dev); + + pr_debug("STMMAC platform driver registration completed"); + + return 0; +} + +/** + * stmmac_pltfr_remove + * @pdev: platform device pointer + * Description: this function calls the main driver to free the net resources + * and then calls the platform's hooks to release the resources (e.g. mem). + */ +static int stmmac_pltfr_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret = stmmac_dvr_remove(ndev); + + if (priv->plat->exit) + priv->plat->exit(pdev, priv->plat->bsp_priv); + + if (priv->plat->free) + priv->plat->free(pdev, priv->plat->bsp_priv); + + return ret; +} + +#ifdef CONFIG_PM_SLEEP +/** + * stmmac_pltfr_suspend + * @dev: device pointer + * Description: this function is invoked when suspending the driver; it directly + * calls the main suspend function and then, if required, on some platforms, it + * can call an exit helper.
+ */ +static int stmmac_pltfr_suspend(struct device *dev) +{ + int ret; + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + struct platform_device *pdev = to_platform_device(dev); + + ret = stmmac_suspend(ndev); + if (priv->plat->exit) + priv->plat->exit(pdev, priv->plat->bsp_priv); + + return ret; +} + +/** + * stmmac_pltfr_resume + * @dev: device pointer + * Description: this function is invoked when resume the driver before calling + * the main resume function, on some platforms, it can call own init helper + * if required. + */ +static int stmmac_pltfr_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + struct platform_device *pdev = to_platform_device(dev); + + if (priv->plat->init) + priv->plat->init(pdev, priv->plat->bsp_priv); + + return stmmac_resume(ndev); +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, + stmmac_pltfr_suspend, stmmac_pltfr_resume); + +static struct platform_driver stmmac_pltfr_driver = { + .probe = stmmac_pltfr_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = STMMAC_RESOURCE_NAME, + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = of_match_ptr(stmmac_dt_ids), + }, +}; + +module_platform_driver(stmmac_pltfr_driver); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); +MODULE_AUTHOR("Giuseppe Cavallaro "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h new file mode 100644 index 000000000..093eb99e5 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -0,0 +1,29 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro +*******************************************************************************/ + +#ifndef __STMMAC_PLATFORM_H__ +#define __STMMAC_PLATFORM_H__ + +extern const struct stmmac_of_data meson6_dwmac_data; +extern const struct stmmac_of_data sun7i_gmac_data; +extern const struct stmmac_of_data stih4xx_dwmac_data; +extern const struct stmmac_of_data stid127_dwmac_data; +extern const struct stmmac_of_data socfpga_gmac_data; +extern const struct stmmac_of_data rk3288_gmac_data; + +#endif /* __STMMAC_PLATFORM_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c new file mode 100644 index 000000000..170a18b61 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -0,0 +1,211 @@ +/******************************************************************************* + PTP 1588 clock using the STMMAC. 
+ + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Rayagond Kokatanur +*******************************************************************************/ +#include "stmmac.h" +#include "stmmac_ptp.h" + +/** + * stmmac_adjust_freq + * + * @ptp: pointer to ptp_clock_info structure + * @ppb: desired period change in parts ber billion + * + * Description: this function will adjust the frequency of hardware clock. + */ +static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 diff, addend; + int neg_adj = 0; + u64 adj; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + addend = priv->default_addend; + adj = addend; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + addend = neg_adj ? (addend - diff) : (addend + diff); + + spin_lock_irqsave(&priv->ptp_lock, flags); + + priv->hw->ptp->config_addend(priv->ioaddr, addend); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +/** + * stmmac_adjust_time + * + * @ptp: pointer to ptp_clock_info structure + * @delta: desired change in nanoseconds + * + * Description: this function will shift/adjust the hardware clock time. + */ +static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u32 sec, nsec; + u32 quotient, reminder; + int neg_adj = 0; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + quotient = div_u64_rem(delta, 1000000000ULL, &reminder); + sec = quotient; + nsec = reminder; + + spin_lock_irqsave(&priv->ptp_lock, flags); + + priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +/** + * stmmac_get_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: pointer to hold time/result + * + * Description: this function will read the current time from the + * hardware clock and store it in @ts. + */ +static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&priv->ptp_lock, flags); + + ns = priv->hw->ptp->get_systime(priv->ioaddr); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * stmmac_set_time + * + * @ptp: pointer to ptp_clock_info structure + * @ts: time value to set + * + * Description: this function will set the current time on the + * hardware clock. 
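+ * In other words, the ts->tv_sec/ts->tv_nsec pair supplied by the PTP core + * is programmed into the MAC system-time registers through the init_systime + * hook while ptp_lock is held, mirroring how stmmac_get_time above reads the + * time back via get_systime.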
+ */ +static int stmmac_set_time(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + unsigned long flags; + + spin_lock_irqsave(&priv->ptp_lock, flags); + + priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); + + spin_unlock_irqrestore(&priv->ptp_lock, flags); + + return 0; +} + +static int stmmac_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/* structure describing a PTP hardware clock */ +static struct ptp_clock_info stmmac_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "stmmac_ptp_clock", + .max_adj = 62500000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = stmmac_adjust_freq, + .adjtime = stmmac_adjust_time, + .gettime64 = stmmac_get_time, + .settime64 = stmmac_set_time, + .enable = stmmac_enable, +}; + +/** + * stmmac_ptp_register + * @priv: driver private structure + * Description: this function will register the ptp clock driver + * to kernel. It also does some house keeping work. + */ +int stmmac_ptp_register(struct stmmac_priv *priv) +{ + spin_lock_init(&priv->ptp_lock); + priv->ptp_clock_ops = stmmac_ptp_clock_ops; + + priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, + priv->device); + if (IS_ERR(priv->ptp_clock)) { + priv->ptp_clock = NULL; + pr_err("ptp_clock_register() failed on %s\n", priv->dev->name); + } else + pr_debug("Added PTP HW clock successfully on %s\n", + priv->dev->name); + + return 0; +} + +/** + * stmmac_ptp_unregister + * @priv: driver private structure + * Description: this function will remove/unregister the ptp clock driver + * from the kernel. + */ +void stmmac_ptp_unregister(struct stmmac_priv *priv) +{ + if (priv->ptp_clock) { + ptp_clock_unregister(priv->ptp_clock); + priv->ptp_clock = NULL; + pr_debug("Removed PTP HW clock successfully on %s\n", + priv->dev->name); + } +} diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h new file mode 100644 index 000000000..4535df37c --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -0,0 +1,72 @@ +/****************************************************************************** + PTP Header file + + Copyright (C) 2013 Vayavya Labs Pvt Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Rayagond Kokatanur +******************************************************************************/ + +#ifndef __STMMAC_PTP_H__ +#define __STMMAC_PTP_H__ + +/* IEEE 1588 PTP register offsets */ +#define PTP_TCR 0x0700 /* Timestamp Control Reg */ +#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x0708 /* System Time – Seconds Regr */ +#define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */ +#define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */ +#define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */ +#define PTP_TAR 0x0718 /* Timestamp Addend Reg */ +#define PTP_TTSR 0x071C /* Target Time Seconds Reg */ +#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */ +#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */ +#define PTP_TSR 0x0728 /* Timestamp Status */ + +#define PTP_STNSUR_ADDSUB_SHIFT 31 + +/* PTP TCR defines */ +#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */ +#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */ +#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */ +#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */ +/* Timestamp Interrupt Trigger Enable */ +#define PTP_TCR_TSTRIG 0x00000010 +#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */ +#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */ +/* Timestamp Digital or Binary Rollover Control */ +#define PTP_TCR_TSCTRLSSR 0x00000200 + +/* Enable PTP packet Processing for Version 2 Format */ +#define PTP_TCR_TSVER2ENA 0x00000400 +/* Enable Processing of PTP over Ethernet Frames */ +#define PTP_TCR_TSIPENA 0x00000800 +/* Enable Processing of PTP Frames Sent over IPv6-UDP */ +#define PTP_TCR_TSIPV6ENA 0x00001000 +/* Enable Processing of PTP Frames Sent over IPv4-UDP */ +#define PTP_TCR_TSIPV4ENA 0x00002000 +/* Enable Timestamp Snapshot for Event Messages */ +#define PTP_TCR_TSEVNTENA 0x00004000 +/* Enable Snapshot for Messages Relevant to Master */ +#define PTP_TCR_TSMSTRENA 0x00008000 +/* Select PTP packets for Taking Snapshots */ +#define PTP_TCR_SNAPTYPSEL_1 0x00010000 +/* Enable MAC address for PTP Frame Filtering */ +#define PTP_TCR_TSENMACADDR 0x00040000 + +#endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig new file mode 100644 index 000000000..3074aa374 --- /dev/null +++ b/drivers/net/ethernet/sun/Kconfig @@ -0,0 +1,88 @@ +# +# Sun network device configuration +# + +config NET_VENDOR_SUN + bool "Sun devices" + default y + depends on SUN3 || SBUS || PCI || SUN_LDOMS + ---help--- + If you have a network (Ethernet) card belonging to this class, say + Y and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Sun network interfaces. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_SUN + +config HAPPYMEAL + tristate "Sun Happy Meal 10/100baseT support" + depends on (SBUS || PCI) + select CRC32 + ---help--- + This driver supports the "hme" interface present on most Ultra + systems and as an option on older Sbus systems. This driver supports + both PCI and Sbus devices. This driver also supports the "qfe" quad + 100baseT device available in both PCI and Sbus configurations. + + To compile this driver as a module, choose M here: the module + will be called sunhme. 
+ +config SUNBMAC + tristate "Sun BigMAC 10/100baseT support" + depends on SBUS + select CRC32 + ---help--- + This driver supports the "be" interface available as an Sbus option. + This is Sun's older 100baseT Ethernet device. + + To compile this driver as a module, choose M here: the module + will be called sunbmac. + +config SUNQE + tristate "Sun QuadEthernet support" + depends on SBUS + select CRC32 + ---help--- + This driver supports the "qe" 10baseT Ethernet device, available as + an Sbus option. Note that this is not the same as Quad FastEthernet + "qfe" which is supported by the Happy Meal driver instead. + + To compile this driver as a module, choose M here: the module + will be called sunqe. + +config SUNGEM + tristate "Sun GEM support" + depends on PCI + select CRC32 + select SUNGEM_PHY + ---help--- + Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also + . + +config CASSINI + tristate "Sun Cassini support" + depends on PCI + select CRC32 + ---help--- + Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also + . + +config SUNVNET + tristate "Sun Virtual Network support" + depends on SUN_LDOMS + ---help--- + Support for virtual network devices under Sun Logical Domains. + +config NIU + tristate "Sun Neptune 10Gbit Ethernet support" + depends on PCI + select CRC32 + ---help--- + This enables support for cards based upon Sun's + Neptune chipset. + +endif # NET_VENDOR_SUN diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile new file mode 100644 index 000000000..1e620ff88 --- /dev/null +++ b/drivers/net/ethernet/sun/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Sun network device drivers. +# + +obj-$(CONFIG_HAPPYMEAL) += sunhme.o +obj-$(CONFIG_SUNQE) += sunqe.o +obj-$(CONFIG_SUNBMAC) += sunbmac.o +obj-$(CONFIG_SUNGEM) += sungem.o +obj-$(CONFIG_CASSINI) += cassini.o +obj-$(CONFIG_SUNVNET) += sunvnet.o +obj-$(CONFIG_NIU) += niu.o diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c new file mode 100644 index 000000000..d034a70f6 --- /dev/null +++ b/drivers/net/ethernet/sun/cassini.c @@ -0,0 +1,5291 @@ +/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. + * + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * This driver uses the sungem driver (c) David Miller + * (davem@redhat.com) as its basis. + * + * The cassini chip has a number of features that distinguish it from + * the gem chip: + * 4 transmit descriptor rings that are used for either QoS (VLAN) or + * load balancing (non-VLAN mode) + * batching of multiple packets + * multiple CPU dispatching + * page-based RX descriptor engine with separate completion rings + * Gigabit support (GMII and PCS interface) + * MIF link up/down detection works + * + * RX is handled by page sized buffers that are attached as fragments to + * the skb. 
here's what's done: + * -- driver allocates pages at a time and keeps reference counts + * on them. + * -- the upper protocol layers assume that the header is in the skb + * itself. as a result, cassini will copy a small amount (64 bytes) + * to make them happy. + * -- driver appends the rest of the data pages as frags to skbuffs + * and increments the reference count + * -- on page reclamation, the driver swaps the page with a spare page. + * if that page is still in use, it frees its reference to that page, + * and allocates a new page for use. otherwise, it just recycles the + * the page. + * + * NOTE: cassini can parse the header. however, it's not worth it + * as long as the network stack requires a header copy. + * + * TX has 4 queues. currently these queues are used in a round-robin + * fashion for load balancing. They can also be used for QoS. for that + * to work, however, QoS information needs to be exposed down to the driver + * level so that subqueues get targeted to particular transmit rings. + * alternatively, the queues can be configured via use of the all-purpose + * ioctl. + * + * RX DATA: the rx completion ring has all the info, but the rx desc + * ring has all of the data. RX can conceivably come in under multiple + * interrupts, but the INT# assignment needs to be set up properly by + * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do + * that. also, the two descriptor rings are designed to distinguish between + * encrypted and non-encrypted packets, but we use them for buffering + * instead. + * + * by default, the selective clear mask is set up to process rx packets. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#define cas_page_map(x) kmap_atomic((x)) +#define cas_page_unmap(x) kunmap_atomic((x)) +#define CAS_NCPUS num_online_cpus() + +#define cas_skb_release(x) netif_rx(x) + +/* select which firmware to use */ +#define USE_HP_WORKAROUND +#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */ +#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */ + +#include "cassini.h" + +#define USE_TX_COMPWB /* use completion writeback registers */ +#define USE_CSMA_CD_PROTO /* standard CSMA/CD */ +#define USE_RX_BLANK /* hw interrupt mitigation */ +#undef USE_ENTROPY_DEV /* don't test for entropy device */ + +/* NOTE: these aren't useable unless PCI interrupts can be assigned. + * also, we need to make cp->lock finer-grained. 
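+ * They are all left undefined below, so only the primary ring-0 interrupt + * path is compiled in.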
+ */ +#undef USE_PCI_INTB +#undef USE_PCI_INTC +#undef USE_PCI_INTD +#undef USE_QOS + +#undef USE_VPD_DEBUG /* debug vpd information if defined */ + +/* rx processing options */ +#define USE_PAGE_ORDER /* specify to allocate large rx pages */ +#define RX_DONT_BATCH 0 /* if 1, don't batch flows */ +#define RX_COPY_ALWAYS 0 /* if 0, use frags */ +#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */ +#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */ + +#define DRV_MODULE_NAME "cassini" +#define DRV_MODULE_VERSION "1.6" +#define DRV_MODULE_RELDATE "21 May 2008" + +#define CAS_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define CAS_TX_TIMEOUT (HZ) +#define CAS_LINK_TIMEOUT (22*HZ/10) +#define CAS_LINK_FAST_TIMEOUT (1) + +/* timeout values for state changing. these specify the number + * of 10us delays to be used before giving up. + */ +#define STOP_TRIES_PHY 1000 +#define STOP_TRIES 5000 + +/* specify a minimum frame size to deal with some fifo issues + * max mtu == 2 * page size - ethernet header - 64 - swivel = + * 2 * page_size - 0x50 + */ +#define CAS_MIN_FRAME 97 +#define CAS_1000MB_MIN_FRAME 255 +#define CAS_MIN_MTU 60 +#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000) + +#if 1 +/* + * Eliminate these and use separate atomic counters for each, to + * avoid a race condition. + */ +#else +#define CAS_RESET_MTU 1 +#define CAS_RESET_ALL 2 +#define CAS_RESET_SPARE 3 +#endif + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */ +static int link_mode; + +MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)"); +MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); +MODULE_LICENSE("GPL"); +/*(DEBLOBBED)*/ +module_param(cassini_debug, int, 0); +MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); +module_param(link_mode, int, 0); +MODULE_PARM_DESC(link_mode, "default link mode"); + +/* + * Work around for a PCS bug in which the link goes down due to the chip + * being confused and never showing a link status of "up." + */ +#define DEFAULT_LINKDOWN_TIMEOUT 5 +/* + * Value in seconds, for user input. + */ +static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT; +module_param(linkdown_timeout, int, 0); +MODULE_PARM_DESC(linkdown_timeout, +"min reset interval in sec. for PCS linkdown issue; disabled if not positive"); + +/* + * value in 'ticks' (units used by jiffies). Set when we init the + * module because 'HZ' in actually a function call on some flavors of + * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ. 
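+ * For example, on a kernel built with HZ=250 the default works out to + * DEFAULT_LINKDOWN_TIMEOUT * HZ = 5 * 250 = 1250 ticks; a non-positive + * linkdown_timeout leaves the value at 0, which disables the forced-reset + * workaround in cas_pcs_link_check().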
+ */ +static int link_transition_timeout; + + + +static u16 link_modes[] = { + BMCR_ANENABLE, /* 0 : autoneg */ + 0, /* 1 : 10bt half duplex */ + BMCR_SPEED100, /* 2 : 100bt half duplex */ + BMCR_FULLDPLX, /* 3 : 10bt full duplex */ + BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */ + CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ +}; + +static const struct pci_device_id cas_pci_tbl[] = { + { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, cas_pci_tbl); + +static void cas_set_link_modes(struct cas *cp); + +static inline void cas_lock_tx(struct cas *cp) +{ + int i; + + for (i = 0; i < N_TX_RINGS; i++) + spin_lock_nested(&cp->tx_lock[i], i); +} + +static inline void cas_lock_all(struct cas *cp) +{ + spin_lock_irq(&cp->lock); + cas_lock_tx(cp); +} + +/* WTZ: QA was finding deadlock problems with the previous + * versions after long test runs with multiple cards per machine. + * See if replacing cas_lock_all with safer versions helps. The + * symptoms QA is reporting match those we'd expect if interrupts + * aren't being properly restored, and we fixed a previous deadlock + * with similar symptoms by using save/restore versions in other + * places. + */ +#define cas_lock_all_save(cp, flags) \ +do { \ + struct cas *xxxcp = (cp); \ + spin_lock_irqsave(&xxxcp->lock, flags); \ + cas_lock_tx(xxxcp); \ +} while (0) + +static inline void cas_unlock_tx(struct cas *cp) +{ + int i; + + for (i = N_TX_RINGS; i > 0; i--) + spin_unlock(&cp->tx_lock[i - 1]); +} + +static inline void cas_unlock_all(struct cas *cp) +{ + cas_unlock_tx(cp); + spin_unlock_irq(&cp->lock); +} + +#define cas_unlock_all_restore(cp, flags) \ +do { \ + struct cas *xxxcp = (cp); \ + cas_unlock_tx(xxxcp); \ + spin_unlock_irqrestore(&xxxcp->lock, flags); \ +} while (0) + +static void cas_disable_irq(struct cas *cp, const int ring) +{ + /* Make sure we won't get any more interrupts */ + if (ring == 0) { + writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); + return; + } + + /* disable completion interrupts and selectively mask */ + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + switch (ring) { +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +#ifdef USE_PCI_INTB + case 1: +#endif +#ifdef USE_PCI_INTC + case 2: +#endif +#ifdef USE_PCI_INTD + case 3: +#endif + writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN, + cp->regs + REG_PLUS_INTRN_MASK(ring)); + break; +#endif + default: + writel(INTRN_MASK_CLEAR_ALL, cp->regs + + REG_PLUS_INTRN_MASK(ring)); + break; + } + } +} + +static inline void cas_mask_intr(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_COMP_RINGS; i++) + cas_disable_irq(cp, i); +} + +static void cas_enable_irq(struct cas *cp, const int ring) +{ + if (ring == 0) { /* all but TX_DONE */ + writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); + return; + } + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + switch (ring) { +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +#ifdef USE_PCI_INTB + case 1: +#endif +#ifdef USE_PCI_INTC + case 2: +#endif +#ifdef USE_PCI_INTD + case 3: +#endif + writel(INTRN_MASK_RX_EN, cp->regs + + REG_PLUS_INTRN_MASK(ring)); + break; +#endif + default: + break; + } + } +} + +static inline void cas_unmask_intr(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_COMP_RINGS; i++) + cas_enable_irq(cp, i); +} + +static inline void cas_entropy_gather(struct cas *cp) +{ +#ifdef 
USE_ENTROPY_DEV + if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) + return; + + batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), + readl(cp->regs + REG_ENTROPY_IV), + sizeof(uint64_t)*8); +#endif +} + +static inline void cas_entropy_reset(struct cas *cp) +{ +#ifdef USE_ENTROPY_DEV + if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) + return; + + writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT, + cp->regs + REG_BIM_LOCAL_DEV_EN); + writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); + writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); + + /* if we read back 0x0, we don't have an entropy device */ + if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) + cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; +#endif +} + +/* access to the phy. the following assumes that we've initialized the MIF to + * be in frame rather than bit-bang mode + */ +static u16 cas_phy_read(struct cas *cp, int reg) +{ + u32 cmd; + int limit = STOP_TRIES_PHY; + + cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ; + cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); + cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); + cmd |= MIF_FRAME_TURN_AROUND_MSB; + writel(cmd, cp->regs + REG_MIF_FRAME); + + /* poll for completion */ + while (limit-- > 0) { + udelay(10); + cmd = readl(cp->regs + REG_MIF_FRAME); + if (cmd & MIF_FRAME_TURN_AROUND_LSB) + return cmd & MIF_FRAME_DATA_MASK; + } + return 0xFFFF; /* -1 */ +} + +static int cas_phy_write(struct cas *cp, int reg, u16 val) +{ + int limit = STOP_TRIES_PHY; + u32 cmd; + + cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE; + cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); + cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); + cmd |= MIF_FRAME_TURN_AROUND_MSB; + cmd |= val & MIF_FRAME_DATA_MASK; + writel(cmd, cp->regs + REG_MIF_FRAME); + + /* poll for completion */ + while (limit-- > 0) { + udelay(10); + cmd = readl(cp->regs + REG_MIF_FRAME); + if (cmd & MIF_FRAME_TURN_AROUND_LSB) + return 0; + } + return -1; +} + +static void cas_phy_powerup(struct cas *cp) +{ + u16 ctl = cas_phy_read(cp, MII_BMCR); + + if ((ctl & BMCR_PDOWN) == 0) + return; + ctl &= ~BMCR_PDOWN; + cas_phy_write(cp, MII_BMCR, ctl); +} + +static void cas_phy_powerdown(struct cas *cp) +{ + u16 ctl = cas_phy_read(cp, MII_BMCR); + + if (ctl & BMCR_PDOWN) + return; + ctl |= BMCR_PDOWN; + cas_phy_write(cp, MII_BMCR, ctl); +} + +/* cp->lock held. note: the last put_page will free the buffer */ +static int cas_page_free(struct cas *cp, cas_page_t *page) +{ + pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, + PCI_DMA_FROMDEVICE); + __free_pages(page->buffer, cp->page_order); + kfree(page); + return 0; +} + +#ifdef RX_COUNT_BUFFERS +#define RX_USED_ADD(x, y) ((x)->used += (y)) +#define RX_USED_SET(x, y) ((x)->used = (y)) +#else +#define RX_USED_ADD(x, y) +#define RX_USED_SET(x, y) +#endif + +/* local page allocation routines for the receive buffers. jumbo pages + * require at least 8K contiguous and 8K aligned buffers. 
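+ * For example, assuming 4K system pages, an 8K rx buffer means + * cp->page_order = 1 and cp->page_size = 8192, so cas_page_alloc() below + * boils down to an alloc_pages(flags, 1) followed by a pci_map_page() of + * the whole 8K.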
+ */ +static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) +{ + cas_page_t *page; + + page = kmalloc(sizeof(cas_page_t), flags); + if (!page) + return NULL; + + INIT_LIST_HEAD(&page->list); + RX_USED_SET(page, 0); + page->buffer = alloc_pages(flags, cp->page_order); + if (!page->buffer) + goto page_err; + page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, + cp->page_size, PCI_DMA_FROMDEVICE); + return page; + +page_err: + kfree(page); + return NULL; +} + +/* initialize spare pool of rx buffers, but allocate during the open */ +static void cas_spare_init(struct cas *cp) +{ + spin_lock(&cp->rx_inuse_lock); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + + spin_lock(&cp->rx_spare_lock); + INIT_LIST_HEAD(&cp->rx_spare_list); + cp->rx_spares_needed = RX_SPARE_COUNT; + spin_unlock(&cp->rx_spare_lock); +} + +/* used on close. free all the spare buffers. */ +static void cas_spare_free(struct cas *cp) +{ + struct list_head list, *elem, *tmp; + + /* free spare buffers */ + INIT_LIST_HEAD(&list); + spin_lock(&cp->rx_spare_lock); + list_splice_init(&cp->rx_spare_list, &list); + spin_unlock(&cp->rx_spare_lock); + list_for_each_safe(elem, tmp, &list) { + cas_page_free(cp, list_entry(elem, cas_page_t, list)); + } + + INIT_LIST_HEAD(&list); +#if 1 + /* + * Looks like Adrian had protected this with a different + * lock than used everywhere else to manipulate this list. + */ + spin_lock(&cp->rx_inuse_lock); + list_splice_init(&cp->rx_inuse_list, &list); + spin_unlock(&cp->rx_inuse_lock); +#else + spin_lock(&cp->rx_spare_lock); + list_splice_init(&cp->rx_inuse_list, &list); + spin_unlock(&cp->rx_spare_lock); +#endif + list_for_each_safe(elem, tmp, &list) { + cas_page_free(cp, list_entry(elem, cas_page_t, list)); + } +} + +/* replenish spares if needed */ +static void cas_spare_recover(struct cas *cp, const gfp_t flags) +{ + struct list_head list, *elem, *tmp; + int needed, i; + + /* check inuse list. if we don't need any more free buffers, + * just free it + */ + + /* make a local copy of the list */ + INIT_LIST_HEAD(&list); + spin_lock(&cp->rx_inuse_lock); + list_splice_init(&cp->rx_inuse_list, &list); + spin_unlock(&cp->rx_inuse_lock); + + list_for_each_safe(elem, tmp, &list) { + cas_page_t *page = list_entry(elem, cas_page_t, list); + + /* + * With the lockless pagecache, cassini buffering scheme gets + * slightly less accurate: we might find that a page has an + * elevated reference count here, due to a speculative ref, + * and skip it as in-use. Ideally we would be able to reclaim + * it. However this would be such a rare case, it doesn't + * matter too much as we should pick it up the next time round. + * + * Importantly, if we find that the page has a refcount of 1 + * here (our refcount), then we know it is definitely not inuse + * so we can reuse it. 
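+ * Put differently, the page_count() test below treats a count of 1 as + * "only our reference is left, so the page can be recycled or freed" and + * anything greater as "still referenced elsewhere, so leave it on the + * in-use list for a later pass".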
+ */ + if (page_count(page->buffer) > 1) + continue; + + list_del(elem); + spin_lock(&cp->rx_spare_lock); + if (cp->rx_spares_needed > 0) { + list_add(elem, &cp->rx_spare_list); + cp->rx_spares_needed--; + spin_unlock(&cp->rx_spare_lock); + } else { + spin_unlock(&cp->rx_spare_lock); + cas_page_free(cp, page); + } + } + + /* put any inuse buffers back on the list */ + if (!list_empty(&list)) { + spin_lock(&cp->rx_inuse_lock); + list_splice(&list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + } + + spin_lock(&cp->rx_spare_lock); + needed = cp->rx_spares_needed; + spin_unlock(&cp->rx_spare_lock); + if (!needed) + return; + + /* we still need spares, so try to allocate some */ + INIT_LIST_HEAD(&list); + i = 0; + while (i < needed) { + cas_page_t *spare = cas_page_alloc(cp, flags); + if (!spare) + break; + list_add(&spare->list, &list); + i++; + } + + spin_lock(&cp->rx_spare_lock); + list_splice(&list, &cp->rx_spare_list); + cp->rx_spares_needed -= i; + spin_unlock(&cp->rx_spare_lock); +} + +/* pull a page from the list. */ +static cas_page_t *cas_page_dequeue(struct cas *cp) +{ + struct list_head *entry; + int recover; + + spin_lock(&cp->rx_spare_lock); + if (list_empty(&cp->rx_spare_list)) { + /* try to do a quick recovery */ + spin_unlock(&cp->rx_spare_lock); + cas_spare_recover(cp, GFP_ATOMIC); + spin_lock(&cp->rx_spare_lock); + if (list_empty(&cp->rx_spare_list)) { + netif_err(cp, rx_err, cp->dev, + "no spare buffers available\n"); + spin_unlock(&cp->rx_spare_lock); + return NULL; + } + } + + entry = cp->rx_spare_list.next; + list_del(entry); + recover = ++cp->rx_spares_needed; + spin_unlock(&cp->rx_spare_lock); + + /* trigger the timer to do the recovery */ + if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) { +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_spare); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); + schedule_work(&cp->reset_task); +#endif + } + return list_entry(entry, cas_page_t, list); +} + + +static void cas_mif_poll(struct cas *cp, const int enable) +{ + u32 cfg; + + cfg = readl(cp->regs + REG_MIF_CFG); + cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1); + + if (cp->phy_type & CAS_PHY_MII_MDIO1) + cfg |= MIF_CFG_PHY_SELECT; + + /* poll and interrupt on link status change. */ + if (enable) { + cfg |= MIF_CFG_POLL_EN; + cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR); + cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); + } + writel((enable) ? 
~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF, + cp->regs + REG_MIF_MASK); + writel(cfg, cp->regs + REG_MIF_CFG); +} + +/* Must be invoked under cp->lock */ +static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) +{ + u16 ctl; +#if 1 + int lcntl; + int changed = 0; + int oldstate = cp->lstate; + int link_was_not_down = !(oldstate == link_down); +#endif + /* Setup link parameters */ + if (!ep) + goto start_aneg; + lcntl = cp->link_cntl; + if (ep->autoneg == AUTONEG_ENABLE) + cp->link_cntl = BMCR_ANENABLE; + else { + u32 speed = ethtool_cmd_speed(ep); + cp->link_cntl = 0; + if (speed == SPEED_100) + cp->link_cntl |= BMCR_SPEED100; + else if (speed == SPEED_1000) + cp->link_cntl |= CAS_BMCR_SPEED1000; + if (ep->duplex == DUPLEX_FULL) + cp->link_cntl |= BMCR_FULLDPLX; + } +#if 1 + changed = (lcntl != cp->link_cntl); +#endif +start_aneg: + if (cp->lstate == link_up) { + netdev_info(cp->dev, "PCS link down\n"); + } else { + if (changed) { + netdev_info(cp->dev, "link configuration changed\n"); + } + } + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + if (!cp->hw_running) + return; +#if 1 + /* + * WTZ: If the old state was link_up, we turn off the carrier + * to replicate everything we do elsewhere on a link-down + * event when we were already in a link-up state.. + */ + if (oldstate == link_up) + netif_carrier_off(cp->dev); + if (changed && link_was_not_down) { + /* + * WTZ: This branch will simply schedule a full reset after + * we explicitly changed link modes in an ioctl. See if this + * fixes the link-problems we were having for forced mode. + */ + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); + cp->timer_ticks = 0; + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); + return; + } +#endif + if (cp->phy_type & CAS_PHY_SERDES) { + u32 val = readl(cp->regs + REG_PCS_MII_CTRL); + + if (cp->link_cntl & BMCR_ANENABLE) { + val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN); + cp->lstate = link_aneg; + } else { + if (cp->link_cntl & BMCR_FULLDPLX) + val |= PCS_MII_CTRL_DUPLEX; + val &= ~PCS_MII_AUTONEG_EN; + cp->lstate = link_force_ok; + } + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + writel(val, cp->regs + REG_PCS_MII_CTRL); + + } else { + cas_mif_poll(cp, 0); + ctl = cas_phy_read(cp, MII_BMCR); + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | + CAS_BMCR_SPEED1000 | BMCR_ANENABLE); + ctl |= cp->link_cntl; + if (ctl & BMCR_ANENABLE) { + ctl |= BMCR_ANRESTART; + cp->lstate = link_aneg; + } else { + cp->lstate = link_force_ok; + } + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + cas_phy_write(cp, MII_BMCR, ctl); + cas_mif_poll(cp, 1); + } + + cp->timer_ticks = 0; + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); +} + +/* Must be invoked under cp->lock. 
*/ +static int cas_reset_mii_phy(struct cas *cp) +{ + int limit = STOP_TRIES_PHY; + u16 val; + + cas_phy_write(cp, MII_BMCR, BMCR_RESET); + udelay(100); + while (--limit) { + val = cas_phy_read(cp, MII_BMCR); + if ((val & BMCR_RESET) == 0) + break; + udelay(10); + } + return limit <= 0; +} + +static void cas_saturn_firmware_init(struct cas *cp) +{ + const struct firmware *fw; + const char fw_name[] = "/*(DEBLOBBED)*/"; + int err; + + if (PHY_NS_DP83065 != cp->phy_id) + return; + + err = reject_firmware(&fw, fw_name, &cp->pdev->dev); + if (err) { + pr_err("Failed to load firmware \"%s\"\n", + fw_name); + return; + } + if (fw->size < 2) { + pr_err("bogus length %zu in \"%s\"\n", + fw->size, fw_name); + goto out; + } + cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; + cp->fw_size = fw->size - 2; + cp->fw_data = vmalloc(cp->fw_size); + if (!cp->fw_data) + goto out; + memcpy(cp->fw_data, &fw->data[2], cp->fw_size); +out: + release_firmware(fw); +} + +static void cas_saturn_firmware_load(struct cas *cp) +{ + int i; + + if (!cp->fw_data) + return; + + cas_phy_powerdown(cp); + + /* expanded memory access mode */ + cas_phy_write(cp, DP83065_MII_MEM, 0x0); + + /* pointer configuration for new firmware */ + cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); + cas_phy_write(cp, DP83065_MII_REGD, 0xbd); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); + cas_phy_write(cp, DP83065_MII_REGD, 0x82); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); + cas_phy_write(cp, DP83065_MII_REGD, 0x0); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); + cas_phy_write(cp, DP83065_MII_REGD, 0x39); + + /* download new firmware */ + cas_phy_write(cp, DP83065_MII_MEM, 0x1); + cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr); + for (i = 0; i < cp->fw_size; i++) + cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]); + + /* enable firmware */ + cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); + cas_phy_write(cp, DP83065_MII_REGD, 0x1); +} + + +/* phy initialization */ +static void cas_phy_init(struct cas *cp) +{ + u16 val; + + /* if we're in MII/GMII mode, set up phy */ + if (CAS_PHY_MII(cp->phy_type)) { + writel(PCS_DATAPATH_MODE_MII, + cp->regs + REG_PCS_DATAPATH_MODE); + + cas_mif_poll(cp, 0); + cas_reset_mii_phy(cp); /* take out of isolate mode */ + + if (PHY_LUCENT_B0 == cp->phy_id) { + /* workaround link up/down issue with lucent */ + cas_phy_write(cp, LUCENT_MII_REG, 0x8000); + cas_phy_write(cp, MII_BMCR, 0x00f1); + cas_phy_write(cp, LUCENT_MII_REG, 0x0); + + } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { + /* workarounds for broadcom phy */ + cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); + + } else if (PHY_BROADCOM_5411 == cp->phy_id) { + val = cas_phy_read(cp, BROADCOM_MII_REG4); + val = cas_phy_read(cp, BROADCOM_MII_REG4); + if (val & 0x0080) { + /* link workaround */ + cas_phy_write(cp, BROADCOM_MII_REG4, + val & ~0x0080); + } + + } else if (cp->cas_flags & CAS_FLAG_SATURN) { + writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? + SATURN_PCFG_FSI : 0x0, + cp->regs + REG_SATURN_PCFG); + + /* load firmware to address 10Mbps auto-negotiation + * issue. 
NOTE: this will need to be changed if the + * default firmware gets fixed. + */ + if (PHY_NS_DP83065 == cp->phy_id) { + cas_saturn_firmware_load(cp); + } + cas_phy_powerup(cp); + } + + /* advertise capabilities */ + val = cas_phy_read(cp, MII_BMCR); + val &= ~BMCR_ANENABLE; + cas_phy_write(cp, MII_BMCR, val); + udelay(10); + + cas_phy_write(cp, MII_ADVERTISE, + cas_phy_read(cp, MII_ADVERTISE) | + (ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL | + CAS_ADVERTISE_PAUSE | + CAS_ADVERTISE_ASYM_PAUSE)); + + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + /* make sure that we don't advertise half + * duplex to avoid a chip issue + */ + val = cas_phy_read(cp, CAS_MII_1000_CTRL); + val &= ~CAS_ADVERTISE_1000HALF; + val |= CAS_ADVERTISE_1000FULL; + cas_phy_write(cp, CAS_MII_1000_CTRL, val); + } + + } else { + /* reset pcs for serdes */ + u32 val; + int limit; + + writel(PCS_DATAPATH_MODE_SERDES, + cp->regs + REG_PCS_DATAPATH_MODE); + + /* enable serdes pins on saturn */ + if (cp->cas_flags & CAS_FLAG_SATURN) + writel(0, cp->regs + REG_SATURN_PCFG); + + /* Reset PCS unit. */ + val = readl(cp->regs + REG_PCS_MII_CTRL); + val |= PCS_MII_RESET; + writel(val, cp->regs + REG_PCS_MII_CTRL); + + limit = STOP_TRIES; + while (--limit > 0) { + udelay(10); + if ((readl(cp->regs + REG_PCS_MII_CTRL) & + PCS_MII_RESET) == 0) + break; + } + if (limit <= 0) + netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n", + readl(cp->regs + REG_PCS_STATE_MACHINE)); + + /* Make sure PCS is disabled while changing advertisement + * configuration. + */ + writel(0x0, cp->regs + REG_PCS_CFG); + + /* Advertise all capabilities except half-duplex. */ + val = readl(cp->regs + REG_PCS_MII_ADVERT); + val &= ~PCS_MII_ADVERT_HD; + val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE | + PCS_MII_ADVERT_ASYM_PAUSE); + writel(val, cp->regs + REG_PCS_MII_ADVERT); + + /* enable PCS */ + writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); + + /* pcs workaround: enable sync detect */ + writel(PCS_SERDES_CTRL_SYNCD_EN, + cp->regs + REG_PCS_SERDES_CTRL); + } +} + + +static int cas_pcs_link_check(struct cas *cp) +{ + u32 stat, state_machine; + int retval = 0; + + /* The link status bit latches on zero, so you must + * read it twice in such a case to see a transition + * to the link being up. + */ + stat = readl(cp->regs + REG_PCS_MII_STATUS); + if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0) + stat = readl(cp->regs + REG_PCS_MII_STATUS); + + /* The remote-fault indication is only valid + * when autoneg has completed. + */ + if ((stat & (PCS_MII_STATUS_AUTONEG_COMP | + PCS_MII_STATUS_REMOTE_FAULT)) == + (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) + netif_info(cp, link, cp->dev, "PCS RemoteFault\n"); + + /* work around link detection issue by querying the PCS state + * machine directly. 
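+ * Roughly: if the state machine is not in SM_LINK_STATE_UP, the latched + * status bit is cleared (link treated as down); if word sync has been + * achieved, the bit is forced on instead, and the normal up/down handling + * below runs on the adjusted value.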
+ */ + state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); + if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) { + stat &= ~PCS_MII_STATUS_LINK_STATUS; + } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) { + stat |= PCS_MII_STATUS_LINK_STATUS; + } + + if (stat & PCS_MII_STATUS_LINK_STATUS) { + if (cp->lstate != link_up) { + if (cp->opened) { + cp->lstate = link_up; + cp->link_transition = LINK_TRANSITION_LINK_UP; + + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } + } + } else if (cp->lstate == link_up) { + cp->lstate = link_down; + if (link_transition_timeout != 0 && + cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && + !cp->link_transition_jiffies_valid) { + /* + * force a reset, as a workaround for the + * link-failure problem. May want to move this to a + * point a bit earlier in the sequence. If we had + * generated a reset a short time ago, we'll wait for + * the link timer to check the status until a + * timer expires (link_transistion_jiffies_valid is + * true when the timer is running.) Instead of using + * a system timer, we just do a check whenever the + * link timer is running - this clears the flag after + * a suitable delay. + */ + retval = 1; + cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; + cp->link_transition_jiffies = jiffies; + cp->link_transition_jiffies_valid = 1; + } else { + cp->link_transition = LINK_TRANSITION_ON_FAILURE; + } + netif_carrier_off(cp->dev); + if (cp->opened) + netif_info(cp, link, cp->dev, "PCS link down\n"); + + /* Cassini only: if you force a mode, there can be + * sync problems on link down. to fix that, the following + * things need to be checked: + * 1) read serialink state register + * 2) read pcs status register to verify link down. + * 3) if link down and serial link == 0x03, then you need + * to global reset the chip. + */ + if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { + /* should check to see if we're in a forced mode */ + stat = readl(cp->regs + REG_PCS_SERDES_STATE); + if (stat == 0x03) + return 1; + } + } else if (cp->lstate == link_down) { + if (link_transition_timeout != 0 && + cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && + !cp->link_transition_jiffies_valid) { + /* force a reset, as a workaround for the + * link-failure problem. May want to move + * this to a point a bit earlier in the + * sequence. + */ + retval = 1; + cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; + cp->link_transition_jiffies = jiffies; + cp->link_transition_jiffies_valid = 1; + } else { + cp->link_transition = LINK_TRANSITION_STILL_FAILED; + } + } + + return retval; +} + +static int cas_pcs_interrupt(struct net_device *dev, + struct cas *cp, u32 status) +{ + u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); + + if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0) + return 0; + return cas_pcs_link_check(cp); +} + +static int cas_txmac_interrupt(struct net_device *dev, + struct cas *cp, u32 status) +{ + u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); + + if (!txmac_stat) + return 0; + + netif_printk(cp, intr, KERN_DEBUG, cp->dev, + "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat); + + /* Defer timer expiration is quite normal, + * don't even log the event. 
+ */ + if ((txmac_stat & MAC_TX_DEFER_TIMER) && + !(txmac_stat & ~MAC_TX_DEFER_TIMER)) + return 0; + + spin_lock(&cp->stat_lock[0]); + if (txmac_stat & MAC_TX_UNDERRUN) { + netdev_err(dev, "TX MAC xmit underrun\n"); + cp->net_stats[0].tx_fifo_errors++; + } + + if (txmac_stat & MAC_TX_MAX_PACKET_ERR) { + netdev_err(dev, "TX MAC max packet size error\n"); + cp->net_stats[0].tx_errors++; + } + + /* The rest are all cases of one of the 16-bit TX + * counters expiring. + */ + if (txmac_stat & MAC_TX_COLL_NORMAL) + cp->net_stats[0].collisions += 0x10000; + + if (txmac_stat & MAC_TX_COLL_EXCESS) { + cp->net_stats[0].tx_aborted_errors += 0x10000; + cp->net_stats[0].collisions += 0x10000; + } + + if (txmac_stat & MAC_TX_COLL_LATE) { + cp->net_stats[0].tx_aborted_errors += 0x10000; + cp->net_stats[0].collisions += 0x10000; + } + spin_unlock(&cp->stat_lock[0]); + + /* We do not keep track of MAC_TX_COLL_FIRST and + * MAC_TX_PEAK_ATTEMPTS events. + */ + return 0; +} + +static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) +{ + cas_hp_inst_t *inst; + u32 val; + int i; + + i = 0; + while ((inst = firmware) && inst->note) { + writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); + + val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val); + val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); + + val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10); + val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop); + val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext); + val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff); + val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext); + val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff); + val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); + + val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); + ++firmware; + ++i; + } +} + +static void cas_init_rx_dma(struct cas *cp) +{ + u64 desc_dma = cp->block_dvma; + u32 val; + int i, size; + + /* rx free descriptors */ + val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL); + val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0)); + val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0)); + if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ + val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1)); + writel(val, cp->regs + REG_RX_CFG); + + val = (unsigned long) cp->init_rxds[0] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); + writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + /* rx desc 2 is for IPSEC packets. however, + * we don't it that for that purpose. 
+ */ + val = (unsigned long) cp->init_rxds[1] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + + REG_PLUS_RX_DB1_LOW); + writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + + REG_PLUS_RX_KICK1); + } + + /* rx completion registers */ + val = (unsigned long) cp->init_rxcs[0] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + /* rx comp 2-4 */ + for (i = 1; i < MAX_RX_COMP_RINGS; i++) { + val = (unsigned long) cp->init_rxcs[i] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + + REG_PLUS_RX_CBN_HI(i)); + writel((desc_dma + val) & 0xffffffff, cp->regs + + REG_PLUS_RX_CBN_LOW(i)); + } + } + + /* read selective clear regs to prevent spurious interrupts + * on reset because complete == kick. + * selective clear set up to prevent interrupts on resets + */ + readl(cp->regs + REG_INTR_STATUS_ALIAS); + writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + for (i = 1; i < N_RX_COMP_RINGS; i++) + readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); + + /* 2 is different from 3 and 4 */ + if (N_RX_COMP_RINGS > 1) + writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1, + cp->regs + REG_PLUS_ALIASN_CLEAR(1)); + + for (i = 2; i < N_RX_COMP_RINGS; i++) + writel(INTR_RX_DONE_ALT, + cp->regs + REG_PLUS_ALIASN_CLEAR(i)); + } + + /* set up pause thresholds */ + val = CAS_BASE(RX_PAUSE_THRESH_OFF, + cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); + val |= CAS_BASE(RX_PAUSE_THRESH_ON, + cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); + writel(val, cp->regs + REG_RX_PAUSE_THRESH); + + /* zero out dma reassembly buffers */ + for (i = 0; i < 64; i++) { + writel(i, cp->regs + REG_RX_TABLE_ADDR); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); + } + + /* make sure address register is 0 for normal operation */ + writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); + writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); + + /* interrupt mitigation */ +#ifdef USE_RX_BLANK + val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL); + val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL); + writel(val, cp->regs + REG_RX_BLANK); +#else + writel(0x0, cp->regs + REG_RX_BLANK); +#endif + + /* interrupt generation as a function of low water marks for + * free desc and completion entries. these are used to trigger + * housekeeping for rx descs. we don't use the free interrupt + * as it's not very useful + */ + /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */ + val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL); + writel(val, cp->regs + REG_RX_AE_THRESH); + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1)); + writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); + } + + /* Random early detect registers. useful for congestion avoidance. + * this should be tunable. + */ + writel(0x0, cp->regs + REG_RX_RED); + + /* receive page sizes. default == 2K (0x800) */ + val = 0; + if (cp->page_size == 0x1000) + val = 0x1; + else if (cp->page_size == 0x2000) + val = 0x2; + else if (cp->page_size == 0x4000) + val = 0x3; + + /* round mtu + offset. constrain to page size. 
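+ * Worked example: with a standard 1500 byte MTU, size = 1500 + 64 = 1564, + * which falls into the <= 0x800 bucket, so i = 1 and mtu_stride becomes + * 1 << 11 = 2K; the default 2K rx page then holds one such stride, while + * an 8K page (page_size 0x2000) holds four.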
*/ + size = cp->dev->mtu + 64; + if (size > cp->page_size) + size = cp->page_size; + + if (size <= 0x400) + i = 0x0; + else if (size <= 0x800) + i = 0x1; + else if (size <= 0x1000) + i = 0x2; + else + i = 0x3; + + cp->mtu_stride = 1 << (i + 10); + val = CAS_BASE(RX_PAGE_SIZE, val); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1); + writel(val, cp->regs + REG_RX_PAGE_SIZE); + + /* enable the header parser if desired */ + if (CAS_HP_FIRMWARE == cas_prog_null) + return; + + val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS); + val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK; + val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL); + writel(val, cp->regs + REG_HP_CFG); +} + +static inline void cas_rxc_init(struct cas_rx_comp *rxc) +{ + memset(rxc, 0, sizeof(*rxc)); + rxc->word4 = cpu_to_le64(RX_COMP4_ZERO); +} + +/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1] + * flipping is protected by the fact that the chip will not + * hand back the same page index while it's being processed. + */ +static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) +{ + cas_page_t *page = cp->rx_pages[1][index]; + cas_page_t *new; + + if (page_count(page->buffer) == 1) + return page; + + new = cas_page_dequeue(cp); + if (new) { + spin_lock(&cp->rx_inuse_lock); + list_add(&page->list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + } + return new; +} + +/* this needs to be changed if we actually use the ENC RX DESC ring */ +static cas_page_t *cas_page_swap(struct cas *cp, const int ring, + const int index) +{ + cas_page_t **page0 = cp->rx_pages[0]; + cas_page_t **page1 = cp->rx_pages[1]; + + /* swap if buffer is in use */ + if (page_count(page0[index]->buffer) > 1) { + cas_page_t *new = cas_page_spare(cp, index); + if (new) { + page1[index] = page0[index]; + page0[index] = new; + } + } + RX_USED_SET(page0[index], 0); + return page0[index]; +} + +static void cas_clean_rxds(struct cas *cp) +{ + /* only clean ring 0 as ring 1 is used for spare buffers */ + struct cas_rx_desc *rxd = cp->init_rxds[0]; + int i, size; + + /* release all rx flows */ + for (i = 0; i < N_RX_FLOWS; i++) { + struct sk_buff *skb; + while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { + cas_skb_release(skb); + } + } + + /* initialize descriptors */ + size = RX_DESC_RINGN_SIZE(0); + for (i = 0; i < size; i++) { + cas_page_t *page = cas_page_swap(cp, 0, i); + rxd[i].buffer = cpu_to_le64(page->dma_addr); + rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | + CAS_BASE(RX_INDEX_RING, 0)); + } + + cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; + cp->rx_last[0] = 0; + cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); +} + +static void cas_clean_rxcs(struct cas *cp) +{ + int i, j; + + /* take ownership of rx comp descriptors */ + memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); + memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); + for (i = 0; i < N_RX_COMP_RINGS; i++) { + struct cas_rx_comp *rxc = cp->init_rxcs[i]; + for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) { + cas_rxc_init(rxc + j); + } + } +} + +#if 0 +/* When we get a RX fifo overflow, the RX unit is probably hung + * so we do the following. + * + * If any part of the reset goes wrong, we return 1 and that causes the + * whole chip to be reset. + */ +static int cas_rxmac_reset(struct cas *cp) +{ + struct net_device *dev = cp->dev; + int limit; + u32 val; + + /* First, reset MAC RX. 
*/ + writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); + return 1; + } + + /* Second, disable RX DMA. */ + writel(0, cp->regs + REG_RX_CFG); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); + return 1; + } + + mdelay(5); + + /* Execute RX reset command. */ + writel(SW_RESET_RX, cp->regs + REG_SW_RESET); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); + return 1; + } + + /* reset driver rx state */ + cas_clean_rxds(cp); + cas_clean_rxcs(cp); + + /* Now, reprogram the rest of RX unit. */ + cas_init_rx_dma(cp); + + /* re-enable */ + val = readl(cp->regs + REG_RX_CFG); + writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); + writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); + val = readl(cp->regs + REG_MAC_RX_CFG); + writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + return 0; +} +#endif + +static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); + + if (!stat) + return 0; + + netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat); + + /* these are all rollovers */ + spin_lock(&cp->stat_lock[0]); + if (stat & MAC_RX_ALIGN_ERR) + cp->net_stats[0].rx_frame_errors += 0x10000; + + if (stat & MAC_RX_CRC_ERR) + cp->net_stats[0].rx_crc_errors += 0x10000; + + if (stat & MAC_RX_LEN_ERR) + cp->net_stats[0].rx_length_errors += 0x10000; + + if (stat & MAC_RX_OVERFLOW) { + cp->net_stats[0].rx_over_errors++; + cp->net_stats[0].rx_fifo_errors++; + } + + /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR + * events. + */ + spin_unlock(&cp->stat_lock[0]); + return 0; +} + +static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); + + if (!stat) + return 0; + + netif_printk(cp, intr, KERN_DEBUG, cp->dev, + "mac interrupt, stat: 0x%x\n", stat); + + /* This interrupt is just for pause frame and pause + * tracking. It is useful for diagnostics and debug + * but probably by default we will mask these events. + */ + if (stat & MAC_CTRL_PAUSE_STATE) + cp->pause_entered++; + + if (stat & MAC_CTRL_PAUSE_RECEIVED) + cp->pause_last_time_recvd = (stat >> 16); + + return 0; +} + + +/* Must be invoked under cp->lock. */ +static inline int cas_mdio_link_not_up(struct cas *cp) +{ + u16 val; + + switch (cp->lstate) { + case link_force_ret: + netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); + cas_phy_write(cp, MII_BMCR, cp->link_fcntl); + cp->timer_ticks = 5; + cp->lstate = link_force_ok; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + break; + + case link_aneg: + val = cas_phy_read(cp, MII_BMCR); + + /* Try forced modes. we try things in the following order: + * 1000 full -> 100 full/half -> 10 half + */ + val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); + val |= BMCR_FULLDPLX; + val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 
+ CAS_BMCR_SPEED1000 : BMCR_SPEED100; + cas_phy_write(cp, MII_BMCR, val); + cp->timer_ticks = 5; + cp->lstate = link_force_try; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + break; + + case link_force_try: + /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ + val = cas_phy_read(cp, MII_BMCR); + cp->timer_ticks = 5; + if (val & CAS_BMCR_SPEED1000) { /* gigabit */ + val &= ~CAS_BMCR_SPEED1000; + val |= (BMCR_SPEED100 | BMCR_FULLDPLX); + cas_phy_write(cp, MII_BMCR, val); + break; + } + + if (val & BMCR_SPEED100) { + if (val & BMCR_FULLDPLX) /* fd failed */ + val &= ~BMCR_FULLDPLX; + else { /* 100Mbps failed */ + val &= ~BMCR_SPEED100; + } + cas_phy_write(cp, MII_BMCR, val); + break; + } + default: + break; + } + return 0; +} + + +/* must be invoked with cp->lock held */ +static int cas_mii_link_check(struct cas *cp, const u16 bmsr) +{ + int restart; + + if (bmsr & BMSR_LSTATUS) { + /* Ok, here we got a link. If we had it due to a forced + * fallback, and we were configured for autoneg, we + * retry a short autoneg pass. If you know your hub is + * broken, use ethtool ;) + */ + if ((cp->lstate == link_force_try) && + (cp->link_cntl & BMCR_ANENABLE)) { + cp->lstate = link_force_ret; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + cas_mif_poll(cp, 0); + cp->link_fcntl = cas_phy_read(cp, MII_BMCR); + cp->timer_ticks = 5; + if (cp->opened) + netif_info(cp, link, cp->dev, + "Got link after fallback, retrying autoneg once...\n"); + cas_phy_write(cp, MII_BMCR, + cp->link_fcntl | BMCR_ANENABLE | + BMCR_ANRESTART); + cas_mif_poll(cp, 1); + + } else if (cp->lstate != link_up) { + cp->lstate = link_up; + cp->link_transition = LINK_TRANSITION_LINK_UP; + + if (cp->opened) { + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } + } + return 0; + } + + /* link not up. if the link was previously up, we restart the + * whole process + */ + restart = 0; + if (cp->lstate == link_up) { + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + + netif_carrier_off(cp->dev); + if (cp->opened) + netif_info(cp, link, cp->dev, "Link down\n"); + restart = 1; + + } else if (++cp->timer_ticks > 10) + cas_mdio_link_not_up(cp); + + return restart; +} + +static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MIF_STATUS); + u16 bmsr; + + /* check for a link change */ + if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0) + return 0; + + bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat); + return cas_mii_link_check(cp, bmsr); +} + +static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); + + if (!stat) + return 0; + + netdev_err(dev, "PCI error [%04x:%04x]", + stat, readl(cp->regs + REG_BIM_DIAG)); + + /* cassini+ has this reserved */ + if ((stat & PCI_ERR_BADACK) && + ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) + pr_cont(" <No ACK64# during ABS64 cycle>"); + + if (stat & PCI_ERR_DTRTO) + pr_cont(" <Delayed transaction timeout>"); + if (stat & PCI_ERR_OTHER) + pr_cont(" <other>"); + if (stat & PCI_ERR_BIM_DMA_WRITE) + pr_cont(" <BIM DMA 0 write req>"); + if (stat & PCI_ERR_BIM_DMA_READ) + pr_cont(" <BIM DMA 0 read req>"); + pr_cont("\n"); + + if (stat & PCI_ERR_OTHER) { + u16 cfg; + + /* Interrogate PCI config space for the + * true cause.
+ */ + pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); + netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg); + if (cfg & PCI_STATUS_PARITY) + netdev_err(dev, "PCI parity error detected\n"); + if (cfg & PCI_STATUS_SIG_TARGET_ABORT) + netdev_err(dev, "PCI target abort\n"); + if (cfg & PCI_STATUS_REC_TARGET_ABORT) + netdev_err(dev, "PCI master acks target abort\n"); + if (cfg & PCI_STATUS_REC_MASTER_ABORT) + netdev_err(dev, "PCI master abort\n"); + if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR) + netdev_err(dev, "PCI system error SERR#\n"); + if (cfg & PCI_STATUS_DETECTED_PARITY) + netdev_err(dev, "PCI parity error\n"); + + /* Write the error bits back to clear them. */ + cfg &= (PCI_STATUS_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR | + PCI_STATUS_DETECTED_PARITY); + pci_write_config_word(cp->pdev, PCI_STATUS, cfg); + } + + /* For all PCI errors, we should reset the chip. */ + return 1; +} + +/* All non-normal interrupt conditions get serviced here. + * Returns non-zero if we should just exit the interrupt + * handler right now (ie. if we reset the card which invalidates + * all of the other original irq status bits). + */ +static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, + u32 status) +{ + if (status & INTR_RX_TAG_ERROR) { + /* corrupt RX tag framing */ + netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, + "corrupt rx tag framing\n"); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_errors++; + spin_unlock(&cp->stat_lock[0]); + goto do_reset; + } + + if (status & INTR_RX_LEN_MISMATCH) { + /* length mismatch. */ + netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, + "length mismatch for rx frame\n"); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_errors++; + spin_unlock(&cp->stat_lock[0]); + goto do_reset; + } + + if (status & INTR_PCS_STATUS) { + if (cas_pcs_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_TX_MAC_STATUS) { + if (cas_txmac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_RX_MAC_STATUS) { + if (cas_rxmac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_MAC_CTRL_STATUS) { + if (cas_mac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_MIF_STATUS) { + if (cas_mif_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_PCI_ERROR_STATUS) { + if (cas_pci_interrupt(dev, cp, status)) + goto do_reset; + } + return 0; + +do_reset: +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + netdev_err(dev, "reset called in cas_abnormal_irq\n"); + schedule_work(&cp->reset_task); +#endif + return 1; +} + +/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when + * determining whether to do a netif_stop/wakeup + */ +#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 
2 : 1) +#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) +static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, + const int len) +{ + unsigned long off = addr + len; + + if (CAS_TABORT(cp) == 1) + return 0; + if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN) + return 0; + return TX_TARGET_ABORT_LEN; +} + +static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) +{ + struct cas_tx_desc *txds; + struct sk_buff **skbs; + struct net_device *dev = cp->dev; + int entry, count; + + spin_lock(&cp->tx_lock[ring]); + txds = cp->init_txds[ring]; + skbs = cp->tx_skbs[ring]; + entry = cp->tx_old[ring]; + + count = TX_BUFF_COUNT(ring, entry, limit); + while (entry != limit) { + struct sk_buff *skb = skbs[entry]; + dma_addr_t daddr; + u32 dlen; + int frag; + + if (!skb) { + /* this should never occur */ + entry = TX_DESC_NEXT(ring, entry); + continue; + } + + /* however, we might get only a partial skb release. */ + count -= skb_shinfo(skb)->nr_frags + + + cp->tx_tiny_use[ring][entry].nbufs + 1; + if (count < 0) + break; + + netif_printk(cp, tx_done, KERN_DEBUG, cp->dev, + "tx[%d] done, slot %d\n", ring, entry); + + skbs[entry] = NULL; + cp->tx_tiny_use[ring][entry].nbufs = 0; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + struct cas_tx_desc *txd = txds + entry; + + daddr = le64_to_cpu(txd->buffer); + dlen = CAS_VAL(TX_DESC_BUFLEN, + le64_to_cpu(txd->control)); + pci_unmap_page(cp->pdev, daddr, dlen, + PCI_DMA_TODEVICE); + entry = TX_DESC_NEXT(ring, entry); + + /* tiny buffer may follow */ + if (cp->tx_tiny_use[ring][entry].used) { + cp->tx_tiny_use[ring][entry].used = 0; + entry = TX_DESC_NEXT(ring, entry); + } + } + + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].tx_packets++; + cp->net_stats[ring].tx_bytes += skb->len; + spin_unlock(&cp->stat_lock[ring]); + dev_kfree_skb_irq(skb); + } + cp->tx_old[ring] = entry; + + /* this is wrong for multiple tx rings. the net device needs + * multiple queues for this to do the right thing. 
we wait + * for 2*packets to be available when using tiny buffers + */ + if (netif_queue_stopped(dev) && + (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) + netif_wake_queue(dev); + spin_unlock(&cp->tx_lock[ring]); +} + +static void cas_tx(struct net_device *dev, struct cas *cp, + u32 status) +{ + int limit, ring; +#ifdef USE_TX_COMPWB + u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); +#endif + netif_printk(cp, intr, KERN_DEBUG, cp->dev, + "tx interrupt, status: 0x%x, %llx\n", + status, (unsigned long long)compwb); + /* process all the rings */ + for (ring = 0; ring < N_TX_RINGS; ring++) { +#ifdef USE_TX_COMPWB + /* use the completion writeback registers */ + limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | + CAS_VAL(TX_COMPWB_LSB, compwb); + compwb = TX_COMPWB_NEXT(compwb); +#else + limit = readl(cp->regs + REG_TX_COMPN(ring)); +#endif + if (cp->tx_old[ring] != limit) + cas_tx_ringN(cp, ring, limit); + } +} + + +static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, + int entry, const u64 *words, + struct sk_buff **skbref) +{ + int dlen, hlen, len, i, alloclen; + int off, swivel = RX_SWIVEL_OFF_VAL; + struct cas_page *page; + struct sk_buff *skb; + void *addr, *crcaddr; + __sum16 csum; + char *p; + + hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); + dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); + len = hlen + dlen; + + if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) + alloclen = len; + else + alloclen = max(hlen, RX_COPY_MIN); + + skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size); + if (skb == NULL) + return -1; + + *skbref = skb; + skb_reserve(skb, swivel); + + p = skb->data; + addr = crcaddr = NULL; + if (hlen) { /* always copy header pages */ + i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + + swivel; + + i = hlen; + if (!dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, i); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + RX_USED_ADD(page, 0x100); + p += hlen; + swivel = 0; + } + + + if (alloclen < (hlen + dlen)) { + skb_frag_t *frag = skb_shinfo(skb)->frags; + + /* normal or jumbo packets. 
we use frags */ + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; + + hlen = min(cp->page_size - off, dlen); + if (hlen < 0) { + netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, + "rx page overflow: %d\n", hlen); + dev_kfree_skb_irq(skb); + return -1; + } + i = hlen; + if (i == dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + + /* make sure we always copy a header */ + swivel = 0; + if (p == (char *) skb->data) { /* not split */ + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, RX_COPY_MIN); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + off += RX_COPY_MIN; + swivel = RX_COPY_MIN; + RX_USED_ADD(page, cp->mtu_stride); + } else { + RX_USED_ADD(page, hlen); + } + skb_put(skb, alloclen); + + skb_shinfo(skb)->nr_frags++; + skb->data_len += hlen - swivel; + skb->truesize += hlen - swivel; + skb->len += hlen - swivel; + + __skb_frag_set_page(frag, page->buffer); + __skb_frag_ref(frag); + frag->page_offset = off; + skb_frag_size_set(frag, hlen - swivel); + + /* any more data? */ + if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { + hlen = dlen; + off = 0; + + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, + hlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, + hlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + + skb_shinfo(skb)->nr_frags++; + skb->data_len += hlen; + skb->len += hlen; + frag++; + + __skb_frag_set_page(frag, page->buffer); + __skb_frag_ref(frag); + frag->page_offset = 0; + skb_frag_size_set(frag, hlen); + RX_USED_ADD(page, hlen + cp->crc_size); + } + + if (cp->crc_size) { + addr = cas_page_map(page->buffer); + crcaddr = addr + off + hlen; + } + + } else { + /* copying packet */ + if (!dlen) + goto end_copy_pkt; + + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; + hlen = min(cp->page_size - off, dlen); + if (hlen < 0) { + netif_printk(cp, rx_err, KERN_DEBUG, cp->dev, + "rx page overflow: %d\n", hlen); + dev_kfree_skb_irq(skb); + return -1; + } + i = hlen; + if (i == dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, i); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + if (p == (char *) skb->data) /* not split */ + RX_USED_ADD(page, cp->mtu_stride); + else + RX_USED_ADD(page, i); + + /* any more data? 
*/ + if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { + p += hlen; + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, + dlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr, dlen + cp->crc_size); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, + dlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + RX_USED_ADD(page, dlen + cp->crc_size); + } +end_copy_pkt: + if (cp->crc_size) { + addr = NULL; + crcaddr = skb->data + alloclen; + } + skb_put(skb, alloclen); + } + + csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); + if (cp->crc_size) { + /* checksum includes FCS. strip it out. */ + csum = csum_fold(csum_partial(crcaddr, cp->crc_size, + csum_unfold(csum))); + if (addr) + cas_page_unmap(addr); + } + skb->protocol = eth_type_trans(skb, cp->dev); + if (skb->protocol == htons(ETH_P_IP)) { + skb->csum = csum_unfold(~csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } else + skb_checksum_none_assert(skb); + return len; +} + + +/* we can handle up to 64 rx flows at a time. we do the same thing + * as nonreassm except that we batch up the buffers. + * NOTE: we currently just treat each flow as a bunch of packets that + * we pass up. a better way would be to coalesce the packets + * into a jumbo packet. to do that, we need to do the following: + * 1) the first packet will have a clean split between header and + * data. save both. + * 2) each time the next flow packet comes in, extend the + * data length and merge the checksums. + * 3) on flow release, fix up the header. + * 4) make sure the higher layer doesn't care. + * because packets get coalesced, we shouldn't run into fragment count + * issues. + */ +static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, + struct sk_buff *skb) +{ + int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); + struct sk_buff_head *flow = &cp->rx_flows[flowid]; + + /* this is protected at a higher layer, so no need to + * do any additional locking here. stick the buffer + * at the end. + */ + __skb_queue_tail(flow, skb); + if (words[0] & RX_COMP1_RELEASE_FLOW) { + while ((skb = __skb_dequeue(flow))) { + cas_skb_release(skb); + } + } +} + +/* put rx descriptor back on ring. if a buffer is in use by a higher + * layer, this will need to put in a replacement. + */ +static void cas_post_page(struct cas *cp, const int ring, const int index) +{ + cas_page_t *new; + int entry; + + entry = cp->rx_old[ring]; + + new = cas_page_swap(cp, ring, index); + cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); + cp->init_rxds[ring][entry].index = + cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | + CAS_BASE(RX_INDEX_RING, ring)); + + entry = RX_DESC_ENTRY(ring, entry + 1); + cp->rx_old[ring] = entry; + + if (entry % 4) + return; + + if (ring == 0) + writel(entry, cp->regs + REG_RX_KICK); + else if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) + writel(entry, cp->regs + REG_PLUS_RX_KICK1); +} + + +/* only when things are bad */ +static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) +{ + unsigned int entry, last, count, released; + int cluster; + cas_page_t **page = cp->rx_pages[ring]; + + entry = cp->rx_old[ring]; + + netif_printk(cp, intr, KERN_DEBUG, cp->dev, + "rxd[%d] interrupt, done: %d\n", ring, entry); + + cluster = -1; + count = entry & 0x3; + last = RX_DESC_ENTRY(ring, num ? 
entry + num - 4: entry - 4); + released = 0; + while (entry != last) { + /* make a new buffer if it's still in use */ + if (page_count(page[entry]->buffer) > 1) { + cas_page_t *new = cas_page_dequeue(cp); + if (!new) { + /* let the timer know that we need to + * do this again + */ + cp->cas_flags |= CAS_FLAG_RXD_POST(ring); + if (!timer_pending(&cp->link_timer)) + mod_timer(&cp->link_timer, jiffies + + CAS_LINK_FAST_TIMEOUT); + cp->rx_old[ring] = entry; + cp->rx_last[ring] = num ? num - released : 0; + return -ENOMEM; + } + spin_lock(&cp->rx_inuse_lock); + list_add(&page[entry]->list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + cp->init_rxds[ring][entry].buffer = + cpu_to_le64(new->dma_addr); + page[entry] = new; + + } + + if (++count == 4) { + cluster = entry; + count = 0; + } + released++; + entry = RX_DESC_ENTRY(ring, entry + 1); + } + cp->rx_old[ring] = entry; + + if (cluster < 0) + return 0; + + if (ring == 0) + writel(cluster, cp->regs + REG_RX_KICK); + else if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) + writel(cluster, cp->regs + REG_PLUS_RX_KICK1); + return 0; +} + + +/* process a completion ring. packets are set up in three basic ways: + * small packets: should be copied header + data in single buffer. + * large packets: header and data in a single buffer. + * split packets: header in a separate buffer from data. + * data may be in multiple pages. data may be > 256 + * bytes but in a single page. + * + * NOTE: RX page posting is done in this routine as well. while there's + * the capability of using multiple RX completion rings, it isn't + * really worthwhile due to the fact that the page posting will + * force serialization on the single descriptor ring. + */ +static int cas_rx_ringN(struct cas *cp, int ring, int budget) +{ + struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; + int entry, drops; + int npackets = 0; + + netif_printk(cp, intr, KERN_DEBUG, cp->dev, + "rx[%d] interrupt, done: %d/%d\n", + ring, + readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]); + + entry = cp->rx_new[ring]; + drops = 0; + while (1) { + struct cas_rx_comp *rxc = rxcs + entry; + struct sk_buff *uninitialized_var(skb); + int type, len; + u64 words[4]; + int i, dring; + + words[0] = le64_to_cpu(rxc->word1); + words[1] = le64_to_cpu(rxc->word2); + words[2] = le64_to_cpu(rxc->word3); + words[3] = le64_to_cpu(rxc->word4); + + /* don't touch if still owned by hw */ + type = CAS_VAL(RX_COMP1_TYPE, words[0]); + if (type == 0) + break; + + /* hw hasn't cleared the zero bit yet */ + if (words[3] & RX_COMP4_ZERO) { + break; + } + + /* get info on the packet */ + if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].rx_errors++; + if (words[3] & RX_COMP4_LEN_MISMATCH) + cp->net_stats[ring].rx_length_errors++; + if (words[3] & RX_COMP4_BAD) + cp->net_stats[ring].rx_crc_errors++; + spin_unlock(&cp->stat_lock[ring]); + + /* We'll just return it to Cassini. */ + drop_it: + spin_lock(&cp->stat_lock[ring]); + ++cp->net_stats[ring].rx_dropped; + spin_unlock(&cp->stat_lock[ring]); + goto next; + } + + len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); + if (len < 0) { + ++drops; + goto drop_it; + } + + /* see if it's a flow re-assembly or not. the driver + * itself handles release back up. 
+ */ + if (RX_DONT_BATCH || (type == 0x2)) { + /* non-reassm: these always get released */ + cas_skb_release(skb); + } else { + cas_rx_flow_pkt(cp, words, skb); + } + + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].rx_packets++; + cp->net_stats[ring].rx_bytes += len; + spin_unlock(&cp->stat_lock[ring]); + + next: + npackets++; + + /* should it be released? */ + if (words[0] & RX_COMP1_RELEASE_HDR) { + i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + if (words[0] & RX_COMP1_RELEASE_DATA) { + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + if (words[0] & RX_COMP1_RELEASE_NEXT) { + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + /* skip to the next entry */ + entry = RX_COMP_ENTRY(ring, entry + 1 + + CAS_VAL(RX_COMP1_SKIP, words[0])); +#ifdef USE_NAPI + if (budget && (npackets >= budget)) + break; +#endif + } + cp->rx_new[ring] = entry; + + if (drops) + netdev_info(cp->dev, "Memory squeeze, deferring packet\n"); + return npackets; +} + + +/* put completion entries back on the ring */ +static void cas_post_rxcs_ringN(struct net_device *dev, + struct cas *cp, int ring) +{ + struct cas_rx_comp *rxc = cp->init_rxcs[ring]; + int last, entry; + + last = cp->rx_cur[ring]; + entry = cp->rx_new[ring]; + netif_printk(cp, intr, KERN_DEBUG, dev, + "rxc[%d] interrupt, done: %d/%d\n", + ring, readl(cp->regs + REG_RX_COMP_HEAD), entry); + + /* zero and re-mark descriptors */ + while (last != entry) { + cas_rxc_init(rxc + last); + last = RX_COMP_ENTRY(ring, last + 1); + } + cp->rx_cur[ring] = last; + + if (ring == 0) + writel(last, cp->regs + REG_RX_COMP_TAIL); + else if (cp->cas_flags & CAS_FLAG_REG_PLUS) + writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); +} + + + +/* cassini can use all four PCI interrupts for the completion ring. + * rings 3 and 4 are identical + */ +#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +static inline void cas_handle_irqN(struct net_device *dev, + struct cas *cp, const u32 status, + const int ring) +{ + if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) + cas_post_rxcs_ringN(dev, cp, ring); +} + +static irqreturn_t cas_interruptN(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; + u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); + + /* check for shared irq */ + if (status == 0) + return IRQ_NONE; + + spin_lock_irqsave(&cp->lock, flags); + if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ +#ifdef USE_NAPI + cas_mask_intr(cp); + napi_schedule(&cp->napi); +#else + cas_rx_ringN(cp, ring, 0); +#endif + status &= ~INTR_RX_DONE_ALT; + } + + if (status) + cas_handle_irqN(dev, cp, status, ring); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} +#endif + +#ifdef USE_PCI_INTB +/* everything but rx packets */ +static inline void cas_handle_irq1(struct cas *cp, const u32 status) +{ + if (status & INTR_RX_BUF_UNAVAIL_1) { + /* Frame arrived, no free RX buffers available. + * NOTE: we can get this on a link transition. 
*/ + cas_post_rxds_ringN(cp, 1, 0); + spin_lock(&cp->stat_lock[1]); + cp->net_stats[1].rx_dropped++; + spin_unlock(&cp->stat_lock[1]); + } + + if (status & INTR_RX_BUF_AE_1) + cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - + RX_AE_FREEN_VAL(1)); + + if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) + cas_post_rxcs_ringN(cp, 1); +} + +/* ring 2 handles a few more events than 3 and 4 */ +static irqreturn_t cas_interrupt1(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); + + /* check for shared interrupt */ + if (status == 0) + return IRQ_NONE; + + spin_lock_irqsave(&cp->lock, flags); + if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ +#ifdef USE_NAPI + cas_mask_intr(cp); + napi_schedule(&cp->napi); +#else + cas_rx_ringN(cp, 1, 0); +#endif + status &= ~INTR_RX_DONE_ALT; + } + if (status) + cas_handle_irq1(cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} +#endif + +static inline void cas_handle_irq(struct net_device *dev, + struct cas *cp, const u32 status) +{ + /* housekeeping interrupts */ + if (status & INTR_ERROR_MASK) + cas_abnormal_irq(dev, cp, status); + + if (status & INTR_RX_BUF_UNAVAIL) { + /* Frame arrived, no free RX buffers available. + * NOTE: we can get this on a link transition. + */ + cas_post_rxds_ringN(cp, 0, 0); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_dropped++; + spin_unlock(&cp->stat_lock[0]); + } else if (status & INTR_RX_BUF_AE) { + cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - + RX_AE_FREEN_VAL(0)); + } + + if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) + cas_post_rxcs_ringN(dev, cp, 0); +} + +static irqreturn_t cas_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + u32 status = readl(cp->regs + REG_INTR_STATUS); + + if (status == 0) + return IRQ_NONE; + + spin_lock_irqsave(&cp->lock, flags); + if (status & (INTR_TX_ALL | INTR_TX_INTME)) { + cas_tx(dev, cp, status); + status &= ~(INTR_TX_ALL | INTR_TX_INTME); + } + + if (status & INTR_RX_DONE) { +#ifdef USE_NAPI + cas_mask_intr(cp); + napi_schedule(&cp->napi); +#else + cas_rx_ringN(cp, 0, 0); +#endif + status &= ~INTR_RX_DONE; + } + + if (status) + cas_handle_irq(dev, cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} + + +#ifdef USE_NAPI +static int cas_poll(struct napi_struct *napi, int budget) +{ + struct cas *cp = container_of(napi, struct cas, napi); + struct net_device *dev = cp->dev; + int i, enable_intr, credits; + u32 status = readl(cp->regs + REG_INTR_STATUS); + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + cas_tx(dev, cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + + /* NAPI rx packets. 
we spread the credits across all of the + * rxc rings + * + * to make sure we're fair with the work we loop through each + * ring N_RX_COMP_RING times with a request of + * budget / N_RX_COMP_RINGS + */ + enable_intr = 1; + credits = 0; + for (i = 0; i < N_RX_COMP_RINGS; i++) { + int j; + for (j = 0; j < N_RX_COMP_RINGS; j++) { + credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); + if (credits >= budget) { + enable_intr = 0; + goto rx_comp; + } + } + } + +rx_comp: + /* final rx completion */ + spin_lock_irqsave(&cp->lock, flags); + if (status) + cas_handle_irq(dev, cp, status); + +#ifdef USE_PCI_INTB + if (N_RX_COMP_RINGS > 1) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); + if (status) + cas_handle_irq1(dev, cp, status); + } +#endif + +#ifdef USE_PCI_INTC + if (N_RX_COMP_RINGS > 2) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); + if (status) + cas_handle_irqN(dev, cp, status, 2); + } +#endif + +#ifdef USE_PCI_INTD + if (N_RX_COMP_RINGS > 3) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); + if (status) + cas_handle_irqN(dev, cp, status, 3); + } +#endif + spin_unlock_irqrestore(&cp->lock, flags); + if (enable_intr) { + napi_complete(napi); + cas_unmask_intr(cp); + } + return credits; +} +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cas_netpoll(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + cas_disable_irq(cp, 0); + cas_interrupt(cp->pdev->irq, dev); + cas_enable_irq(cp, 0); + +#ifdef USE_PCI_INTB + if (N_RX_COMP_RINGS > 1) { + /* cas_interrupt1(); */ + } +#endif +#ifdef USE_PCI_INTC + if (N_RX_COMP_RINGS > 2) { + /* cas_interruptN(); */ + } +#endif +#ifdef USE_PCI_INTD + if (N_RX_COMP_RINGS > 3) { + /* cas_interruptN(); */ + } +#endif +} +#endif + +static void cas_tx_timeout(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + netdev_err(dev, "transmit timed out, resetting\n"); + if (!cp->hw_running) { + netdev_err(dev, "hrm.. hw not running!\n"); + return; + } + + netdev_err(dev, "MIF_STATE[%08x]\n", + readl(cp->regs + REG_MIF_STATE_MACHINE)); + + netdev_err(dev, "MAC_STATE[%08x]\n", + readl(cp->regs + REG_MAC_STATE_MACHINE)); + + netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", + readl(cp->regs + REG_TX_CFG), + readl(cp->regs + REG_MAC_TX_STATUS), + readl(cp->regs + REG_MAC_TX_CFG), + readl(cp->regs + REG_TX_FIFO_PKT_CNT), + readl(cp->regs + REG_TX_FIFO_WRITE_PTR), + readl(cp->regs + REG_TX_FIFO_READ_PTR), + readl(cp->regs + REG_TX_SM_1), + readl(cp->regs + REG_TX_SM_2)); + + netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", + readl(cp->regs + REG_RX_CFG), + readl(cp->regs + REG_MAC_RX_STATUS), + readl(cp->regs + REG_MAC_RX_CFG)); + + netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n", + readl(cp->regs + REG_HP_STATE_MACHINE), + readl(cp->regs + REG_HP_STATUS0), + readl(cp->regs + REG_HP_STATUS1), + readl(cp->regs + REG_HP_STATUS2)); + +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + schedule_work(&cp->reset_task); +#endif +} + +static inline int cas_intme(int ring, int entry) +{ + /* Algorithm: IRQ every 1/2 of descriptors. 
*/ + if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) + return 1; + return 0; +} + + +static void cas_write_txd(struct cas *cp, int ring, int entry, + dma_addr_t mapping, int len, u64 ctrl, int last) +{ + struct cas_tx_desc *txd = cp->init_txds[ring] + entry; + + ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); + if (cas_intme(ring, entry)) + ctrl |= TX_DESC_INTME; + if (last) + ctrl |= TX_DESC_EOF; + txd->control = cpu_to_le64(ctrl); + txd->buffer = cpu_to_le64(mapping); +} + +static inline void *tx_tiny_buf(struct cas *cp, const int ring, + const int entry) +{ + return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; +} + +static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, + const int entry, const int tentry) +{ + cp->tx_tiny_use[ring][tentry].nbufs++; + cp->tx_tiny_use[ring][entry].used = 1; + return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; +} + +static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, + struct sk_buff *skb) +{ + struct net_device *dev = cp->dev; + int entry, nr_frags, frag, tabort, tentry; + dma_addr_t mapping; + unsigned long flags; + u64 ctrl; + u32 len; + + spin_lock_irqsave(&cp->tx_lock[ring], flags); + + /* This is a hard error, log it. */ + if (TX_BUFFS_AVAIL(cp, ring) <= + CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&cp->tx_lock[ring], flags); + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + return 1; + } + + ctrl = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const u64 csum_start_off = skb_checksum_start_offset(skb); + const u64 csum_stuff_off = csum_start_off + skb->csum_offset; + + ctrl = TX_DESC_CSUM_EN | + CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | + CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); + } + + entry = cp->tx_new[ring]; + cp->tx_skbs[ring][entry] = skb; + + nr_frags = skb_shinfo(skb)->nr_frags; + len = skb_headlen(skb); + mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), len, + PCI_DMA_TODEVICE); + + tentry = entry; + tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); + if (unlikely(tabort)) { + /* NOTE: len is always > tabort */ + cas_write_txd(cp, ring, entry, mapping, len - tabort, + ctrl | TX_DESC_SOF, 0); + entry = TX_DESC_NEXT(ring, entry); + + skb_copy_from_linear_data_offset(skb, len - tabort, + tx_tiny_buf(cp, ring, entry), tabort); + mapping = tx_tiny_map(cp, ring, entry, tentry); + cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, + (nr_frags == 0)); + } else { + cas_write_txd(cp, ring, entry, mapping, len, ctrl | + TX_DESC_SOF, (nr_frags == 0)); + } + entry = TX_DESC_NEXT(ring, entry); + + for (frag = 0; frag < nr_frags; frag++) { + const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; + + len = skb_frag_size(fragp); + mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, + DMA_TO_DEVICE); + + tabort = cas_calc_tabort(cp, fragp->page_offset, len); + if (unlikely(tabort)) { + void *addr; + + /* NOTE: len is always > tabort */ + cas_write_txd(cp, ring, entry, mapping, len - tabort, + ctrl, 0); + entry = TX_DESC_NEXT(ring, entry); + + addr = cas_page_map(skb_frag_page(fragp)); + memcpy(tx_tiny_buf(cp, ring, entry), + addr + fragp->page_offset + len - tabort, + tabort); + cas_page_unmap(addr); + mapping = tx_tiny_map(cp, ring, entry, tentry); + len = tabort; + } + + cas_write_txd(cp, ring, entry, mapping, len, ctrl, + (frag + 1 == nr_frags)); + entry = TX_DESC_NEXT(ring, entry); + } + + cp->tx_new[ring] = entry; + if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) + 
netif_stop_queue(dev); + + netif_printk(cp, tx_queued, KERN_DEBUG, dev, + "tx[%d] queued, slot %d, skblen %d, avail %d\n", + ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring)); + writel(entry, cp->regs + REG_TX_KICKN(ring)); + spin_unlock_irqrestore(&cp->tx_lock[ring], flags); + return 0; +} + +static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + /* this is only used as a load-balancing hint, so it doesn't + * need to be SMP safe + */ + static int ring; + + if (skb_padto(skb, cp->min_frame_size)) + return NETDEV_TX_OK; + + /* XXX: we need some higher-level QoS hooks to steer packets to + * individual queues. + */ + if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) + return NETDEV_TX_BUSY; + return NETDEV_TX_OK; +} + +static void cas_init_tx_dma(struct cas *cp) +{ + u64 desc_dma = cp->block_dvma; + unsigned long off; + u32 val; + int i; + + /* set up tx completion writeback registers. must be 8-byte aligned */ +#ifdef USE_TX_COMPWB + off = offsetof(struct cas_init_block, tx_compwb); + writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); + writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); +#endif + + /* enable completion writebacks, enable paced mode, + * disable read pipe, and disable pre-interrupt compwbs + */ + val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | + TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | + TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | + TX_CFG_INTR_COMPWB_DIS; + + /* write out tx ring info and tx desc bases */ + for (i = 0; i < MAX_TX_RINGS; i++) { + off = (unsigned long) cp->init_txds[i] - + (unsigned long) cp->init_block; + + val |= CAS_TX_RINGN_BASE(i); + writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); + writel((desc_dma + off) & 0xffffffff, cp->regs + + REG_TX_DBN_LOW(i)); + /* don't zero out the kick register here as the system + * will wedge + */ + } + writel(val, cp->regs + REG_TX_CFG); + + /* program max burst sizes. these numbers should be different + * if doing QoS. + */ +#ifdef USE_QOS + writel(0x800, cp->regs + REG_TX_MAXBURST_0); + writel(0x1600, cp->regs + REG_TX_MAXBURST_1); + writel(0x2400, cp->regs + REG_TX_MAXBURST_2); + writel(0x4800, cp->regs + REG_TX_MAXBURST_3); +#else + writel(0x800, cp->regs + REG_TX_MAXBURST_0); + writel(0x800, cp->regs + REG_TX_MAXBURST_1); + writel(0x800, cp->regs + REG_TX_MAXBURST_2); + writel(0x800, cp->regs + REG_TX_MAXBURST_3); +#endif +} + +/* Must be invoked under cp->lock. */ +static inline void cas_init_dma(struct cas *cp) +{ + cas_init_tx_dma(cp); + cas_init_rx_dma(cp); +} + +static void cas_process_mc_list(struct cas *cp) +{ + u16 hash_table[16]; + u32 crc; + struct netdev_hw_addr *ha; + int i = 1; + + memset(hash_table, 0, sizeof(hash_table)); + netdev_for_each_mc_addr(ha, cp->dev) { + if (i <= CAS_MC_EXACT_MATCH_SIZE) { + /* use the alternate mac address registers for the + * first 15 multicast addresses + */ + writel((ha->addr[4] << 8) | ha->addr[5], + cp->regs + REG_MAC_ADDRN(i*3 + 0)); + writel((ha->addr[2] << 8) | ha->addr[3], + cp->regs + REG_MAC_ADDRN(i*3 + 1)); + writel((ha->addr[0] << 8) | ha->addr[1], + cp->regs + REG_MAC_ADDRN(i*3 + 2)); + i++; + } + else { + /* use hw hash table for the next series of + * multicast addresses + */ + crc = ether_crc_le(ETH_ALEN, ha->addr); + crc >>= 24; + hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); + } + } + for (i = 0; i < 16; i++) + writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); +} + +/* Must be invoked under cp->lock. 
*/ +static u32 cas_setup_multicast(struct cas *cp) +{ + u32 rxcfg = 0; + int i; + + if (cp->dev->flags & IFF_PROMISC) { + rxcfg |= MAC_RX_CFG_PROMISC_EN; + + } else if (cp->dev->flags & IFF_ALLMULTI) { + for (i=0; i < 16; i++) + writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); + rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; + + } else { + cas_process_mc_list(cp); + rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; + } + + return rxcfg; +} + +/* must be invoked under cp->stat_lock[N_TX_RINGS] */ +static void cas_clear_mac_err(struct cas *cp) +{ + writel(0, cp->regs + REG_MAC_COLL_NORMAL); + writel(0, cp->regs + REG_MAC_COLL_FIRST); + writel(0, cp->regs + REG_MAC_COLL_EXCESS); + writel(0, cp->regs + REG_MAC_COLL_LATE); + writel(0, cp->regs + REG_MAC_TIMER_DEFER); + writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); + writel(0, cp->regs + REG_MAC_RECV_FRAME); + writel(0, cp->regs + REG_MAC_LEN_ERR); + writel(0, cp->regs + REG_MAC_ALIGN_ERR); + writel(0, cp->regs + REG_MAC_FCS_ERR); + writel(0, cp->regs + REG_MAC_RX_CODE_ERR); +} + + +static void cas_mac_reset(struct cas *cp) +{ + int i; + + /* do both TX and RX reset */ + writel(0x1, cp->regs + REG_MAC_TX_RESET); + writel(0x1, cp->regs + REG_MAC_RX_RESET); + + /* wait for TX */ + i = STOP_TRIES; + while (i-- > 0) { + if (readl(cp->regs + REG_MAC_TX_RESET) == 0) + break; + udelay(10); + } + + /* wait for RX */ + i = STOP_TRIES; + while (i-- > 0) { + if (readl(cp->regs + REG_MAC_RX_RESET) == 0) + break; + udelay(10); + } + + if (readl(cp->regs + REG_MAC_TX_RESET) | + readl(cp->regs + REG_MAC_RX_RESET)) + netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", + readl(cp->regs + REG_MAC_TX_RESET), + readl(cp->regs + REG_MAC_RX_RESET), + readl(cp->regs + REG_MAC_STATE_MACHINE)); +} + + +/* Must be invoked under cp->lock. */ +static void cas_init_mac(struct cas *cp) +{ + unsigned char *e = &cp->dev->dev_addr[0]; + int i; + cas_mac_reset(cp); + + /* setup core arbitration weight register */ + writel(CAWR_RR_DIS, cp->regs + REG_CAWR); + + /* XXX Use pci_dma_burst_advice() */ +#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) + /* set the infinite burst register for chips that don't have + * pci issues. + */ + if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) + writel(INF_BURST_EN, cp->regs + REG_INF_BURST); +#endif + + writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); + + writel(0x00, cp->regs + REG_MAC_IPG0); + writel(0x08, cp->regs + REG_MAC_IPG1); + writel(0x04, cp->regs + REG_MAC_IPG2); + + /* change later for 802.3z */ + writel(0x40, cp->regs + REG_MAC_SLOT_TIME); + + /* min frame + FCS */ + writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); + + /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we + * specify the maximum frame size to prevent RX tag errors on + * oversized frames. + */ + writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | + CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, + (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), + cp->regs + REG_MAC_FRAMESIZE_MAX); + + /* NOTE: crc_size is used as a surrogate for half-duplex. + * workaround saturn half-duplex issue by increasing preamble + * size to 65 bytes. 
+ */ + if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) + writel(0x41, cp->regs + REG_MAC_PA_SIZE); + else + writel(0x07, cp->regs + REG_MAC_PA_SIZE); + writel(0x04, cp->regs + REG_MAC_JAM_SIZE); + writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); + writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); + + writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); + + writel(0, cp->regs + REG_MAC_ADDR_FILTER0); + writel(0, cp->regs + REG_MAC_ADDR_FILTER1); + writel(0, cp->regs + REG_MAC_ADDR_FILTER2); + writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); + writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); + + /* setup mac address in perfect filter array */ + for (i = 0; i < 45; i++) + writel(0x0, cp->regs + REG_MAC_ADDRN(i)); + + writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); + writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); + writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); + + writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); + writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); + writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); + + cp->mac_rx_cfg = cas_setup_multicast(cp); + + spin_lock(&cp->stat_lock[N_TX_RINGS]); + cas_clear_mac_err(cp); + spin_unlock(&cp->stat_lock[N_TX_RINGS]); + + /* Setup MAC interrupts. We want to get all of the interesting + * counter expiration events, but we do not want to hear about + * normal rx/tx as the DMA engine tells us that. + */ + writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); + writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); + + /* Don't enable even the PAUSE interrupts for now, we + * make no use of those events other than to record them. + */ + writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); +} + +/* Must be invoked under cp->lock. */ +static void cas_init_pause_thresholds(struct cas *cp) +{ + /* Calculate pause thresholds. Setting the OFF threshold to the + * full RX fifo size effectively disables PAUSE generation + */ + if (cp->rx_fifo_size <= (2 * 1024)) { + cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; + } else { + int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; + if (max_frame * 3 > cp->rx_fifo_size) { + cp->rx_pause_off = 7104; + cp->rx_pause_on = 960; + } else { + int off = (cp->rx_fifo_size - (max_frame * 2)); + int on = off - max_frame; + cp->rx_pause_off = off; + cp->rx_pause_on = on; + } + } +} + +static int cas_vpd_match(const void __iomem *p, const char *str) +{ + int len = strlen(str) + 1; + int i; + + for (i = 0; i < len; i++) { + if (readb(p + i) != str[i]) + return 0; + } + return 1; +} + + +/* get the mac address by reading the vpd information in the rom. + * also get the phy type and determine if there's an entropy generator. + * NOTE: this is a bit convoluted for the following reasons: + * 1) vpd info has order-dependent mac addresses for multinic cards + * 2) the only way to determine the nic order is to use the slot + * number. + * 3) fiber cards don't have bridges, so their slot numbers don't + * mean anything. + * 4) we don't actually know we have a fiber card until after + * the mac addresses are parsed. 
+ */ +static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, + const int offset) +{ + void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; + void __iomem *base, *kstart; + int i, len; + int found = 0; +#define VPD_FOUND_MAC 0x01 +#define VPD_FOUND_PHY 0x02 + + int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ + int mac_off = 0; + +#if defined(CONFIG_SPARC) + const unsigned char *addr; +#endif + + /* give us access to the PROM */ + writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, + cp->regs + REG_BIM_LOCAL_DEV_EN); + + /* check for an expansion rom */ + if (readb(p) != 0x55 || readb(p + 1) != 0xaa) + goto use_random_mac_addr; + + /* search for beginning of vpd */ + base = NULL; + for (i = 2; i < EXPANSION_ROM_SIZE; i++) { + /* check for PCIR */ + if ((readb(p + i + 0) == 0x50) && + (readb(p + i + 1) == 0x43) && + (readb(p + i + 2) == 0x49) && + (readb(p + i + 3) == 0x52)) { + base = p + (readb(p + i + 8) | + (readb(p + i + 9) << 8)); + break; + } + } + + if (!base || (readb(base) != 0x82)) + goto use_random_mac_addr; + + i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; + while (i < EXPANSION_ROM_SIZE) { + if (readb(base + i) != 0x90) /* no vpd found */ + goto use_random_mac_addr; + + /* found a vpd field */ + len = readb(base + i + 1) | (readb(base + i + 2) << 8); + + /* extract keywords */ + kstart = base + i + 3; + p = kstart; + while ((p - kstart) < len) { + int klen = readb(p + 2); + int j; + char type; + + p += 3; + + /* look for the following things: + * -- correct length == 29 + * 3 (type) + 2 (size) + + * 18 (strlen("local-mac-address") + 1) + + * 6 (mac addr) + * -- VPD Instance 'I' + * -- VPD Type Bytes 'B' + * -- VPD data length == 6 + * -- property string == local-mac-address + * + * -- correct length == 24 + * 3 (type) + 2 (size) + + * 12 (strlen("entropy-dev") + 1) + + * 7 (strlen("vms110") + 1) + * -- VPD Instance 'I' + * -- VPD Type String 'B' + * -- VPD data length == 7 + * -- property string == entropy-dev + * + * -- correct length == 18 + * 3 (type) + 2 (size) + + * 9 (strlen("phy-type") + 1) + + * 4 (strlen("pcs") + 1) + * -- VPD Instance 'I' + * -- VPD Type String 'S' + * -- VPD data length == 4 + * -- property string == phy-type + * + * -- correct length == 23 + * 3 (type) + 2 (size) + + * 14 (strlen("phy-interface") + 1) + + * 4 (strlen("pcs") + 1) + * -- VPD Instance 'I' + * -- VPD Type String 'S' + * -- VPD data length == 4 + * -- property string == phy-interface + */ + if (readb(p) != 'I') + goto next; + + /* finally, check string and length */ + type = readb(p + 3); + if (type == 'B') { + if ((klen == 29) && readb(p + 4) == 6 && + cas_vpd_match(p + 5, + "local-mac-address")) { + if (mac_off++ > offset) + goto next; + + /* set mac address */ + for (j = 0; j < 6; j++) + dev_addr[j] = + readb(p + 23 + j); + goto found_mac; + } + } + + if (type != 'S') + goto next; + +#ifdef USE_ENTROPY_DEV + if ((klen == 24) && + cas_vpd_match(p + 5, "entropy-dev") && + cas_vpd_match(p + 17, "vms110")) { + cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; + goto next; + } +#endif + + if (found & VPD_FOUND_PHY) + goto next; + + if ((klen == 18) && readb(p + 4) == 4 && + cas_vpd_match(p + 5, "phy-type")) { + if (cas_vpd_match(p + 14, "pcs")) { + phy_type = CAS_PHY_SERDES; + goto found_phy; + } + } + + if ((klen == 23) && readb(p + 4) == 4 && + cas_vpd_match(p + 5, "phy-interface")) { + if (cas_vpd_match(p + 19, "pcs")) { + phy_type = CAS_PHY_SERDES; + goto found_phy; + } + } +found_mac: + found |= VPD_FOUND_MAC; + goto next; + +found_phy: + found |= VPD_FOUND_PHY; + 
+next: + p += klen; + } + i += len + 3; + } + +use_random_mac_addr: + if (found & VPD_FOUND_MAC) + goto done; + +#if defined(CONFIG_SPARC) + addr = of_get_property(cp->of_node, "local-mac-address", NULL); + if (addr != NULL) { + memcpy(dev_addr, addr, ETH_ALEN); + goto done; + } +#endif + + /* Sun MAC prefix then 3 random bytes. */ + pr_info("MAC address not found in ROM VPD\n"); + dev_addr[0] = 0x08; + dev_addr[1] = 0x00; + dev_addr[2] = 0x20; + get_random_bytes(dev_addr + 3, 3); + +done: + writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); + return phy_type; +} + +/* check pci invariants */ +static void cas_check_pci_invariants(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + + cp->cas_flags = 0; + if ((pdev->vendor == PCI_VENDOR_ID_SUN) && + (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { + if (pdev->revision >= CAS_ID_REVPLUS) + cp->cas_flags |= CAS_FLAG_REG_PLUS; + if (pdev->revision < CAS_ID_REVPLUS02u) + cp->cas_flags |= CAS_FLAG_TARGET_ABORT; + + /* Original Cassini supports HW CSUM, but it's not + * enabled by default as it can trigger TX hangs. + */ + if (pdev->revision < CAS_ID_REV2) + cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; + } else { + /* Only sun has original cassini chips. */ + cp->cas_flags |= CAS_FLAG_REG_PLUS; + + /* We use a flag because the same phy might be externally + * connected. + */ + if ((pdev->vendor == PCI_VENDOR_ID_NS) && + (pdev->device == PCI_DEVICE_ID_NS_SATURN)) + cp->cas_flags |= CAS_FLAG_SATURN; + } +} + + +static int cas_check_invariants(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + u32 cfg; + int i; + + /* get page size for rx buffers. */ + cp->page_order = 0; +#ifdef USE_PAGE_ORDER + if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { + /* see if we can allocate larger pages */ + struct page *page = alloc_pages(GFP_ATOMIC, + CAS_JUMBO_PAGE_SHIFT - + PAGE_SHIFT); + if (page) { + __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); + cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; + } else { + printk("MTU limited to %d bytes\n", CAS_MAX_MTU); + } + } +#endif + cp->page_size = (PAGE_SIZE << cp->page_order); + + /* Fetch the FIFO configurations. */ + cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; + cp->rx_fifo_size = RX_FIFO_SIZE; + + /* finish phy determination. MDIO1 takes precedence over MDIO0 if + * they're both connected. + */ + cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, + PCI_SLOT(pdev->devfn)); + if (cp->phy_type & CAS_PHY_SERDES) { + cp->cas_flags |= CAS_FLAG_1000MB_CAP; + return 0; /* no more checking needed */ + } + + /* MII */ + cfg = readl(cp->regs + REG_MIF_CFG); + if (cfg & MIF_CFG_MDIO_1) { + cp->phy_type = CAS_PHY_MII_MDIO1; + } else if (cfg & MIF_CFG_MDIO_0) { + cp->phy_type = CAS_PHY_MII_MDIO0; + } + + cas_mif_poll(cp, 0); + writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); + + for (i = 0; i < 32; i++) { + u32 phy_id; + int j; + + for (j = 0; j < 3; j++) { + cp->phy_addr = i; + phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; + phy_id |= cas_phy_read(cp, MII_PHYSID2); + if (phy_id && (phy_id != 0xFFFFFFFF)) { + cp->phy_id = phy_id; + goto done; + } + } + } + pr_err("MII phy did not respond [%08x]\n", + readl(cp->regs + REG_MIF_STATE_MACHINE)); + return -1; + +done: + /* see if we can do gigabit */ + cfg = cas_phy_read(cp, MII_BMSR); + if ((cfg & CAS_BMSR_1000_EXTEND) && + cas_phy_read(cp, CAS_MII_1000_EXTEND)) + cp->cas_flags |= CAS_FLAG_1000MB_CAP; + return 0; +} + +/* Must be invoked under cp->lock. 
*/ +static inline void cas_start_dma(struct cas *cp) +{ + int i; + u32 val; + int txfailed = 0; + + /* enable dma */ + val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; + writel(val, cp->regs + REG_TX_CFG); + val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; + writel(val, cp->regs + REG_RX_CFG); + + /* enable the mac */ + val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; + writel(val, cp->regs + REG_MAC_TX_CFG); + val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; + writel(val, cp->regs + REG_MAC_RX_CFG); + + i = STOP_TRIES; + while (i-- > 0) { + val = readl(cp->regs + REG_MAC_TX_CFG); + if ((val & MAC_TX_CFG_EN)) + break; + udelay(10); + } + if (i < 0) txfailed = 1; + i = STOP_TRIES; + while (i-- > 0) { + val = readl(cp->regs + REG_MAC_RX_CFG); + if ((val & MAC_RX_CFG_EN)) { + if (txfailed) { + netdev_err(cp->dev, + "enabling mac failed [tx:%08x:%08x]\n", + readl(cp->regs + REG_MIF_STATE_MACHINE), + readl(cp->regs + REG_MAC_STATE_MACHINE)); + } + goto enable_rx_done; + } + udelay(10); + } + netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n", + (txfailed ? "tx,rx" : "rx"), + readl(cp->regs + REG_MIF_STATE_MACHINE), + readl(cp->regs + REG_MAC_STATE_MACHINE)); + +enable_rx_done: + cas_unmask_intr(cp); /* enable interrupts */ + writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); + writel(0, cp->regs + REG_RX_COMP_TAIL); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + if (N_RX_DESC_RINGS > 1) + writel(RX_DESC_RINGN_SIZE(1) - 4, + cp->regs + REG_PLUS_RX_KICK1); + + for (i = 1; i < N_RX_COMP_RINGS; i++) + writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); + } +} + +/* Must be invoked under cp->lock. */ +static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, + int *pause) +{ + u32 val = readl(cp->regs + REG_PCS_MII_LPA); + *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; + *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; + if (val & PCS_MII_LPA_ASYM_PAUSE) + *pause |= 0x10; + *spd = 1000; +} + +/* Must be invoked under cp->lock. */ +static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, + int *pause) +{ + u32 val; + + *fd = 0; + *spd = 10; + *pause = 0; + + /* use GMII registers */ + val = cas_phy_read(cp, MII_LPA); + if (val & CAS_LPA_PAUSE) + *pause = 0x01; + + if (val & CAS_LPA_ASYM_PAUSE) + *pause |= 0x10; + + if (val & LPA_DUPLEX) + *fd = 1; + if (val & LPA_100) + *spd = 100; + + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + val = cas_phy_read(cp, CAS_MII_1000_STATUS); + if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) + *spd = 1000; + if (val & CAS_LPA_1000FULL) + *fd = 1; + } +} + +/* A link-up condition has occurred, initialize and enable the + * rest of the chip. + * + * Must be invoked under cp->lock. + */ +static void cas_set_link_modes(struct cas *cp) +{ + u32 val; + int full_duplex, speed, pause; + + full_duplex = 0; + speed = 10; + pause = 0; + + if (CAS_PHY_MII(cp->phy_type)) { + cas_mif_poll(cp, 0); + val = cas_phy_read(cp, MII_BMCR); + if (val & BMCR_ANENABLE) { + cas_read_mii_link_mode(cp, &full_duplex, &speed, + &pause); + } else { + if (val & BMCR_FULLDPLX) + full_duplex = 1; + + if (val & BMCR_SPEED100) + speed = 100; + else if (val & CAS_BMCR_SPEED1000) + speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? 
+ 1000 : 100; + } + cas_mif_poll(cp, 1); + + } else { + val = readl(cp->regs + REG_PCS_MII_CTRL); + cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); + if ((val & PCS_MII_AUTONEG_EN) == 0) { + if (val & PCS_MII_CTRL_DUPLEX) + full_duplex = 1; + } + } + + netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n", + speed, full_duplex ? "full" : "half"); + + val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; + if (CAS_PHY_MII(cp->phy_type)) { + val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; + if (!full_duplex) + val |= MAC_XIF_DISABLE_ECHO; + } + if (full_duplex) + val |= MAC_XIF_FDPLX_LED; + if (speed == 1000) + val |= MAC_XIF_GMII_MODE; + writel(val, cp->regs + REG_MAC_XIF_CFG); + + /* deal with carrier and collision detect. */ + val = MAC_TX_CFG_IPG_EN; + if (full_duplex) { + val |= MAC_TX_CFG_IGNORE_CARRIER; + val |= MAC_TX_CFG_IGNORE_COLL; + } else { +#ifndef USE_CSMA_CD_PROTO + val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; + val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; +#endif + } + /* val now set up for REG_MAC_TX_CFG */ + + /* If gigabit and half-duplex, enable carrier extension + * mode. increase slot time to 512 bytes as well. + * else, disable it and make sure slot time is 64 bytes. + * also activate checksum bug workaround + */ + if ((speed == 1000) && !full_duplex) { + writel(val | MAC_TX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_TX_CFG); + + val = readl(cp->regs + REG_MAC_RX_CFG); + val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ + writel(val | MAC_RX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_RX_CFG); + + writel(0x200, cp->regs + REG_MAC_SLOT_TIME); + + cp->crc_size = 4; + /* minimum size gigabit frame at half duplex */ + cp->min_frame_size = CAS_1000MB_MIN_FRAME; + + } else { + writel(val, cp->regs + REG_MAC_TX_CFG); + + /* checksum bug workaround. don't strip FCS when in + * half-duplex mode + */ + val = readl(cp->regs + REG_MAC_RX_CFG); + if (full_duplex) { + val |= MAC_RX_CFG_STRIP_FCS; + cp->crc_size = 0; + cp->min_frame_size = CAS_MIN_MTU; + } else { + val &= ~MAC_RX_CFG_STRIP_FCS; + cp->crc_size = 4; + cp->min_frame_size = CAS_MIN_FRAME; + } + writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_RX_CFG); + writel(0x40, cp->regs + REG_MAC_SLOT_TIME); + } + + if (netif_msg_link(cp)) { + if (pause & 0x01) { + netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", + cp->rx_fifo_size, + cp->rx_pause_off, + cp->rx_pause_on); + } else if (pause & 0x10) { + netdev_info(cp->dev, "TX pause enabled\n"); + } else { + netdev_info(cp->dev, "Pause is disabled\n"); + } + } + + val = readl(cp->regs + REG_MAC_CTRL_CFG); + val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); + if (pause) { /* symmetric or asymmetric pause */ + val |= MAC_CTRL_CFG_SEND_PAUSE_EN; + if (pause & 0x01) { /* symmetric pause */ + val |= MAC_CTRL_CFG_RECV_PAUSE_EN; + } + } + writel(val, cp->regs + REG_MAC_CTRL_CFG); + cas_start_dma(cp); +} + +/* Must be invoked under cp->lock. */ +static void cas_init_hw(struct cas *cp, int restart_link) +{ + if (restart_link) + cas_phy_init(cp); + + cas_init_pause_thresholds(cp); + cas_init_mac(cp); + cas_init_dma(cp); + + if (restart_link) { + /* Default aneg parameters */ + cp->timer_ticks = 0; + cas_begin_auto_negotiation(cp, NULL); + } else if (cp->lstate == link_up) { + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } +} + +/* Must be invoked under cp->lock. on earlier cassini boards, + * SOFT_0 is tied to PCI reset. we use this to force a pci reset, + * let it settle out, and then restore pci state. 
+ */
+static void cas_hard_reset(struct cas *cp)
+{
+	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+	udelay(20);
+	pci_restore_state(cp->pdev);
+}
+
+
+static void cas_global_reset(struct cas *cp, int blkflag)
+{
+	int limit;
+
+	/* issue a global reset. don't use RSTOUT. */
+	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
+		/* For PCS, when the blkflag is set, we should set the
+		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
+		 * the last autonegotiation from being cleared. We'll
+		 * need some special handling if the chip is set into a
+		 * loopback mode.
+		 */
+		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
+		       cp->regs + REG_SW_RESET);
+	} else {
+		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
+	}
+
+	/* need to wait at least 3ms before polling register */
+	mdelay(3);
+
+	limit = STOP_TRIES;
+	while (limit-- > 0) {
+		u32 val = readl(cp->regs + REG_SW_RESET);
+		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
+			goto done;
+		udelay(10);
+	}
+	netdev_err(cp->dev, "sw reset failed\n");
+
+done:
+	/* enable various BIM interrupts */
+	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
+	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
+
+	/* clear out pci error status mask for handled errors.
+	 * we don't deal with DMA counter overflows as they happen
+	 * all the time.
+	 */
+	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+			       PCI_ERR_BIM_DMA_READ), cp->regs +
+	       REG_PCI_ERR_STATUS_MASK);
+
+	/* set up for MII by default to address mac rx reset timeout
+	 * issue
+	 */
+	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+}
+
+static void cas_reset(struct cas *cp, int blkflag)
+{
+	u32 val;
+
+	cas_mask_intr(cp);
+	cas_global_reset(cp, blkflag);
+	cas_mac_reset(cp);
+	cas_entropy_reset(cp);
+
+	/* disable dma engines. */
+	val = readl(cp->regs + REG_TX_CFG);
+	val &= ~TX_CFG_DMA_EN;
+	writel(val, cp->regs + REG_TX_CFG);
+
+	val = readl(cp->regs + REG_RX_CFG);
+	val &= ~RX_CFG_DMA_EN;
+	writel(val, cp->regs + REG_RX_CFG);
+
+	/* program header parser */
+	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
+	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+		cas_load_firmware(cp, CAS_HP_FIRMWARE);
+	} else {
+		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
+	}
+
+	/* clear out error registers */
+	spin_lock(&cp->stat_lock[N_TX_RINGS]);
+	cas_clear_mac_err(cp);
+	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+}
+
+/* Shut down the chip, must be called with pm_mutex held.
*/ +static void cas_shutdown(struct cas *cp) +{ + unsigned long flags; + + /* Make us not-running to avoid timers respawning */ + cp->hw_running = 0; + + del_timer_sync(&cp->link_timer); + + /* Stop the reset task */ +#if 0 + while (atomic_read(&cp->reset_task_pending_mtu) || + atomic_read(&cp->reset_task_pending_spare) || + atomic_read(&cp->reset_task_pending_all)) + schedule(); + +#else + while (atomic_read(&cp->reset_task_pending)) + schedule(); +#endif + /* Actually stop the chip */ + cas_lock_all_save(cp, flags); + cas_reset(cp, 0); + if (cp->cas_flags & CAS_FLAG_SATURN) + cas_phy_powerdown(cp); + cas_unlock_all_restore(cp, flags); +} + +static int cas_change_mtu(struct net_device *dev, int new_mtu) +{ + struct cas *cp = netdev_priv(dev); + + if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) + return -EINVAL; + + dev->mtu = new_mtu; + if (!netif_running(dev) || !netif_device_present(dev)) + return 0; + + /* let the reset task handle it */ +#if 1 + atomic_inc(&cp->reset_task_pending); + if ((cp->phy_type & CAS_PHY_SERDES)) { + atomic_inc(&cp->reset_task_pending_all); + } else { + atomic_inc(&cp->reset_task_pending_mtu); + } + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? + CAS_RESET_ALL : CAS_RESET_MTU); + pr_err("reset called in cas_change_mtu\n"); + schedule_work(&cp->reset_task); +#endif + + flush_work(&cp->reset_task); + return 0; +} + +static void cas_clean_txd(struct cas *cp, int ring) +{ + struct cas_tx_desc *txd = cp->init_txds[ring]; + struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; + u64 daddr, dlen; + int i, size; + + size = TX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + int frag; + + if (skbs[i] == NULL) + continue; + + skb = skbs[i]; + skbs[i] = NULL; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + int ent = i & (size - 1); + + /* first buffer is never a tiny buffer and so + * needs to be unmapped. + */ + daddr = le64_to_cpu(txd[ent].buffer); + dlen = CAS_VAL(TX_DESC_BUFLEN, + le64_to_cpu(txd[ent].control)); + pci_unmap_page(cp->pdev, daddr, dlen, + PCI_DMA_TODEVICE); + + if (frag != skb_shinfo(skb)->nr_frags) { + i++; + + /* next buffer might by a tiny buffer. + * skip past it. + */ + ent = i & (size - 1); + if (cp->tx_tiny_use[ring][ent].used) + i++; + } + } + dev_kfree_skb_any(skb); + } + + /* zero out tiny buf usage */ + memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); +} + +/* freed on close */ +static inline void cas_free_rx_desc(struct cas *cp, int ring) +{ + cas_page_t **page = cp->rx_pages[ring]; + int i, size; + + size = RX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + if (page[i]) { + cas_page_free(cp, page[i]); + page[i] = NULL; + } + } +} + +static void cas_free_rxds(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_DESC_RINGS; i++) + cas_free_rx_desc(cp, i); +} + +/* Must be invoked under cp->lock. 
*/ +static void cas_clean_rings(struct cas *cp) +{ + int i; + + /* need to clean all tx rings */ + memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); + memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); + for (i = 0; i < N_TX_RINGS; i++) + cas_clean_txd(cp, i); + + /* zero out init block */ + memset(cp->init_block, 0, sizeof(struct cas_init_block)); + cas_clean_rxds(cp); + cas_clean_rxcs(cp); +} + +/* allocated on open */ +static inline int cas_alloc_rx_desc(struct cas *cp, int ring) +{ + cas_page_t **page = cp->rx_pages[ring]; + int size, i = 0; + + size = RX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) + return -1; + } + return 0; +} + +static int cas_alloc_rxds(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_DESC_RINGS; i++) { + if (cas_alloc_rx_desc(cp, i) < 0) { + cas_free_rxds(cp); + return -1; + } + } + return 0; +} + +static void cas_reset_task(struct work_struct *work) +{ + struct cas *cp = container_of(work, struct cas, reset_task); +#if 0 + int pending = atomic_read(&cp->reset_task_pending); +#else + int pending_all = atomic_read(&cp->reset_task_pending_all); + int pending_spare = atomic_read(&cp->reset_task_pending_spare); + int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); + + if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { + /* We can have more tasks scheduled than actually + * needed. + */ + atomic_dec(&cp->reset_task_pending); + return; + } +#endif + /* The link went down, we reset the ring, but keep + * DMA stopped. Use this function for reset + * on error as well. + */ + if (cp->hw_running) { + unsigned long flags; + + /* Make sure we don't get interrupts or tx packets */ + netif_device_detach(cp->dev); + cas_lock_all_save(cp, flags); + + if (cp->opened) { + /* We call cas_spare_recover when we call cas_open. + * but we do not initialize the lists cas_spare_recover + * uses until cas_open is called. + */ + cas_spare_recover(cp, GFP_ATOMIC); + } +#if 1 + /* test => only pending_spare set */ + if (!pending_all && !pending_mtu) + goto done; +#else + if (pending == CAS_RESET_SPARE) + goto done; +#endif + /* when pending == CAS_RESET_ALL, the following + * call to cas_init_hw will restart auto negotiation. + * Setting the second argument of cas_reset to + * !(pending == CAS_RESET_ALL) will set this argument + * to 1 (avoiding reinitializing the PHY for the normal + * PCS case) when auto negotiation is not restarted. 
+ */ +#if 1 + cas_reset(cp, !(pending_all > 0)); + if (cp->opened) + cas_clean_rings(cp); + cas_init_hw(cp, (pending_all > 0)); +#else + cas_reset(cp, !(pending == CAS_RESET_ALL)); + if (cp->opened) + cas_clean_rings(cp); + cas_init_hw(cp, pending == CAS_RESET_ALL); +#endif + +done: + cas_unlock_all_restore(cp, flags); + netif_device_attach(cp->dev); + } +#if 1 + atomic_sub(pending_all, &cp->reset_task_pending_all); + atomic_sub(pending_spare, &cp->reset_task_pending_spare); + atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); + atomic_dec(&cp->reset_task_pending); +#else + atomic_set(&cp->reset_task_pending, 0); +#endif +} + +static void cas_link_timer(unsigned long data) +{ + struct cas *cp = (struct cas *) data; + int mask, pending = 0, reset = 0; + unsigned long flags; + + if (link_transition_timeout != 0 && + cp->link_transition_jiffies_valid && + ((jiffies - cp->link_transition_jiffies) > + (link_transition_timeout))) { + /* One-second counter so link-down workaround doesn't + * cause resets to occur so fast as to fool the switch + * into thinking the link is down. + */ + cp->link_transition_jiffies_valid = 0; + } + + if (!cp->hw_running) + return; + + spin_lock_irqsave(&cp->lock, flags); + cas_lock_tx(cp); + cas_entropy_gather(cp); + + /* If the link task is still pending, we just + * reschedule the link timer + */ +#if 1 + if (atomic_read(&cp->reset_task_pending_all) || + atomic_read(&cp->reset_task_pending_spare) || + atomic_read(&cp->reset_task_pending_mtu)) + goto done; +#else + if (atomic_read(&cp->reset_task_pending)) + goto done; +#endif + + /* check for rx cleaning */ + if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { + int i, rmask; + + for (i = 0; i < MAX_RX_DESC_RINGS; i++) { + rmask = CAS_FLAG_RXD_POST(i); + if ((mask & rmask) == 0) + continue; + + /* post_rxds will do a mod_timer */ + if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { + pending = 1; + continue; + } + cp->cas_flags &= ~rmask; + } + } + + if (CAS_PHY_MII(cp->phy_type)) { + u16 bmsr; + cas_mif_poll(cp, 0); + bmsr = cas_phy_read(cp, MII_BMSR); + /* WTZ: Solaris driver reads this twice, but that + * may be due to the PCS case and the use of a + * common implementation. Read it twice here to be + * safe. 
+ */ + bmsr = cas_phy_read(cp, MII_BMSR); + cas_mif_poll(cp, 1); + readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ + reset = cas_mii_link_check(cp, bmsr); + } else { + reset = cas_pcs_link_check(cp); + } + + if (reset) + goto done; + + /* check for tx state machine confusion */ + if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { + u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); + u32 wptr, rptr; + int tlm = CAS_VAL(MAC_SM_TLM, val); + + if (((tlm == 0x5) || (tlm == 0x3)) && + (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { + netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, + "tx err: MAC_STATE[%08x]\n", val); + reset = 1; + goto done; + } + + val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); + wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); + rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); + if ((val == 0) && (wptr != rptr)) { + netif_printk(cp, tx_err, KERN_DEBUG, cp->dev, + "tx err: TX_FIFO[%08x:%08x:%08x]\n", + val, wptr, rptr); + reset = 1; + } + + if (reset) + cas_hard_reset(cp); + } + +done: + if (reset) { +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + pr_err("reset called in cas_link_timer\n"); + schedule_work(&cp->reset_task); +#endif + } + + if (!pending) + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); + cas_unlock_tx(cp); + spin_unlock_irqrestore(&cp->lock, flags); +} + +/* tiny buffers are used to avoid target abort issues with + * older cassini's + */ +static void cas_tx_tiny_free(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + int i; + + for (i = 0; i < N_TX_RINGS; i++) { + if (!cp->tx_tiny_bufs[i]) + continue; + + pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, + cp->tx_tiny_bufs[i], + cp->tx_tiny_dvma[i]); + cp->tx_tiny_bufs[i] = NULL; + } +} + +static int cas_tx_tiny_alloc(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + int i; + + for (i = 0; i < N_TX_RINGS; i++) { + cp->tx_tiny_bufs[i] = + pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, + &cp->tx_tiny_dvma[i]); + if (!cp->tx_tiny_bufs[i]) { + cas_tx_tiny_free(cp); + return -1; + } + } + return 0; +} + + +static int cas_open(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + int hw_was_up, err; + unsigned long flags; + + mutex_lock(&cp->pm_mutex); + + hw_was_up = cp->hw_running; + + /* The power-management mutex protects the hw_running + * etc. state so it is safe to do this bit without cp->lock + */ + if (!cp->hw_running) { + /* Reset the chip */ + cas_lock_all_save(cp, flags); + /* We set the second arg to cas_reset to zero + * because cas_init_hw below will have its second + * argument set to non-zero, which will force + * autonegotiation to start. + */ + cas_reset(cp, 0); + cp->hw_running = 1; + cas_unlock_all_restore(cp, flags); + } + + err = -ENOMEM; + if (cas_tx_tiny_alloc(cp) < 0) + goto err_unlock; + + /* alloc rx descriptors */ + if (cas_alloc_rxds(cp) < 0) + goto err_tx_tiny; + + /* allocate spares */ + cas_spare_init(cp); + cas_spare_recover(cp, GFP_KERNEL); + + /* We can now request the interrupt as we know it's masked + * on the controller. 
cassini+ has up to 4 interrupts + * that can be used, but you need to do explicit pci interrupt + * mapping to expose them + */ + if (request_irq(cp->pdev->irq, cas_interrupt, + IRQF_SHARED, dev->name, (void *) dev)) { + netdev_err(cp->dev, "failed to request irq !\n"); + err = -EAGAIN; + goto err_spare; + } + +#ifdef USE_NAPI + napi_enable(&cp->napi); +#endif + /* init hw */ + cas_lock_all_save(cp, flags); + cas_clean_rings(cp); + cas_init_hw(cp, !hw_was_up); + cp->opened = 1; + cas_unlock_all_restore(cp, flags); + + netif_start_queue(dev); + mutex_unlock(&cp->pm_mutex); + return 0; + +err_spare: + cas_spare_free(cp); + cas_free_rxds(cp); +err_tx_tiny: + cas_tx_tiny_free(cp); +err_unlock: + mutex_unlock(&cp->pm_mutex); + return err; +} + +static int cas_close(struct net_device *dev) +{ + unsigned long flags; + struct cas *cp = netdev_priv(dev); + +#ifdef USE_NAPI + napi_disable(&cp->napi); +#endif + /* Make sure we don't get distracted by suspend/resume */ + mutex_lock(&cp->pm_mutex); + + netif_stop_queue(dev); + + /* Stop traffic, mark us closed */ + cas_lock_all_save(cp, flags); + cp->opened = 0; + cas_reset(cp, 0); + cas_phy_init(cp); + cas_begin_auto_negotiation(cp, NULL); + cas_clean_rings(cp); + cas_unlock_all_restore(cp, flags); + + free_irq(cp->pdev->irq, (void *) dev); + cas_spare_free(cp); + cas_free_rxds(cp); + cas_tx_tiny_free(cp); + mutex_unlock(&cp->pm_mutex); + return 0; +} + +static struct { + const char name[ETH_GSTRING_LEN]; +} ethtool_cassini_statnames[] = { + {"collisions"}, + {"rx_bytes"}, + {"rx_crc_errors"}, + {"rx_dropped"}, + {"rx_errors"}, + {"rx_fifo_errors"}, + {"rx_frame_errors"}, + {"rx_length_errors"}, + {"rx_over_errors"}, + {"rx_packets"}, + {"tx_aborted_errors"}, + {"tx_bytes"}, + {"tx_dropped"}, + {"tx_errors"}, + {"tx_fifo_errors"}, + {"tx_packets"} +}; +#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames) + +static struct { + const int offsets; /* neg. values for 2nd arg to cas_read_phy */ +} ethtool_register_table[] = { + {-MII_BMSR}, + {-MII_BMCR}, + {REG_CAWR}, + {REG_INF_BURST}, + {REG_BIM_CFG}, + {REG_RX_CFG}, + {REG_HP_CFG}, + {REG_MAC_TX_CFG}, + {REG_MAC_RX_CFG}, + {REG_MAC_CTRL_CFG}, + {REG_MAC_XIF_CFG}, + {REG_MIF_CFG}, + {REG_PCS_CFG}, + {REG_SATURN_PCFG}, + {REG_PCS_MII_STATUS}, + {REG_PCS_STATE_MACHINE}, + {REG_MAC_COLL_EXCESS}, + {REG_MAC_COLL_LATE} +}; +#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) +#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) + +static void cas_read_regs(struct cas *cp, u8 *ptr, int len) +{ + u8 *p; + int i; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) { + u16 hval; + u32 val; + if (ethtool_register_table[i].offsets < 0) { + hval = cas_phy_read(cp, + -ethtool_register_table[i].offsets); + val = hval; + } else { + val= readl(cp->regs+ethtool_register_table[i].offsets); + } + memcpy(p, (u8 *)&val, sizeof(u32)); + } + spin_unlock_irqrestore(&cp->lock, flags); +} + +static struct net_device_stats *cas_get_stats(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + struct net_device_stats *stats = cp->net_stats; + unsigned long flags; + int i; + unsigned long tmp; + + /* we collate all of the stats into net_stats[N_TX_RING] */ + if (!cp->hw_running) + return stats + N_TX_RINGS; + + /* collect outstanding stats */ + /* WTZ: the Cassini spec gives these as 16 bit counters but + * stored in 32-bit words. Added a mask of 0xffff to be safe, + * in case the chip somehow puts any garbage in the other bits. 
+ * Also, counter usage didn't seem to mach what Adrian did + * in the parts of the code that set these quantities. Made + * that consistent. + */ + spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); + stats[N_TX_RINGS].rx_crc_errors += + readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; + stats[N_TX_RINGS].rx_frame_errors += + readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; + stats[N_TX_RINGS].rx_length_errors += + readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; +#if 1 + tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + + (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); + stats[N_TX_RINGS].tx_aborted_errors += tmp; + stats[N_TX_RINGS].collisions += + tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); +#else + stats[N_TX_RINGS].tx_aborted_errors += + readl(cp->regs + REG_MAC_COLL_EXCESS); + stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + + readl(cp->regs + REG_MAC_COLL_LATE); +#endif + cas_clear_mac_err(cp); + + /* saved bits that are unique to ring 0 */ + spin_lock(&cp->stat_lock[0]); + stats[N_TX_RINGS].collisions += stats[0].collisions; + stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; + stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; + stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; + stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; + stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; + spin_unlock(&cp->stat_lock[0]); + + for (i = 0; i < N_TX_RINGS; i++) { + spin_lock(&cp->stat_lock[i]); + stats[N_TX_RINGS].rx_length_errors += + stats[i].rx_length_errors; + stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; + stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; + stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; + stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; + stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; + stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; + stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; + stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; + stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; + memset(stats + i, 0, sizeof(struct net_device_stats)); + spin_unlock(&cp->stat_lock[i]); + } + spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); + return stats + N_TX_RINGS; +} + + +static void cas_set_multicast(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + u32 rxcfg, rxcfg_new; + unsigned long flags; + int limit = STOP_TRIES; + + if (!cp->hw_running) + return; + + spin_lock_irqsave(&cp->lock, flags); + rxcfg = readl(cp->regs + REG_MAC_RX_CFG); + + /* disable RX MAC and wait for completion */ + writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { + if (!limit--) + break; + udelay(10); + } + + /* disable hash filter and wait for completion */ + limit = STOP_TRIES; + rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); + writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { + if (!limit--) + break; + udelay(10); + } + + /* program hash filters */ + cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); + rxcfg |= rxcfg_new; + writel(rxcfg, cp->regs + REG_MAC_RX_CFG); + spin_unlock_irqrestore(&cp->lock, flags); +} + +static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct cas *cp = netdev_priv(dev); + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + 
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); + info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? + cp->casreg_len : CAS_MAX_REGS; + info->n_stats = CAS_NUM_STAT_KEYS; +} + +static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cas *cp = netdev_priv(dev); + u16 bmcr; + int full_duplex, speed, pause; + unsigned long flags; + enum link_state linkstate = link_up; + + cmd->advertising = 0; + cmd->supported = SUPPORTED_Autoneg; + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + cmd->supported |= SUPPORTED_1000baseT_Full; + cmd->advertising |= ADVERTISED_1000baseT_Full; + } + + /* Record PHY settings if HW is on. */ + spin_lock_irqsave(&cp->lock, flags); + bmcr = 0; + linkstate = cp->lstate; + if (CAS_PHY_MII(cp->phy_type)) { + cmd->port = PORT_MII; + cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? + XCVR_INTERNAL : XCVR_EXTERNAL; + cmd->phy_address = cp->phy_addr; + cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + cmd->supported |= + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_TP | SUPPORTED_MII); + + if (cp->hw_running) { + cas_mif_poll(cp, 0); + bmcr = cas_phy_read(cp, MII_BMCR); + cas_read_mii_link_mode(cp, &full_duplex, + &speed, &pause); + cas_mif_poll(cp, 1); + } + + } else { + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = 0; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + + if (cp->hw_running) { + /* pcs uses the same bits as mii */ + bmcr = readl(cp->regs + REG_PCS_MII_CTRL); + cas_read_pcs_link_mode(cp, &full_duplex, + &speed, &pause); + } + } + spin_unlock_irqrestore(&cp->lock, flags); + + if (bmcr & BMCR_ANENABLE) { + cmd->advertising |= ADVERTISED_Autoneg; + cmd->autoneg = AUTONEG_ENABLE; + ethtool_cmd_speed_set(cmd, ((speed == 10) ? + SPEED_10 : + ((speed == 1000) ? + SPEED_1000 : SPEED_100))); + cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + } else { + cmd->autoneg = AUTONEG_DISABLE; + ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ? + SPEED_1000 : + ((bmcr & BMCR_SPEED100) ? + SPEED_100 : SPEED_10))); + cmd->duplex = + (bmcr & BMCR_FULLDPLX) ? + DUPLEX_FULL : DUPLEX_HALF; + } + if (linkstate != link_up) { + /* Force these to "unknown" if the link is not up and + * autonogotiation in enabled. We can set the link + * speed to 0, but not cmd->duplex, + * because its legal values are 0 and 1. Ethtool will + * print the value reported in parentheses after the + * word "Unknown" for unrecognized values. + * + * If in forced mode, we report the speed and duplex + * settings that we configured. + */ + if (cp->link_cntl & BMCR_ANENABLE) { + ethtool_cmd_speed_set(cmd, 0); + cmd->duplex = 0xff; + } else { + ethtool_cmd_speed_set(cmd, SPEED_10); + if (cp->link_cntl & BMCR_SPEED100) { + ethtool_cmd_speed_set(cmd, SPEED_100); + } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { + ethtool_cmd_speed_set(cmd, SPEED_1000); + } + cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? + DUPLEX_FULL : DUPLEX_HALF; + } + } + return 0; +} + +static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cas *cp = netdev_priv(dev); + unsigned long flags; + u32 speed = ethtool_cmd_speed(cmd); + + /* Verify the settings we care about. 
+	 */
+	if (cmd->autoneg != AUTONEG_ENABLE &&
+	    cmd->autoneg != AUTONEG_DISABLE)
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_DISABLE &&
+	    ((speed != SPEED_1000 &&
+	      speed != SPEED_100 &&
+	      speed != SPEED_10) ||
+	     (cmd->duplex != DUPLEX_HALF &&
+	      cmd->duplex != DUPLEX_FULL)))
+		return -EINVAL;
+
+	/* Apply settings and restart link process. */
+	spin_lock_irqsave(&cp->lock, flags);
+	cas_begin_auto_negotiation(cp, cmd);
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return 0;
+}
+
+static int cas_nway_reset(struct net_device *dev)
+{
+	struct cas *cp = netdev_priv(dev);
+	unsigned long flags;
+
+	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
+		return -EINVAL;
+
+	/* Restart link process. */
+	spin_lock_irqsave(&cp->lock, flags);
+	cas_begin_auto_negotiation(cp, NULL);
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	return 0;
+}
+
+static u32 cas_get_link(struct net_device *dev)
+{
+	struct cas *cp = netdev_priv(dev);
+	return cp->lstate == link_up;
+}
+
+static u32 cas_get_msglevel(struct net_device *dev)
+{
+	struct cas *cp = netdev_priv(dev);
+	return cp->msg_enable;
+}
+
+static void cas_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct cas *cp = netdev_priv(dev);
+	cp->msg_enable = value;
+}
+
+static int cas_get_regs_len(struct net_device *dev)
+{
+	struct cas *cp = netdev_priv(dev);
+	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
+}
+
+static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			 void *p)
+{
+	struct cas *cp = netdev_priv(dev);
+	regs->version = 0;
+	/* cas_read_regs handles locks (cp->lock). */
+	cas_read_regs(cp, p, regs->len / sizeof(u32));
+}
+
+static int cas_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return CAS_NUM_STAT_KEYS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	memcpy(data, &ethtool_cassini_statnames,
+	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
+}
+
+static void cas_get_ethtool_stats(struct net_device *dev,
+				  struct ethtool_stats *estats, u64 *data)
+{
+	struct cas *cp = netdev_priv(dev);
+	struct net_device_stats *stats = cas_get_stats(cp->dev);
+	int i = 0;
+	data[i++] = stats->collisions;
+	data[i++] = stats->rx_bytes;
+	data[i++] = stats->rx_crc_errors;
+	data[i++] = stats->rx_dropped;
+	data[i++] = stats->rx_errors;
+	data[i++] = stats->rx_fifo_errors;
+	data[i++] = stats->rx_frame_errors;
+	data[i++] = stats->rx_length_errors;
+	data[i++] = stats->rx_over_errors;
+	data[i++] = stats->rx_packets;
+	data[i++] = stats->tx_aborted_errors;
+	data[i++] = stats->tx_bytes;
+	data[i++] = stats->tx_dropped;
+	data[i++] = stats->tx_errors;
+	data[i++] = stats->tx_fifo_errors;
+	data[i++] = stats->tx_packets;
+	BUG_ON(i != CAS_NUM_STAT_KEYS);
+}
+
+static const struct ethtool_ops cas_ethtool_ops = {
+	.get_drvinfo = cas_get_drvinfo,
+	.get_settings = cas_get_settings,
+	.set_settings = cas_set_settings,
+	.nway_reset = cas_nway_reset,
+	.get_link = cas_get_link,
+	.get_msglevel = cas_get_msglevel,
+	.set_msglevel = cas_set_msglevel,
+	.get_regs_len = cas_get_regs_len,
+	.get_regs = cas_get_regs,
+	.get_sset_count = cas_get_sset_count,
+	.get_strings = cas_get_strings,
+	.get_ethtool_stats = cas_get_ethtool_stats,
+};
+
+static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct cas *cp = netdev_priv(dev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+	unsigned long flags;
+	int rc = -EOPNOTSUPP;
+
+	/* Hold the PM mutex while doing ioctl's or we may collide
+	 * with
open/close and power management and oops. + */ + mutex_lock(&cp->pm_mutex); + switch (cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = cp->phy_addr; + /* Fallthrough... */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + spin_lock_irqsave(&cp->lock, flags); + cas_mif_poll(cp, 0); + data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); + cas_mif_poll(cp, 1); + spin_unlock_irqrestore(&cp->lock, flags); + rc = 0; + break; + + case SIOCSMIIREG: /* Write MII PHY register. */ + spin_lock_irqsave(&cp->lock, flags); + cas_mif_poll(cp, 0); + rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); + cas_mif_poll(cp, 1); + spin_unlock_irqrestore(&cp->lock, flags); + break; + default: + break; + } + + mutex_unlock(&cp->pm_mutex); + return rc; +} + +/* When this chip sits underneath an Intel 31154 bridge, it is the + * only subordinate device and we can tweak the bridge settings to + * reflect that fact. + */ +static void cas_program_bridge(struct pci_dev *cas_pdev) +{ + struct pci_dev *pdev = cas_pdev->bus->self; + u32 val; + + if (!pdev) + return; + + if (pdev->vendor != 0x8086 || pdev->device != 0x537c) + return; + + /* Clear bit 10 (Bus Parking Control) in the Secondary + * Arbiter Control/Status Register which lives at offset + * 0x41. Using a 32-bit word read/modify/write at 0x40 + * is much simpler so that's how we do this. + */ + pci_read_config_dword(pdev, 0x40, &val); + val &= ~0x00040000; + pci_write_config_dword(pdev, 0x40, val); + + /* Max out the Multi-Transaction Timer settings since + * Cassini is the only device present. + * + * The register is 16-bit and lives at 0x50. When the + * settings are enabled, it extends the GRANT# signal + * for a requestor after a transaction is complete. This + * allows the next request to run without first needing + * to negotiate the GRANT# signal back. + * + * Bits 12:10 define the grant duration: + * + * 1 -- 16 clocks + * 2 -- 32 clocks + * 3 -- 64 clocks + * 4 -- 128 clocks + * 5 -- 256 clocks + * + * All other values are illegal. + * + * Bits 09:00 define which REQ/GNT signal pairs get the + * GRANT# signal treatment. We set them all. + */ + pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff); + + /* The Read Prefecth Policy register is 16-bit and sits at + * offset 0x52. It enables a "smart" pre-fetch policy. We + * enable it and max out all of the settings since only one + * device is sitting underneath and thus bandwidth sharing is + * not an issue. + * + * The register has several 3 bit fields, which indicates a + * multiplier applied to the base amount of prefetching the + * chip would do. These fields are at: + * + * 15:13 --- ReRead Primary Bus + * 12:10 --- FirstRead Primary Bus + * 09:07 --- ReRead Secondary Bus + * 06:04 --- FirstRead Secondary Bus + * + * Bits 03:00 control which REQ/GNT pairs the prefetch settings + * get enabled on. Bit 3 is a grouped enabler which controls + * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control + * the individual REQ/GNT pairs [2:0]. + */ + pci_write_config_word(pdev, 0x52, + (0x7 << 13) | + (0x7 << 10) | + (0x7 << 7) | + (0x7 << 4) | + (0xf << 0)); + + /* Force cacheline size to 0x8 */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); + + /* Force latency timer to maximum setting so Cassini can + * sit on the bus as long as it likes. 
+ */ + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff); +} + +static const struct net_device_ops cas_netdev_ops = { + .ndo_open = cas_open, + .ndo_stop = cas_close, + .ndo_start_xmit = cas_start_xmit, + .ndo_get_stats = cas_get_stats, + .ndo_set_rx_mode = cas_set_multicast, + .ndo_do_ioctl = cas_ioctl, + .ndo_tx_timeout = cas_tx_timeout, + .ndo_change_mtu = cas_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cas_netpoll, +#endif +}; + +static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int cas_version_printed = 0; + unsigned long casreg_len; + struct net_device *dev; + struct cas *cp; + int i, err, pci_using_dac; + u16 pci_cmd; + u8 orig_cacheline_size = 0, cas_cacheline_size = 0; + + if (cas_version_printed++ == 0) + pr_info("%s", version); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Cannot find proper PCI device " + "base address, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + dev = alloc_etherdev(sizeof(*cp)); + if (!dev) { + err = -ENOMEM; + goto err_out_disable_pdev; + } + SET_NETDEV_DEV(dev, &pdev->dev); + + err = pci_request_regions(pdev, dev->name); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_free_netdev; + } + pci_set_master(pdev); + + /* we must always turn on parity response or else parity + * doesn't get generated properly. disable SERR/PERR as well. + * in addition, we want to turn MWI on. + */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_SERR; + pci_cmd |= PCI_COMMAND_PARITY; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + if (pci_try_set_mwi(pdev)) + pr_warn("Could not enable MWI for %s\n", pci_name(pdev)); + + cas_program_bridge(pdev); + + /* + * On some architectures, the default cache line size set + * by pci_try_set_mwi reduces perforamnce. We have to increase + * it for this case. To start, we'll print some configuration + * data. + */ +#if 1 + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, + &orig_cacheline_size); + if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { + cas_cacheline_size = + (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? + CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; + if (pci_write_config_byte(pdev, + PCI_CACHE_LINE_SIZE, + cas_cacheline_size)) { + dev_err(&pdev->dev, "Could not set PCI cache " + "line size\n"); + goto err_write_cacheline; + } + } +#endif + + + /* Configure DMA attributes. */ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(64)); + if (err < 0) { + dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " + "for consistent allocations\n"); + goto err_out_free_res; + } + + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, " + "aborting\n"); + goto err_out_free_res; + } + pci_using_dac = 0; + } + + casreg_len = pci_resource_len(pdev, 0); + + cp = netdev_priv(dev); + cp->pdev = pdev; +#if 1 + /* A value of 0 indicates we never explicitly set it */ + cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; +#endif + cp->dev = dev; + cp->msg_enable = (cassini_debug < 0) ? 
CAS_DEF_MSG_ENABLE : + cassini_debug; + +#if defined(CONFIG_SPARC) + cp->of_node = pci_device_to_OF_node(pdev); +#endif + + cp->link_transition = LINK_TRANSITION_UNKNOWN; + cp->link_transition_jiffies_valid = 0; + + spin_lock_init(&cp->lock); + spin_lock_init(&cp->rx_inuse_lock); + spin_lock_init(&cp->rx_spare_lock); + for (i = 0; i < N_TX_RINGS; i++) { + spin_lock_init(&cp->stat_lock[i]); + spin_lock_init(&cp->tx_lock[i]); + } + spin_lock_init(&cp->stat_lock[N_TX_RINGS]); + mutex_init(&cp->pm_mutex); + + init_timer(&cp->link_timer); + cp->link_timer.function = cas_link_timer; + cp->link_timer.data = (unsigned long) cp; + +#if 1 + /* Just in case the implementation of atomic operations + * change so that an explicit initialization is necessary. + */ + atomic_set(&cp->reset_task_pending, 0); + atomic_set(&cp->reset_task_pending_all, 0); + atomic_set(&cp->reset_task_pending_spare, 0); + atomic_set(&cp->reset_task_pending_mtu, 0); +#endif + INIT_WORK(&cp->reset_task, cas_reset_task); + + /* Default link parameters */ + if (link_mode >= 0 && link_mode < 6) + cp->link_cntl = link_modes[link_mode]; + else + cp->link_cntl = BMCR_ANENABLE; + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + netif_carrier_off(cp->dev); + cp->timer_ticks = 0; + + /* give us access to cassini registers */ + cp->regs = pci_iomap(pdev, 0, casreg_len); + if (!cp->regs) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + goto err_out_free_res; + } + cp->casreg_len = casreg_len; + + pci_save_state(pdev); + cas_check_pci_invariants(cp); + cas_hard_reset(cp); + cas_reset(cp, 0); + if (cas_check_invariants(cp)) + goto err_out_iounmap; + if (cp->cas_flags & CAS_FLAG_SATURN) + cas_saturn_firmware_init(cp); + + cp->init_block = (struct cas_init_block *) + pci_alloc_consistent(pdev, sizeof(struct cas_init_block), + &cp->block_dvma); + if (!cp->init_block) { + dev_err(&pdev->dev, "Cannot allocate init block, aborting\n"); + goto err_out_iounmap; + } + + for (i = 0; i < N_TX_RINGS; i++) + cp->init_txds[i] = cp->init_block->txds[i]; + + for (i = 0; i < N_RX_DESC_RINGS; i++) + cp->init_rxds[i] = cp->init_block->rxds[i]; + + for (i = 0; i < N_RX_COMP_RINGS; i++) + cp->init_rxcs[i] = cp->init_block->rxcs[i]; + + for (i = 0; i < N_RX_FLOWS; i++) + skb_queue_head_init(&cp->rx_flows[i]); + + dev->netdev_ops = &cas_netdev_ops; + dev->ethtool_ops = &cas_ethtool_ops; + dev->watchdog_timeo = CAS_TX_TIMEOUT; + +#ifdef USE_NAPI + netif_napi_add(dev, &cp->napi, cas_poll, 64); +#endif + dev->irq = pdev->irq; + dev->dma = 0; + + /* Cassini features. */ + if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; + + if (pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + if (register_netdev(dev)) { + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); + goto err_out_free_consistent; + } + + i = readl(cp->regs + REG_BIM_CFG); + netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n", + (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", + (i & BIM_CFG_32BIT) ? "32" : "64", + (i & BIM_CFG_66MHZ) ? "66" : "33", + (cp->phy_type == CAS_PHY_SERDES) ? 
"Fi" : "Cu", pdev->irq, + dev->dev_addr); + + pci_set_drvdata(pdev, dev); + cp->hw_running = 1; + cas_entropy_reset(cp); + cas_phy_init(cp); + cas_begin_auto_negotiation(cp, NULL); + return 0; + +err_out_free_consistent: + pci_free_consistent(pdev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); + +err_out_iounmap: + mutex_lock(&cp->pm_mutex); + if (cp->hw_running) + cas_shutdown(cp); + mutex_unlock(&cp->pm_mutex); + + pci_iounmap(pdev, cp->regs); + + +err_out_free_res: + pci_release_regions(pdev); + +err_write_cacheline: + /* Try to restore it in case the error occurred after we + * set it. + */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); + +err_out_free_netdev: + free_netdev(dev); + +err_out_disable_pdev: + pci_disable_device(pdev); + return -ENODEV; +} + +static void cas_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp; + if (!dev) + return; + + cp = netdev_priv(dev); + unregister_netdev(dev); + + vfree(cp->fw_data); + + mutex_lock(&cp->pm_mutex); + cancel_work_sync(&cp->reset_task); + if (cp->hw_running) + cas_shutdown(cp); + mutex_unlock(&cp->pm_mutex); + +#if 1 + if (cp->orig_cacheline_size) { + /* Restore the cache line size if we had modified + * it. + */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + cp->orig_cacheline_size); + } +#endif + pci_free_consistent(pdev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); + pci_iounmap(pdev, cp->regs); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +#ifdef CONFIG_PM +static int cas_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp = netdev_priv(dev); + unsigned long flags; + + mutex_lock(&cp->pm_mutex); + + /* If the driver is opened, we stop the DMA */ + if (cp->opened) { + netif_device_detach(dev); + + cas_lock_all_save(cp, flags); + + /* We can set the second arg of cas_reset to 0 + * because on resume, we'll call cas_init_hw with + * its second arg set so that autonegotiation is + * restarted. 
+		 */
+		cas_reset(cp, 0);
+		cas_clean_rings(cp);
+		cas_unlock_all_restore(cp, flags);
+	}
+
+	if (cp->hw_running)
+		cas_shutdown(cp);
+	mutex_unlock(&cp->pm_mutex);
+
+	return 0;
+}
+
+static int cas_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct cas *cp = netdev_priv(dev);
+
+	netdev_info(dev, "resuming\n");
+
+	mutex_lock(&cp->pm_mutex);
+	cas_hard_reset(cp);
+	if (cp->opened) {
+		unsigned long flags;
+		cas_lock_all_save(cp, flags);
+		cas_reset(cp, 0);
+		cp->hw_running = 1;
+		cas_clean_rings(cp);
+		cas_init_hw(cp, 1);
+		cas_unlock_all_restore(cp, flags);
+
+		netif_device_attach(dev);
+	}
+	mutex_unlock(&cp->pm_mutex);
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct pci_driver cas_driver = {
+	.name = DRV_MODULE_NAME,
+	.id_table = cas_pci_tbl,
+	.probe = cas_init_one,
+	.remove = cas_remove_one,
+#ifdef CONFIG_PM
+	.suspend = cas_suspend,
+	.resume = cas_resume
+#endif
+};
+
+static int __init cas_init(void)
+{
+	if (linkdown_timeout > 0)
+		link_transition_timeout = linkdown_timeout * HZ;
+	else
+		link_transition_timeout = 0;
+
+	return pci_register_driver(&cas_driver);
+}
+
+static void __exit cas_cleanup(void)
+{
+	pci_unregister_driver(&cas_driver);
+}
+
+module_init(cas_init);
+module_exit(cas_cleanup);
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
new file mode 100644
index 000000000..882ce168a
--- /dev/null
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -0,0 +1,2912 @@
+/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
+ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * vendor id: 0x108E (Sun Microsystems, Inc.)
+ * device id: 0xabba (Cassini)
+ * revision ids: 0x01 = Cassini
+ *               0x02 = Cassini rev 2
+ *               0x10 = Cassini+
+ *               0x11 = Cassini+ 0.2u
+ *
+ * vendor id: 0x100b (National Semiconductor)
+ * device id: 0x0035 (DP83065/Saturn)
+ * revision ids: 0x30 = Saturn B2
+ *
+ * rings are all offset from 0.
+ *
+ * there are two clock domains:
+ * PCI: 33/66MHz clock
+ * chip: 125MHz clock
+ */
+
+#ifndef _CASSINI_H
+#define _CASSINI_H
+
+/* cassini register map: 2M memory mapped in 32-bit memory space accessible as
+ * 32-bit words. there is no i/o port access. REG_ addresses are
+ * shared between cassini and cassini+. REG_PLUS_ addresses only
+ * appear in cassini+. REG_MINUS_ addresses only appear in cassini.
+ */
+#define CAS_ID_REV2 0x02
+#define CAS_ID_REVPLUS 0x10
+#define CAS_ID_REVPLUS02u 0x11
+#define CAS_ID_REVSATURNB2 0x30
+
+/** global resources **/
+
+/* this register sets the weights for the weighted round robin arbiter. e.g.,
+ * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit
+ * for its next turn to access the pci bus.
+ * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8 + * DEFAULT: 0x0, SIZE: 5 bits + */ +#define REG_CAWR 0x0004 /* core arbitration weight */ +#define CAWR_RX_DMA_WEIGHT_SHIFT 0 +#define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */ +#define CAWR_TX_DMA_WEIGHT_SHIFT 2 +#define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */ +#define CAWR_RR_DIS 0x10 /* [4] */ + +/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst + * sizes determined by length of packet or descriptor transfer and the + * max length allowed by the target. + * DEFAULT: 0x0, SIZE: 1 bit + */ +#define REG_INF_BURST 0x0008 /* infinite burst enable reg */ +#define INF_BURST_EN 0x1 /* enable */ + +/* top level interrupts [0-9] are auto-cleared to 0 when the status + * register is read. second level interrupts [13 - 18] are cleared at + * the source. tx completion register 3 is replicated in [19 - 31] + * DEFAULT: 0x00000000, SIZE: 29 bits + */ +#define REG_INTR_STATUS 0x000C /* interrupt status register */ +#define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set + xferred from host queue to + TX FIFO */ +#define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into + TX FIFO. i.e., + TX Kick == TX complete. if + PACED_MODE set, then TX FIFO + also empty */ +#define INTR_TX_DONE 0x00000004 /* any frame xferred into tx + FIFO */ +#define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing + corrupted. FATAL ERROR */ +#define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred + from RX FIFO to host mem. + RX completion reg updated. + may be delayed by recv + intr blanking. */ +#define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers. + RX Kick == RX complete */ +#define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing + corrupted. FATAL ERROR */ +#define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion + ring to post descriptors. + RX complete head incr to + almost reach RX complete + tail */ +#define INTR_RX_BUF_AE 0x00000100 /* less than the + programmable threshold # + of free descr avail for + hw use */ +#define INTR_RX_COMP_AF 0x00000200 /* less than the + programmable threshold # + of descr spaces for hw + use in completion descr + ring */ +#define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC != + len of non-reassembly pkt + from fifo during DMA or + header parser provides TCP + header and payload size > + MAC packet size. + FATAL ERROR */ +#define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this + bit will be set if an interrupt + generated on the pci bus. 
useful + when driver is polling for + interrupts */ +#define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */ +#define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at + least 1 unmasked interrupt set */ +#define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at + least 1 unmasked interrupt set */ +#define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has + at least 1 unmasked interrupt + set */ +#define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least + 1 unmasked interrupt set */ +#define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the + BIF has at least 1 unmasked + interrupt set */ +#define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion + 3 reg data */ +#define INTR_TX_COMP_3_SHIFT 19 +#define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \ + INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \ + INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \ + INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \ + INTR_MAC_CTRL_STATUS) + +/* determines which status events will cause an interrupt. layout same + * as REG_INTR_STATUS. + * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits + */ +#define REG_INTR_MASK 0x0010 /* Interrupt mask */ + +/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS. + * useful when driver is polling for interrupts. layout same as REG_INTR_MASK. + * DEFAULT: 0x00000000, SIZE: 12 bits + */ +#define REG_ALIAS_CLEAR 0x0014 /* alias clear mask + (used w/ status alias) */ +/* same as REG_INTR_STATUS except that only bits cleared are those selected by + * REG_ALIAS_CLEAR + * DEFAULT: 0x00000000, SIZE: 29 bits + */ +#define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias + (selective clear) */ + +/* DEFAULT: 0x0, SIZE: 3 bits */ +#define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */ +#define PCI_ERR_BADACK 0x01 /* reserved in Cassini+. + set if no ACK64# during ABS64 cycle + in Cassini. */ +#define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if + no read retry after 2^15 clocks */ +#define PCI_ERR_OTHER 0x04 /* other PCI errors */ +#define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req. + unused in Cassini. */ +#define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req. + unused in Cassini. */ +#define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during + DMA. unused in cassini. */ + +/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event + * causes an interrupt to be generated. + * DEFAULT: 0x7, SIZE: 3 bits + */ +#define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */ + +/* used to configure PCI related parameters that are not in PCI config space. + * DEFAULT: 0bxx000, SIZE: 5 bits + */ +#define REG_BIM_CFG 0x1008 /* BIM Configuration */ +#define BIM_CFG_RESERVED0 0x001 /* reserved */ +#define BIM_CFG_RESERVED1 0x002 /* reserved */ +#define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */ +#define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */ +#define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */ +#define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */ +#define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */ +#define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */ +#define BIM_CFG_RESERVED2 0x100 /* reserved */ +#define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global + reset. reserved in Cassini. */ +#define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended. + reserved in Cassini. 
*/ +#define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0. + reserved in Cassini. */ + +/* DEFAULT: 0x00000000, SIZE: 32 bits */ +#define REG_BIM_DIAG 0x100C /* BIM Diagnostic */ +#define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state + machine bits [21:0] */ +#define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state + machine bits [6:0] */ + +/* writing to SW_RESET_TX and SW_RESET_RX will issue a global + * reset. poll until TX and RX read back as 0's for completion. + */ +#define REG_SW_RESET 0x1010 /* Software reset */ +#define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until + cleared to 0. */ +#define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until + cleared to 0. */ +#define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low). + resets PHY and anything else + connected to RSTOUT#. RSTOUT# + is also activated by local PCI + reset when hot-swap is being + done. */ +#define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with + this bit set, PCS and SLINK + modules won't be reset. + i.e., link won't drop. */ +#define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */ +#define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits: + 0b000: ARB_IDLE1 + 0b001: ARB_IDLE2 + 0b010: ARB_WB_ACK + 0b011: ARB_WB_WAT + 0b100: ARB_RB_ACK + 0b101: ARB_RB_WAT + 0b110: ARB_RB_END + 0b111: ARB_WB_END */ +#define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits: + 0b00: RD_PCI_WAT + 0b01: RD_PCI_RDY + 0b11: RD_PCI_ACK */ +#define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits: + 0b00: AD_IDL_RX + 0b01: AD_ACK_RX + 0b10: AD_ACK_TX + 0b11: AD_IDL_TX */ +#define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits + 0b00: WR_PCI_WAT + 0b01: WR_PCI_RDY + 0b11: WR_PCI_ACK */ +#define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits: + 0b000: ARB_IDLE1 + 0b001: ARB_IDLE2 + 0b010: ARB_TX_ACK + 0b011: ARB_TX_WAT + 0b100: ARB_RX_ACK + 0b110: ARB_RX_WAT */ + +/* Cassini only. 64-bit register used to check PCI datapath. when read, + * value written has both lower and upper 32-bit halves rotated to the right + * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF + */ +#define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test + Cassini+: reserved */ + +/* output enables are provided for each device's chip select and for the rest + * of the outputs from cassini to its local bus devices. two sw programmable + * bits are connected to general purpus control/status bits. + * DEFAULT: 0x7 + */ +#define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device + output EN. default: 0x7 */ +#define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and + OE signal output enable on the + local bus interface. these + are shared between both local + bus devices. tristate when 0. */ +#define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */ +#define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip + select output enable */ +#define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */ +#define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */ +#define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */ + +/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR + * and read/write from/to it REG_BIM_BUFFER_DATA_LOW and _DATA_HI. + * _DATA_HI should be the last access of the sequence. + * DEFAULT: undefined + */ +#define REG_BIM_BUFFER_ADDR 0x1024 /* BIM buffer address. for + purposes. 
*/ +#define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */ +#define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1 + read buffer access = 0 */ +/* DEFAULT: undefined */ +#define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */ +#define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */ + +/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer. + * bit auto-clears when done with status read from _SUMMARY and _PASS bits. + */ +#define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST + control/status */ +#define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */ +#define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer. + Cassini only. reserved in + Cassini+. */ +#define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read + buffer. */ +#define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write + buffer. Cassini only. reserved + in Cassini+. */ +#define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */ +#define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */ +#define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST. + Cassini only. reserved in + Cassini+. */ +#define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST. + Cassini only. reserved in + Cassini+. */ + +/* ASUN: i'm not sure what this does as it's not in the spec. + * DEFAULT: 0xFC + */ +#define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux + select register */ + +/* enable probe monitoring mode and select data appearing on the P_A* bus. bit + * values for _SEL_HI_MASK and _SEL_LOW_MASK: + * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w, + * wtc empty r, post pci) + * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp, + * pci rpkt comp, txdma wr req, txdma wr ack, + * txdma wr rdy, txdma wr xfr done) + * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd, + * rd arb state, rd pci state) + * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state, + * wrpci state) + * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8] + * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24] + * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40] + * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56] + * the following are not available in Cassini: + * 0xc: rx probe[7:0] 0xd: tx probe[7:0] + * 0xe: hp probe[7:0] 0xf: mac probe[7:0] + */ +#define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */ +#define PROBE_MUX_EN 0x80000000 /* allow probe signals to be + driven on local bus P_A[15:0] + for debugging */ +#define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals: + 0x03 = mac[1:0] + 0x0C = rx[1:0] + 0x30 = tx[1:0] + 0xC0 = hp[1:0] */ +#define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear + on P_A[15:8]. see above for + values. */ +#define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear + on P_A[7:0]. see above for + values. */ + +/* values mean the same thing as REG_INTR_MASK excep that it's for INTB. + DEFAULT: 0x1F */ +#define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask + register 2 for INTB */ +#define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16) +/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to + * all of the alternate (2-4) INTR registers while _1 corresponds to only + * _MASK_1 and _STATUS_1 registers. 
+ * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers + */ +#define INTR_RX_DONE_ALT 0x01 +#define INTR_RX_COMP_FULL_ALT 0x02 +#define INTR_RX_COMP_AF_ALT 0x04 +#define INTR_RX_BUF_UNAVAIL_1 0x08 +#define INTR_RX_BUF_AE_1 0x10 /* almost empty */ +#define INTRN_MASK_RX_EN 0x80 +#define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \ + INTR_RX_COMP_FULL_ALT | \ + INTR_RX_COMP_AF_ALT | \ + INTR_RX_BUF_UNAVAIL_1 | \ + INTR_RX_BUF_AE_1) +#define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status + register 2 for INTB. default: 0x1F */ +#define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16) +#define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the + flags are set. enables desc ring. */ + +#define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask + register 2 for INTB */ +#define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16) + +#define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status + register alias 2 for INTB */ +#define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16) + +#define REG_SATURN_PCFG 0x106c /* pin configuration register for + integrated macphy */ + +#define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */ +#define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */ +#define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */ +#define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */ +#define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */ +#define SATURN_PCFG_PDS 0x00000020 /* phy debug mode. + 0 = normal */ +#define SATURN_PCFG_MTP 0x00000080 /* test point select */ +#define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 = + GMII on SERDES pins for + monitoring. */ +#define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all + pins configed as outputs. + for power saving when using + internal phy. */ +#define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl + polarity from strapping + value. + 1 = mac core led ctrl + polarity active low. */ + + +/** transmit dma registers **/ +#define MAX_TX_RINGS_SHIFT 2 +#define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT) +#define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1) + +/* TX configuration. + * descr ring sizes size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8 + * DEFAULT: 0x3F000001 + */ +#define REG_TX_CFG 0x2004 /* TX config */ +#define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA + will stop after xfer of current + buffer has been completed. */ +#define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be + accessed w/ FIFO addr + and data registers. + TX DMA should be + disabled. */ +#define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in + ring 1. */ +#define TX_CFG_DESC_RING0_SHIFT 2 +#define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4) +#define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4) +#define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after + TX FIFO becomes empty. + if 0, TX_ALL set + if descr queue empty. */ +#define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */ +#define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at + the end of every packet kicked + through Q1. */ +#define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at + the end of every packet kicked + through Q2. 
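+ *
+ * worked example (editor's sketch, not driver code) of the TX ring-size
+ * encoding above: a 128-entry TX descriptor ring 0 needs n = 2, since
+ * 32 * (1 << 2) = 128. "regs" is assumed for illustration.
+ *
+ *    u32 val = readl(regs + REG_TX_CFG);
+ *
+ *    val &= ~TX_CFG_DESC_RINGN_MASK(0);
+ *    val |= 2 << TX_CFG_DESC_RINGN_SHIFT(0);
+ *    writel(val, regs + REG_TX_CFG);
+ *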
*/ +#define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at + the end of every packet kicked + through Q3 */ +#define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at + the end of every packet kicked + through Q4 */ +#define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion + writeback */ +#define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port + connection + 0b00: tx mac req, + tx mac retry req, + tx ack and tx tag. + 0b01: txdma rd req, + txdma rd ack, + txdma rd rdy, + txdma rd type0 + 0b11: txdma wr req, + txdma wr ack, + txdma wr rdy, + txdma wr xfr done. */ +#define TX_CFG_CTX_SEL_SHIFT 30 + +/* 11-bit counters that point to next location in FIFO to be loaded/retrieved. + * used for diagnostics only. + */ +#define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */ +#define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write + pointer. temp hold reg. + diagnostics only. */ +#define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */ +#define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read + pointer */ + +/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */ +#define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */ + +/* current state of all state machines in TX */ +#define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */ +#define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */ +#define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */ +#define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine. + = 0x01 when TX disabled. */ +#define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */ +#define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. prefetch cache controller + state machine */ +#define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */ + +#define REG_TX_SM_2 0x202C /* TX state machine reg #2 */ +#define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */ +#define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */ +#define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */ + +/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented + * while the upper 23 bits are taken from the TX descriptor + */ +#define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */ +#define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */ + +/* 13 bit registers written by driver w/ descriptor value that follows + * last valid xmit descriptor. kick # and complete # values are used by + * the xmit dma engine to control tx descr fetching. if > 1 valid + * tx descr is available within the cache line being read, cassini will + * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro. + */ +#define REG_TX_KICK0 0x2038 /* TX kick reg #1 */ +#define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4) +#define REG_TX_COMP0 0x2048 /* TX completion reg #1 */ +#define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4) + +/* values of TX_COMPLETE_1-4 are written. each completion register + * is 2bytes in size and contiguous. 8B allocation w/ 8B alignment. + * NOTE: completion reg values are only written back prior to TX_INTME and + * TX_ALL interrupts. at all other times, the most up-to-date index values + * should be obtained from the REG_TX_COMPLETE_# registers. 
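+ *
+ * usage sketch (hypothetical, not the shipping driver) for the kick and
+ * completion registers above: after filling descriptors in ring "ring",
+ * the driver writes the index of the entry that follows the last valid
+ * descriptor, and may read the hardware's progress back from the
+ * read-only completion register. "regs", "ring", "next_free" and
+ * "hw_done" are placeholders.
+ *
+ *    writel(next_free, regs + REG_TX_KICKN(ring));
+ *    ...
+ *    hw_done = readl(regs + REG_TX_COMPN(ring));
+ *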
+ * here's the layout: + * offset from base addr completion # byte + * 0 TX_COMPLETE_1_MSB + * 1 TX_COMPLETE_1_LSB + * 2 TX_COMPLETE_2_MSB + * 3 TX_COMPLETE_2_LSB + * 4 TX_COMPLETE_3_MSB + * 5 TX_COMPLETE_3_LSB + * 6 TX_COMPLETE_4_MSB + * 7 TX_COMPLETE_4_LSB + */ +#define TX_COMPWB_SIZE 8 +#define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back + base low */ +#define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back + base high */ +#define TX_COMPWB_MSB_MASK 0x00000000000000FFULL +#define TX_COMPWB_MSB_SHIFT 0 +#define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL +#define TX_COMPWB_LSB_SHIFT 8 +#define TX_COMPWB_NEXT(x) ((x) >> 16) + +/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must + * be 2KB-aligned. */ +#define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */ +#define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */ +#define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8) +#define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8) + +/* 16-bit registers hold weights for the weighted round-robin of the + * four CBQ TX descr rings. weights correspond to # bytes xferred from + * host to TXFIFO in a round of WRR arbitration. can be set + * dynamically with new weights set upon completion of the current + * packet transfer from host memory to TXFIFO. a dummy write to any of + * these registers causes a queue1 pre-emption with all historical bw + * deficit data reset to 0 (useful when congestion requires a + * pre-emption/re-allocation of network bandwidth + */ +#define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */ +#define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */ +#define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */ +#define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */ + +/* diagnostics access to any TX FIFO location. every access is 65 + * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit. + * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag + * bit high. TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if + * TX FIFO data integrity is desired, TX DMA should be + * disabled. _DATA_HI_Tx should be the last access of the sequence. + */ +#define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */ +#define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */ +#define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */ +#define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */ +#define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */ +#define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */ + +/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST + * passed for the specified memory + */ +#define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */ +#define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST + controller state machine */ +#define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */ +#define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */ +#define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */ +#define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */ +#define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */ +#define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self + clears on completion. */ + +/** receive dma registers **/ +#define MAX_RX_DESC_RINGS 2 +#define MAX_RX_COMP_RINGS 4 + +/* receive DMA channel configuration. default: 0x80910 + * free ring size = (1 << n)*32 -> [32 - 8k] + * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9 + * DEFAULT: 0x80910 + */ +#define REG_RX_CFG 0x4000 /* RX config */ +#define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 
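+ *
+ * parsing sketch for the TX completion writeback block laid out above
+ * (editor's illustration, not driver code): "w" is assumed to hold the
+ * 8-byte block at REG_TX_COMPWB_DB_LOW/_HI already converted to host
+ * byte order; each ring's completion index is rebuilt from its MSB/LSB
+ * bytes and the word is then shifted to the next ring's entry.
+ *
+ *    u64 w = tx_compwb;
+ *    int ring, idx;
+ *
+ *    for (ring = 0; ring < MAX_TX_RINGS; ring++) {
+ *            idx = (((w & TX_COMPWB_MSB_MASK) >> TX_COMPWB_MSB_SHIFT) << 8) |
+ *                   ((w & TX_COMPWB_LSB_MASK) >> TX_COMPWB_LSB_SHIFT);
+ *            ... reclaim ring's descriptors up to idx ...
+ *            w = TX_COMPWB_NEXT(w);
+ *    }
+ *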
0 stops + channel as soon as current + frame xfer has completed. + driver should disable MAC + for 200ms before disabling + RX */ +#define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX + free desc ring. + def: 0x8 = 8k */ +#define RX_CFG_DESC_RING_SHIFT 1 +#define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete + ring. def: 0x8 = 32k */ +#define RX_CFG_COMP_RING_SHIFT 5 +#define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc + batching. def: 0x0 = + enabled */ +#define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st + data byte of the packet + w/in 8 byte boundares. + this swivels the data + DMA'ed to header + buffers, jumbo buffers + when header split is not + requested and MTU sized + buffers. def: 0x2 */ +#define RX_CFG_SWIVEL_SHIFT 10 + +/* cassini+ only */ +#define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in + RX free desc ring 2. + def: 0x8 = 8k */ +#define RX_CFG_DESC_RING1_SHIFT 16 + + +/* the page size register allows cassini chips to do the following with + * received data: + * [--------------------------------------------------------------] page + * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad] + * |--------------| = PAGE_SIZE_BUFFER_STRIDE + * page = PAGE_SIZE + * offset = PAGE_SIZE_MTU_OFF + * for the above example, MTU_BUFFER_COUNT = 4. + * NOTE: as is apparent, you need to ensure that the following holds: + * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE + * DEFAULT: 0x48002002 (8k pages) + */ +#define REG_RX_PAGE_SIZE 0x4004 /* RX page size */ +#define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to + by receive descriptors. + if jumbo buffers are + supported the page size + should not be < 8k. + 0b00 = 2k, 0b01 = 4k + 0b10 = 8k, 0b11 = 16k + DEFAULT: 8k */ +#define RX_PAGE_SIZE_SHIFT 0 +#define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw + packs into a page. + DEFAULT: 4 */ +#define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11 +#define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate + each MTU buffer + + offset from each + other. + 0b00 = 1k, 0b01 = 2k + 0b10 = 4k, 0b11 = 8k + DEFAULT: 0x1 */ +#define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27 +#define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that + hw writes the MTU buffer + into. + 0b00 = 0, + 0b01 = 64 bytes + 0b10 = 96, 0b11 = 128 + DEFAULT: 0x1 */ +#define RX_PAGE_SIZE_MTU_OFF_SHIFT 30 + +/* 11-bit counter points to next location in RX FIFO to be loaded/read. + * shadow write pointers enable retries in case of early receive aborts. + * DEFAULT: 0x0. generated on 64-bit boundaries. + */ +#define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */ +#define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */ +#define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write + pointer */ +#define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read + pointer */ +#define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read + pointer. 
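+ *
+ * worked example: the default RX page size value 0x48002002 quoted above
+ * decomposes as 8k pages, 4 MTU buffers per page, a 2k stride and a
+ * 64-byte offset (and 4 <= 8192/2048, so the MTU_BUFFER_COUNT constraint
+ * holds). composing it field by field ("regs" assumed, sketch only):
+ *
+ *    u32 val;
+ *
+ *    val  = 0x2 << RX_PAGE_SIZE_SHIFT;
+ *    val |= 4   << RX_PAGE_SIZE_MTU_COUNT_SHIFT;
+ *    val |= 0x1 << RX_PAGE_SIZE_MTU_STRIDE_SHIFT;
+ *    val |= 0x1 << RX_PAGE_SIZE_MTU_OFF_SHIFT;
+ *    writel(val, regs + REG_RX_PAGE_SIZE);
+ *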
(8-bit counter) */ + +/* current state of RX DMA state engines + other info + * DEFAULT: 0x0 + */ +#define REG_RX_DEBUG 0x401C /* RX debug */ +#define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC: + 0x0 = idle, 0x1 = load_bop + 0x2 = load 1, 0x3 = load 2 + 0x4 = load 3, 0x5 = load 4 + 0x6 = last detect + 0x7 = wait req + 0x8 = wait req statuss 1st + 0x9 = load st + 0xa = bubble mac + 0xb = error */ +#define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and + RX FIFO: + 0x0 = idle, 0x1 = hp xfr + 0x2 = wait hp ready + 0x3 = wait flow code + 0x4 = fifo xfer + 0x5 = make status + 0x6 = csum ready + 0x7 = error */ +#define RX_DEBUG_FC_STATE_MASK 0x000000180 /* flow control state machine + w/ MAC: + 0x0 = idle + 0x1 = wait xoff ack + 0x2 = wait xon + 0x3 = wait xon ack */ +#define RX_DEBUG_DATA_STATE_MASK 0x000001E00 /* unload data state machine + states: + 0x0 = idle data + 0x1 = header begin + 0x2 = xfer header + 0x3 = xfer header ld + 0x4 = mtu begin + 0x5 = xfer mtu + 0x6 = xfer mtu ld + 0x7 = jumbo begin + 0x8 = xfer jumbo + 0x9 = xfer jumbo ld + 0xa = reas begin + 0xb = xfer reas + 0xc = flush tag + 0xd = xfer reas ld + 0xe = error + 0xf = bubble idle */ +#define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine + states: + 0x0 = idle desc + 0x1 = wait ack + 0x9 = wait ack 2 + 0x2 = fetch desc 1 + 0xa = fetch desc 2 + 0x3 = load ptrs + 0x4 = wait dma + 0x5 = wait ack batch + 0x6 = post batch + 0x7 = xfr done */ +#define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the + interrupt queue */ +#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer + of the interrupt queue */ + +/* flow control frames are emitted using two PAUSE thresholds: + * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg + * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes. + * PAUSE thresholds defined in terms of FIFO occupancy and may be translated + * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames + * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max + * value is is 0x6F. + * DEFAULT: 0x00078 + */ +#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */ +#define RX_PAUSE_THRESH_QUANTUM 64 +#define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when + RX FIFO occupancy > + value*64B */ +#define RX_PAUSE_THRESH_OFF_SHIFT 0 +#define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after + emitting XOFF PAUSE when RX + FIFO occupancy falls below + this value*64B. must be + < XOFF threshold. if = + RX_FIFO_SIZE< XON frames are + never emitted. */ +#define RX_PAUSE_THRESH_ON_SHIFT 12 + +/* 13-bit register used to control RX desc fetching and intr generation. if 4+ + * valid RX descriptors are available, Cassini will read 4 at a time. + * writing N means that all desc up to *but* excluding N are available. N must + * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned. + * DEFAULT: 0 on reset + */ +#define REG_RX_KICK 0x4024 /* RX kick reg */ + +/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings. + * lower 13 bits of the low register are hard-wired to 0. 
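+ *
+ * posting sketch for REG_RX_KICK (illustrative only): "last" is assumed to
+ * be the index one past the newest posted free descriptor, already wrapped
+ * to the ring size; it is rounded down to a multiple of 4 before being
+ * written, as required above.
+ *
+ *    writel(last & ~3, regs + REG_RX_KICK);
+ *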
+ */ +#define REG_RX_DB_LOW 0x4028 /* RX descriptor ring + base low */ +#define REG_RX_DB_HI 0x402C /* RX descriptor ring + base hi */ +#define REG_RX_CB_LOW 0x4030 /* RX completion ring + base low */ +#define REG_RX_CB_HI 0x4034 /* RX completion ring + base hi */ +/* 13-bit register indicate desc used by cassini for receive frames. used + * for diagnostic purposes. + * DEFAULT: 0 on reset + */ +#define REG_RX_COMP 0x4038 /* (ro) RX completion */ + +/* HEAD and TAIL are used to control RX desc posting and interrupt + * generation. hw moves the head register to pass ownership to sw. sw + * moves the tail register to pass ownership back to hw. to give all + * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no + * more entries are available, DMA will pause and an interrupt will be + * generated to indicate no more entries are available. sw can use + * this interrupt to reduce the # of times it must update the + * completion tail register. + * DEFAULT: 0 on reset + */ +#define REG_RX_COMP_HEAD 0x403C /* RX completion head */ +#define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */ + +/* values used for receive interrupt blanking. loaded each time the ISR is read + * DEFAULT: 0x00000000 + */ +#define REG_RX_BLANK 0x4044 /* RX blanking register + for ISR read */ +#define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if + this many sets of completion + writebacks (up to 2 packets) + occur since the last time + the ISR was read. 0 = no + packet blanking */ +#define RX_BLANK_INTR_PKT_SHIFT 0 +#define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted + if that many clocks were + counted since last time the + ISR was read. + each count is 512 core + clocks (125MHz). 0 = no + time blanking */ +#define RX_BLANK_INTR_TIME_SHIFT 12 + +/* values used for interrupt generation based on threshold values of how + * many free desc and completion entries are available for hw use. + * DEFAULT: 0x00000000 + */ +#define REG_RX_AE_THRESH 0x4048 /* RX almost empty + thresholds */ +#define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be + generated if # desc + avail for hw use <= + # */ +#define RX_AE_THRESH_FREE_SHIFT 0 +#define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be + generated if # of + completion entries + avail for hw use <= + # */ +#define RX_AE_THRESH_COMP_SHIFT 13 + +/* probabilities for random early drop (RED) thresholds on a FIFO threshold + * basis. probability should increase when the FIFO level increases. control + * packets are never dropped and not counted in stats. probability programmed + * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped. + * DEFAULT: 0x00000000 + */ +#define REG_RX_RED 0x404C /* RX random early detect enable */ +#define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */ +#define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */ +#define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */ +#define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */ + +/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO. + * RX control FIFO = # of packets in RX FIFO. 
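+ *
+ * completion-ring sketch (not driver code) for the HEAD/TAIL handshake
+ * above: process entries from the software tail up to the hardware head,
+ * then return them by advancing the tail. "regs", "tail", "ring_size"
+ * (a power of two) and the per-entry processing are placeholders.
+ *
+ *    u32 head = readl(regs + REG_RX_COMP_HEAD);
+ *
+ *    while (tail != head) {
+ *            ... process completion entry at tail ...
+ *            tail = (tail + 1) & (ring_size - 1);
+ *    }
+ *    writel(tail, regs + REG_RX_COMP_TAIL);
+ *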
+ * DEFAULT: 0x0 + */ +#define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */ +#define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */ +#define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */ +#define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */ +#define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */ +#define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */ +#define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr + high */ + +/* BIST testing ro RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST + * START/COMPLETE is writeable. START will clear when the BIST has completed + * checking all 17 RAMS. + * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0 + */ +#define REG_RX_BIST 0x4060 /* (ro) RX BIST */ +#define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */ +#define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */ +#define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */ +#define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */ +#define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */ +#define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */ +#define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 33B passed */ +#define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */ +#define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */ +#define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */ +#define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */ +#define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */ +#define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */ +#define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */ +#define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */ +#define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */ +#define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */ +#define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */ +#define RX_BIST_SUMMARY 0x00000002 /* when BIST complete, + summary pass bit + contains AND of BIST + results of all 16 + RAMS */ +#define RX_BIST_START 0x00000001 /* write 1 to start + BIST. self clears + on completion. */ + +/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read + * from to retrieve packet control info. + * DEFAULT: 0 + */ +#define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO + write ptr */ +#define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read + ptr */ + +/* receive interrupt blanking. loaded each time interrupt alias register is + * read. + * DEFAULT: 0x0 + */ +#define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for + alias read */ +#define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if # + completion writebacks + > # since last ISR + read. 0 = no + blanking. up to 2 + packets per + completion wb. */ +#define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if # + clocks > # since last + ISR read. each count + is 512 core clocks + (125MHz). 0 = no + blanking. */ + +/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed + * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0 + * will unset the tag bit while writing HI_T1 will set the tag bit. to reset + * to normal operation after diagnostics, write to address location 0x0. + * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should + * be the last write access of a write sequence. 
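+ *
+ * worked example of the blanking granularity used by REG_RX_BLANK and the
+ * alias register below: one time count is 512 core clocks at 125MHz, i.e.
+ * 4.096us, so interrupting after 8 completed packets or roughly 49us
+ * (12 counts) could be programmed as follows (sketch only, "regs"
+ * assumed):
+ *
+ *    u32 val;
+ *
+ *    val  = 8  << RX_BLANK_INTR_PKT_SHIFT;
+ *    val |= 12 << RX_BLANK_INTR_TIME_SHIFT;
+ *    writel(val, regs + REG_RX_BLANK);
+ *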
+ * DEFAULT: undefined + */ +#define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */ +#define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */ +#define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */ +#define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */ +#define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */ + +/* diagnostic assess to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of + * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit + * accesses. HI is 7-bits with 6-bit flow id and 1 bit control + * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI + * should be last write access of the write sequence. + * DEFAULT: undefined + */ +#define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and + Batching FIFO addr */ +#define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data + low */ +#define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data + mid */ +#define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data + hi and flow id */ +#define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */ +#define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */ + +/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO. + * DEFAULT: undefined + */ +#define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */ +#define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */ +#define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */ +#define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high + T0 */ +#define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high + T1 */ + +/* 64-bit pointer to receive data buffer in host memory used for headers and + * small packets. MSB in high register. loaded by DMA state machine and + * increments as DMA writes receive data. only 50 LSB are incremented. top + * 13 bits taken from RX descriptor. + * DEFAULT: undefined + */ +#define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr + low */ +#define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr + high */ +#define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer + low */ +#define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer + high */ + +/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds + * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of + * one of the 64 byte locations in the Batching table. LOW holds 32 LSB. + * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set + * to 0 for PIO access. DATA_HIGH should be last write of write sequence. + * layout: + * reassmbl ptr [78:15] | reassmbl index [14:1] | reassmbl entry valid [0] + * DEFAULT: undefined + */ +#define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table + address */ +#define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */ + +#define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table + data low */ +#define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table + data mid */ +#define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table + data high */ + +/* cassini+ only */ +/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to + * 0. same semantics as primary desc/complete rings. + */ +#define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring + 2 base low */ +#define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring + 2 base high */ +#define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring + 2 base low. 4 total */ +#define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring + 2 base high. 
4 total */ +#define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1)) +#define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1)) +#define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */ +#define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2 + reg */ +#define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2 + head reg. 4 total. */ +#define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2 + tail reg. 4 total. */ +#define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1)) +#define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1)) +#define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2 + thresholds */ +#define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK +#define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT + +/** header parser registers **/ + +/* RX parser configuration register. + * DEFAULT: 0x1651004 + */ +#define REG_HP_CFG 0x4140 /* header parser + configuration reg */ +#define HP_CFG_PARSE_EN 0x00000001 /* enab header parsing */ +#define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors + 0 = 64. 0x3f = 63 */ +#define HP_CFG_NUM_CPU_SHIFT 2 +#define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment + TCP seq # by one when + stored in FDBM */ +#define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data + needed to be considered + for reassembly */ +#define HP_CFG_TCP_THRESH_SHIFT 9 + +/* access to RX Instruction RAM. 5-bit register/counter holds addr + * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI. + * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access + * of sequence. + * DEFAULT: undefined + */ +#define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM + address */ +#define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */ +#define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM + data low */ +#define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF +#define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0 +#define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000 +#define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16 +#define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000 +#define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20 +#define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000 +#define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22 +#define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM + data mid */ +#define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003 +#define HP_INSTR_RAM_MID_OUTARG_SHIFT 0 +#define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C +#define HP_INSTR_RAM_MID_OUTOP_SHIFT 2 +#define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0 +#define HP_INSTR_RAM_MID_FNEXT_SHIFT 6 +#define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800 +#define HP_INSTR_RAM_MID_FOFF_SHIFT 11 +#define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000 +#define HP_INSTR_RAM_MID_SNEXT_SHIFT 18 +#define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000 +#define HP_INSTR_RAM_MID_SOFF_SHIFT 23 +#define HP_INSTR_RAM_MID_OP_MASK 0xC0000000 +#define HP_INSTR_RAM_MID_OP_SHIFT 30 +#define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM + data high */ +#define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF +#define HP_INSTR_RAM_HI_VAL_SHIFT 0 +#define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000 +#define HP_INSTR_RAM_HI_MASK_SHIFT 16 + +/* PIO access into RX Header parser data RAM and flow database. + * 11-bit register. Data fills the LSB portion of bus if less than 32 bits. + * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM. + * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120] + * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access + * flow database. 
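+ *
+ * load sketch for the HP instruction RAM, following the ordering stated
+ * above with DATA_HI written last (illustration only, not the shipping
+ * firmware loader): RX DMA is assumed to be disabled, "i" is the
+ * instruction index and "low"/"mid"/"hi" are values already composed from
+ * the HP_INSTR_RAM_* field masks and shifts.
+ *
+ *    writel(i & HP_INSTR_RAM_ADDR_MASK, regs + REG_HP_INSTR_RAM_ADDR);
+ *    writel(low, regs + REG_HP_INSTR_RAM_DATA_LOW);
+ *    writel(mid, regs + REG_HP_INSTR_RAM_DATA_MID);
+ *    writel(hi,  regs + REG_HP_INSTR_RAM_DATA_HI);
+ *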
+ * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg + * should be the last write access of the write sequence. + * DEFAULT: undefined + */ +#define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB + RAM address */ +#define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte + locations in header + parser data ram to + read/write */ +#define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations + in the flow database */ +#define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */ + +/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes + * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64] + * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0] + * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64] + * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0] + * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]} + * FLOW_DB(10) = bit 0 has value for flow valid + * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0] + */ +#define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */ +#define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4) + +/* diagnostics for RX Header Parser block. + * ASUN: the header parser state machine register is used for diagnostics + * purposes. however, the spec doesn't have any details on it. + */ +#define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */ +#define REG_HP_STATUS0 0x4190 /* (ro) HP status 1 */ +#define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */ +#define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */ +#define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU + number */ +#define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */ + +#define REG_HP_STATUS1 0x4194 /* (ro) HP status 2 */ +#define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */ +#define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */ +#define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */ +#define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */ + +#define REG_HP_STATUS2 0x4198 /* (ro) HP status 3 */ +#define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */ +#define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start + start offset */ +#define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */ +#define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */ +#define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o + reassembly */ +#define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split + enable */ +#define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload + check */ +#define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length + equal to zero */ +#define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload + chk */ +#define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload + threshold */ +#define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */ +#define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */ +#define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */ +#define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */ +#define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */ +#define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */ + +/* BIST for header parser(HP) and flow database memories (FDBM). set _START + * to start BIST. controller clears _START on completion. _START can also + * be cleared to force termination of BIST. a bit set indicates that that + * memory passed its BIST. 
+ */ +#define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */ +#define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */ +#define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */ +#define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */ +#define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */ +#define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */ +#define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */ +#define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0 + bank 1 */ +#define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1 + bank 2 */ +#define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2 + bank 1 */ +#define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3 + bank 1 */ +#define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence + RAM */ +#define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */ +#define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */ + + +/** MAC registers. **/ +/* reset bits are set using a PIO write and self-cleared after the command + * execution has completed. + */ +#define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset + command (default: 0x0) */ +#define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset + command (default: 0x0) */ +/* execute a pause flow control frame transmission + DEFAULT: 0x0XXXX */ +#define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */ +#define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time + to be sent on network + in units of slot + times */ +#define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl + frame on network */ + +/* bit set indicates that event occurred. auto-cleared when status register + * is read and have corresponding mask bits in mask register. events will + * trigger an interrupt if the corresponding mask bit is 0. 
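+ *
+ * reset sketch for the self-clearing MAC reset commands above
+ * (illustrative, not driver code): the command is assumed to be bit 0 of
+ * each register, written and then polled until the hardware clears it
+ * again. "regs" and the poll budget are placeholders.
+ *
+ *    int limit = 100;
+ *
+ *    writel(0x1, regs + REG_MAC_TX_RESET);
+ *    writel(0x1, regs + REG_MAC_RX_RESET);
+ *    while (limit-- > 0) {
+ *            if (!((readl(regs + REG_MAC_TX_RESET) |
+ *                   readl(regs + REG_MAC_RX_RESET)) & 0x1))
+ *                    break;
+ *            udelay(10);
+ *    }
+ *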
+ * status register default: 0x00000000 + * mask register default = 0xFFFFFFFF on reset + */ +#define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */ +#define MAC_TX_FRAME_XMIT 0x0001 /* successful frame + transmision */ +#define MAC_TX_UNDERRUN 0x0002 /* terminated frame + transmission due to + data starvation in the + xmit data path */ +#define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed + length passed to TX MAC + by the DMA engine */ +#define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal + collision counter */ +#define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive + collision counter */ +#define MAC_TX_COLL_LATE 0x0020 /* rollover of the late + collision counter */ +#define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first + collision counter */ +#define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer + timer */ +#define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak + attempts counter */ + +#define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */ +#define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of + a frame */ +#define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to + RX FIFO overflow */ +#define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame + counter */ +#define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment + error counter */ +#define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error + counter */ +#define MAC_RX_LEN_ERR 0x0020 /* rollover of length + error counter */ +#define MAC_RX_VIOL_ERR 0x0040 /* rollover of code + violation error */ + +/* DEFAULT: 0xXXXX0000 on reset */ +#define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */ +#define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful + reception of a + pause control + frame */ +#define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a + transition from + "not paused" to + "paused" */ +#define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a + transition from + "paused" to "not + paused" */ +#define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time + operand that was + received in the last + pause flow control + frame */ + +/* layout identical to TX MAC[8:0] */ +#define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */ +/* layout identical to RX MAC[6:0] */ +#define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */ +/* layout identical to CTRL MAC[2:0] */ +#define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */ + +/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay + * imposed before writes to other bits in the TX_MAC_CFG register or any of + * the MAC parameters is performed. delay dependent upon time required to + * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g., + * the delay for a 1518-byte frame on a 100Mbps network is 125us. + * alternatively, just poll TX_CFG_EN until it reads back as 0. + * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and + * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should + * be 0x200 (slot time of 512 bytes) + */ +#define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */ +#define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will + force TXMAC state + machine to remain in + idle state or to + transition to idle state + on completion of an + ongoing packet. */ +#define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral + process. set to 1 when + full duplex and 0 when + half duplex */ +#define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff + algorithm. 
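+ *
+ * shutdown sketch for the TX MAC restriction above (not driver code):
+ * clear MAC_TX_CFG_EN and poll it back to 0 before touching any other TX
+ * MAC parameter. "regs" and the delay step are placeholders.
+ *
+ *    u32 cfg = readl(regs + REG_MAC_TX_CFG);
+ *
+ *    writel(cfg & ~MAC_TX_CFG_EN, regs + REG_MAC_TX_CFG);
+ *    while (readl(regs + REG_MAC_TX_CFG) & MAC_TX_CFG_EN)
+ *            udelay(100);
+ *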
set to 1 when + full duplex and 0 when + half duplex */ +#define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the + Rx-to-TX IPG. after + receiving a frame, TX + MAC will reset its + deferral process to + carrier sense for the + amount of time = IPG0 + + IPG1 and commit to + transmission for time + specified in IPG2. when + 0 or when xmitting frames + back-to-pack (Tx-to-Tx + IPG), TX MAC ignores + IPG0 and will only use + IPG1 for deferral time. + IPG2 still used. */ +#define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily + give up on frame + xmission. if backoff + algorithm reaches the + ATTEMPT_LIMIT, it will + clear attempts counter + and continue trying to + send the frame as + specified by + GIVE_UP_LIM. when 0, + TX MAC will execute + standard CSMA/CD prot. */ +#define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will + continue to try to xmit + until successful. when + 0, TX MAC will continue + to try xmitting until + successful or backoff + algorithm reaches + ATTEMPT_LIMIT*16 */ +#define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable + backoff algorithm. TX + MAC will not back off + after a xmission attempt + that resulted in a + collision. */ +#define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that + deferral process is reset + in response to carrier + sense during the entire + duration of IPG. TX MAC + will only commit to frame + xmission after frame + xmission has actually + begun. */ +#define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate + CRC for all xmitted + packets. when clear, CRC + generation is dependent + upon NO_CRC bit in the + xmit control word from + TX DMA */ +#define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the + carrier extension + feature. this allows for + longer collision domains + by extending the carrier + and collision window + from the end of FCS until + the end of the slot time + if necessary. Required + for half-duplex at 1Gbps, + clear otherwise. */ + +/* when CRC is not stripped, reassembly packets will not contain the CRC. + * these will be stripped by HRP because it reassembles layer 4 data, and the + * CRC is layer 2. however, non-reassembly packets will still contain the CRC + * when passed to the host. to ensure proper operation, need to wait 3.2ms + * after clearing RX_CFG_EN before writing to any other RX MAC registers + * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears + * to 0. similary, HASH_FILTER_EN and ADDR_FILTER_EN have the same + * restrictions as CFG_EN. + */ +#define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */ +#define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */ +#define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0. + feature not supported */ +#define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the + last 4 bytes of a + received frame. */ +#define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */ +#define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid + multicast frames (group + bit in DA field set) */ +#define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter + multicast addresses */ +#define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use + address filtering regs + to filter both unicast + and multicast + addresses */ +#define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to + RX DMA by setting BAD + bit but not Abort bit + in the status. CRC, + framing, and length errs + will not increment + error counters. frames + which don't match dest + addr will be passed up + w/ BAD bit set. 
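+ *
+ * reconfiguration sketch honoring the RX MAC restriction above
+ * (illustrative only): disable the RX MAC, wait for EN to read back as 0,
+ * update the filtering bits, then re-enable. "regs" and the delay step
+ * are placeholders.
+ *
+ *    u32 cfg = readl(regs + REG_MAC_RX_CFG);
+ *
+ *    cfg &= ~MAC_RX_CFG_EN;
+ *    writel(cfg, regs + REG_MAC_RX_CFG);
+ *    while (readl(regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)
+ *            udelay(100);
+ *    cfg |= MAC_RX_CFG_PROMISC_EN;
+ *    writel(cfg, regs + REG_MAC_RX_CFG);
+ *    writel(cfg | MAC_RX_CFG_EN, regs + REG_MAC_RX_CFG);
+ *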
*/ +#define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of + packet bursts generated + by carrier extension + with packet bursting + senders. only applies + to half-duplex 1Gbps */ + +/* DEFAULT: 0x0 */ +#define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */ +#define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for + sending pause flow ctrl + frames */ +#define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received + pause flow ctrl frames */ +#define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl + packets to RX DMA */ + +/* to ensure proper operation, a global initialization sequence should be + * performed when a loopback config is entered or exited. if programmed after + * a hw or global sw reset, RX/TX MAC software reset and initialization + * should be done to ensure stable clocking. + * DEFAULT: 0x0 + */ +#define REG_MAC_XIF_CFG 0x603C /* XIF config reg */ +#define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers + on MII xmit bus */ +#define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data + path to GMII recv data + path. phy mode register + clock selection must be + set to GMII mode and + GMII_MODE should be set + to 1. in loopback mode, + REFCLK will drive the + entire mac core. 0 for + normal operation. */ +#define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data + path during packet + xmission. clear to 0 + in any full duplex mode, + in any loopback mode, + or in half-duplex SERDES + or SLINK modes. set when + in half-duplex when + using external phy. */ +#define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII + clocks and datapath */ +#define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable + external tristate buffer + on the MII receive + bus. */ +#define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */ +#define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */ + +#define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg. + recommended: 0x00 */ +#define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg + recommended: 0x08 */ +#define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg + recommended: 0x04 */ +#define REG_MAC_SLOT_TIME 0x604C /* slot time reg + recommended: 0x40 */ +#define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg + recommended: 0x40 */ + +/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size. + * recommended value: 0x2000.05EE + */ +#define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */ +#define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */ +#define MAC_FRAMESIZE_MAX_BURST_SHIFT 16 +#define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */ +#define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0 +#define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of + preamble bytes that the + TX MAC will xmit at the + beginning of each frame + value should be 2 or + greater. recommended + value: 0x07 */ +#define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration + of jam in units of media + byte time. recommended + value: 0x04 */ +#define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. # + of attempts TX MAC will + make to xmit a frame + before it resets its + attempts counter. after + the limit has been + reached, TX MAC may or + may not drop the frame + dependent upon value + in TX_MAC_CFG. + recommended + value: 0x10 */ +#define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg. + type field of a MAC + ctrl frame. recommended + value: 0x8808 */ + +/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes. 
+ * register contains comparison + * 0 16 MSB of primary MAC addr [47:32] of DA field + * 1 16 middle bits "" [31:16] of DA field + * 2 16 LSB "" [15:0] of DA field + * 3*x 16MSB of alt MAC addr 1-15 [47:32] of DA field + * 4*x 16 middle bits "" [31:16] + * 5*x 16 LSB "" [15:0] + * 42 16 MSB of MAC CTRL addr [47:32] of DA. + * 43 16 middle bits "" [31:16] + * 44 16 LSB "" [15:0] + * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames. + * if there is a match, MAC will set the bit for alternative address + * filter pass [15] + + * here is the map of registers given MAC address notation: a:b:c:d:e:f + * ab cd ef + * primary addr reg 2 reg 1 reg 0 + * alt addr 1 reg 5 reg 4 reg 3 + * alt addr x reg 5*x reg 4*x reg 3*x + * ctrl addr reg 44 reg 43 reg 42 + */ +#define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */ +#define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4) +#define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg + [47:32] */ +#define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg + [31:16] */ +#define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg + [15:0] */ +#define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1 + mask reg. 8-bit reg + contains nibble mask for + reg 2 and 1. */ +#define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask + reg */ + +/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes + * 16-bit registers contain bits of the hash table. + * reg x -> [16*(15 - x) + 15 : 16*(15 - x)]. + * e.g., 15 -> [15:0], 0 -> [255:240] + */ +#define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */ +#define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4) + +/* statistics registers. these registers generate an interrupt on + * overflow. recommended initialization: 0x0000. most are 16-bits except + * for PEAK_ATTEMPTS register which is 8 bits. + */ +#define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision + counter. */ +#define REG_MAC_COLL_FIRST 0x61A4 /* first attempt + successful collision + counter */ +#define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision + counter */ +#define REG_MAC_COLL_LATE 0x61AC /* late collision counter */ +#define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. time base + is the media byte + clock/256 */ +#define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */ +#define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */ +#define REG_MAC_LEN_ERR 0x61BC /* length error counter */ +#define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */ +#define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */ +#define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation + error counter */ + +/* misc registers */ +#define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg. + 10-bit register used as a + seed for the random number + generator for the CSMA/CD + backoff algorithm. 
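+ *
+ * programming sketch for the primary address using the a:b:c:d:e:f map
+ * above (illustration only): with the address stored in u8 addr[6],
+ * register 2 gets a:b, register 1 gets c:d and register 0 gets e:f.
+ * "regs" is assumed.
+ *
+ *    writel((addr[0] << 8) | addr[1], regs + REG_MAC_ADDRN(2));
+ *    writel((addr[2] << 8) | addr[3], regs + REG_MAC_ADDRN(1));
+ *    writel((addr[4] << 8) | addr[5], regs + REG_MAC_ADDRN(0));
+ *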
only + programmed after power-on + reset and should be a + random value which has a + high likelihood of being + unique for each MAC + attached to a network + segment (e.g., 10 LSB of + MAC address) */ + +/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address + * map + */ + +/* 27-bit register has the current state for key state machines in the MAC */ +#define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */ +#define MAC_SM_RLM_MASK 0x07800000 +#define MAC_SM_RLM_SHIFT 23 +#define MAC_SM_RX_FC_MASK 0x00700000 +#define MAC_SM_RX_FC_SHIFT 20 +#define MAC_SM_TLM_MASK 0x000F0000 +#define MAC_SM_TLM_SHIFT 16 +#define MAC_SM_ENCAP_SM_MASK 0x0000F000 +#define MAC_SM_ENCAP_SM_SHIFT 12 +#define MAC_SM_TX_REQ_MASK 0x00000C00 +#define MAC_SM_TX_REQ_SHIFT 10 +#define MAC_SM_TX_FC_MASK 0x000003C0 +#define MAC_SM_TX_FC_SHIFT 6 +#define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038 +#define MAC_SM_FIFO_WRITE_SEL_SHIFT 3 +#define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007 +#define MAC_SM_TX_FIFO_EMPTY_SHIFT 0 + +/** MIF registers. the MIF can be programmed in either bit-bang or + * frame mode. + **/ +#define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock. + 1 -> 0 will generate a + rising edge. 0 -> 1 will + generate a falling edge. */ +#define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit + register generates data */ +#define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output + enable. enable when + xmitting data from MIF to + transceiver. */ + +/* 32-bit register serves as an instruction register when the MIF is + * programmed in frame mode. load this register w/ a valid instruction + * (as per IEEE 802.3u MII spec). poll this register to check for instruction + * execution completion. during a read operation, this register will also + * contain the 16-bit data returned by the tranceiver. unless specified + * otherwise, fields are considered "don't care" when polling for + * completion. + */ +#define REG_MIF_FRAME 0x620C /* MIF frame/output reg */ +#define MIF_FRAME_START_MASK 0xC0000000 /* start of frame. + load w/ 01 when + issuing an instr */ +#define MIF_FRAME_ST 0x40000000 /* STart of frame */ +#define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a + write. 10 for a + read */ +#define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */ +#define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */ +#define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when + issuing an instr, + this field should be + loaded w/ the XCVR + addr */ +#define MIF_FRAME_PHY_ADDR_SHIFT 23 +#define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address. + when issuing an instr, + addr of register + to be read/written */ +#define MIF_FRAME_REG_ADDR_SHIFT 18 +#define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB. + when issuing an instr, + set this bit to 1 */ +#define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB. + when issuing an instr, + set this bit to 0. + when polling for + completion, 1 means + that instr execution + has been completed */ +#define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload + load with 16-bit data + to be written in + transceiver reg for a + write. doesn't matter + in a read. when + polling for + completion, field is + "don't care" for write + and 16-bit data + returned by the + transceiver for a + read (if valid bit + is set) */ +#define REG_MIF_CFG 0x6210 /* MIF config reg */ +#define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1 + 0 -> select MDIO_0 */ +#define MIF_CFG_POLL_EN 0x0002 /* enable polling + mechanism. 
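+ *
+ * frame-mode read sketch (editor's illustration, not the driver's mdio
+ * code): compose the instruction as described above, write it, then poll
+ * the turn-around LSB for completion and return the 16-bit data field.
+ * the function name, "regs" and the poll budget are placeholders.
+ *
+ *    static int mif_frame_read_sketch(void __iomem *regs, int phy, int reg)
+ *    {
+ *            int limit = 1000;
+ *            u32 frame = MIF_FRAME_ST | MIF_FRAME_OP_READ |
+ *                        (phy << MIF_FRAME_PHY_ADDR_SHIFT) |
+ *                        (reg << MIF_FRAME_REG_ADDR_SHIFT) |
+ *                        MIF_FRAME_TURN_AROUND_MSB;
+ *
+ *            writel(frame, regs + REG_MIF_FRAME);
+ *            while (limit-- > 0) {
+ *                    udelay(10);
+ *                    frame = readl(regs + REG_MIF_FRAME);
+ *                    if (frame & MIF_FRAME_TURN_AROUND_LSB)
+ *                            return frame & MIF_FRAME_DATA_MASK;
+ *            }
+ *            return -1;
+ *    }
+ *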
if set, + BB_MODE should be 0 */ +#define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode + 0 -> frame mode */ +#define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be + used by polling mode. + only meaningful if POLL_EN + is set to 1 */ +#define MIF_CFG_POLL_REG_SHIFT 3 +#define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose. + when MDIO_0 is idle, + 1 -> tranceiver is + connected to MDIO_0. + when MIF is communicating + w/ MDIO_0 in bit-bang + mode, this bit indicates + the incoming bit stream + during a read op */ +#define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose. + when MDIO_1 is idle, + 1 -> transceiver is + connected to MDIO_1. + when MIF is communicating + w/ MDIO_1 in bit-bang + mode, this bit indicates + the incoming bit stream + during a read op */ +#define MIF_CFG_POLL_PHY_MASK 0x7C00 /* tranceiver address to + be polled */ +#define MIF_CFG_POLL_PHY_SHIFT 10 + +/* 16-bit register used to determine which bits in the POLL_STATUS portion of + * the MIF_STATUS register will cause an interrupt. if a mask bit is 0, + * corresponding bit of the POLL_STATUS will generate a MIF interrupt when + * set. DEFAULT: 0xFFFF + */ +#define REG_MIF_MASK 0x6214 /* MIF mask reg */ + +/* 32-bit register used when in poll mode. auto-cleared after being read */ +#define REG_MIF_STATUS 0x6218 /* MIF status reg */ +#define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains + the "latest image" + update of the XCVR + reg being read */ +#define MIF_STATUS_POLL_DATA_SHIFT 16 +#define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates + which bits in the + POLL_DATA field have + changed since the + MIF_STATUS reg was + last read */ +#define MIF_STATUS_POLL_STATUS_SHIFT 0 + +/* 7-bit register has current state for all state machines in the MIF */ +#define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */ +#define MIF_SM_CONTROL_MASK 0x07 /* control state machine + state */ +#define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine + state */ + +/** PCS/Serialink. the following registers are equivalent to the standard + * MII management registers except that they're directly mapped in + * Cassini's register space. + **/ + +/* the auto-negotiation enable bit should be programmed the same at + * the link partner as in the local device to enable auto-negotiation to + * complete. when that bit is reprogrammed, auto-neg/manual config is + * restarted automatically. + * DEFAULT: 0x1040 + */ +#define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */ +#define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on + writes */ +#define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS + to MAC interface is + activated regardless + of activity */ +#define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS + behaviour same for + half and full dplx */ +#define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing. + restart auto- + negotiation */ +#define PCS_MII_ISOLATE 0x0400 /* read as 0. ignored + on writes */ +#define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored + on writes */ +#define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes + through automatic + link config before it + can be used. when 0, + link can be used + w/out any link config + phase */ +#define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on + writes */ +#define PCS_MII_RESET 0x8000 /* reset PCS. 
self-clears + when done */ + +/* DEFAULT: 0x0108 */ +#define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */ +#define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */ +#define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */ +#define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up. + 0 -> link down. 0 is + latched so that 0 is + kept until read. read + 2x to determine if the + link has gone up again */ +#define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform + auto-neg) */ +#define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected + from received link code + word. only valid after + auto-neg completed */ +#define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation + completed + 0 -> auto-negotiation not + completed */ +#define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an + indication that this is + a 1000 Base-X PHY. writes + to it are ignored */ + +/* used during auto-negotiation. + * DEFAULT: 0x00E0 + */ +#define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement + reg */ +#define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex + 1000 Base-X */ +#define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex + 1000 Base-X */ +#define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE + symmetric capability */ +#define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE + asymmetric capability */ +#define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13 + to optionally indicate to + link partner that chip is + going off-line. bit12 will + get set when signal + detect == FAIL and will + remain set until + successful negotiation */ +#define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */ +#define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */ + +/* contents updated as a result of autonegotiation. layout and definitions + * identical to PCS_MII_ADVERT + */ +#define REG_PCS_MII_LPA 0x900C /* PCS MII link partner + ability reg */ +#define PCS_MII_LPA_FD PCS_MII_ADVERT_FD +#define PCS_MII_LPA_HD PCS_MII_ADVERT_HD +#define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE +#define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE +#define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK +#define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK +#define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE + +/* DEFAULT: 0x0 */ +#define REG_PCS_CFG 0x9010 /* PCS config reg */ +#define PCS_CFG_EN 0x01 /* enable PCS. must be + 0 when modifying + PCS_MII_ADVERT */ +#define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to + OK. bit is + non-resettable */ +#define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation + of optical signal to make + signal detect okay when + signal is low */ +#define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter + measurements. a single + code group is xmitted + regularly. + 0x0 = normal operation + 0x1 = high freq test + pattern, D21.5 + 0x2 = low freq test + pattern, K28.7 + 0x3 = reserved */ +#define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto- + negotiation timer to + a few cycles for test + purposes */ + +/* used for diagnostic purposes. bits 20-22 autoclear on read */ +#define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine + and diagnostic reg */ +#define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate + xmission of idle. + otherwise, xmission of + a packet */ +#define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception + of idle. 
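+ *
+ * polling sketch for the latched-low link bit described above (not driver
+ * code): a second read is needed because a 0 may only record a past
+ * transition. "regs" and "link_up" are placeholders.
+ *
+ *    u32 stat = readl(regs + REG_PCS_MII_STATUS);
+ *
+ *    if (!(stat & PCS_MII_STATUS_LINK_STATUS))
+ *            stat = readl(regs + REG_PCS_MII_STATUS);
+ *    link_up = (stat & PCS_MII_STATUS_LINK_STATUS) != 0;
+ *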
otherwise, + reception of packet */ +#define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of + sync */ +#define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3 + indicates reception of + Config codes. cycling + through 0-1 indicates + reception of idles */ +#define PCS_SM_LINK_STATE_MASK 0x0001E000 +#define SM_LINK_STATE_UP 0x00016000 /* link state is up */ + +#define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to + recept of Config + codes */ +#define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to + loss of sync */ +#define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes + from OK to FAIL. bit29 + will also be set if + this is set */ +#define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to + receipt of breaklink + C codes from partner. + C codes w/ 0 content + received triggering + start/restart of + autonegotiation. + should be sent for + no longer than 20ms */ +#define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being + initialized. see serdes + state reg */ +#define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or + not received */ +#define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not + achieved */ +#define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes + w/ ack bit set */ +#define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues + to send C codes + instead of idle + symbols or pkt data */ + +/* this register indicates interrupt changes in specific PCS MII status bits. + * PCS_INT may be masked at the ISR level. only a single bit is implemented + * for link status change. + */ +#define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */ +#define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed + since last read */ + +/* control which network interface is used. no more than one bit should + * be set. + * DEFAULT: none + */ +#define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */ +#define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and + MII/GMII is selected. + selection between MII and + GMII is controlled by + XIF_CFG */ +#define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the + 10-bit interface */ + +/* input to serdes chip or serialink block */ +#define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */ +#define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on + serdes interface */ +#define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier + detection. should be + 0x0 for normal + operation */ +#define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1] + to REFCLK when set. + when clear, receiver + clock locks to incoming + serial data */ + +/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins. + * should be 0x0 for normal operations. + * 0b000 normal operation, PROM address[3:0] selected + * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read + * 0b010 rxmac req, rx ack, rx tag, rx clk shared + * 0b011 txmac req, tx ack, tx tag, tx retry req + * 0b100 tx tp3, tx tp2, tx tp1, tx tp0 + * 0b101 R period RX, R period TX, R period HP, R period BIM + * DEFAULT: 0x0 + */ +#define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */ +#define PCS_SOS_PROM_ADDR_MASK 0x0007 + +/* used for diagnostics. this register indicates progress of the SERDES + * boot up. 
+ * 0b00 undergoing reset + * 0b01 waiting 500us while lockrefn is asserted + * 0b10 waiting for comma detect + * 0b11 receive data is synchronized + * DEFAULT: 0x0 + */ +#define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */ +#define PCS_SERDES_STATE_MASK 0x03 + +/* used for diagnostics. indicates number of packets transmitted or received. + * counters rollover w/out generating an interrupt. + * DEFAULT: 0x0 + */ +#define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */ +#define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */ +#define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS + whether they + encountered an error + or not */ + +/** LocalBus Devices. the following provides run-time access to the + * Cassini's PROM + ***/ +#define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time + access */ +#define REG_EXPANSION_ROM_RUN_END 0x17FFFF + +#define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus + device */ +#define REG_SECOND_LOCALBUS_END 0x1FFFFF + +/* entropy device */ +#define REG_ENTROPY_START REG_SECOND_LOCALBUS_START +#define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00) +#define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04) +#define ENTROPY_STATUS_DRDY 0x01 +#define ENTROPY_STATUS_BUSY 0x02 +#define ENTROPY_STATUS_CIPHER 0x04 +#define ENTROPY_STATUS_BYPASS_MASK 0x18 +#define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05) +#define ENTROPY_MODE_KEY_MASK 0x07 +#define ENTROPY_MODE_ENCRYPT 0x40 +#define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06) +#define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07) +#define ENTROPY_RESET_DES_IO 0x01 +#define ENTROPY_RESET_STC_MODE 0x02 +#define ENTROPY_RESET_KEY_CACHE 0x04 +#define ENTROPY_RESET_IV 0x08 +#define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08) +#define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10) +#define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x)) + +/* phys of interest w/ their special mii registers */ +#define PHY_LUCENT_B0 0x00437421 +#define LUCENT_MII_REG 0x1F + +#define PHY_NS_DP83065 0x20005c78 +#define DP83065_MII_MEM 0x16 +#define DP83065_MII_REGD 0x1D +#define DP83065_MII_REGE 0x1E + +#define PHY_BROADCOM_5411 0x00206071 +#define PHY_BROADCOM_B0 0x00206050 +#define BROADCOM_MII_REG4 0x14 +#define BROADCOM_MII_REG5 0x15 +#define BROADCOM_MII_REG7 0x17 +#define BROADCOM_MII_REG8 0x18 + +#define CAS_MII_ANNPTR 0x07 +#define CAS_MII_ANNPRR 0x08 +#define CAS_MII_1000_CTRL 0x09 +#define CAS_MII_1000_STATUS 0x0A +#define CAS_MII_1000_EXTEND 0x0F + +#define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */ +/* + * if autoneg is disabled, here's the table: + * BMCR_SPEED100 = 100Mbps + * BMCR_SPEED1000 = 1000Mbps + * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps + */ +#define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */ + +#define CAS_ADVERTISE_1000HALF 0x0100 +#define CAS_ADVERTISE_1000FULL 0x0200 +#define CAS_ADVERTISE_PAUSE 0x0400 +#define CAS_ADVERTISE_ASYM_PAUSE 0x0800 + +/* regular lpa register */ +#define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE +#define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE + +/* 1000_STATUS register */ +#define CAS_LPA_1000HALF 0x0400 +#define CAS_LPA_1000FULL 0x0800 + +#define CAS_EXTEND_1000XFULL 0x8000 +#define CAS_EXTEND_1000XHALF 0x4000 +#define CAS_EXTEND_1000TFULL 0x2000 +#define CAS_EXTEND_1000THALF 0x1000 + +/* cassini header parser firmware */ +typedef struct cas_hp_inst { + const char *note; + + u16 mask, val; + + u8 op; + u8 soff, snext; /* if match succeeds, new offset and match */ + u8 foff, fnext; /* if match fails, new offset 
and match */ + /* output info */ + u8 outop; /* output opcode */ + + u16 outarg; /* output argument */ + u8 outenab; /* output enable: 0 = not, 1 = if match + 2 = if !match, 3 = always */ + u8 outshift; /* barrel shift right, 4 bits */ + u16 outmask; +} cas_hp_inst_t; + +/* comparison */ +#define OP_EQ 0 /* packet == value */ +#define OP_LT 1 /* packet < value */ +#define OP_GT 2 /* packet > value */ +#define OP_NP 3 /* new packet */ + +/* output opcodes */ +#define CL_REG 0 +#define LD_FID 1 +#define LD_SEQ 2 +#define LD_CTL 3 +#define LD_SAP 4 +#define LD_R1 5 +#define LD_L3 6 +#define LD_SUM 7 +#define LD_HDR 8 +#define IM_FID 9 +#define IM_SEQ 10 +#define IM_SAP 11 +#define IM_R1 12 +#define IM_CTL 13 +#define LD_LEN 14 +#define ST_FLG 15 + +/* match setp #s for IP4TCP4 */ +#define S1_PCKT 0 +#define S1_VLAN 1 +#define S1_CFI 2 +#define S1_8023 3 +#define S1_LLC 4 +#define S1_LLCc 5 +#define S1_IPV4 6 +#define S1_IPV4c 7 +#define S1_IPV4F 8 +#define S1_TCP44 9 +#define S1_IPV6 10 +#define S1_IPV6L 11 +#define S1_IPV6c 12 +#define S1_TCP64 13 +#define S1_TCPSQ 14 +#define S1_TCPFG 15 +#define S1_TCPHL 16 +#define S1_TCPHc 17 +#define S1_CLNP 18 +#define S1_CLNP2 19 +#define S1_DROP 20 +#define S2_HTTP 21 +#define S1_ESP4 22 +#define S1_AH4 23 +#define S1_ESP6 24 +#define S1_AH6 25 + +#define CAS_PROG_IP46TCP4_PREAMBLE \ +{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \ + CL_REG, 0x3ff, 1, 0x0, 0x0000}, \ +{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \ + IM_CTL, 0x00a, 3, 0x0, 0xffff}, \ +{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \ + LD_SAP, 0x100, 3, 0x0, 0xffff}, \ +{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \ + LD_SUM, 0x00a, 1, 0x0, 0x0000}, \ +{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \ + LD_LEN, 0x03e, 1, 0x0, 0xffff}, \ +{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \ + LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \ +{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \ + LD_SUM, 0x015, 1, 0x0, 0x0000}, \ +{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \ + IM_R1, 0x128, 1, 0x0, 0xffff}, \ +{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \ + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \ +{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \ + LD_LEN, 0x03f, 1, 0x0, 0xffff} + +#ifdef USE_HP_IP46TCP4 +static cas_hp_inst_t cas_prog_ip46tcp4tab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, + S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 
0x000, 0, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab +#endif +#endif + +/* + * Alternate table load which excludes HTTP server traffic from reassembly. + * It is substantially similar to the basic table, with one extra state + * and a few extra compares. */ +#ifdef USE_HP_IP46TCP4NOHTTP +static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */ + { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, + S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + CL_REG, 0x002, 3, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x044, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4NOHTTP_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab +#endif +#endif + +/* match step #s for IP4FRAG */ +#define S3_IPV6c 11 +#define S3_TCP64 12 +#define S3_TCPSQ 13 +#define S3_TCPFG 14 +#define S3_TCPHL 15 +#define S3_TCPHc 16 +#define S3_FRAG 17 +#define S3_FOFF 18 +#define S3_CLNP 19 + +#ifdef USE_HP_IP4FRAG +static cas_hp_inst_t cas_prog_ip4fragtab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, + CL_REG, 0x3ff, 1, 0x0, 0x0000}, + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x00a, 3, 0x0, 0xffff}, + { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + LD_SAP, 0x100, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG, + LD_LEN, 0x03e, 3, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP, + LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP, + LD_LEN, 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0, + S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 
0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, + LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */ + { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, + LD_SEQ, 0x040, 1, 0xD, 0xfff8}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { NULL }, +}; +#ifdef HP_IP4FRAG_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip4fragtab +#endif +#endif + +/* + * Alternate table which does batching without reassembly + */ +#ifdef USE_HP_IP46TCP4BATCH +static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, + S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */ + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4BATCH_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab +#endif +#endif + +/* Workaround for Cassini rev2 descriptor corruption problem. + * Does batching without reassembly, and sets the SAP to a known + * data pattern for all packets. + */ +#ifdef USE_HP_WORKAROUND +static cas_hp_inst_t cas_prog_workaroundtab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, + S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} , + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x04a, 3, 0x0, 0xffff}, + { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + IM_SAP, 0x6AE, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, + LD_LEN, 0x03e, 1, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, + LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, + IM_R1, 0x128, 1, 0x0, 0xffff}, + { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, + LD_LEN, 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP 
length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_SAP, 0x6AE, 3, 0x0, 0xffff} , + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { NULL }, +}; +#ifdef HP_WORKAROUND_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_workaroundtab +#endif +#endif + +#ifdef USE_HP_ENCRYPT +static cas_hp_inst_t cas_prog_encryptiontab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, + S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000}, + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x00a, 3, 0x0, 0xffff}, +#if 0 +//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */ +//0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, CL_REG, 0x000, 0, 0x0, 0x00 + 00, +#endif + { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */ + 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + LD_SAP, 0x100, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, + LD_LEN, 0x03e, 1, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4, + LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, + IM_R1, 0x128, 1, 0x0, 0xffff}, + { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", +#if 0 +//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff, +#endif + 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN, + 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* 14:DADDR should point to dest port */ + 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, + S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000} , + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + CL_REG, 0x002, 3, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x044, 3, 0x0, 0xffff}, + { "IPV4 ESP encrypted?", /* S1_ESP4 */ + 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { "IPV4 AH encrypted?", /* S1_AH4 */ + 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { "IPV6 ESP encrypted?", /* S1_ESP6 */ +#if 0 +//@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff, +#endif + 0xff00, 0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, + 0x021, 1, 0x0, 
0xffff}, + { "IPV6 AH encrypted?", /* S1_AH6 */ +#if 0 +//@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff, +#endif + 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_ENCRYPT_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_encryptiontab +#endif +#endif + +static cas_hp_inst_t cas_prog_null[] = { {NULL} }; +#ifdef HP_NULL_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_null +#endif + +/* phy types */ +#define CAS_PHY_UNKNOWN 0x00 +#define CAS_PHY_SERDES 0x01 +#define CAS_PHY_MII_MDIO0 0x02 +#define CAS_PHY_MII_MDIO1 0x04 +#define CAS_PHY_MII(x) ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1)) + +/* _RING_INDEX is the index for the ring sizes to be used. _RING_SIZE + * is the actual size. the default index for the various rings is + * 8. NOTE: there a bunch of alignment constraints for the rings. to + * deal with that, i just allocate rings to create the desired + * alignment. here are the constraints: + * RX DESC and COMP rings must be 8KB aligned + * TX DESC must be 2KB aligned. + * if you change the numbers, be cognizant of how the alignment will change + * in INIT_BLOCK as well. + */ + +#define DESC_RING_I_TO_S(x) (32*(1 << (x))) +#define COMP_RING_I_TO_S(x) (128*(1 << (x))) +#define TX_DESC_RING_INDEX 4 /* 512 = 8k */ +#define RX_DESC_RING_INDEX 4 /* 512 = 8k */ +#define RX_COMP_RING_INDEX 4 /* 2048 = 64k: should be 4x rx ring size */ + +#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0) +#error TX_DESC_RING_INDEX must be between 0 and 8 +#endif + +#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0) +#error RX_DESC_RING_INDEX must be between 0 and 8 +#endif + +#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0) +#error RX_COMP_RING_INDEX must be between 0 and 8 +#endif + +#define N_TX_RINGS MAX_TX_RINGS /* for QoS */ +#define N_TX_RINGS_MASK MAX_TX_RINGS_MASK +#define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */ +#define N_RX_COMP_RINGS 0x1 /* for mult. PCI interrupts */ + +/* number of flows that can go through re-assembly */ +#define N_RX_FLOWS 64 + +#define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX) +#define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX) +#define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX) +#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX +#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX +#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX +#define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE +#define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE +#define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE + +/* convert values */ +#define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK)) +#define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT)) +#define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \ + TX_CFG_DESC_RINGN_SHIFT(y)) & \ + TX_CFG_DESC_RINGN_MASK(y)) + +/* min is 2k, but we can't do jumbo frames unless it's at least 8k */ +#define CAS_MIN_PAGE_SHIFT 11 /* 2048 */ +#define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */ +#define CAS_MAX_PAGE_SHIFT 14 /* 16384 */ + +#define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in + bytes. 0 - 9256 */ +#define TX_DESC_BUFLEN_SHIFT 0 +#define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. # + of bytes to be + skipped before + csum calc begins. + value must be + even */ +#define TX_DESC_CSUM_START_SHIFT 15 +#define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff. + byte offset w/in + the pkt for the + 1st csum byte. 
+ must be > 8 */ +#define TX_DESC_CSUM_STUFF_SHIFT 21 +#define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */ +#define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */ +#define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */ +#define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */ +#define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only. + CRC will not be + inserted into + outgoing frame. */ +struct cas_tx_desc { + __le64 control; + __le64 buffer; +}; + +/* descriptor ring for free buffers contains page-sized buffers. the index + * value is not used by the hw in any way. it's just stored and returned in + * the completion ring. + */ +struct cas_rx_desc { + __le64 index; + __le64 buffer; +}; + +/* received packets are put on the completion ring. */ +/* word 1 */ +#define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL +#define RX_COMP1_DATA_SIZE_SHIFT 13 +#define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL +#define RX_COMP1_DATA_OFF_SHIFT 27 +#define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL +#define RX_COMP1_DATA_INDEX_SHIFT 41 +#define RX_COMP1_SKIP_MASK 0x0180000000000000ULL +#define RX_COMP1_SKIP_SHIFT 55 +#define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL +#define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL +#define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL +#define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL +#define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL +#define RX_COMP1_TYPE_MASK 0xC000000000000000ULL +#define RX_COMP1_TYPE_SHIFT 62 + +/* word 2 */ +#define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL +#define RX_COMP2_NEXT_INDEX_SHIFT 21 +#define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL +#define RX_COMP2_HDR_SIZE_SHIFT 35 +#define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL +#define RX_COMP2_HDR_OFF_SHIFT 44 +#define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL +#define RX_COMP2_HDR_INDEX_SHIFT 50 + +/* word 3 */ +#define RX_COMP3_SMALL_PKT 0x0000000000000001ULL +#define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL +#define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL +#define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL +#define RX_COMP3_CSUM_START_SHIFT 12 +#define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL +#define RX_COMP3_FLOWID_SHIFT 19 +#define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL +#define RX_COMP3_OPCODE_SHIFT 25 +#define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL +#define RX_COMP3_NO_ASSIST 0x0000000020000000ULL +#define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL +#define RX_COMP3_LOAD_BAL_SHIFT 35 +#define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */ +#define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */ +#define RX_COMP3_L3_HEAD_OFF_SHIFT 41 +#define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */ +#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42 +#define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL +#define RX_COMP3_SAP_SHIFT 48 + +/* word 4 */ +#define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL +#define RX_COMP4_TCP_CSUM_SHIFT 0 +#define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL +#define RX_COMP4_PKT_LEN_SHIFT 16 +#define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL +#define RX_COMP4_PERFECT_MATCH_SHIFT 30 +#define RX_COMP4_ZERO 0x0000080000000000ULL +#define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL +#define RX_COMP4_HASH_VAL_SHIFT 44 +#define RX_COMP4_HASH_PASS 0x1000000000000000ULL +#define RX_COMP4_BAD 0x4000000000000000ULL +#define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL + +/* we encode the following: ring/index/release. only 14 bits + * are usable. 
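+ * from the masks below: bits [11:0] carry the buffer index, bit 12 the
+ * desc ring number, and bit 13 the release flag. illustrative only: such
+ * a value can be composed with the CAS_BASE() helper defined above, e.g.
+ * CAS_BASE(RX_INDEX_NUM, entry) | CAS_BASE(RX_INDEX_RING, ring), where
+ * entry and ring stand for the buffer index and desc ring number.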
+ * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and + * MAX_RX_DESC_RINGS. */ +#define RX_INDEX_NUM_MASK 0x0000000000000FFFULL +#define RX_INDEX_NUM_SHIFT 0 +#define RX_INDEX_RING_MASK 0x0000000000001000ULL +#define RX_INDEX_RING_SHIFT 12 +#define RX_INDEX_RELEASE 0x0000000000002000ULL + +struct cas_rx_comp { + __le64 word1; + __le64 word2; + __le64 word3; + __le64 word4; +}; + +enum link_state { + link_down = 0, /* No link, will retry */ + link_aneg, /* Autoneg in progress */ + link_force_try, /* Try Forced link speed */ + link_force_ret, /* Forced mode worked, retrying autoneg */ + link_force_ok, /* Stay in forced mode */ + link_up /* Link is up */ +}; + +typedef struct cas_page { + struct list_head list; + struct page *buffer; + dma_addr_t dma_addr; + int used; +} cas_page_t; + + +/* some alignment constraints: + * TX DESC, RX DESC, and RX COMP must each be 8K aligned. + * TX COMPWB must be 8-byte aligned. + * to accomplish this, here's what we do: + * + * INIT_BLOCK_RX_COMP = 64k (already aligned) + * INIT_BLOCK_RX_DESC = 8k + * INIT_BLOCK_TX = 8k + * INIT_BLOCK_RX1_DESC = 8k + * TX COMPWB + */ +#define INIT_BLOCK_TX (TX_DESC_RING_SIZE) +#define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE) +#define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE) + +struct cas_init_block { + struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP]; + struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC]; + struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX]; + __le64 tx_compwb; +}; + +/* tiny buffers to deal with target abort issue. we allocate a bit + * over so that we don't have target abort issues with these buffers + * as well. + */ +#define TX_TINY_BUF_LEN 0x100 +#define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN) + +struct cas_tiny_count { + int nbufs; + int used; +}; + +struct cas { + spinlock_t lock; /* for most bits */ + spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */ + spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */ + spinlock_t rx_inuse_lock; /* rx inuse list */ + spinlock_t rx_spare_lock; /* rx spare list */ + + void __iomem *regs; + int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS]; + int rx_old[N_RX_DESC_RINGS]; + int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; + int rx_last[N_RX_DESC_RINGS]; + + struct napi_struct napi; + + /* Set when chip is actually in operational state + * (ie. not power managed) */ + int hw_running; + int opened; + struct mutex pm_mutex; /* open/close/suspend/resume */ + + struct cas_init_block *init_block; + struct cas_tx_desc *init_txds[MAX_TX_RINGS]; + struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS]; + struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS]; + + /* we use sk_buffs for tx and pages for rx. the rx skbuffs + * are there for flow re-assembly. 
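rx pages are page-sized buffers tracked by cas_page_t; spare pages sit on
rx_spare_list, and pages still in use (see the used count in cas_page_t)
on rx_inuse_list.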
*/ + struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE]; + struct sk_buff_head rx_flows[N_RX_FLOWS]; + cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE]; + struct list_head rx_spare_list, rx_inuse_list; + int rx_spares_needed; + + /* for small packets when copying would be quicker than + mapping */ + struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE]; + u8 *tx_tiny_bufs[N_TX_RINGS]; + + u32 msg_enable; + + /* N_TX_RINGS must be >= N_RX_DESC_RINGS */ + struct net_device_stats net_stats[N_TX_RINGS + 1]; + + u32 pci_cfg[64 >> 2]; + u8 pci_revision; + + int phy_type; + int phy_addr; + u32 phy_id; +#define CAS_FLAG_1000MB_CAP 0x00000001 +#define CAS_FLAG_REG_PLUS 0x00000002 +#define CAS_FLAG_TARGET_ABORT 0x00000004 +#define CAS_FLAG_SATURN 0x00000008 +#define CAS_FLAG_RXD_POST_MASK 0x000000F0 +#define CAS_FLAG_RXD_POST_SHIFT 4 +#define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \ + CAS_FLAG_RXD_POST_MASK) +#define CAS_FLAG_ENTROPY_DEV 0x00000100 +#define CAS_FLAG_NO_HW_CSUM 0x00000200 + u32 cas_flags; + int packet_min; /* minimum packet size */ + int tx_fifo_size; + int rx_fifo_size; + int rx_pause_off; + int rx_pause_on; + int crc_size; /* 4 if half-duplex */ + + int pci_irq_INTC; + int min_frame_size; /* for tx fifo workaround */ + + /* page size allocation */ + int page_size; + int page_order; + int mtu_stride; + + u32 mac_rx_cfg; + + /* Autoneg & PHY control */ + int link_cntl; + int link_fcntl; + enum link_state lstate; + struct timer_list link_timer; + int timer_ticks; + struct work_struct reset_task; +#if 0 + atomic_t reset_task_pending; +#else + atomic_t reset_task_pending; + atomic_t reset_task_pending_mtu; + atomic_t reset_task_pending_spare; + atomic_t reset_task_pending_all; +#endif + + /* Link-down problem workaround */ +#define LINK_TRANSITION_UNKNOWN 0 +#define LINK_TRANSITION_ON_FAILURE 1 +#define LINK_TRANSITION_STILL_FAILED 2 +#define LINK_TRANSITION_LINK_UP 3 +#define LINK_TRANSITION_LINK_CONFIG 4 +#define LINK_TRANSITION_LINK_DOWN 5 +#define LINK_TRANSITION_REQUESTED_RESET 6 + int link_transition; + int link_transition_jiffies_valid; + unsigned long link_transition_jiffies; + + /* Tuning */ + u8 orig_cacheline_size; /* value when loaded */ +#define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */ + + /* Diagnostic counters and state. */ + int casreg_len; /* reg-space size for dumping */ + u64 pause_entered; + u16 pause_last_time_recvd; + + dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS]; + struct pci_dev *pdev; + struct net_device *dev; +#if defined(CONFIG_OF) + struct device_node *of_node; +#endif + + /* Firmware Info */ + u16 fw_load_addr; + u32 fw_size; + u8 *fw_data; +}; + +#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1)) +#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1)) +#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1)) + +#define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \ + (TX_DESC_RINGN_SIZE(r) - (x) + (y))) + +#define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? 
\ + (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \ + (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1) + +#define CAS_ALIGN(addr, align) \ + (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1)) + +#define RX_FIFO_SIZE 16384 +#define EXPANSION_ROM_SIZE 65536 + +#define CAS_MC_EXACT_MATCH_SIZE 15 +#define CAS_MC_HASH_SIZE 256 +#define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \ + CAS_MC_HASH_SIZE) + +#define TX_TARGET_ABORT_LEN 0x20 +#define RX_SWIVEL_OFF_VAL 0x2 +#define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1) +#define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1) +#define RX_BLANK_INTR_PKT_VAL 0x05 +#define RX_BLANK_INTR_TIME_VAL 0x0F +#define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */ + +#define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1) +#define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2) + +#endif /* _CASSINI_H */ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c new file mode 100644 index 000000000..0c5842aeb --- /dev/null +++ b/drivers/net/ethernet/sun/niu.c @@ -0,0 +1,10220 @@ +/* niu.c: Neptune ethernet driver. + * + * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "niu.h" + +#define DRV_MODULE_NAME "niu" +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "Apr 22, 2010" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("NIU ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#ifndef readq +static u64 readq(void __iomem *reg) +{ + return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); +} + +static void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel(val >> 32, reg + 0x4UL); +} +#endif + +static const struct pci_device_id niu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, + {} +}; + +MODULE_DEVICE_TABLE(pci, niu_pci_tbl); + +#define NIU_TX_TIMEOUT (5 * HZ) + +#define nr64(reg) readq(np->regs + (reg)) +#define nw64(reg, val) writeq((val), np->regs + (reg)) + +#define nr64_mac(reg) readq(np->mac_regs + (reg)) +#define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) + +#define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) +#define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) + +#define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) +#define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) + +#define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) +#define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) + +#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int niu_debug; +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "NIU debug level"); + +#define niu_lock_parent(np, flags) \ + spin_lock_irqsave(&np->parent->lock, flags) +#define niu_unlock_parent(np, flags) \ + spin_unlock_irqrestore(&np->parent->lock, flags) + +static int serdes_init_10g_serdes(struct niu *np); + +static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64_mac(reg); + + if 
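/* requested bits all clear yet? */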
(!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + + nw64_mac(reg, bits); + err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); + if (err) + netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", + (unsigned long long)bits, reg_name, + (unsigned long long)nr64_mac(reg)); + return err; +} + +#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64_ipp(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + u64 val; + + val = nr64_ipp(reg); + val |= bits; + nw64_ipp(reg, val); + + err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); + if (err) + netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", + (unsigned long long)bits, reg_name, + (unsigned long long)nr64_ipp(reg)); + return err; +} + +#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ +}) + +static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + + nw64(reg, bits); + err = __niu_wait_bits_clear(np, reg, bits, limit, delay); + if (err) + netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", + (unsigned long long)bits, reg_name, + (unsigned long long)nr64(reg)); + return err; +} + +#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) +{ + u64 val = (u64) lp->timer; + + if (on) + val |= LDG_IMGMT_ARM; + + nw64(LDG_IMGMT(lp->ldg_num), val); +} + +static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) +{ + unsigned long mask_reg, bits; + u64 val; + + if (ldn < 0 || ldn > LDN_MAX) + return -EINVAL; + + if (ldn < 64) { + mask_reg = LD_IM0(ldn); + bits = LD_IM0_MASK; + } else { + mask_reg = LD_IM1(ldn - 64); + bits = LD_IM1_MASK; + } + + val = nr64(mask_reg); + if (on) + val &= ~bits; + else + val |= bits; + nw64(mask_reg, val); + + return 0; +} + +static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) +{ + struct niu_parent *parent = np->parent; + int i; + + for (i = 0; i <= LDN_MAX; i++) { + int err; + + if (parent->ldg_map[i] != lp->ldg_num) + continue; + + err = 
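/* ldn i is mapped to this group; update its interrupt mask */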
niu_ldn_irq_enable(np, i, on); + if (err) + return err; + } + return 0; +} + +static int niu_enable_interrupts(struct niu *np, int on) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + int err; + + err = niu_enable_ldn_in_ldg(np, lp, on); + if (err) + return err; + } + for (i = 0; i < np->num_ldg; i++) + niu_ldg_rearm(np, &np->ldg[i], on); + + return 0; +} + +static u32 phy_encode(u32 type, int port) +{ + return type << (port * 2); +} + +static u32 phy_decode(u32 val, int port) +{ + return (val >> (port * 2)) & PORT_TYPE_MASK; +} + +static int mdio_wait(struct niu *np) +{ + int limit = 1000; + u64 val; + + while (--limit > 0) { + val = nr64(MIF_FRAME_OUTPUT); + if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) + return val & MIF_FRAME_OUTPUT_DATA; + + udelay(10); + } + + return -ENODEV; +} + +static int mdio_read(struct niu *np, int port, int dev, int reg) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); + err = mdio_wait(np); + if (err < 0) + return err; + + nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); + return mdio_wait(np); +} + +static int mdio_write(struct niu *np, int port, int dev, int reg, int data) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); + err = mdio_wait(np); + if (err < 0) + return err; + + nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); + err = mdio_wait(np); + if (err < 0) + return err; + + return 0; +} + +static int mii_read(struct niu *np, int port, int reg) +{ + nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); + return mdio_wait(np); +} + +static int mii_write(struct niu *np, int port, int reg, int data) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); + err = mdio_wait(np); + if (err < 0) + return err; + + return 0; +} + +static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TX_CFG_L(channel), + val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TX_CFG_H(channel), + val >> 16); + return err; +} + +static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_RX_CFG_L(channel), + val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_RX_CFG_H(channel), + val >> 16); + return err; +} + +/* Mode is always 10G fiber. */ +static int serdes_init_niu_10g_fiber(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u32 tx_cfg, rx_cfg; + unsigned long i; + + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | + PLL_RX_CFG_EQ_LP_ADAPTIVE); + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; + + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TEST_CFG_L, test_cfg); + + tx_cfg |= PLL_TX_CFG_ENTEST; + rx_cfg |= PLL_RX_CFG_ENTEST; + } + + /* Initialize all 4 lanes of the SERDES. 
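each lane's 32-bit tx/rx config is written as two 16-bit MDIO halves by
esr2_set_tx_cfg()/esr2_set_rx_cfg(): the _CFG_L register takes bits 15:0
and _CFG_H bits 31:16.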
*/ + for (i = 0; i < 4; i++) { + int err = esr2_set_tx_cfg(np, i, tx_cfg); + if (err) + return err; + } + + for (i = 0; i < 4; i++) { + int err = esr2_set_rx_cfg(np, i, rx_cfg); + if (err) + return err; + } + + return 0; +} + +static int serdes_init_niu_1g_serdes(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u16 pll_cfg, pll_sts; + int max_retry = 100; + u64 uninitialized_var(sig), mask, val; + u32 tx_cfg, rx_cfg; + unsigned long i; + int err; + + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | + PLL_TX_CFG_RATE_HALF); + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | + PLL_RX_CFG_RATE_HALF); + + if (np->port == 0) + rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; + + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TEST_CFG_L, test_cfg); + + tx_cfg |= PLL_TX_CFG_ENTEST; + rx_cfg |= PLL_RX_CFG_ENTEST; + } + + /* Initialize PLL for 1G */ + pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_CFG_L, pll_cfg); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", + np->port, __func__); + return err; + } + + pll_sts = PLL_CFG_ENPLL; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_STS_L, pll_sts); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", + np->port, __func__); + return err; + } + + udelay(200); + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + err = esr2_set_tx_cfg(np, i, tx_cfg); + if (err) + return err; + } + + for (i = 0; i < 4; i++) { + err = esr2_set_rx_cfg(np, i, rx_cfg); + if (err) + return err; + } + + switch (np->port) { + case 0: + val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); + mask = val; + break; + + case 1: + val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); + mask = val; + break; + + default: + return -EINVAL; + } + + while (max_retry--) { + sig = nr64(ESR_INT_SIGNALS); + if ((sig & mask) == val) + break; + + mdelay(500); + } + + if ((sig & mask) != val) { + netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", + np->port, (int)(sig & mask), (int)val); + return -ENODEV; + } + + return 0; +} + +static int serdes_init_niu_10g_serdes(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; + int max_retry = 100; + u64 uninitialized_var(sig), mask, val; + unsigned long i; + int err; + + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | + PLL_RX_CFG_EQ_LP_ADAPTIVE); + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; + + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TEST_CFG_L, test_cfg); + + tx_cfg |= PLL_TX_CFG_ENTEST; + rx_cfg |= PLL_RX_CFG_ENTEST; + } + + /* Initialize PLL for 10G */ + pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", + np->port, __func__); + return err; + } + + pll_sts = PLL_CFG_ENPLL; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_STS_L, pll_sts & 0xffff); + if (err) { + netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", + np->port, 
__func__); + return err; + } + + udelay(200); + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + err = esr2_set_tx_cfg(np, i, tx_cfg); + if (err) + return err; + } + + for (i = 0; i < 4; i++) { + err = esr2_set_rx_cfg(np, i, rx_cfg); + if (err) + return err; + } + + /* check if serdes is ready */ + + switch (np->port) { + case 0: + mask = ESR_INT_SIGNALS_P0_BITS; + val = (ESR_INT_SRDY0_P0 | + ESR_INT_DET0_P0 | + ESR_INT_XSRDY_P0 | + ESR_INT_XDP_P0_CH3 | + ESR_INT_XDP_P0_CH2 | + ESR_INT_XDP_P0_CH1 | + ESR_INT_XDP_P0_CH0); + break; + + case 1: + mask = ESR_INT_SIGNALS_P1_BITS; + val = (ESR_INT_SRDY0_P1 | + ESR_INT_DET0_P1 | + ESR_INT_XSRDY_P1 | + ESR_INT_XDP_P1_CH3 | + ESR_INT_XDP_P1_CH2 | + ESR_INT_XDP_P1_CH1 | + ESR_INT_XDP_P1_CH0); + break; + + default: + return -EINVAL; + } + + while (max_retry--) { + sig = nr64(ESR_INT_SIGNALS); + if ((sig & mask) == val) + break; + + mdelay(500); + } + + if ((sig & mask) != val) { + pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", + np->port, (int)(sig & mask), (int)val); + + /* 10G failed, try initializing at 1G */ + err = serdes_init_niu_1g_serdes(np); + if (!err) { + np->flags &= ~NIU_FLAGS_10G; + np->mac_xcvr = MAC_XCVR_PCS; + } else { + netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", + np->port); + return -ENODEV; + } + } + return 0; +} + +static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_H(chan)); + if (err >= 0) + *val |= ((err & 0xffff) << 16); + err = 0; + } + return err; +} + +static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_L(chan)); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_H(chan)); + if (err >= 0) { + *val |= ((err & 0xffff) << 16); + err = 0; + } + } + return err; +} + +static int esr_read_reset(struct niu *np, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H); + if (err >= 0) { + *val |= ((err & 0xffff) << 16); + err = 0; + } + } + return err; +} + +static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_L(chan), val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_H(chan), (val >> 16)); + return err; +} + +static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_L(chan), val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_H(chan), (val >> 16)); + return err; +} + +static int esr_reset(struct niu *np) +{ + u32 uninitialized_var(reset); + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L, 0x0000); + if (err) + return err; + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H, 0xffff); + if (err) + return err; + udelay(200); + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L, 0xffff); + if (err) + return err; + udelay(200); + + err = mdio_write(np, 
np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H, 0x0000); + if (err) + return err; + udelay(200); + + err = esr_read_reset(np, &reset); + if (err) + return err; + if (reset != 0) { + netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", + np->port, reset); + return -ENODEV; + } + + return 0; +} + +static int serdes_init_10g(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + unsigned long ctrl_reg, test_cfg_reg, i; + u64 ctrl_val, test_cfg_val, sig, mask, val; + int err; + + switch (np->port) { + case 0: + ctrl_reg = ENET_SERDES_0_CTRL_CFG; + test_cfg_reg = ENET_SERDES_0_TEST_CFG; + break; + case 1: + ctrl_reg = ENET_SERDES_1_CTRL_CFG; + test_cfg_reg = ENET_SERDES_1_TEST_CFG; + break; + + default: + return -EINVAL; + } + ctrl_val = (ENET_SERDES_CTRL_SDET_0 | + ENET_SERDES_CTRL_SDET_1 | + ENET_SERDES_CTRL_SDET_2 | + ENET_SERDES_CTRL_SDET_3 | + (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); + test_cfg_val = 0; + + if (lp->loopback_mode == LOOPBACK_PHY) { + test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_0_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_1_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_2_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_3_SHIFT)); + } + + nw64(ctrl_reg, ctrl_val); + nw64(test_cfg_reg, test_cfg_val); + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + u32 rxtx_ctrl, glue0; + + err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); + if (err) + return err; + err = esr_read_glue0(np, i, &glue0); + if (err) + return err; + + rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); + rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | + (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); + + glue0 &= ~(ESR_GLUE_CTRL0_SRATE | + ESR_GLUE_CTRL0_THCNT | + ESR_GLUE_CTRL0_BLTIME); + glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | + (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | + (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | + (BLTIME_300_CYCLES << + ESR_GLUE_CTRL0_BLTIME_SHIFT)); + + err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); + if (err) + return err; + err = esr_write_glue0(np, i, glue0); + if (err) + return err; + } + + err = esr_reset(np); + if (err) + return err; + + sig = nr64(ESR_INT_SIGNALS); + switch (np->port) { + case 0: + mask = ESR_INT_SIGNALS_P0_BITS; + val = (ESR_INT_SRDY0_P0 | + ESR_INT_DET0_P0 | + ESR_INT_XSRDY_P0 | + ESR_INT_XDP_P0_CH3 | + ESR_INT_XDP_P0_CH2 | + ESR_INT_XDP_P0_CH1 | + ESR_INT_XDP_P0_CH0); + break; + + case 1: + mask = ESR_INT_SIGNALS_P1_BITS; + val = (ESR_INT_SRDY0_P1 | + ESR_INT_DET0_P1 | + ESR_INT_XSRDY_P1 | + ESR_INT_XDP_P1_CH3 | + ESR_INT_XDP_P1_CH2 | + ESR_INT_XDP_P1_CH1 | + ESR_INT_XDP_P1_CH0); + break; + + default: + return -EINVAL; + } + + if ((sig & mask) != val) { + if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { + np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; + return 0; + } + netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", + np->port, (int)(sig & mask), (int)val); + return -ENODEV; + } + if (np->flags & NIU_FLAGS_HOTPLUG_PHY) + np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; + return 0; +} + +static int serdes_init_1g(struct niu *np) +{ + u64 val; + + val = nr64(ENET_SERDES_1_PLL_CFG); + val &= ~ENET_SERDES_PLL_FBDIV2; + switch (np->port) { + case 0: + val |= ENET_SERDES_PLL_HRATE0; + 
break; + case 1: + val |= ENET_SERDES_PLL_HRATE1; + break; + case 2: + val |= ENET_SERDES_PLL_HRATE2; + break; + case 3: + val |= ENET_SERDES_PLL_HRATE3; + break; + default: + return -EINVAL; + } + nw64(ENET_SERDES_1_PLL_CFG, val); + + return 0; +} + +static int serdes_init_1g_serdes(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; + u64 ctrl_val, test_cfg_val, sig, mask, val; + int err; + u64 reset_val, val_rd; + + val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | + ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | + ENET_SERDES_PLL_FBDIV0; + switch (np->port) { + case 0: + reset_val = ENET_SERDES_RESET_0; + ctrl_reg = ENET_SERDES_0_CTRL_CFG; + test_cfg_reg = ENET_SERDES_0_TEST_CFG; + pll_cfg = ENET_SERDES_0_PLL_CFG; + break; + case 1: + reset_val = ENET_SERDES_RESET_1; + ctrl_reg = ENET_SERDES_1_CTRL_CFG; + test_cfg_reg = ENET_SERDES_1_TEST_CFG; + pll_cfg = ENET_SERDES_1_PLL_CFG; + break; + + default: + return -EINVAL; + } + ctrl_val = (ENET_SERDES_CTRL_SDET_0 | + ENET_SERDES_CTRL_SDET_1 | + ENET_SERDES_CTRL_SDET_2 | + ENET_SERDES_CTRL_SDET_3 | + (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); + test_cfg_val = 0; + + if (lp->loopback_mode == LOOPBACK_PHY) { + test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_0_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_1_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_2_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_3_SHIFT)); + } + + nw64(ENET_SERDES_RESET, reset_val); + mdelay(20); + val_rd = nr64(ENET_SERDES_RESET); + val_rd &= ~reset_val; + nw64(pll_cfg, val); + nw64(ctrl_reg, ctrl_val); + nw64(test_cfg_reg, test_cfg_val); + nw64(ENET_SERDES_RESET, val_rd); + mdelay(2000); + + /* Initialize all 4 lanes of the SERDES. 
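for each lane the RXTX control and GLUE0 registers are read, the VMUXLO,
SRATE, THCNT and BLTIME fields rewritten (with ENSTRETCH and RXLOSENAB
enabled), and the results written back via
esr_write_rxtx_ctrl()/esr_write_glue0().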
*/ + for (i = 0; i < 4; i++) { + u32 rxtx_ctrl, glue0; + + err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); + if (err) + return err; + err = esr_read_glue0(np, i, &glue0); + if (err) + return err; + + rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); + rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | + (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); + + glue0 &= ~(ESR_GLUE_CTRL0_SRATE | + ESR_GLUE_CTRL0_THCNT | + ESR_GLUE_CTRL0_BLTIME); + glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | + (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | + (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | + (BLTIME_300_CYCLES << + ESR_GLUE_CTRL0_BLTIME_SHIFT)); + + err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); + if (err) + return err; + err = esr_write_glue0(np, i, glue0); + if (err) + return err; + } + + + sig = nr64(ESR_INT_SIGNALS); + switch (np->port) { + case 0: + val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); + mask = val; + break; + + case 1: + val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); + mask = val; + break; + + default: + return -EINVAL; + } + + if ((sig & mask) != val) { + netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", + np->port, (int)(sig & mask), (int)val); + return -ENODEV; + } + + return 0; +} + +static int link_status_1g_serdes(struct niu *np, int *link_up_p) +{ + struct niu_link_config *lp = &np->link_config; + int link_up; + u64 val; + u16 current_speed; + unsigned long flags; + u8 current_duplex; + + link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + spin_lock_irqsave(&np->lock, flags); + + val = nr64_pcs(PCS_MII_STAT); + + if (val & PCS_MII_STAT_LINK_STATUS) { + link_up = 1; + current_speed = SPEED_1000; + current_duplex = DUPLEX_FULL; + } + + lp->active_speed = current_speed; + lp->active_duplex = current_duplex; + spin_unlock_irqrestore(&np->lock, flags); + + *link_up_p = link_up; + return 0; +} + +static int link_status_10g_serdes(struct niu *np, int *link_up_p) +{ + unsigned long flags; + struct niu_link_config *lp = &np->link_config; + int link_up = 0; + int link_ok = 1; + u64 val, val2; + u16 current_speed; + u8 current_duplex; + + if (!(np->flags & NIU_FLAGS_10G)) + return link_status_1g_serdes(np, link_up_p); + + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + spin_lock_irqsave(&np->lock, flags); + + val = nr64_xpcs(XPCS_STATUS(0)); + val2 = nr64_mac(XMAC_INTER2); + if (val2 & 0x01000000) + link_ok = 0; + + if ((val & 0x1000ULL) && link_ok) { + link_up = 1; + current_speed = SPEED_10000; + current_duplex = DUPLEX_FULL; + } + lp->active_speed = current_speed; + lp->active_duplex = current_duplex; + spin_unlock_irqrestore(&np->lock, flags); + *link_up_p = link_up; + return 0; +} + +static int link_status_mii(struct niu *np, int *link_up_p) +{ + struct niu_link_config *lp = &np->link_config; + int err; + int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; + int supported, advertising, active_speed, active_duplex; + + err = mii_read(np, np->phy_addr, MII_BMCR); + if (unlikely(err < 0)) + return err; + bmcr = err; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (unlikely(err < 0)) + return err; + bmsr = err; + + err = mii_read(np, np->phy_addr, MII_ADVERTISE); + if (unlikely(err < 0)) + return err; + advert = err; + + err = mii_read(np, np->phy_addr, MII_LPA); + if (unlikely(err < 0)) + return err; + lpa = err; + + if (likely(bmsr & BMSR_ESTATEN)) { + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (unlikely(err < 0)) + return err; + estatus = err; + + err = mii_read(np, np->phy_addr, MII_CTRL1000); + if (unlikely(err < 0)) + return err; + ctrl1000 = err; + + err = 
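/* MII_STAT1000: link partner's 1000BASE-T half/full abilities,
compared with the local ctrl1000 advertisement below */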
mii_read(np, np->phy_addr, MII_STAT1000); + if (unlikely(err < 0)) + return err; + stat1000 = err; + } else + estatus = ctrl1000 = stat1000 = 0; + + supported = 0; + if (bmsr & BMSR_ANEGCAPABLE) + supported |= SUPPORTED_Autoneg; + if (bmsr & BMSR_10HALF) + supported |= SUPPORTED_10baseT_Half; + if (bmsr & BMSR_10FULL) + supported |= SUPPORTED_10baseT_Full; + if (bmsr & BMSR_100HALF) + supported |= SUPPORTED_100baseT_Half; + if (bmsr & BMSR_100FULL) + supported |= SUPPORTED_100baseT_Full; + if (estatus & ESTATUS_1000_THALF) + supported |= SUPPORTED_1000baseT_Half; + if (estatus & ESTATUS_1000_TFULL) + supported |= SUPPORTED_1000baseT_Full; + lp->supported = supported; + + advertising = mii_adv_to_ethtool_adv_t(advert); + advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); + + if (bmcr & BMCR_ANENABLE) { + int neg, neg1000; + + lp->active_autoneg = 1; + advertising |= ADVERTISED_Autoneg; + + neg = advert & lpa; + neg1000 = (ctrl1000 << 2) & stat1000; + + if (neg1000 & (LPA_1000FULL | LPA_1000HALF)) + active_speed = SPEED_1000; + else if (neg & LPA_100) + active_speed = SPEED_100; + else if (neg & (LPA_10HALF | LPA_10FULL)) + active_speed = SPEED_10; + else + active_speed = SPEED_INVALID; + + if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX)) + active_duplex = DUPLEX_FULL; + else if (active_speed != SPEED_INVALID) + active_duplex = DUPLEX_HALF; + else + active_duplex = DUPLEX_INVALID; + } else { + lp->active_autoneg = 0; + + if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100)) + active_speed = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + active_speed = SPEED_100; + else + active_speed = SPEED_10; + + if (bmcr & BMCR_FULLDPLX) + active_duplex = DUPLEX_FULL; + else + active_duplex = DUPLEX_HALF; + } + + lp->active_advertising = advertising; + lp->active_speed = active_speed; + lp->active_duplex = active_duplex; + *link_up_p = !!(bmsr & BMSR_LSTATUS); + + return 0; +} + +static int link_status_1g_rgmii(struct niu *np, int *link_up_p) +{ + struct niu_link_config *lp = &np->link_config; + u16 current_speed, bmsr; + unsigned long flags; + u8 current_duplex; + int err, link_up; + + link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + spin_lock_irqsave(&np->lock, flags); + + err = -EINVAL; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + goto out; + + bmsr = err; + if (bmsr & BMSR_LSTATUS) { + u16 adv, lpa; + + err = mii_read(np, np->phy_addr, MII_ADVERTISE); + if (err < 0) + goto out; + adv = err; + + err = mii_read(np, np->phy_addr, MII_LPA); + if (err < 0) + goto out; + lpa = err; + + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (err < 0) + goto out; + link_up = 1; + current_speed = SPEED_1000; + current_duplex = DUPLEX_FULL; + + } + lp->active_speed = current_speed; + lp->active_duplex = current_duplex; + err = 0; + +out: + spin_unlock_irqrestore(&np->lock, flags); + + *link_up_p = link_up; + return err; +} + +static int link_status_1g(struct niu *np, int *link_up_p) +{ + struct niu_link_config *lp = &np->link_config; + unsigned long flags; + int err; + + spin_lock_irqsave(&np->lock, flags); + + err = link_status_mii(np, link_up_p); + lp->supported |= SUPPORTED_TP; + lp->active_advertising |= ADVERTISED_TP; + + spin_unlock_irqrestore(&np->lock, flags); + return err; +} + +static int bcm8704_reset(struct niu *np) +{ + int err, limit; + + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err < 0 || err == 0xffff) + return err; + err |= BMCR_RESET; + err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, 
+ MII_BMCR, err); + if (err) + return err; + + limit = 1000; + while (--limit >= 0) { + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err < 0) + return err; + if (!(err & BMCR_RESET)) + break; + } + if (limit < 0) { + netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", + np->port, (err & 0xffff)); + return -ENODEV; + } + return 0; +} + +/* When written, certain PHY registers need to be read back twice + * in order for the bits to settle properly. + */ +static int bcm8704_user_dev3_readback(struct niu *np, int reg) +{ + int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); + if (err < 0) + return err; + return 0; +} + +static int bcm8706_init_user_dev3(struct niu *np) +{ + int err; + + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL); + if (err < 0) + return err; + err &= ~USER_ODIG_CTRL_GPIOS; + err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); + err |= USER_ODIG_CTRL_RESV2; + err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL, err); + if (err) + return err; + + mdelay(1000); + + return 0; +} + +static int bcm8704_init_user_dev3(struct niu *np) +{ + int err; + + err = mdio_write(np, np->phy_addr, + BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, + (USER_CONTROL_OPTXRST_LVL | + USER_CONTROL_OPBIASFLT_LVL | + USER_CONTROL_OBTMPFLT_LVL | + USER_CONTROL_OPPRFLT_LVL | + USER_CONTROL_OPTXFLT_LVL | + USER_CONTROL_OPRXLOS_LVL | + USER_CONTROL_OPRXFLT_LVL | + USER_CONTROL_OPTXON_LVL | + (0x3f << USER_CONTROL_RES1_SHIFT))); + if (err) + return err; + + err = mdio_write(np, np->phy_addr, + BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, + (USER_PMD_TX_CTL_XFP_CLKEN | + (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | + (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | + USER_PMD_TX_CTL_TSCK_LPWREN)); + if (err) + return err; + + err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); + if (err) + return err; + err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); + if (err) + return err; + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL); + if (err < 0) + return err; + err &= ~USER_ODIG_CTRL_GPIOS; + err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); + err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL, err); + if (err) + return err; + + mdelay(1000); + + return 0; +} + +static int mrvl88x2011_act_led(struct niu *np, int val) +{ + int err; + + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, + MRVL88X2011_LED_8_TO_11_CTL); + if (err < 0) + return err; + + err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK); + err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val); + + return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, + MRVL88X2011_LED_8_TO_11_CTL, err); +} + +static int mrvl88x2011_led_blink_rate(struct niu *np, int rate) +{ + int err; + + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, + MRVL88X2011_LED_BLINK_CTL); + if (err >= 0) { + err &= ~MRVL88X2011_LED_BLKRATE_MASK; + err |= (rate << 4); + + err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, + MRVL88X2011_LED_BLINK_CTL, err); + } + + return err; +} + +static int xcvr_init_10g_mrvl88x2011(struct niu *np) +{ + int err; + + /* Set LED functions */ + err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS); + if (err) + return err; + + /* led activity */ + err = 
mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF); + if (err) + return err; + + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, + MRVL88X2011_GENERAL_CTL); + if (err < 0) + return err; + + err |= MRVL88X2011_ENA_XFPREFCLK; + + err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, + MRVL88X2011_GENERAL_CTL, err); + if (err < 0) + return err; + + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, + MRVL88X2011_PMA_PMD_CTL_1); + if (err < 0) + return err; + + if (np->link_config.loopback_mode == LOOPBACK_MAC) + err |= MRVL88X2011_LOOPBACK; + else + err &= ~MRVL88X2011_LOOPBACK; + + err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, + MRVL88X2011_PMA_PMD_CTL_1, err); + if (err < 0) + return err; + + /* Enable PMD */ + return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, + MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX); +} + + +static int xcvr_diag_bcm870x(struct niu *np) +{ + u16 analog_stat0, tx_alarm_status; + int err = 0; + +#if 1 + err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, + MII_STAT1000); + if (err < 0) + return err; + pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); + if (err < 0) + return err; + pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); + + err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + MII_NWAYTEST); + if (err < 0) + return err; + pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); +#endif + + /* XXX dig this out it might not be so useful XXX */ + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_ANALOG_STATUS0); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_ANALOG_STATUS0); + if (err < 0) + return err; + analog_stat0 = err; + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_TX_ALARM_STATUS); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_TX_ALARM_STATUS); + if (err < 0) + return err; + tx_alarm_status = err; + + if (analog_stat0 != 0x03fc) { + if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { + pr_info("Port %u cable not connected or bad cable\n", + np->port); + } else if (analog_stat0 == 0x639c) { + pr_info("Port %u optical module is bad or missing\n", + np->port); + } + } + + return 0; +} + +static int xcvr_10g_set_lb_bcm870x(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + int err; + + err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + MII_BMCR); + if (err < 0) + return err; + + err &= ~BMCR_LOOPBACK; + + if (lp->loopback_mode == LOOPBACK_MAC) + err |= BMCR_LOOPBACK; + + err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + MII_BMCR, err); + if (err) + return err; + + return 0; +} + +static int xcvr_init_10g_bcm8706(struct niu *np) +{ + int err = 0; + u64 val; + + if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && + (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) + return err; + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_LED_POLARITY; + val |= XMAC_CONFIG_FORCE_LED_ON; + nw64_mac(XMAC_CONFIG, val); + + val = nr64(MIF_CONFIG); + val |= MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + err = bcm8704_reset(np); + if (err) + return err; + + err = xcvr_10g_set_lb_bcm870x(np); + if (err) + return err; + + err = bcm8706_init_user_dev3(np); + if (err) + return err; + + err = xcvr_diag_bcm870x(np); + if (err) + return err; + + return 0; +} + +static int 
xcvr_init_10g_bcm8704(struct niu *np) +{ + int err; + + err = bcm8704_reset(np); + if (err) + return err; + + err = bcm8704_init_user_dev3(np); + if (err) + return err; + + err = xcvr_10g_set_lb_bcm870x(np); + if (err) + return err; + + err = xcvr_diag_bcm870x(np); + if (err) + return err; + + return 0; +} + +static int xcvr_init_10g(struct niu *np) +{ + int phy_id, err; + u64 val; + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_LED_POLARITY; + val |= XMAC_CONFIG_FORCE_LED_ON; + nw64_mac(XMAC_CONFIG, val); + + /* XXX shared resource, lock parent XXX */ + val = nr64(MIF_CONFIG); + val |= MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + phy_id = phy_decode(np->parent->port_phy, np->port); + phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; + + /* handle different phy types */ + switch (phy_id & NIU_PHY_ID_MASK) { + case NIU_PHY_ID_MRVL88X2011: + err = xcvr_init_10g_mrvl88x2011(np); + break; + + default: /* bcom 8704 */ + err = xcvr_init_10g_bcm8704(np); + break; + } + + return err; +} + +static int mii_reset(struct niu *np) +{ + int limit, err; + + err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); + if (err) + return err; + + limit = 1000; + while (--limit >= 0) { + udelay(500); + err = mii_read(np, np->phy_addr, MII_BMCR); + if (err < 0) + return err; + if (!(err & BMCR_RESET)) + break; + } + if (limit < 0) { + netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", + np->port, err); + return -ENODEV; + } + + return 0; +} + +static int xcvr_init_1g_rgmii(struct niu *np) +{ + int err; + u64 val; + u16 bmcr, bmsr, estat; + + val = nr64(MIF_CONFIG); + val &= ~MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + err = mii_reset(np); + if (err) + return err; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; + bmsr = err; + + estat = 0; + if (bmsr & BMSR_ESTATEN) { + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (err < 0) + return err; + estat = err; + } + + bmcr = 0; + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + + if (bmsr & BMSR_ESTATEN) { + u16 ctrl1000 = 0; + + if (estat & ESTATUS_1000_TFULL) + ctrl1000 |= ADVERTISE_1000FULL; + err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); + if (err) + return err; + } + + bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX); + + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + + err = mii_read(np, np->phy_addr, MII_BMCR); + if (err < 0) + return err; + bmcr = mii_read(np, np->phy_addr, MII_BMCR); + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; + + return 0; +} + +static int mii_init_common(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u16 bmcr, bmsr, adv, estat; + int err; + + err = mii_reset(np); + if (err) + return err; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; + bmsr = err; + + estat = 0; + if (bmsr & BMSR_ESTATEN) { + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (err < 0) + return err; + estat = err; + } + + bmcr = 0; + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + + if (lp->loopback_mode == LOOPBACK_MAC) { + bmcr |= BMCR_LOOPBACK; + if (lp->active_speed == SPEED_1000) + bmcr |= BMCR_SPEED1000; + if (lp->active_duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 aux; + + aux = (BCM5464R_AUX_CTL_EXT_LB | + BCM5464R_AUX_CTL_WRITE_1); + err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); + if (err) + return err; + } + + if (lp->autoneg) { + 
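+ /* Advertise only the modes that both the PHY (bmsr/estat) and the
+ * requested link_config actually support. */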
u16 ctrl1000; + + adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; + if ((bmsr & BMSR_10HALF) && + (lp->advertising & ADVERTISED_10baseT_Half)) + adv |= ADVERTISE_10HALF; + if ((bmsr & BMSR_10FULL) && + (lp->advertising & ADVERTISED_10baseT_Full)) + adv |= ADVERTISE_10FULL; + if ((bmsr & BMSR_100HALF) && + (lp->advertising & ADVERTISED_100baseT_Half)) + adv |= ADVERTISE_100HALF; + if ((bmsr & BMSR_100FULL) && + (lp->advertising & ADVERTISED_100baseT_Full)) + adv |= ADVERTISE_100FULL; + err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); + if (err) + return err; + + if (likely(bmsr & BMSR_ESTATEN)) { + ctrl1000 = 0; + if ((estat & ESTATUS_1000_THALF) && + (lp->advertising & ADVERTISED_1000baseT_Half)) + ctrl1000 |= ADVERTISE_1000HALF; + if ((estat & ESTATUS_1000_TFULL) && + (lp->advertising & ADVERTISED_1000baseT_Full)) + ctrl1000 |= ADVERTISE_1000FULL; + err = mii_write(np, np->phy_addr, + MII_CTRL1000, ctrl1000); + if (err) + return err; + } + + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + } else { + /* !lp->autoneg */ + int fulldpx; + + if (lp->duplex == DUPLEX_FULL) { + bmcr |= BMCR_FULLDPLX; + fulldpx = 1; + } else if (lp->duplex == DUPLEX_HALF) + fulldpx = 0; + else + return -EINVAL; + + if (lp->speed == SPEED_1000) { + /* if X-full requested while not supported, or + X-half requested while not supported... */ + if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) || + (!fulldpx && !(estat & ESTATUS_1000_THALF))) + return -EINVAL; + bmcr |= BMCR_SPEED1000; + } else if (lp->speed == SPEED_100) { + if ((fulldpx && !(bmsr & BMSR_100FULL)) || + (!fulldpx && !(bmsr & BMSR_100HALF))) + return -EINVAL; + bmcr |= BMCR_SPEED100; + } else if (lp->speed == SPEED_10) { + if ((fulldpx && !(bmsr & BMSR_10FULL)) || + (!fulldpx && !(bmsr & BMSR_10HALF))) + return -EINVAL; + } else + return -EINVAL; + } + + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + +#if 0 + err = mii_read(np, np->phy_addr, MII_BMCR); + if (err < 0) + return err; + bmcr = err; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; + bmsr = err; + + pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n", + np->port, bmcr, bmsr); +#endif + + return 0; +} + +static int xcvr_init_1g(struct niu *np) +{ + u64 val; + + /* XXX shared resource, lock parent XXX */ + val = nr64(MIF_CONFIG); + val &= ~MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + return mii_init_common(np); +} + +static int niu_xcvr_init(struct niu *np) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->xcvr_init) + err = ops->xcvr_init(np); + + return err; +} + +static int niu_serdes_init(struct niu *np) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->serdes_init) + err = ops->serdes_init(np); + + return err; +} + +static void niu_init_xif(struct niu *); +static void niu_handle_led(struct niu *, int status); + +static int niu_link_status_common(struct niu *np, int link_up) +{ + struct niu_link_config *lp = &np->link_config; + struct net_device *dev = np->dev; + unsigned long flags; + + if (!netif_carrier_ok(dev) && link_up) { + netif_info(np, link, dev, "Link is up at %s, %s duplex\n", + lp->active_speed == SPEED_10000 ? "10Gb/sec" : + lp->active_speed == SPEED_1000 ? "1Gb/sec" : + lp->active_speed == SPEED_100 ? "100Mbit/sec" : + "10Mbit/sec", + lp->active_duplex == DUPLEX_FULL ? 
"full" : "half"); + + spin_lock_irqsave(&np->lock, flags); + niu_init_xif(np); + niu_handle_led(np, 1); + spin_unlock_irqrestore(&np->lock, flags); + + netif_carrier_on(dev); + } else if (netif_carrier_ok(dev) && !link_up) { + netif_warn(np, link, dev, "Link is down\n"); + spin_lock_irqsave(&np->lock, flags); + niu_handle_led(np, 0); + spin_unlock_irqrestore(&np->lock, flags); + netif_carrier_off(dev); + } + + return 0; +} + +static int link_status_10g_mrvl(struct niu *np, int *link_up_p) +{ + int err, link_up, pma_status, pcs_status; + + link_up = 0; + + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, + MRVL88X2011_10G_PMD_STATUS_2); + if (err < 0) + goto out; + + /* Check PMA/PMD Register: 1.0001.2 == 1 */ + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, + MRVL88X2011_PMA_PMD_STATUS_1); + if (err < 0) + goto out; + + pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); + + /* Check PMC Register : 3.0001.2 == 1: read twice */ + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, + MRVL88X2011_PMA_PMD_STATUS_1); + if (err < 0) + goto out; + + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, + MRVL88X2011_PMA_PMD_STATUS_1); + if (err < 0) + goto out; + + pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0); + + /* Check XGXS Register : 4.0018.[0-3,12] */ + err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, + MRVL88X2011_10G_XGXS_LANE_STAT); + if (err < 0) + goto out; + + if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 | + PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 | + PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC | + 0x800)) + link_up = (pma_status && pcs_status) ? 1 : 0; + + np->link_config.active_speed = SPEED_10000; + np->link_config.active_duplex = DUPLEX_FULL; + err = 0; +out: + mrvl88x2011_act_led(np, (link_up ? 
+ MRVL88X2011_LED_CTL_PCS_ACT : + MRVL88X2011_LED_CTL_OFF)); + + *link_up_p = link_up; + return err; +} + +static int link_status_10g_bcm8706(struct niu *np, int *link_up_p) +{ + int err, link_up; + link_up = 0; + + err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, + BCM8704_PMD_RCV_SIGDET); + if (err < 0 || err == 0xffff) + goto out; + if (!(err & PMD_RCV_SIGDET_GLOBAL)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + BCM8704_PCS_10G_R_STATUS); + if (err < 0) + goto out; + + if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + BCM8704_PHYXS_XGXS_LANE_STAT); + if (err < 0) + goto out; + if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | + PHYXS_XGXS_LANE_STAT_MAGIC | + PHYXS_XGXS_LANE_STAT_PATTEST | + PHYXS_XGXS_LANE_STAT_LANE3 | + PHYXS_XGXS_LANE_STAT_LANE2 | + PHYXS_XGXS_LANE_STAT_LANE1 | + PHYXS_XGXS_LANE_STAT_LANE0)) { + err = 0; + np->link_config.active_speed = SPEED_INVALID; + np->link_config.active_duplex = DUPLEX_INVALID; + goto out; + } + + link_up = 1; + np->link_config.active_speed = SPEED_10000; + np->link_config.active_duplex = DUPLEX_FULL; + err = 0; + +out: + *link_up_p = link_up; + return err; +} + +static int link_status_10g_bcom(struct niu *np, int *link_up_p) +{ + int err, link_up; + + link_up = 0; + + err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, + BCM8704_PMD_RCV_SIGDET); + if (err < 0) + goto out; + if (!(err & PMD_RCV_SIGDET_GLOBAL)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + BCM8704_PCS_10G_R_STATUS); + if (err < 0) + goto out; + if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + BCM8704_PHYXS_XGXS_LANE_STAT); + if (err < 0) + goto out; + + if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | + PHYXS_XGXS_LANE_STAT_MAGIC | + PHYXS_XGXS_LANE_STAT_LANE3 | + PHYXS_XGXS_LANE_STAT_LANE2 | + PHYXS_XGXS_LANE_STAT_LANE1 | + PHYXS_XGXS_LANE_STAT_LANE0)) { + err = 0; + goto out; + } + + link_up = 1; + np->link_config.active_speed = SPEED_10000; + np->link_config.active_duplex = DUPLEX_FULL; + err = 0; + +out: + *link_up_p = link_up; + return err; +} + +static int link_status_10g(struct niu *np, int *link_up_p) +{ + unsigned long flags; + int err = -EINVAL; + + spin_lock_irqsave(&np->lock, flags); + + if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { + int phy_id; + + phy_id = phy_decode(np->parent->port_phy, np->port); + phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; + + /* handle different phy types */ + switch (phy_id & NIU_PHY_ID_MASK) { + case NIU_PHY_ID_MRVL88X2011: + err = link_status_10g_mrvl(np, link_up_p); + break; + + default: /* bcom 8704 */ + err = link_status_10g_bcom(np, link_up_p); + break; + } + } + + spin_unlock_irqrestore(&np->lock, flags); + + return err; +} + +static int niu_10g_phy_present(struct niu *np) +{ + u64 sig, mask, val; + + sig = nr64(ESR_INT_SIGNALS); + switch (np->port) { + case 0: + mask = ESR_INT_SIGNALS_P0_BITS; + val = (ESR_INT_SRDY0_P0 | + ESR_INT_DET0_P0 | + ESR_INT_XSRDY_P0 | + ESR_INT_XDP_P0_CH3 | + ESR_INT_XDP_P0_CH2 | + ESR_INT_XDP_P0_CH1 | + ESR_INT_XDP_P0_CH0); + break; + + case 1: + mask = ESR_INT_SIGNALS_P1_BITS; + val = (ESR_INT_SRDY0_P1 | + ESR_INT_DET0_P1 | + ESR_INT_XSRDY_P1 | + ESR_INT_XDP_P1_CH3 | + ESR_INT_XDP_P1_CH2 | + ESR_INT_XDP_P1_CH1 | + ESR_INT_XDP_P1_CH0); + break; + + default: + return 0; + } + + if ((sig & mask) != val) + return 0; + 
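+ /* Every signal-detect/ready bit for this port is asserted, so a 10G PHY is present. */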
return 1; +} + +static int link_status_10g_hotplug(struct niu *np, int *link_up_p) +{ + unsigned long flags; + int err = 0; + int phy_present; + int phy_present_prev; + + spin_lock_irqsave(&np->lock, flags); + + if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { + phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? + 1 : 0; + phy_present = niu_10g_phy_present(np); + if (phy_present != phy_present_prev) { + /* state change */ + if (phy_present) { + /* A NEM was just plugged in */ + np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; + if (np->phy_ops->xcvr_init) + err = np->phy_ops->xcvr_init(np); + if (err) { + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err == 0xffff) { + /* No mdio, back-to-back XAUI */ + goto out; + } + /* debounce */ + np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; + } + } else { + np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; + *link_up_p = 0; + netif_warn(np, link, np->dev, + "Hotplug PHY Removed\n"); + } + } +out: + if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { + err = link_status_10g_bcm8706(np, link_up_p); + if (err == 0xffff) { + /* No mdio, back-to-back XAUI: it is C10NEM */ + *link_up_p = 1; + np->link_config.active_speed = SPEED_10000; + np->link_config.active_duplex = DUPLEX_FULL; + } + } + } + + spin_unlock_irqrestore(&np->lock, flags); + + return 0; +} + +static int niu_link_status(struct niu *np, int *link_up_p) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->link_status) + err = ops->link_status(np, link_up_p); + + return err; +} + +static void niu_timer(unsigned long __opaque) +{ + struct niu *np = (struct niu *) __opaque; + unsigned long off; + int err, link_up; + + err = niu_link_status(np, &link_up); + if (!err) + niu_link_status_common(np, link_up); + + if (netif_carrier_ok(np->dev)) + off = 5 * HZ; + else + off = 1 * HZ; + np->timer.expires = jiffies + off; + + add_timer(&np->timer); +} + +static const struct niu_phy_ops phy_ops_10g_serdes = { + .serdes_init = serdes_init_10g_serdes, + .link_status = link_status_10g_serdes, +}; + +static const struct niu_phy_ops phy_ops_10g_serdes_niu = { + .serdes_init = serdes_init_niu_10g_serdes, + .link_status = link_status_10g_serdes, +}; + +static const struct niu_phy_ops phy_ops_1g_serdes_niu = { + .serdes_init = serdes_init_niu_1g_serdes, + .link_status = link_status_1g_serdes, +}; + +static const struct niu_phy_ops phy_ops_1g_rgmii = { + .xcvr_init = xcvr_init_1g_rgmii, + .link_status = link_status_1g_rgmii, +}; + +static const struct niu_phy_ops phy_ops_10g_fiber_niu = { + .serdes_init = serdes_init_niu_10g_fiber, + .xcvr_init = xcvr_init_10g, + .link_status = link_status_10g, +}; + +static const struct niu_phy_ops phy_ops_10g_fiber = { + .serdes_init = serdes_init_10g, + .xcvr_init = xcvr_init_10g, + .link_status = link_status_10g, +}; + +static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = { + .serdes_init = serdes_init_10g, + .xcvr_init = xcvr_init_10g_bcm8706, + .link_status = link_status_10g_hotplug, +}; + +static const struct niu_phy_ops phy_ops_niu_10g_hotplug = { + .serdes_init = serdes_init_niu_10g_fiber, + .xcvr_init = xcvr_init_10g_bcm8706, + .link_status = link_status_10g_hotplug, +}; + +static const struct niu_phy_ops phy_ops_10g_copper = { + .serdes_init = serdes_init_10g, + .link_status = link_status_10g, /* XXX */ +}; + +static const struct niu_phy_ops phy_ops_1g_fiber = { + .serdes_init = serdes_init_1g, + .xcvr_init = xcvr_init_1g, + .link_status = link_status_1g, +}; + +static const struct 
niu_phy_ops phy_ops_1g_copper = { + .xcvr_init = xcvr_init_1g, + .link_status = link_status_1g, +}; + +struct niu_phy_template { + const struct niu_phy_ops *ops; + u32 phy_addr_base; +}; + +static const struct niu_phy_template phy_template_niu_10g_fiber = { + .ops = &phy_ops_10g_fiber_niu, + .phy_addr_base = 16, +}; + +static const struct niu_phy_template phy_template_niu_10g_serdes = { + .ops = &phy_ops_10g_serdes_niu, + .phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_niu_1g_serdes = { + .ops = &phy_ops_1g_serdes_niu, + .phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_10g_fiber = { + .ops = &phy_ops_10g_fiber, + .phy_addr_base = 8, +}; + +static const struct niu_phy_template phy_template_10g_fiber_hotplug = { + .ops = &phy_ops_10g_fiber_hotplug, + .phy_addr_base = 8, +}; + +static const struct niu_phy_template phy_template_niu_10g_hotplug = { + .ops = &phy_ops_niu_10g_hotplug, + .phy_addr_base = 8, +}; + +static const struct niu_phy_template phy_template_10g_copper = { + .ops = &phy_ops_10g_copper, + .phy_addr_base = 10, +}; + +static const struct niu_phy_template phy_template_1g_fiber = { + .ops = &phy_ops_1g_fiber, + .phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_1g_copper = { + .ops = &phy_ops_1g_copper, + .phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_1g_rgmii = { + .ops = &phy_ops_1g_rgmii, + .phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_10g_serdes = { + .ops = &phy_ops_10g_serdes, + .phy_addr_base = 0, +}; + +static int niu_atca_port_num[4] = { + 0, 0, 11, 10 +}; + +static int serdes_init_10g_serdes(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; + u64 ctrl_val, test_cfg_val, sig, mask, val; + + switch (np->port) { + case 0: + ctrl_reg = ENET_SERDES_0_CTRL_CFG; + test_cfg_reg = ENET_SERDES_0_TEST_CFG; + pll_cfg = ENET_SERDES_0_PLL_CFG; + break; + case 1: + ctrl_reg = ENET_SERDES_1_CTRL_CFG; + test_cfg_reg = ENET_SERDES_1_TEST_CFG; + pll_cfg = ENET_SERDES_1_PLL_CFG; + break; + + default: + return -EINVAL; + } + ctrl_val = (ENET_SERDES_CTRL_SDET_0 | + ENET_SERDES_CTRL_SDET_1 | + ENET_SERDES_CTRL_SDET_2 | + ENET_SERDES_CTRL_SDET_3 | + (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); + test_cfg_val = 0; + + if (lp->loopback_mode == LOOPBACK_PHY) { + test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_0_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_1_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_2_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_3_SHIFT)); + } + + esr_reset(np); + nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2); + nw64(ctrl_reg, ctrl_val); + nw64(test_cfg_reg, test_cfg_val); + + /* Initialize all 4 lanes of the SERDES. 
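+ * The per-lane RX/TX control and glue0 programming mirrors serdes_init_1g_serdes() above.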
*/ + for (i = 0; i < 4; i++) { + u32 rxtx_ctrl, glue0; + int err; + + err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); + if (err) + return err; + err = esr_read_glue0(np, i, &glue0); + if (err) + return err; + + rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); + rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | + (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); + + glue0 &= ~(ESR_GLUE_CTRL0_SRATE | + ESR_GLUE_CTRL0_THCNT | + ESR_GLUE_CTRL0_BLTIME); + glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | + (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | + (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | + (BLTIME_300_CYCLES << + ESR_GLUE_CTRL0_BLTIME_SHIFT)); + + err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); + if (err) + return err; + err = esr_write_glue0(np, i, glue0); + if (err) + return err; + } + + + sig = nr64(ESR_INT_SIGNALS); + switch (np->port) { + case 0: + mask = ESR_INT_SIGNALS_P0_BITS; + val = (ESR_INT_SRDY0_P0 | + ESR_INT_DET0_P0 | + ESR_INT_XSRDY_P0 | + ESR_INT_XDP_P0_CH3 | + ESR_INT_XDP_P0_CH2 | + ESR_INT_XDP_P0_CH1 | + ESR_INT_XDP_P0_CH0); + break; + + case 1: + mask = ESR_INT_SIGNALS_P1_BITS; + val = (ESR_INT_SRDY0_P1 | + ESR_INT_DET0_P1 | + ESR_INT_XSRDY_P1 | + ESR_INT_XDP_P1_CH3 | + ESR_INT_XDP_P1_CH2 | + ESR_INT_XDP_P1_CH1 | + ESR_INT_XDP_P1_CH0); + break; + + default: + return -EINVAL; + } + + if ((sig & mask) != val) { + int err; + err = serdes_init_1g_serdes(np); + if (!err) { + np->flags &= ~NIU_FLAGS_10G; + np->mac_xcvr = MAC_XCVR_PCS; + } else { + netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", + np->port); + return -ENODEV; + } + } + + return 0; +} + +static int niu_determine_phy_disposition(struct niu *np) +{ + struct niu_parent *parent = np->parent; + u8 plat_type = parent->plat_type; + const struct niu_phy_template *tp; + u32 phy_addr_off = 0; + + if (plat_type == PLAT_TYPE_NIU) { + switch (np->flags & + (NIU_FLAGS_10G | + NIU_FLAGS_FIBER | + NIU_FLAGS_XCVR_SERDES)) { + case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: + /* 10G Serdes */ + tp = &phy_template_niu_10g_serdes; + break; + case NIU_FLAGS_XCVR_SERDES: + /* 1G Serdes */ + tp = &phy_template_niu_1g_serdes; + break; + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + /* 10G Fiber */ + default: + if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { + tp = &phy_template_niu_10g_hotplug; + if (np->port == 0) + phy_addr_off = 8; + if (np->port == 1) + phy_addr_off = 12; + } else { + tp = &phy_template_niu_10g_fiber; + phy_addr_off += np->port; + } + break; + } + } else { + switch (np->flags & + (NIU_FLAGS_10G | + NIU_FLAGS_FIBER | + NIU_FLAGS_XCVR_SERDES)) { + case 0: + /* 1G copper */ + tp = &phy_template_1g_copper; + if (plat_type == PLAT_TYPE_VF_P0) + phy_addr_off = 10; + else if (plat_type == PLAT_TYPE_VF_P1) + phy_addr_off = 26; + + phy_addr_off += (np->port ^ 0x3); + break; + + case NIU_FLAGS_10G: + /* 10G copper */ + tp = &phy_template_10g_copper; + break; + + case NIU_FLAGS_FIBER: + /* 1G fiber */ + tp = &phy_template_1g_fiber; + break; + + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + /* 10G fiber */ + tp = &phy_template_10g_fiber; + if (plat_type == PLAT_TYPE_VF_P0 || + plat_type == PLAT_TYPE_VF_P1) + phy_addr_off = 8; + phy_addr_off += np->port; + if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { + tp = &phy_template_10g_fiber_hotplug; + if (np->port == 0) + phy_addr_off = 8; + if (np->port == 1) + phy_addr_off = 12; + } + break; + + case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: + case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: + case NIU_FLAGS_XCVR_SERDES: + switch(np->port) { + case 0: + case 1: + tp = &phy_template_10g_serdes; + break; + case 2: + case 3: + tp = &phy_template_1g_rgmii; + break; + 
default: + return -EINVAL; + } + phy_addr_off = niu_atca_port_num[np->port]; + break; + + default: + return -EINVAL; + } + } + + np->phy_ops = tp->ops; + np->phy_addr = tp->phy_addr_base + phy_addr_off; + + return 0; +} + +static int niu_init_link(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int err, ignore; + + if (parent->plat_type == PLAT_TYPE_NIU) { + err = niu_xcvr_init(np); + if (err) + return err; + msleep(200); + } + err = niu_serdes_init(np); + if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) + return err; + msleep(200); + err = niu_xcvr_init(np); + if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) + niu_link_status(np, &ignore); + return 0; +} + +static void niu_set_primary_mac(struct niu *np, unsigned char *addr) +{ + u16 reg0 = addr[4] << 8 | addr[5]; + u16 reg1 = addr[2] << 8 | addr[3]; + u16 reg2 = addr[0] << 8 | addr[1]; + + if (np->flags & NIU_FLAGS_XMAC) { + nw64_mac(XMAC_ADDR0, reg0); + nw64_mac(XMAC_ADDR1, reg1); + nw64_mac(XMAC_ADDR2, reg2); + } else { + nw64_mac(BMAC_ADDR0, reg0); + nw64_mac(BMAC_ADDR1, reg1); + nw64_mac(BMAC_ADDR2, reg2); + } +} + +static int niu_num_alt_addr(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return XMAC_NUM_ALT_ADDR; + else + return BMAC_NUM_ALT_ADDR; +} + +static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) +{ + u16 reg0 = addr[4] << 8 | addr[5]; + u16 reg1 = addr[2] << 8 | addr[3]; + u16 reg2 = addr[0] << 8 | addr[1]; + + if (index >= niu_num_alt_addr(np)) + return -EINVAL; + + if (np->flags & NIU_FLAGS_XMAC) { + nw64_mac(XMAC_ALT_ADDR0(index), reg0); + nw64_mac(XMAC_ALT_ADDR1(index), reg1); + nw64_mac(XMAC_ALT_ADDR2(index), reg2); + } else { + nw64_mac(BMAC_ALT_ADDR0(index), reg0); + nw64_mac(BMAC_ALT_ADDR1(index), reg1); + nw64_mac(BMAC_ALT_ADDR2(index), reg2); + } + + return 0; +} + +static int niu_enable_alt_mac(struct niu *np, int index, int on) +{ + unsigned long reg; + u64 val, mask; + + if (index >= niu_num_alt_addr(np)) + return -EINVAL; + + if (np->flags & NIU_FLAGS_XMAC) { + reg = XMAC_ADDR_CMPEN; + mask = 1 << index; + } else { + reg = BMAC_ADDR_CMPEN; + mask = 1 << (index + 1); + } + + val = nr64_mac(reg); + if (on) + val |= mask; + else + val &= ~mask; + nw64_mac(reg, val); + + return 0; +} + +static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, + int num, int mac_pref) +{ + u64 val = nr64_mac(reg); + val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); + val |= num; + if (mac_pref) + val |= HOST_INFO_MPR; + nw64_mac(reg, val); +} + +static int __set_rdc_table_num(struct niu *np, + int xmac_index, int bmac_index, + int rdc_table_num, int mac_pref) +{ + unsigned long reg; + + if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) + return -EINVAL; + if (np->flags & NIU_FLAGS_XMAC) + reg = XMAC_HOST_INFO(xmac_index); + else + reg = BMAC_HOST_INFO(bmac_index); + __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); + return 0; +} + +static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, + int mac_pref) +{ + return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); +} + +static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, + int mac_pref) +{ + return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); +} + +static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, + int table_num, int mac_pref) +{ + if (idx >= niu_num_alt_addr(np)) + return -EINVAL; + return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); +} + +static u64 vlan_entry_set_parity(u64 reg_val) +{ + u64 port01_mask; + u64 port23_mask; + + port01_mask = 
0x00ff; + port23_mask = 0xff00; + + if (hweight64(reg_val & port01_mask) & 1) + reg_val |= ENET_VLAN_TBL_PARITY0; + else + reg_val &= ~ENET_VLAN_TBL_PARITY0; + + if (hweight64(reg_val & port23_mask) & 1) + reg_val |= ENET_VLAN_TBL_PARITY1; + else + reg_val &= ~ENET_VLAN_TBL_PARITY1; + + return reg_val; +} + +static void vlan_tbl_write(struct niu *np, unsigned long index, + int port, int vpr, int rdc_table) +{ + u64 reg_val = nr64(ENET_VLAN_TBL(index)); + + reg_val &= ~((ENET_VLAN_TBL_VPR | + ENET_VLAN_TBL_VLANRDCTBLN) << + ENET_VLAN_TBL_SHIFT(port)); + if (vpr) + reg_val |= (ENET_VLAN_TBL_VPR << + ENET_VLAN_TBL_SHIFT(port)); + reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); + + reg_val = vlan_entry_set_parity(reg_val); + + nw64(ENET_VLAN_TBL(index), reg_val); +} + +static void vlan_tbl_clear(struct niu *np) +{ + int i; + + for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) + nw64(ENET_VLAN_TBL(i), 0); +} + +static int tcam_wait_bit(struct niu *np, u64 bit) +{ + int limit = 1000; + + while (--limit > 0) { + if (nr64(TCAM_CTL) & bit) + break; + udelay(1); + } + if (limit <= 0) + return -ENODEV; + + return 0; +} + +static int tcam_flush(struct niu *np, int index) +{ + nw64(TCAM_KEY_0, 0x00); + nw64(TCAM_KEY_MASK_0, 0xff); + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +#if 0 +static int tcam_read(struct niu *np, int index, + u64 *key, u64 *mask) +{ + int err; + + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); + err = tcam_wait_bit(np, TCAM_CTL_STAT); + if (!err) { + key[0] = nr64(TCAM_KEY_0); + key[1] = nr64(TCAM_KEY_1); + key[2] = nr64(TCAM_KEY_2); + key[3] = nr64(TCAM_KEY_3); + mask[0] = nr64(TCAM_KEY_MASK_0); + mask[1] = nr64(TCAM_KEY_MASK_1); + mask[2] = nr64(TCAM_KEY_MASK_2); + mask[3] = nr64(TCAM_KEY_MASK_3); + } + return err; +} +#endif + +static int tcam_write(struct niu *np, int index, + u64 *key, u64 *mask) +{ + nw64(TCAM_KEY_0, key[0]); + nw64(TCAM_KEY_1, key[1]); + nw64(TCAM_KEY_2, key[2]); + nw64(TCAM_KEY_3, key[3]); + nw64(TCAM_KEY_MASK_0, mask[0]); + nw64(TCAM_KEY_MASK_1, mask[1]); + nw64(TCAM_KEY_MASK_2, mask[2]); + nw64(TCAM_KEY_MASK_3, mask[3]); + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +#if 0 +static int tcam_assoc_read(struct niu *np, int index, u64 *data) +{ + int err; + + nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); + err = tcam_wait_bit(np, TCAM_CTL_STAT); + if (!err) + *data = nr64(TCAM_KEY_1); + + return err; +} +#endif + +static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) +{ + nw64(TCAM_KEY_1, assoc_data); + nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +static void tcam_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val &= ~FFLP_CFG_1_TCAM_DIS; + else + val |= FFLP_CFG_1_TCAM_DIS; + nw64(FFLP_CFG_1, val); +} + +static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) +{ + u64 val = nr64(FFLP_CFG_1); + + val &= ~(FFLP_CFG_1_FFLPINITDONE | + FFLP_CFG_1_CAMLAT | + FFLP_CFG_1_CAMRATIO); + val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); + val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); + nw64(FFLP_CFG_1, val); + + val = nr64(FFLP_CFG_1); + val |= FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); +} + +static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, + int on) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_ETHERTYPE1 || + class > CLASS_CODE_ETHERTYPE2) + return -EINVAL; + + reg = L2_CLS(class 
- CLASS_CODE_ETHERTYPE1); + val = nr64(reg); + if (on) + val |= L2_CLS_VLD; + else + val &= ~L2_CLS_VLD; + nw64(reg, val); + + return 0; +} + +#if 0 +static int tcam_user_eth_class_set(struct niu *np, unsigned long class, + u64 ether_type) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_ETHERTYPE1 || + class > CLASS_CODE_ETHERTYPE2 || + (ether_type & ~(u64)0xffff) != 0) + return -EINVAL; + + reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); + val = nr64(reg); + val &= ~L2_CLS_ETYPE; + val |= (ether_type << L2_CLS_ETYPE_SHIFT); + nw64(reg, val); + + return 0; +} +#endif + +static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, + int on) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_USER_PROG4) + return -EINVAL; + + reg = L3_CLS(class - CLASS_CODE_USER_PROG1); + val = nr64(reg); + if (on) + val |= L3_CLS_VALID; + else + val &= ~L3_CLS_VALID; + nw64(reg, val); + + return 0; +} + +static int tcam_user_ip_class_set(struct niu *np, unsigned long class, + int ipv6, u64 protocol_id, + u64 tos_mask, u64 tos_val) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_USER_PROG4 || + (protocol_id & ~(u64)0xff) != 0 || + (tos_mask & ~(u64)0xff) != 0 || + (tos_val & ~(u64)0xff) != 0) + return -EINVAL; + + reg = L3_CLS(class - CLASS_CODE_USER_PROG1); + val = nr64(reg); + val &= ~(L3_CLS_IPVER | L3_CLS_PID | + L3_CLS_TOSMASK | L3_CLS_TOS); + if (ipv6) + val |= L3_CLS_IPVER; + val |= (protocol_id << L3_CLS_PID_SHIFT); + val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); + val |= (tos_val << L3_CLS_TOS_SHIFT); + nw64(reg, val); + + return 0; +} + +static int tcam_early_init(struct niu *np) +{ + unsigned long i; + int err; + + tcam_enable(np, 0); + tcam_set_lat_and_ratio(np, + DEFAULT_TCAM_LATENCY, + DEFAULT_TCAM_ACCESS_RATIO); + for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { + err = tcam_user_eth_class_enable(np, i, 0); + if (err) + return err; + } + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { + err = tcam_user_ip_class_enable(np, i, 0); + if (err) + return err; + } + + return 0; +} + +static int tcam_flush_all(struct niu *np) +{ + unsigned long i; + + for (i = 0; i < np->parent->tcam_num_entries; i++) { + int err = tcam_flush(np, i); + if (err) + return err; + } + return 0; +} + +static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) +{ + return (u64)index | (num_entries == 1 ? 
HASH_TBL_ADDR_AUTOINC : 0); +} + +#if 0 +static int hash_read(struct niu *np, unsigned long partition, + unsigned long index, unsigned long num_entries, + u64 *data) +{ + u64 val = hash_addr_regval(index, num_entries); + unsigned long i; + + if (partition >= FCRAM_NUM_PARTITIONS || + index + num_entries > FCRAM_SIZE) + return -EINVAL; + + nw64(HASH_TBL_ADDR(partition), val); + for (i = 0; i < num_entries; i++) + data[i] = nr64(HASH_TBL_DATA(partition)); + + return 0; +} +#endif + +static int hash_write(struct niu *np, unsigned long partition, + unsigned long index, unsigned long num_entries, + u64 *data) +{ + u64 val = hash_addr_regval(index, num_entries); + unsigned long i; + + if (partition >= FCRAM_NUM_PARTITIONS || + index + (num_entries * 8) > FCRAM_SIZE) + return -EINVAL; + + nw64(HASH_TBL_ADDR(partition), val); + for (i = 0; i < num_entries; i++) + nw64(HASH_TBL_DATA(partition), data[i]); + + return 0; +} + +static void fflp_reset(struct niu *np) +{ + u64 val; + + nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); + udelay(10); + nw64(FFLP_CFG_1, 0); + + val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); +} + +static void fflp_set_timings(struct niu *np) +{ + u64 val = nr64(FFLP_CFG_1); + + val &= ~FFLP_CFG_1_FFLPINITDONE; + val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); + nw64(FFLP_CFG_1, val); + + val = nr64(FFLP_CFG_1); + val |= FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); + + val = nr64(FCRAM_REF_TMR); + val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); + val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); + val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); + nw64(FCRAM_REF_TMR, val); +} + +static int fflp_set_partition(struct niu *np, u64 partition, + u64 mask, u64 base, int enable) +{ + unsigned long reg; + u64 val; + + if (partition >= FCRAM_NUM_PARTITIONS || + (mask & ~(u64)0x1f) != 0 || + (base & ~(u64)0x1f) != 0) + return -EINVAL; + + reg = FLW_PRT_SEL(partition); + + val = nr64(reg); + val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); + val |= (mask << FLW_PRT_SEL_MASK_SHIFT); + val |= (base << FLW_PRT_SEL_BASE_SHIFT); + if (enable) + val |= FLW_PRT_SEL_EXT; + nw64(reg, val); + + return 0; +} + +static int fflp_disable_all_partitions(struct niu *np) +{ + unsigned long i; + + for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { + int err = fflp_set_partition(np, 0, 0, 0, 0); + if (err) + return err; + } + return 0; +} + +static void fflp_llcsnap_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val |= FFLP_CFG_1_LLCSNAP; + else + val &= ~FFLP_CFG_1_LLCSNAP; + nw64(FFLP_CFG_1, val); +} + +static void fflp_errors_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val &= ~FFLP_CFG_1_ERRORDIS; + else + val |= FFLP_CFG_1_ERRORDIS; + nw64(FFLP_CFG_1, val); +} + +static int fflp_hash_clear(struct niu *np) +{ + struct fcram_hash_ipv4 ent; + unsigned long i; + + /* IPV4 hash entry with valid bit clear, rest is don't care. 
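+ * Writing this pattern across the whole FCRAM invalidates every flow hash entry.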
*/ + memset(&ent, 0, sizeof(ent)); + ent.header = HASH_HEADER_EXT; + + for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { + int err = hash_write(np, 0, i, 1, (u64 *) &ent); + if (err) + return err; + } + return 0; +} + +static int fflp_early_init(struct niu *np) +{ + struct niu_parent *parent; + unsigned long flags; + int err; + + niu_lock_parent(np, flags); + + parent = np->parent; + err = 0; + if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { + if (np->parent->plat_type != PLAT_TYPE_NIU) { + fflp_reset(np); + fflp_set_timings(np); + err = fflp_disable_all_partitions(np); + if (err) { + netif_printk(np, probe, KERN_DEBUG, np->dev, + "fflp_disable_all_partitions failed, err=%d\n", + err); + goto out; + } + } + + err = tcam_early_init(np); + if (err) { + netif_printk(np, probe, KERN_DEBUG, np->dev, + "tcam_early_init failed, err=%d\n", err); + goto out; + } + fflp_llcsnap_enable(np, 1); + fflp_errors_enable(np, 0); + nw64(H1POLY, 0); + nw64(H2POLY, 0); + + err = tcam_flush_all(np); + if (err) { + netif_printk(np, probe, KERN_DEBUG, np->dev, + "tcam_flush_all failed, err=%d\n", err); + goto out; + } + if (np->parent->plat_type != PLAT_TYPE_NIU) { + err = fflp_hash_clear(np); + if (err) { + netif_printk(np, probe, KERN_DEBUG, np->dev, + "fflp_hash_clear failed, err=%d\n", + err); + goto out; + } + } + + vlan_tbl_clear(np); + + parent->flags |= PARENT_FLGS_CLS_HWINIT; + } +out: + niu_unlock_parent(np, flags); + return err; +} + +static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) +{ + if (class_code < CLASS_CODE_USER_PROG1 || + class_code > CLASS_CODE_SCTP_IPV6) + return -EINVAL; + + nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); + return 0; +} + +static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) +{ + if (class_code < CLASS_CODE_USER_PROG1 || + class_code > CLASS_CODE_SCTP_IPV6) + return -EINVAL; + + nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); + return 0; +} + +/* Entries for the ports are interleaved in the TCAM */ +static u16 tcam_get_index(struct niu *np, u16 idx) +{ + /* One entry reserved for IP fragment rule */ + if (idx >= (np->clas.tcam_sz - 1)) + idx = 0; + return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); +} + +static u16 tcam_get_size(struct niu *np) +{ + /* One entry reserved for IP fragment rule */ + return np->clas.tcam_sz - 1; +} + +static u16 tcam_get_valid_entry_cnt(struct niu *np) +{ + /* One entry reserved for IP fragment rule */ + return np->clas.tcam_valid_entries - 1; +} + +static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, + u32 offset, u32 size, u32 truesize) +{ + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size); + + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) +{ + a >>= PAGE_SHIFT; + a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); + + return a & (MAX_RBR_RING_SIZE - 1); +} + +static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, + struct page ***link) +{ + unsigned int h = niu_hash_rxaddr(rp, addr); + struct page *p, **pp; + + addr &= PAGE_MASK; + pp = &rp->rxhash[h]; + for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { + if (p->index == addr) { + *link = pp; + goto found; + } + } + BUG(); + +found: + return p; +} + +static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) +{ + unsigned int h = niu_hash_rxaddr(rp, base); + + page->index = base; + page->mapping = (struct address_space *) rp->rxhash[h]; 
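+ /* page->mapping doubles as the next pointer of the rxhash chain (see niu_find_rxpage()). */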
+ rp->rxhash[h] = page; +} + +static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, + gfp_t mask, int start_index) +{ + struct page *page; + u64 addr; + int i; + + page = alloc_page(mask); + if (!page) + return -ENOMEM; + + addr = np->ops->map_page(np->device, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (!addr) { + __free_page(page); + return -ENOMEM; + } + + niu_hash_page(rp, page, addr); + if (rp->rbr_blocks_per_page > 1) + atomic_add(rp->rbr_blocks_per_page - 1, &page->_count); + + for (i = 0; i < rp->rbr_blocks_per_page; i++) { + __le32 *rbr = &rp->rbr[start_index + i]; + + *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); + addr += rp->rbr_block_size; + } + + return 0; +} + +static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) +{ + int index = rp->rbr_index; + + rp->rbr_pending++; + if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { + int err = niu_rbr_add_page(np, rp, mask, index); + + if (unlikely(err)) { + rp->rbr_pending--; + return; + } + + rp->rbr_index += rp->rbr_blocks_per_page; + BUG_ON(rp->rbr_index > rp->rbr_table_size); + if (rp->rbr_index == rp->rbr_table_size) + rp->rbr_index = 0; + + if (rp->rbr_pending >= rp->rbr_kick_thresh) { + nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); + rp->rbr_pending = 0; + } + } +} + +static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) +{ + unsigned int index = rp->rcr_index; + int num_rcr = 0; + + rp->rx_dropped++; + while (1) { + struct page *page, **link; + u64 addr, val; + u32 rcr_size; + + num_rcr++; + + val = le64_to_cpup(&rp->rcr[index]); + addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << + RCR_ENTRY_PKT_BUF_ADDR_SHIFT; + page = niu_find_rxpage(rp, addr, &link); + + rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> + RCR_ENTRY_PKTBUFSZ_SHIFT]; + if ((page->index + PAGE_SIZE) - rcr_size == addr) { + *link = (struct page *) page->mapping; + np->ops->unmap_page(np->device, page->index, + PAGE_SIZE, DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + __free_page(page); + rp->rbr_refill_pending++; + } + + index = NEXT_RCR(rp, index); + if (!(val & RCR_ENTRY_MULTI)) + break; + + } + rp->rcr_index = index; + + return num_rcr; +} + +static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, + struct rx_ring_info *rp) +{ + unsigned int index = rp->rcr_index; + struct rx_pkt_hdr1 *rh; + struct sk_buff *skb; + int len, num_rcr; + + skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); + if (unlikely(!skb)) + return niu_rx_pkt_ignore(np, rp); + + num_rcr = 0; + while (1) { + struct page *page, **link; + u32 rcr_size, append_size; + u64 addr, val, off; + + num_rcr++; + + val = le64_to_cpup(&rp->rcr[index]); + + len = (val & RCR_ENTRY_L2_LEN) >> + RCR_ENTRY_L2_LEN_SHIFT; + len -= ETH_FCS_LEN; + + addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << + RCR_ENTRY_PKT_BUF_ADDR_SHIFT; + page = niu_find_rxpage(rp, addr, &link); + + rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> + RCR_ENTRY_PKTBUFSZ_SHIFT]; + + off = addr & ~PAGE_MASK; + append_size = rcr_size; + if (num_rcr == 1) { + int ptype; + + ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); + if ((ptype == RCR_PKT_TYPE_TCP || + ptype == RCR_PKT_TYPE_UDP) && + !(val & (RCR_ENTRY_NOPORT | + RCR_ENTRY_ERROR))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + } else if (!(val & RCR_ENTRY_MULTI)) + append_size = len - skb->len; + + niu_rx_skb_append(skb, page, off, append_size, rcr_size); + if ((page->index + rp->rbr_block_size) - rcr_size == addr) { + *link = (struct page *) page->mapping; + 
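+ /* Done with this page: it has been unlinked from the rxhash above; unmap it
+ * and bump rbr_refill_pending so the refill path allocates a replacement. */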
np->ops->unmap_page(np->device, page->index, + PAGE_SIZE, DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + rp->rbr_refill_pending++; + } else + get_page(page); + + index = NEXT_RCR(rp, index); + if (!(val & RCR_ENTRY_MULTI)) + break; + + } + rp->rcr_index = index; + + len += sizeof(*rh); + len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); + __pskb_pull_tail(skb, len); + + rh = (struct rx_pkt_hdr1 *) skb->data; + if (np->dev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + ((u32)rh->hashval2_0 << 24 | + (u32)rh->hashval2_1 << 16 | + (u32)rh->hashval1_1 << 8 | + (u32)rh->hashval1_2 << 0), + PKT_HASH_TYPE_L3); + skb_pull(skb, sizeof(*rh)); + + rp->rx_packets++; + rp->rx_bytes += skb->len; + + skb->protocol = eth_type_trans(skb, np->dev); + skb_record_rx_queue(skb, rp->rx_channel); + napi_gro_receive(napi, skb); + + return num_rcr; +} + +static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) +{ + int blocks_per_page = rp->rbr_blocks_per_page; + int err, index = rp->rbr_index; + + err = 0; + while (index < (rp->rbr_table_size - blocks_per_page)) { + err = niu_rbr_add_page(np, rp, mask, index); + if (unlikely(err)) + break; + + index += blocks_per_page; + } + + rp->rbr_index = index; + return err; +} + +static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) +{ + int i; + + for (i = 0; i < MAX_RBR_RING_SIZE; i++) { + struct page *page; + + page = rp->rxhash[i]; + while (page) { + struct page *next = (struct page *) page->mapping; + u64 base = page->index; + + np->ops->unmap_page(np->device, base, PAGE_SIZE, + DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + + __free_page(page); + + page = next; + } + } + + for (i = 0; i < rp->rbr_table_size; i++) + rp->rbr[i] = cpu_to_le32(0); + rp->rbr_index = 0; +} + +static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) +{ + struct tx_buff_info *tb = &rp->tx_buffs[idx]; + struct sk_buff *skb = tb->skb; + struct tx_pkt_hdr *tp; + u64 tx_flags; + int i, len; + + tp = (struct tx_pkt_hdr *) skb->data; + tx_flags = le64_to_cpup(&tp->flags); + + rp->tx_packets++; + rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - + ((tx_flags & TXHDR_PAD) / 2)); + + len = skb_headlen(skb); + np->ops->unmap_single(np->device, tb->mapping, + len, DMA_TO_DEVICE); + + if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) + rp->mark_pending--; + + tb->skb = NULL; + do { + idx = NEXT_TX(rp, idx); + len -= MAX_TX_DESC_LEN; + } while (len > 0); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + tb = &rp->tx_buffs[idx]; + BUG_ON(tb->skb != NULL); + np->ops->unmap_page(np->device, tb->mapping, + skb_frag_size(&skb_shinfo(skb)->frags[i]), + DMA_TO_DEVICE); + idx = NEXT_TX(rp, idx); + } + + dev_kfree_skb(skb); + + return idx; +} + +#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) + +static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) +{ + struct netdev_queue *txq; + u16 pkt_cnt, tmp; + int cons, index; + u64 cs; + + index = (rp - np->tx_rings); + txq = netdev_get_tx_queue(np->dev, index); + + cs = rp->tx_cs; + if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) + goto out; + + tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; + pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & + (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); + + rp->last_pkt_cnt = tmp; + + cons = rp->cons; + + netif_printk(np, tx_done, KERN_DEBUG, np->dev, + "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); + + while (pkt_cnt--) + cons = release_tx_packet(np, rp, cons); + + rp->cons = cons; + smp_mb(); + +out: + if 
(unlikely(netif_tx_queue_stopped(txq) && + (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } +} + +static inline void niu_sync_rx_discard_stats(struct niu *np, + struct rx_ring_info *rp, + const int limit) +{ + /* This elaborate scheme is needed for reading the RX discard + * counters, as they are only 16-bit and can overflow quickly, + * and because the overflow indication bit is not usable as + * the counter value does not wrap, but remains at max value + * 0xFFFF. + * + * In theory and in practice counters can be lost in between + * reading nr64() and clearing the counter nw64(). For this + * reason, the number of counter clearings nw64() is + * limited/reduced though the limit parameter. + */ + int rx_channel = rp->rx_channel; + u32 misc, wred; + + /* RXMISC (Receive Miscellaneous Discard Count), covers the + * following discard events: IPP (Input Port Process), + * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive + * Block Ring) prefetch buffer is empty. + */ + misc = nr64(RXMISC(rx_channel)); + if (unlikely((misc & RXMISC_COUNT) > limit)) { + nw64(RXMISC(rx_channel), 0); + rp->rx_errors += misc & RXMISC_COUNT; + + if (unlikely(misc & RXMISC_OFLOW)) + dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", + rx_channel); + + netif_printk(np, rx_err, KERN_DEBUG, np->dev, + "rx-%d: MISC drop=%u over=%u\n", + rx_channel, misc, misc-limit); + } + + /* WRED (Weighted Random Early Discard) by hardware */ + wred = nr64(RED_DIS_CNT(rx_channel)); + if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { + nw64(RED_DIS_CNT(rx_channel), 0); + rp->rx_dropped += wred & RED_DIS_CNT_COUNT; + + if (unlikely(wred & RED_DIS_CNT_OFLOW)) + dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); + + netif_printk(np, rx_err, KERN_DEBUG, np->dev, + "rx-%d: WRED drop=%u over=%u\n", + rx_channel, wred, wred-limit); + } +} + +static int niu_rx_work(struct napi_struct *napi, struct niu *np, + struct rx_ring_info *rp, int budget) +{ + int qlen, rcr_done = 0, work_done = 0; + struct rxdma_mailbox *mbox = rp->mbox; + u64 stat; + +#if 1 + stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); + qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; +#else + stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); + qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); +#endif + mbox->rx_dma_ctl_stat = 0; + mbox->rcrstat_a = 0; + + netif_printk(np, rx_status, KERN_DEBUG, np->dev, + "%s(chan[%d]), stat[%llx] qlen=%d\n", + __func__, rp->rx_channel, (unsigned long long)stat, qlen); + + rcr_done = work_done = 0; + qlen = min(qlen, budget); + while (work_done < qlen) { + rcr_done += niu_process_rx_pkt(napi, np, rp); + work_done++; + } + + if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { + unsigned int i; + + for (i = 0; i < rp->rbr_refill_pending; i++) + niu_rbr_refill(np, rp, GFP_ATOMIC); + rp->rbr_refill_pending = 0; + } + + stat = (RX_DMA_CTL_STAT_MEX | + ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | + ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); + + nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); + + /* Only sync discards stats when qlen indicate potential for drops */ + if (qlen > 10) + niu_sync_rx_discard_stats(np, rp, 0x7FFF); + + return work_done; +} + +static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) +{ + u64 v0 = lp->v0; + u32 tx_vec = (v0 >> 32); + u32 rx_vec = (v0 & 
0xffffffff); + int i, work_done = 0; + + netif_printk(np, intr, KERN_DEBUG, np->dev, + "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + if (tx_vec & (1 << rp->tx_channel)) + niu_tx_work(np, rp); + nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); + } + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + if (rx_vec & (1 << rp->rx_channel)) { + int this_work_done; + + this_work_done = niu_rx_work(&lp->napi, np, rp, + budget); + + budget -= this_work_done; + work_done += this_work_done; + } + nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); + } + + return work_done; +} + +static int niu_poll(struct napi_struct *napi, int budget) +{ + struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); + struct niu *np = lp->np; + int work_done; + + work_done = niu_poll_core(np, lp, budget); + + if (work_done < budget) { + napi_complete(napi); + niu_ldg_rearm(np, lp, 1); + } + return work_done; +} + +static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, + u64 stat) +{ + netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); + + if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) + pr_cont("RBR_TMOUT "); + if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) + pr_cont("RSP_CNT "); + if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) + pr_cont("BYTE_EN_BUS "); + if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) + pr_cont("RSP_DAT "); + if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) + pr_cont("RCR_ACK "); + if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) + pr_cont("RCR_SHA_PAR "); + if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) + pr_cont("RBR_PRE_PAR "); + if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) + pr_cont("CONFIG "); + if (stat & RX_DMA_CTL_STAT_RCRINCON) + pr_cont("RCRINCON "); + if (stat & RX_DMA_CTL_STAT_RCRFULL) + pr_cont("RCRFULL "); + if (stat & RX_DMA_CTL_STAT_RBRFULL) + pr_cont("RBRFULL "); + if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) + pr_cont("RBRLOGPAGE "); + if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) + pr_cont("CFIGLOGPAGE "); + if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) + pr_cont("DC_FIDO "); + + pr_cont(")\n"); +} + +static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) +{ + u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); + int err = 0; + + + if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | + RX_DMA_CTL_STAT_PORT_FATAL)) + err = -EINVAL; + + if (err) { + netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", + rp->rx_channel, + (unsigned long long) stat); + + niu_log_rxchan_errors(np, rp, stat); + } + + nw64(RX_DMA_CTL_STAT(rp->rx_channel), + stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); + + return err; +} + +static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, + u64 cs) +{ + netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); + + if (cs & TX_CS_MBOX_ERR) + pr_cont("MBOX "); + if (cs & TX_CS_PKT_SIZE_ERR) + pr_cont("PKT_SIZE "); + if (cs & TX_CS_TX_RING_OFLOW) + pr_cont("TX_RING_OFLOW "); + if (cs & TX_CS_PREF_BUF_PAR_ERR) + pr_cont("PREF_BUF_PAR "); + if (cs & TX_CS_NACK_PREF) + pr_cont("NACK_PREF "); + if (cs & TX_CS_NACK_PKT_RD) + pr_cont("NACK_PKT_RD "); + if (cs & TX_CS_CONF_PART_ERR) + pr_cont("CONF_PART "); + if (cs & TX_CS_PKT_PRT_ERR) + pr_cont("PKT_PTR "); + + pr_cont(")\n"); +} + +static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) +{ + u64 cs, logh, logl; + + cs = nr64(TX_CS(rp->tx_channel)); + logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); + logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); + + netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] 
logl[%llx]\n", + rp->tx_channel, + (unsigned long long)cs, + (unsigned long long)logh, + (unsigned long long)logl); + + niu_log_txchan_errors(np, rp, cs); + + return -ENODEV; +} + +static int niu_mif_interrupt(struct niu *np) +{ + u64 mif_status = nr64(MIF_STATUS); + int phy_mdint = 0; + + if (np->flags & NIU_FLAGS_XMAC) { + u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); + + if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) + phy_mdint = 1; + } + + netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", + (unsigned long long)mif_status, phy_mdint); + + return -ENODEV; +} + +static void niu_xmac_interrupt(struct niu *np) +{ + struct niu_xmac_stats *mp = &np->mac_stats.xmac; + u64 val; + + val = nr64_mac(XTXMAC_STATUS); + if (val & XTXMAC_STATUS_FRAME_CNT_EXP) + mp->tx_frames += TXMAC_FRM_CNT_COUNT; + if (val & XTXMAC_STATUS_BYTE_CNT_EXP) + mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; + if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) + mp->tx_fifo_errors++; + if (val & XTXMAC_STATUS_TXMAC_OFLOW) + mp->tx_overflow_errors++; + if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) + mp->tx_max_pkt_size_errors++; + if (val & XTXMAC_STATUS_TXMAC_UFLOW) + mp->tx_underflow_errors++; + + val = nr64_mac(XRXMAC_STATUS); + if (val & XRXMAC_STATUS_LCL_FLT_STATUS) + mp->rx_local_faults++; + if (val & XRXMAC_STATUS_RFLT_DET) + mp->rx_remote_faults++; + if (val & XRXMAC_STATUS_LFLT_CNT_EXP) + mp->rx_link_faults += LINK_FAULT_CNT_COUNT; + if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) + mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; + if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) + mp->rx_frags += RXMAC_FRAG_CNT_COUNT; + if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) + mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) + mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; + if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) + mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; + if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) + mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; + if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) + mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; + if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) + mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; + if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) + mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; + if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) + mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; + if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) + mp->rx_octets += RXMAC_BT_CNT_COUNT; + if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) + mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; + if (val & XRXMAC_STATUS_LENERR_CNT_EXP) + mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; + if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) + mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; + if (val & XRXMAC_STATUS_RXUFLOW) + mp->rx_underflows++; + if (val & XRXMAC_STATUS_RXOFLOW) + mp->rx_overflows++; + + val = nr64_mac(XMAC_FC_STAT); + if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) + mp->pause_off_state++; + if (val & XMAC_FC_STAT_TX_MAC_PAUSE) + mp->pause_on_state++; + if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) + mp->pause_received++; +} + +static void niu_bmac_interrupt(struct niu *np) +{ + struct niu_bmac_stats *mp = &np->mac_stats.bmac; + u64 val; + + val = nr64_mac(BTXMAC_STATUS); + if (val & BTXMAC_STATUS_UNDERRUN) + mp->tx_underflow_errors++; + if (val & BTXMAC_STATUS_MAX_PKT_ERR) + mp->tx_max_pkt_size_errors++; + if (val & BTXMAC_STATUS_BYTE_CNT_EXP) + mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; + if (val & 
BTXMAC_STATUS_FRAME_CNT_EXP) + mp->tx_frames += BTXMAC_FRM_CNT_COUNT; + + val = nr64_mac(BRXMAC_STATUS); + if (val & BRXMAC_STATUS_OVERFLOW) + mp->rx_overflows++; + if (val & BRXMAC_STATUS_FRAME_CNT_EXP) + mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; + if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) + mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; + if (val & BRXMAC_STATUS_CRC_ERR_EXP) + mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; + if (val & BRXMAC_STATUS_LEN_ERR_EXP) + mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; + + val = nr64_mac(BMAC_CTRL_STATUS); + if (val & BMAC_CTRL_STATUS_NOPAUSE) + mp->pause_off_state++; + if (val & BMAC_CTRL_STATUS_PAUSE) + mp->pause_on_state++; + if (val & BMAC_CTRL_STATUS_PAUSE_RECV) + mp->pause_received++; +} + +static int niu_mac_interrupt(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_xmac_interrupt(np); + else + niu_bmac_interrupt(np); + + return 0; +} + +static void niu_log_device_error(struct niu *np, u64 stat) +{ + netdev_err(np->dev, "Core device errors ( "); + + if (stat & SYS_ERR_MASK_META2) + pr_cont("META2 "); + if (stat & SYS_ERR_MASK_META1) + pr_cont("META1 "); + if (stat & SYS_ERR_MASK_PEU) + pr_cont("PEU "); + if (stat & SYS_ERR_MASK_TXC) + pr_cont("TXC "); + if (stat & SYS_ERR_MASK_RDMC) + pr_cont("RDMC "); + if (stat & SYS_ERR_MASK_TDMC) + pr_cont("TDMC "); + if (stat & SYS_ERR_MASK_ZCP) + pr_cont("ZCP "); + if (stat & SYS_ERR_MASK_FFLP) + pr_cont("FFLP "); + if (stat & SYS_ERR_MASK_IPP) + pr_cont("IPP "); + if (stat & SYS_ERR_MASK_MAC) + pr_cont("MAC "); + if (stat & SYS_ERR_MASK_SMX) + pr_cont("SMX "); + + pr_cont(")\n"); +} + +static int niu_device_error(struct niu *np) +{ + u64 stat = nr64(SYS_ERR_STAT); + + netdev_err(np->dev, "Core device error, stat[%llx]\n", + (unsigned long long)stat); + + niu_log_device_error(np, stat); + + return -ENODEV; +} + +static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, + u64 v0, u64 v1, u64 v2) +{ + + int i, err = 0; + + lp->v0 = v0; + lp->v1 = v1; + lp->v2 = v2; + + if (v1 & 0x00000000ffffffffULL) { + u32 rx_vec = (v1 & 0xffffffff); + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + if (rx_vec & (1 << rp->rx_channel)) { + int r = niu_rx_error(np, rp); + if (r) { + err = r; + } else { + if (!v0) + nw64(RX_DMA_CTL_STAT(rp->rx_channel), + RX_DMA_CTL_STAT_MEX); + } + } + } + } + if (v1 & 0x7fffffff00000000ULL) { + u32 tx_vec = (v1 >> 32) & 0x7fffffff; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + if (tx_vec & (1 << rp->tx_channel)) { + int r = niu_tx_error(np, rp); + if (r) + err = r; + } + } + } + if ((v0 | v1) & 0x8000000000000000ULL) { + int r = niu_mif_interrupt(np); + if (r) + err = r; + } + if (v2) { + if (v2 & 0x01ef) { + int r = niu_mac_interrupt(np); + if (r) + err = r; + } + if (v2 & 0x0210) { + int r = niu_device_error(np); + if (r) + err = r; + } + } + + if (err) + niu_enable_interrupts(np, 0); + + return err; +} + +static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, + int ldn) +{ + struct rxdma_mailbox *mbox = rp->mbox; + u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); + + stat_write = (RX_DMA_CTL_STAT_RCRTHRES | + RX_DMA_CTL_STAT_RCRTO); + nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); + + netif_printk(np, intr, KERN_DEBUG, np->dev, + "%s() stat[%llx]\n", __func__, (unsigned long long)stat); +} + +static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, + int ldn) +{ + rp->tx_cs = nr64(TX_CS(rp->tx_channel)); + + 
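+ /* Descriptive note: latch the channel's control/status word here; niu_tx_work() later consumes it from rp->tx_cs when the NAPI poll for this LDG runs. */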
netif_printk(np, intr, KERN_DEBUG, np->dev, + "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); +} + +static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) +{ + struct niu_parent *parent = np->parent; + u32 rx_vec, tx_vec; + int i; + + tx_vec = (v0 >> 32); + rx_vec = (v0 & 0xffffffff); + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + int ldn = LDN_RXDMA(rp->rx_channel); + + if (parent->ldg_map[ldn] != ldg) + continue; + + nw64(LD_IM0(ldn), LD_IM0_MASK); + if (rx_vec & (1 << rp->rx_channel)) + niu_rxchan_intr(np, rp, ldn); + } + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + int ldn = LDN_TXDMA(rp->tx_channel); + + if (parent->ldg_map[ldn] != ldg) + continue; + + nw64(LD_IM0(ldn), LD_IM0_MASK); + if (tx_vec & (1 << rp->tx_channel)) + niu_txchan_intr(np, rp, ldn); + } +} + +static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, + u64 v0, u64 v1, u64 v2) +{ + if (likely(napi_schedule_prep(&lp->napi))) { + lp->v0 = v0; + lp->v1 = v1; + lp->v2 = v2; + __niu_fastpath_interrupt(np, lp->ldg_num, v0); + __napi_schedule(&lp->napi); + } +} + +static irqreturn_t niu_interrupt(int irq, void *dev_id) +{ + struct niu_ldg *lp = dev_id; + struct niu *np = lp->np; + int ldg = lp->ldg_num; + unsigned long flags; + u64 v0, v1, v2; + + if (netif_msg_intr(np)) + printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", + __func__, lp, ldg); + + spin_lock_irqsave(&np->lock, flags); + + v0 = nr64(LDSV0(ldg)); + v1 = nr64(LDSV1(ldg)); + v2 = nr64(LDSV2(ldg)); + + if (netif_msg_intr(np)) + pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", + (unsigned long long) v0, + (unsigned long long) v1, + (unsigned long long) v2); + + if (unlikely(!v0 && !v1 && !v2)) { + spin_unlock_irqrestore(&np->lock, flags); + return IRQ_NONE; + } + + if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { + int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); + if (err) + goto out; + } + if (likely(v0 & ~((u64)1 << LDN_MIF))) + niu_schedule_napi(np, lp, v0, v1, v2); + else + niu_ldg_rearm(np, lp, 1); +out: + spin_unlock_irqrestore(&np->lock, flags); + + return IRQ_HANDLED; +} + +static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) +{ + if (rp->mbox) { + np->ops->free_coherent(np->device, + sizeof(struct rxdma_mailbox), + rp->mbox, rp->mbox_dma); + rp->mbox = NULL; + } + if (rp->rcr) { + np->ops->free_coherent(np->device, + MAX_RCR_RING_SIZE * sizeof(__le64), + rp->rcr, rp->rcr_dma); + rp->rcr = NULL; + rp->rcr_table_size = 0; + rp->rcr_index = 0; + } + if (rp->rbr) { + niu_rbr_free(np, rp); + + np->ops->free_coherent(np->device, + MAX_RBR_RING_SIZE * sizeof(__le32), + rp->rbr, rp->rbr_dma); + rp->rbr = NULL; + rp->rbr_table_size = 0; + rp->rbr_index = 0; + } + kfree(rp->rxhash); + rp->rxhash = NULL; +} + +static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) +{ + if (rp->mbox) { + np->ops->free_coherent(np->device, + sizeof(struct txdma_mailbox), + rp->mbox, rp->mbox_dma); + rp->mbox = NULL; + } + if (rp->descr) { + int i; + + for (i = 0; i < MAX_TX_RING_SIZE; i++) { + if (rp->tx_buffs[i].skb) + (void) release_tx_packet(np, rp, i); + } + + np->ops->free_coherent(np->device, + MAX_TX_RING_SIZE * sizeof(__le64), + rp->descr, rp->descr_dma); + rp->descr = NULL; + rp->pending = 0; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + } +} + +static void niu_free_channels(struct niu *np) +{ + int i; + + if (np->rx_rings) { + for (i = 0; i < np->num_rx_rings; i++) { + struct 
rx_ring_info *rp = &np->rx_rings[i]; + + niu_free_rx_ring_info(np, rp); + } + kfree(np->rx_rings); + np->rx_rings = NULL; + np->num_rx_rings = 0; + } + + if (np->tx_rings) { + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_free_tx_ring_info(np, rp); + } + kfree(np->tx_rings); + np->tx_rings = NULL; + np->num_tx_rings = 0; + } +} + +static int niu_alloc_rx_ring_info(struct niu *np, + struct rx_ring_info *rp) +{ + BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); + + rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), + GFP_KERNEL); + if (!rp->rxhash) + return -ENOMEM; + + rp->mbox = np->ops->alloc_coherent(np->device, + sizeof(struct rxdma_mailbox), + &rp->mbox_dma, GFP_KERNEL); + if (!rp->mbox) + return -ENOMEM; + if ((unsigned long)rp->mbox & (64UL - 1)) { + netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", + rp->mbox); + return -EINVAL; + } + + rp->rcr = np->ops->alloc_coherent(np->device, + MAX_RCR_RING_SIZE * sizeof(__le64), + &rp->rcr_dma, GFP_KERNEL); + if (!rp->rcr) + return -ENOMEM; + if ((unsigned long)rp->rcr & (64UL - 1)) { + netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", + rp->rcr); + return -EINVAL; + } + rp->rcr_table_size = MAX_RCR_RING_SIZE; + rp->rcr_index = 0; + + rp->rbr = np->ops->alloc_coherent(np->device, + MAX_RBR_RING_SIZE * sizeof(__le32), + &rp->rbr_dma, GFP_KERNEL); + if (!rp->rbr) + return -ENOMEM; + if ((unsigned long)rp->rbr & (64UL - 1)) { + netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", + rp->rbr); + return -EINVAL; + } + rp->rbr_table_size = MAX_RBR_RING_SIZE; + rp->rbr_index = 0; + rp->rbr_pending = 0; + + return 0; +} + +static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) +{ + int mtu = np->dev->mtu; + + /* These values are recommended by the HW designers for fair + * utilization of DRR amongst the rings. + */ + rp->max_burst = mtu + 32; + if (rp->max_burst > 4096) + rp->max_burst = 4096; +} + +static int niu_alloc_tx_ring_info(struct niu *np, + struct tx_ring_info *rp) +{ + BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); + + rp->mbox = np->ops->alloc_coherent(np->device, + sizeof(struct txdma_mailbox), + &rp->mbox_dma, GFP_KERNEL); + if (!rp->mbox) + return -ENOMEM; + if ((unsigned long)rp->mbox & (64UL - 1)) { + netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", + rp->mbox); + return -EINVAL; + } + + rp->descr = np->ops->alloc_coherent(np->device, + MAX_TX_RING_SIZE * sizeof(__le64), + &rp->descr_dma, GFP_KERNEL); + if (!rp->descr) + return -ENOMEM; + if ((unsigned long)rp->descr & (64UL - 1)) { + netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", + rp->descr); + return -EINVAL; + } + + rp->pending = MAX_TX_RING_SIZE; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + + /* XXX make these configurable... 
XXX */ + rp->mark_freq = rp->pending / 4; + + niu_set_max_burst(np, rp); + + return 0; +} + +static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) +{ + u16 bss; + + bss = min(PAGE_SHIFT, 15); + + rp->rbr_block_size = 1 << bss; + rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); + + rp->rbr_sizes[0] = 256; + rp->rbr_sizes[1] = 1024; + if (np->dev->mtu > ETH_DATA_LEN) { + switch (PAGE_SIZE) { + case 4 * 1024: + rp->rbr_sizes[2] = 4096; + break; + + default: + rp->rbr_sizes[2] = 8192; + break; + } + } else { + rp->rbr_sizes[2] = 2048; + } + rp->rbr_sizes[3] = rp->rbr_block_size; +} + +static int niu_alloc_channels(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int first_rx_channel, first_tx_channel; + int num_rx_rings, num_tx_rings; + struct rx_ring_info *rx_rings; + struct tx_ring_info *tx_rings; + int i, port, err; + + port = np->port; + first_rx_channel = first_tx_channel = 0; + for (i = 0; i < port; i++) { + first_rx_channel += parent->rxchan_per_port[i]; + first_tx_channel += parent->txchan_per_port[i]; + } + + num_rx_rings = parent->rxchan_per_port[port]; + num_tx_rings = parent->txchan_per_port[port]; + + rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info), + GFP_KERNEL); + err = -ENOMEM; + if (!rx_rings) + goto out_err; + + np->num_rx_rings = num_rx_rings; + smp_wmb(); + np->rx_rings = rx_rings; + + netif_set_real_num_rx_queues(np->dev, num_rx_rings); + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + rp->np = np; + rp->rx_channel = first_rx_channel + i; + + err = niu_alloc_rx_ring_info(np, rp); + if (err) + goto out_err; + + niu_size_rbr(np, rp); + + /* XXX better defaults, configurable, etc... XXX */ + rp->nonsyn_window = 64; + rp->nonsyn_threshold = rp->rcr_table_size - 64; + rp->syn_window = 64; + rp->syn_threshold = rp->rcr_table_size - 64; + rp->rcr_pkt_threshold = 16; + rp->rcr_timeout = 8; + rp->rbr_kick_thresh = RBR_REFILL_MIN; + if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) + rp->rbr_kick_thresh = rp->rbr_blocks_per_page; + + err = niu_rbr_fill(np, rp, GFP_KERNEL); + if (err) + return err; + } + + tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), + GFP_KERNEL); + err = -ENOMEM; + if (!tx_rings) + goto out_err; + + np->num_tx_rings = num_tx_rings; + smp_wmb(); + np->tx_rings = tx_rings; + + netif_set_real_num_tx_queues(np->dev, num_tx_rings); + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + rp->np = np; + rp->tx_channel = first_tx_channel + i; + + err = niu_alloc_tx_ring_info(np, rp); + if (err) + goto out_err; + } + + return 0; + +out_err: + niu_free_channels(np); + return err; +} + +static int niu_tx_cs_sng_poll(struct niu *np, int channel) +{ + int limit = 1000; + + while (--limit > 0) { + u64 val = nr64(TX_CS(channel)); + if (val & TX_CS_SNG_STATE) + return 0; + } + return -ENODEV; +} + +static int niu_tx_channel_stop(struct niu *np, int channel) +{ + u64 val = nr64(TX_CS(channel)); + + val |= TX_CS_STOP_N_GO; + nw64(TX_CS(channel), val); + + return niu_tx_cs_sng_poll(np, channel); +} + +static int niu_tx_cs_reset_poll(struct niu *np, int channel) +{ + int limit = 1000; + + while (--limit > 0) { + u64 val = nr64(TX_CS(channel)); + if (!(val & TX_CS_RST)) + return 0; + } + return -ENODEV; +} + +static int niu_tx_channel_reset(struct niu *np, int channel) +{ + u64 val = nr64(TX_CS(channel)); + int err; + + val |= TX_CS_RST; + nw64(TX_CS(channel), val); + + err = niu_tx_cs_reset_poll(np, channel); + if (!err) + 
nw64(TX_RING_KICK(channel), 0); + + return err; +} + +static int niu_tx_channel_lpage_init(struct niu *np, int channel) +{ + u64 val; + + nw64(TX_LOG_MASK1(channel), 0); + nw64(TX_LOG_VAL1(channel), 0); + nw64(TX_LOG_MASK2(channel), 0); + nw64(TX_LOG_VAL2(channel), 0); + nw64(TX_LOG_PAGE_RELO1(channel), 0); + nw64(TX_LOG_PAGE_RELO2(channel), 0); + nw64(TX_LOG_PAGE_HDL(channel), 0); + + val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; + val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); + nw64(TX_LOG_PAGE_VLD(channel), val); + + /* XXX TXDMA 32bit mode? XXX */ + + return 0; +} + +static void niu_txc_enable_port(struct niu *np, int on) +{ + unsigned long flags; + u64 val, mask; + + niu_lock_parent(np, flags); + val = nr64(TXC_CONTROL); + mask = (u64)1 << np->port; + if (on) { + val |= TXC_CONTROL_ENABLE | mask; + } else { + val &= ~mask; + if ((val & ~TXC_CONTROL_ENABLE) == 0) + val &= ~TXC_CONTROL_ENABLE; + } + nw64(TXC_CONTROL, val); + niu_unlock_parent(np, flags); +} + +static void niu_txc_set_imask(struct niu *np, u64 imask) +{ + unsigned long flags; + u64 val; + + niu_lock_parent(np, flags); + val = nr64(TXC_INT_MASK); + val &= ~TXC_INT_MASK_VAL(np->port); + val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); + niu_unlock_parent(np, flags); +} + +static void niu_txc_port_dma_enable(struct niu *np, int on) +{ + u64 val = 0; + + if (on) { + int i; + + for (i = 0; i < np->num_tx_rings; i++) + val |= (1 << np->tx_rings[i].tx_channel); + } + nw64(TXC_PORT_DMA(np->port), val); +} + +static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + int err, channel = rp->tx_channel; + u64 val, ring_len; + + err = niu_tx_channel_stop(np, channel); + if (err) + return err; + + err = niu_tx_channel_reset(np, channel); + if (err) + return err; + + err = niu_tx_channel_lpage_init(np, channel); + if (err) + return err; + + nw64(TXC_DMA_MAX(channel), rp->max_burst); + nw64(TX_ENT_MSK(channel), 0); + + if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | + TX_RNG_CFIG_STADDR)) { + netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", + channel, (unsigned long long)rp->descr_dma); + return -EINVAL; + } + + /* The length field in TX_RNG_CFIG is measured in 64-byte + * blocks. rp->pending is the number of TX descriptors in + * our ring, 8 bytes each, thus we divide by 8 bytes more + * to get the proper value the chip wants. 
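+ * For example, a ring of 256 descriptors (8 bytes each, 2048 bytes in total) would be programmed as 2048 / 64 = 32 blocks, i.e. 256 / 8.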
+ */ + ring_len = (rp->pending / 8); + + val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | + rp->descr_dma); + nw64(TX_RNG_CFIG(channel), val); + + if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || + ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { + netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", + channel, (unsigned long long)rp->mbox_dma); + return -EINVAL; + } + nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); + nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); + + nw64(TX_CS(channel), 0); + + rp->last_pkt_cnt = 0; + + return 0; +} + +static void niu_init_rdc_groups(struct niu *np) +{ + struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; + int i, first_table_num = tp->first_table_num; + + for (i = 0; i < tp->num_tables; i++) { + struct rdc_table *tbl = &tp->tables[i]; + int this_table = first_table_num + i; + int slot; + + for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) + nw64(RDC_TBL(this_table, slot), + tbl->rxdma_channel[slot]); + } + + nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); +} + +static void niu_init_drr_weight(struct niu *np) +{ + int type = phy_decode(np->parent->port_phy, np->port); + u64 val; + + switch (type) { + case PORT_TYPE_10G: + val = PT_DRR_WEIGHT_DEFAULT_10G; + break; + + case PORT_TYPE_1G: + default: + val = PT_DRR_WEIGHT_DEFAULT_1G; + break; + } + nw64(PT_DRR_WT(np->port), val); +} + +static int niu_init_hostinfo(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int i, err, num_alt = niu_num_alt_addr(np); + int first_rdc_table = tp->first_table_num; + + err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + if (err) + return err; + + err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + if (err) + return err; + + for (i = 0; i < num_alt; i++) { + err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); + if (err) + return err; + } + + return 0; +} + +static int niu_rx_channel_reset(struct niu *np, int channel) +{ + return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), + RXDMA_CFIG1_RST, 1000, 10, + "RXDMA_CFIG1"); +} + +static int niu_rx_channel_lpage_init(struct niu *np, int channel) +{ + u64 val; + + nw64(RX_LOG_MASK1(channel), 0); + nw64(RX_LOG_VAL1(channel), 0); + nw64(RX_LOG_MASK2(channel), 0); + nw64(RX_LOG_VAL2(channel), 0); + nw64(RX_LOG_PAGE_RELO1(channel), 0); + nw64(RX_LOG_PAGE_RELO2(channel), 0); + nw64(RX_LOG_PAGE_HDL(channel), 0); + + val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; + val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); + nw64(RX_LOG_PAGE_VLD(channel), val); + + return 0; +} + +static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) +{ + u64 val; + + val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | + ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | + ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | + ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); + nw64(RDC_RED_PARA(rp->rx_channel), val); +} + +static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) +{ + u64 val = 0; + + *ret = 0; + switch (rp->rbr_block_size) { + case 4 * 1024: + val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 16 * 1024: + val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 32 * 1024: + val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + default: + return -EINVAL; + } + val |= RBR_CFIG_B_VLD2; + switch 
(rp->rbr_sizes[2]) { + case 2 * 1024: + val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 4 * 1024: + val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 16 * 1024: + val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + + default: + return -EINVAL; + } + val |= RBR_CFIG_B_VLD1; + switch (rp->rbr_sizes[1]) { + case 1 * 1024: + val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 2 * 1024: + val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 4 * 1024: + val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + + default: + return -EINVAL; + } + val |= RBR_CFIG_B_VLD0; + switch (rp->rbr_sizes[0]) { + case 256: + val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 512: + val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 1 * 1024: + val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 2 * 1024: + val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + + default: + return -EINVAL; + } + + *ret = val; + return 0; +} + +static int niu_enable_rx_channel(struct niu *np, int channel, int on) +{ + u64 val = nr64(RXDMA_CFIG1(channel)); + int limit; + + if (on) + val |= RXDMA_CFIG1_EN; + else + val &= ~RXDMA_CFIG1_EN; + nw64(RXDMA_CFIG1(channel), val); + + limit = 1000; + while (--limit > 0) { + if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) + break; + udelay(10); + } + if (limit <= 0) + return -ENODEV; + return 0; +} + +static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + int err, channel = rp->rx_channel; + u64 val; + + err = niu_rx_channel_reset(np, channel); + if (err) + return err; + + err = niu_rx_channel_lpage_init(np, channel); + if (err) + return err; + + niu_rx_channel_wred_init(np, rp); + + nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); + nw64(RX_DMA_CTL_STAT(channel), + (RX_DMA_CTL_STAT_MEX | + RX_DMA_CTL_STAT_RCRTHRES | + RX_DMA_CTL_STAT_RCRTO | + RX_DMA_CTL_STAT_RBR_EMPTY)); + nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); + nw64(RXDMA_CFIG2(channel), + ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | + RXDMA_CFIG2_FULL_HDR)); + nw64(RBR_CFIG_A(channel), + ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | + (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); + err = niu_compute_rbr_cfig_b(rp, &val); + if (err) + return err; + nw64(RBR_CFIG_B(channel), val); + nw64(RCRCFIG_A(channel), + ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | + (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); + nw64(RCRCFIG_B(channel), + ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | + RCRCFIG_B_ENTOUT | + ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); + + err = niu_enable_rx_channel(np, channel, 1); + if (err) + return err; + + nw64(RBR_KICK(channel), rp->rbr_index); + + val = nr64(RX_DMA_CTL_STAT(channel)); + val |= RX_DMA_CTL_STAT_RBR_EMPTY; + nw64(RX_DMA_CTL_STAT(channel), val); + + return 0; +} + +static int niu_init_rx_channels(struct niu *np) +{ + unsigned long flags; + u64 seed = jiffies_64; + int err, i; + + niu_lock_parent(np, flags); + nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); + nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); + niu_unlock_parent(np, flags); + + /* XXX RXDMA 32bit mode? 
XXX */ + + niu_init_rdc_groups(np); + niu_init_drr_weight(np); + + err = niu_init_hostinfo(np); + if (err) + return err; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + err = niu_init_one_rx_channel(np, rp); + if (err) + return err; + } + + return 0; +} + +static int niu_set_ip_frag_rule(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_classifier *cp = &np->clas; + struct niu_tcam_entry *tp; + int index, err; + + index = cp->tcam_top; + tp = &parent->tcam[index]; + + /* Note that the noport bit is the same in both ipv4 and + * ipv6 format TCAM entries. + */ + memset(tp, 0, sizeof(*tp)); + tp->key[1] = TCAM_V4KEY1_NOPORT; + tp->key_mask[1] = TCAM_V4KEY1_NOPORT; + tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | + ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); + err = tcam_write(np, index, tp->key, tp->key_mask); + if (err) + return err; + err = tcam_assoc_write(np, index, tp->assoc_data); + if (err) + return err; + tp->valid = 1; + cp->tcam_valid_entries++; + + return 0; +} + +static int niu_init_classifier_hw(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_classifier *cp = &np->clas; + int i, err; + + nw64(H1POLY, cp->h1_init); + nw64(H2POLY, cp->h2_init); + + err = niu_init_hostinfo(np); + if (err) + return err; + + for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { + struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; + + vlan_tbl_write(np, i, np->port, + vp->vlan_pref, vp->rdc_num); + } + + for (i = 0; i < cp->num_alt_mac_mappings; i++) { + struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; + + err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, + ap->rdc_num, ap->mac_pref); + if (err) + return err; + } + + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { + int index = i - CLASS_CODE_USER_PROG1; + + err = niu_set_tcam_key(np, i, parent->tcam_key[index]); + if (err) + return err; + err = niu_set_flow_key(np, i, parent->flow_key[index]); + if (err) + return err; + } + + err = niu_set_ip_frag_rule(np); + if (err) + return err; + + tcam_enable(np, 1); + + return 0; +} + +static int niu_zcp_write(struct niu *np, int index, u64 *data) +{ + nw64(ZCP_RAM_DATA0, data[0]); + nw64(ZCP_RAM_DATA1, data[1]); + nw64(ZCP_RAM_DATA2, data[2]); + nw64(ZCP_RAM_DATA3, data[3]); + nw64(ZCP_RAM_DATA4, data[4]); + nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); + nw64(ZCP_RAM_ACC, + (ZCP_RAM_ACC_WRITE | + (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | + (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); + + return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); +} + +static int niu_zcp_read(struct niu *np, int index, u64 *data) +{ + int err; + + err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); + if (err) { + netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", + (unsigned long long)nr64(ZCP_RAM_ACC)); + return err; + } + + nw64(ZCP_RAM_ACC, + (ZCP_RAM_ACC_READ | + (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | + (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); + + err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); + if (err) { + netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", + (unsigned long long)nr64(ZCP_RAM_ACC)); + return err; + } + + data[0] = nr64(ZCP_RAM_DATA0); + data[1] = nr64(ZCP_RAM_DATA1); + data[2] = nr64(ZCP_RAM_DATA2); + data[3] = nr64(ZCP_RAM_DATA3); + data[4] = nr64(ZCP_RAM_DATA4); + + return 0; +} + +static void niu_zcp_cfifo_reset(struct niu *np) +{ + u64 val = nr64(RESET_CFIFO); + + val |= 
RESET_CFIFO_RST(np->port); + nw64(RESET_CFIFO, val); + udelay(10); + + val &= ~RESET_CFIFO_RST(np->port); + nw64(RESET_CFIFO, val); +} + +static int niu_init_zcp(struct niu *np) +{ + u64 data[5], rbuf[5]; + int i, max, err; + + if (np->parent->plat_type != PLAT_TYPE_NIU) { + if (np->port == 0 || np->port == 1) + max = ATLAS_P0_P1_CFIFO_ENTRIES; + else + max = ATLAS_P2_P3_CFIFO_ENTRIES; + } else + max = NIU_CFIFO_ENTRIES; + + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + data[4] = 0; + + for (i = 0; i < max; i++) { + err = niu_zcp_write(np, i, data); + if (err) + return err; + err = niu_zcp_read(np, i, rbuf); + if (err) + return err; + } + + niu_zcp_cfifo_reset(np); + nw64(CFIFO_ECC(np->port), 0); + nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); + (void) nr64(ZCP_INT_STAT); + nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); + + return 0; +} + +static void niu_ipp_write(struct niu *np, int index, u64 *data) +{ + u64 val = nr64_ipp(IPP_CFIG); + + nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); + nw64_ipp(IPP_DFIFO_WR_PTR, index); + nw64_ipp(IPP_DFIFO_WR0, data[0]); + nw64_ipp(IPP_DFIFO_WR1, data[1]); + nw64_ipp(IPP_DFIFO_WR2, data[2]); + nw64_ipp(IPP_DFIFO_WR3, data[3]); + nw64_ipp(IPP_DFIFO_WR4, data[4]); + nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); +} + +static void niu_ipp_read(struct niu *np, int index, u64 *data) +{ + nw64_ipp(IPP_DFIFO_RD_PTR, index); + data[0] = nr64_ipp(IPP_DFIFO_RD0); + data[1] = nr64_ipp(IPP_DFIFO_RD1); + data[2] = nr64_ipp(IPP_DFIFO_RD2); + data[3] = nr64_ipp(IPP_DFIFO_RD3); + data[4] = nr64_ipp(IPP_DFIFO_RD4); +} + +static int niu_ipp_reset(struct niu *np) +{ + return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, + 1000, 100, "IPP_CFIG"); +} + +static int niu_init_ipp(struct niu *np) +{ + u64 data[5], rbuf[5], val; + int i, max, err; + + if (np->parent->plat_type != PLAT_TYPE_NIU) { + if (np->port == 0 || np->port == 1) + max = ATLAS_P0_P1_DFIFO_ENTRIES; + else + max = ATLAS_P2_P3_DFIFO_ENTRIES; + } else + max = NIU_DFIFO_ENTRIES; + + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + data[4] = 0; + + for (i = 0; i < max; i++) { + niu_ipp_write(np, i, data); + niu_ipp_read(np, i, rbuf); + } + + (void) nr64_ipp(IPP_INT_STAT); + (void) nr64_ipp(IPP_INT_STAT); + + err = niu_ipp_reset(np); + if (err) + return err; + + (void) nr64_ipp(IPP_PKT_DIS); + (void) nr64_ipp(IPP_BAD_CS_CNT); + (void) nr64_ipp(IPP_ECC); + + (void) nr64_ipp(IPP_INT_STAT); + + nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); + + val = nr64_ipp(IPP_CFIG); + val &= ~IPP_CFIG_IP_MAX_PKT; + val |= (IPP_CFIG_IPP_ENABLE | + IPP_CFIG_DFIFO_ECC_EN | + IPP_CFIG_DROP_BAD_CRC | + IPP_CFIG_CKSUM_EN | + (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); + nw64_ipp(IPP_CFIG, val); + + return 0; +} + +static void niu_handle_led(struct niu *np, int status) +{ + u64 val; + val = nr64_mac(XMAC_CONFIG); + + if ((np->flags & NIU_FLAGS_10G) != 0 && + (np->flags & NIU_FLAGS_FIBER) != 0) { + if (status) { + val |= XMAC_CONFIG_LED_POLARITY; + val &= ~XMAC_CONFIG_FORCE_LED_ON; + } else { + val |= XMAC_CONFIG_FORCE_LED_ON; + val &= ~XMAC_CONFIG_LED_POLARITY; + } + } + + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_init_xif_xmac(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + if (np->flags & NIU_FLAGS_XCVR_SERDES) { + val = nr64(MIF_CONFIG); + val |= MIF_CONFIG_ATCA_GE; + nw64(MIF_CONFIG, val); + } + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; + + val |= XMAC_CONFIG_TX_OUTPUT_EN; + + if (lp->loopback_mode == LOOPBACK_MAC) { + val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 
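+ /* MAC loopback mode: turn on the XMAC-internal loopback path. */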
+ val |= XMAC_CONFIG_LOOPBACK; + } else { + val &= ~XMAC_CONFIG_LOOPBACK; + } + + if (np->flags & NIU_FLAGS_10G) { + val &= ~XMAC_CONFIG_LFS_DISABLE; + } else { + val |= XMAC_CONFIG_LFS_DISABLE; + if (!(np->flags & NIU_FLAGS_FIBER) && + !(np->flags & NIU_FLAGS_XCVR_SERDES)) + val |= XMAC_CONFIG_1G_PCS_BYPASS; + else + val &= ~XMAC_CONFIG_1G_PCS_BYPASS; + } + + val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; + + if (lp->active_speed == SPEED_100) + val |= XMAC_CONFIG_SEL_CLK_25MHZ; + else + val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; + + nw64_mac(XMAC_CONFIG, val); + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_MODE_MASK; + if (np->flags & NIU_FLAGS_10G) { + val |= XMAC_CONFIG_MODE_XGMII; + } else { + if (lp->active_speed == SPEED_1000) + val |= XMAC_CONFIG_MODE_GMII; + else + val |= XMAC_CONFIG_MODE_MII; + } + + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_init_xif_bmac(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; + + if (lp->loopback_mode == LOOPBACK_MAC) + val |= BMAC_XIF_CONFIG_MII_LOOPBACK; + else + val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; + + if (lp->active_speed == SPEED_1000) + val |= BMAC_XIF_CONFIG_GMII_MODE; + else + val &= ~BMAC_XIF_CONFIG_GMII_MODE; + + val &= ~(BMAC_XIF_CONFIG_LINK_LED | + BMAC_XIF_CONFIG_LED_POLARITY); + + if (!(np->flags & NIU_FLAGS_10G) && + !(np->flags & NIU_FLAGS_FIBER) && + lp->active_speed == SPEED_100) + val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; + else + val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; + + nw64_mac(BMAC_XIF_CONFIG, val); +} + +static void niu_init_xif(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_init_xif_xmac(np); + else + niu_init_xif_bmac(np); +} + +static void niu_pcs_mii_reset(struct niu *np) +{ + int limit = 1000; + u64 val = nr64_pcs(PCS_MII_CTL); + val |= PCS_MII_CTL_RST; + nw64_pcs(PCS_MII_CTL, val); + while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { + udelay(100); + val = nr64_pcs(PCS_MII_CTL); + } +} + +static void niu_xpcs_reset(struct niu *np) +{ + int limit = 1000; + u64 val = nr64_xpcs(XPCS_CONTROL1); + val |= XPCS_CONTROL1_RESET; + nw64_xpcs(XPCS_CONTROL1, val); + while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { + udelay(100); + val = nr64_xpcs(XPCS_CONTROL1); + } +} + +static int niu_init_pcs(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + switch (np->flags & (NIU_FLAGS_10G | + NIU_FLAGS_FIBER | + NIU_FLAGS_XCVR_SERDES)) { + case NIU_FLAGS_FIBER: + /* 1G fiber */ + nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); + nw64_pcs(PCS_DPATH_MODE, 0); + niu_pcs_mii_reset(np); + break; + + case NIU_FLAGS_10G: + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES: + /* 10G SERDES */ + if (!(np->flags & NIU_FLAGS_XMAC)) + return -EINVAL; + + /* 10G copper or fiber */ + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; + nw64_mac(XMAC_CONFIG, val); + + niu_xpcs_reset(np); + + val = nr64_xpcs(XPCS_CONTROL1); + if (lp->loopback_mode == LOOPBACK_PHY) + val |= XPCS_CONTROL1_LOOPBACK; + else + val &= ~XPCS_CONTROL1_LOOPBACK; + nw64_xpcs(XPCS_CONTROL1, val); + + nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); + (void) nr64_xpcs(XPCS_SYMERR_CNT01); + (void) nr64_xpcs(XPCS_SYMERR_CNT23); + break; + + + case NIU_FLAGS_XCVR_SERDES: + /* 1G SERDES */ + niu_pcs_mii_reset(np); + nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); + nw64_pcs(PCS_DPATH_MODE, 0); + break; + + case 0: + /* 1G copper */ + case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER: + /* 1G RGMII FIBER */ + nw64_pcs(PCS_DPATH_MODE, 
PCS_DPATH_MODE_MII); + niu_pcs_mii_reset(np); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int niu_reset_tx_xmac(struct niu *np) +{ + return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, + (XTXMAC_SW_RST_REG_RS | + XTXMAC_SW_RST_SOFT_RST), + 1000, 100, "XTXMAC_SW_RST"); +} + +static int niu_reset_tx_bmac(struct niu *np) +{ + int limit; + + nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(BTXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_tx_mac(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return niu_reset_tx_xmac(np); + else + return niu_reset_tx_bmac(np); +} + +static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) +{ + u64 val; + + val = nr64_mac(XMAC_MIN); + val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | + XMAC_MIN_RX_MIN_PKT_SIZE); + val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); + val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); + nw64_mac(XMAC_MIN, val); + + nw64_mac(XMAC_MAX, max); + + nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); + + val = nr64_mac(XMAC_IPG); + if (np->flags & NIU_FLAGS_10G) { + val &= ~XMAC_IPG_IPG_XGMII; + val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); + } else { + val &= ~XMAC_IPG_IPG_MII_GMII; + val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); + } + nw64_mac(XMAC_IPG, val); + + val = nr64_mac(XMAC_CONFIG); + val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | + XMAC_CONFIG_STRETCH_MODE | + XMAC_CONFIG_VAR_MIN_IPG_EN | + XMAC_CONFIG_TX_ENABLE); + nw64_mac(XMAC_CONFIG, val); + + nw64_mac(TXMAC_FRM_CNT, 0); + nw64_mac(TXMAC_BYTE_CNT, 0); +} + +static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) +{ + u64 val; + + nw64_mac(BMAC_MIN_FRAME, min); + nw64_mac(BMAC_MAX_FRAME, max); + + nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); + nw64_mac(BMAC_CTRL_TYPE, 0x8808); + nw64_mac(BMAC_PREAMBLE_SIZE, 7); + + val = nr64_mac(BTXMAC_CONFIG); + val &= ~(BTXMAC_CONFIG_FCS_DISABLE | + BTXMAC_CONFIG_ENABLE); + nw64_mac(BTXMAC_CONFIG, val); +} + +static void niu_init_tx_mac(struct niu *np) +{ + u64 min, max; + + min = 64; + if (np->dev->mtu > ETH_DATA_LEN) + max = 9216; + else + max = 1522; + + /* The XMAC_MIN register only accepts values for TX min which + * have the low 3 bits cleared. 
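+ * The fixed minimum of 64 chosen above satisfies this (64 & 0x7 == 0), which is what the BUG_ON below asserts.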
+ */ + BUG_ON(min & 0x7); + + if (np->flags & NIU_FLAGS_XMAC) + niu_init_tx_xmac(np, min, max); + else + niu_init_tx_bmac(np, min, max); +} + +static int niu_reset_rx_xmac(struct niu *np) +{ + int limit; + + nw64_mac(XRXMAC_SW_RST, + XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | + XRXMAC_SW_RST_SOFT_RST))) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(XRXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_rx_bmac(struct niu *np) +{ + int limit; + + nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(BRXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_rx_mac(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return niu_reset_rx_xmac(np); + else + return niu_reset_rx_bmac(np); +} + +static void niu_init_rx_xmac(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int first_rdc_table = tp->first_table_num; + unsigned long i; + u64 val; + + nw64_mac(XMAC_ADD_FILT0, 0); + nw64_mac(XMAC_ADD_FILT1, 0); + nw64_mac(XMAC_ADD_FILT2, 0); + nw64_mac(XMAC_ADD_FILT12_MASK, 0); + nw64_mac(XMAC_ADD_FILT00_MASK, 0); + for (i = 0; i < MAC_NUM_HASH; i++) + nw64_mac(XMAC_HASH_TBL(i), 0); + nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); + niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + + val = nr64_mac(XMAC_CONFIG); + val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | + XMAC_CONFIG_PROMISCUOUS | + XMAC_CONFIG_PROMISC_GROUP | + XMAC_CONFIG_ERR_CHK_DIS | + XMAC_CONFIG_RX_CRC_CHK_DIS | + XMAC_CONFIG_RESERVED_MULTICAST | + XMAC_CONFIG_RX_CODEV_CHK_DIS | + XMAC_CONFIG_ADDR_FILTER_EN | + XMAC_CONFIG_RCV_PAUSE_ENABLE | + XMAC_CONFIG_STRIP_CRC | + XMAC_CONFIG_PASS_FLOW_CTRL | + XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); + val |= (XMAC_CONFIG_HASH_FILTER_EN); + nw64_mac(XMAC_CONFIG, val); + + nw64_mac(RXMAC_BT_CNT, 0); + nw64_mac(RXMAC_BC_FRM_CNT, 0); + nw64_mac(RXMAC_MC_FRM_CNT, 0); + nw64_mac(RXMAC_FRAG_CNT, 0); + nw64_mac(RXMAC_HIST_CNT1, 0); + nw64_mac(RXMAC_HIST_CNT2, 0); + nw64_mac(RXMAC_HIST_CNT3, 0); + nw64_mac(RXMAC_HIST_CNT4, 0); + nw64_mac(RXMAC_HIST_CNT5, 0); + nw64_mac(RXMAC_HIST_CNT6, 0); + nw64_mac(RXMAC_HIST_CNT7, 0); + nw64_mac(RXMAC_MPSZER_CNT, 0); + nw64_mac(RXMAC_CRC_ER_CNT, 0); + nw64_mac(RXMAC_CD_VIO_CNT, 0); + nw64_mac(LINK_FAULT_CNT, 0); +} + +static void niu_init_rx_bmac(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int first_rdc_table = tp->first_table_num; + unsigned long i; + u64 val; + + nw64_mac(BMAC_ADD_FILT0, 0); + nw64_mac(BMAC_ADD_FILT1, 0); + nw64_mac(BMAC_ADD_FILT2, 0); + nw64_mac(BMAC_ADD_FILT12_MASK, 0); + nw64_mac(BMAC_ADD_FILT00_MASK, 0); + for (i = 0; i < MAC_NUM_HASH; i++) + nw64_mac(BMAC_HASH_TBL(i), 0); + niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); + + val = nr64_mac(BRXMAC_CONFIG); + val &= ~(BRXMAC_CONFIG_ENABLE | + BRXMAC_CONFIG_STRIP_PAD | + 
BRXMAC_CONFIG_STRIP_FCS | + BRXMAC_CONFIG_PROMISC | + BRXMAC_CONFIG_PROMISC_GRP | + BRXMAC_CONFIG_ADDR_FILT_EN | + BRXMAC_CONFIG_DISCARD_DIS); + val |= (BRXMAC_CONFIG_HASH_FILT_EN); + nw64_mac(BRXMAC_CONFIG, val); + + val = nr64_mac(BMAC_ADDR_CMPEN); + val |= BMAC_ADDR_CMPEN_EN0; + nw64_mac(BMAC_ADDR_CMPEN, val); +} + +static void niu_init_rx_mac(struct niu *np) +{ + niu_set_primary_mac(np, np->dev->dev_addr); + + if (np->flags & NIU_FLAGS_XMAC) + niu_init_rx_xmac(np); + else + niu_init_rx_bmac(np); +} + +static void niu_enable_tx_xmac(struct niu *np, int on) +{ + u64 val = nr64_mac(XMAC_CONFIG); + + if (on) + val |= XMAC_CONFIG_TX_ENABLE; + else + val &= ~XMAC_CONFIG_TX_ENABLE; + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_enable_tx_bmac(struct niu *np, int on) +{ + u64 val = nr64_mac(BTXMAC_CONFIG); + + if (on) + val |= BTXMAC_CONFIG_ENABLE; + else + val &= ~BTXMAC_CONFIG_ENABLE; + nw64_mac(BTXMAC_CONFIG, val); +} + +static void niu_enable_tx_mac(struct niu *np, int on) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_enable_tx_xmac(np, on); + else + niu_enable_tx_bmac(np, on); +} + +static void niu_enable_rx_xmac(struct niu *np, int on) +{ + u64 val = nr64_mac(XMAC_CONFIG); + + val &= ~(XMAC_CONFIG_HASH_FILTER_EN | + XMAC_CONFIG_PROMISCUOUS); + + if (np->flags & NIU_FLAGS_MCAST) + val |= XMAC_CONFIG_HASH_FILTER_EN; + if (np->flags & NIU_FLAGS_PROMISC) + val |= XMAC_CONFIG_PROMISCUOUS; + + if (on) + val |= XMAC_CONFIG_RX_MAC_ENABLE; + else + val &= ~XMAC_CONFIG_RX_MAC_ENABLE; + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_enable_rx_bmac(struct niu *np, int on) +{ + u64 val = nr64_mac(BRXMAC_CONFIG); + + val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | + BRXMAC_CONFIG_PROMISC); + + if (np->flags & NIU_FLAGS_MCAST) + val |= BRXMAC_CONFIG_HASH_FILT_EN; + if (np->flags & NIU_FLAGS_PROMISC) + val |= BRXMAC_CONFIG_PROMISC; + + if (on) + val |= BRXMAC_CONFIG_ENABLE; + else + val &= ~BRXMAC_CONFIG_ENABLE; + nw64_mac(BRXMAC_CONFIG, val); +} + +static void niu_enable_rx_mac(struct niu *np, int on) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_enable_rx_xmac(np, on); + else + niu_enable_rx_bmac(np, on); +} + +static int niu_init_mac(struct niu *np) +{ + int err; + + niu_init_xif(np); + err = niu_init_pcs(np); + if (err) + return err; + + err = niu_reset_tx_mac(np); + if (err) + return err; + niu_init_tx_mac(np); + err = niu_reset_rx_mac(np); + if (err) + return err; + niu_init_rx_mac(np); + + /* This looks hookey but the RX MAC reset we just did will + * undo some of the state we setup in niu_init_tx_mac() so we + * have to call it again. In particular, the RX MAC reset will + * set the XMAC_MAX register back to it's default value. 
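+ * (niu_init_tx_xmac() rewrites XMAC_MAX, so the second call restores the max frame size selected for the current MTU.)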
+ */ + niu_init_tx_mac(np); + niu_enable_tx_mac(np, 1); + + niu_enable_rx_mac(np, 1); + + return 0; +} + +static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + (void) niu_tx_channel_stop(np, rp->tx_channel); +} + +static void niu_stop_tx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_stop_one_tx_channel(np, rp); + } +} + +static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + (void) niu_tx_channel_reset(np, rp->tx_channel); +} + +static void niu_reset_tx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_reset_one_tx_channel(np, rp); + } +} + +static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + (void) niu_enable_rx_channel(np, rp->rx_channel, 0); +} + +static void niu_stop_rx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_stop_one_rx_channel(np, rp); + } +} + +static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + int channel = rp->rx_channel; + + (void) niu_rx_channel_reset(np, channel); + nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); + nw64(RX_DMA_CTL_STAT(channel), 0); + (void) niu_enable_rx_channel(np, channel, 0); +} + +static void niu_reset_rx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_reset_one_rx_channel(np, rp); + } +} + +static void niu_disable_ipp(struct niu *np) +{ + u64 rd, wr, val; + int limit; + + rd = nr64_ipp(IPP_DFIFO_RD_PTR); + wr = nr64_ipp(IPP_DFIFO_WR_PTR); + limit = 100; + while (--limit >= 0 && (rd != wr)) { + rd = nr64_ipp(IPP_DFIFO_RD_PTR); + wr = nr64_ipp(IPP_DFIFO_WR_PTR); + } + if (limit < 0 && + (rd != 0 && wr != 1)) { + netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", + (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR), + (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR)); + } + + val = nr64_ipp(IPP_CFIG); + val &= ~(IPP_CFIG_IPP_ENABLE | + IPP_CFIG_DFIFO_ECC_EN | + IPP_CFIG_DROP_BAD_CRC | + IPP_CFIG_CKSUM_EN); + nw64_ipp(IPP_CFIG, val); + + (void) niu_ipp_reset(np); +} + +static int niu_init_hw(struct niu *np) +{ + int i, err; + + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); + niu_txc_enable_port(np, 1); + niu_txc_port_dma_enable(np, 1); + niu_txc_set_imask(np, 0); + + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + err = niu_init_one_tx_channel(np, rp); + if (err) + return err; + } + + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); + err = niu_init_rx_channels(np); + if (err) + goto out_uninit_tx_channels; + + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); + err = niu_init_classifier_hw(np); + if (err) + goto out_uninit_rx_channels; + + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); + err = niu_init_zcp(np); + if (err) + goto out_uninit_rx_channels; + + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); + err = niu_init_ipp(np); + if (err) + goto out_uninit_rx_channels; + + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); + err = niu_init_mac(np); + if (err) + goto out_uninit_ipp; + + return 0; + +out_uninit_ipp: + netif_printk(np, ifup, 
KERN_DEBUG, np->dev, "Uninit IPP\n"); + niu_disable_ipp(np); + +out_uninit_rx_channels: + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); + niu_stop_rx_channels(np); + niu_reset_rx_channels(np); + +out_uninit_tx_channels: + netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); + niu_stop_tx_channels(np); + niu_reset_tx_channels(np); + + return err; +} + +static void niu_stop_hw(struct niu *np) +{ + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); + niu_enable_interrupts(np, 0); + + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); + niu_enable_rx_mac(np, 0); + + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); + niu_disable_ipp(np); + + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); + niu_stop_tx_channels(np); + + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); + niu_stop_rx_channels(np); + + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); + niu_reset_tx_channels(np); + + netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); + niu_reset_rx_channels(np); +} + +static void niu_set_irq_name(struct niu *np) +{ + int port = np->port; + int i, j = 1; + + sprintf(np->irq_name[0], "%s:MAC", np->dev->name); + + if (port == 0) { + sprintf(np->irq_name[1], "%s:MIF", np->dev->name); + sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); + j = 3; + } + + for (i = 0; i < np->num_ldg - j; i++) { + if (i < np->num_rx_rings) + sprintf(np->irq_name[i+j], "%s-rx-%d", + np->dev->name, i); + else if (i < np->num_tx_rings + np->num_rx_rings) + sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, + i - np->num_rx_rings); + } +} + +static int niu_request_irq(struct niu *np) +{ + int i, j, err; + + niu_set_irq_name(np); + + err = 0; + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED, + np->irq_name[i], lp); + if (err) + goto out_free_irqs; + + } + + return 0; + +out_free_irqs: + for (j = 0; j < i; j++) { + struct niu_ldg *lp = &np->ldg[j]; + + free_irq(lp->irq, lp); + } + return err; +} + +static void niu_free_irq(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + free_irq(lp->irq, lp); + } +} + +static void niu_enable_napi(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) + napi_enable(&np->ldg[i].napi); +} + +static void niu_disable_napi(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) + napi_disable(&np->ldg[i].napi); +} + +static int niu_open(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + int err; + + netif_carrier_off(dev); + + err = niu_alloc_channels(np); + if (err) + goto out_err; + + err = niu_enable_interrupts(np, 0); + if (err) + goto out_free_channels; + + err = niu_request_irq(np); + if (err) + goto out_free_channels; + + niu_enable_napi(np); + + spin_lock_irq(&np->lock); + + err = niu_init_hw(np); + if (!err) { + init_timer(&np->timer); + np->timer.expires = jiffies + HZ; + np->timer.data = (unsigned long) np; + np->timer.function = niu_timer; + + err = niu_enable_interrupts(np, 1); + if (err) + niu_stop_hw(np); + } + + spin_unlock_irq(&np->lock); + + if (err) { + niu_disable_napi(np); + goto out_free_irq; + } + + netif_tx_start_all_queues(dev); + + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + netif_carrier_on(dev); + + add_timer(&np->timer); + + return 0; + +out_free_irq: + niu_free_irq(np); + +out_free_channels: + 
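+ /* Release the RX/TX channel resources allocated earlier in niu_open(). */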
niu_free_channels(np); + +out_err: + return err; +} + +static void niu_full_shutdown(struct niu *np, struct net_device *dev) +{ + cancel_work_sync(&np->reset_task); + + niu_disable_napi(np); + netif_tx_stop_all_queues(dev); + + del_timer_sync(&np->timer); + + spin_lock_irq(&np->lock); + + niu_stop_hw(np); + + spin_unlock_irq(&np->lock); +} + +static int niu_close(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + niu_full_shutdown(np, dev); + + niu_free_irq(np); + + niu_free_channels(np); + + niu_handle_led(np, 0); + + return 0; +} + +static void niu_sync_xmac_stats(struct niu *np) +{ + struct niu_xmac_stats *mp = &np->mac_stats.xmac; + + mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); + mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); + + mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); + mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); + mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); + mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); + mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); + mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); + mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); + mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); + mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); + mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); + mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); + mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); + mp->rx_octets += nr64_mac(RXMAC_BT_CNT); + mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); + mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); + mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); +} + +static void niu_sync_bmac_stats(struct niu *np) +{ + struct niu_bmac_stats *mp = &np->mac_stats.bmac; + + mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); + mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); + + mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); + mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); + mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); + mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); +} + +static void niu_sync_mac_stats(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_sync_xmac_stats(np); + else + niu_sync_bmac_stats(np); +} + +static void niu_get_rx_stats(struct niu *np, + struct rtnl_link_stats64 *stats) +{ + u64 pkts, dropped, errors, bytes; + struct rx_ring_info *rx_rings; + int i; + + pkts = dropped = errors = bytes = 0; + + rx_rings = ACCESS_ONCE(np->rx_rings); + if (!rx_rings) + goto no_rings; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &rx_rings[i]; + + niu_sync_rx_discard_stats(np, rp, 0); + + pkts += rp->rx_packets; + bytes += rp->rx_bytes; + dropped += rp->rx_dropped; + errors += rp->rx_errors; + } + +no_rings: + stats->rx_packets = pkts; + stats->rx_bytes = bytes; + stats->rx_dropped = dropped; + stats->rx_errors = errors; +} + +static void niu_get_tx_stats(struct niu *np, + struct rtnl_link_stats64 *stats) +{ + u64 pkts, errors, bytes; + struct tx_ring_info *tx_rings; + int i; + + pkts = errors = bytes = 0; + + tx_rings = ACCESS_ONCE(np->tx_rings); + if (!tx_rings) + goto no_rings; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &tx_rings[i]; + + pkts += rp->tx_packets; + bytes += rp->tx_bytes; + errors += rp->tx_errors; + } + +no_rings: + stats->tx_packets = pkts; + stats->tx_bytes = bytes; + stats->tx_errors = errors; +} + +static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct niu *np = netdev_priv(dev); + + if (netif_running(dev)) { + niu_get_rx_stats(np, stats); + niu_get_tx_stats(np, stats); + } + + 
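+ /* When the interface is not running, the caller-supplied stats are returned without being updated here. */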
return stats; +} + +static void niu_load_hash_xmac(struct niu *np, u16 *hash) +{ + int i; + + for (i = 0; i < 16; i++) + nw64_mac(XMAC_HASH_TBL(i), hash[i]); +} + +static void niu_load_hash_bmac(struct niu *np, u16 *hash) +{ + int i; + + for (i = 0; i < 16; i++) + nw64_mac(BMAC_HASH_TBL(i), hash[i]); +} + +static void niu_load_hash(struct niu *np, u16 *hash) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_load_hash_xmac(np, hash); + else + niu_load_hash_bmac(np, hash); +} + +static void niu_set_rx_mode(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + int i, alt_cnt, err; + struct netdev_hw_addr *ha; + unsigned long flags; + u16 hash[16] = { 0, }; + + spin_lock_irqsave(&np->lock, flags); + niu_enable_rx_mac(np, 0); + + np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); + if (dev->flags & IFF_PROMISC) + np->flags |= NIU_FLAGS_PROMISC; + if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) + np->flags |= NIU_FLAGS_MCAST; + + alt_cnt = netdev_uc_count(dev); + if (alt_cnt > niu_num_alt_addr(np)) { + alt_cnt = 0; + np->flags |= NIU_FLAGS_PROMISC; + } + + if (alt_cnt) { + int index = 0; + + netdev_for_each_uc_addr(ha, dev) { + err = niu_set_alt_mac(np, index, ha->addr); + if (err) + netdev_warn(dev, "Error %d adding alt mac %d\n", + err, index); + err = niu_enable_alt_mac(np, index, 1); + if (err) + netdev_warn(dev, "Error %d enabling alt mac %d\n", + err, index); + + index++; + } + } else { + int alt_start; + if (np->flags & NIU_FLAGS_XMAC) + alt_start = 0; + else + alt_start = 1; + for (i = alt_start; i < niu_num_alt_addr(np); i++) { + err = niu_enable_alt_mac(np, i, 0); + if (err) + netdev_warn(dev, "Error %d disabling alt mac %d\n", + err, i); + } + } + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < 16; i++) + hash[i] = 0xffff; + } else if (!netdev_mc_empty(dev)) { + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc_le(ETH_ALEN, ha->addr); + + crc >>= 24; + hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); + } + } + + if (np->flags & NIU_FLAGS_MCAST) + niu_load_hash(np, hash); + + niu_enable_rx_mac(np, 1); + spin_unlock_irqrestore(&np->lock, flags); +} + +static int niu_set_mac_addr(struct net_device *dev, void *p) +{ + struct niu *np = netdev_priv(dev); + struct sockaddr *addr = p; + unsigned long flags; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) + return 0; + + spin_lock_irqsave(&np->lock, flags); + niu_enable_rx_mac(np, 0); + niu_set_primary_mac(np, dev->dev_addr); + niu_enable_rx_mac(np, 1); + spin_unlock_irqrestore(&np->lock, flags); + + return 0; +} + +static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return -EOPNOTSUPP; +} + +static void niu_netif_stop(struct niu *np) +{ + np->dev->trans_start = jiffies; /* prevent tx timeout */ + + niu_disable_napi(np); + + netif_tx_disable(np->dev); +} + +static void niu_netif_start(struct niu *np) +{ + /* NOTE: unconditional netif_wake_queue is only appropriate + * so long as all callers are assured to have free tx slots + * (such as after niu_init_hw). 
+ */ + netif_tx_wake_all_queues(np->dev); + + niu_enable_napi(np); + + niu_enable_interrupts(np, 1); +} + +static void niu_reset_buffers(struct niu *np) +{ + int i, j, k, err; + + if (np->rx_rings) { + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) { + struct page *page; + + page = rp->rxhash[j]; + while (page) { + struct page *next = + (struct page *) page->mapping; + u64 base = page->index; + base = base >> RBR_DESCR_ADDR_SHIFT; + rp->rbr[k++] = cpu_to_le32(base); + page = next; + } + } + for (; k < MAX_RBR_RING_SIZE; k++) { + err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k); + if (unlikely(err)) + break; + } + + rp->rbr_index = rp->rbr_table_size - 1; + rp->rcr_index = 0; + rp->rbr_pending = 0; + rp->rbr_refill_pending = 0; + } + } + if (np->tx_rings) { + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + for (j = 0; j < MAX_TX_RING_SIZE; j++) { + if (rp->tx_buffs[j].skb) + (void) release_tx_packet(np, rp, j); + } + + rp->pending = MAX_TX_RING_SIZE; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + } + } +} + +static void niu_reset_task(struct work_struct *work) +{ + struct niu *np = container_of(work, struct niu, reset_task); + unsigned long flags; + int err; + + spin_lock_irqsave(&np->lock, flags); + if (!netif_running(np->dev)) { + spin_unlock_irqrestore(&np->lock, flags); + return; + } + + spin_unlock_irqrestore(&np->lock, flags); + + del_timer_sync(&np->timer); + + niu_netif_stop(np); + + spin_lock_irqsave(&np->lock, flags); + + niu_stop_hw(np); + + spin_unlock_irqrestore(&np->lock, flags); + + niu_reset_buffers(np); + + spin_lock_irqsave(&np->lock, flags); + + err = niu_init_hw(np); + if (!err) { + np->timer.expires = jiffies + HZ; + add_timer(&np->timer); + niu_netif_start(np); + } + + spin_unlock_irqrestore(&np->lock, flags); +} + +static void niu_tx_timeout(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + dev_err(np->device, "%s: Transmit timed out, resetting\n", + dev->name); + + schedule_work(&np->reset_task); +} + +static void niu_set_txd(struct tx_ring_info *rp, int index, + u64 mapping, u64 len, u64 mark, + u64 n_frags) +{ + __le64 *desc = &rp->descr[index]; + + *desc = cpu_to_le64(mark | + (n_frags << TX_DESC_NUM_PTR_SHIFT) | + (len << TX_DESC_TR_LEN_SHIFT) | + (mapping & TX_DESC_SAD)); +} + +static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, + u64 pad_bytes, u64 len) +{ + u16 eth_proto, eth_proto_inner; + u64 csum_bits, l3off, ihl, ret; + u8 ip_proto; + int ipv6; + + eth_proto = be16_to_cpu(ehdr->h_proto); + eth_proto_inner = eth_proto; + if (eth_proto == ETH_P_8021Q) { + struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; + __be16 val = vp->h_vlan_encapsulated_proto; + + eth_proto_inner = be16_to_cpu(val); + } + + ipv6 = ihl = 0; + switch (skb->protocol) { + case cpu_to_be16(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + ihl = ip_hdr(skb)->ihl; + break; + case cpu_to_be16(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + ihl = (40 >> 2); + ipv6 = 1; + break; + default: + ip_proto = ihl = 0; + break; + } + + csum_bits = TXHDR_CSUM_NONE; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u64 start, stuff; + + csum_bits = (ip_proto == IPPROTO_TCP ? + TXHDR_CSUM_TCP : + (ip_proto == IPPROTO_UDP ? 
+ TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); + + start = skb_checksum_start_offset(skb) - + (pad_bytes + sizeof(struct tx_pkt_hdr)); + stuff = start + skb->csum_offset; + + csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; + csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; + } + + l3off = skb_network_offset(skb) - + (pad_bytes + sizeof(struct tx_pkt_hdr)); + + ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | + (len << TXHDR_LEN_SHIFT) | + ((l3off / 2) << TXHDR_L3START_SHIFT) | + (ihl << TXHDR_IHL_SHIFT) | + ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) | + ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | + (ipv6 ? TXHDR_IP_VER : 0) | + csum_bits); + + return ret; +} + +static netdev_tx_t niu_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + unsigned long align, headroom; + struct netdev_queue *txq; + struct tx_ring_info *rp; + struct tx_pkt_hdr *tp; + unsigned int len, nfg; + struct ethhdr *ehdr; + int prod, i, tlen; + u64 mapping, mrk; + + i = skb_get_queue_mapping(skb); + rp = &np->tx_rings[i]; + txq = netdev_get_tx_queue(dev, i); + + if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { + netif_tx_stop_queue(txq); + dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); + rp->tx_errors++; + return NETDEV_TX_BUSY; + } + + if (eth_skb_pad(skb)) + goto out; + + len = sizeof(struct tx_pkt_hdr) + 15; + if (skb_headroom(skb) < len) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, len); + if (!skb_new) { + rp->tx_errors++; + goto out_drop; + } + kfree_skb(skb); + skb = skb_new; + } else + skb_orphan(skb); + + align = ((unsigned long) skb->data & (16 - 1)); + headroom = align + sizeof(struct tx_pkt_hdr); + + ehdr = (struct ethhdr *) skb->data; + tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); + + len = skb->len - sizeof(struct tx_pkt_hdr); + tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); + tp->resv = 0; + + len = skb_headlen(skb); + mapping = np->ops->map_single(np->device, skb->data, + len, DMA_TO_DEVICE); + + prod = rp->prod; + + rp->tx_buffs[prod].skb = skb; + rp->tx_buffs[prod].mapping = mapping; + + mrk = TX_DESC_SOP; + if (++rp->mark_counter == rp->mark_freq) { + rp->mark_counter = 0; + mrk |= TX_DESC_MARK; + rp->mark_pending++; + } + + tlen = len; + nfg = skb_shinfo(skb)->nr_frags; + while (tlen > 0) { + tlen -= MAX_TX_DESC_LEN; + nfg++; + } + + while (len > 0) { + unsigned int this_len = len; + + if (this_len > MAX_TX_DESC_LEN) + this_len = MAX_TX_DESC_LEN; + + niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); + mrk = nfg = 0; + + prod = NEXT_TX(rp, prod); + mapping += this_len; + len -= this_len; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = skb_frag_size(frag); + mapping = np->ops->map_page(np->device, skb_frag_page(frag), + frag->page_offset, len, + DMA_TO_DEVICE); + + rp->tx_buffs[prod].skb = NULL; + rp->tx_buffs[prod].mapping = mapping; + + niu_set_txd(rp, prod, mapping, len, 0, 0); + + prod = NEXT_TX(rp, prod); + } + + if (prod < rp->prod) + rp->wrap_bit ^= TX_RING_KICK_WRAP; + rp->prod = prod; + + nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); + + if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) + netif_tx_wake_queue(txq); + } + +out: + return NETDEV_TX_OK; + +out_drop: + rp->tx_errors++; + kfree_skb(skb); + goto out; +} + +static int niu_change_mtu(struct net_device *dev, int 
new_mtu) +{ + struct niu *np = netdev_priv(dev); + int err, orig_jumbo, new_jumbo; + + if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) + return -EINVAL; + + orig_jumbo = (dev->mtu > ETH_DATA_LEN); + new_jumbo = (new_mtu > ETH_DATA_LEN); + + dev->mtu = new_mtu; + + if (!netif_running(dev) || + (orig_jumbo == new_jumbo)) + return 0; + + niu_full_shutdown(np, dev); + + niu_free_channels(np); + + niu_enable_napi(np); + + err = niu_alloc_channels(np); + if (err) + return err; + + spin_lock_irq(&np->lock); + + err = niu_init_hw(np); + if (!err) { + init_timer(&np->timer); + np->timer.expires = jiffies + HZ; + np->timer.data = (unsigned long) np; + np->timer.function = niu_timer; + + err = niu_enable_interrupts(np, 1); + if (err) + niu_stop_hw(np); + } + + spin_unlock_irq(&np->lock); + + if (!err) { + netif_tx_start_all_queues(dev); + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + netif_carrier_on(dev); + + add_timer(&np->timer); + } + + return err; +} + +static void niu_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct niu *np = netdev_priv(dev); + struct niu_vpd *vpd = &np->vpd; + + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d", + vpd->fcode_major, vpd->fcode_minor); + if (np->parent->plat_type != PLAT_TYPE_NIU) + strlcpy(info->bus_info, pci_name(np->pdev), + sizeof(info->bus_info)); +} + +static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct niu *np = netdev_priv(dev); + struct niu_link_config *lp; + + lp = &np->link_config; + + memset(cmd, 0, sizeof(*cmd)); + cmd->phy_address = np->phy_addr; + cmd->supported = lp->supported; + cmd->advertising = lp->active_advertising; + cmd->autoneg = lp->active_autoneg; + ethtool_cmd_speed_set(cmd, lp->active_speed); + cmd->duplex = lp->active_duplex; + cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; + cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ? 
+ XCVR_EXTERNAL : XCVR_INTERNAL; + + return 0; +} + +static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct niu *np = netdev_priv(dev); + struct niu_link_config *lp = &np->link_config; + + lp->advertising = cmd->advertising; + lp->speed = ethtool_cmd_speed(cmd); + lp->duplex = cmd->duplex; + lp->autoneg = cmd->autoneg; + return niu_init_link(np); +} + +static u32 niu_get_msglevel(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + return np->msg_enable; +} + +static void niu_set_msglevel(struct net_device *dev, u32 value) +{ + struct niu *np = netdev_priv(dev); + np->msg_enable = value; +} + +static int niu_nway_reset(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + if (np->link_config.autoneg) + return niu_init_link(np); + + return 0; +} + +static int niu_get_eeprom_len(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + return np->eeprom_len; +} + +static int niu_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct niu *np = netdev_priv(dev); + u32 offset, len, val; + + offset = eeprom->offset; + len = eeprom->len; + + if (offset + len < offset) + return -EINVAL; + if (offset >= np->eeprom_len) + return -EINVAL; + if (offset + len > np->eeprom_len) + len = eeprom->len = np->eeprom_len - offset; + + if (offset & 3) { + u32 b_offset, b_count; + + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) + b_count = len; + + val = nr64(ESPC_NCR((offset - b_offset) / 4)); + memcpy(data, ((char *)&val) + b_offset, b_count); + data += b_count; + len -= b_count; + offset += b_count; + } + while (len >= 4) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, 4); + data += 4; + len -= 4; + offset += 4; + } + if (len) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, len); + } + return 0; +} + +static void niu_ethflow_to_l3proto(int flow_type, u8 *pid) +{ + switch (flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + *pid = IPPROTO_TCP; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + *pid = IPPROTO_UDP; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + *pid = IPPROTO_SCTP; + break; + case AH_V4_FLOW: + case AH_V6_FLOW: + *pid = IPPROTO_AH; + break; + case ESP_V4_FLOW: + case ESP_V6_FLOW: + *pid = IPPROTO_ESP; + break; + default: + *pid = 0; + break; + } +} + +static int niu_class_to_ethflow(u64 class, int *flow_type) +{ + switch (class) { + case CLASS_CODE_TCP_IPV4: + *flow_type = TCP_V4_FLOW; + break; + case CLASS_CODE_UDP_IPV4: + *flow_type = UDP_V4_FLOW; + break; + case CLASS_CODE_AH_ESP_IPV4: + *flow_type = AH_V4_FLOW; + break; + case CLASS_CODE_SCTP_IPV4: + *flow_type = SCTP_V4_FLOW; + break; + case CLASS_CODE_TCP_IPV6: + *flow_type = TCP_V6_FLOW; + break; + case CLASS_CODE_UDP_IPV6: + *flow_type = UDP_V6_FLOW; + break; + case CLASS_CODE_AH_ESP_IPV6: + *flow_type = AH_V6_FLOW; + break; + case CLASS_CODE_SCTP_IPV6: + *flow_type = SCTP_V6_FLOW; + break; + case CLASS_CODE_USER_PROG1: + case CLASS_CODE_USER_PROG2: + case CLASS_CODE_USER_PROG3: + case CLASS_CODE_USER_PROG4: + *flow_type = IP_USER_FLOW; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int niu_ethflow_to_class(int flow_type, u64 *class) +{ + switch (flow_type) { + case TCP_V4_FLOW: + *class = CLASS_CODE_TCP_IPV4; + break; + case UDP_V4_FLOW: + *class = CLASS_CODE_UDP_IPV4; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + *class = CLASS_CODE_AH_ESP_IPV4; + break; + case SCTP_V4_FLOW: + *class = CLASS_CODE_SCTP_IPV4; + break; + case 
TCP_V6_FLOW: + *class = CLASS_CODE_TCP_IPV6; + break; + case UDP_V6_FLOW: + *class = CLASS_CODE_UDP_IPV6; + break; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + *class = CLASS_CODE_AH_ESP_IPV6; + break; + case SCTP_V6_FLOW: + *class = CLASS_CODE_SCTP_IPV6; + break; + default: + return 0; + } + + return 1; +} + +static u64 niu_flowkey_to_ethflow(u64 flow_key) +{ + u64 ethflow = 0; + + if (flow_key & FLOW_KEY_L2DA) + ethflow |= RXH_L2DA; + if (flow_key & FLOW_KEY_VLAN) + ethflow |= RXH_VLAN; + if (flow_key & FLOW_KEY_IPSA) + ethflow |= RXH_IP_SRC; + if (flow_key & FLOW_KEY_IPDA) + ethflow |= RXH_IP_DST; + if (flow_key & FLOW_KEY_PROTO) + ethflow |= RXH_L3_PROTO; + if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT)) + ethflow |= RXH_L4_B_0_1; + if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT)) + ethflow |= RXH_L4_B_2_3; + + return ethflow; + +} + +static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key) +{ + u64 key = 0; + + if (ethflow & RXH_L2DA) + key |= FLOW_KEY_L2DA; + if (ethflow & RXH_VLAN) + key |= FLOW_KEY_VLAN; + if (ethflow & RXH_IP_SRC) + key |= FLOW_KEY_IPSA; + if (ethflow & RXH_IP_DST) + key |= FLOW_KEY_IPDA; + if (ethflow & RXH_L3_PROTO) + key |= FLOW_KEY_PROTO; + if (ethflow & RXH_L4_B_0_1) + key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT); + if (ethflow & RXH_L4_B_2_3) + key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT); + + *flow_key = key; + + return 1; + +} + +static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) +{ + u64 class; + + nfc->data = 0; + + if (!niu_ethflow_to_class(nfc->flow_type, &class)) + return -EINVAL; + + if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & + TCAM_KEY_DISC) + nfc->data = RXH_DISCARD; + else + nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - + CLASS_CODE_USER_PROG1]); + return 0; +} + +static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, + struct ethtool_rx_flow_spec *fsp) +{ + u32 tmp; + u16 prt; + + tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; + fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); + + tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; + fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); + + tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; + fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); + + tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; + fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); + + fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> + TCAM_V4KEY2_TOS_SHIFT; + fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> + TCAM_V4KEY2_TOS_SHIFT; + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; + fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); + + prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; + fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); + + prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; + fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); + + prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff; + fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); + + tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + 
TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); + break; + case IP_USER_FLOW: + tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); + + tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> + TCAM_V4KEY2_PORT_SPI_SHIFT; + fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); + + fsp->h_u.usr_ip4_spec.proto = + (tp->key[2] & TCAM_V4KEY2_PROTO) >> + TCAM_V4KEY2_PROTO_SHIFT; + fsp->m_u.usr_ip4_spec.proto = + (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> + TCAM_V4KEY2_PROTO_SHIFT; + + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + break; + default: + break; + } +} + +static int niu_get_ethtool_tcam_entry(struct niu *np, + struct ethtool_rxnfc *nfc) +{ + struct niu_parent *parent = np->parent; + struct niu_tcam_entry *tp; + struct ethtool_rx_flow_spec *fsp = &nfc->fs; + u16 idx; + u64 class; + int ret = 0; + + idx = tcam_get_index(np, (u16)nfc->fs.location); + + tp = &parent->tcam[idx]; + if (!tp->valid) { + netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", + parent->index, (u16)nfc->fs.location, idx); + return -EINVAL; + } + + /* fill the flow spec entry */ + class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> + TCAM_V4KEY0_CLASS_CODE_SHIFT; + ret = niu_class_to_ethflow(class, &fsp->flow_type); + if (ret < 0) { + netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", + parent->index); + goto out; + } + + if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { + u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> + TCAM_V4KEY2_PROTO_SHIFT; + if (proto == IPPROTO_ESP) { + if (fsp->flow_type == AH_V4_FLOW) + fsp->flow_type = ESP_V4_FLOW; + else + fsp->flow_type = ESP_V6_FLOW; + } + } + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + niu_get_ip4fs_from_tcam_key(tp, fsp); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + /* Not yet implemented */ + ret = -EINVAL; + break; + case IP_USER_FLOW: + niu_get_ip4fs_from_tcam_key(tp, fsp); + break; + default: + ret = -EINVAL; + break; + } + + if (ret < 0) + goto out; + + if (tp->assoc_data & TCAM_ASSOCDATA_DISC) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> + TCAM_ASSOCDATA_OFFSET_SHIFT; + + /* put the tcam size here */ + nfc->data = tcam_get_size(np); +out: + return ret; +} + +static int niu_get_ethtool_tcam_all(struct niu *np, + struct ethtool_rxnfc *nfc, + u32 *rule_locs) +{ + struct niu_parent *parent = np->parent; + struct niu_tcam_entry *tp; + int i, idx, cnt; + unsigned long flags; + int ret = 0; + + /* put the tcam size here */ + nfc->data = tcam_get_size(np); + + niu_lock_parent(np, flags); + for (cnt = 0, i = 0; i < nfc->data; i++) { + idx = tcam_get_index(np, i); + tp = &parent->tcam[idx]; + if (!tp->valid) + continue; + if (cnt == nfc->rule_cnt) { + ret = -EMSGSIZE; + break; + } + rule_locs[cnt] = i; + cnt++; + } + niu_unlock_parent(np, flags); + + nfc->rule_cnt = cnt; + + return ret; +} + +static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct niu *np = netdev_priv(dev); + int ret = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXFH: + ret = niu_get_hash_opts(np, cmd); + break; + case ETHTOOL_GRXRINGS: + cmd->data = np->num_rx_rings; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = tcam_get_valid_entry_cnt(np); + break; + case ETHTOOL_GRXCLSRULE: + ret = 
niu_get_ethtool_tcam_entry(np, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc) +{ + u64 class; + u64 flow_key = 0; + unsigned long flags; + + if (!niu_ethflow_to_class(nfc->flow_type, &class)) + return -EINVAL; + + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_SCTP_IPV6) + return -EINVAL; + + if (nfc->data & RXH_DISCARD) { + niu_lock_parent(np, flags); + flow_key = np->parent->tcam_key[class - + CLASS_CODE_USER_PROG1]; + flow_key |= TCAM_KEY_DISC; + nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); + np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; + niu_unlock_parent(np, flags); + return 0; + } else { + /* Discard was set before, but is not set now */ + if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & + TCAM_KEY_DISC) { + niu_lock_parent(np, flags); + flow_key = np->parent->tcam_key[class - + CLASS_CODE_USER_PROG1]; + flow_key &= ~TCAM_KEY_DISC; + nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), + flow_key); + np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = + flow_key; + niu_unlock_parent(np, flags); + } + } + + if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) + return -EINVAL; + + niu_lock_parent(np, flags); + nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); + np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; + niu_unlock_parent(np, flags); + + return 0; +} + +static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp, + struct niu_tcam_entry *tp, + int l2_rdc_tab, u64 class) +{ + u8 pid = 0; + u32 sip, dip, sipm, dipm, spi, spim; + u16 sport, dport, spm, dpm; + + sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); + sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); + dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); + dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); + + tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; + tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; + tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; + tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; + + tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; + tp->key[3] |= dip; + + tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; + tp->key_mask[3] |= dipm; + + tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << + TCAM_V4KEY2_TOS_SHIFT); + tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << + TCAM_V4KEY2_TOS_SHIFT); + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); + spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); + dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); + dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); + + tp->key[2] |= (((u64)sport << 16) | dport); + tp->key_mask[2] |= (((u64)spm << 16) | dpm); + niu_ethflow_to_l3proto(fsp->flow_type, &pid); + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); + spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); + + tp->key[2] |= spi; + tp->key_mask[2] |= spim; + niu_ethflow_to_l3proto(fsp->flow_type, &pid); + break; + case IP_USER_FLOW: + spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); + spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); + + tp->key[2] |= spi; + tp->key_mask[2] |= spim; + pid = fsp->h_u.usr_ip4_spec.proto; + break; + default: + break; + } + + tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); + if (pid) { + tp->key_mask[2] |= TCAM_V4KEY2_PROTO; + } +} + 
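+/* Install one ethtool flow-steering rule into the TCAM.  IP_USER_FLOW
+ * rules first look up or program one of the user-programmable classes
+ * (CLASS_CODE_USER_PROG1..4); all other flow types map to a fixed class
+ * via niu_ethflow_to_class().  The key/mask pair is built by
+ * niu_get_tcamkey_from_ip4fs(), the association data selects either
+ * discard or the target RX ring, and the entry is committed with
+ * tcam_write()/tcam_assoc_write() under the parent lock.
+ */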
+static int niu_add_ethtool_tcam_entry(struct niu *np,
+				       struct ethtool_rxnfc *nfc)
+{
+	struct niu_parent *parent = np->parent;
+	struct niu_tcam_entry *tp;
+	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
+	int l2_rdc_table = rdc_table->first_table_num;
+	u16 idx;
+	u64 class;
+	unsigned long flags;
+	int err, ret;
+
+	ret = 0;
+
+	idx = nfc->fs.location;
+	if (idx >= tcam_get_size(np))
+		return -EINVAL;
+
+	if (fsp->flow_type == IP_USER_FLOW) {
+		int i;
+		int add_usr_cls = 0;
+		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
+		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
+
+		if (uspec->ip_ver != ETH_RX_NFC_IP4)
+			return -EINVAL;
+
+		niu_lock_parent(np, flags);
+
+		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+			if (parent->l3_cls[i]) {
+				if (uspec->proto == parent->l3_cls_pid[i]) {
+					class = parent->l3_cls[i];
+					parent->l3_cls_refcnt[i]++;
+					add_usr_cls = 1;
+					break;
+				}
+			} else {
+				/* Program new user IP class */
+				switch (i) {
+				case 0:
+					class = CLASS_CODE_USER_PROG1;
+					break;
+				case 1:
+					class = CLASS_CODE_USER_PROG2;
+					break;
+				case 2:
+					class = CLASS_CODE_USER_PROG3;
+					break;
+				case 3:
+					class = CLASS_CODE_USER_PROG4;
+					break;
+				default:
+					break;
+				}
+				ret = tcam_user_ip_class_set(np, class, 0,
+							     uspec->proto,
+							     uspec->tos,
+							     umask->tos);
+				if (ret)
+					goto out;
+
+				ret = tcam_user_ip_class_enable(np, class, 1);
+				if (ret)
+					goto out;
+				parent->l3_cls[i] = class;
+				parent->l3_cls_pid[i] = uspec->proto;
+				parent->l3_cls_refcnt[i]++;
+				add_usr_cls = 1;
+				break;
+			}
+		}
+		if (!add_usr_cls) {
+			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
+				    parent->index, __func__, uspec->proto);
+			ret = -EINVAL;
+			goto out;
+		}
+		niu_unlock_parent(np, flags);
+	} else {
+		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
+			return -EINVAL;
+		}
+	}
+
+	niu_lock_parent(np, flags);
+
+	idx = tcam_get_index(np, idx);
+	tp = &parent->tcam[idx];
+
+	memset(tp, 0, sizeof(*tp));
+
+	/* fill in the tcam key and mask */
+	switch (fsp->flow_type) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+		break;
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+		/* Not yet implemented */
+		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
+			    parent->index, __func__, fsp->flow_type);
+		ret = -EINVAL;
+		goto out;
+	case IP_USER_FLOW:
+		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+		break;
+	default:
+		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
+			    parent->index, __func__, fsp->flow_type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* fill in the assoc data */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		tp->assoc_data = TCAM_ASSOCDATA_DISC;
+	} else {
+		if (fsp->ring_cookie >= np->num_rx_rings) {
+			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
+				    parent->index, __func__,
+				    (long long)fsp->ring_cookie);
+			ret = -EINVAL;
+			goto out;
+		}
+		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+				  (fsp->ring_cookie <<
+				   TCAM_ASSOCDATA_OFFSET_SHIFT));
+	}
+
+	err = tcam_write(np, idx, tp->key, tp->key_mask);
+	if (err) {
+		ret = -EINVAL;
+		goto out;
+	}
+	err = tcam_assoc_write(np, idx, tp->assoc_data);
+	if (err) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* validate the entry */
+	tp->valid = 1;
+	np->clas.tcam_valid_entries++;
+out:
+	niu_unlock_parent(np, flags);
+
+	return ret;
+}
+
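+/* Remove the TCAM rule at @loc.  If the entry used one of the
+ * user-programmable classes, drop that class's reference count and
+ * disable it once the count reaches zero, then flush and invalidate
+ * the TCAM slot.
+ */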
+static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
+{
+	struct niu_parent *parent = np->parent;
+	struct niu_tcam_entry *tp;
+	u16 idx;
+	unsigned long flags;
+	u64 class;
+	int ret = 0;
+
+	if (loc >= tcam_get_size(np))
+		return -EINVAL;
+
+	niu_lock_parent(np, flags);
+
+	idx = tcam_get_index(np, loc);
+	tp = &parent->tcam[idx];
+
+	/* if the entry is of a user defined class, then update */
+	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+		TCAM_V4KEY0_CLASS_CODE_SHIFT;
+
+	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
+		int i;
+		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+			if (parent->l3_cls[i] == class) {
+				parent->l3_cls_refcnt[i]--;
+				if (!parent->l3_cls_refcnt[i]) {
+					/* disable class */
+					ret = tcam_user_ip_class_enable(np,
+									class,
+									0);
+					if (ret)
+						goto out;
+					parent->l3_cls[i] = 0;
+					parent->l3_cls_pid[i] = 0;
+				}
+				break;
+			}
+		}
+		if (i == NIU_L3_PROG_CLS) {
+			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
+				    parent->index, __func__,
+				    (unsigned long long)class);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	ret = tcam_flush(np, idx);
+	if (ret)
+		goto out;
+
+	/* invalidate the entry */
+	tp->valid = 0;
+	np->clas.tcam_valid_entries--;
+out:
+	niu_unlock_parent(np, flags);
+
+	return ret;
+}
+
+static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct niu *np = netdev_priv(dev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXFH:
+		ret = niu_set_hash_opts(np, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLINS:
+		ret = niu_add_ethtool_tcam_entry(np, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} niu_xmac_stat_keys[] = {
+	{ "tx_frames" },
+	{ "tx_bytes" },
+	{ "tx_fifo_errors" },
+	{ "tx_overflow_errors" },
+	{ "tx_max_pkt_size_errors" },
+	{ "tx_underflow_errors" },
+	{ "rx_local_faults" },
+	{ "rx_remote_faults" },
+	{ "rx_link_faults" },
+	{ "rx_align_errors" },
+	{ "rx_frags" },
+	{ "rx_mcasts" },
+	{ "rx_bcasts" },
+	{ "rx_hist_cnt1" },
+	{ "rx_hist_cnt2" },
+	{ "rx_hist_cnt3" },
+	{ "rx_hist_cnt4" },
+	{ "rx_hist_cnt5" },
+	{ "rx_hist_cnt6" },
+	{ "rx_hist_cnt7" },
+	{ "rx_octets" },
+	{ "rx_code_violations" },
+	{ "rx_len_errors" },
+	{ "rx_crc_errors" },
+	{ "rx_underflows" },
+	{ "rx_overflows" },
+	{ "pause_off_state" },
+	{ "pause_on_state" },
+	{ "pause_received" },
+};
+
+#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} niu_bmac_stat_keys[] = {
+	{ "tx_underflow_errors" },
+	{ "tx_max_pkt_size_errors" },
+	{ "tx_bytes" },
+	{ "tx_frames" },
+	{ "rx_overflows" },
+	{ "rx_frames" },
+	{ "rx_align_errors" },
+	{ "rx_crc_errors" },
+	{ "rx_len_errors" },
+	{ "pause_off_state" },
+	{ "pause_on_state" },
+	{ "pause_received" },
+};
+
+#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} niu_rxchan_stat_keys[] = {
+	{ "rx_channel" },
+	{ "rx_packets" },
+	{ "rx_bytes" },
+	{ "rx_dropped" },
+	{ "rx_errors" },
+};
+
+#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} niu_txchan_stat_keys[] = {
+	{ "tx_channel" },
+	{ "tx_packets" },
+	{ "tx_bytes" },
+	{ "tx_errors" },
+};
+
+#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
+
+static void niu_get_strings(struct net_device
*dev, u32 stringset, u8 *data) +{ + struct niu *np = netdev_priv(dev); + int i; + + if (stringset != ETH_SS_STATS) + return; + + if (np->flags & NIU_FLAGS_XMAC) { + memcpy(data, niu_xmac_stat_keys, + sizeof(niu_xmac_stat_keys)); + data += sizeof(niu_xmac_stat_keys); + } else { + memcpy(data, niu_bmac_stat_keys, + sizeof(niu_bmac_stat_keys)); + data += sizeof(niu_bmac_stat_keys); + } + for (i = 0; i < np->num_rx_rings; i++) { + memcpy(data, niu_rxchan_stat_keys, + sizeof(niu_rxchan_stat_keys)); + data += sizeof(niu_rxchan_stat_keys); + } + for (i = 0; i < np->num_tx_rings; i++) { + memcpy(data, niu_txchan_stat_keys, + sizeof(niu_txchan_stat_keys)); + data += sizeof(niu_txchan_stat_keys); + } +} + +static int niu_get_sset_count(struct net_device *dev, int stringset) +{ + struct niu *np = netdev_priv(dev); + + if (stringset != ETH_SS_STATS) + return -EINVAL; + + return (np->flags & NIU_FLAGS_XMAC ? + NUM_XMAC_STAT_KEYS : + NUM_BMAC_STAT_KEYS) + + (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + + (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); +} + +static void niu_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct niu *np = netdev_priv(dev); + int i; + + niu_sync_mac_stats(np); + if (np->flags & NIU_FLAGS_XMAC) { + memcpy(data, &np->mac_stats.xmac, + sizeof(struct niu_xmac_stats)); + data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); + } else { + memcpy(data, &np->mac_stats.bmac, + sizeof(struct niu_bmac_stats)); + data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); + } + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_sync_rx_discard_stats(np, rp, 0); + + data[0] = rp->rx_channel; + data[1] = rp->rx_packets; + data[2] = rp->rx_bytes; + data[3] = rp->rx_dropped; + data[4] = rp->rx_errors; + data += 5; + } + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + data[0] = rp->tx_channel; + data[1] = rp->tx_packets; + data[2] = rp->tx_bytes; + data[3] = rp->tx_errors; + data += 4; + } +} + +static u64 niu_led_state_save(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return nr64_mac(XMAC_CONFIG); + else + return nr64_mac(BMAC_XIF_CONFIG); +} + +static void niu_led_state_restore(struct niu *np, u64 val) +{ + if (np->flags & NIU_FLAGS_XMAC) + nw64_mac(XMAC_CONFIG, val); + else + nw64_mac(BMAC_XIF_CONFIG, val); +} + +static void niu_force_led(struct niu *np, int on) +{ + u64 val, reg, bit; + + if (np->flags & NIU_FLAGS_XMAC) { + reg = XMAC_CONFIG; + bit = XMAC_CONFIG_FORCE_LED_ON; + } else { + reg = BMAC_XIF_CONFIG; + bit = BMAC_XIF_CONFIG_LINK_LED; + } + + val = nr64_mac(reg); + if (on) + val |= bit; + else + val &= ~bit; + nw64_mac(reg, val); +} + +static int niu_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) + +{ + struct niu *np = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + np->orig_led_state = niu_led_state_save(np); + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + niu_force_led(np, 1); + break; + + case ETHTOOL_ID_OFF: + niu_force_led(np, 0); + break; + + case ETHTOOL_ID_INACTIVE: + niu_led_state_restore(np, np->orig_led_state); + } + + return 0; +} + +static const struct ethtool_ops niu_ethtool_ops = { + .get_drvinfo = niu_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = niu_get_msglevel, + .set_msglevel = niu_set_msglevel, + .nway_reset = niu_nway_reset, + .get_eeprom_len = niu_get_eeprom_len, + .get_eeprom = niu_get_eeprom, 
+ .get_settings = niu_get_settings, + .set_settings = niu_set_settings, + .get_strings = niu_get_strings, + .get_sset_count = niu_get_sset_count, + .get_ethtool_stats = niu_get_ethtool_stats, + .set_phys_id = niu_set_phys_id, + .get_rxnfc = niu_get_nfc, + .set_rxnfc = niu_set_nfc, +}; + +static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, + int ldg, int ldn) +{ + if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) + return -EINVAL; + if (ldn < 0 || ldn > LDN_MAX) + return -EINVAL; + + parent->ldg_map[ldn] = ldg; + + if (np->parent->plat_type == PLAT_TYPE_NIU) { + /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by + * the firmware, and we're not supposed to change them. + * Validate the mapping, because if it's wrong we probably + * won't get any interrupts and that's painful to debug. + */ + if (nr64(LDG_NUM(ldn)) != ldg) { + dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n", + np->port, ldn, ldg, + (unsigned long long) nr64(LDG_NUM(ldn))); + return -EINVAL; + } + } else + nw64(LDG_NUM(ldn), ldg); + + return 0; +} + +static int niu_set_ldg_timer_res(struct niu *np, int res) +{ + if (res < 0 || res > LDG_TIMER_RES_VAL) + return -EINVAL; + + + nw64(LDG_TIMER_RES, res); + + return 0; +} + +static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) +{ + if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || + (func < 0 || func > 3) || + (vector < 0 || vector > 0x1f)) + return -EINVAL; + + nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); + + return 0; +} + +static int niu_pci_eeprom_read(struct niu *np, u32 addr) +{ + u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | + (addr << ESPC_PIO_STAT_ADDR_SHIFT)); + int limit; + + if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) + return -EINVAL; + + frame = frame_base; + nw64(ESPC_PIO_STAT, frame); + limit = 64; + do { + udelay(5); + frame = nr64(ESPC_PIO_STAT); + if (frame & ESPC_PIO_STAT_READ_END) + break; + } while (limit--); + if (!(frame & ESPC_PIO_STAT_READ_END)) { + dev_err(np->device, "EEPROM read timeout frame[%llx]\n", + (unsigned long long) frame); + return -ENODEV; + } + + frame = frame_base; + nw64(ESPC_PIO_STAT, frame); + limit = 64; + do { + udelay(5); + frame = nr64(ESPC_PIO_STAT); + if (frame & ESPC_PIO_STAT_READ_END) + break; + } while (limit--); + if (!(frame & ESPC_PIO_STAT_READ_END)) { + dev_err(np->device, "EEPROM read timeout frame[%llx]\n", + (unsigned long long) frame); + return -ENODEV; + } + + frame = nr64(ESPC_PIO_STAT); + return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; +} + +static int niu_pci_eeprom_read16(struct niu *np, u32 off) +{ + int err = niu_pci_eeprom_read(np, off); + u16 val; + + if (err < 0) + return err; + val = (err << 8); + err = niu_pci_eeprom_read(np, off + 1); + if (err < 0) + return err; + val |= (err & 0xff); + + return val; +} + +static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off) +{ + int err = niu_pci_eeprom_read(np, off); + u16 val; + + if (err < 0) + return err; + + val = (err & 0xff); + err = niu_pci_eeprom_read(np, off + 1); + if (err < 0) + return err; + + val |= (err & 0xff) << 8; + + return val; +} + +static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf, + int namebuf_len) +{ + int i; + + for (i = 0; i < namebuf_len; i++) { + int err = niu_pci_eeprom_read(np, off + i); + if (err < 0) + return err; + *namebuf++ = err; + if (!err) + break; + } + if (i >= namebuf_len) + return -EINVAL; + + return i + 1; +} + +static void niu_vpd_parse_version(struct niu *np) 
+{ + struct niu_vpd *vpd = &np->vpd; + int len = strlen(vpd->version) + 1; + const char *s = vpd->version; + int i; + + for (i = 0; i < len - 5; i++) { + if (!strncmp(s + i, "FCode ", 6)) + break; + } + if (i >= len - 5) + return; + + s += i + 5; + sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); + + netif_printk(np, probe, KERN_DEBUG, np->dev, + "VPD_SCAN: FCODE major(%d) minor(%d)\n", + vpd->fcode_major, vpd->fcode_minor); + if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || + (vpd->fcode_major == NIU_VPD_MIN_MAJOR && + vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) + np->flags |= NIU_FLAGS_VPD_VALID; +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) +{ + unsigned int found_mask = 0; +#define FOUND_MASK_MODEL 0x00000001 +#define FOUND_MASK_BMODEL 0x00000002 +#define FOUND_MASK_VERS 0x00000004 +#define FOUND_MASK_MAC 0x00000008 +#define FOUND_MASK_NMAC 0x00000010 +#define FOUND_MASK_PHY 0x00000020 +#define FOUND_MASK_ALL 0x0000003f + + netif_printk(np, probe, KERN_DEBUG, np->dev, + "VPD_SCAN: start[%x] end[%x]\n", start, end); + while (start < end) { + int len, err, prop_len; + char namebuf[64]; + u8 *prop_buf; + int max_len; + + if (found_mask == FOUND_MASK_ALL) { + niu_vpd_parse_version(np); + return 1; + } + + err = niu_pci_eeprom_read(np, start + 2); + if (err < 0) + return err; + len = err; + start += 3; + + prop_len = niu_pci_eeprom_read(np, start + 4); + err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); + if (err < 0) + return err; + + prop_buf = NULL; + max_len = 0; + if (!strcmp(namebuf, "model")) { + prop_buf = np->vpd.model; + max_len = NIU_VPD_MODEL_MAX; + found_mask |= FOUND_MASK_MODEL; + } else if (!strcmp(namebuf, "board-model")) { + prop_buf = np->vpd.board_model; + max_len = NIU_VPD_BD_MODEL_MAX; + found_mask |= FOUND_MASK_BMODEL; + } else if (!strcmp(namebuf, "version")) { + prop_buf = np->vpd.version; + max_len = NIU_VPD_VERSION_MAX; + found_mask |= FOUND_MASK_VERS; + } else if (!strcmp(namebuf, "local-mac-address")) { + prop_buf = np->vpd.local_mac; + max_len = ETH_ALEN; + found_mask |= FOUND_MASK_MAC; + } else if (!strcmp(namebuf, "num-mac-addresses")) { + prop_buf = &np->vpd.mac_num; + max_len = 1; + found_mask |= FOUND_MASK_NMAC; + } else if (!strcmp(namebuf, "phy-type")) { + prop_buf = np->vpd.phy_type; + max_len = NIU_VPD_PHY_TYPE_MAX; + found_mask |= FOUND_MASK_PHY; + } + + if (max_len && prop_len > max_len) { + dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); + return -EINVAL; + } + + if (prop_buf) { + u32 off = start + 5 + err; + int i; + + netif_printk(np, probe, KERN_DEBUG, np->dev, + "VPD_SCAN: Reading in property [%s] len[%d]\n", + namebuf, prop_len); + for (i = 0; i < prop_len; i++) + *prop_buf++ = niu_pci_eeprom_read(np, off + i); + } + + start += len; + } + + return 0; +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static void niu_pci_vpd_fetch(struct niu *np, u32 start) +{ + u32 offset; + int err; + + err = niu_pci_eeprom_read16_swp(np, start + 1); + if (err < 0) + return; + + offset = err + 3; + + while (start + offset < ESPC_EEPROM_SIZE) { + u32 here = start + offset; + u32 end; + + err = niu_pci_eeprom_read(np, here); + if (err != 0x90) + return; + + err = niu_pci_eeprom_read16_swp(np, here + 1); + if (err < 0) + return; + + here = start + offset + 3; + end = start + offset + err; + + offset += err; + + err = niu_pci_vpd_scan_props(np, here, end); + if (err < 0 || err == 1) + return; + } +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static u32 
niu_pci_vpd_offset(struct niu *np) +{ + u32 start = 0, end = ESPC_EEPROM_SIZE, ret; + int err; + + while (start < end) { + ret = start; + + /* ROM header signature? */ + err = niu_pci_eeprom_read16(np, start + 0); + if (err != 0x55aa) + return 0; + + /* Apply offset to PCI data structure. */ + err = niu_pci_eeprom_read16(np, start + 23); + if (err < 0) + return 0; + start += err; + + /* Check for "PCIR" signature. */ + err = niu_pci_eeprom_read16(np, start + 0); + if (err != 0x5043) + return 0; + err = niu_pci_eeprom_read16(np, start + 2); + if (err != 0x4952) + return 0; + + /* Check for OBP image type. */ + err = niu_pci_eeprom_read(np, start + 20); + if (err < 0) + return 0; + if (err != 0x01) { + err = niu_pci_eeprom_read(np, ret + 2); + if (err < 0) + return 0; + + start = ret + (err * 512); + continue; + } + + err = niu_pci_eeprom_read16_swp(np, start + 8); + if (err < 0) + return err; + ret += err; + + err = niu_pci_eeprom_read(np, ret + 0); + if (err != 0x82) + return 0; + + return ret; + } + + return 0; +} + +static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop) +{ + if (!strcmp(phy_prop, "mif")) { + /* 1G copper, MII */ + np->flags &= ~(NIU_FLAGS_FIBER | + NIU_FLAGS_10G); + np->mac_xcvr = MAC_XCVR_MII; + } else if (!strcmp(phy_prop, "xgf")) { + /* 10G fiber, XPCS */ + np->flags |= (NIU_FLAGS_10G | + NIU_FLAGS_FIBER); + np->mac_xcvr = MAC_XCVR_XPCS; + } else if (!strcmp(phy_prop, "pcs")) { + /* 1G fiber, PCS */ + np->flags &= ~NIU_FLAGS_10G; + np->flags |= NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_PCS; + } else if (!strcmp(phy_prop, "xgc")) { + /* 10G copper, XPCS */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_XPCS; + } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) { + /* 10G Serdes or 1G Serdes, default to 10G */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->flags |= NIU_FLAGS_XCVR_SERDES; + np->mac_xcvr = MAC_XCVR_XPCS; + } else { + return -EINVAL; + } + return 0; +} + +static int niu_pci_vpd_get_nports(struct niu *np) +{ + int ports = 0; + + if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { + ports = 4; + } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || + (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { + ports = 2; + } + + return ports; +} + +static void niu_pci_vpd_validate(struct niu *np) +{ + struct net_device *dev = np->dev; + struct niu_vpd *vpd = &np->vpd; + u8 val8; + + if (!is_valid_ether_addr(&vpd->local_mac[0])) { + dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); + + np->flags &= ~NIU_FLAGS_VPD_VALID; + return; + } + + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->flags |= NIU_FLAGS_XCVR_SERDES; + np->mac_xcvr = MAC_XCVR_PCS; + if (np->port > 1) { + np->flags |= NIU_FLAGS_FIBER; + np->flags &= ~NIU_FLAGS_10G; + } + if (np->flags & NIU_FLAGS_10G) + np->mac_xcvr = MAC_XCVR_XPCS; + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { + np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | + NIU_FLAGS_HOTPLUG_PHY); + } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { + dev_err(np->device, "Illegal phy string 
[%s]\n", + np->vpd.phy_type); + dev_err(np->device, "Falling back to SPROM\n"); + np->flags &= ~NIU_FLAGS_VPD_VALID; + return; + } + + memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN); + + val8 = dev->dev_addr[5]; + dev->dev_addr[5] += np->port; + if (dev->dev_addr[5] < val8) + dev->dev_addr[4]++; +} + +static int niu_pci_probe_sprom(struct niu *np) +{ + struct net_device *dev = np->dev; + int len, i; + u64 val, sum; + u8 val8; + + val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); + val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; + len = val / 4; + + np->eeprom_len = len; + + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: Image size %llu\n", (unsigned long long)val); + + sum = 0; + for (i = 0; i < len; i++) { + val = nr64(ESPC_NCR(i)); + sum += (val >> 0) & 0xff; + sum += (val >> 8) & 0xff; + sum += (val >> 16) & 0xff; + sum += (val >> 24) & 0xff; + } + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: Checksum %x\n", (int)(sum & 0xff)); + if ((sum & 0xff) != 0xab) { + dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); + return -EINVAL; + } + + val = nr64(ESPC_PHY_TYPE); + switch (np->port) { + case 0: + val8 = (val & ESPC_PHY_TYPE_PORT0) >> + ESPC_PHY_TYPE_PORT0_SHIFT; + break; + case 1: + val8 = (val & ESPC_PHY_TYPE_PORT1) >> + ESPC_PHY_TYPE_PORT1_SHIFT; + break; + case 2: + val8 = (val & ESPC_PHY_TYPE_PORT2) >> + ESPC_PHY_TYPE_PORT2_SHIFT; + break; + case 3: + val8 = (val & ESPC_PHY_TYPE_PORT3) >> + ESPC_PHY_TYPE_PORT3_SHIFT; + break; + default: + dev_err(np->device, "Bogus port number %u\n", + np->port); + return -EINVAL; + } + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: PHY type %x\n", val8); + + switch (val8) { + case ESPC_PHY_TYPE_1G_COPPER: + /* 1G copper, MII */ + np->flags &= ~(NIU_FLAGS_FIBER | + NIU_FLAGS_10G); + np->mac_xcvr = MAC_XCVR_MII; + break; + + case ESPC_PHY_TYPE_1G_FIBER: + /* 1G fiber, PCS */ + np->flags &= ~NIU_FLAGS_10G; + np->flags |= NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_PCS; + break; + + case ESPC_PHY_TYPE_10G_COPPER: + /* 10G copper, XPCS */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_XPCS; + break; + + case ESPC_PHY_TYPE_10G_FIBER: + /* 10G fiber, XPCS */ + np->flags |= (NIU_FLAGS_10G | + NIU_FLAGS_FIBER); + np->mac_xcvr = MAC_XCVR_XPCS; + break; + + default: + dev_err(np->device, "Bogus SPROM phy type %u\n", val8); + return -EINVAL; + } + + val = nr64(ESPC_MAC_ADDR0); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val); + dev->dev_addr[0] = (val >> 0) & 0xff; + dev->dev_addr[1] = (val >> 8) & 0xff; + dev->dev_addr[2] = (val >> 16) & 0xff; + dev->dev_addr[3] = (val >> 24) & 0xff; + + val = nr64(ESPC_MAC_ADDR1); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val); + dev->dev_addr[4] = (val >> 0) & 0xff; + dev->dev_addr[5] = (val >> 8) & 0xff; + + if (!is_valid_ether_addr(&dev->dev_addr[0])) { + dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", + dev->dev_addr); + return -EINVAL; + } + + val8 = dev->dev_addr[5]; + dev->dev_addr[5] += np->port; + if (dev->dev_addr[5] < val8) + dev->dev_addr[4]++; + + val = nr64(ESPC_MOD_STR_LEN); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val); + if (val >= 8 * 4) + return -EINVAL; + + for (i = 0; i < val; i += 4) { + u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); + + np->vpd.model[i + 3] = (tmp >> 0) & 0xff; + np->vpd.model[i + 2] = (tmp >> 8) & 0xff; + np->vpd.model[i + 1] 
= (tmp >> 16) & 0xff; + np->vpd.model[i + 0] = (tmp >> 24) & 0xff; + } + np->vpd.model[val] = '\0'; + + val = nr64(ESPC_BD_MOD_STR_LEN); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val); + if (val >= 4 * 4) + return -EINVAL; + + for (i = 0; i < val; i += 4) { + u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); + + np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; + np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; + np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; + np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; + } + np->vpd.board_model[val] = '\0'; + + np->vpd.mac_num = + nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; + netif_printk(np, probe, KERN_DEBUG, np->dev, + "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); + + return 0; +} + +static int niu_get_and_validate_port(struct niu *np) +{ + struct niu_parent *parent = np->parent; + + if (np->port <= 1) + np->flags |= NIU_FLAGS_XMAC; + + if (!parent->num_ports) { + if (parent->plat_type == PLAT_TYPE_NIU) { + parent->num_ports = 2; + } else { + parent->num_ports = niu_pci_vpd_get_nports(np); + if (!parent->num_ports) { + /* Fall back to SPROM as last resort. + * This will fail on most cards. + */ + parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & + ESPC_NUM_PORTS_MACS_VAL; + + /* All of the current probing methods fail on + * Maramba on-board parts. + */ + if (!parent->num_ports) + parent->num_ports = 4; + } + } + } + + if (np->port >= parent->num_ports) + return -ENODEV; + + return 0; +} + +static int phy_record(struct niu_parent *parent, struct phy_probe_info *p, + int dev_id_1, int dev_id_2, u8 phy_port, int type) +{ + u32 id = (dev_id_1 << 16) | dev_id_2; + u8 idx; + + if (dev_id_1 < 0 || dev_id_2 < 0) + return 0; + if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { + /* Because of the NIU_PHY_ID_MASK being applied, the 8704 + * test covers the 8706 as well. + */ + if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) && + ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011)) + return 0; + } else { + if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) + return 0; + } + + pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", + parent->index, id, + type == PHY_TYPE_PMA_PMD ? "PMA/PMD" : + type == PHY_TYPE_PCS ? 
"PCS" : "MII", + phy_port); + + if (p->cur[type] >= NIU_MAX_PORTS) { + pr_err("Too many PHY ports\n"); + return -EINVAL; + } + idx = p->cur[type]; + p->phy_id[type][idx] = id; + p->phy_port[type][idx] = phy_port; + p->cur[type] = idx + 1; + return 0; +} + +static int port_has_10g(struct phy_probe_info *p, int port) +{ + int i; + + for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { + if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) + return 1; + } + for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { + if (p->phy_port[PHY_TYPE_PCS][i] == port) + return 1; + } + + return 0; +} + +static int count_10g_ports(struct phy_probe_info *p, int *lowest) +{ + int port, cnt; + + cnt = 0; + *lowest = 32; + for (port = 8; port < 32; port++) { + if (port_has_10g(p, port)) { + if (!cnt) + *lowest = port; + cnt++; + } + } + + return cnt; +} + +static int count_1g_ports(struct phy_probe_info *p, int *lowest) +{ + *lowest = 32; + if (p->cur[PHY_TYPE_MII]) + *lowest = p->phy_port[PHY_TYPE_MII][0]; + + return p->cur[PHY_TYPE_MII]; +} + +static void niu_n2_divide_channels(struct niu_parent *parent) +{ + int num_ports = parent->num_ports; + int i; + + for (i = 0; i < num_ports; i++) { + parent->rxchan_per_port[i] = (16 / num_ports); + parent->txchan_per_port[i] = (16 / num_ports); + + pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", + parent->index, i, + parent->rxchan_per_port[i], + parent->txchan_per_port[i]); + } +} + +static void niu_divide_channels(struct niu_parent *parent, + int num_10g, int num_1g) +{ + int num_ports = parent->num_ports; + int rx_chans_per_10g, rx_chans_per_1g; + int tx_chans_per_10g, tx_chans_per_1g; + int i, tot_rx, tot_tx; + + if (!num_10g || !num_1g) { + rx_chans_per_10g = rx_chans_per_1g = + (NIU_NUM_RXCHAN / num_ports); + tx_chans_per_10g = tx_chans_per_1g = + (NIU_NUM_TXCHAN / num_ports); + } else { + rx_chans_per_1g = NIU_NUM_RXCHAN / 8; + rx_chans_per_10g = (NIU_NUM_RXCHAN - + (rx_chans_per_1g * num_1g)) / + num_10g; + + tx_chans_per_1g = NIU_NUM_TXCHAN / 6; + tx_chans_per_10g = (NIU_NUM_TXCHAN - + (tx_chans_per_1g * num_1g)) / + num_10g; + } + + tot_rx = tot_tx = 0; + for (i = 0; i < num_ports; i++) { + int type = phy_decode(parent->port_phy, i); + + if (type == PORT_TYPE_10G) { + parent->rxchan_per_port[i] = rx_chans_per_10g; + parent->txchan_per_port[i] = tx_chans_per_10g; + } else { + parent->rxchan_per_port[i] = rx_chans_per_1g; + parent->txchan_per_port[i] = tx_chans_per_1g; + } + pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n", + parent->index, i, + parent->rxchan_per_port[i], + parent->txchan_per_port[i]); + tot_rx += parent->rxchan_per_port[i]; + tot_tx += parent->txchan_per_port[i]; + } + + if (tot_rx > NIU_NUM_RXCHAN) { + pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n", + parent->index, tot_rx); + for (i = 0; i < num_ports; i++) + parent->rxchan_per_port[i] = 1; + } + if (tot_tx > NIU_NUM_TXCHAN) { + pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n", + parent->index, tot_tx); + for (i = 0; i < num_ports; i++) + parent->txchan_per_port[i] = 1; + } + if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { + pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", + parent->index, tot_rx, tot_tx); + } +} + +static void niu_divide_rdc_groups(struct niu_parent *parent, + int num_10g, int num_1g) +{ + int i, num_ports = parent->num_ports; + int rdc_group, rdc_groups_per_port; + int rdc_channel_base; + + rdc_group = 0; + rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; + + rdc_channel_base = 0; + + for (i = 0; i < 
num_ports; i++) { + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; + int grp, num_channels = parent->rxchan_per_port[i]; + int this_channel_offset; + + tp->first_table_num = rdc_group; + tp->num_tables = rdc_groups_per_port; + this_channel_offset = 0; + for (grp = 0; grp < tp->num_tables; grp++) { + struct rdc_table *rt = &tp->tables[grp]; + int slot; + + pr_info("niu%d: Port %d RDC tbl(%d) [ ", + parent->index, i, tp->first_table_num + grp); + for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { + rt->rxdma_channel[slot] = + rdc_channel_base + this_channel_offset; + + pr_cont("%d ", rt->rxdma_channel[slot]); + + if (++this_channel_offset == num_channels) + this_channel_offset = 0; + } + pr_cont("]\n"); + } + + parent->rdc_default[i] = rdc_channel_base; + + rdc_channel_base += num_channels; + rdc_group += rdc_groups_per_port; + } +} + +static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent, + struct phy_probe_info *info) +{ + unsigned long flags; + int port, err; + + memset(info, 0, sizeof(*info)); + + /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */ + niu_lock_parent(np, flags); + err = 0; + for (port = 8; port < 32; port++) { + int dev_id_1, dev_id_2; + + dev_id_1 = mdio_read(np, port, + NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); + dev_id_2 = mdio_read(np, port, + NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_PMA_PMD); + if (err) + break; + dev_id_1 = mdio_read(np, port, + NIU_PCS_DEV_ADDR, MII_PHYSID1); + dev_id_2 = mdio_read(np, port, + NIU_PCS_DEV_ADDR, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_PCS); + if (err) + break; + dev_id_1 = mii_read(np, port, MII_PHYSID1); + dev_id_2 = mii_read(np, port, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_MII); + if (err) + break; + } + niu_unlock_parent(np, flags); + + return err; +} + +static int walk_phys(struct niu *np, struct niu_parent *parent) +{ + struct phy_probe_info *info = &parent->phy_probe_info; + int lowest_10g, lowest_1g; + int num_10g, num_1g; + u32 val; + int err; + + num_10g = num_1g = 0; + + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { + num_10g = 0; + num_1g = 2; + parent->plat_type = PLAT_TYPE_ATCA_CP3220; + parent->num_ports = 4; + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_1G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { + num_10g = 2; + num_1g = 0; + parent->num_ports = 2; + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1)); + } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) && + (parent->plat_type == PLAT_TYPE_NIU)) { + /* this is the Monza case */ + if (np->flags & NIU_FLAGS_10G) { + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1)); + } else { + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_1G, 1)); + } + } else { + err = fill_phy_probe_info(np, parent, info); + if (err) + return err; + + num_10g = count_10g_ports(info, &lowest_10g); + num_1g = count_1g_ports(info, &lowest_1g); + + switch ((num_10g << 4) | num_1g) { + case 0x24: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + /* fallthru */ + case 0x22: + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 
3)); + break; + + case 0x20: + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1)); + break; + + case 0x10: + val = phy_encode(PORT_TYPE_10G, np->port); + break; + + case 0x14: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + /* fallthru */ + case 0x13: + if ((lowest_10g & 0x7) == 0) + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_1G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + else + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_10G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + case 0x04: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_1G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + default: + pr_err("Unsupported port config 10G[%d] 1G[%d]\n", + num_10g, num_1g); + return -EINVAL; + } + } + + parent->port_phy = val; + + if (parent->plat_type == PLAT_TYPE_NIU) + niu_n2_divide_channels(parent); + else + niu_divide_channels(parent, num_10g, num_1g); + + niu_divide_rdc_groups(parent, num_10g, num_1g); + + return 0; + +unknown_vg_1g_port: + pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g); + return -EINVAL; +} + +static int niu_probe_ports(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int err, i; + + if (parent->port_phy == PORT_PHY_UNKNOWN) { + err = walk_phys(np, parent); + if (err) + return err; + + niu_set_ldg_timer_res(np, 2); + for (i = 0; i <= LDN_MAX; i++) + niu_ldn_irq_enable(np, i, 0); + } + + if (parent->port_phy == PORT_PHY_INVALID) + return -EINVAL; + + return 0; +} + +static int niu_classifier_swstate_init(struct niu *np) +{ + struct niu_classifier *cp = &np->clas; + + cp->tcam_top = (u16) np->port; + cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports; + cp->h1_init = 0xffffffff; + cp->h2_init = 0xffff; + + return fflp_early_init(np); +} + +static void niu_link_config_init(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + + lp->advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full | + ADVERTISED_Autoneg); + lp->speed = lp->active_speed = SPEED_INVALID; + lp->duplex = DUPLEX_FULL; + lp->active_duplex = DUPLEX_INVALID; + lp->autoneg = 1; +#if 0 + lp->loopback_mode = LOOPBACK_MAC; + lp->active_speed = SPEED_10000; + lp->active_duplex = DUPLEX_FULL; +#else + lp->loopback_mode = LOOPBACK_DISABLED; +#endif +} + +static int niu_init_mac_ipp_pcs_base(struct niu *np) +{ + switch (np->port) { + case 0: + np->mac_regs = np->regs + XMAC_PORT0_OFF; + np->ipp_off = 0x00000; + np->pcs_off = 0x04000; + np->xpcs_off = 0x02000; + break; + + case 1: + np->mac_regs = np->regs + XMAC_PORT1_OFF; + np->ipp_off = 0x08000; + np->pcs_off = 0x0a000; + np->xpcs_off = 0x08000; + break; + + case 2: + np->mac_regs = np->regs + BMAC_PORT2_OFF; + np->ipp_off = 0x04000; + np->pcs_off = 0x0e000; + np->xpcs_off = ~0UL; + break; + + case 3: + np->mac_regs = np->regs + BMAC_PORT3_OFF; + np->ipp_off = 0x0c000; + np->pcs_off = 0x12000; + np->xpcs_off = ~0UL; + break; + + default: + dev_err(np->device, "Port %u is invalid, cannot compute MAC block 
offset\n", np->port); + return -EINVAL; + } + + return 0; +} + +static void niu_try_msix(struct niu *np, u8 *ldg_num_map) +{ + struct msix_entry msi_vec[NIU_NUM_LDG]; + struct niu_parent *parent = np->parent; + struct pci_dev *pdev = np->pdev; + int i, num_irqs; + u8 first_ldg; + + first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; + for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) + ldg_num_map[i] = first_ldg + i; + + num_irqs = (parent->rxchan_per_port[np->port] + + parent->txchan_per_port[np->port] + + (np->port == 0 ? 3 : 1)); + BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); + + for (i = 0; i < num_irqs; i++) { + msi_vec[i].vector = 0; + msi_vec[i].entry = i; + } + + num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs); + if (num_irqs < 0) { + np->flags &= ~NIU_FLAGS_MSIX; + return; + } + + np->flags |= NIU_FLAGS_MSIX; + for (i = 0; i < num_irqs; i++) + np->ldg[i].irq = msi_vec[i].vector; + np->num_ldg = num_irqs; +} + +static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) +{ +#ifdef CONFIG_SPARC64 + struct platform_device *op = np->op; + const u32 *int_prop; + int i; + + int_prop = of_get_property(op->dev.of_node, "interrupts", NULL); + if (!int_prop) + return -ENODEV; + + for (i = 0; i < op->archdata.num_irqs; i++) { + ldg_num_map[i] = int_prop[i]; + np->ldg[i].irq = op->archdata.irqs[i]; + } + + np->num_ldg = op->archdata.num_irqs; + + return 0; +#else + return -EINVAL; +#endif +} + +static int niu_ldg_init(struct niu *np) +{ + struct niu_parent *parent = np->parent; + u8 ldg_num_map[NIU_NUM_LDG]; + int first_chan, num_chan; + int i, err, ldg_rotor; + u8 port; + + np->num_ldg = 1; + np->ldg[0].irq = np->dev->irq; + if (parent->plat_type == PLAT_TYPE_NIU) { + err = niu_n2_irq_init(np, ldg_num_map); + if (err) + return err; + } else + niu_try_msix(np, ldg_num_map); + + port = np->port; + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + netif_napi_add(np->dev, &lp->napi, niu_poll, 64); + + lp->np = np; + lp->ldg_num = ldg_num_map[i]; + lp->timer = 2; /* XXX */ + + /* On N2 NIU the firmware has setup the SID mappings so they go + * to the correct values that will route the LDG to the proper + * interrupt in the NCU interrupt table. + */ + if (np->parent->plat_type != PLAT_TYPE_NIU) { + err = niu_set_ldg_sid(np, lp->ldg_num, port, i); + if (err) + return err; + } + } + + /* We adopt the LDG assignment ordering used by the N2 NIU + * 'interrupt' properties because that simplifies a lot of + * things. 
This ordering is: + * + * MAC + * MIF (if port zero) + * SYSERR (if port zero) + * RX channels + * TX channels + */ + + ldg_rotor = 0; + + err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], + LDN_MAC(port)); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + if (port == 0) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_MIF); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_DEVICE_ERROR); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + } + + first_chan = 0; + for (i = 0; i < port; i++) + first_chan += parent->rxchan_per_port[i]; + num_chan = parent->rxchan_per_port[port]; + + for (i = first_chan; i < (first_chan + num_chan); i++) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_RXDMA(i)); + if (err) + return err; + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + } + + first_chan = 0; + for (i = 0; i < port; i++) + first_chan += parent->txchan_per_port[i]; + num_chan = parent->txchan_per_port[port]; + for (i = first_chan; i < (first_chan + num_chan); i++) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_TXDMA(i)); + if (err) + return err; + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + } + + return 0; +} + +static void niu_ldg_free(struct niu *np) +{ + if (np->flags & NIU_FLAGS_MSIX) + pci_disable_msix(np->pdev); +} + +static int niu_get_of_props(struct niu *np) +{ +#ifdef CONFIG_SPARC64 + struct net_device *dev = np->dev; + struct device_node *dp; + const char *phy_type; + const u8 *mac_addr; + const char *model; + int prop_len; + + if (np->parent->plat_type == PLAT_TYPE_NIU) + dp = np->op->dev.of_node; + else + dp = pci_device_to_OF_node(np->pdev); + + phy_type = of_get_property(dp, "phy-type", &prop_len); + if (!phy_type) { + netdev_err(dev, "%s: OF node lacks phy-type property\n", + dp->full_name); + return -EINVAL; + } + + if (!strcmp(phy_type, "none")) + return -ENODEV; + + strcpy(np->vpd.phy_type, phy_type); + + if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { + netdev_err(dev, "%s: Illegal phy string [%s]\n", + dp->full_name, np->vpd.phy_type); + return -EINVAL; + } + + mac_addr = of_get_property(dp, "local-mac-address", &prop_len); + if (!mac_addr) { + netdev_err(dev, "%s: OF node lacks local-mac-address property\n", + dp->full_name); + return -EINVAL; + } + if (prop_len != dev->addr_len) { + netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", + dp->full_name, prop_len); + } + memcpy(dev->dev_addr, mac_addr, dev->addr_len); + if (!is_valid_ether_addr(&dev->dev_addr[0])) { + netdev_err(dev, "%s: OF MAC address is invalid\n", + dp->full_name); + netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr); + return -EINVAL; + } + + model = of_get_property(dp, "model", &prop_len); + + if (model) + strcpy(np->vpd.model, model); + + if (of_find_property(dp, "hot-swappable-phy", &prop_len)) { + np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | + NIU_FLAGS_HOTPLUG_PHY); + } + + return 0; +#else + return -EINVAL; +#endif +} + +static int niu_get_invariants(struct niu *np) +{ + int err, have_props; + u32 offset; + + err = niu_get_of_props(np); + if (err == -ENODEV) + return err; + + have_props = !err; + + err = niu_init_mac_ipp_pcs_base(np); + if (err) + return err; + + if (have_props) { + err = niu_get_and_validate_port(np); + if (err) + return err; + + } 
else { + if (np->parent->plat_type == PLAT_TYPE_NIU) + return -EINVAL; + + nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE); + offset = niu_pci_vpd_offset(np); + netif_printk(np, probe, KERN_DEBUG, np->dev, + "%s() VPD offset [%08x]\n", __func__, offset); + if (offset) + niu_pci_vpd_fetch(np, offset); + nw64(ESPC_PIO_EN, 0); + + if (np->flags & NIU_FLAGS_VPD_VALID) { + niu_pci_vpd_validate(np); + err = niu_get_and_validate_port(np); + if (err) + return err; + } + + if (!(np->flags & NIU_FLAGS_VPD_VALID)) { + err = niu_get_and_validate_port(np); + if (err) + return err; + err = niu_pci_probe_sprom(np); + if (err) + return err; + } + } + + err = niu_probe_ports(np); + if (err) + return err; + + niu_ldg_init(np); + + niu_classifier_swstate_init(np); + niu_link_config_init(np); + + err = niu_determine_phy_disposition(np); + if (!err) + err = niu_init_link(np); + + return err; +} + +static LIST_HEAD(niu_parent_list); +static DEFINE_MUTEX(niu_parent_lock); +static int niu_parent_index; + +static ssize_t show_port_phy(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); + u32 port_phy = p->port_phy; + char *orig_buf = buf; + int i; + + if (port_phy == PORT_PHY_UNKNOWN || + port_phy == PORT_PHY_INVALID) + return 0; + + for (i = 0; i < p->num_ports; i++) { + const char *type_str; + int type; + + type = phy_decode(port_phy, i); + if (type == PORT_TYPE_10G) + type_str = "10G"; + else + type_str = "1G"; + buf += sprintf(buf, + (i == 0) ? "%s" : " %s", + type_str); + } + buf += sprintf(buf, "\n"); + return buf - orig_buf; +} + +static ssize_t show_plat_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); + const char *type_str; + + switch (p->plat_type) { + case PLAT_TYPE_ATLAS: + type_str = "atlas"; + break; + case PLAT_TYPE_NIU: + type_str = "niu"; + break; + case PLAT_TYPE_VF_P0: + type_str = "vf_p0"; + break; + case PLAT_TYPE_VF_P1: + type_str = "vf_p1"; + break; + default: + type_str = "unknown"; + break; + } + + return sprintf(buf, "%s\n", type_str); +} + +static ssize_t __show_chan_per_port(struct device *dev, + struct device_attribute *attr, char *buf, + int rx) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); + char *orig_buf = buf; + u8 *arr; + int i; + + arr = (rx ? p->rxchan_per_port : p->txchan_per_port); + + for (i = 0; i < p->num_ports; i++) { + buf += sprintf(buf, + (i == 0) ? 
"%d" : " %d", + arr[i]); + } + buf += sprintf(buf, "\n"); + + return buf - orig_buf; +} + +static ssize_t show_rxchan_per_port(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return __show_chan_per_port(dev, attr, buf, 1); +} + +static ssize_t show_txchan_per_port(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return __show_chan_per_port(dev, attr, buf, 1); +} + +static ssize_t show_num_ports(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct niu_parent *p = dev_get_platdata(&plat_dev->dev); + + return sprintf(buf, "%d\n", p->num_ports); +} + +static struct device_attribute niu_parent_attributes[] = { + __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), + __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), + __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), + __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), + __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), + {} +}; + +static struct niu_parent *niu_new_parent(struct niu *np, + union niu_parent_id *id, u8 ptype) +{ + struct platform_device *plat_dev; + struct niu_parent *p; + int i; + + plat_dev = platform_device_register_simple("niu-board", niu_parent_index, + NULL, 0); + if (IS_ERR(plat_dev)) + return NULL; + + for (i = 0; niu_parent_attributes[i].attr.name; i++) { + int err = device_create_file(&plat_dev->dev, + &niu_parent_attributes[i]); + if (err) + goto fail_unregister; + } + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + goto fail_unregister; + + p->index = niu_parent_index++; + + plat_dev->dev.platform_data = p; + p->plat_dev = plat_dev; + + memcpy(&p->id, id, sizeof(*id)); + p->plat_type = ptype; + INIT_LIST_HEAD(&p->list); + atomic_set(&p->refcnt, 0); + list_add(&p->list, &niu_parent_list); + spin_lock_init(&p->lock); + + p->rxdma_clock_divider = 7500; + + p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; + if (p->plat_type == PLAT_TYPE_NIU) + p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; + + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { + int index = i - CLASS_CODE_USER_PROG1; + + p->tcam_key[index] = TCAM_KEY_TSEL; + p->flow_key[index] = (FLOW_KEY_IPSA | + FLOW_KEY_IPDA | + FLOW_KEY_PROTO | + (FLOW_KEY_L4_BYTE12 << + FLOW_KEY_L4_0_SHIFT) | + (FLOW_KEY_L4_BYTE12 << + FLOW_KEY_L4_1_SHIFT)); + } + + for (i = 0; i < LDN_MAX + 1; i++) + p->ldg_map[i] = LDG_INVALID; + + return p; + +fail_unregister: + platform_device_unregister(plat_dev); + return NULL; +} + +static struct niu_parent *niu_get_parent(struct niu *np, + union niu_parent_id *id, u8 ptype) +{ + struct niu_parent *p, *tmp; + int port = np->port; + + mutex_lock(&niu_parent_lock); + p = NULL; + list_for_each_entry(tmp, &niu_parent_list, list) { + if (!memcmp(id, &tmp->id, sizeof(*id))) { + p = tmp; + break; + } + } + if (!p) + p = niu_new_parent(np, id, ptype); + + if (p) { + char port_name[6]; + int err; + + sprintf(port_name, "port%d", port); + err = sysfs_create_link(&p->plat_dev->dev.kobj, + &np->device->kobj, + port_name); + if (!err) { + p->ports[port] = np; + atomic_inc(&p->refcnt); + } + } + mutex_unlock(&niu_parent_lock); + + return p; +} + +static void niu_put_parent(struct niu *np) +{ + struct niu_parent *p = np->parent; + u8 port = np->port; + char port_name[6]; + + BUG_ON(!p || p->ports[port] != np); + + netif_printk(np, probe, KERN_DEBUG, np->dev, + "%s() port[%u]\n", __func__, port); + + sprintf(port_name, "port%d", port); + + mutex_lock(&niu_parent_lock); + + 
sysfs_remove_link(&p->plat_dev->dev.kobj, port_name); + + p->ports[port] = NULL; + np->parent = NULL; + + if (atomic_dec_and_test(&p->refcnt)) { + list_del(&p->list); + platform_device_unregister(p->plat_dev); + } + + mutex_unlock(&niu_parent_lock); +} + +static void *niu_pci_alloc_coherent(struct device *dev, size_t size, + u64 *handle, gfp_t flag) +{ + dma_addr_t dh; + void *ret; + + ret = dma_alloc_coherent(dev, size, &dh, flag); + if (ret) + *handle = dh; + return ret; +} + +static void niu_pci_free_coherent(struct device *dev, size_t size, + void *cpu_addr, u64 handle) +{ + dma_free_coherent(dev, size, cpu_addr, handle); +} + +static u64 niu_pci_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + return dma_map_page(dev, page, offset, size, direction); +} + +static void niu_pci_unmap_page(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction) +{ + dma_unmap_page(dev, dma_address, size, direction); +} + +static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction) +{ + return dma_map_single(dev, cpu_addr, size, direction); +} + +static void niu_pci_unmap_single(struct device *dev, u64 dma_address, + size_t size, + enum dma_data_direction direction) +{ + dma_unmap_single(dev, dma_address, size, direction); +} + +static const struct niu_ops niu_pci_ops = { + .alloc_coherent = niu_pci_alloc_coherent, + .free_coherent = niu_pci_free_coherent, + .map_page = niu_pci_map_page, + .unmap_page = niu_pci_unmap_page, + .map_single = niu_pci_map_single, + .unmap_single = niu_pci_unmap_single, +}; + +static void niu_driver_version(void) +{ + static int niu_version_printed; + + if (niu_version_printed++ == 0) + pr_info("%s", version); +} + +static struct net_device *niu_alloc_and_init(struct device *gen_dev, + struct pci_dev *pdev, + struct platform_device *op, + const struct niu_ops *ops, u8 port) +{ + struct net_device *dev; + struct niu *np; + + dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN); + if (!dev) + return NULL; + + SET_NETDEV_DEV(dev, gen_dev); + + np = netdev_priv(dev); + np->dev = dev; + np->pdev = pdev; + np->op = op; + np->device = gen_dev; + np->ops = ops; + + np->msg_enable = niu_debug; + + spin_lock_init(&np->lock); + INIT_WORK(&np->reset_task, niu_reset_task); + + np->port = port; + + return dev; +} + +static const struct net_device_ops niu_netdev_ops = { + .ndo_open = niu_open, + .ndo_stop = niu_close, + .ndo_start_xmit = niu_start_xmit, + .ndo_get_stats64 = niu_get_stats, + .ndo_set_rx_mode = niu_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = niu_set_mac_addr, + .ndo_do_ioctl = niu_ioctl, + .ndo_tx_timeout = niu_tx_timeout, + .ndo_change_mtu = niu_change_mtu, +}; + +static void niu_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &niu_netdev_ops; + dev->ethtool_ops = &niu_ethtool_ops; + dev->watchdog_timeo = NIU_TX_TIMEOUT; +} + +static void niu_device_announce(struct niu *np) +{ + struct net_device *dev = np->dev; + + pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr); + + if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { + pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", + dev->name, + (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), + (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), + (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"), + (np->mac_xcvr == MAC_XCVR_MII ? "MII" : + (np->mac_xcvr == MAC_XCVR_PCS ? 
"PCS" : "XPCS")), + np->vpd.phy_type); + } else { + pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", + dev->name, + (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), + (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), + (np->flags & NIU_FLAGS_FIBER ? "FIBER" : + (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" : + "COPPER")), + (np->mac_xcvr == MAC_XCVR_MII ? "MII" : + (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), + np->vpd.phy_type); + } +} + +static void niu_set_basic_features(struct net_device *dev) +{ + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH; + dev->features |= dev->hw_features | NETIF_F_RXCSUM; +} + +static int niu_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + union niu_parent_id parent_id; + struct net_device *dev; + struct niu *np; + int err; + u64 dma_mask; + + niu_driver_version(); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || + !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + if (!pci_is_pcie(pdev)) { + dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n"); + err = -ENODEV; + goto err_out_free_res; + } + + dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, + &niu_pci_ops, PCI_FUNC(pdev->devfn)); + if (!dev) { + err = -ENOMEM; + goto err_out_free_res; + } + np = netdev_priv(dev); + + memset(&parent_id, 0, sizeof(parent_id)); + parent_id.pci.domain = pci_domain_nr(pdev->bus); + parent_id.pci.bus = pdev->bus->number; + parent_id.pci.device = PCI_SLOT(pdev->devfn); + + np->parent = niu_get_parent(np, &parent_id, + PLAT_TYPE_ATLAS); + if (!np->parent) { + err = -ENOMEM; + goto err_out_free_dev; + } + + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN, + PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | + PCI_EXP_DEVCTL_RELAX_EN); + + dma_mask = DMA_BIT_MASK(44); + err = pci_set_dma_mask(pdev, dma_mask); + if (!err) { + dev->features |= NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, dma_mask); + if (err) { + dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); + goto err_out_release_parent; + } + } + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto err_out_release_parent; + } + } + + niu_set_basic_features(dev); + + dev->priv_flags |= IFF_UNICAST_FLT; + + np->regs = pci_ioremap_bar(pdev, 0); + if (!np->regs) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_release_parent; + } + + pci_set_master(pdev); + pci_save_state(pdev); + + dev->irq = pdev->irq; + + niu_assign_netdev_ops(dev); + + err = niu_get_invariants(np); + if (err) { + if (err != -ENODEV) + dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n"); + goto err_out_iounmap; + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); + goto err_out_iounmap; + } + + pci_set_drvdata(pdev, dev); + + niu_device_announce(np); + + return 0; + +err_out_iounmap: + if (np->regs) { + 
iounmap(np->regs); + np->regs = NULL; + } + +err_out_release_parent: + niu_put_parent(np); + +err_out_free_dev: + free_netdev(dev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_disable_device(pdev); + + return err; +} + +static void niu_pci_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct niu *np = netdev_priv(dev); + + unregister_netdev(dev); + if (np->regs) { + iounmap(np->regs); + np->regs = NULL; + } + + niu_ldg_free(np); + + niu_put_parent(np); + + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + } +} + +static int niu_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct niu *np = netdev_priv(dev); + unsigned long flags; + + if (!netif_running(dev)) + return 0; + + flush_work(&np->reset_task); + niu_netif_stop(np); + + del_timer_sync(&np->timer); + + spin_lock_irqsave(&np->lock, flags); + niu_enable_interrupts(np, 0); + spin_unlock_irqrestore(&np->lock, flags); + + netif_device_detach(dev); + + spin_lock_irqsave(&np->lock, flags); + niu_stop_hw(np); + spin_unlock_irqrestore(&np->lock, flags); + + pci_save_state(pdev); + + return 0; +} + +static int niu_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct niu *np = netdev_priv(dev); + unsigned long flags; + int err; + + if (!netif_running(dev)) + return 0; + + pci_restore_state(pdev); + + netif_device_attach(dev); + + spin_lock_irqsave(&np->lock, flags); + + err = niu_init_hw(np); + if (!err) { + np->timer.expires = jiffies + HZ; + add_timer(&np->timer); + niu_netif_start(np); + } + + spin_unlock_irqrestore(&np->lock, flags); + + return err; +} + +static struct pci_driver niu_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = niu_pci_tbl, + .probe = niu_pci_init_one, + .remove = niu_pci_remove_one, + .suspend = niu_suspend, + .resume = niu_resume, +}; + +#ifdef CONFIG_SPARC64 +static void *niu_phys_alloc_coherent(struct device *dev, size_t size, + u64 *dma_addr, gfp_t flag) +{ + unsigned long order = get_order(size); + unsigned long page = __get_free_pages(flag, order); + + if (page == 0UL) + return NULL; + memset((char *)page, 0, PAGE_SIZE << order); + *dma_addr = __pa(page); + + return (void *) page; +} + +static void niu_phys_free_coherent(struct device *dev, size_t size, + void *cpu_addr, u64 handle) +{ + unsigned long order = get_order(size); + + free_pages((unsigned long) cpu_addr, order); +} + +static u64 niu_phys_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + return page_to_phys(page) + offset; +} + +static void niu_phys_unmap_page(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction) +{ + /* Nothing to do. */ +} + +static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction) +{ + return __pa(cpu_addr); +} + +static void niu_phys_unmap_single(struct device *dev, u64 dma_address, + size_t size, + enum dma_data_direction direction) +{ + /* Nothing to do. 
*/ +} + +static const struct niu_ops niu_phys_ops = { + .alloc_coherent = niu_phys_alloc_coherent, + .free_coherent = niu_phys_free_coherent, + .map_page = niu_phys_map_page, + .unmap_page = niu_phys_unmap_page, + .map_single = niu_phys_map_single, + .unmap_single = niu_phys_unmap_single, +}; + +static int niu_of_probe(struct platform_device *op) +{ + union niu_parent_id parent_id; + struct net_device *dev; + struct niu *np; + const u32 *reg; + int err; + + niu_driver_version(); + + reg = of_get_property(op->dev.of_node, "reg", NULL); + if (!reg) { + dev_err(&op->dev, "%s: No 'reg' property, aborting\n", + op->dev.of_node->full_name); + return -ENODEV; + } + + dev = niu_alloc_and_init(&op->dev, NULL, op, + &niu_phys_ops, reg[0] & 0x1); + if (!dev) { + err = -ENOMEM; + goto err_out; + } + np = netdev_priv(dev); + + memset(&parent_id, 0, sizeof(parent_id)); + parent_id.of = of_get_parent(op->dev.of_node); + + np->parent = niu_get_parent(np, &parent_id, + PLAT_TYPE_NIU); + if (!np->parent) { + err = -ENOMEM; + goto err_out_free_dev; + } + + niu_set_basic_features(dev); + + np->regs = of_ioremap(&op->resource[1], 0, + resource_size(&op->resource[1]), + "niu regs"); + if (!np->regs) { + dev_err(&op->dev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_release_parent; + } + + np->vir_regs_1 = of_ioremap(&op->resource[2], 0, + resource_size(&op->resource[2]), + "niu vregs-1"); + if (!np->vir_regs_1) { + dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + np->vir_regs_2 = of_ioremap(&op->resource[3], 0, + resource_size(&op->resource[3]), + "niu vregs-2"); + if (!np->vir_regs_2) { + dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + niu_assign_netdev_ops(dev); + + err = niu_get_invariants(np); + if (err) { + if (err != -ENODEV) + dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n"); + goto err_out_iounmap; + } + + err = register_netdev(dev); + if (err) { + dev_err(&op->dev, "Cannot register net device, aborting\n"); + goto err_out_iounmap; + } + + platform_set_drvdata(op, dev); + + niu_device_announce(np); + + return 0; + +err_out_iounmap: + if (np->vir_regs_1) { + of_iounmap(&op->resource[2], np->vir_regs_1, + resource_size(&op->resource[2])); + np->vir_regs_1 = NULL; + } + + if (np->vir_regs_2) { + of_iounmap(&op->resource[3], np->vir_regs_2, + resource_size(&op->resource[3])); + np->vir_regs_2 = NULL; + } + + if (np->regs) { + of_iounmap(&op->resource[1], np->regs, + resource_size(&op->resource[1])); + np->regs = NULL; + } + +err_out_release_parent: + niu_put_parent(np); + +err_out_free_dev: + free_netdev(dev); + +err_out: + return err; +} + +static int niu_of_remove(struct platform_device *op) +{ + struct net_device *dev = platform_get_drvdata(op); + + if (dev) { + struct niu *np = netdev_priv(dev); + + unregister_netdev(dev); + + if (np->vir_regs_1) { + of_iounmap(&op->resource[2], np->vir_regs_1, + resource_size(&op->resource[2])); + np->vir_regs_1 = NULL; + } + + if (np->vir_regs_2) { + of_iounmap(&op->resource[3], np->vir_regs_2, + resource_size(&op->resource[3])); + np->vir_regs_2 = NULL; + } + + if (np->regs) { + of_iounmap(&op->resource[1], np->regs, + resource_size(&op->resource[1])); + np->regs = NULL; + } + + niu_ldg_free(np); + + niu_put_parent(np); + + free_netdev(dev); + } + return 0; +} + +static const struct of_device_id niu_match[] = { + { + .name = "network", + .compatible = "SUNW,niusl", + }, + {}, +}; 
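The niu_pci_ops table defined earlier in this file and the niu_phys_ops table at the top of this CONFIG_SPARC64 block fill the same struct niu_ops vtable, so the descriptor and buffer code can allocate and map memory without knowing whether it runs on a PCI Atlas card (real DMA API calls) or on the on-chip N2 NIU, where niu_phys_ops simply hands back physical addresses. The fragment below is an illustrative sketch only, not part of the patch: it assumes just the struct niu fields visible here (device, ops), and the helper name niu_map_rx_page is invented for the example.

/* Sketch: dispatch a page mapping through whichever niu_ops backend
 * niu_alloc_and_init() installed (niu_pci_ops or niu_phys_ops).
 */
#include <linux/dma-mapping.h>

static u64 niu_map_rx_page(struct niu *np, struct page *page, size_t size)
{
	/* np->device and np->ops are filled in by niu_alloc_and_init() */
	return np->ops->map_page(np->device, page, 0, size, DMA_FROM_DEVICE);
}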
+MODULE_DEVICE_TABLE(of, niu_match); + +static struct platform_driver niu_of_driver = { + .driver = { + .name = "niu", + .of_match_table = niu_match, + }, + .probe = niu_of_probe, + .remove = niu_of_remove, +}; + +#endif /* CONFIG_SPARC64 */ + +static int __init niu_init(void) +{ + int err = 0; + + BUILD_BUG_ON(PAGE_SIZE < 4 * 1024); + + niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); + +#ifdef CONFIG_SPARC64 + err = platform_driver_register(&niu_of_driver); +#endif + + if (!err) { + err = pci_register_driver(&niu_pci_driver); +#ifdef CONFIG_SPARC64 + if (err) + platform_driver_unregister(&niu_of_driver); +#endif + } + + return err; +} + +static void __exit niu_exit(void) +{ + pci_unregister_driver(&niu_pci_driver); +#ifdef CONFIG_SPARC64 + platform_driver_unregister(&niu_of_driver); +#endif +} + +module_init(niu_init); +module_exit(niu_exit); diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h new file mode 100644 index 000000000..51e177e18 --- /dev/null +++ b/drivers/net/ethernet/sun/niu.h @@ -0,0 +1,3306 @@ +/* niu.h: Definitions for Neptune ethernet driver. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#ifndef _NIU_H +#define _NIU_H + +#define PIO 0x000000UL +#define FZC_PIO 0x080000UL +#define FZC_MAC 0x180000UL +#define FZC_IPP 0x280000UL +#define FFLP 0x300000UL +#define FZC_FFLP 0x380000UL +#define PIO_VADDR 0x400000UL +#define ZCP 0x500000UL +#define FZC_ZCP 0x580000UL +#define DMC 0x600000UL +#define FZC_DMC 0x680000UL +#define TXC 0x700000UL +#define FZC_TXC 0x780000UL +#define PIO_LDSV 0x800000UL +#define PIO_PIO_LDGIM 0x900000UL +#define PIO_IMASK0 0xa00000UL +#define PIO_IMASK1 0xb00000UL +#define FZC_PROM 0xc80000UL +#define FZC_PIM 0xd80000UL + +#define LDSV0(LDG) (PIO_LDSV + 0x00000UL + (LDG) * 0x2000UL) +#define LDSV1(LDG) (PIO_LDSV + 0x00008UL + (LDG) * 0x2000UL) +#define LDSV2(LDG) (PIO_LDSV + 0x00010UL + (LDG) * 0x2000UL) + +#define LDG_IMGMT(LDG) (PIO_LDSV + 0x00018UL + (LDG) * 0x2000UL) +#define LDG_IMGMT_ARM 0x0000000080000000ULL +#define LDG_IMGMT_TIMER 0x000000000000003fULL + +#define LD_IM0(IDX) (PIO_IMASK0 + 0x00000UL + (IDX) * 0x2000UL) +#define LD_IM0_MASK 0x0000000000000003ULL + +#define LD_IM1(IDX) (PIO_IMASK1 + 0x00000UL + (IDX) * 0x2000UL) +#define LD_IM1_MASK 0x0000000000000003ULL + +#define LDG_TIMER_RES (FZC_PIO + 0x00008UL) +#define LDG_TIMER_RES_VAL 0x00000000000fffffULL + +#define DIRTY_TID_CTL (FZC_PIO + 0x00010UL) +#define DIRTY_TID_CTL_NPTHRED 0x00000000003f0000ULL +#define DIRTY_TID_CTL_RDTHRED 0x00000000000003f0ULL +#define DIRTY_TID_CTL_DTIDCLR 0x0000000000000002ULL +#define DIRTY_TID_CTL_DTIDENAB 0x0000000000000001ULL + +#define DIRTY_TID_STAT (FZC_PIO + 0x00018UL) +#define DIRTY_TID_STAT_NPWSTAT 0x0000000000003f00ULL +#define DIRTY_TID_STAT_RDSTAT 0x000000000000003fULL + +#define RST_CTL (FZC_PIO + 0x00038UL) +#define RST_CTL_MAC_RST3 0x0000000000400000ULL +#define RST_CTL_MAC_RST2 0x0000000000200000ULL +#define RST_CTL_MAC_RST1 0x0000000000100000ULL +#define RST_CTL_MAC_RST0 0x0000000000080000ULL +#define RST_CTL_ACK_TO_EN 0x0000000000000800ULL +#define RST_CTL_ACK_TO_VAL 0x00000000000007feULL + +#define SMX_CFIG_DAT (FZC_PIO + 0x00040UL) +#define SMX_CFIG_DAT_RAS_DET 0x0000000080000000ULL +#define SMX_CFIG_DAT_RAS_INJ 0x0000000040000000ULL +#define SMX_CFIG_DAT_XACT_TO 0x000000000fffffffULL + +#define SMX_INT_STAT (FZC_PIO + 0x00048UL) +#define SMX_INT_STAT_STAT 0x00000000ffffffffULL + +#define SMX_CTL (FZC_PIO + 0x00050UL) +#define SMX_CTL_CTL 0x00000000ffffffffULL + +#define 
SMX_DBG_VEC (FZC_PIO + 0x00058UL) +#define SMX_DBG_VEC_VEC 0x00000000ffffffffULL + +#define PIO_DBG_SEL (FZC_PIO + 0x00060UL) +#define PIO_DBG_SEL_SEL 0x000000000000003fULL + +#define PIO_TRAIN_VEC (FZC_PIO + 0x00068UL) +#define PIO_TRAIN_VEC_VEC 0x00000000ffffffffULL + +#define PIO_ARB_CTL (FZC_PIO + 0x00070UL) +#define PIO_ARB_CTL_CTL 0x00000000ffffffffULL + +#define PIO_ARB_DBG_VEC (FZC_PIO + 0x00078UL) +#define PIO_ARB_DBG_VEC_VEC 0x00000000ffffffffULL + +#define SYS_ERR_MASK (FZC_PIO + 0x00090UL) +#define SYS_ERR_MASK_META2 0x0000000000000400ULL +#define SYS_ERR_MASK_META1 0x0000000000000200ULL +#define SYS_ERR_MASK_PEU 0x0000000000000100ULL +#define SYS_ERR_MASK_TXC 0x0000000000000080ULL +#define SYS_ERR_MASK_RDMC 0x0000000000000040ULL +#define SYS_ERR_MASK_TDMC 0x0000000000000020ULL +#define SYS_ERR_MASK_ZCP 0x0000000000000010ULL +#define SYS_ERR_MASK_FFLP 0x0000000000000008ULL +#define SYS_ERR_MASK_IPP 0x0000000000000004ULL +#define SYS_ERR_MASK_MAC 0x0000000000000002ULL +#define SYS_ERR_MASK_SMX 0x0000000000000001ULL + +#define SYS_ERR_STAT (FZC_PIO + 0x00098UL) +#define SYS_ERR_STAT_META2 0x0000000000000400ULL +#define SYS_ERR_STAT_META1 0x0000000000000200ULL +#define SYS_ERR_STAT_PEU 0x0000000000000100ULL +#define SYS_ERR_STAT_TXC 0x0000000000000080ULL +#define SYS_ERR_STAT_RDMC 0x0000000000000040ULL +#define SYS_ERR_STAT_TDMC 0x0000000000000020ULL +#define SYS_ERR_STAT_ZCP 0x0000000000000010ULL +#define SYS_ERR_STAT_FFLP 0x0000000000000008ULL +#define SYS_ERR_STAT_IPP 0x0000000000000004ULL +#define SYS_ERR_STAT_MAC 0x0000000000000002ULL +#define SYS_ERR_STAT_SMX 0x0000000000000001ULL + +#define SID(LDG) (FZC_PIO + 0x10200UL + (LDG) * 8UL) +#define SID_FUNC 0x0000000000000060ULL +#define SID_FUNC_SHIFT 5 +#define SID_VECTOR 0x000000000000001fULL +#define SID_VECTOR_SHIFT 0 + +#define LDG_NUM(LDN) (FZC_PIO + 0x20000UL + (LDN) * 8UL) + +#define XMAC_PORT0_OFF (FZC_MAC + 0x000000) +#define XMAC_PORT1_OFF (FZC_MAC + 0x006000) +#define BMAC_PORT2_OFF (FZC_MAC + 0x00c000) +#define BMAC_PORT3_OFF (FZC_MAC + 0x010000) + +/* XMAC registers, offset from np->mac_regs */ + +#define XTXMAC_SW_RST 0x00000UL +#define XTXMAC_SW_RST_REG_RS 0x0000000000000002ULL +#define XTXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL + +#define XRXMAC_SW_RST 0x00008UL +#define XRXMAC_SW_RST_REG_RS 0x0000000000000002ULL +#define XRXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL + +#define XTXMAC_STATUS 0x00020UL +#define XTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL +#define XTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL +#define XTXMAC_STATUS_TXFIFO_XFR_ERR 0x0000000000000010ULL +#define XTXMAC_STATUS_TXMAC_OFLOW 0x0000000000000008ULL +#define XTXMAC_STATUS_MAX_PSIZE_ERR 0x0000000000000004ULL +#define XTXMAC_STATUS_TXMAC_UFLOW 0x0000000000000002ULL +#define XTXMAC_STATUS_FRAME_XMITED 0x0000000000000001ULL + +#define XRXMAC_STATUS 0x00028UL +#define XRXMAC_STATUS_RXHIST7_CNT_EXP 0x0000000000100000ULL +#define XRXMAC_STATUS_LCL_FLT_STATUS 0x0000000000080000ULL +#define XRXMAC_STATUS_RFLT_DET 0x0000000000040000ULL +#define XRXMAC_STATUS_LFLT_CNT_EXP 0x0000000000020000ULL +#define XRXMAC_STATUS_PHY_MDINT 0x0000000000010000ULL +#define XRXMAC_STATUS_ALIGNERR_CNT_EXP 0x0000000000010000ULL +#define XRXMAC_STATUS_RXFRAG_CNT_EXP 0x0000000000008000ULL +#define XRXMAC_STATUS_RXMULTF_CNT_EXP 0x0000000000004000ULL +#define XRXMAC_STATUS_RXBCAST_CNT_EXP 0x0000000000002000ULL +#define XRXMAC_STATUS_RXHIST6_CNT_EXP 0x0000000000001000ULL +#define XRXMAC_STATUS_RXHIST5_CNT_EXP 0x0000000000000800ULL +#define 
XRXMAC_STATUS_RXHIST4_CNT_EXP 0x0000000000000400ULL +#define XRXMAC_STATUS_RXHIST3_CNT_EXP 0x0000000000000200ULL +#define XRXMAC_STATUS_RXHIST2_CNT_EXP 0x0000000000000100ULL +#define XRXMAC_STATUS_RXHIST1_CNT_EXP 0x0000000000000080ULL +#define XRXMAC_STATUS_RXOCTET_CNT_EXP 0x0000000000000040ULL +#define XRXMAC_STATUS_CVIOLERR_CNT_EXP 0x0000000000000020ULL +#define XRXMAC_STATUS_LENERR_CNT_EXP 0x0000000000000010ULL +#define XRXMAC_STATUS_CRCERR_CNT_EXP 0x0000000000000008ULL +#define XRXMAC_STATUS_RXUFLOW 0x0000000000000004ULL +#define XRXMAC_STATUS_RXOFLOW 0x0000000000000002ULL +#define XRXMAC_STATUS_FRAME_RCVD 0x0000000000000001ULL + +#define XMAC_FC_STAT 0x00030UL +#define XMAC_FC_STAT_RX_RCV_PAUSE_TIME 0x00000000ffff0000ULL +#define XMAC_FC_STAT_TX_MAC_NPAUSE 0x0000000000000004ULL +#define XMAC_FC_STAT_TX_MAC_PAUSE 0x0000000000000002ULL +#define XMAC_FC_STAT_RX_MAC_RPAUSE 0x0000000000000001ULL + +#define XTXMAC_STAT_MSK 0x00040UL +#define XTXMAC_STAT_MSK_FRAME_CNT_EXP 0x0000000000000800ULL +#define XTXMAC_STAT_MSK_BYTE_CNT_EXP 0x0000000000000400ULL +#define XTXMAC_STAT_MSK_TXFIFO_XFR_ERR 0x0000000000000010ULL +#define XTXMAC_STAT_MSK_TXMAC_OFLOW 0x0000000000000008ULL +#define XTXMAC_STAT_MSK_MAX_PSIZE_ERR 0x0000000000000004ULL +#define XTXMAC_STAT_MSK_TXMAC_UFLOW 0x0000000000000002ULL +#define XTXMAC_STAT_MSK_FRAME_XMITED 0x0000000000000001ULL + +#define XRXMAC_STAT_MSK 0x00048UL +#define XRXMAC_STAT_MSK_LCL_FLT_STAT_MSK 0x0000000000080000ULL +#define XRXMAC_STAT_MSK_RFLT_DET 0x0000000000040000ULL +#define XRXMAC_STAT_MSK_LFLT_CNT_EXP 0x0000000000020000ULL +#define XRXMAC_STAT_MSK_PHY_MDINT 0x0000000000010000ULL +#define XRXMAC_STAT_MSK_RXFRAG_CNT_EXP 0x0000000000008000ULL +#define XRXMAC_STAT_MSK_RXMULTF_CNT_EXP 0x0000000000004000ULL +#define XRXMAC_STAT_MSK_RXBCAST_CNT_EXP 0x0000000000002000ULL +#define XRXMAC_STAT_MSK_RXHIST6_CNT_EXP 0x0000000000001000ULL +#define XRXMAC_STAT_MSK_RXHIST5_CNT_EXP 0x0000000000000800ULL +#define XRXMAC_STAT_MSK_RXHIST4_CNT_EXP 0x0000000000000400ULL +#define XRXMAC_STAT_MSK_RXHIST3_CNT_EXP 0x0000000000000200ULL +#define XRXMAC_STAT_MSK_RXHIST2_CNT_EXP 0x0000000000000100ULL +#define XRXMAC_STAT_MSK_RXHIST1_CNT_EXP 0x0000000000000080ULL +#define XRXMAC_STAT_MSK_RXOCTET_CNT_EXP 0x0000000000000040ULL +#define XRXMAC_STAT_MSK_CVIOLERR_CNT_EXP 0x0000000000000020ULL +#define XRXMAC_STAT_MSK_LENERR_CNT_EXP 0x0000000000000010ULL +#define XRXMAC_STAT_MSK_CRCERR_CNT_EXP 0x0000000000000008ULL +#define XRXMAC_STAT_MSK_RXUFLOW_CNT_EXP 0x0000000000000004ULL +#define XRXMAC_STAT_MSK_RXOFLOW_CNT_EXP 0x0000000000000002ULL +#define XRXMAC_STAT_MSK_FRAME_RCVD 0x0000000000000001ULL + +#define XMAC_FC_MSK 0x00050UL +#define XMAC_FC_MSK_TX_MAC_NPAUSE 0x0000000000000004ULL +#define XMAC_FC_MSK_TX_MAC_PAUSE 0x0000000000000002ULL +#define XMAC_FC_MSK_RX_MAC_RPAUSE 0x0000000000000001ULL + +#define XMAC_CONFIG 0x00060UL +#define XMAC_CONFIG_SEL_CLK_25MHZ 0x0000000080000000ULL +#define XMAC_CONFIG_1G_PCS_BYPASS 0x0000000040000000ULL +#define XMAC_CONFIG_10G_XPCS_BYPASS 0x0000000020000000ULL +#define XMAC_CONFIG_MODE_MASK 0x0000000018000000ULL +#define XMAC_CONFIG_MODE_XGMII 0x0000000000000000ULL +#define XMAC_CONFIG_MODE_GMII 0x0000000008000000ULL +#define XMAC_CONFIG_MODE_MII 0x0000000010000000ULL +#define XMAC_CONFIG_LFS_DISABLE 0x0000000004000000ULL +#define XMAC_CONFIG_LOOPBACK 0x0000000002000000ULL +#define XMAC_CONFIG_TX_OUTPUT_EN 0x0000000001000000ULL +#define XMAC_CONFIG_SEL_POR_CLK_SRC 0x0000000000800000ULL +#define XMAC_CONFIG_LED_POLARITY 0x0000000000400000ULL +#define 
XMAC_CONFIG_FORCE_LED_ON 0x0000000000200000ULL +#define XMAC_CONFIG_PASS_FLOW_CTRL 0x0000000000100000ULL +#define XMAC_CONFIG_RCV_PAUSE_ENABLE 0x0000000000080000ULL +#define XMAC_CONFIG_MAC2IPP_PKT_CNT_EN 0x0000000000040000ULL +#define XMAC_CONFIG_STRIP_CRC 0x0000000000020000ULL +#define XMAC_CONFIG_ADDR_FILTER_EN 0x0000000000010000ULL +#define XMAC_CONFIG_HASH_FILTER_EN 0x0000000000008000ULL +#define XMAC_CONFIG_RX_CODEV_CHK_DIS 0x0000000000004000ULL +#define XMAC_CONFIG_RESERVED_MULTICAST 0x0000000000002000ULL +#define XMAC_CONFIG_RX_CRC_CHK_DIS 0x0000000000001000ULL +#define XMAC_CONFIG_ERR_CHK_DIS 0x0000000000000800ULL +#define XMAC_CONFIG_PROMISC_GROUP 0x0000000000000400ULL +#define XMAC_CONFIG_PROMISCUOUS 0x0000000000000200ULL +#define XMAC_CONFIG_RX_MAC_ENABLE 0x0000000000000100ULL +#define XMAC_CONFIG_WARNING_MSG_EN 0x0000000000000080ULL +#define XMAC_CONFIG_ALWAYS_NO_CRC 0x0000000000000008ULL +#define XMAC_CONFIG_VAR_MIN_IPG_EN 0x0000000000000004ULL +#define XMAC_CONFIG_STRETCH_MODE 0x0000000000000002ULL +#define XMAC_CONFIG_TX_ENABLE 0x0000000000000001ULL + +#define XMAC_IPG 0x00080UL +#define XMAC_IPG_STRETCH_CONST 0x0000000000e00000ULL +#define XMAC_IPG_STRETCH_CONST_SHIFT 21 +#define XMAC_IPG_STRETCH_RATIO 0x00000000001f0000ULL +#define XMAC_IPG_STRETCH_RATIO_SHIFT 16 +#define XMAC_IPG_IPG_MII_GMII 0x000000000000ff00ULL +#define XMAC_IPG_IPG_MII_GMII_SHIFT 8 +#define XMAC_IPG_IPG_XGMII 0x0000000000000007ULL +#define XMAC_IPG_IPG_XGMII_SHIFT 0 + +#define IPG_12_15_XGMII 3 +#define IPG_16_19_XGMII 4 +#define IPG_20_23_XGMII 5 +#define IPG_12_MII_GMII 10 +#define IPG_13_MII_GMII 11 +#define IPG_14_MII_GMII 12 +#define IPG_15_MII_GMII 13 +#define IPG_16_MII_GMII 14 + +#define XMAC_MIN 0x00088UL +#define XMAC_MIN_RX_MIN_PKT_SIZE 0x000000003ff00000ULL +#define XMAC_MIN_RX_MIN_PKT_SIZE_SHFT 20 +#define XMAC_MIN_SLOT_TIME 0x000000000003fc00ULL +#define XMAC_MIN_SLOT_TIME_SHFT 10 +#define XMAC_MIN_TX_MIN_PKT_SIZE 0x00000000000003ffULL +#define XMAC_MIN_TX_MIN_PKT_SIZE_SHFT 0 + +#define XMAC_MAX 0x00090UL +#define XMAC_MAX_FRAME_SIZE 0x0000000000003fffULL +#define XMAC_MAX_FRAME_SIZE_SHFT 0 + +#define XMAC_ADDR0 0x000a0UL +#define XMAC_ADDR0_ADDR0 0x000000000000ffffULL + +#define XMAC_ADDR1 0x000a8UL +#define XMAC_ADDR1_ADDR1 0x000000000000ffffULL + +#define XMAC_ADDR2 0x000b0UL +#define XMAC_ADDR2_ADDR2 0x000000000000ffffULL + +#define XMAC_ADDR_CMPEN 0x00208UL +#define XMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL +#define XMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL +#define XMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL +#define XMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL +#define XMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL +#define XMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL +#define XMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL +#define XMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL +#define XMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL +#define XMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL +#define XMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL +#define XMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL +#define XMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL +#define XMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL +#define XMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL +#define XMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL + +#define XMAC_NUM_ALT_ADDR 16 + +#define XMAC_ALT_ADDR0(NUM) (0x00218UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL + +#define XMAC_ALT_ADDR1(NUM) (0x00220UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL + +#define XMAC_ALT_ADDR2(NUM) (0x00228UL + (NUM)*0x18UL) +#define 
XMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL + +#define XMAC_ADD_FILT0 0x00818UL +#define XMAC_ADD_FILT0_FILT0 0x000000000000ffffULL + +#define XMAC_ADD_FILT1 0x00820UL +#define XMAC_ADD_FILT1_FILT1 0x000000000000ffffULL + +#define XMAC_ADD_FILT2 0x00828UL +#define XMAC_ADD_FILT2_FILT2 0x000000000000ffffULL + +#define XMAC_ADD_FILT12_MASK 0x00830UL +#define XMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL + +#define XMAC_ADD_FILT00_MASK 0x00838UL +#define XMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL + +#define XMAC_HASH_TBL(NUM) (0x00840UL + (NUM) * 0x8UL) +#define XMAC_HASH_TBL_VAL 0x000000000000ffffULL + +#define XMAC_NUM_HOST_INFO 20 + +#define XMAC_HOST_INFO(NUM) (0x00900UL + (NUM) * 0x8UL) + +#define XMAC_PA_DATA0 0x00b80UL +#define XMAC_PA_DATA0_VAL 0x00000000ffffffffULL + +#define XMAC_PA_DATA1 0x00b88UL +#define XMAC_PA_DATA1_VAL 0x00000000ffffffffULL + +#define XMAC_DEBUG_SEL 0x00b90UL +#define XMAC_DEBUG_SEL_XMAC 0x0000000000000078ULL +#define XMAC_DEBUG_SEL_MAC 0x0000000000000007ULL + +#define XMAC_TRAIN_VEC 0x00b98UL +#define XMAC_TRAIN_VEC_VAL 0x00000000ffffffffULL + +#define RXMAC_BT_CNT 0x00100UL +#define RXMAC_BT_CNT_COUNT 0x00000000ffffffffULL + +#define RXMAC_BC_FRM_CNT 0x00108UL +#define RXMAC_BC_FRM_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_MC_FRM_CNT 0x00110UL +#define RXMAC_MC_FRM_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_FRAG_CNT 0x00118UL +#define RXMAC_FRAG_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT1 0x00120UL +#define RXMAC_HIST_CNT1_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT2 0x00128UL +#define RXMAC_HIST_CNT2_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT3 0x00130UL +#define RXMAC_HIST_CNT3_COUNT 0x00000000000fffffULL + +#define RXMAC_HIST_CNT4 0x00138UL +#define RXMAC_HIST_CNT4_COUNT 0x000000000007ffffULL + +#define RXMAC_HIST_CNT5 0x00140UL +#define RXMAC_HIST_CNT5_COUNT 0x000000000003ffffULL + +#define RXMAC_HIST_CNT6 0x00148UL +#define RXMAC_HIST_CNT6_COUNT 0x000000000000ffffULL + +#define RXMAC_MPSZER_CNT 0x00150UL +#define RXMAC_MPSZER_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_CRC_ER_CNT 0x00158UL +#define RXMAC_CRC_ER_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_CD_VIO_CNT 0x00160UL +#define RXMAC_CD_VIO_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_ALIGN_ERR_CNT 0x00168UL +#define RXMAC_ALIGN_ERR_CNT_COUNT 0x00000000000000ffULL + +#define TXMAC_FRM_CNT 0x00170UL +#define TXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL + +#define TXMAC_BYTE_CNT 0x00178UL +#define TXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define LINK_FAULT_CNT 0x00180UL +#define LINK_FAULT_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_HIST_CNT7 0x00188UL +#define RXMAC_HIST_CNT7_COUNT 0x0000000007ffffffULL + +#define XMAC_SM_REG 0x001a8UL +#define XMAC_SM_REG_STATE 0x00000000ffffffffULL + +#define XMAC_INTER1 0x001b0UL +#define XMAC_INTERN1_SIGNALS1 0x00000000ffffffffULL + +#define XMAC_INTER2 0x001b8UL +#define XMAC_INTERN2_SIGNALS2 0x00000000ffffffffULL + +/* BMAC registers, offset from np->mac_regs */ + +#define BTXMAC_SW_RST 0x00000UL +#define BTXMAC_SW_RST_RESET 0x0000000000000001ULL + +#define BRXMAC_SW_RST 0x00008UL +#define BRXMAC_SW_RST_RESET 0x0000000000000001ULL + +#define BMAC_SEND_PAUSE 0x00010UL +#define BMAC_SEND_PAUSE_SEND 0x0000000000010000ULL +#define BMAC_SEND_PAUSE_TIME 0x000000000000ffffULL + +#define BTXMAC_STATUS 0x00020UL +#define BTXMAC_STATUS_XMIT 0x0000000000000001ULL +#define BTXMAC_STATUS_UNDERRUN 0x0000000000000002ULL +#define BTXMAC_STATUS_MAX_PKT_ERR 0x0000000000000004ULL +#define 
BTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL +#define BTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL + +#define BRXMAC_STATUS 0x00028UL +#define BRXMAC_STATUS_RX_PKT 0x0000000000000001ULL +#define BRXMAC_STATUS_OVERFLOW 0x0000000000000002ULL +#define BRXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000004ULL +#define BRXMAC_STATUS_ALIGN_ERR_EXP 0x0000000000000008ULL +#define BRXMAC_STATUS_CRC_ERR_EXP 0x0000000000000010ULL +#define BRXMAC_STATUS_LEN_ERR_EXP 0x0000000000000020ULL + +#define BMAC_CTRL_STATUS 0x00030UL +#define BMAC_CTRL_STATUS_PAUSE_RECV 0x0000000000000001ULL +#define BMAC_CTRL_STATUS_PAUSE 0x0000000000000002ULL +#define BMAC_CTRL_STATUS_NOPAUSE 0x0000000000000004ULL +#define BMAC_CTRL_STATUS_TIME 0x00000000ffff0000ULL +#define BMAC_CTRL_STATUS_TIME_SHIFT 16 + +#define BTXMAC_STATUS_MASK 0x00040UL +#define BRXMAC_STATUS_MASK 0x00048UL +#define BMAC_CTRL_STATUS_MASK 0x00050UL + +#define BTXMAC_CONFIG 0x00060UL +#define BTXMAC_CONFIG_ENABLE 0x0000000000000001ULL +#define BTXMAC_CONFIG_FCS_DISABLE 0x0000000000000002ULL + +#define BRXMAC_CONFIG 0x00068UL +#define BRXMAC_CONFIG_DISCARD_DIS 0x0000000000000080ULL +#define BRXMAC_CONFIG_ADDR_FILT_EN 0x0000000000000040ULL +#define BRXMAC_CONFIG_HASH_FILT_EN 0x0000000000000020ULL +#define BRXMAC_CONFIG_PROMISC_GRP 0x0000000000000010ULL +#define BRXMAC_CONFIG_PROMISC 0x0000000000000008ULL +#define BRXMAC_CONFIG_STRIP_FCS 0x0000000000000004ULL +#define BRXMAC_CONFIG_STRIP_PAD 0x0000000000000002ULL +#define BRXMAC_CONFIG_ENABLE 0x0000000000000001ULL + +#define BMAC_CTRL_CONFIG 0x00070UL +#define BMAC_CTRL_CONFIG_TX_PAUSE_EN 0x0000000000000001ULL +#define BMAC_CTRL_CONFIG_RX_PAUSE_EN 0x0000000000000002ULL +#define BMAC_CTRL_CONFIG_PASS_CTRL 0x0000000000000004ULL + +#define BMAC_XIF_CONFIG 0x00078UL +#define BMAC_XIF_CONFIG_TX_OUTPUT_EN 0x0000000000000001ULL +#define BMAC_XIF_CONFIG_MII_LOOPBACK 0x0000000000000002ULL +#define BMAC_XIF_CONFIG_GMII_MODE 0x0000000000000008ULL +#define BMAC_XIF_CONFIG_LINK_LED 0x0000000000000020ULL +#define BMAC_XIF_CONFIG_LED_POLARITY 0x0000000000000040ULL +#define BMAC_XIF_CONFIG_25MHZ_CLOCK 0x0000000000000080ULL + +#define BMAC_MIN_FRAME 0x000a0UL +#define BMAC_MIN_FRAME_VAL 0x00000000000003ffULL + +#define BMAC_MAX_FRAME 0x000a8UL +#define BMAC_MAX_FRAME_MAX_BURST 0x000000003fff0000ULL +#define BMAC_MAX_FRAME_MAX_BURST_SHIFT 16 +#define BMAC_MAX_FRAME_MAX_FRAME 0x0000000000003fffULL +#define BMAC_MAX_FRAME_MAX_FRAME_SHIFT 0 + +#define BMAC_PREAMBLE_SIZE 0x000b0UL +#define BMAC_PREAMBLE_SIZE_VAL 0x00000000000003ffULL + +#define BMAC_CTRL_TYPE 0x000c8UL + +#define BMAC_ADDR0 0x00100UL +#define BMAC_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_ADDR1 0x00108UL +#define BMAC_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_ADDR2 0x00110UL +#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_NUM_ALT_ADDR 6 + +#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_ALT_ADDR1(NUM) (0x00120UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_ALT_ADDR2(NUM) (0x00128UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_FC_ADDR0 0x00268UL +#define BMAC_FC_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_FC_ADDR1 0x00270UL +#define BMAC_FC_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_FC_ADDR2 0x00278UL +#define BMAC_FC_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_ADD_FILT0 0x00298UL +#define BMAC_ADD_FILT0_FILT0 0x000000000000ffffULL + +#define BMAC_ADD_FILT1 
0x002a0UL +#define BMAC_ADD_FILT1_FILT1 0x000000000000ffffULL + +#define BMAC_ADD_FILT2 0x002a8UL +#define BMAC_ADD_FILT2_FILT2 0x000000000000ffffULL + +#define BMAC_ADD_FILT12_MASK 0x002b0UL +#define BMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL + +#define BMAC_ADD_FILT00_MASK 0x002b8UL +#define BMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL + +#define BMAC_HASH_TBL(NUM) (0x002c0UL + (NUM) * 0x8UL) +#define BMAC_HASH_TBL_VAL 0x000000000000ffffULL + +#define BRXMAC_FRAME_CNT 0x00370 +#define BRXMAC_FRAME_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_MAX_LEN_ERR_CNT 0x00378 + +#define BRXMAC_ALIGN_ERR_CNT 0x00380 +#define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_CRC_ERR_CNT 0x00388 +#define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_CODE_VIOL_ERR_CNT 0x00390 +#define BRXMAC_CODE_VIOL_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BMAC_STATE_MACHINE 0x003a0 + +#define BMAC_ADDR_CMPEN 0x003f8UL +#define BMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL +#define BMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL +#define BMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL +#define BMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL +#define BMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL +#define BMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL +#define BMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL +#define BMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL +#define BMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL +#define BMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL +#define BMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL +#define BMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL +#define BMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL +#define BMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL +#define BMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL +#define BMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL + +#define BMAC_NUM_HOST_INFO 9 + +#define BMAC_HOST_INFO(NUM) (0x00400UL + (NUM) * 0x8UL) + +#define BTXMAC_BYTE_CNT 0x00448UL +#define BTXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define BTXMAC_FRM_CNT 0x00450UL +#define BTXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL + +#define BRXMAC_BYTE_CNT 0x00458UL +#define BRXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define HOST_INFO_MPR 0x0000000000000100ULL +#define HOST_INFO_MACRDCTBLN 0x0000000000000007ULL + +/* XPCS registers, offset from np->regs + np->xpcs_off */ + +#define XPCS_CONTROL1 (FZC_MAC + 0x00000UL) +#define XPCS_CONTROL1_RESET 0x0000000000008000ULL +#define XPCS_CONTROL1_LOOPBACK 0x0000000000004000ULL +#define XPCS_CONTROL1_SPEED_SELECT3 0x0000000000002000ULL +#define XPCS_CONTROL1_CSR_LOW_PWR 0x0000000000000800ULL +#define XPCS_CONTROL1_CSR_SPEED1 0x0000000000000040ULL +#define XPCS_CONTROL1_CSR_SPEED0 0x000000000000003cULL + +#define XPCS_STATUS1 (FZC_MAC + 0x00008UL) +#define XPCS_STATUS1_CSR_FAULT 0x0000000000000080ULL +#define XPCS_STATUS1_CSR_RXLNK_STAT 0x0000000000000004ULL +#define XPCS_STATUS1_CSR_LPWR_ABLE 0x0000000000000002ULL + +#define XPCS_DEVICE_IDENTIFIER (FZC_MAC + 0x00010UL) +#define XPCS_DEVICE_IDENTIFIER_VAL 0x00000000ffffffffULL + +#define XPCS_SPEED_ABILITY (FZC_MAC + 0x00018UL) +#define XPCS_SPEED_ABILITY_10GIG 0x0000000000000001ULL + +#define XPCS_DEV_IN_PKG (FZC_MAC + 0x00020UL) +#define XPCS_DEV_IN_PKG_CSR_VEND2 0x0000000080000000ULL +#define XPCS_DEV_IN_PKG_CSR_VEND1 0x0000000040000000ULL +#define XPCS_DEV_IN_PKG_DTE_XS 0x0000000000000020ULL +#define XPCS_DEV_IN_PKG_PHY_XS 0x0000000000000010ULL +#define XPCS_DEV_IN_PKG_PCS 0x0000000000000008ULL +#define XPCS_DEV_IN_PKG_WIS 0x0000000000000004ULL +#define XPCS_DEV_IN_PKG_PMD_PMA 0x0000000000000002ULL 
+#define XPCS_DEV_IN_PKG_CLS22 0x0000000000000001ULL + +#define XPCS_CONTROL2 (FZC_MAC + 0x00028UL) +#define XPCS_CONTROL2_CSR_PSC_SEL 0x0000000000000003ULL + +#define XPCS_STATUS2 (FZC_MAC + 0x00030UL) +#define XPCS_STATUS2_CSR_DEV_PRES 0x000000000000c000ULL +#define XPCS_STATUS2_CSR_TX_FAULT 0x0000000000000800ULL +#define XPCS_STATUS2_CSR_RCV_FAULT 0x0000000000000400ULL +#define XPCS_STATUS2_TEN_GBASE_W 0x0000000000000004ULL +#define XPCS_STATUS2_TEN_GBASE_X 0x0000000000000002ULL +#define XPCS_STATUS2_TEN_GBASE_R 0x0000000000000001ULL + +#define XPCS_PKG_ID (FZC_MAC + 0x00038UL) +#define XPCS_PKG_ID_VAL 0x00000000ffffffffULL + +#define XPCS_STATUS(IDX) (FZC_MAC + 0x00040UL) +#define XPCS_STATUS_CSR_LANE_ALIGN 0x0000000000001000ULL +#define XPCS_STATUS_CSR_PATTEST_CAP 0x0000000000000800ULL +#define XPCS_STATUS_CSR_LANE3_SYNC 0x0000000000000008ULL +#define XPCS_STATUS_CSR_LANE2_SYNC 0x0000000000000004ULL +#define XPCS_STATUS_CSR_LANE1_SYNC 0x0000000000000002ULL +#define XPCS_STATUS_CSR_LANE0_SYNC 0x0000000000000001ULL + +#define XPCS_TEST_CONTROL (FZC_MAC + 0x00048UL) +#define XPCS_TEST_CONTROL_TXTST_EN 0x0000000000000004ULL +#define XPCS_TEST_CONTROL_TPAT_SEL 0x0000000000000003ULL + +#define XPCS_CFG_VENDOR1 (FZC_MAC + 0x00050UL) +#define XPCS_CFG_VENDOR1_DBG_IOTST 0x0000000000000080ULL +#define XPCS_CFG_VENDOR1_DBG_SEL 0x0000000000000078ULL +#define XPCS_CFG_VENDOR1_BYPASS_DET 0x0000000000000004ULL +#define XPCS_CFG_VENDOR1_TXBUF_EN 0x0000000000000002ULL +#define XPCS_CFG_VENDOR1_XPCS_EN 0x0000000000000001ULL + +#define XPCS_DIAG_VENDOR2 (FZC_MAC + 0x00058UL) +#define XPCS_DIAG_VENDOR2_SSM_LANE3 0x0000000001e00000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE2 0x00000000001e0000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE1 0x000000000001e000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE0 0x0000000000001e00ULL +#define XPCS_DIAG_VENDOR2_EBUF_SM 0x00000000000001feULL +#define XPCS_DIAG_VENDOR2_RCV_SM 0x0000000000000001ULL + +#define XPCS_MASK1 (FZC_MAC + 0x00060UL) +#define XPCS_MASK1_FAULT_MASK 0x0000000000000080ULL +#define XPCS_MASK1_RXALIGN_STAT_MSK 0x0000000000000004ULL + +#define XPCS_PKT_COUNT (FZC_MAC + 0x00068UL) +#define XPCS_PKT_COUNT_TX 0x00000000ffff0000ULL +#define XPCS_PKT_COUNT_RX 0x000000000000ffffULL + +#define XPCS_TX_SM (FZC_MAC + 0x00070UL) +#define XPCS_TX_SM_VAL 0x000000000000000fULL + +#define XPCS_DESKEW_ERR_CNT (FZC_MAC + 0x00078UL) +#define XPCS_DESKEW_ERR_CNT_VAL 0x00000000000000ffULL + +#define XPCS_SYMERR_CNT01 (FZC_MAC + 0x00080UL) +#define XPCS_SYMERR_CNT01_LANE1 0x00000000ffff0000ULL +#define XPCS_SYMERR_CNT01_LANE0 0x000000000000ffffULL + +#define XPCS_SYMERR_CNT23 (FZC_MAC + 0x00088UL) +#define XPCS_SYMERR_CNT23_LANE3 0x00000000ffff0000ULL +#define XPCS_SYMERR_CNT23_LANE2 0x000000000000ffffULL + +#define XPCS_TRAINING_VECTOR (FZC_MAC + 0x00090UL) +#define XPCS_TRAINING_VECTOR_VAL 0x00000000ffffffffULL + +/* PCS registers, offset from np->regs + np->pcs_off */ + +#define PCS_MII_CTL (FZC_MAC + 0x00000UL) +#define PCS_MII_CTL_RST 0x0000000000008000ULL +#define PCS_MII_CTL_10_100_SPEED 0x0000000000002000ULL +#define PCS_MII_AUTONEG_EN 0x0000000000001000ULL +#define PCS_MII_PWR_DOWN 0x0000000000000800ULL +#define PCS_MII_ISOLATE 0x0000000000000400ULL +#define PCS_MII_AUTONEG_RESTART 0x0000000000000200ULL +#define PCS_MII_DUPLEX 0x0000000000000100ULL +#define PCS_MII_COLL_TEST 0x0000000000000080ULL +#define PCS_MII_1000MB_SPEED 0x0000000000000040ULL + +#define PCS_MII_STAT (FZC_MAC + 0x00008UL) +#define PCS_MII_STAT_EXT_STATUS 0x0000000000000100ULL +#define 
PCS_MII_STAT_AUTONEG_DONE 0x0000000000000020ULL +#define PCS_MII_STAT_REMOTE_FAULT 0x0000000000000010ULL +#define PCS_MII_STAT_AUTONEG_ABLE 0x0000000000000008ULL +#define PCS_MII_STAT_LINK_STATUS 0x0000000000000004ULL +#define PCS_MII_STAT_JABBER_DET 0x0000000000000002ULL +#define PCS_MII_STAT_EXT_CAP 0x0000000000000001ULL + +#define PCS_MII_ADV (FZC_MAC + 0x00010UL) +#define PCS_MII_ADV_NEXT_PAGE 0x0000000000008000ULL +#define PCS_MII_ADV_ACK 0x0000000000004000ULL +#define PCS_MII_ADV_REMOTE_FAULT 0x0000000000003000ULL +#define PCS_MII_ADV_ASM_DIR 0x0000000000000100ULL +#define PCS_MII_ADV_PAUSE 0x0000000000000080ULL +#define PCS_MII_ADV_HALF_DUPLEX 0x0000000000000040ULL +#define PCS_MII_ADV_FULL_DUPLEX 0x0000000000000020ULL + +#define PCS_MII_PARTNER (FZC_MAC + 0x00018UL) +#define PCS_MII_PARTNER_NEXT_PAGE 0x0000000000008000ULL +#define PCS_MII_PARTNER_ACK 0x0000000000004000ULL +#define PCS_MII_PARTNER_REMOTE_FAULT 0x0000000000002000ULL +#define PCS_MII_PARTNER_PAUSE 0x0000000000000180ULL +#define PCS_MII_PARTNER_HALF_DUPLEX 0x0000000000000040ULL +#define PCS_MII_PARTNER_FULL_DUPLEX 0x0000000000000020ULL + +#define PCS_CONF (FZC_MAC + 0x00020UL) +#define PCS_CONF_MASK 0x0000000000000040ULL +#define PCS_CONF_10MS_TMR_OVERRIDE 0x0000000000000020ULL +#define PCS_CONF_JITTER_STUDY 0x0000000000000018ULL +#define PCS_CONF_SIGDET_ACTIVE_LOW 0x0000000000000004ULL +#define PCS_CONF_SIGDET_OVERRIDE 0x0000000000000002ULL +#define PCS_CONF_ENABLE 0x0000000000000001ULL + +#define PCS_STATE (FZC_MAC + 0x00028UL) +#define PCS_STATE_D_PARTNER_FAIL 0x0000000020000000ULL +#define PCS_STATE_D_WAIT_C_CODES_ACK 0x0000000010000000ULL +#define PCS_STATE_D_SYNC_LOSS 0x0000000008000000ULL +#define PCS_STATE_D_NO_GOOD_C_CODES 0x0000000004000000ULL +#define PCS_STATE_D_SERDES 0x0000000002000000ULL +#define PCS_STATE_D_BREAKLINK_C_CODES 0x0000000001000000ULL +#define PCS_STATE_L_SIGDET 0x0000000000400000ULL +#define PCS_STATE_L_SYNC_LOSS 0x0000000000200000ULL +#define PCS_STATE_L_C_CODES 0x0000000000100000ULL +#define PCS_STATE_LINK_CFG_STATE 0x000000000001e000ULL +#define PCS_STATE_SEQ_DET_STATE 0x0000000000001800ULL +#define PCS_STATE_WORD_SYNC_STATE 0x0000000000000700ULL +#define PCS_STATE_NO_IDLE 0x000000000000000fULL + +#define PCS_INTERRUPT (FZC_MAC + 0x00030UL) +#define PCS_INTERRUPT_LSTATUS 0x0000000000000004ULL + +#define PCS_DPATH_MODE (FZC_MAC + 0x000a0UL) +#define PCS_DPATH_MODE_PCS 0x0000000000000000ULL +#define PCS_DPATH_MODE_MII 0x0000000000000002ULL +#define PCS_DPATH_MODE_LINKUP_F_ENAB 0x0000000000000001ULL + +#define PCS_PKT_CNT (FZC_MAC + 0x000c0UL) +#define PCS_PKT_CNT_RX 0x0000000007ff0000ULL +#define PCS_PKT_CNT_TX 0x00000000000007ffULL + +#define MIF_BB_MDC (FZC_MAC + 0x16000UL) +#define MIF_BB_MDC_CLK 0x0000000000000001ULL + +#define MIF_BB_MDO (FZC_MAC + 0x16008UL) +#define MIF_BB_MDO_DAT 0x0000000000000001ULL + +#define MIF_BB_MDO_EN (FZC_MAC + 0x16010UL) +#define MIF_BB_MDO_EN_VAL 0x0000000000000001ULL + +#define MIF_FRAME_OUTPUT (FZC_MAC + 0x16018UL) +#define MIF_FRAME_OUTPUT_ST 0x00000000c0000000ULL +#define MIF_FRAME_OUTPUT_ST_SHIFT 30 +#define MIF_FRAME_OUTPUT_OP_ADDR 0x0000000000000000ULL +#define MIF_FRAME_OUTPUT_OP_WRITE 0x0000000010000000ULL +#define MIF_FRAME_OUTPUT_OP_READ_INC 0x0000000020000000ULL +#define MIF_FRAME_OUTPUT_OP_READ 0x0000000030000000ULL +#define MIF_FRAME_OUTPUT_OP_SHIFT 28 +#define MIF_FRAME_OUTPUT_PORT 0x000000000f800000ULL +#define MIF_FRAME_OUTPUT_PORT_SHIFT 23 +#define MIF_FRAME_OUTPUT_REG 0x00000000007c0000ULL +#define MIF_FRAME_OUTPUT_REG_SHIFT 18 
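/*
 * Illustrative sketch only, not part of the imported header: the paired
 * MASK / _SHIFT definitions in this file are meant for the usual
 * extract/insert idiom, shown here with the MIF_FRAME_OUTPUT PORT field
 * defined above.  These helpers are hypothetical and assume u64 from
 * <linux/types.h>.
 */
static inline unsigned int example_mif_frame_get_port(u64 frame)
{
	/* pull the 5-bit PORT field out of a latched frame value */
	return (frame & MIF_FRAME_OUTPUT_PORT) >> MIF_FRAME_OUTPUT_PORT_SHIFT;
}

static inline u64 example_mif_frame_set_port(u64 frame, unsigned int port)
{
	/* clear the field, then merge in the new shifted value */
	frame &= ~MIF_FRAME_OUTPUT_PORT;
	return frame | ((u64)port << MIF_FRAME_OUTPUT_PORT_SHIFT);
}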
+#define MIF_FRAME_OUTPUT_TA 0x0000000000030000ULL +#define MIF_FRAME_OUTPUT_TA_SHIFT 16 +#define MIF_FRAME_OUTPUT_DATA 0x000000000000ffffULL +#define MIF_FRAME_OUTPUT_DATA_SHIFT 0 + +#define MDIO_ADDR_OP(port, dev, reg) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_ADDR | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MDIO_READ_OP(port, dev) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_READ | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) + +#define MDIO_WRITE_OP(port, dev, data) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_WRITE | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MII_READ_OP(port, reg) \ + ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + (2 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) + +#define MII_WRITE_OP(port, reg, data) \ + ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + (1 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MIF_CONFIG (FZC_MAC + 0x16020UL) +#define MIF_CONFIG_ATCA_GE 0x0000000000010000ULL +#define MIF_CONFIG_INDIRECT_MODE 0x0000000000008000ULL +#define MIF_CONFIG_POLL_PRT_PHYADDR 0x0000000000003c00ULL +#define MIF_CONFIG_POLL_DEV_REG_ADDR 0x00000000000003e0ULL +#define MIF_CONFIG_BB_MODE 0x0000000000000010ULL +#define MIF_CONFIG_POLL_EN 0x0000000000000008ULL +#define MIF_CONFIG_BB_SER_SEL 0x0000000000000006ULL +#define MIF_CONFIG_MANUAL_MODE 0x0000000000000001ULL + +#define MIF_POLL_STATUS (FZC_MAC + 0x16028UL) +#define MIF_POLL_STATUS_DATA 0x00000000ffff0000ULL +#define MIF_POLL_STATUS_STAT 0x000000000000ffffULL + +#define MIF_POLL_MASK (FZC_MAC + 0x16030UL) +#define MIF_POLL_MASK_VAL 0x000000000000ffffULL + +#define MIF_SM (FZC_MAC + 0x16038UL) +#define MIF_SM_PORT_ADDR 0x00000000001f0000ULL +#define MIF_SM_MDI_1 0x0000000000004000ULL +#define MIF_SM_MDI_0 0x0000000000002400ULL +#define MIF_SM_MDCLK 0x0000000000001000ULL +#define MIF_SM_MDO_EN 0x0000000000000800ULL +#define MIF_SM_MDO 0x0000000000000400ULL +#define MIF_SM_MDI 0x0000000000000200ULL +#define MIF_SM_CTL 0x00000000000001c0ULL +#define MIF_SM_EX 0x000000000000003fULL + +#define MIF_STATUS (FZC_MAC + 0x16040UL) +#define MIF_STATUS_MDINT1 0x0000000000000020ULL +#define MIF_STATUS_MDINT0 0x0000000000000010ULL + +#define MIF_MASK (FZC_MAC + 0x16048UL) +#define MIF_MASK_MDINT1 0x0000000000000020ULL +#define MIF_MASK_MDINT0 0x0000000000000010ULL +#define MIF_MASK_PEU_ERR 0x0000000000000008ULL +#define MIF_MASK_YC 0x0000000000000004ULL +#define MIF_MASK_XGE_ERR0 0x0000000000000002ULL +#define MIF_MASK_MIF_INIT_DONE 0x0000000000000001ULL + +#define ENET_SERDES_RESET (FZC_MAC + 0x14000UL) +#define ENET_SERDES_RESET_1 0x0000000000000002ULL +#define ENET_SERDES_RESET_0 0x0000000000000001ULL + +#define ENET_SERDES_CFG (FZC_MAC + 0x14008UL) +#define ENET_SERDES_BE_LOOPBACK 0x0000000000000002ULL +#define ENET_SERDES_CFG_FORCE_RDY 0x0000000000000001ULL + +#define ENET_SERDES_0_PLL_CFG (FZC_MAC + 0x14010UL) +#define ENET_SERDES_PLL_FBDIV0 
0x0000000000000001ULL +#define ENET_SERDES_PLL_FBDIV1 0x0000000000000002ULL +#define ENET_SERDES_PLL_FBDIV2 0x0000000000000004ULL +#define ENET_SERDES_PLL_HRATE0 0x0000000000000008ULL +#define ENET_SERDES_PLL_HRATE1 0x0000000000000010ULL +#define ENET_SERDES_PLL_HRATE2 0x0000000000000020ULL +#define ENET_SERDES_PLL_HRATE3 0x0000000000000040ULL + +#define ENET_SERDES_0_CTRL_CFG (FZC_MAC + 0x14018UL) +#define ENET_SERDES_CTRL_SDET_0 0x0000000000000001ULL +#define ENET_SERDES_CTRL_SDET_1 0x0000000000000002ULL +#define ENET_SERDES_CTRL_SDET_2 0x0000000000000004ULL +#define ENET_SERDES_CTRL_SDET_3 0x0000000000000008ULL +#define ENET_SERDES_CTRL_EMPH_0 0x0000000000000070ULL +#define ENET_SERDES_CTRL_EMPH_0_SHIFT 4 +#define ENET_SERDES_CTRL_EMPH_1 0x0000000000000380ULL +#define ENET_SERDES_CTRL_EMPH_1_SHIFT 7 +#define ENET_SERDES_CTRL_EMPH_2 0x0000000000001c00ULL +#define ENET_SERDES_CTRL_EMPH_2_SHIFT 10 +#define ENET_SERDES_CTRL_EMPH_3 0x000000000000e000ULL +#define ENET_SERDES_CTRL_EMPH_3_SHIFT 13 +#define ENET_SERDES_CTRL_LADJ_0 0x0000000000070000ULL +#define ENET_SERDES_CTRL_LADJ_0_SHIFT 16 +#define ENET_SERDES_CTRL_LADJ_1 0x0000000000380000ULL +#define ENET_SERDES_CTRL_LADJ_1_SHIFT 19 +#define ENET_SERDES_CTRL_LADJ_2 0x0000000001c00000ULL +#define ENET_SERDES_CTRL_LADJ_2_SHIFT 22 +#define ENET_SERDES_CTRL_LADJ_3 0x000000000e000000ULL +#define ENET_SERDES_CTRL_LADJ_3_SHIFT 25 +#define ENET_SERDES_CTRL_RXITERM_0 0x0000000010000000ULL +#define ENET_SERDES_CTRL_RXITERM_1 0x0000000020000000ULL +#define ENET_SERDES_CTRL_RXITERM_2 0x0000000040000000ULL +#define ENET_SERDES_CTRL_RXITERM_3 0x0000000080000000ULL + +#define ENET_SERDES_0_TEST_CFG (FZC_MAC + 0x14020UL) +#define ENET_SERDES_TEST_MD_0 0x0000000000000003ULL +#define ENET_SERDES_TEST_MD_0_SHIFT 0 +#define ENET_SERDES_TEST_MD_1 0x000000000000000cULL +#define ENET_SERDES_TEST_MD_1_SHIFT 2 +#define ENET_SERDES_TEST_MD_2 0x0000000000000030ULL +#define ENET_SERDES_TEST_MD_2_SHIFT 4 +#define ENET_SERDES_TEST_MD_3 0x00000000000000c0ULL +#define ENET_SERDES_TEST_MD_3_SHIFT 6 + +#define ENET_TEST_MD_NO_LOOPBACK 0x0 +#define ENET_TEST_MD_EWRAP 0x1 +#define ENET_TEST_MD_PAD_LOOPBACK 0x2 +#define ENET_TEST_MD_REV_LOOPBACK 0x3 + +#define ENET_SERDES_1_PLL_CFG (FZC_MAC + 0x14028UL) +#define ENET_SERDES_1_CTRL_CFG (FZC_MAC + 0x14030UL) +#define ENET_SERDES_1_TEST_CFG (FZC_MAC + 0x14038UL) + +#define ENET_RGMII_CFG_REG (FZC_MAC + 0x14040UL) + +#define ESR_INT_SIGNALS (FZC_MAC + 0x14800UL) +#define ESR_INT_SIGNALS_ALL 0x00000000ffffffffULL +#define ESR_INT_SIGNALS_P0_BITS 0x0000000033e0000fULL +#define ESR_INT_SIGNALS_P1_BITS 0x000000000c1f00f0ULL +#define ESR_INT_SRDY0_P0 0x0000000020000000ULL +#define ESR_INT_DET0_P0 0x0000000010000000ULL +#define ESR_INT_SRDY0_P1 0x0000000008000000ULL +#define ESR_INT_DET0_P1 0x0000000004000000ULL +#define ESR_INT_XSRDY_P0 0x0000000002000000ULL +#define ESR_INT_XDP_P0_CH3 0x0000000001000000ULL +#define ESR_INT_XDP_P0_CH2 0x0000000000800000ULL +#define ESR_INT_XDP_P0_CH1 0x0000000000400000ULL +#define ESR_INT_XDP_P0_CH0 0x0000000000200000ULL +#define ESR_INT_XSRDY_P1 0x0000000000100000ULL +#define ESR_INT_XDP_P1_CH3 0x0000000000080000ULL +#define ESR_INT_XDP_P1_CH2 0x0000000000040000ULL +#define ESR_INT_XDP_P1_CH1 0x0000000000020000ULL +#define ESR_INT_XDP_P1_CH0 0x0000000000010000ULL +#define ESR_INT_SLOSS_P1_CH3 0x0000000000000080ULL +#define ESR_INT_SLOSS_P1_CH2 0x0000000000000040ULL +#define ESR_INT_SLOSS_P1_CH1 0x0000000000000020ULL +#define ESR_INT_SLOSS_P1_CH0 0x0000000000000010ULL +#define ESR_INT_SLOSS_P0_CH3 
0x0000000000000008ULL +#define ESR_INT_SLOSS_P0_CH2 0x0000000000000004ULL +#define ESR_INT_SLOSS_P0_CH1 0x0000000000000002ULL +#define ESR_INT_SLOSS_P0_CH0 0x0000000000000001ULL + +#define ESR_DEBUG_SEL (FZC_MAC + 0x14808UL) +#define ESR_DEBUG_SEL_VAL 0x000000000000003fULL + +/* SerDes registers behind MIF */ +#define NIU_ESR_DEV_ADDR 0x1e +#define ESR_BASE 0x0000 + +#define ESR_RXTX_COMM_CTRL_L (ESR_BASE + 0x0000) +#define ESR_RXTX_COMM_CTRL_H (ESR_BASE + 0x0001) + +#define ESR_RXTX_RESET_CTRL_L (ESR_BASE + 0x0002) +#define ESR_RXTX_RESET_CTRL_H (ESR_BASE + 0x0003) + +#define ESR_RX_POWER_CTRL_L (ESR_BASE + 0x0004) +#define ESR_RX_POWER_CTRL_H (ESR_BASE + 0x0005) + +#define ESR_TX_POWER_CTRL_L (ESR_BASE + 0x0006) +#define ESR_TX_POWER_CTRL_H (ESR_BASE + 0x0007) + +#define ESR_MISC_POWER_CTRL_L (ESR_BASE + 0x0008) +#define ESR_MISC_POWER_CTRL_H (ESR_BASE + 0x0009) + +#define ESR_RXTX_CTRL_L(CHAN) (ESR_BASE + 0x0080 + (CHAN) * 0x10) +#define ESR_RXTX_CTRL_H(CHAN) (ESR_BASE + 0x0081 + (CHAN) * 0x10) +#define ESR_RXTX_CTRL_BIASCNTL 0x80000000 +#define ESR_RXTX_CTRL_RESV1 0x7c000000 +#define ESR_RXTX_CTRL_TDENFIFO 0x02000000 +#define ESR_RXTX_CTRL_TDWS20 0x01000000 +#define ESR_RXTX_CTRL_VMUXLO 0x00c00000 +#define ESR_RXTX_CTRL_VMUXLO_SHIFT 22 +#define ESR_RXTX_CTRL_VPULSELO 0x00300000 +#define ESR_RXTX_CTRL_VPULSELO_SHIFT 20 +#define ESR_RXTX_CTRL_RESV2 0x000f0000 +#define ESR_RXTX_CTRL_RESV3 0x0000c000 +#define ESR_RXTX_CTRL_RXPRESWIN 0x00003000 +#define ESR_RXTX_CTRL_RXPRESWIN_SHIFT 12 +#define ESR_RXTX_CTRL_RESV4 0x00000800 +#define ESR_RXTX_CTRL_RISEFALL 0x00000700 +#define ESR_RXTX_CTRL_RISEFALL_SHIFT 8 +#define ESR_RXTX_CTRL_RESV5 0x000000fe +#define ESR_RXTX_CTRL_ENSTRETCH 0x00000001 + +#define ESR_RXTX_TUNING_L(CHAN) (ESR_BASE + 0x0082 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING_H(CHAN) (ESR_BASE + 0x0083 + (CHAN) * 0x10) + +#define ESR_RX_SYNCCHAR_L(CHAN) (ESR_BASE + 0x0084 + (CHAN) * 0x10) +#define ESR_RX_SYNCCHAR_H(CHAN) (ESR_BASE + 0x0085 + (CHAN) * 0x10) + +#define ESR_RXTX_TEST_L(CHAN) (ESR_BASE + 0x0086 + (CHAN) * 0x10) +#define ESR_RXTX_TEST_H(CHAN) (ESR_BASE + 0x0087 + (CHAN) * 0x10) + +#define ESR_GLUE_CTRL0_L(CHAN) (ESR_BASE + 0x0088 + (CHAN) * 0x10) +#define ESR_GLUE_CTRL0_H(CHAN) (ESR_BASE + 0x0089 + (CHAN) * 0x10) +#define ESR_GLUE_CTRL0_RESV1 0xf8000000 +#define ESR_GLUE_CTRL0_BLTIME 0x07000000 +#define ESR_GLUE_CTRL0_BLTIME_SHIFT 24 +#define ESR_GLUE_CTRL0_RESV2 0x00ff0000 +#define ESR_GLUE_CTRL0_RXLOS_TEST 0x00008000 +#define ESR_GLUE_CTRL0_RESV3 0x00004000 +#define ESR_GLUE_CTRL0_RXLOSENAB 0x00002000 +#define ESR_GLUE_CTRL0_FASTRESYNC 0x00001000 +#define ESR_GLUE_CTRL0_SRATE 0x00000f00 +#define ESR_GLUE_CTRL0_SRATE_SHIFT 8 +#define ESR_GLUE_CTRL0_THCNT 0x000000ff +#define ESR_GLUE_CTRL0_THCNT_SHIFT 0 + +#define BLTIME_64_CYCLES 0 +#define BLTIME_128_CYCLES 1 +#define BLTIME_256_CYCLES 2 +#define BLTIME_300_CYCLES 3 +#define BLTIME_384_CYCLES 4 +#define BLTIME_512_CYCLES 5 +#define BLTIME_1024_CYCLES 6 +#define BLTIME_2048_CYCLES 7 + +#define ESR_GLUE_CTRL1_L(CHAN) (ESR_BASE + 0x008a + (CHAN) * 0x10) +#define ESR_GLUE_CTRL1_H(CHAN) (ESR_BASE + 0x008b + (CHAN) * 0x10) +#define ESR_RXTX_TUNING1_L(CHAN) (ESR_BASE + 0x00c2 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING1_H(CHAN) (ESR_BASE + 0x00c2 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING2_L(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING2_H(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING3_L(CHAN) (ESR_BASE + 0x0142 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING3_H(CHAN) (ESR_BASE + 0x0142 + 
(CHAN) * 0x10) + +#define NIU_ESR2_DEV_ADDR 0x1e +#define ESR2_BASE 0x8000 + +#define ESR2_TI_PLL_CFG_L (ESR2_BASE + 0x000) +#define ESR2_TI_PLL_CFG_H (ESR2_BASE + 0x001) +#define PLL_CFG_STD 0x00000c00 +#define PLL_CFG_STD_SHIFT 10 +#define PLL_CFG_LD 0x00000300 +#define PLL_CFG_LD_SHIFT 8 +#define PLL_CFG_MPY 0x0000001e +#define PLL_CFG_MPY_SHIFT 1 +#define PLL_CFG_MPY_4X 0x0 +#define PLL_CFG_MPY_5X 0x00000002 +#define PLL_CFG_MPY_6X 0x00000004 +#define PLL_CFG_MPY_8X 0x00000008 +#define PLL_CFG_MPY_10X 0x0000000a +#define PLL_CFG_MPY_12X 0x0000000c +#define PLL_CFG_MPY_12P5X 0x0000000e +#define PLL_CFG_ENPLL 0x00000001 + +#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002) +#define ESR2_TI_PLL_STS_H (ESR2_BASE + 0x003) +#define PLL_STS_LOCK 0x00000001 + +#define ESR2_TI_PLL_TEST_CFG_L (ESR2_BASE + 0x004) +#define ESR2_TI_PLL_TEST_CFG_H (ESR2_BASE + 0x005) +#define PLL_TEST_INVPATT 0x00004000 +#define PLL_TEST_RATE 0x00003000 +#define PLL_TEST_RATE_SHIFT 12 +#define PLL_TEST_CFG_ENBSAC 0x00000400 +#define PLL_TEST_CFG_ENBSRX 0x00000200 +#define PLL_TEST_CFG_ENBSTX 0x00000100 +#define PLL_TEST_CFG_LOOPBACK_PAD 0x00000040 +#define PLL_TEST_CFG_LOOPBACK_CML_DIS 0x00000080 +#define PLL_TEST_CFG_LOOPBACK_CML_EN 0x000000c0 +#define PLL_TEST_CFG_CLKBYP 0x00000030 +#define PLL_TEST_CFG_CLKBYP_SHIFT 4 +#define PLL_TEST_CFG_EN_RXPATT 0x00000008 +#define PLL_TEST_CFG_EN_TXPATT 0x00000004 +#define PLL_TEST_CFG_TPATT 0x00000003 +#define PLL_TEST_CFG_TPATT_SHIFT 0 + +#define ESR2_TI_PLL_TX_CFG_L(CHAN) (ESR2_BASE + 0x100 + (CHAN) * 4) +#define ESR2_TI_PLL_TX_CFG_H(CHAN) (ESR2_BASE + 0x101 + (CHAN) * 4) +#define PLL_TX_CFG_RDTCT 0x00600000 +#define PLL_TX_CFG_RDTCT_SHIFT 21 +#define PLL_TX_CFG_ENIDL 0x00100000 +#define PLL_TX_CFG_BSTX 0x00020000 +#define PLL_TX_CFG_ENFTP 0x00010000 +#define PLL_TX_CFG_DE 0x0000f000 +#define PLL_TX_CFG_DE_SHIFT 12 +#define PLL_TX_CFG_SWING_125MV 0x00000000 +#define PLL_TX_CFG_SWING_250MV 0x00000200 +#define PLL_TX_CFG_SWING_500MV 0x00000400 +#define PLL_TX_CFG_SWING_625MV 0x00000600 +#define PLL_TX_CFG_SWING_750MV 0x00000800 +#define PLL_TX_CFG_SWING_1000MV 0x00000a00 +#define PLL_TX_CFG_SWING_1250MV 0x00000c00 +#define PLL_TX_CFG_SWING_1375MV 0x00000e00 +#define PLL_TX_CFG_CM 0x00000100 +#define PLL_TX_CFG_INVPAIR 0x00000080 +#define PLL_TX_CFG_RATE 0x00000060 +#define PLL_TX_CFG_RATE_SHIFT 5 +#define PLL_TX_CFG_RATE_FULL 0x0 +#define PLL_TX_CFG_RATE_HALF 0x20 +#define PLL_TX_CFG_RATE_QUAD 0x40 +#define PLL_TX_CFG_BUSWIDTH 0x0000001c +#define PLL_TX_CFG_BUSWIDTH_SHIFT 2 +#define PLL_TX_CFG_ENTEST 0x00000002 +#define PLL_TX_CFG_ENTX 0x00000001 + +#define ESR2_TI_PLL_TX_STS_L(CHAN) (ESR2_BASE + 0x102 + (CHAN) * 4) +#define ESR2_TI_PLL_TX_STS_H(CHAN) (ESR2_BASE + 0x103 + (CHAN) * 4) +#define PLL_TX_STS_RDTCTIP 0x00000002 +#define PLL_TX_STS_TESTFAIL 0x00000001 + +#define ESR2_TI_PLL_RX_CFG_L(CHAN) (ESR2_BASE + 0x120 + (CHAN) * 4) +#define ESR2_TI_PLL_RX_CFG_H(CHAN) (ESR2_BASE + 0x121 + (CHAN) * 4) +#define PLL_RX_CFG_BSINRXN 0x02000000 +#define PLL_RX_CFG_BSINRXP 0x01000000 +#define PLL_RX_CFG_EQ_MAX_LF 0x00000000 +#define PLL_RX_CFG_EQ_LP_ADAPTIVE 0x00080000 +#define PLL_RX_CFG_EQ_LP_1084MHZ 0x00400000 +#define PLL_RX_CFG_EQ_LP_805MHZ 0x00480000 +#define PLL_RX_CFG_EQ_LP_573MHZ 0x00500000 +#define PLL_RX_CFG_EQ_LP_402MHZ 0x00580000 +#define PLL_RX_CFG_EQ_LP_304MHZ 0x00600000 +#define PLL_RX_CFG_EQ_LP_216MHZ 0x00680000 +#define PLL_RX_CFG_EQ_LP_156MHZ 0x00700000 +#define PLL_RX_CFG_EQ_LP_135MHZ 0x00780000 +#define PLL_RX_CFG_EQ_SHIFT 19 +#define PLL_RX_CFG_CDR 0x00070000 
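/*
 * Illustrative sketch only, not part of the imported header: a lane's TX
 * configuration word is composed by OR-ing the single-bit enables with
 * shifted multi-bit fields from the PLL_TX_CFG_* group above.  The helper
 * and the de-emphasis code 0x5 are hypothetical example values; u32 is
 * assumed from <linux/types.h>.
 */
static inline u32 example_pll_tx_cfg_half_rate(void)
{
	/* half-rate lane, 1375mV swing, example de-emphasis setting */
	return PLL_TX_CFG_ENTX |
	       PLL_TX_CFG_SWING_1375MV |
	       PLL_TX_CFG_RATE_HALF |
	       (0x5 << PLL_TX_CFG_DE_SHIFT);
}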
+#define PLL_RX_CFG_CDR_SHIFT 16 +#define PLL_RX_CFG_LOS_DIS 0x00000000 +#define PLL_RX_CFG_LOS_HTHRESH 0x00004000 +#define PLL_RX_CFG_LOS_LTHRESH 0x00008000 +#define PLL_RX_CFG_ALIGN_DIS 0x00000000 +#define PLL_RX_CFG_ALIGN_ENA 0x00001000 +#define PLL_RX_CFG_ALIGN_JOG 0x00002000 +#define PLL_RX_CFG_TERM_VDDT 0x00000000 +#define PLL_RX_CFG_TERM_0P8VDDT 0x00000100 +#define PLL_RX_CFG_TERM_FLOAT 0x00000300 +#define PLL_RX_CFG_INVPAIR 0x00000080 +#define PLL_RX_CFG_RATE 0x00000060 +#define PLL_RX_CFG_RATE_SHIFT 5 +#define PLL_RX_CFG_RATE_FULL 0x0 +#define PLL_RX_CFG_RATE_HALF 0x20 +#define PLL_RX_CFG_RATE_QUAD 0x40 +#define PLL_RX_CFG_BUSWIDTH 0x0000001c +#define PLL_RX_CFG_BUSWIDTH_SHIFT 2 +#define PLL_RX_CFG_ENTEST 0x00000002 +#define PLL_RX_CFG_ENRX 0x00000001 + +#define ESR2_TI_PLL_RX_STS_L(CHAN) (ESR2_BASE + 0x122 + (CHAN) * 4) +#define ESR2_TI_PLL_RX_STS_H(CHAN) (ESR2_BASE + 0x123 + (CHAN) * 4) +#define PLL_RX_STS_CRCIDTCT 0x00000200 +#define PLL_RX_STS_CWDTCT 0x00000100 +#define PLL_RX_STS_BSRXN 0x00000020 +#define PLL_RX_STS_BSRXP 0x00000010 +#define PLL_RX_STS_LOSDTCT 0x00000008 +#define PLL_RX_STS_ODDCG 0x00000004 +#define PLL_RX_STS_SYNC 0x00000002 +#define PLL_RX_STS_TESTFAIL 0x00000001 + +#define ENET_VLAN_TBL(IDX) (FZC_FFLP + 0x00000UL + (IDX) * 8UL) +#define ENET_VLAN_TBL_PARITY1 0x0000000000020000ULL +#define ENET_VLAN_TBL_PARITY0 0x0000000000010000ULL +#define ENET_VLAN_TBL_VPR 0x0000000000000008ULL +#define ENET_VLAN_TBL_VLANRDCTBLN 0x0000000000000007ULL +#define ENET_VLAN_TBL_SHIFT(PORT) ((PORT) * 4) + +#define ENET_VLAN_TBL_NUM_ENTRIES 4096 + +#define FFLP_VLAN_PAR_ERR (FZC_FFLP + 0x0800UL) +#define FFLP_VLAN_PAR_ERR_ERR 0x0000000080000000ULL +#define FFLP_VLAN_PAR_ERR_M_ERR 0x0000000040000000ULL +#define FFLP_VLAN_PAR_ERR_ADDR 0x000000003ffc0000ULL +#define FFLP_VLAN_PAR_ERR_DATA 0x000000000003ffffULL + +#define L2_CLS(IDX) (FZC_FFLP + 0x20000UL + (IDX) * 8UL) +#define L2_CLS_VLD 0x0000000000010000ULL +#define L2_CLS_ETYPE 0x000000000000ffffULL +#define L2_CLS_ETYPE_SHIFT 0 + +#define L3_CLS(IDX) (FZC_FFLP + 0x20010UL + (IDX) * 8UL) +#define L3_CLS_VALID 0x0000000002000000ULL +#define L3_CLS_IPVER 0x0000000001000000ULL +#define L3_CLS_PID 0x0000000000ff0000ULL +#define L3_CLS_PID_SHIFT 16 +#define L3_CLS_TOSMASK 0x000000000000ff00ULL +#define L3_CLS_TOSMASK_SHIFT 8 +#define L3_CLS_TOS 0x00000000000000ffULL +#define L3_CLS_TOS_SHIFT 0 + +#define TCAM_KEY(IDX) (FZC_FFLP + 0x20030UL + (IDX) * 8UL) +#define TCAM_KEY_DISC 0x0000000000000008ULL +#define TCAM_KEY_TSEL 0x0000000000000004ULL +#define TCAM_KEY_IPADDR 0x0000000000000001ULL + +#define TCAM_KEY_0 (FZC_FFLP + 0x20090UL) +#define TCAM_KEY_0_KEY 0x00000000000000ffULL /* bits 192-199 */ + +#define TCAM_KEY_1 (FZC_FFLP + 0x20098UL) +#define TCAM_KEY_1_KEY 0xffffffffffffffffULL /* bits 128-191 */ + +#define TCAM_KEY_2 (FZC_FFLP + 0x200a0UL) +#define TCAM_KEY_2_KEY 0xffffffffffffffffULL /* bits 64-127 */ + +#define TCAM_KEY_3 (FZC_FFLP + 0x200a8UL) +#define TCAM_KEY_3_KEY 0xffffffffffffffffULL /* bits 0-63 */ + +#define TCAM_KEY_MASK_0 (FZC_FFLP + 0x200b0UL) +#define TCAM_KEY_MASK_0_KEY_SEL 0x00000000000000ffULL /* bits 192-199 */ + +#define TCAM_KEY_MASK_1 (FZC_FFLP + 0x200b8UL) +#define TCAM_KEY_MASK_1_KEY_SEL 0xffffffffffffffffULL /* bits 128-191 */ + +#define TCAM_KEY_MASK_2 (FZC_FFLP + 0x200c0UL) +#define TCAM_KEY_MASK_2_KEY_SEL 0xffffffffffffffffULL /* bits 64-127 */ + +#define TCAM_KEY_MASK_3 (FZC_FFLP + 0x200c8UL) +#define TCAM_KEY_MASK_3_KEY_SEL 0xffffffffffffffffULL /* bits 0-63 */ + +#define TCAM_CTL (FZC_FFLP + 
0x200d0UL) +#define TCAM_CTL_RWC 0x00000000001c0000ULL +#define TCAM_CTL_RWC_TCAM_WRITE 0x0000000000000000ULL +#define TCAM_CTL_RWC_TCAM_READ 0x0000000000040000ULL +#define TCAM_CTL_RWC_TCAM_COMPARE 0x0000000000080000ULL +#define TCAM_CTL_RWC_RAM_WRITE 0x0000000000100000ULL +#define TCAM_CTL_RWC_RAM_READ 0x0000000000140000ULL +#define TCAM_CTL_STAT 0x0000000000020000ULL +#define TCAM_CTL_MATCH 0x0000000000010000ULL +#define TCAM_CTL_LOC 0x00000000000003ffULL + +#define TCAM_ERR (FZC_FFLP + 0x200d8UL) +#define TCAM_ERR_ERR 0x0000000080000000ULL +#define TCAM_ERR_P_ECC 0x0000000040000000ULL +#define TCAM_ERR_MULT 0x0000000020000000ULL +#define TCAM_ERR_ADDR 0x0000000000ff0000ULL +#define TCAM_ERR_SYNDROME 0x000000000000ffffULL + +#define HASH_LOOKUP_ERR_LOG1 (FZC_FFLP + 0x200e0UL) +#define HASH_LOOKUP_ERR_LOG1_ERR 0x0000000000000008ULL +#define HASH_LOOKUP_ERR_LOG1_MULT_LK 0x0000000000000004ULL +#define HASH_LOOKUP_ERR_LOG1_CU 0x0000000000000002ULL +#define HASH_LOOKUP_ERR_LOG1_MULT_BIT 0x0000000000000001ULL + +#define HASH_LOOKUP_ERR_LOG2 (FZC_FFLP + 0x200e8UL) +#define HASH_LOOKUP_ERR_LOG2_H1 0x000000007ffff800ULL +#define HASH_LOOKUP_ERR_LOG2_SUBAREA 0x0000000000000700ULL +#define HASH_LOOKUP_ERR_LOG2_SYNDROME 0x00000000000000ffULL + +#define FFLP_CFG_1 (FZC_FFLP + 0x20100UL) +#define FFLP_CFG_1_TCAM_DIS 0x0000000004000000ULL +#define FFLP_CFG_1_PIO_DBG_SEL 0x0000000003800000ULL +#define FFLP_CFG_1_PIO_FIO_RST 0x0000000000400000ULL +#define FFLP_CFG_1_PIO_FIO_LAT 0x0000000000300000ULL +#define FFLP_CFG_1_CAMLAT 0x00000000000f0000ULL +#define FFLP_CFG_1_CAMLAT_SHIFT 16 +#define FFLP_CFG_1_CAMRATIO 0x000000000000f000ULL +#define FFLP_CFG_1_CAMRATIO_SHIFT 12 +#define FFLP_CFG_1_FCRAMRATIO 0x0000000000000f00ULL +#define FFLP_CFG_1_FCRAMRATIO_SHIFT 8 +#define FFLP_CFG_1_FCRAMOUTDR_MASK 0x00000000000000f0ULL +#define FFLP_CFG_1_FCRAMOUTDR_NORMAL 0x0000000000000000ULL +#define FFLP_CFG_1_FCRAMOUTDR_STRONG 0x0000000000000050ULL +#define FFLP_CFG_1_FCRAMOUTDR_WEAK 0x00000000000000a0ULL +#define FFLP_CFG_1_FCRAMQS 0x0000000000000008ULL +#define FFLP_CFG_1_ERRORDIS 0x0000000000000004ULL +#define FFLP_CFG_1_FFLPINITDONE 0x0000000000000002ULL +#define FFLP_CFG_1_LLCSNAP 0x0000000000000001ULL + +#define DEFAULT_FCRAMRATIO 10 + +#define DEFAULT_TCAM_LATENCY 4 +#define DEFAULT_TCAM_ACCESS_RATIO 10 + +#define TCP_CFLAG_MSK (FZC_FFLP + 0x20108UL) +#define TCP_CFLAG_MSK_MASK 0x0000000000000fffULL + +#define FCRAM_REF_TMR (FZC_FFLP + 0x20110UL) +#define FCRAM_REF_TMR_MAX 0x00000000ffff0000ULL +#define FCRAM_REF_TMR_MAX_SHIFT 16 +#define FCRAM_REF_TMR_MIN 0x000000000000ffffULL +#define FCRAM_REF_TMR_MIN_SHIFT 0 + +#define DEFAULT_FCRAM_REFRESH_MAX 512 +#define DEFAULT_FCRAM_REFRESH_MIN 512 + +#define FCRAM_FIO_ADDR (FZC_FFLP + 0x20118UL) +#define FCRAM_FIO_ADDR_ADDR 0x00000000000000ffULL + +#define FCRAM_FIO_DAT (FZC_FFLP + 0x20120UL) +#define FCRAM_FIO_DAT_DATA 0x000000000000ffffULL + +#define FCRAM_ERR_TST0 (FZC_FFLP + 0x20128UL) +#define FCRAM_ERR_TST0_SYND 0x00000000000000ffULL + +#define FCRAM_ERR_TST1 (FZC_FFLP + 0x20130UL) +#define FCRAM_ERR_TST1_DAT 0x00000000ffffffffULL + +#define FCRAM_ERR_TST2 (FZC_FFLP + 0x20138UL) +#define FCRAM_ERR_TST2_DAT 0x00000000ffffffffULL + +#define FFLP_ERR_MASK (FZC_FFLP + 0x20140UL) +#define FFLP_ERR_MASK_HSH_TBL_DAT 0x00000000000007f8ULL +#define FFLP_ERR_MASK_HSH_TBL_LKUP 0x0000000000000004ULL +#define FFLP_ERR_MASK_TCAM 0x0000000000000002ULL +#define FFLP_ERR_MASK_VLAN 0x0000000000000001ULL + +#define FFLP_DBG_TRAIN_VCT (FZC_FFLP + 0x20148UL) +#define 
FFLP_DBG_TRAIN_VCT_VECTOR 0x00000000ffffffffULL + +#define FCRAM_PHY_RD_LAT (FZC_FFLP + 0x20150UL) +#define FCRAM_PHY_RD_LAT_LAT 0x00000000000000ffULL + +/* Ethernet TCAM format */ +#define TCAM_ETHKEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_ETHKEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_ETHKEY0_CLASS_CODE_SHIFT 3 +#define TCAM_ETHKEY0_RESV2 0x0000000000000007ULL +#define TCAM_ETHKEY1_FRAME_BYTE0_7(NUM) (0xff << ((7 - NUM) * 8)) +#define TCAM_ETHKEY2_FRAME_BYTE8 0xff00000000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE8_SHIFT 56 +#define TCAM_ETHKEY2_FRAME_BYTE9 0x00ff000000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE9_SHIFT 48 +#define TCAM_ETHKEY2_FRAME_BYTE10 0x0000ff0000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE10_SHIFT 40 +#define TCAM_ETHKEY2_FRAME_RESV 0x000000ffffffffffULL +#define TCAM_ETHKEY3_FRAME_RESV 0xffffffffffffffffULL + +/* IPV4 TCAM format */ +#define TCAM_V4KEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_V4KEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_V4KEY0_CLASS_CODE_SHIFT 3 +#define TCAM_V4KEY0_RESV2 0x0000000000000007ULL +#define TCAM_V4KEY1_L2RDCNUM 0xf800000000000000ULL +#define TCAM_V4KEY1_L2RDCNUM_SHIFT 59 +#define TCAM_V4KEY1_NOPORT 0x0400000000000000ULL +#define TCAM_V4KEY1_RESV 0x03ffffffffffffffULL +#define TCAM_V4KEY2_RESV 0xffff000000000000ULL +#define TCAM_V4KEY2_TOS 0x0000ff0000000000ULL +#define TCAM_V4KEY2_TOS_SHIFT 40 +#define TCAM_V4KEY2_PROTO 0x000000ff00000000ULL +#define TCAM_V4KEY2_PROTO_SHIFT 32 +#define TCAM_V4KEY2_PORT_SPI 0x00000000ffffffffULL +#define TCAM_V4KEY2_PORT_SPI_SHIFT 0 +#define TCAM_V4KEY3_SADDR 0xffffffff00000000ULL +#define TCAM_V4KEY3_SADDR_SHIFT 32 +#define TCAM_V4KEY3_DADDR 0x00000000ffffffffULL +#define TCAM_V4KEY3_DADDR_SHIFT 0 + +/* IPV6 TCAM format */ +#define TCAM_V6KEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_V6KEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_V6KEY0_CLASS_CODE_SHIFT 3 +#define TCAM_V6KEY0_RESV2 0x0000000000000007ULL +#define TCAM_V6KEY1_L2RDCNUM 0xf800000000000000ULL +#define TCAM_V6KEY1_L2RDCNUM_SHIFT 59 +#define TCAM_V6KEY1_NOPORT 0x0400000000000000ULL +#define TCAM_V6KEY1_RESV 0x03ff000000000000ULL +#define TCAM_V6KEY1_TOS 0x0000ff0000000000ULL +#define TCAM_V6KEY1_TOS_SHIFT 40 +#define TCAM_V6KEY1_NEXT_HDR 0x000000ff00000000ULL +#define TCAM_V6KEY1_NEXT_HDR_SHIFT 32 +#define TCAM_V6KEY1_PORT_SPI 0x00000000ffffffffULL +#define TCAM_V6KEY1_PORT_SPI_SHIFT 0 +#define TCAM_V6KEY2_ADDR_HIGH 0xffffffffffffffffULL +#define TCAM_V6KEY3_ADDR_LOW 0xffffffffffffffffULL + +#define TCAM_ASSOCDATA_SYNDROME 0x000003fffc000000ULL +#define TCAM_ASSOCDATA_SYNDROME_SHIFT 26 +#define TCAM_ASSOCDATA_ZFID 0x0000000003ffc000ULL +#define TCAM_ASSOCDATA_ZFID_SHIFT 14 +#define TCAM_ASSOCDATA_V4_ECC_OK 0x0000000000002000ULL +#define TCAM_ASSOCDATA_DISC 0x0000000000001000ULL +#define TCAM_ASSOCDATA_TRES_MASK 0x0000000000000c00ULL +#define TCAM_ASSOCDATA_TRES_USE_L2RDC 0x0000000000000000ULL +#define TCAM_ASSOCDATA_TRES_USE_OFFSET 0x0000000000000400ULL +#define TCAM_ASSOCDATA_TRES_OVR_RDC 0x0000000000000800ULL +#define TCAM_ASSOCDATA_TRES_OVR_RDC_OFF 0x0000000000000c00ULL +#define TCAM_ASSOCDATA_RDCTBL 0x0000000000000380ULL +#define TCAM_ASSOCDATA_RDCTBL_SHIFT 7 +#define TCAM_ASSOCDATA_OFFSET 0x000000000000007cULL +#define TCAM_ASSOCDATA_OFFSET_SHIFT 2 +#define TCAM_ASSOCDATA_ZFVLD 0x0000000000000002ULL +#define TCAM_ASSOCDATA_AGE 0x0000000000000001ULL + +#define FLOW_KEY(IDX) (FZC_FFLP + 0x40000UL + (IDX) * 8UL) +#define FLOW_KEY_PORT 0x0000000000000200ULL +#define FLOW_KEY_L2DA 0x0000000000000100ULL +#define 
FLOW_KEY_VLAN 0x0000000000000080ULL +#define FLOW_KEY_IPSA 0x0000000000000040ULL +#define FLOW_KEY_IPDA 0x0000000000000020ULL +#define FLOW_KEY_PROTO 0x0000000000000010ULL +#define FLOW_KEY_L4_0 0x000000000000000cULL +#define FLOW_KEY_L4_0_SHIFT 2 +#define FLOW_KEY_L4_1 0x0000000000000003ULL +#define FLOW_KEY_L4_1_SHIFT 0 + +#define FLOW_KEY_L4_NONE 0x0 +#define FLOW_KEY_L4_RESV 0x1 +#define FLOW_KEY_L4_BYTE12 0x2 +#define FLOW_KEY_L4_BYTE56 0x3 + +#define H1POLY (FZC_FFLP + 0x40060UL) +#define H1POLY_INITVAL 0x00000000ffffffffULL + +#define H2POLY (FZC_FFLP + 0x40068UL) +#define H2POLY_INITVAL 0x000000000000ffffULL + +#define FLW_PRT_SEL(IDX) (FZC_FFLP + 0x40070UL + (IDX) * 8UL) +#define FLW_PRT_SEL_EXT 0x0000000000010000ULL +#define FLW_PRT_SEL_MASK 0x0000000000001f00ULL +#define FLW_PRT_SEL_MASK_SHIFT 8 +#define FLW_PRT_SEL_BASE 0x000000000000001fULL +#define FLW_PRT_SEL_BASE_SHIFT 0 + +#define HASH_TBL_ADDR(IDX) (FFLP + 0x00000UL + (IDX) * 8192UL) +#define HASH_TBL_ADDR_AUTOINC 0x0000000000800000ULL +#define HASH_TBL_ADDR_ADDR 0x00000000007fffffULL + +#define HASH_TBL_DATA(IDX) (FFLP + 0x00008UL + (IDX) * 8192UL) +#define HASH_TBL_DATA_DATA 0xffffffffffffffffULL + +/* FCRAM hash table entries are up to 8 64-bit words in size. + * The layout of each entry is determined by the settings in the + * first word, which is the header. + * + * The indexing is controllable per partition (there is one partition + * per RDC group, thus a total of eight) using the BASE and MASK fields + * of FLW_PRT_SEL above. + */ +#define FCRAM_SIZE 0x800000 +#define FCRAM_NUM_PARTITIONS 8 + +/* Generic HASH entry header, used for all non-optimized formats. */ +#define HASH_HEADER_FMT 0x8000000000000000ULL +#define HASH_HEADER_EXT 0x4000000000000000ULL +#define HASH_HEADER_VALID 0x2000000000000000ULL +#define HASH_HEADER_RESVD 0x1000000000000000ULL +#define HASH_HEADER_L2_DADDR 0x0ffffffffffff000ULL +#define HASH_HEADER_L2_DADDR_SHIFT 12 +#define HASH_HEADER_VLAN 0x0000000000000fffULL +#define HASH_HEADER_VLAN_SHIFT 0 + +/* Optimized format, just a header with a special layout defined below. + * Set FMT and EXT both to zero to indicate this layout is being used. + */ +#define HASH_OPT_HEADER_FMT 0x8000000000000000ULL +#define HASH_OPT_HEADER_EXT 0x4000000000000000ULL +#define HASH_OPT_HEADER_VALID 0x2000000000000000ULL +#define HASH_OPT_HEADER_RDCOFF 0x1f00000000000000ULL +#define HASH_OPT_HEADER_RDCOFF_SHIFT 56 +#define HASH_OPT_HEADER_HASH2 0x00ffff0000000000ULL +#define HASH_OPT_HEADER_HASH2_SHIFT 40 +#define HASH_OPT_HEADER_RESVD 0x000000ff00000000ULL +#define HASH_OPT_HEADER_USERINFO 0x00000000ffffffffULL +#define HASH_OPT_HEADER_USERINFO_SHIFT 0 + +/* Port and protocol word used for ipv4 and ipv6 layouts. */ +#define HASH_PORT_DPORT 0xffff000000000000ULL +#define HASH_PORT_DPORT_SHIFT 48 +#define HASH_PORT_SPORT 0x0000ffff00000000ULL +#define HASH_PORT_SPORT_SHIFT 32 +#define HASH_PORT_PROTO 0x00000000ff000000ULL +#define HASH_PORT_PROTO_SHIFT 24 +#define HASH_PORT_PORT_OFF 0x0000000000c00000ULL +#define HASH_PORT_PORT_OFF_SHIFT 22 +#define HASH_PORT_PORT_RESV 0x00000000003fffffULL + +/* Action word used for ipv4 and ipv6 layouts. 
*/ +#define HASH_ACTION_RESV1 0xe000000000000000ULL +#define HASH_ACTION_RDCOFF 0x1f00000000000000ULL +#define HASH_ACTION_RDCOFF_SHIFT 56 +#define HASH_ACTION_ZFVALID 0x0080000000000000ULL +#define HASH_ACTION_RESV2 0x0070000000000000ULL +#define HASH_ACTION_ZFID 0x000fff0000000000ULL +#define HASH_ACTION_ZFID_SHIFT 40 +#define HASH_ACTION_RESV3 0x000000ff00000000ULL +#define HASH_ACTION_USERINFO 0x00000000ffffffffULL +#define HASH_ACTION_USERINFO_SHIFT 0 + +/* IPV4 address word. Addresses are in network endian. */ +#define HASH_IP4ADDR_SADDR 0xffffffff00000000ULL +#define HASH_IP4ADDR_SADDR_SHIFT 32 +#define HASH_IP4ADDR_DADDR 0x00000000ffffffffULL +#define HASH_IP4ADDR_DADDR_SHIFT 0 + +/* IPV6 address layout is 4 words, first two are saddr, next two + * are daddr. Addresses are in network endian. + */ + +struct fcram_hash_opt { + u64 header; +}; + +/* EXT=1, FMT=0 */ +struct fcram_hash_ipv4 { + u64 header; + u64 addrs; + u64 ports; + u64 action; +}; + +/* EXT=1, FMT=1 */ +struct fcram_hash_ipv6 { + u64 header; + u64 addrs[4]; + u64 ports; + u64 action; +}; + +#define HASH_TBL_DATA_LOG(IDX) (FFLP + 0x00010UL + (IDX) * 8192UL) +#define HASH_TBL_DATA_LOG_ERR 0x0000000080000000ULL +#define HASH_TBL_DATA_LOG_ADDR 0x000000007fffff00ULL +#define HASH_TBL_DATA_LOG_SYNDROME 0x00000000000000ffULL + +#define RX_DMA_CK_DIV (FZC_DMC + 0x00000UL) +#define RX_DMA_CK_DIV_CNT 0x000000000000ffffULL + +#define DEF_RDC(IDX) (FZC_DMC + 0x00008UL + (IDX) * 0x8UL) +#define DEF_RDC_VAL 0x000000000000001fULL + +#define PT_DRR_WT(IDX) (FZC_DMC + 0x00028UL + (IDX) * 0x8UL) +#define PT_DRR_WT_VAL 0x000000000000ffffULL + +#define PT_DRR_WEIGHT_DEFAULT_10G 0x0400 +#define PT_DRR_WEIGHT_DEFAULT_1G 0x0066 + +#define PT_USE(IDX) (FZC_DMC + 0x00048UL + (IDX) * 0x8UL) +#define PT_USE_CNT 0x00000000000fffffULL + +#define RED_RAN_INIT (FZC_DMC + 0x00068UL) +#define RED_RAN_INIT_OPMODE 0x0000000000010000ULL +#define RED_RAN_INIT_VAL 0x000000000000ffffULL + +#define RX_ADDR_MD (FZC_DMC + 0x00070UL) +#define RX_ADDR_MD_DBG_PT_MUX_SEL 0x000000000000000cULL +#define RX_ADDR_MD_RAM_ACC 0x0000000000000002ULL +#define RX_ADDR_MD_MODE32 0x0000000000000001ULL + +#define RDMC_PRE_PAR_ERR (FZC_DMC + 0x00078UL) +#define RDMC_PRE_PAR_ERR_ERR 0x0000000000008000ULL +#define RDMC_PRE_PAR_ERR_MERR 0x0000000000004000ULL +#define RDMC_PRE_PAR_ERR_ADDR 0x00000000000000ffULL + +#define RDMC_SHA_PAR_ERR (FZC_DMC + 0x00080UL) +#define RDMC_SHA_PAR_ERR_ERR 0x0000000000008000ULL +#define RDMC_SHA_PAR_ERR_MERR 0x0000000000004000ULL +#define RDMC_SHA_PAR_ERR_ADDR 0x00000000000000ffULL + +#define RDMC_MEM_ADDR (FZC_DMC + 0x00088UL) +#define RDMC_MEM_ADDR_PRE_SHAD 0x0000000000000100ULL +#define RDMC_MEM_ADDR_ADDR 0x00000000000000ffULL + +#define RDMC_MEM_DAT0 (FZC_DMC + 0x00090UL) +#define RDMC_MEM_DAT0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define RDMC_MEM_DAT1 (FZC_DMC + 0x00098UL) +#define RDMC_MEM_DAT1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define RDMC_MEM_DAT2 (FZC_DMC + 0x000a0UL) +#define RDMC_MEM_DAT2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define RDMC_MEM_DAT3 (FZC_DMC + 0x000a8UL) +#define RDMC_MEM_DAT3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define RDMC_MEM_DAT4 (FZC_DMC + 0x000b0UL) +#define RDMC_MEM_DAT4_DATA 0x00000000000fffffULL /* bits 147:128 */ + +#define RX_CTL_DAT_FIFO_STAT (FZC_DMC + 0x000b8UL) +#define RX_CTL_DAT_FIFO_STAT_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_STAT_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_STAT_IPP_EOP_ERR 0x000000000000000fULL + +#define 
RX_CTL_DAT_FIFO_MASK (FZC_DMC + 0x000c0UL) +#define RX_CTL_DAT_FIFO_MASK_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_MASK_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_MASK_IPP_EOP_ERR 0x000000000000000fULL + +#define RDMC_TRAINING_VECTOR (FZC_DMC + 0x000c8UL) +#define RDMC_TRAINING_VECTOR_TRAINING_VECTOR 0x00000000ffffffffULL + +#define RX_CTL_DAT_FIFO_STAT_DBG (FZC_DMC + 0x000d0UL) +#define RX_CTL_DAT_FIFO_STAT_DBG_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_STAT_DBG_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_STAT_DBG_IPP_EOP_ERR 0x000000000000000fULL + +#define RDC_TBL(TBL,SLOT) (FZC_ZCP + 0x10000UL + \ + (TBL) * (8UL * 16UL) + \ + (SLOT) * 8UL) +#define RDC_TBL_RDC 0x000000000000000fULL + +#define RX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x20000UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL +#define RX_LOG_PAGE_VLD_FUNC_SHIFT 2 +#define RX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL +#define RX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL + +#define RX_LOG_MASK1(IDX) (FZC_DMC + 0x20008UL + (IDX) * 0x40UL) +#define RX_LOG_MASK1_MASK 0x00000000ffffffffULL + +#define RX_LOG_VAL1(IDX) (FZC_DMC + 0x20010UL + (IDX) * 0x40UL) +#define RX_LOG_VAL1_VALUE 0x00000000ffffffffULL + +#define RX_LOG_MASK2(IDX) (FZC_DMC + 0x20018UL + (IDX) * 0x40UL) +#define RX_LOG_MASK2_MASK 0x00000000ffffffffULL + +#define RX_LOG_VAL2(IDX) (FZC_DMC + 0x20020UL + (IDX) * 0x40UL) +#define RX_LOG_VAL2_VALUE 0x00000000ffffffffULL + +#define RX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x20028UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL + +#define RX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x20030UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL + +#define RX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x20038UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL + +#define TX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x40000UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL +#define TX_LOG_PAGE_VLD_FUNC_SHIFT 2 +#define TX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL +#define TX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL + +#define TX_LOG_MASK1(IDX) (FZC_DMC + 0x40008UL + (IDX) * 0x200UL) +#define TX_LOG_MASK1_MASK 0x00000000ffffffffULL + +#define TX_LOG_VAL1(IDX) (FZC_DMC + 0x40010UL + (IDX) * 0x200UL) +#define TX_LOG_VAL1_VALUE 0x00000000ffffffffULL + +#define TX_LOG_MASK2(IDX) (FZC_DMC + 0x40018UL + (IDX) * 0x200UL) +#define TX_LOG_MASK2_MASK 0x00000000ffffffffULL + +#define TX_LOG_VAL2(IDX) (FZC_DMC + 0x40020UL + (IDX) * 0x200UL) +#define TX_LOG_VAL2_VALUE 0x00000000ffffffffULL + +#define TX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x40028UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL + +#define TX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x40030UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL + +#define TX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x40038UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL + +#define TX_ADDR_MD (FZC_DMC + 0x45000UL) +#define TX_ADDR_MD_MODE32 0x0000000000000001ULL + +#define RDC_RED_PARA(IDX) (FZC_DMC + 0x30000UL + (IDX) * 0x40UL) +#define RDC_RED_PARA_THRE_SYN 0x00000000fff00000ULL +#define RDC_RED_PARA_THRE_SYN_SHIFT 20 +#define RDC_RED_PARA_WIN_SYN 0x00000000000f0000ULL +#define RDC_RED_PARA_WIN_SYN_SHIFT 16 +#define RDC_RED_PARA_THRE 0x000000000000fff0ULL +#define RDC_RED_PARA_THRE_SHIFT 4 +#define RDC_RED_PARA_WIN 0x000000000000000fULL +#define RDC_RED_PARA_WIN_SHIFT 0 + +#define RED_DIS_CNT(IDX) (FZC_DMC + 0x30008UL + 
(IDX) * 0x40UL) +#define RED_DIS_CNT_OFLOW 0x0000000000010000ULL +#define RED_DIS_CNT_COUNT 0x000000000000ffffULL + +#define IPP_CFIG (FZC_IPP + 0x00000UL) +#define IPP_CFIG_SOFT_RST 0x0000000080000000ULL +#define IPP_CFIG_IP_MAX_PKT 0x0000000001ffff00ULL +#define IPP_CFIG_IP_MAX_PKT_SHIFT 8 +#define IPP_CFIG_FFLP_CS_PIO_W 0x0000000000000080ULL +#define IPP_CFIG_PFIFO_PIO_W 0x0000000000000040ULL +#define IPP_CFIG_DFIFO_PIO_W 0x0000000000000020ULL +#define IPP_CFIG_CKSUM_EN 0x0000000000000010ULL +#define IPP_CFIG_DROP_BAD_CRC 0x0000000000000008ULL +#define IPP_CFIG_DFIFO_ECC_EN 0x0000000000000004ULL +#define IPP_CFIG_DEBUG_BUS_OUT_EN 0x0000000000000002ULL +#define IPP_CFIG_IPP_ENABLE 0x0000000000000001ULL + +#define IPP_PKT_DIS (FZC_IPP + 0x00020UL) +#define IPP_PKT_DIS_COUNT 0x0000000000003fffULL + +#define IPP_BAD_CS_CNT (FZC_IPP + 0x00028UL) +#define IPP_BAD_CS_CNT_COUNT 0x0000000000003fffULL + +#define IPP_ECC (FZC_IPP + 0x00030UL) +#define IPP_ECC_COUNT 0x00000000000000ffULL + +#define IPP_INT_STAT (FZC_IPP + 0x00040UL) +#define IPP_INT_STAT_SOP_MISS 0x0000000080000000ULL +#define IPP_INT_STAT_EOP_MISS 0x0000000040000000ULL +#define IPP_INT_STAT_DFIFO_UE 0x0000000030000000ULL +#define IPP_INT_STAT_DFIFO_CE 0x000000000c000000ULL +#define IPP_INT_STAT_DFIFO_ECC 0x0000000003000000ULL +#define IPP_INT_STAT_DFIFO_ECC_IDX 0x00000000007ff000ULL +#define IPP_INT_STAT_PFIFO_PERR 0x0000000000000800ULL +#define IPP_INT_STAT_ECC_ERR_MAX 0x0000000000000400ULL +#define IPP_INT_STAT_PFIFO_ERR_IDX 0x00000000000003f0ULL +#define IPP_INT_STAT_PFIFO_OVER 0x0000000000000008ULL +#define IPP_INT_STAT_PFIFO_UND 0x0000000000000004ULL +#define IPP_INT_STAT_BAD_CS_MX 0x0000000000000002ULL +#define IPP_INT_STAT_PKT_DIS_MX 0x0000000000000001ULL +#define IPP_INT_STAT_ALL 0x00000000ff7fffffULL + +#define IPP_MSK (FZC_IPP + 0x00048UL) +#define IPP_MSK_ECC_ERR_MX 0x0000000000000080ULL +#define IPP_MSK_DFIFO_EOP_SOP 0x0000000000000040ULL +#define IPP_MSK_DFIFO_UC 0x0000000000000020ULL +#define IPP_MSK_PFIFO_PAR 0x0000000000000010ULL +#define IPP_MSK_PFIFO_OVER 0x0000000000000008ULL +#define IPP_MSK_PFIFO_UND 0x0000000000000004ULL +#define IPP_MSK_BAD_CS 0x0000000000000002ULL +#define IPP_MSK_PKT_DIS_CNT 0x0000000000000001ULL +#define IPP_MSK_ALL 0x00000000000000ffULL + +#define IPP_PFIFO_RD0 (FZC_IPP + 0x00060UL) +#define IPP_PFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_PFIFO_RD1 (FZC_IPP + 0x00068UL) +#define IPP_PFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_PFIFO_RD2 (FZC_IPP + 0x00070UL) +#define IPP_PFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_PFIFO_RD3 (FZC_IPP + 0x00078UL) +#define IPP_PFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_PFIFO_RD4 (FZC_IPP + 0x00080UL) +#define IPP_PFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_PFIFO_WR0 (FZC_IPP + 0x00088UL) +#define IPP_PFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_PFIFO_WR1 (FZC_IPP + 0x00090UL) +#define IPP_PFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_PFIFO_WR2 (FZC_IPP + 0x00098UL) +#define IPP_PFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_PFIFO_WR3 (FZC_IPP + 0x000a0UL) +#define IPP_PFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_PFIFO_WR4 (FZC_IPP + 0x000a8UL) +#define IPP_PFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_PFIFO_RD_PTR (FZC_IPP + 0x000b0UL) +#define IPP_PFIFO_RD_PTR_PTR 0x000000000000003fULL + +#define IPP_PFIFO_WR_PTR 
(FZC_IPP + 0x000b8UL) +#define IPP_PFIFO_WR_PTR_PTR 0x000000000000007fULL + +#define IPP_DFIFO_RD0 (FZC_IPP + 0x000c0UL) +#define IPP_DFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_DFIFO_RD1 (FZC_IPP + 0x000c8UL) +#define IPP_DFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_DFIFO_RD2 (FZC_IPP + 0x000d0UL) +#define IPP_DFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_DFIFO_RD3 (FZC_IPP + 0x000d8UL) +#define IPP_DFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_DFIFO_RD4 (FZC_IPP + 0x000e0UL) +#define IPP_DFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_DFIFO_WR0 (FZC_IPP + 0x000e8UL) +#define IPP_DFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_DFIFO_WR1 (FZC_IPP + 0x000f0UL) +#define IPP_DFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_DFIFO_WR2 (FZC_IPP + 0x000f8UL) +#define IPP_DFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_DFIFO_WR3 (FZC_IPP + 0x00100UL) +#define IPP_DFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_DFIFO_WR4 (FZC_IPP + 0x00108UL) +#define IPP_DFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_DFIFO_RD_PTR (FZC_IPP + 0x00110UL) +#define IPP_DFIFO_RD_PTR_PTR 0x0000000000000fffULL + +#define IPP_DFIFO_WR_PTR (FZC_IPP + 0x00118UL) +#define IPP_DFIFO_WR_PTR_PTR 0x0000000000000fffULL + +#define IPP_SM (FZC_IPP + 0x00120UL) +#define IPP_SM_SM 0x00000000ffffffffULL + +#define IPP_CS_STAT (FZC_IPP + 0x00128UL) +#define IPP_CS_STAT_BCYC_CNT 0x00000000ff000000ULL +#define IPP_CS_STAT_IP_LEN 0x0000000000fff000ULL +#define IPP_CS_STAT_CS_FAIL 0x0000000000000800ULL +#define IPP_CS_STAT_TERM 0x0000000000000400ULL +#define IPP_CS_STAT_BAD_NUM 0x0000000000000200ULL +#define IPP_CS_STAT_CS_STATE 0x00000000000001ffULL + +#define IPP_FFLP_CS_INFO (FZC_IPP + 0x00130UL) +#define IPP_FFLP_CS_INFO_PKT_ID 0x0000000000003c00ULL +#define IPP_FFLP_CS_INFO_L4_PROTO 0x0000000000000300ULL +#define IPP_FFLP_CS_INFO_V4_HD_LEN 0x00000000000000f0ULL +#define IPP_FFLP_CS_INFO_L3_VER 0x000000000000000cULL +#define IPP_FFLP_CS_INFO_L2_OP 0x0000000000000003ULL + +#define IPP_DBG_SEL (FZC_IPP + 0x00138UL) +#define IPP_DBG_SEL_SEL 0x000000000000000fULL + +#define IPP_DFIFO_ECC_SYND (FZC_IPP + 0x00140UL) +#define IPP_DFIFO_ECC_SYND_SYND 0x000000000000ffffULL + +#define IPP_DFIFO_EOP_RD_PTR (FZC_IPP + 0x00148UL) +#define IPP_DFIFO_EOP_RD_PTR_PTR 0x0000000000000fffULL + +#define IPP_ECC_CTL (FZC_IPP + 0x00150UL) +#define IPP_ECC_CTL_DIS_DBL 0x0000000080000000ULL +#define IPP_ECC_CTL_COR_DBL 0x0000000000020000ULL +#define IPP_ECC_CTL_COR_SNG 0x0000000000010000ULL +#define IPP_ECC_CTL_COR_ALL 0x0000000000000400ULL +#define IPP_ECC_CTL_COR_1 0x0000000000000100ULL +#define IPP_ECC_CTL_COR_LST 0x0000000000000004ULL +#define IPP_ECC_CTL_COR_SND 0x0000000000000002ULL +#define IPP_ECC_CTL_COR_FSR 0x0000000000000001ULL + +#define NIU_DFIFO_ENTRIES 1024 +#define ATLAS_P0_P1_DFIFO_ENTRIES 2048 +#define ATLAS_P2_P3_DFIFO_ENTRIES 1024 + +#define ZCP_CFIG (FZC_ZCP + 0x00000UL) +#define ZCP_CFIG_ZCP_32BIT_MODE 0x0000000001000000ULL +#define ZCP_CFIG_ZCP_DEBUG_SEL 0x0000000000ff0000ULL +#define ZCP_CFIG_DMA_TH 0x000000000000ffe0ULL +#define ZCP_CFIG_ECC_CHK_DIS 0x0000000000000010ULL +#define ZCP_CFIG_PAR_CHK_DIS 0x0000000000000008ULL +#define ZCP_CFIG_DIS_BUFF_RSP_IF 0x0000000000000004ULL +#define ZCP_CFIG_DIS_BUFF_REQ_IF 0x0000000000000002ULL +#define ZCP_CFIG_ZC_ENABLE 0x0000000000000001ULL + +#define ZCP_INT_STAT (FZC_ZCP + 
0x00008UL) +#define ZCP_INT_STAT_RRFIFO_UNDERRUN 0x0000000000008000ULL +#define ZCP_INT_STAT_RRFIFO_OVERRUN 0x0000000000004000ULL +#define ZCP_INT_STAT_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL +#define ZCP_INT_STAT_BUFFER_OVERFLOW 0x0000000000000800ULL +#define ZCP_INT_STAT_STAT_TBL_PERR 0x0000000000000400ULL +#define ZCP_INT_STAT_DYN_TBL_PERR 0x0000000000000200ULL +#define ZCP_INT_STAT_BUF_TBL_PERR 0x0000000000000100ULL +#define ZCP_INT_STAT_TT_PROGRAM_ERR 0x0000000000000080ULL +#define ZCP_INT_STAT_RSP_TT_INDEX_ERR 0x0000000000000040ULL +#define ZCP_INT_STAT_SLV_TT_INDEX_ERR 0x0000000000000020ULL +#define ZCP_INT_STAT_ZCP_TT_INDEX_ERR 0x0000000000000010ULL +#define ZCP_INT_STAT_CFIFO_ECC3 0x0000000000000008ULL +#define ZCP_INT_STAT_CFIFO_ECC2 0x0000000000000004ULL +#define ZCP_INT_STAT_CFIFO_ECC1 0x0000000000000002ULL +#define ZCP_INT_STAT_CFIFO_ECC0 0x0000000000000001ULL +#define ZCP_INT_STAT_ALL 0x000000000000ffffULL + +#define ZCP_INT_MASK (FZC_ZCP + 0x00010UL) +#define ZCP_INT_MASK_RRFIFO_UNDERRUN 0x0000000000008000ULL +#define ZCP_INT_MASK_RRFIFO_OVERRUN 0x0000000000004000ULL +#define ZCP_INT_MASK_LOJ 0x0000000000002000ULL +#define ZCP_INT_MASK_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL +#define ZCP_INT_MASK_BUFFER_OVERFLOW 0x0000000000000800ULL +#define ZCP_INT_MASK_STAT_TBL_PERR 0x0000000000000400ULL +#define ZCP_INT_MASK_DYN_TBL_PERR 0x0000000000000200ULL +#define ZCP_INT_MASK_BUF_TBL_PERR 0x0000000000000100ULL +#define ZCP_INT_MASK_TT_PROGRAM_ERR 0x0000000000000080ULL +#define ZCP_INT_MASK_RSP_TT_INDEX_ERR 0x0000000000000040ULL +#define ZCP_INT_MASK_SLV_TT_INDEX_ERR 0x0000000000000020ULL +#define ZCP_INT_MASK_ZCP_TT_INDEX_ERR 0x0000000000000010ULL +#define ZCP_INT_MASK_CFIFO_ECC3 0x0000000000000008ULL +#define ZCP_INT_MASK_CFIFO_ECC2 0x0000000000000004ULL +#define ZCP_INT_MASK_CFIFO_ECC1 0x0000000000000002ULL +#define ZCP_INT_MASK_CFIFO_ECC0 0x0000000000000001ULL +#define ZCP_INT_MASK_ALL 0x000000000000ffffULL + +#define BAM4BUF (FZC_ZCP + 0x00018UL) +#define BAM4BUF_LOJ 0x0000000080000000ULL +#define BAM4BUF_EN_CK 0x0000000040000000ULL +#define BAM4BUF_IDX_END0 0x000000003ff00000ULL +#define BAM4BUF_IDX_ST0 0x00000000000ffc00ULL +#define BAM4BUF_OFFSET0 0x00000000000003ffULL + +#define BAM8BUF (FZC_ZCP + 0x00020UL) +#define BAM8BUF_LOJ 0x0000000080000000ULL +#define BAM8BUF_EN_CK 0x0000000040000000ULL +#define BAM8BUF_IDX_END1 0x000000003ff00000ULL +#define BAM8BUF_IDX_ST1 0x00000000000ffc00ULL +#define BAM8BUF_OFFSET1 0x00000000000003ffULL + +#define BAM16BUF (FZC_ZCP + 0x00028UL) +#define BAM16BUF_LOJ 0x0000000080000000ULL +#define BAM16BUF_EN_CK 0x0000000040000000ULL +#define BAM16BUF_IDX_END2 0x000000003ff00000ULL +#define BAM16BUF_IDX_ST2 0x00000000000ffc00ULL +#define BAM16BUF_OFFSET2 0x00000000000003ffULL + +#define BAM32BUF (FZC_ZCP + 0x00030UL) +#define BAM32BUF_LOJ 0x0000000080000000ULL +#define BAM32BUF_EN_CK 0x0000000040000000ULL +#define BAM32BUF_IDX_END3 0x000000003ff00000ULL +#define BAM32BUF_IDX_ST3 0x00000000000ffc00ULL +#define BAM32BUF_OFFSET3 0x00000000000003ffULL + +#define DST4BUF (FZC_ZCP + 0x00038UL) +#define DST4BUF_DS_OFFSET0 0x00000000000003ffULL + +#define DST8BUF (FZC_ZCP + 0x00040UL) +#define DST8BUF_DS_OFFSET1 0x00000000000003ffULL + +#define DST16BUF (FZC_ZCP + 0x00048UL) +#define DST16BUF_DS_OFFSET2 0x00000000000003ffULL + +#define DST32BUF (FZC_ZCP + 0x00050UL) +#define DST32BUF_DS_OFFSET3 0x00000000000003ffULL + +#define ZCP_RAM_DATA0 (FZC_ZCP + 0x00058UL) +#define ZCP_RAM_DATA0_DAT0 0x00000000ffffffffULL + +#define ZCP_RAM_DATA1 (FZC_ZCP + 
0x00060UL) +#define ZCP_RAM_DAT10_DAT1 0x00000000ffffffffULL + +#define ZCP_RAM_DATA2 (FZC_ZCP + 0x00068UL) +#define ZCP_RAM_DATA2_DAT2 0x00000000ffffffffULL + +#define ZCP_RAM_DATA3 (FZC_ZCP + 0x00070UL) +#define ZCP_RAM_DATA3_DAT3 0x00000000ffffffffULL + +#define ZCP_RAM_DATA4 (FZC_ZCP + 0x00078UL) +#define ZCP_RAM_DATA4_DAT4 0x00000000000000ffULL + +#define ZCP_RAM_BE (FZC_ZCP + 0x00080UL) +#define ZCP_RAM_BE_VAL 0x000000000001ffffULL + +#define ZCP_RAM_ACC (FZC_ZCP + 0x00088UL) +#define ZCP_RAM_ACC_BUSY 0x0000000080000000ULL +#define ZCP_RAM_ACC_READ 0x0000000040000000ULL +#define ZCP_RAM_ACC_WRITE 0x0000000000000000ULL +#define ZCP_RAM_ACC_LOJ 0x0000000020000000ULL +#define ZCP_RAM_ACC_ZFCID 0x000000001ffe0000ULL +#define ZCP_RAM_ACC_ZFCID_SHIFT 17 +#define ZCP_RAM_ACC_RAM_SEL 0x000000000001f000ULL +#define ZCP_RAM_ACC_RAM_SEL_SHIFT 12 +#define ZCP_RAM_ACC_CFIFOADDR 0x0000000000000fffULL +#define ZCP_RAM_ACC_CFIFOADDR_SHIFT 0 + +#define ZCP_RAM_SEL_BAM(INDEX) (0x00 + (INDEX)) +#define ZCP_RAM_SEL_TT_STATIC 0x08 +#define ZCP_RAM_SEL_TT_DYNAMIC 0x09 +#define ZCP_RAM_SEL_CFIFO(PORT) (0x10 + (PORT)) + +#define NIU_CFIFO_ENTRIES 1024 +#define ATLAS_P0_P1_CFIFO_ENTRIES 2048 +#define ATLAS_P2_P3_CFIFO_ENTRIES 1024 + +#define CHK_BIT_DATA (FZC_ZCP + 0x00090UL) +#define CHK_BIT_DATA_DATA 0x000000000000ffffULL + +#define RESET_CFIFO (FZC_ZCP + 0x00098UL) +#define RESET_CFIFO_RST(PORT) (0x1 << (PORT)) + +#define CFIFO_ECC(PORT) (FZC_ZCP + 0x000a0UL + (PORT) * 8UL) +#define CFIFO_ECC_DIS_DBLBIT_ERR 0x0000000080000000ULL +#define CFIFO_ECC_DBLBIT_ERR 0x0000000000020000ULL +#define CFIFO_ECC_SINGLEBIT_ERR 0x0000000000010000ULL +#define CFIFO_ECC_ALL_PKT 0x0000000000000400ULL +#define CFIFO_ECC_LAST_LINE 0x0000000000000004ULL +#define CFIFO_ECC_2ND_LINE 0x0000000000000002ULL +#define CFIFO_ECC_1ST_LINE 0x0000000000000001ULL + +#define ZCP_TRAINING_VECTOR (FZC_ZCP + 0x000c0UL) +#define ZCP_TRAINING_VECTOR_VECTOR 0x00000000ffffffffULL + +#define ZCP_STATE_MACHINE (FZC_ZCP + 0x000c8UL) +#define ZCP_STATE_MACHINE_SM 0x00000000ffffffffULL + +/* Same bits as ZCP_INT_STAT */ +#define ZCP_INT_STAT_TEST (FZC_ZCP + 0x00108UL) + +#define RXDMA_CFIG1(IDX) (DMC + 0x00000UL + (IDX) * 0x200UL) +#define RXDMA_CFIG1_EN 0x0000000080000000ULL +#define RXDMA_CFIG1_RST 0x0000000040000000ULL +#define RXDMA_CFIG1_QST 0x0000000020000000ULL +#define RXDMA_CFIG1_MBADDR_H 0x0000000000000fffULL /* mboxaddr 43:32 */ + +#define RXDMA_CFIG2(IDX) (DMC + 0x00008UL + (IDX) * 0x200UL) +#define RXDMA_CFIG2_MBADDR_L 0x00000000ffffffc0ULL /* mboxaddr 31:6 */ +#define RXDMA_CFIG2_OFFSET 0x0000000000000006ULL +#define RXDMA_CFIG2_OFFSET_SHIFT 1 +#define RXDMA_CFIG2_FULL_HDR 0x0000000000000001ULL + +#define RBR_CFIG_A(IDX) (DMC + 0x00010UL + (IDX) * 0x200UL) +#define RBR_CFIG_A_LEN 0xffff000000000000ULL +#define RBR_CFIG_A_LEN_SHIFT 48 +#define RBR_CFIG_A_STADDR_BASE 0x00000ffffffc0000ULL +#define RBR_CFIG_A_STADDR 0x000000000003ffc0ULL + +#define RBR_CFIG_B(IDX) (DMC + 0x00018UL + (IDX) * 0x200UL) +#define RBR_CFIG_B_BLKSIZE 0x0000000003000000ULL +#define RBR_CFIG_B_BLKSIZE_SHIFT 24 +#define RBR_CFIG_B_VLD2 0x0000000000800000ULL +#define RBR_CFIG_B_BUFSZ2 0x0000000000030000ULL +#define RBR_CFIG_B_BUFSZ2_SHIFT 16 +#define RBR_CFIG_B_VLD1 0x0000000000008000ULL +#define RBR_CFIG_B_BUFSZ1 0x0000000000000300ULL +#define RBR_CFIG_B_BUFSZ1_SHIFT 8 +#define RBR_CFIG_B_VLD0 0x0000000000000080ULL +#define RBR_CFIG_B_BUFSZ0 0x0000000000000003ULL +#define RBR_CFIG_B_BUFSZ0_SHIFT 0 + +#define RBR_BLKSIZE_4K 0x0 +#define RBR_BLKSIZE_8K 0x1 +#define 
RBR_BLKSIZE_16K 0x2 +#define RBR_BLKSIZE_32K 0x3 +#define RBR_BUFSZ2_2K 0x0 +#define RBR_BUFSZ2_4K 0x1 +#define RBR_BUFSZ2_8K 0x2 +#define RBR_BUFSZ2_16K 0x3 +#define RBR_BUFSZ1_1K 0x0 +#define RBR_BUFSZ1_2K 0x1 +#define RBR_BUFSZ1_4K 0x2 +#define RBR_BUFSZ1_8K 0x3 +#define RBR_BUFSZ0_256 0x0 +#define RBR_BUFSZ0_512 0x1 +#define RBR_BUFSZ0_1K 0x2 +#define RBR_BUFSZ0_2K 0x3 + +#define RBR_KICK(IDX) (DMC + 0x00020UL + (IDX) * 0x200UL) +#define RBR_KICK_BKADD 0x000000000000ffffULL + +#define RBR_STAT(IDX) (DMC + 0x00028UL + (IDX) * 0x200UL) +#define RBR_STAT_QLEN 0x000000000000ffffULL + +#define RBR_HDH(IDX) (DMC + 0x00030UL + (IDX) * 0x200UL) +#define RBR_HDH_HEAD_H 0x0000000000000fffULL + +#define RBR_HDL(IDX) (DMC + 0x00038UL + (IDX) * 0x200UL) +#define RBR_HDL_HEAD_L 0x00000000fffffffcULL + +#define RCRCFIG_A(IDX) (DMC + 0x00040UL + (IDX) * 0x200UL) +#define RCRCFIG_A_LEN 0xffff000000000000ULL +#define RCRCFIG_A_LEN_SHIFT 48 +#define RCRCFIG_A_STADDR_BASE 0x00000ffffff80000ULL +#define RCRCFIG_A_STADDR 0x000000000007ffc0ULL + +#define RCRCFIG_B(IDX) (DMC + 0x00048UL + (IDX) * 0x200UL) +#define RCRCFIG_B_PTHRES 0x00000000ffff0000ULL +#define RCRCFIG_B_PTHRES_SHIFT 16 +#define RCRCFIG_B_ENTOUT 0x0000000000008000ULL +#define RCRCFIG_B_TIMEOUT 0x000000000000003fULL +#define RCRCFIG_B_TIMEOUT_SHIFT 0 + +#define RCRSTAT_A(IDX) (DMC + 0x00050UL + (IDX) * 0x200UL) +#define RCRSTAT_A_QLEN 0x000000000000ffffULL + +#define RCRSTAT_B(IDX) (DMC + 0x00058UL + (IDX) * 0x200UL) +#define RCRSTAT_B_TIPTR_H 0x0000000000000fffULL + +#define RCRSTAT_C(IDX) (DMC + 0x00060UL + (IDX) * 0x200UL) +#define RCRSTAT_C_TIPTR_L 0x00000000fffffff8ULL + +#define RX_DMA_CTL_STAT(IDX) (DMC + 0x00070UL + (IDX) * 0x200UL) +#define RX_DMA_CTL_STAT_RBR_TMOUT 0x0020000000000000ULL +#define RX_DMA_CTL_STAT_RSP_CNT_ERR 0x0010000000000000ULL +#define RX_DMA_CTL_STAT_BYTE_EN_BUS 0x0008000000000000ULL +#define RX_DMA_CTL_STAT_RSP_DAT_ERR 0x0004000000000000ULL +#define RX_DMA_CTL_STAT_RCR_ACK_ERR 0x0002000000000000ULL +#define RX_DMA_CTL_STAT_DC_FIFO_ERR 0x0001000000000000ULL +#define RX_DMA_CTL_STAT_MEX 0x0000800000000000ULL +#define RX_DMA_CTL_STAT_RCRTHRES 0x0000400000000000ULL +#define RX_DMA_CTL_STAT_RCRTO 0x0000200000000000ULL +#define RX_DMA_CTL_STAT_RCR_SHA_PAR 0x0000100000000000ULL +#define RX_DMA_CTL_STAT_RBR_PRE_PAR 0x0000080000000000ULL +#define RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL +#define RX_DMA_CTL_STAT_WRED_DROP 0x0000020000000000ULL +#define RX_DMA_CTL_STAT_RBR_PRE_EMTY 0x0000010000000000ULL +#define RX_DMA_CTL_STAT_RCRSHADOW_FULL 0x0000008000000000ULL +#define RX_DMA_CTL_STAT_CONFIG_ERR 0x0000004000000000ULL +#define RX_DMA_CTL_STAT_RCRINCON 0x0000002000000000ULL +#define RX_DMA_CTL_STAT_RCRFULL 0x0000001000000000ULL +#define RX_DMA_CTL_STAT_RBR_EMPTY 0x0000000800000000ULL +#define RX_DMA_CTL_STAT_RBRFULL 0x0000000400000000ULL +#define RX_DMA_CTL_STAT_RBRLOGPAGE 0x0000000200000000ULL +#define RX_DMA_CTL_STAT_CFIGLOGPAGE 0x0000000100000000ULL +#define RX_DMA_CTL_STAT_PTRREAD 0x00000000ffff0000ULL +#define RX_DMA_CTL_STAT_PTRREAD_SHIFT 16 +#define RX_DMA_CTL_STAT_PKTREAD 0x000000000000ffffULL +#define RX_DMA_CTL_STAT_PKTREAD_SHIFT 0 + +#define RX_DMA_CTL_STAT_CHAN_FATAL (RX_DMA_CTL_STAT_RBR_TMOUT | \ + RX_DMA_CTL_STAT_RSP_CNT_ERR | \ + RX_DMA_CTL_STAT_BYTE_EN_BUS | \ + RX_DMA_CTL_STAT_RSP_DAT_ERR | \ + RX_DMA_CTL_STAT_RCR_ACK_ERR | \ + RX_DMA_CTL_STAT_RCR_SHA_PAR | \ + RX_DMA_CTL_STAT_RBR_PRE_PAR | \ + RX_DMA_CTL_STAT_CONFIG_ERR | \ + RX_DMA_CTL_STAT_RCRINCON | \ + RX_DMA_CTL_STAT_RCRFULL | \ + 
RX_DMA_CTL_STAT_RBRFULL | \ + RX_DMA_CTL_STAT_RBRLOGPAGE | \ + RX_DMA_CTL_STAT_CFIGLOGPAGE) + +#define RX_DMA_CTL_STAT_PORT_FATAL (RX_DMA_CTL_STAT_DC_FIFO_ERR) + +#define RX_DMA_CTL_WRITE_CLEAR_ERRS (RX_DMA_CTL_STAT_RBR_EMPTY | \ + RX_DMA_CTL_STAT_RCRSHADOW_FULL | \ + RX_DMA_CTL_STAT_RBR_PRE_EMTY | \ + RX_DMA_CTL_STAT_WRED_DROP | \ + RX_DMA_CTL_STAT_PORT_DROP_PKT | \ + RX_DMA_CTL_STAT_RCRTO | \ + RX_DMA_CTL_STAT_RCRTHRES | \ + RX_DMA_CTL_STAT_DC_FIFO_ERR) + +#define RCR_FLSH(IDX) (DMC + 0x00078UL + (IDX) * 0x200UL) +#define RCR_FLSH_FLSH 0x0000000000000001ULL + +#define RXMISC(IDX) (DMC + 0x00090UL + (IDX) * 0x200UL) +#define RXMISC_OFLOW 0x0000000000010000ULL +#define RXMISC_COUNT 0x000000000000ffffULL + +#define RX_DMA_CTL_STAT_DBG(IDX) (DMC + 0x00098UL + (IDX) * 0x200UL) +#define RX_DMA_CTL_STAT_DBG_RBR_TMOUT 0x0020000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RSP_CNT_ERR 0x0010000000000000ULL +#define RX_DMA_CTL_STAT_DBG_BYTE_EN_BUS 0x0008000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RSP_DAT_ERR 0x0004000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCR_ACK_ERR 0x0002000000000000ULL +#define RX_DMA_CTL_STAT_DBG_DC_FIFO_ERR 0x0001000000000000ULL +#define RX_DMA_CTL_STAT_DBG_MEX 0x0000800000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRTHRES 0x0000400000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRTO 0x0000200000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCR_SHA_PAR 0x0000100000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_PRE_PAR 0x0000080000000000ULL +#define RX_DMA_CTL_STAT_DBG_PORT_DROP_PKT 0x0000040000000000ULL +#define RX_DMA_CTL_STAT_DBG_WRED_DROP 0x0000020000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_PRE_EMTY 0x0000010000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRSHADOW_FULL 0x0000008000000000ULL +#define RX_DMA_CTL_STAT_DBG_CONFIG_ERR 0x0000004000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRINCON 0x0000002000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRFULL 0x0000001000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_EMPTY 0x0000000800000000ULL +#define RX_DMA_CTL_STAT_DBG_RBRFULL 0x0000000400000000ULL +#define RX_DMA_CTL_STAT_DBG_RBRLOGPAGE 0x0000000200000000ULL +#define RX_DMA_CTL_STAT_DBG_CFIGLOGPAGE 0x0000000100000000ULL +#define RX_DMA_CTL_STAT_DBG_PTRREAD 0x00000000ffff0000ULL +#define RX_DMA_CTL_STAT_DBG_PKTREAD 0x000000000000ffffULL + +#define RX_DMA_ENT_MSK(IDX) (DMC + 0x00068UL + (IDX) * 0x200UL) +#define RX_DMA_ENT_MSK_RBR_TMOUT 0x0000000000200000ULL +#define RX_DMA_ENT_MSK_RSP_CNT_ERR 0x0000000000100000ULL +#define RX_DMA_ENT_MSK_BYTE_EN_BUS 0x0000000000080000ULL +#define RX_DMA_ENT_MSK_RSP_DAT_ERR 0x0000000000040000ULL +#define RX_DMA_ENT_MSK_RCR_ACK_ERR 0x0000000000020000ULL +#define RX_DMA_ENT_MSK_DC_FIFO_ERR 0x0000000000010000ULL +#define RX_DMA_ENT_MSK_RCRTHRES 0x0000000000004000ULL +#define RX_DMA_ENT_MSK_RCRTO 0x0000000000002000ULL +#define RX_DMA_ENT_MSK_RCR_SHA_PAR 0x0000000000001000ULL +#define RX_DMA_ENT_MSK_RBR_PRE_PAR 0x0000000000000800ULL +#define RX_DMA_ENT_MSK_PORT_DROP_PKT 0x0000000000000400ULL +#define RX_DMA_ENT_MSK_WRED_DROP 0x0000000000000200ULL +#define RX_DMA_ENT_MSK_RBR_PRE_EMTY 0x0000000000000100ULL +#define RX_DMA_ENT_MSK_RCR_SHADOW_FULL 0x0000000000000080ULL +#define RX_DMA_ENT_MSK_CONFIG_ERR 0x0000000000000040ULL +#define RX_DMA_ENT_MSK_RCRINCON 0x0000000000000020ULL +#define RX_DMA_ENT_MSK_RCRFULL 0x0000000000000010ULL +#define RX_DMA_ENT_MSK_RBR_EMPTY 0x0000000000000008ULL +#define RX_DMA_ENT_MSK_RBRFULL 0x0000000000000004ULL +#define RX_DMA_ENT_MSK_RBRLOGPAGE 0x0000000000000002ULL +#define RX_DMA_ENT_MSK_CFIGLOGPAGE 0x0000000000000001ULL +#define RX_DMA_ENT_MSK_ALL 
0x00000000003f7fffULL + +#define TX_RNG_CFIG(IDX) (DMC + 0x40000UL + (IDX) * 0x200UL) +#define TX_RNG_CFIG_LEN 0x1fff000000000000ULL +#define TX_RNG_CFIG_LEN_SHIFT 48 +#define TX_RNG_CFIG_STADDR_BASE 0x00000ffffff80000ULL +#define TX_RNG_CFIG_STADDR 0x000000000007ffc0ULL + +#define TX_RING_HDL(IDX) (DMC + 0x40010UL + (IDX) * 0x200UL) +#define TX_RING_HDL_WRAP 0x0000000000080000ULL +#define TX_RING_HDL_HEAD 0x000000000007fff8ULL +#define TX_RING_HDL_HEAD_SHIFT 3 + +#define TX_RING_KICK(IDX) (DMC + 0x40018UL + (IDX) * 0x200UL) +#define TX_RING_KICK_WRAP 0x0000000000080000ULL +#define TX_RING_KICK_TAIL 0x000000000007fff8ULL + +#define TX_ENT_MSK(IDX) (DMC + 0x40020UL + (IDX) * 0x200UL) +#define TX_ENT_MSK_MK 0x0000000000008000ULL +#define TX_ENT_MSK_MBOX_ERR 0x0000000000000080ULL +#define TX_ENT_MSK_PKT_SIZE_ERR 0x0000000000000040ULL +#define TX_ENT_MSK_TX_RING_OFLOW 0x0000000000000020ULL +#define TX_ENT_MSK_PREF_BUF_ECC_ERR 0x0000000000000010ULL +#define TX_ENT_MSK_NACK_PREF 0x0000000000000008ULL +#define TX_ENT_MSK_NACK_PKT_RD 0x0000000000000004ULL +#define TX_ENT_MSK_CONF_PART_ERR 0x0000000000000002ULL +#define TX_ENT_MSK_PKT_PRT_ERR 0x0000000000000001ULL + +#define TX_CS(IDX) (DMC + 0x40028UL + (IDX)*0x200UL) +#define TX_CS_PKT_CNT 0x0fff000000000000ULL +#define TX_CS_PKT_CNT_SHIFT 48 +#define TX_CS_LASTMARK 0x00000fff00000000ULL +#define TX_CS_LASTMARK_SHIFT 32 +#define TX_CS_RST 0x0000000080000000ULL +#define TX_CS_RST_STATE 0x0000000040000000ULL +#define TX_CS_MB 0x0000000020000000ULL +#define TX_CS_STOP_N_GO 0x0000000010000000ULL +#define TX_CS_SNG_STATE 0x0000000008000000ULL +#define TX_CS_MK 0x0000000000008000ULL +#define TX_CS_MMK 0x0000000000004000ULL +#define TX_CS_MBOX_ERR 0x0000000000000080ULL +#define TX_CS_PKT_SIZE_ERR 0x0000000000000040ULL +#define TX_CS_TX_RING_OFLOW 0x0000000000000020ULL +#define TX_CS_PREF_BUF_PAR_ERR 0x0000000000000010ULL +#define TX_CS_NACK_PREF 0x0000000000000008ULL +#define TX_CS_NACK_PKT_RD 0x0000000000000004ULL +#define TX_CS_CONF_PART_ERR 0x0000000000000002ULL +#define TX_CS_PKT_PRT_ERR 0x0000000000000001ULL + +#define TXDMA_MBH(IDX) (DMC + 0x40030UL + (IDX) * 0x200UL) +#define TXDMA_MBH_MBADDR 0x0000000000000fffULL + +#define TXDMA_MBL(IDX) (DMC + 0x40038UL + (IDX) * 0x200UL) +#define TXDMA_MBL_MBADDR 0x00000000ffffffc0ULL + +#define TX_DMA_PRE_ST(IDX) (DMC + 0x40040UL + (IDX) * 0x200UL) +#define TX_DMA_PRE_ST_SHADOW_HD 0x000000000007ffffULL + +#define TX_RNG_ERR_LOGH(IDX) (DMC + 0x40048UL + (IDX) * 0x200UL) +#define TX_RNG_ERR_LOGH_ERR 0x0000000080000000ULL +#define TX_RNG_ERR_LOGH_MERR 0x0000000040000000ULL +#define TX_RNG_ERR_LOGH_ERRCODE 0x0000000038000000ULL +#define TX_RNG_ERR_LOGH_ERRADDR 0x0000000000000fffULL + +#define TX_RNG_ERR_LOGL(IDX) (DMC + 0x40050UL + (IDX) * 0x200UL) +#define TX_RNG_ERR_LOGL_ERRADDR 0x00000000ffffffffULL + +#define TDMC_INTR_DBG(IDX) (DMC + 0x40060UL + (IDX) * 0x200UL) +#define TDMC_INTR_DBG_MK 0x0000000000008000ULL +#define TDMC_INTR_DBG_MBOX_ERR 0x0000000000000080ULL +#define TDMC_INTR_DBG_PKT_SIZE_ERR 0x0000000000000040ULL +#define TDMC_INTR_DBG_TX_RING_OFLOW 0x0000000000000020ULL +#define TDMC_INTR_DBG_PREF_BUF_PAR_ERR 0x0000000000000010ULL +#define TDMC_INTR_DBG_NACK_PREF 0x0000000000000008ULL +#define TDMC_INTR_DBG_NACK_PKT_RD 0x0000000000000004ULL +#define TDMC_INTR_DBG_CONF_PART_ERR 0x0000000000000002ULL +#define TDMC_INTR_DBG_PKT_PART_ERR 0x0000000000000001ULL + +#define TX_CS_DBG(IDX) (DMC + 0x40068UL + (IDX) * 0x200UL) +#define TX_CS_DBG_PKT_CNT 0x0fff000000000000ULL + +#define TDMC_INJ_PAR_ERR(IDX) 
(DMC + 0x45040UL + (IDX) * 0x200UL) +#define TDMC_INJ_PAR_ERR_VAL 0x000000000000ffffULL + +#define TDMC_DBG_SEL(IDX) (DMC + 0x45080UL + (IDX) * 0x200UL) +#define TDMC_DBG_SEL_DBG_SEL 0x000000000000003fULL + +#define TDMC_TRAINING_VECTOR(IDX) (DMC + 0x45088UL + (IDX) * 0x200UL) +#define TDMC_TRAINING_VECTOR_VEC 0x00000000ffffffffULL + +#define TXC_DMA_MAX(CHAN) (FZC_TXC + 0x00000UL + (CHAN)*0x1000UL) +#define TXC_DMA_MAX_LEN(CHAN) (FZC_TXC + 0x00008UL + (CHAN)*0x1000UL) + +#define TXC_CONTROL (FZC_TXC + 0x20000UL) +#define TXC_CONTROL_ENABLE 0x0000000000000010ULL +#define TXC_CONTROL_PORT_ENABLE(X) (1 << (X)) + +#define TXC_TRAINING_VEC (FZC_TXC + 0x20008UL) +#define TXC_TRAINING_VEC_MASK 0x00000000ffffffffULL + +#define TXC_DEBUG (FZC_TXC + 0x20010UL) +#define TXC_DEBUG_SELECT 0x000000000000003fULL + +#define TXC_MAX_REORDER (FZC_TXC + 0x20018UL) +#define TXC_MAX_REORDER_PORT3 0x000000000f000000ULL +#define TXC_MAX_REORDER_PORT2 0x00000000000f0000ULL +#define TXC_MAX_REORDER_PORT1 0x0000000000000f00ULL +#define TXC_MAX_REORDER_PORT0 0x000000000000000fULL + +#define TXC_PORT_CTL(PORT) (FZC_TXC + 0x20020UL + (PORT)*0x100UL) +#define TXC_PORT_CTL_CLR_ALL_STAT 0x0000000000000001ULL + +#define TXC_PKT_STUFFED(PORT) (FZC_TXC + 0x20030UL + (PORT)*0x100UL) +#define TXC_PKT_STUFFED_PP_REORDER 0x00000000ffff0000ULL +#define TXC_PKT_STUFFED_PP_PACKETASSY 0x000000000000ffffULL + +#define TXC_PKT_XMIT(PORT) (FZC_TXC + 0x20038UL + (PORT)*0x100UL) +#define TXC_PKT_XMIT_BYTES 0x00000000ffff0000ULL +#define TXC_PKT_XMIT_PKTS 0x000000000000ffffULL + +#define TXC_ROECC_CTL(PORT) (FZC_TXC + 0x20040UL + (PORT)*0x100UL) +#define TXC_ROECC_CTL_DISABLE_UE 0x0000000080000000ULL +#define TXC_ROECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL +#define TXC_ROECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL +#define TXC_ROECC_CTL_ALL_PKTS 0x0000000000000400ULL +#define TXC_ROECC_CTL_ALT_PKTS 0x0000000000000200ULL +#define TXC_ROECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL +#define TXC_ROECC_CTL_LST_PKT_LINE 0x0000000000000004ULL +#define TXC_ROECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL +#define TXC_ROECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL + +#define TXC_ROECC_ST(PORT) (FZC_TXC + 0x20048UL + (PORT)*0x100UL) +#define TXC_ROECC_CLR_ST 0x0000000080000000ULL +#define TXC_ROECC_CE 0x0000000000020000ULL +#define TXC_ROECC_UE 0x0000000000010000ULL +#define TXC_ROECC_ST_ECC_ADDR 0x00000000000003ffULL + +#define TXC_RO_DATA0(PORT) (FZC_TXC + 0x20050UL + (PORT)*0x100UL) +#define TXC_RO_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ + +#define TXC_RO_DATA1(PORT) (FZC_TXC + 0x20058UL + (PORT)*0x100UL) +#define TXC_RO_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ + +#define TXC_RO_DATA2(PORT) (FZC_TXC + 0x20060UL + (PORT)*0x100UL) +#define TXC_RO_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ + +#define TXC_RO_DATA3(PORT) (FZC_TXC + 0x20068UL + (PORT)*0x100UL) +#define TXC_RO_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ + +#define TXC_RO_DATA4(PORT) (FZC_TXC + 0x20070UL + (PORT)*0x100UL) +#define TXC_RO_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ + +#define TXC_SFECC_CTL(PORT) (FZC_TXC + 0x20078UL + (PORT)*0x100UL) +#define TXC_SFECC_CTL_DISABLE_UE 0x0000000080000000ULL +#define TXC_SFECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL +#define TXC_SFECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL +#define TXC_SFECC_CTL_ALL_PKTS 0x0000000000000400ULL +#define TXC_SFECC_CTL_ALT_PKTS 0x0000000000000200ULL +#define TXC_SFECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL +#define TXC_SFECC_CTL_LST_PKT_LINE 0x0000000000000004ULL +#define 
TXC_SFECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL +#define TXC_SFECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL + +#define TXC_SFECC_ST(PORT) (FZC_TXC + 0x20080UL + (PORT)*0x100UL) +#define TXC_SFECC_ST_CLR_ST 0x0000000080000000ULL +#define TXC_SFECC_ST_CE 0x0000000000020000ULL +#define TXC_SFECC_ST_UE 0x0000000000010000ULL +#define TXC_SFECC_ST_ECC_ADDR 0x00000000000003ffULL + +#define TXC_SF_DATA0(PORT) (FZC_TXC + 0x20088UL + (PORT)*0x100UL) +#define TXC_SF_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ + +#define TXC_SF_DATA1(PORT) (FZC_TXC + 0x20090UL + (PORT)*0x100UL) +#define TXC_SF_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ + +#define TXC_SF_DATA2(PORT) (FZC_TXC + 0x20098UL + (PORT)*0x100UL) +#define TXC_SF_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ + +#define TXC_SF_DATA3(PORT) (FZC_TXC + 0x200a0UL + (PORT)*0x100UL) +#define TXC_SF_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ + +#define TXC_SF_DATA4(PORT) (FZC_TXC + 0x200a8UL + (PORT)*0x100UL) +#define TXC_SF_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ + +#define TXC_RO_TIDS(PORT) (FZC_TXC + 0x200b0UL + (PORT)*0x100UL) +#define TXC_RO_TIDS_IN_USE 0x00000000ffffffffULL + +#define TXC_RO_STATE0(PORT) (FZC_TXC + 0x200b8UL + (PORT)*0x100UL) +#define TXC_RO_STATE0_DUPLICATE_TID 0x00000000ffffffffULL + +#define TXC_RO_STATE1(PORT) (FZC_TXC + 0x200c0UL + (PORT)*0x100UL) +#define TXC_RO_STATE1_UNUSED_TID 0x00000000ffffffffULL + +#define TXC_RO_STATE2(PORT) (FZC_TXC + 0x200c8UL + (PORT)*0x100UL) +#define TXC_RO_STATE2_TRANS_TIMEOUT 0x00000000ffffffffULL + +#define TXC_RO_STATE3(PORT) (FZC_TXC + 0x200d0UL + (PORT)*0x100UL) +#define TXC_RO_STATE3_ENAB_SPC_WMARK 0x0000000080000000ULL +#define TXC_RO_STATE3_RO_SPC_WMARK 0x000000007fe00000ULL +#define TXC_RO_STATE3_ROFIFO_SPC_AVAIL 0x00000000001ff800ULL +#define TXC_RO_STATE3_ENAB_RO_WMARK 0x0000000000000100ULL +#define TXC_RO_STATE3_HIGH_RO_USED 0x00000000000000f0ULL +#define TXC_RO_STATE3_NUM_RO_USED 0x000000000000000fULL + +#define TXC_RO_CTL(PORT) (FZC_TXC + 0x200d8UL + (PORT)*0x100UL) +#define TXC_RO_CTL_CLR_FAIL_STATE 0x0000000080000000ULL +#define TXC_RO_CTL_RO_ADDR 0x000000000f000000ULL +#define TXC_RO_CTL_ADDR_FAILED 0x0000000000400000ULL +#define TXC_RO_CTL_DMA_FAILED 0x0000000000200000ULL +#define TXC_RO_CTL_LEN_FAILED 0x0000000000100000ULL +#define TXC_RO_CTL_CAPT_ADDR_FAILED 0x0000000000040000ULL +#define TXC_RO_CTL_CAPT_DMA_FAILED 0x0000000000020000ULL +#define TXC_RO_CTL_CAPT_LEN_FAILED 0x0000000000010000ULL +#define TXC_RO_CTL_RO_STATE_RD_DONE 0x0000000000000080ULL +#define TXC_RO_CTL_RO_STATE_WR_DONE 0x0000000000000040ULL +#define TXC_RO_CTL_RO_STATE_RD 0x0000000000000020ULL +#define TXC_RO_CTL_RO_STATE_WR 0x0000000000000010ULL +#define TXC_RO_CTL_RO_STATE_ADDR 0x000000000000000fULL + +#define TXC_RO_ST_DATA0(PORT) (FZC_TXC + 0x200e0UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA0_DATA0 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA1(PORT) (FZC_TXC + 0x200e8UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA1_DATA1 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA2(PORT) (FZC_TXC + 0x200f0UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA2_DATA2 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA3(PORT) (FZC_TXC + 0x200f8UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA3_DATA3 0x00000000ffffffffULL + +#define TXC_PORT_PACKET_REQ(PORT) (FZC_TXC + 0x20100UL + (PORT)*0x100UL) +#define TXC_PORT_PACKET_REQ_GATHER_REQ 0x00000000f0000000ULL +#define TXC_PORT_PACKET_REQ_PKT_REQ 0x000000000fff0000ULL +#define TXC_PORT_PACKET_REQ_PERR_ABRT 0x000000000000ffffULL + + /* bits are same as 
TXC_INT_STAT */ +#define TXC_INT_STAT_DBG (FZC_TXC + 0x20420UL) + +#define TXC_INT_STAT (FZC_TXC + 0x20428UL) +#define TXC_INT_STAT_VAL_SHIFT(PORT) ((PORT) * 8) +#define TXC_INT_STAT_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_SF_CE(PORT) (0x01 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_SF_UE(PORT) (0x02 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_RO_CE(PORT) (0x04 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_RO_UE(PORT) (0x08 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_REORDER_ERR(PORT) (0x10 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_PKTASM_DEAD(PORT) (0x20 << TXC_INT_STAT_VAL_SHIFT(PORT)) + +#define TXC_INT_MASK (FZC_TXC + 0x20430UL) +#define TXC_INT_MASK_VAL_SHIFT(PORT) ((PORT) * 8) +#define TXC_INT_MASK_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT)) + +#define TXC_INT_MASK_SF_CE 0x01 +#define TXC_INT_MASK_SF_UE 0x02 +#define TXC_INT_MASK_RO_CE 0x04 +#define TXC_INT_MASK_RO_UE 0x08 +#define TXC_INT_MASK_REORDER_ERR 0x10 +#define TXC_INT_MASK_PKTASM_DEAD 0x20 +#define TXC_INT_MASK_ALL 0x3f + +#define TXC_PORT_DMA(IDX) (FZC_TXC + 0x20028UL + (IDX)*0x100UL) + +#define ESPC_PIO_EN (FZC_PROM + 0x40000UL) +#define ESPC_PIO_EN_ENABLE 0x0000000000000001ULL + +#define ESPC_PIO_STAT (FZC_PROM + 0x40008UL) +#define ESPC_PIO_STAT_READ_START 0x0000000080000000ULL +#define ESPC_PIO_STAT_READ_END 0x0000000040000000ULL +#define ESPC_PIO_STAT_WRITE_INIT 0x0000000020000000ULL +#define ESPC_PIO_STAT_WRITE_END 0x0000000010000000ULL +#define ESPC_PIO_STAT_ADDR 0x0000000003ffff00ULL +#define ESPC_PIO_STAT_ADDR_SHIFT 8 +#define ESPC_PIO_STAT_DATA 0x00000000000000ffULL +#define ESPC_PIO_STAT_DATA_SHIFT 0 + +#define ESPC_NCR(IDX) (FZC_PROM + 0x40020UL + (IDX)*0x8UL) +#define ESPC_NCR_VAL 0x00000000ffffffffULL + +#define ESPC_MAC_ADDR0 ESPC_NCR(0) +#define ESPC_MAC_ADDR1 ESPC_NCR(1) +#define ESPC_NUM_PORTS_MACS ESPC_NCR(2) +#define ESPC_NUM_PORTS_MACS_VAL 0x00000000000000ffULL +#define ESPC_MOD_STR_LEN ESPC_NCR(4) +#define ESPC_MOD_STR_1 ESPC_NCR(5) +#define ESPC_MOD_STR_2 ESPC_NCR(6) +#define ESPC_MOD_STR_3 ESPC_NCR(7) +#define ESPC_MOD_STR_4 ESPC_NCR(8) +#define ESPC_MOD_STR_5 ESPC_NCR(9) +#define ESPC_MOD_STR_6 ESPC_NCR(10) +#define ESPC_MOD_STR_7 ESPC_NCR(11) +#define ESPC_MOD_STR_8 ESPC_NCR(12) +#define ESPC_BD_MOD_STR_LEN ESPC_NCR(13) +#define ESPC_BD_MOD_STR_1 ESPC_NCR(14) +#define ESPC_BD_MOD_STR_2 ESPC_NCR(15) +#define ESPC_BD_MOD_STR_3 ESPC_NCR(16) +#define ESPC_BD_MOD_STR_4 ESPC_NCR(17) + +#define ESPC_PHY_TYPE ESPC_NCR(18) +#define ESPC_PHY_TYPE_PORT0 0x00000000ff000000ULL +#define ESPC_PHY_TYPE_PORT0_SHIFT 24 +#define ESPC_PHY_TYPE_PORT1 0x0000000000ff0000ULL +#define ESPC_PHY_TYPE_PORT1_SHIFT 16 +#define ESPC_PHY_TYPE_PORT2 0x000000000000ff00ULL +#define ESPC_PHY_TYPE_PORT2_SHIFT 8 +#define ESPC_PHY_TYPE_PORT3 0x00000000000000ffULL +#define ESPC_PHY_TYPE_PORT3_SHIFT 0 + +#define ESPC_PHY_TYPE_1G_COPPER 3 +#define ESPC_PHY_TYPE_1G_FIBER 2 +#define ESPC_PHY_TYPE_10G_COPPER 1 +#define ESPC_PHY_TYPE_10G_FIBER 0 + +#define ESPC_MAX_FM_SZ ESPC_NCR(19) + +#define ESPC_INTR_NUM ESPC_NCR(20) +#define ESPC_INTR_NUM_PORT0 0x00000000ff000000ULL +#define ESPC_INTR_NUM_PORT1 0x0000000000ff0000ULL +#define ESPC_INTR_NUM_PORT2 0x000000000000ff00ULL +#define ESPC_INTR_NUM_PORT3 0x00000000000000ffULL + +#define ESPC_VER_IMGSZ ESPC_NCR(21) +#define ESPC_VER_IMGSZ_IMGSZ 0x00000000ffff0000ULL +#define ESPC_VER_IMGSZ_IMGSZ_SHIFT 16 +#define ESPC_VER_IMGSZ_VER 0x000000000000ffffULL +#define ESPC_VER_IMGSZ_VER_SHIFT 0 + +#define 
ESPC_CHKSUM ESPC_NCR(22) +#define ESPC_CHKSUM_SUM 0x00000000000000ffULL + +#define ESPC_EEPROM_SIZE 0x100000 + +#define CLASS_CODE_UNRECOG 0x00 +#define CLASS_CODE_DUMMY1 0x01 +#define CLASS_CODE_ETHERTYPE1 0x02 +#define CLASS_CODE_ETHERTYPE2 0x03 +#define CLASS_CODE_USER_PROG1 0x04 +#define CLASS_CODE_USER_PROG2 0x05 +#define CLASS_CODE_USER_PROG3 0x06 +#define CLASS_CODE_USER_PROG4 0x07 +#define CLASS_CODE_TCP_IPV4 0x08 +#define CLASS_CODE_UDP_IPV4 0x09 +#define CLASS_CODE_AH_ESP_IPV4 0x0a +#define CLASS_CODE_SCTP_IPV4 0x0b +#define CLASS_CODE_TCP_IPV6 0x0c +#define CLASS_CODE_UDP_IPV6 0x0d +#define CLASS_CODE_AH_ESP_IPV6 0x0e +#define CLASS_CODE_SCTP_IPV6 0x0f +#define CLASS_CODE_ARP 0x10 +#define CLASS_CODE_RARP 0x11 +#define CLASS_CODE_DUMMY2 0x12 +#define CLASS_CODE_DUMMY3 0x13 +#define CLASS_CODE_DUMMY4 0x14 +#define CLASS_CODE_DUMMY5 0x15 +#define CLASS_CODE_DUMMY6 0x16 +#define CLASS_CODE_DUMMY7 0x17 +#define CLASS_CODE_DUMMY8 0x18 +#define CLASS_CODE_DUMMY9 0x19 +#define CLASS_CODE_DUMMY10 0x1a +#define CLASS_CODE_DUMMY11 0x1b +#define CLASS_CODE_DUMMY12 0x1c +#define CLASS_CODE_DUMMY13 0x1d +#define CLASS_CODE_DUMMY14 0x1e +#define CLASS_CODE_DUMMY15 0x1f + +/* Logical devices and device groups */ +#define LDN_RXDMA(CHAN) (0 + (CHAN)) +#define LDN_RESV1(OFF) (16 + (OFF)) +#define LDN_TXDMA(CHAN) (32 + (CHAN)) +#define LDN_RESV2(OFF) (56 + (OFF)) +#define LDN_MIF 63 +#define LDN_MAC(PORT) (64 + (PORT)) +#define LDN_DEVICE_ERROR 68 +#define LDN_MAX LDN_DEVICE_ERROR + +#define NIU_LDG_MIN 0 +#define NIU_LDG_MAX 63 +#define NIU_NUM_LDG 64 +#define LDG_INVALID 0xff + +/* PHY stuff */ +#define NIU_PMA_PMD_DEV_ADDR 1 +#define NIU_PCS_DEV_ADDR 3 + +#define NIU_PHY_ID_MASK 0xfffff0f0 +#define NIU_PHY_ID_BCM8704 0x00206030 +#define NIU_PHY_ID_BCM8706 0x00206035 +#define NIU_PHY_ID_BCM5464R 0x002060b0 +#define NIU_PHY_ID_MRVL88X2011 0x01410020 + +/* MRVL88X2011 register addresses */ +#define MRVL88X2011_USER_DEV1_ADDR 1 +#define MRVL88X2011_USER_DEV2_ADDR 2 +#define MRVL88X2011_USER_DEV3_ADDR 3 +#define MRVL88X2011_USER_DEV4_ADDR 4 +#define MRVL88X2011_PMA_PMD_CTL_1 0x0000 +#define MRVL88X2011_PMA_PMD_STATUS_1 0x0001 +#define MRVL88X2011_10G_PMD_STATUS_2 0x0008 +#define MRVL88X2011_10G_PMD_TX_DIS 0x0009 +#define MRVL88X2011_10G_XGXS_LANE_STAT 0x0018 +#define MRVL88X2011_GENERAL_CTL 0x8300 +#define MRVL88X2011_LED_BLINK_CTL 0x8303 +#define MRVL88X2011_LED_8_TO_11_CTL 0x8306 + +/* MRVL88X2011 register control */ +#define MRVL88X2011_ENA_XFPREFCLK 0x0001 +#define MRVL88X2011_ENA_PMDTX 0x0000 +#define MRVL88X2011_LOOPBACK 0x1 +#define MRVL88X2011_LED_ACT 0x1 +#define MRVL88X2011_LNK_STATUS_OK 0x4 +#define MRVL88X2011_LED_BLKRATE_MASK 0x70 +#define MRVL88X2011_LED_BLKRATE_034MS 0x0 +#define MRVL88X2011_LED_BLKRATE_067MS 0x1 +#define MRVL88X2011_LED_BLKRATE_134MS 0x2 +#define MRVL88X2011_LED_BLKRATE_269MS 0x3 +#define MRVL88X2011_LED_BLKRATE_538MS 0x4 +#define MRVL88X2011_LED_CTL_OFF 0x0 +#define MRVL88X2011_LED_CTL_PCS_ACT 0x5 +#define MRVL88X2011_LED_CTL_MASK 0x7 +#define MRVL88X2011_LED(n,v) ((v)<<((n)*4)) +#define MRVL88X2011_LED_STAT(n,v) ((v)>>((n)*4)) + +#define BCM8704_PMA_PMD_DEV_ADDR 1 +#define BCM8704_PCS_DEV_ADDR 2 +#define BCM8704_USER_DEV3_ADDR 3 +#define BCM8704_PHYXS_DEV_ADDR 4 +#define BCM8704_USER_DEV4_ADDR 4 + +#define BCM8704_PMD_RCV_SIGDET 0x000a +#define PMD_RCV_SIGDET_LANE3 0x0010 +#define PMD_RCV_SIGDET_LANE2 0x0008 +#define PMD_RCV_SIGDET_LANE1 0x0004 +#define PMD_RCV_SIGDET_LANE0 0x0002 +#define PMD_RCV_SIGDET_GLOBAL 0x0001 + +#define BCM8704_PCS_10G_R_STATUS 0x0020 
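/*
 * Illustrative sketch only, not part of the imported file: the
 * BCM8704_PMD_RCV_SIGDET bits defined above decode as bit 0 being the
 * overall receive signal-detect indication and bits 4:1 the individual
 * lanes.  The helper name below is hypothetical.
 */
static inline int bcm8704_rx_sigdet_ok(int sigdet)
{
	int lanes = PMD_RCV_SIGDET_LANE0 | PMD_RCV_SIGDET_LANE1 |
		    PMD_RCV_SIGDET_LANE2 | PMD_RCV_SIGDET_LANE3;

	/* Require the global summary bit as well as all four lanes. */
	return (sigdet & PMD_RCV_SIGDET_GLOBAL) &&
	       ((sigdet & lanes) == lanes);
}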
+#define PCS_10G_R_STATUS_LINKSTAT 0x1000 +#define PCS_10G_R_STATUS_PRBS31_ABLE 0x0004 +#define PCS_10G_R_STATUS_HI_BER 0x0002 +#define PCS_10G_R_STATUS_BLK_LOCK 0x0001 + +#define BCM8704_USER_CONTROL 0xc800 +#define USER_CONTROL_OPTXENB_LVL 0x8000 +#define USER_CONTROL_OPTXRST_LVL 0x4000 +#define USER_CONTROL_OPBIASFLT_LVL 0x2000 +#define USER_CONTROL_OBTMPFLT_LVL 0x1000 +#define USER_CONTROL_OPPRFLT_LVL 0x0800 +#define USER_CONTROL_OPTXFLT_LVL 0x0400 +#define USER_CONTROL_OPRXLOS_LVL 0x0200 +#define USER_CONTROL_OPRXFLT_LVL 0x0100 +#define USER_CONTROL_OPTXON_LVL 0x0080 +#define USER_CONTROL_RES1 0x007f +#define USER_CONTROL_RES1_SHIFT 0 + +#define BCM8704_USER_ANALOG_CLK 0xc801 +#define BCM8704_USER_PMD_RX_CONTROL 0xc802 + +#define BCM8704_USER_PMD_TX_CONTROL 0xc803 +#define USER_PMD_TX_CTL_RES1 0xfe00 +#define USER_PMD_TX_CTL_XFP_CLKEN 0x0100 +#define USER_PMD_TX_CTL_TX_DAC_TXD 0x00c0 +#define USER_PMD_TX_CTL_TX_DAC_TXD_SH 6 +#define USER_PMD_TX_CTL_TX_DAC_TXCK 0x0030 +#define USER_PMD_TX_CTL_TX_DAC_TXCK_SH 4 +#define USER_PMD_TX_CTL_TSD_LPWREN 0x0008 +#define USER_PMD_TX_CTL_TSCK_LPWREN 0x0004 +#define USER_PMD_TX_CTL_CMU_LPWREN 0x0002 +#define USER_PMD_TX_CTL_SFIFORST 0x0001 + +#define BCM8704_USER_ANALOG_STATUS0 0xc804 +#define BCM8704_USER_OPT_DIGITAL_CTRL 0xc808 +#define BCM8704_USER_TX_ALARM_STATUS 0x9004 + +#define USER_ODIG_CTRL_FMODE 0x8000 +#define USER_ODIG_CTRL_TX_PDOWN 0x4000 +#define USER_ODIG_CTRL_RX_PDOWN 0x2000 +#define USER_ODIG_CTRL_EFILT_EN 0x1000 +#define USER_ODIG_CTRL_OPT_RST 0x0800 +#define USER_ODIG_CTRL_PCS_TIB 0x0400 +#define USER_ODIG_CTRL_PCS_RI 0x0200 +#define USER_ODIG_CTRL_RESV1 0x0180 +#define USER_ODIG_CTRL_GPIOS 0x0060 +#define USER_ODIG_CTRL_GPIOS_SHIFT 5 +#define USER_ODIG_CTRL_RESV2 0x0010 +#define USER_ODIG_CTRL_LB_ERR_DIS 0x0008 +#define USER_ODIG_CTRL_RESV3 0x0006 +#define USER_ODIG_CTRL_TXONOFF_PD_DIS 0x0001 + +#define BCM8704_PHYXS_XGXS_LANE_STAT 0x0018 +#define PHYXS_XGXS_LANE_STAT_ALINGED 0x1000 +#define PHYXS_XGXS_LANE_STAT_PATTEST 0x0800 +#define PHYXS_XGXS_LANE_STAT_MAGIC 0x0400 +#define PHYXS_XGXS_LANE_STAT_LANE3 0x0008 +#define PHYXS_XGXS_LANE_STAT_LANE2 0x0004 +#define PHYXS_XGXS_LANE_STAT_LANE1 0x0002 +#define PHYXS_XGXS_LANE_STAT_LANE0 0x0001 + +#define BCM5464R_AUX_CTL 24 +#define BCM5464R_AUX_CTL_EXT_LB 0x8000 +#define BCM5464R_AUX_CTL_EXT_PLEN 0x4000 +#define BCM5464R_AUX_CTL_ER1000 0x3000 +#define BCM5464R_AUX_CTL_ER1000_SHIFT 12 +#define BCM5464R_AUX_CTL_RESV1 0x0800 +#define BCM5464R_AUX_CTL_WRITE_1 0x0400 +#define BCM5464R_AUX_CTL_RESV2 0x0300 +#define BCM5464R_AUX_CTL_PRESP_DIS 0x0080 +#define BCM5464R_AUX_CTL_RESV3 0x0040 +#define BCM5464R_AUX_CTL_ER100 0x0030 +#define BCM5464R_AUX_CTL_ER100_SHIFT 4 +#define BCM5464R_AUX_CTL_DIAG_MODE 0x0008 +#define BCM5464R_AUX_CTL_SR_SEL 0x0007 +#define BCM5464R_AUX_CTL_SR_SEL_SHIFT 0 + +#define BCM5464R_CTRL1000_AS_MASTER 0x0800 +#define BCM5464R_CTRL1000_ENABLE_AS_MASTER 0x1000 + +#define RCR_ENTRY_MULTI 0x8000000000000000ULL +#define RCR_ENTRY_PKT_TYPE 0x6000000000000000ULL +#define RCR_ENTRY_PKT_TYPE_SHIFT 61 +#define RCR_ENTRY_ZERO_COPY 0x1000000000000000ULL +#define RCR_ENTRY_NOPORT 0x0800000000000000ULL +#define RCR_ENTRY_PROMISC 0x0400000000000000ULL +#define RCR_ENTRY_ERROR 0x0380000000000000ULL +#define RCR_ENTRY_DCF_ERR 0x0040000000000000ULL +#define RCR_ENTRY_L2_LEN 0x003fff0000000000ULL +#define RCR_ENTRY_L2_LEN_SHIFT 40 +#define RCR_ENTRY_PKTBUFSZ 0x000000c000000000ULL +#define RCR_ENTRY_PKTBUFSZ_SHIFT 38 +#define RCR_ENTRY_PKT_BUF_ADDR 0x0000003fffffffffULL /* bits 43:6 */ 
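/*
 * Illustrative sketch, not part of the imported file: unpacking a receive
 * completion ring (RCR) entry with the RCR_ENTRY_* masks above.  The driver
 * stores RCR entries as little-endian 64-bit words (see "__le64 *rcr" in
 * struct rx_ring_info below); the packet buffer address field carries
 * physical address bits 43:6, so it is shifted left by 6
 * (RCR_ENTRY_PKT_BUF_ADDR_SHIFT, defined just below) to recover the byte
 * address.  The helper name is hypothetical.
 */
static inline void niu_rcr_entry_unpack(__le64 *entry, unsigned int *pkt_type,
					unsigned int *l2_len, u64 *buf_addr)
{
	u64 val = le64_to_cpup(entry);

	/* pkt_type matches the RCR_PKT_TYPE_* codes that follow. */
	*pkt_type = (val & RCR_ENTRY_PKT_TYPE) >> RCR_ENTRY_PKT_TYPE_SHIFT;
	*l2_len   = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT;
	*buf_addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << 6;
}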
+#define RCR_ENTRY_PKT_BUF_ADDR_SHIFT 6 + +#define RCR_PKT_TYPE_OTHER 0x0 +#define RCR_PKT_TYPE_TCP 0x1 +#define RCR_PKT_TYPE_UDP 0x2 +#define RCR_PKT_TYPE_SCTP 0x3 + +#define NIU_RXPULL_MAX ETH_HLEN + +struct rx_pkt_hdr0 { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 inputport:2, + maccheck:1, + class:5; + u8 vlan:1, + llcsnap:1, + noport:1, + badip:1, + tcamhit:1, + tres:2, + tzfvld:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 class:5, + maccheck:1, + inputport:2; + u8 tzfvld:1, + tres:2, + tcamhit:1, + badip:1, + noport:1, + llcsnap:1, + vlan:1; +#endif +}; + +struct rx_pkt_hdr1 { + u8 hwrsvd1; + u8 tcammatch; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd2:2, + hashit:1, + exact:1, + hzfvld:1, + hashsidx:3; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 hashsidx:3, + hzfvld:1, + exact:1, + hashit:1, + hwrsvd2:2; +#endif + u8 zcrsvd; + + /* Bits 11:8 of zero copy flow ID. */ +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd3:4, zflowid0:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 zflowid0:4, hwrsvd3:4; +#endif + + /* Bits 7:0 of zero copy flow ID. */ + u8 zflowid1; + + /* Bits 15:8 of hash value, H2. */ + u8 hashval2_0; + + /* Bits 7:0 of hash value, H2. */ + u8 hashval2_1; + + /* Bits 19:16 of hash value, H1. */ +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd4:4, hashval1_0:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 hashval1_0:4, hwrsvd4:4; +#endif + + /* Bits 15:8 of hash value, H1. */ + u8 hashval1_1; + + /* Bits 7:0 of hash value, H1. */ + u8 hashval1_2; + + u8 hwrsvd5; + u8 hwrsvd6; + + u8 usrdata_0; /* Bits 39:32 of user data. */ + u8 usrdata_1; /* Bits 31:24 of user data. */ + u8 usrdata_2; /* Bits 23:16 of user data. */ + u8 usrdata_3; /* Bits 15:8 of user data. */ + u8 usrdata_4; /* Bits 7:0 of user data. */ +}; + +struct tx_dma_mbox { + u64 tx_dma_pre_st; + u64 tx_cs; + u64 tx_ring_kick; + u64 tx_ring_hdl; + u64 resv1; + u32 tx_rng_err_logl; + u32 tx_rng_err_logh; + u64 resv2; + u64 resv3; +}; + +struct tx_pkt_hdr { + __le64 flags; +#define TXHDR_PAD 0x0000000000000007ULL +#define TXHDR_PAD_SHIFT 0 +#define TXHDR_LEN 0x000000003fff0000ULL +#define TXHDR_LEN_SHIFT 16 +#define TXHDR_L4STUFF 0x0000003f00000000ULL +#define TXHDR_L4STUFF_SHIFT 32 +#define TXHDR_L4START 0x00003f0000000000ULL +#define TXHDR_L4START_SHIFT 40 +#define TXHDR_L3START 0x000f000000000000ULL +#define TXHDR_L3START_SHIFT 48 +#define TXHDR_IHL 0x00f0000000000000ULL +#define TXHDR_IHL_SHIFT 52 +#define TXHDR_VLAN 0x0100000000000000ULL +#define TXHDR_LLC 0x0200000000000000ULL +#define TXHDR_IP_VER 0x2000000000000000ULL +#define TXHDR_CSUM_NONE 0x0000000000000000ULL +#define TXHDR_CSUM_TCP 0x4000000000000000ULL +#define TXHDR_CSUM_UDP 0x8000000000000000ULL +#define TXHDR_CSUM_SCTP 0xc000000000000000ULL + __le64 resv; +}; + +#define TX_DESC_SOP 0x8000000000000000ULL +#define TX_DESC_MARK 0x4000000000000000ULL +#define TX_DESC_NUM_PTR 0x3c00000000000000ULL +#define TX_DESC_NUM_PTR_SHIFT 58 +#define TX_DESC_TR_LEN 0x01fff00000000000ULL +#define TX_DESC_TR_LEN_SHIFT 44 +#define TX_DESC_SAD 0x00000fffffffffffULL +#define TX_DESC_SAD_SHIFT 0 + +struct tx_buff_info { + struct sk_buff *skb; + u64 mapping; +}; + +struct txdma_mailbox { + __le64 tx_dma_pre_st; + __le64 tx_cs; + __le64 tx_ring_kick; + __le64 tx_ring_hdl; + __le64 resv1; + __le32 tx_rng_err_logl; + __le32 tx_rng_err_logh; + __le64 resv2[2]; +} __attribute__((aligned(64))); + +#define MAX_TX_RING_SIZE 256 +#define MAX_TX_DESC_LEN 4076 + +struct tx_ring_info { + struct tx_buff_info tx_buffs[MAX_TX_RING_SIZE]; + struct niu *np; + u64 tx_cs; + int 
pending; + int prod; + int cons; + int wrap_bit; + u16 last_pkt_cnt; + u16 tx_channel; + u16 mark_counter; + u16 mark_freq; + u16 mark_pending; + u16 __pad; + struct txdma_mailbox *mbox; + __le64 *descr; + + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; + + u64 mbox_dma; + u64 descr_dma; + int max_burst; +}; + +#define NEXT_TX(tp, index) \ + (((index) + 1) < (tp)->pending ? ((index) + 1) : 0) + +static inline u32 niu_tx_avail(struct tx_ring_info *tp) +{ + return (tp->pending - + ((tp->prod - tp->cons) & (MAX_TX_RING_SIZE - 1))); +} + +struct rxdma_mailbox { + __le64 rx_dma_ctl_stat; + __le64 rbr_stat; + __le32 rbr_hdl; + __le32 rbr_hdh; + __le64 resv1; + __le32 rcrstat_c; + __le32 rcrstat_b; + __le64 rcrstat_a; + __le64 resv2[2]; +} __attribute__((aligned(64))); + +#define MAX_RBR_RING_SIZE 128 +#define MAX_RCR_RING_SIZE (MAX_RBR_RING_SIZE * 2) + +#define RBR_REFILL_MIN 16 + +#define RX_SKB_ALLOC_SIZE 128 + NET_IP_ALIGN + +struct rx_ring_info { + struct niu *np; + int rx_channel; + u16 rbr_block_size; + u16 rbr_blocks_per_page; + u16 rbr_sizes[4]; + unsigned int rcr_index; + unsigned int rcr_table_size; + unsigned int rbr_index; + unsigned int rbr_pending; + unsigned int rbr_refill_pending; + unsigned int rbr_kick_thresh; + unsigned int rbr_table_size; + struct page **rxhash; + struct rxdma_mailbox *mbox; + __le64 *rcr; + __le32 *rbr; +#define RBR_DESCR_ADDR_SHIFT 12 + + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; + + u64 mbox_dma; + u64 rcr_dma; + u64 rbr_dma; + + /* WRED */ + int nonsyn_window; + int nonsyn_threshold; + int syn_window; + int syn_threshold; + + /* interrupt mitigation */ + int rcr_pkt_threshold; + int rcr_timeout; +}; + +#define NEXT_RCR(rp, index) \ + (((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0) +#define NEXT_RBR(rp, index) \ + (((index) + 1) < (rp)->rbr_table_size ? 
((index) + 1) : 0) + +#define NIU_MAX_PORTS 4 +#define NIU_NUM_RXCHAN 16 +#define NIU_NUM_TXCHAN 24 +#define MAC_NUM_HASH 16 + +#define NIU_MAX_MTU 9216 + +/* VPD strings */ +#define NIU_QGC_LP_BM_STR "501-7606" +#define NIU_2XGF_LP_BM_STR "501-7283" +#define NIU_QGC_PEM_BM_STR "501-7765" +#define NIU_2XGF_PEM_BM_STR "501-7626" +#define NIU_ALONSO_BM_STR "373-0202" +#define NIU_FOXXY_BM_STR "501-7961" +#define NIU_2XGF_MRVL_BM_STR "SK-6E82" +#define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc" +#define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf" +#define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem" +#define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem" +#define NIU_ALONSO_MDL_STR "SUNW,CP3220" +#define NIU_KIMI_MDL_STR "SUNW,CP3260" +#define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune" +#define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem" +#define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf" + +#define NIU_VPD_MIN_MAJOR 3 +#define NIU_VPD_MIN_MINOR 4 + +#define NIU_VPD_MODEL_MAX 32 +#define NIU_VPD_BD_MODEL_MAX 16 +#define NIU_VPD_VERSION_MAX 64 +#define NIU_VPD_PHY_TYPE_MAX 8 + +struct niu_vpd { + char model[NIU_VPD_MODEL_MAX]; + char board_model[NIU_VPD_BD_MODEL_MAX]; + char version[NIU_VPD_VERSION_MAX]; + char phy_type[NIU_VPD_PHY_TYPE_MAX]; + u8 mac_num; + u8 __pad; + u8 local_mac[6]; + int fcode_major; + int fcode_minor; +}; + +struct niu_altmac_rdc { + u8 alt_mac_num; + u8 rdc_num; + u8 mac_pref; +}; + +struct niu_vlan_rdc { + u8 rdc_num; + u8 vlan_pref; +}; + +struct niu_classifier { + struct niu_altmac_rdc alt_mac_mappings[16]; + struct niu_vlan_rdc vlan_mappings[ENET_VLAN_TBL_NUM_ENTRIES]; + + u16 tcam_top; + u16 tcam_sz; + u16 tcam_valid_entries; + u16 num_alt_mac_mappings; + + u32 h1_init; + u16 h2_init; +}; + +#define NIU_NUM_RDC_TABLES 8 +#define NIU_RDC_TABLE_SLOTS 16 + +struct rdc_table { + u8 rxdma_channel[NIU_RDC_TABLE_SLOTS]; +}; + +struct niu_rdc_tables { + struct rdc_table tables[NIU_NUM_RDC_TABLES]; + int first_table_num; + int num_tables; +}; + +#define PHY_TYPE_PMA_PMD 0 +#define PHY_TYPE_PCS 1 +#define PHY_TYPE_MII 2 +#define PHY_TYPE_MAX 3 + +struct phy_probe_info { + u32 phy_id[PHY_TYPE_MAX][NIU_MAX_PORTS]; + u8 phy_port[PHY_TYPE_MAX][NIU_MAX_PORTS]; + u8 cur[PHY_TYPE_MAX]; + + struct device_attribute phy_port_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; + struct device_attribute phy_type_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; + struct device_attribute phy_id_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; +}; + +struct niu_tcam_entry { + u8 valid; + u64 key[4]; + u64 key_mask[4]; + u64 assoc_data; +}; + +struct device_node; +union niu_parent_id { + struct { + int domain; + int bus; + int device; + } pci; + struct device_node *of; +}; + +struct niu; +struct niu_parent { + struct platform_device *plat_dev; + int index; + + union niu_parent_id id; + + struct niu *ports[NIU_MAX_PORTS]; + + atomic_t refcnt; + struct list_head list; + + spinlock_t lock; + + u32 flags; +#define PARENT_FLGS_CLS_HWINIT 0x00000001 + + u32 port_phy; +#define PORT_PHY_UNKNOWN 0x00000000 +#define PORT_PHY_INVALID 0xffffffff +#define PORT_TYPE_10G 0x01 +#define PORT_TYPE_1G 0x02 +#define PORT_TYPE_MASK 0x03 + + u8 rxchan_per_port[NIU_MAX_PORTS]; + u8 txchan_per_port[NIU_MAX_PORTS]; + + struct niu_rdc_tables rdc_group_cfg[NIU_MAX_PORTS]; + u8 rdc_default[NIU_MAX_PORTS]; + + u8 ldg_map[LDN_MAX + 1]; + + u8 plat_type; +#define PLAT_TYPE_INVALID 0x00 +#define PLAT_TYPE_ATLAS 0x01 +#define PLAT_TYPE_NIU 0x02 +#define PLAT_TYPE_VF_P0 0x03 +#define PLAT_TYPE_VF_P1 0x04 +#define PLAT_TYPE_ATCA_CP3220 0x08 + + u8 num_ports; + + u16 tcam_num_entries; +#define 
NIU_PCI_TCAM_ENTRIES 256 +#define NIU_NONPCI_TCAM_ENTRIES 128 +#define NIU_TCAM_ENTRIES_MAX 256 + + int rxdma_clock_divider; + + struct phy_probe_info phy_probe_info; + + struct niu_tcam_entry tcam[NIU_TCAM_ENTRIES_MAX]; + +#define NIU_L2_PROG_CLS 2 +#define NIU_L3_PROG_CLS 4 + u64 l2_cls[NIU_L2_PROG_CLS]; + u64 l3_cls[NIU_L3_PROG_CLS]; + u64 tcam_key[12]; + u64 flow_key[12]; + u16 l3_cls_refcnt[NIU_L3_PROG_CLS]; + u8 l3_cls_pid[NIU_L3_PROG_CLS]; +}; + +struct niu_ops { + void *(*alloc_coherent)(struct device *dev, size_t size, + u64 *handle, gfp_t flag); + void (*free_coherent)(struct device *dev, size_t size, + void *cpu_addr, u64 handle); + u64 (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction); + void (*unmap_page)(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction); + u64 (*map_single)(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction); + void (*unmap_single)(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction); +}; + +struct niu_link_config { + u32 supported; + + /* Describes what we're trying to get. */ + u32 advertising; + u16 speed; + u8 duplex; + u8 autoneg; + + /* Describes what we actually have. */ + u32 active_advertising; + u16 active_speed; + u8 active_duplex; + u8 active_autoneg; +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff +#define AUTONEG_INVALID 0xff + + u8 loopback_mode; +#define LOOPBACK_DISABLED 0x00 +#define LOOPBACK_PHY 0x01 +#define LOOPBACK_MAC 0x02 +}; + +struct niu_ldg { + struct napi_struct napi; + struct niu *np; + u8 ldg_num; + u8 timer; + u64 v0, v1, v2; + unsigned int irq; +}; + +struct niu_xmac_stats { + u64 tx_frames; + u64 tx_bytes; + u64 tx_fifo_errors; + u64 tx_overflow_errors; + u64 tx_max_pkt_size_errors; + u64 tx_underflow_errors; + + u64 rx_local_faults; + u64 rx_remote_faults; + u64 rx_link_faults; + u64 rx_align_errors; + u64 rx_frags; + u64 rx_mcasts; + u64 rx_bcasts; + u64 rx_hist_cnt1; + u64 rx_hist_cnt2; + u64 rx_hist_cnt3; + u64 rx_hist_cnt4; + u64 rx_hist_cnt5; + u64 rx_hist_cnt6; + u64 rx_hist_cnt7; + u64 rx_octets; + u64 rx_code_violations; + u64 rx_len_errors; + u64 rx_crc_errors; + u64 rx_underflows; + u64 rx_overflows; + + u64 pause_off_state; + u64 pause_on_state; + u64 pause_received; +}; + +struct niu_bmac_stats { + u64 tx_underflow_errors; + u64 tx_max_pkt_size_errors; + u64 tx_bytes; + u64 tx_frames; + + u64 rx_overflows; + u64 rx_frames; + u64 rx_align_errors; + u64 rx_crc_errors; + u64 rx_len_errors; + + u64 pause_off_state; + u64 pause_on_state; + u64 pause_received; +}; + +union niu_mac_stats { + struct niu_xmac_stats xmac; + struct niu_bmac_stats bmac; +}; + +struct niu_phy_ops { + int (*serdes_init)(struct niu *np); + int (*xcvr_init)(struct niu *np); + int (*link_status)(struct niu *np, int *); +}; + +struct platform_device; +struct niu { + void __iomem *regs; + struct net_device *dev; + struct pci_dev *pdev; + struct device *device; + struct niu_parent *parent; + + u32 flags; +#define NIU_FLAGS_HOTPLUG_PHY_PRESENT 0x02000000 /* Removeable PHY detected*/ +#define NIU_FLAGS_HOTPLUG_PHY 0x01000000 /* Removeable PHY */ +#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */ +#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ +#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ +#define NIU_FLAGS_PROMISC 0x00100000 /* PROMISC enabled */ +#define NIU_FLAGS_XCVR_SERDES 0x00080000 /* 0=PHY 1=SERDES */ +#define 
NIU_FLAGS_10G 0x00040000 /* 0=1G 1=10G */ +#define NIU_FLAGS_FIBER 0x00020000 /* 0=COPPER 1=FIBER */ +#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */ + + u32 msg_enable; + char irq_name[NIU_NUM_RXCHAN+NIU_NUM_TXCHAN+3][IFNAMSIZ + 6]; + + /* Protects hw programming, and ring state. */ + spinlock_t lock; + + const struct niu_ops *ops; + union niu_mac_stats mac_stats; + + struct rx_ring_info *rx_rings; + struct tx_ring_info *tx_rings; + int num_rx_rings; + int num_tx_rings; + + struct niu_ldg ldg[NIU_NUM_LDG]; + int num_ldg; + + void __iomem *mac_regs; + unsigned long ipp_off; + unsigned long pcs_off; + unsigned long xpcs_off; + + struct timer_list timer; + u64 orig_led_state; + const struct niu_phy_ops *phy_ops; + int phy_addr; + + struct niu_link_config link_config; + + struct work_struct reset_task; + + u8 port; + u8 mac_xcvr; +#define MAC_XCVR_MII 1 +#define MAC_XCVR_PCS 2 +#define MAC_XCVR_XPCS 3 + + struct niu_classifier clas; + + struct niu_vpd vpd; + u32 eeprom_len; + + struct platform_device *op; + void __iomem *vir_regs_1; + void __iomem *vir_regs_2; +}; + +#endif /* _NIU_H */ diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c new file mode 100644 index 000000000..aa4f9d2d8 --- /dev/null +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -0,0 +1,1282 @@ +/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters. + * + * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net) + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sunbmac.h" + +#define DRV_NAME "sunbmac" +#define DRV_VERSION "2.1" +#define DRV_RELDATE "August 26, 2008" +#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" + +static char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; + +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver"); +MODULE_LICENSE("GPL"); + +#undef DEBUG_PROBE +#undef DEBUG_TX +#undef DEBUG_IRQ + +#ifdef DEBUG_PROBE +#define DP(x) printk x +#else +#define DP(x) +#endif + +#ifdef DEBUG_TX +#define DTX(x) printk x +#else +#define DTX(x) +#endif + +#ifdef DEBUG_IRQ +#define DIRQ(x) printk x +#else +#define DIRQ(x) +#endif + +#define DEFAULT_JAMSIZE 4 /* Toe jam */ + +#define QEC_RESET_TRIES 200 + +static int qec_global_reset(void __iomem *gregs) +{ + int tries = QEC_RESET_TRIES; + + sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); + while (--tries) { + if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) { + udelay(20); + continue; + } + break; + } + if (tries) + return 0; + printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n"); + return -1; +} + +static void qec_init(struct bigmac *bp) +{ + struct platform_device *qec_op = bp->qec_op; + void __iomem *gregs = bp->gregs; + u8 bsizes = bp->bigmac_bursts; + u32 regval; + + /* 64byte bursts do not work at the moment, do + * not even try to enable them. -DaveM + */ + if (bsizes & DMA_BURST32) + regval = GLOB_CTRL_B32; + else + regval = GLOB_CTRL_B16; + sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL); + sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE); + + /* All of memsize is given to bigmac. */ + sbus_writel(resource_size(&qec_op->resource[1]), + gregs + GLOB_MSIZE); + + /* Half to the transmitter, half to the receiver. 
*/ + sbus_writel(resource_size(&qec_op->resource[1]) >> 1, + gregs + GLOB_TSIZE); + sbus_writel(resource_size(&qec_op->resource[1]) >> 1, + gregs + GLOB_RSIZE); +} + +#define TX_RESET_TRIES 32 +#define RX_RESET_TRIES 32 + +static void bigmac_tx_reset(void __iomem *bregs) +{ + int tries = TX_RESET_TRIES; + + sbus_writel(0, bregs + BMAC_TXCFG); + + /* The fifo threshold bit is read-only and does + * not clear. -DaveM + */ + while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 && + --tries != 0) + udelay(20); + + if (!tries) { + printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n"); + printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n", + sbus_readl(bregs + BMAC_TXCFG)); + } +} + +static void bigmac_rx_reset(void __iomem *bregs) +{ + int tries = RX_RESET_TRIES; + + sbus_writel(0, bregs + BMAC_RXCFG); + while (sbus_readl(bregs + BMAC_RXCFG) && --tries) + udelay(20); + + if (!tries) { + printk(KERN_ERR "BIGMAC: Receiver will not reset.\n"); + printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n", + sbus_readl(bregs + BMAC_RXCFG)); + } +} + +/* Reset the transmitter and receiver. */ +static void bigmac_stop(struct bigmac *bp) +{ + bigmac_tx_reset(bp->bregs); + bigmac_rx_reset(bp->bregs); +} + +static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) +{ + struct net_device_stats *stats = &bp->enet_stats; + + stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR); + sbus_writel(0, bregs + BMAC_RCRCECTR); + + stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR); + sbus_writel(0, bregs + BMAC_UNALECTR); + + stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR); + sbus_writel(0, bregs + BMAC_GLECTR); + + stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR); + + stats->collisions += + (sbus_readl(bregs + BMAC_EXCTR) + + sbus_readl(bregs + BMAC_LTCTR)); + sbus_writel(0, bregs + BMAC_EXCTR); + sbus_writel(0, bregs + BMAC_LTCTR); +} + +static void bigmac_clean_rings(struct bigmac *bp) +{ + int i; + + for (i = 0; i < RX_RING_SIZE; i++) { + if (bp->rx_skbs[i] != NULL) { + dev_kfree_skb_any(bp->rx_skbs[i]); + bp->rx_skbs[i] = NULL; + } + } + + for (i = 0; i < TX_RING_SIZE; i++) { + if (bp->tx_skbs[i] != NULL) { + dev_kfree_skb_any(bp->tx_skbs[i]); + bp->tx_skbs[i] = NULL; + } + } +} + +static void bigmac_init_rings(struct bigmac *bp, int from_irq) +{ + struct bmac_init_block *bb = bp->bmac_block; + int i; + gfp_t gfp_flags = GFP_KERNEL; + + if (from_irq || in_interrupt()) + gfp_flags = GFP_ATOMIC; + + bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; + + /* Free any skippy bufs left around in the rings. */ + bigmac_clean_rings(bp); + + /* Now get new skbufs for the receive ring. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + + skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags); + if (!skb) + continue; + + bp->rx_skbs[i] = skb; + + /* Because we reserve afterwards. 
*/ + skb_put(skb, ETH_FRAME_LEN); + skb_reserve(skb, 34); + + bb->be_rxd[i].rx_addr = + dma_map_single(&bp->bigmac_op->dev, + skb->data, + RX_BUF_ALLOC_SIZE - 34, + DMA_FROM_DEVICE); + bb->be_rxd[i].rx_flags = + (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); + } + + for (i = 0; i < TX_RING_SIZE; i++) + bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0; +} + +#define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK) +#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB) + +static void idle_transceiver(void __iomem *tregs) +{ + int i = 20; + + while (i--) { + sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + } +} + +static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit) +{ + if (bp->tcvr_type == internal) { + bit = (bit & 1) << 3; + sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO), + tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, + tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + } else if (bp->tcvr_type == external) { + bit = (bit & 1) << 2; + sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB, + tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK, + tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + } else { + printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n"); + } +} + +static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs) +{ + int retval = 0; + + if (bp->tcvr_type == internal) { + sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, + tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; + } else if (bp->tcvr_type == external) { + sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; + } else { + printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n"); + } + return retval; +} + +static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs) +{ + int retval = 0; + + if (bp->tcvr_type == internal) { + sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; + sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + } else if (bp->tcvr_type == external) { + sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; + sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + } else { + printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n"); + } + return retval; +} + +static void put_tcvr_byte(struct bigmac *bp, + void __iomem *tregs, + unsigned int byte) +{ + int shift = 4; + + do { + write_tcvr_bit(bp, tregs, ((byte >> shift) & 1)); + shift -= 1; + } while (shift >= 0); +} + +static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs, + int reg, unsigned short val) +{ + int shift; + + reg &= 0xff; + val &= 0xffff; + switch(bp->tcvr_type) { + case internal: + case 
external: + break; + + default: + printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); + return; + } + + idle_transceiver(tregs); + write_tcvr_bit(bp, tregs, 0); + write_tcvr_bit(bp, tregs, 1); + write_tcvr_bit(bp, tregs, 0); + write_tcvr_bit(bp, tregs, 1); + + put_tcvr_byte(bp, tregs, + ((bp->tcvr_type == internal) ? + BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); + + put_tcvr_byte(bp, tregs, reg); + + write_tcvr_bit(bp, tregs, 1); + write_tcvr_bit(bp, tregs, 0); + + shift = 15; + do { + write_tcvr_bit(bp, tregs, (val >> shift) & 1); + shift -= 1; + } while (shift >= 0); +} + +static unsigned short bigmac_tcvr_read(struct bigmac *bp, + void __iomem *tregs, + int reg) +{ + unsigned short retval = 0; + + reg &= 0xff; + switch(bp->tcvr_type) { + case internal: + case external: + break; + + default: + printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); + return 0xffff; + } + + idle_transceiver(tregs); + write_tcvr_bit(bp, tregs, 0); + write_tcvr_bit(bp, tregs, 1); + write_tcvr_bit(bp, tregs, 1); + write_tcvr_bit(bp, tregs, 0); + + put_tcvr_byte(bp, tregs, + ((bp->tcvr_type == internal) ? + BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); + + put_tcvr_byte(bp, tregs, reg); + + if (bp->tcvr_type == external) { + int shift = 15; + + (void) read_tcvr_bit2(bp, tregs); + (void) read_tcvr_bit2(bp, tregs); + + do { + int tmp; + + tmp = read_tcvr_bit2(bp, tregs); + retval |= ((tmp & 1) << shift); + shift -= 1; + } while (shift >= 0); + + (void) read_tcvr_bit2(bp, tregs); + (void) read_tcvr_bit2(bp, tregs); + (void) read_tcvr_bit2(bp, tregs); + } else { + int shift = 15; + + (void) read_tcvr_bit(bp, tregs); + (void) read_tcvr_bit(bp, tregs); + + do { + int tmp; + + tmp = read_tcvr_bit(bp, tregs); + retval |= ((tmp & 1) << shift); + shift -= 1; + } while (shift >= 0); + + (void) read_tcvr_bit(bp, tregs); + (void) read_tcvr_bit(bp, tregs); + (void) read_tcvr_bit(bp, tregs); + } + return retval; +} + +static void bigmac_tcvr_init(struct bigmac *bp) +{ + void __iomem *tregs = bp->tregs; + u32 mpal; + + idle_transceiver(tregs); + sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, + tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + + /* Only the bit for the present transceiver (internal or + * external) will stick, set them both and see what stays. + */ + sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); + sbus_readl(tregs + TCVR_MPAL); + udelay(20); + + mpal = sbus_readl(tregs + TCVR_MPAL); + if (mpal & MGMT_PAL_EXT_MDIO) { + bp->tcvr_type = external; + sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), + tregs + TCVR_TPAL); + sbus_readl(tregs + TCVR_TPAL); + } else if (mpal & MGMT_PAL_INT_MDIO) { + bp->tcvr_type = internal; + sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK | + TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), + tregs + TCVR_TPAL); + sbus_readl(tregs + TCVR_TPAL); + } else { + printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor " + "external MDIO available!\n"); + printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n", + sbus_readl(tregs + TCVR_MPAL), + sbus_readl(tregs + TCVR_TPAL)); + } +} + +static int bigmac_init_hw(struct bigmac *, int); + +static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) +{ + if (bp->sw_bmcr & BMCR_SPEED100) { + int timeout; + + /* Reset the PHY. 
*/ + bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); + bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); + bp->sw_bmcr = (BMCR_RESET); + bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); + + timeout = 64; + while (--timeout) { + bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); + if ((bp->sw_bmcr & BMCR_RESET) == 0) + break; + udelay(20); + } + if (timeout == 0) + printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); + + bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); + + /* Now we try 10baseT. */ + bp->sw_bmcr &= ~(BMCR_SPEED100); + bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); + return 0; + } + + /* We've tried them all. */ + return -1; +} + +static void bigmac_timer(unsigned long data) +{ + struct bigmac *bp = (struct bigmac *) data; + void __iomem *tregs = bp->tregs; + int restart_timer = 0; + + bp->timer_ticks++; + if (bp->timer_state == ltrywait) { + bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); + bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); + if (bp->sw_bmsr & BMSR_LSTATUS) { + printk(KERN_INFO "%s: Link is now up at %s.\n", + bp->dev->name, + (bp->sw_bmcr & BMCR_SPEED100) ? + "100baseT" : "10baseT"); + bp->timer_state = asleep; + restart_timer = 0; + } else { + if (bp->timer_ticks >= 4) { + int ret; + + ret = try_next_permutation(bp, tregs); + if (ret == -1) { + printk(KERN_ERR "%s: Link down, cable problem?\n", + bp->dev->name); + ret = bigmac_init_hw(bp, 0); + if (ret) { + printk(KERN_ERR "%s: Error, cannot re-init the " + "BigMAC.\n", bp->dev->name); + } + return; + } + bp->timer_ticks = 0; + restart_timer = 1; + } else { + restart_timer = 1; + } + } + } else { + /* Can't happens.... */ + printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", + bp->dev->name); + restart_timer = 0; + bp->timer_ticks = 0; + bp->timer_state = asleep; /* foo on you */ + } + + if (restart_timer != 0) { + bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ + add_timer(&bp->bigmac_timer); + } +} + +/* Well, really we just force the chip into 100baseT then + * 10baseT, each time checking for a link status. + */ +static void bigmac_begin_auto_negotiation(struct bigmac *bp) +{ + void __iomem *tregs = bp->tregs; + int timeout; + + /* Grab new software copies of PHY registers. */ + bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR); + bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); + + /* Reset the PHY. */ + bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); + bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); + bp->sw_bmcr = (BMCR_RESET); + bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); + + timeout = 64; + while (--timeout) { + bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); + if ((bp->sw_bmcr & BMCR_RESET) == 0) + break; + udelay(20); + } + if (timeout == 0) + printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); + + bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR); + + /* First we try 100baseT. */ + bp->sw_bmcr |= BMCR_SPEED100; + bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr); + + bp->timer_state = ltrywait; + bp->timer_ticks = 0; + bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; + bp->bigmac_timer.data = (unsigned long) bp; + bp->bigmac_timer.function = bigmac_timer; + add_timer(&bp->bigmac_timer); +} + +static int bigmac_init_hw(struct bigmac *bp, int from_irq) +{ + void __iomem *gregs = bp->gregs; + void __iomem *cregs = bp->creg; + void __iomem *bregs = bp->bregs; + unsigned char *e = &bp->dev->dev_addr[0]; + + /* Latch current counters into statistics. 
*/ + bigmac_get_counters(bp, bregs); + + /* Reset QEC. */ + qec_global_reset(gregs); + + /* Init QEC. */ + qec_init(bp); + + /* Alloc and reset the tx/rx descriptor chains. */ + bigmac_init_rings(bp, from_irq); + + /* Initialize the PHY. */ + bigmac_tcvr_init(bp); + + /* Stop transmitter and receiver. */ + bigmac_stop(bp); + + /* Set hardware ethernet address. */ + sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2); + sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1); + sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0); + + /* Clear the hash table until mc upload occurs. */ + sbus_writel(0, bregs + BMAC_HTABLE3); + sbus_writel(0, bregs + BMAC_HTABLE2); + sbus_writel(0, bregs + BMAC_HTABLE1); + sbus_writel(0, bregs + BMAC_HTABLE0); + + /* Enable Big Mac hash table filter. */ + sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO, + bregs + BMAC_RXCFG); + udelay(20); + + /* Ok, configure the Big Mac transmitter. */ + sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG); + + /* The HME docs recommend to use the 10LSB of our MAC here. */ + sbus_writel(((e[5] | e[4] << 8) & 0x3ff), + bregs + BMAC_RSEED); + + /* Enable the output drivers no matter what. */ + sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV, + bregs + BMAC_XIFCFG); + + /* Tell the QEC where the ring descriptors are. */ + sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), + cregs + CREG_RXDS); + sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), + cregs + CREG_TXDS); + + /* Setup the FIFO pointers into QEC local memory. */ + sbus_writel(0, cregs + CREG_RXRBUFPTR); + sbus_writel(0, cregs + CREG_RXWBUFPTR); + sbus_writel(sbus_readl(gregs + GLOB_RSIZE), + cregs + CREG_TXRBUFPTR); + sbus_writel(sbus_readl(gregs + GLOB_RSIZE), + cregs + CREG_TXWBUFPTR); + + /* Tell bigmac what interrupts we don't want to hear about. */ + sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME, + bregs + BMAC_IMASK); + + /* Enable the various other irq's. */ + sbus_writel(0, cregs + CREG_RIMASK); + sbus_writel(0, cregs + CREG_TIMASK); + sbus_writel(0, cregs + CREG_QMASK); + sbus_writel(0, cregs + CREG_BMASK); + + /* Set jam size to a reasonable default. */ + sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE); + + /* Clear collision counter. */ + sbus_writel(0, cregs + CREG_CCNT); + + /* Enable transmitter and receiver. */ + sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE, + bregs + BMAC_TXCFG); + sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE, + bregs + BMAC_RXCFG); + + /* Ok, start detecting link speed/duplex. */ + bigmac_begin_auto_negotiation(bp); + + /* Success. */ + return 0; +} + +/* Error interrupts get sent here. 
*/ +static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) +{ + printk(KERN_ERR "bigmac_is_medium_rare: "); + if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) { + if (qec_status & GLOB_STAT_ER) + printk("QEC_ERROR, "); + if (qec_status & GLOB_STAT_BM) + printk("QEC_BMAC_ERROR, "); + } + if (bmac_status & CREG_STAT_ERRORS) { + if (bmac_status & CREG_STAT_BERROR) + printk("BMAC_ERROR, "); + if (bmac_status & CREG_STAT_TXDERROR) + printk("TXD_ERROR, "); + if (bmac_status & CREG_STAT_TXLERR) + printk("TX_LATE_ERROR, "); + if (bmac_status & CREG_STAT_TXPERR) + printk("TX_PARITY_ERROR, "); + if (bmac_status & CREG_STAT_TXSERR) + printk("TX_SBUS_ERROR, "); + + if (bmac_status & CREG_STAT_RXDROP) + printk("RX_DROP_ERROR, "); + + if (bmac_status & CREG_STAT_RXSMALL) + printk("RX_SMALL_ERROR, "); + if (bmac_status & CREG_STAT_RXLERR) + printk("RX_LATE_ERROR, "); + if (bmac_status & CREG_STAT_RXPERR) + printk("RX_PARITY_ERROR, "); + if (bmac_status & CREG_STAT_RXSERR) + printk("RX_SBUS_ERROR, "); + } + + printk(" RESET\n"); + bigmac_init_hw(bp, 1); +} + +/* BigMAC transmit complete service routines. */ +static void bigmac_tx(struct bigmac *bp) +{ + struct be_txd *txbase = &bp->bmac_block->be_txd[0]; + struct net_device *dev = bp->dev; + int elem; + + spin_lock(&bp->lock); + + elem = bp->tx_old; + DTX(("bigmac_tx: tx_old[%d] ", elem)); + while (elem != bp->tx_new) { + struct sk_buff *skb; + struct be_txd *this = &txbase[elem]; + + DTX(("this(%p) [flags(%08x)addr(%08x)]", + this, this->tx_flags, this->tx_addr)); + + if (this->tx_flags & TXD_OWN) + break; + skb = bp->tx_skbs[elem]; + bp->enet_stats.tx_packets++; + bp->enet_stats.tx_bytes += skb->len; + dma_unmap_single(&bp->bigmac_op->dev, + this->tx_addr, skb->len, + DMA_TO_DEVICE); + + DTX(("skb(%p) ", skb)); + bp->tx_skbs[elem] = NULL; + dev_kfree_skb_irq(skb); + + elem = NEXT_TX(elem); + } + DTX((" DONE, tx_old=%d\n", elem)); + bp->tx_old = elem; + + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL(bp) > 0) + netif_wake_queue(bp->dev); + + spin_unlock(&bp->lock); +} + +/* BigMAC receive complete service routines. */ +static void bigmac_rx(struct bigmac *bp) +{ + struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0]; + struct be_rxd *this; + int elem = bp->rx_new, drops = 0; + u32 flags; + + this = &rxbase[elem]; + while (!((flags = this->rx_flags) & RXD_OWN)) { + struct sk_buff *skb; + int len = (flags & RXD_LENGTH); /* FCS not included */ + + /* Check for errors. */ + if (len < ETH_ZLEN) { + bp->enet_stats.rx_errors++; + bp->enet_stats.rx_length_errors++; + + drop_it: + /* Return it to the BigMAC. */ + bp->enet_stats.rx_dropped++; + this->rx_flags = + (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); + goto next; + } + skb = bp->rx_skbs[elem]; + if (len > RX_COPY_THRESHOLD) { + struct sk_buff *new_skb; + + /* Now refill the entry, if we can. */ + new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); + if (new_skb == NULL) { + drops++; + goto drop_it; + } + dma_unmap_single(&bp->bigmac_op->dev, + this->rx_addr, + RX_BUF_ALLOC_SIZE - 34, + DMA_FROM_DEVICE); + bp->rx_skbs[elem] = new_skb; + skb_put(new_skb, ETH_FRAME_LEN); + skb_reserve(new_skb, 34); + this->rx_addr = + dma_map_single(&bp->bigmac_op->dev, + new_skb->data, + RX_BUF_ALLOC_SIZE - 34, + DMA_FROM_DEVICE); + this->rx_flags = + (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); + + /* Trim the original skb for the netif. 
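+ * The freshly allocated skb has taken over this ring slot, so the
+ * original full-sized buffer can go straight up the stack at its
+ * true length.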
*/ + skb_trim(skb, len); + } else { + struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2); + + if (copy_skb == NULL) { + drops++; + goto drop_it; + } + skb_reserve(copy_skb, 2); + skb_put(copy_skb, len); + dma_sync_single_for_cpu(&bp->bigmac_op->dev, + this->rx_addr, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); + dma_sync_single_for_device(&bp->bigmac_op->dev, + this->rx_addr, len, + DMA_FROM_DEVICE); + + /* Reuse original ring buffer. */ + this->rx_flags = + (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); + + skb = copy_skb; + } + + /* No checksums done by the BigMAC ;-( */ + skb->protocol = eth_type_trans(skb, bp->dev); + netif_rx(skb); + bp->enet_stats.rx_packets++; + bp->enet_stats.rx_bytes += len; + next: + elem = NEXT_RX(elem); + this = &rxbase[elem]; + } + bp->rx_new = elem; + if (drops) + printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name); +} + +static irqreturn_t bigmac_interrupt(int irq, void *dev_id) +{ + struct bigmac *bp = (struct bigmac *) dev_id; + u32 qec_status, bmac_status; + + DIRQ(("bigmac_interrupt: ")); + + /* Latch status registers now. */ + bmac_status = sbus_readl(bp->creg + CREG_STAT); + qec_status = sbus_readl(bp->gregs + GLOB_STAT); + + DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status)); + if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) || + (bmac_status & CREG_STAT_ERRORS)) + bigmac_is_medium_rare(bp, qec_status, bmac_status); + + if (bmac_status & CREG_STAT_TXIRQ) + bigmac_tx(bp); + + if (bmac_status & CREG_STAT_RXIRQ) + bigmac_rx(bp); + + return IRQ_HANDLED; +} + +static int bigmac_open(struct net_device *dev) +{ + struct bigmac *bp = netdev_priv(dev); + int ret; + + ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp); + if (ret) { + printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq); + return ret; + } + init_timer(&bp->bigmac_timer); + ret = bigmac_init_hw(bp, 0); + if (ret) + free_irq(dev->irq, bp); + return ret; +} + +static int bigmac_close(struct net_device *dev) +{ + struct bigmac *bp = netdev_priv(dev); + + del_timer(&bp->bigmac_timer); + bp->timer_state = asleep; + bp->timer_ticks = 0; + + bigmac_stop(bp); + bigmac_clean_rings(bp); + free_irq(dev->irq, bp); + return 0; +} + +static void bigmac_tx_timeout(struct net_device *dev) +{ + struct bigmac *bp = netdev_priv(dev); + + bigmac_init_hw(bp, 0); + netif_wake_queue(dev); +} + +/* Put a packet on the wire. */ +static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bigmac *bp = netdev_priv(dev); + int len, entry; + u32 mapping; + + len = skb->len; + mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, + len, DMA_TO_DEVICE); + + /* Avoid a race... */ + spin_lock_irq(&bp->lock); + entry = bp->tx_new; + DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry)); + bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; + bp->tx_skbs[entry] = skb; + bp->bmac_block->be_txd[entry].tx_addr = mapping; + bp->bmac_block->be_txd[entry].tx_flags = + (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); + bp->tx_new = NEXT_TX(entry); + if (TX_BUFFS_AVAIL(bp) <= 0) + netif_stop_queue(dev); + spin_unlock_irq(&bp->lock); + + /* Get it going. 
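+ * Writing CREG_CTRL_TWAKEUP tells the QEC channel to go look at the
+ * TX descriptor ring again and pick up the entry we just posted.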
*/ + sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); + + + return NETDEV_TX_OK; +} + +static struct net_device_stats *bigmac_get_stats(struct net_device *dev) +{ + struct bigmac *bp = netdev_priv(dev); + + bigmac_get_counters(bp, bp->bregs); + return &bp->enet_stats; +} + +static void bigmac_set_multicast(struct net_device *dev) +{ + struct bigmac *bp = netdev_priv(dev); + void __iomem *bregs = bp->bregs; + struct netdev_hw_addr *ha; + u32 tmp, crc; + + /* Disable the receiver. The bit self-clears when + * the operation is complete. + */ + tmp = sbus_readl(bregs + BMAC_RXCFG); + tmp &= ~(BIGMAC_RXCFG_ENABLE); + sbus_writel(tmp, bregs + BMAC_RXCFG); + while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0) + udelay(20); + + if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { + sbus_writel(0xffff, bregs + BMAC_HTABLE0); + sbus_writel(0xffff, bregs + BMAC_HTABLE1); + sbus_writel(0xffff, bregs + BMAC_HTABLE2); + sbus_writel(0xffff, bregs + BMAC_HTABLE3); + } else if (dev->flags & IFF_PROMISC) { + tmp = sbus_readl(bregs + BMAC_RXCFG); + tmp |= BIGMAC_RXCFG_PMISC; + sbus_writel(tmp, bregs + BMAC_RXCFG); + } else { + u16 hash_table[4] = { 0 }; + + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc >>= 26; + hash_table[crc >> 4] |= 1 << (crc & 0xf); + } + sbus_writel(hash_table[0], bregs + BMAC_HTABLE0); + sbus_writel(hash_table[1], bregs + BMAC_HTABLE1); + sbus_writel(hash_table[2], bregs + BMAC_HTABLE2); + sbus_writel(hash_table[3], bregs + BMAC_HTABLE3); + } + + /* Re-enable the receiver. */ + tmp = sbus_readl(bregs + BMAC_RXCFG); + tmp |= BIGMAC_RXCFG_ENABLE; + sbus_writel(tmp, bregs + BMAC_RXCFG); +} + +/* Ethtool support... */ +static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "sunbmac", sizeof(info->driver)); + strlcpy(info->version, "2.0", sizeof(info->version)); +} + +static u32 bigmac_get_link(struct net_device *dev) +{ + struct bigmac *bp = netdev_priv(dev); + + spin_lock_irq(&bp->lock); + bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR); + spin_unlock_irq(&bp->lock); + + return (bp->sw_bmsr & BMSR_LSTATUS); +} + +static const struct ethtool_ops bigmac_ethtool_ops = { + .get_drvinfo = bigmac_get_drvinfo, + .get_link = bigmac_get_link, +}; + +static const struct net_device_ops bigmac_ops = { + .ndo_open = bigmac_open, + .ndo_stop = bigmac_close, + .ndo_start_xmit = bigmac_start_xmit, + .ndo_get_stats = bigmac_get_stats, + .ndo_set_rx_mode = bigmac_set_multicast, + .ndo_tx_timeout = bigmac_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int bigmac_ether_init(struct platform_device *op, + struct platform_device *qec_op) +{ + static int version_printed; + struct net_device *dev; + u8 bsizes, bsizes_more; + struct bigmac *bp; + int i; + + /* Get a new device struct for this interface. */ + dev = alloc_etherdev(sizeof(struct bigmac)); + if (!dev) + return -ENOMEM; + + if (version_printed++ == 0) + printk(KERN_INFO "%s", version); + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = idprom->id_ethaddr[i]; + + /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */ + bp = netdev_priv(dev); + bp->qec_op = qec_op; + bp->bigmac_op = op; + + SET_NETDEV_DEV(dev, &op->dev); + + spin_lock_init(&bp->lock); + + /* Map in QEC global control registers. 
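+ * These come from resource 0 of the parent QEC node; the per-channel,
+ * MAC core and transceiver register banks are mapped from the BigMAC
+ * child device further down.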
*/ + bp->gregs = of_ioremap(&qec_op->resource[0], 0, + GLOB_REG_SIZE, "BigMAC QEC GLobal Regs"); + if (!bp->gregs) { + printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n"); + goto fail_and_cleanup; + } + + /* Make sure QEC is in BigMAC mode. */ + if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { + printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n"); + goto fail_and_cleanup; + } + + /* Reset the QEC. */ + if (qec_global_reset(bp->gregs)) + goto fail_and_cleanup; + + /* Get supported SBUS burst sizes. */ + bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); + bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff); + + bsizes &= 0xff; + if (bsizes_more != 0xff) + bsizes &= bsizes_more; + if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || + (bsizes & DMA_BURST32) == 0) + bsizes = (DMA_BURST32 - 1); + bp->bigmac_bursts = bsizes; + + /* Perform QEC initialization. */ + qec_init(bp); + + /* Map in the BigMAC channel registers. */ + bp->creg = of_ioremap(&op->resource[0], 0, + CREG_REG_SIZE, "BigMAC QEC Channel Regs"); + if (!bp->creg) { + printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n"); + goto fail_and_cleanup; + } + + /* Map in the BigMAC control registers. */ + bp->bregs = of_ioremap(&op->resource[1], 0, + BMAC_REG_SIZE, "BigMAC Primary Regs"); + if (!bp->bregs) { + printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n"); + goto fail_and_cleanup; + } + + /* Map in the BigMAC transceiver registers, this is how you poke at + * the BigMAC's PHY. + */ + bp->tregs = of_ioremap(&op->resource[2], 0, + TCVR_REG_SIZE, "BigMAC Transceiver Regs"); + if (!bp->tregs) { + printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n"); + goto fail_and_cleanup; + } + + /* Stop the BigMAC. */ + bigmac_stop(bp); + + /* Allocate transmit/receive descriptor DVMA block. */ + bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, + PAGE_SIZE, + &bp->bblock_dvma, GFP_ATOMIC); + if (bp->bmac_block == NULL || bp->bblock_dvma == 0) + goto fail_and_cleanup; + + /* Get the board revision of this BigMAC. */ + bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, + "board-version", 1); + + /* Init auto-negotiation timer state. */ + init_timer(&bp->bigmac_timer); + bp->timer_state = asleep; + bp->timer_ticks = 0; + + /* Backlink to generic net device struct. */ + bp->dev = dev; + + /* Set links to our BigMAC open and close routines. */ + dev->ethtool_ops = &bigmac_ethtool_ops; + dev->netdev_ops = &bigmac_ops; + dev->watchdog_timeo = 5*HZ; + + /* Finish net device registration. */ + dev->irq = bp->bigmac_op->archdata.irqs[0]; + dev->dma = 0; + + if (register_netdev(dev)) { + printk(KERN_ERR "BIGMAC: Cannot register device.\n"); + goto fail_and_cleanup; + } + + dev_set_drvdata(&bp->bigmac_op->dev, bp); + + printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n", + dev->name, dev->dev_addr); + + return 0; + +fail_and_cleanup: + /* Something went wrong, undo whatever we did so far. */ + /* Free register mappings if any. 
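+ * We can get here from any point in the probe sequence above, so
+ * check each mapping and allocation individually before undoing it.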
*/ + if (bp->gregs) + of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); + if (bp->creg) + of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); + if (bp->bregs) + of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); + if (bp->tregs) + of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); + + if (bp->bmac_block) + dma_free_coherent(&bp->bigmac_op->dev, + PAGE_SIZE, + bp->bmac_block, + bp->bblock_dvma); + + /* This also frees the co-located private data */ + free_netdev(dev); + return -ENODEV; +} + +/* QEC can be the parent of either QuadEthernet or a BigMAC. We want + * the latter. + */ +static int bigmac_sbus_probe(struct platform_device *op) +{ + struct device *parent = op->dev.parent; + struct platform_device *qec_op; + + qec_op = to_platform_device(parent); + + return bigmac_ether_init(op, qec_op); +} + +static int bigmac_sbus_remove(struct platform_device *op) +{ + struct bigmac *bp = platform_get_drvdata(op); + struct device *parent = op->dev.parent; + struct net_device *net_dev = bp->dev; + struct platform_device *qec_op; + + qec_op = to_platform_device(parent); + + unregister_netdev(net_dev); + + of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); + of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); + of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); + of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); + dma_free_coherent(&op->dev, + PAGE_SIZE, + bp->bmac_block, + bp->bblock_dvma); + + free_netdev(net_dev); + + return 0; +} + +static const struct of_device_id bigmac_sbus_match[] = { + { + .name = "be", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, bigmac_sbus_match); + +static struct platform_driver bigmac_sbus_driver = { + .driver = { + .name = "sunbmac", + .of_match_table = bigmac_sbus_match, + }, + .probe = bigmac_sbus_probe, + .remove = bigmac_sbus_remove, +}; + +module_platform_driver(bigmac_sbus_driver); diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h new file mode 100644 index 000000000..06dd21707 --- /dev/null +++ b/drivers/net/ethernet/sun/sunbmac.h @@ -0,0 +1,338 @@ +/* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $ + * sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards. + * + * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + */ + +#ifndef _SUNBMAC_H +#define _SUNBMAC_H + +/* QEC global registers. 
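+ * These are shared, per-QEC settings: global control/status plus the
+ * packet size and local-memory partitioning that the per-channel FIFO
+ * pointers are derived from (see bigmac_init_hw() and GLOB_RSIZE).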
*/ +#define GLOB_CTRL 0x00UL /* Control */ +#define GLOB_STAT 0x04UL /* Status */ +#define GLOB_PSIZE 0x08UL /* Packet Size */ +#define GLOB_MSIZE 0x0cUL /* Local-mem size (64K) */ +#define GLOB_RSIZE 0x10UL /* Receive partition size */ +#define GLOB_TSIZE 0x14UL /* Transmit partition size */ +#define GLOB_REG_SIZE 0x18UL + +#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */ +#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */ +#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */ +#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */ +#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */ +#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */ +#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */ +#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */ + +#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */ +#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */ +#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */ +#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */ + +#define GLOB_PSIZE_2048 0x00 /* 2k packet size */ +#define GLOB_PSIZE_4096 0x01 /* 4k packet size */ +#define GLOB_PSIZE_6144 0x10 /* 6k packet size */ +#define GLOB_PSIZE_8192 0x11 /* 8k packet size */ + +/* QEC BigMAC channel registers. */ +#define CREG_CTRL 0x00UL /* Control */ +#define CREG_STAT 0x04UL /* Status */ +#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */ +#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */ +#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */ +#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */ +#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */ +#define CREG_BMASK 0x1cUL /* BigMAC Error Interrupt Mask*/ +#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */ +#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */ +#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */ +#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */ +#define CREG_CCNT 0x30UL /* Collision Counter */ +#define CREG_REG_SIZE 0x34UL + +#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. 
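+ * Written by bigmac_start_xmit(), e.g.
+ *	sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
+ * whenever a new TX descriptor has been posted.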
*/ + +#define CREG_STAT_BERROR 0x80000000 /* BigMAC error */ +#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */ +#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */ +#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */ +#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */ +#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */ +#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */ +#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */ +#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */ +#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */ +#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */ +#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */ + +#define CREG_STAT_ERRORS (CREG_STAT_BERROR|CREG_STAT_TXDERROR|CREG_STAT_TXLERR| \ + CREG_STAT_TXPERR|CREG_STAT_TXSERR|CREG_STAT_RXDROP| \ + CREG_STAT_RXSMALL|CREG_STAT_RXLERR|CREG_STAT_RXPERR| \ + CREG_STAT_RXSERR) + +#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */ +#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */ +#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */ +#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */ +#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */ +#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */ +#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */ +#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */ +#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */ + +/* BIGMAC core registers */ +#define BMAC_XIFCFG 0x000UL /* XIF config register */ + /* 0x004-->0x0fc, reserved */ +#define BMAC_STATUS 0x100UL /* Status register, clear on read */ +#define BMAC_IMASK 0x104UL /* Interrupt mask register */ + /* 0x108-->0x204, reserved */ +#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */ +#define BMAC_TXCFG 0x20cUL /* Transmitter config register */ +#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */ +#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */ +#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */ +#define BMAC_STIME 0x21cUL /* Transmit slot time */ +#define BMAC_PLEN 0x220UL /* Size of transmit preamble */ +#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */ +#define BMAC_TXDELIM 0x228UL /* Transmit delimiter */ +#define BMAC_JSIZE 0x22cUL /* Toe jam... 
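+ * (jam size register; bigmac_init_hw() programs it with DEFAULT_JAMSIZE)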
*/ +#define BMAC_TXPMAX 0x230UL /* Transmit max pkt size */ +#define BMAC_TXPMIN 0x234UL /* Transmit min pkt size */ +#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */ +#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */ +#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */ +#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */ +#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */ +#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */ +#define BMAC_RSEED 0x250UL /* Transmit random number seed */ +#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */ + /* 0x258-->0x304, reserved */ +#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */ +#define BMAC_RXCFG 0x30cUL /* Receiver config register */ +#define BMAC_RXPMAX 0x310UL /* Receive max pkt size */ +#define BMAC_RXPMIN 0x314UL /* Receive min pkt size */ +#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */ +#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */ +#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */ +#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */ +#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */ +#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */ +#define BMAC_RCRCECTR 0x330UL /* Receive CRC error counter */ +#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */ +#define BMAC_RXCVALID 0x338UL /* Receiver code violation */ + /* 0x33c, reserved */ +#define BMAC_HTABLE3 0x340UL /* Hash table 3 */ +#define BMAC_HTABLE2 0x344UL /* Hash table 2 */ +#define BMAC_HTABLE1 0x348UL /* Hash table 1 */ +#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */ +#define BMAC_AFILTER2 0x350UL /* Address filter 2 */ +#define BMAC_AFILTER1 0x354UL /* Address filter 1 */ +#define BMAC_AFILTER0 0x358UL /* Address filter 0 */ +#define BMAC_AFMASK 0x35cUL /* Address filter mask */ +#define BMAC_REG_SIZE 0x360UL + +/* BigMac XIF config register. */ +#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */ +#define BIGMAC_XCFG_RESV 0x00000002 /* Reserved, write always as 1 */ +#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */ +#define BIGMAC_XCFG_SMODE 0x00000008 /* Enable serial mode */ + +/* BigMAC status register. */ +#define BIGMAC_STAT_GOTFRAME 0x00000001 /* Received a frame */ +#define BIGMAC_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */ +#define BIGMAC_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */ +#define BIGMAC_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */ +#define BIGMAC_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */ +#define BIGMAC_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */ +#define BIGMAC_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */ +#define BIGMAC_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */ +#define BIGMAC_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ +#define BIGMAC_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */ +#define BIGMAC_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */ +#define BIGMAC_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */ +#define BIGMAC_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */ +#define BIGMAC_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */ +#define BIGMAC_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */ + +/* BigMAC interrupt mask register. 
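+ * A set bit masks the corresponding BIGMAC_STAT_* source.  bigmac_init_hw()
+ * masks GOTFRAME and SENTFRAME because frame completion is serviced via the
+ * QEC channel status (CREG_STAT_TXIRQ/CREG_STAT_RXIRQ) instead.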
*/ +#define BIGMAC_IMASK_GOTFRAME 0x00000001 /* Received a frame */ +#define BIGMAC_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */ +#define BIGMAC_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */ +#define BIGMAC_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */ +#define BIGMAC_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */ +#define BIGMAC_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */ +#define BIGMAC_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */ +#define BIGMAC_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */ +#define BIGMAC_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ +#define BIGMAC_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */ +#define BIGMAC_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */ +#define BIGMAC_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */ +#define BIGMAC_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */ +#define BIGMAC_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */ +#define BIGMAC_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */ + +/* BigMac transmit config register. */ +#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */ +#define BIGMAC_TXCFG_FIFO 0x00000010 /* Default tx fthresh... */ +#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */ +#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */ +#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */ +#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */ +#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */ + +/* BigMac receive config register. */ +#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */ +#define BIGMAC_RXCFG_FIFO 0x0000000e /* Default rx fthresh... */ +#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */ +#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */ +#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */ +#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */ +#define BIGMAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */ +#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */ +#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */ +#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */ + +/* The BigMAC PHY transceiver. Not nearly as sophisticated as the happy meal + * one. But it does have the "bit banger", oh baby. + */ +#define TCVR_TPAL 0x00UL +#define TCVR_MPAL 0x04UL +#define TCVR_REG_SIZE 0x08UL + +/* Frame commands. */ +#define FRAME_WRITE 0x50020000 +#define FRAME_READ 0x60020000 + +/* Tranceiver registers. */ +#define TCVR_PAL_SERIAL 0x00000001 /* Enable serial mode */ +#define TCVR_PAL_EXTLBACK 0x00000002 /* Enable external loopback */ +#define TCVR_PAL_MSENSE 0x00000004 /* Media sense */ +#define TCVR_PAL_LTENABLE 0x00000008 /* Link test enable */ +#define TCVR_PAL_LTSTATUS 0x00000010 /* Link test status (P1 only) */ + +/* Management PAL. */ +#define MGMT_PAL_DCLOCK 0x00000001 /* Data clock */ +#define MGMT_PAL_OENAB 0x00000002 /* Output enabler */ +#define MGMT_PAL_MDIO 0x00000004 /* MDIO Data/attached */ +#define MGMT_PAL_TIMEO 0x00000008 /* Transmit enable timeout error */ +#define MGMT_PAL_EXT_MDIO MGMT_PAL_MDIO +#define MGMT_PAL_INT_MDIO MGMT_PAL_TIMEO + +/* Here are some PHY addresses. 
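+ * They correspond to the external/internal members of
+ * enum bigmac_transceiver further down.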
*/ +#define BIGMAC_PHY_EXTERNAL 0 /* External transceiver */ +#define BIGMAC_PHY_INTERNAL 1 /* Internal transceiver */ + +/* Ring descriptors and such, same as Quad Ethernet. */ +struct be_rxd { + u32 rx_flags; + u32 rx_addr; +}; + +#define RXD_OWN 0x80000000 /* Ownership. */ +#define RXD_UPDATE 0x10000000 /* Being Updated? */ +#define RXD_LENGTH 0x000007ff /* Packet Length. */ + +struct be_txd { + u32 tx_flags; + u32 tx_addr; +}; + +#define TXD_OWN 0x80000000 /* Ownership. */ +#define TXD_SOP 0x40000000 /* Start Of Packet */ +#define TXD_EOP 0x20000000 /* End Of Packet */ +#define TXD_UPDATE 0x10000000 /* Being Updated? */ +#define TXD_LENGTH 0x000007ff /* Packet Length. */ + +#define TX_RING_MAXSIZE 256 +#define RX_RING_MAXSIZE 256 + +#define TX_RING_SIZE 256 +#define RX_RING_SIZE 256 + +#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1)) +#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1)) +#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1)) +#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1)) + +#define TX_BUFFS_AVAIL(bp) \ + (((bp)->tx_old <= (bp)->tx_new) ? \ + (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \ + (bp)->tx_old - (bp)->tx_new - 1) + + +#define RX_COPY_THRESHOLD 256 +#define RX_BUF_ALLOC_SIZE (ETH_FRAME_LEN + (64 * 3)) + +struct bmac_init_block { + struct be_rxd be_rxd[RX_RING_MAXSIZE]; + struct be_txd be_txd[TX_RING_MAXSIZE]; +}; + +#define bib_offset(mem, elem) \ +((__u32)((unsigned long)(&(((struct bmac_init_block *)0)->mem[elem])))) + +/* Now software state stuff. */ +enum bigmac_transceiver { + external = 0, + internal = 1, + none = 2, +}; + +/* Timer state engine. */ +enum bigmac_timer_state { + ltrywait = 1, /* Forcing try of all modes, from fastest to slowest. */ + asleep = 2, /* Timer inactive. */ +}; + +struct bigmac { + void __iomem *gregs; /* QEC Global Registers */ + void __iomem *creg; /* QEC BigMAC Channel Registers */ + void __iomem *bregs; /* BigMAC Registers */ + void __iomem *tregs; /* BigMAC Transceiver */ + struct bmac_init_block *bmac_block; /* RX and TX descriptors */ + __u32 bblock_dvma; /* RX and TX descriptors */ + + spinlock_t lock; + + struct sk_buff *rx_skbs[RX_RING_SIZE]; + struct sk_buff *tx_skbs[TX_RING_SIZE]; + + int rx_new, tx_new, rx_old, tx_old; + + int board_rev; /* BigMAC board revision. */ + + enum bigmac_transceiver tcvr_type; + unsigned int bigmac_bursts; + unsigned int paddr; + unsigned short sw_bmsr; /* SW copy of PHY BMSR */ + unsigned short sw_bmcr; /* SW copy of PHY BMCR */ + struct timer_list bigmac_timer; + enum bigmac_timer_state timer_state; + unsigned int timer_ticks; + + struct net_device_stats enet_stats; + struct platform_device *qec_op; + struct platform_device *bigmac_op; + struct net_device *dev; +}; + +/* We use this to acquire receive skb's that we can DMA directly into. */ +#define ALIGNED_RX_SKB_ADDR(addr) \ + ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) + +static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + 64, gfp_flags); + if(skb) { + int offset = ALIGNED_RX_SKB_ADDR(skb->data); + + if(offset) + skb_reserve(skb, offset); + } + return skb; +} + +#endif /* !(_SUNBMAC_H) */ diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c new file mode 100644 index 000000000..e23a64235 --- /dev/null +++ b/drivers/net/ethernet/sun/sungem.c @@ -0,0 +1,3028 @@ +/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ + * sungem.c: Sun GEM ethernet driver. 
+ * + * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com) + * + * Support for Apple GMAC and assorted PHYs, WOL, Power Management + * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org) + * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. + * + * NAPI and NETPOLL support + * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_SPARC +#include +#include +#endif + +#ifdef CONFIG_PPC_PMAC +#include +#include +#include +#include +#endif + +#include +#include "sungem.h" + +/* Stripping FCS is causing problems, disabled for now */ +#undef STRIP_FCS + +#define DEFAULT_MSG (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ + SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ + SUPPORTED_Pause | SUPPORTED_Autoneg) + +#define DRV_NAME "sungem" +#define DRV_VERSION "1.0" +#define DRV_AUTHOR "David S. Miller " + +static char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); +MODULE_LICENSE("GPL"); + +#define GEM_MODULE_NAME "gem" + +static const struct pci_device_id gem_pci_tbl[] = { + { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + + /* These models only differ from the original GEM in + * that their tx/rx fifos are of a different size and + * they only support 10/100 speeds. -DaveM + * + * Apple's GMAC does support gigabit on machines with + * the BCM54xx PHYs. 
-BenH + */ + { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + {0, } +}; + +MODULE_DEVICE_TABLE(pci, gem_pci_tbl); + +static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg) +{ + u32 cmd; + int limit = 10000; + + cmd = (1 << 30); + cmd |= (2 << 28); + cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; + cmd |= (reg << 18) & MIF_FRAME_REGAD; + cmd |= (MIF_FRAME_TAMSB); + writel(cmd, gp->regs + MIF_FRAME); + + while (--limit) { + cmd = readl(gp->regs + MIF_FRAME); + if (cmd & MIF_FRAME_TALSB) + break; + + udelay(10); + } + + if (!limit) + cmd = 0xffff; + + return cmd & MIF_FRAME_DATA; +} + +static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg) +{ + struct gem *gp = netdev_priv(dev); + return __sungem_phy_read(gp, mii_id, reg); +} + +static inline u16 sungem_phy_read(struct gem *gp, int reg) +{ + return __sungem_phy_read(gp, gp->mii_phy_addr, reg); +} + +static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val) +{ + u32 cmd; + int limit = 10000; + + cmd = (1 << 30); + cmd |= (1 << 28); + cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; + cmd |= (reg << 18) & MIF_FRAME_REGAD; + cmd |= (MIF_FRAME_TAMSB); + cmd |= (val & MIF_FRAME_DATA); + writel(cmd, gp->regs + MIF_FRAME); + + while (limit--) { + cmd = readl(gp->regs + MIF_FRAME); + if (cmd & MIF_FRAME_TALSB) + break; + + udelay(10); + } +} + +static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val) +{ + struct gem *gp = netdev_priv(dev); + __sungem_phy_write(gp, mii_id, reg, val & 0xffff); +} + +static inline void sungem_phy_write(struct gem *gp, int reg, u16 val) +{ + __sungem_phy_write(gp, gp->mii_phy_addr, reg, val); +} + +static inline void gem_enable_ints(struct gem *gp) +{ + /* Enable all interrupts but TXDONE */ + writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); +} + +static inline void gem_disable_ints(struct gem *gp) +{ + /* Disable all interrupts, including TXDONE */ + writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); + (void)readl(gp->regs + GREG_IMASK); /* write posting */ +} + +static void gem_get_cell(struct gem *gp) +{ + BUG_ON(gp->cell_enabled < 0); + gp->cell_enabled++; +#ifdef CONFIG_PPC_PMAC + if (gp->cell_enabled == 1) { + mb(); + pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); + udelay(10); + } +#endif /* CONFIG_PPC_PMAC */ +} + +/* Turn off the chip's clock */ +static void gem_put_cell(struct gem *gp) +{ + BUG_ON(gp->cell_enabled <= 0); + gp->cell_enabled--; +#ifdef CONFIG_PPC_PMAC + if (gp->cell_enabled == 0) { + mb(); + pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); + udelay(10); + } +#endif /* CONFIG_PPC_PMAC */ +} + +static inline void gem_netif_stop(struct gem *gp) +{ + gp->dev->trans_start = jiffies; /* prevent tx timeout */ + napi_disable(&gp->napi); + netif_tx_disable(gp->dev); +} + +static inline void gem_netif_start(struct gem *gp) +{ + /* NOTE: unconditional netif_wake_queue is only + * 
appropriate so long as all callers are assured to + * have free tx slots. + */ + netif_wake_queue(gp->dev); + napi_enable(&gp->napi); +} + +static void gem_schedule_reset(struct gem *gp) +{ + gp->reset_task_pending = 1; + schedule_work(&gp->reset_task); +} + +static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) +{ + if (netif_msg_intr(gp)) + printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); +} + +static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + u32 pcs_istat = readl(gp->regs + PCS_ISTAT); + u32 pcs_miistat; + + if (netif_msg_intr(gp)) + printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", + gp->dev->name, pcs_istat); + + if (!(pcs_istat & PCS_ISTAT_LSC)) { + netdev_err(dev, "PCS irq but no link status change???\n"); + return 0; + } + + /* The link status bit latches on zero, so you must + * read it twice in such a case to see a transition + * to the link being up. + */ + pcs_miistat = readl(gp->regs + PCS_MIISTAT); + if (!(pcs_miistat & PCS_MIISTAT_LS)) + pcs_miistat |= + (readl(gp->regs + PCS_MIISTAT) & + PCS_MIISTAT_LS); + + if (pcs_miistat & PCS_MIISTAT_ANC) { + /* The remote-fault indication is only valid + * when autoneg has completed. + */ + if (pcs_miistat & PCS_MIISTAT_RF) + netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n"); + else + netdev_info(dev, "PCS AutoNEG complete\n"); + } + + if (pcs_miistat & PCS_MIISTAT_LS) { + netdev_info(dev, "PCS link is now up\n"); + netif_carrier_on(gp->dev); + } else { + netdev_info(dev, "PCS link is now down\n"); + netif_carrier_off(gp->dev); + /* If this happens and the link timer is not running, + * reset so we re-negotiate. + */ + if (!timer_pending(&gp->link_timer)) + return 1; + } + + return 0; +} + +static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); + + if (netif_msg_intr(gp)) + printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", + gp->dev->name, txmac_stat); + + /* Defer timer expiration is quite normal, + * don't even log the event. + */ + if ((txmac_stat & MAC_TXSTAT_DTE) && + !(txmac_stat & ~MAC_TXSTAT_DTE)) + return 0; + + if (txmac_stat & MAC_TXSTAT_URUN) { + netdev_err(dev, "TX MAC xmit underrun\n"); + dev->stats.tx_fifo_errors++; + } + + if (txmac_stat & MAC_TXSTAT_MPE) { + netdev_err(dev, "TX MAC max packet size error\n"); + dev->stats.tx_errors++; + } + + /* The rest are all cases of one of the 16-bit TX + * counters expiring. + */ + if (txmac_stat & MAC_TXSTAT_NCE) + dev->stats.collisions += 0x10000; + + if (txmac_stat & MAC_TXSTAT_ECE) { + dev->stats.tx_aborted_errors += 0x10000; + dev->stats.collisions += 0x10000; + } + + if (txmac_stat & MAC_TXSTAT_LCE) { + dev->stats.tx_aborted_errors += 0x10000; + dev->stats.collisions += 0x10000; + } + + /* We do not keep track of MAC_TXSTAT_FCE and + * MAC_TXSTAT_PCE events. + */ + return 0; +} + +/* When we get a RX fifo overflow, the RX unit in GEM is probably hung + * so we do the following. + * + * If any part of the reset goes wrong, we return 1 and that causes the + * whole chip to be reset. + */ +static int gem_rxmac_reset(struct gem *gp) +{ + struct net_device *dev = gp->dev; + int limit, i; + u64 desc_dma; + u32 val; + + /* First, reset & disable MAC RX. 
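+ * The recovery sequence is: reset and disable the RX MAC, stop RX DMA,
+ * issue the RX software reset, refresh the RX ring, then reprogram and
+ * re-enable the RX DMA and MAC units.  If any step fails we return 1
+ * and the caller resets the whole chip.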
*/ + writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); + for (limit = 0; limit < 5000; limit++) { + if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) + break; + udelay(10); + } + if (limit == 5000) { + netdev_err(dev, "RX MAC will not reset, resetting whole chip\n"); + return 1; + } + + writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, + gp->regs + MAC_RXCFG); + for (limit = 0; limit < 5000; limit++) { + if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) + break; + udelay(10); + } + if (limit == 5000) { + netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); + return 1; + } + + /* Second, disable RX DMA. */ + writel(0, gp->regs + RXDMA_CFG); + for (limit = 0; limit < 5000; limit++) { + if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) + break; + udelay(10); + } + if (limit == 5000) { + netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); + return 1; + } + + mdelay(5); + + /* Execute RX reset command. */ + writel(gp->swrst_base | GREG_SWRST_RXRST, + gp->regs + GREG_SWRST); + for (limit = 0; limit < 5000; limit++) { + if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) + break; + udelay(10); + } + if (limit == 5000) { + netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); + return 1; + } + + /* Refresh the RX ring. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct gem_rxd *rxd = &gp->init_block->rxd[i]; + + if (gp->rx_skbs[i] == NULL) { + netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n"); + return 1; + } + + rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); + } + gp->rx_new = gp->rx_old = 0; + + /* Now we must reprogram the rest of RX unit. */ + desc_dma = (u64) gp->gblock_dvma; + desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); + writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); + writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); + writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); + val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | + ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + writel(val, gp->regs + RXDMA_CFG); + if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) + writel(((5 & RXDMA_BLANK_IPKTS) | + ((8 << 12) & RXDMA_BLANK_ITIME)), + gp->regs + RXDMA_BLANK); + else + writel(((5 & RXDMA_BLANK_IPKTS) | + ((4 << 12) & RXDMA_BLANK_ITIME)), + gp->regs + RXDMA_BLANK); + val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); + val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); + writel(val, gp->regs + RXDMA_PTHRESH); + val = readl(gp->regs + RXDMA_CFG); + writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); + writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); + val = readl(gp->regs + MAC_RXCFG); + writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); + + return 0; +} + +static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); + int ret = 0; + + if (netif_msg_intr(gp)) + printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", + gp->dev->name, rxmac_stat); + + if (rxmac_stat & MAC_RXSTAT_OFLW) { + u32 smac = readl(gp->regs + MAC_SMACHINE); + + netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); + dev->stats.rx_over_errors++; + dev->stats.rx_fifo_errors++; + + ret = gem_rxmac_reset(gp); + } + + if (rxmac_stat & MAC_RXSTAT_ACE) + dev->stats.rx_frame_errors += 0x10000; + + if (rxmac_stat & MAC_RXSTAT_CCE) + dev->stats.rx_crc_errors += 0x10000; + + if (rxmac_stat & MAC_RXSTAT_LCE) + dev->stats.rx_length_errors += 0x10000; + + /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE + * events. 
+ */ + return ret; +} + +static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + u32 mac_cstat = readl(gp->regs + MAC_CSTAT); + + if (netif_msg_intr(gp)) + printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", + gp->dev->name, mac_cstat); + + /* This interrupt is just for pause frame and pause + * tracking. It is useful for diagnostics and debug + * but probably by default we will mask these events. + */ + if (mac_cstat & MAC_CSTAT_PS) + gp->pause_entered++; + + if (mac_cstat & MAC_CSTAT_PRCV) + gp->pause_last_time_recvd = (mac_cstat >> 16); + + return 0; +} + +static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + u32 mif_status = readl(gp->regs + MIF_STATUS); + u32 reg_val, changed_bits; + + reg_val = (mif_status & MIF_STATUS_DATA) >> 16; + changed_bits = (mif_status & MIF_STATUS_STAT); + + gem_handle_mif_event(gp, reg_val, changed_bits); + + return 0; +} + +static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); + + if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && + gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { + netdev_err(dev, "PCI error [%04x]", pci_estat); + + if (pci_estat & GREG_PCIESTAT_BADACK) + pr_cont(" "); + if (pci_estat & GREG_PCIESTAT_DTRTO) + pr_cont(" "); + if (pci_estat & GREG_PCIESTAT_OTHER) + pr_cont(" "); + pr_cont("\n"); + } else { + pci_estat |= GREG_PCIESTAT_OTHER; + netdev_err(dev, "PCI error\n"); + } + + if (pci_estat & GREG_PCIESTAT_OTHER) { + u16 pci_cfg_stat; + + /* Interrogate PCI config space for the + * true cause. + */ + pci_read_config_word(gp->pdev, PCI_STATUS, + &pci_cfg_stat); + netdev_err(dev, "Read PCI cfg space status [%04x]\n", + pci_cfg_stat); + if (pci_cfg_stat & PCI_STATUS_PARITY) + netdev_err(dev, "PCI parity error detected\n"); + if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) + netdev_err(dev, "PCI target abort\n"); + if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) + netdev_err(dev, "PCI master acks target abort\n"); + if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) + netdev_err(dev, "PCI master abort\n"); + if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) + netdev_err(dev, "PCI system error SERR#\n"); + if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) + netdev_err(dev, "PCI parity error\n"); + + /* Write the error bits back to clear them. */ + pci_cfg_stat &= (PCI_STATUS_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR | + PCI_STATUS_DETECTED_PARITY); + pci_write_config_word(gp->pdev, + PCI_STATUS, pci_cfg_stat); + } + + /* For all PCI errors, we should reset the chip. */ + return 1; +} + +/* All non-normal interrupt conditions get serviced here. + * Returns non-zero if we should just exit the interrupt + * handler right now (ie. if we reset the card which invalidates + * all of the other original irq status bits). + */ +static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + if (gem_status & GREG_STAT_RXNOBUF) { + /* Frame arrived, no free RX buffers available. 
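+ * Not fatal: count it as a drop and let the normal RX refill done in
+ * gem_rx()/gem_post_rxds() catch up.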
*/ + if (netif_msg_rx_err(gp)) + printk(KERN_DEBUG "%s: no buffer for rx frame\n", + gp->dev->name); + dev->stats.rx_dropped++; + } + + if (gem_status & GREG_STAT_RXTAGERR) { + /* corrupt RX tag framing */ + if (netif_msg_rx_err(gp)) + printk(KERN_DEBUG "%s: corrupt rx tag framing\n", + gp->dev->name); + dev->stats.rx_errors++; + + return 1; + } + + if (gem_status & GREG_STAT_PCS) { + if (gem_pcs_interrupt(dev, gp, gem_status)) + return 1; + } + + if (gem_status & GREG_STAT_TXMAC) { + if (gem_txmac_interrupt(dev, gp, gem_status)) + return 1; + } + + if (gem_status & GREG_STAT_RXMAC) { + if (gem_rxmac_interrupt(dev, gp, gem_status)) + return 1; + } + + if (gem_status & GREG_STAT_MAC) { + if (gem_mac_interrupt(dev, gp, gem_status)) + return 1; + } + + if (gem_status & GREG_STAT_MIF) { + if (gem_mif_interrupt(dev, gp, gem_status)) + return 1; + } + + if (gem_status & GREG_STAT_PCIERR) { + if (gem_pci_interrupt(dev, gp, gem_status)) + return 1; + } + + return 0; +} + +static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) +{ + int entry, limit; + + entry = gp->tx_old; + limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); + while (entry != limit) { + struct sk_buff *skb; + struct gem_txd *txd; + dma_addr_t dma_addr; + u32 dma_len; + int frag; + + if (netif_msg_tx_done(gp)) + printk(KERN_DEBUG "%s: tx done, slot %d\n", + gp->dev->name, entry); + skb = gp->tx_skbs[entry]; + if (skb_shinfo(skb)->nr_frags) { + int last = entry + skb_shinfo(skb)->nr_frags; + int walk = entry; + int incomplete = 0; + + last &= (TX_RING_SIZE - 1); + for (;;) { + walk = NEXT_TX(walk); + if (walk == limit) + incomplete = 1; + if (walk == last) + break; + } + if (incomplete) + break; + } + gp->tx_skbs[entry] = NULL; + dev->stats.tx_bytes += skb->len; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + txd = &gp->init_block->txd[entry]; + + dma_addr = le64_to_cpu(txd->buffer); + dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; + + pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); + entry = NEXT_TX(entry); + } + + dev->stats.tx_packets++; + dev_consume_skb_any(skb); + } + gp->tx_old = entry; + + /* Need to make the tx_old update visible to gem_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that gem_start_xmit() + * will miss it and cause the queue to be stopped forever. 
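+ * The pairing barrier is the smp_mb() in gem_start_xmit(), between
+ * netif_stop_queue() and its re-check of TX_BUFFS_AVAIL().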
+ */ + smp_mb(); + + if (unlikely(netif_queue_stopped(dev) && + TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); + + __netif_tx_lock(txq, smp_processor_id()); + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) + netif_wake_queue(dev); + __netif_tx_unlock(txq); + } +} + +static __inline__ void gem_post_rxds(struct gem *gp, int limit) +{ + int cluster_start, curr, count, kick; + + cluster_start = curr = (gp->rx_new & ~(4 - 1)); + count = 0; + kick = -1; + dma_wmb(); + while (curr != limit) { + curr = NEXT_RX(curr); + if (++count == 4) { + struct gem_rxd *rxd = + &gp->init_block->rxd[cluster_start]; + for (;;) { + rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); + rxd++; + cluster_start = NEXT_RX(cluster_start); + if (cluster_start == curr) + break; + } + kick = curr; + count = 0; + } + } + if (kick >= 0) { + mb(); + writel(kick, gp->regs + RXDMA_KICK); + } +} + +#define ALIGNED_RX_SKB_ADDR(addr) \ + ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) +static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, + gfp_t gfp_flags) +{ + struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); + + if (likely(skb)) { + unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); + skb_reserve(skb, offset); + } + return skb; +} + +static int gem_rx(struct gem *gp, int work_to_do) +{ + struct net_device *dev = gp->dev; + int entry, drops, work_done = 0; + u32 done; + __sum16 csum; + + if (netif_msg_rx_status(gp)) + printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", + gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); + + entry = gp->rx_new; + drops = 0; + done = readl(gp->regs + RXDMA_DONE); + for (;;) { + struct gem_rxd *rxd = &gp->init_block->rxd[entry]; + struct sk_buff *skb; + u64 status = le64_to_cpu(rxd->status_word); + dma_addr_t dma_addr; + int len; + + if ((status & RXDCTRL_OWN) != 0) + break; + + if (work_done >= RX_RING_SIZE || work_done >= work_to_do) + break; + + /* When writing back RX descriptor, GEM writes status + * then buffer address, possibly in separate transactions. + * If we don't wait for the chip to write both, we could + * post a new buffer to this descriptor then have GEM spam + * on the buffer address. We sync on the RX completion + * register to prevent this from happening. + */ + if (entry == done) { + done = readl(gp->regs + RXDMA_DONE); + if (entry == done) + break; + } + + /* We can now account for the work we're about to do */ + work_done++; + + skb = gp->rx_skbs[entry]; + + len = (status & RXDCTRL_BUFSZ) >> 16; + if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { + dev->stats.rx_errors++; + if (len < ETH_ZLEN) + dev->stats.rx_length_errors++; + if (len & RXDCTRL_BAD) + dev->stats.rx_crc_errors++; + + /* We'll just return it to GEM. */ + drop_it: + dev->stats.rx_dropped++; + goto next; + } + + dma_addr = le64_to_cpu(rxd->buffer); + if (len > RX_COPY_THRESHOLD) { + struct sk_buff *new_skb; + + new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); + if (new_skb == NULL) { + drops++; + goto drop_it; + } + pci_unmap_page(gp->pdev, dma_addr, + RX_BUF_ALLOC_SIZE(gp), + PCI_DMA_FROMDEVICE); + gp->rx_skbs[entry] = new_skb; + skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); + rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, + virt_to_page(new_skb->data), + offset_in_page(new_skb->data), + RX_BUF_ALLOC_SIZE(gp), + PCI_DMA_FROMDEVICE)); + skb_reserve(new_skb, RX_OFFSET); + + /* Trim the original skb for the netif. 
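+ * The replacement skb now owns this ring slot, so the original
+ * full-sized buffer is handed up the stack trimmed to the received
+ * length.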
*/ + skb_trim(skb, len); + } else { + struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); + + if (copy_skb == NULL) { + drops++; + goto drop_it; + } + + skb_reserve(copy_skb, 2); + skb_put(copy_skb, len); + pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(skb, copy_skb->data, len); + pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + + /* We'll reuse the original ring buffer. */ + skb = copy_skb; + } + + csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); + skb->csum = csum_unfold(csum); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->protocol = eth_type_trans(skb, gp->dev); + + napi_gro_receive(&gp->napi, skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + next: + entry = NEXT_RX(entry); + } + + gem_post_rxds(gp, entry); + + gp->rx_new = entry; + + if (drops) + netdev_info(gp->dev, "Memory squeeze, deferring packet\n"); + + return work_done; +} + +static int gem_poll(struct napi_struct *napi, int budget) +{ + struct gem *gp = container_of(napi, struct gem, napi); + struct net_device *dev = gp->dev; + int work_done; + + work_done = 0; + do { + /* Handle anomalies */ + if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); + int reset; + + /* We run the abnormal interrupt handling code with + * the Tx lock. It only resets the Rx portion of the + * chip, but we need to guard it against DMA being + * restarted by the link poll timer + */ + __netif_tx_lock(txq, smp_processor_id()); + reset = gem_abnormal_irq(dev, gp, gp->status); + __netif_tx_unlock(txq); + if (reset) { + gem_schedule_reset(gp); + napi_complete(napi); + return work_done; + } + } + + /* Run TX completion thread */ + gem_tx(dev, gp, gp->status); + + /* Run RX thread. We don't use any locking here, + * code willing to do bad things - like cleaning the + * rx ring - must call napi_disable(), which + * schedule_timeout()'s if polling is already disabled. + */ + work_done += gem_rx(gp, budget - work_done); + + if (work_done >= budget) + return work_done; + + gp->status = readl(gp->regs + GREG_STAT); + } while (gp->status & GREG_STAT_NAPI); + + napi_complete(napi); + gem_enable_ints(gp); + + return work_done; +} + +static irqreturn_t gem_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct gem *gp = netdev_priv(dev); + + if (napi_schedule_prep(&gp->napi)) { + u32 gem_status = readl(gp->regs + GREG_STAT); + + if (unlikely(gem_status == 0)) { + napi_enable(&gp->napi); + return IRQ_NONE; + } + if (netif_msg_intr(gp)) + printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", + gp->dev->name, gem_status); + + gp->status = gem_status; + gem_disable_ints(gp); + __napi_schedule(&gp->napi); + } + + /* If polling was disabled at the time we received that + * interrupt, we may return IRQ_HANDLED here while we + * should return IRQ_NONE. No big deal... 
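+ * (the only cost is slightly skewed unhandled/spurious-interrupt
+ * accounting for this line)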
+ */ + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void gem_poll_controller(struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + + disable_irq(gp->pdev->irq); + gem_interrupt(gp->pdev->irq, dev); + enable_irq(gp->pdev->irq); +} +#endif + +static void gem_tx_timeout(struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + + netdev_err(dev, "transmit timed out, resetting\n"); + + netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", + readl(gp->regs + TXDMA_CFG), + readl(gp->regs + MAC_TXSTAT), + readl(gp->regs + MAC_TXCFG)); + netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", + readl(gp->regs + RXDMA_CFG), + readl(gp->regs + MAC_RXSTAT), + readl(gp->regs + MAC_RXCFG)); + + gem_schedule_reset(gp); +} + +static __inline__ int gem_intme(int entry) +{ + /* Algorithm: IRQ every 1/2 of descriptors. */ + if (!(entry & ((TX_RING_SIZE>>1)-1))) + return 1; + + return 0; +} + +static netdev_tx_t gem_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + int entry; + u64 ctrl; + + ctrl = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const u64 csum_start_off = skb_checksum_start_offset(skb); + const u64 csum_stuff_off = csum_start_off + skb->csum_offset; + + ctrl = (TXDCTRL_CENAB | + (csum_start_off << 15) | + (csum_stuff_off << 21)); + } + + if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { + /* This is a hard error, log it. */ + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); + } + return NETDEV_TX_BUSY; + } + + entry = gp->tx_new; + gp->tx_skbs[entry] = skb; + + if (skb_shinfo(skb)->nr_frags == 0) { + struct gem_txd *txd = &gp->init_block->txd[entry]; + dma_addr_t mapping; + u32 len; + + len = skb->len; + mapping = pci_map_page(gp->pdev, + virt_to_page(skb->data), + offset_in_page(skb->data), + len, PCI_DMA_TODEVICE); + ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; + if (gem_intme(entry)) + ctrl |= TXDCTRL_INTME; + txd->buffer = cpu_to_le64(mapping); + dma_wmb(); + txd->control_word = cpu_to_le64(ctrl); + entry = NEXT_TX(entry); + } else { + struct gem_txd *txd; + u32 first_len; + u64 intme; + dma_addr_t first_mapping; + int frag, first_entry = entry; + + intme = 0; + if (gem_intme(entry)) + intme |= TXDCTRL_INTME; + + /* We must give this initial chunk to the device last. + * Otherwise we could race with the device. 
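+ * So the SOF descriptor's control word is written last, below, once
+ * every fragment descriptor has been fully set up.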
+ */ + first_len = skb_headlen(skb); + first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), + first_len, PCI_DMA_TODEVICE); + entry = NEXT_TX(entry); + + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; + u32 len; + dma_addr_t mapping; + u64 this_ctrl; + + len = skb_frag_size(this_frag); + mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag, + 0, len, DMA_TO_DEVICE); + this_ctrl = ctrl; + if (frag == skb_shinfo(skb)->nr_frags - 1) + this_ctrl |= TXDCTRL_EOF; + + txd = &gp->init_block->txd[entry]; + txd->buffer = cpu_to_le64(mapping); + dma_wmb(); + txd->control_word = cpu_to_le64(this_ctrl | len); + + if (gem_intme(entry)) + intme |= TXDCTRL_INTME; + + entry = NEXT_TX(entry); + } + txd = &gp->init_block->txd[first_entry]; + txd->buffer = cpu_to_le64(first_mapping); + dma_wmb(); + txd->control_word = + cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); + } + + gp->tx_new = entry; + if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { + netif_stop_queue(dev); + + /* netif_stop_queue() must be done before checking + * checking tx index in TX_BUFFS_AVAIL() below, because + * in gem_tx(), we update tx_old before checking for + * netif_queue_stopped(). + */ + smp_mb(); + if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) + netif_wake_queue(dev); + } + if (netif_msg_tx_queued(gp)) + printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", + dev->name, entry, skb->len); + mb(); + writel(gp->tx_new, gp->regs + TXDMA_KICK); + + return NETDEV_TX_OK; +} + +static void gem_pcs_reset(struct gem *gp) +{ + int limit; + u32 val; + + /* Reset PCS unit. */ + val = readl(gp->regs + PCS_MIICTRL); + val |= PCS_MIICTRL_RST; + writel(val, gp->regs + PCS_MIICTRL); + + limit = 32; + while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { + udelay(100); + if (limit-- <= 0) + break; + } + if (limit < 0) + netdev_warn(gp->dev, "PCS reset bit would not clear\n"); +} + +static void gem_pcs_reinit_adv(struct gem *gp) +{ + u32 val; + + /* Make sure PCS is disabled while changing advertisement + * configuration. + */ + val = readl(gp->regs + PCS_CFG); + val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); + writel(val, gp->regs + PCS_CFG); + + /* Advertise all capabilities except asymmetric + * pause. + */ + val = readl(gp->regs + PCS_MIIADV); + val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | + PCS_MIIADV_SP | PCS_MIIADV_AP); + writel(val, gp->regs + PCS_MIIADV); + + /* Enable and restart auto-negotiation, disable wrapback/loopback, + * and re-enable PCS. + */ + val = readl(gp->regs + PCS_MIICTRL); + val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); + val &= ~PCS_MIICTRL_WB; + writel(val, gp->regs + PCS_MIICTRL); + + val = readl(gp->regs + PCS_CFG); + val |= PCS_CFG_ENABLE; + writel(val, gp->regs + PCS_CFG); + + /* Make sure serialink loopback is off. The meaning + * of this bit is logically inverted based upon whether + * you are in Serialink or SERDES mode. 
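+	 * Below, the bit is cleared for Serialink and set for SERDES.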
+ */ + val = readl(gp->regs + PCS_SCTRL); + if (gp->phy_type == phy_serialink) + val &= ~PCS_SCTRL_LOOP; + else + val |= PCS_SCTRL_LOOP; + writel(val, gp->regs + PCS_SCTRL); +} + +#define STOP_TRIES 32 + +static void gem_reset(struct gem *gp) +{ + int limit; + u32 val; + + /* Make sure we won't get any more interrupts */ + writel(0xffffffff, gp->regs + GREG_IMASK); + + /* Reset the chip */ + writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, + gp->regs + GREG_SWRST); + + limit = STOP_TRIES; + + do { + udelay(20); + val = readl(gp->regs + GREG_SWRST); + if (limit-- <= 0) + break; + } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); + + if (limit < 0) + netdev_err(gp->dev, "SW reset is ghetto\n"); + + if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) + gem_pcs_reinit_adv(gp); +} + +static void gem_start_dma(struct gem *gp) +{ + u32 val; + + /* We are ready to rock, turn everything on. */ + val = readl(gp->regs + TXDMA_CFG); + writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); + val = readl(gp->regs + RXDMA_CFG); + writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); + val = readl(gp->regs + MAC_TXCFG); + writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); + val = readl(gp->regs + MAC_RXCFG); + writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); + + (void) readl(gp->regs + MAC_RXCFG); + udelay(100); + + gem_enable_ints(gp); + + writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); +} + +/* DMA won't be actually stopped before about 4ms tho ... + */ +static void gem_stop_dma(struct gem *gp) +{ + u32 val; + + /* We are done rocking, turn everything off. */ + val = readl(gp->regs + TXDMA_CFG); + writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); + val = readl(gp->regs + RXDMA_CFG); + writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); + val = readl(gp->regs + MAC_TXCFG); + writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); + val = readl(gp->regs + MAC_RXCFG); + writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); + + (void) readl(gp->regs + MAC_RXCFG); + + /* Need to wait a bit ... 
done by the caller */ +} + + +// XXX dbl check what that function should do when called on PCS PHY +static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) +{ + u32 advertise, features; + int autoneg; + int speed; + int duplex; + + if (gp->phy_type != phy_mii_mdio0 && + gp->phy_type != phy_mii_mdio1) + goto non_mii; + + /* Setup advertise */ + if (found_mii_phy(gp)) + features = gp->phy_mii.def->features; + else + features = 0; + + advertise = features & ADVERTISE_MASK; + if (gp->phy_mii.advertising != 0) + advertise &= gp->phy_mii.advertising; + + autoneg = gp->want_autoneg; + speed = gp->phy_mii.speed; + duplex = gp->phy_mii.duplex; + + /* Setup link parameters */ + if (!ep) + goto start_aneg; + if (ep->autoneg == AUTONEG_ENABLE) { + advertise = ep->advertising; + autoneg = 1; + } else { + autoneg = 0; + speed = ethtool_cmd_speed(ep); + duplex = ep->duplex; + } + +start_aneg: + /* Sanitize settings based on PHY capabilities */ + if ((features & SUPPORTED_Autoneg) == 0) + autoneg = 0; + if (speed == SPEED_1000 && + !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) + speed = SPEED_100; + if (speed == SPEED_100 && + !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) + speed = SPEED_10; + if (duplex == DUPLEX_FULL && + !(features & (SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Full))) + duplex = DUPLEX_HALF; + if (speed == 0) + speed = SPEED_10; + + /* If we are asleep, we don't try to actually setup the PHY, we + * just store the settings + */ + if (!netif_device_present(gp->dev)) { + gp->phy_mii.autoneg = gp->want_autoneg = autoneg; + gp->phy_mii.speed = speed; + gp->phy_mii.duplex = duplex; + return; + } + + /* Configure PHY & start aneg */ + gp->want_autoneg = autoneg; + if (autoneg) { + if (found_mii_phy(gp)) + gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); + gp->lstate = link_aneg; + } else { + if (found_mii_phy(gp)) + gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); + gp->lstate = link_force_ok; + } + +non_mii: + gp->timer_ticks = 0; + mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); +} + +/* A link-up condition has occurred, initialize and enable the + * rest of the chip. + */ +static int gem_set_link_modes(struct gem *gp) +{ + struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); + int full_duplex, speed, pause; + u32 val; + + full_duplex = 0; + speed = SPEED_10; + pause = 0; + + if (found_mii_phy(gp)) { + if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) + return 1; + full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); + speed = gp->phy_mii.speed; + pause = gp->phy_mii.pause; + } else if (gp->phy_type == phy_serialink || + gp->phy_type == phy_serdes) { + u32 pcs_lpa = readl(gp->regs + PCS_MIILP); + + if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes) + full_duplex = 1; + speed = SPEED_1000; + } + + netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", + speed, (full_duplex ? "full" : "half")); + + + /* We take the tx queue lock to avoid collisions between + * this code, the tx path and the NAPI-driven error path + */ + __netif_tx_lock(txq, smp_processor_id()); + + val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); + if (full_duplex) { + val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); + } else { + /* MAC_TXCFG_NBO must be zero. 
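+		 * Nothing extra is set in this (half-duplex) branch.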
*/ + } + writel(val, gp->regs + MAC_TXCFG); + + val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); + if (!full_duplex && + (gp->phy_type == phy_mii_mdio0 || + gp->phy_type == phy_mii_mdio1)) { + val |= MAC_XIFCFG_DISE; + } else if (full_duplex) { + val |= MAC_XIFCFG_FLED; + } + + if (speed == SPEED_1000) + val |= (MAC_XIFCFG_GMII); + + writel(val, gp->regs + MAC_XIFCFG); + + /* If gigabit and half-duplex, enable carrier extension + * mode. Else, disable it. + */ + if (speed == SPEED_1000 && !full_duplex) { + val = readl(gp->regs + MAC_TXCFG); + writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); + + val = readl(gp->regs + MAC_RXCFG); + writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); + } else { + val = readl(gp->regs + MAC_TXCFG); + writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); + + val = readl(gp->regs + MAC_RXCFG); + writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); + } + + if (gp->phy_type == phy_serialink || + gp->phy_type == phy_serdes) { + u32 pcs_lpa = readl(gp->regs + PCS_MIILP); + + if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) + pause = 1; + } + + if (!full_duplex) + writel(512, gp->regs + MAC_STIME); + else + writel(64, gp->regs + MAC_STIME); + val = readl(gp->regs + MAC_MCCFG); + if (pause) + val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); + else + val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); + writel(val, gp->regs + MAC_MCCFG); + + gem_start_dma(gp); + + __netif_tx_unlock(txq); + + if (netif_msg_link(gp)) { + if (pause) { + netdev_info(gp->dev, + "Pause is enabled (rxfifo: %d off: %d on: %d)\n", + gp->rx_fifo_sz, + gp->rx_pause_off, + gp->rx_pause_on); + } else { + netdev_info(gp->dev, "Pause is disabled\n"); + } + } + + return 0; +} + +static int gem_mdio_link_not_up(struct gem *gp) +{ + switch (gp->lstate) { + case link_force_ret: + netif_info(gp, link, gp->dev, + "Autoneg failed again, keeping forced mode\n"); + gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, + gp->last_forced_speed, DUPLEX_HALF); + gp->timer_ticks = 5; + gp->lstate = link_force_ok; + return 0; + case link_aneg: + /* We try forced modes after a failed aneg only on PHYs that don't + * have "magic_aneg" bit set, which means they internally do the + * while forced-mode thingy. On these, we just restart aneg + */ + if (gp->phy_mii.def->magic_aneg) + return 1; + netif_info(gp, link, gp->dev, "switching to forced 100bt\n"); + /* Try forced modes. */ + gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, + DUPLEX_HALF); + gp->timer_ticks = 5; + gp->lstate = link_force_try; + return 0; + case link_force_try: + /* Downgrade from 100 to 10 Mbps if necessary. + * If already at 10Mbps, warn user about the + * situation every 10 ticks. 
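+		 * A return value of 1 tells the link timer to restart autoneg.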
+ */ + if (gp->phy_mii.speed == SPEED_100) { + gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, + DUPLEX_HALF); + gp->timer_ticks = 5; + netif_info(gp, link, gp->dev, + "switching to forced 10bt\n"); + return 0; + } else + return 1; + default: + return 0; + } +} + +static void gem_link_timer(unsigned long data) +{ + struct gem *gp = (struct gem *) data; + struct net_device *dev = gp->dev; + int restart_aneg = 0; + + /* There's no point doing anything if we're going to be reset */ + if (gp->reset_task_pending) + return; + + if (gp->phy_type == phy_serialink || + gp->phy_type == phy_serdes) { + u32 val = readl(gp->regs + PCS_MIISTAT); + + if (!(val & PCS_MIISTAT_LS)) + val = readl(gp->regs + PCS_MIISTAT); + + if ((val & PCS_MIISTAT_LS) != 0) { + if (gp->lstate == link_up) + goto restart; + + gp->lstate = link_up; + netif_carrier_on(dev); + (void)gem_set_link_modes(gp); + } + goto restart; + } + if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { + /* Ok, here we got a link. If we had it due to a forced + * fallback, and we were configured for autoneg, we do + * retry a short autoneg pass. If you know your hub is + * broken, use ethtool ;) + */ + if (gp->lstate == link_force_try && gp->want_autoneg) { + gp->lstate = link_force_ret; + gp->last_forced_speed = gp->phy_mii.speed; + gp->timer_ticks = 5; + if (netif_msg_link(gp)) + netdev_info(dev, + "Got link after fallback, retrying autoneg once...\n"); + gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); + } else if (gp->lstate != link_up) { + gp->lstate = link_up; + netif_carrier_on(dev); + if (gem_set_link_modes(gp)) + restart_aneg = 1; + } + } else { + /* If the link was previously up, we restart the + * whole process + */ + if (gp->lstate == link_up) { + gp->lstate = link_down; + netif_info(gp, link, dev, "Link down\n"); + netif_carrier_off(dev); + gem_schedule_reset(gp); + /* The reset task will restart the timer */ + return; + } else if (++gp->timer_ticks > 10) { + if (found_mii_phy(gp)) + restart_aneg = gem_mdio_link_not_up(gp); + else + restart_aneg = 1; + } + } + if (restart_aneg) { + gem_begin_auto_negotiation(gp, NULL); + return; + } +restart: + mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); +} + +static void gem_clean_rings(struct gem *gp) +{ + struct gem_init_block *gb = gp->init_block; + struct sk_buff *skb; + int i; + dma_addr_t dma_addr; + + for (i = 0; i < RX_RING_SIZE; i++) { + struct gem_rxd *rxd; + + rxd = &gb->rxd[i]; + if (gp->rx_skbs[i] != NULL) { + skb = gp->rx_skbs[i]; + dma_addr = le64_to_cpu(rxd->buffer); + pci_unmap_page(gp->pdev, dma_addr, + RX_BUF_ALLOC_SIZE(gp), + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + gp->rx_skbs[i] = NULL; + } + rxd->status_word = 0; + dma_wmb(); + rxd->buffer = 0; + } + + for (i = 0; i < TX_RING_SIZE; i++) { + if (gp->tx_skbs[i] != NULL) { + struct gem_txd *txd; + int frag; + + skb = gp->tx_skbs[i]; + gp->tx_skbs[i] = NULL; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + int ent = i & (TX_RING_SIZE - 1); + + txd = &gb->txd[ent]; + dma_addr = le64_to_cpu(txd->buffer); + pci_unmap_page(gp->pdev, dma_addr, + le64_to_cpu(txd->control_word) & + TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); + + if (frag != skb_shinfo(skb)->nr_frags) + i++; + } + dev_kfree_skb_any(skb); + } + } +} + +static void gem_init_rings(struct gem *gp) +{ + struct gem_init_block *gb = gp->init_block; + struct net_device *dev = gp->dev; + int i; + dma_addr_t dma_addr; + + gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; + + gem_clean_rings(gp); + 
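+	/* Size RX buffers for the current MTU, but never below a full VLAN-tagged Ethernet frame. */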
+ gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, + (unsigned)VLAN_ETH_FRAME_LEN); + + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + struct gem_rxd *rxd = &gb->rxd[i]; + + skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); + if (!skb) { + rxd->buffer = 0; + rxd->status_word = 0; + continue; + } + + gp->rx_skbs[i] = skb; + skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); + dma_addr = pci_map_page(gp->pdev, + virt_to_page(skb->data), + offset_in_page(skb->data), + RX_BUF_ALLOC_SIZE(gp), + PCI_DMA_FROMDEVICE); + rxd->buffer = cpu_to_le64(dma_addr); + dma_wmb(); + rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); + skb_reserve(skb, RX_OFFSET); + } + + for (i = 0; i < TX_RING_SIZE; i++) { + struct gem_txd *txd = &gb->txd[i]; + + txd->control_word = 0; + dma_wmb(); + txd->buffer = 0; + } + wmb(); +} + +/* Init PHY interface and start link poll state machine */ +static void gem_init_phy(struct gem *gp) +{ + u32 mifcfg; + + /* Revert MIF CFG setting done on stop_phy */ + mifcfg = readl(gp->regs + MIF_CFG); + mifcfg &= ~MIF_CFG_BBMODE; + writel(mifcfg, gp->regs + MIF_CFG); + + if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { + int i; + + /* Those delay sucks, the HW seem to love them though, I'll + * serisouly consider breaking some locks here to be able + * to schedule instead + */ + for (i = 0; i < 3; i++) { +#ifdef CONFIG_PPC_PMAC + pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0); + msleep(20); +#endif + /* Some PHYs used by apple have problem getting back to us, + * we do an additional reset here + */ + sungem_phy_write(gp, MII_BMCR, BMCR_RESET); + msleep(20); + if (sungem_phy_read(gp, MII_BMCR) != 0xffff) + break; + if (i == 2) + netdev_warn(gp->dev, "GMAC PHY not responding !\n"); + } + } + + if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && + gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { + u32 val; + + /* Init datapath mode register. */ + if (gp->phy_type == phy_mii_mdio0 || + gp->phy_type == phy_mii_mdio1) { + val = PCS_DMODE_MGM; + } else if (gp->phy_type == phy_serialink) { + val = PCS_DMODE_SM | PCS_DMODE_GMOE; + } else { + val = PCS_DMODE_ESM; + } + + writel(val, gp->regs + PCS_DMODE); + } + + if (gp->phy_type == phy_mii_mdio0 || + gp->phy_type == phy_mii_mdio1) { + /* Reset and detect MII PHY */ + sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr); + + /* Init PHY */ + if (gp->phy_mii.def && gp->phy_mii.def->ops->init) + gp->phy_mii.def->ops->init(&gp->phy_mii); + } else { + gem_pcs_reset(gp); + gem_pcs_reinit_adv(gp); + } + + /* Default aneg parameters */ + gp->timer_ticks = 0; + gp->lstate = link_down; + netif_carrier_off(gp->dev); + + /* Print things out */ + if (gp->phy_type == phy_mii_mdio0 || + gp->phy_type == phy_mii_mdio1) + netdev_info(gp->dev, "Found %s PHY\n", + gp->phy_mii.def ? 
gp->phy_mii.def->name : "no"); + + gem_begin_auto_negotiation(gp, NULL); +} + +static void gem_init_dma(struct gem *gp) +{ + u64 desc_dma = (u64) gp->gblock_dvma; + u32 val; + + val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); + writel(val, gp->regs + TXDMA_CFG); + + writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); + writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); + desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); + + writel(0, gp->regs + TXDMA_KICK); + + val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | + ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); + writel(val, gp->regs + RXDMA_CFG); + + writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); + writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); + + writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); + + val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); + val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); + writel(val, gp->regs + RXDMA_PTHRESH); + + if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) + writel(((5 & RXDMA_BLANK_IPKTS) | + ((8 << 12) & RXDMA_BLANK_ITIME)), + gp->regs + RXDMA_BLANK); + else + writel(((5 & RXDMA_BLANK_IPKTS) | + ((4 << 12) & RXDMA_BLANK_ITIME)), + gp->regs + RXDMA_BLANK); +} + +static u32 gem_setup_multicast(struct gem *gp) +{ + u32 rxcfg = 0; + int i; + + if ((gp->dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(gp->dev) > 256)) { + for (i=0; i<16; i++) + writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); + rxcfg |= MAC_RXCFG_HFE; + } else if (gp->dev->flags & IFF_PROMISC) { + rxcfg |= MAC_RXCFG_PROM; + } else { + u16 hash_table[16]; + u32 crc; + struct netdev_hw_addr *ha; + int i; + + memset(hash_table, 0, sizeof(hash_table)); + netdev_for_each_mc_addr(ha, gp->dev) { + crc = ether_crc_le(6, ha->addr); + crc >>= 24; + hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); + } + for (i=0; i<16; i++) + writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); + rxcfg |= MAC_RXCFG_HFE; + } + + return rxcfg; +} + +static void gem_init_mac(struct gem *gp) +{ + unsigned char *e = &gp->dev->dev_addr[0]; + + writel(0x1bf0, gp->regs + MAC_SNDPAUSE); + + writel(0x00, gp->regs + MAC_IPG0); + writel(0x08, gp->regs + MAC_IPG1); + writel(0x04, gp->regs + MAC_IPG2); + writel(0x40, gp->regs + MAC_STIME); + writel(0x40, gp->regs + MAC_MINFSZ); + + /* Ethernet payload + header + FCS + optional VLAN tag. 
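+	 * rx_buf_sz already covers the header and VLAN tag, so only the 4-byte FCS is added below.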
*/ + writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); + + writel(0x07, gp->regs + MAC_PASIZE); + writel(0x04, gp->regs + MAC_JAMSIZE); + writel(0x10, gp->regs + MAC_ATTLIM); + writel(0x8808, gp->regs + MAC_MCTYPE); + + writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); + + writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); + writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); + writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); + + writel(0, gp->regs + MAC_ADDR3); + writel(0, gp->regs + MAC_ADDR4); + writel(0, gp->regs + MAC_ADDR5); + + writel(0x0001, gp->regs + MAC_ADDR6); + writel(0xc200, gp->regs + MAC_ADDR7); + writel(0x0180, gp->regs + MAC_ADDR8); + + writel(0, gp->regs + MAC_AFILT0); + writel(0, gp->regs + MAC_AFILT1); + writel(0, gp->regs + MAC_AFILT2); + writel(0, gp->regs + MAC_AF21MSK); + writel(0, gp->regs + MAC_AF0MSK); + + gp->mac_rx_cfg = gem_setup_multicast(gp); +#ifdef STRIP_FCS + gp->mac_rx_cfg |= MAC_RXCFG_SFCS; +#endif + writel(0, gp->regs + MAC_NCOLL); + writel(0, gp->regs + MAC_FASUCC); + writel(0, gp->regs + MAC_ECOLL); + writel(0, gp->regs + MAC_LCOLL); + writel(0, gp->regs + MAC_DTIMER); + writel(0, gp->regs + MAC_PATMPS); + writel(0, gp->regs + MAC_RFCTR); + writel(0, gp->regs + MAC_LERR); + writel(0, gp->regs + MAC_AERR); + writel(0, gp->regs + MAC_FCSERR); + writel(0, gp->regs + MAC_RXCVERR); + + /* Clear RX/TX/MAC/XIF config, we will set these up and enable + * them once a link is established. + */ + writel(0, gp->regs + MAC_TXCFG); + writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); + writel(0, gp->regs + MAC_MCCFG); + writel(0, gp->regs + MAC_XIFCFG); + + /* Setup MAC interrupts. We want to get all of the interesting + * counter expiration events, but we do not want to hear about + * normal rx/tx as the DMA engine tells us that. + */ + writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); + writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); + + /* Don't enable even the PAUSE interrupts for now, we + * make no use of those events other than to record them. + */ + writel(0xffffffff, gp->regs + MAC_MCMASK); + + /* Don't enable GEM's WOL in normal operations + */ + if (gp->has_wol) + writel(0, gp->regs + WOL_WAKECSR); +} + +static void gem_init_pause_thresholds(struct gem *gp) +{ + u32 cfg; + + /* Calculate pause thresholds. Setting the OFF threshold to the + * full RX fifo size effectively disables PAUSE generation which + * is what we do for 10/100 only GEMs which have FIFOs too small + * to make real gains from PAUSE. 
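+	 * For larger FIFOs, the OFF threshold sits two maximum-sized frames below the top of the FIFO,
+	 * and the ON threshold one further frame below that.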
+ */ + if (gp->rx_fifo_sz <= (2 * 1024)) { + gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; + } else { + int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; + int off = (gp->rx_fifo_sz - (max_frame * 2)); + int on = off - max_frame; + + gp->rx_pause_off = off; + gp->rx_pause_on = on; + } + + + /* Configure the chip "burst" DMA mode & enable some + * HW bug fixes on Apple version + */ + cfg = 0; + if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) + cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; +#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) + cfg |= GREG_CFG_IBURST; +#endif + cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); + cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); + writel(cfg, gp->regs + GREG_CFG); + + /* If Infinite Burst didn't stick, then use different + * thresholds (and Apple bug fixes don't exist) + */ + if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { + cfg = ((2 << 1) & GREG_CFG_TXDMALIM); + cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); + writel(cfg, gp->regs + GREG_CFG); + } +} + +static int gem_check_invariants(struct gem *gp) +{ + struct pci_dev *pdev = gp->pdev; + u32 mif_cfg; + + /* On Apple's sungem, we can't rely on registers as the chip + * was been powered down by the firmware. The PHY is looked + * up later on. + */ + if (pdev->vendor == PCI_VENDOR_ID_APPLE) { + gp->phy_type = phy_mii_mdio0; + gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; + gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; + gp->swrst_base = 0; + + mif_cfg = readl(gp->regs + MIF_CFG); + mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); + mif_cfg |= MIF_CFG_MDI0; + writel(mif_cfg, gp->regs + MIF_CFG); + writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); + writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); + + /* We hard-code the PHY address so we can properly bring it out of + * reset later on, we can't really probe it at this point, though + * that isn't an issue. + */ + if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) + gp->mii_phy_addr = 1; + else + gp->mii_phy_addr = 0; + + return 0; + } + + mif_cfg = readl(gp->regs + MIF_CFG); + + if (pdev->vendor == PCI_VENDOR_ID_SUN && + pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { + /* One of the MII PHYs _must_ be present + * as this chip has no gigabit PHY. + */ + if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { + pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n", + mif_cfg); + return -1; + } + } + + /* Determine initial PHY interface type guess. MDIO1 is the + * external PHY and thus takes precedence over MDIO0. + */ + + if (mif_cfg & MIF_CFG_MDI1) { + gp->phy_type = phy_mii_mdio1; + mif_cfg |= MIF_CFG_PSELECT; + writel(mif_cfg, gp->regs + MIF_CFG); + } else if (mif_cfg & MIF_CFG_MDI0) { + gp->phy_type = phy_mii_mdio0; + mif_cfg &= ~MIF_CFG_PSELECT; + writel(mif_cfg, gp->regs + MIF_CFG); + } else { +#ifdef CONFIG_SPARC + const char *p; + + p = of_get_property(gp->of_node, "shared-pins", NULL); + if (p && !strcmp(p, "serdes")) + gp->phy_type = phy_serdes; + else +#endif + gp->phy_type = phy_serialink; + } + if (gp->phy_type == phy_mii_mdio1 || + gp->phy_type == phy_mii_mdio0) { + int i; + + for (i = 0; i < 32; i++) { + gp->mii_phy_addr = i; + if (sungem_phy_read(gp, MII_BMCR) != 0xffff) + break; + } + if (i == 32) { + if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { + pr_err("RIO MII phy will not respond\n"); + return -1; + } + gp->phy_type = phy_serdes; + } + } + + /* Fetch the FIFO configurations now too. 
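+	 * The FSZ registers report the FIFO sizes in units of 64 bytes.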
*/ + gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; + gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; + + if (pdev->vendor == PCI_VENDOR_ID_SUN) { + if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { + if (gp->tx_fifo_sz != (9 * 1024) || + gp->rx_fifo_sz != (20 * 1024)) { + pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n", + gp->tx_fifo_sz, gp->rx_fifo_sz); + return -1; + } + gp->swrst_base = 0; + } else { + if (gp->tx_fifo_sz != (2 * 1024) || + gp->rx_fifo_sz != (2 * 1024)) { + pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", + gp->tx_fifo_sz, gp->rx_fifo_sz); + return -1; + } + gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; + } + } + + return 0; +} + +static void gem_reinit_chip(struct gem *gp) +{ + /* Reset the chip */ + gem_reset(gp); + + /* Make sure ints are disabled */ + gem_disable_ints(gp); + + /* Allocate & setup ring buffers */ + gem_init_rings(gp); + + /* Configure pause thresholds */ + gem_init_pause_thresholds(gp); + + /* Init DMA & MAC engines */ + gem_init_dma(gp); + gem_init_mac(gp); +} + + +static void gem_stop_phy(struct gem *gp, int wol) +{ + u32 mifcfg; + + /* Let the chip settle down a bit, it seems that helps + * for sleep mode on some models + */ + msleep(10); + + /* Make sure we aren't polling PHY status change. We + * don't currently use that feature though + */ + mifcfg = readl(gp->regs + MIF_CFG); + mifcfg &= ~MIF_CFG_POLL; + writel(mifcfg, gp->regs + MIF_CFG); + + if (wol && gp->has_wol) { + unsigned char *e = &gp->dev->dev_addr[0]; + u32 csr; + + /* Setup wake-on-lan for MAGIC packet */ + writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, + gp->regs + MAC_RXCFG); + writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); + writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); + writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); + + writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); + csr = WOL_WAKECSR_ENABLE; + if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) + csr |= WOL_WAKECSR_MII; + writel(csr, gp->regs + WOL_WAKECSR); + } else { + writel(0, gp->regs + MAC_RXCFG); + (void)readl(gp->regs + MAC_RXCFG); + /* Machine sleep will die in strange ways if we + * dont wait a bit here, looks like the chip takes + * some time to really shut down + */ + msleep(10); + } + + writel(0, gp->regs + MAC_TXCFG); + writel(0, gp->regs + MAC_XIFCFG); + writel(0, gp->regs + TXDMA_CFG); + writel(0, gp->regs + RXDMA_CFG); + + if (!wol) { + gem_reset(gp); + writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); + writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); + + if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) + gp->phy_mii.def->ops->suspend(&gp->phy_mii); + + /* According to Apple, we must set the MDIO pins to this begnign + * state or we may 1) eat more current, 2) damage some PHYs + */ + writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); + writel(0, gp->regs + MIF_BBCLK); + writel(0, gp->regs + MIF_BBDATA); + writel(0, gp->regs + MIF_BBOENAB); + writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); + (void) readl(gp->regs + MAC_XIFCFG); + } +} + +static int gem_do_start(struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + int rc; + + /* Enable the cell */ + gem_get_cell(gp); + + /* Make sure PCI access and bus master are enabled */ + rc = pci_enable_device(gp->pdev); + if (rc) { + netdev_err(dev, "Failed to enable chip on PCI bus !\n"); + + /* Put cell and forget it for now, it will be considered as + * still asleep, a new sleep cycle may bring it back + */ + gem_put_cell(gp); + return -ENXIO; + } + pci_set_master(gp->pdev); + + 
/* Init & setup chip hardware */ + gem_reinit_chip(gp); + + /* An interrupt might come in handy */ + rc = request_irq(gp->pdev->irq, gem_interrupt, + IRQF_SHARED, dev->name, (void *)dev); + if (rc) { + netdev_err(dev, "failed to request irq !\n"); + + gem_reset(gp); + gem_clean_rings(gp); + gem_put_cell(gp); + return rc; + } + + /* Mark us as attached again if we come from resume(), this has + * no effect if we weren't detached and needs to be done now. + */ + netif_device_attach(dev); + + /* Restart NAPI & queues */ + gem_netif_start(gp); + + /* Detect & init PHY, start autoneg etc... this will + * eventually result in starting DMA operations when + * the link is up + */ + gem_init_phy(gp); + + return 0; +} + +static void gem_do_stop(struct net_device *dev, int wol) +{ + struct gem *gp = netdev_priv(dev); + + /* Stop NAPI and stop tx queue */ + gem_netif_stop(gp); + + /* Make sure ints are disabled. We don't care about + * synchronizing as NAPI is disabled, thus a stray + * interrupt will do nothing bad (our irq handler + * just schedules NAPI) + */ + gem_disable_ints(gp); + + /* Stop the link timer */ + del_timer_sync(&gp->link_timer); + + /* We cannot cancel the reset task while holding the + * rtnl lock, we'd get an A->B / B->A deadlock stituation + * if we did. This is not an issue however as the reset + * task is synchronized vs. us (rtnl_lock) and will do + * nothing if the device is down or suspended. We do + * still clear reset_task_pending to avoid a spurrious + * reset later on in case we do resume before it gets + * scheduled. + */ + gp->reset_task_pending = 0; + + /* If we are going to sleep with WOL */ + gem_stop_dma(gp); + msleep(10); + if (!wol) + gem_reset(gp); + msleep(10); + + /* Get rid of rings */ + gem_clean_rings(gp); + + /* No irq needed anymore */ + free_irq(gp->pdev->irq, (void *) dev); + + /* Shut the PHY down eventually and setup WOL */ + gem_stop_phy(gp, wol); + + /* Make sure bus master is disabled */ + pci_disable_device(gp->pdev); + + /* Cell not needed neither if no WOL */ + if (!wol) + gem_put_cell(gp); +} + +static void gem_reset_task(struct work_struct *work) +{ + struct gem *gp = container_of(work, struct gem, reset_task); + + /* Lock out the network stack (essentially shield ourselves + * against a racing open, close, control call, or suspend + */ + rtnl_lock(); + + /* Skip the reset task if suspended or closed, or if it's + * been cancelled by gem_do_stop (see comment there) + */ + if (!netif_device_present(gp->dev) || + !netif_running(gp->dev) || + !gp->reset_task_pending) { + rtnl_unlock(); + return; + } + + /* Stop the link timer */ + del_timer_sync(&gp->link_timer); + + /* Stop NAPI and tx */ + gem_netif_stop(gp); + + /* Reset the chip & rings */ + gem_reinit_chip(gp); + if (gp->lstate == link_up) + gem_set_link_modes(gp); + + /* Restart NAPI and Tx */ + gem_netif_start(gp); + + /* We are back ! 
*/ + gp->reset_task_pending = 0; + + /* If the link is not up, restart autoneg, else restart the + * polling timer + */ + if (gp->lstate != link_up) + gem_begin_auto_negotiation(gp, NULL); + else + mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); + + rtnl_unlock(); +} + +static int gem_open(struct net_device *dev) +{ + /* We allow open while suspended, we just do nothing, + * the chip will be initialized in resume() + */ + if (netif_device_present(dev)) + return gem_do_start(dev); + return 0; +} + +static int gem_close(struct net_device *dev) +{ + if (netif_device_present(dev)) + gem_do_stop(dev, 0); + + return 0; +} + +#ifdef CONFIG_PM +static int gem_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct gem *gp = netdev_priv(dev); + + /* Lock the network stack first to avoid racing with open/close, + * reset task and setting calls + */ + rtnl_lock(); + + /* Not running, mark ourselves non-present, no need for + * a lock here + */ + if (!netif_running(dev)) { + netif_device_detach(dev); + rtnl_unlock(); + return 0; + } + netdev_info(dev, "suspending, WakeOnLan %s\n", + (gp->wake_on_lan && netif_running(dev)) ? + "enabled" : "disabled"); + + /* Tell the network stack we're gone. gem_do_stop() below will + * synchronize with TX, stop NAPI etc... + */ + netif_device_detach(dev); + + /* Switch off chip, remember WOL setting */ + gp->asleep_wol = !!gp->wake_on_lan; + gem_do_stop(dev, gp->asleep_wol); + + /* Unlock the network stack */ + rtnl_unlock(); + + return 0; +} + +static int gem_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct gem *gp = netdev_priv(dev); + + /* See locking comment in gem_suspend */ + rtnl_lock(); + + /* Not running, mark ourselves present, no need for + * a lock here + */ + if (!netif_running(dev)) { + netif_device_attach(dev); + rtnl_unlock(); + return 0; + } + + /* Restart chip. If that fails there isn't much we can do, we + * leave things stopped. + */ + gem_do_start(dev); + + /* If we had WOL enabled, the cell clock was never turned off during + * sleep, so we end up beeing unbalanced. Fix that here + */ + if (gp->asleep_wol) + gem_put_cell(gp); + + /* Unlock the network stack */ + rtnl_unlock(); + + return 0; +} +#endif /* CONFIG_PM */ + +static struct net_device_stats *gem_get_stats(struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + + /* I have seen this being called while the PM was in progress, + * so we shield against this. Let's also not poke at registers + * while the reset task is going on. + * + * TODO: Move stats collection elsewhere (link timer ?) and + * make this a nop to avoid all those synchro issues + */ + if (!netif_device_present(dev) || !netif_running(dev)) + goto bail; + + /* Better safe than sorry... 
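+	 * don't read the MAC counters while the chip's cell is disabled.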
*/ + if (WARN_ON(!gp->cell_enabled)) + goto bail; + + dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); + writel(0, gp->regs + MAC_FCSERR); + + dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); + writel(0, gp->regs + MAC_AERR); + + dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); + writel(0, gp->regs + MAC_LERR); + + dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); + dev->stats.collisions += + (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); + writel(0, gp->regs + MAC_ECOLL); + writel(0, gp->regs + MAC_LCOLL); + bail: + return &dev->stats; +} + +static int gem_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *macaddr = (struct sockaddr *) addr; + struct gem *gp = netdev_priv(dev); + unsigned char *e = &dev->dev_addr[0]; + + if (!is_valid_ether_addr(macaddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); + + /* We'll just catch it later when the device is up'd or resumed */ + if (!netif_running(dev) || !netif_device_present(dev)) + return 0; + + /* Better safe than sorry... */ + if (WARN_ON(!gp->cell_enabled)) + return 0; + + writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); + writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); + writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); + + return 0; +} + +static void gem_set_multicast(struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + u32 rxcfg, rxcfg_new; + int limit = 10000; + + if (!netif_running(dev) || !netif_device_present(dev)) + return; + + /* Better safe than sorry... */ + if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) + return; + + rxcfg = readl(gp->regs + MAC_RXCFG); + rxcfg_new = gem_setup_multicast(gp); +#ifdef STRIP_FCS + rxcfg_new |= MAC_RXCFG_SFCS; +#endif + gp->mac_rx_cfg = rxcfg_new; + + writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); + while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { + if (!limit--) + break; + udelay(10); + } + + rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); + rxcfg |= rxcfg_new; + + writel(rxcfg, gp->regs + MAC_RXCFG); +} + +/* Jumbo-grams don't seem to work :-( */ +#define GEM_MIN_MTU 68 +#if 1 +#define GEM_MAX_MTU 1500 +#else +#define GEM_MAX_MTU 9000 +#endif + +static int gem_change_mtu(struct net_device *dev, int new_mtu) +{ + struct gem *gp = netdev_priv(dev); + + if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) + return -EINVAL; + + dev->mtu = new_mtu; + + /* We'll just catch it later when the device is up'd or resumed */ + if (!netif_running(dev) || !netif_device_present(dev)) + return 0; + + /* Better safe than sorry... 
*/ + if (WARN_ON(!gp->cell_enabled)) + return 0; + + gem_netif_stop(gp); + gem_reinit_chip(gp); + if (gp->lstate == link_up) + gem_set_link_modes(gp); + gem_netif_start(gp); + + return 0; +} + +static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct gem *gp = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); +} + +static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gem *gp = netdev_priv(dev); + + if (gp->phy_type == phy_mii_mdio0 || + gp->phy_type == phy_mii_mdio1) { + if (gp->phy_mii.def) + cmd->supported = gp->phy_mii.def->features; + else + cmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full); + + /* XXX hardcoded stuff for now */ + cmd->port = PORT_MII; + cmd->transceiver = XCVR_EXTERNAL; + cmd->phy_address = 0; /* XXX fixed PHYAD */ + + /* Return current PHY settings */ + cmd->autoneg = gp->want_autoneg; + ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); + cmd->duplex = gp->phy_mii.duplex; + cmd->advertising = gp->phy_mii.advertising; + + /* If we started with a forced mode, we don't have a default + * advertise set, we need to return something sensible so + * userland can re-enable autoneg properly. + */ + if (cmd->advertising == 0) + cmd->advertising = cmd->supported; + } else { // XXX PCS ? + cmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg); + cmd->advertising = cmd->supported; + ethtool_cmd_speed_set(cmd, 0); + cmd->duplex = cmd->port = cmd->phy_address = + cmd->transceiver = cmd->autoneg = 0; + + /* serdes means usually a Fibre connector, with most fixed */ + if (gp->phy_type == phy_serdes) { + cmd->port = PORT_FIBRE; + cmd->supported = (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | SUPPORTED_Autoneg | + SUPPORTED_Pause | SUPPORTED_Asym_Pause); + cmd->advertising = cmd->supported; + cmd->transceiver = XCVR_INTERNAL; + if (gp->lstate == link_up) + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = 1; + } + } + cmd->maxtxpkt = cmd->maxrxpkt = 0; + + return 0; +} + +static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gem *gp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + + /* Verify the settings we care about. */ + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE && + cmd->advertising == 0) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE && + ((speed != SPEED_1000 && + speed != SPEED_100 && + speed != SPEED_10) || + (cmd->duplex != DUPLEX_HALF && + cmd->duplex != DUPLEX_FULL))) + return -EINVAL; + + /* Apply settings and restart link process. 
*/ + if (netif_device_present(gp->dev)) { + del_timer_sync(&gp->link_timer); + gem_begin_auto_negotiation(gp, cmd); + } + + return 0; +} + +static int gem_nway_reset(struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + + if (!gp->want_autoneg) + return -EINVAL; + + /* Restart link process */ + if (netif_device_present(gp->dev)) { + del_timer_sync(&gp->link_timer); + gem_begin_auto_negotiation(gp, NULL); + } + + return 0; +} + +static u32 gem_get_msglevel(struct net_device *dev) +{ + struct gem *gp = netdev_priv(dev); + return gp->msg_enable; +} + +static void gem_set_msglevel(struct net_device *dev, u32 value) +{ + struct gem *gp = netdev_priv(dev); + gp->msg_enable = value; +} + + +/* Add more when I understand how to program the chip */ +/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ + +#define WOL_SUPPORTED_MASK (WAKE_MAGIC) + +static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct gem *gp = netdev_priv(dev); + + /* Add more when I understand how to program the chip */ + if (gp->has_wol) { + wol->supported = WOL_SUPPORTED_MASK; + wol->wolopts = gp->wake_on_lan; + } else { + wol->supported = 0; + wol->wolopts = 0; + } +} + +static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct gem *gp = netdev_priv(dev); + + if (!gp->has_wol) + return -EOPNOTSUPP; + gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; + return 0; +} + +static const struct ethtool_ops gem_ethtool_ops = { + .get_drvinfo = gem_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_settings = gem_get_settings, + .set_settings = gem_set_settings, + .nway_reset = gem_nway_reset, + .get_msglevel = gem_get_msglevel, + .set_msglevel = gem_set_msglevel, + .get_wol = gem_get_wol, + .set_wol = gem_set_wol, +}; + +static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct gem *gp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + int rc = -EOPNOTSUPP; + + /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that + * netif_device_present() is true and holds rtnl_lock for us + * so we have nothing to worry about + */ + + switch (cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = gp->mii_phy_addr; + /* Fallthrough... */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, + data->reg_num & 0x1f); + rc = 0; + break; + + case SIOCSMIIREG: /* Write MII PHY register. */ + __sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, + data->val_in); + rc = 0; + break; + } + return rc; +} + +#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) +/* Fetch MAC address from vital product data of PCI ROM. 
*/ +static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) +{ + int this_offset; + + for (this_offset = 0x20; this_offset < len; this_offset++) { + void __iomem *p = rom_base + this_offset; + int i; + + if (readb(p + 0) != 0x90 || + readb(p + 1) != 0x00 || + readb(p + 2) != 0x09 || + readb(p + 3) != 0x4e || + readb(p + 4) != 0x41 || + readb(p + 5) != 0x06) + continue; + + this_offset += 6; + p += 6; + + for (i = 0; i < 6; i++) + dev_addr[i] = readb(p + i); + return 1; + } + return 0; +} + +static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) +{ + size_t size; + void __iomem *p = pci_map_rom(pdev, &size); + + if (p) { + int found; + + found = readb(p) == 0x55 && + readb(p + 1) == 0xaa && + find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); + pci_unmap_rom(pdev, p); + if (found) + return; + } + + /* Sun MAC prefix then 3 random bytes. */ + dev_addr[0] = 0x08; + dev_addr[1] = 0x00; + dev_addr[2] = 0x20; + get_random_bytes(dev_addr + 3, 3); +} +#endif /* not Sparc and not PPC */ + +static int gem_get_device_address(struct gem *gp) +{ +#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) + struct net_device *dev = gp->dev; + const unsigned char *addr; + + addr = of_get_property(gp->of_node, "local-mac-address", NULL); + if (addr == NULL) { +#ifdef CONFIG_SPARC + addr = idprom->id_ethaddr; +#else + printk("\n"); + pr_err("%s: can't get mac-address\n", dev->name); + return -1; +#endif + } + memcpy(dev->dev_addr, addr, ETH_ALEN); +#else + get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); +#endif + return 0; +} + +static void gem_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct gem *gp = netdev_priv(dev); + + unregister_netdev(dev); + + /* Ensure reset task is truly gone */ + cancel_work_sync(&gp->reset_task); + + /* Free resources */ + pci_free_consistent(pdev, + sizeof(struct gem_init_block), + gp->init_block, + gp->gblock_dvma); + iounmap(gp->regs); + pci_release_regions(pdev); + free_netdev(dev); + } +} + +static const struct net_device_ops gem_netdev_ops = { + .ndo_open = gem_open, + .ndo_stop = gem_close, + .ndo_start_xmit = gem_start_xmit, + .ndo_get_stats = gem_get_stats, + .ndo_set_rx_mode = gem_set_multicast, + .ndo_do_ioctl = gem_ioctl, + .ndo_tx_timeout = gem_tx_timeout, + .ndo_change_mtu = gem_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = gem_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gem_poll_controller, +#endif +}; + +static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned long gemreg_base, gemreg_len; + struct net_device *dev; + struct gem *gp; + int err, pci_using_dac; + + printk_once(KERN_INFO "%s", version); + + /* Apple gmac note: during probe, the chip is powered up by + * the arch code to allow the code below to work (and to let + * the chip be probed on the config space. It won't stay powered + * up until the interface is brought up however, so we can't rely + * on register configuration done at this point. + */ + err = pci_enable_device(pdev); + if (err) { + pr_err("Cannot enable MMIO operation, aborting\n"); + return err; + } + pci_set_master(pdev); + + /* Configure DMA attributes. */ + + /* All of the GEM documentation states that 64-bit DMA addressing + * is fully supported and should work just fine. However the + * front end for RIO based GEMs is different and only supports + * 32-bit addressing. 
+ * + * For now we assume the various PPC GEMs are 32-bit only as well. + */ + if (pdev->vendor == PCI_VENDOR_ID_SUN && + pdev->device == PCI_DEVICE_ID_SUN_GEM && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA configuration, aborting\n"); + goto err_disable_device; + } + pci_using_dac = 0; + } + + gemreg_base = pci_resource_start(pdev, 0); + gemreg_len = pci_resource_len(pdev, 0); + + if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { + pr_err("Cannot find proper PCI device base address, aborting\n"); + err = -ENODEV; + goto err_disable_device; + } + + dev = alloc_etherdev(sizeof(*gp)); + if (!dev) { + err = -ENOMEM; + goto err_disable_device; + } + SET_NETDEV_DEV(dev, &pdev->dev); + + gp = netdev_priv(dev); + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + pr_err("Cannot obtain PCI resources, aborting\n"); + goto err_out_free_netdev; + } + + gp->pdev = pdev; + gp->dev = dev; + + gp->msg_enable = DEFAULT_MSG; + + init_timer(&gp->link_timer); + gp->link_timer.function = gem_link_timer; + gp->link_timer.data = (unsigned long) gp; + + INIT_WORK(&gp->reset_task, gem_reset_task); + + gp->lstate = link_down; + gp->timer_ticks = 0; + netif_carrier_off(dev); + + gp->regs = ioremap(gemreg_base, gemreg_len); + if (!gp->regs) { + pr_err("Cannot map device registers, aborting\n"); + err = -EIO; + goto err_out_free_res; + } + + /* On Apple, we want a reference to the Open Firmware device-tree + * node. We use it for clock control. + */ +#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) + gp->of_node = pci_device_to_OF_node(pdev); +#endif + + /* Only Apple version supports WOL afaik */ + if (pdev->vendor == PCI_VENDOR_ID_APPLE) + gp->has_wol = 1; + + /* Make sure cell is enabled */ + gem_get_cell(gp); + + /* Make sure everything is stopped and in init state */ + gem_reset(gp); + + /* Fill up the mii_phy structure (even if we won't use it) */ + gp->phy_mii.dev = dev; + gp->phy_mii.mdio_read = _sungem_phy_read; + gp->phy_mii.mdio_write = _sungem_phy_write; +#ifdef CONFIG_PPC_PMAC + gp->phy_mii.platform_data = gp->of_node; +#endif + /* By default, we start with autoneg */ + gp->want_autoneg = 1; + + /* Check fifo sizes, PHY type, etc... */ + if (gem_check_invariants(gp)) { + err = -ENODEV; + goto err_out_iounmap; + } + + /* It is guaranteed that the returned buffer will be at least + * PAGE_SIZE aligned. 
+ */ + gp->init_block = (struct gem_init_block *) + pci_alloc_consistent(pdev, sizeof(struct gem_init_block), + &gp->gblock_dvma); + if (!gp->init_block) { + pr_err("Cannot allocate init block, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + err = gem_get_device_address(gp); + if (err) + goto err_out_free_consistent; + + dev->netdev_ops = &gem_netdev_ops; + netif_napi_add(dev, &gp->napi, gem_poll, 64); + dev->ethtool_ops = &gem_ethtool_ops; + dev->watchdog_timeo = 5 * HZ; + dev->dma = 0; + + /* Set that now, in case PM kicks in now */ + pci_set_drvdata(pdev, dev); + + /* We can do scatter/gather and HW checksum */ + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + dev->features |= dev->hw_features | NETIF_F_RXCSUM; + if (pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + /* Register with kernel */ + if (register_netdev(dev)) { + pr_err("Cannot register net device, aborting\n"); + err = -ENOMEM; + goto err_out_free_consistent; + } + + /* Undo the get_cell with appropriate locking (we could use + * ndo_init/uninit but that would be even more clumsy imho) + */ + rtnl_lock(); + gem_put_cell(gp); + rtnl_unlock(); + + netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", + dev->dev_addr); + return 0; + +err_out_free_consistent: + gem_remove_one(pdev); +err_out_iounmap: + gem_put_cell(gp); + iounmap(gp->regs); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_free_netdev: + free_netdev(dev); +err_disable_device: + pci_disable_device(pdev); + return err; + +} + + +static struct pci_driver gem_driver = { + .name = GEM_MODULE_NAME, + .id_table = gem_pci_tbl, + .probe = gem_init_one, + .remove = gem_remove_one, +#ifdef CONFIG_PM + .suspend = gem_suspend, + .resume = gem_resume, +#endif /* CONFIG_PM */ +}; + +module_pci_driver(gem_driver); diff --git a/drivers/net/ethernet/sun/sungem.h b/drivers/net/ethernet/sun/sungem.h new file mode 100644 index 000000000..835ce1b3c --- /dev/null +++ b/drivers/net/ethernet/sun/sungem.h @@ -0,0 +1,1027 @@ +/* $Id: sungem.h,v 1.10.2.4 2002/03/11 08:54:48 davem Exp $ + * sungem.h: Definitions for Sun GEM ethernet driver. + * + * Copyright (C) 2000 David S. Miller (davem@redhat.com) + */ + +#ifndef _SUNGEM_H +#define _SUNGEM_H + +/* Global Registers */ +#define GREG_SEBSTATE 0x0000UL /* SEB State Register */ +#define GREG_CFG 0x0004UL /* Configuration Register */ +#define GREG_STAT 0x000CUL /* Status Register */ +#define GREG_IMASK 0x0010UL /* Interrupt Mask Register */ +#define GREG_IACK 0x0014UL /* Interrupt ACK Register */ +#define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */ +#define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */ +#define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */ +#define GREG_BIFCFG 0x1008UL /* BIF Configuration Register */ +#define GREG_BIFDIAG 0x100CUL /* BIF Diagnostics Register */ +#define GREG_SWRST 0x1010UL /* Software Reset Register */ + +/* Global SEB State Register */ +#define GREG_SEBSTATE_ARB 0x00000003 /* State of Arbiter */ +#define GREG_SEBSTATE_RXWON 0x00000004 /* RX won internal arbitration */ + +/* Global Configuration Register */ +#define GREG_CFG_IBURST 0x00000001 /* Infinite Burst */ +#define GREG_CFG_TXDMALIM 0x0000003e /* TX DMA grant limit */ +#define GREG_CFG_RXDMALIM 0x000007c0 /* RX DMA grant limit */ +#define GREG_CFG_RONPAULBIT 0x00000800 /* Use mem read multiple for PCI read + * after infinite burst (Apple) */ +#define GREG_CFG_ENBUG2FIX 0x00001000 /* Fix Rx hang after overflow */ + +/* Global Interrupt Status Register. 
+ * + * Reading this register automatically clears bits 0 through 6. + * This auto-clearing does not occur when the alias at GREG_STAT2 + * is read instead. The rest of the interrupt bits only clear when + * the secondary interrupt status register corresponding to that + * bit is read (ie. if GREG_STAT_PCS is set, it will be cleared by + * reading PCS_ISTAT). + */ +#define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */ +#define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */ +#define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */ +#define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */ +#define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */ +#define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */ +#define GREG_STAT_PCS 0x00002000 /* PCS signalled interrupt */ +#define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */ +#define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */ +#define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */ +#define GREG_STAT_MIF 0x00020000 /* MIF signalled interrupt */ +#define GREG_STAT_PCIERR 0x00040000 /* PCI Error interrupt */ +#define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */ +#define GREG_STAT_TXNR_SHIFT 19 + +#define GREG_STAT_ABNORMAL (GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR | \ + GREG_STAT_PCS | GREG_STAT_TXMAC | GREG_STAT_RXMAC | \ + GREG_STAT_MAC | GREG_STAT_MIF | GREG_STAT_PCIERR) + +#define GREG_STAT_NAPI (GREG_STAT_TXALL | GREG_STAT_TXINTME | \ + GREG_STAT_RXDONE | GREG_STAT_ABNORMAL) + +/* The layout of GREG_IMASK and GREG_IACK is identical to GREG_STAT. + * Bits set in GREG_IMASK will prevent that interrupt type from being + * signalled to the cpu. GREG_IACK can be used to clear specific top-level + * interrupt conditions in GREG_STAT, ie. it only works for bits 0 through 6. + * Setting the bit will clear that interrupt, clear bits will have no effect + * on GREG_STAT. + */ + +/* Global PCI Error Status Register */ +#define GREG_PCIESTAT_BADACK 0x00000001 /* No ACK64# during ABS64 cycle */ +#define GREG_PCIESTAT_DTRTO 0x00000002 /* Delayed transaction timeout */ +#define GREG_PCIESTAT_OTHER 0x00000004 /* Other PCI error, check cfg space */ + +/* The layout of the GREG_PCIEMASK is identical to that of GREG_PCIESTAT. + * Bits set in GREG_PCIEMASK will prevent that interrupt type from being + * signalled to the cpu. + */ + +/* Global BIF Configuration Register */ +#define GREG_BIFCFG_SLOWCLK 0x00000001 /* Set if PCI runs < 25Mhz */ +#define GREG_BIFCFG_B64DIS 0x00000002 /* Disable 64bit wide data cycle*/ +#define GREG_BIFCFG_M66EN 0x00000004 /* Set if on 66Mhz PCI segment */ + +/* Global BIF Diagnostics Register */ +#define GREG_BIFDIAG_BURSTSM 0x007f0000 /* PCI Burst state machine */ +#define GREG_BIFDIAG_BIFSM 0xff000000 /* BIF state machine */ + +/* Global Software Reset Register. + * + * This register is used to perform a global reset of the RX and TX portions + * of the GEM asic. Setting the RX or TX reset bit will start the reset. + * The driver _MUST_ poll these bits until they clear. One may not attempt + * to program any other part of GEM until the bits clear. 
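+ * (The driver's gem_reset() does this with a bounded udelay() poll loop.)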
+ */ +#define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */ +#define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */ +#define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */ +#define GREG_SWRST_CACHESIZE 0x00ff0000 /* RIO only: cache line size */ +#define GREG_SWRST_CACHE_SHIFT 16 + +/* TX DMA Registers */ +#define TXDMA_KICK 0x2000UL /* TX Kick Register */ +#define TXDMA_CFG 0x2004UL /* TX Configuration Register */ +#define TXDMA_DBLOW 0x2008UL /* TX Desc. Base Low */ +#define TXDMA_DBHI 0x200CUL /* TX Desc. Base High */ +#define TXDMA_FWPTR 0x2014UL /* TX FIFO Write Pointer */ +#define TXDMA_FSWPTR 0x2018UL /* TX FIFO Shadow Write Pointer */ +#define TXDMA_FRPTR 0x201CUL /* TX FIFO Read Pointer */ +#define TXDMA_FSRPTR 0x2020UL /* TX FIFO Shadow Read Pointer */ +#define TXDMA_PCNT 0x2024UL /* TX FIFO Packet Counter */ +#define TXDMA_SMACHINE 0x2028UL /* TX State Machine Register */ +#define TXDMA_DPLOW 0x2030UL /* TX Data Pointer Low */ +#define TXDMA_DPHI 0x2034UL /* TX Data Pointer High */ +#define TXDMA_TXDONE 0x2100UL /* TX Completion Register */ +#define TXDMA_FADDR 0x2104UL /* TX FIFO Address */ +#define TXDMA_FTAG 0x2108UL /* TX FIFO Tag */ +#define TXDMA_DLOW 0x210CUL /* TX FIFO Data Low */ +#define TXDMA_DHIT1 0x2110UL /* TX FIFO Data HighT1 */ +#define TXDMA_DHIT0 0x2114UL /* TX FIFO Data HighT0 */ +#define TXDMA_FSZ 0x2118UL /* TX FIFO Size */ + +/* TX Kick Register. + * + * This 13-bit register is programmed by the driver to hold the descriptor + * entry index which follows the last valid transmit descriptor. + */ + +/* TX Completion Register. + * + * This 13-bit register is updated by GEM to hold to descriptor entry index + * which follows the last descriptor already processed by GEM. Note that + * this value is mirrored in GREG_STAT which eliminates the need to even + * access this register in the driver during interrupt processing. + */ + +/* TX Configuration Register. + * + * Note that TXDMA_CFG_FTHRESH, the TX FIFO Threshold, is an obsolete feature + * that was meant to be used with jumbo packets. It should be set to the + * maximum value of 0x4ff, else one risks getting TX MAC Underrun errors. + */ +#define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */ +#define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */ +#define TXDMA_CFG_RINGSZ_32 0x00000000 /* 32 TX descriptors */ +#define TXDMA_CFG_RINGSZ_64 0x00000002 /* 64 TX descriptors */ +#define TXDMA_CFG_RINGSZ_128 0x00000004 /* 128 TX descriptors */ +#define TXDMA_CFG_RINGSZ_256 0x00000006 /* 256 TX descriptors */ +#define TXDMA_CFG_RINGSZ_512 0x00000008 /* 512 TX descriptors */ +#define TXDMA_CFG_RINGSZ_1K 0x0000000a /* 1024 TX descriptors */ +#define TXDMA_CFG_RINGSZ_2K 0x0000000c /* 2048 TX descriptors */ +#define TXDMA_CFG_RINGSZ_4K 0x0000000e /* 4096 TX descriptors */ +#define TXDMA_CFG_RINGSZ_8K 0x00000010 /* 8192 TX descriptors */ +#define TXDMA_CFG_PIOSEL 0x00000020 /* Enable TX FIFO PIO from cpu */ +#define TXDMA_CFG_FTHRESH 0x001ffc00 /* TX FIFO Threshold, obsolete */ +#define TXDMA_CFG_PMODE 0x00200000 /* TXALL irq means TX FIFO empty*/ + +/* TX Descriptor Base Low/High. + * + * These two registers store the 53 most significant bits of the base address + * of the TX descriptor table. The 11 least significant bits are always + * zero. As a result, the TX descriptor table must be 2K aligned. + */ + +/* The rest of the TXDMA_* registers are for diagnostics and debug, I will document + * them later. 
-DaveM + */ + +/* WakeOnLan Registers */ +#define WOL_MATCH0 0x3000UL +#define WOL_MATCH1 0x3004UL +#define WOL_MATCH2 0x3008UL +#define WOL_MCOUNT 0x300CUL +#define WOL_WAKECSR 0x3010UL + +/* WOL Match count register + */ +#define WOL_MCOUNT_N 0x00000010 +#define WOL_MCOUNT_M 0x00000000 /* 0 << 8 */ + +#define WOL_WAKECSR_ENABLE 0x00000001 +#define WOL_WAKECSR_MII 0x00000002 +#define WOL_WAKECSR_SEEN 0x00000004 +#define WOL_WAKECSR_FILT_UCAST 0x00000008 +#define WOL_WAKECSR_FILT_MCAST 0x00000010 +#define WOL_WAKECSR_FILT_BCAST 0x00000020 +#define WOL_WAKECSR_FILT_SEEN 0x00000040 + + +/* Receive DMA Registers */ +#define RXDMA_CFG 0x4000UL /* RX Configuration Register */ +#define RXDMA_DBLOW 0x4004UL /* RX Descriptor Base Low */ +#define RXDMA_DBHI 0x4008UL /* RX Descriptor Base High */ +#define RXDMA_FWPTR 0x400CUL /* RX FIFO Write Pointer */ +#define RXDMA_FSWPTR 0x4010UL /* RX FIFO Shadow Write Pointer */ +#define RXDMA_FRPTR 0x4014UL /* RX FIFO Read Pointer */ +#define RXDMA_PCNT 0x4018UL /* RX FIFO Packet Counter */ +#define RXDMA_SMACHINE 0x401CUL /* RX State Machine Register */ +#define RXDMA_PTHRESH 0x4020UL /* Pause Thresholds */ +#define RXDMA_DPLOW 0x4024UL /* RX Data Pointer Low */ +#define RXDMA_DPHI 0x4028UL /* RX Data Pointer High */ +#define RXDMA_KICK 0x4100UL /* RX Kick Register */ +#define RXDMA_DONE 0x4104UL /* RX Completion Register */ +#define RXDMA_BLANK 0x4108UL /* RX Blanking Register */ +#define RXDMA_FADDR 0x410CUL /* RX FIFO Address */ +#define RXDMA_FTAG 0x4110UL /* RX FIFO Tag */ +#define RXDMA_DLOW 0x4114UL /* RX FIFO Data Low */ +#define RXDMA_DHIT1 0x4118UL /* RX FIFO Data HighT0 */ +#define RXDMA_DHIT0 0x411CUL /* RX FIFO Data HighT1 */ +#define RXDMA_FSZ 0x4120UL /* RX FIFO Size */ + +/* RX Configuration Register. */ +#define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */ +#define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */ +#define RXDMA_CFG_RINGSZ_32 0x00000000 /* - 32 entries */ +#define RXDMA_CFG_RINGSZ_64 0x00000002 /* - 64 entries */ +#define RXDMA_CFG_RINGSZ_128 0x00000004 /* - 128 entries */ +#define RXDMA_CFG_RINGSZ_256 0x00000006 /* - 256 entries */ +#define RXDMA_CFG_RINGSZ_512 0x00000008 /* - 512 entries */ +#define RXDMA_CFG_RINGSZ_1K 0x0000000a /* - 1024 entries */ +#define RXDMA_CFG_RINGSZ_2K 0x0000000c /* - 2048 entries */ +#define RXDMA_CFG_RINGSZ_4K 0x0000000e /* - 4096 entries */ +#define RXDMA_CFG_RINGSZ_8K 0x00000010 /* - 8192 entries */ +#define RXDMA_CFG_RINGSZ_BDISAB 0x00000020 /* Disable RX desc batching */ +#define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */ +#define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */ +#define RXDMA_CFG_FTHRESH 0x07000000 /* RX FIFO dma start threshold */ +#define RXDMA_CFG_FTHRESH_64 0x00000000 /* - 64 bytes */ +#define RXDMA_CFG_FTHRESH_128 0x01000000 /* - 128 bytes */ +#define RXDMA_CFG_FTHRESH_256 0x02000000 /* - 256 bytes */ +#define RXDMA_CFG_FTHRESH_512 0x03000000 /* - 512 bytes */ +#define RXDMA_CFG_FTHRESH_1K 0x04000000 /* - 1024 bytes */ +#define RXDMA_CFG_FTHRESH_2K 0x05000000 /* - 2048 bytes */ + +/* RX Descriptor Base Low/High. + * + * These two registers store the 53 most significant bits of the base address + * of the RX descriptor table. The 11 least significant bits are always + * zero. As a result, the RX descriptor table must be 2K aligned. + */ + +/* RX PAUSE Thresholds. + * + * These values determine when XOFF and XON PAUSE frames are emitted by + * GEM. The thresholds measure RX FIFO occupancy in units of 64 bytes. 
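+ *
+ * Because the fields below are expressed in 64-byte units, a driver
+ * converts its byte counts before programming the register.  A hedged
+ * sketch (the byte values, "regs" and writel() are assumptions used
+ * only for illustration; the XON field starts at bit 12):
+ *
+ *	u32 off_units = rx_pause_off_bytes / 64;
+ *	u32 on_units = rx_pause_on_bytes / 64;
+ *
+ *	writel((off_units & RXDMA_PTHRESH_OFF) |
+ *	       ((on_units << 12) & RXDMA_PTHRESH_ON),
+ *	       regs + RXDMA_PTHRESH);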
+ */ +#define RXDMA_PTHRESH_OFF 0x000001ff /* XOFF emitted w/FIFO > this */ +#define RXDMA_PTHRESH_ON 0x001ff000 /* XON emitted w/FIFO < this */ + +/* RX Kick Register. + * + * This 13-bit register is written by the host CPU and holds the last + * valid RX descriptor number plus one. This is, if 'N' is written to + * this register, it means that all RX descriptors up to but excluding + * 'N' are valid. + * + * The hardware requires that RX descriptors are posted in increments + * of 4. This means 'N' must be a multiple of four. For the best + * performance, the first new descriptor being posted should be (PCI) + * cache line aligned. + */ + +/* RX Completion Register. + * + * This 13-bit register is updated by GEM to indicate which RX descriptors + * have already been used for receive frames. All descriptors up to but + * excluding the value in this register are ready to be processed. GEM + * updates this register value after the RX FIFO empties completely into + * the RX descriptor's buffer, but before the RX_DONE bit is set in the + * interrupt status register. + */ + +/* RX Blanking Register. */ +#define RXDMA_BLANK_IPKTS 0x000001ff /* RX_DONE asserted after this + * many packets received since + * previous RX_DONE. + */ +#define RXDMA_BLANK_ITIME 0x000ff000 /* RX_DONE asserted after this + * many clocks (measured in 2048 + * PCI clocks) were counted since + * the previous RX_DONE. + */ + +/* RX FIFO Size. + * + * This 11-bit read-only register indicates how large, in units of 64-bytes, + * the RX FIFO is. The driver uses this to properly configure the RX PAUSE + * thresholds. + */ + +/* The rest of the RXDMA_* registers are for diagnostics and debug, I will document + * them later. -DaveM + */ + +/* MAC Registers */ +#define MAC_TXRST 0x6000UL /* TX MAC Software Reset Command*/ +#define MAC_RXRST 0x6004UL /* RX MAC Software Reset Command*/ +#define MAC_SNDPAUSE 0x6008UL /* Send Pause Command Register */ +#define MAC_TXSTAT 0x6010UL /* TX MAC Status Register */ +#define MAC_RXSTAT 0x6014UL /* RX MAC Status Register */ +#define MAC_CSTAT 0x6018UL /* MAC Control Status Register */ +#define MAC_TXMASK 0x6020UL /* TX MAC Mask Register */ +#define MAC_RXMASK 0x6024UL /* RX MAC Mask Register */ +#define MAC_MCMASK 0x6028UL /* MAC Control Mask Register */ +#define MAC_TXCFG 0x6030UL /* TX MAC Configuration Register*/ +#define MAC_RXCFG 0x6034UL /* RX MAC Configuration Register*/ +#define MAC_MCCFG 0x6038UL /* MAC Control Config Register */ +#define MAC_XIFCFG 0x603CUL /* XIF Configuration Register */ +#define MAC_IPG0 0x6040UL /* InterPacketGap0 Register */ +#define MAC_IPG1 0x6044UL /* InterPacketGap1 Register */ +#define MAC_IPG2 0x6048UL /* InterPacketGap2 Register */ +#define MAC_STIME 0x604CUL /* SlotTime Register */ +#define MAC_MINFSZ 0x6050UL /* MinFrameSize Register */ +#define MAC_MAXFSZ 0x6054UL /* MaxFrameSize Register */ +#define MAC_PASIZE 0x6058UL /* PA Size Register */ +#define MAC_JAMSIZE 0x605CUL /* JamSize Register */ +#define MAC_ATTLIM 0x6060UL /* Attempt Limit Register */ +#define MAC_MCTYPE 0x6064UL /* MAC Control Type Register */ +#define MAC_ADDR0 0x6080UL /* MAC Address 0 Register */ +#define MAC_ADDR1 0x6084UL /* MAC Address 1 Register */ +#define MAC_ADDR2 0x6088UL /* MAC Address 2 Register */ +#define MAC_ADDR3 0x608CUL /* MAC Address 3 Register */ +#define MAC_ADDR4 0x6090UL /* MAC Address 4 Register */ +#define MAC_ADDR5 0x6094UL /* MAC Address 5 Register */ +#define MAC_ADDR6 0x6098UL /* MAC Address 6 Register */ +#define MAC_ADDR7 0x609CUL /* MAC Address 7 
Register */ +#define MAC_ADDR8 0x60A0UL /* MAC Address 8 Register */ +#define MAC_AFILT0 0x60A4UL /* Address Filter 0 Register */ +#define MAC_AFILT1 0x60A8UL /* Address Filter 1 Register */ +#define MAC_AFILT2 0x60ACUL /* Address Filter 2 Register */ +#define MAC_AF21MSK 0x60B0UL /* Address Filter 2&1 Mask Reg */ +#define MAC_AF0MSK 0x60B4UL /* Address Filter 0 Mask Reg */ +#define MAC_HASH0 0x60C0UL /* Hash Table 0 Register */ +#define MAC_HASH1 0x60C4UL /* Hash Table 1 Register */ +#define MAC_HASH2 0x60C8UL /* Hash Table 2 Register */ +#define MAC_HASH3 0x60CCUL /* Hash Table 3 Register */ +#define MAC_HASH4 0x60D0UL /* Hash Table 4 Register */ +#define MAC_HASH5 0x60D4UL /* Hash Table 5 Register */ +#define MAC_HASH6 0x60D8UL /* Hash Table 6 Register */ +#define MAC_HASH7 0x60DCUL /* Hash Table 7 Register */ +#define MAC_HASH8 0x60E0UL /* Hash Table 8 Register */ +#define MAC_HASH9 0x60E4UL /* Hash Table 9 Register */ +#define MAC_HASH10 0x60E8UL /* Hash Table 10 Register */ +#define MAC_HASH11 0x60ECUL /* Hash Table 11 Register */ +#define MAC_HASH12 0x60F0UL /* Hash Table 12 Register */ +#define MAC_HASH13 0x60F4UL /* Hash Table 13 Register */ +#define MAC_HASH14 0x60F8UL /* Hash Table 14 Register */ +#define MAC_HASH15 0x60FCUL /* Hash Table 15 Register */ +#define MAC_NCOLL 0x6100UL /* Normal Collision Counter */ +#define MAC_FASUCC 0x6104UL /* First Attmpt. Succ Coll Ctr. */ +#define MAC_ECOLL 0x6108UL /* Excessive Collision Counter */ +#define MAC_LCOLL 0x610CUL /* Late Collision Counter */ +#define MAC_DTIMER 0x6110UL /* Defer Timer */ +#define MAC_PATMPS 0x6114UL /* Peak Attempts Register */ +#define MAC_RFCTR 0x6118UL /* Receive Frame Counter */ +#define MAC_LERR 0x611CUL /* Length Error Counter */ +#define MAC_AERR 0x6120UL /* Alignment Error Counter */ +#define MAC_FCSERR 0x6124UL /* FCS Error Counter */ +#define MAC_RXCVERR 0x6128UL /* RX code Violation Error Ctr */ +#define MAC_RANDSEED 0x6130UL /* Random Number Seed Register */ +#define MAC_SMACHINE 0x6134UL /* State Machine Register */ + +/* TX MAC Software Reset Command. */ +#define MAC_TXRST_CMD 0x00000001 /* Start sw reset, self-clears */ + +/* RX MAC Software Reset Command. */ +#define MAC_RXRST_CMD 0x00000001 /* Start sw reset, self-clears */ + +/* Send Pause Command. */ +#define MAC_SNDPAUSE_TS 0x0000ffff /* The pause_time operand used in + * Send_Pause and flow-control + * handshakes. + */ +#define MAC_SNDPAUSE_SP 0x00010000 /* Setting this bit instructs the MAC + * to send a Pause Flow Control + * frame onto the network. + */ + +/* TX MAC Status Register. */ +#define MAC_TXSTAT_XMIT 0x00000001 /* Frame Transmitted */ +#define MAC_TXSTAT_URUN 0x00000002 /* TX Underrun */ +#define MAC_TXSTAT_MPE 0x00000004 /* Max Packet Size Error */ +#define MAC_TXSTAT_NCE 0x00000008 /* Normal Collision Cntr Expire */ +#define MAC_TXSTAT_ECE 0x00000010 /* Excess Collision Cntr Expire */ +#define MAC_TXSTAT_LCE 0x00000020 /* Late Collision Cntr Expire */ +#define MAC_TXSTAT_FCE 0x00000040 /* First Collision Cntr Expire */ +#define MAC_TXSTAT_DTE 0x00000080 /* Defer Timer Expire */ +#define MAC_TXSTAT_PCE 0x00000100 /* Peak Attempts Cntr Expire */ + +/* RX MAC Status Register. 
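+ *
+ * Per the GREG_STAT description above, reading this register is what
+ * clears a pending GREG_STAT_RXMAC condition, and the MAC_RXMASK bits
+ * select which of these events may raise it in the first place.  A
+ * hedged interrupt-time sketch ("regs", readl() and the statistics
+ * bookkeeping are assumptions for illustration only):
+ *
+ *	u32 rxmac_stat = readl(regs + MAC_RXSTAT);
+ *
+ *	if (rxmac_stat & MAC_RXSTAT_OFLW)
+ *		dev->stats.rx_over_errors++;
+ *	if (rxmac_stat & MAC_RXSTAT_ACE)
+ *		dev->stats.rx_frame_errors += 0x10000;
+ *
+ * (the ACE case accounts for a 16-bit alignment error counter that
+ * rolled over, per the Statistics Registers note further below)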
*/ +#define MAC_RXSTAT_RCV 0x00000001 /* Frame Received */ +#define MAC_RXSTAT_OFLW 0x00000002 /* Receive Overflow */ +#define MAC_RXSTAT_FCE 0x00000004 /* Frame Cntr Expire */ +#define MAC_RXSTAT_ACE 0x00000008 /* Align Error Cntr Expire */ +#define MAC_RXSTAT_CCE 0x00000010 /* CRC Error Cntr Expire */ +#define MAC_RXSTAT_LCE 0x00000020 /* Length Error Cntr Expire */ +#define MAC_RXSTAT_VCE 0x00000040 /* Code Violation Cntr Expire */ + +/* MAC Control Status Register. */ +#define MAC_CSTAT_PRCV 0x00000001 /* Pause Received */ +#define MAC_CSTAT_PS 0x00000002 /* Paused State */ +#define MAC_CSTAT_NPS 0x00000004 /* Not Paused State */ +#define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */ + +/* The layout of the MAC_{TX,RX,C}MASK registers is identical to that + * of MAC_{TX,RX,C}STAT. Bits set in MAC_{TX,RX,C}MASK will prevent + * that interrupt type from being signalled to front end of GEM. For + * the interrupt to actually get sent to the cpu, it is necessary to + * properly set the appropriate GREG_IMASK_{TX,RX,}MAC bits as well. + */ + +/* TX MAC Configuration Register. + * + * NOTE: The TX MAC Enable bit must be cleared and polled until + * zero before any other bits in this register are changed. + * + * Also, enabling the Carrier Extension feature of GEM is + * a 3 step process 1) Set TX Carrier Extension 2) Set + * RX Carrier Extension 3) Set Slot Time to 0x200. This + * mode must be enabled when in half-duplex at 1Gbps, else + * it must be disabled. + */ +#define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */ +#define MAC_TXCFG_ICS 0x00000002 /* Ignore Carrier Sense */ +#define MAC_TXCFG_ICOLL 0x00000004 /* Ignore Collisions */ +#define MAC_TXCFG_EIPG0 0x00000008 /* Enable IPG0 */ +#define MAC_TXCFG_NGU 0x00000010 /* Never Give Up */ +#define MAC_TXCFG_NGUL 0x00000020 /* Never Give Up Limit */ +#define MAC_TXCFG_NBO 0x00000040 /* No Backoff */ +#define MAC_TXCFG_SD 0x00000080 /* Slow Down */ +#define MAC_TXCFG_NFCS 0x00000100 /* No FCS */ +#define MAC_TXCFG_TCE 0x00000200 /* TX Carrier Extension */ + +/* RX MAC Configuration Register. + * + * NOTE: The RX MAC Enable bit must be cleared and polled until + * zero before any other bits in this register are changed. + * + * Similar rules apply to the Hash Filter Enable bit when + * programming the hash table registers, and the Address Filter + * Enable bit when programming the address filter registers. + */ +#define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */ +#define MAC_RXCFG_SPAD 0x00000002 /* Strip Pad */ +#define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */ +#define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */ +#define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */ +#define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */ +#define MAC_RXCFG_AFE 0x00000040 /* Address Filter Enable */ +#define MAC_RXCFG_DDE 0x00000080 /* Disable Discard on Error */ +#define MAC_RXCFG_RCE 0x00000100 /* RX Carrier Extension */ + +/* MAC Control Config Register. */ +#define MAC_MCCFG_SPE 0x00000001 /* Send Pause Enable */ +#define MAC_MCCFG_RPE 0x00000002 /* Receive Pause Enable */ +#define MAC_MCCFG_PMC 0x00000004 /* Pass MAC Control */ + +/* XIF Configuration Register. + * + * NOTE: When leaving or entering loopback mode, a global hardware + * init of GEM should be performed. 
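+ *
+ * A hedged configuration sketch using the bits defined just below
+ * (purely illustrative; "regs", writel() and the link speed/duplex
+ * decisions are assumptions made for the example).  OE keeps the MII
+ * output drivers enabled, GMII selects the gigabit clocking and
+ * datapath, and DISE is set only for half duplex so the MAC does not
+ * listen to its own transmissions:
+ *
+ *	u32 xif = MAC_XIFCFG_OE;
+ *
+ *	if (gigabit_link)
+ *		xif |= MAC_XIFCFG_GMII;
+ *	if (!full_duplex)
+ *		xif |= MAC_XIFCFG_DISE;
+ *	writel(xif, regs + MAC_XIFCFG);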
+ */
+#define MAC_XIFCFG_OE 0x00000001 /* MII TX Output Driver Enable */
+#define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */
+#define MAC_XIFCFG_DISE 0x00000004 /* Disable RX path during TX */
+#define MAC_XIFCFG_GMII 0x00000008 /* Use GMII clocks + datapath */
+#define MAC_XIFCFG_MBOE 0x00000010 /* Controls MII_BUF_EN pin */
+#define MAC_XIFCFG_LLED 0x00000020 /* Force LINKLED# active (low) */
+#define MAC_XIFCFG_FLED 0x00000040 /* Force FDPLXLED# active (low) */
+
+/* InterPacketGap0 Register. This 8-bit value is used as an extension
+ * to the InterPacketGap1 Register. Specifically it contributes to the
+ * timing of the RX-to-TX IPG. This value is ignored and presumed to
+ * be zero for TX-to-TX IPG calculations and/or when the Enable IPG0 bit
+ * is cleared in the TX MAC Configuration Register.
+ *
+ * The value in this register is in units of media byte time.
+ *
+ * Recommended value: 0x00
+ */
+
+/* InterPacketGap1 Register. This 8-bit value defines the first 2/3
+ * portion of the Inter Packet Gap.
+ *
+ * The value in this register is in units of media byte time.
+ *
+ * Recommended value: 0x08
+ */
+
+/* InterPacketGap2 Register. This 8-bit value defines the second 1/3
+ * portion of the Inter Packet Gap.
+ *
+ * The value in this register is in units of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Slot Time Register. This 10-bit value specifies the slot time
+ * parameter in units of media byte time. It determines the physical
+ * span of the network.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Minimum Frame Size Register. This 10-bit register specifies the
+ * smallest sized frame the TXMAC will send onto the medium, and the
+ * RXMAC will receive from the medium.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Maximum Frame and Burst Size Register.
+ *
+ * This register specifies two things. First it specifies the maximum
+ * sized frame the TXMAC will send and the RXMAC will recognize as
+ * valid. Second, it specifies the maximum run length of a burst of
+ * packets sent in half-duplex gigabit modes.
+ *
+ * Recommended value: 0x200005ee
+ */
+#define MAC_MAXFSZ_MFS 0x00007fff /* Max Frame Size */
+#define MAC_MAXFSZ_MBS 0x7fff0000 /* Max Burst Size */
+
+/* PA Size Register. This 10-bit register specifies the number of preamble
+ * bytes which will be transmitted at the beginning of each frame. A
+ * value of two or greater should be programmed here.
+ *
+ * Recommended value: 0x07
+ */
+
+/* Jam Size Register. This 4-bit register specifies the duration of
+ * the jam in units of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Attempts Limit Register. This 8-bit register specifies the number
+ * of attempts that the TXMAC will make to transmit a frame, before it
+ * resets its Attempts Counter. After reaching the Attempts Limit the
+ * TXMAC may or may not drop the frame, as determined by the NGU
+ * (Never Give Up) and NGUL (Never Give Up Limit) bits in the TXMAC
+ * Configuration Register.
+ *
+ * Recommended value: 0x10
+ */
+
+/* MAC Control Type Register. This 16-bit register specifies the
+ * "type" field of a MAC Control frame. The TXMAC uses this field to
+ * encapsulate the MAC Control frame for transmission, and the RXMAC
+ * uses it for decoding valid MAC Control frames received from the
+ * network.
+ *
+ * Recommended value: 0x8808
+ */
+
+/* MAC Address Registers. Each of these registers specifies the
+ * ethernet MAC of the interface, 16-bits at a time. Register
+ * 0 specifies bits [47:32], register 1 bits [31:16], and register
+ * 2 bits [15:0].
+ *
+ * Registers 3 through and including 5 specify an alternate
+ * MAC address for the interface.
+ *
+ * Registers 6 through and including 8 specify the MAC Control
+ * Address, which must be the reserved multicast address for MAC
+ * Control frames.
+ *
+ * Example: To program primary station address a:b:c:d:e:f into
+ * the chip.
+ * MAC_Address_2 = (a << 8) | b
+ * MAC_Address_1 = (c << 8) | d
+ * MAC_Address_0 = (e << 8) | f
+ */
+
+/* Address Filter Registers. Registers 0 through 2 specify bit
+ * fields [47:32] through [15:0], respectively, of the address
+ * filter. The Address Filter 2&1 Mask Register denotes the 8-bit
+ * nibble mask for Address Filter Registers 2 and 1. The Address
+ * Filter 0 Mask Register denotes the 16-bit mask for the Address
+ * Filter Register 0.
+ */
+
+/* Hash Table Registers. Registers 0 through 15 specify bit fields
+ * [255:240] through [15:0], respectively, of the hash table.
+ */
+
+/* Statistics Registers. All of these registers are 16-bits and
+ * track occurrences of a specific event. GEM can be configured
+ * to interrupt the host cpu when any of these counters overflow.
+ * They should all be explicitly initialized to zero when the interface
+ * is brought up.
+ */
+
+/* Random Number Seed Register. This 10-bit value is used as the
+ * RNG seed inside GEM for the CSMA/CD backoff algorithm. It is
+ * recommended to program this register to the 10 LSBs of the
+ * interface's MAC address.
+ */
+
+/* Pause Timer, read-only. This 16-bit timer is used to time the pause
+ * interval as indicated by a received pause flow control frame.
+ * A non-zero value in this timer indicates that the MAC is currently in
+ * the paused state.
+ */
+
+/* MIF Registers */
+#define MIF_BBCLK 0x6200UL /* MIF Bit-Bang Clock */
+#define MIF_BBDATA 0x6204UL /* MIF Bit-Bang Data */
+#define MIF_BBOENAB 0x6208UL /* MIF Bit-Bang Output Enable */
+#define MIF_FRAME 0x620CUL /* MIF Frame/Output Register */
+#define MIF_CFG 0x6210UL /* MIF Configuration Register */
+#define MIF_MASK 0x6214UL /* MIF Mask Register */
+#define MIF_STATUS 0x6218UL /* MIF Status Register */
+#define MIF_SMACHINE 0x621CUL /* MIF State Machine Register */
+
+/* MIF Bit-Bang Clock. This 1-bit register is used to generate the
+ * MDC clock waveform on the MII Management Interface when the MIF is
+ * programmed in the "Bit-Bang" mode. Writing a '1' after a '0' into
+ * this register will create a rising edge on the MDC, while writing
+ * a '0' after a '1' will create a falling edge. For every bit that
+ * is transferred on the management interface, both edges have to be
+ * generated.
+ */
+
+/* MIF Bit-Bang Data. This 1-bit register is used to generate the
+ * outgoing data (MDO) on the MII Management Interface when the MIF
+ * is programmed in the "Bit-Bang" mode. The data will be steered to the
+ * appropriate MDIO based on the state of the PHY_Select bit in the MIF
+ * Configuration Register.
+ */
+
+/* MIF Bit-Bang Output Enable. This 1-bit register is used to enable
+ * ('1') or disable ('0') the bi-directional driver on the MII when the
+ * MIF is programmed in the "Bit-Bang" mode. The MDIO should be enabled
+ * when data bits are transferred from the MIF to the transceiver, and it
+ * should be disabled when the interface is idle or when data bits are
+ * transferred from the transceiver to the MIF (data portion of a read
+ * instruction). Only one MDIO will be enabled at a given time, depending
+ * on the state of the PHY_Select bit in the MIF Configuration Register.
+ */
+
+/* MIF Configuration Register. This 15-bit register controls the operation
+ * of the MIF.
+ */
+#define MIF_CFG_PSELECT 0x00000001 /* Xcvr slct: 0=mdio0 1=mdio1 */
+#define MIF_CFG_POLL 0x00000002 /* Enable polling mechanism */
+#define MIF_CFG_BBMODE 0x00000004 /* 1=bit-bang 0=frame mode */
+#define MIF_CFG_PRADDR 0x000000f8 /* Xcvr poll register address */
+#define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */
+#define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */
+#define MIF_CFG_PPADDR 0x00007c00 /* Xcvr poll PHY address */
+
+/* MIF Frame/Output Register. This 32-bit register allows the host to
+ * communicate with a transceiver in frame mode (as opposed to bit-bang
+ * mode). Writes by the host specify an instruction. After issuing an
+ * instruction, the host must poll this register for completion. Also,
+ * after completion this register holds the data returned by the
+ * transceiver if applicable.
+ */
+#define MIF_FRAME_ST 0xc0000000 /* STart of frame */
+#define MIF_FRAME_OP 0x30000000 /* OPcode */
+#define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */
+#define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */
+#define MIF_FRAME_TAMSB 0x00020000 /* Turn Around MSB */
+#define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */
+#define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */
+
+/* MIF Status Register. This register reports status when the MIF is
+ * operating in the poll mode. The poll status field is auto-clearing
+ * on read.
+ */
+#define MIF_STATUS_DATA 0xffff0000 /* Live image of XCVR reg */
+#define MIF_STATUS_STAT 0x0000ffff /* Which bits have changed */
+
+/* MIF Mask Register. This 16-bit register is used when in poll mode
+ * to say which bits of the polled register will cause an interrupt
+ * when changed.
+ */
+
+/* PCS/Serialink Registers */
+#define PCS_MIICTRL 0x9000UL /* PCS MII Control Register */
+#define PCS_MIISTAT 0x9004UL /* PCS MII Status Register */
+#define PCS_MIIADV 0x9008UL /* PCS MII Advertisement Reg */
+#define PCS_MIILP 0x900CUL /* PCS MII Link Partner Ability */
+#define PCS_CFG 0x9010UL /* PCS Configuration Register */
+#define PCS_SMACHINE 0x9014UL /* PCS State Machine Register */
+#define PCS_ISTAT 0x9018UL /* PCS Interrupt Status Reg */
+#define PCS_DMODE 0x9050UL /* Datapath Mode Register */
+#define PCS_SCTRL 0x9054UL /* Serialink Control Register */
+#define PCS_SOS 0x9058UL /* Shared Output Select Reg */
+#define PCS_SSTATE 0x905CUL /* Serialink State Register */
+
+/* PCS MII Control Register. */
+#define PCS_MIICTRL_SPD 0x00000040 /* Read as one, writes ignored */
+#define PCS_MIICTRL_CT 0x00000080 /* Force COL signal active */
+#define PCS_MIICTRL_DM 0x00000100 /* Duplex mode, forced low */
+#define PCS_MIICTRL_RAN 0x00000200 /* Restart auto-neg, self clear */
+#define PCS_MIICTRL_ISO 0x00000400 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_PD 0x00000800 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_ANE 0x00001000 /* Auto-neg enable */
+#define PCS_MIICTRL_SS 0x00002000 /* Read as zero, writes ignored */
+#define PCS_MIICTRL_WB 0x00004000 /* Wrapback, loopback at 10-bit
+ * input side of Serialink
+ */
+#define PCS_MIICTRL_RST 0x00008000 /* Resets PCS, self clearing */
+
+/* PCS MII Status Register.
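+ *
+ * When the PCS/Serialink is the active datapath, link state comes from
+ * this register rather than from an MII PHY.  A hedged sketch ("regs"
+ * and readl() are assumptions for illustration; as with the standard
+ * MII status register, a link failure may be latched until the
+ * register is read again):
+ *
+ *	u32 pcs_miistat = readl(regs + PCS_MIISTAT);
+ *	int link_up = (pcs_miistat & PCS_MIISTAT_LS) != 0;
+ *	int aneg_done = (pcs_miistat & PCS_MIISTAT_ANC) != 0;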
*/ +#define PCS_MIISTAT_EC 0x00000001 /* Ext Capability: Read as zero */ +#define PCS_MIISTAT_JD 0x00000002 /* Jabber Detect: Read as zero */ +#define PCS_MIISTAT_LS 0x00000004 /* Link Status: 1=up 0=down */ +#define PCS_MIISTAT_ANA 0x00000008 /* Auto-neg Ability, always 1 */ +#define PCS_MIISTAT_RF 0x00000010 /* Remote Fault */ +#define PCS_MIISTAT_ANC 0x00000020 /* Auto-neg complete */ +#define PCS_MIISTAT_ES 0x00000100 /* Extended Status, always 1 */ + +/* PCS MII Advertisement Register. */ +#define PCS_MIIADV_FD 0x00000020 /* Advertise Full Duplex */ +#define PCS_MIIADV_HD 0x00000040 /* Advertise Half Duplex */ +#define PCS_MIIADV_SP 0x00000080 /* Advertise Symmetric Pause */ +#define PCS_MIIADV_AP 0x00000100 /* Advertise Asymmetric Pause */ +#define PCS_MIIADV_RF 0x00003000 /* Remote Fault */ +#define PCS_MIIADV_ACK 0x00004000 /* Read-only */ +#define PCS_MIIADV_NP 0x00008000 /* Next-page, forced low */ + +/* PCS MII Link Partner Ability Register. This register is equivalent + * to the Link Partnet Ability Register of the standard MII register set. + * It's layout corresponds to the PCS MII Advertisement Register. + */ + +/* PCS Configuration Register. */ +#define PCS_CFG_ENABLE 0x00000001 /* Must be zero while changing + * PCS MII advertisement reg. + */ +#define PCS_CFG_SDO 0x00000002 /* Signal detect override */ +#define PCS_CFG_SDL 0x00000004 /* Signal detect active low */ +#define PCS_CFG_JS 0x00000018 /* Jitter-study: + * 0 = normal operation + * 1 = high-frequency test pattern + * 2 = low-frequency test pattern + * 3 = reserved + */ +#define PCS_CFG_TO 0x00000020 /* 10ms auto-neg timer override */ + +/* PCS Interrupt Status Register. This register is self-clearing + * when read. + */ +#define PCS_ISTAT_LSC 0x00000004 /* Link Status Change */ + +/* Datapath Mode Register. */ +#define PCS_DMODE_SM 0x00000001 /* 1 = use internal Serialink */ +#define PCS_DMODE_ESM 0x00000002 /* External SERDES mode */ +#define PCS_DMODE_MGM 0x00000004 /* MII/GMII mode */ +#define PCS_DMODE_GMOE 0x00000008 /* GMII Output Enable */ + +/* Serialink Control Register. + * + * NOTE: When in SERDES mode, the loopback bit has inverse logic. + */ +#define PCS_SCTRL_LOOP 0x00000001 /* Loopback enable */ +#define PCS_SCTRL_ESCD 0x00000002 /* Enable sync char detection */ +#define PCS_SCTRL_LOCK 0x00000004 /* Lock to reference clock */ +#define PCS_SCTRL_EMP 0x00000018 /* Output driver emphasis */ +#define PCS_SCTRL_STEST 0x000001c0 /* Self test patterns */ +#define PCS_SCTRL_PDWN 0x00000200 /* Software power-down */ +#define PCS_SCTRL_RXZ 0x00000c00 /* PLL input to Serialink */ +#define PCS_SCTRL_RXP 0x00003000 /* PLL input to Serialink */ +#define PCS_SCTRL_TXZ 0x0000c000 /* PLL input to Serialink */ +#define PCS_SCTRL_TXP 0x00030000 /* PLL input to Serialink */ + +/* Shared Output Select Register. For test and debug, allows multiplexing + * test outputs into the PROM address pins. Set to zero for normal + * operation. + */ +#define PCS_SOS_PADDR 0x00000003 /* PROM Address */ + +/* PROM Image Space */ +#define PROM_START 0x100000UL /* Expansion ROM run time access*/ +#define PROM_SIZE 0x0fffffUL /* Size of ROM */ +#define PROM_END 0x200000UL /* End of ROM */ + +/* MII definitions missing from mii.h */ + +#define BMCR_SPD2 0x0040 /* Gigabit enable? 
(bcm5411) */ +#define LPA_PAUSE 0x0400 + +/* More PHY registers (specific to Broadcom models) */ + +/* MII BCM5201 MULTIPHY interrupt register */ +#define MII_BCM5201_INTERRUPT 0x1A +#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000 + +#define MII_BCM5201_AUXMODE2 0x1B +#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008 + +#define MII_BCM5201_MULTIPHY 0x1E + +/* MII BCM5201 MULTIPHY register bits */ +#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002 +#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008 + +/* MII BCM5400 1000-BASET Control register */ +#define MII_BCM5400_GB_CONTROL 0x09 +#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 + +/* MII BCM5400 AUXCONTROL register */ +#define MII_BCM5400_AUXCONTROL 0x18 +#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004 + +/* MII BCM5400 AUXSTATUS register */ +#define MII_BCM5400_AUXSTATUS 0x19 +#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700 +#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8 + +/* When it can, GEM internally caches 4 aligned TX descriptors + * at a time, so that it can use full cacheline DMA reads. + * + * Note that unlike HME, there is no ownership bit in the descriptor + * control word. The same functionality is obtained via the TX-Kick + * and TX-Complete registers. As a result, GEM need not write back + * updated values to the TX descriptor ring, it only performs reads. + * + * Since TX descriptors are never modified by GEM, the driver can + * use the buffer DMA address as a place to keep track of allocated + * DMA mappings for a transmitted packet. + */ +struct gem_txd { + __le64 control_word; + __le64 buffer; +}; + +#define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */ +#define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */ +#define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */ +#define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */ +#define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */ +#define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */ +#define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */ +#define TXDCTRL_NOCRC 0x0000000200000000ULL /* No CRC Present */ + +/* GEM requires that RX descriptors are provided four at a time, + * aligned. Also, the RX ring may not wrap around. This means that + * there will be at least 4 unused descriptor entries in the middle + * of the RX ring at all times. + * + * Similar to HME, GEM assumes that it can write garbage bytes before + * the beginning of the buffer and right after the end in order to DMA + * whole cachelines. + * + * Unlike for TX, GEM does update the status word in the RX descriptors + * when packets arrive. Therefore an ownership bit does exist in the + * RX descriptors. It is advisory, GEM clears it but does not check + * it in any way. So when buffers are posted to the RX ring (via the + * RX Kick register) by the driver it must make sure the buffers are + * truly ready and that the ownership bits are set properly. + * + * Even though GEM modifies the RX descriptors, it guarantees that the + * buffer DMA address field will stay the same when it performs these + * updates. Therefore it can be used to keep track of DMA mappings + * by the host driver just as in the TX descriptor case above. 
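+ *
+ * A hedged refill sketch (the ring indexing, "dma_addr", "regs" and the
+ * kick bookkeeping are assumptions made only for illustration;
+ * RXDCTRL_FRESH(), defined below, already includes the ownership bit):
+ *
+ *	struct gem_rxd *rxd = &gp->init_block->rxd[entry];
+ *
+ *	rxd->buffer = cpu_to_le64(dma_addr);
+ *	dma_wmb();
+ *	rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+ *
+ * Because descriptors must be handed to the chip four at a time, the
+ * RX kick register is only advanced on a multiple-of-four boundary:
+ *
+ *	if ((entry & 3) == 3)
+ *		writel(entry + 1, regs + RXDMA_KICK);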
+ */ +struct gem_rxd { + __le64 status_word; + __le64 buffer; +}; + +#define RXDCTRL_TCPCSUM 0x000000000000ffffULL /* TCP Pseudo-CSUM */ +#define RXDCTRL_BUFSZ 0x000000007fff0000ULL /* Buffer Size */ +#define RXDCTRL_OWN 0x0000000080000000ULL /* GEM owns this entry */ +#define RXDCTRL_HASHVAL 0x0ffff00000000000ULL /* Hash Value */ +#define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */ +#define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */ +#define RXDCTRL_BAD 0x4000000000000000ULL /* Frame has bad CRC */ + +#define RXDCTRL_FRESH(gp) \ + ((((RX_BUF_ALLOC_SIZE(gp) - RX_OFFSET) << 16) & RXDCTRL_BUFSZ) | \ + RXDCTRL_OWN) + +#define TX_RING_SIZE 128 +#define RX_RING_SIZE 128 + +#if TX_RING_SIZE == 32 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_32 +#elif TX_RING_SIZE == 64 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_64 +#elif TX_RING_SIZE == 128 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_128 +#elif TX_RING_SIZE == 256 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_256 +#elif TX_RING_SIZE == 512 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_512 +#elif TX_RING_SIZE == 1024 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_1K +#elif TX_RING_SIZE == 2048 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_2K +#elif TX_RING_SIZE == 4096 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_4K +#elif TX_RING_SIZE == 8192 +#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_8K +#else +#error TX_RING_SIZE value is illegal... +#endif + +#if RX_RING_SIZE == 32 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_32 +#elif RX_RING_SIZE == 64 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_64 +#elif RX_RING_SIZE == 128 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_128 +#elif RX_RING_SIZE == 256 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_256 +#elif RX_RING_SIZE == 512 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_512 +#elif RX_RING_SIZE == 1024 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_1K +#elif RX_RING_SIZE == 2048 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_2K +#elif RX_RING_SIZE == 4096 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_4K +#elif RX_RING_SIZE == 8192 +#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_8K +#else +#error RX_RING_SIZE is illegal... +#endif + +#define NEXT_TX(N) (((N) + 1) & (TX_RING_SIZE - 1)) +#define NEXT_RX(N) (((N) + 1) & (RX_RING_SIZE - 1)) + +#define TX_BUFFS_AVAIL(GP) \ + (((GP)->tx_old <= (GP)->tx_new) ? 
\ + (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new : \ + (GP)->tx_old - (GP)->tx_new - 1) + +#define RX_OFFSET 2 +#define RX_BUF_ALLOC_SIZE(gp) ((gp)->rx_buf_sz + 28 + RX_OFFSET + 64) + +#define RX_COPY_THRESHOLD 256 + +#if TX_RING_SIZE < 128 +#define INIT_BLOCK_TX_RING_SIZE 128 +#else +#define INIT_BLOCK_TX_RING_SIZE TX_RING_SIZE +#endif + +#if RX_RING_SIZE < 128 +#define INIT_BLOCK_RX_RING_SIZE 128 +#else +#define INIT_BLOCK_RX_RING_SIZE RX_RING_SIZE +#endif + +struct gem_init_block { + struct gem_txd txd[INIT_BLOCK_TX_RING_SIZE]; + struct gem_rxd rxd[INIT_BLOCK_RX_RING_SIZE]; +}; + +enum gem_phy_type { + phy_mii_mdio0, + phy_mii_mdio1, + phy_serialink, + phy_serdes, +}; + +enum link_state { + link_down = 0, /* No link, will retry */ + link_aneg, /* Autoneg in progress */ + link_force_try, /* Try Forced link speed */ + link_force_ret, /* Forced mode worked, retrying autoneg */ + link_force_ok, /* Stay in forced mode */ + link_up /* Link is up */ +}; + +struct gem { + void __iomem *regs; + int rx_new, rx_old; + int tx_new, tx_old; + + unsigned int has_wol : 1; /* chip supports wake-on-lan */ + unsigned int asleep_wol : 1; /* was asleep with WOL enabled */ + + int cell_enabled; + u32 msg_enable; + u32 status; + + struct napi_struct napi; + + int tx_fifo_sz; + int rx_fifo_sz; + int rx_pause_off; + int rx_pause_on; + int rx_buf_sz; + u64 pause_entered; + u16 pause_last_time_recvd; + u32 mac_rx_cfg; + u32 swrst_base; + + int want_autoneg; + int last_forced_speed; + enum link_state lstate; + struct timer_list link_timer; + int timer_ticks; + int wake_on_lan; + struct work_struct reset_task; + volatile int reset_task_pending; + + enum gem_phy_type phy_type; + struct mii_phy phy_mii; + int mii_phy_addr; + + struct gem_init_block *init_block; + struct sk_buff *rx_skbs[RX_RING_SIZE]; + struct sk_buff *tx_skbs[TX_RING_SIZE]; + dma_addr_t gblock_dvma; + + struct pci_dev *pdev; + struct net_device *dev; +#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) + struct device_node *of_node; +#endif +}; + +#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \ + gp->phy_mii.def && gp->phy_mii.def->ops) + +#endif /* _SUNGEM_H */ diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c new file mode 100644 index 000000000..cf4dcff05 --- /dev/null +++ b/drivers/net/ethernet/sun/sunhme.c @@ -0,0 +1,3391 @@ +/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching, + * auto carrier detecting ethernet driver. Also known as the + * "Happy Meal Ethernet" found on SunSwift SBUS cards. + * + * Copyright (C) 1996, 1998, 1999, 2002, 2003, + * 2006, 2008 David S. Miller (davem@davemloft.net) + * + * Changes : + * 2000/11/11 Willy Tarreau + * - port to non-sparc architectures. Tested only on x86 and + * only currently works with QFE PCI cards. 
+ * - ability to specify the MAC address at module load time by passing this + * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_SPARC +#include +#include +#include +#include +#include +#include +#include +#endif +#include + +#include +#include + +#ifdef CONFIG_PCI +#include +#endif + +#include "sunhme.h" + +#define DRV_NAME "sunhme" +#define DRV_VERSION "3.10" +#define DRV_RELDATE "August 26, 2008" +#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" + +static char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; + +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver"); +MODULE_LICENSE("GPL"); + +static int macaddr[6]; + +/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */ +module_param_array(macaddr, int, NULL, 0); +MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set"); + +#ifdef CONFIG_SBUS +static struct quattro *qfe_sbus_list; +#endif + +#ifdef CONFIG_PCI +static struct quattro *qfe_pci_list; +#endif + +#undef HMEDEBUG +#undef SXDEBUG +#undef RXDEBUG +#undef TXDEBUG +#undef TXLOGGING + +#ifdef TXLOGGING +struct hme_tx_logent { + unsigned int tstamp; + int tx_new, tx_old; + unsigned int action; +#define TXLOG_ACTION_IRQ 0x01 +#define TXLOG_ACTION_TXMIT 0x02 +#define TXLOG_ACTION_TBUSY 0x04 +#define TXLOG_ACTION_NBUFS 0x08 + unsigned int status; +}; +#define TX_LOG_LEN 128 +static struct hme_tx_logent tx_log[TX_LOG_LEN]; +static int txlog_cur_entry; +static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s) +{ + struct hme_tx_logent *tlp; + unsigned long flags; + + local_irq_save(flags); + tlp = &tx_log[txlog_cur_entry]; + tlp->tstamp = (unsigned int)jiffies; + tlp->tx_new = hp->tx_new; + tlp->tx_old = hp->tx_old; + tlp->action = a; + tlp->status = s; + txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); + local_irq_restore(flags); +} +static __inline__ void tx_dump_log(void) +{ + int i, this; + + this = txlog_cur_entry; + for (i = 0; i < TX_LOG_LEN; i++) { + printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i, + tx_log[this].tstamp, + tx_log[this].tx_new, tx_log[this].tx_old, + tx_log[this].action, tx_log[this].status); + this = (this + 1) & (TX_LOG_LEN - 1); + } +} +static __inline__ void tx_dump_ring(struct happy_meal *hp) +{ + struct hmeal_init_block *hb = hp->happy_block; + struct happy_meal_txd *tp = &hb->happy_meal_txd[0]; + int i; + + for (i = 0; i < TX_RING_SIZE; i+=4) { + printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n", + i, i + 4, + le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr), + le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr), + le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr), + le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr)); + } +} +#else +#define tx_add_log(hp, a, s) do { } while(0) +#define tx_dump_log() do { } while(0) +#define tx_dump_ring(hp) do { } while(0) +#endif + +#ifdef HMEDEBUG +#define HMD(x) printk x +#else +#define HMD(x) +#endif + +/* #define AUTO_SWITCH_DEBUG */ + +#ifdef AUTO_SWITCH_DEBUG +#define ASD(x) printk x +#else +#define ASD(x) +#endif + +#define DEFAULT_IPG0 16 /* For lance-mode only */ 
+#define DEFAULT_IPG1 8 /* For all modes */ +#define DEFAULT_IPG2 4 /* For all modes */ +#define DEFAULT_JAMSIZE 4 /* Toe jam */ + +/* NOTE: In the descriptor writes one _must_ write the address + * member _first_. The card must not be allowed to see + * the updated descriptor flags until the address is + * correct. I've added a write memory barrier between + * the two stores so that I can sleep well at night... -DaveM + */ + +#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) +static void sbus_hme_write32(void __iomem *reg, u32 val) +{ + sbus_writel(val, reg); +} + +static u32 sbus_hme_read32(void __iomem *reg) +{ + return sbus_readl(reg); +} + +static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) +{ + rxd->rx_addr = (__force hme32)addr; + dma_wmb(); + rxd->rx_flags = (__force hme32)flags; +} + +static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) +{ + txd->tx_addr = (__force hme32)addr; + dma_wmb(); + txd->tx_flags = (__force hme32)flags; +} + +static u32 sbus_hme_read_desc32(hme32 *p) +{ + return (__force u32)*p; +} + +static void pci_hme_write32(void __iomem *reg, u32 val) +{ + writel(val, reg); +} + +static u32 pci_hme_read32(void __iomem *reg) +{ + return readl(reg); +} + +static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) +{ + rxd->rx_addr = (__force hme32)cpu_to_le32(addr); + dma_wmb(); + rxd->rx_flags = (__force hme32)cpu_to_le32(flags); +} + +static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) +{ + txd->tx_addr = (__force hme32)cpu_to_le32(addr); + dma_wmb(); + txd->tx_flags = (__force hme32)cpu_to_le32(flags); +} + +static u32 pci_hme_read_desc32(hme32 *p) +{ + return le32_to_cpup((__le32 *)p); +} + +#define hme_write32(__hp, __reg, __val) \ + ((__hp)->write32((__reg), (__val))) +#define hme_read32(__hp, __reg) \ + ((__hp)->read32(__reg)) +#define hme_write_rxd(__hp, __rxd, __flags, __addr) \ + ((__hp)->write_rxd((__rxd), (__flags), (__addr))) +#define hme_write_txd(__hp, __txd, __flags, __addr) \ + ((__hp)->write_txd((__txd), (__flags), (__addr))) +#define hme_read_desc32(__hp, __p) \ + ((__hp)->read_desc32(__p)) +#define hme_dma_map(__hp, __ptr, __size, __dir) \ + ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir))) +#define hme_dma_unmap(__hp, __addr, __size, __dir) \ + ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir))) +#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ + ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))) +#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ + ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))) +#else +#ifdef CONFIG_SBUS +/* SBUS only compilation */ +#define hme_write32(__hp, __reg, __val) \ + sbus_writel((__val), (__reg)) +#define hme_read32(__hp, __reg) \ + sbus_readl(__reg) +#define hme_write_rxd(__hp, __rxd, __flags, __addr) \ +do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \ + dma_wmb(); \ + (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \ +} while(0) +#define hme_write_txd(__hp, __txd, __flags, __addr) \ +do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ + dma_wmb(); \ + (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ +} while(0) +#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) +#define hme_dma_map(__hp, __ptr, __size, __dir) \ + dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) +#define hme_dma_unmap(__hp, __addr, __size, __dir) \ + dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) 
+#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ + dma_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) +#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ + dma_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) +#else +/* PCI only compilation */ +#define hme_write32(__hp, __reg, __val) \ + writel((__val), (__reg)) +#define hme_read32(__hp, __reg) \ + readl(__reg) +#define hme_write_rxd(__hp, __rxd, __flags, __addr) \ +do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \ + dma_wmb(); \ + (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \ +} while(0) +#define hme_write_txd(__hp, __txd, __flags, __addr) \ +do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \ + dma_wmb(); \ + (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \ +} while(0) +static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) +{ + return le32_to_cpup((__le32 *)p); +} +#define hme_dma_map(__hp, __ptr, __size, __dir) \ + pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) +#define hme_dma_unmap(__hp, __addr, __size, __dir) \ + pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) +#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ + pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) +#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ + pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) +#endif +#endif + + +/* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */ +static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit) +{ + hme_write32(hp, tregs + TCVR_BBDATA, bit); + hme_write32(hp, tregs + TCVR_BBCLOCK, 0); + hme_write32(hp, tregs + TCVR_BBCLOCK, 1); +} + +#if 0 +static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal) +{ + u32 ret; + + hme_write32(hp, tregs + TCVR_BBCLOCK, 0); + hme_write32(hp, tregs + TCVR_BBCLOCK, 1); + ret = hme_read32(hp, tregs + TCVR_CFG); + if (internal) + ret &= TCV_CFG_MDIO0; + else + ret &= TCV_CFG_MDIO1; + + return ret; +} +#endif + +static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal) +{ + u32 retval; + + hme_write32(hp, tregs + TCVR_BBCLOCK, 0); + udelay(1); + retval = hme_read32(hp, tregs + TCVR_CFG); + if (internal) + retval &= TCV_CFG_MDIO0; + else + retval &= TCV_CFG_MDIO1; + hme_write32(hp, tregs + TCVR_BBCLOCK, 1); + + return retval; +} + +#define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */ + +static int happy_meal_bb_read(struct happy_meal *hp, + void __iomem *tregs, int reg) +{ + u32 tmp; + int retval = 0; + int i; + + ASD(("happy_meal_bb_read: reg=%d ", reg)); + + /* Enable the MIF BitBang outputs. */ + hme_write32(hp, tregs + TCVR_BBOENAB, 1); + + /* Force BitBang into the idle state. */ + for (i = 0; i < 32; i++) + BB_PUT_BIT(hp, tregs, 1); + + /* Give it the read sequence. */ + BB_PUT_BIT(hp, tregs, 0); + BB_PUT_BIT(hp, tregs, 1); + BB_PUT_BIT(hp, tregs, 1); + BB_PUT_BIT(hp, tregs, 0); + + /* Give it the PHY address. */ + tmp = hp->paddr & 0xff; + for (i = 4; i >= 0; i--) + BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); + + /* Tell it what register we want to read. */ + tmp = (reg & 0xff); + for (i = 4; i >= 0; i--) + BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); + + /* Close down the MIF BitBang outputs. */ + hme_write32(hp, tregs + TCVR_BBOENAB, 0); + + /* Now read in the value. 
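+	 *
+	 * The first BB_GET_BIT2() call below clocks the turnaround bit
+	 * and discards it, the next sixteen clocks sample the register
+	 * contents, and the trailing calls simply run the management
+	 * interface back to an idle state.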
*/ + (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); + for (i = 15; i >= 0; i--) + retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); + (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); + (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); + (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); + ASD(("value=%x\n", retval)); + return retval; +} + +static void happy_meal_bb_write(struct happy_meal *hp, + void __iomem *tregs, int reg, + unsigned short value) +{ + u32 tmp; + int i; + + ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value)); + + /* Enable the MIF BitBang outputs. */ + hme_write32(hp, tregs + TCVR_BBOENAB, 1); + + /* Force BitBang into the idle state. */ + for (i = 0; i < 32; i++) + BB_PUT_BIT(hp, tregs, 1); + + /* Give it write sequence. */ + BB_PUT_BIT(hp, tregs, 0); + BB_PUT_BIT(hp, tregs, 1); + BB_PUT_BIT(hp, tregs, 0); + BB_PUT_BIT(hp, tregs, 1); + + /* Give it the PHY address. */ + tmp = (hp->paddr & 0xff); + for (i = 4; i >= 0; i--) + BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); + + /* Tell it what register we will be writing. */ + tmp = (reg & 0xff); + for (i = 4; i >= 0; i--) + BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); + + /* Tell it to become ready for the bits. */ + BB_PUT_BIT(hp, tregs, 1); + BB_PUT_BIT(hp, tregs, 0); + + for (i = 15; i >= 0; i--) + BB_PUT_BIT(hp, tregs, ((value >> i) & 1)); + + /* Close down the MIF BitBang outputs. */ + hme_write32(hp, tregs + TCVR_BBOENAB, 0); +} + +#define TCVR_READ_TRIES 16 + +static int happy_meal_tcvr_read(struct happy_meal *hp, + void __iomem *tregs, int reg) +{ + int tries = TCVR_READ_TRIES; + int retval; + + ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg)); + if (hp->tcvr_type == none) { + ASD(("no transceiver, value=TCVR_FAILURE\n")); + return TCVR_FAILURE; + } + + if (!(hp->happy_flags & HFLAG_FENABLE)) { + ASD(("doing bit bang\n")); + return happy_meal_bb_read(hp, tregs, reg); + } + + hme_write32(hp, tregs + TCVR_FRAME, + (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18))); + while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) + udelay(20); + if (!tries) { + printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n"); + return TCVR_FAILURE; + } + retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff; + ASD(("value=%04x\n", retval)); + return retval; +} + +#define TCVR_WRITE_TRIES 16 + +static void happy_meal_tcvr_write(struct happy_meal *hp, + void __iomem *tregs, int reg, + unsigned short value) +{ + int tries = TCVR_WRITE_TRIES; + + ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value)); + + /* Welcome to Sun Microsystems, can I take your order please? */ + if (!(hp->happy_flags & HFLAG_FENABLE)) { + happy_meal_bb_write(hp, tregs, reg, value); + return; + } + + /* Would you like fries with that? */ + hme_write32(hp, tregs + TCVR_FRAME, + (FRAME_WRITE | (hp->paddr << 23) | + ((reg & 0xff) << 18) | (value & 0xffff))); + while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) + udelay(20); + + /* Anything else? */ + if (!tries) + printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n"); + + /* Fifty-two cents is your change, have a nice day. */ +} + +/* Auto negotiation. The scheme is very simple. We have a timer routine + * that keeps watching the auto negotiation process as it progresses. + * The DP83840 is first told to start doing it's thing, we set up the time + * and place the timer state machine in it's initial state. 
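+ *
+ * (For scale: each tick of this timer is re-armed about 1.2 seconds out,
+ * see the add_timer() call at the bottom of happy_meal_timer(), so the
+ * tick limits used below amount to roughly ten seconds of arbitration
+ * wait and a few seconds per forced-mode attempt.)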
+ * + * Here the timer peeks at the DP83840 status registers at each click to see + * if the auto negotiation has completed, we assume here that the DP83840 PHY + * will time out at some point and just tell us what (didn't) happen. For + * complete coverage we only allow so many of the ticks at this level to run, + * when this has expired we print a warning message and try another strategy. + * This "other" strategy is to force the interface into various speed/duplex + * configurations and we stop when we see a link-up condition before the + * maximum number of "peek" ticks have occurred. + * + * Once a valid link status has been detected we configure the BigMAC and + * the rest of the Happy Meal to speak the most efficient protocol we could + * get a clean link for. The priority for link configurations, highest first + * is: + * 100 Base-T Full Duplex + * 100 Base-T Half Duplex + * 10 Base-T Full Duplex + * 10 Base-T Half Duplex + * + * We start a new timer now, after a successful auto negotiation status has + * been detected. This timer just waits for the link-up bit to get set in + * the BMCR of the DP83840. When this occurs we print a kernel log message + * describing the link type in use and the fact that it is up. + * + * If a fatal error of some sort is signalled and detected in the interrupt + * service routine, and the chip is reset, or the link is ifconfig'd down + * and then back up, this entire process repeats itself all over again. + */ +static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs) +{ + hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + + /* Downgrade from full to half duplex. Only possible + * via ethtool. + */ + if (hp->sw_bmcr & BMCR_FULLDPLX) { + hp->sw_bmcr &= ~(BMCR_FULLDPLX); + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + return 0; + } + + /* Downgrade from 100 to 10. */ + if (hp->sw_bmcr & BMCR_SPEED100) { + hp->sw_bmcr &= ~(BMCR_SPEED100); + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + return 0; + } + + /* We've tried everything. */ + return -1; +} + +static void display_link_mode(struct happy_meal *hp, void __iomem *tregs) +{ + printk(KERN_INFO "%s: Link is up using ", hp->dev->name); + if (hp->tcvr_type == external) + printk("external "); + else + printk("internal "); + printk("transceiver at "); + hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); + if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) { + if (hp->sw_lpa & LPA_100FULL) + printk("100Mb/s, Full Duplex.\n"); + else + printk("100Mb/s, Half Duplex.\n"); + } else { + if (hp->sw_lpa & LPA_10FULL) + printk("10Mb/s, Full Duplex.\n"); + else + printk("10Mb/s, Half Duplex.\n"); + } +} + +static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs) +{ + printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name); + if (hp->tcvr_type == external) + printk("external "); + else + printk("internal "); + printk("transceiver at "); + hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + if (hp->sw_bmcr & BMCR_SPEED100) + printk("100Mb/s, "); + else + printk("10Mb/s, "); + if (hp->sw_bmcr & BMCR_FULLDPLX) + printk("Full Duplex.\n"); + else + printk("Half Duplex.\n"); +} + +static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs) +{ + int full; + + /* All we care about is making sure the bigmac tx_cfg has a + * proper duplex setting. 
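+	 *
+	 * Coming from a completed autonegotiation (the arbwait state)
+	 * the duplex is derived from the link partner ability bits,
+	 * with 100baseT-FD and 10baseT-FD counted as full duplex.  In
+	 * the forced-link paths it simply mirrors our own BMCR setting.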
+ */ + if (hp->timer_state == arbwait) { + hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); + if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL))) + goto no_response; + if (hp->sw_lpa & LPA_100FULL) + full = 1; + else if (hp->sw_lpa & LPA_100HALF) + full = 0; + else if (hp->sw_lpa & LPA_10FULL) + full = 1; + else + full = 0; + } else { + /* Forcing a link mode. */ + hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + if (hp->sw_bmcr & BMCR_FULLDPLX) + full = 1; + else + full = 0; + } + + /* Before changing other bits in the tx_cfg register, and in + * general any of other the TX config registers too, you + * must: + * 1) Clear Enable + * 2) Poll with reads until that bit reads back as zero + * 3) Make TX configuration changes + * 4) Set Enable once more + */ + hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, + hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & + ~(BIGMAC_TXCFG_ENABLE)); + while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE) + barrier(); + if (full) { + hp->happy_flags |= HFLAG_FULL; + hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, + hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | + BIGMAC_TXCFG_FULLDPLX); + } else { + hp->happy_flags &= ~(HFLAG_FULL); + hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, + hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & + ~(BIGMAC_TXCFG_FULLDPLX)); + } + hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, + hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | + BIGMAC_TXCFG_ENABLE); + return 0; +no_response: + return 1; +} + +static int happy_meal_init(struct happy_meal *hp); + +static int is_lucent_phy(struct happy_meal *hp) +{ + void __iomem *tregs = hp->tcvregs; + unsigned short mr2, mr3; + int ret = 0; + + mr2 = happy_meal_tcvr_read(hp, tregs, 2); + mr3 = happy_meal_tcvr_read(hp, tregs, 3); + if ((mr2 & 0xffff) == 0x0180 && + ((mr3 & 0xffff) >> 10) == 0x1d) + ret = 1; + + return ret; +} + +static void happy_meal_timer(unsigned long data) +{ + struct happy_meal *hp = (struct happy_meal *) data; + void __iomem *tregs = hp->tcvregs; + int restart_timer = 0; + + spin_lock_irq(&hp->happy_lock); + + hp->timer_ticks++; + switch(hp->timer_state) { + case arbwait: + /* Only allow for 5 ticks, thats 10 seconds and much too + * long to wait for arbitration to complete. + */ + if (hp->timer_ticks >= 10) { + /* Enter force mode. */ + do_force_mode: + hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n", + hp->dev->name); + hp->sw_bmcr = BMCR_SPEED100; + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + + if (!is_lucent_phy(hp)) { + /* OK, seems we need do disable the transceiver for the first + * tick to make sure we get an accurate link state at the + * second tick. + */ + hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); + hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); + happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig); + } + hp->timer_state = ltrywait; + hp->timer_ticks = 0; + restart_timer = 1; + } else { + /* Anything interesting happen? */ + hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); + if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) { + int ret; + + /* Just what we've been waiting for... */ + ret = set_happy_link_modes(hp, tregs); + if (ret) { + /* Ooops, something bad happened, go to force + * mode. + * + * XXX Broken hubs which don't support 802.3u + * XXX auto-negotiation make this happen as well. + */ + goto do_force_mode; + } + + /* Success, at least so far, advance our state engine. 
*/ + hp->timer_state = lupwait; + restart_timer = 1; + } else { + restart_timer = 1; + } + } + break; + + case lupwait: + /* Auto negotiation was successful and we are awaiting a + * link up status. I have decided to let this timer run + * forever until some sort of error is signalled, reporting + * a message to the user at 10 second intervals. + */ + hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); + if (hp->sw_bmsr & BMSR_LSTATUS) { + /* Wheee, it's up, display the link mode in use and put + * the timer to sleep. + */ + display_link_mode(hp, tregs); + hp->timer_state = asleep; + restart_timer = 0; + } else { + if (hp->timer_ticks >= 10) { + printk(KERN_NOTICE "%s: Auto negotiation successful, link still " + "not completely up.\n", hp->dev->name); + hp->timer_ticks = 0; + restart_timer = 1; + } else { + restart_timer = 1; + } + } + break; + + case ltrywait: + /* Making the timeout here too long can make it take + * annoyingly long to attempt all of the link mode + * permutations, but then again this is essentially + * error recovery code for the most part. + */ + hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); + hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG); + if (hp->timer_ticks == 1) { + if (!is_lucent_phy(hp)) { + /* Re-enable transceiver, we'll re-enable the transceiver next + * tick, then check link state on the following tick. + */ + hp->sw_csconfig |= CSCONFIG_TCVDISAB; + happy_meal_tcvr_write(hp, tregs, + DP83840_CSCONFIG, hp->sw_csconfig); + } + restart_timer = 1; + break; + } + if (hp->timer_ticks == 2) { + if (!is_lucent_phy(hp)) { + hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); + happy_meal_tcvr_write(hp, tregs, + DP83840_CSCONFIG, hp->sw_csconfig); + } + restart_timer = 1; + break; + } + if (hp->sw_bmsr & BMSR_LSTATUS) { + /* Force mode selection success. */ + display_forced_link_mode(hp, tregs); + set_happy_link_modes(hp, tregs); /* XXX error? then what? */ + hp->timer_state = asleep; + restart_timer = 0; + } else { + if (hp->timer_ticks >= 4) { /* 6 seconds or so... */ + int ret; + + ret = try_next_permutation(hp, tregs); + if (ret == -1) { + /* Aieee, tried them all, reset the + * chip and try all over again. + */ + + /* Let the user know... */ + printk(KERN_NOTICE "%s: Link down, cable problem?\n", + hp->dev->name); + + ret = happy_meal_init(hp); + if (ret) { + /* ho hum... */ + printk(KERN_ERR "%s: Error, cannot re-init the " + "Happy Meal.\n", hp->dev->name); + } + goto out; + } + if (!is_lucent_phy(hp)) { + hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, + DP83840_CSCONFIG); + hp->sw_csconfig |= CSCONFIG_TCVDISAB; + happy_meal_tcvr_write(hp, tregs, + DP83840_CSCONFIG, hp->sw_csconfig); + } + hp->timer_ticks = 0; + restart_timer = 1; + } else { + restart_timer = 1; + } + } + break; + + case asleep: + default: + /* Can't happens.... */ + printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", + hp->dev->name); + restart_timer = 0; + hp->timer_ticks = 0; + hp->timer_state = asleep; /* foo on you */ + break; + } + + if (restart_timer) { + hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ + add_timer(&hp->happy_timer); + } + +out: + spin_unlock_irq(&hp->happy_lock); +} + +#define TX_RESET_TRIES 32 +#define RX_RESET_TRIES 32 + +/* hp->happy_lock must be held */ +static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs) +{ + int tries = TX_RESET_TRIES; + + HMD(("happy_meal_tx_reset: reset, ")); + + /* Would you like to try our SMCC Delux? 
*/ + hme_write32(hp, bregs + BMAC_TXSWRESET, 0); + while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries) + udelay(20); + + /* Lettuce, tomato, buggy hardware (no extra charge)? */ + if (!tries) + printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!"); + + /* Take care. */ + HMD(("done\n")); +} + +/* hp->happy_lock must be held */ +static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs) +{ + int tries = RX_RESET_TRIES; + + HMD(("happy_meal_rx_reset: reset, ")); + + /* We have a special on GNU/Viking hardware bugs today. */ + hme_write32(hp, bregs + BMAC_RXSWRESET, 0); + while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries) + udelay(20); + + /* Will that be all? */ + if (!tries) + printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!"); + + /* Don't forget your vik_1137125_wa. Have a nice day. */ + HMD(("done\n")); +} + +#define STOP_TRIES 16 + +/* hp->happy_lock must be held */ +static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs) +{ + int tries = STOP_TRIES; + + HMD(("happy_meal_stop: reset, ")); + + /* We're consolidating our STB products, it's your lucky day. */ + hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL); + while (hme_read32(hp, gregs + GREG_SWRESET) && --tries) + udelay(20); + + /* Come back next week when we are "Sun Microelectronics". */ + if (!tries) + printk(KERN_ERR "happy meal: Fry guys."); + + /* Remember: "Different name, same old buggy as shit hardware." */ + HMD(("done\n")); +} + +/* hp->happy_lock must be held */ +static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs) +{ + struct net_device_stats *stats = &hp->net_stats; + + stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR); + hme_write32(hp, bregs + BMAC_RCRCECTR, 0); + + stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR); + hme_write32(hp, bregs + BMAC_UNALECTR, 0); + + stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR); + hme_write32(hp, bregs + BMAC_GLECTR, 0); + + stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR); + + stats->collisions += + (hme_read32(hp, bregs + BMAC_EXCTR) + + hme_read32(hp, bregs + BMAC_LTCTR)); + hme_write32(hp, bregs + BMAC_EXCTR, 0); + hme_write32(hp, bregs + BMAC_LTCTR, 0); +} + +/* hp->happy_lock must be held */ +static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs) +{ + ASD(("happy_meal_poll_stop: ")); + + /* If polling disabled or not polling already, nothing to do. */ + if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) != + (HFLAG_POLLENABLE | HFLAG_POLL)) { + HMD(("not polling, return\n")); + return; + } + + /* Shut up the MIF. */ + ASD(("were polling, mif ints off, ")); + hme_write32(hp, tregs + TCVR_IMASK, 0xffff); + + /* Turn off polling. */ + ASD(("polling off, ")); + hme_write32(hp, tregs + TCVR_CFG, + hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE)); + + /* We are no longer polling. */ + hp->happy_flags &= ~(HFLAG_POLL); + + /* Let the bits set. */ + udelay(200); + ASD(("done\n")); +} + +/* Only Sun can take such nice parts and fuck up the programming interface + * like this. Good job guys... + */ +#define TCVR_RESET_TRIES 16 /* It should reset quickly */ +#define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. 
*/ + +/* hp->happy_lock must be held */ +static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs) +{ + u32 tconfig; + int result, tries = TCVR_RESET_TRIES; + + tconfig = hme_read32(hp, tregs + TCVR_CFG); + ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig)); + if (hp->tcvr_type == external) { + ASD(("external<")); + hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT)); + hp->tcvr_type = internal; + hp->paddr = TCV_PADDR_ITX; + ASD(("ISOLATE,")); + happy_meal_tcvr_write(hp, tregs, MII_BMCR, + (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE)); + result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + if (result == TCVR_FAILURE) { + ASD(("phyread_fail>\n")); + return -1; + } + ASD(("phyread_ok,PSELECT>")); + hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT); + hp->tcvr_type = external; + hp->paddr = TCV_PADDR_ETX; + } else { + if (tconfig & TCV_CFG_MDIO1) { + ASD(("internal\n")); + return -1; + } + ASD(("phyread_ok,~PSELECT>")); + hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT))); + hp->tcvr_type = internal; + hp->paddr = TCV_PADDR_ITX; + } + } + + ASD(("BMCR_RESET ")); + happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET); + + while (--tries) { + result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + if (result == TCVR_FAILURE) + return -1; + hp->sw_bmcr = result; + if (!(result & BMCR_RESET)) + break; + udelay(20); + } + if (!tries) { + ASD(("BMCR RESET FAILED!\n")); + return -1; + } + ASD(("RESET_OK\n")); + + /* Get fresh copies of the PHY registers. */ + hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); + hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1); + hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2); + hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); + + ASD(("UNISOLATE")); + hp->sw_bmcr &= ~(BMCR_ISOLATE); + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + + tries = TCVR_UNISOLATE_TRIES; + while (--tries) { + result = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + if (result == TCVR_FAILURE) + return -1; + if (!(result & BMCR_ISOLATE)) + break; + udelay(20); + } + if (!tries) { + ASD((" FAILED!\n")); + return -1; + } + ASD((" SUCCESS and CSCONFIG_DFBYPASS\n")); + if (!is_lucent_phy(hp)) { + result = happy_meal_tcvr_read(hp, tregs, + DP83840_CSCONFIG); + happy_meal_tcvr_write(hp, tregs, + DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS)); + } + return 0; +} + +/* Figure out whether we have an internal or external transceiver. + * + * hp->happy_lock must be held + */ +static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs) +{ + unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG); + + ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig)); + if (hp->happy_flags & HFLAG_POLL) { + /* If we are polling, we must stop to get the transceiver type. 
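+ * (The MDIO0/MDIO1 status bits in TCVR_CFG are what the code below
+ * keys off: MDIO1 is taken to mean an external transceiver answered,
+ * MDIO0 the on-board one, and neither leaves tcvr_type as none.)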
*/ + ASD((" ")); + if (hp->tcvr_type == internal) { + if (tconfig & TCV_CFG_MDIO1) { + ASD((" ")); + happy_meal_poll_stop(hp, tregs); + hp->paddr = TCV_PADDR_ETX; + hp->tcvr_type = external; + ASD(("\n")); + tconfig &= ~(TCV_CFG_PENABLE); + tconfig |= TCV_CFG_PSELECT; + hme_write32(hp, tregs + TCVR_CFG, tconfig); + } + } else { + if (hp->tcvr_type == external) { + ASD((" ")); + if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) { + ASD((" ")); + happy_meal_poll_stop(hp, tregs); + hp->paddr = TCV_PADDR_ITX; + hp->tcvr_type = internal; + ASD(("\n")); + hme_write32(hp, tregs + TCVR_CFG, + hme_read32(hp, tregs + TCVR_CFG) & + ~(TCV_CFG_PSELECT)); + } + ASD(("\n")); + } else { + ASD(("\n")); + } + } + } else { + u32 reread = hme_read32(hp, tregs + TCVR_CFG); + + /* Else we can just work off of the MDIO bits. */ + ASD((" ")); + if (reread & TCV_CFG_MDIO1) { + hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT); + hp->paddr = TCV_PADDR_ETX; + hp->tcvr_type = external; + ASD(("\n")); + } else { + if (reread & TCV_CFG_MDIO0) { + hme_write32(hp, tregs + TCVR_CFG, + tconfig & ~(TCV_CFG_PSELECT)); + hp->paddr = TCV_PADDR_ITX; + hp->tcvr_type = internal; + ASD(("\n")); + } else { + printk(KERN_ERR "happy meal: Transceiver and a coke please."); + hp->tcvr_type = none; /* Grrr... */ + ASD(("\n")); + } + } + } +} + +/* The receive ring buffers are a bit tricky to get right. Here goes... + * + * The buffers we dma into must be 64 byte aligned. So we use a special + * alloc_skb() routine for the happy meal to allocate 64 bytes more than + * we really need. + * + * We use skb_reserve() to align the data block we get in the skb. We + * also program the etxregs->cfg register to use an offset of 2. This + * imperical constant plus the ethernet header size will always leave + * us with a nicely aligned ip header once we pass things up to the + * protocol layers. + * + * The numbers work out to: + * + * Max ethernet frame size 1518 + * Ethernet header size 14 + * Happy Meal base offset 2 + * + * Say a skb data area is at 0xf001b010, and its size alloced is + * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes. + * + * First our alloc_skb() routine aligns the data base to a 64 byte + * boundary. We now have 0xf001b040 as our skb data address. We + * plug this into the receive descriptor address. + * + * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset. + * So now the data we will end up looking at starts at 0xf001b042. When + * the packet arrives, we will check out the size received and subtract + * this from the skb->length. Then we just pass the packet up to the + * protocols as is, and allocate a new skb to replace this slot we have + * just received from. + * + * The ethernet layer will strip the ether header from the front of the + * skb we just sent to it, this leaves us with the ip header sitting + * nicely aligned at 0xf001b050. Also, for tcp and udp packets the + * Happy Meal has even checksummed the tcp/udp data for us. The 16 + * bit checksum is obtained from the low bits of the receive descriptor + * flags, thus: + * + * skb->csum = rxd->rx_flags & 0xffff; + * skb->ip_summed = CHECKSUM_COMPLETE; + * + * before sending off the skb to the protocols, and we are good as gold. 
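+ *
+ * Worked through, the example above is simply:
+ *
+ *	data        = 0xf001b010
+ *	aligned     = data rounded up to 64  -> 0xf001b040  (goes into the rxd)
+ *	frame start = aligned + 2            -> 0xf001b042  (the RX_OFFSET reserve)
+ *	ip header   = frame start + 14       -> 0xf001b050  (nicely aligned)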
+ */ +static void happy_meal_clean_rings(struct happy_meal *hp) +{ + int i; + + for (i = 0; i < RX_RING_SIZE; i++) { + if (hp->rx_skbs[i] != NULL) { + struct sk_buff *skb = hp->rx_skbs[i]; + struct happy_meal_rxd *rxd; + u32 dma_addr; + + rxd = &hp->happy_block->happy_meal_rxd[i]; + dma_addr = hme_read_desc32(hp, &rxd->rx_addr); + dma_unmap_single(hp->dma_dev, dma_addr, + RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + hp->rx_skbs[i] = NULL; + } + } + + for (i = 0; i < TX_RING_SIZE; i++) { + if (hp->tx_skbs[i] != NULL) { + struct sk_buff *skb = hp->tx_skbs[i]; + struct happy_meal_txd *txd; + u32 dma_addr; + int frag; + + hp->tx_skbs[i] = NULL; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + txd = &hp->happy_block->happy_meal_txd[i]; + dma_addr = hme_read_desc32(hp, &txd->tx_addr); + if (!frag) + dma_unmap_single(hp->dma_dev, dma_addr, + (hme_read_desc32(hp, &txd->tx_flags) + & TXFLAG_SIZE), + DMA_TO_DEVICE); + else + dma_unmap_page(hp->dma_dev, dma_addr, + (hme_read_desc32(hp, &txd->tx_flags) + & TXFLAG_SIZE), + DMA_TO_DEVICE); + + if (frag != skb_shinfo(skb)->nr_frags) + i++; + } + + dev_kfree_skb_any(skb); + } + } +} + +/* hp->happy_lock must be held */ +static void happy_meal_init_rings(struct happy_meal *hp) +{ + struct hmeal_init_block *hb = hp->happy_block; + int i; + + HMD(("happy_meal_init_rings: counters to zero, ")); + hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0; + + /* Free any skippy bufs left around in the rings. */ + HMD(("clean, ")); + happy_meal_clean_rings(hp); + + /* Now get new skippy bufs for the receive ring. */ + HMD(("init rxring, ")); + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb; + u32 mapping; + + skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); + if (!skb) { + hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); + continue; + } + hp->rx_skbs[i] = skb; + + /* Because we reserve afterwards. */ + skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(hp->dma_dev, mapping)) { + dev_kfree_skb_any(skb); + hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0); + continue; + } + hme_write_rxd(hp, &hb->happy_meal_rxd[i], + (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), + mapping); + skb_reserve(skb, RX_OFFSET); + } + + HMD(("init txring, ")); + for (i = 0; i < TX_RING_SIZE; i++) + hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0); + + HMD(("done\n")); +} + +/* hp->happy_lock must be held */ +static void happy_meal_begin_auto_negotiation(struct happy_meal *hp, + void __iomem *tregs, + struct ethtool_cmd *ep) +{ + int timeout; + + /* Read all of the registers we are interested in now. */ + hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); + hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1); + hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2); + + /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */ + + hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); + if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { + /* Advertise everything we can support. 
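+ * That is, mirror the BMSR capability bits into MII_ADVERTISE: each
+ * of the 10/100 half/full bits below is set only if the PHY reports
+ * the matching BMSR_* capability.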
*/ + if (hp->sw_bmsr & BMSR_10HALF) + hp->sw_advertise |= (ADVERTISE_10HALF); + else + hp->sw_advertise &= ~(ADVERTISE_10HALF); + + if (hp->sw_bmsr & BMSR_10FULL) + hp->sw_advertise |= (ADVERTISE_10FULL); + else + hp->sw_advertise &= ~(ADVERTISE_10FULL); + if (hp->sw_bmsr & BMSR_100HALF) + hp->sw_advertise |= (ADVERTISE_100HALF); + else + hp->sw_advertise &= ~(ADVERTISE_100HALF); + if (hp->sw_bmsr & BMSR_100FULL) + hp->sw_advertise |= (ADVERTISE_100FULL); + else + hp->sw_advertise &= ~(ADVERTISE_100FULL); + happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise); + + /* XXX Currently no Happy Meal cards I know off support 100BaseT4, + * XXX and this is because the DP83840 does not support it, changes + * XXX would need to be made to the tx/rx logic in the driver as well + * XXX so I completely skip checking for it in the BMSR for now. + */ + +#ifdef AUTO_SWITCH_DEBUG + ASD(("%s: Advertising [ ", hp->dev->name)); + if (hp->sw_advertise & ADVERTISE_10HALF) + ASD(("10H ")); + if (hp->sw_advertise & ADVERTISE_10FULL) + ASD(("10F ")); + if (hp->sw_advertise & ADVERTISE_100HALF) + ASD(("100H ")); + if (hp->sw_advertise & ADVERTISE_100FULL) + ASD(("100F ")); +#endif + + /* Enable Auto-Negotiation, this is usually on already... */ + hp->sw_bmcr |= BMCR_ANENABLE; + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + + /* Restart it to make sure it is going. */ + hp->sw_bmcr |= BMCR_ANRESTART; + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + + /* BMCR_ANRESTART self clears when the process has begun. */ + + timeout = 64; /* More than enough. */ + while (--timeout) { + hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + if (!(hp->sw_bmcr & BMCR_ANRESTART)) + break; /* got it. */ + udelay(10); + } + if (!timeout) { + printk(KERN_ERR "%s: Happy Meal would not start auto negotiation " + "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr); + printk(KERN_NOTICE "%s: Performing force link detection.\n", + hp->dev->name); + goto force_link; + } else { + hp->timer_state = arbwait; + } + } else { +force_link: + /* Force the link up, trying first a particular mode. + * Either we are here at the request of ethtool or + * because the Happy Meal would not start to autoneg. + */ + + /* Disable auto-negotiation in BMCR, enable the duplex and + * speed setting, init the timer state machine, and fire it off. + */ + if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) { + hp->sw_bmcr = BMCR_SPEED100; + } else { + if (ethtool_cmd_speed(ep) == SPEED_100) + hp->sw_bmcr = BMCR_SPEED100; + else + hp->sw_bmcr = 0; + if (ep->duplex == DUPLEX_FULL) + hp->sw_bmcr |= BMCR_FULLDPLX; + } + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + + if (!is_lucent_phy(hp)) { + /* OK, seems we need do disable the transceiver for the first + * tick to make sure we get an accurate link state at the + * second tick. + */ + hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, + DP83840_CSCONFIG); + hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB); + happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, + hp->sw_csconfig); + } + hp->timer_state = ltrywait; + } + + hp->timer_ticks = 0; + hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. 
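+ * happy_meal_timer() keeps re-arming itself at this interval until
+ * the link state machine finally parks itself in 'asleep'.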
*/ + hp->happy_timer.data = (unsigned long) hp; + hp->happy_timer.function = happy_meal_timer; + add_timer(&hp->happy_timer); +} + +/* hp->happy_lock must be held */ +static int happy_meal_init(struct happy_meal *hp) +{ + void __iomem *gregs = hp->gregs; + void __iomem *etxregs = hp->etxregs; + void __iomem *erxregs = hp->erxregs; + void __iomem *bregs = hp->bigmacregs; + void __iomem *tregs = hp->tcvregs; + u32 regtmp, rxcfg; + unsigned char *e = &hp->dev->dev_addr[0]; + + /* If auto-negotiation timer is running, kill it. */ + del_timer(&hp->happy_timer); + + HMD(("happy_meal_init: happy_flags[%08x] ", + hp->happy_flags)); + if (!(hp->happy_flags & HFLAG_INIT)) { + HMD(("set HFLAG_INIT, ")); + hp->happy_flags |= HFLAG_INIT; + happy_meal_get_counters(hp, bregs); + } + + /* Stop polling. */ + HMD(("to happy_meal_poll_stop\n")); + happy_meal_poll_stop(hp, tregs); + + /* Stop transmitter and receiver. */ + HMD(("happy_meal_init: to happy_meal_stop\n")); + happy_meal_stop(hp, gregs); + + /* Alloc and reset the tx/rx descriptor chains. */ + HMD(("happy_meal_init: to happy_meal_init_rings\n")); + happy_meal_init_rings(hp); + + /* Shut up the MIF. */ + HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ", + hme_read32(hp, tregs + TCVR_IMASK))); + hme_write32(hp, tregs + TCVR_IMASK, 0xffff); + + /* See if we can enable the MIF frame on this card to speak to the DP83840. */ + if (hp->happy_flags & HFLAG_FENABLE) { + HMD(("use frame old[%08x], ", + hme_read32(hp, tregs + TCVR_CFG))); + hme_write32(hp, tregs + TCVR_CFG, + hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); + } else { + HMD(("use bitbang old[%08x], ", + hme_read32(hp, tregs + TCVR_CFG))); + hme_write32(hp, tregs + TCVR_CFG, + hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); + } + + /* Check the state of the transceiver. */ + HMD(("to happy_meal_transceiver_check\n")); + happy_meal_transceiver_check(hp, tregs); + + /* Put the Big Mac into a sane state. */ + HMD(("happy_meal_init: ")); + switch(hp->tcvr_type) { + case none: + /* Cannot operate if we don't know the transceiver type! */ + HMD(("AAIEEE no transceiver type, EAGAIN")); + return -EAGAIN; + + case internal: + /* Using the MII buffers. */ + HMD(("internal, using MII, ")); + hme_write32(hp, bregs + BMAC_XIFCFG, 0); + break; + + case external: + /* Not using the MII, disable it. */ + HMD(("external, disable MII, ")); + hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); + break; + } + + if (happy_meal_tcvr_reset(hp, tregs)) + return -EAGAIN; + + /* Reset the Happy Meal Big Mac transceiver and the receiver. */ + HMD(("tx/rx reset, ")); + happy_meal_tx_reset(hp, bregs); + happy_meal_rx_reset(hp, bregs); + + /* Set jam size and inter-packet gaps to reasonable defaults. */ + HMD(("jsize/ipg1/ipg2, ")); + hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE); + hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1); + hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2); + + /* Load up the MAC address and random seed. */ + HMD(("rseed/macaddr, ")); + + /* The docs recommend to use the 10LSB of our MAC here. 
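+ * That is what (e[5] | e[4]<<8) & 0x3ff below extracts: the 10 least
+ * significant bits of the station address, presumably so that the
+ * random backoff seed differs from station to station.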
*/ + hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff)); + + hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5])); + hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3])); + hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1])); + + HMD(("htable, ")); + if ((hp->dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(hp->dev) > 64)) { + hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); + hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); + hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); + hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); + } else if ((hp->dev->flags & IFF_PROMISC) == 0) { + u16 hash_table[4]; + struct netdev_hw_addr *ha; + u32 crc; + + memset(hash_table, 0, sizeof(hash_table)); + netdev_for_each_mc_addr(ha, hp->dev) { + crc = ether_crc_le(6, ha->addr); + crc >>= 26; + hash_table[crc >> 4] |= 1 << (crc & 0xf); + } + hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); + hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); + hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); + hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); + } else { + hme_write32(hp, bregs + BMAC_HTABLE3, 0); + hme_write32(hp, bregs + BMAC_HTABLE2, 0); + hme_write32(hp, bregs + BMAC_HTABLE1, 0); + hme_write32(hp, bregs + BMAC_HTABLE0, 0); + } + + /* Set the RX and TX ring ptrs. */ + HMD(("ring ptrs rxr[%08x] txr[%08x]\n", + ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)), + ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)))); + hme_write32(hp, erxregs + ERX_RING, + ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))); + hme_write32(hp, etxregs + ETX_RING, + ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))); + + /* Parity issues in the ERX unit of some HME revisions can cause some + * registers to not be written unless their parity is even. Detect such + * lost writes and simply rewrite with a low bit set (which will be ignored + * since the rxring needs to be 2K aligned). + */ + if (hme_read32(hp, erxregs + ERX_RING) != + ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))) + hme_write32(hp, erxregs + ERX_RING, + ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)) + | 0x4); + + /* Set the supported burst sizes. */ + HMD(("happy_meal_init: old[%08x] bursts<", + hme_read32(hp, gregs + GREG_CFG))); + +#ifndef CONFIG_SPARC + /* It is always PCI and can handle 64byte bursts. */ + hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64); +#else + if ((hp->happy_bursts & DMA_BURST64) && + ((hp->happy_flags & HFLAG_PCI) != 0 +#ifdef CONFIG_SBUS + || sbus_can_burst64() +#endif + || 0)) { + u32 gcfg = GREG_CFG_BURST64; + + /* I have no idea if I should set the extended + * transfer mode bit for Cheerio, so for now I + * do not. -DaveM + */ +#ifdef CONFIG_SBUS + if ((hp->happy_flags & HFLAG_PCI) == 0) { + struct platform_device *op = hp->happy_dev; + if (sbus_can_dma_64bit()) { + sbus_set_sbus64(&op->dev, + hp->happy_bursts); + gcfg |= GREG_CFG_64BIT; + } + } +#endif + + HMD(("64>")); + hme_write32(hp, gregs + GREG_CFG, gcfg); + } else if (hp->happy_bursts & DMA_BURST32) { + HMD(("32>")); + hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32); + } else if (hp->happy_bursts & DMA_BURST16) { + HMD(("16>")); + hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16); + } else { + HMD(("XXX>")); + hme_write32(hp, gregs + GREG_CFG, 0); + } +#endif /* CONFIG_SPARC */ + + /* Turn off interrupts we do not want to hear. 
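+ * Bits set in GREG_IMASK silence the corresponding status source, so
+ * the sources written below are the ones being masked off; the
+ * interrupt handler keys off RXTOHOST/TXALL and the error bits instead.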
*/ + HMD((", enable global interrupts, ")); + hme_write32(hp, gregs + GREG_IMASK, + (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP | + GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR)); + + /* Set the transmit ring buffer size. */ + HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE, + hme_read32(hp, etxregs + ETX_RSIZE))); + hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1); + + /* Enable transmitter DVMA. */ + HMD(("tx dma enable old[%08x], ", + hme_read32(hp, etxregs + ETX_CFG))); + hme_write32(hp, etxregs + ETX_CFG, + hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE); + + /* This chip really rots, for the receiver sometimes when you + * write to its control registers not all the bits get there + * properly. I cannot think of a sane way to provide complete + * coverage for this hardware bug yet. + */ + HMD(("erx regs bug old[%08x]\n", + hme_read32(hp, erxregs + ERX_CFG))); + hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); + regtmp = hme_read32(hp, erxregs + ERX_CFG); + hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET)); + if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) { + printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n"); + printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n", + ERX_CFG_DEFAULT(RX_OFFSET), regtmp); + /* XXX Should return failure here... */ + } + + /* Enable Big Mac hash table filter. */ + HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ", + hme_read32(hp, bregs + BMAC_RXCFG))); + rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME; + if (hp->dev->flags & IFF_PROMISC) + rxcfg |= BIGMAC_RXCFG_PMISC; + hme_write32(hp, bregs + BMAC_RXCFG, rxcfg); + + /* Let the bits settle in the chip. */ + udelay(10); + + /* Ok, configure the Big Mac transmitter. */ + HMD(("BIGMAC init, ")); + regtmp = 0; + if (hp->happy_flags & HFLAG_FULL) + regtmp |= BIGMAC_TXCFG_FULLDPLX; + + /* Don't turn on the "don't give up" bit for now. It could cause hme + * to deadlock with the PHY if a Jabber occurs. + */ + hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/); + + /* Give up after 16 TX attempts. */ + hme_write32(hp, bregs + BMAC_ALIMIT, 16); + + /* Enable the output drivers no matter what. */ + regtmp = BIGMAC_XCFG_ODENABLE; + + /* If card can do lance mode, enable it. */ + if (hp->happy_flags & HFLAG_LANCE) + regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE; + + /* Disable the MII buffers if using external transceiver. */ + if (hp->tcvr_type == external) + regtmp |= BIGMAC_XCFG_MIIDISAB; + + HMD(("XIF config old[%08x], ", + hme_read32(hp, bregs + BMAC_XIFCFG))); + hme_write32(hp, bregs + BMAC_XIFCFG, regtmp); + + /* Start things up. */ + HMD(("tx old[%08x] and rx [%08x] ON!\n", + hme_read32(hp, bregs + BMAC_TXCFG), + hme_read32(hp, bregs + BMAC_RXCFG))); + + /* Set larger TX/RX size to allow for 802.1q */ + hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8); + hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8); + + hme_write32(hp, bregs + BMAC_TXCFG, + hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE); + hme_write32(hp, bregs + BMAC_RXCFG, + hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE); + + /* Get the autonegotiation started, and the watch timer ticking. */ + happy_meal_begin_auto_negotiation(hp, tregs, NULL); + + /* Success. 
*/ + return 0; +} + +/* hp->happy_lock must be held */ +static void happy_meal_set_initial_advertisement(struct happy_meal *hp) +{ + void __iomem *tregs = hp->tcvregs; + void __iomem *bregs = hp->bigmacregs; + void __iomem *gregs = hp->gregs; + + happy_meal_stop(hp, gregs); + hme_write32(hp, tregs + TCVR_IMASK, 0xffff); + if (hp->happy_flags & HFLAG_FENABLE) + hme_write32(hp, tregs + TCVR_CFG, + hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE)); + else + hme_write32(hp, tregs + TCVR_CFG, + hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE); + happy_meal_transceiver_check(hp, tregs); + switch(hp->tcvr_type) { + case none: + return; + case internal: + hme_write32(hp, bregs + BMAC_XIFCFG, 0); + break; + case external: + hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); + break; + } + if (happy_meal_tcvr_reset(hp, tregs)) + return; + + /* Latch PHY registers as of now. */ + hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR); + hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE); + + /* Advertise everything we can support. */ + if (hp->sw_bmsr & BMSR_10HALF) + hp->sw_advertise |= (ADVERTISE_10HALF); + else + hp->sw_advertise &= ~(ADVERTISE_10HALF); + + if (hp->sw_bmsr & BMSR_10FULL) + hp->sw_advertise |= (ADVERTISE_10FULL); + else + hp->sw_advertise &= ~(ADVERTISE_10FULL); + if (hp->sw_bmsr & BMSR_100HALF) + hp->sw_advertise |= (ADVERTISE_100HALF); + else + hp->sw_advertise &= ~(ADVERTISE_100HALF); + if (hp->sw_bmsr & BMSR_100FULL) + hp->sw_advertise |= (ADVERTISE_100FULL); + else + hp->sw_advertise &= ~(ADVERTISE_100FULL); + + /* Update the PHY advertisement register. */ + happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise); +} + +/* Once status is latched (by happy_meal_interrupt) it is cleared by + * the hardware, so we cannot re-read it and get a correct value. + * + * hp->happy_lock must be held + */ +static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status) +{ + int reset = 0; + + /* Only print messages for non-counter related interrupts. */ + if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND | + GREG_STAT_MAXPKTERR | GREG_STAT_RXERR | + GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR | + GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR | + GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR | + GREG_STAT_SLVPERR)) + printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n", + hp->dev->name, status); + + if (status & GREG_STAT_RFIFOVF) { + /* Receive FIFO overflow is harmless and the hardware will take + care of it, just some packets are lost. Who cares. */ + printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name); + } + + if (status & GREG_STAT_STSTERR) { + /* BigMAC SQE link test failed. */ + printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name); + reset = 1; + } + + if (status & GREG_STAT_TFIFO_UND) { + /* Transmit FIFO underrun, again DMA error likely. */ + printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n", + hp->dev->name); + reset = 1; + } + + if (status & GREG_STAT_MAXPKTERR) { + /* Driver error, tried to transmit something larger + * than ethernet max mtu. + */ + printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name); + reset = 1; + } + + if (status & GREG_STAT_NORXD) { + /* This is harmless, it just means the system is + * quite loaded and the incoming packet rate was + * faster than the interrupt handler could keep up + * with. 
+ */ + printk(KERN_INFO "%s: Happy Meal out of receive " + "descriptors, packet dropped.\n", + hp->dev->name); + } + + if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) { + /* All sorts of DMA receive errors. */ + printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name); + if (status & GREG_STAT_RXERR) + printk("GenericError "); + if (status & GREG_STAT_RXPERR) + printk("ParityError "); + if (status & GREG_STAT_RXTERR) + printk("RxTagBotch "); + printk("]\n"); + reset = 1; + } + + if (status & GREG_STAT_EOPERR) { + /* Driver bug, didn't set EOP bit in tx descriptor given + * to the happy meal. + */ + printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n", + hp->dev->name); + reset = 1; + } + + if (status & GREG_STAT_MIFIRQ) { + /* MIF signalled an interrupt, were we polling it? */ + printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name); + } + + if (status & + (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) { + /* All sorts of transmit DMA errors. */ + printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name); + if (status & GREG_STAT_TXEACK) + printk("GenericError "); + if (status & GREG_STAT_TXLERR) + printk("LateError "); + if (status & GREG_STAT_TXPERR) + printk("ParityErro "); + if (status & GREG_STAT_TXTERR) + printk("TagBotch "); + printk("]\n"); + reset = 1; + } + + if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) { + /* Bus or parity error when cpu accessed happy meal registers + * or it's internal FIFO's. Should never see this. + */ + printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n", + hp->dev->name, + (status & GREG_STAT_SLVPERR) ? "parity" : "generic"); + reset = 1; + } + + if (reset) { + printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name); + happy_meal_init(hp); + return 1; + } + return 0; +} + +/* hp->happy_lock must be held */ +static void happy_meal_mif_interrupt(struct happy_meal *hp) +{ + void __iomem *tregs = hp->tcvregs; + + printk(KERN_INFO "%s: Link status change.\n", hp->dev->name); + hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); + hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); + + /* Use the fastest transmission protocol possible. */ + if (hp->sw_lpa & LPA_100FULL) { + printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name); + hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100); + } else if (hp->sw_lpa & LPA_100HALF) { + printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name); + hp->sw_bmcr |= BMCR_SPEED100; + } else if (hp->sw_lpa & LPA_10FULL) { + printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name); + hp->sw_bmcr |= BMCR_FULLDPLX; + } else { + printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name); + } + happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); + + /* Finally stop polling and shut up the MIF. 
*/ + happy_meal_poll_stop(hp, tregs); +} + +#ifdef TXDEBUG +#define TXD(x) printk x +#else +#define TXD(x) +#endif + +/* hp->happy_lock must be held */ +static void happy_meal_tx(struct happy_meal *hp) +{ + struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; + struct happy_meal_txd *this; + struct net_device *dev = hp->dev; + int elem; + + elem = hp->tx_old; + TXD(("TX<")); + while (elem != hp->tx_new) { + struct sk_buff *skb; + u32 flags, dma_addr, dma_len; + int frag; + + TXD(("[%d]", elem)); + this = &txbase[elem]; + flags = hme_read_desc32(hp, &this->tx_flags); + if (flags & TXFLAG_OWN) + break; + skb = hp->tx_skbs[elem]; + if (skb_shinfo(skb)->nr_frags) { + int last; + + last = elem + skb_shinfo(skb)->nr_frags; + last &= (TX_RING_SIZE - 1); + flags = hme_read_desc32(hp, &txbase[last].tx_flags); + if (flags & TXFLAG_OWN) + break; + } + hp->tx_skbs[elem] = NULL; + hp->net_stats.tx_bytes += skb->len; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + dma_addr = hme_read_desc32(hp, &this->tx_addr); + dma_len = hme_read_desc32(hp, &this->tx_flags); + + dma_len &= TXFLAG_SIZE; + if (!frag) + dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE); + else + dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE); + + elem = NEXT_TX(elem); + this = &txbase[elem]; + } + + dev_kfree_skb_irq(skb); + hp->net_stats.tx_packets++; + } + hp->tx_old = elem; + TXD((">")); + + if (netif_queue_stopped(dev) && + TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1)) + netif_wake_queue(dev); +} + +#ifdef RXDEBUG +#define RXD(x) printk x +#else +#define RXD(x) +#endif + +/* Originally I used to handle the allocation failure by just giving back just + * that one ring buffer to the happy meal. Problem is that usually when that + * condition is triggered, the happy meal expects you to do something reasonable + * with all of the packets it has DMA'd in. So now I just drop the entire + * ring when we cannot get a new skb and give them all back to the happy meal, + * maybe things will be "happier" now. + * + * hp->happy_lock must be held + */ +static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) +{ + struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0]; + struct happy_meal_rxd *this; + int elem = hp->rx_new, drops = 0; + u32 flags; + + RXD(("RX<")); + this = &rxbase[elem]; + while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) { + struct sk_buff *skb; + int len = flags >> 16; + u16 csum = flags & RXFLAG_CSUM; + u32 dma_addr = hme_read_desc32(hp, &this->rx_addr); + + RXD(("[%d ", elem)); + + /* Check for errors. */ + if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) { + RXD(("ERR(%08x)]", flags)); + hp->net_stats.rx_errors++; + if (len < ETH_ZLEN) + hp->net_stats.rx_length_errors++; + if (len & (RXFLAG_OVERFLOW >> 16)) { + hp->net_stats.rx_over_errors++; + hp->net_stats.rx_fifo_errors++; + } + + /* Return it to the Happy meal. */ + drop_it: + hp->net_stats.rx_dropped++; + hme_write_rxd(hp, this, + (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), + dma_addr); + goto next; + } + skb = hp->rx_skbs[elem]; + if (len > RX_COPY_THRESHOLD) { + struct sk_buff *new_skb; + u32 mapping; + + /* Now refill the entry, if we can. 
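+ * A replacement skb must be allocated and DMA-mapped before the full
+ * sized buffer is handed up; if either step fails we fall back to
+ * drop_it above and recycle the original buffer into the ring.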
*/ + new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); + if (new_skb == NULL) { + drops++; + goto drop_it; + } + skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); + mapping = dma_map_single(hp->dma_dev, new_skb->data, + RX_BUF_ALLOC_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + dev_kfree_skb_any(new_skb); + drops++; + goto drop_it; + } + + dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); + hp->rx_skbs[elem] = new_skb; + hme_write_rxd(hp, this, + (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), + mapping); + skb_reserve(new_skb, RX_OFFSET); + + /* Trim the original skb for the netif. */ + skb_trim(skb, len); + } else { + struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); + + if (copy_skb == NULL) { + drops++; + goto drop_it; + } + + skb_reserve(copy_skb, 2); + skb_put(copy_skb, len); + dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + skb_copy_from_linear_data(skb, copy_skb->data, len); + dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); + /* Reuse original ring buffer. */ + hme_write_rxd(hp, this, + (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), + dma_addr); + + skb = copy_skb; + } + + /* This card is _fucking_ hot... */ + skb->csum = csum_unfold(~(__force __sum16)htons(csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + + RXD(("len=%d csum=%4x]", len, csum)); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + + hp->net_stats.rx_packets++; + hp->net_stats.rx_bytes += len; + next: + elem = NEXT_RX(elem); + this = &rxbase[elem]; + } + hp->rx_new = elem; + if (drops) + printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name); + RXD((">")); +} + +static irqreturn_t happy_meal_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct happy_meal *hp = netdev_priv(dev); + u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT); + + HMD(("happy_meal_interrupt: status=%08x ", happy_status)); + + spin_lock(&hp->happy_lock); + + if (happy_status & GREG_STAT_ERRORS) { + HMD(("ERRORS ")); + if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status)) + goto out; + } + + if (happy_status & GREG_STAT_MIFIRQ) { + HMD(("MIFIRQ ")); + happy_meal_mif_interrupt(hp); + } + + if (happy_status & GREG_STAT_TXALL) { + HMD(("TXALL ")); + happy_meal_tx(hp); + } + + if (happy_status & GREG_STAT_RXTOHOST) { + HMD(("RXTOHOST ")); + happy_meal_rx(hp, dev); + } + + HMD(("done\n")); +out: + spin_unlock(&hp->happy_lock); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_SBUS +static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie) +{ + struct quattro *qp = (struct quattro *) cookie; + int i; + + for (i = 0; i < 4; i++) { + struct net_device *dev = qp->happy_meals[i]; + struct happy_meal *hp = netdev_priv(dev); + u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT); + + HMD(("quattro_interrupt: status=%08x ", happy_status)); + + if (!(happy_status & (GREG_STAT_ERRORS | + GREG_STAT_MIFIRQ | + GREG_STAT_TXALL | + GREG_STAT_RXTOHOST))) + continue; + + spin_lock(&hp->happy_lock); + + if (happy_status & GREG_STAT_ERRORS) { + HMD(("ERRORS ")); + if (happy_meal_is_not_so_happy(hp, happy_status)) + goto next; + } + + if (happy_status & GREG_STAT_MIFIRQ) { + HMD(("MIFIRQ ")); + happy_meal_mif_interrupt(hp); + } + + if (happy_status & GREG_STAT_TXALL) { + HMD(("TXALL ")); + happy_meal_tx(hp); + } + + if (happy_status & GREG_STAT_RXTOHOST) { + HMD(("RXTOHOST ")); + happy_meal_rx(hp, dev); + } + + next: + spin_unlock(&hp->happy_lock); + } + 
HMD(("done\n")); + + return IRQ_HANDLED; +} +#endif + +static int happy_meal_open(struct net_device *dev) +{ + struct happy_meal *hp = netdev_priv(dev); + int res; + + HMD(("happy_meal_open: ")); + + /* On SBUS Quattro QFE cards, all hme interrupts are concentrated + * into a single source which we register handling at probe time. + */ + if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { + res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED, + dev->name, dev); + if (res) { + HMD(("EAGAIN\n")); + printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", + hp->irq); + + return -EAGAIN; + } + } + + HMD(("to happy_meal_init\n")); + + spin_lock_irq(&hp->happy_lock); + res = happy_meal_init(hp); + spin_unlock_irq(&hp->happy_lock); + + if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) + free_irq(hp->irq, dev); + return res; +} + +static int happy_meal_close(struct net_device *dev) +{ + struct happy_meal *hp = netdev_priv(dev); + + spin_lock_irq(&hp->happy_lock); + happy_meal_stop(hp, hp->gregs); + happy_meal_clean_rings(hp); + + /* If auto-negotiation timer is running, kill it. */ + del_timer(&hp->happy_timer); + + spin_unlock_irq(&hp->happy_lock); + + /* On Quattro QFE cards, all hme interrupts are concentrated + * into a single source which we register handling at probe + * time and never unregister. + */ + if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) + free_irq(hp->irq, dev); + + return 0; +} + +#ifdef SXDEBUG +#define SXD(x) printk x +#else +#define SXD(x) +#endif + +static void happy_meal_tx_timeout(struct net_device *dev) +{ + struct happy_meal *hp = netdev_priv(dev); + + printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + tx_dump_log(); + printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name, + hme_read32(hp, hp->gregs + GREG_STAT), + hme_read32(hp, hp->etxregs + ETX_CFG), + hme_read32(hp, hp->bigmacregs + BMAC_TXCFG)); + + spin_lock_irq(&hp->happy_lock); + happy_meal_init(hp); + spin_unlock_irq(&hp->happy_lock); + + netif_wake_queue(dev); +} + +static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping, + u32 first_len, u32 first_entry, u32 entry) +{ + struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0]; + + dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE); + + first_entry = NEXT_TX(first_entry); + while (first_entry != entry) { + struct happy_meal_txd *this = &txbase[first_entry]; + u32 addr, len; + + addr = hme_read_desc32(hp, &this->tx_addr); + len = hme_read_desc32(hp, &this->tx_flags); + len &= TXFLAG_SIZE; + dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE); + } +} + +static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct happy_meal *hp = netdev_priv(dev); + int entry; + u32 tx_flags; + + tx_flags = TXFLAG_OWN; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const u32 csum_start_off = skb_checksum_start_offset(skb); + const u32 csum_stuff_off = csum_start_off + skb->csum_offset; + + tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | + ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | + ((csum_stuff_off << 20) & TXFLAG_CSLOCATION)); + } + + spin_lock_irq(&hp->happy_lock); + + if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + spin_unlock_irq(&hp->happy_lock); + printk(KERN_ERR "%s: BUG! 
Tx Ring full when queue awake!\n", + dev->name); + return NETDEV_TX_BUSY; + } + + entry = hp->tx_new; + SXD(("SX", len, entry)); + hp->tx_skbs[entry] = skb; + + if (skb_shinfo(skb)->nr_frags == 0) { + u32 mapping, len; + + len = skb->len; + mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) + goto out_dma_error; + tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); + hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], + (tx_flags | (len & TXFLAG_SIZE)), + mapping); + entry = NEXT_TX(entry); + } else { + u32 first_len, first_mapping; + int frag, first_entry = entry; + + /* We must give this initial chunk to the device last. + * Otherwise we could race with the device. + */ + first_len = skb_headlen(skb); + first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping))) + goto out_dma_error; + entry = NEXT_TX(entry); + + for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; + u32 len, mapping, this_txflags; + + len = skb_frag_size(this_frag); + mapping = skb_frag_dma_map(hp->dma_dev, this_frag, + 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { + unmap_partial_tx_skb(hp, first_mapping, first_len, + first_entry, entry); + goto out_dma_error; + } + this_txflags = tx_flags; + if (frag == skb_shinfo(skb)->nr_frags - 1) + this_txflags |= TXFLAG_EOP; + hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], + (this_txflags | (len & TXFLAG_SIZE)), + mapping); + entry = NEXT_TX(entry); + } + hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry], + (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)), + first_mapping); + } + + hp->tx_new = entry; + + if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1)) + netif_stop_queue(dev); + + /* Get it going. 
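+ * The ETX_PENDING write below pokes the TX DMA engine so that it goes
+ * and fetches the descriptors we just marked with TXFLAG_OWN.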
*/ + hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP); + + spin_unlock_irq(&hp->happy_lock); + + tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); + return NETDEV_TX_OK; + +out_dma_error: + hp->tx_skbs[hp->tx_new] = NULL; + spin_unlock_irq(&hp->happy_lock); + + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) +{ + struct happy_meal *hp = netdev_priv(dev); + + spin_lock_irq(&hp->happy_lock); + happy_meal_get_counters(hp, hp->bigmacregs); + spin_unlock_irq(&hp->happy_lock); + + return &hp->net_stats; +} + +static void happy_meal_set_multicast(struct net_device *dev) +{ + struct happy_meal *hp = netdev_priv(dev); + void __iomem *bregs = hp->bigmacregs; + struct netdev_hw_addr *ha; + u32 crc; + + spin_lock_irq(&hp->happy_lock); + + if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { + hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); + hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); + hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); + hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); + } else if (dev->flags & IFF_PROMISC) { + hme_write32(hp, bregs + BMAC_RXCFG, + hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC); + } else { + u16 hash_table[4]; + + memset(hash_table, 0, sizeof(hash_table)); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc >>= 26; + hash_table[crc >> 4] |= 1 << (crc & 0xf); + } + hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); + hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); + hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); + hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); + } + + spin_unlock_irq(&hp->happy_lock); +} + +/* Ethtool support... */ +static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct happy_meal *hp = netdev_priv(dev); + u32 speed; + + cmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); + + /* XXX hardcoded stuff for now */ + cmd->port = PORT_TP; /* XXX no MII support */ + cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */ + cmd->phy_address = 0; /* XXX fixed PHYAD */ + + /* Record PHY settings. */ + spin_lock_irq(&hp->happy_lock); + hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); + hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA); + spin_unlock_irq(&hp->happy_lock); + + if (hp->sw_bmcr & BMCR_ANENABLE) { + cmd->autoneg = AUTONEG_ENABLE; + speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? + SPEED_100 : SPEED_10); + if (speed == SPEED_100) + cmd->duplex = + (hp->sw_lpa & (LPA_100FULL)) ? + DUPLEX_FULL : DUPLEX_HALF; + else + cmd->duplex = + (hp->sw_lpa & (LPA_10FULL)) ? + DUPLEX_FULL : DUPLEX_HALF; + } else { + cmd->autoneg = AUTONEG_DISABLE; + speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; + cmd->duplex = + (hp->sw_bmcr & BMCR_FULLDPLX) ? + DUPLEX_FULL : DUPLEX_HALF; + } + ethtool_cmd_speed_set(cmd, speed); + return 0; +} + +static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct happy_meal *hp = netdev_priv(dev); + + /* Verify the settings we care about. 
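+ * Only autoneg on/off is accepted, and for a forced link only 10 or
+ * 100 Mbps with half or full duplex; anything else gets -EINVAL.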
*/ + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + if (cmd->autoneg == AUTONEG_DISABLE && + ((ethtool_cmd_speed(cmd) != SPEED_100 && + ethtool_cmd_speed(cmd) != SPEED_10) || + (cmd->duplex != DUPLEX_HALF && + cmd->duplex != DUPLEX_FULL))) + return -EINVAL; + + /* Ok, do it to it. */ + spin_lock_irq(&hp->happy_lock); + del_timer(&hp->happy_timer); + happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd); + spin_unlock_irq(&hp->happy_lock); + + return 0; +} + +static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct happy_meal *hp = netdev_priv(dev); + + strlcpy(info->driver, "sunhme", sizeof(info->driver)); + strlcpy(info->version, "2.02", sizeof(info->version)); + if (hp->happy_flags & HFLAG_PCI) { + struct pci_dev *pdev = hp->happy_dev; + strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); + } +#ifdef CONFIG_SBUS + else { + const struct linux_prom_registers *regs; + struct platform_device *op = hp->happy_dev; + regs = of_get_property(op->dev.of_node, "regs", NULL); + if (regs) + snprintf(info->bus_info, sizeof(info->bus_info), + "SBUS:%d", + regs->which_io); + } +#endif +} + +static u32 hme_get_link(struct net_device *dev) +{ + struct happy_meal *hp = netdev_priv(dev); + + spin_lock_irq(&hp->happy_lock); + hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); + spin_unlock_irq(&hp->happy_lock); + + return hp->sw_bmsr & BMSR_LSTATUS; +} + +static const struct ethtool_ops hme_ethtool_ops = { + .get_settings = hme_get_settings, + .set_settings = hme_set_settings, + .get_drvinfo = hme_get_drvinfo, + .get_link = hme_get_link, +}; + +static int hme_version_printed; + +#ifdef CONFIG_SBUS +/* Given a happy meal sbus device, find it's quattro parent. + * If none exist, allocate and return a new one. + * + * Return NULL on failure. 
+ */ +static struct quattro *quattro_sbus_find(struct platform_device *child) +{ + struct device *parent = child->dev.parent; + struct platform_device *op; + struct quattro *qp; + + op = to_platform_device(parent); + qp = platform_get_drvdata(op); + if (qp) + return qp; + + qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); + if (qp != NULL) { + int i; + + for (i = 0; i < 4; i++) + qp->happy_meals[i] = NULL; + + qp->quattro_dev = child; + qp->next = qfe_sbus_list; + qfe_sbus_list = qp; + + platform_set_drvdata(op, qp); + } + return qp; +} + +/* After all quattro cards have been probed, we call these functions + * to register the IRQ handlers for the cards that have been + * successfully probed and skip the cards that failed to initialize + */ +static int __init quattro_sbus_register_irqs(void) +{ + struct quattro *qp; + + for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { + struct platform_device *op = qp->quattro_dev; + int err, qfe_slot, skip = 0; + + for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) { + if (!qp->happy_meals[qfe_slot]) + skip = 1; + } + if (skip) + continue; + + err = request_irq(op->archdata.irqs[0], + quattro_sbus_interrupt, + IRQF_SHARED, "Quattro", + qp); + if (err != 0) { + printk(KERN_ERR "Quattro HME: IRQ registration " + "error %d.\n", err); + return err; + } + } + + return 0; +} + +static void quattro_sbus_free_irqs(void) +{ + struct quattro *qp; + + for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { + struct platform_device *op = qp->quattro_dev; + int qfe_slot, skip = 0; + + for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) { + if (!qp->happy_meals[qfe_slot]) + skip = 1; + } + if (skip) + continue; + + free_irq(op->archdata.irqs[0], qp); + } +} +#endif /* CONFIG_SBUS */ + +#ifdef CONFIG_PCI +static struct quattro *quattro_pci_find(struct pci_dev *pdev) +{ + struct pci_dev *bdev = pdev->bus->self; + struct quattro *qp; + + if (!bdev) return NULL; + for (qp = qfe_pci_list; qp != NULL; qp = qp->next) { + struct pci_dev *qpdev = qp->quattro_dev; + + if (qpdev == bdev) + return qp; + } + qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); + if (qp != NULL) { + int i; + + for (i = 0; i < 4; i++) + qp->happy_meals[i] = NULL; + + qp->quattro_dev = bdev; + qp->next = qfe_pci_list; + qfe_pci_list = qp; + + /* No range tricks necessary on PCI. */ + qp->nranges = 0; + } + return qp; +} +#endif /* CONFIG_PCI */ + +static const struct net_device_ops hme_netdev_ops = { + .ndo_open = happy_meal_open, + .ndo_stop = happy_meal_close, + .ndo_start_xmit = happy_meal_start_xmit, + .ndo_tx_timeout = happy_meal_tx_timeout, + .ndo_get_stats = happy_meal_get_stats, + .ndo_set_rx_mode = happy_meal_set_multicast, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +#ifdef CONFIG_SBUS +static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) +{ + struct device_node *dp = op->dev.of_node, *sbus_dp; + struct quattro *qp = NULL; + struct happy_meal *hp; + struct net_device *dev; + int i, qfe_slot = -1; + int err = -ENODEV; + + sbus_dp = op->dev.parent->of_node; + + /* We can match PCI devices too, do not accept those here. 
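+ * (That is, bail out unless the parent OF node is an "sbus"/"sbi"
+ * bus; the PCI flavour of this chip is probed separately below.)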
*/ + if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi")) + return err; + + if (is_qfe) { + qp = quattro_sbus_find(op); + if (qp == NULL) + goto err_out; + for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) + if (qp->happy_meals[qfe_slot] == NULL) + break; + if (qfe_slot == 4) + goto err_out; + } + + err = -ENOMEM; + dev = alloc_etherdev(sizeof(struct happy_meal)); + if (!dev) + goto err_out; + SET_NETDEV_DEV(dev, &op->dev); + + if (hme_version_printed++ == 0) + printk(KERN_INFO "%s", version); + + /* If user did not specify a MAC address specifically, use + * the Quattro local-mac-address property... + */ + for (i = 0; i < 6; i++) { + if (macaddr[i] != 0) + break; + } + if (i < 6) { /* a mac address was given */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = macaddr[i]; + macaddr[5]++; + } else { + const unsigned char *addr; + int len; + + addr = of_get_property(dp, "local-mac-address", &len); + + if (qfe_slot != -1 && addr && len == ETH_ALEN) + memcpy(dev->dev_addr, addr, ETH_ALEN); + else + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + } + + hp = netdev_priv(dev); + + hp->happy_dev = op; + hp->dma_dev = &op->dev; + + spin_lock_init(&hp->happy_lock); + + err = -ENODEV; + if (qp != NULL) { + hp->qfe_parent = qp; + hp->qfe_ent = qfe_slot; + qp->happy_meals[qfe_slot] = dev; + } + + hp->gregs = of_ioremap(&op->resource[0], 0, + GREG_REG_SIZE, "HME Global Regs"); + if (!hp->gregs) { + printk(KERN_ERR "happymeal: Cannot map global registers.\n"); + goto err_out_free_netdev; + } + + hp->etxregs = of_ioremap(&op->resource[1], 0, + ETX_REG_SIZE, "HME TX Regs"); + if (!hp->etxregs) { + printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n"); + goto err_out_iounmap; + } + + hp->erxregs = of_ioremap(&op->resource[2], 0, + ERX_REG_SIZE, "HME RX Regs"); + if (!hp->erxregs) { + printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n"); + goto err_out_iounmap; + } + + hp->bigmacregs = of_ioremap(&op->resource[3], 0, + BMAC_REG_SIZE, "HME BIGMAC Regs"); + if (!hp->bigmacregs) { + printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n"); + goto err_out_iounmap; + } + + hp->tcvregs = of_ioremap(&op->resource[4], 0, + TCVR_REG_SIZE, "HME Tranceiver Regs"); + if (!hp->tcvregs) { + printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n"); + goto err_out_iounmap; + } + + hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); + if (hp->hm_revision == 0xff) + hp->hm_revision = 0xa0; + + /* Now enable the feature flags we can. */ + if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) + hp->happy_flags = HFLAG_20_21; + else if (hp->hm_revision != 0xa0) + hp->happy_flags = HFLAG_NOT_A0; + + if (qp != NULL) + hp->happy_flags |= HFLAG_QUATTRO; + + /* Get the supported DVMA burst sizes from our Happy SBUS. */ + hp->happy_bursts = of_getintprop_default(sbus_dp, + "burst-sizes", 0x00); + + hp->happy_block = dma_alloc_coherent(hp->dma_dev, + PAGE_SIZE, + &hp->hblock_dvma, + GFP_ATOMIC); + err = -ENOMEM; + if (!hp->happy_block) + goto err_out_iounmap; + + /* Force check of the link first time we are brought up. */ + hp->linkcheck = 0; + + /* Force timer state to 'asleep' with count of zero. */ + hp->timer_state = asleep; + hp->timer_ticks = 0; + + init_timer(&hp->happy_timer); + + hp->dev = dev; + dev->netdev_ops = &hme_netdev_ops; + dev->watchdog_timeo = 5*HZ; + dev->ethtool_ops = &hme_ethtool_ops; + + /* Happy Meal can do it all... 
*/ + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + dev->features |= dev->hw_features | NETIF_F_RXCSUM; + + hp->irq = op->archdata.irqs[0]; + +#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) + /* Hook up SBUS register/descriptor accessors. */ + hp->read_desc32 = sbus_hme_read_desc32; + hp->write_txd = sbus_hme_write_txd; + hp->write_rxd = sbus_hme_write_rxd; + hp->read32 = sbus_hme_read32; + hp->write32 = sbus_hme_write32; +#endif + + /* Grrr, Happy Meal comes up by default not advertising + * full duplex 100baseT capabilities, fix this. + */ + spin_lock_irq(&hp->happy_lock); + happy_meal_set_initial_advertisement(hp); + spin_unlock_irq(&hp->happy_lock); + + err = register_netdev(hp->dev); + if (err) { + printk(KERN_ERR "happymeal: Cannot register net device, " + "aborting.\n"); + goto err_out_free_coherent; + } + + platform_set_drvdata(op, hp); + + if (qfe_slot != -1) + printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ", + dev->name, qfe_slot); + else + printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ", + dev->name); + + printk("%pM\n", dev->dev_addr); + + return 0; + +err_out_free_coherent: + dma_free_coherent(hp->dma_dev, + PAGE_SIZE, + hp->happy_block, + hp->hblock_dvma); + +err_out_iounmap: + if (hp->gregs) + of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE); + if (hp->etxregs) + of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE); + if (hp->erxregs) + of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE); + if (hp->bigmacregs) + of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE); + if (hp->tcvregs) + of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE); + + if (qp) + qp->happy_meals[qfe_slot] = NULL; + +err_out_free_netdev: + free_netdev(dev); + +err_out: + return err; +} +#endif + +#ifdef CONFIG_PCI +#ifndef CONFIG_SPARC +static int is_quattro_p(struct pci_dev *pdev) +{ + struct pci_dev *busdev = pdev->bus->self; + struct pci_dev *this_pdev; + int n_hmes; + + if (busdev == NULL || + busdev->vendor != PCI_VENDOR_ID_DEC || + busdev->device != PCI_DEVICE_ID_DEC_21153) + return 0; + + n_hmes = 0; + list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) { + if (this_pdev->vendor == PCI_VENDOR_ID_SUN && + this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL) + n_hmes++; + } + + if (n_hmes != 4) + return 0; + + return 1; +} + +/* Fetch MAC address from vital product data of PCI ROM. */ +static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr) +{ + int this_offset; + + for (this_offset = 0x20; this_offset < len; this_offset++) { + void __iomem *p = rom_base + this_offset; + + if (readb(p + 0) != 0x90 || + readb(p + 1) != 0x00 || + readb(p + 2) != 0x09 || + readb(p + 3) != 0x4e || + readb(p + 4) != 0x41 || + readb(p + 5) != 0x06) + continue; + + this_offset += 6; + p += 6; + + if (index == 0) { + int i; + + for (i = 0; i < 6; i++) + dev_addr[i] = readb(p + i); + return 1; + } + index--; + } + return 0; +} + +static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr) +{ + size_t size; + void __iomem *p = pci_map_rom(pdev, &size); + + if (p) { + int index = 0; + int found; + + if (is_quattro_p(pdev)) + index = PCI_SLOT(pdev->devfn); + + found = readb(p) == 0x55 && + readb(p + 1) == 0xaa && + find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr); + pci_unmap_rom(pdev, p); + if (found) + return; + } + + /* Sun MAC prefix then 3 random bytes. 
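+ * 08:00:20 is the Sun Microsystems OUI, so the fabricated address at
+ * least looks like it belongs on this hardware.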
*/ + dev_addr[0] = 0x08; + dev_addr[1] = 0x00; + dev_addr[2] = 0x20; + get_random_bytes(&dev_addr[3], 3); +} +#endif /* !(CONFIG_SPARC) */ + +static int happy_meal_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct quattro *qp = NULL; +#ifdef CONFIG_SPARC + struct device_node *dp; +#endif + struct happy_meal *hp; + struct net_device *dev; + void __iomem *hpreg_base; + unsigned long hpreg_res; + int i, qfe_slot = -1; + char prom_name[64]; + int err; + + /* Now make sure pci_dev cookie is there. */ +#ifdef CONFIG_SPARC + dp = pci_device_to_OF_node(pdev); + strcpy(prom_name, dp->name); +#else + if (is_quattro_p(pdev)) + strcpy(prom_name, "SUNW,qfe"); + else + strcpy(prom_name, "SUNW,hme"); +#endif + + err = -ENODEV; + + if (pci_enable_device(pdev)) + goto err_out; + pci_set_master(pdev); + + if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) { + qp = quattro_pci_find(pdev); + if (qp == NULL) + goto err_out; + for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) + if (qp->happy_meals[qfe_slot] == NULL) + break; + if (qfe_slot == 4) + goto err_out; + } + + dev = alloc_etherdev(sizeof(struct happy_meal)); + err = -ENOMEM; + if (!dev) + goto err_out; + SET_NETDEV_DEV(dev, &pdev->dev); + + if (hme_version_printed++ == 0) + printk(KERN_INFO "%s", version); + + hp = netdev_priv(dev); + + hp->happy_dev = pdev; + hp->dma_dev = &pdev->dev; + + spin_lock_init(&hp->happy_lock); + + if (qp != NULL) { + hp->qfe_parent = qp; + hp->qfe_ent = qfe_slot; + qp->happy_meals[qfe_slot] = dev; + } + + hpreg_res = pci_resource_start(pdev, 0); + err = -ENODEV; + if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { + printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n"); + goto err_out_clear_quattro; + } + if (pci_request_regions(pdev, DRV_NAME)) { + printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, " + "aborting.\n"); + goto err_out_clear_quattro; + } + + if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) { + printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n"); + goto err_out_free_res; + } + + for (i = 0; i < 6; i++) { + if (macaddr[i] != 0) + break; + } + if (i < 6) { /* a mac address was given */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = macaddr[i]; + macaddr[5]++; + } else { +#ifdef CONFIG_SPARC + const unsigned char *addr; + int len; + + if (qfe_slot != -1 && + (addr = of_get_property(dp, "local-mac-address", &len)) + != NULL && + len == 6) { + memcpy(dev->dev_addr, addr, ETH_ALEN); + } else { + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + } +#else + get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); +#endif + } + + /* Layout registers. */ + hp->gregs = (hpreg_base + 0x0000UL); + hp->etxregs = (hpreg_base + 0x2000UL); + hp->erxregs = (hpreg_base + 0x4000UL); + hp->bigmacregs = (hpreg_base + 0x6000UL); + hp->tcvregs = (hpreg_base + 0x7000UL); + +#ifdef CONFIG_SPARC + hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); + if (hp->hm_revision == 0xff) + hp->hm_revision = 0xc0 | (pdev->revision & 0x0f); +#else + /* works with this on non-sparc hosts */ + hp->hm_revision = 0x20; +#endif + + /* Now enable the feature flags we can. */ + if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) + hp->happy_flags = HFLAG_20_21; + else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0) + hp->happy_flags = HFLAG_NOT_A0; + + if (qp != NULL) + hp->happy_flags |= HFLAG_QUATTRO; + + /* And of course, indicate this is PCI. 
*/ + hp->happy_flags |= HFLAG_PCI; + +#ifdef CONFIG_SPARC + /* Assume PCI happy meals can handle all burst sizes. */ + hp->happy_bursts = DMA_BURSTBITS; +#endif + + hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &hp->hblock_dvma, GFP_KERNEL); + err = -ENODEV; + if (!hp->happy_block) + goto err_out_iounmap; + + hp->linkcheck = 0; + hp->timer_state = asleep; + hp->timer_ticks = 0; + + init_timer(&hp->happy_timer); + + hp->irq = pdev->irq; + hp->dev = dev; + dev->netdev_ops = &hme_netdev_ops; + dev->watchdog_timeo = 5*HZ; + dev->ethtool_ops = &hme_ethtool_ops; + + /* Happy Meal can do it all... */ + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; + dev->features |= dev->hw_features | NETIF_F_RXCSUM; + +#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) + /* Hook up PCI register/descriptor accessors. */ + hp->read_desc32 = pci_hme_read_desc32; + hp->write_txd = pci_hme_write_txd; + hp->write_rxd = pci_hme_write_rxd; + hp->read32 = pci_hme_read32; + hp->write32 = pci_hme_write32; +#endif + + /* Grrr, Happy Meal comes up by default not advertising + * full duplex 100baseT capabilities, fix this. + */ + spin_lock_irq(&hp->happy_lock); + happy_meal_set_initial_advertisement(hp); + spin_unlock_irq(&hp->happy_lock); + + err = register_netdev(hp->dev); + if (err) { + printk(KERN_ERR "happymeal(PCI): Cannot register net device, " + "aborting.\n"); + goto err_out_iounmap; + } + + pci_set_drvdata(pdev, hp); + + if (!qfe_slot) { + struct pci_dev *qpdev = qp->quattro_dev; + + prom_name[0] = 0; + if (!strncmp(dev->name, "eth", 3)) { + int i = simple_strtoul(dev->name + 3, NULL, 10); + sprintf(prom_name, "-%d", i + 3); + } + printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name); + if (qpdev->vendor == PCI_VENDOR_ID_DEC && + qpdev->device == PCI_DEVICE_ID_DEC_21153) + printk("DEC 21153 PCI Bridge\n"); + else + printk("unknown bridge %04x.%04x\n", + qpdev->vendor, qpdev->device); + } + + if (qfe_slot != -1) + printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ", + dev->name, qfe_slot); + else + printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ", + dev->name); + + printk("%pM\n", dev->dev_addr); + + return 0; + +err_out_iounmap: + iounmap(hp->gregs); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_clear_quattro: + if (qp != NULL) + qp->happy_meals[qfe_slot] = NULL; + + free_netdev(dev); + +err_out: + return err; +} + +static void happy_meal_pci_remove(struct pci_dev *pdev) +{ + struct happy_meal *hp = pci_get_drvdata(pdev); + struct net_device *net_dev = hp->dev; + + unregister_netdev(net_dev); + + dma_free_coherent(hp->dma_dev, PAGE_SIZE, + hp->happy_block, hp->hblock_dvma); + iounmap(hp->gregs); + pci_release_regions(hp->happy_dev); + + free_netdev(net_dev); +} + +static const struct pci_device_id happymeal_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(pci, happymeal_pci_ids); + +static struct pci_driver hme_pci_driver = { + .name = "hme", + .id_table = happymeal_pci_ids, + .probe = happy_meal_pci_probe, + .remove = happy_meal_pci_remove, +}; + +static int __init happy_meal_pci_init(void) +{ + return pci_register_driver(&hme_pci_driver); +} + +static void happy_meal_pci_exit(void) +{ + pci_unregister_driver(&hme_pci_driver); + + while (qfe_pci_list) { + struct quattro *qfe = qfe_pci_list; + struct quattro *next = qfe->next; + + kfree(qfe); + + qfe_pci_list = next; + } +} + +#endif + +#ifdef CONFIG_SBUS 
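/* Editor's note -- illustrative sketch only, not part of the imported
 * driver.  The PCI and SBUS probe paths above install bus-specific
 * register accessors (sbus_hme_read32()/sbus_hme_write32() versus
 * pci_hme_read32()/pci_hme_write32()) into struct happy_meal when both
 * bus types are built in, so the shared core code can touch the chip
 * without knowing which bus it sits on.  A minimal example of how such
 * a hook gets used (hme_read_global_status() is a hypothetical helper,
 * not something the driver defines):
 */
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
static inline u32 hme_read_global_status(struct happy_meal *hp)
{
	/* GREG_STAT is the global status register offset from sunhme.h;
	 * hp->read32 was set by the probe routine for this bus type. */
	return hp->read32(hp->gregs + GREG_STAT);
}
#endif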
+static const struct of_device_id hme_sbus_match[]; +static int hme_sbus_probe(struct platform_device *op) +{ + const struct of_device_id *match; + struct device_node *dp = op->dev.of_node; + const char *model = of_get_property(dp, "model", NULL); + int is_qfe; + + match = of_match_device(hme_sbus_match, &op->dev); + if (!match) + return -EINVAL; + is_qfe = (match->data != NULL); + + if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) + is_qfe = 1; + + return happy_meal_sbus_probe_one(op, is_qfe); +} + +static int hme_sbus_remove(struct platform_device *op) +{ + struct happy_meal *hp = platform_get_drvdata(op); + struct net_device *net_dev = hp->dev; + + unregister_netdev(net_dev); + + /* XXX qfe parent interrupt... */ + + of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE); + of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE); + of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE); + of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE); + of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE); + dma_free_coherent(hp->dma_dev, + PAGE_SIZE, + hp->happy_block, + hp->hblock_dvma); + + free_netdev(net_dev); + + return 0; +} + +static const struct of_device_id hme_sbus_match[] = { + { + .name = "SUNW,hme", + }, + { + .name = "SUNW,qfe", + .data = (void *) 1, + }, + { + .name = "qfe", + .data = (void *) 1, + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, hme_sbus_match); + +static struct platform_driver hme_sbus_driver = { + .driver = { + .name = "hme", + .of_match_table = hme_sbus_match, + }, + .probe = hme_sbus_probe, + .remove = hme_sbus_remove, +}; + +static int __init happy_meal_sbus_init(void) +{ + int err; + + err = platform_driver_register(&hme_sbus_driver); + if (!err) + err = quattro_sbus_register_irqs(); + + return err; +} + +static void happy_meal_sbus_exit(void) +{ + platform_driver_unregister(&hme_sbus_driver); + quattro_sbus_free_irqs(); + + while (qfe_sbus_list) { + struct quattro *qfe = qfe_sbus_list; + struct quattro *next = qfe->next; + + kfree(qfe); + + qfe_sbus_list = next; + } +} +#endif + +static int __init happy_meal_probe(void) +{ + int err = 0; + +#ifdef CONFIG_SBUS + err = happy_meal_sbus_init(); +#endif +#ifdef CONFIG_PCI + if (!err) { + err = happy_meal_pci_init(); +#ifdef CONFIG_SBUS + if (err) + happy_meal_sbus_exit(); +#endif + } +#endif + + return err; +} + + +static void __exit happy_meal_exit(void) +{ +#ifdef CONFIG_SBUS + happy_meal_sbus_exit(); +#endif +#ifdef CONFIG_PCI + happy_meal_pci_exit(); +#endif +} + +module_init(happy_meal_probe); +module_exit(happy_meal_exit); diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h new file mode 100644 index 000000000..f4307654e --- /dev/null +++ b/drivers/net/ethernet/sun/sunhme.h @@ -0,0 +1,513 @@ +/* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $ + * sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver. + * Also known as the "Happy Meal". + * + * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com) + */ + +#ifndef _SUNHME_H +#define _SUNHME_H + +#include + +/* Happy Meal global registers. */ +#define GREG_SWRESET 0x000UL /* Software Reset */ +#define GREG_CFG 0x004UL /* Config Register */ +#define GREG_STAT 0x108UL /* Status */ +#define GREG_IMASK 0x10cUL /* Interrupt Mask */ +#define GREG_REG_SIZE 0x110UL + +/* Global reset register. */ +#define GREG_RESET_ETX 0x01 +#define GREG_RESET_ERX 0x02 +#define GREG_RESET_ALL 0x03 + +/* Global config register. 
*/ +#define GREG_CFG_BURSTMSK 0x03 +#define GREG_CFG_BURST16 0x00 +#define GREG_CFG_BURST32 0x01 +#define GREG_CFG_BURST64 0x02 +#define GREG_CFG_64BIT 0x04 +#define GREG_CFG_PARITY 0x08 +#define GREG_CFG_RESV 0x10 + +/* Global status register. */ +#define GREG_STAT_GOTFRAME 0x00000001 /* Received a frame */ +#define GREG_STAT_RCNTEXP 0x00000002 /* Receive frame counter expired */ +#define GREG_STAT_ACNTEXP 0x00000004 /* Align-error counter expired */ +#define GREG_STAT_CCNTEXP 0x00000008 /* CRC-error counter expired */ +#define GREG_STAT_LCNTEXP 0x00000010 /* Length-error counter expired */ +#define GREG_STAT_RFIFOVF 0x00000020 /* Receive FIFO overflow */ +#define GREG_STAT_CVCNTEXP 0x00000040 /* Code-violation counter expired */ +#define GREG_STAT_STSTERR 0x00000080 /* Test error in XIF for SQE */ +#define GREG_STAT_SENTFRAME 0x00000100 /* Transmitted a frame */ +#define GREG_STAT_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ +#define GREG_STAT_MAXPKTERR 0x00000400 /* Max-packet size error */ +#define GREG_STAT_NCNTEXP 0x00000800 /* Normal-collision counter expired */ +#define GREG_STAT_ECNTEXP 0x00001000 /* Excess-collision counter expired */ +#define GREG_STAT_LCCNTEXP 0x00002000 /* Late-collision counter expired */ +#define GREG_STAT_FCNTEXP 0x00004000 /* First-collision counter expired */ +#define GREG_STAT_DTIMEXP 0x00008000 /* Defer-timer expired */ +#define GREG_STAT_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */ +#define GREG_STAT_NORXD 0x00020000 /* No more receive descriptors */ +#define GREG_STAT_RXERR 0x00040000 /* Error during receive dma */ +#define GREG_STAT_RXLATERR 0x00080000 /* Late error during receive dma */ +#define GREG_STAT_RXPERR 0x00100000 /* Parity error during receive dma */ +#define GREG_STAT_RXTERR 0x00200000 /* Tag error during receive dma */ +#define GREG_STAT_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */ +#define GREG_STAT_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */ +#define GREG_STAT_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */ +#define GREG_STAT_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */ +#define GREG_STAT_TXEACK 0x04000000 /* Error during transmit dma */ +#define GREG_STAT_TXLERR 0x08000000 /* Late error during transmit dma */ +#define GREG_STAT_TXPERR 0x10000000 /* Parity error during transmit dma */ +#define GREG_STAT_TXTERR 0x20000000 /* Tag error during transmit dma */ +#define GREG_STAT_SLVERR 0x40000000 /* PIO access got an error */ +#define GREG_STAT_SLVPERR 0x80000000 /* PIO access got a parity error */ + +/* All interesting error conditions. */ +#define GREG_STAT_ERRORS 0xfc7efefc + +/* Global interrupt mask register. 
*/ +#define GREG_IMASK_GOTFRAME 0x00000001 /* Received a frame */ +#define GREG_IMASK_RCNTEXP 0x00000002 /* Receive frame counter expired */ +#define GREG_IMASK_ACNTEXP 0x00000004 /* Align-error counter expired */ +#define GREG_IMASK_CCNTEXP 0x00000008 /* CRC-error counter expired */ +#define GREG_IMASK_LCNTEXP 0x00000010 /* Length-error counter expired */ +#define GREG_IMASK_RFIFOVF 0x00000020 /* Receive FIFO overflow */ +#define GREG_IMASK_CVCNTEXP 0x00000040 /* Code-violation counter expired */ +#define GREG_IMASK_STSTERR 0x00000080 /* Test error in XIF for SQE */ +#define GREG_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame */ +#define GREG_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun */ +#define GREG_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error */ +#define GREG_IMASK_NCNTEXP 0x00000800 /* Normal-collision counter expired */ +#define GREG_IMASK_ECNTEXP 0x00001000 /* Excess-collision counter expired */ +#define GREG_IMASK_LCCNTEXP 0x00002000 /* Late-collision counter expired */ +#define GREG_IMASK_FCNTEXP 0x00004000 /* First-collision counter expired */ +#define GREG_IMASK_DTIMEXP 0x00008000 /* Defer-timer expired */ +#define GREG_IMASK_RXTOHOST 0x00010000 /* Moved from receive-FIFO to host memory */ +#define GREG_IMASK_NORXD 0x00020000 /* No more receive descriptors */ +#define GREG_IMASK_RXERR 0x00040000 /* Error during receive dma */ +#define GREG_IMASK_RXLATERR 0x00080000 /* Late error during receive dma */ +#define GREG_IMASK_RXPERR 0x00100000 /* Parity error during receive dma */ +#define GREG_IMASK_RXTERR 0x00200000 /* Tag error during receive dma */ +#define GREG_IMASK_EOPERR 0x00400000 /* Transmit descriptor did not have EOP set */ +#define GREG_IMASK_MIFIRQ 0x00800000 /* MIF is signaling an interrupt condition */ +#define GREG_IMASK_HOSTTOTX 0x01000000 /* Moved from host memory to transmit-FIFO */ +#define GREG_IMASK_TXALL 0x02000000 /* Transmitted all packets in the tx-fifo */ +#define GREG_IMASK_TXEACK 0x04000000 /* Error during transmit dma */ +#define GREG_IMASK_TXLERR 0x08000000 /* Late error during transmit dma */ +#define GREG_IMASK_TXPERR 0x10000000 /* Parity error during transmit dma */ +#define GREG_IMASK_TXTERR 0x20000000 /* Tag error during transmit dma */ +#define GREG_IMASK_SLVERR 0x40000000 /* PIO access got an error */ +#define GREG_IMASK_SLVPERR 0x80000000 /* PIO access got a parity error */ + +/* Happy Meal external transmitter registers. */ +#define ETX_PENDING 0x00UL /* Transmit pending/wakeup register */ +#define ETX_CFG 0x04UL /* Transmit config register */ +#define ETX_RING 0x08UL /* Transmit ring pointer */ +#define ETX_BBASE 0x0cUL /* Transmit buffer base */ +#define ETX_BDISP 0x10UL /* Transmit buffer displacement */ +#define ETX_FIFOWPTR 0x14UL /* FIFO write ptr */ +#define ETX_FIFOSWPTR 0x18UL /* FIFO write ptr (shadow register) */ +#define ETX_FIFORPTR 0x1cUL /* FIFO read ptr */ +#define ETX_FIFOSRPTR 0x20UL /* FIFO read ptr (shadow register) */ +#define ETX_FIFOPCNT 0x24UL /* FIFO packet counter */ +#define ETX_SMACHINE 0x28UL /* Transmitter state machine */ +#define ETX_RSIZE 0x2cUL /* Ring descriptor size */ +#define ETX_BPTR 0x30UL /* Transmit data buffer ptr */ +#define ETX_REG_SIZE 0x34UL + +/* ETX transmit pending register. */ +#define ETX_TP_DMAWAKEUP 0x00000001 /* Restart transmit dma */ + +/* ETX config register. 
*/ +#define ETX_CFG_DMAENABLE 0x00000001 /* Enable transmit dma */ +#define ETX_CFG_FIFOTHRESH 0x000003fe /* Transmit FIFO threshold */ +#define ETX_CFG_IRQDAFTER 0x00000400 /* Interrupt after TX-FIFO drained */ +#define ETX_CFG_IRQDBEFORE 0x00000000 /* Interrupt before TX-FIFO drained */ + +#define ETX_RSIZE_SHIFT 4 + +/* Happy Meal external receiver registers. */ +#define ERX_CFG 0x00UL /* Receiver config register */ +#define ERX_RING 0x04UL /* Receiver ring ptr */ +#define ERX_BPTR 0x08UL /* Receiver buffer ptr */ +#define ERX_FIFOWPTR 0x0cUL /* FIFO write ptr */ +#define ERX_FIFOSWPTR 0x10UL /* FIFO write ptr (shadow register) */ +#define ERX_FIFORPTR 0x14UL /* FIFO read ptr */ +#define ERX_FIFOSRPTR 0x18UL /* FIFO read ptr (shadow register) */ +#define ERX_SMACHINE 0x1cUL /* Receiver state machine */ +#define ERX_REG_SIZE 0x20UL + +/* ERX config register. */ +#define ERX_CFG_DMAENABLE 0x00000001 /* Enable receive DMA */ +#define ERX_CFG_RESV1 0x00000006 /* Unused... */ +#define ERX_CFG_BYTEOFFSET 0x00000038 /* Receive first byte offset */ +#define ERX_CFG_RESV2 0x000001c0 /* Unused... */ +#define ERX_CFG_SIZE32 0x00000000 /* Receive ring size == 32 */ +#define ERX_CFG_SIZE64 0x00000200 /* Receive ring size == 64 */ +#define ERX_CFG_SIZE128 0x00000400 /* Receive ring size == 128 */ +#define ERX_CFG_SIZE256 0x00000600 /* Receive ring size == 256 */ +#define ERX_CFG_RESV3 0x0000f800 /* Unused... */ +#define ERX_CFG_CSUMSTART 0x007f0000 /* Offset of checksum start, + * in halfwords. */ + +/* I'd like a Big Mac, small fries, small coke, and SparcLinux please. */ +#define BMAC_XIFCFG 0x0000UL /* XIF config register */ + /* 0x4-->0x204, reserved */ +#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset */ +#define BMAC_TXCFG 0x20cUL /* Transmitter config register */ +#define BMAC_IGAP1 0x210UL /* Inter-packet gap 1 */ +#define BMAC_IGAP2 0x214UL /* Inter-packet gap 2 */ +#define BMAC_ALIMIT 0x218UL /* Transmit attempt limit */ +#define BMAC_STIME 0x21cUL /* Transmit slot time */ +#define BMAC_PLEN 0x220UL /* Size of transmit preamble */ +#define BMAC_PPAT 0x224UL /* Pattern for transmit preamble */ +#define BMAC_TXSDELIM 0x228UL /* Transmit delimiter */ +#define BMAC_JSIZE 0x22cUL /* Jam size */ +#define BMAC_TXMAX 0x230UL /* Transmit max pkt size */ +#define BMAC_TXMIN 0x234UL /* Transmit min pkt size */ +#define BMAC_PATTEMPT 0x238UL /* Count of transmit peak attempts */ +#define BMAC_DTCTR 0x23cUL /* Transmit defer timer */ +#define BMAC_NCCTR 0x240UL /* Transmit normal-collision counter */ +#define BMAC_FCCTR 0x244UL /* Transmit first-collision counter */ +#define BMAC_EXCTR 0x248UL /* Transmit excess-collision counter */ +#define BMAC_LTCTR 0x24cUL /* Transmit late-collision counter */ +#define BMAC_RSEED 0x250UL /* Transmit random number seed */ +#define BMAC_TXSMACHINE 0x254UL /* Transmit state machine */ + /* 0x258-->0x304, reserved */ +#define BMAC_RXSWRESET 0x308UL /* Receiver software reset */ +#define BMAC_RXCFG 0x30cUL /* Receiver config register */ +#define BMAC_RXMAX 0x310UL /* Receive max pkt size */ +#define BMAC_RXMIN 0x314UL /* Receive min pkt size */ +#define BMAC_MACADDR2 0x318UL /* Ether address register 2 */ +#define BMAC_MACADDR1 0x31cUL /* Ether address register 1 */ +#define BMAC_MACADDR0 0x320UL /* Ether address register 0 */ +#define BMAC_FRCTR 0x324UL /* Receive frame receive counter */ +#define BMAC_GLECTR 0x328UL /* Receive giant-length error counter */ +#define BMAC_UNALECTR 0x32cUL /* Receive unaligned error counter */ +#define BMAC_RCRCECTR 0x330UL /* 
Receive CRC error counter */ +#define BMAC_RXSMACHINE 0x334UL /* Receiver state machine */ +#define BMAC_RXCVALID 0x338UL /* Receiver code violation */ + /* 0x33c, reserved */ +#define BMAC_HTABLE3 0x340UL /* Hash table 3 */ +#define BMAC_HTABLE2 0x344UL /* Hash table 2 */ +#define BMAC_HTABLE1 0x348UL /* Hash table 1 */ +#define BMAC_HTABLE0 0x34cUL /* Hash table 0 */ +#define BMAC_AFILTER2 0x350UL /* Address filter 2 */ +#define BMAC_AFILTER1 0x354UL /* Address filter 1 */ +#define BMAC_AFILTER0 0x358UL /* Address filter 0 */ +#define BMAC_AFMASK 0x35cUL /* Address filter mask */ +#define BMAC_REG_SIZE 0x360UL + +/* BigMac XIF config register. */ +#define BIGMAC_XCFG_ODENABLE 0x00000001 /* Output driver enable */ +#define BIGMAC_XCFG_XLBACK 0x00000002 /* Loopback-mode XIF enable */ +#define BIGMAC_XCFG_MLBACK 0x00000004 /* Loopback-mode MII enable */ +#define BIGMAC_XCFG_MIIDISAB 0x00000008 /* MII receive buffer disable */ +#define BIGMAC_XCFG_SQENABLE 0x00000010 /* SQE test enable */ +#define BIGMAC_XCFG_SQETWIN 0x000003e0 /* SQE time window */ +#define BIGMAC_XCFG_LANCE 0x00000010 /* Lance mode enable */ +#define BIGMAC_XCFG_LIPG0 0x000003e0 /* Lance mode IPG0 */ + +/* BigMac transmit config register. */ +#define BIGMAC_TXCFG_ENABLE 0x00000001 /* Enable the transmitter */ +#define BIGMAC_TXCFG_SMODE 0x00000020 /* Enable slow transmit mode */ +#define BIGMAC_TXCFG_CIGN 0x00000040 /* Ignore transmit collisions */ +#define BIGMAC_TXCFG_FCSOFF 0x00000080 /* Do not emit FCS */ +#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff */ +#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex */ +#define BIGMAC_TXCFG_DGIVEUP 0x00000400 /* Don't give up on transmits */ + +/* BigMac receive config register. */ +#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */ +#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */ +#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */ +#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */ +#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */ +#define BIGMAC_RXCFG_REJME 0x00000200 /* Reject packets addressed to me */ +#define BIGMAC_RXCFG_PGRP 0x00000400 /* Enable promisc group mode */ +#define BIGMAC_RXCFG_HENABLE 0x00000800 /* Enable the hash filter */ +#define BIGMAC_RXCFG_AENABLE 0x00001000 /* Enable the address filter */ + +/* These are the "Management Interface" (ie. MIF) registers of the transceiver. */ +#define TCVR_BBCLOCK 0x00UL /* Bit bang clock register */ +#define TCVR_BBDATA 0x04UL /* Bit bang data register */ +#define TCVR_BBOENAB 0x08UL /* Bit bang output enable */ +#define TCVR_FRAME 0x0cUL /* Frame control/data register */ +#define TCVR_CFG 0x10UL /* MIF config register */ +#define TCVR_IMASK 0x14UL /* MIF interrupt mask */ +#define TCVR_STATUS 0x18UL /* MIF status */ +#define TCVR_SMACHINE 0x1cUL /* MIF state machine */ +#define TCVR_REG_SIZE 0x20UL + +/* Frame commands. */ +#define FRAME_WRITE 0x50020000 +#define FRAME_READ 0x60020000 + +/* Transceiver config register */ +#define TCV_CFG_PSELECT 0x00000001 /* Select PHY */ +#define TCV_CFG_PENABLE 0x00000002 /* Enable MIF polling */ +#define TCV_CFG_BENABLE 0x00000004 /* Enable the "bit banger" oh baby */ +#define TCV_CFG_PREGADDR 0x000000f8 /* Address of poll register */ +#define TCV_CFG_MDIO0 0x00000100 /* MDIO zero, data/attached */ +#define TCV_CFG_MDIO1 0x00000200 /* MDIO one, data/attached */ +#define TCV_CFG_PDADDR 0x00007c00 /* Device PHY address polling */ + +/* Here are some PHY addresses. 
*/ +#define TCV_PADDR_ETX 0 /* Internal transceiver */ +#define TCV_PADDR_ITX 1 /* External transceiver */ + +/* Transceiver status register */ +#define TCV_STAT_BASIC 0xffff0000 /* The "basic" part */ +#define TCV_STAT_NORMAL 0x0000ffff /* The "non-basic" part */ + +/* Inside the Happy Meal transceiver is the physical layer, they use an + * implementations for National Semiconductor, part number DP83840VCE. + * You can retrieve the data sheets and programming docs for this beast + * from http://www.national.com/ + * + * The DP83840 is capable of both 10 and 100Mbps ethernet, in both + * half and full duplex mode. It also supports auto negotiation. + * + * But.... THIS THING IS A PAIN IN THE ASS TO PROGRAM! + * Debugging eeprom burnt code is more fun than programming this chip! + */ + +/* Generic MII registers defined in linux/mii.h, these below + * are DP83840 specific. + */ +#define DP83840_CSCONFIG 0x17 /* CS configuration */ + +/* The Carrier Sense config register. */ +#define CSCONFIG_RESV1 0x0001 /* Unused... */ +#define CSCONFIG_LED4 0x0002 /* Pin for full-dplx LED4 */ +#define CSCONFIG_LED1 0x0004 /* Pin for conn-status LED1 */ +#define CSCONFIG_RESV2 0x0008 /* Unused... */ +#define CSCONFIG_TCVDISAB 0x0010 /* Turns off the transceiver */ +#define CSCONFIG_DFBYPASS 0x0020 /* Bypass disconnect function */ +#define CSCONFIG_GLFORCE 0x0040 /* Good link force for 100mbps */ +#define CSCONFIG_CLKTRISTATE 0x0080 /* Tristate 25m clock */ +#define CSCONFIG_RESV3 0x0700 /* Unused... */ +#define CSCONFIG_ENCODE 0x0800 /* 1=MLT-3, 0=binary */ +#define CSCONFIG_RENABLE 0x1000 /* Repeater mode enable */ +#define CSCONFIG_TCDISABLE 0x2000 /* Disable timeout counter */ +#define CSCONFIG_RESV4 0x4000 /* Unused... */ +#define CSCONFIG_NDISABLE 0x8000 /* Disable NRZI */ + +/* Happy Meal descriptor rings and such. + * All descriptor rings must be aligned on a 2K boundary. + * All receive buffers must be 64 byte aligned. + * Always write the address first before setting the ownership + * bits to avoid races with the hardware scanning the ring. + */ +typedef u32 __bitwise__ hme32; + +struct happy_meal_rxd { + hme32 rx_flags; + hme32 rx_addr; +}; + +#define RXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */ +#define RXFLAG_OVERFLOW 0x40000000 /* 1 = buffer overflow */ +#define RXFLAG_SIZE 0x3fff0000 /* Size of the buffer */ +#define RXFLAG_CSUM 0x0000ffff /* HW computed checksum */ + +struct happy_meal_txd { + hme32 tx_flags; + hme32 tx_addr; +}; + +#define TXFLAG_OWN 0x80000000 /* 1 = hardware, 0 = software */ +#define TXFLAG_SOP 0x40000000 /* 1 = start of packet */ +#define TXFLAG_EOP 0x20000000 /* 1 = end of packet */ +#define TXFLAG_CSENABLE 0x10000000 /* 1 = enable hw-checksums */ +#define TXFLAG_CSLOCATION 0x0ff00000 /* Where to stick the csum */ +#define TXFLAG_CSBUFBEGIN 0x000fc000 /* Where to begin checksum */ +#define TXFLAG_SIZE 0x00003fff /* Size of the packet */ + +#define TX_RING_SIZE 32 /* Must be >16 and <255, multiple of 16 */ +#define RX_RING_SIZE 32 /* see ERX_CFG_SIZE* for possible values */ + +#if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0) +#error TX_RING_SIZE holds illegal value +#endif + +#define TX_RING_MAXSIZE 256 +#define RX_RING_MAXSIZE 256 + +/* We use a 14 byte offset for checksum computation. 
*/ +#if (RX_RING_SIZE == 32) +#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE32|((14/2)<<16)) +#else +#if (RX_RING_SIZE == 64) +#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE64|((14/2)<<16)) +#else +#if (RX_RING_SIZE == 128) +#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE128|((14/2)<<16)) +#else +#if (RX_RING_SIZE == 256) +#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE256|((14/2)<<16)) +#else +#error RX_RING_SIZE holds illegal value +#endif +#endif +#endif +#endif + +#define NEXT_RX(num) (((num) + 1) & (RX_RING_SIZE - 1)) +#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1)) +#define PREV_RX(num) (((num) - 1) & (RX_RING_SIZE - 1)) +#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1)) + +#define TX_BUFFS_AVAIL(hp) \ + (((hp)->tx_old <= (hp)->tx_new) ? \ + (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \ + (hp)->tx_old - (hp)->tx_new - 1) + +#define RX_OFFSET 2 +#define RX_BUF_ALLOC_SIZE (1546 + RX_OFFSET + 64) + +#define RX_COPY_THRESHOLD 256 + +struct hmeal_init_block { + struct happy_meal_rxd happy_meal_rxd[RX_RING_MAXSIZE]; + struct happy_meal_txd happy_meal_txd[TX_RING_MAXSIZE]; +}; + +#define hblock_offset(mem, elem) \ +((__u32)((unsigned long)(&(((struct hmeal_init_block *)0)->mem[elem])))) + +/* Now software state stuff. */ +enum happy_transceiver { + external = 0, + internal = 1, + none = 2, +}; + +/* Timer state engine. */ +enum happy_timer_state { + arbwait = 0, /* Waiting for auto negotiation to complete. */ + lupwait = 1, /* Auto-neg complete, awaiting link-up status. */ + ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */ + asleep = 3, /* Time inactive. */ +}; + +struct quattro; + +/* Happy happy, joy joy! */ +struct happy_meal { + void __iomem *gregs; /* Happy meal global registers */ + struct hmeal_init_block *happy_block; /* RX and TX descriptors (CPU addr) */ + +#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) + u32 (*read_desc32)(hme32 *); + void (*write_txd)(struct happy_meal_txd *, u32, u32); + void (*write_rxd)(struct happy_meal_rxd *, u32, u32); +#endif + + /* This is either an platform_device or a pci_dev. 
*/ + void *happy_dev; + struct device *dma_dev; + + spinlock_t happy_lock; + + struct sk_buff *rx_skbs[RX_RING_SIZE]; + struct sk_buff *tx_skbs[TX_RING_SIZE]; + + int rx_new, tx_new, rx_old, tx_old; + + struct net_device_stats net_stats; /* Statistical counters */ + +#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) + u32 (*read32)(void __iomem *); + void (*write32)(void __iomem *, u32); +#endif + + void __iomem *etxregs; /* External transmitter regs */ + void __iomem *erxregs; /* External receiver regs */ + void __iomem *bigmacregs; /* BIGMAC core regs */ + void __iomem *tcvregs; /* MIF transceiver regs */ + + dma_addr_t hblock_dvma; /* DVMA visible address happy block */ + unsigned int happy_flags; /* Driver state flags */ + int irq; + enum happy_transceiver tcvr_type; /* Kind of transceiver in use */ + unsigned int happy_bursts; /* Get your mind out of the gutter */ + unsigned int paddr; /* PHY address for transceiver */ + unsigned short hm_revision; /* Happy meal revision */ + unsigned short sw_bmcr; /* SW copy of BMCR */ + unsigned short sw_bmsr; /* SW copy of BMSR */ + unsigned short sw_physid1; /* SW copy of PHYSID1 */ + unsigned short sw_physid2; /* SW copy of PHYSID2 */ + unsigned short sw_advertise; /* SW copy of ADVERTISE */ + unsigned short sw_lpa; /* SW copy of LPA */ + unsigned short sw_expansion; /* SW copy of EXPANSION */ + unsigned short sw_csconfig; /* SW copy of CSCONFIG */ + unsigned int auto_speed; /* Auto-nego link speed */ + unsigned int forced_speed; /* Force mode link speed */ + unsigned int poll_data; /* MIF poll data */ + unsigned int poll_flag; /* MIF poll flag */ + unsigned int linkcheck; /* Have we checked the link yet? */ + unsigned int lnkup; /* Is the link up as far as we know? */ + unsigned int lnkdown; /* Trying to force the link down? */ + unsigned int lnkcnt; /* Counter for link-up attempts. */ + struct timer_list happy_timer; /* To watch the link when coming up. */ + enum happy_timer_state timer_state; /* State of the auto-neg timer. */ + unsigned int timer_ticks; /* Number of clicks at each state. */ + + struct net_device *dev; /* Backpointer */ + struct quattro *qfe_parent; /* For Quattro cards */ + int qfe_ent; /* Which instance on quattro */ +}; + +/* Here are the happy flags. */ +#define HFLAG_POLL 0x00000001 /* We are doing MIF polling */ +#define HFLAG_FENABLE 0x00000002 /* The MII frame is enabled */ +#define HFLAG_LANCE 0x00000004 /* We are using lance-mode */ +#define HFLAG_RXENABLE 0x00000008 /* Receiver is enabled */ +#define HFLAG_AUTO 0x00000010 /* Using auto-negotiation, 0 = force */ +#define HFLAG_FULL 0x00000020 /* Full duplex enable */ +#define HFLAG_MACFULL 0x00000040 /* Using full duplex in the MAC */ +#define HFLAG_POLLENABLE 0x00000080 /* Actually try MIF polling */ +#define HFLAG_RXCV 0x00000100 /* XXX RXCV ENABLE */ +#define HFLAG_INIT 0x00000200 /* Init called at least once */ +#define HFLAG_LINKUP 0x00000400 /* 1 = Link is up */ +#define HFLAG_PCI 0x00000800 /* PCI based Happy Meal */ +#define HFLAG_QUATTRO 0x00001000 /* On QFE/Quattro card */ + +#define HFLAG_20_21 (HFLAG_POLLENABLE | HFLAG_FENABLE) +#define HFLAG_NOT_A0 (HFLAG_POLLENABLE | HFLAG_FENABLE | HFLAG_LANCE | HFLAG_RXCV) + +/* Support for QFE/Quattro cards. */ +struct quattro { + struct net_device *happy_meals[4]; + + /* This is either a sbus_dev or a pci_dev. */ + void *quattro_dev; + + struct quattro *next; + + /* PROM ranges, if any. 
*/ +#ifdef CONFIG_SBUS + struct linux_prom_ranges ranges[8]; +#endif + int nranges; +}; + +/* We use this to acquire receive skb's that we can DMA directly into. */ +#define ALIGNED_RX_SKB_ADDR(addr) \ + ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) +#define happy_meal_alloc_skb(__length, __gfp_flags) \ +({ struct sk_buff *__skb; \ + __skb = alloc_skb((__length) + 64, (__gfp_flags)); \ + if(__skb) { \ + int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \ + if(__offset) \ + skb_reserve(__skb, __offset); \ + } \ + __skb; \ +}) + +#endif /* !(_SUNHME_H) */ diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c new file mode 100644 index 000000000..9b825780b --- /dev/null +++ b/drivers/net/ethernet/sun/sunqe.c @@ -0,0 +1,995 @@ +/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. + * Once again I am out to prove that every ethernet + * controller out there can be most efficiently programmed + * if you make it look like a LANCE. + * + * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sunqe.h" + +#define DRV_NAME "sunqe" +#define DRV_VERSION "4.1" +#define DRV_RELDATE "August 27, 2008" +#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" + +static char version[] = + DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; + +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver"); +MODULE_LICENSE("GPL"); + +static struct sunqec *root_qec_dev; + +static void qe_set_multicast(struct net_device *dev); + +#define QEC_RESET_TRIES 200 + +static inline int qec_global_reset(void __iomem *gregs) +{ + int tries = QEC_RESET_TRIES; + + sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); + while (--tries) { + u32 tmp = sbus_readl(gregs + GLOB_CTRL); + if (tmp & GLOB_CTRL_RESET) { + udelay(20); + continue; + } + break; + } + if (tries) + return 0; + printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n"); + return -1; +} + +#define MACE_RESET_RETRIES 200 +#define QE_RESET_RETRIES 200 + +static inline int qe_stop(struct sunqe *qep) +{ + void __iomem *cregs = qep->qcregs; + void __iomem *mregs = qep->mregs; + int tries; + + /* Reset the MACE, then the QEC channel. 
*/ + sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG); + tries = MACE_RESET_RETRIES; + while (--tries) { + u8 tmp = sbus_readb(mregs + MREGS_BCONFIG); + if (tmp & MREGS_BCONFIG_RESET) { + udelay(20); + continue; + } + break; + } + if (!tries) { + printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n"); + return -1; + } + + sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL); + tries = QE_RESET_RETRIES; + while (--tries) { + u32 tmp = sbus_readl(cregs + CREG_CTRL); + if (tmp & CREG_CTRL_RESET) { + udelay(20); + continue; + } + break; + } + if (!tries) { + printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n"); + return -1; + } + return 0; +} + +static void qe_init_rings(struct sunqe *qep) +{ + struct qe_init_block *qb = qep->qe_block; + struct sunqe_buffers *qbufs = qep->buffers; + __u32 qbufs_dvma = qep->buffers_dvma; + int i; + + qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; + memset(qb, 0, sizeof(struct qe_init_block)); + memset(qbufs, 0, sizeof(struct sunqe_buffers)); + for (i = 0; i < RX_RING_SIZE; i++) { + qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i); + qb->qe_rxd[i].rx_flags = + (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); + } +} + +static int qe_init(struct sunqe *qep, int from_irq) +{ + struct sunqec *qecp = qep->parent; + void __iomem *cregs = qep->qcregs; + void __iomem *mregs = qep->mregs; + void __iomem *gregs = qecp->gregs; + unsigned char *e = &qep->dev->dev_addr[0]; + u32 tmp; + int i; + + /* Shut it up. */ + if (qe_stop(qep)) + return -EAGAIN; + + /* Setup initial rx/tx init block pointers. */ + sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); + sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); + + /* Enable/mask the various irq's. */ + sbus_writel(0, cregs + CREG_RIMASK); + sbus_writel(1, cregs + CREG_TIMASK); + + sbus_writel(0, cregs + CREG_QMASK); + sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK); + + /* Setup the FIFO pointers into QEC local memory. */ + tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE); + sbus_writel(tmp, cregs + CREG_RXRBUFPTR); + sbus_writel(tmp, cregs + CREG_RXWBUFPTR); + + tmp = sbus_readl(cregs + CREG_RXRBUFPTR) + + sbus_readl(gregs + GLOB_RSIZE); + sbus_writel(tmp, cregs + CREG_TXRBUFPTR); + sbus_writel(tmp, cregs + CREG_TXWBUFPTR); + + /* Clear the channel collision counter. */ + sbus_writel(0, cregs + CREG_CCNT); + + /* For 10baseT, inter frame space nor throttle seems to be necessary. */ + sbus_writel(0, cregs + CREG_PIPG); + + /* Now dork with the AMD MACE. */ + sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG); + sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL); + sbus_writeb(0, mregs + MREGS_RXFCNTL); + + /* The QEC dma's the rx'd packets from local memory out to main memory, + * and therefore it interrupts when the packet reception is "complete". + * So don't listen for the MACE talking about it. + */ + sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK); + sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG); + sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 | + MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU), + mregs + MREGS_FCONFIG); + + /* Only usable interface on QuadEther is twisted pair. */ + sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG); + + /* Tell MACE we are changing the ether address. 
*/ + sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET, + mregs + MREGS_IACONFIG); + while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) + barrier(); + sbus_writeb(e[0], mregs + MREGS_ETHADDR); + sbus_writeb(e[1], mregs + MREGS_ETHADDR); + sbus_writeb(e[2], mregs + MREGS_ETHADDR); + sbus_writeb(e[3], mregs + MREGS_ETHADDR); + sbus_writeb(e[4], mregs + MREGS_ETHADDR); + sbus_writeb(e[5], mregs + MREGS_ETHADDR); + + /* Clear out the address filter. */ + sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, + mregs + MREGS_IACONFIG); + while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) + barrier(); + for (i = 0; i < 8; i++) + sbus_writeb(0, mregs + MREGS_FILTER); + + /* Address changes are now complete. */ + sbus_writeb(0, mregs + MREGS_IACONFIG); + + qe_init_rings(qep); + + /* Wait a little bit for the link to come up... */ + mdelay(5); + if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) { + int tries = 50; + + while (--tries) { + u8 tmp; + + mdelay(5); + barrier(); + tmp = sbus_readb(mregs + MREGS_PHYCONFIG); + if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0) + break; + } + if (tries == 0) + printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name); + } + + /* Missed packet counter is cleared on a read. */ + sbus_readb(mregs + MREGS_MPCNT); + + /* Reload multicast information, this will enable the receiver + * and transmitter. + */ + qe_set_multicast(qep->dev); + + /* QEC should now start to show interrupts. */ + return 0; +} + +/* Grrr, certain error conditions completely lock up the AMD MACE, + * so when we get these we _must_ reset the chip. + */ +static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) +{ + struct net_device *dev = qep->dev; + int mace_hwbug_workaround = 0; + + if (qe_status & CREG_STAT_EDEFER) { + printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); + dev->stats.tx_errors++; + } + + if (qe_status & CREG_STAT_CLOSS) { + printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); + dev->stats.tx_errors++; + dev->stats.tx_carrier_errors++; + } + + if (qe_status & CREG_STAT_ERETRIES) { + printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); + dev->stats.tx_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_LCOLL) { + printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); + dev->stats.tx_errors++; + dev->stats.collisions++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_FUFLOW) { + printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); + dev->stats.tx_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_JERROR) { + printk(KERN_ERR "%s: Jabber error.\n", dev->name); + } + + if (qe_status & CREG_STAT_BERROR) { + printk(KERN_ERR "%s: Babble error.\n", dev->name); + } + + if (qe_status & CREG_STAT_CCOFLOW) { + dev->stats.tx_errors += 256; + dev->stats.collisions += 256; + } + + if (qe_status & CREG_STAT_TXDERROR) { + printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_TXLERR) { + printk(KERN_ERR "%s: Transmit late error.\n", dev->name); + dev->stats.tx_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_TXPERR) { + printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & 
CREG_STAT_TXSERR) { + printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_RCCOFLOW) { + dev->stats.rx_errors += 256; + dev->stats.collisions += 256; + } + + if (qe_status & CREG_STAT_RUOFLOW) { + dev->stats.rx_errors += 256; + dev->stats.rx_over_errors += 256; + } + + if (qe_status & CREG_STAT_MCOFLOW) { + dev->stats.rx_errors += 256; + dev->stats.rx_missed_errors += 256; + } + + if (qe_status & CREG_STAT_RXFOFLOW) { + printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); + dev->stats.rx_errors++; + dev->stats.rx_over_errors++; + } + + if (qe_status & CREG_STAT_RLCOLL) { + printk(KERN_ERR "%s: Late receive collision.\n", dev->name); + dev->stats.rx_errors++; + dev->stats.collisions++; + } + + if (qe_status & CREG_STAT_FCOFLOW) { + dev->stats.rx_errors += 256; + dev->stats.rx_frame_errors += 256; + } + + if (qe_status & CREG_STAT_CECOFLOW) { + dev->stats.rx_errors += 256; + dev->stats.rx_crc_errors += 256; + } + + if (qe_status & CREG_STAT_RXDROP) { + printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); + dev->stats.rx_errors++; + dev->stats.rx_dropped++; + dev->stats.rx_missed_errors++; + } + + if (qe_status & CREG_STAT_RXSMALL) { + printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + } + + if (qe_status & CREG_STAT_RXLERR) { + printk(KERN_ERR "%s: Receive late error.\n", dev->name); + dev->stats.rx_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_RXPERR) { + printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; + mace_hwbug_workaround = 1; + } + + if (qe_status & CREG_STAT_RXSERR) { + printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; + mace_hwbug_workaround = 1; + } + + if (mace_hwbug_workaround) + qe_init(qep, 1); + return mace_hwbug_workaround; +} + +/* Per-QE receive interrupt service routine. Just like on the happy meal + * we receive directly into skb's with a small packet copy water mark. + */ +static void qe_rx(struct sunqe *qep) +{ + struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; + struct net_device *dev = qep->dev; + struct qe_rxd *this; + struct sunqe_buffers *qbufs = qep->buffers; + __u32 qbufs_dvma = qep->buffers_dvma; + int elem = qep->rx_new; + u32 flags; + + this = &rxbase[elem]; + while (!((flags = this->rx_flags) & RXD_OWN)) { + struct sk_buff *skb; + unsigned char *this_qbuf = + &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0]; + __u32 this_qbuf_dvma = qbufs_dvma + + qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1))); + struct qe_rxd *end_rxd = + &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)]; + int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */ + + /* Check for errors. 
*/ + if (len < ETH_ZLEN) { + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev->stats.rx_dropped++; + } else { + skb = netdev_alloc_skb(dev, len + 2); + if (skb == NULL) { + dev->stats.rx_dropped++; + } else { + skb_reserve(skb, 2); + skb_put(skb, len); + skb_copy_to_linear_data(skb, this_qbuf, + len); + skb->protocol = eth_type_trans(skb, qep->dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + } + } + end_rxd->rx_addr = this_qbuf_dvma; + end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH)); + + elem = NEXT_RX(elem); + this = &rxbase[elem]; + } + qep->rx_new = elem; +} + +static void qe_tx_reclaim(struct sunqe *qep); + +/* Interrupts for all QE's get filtered out via the QEC master controller, + * so we just run through each qe and check to see who is signaling + * and thus needs to be serviced. + */ +static irqreturn_t qec_interrupt(int irq, void *dev_id) +{ + struct sunqec *qecp = dev_id; + u32 qec_status; + int channel = 0; + + /* Latch the status now. */ + qec_status = sbus_readl(qecp->gregs + GLOB_STAT); + while (channel < 4) { + if (qec_status & 0xf) { + struct sunqe *qep = qecp->qes[channel]; + u32 qe_status; + + qe_status = sbus_readl(qep->qcregs + CREG_STAT); + if (qe_status & CREG_STAT_ERRORS) { + if (qe_is_bolixed(qep, qe_status)) + goto next; + } + if (qe_status & CREG_STAT_RXIRQ) + qe_rx(qep); + if (netif_queue_stopped(qep->dev) && + (qe_status & CREG_STAT_TXIRQ)) { + spin_lock(&qep->lock); + qe_tx_reclaim(qep); + if (TX_BUFFS_AVAIL(qep) > 0) { + /* Wake net queue and return to + * lazy tx reclaim. + */ + netif_wake_queue(qep->dev); + sbus_writel(1, qep->qcregs + CREG_TIMASK); + } + spin_unlock(&qep->lock); + } + next: + ; + } + qec_status >>= 4; + channel++; + } + + return IRQ_HANDLED; +} + +static int qe_open(struct net_device *dev) +{ + struct sunqe *qep = netdev_priv(dev); + + qep->mconfig = (MREGS_MCONFIG_TXENAB | + MREGS_MCONFIG_RXENAB | + MREGS_MCONFIG_MBAENAB); + return qe_init(qep, 0); +} + +static int qe_close(struct net_device *dev) +{ + struct sunqe *qep = netdev_priv(dev); + + qe_stop(qep); + return 0; +} + +/* Reclaim TX'd frames from the ring. This must always run under + * the IRQ protected qep->lock. + */ +static void qe_tx_reclaim(struct sunqe *qep) +{ + struct qe_txd *txbase = &qep->qe_block->qe_txd[0]; + int elem = qep->tx_old; + + while (elem != qep->tx_new) { + u32 flags = txbase[elem].tx_flags; + + if (flags & TXD_OWN) + break; + elem = NEXT_TX(elem); + } + qep->tx_old = elem; +} + +static void qe_tx_timeout(struct net_device *dev) +{ + struct sunqe *qep = netdev_priv(dev); + int tx_full; + + spin_lock_irq(&qep->lock); + + /* Try to reclaim, if that frees up some tx + * entries, we're fine. + */ + qe_tx_reclaim(qep); + tx_full = TX_BUFFS_AVAIL(qep) <= 0; + + spin_unlock_irq(&qep->lock); + + if (! tx_full) + goto out; + + printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + qe_init(qep, 1); + +out: + netif_wake_queue(dev); +} + +/* Get a packet queued to go onto the wire. */ +static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct sunqe *qep = netdev_priv(dev); + struct sunqe_buffers *qbufs = qep->buffers; + __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; + unsigned char *txbuf; + int len, entry; + + spin_lock_irq(&qep->lock); + + qe_tx_reclaim(qep); + + len = skb->len; + entry = qep->tx_new; + + txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0]; + txbuf_dvma = qbufs_dvma + + qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1))); + + /* Avoid a race... 
*/ + qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE; + + skb_copy_from_linear_data(skb, txbuf, len); + + qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma; + qep->qe_block->qe_txd[entry].tx_flags = + (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); + qep->tx_new = NEXT_TX(entry); + + /* Get it going. */ + sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; + + if (TX_BUFFS_AVAIL(qep) <= 0) { + /* Halt the net queue and enable tx interrupts. + * When the tx queue empties the tx irq handler + * will wake up the queue and return us back to + * the lazy tx reclaim scheme. + */ + netif_stop_queue(dev); + sbus_writel(0, qep->qcregs + CREG_TIMASK); + } + spin_unlock_irq(&qep->lock); + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static void qe_set_multicast(struct net_device *dev) +{ + struct sunqe *qep = netdev_priv(dev); + struct netdev_hw_addr *ha; + u8 new_mconfig = qep->mconfig; + int i; + u32 crc; + + /* Lock out others. */ + netif_stop_queue(dev); + + if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { + sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, + qep->mregs + MREGS_IACONFIG); + while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) + barrier(); + for (i = 0; i < 8; i++) + sbus_writeb(0xff, qep->mregs + MREGS_FILTER); + sbus_writeb(0, qep->mregs + MREGS_IACONFIG); + } else if (dev->flags & IFF_PROMISC) { + new_mconfig |= MREGS_MCONFIG_PROMISC; + } else { + u16 hash_table[4]; + u8 *hbytes = (unsigned char *) &hash_table[0]; + + memset(hash_table, 0, sizeof(hash_table)); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); + crc >>= 26; + hash_table[crc >> 4] |= 1 << (crc & 0xf); + } + /* Program the qe with the new filter value. */ + sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET, + qep->mregs + MREGS_IACONFIG); + while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0) + barrier(); + for (i = 0; i < 8; i++) { + u8 tmp = *hbytes++; + sbus_writeb(tmp, qep->mregs + MREGS_FILTER); + } + sbus_writeb(0, qep->mregs + MREGS_IACONFIG); + } + + /* Any change of the logical address filter, the physical address, + * or enabling/disabling promiscuous mode causes the MACE to disable + * the receiver. So we must re-enable them here or else the MACE + * refuses to listen to anything on the network. Sheesh, took + * me a day or two to find this bug. + */ + qep->mconfig = new_mconfig; + sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG); + + /* Let us get going again. */ + netif_wake_queue(dev); +} + +/* Ethtool support... 
*/ +static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + const struct linux_prom_registers *regs; + struct sunqe *qep = netdev_priv(dev); + struct platform_device *op; + + strlcpy(info->driver, "sunqe", sizeof(info->driver)); + strlcpy(info->version, "3.0", sizeof(info->version)); + + op = qep->op; + regs = of_get_property(op->dev.of_node, "reg", NULL); + if (regs) + snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d", + regs->which_io); + +} + +static u32 qe_get_link(struct net_device *dev) +{ + struct sunqe *qep = netdev_priv(dev); + void __iomem *mregs = qep->mregs; + u8 phyconfig; + + spin_lock_irq(&qep->lock); + phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); + spin_unlock_irq(&qep->lock); + + return phyconfig & MREGS_PHYCONFIG_LSTAT; +} + +static const struct ethtool_ops qe_ethtool_ops = { + .get_drvinfo = qe_get_drvinfo, + .get_link = qe_get_link, +}; + +/* This is only called once at boot time for each card probed. */ +static void qec_init_once(struct sunqec *qecp, struct platform_device *op) +{ + u8 bsizes = qecp->qec_bursts; + + if (sbus_can_burst64() && (bsizes & DMA_BURST64)) { + sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL); + } else if (bsizes & DMA_BURST32) { + sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL); + } else { + sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL); + } + + /* Packetsize only used in 100baseT BigMAC configurations, + * set it to zero just to be on the safe side. + */ + sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE); + + /* Set the local memsize register, divided up to one piece per QE channel. */ + sbus_writel((resource_size(&op->resource[1]) >> 2), + qecp->gregs + GLOB_MSIZE); + + /* Divide up the local QEC memory amongst the 4 QE receiver and + * transmitter FIFOs. Basically it is (total / 2 / num_channels). + */ + sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, + qecp->gregs + GLOB_TSIZE); + sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1, + qecp->gregs + GLOB_RSIZE); +} + +static u8 qec_get_burst(struct device_node *dp) +{ + u8 bsizes, bsizes_more; + + /* Find and set the burst sizes for the QEC, since it + * does the actual dma for all 4 channels. + */ + bsizes = of_getintprop_default(dp, "burst-sizes", 0xff); + bsizes &= 0xff; + bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff); + + if (bsizes_more != 0xff) + bsizes &= bsizes_more; + if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || + (bsizes & DMA_BURST32)==0) + bsizes = (DMA_BURST32 - 1); + + return bsizes; +} + +static struct sunqec *get_qec(struct platform_device *child) +{ + struct platform_device *op = to_platform_device(child->dev.parent); + struct sunqec *qecp; + + qecp = platform_get_drvdata(op); + if (!qecp) { + qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL); + if (qecp) { + u32 ctrl; + + qecp->op = op; + qecp->gregs = of_ioremap(&op->resource[0], 0, + GLOB_REG_SIZE, + "QEC Global Registers"); + if (!qecp->gregs) + goto fail; + + /* Make sure the QEC is in MACE mode. 
*/ + ctrl = sbus_readl(qecp->gregs + GLOB_CTRL); + ctrl &= 0xf0000000; + if (ctrl != GLOB_CTRL_MMODE) { + printk(KERN_ERR "qec: Not in MACE mode!\n"); + goto fail; + } + + if (qec_global_reset(qecp->gregs)) + goto fail; + + qecp->qec_bursts = qec_get_burst(op->dev.of_node); + + qec_init_once(qecp, op); + + if (request_irq(op->archdata.irqs[0], qec_interrupt, + IRQF_SHARED, "qec", (void *) qecp)) { + printk(KERN_ERR "qec: Can't register irq.\n"); + goto fail; + } + + platform_set_drvdata(op, qecp); + + qecp->next_module = root_qec_dev; + root_qec_dev = qecp; + } + } + + return qecp; + +fail: + if (qecp->gregs) + of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE); + kfree(qecp); + return NULL; +} + +static const struct net_device_ops qec_ops = { + .ndo_open = qe_open, + .ndo_stop = qe_close, + .ndo_start_xmit = qe_start_xmit, + .ndo_set_rx_mode = qe_set_multicast, + .ndo_tx_timeout = qe_tx_timeout, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int qec_ether_init(struct platform_device *op) +{ + static unsigned version_printed; + struct net_device *dev; + struct sunqec *qecp; + struct sunqe *qe; + int i, res; + + if (version_printed++ == 0) + printk(KERN_INFO "%s", version); + + dev = alloc_etherdev(sizeof(struct sunqe)); + if (!dev) + return -ENOMEM; + + memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); + + qe = netdev_priv(dev); + + res = -ENODEV; + + i = of_getintprop_default(op->dev.of_node, "channel#", -1); + if (i == -1) + goto fail; + qe->channel = i; + spin_lock_init(&qe->lock); + + qecp = get_qec(op); + if (!qecp) + goto fail; + + qecp->qes[qe->channel] = qe; + qe->dev = dev; + qe->parent = qecp; + qe->op = op; + + res = -ENOMEM; + qe->qcregs = of_ioremap(&op->resource[0], 0, + CREG_REG_SIZE, "QEC Channel Registers"); + if (!qe->qcregs) { + printk(KERN_ERR "qe: Cannot map channel registers.\n"); + goto fail; + } + + qe->mregs = of_ioremap(&op->resource[1], 0, + MREGS_REG_SIZE, "QE MACE Registers"); + if (!qe->mregs) { + printk(KERN_ERR "qe: Cannot map MACE registers.\n"); + goto fail; + } + + qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE, + &qe->qblock_dvma, GFP_ATOMIC); + qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers), + &qe->buffers_dvma, GFP_ATOMIC); + if (qe->qe_block == NULL || qe->qblock_dvma == 0 || + qe->buffers == NULL || qe->buffers_dvma == 0) + goto fail; + + /* Stop this QE. 
*/ + qe_stop(qe); + + SET_NETDEV_DEV(dev, &op->dev); + + dev->watchdog_timeo = 5*HZ; + dev->irq = op->archdata.irqs[0]; + dev->dma = 0; + dev->ethtool_ops = &qe_ethtool_ops; + dev->netdev_ops = &qec_ops; + + res = register_netdev(dev); + if (res) + goto fail; + + platform_set_drvdata(op, qe); + + printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel, + dev->dev_addr); + return 0; + +fail: + if (qe->qcregs) + of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE); + if (qe->mregs) + of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE); + if (qe->qe_block) + dma_free_coherent(&op->dev, PAGE_SIZE, + qe->qe_block, qe->qblock_dvma); + if (qe->buffers) + dma_free_coherent(&op->dev, + sizeof(struct sunqe_buffers), + qe->buffers, + qe->buffers_dvma); + + free_netdev(dev); + + return res; +} + +static int qec_sbus_probe(struct platform_device *op) +{ + return qec_ether_init(op); +} + +static int qec_sbus_remove(struct platform_device *op) +{ + struct sunqe *qp = platform_get_drvdata(op); + struct net_device *net_dev = qp->dev; + + unregister_netdev(net_dev); + + of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); + of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); + dma_free_coherent(&op->dev, PAGE_SIZE, + qp->qe_block, qp->qblock_dvma); + dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers), + qp->buffers, qp->buffers_dvma); + + free_netdev(net_dev); + + return 0; +} + +static const struct of_device_id qec_sbus_match[] = { + { + .name = "qe", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, qec_sbus_match); + +static struct platform_driver qec_sbus_driver = { + .driver = { + .name = "qec", + .of_match_table = qec_sbus_match, + }, + .probe = qec_sbus_probe, + .remove = qec_sbus_remove, +}; + +static int __init qec_init(void) +{ + return platform_driver_register(&qec_sbus_driver); +} + +static void __exit qec_exit(void) +{ + platform_driver_unregister(&qec_sbus_driver); + + while (root_qec_dev) { + struct sunqec *next = root_qec_dev->next_module; + struct platform_device *op = root_qec_dev->op; + + free_irq(op->archdata.irqs[0], (void *) root_qec_dev); + of_iounmap(&op->resource[0], root_qec_dev->gregs, + GLOB_REG_SIZE); + kfree(root_qec_dev); + + root_qec_dev = next; + } +} + +module_init(qec_init); +module_exit(qec_exit); diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h new file mode 100644 index 000000000..581781b6b --- /dev/null +++ b/drivers/net/ethernet/sun/sunqe.h @@ -0,0 +1,350 @@ +/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $ + * sunqe.h: Definitions for the Sun QuadEthernet driver. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + */ + +#ifndef _SUNQE_H +#define _SUNQE_H + +/* QEC global registers. 
*/ +#define GLOB_CTRL 0x00UL /* Control */ +#define GLOB_STAT 0x04UL /* Status */ +#define GLOB_PSIZE 0x08UL /* Packet Size */ +#define GLOB_MSIZE 0x0cUL /* Local-memory Size */ +#define GLOB_RSIZE 0x10UL /* Receive partition size */ +#define GLOB_TSIZE 0x14UL /* Transmit partition size */ +#define GLOB_REG_SIZE 0x18UL + +#define GLOB_CTRL_MMODE 0x40000000 /* MACE qec mode */ +#define GLOB_CTRL_BMODE 0x10000000 /* BigMAC qec mode */ +#define GLOB_CTRL_EPAR 0x00000020 /* Enable parity */ +#define GLOB_CTRL_ACNTRL 0x00000018 /* SBUS arbitration control */ +#define GLOB_CTRL_B64 0x00000004 /* 64 byte dvma bursts */ +#define GLOB_CTRL_B32 0x00000002 /* 32 byte dvma bursts */ +#define GLOB_CTRL_B16 0x00000000 /* 16 byte dvma bursts */ +#define GLOB_CTRL_RESET 0x00000001 /* Reset the QEC */ + +#define GLOB_STAT_TX 0x00000008 /* BigMAC Transmit IRQ */ +#define GLOB_STAT_RX 0x00000004 /* BigMAC Receive IRQ */ +#define GLOB_STAT_BM 0x00000002 /* BigMAC Global IRQ */ +#define GLOB_STAT_ER 0x00000001 /* BigMAC Error IRQ */ + +#define GLOB_PSIZE_2048 0x00 /* 2k packet size */ +#define GLOB_PSIZE_4096 0x01 /* 4k packet size */ +#define GLOB_PSIZE_6144 0x10 /* 6k packet size */ +#define GLOB_PSIZE_8192 0x11 /* 8k packet size */ + +/* In MACE mode, there are four qe channels. Each channel has its own + * status bits in the QEC status register. This macro picks out the + * ones you want. + */ +#define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf) + +/* The following registers are for per-qe channel information/status. */ +#define CREG_CTRL 0x00UL /* Control */ +#define CREG_STAT 0x04UL /* Status */ +#define CREG_RXDS 0x08UL /* RX descriptor ring ptr */ +#define CREG_TXDS 0x0cUL /* TX descriptor ring ptr */ +#define CREG_RIMASK 0x10UL /* RX Interrupt Mask */ +#define CREG_TIMASK 0x14UL /* TX Interrupt Mask */ +#define CREG_QMASK 0x18UL /* QEC Error Interrupt Mask */ +#define CREG_MMASK 0x1cUL /* MACE Error Interrupt Mask */ +#define CREG_RXWBUFPTR 0x20UL /* Local memory rx write ptr */ +#define CREG_RXRBUFPTR 0x24UL /* Local memory rx read ptr */ +#define CREG_TXWBUFPTR 0x28UL /* Local memory tx write ptr */ +#define CREG_TXRBUFPTR 0x2cUL /* Local memory tx read ptr */ +#define CREG_CCNT 0x30UL /* Collision Counter */ +#define CREG_PIPG 0x34UL /* Inter-Frame Gap */ +#define CREG_REG_SIZE 0x38UL + +#define CREG_CTRL_RXOFF 0x00000004 /* Disable this qe's receiver*/ +#define CREG_CTRL_RESET 0x00000002 /* Reset this qe channel */ +#define CREG_CTRL_TWAKEUP 0x00000001 /* Transmitter Wakeup, 'go'. 
*/ + +#define CREG_STAT_EDEFER 0x10000000 /* Excessive Defers */ +#define CREG_STAT_CLOSS 0x08000000 /* Carrier Loss */ +#define CREG_STAT_ERETRIES 0x04000000 /* More than 16 retries */ +#define CREG_STAT_LCOLL 0x02000000 /* Late TX Collision */ +#define CREG_STAT_FUFLOW 0x01000000 /* FIFO Underflow */ +#define CREG_STAT_JERROR 0x00800000 /* Jabber Error */ +#define CREG_STAT_BERROR 0x00400000 /* Babble Error */ +#define CREG_STAT_TXIRQ 0x00200000 /* Transmit Interrupt */ +#define CREG_STAT_CCOFLOW 0x00100000 /* TX Coll-counter Overflow */ +#define CREG_STAT_TXDERROR 0x00080000 /* TX Descriptor is bogus */ +#define CREG_STAT_TXLERR 0x00040000 /* Late Transmit Error */ +#define CREG_STAT_TXPERR 0x00020000 /* Transmit Parity Error */ +#define CREG_STAT_TXSERR 0x00010000 /* Transmit SBUS error ack */ +#define CREG_STAT_RCCOFLOW 0x00001000 /* RX Coll-counter Overflow */ +#define CREG_STAT_RUOFLOW 0x00000800 /* Runt Counter Overflow */ +#define CREG_STAT_MCOFLOW 0x00000400 /* Missed Counter Overflow */ +#define CREG_STAT_RXFOFLOW 0x00000200 /* RX FIFO Overflow */ +#define CREG_STAT_RLCOLL 0x00000100 /* RX Late Collision */ +#define CREG_STAT_FCOFLOW 0x00000080 /* Frame Counter Overflow */ +#define CREG_STAT_CECOFLOW 0x00000040 /* CRC Error-counter Overflow*/ +#define CREG_STAT_RXIRQ 0x00000020 /* Receive Interrupt */ +#define CREG_STAT_RXDROP 0x00000010 /* Dropped a RX'd packet */ +#define CREG_STAT_RXSMALL 0x00000008 /* Receive buffer too small */ +#define CREG_STAT_RXLERR 0x00000004 /* Receive Late Error */ +#define CREG_STAT_RXPERR 0x00000002 /* Receive Parity Error */ +#define CREG_STAT_RXSERR 0x00000001 /* Receive SBUS Error ACK */ + +#define CREG_STAT_ERRORS (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES| \ + CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR| \ + CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR| \ + CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR| \ + CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \ + CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW| \ + CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL| \ + CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR) + +#define CREG_QMASK_COFLOW 0x00100000 /* CollCntr overflow */ +#define CREG_QMASK_TXDERROR 0x00080000 /* TXD error */ +#define CREG_QMASK_TXLERR 0x00040000 /* TX late error */ +#define CREG_QMASK_TXPERR 0x00020000 /* TX parity error */ +#define CREG_QMASK_TXSERR 0x00010000 /* TX sbus error ack */ +#define CREG_QMASK_RXDROP 0x00000010 /* RX drop */ +#define CREG_QMASK_RXBERROR 0x00000008 /* RX buffer error */ +#define CREG_QMASK_RXLEERR 0x00000004 /* RX late error */ +#define CREG_QMASK_RXPERR 0x00000002 /* RX parity error */ +#define CREG_QMASK_RXSERR 0x00000001 /* RX sbus error ack */ + +#define CREG_MMASK_EDEFER 0x10000000 /* Excess defer */ +#define CREG_MMASK_CLOSS 0x08000000 /* Carrier loss */ +#define CREG_MMASK_ERETRY 0x04000000 /* Excess retry */ +#define CREG_MMASK_LCOLL 0x02000000 /* Late collision error */ +#define CREG_MMASK_UFLOW 0x01000000 /* Underflow */ +#define CREG_MMASK_JABBER 0x00800000 /* Jabber error */ +#define CREG_MMASK_BABBLE 0x00400000 /* Babble error */ +#define CREG_MMASK_OFLOW 0x00000800 /* Overflow */ +#define CREG_MMASK_RXCOLL 0x00000400 /* RX Coll-Cntr overflow */ +#define CREG_MMASK_RPKT 0x00000200 /* Runt pkt overflow */ +#define CREG_MMASK_MPKT 0x00000100 /* Missed pkt overflow */ + +#define CREG_PIPG_TENAB 0x00000020 /* Enable Throttle */ +#define CREG_PIPG_MMODE 0x00000010 /* Manual Mode */ +#define CREG_PIPG_WMASK 0x0000000f /* SBUS Wait Mask */ 
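
/* A minimal editor's sketch, not part of this patch: it shows how the
 * definitions above are meant to be combined when servicing a QEC
 * interrupt, in the same spirit as the qec_interrupt() handler that
 * sunqe.c registers.  GLOB_STAT_PER_QE() isolates one channel's nibble
 * of the global status word, and that channel's own CREG_STAT register
 * is then tested against the CREG_STAT_* masks.  The helper name and
 * its parameters are hypothetical and exist only for illustration.
 */
static void qe_status_sketch(void __iomem *gregs, void __iomem *qcregs[4])
{
	u32 qec_status = sbus_readl(gregs + GLOB_STAT);
	int channel;

	for (channel = 0; channel < 4; channel++) {
		u32 qe_status;

		/* Four status bits per channel, channel 0 in the low nibble. */
		if (!GLOB_STAT_PER_QE(qec_status, channel))
			continue;

		/* The detailed cause lives in that channel's CREG_STAT. */
		qe_status = sbus_readl(qcregs[channel] + CREG_STAT);
		if (qe_status & CREG_STAT_ERRORS)
			printk(KERN_INFO "qe%d: error status %08x\n",
			       channel, qe_status);
		if (qe_status & CREG_STAT_TXIRQ)
			printk(KERN_INFO "qe%d: transmit complete\n", channel);
		if (qe_status & CREG_STAT_RXIRQ)
			printk(KERN_INFO "qe%d: packet received\n", channel);
	}
}
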
+ +/* Per-channel AMD 79C940 MACE registers. */ +#define MREGS_RXFIFO 0x00UL /* Receive FIFO */ +#define MREGS_TXFIFO 0x01UL /* Transmit FIFO */ +#define MREGS_TXFCNTL 0x02UL /* Transmit Frame Control */ +#define MREGS_TXFSTAT 0x03UL /* Transmit Frame Status */ +#define MREGS_TXRCNT 0x04UL /* Transmit Retry Count */ +#define MREGS_RXFCNTL 0x05UL /* Receive Frame Control */ +#define MREGS_RXFSTAT 0x06UL /* Receive Frame Status */ +#define MREGS_FFCNT 0x07UL /* FIFO Frame Count */ +#define MREGS_IREG 0x08UL /* Interrupt Register */ +#define MREGS_IMASK 0x09UL /* Interrupt Mask */ +#define MREGS_POLL 0x0aUL /* POLL Register */ +#define MREGS_BCONFIG 0x0bUL /* BIU Config */ +#define MREGS_FCONFIG 0x0cUL /* FIFO Config */ +#define MREGS_MCONFIG 0x0dUL /* MAC Config */ +#define MREGS_PLSCONFIG 0x0eUL /* PLS Config */ +#define MREGS_PHYCONFIG 0x0fUL /* PHY Config */ +#define MREGS_CHIPID1 0x10UL /* Chip-ID, low bits */ +#define MREGS_CHIPID2 0x11UL /* Chip-ID, high bits */ +#define MREGS_IACONFIG 0x12UL /* Internal Address Config */ + /* 0x13UL, reserved */ +#define MREGS_FILTER 0x14UL /* Logical Address Filter */ +#define MREGS_ETHADDR 0x15UL /* Our Ethernet Address */ + /* 0x16UL, reserved */ + /* 0x17UL, reserved */ +#define MREGS_MPCNT 0x18UL /* Missed Packet Count */ + /* 0x19UL, reserved */ +#define MREGS_RPCNT 0x1aUL /* Runt Packet Count */ +#define MREGS_RCCNT 0x1bUL /* RX Collision Count */ + /* 0x1cUL, reserved */ +#define MREGS_UTEST 0x1dUL /* User Test */ +#define MREGS_RTEST1 0x1eUL /* Reserved Test 1 */ +#define MREGS_RTEST2 0x1fUL /* Reserved Test 2 */ +#define MREGS_REG_SIZE 0x20UL + +#define MREGS_TXFCNTL_DRETRY 0x80 /* Retry disable */ +#define MREGS_TXFCNTL_DFCS 0x08 /* Disable TX FCS */ +#define MREGS_TXFCNTL_AUTOPAD 0x01 /* TX auto pad */ + +#define MREGS_TXFSTAT_VALID 0x80 /* TX valid */ +#define MREGS_TXFSTAT_UNDERFLOW 0x40 /* TX underflow */ +#define MREGS_TXFSTAT_LCOLL 0x20 /* TX late collision */ +#define MREGS_TXFSTAT_MRETRY 0x10 /* TX > 1 retries */ +#define MREGS_TXFSTAT_ORETRY 0x08 /* TX 1 retry */ +#define MREGS_TXFSTAT_PDEFER 0x04 /* TX pkt deferred */ +#define MREGS_TXFSTAT_CLOSS 0x02 /* TX carrier lost */ +#define MREGS_TXFSTAT_RERROR 0x01 /* TX retry error */ + +#define MREGS_TXRCNT_EDEFER 0x80 /* TX Excess defers */ +#define MREGS_TXRCNT_CMASK 0x0f /* TX retry count */ + +#define MREGS_RXFCNTL_LOWLAT 0x08 /* RX low latency */ +#define MREGS_RXFCNTL_AREJECT 0x04 /* RX addr match rej */ +#define MREGS_RXFCNTL_AUTOSTRIP 0x01 /* RX auto strip */ + +#define MREGS_RXFSTAT_OVERFLOW 0x80 /* RX overflow */ +#define MREGS_RXFSTAT_LCOLL 0x40 /* RX late collision */ +#define MREGS_RXFSTAT_FERROR 0x20 /* RX framing error */ +#define MREGS_RXFSTAT_FCSERROR 0x10 /* RX FCS error */ +#define MREGS_RXFSTAT_RBCNT 0x0f /* RX msg byte count */ + +#define MREGS_FFCNT_RX 0xf0 /* RX FIFO frame cnt */ +#define MREGS_FFCNT_TX 0x0f /* TX FIFO frame cnt */ + +#define MREGS_IREG_JABBER 0x80 /* IRQ Jabber error */ +#define MREGS_IREG_BABBLE 0x40 /* IRQ Babble error */ +#define MREGS_IREG_COLL 0x20 /* IRQ Collision error */ +#define MREGS_IREG_RCCO 0x10 /* IRQ Collision cnt overflow */ +#define MREGS_IREG_RPKTCO 0x08 /* IRQ Runt packet count overflow */ +#define MREGS_IREG_MPKTCO 0x04 /* IRQ missed packet cnt overflow */ +#define MREGS_IREG_RXIRQ 0x02 /* IRQ RX'd a packet */ +#define MREGS_IREG_TXIRQ 0x01 /* IRQ TX'd a packet */ + +#define MREGS_IMASK_BABBLE 0x40 /* IMASK Babble errors */ +#define MREGS_IMASK_COLL 0x20 /* IMASK Collision errors */ +#define MREGS_IMASK_MPKTCO 0x04 /* IMASK 
Missed pkt cnt overflow */ +#define MREGS_IMASK_RXIRQ 0x02 /* IMASK RX interrupts */ +#define MREGS_IMASK_TXIRQ 0x01 /* IMASK TX interrupts */ + +#define MREGS_POLL_TXVALID 0x80 /* TX is valid */ +#define MREGS_POLL_TDTR 0x40 /* TX data transfer request */ +#define MREGS_POLL_RDTR 0x20 /* RX data transfer request */ + +#define MREGS_BCONFIG_BSWAP 0x40 /* Byte Swap */ +#define MREGS_BCONFIG_4TS 0x00 /* 4byte transmit start point */ +#define MREGS_BCONFIG_16TS 0x10 /* 16byte transmit start point */ +#define MREGS_BCONFIG_64TS 0x20 /* 64byte transmit start point */ +#define MREGS_BCONFIG_112TS 0x30 /* 112byte transmit start point */ +#define MREGS_BCONFIG_RESET 0x01 /* SW-Reset the MACE */ + +#define MREGS_FCONFIG_TXF8 0x00 /* TX fifo 8 write cycles */ +#define MREGS_FCONFIG_TXF32 0x80 /* TX fifo 32 write cycles */ +#define MREGS_FCONFIG_TXF16 0x40 /* TX fifo 16 write cycles */ +#define MREGS_FCONFIG_RXF64 0x20 /* RX fifo 64 write cycles */ +#define MREGS_FCONFIG_RXF32 0x10 /* RX fifo 32 write cycles */ +#define MREGS_FCONFIG_RXF16 0x00 /* RX fifo 16 write cycles */ +#define MREGS_FCONFIG_TFWU 0x08 /* TX fifo watermark update */ +#define MREGS_FCONFIG_RFWU 0x04 /* RX fifo watermark update */ +#define MREGS_FCONFIG_TBENAB 0x02 /* TX burst enable */ +#define MREGS_FCONFIG_RBENAB 0x01 /* RX burst enable */ + +#define MREGS_MCONFIG_PROMISC 0x80 /* Promiscuous mode enable */ +#define MREGS_MCONFIG_TPDDISAB 0x40 /* TX 2part deferral enable */ +#define MREGS_MCONFIG_MBAENAB 0x20 /* Modified backoff enable */ +#define MREGS_MCONFIG_RPADISAB 0x08 /* RX physical addr disable */ +#define MREGS_MCONFIG_RBDISAB 0x04 /* RX broadcast disable */ +#define MREGS_MCONFIG_TXENAB 0x02 /* Enable transmitter */ +#define MREGS_MCONFIG_RXENAB 0x01 /* Enable receiver */ + +#define MREGS_PLSCONFIG_TXMS 0x08 /* TX mode select */ +#define MREGS_PLSCONFIG_GPSI 0x06 /* Use GPSI connector */ +#define MREGS_PLSCONFIG_DAI 0x04 /* Use DAI connector */ +#define MREGS_PLSCONFIG_TP 0x02 /* Use TwistedPair connector */ +#define MREGS_PLSCONFIG_AUI 0x00 /* Use AUI connector */ +#define MREGS_PLSCONFIG_IOENAB 0x01 /* PLS I/O enable */ + +#define MREGS_PHYCONFIG_LSTAT 0x80 /* Link status */ +#define MREGS_PHYCONFIG_LTESTDIS 0x40 /* Disable link test logic */ +#define MREGS_PHYCONFIG_RXPOLARITY 0x20 /* RX polarity */ +#define MREGS_PHYCONFIG_APCDISAB 0x10 /* AutoPolarityCorrect disab */ +#define MREGS_PHYCONFIG_LTENAB 0x08 /* Select low threshold */ +#define MREGS_PHYCONFIG_AUTO 0x04 /* Connector port auto-sel */ +#define MREGS_PHYCONFIG_RWU 0x02 /* Remote WakeUp */ +#define MREGS_PHYCONFIG_AW 0x01 /* Auto Wakeup */ + +#define MREGS_IACONFIG_ACHNGE 0x80 /* Do address change */ +#define MREGS_IACONFIG_PARESET 0x04 /* Physical address reset */ +#define MREGS_IACONFIG_LARESET 0x02 /* Logical address reset */ + +#define MREGS_UTEST_RTRENAB 0x80 /* Enable resv test register */ +#define MREGS_UTEST_RTRDISAB 0x40 /* Disab resv test register */ +#define MREGS_UTEST_RPACCEPT 0x20 /* Accept runt packets */ +#define MREGS_UTEST_FCOLL 0x10 /* Force collision status */ +#define MREGS_UTEST_FCSENAB 0x08 /* Enable FCS on RX */ +#define MREGS_UTEST_INTLOOPM 0x06 /* Intern lpback w/MENDEC */ +#define MREGS_UTEST_INTLOOP 0x04 /* Intern lpback */ +#define MREGS_UTEST_EXTLOOP 0x02 /* Extern lpback */ +#define MREGS_UTEST_NOLOOP 0x00 /* No loopback */ + +struct qe_rxd { + u32 rx_flags; + u32 rx_addr; +}; + +#define RXD_OWN 0x80000000 /* Ownership. */ +#define RXD_UPDATE 0x10000000 /* Being Updated? */ +#define RXD_LENGTH 0x000007ff /* Packet Length. 
*/ + +struct qe_txd { + u32 tx_flags; + u32 tx_addr; +}; + +#define TXD_OWN 0x80000000 /* Ownership. */ +#define TXD_SOP 0x40000000 /* Start Of Packet */ +#define TXD_EOP 0x20000000 /* End Of Packet */ +#define TXD_UPDATE 0x10000000 /* Being Updated? */ +#define TXD_LENGTH 0x000007ff /* Packet Length. */ + +#define TX_RING_MAXSIZE 256 +#define RX_RING_MAXSIZE 256 + +#define TX_RING_SIZE 16 +#define RX_RING_SIZE 16 + +#define NEXT_RX(num) (((num) + 1) & (RX_RING_MAXSIZE - 1)) +#define NEXT_TX(num) (((num) + 1) & (TX_RING_MAXSIZE - 1)) +#define PREV_RX(num) (((num) - 1) & (RX_RING_MAXSIZE - 1)) +#define PREV_TX(num) (((num) - 1) & (TX_RING_MAXSIZE - 1)) + +#define TX_BUFFS_AVAIL(qp) \ + (((qp)->tx_old <= (qp)->tx_new) ? \ + (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \ + (qp)->tx_old - (qp)->tx_new - 1) + +struct qe_init_block { + struct qe_rxd qe_rxd[RX_RING_MAXSIZE]; + struct qe_txd qe_txd[TX_RING_MAXSIZE]; +}; + +#define qib_offset(mem, elem) \ +((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem])))) + +struct sunqe; + +struct sunqec { + void __iomem *gregs; /* QEC Global Registers */ + struct sunqe *qes[4]; /* Each child MACE */ + unsigned int qec_bursts; /* Support burst sizes */ + struct platform_device *op; /* QEC's OF device */ + struct sunqec *next_module; /* List of all QECs in system */ +}; + +#define PKT_BUF_SZ 1664 +#define RXD_PKT_SZ 1664 + +struct sunqe_buffers { + u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ]; + u8 __pad[2]; + u8 rx_buf[RX_RING_SIZE][PKT_BUF_SZ]; +}; + +#define qebuf_offset(mem, elem) \ +((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0])))) + +struct sunqe { + void __iomem *qcregs; /* QEC per-channel Registers */ + void __iomem *mregs; /* Per-channel MACE Registers */ + struct qe_init_block *qe_block; /* RX and TX descriptors */ + __u32 qblock_dvma; /* RX and TX descriptors */ + spinlock_t lock; /* Protects txfull state */ + int rx_new, rx_old; /* RX ring extents */ + int tx_new, tx_old; /* TX ring extents */ + struct sunqe_buffers *buffers; /* CPU visible address. */ + __u32 buffers_dvma; /* DVMA visible address. */ + struct sunqec *parent; + u8 mconfig; /* Base MACE mconfig value */ + struct platform_device *op; /* QE's OF device struct */ + struct net_device *dev; /* QE's netdevice struct */ + int channel; /* Who am I? */ +}; + +#endif /* !(_SUNQE_H) */ diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c new file mode 100644 index 000000000..53fe200e0 --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -0,0 +1,2060 @@ +/* sunvnet.c: Sun LDOM Virtual Network Driver. + * + * Copyright (C) 2007, 2008 David S. Miller + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/mutex.h> +#include <linux/highmem.h> +#include <linux/if_vlan.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <linux/icmpv6.h> +#endif + +#include <net/ip.h> +#include <net/icmp.h> +#include <net/route.h> + +#include <asm/vio.h> +#include <asm/ldc.h> + +#include "sunvnet.h" + +#define DRV_MODULE_NAME "sunvnet" +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "June 25, 2007" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Sun LDOM virtual network driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define VNET_MAX_TXQS 16 + +/* Heuristic for the number of times to exponentially backoff and + * retry sending an LDC trigger when EAGAIN is encountered + */ +#define VNET_MAX_RETRIES 10 + +static int __vnet_tx_trigger(struct vnet_port *port, u32 start); +static void vnet_port_reset(struct vnet_port *port); + +/* Ordered from largest major to lowest */ +static struct vio_version vnet_versions[] = { + { .major = 1, .minor = 8 }, + { .major = 1, .minor = 7 }, + { .major = 1, .minor = 6 }, + { .major = 1, .minor = 0 }, +}; + +static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) +{ + return vio_dring_avail(dr, VNET_TX_RING_SIZE); +} + +static int vnet_handle_unknown(struct vnet_port *port, void *arg) +{ + struct vio_msg_tag *pkt = arg; + + pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", + pkt->type, pkt->stype, pkt->stype_env, pkt->sid); + pr_err("Resetting connection\n"); + + ldc_disconnect(port->vio.lp); + + return -ECONNRESET; +} + +static int vnet_port_alloc_tx_ring(struct vnet_port *port); + +static int vnet_send_attr(struct vio_driver_state *vio) +{ + struct vnet_port *port = to_vnet_port(vio); + struct net_device *dev = port->vp->dev; + struct vio_net_attr_info pkt; + int framelen = ETH_FRAME_LEN; + int i, err; + + err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); + if (err) + return err; + + memset(&pkt, 0, sizeof(pkt)); + pkt.tag.type = VIO_TYPE_CTRL; + pkt.tag.stype = VIO_SUBTYPE_INFO; + pkt.tag.stype_env = VIO_ATTR_INFO; + pkt.tag.sid = vio_send_sid(vio); + if (vio_version_before(vio, 1, 2)) + pkt.xfer_mode = VIO_DRING_MODE; + else + pkt.xfer_mode = VIO_NEW_DRING_MODE; + pkt.addr_type = VNET_ADDR_ETHERMAC; + pkt.ack_freq = 0; + for (i = 0; i < 6; i++) + pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); + if (vio_version_after(vio, 1, 3)) { + if (port->rmtu) { + port->rmtu = min(VNET_MAXPACKET, port->rmtu); + pkt.mtu = port->rmtu; + } else { + port->rmtu = VNET_MAXPACKET; + pkt.mtu = port->rmtu; + } + if (vio_version_after_eq(vio, 1, 6)) + pkt.options = VIO_TX_DRING; + } else if (vio_version_before(vio, 1, 3)) { + pkt.mtu = framelen; + } else { /* v1.3 */ + pkt.mtu = framelen + VLAN_HLEN; + } + + pkt.cflags = 0; + if (vio_version_after_eq(vio, 1, 7) && port->tso) { + pkt.cflags |= VNET_LSO_IPV4_CAPAB; + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + pkt.ipv4_lso_maxlen = port->tsolen; + } + + pkt.plnk_updt = PHYSLINK_UPDATE_NONE; + + viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + "cflags[0x%04x] lso_max[%u]\n", + pkt.xfer_mode, pkt.addr_type, + (unsigned long long)pkt.addr, + pkt.ack_freq, pkt.plnk_updt, pkt.options, + (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); + + + return vio_ldc_send(vio, &pkt, sizeof(pkt)); +} + +static int handle_attr_info(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + struct vnet_port *port = to_vnet_port(vio); + u64 localmtu; + u8 xfer_mode; + + viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); + + pkt->tag.sid = vio_send_sid(vio); + + xfer_mode = 
pkt->xfer_mode; + /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ + if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) + xfer_mode = VIO_NEW_DRING_MODE; + + /* MTU negotiation: + * < v1.3 - ETH_FRAME_LEN exactly + * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change + * pkt->mtu for ACK + * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly + */ + if (vio_version_before(vio, 1, 3)) { + localmtu = ETH_FRAME_LEN; + } else if (vio_version_after(vio, 1, 3)) { + localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; + localmtu = min(pkt->mtu, localmtu); + pkt->mtu = localmtu; + } else { /* v1.3 */ + localmtu = ETH_FRAME_LEN + VLAN_HLEN; + } + port->rmtu = localmtu; + + /* LSO negotiation */ + if (vio_version_after_eq(vio, 1, 7)) + port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); + else + port->tso = false; + if (port->tso) { + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); + if (port->tsolen < VNET_MINTSO) { + port->tso = false; + port->tsolen = 0; + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + } + pkt->ipv4_lso_maxlen = port->tsolen; + } else { + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + pkt->ipv4_lso_maxlen = 0; + } + + /* for version >= 1.6, ACK packet mode we support */ + if (vio_version_after_eq(vio, 1, 6)) { + pkt->xfer_mode = VIO_NEW_DRING_MODE; + pkt->options = VIO_TX_DRING; + } + + if (!(xfer_mode | VIO_NEW_DRING_MODE) || + pkt->addr_type != VNET_ADDR_ETHERMAC || + pkt->mtu != localmtu) { + viodbg(HS, "SEND NET ATTR NACK\n"); + + pkt->tag.stype = VIO_SUBTYPE_NACK; + + (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); + + return -ECONNRESET; + } else { + viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " + "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " + "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); + + pkt->tag.stype = VIO_SUBTYPE_ACK; + + return vio_ldc_send(vio, pkt, sizeof(*pkt)); + } + +} + +static int handle_attr_ack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR ACK\n"); + + return 0; +} + +static int handle_attr_nack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR NACK\n"); + + return -ECONNRESET; +} + +static int vnet_handle_attr(struct vio_driver_state *vio, void *arg) +{ + struct vio_net_attr_info *pkt = arg; + + switch (pkt->tag.stype) { + case VIO_SUBTYPE_INFO: + return handle_attr_info(vio, pkt); + + case VIO_SUBTYPE_ACK: + return handle_attr_ack(vio, pkt); + + case VIO_SUBTYPE_NACK: + return handle_attr_nack(vio, pkt); + + default: + return -ECONNRESET; + } +} + +static void vnet_handshake_complete(struct vio_driver_state *vio) +{ + struct vio_dring_state *dr; + + dr = &vio->drings[VIO_DRIVER_RX_RING]; + dr->snd_nxt = dr->rcv_nxt = 1; + + dr = &vio->drings[VIO_DRIVER_TX_RING]; + dr->snd_nxt = dr->rcv_nxt = 1; +} + +/* The hypervisor interface that implements copying to/from imported + * memory from another domain requires that copies are done to 8-byte + * aligned buffers, and that the lengths of such copies are also 8-byte + * multiples. + * + * So we align skb->data to an 8-byte multiple and pad-out the data + * area so we can round the copy length up to the next multiple of + * 8 for the copy. 
+ * + * The transmitter puts the actual start of the packet 6 bytes into + * the buffer it sends over, so that the IP headers after the ethernet + * header are aligned properly. These 6 bytes are not in the descriptor + * length, they are simply implied. This offset is represented using + * the VNET_PACKET_SKIP macro. + */ +static struct sk_buff *alloc_and_align_skb(struct net_device *dev, + unsigned int len) +{ + struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); + unsigned long addr, off; + + if (unlikely(!skb)) + return NULL; + + addr = (unsigned long) skb->data; + off = ((addr + 7UL) & ~7UL) - addr; + if (off) + skb_reserve(skb, off); + + return skb; +} + +static inline void vnet_fullcsum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IP)) + return; + if (iph->protocol != IPPROTO_TCP && + iph->protocol != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (iph->protocol == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (iph->protocol == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} + +static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) +{ + struct net_device *dev = port->vp->dev; + unsigned int len = desc->size; + unsigned int copy_len; + struct sk_buff *skb; + int maxlen; + int err; + + err = -EMSGSIZE; + if (port->tso && port->tsolen > port->rmtu) + maxlen = port->tsolen; + else + maxlen = port->rmtu; + if (unlikely(len < ETH_ZLEN || len > maxlen)) { + dev->stats.rx_length_errors++; + goto out_dropped; + } + + skb = alloc_and_align_skb(dev, len); + err = -ENOMEM; + if (unlikely(!skb)) { + dev->stats.rx_missed_errors++; + goto out_dropped; + } + + copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; + skb_put(skb, copy_len); + err = ldc_copy(port->vio.lp, LDC_COPY_IN, + skb->data, copy_len, 0, + desc->cookies, desc->ncookies); + if (unlikely(err < 0)) { + dev->stats.rx_frame_errors++; + goto out_free_skb; + } + + skb_pull(skb, VNET_PACKET_SKIP); + skb_trim(skb, len); + skb->protocol = eth_type_trans(skb, dev); + + if (vio_version_after_eq(&port->vio, 1, 8)) { + struct vio_net_dext *dext = vio_net_ext(desc); + + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { + if (skb->protocol == ETH_P_IP) { + struct iphdr *iph = (struct iphdr *)skb->data; + + iph->check = 0; + ip_send_check(iph); + } + } + if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && + skb->ip_summed == CHECKSUM_NONE) + vnet_fullcsum(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_level = 0; + if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) + skb->csum_level = 1; + } + } + + skb->ip_summed = port->switch_port ? 
CHECKSUM_NONE : CHECKSUM_PARTIAL; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + napi_gro_receive(&port->napi, skb); + return 0; + +out_free_skb: + kfree_skb(skb); + +out_dropped: + dev->stats.rx_dropped++; + return err; +} + +static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end, u8 vio_dring_state) +{ + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_ACK, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = start, + .end_idx = end, + .state = vio_dring_state, + }; + int err, delay; + int retries = 0; + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + if (retries++ > VNET_MAX_RETRIES) { + pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", + port->raddr[0], port->raddr[1], + port->raddr[2], port->raddr[3], + port->raddr[4], port->raddr[5]); + break; + } + } while (err == -EAGAIN); + + if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { + port->stop_rx_idx = end; + port->stop_rx = true; + } else { + port->stop_rx_idx = 0; + port->stop_rx = false; + } + + return err; +} + +static struct vio_net_desc *get_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index) +{ + struct vio_net_desc *desc = port->vio.desc_buf; + int err; + + err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return ERR_PTR(err); + + return desc; +} + +static int put_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + struct vio_net_desc *desc, + u32 index) +{ + int err; + + err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return err; + + return 0; +} + +static int vnet_walk_rx_one(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index, int *needs_ack) +{ + struct vio_net_desc *desc = get_rx_desc(port, dr, index); + struct vio_driver_state *vio = &port->vio; + int err; + + BUG_ON(desc == NULL); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + if (desc->hdr.state != VIO_DESC_READY) + return 1; + + dma_rmb(); + + viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", + desc->hdr.state, desc->hdr.ack, + desc->size, desc->ncookies, + desc->cookies[0].cookie_addr, + desc->cookies[0].cookie_size); + + err = vnet_rx_one(port, desc); + if (err == -ECONNRESET) + return err; + desc->hdr.state = VIO_DESC_DONE; + err = put_rx_desc(port, dr, desc, index); + if (err < 0) + return err; + *needs_ack = desc->hdr.ack; + return 0; +} + +static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end, int *npkts, int budget) +{ + struct vio_driver_state *vio = &port->vio; + int ack_start = -1, ack_end = -1; + bool send_ack = true; + + end = (end == (u32) -1) ? 
vio_dring_prev(dr, start) + : vio_dring_next(dr, end); + + viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); + + while (start != end) { + int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); + if (err == -ECONNRESET) + return err; + if (err != 0) + break; + (*npkts)++; + if (ack_start == -1) + ack_start = start; + ack_end = start; + start = vio_dring_next(dr, start); + if (ack && start != end) { + err = vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_ACTIVE); + if (err == -ECONNRESET) + return err; + ack_start = -1; + } + if ((*npkts) >= budget) { + send_ack = false; + break; + } + } + if (unlikely(ack_start == -1)) + ack_start = ack_end = vio_dring_prev(dr, start); + if (send_ack) { + port->napi_resume = false; + return vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_STOPPED); + } else { + port->napi_resume = true; + port->napi_stop_idx = ack_end; + return 1; + } +} + +static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, + int budget) +{ + struct vio_dring_data *pkt = msgbuf; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; + struct vio_driver_state *vio = &port->vio; + + viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", + pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + if (unlikely(pkt->seq != dr->rcv_nxt)) { + pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", + pkt->seq, dr->rcv_nxt); + return 0; + } + + if (!port->napi_resume) + dr->rcv_nxt++; + + /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ + + return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, + npkts, budget); +} + +static int idx_is_pending(struct vio_dring_state *dr, u32 end) +{ + u32 idx = dr->cons; + int found = 0; + + while (idx != dr->prod) { + if (idx == end) { + found = 1; + break; + } + idx = vio_dring_next(dr, idx); + } + return found; +} + +static int vnet_ack(struct vnet_port *port, void *msgbuf) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data *pkt = msgbuf; + struct net_device *dev; + struct vnet *vp; + u32 end; + struct vio_net_desc *desc; + struct netdev_queue *txq; + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + + end = pkt->end_idx; + vp = port->vp; + dev = vp->dev; + netif_tx_lock(dev); + if (unlikely(!idx_is_pending(dr, end))) { + netif_tx_unlock(dev); + return 0; + } + + /* sync for race conditions with vnet_start_xmit() and tell xmit it + * is time to send a trigger. + */ + dr->cons = vio_dring_next(dr, end); + desc = vio_dring_entry(dr, dr->cons); + if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { + /* vnet_start_xmit() just populated this dring but missed + * sending the "start" LDC message to the consumer. + * Send a "start" trigger on its behalf. 
+ */ + if (__vnet_tx_trigger(port, dr->cons) > 0) + port->start_cons = false; + else + port->start_cons = true; + } else { + port->start_cons = true; + } + netif_tx_unlock(dev); + + txq = netdev_get_tx_queue(dev, port->q_index); + if (unlikely(netif_tx_queue_stopped(txq) && + vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) + return 1; + + return 0; +} + +static int vnet_nack(struct vnet_port *port, void *msgbuf) +{ + /* XXX just reset or similar XXX */ + return 0; +} + +static int handle_mcast(struct vnet_port *port, void *msgbuf) +{ + struct vio_net_mcast_info *pkt = msgbuf; + + if (pkt->tag.stype != VIO_SUBTYPE_ACK) + pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", + port->vp->dev->name, + pkt->tag.type, + pkt->tag.stype, + pkt->tag.stype_env, + pkt->tag.sid); + + return 0; +} + +/* Got back a STOPPED LDC message on port. If the queue is stopped, + * wake it up so that we'll send out another START message at the + * next TX. + */ +static void maybe_tx_wakeup(struct vnet_port *port) +{ + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(port->vp->dev, port->q_index); + __netif_tx_lock(txq, smp_processor_id()); + if (likely(netif_tx_queue_stopped(txq))) { + struct vio_dring_state *dr; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + netif_tx_wake_queue(txq); + } + __netif_tx_unlock(txq); +} + +static inline bool port_is_up(struct vnet_port *vnet) +{ + struct vio_driver_state *vio = &vnet->vio; + + return !!(vio->hs_state & VIO_HS_COMPLETE); +} + +static int vnet_event_napi(struct vnet_port *port, int budget) +{ + struct vio_driver_state *vio = &port->vio; + int tx_wakeup, err; + int npkts = 0; + int event = (port->rx_event & LDC_EVENT_RESET); + +ldc_ctrl: + if (unlikely(event == LDC_EVENT_RESET || + event == LDC_EVENT_UP)) { + vio_link_state_change(vio, event); + + if (event == LDC_EVENT_RESET) { + vnet_port_reset(port); + vio_port_up(vio); + } + port->rx_event = 0; + return 0; + } + /* We may have multiple LDC events in rx_event. Unroll send_events() */ + event = (port->rx_event & LDC_EVENT_UP); + port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); + if (event == LDC_EVENT_UP) + goto ldc_ctrl; + event = port->rx_event; + if (!(event & LDC_EVENT_DATA_READY)) + return 0; + + /* we dont expect any other bits than RESET, UP, DATA_READY */ + BUG_ON(event != LDC_EVENT_DATA_READY); + + tx_wakeup = err = 0; + while (1) { + union { + struct vio_msg_tag tag; + u64 raw[8]; + } msgbuf; + + if (port->napi_resume) { + struct vio_dring_data *pkt = + (struct vio_dring_data *)&msgbuf; + struct vio_dring_state *dr = + &port->vio.drings[VIO_DRIVER_RX_RING]; + + pkt->tag.type = VIO_TYPE_DATA; + pkt->tag.stype = VIO_SUBTYPE_INFO; + pkt->tag.stype_env = VIO_DRING_DATA; + pkt->seq = dr->rcv_nxt; + pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); + pkt->end_idx = -1; + goto napi_resume; + } + err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + vio_conn_reset(vio); + break; + } + if (err == 0) + break; + viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", + msgbuf.tag.type, + msgbuf.tag.stype, + msgbuf.tag.stype_env, + msgbuf.tag.sid); + err = vio_validate_sid(vio, &msgbuf.tag); + if (err < 0) + break; +napi_resume: + if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { + if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { + if (!port_is_up(port)) { + /* failures like handshake_failure() + * may have cleaned up dring, but + * NAPI polling may bring us here. 
+ */ + err = -ECONNRESET; + break; + } + err = vnet_rx(port, &msgbuf, &npkts, budget); + if (npkts >= budget) + break; + if (npkts == 0) + break; + } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { + err = vnet_ack(port, &msgbuf); + if (err > 0) + tx_wakeup |= err; + } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { + err = vnet_nack(port, &msgbuf); + } + } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { + if (msgbuf.tag.stype_env == VNET_MCAST_INFO) + err = handle_mcast(port, &msgbuf); + else + err = vio_control_pkt_engine(vio, &msgbuf); + if (err) + break; + } else { + err = vnet_handle_unknown(port, &msgbuf); + } + if (err == -ECONNRESET) + break; + } + if (unlikely(tx_wakeup && err != -ECONNRESET)) + maybe_tx_wakeup(port); + return npkts; +} + +static int vnet_poll(struct napi_struct *napi, int budget) +{ + struct vnet_port *port = container_of(napi, struct vnet_port, napi); + struct vio_driver_state *vio = &port->vio; + int processed = vnet_event_napi(port, budget); + + if (processed < budget) { + napi_complete(napi); + port->rx_event &= ~LDC_EVENT_DATA_READY; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); + } + return processed; +} + +static void vnet_event(void *arg, int event) +{ + struct vnet_port *port = arg; + struct vio_driver_state *vio = &port->vio; + + port->rx_event |= event; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); + napi_schedule(&port->napi); + +} + +static int __vnet_tx_trigger(struct vnet_port *port, u32 start) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_INFO, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = start, + .end_idx = (u32) -1, + }; + int err, delay; + int retries = 0; + + if (port->stop_rx) { + err = vnet_send_ack(port, + &port->vio.drings[VIO_DRIVER_RX_RING], + port->stop_rx_idx, -1, + VIO_DRING_STOPPED); + if (err <= 0) + return err; + } + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + if (retries++ > VNET_MAX_RETRIES) + break; + } while (err == -EAGAIN); + + return err; +} + +struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) +{ + unsigned int hash = vnet_hashfn(skb->data); + struct hlist_head *hp = &vp->port_hash[hash]; + struct vnet_port *port; + + hlist_for_each_entry_rcu(port, hp, hash) { + if (!port_is_up(port)) + continue; + if (ether_addr_equal(port->raddr, skb->data)) + return port; + } + list_for_each_entry_rcu(port, &vp->port_list, list) { + if (!port->switch_port) + continue; + if (!port_is_up(port)) + continue; + return port; + } + return NULL; +} + +static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, + unsigned *pending) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *skb = NULL; + int i, txi; + + *pending = 0; + + txi = dr->prod; + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + --txi; + if (txi < 0) + txi = VNET_TX_RING_SIZE-1; + + d = vio_dring_entry(dr, txi); + + if (d->hdr.state == VIO_DESC_READY) { + (*pending)++; + continue; + } + if (port->tx_bufs[txi].skb) { + if (d->hdr.state != VIO_DESC_DONE) + pr_notice("invalid ring buffer state %d\n", + d->hdr.state); + BUG_ON(port->tx_bufs[txi].skb->next); + + port->tx_bufs[txi].skb->next = skb; + skb = port->tx_bufs[txi].skb; + 
port->tx_bufs[txi].skb = NULL; + + ldc_unmap(port->vio.lp, + port->tx_bufs[txi].cookies, + port->tx_bufs[txi].ncookies); + } else if (d->hdr.state == VIO_DESC_FREE) + break; + d->hdr.state = VIO_DESC_FREE; + } + return skb; +} + +static inline void vnet_free_skbs(struct sk_buff *skb) +{ + struct sk_buff *next; + + while (skb) { + next = skb->next; + skb->next = NULL; + dev_kfree_skb(skb); + skb = next; + } +} + +static void vnet_clean_timer_expire(unsigned long port0) +{ + struct vnet_port *port = (struct vnet_port *)port0; + struct sk_buff *freeskbs; + unsigned pending; + + netif_tx_lock(port->vp->dev); + freeskbs = vnet_clean_tx_ring(port, &pending); + netif_tx_unlock(port->vp->dev); + + vnet_free_skbs(freeskbs); + + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else + del_timer(&port->clean_timer); +} + +static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, + struct ldc_trans_cookie *cookies, int ncookies, + unsigned int map_perm) +{ + int i, nc, err, blen; + + /* header */ + blen = skb_headlen(skb); + if (blen < ETH_ZLEN) + blen = ETH_ZLEN; + blen += VNET_PACKET_SKIP; + blen += 8 - (blen & 7); + + err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, + ncookies, map_perm); + if (err < 0) + return err; + nc = err; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + if (nc < ncookies) { + vaddr = kmap_atomic(skb_frag_page(f)); + blen = skb_frag_size(f); + blen += 8 - (blen & 7); + err = ldc_map_single(lp, vaddr + f->page_offset, + blen, cookies + nc, ncookies - nc, + map_perm); + kunmap_atomic(vaddr); + } else { + err = -EMSGSIZE; + } + + if (err < 0) { + ldc_unmap(lp, cookies, nc); + return err; + } + nc += err; + } + return nc; +} + +static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) +{ + struct sk_buff *nskb; + int i, len, pad, docopy; + + len = skb->len; + pad = 0; + if (len < ETH_ZLEN) { + pad += ETH_ZLEN - skb->len; + len += pad; + } + len += VNET_PACKET_SKIP; + pad += 8 - (len & 7); + + /* make sure we have enough cookies and alignment in every frag */ + docopy = skb_shinfo(skb)->nr_frags >= ncookies; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + + docopy |= f->page_offset & 7; + } + if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || + skb_tailroom(skb) < pad || + skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { + int start = 0, offset; + __wsum csum; + + len = skb->len > ETH_ZLEN ? 
skb->len : ETH_ZLEN; + nskb = alloc_and_align_skb(skb->dev, len); + if (nskb == NULL) { + dev_kfree_skb(skb); + return NULL; + } + skb_reserve(nskb, VNET_PACKET_SKIP); + + nskb->protocol = skb->protocol; + offset = skb_mac_header(skb) - skb->data; + skb_set_mac_header(nskb, offset); + offset = skb_network_header(skb) - skb->data; + skb_set_network_header(nskb, offset); + offset = skb_transport_header(skb) - skb->data; + skb_set_transport_header(nskb, offset); + + offset = 0; + nskb->csum_offset = skb->csum_offset; + nskb->ip_summed = skb->ip_summed; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + start = skb_checksum_start_offset(skb); + if (start) { + struct iphdr *iph = ip_hdr(nskb); + int offset = start + nskb->csum_offset; + + if (skb_copy_bits(skb, 0, nskb->data, start)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + *(__sum16 *)(skb->data + offset) = 0; + csum = skb_copy_and_csum_bits(skb, start, + nskb->data + start, + skb->len - start, 0); + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - start, + iph->protocol, csum); + } + *(__sum16 *)(nskb->data + offset) = csum; + + nskb->ip_summed = CHECKSUM_NONE; + } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + (void)skb_put(nskb, skb->len); + if (skb_is_gso(skb)) { + skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; + skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; + } + nskb->queue_mapping = skb->queue_mapping; + dev_kfree_skb(skb); + skb = nskb; + } + return skb; +} + +static u16 +vnet_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = __tx_port_find(vp, skb); + + if (port == NULL) + return 0; + return port->q_index; +} + +static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev); + +static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) +{ + struct net_device *dev = port->vp->dev; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *segs; + int maclen, datalen; + int status; + int gso_size, gso_type, gso_segs; + int hlen = skb_transport_header(skb) - skb_mac_header(skb); + int proto = IPPROTO_IP; + + if (skb->protocol == htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; + else if (skb->protocol == htons(ETH_P_IPV6)) + proto = ipv6_hdr(skb)->nexthdr; + + if (proto == IPPROTO_TCP) + hlen += tcp_hdr(skb)->doff * 4; + else if (proto == IPPROTO_UDP) + hlen += sizeof(struct udphdr); + else { + pr_err("vnet_handle_offloads GSO with unknown transport " + "protocol %d tproto %d\n", skb->protocol, proto); + hlen = 128; /* XXX */ + } + datalen = port->tsolen - hlen; + + gso_size = skb_shinfo(skb)->gso_size; + gso_type = skb_shinfo(skb)->gso_type; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (port->tso && gso_size < datalen) + gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); + + if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, port->q_index); + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) + return NETDEV_TX_BUSY; + netif_tx_wake_queue(txq); + } + + maclen = skb_network_header(skb) - skb_mac_header(skb); + skb_pull(skb, maclen); + + if (port->tso && gso_size < datalen) { + if (skb_unclone(skb, GFP_ATOMIC)) + goto out_dropped; + + /* segment to TSO size */ 
+ skb_shinfo(skb)->gso_size = datalen; + skb_shinfo(skb)->gso_segs = gso_segs; + } + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) + goto out_dropped; + + skb_push(skb, maclen); + skb_reset_mac_header(skb); + + status = 0; + while (segs) { + struct sk_buff *curr = segs; + + segs = segs->next; + curr->next = NULL; + if (port->tso && curr->len > dev->mtu) { + skb_shinfo(curr)->gso_size = gso_size; + skb_shinfo(curr)->gso_type = gso_type; + skb_shinfo(curr)->gso_segs = + DIV_ROUND_UP(curr->len - hlen, gso_size); + } else + skb_shinfo(curr)->gso_size = 0; + + skb_push(curr, maclen); + skb_reset_mac_header(curr); + memcpy(skb_mac_header(curr), skb_mac_header(skb), + maclen); + curr->csum_start = skb_transport_header(curr) - curr->head; + if (ip_hdr(curr)->protocol == IPPROTO_TCP) + curr->csum_offset = offsetof(struct tcphdr, check); + else if (ip_hdr(curr)->protocol == IPPROTO_UDP) + curr->csum_offset = offsetof(struct udphdr, check); + + if (!(status & NETDEV_TX_MASK)) + status = vnet_start_xmit(curr, dev); + if (status & NETDEV_TX_MASK) + dev_kfree_skb_any(curr); + } + + if (!(status & NETDEV_TX_MASK)) + dev_kfree_skb_any(skb); + return status; +out_dropped: + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = NULL; + struct vio_dring_state *dr; + struct vio_net_desc *d; + unsigned int len; + struct sk_buff *freeskbs = NULL; + int i, err, txi; + unsigned pending = 0; + struct netdev_queue *txq; + + rcu_read_lock(); + port = __tx_port_find(vp, skb); + if (unlikely(!port)) { + rcu_read_unlock(); + goto out_dropped; + } + + if (skb_is_gso(skb) && skb->len > port->tsolen) { + err = vnet_handle_offloads(port, skb); + rcu_read_unlock(); + return err; + } + + if (!skb_is_gso(skb) && skb->len > port->rmtu) { + unsigned long localmtu = port->rmtu - ETH_HLEN; + + if (vio_version_after_eq(&port->vio, 1, 3)) + localmtu -= VLAN_HLEN; + + if (skb->protocol == htons(ETH_P_IP)) { + struct flowi4 fl4; + struct rtable *rt = NULL; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = dev->ifindex; + fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); + fl4.daddr = ip_hdr(skb)->daddr; + fl4.saddr = ip_hdr(skb)->saddr; + + rt = ip_route_output_key(dev_net(dev), &fl4); + rcu_read_unlock(); + if (!IS_ERR(rt)) { + skb_dst_set(skb, &rt->dst); + icmp_send(skb, ICMP_DEST_UNREACH, + ICMP_FRAG_NEEDED, + htonl(localmtu)); + } + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); +#endif + goto out_dropped; + } + + skb = vnet_skb_shape(skb, 2); + + if (unlikely(!skb)) + goto out_dropped; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + vnet_fullcsum(skb); + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + i = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, i); + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); + dev->stats.tx_errors++; + } + rcu_read_unlock(); + return NETDEV_TX_BUSY; + } + + d = vio_dring_cur(dr); + + txi = dr->prod; + + freeskbs = vnet_clean_tx_ring(port, &pending); + + BUG_ON(port->tx_bufs[txi].skb); + + len = skb->len; + if (len < ETH_ZLEN) + len = ETH_ZLEN; + + err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, + (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); + if (err < 0) { + netdev_info(dev, "tx buffer map error %d\n", err); + goto out_dropped; + } + + port->tx_bufs[txi].skb = skb; + skb = NULL; + port->tx_bufs[txi].ncookies = err; + + /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), + * thus it is safe to not set VIO_ACK_ENABLE for each transmission: + * the protocol itself does not require it as long as the peer + * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. + * + * An ACK for every packet in the ring is expensive as the + * sending of LDC messages is slow and affects performance. + */ + d->hdr.ack = VIO_ACK_DISABLE; + d->size = len; + d->ncookies = port->tx_bufs[txi].ncookies; + for (i = 0; i < d->ncookies; i++) + d->cookies[i] = port->tx_bufs[txi].cookies[i]; + if (vio_version_after_eq(&port->vio, 1, 7)) { + struct vio_net_dext *dext = vio_net_ext(d); + + memset(dext, 0, sizeof(*dext)); + if (skb_is_gso(port->tx_bufs[txi].skb)) { + dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) + ->gso_size; + dext->flags |= VNET_PKT_IPV4_LSO; + } + if (vio_version_after_eq(&port->vio, 1, 8) && + !port->switch_port) { + dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; + dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; + } + } + + /* This has to be a non-SMP write barrier because we are writing + * to memory which is shared with the peer LDOM. + */ + dma_wmb(); + + d->hdr.state = VIO_DESC_READY; + + /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent + * to notify the consumer that some descriptors are READY. + * After that "start" trigger, no additional triggers are needed until + * a DRING_STOPPED is received from the consumer. The dr->cons field + * (set up by vnet_ack()) has the value of the next dring index + * that has not yet been ack-ed. We send a "start" trigger here + * if, and only if, start_cons is true (reset it afterward). Conversely, + * vnet_ack() should check if the dring corresponding to cons + * is marked READY, but start_cons was false. + * If so, vnet_ack() should send out the missed "start" trigger. + * + * Note that the dma_wmb() above makes sure the cookies et al. are + * not globally visible before the VIO_DESC_READY, and that the + * stores are ordered correctly by the compiler. The consumer will + * not proceed until the VIO_DESC_READY is visible assuring that + * the consumer does not observe anything related to descriptors + * out of order. 
The HV trap from the LDC start trigger is the + * producer to consumer announcement that work is available to the + * consumer + */ + if (!port->start_cons) + goto ldc_start_done; /* previous trigger suffices */ + + err = __vnet_tx_trigger(port, dr->cons); + if (unlikely(err < 0)) { + netdev_info(dev, "TX trigger error %d\n", err); + d->hdr.state = VIO_DESC_FREE; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; + dev->stats.tx_carrier_errors++; + goto out_dropped; + } + +ldc_start_done: + port->start_cons = false; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; + + dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) + netif_tx_wake_queue(txq); + } + + (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); + rcu_read_unlock(); + + vnet_free_skbs(freeskbs); + + return NETDEV_TX_OK; + +out_dropped: + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else if (port) + del_timer(&port->clean_timer); + if (port) + rcu_read_unlock(); + if (skb) + dev_kfree_skb(skb); + vnet_free_skbs(freeskbs); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static void vnet_tx_timeout(struct net_device *dev) +{ + /* XXX Implement me XXX */ +} + +static int vnet_open(struct net_device *dev) +{ + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + + return 0; +} + +static int vnet_close(struct net_device *dev) +{ + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + + return 0; +} + +static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) +{ + struct vnet_mcast_entry *m; + + for (m = vp->mcast_list; m; m = m->next) { + if (ether_addr_equal(m->addr, addr)) + return m; + } + return NULL; +} + +static void __update_mc_list(struct vnet *vp, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + struct vnet_mcast_entry *m; + + m = __vnet_mc_find(vp, ha->addr); + if (m) { + m->hit = 1; + continue; + } + + if (!m) { + m = kzalloc(sizeof(*m), GFP_ATOMIC); + if (!m) + continue; + memcpy(m->addr, ha->addr, ETH_ALEN); + m->hit = 1; + + m->next = vp->mcast_list; + vp->mcast_list = m; + } + } +} + +static void __send_mc_list(struct vnet *vp, struct vnet_port *port) +{ + struct vio_net_mcast_info info; + struct vnet_mcast_entry *m, **pp; + int n_addrs; + + memset(&info, 0, sizeof(info)); + + info.tag.type = VIO_TYPE_CTRL; + info.tag.stype = VIO_SUBTYPE_INFO; + info.tag.stype_env = VNET_MCAST_INFO; + info.tag.sid = vio_send_sid(&port->vio); + info.set = 1; + + n_addrs = 0; + for (m = vp->mcast_list; m; m = m->next) { + if (m->sent) + continue; + m->sent = 1; + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + } + if (n_addrs) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + } + + info.set = 0; + + n_addrs = 0; + pp = &vp->mcast_list; + while ((m = *pp) != NULL) { + if (m->hit) { + m->hit = 0; + pp = &m->next; + continue; + } + + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + + *pp = m->next; + kfree(m); + } + if (n_addrs) { + info.count = n_addrs; + (void) 
vio_ldc_send(&port->vio, &info, sizeof(info)); + } +} + +static void vnet_set_rx_mode(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &vp->port_list, list) { + + if (port->switch_port) { + __update_mc_list(vp, dev); + __send_mc_list(vp, port); + break; + } + } + rcu_read_unlock(); +} + +static int vnet_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > 65535) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static int vnet_set_mac_addr(struct net_device *dev, void *p) +{ + return -EINVAL; +} + +static void vnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static u32 vnet_get_msglevel(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + return vp->msg_enable; +} + +static void vnet_set_msglevel(struct net_device *dev, u32 value) +{ + struct vnet *vp = netdev_priv(dev); + vp->msg_enable = value; +} + +static const struct ethtool_ops vnet_ethtool_ops = { + .get_drvinfo = vnet_get_drvinfo, + .get_msglevel = vnet_get_msglevel, + .set_msglevel = vnet_set_msglevel, + .get_link = ethtool_op_get_link, +}; + +static void vnet_port_free_tx_bufs(struct vnet_port *port) +{ + struct vio_dring_state *dr; + int i; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + if (dr->base == NULL) + return; + + for (i = 0; i < VNET_TX_RING_SIZE; i++) { + struct vio_net_desc *d; + void *skb = port->tx_bufs[i].skb; + + if (!skb) + continue; + + d = vio_dring_entry(dr, i); + + ldc_unmap(port->vio.lp, + port->tx_bufs[i].cookies, + port->tx_bufs[i].ncookies); + dev_kfree_skb(skb); + port->tx_bufs[i].skb = NULL; + d->hdr.state = VIO_DESC_FREE; + } + ldc_free_exp_dring(port->vio.lp, dr->base, + (dr->entry_size * dr->num_entries), + dr->cookies, dr->ncookies); + dr->base = NULL; + dr->entry_size = 0; + dr->num_entries = 0; + dr->pending = 0; + dr->ncookies = 0; +} + +static void vnet_port_reset(struct vnet_port *port) +{ + del_timer(&port->clean_timer); + vnet_port_free_tx_bufs(port); + port->rmtu = 0; + port->tso = true; + port->tsolen = 0; +} + +static int vnet_port_alloc_tx_ring(struct vnet_port *port) +{ + struct vio_dring_state *dr; + unsigned long len, elen; + int i, err, ncookies; + void *dring; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + elen = sizeof(struct vio_net_desc) + + sizeof(struct ldc_trans_cookie) * 2; + if (vio_version_after_eq(&port->vio, 1, 7)) + elen += sizeof(struct vio_net_dext); + len = VNET_TX_RING_SIZE * elen; + + ncookies = VIO_MAX_RING_COOKIES; + dring = ldc_alloc_exp_dring(port->vio.lp, len, + dr->cookies, &ncookies, + (LDC_MAP_SHADOW | + LDC_MAP_DIRECT | + LDC_MAP_RW)); + if (IS_ERR(dring)) { + err = PTR_ERR(dring); + goto err_out; + } + + dr->base = dring; + dr->entry_size = elen; + dr->num_entries = VNET_TX_RING_SIZE; + dr->prod = dr->cons = 0; + port->start_cons = true; /* need an initial trigger */ + dr->pending = VNET_TX_RING_SIZE; + dr->ncookies = ncookies; + + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + d = vio_dring_entry(dr, i); + d->hdr.state = VIO_DESC_FREE; + } + return 0; + +err_out: + vnet_port_free_tx_bufs(port); + + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vnet_poll_controller(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + unsigned long flags; + + 
spin_lock_irqsave(&vp->lock, flags); + if (!list_empty(&vp->port_list)) { + port = list_entry(vp->port_list.next, struct vnet_port, list); + napi_schedule(&port->napi); + } + spin_unlock_irqrestore(&vp->lock, flags); +} +#endif +static LIST_HEAD(vnet_list); +static DEFINE_MUTEX(vnet_list_mutex); + +static const struct net_device_ops vnet_ops = { + .ndo_open = vnet_open, + .ndo_stop = vnet_close, + .ndo_set_rx_mode = vnet_set_rx_mode, + .ndo_set_mac_address = vnet_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = vnet_tx_timeout, + .ndo_change_mtu = vnet_change_mtu, + .ndo_start_xmit = vnet_start_xmit, + .ndo_select_queue = vnet_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vnet_poll_controller, +#endif +}; + +static struct vnet *vnet_new(const u64 *local_mac) +{ + struct net_device *dev; + struct vnet *vp; + int err, i; + + dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1); + if (!dev) + return ERR_PTR(-ENOMEM); + dev->needed_headroom = VNET_PACKET_SKIP + 8; + dev->needed_tailroom = 8; + + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; + + vp = netdev_priv(dev); + + spin_lock_init(&vp->lock); + vp->dev = dev; + + INIT_LIST_HEAD(&vp->port_list); + for (i = 0; i < VNET_PORT_HASH_SIZE; i++) + INIT_HLIST_HEAD(&vp->port_hash[i]); + INIT_LIST_HEAD(&vp->list); + vp->local_mac = *local_mac; + + dev->netdev_ops = &vnet_ops; + dev->ethtool_ops = &vnet_ethtool_ops; + dev->watchdog_timeo = VNET_TX_TIMEOUT; + + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + NETIF_F_HW_CSUM | NETIF_F_SG; + dev->features = dev->hw_features; + + err = register_netdev(dev); + if (err) { + pr_err("Cannot register net device, aborting\n"); + goto err_out_free_dev; + } + + netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr); + + list_add(&vp->list, &vnet_list); + + return vp; + +err_out_free_dev: + free_netdev(dev); + + return ERR_PTR(err); +} + +static struct vnet *vnet_find_or_create(const u64 *local_mac) +{ + struct vnet *iter, *vp; + + mutex_lock(&vnet_list_mutex); + vp = NULL; + list_for_each_entry(iter, &vnet_list, list) { + if (iter->local_mac == *local_mac) { + vp = iter; + break; + } + } + if (!vp) + vp = vnet_new(local_mac); + mutex_unlock(&vnet_list_mutex); + + return vp; +} + +static void vnet_cleanup(void) +{ + struct vnet *vp; + struct net_device *dev; + + mutex_lock(&vnet_list_mutex); + while (!list_empty(&vnet_list)) { + vp = list_first_entry(&vnet_list, struct vnet, list); + list_del(&vp->list); + dev = vp->dev; + /* vio_unregister_driver() should have cleaned up port_list */ + BUG_ON(!list_empty(&vp->port_list)); + unregister_netdev(dev); + free_netdev(dev); + } + mutex_unlock(&vnet_list_mutex); +} + +static const char *local_mac_prop = "local-mac-address"; + +static struct vnet *vnet_find_parent(struct mdesc_handle *hp, + u64 port_node) +{ + const u64 *local_mac = NULL; + u64 a; + + mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { + u64 target = mdesc_arc_target(hp, a); + const char *name; + + name = mdesc_get_property(hp, target, "name", NULL); + if (!name || strcmp(name, "network")) + continue; + + local_mac = mdesc_get_property(hp, target, + local_mac_prop, NULL); + if (local_mac) + break; + } + if (!local_mac) + return ERR_PTR(-ENODEV); + + return vnet_find_or_create(local_mac); +} + +static struct ldc_channel_config vnet_ldc_cfg = { + .event = vnet_event, + .mtu = 64, + .mode = LDC_MODE_UNRELIABLE, +}; + +static struct vio_driver_ops vnet_vio_ops = { + .send_attr = 
vnet_send_attr, + .handle_attr = vnet_handle_attr, + .handshake_complete = vnet_handshake_complete, +}; + +static void print_version(void) +{ + printk_once(KERN_INFO "%s", version); +} + +const char *remote_macaddr_prop = "remote-mac-address"; + +static void +vnet_port_add_txq(struct vnet_port *port) +{ + struct vnet *vp = port->vp; + int n; + + n = vp->nports++; + n = n & (VNET_MAX_TXQS - 1); + port->q_index = n; + netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); +} + +static void +vnet_port_rm_txq(struct vnet_port *port) +{ + port->vp->nports--; + netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); +} + +static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct mdesc_handle *hp; + struct vnet_port *port; + unsigned long flags; + struct vnet *vp; + const u64 *rmac; + int len, i, err, switch_port; + + print_version(); + + hp = mdesc_grab(); + + vp = vnet_find_parent(hp, vdev->mp); + if (IS_ERR(vp)) { + pr_err("Cannot find port parent vnet\n"); + err = PTR_ERR(vp); + goto err_out_put_mdesc; + } + + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); + err = -ENODEV; + if (!rmac) { + pr_err("Port lacks %s property\n", remote_macaddr_prop); + goto err_out_put_mdesc; + } + + port = kzalloc(sizeof(*port), GFP_KERNEL); + err = -ENOMEM; + if (!port) + goto err_out_put_mdesc; + + for (i = 0; i < ETH_ALEN; i++) + port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; + + port->vp = vp; + + err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, + vnet_versions, ARRAY_SIZE(vnet_versions), + &vnet_vio_ops, vp->dev->name); + if (err) + goto err_out_free_port; + + err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port); + if (err) + goto err_out_free_port; + + netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT); + + INIT_HLIST_NODE(&port->hash); + INIT_LIST_HEAD(&port->list); + + switch_port = 0; + if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) + switch_port = 1; + port->switch_port = switch_port; + port->tso = true; + port->tsolen = 0; + + spin_lock_irqsave(&vp->lock, flags); + if (switch_port) + list_add_rcu(&port->list, &vp->port_list); + else + list_add_tail_rcu(&port->list, &vp->port_list); + hlist_add_head_rcu(&port->hash, + &vp->port_hash[vnet_hashfn(port->raddr)]); + vnet_port_add_txq(port); + spin_unlock_irqrestore(&vp->lock, flags); + + dev_set_drvdata(&vdev->dev, port); + + pr_info("%s: PORT ( remote-mac %pM%s )\n", + vp->dev->name, port->raddr, switch_port ? 
" switch-port" : ""); + + setup_timer(&port->clean_timer, vnet_clean_timer_expire, + (unsigned long)port); + + napi_enable(&port->napi); + vio_port_up(&port->vio); + + mdesc_release(hp); + + return 0; + +err_out_free_port: + kfree(port); + +err_out_put_mdesc: + mdesc_release(hp); + return err; +} + +static int vnet_port_remove(struct vio_dev *vdev) +{ + struct vnet_port *port = dev_get_drvdata(&vdev->dev); + + if (port) { + + del_timer_sync(&port->vio.timer); + + napi_disable(&port->napi); + + list_del_rcu(&port->list); + hlist_del_rcu(&port->hash); + + synchronize_rcu(); + del_timer_sync(&port->clean_timer); + vnet_port_rm_txq(port); + netif_napi_del(&port->napi); + vnet_port_free_tx_bufs(port); + vio_ldc_free(&port->vio); + + dev_set_drvdata(&vdev->dev, NULL); + + kfree(port); + + } + return 0; +} + +static const struct vio_device_id vnet_port_match[] = { + { + .type = "vnet-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(vio, vnet_port_match); + +static struct vio_driver vnet_port_driver = { + .id_table = vnet_port_match, + .probe = vnet_port_probe, + .remove = vnet_port_remove, + .name = "vnet_port", +}; + +static int __init vnet_init(void) +{ + return vio_register_driver(&vnet_port_driver); +} + +static void __exit vnet_exit(void) +{ + vio_unregister_driver(&vnet_port_driver); + vnet_cleanup(); +} + +module_init(vnet_init); +module_exit(vnet_exit); diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h new file mode 100644 index 000000000..01ca78191 --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet.h @@ -0,0 +1,114 @@ +#ifndef _SUNVNET_H +#define _SUNVNET_H + +#include + +#define DESC_NCOOKIES(entry_size) \ + ((entry_size) - sizeof(struct vio_net_desc)) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define VNET_TX_TIMEOUT (5 * HZ) + +/* length of time (or less) we expect pending descriptors to be marked + * as VIO_DESC_DONE and skbs ready to be freed + */ +#define VNET_CLEAN_TIMEOUT ((HZ/100)+1) + +#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN) +#define VNET_TX_RING_SIZE 512 +#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) + +#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */ +#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */ + +/* VNET packets are sent in buffers with the first 6 bytes skipped + * so that after the ethernet header the IPv4/IPv6 headers are aligned + * properly. 
+ */ +#define VNET_PACKET_SKIP 6 + +#define VNET_MAXCOOKIES (VNET_MAXPACKET/PAGE_SIZE + 1) + +struct vnet_tx_entry { + struct sk_buff *skb; + unsigned int ncookies; + struct ldc_trans_cookie cookies[VNET_MAXCOOKIES]; +}; + +struct vnet; +struct vnet_port { + struct vio_driver_state vio; + + struct hlist_node hash; + u8 raddr[ETH_ALEN]; + unsigned switch_port:1; + unsigned tso:1; + unsigned __pad:14; + + struct vnet *vp; + + struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; + + struct list_head list; + + u32 stop_rx_idx; + bool stop_rx; + bool start_cons; + + struct timer_list clean_timer; + + u64 rmtu; + u16 tsolen; + + struct napi_struct napi; + u32 napi_stop_idx; + bool napi_resume; + int rx_event; + u16 q_index; +}; + +static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) +{ + return container_of(vio, struct vnet_port, vio); +} + +#define VNET_PORT_HASH_SIZE 16 +#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1) + +static inline unsigned int vnet_hashfn(u8 *mac) +{ + unsigned int val = mac[4] ^ mac[5]; + + return val & (VNET_PORT_HASH_MASK); +} + +struct vnet_mcast_entry { + u8 addr[ETH_ALEN]; + u8 sent; + u8 hit; + struct vnet_mcast_entry *next; +}; + +struct vnet { + /* Protects port_list and port_hash. */ + spinlock_t lock; + + struct net_device *dev; + + u32 msg_enable; + + struct list_head port_list; + + struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; + + struct vnet_mcast_entry *mcast_list; + + struct list_head list; + u64 local_mac; + + int nports; +}; + +#endif /* _SUNVNET_H */ diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig new file mode 100644 index 000000000..1fc027eda --- /dev/null +++ b/drivers/net/ethernet/tehuti/Kconfig @@ -0,0 +1,27 @@ +# +# Tehuti network device configuration +# + +config NET_VENDOR_TEHUTI + bool "Tehuti devices" + default y + depends on PCI + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Tehuti cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_TEHUTI + +config TEHUTI + tristate "Tehuti Networks 10G Ethernet" + depends on PCI + ---help--- + Tehuti Networks 10G Ethernet NIC + +endif # NET_VENDOR_TEHUTI diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile new file mode 100644 index 000000000..f995421dd --- /dev/null +++ b/drivers/net/ethernet/tehuti/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Tehuti network device drivers. +# + +obj-$(CONFIG_TEHUTI) += tehuti.o diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c new file mode 100644 index 000000000..4cb49a53f --- /dev/null +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -0,0 +1,2492 @@ +/* + * Tehuti Networks(R) Network Driver + * ethtool interface implementation + * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* + * RX HW/SW interaction overview + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * There are 2 types of RX communication channels between driver and NIC. 
+ * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
+ * traffic. This Fifo is filled by SW and is read by HW. Each descriptor holds
+ * info about buffer's location, size and ID. An ID field is used to identify a
+ * buffer when it's returned with data via RXD Fifo (see below).
+ * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is
+ * filled by HW and is read by SW. Each descriptor holds status and ID.
+ * HW pops a descriptor from the RXF Fifo, stores its ID, fills the buffer with
+ * incoming data, moves it via DMA into host memory, builds a new RXD descriptor
+ * with the same ID, pushes it into the RXD Fifo and raises an interrupt to
+ * indicate new RX data.
+ *
+ * Current NIC configuration (registers + firmware) makes NIC use 2 RXF Fifos.
+ * One holds 1.5K packets and the other holds 26K packets. Depending on the
+ * incoming packet size, HW decides which RXF Fifo to pop a buffer from. When a
+ * packet is filled with data, HW builds a new RXD descriptor for it and pushes
+ * it into the single RXD Fifo.
+ *
+ * RX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * skb db - used to keep track of all skbs owned by SW and their dma addresses.
+ * For the RX case, ownership lasts from allocating a new empty skb for RXF until
+ * accepting the full skb from RXD and passing it to the OS. Each RXF Fifo has
+ * its own skb db. Implemented as an array with a bitmask.
+ * fifo - keeps info about the fifo's size and location, relevant HW registers,
+ * usage and skb db. Each RXD and RXF Fifo has its own fifo structure.
+ * Implemented as a simple struct.
+ *
+ * RX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * Upon initialization (ifconfig up) the driver creates the RX fifos and
+ * initializes the relevant registers. At the end of the init phase, the driver
+ * enables interrupts. The NIC sees that there are no RXF buffers and raises the
+ * RD_INTR interrupt, the isr fills skbs and Rx begins.
+ * The driver has two receive operation modes:
+ * NAPI - interrupt-driven mixed with polling
+ * interrupt-driven only
+ *
+ * The interrupt-driven only flow is as follows. When a buffer is ready, HW
+ * raises an interrupt and the isr is called. The isr collects all available
+ * packets (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits.
+
+ * Rx buffer allocation note
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The driver takes care to feed only as many RxF descriptors as needed, so that
+ * the corresponding RxD descriptors cannot fill the entire RxD fifo. The main
+ * reason is the lack of an overflow check in Bordeaux for the RxD fifo
+ * free/used size.
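+ *
+ * Roughly (see bdx_rx_alloc_skbs() below), the refill loop only posts as many
+ * RxF descriptors as the skb db has free slots:
+ *
+ *	dno = bdx_rxdb_available(db) - 1;
+ *	while (dno > 0) {
+ *		... allocate an skb and post one RxF descriptor ...
+ *		dno--;
+ *	}
+ *
+ * so the number of outstanding buffers stays bounded by the skb db size.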
+ * FIXME: this is NOT fully implemented, more work should be done + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "tehuti.h" + +static const struct pci_device_id bdx_pci_tbl[] = { + { PCI_VDEVICE(TEHUTI, 0x3009), }, + { PCI_VDEVICE(TEHUTI, 0x3010), }, + { PCI_VDEVICE(TEHUTI, 0x3014), }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, bdx_pci_tbl); + +/* Definitions needed by ISR or NAPI functions */ +static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f); +static void bdx_tx_cleanup(struct bdx_priv *priv); +static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget); + +/* Definitions needed by FW loading */ +static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size); + +/* Definitions needed by hw_start */ +static int bdx_tx_init(struct bdx_priv *priv); +static int bdx_rx_init(struct bdx_priv *priv); + +/* Definitions needed by bdx_close */ +static void bdx_rx_free(struct bdx_priv *priv); +static void bdx_tx_free(struct bdx_priv *priv); + +/* Definitions needed by bdx_probe */ +static void bdx_set_ethtool_ops(struct net_device *netdev); + +/************************************************************************* + * Print Info * + *************************************************************************/ + +static void print_hw_id(struct pci_dev *pdev) +{ + struct pci_nic *nic = pci_get_drvdata(pdev); + u16 pci_link_status = 0; + u16 pci_ctrl = 0; + + pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status); + pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl); + + pr_info("%s%s\n", BDX_NIC_NAME, + nic->port_num == 1 ? "" : ", 2-Port"); + pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n", + readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF, + readl(nic->regs + FPGA_SEED), + GET_LINK_STATUS_LANES(pci_link_status), + GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl)); +} + +static void print_fw_id(struct pci_nic *nic) +{ + pr_info("fw 0x%x\n", readl(nic->regs + FW_VER)); +} + +static void print_eth_id(struct net_device *ndev) +{ + netdev_info(ndev, "%s, Port %c\n", + BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B'); + +} + +/************************************************************************* + * Code * + *************************************************************************/ + +#define bdx_enable_interrupts(priv) \ + do { WRITE_REG(priv, regIMR, IR_RUN); } while (0) +#define bdx_disable_interrupts(priv) \ + do { WRITE_REG(priv, regIMR, 0); } while (0) + +/** + * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication. 
+ * @priv: NIC private structure + * @f: fifo to initialize + * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB + * @reg_XXX: offsets of registers relative to base address + * + * 1K extra space is allocated at the end of the fifo to simplify + * processing of descriptors that wraps around fifo's end + * + * Returns 0 on success, negative value on failure + * + */ +static int +bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type, + u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR) +{ + u16 memsz = FIFO_SIZE * (1 << fsz_type); + + memset(f, 0, sizeof(struct fifo)); + /* pci_alloc_consistent gives us 4k-aligned memory */ + f->va = pci_alloc_consistent(priv->pdev, + memsz + FIFO_EXTRA_SPACE, &f->da); + if (!f->va) { + pr_err("pci_alloc_consistent failed\n"); + RET(-ENOMEM); + } + f->reg_CFG0 = reg_CFG0; + f->reg_CFG1 = reg_CFG1; + f->reg_RPTR = reg_RPTR; + f->reg_WPTR = reg_WPTR; + f->rptr = 0; + f->wptr = 0; + f->memsz = memsz; + f->size_mask = memsz - 1; + WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type)); + WRITE_REG(priv, reg_CFG1, H32_64(f->da)); + + RET(0); +} + +/** + * bdx_fifo_free - free all resources used by fifo + * @priv: NIC private structure + * @f: fifo to release + */ +static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) +{ + ENTER; + if (f->va) { + pci_free_consistent(priv->pdev, + f->memsz + FIFO_EXTRA_SPACE, f->va, f->da); + f->va = NULL; + } + RET(); +} + +/** + * bdx_link_changed - notifies OS about hw link state. + * @priv: hw adapter structure + */ +static void bdx_link_changed(struct bdx_priv *priv) +{ + u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT; + + if (!link) { + if (netif_carrier_ok(priv->ndev)) { + netif_stop_queue(priv->ndev); + netif_carrier_off(priv->ndev); + netdev_err(priv->ndev, "Link Down\n"); + } + } else { + if (!netif_carrier_ok(priv->ndev)) { + netif_wake_queue(priv->ndev); + netif_carrier_on(priv->ndev); + netdev_err(priv->ndev, "Link Up\n"); + } + } +} + +static void bdx_isr_extra(struct bdx_priv *priv, u32 isr) +{ + if (isr & IR_RX_FREE_0) { + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + DBG("RX_FREE_0\n"); + } + + if (isr & IR_LNKCHG0) + bdx_link_changed(priv); + + if (isr & IR_PCIE_LINK) + netdev_err(priv->ndev, "PCI-E Link Fault\n"); + + if (isr & IR_PCIE_TOUT) + netdev_err(priv->ndev, "PCI-E Time Out\n"); + +} + +/** + * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC + * @irq: interrupt number + * @dev: network device + * + * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise + * + * It reads ISR register to know interrupt reasons, and proceed them one by one. 
+ * Reasons of interest are: + * RX_DESC - new packet has arrived and RXD fifo holds its descriptor + * RX_FREE - number of free Rx buffers in RXF fifo gets low + * TX_FREE - packet was transmited and RXF fifo holds its descriptor + */ + +static irqreturn_t bdx_isr_napi(int irq, void *dev) +{ + struct net_device *ndev = dev; + struct bdx_priv *priv = netdev_priv(ndev); + u32 isr; + + ENTER; + isr = (READ_REG(priv, regISR) & IR_RUN); + if (unlikely(!isr)) { + bdx_enable_interrupts(priv); + return IRQ_NONE; /* Not our interrupt */ + } + + if (isr & IR_EXTRA) + bdx_isr_extra(priv, isr); + + if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { + if (likely(napi_schedule_prep(&priv->napi))) { + __napi_schedule(&priv->napi); + RET(IRQ_HANDLED); + } else { + /* NOTE: we get here if intr has slipped into window + * between these lines in bdx_poll: + * bdx_enable_interrupts(priv); + * return 0; + * currently intrs are disabled (since we read ISR), + * and we have failed to register next poll. + * so we read the regs to trigger chip + * and allow further interupts. */ + READ_REG(priv, regTXF_WPTR_0); + READ_REG(priv, regRXD_WPTR_0); + } + } + + bdx_enable_interrupts(priv); + RET(IRQ_HANDLED); +} + +static int bdx_poll(struct napi_struct *napi, int budget) +{ + struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi); + int work_done; + + ENTER; + bdx_tx_cleanup(priv); + work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget); + if ((work_done < budget) || + (priv->napi_stop++ >= 30)) { + DBG("rx poll is done. backing to isr-driven\n"); + + /* from time to time we exit to let NAPI layer release + * device lock and allow waiting tasks (eg rmmod) to advance) */ + priv->napi_stop = 0; + + napi_complete(napi); + bdx_enable_interrupts(priv); + } + return work_done; +} + +/** + * bdx_fw_load - loads firmware to NIC + * @priv: NIC private structure + * + * Firmware is loaded via TXD fifo, so it must be initialized first. + * Firware must be loaded once per NIC not per PCI device provided by NIC (NIC + * can have few of them). So all drivers use semaphore register to choose one + * that will actually load FW to NIC. 
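+ *
+ * In outline (mirroring the code below):
+ *
+ *	master = READ_REG(priv, regINIT_SEMAPHORE);
+ *	if (!READ_REG(priv, regINIT_STATUS) && master)
+ *		push the firmware image through the TXD fifo
+ *	poll regINIT_STATUS (with a timeout) until the NIC reports completion
+ *	if (master)
+ *		WRITE_REG(priv, regINIT_SEMAPHORE, 1)	- release the semaphore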
+ */ + +static int bdx_fw_load(struct bdx_priv *priv) +{ + const struct firmware *fw = NULL; + int master, i; + int rc; + + ENTER; + master = READ_REG(priv, regINIT_SEMAPHORE); + if (!READ_REG(priv, regINIT_STATUS) && master) { + rc = reject_firmware(&fw, "/*(DEBLOBBED)*/", &priv->pdev->dev); + if (rc) + goto out; + bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size); + mdelay(100); + } + for (i = 0; i < 200; i++) { + if (READ_REG(priv, regINIT_STATUS)) { + rc = 0; + goto out; + } + mdelay(2); + } + rc = -EIO; +out: + if (master) + WRITE_REG(priv, regINIT_SEMAPHORE, 1); + + release_firmware(fw); + + if (rc) { + netdev_err(priv->ndev, "firmware loading failed\n"); + if (rc == -EIO) + DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n", + READ_REG(priv, regVPC), + READ_REG(priv, regVIC), + READ_REG(priv, regINIT_STATUS), i); + RET(rc); + } else { + DBG("%s: firmware loading success\n", priv->ndev->name); + RET(0); + } +} + +static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv) +{ + u32 val; + + ENTER; + DBG("mac0=%x mac1=%x mac2=%x\n", + READ_REG(priv, regUNC_MAC0_A), + READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A)); + + val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]); + WRITE_REG(priv, regUNC_MAC2_A, val); + val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]); + WRITE_REG(priv, regUNC_MAC1_A, val); + val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]); + WRITE_REG(priv, regUNC_MAC0_A, val); + + DBG("mac0=%x mac1=%x mac2=%x\n", + READ_REG(priv, regUNC_MAC0_A), + READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A)); + RET(); +} + +/** + * bdx_hw_start - inits registers and starts HW's Rx and Tx engines + * @priv: NIC private structure + */ +static int bdx_hw_start(struct bdx_priv *priv) +{ + int rc = -EIO; + struct net_device *ndev = priv->ndev; + + ENTER; + bdx_link_changed(priv); + + /* 10G overall max length (vlan, eth&ip header, ip payload, crc) */ + WRITE_REG(priv, regFRM_LENGTH, 0X3FE0); + WRITE_REG(priv, regPAUSE_QUANT, 0x96); + WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010); + WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010); + WRITE_REG(priv, regRX_FULLNESS, 0); + WRITE_REG(priv, regTX_FULLNESS, 0); + WRITE_REG(priv, regCTRLST, + regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA); + + WRITE_REG(priv, regVGLB, 0); + WRITE_REG(priv, regMAX_FRAME_A, + priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL); + + DBG("RDINTCM=%08x\n", priv->rdintcm); /*NOTE: test script uses this */ + WRITE_REG(priv, regRDINTCM0, priv->rdintcm); + WRITE_REG(priv, regRDINTCM2, 0); /*cpu_to_le32(rcm.val)); */ + + DBG("TDINTCM=%08x\n", priv->tdintcm); /*NOTE: test script uses this */ + WRITE_REG(priv, regTDINTCM0, priv->tdintcm); /* old val = 0x300064 */ + + /* Enable timer interrupt once in 2 secs. */ + /*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */ + bdx_restore_mac(priv->ndev, priv); + + WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN | + GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB); + +#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 
0 : IRQF_SHARED) + + rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE, + ndev->name, ndev); + if (rc) + goto err_irq; + bdx_enable_interrupts(priv); + + RET(0); + +err_irq: + RET(rc); +} + +static void bdx_hw_stop(struct bdx_priv *priv) +{ + ENTER; + bdx_disable_interrupts(priv); + free_irq(priv->pdev->irq, priv->ndev); + + netif_carrier_off(priv->ndev); + netif_stop_queue(priv->ndev); + + RET(); +} + +static int bdx_hw_reset_direct(void __iomem *regs) +{ + u32 val, i; + ENTER; + + /* reset sequences: read, write 1, read, write 0 */ + val = readl(regs + regCLKPLL); + writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL); + udelay(50); + val = readl(regs + regCLKPLL); + writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL); + + /* check that the PLLs are locked and reset ended */ + for (i = 0; i < 70; i++, mdelay(10)) + if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) { + /* do any PCI-E read transaction */ + readl(regs + regRXD_CFG0_0); + return 0; + } + pr_err("HW reset failed\n"); + return 1; /* failure */ +} + +static int bdx_hw_reset(struct bdx_priv *priv) +{ + u32 val, i; + ENTER; + + if (priv->port == 0) { + /* reset sequences: read, write 1, read, write 0 */ + val = READ_REG(priv, regCLKPLL); + WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8); + udelay(50); + val = READ_REG(priv, regCLKPLL); + WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST); + } + /* check that the PLLs are locked and reset ended */ + for (i = 0; i < 70; i++, mdelay(10)) + if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) { + /* do any PCI-E read transaction */ + READ_REG(priv, regRXD_CFG0_0); + return 0; + } + pr_err("HW reset failed\n"); + return 1; /* failure */ +} + +static int bdx_sw_reset(struct bdx_priv *priv) +{ + int i; + + ENTER; + /* 1. load MAC (obsolete) */ + /* 2. disable Rx (and Tx) */ + WRITE_REG(priv, regGMAC_RXF_A, 0); + mdelay(100); + /* 3. disable port */ + WRITE_REG(priv, regDIS_PORT, 1); + /* 4. disable queue */ + WRITE_REG(priv, regDIS_QU, 1); + /* 5. wait until hw is disabled */ + for (i = 0; i < 50; i++) { + if (READ_REG(priv, regRST_PORT) & 1) + break; + mdelay(10); + } + if (i == 50) + netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n"); + + /* 6. disable intrs */ + WRITE_REG(priv, regRDINTCM0, 0); + WRITE_REG(priv, regTDINTCM0, 0); + WRITE_REG(priv, regIMR, 0); + READ_REG(priv, regISR); + + /* 7. reset queue */ + WRITE_REG(priv, regRST_QU, 1); + /* 8. reset port */ + WRITE_REG(priv, regRST_PORT, 1); + /* 9. zero all read and write pointers */ + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR); + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + WRITE_REG(priv, i, 0); + /* 10. unseet port disable */ + WRITE_REG(priv, regDIS_PORT, 0); + /* 11. unset queue disable */ + WRITE_REG(priv, regDIS_QU, 0); + /* 12. unset queue reset */ + WRITE_REG(priv, regRST_QU, 0); + /* 13. unset port reset */ + WRITE_REG(priv, regRST_PORT, 0); + /* 14. enable Rx */ + /* skiped. will be done later */ + /* 15. save MAC (obsolete) */ + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR); + + RET(0); +} + +/* bdx_reset - performs right type of reset depending on hw type */ +static int bdx_reset(struct bdx_priv *priv) +{ + ENTER; + RET((priv->pdev->device == 0x3009) + ? 
bdx_hw_reset(priv) + : bdx_sw_reset(priv)); +} + +/** + * bdx_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int bdx_close(struct net_device *ndev) +{ + struct bdx_priv *priv = NULL; + + ENTER; + priv = netdev_priv(ndev); + + napi_disable(&priv->napi); + + bdx_reset(priv); + bdx_hw_stop(priv); + bdx_rx_free(priv); + bdx_tx_free(priv); + RET(0); +} + +/** + * bdx_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int bdx_open(struct net_device *ndev) +{ + struct bdx_priv *priv; + int rc; + + ENTER; + priv = netdev_priv(ndev); + bdx_reset(priv); + if (netif_running(ndev)) + netif_stop_queue(priv->ndev); + + if ((rc = bdx_tx_init(priv)) || + (rc = bdx_rx_init(priv)) || + (rc = bdx_fw_load(priv))) + goto err; + + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + + rc = bdx_hw_start(priv); + if (rc) + goto err; + + napi_enable(&priv->napi); + + print_fw_id(priv->nic); + + RET(0); + +err: + bdx_close(ndev); + RET(rc); +} + +static int bdx_range_check(struct bdx_priv *priv, u32 offset) +{ + return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ? 
+ -EINVAL : 0; +} + +static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + struct bdx_priv *priv = netdev_priv(ndev); + u32 data[3]; + int error; + + ENTER; + + DBG("jiffies=%ld cmd=%d\n", jiffies, cmd); + if (cmd != SIOCDEVPRIVATE) { + error = copy_from_user(data, ifr->ifr_data, sizeof(data)); + if (error) { + pr_err("can't copy from user\n"); + RET(-EFAULT); + } + DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); + } + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + switch (data[0]) { + + case BDX_OP_READ: + error = bdx_range_check(priv, data[1]); + if (error < 0) + return error; + data[2] = READ_REG(priv, data[1]); + DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2], + data[2]); + error = copy_to_user(ifr->ifr_data, data, sizeof(data)); + if (error) + RET(-EFAULT); + break; + + case BDX_OP_WRITE: + error = bdx_range_check(priv, data[1]); + if (error < 0) + return error; + WRITE_REG(priv, data[1], data[2]); + DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]); + break; + + default: + RET(-EOPNOTSUPP); + } + return 0; +} + +static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + ENTER; + if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) + RET(bdx_ioctl_priv(ndev, ifr, cmd)); + else + RET(-EOPNOTSUPP); +} + +/** + * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid + * @ndev: network device + * @vid: VLAN vid + * @op: add or kill operation + * + * Passes VLAN filter table to hardware + */ +static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) +{ + struct bdx_priv *priv = netdev_priv(ndev); + u32 reg, bit, val; + + ENTER; + DBG2("vid=%d value=%d\n", (int)vid, enable); + if (unlikely(vid >= 4096)) { + pr_err("invalid VID: %u (> 4096)\n", vid); + RET(); + } + reg = regVLAN_0 + (vid / 32) * 4; + bit = 1 << vid % 32; + val = READ_REG(priv, reg); + DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit); + if (enable) + val |= bit; + else + val &= ~bit; + DBG2("new val %x\n", val); + WRITE_REG(priv, reg, val); + RET(); +} + +/** + * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table + * @ndev: network device + * @vid: VLAN vid to add + */ +static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + __bdx_vlan_rx_vid(ndev, vid, 1); + return 0; +} + +/** + * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table + * @ndev: network device + * @vid: VLAN vid to kill + */ +static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + __bdx_vlan_rx_vid(ndev, vid, 0); + return 0; +} + +/** + * bdx_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int bdx_change_mtu(struct net_device *ndev, int new_mtu) +{ + ENTER; + + if (new_mtu == ndev->mtu) + RET(0); + + /* enforce minimum frame size */ + if (new_mtu < ETH_ZLEN) { + netdev_err(ndev, "mtu %d is less then minimal %d\n", + new_mtu, ETH_ZLEN); + RET(-EINVAL); + } + + ndev->mtu = new_mtu; + if (netif_running(ndev)) { + bdx_close(ndev); + bdx_open(ndev); + } + RET(0); +} + +static void bdx_setmulti(struct net_device *ndev) +{ + struct bdx_priv *priv = netdev_priv(ndev); + + u32 rxf_val = + GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN; + int i; + + ENTER; + /* IMF - imperfect (hash) rx multicat filter */ + /* PMF - perfect rx multicat filter */ + + /* FIXME: RXE(OFF) */ + if (ndev->flags & 
IFF_PROMISC) { + rxf_val |= GMAC_RX_FILTER_PRM; + } else if (ndev->flags & IFF_ALLMULTI) { + /* set IMF to accept all multicast frmaes */ + for (i = 0; i < MAC_MCST_HASH_NUM; i++) + WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0); + } else if (!netdev_mc_empty(ndev)) { + u8 hash; + struct netdev_hw_addr *ha; + u32 reg, val; + + /* set IMF to deny all multicast frames */ + for (i = 0; i < MAC_MCST_HASH_NUM; i++) + WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0); + /* set PMF to deny all multicast frames */ + for (i = 0; i < MAC_MCST_NUM; i++) { + WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0); + WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0); + } + + /* use PMF to accept first MAC_MCST_NUM (15) addresses */ + /* TBD: sort addresses and write them in ascending order + * into RX_MAC_MCST regs. we skip this phase now and accept ALL + * multicast frames throu IMF */ + /* accept the rest of addresses throu IMF */ + netdev_for_each_mc_addr(ha, ndev) { + hash = 0; + for (i = 0; i < ETH_ALEN; i++) + hash ^= ha->addr[i]; + reg = regRX_MCST_HASH0 + ((hash >> 5) << 2); + val = READ_REG(priv, reg); + val |= (1 << (hash % 32)); + WRITE_REG(priv, reg, val); + } + + } else { + DBG("only own mac %d\n", netdev_mc_count(ndev)); + rxf_val |= GMAC_RX_FILTER_AB; + } + WRITE_REG(priv, regGMAC_RXF_A, rxf_val); + /* enable RX */ + /* FIXME: RXE(ON) */ + RET(); +} + +static int bdx_set_mac(struct net_device *ndev, void *p) +{ + struct bdx_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = p; + + ENTER; + /* + if (netif_running(dev)) + return -EBUSY + */ + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + bdx_restore_mac(ndev, priv); + RET(0); +} + +static int bdx_read_mac(struct bdx_priv *priv) +{ + u16 macAddress[3], i; + ENTER; + + macAddress[2] = READ_REG(priv, regUNC_MAC0_A); + macAddress[2] = READ_REG(priv, regUNC_MAC0_A); + macAddress[1] = READ_REG(priv, regUNC_MAC1_A); + macAddress[1] = READ_REG(priv, regUNC_MAC1_A); + macAddress[0] = READ_REG(priv, regUNC_MAC2_A); + macAddress[0] = READ_REG(priv, regUNC_MAC2_A); + for (i = 0; i < 3; i++) { + priv->ndev->dev_addr[i * 2 + 1] = macAddress[i]; + priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8; + } + RET(0); +} + +static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg) +{ + u64 val; + + val = READ_REG(priv, reg); + val |= ((u64) READ_REG(priv, reg + 8)) << 32; + return val; +} + +/*Do the statistics-update work*/ +static void bdx_update_stats(struct bdx_priv *priv) +{ + struct bdx_stats *stats = &priv->hw_stats; + u64 *stats_vector = (u64 *) stats; + int i; + int addr; + + /*Fill HW structure */ + addr = 0x7200; + /*First 12 statistics - 0x7200 - 0x72B0 */ + for (i = 0; i < 12; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x72C0); + /* 0x72C0-0x72E0 RSRV */ + addr = 0x72F0; + for (; i < 16; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x7330); + /* 0x7330-0x7360 RSRV */ + addr = 0x7370; + for (; i < 19; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x73A0); + /* 0x73A0-0x73B0 RSRV */ + addr = 0x73C0; + for (; i < 23; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x7400); + BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i); +} + +static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, + u16 rxd_vlan); +static void print_rxfd(struct rxf_desc *rxfd); + +/************************************************************************* + * Rx 
DB * + *************************************************************************/ + +static void bdx_rxdb_destroy(struct rxdb *db) +{ + vfree(db); +} + +static struct rxdb *bdx_rxdb_create(int nelem) +{ + struct rxdb *db; + int i; + + db = vmalloc(sizeof(struct rxdb) + + (nelem * sizeof(int)) + + (nelem * sizeof(struct rx_map))); + if (likely(db != NULL)) { + db->stack = (int *)(db + 1); + db->elems = (void *)(db->stack + nelem); + db->nelem = nelem; + db->top = nelem; + for (i = 0; i < nelem; i++) + db->stack[i] = nelem - i - 1; /* to make first allocs + close to db struct*/ + } + + return db; +} + +static inline int bdx_rxdb_alloc_elem(struct rxdb *db) +{ + BDX_ASSERT(db->top <= 0); + return db->stack[--(db->top)]; +} + +static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n) +{ + BDX_ASSERT((n < 0) || (n >= db->nelem)); + return db->elems + n; +} + +static inline int bdx_rxdb_available(struct rxdb *db) +{ + return db->top; +} + +static inline void bdx_rxdb_free_elem(struct rxdb *db, int n) +{ + BDX_ASSERT((n >= db->nelem) || (n < 0)); + db->stack[(db->top)++] = n; +} + +/************************************************************************* + * Rx Init * + *************************************************************************/ + +/** + * bdx_rx_init - initialize RX all related HW and SW resources + * @priv: NIC private structure + * + * Returns 0 on success, negative value on failure + * + * It creates rxf and rxd fifos, update relevant HW registers, preallocate + * skb for rx. It assumes that Rx is desabled in HW + * funcs are grouped for better cache usage + * + * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be + * filled and packets will be dropped by nic without getting into host or + * cousing interrupt. 
Anyway, in that condition, host has no chance to process + * all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles + */ + +/* TBD: ensure proper packet size */ + +static int bdx_rx_init(struct bdx_priv *priv) +{ + ENTER; + + if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size, + regRXD_CFG0_0, regRXD_CFG1_0, + regRXD_RPTR_0, regRXD_WPTR_0)) + goto err_mem; + if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size, + regRXF_CFG0_0, regRXF_CFG1_0, + regRXF_RPTR_0, regRXF_WPTR_0)) + goto err_mem; + priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz / + sizeof(struct rxf_desc)); + if (!priv->rxdb) + goto err_mem; + + priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN; + return 0; + +err_mem: + netdev_err(priv->ndev, "Rx init failed\n"); + return -ENOMEM; +} + +/** + * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo + * @priv: NIC private structure + * @f: RXF fifo + */ +static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f) +{ + struct rx_map *dm; + struct rxdb *db = priv->rxdb; + u16 i; + + ENTER; + DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db), + db->nelem - bdx_rxdb_available(db)); + while (bdx_rxdb_available(db) > 0) { + i = bdx_rxdb_alloc_elem(db); + dm = bdx_rxdb_addr_elem(db, i); + dm->dma = 0; + } + for (i = 0; i < db->nelem; i++) { + dm = bdx_rxdb_addr_elem(db, i); + if (dm->dma) { + pci_unmap_single(priv->pdev, + dm->dma, f->m.pktsz, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(dm->skb); + } + } +} + +/** + * bdx_rx_free - release all Rx resources + * @priv: NIC private structure + * + * It assumes that Rx is desabled in HW + */ +static void bdx_rx_free(struct bdx_priv *priv) +{ + ENTER; + if (priv->rxdb) { + bdx_rx_free_skbs(priv, &priv->rxf_fifo0); + bdx_rxdb_destroy(priv->rxdb); + priv->rxdb = NULL; + } + bdx_fifo_free(priv, &priv->rxf_fifo0.m); + bdx_fifo_free(priv, &priv->rxd_fifo0.m); + + RET(); +} + +/************************************************************************* + * Rx Engine * + *************************************************************************/ + +/** + * bdx_rx_alloc_skbs - fill rxf fifo with new skbs + * @priv: nic's private structure + * @f: RXF fifo that needs skbs + * + * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo. + * skb's virtual and physical addresses are stored in skb db. + * To calculate free space, func uses cached values of RPTR and WPTR + * When needed, it also updates RPTR and WPTR. 
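+ *
+ * The skb db index doubles as the buffer ID mentioned in the RX overview;
+ * roughly:
+ *
+ *	idx = bdx_rxdb_alloc_elem(db);
+ *	rxfd->va_lo = idx;	the ID travels to HW in the RxF descriptor
+ *
+ * and bdx_rx_receive() later uses rxdd->va_lo to find the same skb again.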
+ */ + +/* TBD: do not update WPTR if no desc were written */ + +static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f) +{ + struct sk_buff *skb; + struct rxf_desc *rxfd; + struct rx_map *dm; + int dno, delta, idx; + struct rxdb *db = priv->rxdb; + + ENTER; + dno = bdx_rxdb_available(db) - 1; + while (dno > 0) { + skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN); + if (!skb) + break; + + skb_reserve(skb, NET_IP_ALIGN); + + idx = bdx_rxdb_alloc_elem(db); + dm = bdx_rxdb_addr_elem(db, idx); + dm->dma = pci_map_single(priv->pdev, + skb->data, f->m.pktsz, + PCI_DMA_FROMDEVICE); + dm->skb = skb; + rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ + rxfd->va_lo = idx; + rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma)); + rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma)); + rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz); + print_rxfd(rxfd); + + f->m.wptr += sizeof(struct rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + DBG("wrapped descriptor\n"); + } + } + dno--; + } + /*TBD: to do - delayed rxf wptr like in txd */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + RET(); +} + +static inline void +NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan, + struct sk_buff *skb) +{ + ENTER; + DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1)); + if (GET_RXD_VTAG(rxd_val1)) { + DBG("%s: vlan rcv vlan '%x' vtag '%x'\n", + priv->ndev->name, + GET_RXD_VLAN_ID(rxd_vlan), + GET_RXD_VTAG(rxd_val1)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), GET_RXD_VLAN_TCI(rxd_vlan)); + } + netif_receive_skb(skb); +} + +static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) +{ + struct rxf_desc *rxfd; + struct rx_map *dm; + struct rxf_fifo *f; + struct rxdb *db; + struct sk_buff *skb; + int delta; + + ENTER; + DBG("priv=%p rxdd=%p\n", priv, rxdd); + f = &priv->rxf_fifo0; + db = priv->rxdb; + DBG("db=%p f=%p\n", db, f); + dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); + DBG("dm=%p\n", dm); + skb = dm->skb; + rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ + rxfd->va_lo = rxdd->va_lo; + rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma)); + rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma)); + rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz); + print_rxfd(rxfd); + + f->m.wptr += sizeof(struct rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + DBG("wrapped descriptor\n"); + } + } + RET(); +} + +/** + * bdx_rx_receive - receives full packets from RXD fifo and pass them to OS + * NOTE: a special treatment is given to non-continuous descriptors + * that start near the end, wraps around and continue at the beginning. a second + * part is copied right after the first, and then descriptor is interpreted as + * normal. 
fifo has an extra space to allow such operations + * @priv: nic's private structure + * @f: RXF fifo that needs skbs + * @budget: maximum number of packets to receive + */ + +/* TBD: replace memcpy func call by explicite inline asm */ + +static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget) +{ + struct net_device *ndev = priv->ndev; + struct sk_buff *skb, *skb2; + struct rxd_desc *rxdd; + struct rx_map *dm; + struct rxf_fifo *rxf_fifo; + int tmp_len, size; + int done = 0; + int max_done = BDX_MAX_RX_DONE; + struct rxdb *db = NULL; + /* Unmarshalled descriptor - copy of descriptor in host order */ + u32 rxd_val1; + u16 len; + u16 rxd_vlan; + + ENTER; + max_done = budget; + + f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR; + + size = f->m.wptr - f->m.rptr; + if (size < 0) + size = f->m.memsz + size; /* size is negative :-) */ + + while (size > 0) { + + rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr); + rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1); + + len = CPU_CHIP_SWAP16(rxdd->len); + + rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan); + + print_rxdd(rxdd, rxd_val1, len, rxd_vlan); + + tmp_len = GET_RXD_BC(rxd_val1) << 3; + BDX_ASSERT(tmp_len <= 0); + size -= tmp_len; + if (size < 0) /* test for partially arrived descriptor */ + break; + + f->m.rptr += tmp_len; + + tmp_len = f->m.rptr - f->m.memsz; + if (unlikely(tmp_len >= 0)) { + f->m.rptr = tmp_len; + if (tmp_len > 0) { + DBG("wrapped desc rptr=%d tmp_len=%d\n", + f->m.rptr, tmp_len); + memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len); + } + } + + if (unlikely(GET_RXD_ERR(rxd_val1))) { + DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1)); + ndev->stats.rx_errors++; + bdx_recycle_skb(priv, rxdd); + continue; + } + + rxf_fifo = &priv->rxf_fifo0; + db = priv->rxdb; + dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); + skb = dm->skb; + + if (len < BDX_COPYBREAK && + (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) { + skb_reserve(skb2, NET_IP_ALIGN); + /*skb_put(skb2, len); */ + pci_dma_sync_single_for_cpu(priv->pdev, + dm->dma, rxf_fifo->m.pktsz, + PCI_DMA_FROMDEVICE); + memcpy(skb2->data, skb->data, len); + bdx_recycle_skb(priv, rxdd); + skb = skb2; + } else { + pci_unmap_single(priv->pdev, + dm->dma, rxf_fifo->m.pktsz, + PCI_DMA_FROMDEVICE); + bdx_rxdb_free_elem(db, rxdd->va_lo); + } + + ndev->stats.rx_bytes += len; + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ndev); + + /* Non-IP packets aren't checksum-offloaded */ + if (GET_RXD_PKT_ID(rxd_val1) == 0) + skb_checksum_none_assert(skb); + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + + NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb); + + if (++done >= max_done) + break; + } + + ndev->stats.rx_packets += done; + + /* FIXME: do smth to minimize pci accesses */ + WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); + + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + + RET(done); +} + +/************************************************************************* + * Debug / Temprorary Code * + *************************************************************************/ +static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, + u16 rxd_vlan) +{ + DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n", + GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1), + GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1), + GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1), + GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan), + GET_RXD_CFI(rxd_vlan), 
GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
+ rxdd->va_hi);
+}
+
+static void print_rxfd(struct rxf_desc *rxfd)
+{
+ DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
+ "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
+ rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
+}
+
+/*
+ * TX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of TX communication channels between driver and NIC.
+ * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
+ * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
+ *
+ * Currently the NIC supports TSO, checksumming and gather DMA;
+ * UFO and IP fragmentation are on the way.
+ *
+ * TX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * txdb - used to keep track of all skbs owned by SW and their dma addresses.
+ * For the TX case, ownership lasts from getting the packet via hard_xmit until
+ * HW acknowledges the send via TXF descriptors.
+ * Implemented as a cyclic buffer.
+ * fifo - keeps info about the fifo's size and location, relevant HW registers,
+ * usage and skb db. Each TXD and TXF Fifo has its own fifo structure.
+ * Implemented as a simple struct.
+ *
+ * TX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * The OS calls the driver's hard_xmit method with a packet to send.
+ * The driver creates DMA mappings, builds TXD descriptors and kicks HW
+ * by updating the TXD WPTR.
+ * When the packet is sent, HW writes a TXF descriptor back to us and SW frees
+ * the original skb.
+ * To prevent TXD fifo overflow without reading HW registers every time,
+ * SW deploys the "tx level" technique.
+ * Upon start-up, the tx level is initialized to the TXD fifo length.
+ * For every sent packet, SW gets its TXD descriptor size
+ * (from a precalculated array) and subtracts it from the tx level.
+ * The size is also stored in the txdb. When a TXF ack arrives, SW fetches the
+ * size of the original TXD descriptor from the txdb and adds it back to the
+ * tx level.
+ * When the tx level drops under a predefined threshold, the driver
+ * stops the TX queue. When the tx level rises above that level,
+ * the tx queue is enabled again.
+ *
+ * This technique avoids excessive reading of the RPTR and WPTR registers.
+ * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
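+ *
+ * A compressed sketch of that bookkeeping (the real code is in
+ * bdx_tx_transmit() and bdx_tx_cleanup() below):
+ *
+ *	on xmit:	tx_level -= txd_sizes[nr_frags].bytes;
+ *			if (tx_level < BDX_MIN_TX_LEVEL)
+ *				netif_stop_queue(ndev);
+ *	on TXF ack:	tx_level += size saved in the txdb;
+ *			if (stopped && tx_level >= BDX_MIN_TX_LEVEL)
+ *				netif_wake_queue(ndev);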
+ */ + +/************************************************************************* + * Tx DB * + *************************************************************************/ +static inline int bdx_tx_db_size(struct txdb *db) +{ + int taken = db->wptr - db->rptr; + if (taken < 0) + taken = db->size + 1 + taken; /* (size + 1) equals memsz */ + + return db->size - taken; +} + +/** + * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap + * @db: tx data base + * @pptr: read or write pointer + */ +static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr) +{ + BDX_ASSERT(db == NULL || pptr == NULL); /* sanity */ + + BDX_ASSERT(*pptr != db->rptr && /* expect either read */ + *pptr != db->wptr); /* or write pointer */ + + BDX_ASSERT(*pptr < db->start || /* pointer has to be */ + *pptr >= db->end); /* in range */ + + ++*pptr; + if (unlikely(*pptr == db->end)) + *pptr = db->start; +} + +/** + * bdx_tx_db_inc_rptr - increment read pointer + * @db: tx data base + */ +static inline void bdx_tx_db_inc_rptr(struct txdb *db) +{ + BDX_ASSERT(db->rptr == db->wptr); /* can't read from empty db */ + __bdx_tx_db_ptr_next(db, &db->rptr); +} + +/** + * bdx_tx_db_inc_wptr - increment write pointer + * @db: tx data base + */ +static inline void bdx_tx_db_inc_wptr(struct txdb *db) +{ + __bdx_tx_db_ptr_next(db, &db->wptr); + BDX_ASSERT(db->rptr == db->wptr); /* we can not get empty db as + a result of write */ +} + +/** + * bdx_tx_db_init - creates and initializes tx db + * @d: tx data base + * @sz_type: size of tx fifo + * + * Returns 0 on success, error code otherwise + */ +static int bdx_tx_db_init(struct txdb *d, int sz_type) +{ + int memsz = FIFO_SIZE * (1 << (sz_type + 1)); + + d->start = vmalloc(memsz); + if (!d->start) + return -ENOMEM; + + /* + * In order to differentiate between db is empty and db is full + * states at least one element should always be empty in order to + * avoid rptr == wptr which means db is empty + */ + d->size = memsz / sizeof(struct tx_map) - 1; + d->end = d->start + d->size + 1; /* just after last element */ + + /* all dbs are created equally empty */ + d->rptr = d->start; + d->wptr = d->start; + + return 0; +} + +/** + * bdx_tx_db_close - closes tx db and frees all memory + * @d: tx data base + */ +static void bdx_tx_db_close(struct txdb *d) +{ + BDX_ASSERT(d == NULL); + + vfree(d->start); + d->start = NULL; +} + +/************************************************************************* + * Tx Engine * + *************************************************************************/ + +/* sizes of tx desc (including padding if needed) as function + * of skb's frag number */ +static struct { + u16 bytes; + u16 qwords; /* qword = 64 bit */ +} txd_sizes[MAX_SKB_FRAGS + 1]; + +/** + * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks + * @priv: NIC private structure + * @skb: socket buffer to map + * @txdd: TX descriptor to use + * + * It makes dma mappings for skb's data blocks and writes them to PBL of + * new tx descriptor. It also stores them in the tx db, so they could be + * unmaped after data was sent. It is reponsibility of a caller to make + * sure that there is enough space in the tx db. 
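+ * Per packet the db receives one {dma address, length} entry for the linear
+ * head and one for each page fragment, plus a final clean-up entry.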
Last element holds pointer + * to skb itself and marked with zero length + */ +static inline void +bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb, + struct txd_desc *txdd) +{ + struct txdb *db = &priv->txdb; + struct pbl *pbl = &txdd->pbl[0]; + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; + + db->wptr->len = skb_headlen(skb); + db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data, + db->wptr->len, PCI_DMA_TODEVICE); + pbl->len = CPU_CHIP_SWAP32(db->wptr->len); + pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma)); + pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma)); + DBG("=== pbl len: 0x%x ================\n", pbl->len); + DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo); + DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi); + bdx_tx_db_inc_wptr(db); + + for (i = 0; i < nr_frags; i++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[i]; + db->wptr->len = skb_frag_size(frag); + db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag, + 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + pbl++; + pbl->len = CPU_CHIP_SWAP32(db->wptr->len); + pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma)); + pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma)); + bdx_tx_db_inc_wptr(db); + } + + /* add skb clean up info. */ + db->wptr->len = -txd_sizes[nr_frags].bytes; + db->wptr->addr.skb = skb; + bdx_tx_db_inc_wptr(db); +} + +/* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags + * number of frags is used as index to fetch correct descriptors size, + * instead of calculating it each time */ +static void __init init_txd_sizes(void) +{ + int i, lwords; + + /* 7 - is number of lwords in txd with one phys buffer + * 3 - is number of lwords used for every additional phys buffer */ + for (i = 0; i < MAX_SKB_FRAGS + 1; i++) { + lwords = 7 + (i * 3); + if (lwords & 1) + lwords++; /* pad it with 1 lword */ + txd_sizes[i].qwords = lwords >> 1; + txd_sizes[i].bytes = lwords << 2; + } +} + +/* bdx_tx_init - initialize all Tx related stuff. + * Namely, TXD and TXF fifos, database etc */ +static int bdx_tx_init(struct bdx_priv *priv) +{ + if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size, + regTXD_CFG0_0, + regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0)) + goto err_mem; + if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size, + regTXF_CFG0_0, + regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0)) + goto err_mem; + + /* The TX db has to keep mappings for all packets sent (on TxD) + * and not yet reclaimed (on TxF) */ + if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size))) + goto err_mem; + + priv->tx_level = BDX_MAX_TX_LEVEL; +#ifdef BDX_DELAY_WPTR + priv->tx_update_mark = priv->tx_level - 1024; +#endif + return 0; + +err_mem: + netdev_err(priv->ndev, "Tx init failed\n"); + return -ENOMEM; +} + +/** + * bdx_tx_space - calculates available space in TX fifo + * @priv: NIC private structure + * + * Returns available space in TX fifo in bytes + */ +static inline int bdx_tx_space(struct bdx_priv *priv) +{ + struct txd_fifo *f = &priv->txd_fifo0; + int fsize; + + f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR; + fsize = f->m.rptr - f->m.wptr; + if (fsize <= 0) + fsize = f->m.memsz + fsize; + return fsize; +} + +/** + * bdx_tx_transmit - send packet to NIC + * @skb: packet to send + * @ndev: network device assigned to NIC + * Return codes: + * o NETDEV_TX_OK everything ok. 
+ * o NETDEV_TX_BUSY Cannot transmit packet, try later + * Usually a bug, means queue start/stop flow control is broken in + * the driver. Note: the driver must NOT put the skb in its DMA ring. + * o NETDEV_TX_LOCKED Locking failed, please retry quickly. + */ +static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct bdx_priv *priv = netdev_priv(ndev); + struct txd_fifo *f = &priv->txd_fifo0; + int txd_checksum = 7; /* full checksum */ + int txd_lgsnd = 0; + int txd_vlan_id = 0; + int txd_vtag = 0; + int txd_mss = 0; + + int nr_frags = skb_shinfo(skb)->nr_frags; + struct txd_desc *txdd; + int len; + unsigned long flags; + + ENTER; + local_irq_save(flags); + if (!spin_trylock(&priv->tx_lock)) { + local_irq_restore(flags); + DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", + BDX_DRV_NAME, ndev->name); + return NETDEV_TX_LOCKED; + } + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ + txdd = (struct txd_desc *)(f->m.va + f->m.wptr); + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + txd_checksum = 0; + + if (skb_shinfo(skb)->gso_size) { + txd_mss = skb_shinfo(skb)->gso_size; + txd_lgsnd = 1; + DBG("skb %p skb len %d gso size = %d\n", skb, skb->len, + txd_mss); + } + + if (skb_vlan_tag_present(skb)) { + /*Cut VLAN ID to 12 bits */ + txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12); + txd_vtag = 1; + } + + txdd->length = CPU_CHIP_SWAP16(skb->len); + txdd->mss = CPU_CHIP_SWAP16(txd_mss); + txdd->txd_val1 = + CPU_CHIP_SWAP32(TXD_W1_VAL + (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag, + txd_lgsnd, txd_vlan_id)); + DBG("=== TxD desc =====================\n"); + DBG("=== w1: 0x%x ================\n", txdd->txd_val1); + DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length); + + bdx_tx_map_skb(priv, skb, txdd); + + /* increment TXD write pointer. In case of + fifo wrapping copy reminder of the descriptor + to the beginning */ + f->m.wptr += txd_sizes[nr_frags].bytes; + len = f->m.wptr - f->m.memsz; + if (unlikely(len >= 0)) { + f->m.wptr = len; + if (len > 0) { + BDX_ASSERT(len > f->m.memsz); + memcpy(f->m.va, f->m.va + f->m.memsz, len); + } + } + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* finished with valid wptr */ + + priv->tx_level -= txd_sizes[nr_frags].bytes; + BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); +#ifdef BDX_DELAY_WPTR + if (priv->tx_level > priv->tx_update_mark) { + /* Force memory writes to complete before letting h/w + know there are new descriptors to fetch. + (might be needed on platforms like IA64) + wmb(); */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + } else { + if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) { + priv->tx_noupd = 0; + WRITE_REG(priv, f->m.reg_WPTR, + f->m.wptr & TXF_WPTR_WR_PTR); + } + } +#else + /* Force memory writes to complete before letting h/w + know there are new descriptors to fetch. + (might be needed on platforms like IA64) + wmb(); */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + +#endif +#ifdef BDX_LLTX + ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ +#endif + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + + if (priv->tx_level < BDX_MIN_TX_LEVEL) { + DBG("%s: %s: TX Q STOP level %d\n", + BDX_DRV_NAME, ndev->name, priv->tx_level); + netif_stop_queue(ndev); + } + + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_OK; +} + +/** + * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. 
+ * @priv: bdx adapter + * + * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS + * that those packets were sent + */ +static void bdx_tx_cleanup(struct bdx_priv *priv) +{ + struct txf_fifo *f = &priv->txf_fifo0; + struct txdb *db = &priv->txdb; + int tx_level = 0; + + ENTER; + f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK; + BDX_ASSERT(f->m.rptr >= f->m.memsz); /* started with valid rptr */ + + while (f->m.wptr != f->m.rptr) { + f->m.rptr += BDX_TXF_DESC_SZ; + f->m.rptr &= f->m.size_mask; + + /* unmap all the fragments */ + /* first has to come tx_maps containing dma */ + BDX_ASSERT(db->rptr->len == 0); + do { + BDX_ASSERT(db->rptr->addr.dma == 0); + pci_unmap_page(priv->pdev, db->rptr->addr.dma, + db->rptr->len, PCI_DMA_TODEVICE); + bdx_tx_db_inc_rptr(db); + } while (db->rptr->len > 0); + tx_level -= db->rptr->len; /* '-' koz len is negative */ + + /* now should come skb pointer - free it */ + dev_kfree_skb_irq(db->rptr->addr.skb); + bdx_tx_db_inc_rptr(db); + } + + /* let h/w know which TXF descriptors were cleaned */ + BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz); + WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); + + /* We reclaimed resources, so in case the Q is stopped by xmit callback, + * we resume the transmission and use tx_lock to synchronize with xmit.*/ + spin_lock(&priv->tx_lock); + priv->tx_level += tx_level; + BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); +#ifdef BDX_DELAY_WPTR + if (priv->tx_noupd) { + priv->tx_noupd = 0; + WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR, + priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR); + } +#endif + + if (unlikely(netif_queue_stopped(priv->ndev) && + netif_carrier_ok(priv->ndev) && + (priv->tx_level >= BDX_MIN_TX_LEVEL))) { + DBG("%s: %s: TX Q WAKE level %d\n", + BDX_DRV_NAME, priv->ndev->name, priv->tx_level); + netif_wake_queue(priv->ndev); + } + spin_unlock(&priv->tx_lock); +} + +/** + * bdx_tx_free_skbs - frees all skbs from TXD fifo. + * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod + */ +static void bdx_tx_free_skbs(struct bdx_priv *priv) +{ + struct txdb *db = &priv->txdb; + + ENTER; + while (db->rptr != db->wptr) { + if (likely(db->rptr->len)) + pci_unmap_page(priv->pdev, db->rptr->addr.dma, + db->rptr->len, PCI_DMA_TODEVICE); + else + dev_kfree_skb(db->rptr->addr.skb); + bdx_tx_db_inc_rptr(db); + } + RET(); +} + +/* bdx_tx_free - frees all Tx resources */ +static void bdx_tx_free(struct bdx_priv *priv) +{ + ENTER; + bdx_tx_free_skbs(priv); + bdx_fifo_free(priv, &priv->txd_fifo0.m); + bdx_fifo_free(priv, &priv->txf_fifo0.m); + bdx_tx_db_close(&priv->txdb); +} + +/** + * bdx_tx_push_desc - push descriptor to TxD fifo + * @priv: NIC private structure + * @data: desc's data + * @size: desc's size + * + * Pushes desc to TxD fifo and overlaps it if needed. + * NOTE: this func does not check for available space. this is responsibility + * of the caller. Neither does it check that data size is smaller than + * fifo size. 
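+ * When the descriptor crosses the end of the fifo it is split in two copies:
+ * the first (memsz - wptr) bytes go to the tail and the remainder to the
+ * beginning. For example, with memsz 4096, wptr 4090 and a 16-byte
+ * descriptor, 6 bytes land at the tail, 10 bytes at offset 0, and wptr ends
+ * up at 10.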
+ */ +static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size) +{ + struct txd_fifo *f = &priv->txd_fifo0; + int i = f->m.memsz - f->m.wptr; + + if (size == 0) + return; + + if (i > size) { + memcpy(f->m.va + f->m.wptr, data, size); + f->m.wptr += size; + } else { + memcpy(f->m.va + f->m.wptr, data, i); + f->m.wptr = size - i; + memcpy(f->m.va, data + i, f->m.wptr); + } + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); +} + +/** + * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way + * @priv: NIC private structure + * @data: desc's data + * @size: desc's size + * + * NOTE: this func does check for available space and, if necessary, waits for + * NIC to read existing data before writing new one. + */ +static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size) +{ + int timer = 0; + ENTER; + + while (size > 0) { + /* we substruct 8 because when fifo is full rptr == wptr + which also means that fifo is empty, we can understand + the difference, but could hw do the same ??? :) */ + int avail = bdx_tx_space(priv) - 8; + if (avail <= 0) { + if (timer++ > 300) { /* prevent endless loop */ + DBG("timeout while writing desc to TxD fifo\n"); + break; + } + udelay(50); /* give hw a chance to clean fifo */ + continue; + } + avail = min(avail, size); + DBG("about to push %d bytes starting %p size %d\n", avail, + data, size); + bdx_tx_push_desc(priv, data, avail); + size -= avail; + data += avail; + } + RET(); +} + +static const struct net_device_ops bdx_netdev_ops = { + .ndo_open = bdx_open, + .ndo_stop = bdx_close, + .ndo_start_xmit = bdx_tx_transmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = bdx_ioctl, + .ndo_set_rx_mode = bdx_setmulti, + .ndo_change_mtu = bdx_change_mtu, + .ndo_set_mac_address = bdx_set_mac, + .ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid, +}; + +/** + * bdx_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in bdx_pci_tbl + * + * Returns 0 on success, negative on failure + * + * bdx_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + * + * functions and their order used as explained in + * /usr/src/linux/Documentation/DMA-{API,mapping}.txt + * + */ + +/* TBD: netif_msg should be checked and implemented. I disable it for now */ +static int +bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *ndev; + struct bdx_priv *priv; + int err, pci_using_dac, port; + unsigned long pciaddr; + u32 regionSize; + struct pci_nic *nic; + + ENTER; + + nic = vmalloc(sizeof(*nic)); + if (!nic) + RET(-ENOMEM); + + /************** pci *****************/ + err = pci_enable_device(pdev); + if (err) /* it triggers interrupt, dunno why. 
*/ + goto err_pci; /* it's not a problem though */ + + if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) && + !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) { + pci_using_dac = 1; + } else { + if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) || + (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) { + pr_err("No usable DMA configuration, aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + err = pci_request_regions(pdev, BDX_DRV_NAME); + if (err) + goto err_dma; + + pci_set_master(pdev); + + pciaddr = pci_resource_start(pdev, 0); + if (!pciaddr) { + err = -EIO; + pr_err("no MMIO resource\n"); + goto err_out_res; + } + regionSize = pci_resource_len(pdev, 0); + if (regionSize < BDX_REGS_SIZE) { + err = -EIO; + pr_err("MMIO resource (%x) too small\n", regionSize); + goto err_out_res; + } + + nic->regs = ioremap(pciaddr, regionSize); + if (!nic->regs) { + err = -EIO; + pr_err("ioremap failed\n"); + goto err_out_res; + } + + if (pdev->irq < 2) { + err = -EIO; + pr_err("invalid irq (%d)\n", pdev->irq); + goto err_out_iomap; + } + pci_set_drvdata(pdev, nic); + + if (pdev->device == 0x3014) + nic->port_num = 2; + else + nic->port_num = 1; + + print_hw_id(pdev); + + bdx_hw_reset_direct(nic->regs); + + nic->irq_type = IRQ_INTX; +#ifdef BDX_MSI + if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { + err = pci_enable_msi(pdev); + if (err) + pr_err("Can't eneble msi. error is %d\n", err); + else + nic->irq_type = IRQ_MSI; + } else + DBG("HW does not support MSI\n"); +#endif + + /************** netdev **************/ + for (port = 0; port < nic->port_num; port++) { + ndev = alloc_etherdev(sizeof(struct bdx_priv)); + if (!ndev) { + err = -ENOMEM; + goto err_out_iomap; + } + + ndev->netdev_ops = &bdx_netdev_ops; + ndev->tx_queue_len = BDX_NDEV_TXQ_LEN; + + bdx_set_ethtool_ops(ndev); /* ethtool interface */ + + /* these fields are used for info purposes only + * so we can have them same for all ports of the board */ + ndev->if_port = port; + ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO + | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM + ; + ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX; + + if (pci_using_dac) + ndev->features |= NETIF_F_HIGHDMA; + + /************** priv ****************/ + priv = nic->priv[port] = netdev_priv(ndev); + + priv->pBdxRegs = nic->regs + port * 0x8000; + priv->port = port; + priv->pdev = pdev; + priv->ndev = ndev; + priv->nic = nic; + priv->msg_enable = BDX_DEF_MSG_ENABLE; + + netif_napi_add(ndev, &priv->napi, bdx_poll, 64); + + if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) { + DBG("HW statistics not supported\n"); + priv->stats_flag = 0; + } else { + priv->stats_flag = 1; + } + + /* Initialize fifo sizes. */ + priv->txd_size = 2; + priv->txf_size = 2; + priv->rxd_size = 2; + priv->rxf_size = 3; + + /* Initialize the initial coalescing registers. */ + priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12); + priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12); + + /* ndev->xmit_lock spinlock is not used. + * Private priv->tx_lock is used for synchronization + * between transmit and TX irq cleanup. In addition + * set multicast list callback has to use priv->tx_lock. 
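+ * The transmit path takes it via spin_trylock() with local interrupts
+ * disabled and returns NETDEV_TX_LOCKED on contention, while the cleanup
+ * path takes it as a plain spin_lock().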
+ */ +#ifdef BDX_LLTX + ndev->features |= NETIF_F_LLTX; +#endif + spin_lock_init(&priv->tx_lock); + + /*bdx_hw_reset(priv); */ + if (bdx_read_mac(priv)) { + pr_err("load MAC address failed\n"); + goto err_out_iomap; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + err = register_netdev(ndev); + if (err) { + pr_err("register_netdev failed\n"); + goto err_out_free; + } + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + print_eth_id(ndev); + } + RET(0); + +err_out_free: + free_netdev(ndev); +err_out_iomap: + iounmap(nic->regs); +err_out_res: + pci_release_regions(pdev); +err_dma: + pci_disable_device(pdev); +err_pci: + vfree(nic); + + RET(err); +} + +/****************** Ethtool interface *********************/ +/* get strings for statistics counters */ +static const char + bdx_stat_names[][ETH_GSTRING_LEN] = { + "InUCast", /* 0x7200 */ + "InMCast", /* 0x7210 */ + "InBCast", /* 0x7220 */ + "InPkts", /* 0x7230 */ + "InErrors", /* 0x7240 */ + "InDropped", /* 0x7250 */ + "FrameTooLong", /* 0x7260 */ + "FrameSequenceErrors", /* 0x7270 */ + "InVLAN", /* 0x7280 */ + "InDroppedDFE", /* 0x7290 */ + "InDroppedIntFull", /* 0x72A0 */ + "InFrameAlignErrors", /* 0x72B0 */ + + /* 0x72C0-0x72E0 RSRV */ + + "OutUCast", /* 0x72F0 */ + "OutMCast", /* 0x7300 */ + "OutBCast", /* 0x7310 */ + "OutPkts", /* 0x7320 */ + + /* 0x7330-0x7360 RSRV */ + + "OutVLAN", /* 0x7370 */ + "InUCastOctects", /* 0x7380 */ + "OutUCastOctects", /* 0x7390 */ + + /* 0x73A0-0x73B0 RSRV */ + + "InBCastOctects", /* 0x73C0 */ + "OutBCastOctects", /* 0x73D0 */ + "InOctects", /* 0x73E0 */ + "OutOctects", /* 0x73F0 */ +}; + +/* + * bdx_get_settings - get device-specific settings + * @netdev + * @ecmd + */ +static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev_priv(netdev); + + rdintcm = priv->rdintcm; + tdintcm = priv->tdintcm; + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ethtool_cmd_speed_set(ecmd, SPEED_10000); + ecmd->duplex = DUPLEX_FULL; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */ + ecmd->autoneg = AUTONEG_DISABLE; + + /* PCK_TH measures in multiples of FIFO bytes + We translate to packets */ + ecmd->maxtxpkt = + ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); + ecmd->maxrxpkt = + ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); + + return 0; +} + +/* + * bdx_get_drvinfo - report driver information + * @netdev + * @drvinfo + */ +static void +bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct bdx_priv *priv = netdev_priv(netdev); + + strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(priv->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_stats = ((priv->stats_flag) ? 
ARRAY_SIZE(bdx_stat_names) : 0); + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +/* + * bdx_get_coalesce - get interrupt coalescing parameters + * @netdev + * @ecoal + */ +static int +bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev_priv(netdev); + + rdintcm = priv->rdintcm; + tdintcm = priv->tdintcm; + + /* PCK_TH measures in multiples of FIFO bytes + We translate to packets */ + ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT; + ecoal->rx_max_coalesced_frames = + ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); + + ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT; + ecoal->tx_max_coalesced_frames = + ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); + + /* adaptive parameters ignored */ + return 0; +} + +/* + * bdx_set_coalesce - set interrupt coalescing parameters + * @netdev + * @ecoal + */ +static int +bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev_priv(netdev); + int rx_coal; + int tx_coal; + int rx_max_coal; + int tx_max_coal; + + /* Check for valid input */ + rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT; + tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT; + rx_max_coal = ecoal->rx_max_coalesced_frames; + tx_max_coal = ecoal->tx_max_coalesced_frames; + + /* Translate from packets to multiples of FIFO bytes */ + rx_max_coal = + (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1) + / PCK_TH_MULT); + tx_max_coal = + (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1) + / PCK_TH_MULT); + + if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) || + (rx_max_coal > 0xF) || (tx_max_coal > 0xF)) + return -EINVAL; + + rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm), + GET_RXF_TH(priv->rdintcm), rx_max_coal); + tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0, + tx_max_coal); + + priv->rdintcm = rdintcm; + priv->tdintcm = tdintcm; + + WRITE_REG(priv, regRDINTCM0, rdintcm); + WRITE_REG(priv, regTDINTCM0, tdintcm); + + return 0; +} + +/* Convert RX fifo size to number of pending packets */ +static inline int bdx_rx_fifo_size_to_packets(int rx_size) +{ + return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc); +} + +/* Convert TX fifo size to number of pending packets */ +static inline int bdx_tx_fifo_size_to_packets(int tx_size) +{ + return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ; +} + +/* + * bdx_get_ringparam - report ring sizes + * @netdev + * @ring + */ +static void +bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct bdx_priv *priv = netdev_priv(netdev); + + /*max_pending - the maximum-sized FIFO we allow */ + ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3); + ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3); + ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size); + ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size); +} + +/* + * bdx_set_ringparam - set ring sizes + * @netdev + * @ring + */ +static int +bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct bdx_priv *priv = netdev_priv(netdev); + int rx_size = 0; + int tx_size = 0; + + for (; rx_size < 4; rx_size++) { + if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending) + break; + } + if (rx_size == 4) + rx_size = 3; + + for (; tx_size < 4; tx_size++) { + if (bdx_tx_fifo_size_to_packets(tx_size) 
>= ring->tx_pending) + break; + } + if (tx_size == 4) + tx_size = 3; + + /*Is there anything to do? */ + if ((rx_size == priv->rxf_size) && + (tx_size == priv->txd_size)) + return 0; + + priv->rxf_size = rx_size; + if (rx_size > 1) + priv->rxd_size = rx_size - 1; + else + priv->rxd_size = rx_size; + + priv->txf_size = priv->txd_size = tx_size; + + if (netif_running(netdev)) { + bdx_close(netdev); + bdx_open(netdev); + } + return 0; +} + +/* + * bdx_get_strings - return a set of strings that describe the requested objects + * @netdev + * @data + */ +static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names)); + break; + } +} + +/* + * bdx_get_sset_count - return number of statistics or tests + * @netdev + */ +static int bdx_get_sset_count(struct net_device *netdev, int stringset) +{ + struct bdx_priv *priv = netdev_priv(netdev); + + switch (stringset) { + case ETH_SS_STATS: + BDX_ASSERT(ARRAY_SIZE(bdx_stat_names) + != sizeof(struct bdx_stats) / sizeof(u64)); + return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0; + } + + return -EINVAL; +} + +/* + * bdx_get_ethtool_stats - return device's hardware L2 statistics + * @netdev + * @stats + * @data + */ +static void bdx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct bdx_priv *priv = netdev_priv(netdev); + + if (priv->stats_flag) { + + /* Update stats from HW */ + bdx_update_stats(priv); + + /* Copy data to user buffer */ + memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats)); + } +} + +/* + * bdx_set_ethtool_ops - ethtool interface implementation + * @netdev + */ +static void bdx_set_ethtool_ops(struct net_device *netdev) +{ + static const struct ethtool_ops bdx_ethtool_ops = { + .get_settings = bdx_get_settings, + .get_drvinfo = bdx_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_coalesce = bdx_get_coalesce, + .set_coalesce = bdx_set_coalesce, + .get_ringparam = bdx_get_ringparam, + .set_ringparam = bdx_set_ringparam, + .get_strings = bdx_get_strings, + .get_sset_count = bdx_get_sset_count, + .get_ethtool_stats = bdx_get_ethtool_stats, + }; + + netdev->ethtool_ops = &bdx_ethtool_ops; +} + +/** + * bdx_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * bdx_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
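+ * The per-port net devices are unregistered and freed first, then the MMIO
+ * mapping, the PCI regions and the pci_nic structure are released.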
+ **/ +static void bdx_remove(struct pci_dev *pdev) +{ + struct pci_nic *nic = pci_get_drvdata(pdev); + struct net_device *ndev; + int port; + + for (port = 0; port < nic->port_num; port++) { + ndev = nic->priv[port]->ndev; + unregister_netdev(ndev); + free_netdev(ndev); + } + + /*bdx_hw_reset_direct(nic->regs); */ +#ifdef BDX_MSI + if (nic->irq_type == IRQ_MSI) + pci_disable_msi(pdev); +#endif + + iounmap(nic->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + vfree(nic); + + RET(); +} + +static struct pci_driver bdx_pci_driver = { + .name = BDX_DRV_NAME, + .id_table = bdx_pci_tbl, + .probe = bdx_probe, + .remove = bdx_remove, +}; + +/* + * print_driver_id - print parameters of the driver build + */ +static void __init print_driver_id(void) +{ + pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION); + pr_info("Options: hw_csum %s\n", BDX_MSI_STRING); +} + +static int __init bdx_module_init(void) +{ + ENTER; + init_txd_sizes(); + print_driver_id(); + RET(pci_register_driver(&bdx_pci_driver)); +} + +module_init(bdx_module_init); + +static void __exit bdx_module_exit(void) +{ + ENTER; + pci_unregister_driver(&bdx_pci_driver); + RET(); +} + +module_exit(bdx_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(BDX_DRV_DESC); +/*(DEBLOBBED)*/ diff --git a/drivers/net/ethernet/tehuti/tehuti.h b/drivers/net/ethernet/tehuti/tehuti.h new file mode 100644 index 000000000..709ebd6e2 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tehuti.h @@ -0,0 +1,561 @@ +/* + * Tehuti Networks(R) Network Driver + * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _TEHUTI_H +#define _TEHUTI_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Compile Time Switches */ +/* start */ +#define BDX_TSO +#define BDX_LLTX +#define BDX_DELAY_WPTR +/* #define BDX_MSI */ +/* end */ + +#if !defined CONFIG_PCI_MSI +# undef BDX_MSI +#endif + +#define BDX_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +/* ioctl ops */ +#define BDX_OP_READ 1 +#define BDX_OP_WRITE 2 + +/* RX copy break size */ +#define BDX_COPYBREAK 257 + +#define DRIVER_AUTHOR "Tehuti Networks(R)" +#define BDX_DRV_DESC "Tehuti Networks(R) Network Driver" +#define BDX_DRV_NAME "tehuti" +#define BDX_NIC_NAME "Tehuti 10 Giga TOE SmartNIC" +#define BDX_NIC2PORT_NAME "Tehuti 2-Port 10 Giga TOE SmartNIC" +#define BDX_DRV_VERSION "7.29.3" + +#ifdef BDX_MSI +# define BDX_MSI_STRING "msi " +#else +# define BDX_MSI_STRING "" +#endif + +/* netdev tx queue len for Luxor. default value is, btw, 1000 + * ifcontig eth1 txqueuelen 3000 - to change it at runtime */ +#define BDX_NDEV_TXQ_LEN 3000 + +#define FIFO_SIZE 4096 +#define FIFO_EXTRA_SPACE 1024 + +#if BITS_PER_LONG == 64 +# define H32_64(x) (u32) ((u64)(x) >> 32) +# define L32_64(x) (u32) ((u64)(x) & 0xffffffff) +#elif BITS_PER_LONG == 32 +# define H32_64(x) 0 +# define L32_64(x) ((u32) (x)) +#else /* BITS_PER_LONG == ?? */ +# error BITS_PER_LONG is undefined. 
Must be 64 or 32 +#endif /* BITS_PER_LONG */ + +#ifdef __BIG_ENDIAN +# define CPU_CHIP_SWAP32(x) swab32(x) +# define CPU_CHIP_SWAP16(x) swab16(x) +#else +# define CPU_CHIP_SWAP32(x) (x) +# define CPU_CHIP_SWAP16(x) (x) +#endif + +#define READ_REG(pp, reg) readl(pp->pBdxRegs + reg) +#define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg) + +#ifndef NET_IP_ALIGN +# define NET_IP_ALIGN 2 +#endif + +#ifndef NETDEV_TX_OK +# define NETDEV_TX_OK 0 +#endif + +#define LUXOR_MAX_PORT 2 +#define BDX_MAX_RX_DONE 150 +#define BDX_TXF_DESC_SZ 16 +#define BDX_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16) +#define BDX_MIN_TX_LEVEL 256 +#define BDX_NO_UPD_PACKETS 40 + +struct pci_nic { + int port_num; + void __iomem *regs; + int irq_type; + struct bdx_priv *priv[LUXOR_MAX_PORT]; +}; + +enum { IRQ_INTX, IRQ_MSI, IRQ_MSIX }; + +#define PCK_TH_MULT 128 +#define INT_COAL_MULT 2 + +#define BITS_MASK(nbits) ((1<>nshift)&BITS_MASK(nbits)) +#define BITS_SHIFT_MASK(nbits, nshift) (BITS_MASK(nbits)<. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about TI devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_TI + +config TI_DAVINCI_EMAC + tristate "TI DaVinci EMAC Support" + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + select TI_DAVINCI_MDIO + select TI_DAVINCI_CPDMA + select PHYLIB + ---help--- + This driver supports TI's DaVinci Ethernet . + + To compile this driver as a module, choose M here: the module + will be called davinci_emac_driver. This is recommended. + +config TI_DAVINCI_MDIO + tristate "TI DaVinci MDIO Support" + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE + select PHYLIB + ---help--- + This driver supports TI's DaVinci MDIO module. + + To compile this driver as a module, choose M here: the module + will be called davinci_mdio. This is recommended. + +config TI_DAVINCI_CPDMA + tristate "TI DaVinci CPDMA Support" + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS + ---help--- + This driver supports TI's DaVinci CPDMA dma engine. + + To compile this driver as a module, choose M here: the module + will be called davinci_cpdma. This is recommended. + +config TI_CPSW_PHY_SEL + bool "TI CPSW Switch Phy sel Support" + depends on TI_CPSW + ---help--- + This driver supports configuring of the phy mode connected to + the CPSW. + +config TI_CPSW_ALE + tristate "TI CPSW ALE Support" + ---help--- + This driver supports TI's CPSW ALE module. + +config TI_CPSW + tristate "TI CPSW Switch Support" + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS + select TI_DAVINCI_CPDMA + select TI_DAVINCI_MDIO + select TI_CPSW_PHY_SEL + select TI_CPSW_ALE + select MFD_SYSCON + select REGMAP + ---help--- + This driver supports TI's CPSW Ethernet Switch. + + To compile this driver as a module, choose M here: the module + will be called cpsw. + +config TI_CPTS + bool "TI Common Platform Time Sync (CPTS) Support" + depends on TI_CPSW + select PTP_1588_CLOCK + ---help--- + This driver supports the Common Platform Time Sync unit of + the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4 + and Layer 2 packets, and the driver offers a PTP Hardware Clock. + +config TI_KEYSTONE_NETCP + tristate "TI Keystone NETCP Core Support" + select TI_CPSW_ALE + select TI_DAVINCI_MDIO + depends on OF + depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS + ---help--- + This driver supports TI's Keystone NETCP Core. 
+ + To compile this driver as a module, choose M here: the module + will be called keystone_netcp. + +config TI_KEYSTONE_NETCP_ETHSS + depends on TI_KEYSTONE_NETCP + tristate "TI Keystone NETCP Ethernet subsystem Support" + ---help--- + + To compile this driver as a module, choose M here: the module + will be called keystone_netcp_ethss. + +config TLAN + tristate "TI ThunderLAN support" + depends on (PCI || EISA) + ---help--- + If you have a PCI Ethernet network card based on the ThunderLAN chip + which is supported by this driver, say Y and read the + Ethernet-HOWTO, available from + . + + Devices currently supported by this driver are Compaq Netelligent, + Compaq NetFlex and Olicom cards. Please read the file + for more details. + + To compile this driver as a module, choose M here. The module + will be called tlan. + + Please email feedback to . + +config CPMAC + tristate "TI AR7 CPMAC Ethernet support" + depends on AR7 + select PHYLIB + ---help--- + TI AR7 CPMAC Ethernet support + +endif # NET_VENDOR_TI diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile new file mode 100644 index 000000000..d420d9413 --- /dev/null +++ b/drivers/net/ethernet/ti/Makefile @@ -0,0 +1,21 @@ +# +# Makefile for the TI network device drivers. +# + +obj-$(CONFIG_TI_CPSW) += cpsw-common.o +obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o + +obj-$(CONFIG_TLAN) += tlan.o +obj-$(CONFIG_CPMAC) += cpmac.o +obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o +obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o +obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o +obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o +obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o +obj-$(CONFIG_TI_CPSW) += ti_cpsw.o +ti_cpsw-y := cpsw.o cpts.o + +obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o +keystone_netcp-y := netcp_core.o +obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o +keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c new file mode 100644 index 000000000..dd9430043 --- /dev/null +++ b/drivers/net/ethernet/ti/cpmac.c @@ -0,0 +1,1295 @@ +/* + * Copyright (C) 2006, 2007 Eugene Konev + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Eugene Konev "); +MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:cpmac"); + +static int debug_level = 8; +static int dumb_switch; + +/* Next 2 are only used in cpmac_probe, so it's pointless to change them */ +module_param(debug_level, int, 0444); +module_param(dumb_switch, int, 0444); + +MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); +MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); + +#define CPMAC_VERSION "0.5.2" +/* frame size + 802.1q tag + FCS size */ +#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) +#define CPMAC_QUEUES 8 + +/* Ethernet registers */ +#define CPMAC_TX_CONTROL 0x0004 +#define CPMAC_TX_TEARDOWN 0x0008 +#define CPMAC_RX_CONTROL 0x0014 +#define CPMAC_RX_TEARDOWN 0x0018 +#define CPMAC_MBP 0x0100 +#define MBP_RXPASSCRC 0x40000000 +#define MBP_RXQOS 0x20000000 +#define MBP_RXNOCHAIN 0x10000000 +#define MBP_RXCMF 0x01000000 +#define MBP_RXSHORT 0x00800000 +#define MBP_RXCEF 0x00400000 +#define MBP_RXPROMISC 0x00200000 +#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) +#define MBP_RXBCAST 0x00002000 +#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) +#define MBP_RXMCAST 0x00000020 +#define MBP_MCASTCHAN(channel) ((channel) & 0x7) +#define CPMAC_UNICAST_ENABLE 0x0104 +#define CPMAC_UNICAST_CLEAR 0x0108 +#define CPMAC_MAX_LENGTH 0x010c +#define CPMAC_BUFFER_OFFSET 0x0110 +#define CPMAC_MAC_CONTROL 0x0160 +#define MAC_TXPTYPE 0x00000200 +#define MAC_TXPACE 0x00000040 +#define MAC_MII 0x00000020 +#define MAC_TXFLOW 0x00000010 +#define MAC_RXFLOW 0x00000008 +#define MAC_MTEST 0x00000004 +#define MAC_LOOPBACK 0x00000002 +#define MAC_FDX 0x00000001 +#define CPMAC_MAC_STATUS 0x0164 +#define MAC_STATUS_QOS 0x00000004 +#define MAC_STATUS_RXFLOW 0x00000002 +#define MAC_STATUS_TXFLOW 0x00000001 +#define CPMAC_TX_INT_ENABLE 0x0178 +#define CPMAC_TX_INT_CLEAR 0x017c +#define CPMAC_MAC_INT_VECTOR 0x0180 +#define MAC_INT_STATUS 0x00080000 +#define MAC_INT_HOST 0x00040000 +#define MAC_INT_RX 0x00020000 +#define MAC_INT_TX 0x00010000 +#define CPMAC_MAC_EOI_VECTOR 0x0184 +#define CPMAC_RX_INT_ENABLE 0x0198 +#define CPMAC_RX_INT_CLEAR 0x019c +#define CPMAC_MAC_INT_ENABLE 0x01a8 +#define CPMAC_MAC_INT_CLEAR 0x01ac +#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) +#define CPMAC_MAC_ADDR_MID 0x01d0 +#define CPMAC_MAC_ADDR_HI 0x01d4 +#define CPMAC_MAC_HASH_LO 0x01d8 +#define CPMAC_MAC_HASH_HI 0x01dc +#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) +#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4) +#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) +#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) +#define CPMAC_REG_END 0x0680 + +/* Rx/Tx statistics + * TODO: use some of them to fill stats in cpmac_stats() + */ +#define CPMAC_STATS_RX_GOOD 0x0200 +#define CPMAC_STATS_RX_BCAST 0x0204 +#define CPMAC_STATS_RX_MCAST 0x0208 +#define CPMAC_STATS_RX_PAUSE 0x020c +#define CPMAC_STATS_RX_CRC 0x0210 +#define CPMAC_STATS_RX_ALIGN 0x0214 +#define CPMAC_STATS_RX_OVER 0x0218 +#define 
CPMAC_STATS_RX_JABBER 0x021c +#define CPMAC_STATS_RX_UNDER 0x0220 +#define CPMAC_STATS_RX_FRAG 0x0224 +#define CPMAC_STATS_RX_FILTER 0x0228 +#define CPMAC_STATS_RX_QOSFILTER 0x022c +#define CPMAC_STATS_RX_OCTETS 0x0230 + +#define CPMAC_STATS_TX_GOOD 0x0234 +#define CPMAC_STATS_TX_BCAST 0x0238 +#define CPMAC_STATS_TX_MCAST 0x023c +#define CPMAC_STATS_TX_PAUSE 0x0240 +#define CPMAC_STATS_TX_DEFER 0x0244 +#define CPMAC_STATS_TX_COLLISION 0x0248 +#define CPMAC_STATS_TX_SINGLECOLL 0x024c +#define CPMAC_STATS_TX_MULTICOLL 0x0250 +#define CPMAC_STATS_TX_EXCESSCOLL 0x0254 +#define CPMAC_STATS_TX_LATECOLL 0x0258 +#define CPMAC_STATS_TX_UNDERRUN 0x025c +#define CPMAC_STATS_TX_CARRIERSENSE 0x0260 +#define CPMAC_STATS_TX_OCTETS 0x0264 + +#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) +#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ + (reg))) + +/* MDIO bus */ +#define CPMAC_MDIO_VERSION 0x0000 +#define CPMAC_MDIO_CONTROL 0x0004 +#define MDIOC_IDLE 0x80000000 +#define MDIOC_ENABLE 0x40000000 +#define MDIOC_PREAMBLE 0x00100000 +#define MDIOC_FAULT 0x00080000 +#define MDIOC_FAULTDETECT 0x00040000 +#define MDIOC_INTTEST 0x00020000 +#define MDIOC_CLKDIV(div) ((div) & 0xff) +#define CPMAC_MDIO_ALIVE 0x0008 +#define CPMAC_MDIO_LINK 0x000c +#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) +#define MDIO_BUSY 0x80000000 +#define MDIO_WRITE 0x40000000 +#define MDIO_REG(reg) (((reg) & 0x1f) << 21) +#define MDIO_PHY(phy) (((phy) & 0x1f) << 16) +#define MDIO_DATA(data) ((data) & 0xffff) +#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) +#define PHYSEL_LINKSEL 0x00000040 +#define PHYSEL_LINKINT 0x00000020 + +struct cpmac_desc { + u32 hw_next; + u32 hw_data; + u16 buflen; + u16 bufflags; + u16 datalen; + u16 dataflags; +#define CPMAC_SOP 0x8000 +#define CPMAC_EOP 0x4000 +#define CPMAC_OWN 0x2000 +#define CPMAC_EOQ 0x1000 + struct sk_buff *skb; + struct cpmac_desc *next; + struct cpmac_desc *prev; + dma_addr_t mapping; + dma_addr_t data_mapping; +}; + +struct cpmac_priv { + spinlock_t lock; + spinlock_t rx_lock; + struct cpmac_desc *rx_head; + int ring_size; + struct cpmac_desc *desc_ring; + dma_addr_t dma_ring; + void __iomem *regs; + struct mii_bus *mii_bus; + struct phy_device *phy; + char phy_name[MII_BUS_ID_SIZE + 3]; + int oldlink, oldspeed, oldduplex; + u32 msg_enable; + struct net_device *dev; + struct work_struct reset_work; + struct platform_device *pdev; + struct napi_struct napi; + atomic_t reset_pending; +}; + +static irqreturn_t cpmac_irq(int, void *); +static void cpmac_hw_start(struct net_device *dev); +static void cpmac_hw_stop(struct net_device *dev); +static int cpmac_stop(struct net_device *dev); +static int cpmac_open(struct net_device *dev); + +static void cpmac_dump_regs(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + + for (i = 0; i < CPMAC_REG_END; i += 4) { + if (i % 16 == 0) { + if (i) + printk("\n"); + printk("%s: reg[%p]:", dev->name, priv->regs + i); + } + printk(" %08x", cpmac_read(priv->regs, i)); + } + printk("\n"); +} + +static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) +{ + int i; + + printk("%s: desc[%p]:", dev->name, desc); + for (i = 0; i < sizeof(*desc) / 4; i++) + printk(" %08x", ((u32 *)desc)[i]); + printk("\n"); +} + +static void cpmac_dump_all_desc(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + struct cpmac_desc *dump = priv->rx_head; + + do { + cpmac_dump_desc(dev, dump); + dump = dump->next; + } while (dump != 
priv->rx_head); +} + +static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) +{ + int i; + + printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); + for (i = 0; i < skb->len; i++) { + if (i % 16 == 0) { + if (i) + printk("\n"); + printk("%s: data[%p]:", dev->name, skb->data + i); + } + printk(" %02x", ((u8 *)skb->data)[i]); + } + printk("\n"); +} + +static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + u32 val; + + while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) + cpu_relax(); + cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | + MDIO_PHY(phy_id)); + while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) + cpu_relax(); + + return MDIO_DATA(val); +} + +static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, + int reg, u16 val) +{ + while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) + cpu_relax(); + cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | + MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); + + return 0; +} + +static int cpmac_mdio_reset(struct mii_bus *bus) +{ + struct clk *cpmac_clk; + + cpmac_clk = clk_get(&bus->dev, "cpmac"); + if (IS_ERR(cpmac_clk)) { + pr_err("unable to get cpmac clock\n"); + return -1; + } + ar7_device_reset(AR7_RESET_BIT_MDIO); + cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | + MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1)); + + return 0; +} + +static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; + +static struct mii_bus *cpmac_mii; + +static void cpmac_set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + u8 tmp; + u32 mbp, bit, hash[2] = { 0, }; + struct cpmac_priv *priv = netdev_priv(dev); + + mbp = cpmac_read(priv->regs, CPMAC_MBP); + if (dev->flags & IFF_PROMISC) { + cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | + MBP_RXPROMISC); + } else { + cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); + if (dev->flags & IFF_ALLMULTI) { + /* enable all multicast mode */ + cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); + cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); + } else { + /* cpmac uses some strange mac address hashing + * (not crc32) + */ + netdev_for_each_mc_addr(ha, dev) { + bit = 0; + tmp = ha->addr[0]; + bit ^= (tmp >> 2) ^ (tmp << 4); + tmp = ha->addr[1]; + bit ^= (tmp >> 4) ^ (tmp << 2); + tmp = ha->addr[2]; + bit ^= (tmp >> 6) ^ tmp; + tmp = ha->addr[3]; + bit ^= (tmp >> 2) ^ (tmp << 4); + tmp = ha->addr[4]; + bit ^= (tmp >> 4) ^ (tmp << 2); + tmp = ha->addr[5]; + bit ^= (tmp >> 6) ^ tmp; + bit &= 0x3f; + hash[bit / 32] |= 1 << (bit % 32); + } + + cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); + cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); + } + } +} + +static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, + struct cpmac_desc *desc) +{ + struct sk_buff *skb, *result = NULL; + + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_desc(priv->dev, desc); + cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); + if (unlikely(!desc->datalen)) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + netdev_warn(priv->dev, "rx: spurious interrupt\n"); + + return NULL; + } + + skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); + if (likely(skb)) { + skb_put(desc->skb, desc->datalen); + desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); + skb_checksum_none_assert(desc->skb); + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += desc->datalen; + result = desc->skb; + 
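+ /* Unmap the buffer being handed to the stack and attach the
+ * freshly allocated skb to the descriptor in its place. */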
dma_unmap_single(&priv->dev->dev, desc->data_mapping, + CPMAC_SKB_SIZE, DMA_FROM_DEVICE); + desc->skb = skb; + desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + desc->hw_data = (u32)desc->data_mapping; + if (unlikely(netif_msg_pktdata(priv))) { + netdev_dbg(priv->dev, "received packet:\n"); + cpmac_dump_skb(priv->dev, result); + } + } else { + if (netif_msg_rx_err(priv) && net_ratelimit()) + netdev_warn(priv->dev, + "low on skbs, dropping packet\n"); + + priv->dev->stats.rx_dropped++; + } + + desc->buflen = CPMAC_SKB_SIZE; + desc->dataflags = CPMAC_OWN; + + return result; +} + +static int cpmac_poll(struct napi_struct *napi, int budget) +{ + struct sk_buff *skb; + struct cpmac_desc *desc, *restart; + struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); + int received = 0, processed = 0; + + spin_lock(&priv->rx_lock); + if (unlikely(!priv->rx_head)) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + netdev_warn(priv->dev, "rx: polling, but no queue\n"); + + spin_unlock(&priv->rx_lock); + napi_complete(napi); + return 0; + } + + desc = priv->rx_head; + restart = NULL; + while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { + processed++; + + if ((desc->dataflags & CPMAC_EOQ) != 0) { + /* The last update to eoq->hw_next didn't happen + * soon enough, and the receiver stopped here. + * Remember this descriptor so we can restart + * the receiver after freeing some space. + */ + if (unlikely(restart)) { + if (netif_msg_rx_err(priv)) + netdev_err(priv->dev, "poll found a" + " duplicate EOQ: %p and %p\n", + restart, desc); + goto fatal_error; + } + + restart = desc->next; + } + + skb = cpmac_rx_one(priv, desc); + if (likely(skb)) { + netif_receive_skb(skb); + received++; + } + desc = desc->next; + } + + if (desc != priv->rx_head) { + /* We freed some buffers, but not the whole ring, + * add what we did free to the rx list + */ + desc->prev->hw_next = (u32)0; + priv->rx_head->prev->hw_next = priv->rx_head->mapping; + } + + /* Optimization: If we did not actually process an EOQ (perhaps because + * of quota limits), check to see if the tail of the queue has EOQ set. + * We should immediately restart in that case so that the receiver can + * restart and run in parallel with more packet processing. + * This lets us handle slightly larger bursts before running + * out of ring space (assuming dev->weight < ring_size) + */ + + if (!restart && + (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) + == CPMAC_EOQ && + (priv->rx_head->dataflags & CPMAC_OWN) != 0) { + /* reset EOQ so the poll loop (above) doesn't try to + * restart this when it eventually gets to this descriptor. 
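+ * The actual restart is issued below by writing this descriptor's
+ * mapping to CPMAC_RX_PTR(0).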
+ */ + priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; + restart = priv->rx_head; + } + + if (restart) { + priv->dev->stats.rx_errors++; + priv->dev->stats.rx_fifo_errors++; + if (netif_msg_rx_err(priv) && net_ratelimit()) + netdev_warn(priv->dev, "rx dma ring overrun\n"); + + if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { + if (netif_msg_drv(priv)) + netdev_err(priv->dev, "cpmac_poll is trying " + "to restart rx from a descriptor " + "that's not free: %p\n", restart); + goto fatal_error; + } + + cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); + } + + priv->rx_head = desc; + spin_unlock(&priv->rx_lock); + if (unlikely(netif_msg_rx_status(priv))) + netdev_dbg(priv->dev, "poll processed %d packets\n", received); + + if (processed == 0) { + /* we ran out of packets to read, + * revert to interrupt-driven mode + */ + napi_complete(napi); + cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); + return 0; + } + + return 1; + +fatal_error: + /* Something went horribly wrong. + * Reset hardware to try to recover rather than wedging. + */ + if (netif_msg_drv(priv)) { + netdev_err(priv->dev, "cpmac_poll is confused. " + "Resetting hardware\n"); + cpmac_dump_all_desc(priv->dev); + netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", + cpmac_read(priv->regs, CPMAC_RX_PTR(0)), + cpmac_read(priv->regs, CPMAC_RX_ACK(0))); + } + + spin_unlock(&priv->rx_lock); + napi_complete(napi); + netif_tx_stop_all_queues(priv->dev); + napi_disable(&priv->napi); + + atomic_inc(&priv->reset_pending); + cpmac_hw_stop(priv->dev); + if (!schedule_work(&priv->reset_work)) + atomic_dec(&priv->reset_pending); + + return 0; + +} + +static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int queue, len; + struct cpmac_desc *desc; + struct cpmac_priv *priv = netdev_priv(dev); + + if (unlikely(atomic_read(&priv->reset_pending))) + return NETDEV_TX_BUSY; + + if (unlikely(skb_padto(skb, ETH_ZLEN))) + return NETDEV_TX_OK; + + len = max(skb->len, ETH_ZLEN); + queue = skb_get_queue_mapping(skb); + netif_stop_subqueue(dev, queue); + + desc = &priv->desc_ring[queue]; + if (unlikely(desc->dataflags & CPMAC_OWN)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + netdev_warn(dev, "tx dma ring full\n"); + + return NETDEV_TX_BUSY; + } + + spin_lock(&priv->lock); + spin_unlock(&priv->lock); + desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; + desc->skb = skb; + desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, + DMA_TO_DEVICE); + desc->hw_data = (u32)desc->data_mapping; + desc->datalen = len; + desc->buflen = len; + if (unlikely(netif_msg_tx_queued(priv))) + netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len); + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_desc(dev, desc); + if (unlikely(netif_msg_pktdata(priv))) + cpmac_dump_skb(dev, skb); + cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); + + return NETDEV_TX_OK; +} + +static void cpmac_end_xmit(struct net_device *dev, int queue) +{ + struct cpmac_desc *desc; + struct cpmac_priv *priv = netdev_priv(dev); + + desc = &priv->desc_ring[queue]; + cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); + if (likely(desc->skb)) { + spin_lock(&priv->lock); + dev->stats.tx_packets++; + dev->stats.tx_bytes += desc->skb->len; + spin_unlock(&priv->lock); + dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, + DMA_TO_DEVICE); + + if (unlikely(netif_msg_tx_done(priv))) + netdev_dbg(dev, "sent 0x%p, len=%d\n", + desc->skb, desc->skb->len); + + dev_kfree_skb_irq(desc->skb); + desc->skb = NULL; + if 
(__netif_subqueue_stopped(dev, queue)) + netif_wake_subqueue(dev, queue); + } else { + if (netif_msg_tx_err(priv) && net_ratelimit()) + netdev_warn(dev, "end_xmit: spurious interrupt\n"); + if (__netif_subqueue_stopped(dev, queue)) + netif_wake_subqueue(dev, queue); + } +} + +static void cpmac_hw_stop(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); + + ar7_device_reset(pdata->reset_bit); + cpmac_write(priv->regs, CPMAC_RX_CONTROL, + cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); + cpmac_write(priv->regs, CPMAC_TX_CONTROL, + cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); + for (i = 0; i < 8; i++) { + cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); + cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); + } + cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_CONTROL, + cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); +} + +static void cpmac_hw_start(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); + + ar7_device_reset(pdata->reset_bit); + for (i = 0; i < 8; i++) { + cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); + cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); + } + cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); + + cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | + MBP_RXMCAST); + cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); + for (i = 0; i < 8; i++) + cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); + cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); + cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | + (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | + (dev->dev_addr[3] << 24)); + cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); + cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); + cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); + cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); + + cpmac_write(priv->regs, CPMAC_RX_CONTROL, + cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); + cpmac_write(priv->regs, CPMAC_TX_CONTROL, + cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); + cpmac_write(priv->regs, CPMAC_MAC_CONTROL, + cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | + MAC_FDX); +} + +static void cpmac_clear_rx(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + struct cpmac_desc *desc; + int i; + + if (unlikely(!priv->rx_head)) + return; + desc = priv->rx_head; + for (i = 0; i < priv->ring_size; i++) { + if ((desc->dataflags & CPMAC_OWN) == 0) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + netdev_warn(dev, "packet dropped\n"); + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_desc(dev, desc); + desc->dataflags = CPMAC_OWN; + dev->stats.rx_dropped++; + } + desc->hw_next = desc->next->mapping; + desc = desc->next; + } + priv->rx_head->prev->hw_next = 0; +} + +static void cpmac_clear_tx(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + int i; + + if (unlikely(!priv->desc_ring)) + return; + for (i = 
0; i < CPMAC_QUEUES; i++) { + priv->desc_ring[i].dataflags = 0; + if (priv->desc_ring[i].skb) { + dev_kfree_skb_any(priv->desc_ring[i].skb); + priv->desc_ring[i].skb = NULL; + } + } +} + +static void cpmac_hw_error(struct work_struct *work) +{ + struct cpmac_priv *priv = + container_of(work, struct cpmac_priv, reset_work); + + spin_lock(&priv->rx_lock); + cpmac_clear_rx(priv->dev); + spin_unlock(&priv->rx_lock); + cpmac_clear_tx(priv->dev); + cpmac_hw_start(priv->dev); + barrier(); + atomic_dec(&priv->reset_pending); + + netif_tx_wake_all_queues(priv->dev); + cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); +} + +static void cpmac_check_status(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); + int rx_channel = (macstatus >> 8) & 7; + int rx_code = (macstatus >> 12) & 15; + int tx_channel = (macstatus >> 16) & 7; + int tx_code = (macstatus >> 20) & 15; + + if (rx_code || tx_code) { + if (netif_msg_drv(priv) && net_ratelimit()) { + /* Can't find any documentation on what these + * error codes actually are. So just log them and hope.. + */ + if (rx_code) + netdev_warn(dev, "host error %d on rx " + "channel %d (macstatus %08x), resetting\n", + rx_code, rx_channel, macstatus); + if (tx_code) + netdev_warn(dev, "host error %d on tx " + "channel %d (macstatus %08x), resetting\n", + tx_code, tx_channel, macstatus); + } + + netif_tx_stop_all_queues(dev); + cpmac_hw_stop(dev); + if (schedule_work(&priv->reset_work)) + atomic_inc(&priv->reset_pending); + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_regs(dev); + } + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); +} + +static irqreturn_t cpmac_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct cpmac_priv *priv; + int queue; + u32 status; + + priv = netdev_priv(dev); + + status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); + + if (unlikely(netif_msg_intr(priv))) + netdev_dbg(dev, "interrupt status: 0x%08x\n", status); + + if (status & MAC_INT_TX) + cpmac_end_xmit(dev, (status & 7)); + + if (status & MAC_INT_RX) { + queue = (status >> 8) & 7; + if (napi_schedule_prep(&priv->napi)) { + cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); + __napi_schedule(&priv->napi); + } + } + + cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); + + if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) + cpmac_check_status(dev); + + return IRQ_HANDLED; +} + +static void cpmac_tx_timeout(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + spin_lock(&priv->lock); + dev->stats.tx_errors++; + spin_unlock(&priv->lock); + if (netif_msg_tx_err(priv) && net_ratelimit()) + netdev_warn(dev, "transmit timeout\n"); + + atomic_inc(&priv->reset_pending); + barrier(); + cpmac_clear_tx(dev); + barrier(); + atomic_dec(&priv->reset_pending); + + netif_tx_wake_all_queues(priv->dev); +} + +static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + if (!(netif_running(dev))) + return -EINVAL; + if (!priv->phy) + return -EINVAL; + + return phy_mii_ioctl(priv->phy, ifr, cmd); +} + +static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + if (priv->phy) + return phy_ethtool_gset(priv->phy, cmd); + + return -EINVAL; +} + +static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if 
(priv->phy) + return phy_ethtool_sset(priv->phy, cmd); + + return -EINVAL; +} + +static void cpmac_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + ring->rx_max_pending = 1024; + ring->rx_mini_max_pending = 1; + ring->rx_jumbo_max_pending = 1; + ring->tx_max_pending = 1; + + ring->rx_pending = priv->ring_size; + ring->rx_mini_pending = 1; + ring->rx_jumbo_pending = 1; + ring->tx_pending = 1; +} + +static int cpmac_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + if (netif_running(dev)) + return -EBUSY; + priv->ring_size = ring->rx_pending; + + return 0; +} + +static void cpmac_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "cpmac", sizeof(info->driver)); + strlcpy(info->version, CPMAC_VERSION, sizeof(info->version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac"); + info->regdump_len = 0; +} + +static const struct ethtool_ops cpmac_ethtool_ops = { + .get_settings = cpmac_get_settings, + .set_settings = cpmac_set_settings, + .get_drvinfo = cpmac_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = cpmac_get_ringparam, + .set_ringparam = cpmac_set_ringparam, +}; + +static void cpmac_adjust_link(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + int new_state = 0; + + spin_lock(&priv->lock); + if (priv->phy->link) { + netif_tx_start_all_queues(dev); + if (priv->phy->duplex != priv->oldduplex) { + new_state = 1; + priv->oldduplex = priv->phy->duplex; + } + + if (priv->phy->speed != priv->oldspeed) { + new_state = 1; + priv->oldspeed = priv->phy->speed; + } + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv) && net_ratelimit()) + phy_print_status(priv->phy); + + spin_unlock(&priv->lock); +} + +static int cpmac_open(struct net_device *dev) +{ + int i, size, res; + struct cpmac_priv *priv = netdev_priv(dev); + struct resource *mem; + struct cpmac_desc *desc; + struct sk_buff *skb; + + mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); + if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { + if (netif_msg_drv(priv)) + netdev_err(dev, "failed to request registers\n"); + + res = -ENXIO; + goto fail_reserve; + } + + priv->regs = ioremap(mem->start, resource_size(mem)); + if (!priv->regs) { + if (netif_msg_drv(priv)) + netdev_err(dev, "failed to remap registers\n"); + + res = -ENXIO; + goto fail_remap; + } + + size = priv->ring_size + CPMAC_QUEUES; + priv->desc_ring = dma_alloc_coherent(&dev->dev, + sizeof(struct cpmac_desc) * size, + &priv->dma_ring, + GFP_KERNEL); + if (!priv->desc_ring) { + res = -ENOMEM; + goto fail_alloc; + } + + for (i = 0; i < size; i++) + priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; + + priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; + for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { + skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE); + if (unlikely(!skb)) { + res = -ENOMEM; + goto fail_desc; + } + desc->skb = skb; + desc->data_mapping = dma_map_single(&dev->dev, skb->data, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + desc->hw_data = (u32)desc->data_mapping; + desc->buflen = CPMAC_SKB_SIZE; + desc->dataflags = CPMAC_OWN; + desc->next = &priv->rx_head[(i + 1) % 
priv->ring_size]; + desc->next->prev = desc; + desc->hw_next = (u32)desc->next->mapping; + } + + priv->rx_head->prev->hw_next = (u32)0; + + res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev); + if (res) { + if (netif_msg_drv(priv)) + netdev_err(dev, "failed to obtain irq\n"); + + goto fail_irq; + } + + atomic_set(&priv->reset_pending, 0); + INIT_WORK(&priv->reset_work, cpmac_hw_error); + cpmac_hw_start(dev); + + napi_enable(&priv->napi); + priv->phy->state = PHY_CHANGELINK; + phy_start(priv->phy); + + return 0; + +fail_irq: +fail_desc: + for (i = 0; i < priv->ring_size; i++) { + if (priv->rx_head[i].skb) { + dma_unmap_single(&dev->dev, + priv->rx_head[i].data_mapping, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_head[i].skb); + } + } +fail_alloc: + kfree(priv->desc_ring); + iounmap(priv->regs); + +fail_remap: + release_mem_region(mem->start, resource_size(mem)); + +fail_reserve: + return res; +} + +static int cpmac_stop(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + struct resource *mem; + + netif_tx_stop_all_queues(dev); + + cancel_work_sync(&priv->reset_work); + napi_disable(&priv->napi); + phy_stop(priv->phy); + + cpmac_hw_stop(dev); + + for (i = 0; i < 8; i++) + cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); + cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); + cpmac_write(priv->regs, CPMAC_MBP, 0); + + free_irq(dev->irq, dev); + iounmap(priv->regs); + mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); + release_mem_region(mem->start, resource_size(mem)); + priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; + for (i = 0; i < priv->ring_size; i++) { + if (priv->rx_head[i].skb) { + dma_unmap_single(&dev->dev, + priv->rx_head[i].data_mapping, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_head[i].skb); + } + } + + dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * + (CPMAC_QUEUES + priv->ring_size), + priv->desc_ring, priv->dma_ring); + + return 0; +} + +static const struct net_device_ops cpmac_netdev_ops = { + .ndo_open = cpmac_open, + .ndo_stop = cpmac_stop, + .ndo_start_xmit = cpmac_start_xmit, + .ndo_tx_timeout = cpmac_tx_timeout, + .ndo_set_rx_mode = cpmac_set_multicast_list, + .ndo_do_ioctl = cpmac_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + +static int external_switch; + +static int cpmac_probe(struct platform_device *pdev) +{ + int rc, phy_id; + char mdio_bus_id[MII_BUS_ID_SIZE]; + struct resource *mem; + struct cpmac_priv *priv; + struct net_device *dev; + struct plat_cpmac_data *pdata; + + pdata = dev_get_platdata(&pdev->dev); + + if (external_switch || dumb_switch) { + strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ + phy_id = pdev->id; + } else { + for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { + if (!(pdata->phy_mask & (1 << phy_id))) + continue; + if (!cpmac_mii->phy_map[phy_id]) + continue; + strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); + break; + } + } + + if (phy_id == PHY_MAX_ADDR) { + dev_err(&pdev->dev, "no PHY present, falling back " + "to switch on MDIO bus 0\n"); + strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ + phy_id = pdev->id; + } + mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0'; + + dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); + if (!dev) + return -ENOMEM; + + platform_set_drvdata(pdev, dev); + priv = netdev_priv(dev); + + priv->pdev = pdev; + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + if 
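cpmac_open() above wires the RX descriptor ring twice: once with CPU-side next/prev pointers and once with hw_next, which carries the DMA address of the following descriptor, then breaks the hardware chain by writing 0 into the last descriptor's hw_next. A small sketch of that circular wiring, with hypothetical types and made-up DMA addresses:

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 4     /* hypothetical; the driver defaults priv->ring_size to 64 */

struct demo_desc {
        uint32_t hw_next;               /* DMA address of next descriptor, 0 terminates */
        struct demo_desc *next;         /* CPU-side link */
        uint32_t mapping;               /* this descriptor's own DMA address */
};

int main(void)
{
        struct demo_desc ring[RING_SIZE];
        int i;

        for (i = 0; i < RING_SIZE; i++)
                ring[i].mapping = 0x80000000u + 16 * i; /* made-up addresses */

        for (i = 0; i < RING_SIZE; i++) {
                ring[i].next = &ring[(i + 1) % RING_SIZE];
                ring[i].hw_next = ring[i].next->mapping;
        }
        ring[RING_SIZE - 1].hw_next = 0;        /* like rx_head->prev->hw_next = 0 */

        for (i = 0; i < RING_SIZE; i++)
                printf("desc %d: hw_next=0x%08x\n", i,
                       (unsigned int)ring[i].hw_next);
        return 0;
}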
(!mem) { + rc = -ENODEV; + goto out; + } + + dev->irq = platform_get_irq_byname(pdev, "irq"); + + dev->netdev_ops = &cpmac_netdev_ops; + dev->ethtool_ops = &cpmac_ethtool_ops; + + netif_napi_add(dev, &priv->napi, cpmac_poll, 64); + + spin_lock_init(&priv->lock); + spin_lock_init(&priv->rx_lock); + priv->dev = dev; + priv->ring_size = 64; + priv->msg_enable = netif_msg_init(debug_level, 0xff); + memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); + + snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, phy_id); + + priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(priv->phy)) { + if (netif_msg_drv(priv)) + dev_err(&pdev->dev, "Could not attach to PHY\n"); + + rc = PTR_ERR(priv->phy); + goto out; + } + + rc = register_netdev(dev); + if (rc) { + dev_err(&pdev->dev, "Could not register net device\n"); + goto fail; + } + + if (netif_msg_probe(priv)) { + dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, " + "mac: %pM\n", (void *)mem->start, dev->irq, + priv->phy_name, dev->dev_addr); + } + + return 0; + +fail: + free_netdev(dev); +out: + return rc; +} + +static int cpmac_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + unregister_netdev(dev); + free_netdev(dev); + + return 0; +} + +static struct platform_driver cpmac_driver = { + .driver = { + .name = "cpmac", + }, + .probe = cpmac_probe, + .remove = cpmac_remove, +}; + +int cpmac_init(void) +{ + u32 mask; + int i, res; + + cpmac_mii = mdiobus_alloc(); + if (cpmac_mii == NULL) + return -ENOMEM; + + cpmac_mii->name = "cpmac-mii"; + cpmac_mii->read = cpmac_mdio_read; + cpmac_mii->write = cpmac_mdio_write; + cpmac_mii->reset = cpmac_mdio_reset; + cpmac_mii->irq = mii_irqs; + + cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); + + if (!cpmac_mii->priv) { + pr_err("Can't ioremap mdio registers\n"); + res = -ENXIO; + goto fail_alloc; + } + +#warning FIXME: unhardcode gpio&reset bits + ar7_gpio_disable(26); + ar7_gpio_disable(27); + ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); + ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); + ar7_device_reset(AR7_RESET_BIT_EPHY); + + cpmac_mii->reset(cpmac_mii); + + for (i = 0; i < 300; i++) { + mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); + if (mask) + break; + else + msleep(10); + } + + mask &= 0x7fffffff; + if (mask & (mask - 1)) { + external_switch = 1; + mask = 0; + } + + cpmac_mii->phy_mask = ~(mask | 0x80000000); + snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1"); + + res = mdiobus_register(cpmac_mii); + if (res) + goto fail_mii; + + res = platform_driver_register(&cpmac_driver); + if (res) + goto fail_cpmac; + + return 0; + +fail_cpmac: + mdiobus_unregister(cpmac_mii); + +fail_mii: + iounmap(cpmac_mii->priv); + +fail_alloc: + mdiobus_free(cpmac_mii); + + return res; +} + +void cpmac_exit(void) +{ + platform_driver_unregister(&cpmac_driver); + mdiobus_unregister(cpmac_mii); + iounmap(cpmac_mii->priv); + mdiobus_free(cpmac_mii); +} + +module_init(cpmac_init); +module_exit(cpmac_exit); diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c new file mode 100644 index 000000000..f59509486 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -0,0 +1,55 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "cpsw.h"
+
+#define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
+
+int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+                             u8 *mac_addr)
+{
+        u32 macid_lo;
+        u32 macid_hi;
+        struct regmap *syscon;
+
+        syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+        if (IS_ERR(syscon)) {
+                if (PTR_ERR(syscon) == -ENODEV)
+                        return 0;
+                return PTR_ERR(syscon);
+        }
+
+        regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(offset, slave),
+                    &macid_lo);
+        regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(offset, slave),
+                    &macid_hi);
+
+        mac_addr[5] = (macid_lo >> 8) & 0xff;
+        mac_addr[4] = macid_lo & 0xff;
+        mac_addr[3] = (macid_hi >> 24) & 0xff;
+        mac_addr[2] = (macid_hi >> 16) & 0xff;
+        mac_addr[1] = (macid_hi >> 8) & 0xff;
+        mac_addr[0] = macid_hi & 0xff;
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(cpsw_am33xx_cm_get_macid);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 000000000..0ea78326c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,220 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
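cpsw_am33xx_cm_get_macid() above reassembles the interface MAC address byte by byte from the two control-module registers; note that the low byte of MACID_HI becomes mac_addr[0]. A small stand-alone illustration of that byte ordering, with made-up register contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical register contents, only to show the byte ordering */
        uint32_t macid_hi = 0xaabbccdd;
        uint32_t macid_lo = 0x0000eeff;
        uint8_t mac[6];

        mac[5] = (macid_lo >> 8) & 0xff;
        mac[4] = macid_lo & 0xff;
        mac[3] = (macid_hi >> 24) & 0xff;
        mac[2] = (macid_hi >> 16) & 0xff;
        mac[1] = (macid_hi >> 8) & 0xff;
        mac[0] = macid_hi & 0xff;

        /* prints dd:cc:bb:aa:ff:ee */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}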
+ */ + +#include +#include +#include +#include +#include +#include + +#include "cpsw.h" + +/* AM33xx SoC specific definitions for the CONTROL port */ +#define AM33XX_GMII_SEL_MODE_MII 0 +#define AM33XX_GMII_SEL_MODE_RMII 1 +#define AM33XX_GMII_SEL_MODE_RGMII 2 + +#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7) +#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6) + +#define GMII_SEL_MODE_MASK 0x3 + +struct cpsw_phy_sel_priv { + struct device *dev; + u32 __iomem *gmii_sel; + bool rmii_clock_external; + void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave); +}; + + +static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave) +{ + u32 reg; + u32 mask; + u32 mode = 0; + + reg = readl(priv->gmii_sel); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + mode = AM33XX_GMII_SEL_MODE_RMII; + break; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + mode = AM33XX_GMII_SEL_MODE_RGMII; + break; + + case PHY_INTERFACE_MODE_MII: + default: + mode = AM33XX_GMII_SEL_MODE_MII; + break; + }; + + mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6); + mode <<= slave * 2; + + if (priv->rmii_clock_external) { + if (slave == 0) + mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN; + else + mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN; + } + + reg &= ~mask; + reg |= mode; + + writel(reg, priv->gmii_sel); +} + +static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv, + phy_interface_t phy_mode, int slave) +{ + u32 reg; + u32 mask; + u32 mode = 0; + + reg = readl(priv->gmii_sel); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + mode = AM33XX_GMII_SEL_MODE_RMII; + break; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + mode = AM33XX_GMII_SEL_MODE_RGMII; + break; + + case PHY_INTERFACE_MODE_MII: + default: + mode = AM33XX_GMII_SEL_MODE_MII; + break; + }; + + switch (slave) { + case 0: + mask = GMII_SEL_MODE_MASK; + break; + case 1: + mask = GMII_SEL_MODE_MASK << 4; + mode <<= 4; + break; + default: + dev_err(priv->dev, "invalid slave number...\n"); + return; + } + + if (priv->rmii_clock_external) + dev_err(priv->dev, "RMII External clock is not supported\n"); + + reg &= ~mask; + reg |= mode; + + writel(reg, priv->gmii_sel); +} + +static struct platform_driver cpsw_phy_sel_driver; +static int match(struct device *dev, void *data) +{ + struct device_node *node = (struct device_node *)data; + return dev->of_node == node && + dev->driver == &cpsw_phy_sel_driver.driver; +} + +void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) +{ + struct device_node *node; + struct cpsw_phy_sel_priv *priv; + + node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); + if (!node) { + dev_err(dev, "Phy mode driver DT not found\n"); + return; + } + + dev = bus_find_device(&platform_bus_type, NULL, node, match); + priv = dev_get_drvdata(dev); + + priv->cpsw_phy_sel(priv, phy_mode, slave); +} +EXPORT_SYMBOL_GPL(cpsw_phy_sel); + +static const struct of_device_id cpsw_phy_sel_id_table[] = { + { + .compatible = "ti,am3352-cpsw-phy-sel", + .data = &cpsw_gmii_sel_am3352, + }, + { + .compatible = "ti,dra7xx-cpsw-phy-sel", + .data = &cpsw_gmii_sel_dra7xx, + }, + { + .compatible = "ti,am43xx-cpsw-phy-sel", + .data = &cpsw_gmii_sel_am3352, + }, + {} +}; +MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table); + +static int cpsw_phy_sel_probe(struct 
platform_device *pdev) +{ + struct resource *res; + const struct of_device_id *of_id; + struct cpsw_phy_sel_priv *priv; + + of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node); + if (!of_id) + return -EINVAL; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n"); + return -ENOMEM; + } + + priv->dev = &pdev->dev; + priv->cpsw_phy_sel = of_id->data; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel"); + priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->gmii_sel)) + return PTR_ERR(priv->gmii_sel); + + if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL)) + priv->rmii_clock_external = true; + + dev_set_drvdata(&pdev->dev, priv); + + return 0; +} + +static struct platform_driver cpsw_phy_sel_driver = { + .probe = cpsw_phy_sel_probe, + .driver = { + .name = "cpsw-phy-sel", + .of_match_table = cpsw_phy_sel_id_table, + }, +}; + +module_platform_driver(cpsw_phy_sel_driver); +MODULE_AUTHOR("Mugunthan V N "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c new file mode 100644 index 000000000..b536b4c82 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw.c @@ -0,0 +1,2557 @@ +/* + * Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2012 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cpsw.h" +#include "cpsw_ale.h" +#include "cpts.h" +#include "davinci_cpdma.h" + +#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ + NETIF_MSG_DRV | NETIF_MSG_LINK | \ + NETIF_MSG_IFUP | NETIF_MSG_INTR | \ + NETIF_MSG_PROBE | NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \ + NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_RX_STATUS) + +#define cpsw_info(priv, type, format, ...) \ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_info(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define cpsw_err(priv, type, format, ...) \ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_err(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define cpsw_dbg(priv, type, format, ...) \ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_dbg(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define cpsw_notice(priv, type, format, ...) 
\ +do { \ + if (netif_msg_##type(priv) && net_ratelimit()) \ + dev_notice(priv->dev, format, ## __VA_ARGS__); \ +} while (0) + +#define ALE_ALL_PORTS 0x7 + +#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7) +#define CPSW_MINOR_VERSION(reg) (reg & 0xff) +#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f) + +#define CPSW_VERSION_1 0x19010a +#define CPSW_VERSION_2 0x19010c +#define CPSW_VERSION_3 0x19010f +#define CPSW_VERSION_4 0x190112 + +#define HOST_PORT_NUM 0 +#define SLIVER_SIZE 0x40 + +#define CPSW1_HOST_PORT_OFFSET 0x028 +#define CPSW1_SLAVE_OFFSET 0x050 +#define CPSW1_SLAVE_SIZE 0x040 +#define CPSW1_CPDMA_OFFSET 0x100 +#define CPSW1_STATERAM_OFFSET 0x200 +#define CPSW1_HW_STATS 0x400 +#define CPSW1_CPTS_OFFSET 0x500 +#define CPSW1_ALE_OFFSET 0x600 +#define CPSW1_SLIVER_OFFSET 0x700 + +#define CPSW2_HOST_PORT_OFFSET 0x108 +#define CPSW2_SLAVE_OFFSET 0x200 +#define CPSW2_SLAVE_SIZE 0x100 +#define CPSW2_CPDMA_OFFSET 0x800 +#define CPSW2_HW_STATS 0x900 +#define CPSW2_STATERAM_OFFSET 0xa00 +#define CPSW2_CPTS_OFFSET 0xc00 +#define CPSW2_ALE_OFFSET 0xd00 +#define CPSW2_SLIVER_OFFSET 0xd80 +#define CPSW2_BD_OFFSET 0x2000 + +#define CPDMA_RXTHRESH 0x0c0 +#define CPDMA_RXFREE 0x0e0 +#define CPDMA_TXHDP 0x00 +#define CPDMA_RXHDP 0x20 +#define CPDMA_TXCP 0x40 +#define CPDMA_RXCP 0x60 + +#define CPSW_POLL_WEIGHT 64 +#define CPSW_MIN_PACKET_SIZE 60 +#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4) + +#define RX_PRIORITY_MAPPING 0x76543210 +#define TX_PRIORITY_MAPPING 0x33221100 +#define CPDMA_TX_PRIORITY_MAP 0x76543210 + +#define CPSW_VLAN_AWARE BIT(1) +#define CPSW_ALE_VLAN_AWARE 1 + +#define CPSW_FIFO_NORMAL_MODE (0 << 16) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) + +#define CPSW_INTPACEEN (0x3f << 16) +#define CPSW_INTPRESCALE_MASK (0x7FF << 0) +#define CPSW_CMINTMAX_CNT 63 +#define CPSW_CMINTMIN_CNT 2 +#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) +#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) + +#define cpsw_enable_irq(priv) \ + do { \ + u32 i; \ + for (i = 0; i < priv->num_irqs; i++) \ + enable_irq(priv->irqs_table[i]); \ + } while (0) +#define cpsw_disable_irq(priv) \ + do { \ + u32 i; \ + for (i = 0; i < priv->num_irqs; i++) \ + disable_irq_nosync(priv->irqs_table[i]); \ + } while (0) + +#define cpsw_slave_index(priv) \ + ((priv->data.dual_emac) ? 
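The CPSW_MAJOR_VERSION/CPSW_MINOR_VERSION/CPSW_RTL_VERSION macros above slice the hardware id_ver register that cpsw_ndo_open() later prints. A quick worked example, assuming the register reads 0x19010c (CPSW_VERSION_2, one of the values defined above):

#include <stdio.h>

#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
#define CPSW_RTL_VERSION(reg)   ((reg >> 11) & 0x1f)

int main(void)
{
        unsigned int reg = 0x19010c;    /* CPSW_VERSION_2 */

        /* prints "cpsw version 1.12 (0)" for this id_ver value */
        printf("cpsw version %d.%d (%d)\n", CPSW_MAJOR_VERSION(reg),
               CPSW_MINOR_VERSION(reg), CPSW_RTL_VERSION(reg));
        return 0;
}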
priv->emac_port : \ + priv->data.active_slave) + +static int debug_level; +module_param(debug_level, int, 0); +MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); + +static int ale_ageout = 10; +module_param(ale_ageout, int, 0); +MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)"); + +static int rx_packet_max = CPSW_MAX_PACKET_SIZE; +module_param(rx_packet_max, int, 0); +MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)"); + +struct cpsw_wr_regs { + u32 id_ver; + u32 soft_reset; + u32 control; + u32 int_control; + u32 rx_thresh_en; + u32 rx_en; + u32 tx_en; + u32 misc_en; + u32 mem_allign1[8]; + u32 rx_thresh_stat; + u32 rx_stat; + u32 tx_stat; + u32 misc_stat; + u32 mem_allign2[8]; + u32 rx_imax; + u32 tx_imax; + +}; + +struct cpsw_ss_regs { + u32 id_ver; + u32 control; + u32 soft_reset; + u32 stat_port_en; + u32 ptype; + u32 soft_idle; + u32 thru_rate; + u32 gap_thresh; + u32 tx_start_wds; + u32 flow_control; + u32 vlan_ltype; + u32 ts_ltype; + u32 dlr_ltype; +}; + +/* CPSW_PORT_V1 */ +#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */ +#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */ +#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */ +#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */ +#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */ +#define CPSW1_TS_CTL 0x14 /* Time Sync Control */ +#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */ +#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */ + +/* CPSW_PORT_V2 */ +#define CPSW2_CONTROL 0x00 /* Control Register */ +#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */ +#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */ +#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */ +#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */ +#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */ +#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */ + +/* CPSW_PORT_V1 and V2 */ +#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */ +#define SA_HI 0x24 /* CPGMAC_SL Source Address High */ +#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */ + +/* CPSW_PORT_V2 only */ +#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */ +#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */ + +/* Bit definitions for the CPSW2_CONTROL register */ +#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */ +#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */ +#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */ +#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */ +#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */ +#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */ +#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */ +#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */ +#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */ +#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */ +#define TS_TTL_NONZERO (1<<8) /* Time 
Sync Time To Live Non-zero enable */ +#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */ +#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */ +#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */ +#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */ +#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */ +#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */ + +#define CTRL_V2_TS_BITS \ + (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN) + +#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN) +#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN) +#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN) + + +#define CTRL_V3_TS_BITS \ + (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ + TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ + TS_LTYPE1_EN) + +#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN) +#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN) +#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN) + +/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */ +#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */ +#define TS_SEQ_ID_OFFSET_MASK (0x3f) +#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */ +#define TS_MSG_TYPE_EN_MASK (0xffff) + +/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */ +#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3)) + +/* Bit definitions for the CPSW1_TS_CTL register */ +#define CPSW_V1_TS_RX_EN BIT(0) +#define CPSW_V1_TS_TX_EN BIT(4) +#define CPSW_V1_MSG_TYPE_OFS 16 + +/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ +#define CPSW_V1_SEQ_ID_OFS_SHIFT 16 + +struct cpsw_host_regs { + u32 max_blks; + u32 blk_cnt; + u32 tx_in_ctl; + u32 port_vlan; + u32 tx_pri_map; + u32 cpdma_tx_pri_map; + u32 cpdma_rx_chan_map; +}; + +struct cpsw_sliver_regs { + u32 id_ver; + u32 mac_control; + u32 mac_status; + u32 soft_reset; + u32 rx_maxlen; + u32 __reserved_0; + u32 rx_pause; + u32 tx_pause; + u32 __reserved_1; + u32 rx_pri_map; +}; + +struct cpsw_hw_stats { + u32 rxgoodframes; + u32 rxbroadcastframes; + u32 rxmulticastframes; + u32 rxpauseframes; + u32 rxcrcerrors; + u32 rxaligncodeerrors; + u32 rxoversizedframes; + u32 rxjabberframes; + u32 rxundersizedframes; + u32 rxfragments; + u32 __pad_0[2]; + u32 rxoctets; + u32 txgoodframes; + u32 txbroadcastframes; + u32 txmulticastframes; + u32 txpauseframes; + u32 txdeferredframes; + u32 txcollisionframes; + u32 txsinglecollframes; + u32 txmultcollframes; + u32 txexcessivecollisions; + u32 txlatecollisions; + u32 txunderrun; + u32 txcarriersenseerrors; + u32 txoctets; + u32 octetframes64; + u32 octetframes65t127; + u32 octetframes128t255; + u32 octetframes256t511; + u32 octetframes512t1023; + u32 octetframes1024tup; + u32 netoctets; + u32 rxsofoverruns; + u32 rxmofoverruns; + u32 rxdmaoverruns; +}; + +struct cpsw_slave { + void __iomem *regs; + struct cpsw_sliver_regs __iomem *sliver; + int slave_num; + u32 mac_control; + struct cpsw_slave_data *data; + struct phy_device *phy; + struct net_device *ndev; + u32 port_vlan; + u32 open_stat; +}; + +static inline u32 slave_read(struct cpsw_slave *slave, u32 offset) +{ + return __raw_readl(slave->regs + offset); +} + +static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) +{ + __raw_writel(val, slave->regs + offset); +} + +struct cpsw_priv { + spinlock_t lock; + struct platform_device *pdev; + struct net_device *ndev; + struct napi_struct napi; + 
struct device *dev; + struct cpsw_platform_data data; + struct cpsw_ss_regs __iomem *regs; + struct cpsw_wr_regs __iomem *wr_regs; + u8 __iomem *hw_stats; + struct cpsw_host_regs __iomem *host_port_regs; + u32 msg_enable; + u32 version; + u32 coal_intvl; + u32 bus_freq_mhz; + int rx_packet_max; + int host_port; + struct clk *clk; + u8 mac_addr[ETH_ALEN]; + struct cpsw_slave *slaves; + struct cpdma_ctlr *dma; + struct cpdma_chan *txch, *rxch; + struct cpsw_ale *ale; + bool rx_pause; + bool tx_pause; + /* snapshot of IRQ numbers */ + u32 irqs_table[4]; + u32 num_irqs; + bool irq_enabled; + struct cpts *cpts; + u32 emac_port; +}; + +struct cpsw_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +enum { + CPSW_STATS, + CPDMA_RX_STATS, + CPDMA_TX_STATS, +}; + +#define CPSW_STAT(m) CPSW_STATS, \ + sizeof(((struct cpsw_hw_stats *)0)->m), \ + offsetof(struct cpsw_hw_stats, m) +#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) +#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ + sizeof(((struct cpdma_chan_stats *)0)->m), \ + offsetof(struct cpdma_chan_stats, m) + +static const struct cpsw_stats cpsw_gstrings_stats[] = { + { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, + { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, + { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, + { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, + { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, + { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, + { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, + { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, + { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, + { "Rx Fragments", CPSW_STAT(rxfragments) }, + { "Rx Octets", CPSW_STAT(rxoctets) }, + { "Good Tx Frames", CPSW_STAT(txgoodframes) }, + { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, + { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, + { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, + { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, + { "Collisions", CPSW_STAT(txcollisionframes) }, + { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, + { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, + { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, + { "Late Collisions", CPSW_STAT(txlatecollisions) }, + { "Tx Underrun", CPSW_STAT(txunderrun) }, + { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, + { "Tx Octets", CPSW_STAT(txoctets) }, + { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, + { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, + { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, + { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, + { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, + { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, + { "Net Octets", CPSW_STAT(netoctets) }, + { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, + { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, + { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, + { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) }, + { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, + { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, + { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) }, + { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, + { "Rx DMA chan: pad_alloc_fail", 
CPDMA_RX_STAT(pad_alloc_fail) }, + { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, + { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, + { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, + { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, + { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) }, + { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) }, + { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, + { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) }, + { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) }, + { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) }, + { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) }, + { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) }, + { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) }, + { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) }, + { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) }, + { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) }, + { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) }, + { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) }, + { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) }, + { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) }, +}; + +#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats) + +#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) +#define for_each_slave(priv, func, arg...) \ + do { \ + struct cpsw_slave *slave; \ + int n; \ + if (priv->data.dual_emac) \ + (func)((priv)->slaves + priv->emac_port, ##arg);\ + else \ + for (n = (priv)->data.slaves, \ + slave = (priv)->slaves; \ + n; n--) \ + (func)(slave++, ##arg); \ + } while (0) +#define cpsw_get_slave_ndev(priv, __slave_no__) \ + (priv->slaves[__slave_no__].ndev) +#define cpsw_get_slave_priv(priv, __slave_no__) \ + ((priv->slaves[__slave_no__].ndev) ? \ + netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ + +#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ + do { \ + if (!priv->data.dual_emac) \ + break; \ + if (CPDMA_RX_SOURCE_PORT(status) == 1) { \ + ndev = cpsw_get_slave_ndev(priv, 0); \ + priv = netdev_priv(ndev); \ + skb->dev = ndev; \ + } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \ + ndev = cpsw_get_slave_ndev(priv, 1); \ + priv = netdev_priv(ndev); \ + skb->dev = ndev; \ + } \ + } while (0) +#define cpsw_add_mcast(priv, addr) \ + do { \ + if (priv->data.dual_emac) { \ + struct cpsw_slave *slave = priv->slaves + \ + priv->emac_port; \ + int slave_port = cpsw_get_slave_port(priv, \ + slave->slave_num); \ + cpsw_ale_add_mcast(priv->ale, addr, \ + 1 << slave_port | 1 << priv->host_port, \ + ALE_VLAN, slave->port_vlan, 0); \ + } else { \ + cpsw_ale_add_mcast(priv->ale, addr, \ + ALE_ALL_PORTS << priv->host_port, \ + 0, 0, 0); \ + } \ + } while (0) + +static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) +{ + if (priv->host_port == 0) + return slave_num + 1; + else + return slave_num; +} + +static void cpsw_set_promiscious(struct net_device *ndev, bool enable) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_ale *ale = priv->ale; + int i; + + if (priv->data.dual_emac) { + bool flag = false; + + /* Enabling promiscuous mode for one interface will be + * common for both the interface as the interface shares + * the same hardware resource. 
+ */ + for (i = 0; i < priv->data.slaves; i++) + if (priv->slaves[i].ndev->flags & IFF_PROMISC) + flag = true; + + if (!enable && flag) { + enable = true; + dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n"); + } + + if (enable) { + /* Enable Bypass */ + cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1); + + dev_dbg(&ndev->dev, "promiscuity enabled\n"); + } else { + /* Disable Bypass */ + cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0); + dev_dbg(&ndev->dev, "promiscuity disabled\n"); + } + } else { + if (enable) { + unsigned long timeout = jiffies + HZ; + + /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */ + for (i = 0; i <= priv->data.slaves; i++) { + cpsw_ale_control_set(ale, i, + ALE_PORT_NOLEARN, 1); + cpsw_ale_control_set(ale, i, + ALE_PORT_NO_SA_UPDATE, 1); + } + + /* Clear All Untouched entries */ + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + do { + cpu_relax(); + if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) + break; + } while (time_after(timeout, jiffies)); + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + + /* Clear all mcast from ALE */ + cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << + priv->host_port, -1); + + /* Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); + dev_dbg(&ndev->dev, "promiscuity enabled\n"); + } else { + /* Don't Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); + + /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */ + for (i = 0; i <= priv->data.slaves; i++) { + cpsw_ale_control_set(ale, i, + ALE_PORT_NOLEARN, 0); + cpsw_ale_control_set(ale, i, + ALE_PORT_NO_SA_UPDATE, 0); + } + dev_dbg(&ndev->dev, "promiscuity disabled\n"); + } + } +} + +static void cpsw_ndo_set_rx_mode(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int vid; + + if (priv->data.dual_emac) + vid = priv->slaves[priv->emac_port].port_vlan; + else + vid = priv->data.default_vlan; + + if (ndev->flags & IFF_PROMISC) { + /* Enable promiscuous mode */ + cpsw_set_promiscious(ndev, true); + cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI); + return; + } else { + /* Disable promiscuous mode */ + cpsw_set_promiscious(ndev, false); + } + + /* Restore allmulti on vlans if necessary */ + cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); + + /* Clear all mcast from ALE */ + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, + vid); + + if (!netdev_mc_empty(ndev)) { + struct netdev_hw_addr *ha; + + /* program multicast address list into ALE register */ + netdev_for_each_mc_addr(ha, ndev) { + cpsw_add_mcast(priv, (u8 *)ha->addr); + } + } +} + +static void cpsw_intr_enable(struct cpsw_priv *priv) +{ + __raw_writel(0xFF, &priv->wr_regs->tx_en); + __raw_writel(0xFF, &priv->wr_regs->rx_en); + + cpdma_ctlr_int_ctrl(priv->dma, true); + return; +} + +static void cpsw_intr_disable(struct cpsw_priv *priv) +{ + __raw_writel(0, &priv->wr_regs->tx_en); + __raw_writel(0, &priv->wr_regs->rx_en); + + cpdma_ctlr_int_ctrl(priv->dma, false); + return; +} + +static void cpsw_tx_handler(void *token, int len, int status) +{ + struct sk_buff *skb = token; + struct net_device *ndev = skb->dev; + struct cpsw_priv *priv = netdev_priv(ndev); + + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ + if (unlikely(netif_queue_stopped(ndev))) + netif_wake_queue(ndev); + cpts_tx_timestamp(priv->cpts, 
skb); + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += len; + dev_kfree_skb_any(skb); +} + +static void cpsw_rx_handler(void *token, int len, int status) +{ + struct sk_buff *skb = token; + struct sk_buff *new_skb; + struct net_device *ndev = skb->dev; + struct cpsw_priv *priv = netdev_priv(ndev); + int ret = 0; + + cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); + + if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { + bool ndev_status = false; + struct cpsw_slave *slave = priv->slaves; + int n; + + if (priv->data.dual_emac) { + /* In dual emac mode check for all interfaces */ + for (n = priv->data.slaves; n; n--, slave++) + if (netif_running(slave->ndev)) + ndev_status = true; + } + + if (ndev_status && (status >= 0)) { + /* The packet received is for the interface which + * is already down and the other interface is up + * and running, instead of freeing which results + * in reducing of the number of rx descriptor in + * DMA engine, requeue skb back to cpdma. + */ + new_skb = skb; + goto requeue; + } + + /* the interface is going down, skbs are purged */ + dev_kfree_skb_any(skb); + return; + } + + new_skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max); + if (new_skb) { + skb_put(skb, len); + cpts_rx_timestamp(priv->cpts, skb); + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + ndev->stats.rx_bytes += len; + ndev->stats.rx_packets++; + } else { + ndev->stats.rx_dropped++; + new_skb = skb; + } + +requeue: + ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, + skb_tailroom(new_skb), 0); + if (WARN_ON(ret < 0)) + dev_kfree_skb_any(new_skb); +} + +static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) +{ + struct cpsw_priv *priv = dev_id; + + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + cpdma_chan_process(priv->txch, 128); + + priv = cpsw_get_slave_priv(priv, 1); + if (priv) + cpdma_chan_process(priv->txch, 128); + + return IRQ_HANDLED; +} + +static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) +{ + struct cpsw_priv *priv = dev_id; + + cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + + cpsw_intr_disable(priv); + if (priv->irq_enabled == true) { + cpsw_disable_irq(priv); + priv->irq_enabled = false; + } + + if (netif_running(priv->ndev)) { + napi_schedule(&priv->napi); + return IRQ_HANDLED; + } + + priv = cpsw_get_slave_priv(priv, 1); + if (!priv) + return IRQ_NONE; + + if (netif_running(priv->ndev)) { + napi_schedule(&priv->napi); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +static int cpsw_poll(struct napi_struct *napi, int budget) +{ + struct cpsw_priv *priv = napi_to_priv(napi); + int num_tx, num_rx; + + num_tx = cpdma_chan_process(priv->txch, 128); + + num_rx = cpdma_chan_process(priv->rxch, budget); + if (num_rx < budget) { + struct cpsw_priv *prim_cpsw; + + napi_complete(napi); + cpsw_intr_enable(priv); + prim_cpsw = cpsw_get_slave_priv(priv, 0); + if (prim_cpsw->irq_enabled == false) { + prim_cpsw->irq_enabled = true; + cpsw_enable_irq(priv); + } + } + + if (num_rx || num_tx) + cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", + num_rx, num_tx); + + return num_rx; +} + +static inline void soft_reset(const char *module, void __iomem *reg) +{ + unsigned long timeout = jiffies + HZ; + + __raw_writel(1, reg); + do { + cpu_relax(); + } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies)); + + WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module); +} + +#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ + ((mac)[2] << 16) | ((mac)[3] << 24)) +#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] 
<< 8)) + +static void cpsw_set_slave_mac(struct cpsw_slave *slave, + struct cpsw_priv *priv) +{ + slave_write(slave, mac_hi(priv->mac_addr), SA_HI); + slave_write(slave, mac_lo(priv->mac_addr), SA_LO); +} + +static void _cpsw_adjust_link(struct cpsw_slave *slave, + struct cpsw_priv *priv, bool *link) +{ + struct phy_device *phy = slave->phy; + u32 mac_control = 0; + u32 slave_port; + + if (!phy) + return; + + slave_port = cpsw_get_slave_port(priv, slave->slave_num); + + if (phy->link) { + mac_control = priv->data.mac_control; + + /* enable forwarding */ + cpsw_ale_control_set(priv->ale, slave_port, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + + if (phy->speed == 1000) + mac_control |= BIT(7); /* GIGABITEN */ + if (phy->duplex) + mac_control |= BIT(0); /* FULLDUPLEXEN */ + + /* set speed_in input in case RMII mode is used in 100Mbps */ + if (phy->speed == 100) + mac_control |= BIT(15); + else if (phy->speed == 10) + mac_control |= BIT(18); /* In Band mode */ + + if (priv->rx_pause) + mac_control |= BIT(3); + + if (priv->tx_pause) + mac_control |= BIT(4); + + *link = true; + } else { + mac_control = 0; + /* disable forwarding */ + cpsw_ale_control_set(priv->ale, slave_port, + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + } + + if (mac_control != slave->mac_control) { + phy_print_status(phy); + __raw_writel(mac_control, &slave->sliver->mac_control); + } + + slave->mac_control = mac_control; +} + +static void cpsw_adjust_link(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + bool link = false; + + for_each_slave(priv, _cpsw_adjust_link, priv, &link); + + if (link) { + netif_carrier_on(ndev); + if (netif_running(ndev)) + netif_wake_queue(ndev); + } else { + netif_carrier_off(ndev); + netif_stop_queue(ndev); + } +} + +static int cpsw_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + coal->rx_coalesce_usecs = priv->coal_intvl; + return 0; +} + +static int cpsw_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + u32 int_ctrl; + u32 num_interrupts = 0; + u32 prescale = 0; + u32 addnl_dvdr = 1; + u32 coal_intvl = 0; + + coal_intvl = coal->rx_coalesce_usecs; + + int_ctrl = readl(&priv->wr_regs->int_control); + prescale = priv->bus_freq_mhz * 4; + + if (!coal->rx_coalesce_usecs) { + int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN); + goto update_return; + } + + if (coal_intvl < CPSW_CMINTMIN_INTVL) + coal_intvl = CPSW_CMINTMIN_INTVL; + + if (coal_intvl > CPSW_CMINTMAX_INTVL) { + /* Interrupt pacer works with 4us Pulse, we can + * throttle further by dilating the 4us pulse. 
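+ * For example, assuming a 125 MHz bus clock (prescale = 125 * 4 = 500),
+ * a request above CPSW_CMINTMAX_INTVL (500us) computes
+ * addnl_dvdr = 0x7FF / 500 = 4, stretching the usable range to
+ * 4 * 500 = 2000us; a 1000us request then programs
+ * rx_imax/tx_imax = (1000 * 4) / 1000 = 4.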
+ */ + addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; + + if (addnl_dvdr > 1) { + prescale *= addnl_dvdr; + if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) + coal_intvl = (CPSW_CMINTMAX_INTVL + * addnl_dvdr); + } else { + addnl_dvdr = 1; + coal_intvl = CPSW_CMINTMAX_INTVL; + } + } + + num_interrupts = (1000 * addnl_dvdr) / coal_intvl; + writel(num_interrupts, &priv->wr_regs->rx_imax); + writel(num_interrupts, &priv->wr_regs->tx_imax); + + int_ctrl |= CPSW_INTPACEEN; + int_ctrl &= (~CPSW_INTPRESCALE_MASK); + int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); + +update_return: + writel(int_ctrl, &priv->wr_regs->int_control); + + cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + priv = netdev_priv(priv->slaves[i].ndev); + priv->coal_intvl = coal_intvl; + } + } else { + priv->coal_intvl = coal_intvl; + } + + return 0; +} + +static int cpsw_get_sset_count(struct net_device *ndev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return CPSW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < CPSW_STATS_LEN; i++) { + memcpy(p, cpsw_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void cpsw_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpdma_chan_stats rx_stats; + struct cpdma_chan_stats tx_stats; + u32 val; + u8 *p; + int i; + + /* Collect Davinci CPDMA stats for Rx and Tx Channel */ + cpdma_chan_get_stats(priv->rxch, &rx_stats); + cpdma_chan_get_stats(priv->txch, &tx_stats); + + for (i = 0; i < CPSW_STATS_LEN; i++) { + switch (cpsw_gstrings_stats[i].type) { + case CPSW_STATS: + val = readl(priv->hw_stats + + cpsw_gstrings_stats[i].stat_offset); + data[i] = val; + break; + + case CPDMA_RX_STATS: + p = (u8 *)&rx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + + case CPDMA_TX_STATS: + p = (u8 *)&tx_stats + + cpsw_gstrings_stats[i].stat_offset; + data[i] = *(u32 *)p; + break; + } + } +} + +static int cpsw_common_res_usage_state(struct cpsw_priv *priv) +{ + u32 i; + u32 usage_count = 0; + + if (!priv->data.dual_emac) + return 0; + + for (i = 0; i < priv->data.slaves; i++) + if (priv->slaves[i].open_stat) + usage_count++; + + return usage_count; +} + +static inline int cpsw_tx_packet_submit(struct net_device *ndev, + struct cpsw_priv *priv, struct sk_buff *skb) +{ + if (!priv->data.dual_emac) + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 0); + + if (ndev == cpsw_get_slave_ndev(priv, 0)) + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 1); + else + return cpdma_chan_submit(priv->txch, skb, skb->data, + skb->len, 2); +} + +static inline void cpsw_add_dual_emac_def_ale_entries( + struct cpsw_priv *priv, struct cpsw_slave *slave, + u32 slave_port) +{ + u32 port_mask = 1 << slave_port | 1 << priv->host_port; + + if (priv->version == CPSW_VERSION_1) + slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); + else + slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN); + cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask, + port_mask, port_mask, 0); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, slave->port_vlan, 0); + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + 
priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); +} + +static void soft_reset_slave(struct cpsw_slave *slave) +{ + char name[32]; + + snprintf(name, sizeof(name), "slave-%d", slave->slave_num); + soft_reset(name, &slave->sliver->soft_reset); +} + +static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + soft_reset_slave(slave); + + /* setup priority mapping */ + __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); + + switch (priv->version) { + case CPSW_VERSION_1: + slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + case CPSW_VERSION_4: + slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); + break; + } + + /* setup max packet size, and mac address */ + __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen); + cpsw_set_slave_mac(slave, priv); + + slave->mac_control = 0; /* no link yet */ + + slave_port = cpsw_get_slave_port(priv, slave->slave_num); + + if (priv->data.dual_emac) + cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port); + else + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); + + slave->phy = phy_connect(priv->ndev, slave->data->phy_id, + &cpsw_adjust_link, slave->data->phy_if); + if (IS_ERR(slave->phy)) { + dev_err(priv->dev, "phy %s not found on slave %d\n", + slave->data->phy_id, slave->slave_num); + slave->phy = NULL; + } else { + dev_info(priv->dev, "phy found : id is : 0x%x\n", + slave->phy->phy_id); + phy_start(slave->phy); + + /* Configure GMII_SEL register */ + cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, + slave->slave_num); + } +} + +static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) +{ + const int vlan = priv->data.default_vlan; + const int port = priv->host_port; + u32 reg; + int i; + int unreg_mcast_mask; + + reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : + CPSW2_PORT_VLAN; + + writel(vlan, &priv->host_port_regs->port_vlan); + + for (i = 0; i < priv->data.slaves; i++) + slave_write(priv->slaves + i, vlan, reg); + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, + ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, + unreg_mcast_mask << port); +} + +static void cpsw_init_host_port(struct cpsw_priv *priv) +{ + u32 control_reg; + u32 fifo_mode; + + /* soft reset the controller and initialize ale */ + soft_reset("cpsw", &priv->regs->soft_reset); + cpsw_ale_start(priv->ale); + + /* switch to vlan unaware mode */ + cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE, + CPSW_ALE_VLAN_AWARE); + control_reg = readl(&priv->regs->control); + control_reg |= CPSW_VLAN_AWARE; + writel(control_reg, &priv->regs->control); + fifo_mode = (priv->data.dual_emac) ? 
CPSW_FIFO_DUAL_MAC_MODE : + CPSW_FIFO_NORMAL_MODE; + writel(fifo_mode, &priv->host_port_regs->tx_in_ctl); + + /* setup host port priority mapping */ + __raw_writel(CPDMA_TX_PRIORITY_MAP, + &priv->host_port_regs->cpdma_tx_pri_map); + __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map); + + cpsw_ale_control_set(priv->ale, priv->host_port, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + + if (!priv->data.dual_emac) { + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, + 0, 0); + cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2); + } +} + +static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) +{ + u32 slave_port; + + slave_port = cpsw_get_slave_port(priv, slave->slave_num); + + if (!slave->phy) + return; + phy_stop(slave->phy); + phy_disconnect(slave->phy); + slave->phy = NULL; + cpsw_ale_control_set(priv->ale, slave_port, + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); +} + +static int cpsw_ndo_open(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_priv *prim_cpsw; + int i, ret; + u32 reg; + + if (!cpsw_common_res_usage_state(priv)) + cpsw_intr_disable(priv); + netif_carrier_off(ndev); + + pm_runtime_get_sync(&priv->pdev->dev); + + reg = priv->version; + + dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", + CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), + CPSW_RTL_VERSION(reg)); + + /* initialize host and slave ports */ + if (!cpsw_common_res_usage_state(priv)) + cpsw_init_host_port(priv); + for_each_slave(priv, cpsw_slave_open, priv); + + /* Add default VLAN */ + if (!priv->data.dual_emac) + cpsw_add_default_vlan(priv); + else + cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan, + ALE_ALL_PORTS << priv->host_port, + ALE_ALL_PORTS << priv->host_port, 0, 0); + + if (!cpsw_common_res_usage_state(priv)) { + /* setup tx dma to fixed prio and zero offset */ + cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); + cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); + + /* disable priority elevation */ + __raw_writel(0, &priv->regs->ptype); + + /* enable statistics collection only on all ports */ + __raw_writel(0x7, &priv->regs->stat_port_en); + + /* Enable internal fifo flow control */ + writel(0x7, &priv->regs->flow_control); + + if (WARN_ON(!priv->data.rx_descs)) + priv->data.rx_descs = 128; + + for (i = 0; i < priv->data.rx_descs; i++) { + struct sk_buff *skb; + + ret = -ENOMEM; + skb = __netdev_alloc_skb_ip_align(priv->ndev, + priv->rx_packet_max, GFP_KERNEL); + if (!skb) + goto err_cleanup; + ret = cpdma_chan_submit(priv->rxch, skb, skb->data, + skb_tailroom(skb), 0); + if (ret < 0) { + kfree_skb(skb); + goto err_cleanup; + } + } + /* continue even if we didn't manage to submit all + * receive descs + */ + cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); + + if (cpts_register(&priv->pdev->dev, priv->cpts, + priv->data.cpts_clock_mult, + priv->data.cpts_clock_shift)) + dev_err(priv->dev, "error registering cpts device\n"); + + } + + /* Enable Interrupt pacing if configured */ + if (priv->coal_intvl != 0) { + struct ethtool_coalesce coal; + + coal.rx_coalesce_usecs = (priv->coal_intvl << 4); + cpsw_set_coalesce(ndev, &coal); + } + + napi_enable(&priv->napi); + cpdma_ctlr_start(priv->dma); + cpsw_intr_enable(priv); + + prim_cpsw = cpsw_get_slave_priv(priv, 0); + if (prim_cpsw->irq_enabled == false) { + if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { + prim_cpsw->irq_enabled = true; + cpsw_enable_irq(prim_cpsw); + } + } + + if 
(priv->data.dual_emac) + priv->slaves[priv->emac_port].open_stat = true; + return 0; + +err_cleanup: + cpdma_ctlr_stop(priv->dma); + for_each_slave(priv, cpsw_slave_stop, priv); + pm_runtime_put_sync(&priv->pdev->dev); + netif_carrier_off(priv->ndev); + return ret; +} + +static int cpsw_ndo_stop(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + cpsw_info(priv, ifdown, "shutting down cpsw device\n"); + netif_stop_queue(priv->ndev); + napi_disable(&priv->napi); + netif_carrier_off(priv->ndev); + + if (cpsw_common_res_usage_state(priv) <= 1) { + cpts_unregister(priv->cpts); + cpsw_intr_disable(priv); + cpdma_ctlr_int_ctrl(priv->dma, false); + cpdma_ctlr_stop(priv->dma); + cpsw_ale_stop(priv->ale); + } + for_each_slave(priv, cpsw_slave_stop, priv); + pm_runtime_put_sync(&priv->pdev->dev); + if (priv->data.dual_emac) + priv->slaves[priv->emac_port].open_stat = false; + return 0; +} + +static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ndev->trans_start = jiffies; + + if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { + cpsw_err(priv, tx_err, "packet pad failed\n"); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + priv->cpts->tx_enable) + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + skb_tx_timestamp(skb); + + ret = cpsw_tx_packet_submit(ndev, priv, skb); + if (unlikely(ret != 0)) { + cpsw_err(priv, tx_err, "desc submit failed\n"); + goto fail; + } + + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. + */ + if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) + netif_stop_queue(ndev); + + return NETDEV_TX_OK; +fail: + ndev->stats.tx_dropped++; + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; +} + +#ifdef CONFIG_TI_CPTS + +static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) +{ + struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave]; + u32 ts_en, seq_id; + + if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { + slave_write(slave, 0, CPSW1_TS_CTL); + return; + } + + seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; + ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; + + if (priv->cpts->tx_enable) + ts_en |= CPSW_V1_TS_TX_EN; + + if (priv->cpts->rx_enable) + ts_en |= CPSW_V1_TS_RX_EN; + + slave_write(slave, ts_en, CPSW1_TS_CTL); + slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE); +} + +static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) +{ + struct cpsw_slave *slave; + u32 ctrl, mtype; + + if (priv->data.dual_emac) + slave = &priv->slaves[priv->emac_port]; + else + slave = &priv->slaves[priv->data.active_slave]; + + ctrl = slave_read(slave, CPSW2_CONTROL); + switch (priv->version) { + case CPSW_VERSION_2: + ctrl &= ~CTRL_V2_ALL_TS_MASK; + + if (priv->cpts->tx_enable) + ctrl |= CTRL_V2_TX_TS_BITS; + + if (priv->cpts->rx_enable) + ctrl |= CTRL_V2_RX_TS_BITS; + break; + case CPSW_VERSION_3: + default: + ctrl &= ~CTRL_V3_ALL_TS_MASK; + + if (priv->cpts->tx_enable) + ctrl |= CTRL_V3_TX_TS_BITS; + + if (priv->cpts->rx_enable) + ctrl |= CTRL_V3_RX_TS_BITS; + break; + } + + mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS; + + slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); + slave_write(slave, ctrl, CPSW2_CONTROL); + __raw_writel(ETH_P_1588, &priv->regs->ts_ltype); +} + +static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpts *cpts = priv->cpts; + struct 
hwtstamp_config cfg; + + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2 && + priv->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + /* reserved for future extensions */ + if (cfg.flags) + return -EINVAL; + + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + cpts->rx_enable = 0; + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + return -ERANGE; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + cpts->rx_enable = 1; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + default: + return -ERANGE; + } + + cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON; + + switch (priv->version) { + case CPSW_VERSION_1: + cpsw_hwtstamp_v1(priv); + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + cpsw_hwtstamp_v2(priv); + break; + default: + WARN_ON(1); + } + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct cpsw_priv *priv = netdev_priv(dev); + struct cpts *cpts = priv->cpts; + struct hwtstamp_config cfg; + + if (priv->version != CPSW_VERSION_1 && + priv->version != CPSW_VERSION_2 && + priv->version != CPSW_VERSION_3) + return -EOPNOTSUPP; + + cfg.flags = 0; + cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg.rx_filter = (cpts->rx_enable ? + HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
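cpsw_hwtstamp_set() and cpsw_hwtstamp_get() above back the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls dispatched from cpsw_ndo_ioctl(), so they are reached through the generic hwtstamp_config interface rather than a driver-private API. A minimal user-space sketch of how such a request would be issued; the interface name "eth0" is only a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder name */
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                /* the driver writes back the filter it actually granted */
                printf("rx_filter granted: %d\n", cfg.rx_filter);

        close(fd);
        return 0;
}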
-EFAULT : 0; +} + +#endif /*CONFIG_TI_CPTS*/ + +static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct cpsw_priv *priv = netdev_priv(dev); + int slave_no = cpsw_slave_index(priv); + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { +#ifdef CONFIG_TI_CPTS + case SIOCSHWTSTAMP: + return cpsw_hwtstamp_set(dev, req); + case SIOCGHWTSTAMP: + return cpsw_hwtstamp_get(dev, req); +#endif + } + + if (!priv->slaves[slave_no].phy) + return -EOPNOTSUPP; + return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd); +} + +static void cpsw_ndo_tx_timeout(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); + ndev->stats.tx_errors++; + cpsw_intr_disable(priv); + cpdma_ctlr_int_ctrl(priv->dma, false); + cpdma_chan_stop(priv->txch); + cpdma_chan_start(priv->txch); + cpdma_ctlr_int_ctrl(priv->dma, true); + cpsw_intr_enable(priv); +} + +static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = (struct sockaddr *)p; + int flags = 0; + u16 vid = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (priv->data.dual_emac) { + vid = priv->slaves[priv->emac_port].port_vlan; + flags = ALE_VLAN; + } + + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port, + flags, vid); + cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port, + flags, vid); + + memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); + memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + for_each_slave(priv, cpsw_set_slave_mac, priv); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cpsw_ndo_poll_controller(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + cpsw_intr_disable(priv); + cpdma_ctlr_int_ctrl(priv->dma, false); + cpsw_rx_interrupt(priv->irqs_table[0], priv); + cpsw_tx_interrupt(priv->irqs_table[1], priv); + cpdma_ctlr_int_ctrl(priv->dma, true); + cpsw_intr_enable(priv); +} +#endif + +static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, + unsigned short vid) +{ + int ret; + int unreg_mcast_mask = 0; + u32 port_mask; + + if (priv->data.dual_emac) { + port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = port_mask; + } else { + port_mask = ALE_ALL_PORTS; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + } + + ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, + unreg_mcast_mask << priv->host_port); + if (ret != 0) + return ret; + + ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); + if (ret != 0) + goto clean_vid; + + ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, + port_mask, ALE_VLAN, vid, 0); + if (ret != 0) + goto clean_vlan_ucast; + return 0; + +clean_vlan_ucast: + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); +clean_vid: + cpsw_ale_del_vlan(priv->ale, vid, 0); + return ret; +} + +static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + if (vid == priv->data.default_vlan) + return 0; + + if (priv->data.dual_emac) { + /* In dual EMAC, reserved VLAN id should not be used for + * creating VLAN interfaces as this can break the dual + * EMAC port separation + */ + int i; + + for (i = 0; i < priv->data.slaves; 
i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + + dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); + return cpsw_add_vlan_ale_entry(priv, vid); +} + +static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, + __be16 proto, u16 vid) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + if (vid == priv->data.default_vlan) + return 0; + + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); + ret = cpsw_ale_del_vlan(priv->ale, vid, 0); + if (ret != 0) + return ret; + + ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, + priv->host_port, ALE_VLAN, vid); + if (ret != 0) + return ret; + + return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); +} + +static const struct net_device_ops cpsw_netdev_ops = { + .ndo_open = cpsw_ndo_open, + .ndo_stop = cpsw_ndo_stop, + .ndo_start_xmit = cpsw_ndo_start_xmit, + .ndo_set_mac_address = cpsw_ndo_set_mac_address, + .ndo_do_ioctl = cpsw_ndo_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_tx_timeout = cpsw_ndo_tx_timeout, + .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cpsw_ndo_poll_controller, +#endif + .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid, +}; + +static int cpsw_get_regs_len(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32); +} + +static void cpsw_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *p) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + u32 *reg = p; + + /* update CPSW IP version */ + regs->version = priv->version; + + cpsw_ale_dump(priv->ale, reg); +} + +static void cpsw_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + strlcpy(info->driver, "cpsw", sizeof(info->driver)); + strlcpy(info->version, "1.0", sizeof(info->version)); + strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info)); + info->regdump_len = cpsw_get_regs_len(ndev); +} + +static u32 cpsw_get_msglevel(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + return priv->msg_enable; +} + +static void cpsw_set_msglevel(struct net_device *ndev, u32 value) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + priv->msg_enable = value; +} + +static int cpsw_get_ts_info(struct net_device *ndev, + struct ethtool_ts_info *info) +{ +#ifdef CONFIG_TI_CPTS + struct cpsw_priv *priv = netdev_priv(ndev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = priv->cpts->phc_index; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); +#else + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + info->tx_types = 0; + info->rx_filters = 0; +#endif + return 0; +} + +static int cpsw_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int 
slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; +} + +static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd); + else + return -EOPNOTSUPP; +} + +static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + wol->supported = 0; + wol->wolopts = 0; + + if (priv->slaves[slave_no].phy) + phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol); +} + +static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int slave_no = cpsw_slave_index(priv); + + if (priv->slaves[slave_no].phy) + return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol); + else + return -EOPNOTSUPP; +} + +static void cpsw_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + pause->autoneg = AUTONEG_DISABLE; + pause->rx_pause = priv->rx_pause ? true : false; + pause->tx_pause = priv->tx_pause ? true : false; +} + +static int cpsw_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + bool link; + + priv->rx_pause = pause->rx_pause ? true : false; + priv->tx_pause = pause->tx_pause ? true : false; + + for_each_slave(priv, _cpsw_adjust_link, priv, &link); + + return 0; +} + +static const struct ethtool_ops cpsw_ethtool_ops = { + .get_drvinfo = cpsw_get_drvinfo, + .get_msglevel = cpsw_get_msglevel, + .set_msglevel = cpsw_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ts_info = cpsw_get_ts_info, + .get_settings = cpsw_get_settings, + .set_settings = cpsw_set_settings, + .get_coalesce = cpsw_get_coalesce, + .set_coalesce = cpsw_set_coalesce, + .get_sset_count = cpsw_get_sset_count, + .get_strings = cpsw_get_strings, + .get_ethtool_stats = cpsw_get_ethtool_stats, + .get_pauseparam = cpsw_get_pauseparam, + .set_pauseparam = cpsw_set_pauseparam, + .get_wol = cpsw_get_wol, + .set_wol = cpsw_set_wol, + .get_regs_len = cpsw_get_regs_len, + .get_regs = cpsw_get_regs, +}; + +static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, + u32 slave_reg_ofs, u32 sliver_reg_ofs) +{ + void __iomem *regs = priv->regs; + int slave_num = slave->slave_num; + struct cpsw_slave_data *data = priv->data.slave_data + slave_num; + + slave->data = data; + slave->regs = regs + slave_reg_ofs; + slave->sliver = regs + sliver_reg_ofs; + slave->port_vlan = data->dual_emac_res_vlan; +} + +static int cpsw_probe_dt(struct cpsw_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *slave_node; + int i = 0, ret; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "slaves", &prop)) { + dev_err(&pdev->dev, "Missing slaves property in the DT.\n"); + return -EINVAL; + } + data->slaves = prop; + + if (of_property_read_u32(node, "active_slave", &prop)) { + dev_err(&pdev->dev, "Missing active_slave property in the DT.\n"); + return -EINVAL; + } + data->active_slave = prop; + + if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { + dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n"); + 
return -EINVAL; + } + data->cpts_clock_mult = prop; + + if (of_property_read_u32(node, "cpts_clock_shift", &prop)) { + dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n"); + return -EINVAL; + } + data->cpts_clock_shift = prop; + + data->slave_data = devm_kzalloc(&pdev->dev, data->slaves + * sizeof(struct cpsw_slave_data), + GFP_KERNEL); + if (!data->slave_data) + return -ENOMEM; + + if (of_property_read_u32(node, "cpdma_channels", &prop)) { + dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n"); + return -EINVAL; + } + data->channels = prop; + + if (of_property_read_u32(node, "ale_entries", &prop)) { + dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n"); + return -EINVAL; + } + data->ale_entries = prop; + + if (of_property_read_u32(node, "bd_ram_size", &prop)) { + dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n"); + return -EINVAL; + } + data->bd_ram_size = prop; + + if (of_property_read_u32(node, "rx_descs", &prop)) { + dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n"); + return -EINVAL; + } + data->rx_descs = prop; + + if (of_property_read_u32(node, "mac_control", &prop)) { + dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); + return -EINVAL; + } + data->mac_control = prop; + + if (of_property_read_bool(node, "dual_emac")) + data->dual_emac = 1; + + /* + * Populate all the child nodes here... + */ + ret = of_platform_populate(node, NULL, NULL, &pdev->dev); + /* We do not want to force this, as in some cases may not have child */ + if (ret) + dev_warn(&pdev->dev, "Doesn't have any child node\n"); + + for_each_child_of_node(node, slave_node) { + struct cpsw_slave_data *slave_data = data->slave_data + i; + const void *mac_addr = NULL; + u32 phyid; + int lenp; + const __be32 *parp; + struct device_node *mdio_node; + struct platform_device *mdio; + + /* This is no slave child node, continue */ + if (strcmp(slave_node->name, "slave")) + continue; + + parp = of_get_property(slave_node, "phy_id", &lenp); + if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { + dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); + goto no_phy_slave; + } + mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); + phyid = be32_to_cpup(parp+1); + mdio = of_find_device_by_node(mdio_node); + of_node_put(mdio_node); + if (!mdio) { + dev_err(&pdev->dev, "Missing mdio platform device\n"); + return -EINVAL; + } + snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), + PHY_ID_FMT, mdio->name, phyid); + + slave_data->phy_if = of_get_phy_mode(slave_node); + if (slave_data->phy_if < 0) { + dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", + i); + return slave_data->phy_if; + } + +no_phy_slave: + mac_addr = of_get_mac_address(slave_node); + if (mac_addr) { + memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + } else { + if (of_machine_is_compatible("ti,am33xx")) { + ret = cpsw_am33xx_cm_get_macid(&pdev->dev, + 0x630, i, + slave_data->mac_addr); + if (ret) + return ret; + } + } + if (data->dual_emac) { + if (of_property_read_u32(slave_node, "dual_emac_res_vlan", + &prop)) { + dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n"); + slave_data->dual_emac_res_vlan = i+1; + dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n", + slave_data->dual_emac_res_vlan, i); + } else { + slave_data->dual_emac_res_vlan = prop; + } + } + + i++; + if (i == data->slaves) + break; + } + + return 0; +} + +static int cpsw_probe_dual_emac(struct platform_device *pdev, + struct cpsw_priv *priv) +{ + struct 
cpsw_platform_data *data = &priv->data; + struct net_device *ndev; + struct cpsw_priv *priv_sl2; + int ret = 0, i; + + ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + if (!ndev) { + dev_err(&pdev->dev, "cpsw: error allocating net_device\n"); + return -ENOMEM; + } + + priv_sl2 = netdev_priv(ndev); + spin_lock_init(&priv_sl2->lock); + priv_sl2->data = *data; + priv_sl2->pdev = pdev; + priv_sl2->ndev = ndev; + priv_sl2->dev = &ndev->dev; + priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv_sl2->rx_packet_max = max(rx_packet_max, 128); + + if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { + memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, + ETH_ALEN); + dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr); + } else { + random_ether_addr(priv_sl2->mac_addr); + dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr); + } + memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN); + + priv_sl2->slaves = priv->slaves; + priv_sl2->clk = priv->clk; + + priv_sl2->coal_intvl = 0; + priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; + + priv_sl2->regs = priv->regs; + priv_sl2->host_port = priv->host_port; + priv_sl2->host_port_regs = priv->host_port_regs; + priv_sl2->wr_regs = priv->wr_regs; + priv_sl2->hw_stats = priv->hw_stats; + priv_sl2->dma = priv->dma; + priv_sl2->txch = priv->txch; + priv_sl2->rxch = priv->rxch; + priv_sl2->ale = priv->ale; + priv_sl2->emac_port = 1; + priv->slaves[1].ndev = ndev; + priv_sl2->cpts = priv->cpts; + priv_sl2->version = priv->version; + + for (i = 0; i < priv->num_irqs; i++) { + priv_sl2->irqs_table[i] = priv->irqs_table[i]; + priv_sl2->num_irqs = priv->num_irqs; + } + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + ndev->netdev_ops = &cpsw_netdev_ops; + ndev->ethtool_ops = &cpsw_ethtool_ops; + netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); + + /* register the network device */ + SET_NETDEV_DEV(ndev, &pdev->dev); + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "cpsw: error registering net device\n"); + free_netdev(ndev); + ret = -ENODEV; + } + + return ret; +} + +static int cpsw_probe(struct platform_device *pdev) +{ + struct cpsw_platform_data *data; + struct net_device *ndev; + struct cpsw_priv *priv; + struct cpdma_params dma_params; + struct cpsw_ale_params ale_params; + void __iomem *ss_regs; + struct resource *res, *ss_res; + u32 slave_offset, sliver_offset, slave_size; + int ret = 0, i; + int irq; + + ndev = alloc_etherdev(sizeof(struct cpsw_priv)); + if (!ndev) { + dev_err(&pdev->dev, "error allocating net_device\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, ndev); + priv = netdev_priv(ndev); + spin_lock_init(&priv->lock); + priv->pdev = pdev; + priv->ndev = ndev; + priv->dev = &ndev->dev; + priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); + priv->rx_packet_max = max(rx_packet_max, 128); + priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); + priv->irq_enabled = true; + if (!priv->cpts) { + dev_err(&pdev->dev, "error allocating cpts\n"); + ret = -ENOMEM; + goto clean_ndev_ret; + } + + /* + * This may be required here for child devices. 
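+ * (Child devices such as the MDIO bus populated from the DT below may depend on the parent having runtime PM enabled before they are probed.)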
+ */ + pm_runtime_enable(&pdev->dev); + + /* Select default pin state */ + pinctrl_pm_select_default_state(&pdev->dev); + + if (cpsw_probe_dt(&priv->data, pdev)) { + dev_err(&pdev->dev, "cpsw: platform data missing\n"); + ret = -ENODEV; + goto clean_runtime_disable_ret; + } + data = &priv->data; + + if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { + memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); + dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr); + } else { + eth_random_addr(priv->mac_addr); + dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr); + } + + memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); + + priv->slaves = devm_kzalloc(&pdev->dev, + sizeof(struct cpsw_slave) * data->slaves, + GFP_KERNEL); + if (!priv->slaves) { + ret = -ENOMEM; + goto clean_runtime_disable_ret; + } + for (i = 0; i < data->slaves; i++) + priv->slaves[i].slave_num = i; + + priv->slaves[0].ndev = ndev; + priv->emac_port = 0; + + priv->clk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(priv->clk)) { + dev_err(priv->dev, "fck is not found\n"); + ret = -ENODEV; + goto clean_runtime_disable_ret; + } + priv->coal_intvl = 0; + priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000; + + ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); + if (IS_ERR(ss_regs)) { + ret = PTR_ERR(ss_regs); + goto clean_runtime_disable_ret; + } + priv->regs = ss_regs; + priv->host_port = HOST_PORT_NUM; + + /* Need to enable clocks with runtime PM api to access module + * registers + */ + pm_runtime_get_sync(&pdev->dev); + priv->version = readl(&priv->regs->id_ver); + pm_runtime_put_sync(&pdev->dev); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->wr_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->wr_regs)) { + ret = PTR_ERR(priv->wr_regs); + goto clean_runtime_disable_ret; + } + + memset(&dma_params, 0, sizeof(dma_params)); + memset(&ale_params, 0, sizeof(ale_params)); + + switch (priv->version) { + case CPSW_VERSION_1: + priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET; + priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW1_HW_STATS; + dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET; + dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET; + ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET; + slave_offset = CPSW1_SLAVE_OFFSET; + slave_size = CPSW1_SLAVE_SIZE; + sliver_offset = CPSW1_SLIVER_OFFSET; + dma_params.desc_mem_phys = 0; + break; + case CPSW_VERSION_2: + case CPSW_VERSION_3: + case CPSW_VERSION_4: + priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET; + priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET; + priv->hw_stats = ss_regs + CPSW2_HW_STATS; + dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET; + dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET; + ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET; + slave_offset = CPSW2_SLAVE_OFFSET; + slave_size = CPSW2_SLAVE_SIZE; + sliver_offset = CPSW2_SLIVER_OFFSET; + dma_params.desc_mem_phys = + (u32 __force) ss_res->start + CPSW2_BD_OFFSET; + break; + default: + dev_err(priv->dev, "unknown version 0x%08x\n", priv->version); + ret = -ENODEV; + goto clean_runtime_disable_ret; + } + for (i = 0; i < priv->data.slaves; i++) { + struct cpsw_slave *slave = &priv->slaves[i]; + cpsw_slave_init(slave, priv, slave_offset, sliver_offset); + slave_offset += slave_size; + sliver_offset += SLIVER_SIZE; + } + + dma_params.dev = &pdev->dev; + dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH; + dma_params.rxfree = 
dma_params.dmaregs + CPDMA_RXFREE; + dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP; + dma_params.txcp = dma_params.txhdp + CPDMA_TXCP; + dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP; + + dma_params.num_chan = data->channels; + dma_params.has_soft_reset = true; + dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE; + dma_params.desc_mem_size = data->bd_ram_size; + dma_params.desc_align = 16; + dma_params.has_ext_regs = true; + dma_params.desc_hw_addr = dma_params.desc_mem_phys; + + priv->dma = cpdma_ctlr_create(&dma_params); + if (!priv->dma) { + dev_err(priv->dev, "error initializing dma\n"); + ret = -ENOMEM; + goto clean_runtime_disable_ret; + } + + priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), + cpsw_tx_handler); + priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0), + cpsw_rx_handler); + + if (WARN_ON(!priv->txch || !priv->rxch)) { + dev_err(priv->dev, "error initializing dma channels\n"); + ret = -ENOMEM; + goto clean_dma_ret; + } + + ale_params.dev = &ndev->dev; + ale_params.ale_ageout = ale_ageout; + ale_params.ale_entries = data->ale_entries; + ale_params.ale_ports = data->slaves; + + priv->ale = cpsw_ale_create(&ale_params); + if (!priv->ale) { + dev_err(priv->dev, "error initializing ale engine\n"); + ret = -ENODEV; + goto clean_dma_ret; + } + + ndev->irq = platform_get_irq(pdev, 1); + if (ndev->irq < 0) { + dev_err(priv->dev, "error getting irq resource\n"); + ret = -ENOENT; + goto clean_ale_ret; + } + + /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and + * MISC IRQs which are always kept disabled with this driver so + * we will not request them. + * + * If anyone wants to implement support for those, make sure to + * first request and append them to irqs_table array. + */ + + /* RX IRQ */ + irq = platform_get_irq(pdev, 1); + if (irq < 0) + goto clean_ale_ret; + + priv->irqs_table[0] = irq; + ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt, + 0, dev_name(&pdev->dev), priv); + if (ret < 0) { + dev_err(priv->dev, "error attaching irq (%d)\n", ret); + goto clean_ale_ret; + } + + /* TX IRQ */ + irq = platform_get_irq(pdev, 2); + if (irq < 0) + goto clean_ale_ret; + + priv->irqs_table[1] = irq; + ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt, + 0, dev_name(&pdev->dev), priv); + if (ret < 0) { + dev_err(priv->dev, "error attaching irq (%d)\n", ret); + goto clean_ale_ret; + } + priv->num_irqs = 2; + + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + ndev->netdev_ops = &cpsw_netdev_ops; + ndev->ethtool_ops = &cpsw_ethtool_ops; + netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT); + + /* register the network device */ + SET_NETDEV_DEV(ndev, &pdev->dev); + ret = register_netdev(ndev); + if (ret) { + dev_err(priv->dev, "error registering net device\n"); + ret = -ENODEV; + goto clean_ale_ret; + } + + cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", + &ss_res->start, ndev->irq); + + if (priv->data.dual_emac) { + ret = cpsw_probe_dual_emac(pdev, priv); + if (ret) { + cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); + goto clean_ale_ret; + } + } + + return 0; + +clean_ale_ret: + cpsw_ale_destroy(priv->ale); +clean_dma_ret: + cpdma_chan_destroy(priv->txch); + cpdma_chan_destroy(priv->rxch); + cpdma_ctlr_destroy(priv->dma); +clean_runtime_disable_ret: + pm_runtime_disable(&pdev->dev); +clean_ndev_ret: + free_netdev(priv->ndev); + return ret; +} + +static int cpsw_remove_child_device(struct device *dev, void *c) +{ + struct platform_device *pdev = to_platform_device(dev); + + 
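+ /* Called for each child of the cpsw device: unregister the platform devices that of_platform_populate() created from the DT at probe time. */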
of_device_unregister(pdev); + + return 0; +} + +static int cpsw_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); + + if (priv->data.dual_emac) + unregister_netdev(cpsw_get_slave_ndev(priv, 1)); + unregister_netdev(ndev); + + cpsw_ale_destroy(priv->ale); + cpdma_chan_destroy(priv->txch); + cpdma_chan_destroy(priv->rxch); + cpdma_ctlr_destroy(priv->dma); + pm_runtime_disable(&pdev->dev); + device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device); + if (priv->data.dual_emac) + free_netdev(cpsw_get_slave_ndev(priv, 1)); + free_netdev(ndev); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cpsw_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); + + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (netif_running(priv->slaves[i].ndev)) + cpsw_ndo_stop(priv->slaves[i].ndev); + soft_reset_slave(priv->slaves + i); + } + } else { + if (netif_running(ndev)) + cpsw_ndo_stop(ndev); + for_each_slave(priv, soft_reset_slave); + } + + pm_runtime_put_sync(&pdev->dev); + + /* Select sleep pin state */ + pinctrl_pm_select_sleep_state(&pdev->dev); + + return 0; +} + +static int cpsw_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); + + pm_runtime_get_sync(&pdev->dev); + + /* Select default pin state */ + pinctrl_pm_select_default_state(&pdev->dev); + + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (netif_running(priv->slaves[i].ndev)) + cpsw_ndo_open(priv->slaves[i].ndev); + } + } else { + if (netif_running(ndev)) + cpsw_ndo_open(ndev); + } + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); + +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); + +static struct platform_driver cpsw_driver = { + .driver = { + .name = "cpsw", + .pm = &cpsw_pm_ops, + .of_match_table = cpsw_of_mtable, + }, + .probe = cpsw_probe, + .remove = cpsw_remove, +}; + +static int __init cpsw_init(void) +{ + return platform_driver_register(&cpsw_driver); +} +late_initcall(cpsw_init); + +static void __exit cpsw_exit(void) +{ + platform_driver_unregister(&cpsw_driver); +} +module_exit(cpsw_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cyril Chemparathy "); +MODULE_AUTHOR("Mugunthan V N "); +MODULE_DESCRIPTION("TI CPSW Ethernet driver"); diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h new file mode 100644 index 000000000..ca90efafd --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw.h @@ -0,0 +1,47 @@ +/* Texas Instruments Ethernet Switch Driver + * + * Copyright (C) 2013 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __CPSW_H__ +#define __CPSW_H__ + +#include <linux/if_ether.h> +#include <linux/phy.h> + +struct cpsw_slave_data { + char phy_id[MII_BUS_ID_SIZE]; + int phy_if; + u8 mac_addr[ETH_ALEN]; + u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */ +}; + +struct cpsw_platform_data { + struct cpsw_slave_data *slave_data; + u32 ss_reg_ofs; /* Subsystem control register offset */ + u32 channels; /* number of cpdma channels (symmetric) */ + u32 slaves; /* number of slave cpgmac ports */ + u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */ + u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ + u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ + u32 ale_entries; /* ale table size */ + u32 bd_ram_size; /* buffer descriptor ram size */ + u32 rx_descs; /* Number of Rx Descriptors */ + u32 mac_control; /* Mac control register */ + u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */ + bool dual_emac; /* Enable Dual EMAC mode */ +}; + +void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave); +int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave, + u8 *mac_addr); + +#endif /* __CPSW_H__ */ diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c new file mode 100644 index 000000000..6e927b458 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -0,0 +1,831 @@ +/* + * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine + * + * Copyright (C) 2012 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cpsw_ale.h" + +#define BITMASK(bits) (BIT(bits) - 1) + +#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff) +#define ALE_VERSION_MINOR(rev) (rev & 0xff) + +/* ALE Registers */ +#define ALE_IDVER 0x00 +#define ALE_CONTROL 0x08 +#define ALE_PRESCALE 0x10 +#define ALE_UNKNOWNVLAN 0x18 +#define ALE_TABLE_CONTROL 0x20 +#define ALE_TABLE 0x34 +#define ALE_PORTCTL 0x40 + +#define ALE_TABLE_WRITE BIT(31) + +#define ALE_TYPE_FREE 0 +#define ALE_TYPE_ADDR 1 +#define ALE_TYPE_VLAN 2 +#define ALE_TYPE_VLAN_ADDR 3 + +#define ALE_UCAST_PERSISTANT 0 +#define ALE_UCAST_UNTOUCHED 1 +#define ALE_UCAST_OUI 2 +#define ALE_UCAST_TOUCHED 3 + +static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) +{ + int idx; + + idx = start / 32; + start -= idx * 32; + idx = 2 - idx; /* flip */ + return (ale_entry[idx] >> start) & BITMASK(bits); +} + +static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, + u32 value) +{ + int idx; + + value &= BITMASK(bits); + idx = start / 32; + start -= idx * 32; + idx = 2 - idx; /* flip */ + ale_entry[idx] &= ~(BITMASK(bits) << start); + ale_entry[idx] |= (value << start); +} + +#define DEFINE_ALE_FIELD(name, start, bits) \ +static inline int cpsw_ale_get_##name(u32 *ale_entry) \ +{ \ + return cpsw_ale_get_field(ale_entry, start, bits); \ +} \ +static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \ +{ \ + cpsw_ale_set_field(ale_entry, start, bits, value); \ +} + +DEFINE_ALE_FIELD(entry_type, 60, 2) +DEFINE_ALE_FIELD(vlan_id, 48, 12) +DEFINE_ALE_FIELD(mcast_state, 62, 2) +DEFINE_ALE_FIELD(port_mask, 66, 3) +DEFINE_ALE_FIELD(super, 65, 1) +DEFINE_ALE_FIELD(ucast_type, 62, 2) +DEFINE_ALE_FIELD(port_num, 66, 2) +DEFINE_ALE_FIELD(blocked, 65, 1) +DEFINE_ALE_FIELD(secure, 64, 1) +DEFINE_ALE_FIELD(vlan_untag_force, 24, 3) +DEFINE_ALE_FIELD(vlan_reg_mcast, 16, 3) +DEFINE_ALE_FIELD(vlan_unreg_mcast, 8, 3) +DEFINE_ALE_FIELD(vlan_member_list, 0, 3) +DEFINE_ALE_FIELD(mcast, 40, 1) + +/* The MAC address field in the ALE entry cannot be macroized as above */ +static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr) +{ + int i; + + for (i = 0; i < 6; i++) + addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8); +} + +static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr) +{ + int i; + + for (i = 0; i < 6; i++) + cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]); +} + +static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry) +{ + int i; + + WARN_ON(idx > ale->params.ale_entries); + + __raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL); + + for (i = 0; i < ALE_ENTRY_WORDS; i++) + ale_entry[i] = __raw_readl(ale->params.ale_regs + + ALE_TABLE + 4 * i); + + return idx; +} + +static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry) +{ + int i; + + WARN_ON(idx > ale->params.ale_entries); + + for (i = 0; i < ALE_ENTRY_WORDS; i++) + __raw_writel(ale_entry[i], ale->params.ale_regs + + ALE_TABLE + 4 * i); + + __raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs + + ALE_TABLE_CONTROL); + + return idx; +} + +static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + u8 entry_addr[6]; + + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) + continue; + if 
(cpsw_ale_get_vlan_id(ale_entry) != vid) + continue; + cpsw_ale_get_addr(ale_entry, entry_addr); + if (ether_addr_equal(entry_addr, addr)) + return idx; + } + return -ENOENT; +} + +static int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + if (cpsw_ale_get_vlan_id(ale_entry) == vid) + return idx; + } + return -ENOENT; +} + +static int cpsw_ale_match_free(struct cpsw_ale *ale) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type == ALE_TYPE_FREE) + return idx; + } + return -ENOENT; +} + +static int cpsw_ale_find_ageable(struct cpsw_ale *ale) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR) + continue; + if (cpsw_ale_get_mcast(ale_entry)) + continue; + type = cpsw_ale_get_ucast_type(ale_entry); + if (type != ALE_UCAST_PERSISTANT && + type != ALE_UCAST_OUI) + return idx; + } + return -ENOENT; +} + +static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, + int port_mask) +{ + int mask; + + mask = cpsw_ale_get_port_mask(ale_entry); + if ((mask & port_mask) == 0) + return; /* ports dont intersect, not interested */ + mask &= ~port_mask; + + /* free if only remaining port is host port */ + if (mask) + cpsw_ale_set_port_mask(ale_entry, mask); + else + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); +} + +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int ret, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + ret = cpsw_ale_get_entry_type(ale_entry); + if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) + continue; + + /* if vid passed is -1 then remove all multicast entry from + * the table irrespective of vlan id, if a valid vlan id is + * passed then remove only multicast added to that vlan id. + * if vlan id doesn't match then move on to next entry. 
+ */ + if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid) + continue; + + if (cpsw_ale_get_mcast(ale_entry)) { + u8 addr[6]; + + cpsw_ale_get_addr(ale_entry, addr); + if (!is_broadcast_ether_addr(addr)) + cpsw_ale_flush_mcast(ale, ale_entry, port_mask); + } + + cpsw_ale_write(ale, idx, ale_entry); + } + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast); + +static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry, + int port_mask) +{ + int port; + + port = cpsw_ale_get_port_num(ale_entry); + if ((BIT(port) & port_mask) == 0) + return; /* ports dont intersect, not interested */ + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); +} + +int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int ret, idx; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + ret = cpsw_ale_get_entry_type(ale_entry); + if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) + continue; + + if (cpsw_ale_get_mcast(ale_entry)) + cpsw_ale_flush_mcast(ale, ale_entry, port_mask); + else + cpsw_ale_flush_ucast(ale, ale_entry, port_mask); + + cpsw_ale_write(ale, idx, ale_entry); + } + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_flush); + +static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry, + int flags, u16 vid) +{ + if (flags & ALE_VLAN) { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR); + cpsw_ale_set_vlan_id(ale_entry, vid); + } else { + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR); + } +} + +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + + cpsw_ale_set_addr(ale_entry, addr); + cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT); + cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0); + cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0); + cpsw_ale_set_port_num(ale_entry, port); + + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); + if (idx < 0) + idx = cpsw_ale_match_free(ale); + if (idx < 0) + idx = cpsw_ale_find_ageable(ale); + if (idx < 0) + return -ENOMEM; + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast); + +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); + if (idx < 0) + return -ENOENT; + + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast); + +int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid, int mcast_state) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx, mask; + + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); + if (idx >= 0) + cpsw_ale_read(ale, idx, ale_entry); + + cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid); + + cpsw_ale_set_addr(ale_entry, addr); + cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 
1 : 0); + cpsw_ale_set_mcast_state(ale_entry, mcast_state); + + mask = cpsw_ale_get_port_mask(ale_entry); + port_mask |= mask; + cpsw_ale_set_port_mask(ale_entry, port_mask); + + if (idx < 0) + idx = cpsw_ale_match_free(ale); + if (idx < 0) + idx = cpsw_ale_find_ageable(ale); + if (idx < 0) + return -ENOMEM; + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast); + +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); + if (idx < 0) + return -EINVAL; + + cpsw_ale_read(ale, idx, ale_entry); + + if (port_mask) + cpsw_ale_set_port_mask(ale_entry, port_mask); + else + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast); + +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx >= 0) + cpsw_ale_read(ale, idx, ale_entry); + + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN); + cpsw_ale_set_vlan_id(ale_entry, vid); + + cpsw_ale_set_vlan_untag_force(ale_entry, untag); + cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast); + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_set_vlan_member_list(ale_entry, port); + + if (idx < 0) + idx = cpsw_ale_match_free(ale); + if (idx < 0) + idx = cpsw_ale_find_ageable(ale); + if (idx < 0) + return -ENOMEM; + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan); + +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx < 0) + return -ENOENT; + + cpsw_ale_read(ale, idx, ale_entry); + + if (port_mask) + cpsw_ale_set_vlan_member_list(ale_entry, port_mask); + else + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + + cpsw_ale_write(ale, idx, ale_entry); + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan); + +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti) +{ + u32 ale_entry[ALE_ENTRY_WORDS]; + int type, idx; + int unreg_mcast = 0; + + /* Only bother doing the work if the setting is actually changing */ + if (ale->allmulti == allmulti) + return; + + /* Remember the new setting to check against next time */ + ale->allmulti = allmulti; + + for (idx = 0; idx < ale->params.ale_entries; idx++) { + cpsw_ale_read(ale, idx, ale_entry); + type = cpsw_ale_get_entry_type(ale_entry); + if (type != ALE_TYPE_VLAN) + continue; + + unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry); + if (allmulti) + unreg_mcast |= 1; + else + unreg_mcast &= ~1; + cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast); + cpsw_ale_write(ale, idx, ale_entry); + } +} +EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti); + +struct ale_control_info { + const char *name; + int offset, port_offset; + int shift, port_shift; + int bits; +}; + +static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = { + [ALE_ENABLE] = { + .name = "enable", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 31, + .port_shift = 0, + .bits = 1, + }, + [ALE_CLEAR] = { + .name = "clear", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 30, + .port_shift = 0, + .bits = 1, + }, + [ALE_AGEOUT] = { + .name = "ageout", + .offset = ALE_CONTROL, + .port_offset = 0, 
+ .shift = 29, + .port_shift = 0, + .bits = 1, + }, + [ALE_P0_UNI_FLOOD] = { + .name = "port0_unicast_flood", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 8, + .port_shift = 0, + .bits = 1, + }, + [ALE_VLAN_NOLEARN] = { + .name = "vlan_nolearn", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 7, + .port_shift = 0, + .bits = 1, + }, + [ALE_NO_PORT_VLAN] = { + .name = "no_port_vlan", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 6, + .port_shift = 0, + .bits = 1, + }, + [ALE_OUI_DENY] = { + .name = "oui_deny", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 5, + .port_shift = 0, + .bits = 1, + }, + [ALE_BYPASS] = { + .name = "bypass", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 4, + .port_shift = 0, + .bits = 1, + }, + [ALE_RATE_LIMIT_TX] = { + .name = "rate_limit_tx", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 3, + .port_shift = 0, + .bits = 1, + }, + [ALE_VLAN_AWARE] = { + .name = "vlan_aware", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 2, + .port_shift = 0, + .bits = 1, + }, + [ALE_AUTH_ENABLE] = { + .name = "auth_enable", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 1, + .port_shift = 0, + .bits = 1, + }, + [ALE_RATE_LIMIT] = { + .name = "rate_limit", + .offset = ALE_CONTROL, + .port_offset = 0, + .shift = 0, + .port_shift = 0, + .bits = 1, + }, + [ALE_PORT_STATE] = { + .name = "port_state", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 0, + .port_shift = 0, + .bits = 2, + }, + [ALE_PORT_DROP_UNTAGGED] = { + .name = "drop_untagged", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 2, + .port_shift = 0, + .bits = 1, + }, + [ALE_PORT_DROP_UNKNOWN_VLAN] = { + .name = "drop_unknown", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 3, + .port_shift = 0, + .bits = 1, + }, + [ALE_PORT_NOLEARN] = { + .name = "nolearn", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 4, + .port_shift = 0, + .bits = 1, + }, + [ALE_PORT_NO_SA_UPDATE] = { + .name = "no_source_update", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 5, + .port_shift = 0, + .bits = 1, + }, + [ALE_PORT_MCAST_LIMIT] = { + .name = "mcast_limit", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 16, + .port_shift = 0, + .bits = 8, + }, + [ALE_PORT_BCAST_LIMIT] = { + .name = "bcast_limit", + .offset = ALE_PORTCTL, + .port_offset = 4, + .shift = 24, + .port_shift = 0, + .bits = 8, + }, + [ALE_PORT_UNKNOWN_VLAN_MEMBER] = { + .name = "unknown_vlan_member", + .offset = ALE_UNKNOWNVLAN, + .port_offset = 0, + .shift = 0, + .port_shift = 0, + .bits = 6, + }, + [ALE_PORT_UNKNOWN_MCAST_FLOOD] = { + .name = "unknown_mcast_flood", + .offset = ALE_UNKNOWNVLAN, + .port_offset = 0, + .shift = 8, + .port_shift = 0, + .bits = 6, + }, + [ALE_PORT_UNKNOWN_REG_MCAST_FLOOD] = { + .name = "unknown_reg_flood", + .offset = ALE_UNKNOWNVLAN, + .port_offset = 0, + .shift = 16, + .port_shift = 0, + .bits = 6, + }, + [ALE_PORT_UNTAGGED_EGRESS] = { + .name = "untagged_egress", + .offset = ALE_UNKNOWNVLAN, + .port_offset = 0, + .shift = 24, + .port_shift = 0, + .bits = 6, + }, +}; + +int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control, + int value) +{ + const struct ale_control_info *info; + int offset, shift; + u32 tmp, mask; + + if (control < 0 || control >= ARRAY_SIZE(ale_controls)) + return -EINVAL; + + info = &ale_controls[control]; + if (info->port_offset == 0 && info->port_shift == 0) + port = 0; /* global, port is a dont care */ + + if (port < 0 || port > ale->params.ale_ports) + return -EINVAL; + + mask = 
BITMASK(info->bits); + if (value & ~mask) + return -EINVAL; + + offset = info->offset + (port * info->port_offset); + shift = info->shift + (port * info->port_shift); + + tmp = __raw_readl(ale->params.ale_regs + offset); + tmp = (tmp & ~(mask << shift)) | (value << shift); + __raw_writel(tmp, ale->params.ale_regs + offset); + + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_control_set); + +int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control) +{ + const struct ale_control_info *info; + int offset, shift; + u32 tmp; + + if (control < 0 || control >= ARRAY_SIZE(ale_controls)) + return -EINVAL; + + info = &ale_controls[control]; + if (info->port_offset == 0 && info->port_shift == 0) + port = 0; /* global, port is a dont care */ + + if (port < 0 || port > ale->params.ale_ports) + return -EINVAL; + + offset = info->offset + (port * info->port_offset); + shift = info->shift + (port * info->port_shift); + + tmp = __raw_readl(ale->params.ale_regs + offset) >> shift; + return tmp & BITMASK(info->bits); +} +EXPORT_SYMBOL_GPL(cpsw_ale_control_get); + +static void cpsw_ale_timer(unsigned long arg) +{ + struct cpsw_ale *ale = (struct cpsw_ale *)arg; + + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + + if (ale->ageout) { + ale->timer.expires = jiffies + ale->ageout; + add_timer(&ale->timer); + } +} + +int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout) +{ + del_timer_sync(&ale->timer); + ale->ageout = ageout * HZ; + if (ale->ageout) { + ale->timer.expires = jiffies + ale->ageout; + add_timer(&ale->timer); + } + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout); + +void cpsw_ale_start(struct cpsw_ale *ale) +{ + u32 rev; + + rev = __raw_readl(ale->params.ale_regs + ALE_IDVER); + dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n", + ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev)); + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); + cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); + + init_timer(&ale->timer); + ale->timer.data = (unsigned long)ale; + ale->timer.function = cpsw_ale_timer; + if (ale->ageout) { + ale->timer.expires = jiffies + ale->ageout; + add_timer(&ale->timer); + } +} +EXPORT_SYMBOL_GPL(cpsw_ale_start); + +void cpsw_ale_stop(struct cpsw_ale *ale) +{ + del_timer_sync(&ale->timer); +} +EXPORT_SYMBOL_GPL(cpsw_ale_stop); + +struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params) +{ + struct cpsw_ale *ale; + + ale = kzalloc(sizeof(*ale), GFP_KERNEL); + if (!ale) + return NULL; + + ale->params = *params; + ale->ageout = ale->params.ale_ageout * HZ; + + return ale; +} +EXPORT_SYMBOL_GPL(cpsw_ale_create); + +int cpsw_ale_destroy(struct cpsw_ale *ale) +{ + if (!ale) + return -EINVAL; + cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); + kfree(ale); + return 0; +} +EXPORT_SYMBOL_GPL(cpsw_ale_destroy); + +void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data) +{ + int i; + + for (i = 0; i < ale->params.ale_entries; i++) { + cpsw_ale_read(ale, i, data); + data += ALE_ENTRY_WORDS; + } +} +EXPORT_SYMBOL_GPL(cpsw_ale_dump); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI CPSW ALE driver"); +MODULE_AUTHOR("Texas Instruments"); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h new file mode 100644 index 000000000..af1e7ecd8 --- /dev/null +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -0,0 +1,114 @@ +/* + * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs + * + * Copyright (C) 2012 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public 
License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __TI_CPSW_ALE_H__ +#define __TI_CPSW_ALE_H__ + +struct cpsw_ale_params { + struct device *dev; + void __iomem *ale_regs; + unsigned long ale_ageout; /* in secs */ + unsigned long ale_entries; + unsigned long ale_ports; +}; + +struct cpsw_ale { + struct cpsw_ale_params params; + struct timer_list timer; + unsigned long ageout; + int allmulti; +}; + +enum cpsw_ale_control { + /* global */ + ALE_ENABLE, + ALE_CLEAR, + ALE_AGEOUT, + ALE_P0_UNI_FLOOD, + ALE_VLAN_NOLEARN, + ALE_NO_PORT_VLAN, + ALE_OUI_DENY, + ALE_BYPASS, + ALE_RATE_LIMIT_TX, + ALE_VLAN_AWARE, + ALE_AUTH_ENABLE, + ALE_RATE_LIMIT, + /* port controls */ + ALE_PORT_STATE, + ALE_PORT_DROP_UNTAGGED, + ALE_PORT_DROP_UNKNOWN_VLAN, + ALE_PORT_NOLEARN, + ALE_PORT_NO_SA_UPDATE, + ALE_PORT_UNKNOWN_VLAN_MEMBER, + ALE_PORT_UNKNOWN_MCAST_FLOOD, + ALE_PORT_UNKNOWN_REG_MCAST_FLOOD, + ALE_PORT_UNTAGGED_EGRESS, + ALE_PORT_BCAST_LIMIT, + ALE_PORT_MCAST_LIMIT, + ALE_NUM_CONTROLS, +}; + +enum cpsw_ale_port_state { + ALE_PORT_STATE_DISABLE = 0x00, + ALE_PORT_STATE_BLOCK = 0x01, + ALE_PORT_STATE_LEARN = 0x02, + ALE_PORT_STATE_FORWARD = 0x03, +}; + +/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */ +#define ALE_SECURE BIT(0) +#define ALE_BLOCKED BIT(1) +#define ALE_SUPER BIT(2) +#define ALE_VLAN BIT(3) + +#define ALE_PORT_HOST BIT(0) +#define ALE_PORT_1 BIT(1) +#define ALE_PORT_2 BIT(2) + +#define ALE_MCAST_FWD 0 +#define ALE_MCAST_BLOCK_LEARN_FWD 1 +#define ALE_MCAST_FWD_LEARN 2 +#define ALE_MCAST_FWD_2 3 + +#define ALE_ENTRY_BITS 68 +#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32) + +struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params); +int cpsw_ale_destroy(struct cpsw_ale *ale); + +void cpsw_ale_start(struct cpsw_ale *ale); +void cpsw_ale_stop(struct cpsw_ale *ale); + +int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); +int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); +int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); +int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, + int flags, u16 vid); +int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid, int mcast_state); +int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, + int flags, u16 vid); +int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, + int reg_mcast, int unreg_mcast); +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); +void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti); + +int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); +int cpsw_ale_control_set(struct cpsw_ale *ale, int port, + int control, int value); +void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data); + +#endif diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c new file mode 100644 index 000000000..85a55b4ff --- /dev/null +++ b/drivers/net/ethernet/ti/cpts.c @@ -0,0 +1,408 @@ +/* + * TI Common Platform Time Sync + * + * Copyright (C) 2012 Richard Cochran + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General 
Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cpts.h" + +#ifdef CONFIG_TI_CPTS + +#define cpts_read32(c, r) __raw_readl(&c->reg->r) +#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r) + +static int event_expired(struct cpts_event *event) +{ + return time_after(jiffies, event->tmo); +} + +static int event_type(struct cpts_event *event) +{ + return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; +} + +static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low) +{ + u32 r = cpts_read32(cpts, intstat_raw); + + if (r & TS_PEND_RAW) { + *high = cpts_read32(cpts, event_high); + *low = cpts_read32(cpts, event_low); + cpts_write32(cpts, EVENT_POP, event_pop); + return 0; + } + return -1; +} + +/* + * Returns zero if matching event type was found. + */ +static int cpts_fifo_read(struct cpts *cpts, int match) +{ + int i, type = -1; + u32 hi, lo; + struct cpts_event *event; + + for (i = 0; i < CPTS_FIFO_DEPTH; i++) { + if (cpts_fifo_pop(cpts, &hi, &lo)) + break; + if (list_empty(&cpts->pool)) { + pr_err("cpts: event pool is empty\n"); + return -1; + } + event = list_first_entry(&cpts->pool, struct cpts_event, list); + event->tmo = jiffies + 2; + event->high = hi; + event->low = lo; + type = event_type(event); + switch (type) { + case CPTS_EV_PUSH: + case CPTS_EV_RX: + case CPTS_EV_TX: + list_del_init(&event->list); + list_add_tail(&event->list, &cpts->events); + break; + case CPTS_EV_ROLL: + case CPTS_EV_HALF: + case CPTS_EV_HW: + break; + default: + pr_err("cpts: unknown event type\n"); + break; + } + if (type == match) + break; + } + return type == match ? 0 : -1; +} + +static cycle_t cpts_systim_read(const struct cyclecounter *cc) +{ + u64 val = 0; + struct cpts_event *event; + struct list_head *this, *next; + struct cpts *cpts = container_of(cc, struct cpts, cc); + + cpts_write32(cpts, TS_PUSH, ts_push); + if (cpts_fifo_read(cpts, CPTS_EV_PUSH)) + pr_err("cpts: unable to obtain a time stamp\n"); + + list_for_each_safe(this, next, &cpts->events) { + event = list_entry(this, struct cpts_event, list); + if (event_type(event) == CPTS_EV_PUSH) { + list_del_init(&event->list); + list_add(&event->list, &cpts->pool); + val = event->low; + break; + } + } + + return val; +} + +/* PTP clock operations */ + +static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 adj; + u32 diff, mult; + int neg_adj = 0; + unsigned long flags; + struct cpts *cpts = container_of(ptp, struct cpts, info); + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + mult = cpts->cc_mult; + adj = mult; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + + spin_lock_irqsave(&cpts->lock, flags); + + timecounter_read(&cpts->tc); + + cpts->cc.mult = neg_adj ? 
mult - diff : mult + diff; + + spin_unlock_irqrestore(&cpts->lock, flags); + + return 0; +} + +static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + unsigned long flags; + struct cpts *cpts = container_of(ptp, struct cpts, info); + + spin_lock_irqsave(&cpts->lock, flags); + timecounter_adjtime(&cpts->tc, delta); + spin_unlock_irqrestore(&cpts->lock, flags); + + return 0; +} + +static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct cpts *cpts = container_of(ptp, struct cpts, info); + + spin_lock_irqsave(&cpts->lock, flags); + ns = timecounter_read(&cpts->tc); + spin_unlock_irqrestore(&cpts->lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int cpts_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct cpts *cpts = container_of(ptp, struct cpts, info); + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&cpts->lock, flags); + timecounter_init(&cpts->tc, &cpts->cc, ns); + spin_unlock_irqrestore(&cpts->lock, flags); + + return 0; +} + +static int cpts_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info cpts_info = { + .owner = THIS_MODULE, + .name = "CTPS timer", + .max_adj = 1000000, + .n_ext_ts = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = cpts_ptp_adjfreq, + .adjtime = cpts_ptp_adjtime, + .gettime64 = cpts_ptp_gettime, + .settime64 = cpts_ptp_settime, + .enable = cpts_ptp_enable, +}; + +static void cpts_overflow_check(struct work_struct *work) +{ + struct timespec64 ts; + struct cpts *cpts = container_of(work, struct cpts, overflow_work.work); + + cpts_write32(cpts, CPTS_EN, control); + cpts_write32(cpts, TS_PEND_EN, int_enable); + cpts_ptp_gettime(&cpts->info, &ts); + pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); + schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD); +} + +static void cpts_clk_init(struct device *dev, struct cpts *cpts) +{ + cpts->refclk = devm_clk_get(dev, "cpts"); + if (IS_ERR(cpts->refclk)) { + dev_err(dev, "Failed to get cpts refclk\n"); + cpts->refclk = NULL; + return; + } + clk_prepare_enable(cpts->refclk); +} + +static void cpts_clk_release(struct cpts *cpts) +{ + clk_disable(cpts->refclk); +} + +static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, + u16 ts_seqid, u8 ts_msgtype) +{ + u16 *seqid; + unsigned int offset = 0; + u8 *msgtype, *data = skb->data; + + if (ptp_class & PTP_CLASS_VLAN) + offset += VLAN_HLEN; + + switch (ptp_class & PTP_CLASS_PMASK) { + case PTP_CLASS_IPV4: + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; + break; + case PTP_CLASS_IPV6: + offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; + break; + case PTP_CLASS_L2: + offset += ETH_HLEN; + break; + default: + return 0; + } + + if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid)) + return 0; + + if (unlikely(ptp_class & PTP_CLASS_V1)) + msgtype = data + offset + OFF_PTP_CONTROL; + else + msgtype = data + offset; + + seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); + + return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid)); +} + +static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) +{ + u64 ns = 0; + struct cpts_event *event; + struct list_head *this, *next; + unsigned int class = ptp_classify_raw(skb); + unsigned long flags; + u16 seqid; + u8 mtype; + + if (class == PTP_CLASS_NONE) + return 0; + + 
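+ /* Drain pending hardware events from the CPTS FIFO, recycle any that have expired back into the free pool, and look for an event whose type, PTP sequence id and message type match this skb. */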
spin_lock_irqsave(&cpts->lock, flags); + cpts_fifo_read(cpts, CPTS_EV_PUSH); + list_for_each_safe(this, next, &cpts->events) { + event = list_entry(this, struct cpts_event, list); + if (event_expired(event)) { + list_del_init(&event->list); + list_add(&event->list, &cpts->pool); + continue; + } + mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; + seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; + if (ev_type == event_type(event) && + cpts_match(skb, class, seqid, mtype)) { + ns = timecounter_cyc2time(&cpts->tc, event->low); + list_del_init(&event->list); + list_add(&event->list, &cpts->pool); + break; + } + } + spin_unlock_irqrestore(&cpts->lock, flags); + + return ns; +} + +void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb) +{ + u64 ns; + struct skb_shared_hwtstamps *ssh; + + if (!cpts->rx_enable) + return; + ns = cpts_find_ts(cpts, skb, CPTS_EV_RX); + if (!ns) + return; + ssh = skb_hwtstamps(skb); + memset(ssh, 0, sizeof(*ssh)); + ssh->hwtstamp = ns_to_ktime(ns); +} + +void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb) +{ + u64 ns; + struct skb_shared_hwtstamps ssh; + + if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) + return; + ns = cpts_find_ts(cpts, skb, CPTS_EV_TX); + if (!ns) + return; + memset(&ssh, 0, sizeof(ssh)); + ssh.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &ssh); +} + +#endif /*CONFIG_TI_CPTS*/ + +int cpts_register(struct device *dev, struct cpts *cpts, + u32 mult, u32 shift) +{ +#ifdef CONFIG_TI_CPTS + int err, i; + unsigned long flags; + + cpts->info = cpts_info; + cpts->clock = ptp_clock_register(&cpts->info, dev); + if (IS_ERR(cpts->clock)) { + err = PTR_ERR(cpts->clock); + cpts->clock = NULL; + return err; + } + spin_lock_init(&cpts->lock); + + cpts->cc.read = cpts_systim_read; + cpts->cc.mask = CLOCKSOURCE_MASK(32); + cpts->cc_mult = mult; + cpts->cc.mult = mult; + cpts->cc.shift = shift; + + INIT_LIST_HEAD(&cpts->events); + INIT_LIST_HEAD(&cpts->pool); + for (i = 0; i < CPTS_MAX_EVENTS; i++) + list_add(&cpts->pool_data[i].list, &cpts->pool); + + cpts_clk_init(dev, cpts); + cpts_write32(cpts, CPTS_EN, control); + cpts_write32(cpts, TS_PEND_EN, int_enable); + + spin_lock_irqsave(&cpts->lock, flags); + timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&cpts->lock, flags); + + INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check); + schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD); + + cpts->phc_index = ptp_clock_index(cpts->clock); +#endif + return 0; +} + +void cpts_unregister(struct cpts *cpts) +{ +#ifdef CONFIG_TI_CPTS + if (cpts->clock) { + ptp_clock_unregister(cpts->clock); + cancel_delayed_work_sync(&cpts->overflow_work); + } + if (cpts->refclk) + cpts_clk_release(cpts); +#endif +} diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h new file mode 100644 index 000000000..69a46b92c --- /dev/null +++ b/drivers/net/ethernet/ti/cpts.h @@ -0,0 +1,145 @@ +/* + * TI Common Platform Time Sync + * + * Copyright (C) 2012 Richard Cochran + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef _TI_CPTS_H_ +#define _TI_CPTS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct cpsw_cpts { + u32 idver; /* Identification and version */ + u32 control; /* Time sync control */ + u32 res1; + u32 ts_push; /* Time stamp event push */ + u32 ts_load_val; /* Time stamp load value */ + u32 ts_load_en; /* Time stamp load enable */ + u32 res2[2]; + u32 intstat_raw; /* Time sync interrupt status raw */ + u32 intstat_masked; /* Time sync interrupt status masked */ + u32 int_enable; /* Time sync interrupt enable */ + u32 res3; + u32 event_pop; /* Event interrupt pop */ + u32 event_low; /* 32 Bit Event Time Stamp */ + u32 event_high; /* Event Type Fields */ +}; + +/* Bit definitions for the IDVER register */ +#define TX_IDENT_SHIFT (16) /* TX Identification Value */ +#define TX_IDENT_MASK (0xffff) +#define RTL_VER_SHIFT (11) /* RTL Version Value */ +#define RTL_VER_MASK (0x1f) +#define MAJOR_VER_SHIFT (8) /* Major Version Value */ +#define MAJOR_VER_MASK (0x7) +#define MINOR_VER_SHIFT (0) /* Minor Version Value */ +#define MINOR_VER_MASK (0xff) + +/* Bit definitions for the CONTROL register */ +#define HW4_TS_PUSH_EN (1<<11) /* Hardware push 4 enable */ +#define HW3_TS_PUSH_EN (1<<10) /* Hardware push 3 enable */ +#define HW2_TS_PUSH_EN (1<<9) /* Hardware push 2 enable */ +#define HW1_TS_PUSH_EN (1<<8) /* Hardware push 1 enable */ +#define INT_TEST (1<<1) /* Interrupt Test */ +#define CPTS_EN (1<<0) /* Time Sync Enable */ + +/* + * Definitions for the single bit resisters: + * TS_PUSH TS_LOAD_EN INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP + */ +#define TS_PUSH (1<<0) /* Time stamp event push */ +#define TS_LOAD_EN (1<<0) /* Time Stamp Load */ +#define TS_PEND_RAW (1<<0) /* int read (before enable) */ +#define TS_PEND (1<<0) /* masked interrupt read (after enable) */ +#define TS_PEND_EN (1<<0) /* masked interrupt enable */ +#define EVENT_POP (1<<0) /* writing discards one event */ + +/* Bit definitions for the EVENT_HIGH register */ +#define PORT_NUMBER_SHIFT (24) /* Indicates Ethernet port or HW pin */ +#define PORT_NUMBER_MASK (0x1f) +#define EVENT_TYPE_SHIFT (20) /* Time sync event type */ +#define EVENT_TYPE_MASK (0xf) +#define MESSAGE_TYPE_SHIFT (16) /* PTP message type */ +#define MESSAGE_TYPE_MASK (0xf) +#define SEQUENCE_ID_SHIFT (0) /* PTP message sequence ID */ +#define SEQUENCE_ID_MASK (0xffff) + +enum { + CPTS_EV_PUSH, /* Time Stamp Push Event */ + CPTS_EV_ROLL, /* Time Stamp Rollover Event */ + CPTS_EV_HALF, /* Time Stamp Half Rollover Event */ + CPTS_EV_HW, /* Hardware Time Stamp Push Event */ + CPTS_EV_RX, /* Ethernet Receive Event */ + CPTS_EV_TX, /* Ethernet Transmit Event */ +}; + +/* This covers any input clock up to about 500 MHz. 
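+ * At that rate a 32-bit cycle counter rolls over after 2^32 cycles,
+ * i.e. every 2^32 / 500e6 ~= 8.6 seconds, so running the overflow
+ * check once per CPTS_OVERFLOW_PERIOD (8 seconds) reads the
+ * timecounter at least once per wrap.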
*/ +#define CPTS_OVERFLOW_PERIOD (HZ * 8) + +#define CPTS_FIFO_DEPTH 16 +#define CPTS_MAX_EVENTS 32 + +struct cpts_event { + struct list_head list; + unsigned long tmo; + u32 high; + u32 low; +}; + +struct cpts { + struct cpsw_cpts __iomem *reg; + int tx_enable; + int rx_enable; +#ifdef CONFIG_TI_CPTS + struct ptp_clock_info info; + struct ptp_clock *clock; + spinlock_t lock; /* protects time registers */ + u32 cc_mult; /* for the nominal frequency */ + struct cyclecounter cc; + struct timecounter tc; + struct delayed_work overflow_work; + int phc_index; + struct clk *refclk; + struct list_head events; + struct list_head pool; + struct cpts_event pool_data[CPTS_MAX_EVENTS]; +#endif +}; + +#ifdef CONFIG_TI_CPTS +void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); +void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb); +#else +static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb) +{ +} +static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb) +{ +} +#endif + +int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift); +void cpts_unregister(struct cpts *cpts); + +#endif diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c new file mode 100644 index 000000000..657b65bf5 --- /dev/null +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -0,0 +1,1038 @@ +/* + * Texas Instruments CPDMA Driver + * + * Copyright (C) 2010 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "davinci_cpdma.h" + +/* DMA Registers */ +#define CPDMA_TXIDVER 0x00 +#define CPDMA_TXCONTROL 0x04 +#define CPDMA_TXTEARDOWN 0x08 +#define CPDMA_RXIDVER 0x10 +#define CPDMA_RXCONTROL 0x14 +#define CPDMA_SOFTRESET 0x1c +#define CPDMA_RXTEARDOWN 0x18 +#define CPDMA_TXINTSTATRAW 0x80 +#define CPDMA_TXINTSTATMASKED 0x84 +#define CPDMA_TXINTMASKSET 0x88 +#define CPDMA_TXINTMASKCLEAR 0x8c +#define CPDMA_MACINVECTOR 0x90 +#define CPDMA_MACEOIVECTOR 0x94 +#define CPDMA_RXINTSTATRAW 0xa0 +#define CPDMA_RXINTSTATMASKED 0xa4 +#define CPDMA_RXINTMASKSET 0xa8 +#define CPDMA_RXINTMASKCLEAR 0xac +#define CPDMA_DMAINTSTATRAW 0xb0 +#define CPDMA_DMAINTSTATMASKED 0xb4 +#define CPDMA_DMAINTMASKSET 0xb8 +#define CPDMA_DMAINTMASKCLEAR 0xbc +#define CPDMA_DMAINT_HOSTERR BIT(1) + +/* the following exist only if has_ext_regs is set */ +#define CPDMA_DMACONTROL 0x20 +#define CPDMA_DMASTATUS 0x24 +#define CPDMA_RXBUFFOFS 0x28 +#define CPDMA_EM_CONTROL 0x2c + +/* Descriptor mode bits */ +#define CPDMA_DESC_SOP BIT(31) +#define CPDMA_DESC_EOP BIT(30) +#define CPDMA_DESC_OWNER BIT(29) +#define CPDMA_DESC_EOQ BIT(28) +#define CPDMA_DESC_TD_COMPLETE BIT(27) +#define CPDMA_DESC_PASS_CRC BIT(26) +#define CPDMA_DESC_TO_PORT_EN BIT(20) +#define CPDMA_TO_PORT_SHIFT 16 +#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16)) +#define CPDMA_DESC_CRC_LEN 4 + +#define CPDMA_TEARDOWN_VALUE 0xfffffffc + +struct cpdma_desc { + /* hardware fields */ + u32 hw_next; + u32 hw_buffer; + u32 hw_len; + u32 hw_mode; + /* software fields */ + void *sw_token; + u32 sw_buffer; + u32 sw_len; +}; + +struct cpdma_desc_pool { + phys_addr_t phys; + u32 hw_addr; + void __iomem *iomap; /* ioremap map */ + void *cpumap; /* dma_alloc map */ + int desc_size, mem_size; + int num_desc, used_desc; + unsigned long *bitmap; + struct device *dev; + spinlock_t lock; +}; + +enum cpdma_state { + CPDMA_STATE_IDLE, + CPDMA_STATE_ACTIVE, + CPDMA_STATE_TEARDOWN, +}; + +static const char *cpdma_state_str[] = { "idle", "active", "teardown" }; + +struct cpdma_ctlr { + enum cpdma_state state; + struct cpdma_params params; + struct device *dev; + struct cpdma_desc_pool *pool; + spinlock_t lock; + struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; +}; + +struct cpdma_chan { + struct cpdma_desc __iomem *head, *tail; + void __iomem *hdp, *cp, *rxfree; + enum cpdma_state state; + struct cpdma_ctlr *ctlr; + int chan_num; + spinlock_t lock; + int count; + u32 mask; + cpdma_handler_fn handler; + enum dma_data_direction dir; + struct cpdma_chan_stats stats; + /* offsets into dmaregs */ + int int_set, int_clear, td; +}; + +/* The following make access to common cpdma_ctlr params more readable */ +#define dmaregs params.dmaregs +#define num_chan params.num_chan + +/* various accessors */ +#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) +#define chan_read(chan, fld) __raw_readl((chan)->fld) +#define desc_read(desc, fld) __raw_readl(&(desc)->fld) +#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) +#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) +#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) + +#define cpdma_desc_to_port(chan, mode, directed) \ + do { \ + if (!is_rx_chan(chan) && ((directed == 1) || \ + (directed == 2))) \ + mode |= (CPDMA_DESC_TO_PORT_EN | \ + (directed << CPDMA_TO_PORT_SHIFT)); \ + } while (0) + +/* + * Utility constructs for a cpdma descriptor pool. Some devices (e.g. 
davinci + * emac) have dedicated on-chip memory for these descriptors. Some other + * devices (e.g. cpsw switches) use plain old memory. Descriptor pools + * abstract out these details + */ +static struct cpdma_desc_pool * +cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, + int size, int align) +{ + int bitmap_size; + struct cpdma_desc_pool *pool; + + pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); + if (!pool) + goto fail; + + spin_lock_init(&pool->lock); + + pool->dev = dev; + pool->mem_size = size; + pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); + pool->num_desc = size / pool->desc_size; + + bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); + pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); + if (!pool->bitmap) + goto fail; + + if (phys) { + pool->phys = phys; + pool->iomap = ioremap(phys, size); + pool->hw_addr = hw_addr; + } else { + pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, + GFP_KERNEL); + pool->iomap = pool->cpumap; + pool->hw_addr = pool->phys; + } + + if (pool->iomap) + return pool; +fail: + return NULL; +} + +static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) +{ + if (!pool) + return; + + WARN_ON(pool->used_desc); + if (pool->cpumap) { + dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, + pool->phys); + } else { + iounmap(pool->iomap); + } +} + +static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, + struct cpdma_desc __iomem *desc) +{ + if (!desc) + return 0; + return pool->hw_addr + (__force long)desc - (__force long)pool->iomap; +} + +static inline struct cpdma_desc __iomem * +desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) +{ + return dma ? pool->iomap + dma - pool->hw_addr : NULL; +} + +static struct cpdma_desc __iomem * +cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) +{ + unsigned long flags; + int index; + int desc_start; + int desc_end; + struct cpdma_desc __iomem *desc = NULL; + + spin_lock_irqsave(&pool->lock, flags); + + if (is_rx) { + desc_start = 0; + desc_end = pool->num_desc/2; + } else { + desc_start = pool->num_desc/2; + desc_end = pool->num_desc; + } + + index = bitmap_find_next_zero_area(pool->bitmap, + desc_end, desc_start, num_desc, 0); + if (index < desc_end) { + bitmap_set(pool->bitmap, index, num_desc); + desc = pool->iomap + pool->desc_size * index; + pool->used_desc++; + } + + spin_unlock_irqrestore(&pool->lock, flags); + return desc; +} + +static void cpdma_desc_free(struct cpdma_desc_pool *pool, + struct cpdma_desc __iomem *desc, int num_desc) +{ + unsigned long flags, index; + + index = ((unsigned long)desc - (unsigned long)pool->iomap) / + pool->desc_size; + spin_lock_irqsave(&pool->lock, flags); + bitmap_clear(pool->bitmap, index, num_desc); + pool->used_desc--; + spin_unlock_irqrestore(&pool->lock, flags); +} + +struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) +{ + struct cpdma_ctlr *ctlr; + + ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL); + if (!ctlr) + return NULL; + + ctlr->state = CPDMA_STATE_IDLE; + ctlr->params = *params; + ctlr->dev = params->dev; + spin_lock_init(&ctlr->lock); + + ctlr->pool = cpdma_desc_pool_create(ctlr->dev, + ctlr->params.desc_mem_phys, + ctlr->params.desc_hw_addr, + ctlr->params.desc_mem_size, + ctlr->params.desc_align); + if (!ctlr->pool) + return NULL; + + if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) + ctlr->num_chan = CPDMA_MAX_CHANNELS; + return ctlr; +} +EXPORT_SYMBOL_GPL(cpdma_ctlr_create); + +int cpdma_ctlr_start(struct cpdma_ctlr 
*ctlr) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&ctlr->lock, flags); + if (ctlr->state != CPDMA_STATE_IDLE) { + spin_unlock_irqrestore(&ctlr->lock, flags); + return -EBUSY; + } + + if (ctlr->params.has_soft_reset) { + unsigned timeout = 10 * 100; + + dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); + while (timeout) { + if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) + break; + udelay(10); + timeout--; + } + WARN_ON(!timeout); + } + + for (i = 0; i < ctlr->num_chan; i++) { + __raw_writel(0, ctlr->params.txhdp + 4 * i); + __raw_writel(0, ctlr->params.rxhdp + 4 * i); + __raw_writel(0, ctlr->params.txcp + 4 * i); + __raw_writel(0, ctlr->params.rxcp + 4 * i); + } + + dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); + dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); + + dma_reg_write(ctlr, CPDMA_TXCONTROL, 1); + dma_reg_write(ctlr, CPDMA_RXCONTROL, 1); + + ctlr->state = CPDMA_STATE_ACTIVE; + + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { + if (ctlr->channels[i]) + cpdma_chan_start(ctlr->channels[i]); + } + spin_unlock_irqrestore(&ctlr->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_ctlr_start); + +int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&ctlr->lock, flags); + if (ctlr->state == CPDMA_STATE_TEARDOWN) { + spin_unlock_irqrestore(&ctlr->lock, flags); + return -EINVAL; + } + + ctlr->state = CPDMA_STATE_TEARDOWN; + + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { + if (ctlr->channels[i]) + cpdma_chan_stop(ctlr->channels[i]); + } + + dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); + dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); + + dma_reg_write(ctlr, CPDMA_TXCONTROL, 0); + dma_reg_write(ctlr, CPDMA_RXCONTROL, 0); + + ctlr->state = CPDMA_STATE_IDLE; + + spin_unlock_irqrestore(&ctlr->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_ctlr_stop); + +int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) +{ + struct device *dev = ctlr->dev; + unsigned long flags; + int i; + + spin_lock_irqsave(&ctlr->lock, flags); + + dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]); + + dev_info(dev, "CPDMA: txidver: %x", + dma_reg_read(ctlr, CPDMA_TXIDVER)); + dev_info(dev, "CPDMA: txcontrol: %x", + dma_reg_read(ctlr, CPDMA_TXCONTROL)); + dev_info(dev, "CPDMA: txteardown: %x", + dma_reg_read(ctlr, CPDMA_TXTEARDOWN)); + dev_info(dev, "CPDMA: rxidver: %x", + dma_reg_read(ctlr, CPDMA_RXIDVER)); + dev_info(dev, "CPDMA: rxcontrol: %x", + dma_reg_read(ctlr, CPDMA_RXCONTROL)); + dev_info(dev, "CPDMA: softreset: %x", + dma_reg_read(ctlr, CPDMA_SOFTRESET)); + dev_info(dev, "CPDMA: rxteardown: %x", + dma_reg_read(ctlr, CPDMA_RXTEARDOWN)); + dev_info(dev, "CPDMA: txintstatraw: %x", + dma_reg_read(ctlr, CPDMA_TXINTSTATRAW)); + dev_info(dev, "CPDMA: txintstatmasked: %x", + dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED)); + dev_info(dev, "CPDMA: txintmaskset: %x", + dma_reg_read(ctlr, CPDMA_TXINTMASKSET)); + dev_info(dev, "CPDMA: txintmaskclear: %x", + dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR)); + dev_info(dev, "CPDMA: macinvector: %x", + dma_reg_read(ctlr, CPDMA_MACINVECTOR)); + dev_info(dev, "CPDMA: maceoivector: %x", + dma_reg_read(ctlr, CPDMA_MACEOIVECTOR)); + dev_info(dev, "CPDMA: rxintstatraw: %x", + dma_reg_read(ctlr, CPDMA_RXINTSTATRAW)); + dev_info(dev, "CPDMA: rxintstatmasked: %x", + dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED)); + dev_info(dev, "CPDMA: rxintmaskset: %x", + dma_reg_read(ctlr, CPDMA_RXINTMASKSET)); + dev_info(dev, "CPDMA: rxintmaskclear: %x", + dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR)); + 
dev_info(dev, "CPDMA: dmaintstatraw: %x", + dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW)); + dev_info(dev, "CPDMA: dmaintstatmasked: %x", + dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED)); + dev_info(dev, "CPDMA: dmaintmaskset: %x", + dma_reg_read(ctlr, CPDMA_DMAINTMASKSET)); + dev_info(dev, "CPDMA: dmaintmaskclear: %x", + dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR)); + + if (!ctlr->params.has_ext_regs) { + dev_info(dev, "CPDMA: dmacontrol: %x", + dma_reg_read(ctlr, CPDMA_DMACONTROL)); + dev_info(dev, "CPDMA: dmastatus: %x", + dma_reg_read(ctlr, CPDMA_DMASTATUS)); + dev_info(dev, "CPDMA: rxbuffofs: %x", + dma_reg_read(ctlr, CPDMA_RXBUFFOFS)); + } + + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) + if (ctlr->channels[i]) + cpdma_chan_dump(ctlr->channels[i]); + + spin_unlock_irqrestore(&ctlr->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_ctlr_dump); + +int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) +{ + unsigned long flags; + int ret = 0, i; + + if (!ctlr) + return -EINVAL; + + spin_lock_irqsave(&ctlr->lock, flags); + if (ctlr->state != CPDMA_STATE_IDLE) + cpdma_ctlr_stop(ctlr); + + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) + cpdma_chan_destroy(ctlr->channels[i]); + + cpdma_desc_pool_destroy(ctlr->pool); + spin_unlock_irqrestore(&ctlr->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); + +int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) +{ + unsigned long flags; + int i, reg; + + spin_lock_irqsave(&ctlr->lock, flags); + if (ctlr->state != CPDMA_STATE_ACTIVE) { + spin_unlock_irqrestore(&ctlr->lock, flags); + return -EINVAL; + } + + reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR; + dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR); + + for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { + if (ctlr->channels[i]) + cpdma_chan_int_ctrl(ctlr->channels[i], enable); + } + + spin_unlock_irqrestore(&ctlr->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl); + +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) +{ + dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); +} +EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi); + +struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, + cpdma_handler_fn handler) +{ + struct cpdma_chan *chan; + int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; + unsigned long flags; + + if (__chan_linear(chan_num) >= ctlr->num_chan) + return NULL; + + chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL); + if (!chan) + return ERR_PTR(-ENOMEM); + + spin_lock_irqsave(&ctlr->lock, flags); + if (ctlr->channels[chan_num]) { + spin_unlock_irqrestore(&ctlr->lock, flags); + devm_kfree(ctlr->dev, chan); + return ERR_PTR(-EBUSY); + } + + chan->ctlr = ctlr; + chan->state = CPDMA_STATE_IDLE; + chan->chan_num = chan_num; + chan->handler = handler; + + if (is_rx_chan(chan)) { + chan->hdp = ctlr->params.rxhdp + offset; + chan->cp = ctlr->params.rxcp + offset; + chan->rxfree = ctlr->params.rxfree + offset; + chan->int_set = CPDMA_RXINTMASKSET; + chan->int_clear = CPDMA_RXINTMASKCLEAR; + chan->td = CPDMA_RXTEARDOWN; + chan->dir = DMA_FROM_DEVICE; + } else { + chan->hdp = ctlr->params.txhdp + offset; + chan->cp = ctlr->params.txcp + offset; + chan->int_set = CPDMA_TXINTMASKSET; + chan->int_clear = CPDMA_TXINTMASKCLEAR; + chan->td = CPDMA_TXTEARDOWN; + chan->dir = DMA_TO_DEVICE; + } + chan->mask = BIT(chan_linear(chan)); + + spin_lock_init(&chan->lock); + + ctlr->channels[chan_num] = chan; + spin_unlock_irqrestore(&ctlr->lock, flags); + return chan; +} +EXPORT_SYMBOL_GPL(cpdma_chan_create); + +int 
cpdma_chan_destroy(struct cpdma_chan *chan) +{ + struct cpdma_ctlr *ctlr; + unsigned long flags; + + if (!chan) + return -EINVAL; + ctlr = chan->ctlr; + + spin_lock_irqsave(&ctlr->lock, flags); + if (chan->state != CPDMA_STATE_IDLE) + cpdma_chan_stop(chan); + ctlr->channels[chan->chan_num] = NULL; + spin_unlock_irqrestore(&ctlr->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_chan_destroy); + +int cpdma_chan_get_stats(struct cpdma_chan *chan, + struct cpdma_chan_stats *stats) +{ + unsigned long flags; + if (!chan) + return -EINVAL; + spin_lock_irqsave(&chan->lock, flags); + memcpy(stats, &chan->stats, sizeof(*stats)); + spin_unlock_irqrestore(&chan->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_chan_get_stats); + +int cpdma_chan_dump(struct cpdma_chan *chan) +{ + unsigned long flags; + struct device *dev = chan->ctlr->dev; + + spin_lock_irqsave(&chan->lock, flags); + + dev_info(dev, "channel %d (%s %d) state %s", + chan->chan_num, is_rx_chan(chan) ? "rx" : "tx", + chan_linear(chan), cpdma_state_str[chan->state]); + dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp)); + dev_info(dev, "\tcp: %x\n", chan_read(chan, cp)); + if (chan->rxfree) { + dev_info(dev, "\trxfree: %x\n", + chan_read(chan, rxfree)); + } + + dev_info(dev, "\tstats head_enqueue: %d\n", + chan->stats.head_enqueue); + dev_info(dev, "\tstats tail_enqueue: %d\n", + chan->stats.tail_enqueue); + dev_info(dev, "\tstats pad_enqueue: %d\n", + chan->stats.pad_enqueue); + dev_info(dev, "\tstats misqueued: %d\n", + chan->stats.misqueued); + dev_info(dev, "\tstats desc_alloc_fail: %d\n", + chan->stats.desc_alloc_fail); + dev_info(dev, "\tstats pad_alloc_fail: %d\n", + chan->stats.pad_alloc_fail); + dev_info(dev, "\tstats runt_receive_buff: %d\n", + chan->stats.runt_receive_buff); + dev_info(dev, "\tstats runt_transmit_buff: %d\n", + chan->stats.runt_transmit_buff); + dev_info(dev, "\tstats empty_dequeue: %d\n", + chan->stats.empty_dequeue); + dev_info(dev, "\tstats busy_dequeue: %d\n", + chan->stats.busy_dequeue); + dev_info(dev, "\tstats good_dequeue: %d\n", + chan->stats.good_dequeue); + dev_info(dev, "\tstats requeue: %d\n", + chan->stats.requeue); + dev_info(dev, "\tstats teardown_dequeue: %d\n", + chan->stats.teardown_dequeue); + + spin_unlock_irqrestore(&chan->lock, flags); + return 0; +} + +static void __cpdma_chan_submit(struct cpdma_chan *chan, + struct cpdma_desc __iomem *desc) +{ + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc __iomem *prev = chan->tail; + struct cpdma_desc_pool *pool = ctlr->pool; + dma_addr_t desc_dma; + u32 mode; + + desc_dma = desc_phys(pool, desc); + + /* simple case - idle channel */ + if (!chan->head) { + chan->stats.head_enqueue++; + chan->head = desc; + chan->tail = desc; + if (chan->state == CPDMA_STATE_ACTIVE) + chan_write(chan, hdp, desc_dma); + return; + } + + /* first chain the descriptor at the tail of the list */ + desc_write(prev, hw_next, desc_dma); + chan->tail = desc; + chan->stats.tail_enqueue++; + + /* next check if EOQ has been triggered already */ + mode = desc_read(prev, hw_mode); + if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) && + (chan->state == CPDMA_STATE_ACTIVE)) { + desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ); + chan_write(chan, hdp, desc_dma); + chan->stats.misqueued++; + } +} + +int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, + int len, int directed) +{ + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc __iomem *desc; + dma_addr_t buffer; + unsigned long flags; + u32 mode; + int ret = 0; + 
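+	/*
+	 * Take a descriptor from this channel's half of the pool, round
+	 * short packets up to min_packet_size, DMA-map the buffer, fill in
+	 * the hardware fields (SOP | EOP | OWNER plus the optional
+	 * directed-port bits) and chain the descriptor onto the queue.
+	 * For rx channels the rxfree register is then bumped so the
+	 * hardware knows one more buffer is available.
+	 */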
+ spin_lock_irqsave(&chan->lock, flags); + + if (chan->state == CPDMA_STATE_TEARDOWN) { + ret = -EINVAL; + goto unlock_ret; + } + + desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); + if (!desc) { + chan->stats.desc_alloc_fail++; + ret = -ENOMEM; + goto unlock_ret; + } + + if (len < ctlr->params.min_packet_size) { + len = ctlr->params.min_packet_size; + chan->stats.runt_transmit_buff++; + } + + buffer = dma_map_single(ctlr->dev, data, len, chan->dir); + ret = dma_mapping_error(ctlr->dev, buffer); + if (ret) { + cpdma_desc_free(ctlr->pool, desc, 1); + ret = -EINVAL; + goto unlock_ret; + } + + mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; + cpdma_desc_to_port(chan, mode, directed); + + desc_write(desc, hw_next, 0); + desc_write(desc, hw_buffer, buffer); + desc_write(desc, hw_len, len); + desc_write(desc, hw_mode, mode | len); + desc_write(desc, sw_token, token); + desc_write(desc, sw_buffer, buffer); + desc_write(desc, sw_len, len); + + __cpdma_chan_submit(chan, desc); + + if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree) + chan_write(chan, rxfree, 1); + + chan->count++; + +unlock_ret: + spin_unlock_irqrestore(&chan->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(cpdma_chan_submit); + +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) +{ + unsigned long flags; + int index; + bool ret; + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc_pool *pool = ctlr->pool; + + spin_lock_irqsave(&pool->lock, flags); + + index = bitmap_find_next_zero_area(pool->bitmap, + pool->num_desc, pool->num_desc/2, 1, 0); + + if (index < pool->num_desc) + ret = true; + else + ret = false; + + spin_unlock_irqrestore(&pool->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); + +static void __cpdma_chan_free(struct cpdma_chan *chan, + struct cpdma_desc __iomem *desc, + int outlen, int status) +{ + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc_pool *pool = ctlr->pool; + dma_addr_t buff_dma; + int origlen; + void *token; + + token = (void *)desc_read(desc, sw_token); + buff_dma = desc_read(desc, sw_buffer); + origlen = desc_read(desc, sw_len); + + dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); + cpdma_desc_free(pool, desc, 1); + (*chan->handler)(token, outlen, status); +} + +static int __cpdma_chan_process(struct cpdma_chan *chan) +{ + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc __iomem *desc; + int status, outlen; + int cb_status = 0; + struct cpdma_desc_pool *pool = ctlr->pool; + dma_addr_t desc_dma; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + desc = chan->head; + if (!desc) { + chan->stats.empty_dequeue++; + status = -ENOENT; + goto unlock_ret; + } + desc_dma = desc_phys(pool, desc); + + status = __raw_readl(&desc->hw_mode); + outlen = status & 0x7ff; + if (status & CPDMA_DESC_OWNER) { + chan->stats.busy_dequeue++; + status = -EBUSY; + goto unlock_ret; + } + + if (status & CPDMA_DESC_PASS_CRC) + outlen -= CPDMA_DESC_CRC_LEN; + + status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE | + CPDMA_DESC_PORT_MASK); + + chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); + chan_write(chan, cp, desc_dma); + chan->count--; + chan->stats.good_dequeue++; + + if (status & CPDMA_DESC_EOQ) { + chan->stats.requeue++; + chan_write(chan, hdp, desc_phys(pool, chan->head)); + } + + spin_unlock_irqrestore(&chan->lock, flags); + if (unlikely(status & CPDMA_DESC_TD_COMPLETE)) + cb_status = -ENOSYS; + else + cb_status = status; + + __cpdma_chan_free(chan, desc, outlen, cb_status); + return status; + 
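+	/*
+	 * The jumps to unlock_ret above leave the queue untouched: either
+	 * there was nothing to dequeue (-ENOENT) or the hardware still
+	 * owns the head descriptor (-EBUSY).
+	 */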
+unlock_ret: + spin_unlock_irqrestore(&chan->lock, flags); + return status; +} + +int cpdma_chan_process(struct cpdma_chan *chan, int quota) +{ + int used = 0, ret = 0; + + if (chan->state != CPDMA_STATE_ACTIVE) + return -EINVAL; + + while (used < quota) { + ret = __cpdma_chan_process(chan); + if (ret < 0) + break; + used++; + } + return used; +} +EXPORT_SYMBOL_GPL(cpdma_chan_process); + +int cpdma_chan_start(struct cpdma_chan *chan) +{ + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc_pool *pool = ctlr->pool; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + if (chan->state != CPDMA_STATE_IDLE) { + spin_unlock_irqrestore(&chan->lock, flags); + return -EBUSY; + } + if (ctlr->state != CPDMA_STATE_ACTIVE) { + spin_unlock_irqrestore(&chan->lock, flags); + return -EINVAL; + } + dma_reg_write(ctlr, chan->int_set, chan->mask); + chan->state = CPDMA_STATE_ACTIVE; + if (chan->head) { + chan_write(chan, hdp, desc_phys(pool, chan->head)); + if (chan->rxfree) + chan_write(chan, rxfree, chan->count); + } + + spin_unlock_irqrestore(&chan->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_chan_start); + +int cpdma_chan_stop(struct cpdma_chan *chan) +{ + struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_desc_pool *pool = ctlr->pool; + unsigned long flags; + int ret; + unsigned timeout; + + spin_lock_irqsave(&chan->lock, flags); + if (chan->state == CPDMA_STATE_TEARDOWN) { + spin_unlock_irqrestore(&chan->lock, flags); + return -EINVAL; + } + + chan->state = CPDMA_STATE_TEARDOWN; + dma_reg_write(ctlr, chan->int_clear, chan->mask); + + /* trigger teardown */ + dma_reg_write(ctlr, chan->td, chan_linear(chan)); + + /* wait for teardown complete */ + timeout = 100 * 100; /* 100 ms */ + while (timeout) { + u32 cp = chan_read(chan, cp); + if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) + break; + udelay(10); + timeout--; + } + WARN_ON(!timeout); + chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); + + /* handle completed packets */ + spin_unlock_irqrestore(&chan->lock, flags); + do { + ret = __cpdma_chan_process(chan); + if (ret < 0) + break; + } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0); + spin_lock_irqsave(&chan->lock, flags); + + /* remaining packets haven't been tx/rx'ed, clean them up */ + while (chan->head) { + struct cpdma_desc __iomem *desc = chan->head; + dma_addr_t next_dma; + + next_dma = desc_read(desc, hw_next); + chan->head = desc_from_phys(pool, next_dma); + chan->count--; + chan->stats.teardown_dequeue++; + + /* issue callback without locks held */ + spin_unlock_irqrestore(&chan->lock, flags); + __cpdma_chan_free(chan, desc, 0, -ENOSYS); + spin_lock_irqsave(&chan->lock, flags); + } + + chan->state = CPDMA_STATE_IDLE; + spin_unlock_irqrestore(&chan->lock, flags); + return 0; +} +EXPORT_SYMBOL_GPL(cpdma_chan_stop); + +int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) +{ + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + if (chan->state != CPDMA_STATE_ACTIVE) { + spin_unlock_irqrestore(&chan->lock, flags); + return -EINVAL; + } + + dma_reg_write(chan->ctlr, enable ? 
chan->int_set : chan->int_clear, + chan->mask); + spin_unlock_irqrestore(&chan->lock, flags); + + return 0; +} + +struct cpdma_control_info { + u32 reg; + u32 shift, mask; + int access; +#define ACCESS_RO BIT(0) +#define ACCESS_WO BIT(1) +#define ACCESS_RW (ACCESS_RO | ACCESS_WO) +}; + +static struct cpdma_control_info controls[] = { + [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO}, + [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW}, + [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW}, + [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW}, + [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW}, + [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO}, + [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW}, + [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW}, + [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW}, + [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW}, + [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW}, +}; + +int cpdma_control_get(struct cpdma_ctlr *ctlr, int control) +{ + unsigned long flags; + struct cpdma_control_info *info = &controls[control]; + int ret; + + spin_lock_irqsave(&ctlr->lock, flags); + + ret = -ENOTSUPP; + if (!ctlr->params.has_ext_regs) + goto unlock_ret; + + ret = -EINVAL; + if (ctlr->state != CPDMA_STATE_ACTIVE) + goto unlock_ret; + + ret = -ENOENT; + if (control < 0 || control >= ARRAY_SIZE(controls)) + goto unlock_ret; + + ret = -EPERM; + if ((info->access & ACCESS_RO) != ACCESS_RO) + goto unlock_ret; + + ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask; + +unlock_ret: + spin_unlock_irqrestore(&ctlr->lock, flags); + return ret; +} + +int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) +{ + unsigned long flags; + struct cpdma_control_info *info = &controls[control]; + int ret; + u32 val; + + spin_lock_irqsave(&ctlr->lock, flags); + + ret = -ENOTSUPP; + if (!ctlr->params.has_ext_regs) + goto unlock_ret; + + ret = -EINVAL; + if (ctlr->state != CPDMA_STATE_ACTIVE) + goto unlock_ret; + + ret = -ENOENT; + if (control < 0 || control >= ARRAY_SIZE(controls)) + goto unlock_ret; + + ret = -EPERM; + if ((info->access & ACCESS_WO) != ACCESS_WO) + goto unlock_ret; + + val = dma_reg_read(ctlr, info->reg); + val &= ~(info->mask << info->shift); + val |= (value & info->mask) << info->shift; + dma_reg_write(ctlr, info->reg, val); + ret = 0; + +unlock_ret: + spin_unlock_irqrestore(&ctlr->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(cpdma_control_set); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h new file mode 100644 index 000000000..86dee487f --- /dev/null +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -0,0 +1,117 @@ +/* + * Texas Instruments CPDMA Driver + * + * Copyright (C) 2010 Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DAVINCI_CPDMA_H__ +#define __DAVINCI_CPDMA_H__ + +#define CPDMA_MAX_CHANNELS BITS_PER_LONG + +#define tx_chan_num(chan) (chan) +#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS) +#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS) +#define is_tx_chan(chan) (!is_rx_chan(chan)) +#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) +#define chan_linear(chan) __chan_linear((chan)->chan_num) + +#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7) + +#define CPDMA_EOI_RX_THRESH 0x0 +#define CPDMA_EOI_RX 0x1 +#define CPDMA_EOI_TX 0x2 +#define CPDMA_EOI_MISC 0x3 + +struct cpdma_params { + struct device *dev; + void __iomem *dmaregs; + void __iomem *txhdp, *rxhdp, *txcp, *rxcp; + void __iomem *rxthresh, *rxfree; + int num_chan; + bool has_soft_reset; + int min_packet_size; + u32 desc_mem_phys; + u32 desc_hw_addr; + int desc_mem_size; + int desc_align; + + /* + * Some instances of embedded cpdma controllers have extra control and + * status registers. The following flag enables access to these + * "extended" registers. + */ + bool has_ext_regs; +}; + +struct cpdma_chan_stats { + u32 head_enqueue; + u32 tail_enqueue; + u32 pad_enqueue; + u32 misqueued; + u32 desc_alloc_fail; + u32 pad_alloc_fail; + u32 runt_receive_buff; + u32 runt_transmit_buff; + u32 empty_dequeue; + u32 busy_dequeue; + u32 good_dequeue; + u32 requeue; + u32 teardown_dequeue; +}; + +struct cpdma_ctlr; +struct cpdma_chan; + +typedef void (*cpdma_handler_fn)(void *token, int len, int status); + +struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params); +int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr); +int cpdma_ctlr_start(struct cpdma_ctlr *ctlr); +int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr); +int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr); + +struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, + cpdma_handler_fn handler); +int cpdma_chan_destroy(struct cpdma_chan *chan); +int cpdma_chan_start(struct cpdma_chan *chan); +int cpdma_chan_stop(struct cpdma_chan *chan); +int cpdma_chan_dump(struct cpdma_chan *chan); + +int cpdma_chan_get_stats(struct cpdma_chan *chan, + struct cpdma_chan_stats *stats); +int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, + int len, int directed); +int cpdma_chan_process(struct cpdma_chan *chan, int quota); + +int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); +void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value); +int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); +bool cpdma_check_free_tx_desc(struct cpdma_chan *chan); + +enum cpdma_control { + CPDMA_CMD_IDLE, /* write-only */ + CPDMA_COPY_ERROR_FRAMES, /* read-write */ + CPDMA_RX_OFF_LEN_UPDATE, /* read-write */ + CPDMA_RX_OWNERSHIP_FLIP, /* read-write */ + CPDMA_TX_PRIO_FIXED, /* read-write */ + CPDMA_STAT_IDLE, /* read-only */ + CPDMA_STAT_TX_ERR_CHAN, /* read-only */ + CPDMA_STAT_TX_ERR_CODE, /* read-only */ + CPDMA_STAT_RX_ERR_CHAN, /* read-only */ + CPDMA_STAT_RX_ERR_CODE, /* read-only */ + CPDMA_RX_BUFFER_OFFSET, /* read-write */ +}; + +int cpdma_control_get(struct cpdma_ctlr *ctlr, int control); +int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value); + +#endif diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c new file mode 100644 index 000000000..aeebc0a7b --- /dev/null +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -0,0 +1,2228 @@ +/* + * DaVinci Ethernet Medium Access Controller + * + * DaVinci EMAC is based upon CPPI 3.0 TI DMA engine + 
* + * Copyright (C) 2009 Texas Instruments. + * + * --------------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * --------------------------------------------------------------------------- + * History: + * 0-5 A number of folks worked on this driver in bits and pieces but the major + * contribution came from Suraj Iyer and Anant Gole + * 6.0 Anant Gole - rewrote the driver as per Linux conventions + * 6.1 Chaithrika U S - added support for Gigabit and RMII features, + * PHY layer usage + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "cpsw.h" +#include "davinci_cpdma.h" + +static int debug_level; +module_param(debug_level, int, 0); +MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)"); + +/* Netif debug messages possible */ +#define DAVINCI_EMAC_DEBUG (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | \ + NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_INTR | \ + NETIF_MSG_TX_DONE | \ + NETIF_MSG_RX_STATUS | \ + NETIF_MSG_PKTDATA | \ + NETIF_MSG_HW | \ + NETIF_MSG_WOL) + +/* version info */ +#define EMAC_MAJOR_VERSION 6 +#define EMAC_MINOR_VERSION 1 +#define EMAC_MODULE_VERSION "6.1" +MODULE_VERSION(EMAC_MODULE_VERSION); +static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; + +/* Configuration items */ +#define EMAC_DEF_PASS_CRC (0) /* Do not pass CRC up to frames */ +#define EMAC_DEF_QOS_EN (0) /* EMAC proprietary QoS disabled */ +#define EMAC_DEF_NO_BUFF_CHAIN (0) /* No buffer chain */ +#define EMAC_DEF_MACCTRL_FRAME_EN (0) /* Discard Maccontrol frames */ +#define EMAC_DEF_SHORT_FRAME_EN (0) /* Discard short frames */ +#define EMAC_DEF_ERROR_FRAME_EN (0) /* Discard error frames */ +#define EMAC_DEF_PROM_EN (0) /* Promiscuous disabled */ +#define EMAC_DEF_PROM_CH (0) /* Promiscuous channel is 0 */ +#define EMAC_DEF_BCAST_EN (1) /* Broadcast enabled */ +#define EMAC_DEF_BCAST_CH (0) /* Broadcast channel is 0 */ +#define EMAC_DEF_MCAST_EN (1) /* Multicast enabled */ +#define EMAC_DEF_MCAST_CH (0) /* Multicast channel is 0 */ + +#define EMAC_DEF_TXPRIO_FIXED (1) /* TX Priority is fixed */ +#define EMAC_DEF_TXPACING_EN (0) /* TX pacing NOT supported*/ + +#define EMAC_DEF_BUFFER_OFFSET (0) /* Buffer offset to DMA (future) */ +#define EMAC_DEF_MIN_ETHPKTSIZE (60) /* Minimum ethernet pkt size */ +#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4) +#define 
EMAC_DEF_TX_CH (0) /* Default 0th channel */ +#define EMAC_DEF_RX_CH (0) /* Default 0th channel */ +#define EMAC_DEF_RX_NUM_DESC (128) +#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ +#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ +#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ + +/* Buffer descriptor parameters */ +#define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */ +#define EMAC_DEF_RX_MAX_SERVICE (64) /* should = netdev->weight */ + +/* EMAC register related defines */ +#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF) +#define EMAC_NUM_MULTICAST_BITS (64) +#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1) +#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1) +#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2) +#define EMAC_RX_UNICAST_CLEAR_ALL (0xFF) +#define EMAC_INT_MASK_CLEAR (0xFF) + +/* RX MBP register bit positions */ +#define EMAC_RXMBP_PASSCRC_MASK BIT(30) +#define EMAC_RXMBP_QOSEN_MASK BIT(29) +#define EMAC_RXMBP_NOCHAIN_MASK BIT(28) +#define EMAC_RXMBP_CMFEN_MASK BIT(24) +#define EMAC_RXMBP_CSFEN_MASK BIT(23) +#define EMAC_RXMBP_CEFEN_MASK BIT(22) +#define EMAC_RXMBP_CAFEN_MASK BIT(21) +#define EMAC_RXMBP_PROMCH_SHIFT (16) +#define EMAC_RXMBP_PROMCH_MASK (0x7 << 16) +#define EMAC_RXMBP_BROADEN_MASK BIT(13) +#define EMAC_RXMBP_BROADCH_SHIFT (8) +#define EMAC_RXMBP_BROADCH_MASK (0x7 << 8) +#define EMAC_RXMBP_MULTIEN_MASK BIT(5) +#define EMAC_RXMBP_MULTICH_SHIFT (0) +#define EMAC_RXMBP_MULTICH_MASK (0x7) +#define EMAC_RXMBP_CHMASK (0x7) + +/* EMAC register definitions/bit maps used */ +# define EMAC_MBP_RXPROMISC (0x00200000) +# define EMAC_MBP_PROMISCCH(ch) (((ch) & 0x7) << 16) +# define EMAC_MBP_RXBCAST (0x00002000) +# define EMAC_MBP_BCASTCHAN(ch) (((ch) & 0x7) << 8) +# define EMAC_MBP_RXMCAST (0x00000020) +# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7) + +/* EMAC mac_control register */ +#define EMAC_MACCONTROL_TXPTYPE BIT(9) +#define EMAC_MACCONTROL_TXPACEEN BIT(6) +#define EMAC_MACCONTROL_GMIIEN BIT(5) +#define EMAC_MACCONTROL_GIGABITEN BIT(7) +#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0) +#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15) + +/* GIGABIT MODE related bits */ +#define EMAC_DM646X_MACCONTORL_GIG BIT(7) +#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17) + +/* EMAC mac_status register */ +#define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000) +#define EMAC_MACSTATUS_TXERRCODE_SHIFT (20) +#define EMAC_MACSTATUS_TXERRCH_MASK (0x7) +#define EMAC_MACSTATUS_TXERRCH_SHIFT (16) +#define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000) +#define EMAC_MACSTATUS_RXERRCODE_SHIFT (12) +#define EMAC_MACSTATUS_RXERRCH_MASK (0x7) +#define EMAC_MACSTATUS_RXERRCH_SHIFT (8) + +/* EMAC RX register masks */ +#define EMAC_RX_MAX_LEN_MASK (0xFFFF) +#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF) + +/* MAC_IN_VECTOR (0x180) register bit fields */ +#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17) +#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16) +#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8) +#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0) + +/** NOTE:: For DM646x the IN_VECTOR has changed */ +#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH) +#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH) +#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26) +#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27) + +/* CPPI bit positions */ +#define EMAC_CPPI_SOP_BIT BIT(31) +#define EMAC_CPPI_EOP_BIT BIT(30) +#define EMAC_CPPI_OWNERSHIP_BIT BIT(29) +#define EMAC_CPPI_EOQ_BIT BIT(28) +#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27) +#define 
EMAC_CPPI_PASS_CRC_BIT BIT(26) +#define EMAC_RX_BD_BUF_SIZE (0xFFFF) +#define EMAC_BD_LENGTH_FOR_CACHE (16) /* only CPPI bytes */ +#define EMAC_RX_BD_PKT_LENGTH_MASK (0xFFFF) + +/* Max hardware defines */ +#define EMAC_MAX_TXRX_CHANNELS (8) /* Max hardware channels */ +#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */ + +/* EMAC Peripheral Device Register Memory Layout structure */ +#define EMAC_MACINVECTOR 0x90 + +#define EMAC_DM646X_MACEOIVECTOR 0x94 + +#define EMAC_MACINTSTATRAW 0xB0 +#define EMAC_MACINTSTATMASKED 0xB4 +#define EMAC_MACINTMASKSET 0xB8 +#define EMAC_MACINTMASKCLEAR 0xBC + +#define EMAC_RXMBPENABLE 0x100 +#define EMAC_RXUNICASTSET 0x104 +#define EMAC_RXUNICASTCLEAR 0x108 +#define EMAC_RXMAXLEN 0x10C +#define EMAC_RXBUFFEROFFSET 0x110 +#define EMAC_RXFILTERLOWTHRESH 0x114 + +#define EMAC_MACCONTROL 0x160 +#define EMAC_MACSTATUS 0x164 +#define EMAC_EMCONTROL 0x168 +#define EMAC_FIFOCONTROL 0x16C +#define EMAC_MACCONFIG 0x170 +#define EMAC_SOFTRESET 0x174 +#define EMAC_MACSRCADDRLO 0x1D0 +#define EMAC_MACSRCADDRHI 0x1D4 +#define EMAC_MACHASH1 0x1D8 +#define EMAC_MACHASH2 0x1DC +#define EMAC_MACADDRLO 0x500 +#define EMAC_MACADDRHI 0x504 +#define EMAC_MACINDEX 0x508 + +/* EMAC statistics registers */ +#define EMAC_RXGOODFRAMES 0x200 +#define EMAC_RXBCASTFRAMES 0x204 +#define EMAC_RXMCASTFRAMES 0x208 +#define EMAC_RXPAUSEFRAMES 0x20C +#define EMAC_RXCRCERRORS 0x210 +#define EMAC_RXALIGNCODEERRORS 0x214 +#define EMAC_RXOVERSIZED 0x218 +#define EMAC_RXJABBER 0x21C +#define EMAC_RXUNDERSIZED 0x220 +#define EMAC_RXFRAGMENTS 0x224 +#define EMAC_RXFILTERED 0x228 +#define EMAC_RXQOSFILTERED 0x22C +#define EMAC_RXOCTETS 0x230 +#define EMAC_TXGOODFRAMES 0x234 +#define EMAC_TXBCASTFRAMES 0x238 +#define EMAC_TXMCASTFRAMES 0x23C +#define EMAC_TXPAUSEFRAMES 0x240 +#define EMAC_TXDEFERRED 0x244 +#define EMAC_TXCOLLISION 0x248 +#define EMAC_TXSINGLECOLL 0x24C +#define EMAC_TXMULTICOLL 0x250 +#define EMAC_TXEXCESSIVECOLL 0x254 +#define EMAC_TXLATECOLL 0x258 +#define EMAC_TXUNDERRUN 0x25C +#define EMAC_TXCARRIERSENSE 0x260 +#define EMAC_TXOCTETS 0x264 +#define EMAC_NETOCTETS 0x280 +#define EMAC_RXSOFOVERRUNS 0x284 +#define EMAC_RXMOFOVERRUNS 0x288 +#define EMAC_RXDMAOVERRUNS 0x28C + +/* EMAC DM644x control registers */ +#define EMAC_CTRL_EWCTL (0x4) +#define EMAC_CTRL_EWINTTCNT (0x8) + +/* EMAC DM644x control module masks */ +#define EMAC_DM644X_EWINTCNT_MASK 0x1FFFF +#define EMAC_DM644X_INTMIN_INTVL 0x1 +#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK) + +/* EMAC DM646X control module registers */ +#define EMAC_DM646X_CMINTCTRL 0x0C +#define EMAC_DM646X_CMRXINTEN 0x14 +#define EMAC_DM646X_CMTXINTEN 0x18 +#define EMAC_DM646X_CMRXINTMAX 0x70 +#define EMAC_DM646X_CMTXINTMAX 0x74 + +/* EMAC DM646X control module masks */ +#define EMAC_DM646X_INTPACEEN (0x3 << 16) +#define EMAC_DM646X_INTPRESCALE_MASK (0x7FF << 0) +#define EMAC_DM646X_CMINTMAX_CNT 63 +#define EMAC_DM646X_CMINTMIN_CNT 2 +#define EMAC_DM646X_CMINTMAX_INTVL (1000 / EMAC_DM646X_CMINTMIN_CNT) +#define EMAC_DM646X_CMINTMIN_INTVL ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1) + + +/* EMAC EOI codes for C0 */ +#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) +#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02) + +/* EMAC Stats Clear Mask */ +#define EMAC_STATS_CLR_MASK (0xFFFFFFFF) + +/* emac_priv: EMAC private data structure + * + * EMAC adapter private data structure + */ +struct emac_priv { + u32 msg_enable; + struct net_device *ndev; + struct platform_device *pdev; + struct napi_struct napi; + char mac_addr[6]; + void __iomem 
*remap_addr; + u32 emac_base_phys; + void __iomem *emac_base; + void __iomem *ctrl_base; + struct cpdma_ctlr *dma; + struct cpdma_chan *txchan; + struct cpdma_chan *rxchan; + u32 link; /* 1=link on, 0=link off */ + u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */ + u32 duplex; /* Link duplex: 0=Half, 1=Full */ + u32 rx_buf_size; + u32 isr_count; + u32 coal_intvl; + u32 bus_freq_mhz; + u8 rmii_en; + u8 version; + u32 mac_hash1; + u32 mac_hash2; + u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; + u32 rx_addr_type; + const char *phy_id; + struct device_node *phy_node; + struct phy_device *phydev; + spinlock_t lock; + /*platform specific members*/ + void (*int_enable) (void); + void (*int_disable) (void); +}; + +/* EMAC TX Host Error description strings */ +static char *emac_txhost_errcodes[16] = { + "No error", "SOP error", "Ownership bit not set in SOP buffer", + "Zero Next Buffer Descriptor Pointer Without EOP", + "Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error", + "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", + "Reserved", "Reserved", "Reserved", "Reserved" +}; + +/* EMAC RX Host Error description strings */ +static char *emac_rxhost_errcodes[16] = { + "No error", "Reserved", "Ownership bit not set in input buffer", + "Reserved", "Zero Buffer Pointer", "Reserved", "Reserved", + "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", + "Reserved", "Reserved", "Reserved", "Reserved" +}; + +/* Helper macros */ +#define emac_read(reg) ioread32(priv->emac_base + (reg)) +#define emac_write(reg, val) iowrite32(val, priv->emac_base + (reg)) + +#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg))) +#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) + +/** + * emac_dump_regs - Dump important EMAC registers to debug terminal + * @priv: The DaVinci EMAC private adapter structure + * + * Executes ethtool set cmd & sets phy mode + * + */ +static void emac_dump_regs(struct emac_priv *priv) +{ + struct device *emac_dev = &priv->ndev->dev; + + /* Print important registers in EMAC */ + dev_info(emac_dev, "EMAC Basic registers\n"); + if (priv->version == EMAC_VERSION_1) { + dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n", + emac_ctrl_read(EMAC_CTRL_EWCTL), + emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); + } + dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", + emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); + dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ + "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE), + emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN)); + dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ + "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), + emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); + dev_info(emac_dev, "EMAC Statistics\n"); + dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", + emac_read(EMAC_RXGOODFRAMES)); + dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n", + emac_read(EMAC_RXBCASTFRAMES)); + dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n", + emac_read(EMAC_RXMCASTFRAMES)); + dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n", + emac_read(EMAC_RXPAUSEFRAMES)); + dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n", + emac_read(EMAC_RXCRCERRORS)); + dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n", + emac_read(EMAC_RXALIGNCODEERRORS)); + dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n", + emac_read(EMAC_RXOVERSIZED)); + dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n", + emac_read(EMAC_RXJABBER)); + dev_info(emac_dev, "EMAC: 
rx_undersized_frames:%d\n", + emac_read(EMAC_RXUNDERSIZED)); + dev_info(emac_dev, "EMAC: rx_fragments:%d\n", + emac_read(EMAC_RXFRAGMENTS)); + dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n", + emac_read(EMAC_RXFILTERED)); + dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n", + emac_read(EMAC_RXQOSFILTERED)); + dev_info(emac_dev, "EMAC: rx_octets:%d\n", + emac_read(EMAC_RXOCTETS)); + dev_info(emac_dev, "EMAC: tx_goodframes:%d\n", + emac_read(EMAC_TXGOODFRAMES)); + dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n", + emac_read(EMAC_TXBCASTFRAMES)); + dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n", + emac_read(EMAC_TXMCASTFRAMES)); + dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n", + emac_read(EMAC_TXPAUSEFRAMES)); + dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n", + emac_read(EMAC_TXDEFERRED)); + dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n", + emac_read(EMAC_TXCOLLISION)); + dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n", + emac_read(EMAC_TXSINGLECOLL)); + dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n", + emac_read(EMAC_TXMULTICOLL)); + dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n", + emac_read(EMAC_TXEXCESSIVECOLL)); + dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n", + emac_read(EMAC_TXLATECOLL)); + dev_info(emac_dev, "EMAC: tx_underrun:%d\n", + emac_read(EMAC_TXUNDERRUN)); + dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n", + emac_read(EMAC_TXCARRIERSENSE)); + dev_info(emac_dev, "EMAC: tx_octets:%d\n", + emac_read(EMAC_TXOCTETS)); + dev_info(emac_dev, "EMAC: net_octets:%d\n", + emac_read(EMAC_NETOCTETS)); + dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n", + emac_read(EMAC_RXSOFOVERRUNS)); + dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n", + emac_read(EMAC_RXMOFOVERRUNS)); + dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n", + emac_read(EMAC_RXDMAOVERRUNS)); + + cpdma_ctlr_dump(priv->dma); +} + +/** + * emac_get_drvinfo - Get EMAC driver information + * @ndev: The DaVinci EMAC network adapter + * @info: ethtool info structure containing name and version + * + * Returns EMAC driver information (name and version) + * + */ +static void emac_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, emac_version_string, sizeof(info->driver)); + strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version)); +} + +/** + * emac_get_settings - Get EMAC settings + * @ndev: The DaVinci EMAC network adapter + * @ecmd: ethtool command + * + * Executes ethool get command + * + */ +static int emac_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct emac_priv *priv = netdev_priv(ndev); + if (priv->phydev) + return phy_ethtool_gset(priv->phydev, ecmd); + else + return -EOPNOTSUPP; + +} + +/** + * emac_set_settings - Set EMAC settings + * @ndev: The DaVinci EMAC network adapter + * @ecmd: ethtool command + * + * Executes ethool set command + * + */ +static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) +{ + struct emac_priv *priv = netdev_priv(ndev); + if (priv->phydev) + return phy_ethtool_sset(priv->phydev, ecmd); + else + return -EOPNOTSUPP; + +} + +/** + * emac_get_coalesce - Get interrupt coalesce settings for this device + * @ndev : The DaVinci EMAC network adapter + * @coal : ethtool coalesce settings structure + * + * Fetch the current interrupt coalesce settings + * + */ +static int emac_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct emac_priv *priv = netdev_priv(ndev); + + coal->rx_coalesce_usecs = priv->coal_intvl; + return 
0; + +} + +/** + * emac_set_coalesce - Set interrupt coalesce settings for this device + * @ndev : The DaVinci EMAC network adapter + * @coal : ethtool coalesce settings structure + * + * Set interrupt coalesce parameters + * + */ +static int emac_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) +{ + struct emac_priv *priv = netdev_priv(ndev); + u32 int_ctrl, num_interrupts = 0; + u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0; + + if (!coal->rx_coalesce_usecs) + return -EINVAL; + + coal_intvl = coal->rx_coalesce_usecs; + + switch (priv->version) { + case EMAC_VERSION_2: + int_ctrl = emac_ctrl_read(EMAC_DM646X_CMINTCTRL); + prescale = priv->bus_freq_mhz * 4; + + if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL) + coal_intvl = EMAC_DM646X_CMINTMIN_INTVL; + + if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) { + /* + * Interrupt pacer works with 4us Pulse, we can + * throttle further by dilating the 4us pulse. + */ + addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale; + + if (addnl_dvdr > 1) { + prescale *= addnl_dvdr; + if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL + * addnl_dvdr)) + coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL + * addnl_dvdr); + } else { + addnl_dvdr = 1; + coal_intvl = EMAC_DM646X_CMINTMAX_INTVL; + } + } + + num_interrupts = (1000 * addnl_dvdr) / coal_intvl; + + int_ctrl |= EMAC_DM646X_INTPACEEN; + int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK); + int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK); + emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl); + + emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts); + emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts); + + break; + default: + int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT); + int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK); + prescale = coal_intvl * priv->bus_freq_mhz; + if (prescale > EMAC_DM644X_EWINTCNT_MASK) { + prescale = EMAC_DM644X_EWINTCNT_MASK; + coal_intvl = prescale / priv->bus_freq_mhz; + } + emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale)); + + break; + } + + printk(KERN_INFO"Set coalesce to %d usecs.\n", coal_intvl); + priv->coal_intvl = coal_intvl; + + return 0; + +} + + +/* ethtool_ops: DaVinci EMAC Ethtool structure + * + * Ethtool support for EMAC adapter + */ +static const struct ethtool_ops ethtool_ops = { + .get_drvinfo = emac_get_drvinfo, + .get_settings = emac_get_settings, + .set_settings = emac_set_settings, + .get_link = ethtool_op_get_link, + .get_coalesce = emac_get_coalesce, + .set_coalesce = emac_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, +}; + +/** + * emac_update_phystatus - Update Phy status + * @priv: The DaVinci EMAC private adapter structure + * + * Updates phy status and takes action for network queue if required + * based upon link status + * + */ +static void emac_update_phystatus(struct emac_priv *priv) +{ + u32 mac_control; + u32 new_duplex; + u32 cur_duplex; + struct net_device *ndev = priv->ndev; + + mac_control = emac_read(EMAC_MACCONTROL); + cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + if (priv->phydev) + new_duplex = priv->phydev->duplex; + else + new_duplex = DUPLEX_FULL; + + /* We get called only if link has changed (speed/duplex/status) */ + if ((priv->link) && (new_duplex != cur_duplex)) { + priv->duplex = new_duplex; + if (DUPLEX_FULL == priv->duplex) + mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN); + else + mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN); + } + + if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) { + mac_control = emac_read(EMAC_MACCONTROL); + mac_control |= (EMAC_DM646X_MACCONTORL_GIG | + EMAC_DM646X_MACCONTORL_GIGFORCE); + } else { + /* Clear the GIG bit and GIGFORCE bit */ + mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE | + EMAC_DM646X_MACCONTORL_GIG); + + if (priv->rmii_en && (priv->speed == SPEED_100)) + mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK; + else + mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK; + } + + /* Update mac_control if changed */ + emac_write(EMAC_MACCONTROL, mac_control); + + if (priv->link) { + /* link ON */ + if (!netif_carrier_ok(ndev)) + netif_carrier_on(ndev); + /* reactivate the transmit queue if it is stopped */ + if (netif_running(ndev) && netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } else { + /* link OFF */ + if (netif_carrier_ok(ndev)) + netif_carrier_off(ndev); + if (!netif_queue_stopped(ndev)) + netif_stop_queue(ndev); + } +} + +/** + * hash_get - Calculate hash value from mac address + * @addr: mac address to delete from hash table + * + * Calculates hash value from mac address + * + */ +static u32 hash_get(u8 *addr) +{ + u32 hash; + u8 tmpval; + int cnt; + hash = 0; + + for (cnt = 0; cnt < 2; cnt++) { + tmpval = *addr++; + hash ^= (tmpval >> 2) ^ (tmpval << 4); + tmpval = *addr++; + hash ^= (tmpval >> 4) ^ (tmpval << 2); + tmpval = *addr++; + hash ^= (tmpval >> 6) ^ (tmpval); + } + + return hash & 0x3F; +} + +/** + * hash_add - Hash function to add mac addr from hash table + * @priv: The DaVinci EMAC private adapter structure + * @mac_addr: mac address to delete from hash table + * + * Adds mac address to the internal hash table + * + */ +static int hash_add(struct emac_priv *priv, u8 *mac_addr) +{ + struct device *emac_dev = &priv->ndev->dev; + u32 rc = 0; + u32 hash_bit; + u32 hash_value = hash_get(mac_addr); + + if (hash_value >= EMAC_NUM_MULTICAST_BITS) { + if (netif_msg_drv(priv)) { + dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\ + "Hash %08x, should not be greater than %08x", + hash_value, (EMAC_NUM_MULTICAST_BITS - 1)); + } + return -1; + } + + /* set the hash bit only if not previously set */ + if (priv->multicast_hash_cnt[hash_value] == 0) { + rc = 1; /* hash value changed */ + if (hash_value < 32) { + hash_bit = BIT(hash_value); + priv->mac_hash1 |= hash_bit; + } else { + hash_bit = BIT((hash_value - 32)); + priv->mac_hash2 |= hash_bit; + } + } + + /* incr counter for num of mcast addr's mapped to "this" hash bit */ + ++priv->multicast_hash_cnt[hash_value]; + + return rc; +} + +/** + * hash_del - Hash function to delete mac addr from hash table + * @priv: The DaVinci EMAC private adapter structure + * @mac_addr: mac address to delete from hash table + * + * Removes mac address from the internal hash table + * + */ +static int hash_del(struct emac_priv *priv, u8 *mac_addr) +{ + u32 hash_value; + u32 hash_bit; + + hash_value = hash_get(mac_addr); + if (priv->multicast_hash_cnt[hash_value] > 0) { + /* dec cntr for num of mcast addr's mapped to this hash bit */ + --priv->multicast_hash_cnt[hash_value]; + } + + /* if counter still > 
0, at least one multicast address refers + * to this hash bit. so return 0 */ + if (priv->multicast_hash_cnt[hash_value] > 0) + return 0; + + if (hash_value < 32) { + hash_bit = BIT(hash_value); + priv->mac_hash1 &= ~hash_bit; + } else { + hash_bit = BIT((hash_value - 32)); + priv->mac_hash2 &= ~hash_bit; + } + + /* return 1 to indicate change in mac_hash registers reqd */ + return 1; +} + +/* EMAC multicast operation */ +#define EMAC_MULTICAST_ADD 0 +#define EMAC_MULTICAST_DEL 1 +#define EMAC_ALL_MULTI_SET 2 +#define EMAC_ALL_MULTI_CLR 3 + +/** + * emac_add_mcast - Set multicast address in the EMAC adapter (Internal) + * @priv: The DaVinci EMAC private adapter structure + * @action: multicast operation to perform + * mac_addr: mac address to set + * + * Set multicast addresses in EMAC adapter - internal function + * + */ +static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr) +{ + struct device *emac_dev = &priv->ndev->dev; + int update = -1; + + switch (action) { + case EMAC_MULTICAST_ADD: + update = hash_add(priv, mac_addr); + break; + case EMAC_MULTICAST_DEL: + update = hash_del(priv, mac_addr); + break; + case EMAC_ALL_MULTI_SET: + update = 1; + priv->mac_hash1 = EMAC_ALL_MULTI_REG_VALUE; + priv->mac_hash2 = EMAC_ALL_MULTI_REG_VALUE; + break; + case EMAC_ALL_MULTI_CLR: + update = 1; + priv->mac_hash1 = 0; + priv->mac_hash2 = 0; + memset(&(priv->multicast_hash_cnt[0]), 0, + sizeof(priv->multicast_hash_cnt[0]) * + EMAC_NUM_MULTICAST_BITS); + break; + default: + if (netif_msg_drv(priv)) + dev_err(emac_dev, "DaVinci EMAC: add_mcast"\ + ": bad operation %d", action); + break; + } + + /* write to the hardware only if the register status chances */ + if (update > 0) { + emac_write(EMAC_MACHASH1, priv->mac_hash1); + emac_write(EMAC_MACHASH2, priv->mac_hash2); + } +} + +/** + * emac_dev_mcast_set - Set multicast address in the EMAC adapter + * @ndev: The DaVinci EMAC network adapter + * + * Set multicast addresses in EMAC adapter + * + */ +static void emac_dev_mcast_set(struct net_device *ndev) +{ + u32 mbp_enable; + struct emac_priv *priv = netdev_priv(ndev); + + mbp_enable = emac_read(EMAC_RXMBPENABLE); + if (ndev->flags & IFF_PROMISC) { + mbp_enable &= (~EMAC_MBP_PROMISCCH(EMAC_DEF_PROM_CH)); + mbp_enable |= (EMAC_MBP_RXPROMISC); + } else { + mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC); + if ((ndev->flags & IFF_ALLMULTI) || + netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { + mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); + emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); + } else if (!netdev_mc_empty(ndev)) { + struct netdev_hw_addr *ha; + + mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); + emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); + /* program multicast address list into EMAC hardware */ + netdev_for_each_mc_addr(ha, ndev) { + emac_add_mcast(priv, EMAC_MULTICAST_ADD, + (u8 *) ha->addr); + } + } else { + mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST); + emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); + } + } + /* Set mbp config register */ + emac_write(EMAC_RXMBPENABLE, mbp_enable); +} + +/************************************************************************* + * EMAC Hardware manipulation + *************************************************************************/ + +/** + * emac_int_disable - Disable EMAC module interrupt (from adapter) + * @priv: The DaVinci EMAC private adapter structure + * + * Disable EMAC interrupt on the adapter + * + */ +static void emac_int_disable(struct emac_priv *priv) +{ + if (priv->version == EMAC_VERSION_2) { + 
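+ /* EMAC_VERSION_2 (DM646x-style control module): the RX/TX completion
+ * interrupts are masked through the C0 interrupt enables and then
+ * acknowledged via the EOI vector register; the whole sequence below
+ * runs with local interrupts disabled.
+ */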
unsigned long flags; + + local_irq_save(flags); + + /* Program C0_Int_En to zero to turn off + * interrupts to the CPU */ + emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0); + emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0); + /* NOTE: Rx Threshold and Misc interrupts are not disabled */ + if (priv->int_disable) + priv->int_disable(); + + /* NOTE: Rx Threshold and Misc interrupts are not enabled */ + + /* ack rxen only then a new pulse will be generated */ + emac_write(EMAC_DM646X_MACEOIVECTOR, + EMAC_DM646X_MAC_EOI_C0_RXEN); + + /* ack txen- only then a new pulse will be generated */ + emac_write(EMAC_DM646X_MACEOIVECTOR, + EMAC_DM646X_MAC_EOI_C0_TXEN); + + local_irq_restore(flags); + + } else { + /* Set DM644x control registers for interrupt control */ + emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0); + } +} + +/** + * emac_int_enable - Enable EMAC module interrupt (from adapter) + * @priv: The DaVinci EMAC private adapter structure + * + * Enable EMAC interrupt on the adapter + * + */ +static void emac_int_enable(struct emac_priv *priv) +{ + if (priv->version == EMAC_VERSION_2) { + if (priv->int_enable) + priv->int_enable(); + + emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff); + emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff); + + /* In addition to turning on interrupt Enable, we need + * ack by writing appropriate values to the EOI + * register */ + + /* NOTE: Rx Threshold and Misc interrupts are not enabled */ + } else { + /* Set DM644x control registers for interrupt control */ + emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1); + } +} + +/** + * emac_irq - EMAC interrupt handler + * @irq: interrupt number + * @dev_id: EMAC network adapter data structure ptr + * + * EMAC Interrupt handler - we only schedule NAPI and not process any packets + * here. EVen the interrupt status is checked (TX/RX/Err) in NAPI poll function + * + * Returns interrupt handled condition + */ +static irqreturn_t emac_irq(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct emac_priv *priv = netdev_priv(ndev); + + ++priv->isr_count; + if (likely(netif_running(priv->ndev))) { + emac_int_disable(priv); + napi_schedule(&priv->napi); + } else { + /* we are closing down, so dont process anything */ + } + return IRQ_HANDLED; +} + +static struct sk_buff *emac_rx_alloc(struct emac_priv *priv) +{ + struct sk_buff *skb = netdev_alloc_skb(priv->ndev, priv->rx_buf_size); + if (WARN_ON(!skb)) + return NULL; + skb_reserve(skb, NET_IP_ALIGN); + return skb; +} + +static void emac_rx_handler(void *token, int len, int status) +{ + struct sk_buff *skb = token; + struct net_device *ndev = skb->dev; + struct emac_priv *priv = netdev_priv(ndev); + struct device *emac_dev = &ndev->dev; + int ret; + + /* free and bail if we are shutting down */ + if (unlikely(!netif_running(ndev))) { + dev_kfree_skb_any(skb); + return; + } + + /* recycle on receive error */ + if (status < 0) { + ndev->stats.rx_errors++; + goto recycle; + } + + /* feed received packet up the stack */ + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + ndev->stats.rx_bytes += len; + ndev->stats.rx_packets++; + + /* alloc a new packet for receive */ + skb = emac_rx_alloc(priv); + if (!skb) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + dev_err(emac_dev, "failed rx buffer alloc\n"); + return; + } + +recycle: + ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, + skb_tailroom(skb), 0); + + WARN_ON(ret == -ENOMEM); + if (unlikely(ret < 0)) + dev_kfree_skb_any(skb); +} + +static void emac_tx_handler(void *token, int 
len, int status) +{ + struct sk_buff *skb = token; + struct net_device *ndev = skb->dev; + + /* Check whether the queue is stopped due to stalled tx dma, if the + * queue is stopped then start the queue as we have free desc for tx + */ + if (unlikely(netif_queue_stopped(ndev))) + netif_wake_queue(ndev); + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += len; + dev_kfree_skb_any(skb); +} + +/** + * emac_dev_xmit - EMAC Transmit function + * @skb: SKB pointer + * @ndev: The DaVinci EMAC network adapter + * + * Called by the system to transmit a packet - we queue the packet in + * EMAC hardware transmit queue + * + * Returns success(NETDEV_TX_OK) or error code (typically out of desc's) + */ +static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct device *emac_dev = &ndev->dev; + int ret_code; + struct emac_priv *priv = netdev_priv(ndev); + + /* If no link, return */ + if (unlikely(!priv->link)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + dev_err(emac_dev, "DaVinci EMAC: No link to transmit"); + goto fail_tx; + } + + ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE); + if (unlikely(ret_code < 0)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + dev_err(emac_dev, "DaVinci EMAC: packet pad failed"); + goto fail_tx; + } + + skb_tx_timestamp(skb); + + ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, + 0); + if (unlikely(ret_code != 0)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); + goto fail_tx; + } + + /* If there is no more tx desc left free then we need to + * tell the kernel to stop sending us tx frames. + */ + if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) + netif_stop_queue(ndev); + + return NETDEV_TX_OK; + +fail_tx: + ndev->stats.tx_dropped++; + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; +} + +/** + * emac_dev_tx_timeout - EMAC Transmit timeout function + * @ndev: The DaVinci EMAC network adapter + * + * Called when system detects that a skb timeout period has expired + * potentially due to a fault in the adapter in not being able to send + * it out on the wire. 
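+ * The handler is invoked from the networking core's TX watchdog once the
+ * transmit queue has been stopped for longer than the watchdog timeout.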
We teardown the TX channel assuming a hardware + * error and re-initialize the TX channel for hardware operation + * + */ +static void emac_dev_tx_timeout(struct net_device *ndev) +{ + struct emac_priv *priv = netdev_priv(ndev); + struct device *emac_dev = &ndev->dev; + + if (netif_msg_tx_err(priv)) + dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); + + emac_dump_regs(priv); + + ndev->stats.tx_errors++; + emac_int_disable(priv); + cpdma_chan_stop(priv->txchan); + cpdma_chan_start(priv->txchan); + emac_int_enable(priv); +} + +/** + * emac_set_type0addr - Set EMAC Type0 mac address + * @priv: The DaVinci EMAC private adapter structure + * @ch: RX channel number + * @mac_addr: MAC address to set in device + * + * Called internally to set Type0 mac address of the adapter (Device) + * + * Returns success (0) or appropriate error code (none as of now) + */ +static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr) +{ + u32 val; + val = ((mac_addr[5] << 8) | (mac_addr[4])); + emac_write(EMAC_MACSRCADDRLO, val); + + val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \ + (mac_addr[1] << 8) | (mac_addr[0])); + emac_write(EMAC_MACSRCADDRHI, val); + val = emac_read(EMAC_RXUNICASTSET); + val |= BIT(ch); + emac_write(EMAC_RXUNICASTSET, val); + val = emac_read(EMAC_RXUNICASTCLEAR); + val &= ~BIT(ch); + emac_write(EMAC_RXUNICASTCLEAR, val); +} + +/** + * emac_set_type1addr - Set EMAC Type1 mac address + * @priv: The DaVinci EMAC private adapter structure + * @ch: RX channel number + * @mac_addr: MAC address to set in device + * + * Called internally to set Type1 mac address of the adapter (Device) + * + * Returns success (0) or appropriate error code (none as of now) + */ +static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr) +{ + u32 val; + emac_write(EMAC_MACINDEX, ch); + val = ((mac_addr[5] << 8) | mac_addr[4]); + emac_write(EMAC_MACADDRLO, val); + val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \ + (mac_addr[1] << 8) | (mac_addr[0])); + emac_write(EMAC_MACADDRHI, val); + emac_set_type0addr(priv, ch, mac_addr); +} + +/** + * emac_set_type2addr - Set EMAC Type2 mac address + * @priv: The DaVinci EMAC private adapter structure + * @ch: RX channel number + * @mac_addr: MAC address to set in device + * @index: index into RX address entries + * @match: match parameter for RX address matching logic + * + * Called internally to set Type2 mac address of the adapter (Device) + * + * Returns success (0) or appropriate error code (none as of now) + */ +static void emac_set_type2addr(struct emac_priv *priv, u32 ch, + char *mac_addr, int index, int match) +{ + u32 val; + emac_write(EMAC_MACINDEX, index); + val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \ + (mac_addr[1] << 8) | (mac_addr[0])); + emac_write(EMAC_MACADDRHI, val); + val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \ + (match << 19) | BIT(20)); + emac_write(EMAC_MACADDRLO, val); + emac_set_type0addr(priv, ch, mac_addr); +} + +/** + * emac_setmac - Set mac address in the adapter (internal function) + * @priv: The DaVinci EMAC private adapter structure + * @ch: RX channel number + * @mac_addr: MAC address to set in device + * + * Called internally to set the mac address of the adapter (Device) + * + * Returns success (0) or appropriate error code (none as of now) + */ +static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr) +{ + struct device *emac_dev = &priv->ndev->dev; + + if (priv->rx_addr_type == 0) { + emac_set_type0addr(priv, ch, mac_addr); + } else if 
(priv->rx_addr_type == 1) { + u32 cnt; + for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++) + emac_set_type1addr(priv, ch, mac_addr); + } else if (priv->rx_addr_type == 2) { + emac_set_type2addr(priv, ch, mac_addr, ch, 1); + emac_set_type0addr(priv, ch, mac_addr); + } else { + if (netif_msg_drv(priv)) + dev_err(emac_dev, "DaVinci EMAC: Wrong addressing\n"); + } +} + +/** + * emac_dev_setmac_addr - Set mac address in the adapter + * @ndev: The DaVinci EMAC network adapter + * @addr: MAC address to set in device + * + * Called by the system to set the mac address of the adapter (Device) + * + * Returns success (0) or appropriate error code (none as of now) + */ +static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) +{ + struct emac_priv *priv = netdev_priv(ndev); + struct device *emac_dev = &priv->ndev->dev; + struct sockaddr *sa = addr; + + if (!is_valid_ether_addr(sa->sa_data)) + return -EADDRNOTAVAIL; + + /* Store mac addr in priv and rx channel and set it in EMAC hw */ + memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); + memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); + + /* MAC address is configured only after the interface is enabled. */ + if (netif_running(ndev)) { + emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr); + } + + if (netif_msg_drv(priv)) + dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n", + priv->mac_addr); + + return 0; +} + +/** + * emac_hw_enable - Enable EMAC hardware for packet transmission/reception + * @priv: The DaVinci EMAC private adapter structure + * + * Enables EMAC hardware for packet processing - enables PHY, enables RX + * for packet reception and enables device interrupts and then NAPI + * + * Returns success (0) or appropriate error code (none right now) + */ +static int emac_hw_enable(struct emac_priv *priv) +{ + u32 val, mbp_enable, mac_control; + + /* Soft reset */ + emac_write(EMAC_SOFTRESET, 1); + while (emac_read(EMAC_SOFTRESET)) + cpu_relax(); + + /* Disable interrupt & Set pacing for more interrupts initially */ + emac_int_disable(priv); + + /* Full duplex enable bit set when auto negotiation happens */ + mac_control = + (((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) | + ((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) | + ((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) | + ((priv->duplex == DUPLEX_FULL) ? 0x1 : 0)); + emac_write(EMAC_MACCONTROL, mac_control); + + mbp_enable = + (((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) | + ((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) | + ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) | + ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) | + ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) | + ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) | + ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) | + ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \ + EMAC_RXMBP_PROMCH_SHIFT) | + ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) | + ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \ + EMAC_RXMBP_BROADCH_SHIFT) | + ((EMAC_DEF_MCAST_EN) ? 
(EMAC_RXMBP_MULTIEN_MASK) : 0x0) | + ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \ + EMAC_RXMBP_MULTICH_SHIFT)); + emac_write(EMAC_RXMBPENABLE, mbp_enable); + emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE & + EMAC_RX_MAX_LEN_MASK)); + emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET & + EMAC_RX_BUFFER_OFFSET_MASK)); + emac_write(EMAC_RXFILTERLOWTHRESH, 0); + emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL); + priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF; + + emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL); + + emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr); + + /* Enable MII */ + val = emac_read(EMAC_MACCONTROL); + val |= (EMAC_MACCONTROL_GMIIEN); + emac_write(EMAC_MACCONTROL, val); + + /* Enable NAPI and interrupts */ + napi_enable(&priv->napi); + emac_int_enable(priv); + return 0; + +} + +/** + * emac_poll - EMAC NAPI Poll function + * @ndev: The DaVinci EMAC network adapter + * @budget: Number of receive packets to process (as told by NAPI layer) + * + * NAPI Poll function implemented to process packets as per budget. We check + * the type of interrupt on the device and accordingly call the TX or RX + * packet processing functions. We follow the budget for RX processing and + * also put a cap on number of TX pkts processed through config param. The + * NAPI schedule function is called if more packets pending. + * + * Returns number of packets received (in most cases; else TX pkts - rarely) + */ +static int emac_poll(struct napi_struct *napi, int budget) +{ + unsigned int mask; + struct emac_priv *priv = container_of(napi, struct emac_priv, napi); + struct net_device *ndev = priv->ndev; + struct device *emac_dev = &ndev->dev; + u32 status = 0; + u32 num_tx_pkts = 0, num_rx_pkts = 0; + + /* Check interrupt vectors and call packet processing */ + status = emac_read(EMAC_MACINVECTOR); + + mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC; + + if (priv->version == EMAC_VERSION_2) + mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC; + + if (status & mask) { + num_tx_pkts = cpdma_chan_process(priv->txchan, + EMAC_DEF_TX_MAX_SERVICE); + } /* TX processing */ + + mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC; + + if (priv->version == EMAC_VERSION_2) + mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC; + + if (status & mask) { + num_rx_pkts = cpdma_chan_process(priv->rxchan, budget); + } /* RX processing */ + + mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT; + if (priv->version == EMAC_VERSION_2) + mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT; + + if (unlikely(status & mask)) { + u32 ch, cause; + dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n"); + netif_stop_queue(ndev); + napi_disable(&priv->napi); + + status = emac_read(EMAC_MACSTATUS); + cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >> + EMAC_MACSTATUS_TXERRCODE_SHIFT); + if (cause) { + ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >> + EMAC_MACSTATUS_TXERRCH_SHIFT); + if (net_ratelimit()) { + dev_err(emac_dev, "TX Host error %s on ch=%d\n", + &emac_txhost_errcodes[cause][0], ch); + } + } + cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >> + EMAC_MACSTATUS_RXERRCODE_SHIFT); + if (cause) { + ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >> + EMAC_MACSTATUS_RXERRCH_SHIFT); + if (netif_msg_hw(priv) && net_ratelimit()) + dev_err(emac_dev, "RX Host error %s on ch=%d\n", + &emac_rxhost_errcodes[cause][0], ch); + } + } else if (num_rx_pkts < budget) { + napi_complete(napi); + emac_int_enable(priv); + } + + return num_rx_pkts; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * emac_poll_controller - EMAC Poll 
controller function + * @ndev: The DaVinci EMAC network adapter + * + * Polled functionality used by netconsole and others in non interrupt mode + * + */ +static void emac_poll_controller(struct net_device *ndev) +{ + struct emac_priv *priv = netdev_priv(ndev); + + emac_int_disable(priv); + emac_irq(ndev->irq, ndev); + emac_int_enable(priv); +} +#endif + +static void emac_adjust_link(struct net_device *ndev) +{ + struct emac_priv *priv = netdev_priv(ndev); + struct phy_device *phydev = priv->phydev; + unsigned long flags; + int new_state = 0; + + spin_lock_irqsave(&priv->lock, flags); + + if (phydev->link) { + /* check the mode of operation - full/half duplex */ + if (phydev->duplex != priv->duplex) { + new_state = 1; + priv->duplex = phydev->duplex; + } + if (phydev->speed != priv->speed) { + new_state = 1; + priv->speed = phydev->speed; + } + if (!priv->link) { + new_state = 1; + priv->link = 1; + } + + } else if (priv->link) { + new_state = 1; + priv->link = 0; + priv->speed = 0; + priv->duplex = ~0; + } + if (new_state) { + emac_update_phystatus(priv); + phy_print_status(priv->phydev); + } + + spin_unlock_irqrestore(&priv->lock, flags); +} + +/************************************************************************* + * Linux Driver Model + *************************************************************************/ + +/** + * emac_devioctl - EMAC adapter ioctl + * @ndev: The DaVinci EMAC network adapter + * @ifrq: request parameter + * @cmd: command parameter + * + * EMAC driver ioctl function + * + * Returns success(0) or appropriate error code + */ +static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) +{ + struct emac_priv *priv = netdev_priv(ndev); + + if (!(netif_running(ndev))) + return -EINVAL; + + /* TODO: Add phy read and write and private statistics get feature */ + + return phy_mii_ioctl(priv->phydev, ifrq, cmd); +} + +static int match_first_device(struct device *dev, void *data) +{ + return !strncmp(dev_name(dev), "davinci_mdio", 12); +} + +/** + * emac_dev_open - EMAC device open + * @ndev: The DaVinci EMAC network adapter + * + * Called when system wants to start the interface. We init TX/RX channels + * and enable the hardware for packet reception/transmission and start the + * network queue. 
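+ * In detail: RX buffers are pre-allocated and queued to the CPDMA RX
+ * channel, the platform IRQs are requested, the MAC is soft-reset and
+ * re-programmed, any previously configured interrupt-pacing interval is
+ * re-applied, and the PHY (taken from DT, platform data, or the first
+ * device found on the MDIO bus) is connected; without a PHY the link is
+ * fixed at 100 Mbit/s full duplex.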
+ * + * Returns 0 for a successful open, or appropriate error code + */ +static int emac_dev_open(struct net_device *ndev) +{ + struct device *emac_dev = &ndev->dev; + u32 cnt; + struct resource *res; + int q, m, ret; + int res_num = 0, irq_num = 0; + int i = 0; + struct emac_priv *priv = netdev_priv(ndev); + + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, ret); + return ret; + } + + netif_carrier_off(ndev); + for (cnt = 0; cnt < ETH_ALEN; cnt++) + ndev->dev_addr[cnt] = priv->mac_addr[cnt]; + + /* Configuration items */ + priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN; + + priv->mac_hash1 = 0; + priv->mac_hash2 = 0; + emac_write(EMAC_MACHASH1, 0); + emac_write(EMAC_MACHASH2, 0); + + for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) { + struct sk_buff *skb = emac_rx_alloc(priv); + + if (!skb) + break; + + ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, + skb_tailroom(skb), 0); + if (WARN_ON(ret < 0)) + break; + } + + /* Request IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, + res_num))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) { + if (request_irq(irq_num, emac_irq, 0, ndev->name, + ndev)) { + dev_err(emac_dev, + "DaVinci EMAC: request_irq() failed\n"); + ret = -EBUSY; + + goto rollback; + } + } + res_num++; + } + /* prepare counters for rollback in case of an error */ + res_num--; + irq_num--; + + /* Start/Enable EMAC hardware */ + emac_hw_enable(priv); + + /* Enable Interrupt pacing if configured */ + if (priv->coal_intvl != 0) { + struct ethtool_coalesce coal; + + coal.rx_coalesce_usecs = (priv->coal_intvl << 4); + emac_set_coalesce(ndev, &coal); + } + + cpdma_ctlr_start(priv->dma); + + priv->phydev = NULL; + + if (priv->phy_node) { + priv->phydev = of_phy_connect(ndev, priv->phy_node, + &emac_adjust_link, 0, 0); + if (!priv->phydev) { + dev_err(emac_dev, "could not connect to phy %s\n", + priv->phy_node->full_name); + ret = -ENODEV; + goto err; + } + } + + /* use the first phy on the bus if pdata did not give us a phy id */ + if (!priv->phydev && !priv->phy_id) { + struct device *phy; + + phy = bus_find_device(&mdio_bus_type, NULL, NULL, + match_first_device); + if (phy) + priv->phy_id = dev_name(phy); + } + + if (!priv->phydev && priv->phy_id && *priv->phy_id) { + priv->phydev = phy_connect(ndev, priv->phy_id, + &emac_adjust_link, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(priv->phydev)) { + dev_err(emac_dev, "could not connect to phy %s\n", + priv->phy_id); + ret = PTR_ERR(priv->phydev); + priv->phydev = NULL; + goto err; + } + + priv->link = 0; + priv->speed = 0; + priv->duplex = ~0; + + dev_info(emac_dev, "attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, id=%x)\n", + priv->phydev->drv->name, dev_name(&priv->phydev->dev), + priv->phydev->phy_id); + } + + if (!priv->phydev) { + /* No PHY , fix the link, speed and duplex settings */ + dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); + priv->link = 1; + priv->speed = SPEED_100; + priv->duplex = DUPLEX_FULL; + emac_update_phystatus(priv); + } + + if (!netif_running(ndev)) /* debug only - to avoid compiler warning */ + emac_dump_regs(priv); + + if (netif_msg_drv(priv)) + dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name); + + if (priv->phydev) + phy_start(priv->phydev); + + return 0; + +err: + emac_int_disable(priv); + napi_disable(&priv->napi); + +rollback: + for (q = res_num; q >= 0; q--) { + res = 
platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); + /* at the first iteration, irq_num is already set to the + * right value + */ + if (q != res_num) + irq_num = res->end; + + for (m = irq_num; m >= res->start; m--) + free_irq(m, ndev); + } + cpdma_ctlr_stop(priv->dma); + pm_runtime_put(&priv->pdev->dev); + return ret; +} + +/** + * emac_dev_stop - EMAC device stop + * @ndev: The DaVinci EMAC network adapter + * + * Called when system wants to stop or down the interface. We stop the network + * queue, disable interrupts and cleanup TX/RX channels. + * + * We return the statistics in net_device_stats structure pulled from emac + */ +static int emac_dev_stop(struct net_device *ndev) +{ + struct resource *res; + int i = 0; + int irq_num; + struct emac_priv *priv = netdev_priv(ndev); + struct device *emac_dev = &ndev->dev; + + /* inform the upper layers. */ + netif_stop_queue(ndev); + napi_disable(&priv->napi); + + netif_carrier_off(ndev); + emac_int_disable(priv); + cpdma_ctlr_stop(priv->dma); + emac_write(EMAC_SOFTRESET, 1); + + if (priv->phydev) + phy_disconnect(priv->phydev); + + /* Free IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) + free_irq(irq_num, priv->ndev); + i++; + } + + if (netif_msg_drv(priv)) + dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); + + pm_runtime_put(&priv->pdev->dev); + return 0; +} + +/** + * emac_dev_getnetstats - EMAC get statistics function + * @ndev: The DaVinci EMAC network adapter + * + * Called when system wants to get statistics from the device. + * + * We return the statistics in net_device_stats structure pulled from emac + */ +static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) +{ + struct emac_priv *priv = netdev_priv(ndev); + u32 mac_control; + u32 stats_clear_mask; + int err; + + err = pm_runtime_get_sync(&priv->pdev->dev); + if (err < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, err); + return &ndev->stats; + } + + /* update emac hardware stats and reset the registers*/ + + mac_control = emac_read(EMAC_MACCONTROL); + + if (mac_control & EMAC_MACCONTROL_GMIIEN) + stats_clear_mask = EMAC_STATS_CLR_MASK; + else + stats_clear_mask = 0; + + ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES); + emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask); + + ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) + + emac_read(EMAC_TXSINGLECOLL) + + emac_read(EMAC_TXMULTICOLL)); + emac_write(EMAC_TXCOLLISION, stats_clear_mask); + emac_write(EMAC_TXSINGLECOLL, stats_clear_mask); + emac_write(EMAC_TXMULTICOLL, stats_clear_mask); + + ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + + emac_read(EMAC_RXJABBER) + + emac_read(EMAC_RXUNDERSIZED)); + emac_write(EMAC_RXOVERSIZED, stats_clear_mask); + emac_write(EMAC_RXJABBER, stats_clear_mask); + emac_write(EMAC_RXUNDERSIZED, stats_clear_mask); + + ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + + emac_read(EMAC_RXMOFOVERRUNS)); + emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask); + emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask); + + ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); + emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask); + + ndev->stats.tx_carrier_errors += + emac_read(EMAC_TXCARRIERSENSE); + emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); + + ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); + emac_write(EMAC_TXUNDERRUN, stats_clear_mask); + + 
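+ /* Writing stats_clear_mask back after each read is intended to reset
+ * the hardware counters so the next call does not accumulate them
+ * twice; when GMIIEN is not set the mask is zero and the counters are
+ * left untouched.
+ */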
pm_runtime_put(&priv->pdev->dev); + + return &ndev->stats; +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_dev_open, + .ndo_stop = emac_dev_stop, + .ndo_start_xmit = emac_dev_xmit, + .ndo_set_rx_mode = emac_dev_mcast_set, + .ndo_set_mac_address = emac_dev_setmac_addr, + .ndo_do_ioctl = emac_devioctl, + .ndo_tx_timeout = emac_dev_tx_timeout, + .ndo_get_stats = emac_dev_getnetstats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = emac_poll_controller, +#endif +}; + +static const struct of_device_id davinci_emac_of_match[]; + +static struct emac_platform_data * +davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) +{ + struct device_node *np; + const struct of_device_id *match; + const struct emac_platform_data *auxdata; + struct emac_platform_data *pdata = NULL; + const u8 *mac_addr; + + if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node) + return dev_get_platdata(&pdev->dev); + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + np = pdev->dev.of_node; + pdata->version = EMAC_VERSION_2; + + if (!is_valid_ether_addr(pdata->mac_addr)) { + mac_addr = of_get_mac_address(np); + if (mac_addr) + ether_addr_copy(pdata->mac_addr, mac_addr); + } + + of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", + &pdata->ctrl_reg_offset); + + of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset", + &pdata->ctrl_mod_reg_offset); + + of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", + &pdata->ctrl_ram_offset); + + of_property_read_u32(np, "ti,davinci-ctrl-ram-size", + &pdata->ctrl_ram_size); + + of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en); + + pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram"); + + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!priv->phy_node) + pdata->phy_id = NULL; + + auxdata = pdev->dev.platform_data; + if (auxdata) { + pdata->interrupt_enable = auxdata->interrupt_enable; + pdata->interrupt_disable = auxdata->interrupt_disable; + } + + match = of_match_device(davinci_emac_of_match, &pdev->dev); + if (match && match->data) { + auxdata = match->data; + pdata->version = auxdata->version; + pdata->hw_ram_addr = auxdata->hw_ram_addr; + } + + pdev->dev.platform_data = pdata; + + return pdata; +} + +static int davinci_emac_3517_get_macid(struct device *dev, u16 offset, + int slave, u8 *mac_addr) +{ + u32 macid_lsb; + u32 macid_msb; + struct regmap *syscon; + + syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon"); + if (IS_ERR(syscon)) { + if (PTR_ERR(syscon) == -ENODEV) + return 0; + return PTR_ERR(syscon); + } + + regmap_read(syscon, offset, &macid_lsb); + regmap_read(syscon, offset + 4, &macid_msb); + + mac_addr[0] = (macid_msb >> 16) & 0xff; + mac_addr[1] = (macid_msb >> 8) & 0xff; + mac_addr[2] = macid_msb & 0xff; + mac_addr[3] = (macid_lsb >> 16) & 0xff; + mac_addr[4] = (macid_lsb >> 8) & 0xff; + mac_addr[5] = macid_lsb & 0xff; + + return 0; +} + +static int davinci_emac_try_get_mac(struct platform_device *pdev, + int instance, u8 *mac_addr) +{ + int error = -EINVAL; + + if (!pdev->dev.of_node) + return error; + + if (of_device_is_compatible(pdev->dev.of_node, "ti,am3517-emac")) + error = davinci_emac_3517_get_macid(&pdev->dev, 0x110, + 0, mac_addr); + else if (of_device_is_compatible(pdev->dev.of_node, + "ti,dm816-emac")) + error = cpsw_am33xx_cm_get_macid(&pdev->dev, 0x30, + instance, + mac_addr); + + return error; +} + +/** + * davinci_emac_probe - EMAC device probe + * @pdev: The DaVinci EMAC device 
that we are removing + * + * Called when probing for emac devicesr. We get details of instances and + * resource information from platform init and register a network device + * and allocate resources necessary for driver to perform + */ +static int davinci_emac_probe(struct platform_device *pdev) +{ + int rc = 0; + struct resource *res, *res_ctrl; + struct net_device *ndev; + struct emac_priv *priv; + unsigned long hw_ram_addr; + struct emac_platform_data *pdata; + struct cpdma_params dma_params; + struct clk *emac_clk; + unsigned long emac_bus_frequency; + + + /* obtain emac clock from kernel */ + emac_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(emac_clk)) { + dev_err(&pdev->dev, "failed to get EMAC clock\n"); + return -EBUSY; + } + emac_bus_frequency = clk_get_rate(emac_clk); + devm_clk_put(&pdev->dev, emac_clk); + + /* TODO: Probe PHY here if possible */ + + ndev = alloc_etherdev(sizeof(struct emac_priv)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(pdev, ndev); + priv = netdev_priv(ndev); + priv->pdev = pdev; + priv->ndev = ndev; + priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG); + + spin_lock_init(&priv->lock); + + pdata = davinci_emac_of_get_pdata(pdev, priv); + if (!pdata) { + dev_err(&pdev->dev, "no platform data\n"); + rc = -ENODEV; + goto no_pdata; + } + + /* MAC addr and PHY mask , RMII enable info from platform_data */ + memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN); + priv->phy_id = pdata->phy_id; + priv->rmii_en = pdata->rmii_en; + priv->version = pdata->version; + priv->int_enable = pdata->interrupt_enable; + priv->int_disable = pdata->interrupt_disable; + + priv->coal_intvl = 0; + priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000); + + /* Get EMAC platform data */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; + priv->remap_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->remap_addr)) { + rc = PTR_ERR(priv->remap_addr); + goto no_pdata; + } + + res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res_ctrl) { + priv->ctrl_base = + devm_ioremap_resource(&pdev->dev, res_ctrl); + if (IS_ERR(priv->ctrl_base)) + goto no_pdata; + } else { + priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; + } + + priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; + ndev->base_addr = (unsigned long)priv->remap_addr; + + hw_ram_addr = pdata->hw_ram_addr; + if (!hw_ram_addr) + hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; + + memset(&dma_params, 0, sizeof(dma_params)); + dma_params.dev = &pdev->dev; + dma_params.dmaregs = priv->emac_base; + dma_params.rxthresh = priv->emac_base + 0x120; + dma_params.rxfree = priv->emac_base + 0x140; + dma_params.txhdp = priv->emac_base + 0x600; + dma_params.rxhdp = priv->emac_base + 0x620; + dma_params.txcp = priv->emac_base + 0x640; + dma_params.rxcp = priv->emac_base + 0x660; + dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; + dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; + dma_params.desc_hw_addr = hw_ram_addr; + dma_params.desc_mem_size = pdata->ctrl_ram_size; + dma_params.desc_align = 16; + + dma_params.desc_mem_phys = pdata->no_bd_ram ? 
0 : + (u32 __force)res->start + pdata->ctrl_ram_offset; + + priv->dma = cpdma_ctlr_create(&dma_params); + if (!priv->dma) { + dev_err(&pdev->dev, "error initializing DMA\n"); + rc = -ENOMEM; + goto no_pdata; + } + + priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), + emac_tx_handler); + priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH), + emac_rx_handler); + if (WARN_ON(!priv->txchan || !priv->rxchan)) { + rc = -ENOMEM; + goto no_cpdma_chan; + } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(&pdev->dev, "error getting irq res\n"); + rc = -ENOENT; + goto no_cpdma_chan; + } + ndev->irq = res->start; + + rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr); + if (!rc) + ether_addr_copy(ndev->dev_addr, priv->mac_addr); + + if (!is_valid_ether_addr(priv->mac_addr)) { + /* Use random MAC if none passed */ + eth_hw_addr_random(ndev); + memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len); + dev_warn(&pdev->dev, "using random MAC addr: %pM\n", + priv->mac_addr); + } + + ndev->netdev_ops = &emac_netdev_ops; + ndev->ethtool_ops = ðtool_ops; + netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); + + pm_runtime_enable(&pdev->dev); + rc = pm_runtime_get_sync(&pdev->dev); + if (rc < 0) { + pm_runtime_put_noidle(&pdev->dev); + dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, rc); + goto no_cpdma_chan; + } + + /* register the network device */ + SET_NETDEV_DEV(ndev, &pdev->dev); + rc = register_netdev(ndev); + if (rc) { + dev_err(&pdev->dev, "error in register_netdev\n"); + rc = -ENODEV; + pm_runtime_put(&pdev->dev); + goto no_cpdma_chan; + } + + + if (netif_msg_probe(priv)) { + dev_notice(&pdev->dev, "DaVinci EMAC Probe found device " + "(regs: %p, irq: %d)\n", + (void *)priv->emac_base_phys, ndev->irq); + } + pm_runtime_put(&pdev->dev); + + return 0; + +no_cpdma_chan: + if (priv->txchan) + cpdma_chan_destroy(priv->txchan); + if (priv->rxchan) + cpdma_chan_destroy(priv->rxchan); + cpdma_ctlr_destroy(priv->dma); +no_pdata: + free_netdev(ndev); + return rc; +} + +/** + * davinci_emac_remove - EMAC device remove + * @pdev: The DaVinci EMAC device that we are removing + * + * Called when removing the device driver. 
We disable clock usage and release + * the resources taken up by the driver and unregister network device + */ +static int davinci_emac_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct emac_priv *priv = netdev_priv(ndev); + + dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); + + if (priv->txchan) + cpdma_chan_destroy(priv->txchan); + if (priv->rxchan) + cpdma_chan_destroy(priv->rxchan); + cpdma_ctlr_destroy(priv->dma); + + unregister_netdev(ndev); + free_netdev(ndev); + + return 0; +} + +static int davinci_emac_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + + if (netif_running(ndev)) + emac_dev_stop(ndev); + + return 0; +} + +static int davinci_emac_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + + if (netif_running(ndev)) + emac_dev_open(ndev); + + return 0; +} + +static const struct dev_pm_ops davinci_emac_pm_ops = { + .suspend = davinci_emac_suspend, + .resume = davinci_emac_resume, +}; + +#if IS_ENABLED(CONFIG_OF) +static const struct emac_platform_data am3517_emac_data = { + .version = EMAC_VERSION_2, + .hw_ram_addr = 0x01e20000, +}; + +static const struct emac_platform_data dm816_emac_data = { + .version = EMAC_VERSION_2, +}; + +static const struct of_device_id davinci_emac_of_match[] = { + {.compatible = "ti,davinci-dm6467-emac", }, + {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, + {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, }, + {}, +}; +MODULE_DEVICE_TABLE(of, davinci_emac_of_match); +#endif + +/* davinci_emac_driver: EMAC platform driver structure */ +static struct platform_driver davinci_emac_driver = { + .driver = { + .name = "davinci_emac", + .pm = &davinci_emac_pm_ops, + .of_match_table = of_match_ptr(davinci_emac_of_match), + }, + .probe = davinci_emac_probe, + .remove = davinci_emac_remove, +}; + +/** + * davinci_emac_init - EMAC driver module init + * + * Called when initializing the driver. We register the driver with + * the platform. + */ +static int __init davinci_emac_init(void) +{ + return platform_driver_register(&davinci_emac_driver); +} +late_initcall(davinci_emac_init); + +/** + * davinci_emac_exit - EMAC driver module exit + * + * Called when exiting the driver completely. We unregister the driver with + * the platform and exit + */ +static void __exit davinci_emac_exit(void) +{ + platform_driver_unregister(&davinci_emac_driver); +} +module_exit(davinci_emac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole "); +MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S "); +MODULE_DESCRIPTION("DaVinci EMAC Ethernet driver"); diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c new file mode 100644 index 000000000..c00084d68 --- /dev/null +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -0,0 +1,505 @@ +/* + * DaVinci MDIO Module driver + * + * Copyright (C) 2010 Texas Instruments. + * + * Shamelessly ripped out of davinci_emac.c, original copyrights follow: + * + * Copyright (C) 2009 Texas Instruments. 
+ * + * --------------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * --------------------------------------------------------------------------- + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This timeout definition is a worst-case ultra defensive measure against + * unexpected controller lock ups. Ideally, we should never ever hit this + * scenario in practice. + */ +#define MDIO_TIMEOUT 100 /* msecs */ + +#define PHY_REG_MASK 0x1f +#define PHY_ID_MASK 0x1f + +#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ + +struct davinci_mdio_regs { + u32 version; + u32 control; +#define CONTROL_IDLE BIT(31) +#define CONTROL_ENABLE BIT(30) +#define CONTROL_MAX_DIV (0xffff) + + u32 alive; + u32 link; + u32 linkintraw; + u32 linkintmasked; + u32 __reserved_0[2]; + u32 userintraw; + u32 userintmasked; + u32 userintmaskset; + u32 userintmaskclr; + u32 __reserved_1[20]; + + struct { + u32 access; +#define USERACCESS_GO BIT(31) +#define USERACCESS_WRITE BIT(30) +#define USERACCESS_ACK BIT(29) +#define USERACCESS_READ (0) +#define USERACCESS_DATA (0xffff) + + u32 physel; + } user[0]; +}; + +static const struct mdio_platform_data default_pdata = { + .bus_freq = DEF_OUT_FREQ, +}; + +struct davinci_mdio_data { + struct mdio_platform_data pdata; + struct davinci_mdio_regs __iomem *regs; + spinlock_t lock; + struct clk *clk; + struct device *dev; + struct mii_bus *bus; + bool suspended; + unsigned long access_time; /* jiffies */ + /* Indicates that driver shouldn't modify phy_mask in case + * if MDIO bus is registered from DT. + */ + bool skip_scan; +}; + +static void __davinci_mdio_reset(struct davinci_mdio_data *data) +{ + u32 mdio_in, div, mdio_out_khz, access_time; + + mdio_in = clk_get_rate(data->clk); + div = (mdio_in / data->pdata.bus_freq) - 1; + if (div > CONTROL_MAX_DIV) + div = CONTROL_MAX_DIV; + + /* set enable and clock divider */ + __raw_writel(div | CONTROL_ENABLE, &data->regs->control); + + /* + * One mdio transaction consists of: + * 32 bits of preamble + * 32 bits of transferred data + * 24 bits of bus yield (not needed unless shared?) + */ + mdio_out_khz = mdio_in / (1000 * (div + 1)); + access_time = (88 * 1000) / mdio_out_khz; + + /* + * In the worst case, we could be kicking off a user-access immediately + * after the mdio bus scan state-machine triggered its own read. If + * so, our request could get deferred by one access cycle. We + * defensively allow for 4 access cycles. 
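+ * For example, with the default 2.2 MHz output clock one 88-bit
+ * transaction takes about 88 / 2.2 MHz = 40 usecs, so the four access
+ * cycles allowed for here come to roughly 160 usecs (rounded up to at
+ * least one jiffy below).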
+ */ + data->access_time = usecs_to_jiffies(access_time * 4); + if (!data->access_time) + data->access_time = 1; +} + +static int davinci_mdio_reset(struct mii_bus *bus) +{ + struct davinci_mdio_data *data = bus->priv; + u32 phy_mask, ver; + + __davinci_mdio_reset(data); + + /* wait for scan logic to settle */ + msleep(PHY_MAX_ADDR * data->access_time); + + /* dump hardware version info */ + ver = __raw_readl(&data->regs->version); + dev_info(data->dev, "davinci mdio revision %d.%d\n", + (ver >> 8) & 0xff, ver & 0xff); + + if (data->skip_scan) + return 0; + + /* get phy mask from the alive register */ + phy_mask = __raw_readl(&data->regs->alive); + if (phy_mask) { + /* restrict mdio bus to live phys only */ + dev_info(data->dev, "detected phy mask %x\n", ~phy_mask); + phy_mask = ~phy_mask; + } else { + /* desperately scan all phys */ + dev_warn(data->dev, "no live phy, scanning all\n"); + phy_mask = 0; + } + data->bus->phy_mask = phy_mask; + + return 0; +} + +/* wait until hardware is ready for another user access */ +static inline int wait_for_user_access(struct davinci_mdio_data *data) +{ + struct davinci_mdio_regs __iomem *regs = data->regs; + unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); + u32 reg; + + while (time_after(timeout, jiffies)) { + reg = __raw_readl(®s->user[0].access); + if ((reg & USERACCESS_GO) == 0) + return 0; + + reg = __raw_readl(®s->control); + if ((reg & CONTROL_IDLE) == 0) + continue; + + /* + * An emac soft_reset may have clobbered the mdio controller's + * state machine. We need to reset and retry the current + * operation + */ + dev_warn(data->dev, "resetting idled controller\n"); + __davinci_mdio_reset(data); + return -EAGAIN; + } + + reg = __raw_readl(®s->user[0].access); + if ((reg & USERACCESS_GO) == 0) + return 0; + + dev_err(data->dev, "timed out waiting for user access\n"); + return -ETIMEDOUT; +} + +/* wait until hardware state machine is idle */ +static inline int wait_for_idle(struct davinci_mdio_data *data) +{ + struct davinci_mdio_regs __iomem *regs = data->regs; + unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); + + while (time_after(timeout, jiffies)) { + if (__raw_readl(®s->control) & CONTROL_IDLE) + return 0; + } + dev_err(data->dev, "timed out waiting for idle\n"); + return -ETIMEDOUT; +} + +static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) +{ + struct davinci_mdio_data *data = bus->priv; + u32 reg; + int ret; + + if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) + return -EINVAL; + + spin_lock(&data->lock); + + if (data->suspended) { + spin_unlock(&data->lock); + return -ENODEV; + } + + reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | + (phy_id << 16)); + + while (1) { + ret = wait_for_user_access(data); + if (ret == -EAGAIN) + continue; + if (ret < 0) + break; + + __raw_writel(reg, &data->regs->user[0].access); + + ret = wait_for_user_access(data); + if (ret == -EAGAIN) + continue; + if (ret < 0) + break; + + reg = __raw_readl(&data->regs->user[0].access); + ret = (reg & USERACCESS_ACK) ? 
(reg & USERACCESS_DATA) : -EIO; + break; + } + + spin_unlock(&data->lock); + + return ret; +} + +static int davinci_mdio_write(struct mii_bus *bus, int phy_id, + int phy_reg, u16 phy_data) +{ + struct davinci_mdio_data *data = bus->priv; + u32 reg; + int ret; + + if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) + return -EINVAL; + + spin_lock(&data->lock); + + if (data->suspended) { + spin_unlock(&data->lock); + return -ENODEV; + } + + reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | + (phy_id << 16) | (phy_data & USERACCESS_DATA)); + + while (1) { + ret = wait_for_user_access(data); + if (ret == -EAGAIN) + continue; + if (ret < 0) + break; + + __raw_writel(reg, &data->regs->user[0].access); + + ret = wait_for_user_access(data); + if (ret == -EAGAIN) + continue; + break; + } + + spin_unlock(&data->lock); + + return 0; +} + +#if IS_ENABLED(CONFIG_OF) +static int davinci_mdio_probe_dt(struct mdio_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "bus_freq", &prop)) { + dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n"); + return -EINVAL; + } + data->bus_freq = prop; + + return 0; +} +#endif + +static int davinci_mdio_probe(struct platform_device *pdev) +{ + struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; + struct davinci_mdio_data *data; + struct resource *res; + struct phy_device *phy; + int ret, addr; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->bus = devm_mdiobus_alloc(dev); + if (!data->bus) { + dev_err(dev, "failed to alloc mii bus\n"); + return -ENOMEM; + } + + if (dev->of_node) { + if (davinci_mdio_probe_dt(&data->pdata, pdev)) + data->pdata = default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + } else { + data->pdata = pdata ? (*pdata) : default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + } + + data->bus->name = dev_name(dev); + data->bus->read = davinci_mdio_read, + data->bus->write = davinci_mdio_write, + data->bus->reset = davinci_mdio_reset, + data->bus->parent = dev; + data->bus->priv = data; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + data->clk = devm_clk_get(dev, "fck"); + if (IS_ERR(data->clk)) { + dev_err(dev, "failed to get device clock\n"); + ret = PTR_ERR(data->clk); + data->clk = NULL; + goto bail_out; + } + + dev_set_drvdata(dev, data); + data->dev = dev; + spin_lock_init(&data->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(data->regs)) { + ret = PTR_ERR(data->regs); + goto bail_out; + } + + /* register the mii bus + * Create PHYs from DT only in case if PHY child nodes are explicitly + * defined to support backward compatibility with DTs which assume that + * Davinci MDIO will always scan the bus for PHYs detection. + */ + if (dev->of_node && of_get_child_count(dev->of_node)) { + data->skip_scan = true; + ret = of_mdiobus_register(data->bus, dev->of_node); + } else { + ret = mdiobus_register(data->bus); + } + if (ret) + goto bail_out; + + /* scan and dump the bus */ + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + phy = data->bus->phy_map[addr]; + if (phy) { + dev_info(dev, "phy[%d]: device %s, driver %s\n", + phy->addr, dev_name(&phy->dev), + phy->drv ? 
phy->drv->name : "unknown"); + } + } + + return 0; + +bail_out: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int davinci_mdio_remove(struct platform_device *pdev) +{ + struct davinci_mdio_data *data = platform_get_drvdata(pdev); + + if (data->bus) + mdiobus_unregister(data->bus); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int davinci_mdio_suspend(struct device *dev) +{ + struct davinci_mdio_data *data = dev_get_drvdata(dev); + u32 ctrl; + + spin_lock(&data->lock); + + /* shutdown the scan state machine */ + ctrl = __raw_readl(&data->regs->control); + ctrl &= ~CONTROL_ENABLE; + __raw_writel(ctrl, &data->regs->control); + wait_for_idle(data); + + data->suspended = true; + spin_unlock(&data->lock); + pm_runtime_put_sync(data->dev); + + /* Select sleep pin state */ + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int davinci_mdio_resume(struct device *dev) +{ + struct davinci_mdio_data *data = dev_get_drvdata(dev); + + /* Select default pin state */ + pinctrl_pm_select_default_state(dev); + + pm_runtime_get_sync(data->dev); + + spin_lock(&data->lock); + /* restart the scan state machine */ + __davinci_mdio_reset(data); + + data->suspended = false; + spin_unlock(&data->lock); + + return 0; +} +#endif + +static const struct dev_pm_ops davinci_mdio_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) +}; + +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id davinci_mdio_of_mtable[] = { + { .compatible = "ti,davinci_mdio", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); +#endif + +static struct platform_driver davinci_mdio_driver = { + .driver = { + .name = "davinci_mdio", + .pm = &davinci_mdio_pm_ops, + .of_match_table = of_match_ptr(davinci_mdio_of_mtable), + }, + .probe = davinci_mdio_probe, + .remove = davinci_mdio_remove, +}; + +static int __init davinci_mdio_init(void) +{ + return platform_driver_register(&davinci_mdio_driver); +} +device_initcall(davinci_mdio_init); + +static void __exit davinci_mdio_exit(void) +{ + platform_driver_unregister(&davinci_mdio_driver); +} +module_exit(davinci_mdio_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("DaVinci MDIO driver"); diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h new file mode 100644 index 000000000..bbacf5ccc --- /dev/null +++ b/drivers/net/ethernet/ti/netcp.h @@ -0,0 +1,232 @@ +/* + * NetCP driver local header + * + * Copyright (C) 2014 Texas Instruments Incorporated + * Authors: Sandeep Nair + * Sandeep Paulraj + * Cyril Chemparathy + * Santosh Shilimkar + * Wingman Kwok + * Murali Karicheri + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __NETCP_H__ +#define __NETCP_H__ + +#include +#include + +/* Maximum Ethernet frame size supported by Keystone switch */ +#define NETCP_MAX_FRAME_SIZE 9504 + +#define SGMII_LINK_MAC_MAC_AUTONEG 0 +#define SGMII_LINK_MAC_PHY 1 +#define SGMII_LINK_MAC_MAC_FORCED 2 +#define SGMII_LINK_MAC_FIBER 3 +#define SGMII_LINK_MAC_PHY_NO_MDIO 4 +#define XGMII_LINK_MAC_PHY 10 +#define XGMII_LINK_MAC_MAC_FORCED 11 + +struct netcp_device; + +struct netcp_tx_pipe { + struct netcp_device *netcp_device; + void *dma_queue; + unsigned int dma_queue_id; + /* To port for packet forwarded to switch. Used only by ethss */ + u8 switch_to_port; +#define SWITCH_TO_PORT_IN_TAGINFO BIT(0) + u8 flags; + void *dma_channel; + const char *dma_chan_name; +}; + +#define ADDR_NEW BIT(0) +#define ADDR_VALID BIT(1) + +enum netcp_addr_type { + ADDR_ANY, + ADDR_DEV, + ADDR_UCAST, + ADDR_MCAST, + ADDR_BCAST +}; + +struct netcp_addr { + struct netcp_intf *netcp; + unsigned char addr[ETH_ALEN]; + enum netcp_addr_type type; + unsigned int flags; + struct list_head node; +}; + +struct netcp_intf { + struct device *dev; + struct device *ndev_dev; + struct net_device *ndev; + bool big_endian; + unsigned int tx_compl_qid; + void *tx_pool; + struct list_head txhook_list_head; + unsigned int tx_pause_threshold; + void *tx_compl_q; + + unsigned int tx_resume_threshold; + void *rx_queue; + void *rx_pool; + struct list_head rxhook_list_head; + unsigned int rx_queue_id; + void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; + u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN]; + struct napi_struct rx_napi; + struct napi_struct tx_napi; + + void *rx_channel; + const char *dma_chan_name; + u32 rx_pool_size; + u32 rx_pool_region_id; + u32 tx_pool_size; + u32 tx_pool_region_id; + struct list_head module_head; + struct list_head interface_list; + struct list_head addr_list; + bool netdev_registered; + bool primary_module_attached; + + /* Lock used for protecting Rx/Tx hook list management */ + spinlock_t lock; + struct netcp_device *netcp_device; + struct device_node *node_interface; + + /* DMA configuration data */ + u32 msg_enable; + u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN]; +}; + +#define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS +struct netcp_packet { + struct sk_buff *skb; + u32 *epib; + u32 *psdata; + unsigned int psdata_len; + struct netcp_intf *netcp; + struct netcp_tx_pipe *tx_pipe; + bool rxtstamp_complete; + void *ts_context; + + int (*txtstamp_complete)(void *ctx, struct netcp_packet *pkt); +}; + +static inline u32 *netcp_push_psdata(struct netcp_packet *p_info, + unsigned int bytes) +{ + u32 *buf; + unsigned int words; + + if ((bytes & 0x03) != 0) + return NULL; + words = bytes >> 2; + + if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN) + return NULL; + + p_info->psdata_len += words; + buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len]; + return buf; +} + +static inline int netcp_align_psdata(struct netcp_packet *p_info, + unsigned int byte_align) +{ + int padding; + + switch (byte_align) { + case 0: + padding = -EINVAL; + break; + case 1: + case 2: + case 4: + padding = 0; + break; + case 8: + padding = (p_info->psdata_len << 2) % 8; + break; + case 16: + padding = (p_info->psdata_len << 2) % 16; + break; + default: + padding = (p_info->psdata_len << 2) % byte_align; + break; + } + return padding; +} + +struct netcp_module { + const char *name; + struct module *owner; + bool primary; + + /* probe/remove: called once per NETCP instance */ + int (*probe)(struct netcp_device *netcp_device, + struct device *device, struct device_node 
*node, + void **inst_priv); + int (*remove)(struct netcp_device *netcp_device, void *inst_priv); + + /* attach/release: called once per network interface */ + int (*attach)(void *inst_priv, struct net_device *ndev, + struct device_node *node, void **intf_priv); + int (*release)(void *intf_priv); + int (*open)(void *intf_priv, struct net_device *ndev); + int (*close)(void *intf_priv, struct net_device *ndev); + int (*add_addr)(void *intf_priv, struct netcp_addr *naddr); + int (*del_addr)(void *intf_priv, struct netcp_addr *naddr); + int (*add_vid)(void *intf_priv, int vid); + int (*del_vid)(void *intf_priv, int vid); + int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd); + + /* used internally */ + struct list_head module_list; + struct list_head interface_list; +}; + +int netcp_register_module(struct netcp_module *module); +void netcp_unregister_module(struct netcp_module *module); +void *netcp_module_get_intf_data(struct netcp_module *module, + struct netcp_intf *intf); + +int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe, + struct netcp_device *netcp_device, + const char *dma_chan_name, unsigned int dma_queue_id); +int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe); +int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe); + +typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet); +int netcp_register_txhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data); +int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data); +int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data); +int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data); +void *netcp_device_find_module(struct netcp_device *netcp_device, + const char *name); + +/* SGMII functions */ +int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); +int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port); +int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface); + +/* XGBE SERDES init functions */ +int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs); + +#endif /* __NETCP_H__ */ diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c new file mode 100644 index 000000000..43efc3a0c --- /dev/null +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -0,0 +1,2156 @@ +/* + * Keystone NetCP Core driver + * + * Copyright (C) 2014 Texas Instruments Incorporated + * Authors: Sandeep Nair + * Sandeep Paulraj + * Cyril Chemparathy + * Santosh Shilimkar + * Murali Karicheri + * Wingman Kwok + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "netcp.h" + +#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) +#define NETCP_NAPI_WEIGHT 64 +#define NETCP_TX_TIMEOUT (5 * HZ) +#define NETCP_MIN_PACKET_SIZE ETH_ZLEN +#define NETCP_MAX_MCAST_ADDR 16 + +#define NETCP_EFUSE_REG_INDEX 0 + +#define NETCP_MOD_PROBE_SKIPPED 1 +#define NETCP_MOD_PROBE_FAILED 2 + +#define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ + NETIF_MSG_DRV | NETIF_MSG_LINK | \ + NETIF_MSG_IFUP | NETIF_MSG_INTR | \ + NETIF_MSG_PROBE | NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \ + NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_RX_STATUS) + +#define knav_queue_get_id(q) knav_queue_device_control(q, \ + KNAV_QUEUE_GET_ID, (unsigned long)NULL) + +#define knav_queue_enable_notify(q) knav_queue_device_control(q, \ + KNAV_QUEUE_ENABLE_NOTIFY, \ + (unsigned long)NULL) + +#define knav_queue_disable_notify(q) knav_queue_device_control(q, \ + KNAV_QUEUE_DISABLE_NOTIFY, \ + (unsigned long)NULL) + +#define knav_queue_get_count(q) knav_queue_device_control(q, \ + KNAV_QUEUE_GET_COUNT, (unsigned long)NULL) + +#define for_each_netcp_module(module) \ + list_for_each_entry(module, &netcp_modules, module_list) + +#define for_each_netcp_device_module(netcp_device, inst_modpriv) \ + list_for_each_entry(inst_modpriv, \ + &((netcp_device)->modpriv_head), inst_list) + +#define for_each_module(netcp, intf_modpriv) \ + list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list) + +/* Module management structures */ +struct netcp_device { + struct list_head device_list; + struct list_head interface_head; + struct list_head modpriv_head; + struct device *device; +}; + +struct netcp_inst_modpriv { + struct netcp_device *netcp_device; + struct netcp_module *netcp_module; + struct list_head inst_list; + void *module_priv; +}; + +struct netcp_intf_modpriv { + struct netcp_intf *netcp_priv; + struct netcp_module *netcp_module; + struct list_head intf_list; + void *module_priv; +}; + +static LIST_HEAD(netcp_devices); +static LIST_HEAD(netcp_modules); +static DEFINE_MUTEX(netcp_modules_lock); + +static int netcp_debug_level = -1; +module_param(netcp_debug_level, int, 0); +MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)"); + +/* Helper functions - Get/Set */ +static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc, + struct knav_dma_desc *desc) +{ + *buff_len = desc->buff_len; + *buff = desc->buff; + *ndesc = desc->next_desc; +} + +static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc) +{ + *pad0 = desc->pad[0]; + *pad1 = desc->pad[1]; +} + +static void get_org_pkt_info(u32 *buff, u32 *buff_len, + struct knav_dma_desc *desc) +{ + *buff = desc->orig_buff; + *buff_len = desc->orig_len; +} + +static void get_words(u32 *words, int num_words, u32 *desc) +{ + int i; + + for (i = 0; i < num_words; i++) + words[i] = desc[i]; +} + +static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc, + struct knav_dma_desc *desc) +{ + desc->buff_len = buff_len; + desc->buff = buff; + desc->next_desc = ndesc; +} + +static void set_desc_info(u32 desc_info, u32 pkt_info, + struct knav_dma_desc *desc) +{ + desc->desc_info = desc_info; + desc->packet_info = pkt_info; +} + +static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc) +{ + desc->pad[0] = pad0; + desc->pad[1] = pad1; +} + +static void set_org_pkt_info(u32 buff, u32 buff_len, + struct knav_dma_desc 
*desc) +{ + desc->orig_buff = buff; + desc->orig_len = buff_len; +} + +static void set_words(u32 *words, int num_words, u32 *desc) +{ + int i; + + for (i = 0; i < num_words; i++) + desc[i] = words[i]; +} + +/* Read the e-fuse value as 32 bit values to be endian independent */ +static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac) +{ + unsigned int addr0, addr1; + + addr1 = readl(efuse_mac + 4); + addr0 = readl(efuse_mac); + + x[0] = (addr1 & 0x0000ff00) >> 8; + x[1] = addr1 & 0x000000ff; + x[2] = (addr0 & 0xff000000) >> 24; + x[3] = (addr0 & 0x00ff0000) >> 16; + x[4] = (addr0 & 0x0000ff00) >> 8; + x[5] = addr0 & 0x000000ff; + + return 0; +} + +static const char *netcp_node_name(struct device_node *node) +{ + const char *name; + + if (of_property_read_string(node, "label", &name) < 0) + name = node->name; + if (!name) + name = "unknown"; + return name; +} + +/* Module management routines */ +static int netcp_register_interface(struct netcp_intf *netcp) +{ + int ret; + + ret = register_netdev(netcp->ndev); + if (!ret) + netcp->netdev_registered = true; + return ret; +} + +static int netcp_module_probe(struct netcp_device *netcp_device, + struct netcp_module *module) +{ + struct device *dev = netcp_device->device; + struct device_node *devices, *interface, *node = dev->of_node; + struct device_node *child; + struct netcp_inst_modpriv *inst_modpriv; + struct netcp_intf *netcp_intf; + struct netcp_module *tmp; + bool primary_module_registered = false; + int ret; + + /* Find this module in the sub-tree for this device */ + devices = of_get_child_by_name(node, "netcp-devices"); + if (!devices) { + dev_err(dev, "could not find netcp-devices node\n"); + return NETCP_MOD_PROBE_SKIPPED; + } + + for_each_available_child_of_node(devices, child) { + const char *name = netcp_node_name(child); + + if (!strcasecmp(module->name, name)) + break; + } + + of_node_put(devices); + /* If module not used for this device, skip it */ + if (!child) { + dev_warn(dev, "module(%s) not used for device\n", module->name); + return NETCP_MOD_PROBE_SKIPPED; + } + + inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL); + if (!inst_modpriv) { + of_node_put(child); + return -ENOMEM; + } + + inst_modpriv->netcp_device = netcp_device; + inst_modpriv->netcp_module = module; + list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head); + + ret = module->probe(netcp_device, dev, child, + &inst_modpriv->module_priv); + of_node_put(child); + if (ret) { + dev_err(dev, "Probe of module(%s) failed with %d\n", + module->name, ret); + list_del(&inst_modpriv->inst_list); + devm_kfree(dev, inst_modpriv); + return NETCP_MOD_PROBE_FAILED; + } + + /* Attach modules only if the primary module is probed */ + for_each_netcp_module(tmp) { + if (tmp->primary) + primary_module_registered = true; + } + + if (!primary_module_registered) + return 0; + + /* Attach module to interfaces */ + list_for_each_entry(netcp_intf, &netcp_device->interface_head, + interface_list) { + struct netcp_intf_modpriv *intf_modpriv; + + /* If interface not registered then register now */ + if (!netcp_intf->netdev_registered) + ret = netcp_register_interface(netcp_intf); + + if (ret) + return -ENODEV; + + intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), + GFP_KERNEL); + if (!intf_modpriv) + return -ENOMEM; + + interface = of_parse_phandle(netcp_intf->node_interface, + module->name, 0); + + intf_modpriv->netcp_priv = netcp_intf; + intf_modpriv->netcp_module = module; + list_add_tail(&intf_modpriv->intf_list, + 
&netcp_intf->module_head); + + ret = module->attach(inst_modpriv->module_priv, + netcp_intf->ndev, interface, + &intf_modpriv->module_priv); + of_node_put(interface); + if (ret) { + dev_dbg(dev, "Attach of module %s declined with %d\n", + module->name, ret); + list_del(&intf_modpriv->intf_list); + devm_kfree(dev, intf_modpriv); + continue; + } + } + return 0; +} + +int netcp_register_module(struct netcp_module *module) +{ + struct netcp_device *netcp_device; + struct netcp_module *tmp; + int ret; + + if (!module->name) { + WARN(1, "error registering netcp module: no name\n"); + return -EINVAL; + } + + if (!module->probe) { + WARN(1, "error registering netcp module: no probe\n"); + return -EINVAL; + } + + mutex_lock(&netcp_modules_lock); + + for_each_netcp_module(tmp) { + if (!strcasecmp(tmp->name, module->name)) { + mutex_unlock(&netcp_modules_lock); + return -EEXIST; + } + } + list_add_tail(&module->module_list, &netcp_modules); + + list_for_each_entry(netcp_device, &netcp_devices, device_list) { + ret = netcp_module_probe(netcp_device, module); + if (ret < 0) + goto fail; + } + + mutex_unlock(&netcp_modules_lock); + return 0; + +fail: + mutex_unlock(&netcp_modules_lock); + netcp_unregister_module(module); + return ret; +} +EXPORT_SYMBOL_GPL(netcp_register_module); + +static void netcp_release_module(struct netcp_device *netcp_device, + struct netcp_module *module) +{ + struct netcp_inst_modpriv *inst_modpriv, *inst_tmp; + struct netcp_intf *netcp_intf, *netcp_tmp; + struct device *dev = netcp_device->device; + + /* Release the module from each interface */ + list_for_each_entry_safe(netcp_intf, netcp_tmp, + &netcp_device->interface_head, + interface_list) { + struct netcp_intf_modpriv *intf_modpriv, *intf_tmp; + + list_for_each_entry_safe(intf_modpriv, intf_tmp, + &netcp_intf->module_head, + intf_list) { + if (intf_modpriv->netcp_module == module) { + module->release(intf_modpriv->module_priv); + list_del(&intf_modpriv->intf_list); + devm_kfree(dev, intf_modpriv); + break; + } + } + } + + /* Remove the module from each instance */ + list_for_each_entry_safe(inst_modpriv, inst_tmp, + &netcp_device->modpriv_head, inst_list) { + if (inst_modpriv->netcp_module == module) { + module->remove(netcp_device, + inst_modpriv->module_priv); + list_del(&inst_modpriv->inst_list); + devm_kfree(dev, inst_modpriv); + break; + } + } +} + +void netcp_unregister_module(struct netcp_module *module) +{ + struct netcp_device *netcp_device; + struct netcp_module *module_tmp; + + mutex_lock(&netcp_modules_lock); + + list_for_each_entry(netcp_device, &netcp_devices, device_list) { + netcp_release_module(netcp_device, module); + } + + /* Remove the module from the module list */ + for_each_netcp_module(module_tmp) { + if (module == module_tmp) { + list_del(&module->module_list); + break; + } + } + + mutex_unlock(&netcp_modules_lock); +} +EXPORT_SYMBOL_GPL(netcp_unregister_module); + +void *netcp_module_get_intf_data(struct netcp_module *module, + struct netcp_intf *intf) +{ + struct netcp_intf_modpriv *intf_modpriv; + + list_for_each_entry(intf_modpriv, &intf->module_head, intf_list) + if (intf_modpriv->netcp_module == module) + return intf_modpriv->module_priv; + return NULL; +} +EXPORT_SYMBOL_GPL(netcp_module_get_intf_data); + +/* Module TX and RX Hook management */ +struct netcp_hook_list { + struct list_head list; + netcp_hook_rtn *hook_rtn; + void *hook_data; + int order; +}; + +int netcp_register_txhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data) +{ + struct 
netcp_hook_list *entry; + struct netcp_hook_list *next; + unsigned long flags; + + entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->hook_rtn = hook_rtn; + entry->hook_data = hook_data; + entry->order = order; + + spin_lock_irqsave(&netcp_priv->lock, flags); + list_for_each_entry(next, &netcp_priv->txhook_list_head, list) { + if (next->order > order) + break; + } + __list_add(&entry->list, next->list.prev, &next->list); + spin_unlock_irqrestore(&netcp_priv->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(netcp_register_txhook); + +int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data) +{ + struct netcp_hook_list *next, *n; + unsigned long flags; + + spin_lock_irqsave(&netcp_priv->lock, flags); + list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) { + if ((next->order == order) && + (next->hook_rtn == hook_rtn) && + (next->hook_data == hook_data)) { + list_del(&next->list); + spin_unlock_irqrestore(&netcp_priv->lock, flags); + devm_kfree(netcp_priv->dev, next); + return 0; + } + } + spin_unlock_irqrestore(&netcp_priv->lock, flags); + return -ENOENT; +} +EXPORT_SYMBOL_GPL(netcp_unregister_txhook); + +int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data) +{ + struct netcp_hook_list *entry; + struct netcp_hook_list *next; + unsigned long flags; + + entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->hook_rtn = hook_rtn; + entry->hook_data = hook_data; + entry->order = order; + + spin_lock_irqsave(&netcp_priv->lock, flags); + list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) { + if (next->order > order) + break; + } + __list_add(&entry->list, next->list.prev, &next->list); + spin_unlock_irqrestore(&netcp_priv->lock, flags); + + return 0; +} + +int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order, + netcp_hook_rtn *hook_rtn, void *hook_data) +{ + struct netcp_hook_list *next, *n; + unsigned long flags; + + spin_lock_irqsave(&netcp_priv->lock, flags); + list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) { + if ((next->order == order) && + (next->hook_rtn == hook_rtn) && + (next->hook_data == hook_data)) { + list_del(&next->list); + spin_unlock_irqrestore(&netcp_priv->lock, flags); + devm_kfree(netcp_priv->dev, next); + return 0; + } + } + spin_unlock_irqrestore(&netcp_priv->lock, flags); + + return -ENOENT; +} + +static void netcp_frag_free(bool is_frag, void *ptr) +{ + if (is_frag) + put_page(virt_to_head_page(ptr)); + else + kfree(ptr); +} + +static void netcp_free_rx_desc_chain(struct netcp_intf *netcp, + struct knav_dma_desc *desc) +{ + struct knav_dma_desc *ndesc; + dma_addr_t dma_desc, dma_buf; + unsigned int buf_len, dma_sz = sizeof(*ndesc); + void *buf_ptr; + u32 tmp; + + get_words(&dma_desc, 1, &desc->next_desc); + + while (dma_desc) { + ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); + if (unlikely(!ndesc)) { + dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); + break; + } + get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); + get_pad_info((u32 *)&buf_ptr, &tmp, ndesc); + dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(buf_ptr); + knav_pool_desc_put(netcp->rx_pool, desc); + } + + get_pad_info((u32 *)&buf_ptr, &buf_len, desc); + if (buf_ptr) + netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); + knav_pool_desc_put(netcp->rx_pool, desc); +} + 
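For reference, the tx/rx hook registration API above is how NetCP sub-modules tap into the packet path: hooks are kept sorted by their order value and called in sequence for every packet, and a non-zero return from an RX hook makes the core drop the packet. The sketch below shows how a hypothetical module might attach an RX hook from its open() callback; the function names and the order value 10 are illustrative assumptions only and are not part of this patch.

/* Illustrative sketch only, not part of this patch */
static int example_rx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct netcp_intf *netcp = data;

	/* inspect or annotate the packet; returning non-zero drops it */
	dev_dbg(netcp->ndev_dev, "rx hook %d saw %u bytes\n",
		order, p_info->skb->len);
	return 0;
}

static int example_module_open(void *intf_priv, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	/* order decides where in the hook list this callback runs */
	return netcp_register_rxhook(netcp, 10, example_rx_hook, netcp);
}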
+static void netcp_empty_rx_queue(struct netcp_intf *netcp) +{ + struct knav_dma_desc *desc; + unsigned int dma_sz; + dma_addr_t dma; + + for (; ;) { + dma = knav_queue_pop(netcp->rx_queue, &dma_sz); + if (!dma) + break; + + desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz); + if (unlikely(!desc)) { + dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n", + __func__); + netcp->ndev->stats.rx_errors++; + continue; + } + netcp_free_rx_desc_chain(netcp, desc); + netcp->ndev->stats.rx_dropped++; + } +} + +static int netcp_process_one_rx_packet(struct netcp_intf *netcp) +{ + unsigned int dma_sz, buf_len, org_buf_len; + struct knav_dma_desc *desc, *ndesc; + unsigned int pkt_sz = 0, accum_sz; + struct netcp_hook_list *rx_hook; + dma_addr_t dma_desc, dma_buff; + struct netcp_packet p_info; + struct sk_buff *skb; + void *org_buf_ptr; + u32 tmp; + + dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); + if (!dma_desc) + return -1; + + desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); + if (unlikely(!desc)) { + dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); + return 0; + } + + get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); + get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc); + + if (unlikely(!org_buf_ptr)) { + dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); + goto free_desc; + } + + pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK; + accum_sz = buf_len; + dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE); + + /* Build a new sk_buff for the primary buffer */ + skb = build_skb(org_buf_ptr, org_buf_len); + if (unlikely(!skb)) { + dev_err(netcp->ndev_dev, "build_skb() failed\n"); + goto free_desc; + } + + /* update data, tail and len */ + skb_reserve(skb, NETCP_SOP_OFFSET); + __skb_put(skb, buf_len); + + /* Fill in the page fragment list */ + while (dma_desc) { + struct page *page; + + ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); + if (unlikely(!ndesc)) { + dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); + goto free_desc; + } + + get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); + get_pad_info((u32 *)&page, &tmp, ndesc); + + if (likely(dma_buff && buf_len && page)) { + dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, + DMA_FROM_DEVICE); + } else { + dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n", + (void *)dma_buff, buf_len, page); + goto free_desc; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + offset_in_page(dma_buff), buf_len, PAGE_SIZE); + accum_sz += buf_len; + + /* Free the descriptor */ + knav_pool_desc_put(netcp->rx_pool, ndesc); + } + + /* Free the primary descriptor */ + knav_pool_desc_put(netcp->rx_pool, desc); + + /* check for packet len and warn */ + if (unlikely(pkt_sz != accum_sz)) + dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n", + pkt_sz, accum_sz); + + /* Remove ethernet FCS from the packet */ + __pskb_trim(skb, skb->len - ETH_FCS_LEN); + + /* Call each of the RX hooks */ + p_info.skb = skb; + p_info.rxtstamp_complete = false; + list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) { + int ret; + + ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data, + &p_info); + if (unlikely(ret)) { + dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n", + rx_hook->order, ret); + netcp->ndev->stats.rx_errors++; + dev_kfree_skb(skb); + return 0; + } + } + + netcp->ndev->last_rx = jiffies; + netcp->ndev->stats.rx_packets++; + netcp->ndev->stats.rx_bytes += skb->len; + + /* push skb up the stack */ + skb->protocol = eth_type_trans(skb, 
netcp->ndev); + netif_receive_skb(skb); + return 0; + +free_desc: + netcp_free_rx_desc_chain(netcp, desc); + netcp->ndev->stats.rx_errors++; + return 0; +} + +static int netcp_process_rx_packets(struct netcp_intf *netcp, + unsigned int budget) +{ + int i; + + for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++) + ; + return i; +} + +/* Release descriptors and attached buffers from Rx FDQ */ +static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq) +{ + struct knav_dma_desc *desc; + unsigned int buf_len, dma_sz; + dma_addr_t dma; + void *buf_ptr; + u32 tmp; + + /* Allocate descriptor */ + while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) { + desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz); + if (unlikely(!desc)) { + dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n"); + continue; + } + + get_org_pkt_info(&dma, &buf_len, desc); + get_pad_info((u32 *)&buf_ptr, &tmp, desc); + + if (unlikely(!dma)) { + dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); + knav_pool_desc_put(netcp->rx_pool, desc); + continue; + } + + if (unlikely(!buf_ptr)) { + dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); + knav_pool_desc_put(netcp->rx_pool, desc); + continue; + } + + if (fdq == 0) { + dma_unmap_single(netcp->dev, dma, buf_len, + DMA_FROM_DEVICE); + netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr); + } else { + dma_unmap_page(netcp->dev, dma, buf_len, + DMA_FROM_DEVICE); + __free_page(buf_ptr); + } + + knav_pool_desc_put(netcp->rx_pool, desc); + } +} + +static void netcp_rxpool_free(struct netcp_intf *netcp) +{ + int i; + + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && + !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++) + netcp_free_rx_buf(netcp, i); + + if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size) + dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n", + netcp->rx_pool_size - knav_pool_count(netcp->rx_pool)); + + knav_pool_destroy(netcp->rx_pool); + netcp->rx_pool = NULL; +} + +static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) +{ + struct knav_dma_desc *hwdesc; + unsigned int buf_len, dma_sz; + u32 desc_info, pkt_info; + struct page *page; + dma_addr_t dma; + void *bufptr; + u32 pad[2]; + + /* Allocate descriptor */ + hwdesc = knav_pool_desc_get(netcp->rx_pool); + if (IS_ERR_OR_NULL(hwdesc)) { + dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); + return; + } + + if (likely(fdq == 0)) { + unsigned int primary_buf_len; + /* Allocate a primary receive queue entry */ + buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; + primary_buf_len = SKB_DATA_ALIGN(buf_len) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + if (primary_buf_len <= PAGE_SIZE) { + bufptr = netdev_alloc_frag(primary_buf_len); + pad[1] = primary_buf_len; + } else { + bufptr = kmalloc(primary_buf_len, GFP_ATOMIC | + GFP_DMA32 | __GFP_COLD); + pad[1] = 0; + } + + if (unlikely(!bufptr)) { + dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); + goto fail; + } + dma = dma_map_single(netcp->dev, bufptr, buf_len, + DMA_TO_DEVICE); + pad[0] = (u32)bufptr; + + } else { + /* Allocate a secondary receive queue entry */ + page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); + if (unlikely(!page)) { + dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); + goto fail; + } + buf_len = PAGE_SIZE; + dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); + pad[0] = (u32)page; + pad[1] = 0; + } + + desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; + desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK; + pkt_info = KNAV_DMA_DESC_HAS_EPIB; + 
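+	/* desc_info holds the buffer length; pkt_info flags the EPIB, the PS word count, and the Rx completion queue the hardware returns this descriptor to */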
pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT; + pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << + KNAV_DMA_DESC_RETQ_SHIFT; + set_org_pkt_info(dma, buf_len, hwdesc); + set_pad_info(pad[0], pad[1], hwdesc); + set_desc_info(desc_info, pkt_info, hwdesc); + + /* Push to FDQs */ + knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, + &dma_sz); + knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); + return; + +fail: + knav_pool_desc_put(netcp->rx_pool, hwdesc); +} + +/* Refill Rx FDQ with descriptors & attached buffers */ +static void netcp_rxpool_refill(struct netcp_intf *netcp) +{ + u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; + int i; + + /* Calculate the FDQ deficit and refill */ + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { + fdq_deficit[i] = netcp->rx_queue_depths[i] - + knav_queue_get_count(netcp->rx_fdq[i]); + + while (fdq_deficit[i]--) + netcp_allocate_rx_buf(netcp, i); + } /* end for fdqs */ +} + +/* NAPI poll */ +static int netcp_rx_poll(struct napi_struct *napi, int budget) +{ + struct netcp_intf *netcp = container_of(napi, struct netcp_intf, + rx_napi); + unsigned int packets; + + packets = netcp_process_rx_packets(netcp, budget); + + if (packets < budget) { + napi_complete(&netcp->rx_napi); + knav_queue_enable_notify(netcp->rx_queue); + } + + netcp_rxpool_refill(netcp); + return packets; +} + +static void netcp_rx_notify(void *arg) +{ + struct netcp_intf *netcp = arg; + + knav_queue_disable_notify(netcp->rx_queue); + napi_schedule(&netcp->rx_napi); +} + +static void netcp_free_tx_desc_chain(struct netcp_intf *netcp, + struct knav_dma_desc *desc, + unsigned int desc_sz) +{ + struct knav_dma_desc *ndesc = desc; + dma_addr_t dma_desc, dma_buf; + unsigned int buf_len; + + while (ndesc) { + get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc); + + if (dma_buf && buf_len) + dma_unmap_single(netcp->dev, dma_buf, buf_len, + DMA_TO_DEVICE); + else + dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n", + (void *)dma_buf, buf_len); + + knav_pool_desc_put(netcp->tx_pool, ndesc); + ndesc = NULL; + if (dma_desc) { + ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc, + desc_sz); + if (!ndesc) + dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); + } + } +} + +static int netcp_process_tx_compl_packets(struct netcp_intf *netcp, + unsigned int budget) +{ + struct knav_dma_desc *desc; + struct sk_buff *skb; + unsigned int dma_sz; + dma_addr_t dma; + int pkts = 0; + u32 tmp; + + while (budget--) { + dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz); + if (!dma) + break; + desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz); + if (unlikely(!desc)) { + dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n"); + netcp->ndev->stats.tx_errors++; + continue; + } + + get_pad_info((u32 *)&skb, &tmp, desc); + netcp_free_tx_desc_chain(netcp, desc, dma_sz); + if (!skb) { + dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); + netcp->ndev->stats.tx_errors++; + continue; + } + + if (netif_subqueue_stopped(netcp->ndev, skb) && + netif_running(netcp->ndev) && + (knav_pool_count(netcp->tx_pool) > + netcp->tx_resume_threshold)) { + u16 subqueue = skb_get_queue_mapping(skb); + + netif_wake_subqueue(netcp->ndev, subqueue); + } + + netcp->ndev->stats.tx_packets++; + netcp->ndev->stats.tx_bytes += skb->len; + dev_kfree_skb(skb); + pkts++; + } + return pkts; +} + +static int netcp_tx_poll(struct napi_struct *napi, int budget) +{ + int packets; + struct netcp_intf *netcp = container_of(napi, struct netcp_intf, + tx_napi); + + packets = 
netcp_process_tx_compl_packets(netcp, budget); + if (packets < budget) { + napi_complete(&netcp->tx_napi); + knav_queue_enable_notify(netcp->tx_compl_q); + } + + return packets; +} + +static void netcp_tx_notify(void *arg) +{ + struct netcp_intf *netcp = arg; + + knav_queue_disable_notify(netcp->tx_compl_q); + napi_schedule(&netcp->tx_napi); +} + +static struct knav_dma_desc* +netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) +{ + struct knav_dma_desc *desc, *ndesc, *pdesc; + unsigned int pkt_len = skb_headlen(skb); + struct device *dev = netcp->dev; + dma_addr_t dma_addr; + unsigned int dma_sz; + int i; + + /* Map the linear buffer */ + dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); + if (unlikely(!dma_addr)) { + dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); + return NULL; + } + + desc = knav_pool_desc_get(netcp->tx_pool); + if (unlikely(IS_ERR_OR_NULL(desc))) { + dev_err(netcp->ndev_dev, "out of TX desc\n"); + dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE); + return NULL; + } + + set_pkt_info(dma_addr, pkt_len, 0, desc); + if (skb_is_nonlinear(skb)) { + prefetchw(skb_shinfo(skb)); + } else { + desc->next_desc = 0; + goto upd_pkt_len; + } + + pdesc = desc; + + /* Handle the case where skb is fragmented in pages */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + struct page *page = skb_frag_page(frag); + u32 page_offset = frag->page_offset; + u32 buf_len = skb_frag_size(frag); + dma_addr_t desc_dma; + u32 pkt_info; + + dma_addr = dma_map_page(dev, page, page_offset, buf_len, + DMA_TO_DEVICE); + if (unlikely(!dma_addr)) { + dev_err(netcp->ndev_dev, "Failed to map skb page\n"); + goto free_descs; + } + + ndesc = knav_pool_desc_get(netcp->tx_pool); + if (unlikely(IS_ERR_OR_NULL(ndesc))) { + dev_err(netcp->ndev_dev, "out of TX desc for frags\n"); + dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE); + goto free_descs; + } + + desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, + (void *)ndesc); + pkt_info = + (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << + KNAV_DMA_DESC_RETQ_SHIFT; + set_pkt_info(dma_addr, buf_len, 0, ndesc); + set_words(&desc_dma, 1, &pdesc->next_desc); + pkt_len += buf_len; + if (pdesc != desc) + knav_pool_desc_map(netcp->tx_pool, pdesc, + sizeof(*pdesc), &desc_dma, &dma_sz); + pdesc = ndesc; + } + if (pdesc != desc) + knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc), + &dma_addr, &dma_sz); + + /* frag list based linkage is not supported for now. 
*/ + if (skb_shinfo(skb)->frag_list) { + dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n"); + goto free_descs; + } + +upd_pkt_len: + WARN_ON(pkt_len != skb->len); + + pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK; + set_words(&pkt_len, 1, &desc->desc_info); + return desc; + +free_descs: + netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc)); + return NULL; +} + +static int netcp_tx_submit_skb(struct netcp_intf *netcp, + struct sk_buff *skb, + struct knav_dma_desc *desc) +{ + struct netcp_tx_pipe *tx_pipe = NULL; + struct netcp_hook_list *tx_hook; + struct netcp_packet p_info; + unsigned int dma_sz; + dma_addr_t dma; + u32 tmp = 0; + int ret = 0; + + p_info.netcp = netcp; + p_info.skb = skb; + p_info.tx_pipe = NULL; + p_info.psdata_len = 0; + p_info.ts_context = NULL; + p_info.txtstamp_complete = NULL; + p_info.epib = desc->epib; + p_info.psdata = desc->psdata; + memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32)); + + /* Find out where to inject the packet for transmission */ + list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) { + ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data, + &p_info); + if (unlikely(ret != 0)) { + dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n", + tx_hook->order, ret); + ret = (ret < 0) ? ret : NETDEV_TX_OK; + goto out; + } + } + + /* Make sure some TX hook claimed the packet */ + tx_pipe = p_info.tx_pipe; + if (!tx_pipe) { + dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n"); + ret = -ENXIO; + goto out; + } + + /* update descriptor */ + if (p_info.psdata_len) { + u32 *psdata = p_info.psdata; + + memmove(p_info.psdata, p_info.psdata + p_info.psdata_len, + p_info.psdata_len); + set_words(psdata, p_info.psdata_len, psdata); + tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) << + KNAV_DMA_DESC_PSLEN_SHIFT; + } + + tmp |= KNAV_DMA_DESC_HAS_EPIB | + ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) << + KNAV_DMA_DESC_RETQ_SHIFT); + + if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) { + tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) << + KNAV_DMA_DESC_PSFLAG_SHIFT); + } + + set_words(&tmp, 1, &desc->packet_info); + set_words((u32 *)&skb, 1, &desc->pad[0]); + + if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { + tmp = tx_pipe->switch_to_port; + set_words((u32 *)&tmp, 1, &desc->tag_info); + } + + /* submit packet descriptor */ + ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma, + &dma_sz); + if (unlikely(ret)) { + dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__); + ret = -ENOMEM; + goto out; + } + skb_tx_timestamp(skb); + knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0); + +out: + return ret; +} + +/* Submit the packet */ +static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + int subqueue = skb_get_queue_mapping(skb); + struct knav_dma_desc *desc; + int desc_count, ret = 0; + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) { + ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE); + if (ret < 0) { + /* If we get here, the skb has already been dropped */ + dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n", + ret); + ndev->stats.tx_dropped++; + return ret; + } + skb->len = NETCP_MIN_PACKET_SIZE; + } + + desc = netcp_tx_map_skb(skb, netcp); + if (unlikely(!desc)) { + netif_stop_subqueue(ndev, subqueue); + ret = -ENOBUFS; + goto drop; + } + + ret = 
netcp_tx_submit_skb(netcp, skb, desc); + if (ret) + goto drop; + + ndev->trans_start = jiffies; + + /* Check Tx pool count & stop subqueue if needed */ + desc_count = knav_pool_count(netcp->tx_pool); + if (desc_count < netcp->tx_pause_threshold) { + dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count); + netif_stop_subqueue(ndev, subqueue); + } + return NETDEV_TX_OK; + +drop: + ndev->stats.tx_dropped++; + if (desc) + netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc)); + dev_kfree_skb(skb); + return ret; +} + +int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe) +{ + if (tx_pipe->dma_channel) { + knav_dma_close_channel(tx_pipe->dma_channel); + tx_pipe->dma_channel = NULL; + } + return 0; +} +EXPORT_SYMBOL_GPL(netcp_txpipe_close); + +int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) +{ + struct device *dev = tx_pipe->netcp_device->device; + struct knav_dma_cfg config; + int ret = 0; + u8 name[16]; + + memset(&config, 0, sizeof(config)); + config.direction = DMA_MEM_TO_DEV; + config.u.tx.filt_einfo = false; + config.u.tx.filt_pswords = false; + config.u.tx.priority = DMA_PRIO_MED_L; + + tx_pipe->dma_channel = knav_dma_open_channel(dev, + tx_pipe->dma_chan_name, &config); + if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) { + dev_err(dev, "failed opening tx chan(%s)\n", + tx_pipe->dma_chan_name); + goto err; + } + + snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev)); + tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id, + KNAV_QUEUE_SHARED); + if (IS_ERR(tx_pipe->dma_queue)) { + dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n", + name, ret); + ret = PTR_ERR(tx_pipe->dma_queue); + goto err; + } + + dev_dbg(dev, "opened tx pipe %s\n", name); + return 0; + +err: + if (!IS_ERR_OR_NULL(tx_pipe->dma_channel)) + knav_dma_close_channel(tx_pipe->dma_channel); + tx_pipe->dma_channel = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(netcp_txpipe_open); + +int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe, + struct netcp_device *netcp_device, + const char *dma_chan_name, unsigned int dma_queue_id) +{ + memset(tx_pipe, 0, sizeof(*tx_pipe)); + tx_pipe->netcp_device = netcp_device; + tx_pipe->dma_chan_name = dma_chan_name; + tx_pipe->dma_queue_id = dma_queue_id; + return 0; +} +EXPORT_SYMBOL_GPL(netcp_txpipe_init); + +static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp, + const u8 *addr, + enum netcp_addr_type type) +{ + struct netcp_addr *naddr; + + list_for_each_entry(naddr, &netcp->addr_list, node) { + if (naddr->type != type) + continue; + if (addr && memcmp(addr, naddr->addr, ETH_ALEN)) + continue; + return naddr; + } + + return NULL; +} + +static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp, + const u8 *addr, + enum netcp_addr_type type) +{ + struct netcp_addr *naddr; + + naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC); + if (!naddr) + return NULL; + + naddr->type = type; + naddr->flags = 0; + naddr->netcp = netcp; + if (addr) + ether_addr_copy(naddr->addr, addr); + else + eth_zero_addr(naddr->addr); + list_add_tail(&naddr->node, &netcp->addr_list); + + return naddr; +} + +static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr) +{ + list_del(&naddr->node); + devm_kfree(netcp->dev, naddr); +} + +static void netcp_addr_clear_mark(struct netcp_intf *netcp) +{ + struct netcp_addr *naddr; + + list_for_each_entry(naddr, &netcp->addr_list, node) + naddr->flags = 0; +} + +static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr, + enum netcp_addr_type type) +{ + struct netcp_addr *naddr; 
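+	/* mark-and-sweep address sync: entries seen again are marked ADDR_VALID, new ones ADDR_NEW; unmarked entries are later removed by netcp_addr_sweep_del() */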
+ + naddr = netcp_addr_find(netcp, addr, type); + if (naddr) { + naddr->flags |= ADDR_VALID; + return; + } + + naddr = netcp_addr_add(netcp, addr, type); + if (!WARN_ON(!naddr)) + naddr->flags |= ADDR_NEW; +} + +static void netcp_addr_sweep_del(struct netcp_intf *netcp) +{ + struct netcp_addr *naddr, *tmp; + struct netcp_intf_modpriv *priv; + struct netcp_module *module; + int error; + + list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) { + if (naddr->flags & (ADDR_VALID | ADDR_NEW)) + continue; + dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", + naddr->addr, naddr->type); + mutex_lock(&netcp_modules_lock); + for_each_module(netcp, priv) { + module = priv->netcp_module; + if (!module->del_addr) + continue; + error = module->del_addr(priv->module_priv, + naddr); + WARN_ON(error); + } + mutex_unlock(&netcp_modules_lock); + netcp_addr_del(netcp, naddr); + } +} + +static void netcp_addr_sweep_add(struct netcp_intf *netcp) +{ + struct netcp_addr *naddr, *tmp; + struct netcp_intf_modpriv *priv; + struct netcp_module *module; + int error; + + list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) { + if (!(naddr->flags & ADDR_NEW)) + continue; + dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", + naddr->addr, naddr->type); + mutex_lock(&netcp_modules_lock); + for_each_module(netcp, priv) { + module = priv->netcp_module; + if (!module->add_addr) + continue; + error = module->add_addr(priv->module_priv, naddr); + WARN_ON(error); + } + mutex_unlock(&netcp_modules_lock); + } +} + +static void netcp_set_rx_mode(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netdev_hw_addr *ndev_addr; + bool promisc; + + promisc = (ndev->flags & IFF_PROMISC || + ndev->flags & IFF_ALLMULTI || + netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); + + /* first clear all marks */ + netcp_addr_clear_mark(netcp); + + /* next add new entries, mark existing ones */ + netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST); + for_each_dev_addr(ndev, ndev_addr) + netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV); + netdev_for_each_uc_addr(ndev_addr, ndev) + netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST); + netdev_for_each_mc_addr(ndev_addr, ndev) + netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST); + + if (promisc) + netcp_addr_add_mark(netcp, NULL, ADDR_ANY); + + /* finally sweep and callout into modules */ + netcp_addr_sweep_del(netcp); + netcp_addr_sweep_add(netcp); +} + +static void netcp_free_navigator_resources(struct netcp_intf *netcp) +{ + int i; + + if (netcp->rx_channel) { + knav_dma_close_channel(netcp->rx_channel); + netcp->rx_channel = NULL; + } + + if (!IS_ERR_OR_NULL(netcp->rx_pool)) + netcp_rxpool_free(netcp); + + if (!IS_ERR_OR_NULL(netcp->rx_queue)) { + knav_queue_close(netcp->rx_queue); + netcp->rx_queue = NULL; + } + + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && + !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) { + knav_queue_close(netcp->rx_fdq[i]); + netcp->rx_fdq[i] = NULL; + } + + if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) { + knav_queue_close(netcp->tx_compl_q); + netcp->tx_compl_q = NULL; + } + + if (!IS_ERR_OR_NULL(netcp->tx_pool)) { + knav_pool_destroy(netcp->tx_pool); + netcp->tx_pool = NULL; + } +} + +static int netcp_setup_navigator_resources(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct knav_queue_notify_config notify_cfg; + struct knav_dma_cfg config; + u32 last_fdq = 0; + u8 name[16]; + int ret; + int i; + + /* Create Rx/Tx descriptor pools */ + snprintf(name, sizeof(name), 
"rx-pool-%s", ndev->name); + netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size, + netcp->rx_pool_region_id); + if (IS_ERR_OR_NULL(netcp->rx_pool)) { + dev_err(netcp->ndev_dev, "Couldn't create rx pool\n"); + ret = PTR_ERR(netcp->rx_pool); + goto fail; + } + + snprintf(name, sizeof(name), "tx-pool-%s", ndev->name); + netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size, + netcp->tx_pool_region_id); + if (IS_ERR_OR_NULL(netcp->tx_pool)) { + dev_err(netcp->ndev_dev, "Couldn't create tx pool\n"); + ret = PTR_ERR(netcp->tx_pool); + goto fail; + } + + /* open Tx completion queue */ + snprintf(name, sizeof(name), "tx-compl-%s", ndev->name); + netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0); + if (IS_ERR_OR_NULL(netcp->tx_compl_q)) { + ret = PTR_ERR(netcp->tx_compl_q); + goto fail; + } + netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q); + + /* Set notification for Tx completion */ + notify_cfg.fn = netcp_tx_notify; + notify_cfg.fn_arg = netcp; + ret = knav_queue_device_control(netcp->tx_compl_q, + KNAV_QUEUE_SET_NOTIFIER, + (unsigned long)&notify_cfg); + if (ret) + goto fail; + + knav_queue_disable_notify(netcp->tx_compl_q); + + /* open Rx completion queue */ + snprintf(name, sizeof(name), "rx-compl-%s", ndev->name); + netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0); + if (IS_ERR_OR_NULL(netcp->rx_queue)) { + ret = PTR_ERR(netcp->rx_queue); + goto fail; + } + netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue); + + /* Set notification for Rx completion */ + notify_cfg.fn = netcp_rx_notify; + notify_cfg.fn_arg = netcp; + ret = knav_queue_device_control(netcp->rx_queue, + KNAV_QUEUE_SET_NOTIFIER, + (unsigned long)&notify_cfg); + if (ret) + goto fail; + + knav_queue_disable_notify(netcp->rx_queue); + + /* open Rx FDQs */ + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && + netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { + snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); + netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); + if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { + ret = PTR_ERR(netcp->rx_fdq[i]); + goto fail; + } + } + + memset(&config, 0, sizeof(config)); + config.direction = DMA_DEV_TO_MEM; + config.u.rx.einfo_present = true; + config.u.rx.psinfo_present = true; + config.u.rx.err_mode = DMA_DROP; + config.u.rx.desc_type = DMA_DESC_HOST; + config.u.rx.psinfo_at_sop = false; + config.u.rx.sop_offset = NETCP_SOP_OFFSET; + config.u.rx.dst_q = netcp->rx_queue_id; + config.u.rx.thresh = DMA_THRESH_NONE; + + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) { + if (netcp->rx_fdq[i]) + last_fdq = knav_queue_get_id(netcp->rx_fdq[i]); + config.u.rx.fdq[i] = last_fdq; + } + + netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, + netcp->dma_chan_name, &config); + if (IS_ERR_OR_NULL(netcp->rx_channel)) { + dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n", + netcp->dma_chan_name); + goto fail; + } + + dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel); + return 0; + +fail: + netcp_free_navigator_resources(netcp); + return ret; +} + +/* Open the device */ +static int netcp_ndo_open(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int ret; + + netif_carrier_off(ndev); + ret = netcp_setup_navigator_resources(ndev); + if (ret) { + dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n"); + goto fail; + } + + mutex_lock(&netcp_modules_lock); + for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module; + if (module->open) { + ret = module->open(intf_modpriv->module_priv, ndev); + if (ret != 0) { + dev_err(netcp->ndev_dev, "module open failed\n"); + goto fail_open; + } + } + } + mutex_unlock(&netcp_modules_lock); + + netcp_rxpool_refill(netcp); + napi_enable(&netcp->rx_napi); + napi_enable(&netcp->tx_napi); + knav_queue_enable_notify(netcp->tx_compl_q); + knav_queue_enable_notify(netcp->rx_queue); + netif_tx_wake_all_queues(ndev); + dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); + return 0; + +fail_open: + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (module->close) + module->close(intf_modpriv->module_priv, ndev); + } + mutex_unlock(&netcp_modules_lock); + +fail: + netcp_free_navigator_resources(netcp); + return ret; +} + +/* Close the device */ +static int netcp_ndo_stop(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int err = 0; + + netif_tx_stop_all_queues(ndev); + netif_carrier_off(ndev); + netcp_addr_clear_mark(netcp); + netcp_addr_sweep_del(netcp); + knav_queue_disable_notify(netcp->rx_queue); + knav_queue_disable_notify(netcp->tx_compl_q); + napi_disable(&netcp->rx_napi); + napi_disable(&netcp->tx_napi); + + mutex_lock(&netcp_modules_lock); + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (module->close) { + err = module->close(intf_modpriv->module_priv, ndev); + if (err != 0) + dev_err(netcp->ndev_dev, "Close failed\n"); + } + } + mutex_unlock(&netcp_modules_lock); + + /* Recycle Rx descriptors from completion queue */ + netcp_empty_rx_queue(netcp); + + /* Recycle Tx descriptors from completion queue */ + netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size); + + if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size) + dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n", + netcp->tx_pool_size - knav_pool_count(netcp->tx_pool)); + + netcp_free_navigator_resources(netcp); + dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name); + return 0; +} + +static int netcp_ndo_ioctl(struct net_device *ndev, + struct ifreq *req, int cmd) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int ret = -1, err = -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -EINVAL; + + mutex_lock(&netcp_modules_lock); + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (!module->ioctl) + continue; + + err = module->ioctl(intf_modpriv->module_priv, req, cmd); + if ((err < 0) && (err != -EOPNOTSUPP)) { + ret = err; + goto out; + } + if (err == 0) + ret = err; + } + +out: + mutex_unlock(&netcp_modules_lock); + return (ret == 0) ? 
0 : err; +} + +static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + + /* MTU < 68 is an error for IPv4 traffic */ + if ((new_mtu < 68) || + (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) { + dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu); + return -EINVAL; + } + + ndev->mtu = new_mtu; + return 0; +} + +static void netcp_ndo_tx_timeout(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + unsigned int descs = knav_pool_count(netcp->tx_pool); + + dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs); + netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size); + ndev->trans_start = jiffies; + netif_tx_wake_all_queues(ndev); +} + +static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int err = 0; + + dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); + + mutex_lock(&netcp_modules_lock); + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if ((module->add_vid) && (vid != 0)) { + err = module->add_vid(intf_modpriv->module_priv, vid); + if (err != 0) { + dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n", + vid); + break; + } + } + } + mutex_unlock(&netcp_modules_lock); + return err; +} + +static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int err = 0; + + dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); + + mutex_lock(&netcp_modules_lock); + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (module->del_vid) { + err = module->del_vid(intf_modpriv->module_priv, vid); + if (err != 0) { + dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n", + vid); + break; + } + } + } + mutex_unlock(&netcp_modules_lock); + return err; +} + +static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback) +{ + return 0; +} + +static int netcp_setup_tc(struct net_device *dev, u8 num_tc) +{ + int i; + + /* setup tc must be called under rtnl lock */ + ASSERT_RTNL(); + + /* Sanity-check the number of traffic classes requested */ + if ((dev->real_num_tx_queues <= 1) || + (dev->real_num_tx_queues < num_tc)) + return -EINVAL; + + /* Configure traffic class to queue mappings */ + if (num_tc) { + netdev_set_num_tc(dev, num_tc); + for (i = 0; i < num_tc; i++) + netdev_set_tc_queue(dev, i, 1, i); + } else { + netdev_reset_tc(dev); + } + + return 0; +} + +static const struct net_device_ops netcp_netdev_ops = { + .ndo_open = netcp_ndo_open, + .ndo_stop = netcp_ndo_stop, + .ndo_start_xmit = netcp_ndo_start_xmit, + .ndo_set_rx_mode = netcp_set_rx_mode, + .ndo_do_ioctl = netcp_ndo_ioctl, + .ndo_change_mtu = netcp_ndo_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = netcp_rx_add_vid, + .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid, + .ndo_tx_timeout = netcp_ndo_tx_timeout, + .ndo_select_queue = netcp_select_queue, + .ndo_setup_tc = netcp_setup_tc, +}; + +static int netcp_create_interface(struct netcp_device *netcp_device, + struct device_node *node_interface) +{ + struct device *dev = netcp_device->device; + struct device_node *node = dev->of_node; + struct netcp_intf *netcp; + 
struct net_device *ndev; + resource_size_t size; + struct resource res; + void __iomem *efuse = NULL; + u32 efuse_mac = 0; + const void *mac_addr; + u8 efuse_mac_addr[6]; + u32 temp[2]; + int ret = 0; + + ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1); + if (!ndev) { + dev_err(dev, "Error allocating netdev\n"); + return -ENOMEM; + } + + ndev->features |= NETIF_F_SG; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->hw_features = ndev->features; + ndev->vlan_features |= NETIF_F_SG; + + netcp = netdev_priv(ndev); + spin_lock_init(&netcp->lock); + INIT_LIST_HEAD(&netcp->module_head); + INIT_LIST_HEAD(&netcp->txhook_list_head); + INIT_LIST_HEAD(&netcp->rxhook_list_head); + INIT_LIST_HEAD(&netcp->addr_list); + netcp->netcp_device = netcp_device; + netcp->dev = netcp_device->device; + netcp->ndev = ndev; + netcp->ndev_dev = &ndev->dev; + netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG); + netcp->tx_pause_threshold = MAX_SKB_FRAGS; + netcp->tx_resume_threshold = netcp->tx_pause_threshold; + netcp->node_interface = node_interface; + + ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac); + if (efuse_mac) { + if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) { + dev_err(dev, "could not find efuse-mac reg resource\n"); + ret = -ENODEV; + goto quit; + } + size = resource_size(&res); + + if (!devm_request_mem_region(dev, res.start, size, + dev_name(dev))) { + dev_err(dev, "could not reserve resource\n"); + ret = -ENOMEM; + goto quit; + } + + efuse = devm_ioremap_nocache(dev, res.start, size); + if (!efuse) { + dev_err(dev, "could not map resource\n"); + devm_release_mem_region(dev, res.start, size); + ret = -ENOMEM; + goto quit; + } + + emac_arch_get_mac_addr(efuse_mac_addr, efuse); + if (is_valid_ether_addr(efuse_mac_addr)) + ether_addr_copy(ndev->dev_addr, efuse_mac_addr); + else + random_ether_addr(ndev->dev_addr); + + devm_iounmap(dev, efuse); + devm_release_mem_region(dev, res.start, size); + } else { + mac_addr = of_get_mac_address(node_interface); + if (mac_addr) + ether_addr_copy(ndev->dev_addr, mac_addr); + else + random_ether_addr(ndev->dev_addr); + } + + ret = of_property_read_string(node_interface, "rx-channel", + &netcp->dma_chan_name); + if (ret < 0) { + dev_err(dev, "missing \"rx-channel\" parameter\n"); + ret = -ENODEV; + goto quit; + } + + ret = of_property_read_u32(node_interface, "rx-queue", + &netcp->rx_queue_id); + if (ret < 0) { + dev_warn(dev, "missing \"rx-queue\" parameter\n"); + netcp->rx_queue_id = KNAV_QUEUE_QPEND; + } + + ret = of_property_read_u32_array(node_interface, "rx-queue-depth", + netcp->rx_queue_depths, + KNAV_DMA_FDQ_PER_CHAN); + if (ret < 0) { + dev_err(dev, "missing \"rx-queue-depth\" parameter\n"); + netcp->rx_queue_depths[0] = 128; + } + + ret = of_property_read_u32_array(node_interface, "rx-buffer-size", + netcp->rx_buffer_sizes, + KNAV_DMA_FDQ_PER_CHAN); + if (ret) { + dev_err(dev, "missing \"rx-buffer-size\" parameter\n"); + netcp->rx_buffer_sizes[0] = 1536; + } + + ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); + if (ret < 0) { + dev_err(dev, "missing \"rx-pool\" parameter\n"); + ret = -ENODEV; + goto quit; + } + netcp->rx_pool_size = temp[0]; + netcp->rx_pool_region_id = temp[1]; + + ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2); + if (ret < 0) { + dev_err(dev, "missing \"tx-pool\" parameter\n"); + ret = -ENODEV; + goto quit; + } + netcp->tx_pool_size = temp[0]; + netcp->tx_pool_region_id = temp[1]; + + if (netcp->tx_pool_size < MAX_SKB_FRAGS) { + 
dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n", + MAX_SKB_FRAGS); + ret = -ENODEV; + goto quit; + } + + ret = of_property_read_u32(node_interface, "tx-completion-queue", + &netcp->tx_compl_qid); + if (ret < 0) { + dev_warn(dev, "missing \"tx-completion-queue\" parameter\n"); + netcp->tx_compl_qid = KNAV_QUEUE_QPEND; + } + + /* NAPI register */ + netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT); + netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT); + + /* Register the network device */ + ndev->dev_id = 0; + ndev->watchdog_timeo = NETCP_TX_TIMEOUT; + ndev->netdev_ops = &netcp_netdev_ops; + SET_NETDEV_DEV(ndev, dev); + + list_add_tail(&netcp->interface_list, &netcp_device->interface_head); + return 0; + +quit: + free_netdev(ndev); + return ret; +} + +static void netcp_delete_interface(struct netcp_device *netcp_device, + struct net_device *ndev) +{ + struct netcp_intf_modpriv *intf_modpriv, *tmp; + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_module *module; + + dev_dbg(netcp_device->device, "Removing interface \"%s\"\n", + ndev->name); + + /* Notify each of the modules that the interface is going away */ + list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head, + intf_list) { + module = intf_modpriv->netcp_module; + dev_dbg(netcp_device->device, "Releasing module \"%s\"\n", + module->name); + if (module->release) + module->release(intf_modpriv->module_priv); + list_del(&intf_modpriv->intf_list); + kfree(intf_modpriv); + } + WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n", + ndev->name); + + list_del(&netcp->interface_list); + + of_node_put(netcp->node_interface); + unregister_netdev(ndev); + netif_napi_del(&netcp->rx_napi); + free_netdev(ndev); +} + +static int netcp_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct netcp_intf *netcp_intf, *netcp_tmp; + struct device_node *child, *interfaces; + struct netcp_device *netcp_device; + struct device *dev = &pdev->dev; + struct netcp_module *module; + int ret; + + if (!node) { + dev_err(dev, "could not find device info\n"); + return -ENODEV; + } + + /* Allocate a new NETCP device instance */ + netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL); + if (!netcp_device) + return -ENOMEM; + + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + dev_err(dev, "Failed to enable NETCP power-domain\n"); + pm_runtime_disable(&pdev->dev); + return ret; + } + + /* Initialize the NETCP device instance */ + INIT_LIST_HEAD(&netcp_device->interface_head); + INIT_LIST_HEAD(&netcp_device->modpriv_head); + netcp_device->device = dev; + platform_set_drvdata(pdev, netcp_device); + + /* create interfaces */ + interfaces = of_get_child_by_name(node, "netcp-interfaces"); + if (!interfaces) { + dev_err(dev, "could not find netcp-interfaces node\n"); + ret = -ENODEV; + goto probe_quit; + } + + for_each_available_child_of_node(interfaces, child) { + ret = netcp_create_interface(netcp_device, child); + if (ret) { + dev_err(dev, "could not create interface(%s)\n", + child->name); + goto probe_quit_interface; + } + } + + /* Add the device instance to the list */ + list_add_tail(&netcp_device->device_list, &netcp_devices); + + /* Probe & attach any modules already registered */ + mutex_lock(&netcp_modules_lock); + for_each_netcp_module(module) { + ret = netcp_module_probe(netcp_device, module); + if (ret < 0) + dev_err(dev, "module(%s) probe failed\n", 
module->name); + } + mutex_unlock(&netcp_modules_lock); + return 0; + +probe_quit_interface: + list_for_each_entry_safe(netcp_intf, netcp_tmp, + &netcp_device->interface_head, + interface_list) { + netcp_delete_interface(netcp_device, netcp_intf->ndev); + } + +probe_quit: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + platform_set_drvdata(pdev, NULL); + return ret; +} + +static int netcp_remove(struct platform_device *pdev) +{ + struct netcp_device *netcp_device = platform_get_drvdata(pdev); + struct netcp_inst_modpriv *inst_modpriv, *tmp; + struct netcp_module *module; + + list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head, + inst_list) { + module = inst_modpriv->netcp_module; + dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name); + module->remove(netcp_device, inst_modpriv->module_priv); + list_del(&inst_modpriv->inst_list); + kfree(inst_modpriv); + } + WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n", + pdev->name); + + devm_kfree(&pdev->dev, netcp_device); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id of_match[] = { + { .compatible = "ti,netcp-1.0", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_match); + +static struct platform_driver netcp_driver = { + .driver = { + .name = "netcp-1.0", + .owner = THIS_MODULE, + .of_match_table = of_match, + }, + .probe = netcp_probe, + .remove = netcp_remove, +}; +module_platform_driver(netcp_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs"); +MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>"); [new file: drivers/net/ethernet/ti/netcp_ethss.c] +/* + * Keystone GBE and XGBE subsystem code + * + * Copyright (C) 2014 Texas Instruments Incorporated + * Authors:	Sandeep Nair + * Sandeep Paulraj + * Cyril Chemparathy + * Santosh Shilimkar + * Wingman Kwok + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ */ + +#include +#include +#include +#include +#include +#include + +#include "cpsw_ale.h" +#include "netcp.h" + +#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver" +#define NETCP_DRIVER_VERSION "v1.0" + +#define GBE_IDENT(reg) ((reg >> 16) & 0xffff) +#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7) +#define GBE_MINOR_VERSION(reg) (reg & 0xff) +#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f) + +/* 1G Ethernet SS defines */ +#define GBE_MODULE_NAME "netcp-gbe" +#define GBE_SS_VERSION_14 0x4ed21104 + +#define GBE_SS_REG_INDEX 0 +#define GBE_SGMII34_REG_INDEX 1 +#define GBE_SM_REG_INDEX 2 +/* offset relative to base of GBE_SS_REG_INDEX */ +#define GBE13_SGMII_MODULE_OFFSET 0x100 +/* offset relative to base of GBE_SM_REG_INDEX */ +#define GBE13_HOST_PORT_OFFSET 0x34 +#define GBE13_SLAVE_PORT_OFFSET 0x60 +#define GBE13_EMAC_OFFSET 0x100 +#define GBE13_SLAVE_PORT2_OFFSET 0x200 +#define GBE13_HW_STATS_OFFSET 0x300 +#define GBE13_ALE_OFFSET 0x600 +#define GBE13_HOST_PORT_NUM 0 +#define GBE13_NUM_ALE_ENTRIES 1024 + +/* 1G Ethernet NU SS defines */ +#define GBENU_MODULE_NAME "netcp-gbenu" +#define GBE_SS_ID_NU 0x4ee6 +#define GBE_SS_ID_2U 0x4ee8 + +#define IS_SS_ID_MU(d) \ + ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)) + +#define IS_SS_ID_NU(d) \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) + +#define GBENU_SS_REG_INDEX 0 +#define GBENU_SM_REG_INDEX 1 +#define GBENU_SGMII_MODULE_OFFSET 0x100 +#define GBENU_HOST_PORT_OFFSET 0x1000 +#define GBENU_SLAVE_PORT_OFFSET 0x2000 +#define GBENU_EMAC_OFFSET 0x2330 +#define GBENU_HW_STATS_OFFSET 0x1a000 +#define GBENU_ALE_OFFSET 0x1e000 +#define GBENU_HOST_PORT_NUM 0 +#define GBENU_NUM_ALE_ENTRIES 1024 + +/* 10G Ethernet SS defines */ +#define XGBE_MODULE_NAME "netcp-xgbe" +#define XGBE_SS_VERSION_10 0x4ee42100 + +#define XGBE_SS_REG_INDEX 0 +#define XGBE_SM_REG_INDEX 1 +#define XGBE_SERDES_REG_INDEX 2 + +/* offset relative to base of XGBE_SS_REG_INDEX */ +#define XGBE10_SGMII_MODULE_OFFSET 0x100 +/* offset relative to base of XGBE_SM_REG_INDEX */ +#define XGBE10_HOST_PORT_OFFSET 0x34 +#define XGBE10_SLAVE_PORT_OFFSET 0x64 +#define XGBE10_EMAC_OFFSET 0x400 +#define XGBE10_ALE_OFFSET 0x700 +#define XGBE10_HW_STATS_OFFSET 0x800 +#define XGBE10_HOST_PORT_NUM 0 +#define XGBE10_NUM_ALE_ENTRIES 1024 + +#define GBE_TIMER_INTERVAL (HZ / 2) + +/* Soft reset register values */ +#define SOFT_RESET_MASK BIT(0) +#define SOFT_RESET BIT(0) +#define DEVICE_EMACSL_RESET_POLL_COUNT 100 +#define GMACSL_RET_WARN_RESET_INCOMPLETE -2 + +#define MACSL_RX_ENABLE_CSF BIT(23) +#define MACSL_ENABLE_EXT_CTL BIT(18) +#define MACSL_XGMII_ENABLE BIT(13) +#define MACSL_XGIG_MODE BIT(8) +#define MACSL_GIG_MODE BIT(7) +#define MACSL_GMII_ENABLE BIT(5) +#define MACSL_FULLDUPLEX BIT(0) + +#define GBE_CTL_P0_ENABLE BIT(2) +#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff +#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf +#define GBE_STATS_CD_SEL BIT(28) + +#define GBE_PORT_MASK(x) (BIT(x) - 1) +#define GBE_MASK_NO_PORTS 0 + +#define GBE_DEF_1G_MAC_CONTROL \ + (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \ + MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF) + +#define GBE_DEF_10G_MAC_CONTROL \ + (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \ + MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF) + +#define GBE_STATSA_MODULE 0 +#define GBE_STATSB_MODULE 1 +#define GBE_STATSC_MODULE 2 +#define GBE_STATSD_MODULE 3 + +#define GBENU_STATS0_MODULE 0 +#define GBENU_STATS1_MODULE 1 +#define GBENU_STATS2_MODULE 2 +#define GBENU_STATS3_MODULE 3 +#define GBENU_STATS4_MODULE 4 +#define 
GBENU_STATS5_MODULE 5 +#define GBENU_STATS6_MODULE 6 +#define GBENU_STATS7_MODULE 7 +#define GBENU_STATS8_MODULE 8 + +#define XGBE_STATS0_MODULE 0 +#define XGBE_STATS1_MODULE 1 +#define XGBE_STATS2_MODULE 2 + +/* s: 0-based slave_port */ +#define SGMII_BASE(s) \ + (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) + +#define GBE_TX_QUEUE 648 +#define GBE_TXHOOK_ORDER 0 +#define GBE_DEFAULT_ALE_AGEOUT 30 +#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY) +#define NETCP_LINK_STATE_INVALID -1 + +#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ + offsetof(struct gbe##_##rb, rn) +#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ + offsetof(struct gbenu##_##rb, rn) +#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ + offsetof(struct xgbe##_##rb, rn) +#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn) + +#define HOST_TX_PRI_MAP_DEFAULT 0x00000000 + +struct xgbe_ss_regs { + u32 id_ver; + u32 synce_count; + u32 synce_mux; + u32 control; +}; + +struct xgbe_switch_regs { + u32 id_ver; + u32 control; + u32 emcontrol; + u32 stat_port_en; + u32 ptype; + u32 soft_idle; + u32 thru_rate; + u32 gap_thresh; + u32 tx_start_wds; + u32 flow_control; + u32 cppi_thresh; +}; + +struct xgbe_port_regs { + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; + u32 sa_lo; + u32 sa_hi; + u32 ts_ctl; + u32 ts_seq_ltype; + u32 ts_vlan; + u32 ts_ctl_ltype2; + u32 ts_ctl2; + u32 control; +}; + +struct xgbe_host_port_regs { + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; + u32 src_id; + u32 rx_pri_map; + u32 rx_maxlen; +}; + +struct xgbe_emac_regs { + u32 id_ver; + u32 mac_control; + u32 mac_status; + u32 soft_reset; + u32 rx_maxlen; + u32 __reserved_0; + u32 rx_pause; + u32 tx_pause; + u32 em_control; + u32 __reserved_1; + u32 tx_gap; + u32 rsvd[4]; +}; + +struct xgbe_host_hw_stats { + u32 rx_good_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 __rsvd_0[3]; + u32 rx_oversized_frames; + u32 __rsvd_1; + u32 rx_undersized_frames; + u32 __rsvd_2; + u32 overrun_type4; + u32 overrun_type5; + u32 rx_bytes; + u32 tx_good_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 __rsvd_3[9]; + u32 tx_bytes; + u32 tx_64byte_frames; + u32 tx_65_to_127byte_frames; + u32 tx_128_to_255byte_frames; + u32 tx_256_to_511byte_frames; + u32 tx_512_to_1023byte_frames; + u32 tx_1024byte_frames; + u32 net_bytes; + u32 rx_sof_overruns; + u32 rx_mof_overruns; + u32 rx_dma_overruns; +}; + +struct xgbe_hw_stats { + u32 rx_good_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_pause_frames; + u32 rx_crc_errors; + u32 rx_align_code_errors; + u32 rx_oversized_frames; + u32 rx_jabber_frames; + u32 rx_undersized_frames; + u32 rx_fragments; + u32 overrun_type4; + u32 overrun_type5; + u32 rx_bytes; + u32 tx_good_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; + u32 tx_deferred_frames; + u32 tx_collision_frames; + u32 tx_single_coll_frames; + u32 tx_mult_coll_frames; + u32 tx_excessive_collisions; + u32 tx_late_collisions; + u32 tx_underrun; + u32 tx_carrier_sense_errors; + u32 tx_bytes; + u32 tx_64byte_frames; + u32 tx_65_to_127byte_frames; + u32 tx_128_to_255byte_frames; + u32 tx_256_to_511byte_frames; + u32 tx_512_to_1023byte_frames; + u32 tx_1024byte_frames; + u32 net_bytes; + u32 rx_sof_overruns; + u32 rx_mof_overruns; + u32 rx_dma_overruns; +}; + +#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32)) + +struct gbenu_ss_regs { + u32 id_ver; + u32 synce_count; /* NU */ + u32 synce_mux; 
/* NU */ + u32 control; /* 2U */ + u32 __rsvd_0[2]; /* 2U */ + u32 rgmii_status; /* 2U */ + u32 ss_status; /* 2U */ +}; + +struct gbenu_switch_regs { + u32 id_ver; + u32 control; + u32 __rsvd_0[2]; + u32 emcontrol; + u32 stat_port_en; + u32 ptype; /* NU */ + u32 soft_idle; + u32 thru_rate; /* NU */ + u32 gap_thresh; /* NU */ + u32 tx_start_wds; /* NU */ + u32 eee_prescale; /* 2U */ + u32 tx_g_oflow_thresh_set; /* NU */ + u32 tx_g_oflow_thresh_clr; /* NU */ + u32 tx_g_buf_thresh_set_l; /* NU */ + u32 tx_g_buf_thresh_set_h; /* NU */ + u32 tx_g_buf_thresh_clr_l; /* NU */ + u32 tx_g_buf_thresh_clr_h; /* NU */ +}; + +struct gbenu_port_regs { + u32 __rsvd_0; + u32 control; + u32 max_blks; /* 2U */ + u32 mem_align1; + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; /* NU */ + u32 pri_ctl; /* 2U */ + u32 rx_pri_map; + u32 rx_maxlen; + u32 tx_blks_pri; /* NU */ + u32 __rsvd_1; + u32 idle2lpi; /* 2U */ + u32 lpi2idle; /* 2U */ + u32 eee_status; /* 2U */ + u32 __rsvd_2; + u32 __rsvd_3[176]; /* NU: more to add */ + u32 __rsvd_4[2]; + u32 sa_lo; + u32 sa_hi; + u32 ts_ctl; + u32 ts_seq_ltype; + u32 ts_vlan; + u32 ts_ctl_ltype2; + u32 ts_ctl2; +}; + +struct gbenu_host_port_regs { + u32 __rsvd_0; + u32 control; + u32 flow_id_offset; /* 2U */ + u32 __rsvd_1; + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; /* NU */ + u32 pri_ctl; + u32 rx_pri_map; + u32 rx_maxlen; + u32 tx_blks_pri; /* NU */ + u32 __rsvd_2; + u32 idle2lpi; /* 2U */ + u32 lpi2wake; /* 2U */ + u32 eee_status; /* 2U */ + u32 __rsvd_3; + u32 __rsvd_4[184]; /* NU */ + u32 host_blks_pri; /* NU */ +}; + +struct gbenu_emac_regs { + u32 mac_control; + u32 mac_status; + u32 soft_reset; + u32 boff_test; + u32 rx_pause; + u32 __rsvd_0[11]; /* NU */ + u32 tx_pause; + u32 __rsvd_1[11]; /* NU */ + u32 em_control; + u32 tx_gap; +}; + +/* Some hw stat regs are applicable to slave port only. + * This is handled by gbenu_et_stats struct. Also some + * are for SS version NU and some are for 2U. 
+ */ +struct gbenu_hw_stats { + u32 rx_good_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_pause_frames; /* slave */ + u32 rx_crc_errors; + u32 rx_align_code_errors; /* slave */ + u32 rx_oversized_frames; + u32 rx_jabber_frames; /* slave */ + u32 rx_undersized_frames; + u32 rx_fragments; /* slave */ + u32 ale_drop; + u32 ale_overrun_drop; + u32 rx_bytes; + u32 tx_good_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; /* slave */ + u32 tx_deferred_frames; /* slave */ + u32 tx_collision_frames; /* slave */ + u32 tx_single_coll_frames; /* slave */ + u32 tx_mult_coll_frames; /* slave */ + u32 tx_excessive_collisions; /* slave */ + u32 tx_late_collisions; /* slave */ + u32 rx_ipg_error; /* slave 10G only */ + u32 tx_carrier_sense_errors; /* slave */ + u32 tx_bytes; + u32 tx_64B_frames; + u32 tx_65_to_127B_frames; + u32 tx_128_to_255B_frames; + u32 tx_256_to_511B_frames; + u32 tx_512_to_1023B_frames; + u32 tx_1024B_frames; + u32 net_bytes; + u32 rx_bottom_fifo_drop; + u32 rx_port_mask_drop; + u32 rx_top_fifo_drop; + u32 ale_rate_limit_drop; + u32 ale_vid_ingress_drop; + u32 ale_da_eq_sa_drop; + u32 __rsvd_0[3]; + u32 ale_unknown_ucast; + u32 ale_unknown_ucast_bytes; + u32 ale_unknown_mcast; + u32 ale_unknown_mcast_bytes; + u32 ale_unknown_bcast; + u32 ale_unknown_bcast_bytes; + u32 ale_pol_match; + u32 ale_pol_match_red; /* NU */ + u32 ale_pol_match_yellow; /* NU */ + u32 __rsvd_1[44]; + u32 tx_mem_protect_err; + /* following NU only */ + u32 tx_pri0; + u32 tx_pri1; + u32 tx_pri2; + u32 tx_pri3; + u32 tx_pri4; + u32 tx_pri5; + u32 tx_pri6; + u32 tx_pri7; + u32 tx_pri0_bcnt; + u32 tx_pri1_bcnt; + u32 tx_pri2_bcnt; + u32 tx_pri3_bcnt; + u32 tx_pri4_bcnt; + u32 tx_pri5_bcnt; + u32 tx_pri6_bcnt; + u32 tx_pri7_bcnt; + u32 tx_pri0_drop; + u32 tx_pri1_drop; + u32 tx_pri2_drop; + u32 tx_pri3_drop; + u32 tx_pri4_drop; + u32 tx_pri5_drop; + u32 tx_pri6_drop; + u32 tx_pri7_drop; + u32 tx_pri0_drop_bcnt; + u32 tx_pri1_drop_bcnt; + u32 tx_pri2_drop_bcnt; + u32 tx_pri3_drop_bcnt; + u32 tx_pri4_drop_bcnt; + u32 tx_pri5_drop_bcnt; + u32 tx_pri6_drop_bcnt; + u32 tx_pri7_drop_bcnt; +}; + +#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32)) +#define GBENU_HW_STATS_REG_MAP_SZ 0x200 + +struct gbe_ss_regs { + u32 id_ver; + u32 synce_count; + u32 synce_mux; +}; + +struct gbe_ss_regs_ofs { + u16 id_ver; + u16 control; +}; + +struct gbe_switch_regs { + u32 id_ver; + u32 control; + u32 soft_reset; + u32 stat_port_en; + u32 ptype; + u32 soft_idle; + u32 thru_rate; + u32 gap_thresh; + u32 tx_start_wds; + u32 flow_control; +}; + +struct gbe_switch_regs_ofs { + u16 id_ver; + u16 control; + u16 soft_reset; + u16 emcontrol; + u16 stat_port_en; + u16 ptype; + u16 flow_control; +}; + +struct gbe_port_regs { + u32 max_blks; + u32 blk_cnt; + u32 port_vlan; + u32 tx_pri_map; + u32 sa_lo; + u32 sa_hi; + u32 ts_ctl; + u32 ts_seq_ltype; + u32 ts_vlan; + u32 ts_ctl_ltype2; + u32 ts_ctl2; +}; + +struct gbe_port_regs_ofs { + u16 port_vlan; + u16 tx_pri_map; + u16 sa_lo; + u16 sa_hi; + u16 ts_ctl; + u16 ts_seq_ltype; + u16 ts_vlan; + u16 ts_ctl_ltype2; + u16 ts_ctl2; + u16 rx_maxlen; /* 2U, NU */ +}; + +struct gbe_host_port_regs { + u32 src_id; + u32 port_vlan; + u32 rx_pri_map; + u32 rx_maxlen; +}; + +struct gbe_host_port_regs_ofs { + u16 port_vlan; + u16 tx_pri_map; + u16 rx_maxlen; +}; + +struct gbe_emac_regs { + u32 id_ver; + u32 mac_control; + u32 mac_status; + u32 soft_reset; + u32 rx_maxlen; + u32 __reserved_0; + u32 rx_pause; + u32 tx_pause; + 
u32 __reserved_1; + u32 rx_pri_map; + u32 rsvd[6]; +}; + +struct gbe_emac_regs_ofs { + u16 mac_control; + u16 soft_reset; + u16 rx_maxlen; +}; + +struct gbe_hw_stats { + u32 rx_good_frames; + u32 rx_broadcast_frames; + u32 rx_multicast_frames; + u32 rx_pause_frames; + u32 rx_crc_errors; + u32 rx_align_code_errors; + u32 rx_oversized_frames; + u32 rx_jabber_frames; + u32 rx_undersized_frames; + u32 rx_fragments; + u32 __pad_0[2]; + u32 rx_bytes; + u32 tx_good_frames; + u32 tx_broadcast_frames; + u32 tx_multicast_frames; + u32 tx_pause_frames; + u32 tx_deferred_frames; + u32 tx_collision_frames; + u32 tx_single_coll_frames; + u32 tx_mult_coll_frames; + u32 tx_excessive_collisions; + u32 tx_late_collisions; + u32 tx_underrun; + u32 tx_carrier_sense_errors; + u32 tx_bytes; + u32 tx_64byte_frames; + u32 tx_65_to_127byte_frames; + u32 tx_128_to_255byte_frames; + u32 tx_256_to_511byte_frames; + u32 tx_512_to_1023byte_frames; + u32 tx_1024byte_frames; + u32 net_bytes; + u32 rx_sof_overruns; + u32 rx_mof_overruns; + u32 rx_dma_overruns; +}; + +#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32)) +#define GBE_MAX_HW_STAT_MODS 9 +#define GBE_HW_STATS_REG_MAP_SZ 0x100 + +struct gbe_slave { + void __iomem *port_regs; + void __iomem *emac_regs; + struct gbe_port_regs_ofs port_regs_ofs; + struct gbe_emac_regs_ofs emac_regs_ofs; + int slave_num; /* 0 based logical number */ + int port_num; /* actual port number */ + atomic_t link_state; + bool open; + struct phy_device *phy; + u32 link_interface; + u32 mac_control; + u8 phy_port_t; + struct device_node *phy_node; + struct list_head slave_list; +}; + +struct gbe_priv { + struct device *dev; + struct netcp_device *netcp_device; + struct timer_list timer; + u32 num_slaves; + u32 ale_entries; + u32 ale_ports; + bool enable_ale; + u8 max_num_slaves; + u8 max_num_ports; /* max_num_slaves + 1 */ + struct netcp_tx_pipe tx_pipe; + + int host_port; + u32 rx_packet_max; + u32 ss_version; + u32 stats_en_mask; + + void __iomem *ss_regs; + void __iomem *switch_regs; + void __iomem *host_port_regs; + void __iomem *ale_reg; + void __iomem *sgmii_port_regs; + void __iomem *sgmii_port34_regs; + void __iomem *xgbe_serdes_regs; + void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS]; + + struct gbe_ss_regs_ofs ss_regs_ofs; + struct gbe_switch_regs_ofs switch_regs_ofs; + struct gbe_host_port_regs_ofs host_port_regs_ofs; + + struct cpsw_ale *ale; + unsigned int tx_queue_id; + const char *dma_chan_name; + + struct list_head gbe_intf_head; + struct list_head secondary_slaves; + struct net_device *dummy_ndev; + + u64 *hw_stats; + const struct netcp_ethtool_stat *et_stats; + int num_et_stats; + /* Lock for updating the hwstats */ + spinlock_t hw_stats_lock; +}; + +struct gbe_intf { + struct net_device *ndev; + struct device *dev; + struct gbe_priv *gbe_dev; + struct netcp_tx_pipe tx_pipe; + struct gbe_slave *slave; + struct list_head gbe_intf_list; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +}; + +static struct netcp_module gbe_module; +static struct netcp_module xgbe_module; + +/* Statistic management */ +struct netcp_ethtool_stat { + char desc[ETH_GSTRING_LEN]; + int type; + u32 size; + int offset; +}; + +#define GBE_STATSA_INFO(field) \ +{ \ + "GBE_A:"#field, GBE_STATSA_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} + +#define GBE_STATSB_INFO(field) \ +{ \ + "GBE_B:"#field, GBE_STATSB_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} + +#define 
GBE_STATSC_INFO(field) \ +{ \ + "GBE_C:"#field, GBE_STATSC_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} + +#define GBE_STATSD_INFO(field) \ +{ \ + "GBE_D:"#field, GBE_STATSD_MODULE, \ + FIELD_SIZEOF(struct gbe_hw_stats, field), \ + offsetof(struct gbe_hw_stats, field) \ +} + +static const struct netcp_ethtool_stat gbe13_et_stats[] = { + /* GBE module A */ + GBE_STATSA_INFO(rx_good_frames), + GBE_STATSA_INFO(rx_broadcast_frames), + GBE_STATSA_INFO(rx_multicast_frames), + GBE_STATSA_INFO(rx_pause_frames), + GBE_STATSA_INFO(rx_crc_errors), + GBE_STATSA_INFO(rx_align_code_errors), + GBE_STATSA_INFO(rx_oversized_frames), + GBE_STATSA_INFO(rx_jabber_frames), + GBE_STATSA_INFO(rx_undersized_frames), + GBE_STATSA_INFO(rx_fragments), + GBE_STATSA_INFO(rx_bytes), + GBE_STATSA_INFO(tx_good_frames), + GBE_STATSA_INFO(tx_broadcast_frames), + GBE_STATSA_INFO(tx_multicast_frames), + GBE_STATSA_INFO(tx_pause_frames), + GBE_STATSA_INFO(tx_deferred_frames), + GBE_STATSA_INFO(tx_collision_frames), + GBE_STATSA_INFO(tx_single_coll_frames), + GBE_STATSA_INFO(tx_mult_coll_frames), + GBE_STATSA_INFO(tx_excessive_collisions), + GBE_STATSA_INFO(tx_late_collisions), + GBE_STATSA_INFO(tx_underrun), + GBE_STATSA_INFO(tx_carrier_sense_errors), + GBE_STATSA_INFO(tx_bytes), + GBE_STATSA_INFO(tx_64byte_frames), + GBE_STATSA_INFO(tx_65_to_127byte_frames), + GBE_STATSA_INFO(tx_128_to_255byte_frames), + GBE_STATSA_INFO(tx_256_to_511byte_frames), + GBE_STATSA_INFO(tx_512_to_1023byte_frames), + GBE_STATSA_INFO(tx_1024byte_frames), + GBE_STATSA_INFO(net_bytes), + GBE_STATSA_INFO(rx_sof_overruns), + GBE_STATSA_INFO(rx_mof_overruns), + GBE_STATSA_INFO(rx_dma_overruns), + /* GBE module B */ + GBE_STATSB_INFO(rx_good_frames), + GBE_STATSB_INFO(rx_broadcast_frames), + GBE_STATSB_INFO(rx_multicast_frames), + GBE_STATSB_INFO(rx_pause_frames), + GBE_STATSB_INFO(rx_crc_errors), + GBE_STATSB_INFO(rx_align_code_errors), + GBE_STATSB_INFO(rx_oversized_frames), + GBE_STATSB_INFO(rx_jabber_frames), + GBE_STATSB_INFO(rx_undersized_frames), + GBE_STATSB_INFO(rx_fragments), + GBE_STATSB_INFO(rx_bytes), + GBE_STATSB_INFO(tx_good_frames), + GBE_STATSB_INFO(tx_broadcast_frames), + GBE_STATSB_INFO(tx_multicast_frames), + GBE_STATSB_INFO(tx_pause_frames), + GBE_STATSB_INFO(tx_deferred_frames), + GBE_STATSB_INFO(tx_collision_frames), + GBE_STATSB_INFO(tx_single_coll_frames), + GBE_STATSB_INFO(tx_mult_coll_frames), + GBE_STATSB_INFO(tx_excessive_collisions), + GBE_STATSB_INFO(tx_late_collisions), + GBE_STATSB_INFO(tx_underrun), + GBE_STATSB_INFO(tx_carrier_sense_errors), + GBE_STATSB_INFO(tx_bytes), + GBE_STATSB_INFO(tx_64byte_frames), + GBE_STATSB_INFO(tx_65_to_127byte_frames), + GBE_STATSB_INFO(tx_128_to_255byte_frames), + GBE_STATSB_INFO(tx_256_to_511byte_frames), + GBE_STATSB_INFO(tx_512_to_1023byte_frames), + GBE_STATSB_INFO(tx_1024byte_frames), + GBE_STATSB_INFO(net_bytes), + GBE_STATSB_INFO(rx_sof_overruns), + GBE_STATSB_INFO(rx_mof_overruns), + GBE_STATSB_INFO(rx_dma_overruns), + /* GBE module C */ + GBE_STATSC_INFO(rx_good_frames), + GBE_STATSC_INFO(rx_broadcast_frames), + GBE_STATSC_INFO(rx_multicast_frames), + GBE_STATSC_INFO(rx_pause_frames), + GBE_STATSC_INFO(rx_crc_errors), + GBE_STATSC_INFO(rx_align_code_errors), + GBE_STATSC_INFO(rx_oversized_frames), + GBE_STATSC_INFO(rx_jabber_frames), + GBE_STATSC_INFO(rx_undersized_frames), + GBE_STATSC_INFO(rx_fragments), + GBE_STATSC_INFO(rx_bytes), + GBE_STATSC_INFO(tx_good_frames), + GBE_STATSC_INFO(tx_broadcast_frames), + 
GBE_STATSC_INFO(tx_multicast_frames), + GBE_STATSC_INFO(tx_pause_frames), + GBE_STATSC_INFO(tx_deferred_frames), + GBE_STATSC_INFO(tx_collision_frames), + GBE_STATSC_INFO(tx_single_coll_frames), + GBE_STATSC_INFO(tx_mult_coll_frames), + GBE_STATSC_INFO(tx_excessive_collisions), + GBE_STATSC_INFO(tx_late_collisions), + GBE_STATSC_INFO(tx_underrun), + GBE_STATSC_INFO(tx_carrier_sense_errors), + GBE_STATSC_INFO(tx_bytes), + GBE_STATSC_INFO(tx_64byte_frames), + GBE_STATSC_INFO(tx_65_to_127byte_frames), + GBE_STATSC_INFO(tx_128_to_255byte_frames), + GBE_STATSC_INFO(tx_256_to_511byte_frames), + GBE_STATSC_INFO(tx_512_to_1023byte_frames), + GBE_STATSC_INFO(tx_1024byte_frames), + GBE_STATSC_INFO(net_bytes), + GBE_STATSC_INFO(rx_sof_overruns), + GBE_STATSC_INFO(rx_mof_overruns), + GBE_STATSC_INFO(rx_dma_overruns), + /* GBE module D */ + GBE_STATSD_INFO(rx_good_frames), + GBE_STATSD_INFO(rx_broadcast_frames), + GBE_STATSD_INFO(rx_multicast_frames), + GBE_STATSD_INFO(rx_pause_frames), + GBE_STATSD_INFO(rx_crc_errors), + GBE_STATSD_INFO(rx_align_code_errors), + GBE_STATSD_INFO(rx_oversized_frames), + GBE_STATSD_INFO(rx_jabber_frames), + GBE_STATSD_INFO(rx_undersized_frames), + GBE_STATSD_INFO(rx_fragments), + GBE_STATSD_INFO(rx_bytes), + GBE_STATSD_INFO(tx_good_frames), + GBE_STATSD_INFO(tx_broadcast_frames), + GBE_STATSD_INFO(tx_multicast_frames), + GBE_STATSD_INFO(tx_pause_frames), + GBE_STATSD_INFO(tx_deferred_frames), + GBE_STATSD_INFO(tx_collision_frames), + GBE_STATSD_INFO(tx_single_coll_frames), + GBE_STATSD_INFO(tx_mult_coll_frames), + GBE_STATSD_INFO(tx_excessive_collisions), + GBE_STATSD_INFO(tx_late_collisions), + GBE_STATSD_INFO(tx_underrun), + GBE_STATSD_INFO(tx_carrier_sense_errors), + GBE_STATSD_INFO(tx_bytes), + GBE_STATSD_INFO(tx_64byte_frames), + GBE_STATSD_INFO(tx_65_to_127byte_frames), + GBE_STATSD_INFO(tx_128_to_255byte_frames), + GBE_STATSD_INFO(tx_256_to_511byte_frames), + GBE_STATSD_INFO(tx_512_to_1023byte_frames), + GBE_STATSD_INFO(tx_1024byte_frames), + GBE_STATSD_INFO(net_bytes), + GBE_STATSD_INFO(rx_sof_overruns), + GBE_STATSD_INFO(rx_mof_overruns), + GBE_STATSD_INFO(rx_dma_overruns), +}; + +/* This is the size of entries in GBENU_STATS_HOST */ +#define GBENU_ET_STATS_HOST_SIZE 33 + +#define GBENU_STATS_HOST(field) \ +{ \ + "GBE_HOST:"#field, GBENU_STATS0_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +/* This is the size of entries in GBENU_STATS_HOST */ +#define GBENU_ET_STATS_PORT_SIZE 46 + +#define GBENU_STATS_P1(field) \ +{ \ + "GBE_P1:"#field, GBENU_STATS1_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P2(field) \ +{ \ + "GBE_P2:"#field, GBENU_STATS2_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P3(field) \ +{ \ + "GBE_P3:"#field, GBENU_STATS3_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P4(field) \ +{ \ + "GBE_P4:"#field, GBENU_STATS4_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P5(field) \ +{ \ + "GBE_P5:"#field, GBENU_STATS5_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P6(field) \ +{ \ + "GBE_P6:"#field, GBENU_STATS6_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, 
field) \ +} + +#define GBENU_STATS_P7(field) \ +{ \ + "GBE_P7:"#field, GBENU_STATS7_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +#define GBENU_STATS_P8(field) \ +{ \ + "GBE_P8:"#field, GBENU_STATS8_MODULE, \ + FIELD_SIZEOF(struct gbenu_hw_stats, field), \ + offsetof(struct gbenu_hw_stats, field) \ +} + +static const struct netcp_ethtool_stat gbenu_et_stats[] = { + /* GBENU Host Module */ + GBENU_STATS_HOST(rx_good_frames), + GBENU_STATS_HOST(rx_broadcast_frames), + GBENU_STATS_HOST(rx_multicast_frames), + GBENU_STATS_HOST(rx_crc_errors), + GBENU_STATS_HOST(rx_oversized_frames), + GBENU_STATS_HOST(rx_undersized_frames), + GBENU_STATS_HOST(ale_drop), + GBENU_STATS_HOST(ale_overrun_drop), + GBENU_STATS_HOST(rx_bytes), + GBENU_STATS_HOST(tx_good_frames), + GBENU_STATS_HOST(tx_broadcast_frames), + GBENU_STATS_HOST(tx_multicast_frames), + GBENU_STATS_HOST(tx_bytes), + GBENU_STATS_HOST(tx_64B_frames), + GBENU_STATS_HOST(tx_65_to_127B_frames), + GBENU_STATS_HOST(tx_128_to_255B_frames), + GBENU_STATS_HOST(tx_256_to_511B_frames), + GBENU_STATS_HOST(tx_512_to_1023B_frames), + GBENU_STATS_HOST(tx_1024B_frames), + GBENU_STATS_HOST(net_bytes), + GBENU_STATS_HOST(rx_bottom_fifo_drop), + GBENU_STATS_HOST(rx_port_mask_drop), + GBENU_STATS_HOST(rx_top_fifo_drop), + GBENU_STATS_HOST(ale_rate_limit_drop), + GBENU_STATS_HOST(ale_vid_ingress_drop), + GBENU_STATS_HOST(ale_da_eq_sa_drop), + GBENU_STATS_HOST(ale_unknown_ucast), + GBENU_STATS_HOST(ale_unknown_ucast_bytes), + GBENU_STATS_HOST(ale_unknown_mcast), + GBENU_STATS_HOST(ale_unknown_mcast_bytes), + GBENU_STATS_HOST(ale_unknown_bcast), + GBENU_STATS_HOST(ale_unknown_bcast_bytes), + GBENU_STATS_HOST(tx_mem_protect_err), + /* GBENU Module 1 */ + GBENU_STATS_P1(rx_good_frames), + GBENU_STATS_P1(rx_broadcast_frames), + GBENU_STATS_P1(rx_multicast_frames), + GBENU_STATS_P1(rx_pause_frames), + GBENU_STATS_P1(rx_crc_errors), + GBENU_STATS_P1(rx_align_code_errors), + GBENU_STATS_P1(rx_oversized_frames), + GBENU_STATS_P1(rx_jabber_frames), + GBENU_STATS_P1(rx_undersized_frames), + GBENU_STATS_P1(rx_fragments), + GBENU_STATS_P1(ale_drop), + GBENU_STATS_P1(ale_overrun_drop), + GBENU_STATS_P1(rx_bytes), + GBENU_STATS_P1(tx_good_frames), + GBENU_STATS_P1(tx_broadcast_frames), + GBENU_STATS_P1(tx_multicast_frames), + GBENU_STATS_P1(tx_pause_frames), + GBENU_STATS_P1(tx_deferred_frames), + GBENU_STATS_P1(tx_collision_frames), + GBENU_STATS_P1(tx_single_coll_frames), + GBENU_STATS_P1(tx_mult_coll_frames), + GBENU_STATS_P1(tx_excessive_collisions), + GBENU_STATS_P1(tx_late_collisions), + GBENU_STATS_P1(rx_ipg_error), + GBENU_STATS_P1(tx_carrier_sense_errors), + GBENU_STATS_P1(tx_bytes), + GBENU_STATS_P1(tx_64B_frames), + GBENU_STATS_P1(tx_65_to_127B_frames), + GBENU_STATS_P1(tx_128_to_255B_frames), + GBENU_STATS_P1(tx_256_to_511B_frames), + GBENU_STATS_P1(tx_512_to_1023B_frames), + GBENU_STATS_P1(tx_1024B_frames), + GBENU_STATS_P1(net_bytes), + GBENU_STATS_P1(rx_bottom_fifo_drop), + GBENU_STATS_P1(rx_port_mask_drop), + GBENU_STATS_P1(rx_top_fifo_drop), + GBENU_STATS_P1(ale_rate_limit_drop), + GBENU_STATS_P1(ale_vid_ingress_drop), + GBENU_STATS_P1(ale_da_eq_sa_drop), + GBENU_STATS_P1(ale_unknown_ucast), + GBENU_STATS_P1(ale_unknown_ucast_bytes), + GBENU_STATS_P1(ale_unknown_mcast), + GBENU_STATS_P1(ale_unknown_mcast_bytes), + GBENU_STATS_P1(ale_unknown_bcast), + GBENU_STATS_P1(ale_unknown_bcast_bytes), + GBENU_STATS_P1(tx_mem_protect_err), + /* GBENU Module 2 */ + GBENU_STATS_P2(rx_good_frames), + 
GBENU_STATS_P2(rx_broadcast_frames), + GBENU_STATS_P2(rx_multicast_frames), + GBENU_STATS_P2(rx_pause_frames), + GBENU_STATS_P2(rx_crc_errors), + GBENU_STATS_P2(rx_align_code_errors), + GBENU_STATS_P2(rx_oversized_frames), + GBENU_STATS_P2(rx_jabber_frames), + GBENU_STATS_P2(rx_undersized_frames), + GBENU_STATS_P2(rx_fragments), + GBENU_STATS_P2(ale_drop), + GBENU_STATS_P2(ale_overrun_drop), + GBENU_STATS_P2(rx_bytes), + GBENU_STATS_P2(tx_good_frames), + GBENU_STATS_P2(tx_broadcast_frames), + GBENU_STATS_P2(tx_multicast_frames), + GBENU_STATS_P2(tx_pause_frames), + GBENU_STATS_P2(tx_deferred_frames), + GBENU_STATS_P2(tx_collision_frames), + GBENU_STATS_P2(tx_single_coll_frames), + GBENU_STATS_P2(tx_mult_coll_frames), + GBENU_STATS_P2(tx_excessive_collisions), + GBENU_STATS_P2(tx_late_collisions), + GBENU_STATS_P2(rx_ipg_error), + GBENU_STATS_P2(tx_carrier_sense_errors), + GBENU_STATS_P2(tx_bytes), + GBENU_STATS_P2(tx_64B_frames), + GBENU_STATS_P2(tx_65_to_127B_frames), + GBENU_STATS_P2(tx_128_to_255B_frames), + GBENU_STATS_P2(tx_256_to_511B_frames), + GBENU_STATS_P2(tx_512_to_1023B_frames), + GBENU_STATS_P2(tx_1024B_frames), + GBENU_STATS_P2(net_bytes), + GBENU_STATS_P2(rx_bottom_fifo_drop), + GBENU_STATS_P2(rx_port_mask_drop), + GBENU_STATS_P2(rx_top_fifo_drop), + GBENU_STATS_P2(ale_rate_limit_drop), + GBENU_STATS_P2(ale_vid_ingress_drop), + GBENU_STATS_P2(ale_da_eq_sa_drop), + GBENU_STATS_P2(ale_unknown_ucast), + GBENU_STATS_P2(ale_unknown_ucast_bytes), + GBENU_STATS_P2(ale_unknown_mcast), + GBENU_STATS_P2(ale_unknown_mcast_bytes), + GBENU_STATS_P2(ale_unknown_bcast), + GBENU_STATS_P2(ale_unknown_bcast_bytes), + GBENU_STATS_P2(tx_mem_protect_err), + /* GBENU Module 3 */ + GBENU_STATS_P3(rx_good_frames), + GBENU_STATS_P3(rx_broadcast_frames), + GBENU_STATS_P3(rx_multicast_frames), + GBENU_STATS_P3(rx_pause_frames), + GBENU_STATS_P3(rx_crc_errors), + GBENU_STATS_P3(rx_align_code_errors), + GBENU_STATS_P3(rx_oversized_frames), + GBENU_STATS_P3(rx_jabber_frames), + GBENU_STATS_P3(rx_undersized_frames), + GBENU_STATS_P3(rx_fragments), + GBENU_STATS_P3(ale_drop), + GBENU_STATS_P3(ale_overrun_drop), + GBENU_STATS_P3(rx_bytes), + GBENU_STATS_P3(tx_good_frames), + GBENU_STATS_P3(tx_broadcast_frames), + GBENU_STATS_P3(tx_multicast_frames), + GBENU_STATS_P3(tx_pause_frames), + GBENU_STATS_P3(tx_deferred_frames), + GBENU_STATS_P3(tx_collision_frames), + GBENU_STATS_P3(tx_single_coll_frames), + GBENU_STATS_P3(tx_mult_coll_frames), + GBENU_STATS_P3(tx_excessive_collisions), + GBENU_STATS_P3(tx_late_collisions), + GBENU_STATS_P3(rx_ipg_error), + GBENU_STATS_P3(tx_carrier_sense_errors), + GBENU_STATS_P3(tx_bytes), + GBENU_STATS_P3(tx_64B_frames), + GBENU_STATS_P3(tx_65_to_127B_frames), + GBENU_STATS_P3(tx_128_to_255B_frames), + GBENU_STATS_P3(tx_256_to_511B_frames), + GBENU_STATS_P3(tx_512_to_1023B_frames), + GBENU_STATS_P3(tx_1024B_frames), + GBENU_STATS_P3(net_bytes), + GBENU_STATS_P3(rx_bottom_fifo_drop), + GBENU_STATS_P3(rx_port_mask_drop), + GBENU_STATS_P3(rx_top_fifo_drop), + GBENU_STATS_P3(ale_rate_limit_drop), + GBENU_STATS_P3(ale_vid_ingress_drop), + GBENU_STATS_P3(ale_da_eq_sa_drop), + GBENU_STATS_P3(ale_unknown_ucast), + GBENU_STATS_P3(ale_unknown_ucast_bytes), + GBENU_STATS_P3(ale_unknown_mcast), + GBENU_STATS_P3(ale_unknown_mcast_bytes), + GBENU_STATS_P3(ale_unknown_bcast), + GBENU_STATS_P3(ale_unknown_bcast_bytes), + GBENU_STATS_P3(tx_mem_protect_err), + /* GBENU Module 4 */ + GBENU_STATS_P4(rx_good_frames), + GBENU_STATS_P4(rx_broadcast_frames), + GBENU_STATS_P4(rx_multicast_frames), + 
GBENU_STATS_P4(rx_pause_frames), + GBENU_STATS_P4(rx_crc_errors), + GBENU_STATS_P4(rx_align_code_errors), + GBENU_STATS_P4(rx_oversized_frames), + GBENU_STATS_P4(rx_jabber_frames), + GBENU_STATS_P4(rx_undersized_frames), + GBENU_STATS_P4(rx_fragments), + GBENU_STATS_P4(ale_drop), + GBENU_STATS_P4(ale_overrun_drop), + GBENU_STATS_P4(rx_bytes), + GBENU_STATS_P4(tx_good_frames), + GBENU_STATS_P4(tx_broadcast_frames), + GBENU_STATS_P4(tx_multicast_frames), + GBENU_STATS_P4(tx_pause_frames), + GBENU_STATS_P4(tx_deferred_frames), + GBENU_STATS_P4(tx_collision_frames), + GBENU_STATS_P4(tx_single_coll_frames), + GBENU_STATS_P4(tx_mult_coll_frames), + GBENU_STATS_P4(tx_excessive_collisions), + GBENU_STATS_P4(tx_late_collisions), + GBENU_STATS_P4(rx_ipg_error), + GBENU_STATS_P4(tx_carrier_sense_errors), + GBENU_STATS_P4(tx_bytes), + GBENU_STATS_P4(tx_64B_frames), + GBENU_STATS_P4(tx_65_to_127B_frames), + GBENU_STATS_P4(tx_128_to_255B_frames), + GBENU_STATS_P4(tx_256_to_511B_frames), + GBENU_STATS_P4(tx_512_to_1023B_frames), + GBENU_STATS_P4(tx_1024B_frames), + GBENU_STATS_P4(net_bytes), + GBENU_STATS_P4(rx_bottom_fifo_drop), + GBENU_STATS_P4(rx_port_mask_drop), + GBENU_STATS_P4(rx_top_fifo_drop), + GBENU_STATS_P4(ale_rate_limit_drop), + GBENU_STATS_P4(ale_vid_ingress_drop), + GBENU_STATS_P4(ale_da_eq_sa_drop), + GBENU_STATS_P4(ale_unknown_ucast), + GBENU_STATS_P4(ale_unknown_ucast_bytes), + GBENU_STATS_P4(ale_unknown_mcast), + GBENU_STATS_P4(ale_unknown_mcast_bytes), + GBENU_STATS_P4(ale_unknown_bcast), + GBENU_STATS_P4(ale_unknown_bcast_bytes), + GBENU_STATS_P4(tx_mem_protect_err), + /* GBENU Module 5 */ + GBENU_STATS_P5(rx_good_frames), + GBENU_STATS_P5(rx_broadcast_frames), + GBENU_STATS_P5(rx_multicast_frames), + GBENU_STATS_P5(rx_pause_frames), + GBENU_STATS_P5(rx_crc_errors), + GBENU_STATS_P5(rx_align_code_errors), + GBENU_STATS_P5(rx_oversized_frames), + GBENU_STATS_P5(rx_jabber_frames), + GBENU_STATS_P5(rx_undersized_frames), + GBENU_STATS_P5(rx_fragments), + GBENU_STATS_P5(ale_drop), + GBENU_STATS_P5(ale_overrun_drop), + GBENU_STATS_P5(rx_bytes), + GBENU_STATS_P5(tx_good_frames), + GBENU_STATS_P5(tx_broadcast_frames), + GBENU_STATS_P5(tx_multicast_frames), + GBENU_STATS_P5(tx_pause_frames), + GBENU_STATS_P5(tx_deferred_frames), + GBENU_STATS_P5(tx_collision_frames), + GBENU_STATS_P5(tx_single_coll_frames), + GBENU_STATS_P5(tx_mult_coll_frames), + GBENU_STATS_P5(tx_excessive_collisions), + GBENU_STATS_P5(tx_late_collisions), + GBENU_STATS_P5(rx_ipg_error), + GBENU_STATS_P5(tx_carrier_sense_errors), + GBENU_STATS_P5(tx_bytes), + GBENU_STATS_P5(tx_64B_frames), + GBENU_STATS_P5(tx_65_to_127B_frames), + GBENU_STATS_P5(tx_128_to_255B_frames), + GBENU_STATS_P5(tx_256_to_511B_frames), + GBENU_STATS_P5(tx_512_to_1023B_frames), + GBENU_STATS_P5(tx_1024B_frames), + GBENU_STATS_P5(net_bytes), + GBENU_STATS_P5(rx_bottom_fifo_drop), + GBENU_STATS_P5(rx_port_mask_drop), + GBENU_STATS_P5(rx_top_fifo_drop), + GBENU_STATS_P5(ale_rate_limit_drop), + GBENU_STATS_P5(ale_vid_ingress_drop), + GBENU_STATS_P5(ale_da_eq_sa_drop), + GBENU_STATS_P5(ale_unknown_ucast), + GBENU_STATS_P5(ale_unknown_ucast_bytes), + GBENU_STATS_P5(ale_unknown_mcast), + GBENU_STATS_P5(ale_unknown_mcast_bytes), + GBENU_STATS_P5(ale_unknown_bcast), + GBENU_STATS_P5(ale_unknown_bcast_bytes), + GBENU_STATS_P5(tx_mem_protect_err), + /* GBENU Module 6 */ + GBENU_STATS_P6(rx_good_frames), + GBENU_STATS_P6(rx_broadcast_frames), + GBENU_STATS_P6(rx_multicast_frames), + GBENU_STATS_P6(rx_pause_frames), + GBENU_STATS_P6(rx_crc_errors), + 
GBENU_STATS_P6(rx_align_code_errors), + GBENU_STATS_P6(rx_oversized_frames), + GBENU_STATS_P6(rx_jabber_frames), + GBENU_STATS_P6(rx_undersized_frames), + GBENU_STATS_P6(rx_fragments), + GBENU_STATS_P6(ale_drop), + GBENU_STATS_P6(ale_overrun_drop), + GBENU_STATS_P6(rx_bytes), + GBENU_STATS_P6(tx_good_frames), + GBENU_STATS_P6(tx_broadcast_frames), + GBENU_STATS_P6(tx_multicast_frames), + GBENU_STATS_P6(tx_pause_frames), + GBENU_STATS_P6(tx_deferred_frames), + GBENU_STATS_P6(tx_collision_frames), + GBENU_STATS_P6(tx_single_coll_frames), + GBENU_STATS_P6(tx_mult_coll_frames), + GBENU_STATS_P6(tx_excessive_collisions), + GBENU_STATS_P6(tx_late_collisions), + GBENU_STATS_P6(rx_ipg_error), + GBENU_STATS_P6(tx_carrier_sense_errors), + GBENU_STATS_P6(tx_bytes), + GBENU_STATS_P6(tx_64B_frames), + GBENU_STATS_P6(tx_65_to_127B_frames), + GBENU_STATS_P6(tx_128_to_255B_frames), + GBENU_STATS_P6(tx_256_to_511B_frames), + GBENU_STATS_P6(tx_512_to_1023B_frames), + GBENU_STATS_P6(tx_1024B_frames), + GBENU_STATS_P6(net_bytes), + GBENU_STATS_P6(rx_bottom_fifo_drop), + GBENU_STATS_P6(rx_port_mask_drop), + GBENU_STATS_P6(rx_top_fifo_drop), + GBENU_STATS_P6(ale_rate_limit_drop), + GBENU_STATS_P6(ale_vid_ingress_drop), + GBENU_STATS_P6(ale_da_eq_sa_drop), + GBENU_STATS_P6(ale_unknown_ucast), + GBENU_STATS_P6(ale_unknown_ucast_bytes), + GBENU_STATS_P6(ale_unknown_mcast), + GBENU_STATS_P6(ale_unknown_mcast_bytes), + GBENU_STATS_P6(ale_unknown_bcast), + GBENU_STATS_P6(ale_unknown_bcast_bytes), + GBENU_STATS_P6(tx_mem_protect_err), + /* GBENU Module 7 */ + GBENU_STATS_P7(rx_good_frames), + GBENU_STATS_P7(rx_broadcast_frames), + GBENU_STATS_P7(rx_multicast_frames), + GBENU_STATS_P7(rx_pause_frames), + GBENU_STATS_P7(rx_crc_errors), + GBENU_STATS_P7(rx_align_code_errors), + GBENU_STATS_P7(rx_oversized_frames), + GBENU_STATS_P7(rx_jabber_frames), + GBENU_STATS_P7(rx_undersized_frames), + GBENU_STATS_P7(rx_fragments), + GBENU_STATS_P7(ale_drop), + GBENU_STATS_P7(ale_overrun_drop), + GBENU_STATS_P7(rx_bytes), + GBENU_STATS_P7(tx_good_frames), + GBENU_STATS_P7(tx_broadcast_frames), + GBENU_STATS_P7(tx_multicast_frames), + GBENU_STATS_P7(tx_pause_frames), + GBENU_STATS_P7(tx_deferred_frames), + GBENU_STATS_P7(tx_collision_frames), + GBENU_STATS_P7(tx_single_coll_frames), + GBENU_STATS_P7(tx_mult_coll_frames), + GBENU_STATS_P7(tx_excessive_collisions), + GBENU_STATS_P7(tx_late_collisions), + GBENU_STATS_P7(rx_ipg_error), + GBENU_STATS_P7(tx_carrier_sense_errors), + GBENU_STATS_P7(tx_bytes), + GBENU_STATS_P7(tx_64B_frames), + GBENU_STATS_P7(tx_65_to_127B_frames), + GBENU_STATS_P7(tx_128_to_255B_frames), + GBENU_STATS_P7(tx_256_to_511B_frames), + GBENU_STATS_P7(tx_512_to_1023B_frames), + GBENU_STATS_P7(tx_1024B_frames), + GBENU_STATS_P7(net_bytes), + GBENU_STATS_P7(rx_bottom_fifo_drop), + GBENU_STATS_P7(rx_port_mask_drop), + GBENU_STATS_P7(rx_top_fifo_drop), + GBENU_STATS_P7(ale_rate_limit_drop), + GBENU_STATS_P7(ale_vid_ingress_drop), + GBENU_STATS_P7(ale_da_eq_sa_drop), + GBENU_STATS_P7(ale_unknown_ucast), + GBENU_STATS_P7(ale_unknown_ucast_bytes), + GBENU_STATS_P7(ale_unknown_mcast), + GBENU_STATS_P7(ale_unknown_mcast_bytes), + GBENU_STATS_P7(ale_unknown_bcast), + GBENU_STATS_P7(ale_unknown_bcast_bytes), + GBENU_STATS_P7(tx_mem_protect_err), + /* GBENU Module 8 */ + GBENU_STATS_P8(rx_good_frames), + GBENU_STATS_P8(rx_broadcast_frames), + GBENU_STATS_P8(rx_multicast_frames), + GBENU_STATS_P8(rx_pause_frames), + GBENU_STATS_P8(rx_crc_errors), + GBENU_STATS_P8(rx_align_code_errors), + GBENU_STATS_P8(rx_oversized_frames), + 
GBENU_STATS_P8(rx_jabber_frames), + GBENU_STATS_P8(rx_undersized_frames), + GBENU_STATS_P8(rx_fragments), + GBENU_STATS_P8(ale_drop), + GBENU_STATS_P8(ale_overrun_drop), + GBENU_STATS_P8(rx_bytes), + GBENU_STATS_P8(tx_good_frames), + GBENU_STATS_P8(tx_broadcast_frames), + GBENU_STATS_P8(tx_multicast_frames), + GBENU_STATS_P8(tx_pause_frames), + GBENU_STATS_P8(tx_deferred_frames), + GBENU_STATS_P8(tx_collision_frames), + GBENU_STATS_P8(tx_single_coll_frames), + GBENU_STATS_P8(tx_mult_coll_frames), + GBENU_STATS_P8(tx_excessive_collisions), + GBENU_STATS_P8(tx_late_collisions), + GBENU_STATS_P8(rx_ipg_error), + GBENU_STATS_P8(tx_carrier_sense_errors), + GBENU_STATS_P8(tx_bytes), + GBENU_STATS_P8(tx_64B_frames), + GBENU_STATS_P8(tx_65_to_127B_frames), + GBENU_STATS_P8(tx_128_to_255B_frames), + GBENU_STATS_P8(tx_256_to_511B_frames), + GBENU_STATS_P8(tx_512_to_1023B_frames), + GBENU_STATS_P8(tx_1024B_frames), + GBENU_STATS_P8(net_bytes), + GBENU_STATS_P8(rx_bottom_fifo_drop), + GBENU_STATS_P8(rx_port_mask_drop), + GBENU_STATS_P8(rx_top_fifo_drop), + GBENU_STATS_P8(ale_rate_limit_drop), + GBENU_STATS_P8(ale_vid_ingress_drop), + GBENU_STATS_P8(ale_da_eq_sa_drop), + GBENU_STATS_P8(ale_unknown_ucast), + GBENU_STATS_P8(ale_unknown_ucast_bytes), + GBENU_STATS_P8(ale_unknown_mcast), + GBENU_STATS_P8(ale_unknown_mcast_bytes), + GBENU_STATS_P8(ale_unknown_bcast), + GBENU_STATS_P8(ale_unknown_bcast_bytes), + GBENU_STATS_P8(tx_mem_protect_err), +}; + +#define XGBE_STATS0_INFO(field) \ +{ \ + "GBE_0:"#field, XGBE_STATS0_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} + +#define XGBE_STATS1_INFO(field) \ +{ \ + "GBE_1:"#field, XGBE_STATS1_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} + +#define XGBE_STATS2_INFO(field) \ +{ \ + "GBE_2:"#field, XGBE_STATS2_MODULE, \ + FIELD_SIZEOF(struct xgbe_hw_stats, field), \ + offsetof(struct xgbe_hw_stats, field) \ +} + +static const struct netcp_ethtool_stat xgbe10_et_stats[] = { + /* GBE module 0 */ + XGBE_STATS0_INFO(rx_good_frames), + XGBE_STATS0_INFO(rx_broadcast_frames), + XGBE_STATS0_INFO(rx_multicast_frames), + XGBE_STATS0_INFO(rx_oversized_frames), + XGBE_STATS0_INFO(rx_undersized_frames), + XGBE_STATS0_INFO(overrun_type4), + XGBE_STATS0_INFO(overrun_type5), + XGBE_STATS0_INFO(rx_bytes), + XGBE_STATS0_INFO(tx_good_frames), + XGBE_STATS0_INFO(tx_broadcast_frames), + XGBE_STATS0_INFO(tx_multicast_frames), + XGBE_STATS0_INFO(tx_bytes), + XGBE_STATS0_INFO(tx_64byte_frames), + XGBE_STATS0_INFO(tx_65_to_127byte_frames), + XGBE_STATS0_INFO(tx_128_to_255byte_frames), + XGBE_STATS0_INFO(tx_256_to_511byte_frames), + XGBE_STATS0_INFO(tx_512_to_1023byte_frames), + XGBE_STATS0_INFO(tx_1024byte_frames), + XGBE_STATS0_INFO(net_bytes), + XGBE_STATS0_INFO(rx_sof_overruns), + XGBE_STATS0_INFO(rx_mof_overruns), + XGBE_STATS0_INFO(rx_dma_overruns), + /* XGBE module 1 */ + XGBE_STATS1_INFO(rx_good_frames), + XGBE_STATS1_INFO(rx_broadcast_frames), + XGBE_STATS1_INFO(rx_multicast_frames), + XGBE_STATS1_INFO(rx_pause_frames), + XGBE_STATS1_INFO(rx_crc_errors), + XGBE_STATS1_INFO(rx_align_code_errors), + XGBE_STATS1_INFO(rx_oversized_frames), + XGBE_STATS1_INFO(rx_jabber_frames), + XGBE_STATS1_INFO(rx_undersized_frames), + XGBE_STATS1_INFO(rx_fragments), + XGBE_STATS1_INFO(overrun_type4), + XGBE_STATS1_INFO(overrun_type5), + XGBE_STATS1_INFO(rx_bytes), + XGBE_STATS1_INFO(tx_good_frames), + XGBE_STATS1_INFO(tx_broadcast_frames), + XGBE_STATS1_INFO(tx_multicast_frames), + 
XGBE_STATS1_INFO(tx_pause_frames), + XGBE_STATS1_INFO(tx_deferred_frames), + XGBE_STATS1_INFO(tx_collision_frames), + XGBE_STATS1_INFO(tx_single_coll_frames), + XGBE_STATS1_INFO(tx_mult_coll_frames), + XGBE_STATS1_INFO(tx_excessive_collisions), + XGBE_STATS1_INFO(tx_late_collisions), + XGBE_STATS1_INFO(tx_underrun), + XGBE_STATS1_INFO(tx_carrier_sense_errors), + XGBE_STATS1_INFO(tx_bytes), + XGBE_STATS1_INFO(tx_64byte_frames), + XGBE_STATS1_INFO(tx_65_to_127byte_frames), + XGBE_STATS1_INFO(tx_128_to_255byte_frames), + XGBE_STATS1_INFO(tx_256_to_511byte_frames), + XGBE_STATS1_INFO(tx_512_to_1023byte_frames), + XGBE_STATS1_INFO(tx_1024byte_frames), + XGBE_STATS1_INFO(net_bytes), + XGBE_STATS1_INFO(rx_sof_overruns), + XGBE_STATS1_INFO(rx_mof_overruns), + XGBE_STATS1_INFO(rx_dma_overruns), + /* XGBE module 2 */ + XGBE_STATS2_INFO(rx_good_frames), + XGBE_STATS2_INFO(rx_broadcast_frames), + XGBE_STATS2_INFO(rx_multicast_frames), + XGBE_STATS2_INFO(rx_pause_frames), + XGBE_STATS2_INFO(rx_crc_errors), + XGBE_STATS2_INFO(rx_align_code_errors), + XGBE_STATS2_INFO(rx_oversized_frames), + XGBE_STATS2_INFO(rx_jabber_frames), + XGBE_STATS2_INFO(rx_undersized_frames), + XGBE_STATS2_INFO(rx_fragments), + XGBE_STATS2_INFO(overrun_type4), + XGBE_STATS2_INFO(overrun_type5), + XGBE_STATS2_INFO(rx_bytes), + XGBE_STATS2_INFO(tx_good_frames), + XGBE_STATS2_INFO(tx_broadcast_frames), + XGBE_STATS2_INFO(tx_multicast_frames), + XGBE_STATS2_INFO(tx_pause_frames), + XGBE_STATS2_INFO(tx_deferred_frames), + XGBE_STATS2_INFO(tx_collision_frames), + XGBE_STATS2_INFO(tx_single_coll_frames), + XGBE_STATS2_INFO(tx_mult_coll_frames), + XGBE_STATS2_INFO(tx_excessive_collisions), + XGBE_STATS2_INFO(tx_late_collisions), + XGBE_STATS2_INFO(tx_underrun), + XGBE_STATS2_INFO(tx_carrier_sense_errors), + XGBE_STATS2_INFO(tx_bytes), + XGBE_STATS2_INFO(tx_64byte_frames), + XGBE_STATS2_INFO(tx_65_to_127byte_frames), + XGBE_STATS2_INFO(tx_128_to_255byte_frames), + XGBE_STATS2_INFO(tx_256_to_511byte_frames), + XGBE_STATS2_INFO(tx_512_to_1023byte_frames), + XGBE_STATS2_INFO(tx_1024byte_frames), + XGBE_STATS2_INFO(net_bytes), + XGBE_STATS2_INFO(rx_sof_overruns), + XGBE_STATS2_INFO(rx_mof_overruns), + XGBE_STATS2_INFO(rx_dma_overruns), +}; + +#define for_each_intf(i, priv) \ + list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list) + +#define for_each_sec_slave(slave, priv) \ + list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list) + +#define first_sec_slave(priv) \ + list_first_entry(&priv->secondary_slaves, \ + struct gbe_slave, slave_list) + +static void keystone_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver)); + strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version)); +} + +static u32 keystone_get_msglevel(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + + return netcp->msg_enable; +} + +static void keystone_set_msglevel(struct net_device *ndev, u32 value) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + + netcp->msg_enable = value; +} + +static void keystone_get_stat_strings(struct net_device *ndev, + uint32_t stringset, uint8_t *data) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct gbe_intf *gbe_intf; + struct gbe_priv *gbe_dev; + int i; + + gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); + if (!gbe_intf) + return; + gbe_dev = gbe_intf->gbe_dev; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < gbe_dev->num_et_stats; i++) { + memcpy(data, 
gbe_dev->et_stats[i].desc, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_TEST: + break; + } +} + +static int keystone_get_sset_count(struct net_device *ndev, int stringset) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct gbe_intf *gbe_intf; + struct gbe_priv *gbe_dev; + + gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); + if (!gbe_intf) + return -EINVAL; + gbe_dev = gbe_intf->gbe_dev; + + switch (stringset) { + case ETH_SS_TEST: + return 0; + case ETH_SS_STATS: + return gbe_dev->num_et_stats; + default: + return -EINVAL; + } +} + +static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) +{ + void __iomem *base = NULL; + u32 __iomem *p; + u32 tmp = 0; + int i; + + for (i = 0; i < gbe_dev->num_et_stats; i++) { + base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type]; + p = base + gbe_dev->et_stats[i].offset; + tmp = readl(p); + gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp; + if (data) + data[i] = gbe_dev->hw_stats[i]; + /* write-to-decrement: + * new register value = old register value - write value + */ + writel(tmp, p); + } +} + +static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) +{ + void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0]; + void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1]; + u64 *hw_stats = &gbe_dev->hw_stats[0]; + void __iomem *base = NULL; + u32 __iomem *p; + u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2); + int i, j, pair; + + for (pair = 0; pair < 2; pair++) { + val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + + if (pair == 0) + val &= ~GBE_STATS_CD_SEL; + else + val |= GBE_STATS_CD_SEL; + + /* make the stat modules visible */ + writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + + for (i = 0; i < pair_size; i++) { + j = pair * pair_size + i; + switch (gbe_dev->et_stats[j].type) { + case GBE_STATSA_MODULE: + case GBE_STATSC_MODULE: + base = gbe_statsa; + break; + case GBE_STATSB_MODULE: + case GBE_STATSD_MODULE: + base = gbe_statsb; + break; + } + + p = base + gbe_dev->et_stats[j].offset; + tmp = readl(p); + hw_stats[j] += tmp; + if (data) + data[j] = hw_stats[j]; + /* write-to-decrement: + * new register value = old register value - write value + */ + writel(tmp, p); + } + } +} + +static void keystone_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct gbe_intf *gbe_intf; + struct gbe_priv *gbe_dev; + + gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); + if (!gbe_intf) + return; + + gbe_dev = gbe_intf->gbe_dev; + spin_lock_bh(&gbe_dev->hw_stats_lock); + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + gbe_update_stats_ver14(gbe_dev, data); + else + gbe_update_stats(gbe_dev, data); + spin_unlock_bh(&gbe_dev->hw_stats_lock); +} + +static int keystone_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct phy_device *phy = ndev->phydev; + struct gbe_intf *gbe_intf; + int ret; + + if (!phy) + return -EINVAL; + + gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); + if (!gbe_intf) + return -EINVAL; + + if (!gbe_intf->slave) + return -EINVAL; + + ret = phy_ethtool_gset(phy, cmd); + if (!ret) + cmd->port = gbe_intf->slave->phy_port_t; + + return ret; +} + +static int keystone_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct phy_device *phy = ndev->phydev; + struct gbe_intf 
*gbe_intf; + u32 features = cmd->advertising & cmd->supported; + + if (!phy) + return -EINVAL; + + gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); + if (!gbe_intf) + return -EINVAL; + + if (!gbe_intf->slave) + return -EINVAL; + + if (cmd->port != gbe_intf->slave->phy_port_t) { + if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP)) + return -EINVAL; + + if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI)) + return -EINVAL; + + if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC)) + return -EINVAL; + + if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII)) + return -EINVAL; + + if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE)) + return -EINVAL; + } + + gbe_intf->slave->phy_port_t = cmd->port; + return phy_ethtool_sset(phy, cmd); +} + +static const struct ethtool_ops keystone_ethtool_ops = { + .get_drvinfo = keystone_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = keystone_get_msglevel, + .set_msglevel = keystone_set_msglevel, + .get_strings = keystone_get_stat_strings, + .get_sset_count = keystone_get_sset_count, + .get_ethtool_stats = keystone_get_ethtool_stats, + .get_settings = keystone_get_settings, + .set_settings = keystone_set_settings, +}; + +#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \ + ((mac)[2] << 16) | ((mac)[3] << 24)) +#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) + +static void gbe_set_slave_mac(struct gbe_slave *slave, + struct gbe_intf *gbe_intf) +{ + struct net_device *ndev = gbe_intf->ndev; + + writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi)); + writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo)); +} + +static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num) +{ + if (priv->host_port == 0) + return slave_num + 1; + + return slave_num; +} + +static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, + struct net_device *ndev, + struct gbe_slave *slave, + int up) +{ + struct phy_device *phy = slave->phy; + u32 mac_control = 0; + + if (up) { + mac_control = slave->mac_control; + if (phy && (phy->speed == SPEED_1000)) { + mac_control |= MACSL_GIG_MODE; + mac_control &= ~MACSL_XGIG_MODE; + } else if (phy && (phy->speed == SPEED_10000)) { + mac_control |= MACSL_XGIG_MODE; + mac_control &= ~MACSL_GIG_MODE; + } + + writel(mac_control, GBE_REG_ADDR(slave, emac_regs, + mac_control)); + + cpsw_ale_control_set(gbe_dev->ale, slave->port_num, + ALE_PORT_STATE, + ALE_PORT_STATE_FORWARD); + + if (ndev && slave->open && + slave->link_interface != SGMII_LINK_MAC_PHY && + slave->link_interface != XGMII_LINK_MAC_PHY) + netif_carrier_on(ndev); + } else { + writel(mac_control, GBE_REG_ADDR(slave, emac_regs, + mac_control)); + cpsw_ale_control_set(gbe_dev->ale, slave->port_num, + ALE_PORT_STATE, + ALE_PORT_STATE_DISABLE); + if (ndev && + slave->link_interface != SGMII_LINK_MAC_PHY && + slave->link_interface != XGMII_LINK_MAC_PHY) + netif_carrier_off(ndev); + } + + if (phy) + phy_print_status(phy); +} + +static bool gbe_phy_link_status(struct gbe_slave *slave) +{ + return !slave->phy || slave->phy->link; +} + +static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, + struct gbe_slave *slave, + struct net_device *ndev) +{ + int sp = slave->slave_num; + int phy_link_state, sgmii_link_state = 1, link_state; + + if (!slave->open) + return; + + if (!SLAVE_LINK_IS_XGMII(slave)) { + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + sgmii_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); + else + sgmii_link_state = + 
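The mac_hi()/mac_lo() macros above pack the six address bytes little-endian style: bytes 0-3 go into sa_hi, bytes 4-5 into the low 16 bits of sa_lo. A standalone check of that packing, using an arbitrary example address (not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
		     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x17, 0xea, 0x01, 0x02, 0x03 };

	/* prints sa_hi=0x01ea1700 sa_lo=0x0302 for this example address */
	printf("sa_hi=0x%08x sa_lo=0x%04x\n",
	       (unsigned int)mac_hi(mac), (unsigned int)mac_lo(mac));
	return 0;
}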
netcp_sgmii_get_port_link( + gbe_dev->sgmii_port_regs, sp); + } + + phy_link_state = gbe_phy_link_status(slave); + link_state = phy_link_state & sgmii_link_state; + + if (atomic_xchg(&slave->link_state, link_state) != link_state) + netcp_ethss_link_state_action(gbe_dev, ndev, slave, + link_state); +} + +static void xgbe_adjust_link(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct gbe_intf *gbe_intf; + + gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp); + if (!gbe_intf) + return; + + netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, + ndev); +} + +static void gbe_adjust_link(struct net_device *ndev) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct gbe_intf *gbe_intf; + + gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp); + if (!gbe_intf) + return; + + netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave, + ndev); +} + +static void gbe_adjust_link_sec_slaves(struct net_device *ndev) +{ + struct gbe_priv *gbe_dev = netdev_priv(ndev); + struct gbe_slave *slave; + + for_each_sec_slave(slave, gbe_dev) + netcp_ethss_update_link_state(gbe_dev, slave, NULL); +} + +/* Reset EMAC + * Soft reset is set and polled until clear, or until a timeout occurs + */ +static int gbe_port_reset(struct gbe_slave *slave) +{ + u32 i, v; + + /* Set the soft reset bit */ + writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset)); + + /* Wait for the bit to clear */ + for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) { + v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset)); + if ((v & SOFT_RESET_MASK) != SOFT_RESET) + return 0; + } + + /* Timeout on the reset */ + return GMACSL_RET_WARN_RESET_INCOMPLETE; +} + +/* Configure EMAC */ +static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, + int max_rx_len) +{ + void __iomem *rx_maxlen_reg; + u32 xgmii_mode; + + if (max_rx_len > NETCP_MAX_FRAME_SIZE) + max_rx_len = NETCP_MAX_FRAME_SIZE; + + /* Enable correct MII mode at SS level */ + if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) && + (slave->link_interface >= XGMII_LINK_MAC_PHY)) { + xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control)); + xgmii_mode |= (1 << slave->slave_num); + writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control)); + } + + if (IS_SS_ID_MU(gbe_dev)) + rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen); + else + rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen); + + writel(max_rx_len, rx_maxlen_reg); + writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); +} + +static void gbe_slave_stop(struct gbe_intf *intf) +{ + struct gbe_priv *gbe_dev = intf->gbe_dev; + struct gbe_slave *slave = intf->slave; + + gbe_port_reset(slave); + /* Disable forwarding */ + cpsw_ale_control_set(gbe_dev->ale, slave->port_num, + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast, + 1 << slave->port_num, 0, 0); + + if (!slave->phy) + return; + + phy_stop(slave->phy); + phy_disconnect(slave->phy); + slave->phy = NULL; +} + +static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) +{ + void __iomem *sgmii_port_regs; + + sgmii_port_regs = priv->sgmii_port_regs; + if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) + sgmii_port_regs = priv->sgmii_port34_regs; + + if (!SLAVE_LINK_IS_XGMII(slave)) { + netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); + netcp_sgmii_config(sgmii_port_regs, slave->slave_num, + slave->link_interface); + } +} + +static int gbe_slave_open(struct 
gbe_intf *gbe_intf) +{ + struct gbe_priv *priv = gbe_intf->gbe_dev; + struct gbe_slave *slave = gbe_intf->slave; + phy_interface_t phy_mode; + bool has_phy = false; + + void (*hndlr)(struct net_device *) = gbe_adjust_link; + + gbe_sgmii_config(priv, slave); + gbe_port_reset(slave); + gbe_port_config(priv, slave, priv->rx_packet_max); + gbe_set_slave_mac(slave, gbe_intf); + /* enable forwarding */ + cpsw_ale_control_set(priv->ale, slave->port_num, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast, + 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2); + + if (slave->link_interface == SGMII_LINK_MAC_PHY) { + has_phy = true; + phy_mode = PHY_INTERFACE_MODE_SGMII; + slave->phy_port_t = PORT_MII; + } else if (slave->link_interface == XGMII_LINK_MAC_PHY) { + has_phy = true; + phy_mode = PHY_INTERFACE_MODE_NA; + slave->phy_port_t = PORT_FIBRE; + } + + if (has_phy) { + if (priv->ss_version == XGBE_SS_VERSION_10) + hndlr = xgbe_adjust_link; + + slave->phy = of_phy_connect(gbe_intf->ndev, + slave->phy_node, + hndlr, 0, + phy_mode); + if (!slave->phy) { + dev_err(priv->dev, "phy not found on slave %d\n", + slave->slave_num); + return -ENODEV; + } + dev_dbg(priv->dev, "phy found: id is: 0x%s\n", + dev_name(&slave->phy->dev)); + phy_start(slave->phy); + phy_read_status(slave->phy); + } + return 0; +} + +static void gbe_init_host_port(struct gbe_priv *priv) +{ + int bypass_en = 1; + + /* Host Tx Pri */ + if (IS_SS_ID_NU(priv)) + writel(HOST_TX_PRI_MAP_DEFAULT, + GBE_REG_ADDR(priv, host_port_regs, tx_pri_map)); + + /* Max length register */ + writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs, + rx_maxlen)); + + cpsw_ale_start(priv->ale); + + if (priv->enable_ale) + bypass_en = 0; + + cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en); + + cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1); + + cpsw_ale_control_set(priv->ale, priv->host_port, + ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); + + cpsw_ale_control_set(priv->ale, 0, + ALE_PORT_UNKNOWN_VLAN_MEMBER, + GBE_PORT_MASK(priv->ale_ports)); + + cpsw_ale_control_set(priv->ale, 0, + ALE_PORT_UNKNOWN_MCAST_FLOOD, + GBE_PORT_MASK(priv->ale_ports - 1)); + + cpsw_ale_control_set(priv->ale, 0, + ALE_PORT_UNKNOWN_REG_MCAST_FLOOD, + GBE_PORT_MASK(priv->ale_ports)); + + cpsw_ale_control_set(priv->ale, 0, + ALE_PORT_UNTAGGED_EGRESS, + GBE_PORT_MASK(priv->ale_ports)); +} + +static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr) +{ + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + u16 vlan_id; + + cpsw_ale_add_mcast(gbe_dev->ale, addr, + GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0, + ALE_MCAST_FWD_2); + for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { + cpsw_ale_add_mcast(gbe_dev->ale, addr, + GBE_PORT_MASK(gbe_dev->ale_ports), + ALE_VLAN, vlan_id, ALE_MCAST_FWD_2); + } +} + +static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr) +{ + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + u16 vlan_id; + + cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0); + + for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) + cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, + ALE_VLAN, vlan_id); +} + +static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr) +{ + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + u16 vlan_id; + + cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0); + + for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { + cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id); + } +} + +static void 
gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr) +{ + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + u16 vlan_id; + + cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0); + + for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) { + cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, + ALE_VLAN, vlan_id); + } +} + +static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + + dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n", + naddr->addr, naddr->type); + + switch (naddr->type) { + case ADDR_MCAST: + case ADDR_BCAST: + gbe_add_mcast_addr(gbe_intf, naddr->addr); + break; + case ADDR_UCAST: + case ADDR_DEV: + gbe_add_ucast_addr(gbe_intf, naddr->addr); + break; + case ADDR_ANY: + /* nothing to do for promiscuous */ + default: + break; + } + + return 0; +} + +static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + + dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n", + naddr->addr, naddr->type); + + switch (naddr->type) { + case ADDR_MCAST: + case ADDR_BCAST: + gbe_del_mcast_addr(gbe_intf, naddr->addr); + break; + case ADDR_UCAST: + case ADDR_DEV: + gbe_del_ucast_addr(gbe_intf, naddr->addr); + break; + case ADDR_ANY: + /* nothing to do for promiscuous */ + default: + break; + } + + return 0; +} + +static int gbe_add_vid(void *intf_priv, int vid) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + + set_bit(vid, gbe_intf->active_vlans); + + cpsw_ale_add_vlan(gbe_dev->ale, vid, + GBE_PORT_MASK(gbe_dev->ale_ports), + GBE_MASK_NO_PORTS, + GBE_PORT_MASK(gbe_dev->ale_ports), + GBE_PORT_MASK(gbe_dev->ale_ports - 1)); + + return 0; +} + +static int gbe_del_vid(void *intf_priv, int vid) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + + cpsw_ale_del_vlan(gbe_dev->ale, vid, 0); + clear_bit(vid, gbe_intf->active_vlans); + return 0; +} + +static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct phy_device *phy = gbe_intf->slave->phy; + int ret = -EOPNOTSUPP; + + if (phy) + ret = phy_mii_ioctl(phy, req, cmd); + + return ret; +} + +static void netcp_ethss_timer(unsigned long arg) +{ + struct gbe_priv *gbe_dev = (struct gbe_priv *)arg; + struct gbe_intf *gbe_intf; + struct gbe_slave *slave; + + /* Check & update SGMII link state of interfaces */ + for_each_intf(gbe_intf, gbe_dev) { + if (!gbe_intf->slave->open) + continue; + netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave, + gbe_intf->ndev); + } + + /* Check & update SGMII link state of secondary ports */ + for_each_sec_slave(slave, gbe_dev) { + netcp_ethss_update_link_state(gbe_dev, slave, NULL); + } + + spin_lock_bh(&gbe_dev->hw_stats_lock); + + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + gbe_update_stats_ver14(gbe_dev, NULL); + else + gbe_update_stats(gbe_dev, NULL); + + spin_unlock_bh(&gbe_dev->hw_stats_lock); + + gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; + add_timer(&gbe_dev->timer); +} + +static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info) +{ + struct gbe_intf *gbe_intf = data; + + p_info->tx_pipe = &gbe_intf->tx_pipe; + return 0; +} + +static int gbe_open(void *intf_priv, struct net_device *ndev) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + struct 
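The four address helpers above share one pattern: program a VLAN-less ALE entry first, then mirror it once per VLAN id set in gbe_intf->active_vlans. A plain-C sketch of that bitmap walk (a 64-entry bitmap stands in for the kernel's VLAN_N_VID-sized one, and add_entry() stands in for the cpsw_ale_* calls):

#include <stdint.h>
#include <stdio.h>

#define N_VID 64                       /* small stand-in for VLAN_N_VID (4096) */

static uint64_t active_vlans;          /* one bit per configured VLAN id */

/* Stand-in for programming one ALE table entry. */
static void add_entry(int vlan)
{
	if (vlan < 0)
		printf("entry without VLAN\n");
	else
		printf("entry for VLAN %d\n", vlan);
}

static void add_addr_all_vlans(void)
{
	add_entry(-1);                          /* the VLAN-less entry */
	for (int vid = 0; vid < N_VID; vid++)   /* for_each_set_bit() equivalent */
		if (active_vlans & ((uint64_t)1 << vid))
			add_entry(vid);
}

int main(void)
{
	active_vlans |= (uint64_t)1 << 10;      /* e.g. after gbe_add_vid(10) */
	active_vlans |= (uint64_t)1 << 42;
	add_addr_all_vlans();
	return 0;
}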
netcp_intf *netcp = netdev_priv(ndev); + struct gbe_slave *slave = gbe_intf->slave; + int port_num = slave->port_num; + u32 reg; + int ret; + + reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver)); + dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n", + GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg), + GBE_RTL_VERSION(reg), GBE_IDENT(reg)); + + /* For 10G and on NetCP 1.5, use directed to port */ + if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev)) + gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO; + + if (gbe_dev->enable_ale) + gbe_intf->tx_pipe.switch_to_port = 0; + else + gbe_intf->tx_pipe.switch_to_port = port_num; + + dev_dbg(gbe_dev->dev, + "opened TX channel %s: %p with to port %d, flags %d\n", + gbe_intf->tx_pipe.dma_chan_name, + gbe_intf->tx_pipe.dma_channel, + gbe_intf->tx_pipe.switch_to_port, + gbe_intf->tx_pipe.flags); + + gbe_slave_stop(gbe_intf); + + /* disable priority elevation and enable statistics on all ports */ + writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype)); + + /* Control register */ + writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control)); + + /* All statistics enabled and STAT AB visible by default */ + writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs, + stat_port_en)); + + ret = gbe_slave_open(gbe_intf); + if (ret) + goto fail; + + netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook, + gbe_intf); + + slave->open = true; + netcp_ethss_update_link_state(gbe_dev, slave, ndev); + return 0; + +fail: + gbe_slave_stop(gbe_intf); + return ret; +} + +static int gbe_close(void *intf_priv, struct net_device *ndev) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct netcp_intf *netcp = netdev_priv(ndev); + + gbe_slave_stop(gbe_intf); + netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook, + gbe_intf); + + gbe_intf->slave->open = false; + atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID); + return 0; +} + +static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, + struct device_node *node) +{ + int port_reg_num; + u32 port_reg_ofs, emac_reg_ofs; + u32 port_reg_blk_sz, emac_reg_blk_sz; + + if (of_property_read_u32(node, "slave-port", &slave->slave_num)) { + dev_err(gbe_dev->dev, "missing slave-port parameter\n"); + return -EINVAL; + } + + if (of_property_read_u32(node, "link-interface", + &slave->link_interface)) { + dev_warn(gbe_dev->dev, + "missing link-interface value defaulting to 1G mac-phy link\n"); + slave->link_interface = SGMII_LINK_MAC_PHY; + } + + slave->open = false; + slave->phy_node = of_parse_phandle(node, "phy-handle", 0); + slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); + + if (slave->link_interface >= XGMII_LINK_MAC_PHY) + slave->mac_control = GBE_DEF_10G_MAC_CONTROL; + else + slave->mac_control = GBE_DEF_1G_MAC_CONTROL; + + /* Emac regs memmap are contiguous but port regs are not */ + port_reg_num = slave->slave_num; + if (gbe_dev->ss_version == GBE_SS_VERSION_14) { + if (slave->slave_num > 1) { + port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET; + port_reg_num -= 2; + } else { + port_reg_ofs = GBE13_SLAVE_PORT_OFFSET; + } + emac_reg_ofs = GBE13_EMAC_OFFSET; + port_reg_blk_sz = 0x30; + emac_reg_blk_sz = 0x40; + } else if (IS_SS_ID_MU(gbe_dev)) { + port_reg_ofs = GBENU_SLAVE_PORT_OFFSET; + emac_reg_ofs = GBENU_EMAC_OFFSET; + port_reg_blk_sz = 0x1000; + emac_reg_blk_sz = 0x1000; + } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { + port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; + emac_reg_ofs = 
XGBE10_EMAC_OFFSET; + port_reg_blk_sz = 0x30; + emac_reg_blk_sz = 0x40; + } else { + dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n", + gbe_dev->ss_version); + return -EINVAL; + } + + slave->port_regs = gbe_dev->switch_regs + port_reg_ofs + + (port_reg_blk_sz * port_reg_num); + slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs + + (emac_reg_blk_sz * slave->slave_num); + + if (gbe_dev->ss_version == GBE_SS_VERSION_14) { + /* Initialize slave port register offsets */ + GBE_SET_REG_OFS(slave, port_regs, port_vlan); + GBE_SET_REG_OFS(slave, port_regs, tx_pri_map); + GBE_SET_REG_OFS(slave, port_regs, sa_lo); + GBE_SET_REG_OFS(slave, port_regs, sa_hi); + GBE_SET_REG_OFS(slave, port_regs, ts_ctl); + GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); + GBE_SET_REG_OFS(slave, port_regs, ts_vlan); + GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); + GBE_SET_REG_OFS(slave, port_regs, ts_ctl2); + + /* Initialize EMAC register offsets */ + GBE_SET_REG_OFS(slave, emac_regs, mac_control); + GBE_SET_REG_OFS(slave, emac_regs, soft_reset); + GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); + + } else if (IS_SS_ID_MU(gbe_dev)) { + /* Initialize slave port register offsets */ + GBENU_SET_REG_OFS(slave, port_regs, port_vlan); + GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map); + GBENU_SET_REG_OFS(slave, port_regs, sa_lo); + GBENU_SET_REG_OFS(slave, port_regs, sa_hi); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl); + GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype); + GBENU_SET_REG_OFS(slave, port_regs, ts_vlan); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); + GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2); + GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen); + + /* Initialize EMAC register offsets */ + GBENU_SET_REG_OFS(slave, emac_regs, mac_control); + GBENU_SET_REG_OFS(slave, emac_regs, soft_reset); + + } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { + /* Initialize slave port register offsets */ + XGBE_SET_REG_OFS(slave, port_regs, port_vlan); + XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map); + XGBE_SET_REG_OFS(slave, port_regs, sa_lo); + XGBE_SET_REG_OFS(slave, port_regs, sa_hi); + XGBE_SET_REG_OFS(slave, port_regs, ts_ctl); + XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype); + XGBE_SET_REG_OFS(slave, port_regs, ts_vlan); + XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2); + XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2); + + /* Initialize EMAC register offsets */ + XGBE_SET_REG_OFS(slave, emac_regs, mac_control); + XGBE_SET_REG_OFS(slave, emac_regs, soft_reset); + XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen); + } + + atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID); + return 0; +} + +static void init_secondary_ports(struct gbe_priv *gbe_dev, + struct device_node *node) +{ + struct device *dev = gbe_dev->dev; + phy_interface_t phy_mode; + struct gbe_priv **priv; + struct device_node *port; + struct gbe_slave *slave; + bool mac_phy_link = false; + + for_each_child_of_node(node, port) { + slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); + if (!slave) { + dev_err(dev, + "memomry alloc failed for secondary port(%s), skipping...\n", + port->name); + continue; + } + + if (init_slave(gbe_dev, slave, port)) { + dev_err(dev, + "Failed to initialize secondary port(%s), skipping...\n", + port->name); + devm_kfree(dev, slave); + continue; + } + + gbe_sgmii_config(gbe_dev, slave); + gbe_port_reset(slave); + gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max); + list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves); + gbe_dev->num_slaves++; + if ((slave->link_interface == 
SGMII_LINK_MAC_PHY) || + (slave->link_interface == XGMII_LINK_MAC_PHY)) + mac_phy_link = true; + + slave->open = true; + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + break; + } + + /* of_phy_connect() is needed only for MAC-PHY interface */ + if (!mac_phy_link) + return; + + /* Allocate dummy netdev device for attaching to phy device */ + gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy", + NET_NAME_UNKNOWN, ether_setup); + if (!gbe_dev->dummy_ndev) { + dev_err(dev, + "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n"); + return; + } + priv = netdev_priv(gbe_dev->dummy_ndev); + *priv = gbe_dev; + + if (slave->link_interface == SGMII_LINK_MAC_PHY) { + phy_mode = PHY_INTERFACE_MODE_SGMII; + slave->phy_port_t = PORT_MII; + } else { + phy_mode = PHY_INTERFACE_MODE_NA; + slave->phy_port_t = PORT_FIBRE; + } + + for_each_sec_slave(slave, gbe_dev) { + if ((slave->link_interface != SGMII_LINK_MAC_PHY) && + (slave->link_interface != XGMII_LINK_MAC_PHY)) + continue; + slave->phy = + of_phy_connect(gbe_dev->dummy_ndev, + slave->phy_node, + gbe_adjust_link_sec_slaves, + 0, phy_mode); + if (!slave->phy) { + dev_err(dev, "phy not found for slave %d\n", + slave->slave_num); + slave->phy = NULL; + } else { + dev_dbg(dev, "phy found: id is: 0x%s\n", + dev_name(&slave->phy->dev)); + phy_start(slave->phy); + phy_read_status(slave->phy); + } + } +} + +static void free_secondary_ports(struct gbe_priv *gbe_dev) +{ + struct gbe_slave *slave; + + for (;;) { + slave = first_sec_slave(gbe_dev); + if (!slave) + break; + if (slave->phy) + phy_disconnect(slave->phy); + list_del(&slave->slave_list); + } + if (gbe_dev->dummy_ndev) + free_netdev(gbe_dev->dummy_ndev); +} + +static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, + struct device_node *node) +{ + struct resource res; + void __iomem *regs; + int ret, i; + + ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't xlate xgbe of node(%s) ss address at %d\n", + node->name, XGBE_SS_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n"); + return PTR_ERR(regs); + } + gbe_dev->ss_regs = regs; + + ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't xlate xgbe of node(%s) sm address at %d\n", + node->name, XGBE_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; + + ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't xlate xgbe serdes of node(%s) address at %d\n", + node->name, XGBE_SERDES_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n"); + return PTR_ERR(regs); + } + gbe_dev->xgbe_serdes_regs = regs; + + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, + XGBE10_NUM_STAT_ENTRIES * + (gbe_dev->max_num_ports) * sizeof(u64), + GFP_KERNEL); + if (!gbe_dev->hw_stats) { + dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); + return -ENOMEM; + } + + gbe_dev->ss_version = XGBE_SS_VERSION_10; + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + + XGBE10_SGMII_MODULE_OFFSET; + gbe_dev->host_port_regs = gbe_dev->ss_regs + 
XGBE10_HOST_PORT_OFFSET; + + for (i = 0; i < gbe_dev->max_num_ports; i++) + gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + + XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i); + + gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET; + gbe_dev->ale_ports = gbe_dev->max_num_ports; + gbe_dev->host_port = XGBE10_HOST_PORT_NUM; + gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; + gbe_dev->et_stats = xgbe10_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); + gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; + + /* Subsystem registers */ + XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); + XGBE_SET_REG_OFS(gbe_dev, ss_regs, control); + + /* Switch module registers */ + XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver); + XGBE_SET_REG_OFS(gbe_dev, switch_regs, control); + XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype); + XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); + XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control); + + /* Host port registers */ + XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); + XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map); + XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); + return 0; +} + +static int get_gbe_resource_version(struct gbe_priv *gbe_dev, + struct device_node *node) +{ + struct resource res; + void __iomem *regs; + int ret; + + ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of node(%s) of gbe ss address at %d\n", + node->name, GBE_SS_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, "Failed to map gbe register base\n"); + return PTR_ERR(regs); + } + gbe_dev->ss_regs = regs; + gbe_dev->ss_version = readl(gbe_dev->ss_regs); + return 0; +} + +static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, + struct device_node *node) +{ + struct resource res; + void __iomem *regs; + int i, ret; + + ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbe node(%s) address at index %d\n", + node->name, GBE_SGMII34_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbe sgmii port34 register base\n"); + return PTR_ERR(regs); + } + gbe_dev->sgmii_port34_regs = regs; + + ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbe node(%s) address at index %d\n", + node->name, GBE_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbe switch module register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; + + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, + GBE13_NUM_HW_STAT_ENTRIES * + gbe_dev->max_num_slaves * sizeof(u64), + GFP_KERNEL); + if (!gbe_dev->hw_stats) { + dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); + return -ENOMEM; + } + + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET; + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET; + + for (i = 0; i < gbe_dev->max_num_slaves; i++) { + gbe_dev->hw_stats_regs[i] = + gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET + + (GBE_HW_STATS_REG_MAP_SZ * i); + } + + gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET; + gbe_dev->ale_ports = gbe_dev->max_num_ports; + gbe_dev->host_port = GBE13_HOST_PORT_NUM; + 
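As in init_slave(), every per-port register window here is located as module base + fixed offset + stride * port index, and the driver simply caches the resulting pointers (hw_stats_regs[i], port_regs, emac_regs). A small model of that address arithmetic, with made-up offsets and stride:

#include <stdint.h>
#include <stdio.h>

#define MODULE_OFFSET  0x900u          /* made-up module offset */
#define PORT_STRIDE    0x100u          /* made-up per-port block size */

/* Offset of register 'reg' inside port 'idx', mirroring
 * base + OFFSET + MAP_SZ * i in the driver. */
static uintptr_t port_reg(uintptr_t base, unsigned int idx, unsigned int reg)
{
	return base + MODULE_OFFSET + PORT_STRIDE * idx + reg;
}

int main(void)
{
	uintptr_t base = 0x02090000u;      /* hypothetical ioremapped base */

	for (unsigned int i = 0; i < 4; i++)
		printf("port %u stats block at 0x%lx\n",
		       i, (unsigned long)port_reg(base, i, 0));
	return 0;
}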
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; + gbe_dev->et_stats = gbe13_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); + gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL; + + /* Subsystem registers */ + GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver); + + /* Switch module registers */ + GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver); + GBE_SET_REG_OFS(gbe_dev, switch_regs, control); + GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset); + GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); + GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype); + GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control); + + /* Host port registers */ + GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); + GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); + return 0; +} + +static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, + struct device_node *node) +{ + struct resource res; + void __iomem *regs; + int i, ret; + + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, + GBENU_NUM_HW_STAT_ENTRIES * + (gbe_dev->max_num_ports) * sizeof(u64), + GFP_KERNEL); + if (!gbe_dev->hw_stats) { + dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); + if (ret) { + dev_err(gbe_dev->dev, + "Can't translate of gbenu node(%s) addr at index %d\n", + node->name, GBENU_SM_REG_INDEX); + return ret; + } + + regs = devm_ioremap_resource(gbe_dev->dev, &res); + if (IS_ERR(regs)) { + dev_err(gbe_dev->dev, + "Failed to map gbenu switch module register base\n"); + return PTR_ERR(regs); + } + gbe_dev->switch_regs = regs; + + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; + + for (i = 0; i < (gbe_dev->max_num_ports); i++) + gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + + GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i); + + gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET; + gbe_dev->ale_ports = gbe_dev->max_num_ports; + gbe_dev->host_port = GBENU_HOST_PORT_NUM; + gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; + gbe_dev->et_stats = gbenu_et_stats; + gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; + + if (IS_SS_ID_NU(gbe_dev)) + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); + else + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + GBENU_ET_STATS_PORT_SIZE; + + /* Subsystem registers */ + GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); + + /* Switch module registers */ + GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, control); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en); + GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype); + + /* Host port registers */ + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan); + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen); + + /* For NU only. 2U does not need tx_pri_map. 
+ * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads + * while 2U has only 1 such thread + */ + GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map); + return 0; +} + +static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, + struct device_node *node, void **inst_priv) +{ + struct device_node *interfaces, *interface; + struct device_node *secondary_ports; + struct cpsw_ale_params ale_params; + struct gbe_priv *gbe_dev; + u32 slave_num; + int ret = 0; + + if (!node) { + dev_err(dev, "device tree info unavailable\n"); + return -ENODEV; + } + + gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL); + if (!gbe_dev) + return -ENOMEM; + + if (of_device_is_compatible(node, "ti,netcp-gbe-5") || + of_device_is_compatible(node, "ti,netcp-gbe")) { + gbe_dev->max_num_slaves = 4; + } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) { + gbe_dev->max_num_slaves = 8; + } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) { + gbe_dev->max_num_slaves = 1; + } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) { + gbe_dev->max_num_slaves = 2; + } else { + dev_err(dev, "device tree node for unknown device\n"); + return -EINVAL; + } + gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1; + + gbe_dev->dev = dev; + gbe_dev->netcp_device = netcp_device; + gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE; + + /* init the hw stats lock */ + spin_lock_init(&gbe_dev->hw_stats_lock); + + if (of_find_property(node, "enable-ale", NULL)) { + gbe_dev->enable_ale = true; + dev_info(dev, "ALE enabled\n"); + } else { + gbe_dev->enable_ale = false; + dev_dbg(dev, "ALE bypass enabled*\n"); + } + + ret = of_property_read_u32(node, "tx-queue", + &gbe_dev->tx_queue_id); + if (ret < 0) { + dev_err(dev, "missing tx_queue parameter\n"); + gbe_dev->tx_queue_id = GBE_TX_QUEUE; + } + + ret = of_property_read_string(node, "tx-channel", + &gbe_dev->dma_chan_name); + if (ret < 0) { + dev_err(dev, "missing \"tx-channel\" parameter\n"); + ret = -ENODEV; + goto quit; + } + + if (!strcmp(node->name, "gbe")) { + ret = get_gbe_resource_version(gbe_dev, node); + if (ret) + goto quit; + + dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); + + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + ret = set_gbe_ethss14_priv(gbe_dev, node); + else if (IS_SS_ID_MU(gbe_dev)) + ret = set_gbenu_ethss_priv(gbe_dev, node); + else + ret = -ENODEV; + + if (ret) + goto quit; + } else if (!strcmp(node->name, "xgbe")) { + ret = set_xgbe_ethss10_priv(gbe_dev, node); + if (ret) + goto quit; + ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, + gbe_dev->ss_regs); + if (ret) + goto quit; + } else { + dev_err(dev, "unknown GBE node(%s)\n", node->name); + ret = -ENODEV; + goto quit; + } + + interfaces = of_get_child_by_name(node, "interfaces"); + if (!interfaces) + dev_err(dev, "could not find interfaces\n"); + + ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, + gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); + if (ret) + goto quit; + + ret = netcp_txpipe_open(&gbe_dev->tx_pipe); + if (ret) + goto quit; + + /* Create network interfaces */ + INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); + for_each_child_of_node(interfaces, interface) { + ret = of_property_read_u32(interface, "slave-port", &slave_num); + if (ret) { + dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n", + interface->name); + continue; + } + gbe_dev->num_slaves++; + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + break; + } + + if (!gbe_dev->num_slaves) + dev_warn(dev, "No network 
interface configured\n"); + + /* Initialize Secondary slave ports */ + secondary_ports = of_get_child_by_name(node, "secondary-slave-ports"); + INIT_LIST_HEAD(&gbe_dev->secondary_slaves); + if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves)) + init_secondary_ports(gbe_dev, secondary_ports); + of_node_put(secondary_ports); + + if (!gbe_dev->num_slaves) { + dev_err(dev, "No network interface or secondary ports configured\n"); + ret = -ENODEV; + goto quit; + } + + memset(&ale_params, 0, sizeof(ale_params)); + ale_params.dev = gbe_dev->dev; + ale_params.ale_regs = gbe_dev->ale_reg; + ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT; + ale_params.ale_entries = gbe_dev->ale_entries; + ale_params.ale_ports = gbe_dev->ale_ports; + + gbe_dev->ale = cpsw_ale_create(&ale_params); + if (!gbe_dev->ale) { + dev_err(gbe_dev->dev, "error initializing ale engine\n"); + ret = -ENODEV; + goto quit; + } else { + dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); + } + + /* initialize host port */ + gbe_init_host_port(gbe_dev); + + init_timer(&gbe_dev->timer); + gbe_dev->timer.data = (unsigned long)gbe_dev; + gbe_dev->timer.function = netcp_ethss_timer; + gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; + add_timer(&gbe_dev->timer); + *inst_priv = gbe_dev; + return 0; + +quit: + if (gbe_dev->hw_stats) + devm_kfree(dev, gbe_dev->hw_stats); + cpsw_ale_destroy(gbe_dev->ale); + if (gbe_dev->ss_regs) + devm_iounmap(dev, gbe_dev->ss_regs); + of_node_put(interfaces); + devm_kfree(dev, gbe_dev); + return ret; +} + +static int gbe_attach(void *inst_priv, struct net_device *ndev, + struct device_node *node, void **intf_priv) +{ + struct gbe_priv *gbe_dev = inst_priv; + struct gbe_intf *gbe_intf; + int ret; + + if (!node) { + dev_err(gbe_dev->dev, "interface node not available\n"); + return -ENODEV; + } + + gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL); + if (!gbe_intf) + return -ENOMEM; + + gbe_intf->ndev = ndev; + gbe_intf->dev = gbe_dev->dev; + gbe_intf->gbe_dev = gbe_dev; + + gbe_intf->slave = devm_kzalloc(gbe_dev->dev, + sizeof(*gbe_intf->slave), + GFP_KERNEL); + if (!gbe_intf->slave) { + ret = -ENOMEM; + goto fail; + } + + if (init_slave(gbe_dev, gbe_intf->slave, node)) { + ret = -ENODEV; + goto fail; + } + + gbe_intf->tx_pipe = gbe_dev->tx_pipe; + ndev->ethtool_ops = &keystone_ethtool_ops; + list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head); + *intf_priv = gbe_intf; + return 0; + +fail: + if (gbe_intf->slave) + devm_kfree(gbe_dev->dev, gbe_intf->slave); + if (gbe_intf) + devm_kfree(gbe_dev->dev, gbe_intf); + return ret; +} + +static int gbe_release(void *intf_priv) +{ + struct gbe_intf *gbe_intf = intf_priv; + + gbe_intf->ndev->ethtool_ops = NULL; + list_del(&gbe_intf->gbe_intf_list); + devm_kfree(gbe_intf->dev, gbe_intf->slave); + devm_kfree(gbe_intf->dev, gbe_intf); + return 0; +} + +static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv) +{ + struct gbe_priv *gbe_dev = inst_priv; + + del_timer_sync(&gbe_dev->timer); + cpsw_ale_stop(gbe_dev->ale); + cpsw_ale_destroy(gbe_dev->ale); + netcp_txpipe_close(&gbe_dev->tx_pipe); + free_secondary_ports(gbe_dev); + + if (!list_empty(&gbe_dev->gbe_intf_head)) + dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); + + devm_kfree(gbe_dev->dev, gbe_dev->hw_stats); + devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs); + memset(gbe_dev, 0x00, sizeof(*gbe_dev)); + devm_kfree(gbe_dev->dev, gbe_dev); + return 0; +} + +static struct netcp_module gbe_module = { + .name = GBE_MODULE_NAME, + 
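gbe_probe() only gathers device-tree resources and builds state; the actual wiring to network interfaces happens through the struct netcp_module callback tables being defined here, which the NetCP core invokes as probe once per device, attach/open once per interface, and release/remove on teardown. A stripped-down model of such a callback table (invented names, not the netcp API):

#include <stdio.h>

/* Invented, minimal stand-in for a module ops table. */
struct demo_module {
	const char *name;
	int  (*probe)(void);
	int  (*open)(void);
	void (*remove)(void);
};

static int  demo_probe(void)  { printf("probe\n");  return 0; }
static int  demo_open(void)   { printf("open\n");   return 0; }
static void demo_remove(void) { printf("remove\n"); }

static const struct demo_module demo = {
	.name   = "demo-gbe",
	.probe  = demo_probe,
	.open   = demo_open,
	.remove = demo_remove,
};

int main(void)
{
	/* The core would store the table and call back at each stage. */
	if (demo.probe() == 0 && demo.open() == 0)
		demo.remove();
	return 0;
}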
.owner = THIS_MODULE, + .primary = true, + .probe = gbe_probe, + .open = gbe_open, + .close = gbe_close, + .remove = gbe_remove, + .attach = gbe_attach, + .release = gbe_release, + .add_addr = gbe_add_addr, + .del_addr = gbe_del_addr, + .add_vid = gbe_add_vid, + .del_vid = gbe_del_vid, + .ioctl = gbe_ioctl, +}; + +static struct netcp_module xgbe_module = { + .name = XGBE_MODULE_NAME, + .owner = THIS_MODULE, + .primary = true, + .probe = gbe_probe, + .open = gbe_open, + .close = gbe_close, + .remove = gbe_remove, + .attach = gbe_attach, + .release = gbe_release, + .add_addr = gbe_add_addr, + .del_addr = gbe_del_addr, + .add_vid = gbe_add_vid, + .del_vid = gbe_del_vid, + .ioctl = gbe_ioctl, +}; + +static int __init keystone_gbe_init(void) +{ + int ret; + + ret = netcp_register_module(&gbe_module); + if (ret) + return ret; + + ret = netcp_register_module(&xgbe_module); + if (ret) + return ret; + + return 0; +} +module_init(keystone_gbe_init); + +static void __exit keystone_gbe_exit(void) +{ + netcp_unregister_module(&gbe_module); + netcp_unregister_module(&xgbe_module); +} +module_exit(keystone_gbe_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs"); +MODULE_AUTHOR("Sandeep Nair + * Sandeep Paulraj + * Wingman Kwok + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "netcp.h" + +#define SGMII_REG_STATUS_LOCK BIT(4) +#define SGMII_REG_STATUS_LINK BIT(0) +#define SGMII_REG_STATUS_AUTONEG BIT(2) +#define SGMII_REG_CONTROL_AUTONEG BIT(0) + +#define SGMII23_OFFSET(x) ((x - 2) * 0x100) +#define SGMII_OFFSET(x) ((x <= 1) ? 
(x * 0x100) : (SGMII23_OFFSET(x))) + +/* SGMII registers */ +#define SGMII_SRESET_REG(x) (SGMII_OFFSET(x) + 0x004) +#define SGMII_CTL_REG(x) (SGMII_OFFSET(x) + 0x010) +#define SGMII_STATUS_REG(x) (SGMII_OFFSET(x) + 0x014) +#define SGMII_MRADV_REG(x) (SGMII_OFFSET(x) + 0x018) + +static void sgmii_write_reg(void __iomem *base, int reg, u32 val) +{ + writel(val, base + reg); +} + +static u32 sgmii_read_reg(void __iomem *base, int reg) +{ + return readl(base + reg); +} + +static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val) +{ + writel((readl(base + reg) | val), base + reg); +} + +/* port is 0 based */ +int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) +{ + /* Soft reset */ + sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); + while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) + ; + return 0; +} + +int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) +{ + u32 status = 0, link = 0; + + status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port)); + if ((status & SGMII_REG_STATUS_LINK) != 0) + link = 1; + return link; +} + +int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface) +{ + unsigned int i, status, mask; + u32 mr_adv_ability; + u32 control; + + switch (interface) { + case SGMII_LINK_MAC_MAC_AUTONEG: + mr_adv_ability = 0x9801; + control = 0x21; + break; + + case SGMII_LINK_MAC_PHY: + case SGMII_LINK_MAC_PHY_NO_MDIO: + mr_adv_ability = 1; + control = 1; + break; + + case SGMII_LINK_MAC_MAC_FORCED: + mr_adv_ability = 0x9801; + control = 0x20; + break; + + case SGMII_LINK_MAC_FIBER: + mr_adv_ability = 0x20; + control = 0x1; + break; + + default: + WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface); + return -EINVAL; + } + + sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0); + + /* Wait for the SerDes pll to lock */ + for (i = 0; i < 1000; i++) { + usleep_range(1000, 2000); + status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port)); + if ((status & SGMII_REG_STATUS_LOCK) != 0) + break; + } + + if ((status & SGMII_REG_STATUS_LOCK) == 0) + pr_err("serdes PLL not locked\n"); + + sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability); + sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control); + + mask = SGMII_REG_STATUS_LINK; + if (control & SGMII_REG_CONTROL_AUTONEG) + mask |= SGMII_REG_STATUS_AUTONEG; + + for (i = 0; i < 1000; i++) { + usleep_range(200, 500); + status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port)); + if ((status & mask) == mask) + break; + } + + return 0; +} diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c new file mode 100644 index 000000000..33571acc5 --- /dev/null +++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c @@ -0,0 +1,501 @@ +/* + * XGE PCSR module initialisation + * + * Copyright (C) 2014 Texas Instruments Incorporated + * Authors: Sandeep Nair + * WingMan Kwok + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "netcp.h" + +/* XGBE registers */ +#define XGBE_CTRL_OFFSET 0x0c +#define XGBE_SGMII_1_OFFSET 0x0114 +#define XGBE_SGMII_2_OFFSET 0x0214 + +/* PCS-R registers */ +#define PCSR_CPU_CTRL_OFFSET 0x1fd0 +#define POR_EN BIT(29) + +#define reg_rmw(addr, value, mask) \ + writel(((readl(addr) & (~(mask))) | \ + (value & (mask))), (addr)) + +/* bit mask of width w at offset s */ +#define MASK_WID_SH(w, s) (((1 << w) - 1) << s) + +/* shift value v to offset s */ +#define VAL_SH(v, s) (v << s) + +#define PHY_A(serdes) 0 + +struct serdes_cfg { + u32 ofs; + u32 val; + u32 mask; +}; + +static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = { + {0x0000, 0x00800002, 0x00ff00ff}, + {0x0014, 0x00003838, 0x0000ffff}, + {0x0060, 0x1c44e438, 0xffffffff}, + {0x0064, 0x00c18400, 0x00ffffff}, + {0x0068, 0x17078200, 0xffffff00}, + {0x006c, 0x00000014, 0x000000ff}, + {0x0078, 0x0000c000, 0x0000ff00}, + {0x0000, 0x00000003, 0x000000ff}, +}; + +static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = { + {0x0c00, 0x00030002, 0x00ff00ff}, + {0x0c14, 0x00005252, 0x0000ffff}, + {0x0c28, 0x80000000, 0xff000000}, + {0x0c2c, 0x000000f6, 0x000000ff}, + {0x0c3c, 0x04000405, 0xff00ffff}, + {0x0c40, 0xc0800000, 0xffff0000}, + {0x0c44, 0x5a202062, 0xffffffff}, + {0x0c48, 0x40040424, 0xffffffff}, + {0x0c4c, 0x00004002, 0x0000ffff}, + {0x0c50, 0x19001c00, 0xff00ff00}, + {0x0c54, 0x00002100, 0x0000ff00}, + {0x0c58, 0x00000060, 0x000000ff}, + {0x0c60, 0x80131e7c, 0xffffffff}, + {0x0c64, 0x8400cb02, 0xff00ffff}, + {0x0c68, 0x17078200, 0xffffff00}, + {0x0c6c, 0x00000016, 0x000000ff}, + {0x0c74, 0x00000400, 0x0000ff00}, + {0x0c78, 0x0000c000, 0x0000ff00}, + {0x0c00, 0x00000003, 0x000000ff}, +}; + +static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = { + {0x0204, 0x00000080, 0x000000ff}, + {0x0208, 0x0000920d, 0x0000ffff}, + {0x0204, 0xfc000000, 0xff000000}, + {0x0208, 0x00009104, 0x0000ffff}, + {0x0210, 0x1a000000, 0xff000000}, + {0x0214, 0x00006b58, 0x00ffffff}, + {0x0218, 0x75800084, 0xffff00ff}, + {0x022c, 0x00300000, 0x00ff0000}, + {0x0230, 0x00003800, 0x0000ff00}, + {0x024c, 0x008f0000, 0x00ff0000}, + {0x0250, 0x30000000, 0xff000000}, + {0x0260, 0x00000002, 0x000000ff}, + {0x0264, 0x00000057, 0x000000ff}, + {0x0268, 0x00575700, 0x00ffff00}, + {0x0278, 0xff000000, 0xff000000}, + {0x0280, 0x00500050, 0x00ff00ff}, + {0x0284, 0x00001f15, 0x0000ffff}, + {0x028c, 0x00006f00, 0x0000ff00}, + {0x0294, 0x00000000, 0xffffff00}, + {0x0298, 0x00002640, 0xff00ffff}, + {0x029c, 0x00000003, 0x000000ff}, + {0x02a4, 0x00000f13, 0x0000ffff}, + {0x02a8, 0x0001b600, 0x00ffff00}, + {0x0380, 0x00000030, 0x000000ff}, + {0x03c0, 0x00000200, 0x0000ff00}, + {0x03cc, 0x00000018, 0x000000ff}, + {0x03cc, 0x00000000, 0x000000ff}, +}; + +static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = { + {0x0a00, 0x00000800, 0x0000ff00}, + {0x0a84, 0x00000000, 0x000000ff}, + {0x0a8c, 0x00130000, 0x00ff0000}, + {0x0a90, 0x77a00000, 0xffff0000}, + {0x0a94, 0x00007777, 0x0000ffff}, + {0x0b08, 0x000f0000, 0xffff0000}, + {0x0b0c, 0x000f0000, 0x00ffffff}, + {0x0b10, 0xbe000000, 0xff000000}, + {0x0b14, 0x000000ff, 0x000000ff}, + {0x0b18, 0x00000014, 0x000000ff}, + {0x0b5c, 0x981b0000, 0xffff0000}, + {0x0b64, 0x00001100, 0x0000ff00}, + {0x0b78, 0x00000c00, 0x0000ff00}, + {0x0abc, 0xff000000, 0xff000000}, + {0x0ac0, 0x0000008b, 0x000000ff}, +}; + +static struct serdes_cfg cfg_cm_c1_c2[] = { + {0x0208, 0x00000000, 0x00000f00}, + {0x0208, 0x00000000, 0x0000001f}, + {0x0204, 0x00000000, 0x00040000}, + {0x0208, 0x000000a0, 0x000000e0}, +}; + +static void 
netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs) +{ + int i; + + /* cmu0 setup */ + for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) { + reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs, + cfg_phyb_1p25g_156p25mhz_cmu0[i].val, + cfg_phyb_1p25g_156p25mhz_cmu0[i].mask); + } + + /* cmu1 setup */ + for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) { + reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs, + cfg_phyb_10p3125g_156p25mhz_cmu1[i].val, + cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask); + } +} + +/* lane is 0 based */ +static void netcp_xgbe_serdes_lane_config( + void __iomem *serdes_regs, int lane) +{ + int i; + + /* lane setup */ + for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) { + reg_rmw(serdes_regs + + cfg_phyb_10p3125g_16bit_lane[i].ofs + + (0x200 * lane), + cfg_phyb_10p3125g_16bit_lane[i].val, + cfg_phyb_10p3125g_16bit_lane[i].mask); + } + + /* disable auto negotiation*/ + reg_rmw(serdes_regs + (0x200 * lane) + 0x0380, + 0x00000000, 0x00000010); + + /* disable link training */ + reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0, + 0x00000000, 0x00000200); +} + +static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) { + reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs, + cfg_phyb_10p3125g_comlane[i].val, + cfg_phyb_10p3125g_comlane[i].mask); + } +} + +static void netcp_xgbe_serdes_lane_enable( + void __iomem *serdes_regs, int lane) +{ + /* Set Lane Control Rate */ + writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane)); +} + +static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs) +{ + reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff); +} + +static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs) +{ + writel(0x88000000, serdes_regs + 0x1ff4); +} + +static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs) +{ + netcp_xgbe_serdes_phyb_rst_clr(serdes_regs); + writel(0xee000000, serdes_regs + 0x1ff4); +} + +static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs) +{ + unsigned long timeout; + int ret = 0; + u32 val_1, val_0; + + timeout = jiffies + msecs_to_jiffies(500); + do { + val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4)); + val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4)); + + if (val_1 && val_0) + return 0; + + if (time_after(jiffies, timeout)) { + ret = -ETIMEDOUT; + break; + } + + cpu_relax(); + } while (true); + + pr_err("XGBE serdes not locked: time out.\n"); + return ret; +} + +static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs) +{ + writel(0x03, sw_regs + XGBE_CTRL_OFFSET); +} + +static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs) +{ + u32 tmp; + + if (PHY_A(serdes_regs)) { + tmp = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff; + tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00); + } else { + tmp = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff; + } + + return tmp; +} + +static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs, + int select, int ofs) +{ + if (PHY_A(serdes_regs)) { + reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24, + ~0x00ffffff); + return; + } + + /* For 2 lane Phy-B, lane0 is actually lane1 */ + switch (select) { + case 1: + select = 2; + break; + case 2: + select = 3; + break; + default: + return; + } + + reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff); +} + +static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs, + int select, int ofs) 
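The serdes bring-up is table driven: each struct serdes_cfg row is an {offset, value, mask} triple applied through reg_rmw(), so only the bits selected by the mask change. The same masked update, runnable on plain memory (the two rows are copied from cfg_phyb_1p25g_156p25mhz_cmu0 above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

struct cfg { uint32_t ofs, val, mask; };

/* Read-modify-write: only bits inside 'mask' are replaced. */
static void rmw(uint32_t *regs, const struct cfg *c)
{
	regs[c->ofs / 4] = (regs[c->ofs / 4] & ~c->mask) | (c->val & c->mask);
}

static const struct cfg table[] = {
	{ 0x00, 0x00800002, 0x00ff00ff },   /* first cmu0 row above */
	{ 0x00, 0x00000003, 0x000000ff },   /* final cmu0 row re-touches the low byte */
};

int main(void)
{
	uint32_t regs[4] = { 0xdeadbeef, 0, 0, 0 };

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		rmw(regs, &table[i]);

	/* prints 0xde80be03: masked bytes updated, the rest preserved */
	printf("0x%08x\n", regs[0]);
	return 0;
}

The driver reuses the same loop shape for the cmu0, cmu1, per-lane and comlane tables.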
+{ + /* Set tbus address */ + netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs); + /* Get TBUS Value */ + return netcp_xgbe_serdes_read_tbus_val(serdes_regs); +} + +static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs, + void __iomem *sig_detect_reg, int lane) +{ + u32 tmp, dlpf, tbus; + + /*Get the DLPF values */ + tmp = netcp_xgbe_serdes_read_select_tbus( + serdes_regs, lane + 1, 5); + + dlpf = tmp >> 2; + + if (dlpf < 400 || dlpf > 700) { + reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1)); + mdelay(1); + reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1)); + } else { + tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane + + 1, 0xe); + + pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n", + tmp >> 2, tmp & 3, (tbus >> 2) & 3); + } +} + +/* Call every 100 ms */ +static int netcp_xgbe_check_link_status(void __iomem *serdes_regs, + void __iomem *sw_regs, u32 lanes, + u32 *current_state, u32 *lane_down) +{ + void __iomem *pcsr_base = sw_regs + 0x0600; + void __iomem *sig_detect_reg; + u32 pcsr_rx_stat, blk_lock, blk_errs; + int loss, i, status = 1; + + for (i = 0; i < lanes; i++) { + /* Get the Loss bit */ + loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1; + + /* Get Block Errors and Block Lock bits */ + pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80)); + blk_lock = (pcsr_rx_stat >> 30) & 0x1; + blk_errs = (pcsr_rx_stat >> 16) & 0x0ff; + + /* Get Signal Detect Overlay Address */ + sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04; + + /* If Block errors maxed out, attempt recovery! */ + if (blk_errs == 0x0ff) + blk_lock = 0; + + switch (current_state[i]) { + case 0: + /* if good link lock the signal detect ON! */ + if (!loss && blk_lock) { + pr_debug("XGBE PCSR Linked Lane: %d\n", i); + reg_rmw(sig_detect_reg, VAL_SH(3, 1), + MASK_WID_SH(2, 1)); + current_state[i] = 1; + } else if (!blk_lock) { + /* if no lock, then reset CDR */ + pr_debug("XGBE PCSR Recover Lane: %d\n", i); + netcp_xgbe_serdes_reset_cdr(serdes_regs, + sig_detect_reg, i); + } + break; + + case 1: + if (!blk_lock) { + /* Link Lost? */ + lane_down[i] = 1; + current_state[i] = 2; + } + break; + + case 2: + if (blk_lock) + /* Nope just noise */ + current_state[i] = 1; + else { + /* Lost the block lock, reset CDR if it is + * not centered and go back to sync state + */ + netcp_xgbe_serdes_reset_cdr(serdes_regs, + sig_detect_reg, i); + current_state[i] = 0; + } + break; + + default: + pr_err("XGBE: unknown current_state[%d] %d\n", + i, current_state[i]); + break; + } + + if (blk_errs > 0) { + /* Reset the Error counts! 
*/ + reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0), + MASK_WID_SH(8, 0)); + + reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0), + MASK_WID_SH(8, 0)); + } + + status &= (current_state[i] == 1); + } + + return status; +} + +static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs, + void __iomem *sw_regs) +{ + u32 current_state[2] = {0, 0}; + int retries = 0, link_up; + u32 lane_down[2]; + + do { + lane_down[0] = 0; + lane_down[1] = 0; + + link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2, + current_state, + lane_down); + + /* if we did not get link up then wait 100ms before calling + * it again + */ + if (link_up) + break; + + if (lane_down[0]) + pr_debug("XGBE: detected link down on lane 0\n"); + + if (lane_down[1]) + pr_debug("XGBE: detected link down on lane 1\n"); + + if (++retries > 1) { + pr_debug("XGBE: timeout waiting for serdes link up\n"); + return -ETIMEDOUT; + } + mdelay(100); + } while (!link_up); + + pr_debug("XGBE: PCSR link is up\n"); + return 0; +} + +static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs, + int lane, int cm, int c1, int c2) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) { + reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane), + cfg_cm_c1_c2[i].val, + cfg_cm_c1_c2[i].mask); + } +} + +static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs) +{ + /* Toggle the POR_EN bit in CONFIG.CPU_CTRL */ + /* enable POR_EN bit */ + reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN); + usleep_range(10, 100); + + /* disable POR_EN bit */ + reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN); + usleep_range(10, 100); +} + +static int netcp_xgbe_serdes_config(void __iomem *serdes_regs, + void __iomem *sw_regs) +{ + u32 ret, i; + + netcp_xgbe_serdes_pll_disable(serdes_regs); + netcp_xgbe_serdes_cmu_init(serdes_regs); + + for (i = 0; i < 2; i++) + netcp_xgbe_serdes_lane_config(serdes_regs, i); + + netcp_xgbe_serdes_com_enable(serdes_regs); + /* This is EVM + RTM-BOC specific */ + for (i = 0; i < 2; i++) + netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5); + + netcp_xgbe_serdes_pll_enable(serdes_regs); + for (i = 0; i < 2; i++) + netcp_xgbe_serdes_lane_enable(serdes_regs, i); + + /* SB PLL Status Poll */ + ret = netcp_xgbe_wait_pll_locked(sw_regs); + if (ret) + return ret; + + netcp_xgbe_serdes_enable_xgmii_port(sw_regs); + netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs); + return ret; +} + +int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs) +{ + u32 val; + + /* read COMLANE bits 4:0 */ + val = readl(serdes_regs + 0xa00); + if (val & 0x1f) { + pr_debug("XGBE: serdes already in operation - reset\n"); + netcp_xgbe_reset_serdes(serdes_regs); + } + return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs); +} diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c new file mode 100644 index 000000000..691ec936e --- /dev/null +++ b/drivers/net/ethernet/ti/tlan.c @@ -0,0 +1,3293 @@ +/******************************************************************************* + * + * Linux ThunderLAN Driver + * + * tlan.c + * by James Banks + * + * (C) 1997-1998 Caldera, Inc. + * (C) 1998 James Banks + * (C) 1999-2001 Torben Mathiasen + * (C) 2002 Samuel Chessman + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + ** Useful (if not required) reading: + * + * Texas Instruments, ThunderLAN Programmer's Guide, + * TI Literature Number SPWU013A + * available in PDF format from www.ti.com + * Level One, LXT901 and LXT970 Data Sheets + * available in PDF format from www.level1.com + * National Semiconductor, DP83840A Data Sheet + * available in PDF format from www.national.com + * Microchip Technology, 24C01A/02A/04A Data Sheet + * available in PDF format from www.microchip.com + * + ******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tlan.h" + + +/* For removing EISA devices */ +static struct net_device *tlan_eisa_devices; + +static int tlan_devices_installed; + +/* Set speed, duplex and aui settings */ +static int aui[MAX_TLAN_BOARDS]; +static int duplex[MAX_TLAN_BOARDS]; +static int speed[MAX_TLAN_BOARDS]; +static int boards_found; +module_param_array(aui, int, NULL, 0); +module_param_array(duplex, int, NULL, 0); +module_param_array(speed, int, NULL, 0); +MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); +MODULE_PARM_DESC(duplex, + "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); +MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)"); + +MODULE_AUTHOR("Maintainer: Samuel Chessman "); +MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); +MODULE_LICENSE("GPL"); + +/* Turn on debugging. See Documentation/networking/tlan.txt for details */ +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); + +static const char tlan_signature[] = "TLAN"; +static const char tlan_banner[] = "ThunderLAN driver v1.17\n"; +static int tlan_have_pci; +static int tlan_have_eisa; + +static const char * const media[] = { + "10BaseT-HD", "10BaseT-FD", "100baseTx-HD", + "100BaseTx-FD", "100BaseT4", NULL +}; + +static struct board { + const char *device_label; + u32 flags; + u16 addr_ofs; +} board_info[] = { + { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, + { "Compaq Netelligent 10/100 TX PCI UTP", + TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, + { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq NetFlex-3/P", + TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, + { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent Integrated 10/100 TX UTP", + TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, + { "Compaq Netelligent Dual 10/100 TX PCI UTP", + TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent 10/100 TX Embedded UTP", + TLAN_ADAPTER_NONE, 0x83 }, + { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, + { "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED | + TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 }, + { "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED | + TLAN_ADAPTER_USE_INTERN_10, 0xf8 }, + { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, + { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq NetFlex-3/E", + TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ + TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, + { "Compaq NetFlex-3/E", + TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ +}; + +static const struct pci_device_id tlan_pci_tbl[] = { + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { PCI_VENDOR_ID_COMPAQ, 
PCI_DEVICE_ID_COMPAQ_NETEL100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, + { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, + { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, + { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, + { 0,} +}; +MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); + +static void tlan_eisa_probe(void); +static void tlan_eisa_cleanup(void); +static int tlan_init(struct net_device *); +static int tlan_open(struct net_device *dev); +static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *); +static irqreturn_t tlan_handle_interrupt(int, void *); +static int tlan_close(struct net_device *); +static struct net_device_stats *tlan_get_stats(struct net_device *); +static void tlan_set_multicast_list(struct net_device *); +static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int tlan_probe1(struct pci_dev *pdev, long ioaddr, + int irq, int rev, const struct pci_device_id *ent); +static void tlan_tx_timeout(struct net_device *dev); +static void tlan_tx_timeout_work(struct work_struct *work); +static int tlan_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent); + +static u32 tlan_handle_tx_eof(struct net_device *, u16); +static u32 tlan_handle_stat_overflow(struct net_device *, u16); +static u32 tlan_handle_rx_eof(struct net_device *, u16); +static u32 tlan_handle_dummy(struct net_device *, u16); +static u32 tlan_handle_tx_eoc(struct net_device *, u16); +static u32 tlan_handle_status_check(struct net_device *, u16); +static u32 tlan_handle_rx_eoc(struct net_device *, u16); + +static void tlan_timer(unsigned long); + +static void tlan_reset_lists(struct net_device *); +static void tlan_free_lists(struct net_device *); +static void tlan_print_dio(u16); +static void tlan_print_list(struct tlan_list *, char *, int); +static void tlan_read_and_clear_stats(struct net_device *, int); +static void tlan_reset_adapter(struct net_device *); +static void tlan_finish_reset(struct net_device *); +static void tlan_set_mac(struct net_device *, int areg, char *mac); + +static void tlan_phy_print(struct net_device *); +static void tlan_phy_detect(struct net_device *); +static void tlan_phy_power_down(struct net_device *); +static void tlan_phy_power_up(struct net_device *); +static void tlan_phy_reset(struct net_device *); +static void tlan_phy_start_link(struct net_device *); +static void tlan_phy_finish_auto_neg(struct net_device *); +static void tlan_phy_monitor(unsigned long); + +/* + static int tlan_phy_nop(struct net_device *); + static int tlan_phy_internal_check(struct net_device *); + static int tlan_phy_internal_service(struct net_device *); + static int 
tlan_phy_dp83840a_check(struct net_device *); +*/ + +static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *); +static void tlan_mii_send_data(u16, u32, unsigned); +static void tlan_mii_sync(u16); +static void tlan_mii_write_reg(struct net_device *, u16, u16, u16); + +static void tlan_ee_send_start(u16); +static int tlan_ee_send_byte(u16, u8, int); +static void tlan_ee_receive_byte(u16, u8 *, int); +static int tlan_ee_read_byte(struct net_device *, u8, u8 *); + + +static inline void +tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb) +{ + unsigned long addr = (unsigned long)skb; + tag->buffer[9].address = addr; + tag->buffer[8].address = upper_32_bits(addr); +} + +static inline struct sk_buff * +tlan_get_skb(const struct tlan_list *tag) +{ + unsigned long addr; + + addr = tag->buffer[9].address; + addr |= ((unsigned long) tag->buffer[8].address << 16) << 16; + return (struct sk_buff *) addr; +} + +static u32 +(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = { + NULL, + tlan_handle_tx_eof, + tlan_handle_stat_overflow, + tlan_handle_rx_eof, + tlan_handle_dummy, + tlan_handle_tx_eoc, + tlan_handle_status_check, + tlan_handle_rx_eoc +}; + +static inline void +tlan_set_timer(struct net_device *dev, u32 ticks, u32 type) +{ + struct tlan_priv *priv = netdev_priv(dev); + unsigned long flags = 0; + + if (!in_irq()) + spin_lock_irqsave(&priv->lock, flags); + if (priv->timer.function != NULL && + priv->timer_type != TLAN_TIMER_ACTIVITY) { + if (!in_irq()) + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + priv->timer.function = tlan_timer; + if (!in_irq()) + spin_unlock_irqrestore(&priv->lock, flags); + + priv->timer.data = (unsigned long) dev; + priv->timer_set_at = jiffies; + priv->timer_type = type; + mod_timer(&priv->timer, jiffies + ticks); + +} + + +/***************************************************************************** +****************************************************************************** + +ThunderLAN driver primary functions + +these functions are more or less common to all linux network drivers. + +****************************************************************************** +*****************************************************************************/ + + + + + +/*************************************************************** + * tlan_remove_one + * + * Returns: + * Nothing + * Parms: + * None + * + * Goes through the TLanDevices list and frees the device + * structs and memory associated with each device (lists + * and buffers). It also ureserves the IO port regions + * associated with this device. + * + **************************************************************/ + + +static void tlan_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tlan_priv *priv = netdev_priv(dev); + + unregister_netdev(dev); + + if (priv->dma_storage) { + pci_free_consistent(priv->pci_dev, + priv->dma_size, priv->dma_storage, + priv->dma_storage_dma); + } + +#ifdef CONFIG_PCI + pci_release_regions(pdev); +#endif + + free_netdev(dev); + + cancel_work_sync(&priv->tlan_tqueue); +} + +static void tlan_start(struct net_device *dev) +{ + tlan_reset_lists(dev); + /* NOTE: It might not be necessary to read the stats before a + reset if you don't care what the values are. 
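A note on the helpers just above: tlan_store_skb() and tlan_get_skb() park the 64-bit sk_buff pointer in two otherwise-unused 32-bit address slots of the DMA list element (buffer[9] holds the low half, buffer[8] the high half), using a double 16-bit shift so the same code stays valid on 32-bit builds. A minimal stand-alone sketch of that packing scheme, with a plain struct standing in for struct tlan_list:

#include <stdint.h>
#include <stdio.h>

struct fake_list {                      /* stand-in for struct tlan_list        */
        uint32_t addr_lo;               /* plays the role of buffer[9].address  */
        uint32_t addr_hi;               /* plays the role of buffer[8].address  */
};

static void store_ptr(struct fake_list *tag, void *p)
{
        unsigned long addr = (unsigned long)p;

        tag->addr_lo = (uint32_t)addr;
        tag->addr_hi = (uint32_t)((addr >> 16) >> 16);  /* like upper_32_bits() */
}

static void *get_ptr(const struct fake_list *tag)
{
        unsigned long addr = tag->addr_lo;

        /* two 16-bit shifts: a no-op on 32-bit, the high word on 64-bit */
        addr |= ((unsigned long)tag->addr_hi << 16) << 16;
        return (void *)addr;
}

int main(void)
{
        struct fake_list tag;
        int x = 42;

        store_ptr(&tag, &x);
        printf("round-trip ok: %d\n", get_ptr(&tag) == (void *)&x);
        return 0;
}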
+ */ + tlan_read_and_clear_stats(dev, TLAN_IGNORE); + tlan_reset_adapter(dev); + netif_wake_queue(dev); +} + +static void tlan_stop(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + + del_timer_sync(&priv->media_timer); + tlan_read_and_clear_stats(dev, TLAN_RECORD); + outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); + /* Reset and power down phy */ + tlan_reset_adapter(dev); + if (priv->timer.function != NULL) { + del_timer_sync(&priv->timer); + priv->timer.function = NULL; + } +} + +#ifdef CONFIG_PM + +static int tlan_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) + tlan_stop(dev); + + netif_device_detach(dev); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int tlan_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + int rc = pci_enable_device(pdev); + + if (rc) + return rc; + pci_restore_state(pdev); + pci_enable_wake(pdev, PCI_D0, 0); + netif_device_attach(dev); + + if (netif_running(dev)) + tlan_start(dev); + + return 0; +} + +#else /* CONFIG_PM */ + +#define tlan_suspend NULL +#define tlan_resume NULL + +#endif /* CONFIG_PM */ + + +static struct pci_driver tlan_driver = { + .name = "tlan", + .id_table = tlan_pci_tbl, + .probe = tlan_init_one, + .remove = tlan_remove_one, + .suspend = tlan_suspend, + .resume = tlan_resume, +}; + +static int __init tlan_probe(void) +{ + int rc = -ENODEV; + + pr_info("%s", tlan_banner); + + TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); + + /* Use new style PCI probing. Now the kernel will + do most of this for us */ + rc = pci_register_driver(&tlan_driver); + + if (rc != 0) { + pr_err("Could not register pci driver\n"); + goto err_out_pci_free; + } + + TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); + tlan_eisa_probe(); + + pr_info("%d device%s installed, PCI: %d EISA: %d\n", + tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s", + tlan_have_pci, tlan_have_eisa); + + if (tlan_devices_installed == 0) { + rc = -ENODEV; + goto err_out_pci_unreg; + } + return 0; + +err_out_pci_unreg: + pci_unregister_driver(&tlan_driver); +err_out_pci_free: + return rc; +} + + +static int tlan_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + return tlan_probe1(pdev, -1, -1, 0, ent); +} + + +/* +*************************************************************** +* tlan_probe1 +* +* Returns: +* 0 on success, error code on error +* Parms: +* none +* +* The name is lower case to fit in with all the rest of +* the netcard_probe names. This function looks for +* another TLan based adapter, setting it up with the +* allocated device struct if one is found. +* tlan_probe has been ported to the new net API and +* now allocates its own device structure. This function +* is also used by modules. 
+* +**************************************************************/ + +static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev, + const struct pci_device_id *ent) +{ + + struct net_device *dev; + struct tlan_priv *priv; + u16 device_id; + int reg, rc = -ENODEV; + +#ifdef CONFIG_PCI + if (pdev) { + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, tlan_signature); + if (rc) { + pr_err("Could not reserve IO regions\n"); + goto err_out; + } + } +#endif /* CONFIG_PCI */ + + dev = alloc_etherdev(sizeof(struct tlan_priv)); + if (dev == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + SET_NETDEV_DEV(dev, &pdev->dev); + + priv = netdev_priv(dev); + + priv->pci_dev = pdev; + priv->dev = dev; + + /* Is this a PCI device? */ + if (pdev) { + u32 pci_io_base = 0; + + priv->adapter = &board_info[ent->driver_data]; + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + pr_err("No suitable PCI mapping available\n"); + goto err_out_free_dev; + } + + for (reg = 0; reg <= 5; reg++) { + if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { + pci_io_base = pci_resource_start(pdev, reg); + TLAN_DBG(TLAN_DEBUG_GNRL, + "IO mapping is available at %x.\n", + pci_io_base); + break; + } + } + if (!pci_io_base) { + pr_err("No IO mappings available\n"); + rc = -EIO; + goto err_out_free_dev; + } + + dev->base_addr = pci_io_base; + dev->irq = pdev->irq; + priv->adapter_rev = pdev->revision; + pci_set_master(pdev); + pci_set_drvdata(pdev, dev); + + } else { /* EISA card */ + /* This is a hack. We need to know which board structure + * is suited for this adapter */ + device_id = inw(ioaddr + EISA_ID2); + if (device_id == 0x20F1) { + priv->adapter = &board_info[13]; /* NetFlex-3/E */ + priv->adapter_rev = 23; /* TLAN 2.3 */ + } else { + priv->adapter = &board_info[14]; + priv->adapter_rev = 10; /* TLAN 1.0 */ + } + dev->base_addr = ioaddr; + dev->irq = irq; + } + + /* Kernel parameters */ + if (dev->mem_start) { + priv->aui = dev->mem_start & 0x01; + priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 + : (dev->mem_start & 0x06) >> 1; + priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 + : (dev->mem_start & 0x18) >> 3; + + if (priv->speed == 0x1) + priv->speed = TLAN_SPEED_10; + else if (priv->speed == 0x2) + priv->speed = TLAN_SPEED_100; + + debug = priv->debug = dev->mem_end; + } else { + priv->aui = aui[boards_found]; + priv->speed = speed[boards_found]; + priv->duplex = duplex[boards_found]; + priv->debug = debug; + } + + /* This will be used when we get an adapter error from + * within our irq handler */ + INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work); + + spin_lock_init(&priv->lock); + + rc = tlan_init(dev); + if (rc) { + pr_err("Could not set up device\n"); + goto err_out_free_dev; + } + + rc = register_netdev(dev); + if (rc) { + pr_err("Could not register device\n"); + goto err_out_uninit; + } + + + tlan_devices_installed++; + boards_found++; + + /* pdev is NULL if this is an EISA device */ + if (pdev) + tlan_have_pci++; + else { + priv->next_device = tlan_eisa_devices; + tlan_eisa_devices = dev; + tlan_have_eisa++; + } + + netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. 
%d\n", + (int)dev->irq, + (int)dev->base_addr, + priv->adapter->device_label, + priv->adapter_rev); + return 0; + +err_out_uninit: + pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage, + priv->dma_storage_dma); +err_out_free_dev: + free_netdev(dev); +err_out_regions: +#ifdef CONFIG_PCI + if (pdev) + pci_release_regions(pdev); +#endif +err_out: + if (pdev) + pci_disable_device(pdev); + return rc; +} + + +static void tlan_eisa_cleanup(void) +{ + struct net_device *dev; + struct tlan_priv *priv; + + while (tlan_have_eisa) { + dev = tlan_eisa_devices; + priv = netdev_priv(dev); + if (priv->dma_storage) { + pci_free_consistent(priv->pci_dev, priv->dma_size, + priv->dma_storage, + priv->dma_storage_dma); + } + release_region(dev->base_addr, 0x10); + unregister_netdev(dev); + tlan_eisa_devices = priv->next_device; + free_netdev(dev); + tlan_have_eisa--; + } +} + + +static void __exit tlan_exit(void) +{ + pci_unregister_driver(&tlan_driver); + + if (tlan_have_eisa) + tlan_eisa_cleanup(); + +} + + +/* Module loading/unloading */ +module_init(tlan_probe); +module_exit(tlan_exit); + + + +/************************************************************** + * tlan_eisa_probe + * + * Returns: 0 on success, 1 otherwise + * + * Parms: None + * + * + * This functions probes for EISA devices and calls + * TLan_probe1 when one is found. + * + *************************************************************/ + +static void __init tlan_eisa_probe(void) +{ + long ioaddr; + int rc = -ENODEV; + int irq; + u16 device_id; + + if (!EISA_bus) { + TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n"); + return; + } + + /* Loop through all slots of the EISA bus */ + for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { + + TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", + (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID)); + TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", + (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2)); + + + TLAN_DBG(TLAN_DEBUG_PROBE, + "Probing for EISA adapter at IO: 0x%4x : ", + (int) ioaddr); + if (request_region(ioaddr, 0x10, tlan_signature) == NULL) + goto out; + + if (inw(ioaddr + EISA_ID) != 0x110E) { + release_region(ioaddr, 0x10); + goto out; + } + + device_id = inw(ioaddr + EISA_ID2); + if (device_id != 0x20F1 && device_id != 0x40F1) { + release_region(ioaddr, 0x10); + goto out; + } + + /* check if adapter is enabled */ + if (inb(ioaddr + EISA_CR) != 0x1) { + release_region(ioaddr, 0x10); + goto out2; + } + + if (debug == 0x10) + pr_info("Found one\n"); + + + /* Get irq from board */ + switch (inb(ioaddr + 0xcc0)) { + case(0x10): + irq = 5; + break; + case(0x20): + irq = 9; + break; + case(0x40): + irq = 10; + break; + case(0x80): + irq = 11; + break; + default: + goto out; + } + + + /* Setup the newly found eisa adapter */ + rc = tlan_probe1(NULL, ioaddr, irq, + 12, NULL); + continue; + +out: + if (debug == 0x10) + pr_info("None found\n"); + continue; + +out2: + if (debug == 0x10) + pr_info("Card found but it is not enabled, skipping\n"); + continue; + + } + +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void tlan_poll(struct net_device *dev) +{ + disable_irq(dev->irq); + tlan_handle_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static const struct net_device_ops tlan_netdev_ops = { + .ndo_open = tlan_open, + .ndo_stop = tlan_close, + .ndo_start_xmit = tlan_start_tx, + .ndo_tx_timeout = tlan_tx_timeout, + .ndo_get_stats = tlan_get_stats, + .ndo_set_rx_mode = tlan_set_multicast_list, + .ndo_do_ioctl = tlan_ioctl, + .ndo_change_mtu = eth_change_mtu, + 
.ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tlan_poll, +#endif +}; + +static void tlan_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct tlan_priv *priv = netdev_priv(dev); + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + if (priv->pci_dev) + strlcpy(info->bus_info, pci_name(priv->pci_dev), + sizeof(info->bus_info)); + else + strlcpy(info->bus_info, "EISA", sizeof(info->bus_info)); + info->eedump_len = TLAN_EEPROM_SIZE; +} + +static int tlan_get_eeprom_len(struct net_device *dev) +{ + return TLAN_EEPROM_SIZE; +} + +static int tlan_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int i; + + for (i = 0; i < TLAN_EEPROM_SIZE; i++) + if (tlan_ee_read_byte(dev, i, &data[i])) + return -EIO; + + return 0; +} + +static const struct ethtool_ops tlan_ethtool_ops = { + .get_drvinfo = tlan_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_eeprom_len = tlan_get_eeprom_len, + .get_eeprom = tlan_get_eeprom, +}; + +/*************************************************************** + * tlan_init + * + * Returns: + * 0 on success, error code otherwise. + * Parms: + * dev The structure of the device to be + * init'ed. + * + * This function completes the initialization of the + * device structure and driver. It reserves the IO + * addresses, allocates memory for the lists and bounce + * buffers, retrieves the MAC address from the eeprom + * and assignes the device's methods. + * + **************************************************************/ + +static int tlan_init(struct net_device *dev) +{ + int dma_size; + int err; + int i; + struct tlan_priv *priv; + + priv = netdev_priv(dev); + + dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS) + * (sizeof(struct tlan_list)); + priv->dma_storage = pci_alloc_consistent(priv->pci_dev, + dma_size, + &priv->dma_storage_dma); + priv->dma_size = dma_size; + + if (priv->dma_storage == NULL) { + pr_err("Could not allocate lists and buffers for %s\n", + dev->name); + return -ENOMEM; + } + memset(priv->dma_storage, 0, dma_size); + priv->rx_list = (struct tlan_list *) + ALIGN((unsigned long)priv->dma_storage, 8); + priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8); + priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS; + priv->tx_list_dma = + priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS; + + err = 0; + for (i = 0; i < ETH_ALEN; i++) + err |= tlan_ee_read_byte(dev, + (u8) priv->adapter->addr_ofs + i, + (u8 *) &dev->dev_addr[i]); + if (err) { + pr_err("%s: Error reading MAC from eeprom: %d\n", + dev->name, err); + } + /* Olicom OC-2325/OC-2326 have the address byte-swapped */ + if (priv->adapter->addr_ofs == 0xf8) { + for (i = 0; i < ETH_ALEN; i += 2) { + char tmp = dev->dev_addr[i]; + dev->dev_addr[i] = dev->dev_addr[i + 1]; + dev->dev_addr[i + 1] = tmp; + } + } + + netif_carrier_off(dev); + + /* Device methods */ + dev->netdev_ops = &tlan_netdev_ops; + dev->ethtool_ops = &tlan_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + return 0; + +} + + + + +/*************************************************************** + * tlan_open + * + * Returns: + * 0 on success, error code otherwise. + * Parms: + * dev Structure of device to be opened. + * + * This routine puts the driver and TLAN adapter in a + * state where it is ready to send and receive packets. + * It allocates the IRQ, resets and brings the adapter + * out of reset, and allows interrupts. 
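tlan_init() above reads the station address a byte at a time from the EEPROM and, for the Olicom boards (addr_ofs == 0xf8), swaps each byte pair because the address is stored 16-bit-swapped. A stand-alone sketch of that fix-up; the sample address bytes are made up for illustration:

#include <stdio.h>

#define ETH_ALEN 6

/* swap each byte pair in place, as tlan_init() does for Olicom boards */
static void fixup_swapped_mac(unsigned char mac[ETH_ALEN])
{
        int i;

        for (i = 0; i < ETH_ALEN; i += 2) {
                unsigned char tmp = mac[i];

                mac[i] = mac[i + 1];
                mac[i + 1] = tmp;
        }
}

int main(void)
{
        /* example EEPROM contents, stored byte-pair swapped */
        unsigned char mac[ETH_ALEN] = { 0x20, 0x00, 0x4c, 0xaf, 0x56, 0x34 };
        int i;

        fixup_swapped_mac(mac);
        for (i = 0; i < ETH_ALEN; i++)
                printf("%02x%s", mac[i], i == ETH_ALEN - 1 ? "\n" : ":");
        return 0;
}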
It also delays + * the startup for autonegotiation or sends a Rx GO + * command to the adapter, as appropriate. + * + **************************************************************/ + +static int tlan_open(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + int err; + + priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION); + err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED, + dev->name, dev); + + if (err) { + netdev_err(dev, "Cannot open because IRQ %d is already in use\n", + dev->irq); + return err; + } + + init_timer(&priv->timer); + init_timer(&priv->media_timer); + + tlan_start(dev); + + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", + dev->name, priv->tlan_rev); + + return 0; + +} + + + +/************************************************************** + * tlan_ioctl + * + * Returns: + * 0 on success, error code otherwise + * Params: + * dev structure of device to receive ioctl. + * + * rq ifreq structure to hold userspace data. + * + * cmd ioctl command. + * + * + *************************************************************/ + +static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct tlan_priv *priv = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(rq); + u32 phy = priv->phy[priv->phy_num]; + + if (!priv->phy_online) + return -EAGAIN; + + switch (cmd) { + case SIOCGMIIPHY: /* get address of MII PHY in use. */ + data->phy_id = phy; + + + case SIOCGMIIREG: /* read MII PHY register. */ + tlan_mii_read_reg(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, &data->val_out); + return 0; + + + case SIOCSMIIREG: /* write MII PHY register. */ + tlan_mii_write_reg(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); + return 0; + default: + return -EOPNOTSUPP; + } +} + + +/*************************************************************** + * tlan_tx_timeout + * + * Returns: nothing + * + * Params: + * dev structure of device which timed out + * during transmit. + * + **************************************************************/ + +static void tlan_tx_timeout(struct net_device *dev) +{ + + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); + + /* Ok so we timed out, lets see what we can do about it...*/ + tlan_free_lists(dev); + tlan_reset_lists(dev); + tlan_read_and_clear_stats(dev, TLAN_IGNORE); + tlan_reset_adapter(dev); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + +} + + +/*************************************************************** + * tlan_tx_timeout_work + * + * Returns: nothing + * + * Params: + * work work item of device which timed out + * + **************************************************************/ + +static void tlan_tx_timeout_work(struct work_struct *work) +{ + struct tlan_priv *priv = + container_of(work, struct tlan_priv, tlan_tqueue); + + tlan_tx_timeout(priv->dev); +} + + + +/*************************************************************** + * tlan_start_tx + * + * Returns: + * 0 on success, non-zero on failure. + * Parms: + * skb A pointer to the sk_buff containing the + * frame to be sent. + * dev The device to send the data on. + * + * This function adds a frame to the Tx list to be sent + * ASAP. First it verifies that the adapter is ready and + * there is room in the queue. Then it sets up the next + * available list, copies the frame to the corresponding + * buffer. 
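tlan_ioctl() above exposes MII management through the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls, so generic MII utilities can drive the PHY. A minimal user-space sketch of reading PHY register 1 (BMSR, the MII status register) through that interface; the interface name "eth0" is only an example, and the program needs a driver that implements these ioctls plus the usual privileges:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        /* the MII request block overlays the ifreq's ifr_ifru union */
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* example name */

        if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)           /* fills mii->phy_id */
                return 1;

        mii->reg_num = MII_BMSR;                        /* register 1: status */
        if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
                return 1;

        printf("PHY 0x%02x BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
        close(fd);
        return 0;
}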
If the adapter Tx channel is idle, it gives + * the adapter a Tx Go command on the list, otherwise it + * sets the forward address of the previous list to point + * to this one. Then it frees the sk_buff. + * + **************************************************************/ + +static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + dma_addr_t tail_list_phys; + struct tlan_list *tail_list; + unsigned long flags; + unsigned int txlen; + + if (!priv->phy_online) { + TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", + dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb_padto(skb, TLAN_MIN_FRAME_SIZE)) + return NETDEV_TX_OK; + txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); + + tail_list = priv->tx_list + priv->tx_tail; + tail_list_phys = + priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail; + + if (tail_list->c_stat != TLAN_CSTAT_UNUSED) { + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", + dev->name, priv->tx_head, priv->tx_tail); + netif_stop_queue(dev); + priv->tx_busy_count++; + return NETDEV_TX_BUSY; + } + + tail_list->forward = 0; + + tail_list->buffer[0].address = pci_map_single(priv->pci_dev, + skb->data, txlen, + PCI_DMA_TODEVICE); + tlan_store_skb(tail_list, skb); + + tail_list->frame_size = (u16) txlen; + tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; + tail_list->buffer[1].count = 0; + tail_list->buffer[1].address = 0; + + spin_lock_irqsave(&priv->lock, flags); + tail_list->c_stat = TLAN_CSTAT_READY; + if (!priv->tx_in_progress) { + priv->tx_in_progress = 1; + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: Starting TX on buffer %d\n", + priv->tx_tail); + outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM); + outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD); + } else { + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: Adding buffer %d to TX channel\n", + priv->tx_tail); + if (priv->tx_tail == 0) { + (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward + = tail_list_phys; + } else { + (priv->tx_list + (priv->tx_tail - 1))->forward + = tail_list_phys; + } + } + spin_unlock_irqrestore(&priv->lock, flags); + + CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS); + + return NETDEV_TX_OK; + +} + + + + +/*************************************************************** + * tlan_handle_interrupt + * + * Returns: + * Nothing + * Parms: + * irq The line on which the interrupt + * occurred. + * dev_id A pointer to the device assigned to + * this irq line. + * + * This function handles an interrupt generated by its + * assigned TLAN adapter. The function deactivates + * interrupts on its adapter, records the type of + * interrupt, executes the appropriate subhandler, and + * acknowdges the interrupt to the adapter (thus + * re-enabling adapter interrupts. 
+ * + **************************************************************/ + +static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct tlan_priv *priv = netdev_priv(dev); + u16 host_int; + u16 type; + + spin_lock(&priv->lock); + + host_int = inw(dev->base_addr + TLAN_HOST_INT); + type = (host_int & TLAN_HI_IT_MASK) >> 2; + if (type) { + u32 ack; + u32 host_cmd; + + outw(host_int, dev->base_addr + TLAN_HOST_INT); + ack = tlan_int_vector[type](dev, host_int); + + if (ack) { + host_cmd = TLAN_HC_ACK | ack | (type << 18); + outl(host_cmd, dev->base_addr + TLAN_HOST_CMD); + } + } + + spin_unlock(&priv->lock); + + return IRQ_RETVAL(type); +} + + + + +/*************************************************************** + * tlan_close + * + * Returns: + * An error code. + * Parms: + * dev The device structure of the device to + * close. + * + * This function shuts down the adapter. It records any + * stats, puts the adapter into reset state, deactivates + * its time as needed, and frees the irq it is using. + * + **************************************************************/ + +static int tlan_close(struct net_device *dev) +{ + tlan_stop(dev); + + free_irq(dev->irq, dev); + tlan_free_lists(dev); + TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name); + + return 0; + +} + + + + +/*************************************************************** + * tlan_get_stats + * + * Returns: + * A pointer to the device's statistics structure. + * Parms: + * dev The device structure to return the + * stats for. + * + * This function updates the devices statistics by reading + * the TLAN chip's onboard registers. Then it returns the + * address of the statistics structure. + * + **************************************************************/ + +static struct net_device_stats *tlan_get_stats(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + int i; + + /* Should only read stats if open ? */ + tlan_read_and_clear_stats(dev, TLAN_RECORD); + + TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, + priv->rx_eoc_count); + TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, + priv->tx_busy_count); + if (debug & TLAN_DEBUG_GNRL) { + tlan_print_dio(dev->base_addr); + tlan_phy_print(dev); + } + if (debug & TLAN_DEBUG_LIST) { + for (i = 0; i < TLAN_NUM_RX_LISTS; i++) + tlan_print_list(priv->rx_list + i, "RX", i); + for (i = 0; i < TLAN_NUM_TX_LISTS; i++) + tlan_print_list(priv->tx_list + i, "TX", i); + } + + return &dev->stats; + +} + + + + +/*************************************************************** + * tlan_set_multicast_list + * + * Returns: + * Nothing + * Parms: + * dev The device structure to set the + * multicast list for. + * + * This function sets the TLAN adaptor to various receive + * modes. If the IFF_PROMISC flag is set, promiscuous + * mode is acitviated. Otherwise, promiscuous mode is + * turned off. If the IFF_ALLMULTI flag is set, then + * the hash table is set to receive all group addresses. + * Otherwise, the first three multicast addresses are + * stored in AREG_1-3, and the rest are selected via the + * hash table, as necessary. 
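tlan_handle_interrupt() above is a thin dispatcher: it extracts the interrupt type from HOST_INT, indexes the tlan_int_vector[] function-pointer table, and folds the handler's return value back into an acknowledge command. A stand-alone sketch of that dispatch pattern; the mask, shift and command-bit values here are placeholders for illustration, not the actual TLAN register layout:

#include <stdio.h>
#include <stdint.h>

#define INT_TYPE_MASK  0x001cu          /* placeholder for TLAN_HI_IT_MASK */
#define INT_TYPE_SHIFT 2
#define CMD_ACK        0x20000000u      /* placeholder acknowledge bit     */

typedef uint32_t (*int_handler)(uint16_t host_int);

static uint32_t handle_dummy(uint16_t host_int)
{
        printf("dummy handler, host_int=0x%04x\n", host_int);
        return 1;                       /* "acknowledge one list" */
}

static int_handler vector[8] = {
        NULL, handle_dummy, handle_dummy, handle_dummy,
        handle_dummy, handle_dummy, handle_dummy, handle_dummy,
};

static uint32_t service(uint16_t host_int)
{
        unsigned int type = (host_int & INT_TYPE_MASK) >> INT_TYPE_SHIFT;
        uint32_t ack, cmd = 0;

        if (type && vector[type]) {
                ack = vector[type](host_int);
                if (ack)
                        cmd = CMD_ACK | ack | (type << 18);
        }
        return cmd;                     /* what would be written to HOST_CMD */
}

int main(void)
{
        printf("host cmd = 0x%08x\n", (unsigned int)service(0x0004)); /* type 1 */
        return 0;
}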
+ * + **************************************************************/ + +static void tlan_set_multicast_list(struct net_device *dev) +{ + struct netdev_hw_addr *ha; + u32 hash1 = 0; + u32 hash2 = 0; + int i; + u32 offset; + u8 tmp; + + if (dev->flags & IFF_PROMISC) { + tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); + tlan_dio_write8(dev->base_addr, + TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF); + } else { + tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); + tlan_dio_write8(dev->base_addr, + TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF); + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < 3; i++) + tlan_set_mac(dev, i + 1, NULL); + tlan_dio_write32(dev->base_addr, TLAN_HASH_1, + 0xffffffff); + tlan_dio_write32(dev->base_addr, TLAN_HASH_2, + 0xffffffff); + } else { + i = 0; + netdev_for_each_mc_addr(ha, dev) { + if (i < 3) { + tlan_set_mac(dev, i + 1, + (char *) &ha->addr); + } else { + offset = + tlan_hash_func((u8 *)&ha->addr); + if (offset < 32) + hash1 |= (1 << offset); + else + hash2 |= (1 << (offset - 32)); + } + i++; + } + for ( ; i < 3; i++) + tlan_set_mac(dev, i + 1, NULL); + tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1); + tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2); + } + } + +} + + + +/***************************************************************************** +****************************************************************************** + +ThunderLAN driver interrupt vectors and table + +please see chap. 4, "Interrupt Handling" of the "ThunderLAN +Programmer's Guide" for more informations on handling interrupts +generated by TLAN based adapters. + +****************************************************************************** +*****************************************************************************/ + + + + +/*************************************************************** + * tlan_handle_tx_eof + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles Tx EOF interrupts which are raised + * by the adapter when it has completed sending the + * contents of a buffer. If detemines which list/buffer + * was completed and resets it. If the buffer was the last + * in the channel (EOC), then the function checks to see if + * another buffer is ready to send, and if so, sends a Tx + * Go command. Finally, the driver activates/continues the + * activity LED. 
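tlan_set_multicast_list() above spreads a 64-entry hash table across the two 32-bit HASH_1/HASH_2 registers: tlan_hash_func() reduces each multicast address to a 6-bit offset, and the corresponding bit is set in hash1 for offsets 0-31 or hash2 for 32-63. A small sketch of that bit placement, taking the 6-bit offsets as given:

#include <stdio.h>
#include <stdint.h>

/* place one 6-bit hash offset into the pair of 32-bit hash registers */
static void hash_set_bit(unsigned int offset, uint32_t *hash1, uint32_t *hash2)
{
        if (offset < 32)
                *hash1 |= 1u << offset;
        else
                *hash2 |= 1u << (offset - 32);
}

int main(void)
{
        uint32_t hash1 = 0, hash2 = 0;
        unsigned int offsets[] = { 3, 31, 32, 63 };     /* example hash values */
        unsigned int i;

        for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
                hash_set_bit(offsets[i], &hash1, &hash2);

        printf("HASH_1=0x%08x HASH_2=0x%08x\n",
               (unsigned int)hash1, (unsigned int)hash2);
        return 0;
}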
+ * + **************************************************************/ + +static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int) +{ + struct tlan_priv *priv = netdev_priv(dev); + int eoc = 0; + struct tlan_list *head_list; + dma_addr_t head_list_phys; + u32 ack = 0; + u16 tmp_c_stat; + + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", + priv->tx_head, priv->tx_tail); + head_list = priv->tx_list + priv->tx_head; + + while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) + && (ack < 255)) { + struct sk_buff *skb = tlan_get_skb(head_list); + + ack++; + pci_unmap_single(priv->pci_dev, head_list->buffer[0].address, + max(skb->len, + (unsigned int)TLAN_MIN_FRAME_SIZE), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + head_list->buffer[8].address = 0; + head_list->buffer[9].address = 0; + + if (tmp_c_stat & TLAN_CSTAT_EOC) + eoc = 1; + + dev->stats.tx_bytes += head_list->frame_size; + + head_list->c_stat = TLAN_CSTAT_UNUSED; + netif_start_queue(dev); + CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS); + head_list = priv->tx_list + priv->tx_head; + } + + if (!ack) + netdev_info(dev, + "Received interrupt for uncompleted TX frame\n"); + + if (eoc) { + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n", + priv->tx_head, priv->tx_tail); + head_list = priv->tx_list + priv->tx_head; + head_list_phys = priv->tx_list_dma + + sizeof(struct tlan_list)*priv->tx_head; + if ((head_list->c_stat & TLAN_CSTAT_READY) + == TLAN_CSTAT_READY) { + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + ack |= TLAN_HC_GO; + } else { + priv->tx_in_progress = 0; + } + } + + if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { + tlan_dio_write8(dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); + if (priv->timer.function == NULL) { + priv->timer.function = tlan_timer; + priv->timer.data = (unsigned long) dev; + priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; + priv->timer_set_at = jiffies; + priv->timer_type = TLAN_TIMER_ACTIVITY; + add_timer(&priv->timer); + } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { + priv->timer_set_at = jiffies; + } + } + + return ack; + +} + + + + +/*************************************************************** + * TLan_HandleStatOverflow + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Statistics Overflow interrupt + * which means that one or more of the TLAN statistics + * registers has reached 1/2 capacity and needs to be read. + * + **************************************************************/ + +static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int) +{ + tlan_read_and_clear_stats(dev, TLAN_RECORD); + + return 1; + +} + + + + +/*************************************************************** + * TLan_HandleRxEOF + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Rx EOF interrupt which + * indicates a frame has been received by the adapter from + * the net and the frame has been transferred to memory. + * The function determines the bounce buffer the frame has + * been loaded into, creates a new sk_buff big enough to + * hold the frame, and sends it to protocol stack. It + * then resets the used buffer and appends it to the end + * of the list. 
If the frame was the last in the Rx + * channel (EOC), the function restarts the receive channel + * by sending an Rx Go command to the adapter. Then it + * activates/continues the activity LED. + * + **************************************************************/ + +static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int) +{ + struct tlan_priv *priv = netdev_priv(dev); + u32 ack = 0; + int eoc = 0; + struct tlan_list *head_list; + struct sk_buff *skb; + struct tlan_list *tail_list; + u16 tmp_c_stat; + dma_addr_t head_list_phys; + + TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n", + priv->rx_head, priv->rx_tail); + head_list = priv->rx_list + priv->rx_head; + head_list_phys = + priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head; + + while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) + && (ack < 255)) { + dma_addr_t frame_dma = head_list->buffer[0].address; + u32 frame_size = head_list->frame_size; + struct sk_buff *new_skb; + + ack++; + if (tmp_c_stat & TLAN_CSTAT_EOC) + eoc = 1; + + new_skb = netdev_alloc_skb_ip_align(dev, + TLAN_MAX_FRAME_SIZE + 5); + if (!new_skb) + goto drop_and_reuse; + + skb = tlan_get_skb(head_list); + pci_unmap_single(priv->pci_dev, frame_dma, + TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); + skb_put(skb, frame_size); + + dev->stats.rx_bytes += frame_size; + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + + head_list->buffer[0].address = + pci_map_single(priv->pci_dev, new_skb->data, + TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); + + tlan_store_skb(head_list, new_skb); +drop_and_reuse: + head_list->forward = 0; + head_list->c_stat = 0; + tail_list = priv->rx_list + priv->rx_tail; + tail_list->forward = head_list_phys; + + CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS); + CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS); + head_list = priv->rx_list + priv->rx_head; + head_list_phys = priv->rx_list_dma + + sizeof(struct tlan_list)*priv->rx_head; + } + + if (!ack) + netdev_info(dev, + "Received interrupt for uncompleted RX frame\n"); + + + if (eoc) { + TLAN_DBG(TLAN_DEBUG_RX, + "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n", + priv->rx_head, priv->rx_tail); + head_list = priv->rx_list + priv->rx_head; + head_list_phys = priv->rx_list_dma + + sizeof(struct tlan_list)*priv->rx_head; + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + ack |= TLAN_HC_GO | TLAN_HC_RT; + priv->rx_eoc_count++; + } + + if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { + tlan_dio_write8(dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); + if (priv->timer.function == NULL) { + priv->timer.function = tlan_timer; + priv->timer.data = (unsigned long) dev; + priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; + priv->timer_set_at = jiffies; + priv->timer_type = TLAN_TIMER_ACTIVITY; + add_timer(&priv->timer); + } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { + priv->timer_set_at = jiffies; + } + } + + return ack; + +} + + + + +/*************************************************************** + * tlan_handle_dummy + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Dummy interrupt, which is + * raised whenever a test interrupt is generated by setting + * the Req_Int bit of HOST_CMD to 1. 
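tlan_handle_rx_eof() above follows the usual allocate-before-handoff refill policy: a replacement buffer is allocated first, and only if that succeeds is the completed buffer unmapped and passed to the stack; on allocation failure the old buffer is simply requeued and the frame dropped, so the receive ring never runs dry. A compact sketch of that policy, with plain malloc() standing in for skb allocation and DMA mapping:

#include <stdio.h>
#include <stdlib.h>

struct rx_slot {
        void *buf;              /* stands in for the mapped sk_buff data */
};

/* returns the completed buffer to hand upstream, or NULL if it was recycled */
static void *rx_complete(struct rx_slot *slot, size_t bufsize)
{
        void *done = slot->buf;
        void *fresh = malloc(bufsize);  /* netdev_alloc_skb_ip_align() here */

        if (!fresh)
                return NULL;    /* drop the frame, keep the old buffer queued */

        slot->buf = fresh;      /* descriptor now owns the replacement */
        return done;            /* caller passes this one to the stack */
}

int main(void)
{
        struct rx_slot slot = { .buf = malloc(1536) };
        void *frame = rx_complete(&slot, 1536);

        printf("%s\n", frame ? "frame delivered, slot refilled"
                             : "allocation failed, frame dropped");
        free(frame);
        free(slot.buf);
        return 0;
}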
+ * + **************************************************************/ + +static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int) +{ + netdev_info(dev, "Test interrupt\n"); + return 1; + +} + + + + +/*************************************************************** + * tlan_handle_tx_eoc + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This driver is structured to determine EOC occurrences by + * reading the CSTAT member of the list structure. Tx EOC + * interrupts are disabled via the DIO INTDIS register. + * However, TLAN chips before revision 3.0 didn't have this + * functionality, so process EOC events if this is the + * case. + * + **************************************************************/ + +static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int) +{ + struct tlan_priv *priv = netdev_priv(dev); + struct tlan_list *head_list; + dma_addr_t head_list_phys; + u32 ack = 1; + + host_int = 0; + if (priv->tlan_rev < 0x30) { + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n", + priv->tx_head, priv->tx_tail); + head_list = priv->tx_list + priv->tx_head; + head_list_phys = priv->tx_list_dma + + sizeof(struct tlan_list)*priv->tx_head; + if ((head_list->c_stat & TLAN_CSTAT_READY) + == TLAN_CSTAT_READY) { + netif_stop_queue(dev); + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + ack |= TLAN_HC_GO; + } else { + priv->tx_in_progress = 0; + } + } + + return ack; + +} + + + + +/*************************************************************** + * tlan_handle_status_check + * + * Returns: + * 0 if Adapter check, 1 if Network Status check. + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles Adapter Check/Network Status + * interrupts generated by the adapter. It checks the + * vector in the HOST_INT register to determine if it is + * an Adapter Check interrupt. If so, it resets the + * adapter. Otherwise it clears the status registers + * and services the PHY. 
+ * + **************************************************************/ + +static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int) +{ + struct tlan_priv *priv = netdev_priv(dev); + u32 ack; + u32 error; + u8 net_sts; + u32 phy; + u16 tlphy_ctl; + u16 tlphy_sts; + + ack = 1; + if (host_int & TLAN_HI_IV_MASK) { + netif_stop_queue(dev); + error = inl(dev->base_addr + TLAN_CH_PARM); + netdev_info(dev, "Adaptor Error = 0x%x\n", error); + tlan_read_and_clear_stats(dev, TLAN_RECORD); + outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); + + schedule_work(&priv->tlan_tqueue); + + netif_wake_queue(dev); + ack = 0; + } else { + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name); + phy = priv->phy[priv->phy_num]; + + net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS); + if (net_sts) { + tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", + dev->name, (unsigned) net_sts); + } + if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) { + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts); + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); + if (!(tlphy_sts & TLAN_TS_POLOK) && + !(tlphy_ctl & TLAN_TC_SWAPOL)) { + tlphy_ctl |= TLAN_TC_SWAPOL; + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, + tlphy_ctl); + } else if ((tlphy_sts & TLAN_TS_POLOK) && + (tlphy_ctl & TLAN_TC_SWAPOL)) { + tlphy_ctl &= ~TLAN_TC_SWAPOL; + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, + tlphy_ctl); + } + + if (debug) + tlan_phy_print(dev); + } + } + + return ack; + +} + + + + +/*************************************************************** + * tlan_handle_rx_eoc + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This driver is structured to determine EOC occurrences by + * reading the CSTAT member of the list structure. Rx EOC + * interrupts are disabled via the DIO INTDIS register. + * However, TLAN chips before revision 3.0 didn't have this + * CSTAT member or a INTDIS register, so if this chip is + * pre-3.0, process EOC interrupts normally. + * + **************************************************************/ + +static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int) +{ + struct tlan_priv *priv = netdev_priv(dev); + dma_addr_t head_list_phys; + u32 ack = 1; + + if (priv->tlan_rev < 0x30) { + TLAN_DBG(TLAN_DEBUG_RX, + "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n", + priv->rx_head, priv->rx_tail); + head_list_phys = priv->rx_list_dma + + sizeof(struct tlan_list)*priv->rx_head; + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); + ack |= TLAN_HC_GO | TLAN_HC_RT; + priv->rx_eoc_count++; + } + + return ack; + +} + + + + +/***************************************************************************** +****************************************************************************** + +ThunderLAN driver timer function + +****************************************************************************** +*****************************************************************************/ + + +/*************************************************************** + * tlan_timer + * + * Returns: + * Nothing + * Parms: + * data A value given to add timer when + * add_timer was called. + * + * This function handles timed functionality for the + * TLAN driver. The two current timer uses are for + * delaying for autonegotionation and driving the ACT LED. 
+ * - Autonegotiation requires being allowed about + * 2 1/2 seconds before attempting to transmit a + * packet. It would be a very bad thing to hang + * the kernel this long, so the driver doesn't + * allow transmission 'til after this time, for + * certain PHYs. It would be much nicer if all + * PHYs were interrupt-capable like the internal + * PHY. + * - The ACT LED, which shows adapter activity, is + * driven by the driver, and so must be left on + * for a short period to power up the LED so it + * can be seen. This delay can be changed by + * changing the TLAN_TIMER_ACT_DELAY in tlan.h, + * if desired. 100 ms produces a slightly + * sluggish response. + * + **************************************************************/ + +static void tlan_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct tlan_priv *priv = netdev_priv(dev); + u32 elapsed; + unsigned long flags = 0; + + priv->timer.function = NULL; + + switch (priv->timer_type) { + case TLAN_TIMER_PHY_PDOWN: + tlan_phy_power_down(dev); + break; + case TLAN_TIMER_PHY_PUP: + tlan_phy_power_up(dev); + break; + case TLAN_TIMER_PHY_RESET: + tlan_phy_reset(dev); + break; + case TLAN_TIMER_PHY_START_LINK: + tlan_phy_start_link(dev); + break; + case TLAN_TIMER_PHY_FINISH_AN: + tlan_phy_finish_auto_neg(dev); + break; + case TLAN_TIMER_FINISH_RESET: + tlan_finish_reset(dev); + break; + case TLAN_TIMER_ACTIVITY: + spin_lock_irqsave(&priv->lock, flags); + if (priv->timer.function == NULL) { + elapsed = jiffies - priv->timer_set_at; + if (elapsed >= TLAN_TIMER_ACT_DELAY) { + tlan_dio_write8(dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK); + } else { + priv->timer.function = tlan_timer; + priv->timer.expires = priv->timer_set_at + + TLAN_TIMER_ACT_DELAY; + spin_unlock_irqrestore(&priv->lock, flags); + add_timer(&priv->timer); + break; + } + } + spin_unlock_irqrestore(&priv->lock, flags); + break; + default: + break; + } + +} + + +/***************************************************************************** +****************************************************************************** + +ThunderLAN driver adapter related routines + +****************************************************************************** +*****************************************************************************/ + + +/*************************************************************** + * tlan_reset_lists + * + * Returns: + * Nothing + * Parms: + * dev The device structure with the list + * stuctures to be reset. + * + * This routine sets the variables associated with managing + * the TLAN lists to their initial values. 
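The TLAN_TIMER_ACTIVITY branch below measures how long the ACT LED has been lit with the unsigned subtraction jiffies - timer_set_at, which stays correct across a jiffies wrap, and then either drops back to the link-only LED state or re-arms the timer for the remainder of the delay. A stand-alone sketch of that wrap-safe elapsed-time check:

#include <stdio.h>

typedef unsigned long jiffies_t;
#define ACT_DELAY 10UL          /* stands in for TLAN_TIMER_ACT_DELAY */

/* returns 1 when the LED may go back to link-only, 0 to keep the timer armed */
static int act_delay_expired(jiffies_t now, jiffies_t set_at)
{
        /* unsigned wrap-around keeps this correct even if 'now' overflowed */
        return (now - set_at) >= ACT_DELAY;
}

int main(void)
{
        jiffies_t set_at = (jiffies_t)-3;       /* armed just before a wrap */

        printf("expired at wrap+2:  %d\n", act_delay_expired(2, set_at));
        printf("expired at wrap+20: %d\n", act_delay_expired(20, set_at));
        return 0;
}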
+ * + **************************************************************/ + +static void tlan_reset_lists(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + int i; + struct tlan_list *list; + dma_addr_t list_phys; + struct sk_buff *skb; + + priv->tx_head = 0; + priv->tx_tail = 0; + for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { + list = priv->tx_list + i; + list->c_stat = TLAN_CSTAT_UNUSED; + list->buffer[0].address = 0; + list->buffer[2].count = 0; + list->buffer[2].address = 0; + list->buffer[8].address = 0; + list->buffer[9].address = 0; + } + + priv->rx_head = 0; + priv->rx_tail = TLAN_NUM_RX_LISTS - 1; + for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { + list = priv->rx_list + i; + list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i; + list->c_stat = TLAN_CSTAT_READY; + list->frame_size = TLAN_MAX_FRAME_SIZE; + list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; + skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); + if (!skb) + break; + + list->buffer[0].address = pci_map_single(priv->pci_dev, + skb->data, + TLAN_MAX_FRAME_SIZE, + PCI_DMA_FROMDEVICE); + tlan_store_skb(list, skb); + list->buffer[1].count = 0; + list->buffer[1].address = 0; + list->forward = list_phys + sizeof(struct tlan_list); + } + + /* in case ran out of memory early, clear bits */ + while (i < TLAN_NUM_RX_LISTS) { + tlan_store_skb(priv->rx_list + i, NULL); + ++i; + } + list->forward = 0; + +} + + +static void tlan_free_lists(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + int i; + struct tlan_list *list; + struct sk_buff *skb; + + for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { + list = priv->tx_list + i; + skb = tlan_get_skb(list); + if (skb) { + pci_unmap_single( + priv->pci_dev, + list->buffer[0].address, + max(skb->len, + (unsigned int)TLAN_MIN_FRAME_SIZE), + PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + list->buffer[8].address = 0; + list->buffer[9].address = 0; + } + } + + for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { + list = priv->rx_list + i; + skb = tlan_get_skb(list); + if (skb) { + pci_unmap_single(priv->pci_dev, + list->buffer[0].address, + TLAN_MAX_FRAME_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + list->buffer[8].address = 0; + list->buffer[9].address = 0; + } + } +} + + + + +/*************************************************************** + * tlan_print_dio + * + * Returns: + * Nothing + * Parms: + * io_base Base IO port of the device of + * which to print DIO registers. + * + * This function prints out all the internal (DIO) + * registers of a TLAN chip. + * + **************************************************************/ + +static void tlan_print_dio(u16 io_base) +{ + u32 data0, data1; + int i; + + pr_info("Contents of internal registers for io base 0x%04hx\n", + io_base); + pr_info("Off. +0 +4\n"); + for (i = 0; i < 0x4C; i += 8) { + data0 = tlan_dio_read32(io_base, i); + data1 = tlan_dio_read32(io_base, i + 0x4); + pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1); + } + +} + + + + +/*************************************************************** + * TLan_PrintList + * + * Returns: + * Nothing + * Parms: + * list A pointer to the struct tlan_list structure to + * be printed. + * type A string to designate type of list, + * "Rx" or "Tx". + * num The index of the list. + * + * This function prints out the contents of the list + * pointed to by the list parameter. 
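tlan_reset_lists() above chains the receive descriptors by storing, in each element's forward field, the bus address of the next element in the contiguous DMA block, terminating the chain with 0 at the last slot. A small sketch of that forward-chaining over an ordinary array, using a pretend base address in place of the real DMA address:

#include <stdio.h>
#include <stdint.h>

#define NUM_RX 4

struct desc {
        uint32_t forward;       /* "bus address" of the next descriptor */
        uint32_t c_stat;
};

int main(void)
{
        struct desc ring[NUM_RX];
        uint32_t base = 0x1000;         /* pretend DMA address of ring[0] */
        int i;

        for (i = 0; i < NUM_RX; i++) {
                uint32_t self = base + i * sizeof(struct desc);

                ring[i].c_stat = 1;                     /* "ready" */
                ring[i].forward = self + sizeof(struct desc);
        }
        ring[NUM_RX - 1].forward = 0;                   /* end of chain */

        for (i = 0; i < NUM_RX; i++)
                printf("desc %d -> 0x%x\n", i, (unsigned int)ring[i].forward);
        return 0;
}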
+ * + **************************************************************/ + +static void tlan_print_list(struct tlan_list *list, char *type, int num) +{ + int i; + + pr_info("%s List %d at %p\n", type, num, list); + pr_info(" Forward = 0x%08x\n", list->forward); + pr_info(" CSTAT = 0x%04hx\n", list->c_stat); + pr_info(" Frame Size = 0x%04hx\n", list->frame_size); + /* for (i = 0; i < 10; i++) { */ + for (i = 0; i < 2; i++) { + pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n", + i, list->buffer[i].count, list->buffer[i].address); + } + +} + + + + +/*************************************************************** + * tlan_read_and_clear_stats + * + * Returns: + * Nothing + * Parms: + * dev Pointer to device structure of adapter + * to which to read stats. + * record Flag indicating whether to add + * + * This functions reads all the internal status registers + * of the TLAN chip, which clears them as a side effect. + * It then either adds the values to the device's status + * struct, or discards them, depending on whether record + * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). + * + **************************************************************/ + +static void tlan_read_and_clear_stats(struct net_device *dev, int record) +{ + u32 tx_good, tx_under; + u32 rx_good, rx_over; + u32 def_tx, crc, code; + u32 multi_col, single_col; + u32 excess_col, late_col, loss; + + outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR); + tx_good = inb(dev->base_addr + TLAN_DIO_DATA); + tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; + tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3); + + outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR); + rx_good = inb(dev->base_addr + TLAN_DIO_DATA); + rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; + rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3); + + outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR); + def_tx = inb(dev->base_addr + TLAN_DIO_DATA); + def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + crc = inb(dev->base_addr + TLAN_DIO_DATA + 2); + code = inb(dev->base_addr + TLAN_DIO_DATA + 3); + + outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR); + multi_col = inb(dev->base_addr + TLAN_DIO_DATA); + multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2); + single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8; + + outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR); + excess_col = inb(dev->base_addr + TLAN_DIO_DATA); + late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1); + loss = inb(dev->base_addr + TLAN_DIO_DATA + 2); + + if (record) { + dev->stats.rx_packets += rx_good; + dev->stats.rx_errors += rx_over + crc + code; + dev->stats.tx_packets += tx_good; + dev->stats.tx_errors += tx_under + loss; + dev->stats.collisions += multi_col + + single_col + excess_col + late_col; + + dev->stats.rx_over_errors += rx_over; + dev->stats.rx_crc_errors += crc; + dev->stats.rx_frame_errors += code; + + dev->stats.tx_aborted_errors += tx_under; + dev->stats.tx_carrier_errors += loss; + } + +} + + + + +/*************************************************************** + * TLan_Reset + * + * Returns: + * 0 + * Parms: + * dev Pointer to device structure of adapter + * to be reset. + * + * This function resets the adapter and it's physical + * device. See Chap. 3, pp. 9-10 of the "ThunderLAN + * Programmer's Guide" for details. 
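tlan_read_and_clear_stats() above widens each statistics register by hand: three successive byte reads build a 24-bit good-frames count, and the fourth byte is a separate 8-bit error counter packed into the same 32-bit DIO word. A stand-alone sketch of that unpacking from a captured register value (the sample value is arbitrary):

#include <stdio.h>
#include <stdint.h>

/* split one 32-bit statistics word into its 24-bit and 8-bit counters */
static void split_stat(uint32_t word, uint32_t *good, uint32_t *errs)
{
        uint8_t b0 = word & 0xff;               /* inb(DIO_DATA)     */
        uint8_t b1 = (word >> 8) & 0xff;        /* inb(DIO_DATA + 1) */
        uint8_t b2 = (word >> 16) & 0xff;       /* inb(DIO_DATA + 2) */
        uint8_t b3 = (word >> 24) & 0xff;       /* inb(DIO_DATA + 3) */

        *good = b0 | (b1 << 8) | ((uint32_t)b2 << 16);
        *errs = b3;
}

int main(void)
{
        uint32_t good, errs;

        split_stat(0x02123456, &good, &errs);   /* example register value */
        printf("good frames = %u, error counter = %u\n",
               (unsigned int)good, (unsigned int)errs);
        return 0;
}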
The routine tries to + * implement what is detailed there, though adjustments + * have been made. + * + **************************************************************/ + +static void +tlan_reset_adapter(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + int i; + u32 addr; + u32 data; + u8 data8; + + priv->tlan_full_duplex = false; + priv->phy_online = 0; + netif_carrier_off(dev); + +/* 1. Assert reset bit. */ + + data = inl(dev->base_addr + TLAN_HOST_CMD); + data |= TLAN_HC_AD_RST; + outl(data, dev->base_addr + TLAN_HOST_CMD); + + udelay(1000); + +/* 2. Turn off interrupts. (Probably isn't necessary) */ + + data = inl(dev->base_addr + TLAN_HOST_CMD); + data |= TLAN_HC_INT_OFF; + outl(data, dev->base_addr + TLAN_HOST_CMD); + +/* 3. Clear AREGs and HASHs. */ + + for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4) + tlan_dio_write32(dev->base_addr, (u16) i, 0); + +/* 4. Setup NetConfig register. */ + + data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; + tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); + +/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */ + + outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD); + outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD); + +/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */ + + outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); + addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; + tlan_set_bit(TLAN_NET_SIO_NMRST, addr); + +/* 7. Setup the remaining registers. */ + + if (priv->tlan_rev >= 0x30) { + data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC; + tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8); + } + tlan_phy_detect(dev); + data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN; + + if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) { + data |= TLAN_NET_CFG_BIT; + if (priv->aui == 1) { + tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a); + } else if (priv->duplex == TLAN_DUPLEX_FULL) { + tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00); + priv->tlan_full_duplex = true; + } else { + tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08); + } + } + + /* don't power down internal PHY if we're going to use it */ + if (priv->phy_num == 0 || + (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)) + data |= TLAN_NET_CFG_PHY_EN; + tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); + + if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) + tlan_finish_reset(dev); + else + tlan_phy_power_down(dev); + +} + + + + +static void +tlan_finish_reset(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u8 data; + u32 phy; + u8 sio; + u16 status; + u16 partner; + u16 tlphy_ctl; + u16 tlphy_par; + u16 tlphy_id1, tlphy_id2; + int i; + + phy = priv->phy[priv->phy_num]; + + data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP; + if (priv->tlan_full_duplex) + data |= TLAN_NET_CMD_DUPLEX; + tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data); + data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5; + if (priv->phy_num == 0) + data |= TLAN_NET_MASK_MASK7; + tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data); + tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2); + + if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) || + (priv->aui)) { + status = MII_GS_LINK; + netdev_info(dev, "Link forced\n"); + } else { + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + udelay(1000); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + if (status & MII_GS_LINK) 
{ + /* We only support link info on Nat.Sem. PHY's */ + if ((tlphy_id1 == NAT_SEM_ID1) && + (tlphy_id2 == NAT_SEM_ID2)) { + tlan_mii_read_reg(dev, phy, MII_AN_LPA, + &partner); + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, + &tlphy_par); + + netdev_info(dev, + "Link active, %s %uMbps %s-Duplex\n", + !(tlphy_par & TLAN_PHY_AN_EN_STAT) + ? "forced" : "Autonegotiation enabled,", + tlphy_par & TLAN_PHY_SPEED_100 + ? 100 : 10, + tlphy_par & TLAN_PHY_DUPLEX_FULL + ? "Full" : "Half"); + + if (tlphy_par & TLAN_PHY_AN_EN_STAT) { + netdev_info(dev, "Partner capability:"); + for (i = 5; i < 10; i++) + if (partner & (1 << i)) + pr_cont(" %s", + media[i-5]); + pr_cont("\n"); + } + } else + netdev_info(dev, "Link active\n"); + /* Enabling link beat monitoring */ + priv->media_timer.function = tlan_phy_monitor; + priv->media_timer.data = (unsigned long) dev; + priv->media_timer.expires = jiffies + HZ; + add_timer(&priv->media_timer); + } + } + + if (priv->phy_num == 0) { + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); + tlphy_ctl |= TLAN_TC_INTEN; + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); + sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO); + sio |= TLAN_NET_SIO_MINTEN; + tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio); + } + + if (status & MII_GS_LINK) { + tlan_set_mac(dev, 0, dev->dev_addr); + priv->phy_online = 1; + outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1); + if (debug >= 1 && debug != TLAN_DEBUG_PROBE) + outb((TLAN_HC_REQ_INT >> 8), + dev->base_addr + TLAN_HOST_CMD + 1); + outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM); + outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD); + tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK); + netif_carrier_on(dev); + } else { + netdev_info(dev, "Link inactive, will retry in 10 secs...\n"); + tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET); + return; + } + tlan_set_multicast_list(dev); + +} + + + + +/*************************************************************** + * tlan_set_mac + * + * Returns: + * Nothing + * Parms: + * dev Pointer to device structure of adapter + * on which to change the AREG. + * areg The AREG to set the address in (0 - 3). + * mac A pointer to an array of chars. Each + * element stores one byte of the address. + * IE, it isn't in ascii. + * + * This function transfers a MAC address to one of the + * TLAN AREGs (address registers). The TLAN chip locks + * the register on writing to offset 0 and unlocks the + * register after writing to offset 5. If NULL is passed + * in mac, then the AREG is filled with 0's. 
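+ * For example (the first form appears in tlan_finish_reset above; a
+ * NULL mac is how an unused AREG gets cleared):
+ *
+ *	tlan_set_mac(dev, 0, dev->dev_addr);	load the station address
+ *	tlan_set_mac(dev, 0, NULL);		zero out AREG 0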
+ * + **************************************************************/ + +static void tlan_set_mac(struct net_device *dev, int areg, char *mac) +{ + int i; + + areg *= 6; + + if (mac != NULL) { + for (i = 0; i < 6; i++) + tlan_dio_write8(dev->base_addr, + TLAN_AREG_0 + areg + i, mac[i]); + } else { + for (i = 0; i < 6; i++) + tlan_dio_write8(dev->base_addr, + TLAN_AREG_0 + areg + i, 0); + } + +} + + + + +/***************************************************************************** +****************************************************************************** + +ThunderLAN driver PHY layer routines + +****************************************************************************** +*****************************************************************************/ + + + +/********************************************************************* + * tlan_phy_print + * + * Returns: + * Nothing + * Parms: + * dev A pointer to the device structure of the + * TLAN device having the PHYs to be detailed. + * + * This function prints the registers a PHY (aka transceiver). + * + ********************************************************************/ + +static void tlan_phy_print(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u16 i, data0, data1, data2, data3, phy; + + phy = priv->phy[priv->phy_num]; + + if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { + netdev_info(dev, "Unmanaged PHY\n"); + } else if (phy <= TLAN_PHY_MAX_ADDR) { + netdev_info(dev, "PHY 0x%02x\n", phy); + pr_info(" Off. +0 +1 +2 +3\n"); + for (i = 0; i < 0x20; i += 4) { + tlan_mii_read_reg(dev, phy, i, &data0); + tlan_mii_read_reg(dev, phy, i + 1, &data1); + tlan_mii_read_reg(dev, phy, i + 2, &data2); + tlan_mii_read_reg(dev, phy, i + 3, &data3); + pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n", + i, data0, data1, data2, data3); + } + } else { + netdev_info(dev, "Invalid PHY\n"); + } + +} + + + + +/********************************************************************* + * tlan_phy_detect + * + * Returns: + * Nothing + * Parms: + * dev A pointer to the device structure of the adapter + * for which the PHY needs determined. + * + * So far I've found that adapters which have external PHYs + * may also use the internal PHY for part of the functionality. + * (eg, AUI/Thinnet). This function finds out if this TLAN + * chip has an internal PHY, and then finds the first external + * PHY (starting from address 0) if it exists). 
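+ *
+ * The outcome is recorded roughly as follows (the external address is
+ * just an example):
+ *
+ *	priv->phy[0]	TLAN_PHY_MAX_ADDR if the internal PHY responds,
+ *			else TLAN_PHY_NONE
+ *	priv->phy[1]	first external PHY found (e.g. 0x03), else
+ *			TLAN_PHY_NONE
+ *	priv->phy_num	1 if an external PHY was found, 0 if only the
+ *			internal one responded; if neither, the probe
+ *			fails with a message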
+ * + ********************************************************************/ + +static void tlan_phy_detect(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u16 control; + u16 hi; + u16 lo; + u32 phy; + + if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { + priv->phy_num = 0xffff; + return; + } + + tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi); + + if (hi != 0xffff) + priv->phy[0] = TLAN_PHY_MAX_ADDR; + else + priv->phy[0] = TLAN_PHY_NONE; + + priv->phy[1] = TLAN_PHY_NONE; + for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) { + tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo); + if ((control != 0xffff) || + (hi != 0xffff) || (lo != 0xffff)) { + TLAN_DBG(TLAN_DEBUG_GNRL, + "PHY found at %02x %04x %04x %04x\n", + phy, control, hi, lo); + if ((priv->phy[1] == TLAN_PHY_NONE) && + (phy != TLAN_PHY_MAX_ADDR)) { + priv->phy[1] = phy; + } + } + } + + if (priv->phy[1] != TLAN_PHY_NONE) + priv->phy_num = 1; + else if (priv->phy[0] != TLAN_PHY_NONE) + priv->phy_num = 0; + else + netdev_info(dev, "Cannot initialize device, no PHY was found!\n"); + +} + + + + +static void tlan_phy_power_down(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u16 value; + + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name); + value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; + tlan_mii_sync(dev->base_addr); + tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); + if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) { + /* if using internal PHY, the external PHY must be powered on */ + if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) + value = MII_GC_ISOLATE; /* just isolate it from MII */ + tlan_mii_sync(dev->base_addr); + tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value); + } + + /* Wait for 50 ms and powerup + * This is abitrary. It is intended to make sure the + * transceiver settles. + */ + tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP); + +} + + + + +static void tlan_phy_power_up(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u16 value; + + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name); + tlan_mii_sync(dev->base_addr); + value = MII_GC_LOOPBK; + tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); + tlan_mii_sync(dev->base_addr); + /* Wait for 500 ms and reset the + * transceiver. The TLAN docs say both 50 ms and + * 500 ms, so do the longer, just in case. + */ + tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET); + +} + + + + +static void tlan_phy_reset(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u16 phy; + u16 value; + unsigned long timeout = jiffies + HZ; + + phy = priv->phy[priv->phy_num]; + + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name); + tlan_mii_sync(dev->base_addr); + value = MII_GC_LOOPBK | MII_GC_RESET; + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); + do { + tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value); + if (time_after(jiffies, timeout)) { + netdev_err(dev, "PHY reset timeout\n"); + return; + } + } while (value & MII_GC_RESET); + + /* Wait for 500 ms and initialize. + * I don't remember why I wait this long. + * I've changed this to 50ms, as it seems long enough. 
+ */ + tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK); + +} + + + + +static void tlan_phy_start_link(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u16 ability; + u16 control; + u16 data; + u16 phy; + u16 status; + u16 tctl; + + phy = priv->phy[priv->phy_num]; + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability); + + if ((status & MII_GS_AUTONEG) && + (!priv->aui)) { + ability = status >> 11; + if (priv->speed == TLAN_SPEED_10 && + priv->duplex == TLAN_DUPLEX_HALF) { + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000); + } else if (priv->speed == TLAN_SPEED_10 && + priv->duplex == TLAN_DUPLEX_FULL) { + priv->tlan_full_duplex = true; + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100); + } else if (priv->speed == TLAN_SPEED_100 && + priv->duplex == TLAN_DUPLEX_HALF) { + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000); + } else if (priv->speed == TLAN_SPEED_100 && + priv->duplex == TLAN_DUPLEX_FULL) { + priv->tlan_full_duplex = true; + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100); + } else { + + /* Set Auto-Neg advertisement */ + tlan_mii_write_reg(dev, phy, MII_AN_ADV, + (ability << 5) | 1); + /* Enablee Auto-Neg */ + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000); + /* Restart Auto-Neg */ + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200); + /* Wait for 4 sec for autonegotiation + * to complete. The max spec time is less than this + * but the card need additional time to start AN. + * .5 sec should be plenty extra. + */ + netdev_info(dev, "Starting autonegotiation\n"); + tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN); + return; + } + + } + + if ((priv->aui) && (priv->phy_num != 0)) { + priv->phy_num = 0; + data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN + | TLAN_NET_CFG_PHY_EN; + tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); + tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN); + return; + } else if (priv->phy_num == 0) { + control = 0; + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl); + if (priv->aui) { + tctl |= TLAN_TC_AUISEL; + } else { + tctl &= ~TLAN_TC_AUISEL; + if (priv->duplex == TLAN_DUPLEX_FULL) { + control |= MII_GC_DUPLEX; + priv->tlan_full_duplex = true; + } + if (priv->speed == TLAN_SPEED_100) + control |= MII_GC_SPEEDSEL; + } + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control); + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl); + } + + /* Wait for 2 sec to give the transceiver time + * to establish link. + */ + tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET); + +} + + + + +static void tlan_phy_finish_auto_neg(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + u16 an_adv; + u16 an_lpa; + u16 mode; + u16 phy; + u16 status; + + phy = priv->phy[priv->phy_num]; + + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + udelay(1000); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + + if (!(status & MII_GS_AUTOCMPLT)) { + /* Wait for 8 sec to give the process + * more time. Perhaps we should fail after a while. 
+ */ + tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN); + return; + } + + netdev_info(dev, "Autonegotiation complete\n"); + tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv); + tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa); + mode = an_adv & an_lpa & 0x03E0; + if (mode & 0x0100) + priv->tlan_full_duplex = true; + else if (!(mode & 0x0080) && (mode & 0x0040)) + priv->tlan_full_duplex = true; + + /* switch to internal PHY for 10 Mbps */ + if ((!(mode & 0x0180)) && + (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) && + (priv->phy_num != 0)) { + priv->phy_num = 0; + tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN); + return; + } + + if (priv->phy_num == 0) { + if ((priv->duplex == TLAN_DUPLEX_FULL) || + (an_adv & an_lpa & 0x0040)) { + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, + MII_GC_AUTOENB | MII_GC_DUPLEX); + netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n"); + } else { + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, + MII_GC_AUTOENB); + netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n"); + } + } + + /* Wait for 100 ms. No reason in partiticular. + */ + tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET); + +} + + +/********************************************************************* + * + * tlan_phy_monitor + * + * Returns: + * None + * + * Params: + * data The device structure of this device. + * + * + * This function monitors PHY condition by reading the status + * register via the MII bus, controls LINK LED and notifies the + * kernel about link state. + * + *******************************************************************/ + +static void tlan_phy_monitor(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct tlan_priv *priv = netdev_priv(dev); + u16 phy; + u16 phy_status; + + phy = priv->phy[priv->phy_num]; + + /* Get PHY status register */ + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status); + + /* Check if link has been lost */ + if (!(phy_status & MII_GS_LINK)) { + if (netif_carrier_ok(dev)) { + printk(KERN_DEBUG "TLAN: %s has lost link\n", + dev->name); + tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0); + netif_carrier_off(dev); + if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) { + /* power down internal PHY */ + u16 data = MII_GC_PDOWN | MII_GC_LOOPBK | + MII_GC_ISOLATE; + + tlan_mii_sync(dev->base_addr); + tlan_mii_write_reg(dev, priv->phy[0], + MII_GEN_CTL, data); + /* set to external PHY */ + priv->phy_num = 1; + /* restart autonegotiation */ + tlan_set_timer(dev, msecs_to_jiffies(400), + TLAN_TIMER_PHY_PDOWN); + return; + } + } + } + + /* Link restablished? */ + if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) { + tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK); + printk(KERN_DEBUG "TLAN: %s has reestablished link\n", + dev->name); + netif_carrier_on(dev); + } + priv->media_timer.expires = jiffies + HZ; + add_timer(&priv->media_timer); +} + + +/***************************************************************************** +****************************************************************************** + +ThunderLAN driver MII routines + +these routines are based on the information in chap. 2 of the +"ThunderLAN Programmer's Guide", pp. 15-24. 
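+
+as a rough sketch of what the routines below bit-bang over the NetSio
+register, a read cycle (tlan_mii_read_reg) is:
+
+	start (01), opcode read (10), 5-bit phy address, 5-bit register,
+	release the bus for the turnaround/ack bit, then clock in the
+	16 data bits, MSB first
+
+a write cycle (tlan_mii_write_reg) uses opcode 01 and drives the
+turnaround and the 16 data bits itself.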
+ +****************************************************************************** +*****************************************************************************/ + + +/*************************************************************** + * tlan_mii_read_reg + * + * Returns: + * false if ack received ok + * true if no ack received or other error + * + * Parms: + * dev The device structure containing + * The io address and interrupt count + * for this device. + * phy The address of the PHY to be queried. + * reg The register whose contents are to be + * retrieved. + * val A pointer to a variable to store the + * retrieved value. + * + * This function uses the TLAN's MII bus to retrieve the contents + * of a given register on a PHY. It sends the appropriate info + * and then reads the 16-bit register value from the MII bus via + * the TLAN SIO register. + * + **************************************************************/ + +static bool +tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) +{ + u8 nack; + u16 sio, tmp; + u32 i; + bool err; + int minten; + struct tlan_priv *priv = netdev_priv(dev); + unsigned long flags = 0; + + err = false; + outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); + sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; + + if (!in_irq()) + spin_lock_irqsave(&priv->lock, flags); + + tlan_mii_sync(dev->base_addr); + + minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); + if (minten) + tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); + + tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ + tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */ + tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ + tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ + + + tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */ + + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */ + + nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */ + if (nack) { /* no ACK, so fake it */ + for (i = 0; i < 16; i++) { + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + } + tmp = 0xffff; + err = true; + } else { /* ACK, so read data */ + for (tmp = 0, i = 0x8000; i; i >>= 1) { + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio)) + tmp |= i; + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + } + } + + + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + + if (minten) + tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); + + *val = tmp; + + if (!in_irq()) + spin_unlock_irqrestore(&priv->lock, flags); + + return err; + +} + + + + +/*************************************************************** + * tlan_mii_send_data + * + * Returns: + * Nothing + * Parms: + * base_port The base IO port of the adapter in + * question. + * dev The address of the PHY to be queried. + * data The value to be placed on the MII bus. + * num_bits The number of bits in data that are to + * be placed on the MII bus. + * + * This function sends on sequence of bits on the MII + * configuration bus. 
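+ *
+ * For example, tlan_mii_read_reg() above calls
+ *	tlan_mii_send_data(dev->base_addr, 0x2, 2);
+ * to put the two-bit read opcode (binary 10) on the bus, most
+ * significant bit first.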
+ * + **************************************************************/ + +static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits) +{ + u16 sio; + u32 i; + + if (num_bits == 0) + return; + + outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); + sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; + tlan_set_bit(TLAN_NET_SIO_MTXEN, sio); + + for (i = (0x1 << (num_bits - 1)); i; i >>= 1) { + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); + if (data & i) + tlan_set_bit(TLAN_NET_SIO_MDATA, sio); + else + tlan_clear_bit(TLAN_NET_SIO_MDATA, sio); + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); + } + +} + + + + +/*************************************************************** + * TLan_MiiSync + * + * Returns: + * Nothing + * Parms: + * base_port The base IO port of the adapter in + * question. + * + * This functions syncs all PHYs in terms of the MII configuration + * bus. + * + **************************************************************/ + +static void tlan_mii_sync(u16 base_port) +{ + int i; + u16 sio; + + outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); + sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; + + tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); + for (i = 0; i < 32; i++) { + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + } + +} + + + + +/*************************************************************** + * tlan_mii_write_reg + * + * Returns: + * Nothing + * Parms: + * dev The device structure for the device + * to write to. + * phy The address of the PHY to be written to. + * reg The register whose contents are to be + * written. + * val The value to be written to the register. + * + * This function uses the TLAN's MII bus to write the contents of a + * given register on a PHY. It sends the appropriate info and then + * writes the 16-bit register value from the MII configuration bus + * via the TLAN SIO register. + * + **************************************************************/ + +static void +tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) +{ + u16 sio; + int minten; + unsigned long flags = 0; + struct tlan_priv *priv = netdev_priv(dev); + + outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); + sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; + + if (!in_irq()) + spin_lock_irqsave(&priv->lock, flags); + + tlan_mii_sync(dev->base_addr); + + minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); + if (minten) + tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); + + tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ + tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */ + tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ + tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ + + tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */ + tlan_mii_send_data(dev->base_addr, val, 16); /* send data */ + + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + + if (minten) + tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); + + if (!in_irq()) + spin_unlock_irqrestore(&priv->lock, flags); + +} + + + + +/***************************************************************************** +****************************************************************************** + +ThunderLAN driver eeprom routines + +the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A +EEPROM. these functions are based on information in microchip's +data sheet. 
I don't know how well this functions will work with +other Eeproms. + +****************************************************************************** +*****************************************************************************/ + + +/*************************************************************** + * tlan_ee_send_start + * + * Returns: + * Nothing + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * + * This function sends a start cycle to an EEPROM attached + * to a TLAN chip. + * + **************************************************************/ + +static void tlan_ee_send_start(u16 io_base) +{ + u16 sio; + + outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); + sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; + + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + +} + + + + +/*************************************************************** + * tlan_ee_send_byte + * + * Returns: + * If the correct ack was received, 0, otherwise 1 + * Parms: io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * data The 8 bits of information to + * send to the EEPROM. + * stop If TLAN_EEPROM_STOP is passed, a + * stop cycle is sent after the + * byte is sent after the ack is + * read. + * + * This function sends a byte on the serial EEPROM line, + * driving the clock to send each bit. The function then + * reverses transmission direction and reads an acknowledge + * bit. + * + **************************************************************/ + +static int tlan_ee_send_byte(u16 io_base, u8 data, int stop) +{ + int err; + u8 place; + u16 sio; + + outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); + sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; + + /* Assume clock is low, tx is enabled; */ + for (place = 0x80; place != 0; place >>= 1) { + if (place & data) + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + else + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + } + tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); + + if ((!err) && stop) { + /* STOP, raise data while clock is high */ + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + } + + return err; + +} + + + + +/*************************************************************** + * tlan_ee_receive_byte + * + * Returns: + * Nothing + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * data An address to a char to hold the + * data sent from the EEPROM. + * stop If TLAN_EEPROM_STOP is passed, a + * stop cycle is sent after the + * byte is received, and no ack is + * sent. + * + * This function receives 8 bits of data from the EEPROM + * over the serial link. It then sends and ack bit, or no + * ack and a stop bit. This function is used to retrieve + * data after the address of a byte in the EEPROM has been + * sent. 
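+ *
+ * As an illustration, a complete random read of EEPROM byte ee_addr,
+ * as done by tlan_ee_read_byte() below, is the usual two-wire
+ * sequence:
+ *
+ *	start; send 0xa0 (device write); send ee_addr;
+ *	start; send 0xa1 (device read); receive the byte with a
+ *	stop cycle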
+ * + **************************************************************/ + +static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop) +{ + u8 place; + u16 sio; + + outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); + sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; + *data = 0; + + /* Assume clock is low, tx is enabled; */ + tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); + for (place = 0x80; place; place >>= 1) { + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio)) + *data |= place; + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + } + + tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); + if (!stop) { + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */ + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + } else { + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */ + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + /* STOP, raise data while clock is high */ + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + } + +} + + + + +/*************************************************************** + * tlan_ee_read_byte + * + * Returns: + * No error = 0, else, the stage at which the error + * occurred. + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * ee_addr The address of the byte in the + * EEPROM whose contents are to be + * retrieved. + * data An address to a char to hold the + * data obtained from the EEPROM. + * + * This function reads a byte of information from an byte + * cell in the EEPROM. + * + **************************************************************/ + +static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data) +{ + int err; + struct tlan_priv *priv = netdev_priv(dev); + unsigned long flags = 0; + int ret = 0; + + spin_lock_irqsave(&priv->lock, flags); + + tlan_ee_send_start(dev->base_addr); + err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK); + if (err) { + ret = 1; + goto fail; + } + err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK); + if (err) { + ret = 2; + goto fail; + } + tlan_ee_send_start(dev->base_addr); + err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK); + if (err) { + ret = 3; + goto fail; + } + tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP); +fail: + spin_unlock_irqrestore(&priv->lock, flags); + + return ret; + +} + + + diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h new file mode 100644 index 000000000..e99284118 --- /dev/null +++ b/drivers/net/ethernet/ti/tlan.h @@ -0,0 +1,544 @@ +#ifndef TLAN_H +#define TLAN_H +/******************************************************************** + * + * Linux ThunderLAN Driver + * + * tlan.h + * by James Banks + * + * (C) 1997-1998 Caldera, Inc. + * (C) 1999-2001 Torben Mathiasen + * + * This software may be used and distributed according to the terms + * of the GNU General Public License, incorporated herein by reference. 
+ * + * + * Dec 10, 1999 Torben Mathiasen + * New Maintainer + * + ********************************************************************/ + + +#include +#include +#include + + + + /***************************************************************** + * TLan Definitions + * + ****************************************************************/ + +#define TLAN_MIN_FRAME_SIZE 64 +#define TLAN_MAX_FRAME_SIZE 1600 + +#define TLAN_NUM_RX_LISTS 32 +#define TLAN_NUM_TX_LISTS 64 + +#define TLAN_IGNORE 0 +#define TLAN_RECORD 1 + +#define TLAN_DBG(lvl, format, args...) \ + do { \ + if (debug&lvl) \ + printk(KERN_DEBUG "TLAN: " format, ##args); \ + } while (0) + +#define TLAN_DEBUG_GNRL 0x0001 +#define TLAN_DEBUG_TX 0x0002 +#define TLAN_DEBUG_RX 0x0004 +#define TLAN_DEBUG_LIST 0x0008 +#define TLAN_DEBUG_PROBE 0x0010 + +#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ +#define MAX_TLAN_BOARDS 8 /* Max number of boards installed + at a time */ + + + /***************************************************************** + * Device Identification Definitions + * + ****************************************************************/ + +#define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012 +#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030 +#ifndef PCI_DEVICE_ID_OLICOM_OC2183 +#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 +#endif +#ifndef PCI_DEVICE_ID_OLICOM_OC2325 +#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 +#endif +#ifndef PCI_DEVICE_ID_OLICOM_OC2326 +#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 +#endif + +struct tlan_adapter_entry { + u16 vendor_id; + u16 device_id; + char *device_label; + u32 flags; + u16 addr_ofs; +}; + +#define TLAN_ADAPTER_NONE 0x00000000 +#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 +#define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002 +#define TLAN_ADAPTER_USE_INTERN_10 0x00000004 +#define TLAN_ADAPTER_ACTIVITY_LED 0x00000008 + +#define TLAN_SPEED_DEFAULT 0 +#define TLAN_SPEED_10 10 +#define TLAN_SPEED_100 100 + +#define TLAN_DUPLEX_DEFAULT 0 +#define TLAN_DUPLEX_HALF 1 +#define TLAN_DUPLEX_FULL 2 + + + + /***************************************************************** + * EISA Definitions + * + ****************************************************************/ + +#define EISA_ID 0xc80 /* EISA ID Registers */ +#define EISA_ID0 0xc80 /* EISA ID Register 0 */ +#define EISA_ID1 0xc81 /* EISA ID Register 1 */ +#define EISA_ID2 0xc82 /* EISA ID Register 2 */ +#define EISA_ID3 0xc83 /* EISA ID Register 3 */ +#define EISA_CR 0xc84 /* EISA Control Register */ +#define EISA_REG0 0xc88 /* EISA Configuration Register 0 */ +#define EISA_REG1 0xc89 /* EISA Configuration Register 1 */ +#define EISA_REG2 0xc8a /* EISA Configuration Register 2 */ +#define EISA_REG3 0xc8f /* EISA Configuration Register 3 */ +#define EISA_APROM 0xc90 /* Ethernet Address PROM */ + + + + /***************************************************************** + * Rx/Tx List Definitions + * + ****************************************************************/ + +#define TLAN_BUFFERS_PER_LIST 10 +#define TLAN_LAST_BUFFER 0x80000000 +#define TLAN_CSTAT_UNUSED 0x8000 +#define TLAN_CSTAT_FRM_CMP 0x4000 +#define TLAN_CSTAT_READY 0x3000 +#define TLAN_CSTAT_EOC 0x0800 +#define TLAN_CSTAT_RX_ERROR 0x0400 +#define TLAN_CSTAT_PASS_CRC 0x0200 +#define TLAN_CSTAT_DP_PR 0x0100 + + +struct tlan_buffer { + u32 count; + u32 address; +}; + + +struct tlan_list { + u32 forward; + u16 c_stat; + u16 frame_size; + struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST]; +}; + + +typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; + + + + + 
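+
+/* A minimal sketch, not a definitive reference, of how a single-fragment
+ * transmit entry would be prepared with the structures above: the last
+ * valid buffer is tagged with TLAN_LAST_BUFFER and the entry is handed
+ * to the adapter by marking its c_stat ready.  "list", "buf_dma" and
+ * "len" are hypothetical locals.
+ *
+ *	list->forward = 0;
+ *	list->frame_size = len;
+ *	list->buffer[0].address = buf_dma;
+ *	list->buffer[0].count = TLAN_LAST_BUFFER | len;
+ *	list->buffer[1].count = 0;
+ *	list->buffer[1].address = 0;
+ *	list->c_stat = TLAN_CSTAT_READY;
+ */
+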
/***************************************************************** + * PHY definitions + * + ****************************************************************/ + +#define TLAN_PHY_MAX_ADDR 0x1F +#define TLAN_PHY_NONE 0x20 + + + + + /***************************************************************** + * TLAN Private Information Structure + * + ****************************************************************/ + +struct tlan_priv { + struct net_device *next_device; + struct pci_dev *pci_dev; + struct net_device *dev; + void *dma_storage; + dma_addr_t dma_storage_dma; + unsigned int dma_size; + u8 *pad_buffer; + struct tlan_list *rx_list; + dma_addr_t rx_list_dma; + u8 *rx_buffer; + dma_addr_t rx_buffer_dma; + u32 rx_head; + u32 rx_tail; + u32 rx_eoc_count; + struct tlan_list *tx_list; + dma_addr_t tx_list_dma; + u8 *tx_buffer; + dma_addr_t tx_buffer_dma; + u32 tx_head; + u32 tx_in_progress; + u32 tx_tail; + u32 tx_busy_count; + u32 phy_online; + u32 timer_set_at; + u32 timer_type; + struct timer_list timer; + struct timer_list media_timer; + struct board *adapter; + u32 adapter_rev; + u32 aui; + u32 debug; + u32 duplex; + u32 phy[2]; + u32 phy_num; + u32 speed; + u8 tlan_rev; + u8 tlan_full_duplex; + spinlock_t lock; + struct work_struct tlan_tqueue; +}; + + + + + /***************************************************************** + * TLan Driver Timer Definitions + * + ****************************************************************/ + +#define TLAN_TIMER_ACTIVITY 2 +#define TLAN_TIMER_PHY_PDOWN 3 +#define TLAN_TIMER_PHY_PUP 4 +#define TLAN_TIMER_PHY_RESET 5 +#define TLAN_TIMER_PHY_START_LINK 6 +#define TLAN_TIMER_PHY_FINISH_AN 7 +#define TLAN_TIMER_FINISH_RESET 8 + +#define TLAN_TIMER_ACT_DELAY (HZ/10) + + + + + /***************************************************************** + * TLan Driver Eeprom Definitions + * + ****************************************************************/ + +#define TLAN_EEPROM_ACK 0 +#define TLAN_EEPROM_STOP 1 + +#define TLAN_EEPROM_SIZE 256 + + + + /***************************************************************** + * Host Register Offsets and Contents + * + ****************************************************************/ + +#define TLAN_HOST_CMD 0x00 +#define TLAN_HC_GO 0x80000000 +#define TLAN_HC_STOP 0x40000000 +#define TLAN_HC_ACK 0x20000000 +#define TLAN_HC_CS_MASK 0x1FE00000 +#define TLAN_HC_EOC 0x00100000 +#define TLAN_HC_RT 0x00080000 +#define TLAN_HC_NES 0x00040000 +#define TLAN_HC_AD_RST 0x00008000 +#define TLAN_HC_LD_TMR 0x00004000 +#define TLAN_HC_LD_THR 0x00002000 +#define TLAN_HC_REQ_INT 0x00001000 +#define TLAN_HC_INT_OFF 0x00000800 +#define TLAN_HC_INT_ON 0x00000400 +#define TLAN_HC_AC_MASK 0x000000FF +#define TLAN_CH_PARM 0x04 +#define TLAN_DIO_ADR 0x08 +#define TLAN_DA_ADR_INC 0x8000 +#define TLAN_DA_RAM_ADR 0x4000 +#define TLAN_HOST_INT 0x0A +#define TLAN_HI_IV_MASK 0x1FE0 +#define TLAN_HI_IT_MASK 0x001C +#define TLAN_DIO_DATA 0x0C + + +/* ThunderLAN Internal Register DIO Offsets */ + +#define TLAN_NET_CMD 0x00 +#define TLAN_NET_CMD_NRESET 0x80 +#define TLAN_NET_CMD_NWRAP 0x40 +#define TLAN_NET_CMD_CSF 0x20 +#define TLAN_NET_CMD_CAF 0x10 +#define TLAN_NET_CMD_NOBRX 0x08 +#define TLAN_NET_CMD_DUPLEX 0x04 +#define TLAN_NET_CMD_TRFRAM 0x02 +#define TLAN_NET_CMD_TXPACE 0x01 +#define TLAN_NET_SIO 0x01 +#define TLAN_NET_SIO_MINTEN 0x80 +#define TLAN_NET_SIO_ECLOK 0x40 +#define TLAN_NET_SIO_ETXEN 0x20 +#define TLAN_NET_SIO_EDATA 0x10 +#define TLAN_NET_SIO_NMRST 0x08 +#define TLAN_NET_SIO_MCLK 0x04 +#define TLAN_NET_SIO_MTXEN 0x02 +#define 
TLAN_NET_SIO_MDATA 0x01 +#define TLAN_NET_STS 0x02 +#define TLAN_NET_STS_MIRQ 0x80 +#define TLAN_NET_STS_HBEAT 0x40 +#define TLAN_NET_STS_TXSTOP 0x20 +#define TLAN_NET_STS_RXSTOP 0x10 +#define TLAN_NET_STS_RSRVD 0x0F +#define TLAN_NET_MASK 0x03 +#define TLAN_NET_MASK_MASK7 0x80 +#define TLAN_NET_MASK_MASK6 0x40 +#define TLAN_NET_MASK_MASK5 0x20 +#define TLAN_NET_MASK_MASK4 0x10 +#define TLAN_NET_MASK_RSRVD 0x0F +#define TLAN_NET_CONFIG 0x04 +#define TLAN_NET_CFG_RCLK 0x8000 +#define TLAN_NET_CFG_TCLK 0x4000 +#define TLAN_NET_CFG_BIT 0x2000 +#define TLAN_NET_CFG_RXCRC 0x1000 +#define TLAN_NET_CFG_PEF 0x0800 +#define TLAN_NET_CFG_1FRAG 0x0400 +#define TLAN_NET_CFG_1CHAN 0x0200 +#define TLAN_NET_CFG_MTEST 0x0100 +#define TLAN_NET_CFG_PHY_EN 0x0080 +#define TLAN_NET_CFG_MSMASK 0x007F +#define TLAN_MAN_TEST 0x06 +#define TLAN_DEF_VENDOR_ID 0x08 +#define TLAN_DEF_DEVICE_ID 0x0A +#define TLAN_DEF_REVISION 0x0C +#define TLAN_DEF_SUBCLASS 0x0D +#define TLAN_DEF_MIN_LAT 0x0E +#define TLAN_DEF_MAX_LAT 0x0F +#define TLAN_AREG_0 0x10 +#define TLAN_AREG_1 0x16 +#define TLAN_AREG_2 0x1C +#define TLAN_AREG_3 0x22 +#define TLAN_HASH_1 0x28 +#define TLAN_HASH_2 0x2C +#define TLAN_GOOD_TX_FRMS 0x30 +#define TLAN_TX_UNDERUNS 0x33 +#define TLAN_GOOD_RX_FRMS 0x34 +#define TLAN_RX_OVERRUNS 0x37 +#define TLAN_DEFERRED_TX 0x38 +#define TLAN_CRC_ERRORS 0x3A +#define TLAN_CODE_ERRORS 0x3B +#define TLAN_MULTICOL_FRMS 0x3C +#define TLAN_SINGLECOL_FRMS 0x3E +#define TLAN_EXCESSCOL_FRMS 0x40 +#define TLAN_LATE_COLS 0x41 +#define TLAN_CARRIER_LOSS 0x42 +#define TLAN_ACOMMIT 0x43 +#define TLAN_LED_REG 0x44 +#define TLAN_LED_ACT 0x10 +#define TLAN_LED_LINK 0x01 +#define TLAN_BSIZE_REG 0x45 +#define TLAN_MAX_RX 0x46 +#define TLAN_INT_DIS 0x48 +#define TLAN_ID_TX_EOC 0x04 +#define TLAN_ID_RX_EOF 0x02 +#define TLAN_ID_RX_EOC 0x01 + + + +/* ThunderLAN Interrupt Codes */ + +#define TLAN_INT_NUMBER_OF_INTS 8 + +#define TLAN_INT_NONE 0x0000 +#define TLAN_INT_TX_EOF 0x0001 +#define TLAN_INT_STAT_OVERFLOW 0x0002 +#define TLAN_INT_RX_EOF 0x0003 +#define TLAN_INT_DUMMY 0x0004 +#define TLAN_INT_TX_EOC 0x0005 +#define TLAN_INT_STATUS_CHECK 0x0006 +#define TLAN_INT_RX_EOC 0x0007 + + + +/* ThunderLAN MII Registers */ + +/* Generic MII/PHY Registers */ + +#define MII_GEN_CTL 0x00 +#define MII_GC_RESET 0x8000 +#define MII_GC_LOOPBK 0x4000 +#define MII_GC_SPEEDSEL 0x2000 +#define MII_GC_AUTOENB 0x1000 +#define MII_GC_PDOWN 0x0800 +#define MII_GC_ISOLATE 0x0400 +#define MII_GC_AUTORSRT 0x0200 +#define MII_GC_DUPLEX 0x0100 +#define MII_GC_COLTEST 0x0080 +#define MII_GC_RESERVED 0x007F +#define MII_GEN_STS 0x01 +#define MII_GS_100BT4 0x8000 +#define MII_GS_100BTXFD 0x4000 +#define MII_GS_100BTXHD 0x2000 +#define MII_GS_10BTFD 0x1000 +#define MII_GS_10BTHD 0x0800 +#define MII_GS_RESERVED 0x07C0 +#define MII_GS_AUTOCMPLT 0x0020 +#define MII_GS_RFLT 0x0010 +#define MII_GS_AUTONEG 0x0008 +#define MII_GS_LINK 0x0004 +#define MII_GS_JABBER 0x0002 +#define MII_GS_EXTCAP 0x0001 +#define MII_GEN_ID_HI 0x02 +#define MII_GEN_ID_LO 0x03 +#define MII_GIL_OUI 0xFC00 +#define MII_GIL_MODEL 0x03F0 +#define MII_GIL_REVISION 0x000F +#define MII_AN_ADV 0x04 +#define MII_AN_LPA 0x05 +#define MII_AN_EXP 0x06 + +/* ThunderLAN Specific MII/PHY Registers */ + +#define TLAN_TLPHY_ID 0x10 +#define TLAN_TLPHY_CTL 0x11 +#define TLAN_TC_IGLINK 0x8000 +#define TLAN_TC_SWAPOL 0x4000 +#define TLAN_TC_AUISEL 0x2000 +#define TLAN_TC_SQEEN 0x1000 +#define TLAN_TC_MTEST 0x0800 +#define TLAN_TC_RESERVED 0x07F8 +#define TLAN_TC_NFEW 0x0004 +#define TLAN_TC_INTEN 0x0002 +#define 
TLAN_TC_TINT 0x0001 +#define TLAN_TLPHY_STS 0x12 +#define TLAN_TS_MINT 0x8000 +#define TLAN_TS_PHOK 0x4000 +#define TLAN_TS_POLOK 0x2000 +#define TLAN_TS_TPENERGY 0x1000 +#define TLAN_TS_RESERVED 0x0FFF +#define TLAN_TLPHY_PAR 0x19 +#define TLAN_PHY_CIM_STAT 0x0020 +#define TLAN_PHY_SPEED_100 0x0040 +#define TLAN_PHY_DUPLEX_FULL 0x0080 +#define TLAN_PHY_AN_EN_STAT 0x0400 + +/* National Sem. & Level1 PHY id's */ +#define NAT_SEM_ID1 0x2000 +#define NAT_SEM_ID2 0x5C01 +#define LEVEL1_ID1 0x7810 +#define LEVEL1_ID2 0x0000 + +#define CIRC_INC(a, b) if (++a >= b) a = 0 + +/* Routines to access internal registers. */ + +static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr) +{ + outw(internal_addr, base_addr + TLAN_DIO_ADR); + return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); + +} + + + + +static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr) +{ + outw(internal_addr, base_addr + TLAN_DIO_ADR); + return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); + +} + + + + +static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr) +{ + outw(internal_addr, base_addr + TLAN_DIO_ADR); + return inl(base_addr + TLAN_DIO_DATA); + +} + + + + +static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data) +{ + outw(internal_addr, base_addr + TLAN_DIO_ADR); + outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); + +} + + + + +static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data) +{ + outw(internal_addr, base_addr + TLAN_DIO_ADR); + outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); + +} + + + + +static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data) +{ + outw(internal_addr, base_addr + TLAN_DIO_ADR); + outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); + +} + +#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port) +#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit)) +#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port) + +/* + * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those + * the code below is about seven times as fast as the original code + * + * The original code was: + * + * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! 
a && b )); } + * + * #define XOR8(a, b, c, d, e, f, g, h) \ + * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) ) + * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) ) + * + * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), + * DA(a,30), DA(a,36), DA(a,42)); + * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), + * DA(a,31), DA(a,37), DA(a,43)) << 1; + * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), + * DA(a,32), DA(a,38), DA(a,44)) << 2; + * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), + * DA(a,33), DA(a,39), DA(a,45)) << 3; + * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), + * DA(a,34), DA(a,40), DA(a,46)) << 4; + * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), + * DA(a,35), DA(a,41), DA(a,47)) << 5; + * + */ +static inline u32 tlan_hash_func(const u8 *a) +{ + u8 hash; + + hash = (a[0]^a[3]); /* & 077 */ + hash ^= ((a[0]^a[3])>>6); /* & 003 */ + hash ^= ((a[1]^a[4])<<2); /* & 074 */ + hash ^= ((a[1]^a[4])>>4); /* & 017 */ + hash ^= ((a[2]^a[5])<<4); /* & 060 */ + hash ^= ((a[2]^a[5])>>2); /* & 077 */ + + return hash & 077; +} +#endif diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig new file mode 100644 index 000000000..f59a6c265 --- /dev/null +++ b/drivers/net/ethernet/tile/Kconfig @@ -0,0 +1,18 @@ +# +# Tilera network device configuration +# + +config TILE_NET + tristate "Tilera GBE/XGBE network driver support" + depends on TILE + default y + select CRC32 + select TILE_GXIO_MPIPE if TILEGX + select HIGH_RES_TIMERS if TILEGX + select PTP_1588_CLOCK if TILEGX + ---help--- + This is a standard Linux network device driver for the + on-chip Tilera Gigabit Ethernet and XAUI interfaces. + + To compile this driver as a module, choose M here: the module + will be called tile_net. diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile new file mode 100644 index 000000000..0ef9eefd3 --- /dev/null +++ b/drivers/net/ethernet/tile/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the TILE on-chip networking support. +# + +obj-$(CONFIG_TILE_NET) += tile_net.o +ifdef CONFIG_TILEGX +tile_net-y := tilegx.o +else +tile_net-y := tilepro.o +endif diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c new file mode 100644 index 000000000..a3f761000 --- /dev/null +++ b/drivers/net/ethernet/tile/tilegx.c @@ -0,0 +1,2281 @@ +/* + * Copyright 2012 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include /* printk() */ +#include /* kmalloc() */ +#include /* error codes */ +#include /* size_t */ +#include +#include +#include +#include /* struct device, and other headers */ +#include /* eth_type_trans */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* Default transmit lockup timeout period, in jiffies. 
*/ +#define TILE_NET_TIMEOUT (5 * HZ) + +/* The maximum number of distinct channels (idesc.channel is 5 bits). */ +#define TILE_NET_CHANNELS 32 + +/* Maximum number of idescs to handle per "poll". */ +#define TILE_NET_BATCH 128 + +/* Maximum number of packets to handle per "poll". */ +#define TILE_NET_WEIGHT 64 + +/* Number of entries in each iqueue. */ +#define IQUEUE_ENTRIES 512 + +/* Number of entries in each equeue. */ +#define EQUEUE_ENTRIES 2048 + +/* Total header bytes per equeue slot. Must be big enough for 2 bytes + * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to + * 60 bytes of actual TCP header. We round up to align to cache lines. + */ +#define HEADER_BYTES 128 + +/* Maximum completions per cpu per device (must be a power of two). + * ISSUE: What is the right number here? If this is too small, then + * egress might block waiting for free space in a completions array. + * ISSUE: At the least, allocate these only for initialized echannels. + */ +#define TILE_NET_MAX_COMPS 64 + +#define MAX_FRAGS (MAX_SKB_FRAGS + 1) + +/* The "kinds" of buffer stacks (small/large/jumbo). */ +#define MAX_KINDS 3 + +/* Size of completions data to allocate. + * ISSUE: Probably more than needed since we don't use all the channels. + */ +#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) + +/* Size of NotifRing data to allocate. */ +#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) + +/* Timeout to wake the per-device TX timer after we stop the queue. + * We don't want the timeout too short (adds overhead, and might end + * up causing stop/wake/stop/wake cycles) or too long (affects performance). + * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets. + */ +#define TX_TIMER_DELAY_USEC 30 + +/* Timeout to wake the per-cpu egress timer to free completions. */ +#define EGRESS_TIMER_DELAY_USEC 1000 + +MODULE_AUTHOR("Tilera Corporation"); +MODULE_LICENSE("GPL"); + +/* A "packet fragment" (a chunk of memory). */ +struct frag { + void *buf; + size_t length; +}; + +/* A single completion. */ +struct tile_net_comp { + /* The "complete_count" when the completion will be complete. */ + s64 when; + /* The buffer to be freed when the completion is complete. */ + struct sk_buff *skb; +}; + +/* The completions for a given cpu and echannel. */ +struct tile_net_comps { + /* The completions. */ + struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; + /* The number of completions used. */ + unsigned long comp_next; + /* The number of completions freed. */ + unsigned long comp_last; +}; + +/* The transmit wake timer for a given cpu and echannel. */ +struct tile_net_tx_wake { + int tx_queue_idx; + struct hrtimer timer; + struct net_device *dev; +}; + +/* Info for a specific cpu. */ +struct tile_net_info { + /* Our cpu. */ + int my_cpu; + /* A timer for handling egress completions. */ + struct hrtimer egress_timer; + /* True if "egress_timer" is scheduled. */ + bool egress_timer_scheduled; + struct info_mpipe { + /* Packet queue. */ + gxio_mpipe_iqueue_t iqueue; + /* The NAPI struct. */ + struct napi_struct napi; + /* Number of buffers (by kind) which must still be provided. */ + unsigned int num_needed_buffers[MAX_KINDS]; + /* instance id. */ + int instance; + /* True if iqueue is valid. */ + bool has_iqueue; + /* NAPI flags. */ + bool napi_added; + bool napi_enabled; + /* Comps for each egress channel. */ + struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; + /* Transmit wake timer for each egress channel. 
*/ + struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; + } mpipe[NR_MPIPE_MAX]; +}; + +/* Info for egress on a particular egress channel. */ +struct tile_net_egress { + /* The "equeue". */ + gxio_mpipe_equeue_t *equeue; + /* The headers for TSO. */ + unsigned char *headers; +}; + +/* Info for a specific device. */ +struct tile_net_priv { + /* Our network device. */ + struct net_device *dev; + /* The primary link. */ + gxio_mpipe_link_t link; + /* The primary channel, if open, else -1. */ + int channel; + /* The "loopify" egress link, if needed. */ + gxio_mpipe_link_t loopify_link; + /* The "loopify" egress channel, if open, else -1. */ + int loopify_channel; + /* The egress channel (channel or loopify_channel). */ + int echannel; + /* mPIPE instance, 0 or 1. */ + int instance; + /* The timestamp config. */ + struct hwtstamp_config stamp_cfg; +}; + +static struct mpipe_data { + /* The ingress irq. */ + int ingress_irq; + + /* The "context" for all devices. */ + gxio_mpipe_context_t context; + + /* Egress info, indexed by "priv->echannel" + * (lazily created as needed). + */ + struct tile_net_egress + egress_for_echannel[TILE_NET_CHANNELS]; + + /* Devices currently associated with each channel. + * NOTE: The array entry can become NULL after ifconfig down, but + * we do not free the underlying net_device structures, so it is + * safe to use a pointer after reading it from this array. + */ + struct net_device + *tile_net_devs_for_channel[TILE_NET_CHANNELS]; + + /* The actual memory allocated for the buffer stacks. */ + void *buffer_stack_vas[MAX_KINDS]; + + /* The amount of memory allocated for each buffer stack. */ + size_t buffer_stack_bytes[MAX_KINDS]; + + /* The first buffer stack index + * (small = +0, large = +1, jumbo = +2). + */ + int first_buffer_stack; + + /* The buckets. */ + int first_bucket; + int num_buckets; + + /* PTP-specific data. */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; + + /* Lock for ptp accessors. */ + struct mutex ptp_lock; + +} mpipe_data[NR_MPIPE_MAX] = { + [0 ... (NR_MPIPE_MAX - 1)] { + .ingress_irq = -1, + .first_buffer_stack = -1, + .first_bucket = -1, + .num_buckets = 1 + } +}; + +/* A mutex for "tile_net_devs_for_channel". */ +static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); + +/* The per-cpu info. */ +static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); + + +/* The buffer size enums for each buffer stack. + * See arch/tile/include/gxio/mpipe.h for the set of possible values. + * We avoid the "10384" size because it can induce "false chaining" + * on "cut-through" jumbo packets. + */ +static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = { + GXIO_MPIPE_BUFFER_SIZE_128, + GXIO_MPIPE_BUFFER_SIZE_1664, + GXIO_MPIPE_BUFFER_SIZE_16384 +}; + +/* Text value of tile_net.cpus if passed as a module parameter. */ +static char *network_cpus_string; + +/* The actual cpus in "network_cpus". */ +static struct cpumask network_cpus_map; + +/* If "tile_net.loopify=LINK" was specified, this is "LINK". */ +static char *loopify_link_name; + +/* If "tile_net.custom" was specified, this is true. */ +static bool custom_flag; + +/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */ +static uint jumbo_num; + +/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */ +static inline int mpipe_instance(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + return priv->instance; +} + +/* The "tile_net.cpus" argument specifies the cpus that are dedicated + * to handle ingress packets. 
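+ * For example, "tile_net.cpus=1-3,7" requests cpus 1, 2, 3 and 7 (an
+ * illustrative value; anything the kernel cpulist parser accepts can
+ * be given).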
+ * + * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where + * m, n, x, y are integer numbers that represent the cpus that can be + * neither a dedicated cpu nor a dataplane cpu. + */ +static bool network_cpus_init(void) +{ + int rc; + + if (network_cpus_string == NULL) + return false; + + rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); + if (rc != 0) { + pr_warn("tile_net.cpus=%s: malformed cpu list\n", + network_cpus_string); + return false; + } + + /* Remove dedicated cpus. */ + cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); + + if (cpumask_empty(&network_cpus_map)) { + pr_warn("Ignoring empty tile_net.cpus='%s'.\n", + network_cpus_string); + return false; + } + + pr_info("Linux network CPUs: %*pbl\n", + cpumask_pr_args(&network_cpus_map)); + return true; +} + +module_param_named(cpus, network_cpus_string, charp, 0444); +MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); + +/* The "tile_net.loopify=LINK" argument causes the named device to + * actually use "loop0" for ingress, and "loop1" for egress. This + * allows an app to sit between the actual link and linux, passing + * (some) packets along to linux, and forwarding (some) packets sent + * out by linux. + */ +module_param_named(loopify, loopify_link_name, charp, 0444); +MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); + +/* The "tile_net.custom" argument causes us to ignore the "conventional" + * classifier metadata, in particular, the "l2_offset". + */ +module_param_named(custom, custom_flag, bool, 0444); +MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); + +/* The "tile_net.jumbo" argument causes us to support "jumbo" packets, + * and to allocate the given number of "jumbo" buffers. + */ +module_param_named(jumbo, jumbo_num, uint, 0444); +MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets"); + +/* Atomically update a statistics field. + * Note that on TILE-Gx, this operation is fire-and-forget on the + * issuing core (single-cycle dispatch) and takes only a few cycles + * longer than a regular store when the request reaches the home cache. + * No expensive bus management overhead is required. + */ +static void tile_net_stats_add(unsigned long value, unsigned long *field) +{ + BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); + atomic_long_add(value, (atomic_long_t *)field); +} + +/* Allocate and push a buffer. */ +static bool tile_net_provide_buffer(int instance, int kind) +{ + struct mpipe_data *md = &mpipe_data[instance]; + gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind]; + size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse); + const unsigned long buffer_alignment = 128; + struct sk_buff *skb; + int len; + + len = sizeof(struct sk_buff **) + buffer_alignment + bs; + skb = dev_alloc_skb(len); + if (skb == NULL) + return false; + + /* Make room for a back-pointer to 'skb' and guarantee alignment. */ + skb_reserve(skb, sizeof(struct sk_buff **)); + skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); + + /* Save a back-pointer to 'skb'. */ + *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; + + /* Make sure "skb" and the back-pointer have been flushed. */ + wmb(); + + gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind, + (void *)va_to_tile_io_addr(skb->data)); + + return true; +} + +/* Convert a raw mpipe buffer to its matching skb pointer. 
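+ * (tile_net_provide_buffer() above stored a back-pointer to the skb in
+ * the sizeof(struct sk_buff **) bytes just before skb->data, so the skb
+ * is recovered with a single load at va - sizeof(*skb_ptr).)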
*/ +static struct sk_buff *mpipe_buf_to_skb(void *va) +{ + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + /* Paranoia. */ + if (skb->data != va) { + /* Panic here since there's a reasonable chance + * that corrupt buffers means generic memory + * corruption, with unpredictable system effects. + */ + panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", + va, skb, skb->data); + } + + return skb; +} + +static void tile_net_pop_all_buffers(int instance, int stack) +{ + struct mpipe_data *md = &mpipe_data[instance]; + + for (;;) { + tile_io_addr_t addr = + (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context, + stack); + if (addr == 0) + break; + dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); + } +} + +/* Provide linux buffers to mPIPE. */ +static void tile_net_provide_needed_buffers(void) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + int instance, kind; + for (instance = 0; instance < NR_MPIPE_MAX && + info->mpipe[instance].has_iqueue; instance++) { + for (kind = 0; kind < MAX_KINDS; kind++) { + while (info->mpipe[instance].num_needed_buffers[kind] + != 0) { + if (!tile_net_provide_buffer(instance, kind)) { + pr_notice("Tile %d still needs" + " some buffers\n", + info->my_cpu); + return; + } + info->mpipe[instance]. + num_needed_buffers[kind]--; + } + } + } +} + +/* Get RX timestamp, and store it in the skb. */ +static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb, + gxio_mpipe_idesc_t *idesc) +{ + if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec, + idesc->time_stamp_ns); + } +} + +/* Get TX timestamp, and store it in the skb. */ +static void tile_tx_timestamp(struct sk_buff *skb, int instance) +{ + struct skb_shared_info *shtx = skb_shinfo(skb); + if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) { + struct mpipe_data *md = &mpipe_data[instance]; + struct skb_shared_hwtstamps shhwtstamps; + struct timespec ts; + + shtx->tx_flags |= SKBTX_IN_PROGRESS; + gxio_mpipe_get_timestamp(&md->context, &ts); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + skb_tstamp_tx(skb, &shhwtstamps); + } +} + +/* Use ioctl() to enable or disable TX or RX timestamping. 
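+ * This follows the usual SIOCSHWTSTAMP pattern: user space passes a
+ * struct hwtstamp_config through rq->ifr_data, e.g. with tx_type
+ * HWTSTAMP_TX_ON and rx_filter HWTSTAMP_FILTER_ALL (example values;
+ * the switch below lists everything accepted).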
*/ +static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq) +{ + struct hwtstamp_config config; + struct tile_net_priv *priv = netdev_priv(dev); + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) /* reserved for future extensions */ + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (copy_to_user(rq->ifr_data, &config, sizeof(config))) + return -EFAULT; + + priv->stamp_cfg = config; + return 0; +} + +static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + if (copy_to_user(rq->ifr_data, &priv->stamp_cfg, + sizeof(priv->stamp_cfg))) + return -EFAULT; + + return 0; +} + +static inline bool filter_packet(struct net_device *dev, void *buf) +{ + /* Filter packets received before we're up. */ + if (dev == NULL || !(dev->flags & IFF_UP)) + return true; + + /* Filter out packets that aren't for us. */ + if (!(dev->flags & IFF_PROMISC) && + !is_multicast_ether_addr(buf) && + !ether_addr_equal(dev->dev_addr, buf)) + return true; + + return false; +} + +static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, + gxio_mpipe_idesc_t *idesc, unsigned long len) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + struct tile_net_priv *priv = netdev_priv(dev); + int instance = priv->instance; + + /* Encode the actual packet length. */ + skb_put(skb, len); + + skb->protocol = eth_type_trans(skb, dev); + + /* Acknowledge "good" hardware checksums. */ + if (idesc->cs && idesc->csum_seed_val == 0xFFFF) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Get RX timestamp from idesc. */ + tile_rx_timestamp(priv, skb, idesc); + + napi_gro_receive(&info->mpipe[instance].napi, skb); + + /* Update stats. */ + tile_net_stats_add(1, &dev->stats.rx_packets); + tile_net_stats_add(len, &dev->stats.rx_bytes); + + /* Need a new buffer. */ + if (idesc->size == buffer_size_enums[0]) + info->mpipe[instance].num_needed_buffers[0]++; + else if (idesc->size == buffer_size_enums[1]) + info->mpipe[instance].num_needed_buffers[1]++; + else + info->mpipe[instance].num_needed_buffers[2]++; +} + +/* Handle a packet. Return true if "processed", false if "filtered". 
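+ * Filtered (and errored) packets are handed back to mPIPE with
+ * gxio_mpipe_iqueue_drop(); in every case the idesc is then consumed so
+ * the iqueue can advance past it.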
*/ +static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + struct mpipe_data *md = &mpipe_data[instance]; + struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; + uint8_t l2_offset; + void *va; + void *buf; + unsigned long len; + bool filter; + + /* Drop packets for which no buffer was available (which can + * happen under heavy load), or for which the me/tr/ce flags + * are set (which can happen for jumbo cut-through packets, + * or with a customized classifier). + */ + if (idesc->be || idesc->me || idesc->tr || idesc->ce) { + if (dev) + tile_net_stats_add(1, &dev->stats.rx_errors); + goto drop; + } + + /* Get the "l2_offset", if allowed. */ + l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); + + /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */ + va = tile_io_addr_to_va((unsigned long)idesc->va); + + /* Get the actual packet start/length. */ + buf = va + l2_offset; + len = idesc->l2_size - l2_offset; + + /* Point "va" at the raw buffer. */ + va -= NET_IP_ALIGN; + + filter = filter_packet(dev, buf); + if (filter) { + if (dev) + tile_net_stats_add(1, &dev->stats.rx_dropped); +drop: + gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc); + } else { + struct sk_buff *skb = mpipe_buf_to_skb(va); + + /* Skip headroom, and any custom header. */ + skb_reserve(skb, NET_IP_ALIGN + l2_offset); + + tile_net_receive_skb(dev, skb, idesc, len); + } + + gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc); + return !filter; +} + +/* Handle some packets for the current CPU. + * + * This function handles up to TILE_NET_BATCH idescs per call. + * + * ISSUE: Since we do not provide new buffers until this function is + * complete, we must initially provide enough buffers for each network + * cpu to fill its iqueue and also its batched idescs. + * + * ISSUE: The "rotting packet" race condition occurs if a packet + * arrives after the queue appears to be empty, and before the + * hypervisor interrupt is re-enabled. + */ +static int tile_net_poll(struct napi_struct *napi, int budget) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + unsigned int work = 0; + gxio_mpipe_idesc_t *idesc; + int instance, i, n; + struct mpipe_data *md; + struct info_mpipe *info_mpipe = + container_of(napi, struct info_mpipe, napi); + + if (budget <= 0) + goto done; + + instance = info_mpipe->instance; + while ((n = gxio_mpipe_iqueue_try_peek( + &info_mpipe->iqueue, + &idesc)) > 0) { + for (i = 0; i < n; i++) { + if (i == TILE_NET_BATCH) + goto done; + if (tile_net_handle_packet(instance, + idesc + i)) { + if (++work >= budget) + goto done; + } + } + } + + /* There are no packets left. */ + napi_complete(&info_mpipe->napi); + + md = &mpipe_data[instance]; + /* Re-enable hypervisor interrupts. */ + gxio_mpipe_enable_notif_ring_interrupt( + &md->context, info->mpipe[instance].iqueue.ring); + + /* HACK: Avoid the "rotting packet" problem. */ + if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0) + napi_schedule(&info_mpipe->napi); + + /* ISSUE: Handle completions? */ + +done: + tile_net_provide_needed_buffers(); + + return work; +} + +/* Handle an ingress interrupt from an instance on the current cpu. */ +static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + napi_schedule(&info->mpipe[(uint64_t)id].napi); + return IRQ_HANDLED; +} + +/* Free some completions. 
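+ * Entries are released in order: comp_last chases comp_next, and an
+ * entry can be freed only once gxio_mpipe_equeue_is_complete() reports
+ * that the egress slot recorded in comp->when has been processed.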
This must be called with interrupts blocked. */ +static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, + struct tile_net_comps *comps, + int limit, bool force_update) +{ + int n = 0; + while (comps->comp_last < comps->comp_next) { + unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; + struct tile_net_comp *comp = &comps->comp_queue[cid]; + if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, + force_update || n == 0)) + break; + dev_kfree_skb_irq(comp->skb); + comps->comp_last++; + if (++n == limit) + break; + } + return n; +} + +/* Add a completion. This must be called with interrupts blocked. + * tile_net_equeue_try_reserve() will have ensured a free completion entry. + */ +static void add_comp(gxio_mpipe_equeue_t *equeue, + struct tile_net_comps *comps, + uint64_t when, struct sk_buff *skb) +{ + int cid = comps->comp_next % TILE_NET_MAX_COMPS; + comps->comp_queue[cid].when = when; + comps->comp_queue[cid].skb = skb; + comps->comp_next++; +} + +static void tile_net_schedule_tx_wake_timer(struct net_device *dev, + int tx_queue_idx) +{ + struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx); + struct tile_net_priv *priv = netdev_priv(dev); + int instance = priv->instance; + struct tile_net_tx_wake *tx_wake = + &info->mpipe[instance].tx_wake[priv->echannel]; + + hrtimer_start(&tx_wake->timer, + ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), + HRTIMER_MODE_REL_PINNED); +} + +static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) +{ + struct tile_net_tx_wake *tx_wake = + container_of(t, struct tile_net_tx_wake, timer); + netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx); + return HRTIMER_NORESTART; +} + +/* Make sure the egress timer is scheduled. */ +static void tile_net_schedule_egress_timer(void) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + + if (!info->egress_timer_scheduled) { + hrtimer_start(&info->egress_timer, + ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), + HRTIMER_MODE_REL_PINNED); + info->egress_timer_scheduled = true; + } +} + +/* The "function" for "info->egress_timer". + * + * This timer will reschedule itself as long as there are any pending + * completions expected for this tile. + */ +static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + unsigned long irqflags; + bool pending = false; + int i, instance; + + local_irq_save(irqflags); + + /* The timer is no longer scheduled. */ + info->egress_timer_scheduled = false; + + /* Free all possible comps for this tile. */ + for (instance = 0; instance < NR_MPIPE_MAX && + info->mpipe[instance].has_iqueue; instance++) { + for (i = 0; i < TILE_NET_CHANNELS; i++) { + struct tile_net_egress *egress = + &mpipe_data[instance].egress_for_echannel[i]; + struct tile_net_comps *comps = + info->mpipe[instance].comps_for_echannel[i]; + if (!egress || comps->comp_last >= comps->comp_next) + continue; + tile_net_free_comps(egress->equeue, comps, -1, true); + pending = pending || + (comps->comp_last < comps->comp_next); + } + } + + /* Reschedule timer if needed. */ + if (pending) + tile_net_schedule_egress_timer(); + + local_irq_restore(irqflags); + + return HRTIMER_NORESTART; +} + +/* PTP clock operations. 
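+ * The adjust/get/set callbacks take md->ptp_lock around the gxio
+ * timestamp call; a gxio failure is reported as -EINVAL for adjfreq and
+ * -EBUSY for the others, while enable simply returns -EOPNOTSUPP.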
*/ + +static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb)) + ret = -EINVAL; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_adjust_timestamp(&md->context, delta)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_get_timestamp(&md->context, ts)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + int ret = 0; + struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); + mutex_lock(&md->ptp_lock); + if (gxio_mpipe_set_timestamp(&md->context, ts)) + ret = -EBUSY; + mutex_unlock(&md->ptp_lock); + return ret; +} + +static int ptp_mpipe_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on) +{ + return -EOPNOTSUPP; +} + +static struct ptp_clock_info ptp_mpipe_caps = { + .owner = THIS_MODULE, + .name = "mPIPE clock", + .max_adj = 999999999, + .n_ext_ts = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = ptp_mpipe_adjfreq, + .adjtime = ptp_mpipe_adjtime, + .gettime64 = ptp_mpipe_gettime, + .settime64 = ptp_mpipe_settime, + .enable = ptp_mpipe_enable, +}; + +/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */ +static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md) +{ + struct timespec ts; + + getnstimeofday(&ts); + gxio_mpipe_set_timestamp(&md->context, &ts); + + mutex_init(&md->ptp_lock); + md->caps = ptp_mpipe_caps; + md->ptp_clock = ptp_clock_register(&md->caps, NULL); + if (IS_ERR(md->ptp_clock)) + netdev_err(dev, "ptp_clock_register failed %ld\n", + PTR_ERR(md->ptp_clock)); +} + +/* Initialize PTP fields in a new device. */ +static void init_ptp_dev(struct tile_net_priv *priv) +{ + priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; +} + +/* Helper functions for "tile_net_update()". */ +static void enable_ingress_irq(void *irq) +{ + enable_percpu_irq((long)irq, 0); +} + +static void disable_ingress_irq(void *irq) +{ + disable_percpu_irq((long)irq); +} + +/* Helper function for tile_net_open() and tile_net_stop(). + * Always called under tile_net_devs_for_channel_mutex. + */ +static int tile_net_update(struct net_device *dev) +{ + static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ + bool saw_channel = false; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + int channel; + int rc; + int cpu; + + saw_channel = false; + gxio_mpipe_rules_init(&rules, &md->context); + + for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { + if (md->tile_net_devs_for_channel[channel] == NULL) + continue; + if (!saw_channel) { + saw_channel = true; + gxio_mpipe_rules_begin(&rules, md->first_bucket, + md->num_buckets, NULL); + gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); + } + gxio_mpipe_rules_add_channel(&rules, channel); + } + + /* NOTE: This can fail if there is no classifier. 
+ * ISSUE: Can anything else cause it to fail? + */ + rc = gxio_mpipe_rules_commit(&rules); + if (rc != 0) { + netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n", + instance, rc); + return -EIO; + } + + /* Update all cpus, sequentially (to protect "netif_napi_add()"). + * We use on_each_cpu to handle the IPI mask or unmask. + */ + if (!saw_channel) + on_each_cpu(disable_ingress_irq, + (void *)(long)(md->ingress_irq), 1); + for_each_online_cpu(cpu) { + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + + if (!info->mpipe[instance].has_iqueue) + continue; + if (saw_channel) { + if (!info->mpipe[instance].napi_added) { + netif_napi_add(dev, &info->mpipe[instance].napi, + tile_net_poll, TILE_NET_WEIGHT); + info->mpipe[instance].napi_added = true; + } + if (!info->mpipe[instance].napi_enabled) { + napi_enable(&info->mpipe[instance].napi); + info->mpipe[instance].napi_enabled = true; + } + } else { + if (info->mpipe[instance].napi_enabled) { + napi_disable(&info->mpipe[instance].napi); + info->mpipe[instance].napi_enabled = false; + } + /* FIXME: Drain the iqueue. */ + } + } + if (saw_channel) + on_each_cpu(enable_ingress_irq, + (void *)(long)(md->ingress_irq), 1); + + /* HACK: Allow packets to flow in the simulator. */ + if (saw_channel) + sim_enable_mpipe_links(instance, -1); + + return 0; +} + +/* Initialize a buffer stack. */ +static int create_buffer_stack(struct net_device *dev, + int kind, size_t num_buffers) +{ + pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers); + int stack_idx = md->first_buffer_stack + kind; + void *va; + int i, rc; + + /* Round up to 64KB and then use alloc_pages() so we get the + * required 64KB alignment. + */ + md->buffer_stack_bytes[kind] = + ALIGN(needed, 64 * 1024); + + va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL); + if (va == NULL) { + netdev_err(dev, + "Could not alloc %zd bytes for buffer stack %d\n", + md->buffer_stack_bytes[kind], kind); + return -ENOMEM; + } + + /* Initialize the buffer stack. */ + rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx, + buffer_size_enums[kind], va, + md->buffer_stack_bytes[kind], 0); + if (rc != 0) { + netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n", + instance, rc); + free_pages_exact(va, md->buffer_stack_bytes[kind]); + return rc; + } + + md->buffer_stack_vas[kind] = va; + + rc = gxio_mpipe_register_client_memory(&md->context, stack_idx, + hash_pte, 0); + if (rc != 0) { + netdev_err(dev, + "gxio_mpipe_register_client_memory: mpipe[%d] %d\n", + instance, rc); + return rc; + } + + /* Provide initial buffers. */ + for (i = 0; i < num_buffers; i++) { + if (!tile_net_provide_buffer(instance, kind)) { + netdev_err(dev, "Cannot allocate initial sk_bufs!\n"); + return -ENOMEM; + } + } + + return 0; +} + +/* Allocate and initialize mpipe buffer stacks, and register them in + * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes. + * This routine supports tile_net_init_mpipe(), below. + */ +static int init_buffer_stacks(struct net_device *dev, + int network_cpus_count) +{ + int num_kinds = MAX_KINDS - (jumbo_num == 0); + size_t num_buffers; + int rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + + /* Allocate the buffer stacks. 
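+ * One stack is created per buffer kind: small, large, and (when
+ * jumbo_num != 0) jumbo.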
*/ + rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0); + if (rc < 0) { + netdev_err(dev, + "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n", + instance, rc); + return rc; + } + md->first_buffer_stack = rc; + + /* Enough small/large buffers to (normally) avoid buffer errors. */ + num_buffers = + network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); + + /* Allocate the small memory stack. */ + if (rc >= 0) + rc = create_buffer_stack(dev, 0, num_buffers); + + /* Allocate the large buffer stack. */ + if (rc >= 0) + rc = create_buffer_stack(dev, 1, num_buffers); + + /* Allocate the jumbo buffer stack if needed. */ + if (rc >= 0 && jumbo_num != 0) + rc = create_buffer_stack(dev, 2, jumbo_num); + + return rc; +} + +/* Allocate per-cpu resources (memory for completions and idescs). + * This routine supports tile_net_init_mpipe(), below. + */ +static int alloc_percpu_mpipe_resources(struct net_device *dev, + int cpu, int ring) +{ + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + int order, i, rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + struct page *page; + void *addr; + + /* Allocate the "comps". */ + order = get_order(COMPS_SIZE); + page = homecache_alloc_pages(GFP_KERNEL, order, cpu); + if (page == NULL) { + netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", + COMPS_SIZE); + return -ENOMEM; + } + addr = pfn_to_kaddr(page_to_pfn(page)); + memset(addr, 0, COMPS_SIZE); + for (i = 0; i < TILE_NET_CHANNELS; i++) + info->mpipe[instance].comps_for_echannel[i] = + addr + i * sizeof(struct tile_net_comps); + + /* If this is a network cpu, create an iqueue. */ + if (cpumask_test_cpu(cpu, &network_cpus_map)) { + order = get_order(NOTIF_RING_SIZE); + page = homecache_alloc_pages(GFP_KERNEL, order, cpu); + if (page == NULL) { + netdev_err(dev, + "Failed to alloc %zd bytes iqueue memory\n", + NOTIF_RING_SIZE); + return -ENOMEM; + } + addr = pfn_to_kaddr(page_to_pfn(page)); + rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue, + &md->context, ring++, addr, + NOTIF_RING_SIZE, 0); + if (rc < 0) { + netdev_err(dev, + "gxio_mpipe_iqueue_init failed: %d\n", rc); + return rc; + } + info->mpipe[instance].has_iqueue = true; + } + + return ring; +} + +/* Initialize NotifGroup and buckets. + * This routine supports tile_net_init_mpipe(), below. + */ +static int init_notif_group_and_buckets(struct net_device *dev, + int ring, int network_cpus_count) +{ + int group, rc; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + + /* Allocate one NotifGroup. */ + rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0); + if (rc < 0) { + netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n", + instance, rc); + return rc; + } + group = rc; + + /* Initialize global num_buckets value. */ + if (network_cpus_count > 4) + md->num_buckets = 256; + else if (network_cpus_count > 1) + md->num_buckets = 16; + + /* Allocate some buckets, and set global first_bucket value. */ + rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0); + if (rc < 0) { + netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n", + instance, rc); + return rc; + } + md->first_bucket = rc; + + /* Init group and buckets. 
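+ * The group binds rings [ring, ring + network_cpus_count) to buckets
+ * [first_bucket, first_bucket + num_buckets) using sticky flow-locality
+ * hashing, which tends to keep a given flow on a single NotifRing.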
*/ + rc = gxio_mpipe_init_notif_group_and_buckets( + &md->context, group, ring, network_cpus_count, + md->first_bucket, md->num_buckets, + GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); + if (rc != 0) { + netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: " + "mpipe[%d] %d\n", instance, rc); + return rc; + } + + return 0; +} + +/* Create an irq and register it, then activate the irq and request + * interrupts on all cores. Note that "ingress_irq" being initialized + * is how we know not to call tile_net_init_mpipe() again. + * This routine supports tile_net_init_mpipe(), below. + */ +static int tile_net_setup_interrupts(struct net_device *dev) +{ + int cpu, rc, irq; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + + irq = md->ingress_irq; + if (irq < 0) { + irq = irq_alloc_hwirq(-1); + if (!irq) { + netdev_err(dev, + "create_irq failed: mpipe[%d] %d\n", + instance, irq); + return irq; + } + tile_irq_activate(irq, TILE_IRQ_PERCPU); + + rc = request_irq(irq, tile_net_handle_ingress_irq, + 0, "tile_net", (void *)((uint64_t)instance)); + + if (rc != 0) { + netdev_err(dev, "request_irq failed: mpipe[%d] %d\n", + instance, rc); + irq_free_hwirq(irq); + return rc; + } + md->ingress_irq = irq; + } + + for_each_online_cpu(cpu) { + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + if (info->mpipe[instance].has_iqueue) { + gxio_mpipe_request_notif_ring_interrupt(&md->context, + cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq, + info->mpipe[instance].iqueue.ring); + } + } + + return 0; +} + +/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ +static void tile_net_init_mpipe_fail(int instance) +{ + int kind, cpu; + struct mpipe_data *md = &mpipe_data[instance]; + + /* Do cleanups that require the mpipe context first. */ + for (kind = 0; kind < MAX_KINDS; kind++) { + if (md->buffer_stack_vas[kind] != NULL) { + tile_net_pop_all_buffers(instance, + md->first_buffer_stack + + kind); + } + } + + /* Destroy mpipe context so the hardware no longer owns any memory. */ + gxio_mpipe_destroy(&md->context); + + for_each_online_cpu(cpu) { + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + free_pages( + (unsigned long)( + info->mpipe[instance].comps_for_echannel[0]), + get_order(COMPS_SIZE)); + info->mpipe[instance].comps_for_echannel[0] = NULL; + free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs), + get_order(NOTIF_RING_SIZE)); + info->mpipe[instance].iqueue.idescs = NULL; + } + + for (kind = 0; kind < MAX_KINDS; kind++) { + if (md->buffer_stack_vas[kind] != NULL) { + free_pages_exact(md->buffer_stack_vas[kind], + md->buffer_stack_bytes[kind]); + md->buffer_stack_vas[kind] = NULL; + } + } + + md->first_buffer_stack = -1; + md->first_bucket = -1; +} + +/* The first time any tilegx network device is opened, we initialize + * the global mpipe state. If this step fails, we fail to open the + * device, but if it succeeds, we never need to do it again, and since + * tile_net can't be unloaded, we never undo it. + * + * Note that some resources in this path (buffer stack indices, + * bindings from init_buffer_stack, etc.) are hypervisor resources + * that are freed implicitly by gxio_mpipe_destroy(). 
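+ *
+ * The initialization order below is: buffer stacks, one NotifRing per
+ * network cpu, the NotifGroup and buckets, ingress interrupts, and
+ * finally the PTP clock.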
+ */ +static int tile_net_init_mpipe(struct net_device *dev) +{ + int rc; + int cpu; + int first_ring, ring; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + int network_cpus_count = cpumask_weight(&network_cpus_map); + + if (!hash_default) { + netdev_err(dev, "Networking requires hash_default!\n"); + return -EIO; + } + + rc = gxio_mpipe_init(&md->context, instance); + if (rc != 0) { + netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n", + instance, rc); + return -EIO; + } + + /* Set up the buffer stacks. */ + rc = init_buffer_stacks(dev, network_cpus_count); + if (rc != 0) + goto fail; + + /* Allocate one NotifRing for each network cpu. */ + rc = gxio_mpipe_alloc_notif_rings(&md->context, + network_cpus_count, 0, 0); + if (rc < 0) { + netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", + rc); + goto fail; + } + + /* Init NotifRings per-cpu. */ + first_ring = rc; + ring = first_ring; + for_each_online_cpu(cpu) { + rc = alloc_percpu_mpipe_resources(dev, cpu, ring); + if (rc < 0) + goto fail; + ring = rc; + } + + /* Initialize NotifGroup and buckets. */ + rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); + if (rc != 0) + goto fail; + + /* Create and enable interrupts. */ + rc = tile_net_setup_interrupts(dev); + if (rc != 0) + goto fail; + + /* Register PTP clock and set mPIPE timestamp, if configured. */ + register_ptp_clock(dev, md); + + return 0; + +fail: + tile_net_init_mpipe_fail(instance); + return rc; +} + +/* Create persistent egress info for a given egress channel. + * Note that this may be shared between, say, "gbe0" and "xgbe0". + * ISSUE: Defer header allocation until TSO is actually needed? + */ +static int tile_net_init_egress(struct net_device *dev, int echannel) +{ + static int ering = -1; + struct page *headers_page, *edescs_page, *equeue_page; + gxio_mpipe_edesc_t *edescs; + gxio_mpipe_equeue_t *equeue; + unsigned char *headers; + int headers_order, edescs_order, equeue_order; + size_t edescs_size; + int rc = -ENOMEM; + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + + /* Only initialize once. */ + if (md->egress_for_echannel[echannel].equeue != NULL) + return 0; + + /* Allocate memory for the "headers". */ + headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); + headers_page = alloc_pages(GFP_KERNEL, headers_order); + if (headers_page == NULL) { + netdev_warn(dev, + "Could not alloc %zd bytes for TSO headers.\n", + PAGE_SIZE << headers_order); + goto fail; + } + headers = pfn_to_kaddr(page_to_pfn(headers_page)); + + /* Allocate memory for the "edescs". */ + edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); + edescs_order = get_order(edescs_size); + edescs_page = alloc_pages(GFP_KERNEL, edescs_order); + if (edescs_page == NULL) { + netdev_warn(dev, + "Could not alloc %zd bytes for eDMA ring.\n", + edescs_size); + goto fail_headers; + } + edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); + + /* Allocate memory for the "equeue". */ + equeue_order = get_order(sizeof(*equeue)); + equeue_page = alloc_pages(GFP_KERNEL, equeue_order); + if (equeue_page == NULL) { + netdev_warn(dev, + "Could not alloc %zd bytes for equeue info.\n", + PAGE_SIZE << equeue_order); + goto fail_edescs; + } + equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); + + /* Allocate an edma ring (using a one entry "free list"). 
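+ * The static "ering" remembers a ring allocated by an earlier call that
+ * failed before gxio_mpipe_equeue_init() succeeded, so that ring can be
+ * reused instead of leaking.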
*/ + if (ering < 0) { + rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0); + if (rc < 0) { + netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: " + "mpipe[%d] %d\n", instance, rc); + goto fail_equeue; + } + ering = rc; + } + + /* Initialize the equeue. */ + rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel, + edescs, edescs_size, 0); + if (rc != 0) { + netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n", + instance, rc); + goto fail_equeue; + } + + /* Don't reuse the ering later. */ + ering = -1; + + if (jumbo_num != 0) { + /* Make sure "jumbo" packets can be egressed safely. */ + if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) { + /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */ + netdev_warn(dev, "Jumbo packets may not be egressed" + " properly on channel %d\n", echannel); + } + } + + /* Done. */ + md->egress_for_echannel[echannel].equeue = equeue; + md->egress_for_echannel[echannel].headers = headers; + return 0; + +fail_equeue: + __free_pages(equeue_page, equeue_order); + +fail_edescs: + __free_pages(edescs_page, edescs_order); + +fail_headers: + __free_pages(headers_page, headers_order); + +fail: + return rc; +} + +/* Return channel number for a newly-opened link. */ +static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, + const char *link_name) +{ + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0); + if (rc < 0) { + netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n", + link_name, instance, rc); + return rc; + } + if (jumbo_num != 0) { + u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO; + rc = gxio_mpipe_link_set_attr(link, attr, 1); + if (rc != 0) { + netdev_err(dev, + "Cannot receive jumbo packets on '%s'\n", + link_name); + gxio_mpipe_link_close(link); + return rc; + } + } + rc = gxio_mpipe_link_channel(link); + if (rc < 0 || rc >= TILE_NET_CHANNELS) { + netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); + gxio_mpipe_link_close(link); + return -EINVAL; + } + return rc; +} + +/* Help the kernel activate the given network interface. */ +static int tile_net_open(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int cpu, rc, instance; + + mutex_lock(&tile_net_devs_for_channel_mutex); + + /* Get the instance info. */ + rc = gxio_mpipe_link_instance(dev->name); + if (rc < 0 || rc >= NR_MPIPE_MAX) { + mutex_unlock(&tile_net_devs_for_channel_mutex); + return -EIO; + } + + priv->instance = rc; + instance = rc; + if (!mpipe_data[rc].context.mmio_fast_base) { + /* Do one-time initialization per instance the first time + * any device is opened. + */ + rc = tile_net_init_mpipe(dev); + if (rc != 0) + goto fail; + } + + /* Determine if this is the "loopify" device. */ + if (unlikely((loopify_link_name != NULL) && + !strcmp(dev->name, loopify_link_name))) { + rc = tile_net_link_open(dev, &priv->link, "loop0"); + if (rc < 0) + goto fail; + priv->channel = rc; + rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); + if (rc < 0) + goto fail; + priv->loopify_channel = rc; + priv->echannel = rc; + } else { + rc = tile_net_link_open(dev, &priv->link, dev->name); + if (rc < 0) + goto fail; + priv->channel = rc; + priv->echannel = rc; + } + + /* Initialize egress info (if needed). Once ever, per echannel. 
*/ + rc = tile_net_init_egress(dev, priv->echannel); + if (rc != 0) + goto fail; + + mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev; + + rc = tile_net_update(dev); + if (rc != 0) + goto fail; + + mutex_unlock(&tile_net_devs_for_channel_mutex); + + /* Initialize the transmit wake timer for this device for each cpu. */ + for_each_online_cpu(cpu) { + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + struct tile_net_tx_wake *tx_wake = + &info->mpipe[instance].tx_wake[priv->echannel]; + + hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + tx_wake->tx_queue_idx = cpu; + tx_wake->timer.function = tile_net_handle_tx_wake_timer; + tx_wake->dev = dev; + } + + for_each_online_cpu(cpu) + netif_start_subqueue(dev, cpu); + netif_carrier_on(dev); + return 0; + +fail: + if (priv->loopify_channel >= 0) { + if (gxio_mpipe_link_close(&priv->loopify_link) != 0) + netdev_warn(dev, "Failed to close loopify link!\n"); + priv->loopify_channel = -1; + } + if (priv->channel >= 0) { + if (gxio_mpipe_link_close(&priv->link) != 0) + netdev_warn(dev, "Failed to close link!\n"); + priv->channel = -1; + } + priv->echannel = -1; + mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL; + mutex_unlock(&tile_net_devs_for_channel_mutex); + + /* Don't return raw gxio error codes to generic Linux. */ + return (rc > -512) ? rc : -EIO; +} + +/* Help the kernel deactivate the given network interface. */ +static int tile_net_stop(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int cpu; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; + + for_each_online_cpu(cpu) { + struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); + struct tile_net_tx_wake *tx_wake = + &info->mpipe[instance].tx_wake[priv->echannel]; + + hrtimer_cancel(&tx_wake->timer); + netif_stop_subqueue(dev, cpu); + } + + mutex_lock(&tile_net_devs_for_channel_mutex); + md->tile_net_devs_for_channel[priv->channel] = NULL; + (void)tile_net_update(dev); + if (priv->loopify_channel >= 0) { + if (gxio_mpipe_link_close(&priv->loopify_link) != 0) + netdev_warn(dev, "Failed to close loopify link!\n"); + priv->loopify_channel = -1; + } + if (priv->channel >= 0) { + if (gxio_mpipe_link_close(&priv->link) != 0) + netdev_warn(dev, "Failed to close link!\n"); + priv->channel = -1; + } + priv->echannel = -1; + mutex_unlock(&tile_net_devs_for_channel_mutex); + + return 0; +} + +/* Determine the VA for a fragment. */ +static inline void *tile_net_frag_buf(skb_frag_t *f) +{ + unsigned long pfn = page_to_pfn(skb_frag_page(f)); + return pfn_to_kaddr(pfn) + f->page_offset; +} + +/* Acquire a completion entry and an egress slot, or if we can't, + * stop the queue and schedule the tx_wake timer. + */ +static s64 tile_net_equeue_try_reserve(struct net_device *dev, + int tx_queue_idx, + struct tile_net_comps *comps, + gxio_mpipe_equeue_t *equeue, + int num_edescs) +{ + /* Try to acquire a completion entry. */ + if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || + tile_net_free_comps(equeue, comps, 32, false) != 0) { + + /* Try to acquire an egress slot. */ + s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); + if (slot >= 0) + return slot; + + /* Freeing some completions gives the equeue time to drain. */ + tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); + + slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); + if (slot >= 0) + return slot; + } + + /* Still nothing; give up and stop the queue for a short while. 
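+ * The per-cpu tx_wake hrtimer re-enables the subqueue after
+ * TX_TIMER_DELAY_USEC, by which time some completions should have
+ * drained from the equeue.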
*/ + netif_stop_subqueue(dev, tx_queue_idx); + tile_net_schedule_tx_wake_timer(dev, tx_queue_idx); + return -1; +} + +/* Determine how many edesc's are needed for TSO. + * + * Sometimes, if "sendfile()" requires copying, we will be called with + * "data" containing the header and payload, with "frags" being empty. + * Sometimes, for example when using NFS over TCP, a single segment can + * span 3 fragments. This requires special care. + */ +static int tso_count_edescs(struct sk_buff *skb) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + unsigned int data_len = skb->len - sh_len; + unsigned int p_len = sh->gso_size; + long f_id = -1; /* id of the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ + long n; /* size of the current piece of payload */ + int num_edescs = 0; + int segment; + + for (segment = 0; segment < sh->gso_segs; segment++) { + + unsigned int p_used = 0; + + /* One edesc for header and for each piece of the payload. */ + for (num_edescs++; p_used < p_len; num_edescs++) { + + /* Advance as needed. */ + while (f_used >= f_size) { + f_id++; + f_size = skb_frag_size(&sh->frags[f_id]); + f_used = 0; + } + + /* Use bytes from the current fragment. */ + n = p_len - p_used; + if (n > f_size - f_used) + n = f_size - f_used; + f_used += n; + p_used += n; + } + + /* The last segment may be less than gso_size. */ + data_len -= p_len; + if (data_len < p_len) + p_len = data_len; + } + + return num_edescs; +} + +/* Prepare modified copies of the skbuff headers. */ +static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, + s64 slot) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + struct iphdr *ih; + struct ipv6hdr *ih6; + struct tcphdr *th; + unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + unsigned int data_len = skb->len - sh_len; + unsigned char *data = skb->data; + unsigned int ih_off, th_off, p_len; + unsigned int isum_seed, tsum_seed, seq; + unsigned int uninitialized_var(id); + int is_ipv6; + long f_id = -1; /* id of the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ + long n; /* size of the current piece of payload */ + int segment; + + /* Locate original headers and compute various lengths. */ + is_ipv6 = skb_is_gso_v6(skb); + if (is_ipv6) { + ih6 = ipv6_hdr(skb); + ih_off = skb_network_offset(skb); + } else { + ih = ip_hdr(skb); + ih_off = skb_network_offset(skb); + isum_seed = ((0xFFFF - ih->check) + + (0xFFFF - ih->tot_len) + + (0xFFFF - ih->id)); + id = ntohs(ih->id); + } + + th = tcp_hdr(skb); + th_off = skb_transport_offset(skb); + p_len = sh->gso_size; + + tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); + seq = ntohl(th->seq); + + /* Prepare all the headers. */ + for (segment = 0; segment < sh->gso_segs; segment++) { + unsigned char *buf; + unsigned int p_used = 0; + + /* Copy to the header memory for this segment. */ + buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + + NET_IP_ALIGN; + memcpy(buf, data, sh_len); + + /* Update copied ip header. 
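+ * Only the length, the IPv4 id, and the IPv4 checksum change from
+ * segment to segment.  The precomputed isum_seed already subtracts the
+ * original tot_len, id and check, so the new checksum is simply
+ * ~fold(isum_seed + new_tot_len + new_id); tsum_seed is used the same
+ * way for the TCP checksum, swapping skb->len for the segment length.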
*/ + if (is_ipv6) { + ih6 = (struct ipv6hdr *)(buf + ih_off); + ih6->payload_len = htons(sh_len + p_len - ih_off - + sizeof(*ih6)); + } else { + ih = (struct iphdr *)(buf + ih_off); + ih->tot_len = htons(sh_len + p_len - ih_off); + ih->id = htons(id++); + ih->check = csum_long(isum_seed + ih->tot_len + + ih->id) ^ 0xffff; + } + + /* Update copied tcp header. */ + th = (struct tcphdr *)(buf + th_off); + th->seq = htonl(seq); + th->check = csum_long(tsum_seed + htons(sh_len + p_len)); + if (segment != sh->gso_segs - 1) { + th->fin = 0; + th->psh = 0; + } + + /* Skip past the header. */ + slot++; + + /* Skip past the payload. */ + while (p_used < p_len) { + + /* Advance as needed. */ + while (f_used >= f_size) { + f_id++; + f_size = skb_frag_size(&sh->frags[f_id]); + f_used = 0; + } + + /* Use bytes from the current fragment. */ + n = p_len - p_used; + if (n > f_size - f_used) + n = f_size - f_used; + f_used += n; + p_used += n; + + slot++; + } + + seq += p_len; + + /* The last segment may be less than gso_size. */ + data_len -= p_len; + if (data_len < p_len) + p_len = data_len; + } + + /* Flush the headers so they are ready for hardware DMA. */ + wmb(); +} + +/* Pass all the data to mpipe for egress. */ +static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, + struct sk_buff *skb, unsigned char *headers, s64 slot) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + int instance = mpipe_instance(dev); + struct mpipe_data *md = &mpipe_data[instance]; + unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + unsigned int data_len = skb->len - sh_len; + unsigned int p_len = sh->gso_size; + gxio_mpipe_edesc_t edesc_head = { { 0 } }; + gxio_mpipe_edesc_t edesc_body = { { 0 } }; + long f_id = -1; /* id of the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ + void *f_data = skb->data + sh_len; + long n; /* size of the current piece of payload */ + unsigned long tx_packets = 0, tx_bytes = 0; + unsigned int csum_start; + int segment; + + /* Prepare to egress the headers: set up header edesc. */ + csum_start = skb_checksum_start_offset(skb); + edesc_head.csum = 1; + edesc_head.csum_start = csum_start; + edesc_head.csum_dest = csum_start + skb->csum_offset; + edesc_head.xfer_size = sh_len; + + /* This is only used to specify the TLB. */ + edesc_head.stack_idx = md->first_buffer_stack; + edesc_body.stack_idx = md->first_buffer_stack; + + /* Egress all the edescs. */ + for (segment = 0; segment < sh->gso_segs; segment++) { + unsigned char *buf; + unsigned int p_used = 0; + + /* Egress the header. */ + buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + + NET_IP_ALIGN; + edesc_head.va = va_to_tile_io_addr(buf); + gxio_mpipe_equeue_put_at(equeue, edesc_head, slot); + slot++; + + /* Egress the payload. */ + while (p_used < p_len) { + void *va; + + /* Advance as needed. */ + while (f_used >= f_size) { + f_id++; + f_size = skb_frag_size(&sh->frags[f_id]); + f_data = tile_net_frag_buf(&sh->frags[f_id]); + f_used = 0; + } + + va = f_data + f_used; + + /* Use bytes from the current fragment. */ + n = p_len - p_used; + if (n > f_size - f_used) + n = f_size - f_used; + f_used += n; + p_used += n; + + /* Egress a piece of the payload. 
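+ * "bound" is set only on the final piece of each segment, so mPIPE
+ * treats the header edesc plus all of its payload edescs as one frame.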
*/ + edesc_body.va = va_to_tile_io_addr(va); + edesc_body.xfer_size = n; + edesc_body.bound = !(p_used < p_len); + gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); + slot++; + } + + tx_packets++; + tx_bytes += sh_len + p_len; + + /* The last segment may be less than gso_size. */ + data_len -= p_len; + if (data_len < p_len) + p_len = data_len; + } + + /* Update stats. */ + tile_net_stats_add(tx_packets, &dev->stats.tx_packets); + tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes); +} + +/* Do "TSO" handling for egress. + * + * Normally drivers set NETIF_F_TSO only to support hardware TSO; + * otherwise the stack uses scatter-gather to implement GSO in software. + * On our testing, enabling GSO support (via NETIF_F_SG) drops network + * performance down to around 7.5 Gbps on the 10G interfaces, although + * also dropping cpu utilization way down, to under 8%. But + * implementing "TSO" in the driver brings performance back up to line + * rate, while dropping cpu usage even further, to less than 4%. In + * practice, profiling of GSO shows that skb_segment() is what causes + * the performance overheads; we benefit in the driver from using + * preallocated memory to duplicate the TCP/IP headers. + */ +static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + struct tile_net_priv *priv = netdev_priv(dev); + int channel = priv->echannel; + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; + struct tile_net_egress *egress = &md->egress_for_echannel[channel]; + struct tile_net_comps *comps = + info->mpipe[instance].comps_for_echannel[channel]; + gxio_mpipe_equeue_t *equeue = egress->equeue; + unsigned long irqflags; + int num_edescs; + s64 slot; + + /* Determine how many mpipe edesc's are needed. */ + num_edescs = tso_count_edescs(skb); + + local_irq_save(irqflags); + + /* Try to acquire a completion entry and an egress slot. */ + slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps, + equeue, num_edescs); + if (slot < 0) { + local_irq_restore(irqflags); + return NETDEV_TX_BUSY; + } + + /* Set up copies of header data properly. */ + tso_headers_prepare(skb, egress->headers, slot); + + /* Actually pass the data to the network hardware. */ + tso_egress(dev, equeue, skb, egress->headers, slot); + + /* Add a completion record. */ + add_comp(equeue, comps, slot + num_edescs - 1, skb); + + local_irq_restore(irqflags); + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(); + + return NETDEV_TX_OK; +} + +/* Analyze the body and frags for a transmit request. */ +static unsigned int tile_net_tx_frags(struct frag *frags, + struct sk_buff *skb, + void *b_data, unsigned int b_len) +{ + unsigned int i, n = 0; + + struct skb_shared_info *sh = skb_shinfo(skb); + + if (b_len != 0) { + frags[n].buf = b_data; + frags[n++].length = b_len; + } + + for (i = 0; i < sh->nr_frags; i++) { + skb_frag_t *f = &sh->frags[i]; + frags[n].buf = tile_net_frag_buf(f); + frags[n++].length = skb_frag_size(f); + } + + return n; +} + +/* Help the kernel transmit a packet. 
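+ * Non-TSO path: the linear data and each page fragment become one edesc
+ * apiece, the last edesc is marked "bound", and a single completion
+ * entry covering the final slot frees the skb once the hardware has
+ * consumed every edesc.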
*/ +static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + struct tile_net_priv *priv = netdev_priv(dev); + int instance = priv->instance; + struct mpipe_data *md = &mpipe_data[instance]; + struct tile_net_egress *egress = + &md->egress_for_echannel[priv->echannel]; + gxio_mpipe_equeue_t *equeue = egress->equeue; + struct tile_net_comps *comps = + info->mpipe[instance].comps_for_echannel[priv->echannel]; + unsigned int len = skb->len; + unsigned char *data = skb->data; + unsigned int num_edescs; + struct frag frags[MAX_FRAGS]; + gxio_mpipe_edesc_t edescs[MAX_FRAGS]; + unsigned long irqflags; + gxio_mpipe_edesc_t edesc = { { 0 } }; + unsigned int i; + s64 slot; + + if (skb_is_gso(skb)) + return tile_net_tx_tso(skb, dev); + + num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); + + /* This is only used to specify the TLB. */ + edesc.stack_idx = md->first_buffer_stack; + + /* Prepare the edescs. */ + for (i = 0; i < num_edescs; i++) { + edesc.xfer_size = frags[i].length; + edesc.va = va_to_tile_io_addr(frags[i].buf); + edescs[i] = edesc; + } + + /* Mark the final edesc. */ + edescs[num_edescs - 1].bound = 1; + + /* Add checksum info to the initial edesc, if needed. */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + unsigned int csum_start = skb_checksum_start_offset(skb); + edescs[0].csum = 1; + edescs[0].csum_start = csum_start; + edescs[0].csum_dest = csum_start + skb->csum_offset; + } + + local_irq_save(irqflags); + + /* Try to acquire a completion entry and an egress slot. */ + slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps, + equeue, num_edescs); + if (slot < 0) { + local_irq_restore(irqflags); + return NETDEV_TX_BUSY; + } + + for (i = 0; i < num_edescs; i++) + gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); + + /* Store TX timestamp if needed. */ + tile_tx_timestamp(skb, instance); + + /* Add a completion record. */ + add_comp(equeue, comps, slot - 1, skb); + + /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */ + tile_net_stats_add(1, &dev->stats.tx_packets); + tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), + &dev->stats.tx_bytes); + + local_irq_restore(irqflags); + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(); + + return NETDEV_TX_OK; +} + +/* Return subqueue id on this core (one per core). */ +static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + return smp_processor_id(); +} + +/* Deal with a transmit timeout. */ +static void tile_net_tx_timeout(struct net_device *dev) +{ + int cpu; + + for_each_online_cpu(cpu) + netif_wake_subqueue(dev, cpu); +} + +/* Ioctl commands. */ +static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (cmd == SIOCSHWTSTAMP) + return tile_hwtstamp_set(dev, rq); + if (cmd == SIOCGHWTSTAMP) + return tile_hwtstamp_get(dev, rq); + + return -EOPNOTSUPP; +} + +/* Change the MTU. */ +static int tile_net_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68) + return -EINVAL; + if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500)) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +/* Change the Ethernet address of the NIC. + * + * The hypervisor driver does not support changing MAC address. 
However, + * the hardware does not do anything with the MAC address, so the address + * which gets used on outgoing packets, and which is accepted on incoming + * packets, is completely up to us. + * + * Returns 0 on success, negative on failure. + */ +static int tile_net_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void tile_net_netpoll(struct net_device *dev) +{ + int instance = mpipe_instance(dev); + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + struct mpipe_data *md = &mpipe_data[instance]; + + disable_percpu_irq(md->ingress_irq); + napi_schedule(&info->mpipe[instance].napi); + enable_percpu_irq(md->ingress_irq, 0); +} +#endif + +static const struct net_device_ops tile_net_ops = { + .ndo_open = tile_net_open, + .ndo_stop = tile_net_stop, + .ndo_start_xmit = tile_net_tx, + .ndo_select_queue = tile_net_select_queue, + .ndo_do_ioctl = tile_net_ioctl, + .ndo_change_mtu = tile_net_change_mtu, + .ndo_tx_timeout = tile_net_tx_timeout, + .ndo_set_mac_address = tile_net_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tile_net_netpoll, +#endif +}; + +/* The setup function. + * + * This uses ether_setup() to assign various fields in dev, including + * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. + */ +static void tile_net_setup(struct net_device *dev) +{ + netdev_features_t features = 0; + + ether_setup(dev); + dev->netdev_ops = &tile_net_ops; + dev->watchdog_timeo = TILE_NET_TIMEOUT; + dev->mtu = 1500; + + features |= NETIF_F_HW_CSUM; + features |= NETIF_F_SG; + features |= NETIF_F_TSO; + features |= NETIF_F_TSO6; + + dev->hw_features |= features; + dev->vlan_features |= features; + dev->features |= features; +} + +/* Allocate the device structure, register the device, and obtain the + * MAC address from the hypervisor. + */ +static void tile_net_dev_init(const char *name, const uint8_t *mac) +{ + int ret; + struct net_device *dev; + struct tile_net_priv *priv; + + /* HACK: Ignore "loop" links. */ + if (strncmp(name, "loop", 4) == 0) + return; + + /* Allocate the device structure. Normally, "name" is a + * template, instantiated by register_netdev(), but not for us. + */ + dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN, + tile_net_setup, NR_CPUS, 1); + if (!dev) { + pr_err("alloc_netdev_mqs(%s) failed\n", name); + return; + } + + /* Initialize "priv". */ + priv = netdev_priv(dev); + priv->dev = dev; + priv->channel = -1; + priv->loopify_channel = -1; + priv->echannel = -1; + init_ptp_dev(priv); + + /* Get the MAC address and set it in the device struct; this must + * be done before the device is opened. If the MAC is all zeroes, + * we use a random address, since we're probably on the simulator. + */ + if (!is_zero_ether_addr(mac)) + ether_addr_copy(dev->dev_addr, mac); + else + eth_hw_addr_random(dev); + + /* Register the network device. */ + ret = register_netdev(dev); + if (ret) { + netdev_err(dev, "register_netdev failed %d\n", ret); + free_netdev(dev); + return; + } +} + +/* Per-cpu module initialization. 
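+ * Runs on every cpu via on_each_cpu() from tile_net_init_module(); it
+ * records the cpu number and initializes (without starting) the egress
+ * hrtimer.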
*/ +static void tile_net_init_module_percpu(void *unused) +{ + struct tile_net_info *info = this_cpu_ptr(&per_cpu_info); + int my_cpu = smp_processor_id(); + int instance; + + for (instance = 0; instance < NR_MPIPE_MAX; instance++) { + info->mpipe[instance].has_iqueue = false; + info->mpipe[instance].instance = instance; + } + info->my_cpu = my_cpu; + + /* Initialize the egress timer. */ + hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + info->egress_timer.function = tile_net_handle_egress_timer; +} + +/* Module initialization. */ +static int __init tile_net_init_module(void) +{ + int i; + char name[GXIO_MPIPE_LINK_NAME_LEN]; + uint8_t mac[6]; + + pr_info("Tilera Network Driver\n"); + + BUILD_BUG_ON(NR_MPIPE_MAX != 2); + + mutex_init(&tile_net_devs_for_channel_mutex); + + /* Initialize each CPU. */ + on_each_cpu(tile_net_init_module_percpu, NULL, 1); + + /* Find out what devices we have, and initialize them. */ + for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) + tile_net_dev_init(name, mac); + + if (!network_cpus_init()) + network_cpus_map = *cpu_online_mask; + + return 0; +} + +module_init(tile_net_init_module); diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c new file mode 100644 index 000000000..3d8f60d96 --- /dev/null +++ b/drivers/net/ethernet/tile/tilepro.c @@ -0,0 +1,2423 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include /* printk() */ +#include /* kmalloc() */ +#include /* error codes */ +#include /* size_t */ +#include +#include +#include /* struct device, and other headers */ +#include /* eth_type_trans */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* For TSO */ +#include +#include + + +/* + * First, "tile_net_init_module()" initializes all four "devices" which + * can be used by linux. + * + * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes + * the network cpus, then uses "tile_net_open_aux()" to initialize + * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all + * the tiles, provide buffers to LIPP, allow ingress to start, and + * turn on hypervisor interrupt handling (and NAPI) on all tiles. + * + * If registration fails due to the link being down, then "retry_work" + * is used to keep calling "tile_net_open_inner()" until it succeeds. + * + * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to + * stop egress, drain the LIPP buffers, unregister all the tiles, stop + * LIPP/LEPP, and wipe the LEPP queue. + * + * We start out with the ingress interrupt enabled on each CPU. When + * this interrupt fires, we disable it, and call "napi_schedule()". + * This will cause "tile_net_poll()" to be called, which will pull + * packets from the netio queue, filtering them out, or passing them + * to "netif_receive_skb()". If our budget is exhausted, we will + * return, knowing we will be called again later. 
Otherwise, we + * reenable the ingress interrupt, and call "napi_complete()". + * + * HACK: Since disabling the ingress interrupt is not reliable, we + * ignore the interrupt if the global "active" flag is false. + * + * + * NOTE: The use of "native_driver" ensures that EPP exists, and that + * we are using "LIPP" and "LEPP". + * + * NOTE: Failing to free completions for an arbitrarily long time + * (which is defined to be illegal) does in fact cause bizarre + * problems. The "egress_timer" helps prevent this from happening. + */ + + +/* HACK: Allow use of "jumbo" packets. */ +/* This should be 1500 if "jumbo" is not set in LIPP. */ +/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */ +/* ISSUE: This has not been thoroughly tested (except at 1500). */ +#define TILE_NET_MTU 1500 + +/* HACK: Define this to verify incoming packets. */ +/* #define TILE_NET_VERIFY_INGRESS */ + +/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */ +#define TILE_NET_TX_QUEUE_LEN 0 + +/* Define to dump packets (prints out the whole packet on tx and rx). */ +/* #define TILE_NET_DUMP_PACKETS */ + +/* Define to enable debug spew (all PDEBUG's are enabled). */ +/* #define TILE_NET_DEBUG */ + + +/* Define to activate paranoia checks. */ +/* #define TILE_NET_PARANOIA */ + +/* Default transmit lockup timeout period, in jiffies. */ +#define TILE_NET_TIMEOUT (5 * HZ) + +/* Default retry interval for bringing up the NetIO interface, in jiffies. */ +#define TILE_NET_RETRY_INTERVAL (5 * HZ) + +/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */ +#define TILE_NET_DEVS 4 + + + +/* Paranoia. */ +#if NET_IP_ALIGN != LIPP_PACKET_PADDING +#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING." +#endif + + +/* Debug print. */ +#ifdef TILE_NET_DEBUG +#define PDEBUG(fmt, args...) net_printk(fmt, ## args) +#else +#define PDEBUG(fmt, args...) +#endif + + +MODULE_AUTHOR("Tilera"); +MODULE_LICENSE("GPL"); + + +/* + * Queue of incoming packets for a specific cpu and device. + * + * Includes a pointer to the "system" data, and the actual "user" data. + */ +struct tile_netio_queue { + netio_queue_impl_t *__system_part; + netio_queue_user_impl_t __user_part; + +}; + + +/* + * Statistics counters for a specific cpu and device. + */ +struct tile_net_stats_t { + struct u64_stats_sync syncp; + u64 rx_packets; /* total packets received */ + u64 tx_packets; /* total packets transmitted */ + u64 rx_bytes; /* total bytes received */ + u64 tx_bytes; /* total bytes transmitted */ + u64 rx_errors; /* packets truncated or marked bad by hw */ + u64 rx_dropped; /* packets not for us or intf not up */ +}; + + +/* + * Info for a specific cpu and device. + * + * ISSUE: There is a "dev" pointer in "napi" as well. + */ +struct tile_net_cpu { + /* The NAPI struct. */ + struct napi_struct napi; + /* Packet queue. */ + struct tile_netio_queue queue; + /* Statistics. */ + struct tile_net_stats_t stats; + /* True iff NAPI is enabled. */ + bool napi_enabled; + /* True if this tile has successfully registered with the IPP. */ + bool registered; + /* True if the link was down last time we tried to register. */ + bool link_down; + /* True if "egress_timer" is scheduled. */ + bool egress_timer_scheduled; + /* Number of small sk_buffs which must still be provided. */ + unsigned int num_needed_small_buffers; + /* Number of large sk_buffs which must still be provided. */ + unsigned int num_needed_large_buffers; + /* A timer for handling egress completions. */ + struct timer_list egress_timer; +}; + + +/* + * Info for a specific device. 
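+ *
+ * One of these backs each of the four devices (xgbe0, xgbe1, gbe0,
+ * gbe1) as its private data.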
+ */ +struct tile_net_priv { + /* Our network device. */ + struct net_device *dev; + /* Pages making up the egress queue. */ + struct page *eq_pages; + /* Address of the actual egress queue. */ + lepp_queue_t *eq; + /* Protects "eq". */ + spinlock_t eq_lock; + /* The hypervisor handle for this interface. */ + int hv_devhdl; + /* The intr bit mask that IDs this device. */ + u32 intr_id; + /* True iff "tile_net_open_aux()" has succeeded. */ + bool partly_opened; + /* True iff the device is "active". */ + bool active; + /* Effective network cpus. */ + struct cpumask network_cpus_map; + /* Number of network cpus. */ + int network_cpus_count; + /* Credits per network cpu. */ + int network_cpus_credits; + /* For NetIO bringup retries. */ + struct delayed_work retry_work; + /* Quick access to per cpu data. */ + struct tile_net_cpu *cpu[NR_CPUS]; +}; + +/* Log2 of the number of small pages needed for the egress queue. */ +#define EQ_ORDER get_order(sizeof(lepp_queue_t)) +/* Size of the egress queue's pages. */ +#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER)) + +/* + * The actual devices (xgbe0, xgbe1, gbe0, gbe1). + */ +static struct net_device *tile_net_devs[TILE_NET_DEVS]; + +/* + * The "tile_net_cpu" structures for each device. + */ +static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1); + + +/* + * True if "network_cpus" was specified. + */ +static bool network_cpus_used; + +/* + * The actual cpus in "network_cpus". + */ +static struct cpumask network_cpus_map; + + + +#ifdef TILE_NET_DEBUG +/* + * printk with extra stuff. + * + * We print the CPU we're running in brackets. + */ +static void net_printk(char *fmt, ...) +{ + int i; + int len; + va_list args; + static char buf[256]; + + len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id()); + va_start(args, fmt); + i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args); + va_end(args); + buf[255] = '\0'; + pr_notice(buf); +} +#endif + + +#ifdef TILE_NET_DUMP_PACKETS +/* + * Dump a packet. + */ +static void dump_packet(unsigned char *data, unsigned long length, char *s) +{ + int my_cpu = smp_processor_id(); + + unsigned long i; + char buf[128]; + + static unsigned int count; + + pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", + data, length, s, count++); + + pr_info("\n"); + + for (i = 0; i < length; i++) { + if ((i & 0xf) == 0) + sprintf(buf, "[%02d] %8.8lx:", my_cpu, i); + sprintf(buf + strlen(buf), " %2.2x", data[i]); + if ((i & 0xf) == 0xf || i == length - 1) { + strcat(buf, "\n"); + pr_info("%s", buf); + } + } +} +#endif + + +/* + * Provide support for the __netio_fastio1() swint + * (see for how it is used). + * + * The fastio swint2 call may clobber all the caller-saved registers. + * It rarely clobbers memory, but we allow for the possibility in + * the signature just to be on the safe side. + * + * Also, gcc doesn't seem to allow an input operand to be + * clobbered, so we fake it with dummy outputs. + * + * This function can't be static because of the way it is declared + * in the netio header. 
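+ *
+ * Descriptive note (derived from the asm below): the call passes
+ * "fastio_index" in r10 and "arg0" in r1, and takes its result from r0;
+ * every other register that swint2 might touch is listed as a clobber.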
+ */ +inline int __netio_fastio1(u32 fastio_index, u32 arg0) +{ + long result, clobber_r1, clobber_r10; + asm volatile("swint2" + : "=R00" (result), + "=R01" (clobber_r1), "=R10" (clobber_r10) + : "R10" (fastio_index), "R01" (arg0) + : "memory", "r2", "r3", "r4", + "r5", "r6", "r7", "r8", "r9", + "r11", "r12", "r13", "r14", + "r15", "r16", "r17", "r18", "r19", + "r20", "r21", "r22", "r23", "r24", + "r25", "r26", "r27", "r28", "r29"); + return result; +} + + +static void tile_net_return_credit(struct tile_net_cpu *info) +{ + struct tile_netio_queue *queue = &info->queue; + netio_queue_user_impl_t *qup = &queue->__user_part; + + /* Return four credits after every fourth packet. */ + if (--qup->__receive_credit_remaining == 0) { + u32 interval = qup->__receive_credit_interval; + qup->__receive_credit_remaining = interval; + __netio_fastio_return_credits(qup->__fastio_index, interval); + } +} + + + +/* + * Provide a linux buffer to LIPP. + */ +static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, + void *va, bool small) +{ + struct tile_netio_queue *queue = &info->queue; + + /* Convert "va" and "small" to "linux_buffer_t". */ + unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small; + + __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); +} + + +/* + * Provide a linux buffer for LIPP. + * + * Note that the ACTUAL allocation for each buffer is a "struct sk_buff", + * plus a chunk of memory that includes not only the requested bytes, but + * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info". + * + * Note that "struct skb_shared_info" is 88 bytes with 64K pages and + * 268 bytes with 4K pages (since the frags[] array needs 18 entries). + * + * Without jumbo packets, the maximum packet size will be 1536 bytes, + * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told + * the hardware to clip at 1518 bytes instead of 1536 bytes, then we + * could save an entire cache line, but in practice, we don't need it. + * + * Since CPAs are 38 bits, and we can only encode the high 31 bits in + * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must + * align the actual "va" mod 128. + * + * We assume that the underlying "head" will be aligned mod 64. Note + * that in practice, we have seen "head" NOT aligned mod 128 even when + * using 2048 byte allocations, which is surprising. + * + * If "head" WAS always aligned mod 128, we could change LIPP to + * assume that the low SIX bits are zero, and the 7th bit is one, that + * is, align the actual "va" mod 128 plus 64, which would be "free". + * + * For now, the actual "head" pointer points at NET_SKB_PAD bytes of + * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff + * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for + * the actual packet, plus 62 bytes of empty padding, plus some + * padding and the "struct skb_shared_info". + * + * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88 + * bytes, or 1816 bytes, which fits comfortably into 2048 bytes. + * + * With 64K pages, a small buffer thus needs 32+92+4+2+126+88 + * bytes, or 344 bytes, which means we are wasting 64+ bytes, and + * could presumably increase the size of small buffers. + * + * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268 + * bytes, or 1996 bytes, which fits comfortably into 2048 bytes. + * + * With 4K pages, a small buffer thus needs 32+92+4+2+126+268 + * bytes, or 524 bytes, which is annoyingly wasteful. 
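+ *
+ * (In the tallies above, 32 is the NET_SKB_PAD padding, 92 is the
+ * worst-case alignment padding, 4 is the back-pointer to the sk_buff,
+ * 2 is NET_IP_ALIGN, and 88 or 268 is the "struct skb_shared_info".)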
+ * + * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192? + * + * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64? + */ +static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, + bool small) +{ +#if TILE_NET_MTU <= 1536 + /* Without "jumbo", 2 + 1536 should be sufficient. */ + unsigned int large_size = NET_IP_ALIGN + 1536; +#else + /* ISSUE: This has not been tested. */ + unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; +#endif + + /* Avoid "false sharing" with last cache line. */ + /* ISSUE: This is already done by "netdev_alloc_skb()". */ + unsigned int len = + (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + + CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); + + unsigned int padding = 128 - NET_SKB_PAD; + unsigned int align; + + struct sk_buff *skb; + void *va; + + struct sk_buff **skb_ptr; + + /* Request 96 extra bytes for alignment purposes. */ + skb = netdev_alloc_skb(info->napi.dev, len + padding); + if (skb == NULL) + return false; + + /* Skip 32 or 96 bytes to align "data" mod 128. */ + align = -(long)skb->data & (128 - 1); + BUG_ON(align > padding); + skb_reserve(skb, align); + + /* This address is given to IPP. */ + va = skb->data; + + /* Buffers must not span a huge page. */ + BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0); + +#ifdef TILE_NET_PARANOIA +#if CHIP_HAS_CBOX_HOME_MAP() + if (hash_default) { + HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); + if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) + panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx", + va, hv_pte_get_mode(pte), hv_pte_val(pte)); + } +#endif +#endif + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(va, len); + + /* Skip two bytes to satisfy LIPP assumptions. */ + /* Note that this aligns IP on a 16 byte boundary. */ + /* ISSUE: Do this when the packet arrives? */ + skb_reserve(skb, NET_IP_ALIGN); + + /* Save a back-pointer to 'skb'. */ + skb_ptr = va - sizeof(*skb_ptr); + *skb_ptr = skb; + + /* Make sure "skb_ptr" has been flushed. */ + __insn_mf(); + + /* Provide the new buffer. */ + tile_net_provide_linux_buffer(info, va, small); + + return true; +} + + +/* + * Provide linux buffers for LIPP. + */ +static void tile_net_provide_needed_buffers(struct tile_net_cpu *info) +{ + while (info->num_needed_small_buffers != 0) { + if (!tile_net_provide_needed_buffer(info, true)) + goto oops; + info->num_needed_small_buffers--; + } + + while (info->num_needed_large_buffers != 0) { + if (!tile_net_provide_needed_buffer(info, false)) + goto oops; + info->num_needed_large_buffers--; + } + + return; + +oops: + + /* Add a description to the page allocation failure dump. */ + pr_notice("Could not provide a linux buffer to LIPP.\n"); +} + + +/* + * Grab some LEPP completions, and store them in "comps", of size + * "comps_size", and return the number of completions which were + * stored, so the caller can free them. + */ +static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq, + struct sk_buff *comps[], + unsigned int comps_size, + unsigned int min_size) +{ + unsigned int n = 0; + + unsigned int comp_head = eq->comp_head; + unsigned int comp_busy = eq->comp_busy; + + while (comp_head != comp_busy && n < comps_size) { + comps[n++] = eq->comps[comp_head]; + LEPP_QINC(comp_head); + } + + if (n < min_size) + return 0; + + eq->comp_head = comp_head; + + return n; +} + + +/* + * Free some comps, and return true iff there are still some pending. 
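+ *
+ * If "all" is true, "comp_busy" is first advanced to "comp_tail", so
+ * that every queued completion is reclaimed, not just those LEPP has
+ * already finished with.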
+ */ +static bool tile_net_lepp_free_comps(struct net_device *dev, bool all) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + lepp_queue_t *eq = priv->eq; + + struct sk_buff *olds[64]; + unsigned int wanted = 64; + unsigned int i, n; + bool pending; + + spin_lock(&priv->eq_lock); + + if (all) + eq->comp_busy = eq->comp_tail; + + n = tile_net_lepp_grab_comps(eq, olds, wanted, 0); + + pending = (eq->comp_head != eq->comp_tail); + + spin_unlock(&priv->eq_lock); + + for (i = 0; i < n; i++) + kfree_skb(olds[i]); + + return pending; +} + + +/* + * Make sure the egress timer is scheduled. + * + * Note that we use "schedule if not scheduled" logic instead of the more + * obvious "reschedule" logic, because "reschedule" is fairly expensive. + */ +static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) +{ + if (!info->egress_timer_scheduled) { + mod_timer_pinned(&info->egress_timer, jiffies + 1); + info->egress_timer_scheduled = true; + } +} + + +/* + * The "function" for "info->egress_timer". + * + * This timer will reschedule itself as long as there are any pending + * completions expected (on behalf of any tile). + * + * ISSUE: Realistically, will the timer ever stop scheduling itself? + * + * ISSUE: This timer is almost never actually needed, so just use a global + * timer that can run on any tile. + * + * ISSUE: Maybe instead track number of expected completions, and free + * only that many, resetting to zero if "pending" is ever false. + */ +static void tile_net_handle_egress_timer(unsigned long arg) +{ + struct tile_net_cpu *info = (struct tile_net_cpu *)arg; + struct net_device *dev = info->napi.dev; + + /* The timer is no longer scheduled. */ + info->egress_timer_scheduled = false; + + /* Free comps, and reschedule timer if more are pending. */ + if (tile_net_lepp_free_comps(dev, false)) + tile_net_schedule_egress_timer(info); +} + + +static void tile_net_discard_aux(struct tile_net_cpu *info, int index) +{ + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + int index2_aux = index + sizeof(netio_pkt_t); + int index2 = + ((index2_aux == + qsp->__packet_receive_queue.__last_packet_plus_one) ? + 0 : index2_aux); + + netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); + + /* Extract the "linux_buffer_t". */ + unsigned int buffer = pkt->__packet.word; + + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + kfree_skb(skb); + + /* Consume this packet. */ + qup->__packet_receive_read = index2; +} + + +/* + * Like "tile_net_poll()", but just discard packets. + */ +static void tile_net_discard_packets(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + while (qup->__packet_receive_read != + qsp->__packet_receive_queue.__packet_write) { + int index = qup->__packet_receive_read; + tile_net_discard_aux(info, index); + } +} + + +/* + * Handle the next packet. Return true if "processed", false if "filtered". 
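+ *
+ * Filtered packets are counted as "rx_dropped" (not for us, or the
+ * interface is not up) or as "rx_errors" (marked bad or truncated by
+ * the hardware), and their buffers are handed straight back to LIPP.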
+ */ +static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) +{ + struct net_device *dev = info->napi.dev; + + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + struct tile_net_stats_t *stats = &info->stats; + + int filter; + + int index2_aux = index + sizeof(netio_pkt_t); + int index2 = + ((index2_aux == + qsp->__packet_receive_queue.__last_packet_plus_one) ? + 0 : index2_aux); + + netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); + + netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); + netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt); + + /* Extract the packet size. FIXME: Shouldn't the second line */ + /* get subtracted? Mostly moot, since it should be "zero". */ + unsigned long len = + (NETIO_PKT_CUSTOM_LENGTH(pkt) + + NET_IP_ALIGN - NETIO_PACKET_PADDING); + + /* Extract the "linux_buffer_t". */ + unsigned int buffer = pkt->__packet.word; + + /* Extract "small" (vs "large"). */ + bool small = ((buffer & 1) != 0); + + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Extract the packet data pointer. */ + /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ + unsigned char *buf = va + NET_IP_ALIGN; + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(buf, len); + + /* ISSUE: Is this needed? */ + dev->last_rx = jiffies; + +#ifdef TILE_NET_DUMP_PACKETS + dump_packet(buf, len, "rx"); +#endif /* TILE_NET_DUMP_PACKETS */ + +#ifdef TILE_NET_VERIFY_INGRESS + if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) { + dump_packet(buf, len, "rx"); + panic("Unexpected OVERSIZE."); + } +#endif + + filter = 0; + + if (pkt_status == NETIO_PKT_STATUS_BAD) { + /* Handle CRC error and hardware truncation. */ + filter = 2; + } else if (!(dev->flags & IFF_UP)) { + /* Filter packets received before we're up. */ + filter = 1; + } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) && + pkt_status == NETIO_PKT_STATUS_UNDERSIZE) { + /* Filter "truncated" packets. */ + filter = 2; + } else if (!(dev->flags & IFF_PROMISC)) { + if (!is_multicast_ether_addr(buf)) { + /* Filter packets not for our address. */ + const u8 *mine = dev->dev_addr; + filter = !ether_addr_equal(mine, buf); + } + } + + u64_stats_update_begin(&stats->syncp); + + if (filter != 0) { + + if (filter == 1) + stats->rx_dropped++; + else + stats->rx_errors++; + + tile_net_provide_linux_buffer(info, va, small); + + } else { + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + /* Paranoia. */ + if (skb->data != buf) + panic("Corrupt linux buffer from LIPP! " + "VA=%p, skb=%p, skb->data=%p\n", + va, skb, skb->data); + + /* Encode the actual packet length. */ + skb_put(skb, len); + + /* NOTE: This call also sets "skb->dev = dev". */ + skb->protocol = eth_type_trans(skb, dev); + + /* Avoid recomputing "good" TCP/UDP checksums. */ + if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netif_receive_skb(skb); + + stats->rx_packets++; + stats->rx_bytes += len; + } + + u64_stats_update_end(&stats->syncp); + + /* ISSUE: It would be nice to defer this until the packet has */ + /* actually been processed. */ + tile_net_return_credit(info); + + /* Consume this packet. */ + qup->__packet_receive_read = index2; + + return !filter; +} + + +/* + * Handle some packets for the given device on the current CPU. 
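+ *
+ * This is the NAPI "poll" method (registered via "netif_napi_add()"
+ * below); it handles at most "budget" packets before either completing
+ * NAPI or arranging to be polled again.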
+ * + * If "tile_net_stop()" is called on some other tile while this + * function is running, we will return, hopefully before that + * other tile asks us to call "napi_disable()". + * + * The "rotting packet" race condition occurs if a packet arrives + * during the extremely narrow window between the queue appearing to + * be empty, and the ingress interrupt being re-enabled. This happens + * a LOT under heavy network load. + */ +static int tile_net_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + unsigned int work = 0; + + if (budget <= 0) + goto done; + + while (priv->active) { + int index = qup->__packet_receive_read; + if (index == qsp->__packet_receive_queue.__packet_write) + break; + + if (tile_net_poll_aux(info, index)) { + if (++work >= budget) + goto done; + } + } + + napi_complete(&info->napi); + + if (!priv->active) + goto done; + + /* Re-enable the ingress interrupt. */ + enable_percpu_irq(priv->intr_id, 0); + + /* HACK: Avoid the "rotting packet" problem (see above). */ + if (qup->__packet_receive_read != + qsp->__packet_receive_queue.__packet_write) { + /* ISSUE: Sometimes this returns zero, presumably */ + /* because an interrupt was handled for this tile. */ + (void)napi_reschedule(&info->napi); + } + +done: + + if (priv->active) + tile_net_provide_needed_buffers(info); + + return work; +} + + +/* + * Handle an ingress interrupt for the given device on the current cpu. + * + * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has + * been called! This is probably due to "pending hypervisor downcalls". + * + * ISSUE: Is there any race condition between the "napi_schedule()" here + * and the "napi_complete()" call above? + */ +static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable the ingress interrupt. */ + disable_percpu_irq(priv->intr_id); + + /* Ignore unwanted interrupts. */ + if (!priv->active) + return IRQ_HANDLED; + + /* ISSUE: Sometimes "info->napi_enabled" is false here. */ + + napi_schedule(&info->napi); + + return IRQ_HANDLED; +} + + +/* + * One time initialization per interface. + */ +static int tile_net_open_aux(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + int ret; + int dummy; + unsigned int epp_lotar; + + /* + * Find out where EPP memory should be homed. + */ + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar), + NETIO_EPP_SHM_OFF); + if (ret < 0) { + pr_err("could not read epp_shm_queue lotar.\n"); + return -EIO; + } + + /* + * Home the page on the EPP. + */ + { + int epp_home = hv_lotar_to_cpu(epp_lotar); + homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home); + } + + /* + * Register the EPP shared memory queue. 
+ */ + { + netio_ipp_address_t ea = { + .va = 0, + .pa = __pa(priv->eq), + .pte = hv_pte(0), + .size = EQ_SIZE, + }; + ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); + ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); + ret = hv_dev_pwrite(priv->hv_devhdl, 0, + (HV_VirtAddr)&ea, + sizeof(ea), + NETIO_EPP_SHM_OFF); + if (ret < 0) + return -EIO; + } + + /* + * Start LIPP/LEPP. + */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { + pr_warn("Failed to start LIPP/LEPP\n"); + return -EIO; + } + + return 0; +} + + +/* + * Register with hypervisor on the current CPU. + * + * Strangely, this function does important things even if it "fails", + * which is especially common if the link is not up yet. Hopefully + * these things are all "harmless" if done twice! + */ +static void tile_net_register(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info; + + struct tile_netio_queue *queue; + + /* Only network cpus can receive packets. */ + int queue_id = + cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255; + + netio_input_config_t config = { + .flags = 0, + .num_receive_packets = priv->network_cpus_credits, + .queue_id = queue_id + }; + + int ret = 0; + netio_queue_impl_t *queuep; + + PDEBUG("tile_net_register(queue_id %d)\n", queue_id); + + if (!strcmp(dev->name, "xgbe0")) + info = this_cpu_ptr(&hv_xgbe0); + else if (!strcmp(dev->name, "xgbe1")) + info = this_cpu_ptr(&hv_xgbe1); + else if (!strcmp(dev->name, "gbe0")) + info = this_cpu_ptr(&hv_gbe0); + else if (!strcmp(dev->name, "gbe1")) + info = this_cpu_ptr(&hv_gbe1); + else + BUG(); + + /* Initialize the egress timer. */ + init_timer(&info->egress_timer); + info->egress_timer.data = (long)info; + info->egress_timer.function = tile_net_handle_egress_timer; + + u64_stats_init(&info->stats.syncp); + + priv->cpu[my_cpu] = info; + + /* + * Register ourselves with LIPP. This does a lot of stuff, + * including invoking the LIPP registration code. + */ + ret = hv_dev_pwrite(priv->hv_devhdl, 0, + (HV_VirtAddr)&config, + sizeof(netio_input_config_t), + NETIO_IPP_INPUT_REGISTER_OFF); + PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", + ret); + if (ret < 0) { + if (ret != NETIO_LINK_DOWN) { + printk(KERN_DEBUG "hv_dev_pwrite " + "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n", + ret); + } + info->link_down = (ret == NETIO_LINK_DOWN); + return; + } + + /* + * Get the pointer to our queue's system part. + */ + + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&queuep, + sizeof(netio_queue_impl_t *), + NETIO_IPP_INPUT_REGISTER_OFF); + PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", + ret); + PDEBUG("queuep %p\n", queuep); + if (ret <= 0) { + /* ISSUE: Shouldn't this be a fatal error? */ + pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n"); + return; + } + + queue = &info->queue; + + queue->__system_part = queuep; + + memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t)); + + /* This is traditionally "config.num_receive_packets / 2". */ + queue->__user_part.__receive_credit_interval = 4; + queue->__user_part.__receive_credit_remaining = + queue->__user_part.__receive_credit_interval; + + /* + * Get a fastio index from the hypervisor. + * ISSUE: Shouldn't this check the result? 
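+	 *
+	 * The index is used by the fastio calls above, both when returning
+	 * credits and when handing receive buffers back to LIPP.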
+ */ + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&queue->__user_part.__fastio_index, + sizeof(queue->__user_part.__fastio_index), + NETIO_IPP_GET_FASTIO_OFF); + PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); + + /* Now we are registered. */ + info->registered = true; +} + + +/* + * Deregister with hypervisor on the current CPU. + * + * This simply discards all our credits, so no more packets will be + * delivered to this tile. There may still be packets in our queue. + * + * Also, disable the ingress interrupt. + */ +static void tile_net_deregister(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable the ingress interrupt. */ + disable_percpu_irq(priv->intr_id); + + /* Do nothing else if not registered. */ + if (info == NULL || !info->registered) + return; + + { + struct tile_netio_queue *queue = &info->queue; + netio_queue_user_impl_t *qup = &queue->__user_part; + + /* Discard all our credits. */ + __netio_fastio_return_credits(qup->__fastio_index, -1); + } +} + + +/* + * Unregister with hypervisor on the current CPU. + * + * Also, disable the ingress interrupt. + */ +static void tile_net_unregister(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + int ret; + int dummy = 0; + + /* Disable the ingress interrupt. */ + disable_percpu_irq(priv->intr_id); + + /* Do nothing else if not registered. */ + if (info == NULL || !info->registered) + return; + + /* Unregister ourselves with LIPP/LEPP. */ + ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); + if (ret < 0) + panic("Failed to unregister with LIPP/LEPP!\n"); + + /* Discard all packets still in our NetIO queue. */ + tile_net_discard_packets(dev); + + /* Reset state. */ + info->num_needed_small_buffers = 0; + info->num_needed_large_buffers = 0; + + /* Cancel egress timer. */ + del_timer(&info->egress_timer); + info->egress_timer_scheduled = false; +} + + +/* + * Helper function for "tile_net_stop()". + * + * Also used to handle registration failure in "tile_net_open_inner()", + * when the various extra steps in "tile_net_stop()" are not necessary. + */ +static void tile_net_stop_aux(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int i; + + int dummy = 0; + + /* + * Unregister all tiles, so LIPP will stop delivering packets. + * Also, delete all the "napi" objects (sequentially, to protect + * "dev->napi_list"). + */ + on_each_cpu(tile_net_unregister, (void *)dev, 1); + for_each_online_cpu(i) { + struct tile_net_cpu *info = priv->cpu[i]; + if (info != NULL && info->registered) { + netif_napi_del(&info->napi); + info->registered = false; + } + } + + /* Stop LIPP/LEPP. */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) + panic("Failed to stop LIPP/LEPP!\n"); + + priv->partly_opened = false; +} + + +/* + * Disable NAPI for the given device on the current cpu. + */ +static void tile_net_stop_disable(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable NAPI if needed. 
*/ + if (info != NULL && info->napi_enabled) { + napi_disable(&info->napi); + info->napi_enabled = false; + } +} + + +/* + * Enable NAPI and the ingress interrupt for the given device + * on the current cpu. + * + * ISSUE: Only do this for "network cpus"? + */ +static void tile_net_open_enable(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Enable NAPI. */ + napi_enable(&info->napi); + info->napi_enabled = true; + + /* Enable the ingress interrupt. */ + enable_percpu_irq(priv->intr_id, 0); +} + + +/* + * tile_net_open_inner does most of the work of bringing up the interface. + * It's called from tile_net_open(), and also from tile_net_retry_open(). + * The return value is 0 if the interface was brought up, < 0 if + * tile_net_open() should return the return value as an error, and > 0 if + * tile_net_open() should return success and schedule a work item to + * periodically retry the bringup. + */ +static int tile_net_open_inner(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info; + struct tile_netio_queue *queue; + int result = 0; + int i; + int dummy = 0; + + /* + * First try to register just on the local CPU, and handle any + * semi-expected "link down" failure specially. Note that we + * do NOT call "tile_net_stop_aux()", unlike below. + */ + tile_net_register(dev); + info = priv->cpu[my_cpu]; + if (!info->registered) { + if (info->link_down) + return 1; + return -EAGAIN; + } + + /* + * Now register everywhere else. If any registration fails, + * even for "link down" (which might not be possible), we + * clean up using "tile_net_stop_aux()". Also, add all the + * "napi" objects (sequentially, to protect "dev->napi_list"). + * ISSUE: Only use "netif_napi_add()" for "network cpus"? + */ + smp_call_function(tile_net_register, (void *)dev, 1); + for_each_online_cpu(i) { + struct tile_net_cpu *info = priv->cpu[i]; + if (info->registered) + netif_napi_add(dev, &info->napi, tile_net_poll, 64); + else + result = -EAGAIN; + } + if (result != 0) { + tile_net_stop_aux(dev); + return result; + } + + queue = &info->queue; + + if (priv->intr_id == 0) { + unsigned int irq; + + /* + * Acquire the irq allocated by the hypervisor. Every + * queue gets the same irq. The "__intr_id" field is + * "1 << irq", so we use "__ffs()" to extract "irq". + */ + priv->intr_id = queue->__system_part->__intr_id; + BUG_ON(priv->intr_id == 0); + irq = __ffs(priv->intr_id); + + /* + * Register the ingress interrupt handler for this + * device, permanently. + * + * We used to call "free_irq()" in "tile_net_stop()", + * and then re-register the handler here every time, + * but that caused DNP errors in "handle_IRQ_event()" + * because "desc->action" was NULL. See bug 9143. + */ + tile_irq_activate(irq, TILE_IRQ_PERCPU); + BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, + 0, dev->name, (void *)dev) != 0); + } + + { + /* Allocate initial buffers. 
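+		 *
+		 * At most "network_cpus_count * network_cpus_credits" buffers
+		 * of each kind are provided, further capped by
+		 * LIPP_SMALL_BUFFERS and LIPP_LARGE_BUFFERS.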
*/ + + int max_buffers = + priv->network_cpus_count * priv->network_cpus_credits; + + info->num_needed_small_buffers = + min(LIPP_SMALL_BUFFERS, max_buffers); + + info->num_needed_large_buffers = + min(LIPP_LARGE_BUFFERS, max_buffers); + + tile_net_provide_needed_buffers(info); + + if (info->num_needed_small_buffers != 0 || + info->num_needed_large_buffers != 0) + panic("Insufficient memory for buffer stack!"); + } + + /* We are about to be active. */ + priv->active = true; + + /* Make sure "active" is visible to all tiles. */ + mb(); + + /* On each tile, enable NAPI and the ingress interrupt. */ + on_each_cpu(tile_net_open_enable, (void *)dev, 1); + + /* Start LIPP/LEPP and activate "ingress" at the shim. */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) + panic("Failed to activate the LIPP Shim!\n"); + + /* Start our transmit queue. */ + netif_start_queue(dev); + + return 0; +} + + +/* + * Called periodically to retry bringing up the NetIO interface, + * if it doesn't come up cleanly during tile_net_open(). + */ +static void tile_net_open_retry(struct work_struct *w) +{ + struct delayed_work *dw = + container_of(w, struct delayed_work, work); + + struct tile_net_priv *priv = + container_of(dw, struct tile_net_priv, retry_work); + + /* + * Try to bring the NetIO interface up. If it fails, reschedule + * ourselves to try again later; otherwise, tell Linux we now have + * a working link. ISSUE: What if the return value is negative? + */ + if (tile_net_open_inner(priv->dev) != 0) + schedule_delayed_work(&priv->retry_work, + TILE_NET_RETRY_INTERVAL); + else + netif_carrier_on(priv->dev); +} + + +/* + * Called when a network interface is made active. + * + * Returns 0 on success, negative value on failure. + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS (if needed), the watchdog timer + * is started, and the stack is notified that the interface is ready. + * + * If the actual link is not available yet, then we tell Linux that + * we have no carrier, and we keep checking until the link comes up. + */ +static int tile_net_open(struct net_device *dev) +{ + int ret = 0; + struct tile_net_priv *priv = netdev_priv(dev); + + /* + * We rely on priv->partly_opened to tell us if this is the + * first time this interface is being brought up. If it is + * set, the IPP was already initialized and should not be + * initialized again. + */ + if (!priv->partly_opened) { + + int count; + int credits; + + /* Initialize LIPP/LEPP, and start the Shim. */ + ret = tile_net_open_aux(dev); + if (ret < 0) { + pr_err("tile_net_open_aux failed: %d\n", ret); + return ret; + } + + /* Analyze the network cpus. */ + + if (network_cpus_used) + cpumask_copy(&priv->network_cpus_map, + &network_cpus_map); + else + cpumask_copy(&priv->network_cpus_map, cpu_online_mask); + + + count = cpumask_weight(&priv->network_cpus_map); + + /* Limit credits to available buffers, and apply min. */ + credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1); + + /* Apply "GBE" max limit. */ + /* ISSUE: Use higher limit for XGBE? 
*/ + credits = min(NETIO_MAX_RECEIVE_PKTS, credits); + + priv->network_cpus_count = count; + priv->network_cpus_credits = credits; + +#ifdef TILE_NET_DEBUG + pr_info("Using %d network cpus, with %d credits each\n", + priv->network_cpus_count, priv->network_cpus_credits); +#endif + + priv->partly_opened = true; + + } else { + /* FIXME: Is this possible? */ + /* printk("Already partly opened.\n"); */ + } + + /* + * Attempt to bring up the link. + */ + ret = tile_net_open_inner(dev); + if (ret <= 0) { + if (ret == 0) + netif_carrier_on(dev); + return ret; + } + + /* + * We were unable to bring up the NetIO interface, but we want to + * try again in a little bit. Tell Linux that we have no carrier + * so it doesn't try to use the interface before the link comes up + * and then remember to try again later. + */ + netif_carrier_off(dev); + schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL); + + return 0; +} + + +static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv) +{ + int n = 0; + + /* Drain all the LIPP buffers. */ + while (true) { + unsigned int buffer; + + /* NOTE: This should never fail. */ + if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, + sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) + break; + + /* Stop when done. */ + if (buffer == 0) + break; + + { + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + kfree_skb(skb); + } + + n++; + } + + return n; +} + + +/* + * Disables a network interface. + * + * Returns 0, this is not allowed to fail. + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + * + * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"? + * + * Before we are called by "__dev_close()", "netif_running()" will + * have been cleared, so no NEW calls to "tile_net_poll()" will be + * made by "netpoll_poll_dev()". + * + * Often, this can cause some tiles to still have packets in their + * queues, so we must call "tile_net_discard_packets()" later. + * + * Note that some other tile may still be INSIDE "tile_net_poll()", + * and in fact, many will be, if there is heavy network load. + * + * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when + * any tile is still "napi_schedule()"'d will induce a horrible crash + * when "msleep()" is called. This includes tiles which are inside + * "tile_net_poll()" which have not yet called "napi_complete()". + * + * So, we must first try to wait long enough for other tiles to finish + * with any current "tile_net_poll()" call, and, hopefully, to clear + * the "scheduled" flag. ISSUE: It is unclear what happens to tiles + * which have called "napi_schedule()" but which had not yet tried to + * call "tile_net_poll()", or which exhausted their budget inside + * "tile_net_poll()" just before this function was called. + */ +static int tile_net_stop(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + PDEBUG("tile_net_stop()\n"); + + /* Start discarding packets. */ + priv->active = false; + + /* Make sure "active" is visible to all tiles. */ + mb(); + + /* + * On each tile, make sure no NEW packets get delivered, and + * disable the ingress interrupt. 
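+	 * (Both of these are done by "tile_net_deregister()": it returns all
+	 * of our remaining receive credits and disables the per-cpu IRQ.)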
+ * + * Note that the ingress interrupt can fire AFTER this, + * presumably due to packets which were recently delivered, + * but it will have no effect. + */ + on_each_cpu(tile_net_deregister, (void *)dev, 1); + + /* Optimistically drain LIPP buffers. */ + (void)tile_net_drain_lipp_buffers(priv); + + /* ISSUE: Only needed if not yet fully open. */ + cancel_delayed_work_sync(&priv->retry_work); + + /* Can't transmit any more. */ + netif_stop_queue(dev); + + /* Disable NAPI on each tile. */ + on_each_cpu(tile_net_stop_disable, (void *)dev, 1); + + /* + * Drain any remaining LIPP buffers. NOTE: This "printk()" + * has never been observed, but in theory it could happen. + */ + if (tile_net_drain_lipp_buffers(priv) != 0) + printk("Had to drain some extra LIPP buffers!\n"); + + /* Stop LIPP/LEPP. */ + tile_net_stop_aux(dev); + + /* + * ISSUE: It appears that, in practice anyway, by the time we + * get here, there are no pending completions, but just in case, + * we free (all of) them anyway. + */ + while (tile_net_lepp_free_comps(dev, true)) + /* loop */; + + /* Wipe the EPP queue, and wait till the stores hit the EPP. */ + memset(priv->eq, 0, sizeof(lepp_queue_t)); + mb(); + + return 0; +} + + +/* + * Prepare the "frags" info for the resulting LEPP command. + * + * If needed, flush the memory used by the frags. + */ +static unsigned int tile_net_tx_frags(lepp_frag_t *frags, + struct sk_buff *skb, + void *b_data, unsigned int b_len) +{ + unsigned int i, n = 0; + + struct skb_shared_info *sh = skb_shinfo(skb); + + phys_addr_t cpa; + + if (b_len != 0) { + + if (!hash_default) + finv_buffer_remote(b_data, b_len, 0); + + cpa = __pa(b_data); + frags[n].cpa_lo = cpa; + frags[n].cpa_hi = cpa >> 32; + frags[n].length = b_len; + frags[n].hash_for_home = hash_default; + n++; + } + + for (i = 0; i < sh->nr_frags; i++) { + + skb_frag_t *f = &sh->frags[i]; + unsigned long pfn = page_to_pfn(skb_frag_page(f)); + + /* FIXME: Compute "hash_for_home" properly. */ + /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ + int hash_for_home = hash_default; + + /* FIXME: Hmmm. */ + if (!hash_default) { + void *va = pfn_to_kaddr(pfn) + f->page_offset; + BUG_ON(PageHighMem(skb_frag_page(f))); + finv_buffer_remote(va, skb_frag_size(f), 0); + } + + cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; + frags[n].cpa_lo = cpa; + frags[n].cpa_hi = cpa >> 32; + frags[n].length = skb_frag_size(f); + frags[n].hash_for_home = hash_for_home; + n++; + } + + return n; +} + + +/* + * This function takes "skb", consisting of a header template and a + * payload, and hands it to LEPP, to emit as one or more segments, + * each consisting of a possibly modified header, plus a piece of the + * payload, via a process known as "tcp segmentation offload". + * + * Usually, "data" will contain the header template, of size "sh_len", + * and "sh->frags" will contain "skb->data_len" bytes of payload, and + * there will be "sh->gso_segs" segments. + * + * Sometimes, if "sendfile()" requires copying, we will be called with + * "data" containing the header and payload, with "frags" being empty. + * + * Sometimes, for example when using NFS over TCP, a single segment can + * span 3 fragments, which must be handled carefully in LEPP. + * + * See "emulate_large_send_offload()" for some reference code, which + * does not handle checksumming. + * + * ISSUE: How do we make sure that high memory DMA does not migrate? 
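+ *
+ * The command built below is a "lepp_tso_cmd_t": the frag list, followed
+ * by a copy of the header template, plus the "ip_offset", "tcp_offset"
+ * and "payload_size" values LEPP needs to produce an adjusted header
+ * for each segment.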
+ */ +static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_net_stats_t *stats = &info->stats; + + struct skb_shared_info *sh = skb_shinfo(skb); + + unsigned char *data = skb->data; + + /* The ip header follows the ethernet header. */ + struct iphdr *ih = ip_hdr(skb); + unsigned int ih_len = ih->ihl * 4; + + /* Note that "nh == ih", by definition. */ + unsigned char *nh = skb_network_header(skb); + unsigned int eh_len = nh - data; + + /* The tcp header follows the ip header. */ + struct tcphdr *th = (struct tcphdr *)(nh + ih_len); + unsigned int th_len = th->doff * 4; + + /* The total number of header bytes. */ + /* NOTE: This may be less than skb_headlen(skb). */ + unsigned int sh_len = eh_len + ih_len + th_len; + + /* The number of payload bytes at "skb->data + sh_len". */ + /* This is non-zero for sendfile() without HIGHDMA. */ + unsigned int b_len = skb_headlen(skb) - sh_len; + + /* The total number of payload bytes. */ + unsigned int d_len = b_len + skb->data_len; + + /* The maximum payload size. */ + unsigned int p_len = sh->gso_size; + + /* The total number of segments. */ + unsigned int num_segs = sh->gso_segs; + + /* The temporary copy of the command. */ + u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; + lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; + + /* Analyze the "frags". */ + unsigned int num_frags = + tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); + + /* The size of the command, including frags and header. */ + size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); + + /* The command header. */ + lepp_tso_cmd_t cmd_init = { + .tso = true, + .header_size = sh_len, + .ip_offset = eh_len, + .tcp_offset = eh_len + ih_len, + .payload_size = p_len, + .num_frags = num_frags, + }; + + unsigned long irqflags; + + lepp_queue_t *eq = priv->eq; + + struct sk_buff *olds[8]; + unsigned int wanted = 8; + unsigned int i, nolds = 0; + + unsigned int cmd_head, cmd_tail, cmd_next; + unsigned int comp_tail; + + + /* Paranoia. */ + BUG_ON(skb->protocol != htons(ETH_P_IP)); + BUG_ON(ih->protocol != IPPROTO_TCP); + BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); + BUG_ON(num_frags > LEPP_MAX_FRAGS); + /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ + BUG_ON(num_segs <= 1); + + + /* Finish preparing the command. */ + + /* Copy the command header. */ + *cmd = cmd_init; + + /* Copy the "header". */ + memcpy(&cmd->frags[num_frags], data, sh_len); + + + /* Prefetch and wait, to minimize time spent holding the spinlock. */ + prefetch_L1(&eq->comp_tail); + prefetch_L1(&eq->cmd_tail); + mb(); + + + /* Enqueue the command. */ + + spin_lock_irqsave(&priv->eq_lock, irqflags); + + /* Handle completions if needed to make room. */ + /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */ + if (lepp_num_free_comp_slots(eq) == 0) { + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); + if (nolds == 0) { +busy: + spin_unlock_irqrestore(&priv->eq_lock, irqflags); + return NETDEV_TX_BUSY; + } + } + + cmd_head = eq->cmd_head; + cmd_tail = eq->cmd_tail; + + /* Prepare to advance, detecting full queue. */ + /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */ + cmd_next = cmd_tail + cmd_size; + if (cmd_tail < cmd_head && cmd_next >= cmd_head) + goto busy; + if (cmd_next > LEPP_CMD_LIMIT) { + cmd_next = 0; + if (cmd_next == cmd_head) + goto busy; + } + + /* Copy the command. 
*/ + memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); + + /* Advance. */ + cmd_tail = cmd_next; + + /* Record "skb" for eventual freeing. */ + comp_tail = eq->comp_tail; + eq->comps[comp_tail] = skb; + LEPP_QINC(comp_tail); + eq->comp_tail = comp_tail; + + /* Flush before allowing LEPP to handle the command. */ + /* ISSUE: Is this the optimal location for the flush? */ + __insn_mf(); + + eq->cmd_tail = cmd_tail; + + /* NOTE: Using "4" here is more efficient than "0" or "2", */ + /* and, strangely, more efficient than pre-checking the number */ + /* of available completions, and comparing it to 4. */ + if (nolds == 0) + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); + + spin_unlock_irqrestore(&priv->eq_lock, irqflags); + + /* Handle completions. */ + for (i = 0; i < nolds; i++) + dev_consume_skb_any(olds[i]); + + /* Update stats. */ + u64_stats_update_begin(&stats->syncp); + stats->tx_packets += num_segs; + stats->tx_bytes += (num_segs * sh_len) + d_len; + u64_stats_update_end(&stats->syncp); + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(info); + + return NETDEV_TX_OK; +} + + +/* + * Transmit a packet (called by the kernel via "hard_start_xmit" hook). + */ +static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_net_stats_t *stats = &info->stats; + + unsigned long irqflags; + + struct skb_shared_info *sh = skb_shinfo(skb); + + unsigned int len = skb->len; + unsigned char *data = skb->data; + + unsigned int csum_start = skb_checksum_start_offset(skb); + + lepp_frag_t frags[1 + MAX_SKB_FRAGS]; + + unsigned int num_frags; + + lepp_queue_t *eq = priv->eq; + + struct sk_buff *olds[8]; + unsigned int wanted = 8; + unsigned int i, nolds = 0; + + unsigned int cmd_size = sizeof(lepp_cmd_t); + + unsigned int cmd_head, cmd_tail, cmd_next; + unsigned int comp_tail; + + lepp_cmd_t cmds[1 + MAX_SKB_FRAGS]; + + + /* + * This is paranoia, since we think that if the link doesn't come + * up, telling Linux we have no carrier will keep it from trying + * to transmit. If it does, though, we can't execute this routine, + * since data structures we depend on aren't set up yet. + */ + if (!info->registered) + return NETDEV_TX_BUSY; + + + /* Save the timestamp. */ + dev->trans_start = jiffies; + + +#ifdef TILE_NET_PARANOIA +#if CHIP_HAS_CBOX_HOME_MAP() + if (hash_default) { + HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); + if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) + panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx", + data, hv_pte_get_mode(pte), hv_pte_val(pte)); + } +#endif +#endif + + +#ifdef TILE_NET_DUMP_PACKETS + /* ISSUE: Does not dump the "frags". */ + dump_packet(data, skb_headlen(skb), "tx"); +#endif /* TILE_NET_DUMP_PACKETS */ + + + if (sh->gso_size != 0) + return tile_net_tx_tso(skb, dev); + + + /* Prepare the commands. 
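+	 *
+	 * One "lepp_cmd_t" is built per fragment; only the last one sets
+	 * "send_completion" and "end_of_packet", and only the first one asks
+	 * for checksum offload (when "skb->ip_summed" is CHECKSUM_PARTIAL).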
*/ + + num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); + + for (i = 0; i < num_frags; i++) { + + bool final = (i == num_frags - 1); + + lepp_cmd_t cmd = { + .cpa_lo = frags[i].cpa_lo, + .cpa_hi = frags[i].cpa_hi, + .length = frags[i].length, + .hash_for_home = frags[i].hash_for_home, + .send_completion = final, + .end_of_packet = final + }; + + if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { + cmd.compute_checksum = 1; + cmd.checksum_data.bits.start_byte = csum_start; + cmd.checksum_data.bits.count = len - csum_start; + cmd.checksum_data.bits.destination_byte = + csum_start + skb->csum_offset; + } + + cmds[i] = cmd; + } + + + /* Prefetch and wait, to minimize time spent holding the spinlock. */ + prefetch_L1(&eq->comp_tail); + prefetch_L1(&eq->cmd_tail); + mb(); + + + /* Enqueue the commands. */ + + spin_lock_irqsave(&priv->eq_lock, irqflags); + + /* Handle completions if needed to make room. */ + /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */ + if (lepp_num_free_comp_slots(eq) == 0) { + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); + if (nolds == 0) { +busy: + spin_unlock_irqrestore(&priv->eq_lock, irqflags); + return NETDEV_TX_BUSY; + } + } + + cmd_head = eq->cmd_head; + cmd_tail = eq->cmd_tail; + + /* Copy the commands, or fail. */ + /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */ + for (i = 0; i < num_frags; i++) { + + /* Prepare to advance, detecting full queue. */ + cmd_next = cmd_tail + cmd_size; + if (cmd_tail < cmd_head && cmd_next >= cmd_head) + goto busy; + if (cmd_next > LEPP_CMD_LIMIT) { + cmd_next = 0; + if (cmd_next == cmd_head) + goto busy; + } + + /* Copy the command. */ + *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; + + /* Advance. */ + cmd_tail = cmd_next; + } + + /* Record "skb" for eventual freeing. */ + comp_tail = eq->comp_tail; + eq->comps[comp_tail] = skb; + LEPP_QINC(comp_tail); + eq->comp_tail = comp_tail; + + /* Flush before allowing LEPP to handle the command. */ + /* ISSUE: Is this the optimal location for the flush? */ + __insn_mf(); + + eq->cmd_tail = cmd_tail; + + /* NOTE: Using "4" here is more efficient than "0" or "2", */ + /* and, strangely, more efficient than pre-checking the number */ + /* of available completions, and comparing it to 4. */ + if (nolds == 0) + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); + + spin_unlock_irqrestore(&priv->eq_lock, irqflags); + + /* Handle completions. */ + for (i = 0; i < nolds; i++) + dev_consume_skb_any(olds[i]); + + /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); + u64_stats_update_end(&stats->syncp); + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(info); + + return NETDEV_TX_OK; +} + + +/* + * Deal with a transmit timeout. + */ +static void tile_net_tx_timeout(struct net_device *dev) +{ + PDEBUG("tile_net_tx_timeout()\n"); + PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, + jiffies - dev->trans_start); + + /* XXX: ISSUE: This doesn't seem useful for us. */ + netif_wake_queue(dev); +} + + +/* + * Ioctl commands. + */ +static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + return -EOPNOTSUPP; +} + + +/* + * Get System Network Statistics. + * + * Returns the address of the device statistics structure. 
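+ *
+ * The per-cpu counters are summed under "u64_stats_fetch_begin_irq()" /
+ * "u64_stats_fetch_retry_irq()", so that each tile's 64-bit counters
+ * are read consistently even while that tile is updating them.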
+ */ +static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct tile_net_priv *priv = netdev_priv(dev); + u64 rx_packets = 0, tx_packets = 0; + u64 rx_bytes = 0, tx_bytes = 0; + u64 rx_errors = 0, rx_dropped = 0; + int i; + + for_each_online_cpu(i) { + struct tile_net_stats_t *cpu_stats; + u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes; + u64 trx_errors, trx_dropped; + unsigned int start; + + if (priv->cpu[i] == NULL) + continue; + cpu_stats = &priv->cpu[i]->stats; + + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + trx_packets = cpu_stats->rx_packets; + ttx_packets = cpu_stats->tx_packets; + trx_bytes = cpu_stats->rx_bytes; + ttx_bytes = cpu_stats->tx_bytes; + trx_errors = cpu_stats->rx_errors; + trx_dropped = cpu_stats->rx_dropped; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + rx_packets += trx_packets; + tx_packets += ttx_packets; + rx_bytes += trx_bytes; + tx_bytes += ttx_bytes; + rx_errors += trx_errors; + rx_dropped += trx_dropped; + } + + stats->rx_packets = rx_packets; + stats->tx_packets = tx_packets; + stats->rx_bytes = rx_bytes; + stats->tx_bytes = tx_bytes; + stats->rx_errors = rx_errors; + stats->rx_dropped = rx_dropped; + + return stats; +} + + +/* + * Change the "mtu". + * + * The "change_mtu" method is usually not needed. + * If you need it, it must be like this. + */ +static int tile_net_change_mtu(struct net_device *dev, int new_mtu) +{ + PDEBUG("tile_net_change_mtu()\n"); + + /* Check ranges. */ + if ((new_mtu < 68) || (new_mtu > 1500)) + return -EINVAL; + + /* Accept the value. */ + dev->mtu = new_mtu; + + return 0; +} + + +/* + * Change the Ethernet Address of the NIC. + * + * The hypervisor driver does not support changing MAC address. However, + * the IPP does not do anything with the MAC address, so the address which + * gets used on outgoing packets, and which is accepted on incoming packets, + * is completely up to the NetIO program or kernel driver which is actually + * handling them. + * + * Returns 0 on success, negative on failure. + */ +static int tile_net_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* ISSUE: Note that "dev_addr" is now a pointer. */ + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + return 0; +} + + +/* + * Obtain the MAC address from the hypervisor. + * This must be done before opening the device. + */ +static int tile_net_get_mac(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + char hv_dev_name[32]; + int len; + + __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; + + int ret; + + /* For example, "xgbe0". */ + strcpy(hv_dev_name, dev->name); + len = strlen(hv_dev_name); + + /* For example, "xgbe/0". */ + hv_dev_name[len] = hv_dev_name[len - 1]; + hv_dev_name[len - 1] = '/'; + len++; + + /* For example, "xgbe/0/native_hash". */ + strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); + + /* Get the hypervisor handle for this device. 
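+	 *
+	 * The name passed to "hv_dev_open()" is the interface name rewritten
+	 * as, e.g., "xgbe/0/native_hash" (or ".../native" when "hash_default"
+	 * is not set).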
*/ + priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); + PDEBUG("hv_dev_open(%s) returned %d %p\n", + hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); + if (priv->hv_devhdl < 0) { + if (priv->hv_devhdl == HV_ENODEV) + printk(KERN_DEBUG "Ignoring unconfigured device %s\n", + hv_dev_name); + else + printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", + hv_dev_name, priv->hv_devhdl); + return -1; + } + + /* + * Read the hardware address from the hypervisor. + * ISSUE: Note that "dev_addr" is now a pointer. + */ + offset.bits.class = NETIO_PARAM; + offset.bits.addr = NETIO_PARAM_MAC; + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)dev->dev_addr, dev->addr_len, + offset.word); + PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); + if (ret <= 0) { + printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", + dev->name); + /* + * Since the device is configured by the hypervisor but we + * can't get its MAC address, we are most likely running + * the simulator, so let's generate a random MAC address. + */ + eth_hw_addr_random(dev); + } + + return 0; +} + + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void tile_net_netpoll(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + disable_percpu_irq(priv->intr_id); + tile_net_handle_ingress_interrupt(priv->intr_id, dev); + enable_percpu_irq(priv->intr_id, 0); +} +#endif + + +static const struct net_device_ops tile_net_ops = { + .ndo_open = tile_net_open, + .ndo_stop = tile_net_stop, + .ndo_start_xmit = tile_net_tx, + .ndo_do_ioctl = tile_net_ioctl, + .ndo_get_stats64 = tile_net_get_stats64, + .ndo_change_mtu = tile_net_change_mtu, + .ndo_tx_timeout = tile_net_tx_timeout, + .ndo_set_mac_address = tile_net_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tile_net_netpoll, +#endif +}; + + +/* + * The setup function. + * + * This uses ether_setup() to assign various fields in dev, including + * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. + */ +static void tile_net_setup(struct net_device *dev) +{ + netdev_features_t features = 0; + + ether_setup(dev); + dev->netdev_ops = &tile_net_ops; + dev->watchdog_timeo = TILE_NET_TIMEOUT; + dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; + dev->mtu = TILE_NET_MTU; + + features |= NETIF_F_HW_CSUM; + features |= NETIF_F_SG; + + /* We support TSO iff the HV supports sufficient frags. */ + if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS) + features |= NETIF_F_TSO; + + /* We can't support HIGHDMA without hash_default, since we need + * to be able to finv() with a VA if we don't have hash_default. + */ + if (hash_default) + features |= NETIF_F_HIGHDMA; + + dev->hw_features |= features; + dev->vlan_features |= features; + dev->features |= features; +} + + +/* + * Allocate the device structure, register the device, and obtain the + * MAC address from the hypervisor. + */ +static struct net_device *tile_net_dev_init(const char *name) +{ + int ret; + struct net_device *dev; + struct tile_net_priv *priv; + + /* + * Allocate the device structure. This allocates "priv", calls + * tile_net_setup(), and saves "name". Normally, "name" is a + * template, instantiated by register_netdev(), but not for us. 
+ */ + dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN, + tile_net_setup); + if (!dev) { + pr_err("alloc_netdev(%s) failed\n", name); + return NULL; + } + + priv = netdev_priv(dev); + + /* Initialize "priv". */ + + memset(priv, 0, sizeof(*priv)); + + /* Save "dev" for "tile_net_open_retry()". */ + priv->dev = dev; + + INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); + + spin_lock_init(&priv->eq_lock); + + /* Allocate "eq". */ + priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER); + if (!priv->eq_pages) { + free_netdev(dev); + return NULL; + } + priv->eq = page_address(priv->eq_pages); + + /* Register the network device. */ + ret = register_netdev(dev); + if (ret) { + pr_err("register_netdev %s failed %d\n", dev->name, ret); + __free_pages(priv->eq_pages, EQ_ORDER); + free_netdev(dev); + return NULL; + } + + /* Get the MAC address. */ + ret = tile_net_get_mac(dev); + if (ret < 0) { + unregister_netdev(dev); + __free_pages(priv->eq_pages, EQ_ORDER); + free_netdev(dev); + return NULL; + } + + return dev; +} + + +/* + * Module cleanup. + * + * FIXME: If compiled as a module, this module cannot be "unloaded", + * because the "ingress interrupt handler" is registered permanently. + */ +static void tile_net_cleanup(void) +{ + int i; + + for (i = 0; i < TILE_NET_DEVS; i++) { + if (tile_net_devs[i]) { + struct net_device *dev = tile_net_devs[i]; + struct tile_net_priv *priv = netdev_priv(dev); + unregister_netdev(dev); + finv_buffer_remote(priv->eq, EQ_SIZE, 0); + __free_pages(priv->eq_pages, EQ_ORDER); + free_netdev(dev); + } + } +} + + +/* + * Module initialization. + */ +static int tile_net_init_module(void) +{ + pr_info("Tilera Network Driver\n"); + + tile_net_devs[0] = tile_net_dev_init("xgbe0"); + tile_net_devs[1] = tile_net_dev_init("xgbe1"); + tile_net_devs[2] = tile_net_dev_init("gbe0"); + tile_net_devs[3] = tile_net_dev_init("gbe1"); + + return 0; +} + + +module_init(tile_net_init_module); +module_exit(tile_net_cleanup); + + +#ifndef MODULE + +/* + * The "network_cpus" boot argument specifies the cpus that are dedicated + * to handle ingress packets. + * + * The parameter should be in the form "network_cpus=m-n[,x-y]", where + * m, n, x, y are integer numbers that represent the cpus that can be + * neither a dedicated cpu nor a dataplane cpu. + */ +static int __init network_cpus_setup(char *str) +{ + int rc = cpulist_parse_crop(str, &network_cpus_map); + if (rc != 0) { + pr_warn("network_cpus=%s: malformed cpu list\n", str); + } else { + + /* Remove dedicated cpus. */ + cpumask_and(&network_cpus_map, &network_cpus_map, + cpu_possible_mask); + + + if (cpumask_empty(&network_cpus_map)) { + pr_warn("Ignoring network_cpus='%s'\n", str); + } else { + pr_info("Linux network CPUs: %*pbl\n", + cpumask_pr_args(&network_cpus_map)); + network_cpus_used = true; + } + } + + return 0; +} +__setup("network_cpus=", network_cpus_setup); + +#endif diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig new file mode 100644 index 000000000..5d244b6b5 --- /dev/null +++ b/drivers/net/ethernet/toshiba/Kconfig @@ -0,0 +1,57 @@ +# +# Toshiba network device configuration +# + +config NET_VENDOR_TOSHIBA + bool "Toshiba devices" + default y + depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3 + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Toshiba cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_TOSHIBA + +config GELIC_NET + tristate "PS3 Gigabit Ethernet driver" + depends on PPC_PS3 + select PS3_SYS_MANAGER + ---help--- + This driver supports the network device on the PS3 game + console. This driver has built-in support for Ethernet. + + To compile this driver as a module, choose M here: the + module will be called ps3_gelic. + +config GELIC_WIRELESS + bool "PS3 Wireless support" + depends on GELIC_NET && WLAN + select WIRELESS_EXT + ---help--- + This option adds the support for the wireless feature of PS3. + If you have the wireless-less model of PS3 or have no plan to + use wireless feature, disabling this option saves memory. As + the driver automatically distinguishes the models, you can + safely enable this option even if you have a wireless-less model. + +config SPIDER_NET + tristate "Spider Gigabit Ethernet driver" + depends on PCI && PPC_IBM_CELL_BLADE + select FW_LOADER + select SUNGEM_PHY + ---help--- + This driver supports the Gigabit Ethernet chips present on the + Cell Processor-Based Blades from IBM. + +config TC35815 + tristate "TOSHIBA TC35815 Ethernet support" + depends on PCI && MIPS + select PHYLIB + +endif # NET_VENDOR_TOSHIBA diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile new file mode 100644 index 000000000..a50690084 --- /dev/null +++ b/drivers/net/ethernet/toshiba/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the Toshiba network device drivers. +# + +obj-$(CONFIG_GELIC_NET) += ps3_gelic.o +gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o +ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y) +spidernet-y += spider_net.o spider_net_ethtool.o +obj-$(CONFIG_SPIDER_NET) += spidernet.o +obj-$(CONFIG_TC35815) += tc35815.o diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c new file mode 100644 index 000000000..ac62a5e24 --- /dev/null +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -0,0 +1,1894 @@ +/* + * PS3 gelic network driver. + * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2006, 2007 Sony Corporation + * + * This file is based on: spider_net.c + * + * (C) Copyright IBM Corp. 2005 + * + * Authors : Utz Bacher + * Jens Osterkamp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#undef DEBUG + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ps3_gelic_net.h" +#include "ps3_gelic_wireless.h" + +#define DRV_NAME "Gelic Network Driver" +#define DRV_VERSION "2.0" + +MODULE_AUTHOR("SCE Inc."); +MODULE_DESCRIPTION("Gelic Network driver"); +MODULE_LICENSE("GPL"); + + +/* set irq_mask */ +int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask) +{ + int status; + + status = lv1_net_set_interrupt_mask(bus_id(card), dev_id(card), + mask, 0); + if (status) + dev_info(ctodev(card), + "%s failed %d\n", __func__, status); + return status; +} + +static void gelic_card_rx_irq_on(struct gelic_card *card) +{ + card->irq_mask |= GELIC_CARD_RXINT; + gelic_card_set_irq_mask(card, card->irq_mask); +} +static void gelic_card_rx_irq_off(struct gelic_card *card) +{ + card->irq_mask &= ~GELIC_CARD_RXINT; + gelic_card_set_irq_mask(card, card->irq_mask); +} + +static void gelic_card_get_ether_port_status(struct gelic_card *card, + int inform) +{ + u64 v2; + struct net_device *ether_netdev; + + lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_ETH_PORT_STATUS, + GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0, + &card->ether_port_status, &v2); + + if (inform) { + ether_netdev = card->netdev[GELIC_PORT_ETHERNET_0]; + if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP) + netif_carrier_on(ether_netdev); + else + netif_carrier_off(ether_netdev); + } +} + +static int gelic_card_set_link_mode(struct gelic_card *card, int mode) +{ + int status; + u64 v1, v2; + + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_NEGOTIATION_MODE, + GELIC_LV1_PHY_ETHERNET_0, mode, 0, &v1, &v2); + if (status) { + pr_info("%s: failed setting negotiation mode %d\n", __func__, + status); + return -EBUSY; + } + + card->link_mode = mode; + return 0; +} + +/** + * gelic_card_disable_txdmac - disables the transmit DMA controller + * @card: card structure + * + * gelic_card_disable_txdmac terminates processing on the DMA controller by + * turing off DMA and issuing a force end + */ +static void gelic_card_disable_txdmac(struct gelic_card *card) +{ + int status; + + /* this hvc blocks until the DMA in progress really stopped */ + status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card)); + if (status) + dev_err(ctodev(card), + "lv1_net_stop_tx_dma failed, status=%d\n", status); +} + +/** + * gelic_card_enable_rxdmac - enables the receive DMA controller + * @card: card structure + * + * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN + * in the GDADMACCNTR register + */ +static void gelic_card_enable_rxdmac(struct gelic_card *card) +{ + int status; + +#ifdef DEBUG + if (gelic_descr_get_status(card->rx_chain.head) != + GELIC_DESCR_DMA_CARDOWNED) { + printk(KERN_ERR "%s: status=%x\n", __func__, + be32_to_cpu(card->rx_chain.head->dmac_cmd_status)); + printk(KERN_ERR "%s: nextphy=%x\n", __func__, + be32_to_cpu(card->rx_chain.head->next_descr_addr)); + printk(KERN_ERR "%s: head=%p\n", __func__, + card->rx_chain.head); + } +#endif + status = lv1_net_start_rx_dma(bus_id(card), dev_id(card), + card->rx_chain.head->bus_addr, 0); + if (status) + dev_info(ctodev(card), + "lv1_net_start_rx_dma failed, status=%d\n", status); +} + +/** + * gelic_card_disable_rxdmac - disables the receive DMA controller + * @card: card structure + * + * gelic_card_disable_rxdmac terminates processing on the DMA controller by + * turing off DMA and issuing a force end + */ +static void 
gelic_card_disable_rxdmac(struct gelic_card *card) +{ + int status; + + /* this hvc blocks until the DMA in progress really stopped */ + status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card)); + if (status) + dev_err(ctodev(card), + "lv1_net_stop_rx_dma failed, %d\n", status); +} + +/** + * gelic_descr_set_status -- sets the status of a descriptor + * @descr: descriptor to change + * @status: status to set in the descriptor + * + * changes the status to the specified value. Doesn't change other bits + * in the status + */ +static void gelic_descr_set_status(struct gelic_descr *descr, + enum gelic_descr_dma_status status) +{ + descr->dmac_cmd_status = cpu_to_be32(status | + (be32_to_cpu(descr->dmac_cmd_status) & + ~GELIC_DESCR_DMA_STAT_MASK)); + /* + * dma_cmd_status field is used to indicate whether the descriptor + * is valid or not. + * Usually caller of this function wants to inform that to the + * hardware, so we assure here the hardware sees the change. + */ + wmb(); +} + +/** + * gelic_card_reset_chain - reset status of a descriptor chain + * @card: card structure + * @chain: address of chain + * @start_descr: address of descriptor array + * + * Reset the status of dma descriptors to ready state + * and re-initialize the hardware chain for later use + */ +static void gelic_card_reset_chain(struct gelic_card *card, + struct gelic_descr_chain *chain, + struct gelic_descr *start_descr) +{ + struct gelic_descr *descr; + + for (descr = start_descr; start_descr != descr->next; descr++) { + gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); + descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); + } + + chain->head = start_descr; + chain->tail = (descr - 1); + + (descr - 1)->next_descr_addr = 0; +} + +void gelic_card_up(struct gelic_card *card) +{ + pr_debug("%s: called\n", __func__); + mutex_lock(&card->updown_lock); + if (atomic_inc_return(&card->users) == 1) { + pr_debug("%s: real do\n", __func__); + /* enable irq */ + gelic_card_set_irq_mask(card, card->irq_mask); + /* start rx */ + gelic_card_enable_rxdmac(card); + + napi_enable(&card->napi); + } + mutex_unlock(&card->updown_lock); + pr_debug("%s: done\n", __func__); +} + +void gelic_card_down(struct gelic_card *card) +{ + u64 mask; + pr_debug("%s: called\n", __func__); + mutex_lock(&card->updown_lock); + if (atomic_dec_if_positive(&card->users) == 0) { + pr_debug("%s: real do\n", __func__); + napi_disable(&card->napi); + /* + * Disable irq. 
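/*
 * Illustrative aside, not part of the original patch: gelic_descr_set_status()
 * above replaces only the 4-bit DMA status nibble (GELIC_DESCR_DMA_STAT_MASK)
 * and preserves every other bit of dmac_cmd_status. The same read-modify-write,
 * minus the endian conversion and the wmb(), as a standalone sketch:
 */
#include <stdio.h>
#include <stdint.h>

#define DMA_STAT_MASK 0xf0000000u   /* mirrors GELIC_DESCR_DMA_STAT_MASK */

static uint32_t set_dma_status(uint32_t cmd_status, uint32_t status)
{
    return (status & DMA_STAT_MASK) | (cmd_status & ~DMA_STAT_MASK);
}

int main(void)
{
    uint32_t word = 0x400c0002u;                /* FRAME_END plus some command bits */

    word = set_dma_status(word, 0xa0000000u);   /* mark it CARDOWNED */
    printf("%08x\n", word);                     /* prints a00c0002 */
    return 0;
}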
Wireless interrupts will + * be disabled later if any + */ + mask = card->irq_mask & (GELIC_CARD_WLAN_EVENT_RECEIVED | + GELIC_CARD_WLAN_COMMAND_COMPLETED); + gelic_card_set_irq_mask(card, mask); + /* stop rx */ + gelic_card_disable_rxdmac(card); + gelic_card_reset_chain(card, &card->rx_chain, + card->descr + GELIC_NET_TX_DESCRIPTORS); + /* stop tx */ + gelic_card_disable_txdmac(card); + } + mutex_unlock(&card->updown_lock); + pr_debug("%s: done\n", __func__); +} + +/** + * gelic_descr_get_status -- returns the status of a descriptor + * @descr: descriptor to look at + * + * returns the status as in the dmac_cmd_status field of the descriptor + */ +static enum gelic_descr_dma_status +gelic_descr_get_status(struct gelic_descr *descr) +{ + return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK; +} + +/** + * gelic_card_free_chain - free descriptor chain + * @card: card structure + * @descr_in: address of desc + */ +static void gelic_card_free_chain(struct gelic_card *card, + struct gelic_descr *descr_in) +{ + struct gelic_descr *descr; + + for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) { + dma_unmap_single(ctodev(card), descr->bus_addr, + GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL); + descr->bus_addr = 0; + } +} + +/** + * gelic_card_init_chain - links descriptor chain + * @card: card structure + * @chain: address of chain + * @start_descr: address of descriptor array + * @no: number of descriptors + * + * we manage a circular list that mirrors the hardware structure, + * except that the hardware uses bus addresses. + * + * returns 0 on success, <0 on failure + */ +static int gelic_card_init_chain(struct gelic_card *card, + struct gelic_descr_chain *chain, + struct gelic_descr *start_descr, int no) +{ + int i; + struct gelic_descr *descr; + + descr = start_descr; + memset(descr, 0, sizeof(*descr) * no); + + /* set up the hardware pointers in each descriptor */ + for (i = 0; i < no; i++, descr++) { + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); + descr->bus_addr = + dma_map_single(ctodev(card), descr, + GELIC_DESCR_SIZE, + DMA_BIDIRECTIONAL); + + if (!descr->bus_addr) + goto iommu_error; + + descr->next = descr + 1; + descr->prev = descr - 1; + } + /* make them as ring */ + (descr - 1)->next = start_descr; + start_descr->prev = (descr - 1); + + /* chain bus addr of hw descriptor */ + descr = start_descr; + for (i = 0; i < no; i++, descr++) { + descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); + } + + chain->head = start_descr; + chain->tail = start_descr; + + /* do not chain last hw descriptor */ + (descr - 1)->next_descr_addr = 0; + + return 0; + +iommu_error: + for (i--, descr--; 0 <= i; i--, descr--) + if (descr->bus_addr) + dma_unmap_single(ctodev(card), descr->bus_addr, + GELIC_DESCR_SIZE, + DMA_BIDIRECTIONAL); + return -ENOMEM; +} + +/** + * gelic_descr_prepare_rx - reinitializes a rx descriptor + * @card: card structure + * @descr: descriptor to re-init + * + * return 0 on success, <0 on failure + * + * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 
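/*
 * Illustrative aside, not part of the original patch: gelic_card_init_chain()
 * above links an array of descriptors into a circular list through their
 * next/prev pointers (the hardware side of the chain uses bus addresses
 * instead of pointers). A minimal model of that ring:
 */
#include <stdio.h>

#define RING_SIZE 4

struct descr_sketch {
    struct descr_sketch *next;
    struct descr_sketch *prev;
    int idx;
};

static void init_ring(struct descr_sketch *d, int n)
{
    for (int i = 0; i < n; i++) {
        d[i].idx = i;
        d[i].next = &d[(i + 1) % n];        /* last wraps to first */
        d[i].prev = &d[(i + n - 1) % n];    /* first wraps to last */
    }
}

int main(void)
{
    struct descr_sketch ring[RING_SIZE];
    struct descr_sketch *p = &ring[0];

    init_ring(ring, RING_SIZE);
    for (int i = 0; i <= RING_SIZE; i++, p = p->next)
        printf("%d ", p->idx);              /* prints "0 1 2 3 0" */
    printf("\n");
    return 0;
}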
+ * Activate the descriptor state-wise + */ +static int gelic_descr_prepare_rx(struct gelic_card *card, + struct gelic_descr *descr) +{ + int offset; + unsigned int bufsize; + + if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) + dev_info(ctodev(card), "%s: ERROR status\n", __func__); + /* we need to round up the buffer size to a multiple of 128 */ + bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); + + /* and we need to have it 128 byte aligned, therefore we allocate a + * bit more */ + descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1); + if (!descr->skb) { + descr->buf_addr = 0; /* tell DMAC don't touch memory */ + dev_info(ctodev(card), + "%s:allocate skb failed !!\n", __func__); + return -ENOMEM; + } + descr->buf_size = cpu_to_be32(bufsize); + descr->dmac_cmd_status = 0; + descr->result_size = 0; + descr->valid_size = 0; + descr->data_error = 0; + + offset = ((unsigned long)descr->skb->data) & + (GELIC_NET_RXBUF_ALIGN - 1); + if (offset) + skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); + /* io-mmu-map the skb */ + descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card), + descr->skb->data, + GELIC_NET_MAX_MTU, + DMA_FROM_DEVICE)); + if (!descr->buf_addr) { + dev_kfree_skb_any(descr->skb); + descr->skb = NULL; + dev_info(ctodev(card), + "%s:Could not iommu-map rx buffer\n", __func__); + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); + return -ENOMEM; + } else { + gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); + return 0; + } +} + +/** + * gelic_card_release_rx_chain - free all skb of rx descr + * @card: card structure + * + */ +static void gelic_card_release_rx_chain(struct gelic_card *card) +{ + struct gelic_descr *descr = card->rx_chain.head; + + do { + if (descr->skb) { + dma_unmap_single(ctodev(card), + be32_to_cpu(descr->buf_addr), + descr->skb->len, + DMA_FROM_DEVICE); + descr->buf_addr = 0; + dev_kfree_skb_any(descr->skb); + descr->skb = NULL; + gelic_descr_set_status(descr, + GELIC_DESCR_DMA_NOT_IN_USE); + } + descr = descr->next; + } while (descr != card->rx_chain.head); +} + +/** + * gelic_card_fill_rx_chain - fills descriptors/skbs in the rx chains + * @card: card structure + * + * fills all descriptors in the rx chain: allocates skbs + * and iommu-maps them. 
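/*
 * Illustrative aside, not part of the original patch: gelic_descr_prepare_rx()
 * above needs the RX buffer to start on a 128-byte (GELIC_NET_RXBUF_ALIGN)
 * boundary, so it over-allocates by ALIGN - 1 bytes and skb_reserve()s the
 * difference. The same pointer arithmetic with a plain malloc(), using an
 * illustrative buffer size:
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define RXBUF_ALIGN 128

int main(void)
{
    size_t bufsize = 1536;
    unsigned char *raw = malloc(bufsize + RXBUF_ALIGN - 1);
    unsigned char *data;
    size_t offset;

    if (!raw)
        return 1;

    offset = (uintptr_t)raw & (RXBUF_ALIGN - 1);
    data = raw;
    if (offset)
        data += RXBUF_ALIGN - offset;       /* what skb_reserve() does here */

    printf("raw=%p data=%p misalignment=%lu\n", (void *)raw, (void *)data,
           (unsigned long)((uintptr_t)data & (RXBUF_ALIGN - 1)));   /* 0 */
    free(raw);
    return 0;
}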
+ * returns 0 on success, < 0 on failure + */ +static int gelic_card_fill_rx_chain(struct gelic_card *card) +{ + struct gelic_descr *descr = card->rx_chain.head; + int ret; + + do { + if (!descr->skb) { + ret = gelic_descr_prepare_rx(card, descr); + if (ret) + goto rewind; + } + descr = descr->next; + } while (descr != card->rx_chain.head); + + return 0; +rewind: + gelic_card_release_rx_chain(card); + return ret; +} + +/** + * gelic_card_alloc_rx_skbs - allocates rx skbs in rx descriptor chains + * @card: card structure + * + * returns 0 on success, < 0 on failure + */ +static int gelic_card_alloc_rx_skbs(struct gelic_card *card) +{ + struct gelic_descr_chain *chain; + int ret; + chain = &card->rx_chain; + ret = gelic_card_fill_rx_chain(card); + chain->tail = card->rx_top->prev; /* point to the last */ + return ret; +} + +/** + * gelic_descr_release_tx - processes a used tx descriptor + * @card: card structure + * @descr: descriptor to release + * + * releases a used tx descriptor (unmapping, freeing of skb) + */ +static void gelic_descr_release_tx(struct gelic_card *card, + struct gelic_descr *descr) +{ + struct sk_buff *skb = descr->skb; + + BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL)); + + dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + + descr->buf_addr = 0; + descr->buf_size = 0; + descr->next_descr_addr = 0; + descr->result_size = 0; + descr->valid_size = 0; + descr->data_status = 0; + descr->data_error = 0; + descr->skb = NULL; + + /* set descr status */ + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); +} + +static void gelic_card_stop_queues(struct gelic_card *card) +{ + netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET_0]); + + if (card->netdev[GELIC_PORT_WIRELESS]) + netif_stop_queue(card->netdev[GELIC_PORT_WIRELESS]); +} +static void gelic_card_wake_queues(struct gelic_card *card) +{ + netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET_0]); + + if (card->netdev[GELIC_PORT_WIRELESS]) + netif_wake_queue(card->netdev[GELIC_PORT_WIRELESS]); +} +/** + * gelic_card_release_tx_chain - processes sent tx descriptors + * @card: adapter structure + * @stop: net_stop sequence + * + * releases the tx descriptors that gelic has finished with + */ +static void gelic_card_release_tx_chain(struct gelic_card *card, int stop) +{ + struct gelic_descr_chain *tx_chain; + enum gelic_descr_dma_status status; + struct net_device *netdev; + int release = 0; + + for (tx_chain = &card->tx_chain; + tx_chain->head != tx_chain->tail && tx_chain->tail; + tx_chain->tail = tx_chain->tail->next) { + status = gelic_descr_get_status(tx_chain->tail); + netdev = tx_chain->tail->skb->dev; + switch (status) { + case GELIC_DESCR_DMA_RESPONSE_ERROR: + case GELIC_DESCR_DMA_PROTECTION_ERROR: + case GELIC_DESCR_DMA_FORCE_END: + if (printk_ratelimit()) + dev_info(ctodev(card), + "%s: forcing end of tx descriptor " \ + "with status %x\n", + __func__, status); + netdev->stats.tx_dropped++; + break; + + case GELIC_DESCR_DMA_COMPLETE: + if (tx_chain->tail->skb) { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += + tx_chain->tail->skb->len; + } + break; + + case GELIC_DESCR_DMA_CARDOWNED: + /* pending tx request */ + default: + /* any other value (== GELIC_DESCR_DMA_NOT_IN_USE) */ + if (!stop) + goto out; + } + gelic_descr_release_tx(card, tx_chain->tail); + release ++; + } +out: + if (!stop && release) + gelic_card_wake_queues(card); +} + +/** + * gelic_net_set_multi - sets multicast addresses and promisc flags + * 
@netdev: interface device structure + * + * gelic_net_set_multi configures multicast addresses as needed for the + * netdev interface. It also sets up multicast, allmulti and promisc + * flags appropriately + */ +void gelic_net_set_multi(struct net_device *netdev) +{ + struct gelic_card *card = netdev_card(netdev); + struct netdev_hw_addr *ha; + unsigned int i; + uint8_t *p; + u64 addr; + int status; + + /* clear all multicast address */ + status = lv1_net_remove_multicast_address(bus_id(card), dev_id(card), + 0, 1); + if (status) + dev_err(ctodev(card), + "lv1_net_remove_multicast_address failed %d\n", + status); + /* set broadcast address */ + status = lv1_net_add_multicast_address(bus_id(card), dev_id(card), + GELIC_NET_BROADCAST_ADDR, 0); + if (status) + dev_err(ctodev(card), + "lv1_net_add_multicast_address failed, %d\n", + status); + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) { + status = lv1_net_add_multicast_address(bus_id(card), + dev_id(card), + 0, 1); + if (status) + dev_err(ctodev(card), + "lv1_net_add_multicast_address failed, %d\n", + status); + return; + } + + /* set multicast addresses */ + netdev_for_each_mc_addr(ha, netdev) { + addr = 0; + p = ha->addr; + for (i = 0; i < ETH_ALEN; i++) { + addr <<= 8; + addr |= *p++; + } + status = lv1_net_add_multicast_address(bus_id(card), + dev_id(card), + addr, 0); + if (status) + dev_err(ctodev(card), + "lv1_net_add_multicast_address failed, %d\n", + status); + } +} + +/** + * gelic_net_stop - called upon ifconfig down + * @netdev: interface device structure + * + * always returns 0 + */ +int gelic_net_stop(struct net_device *netdev) +{ + struct gelic_card *card; + + pr_debug("%s: start\n", __func__); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + card = netdev_card(netdev); + gelic_card_down(card); + + pr_debug("%s: done\n", __func__); + return 0; +} + +/** + * gelic_card_get_next_tx_descr - returns the next available tx descriptor + * @card: device structure to get descriptor from + * + * returns the address of the next descriptor, or NULL if not available. + */ +static struct gelic_descr * +gelic_card_get_next_tx_descr(struct gelic_card *card) +{ + if (!card->tx_chain.head) + return NULL; + /* see if the next descriptor is free */ + if (card->tx_chain.tail != card->tx_chain.head->next && + gelic_descr_get_status(card->tx_chain.head) == + GELIC_DESCR_DMA_NOT_IN_USE) + return card->tx_chain.head; + else + return NULL; + +} + +/** + * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field + * @descr: descriptor structure to fill out + * @skb: packet to consider + * + * fills out the command and status field of the descriptor structure, + * depending on hardware checksum settings. This function assumes a wmb() + * has executed before. + */ +static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr, + struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + else { + /* is packet ip? + * if yes: tcp? udp? 
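/*
 * Illustrative aside, not part of the original patch: gelic_net_set_multi()
 * above folds each 6-byte multicast address into the low 48 bits of a u64,
 * most significant byte first, before handing it to
 * lv1_net_add_multicast_address(). A standalone version of that packing loop:
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t mac_to_u64(const uint8_t mac[6])
{
    uint64_t addr = 0;

    for (int i = 0; i < 6; i++) {
        addr <<= 8;
        addr |= mac[i];
    }
    return addr;
}

int main(void)
{
    const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    /* prints 0xffffffffffff, i.e. GELIC_NET_BROADCAST_ADDR */
    printf("%#llx\n", (unsigned long long)mac_to_u64(bcast));
    return 0;
}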
*/ + if (skb->protocol == htons(ETH_P_IP)) { + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + + else if (ip_hdr(skb)->protocol == IPPROTO_UDP) + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + else /* + * the stack should checksum non-tcp and non-udp + * packets on his own: NETIF_F_IP_CSUM + */ + descr->dmac_cmd_status = + cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM | + GELIC_DESCR_TX_DMA_FRAME_TAIL); + } + } +} + +static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, + unsigned short tag) +{ + struct vlan_ethhdr *veth; + static unsigned int c; + + if (skb_headroom(skb) < VLAN_HLEN) { + struct sk_buff *sk_tmp = skb; + pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c); + skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN); + if (!skb) + return NULL; + dev_kfree_skb_any(sk_tmp); + } + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + + /* Move the mac addresses to the top of buffer */ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + + veth->h_vlan_proto = cpu_to_be16(ETH_P_8021Q); + veth->h_vlan_TCI = htons(tag); + + return skb; +} + +/** + * gelic_descr_prepare_tx - setup a descriptor for sending packets + * @card: card structure + * @descr: descriptor structure + * @skb: packet to use + * + * returns 0 on success, <0 on failure. + * + */ +static int gelic_descr_prepare_tx(struct gelic_card *card, + struct gelic_descr *descr, + struct sk_buff *skb) +{ + dma_addr_t buf; + + if (card->vlan_required) { + struct sk_buff *skb_tmp; + enum gelic_port_type type; + + type = netdev_port(skb->dev)->type; + skb_tmp = gelic_put_vlan_tag(skb, + card->vlan[type].tx); + if (!skb_tmp) + return -ENOMEM; + skb = skb_tmp; + } + + buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE); + + if (!buf) { + dev_err(ctodev(card), + "dma map 2 failed (%p, %i). 
Dropping packet\n", + skb->data, skb->len); + return -ENOMEM; + } + + descr->buf_addr = cpu_to_be32(buf); + descr->buf_size = cpu_to_be32(skb->len); + descr->skb = skb; + descr->data_status = 0; + descr->next_descr_addr = 0; /* terminate hw descr */ + gelic_descr_set_tx_cmdstat(descr, skb); + + /* bump free descriptor pointer */ + card->tx_chain.head = descr->next; + return 0; +} + +/** + * gelic_card_kick_txdma - enables TX DMA processing + * @card: card structure + * @descr: descriptor address to enable TX processing at + * + */ +static int gelic_card_kick_txdma(struct gelic_card *card, + struct gelic_descr *descr) +{ + int status = 0; + + if (card->tx_dma_progress) + return 0; + + if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) { + card->tx_dma_progress = 1; + status = lv1_net_start_tx_dma(bus_id(card), dev_id(card), + descr->bus_addr, 0); + if (status) { + card->tx_dma_progress = 0; + dev_info(ctodev(card), "lv1_net_start_txdma failed," \ + "status=%d\n", status); + } + } + return status; +} + +/** + * gelic_net_xmit - transmits a frame over the device + * @skb: packet to send out + * @netdev: interface device structure + * + * returns 0 on success, <0 on failure + */ +int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct gelic_card *card = netdev_card(netdev); + struct gelic_descr *descr; + int result; + unsigned long flags; + + spin_lock_irqsave(&card->tx_lock, flags); + + gelic_card_release_tx_chain(card, 0); + + descr = gelic_card_get_next_tx_descr(card); + if (!descr) { + /* + * no more descriptors free + */ + gelic_card_stop_queues(card); + spin_unlock_irqrestore(&card->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + result = gelic_descr_prepare_tx(card, descr, skb); + if (result) { + /* + * DMA map failed. As chances are that failure + * would continue, just release skb and return + */ + netdev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&card->tx_lock, flags); + return NETDEV_TX_OK; + } + /* + * link this prepared descriptor to previous one + * to achieve high performance + */ + descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr); + /* + * as hardware descriptor is modified in the above lines, + * ensure that the hardware sees it + */ + wmb(); + if (gelic_card_kick_txdma(card, descr)) { + /* + * kick failed. + * release descriptor which was just prepared + */ + netdev->stats.tx_dropped++; + /* don't trigger BUG_ON() in gelic_descr_release_tx */ + descr->data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL); + gelic_descr_release_tx(card, descr); + /* reset head */ + card->tx_chain.head = descr; + /* reset hw termination */ + descr->prev->next_descr_addr = 0; + dev_info(ctodev(card), "%s: kick failure\n", __func__); + } + + spin_unlock_irqrestore(&card->tx_lock, flags); + return NETDEV_TX_OK; +} + +/** + * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on + * @descr: descriptor to process + * @card: card structure + * @netdev: net_device structure to be passed packet + * + * iommu-unmaps the skb, fills out skb structure and passes the data to the + * stack. The descriptor state is not changed. 
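/*
 * Illustrative aside, not part of the original patch: gelic_put_vlan_tag()
 * above makes VLAN_HLEN bytes of room in front of the frame, slides the two
 * MAC addresses into it and writes the 802.1Q header into the gap. The same
 * byte shuffling on a plain buffer; the VLAN id used here is arbitrary:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define VLAN_HLEN   4
#define ETH_ALEN    6
#define ETH_P_8021Q 0x8100

static void put_vlan_tag_sketch(uint8_t *newstart, uint16_t tci)
{
    uint16_t proto = htons(ETH_P_8021Q);
    uint16_t tag = htons(tci);

    /* move dst + src MAC up into the headroom */
    memmove(newstart, newstart + VLAN_HLEN, 2 * ETH_ALEN);
    /* splice the VLAN ethertype and tag in right after the addresses */
    memcpy(newstart + 2 * ETH_ALEN, &proto, sizeof(proto));
    memcpy(newstart + 2 * ETH_ALEN + 2, &tag, sizeof(tag));
}

int main(void)
{
    /* 4 bytes of headroom, dst MAC, src MAC, ethertype 0x0800 */
    uint8_t buf[32] = { 0, 0, 0, 0,
                        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                        0x08, 0x00 };

    put_vlan_tag_sketch(buf, 42);
    for (int i = 0; i < 20; i++)
        printf("%02x ", buf[i]);    /* ... 81 00 00 2a 08 00 ... */
    printf("\n");
    return 0;
}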
+ */ +static void gelic_net_pass_skb_up(struct gelic_descr *descr, + struct gelic_card *card, + struct net_device *netdev) + +{ + struct sk_buff *skb = descr->skb; + u32 data_status, data_error; + + data_status = be32_to_cpu(descr->data_status); + data_error = be32_to_cpu(descr->data_error); + /* unmap skb buffer */ + dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), + GELIC_NET_MAX_MTU, + DMA_FROM_DEVICE); + + skb_put(skb, be32_to_cpu(descr->valid_size)? + be32_to_cpu(descr->valid_size) : + be32_to_cpu(descr->result_size)); + if (!descr->valid_size) + dev_info(ctodev(card), "buffer full %x %x %x\n", + be32_to_cpu(descr->result_size), + be32_to_cpu(descr->buf_size), + be32_to_cpu(descr->dmac_cmd_status)); + + descr->skb = NULL; + /* + * the card put 2 bytes vlan tag in front + * of the ethernet frame + */ + skb_pull(skb, 2); + skb->protocol = eth_type_trans(skb, netdev); + + /* checksum offload */ + if (netdev->features & NETIF_F_RXCSUM) { + if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) && + (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + } else + skb_checksum_none_assert(skb); + + /* update netdevice statistics */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + + /* pass skb up to stack */ + netif_receive_skb(skb); +} + +/** + * gelic_card_decode_one_descr - processes an rx descriptor + * @card: card structure + * + * returns 1 if a packet has been sent to the stack, otherwise 0 + * + * processes an rx descriptor by iommu-unmapping the data buffer and passing + * the packet up to the stack + */ +static int gelic_card_decode_one_descr(struct gelic_card *card) +{ + enum gelic_descr_dma_status status; + struct gelic_descr_chain *chain = &card->rx_chain; + struct gelic_descr *descr = chain->head; + struct net_device *netdev = NULL; + int dmac_chain_ended; + + status = gelic_descr_get_status(descr); + + if (status == GELIC_DESCR_DMA_CARDOWNED) + return 0; + + if (status == GELIC_DESCR_DMA_NOT_IN_USE) { + dev_dbg(ctodev(card), "dormant descr? %p\n", descr); + return 0; + } + + /* netdevice select */ + if (card->vlan_required) { + unsigned int i; + u16 vid; + vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK; + for (i = 0; i < GELIC_PORT_MAX; i++) { + if (card->vlan[i].rx == vid) { + netdev = card->netdev[i]; + break; + } + } + if (GELIC_PORT_MAX <= i) { + pr_info("%s: unknown packet vid=%x\n", __func__, vid); + goto refill; + } + } else + netdev = card->netdev[GELIC_PORT_ETHERNET_0]; + + if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) || + (status == GELIC_DESCR_DMA_PROTECTION_ERROR) || + (status == GELIC_DESCR_DMA_FORCE_END)) { + dev_info(ctodev(card), "dropping RX descriptor with state %x\n", + status); + netdev->stats.rx_dropped++; + goto refill; + } + + if (status == GELIC_DESCR_DMA_BUFFER_FULL) { + /* + * Buffer full would occur if and only if + * the frame length was longer than the size of this + * descriptor's buffer. If the frame length was equal + * to or shorter than buffer'size, FRAME_END condition + * would occur. + * Anyway this frame was longer than the MTU, + * just drop it. + */ + dev_info(ctodev(card), "overlength frame\n"); + goto refill; + } + /* + * descriptors any other than FRAME_END here should + * be treated as error. 
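/*
 * Illustrative aside, not part of the original patch: the checksum-offload
 * decision in gelic_net_pass_skb_up() above marks a frame CHECKSUM_UNNECESSARY
 * only when the card reports that it did check IP/TCP/UDP (data_status) and
 * found no error (data_error). The constants mirror the GELIC_DESCR_RX* bits
 * defined in ps3_gelic_net.h:
 */
#include <stdio.h>
#include <stdint.h>

#define RXIPCHK     0x20000000u     /* IP checksum performed */
#define RXTCPCHK    0x10000000u     /* TCP/UDP checksum performed */
#define RXIPCHKERR  0x08000000u     /* IP checksum error */
#define RXTCPCHKERR 0x04000000u     /* TCP/UDP checksum error */

#define STATUS_CHK_MASK (RXIPCHK | RXTCPCHK)
#define ERROR_CHK_MASK  (RXIPCHKERR | RXTCPCHKERR)

static int rx_csum_unnecessary(uint32_t data_status, uint32_t data_error)
{
    return (data_status & STATUS_CHK_MASK) &&
           !(data_error & ERROR_CHK_MASK);
}

int main(void)
{
    printf("%d\n", rx_csum_unnecessary(RXIPCHK | RXTCPCHK, 0));            /* 1 */
    printf("%d\n", rx_csum_unnecessary(RXIPCHK | RXTCPCHK, RXTCPCHKERR));  /* 0 */
    printf("%d\n", rx_csum_unnecessary(0, 0));                             /* 0 */
    return 0;
}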
+ */ + if (status != GELIC_DESCR_DMA_FRAME_END) { + dev_dbg(ctodev(card), "RX descriptor with state %x\n", + status); + goto refill; + } + + /* ok, we've got a packet in descr */ + gelic_net_pass_skb_up(descr, card, netdev); +refill: + + /* is the current descriptor terminated with next_descr == NULL? */ + dmac_chain_ended = + be32_to_cpu(descr->dmac_cmd_status) & + GELIC_DESCR_RX_DMA_CHAIN_END; + /* + * So that always DMAC can see the end + * of the descriptor chain to avoid + * from unwanted DMAC overrun. + */ + descr->next_descr_addr = 0; + + /* change the descriptor state: */ + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); + + /* + * this call can fail, but for now, just leave this + * descriptor without skb + */ + gelic_descr_prepare_rx(card, descr); + + chain->tail = descr; + chain->head = descr->next; + + /* + * Set this descriptor the end of the chain. + */ + descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr); + + /* + * If dmac chain was met, DMAC stopped. + * thus re-enable it + */ + + if (dmac_chain_ended) + gelic_card_enable_rxdmac(card); + + return 1; +} + +/** + * gelic_net_poll - NAPI poll function called by the stack to return packets + * @napi: napi structure + * @budget: number of packets we can pass to the stack at most + * + * returns the number of the processed packets + * + */ +static int gelic_net_poll(struct napi_struct *napi, int budget) +{ + struct gelic_card *card = container_of(napi, struct gelic_card, napi); + int packets_done = 0; + + while (packets_done < budget) { + if (!gelic_card_decode_one_descr(card)) + break; + + packets_done++; + } + + if (packets_done < budget) { + napi_complete(napi); + gelic_card_rx_irq_on(card); + } + return packets_done; +} +/** + * gelic_net_change_mtu - changes the MTU of an interface + * @netdev: interface device structure + * @new_mtu: new MTU value + * + * returns 0 on success, <0 on failure + */ +int gelic_net_change_mtu(struct net_device *netdev, int new_mtu) +{ + /* no need to re-alloc skbs or so -- the max mtu is about 2.3k + * and mtu is outbound only anyway */ + if ((new_mtu < GELIC_NET_MIN_MTU) || + (new_mtu > GELIC_NET_MAX_MTU)) { + return -EINVAL; + } + netdev->mtu = new_mtu; + return 0; +} + +/** + * gelic_card_interrupt - event handler for gelic_net + */ +static irqreturn_t gelic_card_interrupt(int irq, void *ptr) +{ + unsigned long flags; + struct gelic_card *card = ptr; + u64 status; + + status = card->irq_status; + + if (!status) + return IRQ_NONE; + + status &= card->irq_mask; + + if (status & GELIC_CARD_RXINT) { + gelic_card_rx_irq_off(card); + napi_schedule(&card->napi); + } + + if (status & GELIC_CARD_TXINT) { + spin_lock_irqsave(&card->tx_lock, flags); + card->tx_dma_progress = 0; + gelic_card_release_tx_chain(card, 0); + /* kick outstanding tx descriptor if any */ + gelic_card_kick_txdma(card, card->tx_chain.tail); + spin_unlock_irqrestore(&card->tx_lock, flags); + } + + /* ether port status changed */ + if (status & GELIC_CARD_PORT_STATUS_CHANGED) + gelic_card_get_ether_port_status(card, 1); + +#ifdef CONFIG_GELIC_WIRELESS + if (status & (GELIC_CARD_WLAN_EVENT_RECEIVED | + GELIC_CARD_WLAN_COMMAND_COMPLETED)) + gelic_wl_interrupt(card->netdev[GELIC_PORT_WIRELESS], status); +#endif + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * gelic_net_poll_controller - artificial interrupt for netconsole etc. 
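/*
 * Illustrative aside, not part of the original patch: the shape of the NAPI
 * contract that gelic_net_poll() above follows. Consume at most `budget`
 * packets per call; only when the ring drains before the budget is spent does
 * the poller stop and re-enable the RX interrupt. The ring and "irq" below
 * are trivial stand-ins, not driver APIs.
 */
#include <stdio.h>

static int ring_pending = 10;   /* pretend ten packets are waiting */
static int rx_irq_enabled;

static int decode_one(void)     /* stands in for gelic_card_decode_one_descr() */
{
    if (!ring_pending)
        return 0;
    ring_pending--;
    return 1;
}

static int poll_sketch(int budget)
{
    int done = 0;

    while (done < budget && decode_one())
        done++;
    if (done < budget)
        rx_irq_enabled = 1;     /* napi_complete() + gelic_card_rx_irq_on() */
    return done;
}

int main(void)
{
    int n;

    n = poll_sketch(8);
    printf("pass 1: %d packets, rx irq on=%d\n", n, rx_irq_enabled);    /* 8, 0 */
    n = poll_sketch(8);
    printf("pass 2: %d packets, rx irq on=%d\n", n, rx_irq_enabled);    /* 2, 1 */
    return 0;
}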
+ * @netdev: interface device structure + * + * see Documentation/networking/netconsole.txt + */ +void gelic_net_poll_controller(struct net_device *netdev) +{ + struct gelic_card *card = netdev_card(netdev); + + gelic_card_set_irq_mask(card, 0); + gelic_card_interrupt(netdev->irq, netdev); + gelic_card_set_irq_mask(card, card->irq_mask); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +/** + * gelic_net_open - called upon ifconfig up + * @netdev: interface device structure + * + * returns 0 on success, <0 on failure + * + * gelic_net_open allocates all the descriptors and memory needed for + * operation, sets up multicast list and enables interrupts + */ +int gelic_net_open(struct net_device *netdev) +{ + struct gelic_card *card = netdev_card(netdev); + + dev_dbg(ctodev(card), " -> %s %p\n", __func__, netdev); + + gelic_card_up(card); + + netif_start_queue(netdev); + gelic_card_get_ether_port_status(card, 1); + + dev_dbg(ctodev(card), " <- %s\n", __func__); + return 0; +} + +void gelic_net_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static int gelic_ether_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct gelic_card *card = netdev_card(netdev); + + gelic_card_get_ether_port_status(card, 0); + + if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + + switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) { + case GELIC_LV1_ETHER_SPEED_10: + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + case GELIC_LV1_ETHER_SPEED_100: + ethtool_cmd_speed_set(cmd, SPEED_100); + break; + case GELIC_LV1_ETHER_SPEED_1000: + ethtool_cmd_speed_set(cmd, SPEED_1000); + break; + default: + pr_info("%s: speed unknown\n", __func__); + ethtool_cmd_speed_set(cmd, SPEED_10); + break; + } + + cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full; + cmd->advertising = cmd->supported; + if (card->link_mode & GELIC_LV1_ETHER_AUTO_NEG) { + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->autoneg = AUTONEG_DISABLE; + cmd->advertising &= ~ADVERTISED_Autoneg; + } + cmd->port = PORT_TP; + + return 0; +} + +static int gelic_ether_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct gelic_card *card = netdev_card(netdev); + u64 mode; + int ret; + + if (cmd->autoneg == AUTONEG_ENABLE) { + mode = GELIC_LV1_ETHER_AUTO_NEG; + } else { + switch (cmd->speed) { + case SPEED_10: + mode = GELIC_LV1_ETHER_SPEED_10; + break; + case SPEED_100: + mode = GELIC_LV1_ETHER_SPEED_100; + break; + case SPEED_1000: + mode = GELIC_LV1_ETHER_SPEED_1000; + break; + default: + return -EINVAL; + } + if (cmd->duplex == DUPLEX_FULL) + mode |= GELIC_LV1_ETHER_FULL_DUPLEX; + else if (cmd->speed == SPEED_1000) { + pr_info("1000 half duplex is not supported.\n"); + return -EINVAL; + } + } + + ret = gelic_card_set_link_mode(card, mode); + + if (ret) + return ret; + + return 0; +} + +static void gelic_net_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + if (0 <= ps3_compare_firmware_version(2, 2, 0)) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + + wol->wolopts = ps3_sys_manager_get_wol() ? 
wol->supported : 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} +static int gelic_net_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + int status; + struct gelic_card *card; + u64 v1, v2; + + if (ps3_compare_firmware_version(2, 2, 0) < 0 || + !capable(CAP_NET_ADMIN)) + return -EPERM; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + card = netdev_card(netdev); + if (wol->wolopts & WAKE_MAGIC) { + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_MAGIC_PACKET, + 0, GELIC_LV1_WOL_MP_ENABLE, + &v1, &v2); + if (status) { + pr_info("%s: enabling WOL failed %d\n", __func__, + status); + status = -EIO; + goto done; + } + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_ADD_MATCH_ADDR, + 0, GELIC_LV1_WOL_MATCH_ALL, + &v1, &v2); + if (!status) + ps3_sys_manager_set_wol(1); + else { + pr_info("%s: enabling WOL filter failed %d\n", + __func__, status); + status = -EIO; + } + } else { + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_MAGIC_PACKET, + 0, GELIC_LV1_WOL_MP_DISABLE, + &v1, &v2); + if (status) { + pr_info("%s: disabling WOL failed %d\n", __func__, + status); + status = -EIO; + goto done; + } + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_SET_WOL, + GELIC_LV1_WOL_DELETE_MATCH_ADDR, + 0, GELIC_LV1_WOL_MATCH_ALL, + &v1, &v2); + if (!status) + ps3_sys_manager_set_wol(0); + else { + pr_info("%s: removing WOL filter failed %d\n", + __func__, status); + status = -EIO; + } + } +done: + return status; +} + +static const struct ethtool_ops gelic_ether_ethtool_ops = { + .get_drvinfo = gelic_net_get_drvinfo, + .get_settings = gelic_ether_get_settings, + .set_settings = gelic_ether_set_settings, + .get_link = ethtool_op_get_link, + .get_wol = gelic_net_get_wol, + .set_wol = gelic_net_set_wol, +}; + +/** + * gelic_net_tx_timeout_task - task scheduled by the watchdog timeout + * function (to be called not under interrupt status) + * @work: work is context of tx timout task + * + * called as task when tx hangs, resets interface (if interface is up) + */ +static void gelic_net_tx_timeout_task(struct work_struct *work) +{ + struct gelic_card *card = + container_of(work, struct gelic_card, tx_timeout_task); + struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0]; + + dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__); + + if (!(netdev->flags & IFF_UP)) + goto out; + + netif_device_detach(netdev); + gelic_net_stop(netdev); + + gelic_net_open(netdev); + netif_device_attach(netdev); + +out: + atomic_dec(&card->tx_timeout_task_counter); +} + +/** + * gelic_net_tx_timeout - called when the tx timeout watchdog kicks in. + * @netdev: interface device structure + * + * called, if tx hangs. 
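/*
 * Illustrative aside, not part of the original patch: gelic_ether_get_settings()
 * above decodes the link speed from the lv1 GET_ETH_PORT_STATUS word, where one
 * bit of a small field selects 10/100/1000 Mb/s (values mirror
 * GELIC_LV1_ETHER_SPEED_* in ps3_gelic_net.h). The decode as a standalone
 * function:
 */
#include <stdio.h>
#include <stdint.h>

#define LV1_ETHER_SPEED_10   0x10
#define LV1_ETHER_SPEED_100  0x20
#define LV1_ETHER_SPEED_1000 0x40
#define LV1_ETHER_SPEED_MASK 0x70

static int port_status_to_mbps(uint64_t status)
{
    switch (status & LV1_ETHER_SPEED_MASK) {
    case LV1_ETHER_SPEED_10:
        return 10;
    case LV1_ETHER_SPEED_100:
        return 100;
    case LV1_ETHER_SPEED_1000:
        return 1000;
    default:
        return 10;      /* the driver also falls back to SPEED_10 */
    }
}

int main(void)
{
    /* link up (0x1) + full duplex (0x2) + 1000 Mb/s (0x40) */
    printf("%d Mb/s\n", port_status_to_mbps(0x43));     /* 1000 */
    return 0;
}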
Schedules a task that resets the interface + */ +void gelic_net_tx_timeout(struct net_device *netdev) +{ + struct gelic_card *card; + + card = netdev_card(netdev); + atomic_inc(&card->tx_timeout_task_counter); + if (netdev->flags & IFF_UP) + schedule_work(&card->tx_timeout_task); + else + atomic_dec(&card->tx_timeout_task_counter); +} + +static const struct net_device_ops gelic_netdevice_ops = { + .ndo_open = gelic_net_open, + .ndo_stop = gelic_net_stop, + .ndo_start_xmit = gelic_net_xmit, + .ndo_set_rx_mode = gelic_net_set_multi, + .ndo_change_mtu = gelic_net_change_mtu, + .ndo_tx_timeout = gelic_net_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gelic_net_poll_controller, +#endif +}; + +/** + * gelic_ether_setup_netdev_ops - initialization of net_device operations + * @netdev: net_device structure + * + * fills out function pointers in the net_device structure + */ +static void gelic_ether_setup_netdev_ops(struct net_device *netdev, + struct napi_struct *napi) +{ + netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; + /* NAPI */ + netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT); + netdev->ethtool_ops = &gelic_ether_ethtool_ops; + netdev->netdev_ops = &gelic_netdevice_ops; +} + +/** + * gelic_ether_setup_netdev - initialization of net_device + * @netdev: net_device structure + * @card: card structure + * + * Returns 0 on success or <0 on failure + * + * gelic_ether_setup_netdev initializes the net_device structure + * and register it. + **/ +int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card) +{ + int status; + u64 v1, v2; + + netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + + netdev->features = NETIF_F_IP_CSUM; + if (GELIC_CARD_RX_CSUM_DEFAULT) + netdev->features |= NETIF_F_RXCSUM; + + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_MAC_ADDRESS, + 0, 0, 0, &v1, &v2); + v1 <<= 16; + if (status || !is_valid_ether_addr((u8 *)&v1)) { + dev_info(ctodev(card), + "%s:lv1_net_control GET_MAC_ADDR failed %d\n", + __func__, status); + return -EINVAL; + } + memcpy(netdev->dev_addr, &v1, ETH_ALEN); + + if (card->vlan_required) { + netdev->hard_header_len += VLAN_HLEN; + /* + * As vlan is internally used, + * we can not receive vlan packets + */ + netdev->features |= NETIF_F_VLAN_CHALLENGED; + } + + status = register_netdev(netdev); + if (status) { + dev_err(ctodev(card), "%s:Couldn't register %s %d\n", + __func__, netdev->name, status); + return status; + } + dev_info(ctodev(card), "%s: MAC addr %pM\n", + netdev->name, netdev->dev_addr); + + return 0; +} + +/** + * gelic_alloc_card_net - allocates net_device and card structure + * + * returns the card structure or NULL in case of errors + * + * the card and net_device structures are linked to each other + */ +#define GELIC_ALIGN (32) +static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev) +{ + struct gelic_card *card; + struct gelic_port *port; + void *p; + size_t alloc_size; + /* + * gelic requires dma descriptor is 32 bytes aligned and + * the hypervisor requires irq_status is 8 bytes aligned. 
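/*
 * Illustrative aside, not part of the original patch: the "v1 <<= 16" in
 * gelic_net_setup_netdev() above. lv1 hands the MAC back in the low 48 bits
 * of a 64-bit value, and the shift moves it into the most significant bytes
 * so that, on the big-endian PS3, copying the first ETH_ALEN bytes of v1
 * yields the address in wire order. This sketch extracts the bytes
 * explicitly, independent of host endianness, with an illustrative address:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t v1 = 0x001122334455ULL;    /* 00:11:22:33:44:55 */
    uint8_t mac[6];

    v1 <<= 16;
    for (int i = 0; i < 6; i++)
        mac[i] = (uint8_t)(v1 >> (8 * (7 - i)));

    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}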
+ */ + BUILD_BUG_ON(offsetof(struct gelic_card, irq_status) % 8); + BUILD_BUG_ON(offsetof(struct gelic_card, descr) % 32); + alloc_size = + sizeof(struct gelic_card) + + sizeof(struct gelic_descr) * GELIC_NET_RX_DESCRIPTORS + + sizeof(struct gelic_descr) * GELIC_NET_TX_DESCRIPTORS + + GELIC_ALIGN - 1; + + p = kzalloc(alloc_size, GFP_KERNEL); + if (!p) + return NULL; + card = PTR_ALIGN(p, GELIC_ALIGN); + card->unalign = p; + + /* + * alloc netdev + */ + *netdev = alloc_etherdev(sizeof(struct gelic_port)); + if (!*netdev) { + kfree(card->unalign); + return NULL; + } + port = netdev_priv(*netdev); + + /* gelic_port */ + port->netdev = *netdev; + port->card = card; + port->type = GELIC_PORT_ETHERNET_0; + + /* gelic_card */ + card->netdev[GELIC_PORT_ETHERNET_0] = *netdev; + + INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task); + init_waitqueue_head(&card->waitq); + atomic_set(&card->tx_timeout_task_counter, 0); + mutex_init(&card->updown_lock); + atomic_set(&card->users, 0); + + return card; +} + +static void gelic_card_get_vlan_info(struct gelic_card *card) +{ + u64 v1, v2; + int status; + unsigned int i; + struct { + int tx; + int rx; + } vlan_id_ix[2] = { + [GELIC_PORT_ETHERNET_0] = { + .tx = GELIC_LV1_VLAN_TX_ETHERNET_0, + .rx = GELIC_LV1_VLAN_RX_ETHERNET_0 + }, + [GELIC_PORT_WIRELESS] = { + .tx = GELIC_LV1_VLAN_TX_WIRELESS, + .rx = GELIC_LV1_VLAN_RX_WIRELESS + } + }; + + for (i = 0; i < ARRAY_SIZE(vlan_id_ix); i++) { + /* tx tag */ + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_VLAN_ID, + vlan_id_ix[i].tx, + 0, 0, &v1, &v2); + if (status || !v1) { + if (status != LV1_NO_ENTRY) + dev_dbg(ctodev(card), + "get vlan id for tx(%d) failed(%d)\n", + vlan_id_ix[i].tx, status); + card->vlan[i].tx = 0; + card->vlan[i].rx = 0; + continue; + } + card->vlan[i].tx = (u16)v1; + + /* rx tag */ + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_VLAN_ID, + vlan_id_ix[i].rx, + 0, 0, &v1, &v2); + if (status || !v1) { + if (status != LV1_NO_ENTRY) + dev_info(ctodev(card), + "get vlan id for rx(%d) failed(%d)\n", + vlan_id_ix[i].rx, status); + card->vlan[i].tx = 0; + card->vlan[i].rx = 0; + continue; + } + card->vlan[i].rx = (u16)v1; + + dev_dbg(ctodev(card), "vlan_id[%d] tx=%02x rx=%02x\n", + i, card->vlan[i].tx, card->vlan[i].rx); + } + + if (card->vlan[GELIC_PORT_ETHERNET_0].tx) { + BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx); + card->vlan_required = 1; + } else + card->vlan_required = 0; + + /* check wirelss capable firmware */ + if (ps3_compare_firmware_version(1, 6, 0) < 0) { + card->vlan[GELIC_PORT_WIRELESS].tx = 0; + card->vlan[GELIC_PORT_WIRELESS].rx = 0; + } + + dev_info(ctodev(card), "internal vlan %s\n", + card->vlan_required? 
"enabled" : "disabled"); +} +/** + * ps3_gelic_driver_probe - add a device to the control of this driver + */ +static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev) +{ + struct gelic_card *card; + struct net_device *netdev; + int result; + + pr_debug("%s: called\n", __func__); + + udbg_shutdown_ps3gelic(); + + result = ps3_open_hv_device(dev); + + if (result) { + dev_dbg(&dev->core, "%s:ps3_open_hv_device failed\n", + __func__); + goto fail_open; + } + + result = ps3_dma_region_create(dev->d_region); + + if (result) { + dev_dbg(&dev->core, "%s:ps3_dma_region_create failed(%d)\n", + __func__, result); + BUG_ON("check region type"); + goto fail_dma_region; + } + + /* alloc card/netdevice */ + card = gelic_alloc_card_net(&netdev); + if (!card) { + dev_info(&dev->core, "%s:gelic_net_alloc_card failed\n", + __func__); + result = -ENOMEM; + goto fail_alloc_card; + } + ps3_system_bus_set_drvdata(dev, card); + card->dev = dev; + + /* get internal vlan info */ + gelic_card_get_vlan_info(card); + + card->link_mode = GELIC_LV1_ETHER_AUTO_NEG; + + /* setup interrupt */ + result = lv1_net_set_interrupt_status_indicator(bus_id(card), + dev_id(card), + ps3_mm_phys_to_lpar(__pa(&card->irq_status)), + 0); + + if (result) { + dev_dbg(&dev->core, + "%s:set_interrupt_status_indicator failed: %s\n", + __func__, ps3_result(result)); + result = -EIO; + goto fail_status_indicator; + } + + result = ps3_sb_event_receive_port_setup(dev, PS3_BINDING_CPU_ANY, + &card->irq); + + if (result) { + dev_info(ctodev(card), + "%s:gelic_net_open_device failed (%d)\n", + __func__, result); + result = -EPERM; + goto fail_alloc_irq; + } + result = request_irq(card->irq, gelic_card_interrupt, + 0, netdev->name, card); + + if (result) { + dev_info(ctodev(card), "%s:request_irq failed (%d)\n", + __func__, result); + goto fail_request_irq; + } + + /* setup card structure */ + card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT | + GELIC_CARD_PORT_STATUS_CHANGED; + + + result = gelic_card_init_chain(card, &card->tx_chain, + card->descr, GELIC_NET_TX_DESCRIPTORS); + if (result) + goto fail_alloc_tx; + result = gelic_card_init_chain(card, &card->rx_chain, + card->descr + GELIC_NET_TX_DESCRIPTORS, + GELIC_NET_RX_DESCRIPTORS); + if (result) + goto fail_alloc_rx; + + /* head of chain */ + card->tx_top = card->tx_chain.head; + card->rx_top = card->rx_chain.head; + dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n", + card->rx_top, card->tx_top, sizeof(struct gelic_descr), + GELIC_NET_RX_DESCRIPTORS); + /* allocate rx skbs */ + result = gelic_card_alloc_rx_skbs(card); + if (result) + goto fail_alloc_skbs; + + spin_lock_init(&card->tx_lock); + card->tx_dma_progress = 0; + + /* setup net_device structure */ + netdev->irq = card->irq; + SET_NETDEV_DEV(netdev, &card->dev->core); + gelic_ether_setup_netdev_ops(netdev, &card->napi); + result = gelic_net_setup_netdev(netdev, card); + if (result) { + dev_dbg(&dev->core, "%s: setup_netdev failed %d", + __func__, result); + goto fail_setup_netdev; + } + +#ifdef CONFIG_GELIC_WIRELESS + result = gelic_wl_driver_probe(card); + if (result) { + dev_dbg(&dev->core, "%s: WL init failed\n", __func__); + goto fail_setup_netdev; + } +#endif + pr_debug("%s: done\n", __func__); + return 0; + +fail_setup_netdev: +fail_alloc_skbs: + gelic_card_free_chain(card, card->rx_chain.head); +fail_alloc_rx: + gelic_card_free_chain(card, card->tx_chain.head); +fail_alloc_tx: + free_irq(card->irq, card); + netdev->irq = NO_IRQ; +fail_request_irq: + ps3_sb_event_receive_port_destroy(dev, 
card->irq); +fail_alloc_irq: + lv1_net_set_interrupt_status_indicator(bus_id(card), + bus_id(card), + 0, 0); +fail_status_indicator: + ps3_system_bus_set_drvdata(dev, NULL); + kfree(netdev_card(netdev)->unalign); + free_netdev(netdev); +fail_alloc_card: + ps3_dma_region_free(dev->d_region); +fail_dma_region: + ps3_close_hv_device(dev); +fail_open: + return result; +} + +/** + * ps3_gelic_driver_remove - remove a device from the control of this driver + */ + +static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev) +{ + struct gelic_card *card = ps3_system_bus_get_drvdata(dev); + struct net_device *netdev0; + pr_debug("%s: called\n", __func__); + + /* set auto-negotiation */ + gelic_card_set_link_mode(card, GELIC_LV1_ETHER_AUTO_NEG); + +#ifdef CONFIG_GELIC_WIRELESS + gelic_wl_driver_remove(card); +#endif + /* stop interrupt */ + gelic_card_set_irq_mask(card, 0); + + /* turn off DMA, force end */ + gelic_card_disable_rxdmac(card); + gelic_card_disable_txdmac(card); + + /* release chains */ + gelic_card_release_tx_chain(card, 1); + gelic_card_release_rx_chain(card); + + gelic_card_free_chain(card, card->tx_top); + gelic_card_free_chain(card, card->rx_top); + + netdev0 = card->netdev[GELIC_PORT_ETHERNET_0]; + /* disconnect event port */ + free_irq(card->irq, card); + netdev0->irq = NO_IRQ; + ps3_sb_event_receive_port_destroy(card->dev, card->irq); + + wait_event(card->waitq, + atomic_read(&card->tx_timeout_task_counter) == 0); + + lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card), + 0 , 0); + + unregister_netdev(netdev0); + kfree(netdev_card(netdev0)->unalign); + free_netdev(netdev0); + + ps3_system_bus_set_drvdata(dev, NULL); + + ps3_dma_region_free(dev->d_region); + + ps3_close_hv_device(dev); + + pr_debug("%s: done\n", __func__); + return 0; +} + +static struct ps3_system_bus_driver ps3_gelic_driver = { + .match_id = PS3_MATCH_ID_GELIC, + .probe = ps3_gelic_driver_probe, + .remove = ps3_gelic_driver_remove, + .shutdown = ps3_gelic_driver_remove, + .core.name = "ps3_gelic_driver", + .core.owner = THIS_MODULE, +}; + +static int __init ps3_gelic_driver_init (void) +{ + return firmware_has_feature(FW_FEATURE_PS3_LV1) + ? ps3_system_bus_driver_register(&ps3_gelic_driver) + : -ENODEV; +} + +static void __exit ps3_gelic_driver_exit (void) +{ + ps3_system_bus_driver_unregister(&ps3_gelic_driver); +} + +module_init(ps3_gelic_driver_init); +module_exit(ps3_gelic_driver_exit); + +MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC); + diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h new file mode 100644 index 000000000..8505196be --- /dev/null +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -0,0 +1,384 @@ +/* + * PS3 Platfom gelic network driver. + * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2006, 2007 Sony Corporation. + * + * This file is based on: spider_net.h + * + * (C) Copyright IBM Corp. 2005 + * + * Authors : Utz Bacher + * Jens Osterkamp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
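/*
 * Illustrative aside, not part of the original patch: ps3_gelic_driver_probe()
 * above uses the usual kernel unwind ladder. Every acquired resource has a
 * fail_* label, and an error jumps to the label that releases everything
 * acquired so far, in reverse order. Plain malloc()s stand in for the driver's
 * HV device, DMA region and IRQ here; a real probe keeps them until remove().
 */
#include <stdio.h>
#include <stdlib.h>

static int probe_sketch(void)
{
    void *hv_dev, *dma_region, *irq;
    int result = -1;

    hv_dev = malloc(16);
    if (!hv_dev)
        goto fail_open;
    dma_region = malloc(16);
    if (!dma_region)
        goto fail_dma_region;
    irq = malloc(16);
    if (!irq)
        goto fail_irq;

    printf("probe ok\n");
    free(irq);              /* the sketch tears down immediately */
    free(dma_region);
    free(hv_dev);
    return 0;

fail_irq:
    free(dma_region);
fail_dma_region:
    free(hv_dev);
fail_open:
    return result;
}

int main(void)
{
    return probe_sketch();
}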
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _GELIC_NET_H
+#define _GELIC_NET_H
+
+/* descriptors */
+#define GELIC_NET_RX_DESCRIPTORS	128 /* num of descriptors */
+#define GELIC_NET_TX_DESCRIPTORS	128 /* num of descriptors */
+
+#define GELIC_NET_MAX_MTU		VLAN_ETH_FRAME_LEN
+#define GELIC_NET_MIN_MTU		VLAN_ETH_ZLEN
+#define GELIC_NET_RXBUF_ALIGN		128
+#define GELIC_CARD_RX_CSUM_DEFAULT	1 /* hw chksum */
+#define GELIC_NET_WATCHDOG_TIMEOUT	5*HZ
+#define GELIC_NET_BROADCAST_ADDR	0xffffffffffffL
+
+#define GELIC_NET_MC_COUNT_MAX		32 /* multicast address list */
+
+/* virtual interrupt status register bits */
+	/* INT1 */
+#define GELIC_CARD_TX_RAM_FULL_ERR		0x0000000000000001L
+#define GELIC_CARD_RX_RAM_FULL_ERR		0x0000000000000002L
+#define GELIC_CARD_TX_SHORT_FRAME_ERR		0x0000000000000004L
+#define GELIC_CARD_TX_INVALID_DESCR_ERR		0x0000000000000008L
+#define GELIC_CARD_RX_FIFO_FULL_ERR		0x0000000000002000L
+#define GELIC_CARD_RX_DESCR_CHAIN_END		0x0000000000004000L
+#define GELIC_CARD_RX_INVALID_DESCR_ERR		0x0000000000008000L
+#define GELIC_CARD_TX_RESPONCE_ERR		0x0000000000010000L
+#define GELIC_CARD_RX_RESPONCE_ERR		0x0000000000100000L
+#define GELIC_CARD_TX_PROTECTION_ERR		0x0000000000400000L
+#define GELIC_CARD_RX_PROTECTION_ERR		0x0000000004000000L
+#define GELIC_CARD_TX_TCP_UDP_CHECKSUM_ERR	0x0000000008000000L
+#define GELIC_CARD_PORT_STATUS_CHANGED		0x0000000020000000L
+#define GELIC_CARD_WLAN_EVENT_RECEIVED		0x0000000040000000L
+#define GELIC_CARD_WLAN_COMMAND_COMPLETED	0x0000000080000000L
+	/* INT 0 */
+#define GELIC_CARD_TX_FLAGGED_DESCR		0x0004000000000000L
+#define GELIC_CARD_RX_FLAGGED_DESCR		0x0040000000000000L
+#define GELIC_CARD_TX_TRANSFER_END		0x0080000000000000L
+#define GELIC_CARD_TX_DESCR_CHAIN_END		0x0100000000000000L
+#define GELIC_CARD_NUMBER_OF_RX_FRAME		0x1000000000000000L
+#define GELIC_CARD_ONE_TIME_COUNT_TIMER		0x4000000000000000L
+#define GELIC_CARD_FREE_RUN_COUNT_TIMER		0x8000000000000000L
+
+/* initial interrupt mask */
+#define GELIC_CARD_TXINT	GELIC_CARD_TX_DESCR_CHAIN_END
+
+#define GELIC_CARD_RXINT	(GELIC_CARD_RX_DESCR_CHAIN_END | \
+				 GELIC_CARD_NUMBER_OF_RX_FRAME)
+
+ /* RX descriptor data_status bits */
+enum gelic_descr_rx_status {
+	GELIC_DESCR_RXDMADU	= 0x80000000, /* destination MAC addr unknown */
+	GELIC_DESCR_RXLSTFBF	= 0x40000000, /* last frame buffer */
+	GELIC_DESCR_RXIPCHK	= 0x20000000, /* IP checksum performed */
+	GELIC_DESCR_RXTCPCHK	= 0x10000000, /* TCP/UDP checksum performed */
+	GELIC_DESCR_RXWTPKT	= 0x00C00000, /*
+					       * wakeup trigger packet
+					       * 01: Magic Packet (TM)
+					       * 10: ARP packet
+					       * 11: Multicast MAC addr
+					       */
+	GELIC_DESCR_RXVLNPKT	= 0x00200000, /* VLAN packet */
+	/* bit 20..16 reserved */
+	GELIC_DESCR_RXRRECNUM	= 0x0000ff00, /* reception receipt number */
+	/* bit 7..0 reserved */
+};
+
+#define GELIC_DESCR_DATA_STATUS_CHK_MASK	\
+	(GELIC_DESCR_RXIPCHK | GELIC_DESCR_RXTCPCHK)
+
+ /* TX descriptor data_status bits */
+enum gelic_descr_tx_status {
+	GELIC_DESCR_TX_TAIL	= 0x00000001, /* gelic treated this
+					       * descriptor was end of
+					       * a tx frame
+					       */
+};
+
+/* RX descriptor data error bits */
+enum gelic_descr_rx_error {
+	/* bit 31 reserved */
+	GELIC_DESCR_RXALNERR	= 0x40000000, /* alignment error 10/100M */
+	GELIC_DESCR_RXOVERERR	= 0x20000000, /* oversize error */
+	GELIC_DESCR_RXRNTERR	= 0x10000000, /* Runt error */
+	GELIC_DESCR_RXIPCHKERR	= 0x08000000, /* IP checksum error */
+	GELIC_DESCR_RXTCPCHKERR	= 0x04000000, /* TCP/UDP checksum error */
+	GELIC_DESCR_RXDRPPKT	= 0x00100000, /* drop packet */
+	GELIC_DESCR_RXIPFMTERR	= 0x00080000, /* IP packet format error */
+	/* bit 18 reserved */
+	GELIC_DESCR_RXDATAERR	= 0x00020000, /* IP packet format error */
+	GELIC_DESCR_RXCALERR	= 0x00010000, /* carrier extension length
+					       * error */
+	GELIC_DESCR_RXCREXERR	= 0x00008000, /* carrier extension error */
+	GELIC_DESCR_RXMLTCST	= 0x00004000, /* multicast address frame */
+	/* bit 13..0 reserved */
+};
+#define GELIC_DESCR_DATA_ERROR_CHK_MASK		\
+	(GELIC_DESCR_RXIPCHKERR | GELIC_DESCR_RXTCPCHKERR)
+
+/* DMA command and status (RX and TX)*/
+enum gelic_descr_dma_status {
+	GELIC_DESCR_DMA_COMPLETE		= 0x00000000, /* used in tx */
+	GELIC_DESCR_DMA_BUFFER_FULL		= 0x00000000, /* used in rx */
+	GELIC_DESCR_DMA_RESPONSE_ERROR		= 0x10000000, /* used in rx, tx */
+	GELIC_DESCR_DMA_PROTECTION_ERROR	= 0x20000000, /* used in rx, tx */
+	GELIC_DESCR_DMA_FRAME_END		= 0x40000000, /* used in rx */
+	GELIC_DESCR_DMA_FORCE_END		= 0x50000000, /* used in rx, tx */
+	GELIC_DESCR_DMA_CARDOWNED		= 0xa0000000, /* used in rx, tx */
+	GELIC_DESCR_DMA_NOT_IN_USE		= 0xb0000000, /* any other value */
+};
+
+#define GELIC_DESCR_DMA_STAT_MASK	(0xf0000000)
+
+/* tx descriptor command and status */
+enum gelic_descr_tx_dma_status {
+	/* [19] */
+	GELIC_DESCR_TX_DMA_IKE		= 0x00080000, /* IPSEC off */
+	/* [18] */
+	GELIC_DESCR_TX_DMA_FRAME_TAIL	= 0x00040000, /* last descriptor of
+						       * the packet
+						       */
+	/* [17..16] */
+	GELIC_DESCR_TX_DMA_TCP_CHKSUM	= 0x00020000, /* TCP packet */
+	GELIC_DESCR_TX_DMA_UDP_CHKSUM	= 0x00030000, /* UDP packet */
+	GELIC_DESCR_TX_DMA_NO_CHKSUM	= 0x00000000, /* no checksum */
+
+	/* [1] */
+	GELIC_DESCR_TX_DMA_CHAIN_END	= 0x00000002, /* DMA terminated
+						       * due to chain end
+						       */
+};
+
+#define GELIC_DESCR_DMA_CMD_NO_CHKSUM \
+	(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
+	 GELIC_DESCR_TX_DMA_NO_CHKSUM)
+
+#define GELIC_DESCR_DMA_CMD_TCP_CHKSUM \
+	(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
+	 GELIC_DESCR_TX_DMA_TCP_CHKSUM)
+
+#define GELIC_DESCR_DMA_CMD_UDP_CHKSUM \
+	(GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
+	 GELIC_DESCR_TX_DMA_UDP_CHKSUM)
+
+enum gelic_descr_rx_dma_status {
+	/* [ 1 ] */
+	GELIC_DESCR_RX_DMA_CHAIN_END	= 0x00000002, /* DMA terminated
+						       * due to chain end
+						       */
+};
+
+/* for lv1_net_control */
+enum gelic_lv1_net_control_code {
+	GELIC_LV1_GET_MAC_ADDRESS	= 1,
+	GELIC_LV1_GET_ETH_PORT_STATUS	= 2,
+	GELIC_LV1_SET_NEGOTIATION_MODE	= 3,
+	GELIC_LV1_GET_VLAN_ID		= 4,
+	GELIC_LV1_SET_WOL		= 5,
+	GELIC_LV1_GET_CHANNEL		= 6,
+	GELIC_LV1_POST_WLAN_CMD		= 9,
+	GELIC_LV1_GET_WLAN_CMD_RESULT	= 10,
+	GELIC_LV1_GET_WLAN_EVENT	= 11,
+};
+
+/* for GELIC_LV1_SET_WOL */
+enum gelic_lv1_wol_command {
+	GELIC_LV1_WOL_MAGIC_PACKET	= 1,
+	GELIC_LV1_WOL_ADD_MATCH_ADDR	= 6,
+	GELIC_LV1_WOL_DELETE_MATCH_ADDR	= 7,
+};
+
+/* for GELIC_LV1_WOL_MAGIC_PACKET */
+enum gelic_lv1_wol_mp_arg {
+	GELIC_LV1_WOL_MP_DISABLE	= 0,
+	GELIC_LV1_WOL_MP_ENABLE		= 1,
+};
+
+/* for GELIC_LV1_WOL_{ADD,DELETE}_MATCH_ADDR */
+enum gelic_lv1_wol_match_arg {
+	GELIC_LV1_WOL_MATCH_INDIVIDUAL	= 0,
+	GELIC_LV1_WOL_MATCH_ALL		= 1,
+};
+
+/* status returned from GET_ETH_PORT_STATUS */
+enum gelic_lv1_ether_port_status {
+	GELIC_LV1_ETHER_LINK_UP		= 0x0000000000000001L,
+	GELIC_LV1_ETHER_FULL_DUPLEX	= 0x0000000000000002L,
+	GELIC_LV1_ETHER_AUTO_NEG	= 0x0000000000000004L,
+
+	GELIC_LV1_ETHER_SPEED_10	= 0x0000000000000010L,
+	GELIC_LV1_ETHER_SPEED_100	= 0x0000000000000020L,
+
GELIC_LV1_ETHER_SPEED_1000 = 0x0000000000000040L, + GELIC_LV1_ETHER_SPEED_MASK = 0x0000000000000070L, +}; + +enum gelic_lv1_vlan_index { + /* for outgoing packets */ + GELIC_LV1_VLAN_TX_ETHERNET_0 = 0x0000000000000002L, + GELIC_LV1_VLAN_TX_WIRELESS = 0x0000000000000003L, + + /* for incoming packets */ + GELIC_LV1_VLAN_RX_ETHERNET_0 = 0x0000000000000012L, + GELIC_LV1_VLAN_RX_WIRELESS = 0x0000000000000013L, +}; + +enum gelic_lv1_phy { + GELIC_LV1_PHY_ETHERNET_0 = 0x0000000000000002L, +}; + +/* size of hardware part of gelic descriptor */ +#define GELIC_DESCR_SIZE (32) + +enum gelic_port_type { + GELIC_PORT_ETHERNET_0 = 0, + GELIC_PORT_WIRELESS = 1, + GELIC_PORT_MAX +}; + +struct gelic_descr { + /* as defined by the hardware */ + __be32 buf_addr; + __be32 buf_size; + __be32 next_descr_addr; + __be32 dmac_cmd_status; + __be32 result_size; + __be32 valid_size; /* all zeroes for tx */ + __be32 data_status; + __be32 data_error; /* all zeroes for tx */ + + /* used in the driver */ + struct sk_buff *skb; + dma_addr_t bus_addr; + struct gelic_descr *next; + struct gelic_descr *prev; +} __attribute__((aligned(32))); + +struct gelic_descr_chain { + /* we walk from tail to head */ + struct gelic_descr *head; + struct gelic_descr *tail; +}; + +struct gelic_vlan_id { + u16 tx; + u16 rx; +}; + +struct gelic_card { + struct napi_struct napi; + struct net_device *netdev[GELIC_PORT_MAX]; + /* + * hypervisor requires irq_status should be + * 8 bytes aligned, but u64 member is + * always disposed in that manner + */ + u64 irq_status; + u64 irq_mask; + + struct ps3_system_bus_device *dev; + struct gelic_vlan_id vlan[GELIC_PORT_MAX]; + int vlan_required; + + struct gelic_descr_chain tx_chain; + struct gelic_descr_chain rx_chain; + /* + * tx_lock guards tx descriptor list and + * tx_dma_progress. 
+ */ + spinlock_t tx_lock; + int tx_dma_progress; + + struct work_struct tx_timeout_task; + atomic_t tx_timeout_task_counter; + wait_queue_head_t waitq; + + /* only first user should up the card */ + struct mutex updown_lock; + atomic_t users; + + u64 ether_port_status; + int link_mode; + + /* original address returned by kzalloc */ + void *unalign; + + /* + * each netdevice has copy of irq + */ + unsigned int irq; + struct gelic_descr *tx_top, *rx_top; + struct gelic_descr descr[0]; /* must be the last */ +}; + +struct gelic_port { + struct gelic_card *card; + struct net_device *netdev; + enum gelic_port_type type; + long priv[0]; /* long for alignment */ +}; + +static inline struct gelic_card *port_to_card(struct gelic_port *p) +{ + return p->card; +} +static inline struct net_device *port_to_netdev(struct gelic_port *p) +{ + return p->netdev; +} +static inline struct gelic_card *netdev_card(struct net_device *d) +{ + return ((struct gelic_port *)netdev_priv(d))->card; +} +static inline struct gelic_port *netdev_port(struct net_device *d) +{ + return (struct gelic_port *)netdev_priv(d); +} +static inline struct device *ctodev(struct gelic_card *card) +{ + return &card->dev->core; +} +static inline u64 bus_id(struct gelic_card *card) +{ + return card->dev->bus_id; +} +static inline u64 dev_id(struct gelic_card *card) +{ + return card->dev->dev_id; +} + +static inline void *port_priv(struct gelic_port *port) +{ + return port->priv; +} + +#ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC +void udbg_shutdown_ps3gelic(void); +#else +static inline void udbg_shutdown_ps3gelic(void) {} +#endif + +int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); +/* shared netdev ops */ +void gelic_card_up(struct gelic_card *card); +void gelic_card_down(struct gelic_card *card); +int gelic_net_open(struct net_device *netdev); +int gelic_net_stop(struct net_device *netdev); +int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +void gelic_net_set_multi(struct net_device *netdev); +void gelic_net_tx_timeout(struct net_device *netdev); +int gelic_net_change_mtu(struct net_device *netdev, int new_mtu); +int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card); + +/* shared ethtool ops */ +void gelic_net_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info); +void gelic_net_poll_controller(struct net_device *netdev); + +#endif /* _GELIC_NET_H */ diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c new file mode 100644 index 000000000..13214a649 --- /dev/null +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -0,0 +1,2670 @@ +/* + * PS3 gelic network driver. + * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2007 Sony Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/if_arp.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <net/iw_handler.h>
+
+#include <linux/dma-mapping.h>
+#include <net/checksum.h>
+#include <asm/firmware.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "ps3_gelic_net.h"
+#include "ps3_gelic_wireless.h"
+
+
+static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
+			       u8 *essid, size_t essid_len);
+static int gelic_wl_try_associate(struct net_device *netdev);
+
+/*
+ * tables
+ */
+
+/* 802.11b/g channel to freq in MHz */
+static const int channel_freq[] = {
+	2412, 2417, 2422, 2427, 2432,
+	2437, 2442, 2447, 2452, 2457,
+	2462, 2467, 2472, 2484
+};
+#define NUM_CHANNELS ARRAY_SIZE(channel_freq)
+
+/* in bps */
+static const int bitrate_list[] = {
+	  1000000,
+	  2000000,
+	  5500000,
+	 11000000,
+	  6000000,
+	  9000000,
+	 12000000,
+	 18000000,
+	 24000000,
+	 36000000,
+	 48000000,
+	 54000000
+};
+#define NUM_BITRATES ARRAY_SIZE(bitrate_list)
+
+/*
+ * wpa2 support requires the hypervisor version 2.0 or later
+ */
+static inline int wpa2_capable(void)
+{
+	return 0 <= ps3_compare_firmware_version(2, 0, 0);
+}
+
+static inline int precise_ie(void)
+{
+	return 0 <= ps3_compare_firmware_version(2, 2, 0);
+}
+/*
+ * post_eurus_cmd helpers
+ */
+struct eurus_cmd_arg_info {
+	int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
+	int post_arg; /* command requires arg1, arg2 at GET_RESULT */
+};
+
+static const struct eurus_cmd_arg_info cmd_info[GELIC_EURUS_CMD_MAX_INDEX] = {
+	[GELIC_EURUS_CMD_SET_COMMON_CFG] = { .pre_arg = 1},
+	[GELIC_EURUS_CMD_SET_WEP_CFG]    = { .pre_arg = 1},
+	[GELIC_EURUS_CMD_SET_WPA_CFG]    = { .pre_arg = 1},
+	[GELIC_EURUS_CMD_GET_COMMON_CFG] = { .post_arg = 1},
+	[GELIC_EURUS_CMD_GET_WEP_CFG]    = { .post_arg = 1},
+	[GELIC_EURUS_CMD_GET_WPA_CFG]    = { .post_arg = 1},
+	[GELIC_EURUS_CMD_GET_RSSI_CFG]   = { .post_arg = 1},
+	[GELIC_EURUS_CMD_START_SCAN]     = { .pre_arg = 1},
+	[GELIC_EURUS_CMD_GET_SCAN]       = { .post_arg = 1},
+};
+
+#ifdef DEBUG
+static const char *cmdstr(enum gelic_eurus_command ix)
+{
+	switch (ix) {
+	case GELIC_EURUS_CMD_ASSOC:
+		return "ASSOC";
+	case GELIC_EURUS_CMD_DISASSOC:
+		return "DISASSOC";
+	case GELIC_EURUS_CMD_START_SCAN:
+		return "SCAN";
+	case GELIC_EURUS_CMD_GET_SCAN:
+		return "GET SCAN";
+	case GELIC_EURUS_CMD_SET_COMMON_CFG:
+		return "SET_COMMON_CFG";
+	case GELIC_EURUS_CMD_GET_COMMON_CFG:
+		return "GET_COMMON_CFG";
+	case GELIC_EURUS_CMD_SET_WEP_CFG:
+		return "SET_WEP_CFG";
+	case GELIC_EURUS_CMD_GET_WEP_CFG:
+		return "GET_WEP_CFG";
+	case GELIC_EURUS_CMD_SET_WPA_CFG:
+		return "SET_WPA_CFG";
+	case GELIC_EURUS_CMD_GET_WPA_CFG:
+		return "GET_WPA_CFG";
+	case GELIC_EURUS_CMD_GET_RSSI_CFG:
+		return "GET_RSSI";
+	default:
+		break;
+	}
+	return "";
+};
+#else
+static inline const char *cmdstr(enum gelic_eurus_command ix)
+{
+	return "";
+}
+#endif
+
+/* synchronously do eurus commands */
+static void gelic_eurus_sync_cmd_worker(struct work_struct *work)
+{
+	struct gelic_eurus_cmd *cmd;
+	struct gelic_card *card;
+	struct gelic_wl_info *wl;
+
+	u64 arg1, arg2;
+
+	pr_debug("%s: <-\n", __func__);
+	cmd = container_of(work, struct gelic_eurus_cmd, work);
+	BUG_ON(cmd_info[cmd->cmd].pre_arg &&
+	       cmd_info[cmd->cmd].post_arg);
+	wl = cmd->wl;
+	card = port_to_card(wl_port(wl));
+
+	if (cmd_info[cmd->cmd].pre_arg) {
+		arg1 = (cmd->buffer) ?
+ ps3_mm_phys_to_lpar(__pa(cmd->buffer)) : + 0; + arg2 = cmd->buf_size; + } else { + arg1 = 0; + arg2 = 0; + } + init_completion(&wl->cmd_done_intr); + pr_debug("%s: cmd='%s' start\n", __func__, cmdstr(cmd->cmd)); + cmd->status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_POST_WLAN_CMD, + cmd->cmd, arg1, arg2, + &cmd->tag, &cmd->size); + if (cmd->status) { + complete(&cmd->done); + pr_info("%s: cmd issue failed\n", __func__); + return; + } + + wait_for_completion(&wl->cmd_done_intr); + + if (cmd_info[cmd->cmd].post_arg) { + arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer)); + arg2 = cmd->buf_size; + } else { + arg1 = 0; + arg2 = 0; + } + + cmd->status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_WLAN_CMD_RESULT, + cmd->tag, arg1, arg2, + &cmd->cmd_status, &cmd->size); +#ifdef DEBUG + if (cmd->status || cmd->cmd_status) { + pr_debug("%s: cmd done tag=%#lx arg1=%#lx, arg2=%#lx\n", __func__, + cmd->tag, arg1, arg2); + pr_debug("%s: cmd done status=%#x cmd_status=%#lx size=%#lx\n", + __func__, cmd->status, cmd->cmd_status, cmd->size); + } +#endif + complete(&cmd->done); + pr_debug("%s: cmd='%s' done\n", __func__, cmdstr(cmd->cmd)); +} + +static struct gelic_eurus_cmd *gelic_eurus_sync_cmd(struct gelic_wl_info *wl, + unsigned int eurus_cmd, + void *buffer, + unsigned int buf_size) +{ + struct gelic_eurus_cmd *cmd; + + /* allocate cmd */ + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return NULL; + + /* initialize members */ + cmd->cmd = eurus_cmd; + cmd->buffer = buffer; + cmd->buf_size = buf_size; + cmd->wl = wl; + INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker); + init_completion(&cmd->done); + queue_work(wl->eurus_cmd_queue, &cmd->work); + + /* wait for command completion */ + wait_for_completion(&cmd->done); + + return cmd; +} + +static u32 gelic_wl_get_link(struct net_device *netdev) +{ + struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); + u32 ret; + + pr_debug("%s: <-\n", __func__); + mutex_lock(&wl->assoc_stat_lock); + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) + ret = 1; + else + ret = 0; + mutex_unlock(&wl->assoc_stat_lock); + pr_debug("%s: ->\n", __func__); + return ret; +} + +static void gelic_wl_send_iwap_event(struct gelic_wl_info *wl, u8 *bssid) +{ + union iwreq_data data; + + memset(&data, 0, sizeof(data)); + if (bssid) + memcpy(data.ap_addr.sa_data, bssid, ETH_ALEN); + data.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWAP, + &data, NULL); +} + +/* + * wireless extension handlers and helpers + */ + +/* SIOGIWNAME */ +static int gelic_wl_get_name(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *iwreq, char *extra) +{ + strcpy(iwreq->name, "IEEE 802.11bg"); + return 0; +} + +static void gelic_wl_get_ch_info(struct gelic_wl_info *wl) +{ + struct gelic_card *card = port_to_card(wl_port(wl)); + u64 ch_info_raw, tmp; + int status; + + if (!test_and_set_bit(GELIC_WL_STAT_CH_INFO, &wl->stat)) { + status = lv1_net_control(bus_id(card), dev_id(card), + GELIC_LV1_GET_CHANNEL, 0, 0, 0, + &ch_info_raw, + &tmp); + /* some fw versions may return error */ + if (status) { + if (status != LV1_NO_ENTRY) + pr_info("%s: available ch unknown\n", __func__); + wl->ch_info = 0x07ff;/* 11 ch */ + } else + /* 16 bits of MSB has available channels */ + wl->ch_info = ch_info_raw >> 48; + } +} + +/* SIOGIWRANGE */ +static int gelic_wl_get_range(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *iwreq, char *extra) +{ + struct iw_point *point = 
&iwreq->data; + struct iw_range *range = (struct iw_range *)extra; + struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); + unsigned int i, chs; + + pr_debug("%s: <-\n", __func__); + point->length = sizeof(struct iw_range); + memset(range, 0, sizeof(struct iw_range)); + + range->we_version_compiled = WIRELESS_EXT; + range->we_version_source = 22; + + /* available channels and frequencies */ + gelic_wl_get_ch_info(wl); + + for (i = 0, chs = 0; + i < NUM_CHANNELS && chs < IW_MAX_FREQUENCIES; i++) + if (wl->ch_info & (1 << i)) { + range->freq[chs].i = i + 1; + range->freq[chs].m = channel_freq[i]; + range->freq[chs].e = 6; + chs++; + } + range->num_frequency = chs; + range->old_num_frequency = chs; + range->num_channels = chs; + range->old_num_channels = chs; + + /* bitrates */ + for (i = 0; i < NUM_BITRATES; i++) + range->bitrate[i] = bitrate_list[i]; + range->num_bitrates = i; + + /* signal levels */ + range->max_qual.qual = 100; /* relative value */ + range->max_qual.level = 100; + range->avg_qual.qual = 50; + range->avg_qual.level = 50; + range->sensitivity = 0; + + /* Event capability */ + IW_EVENT_CAPA_SET_KERNEL(range->event_capa); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); + + /* encryption capability */ + range->enc_capa = IW_ENC_CAPA_WPA | + IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP | + IW_ENC_CAPA_4WAY_HANDSHAKE; + if (wpa2_capable()) + range->enc_capa |= IW_ENC_CAPA_WPA2; + range->encoding_size[0] = 5; /* 40bit WEP */ + range->encoding_size[1] = 13; /* 104bit WEP */ + range->encoding_size[2] = 32; /* WPA-PSK */ + range->num_encoding_sizes = 3; + range->max_encoding_tokens = GELIC_WEP_KEYS; + + /* scan capability */ + range->scan_capa = IW_SCAN_CAPA_ESSID; + + pr_debug("%s: ->\n", __func__); + return 0; + +} + +/* SIOC{G,S}IWSCAN */ +static int gelic_wl_set_scan(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + struct iw_scan_req *req; + u8 *essid = NULL; + size_t essid_len = 0; + + if (wrqu->data.length == sizeof(struct iw_scan_req) && + wrqu->data.flags & IW_SCAN_THIS_ESSID) { + req = (struct iw_scan_req*)extra; + essid = req->essid; + essid_len = req->essid_len; + pr_debug("%s: ESSID scan =%s\n", __func__, essid); + } + return gelic_wl_start_scan(wl, 1, essid, essid_len); +} + +#define OUI_LEN 3 +static const u8 rsn_oui[OUI_LEN] = { 0x00, 0x0f, 0xac }; +static const u8 wpa_oui[OUI_LEN] = { 0x00, 0x50, 0xf2 }; + +/* + * synthesize WPA/RSN IE data + * See WiFi WPA specification and IEEE 802.11-2007 7.3.2.25 + * for the format + */ +static size_t gelic_wl_synthesize_ie(u8 *buf, + struct gelic_eurus_scan_info *scan) +{ + + const u8 *oui_header; + u8 *start = buf; + int rsn; + int ccmp; + + pr_debug("%s: <- sec=%16x\n", __func__, scan->security); + switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_MASK) { + case GELIC_EURUS_SCAN_SEC_WPA: + rsn = 0; + break; + case GELIC_EURUS_SCAN_SEC_WPA2: + rsn = 1; + break; + default: + /* WEP or none. No IE returned */ + return 0; + } + + switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_WPA_MASK) { + case GELIC_EURUS_SCAN_SEC_WPA_TKIP: + ccmp = 0; + break; + case GELIC_EURUS_SCAN_SEC_WPA_AES: + ccmp = 1; + break; + default: + if (rsn) { + ccmp = 1; + pr_info("%s: no cipher info. defaulted to CCMP\n", + __func__); + } else { + ccmp = 0; + pr_info("%s: no cipher info. 
defaulted to TKIP\n", + __func__); + } + } + + if (rsn) + oui_header = rsn_oui; + else + oui_header = wpa_oui; + + /* element id */ + if (rsn) + *buf++ = WLAN_EID_RSN; + else + *buf++ = WLAN_EID_VENDOR_SPECIFIC; + + /* length filed; set later */ + buf++; + + /* wpa special header */ + if (!rsn) { + memcpy(buf, wpa_oui, OUI_LEN); + buf += OUI_LEN; + *buf++ = 0x01; + } + + /* version */ + *buf++ = 0x01; /* version 1.0 */ + *buf++ = 0x00; + + /* group cipher */ + memcpy(buf, oui_header, OUI_LEN); + buf += OUI_LEN; + + if (ccmp) + *buf++ = 0x04; /* CCMP */ + else + *buf++ = 0x02; /* TKIP */ + + /* pairwise key count always 1 */ + *buf++ = 0x01; + *buf++ = 0x00; + + /* pairwise key suit */ + memcpy(buf, oui_header, OUI_LEN); + buf += OUI_LEN; + if (ccmp) + *buf++ = 0x04; /* CCMP */ + else + *buf++ = 0x02; /* TKIP */ + + /* AKM count is 1 */ + *buf++ = 0x01; + *buf++ = 0x00; + + /* AKM suite is assumed as PSK*/ + memcpy(buf, oui_header, OUI_LEN); + buf += OUI_LEN; + *buf++ = 0x02; /* PSK */ + + /* RSN capabilities is 0 */ + *buf++ = 0x00; + *buf++ = 0x00; + + /* set length field */ + start[1] = (buf - start - 2); + + pr_debug("%s: ->\n", __func__); + return buf - start; +} + +struct ie_item { + u8 *data; + u8 len; +}; + +struct ie_info { + struct ie_item wpa; + struct ie_item rsn; +}; + +static void gelic_wl_parse_ie(u8 *data, size_t len, + struct ie_info *ie_info) +{ + size_t data_left = len; + u8 *pos = data; + u8 item_len; + u8 item_id; + + pr_debug("%s: data=%p len=%ld\n", __func__, + data, len); + memset(ie_info, 0, sizeof(struct ie_info)); + + while (2 <= data_left) { + item_id = *pos++; + item_len = *pos++; + data_left -= 2; + + if (data_left < item_len) + break; + + switch (item_id) { + case WLAN_EID_VENDOR_SPECIFIC: + if ((OUI_LEN + 1 <= item_len) && + !memcmp(pos, wpa_oui, OUI_LEN) && + pos[OUI_LEN] == 0x01) { + ie_info->wpa.data = pos - 2; + ie_info->wpa.len = item_len + 2; + } + break; + case WLAN_EID_RSN: + ie_info->rsn.data = pos - 2; + /* length includes the header */ + ie_info->rsn.len = item_len + 2; + break; + default: + pr_debug("%s: ignore %#x,%d\n", __func__, + item_id, item_len); + break; + } + pos += item_len; + data_left -= item_len; + } + pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__, + ie_info->wpa.data, ie_info->wpa.len, + ie_info->rsn.data, ie_info->rsn.len); +} + + +/* + * translate the scan informations from hypervisor to a + * independent format + */ +static char *gelic_wl_translate_scan(struct net_device *netdev, + struct iw_request_info *info, + char *ev, + char *stop, + struct gelic_wl_scan_info *network) +{ + struct iw_event iwe; + struct gelic_eurus_scan_info *scan = network->hwinfo; + char *tmp; + u8 rate; + unsigned int i, j, len; + u8 buf[64]; /* arbitrary size large enough */ + + pr_debug("%s: <-\n", __func__); + + /* first entry should be AP's mac address */ + iwe.cmd = SIOCGIWAP; + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; + memcpy(iwe.u.ap_addr.sa_data, &scan->bssid[2], ETH_ALEN); + ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_ADDR_LEN); + + /* ESSID */ + iwe.cmd = SIOCGIWESSID; + iwe.u.data.flags = 1; + iwe.u.data.length = strnlen(scan->essid, 32); + ev = iwe_stream_add_point(info, ev, stop, &iwe, scan->essid); + + /* FREQUENCY */ + iwe.cmd = SIOCGIWFREQ; + iwe.u.freq.m = be16_to_cpu(scan->channel); + iwe.u.freq.e = 0; /* table value in MHz */ + iwe.u.freq.i = 0; + ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_FREQ_LEN); + + /* RATES */ + iwe.cmd = SIOCGIWRATE; + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; + /* to stuff multiple 
values in one event */ + tmp = ev + iwe_stream_lcp_len(info); + /* put them in ascendant order (older is first) */ + i = 0; + j = 0; + pr_debug("%s: rates=%d rate=%d\n", __func__, + network->rate_len, network->rate_ext_len); + while (i < network->rate_len) { + if (j < network->rate_ext_len && + ((scan->ext_rate[j] & 0x7f) < (scan->rate[i] & 0x7f))) + rate = scan->ext_rate[j++] & 0x7f; + else + rate = scan->rate[i++] & 0x7f; + iwe.u.bitrate.value = rate * 500000; /* 500kbps unit */ + tmp = iwe_stream_add_value(info, ev, tmp, stop, &iwe, + IW_EV_PARAM_LEN); + } + while (j < network->rate_ext_len) { + iwe.u.bitrate.value = (scan->ext_rate[j++] & 0x7f) * 500000; + tmp = iwe_stream_add_value(info, ev, tmp, stop, &iwe, + IW_EV_PARAM_LEN); + } + /* Check if we added any rate */ + if (iwe_stream_lcp_len(info) < (tmp - ev)) + ev = tmp; + + /* ENCODE */ + iwe.cmd = SIOCGIWENCODE; + if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_PRIVACY) + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; + else + iwe.u.data.flags = IW_ENCODE_DISABLED; + iwe.u.data.length = 0; + ev = iwe_stream_add_point(info, ev, stop, &iwe, scan->essid); + + /* MODE */ + iwe.cmd = SIOCGIWMODE; + if (be16_to_cpu(scan->capability) & + (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) { + if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_ESS) + iwe.u.mode = IW_MODE_MASTER; + else + iwe.u.mode = IW_MODE_ADHOC; + ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_UINT_LEN); + } + + /* QUAL */ + iwe.cmd = IWEVQUAL; + iwe.u.qual.updated = IW_QUAL_ALL_UPDATED | + IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID; + iwe.u.qual.level = be16_to_cpu(scan->rssi); + iwe.u.qual.qual = be16_to_cpu(scan->rssi); + iwe.u.qual.noise = 0; + ev = iwe_stream_add_event(info, ev, stop, &iwe, IW_EV_QUAL_LEN); + + /* RSN */ + memset(&iwe, 0, sizeof(iwe)); + if (be16_to_cpu(scan->size) <= sizeof(*scan)) { + /* If wpa[2] capable station, synthesize IE and put it */ + len = gelic_wl_synthesize_ie(buf, scan); + if (len) { + iwe.cmd = IWEVGENIE; + iwe.u.data.length = len; + ev = iwe_stream_add_point(info, ev, stop, &iwe, buf); + } + } else { + /* this scan info has IE data */ + struct ie_info ie_info; + size_t data_len; + + data_len = be16_to_cpu(scan->size) - sizeof(*scan); + + gelic_wl_parse_ie(scan->elements, data_len, &ie_info); + + if (ie_info.wpa.len && (ie_info.wpa.len <= sizeof(buf))) { + memcpy(buf, ie_info.wpa.data, ie_info.wpa.len); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie_info.wpa.len; + ev = iwe_stream_add_point(info, ev, stop, &iwe, buf); + } + + if (ie_info.rsn.len && (ie_info.rsn.len <= sizeof(buf))) { + memset(&iwe, 0, sizeof(iwe)); + memcpy(buf, ie_info.rsn.data, ie_info.rsn.len); + iwe.cmd = IWEVGENIE; + iwe.u.data.length = ie_info.rsn.len; + ev = iwe_stream_add_point(info, ev, stop, &iwe, buf); + } + } + + pr_debug("%s: ->\n", __func__); + return ev; +} + + +static int gelic_wl_get_scan(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + struct gelic_wl_scan_info *scan_info; + char *ev = extra; + char *stop = ev + wrqu->data.length; + int ret = 0; + unsigned long this_time = jiffies; + + pr_debug("%s: <-\n", __func__); + if (mutex_lock_interruptible(&wl->scan_lock)) + return -EAGAIN; + + switch (wl->scan_stat) { + case GELIC_WL_SCAN_STAT_SCANNING: + /* If a scan in progress, caller should call me again */ + ret = -EAGAIN; + goto out; + case GELIC_WL_SCAN_STAT_INIT: + /* last scan request failed or never issued */ + ret = 
-ENODEV; + goto out; + case GELIC_WL_SCAN_STAT_GOT_LIST: + /* ok, use current list */ + break; + } + + list_for_each_entry(scan_info, &wl->network_list, list) { + if (wl->scan_age == 0 || + time_after(scan_info->last_scanned + wl->scan_age, + this_time)) + ev = gelic_wl_translate_scan(netdev, info, + ev, stop, + scan_info); + else + pr_debug("%s:entry too old\n", __func__); + + if (stop - ev <= IW_EV_ADDR_LEN) { + ret = -E2BIG; + goto out; + } + } + + wrqu->data.length = ev - extra; + wrqu->data.flags = 0; +out: + mutex_unlock(&wl->scan_lock); + pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length); + return ret; +} + +#ifdef DEBUG +static void scan_list_dump(struct gelic_wl_info *wl) +{ + struct gelic_wl_scan_info *scan_info; + int i; + + i = 0; + list_for_each_entry(scan_info, &wl->network_list, list) { + pr_debug("%s: item %d\n", __func__, i++); + pr_debug("valid=%d eurusindex=%d last=%lx\n", + scan_info->valid, scan_info->eurus_index, + scan_info->last_scanned); + pr_debug("r_len=%d r_ext_len=%d essid_len=%d\n", + scan_info->rate_len, scan_info->rate_ext_len, + scan_info->essid_len); + /* -- */ + pr_debug("bssid=%pM\n", &scan_info->hwinfo->bssid[2]); + pr_debug("essid=%s\n", scan_info->hwinfo->essid); + } +} +#endif + +static int gelic_wl_set_auth(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct iw_param *param = &data->param; + struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); + unsigned long irqflag; + int ret = 0; + + pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX); + spin_lock_irqsave(&wl->lock, irqflag); + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_WPA_VERSION: + if (param->value & IW_AUTH_WPA_VERSION_DISABLED) { + pr_debug("%s: NO WPA selected\n", __func__); + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; + wl->group_cipher_method = GELIC_WL_CIPHER_WEP; + wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; + } + if (param->value & IW_AUTH_WPA_VERSION_WPA) { + pr_debug("%s: WPA version 1 selected\n", __func__); + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA; + wl->group_cipher_method = GELIC_WL_CIPHER_TKIP; + wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP; + wl->auth_method = GELIC_EURUS_AUTH_OPEN; + } + if (param->value & IW_AUTH_WPA_VERSION_WPA2) { + /* + * As the hypervisor may not tell the cipher + * information of the AP if it is WPA2, + * you will not decide suitable cipher from + * its beacon. + * You should have knowledge about the AP's + * cipher information in other method prior to + * the association. 
+ */ + if (!precise_ie()) + pr_info("%s: WPA2 may not work\n", __func__); + if (wpa2_capable()) { + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2; + wl->group_cipher_method = GELIC_WL_CIPHER_AES; + wl->pairwise_cipher_method = + GELIC_WL_CIPHER_AES; + wl->auth_method = GELIC_EURUS_AUTH_OPEN; + } else + ret = -EINVAL; + } + break; + + case IW_AUTH_CIPHER_PAIRWISE: + if (param->value & + (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) { + pr_debug("%s: WEP selected\n", __func__); + wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; + } + if (param->value & IW_AUTH_CIPHER_TKIP) { + pr_debug("%s: TKIP selected\n", __func__); + wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP; + } + if (param->value & IW_AUTH_CIPHER_CCMP) { + pr_debug("%s: CCMP selected\n", __func__); + wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES; + } + if (param->value & IW_AUTH_CIPHER_NONE) { + pr_debug("%s: no auth selected\n", __func__); + wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE; + } + break; + case IW_AUTH_CIPHER_GROUP: + if (param->value & + (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) { + pr_debug("%s: WEP selected\n", __func__); + wl->group_cipher_method = GELIC_WL_CIPHER_WEP; + } + if (param->value & IW_AUTH_CIPHER_TKIP) { + pr_debug("%s: TKIP selected\n", __func__); + wl->group_cipher_method = GELIC_WL_CIPHER_TKIP; + } + if (param->value & IW_AUTH_CIPHER_CCMP) { + pr_debug("%s: CCMP selected\n", __func__); + wl->group_cipher_method = GELIC_WL_CIPHER_AES; + } + if (param->value & IW_AUTH_CIPHER_NONE) { + pr_debug("%s: no auth selected\n", __func__); + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; + } + break; + case IW_AUTH_80211_AUTH_ALG: + if (param->value & IW_AUTH_ALG_SHARED_KEY) { + pr_debug("%s: shared key specified\n", __func__); + wl->auth_method = GELIC_EURUS_AUTH_SHARED; + } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { + pr_debug("%s: open system specified\n", __func__); + wl->auth_method = GELIC_EURUS_AUTH_OPEN; + } else + ret = -EINVAL; + break; + + case IW_AUTH_WPA_ENABLED: + if (param->value) { + pr_debug("%s: WPA enabled\n", __func__); + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA; + } else { + pr_debug("%s: WPA disabled\n", __func__); + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; + } + break; + + case IW_AUTH_KEY_MGMT: + if (param->value & IW_AUTH_KEY_MGMT_PSK) + break; + /* intentionally fall through */ + default: + ret = -EOPNOTSUPP; + break; + } + + if (!ret) + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); + + spin_unlock_irqrestore(&wl->lock, irqflag); + pr_debug("%s: -> %d\n", __func__, ret); + return ret; +} + +static int gelic_wl_get_auth(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *iwreq, char *extra) +{ + struct iw_param *param = &iwreq->param; + struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); + unsigned long irqflag; + int ret = 0; + + pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX); + spin_lock_irqsave(&wl->lock, irqflag); + switch (param->flags & IW_AUTH_INDEX) { + case IW_AUTH_WPA_VERSION: + switch (wl->wpa_level) { + case GELIC_WL_WPA_LEVEL_WPA: + param->value |= IW_AUTH_WPA_VERSION_WPA; + break; + case GELIC_WL_WPA_LEVEL_WPA2: + param->value |= IW_AUTH_WPA_VERSION_WPA2; + break; + default: + param->value |= IW_AUTH_WPA_VERSION_DISABLED; + } + break; + + case IW_AUTH_80211_AUTH_ALG: + if (wl->auth_method == GELIC_EURUS_AUTH_SHARED) + param->value = IW_AUTH_ALG_SHARED_KEY; + else if (wl->auth_method == GELIC_EURUS_AUTH_OPEN) + param->value = IW_AUTH_ALG_OPEN_SYSTEM; + break; + + case IW_AUTH_WPA_ENABLED: + switch 
(wl->wpa_level) { + case GELIC_WL_WPA_LEVEL_WPA: + case GELIC_WL_WPA_LEVEL_WPA2: + param->value = 1; + break; + default: + param->value = 0; + break; + } + break; + default: + ret = -EOPNOTSUPP; + } + + spin_unlock_irqrestore(&wl->lock, irqflag); + pr_debug("%s: -> %d\n", __func__, ret); + return ret; +} + +/* SIOC{S,G}IWESSID */ +static int gelic_wl_set_essid(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + unsigned long irqflag; + + pr_debug("%s: <- l=%d f=%d\n", __func__, + data->essid.length, data->essid.flags); + if (IW_ESSID_MAX_SIZE < data->essid.length) + return -EINVAL; + + spin_lock_irqsave(&wl->lock, irqflag); + if (data->essid.flags) { + wl->essid_len = data->essid.length; + memcpy(wl->essid, extra, wl->essid_len); + pr_debug("%s: essid = '%s'\n", __func__, extra); + set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat); + } else { + pr_debug("%s: ESSID any\n", __func__); + clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat); + } + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); + spin_unlock_irqrestore(&wl->lock, irqflag); + + + gelic_wl_try_associate(netdev); /* FIXME */ + pr_debug("%s: ->\n", __func__); + return 0; +} + +static int gelic_wl_get_essid(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + unsigned long irqflag; + + pr_debug("%s: <-\n", __func__); + mutex_lock(&wl->assoc_stat_lock); + spin_lock_irqsave(&wl->lock, irqflag); + if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) || + wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) { + memcpy(extra, wl->essid, wl->essid_len); + data->essid.length = wl->essid_len; + data->essid.flags = 1; + } else + data->essid.flags = 0; + + mutex_unlock(&wl->assoc_stat_lock); + spin_unlock_irqrestore(&wl->lock, irqflag); + pr_debug("%s: -> len=%d\n", __func__, data->essid.length); + + return 0; +} + +/* SIO{S,G}IWENCODE */ +static int gelic_wl_set_encode(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + struct iw_point *enc = &data->encoding; + __u16 flags; + unsigned long irqflag; + int key_index, index_specified; + int ret = 0; + + pr_debug("%s: <-\n", __func__); + flags = enc->flags & IW_ENCODE_FLAGS; + key_index = enc->flags & IW_ENCODE_INDEX; + + pr_debug("%s: key_index = %d\n", __func__, key_index); + pr_debug("%s: key_len = %d\n", __func__, enc->length); + pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS); + + if (GELIC_WEP_KEYS < key_index) + return -EINVAL; + + spin_lock_irqsave(&wl->lock, irqflag); + if (key_index) { + index_specified = 1; + key_index--; + } else { + index_specified = 0; + key_index = wl->current_key; + } + + if (flags & IW_ENCODE_NOKEY) { + /* if just IW_ENCODE_NOKEY, change current key index */ + if (!flags && index_specified) { + wl->current_key = key_index; + goto done; + } + + if (flags & IW_ENCODE_DISABLED) { + if (!index_specified) { + /* disable encryption */ + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; + wl->pairwise_cipher_method = + GELIC_WL_CIPHER_NONE; + /* invalidate all key */ + wl->key_enabled = 0; + } else + clear_bit(key_index, &wl->key_enabled); + } + + if (flags & IW_ENCODE_OPEN) + wl->auth_method = GELIC_EURUS_AUTH_OPEN; + if (flags & IW_ENCODE_RESTRICTED) { + pr_info("%s: shared key mode enabled\n", __func__); + wl->auth_method = GELIC_EURUS_AUTH_SHARED; 
+ } + } else { + if (IW_ENCODING_TOKEN_MAX < enc->length) { + ret = -EINVAL; + goto done; + } + wl->key_len[key_index] = enc->length; + memcpy(wl->key[key_index], extra, enc->length); + set_bit(key_index, &wl->key_enabled); + wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; + wl->group_cipher_method = GELIC_WL_CIPHER_WEP; + } + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); +done: + spin_unlock_irqrestore(&wl->lock, irqflag); + pr_debug("%s: ->\n", __func__); + return ret; +} + +static int gelic_wl_get_encode(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + struct iw_point *enc = &data->encoding; + unsigned long irqflag; + unsigned int key_index, index_specified; + int ret = 0; + + pr_debug("%s: <-\n", __func__); + key_index = enc->flags & IW_ENCODE_INDEX; + pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__, + enc->flags, enc->pointer, enc->length, extra); + if (GELIC_WEP_KEYS < key_index) + return -EINVAL; + + spin_lock_irqsave(&wl->lock, irqflag); + if (key_index) { + index_specified = 1; + key_index--; + } else { + index_specified = 0; + key_index = wl->current_key; + } + + if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { + switch (wl->auth_method) { + case GELIC_EURUS_AUTH_OPEN: + enc->flags = IW_ENCODE_OPEN; + break; + case GELIC_EURUS_AUTH_SHARED: + enc->flags = IW_ENCODE_RESTRICTED; + break; + } + } else + enc->flags = IW_ENCODE_DISABLED; + + if (test_bit(key_index, &wl->key_enabled)) { + if (enc->length < wl->key_len[key_index]) { + ret = -EINVAL; + goto done; + } + enc->length = wl->key_len[key_index]; + memcpy(extra, wl->key[key_index], wl->key_len[key_index]); + } else { + enc->length = 0; + enc->flags |= IW_ENCODE_NOKEY; + } + enc->flags |= key_index + 1; + pr_debug("%s: -> flag=%x len=%d\n", __func__, + enc->flags, enc->length); + +done: + spin_unlock_irqrestore(&wl->lock, irqflag); + return ret; +} + +/* SIOC{S,G}IWAP */ +static int gelic_wl_set_ap(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + unsigned long irqflag; + + pr_debug("%s: <-\n", __func__); + if (data->ap_addr.sa_family != ARPHRD_ETHER) + return -EINVAL; + + spin_lock_irqsave(&wl->lock, irqflag); + if (is_valid_ether_addr(data->ap_addr.sa_data)) { + memcpy(wl->bssid, data->ap_addr.sa_data, + ETH_ALEN); + set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); + pr_debug("%s: bss=%pM\n", __func__, wl->bssid); + } else { + pr_debug("%s: clear bssid\n", __func__); + clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); + eth_zero_addr(wl->bssid); + } + spin_unlock_irqrestore(&wl->lock, irqflag); + pr_debug("%s: ->\n", __func__); + return 0; +} + +static int gelic_wl_get_ap(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + unsigned long irqflag; + + pr_debug("%s: <-\n", __func__); + mutex_lock(&wl->assoc_stat_lock); + spin_lock_irqsave(&wl->lock, irqflag); + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) { + data->ap_addr.sa_family = ARPHRD_ETHER; + memcpy(data->ap_addr.sa_data, wl->active_bssid, + ETH_ALEN); + } else + eth_zero_addr(data->ap_addr.sa_data); + + spin_unlock_irqrestore(&wl->lock, irqflag); + mutex_unlock(&wl->assoc_stat_lock); + pr_debug("%s: ->\n", __func__); + return 0; +} + +/* SIOC{S,G}IWENCODEEXT */ +static int 
gelic_wl_set_encodeext(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + struct iw_point *enc = &data->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + __u16 alg; + __u16 flags; + unsigned long irqflag; + int key_index; + int ret = 0; + + pr_debug("%s: <-\n", __func__); + flags = enc->flags & IW_ENCODE_FLAGS; + alg = ext->alg; + key_index = enc->flags & IW_ENCODE_INDEX; + + pr_debug("%s: key_index = %d\n", __func__, key_index); + pr_debug("%s: key_len = %d\n", __func__, enc->length); + pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS); + pr_debug("%s: ext_flag=%x\n", __func__, ext->ext_flags); + pr_debug("%s: ext_key_len=%x\n", __func__, ext->key_len); + + if (GELIC_WEP_KEYS < key_index) + return -EINVAL; + + spin_lock_irqsave(&wl->lock, irqflag); + if (key_index) + key_index--; + else + key_index = wl->current_key; + + if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) { + /* reques to change default key index */ + pr_debug("%s: request to change default key to %d\n", + __func__, key_index); + wl->current_key = key_index; + goto done; + } + + if (alg == IW_ENCODE_ALG_NONE || (flags & IW_ENCODE_DISABLED)) { + pr_debug("%s: alg disabled\n", __func__); + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; + wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE; + wl->auth_method = GELIC_EURUS_AUTH_OPEN; /* should be open */ + } else if (alg == IW_ENCODE_ALG_WEP) { + pr_debug("%s: WEP requested\n", __func__); + if (flags & IW_ENCODE_OPEN) { + pr_debug("%s: open key mode\n", __func__); + wl->auth_method = GELIC_EURUS_AUTH_OPEN; + } + if (flags & IW_ENCODE_RESTRICTED) { + pr_debug("%s: shared key mode\n", __func__); + wl->auth_method = GELIC_EURUS_AUTH_SHARED; + } + if (IW_ENCODING_TOKEN_MAX < ext->key_len) { + pr_info("%s: key is too long %d\n", __func__, + ext->key_len); + ret = -EINVAL; + goto done; + } + /* OK, update the key */ + wl->key_len[key_index] = ext->key_len; + memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX); + memcpy(wl->key[key_index], ext->key, ext->key_len); + set_bit(key_index, &wl->key_enabled); + /* remember wep info changed */ + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); + } else if (alg == IW_ENCODE_ALG_PMK) { + if (ext->key_len != WPA_PSK_LEN) { + pr_err("%s: PSK length wrong %d\n", __func__, + ext->key_len); + ret = -EINVAL; + goto done; + } + memset(wl->psk, 0, sizeof(wl->psk)); + memcpy(wl->psk, ext->key, ext->key_len); + wl->psk_len = ext->key_len; + wl->psk_type = GELIC_EURUS_WPA_PSK_BIN; + /* remember PSK configured */ + set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat); + } +done: + spin_unlock_irqrestore(&wl->lock, irqflag); + pr_debug("%s: ->\n", __func__); + return ret; +} + +static int gelic_wl_get_encodeext(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + struct iw_point *enc = &data->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + unsigned long irqflag; + int key_index; + int ret = 0; + int max_key_len; + + pr_debug("%s: <-\n", __func__); + + max_key_len = enc->length - sizeof(struct iw_encode_ext); + if (max_key_len < 0) + return -EINVAL; + key_index = enc->flags & IW_ENCODE_INDEX; + + pr_debug("%s: key_index = %d\n", __func__, key_index); + pr_debug("%s: key_len = %d\n", __func__, enc->length); + pr_debug("%s: 
flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS); + + if (GELIC_WEP_KEYS < key_index) + return -EINVAL; + + spin_lock_irqsave(&wl->lock, irqflag); + if (key_index) + key_index--; + else + key_index = wl->current_key; + + memset(ext, 0, sizeof(struct iw_encode_ext)); + switch (wl->group_cipher_method) { + case GELIC_WL_CIPHER_WEP: + ext->alg = IW_ENCODE_ALG_WEP; + enc->flags |= IW_ENCODE_ENABLED; + break; + case GELIC_WL_CIPHER_TKIP: + ext->alg = IW_ENCODE_ALG_TKIP; + enc->flags |= IW_ENCODE_ENABLED; + break; + case GELIC_WL_CIPHER_AES: + ext->alg = IW_ENCODE_ALG_CCMP; + enc->flags |= IW_ENCODE_ENABLED; + break; + case GELIC_WL_CIPHER_NONE: + default: + ext->alg = IW_ENCODE_ALG_NONE; + enc->flags |= IW_ENCODE_NOKEY; + break; + } + + if (!(enc->flags & IW_ENCODE_NOKEY)) { + if (max_key_len < wl->key_len[key_index]) { + ret = -E2BIG; + goto out; + } + if (test_bit(key_index, &wl->key_enabled)) + memcpy(ext->key, wl->key[key_index], + wl->key_len[key_index]); + else + pr_debug("%s: disabled key requested ix=%d\n", + __func__, key_index); + } +out: + spin_unlock_irqrestore(&wl->lock, irqflag); + pr_debug("%s: ->\n", __func__); + return ret; +} +/* SIOC{S,G}IWMODE */ +static int gelic_wl_set_mode(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + __u32 mode = data->mode; + int ret; + + pr_debug("%s: <-\n", __func__); + if (mode == IW_MODE_INFRA) + ret = 0; + else + ret = -EOPNOTSUPP; + pr_debug("%s: -> %d\n", __func__, ret); + return ret; +} + +static int gelic_wl_get_mode(struct net_device *netdev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + __u32 *mode = &data->mode; + pr_debug("%s: <-\n", __func__); + *mode = IW_MODE_INFRA; + pr_debug("%s: ->\n", __func__); + return 0; +} + +/* SIOCGIWNICKN */ +static int gelic_wl_get_nick(struct net_device *net_dev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + strcpy(extra, "gelic_wl"); + data->data.length = strlen(extra); + data->data.flags = 1; + return 0; +} + + +/* --- */ + +static struct iw_statistics *gelic_wl_get_wireless_stats( + struct net_device *netdev) +{ + + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + struct gelic_eurus_cmd *cmd; + struct iw_statistics *is; + struct gelic_eurus_rssi_info *rssi; + void *buf; + + pr_debug("%s: <-\n", __func__); + + buf = (void *)__get_free_page(GFP_KERNEL); + if (!buf) + return NULL; + + is = &wl->iwstat; + memset(is, 0, sizeof(*is)); + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG, + buf, sizeof(*rssi)); + if (cmd && !cmd->status && !cmd->cmd_status) { + rssi = buf; + is->qual.level = be16_to_cpu(rssi->rssi); + is->qual.updated = IW_QUAL_LEVEL_UPDATED | + IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID; + } else + /* not associated */ + is->qual.updated = IW_QUAL_ALL_INVALID; + + kfree(cmd); + free_page((unsigned long)buf); + pr_debug("%s: ->\n", __func__); + return is; +} + +/* + * scanning helpers + */ +static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan, + u8 *essid, size_t essid_len) +{ + struct gelic_eurus_cmd *cmd; + int ret = 0; + void *buf = NULL; + size_t len; + + pr_debug("%s: <- always=%d\n", __func__, always_scan); + if (mutex_lock_interruptible(&wl->scan_lock)) + return -ERESTARTSYS; + + /* + * If already a scan in progress, do not trigger more + */ + if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) { + pr_debug("%s: scanning now\n", __func__); + goto out; + } + + init_completion(&wl->scan_done); + /* + * If we have already a bss list, 
don't try to get new + * unless we are doing an ESSID scan + */ + if ((!essid_len && !always_scan) + && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) { + pr_debug("%s: already has the list\n", __func__); + complete(&wl->scan_done); + goto out; + } + + /* ESSID scan ? */ + if (essid_len && essid) { + buf = (void *)__get_free_page(GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out; + } + len = IW_ESSID_MAX_SIZE; /* hypervisor always requires 32 */ + memset(buf, 0, len); + memcpy(buf, essid, essid_len); + pr_debug("%s: essid scan='%s'\n", __func__, (char *)buf); + } else + len = 0; + + /* + * issue start scan request + */ + wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING; + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN, + buf, len); + if (!cmd || cmd->status || cmd->cmd_status) { + wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; + complete(&wl->scan_done); + ret = -ENOMEM; + goto out; + } + kfree(cmd); +out: + free_page((unsigned long)buf); + mutex_unlock(&wl->scan_lock); + pr_debug("%s: ->\n", __func__); + return ret; +} + +/* + * retrieve scan result from the chip (hypervisor) + * this function is invoked by schedule work. + */ +static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl) +{ + struct gelic_eurus_cmd *cmd = NULL; + struct gelic_wl_scan_info *target, *tmp; + struct gelic_wl_scan_info *oldest = NULL; + struct gelic_eurus_scan_info *scan_info; + unsigned int scan_info_size; + union iwreq_data data; + unsigned long this_time = jiffies; + unsigned int data_len, i, found, r; + void *buf; + + pr_debug("%s:start\n", __func__); + mutex_lock(&wl->scan_lock); + + buf = (void *)__get_free_page(GFP_KERNEL); + if (!buf) { + pr_info("%s: scan buffer alloc failed\n", __func__); + goto out; + } + + if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) { + /* + * stop() may be called while scanning, ignore result + */ + pr_debug("%s: scan complete when stat != scanning(%d)\n", + __func__, wl->scan_stat); + goto out; + } + + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN, + buf, PAGE_SIZE); + if (!cmd || cmd->status || cmd->cmd_status) { + wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; + pr_info("%s:cmd failed\n", __func__); + kfree(cmd); + goto out; + } + data_len = cmd->size; + pr_debug("%s: data_len = %d\n", __func__, data_len); + kfree(cmd); + + /* OK, bss list retrieved */ + wl->scan_stat = GELIC_WL_SCAN_STAT_GOT_LIST; + + /* mark all entries are old */ + list_for_each_entry_safe(target, tmp, &wl->network_list, list) { + target->valid = 0; + /* expire too old entries */ + if (time_before(target->last_scanned + wl->scan_age, + this_time)) { + kfree(target->hwinfo); + target->hwinfo = NULL; + list_move_tail(&target->list, &wl->network_free_list); + } + } + + /* put them in the network_list */ + for (i = 0, scan_info_size = 0, scan_info = buf; + scan_info_size < data_len; + i++, scan_info_size += be16_to_cpu(scan_info->size), + scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) { + pr_debug("%s:size=%d bssid=%pM scan_info=%p\n", __func__, + be16_to_cpu(scan_info->size), + &scan_info->bssid[2], scan_info); + + /* + * The wireless firmware may return invalid channel 0 and/or + * invalid rate if the AP emits zero length SSID ie. 
As this + * scan information is useless, ignore it + */ + if (!be16_to_cpu(scan_info->channel) || !scan_info->rate[0]) { + pr_debug("%s: invalid scan info\n", __func__); + continue; + } + + found = 0; + oldest = NULL; + list_for_each_entry(target, &wl->network_list, list) { + if (ether_addr_equal(&target->hwinfo->bssid[2], + &scan_info->bssid[2])) { + found = 1; + pr_debug("%s: same BBS found scanned list\n", + __func__); + break; + } + if (!oldest || + (target->last_scanned < oldest->last_scanned)) + oldest = target; + } + + if (!found) { + /* not found in the list */ + if (list_empty(&wl->network_free_list)) { + /* expire oldest */ + target = oldest; + } else { + target = list_entry(wl->network_free_list.next, + struct gelic_wl_scan_info, + list); + } + } + + /* update the item */ + target->last_scanned = this_time; + target->valid = 1; + target->eurus_index = i; + kfree(target->hwinfo); + target->hwinfo = kzalloc(be16_to_cpu(scan_info->size), + GFP_KERNEL); + if (!target->hwinfo) + continue; + + /* copy hw scan info */ + memcpy(target->hwinfo, scan_info, scan_info->size); + target->essid_len = strnlen(scan_info->essid, + sizeof(scan_info->essid)); + target->rate_len = 0; + for (r = 0; r < 12; r++) + if (scan_info->rate[r]) + target->rate_len++; + if (8 < target->rate_len) + pr_info("%s: AP returns %d rates\n", __func__, + target->rate_len); + target->rate_ext_len = 0; + for (r = 0; r < 16; r++) + if (scan_info->ext_rate[r]) + target->rate_ext_len++; + list_move_tail(&target->list, &wl->network_list); + } + memset(&data, 0, sizeof(data)); + wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data, + NULL); +out: + free_page((unsigned long)buf); + complete(&wl->scan_done); + mutex_unlock(&wl->scan_lock); + pr_debug("%s:end\n", __func__); +} + +/* + * Select an appropriate bss from current scan list regarding + * current settings from userspace. 
+ * The caller must hold wl->scan_lock, + * and on the state of wl->scan_state == GELIC_WL_SCAN_GOT_LIST + */ +static void update_best(struct gelic_wl_scan_info **best, + struct gelic_wl_scan_info *candid, + int *best_weight, + int *weight) +{ + if (*best_weight < ++(*weight)) { + *best_weight = *weight; + *best = candid; + } +} + +static +struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl) +{ + struct gelic_wl_scan_info *scan_info; + struct gelic_wl_scan_info *best_bss; + int weight, best_weight; + u16 security; + + pr_debug("%s: <-\n", __func__); + + best_bss = NULL; + best_weight = 0; + + list_for_each_entry(scan_info, &wl->network_list, list) { + pr_debug("%s: station %p\n", __func__, scan_info); + + if (!scan_info->valid) { + pr_debug("%s: station invalid\n", __func__); + continue; + } + + /* If bss specified, check it only */ + if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) { + if (ether_addr_equal(&scan_info->hwinfo->bssid[2], + wl->bssid)) { + best_bss = scan_info; + pr_debug("%s: bssid matched\n", __func__); + break; + } else { + pr_debug("%s: bssid unmached\n", __func__); + continue; + } + } + + weight = 0; + + /* security */ + security = be16_to_cpu(scan_info->hwinfo->security) & + GELIC_EURUS_SCAN_SEC_MASK; + if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) { + if (security == GELIC_EURUS_SCAN_SEC_WPA2) + update_best(&best_bss, scan_info, + &best_weight, &weight); + else + continue; + } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA) { + if (security == GELIC_EURUS_SCAN_SEC_WPA) + update_best(&best_bss, scan_info, + &best_weight, &weight); + else + continue; + } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_NONE && + wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { + if (security == GELIC_EURUS_SCAN_SEC_WEP) + update_best(&best_bss, scan_info, + &best_weight, &weight); + else + continue; + } + + /* If ESSID is set, check it */ + if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) { + if ((scan_info->essid_len == wl->essid_len) && + !strncmp(wl->essid, + scan_info->hwinfo->essid, + scan_info->essid_len)) + update_best(&best_bss, scan_info, + &best_weight, &weight); + else + continue; + } + } + +#ifdef DEBUG + pr_debug("%s: -> bss=%p\n", __func__, best_bss); + if (best_bss) { + pr_debug("%s:addr=%pM\n", __func__, + &best_bss->hwinfo->bssid[2]); + } +#endif + return best_bss; +} + +/* + * Setup WEP configuration to the chip + * The caller must hold wl->scan_lock, + * and on the state of wl->scan_state == GELIC_WL_SCAN_GOT_LIST + */ +static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl) +{ + unsigned int i; + struct gelic_eurus_wep_cfg *wep; + struct gelic_eurus_cmd *cmd; + int wep104 = 0; + int have_key = 0; + int ret = 0; + + pr_debug("%s: <-\n", __func__); + /* we can assume no one should uses the buffer */ + wep = (struct gelic_eurus_wep_cfg *)__get_free_page(GFP_KERNEL); + if (!wep) + return -ENOMEM; + + memset(wep, 0, sizeof(*wep)); + + if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { + pr_debug("%s: WEP mode\n", __func__); + for (i = 0; i < GELIC_WEP_KEYS; i++) { + if (!test_bit(i, &wl->key_enabled)) + continue; + + pr_debug("%s: key#%d enabled\n", __func__, i); + have_key = 1; + if (wl->key_len[i] == 13) + wep104 = 1; + else if (wl->key_len[i] != 5) { + pr_info("%s: wrong wep key[%d]=%d\n", + __func__, i, wl->key_len[i]); + ret = -EINVAL; + goto out; + } + memcpy(wep->key[i], wl->key[i], wl->key_len[i]); + } + + if (!have_key) { + pr_info("%s: all wep key disabled\n", __func__); + ret = -EINVAL; + goto out; + } + + if (wep104) { + 
pr_debug("%s: 104bit key\n", __func__); + wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_104BIT); + } else { + pr_debug("%s: 40bit key\n", __func__); + wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_40BIT); + } + } else { + pr_debug("%s: NO encryption\n", __func__); + wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_NONE); + } + + /* issue wep setup */ + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WEP_CFG, + wep, sizeof(*wep)); + if (!cmd) + ret = -ENOMEM; + else if (cmd->status || cmd->cmd_status) + ret = -ENXIO; + + kfree(cmd); +out: + free_page((unsigned long)wep); + pr_debug("%s: ->\n", __func__); + return ret; +} + +#ifdef DEBUG +static const char *wpasecstr(enum gelic_eurus_wpa_security sec) +{ + switch (sec) { + case GELIC_EURUS_WPA_SEC_NONE: + return "NONE"; + case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP: + return "WPA_TKIP_TKIP"; + case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES: + return "WPA_TKIP_AES"; + case GELIC_EURUS_WPA_SEC_WPA_AES_AES: + return "WPA_AES_AES"; + case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP: + return "WPA2_TKIP_TKIP"; + case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES: + return "WPA2_TKIP_AES"; + case GELIC_EURUS_WPA_SEC_WPA2_AES_AES: + return "WPA2_AES_AES"; + } + return ""; +}; +#endif + +static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl) +{ + struct gelic_eurus_wpa_cfg *wpa; + struct gelic_eurus_cmd *cmd; + u16 security; + int ret = 0; + + pr_debug("%s: <-\n", __func__); + /* we can assume no one should uses the buffer */ + wpa = (struct gelic_eurus_wpa_cfg *)__get_free_page(GFP_KERNEL); + if (!wpa) + return -ENOMEM; + + memset(wpa, 0, sizeof(*wpa)); + + if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) + pr_info("%s: PSK not configured yet\n", __func__); + + /* copy key */ + memcpy(wpa->psk, wl->psk, wl->psk_len); + + /* set security level */ + if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) { + if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) { + security = GELIC_EURUS_WPA_SEC_WPA2_AES_AES; + } else { + if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES && + precise_ie()) + security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES; + else + security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP; + } + } else { + if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) { + security = GELIC_EURUS_WPA_SEC_WPA_AES_AES; + } else { + if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES && + precise_ie()) + security = GELIC_EURUS_WPA_SEC_WPA_TKIP_AES; + else + security = GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP; + } + } + wpa->security = cpu_to_be16(security); + + /* PSK type */ + wpa->psk_type = cpu_to_be16(wl->psk_type); +#ifdef DEBUG + pr_debug("%s: sec=%s psktype=%s\n", __func__, + wpasecstr(wpa->security), + (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ? + "BIN" : "passphrase"); +#if 0 + /* + * don't enable here if you plan to submit + * the debug log because this dumps your precious + * passphrase/key. + */ + pr_debug("%s: psk=%s\n", __func__, + (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ? + "N/A" : wpa->psk); +#endif +#endif + /* issue wpa setup */ + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WPA_CFG, + wpa, sizeof(*wpa)); + if (!cmd) + ret = -ENOMEM; + else if (cmd->status || cmd->cmd_status) + ret = -ENXIO; + kfree(cmd); + free_page((unsigned long)wpa); + pr_debug("%s: --> %d\n", __func__, ret); + return ret; +} + +/* + * Start association. 
caller must hold assoc_stat_lock + */ +static int gelic_wl_associate_bss(struct gelic_wl_info *wl, + struct gelic_wl_scan_info *bss) +{ + struct gelic_eurus_cmd *cmd; + struct gelic_eurus_common_cfg *common; + int ret = 0; + unsigned long rc; + + pr_debug("%s: <-\n", __func__); + + /* do common config */ + common = (struct gelic_eurus_common_cfg *)__get_free_page(GFP_KERNEL); + if (!common) + return -ENOMEM; + + memset(common, 0, sizeof(*common)); + common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA); + common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG); + + common->scan_index = cpu_to_be16(bss->eurus_index); + switch (wl->auth_method) { + case GELIC_EURUS_AUTH_OPEN: + common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_OPEN); + break; + case GELIC_EURUS_AUTH_SHARED: + common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_SHARED); + break; + } + +#ifdef DEBUG + scan_list_dump(wl); +#endif + pr_debug("%s: common cfg index=%d bsstype=%d auth=%d\n", __func__, + be16_to_cpu(common->scan_index), + be16_to_cpu(common->bss_type), + be16_to_cpu(common->auth_method)); + + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_COMMON_CFG, + common, sizeof(*common)); + if (!cmd || cmd->status || cmd->cmd_status) { + ret = -ENOMEM; + kfree(cmd); + goto out; + } + kfree(cmd); + + /* WEP/WPA */ + switch (wl->wpa_level) { + case GELIC_WL_WPA_LEVEL_NONE: + /* If WEP or no security, setup WEP config */ + ret = gelic_wl_do_wep_setup(wl); + break; + case GELIC_WL_WPA_LEVEL_WPA: + case GELIC_WL_WPA_LEVEL_WPA2: + ret = gelic_wl_do_wpa_setup(wl); + break; + } + + if (ret) { + pr_debug("%s: WEP/WPA setup failed %d\n", __func__, + ret); + ret = -EPERM; + gelic_wl_send_iwap_event(wl, NULL); + goto out; + } + + /* start association */ + init_completion(&wl->assoc_done); + wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATING; + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_ASSOC, + NULL, 0); + if (!cmd || cmd->status || cmd->cmd_status) { + pr_debug("%s: assoc request failed\n", __func__); + wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN; + kfree(cmd); + ret = -ENOMEM; + gelic_wl_send_iwap_event(wl, NULL); + goto out; + } + kfree(cmd); + + /* wait for connected event */ + rc = wait_for_completion_timeout(&wl->assoc_done, HZ * 4);/*FIXME*/ + + if (!rc) { + /* timeouted. 
Maybe key or cyrpt mode is wrong */ + pr_info("%s: connect timeout\n", __func__); + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, + NULL, 0); + kfree(cmd); + wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN; + gelic_wl_send_iwap_event(wl, NULL); + ret = -ENXIO; + } else { + wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATED; + /* copy bssid */ + memcpy(wl->active_bssid, &bss->hwinfo->bssid[2], ETH_ALEN); + + /* send connect event */ + gelic_wl_send_iwap_event(wl, wl->active_bssid); + pr_info("%s: connected\n", __func__); + } +out: + free_page((unsigned long)common); + pr_debug("%s: ->\n", __func__); + return ret; +} + +/* + * connected event + */ +static void gelic_wl_connected_event(struct gelic_wl_info *wl, + u64 event) +{ + u64 desired_event = 0; + + switch (wl->wpa_level) { + case GELIC_WL_WPA_LEVEL_NONE: + desired_event = GELIC_LV1_WL_EVENT_CONNECTED; + break; + case GELIC_WL_WPA_LEVEL_WPA: + case GELIC_WL_WPA_LEVEL_WPA2: + desired_event = GELIC_LV1_WL_EVENT_WPA_CONNECTED; + break; + } + + if (desired_event == event) { + pr_debug("%s: completed\n", __func__); + complete(&wl->assoc_done); + netif_carrier_on(port_to_netdev(wl_port(wl))); + } else + pr_debug("%s: event %#llx under wpa\n", + __func__, event); +} + +/* + * disconnect event + */ +static void gelic_wl_disconnect_event(struct gelic_wl_info *wl, + u64 event) +{ + struct gelic_eurus_cmd *cmd; + int lock; + + /* + * If we fall here in the middle of association, + * associate_bss() should be waiting for complation of + * wl->assoc_done. + * As it waits with timeout, just leave assoc_done + * uncompleted, then it terminates with timeout + */ + if (!mutex_trylock(&wl->assoc_stat_lock)) { + pr_debug("%s: already locked\n", __func__); + lock = 0; + } else { + pr_debug("%s: obtain lock\n", __func__); + lock = 1; + } + + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0); + kfree(cmd); + + /* send disconnected event to the supplicant */ + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) + gelic_wl_send_iwap_event(wl, NULL); + + wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN; + netif_carrier_off(port_to_netdev(wl_port(wl))); + + if (lock) + mutex_unlock(&wl->assoc_stat_lock); +} +/* + * event worker + */ +#ifdef DEBUG +static const char *eventstr(enum gelic_lv1_wl_event event) +{ + static char buf[32]; + char *ret; + if (event & GELIC_LV1_WL_EVENT_DEVICE_READY) + ret = "EURUS_READY"; + else if (event & GELIC_LV1_WL_EVENT_SCAN_COMPLETED) + ret = "SCAN_COMPLETED"; + else if (event & GELIC_LV1_WL_EVENT_DEAUTH) + ret = "DEAUTH"; + else if (event & GELIC_LV1_WL_EVENT_BEACON_LOST) + ret = "BEACON_LOST"; + else if (event & GELIC_LV1_WL_EVENT_CONNECTED) + ret = "CONNECTED"; + else if (event & GELIC_LV1_WL_EVENT_WPA_CONNECTED) + ret = "WPA_CONNECTED"; + else if (event & GELIC_LV1_WL_EVENT_WPA_ERROR) + ret = "WPA_ERROR"; + else { + sprintf(buf, "Unknown(%#x)", event); + ret = buf; + } + return ret; +} +#else +static const char *eventstr(enum gelic_lv1_wl_event event) +{ + return NULL; +} +#endif +static void gelic_wl_event_worker(struct work_struct *work) +{ + struct gelic_wl_info *wl; + struct gelic_port *port; + u64 event, tmp; + int status; + + pr_debug("%s:start\n", __func__); + wl = container_of(work, struct gelic_wl_info, event_work.work); + port = wl_port(wl); + while (1) { + status = lv1_net_control(bus_id(port->card), dev_id(port->card), + GELIC_LV1_GET_WLAN_EVENT, 0, 0, 0, + &event, &tmp); + if (status) { + if (status != LV1_NO_ENTRY) + pr_debug("%s:wlan event failed %d\n", + __func__, status); + /* got all events 
*/ + pr_debug("%s:end\n", __func__); + return; + } + pr_debug("%s: event=%s\n", __func__, eventstr(event)); + switch (event) { + case GELIC_LV1_WL_EVENT_SCAN_COMPLETED: + gelic_wl_scan_complete_event(wl); + break; + case GELIC_LV1_WL_EVENT_BEACON_LOST: + case GELIC_LV1_WL_EVENT_DEAUTH: + gelic_wl_disconnect_event(wl, event); + break; + case GELIC_LV1_WL_EVENT_CONNECTED: + case GELIC_LV1_WL_EVENT_WPA_CONNECTED: + gelic_wl_connected_event(wl, event); + break; + default: + break; + } + } /* while */ +} +/* + * association worker + */ +static void gelic_wl_assoc_worker(struct work_struct *work) +{ + struct gelic_wl_info *wl; + + struct gelic_wl_scan_info *best_bss; + int ret; + unsigned long irqflag; + u8 *essid; + size_t essid_len; + + wl = container_of(work, struct gelic_wl_info, assoc_work.work); + + mutex_lock(&wl->assoc_stat_lock); + + if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN) + goto out; + + spin_lock_irqsave(&wl->lock, irqflag); + if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) { + pr_debug("%s: assoc ESSID configured %s\n", __func__, + wl->essid); + essid = wl->essid; + essid_len = wl->essid_len; + } else { + essid = NULL; + essid_len = 0; + } + spin_unlock_irqrestore(&wl->lock, irqflag); + + ret = gelic_wl_start_scan(wl, 0, essid, essid_len); + if (ret == -ERESTARTSYS) { + pr_debug("%s: scan start failed association\n", __func__); + schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/ + goto out; + } else if (ret) { + pr_info("%s: scan prerequisite failed\n", __func__); + goto out; + } + + /* + * Wait for bss scan completion + * If we have scan list already, gelic_wl_start_scan() + * returns OK and raises the complete. Thus, + * it's ok to wait unconditionally here + */ + wait_for_completion(&wl->scan_done); + + pr_debug("%s: scan done\n", __func__); + mutex_lock(&wl->scan_lock); + if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) { + gelic_wl_send_iwap_event(wl, NULL); + pr_info("%s: no scan list. association failed\n", __func__); + goto scan_lock_out; + } + + /* find best matching bss */ + best_bss = gelic_wl_find_best_bss(wl); + if (!best_bss) { + gelic_wl_send_iwap_event(wl, NULL); + pr_info("%s: no bss matched. 
association failed\n", __func__); + goto scan_lock_out; + } + + /* ok, do association */ + ret = gelic_wl_associate_bss(wl, best_bss); + if (ret) + pr_info("%s: association failed %d\n", __func__, ret); +scan_lock_out: + mutex_unlock(&wl->scan_lock); +out: + mutex_unlock(&wl->assoc_stat_lock); +} +/* + * Interrupt handler + * Called from the ethernet interrupt handler + * Processes wireless specific virtual interrupts only + */ +void gelic_wl_interrupt(struct net_device *netdev, u64 status) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + + if (status & GELIC_CARD_WLAN_COMMAND_COMPLETED) { + pr_debug("%s:cmd complete\n", __func__); + complete(&wl->cmd_done_intr); + } + + if (status & GELIC_CARD_WLAN_EVENT_RECEIVED) { + pr_debug("%s:event received\n", __func__); + queue_delayed_work(wl->event_queue, &wl->event_work, 0); + } +} + +/* + * driver helpers + */ +static const iw_handler gelic_wl_wext_handler[] = +{ + IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name), + IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range), + IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan), + IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan), + IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth), + IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth), + IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid), + IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid), + IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode), + IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode), + IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap), + IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap), + IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext), + IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext), + IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode), + IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode), + IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick), +}; + +static const struct iw_handler_def gelic_wl_wext_handler_def = { + .num_standard = ARRAY_SIZE(gelic_wl_wext_handler), + .standard = gelic_wl_wext_handler, + .get_wireless_stats = gelic_wl_get_wireless_stats, +}; + +static struct net_device *gelic_wl_alloc(struct gelic_card *card) +{ + struct net_device *netdev; + struct gelic_port *port; + struct gelic_wl_info *wl; + unsigned int i; + + pr_debug("%s:start\n", __func__); + netdev = alloc_etherdev(sizeof(struct gelic_port) + + sizeof(struct gelic_wl_info)); + pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card); + if (!netdev) + return NULL; + + strcpy(netdev->name, "wlan%d"); + + port = netdev_priv(netdev); + port->netdev = netdev; + port->card = card; + port->type = GELIC_PORT_WIRELESS; + + wl = port_wl(port); + pr_debug("%s: wl=%p port=%p\n", __func__, wl, port); + + /* allocate scan list */ + wl->networks = kzalloc(sizeof(struct gelic_wl_scan_info) * + GELIC_WL_BSS_MAX_ENT, GFP_KERNEL); + + if (!wl->networks) + goto fail_bss; + + wl->eurus_cmd_queue = create_singlethread_workqueue("gelic_cmd"); + if (!wl->eurus_cmd_queue) + goto fail_cmd_workqueue; + + wl->event_queue = create_singlethread_workqueue("gelic_event"); + if (!wl->event_queue) + goto fail_event_workqueue; + + INIT_LIST_HEAD(&wl->network_free_list); + INIT_LIST_HEAD(&wl->network_list); + for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++) + list_add_tail(&wl->networks[i].list, + &wl->network_free_list); + init_completion(&wl->cmd_done_intr); + + INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker); + INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker); + mutex_init(&wl->scan_lock); + mutex_init(&wl->assoc_stat_lock); + + init_completion(&wl->scan_done); + /* for the case that no scan request is issued and stop() is called */ + 
complete(&wl->scan_done); + + spin_lock_init(&wl->lock); + + wl->scan_age = 5*HZ; /* FIXME */ + + /* buffer for receiving scanned list etc */ + BUILD_BUG_ON(PAGE_SIZE < + sizeof(struct gelic_eurus_scan_info) * + GELIC_EURUS_MAX_SCAN); + pr_debug("%s:end\n", __func__); + return netdev; + +fail_event_workqueue: + destroy_workqueue(wl->eurus_cmd_queue); +fail_cmd_workqueue: + kfree(wl->networks); +fail_bss: + free_netdev(netdev); + pr_debug("%s:end error\n", __func__); + return NULL; + +} + +static void gelic_wl_free(struct gelic_wl_info *wl) +{ + struct gelic_wl_scan_info *scan_info; + unsigned int i; + + pr_debug("%s: <-\n", __func__); + + pr_debug("%s: destroy queues\n", __func__); + destroy_workqueue(wl->eurus_cmd_queue); + destroy_workqueue(wl->event_queue); + + scan_info = wl->networks; + for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++, scan_info++) + kfree(scan_info->hwinfo); + kfree(wl->networks); + + free_netdev(port_to_netdev(wl_port(wl))); + + pr_debug("%s: ->\n", __func__); +} + +static int gelic_wl_try_associate(struct net_device *netdev) +{ + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); + int ret = -1; + unsigned int i; + + pr_debug("%s: <-\n", __func__); + + /* check constraits for start association */ + /* for no access restriction AP */ + if (wl->group_cipher_method == GELIC_WL_CIPHER_NONE) { + if (test_bit(GELIC_WL_STAT_CONFIGURED, + &wl->stat)) + goto do_associate; + else { + pr_debug("%s: no wep, not configured\n", __func__); + return ret; + } + } + + /* for WEP, one of four keys should be set */ + if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { + /* one of keys set */ + for (i = 0; i < GELIC_WEP_KEYS; i++) { + if (test_bit(i, &wl->key_enabled)) + goto do_associate; + } + pr_debug("%s: WEP, but no key specified\n", __func__); + return ret; + } + + /* for WPA[2], psk should be set */ + if ((wl->group_cipher_method == GELIC_WL_CIPHER_TKIP) || + (wl->group_cipher_method == GELIC_WL_CIPHER_AES)) { + if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, + &wl->stat)) + goto do_associate; + else { + pr_debug("%s: AES/TKIP, but PSK not configured\n", + __func__); + return ret; + } + } + +do_associate: + ret = schedule_delayed_work(&wl->assoc_work, 0); + pr_debug("%s: start association work %d\n", __func__, ret); + return ret; +} + +/* + * netdev handlers + */ +static int gelic_wl_open(struct net_device *netdev) +{ + struct gelic_card *card = netdev_card(netdev); + + pr_debug("%s:->%p\n", __func__, netdev); + + gelic_card_up(card); + + /* try to associate */ + gelic_wl_try_associate(netdev); + + netif_start_queue(netdev); + + pr_debug("%s:<-\n", __func__); + return 0; +} + +/* + * reset state machine + */ +static int gelic_wl_reset_state(struct gelic_wl_info *wl) +{ + struct gelic_wl_scan_info *target; + struct gelic_wl_scan_info *tmp; + + /* empty scan list */ + list_for_each_entry_safe(target, tmp, &wl->network_list, list) { + list_move_tail(&target->list, &wl->network_free_list); + } + wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; + + /* clear configuration */ + wl->auth_method = GELIC_EURUS_AUTH_OPEN; + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; + wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE; + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; + + wl->key_enabled = 0; + wl->current_key = 0; + + wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE; + wl->psk_len = 0; + + wl->essid_len = 0; + memset(wl->essid, 0, sizeof(wl->essid)); + memset(wl->bssid, 0, sizeof(wl->bssid)); + memset(wl->active_bssid, 0, sizeof(wl->active_bssid)); + + wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN; + + 
memset(&wl->iwstat, 0, sizeof(wl->iwstat)); + /* all status bit clear */ + wl->stat = 0; + return 0; +} + +/* + * Tell eurus to terminate association + */ +static void gelic_wl_disconnect(struct net_device *netdev) +{ + struct gelic_port *port = netdev_priv(netdev); + struct gelic_wl_info *wl = port_wl(port); + struct gelic_eurus_cmd *cmd; + + /* + * If scann process is running on chip, + * further requests will be rejected + */ + if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) + wait_for_completion_timeout(&wl->scan_done, HZ); + + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0); + kfree(cmd); + gelic_wl_send_iwap_event(wl, NULL); +}; + +static int gelic_wl_stop(struct net_device *netdev) +{ + struct gelic_port *port = netdev_priv(netdev); + struct gelic_wl_info *wl = port_wl(port); + struct gelic_card *card = netdev_card(netdev); + + pr_debug("%s:<-\n", __func__); + + /* + * Cancel pending association work. + * event work can run after netdev down + */ + cancel_delayed_work(&wl->assoc_work); + + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) + gelic_wl_disconnect(netdev); + + /* reset our state machine */ + gelic_wl_reset_state(wl); + + netif_stop_queue(netdev); + + gelic_card_down(card); + + pr_debug("%s:->\n", __func__); + return 0; +} + +/* -- */ + +static const struct net_device_ops gelic_wl_netdevice_ops = { + .ndo_open = gelic_wl_open, + .ndo_stop = gelic_wl_stop, + .ndo_start_xmit = gelic_net_xmit, + .ndo_set_rx_mode = gelic_net_set_multi, + .ndo_change_mtu = gelic_net_change_mtu, + .ndo_tx_timeout = gelic_net_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = gelic_net_poll_controller, +#endif +}; + +static const struct ethtool_ops gelic_wl_ethtool_ops = { + .get_drvinfo = gelic_net_get_drvinfo, + .get_link = gelic_wl_get_link, +}; + +static void gelic_wl_setup_netdev_ops(struct net_device *netdev) +{ + struct gelic_wl_info *wl; + wl = port_wl(netdev_priv(netdev)); + BUG_ON(!wl); + netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; + + netdev->ethtool_ops = &gelic_wl_ethtool_ops; + netdev->netdev_ops = &gelic_wl_netdevice_ops; + netdev->wireless_data = &wl->wireless_data; + netdev->wireless_handlers = &gelic_wl_wext_handler_def; +} + +/* + * driver probe/remove + */ +int gelic_wl_driver_probe(struct gelic_card *card) +{ + int ret; + struct net_device *netdev; + + pr_debug("%s:start\n", __func__); + + if (ps3_compare_firmware_version(1, 6, 0) < 0) + return 0; + if (!card->vlan[GELIC_PORT_WIRELESS].tx) + return 0; + + /* alloc netdevice for wireless */ + netdev = gelic_wl_alloc(card); + if (!netdev) + return -ENOMEM; + + /* setup net_device structure */ + SET_NETDEV_DEV(netdev, &card->dev->core); + gelic_wl_setup_netdev_ops(netdev); + + /* setup some of net_device and register it */ + ret = gelic_net_setup_netdev(netdev, card); + if (ret) + goto fail_setup; + card->netdev[GELIC_PORT_WIRELESS] = netdev; + + /* add enable wireless interrupt */ + card->irq_mask |= GELIC_CARD_WLAN_EVENT_RECEIVED | + GELIC_CARD_WLAN_COMMAND_COMPLETED; + /* to allow wireless commands while both interfaces are down */ + gelic_card_set_irq_mask(card, GELIC_CARD_WLAN_EVENT_RECEIVED | + GELIC_CARD_WLAN_COMMAND_COMPLETED); + pr_debug("%s:end\n", __func__); + return 0; + +fail_setup: + gelic_wl_free(port_wl(netdev_port(netdev))); + + return ret; +} + +int gelic_wl_driver_remove(struct gelic_card *card) +{ + struct gelic_wl_info *wl; + struct net_device *netdev; + + 
pr_debug("%s:start\n", __func__); + + if (ps3_compare_firmware_version(1, 6, 0) < 0) + return 0; + if (!card->vlan[GELIC_PORT_WIRELESS].tx) + return 0; + + netdev = card->netdev[GELIC_PORT_WIRELESS]; + wl = port_wl(netdev_priv(netdev)); + + /* if the interface was not up, but associated */ + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) + gelic_wl_disconnect(netdev); + + complete(&wl->cmd_done_intr); + + /* cancel all work queue */ + cancel_delayed_work(&wl->assoc_work); + cancel_delayed_work(&wl->event_work); + flush_workqueue(wl->eurus_cmd_queue); + flush_workqueue(wl->event_queue); + + unregister_netdev(netdev); + + /* disable wireless interrupt */ + pr_debug("%s: disable intr\n", __func__); + card->irq_mask &= ~(GELIC_CARD_WLAN_EVENT_RECEIVED | + GELIC_CARD_WLAN_COMMAND_COMPLETED); + /* free bss list, netdev*/ + gelic_wl_free(wl); + pr_debug("%s:end\n", __func__); + return 0; +} diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h new file mode 100644 index 000000000..11f443d8e --- /dev/null +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.h @@ -0,0 +1,326 @@ +/* + * PS3 gelic network driver. + * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2007 Sony Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#ifndef _GELIC_WIRELESS_H +#define _GELIC_WIRELESS_H + +#include +#include + + +/* return value from GELIC_LV1_GET_WLAN_EVENT netcontrol */ +enum gelic_lv1_wl_event { + GELIC_LV1_WL_EVENT_DEVICE_READY = 0x01, /* Eurus ready */ + GELIC_LV1_WL_EVENT_SCAN_COMPLETED = 0x02, /* Scan has completed */ + GELIC_LV1_WL_EVENT_DEAUTH = 0x04, /* Deauthed by the AP */ + GELIC_LV1_WL_EVENT_BEACON_LOST = 0x08, /* Beacon lost detected */ + GELIC_LV1_WL_EVENT_CONNECTED = 0x10, /* Connected to AP */ + GELIC_LV1_WL_EVENT_WPA_CONNECTED = 0x20, /* WPA connection */ + GELIC_LV1_WL_EVENT_WPA_ERROR = 0x40, /* MIC error */ +}; + +/* arguments for GELIC_LV1_POST_WLAN_COMMAND netcontrol */ +enum gelic_eurus_command { + GELIC_EURUS_CMD_ASSOC = 1, /* association start */ + GELIC_EURUS_CMD_DISASSOC = 2, /* disassociate */ + GELIC_EURUS_CMD_START_SCAN = 3, /* scan start */ + GELIC_EURUS_CMD_GET_SCAN = 4, /* get scan result */ + GELIC_EURUS_CMD_SET_COMMON_CFG = 5, /* set common config */ + GELIC_EURUS_CMD_GET_COMMON_CFG = 6, /* set common config */ + GELIC_EURUS_CMD_SET_WEP_CFG = 7, /* set WEP config */ + GELIC_EURUS_CMD_GET_WEP_CFG = 8, /* get WEP config */ + GELIC_EURUS_CMD_SET_WPA_CFG = 9, /* set WPA config */ + GELIC_EURUS_CMD_GET_WPA_CFG = 10, /* get WPA config */ + GELIC_EURUS_CMD_GET_RSSI_CFG = 11, /* get RSSI info. 
*/ + GELIC_EURUS_CMD_MAX_INDEX +}; + +/* for GELIC_EURUS_CMD_COMMON_CFG */ +enum gelic_eurus_bss_type { + GELIC_EURUS_BSS_INFRA = 0, + GELIC_EURUS_BSS_ADHOC = 1, /* not supported */ +}; + +enum gelic_eurus_auth_method { + GELIC_EURUS_AUTH_OPEN = 0, /* FIXME: WLAN_AUTH_OPEN */ + GELIC_EURUS_AUTH_SHARED = 1, /* not supported */ +}; + +enum gelic_eurus_opmode { + GELIC_EURUS_OPMODE_11BG = 0, /* 802.11b/g */ + GELIC_EURUS_OPMODE_11B = 1, /* 802.11b only */ + GELIC_EURUS_OPMODE_11G = 2, /* 802.11g only */ +}; + +struct gelic_eurus_common_cfg { + /* all fields are big endian */ + u16 scan_index; + u16 bss_type; /* infra or adhoc */ + u16 auth_method; /* shared key or open */ + u16 op_mode; /* B/G */ +} __packed; + + +/* for GELIC_EURUS_CMD_WEP_CFG */ +enum gelic_eurus_wep_security { + GELIC_EURUS_WEP_SEC_NONE = 0, + GELIC_EURUS_WEP_SEC_40BIT = 1, + GELIC_EURUS_WEP_SEC_104BIT = 2, +}; + +struct gelic_eurus_wep_cfg { + /* all fields are big endian */ + u16 security; + u8 key[4][16]; +} __packed; + +/* for GELIC_EURUS_CMD_WPA_CFG */ +enum gelic_eurus_wpa_security { + GELIC_EURUS_WPA_SEC_NONE = 0x0000, + /* group=TKIP, pairwise=TKIP */ + GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP = 0x0001, + /* group=AES, pairwise=AES */ + GELIC_EURUS_WPA_SEC_WPA_AES_AES = 0x0002, + /* group=TKIP, pairwise=TKIP */ + GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP = 0x0004, + /* group=AES, pairwise=AES */ + GELIC_EURUS_WPA_SEC_WPA2_AES_AES = 0x0008, + /* group=TKIP, pairwise=AES */ + GELIC_EURUS_WPA_SEC_WPA_TKIP_AES = 0x0010, + /* group=TKIP, pairwise=AES */ + GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES = 0x0020, +}; + +enum gelic_eurus_wpa_psk_type { + GELIC_EURUS_WPA_PSK_PASSPHRASE = 0, /* passphrase string */ + GELIC_EURUS_WPA_PSK_BIN = 1, /* 32 bytes binary key */ +}; + +#define GELIC_WL_EURUS_PSK_MAX_LEN 64 +#define WPA_PSK_LEN 32 /* WPA spec says 256bit */ + +struct gelic_eurus_wpa_cfg { + /* all fields are big endian */ + u16 security; + u16 psk_type; /* psk key encoding type */ + u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */ +} __packed; + +/* for GELIC_EURUS_CMD_{START,GET}_SCAN */ +enum gelic_eurus_scan_capability { + GELIC_EURUS_SCAN_CAP_ADHOC = 0x0000, + GELIC_EURUS_SCAN_CAP_INFRA = 0x0001, + GELIC_EURUS_SCAN_CAP_MASK = 0x0001, +}; + +enum gelic_eurus_scan_sec_type { + GELIC_EURUS_SCAN_SEC_NONE = 0x0000, + GELIC_EURUS_SCAN_SEC_WEP = 0x0100, + GELIC_EURUS_SCAN_SEC_WPA = 0x0200, + GELIC_EURUS_SCAN_SEC_WPA2 = 0x0400, + GELIC_EURUS_SCAN_SEC_MASK = 0x0f00, +}; + +enum gelic_eurus_scan_sec_wep_type { + GELIC_EURUS_SCAN_SEC_WEP_UNKNOWN = 0x0000, + GELIC_EURUS_SCAN_SEC_WEP_40 = 0x0001, + GELIC_EURUS_SCAN_SEC_WEP_104 = 0x0002, + GELIC_EURUS_SCAN_SEC_WEP_MASK = 0x0003, +}; + +enum gelic_eurus_scan_sec_wpa_type { + GELIC_EURUS_SCAN_SEC_WPA_UNKNOWN = 0x0000, + GELIC_EURUS_SCAN_SEC_WPA_TKIP = 0x0001, + GELIC_EURUS_SCAN_SEC_WPA_AES = 0x0002, + GELIC_EURUS_SCAN_SEC_WPA_MASK = 0x0003, +}; + +/* + * hw BSS information structure returned from GELIC_EURUS_CMD_GET_SCAN + */ +struct gelic_eurus_scan_info { + /* all fields are big endian */ + __be16 size; + __be16 rssi; /* percentage */ + __be16 channel; /* channel number */ + __be16 beacon_period; /* FIXME: in msec unit */ + __be16 capability; + __be16 security; + u8 bssid[8]; /* last ETH_ALEN are valid. 
bssid[0],[1] are unused */ + u8 essid[32]; /* IW_ESSID_MAX_SIZE */ + u8 rate[16]; /* first 12 are valid */ + u8 ext_rate[16]; /* first 16 are valid */ + __be32 reserved1; + __be32 reserved2; + __be32 reserved3; + __be32 reserved4; + u8 elements[0]; /* ie */ +} __packed; + +/* the hypervisor returns bbs up to 16 */ +#define GELIC_EURUS_MAX_SCAN (16) +struct gelic_wl_scan_info { + struct list_head list; + struct gelic_eurus_scan_info *hwinfo; + + int valid; /* set 1 if this entry was in latest scanned list + * from Eurus */ + unsigned int eurus_index; /* index in the Eurus list */ + unsigned long last_scanned; /* acquired time */ + + unsigned int rate_len; + unsigned int rate_ext_len; + unsigned int essid_len; +}; + +/* for GELIC_EURUS_CMD_GET_RSSI */ +struct gelic_eurus_rssi_info { + /* big endian */ + __be16 rssi; +} __packed; + + +/* for 'stat' member of gelic_wl_info */ +enum gelic_wl_info_status_bit { + GELIC_WL_STAT_CONFIGURED, + GELIC_WL_STAT_CH_INFO, /* ch info acquired */ + GELIC_WL_STAT_ESSID_SET, /* ESSID specified by userspace */ + GELIC_WL_STAT_BSSID_SET, /* BSSID specified by userspace */ + GELIC_WL_STAT_WPA_PSK_SET, /* PMK specified by userspace */ + GELIC_WL_STAT_WPA_LEVEL_SET, /* WEP or WPA[2] selected */ +}; + +/* for 'scan_stat' member of gelic_wl_info */ +enum gelic_wl_scan_state { + /* just initialized or get last scan result failed */ + GELIC_WL_SCAN_STAT_INIT, + /* scan request issued, accepted or chip is scanning */ + GELIC_WL_SCAN_STAT_SCANNING, + /* scan results retrieved */ + GELIC_WL_SCAN_STAT_GOT_LIST, +}; + +/* for 'cipher_method' */ +enum gelic_wl_cipher_method { + GELIC_WL_CIPHER_NONE, + GELIC_WL_CIPHER_WEP, + GELIC_WL_CIPHER_TKIP, + GELIC_WL_CIPHER_AES, +}; + +/* for 'wpa_level' */ +enum gelic_wl_wpa_level { + GELIC_WL_WPA_LEVEL_NONE, + GELIC_WL_WPA_LEVEL_WPA, + GELIC_WL_WPA_LEVEL_WPA2, +}; + +/* for 'assoc_stat' */ +enum gelic_wl_assoc_state { + GELIC_WL_ASSOC_STAT_DISCONN, + GELIC_WL_ASSOC_STAT_ASSOCIATING, + GELIC_WL_ASSOC_STAT_ASSOCIATED, +}; +/* part of private data alloc_etherdev() allocated */ +#define GELIC_WEP_KEYS 4 +struct gelic_wl_info { + /* bss list */ + struct mutex scan_lock; + struct list_head network_list; + struct list_head network_free_list; + struct gelic_wl_scan_info *networks; + + unsigned long scan_age; /* last scanned time */ + enum gelic_wl_scan_state scan_stat; + struct completion scan_done; + + /* eurus command queue */ + struct workqueue_struct *eurus_cmd_queue; + struct completion cmd_done_intr; + + /* eurus event handling */ + struct workqueue_struct *event_queue; + struct delayed_work event_work; + + /* wl status bits */ + unsigned long stat; + enum gelic_eurus_auth_method auth_method; /* open/shared */ + enum gelic_wl_cipher_method group_cipher_method; + enum gelic_wl_cipher_method pairwise_cipher_method; + enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */ + + /* association handling */ + struct mutex assoc_stat_lock; + struct delayed_work assoc_work; + enum gelic_wl_assoc_state assoc_stat; + struct completion assoc_done; + + spinlock_t lock; + u16 ch_info; /* available channels. 
bit0 = ch1 */ + /* WEP keys */ + u8 key[GELIC_WEP_KEYS][IW_ENCODING_TOKEN_MAX]; + unsigned long key_enabled; + unsigned int key_len[GELIC_WEP_KEYS]; + unsigned int current_key; + /* WWPA PSK */ + u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; + enum gelic_eurus_wpa_psk_type psk_type; + unsigned int psk_len; + + u8 essid[IW_ESSID_MAX_SIZE]; + u8 bssid[ETH_ALEN]; /* userland requested */ + u8 active_bssid[ETH_ALEN]; /* associated bssid */ + unsigned int essid_len; + + struct iw_public_data wireless_data; + struct iw_statistics iwstat; +}; + +#define GELIC_WL_BSS_MAX_ENT 32 +#define GELIC_WL_ASSOC_RETRY 50 +static inline struct gelic_port *wl_port(struct gelic_wl_info *wl) +{ + return container_of((void *)wl, struct gelic_port, priv); +} +static inline struct gelic_wl_info *port_wl(struct gelic_port *port) +{ + return port_priv(port); +} + +struct gelic_eurus_cmd { + struct work_struct work; + struct gelic_wl_info *wl; + unsigned int cmd; /* command code */ + u64 tag; + u64 size; + void *buffer; + unsigned int buf_size; + struct completion done; + int status; + u64 cmd_status; +}; + +/* private ioctls to pass PSK */ +#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0) +#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1) + +int gelic_wl_driver_probe(struct gelic_card *card); +int gelic_wl_driver_remove(struct gelic_card *card); +void gelic_wl_interrupt(struct net_device *netdev, u64 status); +#endif /* _GELIC_WIRELESS_H */ diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c new file mode 100644 index 000000000..a7d010f72 --- /dev/null +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -0,0 +1,2572 @@ +/* + * Network device driver for Cell Processor-Based Blade and Celleb platform + * + * (C) Copyright IBM Corp. 2005 + * (C) Copyright 2006 TOSHIBA CORPORATION + * + * Authors : Utz Bacher + * Jens Osterkamp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spider_net.h" + +MODULE_AUTHOR("Utz Bacher and Jens Osterkamp " \ + ""); +MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); +/*(DEBLOBBED)*/ + +static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; +static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; + +module_param(rx_descriptors, int, 0444); +module_param(tx_descriptors, int, 0444); + +MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \ + "in rx chains"); +MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \ + "in tx chain"); + +char spider_net_driver_name[] = "spidernet"; + +static const struct pci_device_id spider_net_pci_tbl[] = { + { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl); + +/** + * spider_net_read_reg - reads an SMMIO register of a card + * @card: device structure + * @reg: register to read from + * + * returns the content of the specified SMMIO register. + */ +static inline u32 +spider_net_read_reg(struct spider_net_card *card, u32 reg) +{ + /* We use the powerpc specific variants instead of readl_be() because + * we know spidernet is not a real PCI device and we can thus avoid the + * performance hit caused by the PCI workarounds. + */ + return in_be32(card->regs + reg); +} + +/** + * spider_net_write_reg - writes to an SMMIO register of a card + * @card: device structure + * @reg: register to write to + * @value: value to write into the specified SMMIO register + */ +static inline void +spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) +{ + /* We use the powerpc specific variants instead of writel_be() because + * we know spidernet is not a real PCI device and we can thus avoid the + * performance hit caused by the PCI workarounds. + */ + out_be32(card->regs + reg, value); +} + +/** + * spider_net_write_phy - write to phy register + * @netdev: adapter to be written to + * @mii_id: id of MII + * @reg: PHY register + * @val: value to be written to phy register + * + * spider_net_write_phy_register writes to an arbitrary PHY + * register via the spider GPCWOPCMD register. We assume the queue does + * not run full (not more than 15 commands outstanding). 
+ **/ +static void +spider_net_write_phy(struct net_device *netdev, int mii_id, + int reg, int val) +{ + struct spider_net_card *card = netdev_priv(netdev); + u32 writevalue; + + writevalue = ((u32)mii_id << 21) | + ((u32)reg << 16) | ((u32)val); + + spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue); +} + +/** + * spider_net_read_phy - read from phy register + * @netdev: network device to be read from + * @mii_id: id of MII + * @reg: PHY register + * + * Returns value read from PHY register + * + * spider_net_write_phy reads from an arbitrary PHY + * register via the spider GPCROPCMD register + **/ +static int +spider_net_read_phy(struct net_device *netdev, int mii_id, int reg) +{ + struct spider_net_card *card = netdev_priv(netdev); + u32 readvalue; + + readvalue = ((u32)mii_id << 21) | ((u32)reg << 16); + spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue); + + /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT + * interrupt, as we poll for the completion of the read operation + * in spider_net_read_phy. Should take about 50 us */ + do { + readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD); + } while (readvalue & SPIDER_NET_GPREXEC); + + readvalue &= SPIDER_NET_GPRDAT_MASK; + + return readvalue; +} + +/** + * spider_net_setup_aneg - initial auto-negotiation setup + * @card: device structure + **/ +static void +spider_net_setup_aneg(struct spider_net_card *card) +{ + struct mii_phy *phy = &card->phy; + u32 advertise = 0; + u16 bmsr, estat; + + bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR); + estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS); + + if (bmsr & BMSR_10HALF) + advertise |= ADVERTISED_10baseT_Half; + if (bmsr & BMSR_10FULL) + advertise |= ADVERTISED_10baseT_Full; + if (bmsr & BMSR_100HALF) + advertise |= ADVERTISED_100baseT_Half; + if (bmsr & BMSR_100FULL) + advertise |= ADVERTISED_100baseT_Full; + + if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL)) + advertise |= SUPPORTED_1000baseT_Full; + if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF)) + advertise |= SUPPORTED_1000baseT_Half; + + sungem_phy_probe(phy, phy->mii_id); + phy->def->ops->setup_aneg(phy, advertise); + +} + +/** + * spider_net_rx_irq_off - switch off rx irq on this spider card + * @card: device structure + * + * switches off rx irq by masking them out in the GHIINTnMSK register + */ +static void +spider_net_rx_irq_off(struct spider_net_card *card) +{ + u32 regvalue; + + regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT); + spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); +} + +/** + * spider_net_rx_irq_on - switch on rx irq on this spider card + * @card: device structure + * + * switches on rx irq by enabling them in the GHIINTnMSK register + */ +static void +spider_net_rx_irq_on(struct spider_net_card *card) +{ + u32 regvalue; + + regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT; + spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue); +} + +/** + * spider_net_set_promisc - sets the unicast address or the promiscuous mode + * @card: card structure + * + * spider_net_set_promisc sets the unicast destination address filter and + * thus either allows for non-promisc mode or promisc mode + */ +static void +spider_net_set_promisc(struct spider_net_card *card) +{ + u32 macu, macl; + struct net_device *netdev = card->netdev; + + if (netdev->flags & IFF_PROMISC) { + /* clear destination entry 0 */ + spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0); + spider_net_write_reg(card, 
SPIDER_NET_GMRUAFILnR + 0x04, 0); + spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, + SPIDER_NET_PROMISC_VALUE); + } else { + macu = netdev->dev_addr[0]; + macu <<= 8; + macu |= netdev->dev_addr[1]; + memcpy(&macl, &netdev->dev_addr[2], sizeof(macl)); + + macu |= SPIDER_NET_UA_DESCR_VALUE; + spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu); + spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl); + spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, + SPIDER_NET_NONPROMISC_VALUE); + } +} + +/** + * spider_net_get_descr_status -- returns the status of a descriptor + * @descr: descriptor to look at + * + * returns the status as in the dmac_cmd_status field of the descriptor + */ +static inline int +spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr) +{ + return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; +} + +/** + * spider_net_free_chain - free descriptor chain + * @card: card structure + * @chain: address of chain + * + */ +static void +spider_net_free_chain(struct spider_net_card *card, + struct spider_net_descr_chain *chain) +{ + struct spider_net_descr *descr; + + descr = chain->ring; + do { + descr->bus_addr = 0; + descr->hwdescr->next_descr_addr = 0; + descr = descr->next; + } while (descr != chain->ring); + + dma_free_coherent(&card->pdev->dev, chain->num_desc, + chain->hwring, chain->dma_addr); +} + +/** + * spider_net_init_chain - alloc and link descriptor chain + * @card: card structure + * @chain: address of chain + * + * We manage a circular list that mirrors the hardware structure, + * except that the hardware uses bus addresses. + * + * Returns 0 on success, <0 on failure + */ +static int +spider_net_init_chain(struct spider_net_card *card, + struct spider_net_descr_chain *chain) +{ + int i; + struct spider_net_descr *descr; + struct spider_net_hw_descr *hwdescr; + dma_addr_t buf; + size_t alloc_size; + + alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); + + chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, + &chain->dma_addr, GFP_KERNEL); + if (!chain->hwring) + return -ENOMEM; + + memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); + + /* Set up the hardware pointers in each descriptor */ + descr = chain->ring; + hwdescr = chain->hwring; + buf = chain->dma_addr; + for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) { + hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; + hwdescr->next_descr_addr = 0; + + descr->hwdescr = hwdescr; + descr->bus_addr = buf; + descr->next = descr + 1; + descr->prev = descr - 1; + + buf += sizeof(struct spider_net_hw_descr); + } + /* do actual circular list */ + (descr-1)->next = chain->ring; + chain->ring->prev = descr-1; + + spin_lock_init(&chain->lock); + chain->head = chain->ring; + chain->tail = chain->ring; + return 0; +} + +/** + * spider_net_free_rx_chain_contents - frees descr contents in rx chain + * @card: card structure + * + * returns 0 on success, <0 on failure + */ +static void +spider_net_free_rx_chain_contents(struct spider_net_card *card) +{ + struct spider_net_descr *descr; + + descr = card->rx_chain.head; + do { + if (descr->skb) { + pci_unmap_single(card->pdev, descr->hwdescr->buf_addr, + SPIDER_NET_MAX_FRAME, + PCI_DMA_BIDIRECTIONAL); + dev_kfree_skb(descr->skb); + descr->skb = NULL; + } + descr = descr->next; + } while (descr != card->rx_chain.head); +} + +/** + * spider_net_prepare_rx_descr - Reinitialize RX descriptor + * @card: card structure + * @descr: descriptor to re-init + * + * Return 0 on success, <0 on 
failure. + * + * Allocates a new rx skb, iommu-maps it and attaches it to the + * descriptor. Mark the descriptor as activated, ready-to-use. + */ +static int +spider_net_prepare_rx_descr(struct spider_net_card *card, + struct spider_net_descr *descr) +{ + struct spider_net_hw_descr *hwdescr = descr->hwdescr; + dma_addr_t buf; + int offset; + int bufsize; + + /* we need to round up the buffer size to a multiple of 128 */ + bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) & + (~(SPIDER_NET_RXBUF_ALIGN - 1)); + + /* and we need to have it 128 byte aligned, therefore we allocate a + * bit more */ + /* allocate an skb */ + descr->skb = netdev_alloc_skb(card->netdev, + bufsize + SPIDER_NET_RXBUF_ALIGN - 1); + if (!descr->skb) { + if (netif_msg_rx_err(card) && net_ratelimit()) + dev_err(&card->netdev->dev, + "Not enough memory to allocate rx buffer\n"); + card->spider_stats.alloc_rx_skb_error++; + return -ENOMEM; + } + hwdescr->buf_size = bufsize; + hwdescr->result_size = 0; + hwdescr->valid_size = 0; + hwdescr->data_status = 0; + hwdescr->data_error = 0; + + offset = ((unsigned long)descr->skb->data) & + (SPIDER_NET_RXBUF_ALIGN - 1); + if (offset) + skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); + /* iommu-map the skb */ + buf = pci_map_single(card->pdev, descr->skb->data, + SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(card->pdev, buf)) { + dev_kfree_skb_any(descr->skb); + descr->skb = NULL; + if (netif_msg_rx_err(card) && net_ratelimit()) + dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n"); + card->spider_stats.rx_iommu_map_error++; + hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; + } else { + hwdescr->buf_addr = buf; + wmb(); + hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | + SPIDER_NET_DMAC_NOINTR_COMPLETE; + } + + return 0; +} + +/** + * spider_net_enable_rxchtails - sets RX dmac chain tail addresses + * @card: card structure + * + * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the + * chip by writing to the appropriate register. DMA is enabled in + * spider_net_enable_rxdmac. + */ +static inline void +spider_net_enable_rxchtails(struct spider_net_card *card) +{ + /* assume chain is aligned correctly */ + spider_net_write_reg(card, SPIDER_NET_GDADCHA , + card->rx_chain.tail->bus_addr); +} + +/** + * spider_net_enable_rxdmac - enables a receive DMA controller + * @card: card structure + * + * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN + * in the GDADMACCNTR register + */ +static inline void +spider_net_enable_rxdmac(struct spider_net_card *card) +{ + wmb(); + spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, + SPIDER_NET_DMA_RX_VALUE); +} + +/** + * spider_net_disable_rxdmac - disables the receive DMA controller + * @card: card structure + * + * spider_net_disable_rxdmac terminates processing on the DMA controller + * by turing off the DMA controller, with the force-end flag set. + */ +static inline void +spider_net_disable_rxdmac(struct spider_net_card *card) +{ + spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, + SPIDER_NET_DMA_RX_FEND_VALUE); +} + +/** + * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains + * @card: card structure + * + * refills descriptors in the rx chain: allocates skbs and iommu-maps them. 
+ */ +static void +spider_net_refill_rx_chain(struct spider_net_card *card) +{ + struct spider_net_descr_chain *chain = &card->rx_chain; + unsigned long flags; + + /* one context doing the refill (and a second context seeing that + * and omitting it) is ok. If called by NAPI, we'll be called again + * as spider_net_decode_one_descr is called several times. If some + * interrupt calls us, the NAPI is about to clean up anyway. */ + if (!spin_trylock_irqsave(&chain->lock, flags)) + return; + + while (spider_net_get_descr_status(chain->head->hwdescr) == + SPIDER_NET_DESCR_NOT_IN_USE) { + if (spider_net_prepare_rx_descr(card, chain->head)) + break; + chain->head = chain->head->next; + } + + spin_unlock_irqrestore(&chain->lock, flags); +} + +/** + * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains + * @card: card structure + * + * Returns 0 on success, <0 on failure. + */ +static int +spider_net_alloc_rx_skbs(struct spider_net_card *card) +{ + struct spider_net_descr_chain *chain = &card->rx_chain; + struct spider_net_descr *start = chain->tail; + struct spider_net_descr *descr = start; + + /* Link up the hardware chain pointers */ + do { + descr->prev->hwdescr->next_descr_addr = descr->bus_addr; + descr = descr->next; + } while (descr != start); + + /* Put at least one buffer into the chain. if this fails, + * we've got a problem. If not, spider_net_refill_rx_chain + * will do the rest at the end of this function. */ + if (spider_net_prepare_rx_descr(card, chain->head)) + goto error; + else + chain->head = chain->head->next; + + /* This will allocate the rest of the rx buffers; + * if not, it's business as usual later on. */ + spider_net_refill_rx_chain(card); + spider_net_enable_rxdmac(card); + return 0; + +error: + spider_net_free_rx_chain_contents(card); + return -ENOMEM; +} + +/** + * spider_net_get_multicast_hash - generates hash for multicast filter table + * @addr: multicast address + * + * returns the hash value. + * + * spider_net_get_multicast_hash calculates a hash value for a given multicast + * address, that is used to set the multicast filter tables + */ +static u8 +spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) +{ + u32 crc; + u8 hash; + char addr_for_crc[ETH_ALEN] = { 0, }; + int i, bit; + + for (i = 0; i < ETH_ALEN * 8; i++) { + bit = (addr[i / 8] >> (i % 8)) & 1; + addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8)); + } + + crc = crc32_be(~0, addr_for_crc, netdev->addr_len); + + hash = (crc >> 27); + hash <<= 3; + hash |= crc & 7; + hash &= 0xff; + + return hash; +} + +/** + * spider_net_set_multi - sets multicast addresses and promisc flags + * @netdev: interface device structure + * + * spider_net_set_multi configures multicast addresses as needed for the + * netdev interface. 
It also sets up multicast, allmulti and promisc + * flags appropriately + */ +static void +spider_net_set_multi(struct net_device *netdev) +{ + struct netdev_hw_addr *ha; + u8 hash; + int i; + u32 reg; + struct spider_net_card *card = netdev_priv(netdev); + unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] = + {0, }; + + spider_net_set_promisc(card); + + if (netdev->flags & IFF_ALLMULTI) { + for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) { + set_bit(i, bitmask); + } + goto write_hash; + } + + /* well, we know, what the broadcast hash value is: it's xfd + hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ + set_bit(0xfd, bitmask); + + netdev_for_each_mc_addr(ha, netdev) { + hash = spider_net_get_multicast_hash(netdev, ha->addr); + set_bit(hash, bitmask); + } + +write_hash: + for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) { + reg = 0; + if (test_bit(i * 4, bitmask)) + reg += 0x08; + reg <<= 8; + if (test_bit(i * 4 + 1, bitmask)) + reg += 0x08; + reg <<= 8; + if (test_bit(i * 4 + 2, bitmask)) + reg += 0x08; + reg <<= 8; + if (test_bit(i * 4 + 3, bitmask)) + reg += 0x08; + + spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg); + } +} + +/** + * spider_net_prepare_tx_descr - fill tx descriptor with skb data + * @card: card structure + * @skb: packet to use + * + * returns 0 on success, <0 on failure. + * + * fills out the descriptor structure with skb data and len. Copies data, + * if needed (32bit DMA!) + */ +static int +spider_net_prepare_tx_descr(struct spider_net_card *card, + struct sk_buff *skb) +{ + struct spider_net_descr_chain *chain = &card->tx_chain; + struct spider_net_descr *descr; + struct spider_net_hw_descr *hwdescr; + dma_addr_t buf; + unsigned long flags; + + buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(card->pdev, buf)) { + if (netif_msg_tx_err(card) && net_ratelimit()) + dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " + "Dropping packet\n", skb->data, skb->len); + card->spider_stats.tx_iommu_map_error++; + return -ENOMEM; + } + + spin_lock_irqsave(&chain->lock, flags); + descr = card->tx_chain.head; + if (descr->next == chain->tail->prev) { + spin_unlock_irqrestore(&chain->lock, flags); + pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE); + return -ENOMEM; + } + hwdescr = descr->hwdescr; + chain->head = descr->next; + + descr->skb = skb; + hwdescr->buf_addr = buf; + hwdescr->buf_size = skb->len; + hwdescr->next_descr_addr = 0; + hwdescr->data_status = 0; + + hwdescr->dmac_cmd_status = + SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL; + spin_unlock_irqrestore(&chain->lock, flags); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; + break; + case IPPROTO_UDP: + hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; + break; + } + + /* Chain the bus address, so that the DMA engine finds this descr. */ + wmb(); + descr->prev->hwdescr->next_descr_addr = descr->bus_addr; + + card->netdev->trans_start = jiffies; /* set netdev watchdog timer */ + return 0; +} + +static int +spider_net_set_low_watermark(struct spider_net_card *card) +{ + struct spider_net_descr *descr = card->tx_chain.tail; + struct spider_net_hw_descr *hwdescr; + unsigned long flags; + int status; + int cnt=0; + int i; + + /* Measure the length of the queue. Measurement does not + * need to be precise -- does not need a lock. 
 */
+	while (descr != card->tx_chain.head) {
+		status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
+		if (status == SPIDER_NET_DESCR_NOT_IN_USE)
+			break;
+		descr = descr->next;
+		cnt++;
+	}
+
+	/* If TX queue is short, don't even bother with interrupts */
+	if (cnt < card->tx_chain.num_desc/4)
+		return cnt;
+
+	/* Set low-watermark 3/4th's of the way into the queue. */
+	descr = card->tx_chain.tail;
+	cnt = (cnt*3)/4;
+	for (i = 0; i < cnt; i++)
+		descr = descr->next;
+
+	/* Set the new watermark, clear the old watermark */
+	spin_lock_irqsave(&card->tx_chain.lock, flags);
+	descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
+	if (card->low_watermark && card->low_watermark != descr) {
+		hwdescr = card->low_watermark->hwdescr;
+		hwdescr->dmac_cmd_status =
+			hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
+	}
+	card->low_watermark = descr;
+	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+	return cnt;
+}
+
+/**
+ * spider_net_release_tx_chain - processes sent tx descriptors
+ * @card: adapter structure
+ * @brutal: if set, don't care about whether descriptor seems to be in use
+ *
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply releases tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not lose initiative.
+ */
+static int
+spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
+{
+	struct net_device *dev = card->netdev;
+	struct spider_net_descr_chain *chain = &card->tx_chain;
+	struct spider_net_descr *descr;
+	struct spider_net_hw_descr *hwdescr;
+	struct sk_buff *skb;
+	u32 buf_addr;
+	unsigned long flags;
+	int status;
+
+	while (1) {
+		spin_lock_irqsave(&chain->lock, flags);
+		if (chain->tail == chain->head) {
+			spin_unlock_irqrestore(&chain->lock, flags);
+			return 0;
+		}
+		descr = chain->tail;
+		hwdescr = descr->hwdescr;
+
+		status = spider_net_get_descr_status(hwdescr);
+		switch (status) {
+		case SPIDER_NET_DESCR_COMPLETE:
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += descr->skb->len;
+			break;
+
+		case SPIDER_NET_DESCR_CARDOWNED:
+			if (!brutal) {
+				spin_unlock_irqrestore(&chain->lock, flags);
+				return 1;
+			}
+
+			/* fallthrough, if we release the descriptors
+			 * brutally (then we don't care about
+			 * SPIDER_NET_DESCR_CARDOWNED) */
+
+		case SPIDER_NET_DESCR_RESPONSE_ERROR:
+		case SPIDER_NET_DESCR_PROTECTION_ERROR:
+		case SPIDER_NET_DESCR_FORCE_END:
+			if (netif_msg_tx_err(card))
+				dev_err(&card->netdev->dev, "forcing end of tx descriptor "
+					"with status x%02x\n", status);
+			dev->stats.tx_errors++;
+			break;
+
+		default:
+			dev->stats.tx_dropped++;
+			if (!brutal) {
+				spin_unlock_irqrestore(&chain->lock, flags);
+				return 1;
+			}
+		}
+
+		chain->tail = descr->next;
+		hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+		skb = descr->skb;
+		descr->skb = NULL;
+		buf_addr = hwdescr->buf_addr;
+		spin_unlock_irqrestore(&chain->lock, flags);
+
+		/* unmap the skb */
+		if (skb) {
+			pci_unmap_single(card->pdev, buf_addr, skb->len,
+					 PCI_DMA_TODEVICE);
+			dev_consume_skb_any(skb);
+		}
+	}
+	return 0;
+}
+
+/**
+ * spider_net_kick_tx_dma - enables TX DMA processing
+ * @card: card structure
+ *
+ * This routine will start the transmit DMA running if
+ * it is not already running. This routine need only be
+ * called when queueing a new packet to an empty tx queue.
+ * Writes the current tx chain head as start address + * of the tx descriptor chain and enables the transmission + * DMA engine. + */ +static inline void +spider_net_kick_tx_dma(struct spider_net_card *card) +{ + struct spider_net_descr *descr; + + if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) & + SPIDER_NET_TX_DMA_EN) + goto out; + + descr = card->tx_chain.tail; + for (;;) { + if (spider_net_get_descr_status(descr->hwdescr) == + SPIDER_NET_DESCR_CARDOWNED) { + spider_net_write_reg(card, SPIDER_NET_GDTDCHA, + descr->bus_addr); + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, + SPIDER_NET_DMA_TX_VALUE); + break; + } + if (descr == card->tx_chain.head) + break; + descr = descr->next; + } + +out: + mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); +} + +/** + * spider_net_xmit - transmits a frame over the device + * @skb: packet to send out + * @netdev: interface device structure + * + * returns 0 on success, !0 on failure + */ +static int +spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + int cnt; + struct spider_net_card *card = netdev_priv(netdev); + + spider_net_release_tx_chain(card, 0); + + if (spider_net_prepare_tx_descr(card, skb) != 0) { + netdev->stats.tx_dropped++; + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + + cnt = spider_net_set_low_watermark(card); + if (cnt < 5) + spider_net_kick_tx_dma(card); + return NETDEV_TX_OK; +} + +/** + * spider_net_cleanup_tx_ring - cleans up the TX ring + * @card: card structure + * + * spider_net_cleanup_tx_ring is called by either the tx_timer + * or from the NAPI polling routine. + * This routine releases resources associted with transmitted + * packets, including updating the queue tail pointer. + */ +static void +spider_net_cleanup_tx_ring(struct spider_net_card *card) +{ + if ((spider_net_release_tx_chain(card, 0) != 0) && + (card->netdev->flags & IFF_UP)) { + spider_net_kick_tx_dma(card); + netif_wake_queue(card->netdev); + } +} + +/** + * spider_net_do_ioctl - called for device ioctls + * @netdev: interface device structure + * @ifr: request parameter structure for ioctl + * @cmd: command code for ioctl + * + * returns 0 on success, <0 on failure. Currently, we have no special ioctls. + * -EOPNOTSUPP is returned, if an unknown ioctl was requested + */ +static int +spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + default: + return -EOPNOTSUPP; + } +} + +/** + * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on + * @descr: descriptor to process + * @card: card structure + * + * Fills out skb structure and passes the data to the stack. + * The descriptor state is not changed. 
+ */ +static void +spider_net_pass_skb_up(struct spider_net_descr *descr, + struct spider_net_card *card) +{ + struct spider_net_hw_descr *hwdescr = descr->hwdescr; + struct sk_buff *skb = descr->skb; + struct net_device *netdev = card->netdev; + u32 data_status = hwdescr->data_status; + u32 data_error = hwdescr->data_error; + + skb_put(skb, hwdescr->valid_size); + + /* the card seems to add 2 bytes of junk in front + * of the ethernet frame */ +#define SPIDER_MISALIGN 2 + skb_pull(skb, SPIDER_MISALIGN); + skb->protocol = eth_type_trans(skb, netdev); + + /* checksum offload */ + skb_checksum_none_assert(skb); + if (netdev->features & NETIF_F_RXCSUM) { + if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) == + SPIDER_NET_DATA_STATUS_CKSUM_MASK) && + !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + if (data_status & SPIDER_NET_VLAN_PACKET) { + /* further enhancements: HW-accel VLAN */ + } + + /* update netdevice statistics */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + + /* pass skb up to stack */ + netif_receive_skb(skb); +} + +static void show_rx_chain(struct spider_net_card *card) +{ + struct spider_net_descr_chain *chain = &card->rx_chain; + struct spider_net_descr *start= chain->tail; + struct spider_net_descr *descr= start; + struct spider_net_hw_descr *hwd = start->hwdescr; + struct device *dev = &card->netdev->dev; + u32 curr_desc, next_desc; + int status; + + int tot = 0; + int cnt = 0; + int off = start - chain->ring; + int cstat = hwd->dmac_cmd_status; + + dev_info(dev, "Total number of descrs=%d\n", + chain->num_desc); + dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n", + off, cstat); + + curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA); + next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA); + + status = cstat; + do + { + hwd = descr->hwdescr; + off = descr - chain->ring; + status = hwd->dmac_cmd_status; + + if (descr == chain->head) + dev_info(dev, "Chain head is at %d, head status=0x%x\n", + off, status); + + if (curr_desc == descr->bus_addr) + dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n", + off, status); + + if (next_desc == descr->bus_addr) + dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n", + off, status); + + if (hwd->next_descr_addr == 0) + dev_info(dev, "chain is cut at %d\n", off); + + if (cstat != status) { + int from = (chain->num_desc + off - cnt) % chain->num_desc; + int to = (chain->num_desc + off - 1) % chain->num_desc; + dev_info(dev, "Have %d (from %d to %d) descrs " + "with stat=0x%08x\n", cnt, from, to, cstat); + cstat = status; + cnt = 0; + } + + cnt ++; + tot ++; + descr = descr->next; + } while (descr != start); + + dev_info(dev, "Last %d descrs with stat=0x%08x " + "for a total of %d descrs\n", cnt, cstat, tot); + +#ifdef DEBUG + /* Now dump the whole ring */ + descr = start; + do + { + struct spider_net_hw_descr *hwd = descr->hwdescr; + status = spider_net_get_descr_status(hwd); + cnt = descr - chain->ring; + dev_info(dev, "Descr %d stat=0x%08x skb=%p\n", + cnt, status, descr->skb); + dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n", + descr->bus_addr, hwd->buf_addr, hwd->buf_size); + dev_info(dev, "next=%08x result sz=%d valid sz=%d\n", + hwd->next_descr_addr, hwd->result_size, + hwd->valid_size); + dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n", + hwd->dmac_cmd_status, hwd->data_status, + hwd->data_error); + dev_info(dev, "\n"); + + descr = descr->next; + } while (descr != start); +#endif + +} + 
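+/*
+ * Editor's note: the helper below is an illustrative sketch only and is
+ * not part of the original driver (the name
+ * spider_net_count_cardowned_descrs is hypothetical).  It shows the
+ * canonical way the circular descriptor ring built in
+ * spider_net_init_chain() is walked: start at any descriptor, follow
+ * ->next, and stop when the starting descriptor comes around again.
+ * The resync helpers that follow use the same walk, but bound it by
+ * chain->num_desc instead of the start pointer.  Like show_rx_chain()
+ * above, it reads the ring without taking chain->lock, so it is only
+ * suitable as a debugging aid.
+ */
+static void __maybe_unused
+spider_net_count_cardowned_descrs(struct spider_net_card *card)
+{
+	struct spider_net_descr_chain *chain = &card->rx_chain;
+	struct spider_net_descr *descr = chain->head;
+	int cardowned = 0;
+
+	/* one full lap around the ring, counting card-owned descriptors */
+	do {
+		if (spider_net_get_descr_status(descr->hwdescr) ==
+		    SPIDER_NET_DESCR_CARDOWNED)
+			cardowned++;
+		descr = descr->next;
+	} while (descr != chain->head);
+
+	dev_info(&card->netdev->dev, "%d of %d rx descrs are card-owned\n",
+		 cardowned, chain->num_desc);
+}
+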
+/**
+ * spider_net_resync_head_ptr - Advance head ptr past empty descrs
+ * @card: card structure
+ *
+ * If the driver fails to keep up and empty the queue, then the
+ * hardware will run out of room to put incoming packets. This
+ * will cause the hardware to skip descrs that are full (instead
+ * of halting/retrying). Thus, once the driver runs, it will need
+ * to "catch up" to where the hardware chain pointer is at.
+ */
+static void spider_net_resync_head_ptr(struct spider_net_card *card)
+{
+	unsigned long flags;
+	struct spider_net_descr_chain *chain = &card->rx_chain;
+	struct spider_net_descr *descr;
+	int i, status;
+
+	/* Advance head pointer past any empty descrs */
+	descr = chain->head;
+	status = spider_net_get_descr_status(descr->hwdescr);
+
+	if (status == SPIDER_NET_DESCR_NOT_IN_USE)
+		return;
+
+	spin_lock_irqsave(&chain->lock, flags);
+
+	descr = chain->head;
+	status = spider_net_get_descr_status(descr->hwdescr);
+	for (i = 0; i < chain->num_desc; i++) {
+		if (status != SPIDER_NET_DESCR_CARDOWNED)
+			break;
+		descr = descr->next;
+		status = spider_net_get_descr_status(descr->hwdescr);
+	}
+	chain->head = descr;
+
+	spin_unlock_irqrestore(&chain->lock, flags);
+}
+
+static int spider_net_resync_tail_ptr(struct spider_net_card *card)
+{
+	struct spider_net_descr_chain *chain = &card->rx_chain;
+	struct spider_net_descr *descr;
+	int i, status;
+
+	/* Advance tail pointer past any empty and reaped descrs */
+	descr = chain->tail;
+	status = spider_net_get_descr_status(descr->hwdescr);
+
+	for (i = 0; i < chain->num_desc; i++) {
+		if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
+		    (status != SPIDER_NET_DESCR_NOT_IN_USE))
+			break;
+		descr = descr->next;
+		status = spider_net_get_descr_status(descr->hwdescr);
+	}
+	chain->tail = descr;
+
+	if ((i == chain->num_desc) || (i == 0))
+		return 1;
+	return 0;
+}
+
+/**
+ * spider_net_decode_one_descr - processes an RX descriptor
+ * @card: card structure
+ *
+ * Returns 1 if a packet has been sent to the stack, otherwise 0.
+ *
+ * Processes an RX descriptor by iommu-unmapping the data buffer
+ * and passing the packet up to the stack. This function is called
+ * in softirq context, e.g. either bottom half from interrupt or
+ * NAPI polling context.
+ */ +static int +spider_net_decode_one_descr(struct spider_net_card *card) +{ + struct net_device *dev = card->netdev; + struct spider_net_descr_chain *chain = &card->rx_chain; + struct spider_net_descr *descr = chain->tail; + struct spider_net_hw_descr *hwdescr = descr->hwdescr; + u32 hw_buf_addr; + int status; + + status = spider_net_get_descr_status(hwdescr); + + /* Nothing in the descriptor, or ring must be empty */ + if ((status == SPIDER_NET_DESCR_CARDOWNED) || + (status == SPIDER_NET_DESCR_NOT_IN_USE)) + return 0; + + /* descriptor definitively used -- move on tail */ + chain->tail = descr->next; + + /* unmap descriptor */ + hw_buf_addr = hwdescr->buf_addr; + hwdescr->buf_addr = 0xffffffff; + pci_unmap_single(card->pdev, hw_buf_addr, + SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); + + if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || + (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || + (status == SPIDER_NET_DESCR_FORCE_END) ) { + if (netif_msg_rx_err(card)) + dev_err(&dev->dev, + "dropping RX descriptor with state %d\n", status); + dev->stats.rx_dropped++; + goto bad_desc; + } + + if ( (status != SPIDER_NET_DESCR_COMPLETE) && + (status != SPIDER_NET_DESCR_FRAME_END) ) { + if (netif_msg_rx_err(card)) + dev_err(&card->netdev->dev, + "RX descriptor with unknown state %d\n", status); + card->spider_stats.rx_desc_unk_state++; + goto bad_desc; + } + + /* The cases we'll throw away the packet immediately */ + if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) { + if (netif_msg_rx_err(card)) + dev_err(&card->netdev->dev, + "error in received descriptor found, " + "data_status=x%08x, data_error=x%08x\n", + hwdescr->data_status, hwdescr->data_error); + goto bad_desc; + } + + if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) { + dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n", + hwdescr->dmac_cmd_status); + pr_err("buf_addr=x%08x\n", hw_buf_addr); + pr_err("buf_size=x%08x\n", hwdescr->buf_size); + pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr); + pr_err("result_size=x%08x\n", hwdescr->result_size); + pr_err("valid_size=x%08x\n", hwdescr->valid_size); + pr_err("data_status=x%08x\n", hwdescr->data_status); + pr_err("data_error=x%08x\n", hwdescr->data_error); + pr_err("which=%ld\n", descr - card->rx_chain.ring); + + card->spider_stats.rx_desc_error++; + goto bad_desc; + } + + /* Ok, we've got a packet in descr */ + spider_net_pass_skb_up(descr, card); + descr->skb = NULL; + hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; + return 1; + +bad_desc: + if (netif_msg_rx_err(card)) + show_rx_chain(card); + dev_kfree_skb_irq(descr->skb); + descr->skb = NULL; + hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; + return 0; +} + +/** + * spider_net_poll - NAPI poll function called by the stack to return packets + * @netdev: interface device structure + * @budget: number of packets we can pass to the stack at most + * + * returns 0 if no more packets available to the driver/stack. Returns 1, + * if the quota is exceeded, but the driver has still packets. + * + * spider_net_poll returns all packets from the rx descriptors to the stack + * (using netif_receive_skb). If all/enough packets are up, the driver + * reenables interrupts and returns 0. If not, 1 is returned. 
+ */ +static int spider_net_poll(struct napi_struct *napi, int budget) +{ + struct spider_net_card *card = container_of(napi, struct spider_net_card, napi); + int packets_done = 0; + + while (packets_done < budget) { + if (!spider_net_decode_one_descr(card)) + break; + + packets_done++; + } + + if ((packets_done == 0) && (card->num_rx_ints != 0)) { + if (!spider_net_resync_tail_ptr(card)) + packets_done = budget; + spider_net_resync_head_ptr(card); + } + card->num_rx_ints = 0; + + spider_net_refill_rx_chain(card); + spider_net_enable_rxdmac(card); + + spider_net_cleanup_tx_ring(card); + + /* if all packets are in the stack, enable interrupts and return 0 */ + /* if not, return 1 */ + if (packets_done < budget) { + napi_complete(napi); + spider_net_rx_irq_on(card); + card->ignore_rx_ramfull = 0; + } + + return packets_done; +} + +/** + * spider_net_change_mtu - changes the MTU of an interface + * @netdev: interface device structure + * @new_mtu: new MTU value + * + * returns 0 on success, <0 on failure + */ +static int +spider_net_change_mtu(struct net_device *netdev, int new_mtu) +{ + /* no need to re-alloc skbs or so -- the max mtu is about 2.3k + * and mtu is outbound only anyway */ + if ( (new_mtu < SPIDER_NET_MIN_MTU ) || + (new_mtu > SPIDER_NET_MAX_MTU) ) + return -EINVAL; + netdev->mtu = new_mtu; + return 0; +} + +/** + * spider_net_set_mac - sets the MAC of an interface + * @netdev: interface device structure + * @ptr: pointer to new MAC address + * + * Returns 0 on success, <0 on failure. Currently, we don't support this + * and will always return EOPNOTSUPP. + */ +static int +spider_net_set_mac(struct net_device *netdev, void *p) +{ + struct spider_net_card *card = netdev_priv(netdev); + u32 macl, macu, regvalue; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); + + /* switch off GMACTPE and GMACRPE */ + regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); + regvalue &= ~((1 << 5) | (1 << 6)); + spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); + + /* write mac */ + macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) + + (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]); + macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]); + spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu); + spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl); + + /* switch GMACTPE and GMACRPE back on */ + regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD); + regvalue |= ((1 << 5) | (1 << 6)); + spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue); + + spider_net_set_promisc(card); + + return 0; +} + +/** + * spider_net_link_reset + * @netdev: net device structure + * + * This is called when the PHY_LINK signal is asserted. For the blade this is + * not connected so we should never get here. 
+ *
+ */
+static void
+spider_net_link_reset(struct net_device *netdev)
+{
+
+	struct spider_net_card *card = netdev_priv(netdev);
+
+	del_timer_sync(&card->aneg_timer);
+
+	/* clear interrupt, block further interrupts */
+	spider_net_write_reg(card, SPIDER_NET_GMACST,
+			     spider_net_read_reg(card, SPIDER_NET_GMACST));
+	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
+
+	/* reset phy and setup aneg */
+	card->aneg_count = 0;
+	card->medium = BCM54XX_COPPER;
+	spider_net_setup_aneg(card);
+	mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
+
+}
+
+/**
+ * spider_net_handle_error_irq - handles errors raised by an interrupt
+ * @card: card structure
+ * @status_reg: interrupt status register 0 (GHIINT0STS)
+ * @error_reg1: interrupt status register 1 (GHIINT1STS)
+ * @error_reg2: interrupt status register 2 (GHIINT2STS)
+ *
+ * spider_net_handle_error_irq treats or ignores all error conditions
+ * found when an interrupt is presented
+ */
+static void
+spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
+			    u32 error_reg1, u32 error_reg2)
+{
+	u32 i;
+	int show_error = 1;
+
+	/* check GHIINT0STS ************************************/
+	if (status_reg)
+		for (i = 0; i < 32; i++)
+			if (status_reg & (1 << i))
+				switch (i)
+	{
+	case SPIDER_NET_GPWFFINT:
+		/* PHY command queue full */
+		if (netif_msg_intr(card))
+			dev_err(&card->netdev->dev, "PHY write queue full\n");
+		show_error = 0;
+		break;
+
+	/* case SPIDER_NET_GRMDADRINT: not used. print a message */
+	/* case SPIDER_NET_GRMARPINT: not used. print a message */
+	/* case SPIDER_NET_GRMMPINT: not used. print a message */
+
+	case SPIDER_NET_GDTDEN0INT:
+		/* someone has set TX_DMA_EN to 0 */
+		show_error = 0;
+		break;
+
+	case SPIDER_NET_GDDDEN0INT: /* fallthrough */
+	case SPIDER_NET_GDCDEN0INT: /* fallthrough */
+	case SPIDER_NET_GDBDEN0INT: /* fallthrough */
+	case SPIDER_NET_GDADEN0INT:
+		/* someone has set RX_DMA_EN to 0 */
+		show_error = 0;
+		break;
+
+	/* RX interrupts */
+	case SPIDER_NET_GDDFDCINT:
+	case SPIDER_NET_GDCFDCINT:
+	case SPIDER_NET_GDBFDCINT:
+	case SPIDER_NET_GDAFDCINT:
+	/* case SPIDER_NET_GDNMINT: not used. print a message */
+	/* case SPIDER_NET_GCNMINT: not used. print a message */
+	/* case SPIDER_NET_GBNMINT: not used. print a message */
+	/* case SPIDER_NET_GANMINT: not used. print a message */
+	/* case SPIDER_NET_GRFNMINT: not used. print a message */
+		show_error = 0;
+		break;
+
+	/* TX interrupts */
+	case SPIDER_NET_GDTFDCINT:
+		show_error = 0;
+		break;
+	case SPIDER_NET_GTTEDINT:
+		show_error = 0;
+		break;
+	case SPIDER_NET_GDTDCEINT:
+		/* chain end. If a descriptor should be sent, kick off
+		 * tx dma
+		if (card->tx_chain.tail != card->tx_chain.head)
+			spider_net_kick_tx_dma(card);
+		*/
+		show_error = 0;
+		break;
+
+	/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
+	/* case SPIDER_NET_GFREECNTINT: not used. print a message */
+	}
+
+	/* check GHIINT1STS ************************************/
+	if (error_reg1)
+		for (i = 0; i < 32; i++)
+			if (error_reg1 & (1 << i))
+				switch (i)
+	{
+	case SPIDER_NET_GRFDFLLINT: /* fallthrough */
+	case SPIDER_NET_GRFCFLLINT: /* fallthrough */
+	case SPIDER_NET_GRFBFLLINT: /* fallthrough */
+	case SPIDER_NET_GRFAFLLINT: /* fallthrough */
+	case SPIDER_NET_GRMFLLINT:
+		/* Could happen when rx chain is full */
+		if (card->ignore_rx_ramfull == 0) {
+			card->ignore_rx_ramfull = 1;
+			spider_net_resync_head_ptr(card);
+			spider_net_refill_rx_chain(card);
+			spider_net_enable_rxdmac(card);
+			card->num_rx_ints ++;
+			napi_schedule(&card->napi);
+		}
+		show_error = 0;
+		break;
+
+	/* case SPIDER_NET_GTMSHTINT: problem, print a message */
+	case SPIDER_NET_GDTINVDINT:
+		/* allrighty. tx from previous descr ok */
+		show_error = 0;
+		break;
+
+	/* chain end */
+	case SPIDER_NET_GDDDCEINT: /* fallthrough */
+	case SPIDER_NET_GDCDCEINT: /* fallthrough */
+	case SPIDER_NET_GDBDCEINT: /* fallthrough */
+	case SPIDER_NET_GDADCEINT:
+		spider_net_resync_head_ptr(card);
+		spider_net_refill_rx_chain(card);
+		spider_net_enable_rxdmac(card);
+		card->num_rx_ints ++;
+		napi_schedule(&card->napi);
+		show_error = 0;
+		break;
+
+	/* invalid descriptor */
+	case SPIDER_NET_GDDINVDINT: /* fallthrough */
+	case SPIDER_NET_GDCINVDINT: /* fallthrough */
+	case SPIDER_NET_GDBINVDINT: /* fallthrough */
+	case SPIDER_NET_GDAINVDINT:
+		/* Could happen when rx chain is full */
+		spider_net_resync_head_ptr(card);
+		spider_net_refill_rx_chain(card);
+		spider_net_enable_rxdmac(card);
+		card->num_rx_ints ++;
+		napi_schedule(&card->napi);
+		show_error = 0;
+		break;
+
+	/* case SPIDER_NET_GDTRSERINT: problem, print a message */
+	/* case SPIDER_NET_GDDRSERINT: problem, print a message */
+	/* case SPIDER_NET_GDCRSERINT: problem, print a message */
+	/* case SPIDER_NET_GDBRSERINT: problem, print a message */
+	/* case SPIDER_NET_GDARSERINT: problem, print a message */
+	/* case SPIDER_NET_GDSERINT: problem, print a message */
+	/* case SPIDER_NET_GDTPTERINT: problem, print a message */
+	/* case SPIDER_NET_GDDPTERINT: problem, print a message */
+	/* case SPIDER_NET_GDCPTERINT: problem, print a message */
+	/* case SPIDER_NET_GDBPTERINT: problem, print a message */
+	/* case SPIDER_NET_GDAPTERINT: problem, print a message */
+	default:
+		show_error = 1;
+		break;
+	}
+
+	/* check GHIINT2STS ************************************/
+	if (error_reg2)
+		for (i = 0; i < 32; i++)
+			if (error_reg2 & (1 << i))
+				switch (i)
+	{
+	default:
+		break;
+	}
+
+	if ((show_error) && (netif_msg_intr(card)))
+		dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
+			"GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
+			status_reg, error_reg1, error_reg2);
+
+	/* clear interrupt sources */
+	spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
+	spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
+}
+
+/**
+ * spider_net_interrupt - interrupt handler for spider_net
+ * @irq: interrupt number
+ * @ptr: pointer to net_device
+ *
+ * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
+ * interrupt found raised by card.
+ * + * This is the interrupt handler, that turns off + * interrupts for this device and makes the stack poll the driver + */ +static irqreturn_t +spider_net_interrupt(int irq, void *ptr) +{ + struct net_device *netdev = ptr; + struct spider_net_card *card = netdev_priv(netdev); + u32 status_reg, error_reg1, error_reg2; + + status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); + error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS); + error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS); + + if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) && + !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) && + !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE)) + return IRQ_NONE; + + if (status_reg & SPIDER_NET_RXINT ) { + spider_net_rx_irq_off(card); + napi_schedule(&card->napi); + card->num_rx_ints ++; + } + if (status_reg & SPIDER_NET_TXINT) + napi_schedule(&card->napi); + + if (status_reg & SPIDER_NET_LINKINT) + spider_net_link_reset(netdev); + + if (status_reg & SPIDER_NET_ERRINT ) + spider_net_handle_error_irq(card, status_reg, + error_reg1, error_reg2); + + /* clear interrupt sources */ + spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * spider_net_poll_controller - artificial interrupt for netconsole etc. + * @netdev: interface device structure + * + * see Documentation/networking/netconsole.txt + */ +static void +spider_net_poll_controller(struct net_device *netdev) +{ + disable_irq(netdev->irq); + spider_net_interrupt(netdev->irq, netdev); + enable_irq(netdev->irq); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +/** + * spider_net_enable_interrupts - enable interrupts + * @card: card structure + * + * spider_net_enable_interrupt enables several interrupts + */ +static void +spider_net_enable_interrupts(struct spider_net_card *card) +{ + spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, + SPIDER_NET_INT0_MASK_VALUE); + spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, + SPIDER_NET_INT1_MASK_VALUE); + spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, + SPIDER_NET_INT2_MASK_VALUE); +} + +/** + * spider_net_disable_interrupts - disable interrupts + * @card: card structure + * + * spider_net_disable_interrupts disables all the interrupts + */ +static void +spider_net_disable_interrupts(struct spider_net_card *card) +{ + spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); + spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); + spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); + spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); +} + +/** + * spider_net_init_card - initializes the card + * @card: card structure + * + * spider_net_init_card initializes the card so that other registers can + * be used + */ +static void +spider_net_init_card(struct spider_net_card *card) +{ + spider_net_write_reg(card, SPIDER_NET_CKRCTRL, + SPIDER_NET_CKRCTRL_STOP_VALUE); + + spider_net_write_reg(card, SPIDER_NET_CKRCTRL, + SPIDER_NET_CKRCTRL_RUN_VALUE); + + /* trigger ETOMOD signal */ + spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, + spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4); + + spider_net_disable_interrupts(card); +} + +/** + * spider_net_enable_card - enables the card by setting all kinds of regs + * @card: card structure + * + * spider_net_enable_card sets a lot of SMMIO registers to enable the device + */ +static void +spider_net_enable_card(struct spider_net_card *card) +{ + int i; + /* the following array consists of (register),(value) pairs + * that are set in this function. 
A register of 0 ends the list */ + u32 regs[][2] = { + { SPIDER_NET_GRESUMINTNUM, 0 }, + { SPIDER_NET_GREINTNUM, 0 }, + + /* set interrupt frame number registers */ + /* clear the single DMA engine registers first */ + { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, + { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, + { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, + { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE }, + /* then set, what we really need */ + { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE }, + + /* timer counter registers and stuff */ + { SPIDER_NET_GFREECNNUM, 0 }, + { SPIDER_NET_GONETIMENUM, 0 }, + { SPIDER_NET_GTOUTFRMNUM, 0 }, + + /* RX mode setting */ + { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE }, + /* TX mode setting */ + { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE }, + /* IPSEC mode setting */ + { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE }, + + { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE }, + + { SPIDER_NET_GMRWOLCTRL, 0 }, + { SPIDER_NET_GTESTMD, 0x10000000 }, + { SPIDER_NET_GTTQMSK, 0x00400040 }, + + { SPIDER_NET_GMACINTEN, 0 }, + + /* flow control stuff */ + { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE }, + { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE }, + + { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE }, + { 0, 0} + }; + + i = 0; + while (regs[i][0]) { + spider_net_write_reg(card, regs[i][0], regs[i][1]); + i++; + } + + /* clear unicast filter table entries 1 to 14 */ + for (i = 1; i <= 14; i++) { + spider_net_write_reg(card, + SPIDER_NET_GMRUAFILnR + i * 8, + 0x00080000); + spider_net_write_reg(card, + SPIDER_NET_GMRUAFILnR + i * 8 + 4, + 0x00000000); + } + + spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000); + + spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE); + + /* set chain tail address for RX chains and + * enable DMA */ + spider_net_enable_rxchtails(card); + spider_net_enable_rxdmac(card); + + spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); + + spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, + SPIDER_NET_LENLMT_VALUE); + spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, + SPIDER_NET_OPMODE_VALUE); + + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, + SPIDER_NET_GDTBSTA); +} + +/** + * spider_net_download_firmware - loads firmware into the adapter + * @card: card structure + * @firmware_ptr: pointer to firmware data + * + * spider_net_download_firmware loads the firmware data into the + * adapter. It assumes the length etc. to be allright. + */ +static int +spider_net_download_firmware(struct spider_net_card *card, + const void *firmware_ptr) +{ + int sequencer, i; + const u32 *fw_ptr = firmware_ptr; + + /* stop sequencers */ + spider_net_write_reg(card, SPIDER_NET_GSINIT, + SPIDER_NET_STOP_SEQ_VALUE); + + for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; + sequencer++) { + spider_net_write_reg(card, + SPIDER_NET_GSnPRGADR + sequencer * 8, 0); + for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { + spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + + sequencer * 8, *fw_ptr); + fw_ptr++; + } + } + + if (spider_net_read_reg(card, SPIDER_NET_GSINIT)) + return -EIO; + + spider_net_write_reg(card, SPIDER_NET_GSINIT, + SPIDER_NET_RUN_SEQ_VALUE); + + return 0; +} + +/** + * spider_net_init_firmware - reads in firmware parts + * @card: card structure + * + * Returns 0 on success, <0 on failure + * + * spider_net_init_firmware opens the sequencer firmware and does some basic + * checks. 
This function opens and releases the firmware structure. A call + * to download the firmware is performed before the release. + * + * Firmware format + * =============== + * DEBLOBBED.bin is expected to be a file containing 6*1024*4 bytes, 4k being + * the program for each sequencer. Use the command + * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \ + * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \ + * Seq_code6_0x0B0.txt | xxd -r -p -c4 > DEBLOBBED.bin + * + * to generate DEBLOBBED.bin, if you have sequencer programs with something + * like the following contents for each sequencer: + * + * + * + * ... + * <1024th 4-BYTES-WORD FOR SEQUENCER> + */ +static int +spider_net_init_firmware(struct spider_net_card *card) +{ + struct firmware *firmware = NULL; + struct device_node *dn; + const u8 *fw_prop = NULL; + int err = -ENOENT; + int fw_size; + + if (reject_firmware((const struct firmware **)&firmware, + SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) { + if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) && + netif_msg_probe(card) ) { + dev_err(&card->netdev->dev, + "Incorrect size of spidernet firmware in " \ + "filesystem. Looking in host firmware...\n"); + goto try_host_fw; + } + err = spider_net_download_firmware(card, firmware->data); + + release_firmware(firmware); + if (err) + goto try_host_fw; + + goto done; + } + +try_host_fw: + dn = pci_device_to_OF_node(card->pdev); + if (!dn) + goto out_err; + + fw_prop = of_get_property(dn, "firmware", &fw_size); + if (!fw_prop) + goto out_err; + + if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) && + netif_msg_probe(card) ) { + dev_err(&card->netdev->dev, + "Incorrect size of spidernet firmware in host firmware\n"); + goto done; + } + + err = spider_net_download_firmware(card, fw_prop); + +done: + return err; +out_err: + if (netif_msg_probe(card)) + dev_err(&card->netdev->dev, + "Couldn't find spidernet firmware in filesystem " \ + "or host firmware\n"); + return err; +} + +/** + * spider_net_open - called upon ifonfig up + * @netdev: interface device structure + * + * returns 0 on success, <0 on failure + * + * spider_net_open allocates all the descriptors and memory needed for + * operation, sets up multicast list and enables interrupts + */ +int +spider_net_open(struct net_device *netdev) +{ + struct spider_net_card *card = netdev_priv(netdev); + int result; + + result = spider_net_init_firmware(card); + if (result) + goto init_firmware_failed; + + /* start probing with copper */ + card->aneg_count = 0; + card->medium = BCM54XX_COPPER; + spider_net_setup_aneg(card); + if (card->phy.def->phy_id) + mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); + + result = spider_net_init_chain(card, &card->tx_chain); + if (result) + goto alloc_tx_failed; + card->low_watermark = NULL; + + result = spider_net_init_chain(card, &card->rx_chain); + if (result) + goto alloc_rx_failed; + + /* Allocate rx skbs */ + result = spider_net_alloc_rx_skbs(card); + if (result) + goto alloc_skbs_failed; + + spider_net_set_multi(netdev); + + /* further enhancement: setup hw vlan, if needed */ + + result = -EBUSY; + if (request_irq(netdev->irq, spider_net_interrupt, + IRQF_SHARED, netdev->name, netdev)) + goto register_int_failed; + + spider_net_enable_card(card); + + netif_start_queue(netdev); + netif_carrier_on(netdev); + napi_enable(&card->napi); + + spider_net_enable_interrupts(card); + + return 0; + +register_int_failed: + spider_net_free_rx_chain_contents(card); +alloc_skbs_failed: + spider_net_free_chain(card, &card->rx_chain); 
+alloc_rx_failed: + spider_net_free_chain(card, &card->tx_chain); +alloc_tx_failed: + del_timer_sync(&card->aneg_timer); +init_firmware_failed: + return result; +} + +/** + * spider_net_link_phy + * @data: used for pointer to card structure + * + */ +static void spider_net_link_phy(unsigned long data) +{ + struct spider_net_card *card = (struct spider_net_card *)data; + struct mii_phy *phy = &card->phy; + + /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */ + if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) { + + pr_debug("%s: link is down trying to bring it up\n", + card->netdev->name); + + switch (card->medium) { + case BCM54XX_COPPER: + /* enable fiber with autonegotiation first */ + if (phy->def->ops->enable_fiber) + phy->def->ops->enable_fiber(phy, 1); + card->medium = BCM54XX_FIBER; + break; + + case BCM54XX_FIBER: + /* fiber didn't come up, try to disable fiber autoneg */ + if (phy->def->ops->enable_fiber) + phy->def->ops->enable_fiber(phy, 0); + card->medium = BCM54XX_UNKNOWN; + break; + + case BCM54XX_UNKNOWN: + /* copper, fiber with and without failed, + * retry from beginning */ + spider_net_setup_aneg(card); + card->medium = BCM54XX_COPPER; + break; + } + + card->aneg_count = 0; + mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); + return; + } + + /* link still not up, try again later */ + if (!(phy->def->ops->poll_link(phy))) { + card->aneg_count++; + mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); + return; + } + + /* link came up, get abilities */ + phy->def->ops->read_link(phy); + + spider_net_write_reg(card, SPIDER_NET_GMACST, + spider_net_read_reg(card, SPIDER_NET_GMACST)); + spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4); + + if (phy->speed == 1000) + spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001); + else + spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0); + + card->aneg_count = 0; + + pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n", + card->netdev->name, phy->speed, + phy->duplex == 1 ? "Full" : "Half", + phy->autoneg == 1 ? "" : "no "); +} + +/** + * spider_net_setup_phy - setup PHY + * @card: card structure + * + * returns 0 on success, <0 on failure + * + * spider_net_setup_phy is used as part of spider_net_probe. 
+ **/ +static int +spider_net_setup_phy(struct spider_net_card *card) +{ + struct mii_phy *phy = &card->phy; + + spider_net_write_reg(card, SPIDER_NET_GDTDMASEL, + SPIDER_NET_DMASEL_VALUE); + spider_net_write_reg(card, SPIDER_NET_GPCCTRL, + SPIDER_NET_PHY_CTRL_VALUE); + + phy->dev = card->netdev; + phy->mdio_read = spider_net_read_phy; + phy->mdio_write = spider_net_write_phy; + + for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) { + unsigned short id; + id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR); + if (id != 0x0000 && id != 0xffff) { + if (!sungem_phy_probe(phy, phy->mii_id)) { + pr_info("Found %s.\n", phy->def->name); + break; + } + } + } + + return 0; +} + +/** + * spider_net_workaround_rxramfull - work around firmware bug + * @card: card structure + * + * no return value + **/ +static void +spider_net_workaround_rxramfull(struct spider_net_card *card) +{ + int i, sequencer = 0; + + /* cancel reset */ + spider_net_write_reg(card, SPIDER_NET_CKRCTRL, + SPIDER_NET_CKRCTRL_RUN_VALUE); + + /* empty sequencer data */ + for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; + sequencer++) { + spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + + sequencer * 8, 0x0); + for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { + spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + + sequencer * 8, 0x0); + } + } + + /* set sequencer operation */ + spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe); + + /* reset */ + spider_net_write_reg(card, SPIDER_NET_CKRCTRL, + SPIDER_NET_CKRCTRL_STOP_VALUE); +} + +/** + * spider_net_stop - called upon ifconfig down + * @netdev: interface device structure + * + * always returns 0 + */ +int +spider_net_stop(struct net_device *netdev) +{ + struct spider_net_card *card = netdev_priv(netdev); + + napi_disable(&card->napi); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + del_timer_sync(&card->tx_timer); + del_timer_sync(&card->aneg_timer); + + spider_net_disable_interrupts(card); + + free_irq(netdev->irq, netdev); + + spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, + SPIDER_NET_DMA_TX_FEND_VALUE); + + /* turn off DMA, force end */ + spider_net_disable_rxdmac(card); + + /* release chains */ + spider_net_release_tx_chain(card, 1); + spider_net_free_rx_chain_contents(card); + + spider_net_free_chain(card, &card->tx_chain); + spider_net_free_chain(card, &card->rx_chain); + + return 0; +} + +/** + * spider_net_tx_timeout_task - task scheduled by the watchdog timeout + * function (to be called not under interrupt status) + * @data: data, is interface device structure + * + * called as task when tx hangs, resets interface (if interface is up) + */ +static void +spider_net_tx_timeout_task(struct work_struct *work) +{ + struct spider_net_card *card = + container_of(work, struct spider_net_card, tx_timeout_task); + struct net_device *netdev = card->netdev; + + if (!(netdev->flags & IFF_UP)) + goto out; + + netif_device_detach(netdev); + spider_net_stop(netdev); + + spider_net_workaround_rxramfull(card); + spider_net_init_card(card); + + if (spider_net_setup_phy(card)) + goto out; + + spider_net_open(netdev); + spider_net_kick_tx_dma(card); + netif_device_attach(netdev); + +out: + atomic_dec(&card->tx_timeout_task_counter); +} + +/** + * spider_net_tx_timeout - called when the tx timeout watchdog kicks in. + * @netdev: interface device structure + * + * called, if tx hangs. 
Schedules a task that resets the interface + */ +static void +spider_net_tx_timeout(struct net_device *netdev) +{ + struct spider_net_card *card; + + card = netdev_priv(netdev); + atomic_inc(&card->tx_timeout_task_counter); + if (netdev->flags & IFF_UP) + schedule_work(&card->tx_timeout_task); + else + atomic_dec(&card->tx_timeout_task_counter); + card->spider_stats.tx_timeouts++; +} + +static const struct net_device_ops spider_net_ops = { + .ndo_open = spider_net_open, + .ndo_stop = spider_net_stop, + .ndo_start_xmit = spider_net_xmit, + .ndo_set_rx_mode = spider_net_set_multi, + .ndo_set_mac_address = spider_net_set_mac, + .ndo_change_mtu = spider_net_change_mtu, + .ndo_do_ioctl = spider_net_do_ioctl, + .ndo_tx_timeout = spider_net_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + /* HW VLAN */ +#ifdef CONFIG_NET_POLL_CONTROLLER + /* poll controller */ + .ndo_poll_controller = spider_net_poll_controller, +#endif /* CONFIG_NET_POLL_CONTROLLER */ +}; + +/** + * spider_net_setup_netdev_ops - initialization of net_device operations + * @netdev: net_device structure + * + * fills out function pointers in the net_device structure + */ +static void +spider_net_setup_netdev_ops(struct net_device *netdev) +{ + netdev->netdev_ops = &spider_net_ops; + netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT; + /* ethtool ops */ + netdev->ethtool_ops = &spider_net_ethtool_ops; +} + +/** + * spider_net_setup_netdev - initialization of net_device + * @card: card structure + * + * Returns 0 on success or <0 on failure + * + * spider_net_setup_netdev initializes the net_device structure + **/ +static int +spider_net_setup_netdev(struct spider_net_card *card) +{ + int result; + struct net_device *netdev = card->netdev; + struct device_node *dn; + struct sockaddr addr; + const u8 *mac; + + SET_NETDEV_DEV(netdev, &card->pdev->dev); + + pci_set_drvdata(card->pdev, netdev); + + init_timer(&card->tx_timer); + card->tx_timer.function = + (void (*)(unsigned long)) spider_net_cleanup_tx_ring; + card->tx_timer.data = (unsigned long) card; + netdev->irq = card->pdev->irq; + + card->aneg_count = 0; + init_timer(&card->aneg_timer); + card->aneg_timer.function = spider_net_link_phy; + card->aneg_timer.data = (unsigned long) card; + + netif_napi_add(netdev, &card->napi, + spider_net_poll, SPIDER_NET_NAPI_WEIGHT); + + spider_net_setup_netdev_ops(netdev); + + netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + if (SPIDER_NET_RX_CSUM_DEFAULT) + netdev->features |= NETIF_F_RXCSUM; + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; + /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + * NETIF_F_HW_VLAN_CTAG_FILTER */ + + netdev->irq = card->pdev->irq; + card->num_rx_ints = 0; + card->ignore_rx_ramfull = 0; + + dn = pci_device_to_OF_node(card->pdev); + if (!dn) + return -EIO; + + mac = of_get_property(dn, "local-mac-address", NULL); + if (!mac) + return -EIO; + memcpy(addr.sa_data, mac, ETH_ALEN); + + result = spider_net_set_mac(netdev, &addr); + if ((result) && (netif_msg_probe(card))) + dev_err(&card->netdev->dev, + "Failed to set MAC address: %i\n", result); + + result = register_netdev(netdev); + if (result) { + if (netif_msg_probe(card)) + dev_err(&card->netdev->dev, + "Couldn't register net_device: %i\n", result); + return result; + } + + if (netif_msg_probe(card)) + pr_info("Initialized device %s.\n", netdev->name); + + return 0; +} + +/** + * spider_net_alloc_card - allocates net_device and card structure + * + * returns the card structure or NULL in case of errors + * + * the card and 
net_device structures are linked to each other + */ +static struct spider_net_card * +spider_net_alloc_card(void) +{ + struct net_device *netdev; + struct spider_net_card *card; + size_t alloc_size; + + alloc_size = sizeof(struct spider_net_card) + + (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr); + netdev = alloc_etherdev(alloc_size); + if (!netdev) + return NULL; + + card = netdev_priv(netdev); + card->netdev = netdev; + card->msg_enable = SPIDER_NET_DEFAULT_MSG; + INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task); + init_waitqueue_head(&card->waitq); + atomic_set(&card->tx_timeout_task_counter, 0); + + card->rx_chain.num_desc = rx_descriptors; + card->rx_chain.ring = card->darray; + card->tx_chain.num_desc = tx_descriptors; + card->tx_chain.ring = card->darray + rx_descriptors; + + return card; +} + +/** + * spider_net_undo_pci_setup - releases PCI ressources + * @card: card structure + * + * spider_net_undo_pci_setup releases the mapped regions + */ +static void +spider_net_undo_pci_setup(struct spider_net_card *card) +{ + iounmap(card->regs); + pci_release_regions(card->pdev); +} + +/** + * spider_net_setup_pci_dev - sets up the device in terms of PCI operations + * @pdev: PCI device + * + * Returns the card structure or NULL if any errors occur + * + * spider_net_setup_pci_dev initializes pdev and together with the + * functions called in spider_net_open configures the device so that + * data can be transferred over it + * The net_device structure is attached to the card structure, if the + * function returns without error. + **/ +static struct spider_net_card * +spider_net_setup_pci_dev(struct pci_dev *pdev) +{ + struct spider_net_card *card; + unsigned long mmio_start, mmio_len; + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "Couldn't enable PCI device\n"); + return NULL; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, + "Couldn't find proper PCI device base address.\n"); + goto out_disable_dev; + } + + if (pci_request_regions(pdev, spider_net_driver_name)) { + dev_err(&pdev->dev, + "Couldn't obtain PCI resources, aborting.\n"); + goto out_disable_dev; + } + + pci_set_master(pdev); + + card = spider_net_alloc_card(); + if (!card) { + dev_err(&pdev->dev, + "Couldn't allocate net_device structure, aborting.\n"); + goto out_release_regions; + } + card->pdev = pdev; + + /* fetch base address and length of first resource */ + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + card->netdev->mem_start = mmio_start; + card->netdev->mem_end = mmio_start + mmio_len; + card->regs = ioremap(mmio_start, mmio_len); + + if (!card->regs) { + dev_err(&pdev->dev, + "Couldn't obtain PCI resources, aborting.\n"); + goto out_release_regions; + } + + return card; + +out_release_regions: + pci_release_regions(pdev); +out_disable_dev: + pci_disable_device(pdev); + return NULL; +} + +/** + * spider_net_probe - initialization of a device + * @pdev: PCI device + * @ent: entry in the device id list + * + * Returns 0 on success, <0 on failure + * + * spider_net_probe initializes pdev and registers a net_device + * structure for it. 
After that, the device can be ifconfig'ed up + **/ +static int +spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = -EIO; + struct spider_net_card *card; + + card = spider_net_setup_pci_dev(pdev); + if (!card) + goto out; + + spider_net_workaround_rxramfull(card); + spider_net_init_card(card); + + err = spider_net_setup_phy(card); + if (err) + goto out_undo_pci; + + err = spider_net_setup_netdev(card); + if (err) + goto out_undo_pci; + + return 0; + +out_undo_pci: + spider_net_undo_pci_setup(card); + free_netdev(card->netdev); +out: + return err; +} + +/** + * spider_net_remove - removal of a device + * @pdev: PCI device + * + * Returns 0 on success, <0 on failure + * + * spider_net_remove is called to remove the device and unregisters the + * net_device + **/ +static void +spider_net_remove(struct pci_dev *pdev) +{ + struct net_device *netdev; + struct spider_net_card *card; + + netdev = pci_get_drvdata(pdev); + card = netdev_priv(netdev); + + wait_event(card->waitq, + atomic_read(&card->tx_timeout_task_counter) == 0); + + unregister_netdev(netdev); + + /* switch off card */ + spider_net_write_reg(card, SPIDER_NET_CKRCTRL, + SPIDER_NET_CKRCTRL_STOP_VALUE); + spider_net_write_reg(card, SPIDER_NET_CKRCTRL, + SPIDER_NET_CKRCTRL_RUN_VALUE); + + spider_net_undo_pci_setup(card); + free_netdev(netdev); +} + +static struct pci_driver spider_net_driver = { + .name = spider_net_driver_name, + .id_table = spider_net_pci_tbl, + .probe = spider_net_probe, + .remove = spider_net_remove +}; + +/** + * spider_net_init - init function when the driver is loaded + * + * spider_net_init registers the device driver + */ +static int __init spider_net_init(void) +{ + printk(KERN_INFO "Spidernet version %s.\n", VERSION); + + if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) { + rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN; + pr_info("adjusting rx descriptors to %i.\n", rx_descriptors); + } + if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) { + rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX; + pr_info("adjusting rx descriptors to %i.\n", rx_descriptors); + } + if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) { + tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN; + pr_info("adjusting tx descriptors to %i.\n", tx_descriptors); + } + if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) { + tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX; + pr_info("adjusting tx descriptors to %i.\n", tx_descriptors); + } + + return pci_register_driver(&spider_net_driver); +} + +/** + * spider_net_cleanup - exit function when driver is unloaded + * + * spider_net_cleanup unregisters the device driver + */ +static void __exit spider_net_cleanup(void) +{ + pci_unregister_driver(&spider_net_driver); +} + +module_init(spider_net_init); +module_exit(spider_net_cleanup); diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h new file mode 100644 index 000000000..9e141d8d7 --- /dev/null +++ b/drivers/net/ethernet/toshiba/spider_net.h @@ -0,0 +1,489 @@ +/* + * Network device driver for Cell Processor-Based Blade and Celleb platform + * + * (C) Copyright IBM Corp. 2005 + * (C) Copyright 2006 TOSHIBA CORPORATION + * + * Authors : Utz Bacher + * Jens Osterkamp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _SPIDER_NET_H +#define _SPIDER_NET_H + +#define VERSION "2.0 B" + +#include + +int spider_net_stop(struct net_device *netdev); +int spider_net_open(struct net_device *netdev); + +extern const struct ethtool_ops spider_net_ethtool_ops; + +extern char spider_net_driver_name[]; + +#define SPIDER_NET_MAX_FRAME 2312 +#define SPIDER_NET_MAX_MTU 2294 +#define SPIDER_NET_MIN_MTU 64 + +#define SPIDER_NET_RXBUF_ALIGN 128 + +#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256 +#define SPIDER_NET_RX_DESCRIPTORS_MIN 16 +#define SPIDER_NET_RX_DESCRIPTORS_MAX 512 + +#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256 +#define SPIDER_NET_TX_DESCRIPTORS_MIN 16 +#define SPIDER_NET_TX_DESCRIPTORS_MAX 512 + +#define SPIDER_NET_TX_TIMER (HZ/5) +#define SPIDER_NET_ANEG_TIMER (HZ) +#define SPIDER_NET_ANEG_TIMEOUT 5 + +#define SPIDER_NET_RX_CSUM_DEFAULT 1 + +#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ +#define SPIDER_NET_NAPI_WEIGHT 64 + +#define SPIDER_NET_FIRMWARE_SEQS 6 +#define SPIDER_NET_FIRMWARE_SEQWORDS 1024 +#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \ + SPIDER_NET_FIRMWARE_SEQWORDS * \ + sizeof(u32)) +#define SPIDER_NET_FIRMWARE_NAME "/*(DEBLOBBED)*/" + +/** spider_net SMMIO registers */ +#define SPIDER_NET_GHIINT0STS 0x00000000 +#define SPIDER_NET_GHIINT1STS 0x00000004 +#define SPIDER_NET_GHIINT2STS 0x00000008 +#define SPIDER_NET_GHIINT0MSK 0x00000010 +#define SPIDER_NET_GHIINT1MSK 0x00000014 +#define SPIDER_NET_GHIINT2MSK 0x00000018 + +#define SPIDER_NET_GRESUMINTNUM 0x00000020 +#define SPIDER_NET_GREINTNUM 0x00000024 + +#define SPIDER_NET_GFFRMNUM 0x00000028 +#define SPIDER_NET_GFAFRMNUM 0x0000002c +#define SPIDER_NET_GFBFRMNUM 0x00000030 +#define SPIDER_NET_GFCFRMNUM 0x00000034 +#define SPIDER_NET_GFDFRMNUM 0x00000038 + +/* clear them (don't use it) */ +#define SPIDER_NET_GFREECNNUM 0x0000003c +#define SPIDER_NET_GONETIMENUM 0x00000040 + +#define SPIDER_NET_GTOUTFRMNUM 0x00000044 + +#define SPIDER_NET_GTXMDSET 0x00000050 +#define SPIDER_NET_GPCCTRL 0x00000054 +#define SPIDER_NET_GRXMDSET 0x00000058 +#define SPIDER_NET_GIPSECINIT 0x0000005c +#define SPIDER_NET_GFTRESTRT 0x00000060 +#define SPIDER_NET_GRXDMAEN 0x00000064 +#define SPIDER_NET_GMRWOLCTRL 0x00000068 +#define SPIDER_NET_GPCWOPCMD 0x0000006c +#define SPIDER_NET_GPCROPCMD 0x00000070 +#define SPIDER_NET_GTTFRMCNT 0x00000078 +#define SPIDER_NET_GTESTMD 0x0000007c + +#define SPIDER_NET_GSINIT 0x00000080 +#define SPIDER_NET_GSnPRGADR 0x00000084 +#define SPIDER_NET_GSnPRGDAT 0x00000088 + +#define SPIDER_NET_GMACOPEMD 0x00000100 +#define SPIDER_NET_GMACLENLMT 0x00000108 +#define SPIDER_NET_GMACST 0x00000110 +#define SPIDER_NET_GMACINTEN 0x00000118 +#define SPIDER_NET_GMACPHYCTRL 0x00000120 + +#define SPIDER_NET_GMACAPAUSE 0x00000154 +#define SPIDER_NET_GMACTXPAUSE 0x00000164 + +#define SPIDER_NET_GMACMODE 0x000001b0 +#define SPIDER_NET_GMACBSTLMT 0x000001b4 + +#define SPIDER_NET_GMACUNIMACU 0x000001c0 +#define SPIDER_NET_GMACUNIMACL 0x000001c8 + +#define SPIDER_NET_GMRMHFILnR 0x00000400 +#define SPIDER_NET_MULTICAST_HASHES 256 + +#define SPIDER_NET_GMRUAFILnR 0x00000500 +#define 
SPIDER_NET_GMRUA0FIL15R 0x00000578 + +#define SPIDER_NET_GTTQMSK 0x00000934 + +/* RX DMA controller registers, all 0x00000a.. are for DMA controller A, + * 0x00000b.. for DMA controller B, etc. */ +#define SPIDER_NET_GDADCHA 0x00000a00 +#define SPIDER_NET_GDADMACCNTR 0x00000a04 +#define SPIDER_NET_GDACTDPA 0x00000a08 +#define SPIDER_NET_GDACTDCNT 0x00000a0c +#define SPIDER_NET_GDACDBADDR 0x00000a20 +#define SPIDER_NET_GDACDBSIZE 0x00000a24 +#define SPIDER_NET_GDACNEXTDA 0x00000a28 +#define SPIDER_NET_GDACCOMST 0x00000a2c +#define SPIDER_NET_GDAWBCOMST 0x00000a30 +#define SPIDER_NET_GDAWBRSIZE 0x00000a34 +#define SPIDER_NET_GDAWBVSIZE 0x00000a38 +#define SPIDER_NET_GDAWBTRST 0x00000a3c +#define SPIDER_NET_GDAWBTRERR 0x00000a40 + +/* TX DMA controller registers */ +#define SPIDER_NET_GDTDCHA 0x00000e00 +#define SPIDER_NET_GDTDMACCNTR 0x00000e04 +#define SPIDER_NET_GDTCDPA 0x00000e08 +#define SPIDER_NET_GDTDMASEL 0x00000e14 + +#define SPIDER_NET_ECMODE 0x00000f00 +/* clock and reset control register */ +#define SPIDER_NET_CKRCTRL 0x00000ff0 + +/** SCONFIG registers */ +#define SPIDER_NET_SCONFIG_IOACTE 0x00002810 + +/** interrupt mask registers */ +#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7 +#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2 +#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1 + +/* we rely on flagged descriptor interrupts */ +#define SPIDER_NET_FRAMENUM_VALUE 0x00000000 +/* set this first, then the FRAMENUM_VALUE */ +#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000 + +#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000 +#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e + +#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040 +/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/ +#define SPIDER_NET_RXMODE_VALUE 0x00000011 +/* auto retransmission in case of MAC aborts */ +#define SPIDER_NET_TXMODE_VALUE 0x00010000 +#define SPIDER_NET_RESTART_VALUE 0x00000000 +#define SPIDER_NET_WOL_VALUE 0x00001111 +#if 0 +#define SPIDER_NET_WOL_VALUE 0x00000000 +#endif +#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71 + +/* pause frames: automatic, no upper retransmission count */ +/* outside loopback mode: ETOMOD signal dont matter, not connected */ +/* ETOMOD signal is brought to PHY reset. 
bit 2 must be 1 in Celleb */ +#define SPIDER_NET_OPMODE_VALUE 0x00000067 +/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/ +#define SPIDER_NET_LENLMT_VALUE 0x00000908 + +#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */ +#define SPIDER_NET_TXPAUSE_VALUE 0x00000000 + +#define SPIDER_NET_MACMODE_VALUE 0x00000001 +#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */ + +/* DMAC control register GDMACCNTR + * + * 1(0) enable r/tx dma + * 0000000 fixed to 0 + * + * 000000 fixed to 0 + * 0(1) en/disable descr writeback on force end + * 0(1) force end + * + * 000000 fixed to 0 + * 00 burst alignment: 128 bytes + * 11 burst alignment: 1024 bytes + * + * 00000 fixed to 0 + * 0 descr writeback size 32 bytes + * 0(1) descr chain end interrupt enable + * 0(1) descr status writeback enable */ + +/* to set RX_DMA_EN */ +#define SPIDER_NET_DMA_RX_VALUE 0x80000000 +#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 +/* to set TX_DMA_EN */ +#define SPIDER_NET_TX_DMA_EN 0x80000000 +#define SPIDER_NET_GDTBSTA 0x00000300 +#define SPIDER_NET_GDTDCEIDIS 0x00000002 +#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ + SPIDER_NET_GDTDCEIDIS | \ + SPIDER_NET_GDTBSTA + +#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 + +/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ +#define SPIDER_NET_UA_DESCR_VALUE 0x00080000 +#define SPIDER_NET_PROMISC_VALUE 0x00080000 +#define SPIDER_NET_NONPROMISC_VALUE 0x00000000 + +#define SPIDER_NET_DMASEL_VALUE 0x00000001 + +#define SPIDER_NET_ECMODE_VALUE 0x00000000 + +#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f +#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f + +#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000 +#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000 + +/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used + * with 1 << SPIDER_NET_... 
*/ +enum spider_net_int0_status { + SPIDER_NET_GPHYINT = 0, + SPIDER_NET_GMAC2INT, + SPIDER_NET_GMAC1INT, + SPIDER_NET_GIPSINT, + SPIDER_NET_GFIFOINT, + SPIDER_NET_GDMACINT, + SPIDER_NET_GSYSINT, + SPIDER_NET_GPWOPCMPINT, + SPIDER_NET_GPROPCMPINT, + SPIDER_NET_GPWFFINT, + SPIDER_NET_GRMDADRINT, + SPIDER_NET_GRMARPINT, + SPIDER_NET_GRMMPINT, + SPIDER_NET_GDTDEN0INT, + SPIDER_NET_GDDDEN0INT, + SPIDER_NET_GDCDEN0INT, + SPIDER_NET_GDBDEN0INT, + SPIDER_NET_GDADEN0INT, + SPIDER_NET_GDTFDCINT, + SPIDER_NET_GDDFDCINT, + SPIDER_NET_GDCFDCINT, + SPIDER_NET_GDBFDCINT, + SPIDER_NET_GDAFDCINT, + SPIDER_NET_GTTEDINT, + SPIDER_NET_GDTDCEINT, + SPIDER_NET_GRFDNMINT, + SPIDER_NET_GRFCNMINT, + SPIDER_NET_GRFBNMINT, + SPIDER_NET_GRFANMINT, + SPIDER_NET_GRFNMINT, + SPIDER_NET_G1TMCNTINT, + SPIDER_NET_GFREECNTINT +}; +/* GHIINT1STS bits */ +enum spider_net_int1_status { + SPIDER_NET_GTMFLLINT = 0, + SPIDER_NET_GRMFLLINT, + SPIDER_NET_GTMSHTINT, + SPIDER_NET_GDTINVDINT, + SPIDER_NET_GRFDFLLINT, + SPIDER_NET_GDDDCEINT, + SPIDER_NET_GDDINVDINT, + SPIDER_NET_GRFCFLLINT, + SPIDER_NET_GDCDCEINT, + SPIDER_NET_GDCINVDINT, + SPIDER_NET_GRFBFLLINT, + SPIDER_NET_GDBDCEINT, + SPIDER_NET_GDBINVDINT, + SPIDER_NET_GRFAFLLINT, + SPIDER_NET_GDADCEINT, + SPIDER_NET_GDAINVDINT, + SPIDER_NET_GDTRSERINT, + SPIDER_NET_GDDRSERINT, + SPIDER_NET_GDCRSERINT, + SPIDER_NET_GDBRSERINT, + SPIDER_NET_GDARSERINT, + SPIDER_NET_GDSERINT, + SPIDER_NET_GDTPTERINT, + SPIDER_NET_GDDPTERINT, + SPIDER_NET_GDCPTERINT, + SPIDER_NET_GDBPTERINT, + SPIDER_NET_GDAPTERINT +}; +/* GHIINT2STS bits */ +enum spider_net_int2_status { + SPIDER_NET_GPROPERINT = 0, + SPIDER_NET_GMCTCRSNGINT, + SPIDER_NET_GMCTLCOLINT, + SPIDER_NET_GMCTTMOTINT, + SPIDER_NET_GMCRCAERINT, + SPIDER_NET_GMCRCALERINT, + SPIDER_NET_GMCRALNERINT, + SPIDER_NET_GMCROVRINT, + SPIDER_NET_GMCRRNTINT, + SPIDER_NET_GMCRRXERINT, + SPIDER_NET_GTITCSERINT, + SPIDER_NET_GTIFMTERINT, + SPIDER_NET_GTIPKTRVKINT, + SPIDER_NET_GTISPINGINT, + SPIDER_NET_GTISADNGINT, + SPIDER_NET_GTISPDNGINT, + SPIDER_NET_GRIFMTERINT, + SPIDER_NET_GRIPKTRVKINT, + SPIDER_NET_GRISPINGINT, + SPIDER_NET_GRISADNGINT, + SPIDER_NET_GRISPDNGINT +}; + +#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT) + +/* We rely on flagged descriptor interrupts */ +#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) ) + +#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT ) + +#define SPIDER_NET_ERRINT ( 0xffffffff & \ + (~SPIDER_NET_TXINT) & \ + (~SPIDER_NET_RXINT) & \ + (~SPIDER_NET_LINKINT) ) + +#define SPIDER_NET_GPREXEC 0x80000000 +#define SPIDER_NET_GPRDAT_MASK 0x0000ffff + +#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000 +#define SPIDER_NET_DMAC_TXFRMTL 0x00040000 +#define SPIDER_NET_DMAC_TCP 0x00020000 +#define SPIDER_NET_DMAC_UDP 0x00030000 +#define SPIDER_NET_TXDCEST 0x08000000 + +#define SPIDER_NET_DESCR_RXFDIS 0x00000001 +#define SPIDER_NET_DESCR_RXDCEIS 0x00000002 +#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004 +#define SPIDER_NET_DESCR_RXINVDIS 0x00000008 +#define SPIDER_NET_DESCR_RXRERRIS 0x00000010 +#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100 +#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200 +#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400 +#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800 +#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000 +#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0 + +#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000 +#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* 
used in rx and tx */ +#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */ +#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ +#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 +#define SPIDER_NET_DESCR_TXDESFLG 0x00800000 + +#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \ + SPIDER_NET_DESCR_RXRERRIS | \ + SPIDER_NET_DESCR_RXDEN0IMS | \ + SPIDER_NET_DESCR_RXINVDIMS | \ + SPIDER_NET_DESCR_RXRERRMIS | \ + SPIDER_NET_DESCR_UNUSED) + +/* Descriptor, as defined by the hardware */ +struct spider_net_hw_descr { + u32 buf_addr; + u32 buf_size; + u32 next_descr_addr; + u32 dmac_cmd_status; + u32 result_size; + u32 valid_size; /* all zeroes for tx */ + u32 data_status; + u32 data_error; /* all zeroes for tx */ +} __attribute__((aligned(32))); + +struct spider_net_descr { + struct spider_net_hw_descr *hwdescr; + struct sk_buff *skb; + u32 bus_addr; + struct spider_net_descr *next; + struct spider_net_descr *prev; +}; + +struct spider_net_descr_chain { + spinlock_t lock; + struct spider_net_descr *head; + struct spider_net_descr *tail; + struct spider_net_descr *ring; + int num_desc; + struct spider_net_hw_descr *hwring; + dma_addr_t dma_addr; +}; + +/* descriptor data_status bits */ +#define SPIDER_NET_RX_IPCHK 29 +#define SPIDER_NET_RX_TCPCHK 28 +#define SPIDER_NET_VLAN_PACKET 21 +#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \ + (1 << SPIDER_NET_RX_TCPCHK) ) + +/* descriptor data_error bits */ +#define SPIDER_NET_RX_IPCHKERR 27 +#define SPIDER_NET_RX_RXTCPCHKERR 28 + +#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR) + +/* the cases we don't pass the packet to the stack. + * 701b8000 would be correct, but every packets gets that flag */ +#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000 + +#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | \ + NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_INTR | \ + NETIF_MSG_TX_DONE | \ + NETIF_MSG_RX_STATUS | \ + NETIF_MSG_PKTDATA | \ + NETIF_MSG_HW | \ + NETIF_MSG_WOL ) + +struct spider_net_extra_stats { + unsigned long rx_desc_error; + unsigned long tx_timeouts; + unsigned long alloc_rx_skb_error; + unsigned long rx_iommu_map_error; + unsigned long tx_iommu_map_error; + unsigned long rx_desc_unk_state; +}; + +struct spider_net_card { + struct net_device *netdev; + struct pci_dev *pdev; + struct mii_phy phy; + + struct napi_struct napi; + + int medium; + + void __iomem *regs; + + struct spider_net_descr_chain tx_chain; + struct spider_net_descr_chain rx_chain; + struct spider_net_descr *low_watermark; + + int aneg_count; + struct timer_list aneg_timer; + struct timer_list tx_timer; + struct work_struct tx_timeout_task; + atomic_t tx_timeout_task_counter; + wait_queue_head_t waitq; + int num_rx_ints; + int ignore_rx_ramfull; + + /* for ethtool */ + int msg_enable; + struct spider_net_extra_stats spider_stats; + + /* Must be last item in struct */ + struct spider_net_descr darray[0]; +}; + +#endif diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c new file mode 100644 index 000000000..ffe519382 --- /dev/null +++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c @@ -0,0 +1,181 @@ +/* + * Network device driver for Cell Processor-Based Blade + * + * (C) Copyright IBM Corp. 
2005 + * + * Authors : Utz Bacher + * Jens Osterkamp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include + +#include "spider_net.h" + + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "tx_packets" }, + { "tx_bytes" }, + { "rx_packets" }, + { "rx_bytes" }, + { "tx_errors" }, + { "tx_dropped" }, + { "rx_dropped" }, + { "rx_descriptor_error" }, + { "tx_timeouts" }, + { "alloc_rx_skb_error" }, + { "rx_iommu_map_error" }, + { "tx_iommu_map_error" }, + { "rx_desc_unk_state" }, +}; + +static int +spider_net_ethtool_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct spider_net_card *card; + card = netdev_priv(netdev); + + cmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE); + cmd->port = PORT_FIBRE; + ethtool_cmd_speed_set(cmd, card->phy.speed); + cmd->duplex = DUPLEX_FULL; + + return 0; +} + +static void +spider_net_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct spider_net_card *card; + card = netdev_priv(netdev); + + /* clear and fill out info */ + strlcpy(drvinfo->driver, spider_net_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "no information", + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(card->pdev), + sizeof(drvinfo->bus_info)); +} + +static void +spider_net_ethtool_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wolinfo) +{ + /* no support for wol */ + wolinfo->supported = 0; + wolinfo->wolopts = 0; +} + +static u32 +spider_net_ethtool_get_msglevel(struct net_device *netdev) +{ + struct spider_net_card *card; + card = netdev_priv(netdev); + return card->msg_enable; +} + +static void +spider_net_ethtool_set_msglevel(struct net_device *netdev, + u32 level) +{ + struct spider_net_card *card; + card = netdev_priv(netdev); + card->msg_enable = level; +} + +static int +spider_net_ethtool_nway_reset(struct net_device *netdev) +{ + if (netif_running(netdev)) { + spider_net_stop(netdev); + spider_net_open(netdev); + } + return 0; +} + +static void +spider_net_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct spider_net_card *card = netdev_priv(netdev); + + ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; + ering->tx_pending = card->tx_chain.num_desc; + ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; + ering->rx_pending = card->rx_chain.num_desc; +} + +static int spider_net_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } +} + +static void spider_net_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) 
+{ + struct spider_net_card *card = netdev_priv(netdev); + + data[0] = netdev->stats.tx_packets; + data[1] = netdev->stats.tx_bytes; + data[2] = netdev->stats.rx_packets; + data[3] = netdev->stats.rx_bytes; + data[4] = netdev->stats.tx_errors; + data[5] = netdev->stats.tx_dropped; + data[6] = netdev->stats.rx_dropped; + data[7] = card->spider_stats.rx_desc_error; + data[8] = card->spider_stats.tx_timeouts; + data[9] = card->spider_stats.alloc_rx_skb_error; + data[10] = card->spider_stats.rx_iommu_map_error; + data[11] = card->spider_stats.tx_iommu_map_error; + data[12] = card->spider_stats.rx_desc_unk_state; +} + +static void spider_net_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); +} + +const struct ethtool_ops spider_net_ethtool_ops = { + .get_settings = spider_net_ethtool_get_settings, + .get_drvinfo = spider_net_ethtool_get_drvinfo, + .get_wol = spider_net_ethtool_get_wol, + .get_msglevel = spider_net_ethtool_get_msglevel, + .set_msglevel = spider_net_ethtool_set_msglevel, + .get_link = ethtool_op_get_link, + .nway_reset = spider_net_ethtool_nway_reset, + .get_ringparam = spider_net_ethtool_get_ringparam, + .get_strings = spider_net_get_strings, + .get_sset_count = spider_net_get_sset_count, + .get_ethtool_stats = spider_net_get_ethtool_stats, +}; + diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c new file mode 100644 index 000000000..45ac38d29 --- /dev/null +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -0,0 +1,2208 @@ +/* + * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux. + * + * Based on skelton.c by Donald Becker. + * + * This driver is a replacement of older and less maintained version. + * This is a header of the older version: + * ---------- + * Copyright 2001 MontaVista Software Inc. + * Author: MontaVista Software, Inc. + * ahennessy@mvista.com + * Copyright (C) 2000-2001 Toshiba Corporation + * static const char *version = + * "tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n"; + * ---------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * (C) Copyright TOSHIBA CORPORATION 2004-2005 + * All Rights Reserved. 
+ */ + +#define DRV_VERSION "1.39" +static const char *version = "tc35815.c:v" DRV_VERSION "\n"; +#define MODNAME "tc35815" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum tc35815_chiptype { + TC35815CF = 0, + TC35815_NWU, + TC35815_TX4939, +}; + +/* indexed by tc35815_chiptype, above */ +static const struct { + const char *name; +} chip_info[] = { + { "TOSHIBA TC35815CF 10/100BaseTX" }, + { "TOSHIBA TC35815 with Wake on LAN" }, + { "TOSHIBA TC35815/TX4939" }, +}; + +static const struct pci_device_id tc35815_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF }, + {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU }, + {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, + {0,} +}; +MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl); + +/* see MODULE_PARM_DESC */ +static struct tc35815_options { + int speed; + int duplex; +} options; + +/* + * Registers + */ +struct tc35815_regs { + __u32 DMA_Ctl; /* 0x00 */ + __u32 TxFrmPtr; + __u32 TxThrsh; + __u32 TxPollCtr; + __u32 BLFrmPtr; + __u32 RxFragSize; + __u32 Int_En; + __u32 FDA_Bas; + __u32 FDA_Lim; /* 0x20 */ + __u32 Int_Src; + __u32 unused0[2]; + __u32 PauseCnt; + __u32 RemPauCnt; + __u32 TxCtlFrmStat; + __u32 unused1; + __u32 MAC_Ctl; /* 0x40 */ + __u32 CAM_Ctl; + __u32 Tx_Ctl; + __u32 Tx_Stat; + __u32 Rx_Ctl; + __u32 Rx_Stat; + __u32 MD_Data; + __u32 MD_CA; + __u32 CAM_Adr; /* 0x60 */ + __u32 CAM_Data; + __u32 CAM_Ena; + __u32 PROM_Ctl; + __u32 PROM_Data; + __u32 Algn_Cnt; + __u32 CRC_Cnt; + __u32 Miss_Cnt; +}; + +/* + * Bit assignments + */ +/* DMA_Ctl bit assign ------------------------------------------------------- */ +#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ +#define DMA_RxAlign_1 0x00400000 +#define DMA_RxAlign_2 0x00800000 +#define DMA_RxAlign_3 0x00c00000 +#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ +#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */ +#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ +#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ +#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ +#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ +#define DMA_TestMode 0x00002000 /* 1:Test Mode */ +#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ +#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ + +/* RxFragSize bit assign ---------------------------------------------------- */ +#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ +#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ + +/* MAC_Ctl bit assign ------------------------------------------------------- */ +#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ +#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */ +#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ +#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ +#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ +#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ +#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ +#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ +#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ +#define MAC_Reset 0x00000004 /* 1:Software Reset */ +#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate 
*/ +#define MAC_HaltReq 0x00000001 /* 1:Halt request */ + +/* PROM_Ctl bit assign ------------------------------------------------------ */ +#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ +#define PROM_Read 0x00004000 /*10:Read operation */ +#define PROM_Write 0x00002000 /*01:Write operation */ +#define PROM_Erase 0x00006000 /*11:Erase operation */ + /*00:Enable or Disable Writting, */ + /* as specified in PROM_Addr. */ +#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ + /*00xxxx: disable */ + +/* CAM_Ctl bit assign ------------------------------------------------------- */ +#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ +#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ + /* accept other */ +#define CAM_BroadAcc 0x00000004 /* 1:Broadcast assept */ +#define CAM_GroupAcc 0x00000002 /* 1:Multicast assept */ +#define CAM_StationAcc 0x00000001 /* 1:unicast accept */ + +/* CAM_Ena bit assign ------------------------------------------------------- */ +#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ +#define CAM_Ena_Mask ((1<chiptype != TC35815CF) + +/* Tuning parameters */ +#define DMA_BURST_SIZE 32 +#define TX_THRESHOLD 1024 +/* used threshold with packet max byte for low pci transfer ability.*/ +#define TX_THRESHOLD_MAX 1536 +/* setting threshold max value when overrun error occurred this count. */ +#define TX_THRESHOLD_KEEP_LIMIT 10 + +/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ +#define FD_PAGE_NUM 4 +#define RX_BUF_NUM 128 /* < 256 */ +#define RX_FD_NUM 256 /* >= 32 */ +#define TX_FD_NUM 128 +#if RX_CTL_CMD & Rx_LongEn +#define RX_BUF_SIZE PAGE_SIZE +#elif RX_CTL_CMD & Rx_StripCRC +#define RX_BUF_SIZE \ + L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN) +#else +#define RX_BUF_SIZE \ + L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN) +#endif +#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ +#define NAPI_WEIGHT 16 + +struct TxFD { + struct FDesc fd; + struct BDesc bd; + struct BDesc unused; +}; + +struct RxFD { + struct FDesc fd; + struct BDesc bd[0]; /* variable length */ +}; + +struct FrFD { + struct FDesc fd; + struct BDesc bd[RX_BUF_NUM]; +}; + + +#define tc_readl(addr) ioread32(addr) +#define tc_writel(d, addr) iowrite32(d, addr) + +#define TC35815_TX_TIMEOUT msecs_to_jiffies(400) + +/* Information that need to be kept for each controller. */ +struct tc35815_local { + struct pci_dev *pci_dev; + + struct net_device *dev; + struct napi_struct napi; + + /* statistics */ + struct { + int max_tx_qlen; + int tx_ints; + int rx_ints; + int tx_underrun; + } lstats; + + /* Tx control lock. This protects the transmit buffer ring + * state along with the "tx full" state of the driver. This + * means all netif_queue flow control actions are protected + * by this lock as well. + */ + spinlock_t lock; + spinlock_t rx_lock; + + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + int duplex; + int speed; + int link; + struct work_struct restart_work; + + /* + * Transmitting: Batch Mode. + * 1 BD in 1 TxFD. + * Receiving: Non-Packing Mode. + * 1 circular FD for Free Buffer List. + * RX_BUF_NUM BD in Free Buffer FD. + * One Free Buffer BD has ETH_FRAME_LEN data buffer. 
+ */ + void *fd_buf; /* for TxFD, RxFD, FrFD */ + dma_addr_t fd_buf_dma; + struct TxFD *tfd_base; + unsigned int tfd_start; + unsigned int tfd_end; + struct RxFD *rfd_base; + struct RxFD *rfd_limit; + struct RxFD *rfd_cur; + struct FrFD *fbl_ptr; + unsigned int fbl_count; + struct { + struct sk_buff *skb; + dma_addr_t skb_dma; + } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; + u32 msg_enable; + enum tc35815_chiptype chiptype; +}; + +static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) +{ + return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf); +} +#ifdef DEBUG +static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus) +{ + return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); +} +#endif +static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, + struct pci_dev *hwdev, + dma_addr_t *dma_handle) +{ + struct sk_buff *skb; + skb = netdev_alloc_skb(dev, RX_BUF_SIZE); + if (!skb) + return NULL; + *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(hwdev, *dma_handle)) { + dev_kfree_skb_any(skb); + return NULL; + } + skb_reserve(skb, 2); /* make IP header 4byte aligned */ + return skb; +} + +static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle) +{ + pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); +} + +/* Index to functions, as function prototypes. */ + +static int tc35815_open(struct net_device *dev); +static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); +static irqreturn_t tc35815_interrupt(int irq, void *dev_id); +static int tc35815_rx(struct net_device *dev, int limit); +static int tc35815_poll(struct napi_struct *napi, int budget); +static void tc35815_txdone(struct net_device *dev); +static int tc35815_close(struct net_device *dev); +static struct net_device_stats *tc35815_get_stats(struct net_device *dev); +static void tc35815_set_multicast_list(struct net_device *dev); +static void tc35815_tx_timeout(struct net_device *dev); +static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void tc35815_poll_controller(struct net_device *dev); +#endif +static const struct ethtool_ops tc35815_ethtool_ops; + +/* Example routines you must write ;->. 
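alloc_rxbuf_skb() above reserves two bytes at the head of every receive buffer so that the 14-byte Ethernet header leaves the IP header on a 4-byte boundary. A stand-alone sketch of that offset arithmetic; the NET_IP_ALIGN value of 2 is assumed here, as on most platforms:

#include <stdio.h>

#define ETH_HLEN     14   /* destination MAC + source MAC + EtherType */
#define NET_IP_ALIGN 2    /* assumed value; matches most platforms */

int main(void)
{
        /* IP header offset when the frame starts at offset 0 versus when
         * the driver reserves NET_IP_ALIGN bytes first (skb_reserve(skb, 2)). */
        printf("no pad:   IP header at %d (mod 4 = %d)\n",
               ETH_HLEN, ETH_HLEN % 4);
        printf("with pad: IP header at %d (mod 4 = %d)\n",
               NET_IP_ALIGN + ETH_HLEN, (NET_IP_ALIGN + ETH_HLEN) % 4);
        return 0;
}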
*/ +static void tc35815_chip_reset(struct net_device *dev); +static void tc35815_chip_init(struct net_device *dev); + +#ifdef DEBUG +static void panic_queues(struct net_device *dev); +#endif + +static void tc35815_restart_work(struct work_struct *work); + +static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct net_device *dev = bus->priv; + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + unsigned long timeout = jiffies + HZ; + + tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA); + udelay(12); /* it takes 32 x 400ns at least */ + while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + return tc_readl(&tr->MD_Data) & 0xffff; +} + +static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val) +{ + struct net_device *dev = bus->priv; + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + unsigned long timeout = jiffies + HZ; + + tc_writel(val, &tr->MD_Data); + tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f), + &tr->MD_CA); + udelay(12); /* it takes 32 x 400ns at least */ + while (tc_readl(&tr->MD_CA) & MD_CA_Busy) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + return 0; +} + +static void tc_handle_link_change(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct phy_device *phydev = lp->phy_dev; + unsigned long flags; + int status_change = 0; + + spin_lock_irqsave(&lp->lock, flags); + if (phydev->link && + (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) { + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + u32 reg; + + reg = tc_readl(&tr->MAC_Ctl); + reg |= MAC_HaltReq; + tc_writel(reg, &tr->MAC_Ctl); + if (phydev->duplex == DUPLEX_FULL) + reg |= MAC_FullDup; + else + reg &= ~MAC_FullDup; + tc_writel(reg, &tr->MAC_Ctl); + reg &= ~MAC_HaltReq; + tc_writel(reg, &tr->MAC_Ctl); + + /* + * TX4939 PCFG.SPEEDn bit will be changed on + * NETDEV_CHANGE event. + */ + /* + * WORKAROUND: enable LostCrS only if half duplex + * operation. 
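tc_mdio_read() and tc_mdio_write() above poll the MD_CA busy bit under a one-second jiffies deadline before touching the MII data register. A user-space sketch of the same poll-until-idle-or-timeout shape; reg_read() and the MD_CA_BUSY value are stand-ins for the real MMIO access and bit layout:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MD_CA_BUSY 0x00000800u   /* illustrative busy flag, not the chip's layout */

/* Simulated register: busy for the first few polls, then idle.
 * In the driver this would be tc_readl(&tr->MD_CA). */
static unsigned int reg_read(void)
{
        static int polls;
        return (polls++ < 3) ? MD_CA_BUSY : 0;
}

/* Poll until the busy bit clears or roughly one second has elapsed. */
static bool wait_not_busy(void)
{
        time_t deadline = time(NULL) + 1;

        while (reg_read() & MD_CA_BUSY) {
                if (time(NULL) > deadline)
                        return false;   /* the driver returns -EIO here */
        }
        return true;
}

int main(void)
{
        printf("MDIO ready: %s\n", wait_not_busy() ? "yes" : "timed out");
        return 0;
}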
+ * (TX4939 does not have EnLCarr) + */ + if (phydev->duplex == DUPLEX_HALF && + lp->chiptype != TC35815_TX4939) + tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, + &tr->Tx_Ctl); + + lp->speed = phydev->speed; + lp->duplex = phydev->duplex; + status_change = 1; + } + + if (phydev->link != lp->link) { + if (phydev->link) { + /* delayed promiscuous enabling */ + if (dev->flags & IFF_PROMISC) + tc35815_set_multicast_list(dev); + } else { + lp->speed = 0; + lp->duplex = -1; + } + lp->link = phydev->link; + + status_change = 1; + } + spin_unlock_irqrestore(&lp->lock, flags); + + if (status_change && netif_msg_link(lp)) { + phy_print_status(phydev); + pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n", + dev->name, + phy_read(phydev, MII_BMCR), + phy_read(phydev, MII_BMSR), + phy_read(phydev, MII_LPA)); + } +} + +static int tc_mii_probe(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + u32 dropmask; + + /* find the first phy */ + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (lp->mii_bus->phy_map[phy_addr]) { + if (phydev) { + printk(KERN_ERR "%s: multiple PHYs found\n", + dev->name); + return -EINVAL; + } + phydev = lp->mii_bus->phy_map[phy_addr]; + break; + } + } + + if (!phydev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENODEV; + } + + /* attach the mac to the phy */ + phydev = phy_connect(dev, dev_name(&phydev->dev), + &tc_handle_link_change, + lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII); + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + printk(KERN_INFO "%s: attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, id=%x)\n", + dev->name, phydev->drv->name, dev_name(&phydev->dev), + phydev->phy_id); + + /* mask with MAC supported features */ + phydev->supported &= PHY_BASIC_FEATURES; + dropmask = 0; + if (options.speed == 10) + dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; + else if (options.speed == 100) + dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; + if (options.duplex == 1) + dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full; + else if (options.duplex == 2) + dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half; + phydev->supported &= ~dropmask; + phydev->advertising = phydev->supported; + + lp->link = 0; + lp->speed = 0; + lp->duplex = -1; + lp->phy_dev = phydev; + + return 0; +} + +static int tc_mii_init(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + int err; + int i; + + lp->mii_bus = mdiobus_alloc(); + if (lp->mii_bus == NULL) { + err = -ENOMEM; + goto err_out; + } + + lp->mii_bus->name = "tc35815_mii_bus"; + lp->mii_bus->read = tc_mdio_read; + lp->mii_bus->write = tc_mdio_write; + snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", + (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn); + lp->mii_bus->priv = dev; + lp->mii_bus->parent = &lp->pci_dev->dev; + lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!lp->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mii_bus; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + lp->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(lp->mii_bus); + if (err) + goto err_out_free_mdio_irq; + err = tc_mii_probe(dev); + if (err) + goto err_out_unregister_bus; + return 0; + +err_out_unregister_bus: + mdiobus_unregister(lp->mii_bus); +err_out_free_mdio_irq: + kfree(lp->mii_bus->irq); +err_out_free_mii_bus: + mdiobus_free(lp->mii_bus); 
+err_out: + return err; +} + +#ifdef CONFIG_CPU_TX49XX +/* + * Find a platform_device providing a MAC address. The platform code + * should provide a "tc35815-mac" device with a MAC address in its + * platform_data. + */ +static int tc35815_mac_match(struct device *dev, void *data) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct pci_dev *pci_dev = data; + unsigned int id = pci_dev->irq; + return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id; +} + +static int tc35815_read_plat_dev_addr(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct device *pd = bus_find_device(&platform_bus_type, NULL, + lp->pci_dev, tc35815_mac_match); + if (pd) { + if (pd->platform_data) + memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN); + put_device(pd); + return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV; + } + return -ENODEV; +} +#else +static int tc35815_read_plat_dev_addr(struct net_device *dev) +{ + return -ENODEV; +} +#endif + +static int tc35815_init_dev_addr(struct net_device *dev) +{ + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + int i; + + while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) + ; + for (i = 0; i < 6; i += 2) { + unsigned short data; + tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl); + while (tc_readl(&tr->PROM_Ctl) & PROM_Busy) + ; + data = tc_readl(&tr->PROM_Data); + dev->dev_addr[i] = data & 0xff; + dev->dev_addr[i+1] = data >> 8; + } + if (!is_valid_ether_addr(dev->dev_addr)) + return tc35815_read_plat_dev_addr(dev); + return 0; +} + +static const struct net_device_ops tc35815_netdev_ops = { + .ndo_open = tc35815_open, + .ndo_stop = tc35815_close, + .ndo_start_xmit = tc35815_send_packet, + .ndo_get_stats = tc35815_get_stats, + .ndo_set_rx_mode = tc35815_set_multicast_list, + .ndo_tx_timeout = tc35815_tx_timeout, + .ndo_do_ioctl = tc35815_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tc35815_poll_controller, +#endif +}; + +static int tc35815_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + void __iomem *ioaddr = NULL; + struct net_device *dev; + struct tc35815_local *lp; + int rc; + + static int printed_version; + if (!printed_version++) { + printk(version); + dev_printk(KERN_DEBUG, &pdev->dev, + "speed:%d duplex:%d\n", + options.speed, options.duplex); + } + + if (!pdev->irq) { + dev_warn(&pdev->dev, "no IRQ assigned.\n"); + return -ENODEV; + } + + /* dev zeroed in alloc_etherdev */ + dev = alloc_etherdev(sizeof(*lp)); + if (dev == NULL) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + lp = netdev_priv(dev); + lp->dev = dev; + + /* enable device (incl. PCI PM wakeup), and bus-mastering */ + rc = pcim_enable_device(pdev); + if (rc) + goto err_out; + rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME); + if (rc) + goto err_out; + pci_set_master(pdev); + ioaddr = pcim_iomap_table(pdev)[1]; + + /* Initialize the device structure. 
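tc35815_init_dev_addr() above pulls the station address out of the serial PROM one 16-bit word at a time and splits each word into two address bytes, low byte first. A stand-alone sketch of that unpacking; the PROM word values are made up for the demo:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Three 16-bit PROM words, as the chip would return them (demo values). */
        const uint16_t prom[3] = { 0x1100, 0x3322, 0x5544 };
        uint8_t mac[6];
        int i;

        for (i = 0; i < 6; i += 2) {
                uint16_t data = prom[i / 2];
                mac[i]     = data & 0xff;   /* low byte first, as in the driver */
                mac[i + 1] = data >> 8;
        }
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}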
*/ + dev->netdev_ops = &tc35815_netdev_ops; + dev->ethtool_ops = &tc35815_ethtool_ops; + dev->watchdog_timeo = TC35815_TX_TIMEOUT; + netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); + + dev->irq = pdev->irq; + dev->base_addr = (unsigned long)ioaddr; + + INIT_WORK(&lp->restart_work, tc35815_restart_work); + spin_lock_init(&lp->lock); + spin_lock_init(&lp->rx_lock); + lp->pci_dev = pdev; + lp->chiptype = ent->driver_data; + + lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; + pci_set_drvdata(pdev, dev); + + /* Soft reset the chip. */ + tc35815_chip_reset(dev); + + /* Retrieve the ethernet address. */ + if (tc35815_init_dev_addr(dev)) { + dev_warn(&pdev->dev, "not valid ether addr\n"); + eth_hw_addr_random(dev); + } + + rc = register_netdev(dev); + if (rc) + goto err_out; + + printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n", + dev->name, + chip_info[ent->driver_data].name, + dev->base_addr, + dev->dev_addr, + dev->irq); + + rc = tc_mii_init(dev); + if (rc) + goto err_out_unregister; + + return 0; + +err_out_unregister: + unregister_netdev(dev); +err_out: + free_netdev(dev); + return rc; +} + + +static void tc35815_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tc35815_local *lp = netdev_priv(dev); + + phy_disconnect(lp->phy_dev); + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); + unregister_netdev(dev); + free_netdev(dev); +} + +static int +tc35815_init_queues(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + int i; + unsigned long fd_addr; + + if (!lp->fd_buf) { + BUG_ON(sizeof(struct FDesc) + + sizeof(struct BDesc) * RX_BUF_NUM + + sizeof(struct FDesc) * RX_FD_NUM + + sizeof(struct TxFD) * TX_FD_NUM > + PAGE_SIZE * FD_PAGE_NUM); + + lp->fd_buf = pci_alloc_consistent(lp->pci_dev, + PAGE_SIZE * FD_PAGE_NUM, + &lp->fd_buf_dma); + if (!lp->fd_buf) + return -ENOMEM; + for (i = 0; i < RX_BUF_NUM; i++) { + lp->rx_skbs[i].skb = + alloc_rxbuf_skb(dev, lp->pci_dev, + &lp->rx_skbs[i].skb_dma); + if (!lp->rx_skbs[i].skb) { + while (--i >= 0) { + free_rxbuf_skb(lp->pci_dev, + lp->rx_skbs[i].skb, + lp->rx_skbs[i].skb_dma); + lp->rx_skbs[i].skb = NULL; + } + pci_free_consistent(lp->pci_dev, + PAGE_SIZE * FD_PAGE_NUM, + lp->fd_buf, + lp->fd_buf_dma); + lp->fd_buf = NULL; + return -ENOMEM; + } + } + printk(KERN_DEBUG "%s: FD buf %p DataBuf", + dev->name, lp->fd_buf); + printk("\n"); + } else { + for (i = 0; i < FD_PAGE_NUM; i++) + clear_page((void *)((unsigned long)lp->fd_buf + + i * PAGE_SIZE)); + } + fd_addr = (unsigned long)lp->fd_buf; + + /* Free Descriptors (for Receive) */ + lp->rfd_base = (struct RxFD *)fd_addr; + fd_addr += sizeof(struct RxFD) * RX_FD_NUM; + for (i = 0; i < RX_FD_NUM; i++) + lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD); + lp->rfd_cur = lp->rfd_base; + lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); + + /* Transmit Descriptors */ + lp->tfd_base = (struct TxFD *)fd_addr; + fd_addr += sizeof(struct TxFD) * TX_FD_NUM; + for (i = 0; i < TX_FD_NUM; i++) { + lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1])); + lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); + lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0); + } + lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0])); + lp->tfd_start = 0; + lp->tfd_end = 0; + + /* Buffer List (for Receive) */ + lp->fbl_ptr = (struct FrFD *)fd_addr; + lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, 
lp->fbl_ptr)); + lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); + /* + * move all allocated skbs to head of rx_skbs[] array. + * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in + * tc35815_rx() had failed. + */ + lp->fbl_count = 0; + for (i = 0; i < RX_BUF_NUM; i++) { + if (lp->rx_skbs[i].skb) { + if (i != lp->fbl_count) { + lp->rx_skbs[lp->fbl_count].skb = + lp->rx_skbs[i].skb; + lp->rx_skbs[lp->fbl_count].skb_dma = + lp->rx_skbs[i].skb_dma; + } + lp->fbl_count++; + } + } + for (i = 0; i < RX_BUF_NUM; i++) { + if (i >= lp->fbl_count) { + lp->fbl_ptr->bd[i].BuffData = 0; + lp->fbl_ptr->bd[i].BDCtl = 0; + continue; + } + lp->fbl_ptr->bd[i].BuffData = + cpu_to_le32(lp->rx_skbs[i].skb_dma); + /* BDID is index of FrFD.bd[] */ + lp->fbl_ptr->bd[i].BDCtl = + cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | + RX_BUF_SIZE); + } + + printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n", + dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); + return 0; +} + +static void +tc35815_clear_queues(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + int i; + + for (i = 0; i < TX_FD_NUM; i++) { + u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); + struct sk_buff *skb = + fdsystem != 0xffffffff ? + lp->tx_skbs[fdsystem].skb : NULL; +#ifdef DEBUG + if (lp->tx_skbs[i].skb != skb) { + printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); + panic_queues(dev); + } +#else + BUG_ON(lp->tx_skbs[i].skb != skb); +#endif + if (skb) { + pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); + lp->tx_skbs[i].skb = NULL; + lp->tx_skbs[i].skb_dma = 0; + dev_kfree_skb_any(skb); + } + lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); + } + + tc35815_init_queues(dev); +} + +static void +tc35815_free_queues(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + int i; + + if (lp->tfd_base) { + for (i = 0; i < TX_FD_NUM; i++) { + u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem); + struct sk_buff *skb = + fdsystem != 0xffffffff ? 
+ lp->tx_skbs[fdsystem].skb : NULL; +#ifdef DEBUG + if (lp->tx_skbs[i].skb != skb) { + printk("%s: tx_skbs mismatch(%d).\n", dev->name, i); + panic_queues(dev); + } +#else + BUG_ON(lp->tx_skbs[i].skb != skb); +#endif + if (skb) { + dev_kfree_skb(skb); + pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE); + lp->tx_skbs[i].skb = NULL; + lp->tx_skbs[i].skb_dma = 0; + } + lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff); + } + } + + lp->rfd_base = NULL; + lp->rfd_limit = NULL; + lp->rfd_cur = NULL; + lp->fbl_ptr = NULL; + + for (i = 0; i < RX_BUF_NUM; i++) { + if (lp->rx_skbs[i].skb) { + free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, + lp->rx_skbs[i].skb_dma); + lp->rx_skbs[i].skb = NULL; + } + } + if (lp->fd_buf) { + pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, + lp->fd_buf, lp->fd_buf_dma); + lp->fd_buf = NULL; + } +} + +static void +dump_txfd(struct TxFD *fd) +{ + printk("TxFD(%p): %08x %08x %08x %08x\n", fd, + le32_to_cpu(fd->fd.FDNext), + le32_to_cpu(fd->fd.FDSystem), + le32_to_cpu(fd->fd.FDStat), + le32_to_cpu(fd->fd.FDCtl)); + printk("BD: "); + printk(" %08x %08x", + le32_to_cpu(fd->bd.BuffData), + le32_to_cpu(fd->bd.BDCtl)); + printk("\n"); +} + +static int +dump_rxfd(struct RxFD *fd) +{ + int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; + if (bd_count > 8) + bd_count = 8; + printk("RxFD(%p): %08x %08x %08x %08x\n", fd, + le32_to_cpu(fd->fd.FDNext), + le32_to_cpu(fd->fd.FDSystem), + le32_to_cpu(fd->fd.FDStat), + le32_to_cpu(fd->fd.FDCtl)); + if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) + return 0; + printk("BD: "); + for (i = 0; i < bd_count; i++) + printk(" %08x %08x", + le32_to_cpu(fd->bd[i].BuffData), + le32_to_cpu(fd->bd[i].BDCtl)); + printk("\n"); + return bd_count; +} + +#ifdef DEBUG +static void +dump_frfd(struct FrFD *fd) +{ + int i; + printk("FrFD(%p): %08x %08x %08x %08x\n", fd, + le32_to_cpu(fd->fd.FDNext), + le32_to_cpu(fd->fd.FDSystem), + le32_to_cpu(fd->fd.FDStat), + le32_to_cpu(fd->fd.FDCtl)); + printk("BD: "); + for (i = 0; i < RX_BUF_NUM; i++) + printk(" %08x %08x", + le32_to_cpu(fd->bd[i].BuffData), + le32_to_cpu(fd->bd[i].BDCtl)); + printk("\n"); +} + +static void +panic_queues(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + int i; + + printk("TxFD base %p, start %u, end %u\n", + lp->tfd_base, lp->tfd_start, lp->tfd_end); + printk("RxFD base %p limit %p cur %p\n", + lp->rfd_base, lp->rfd_limit, lp->rfd_cur); + printk("FrFD %p\n", lp->fbl_ptr); + for (i = 0; i < TX_FD_NUM; i++) + dump_txfd(&lp->tfd_base[i]); + for (i = 0; i < RX_FD_NUM; i++) { + int bd_count = dump_rxfd(&lp->rfd_base[i]); + i += (bd_count + 1) / 2; /* skip BDs */ + } + dump_frfd(lp->fbl_ptr); + panic("%s: Illegal queue state.", dev->name); +} +#endif + +static void print_eth(const u8 *add) +{ + printk(KERN_DEBUG "print_eth(%p)\n", add); + printk(KERN_DEBUG " %pM => %pM : %02x%02x\n", + add + 6, add, add[12], add[13]); +} + +static int tc35815_tx_full(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end; +} + +static void tc35815_restart(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + int ret; + + if (lp->phy_dev) { + ret = phy_init_hw(lp->phy_dev); + if (ret) + printk(KERN_ERR "%s: PHY init failed.\n", dev->name); + } + + spin_lock_bh(&lp->rx_lock); + spin_lock_irq(&lp->lock); + tc35815_chip_reset(dev); + tc35815_clear_queues(dev); + tc35815_chip_init(dev); + /* Reconfigure CAM again since 
tc35815_chip_init() initialize it. */ + tc35815_set_multicast_list(dev); + spin_unlock_irq(&lp->lock); + spin_unlock_bh(&lp->rx_lock); + + netif_wake_queue(dev); +} + +static void tc35815_restart_work(struct work_struct *work) +{ + struct tc35815_local *lp = + container_of(work, struct tc35815_local, restart_work); + struct net_device *dev = lp->dev; + + tc35815_restart(dev); +} + +static void tc35815_schedule_restart(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + unsigned long flags; + + /* disable interrupts */ + spin_lock_irqsave(&lp->lock, flags); + tc_writel(0, &tr->Int_En); + tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl); + schedule_work(&lp->restart_work); + spin_unlock_irqrestore(&lp->lock, flags); +} + +static void tc35815_tx_timeout(struct net_device *dev) +{ + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + + printk(KERN_WARNING "%s: transmit timed out, status %#x\n", + dev->name, tc_readl(&tr->Tx_Stat)); + + /* Try to restart the adaptor. */ + tc35815_schedule_restart(dev); + dev->stats.tx_errors++; +} + +/* + * Open/initialize the controller. This is called (in the current kernel) + * sometime after booting when the 'ifconfig' program is run. + * + * This routine should set everything up anew at each open, even + * registers that "should" only need to be set once at boot, so that + * there is non-reboot way to recover if something goes wrong. + */ +static int +tc35815_open(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + + /* + * This is used if the interrupt line can turned off (shared). + * See 3c503.c for an example of selecting the IRQ at config-time. + */ + if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED, + dev->name, dev)) + return -EAGAIN; + + tc35815_chip_reset(dev); + + if (tc35815_init_queues(dev) != 0) { + free_irq(dev->irq, dev); + return -EAGAIN; + } + + napi_enable(&lp->napi); + + /* Reset the hardware here. Don't forget to set the station address. */ + spin_lock_irq(&lp->lock); + tc35815_chip_init(dev); + spin_unlock_irq(&lp->lock); + + netif_carrier_off(dev); + /* schedule a link state check */ + phy_start(lp->phy_dev); + + /* We are now ready to accept transmit requeusts from + * the queueing layer of the networking. + */ + netif_start_queue(dev); + + return 0; +} + +/* This will only be invoked if your driver is _not_ in XOFF state. + * What this means is that you need not check it, and that this + * invariant will hold if you make sure that the netif_*_queue() + * calls are done at the proper times. + */ +static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct TxFD *txfd; + unsigned long flags; + + /* If some error occurs while trying to transmit this + * packet, you should return '1' from this function. + * In such a case you _may not_ do anything to the + * SKB, it is still owned by the network queueing + * layer when an error is returned. This means you + * may not modify any SKB fields, you may not free + * the SKB, etc. + */ + + /* This is the most common case for modern hardware. + * The spinlock protects this code from the TX complete + * hardware interrupt handler. Queue flow control is + * thus managed under this lock as well. + */ + spin_lock_irqsave(&lp->lock, flags); + + /* failsafe... 
(handle txdone now if half of FDs are used) */ + if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM > + TX_FD_NUM / 2) + tc35815_txdone(dev); + + if (netif_msg_pktdata(lp)) + print_eth(skb->data); +#ifdef DEBUG + if (lp->tx_skbs[lp->tfd_start].skb) { + printk("%s: tx_skbs conflict.\n", dev->name); + panic_queues(dev); + } +#else + BUG_ON(lp->tx_skbs[lp->tfd_start].skb); +#endif + lp->tx_skbs[lp->tfd_start].skb = skb; + lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + + /*add to ring */ + txfd = &lp->tfd_base[lp->tfd_start]; + txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma); + txfd->bd.BDCtl = cpu_to_le32(skb->len); + txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start); + txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT)); + + if (lp->tfd_start == lp->tfd_end) { + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + /* Start DMA Transmitter. */ + txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); + txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); + if (netif_msg_tx_queued(lp)) { + printk("%s: starting TxFD.\n", dev->name); + dump_txfd(txfd); + } + tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); + } else { + txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL); + if (netif_msg_tx_queued(lp)) { + printk("%s: queueing TxFD.\n", dev->name); + dump_txfd(txfd); + } + } + lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM; + + /* If we just used up the very last entry in the + * TX ring on this device, tell the queueing + * layer to send no more. + */ + if (tc35815_tx_full(dev)) { + if (netif_msg_tx_queued(lp)) + printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name); + netif_stop_queue(dev); + } + + /* When the TX completion hw interrupt arrives, this + * is when the transmit statistics are updated. + */ + + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; +} + +#define FATAL_ERROR_INT \ + (Int_IntPCI | Int_DmParErr | Int_IntNRAbt) +static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) +{ + static int count; + printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):", + dev->name, status); + if (status & Int_IntPCI) + printk(" IntPCI"); + if (status & Int_DmParErr) + printk(" DmParErr"); + if (status & Int_IntNRAbt) + printk(" IntNRAbt"); + printk("\n"); + if (count++ > 100) + panic("%s: Too many fatal errors.", dev->name); + printk(KERN_WARNING "%s: Resetting ...\n", dev->name); + /* Try to restart the adaptor. */ + tc35815_schedule_restart(dev); +} + +static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) +{ + struct tc35815_local *lp = netdev_priv(dev); + int ret = -1; + + /* Fatal errors... */ + if (status & FATAL_ERROR_INT) { + tc35815_fatal_error_interrupt(dev, status); + return 0; + } + /* recoverable errors */ + if (status & Int_IntFDAEx) { + if (netif_msg_rx_err(lp)) + dev_warn(&dev->dev, + "Free Descriptor Area Exhausted (%#x).\n", + status); + dev->stats.rx_dropped++; + ret = 0; + } + if (status & Int_IntBLEx) { + if (netif_msg_rx_err(lp)) + dev_warn(&dev->dev, + "Buffer List Exhausted (%#x).\n", + status); + dev->stats.rx_dropped++; + ret = 0; + } + if (status & Int_IntExBD) { + if (netif_msg_rx_err(lp)) + dev_warn(&dev->dev, + "Excessive Buffer Descriptiors (%#x).\n", + status); + dev->stats.rx_length_errors++; + ret = 0; + } + + /* normal notification */ + if (status & Int_IntMacRx) { + /* Got a packet(s). 
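tc35815_send_packet() and tc35815_tx_full() above manage the transmit ring with two indices, tfd_start (next slot to fill) and tfd_end (next slot to reclaim), leaving one slot unused so that full and empty can be told apart. A compact sketch of that arithmetic; RING_SIZE 8 is illustrative, the driver uses TX_FD_NUM (128):

#include <stdio.h>

#define RING_SIZE 8   /* illustrative; the driver uses TX_FD_NUM */

/* Number of descriptors currently queued between 'end' and 'start'. */
static unsigned int ring_used(unsigned int start, unsigned int end)
{
        return (start + RING_SIZE - end) % RING_SIZE;
}

/* Full when advancing 'start' would make it equal to 'end';
 * one slot is deliberately left unused. */
static int ring_full(unsigned int start, unsigned int end)
{
        return (start + 1) % RING_SIZE == end;
}

int main(void)
{
        unsigned int start = 6, end = 7;   /* example producer/consumer state */

        printf("used=%u full=%d\n", ring_used(start, end), ring_full(start, end));
        return 0;
}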
*/ + ret = tc35815_rx(dev, limit); + lp->lstats.rx_ints++; + } + if (status & Int_IntMacTx) { + /* Transmit complete. */ + lp->lstats.tx_ints++; + spin_lock_irq(&lp->lock); + tc35815_txdone(dev); + spin_unlock_irq(&lp->lock); + if (ret < 0) + ret = 0; + } + return ret; +} + +/* + * The typical workload of the driver: + * Handle the network interface interrupts. + */ +static irqreturn_t tc35815_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct tc35815_local *lp = netdev_priv(dev); + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + u32 dmactl = tc_readl(&tr->DMA_Ctl); + + if (!(dmactl & DMA_IntMask)) { + /* disable interrupts */ + tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); + if (napi_schedule_prep(&lp->napi)) + __napi_schedule(&lp->napi); + else { + printk(KERN_ERR "%s: interrupt taken in poll\n", + dev->name); + BUG(); + } + (void)tc_readl(&tr->Int_Src); /* flush */ + return IRQ_HANDLED; + } + return IRQ_NONE; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void tc35815_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + tc35815_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/* We have a good packet(s), get it/them out of the buffers. */ +static int +tc35815_rx(struct net_device *dev, int limit) +{ + struct tc35815_local *lp = netdev_priv(dev); + unsigned int fdctl; + int i; + int received = 0; + + while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { + int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); + int pkt_len = fdctl & FD_FDLength_MASK; + int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT; +#ifdef DEBUG + struct RxFD *next_rfd; +#endif +#if (RX_CTL_CMD & Rx_StripCRC) == 0 + pkt_len -= ETH_FCS_LEN; +#endif + + if (netif_msg_rx_status(lp)) + dump_rxfd(lp->rfd_cur); + if (status & Rx_Good) { + struct sk_buff *skb; + unsigned char *data; + int cur_bd; + + if (--limit < 0) + break; + BUG_ON(bd_count > 1); + cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) + & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; +#ifdef DEBUG + if (cur_bd >= RX_BUF_NUM) { + printk("%s: invalid BDID.\n", dev->name); + panic_queues(dev); + } + BUG_ON(lp->rx_skbs[cur_bd].skb_dma != + (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3)); + if (!lp->rx_skbs[cur_bd].skb) { + printk("%s: NULL skb.\n", dev->name); + panic_queues(dev); + } +#else + BUG_ON(cur_bd >= RX_BUF_NUM); +#endif + skb = lp->rx_skbs[cur_bd].skb; + prefetch(skb->data); + lp->rx_skbs[cur_bd].skb = NULL; + pci_unmap_single(lp->pci_dev, + lp->rx_skbs[cur_bd].skb_dma, + RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN) + memmove(skb->data, skb->data - NET_IP_ALIGN, + pkt_len); + data = skb_put(skb, pkt_len); + if (netif_msg_pktdata(lp)) + print_eth(data); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + received++; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; + } else { + dev->stats.rx_errors++; + if (netif_msg_rx_err(lp)) + dev_info(&dev->dev, "Rx error (status %x)\n", + status & Rx_Stat_Mask); + /* WORKAROUND: LongErr and CRCErr means Overflow. 
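tc35815_rx() above only touches a frame descriptor while its FD_CownsFD ownership bit is clear, then sets the bit again to hand the descriptor back to the controller. A minimal sketch of that handshake; struct fake_fd and the FD_OWNED_BY_NIC value are illustrative, not the chip's layout:

#include <stdio.h>
#include <stdint.h>

#define FD_OWNED_BY_NIC 0x80000000u   /* illustrative ownership flag */
#define NUM_FDS 4

struct fake_fd {
        uint32_t ctl;    /* ownership + status bits */
        uint32_t data;   /* stands in for the buffer pointer */
};

int main(void)
{
        struct fake_fd ring[NUM_FDS] = {
                { 0, 100 }, { 0, 101 }, { FD_OWNED_BY_NIC, 0 }, { FD_OWNED_BY_NIC, 0 },
        };
        unsigned int cur = 0;

        /* Consume descriptors until we hit one still owned by the controller. */
        while (!(ring[cur].ctl & FD_OWNED_BY_NIC)) {
                printf("received frame in descriptor %u (data %u)\n",
                       cur, (unsigned)ring[cur].data);
                ring[cur].ctl = FD_OWNED_BY_NIC;   /* give it back to the NIC */
                cur = (cur + 1) % NUM_FDS;
        }
        return 0;
}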
*/ + if ((status & Rx_LongErr) && (status & Rx_CRCErr)) { + status &= ~(Rx_LongErr|Rx_CRCErr); + status |= Rx_Over; + } + if (status & Rx_LongErr) + dev->stats.rx_length_errors++; + if (status & Rx_Over) + dev->stats.rx_fifo_errors++; + if (status & Rx_CRCErr) + dev->stats.rx_crc_errors++; + if (status & Rx_Align) + dev->stats.rx_frame_errors++; + } + + if (bd_count > 0) { + /* put Free Buffer back to controller */ + int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl); + unsigned char id = + (bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; +#ifdef DEBUG + if (id >= RX_BUF_NUM) { + printk("%s: invalid BDID.\n", dev->name); + panic_queues(dev); + } +#else + BUG_ON(id >= RX_BUF_NUM); +#endif + /* free old buffers */ + lp->fbl_count--; + while (lp->fbl_count < RX_BUF_NUM) + { + unsigned char curid = + (id + 1 + lp->fbl_count) % RX_BUF_NUM; + struct BDesc *bd = &lp->fbl_ptr->bd[curid]; +#ifdef DEBUG + bdctl = le32_to_cpu(bd->BDCtl); + if (bdctl & BD_CownsBD) { + printk("%s: Freeing invalid BD.\n", + dev->name); + panic_queues(dev); + } +#endif + /* pass BD to controller */ + if (!lp->rx_skbs[curid].skb) { + lp->rx_skbs[curid].skb = + alloc_rxbuf_skb(dev, + lp->pci_dev, + &lp->rx_skbs[curid].skb_dma); + if (!lp->rx_skbs[curid].skb) + break; /* try on next reception */ + bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); + } + /* Note: BDLength was modified by chip. */ + bd->BDCtl = cpu_to_le32(BD_CownsBD | + (curid << BD_RxBDID_SHIFT) | + RX_BUF_SIZE); + lp->fbl_count++; + } + } + + /* put RxFD back to controller */ +#ifdef DEBUG + next_rfd = fd_bus_to_virt(lp, + le32_to_cpu(lp->rfd_cur->fd.FDNext)); + if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) { + printk("%s: RxFD FDNext invalid.\n", dev->name); + panic_queues(dev); + } +#endif + for (i = 0; i < (bd_count + 1) / 2 + 1; i++) { + /* pass FD to controller */ +#ifdef DEBUG + lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead); +#else + lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL); +#endif + lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD); + lp->rfd_cur++; + } + if (lp->rfd_cur > lp->rfd_limit) + lp->rfd_cur = lp->rfd_base; +#ifdef DEBUG + if (lp->rfd_cur != next_rfd) + printk("rfd_cur = %p, next_rfd %p\n", + lp->rfd_cur, next_rfd); +#endif + } + + return received; +} + +static int tc35815_poll(struct napi_struct *napi, int budget) +{ + struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); + struct net_device *dev = lp->dev; + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + int received = 0, handled; + u32 status; + + if (budget <= 0) + return received; + + spin_lock(&lp->rx_lock); + status = tc_readl(&tr->Int_Src); + do { + /* BLEx, FDAEx will be cleared later */ + tc_writel(status & ~(Int_BLEx | Int_FDAEx), + &tr->Int_Src); /* write to clear */ + + handled = tc35815_do_interrupt(dev, status, budget - received); + if (status & (Int_BLEx | Int_FDAEx)) + tc_writel(status & (Int_BLEx | Int_FDAEx), + &tr->Int_Src); + if (handled >= 0) { + received += handled; + if (received >= budget) + break; + } + status = tc_readl(&tr->Int_Src); + } while (status); + spin_unlock(&lp->rx_lock); + + if (received < budget) { + napi_complete(napi); + /* enable interrupts */ + tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); + } + return received; +} + +#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr) + +static void +tc35815_check_tx_stat(struct net_device *dev, int status) +{ + struct tc35815_local *lp = netdev_priv(dev); + const char 
*msg = NULL; + + /* count collisions */ + if (status & Tx_ExColl) + dev->stats.collisions += 16; + if (status & Tx_TxColl_MASK) + dev->stats.collisions += status & Tx_TxColl_MASK; + + /* TX4939 does not have NCarr */ + if (lp->chiptype == TC35815_TX4939) + status &= ~Tx_NCarr; + /* WORKAROUND: ignore LostCrS in full duplex operation */ + if (!lp->link || lp->duplex == DUPLEX_FULL) + status &= ~Tx_NCarr; + + if (!(status & TX_STA_ERR)) { + /* no error. */ + dev->stats.tx_packets++; + return; + } + + dev->stats.tx_errors++; + if (status & Tx_ExColl) { + dev->stats.tx_aborted_errors++; + msg = "Excessive Collision."; + } + if (status & Tx_Under) { + dev->stats.tx_fifo_errors++; + msg = "Tx FIFO Underrun."; + if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { + lp->lstats.tx_underrun++; + if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) { + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh); + msg = "Tx FIFO Underrun.Change Tx threshold to max."; + } + } + } + if (status & Tx_Defer) { + dev->stats.tx_fifo_errors++; + msg = "Excessive Deferral."; + } + if (status & Tx_NCarr) { + dev->stats.tx_carrier_errors++; + msg = "Lost Carrier Sense."; + } + if (status & Tx_LateColl) { + dev->stats.tx_aborted_errors++; + msg = "Late Collision."; + } + if (status & Tx_TxPar) { + dev->stats.tx_fifo_errors++; + msg = "Transmit Parity Error."; + } + if (status & Tx_SQErr) { + dev->stats.tx_heartbeat_errors++; + msg = "Signal Quality Error."; + } + if (msg && netif_msg_tx_err(lp)) + printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status); +} + +/* This handles TX complete events posted by the device + * via interrupts. + */ +static void +tc35815_txdone(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct TxFD *txfd; + unsigned int fdctl; + + txfd = &lp->tfd_base[lp->tfd_end]; + while (lp->tfd_start != lp->tfd_end && + !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) { + int status = le32_to_cpu(txfd->fd.FDStat); + struct sk_buff *skb; + unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext); + u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem); + + if (netif_msg_tx_done(lp)) { + printk("%s: complete TxFD.\n", dev->name); + dump_txfd(txfd); + } + tc35815_check_tx_stat(dev, status); + + skb = fdsystem != 0xffffffff ? + lp->tx_skbs[fdsystem].skb : NULL; +#ifdef DEBUG + if (lp->tx_skbs[lp->tfd_end].skb != skb) { + printk("%s: tx_skbs mismatch.\n", dev->name); + panic_queues(dev); + } +#else + BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); +#endif + if (skb) { + dev->stats.tx_bytes += skb->len; + pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); + lp->tx_skbs[lp->tfd_end].skb = NULL; + lp->tx_skbs[lp->tfd_end].skb_dma = 0; + dev_kfree_skb_any(skb); + } + txfd->fd.FDSystem = cpu_to_le32(0xffffffff); + + lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM; + txfd = &lp->tfd_base[lp->tfd_end]; +#ifdef DEBUG + if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) { + printk("%s: TxFD FDNext invalid.\n", dev->name); + panic_queues(dev); + } +#endif + if (fdnext & FD_Next_EOL) { + /* DMA Transmitter has been stopping... 
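tc35815_check_tx_stat() above counts Tx FIFO underruns and, after TX_THRESHOLD_KEEP_LIMIT of them, raises the transmit threshold to TX_THRESHOLD_MAX so the chip buffers more of a frame before starting transmission. A small simulation of that escalation rule using the driver's own constants:

#include <stdio.h>

#define THRESHOLD_DEFAULT    1024   /* TX_THRESHOLD in the driver */
#define THRESHOLD_MAX        1536   /* TX_THRESHOLD_MAX */
#define UNDERRUN_KEEP_LIMIT  10     /* TX_THRESHOLD_KEEP_LIMIT */

int main(void)
{
        int underruns = 0, threshold = THRESHOLD_DEFAULT, i;

        for (i = 0; i < 12; i++) {   /* simulate a run of underrun errors */
                if (underruns < UNDERRUN_KEEP_LIMIT) {
                        underruns++;
                        if (underruns >= UNDERRUN_KEEP_LIMIT)
                                threshold = THRESHOLD_MAX;   /* escalate once, stop counting */
                }
        }
        printf("underruns counted: %d, threshold: %d\n", underruns, threshold);
        return 0;
}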
*/ + if (lp->tfd_end != lp->tfd_start) { + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; + struct TxFD *txhead = &lp->tfd_base[head]; + int qlen = (lp->tfd_start + TX_FD_NUM + - lp->tfd_end) % TX_FD_NUM; + +#ifdef DEBUG + if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) { + printk("%s: TxFD FDCtl invalid.\n", dev->name); + panic_queues(dev); + } +#endif + /* log max queue length */ + if (lp->lstats.max_tx_qlen < qlen) + lp->lstats.max_tx_qlen = qlen; + + + /* start DMA Transmitter again */ + txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); + txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); + if (netif_msg_tx_queued(lp)) { + printk("%s: start TxFD on queue.\n", + dev->name); + dump_txfd(txfd); + } + tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr); + } + break; + } + } + + /* If we had stopped the queue due to a "tx full" + * condition, and space has now been made available, + * wake up the queue. + */ + if (netif_queue_stopped(dev) && !tc35815_tx_full(dev)) + netif_wake_queue(dev); +} + +/* The inverse routine to tc35815_open(). */ +static int +tc35815_close(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&lp->napi); + if (lp->phy_dev) + phy_stop(lp->phy_dev); + cancel_work_sync(&lp->restart_work); + + /* Flush the Tx and disable Rx here. */ + tc35815_chip_reset(dev); + free_irq(dev->irq, dev); + + tc35815_free_queues(dev); + + return 0; + +} + +/* + * Get the current statistics. + * This may be called with the card open or closed. + */ +static struct net_device_stats *tc35815_get_stats(struct net_device *dev) +{ + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + if (netif_running(dev)) + /* Update the statistics from the device registers. */ + dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt); + + return &dev->stats; +} + +static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + int cam_index = index * 6; + u32 cam_data; + u32 saved_addr; + + saved_addr = tc_readl(&tr->CAM_Adr); + + if (netif_msg_hw(lp)) + printk(KERN_DEBUG "%s: CAM %d: %pM\n", + dev->name, index, addr); + if (index & 1) { + /* read modify write */ + tc_writel(cam_index - 2, &tr->CAM_Adr); + cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000; + cam_data |= addr[0] << 8 | addr[1]; + tc_writel(cam_data, &tr->CAM_Data); + /* write whole word */ + tc_writel(cam_index + 2, &tr->CAM_Adr); + cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; + tc_writel(cam_data, &tr->CAM_Data); + } else { + /* write whole word */ + tc_writel(cam_index, &tr->CAM_Adr); + cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; + tc_writel(cam_data, &tr->CAM_Data); + /* read modify write */ + tc_writel(cam_index + 4, &tr->CAM_Adr); + cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff; + cam_data |= addr[4] << 24 | (addr[5] << 16); + tc_writel(cam_data, &tr->CAM_Data); + } + + tc_writel(saved_addr, &tr->CAM_Adr); +} + + +/* + * Set or clear the multicast filter for this adaptor. + * num_addrs == -1 Promiscuous mode, receive all packets + * num_addrs == 0 Normal mode, clear multicast list + * num_addrs > 0 Multicast mode, receive normal and MC packets, + * and do best-effort filtering. 
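tc35815_set_cam_entry() above packs each 6-byte station address into 32-bit CAM data words, using a read-modify-write when an entry starts or ends in the middle of a word. A stand-alone sketch of the even-index packing; the example address is arbitrary:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* Even CAM index: first word holds bytes 0..3, the upper half of the
         * following word holds bytes 4..5 (its lower half is preserved by RMW). */
        uint32_t word0 = ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
                         ((uint32_t)addr[2] << 8)  | addr[3];
        uint32_t word1_hi = ((uint32_t)addr[4] << 24) | ((uint32_t)addr[5] << 16);

        printf("CAM word 0: %08x\n", (unsigned)word0);
        printf("CAM word 1 (upper half): %08x\n", (unsigned)word1_hi);
        return 0;
}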
+ */ +static void +tc35815_set_multicast_list(struct net_device *dev) +{ + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + + if (dev->flags & IFF_PROMISC) { + /* With some (all?) 100MHalf HUB, controller will hang + * if we enabled promiscuous mode before linkup... */ + struct tc35815_local *lp = netdev_priv(dev); + + if (!lp->link) + return; + /* Enable promiscuous mode */ + tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); + } else if ((dev->flags & IFF_ALLMULTI) || + netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) { + /* CAM 0, 1, 20 are reserved. */ + /* Disable promiscuous mode, use normal mode. */ + tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); + } else if (!netdev_mc_empty(dev)) { + struct netdev_hw_addr *ha; + int i; + int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); + + tc_writel(0, &tr->CAM_Ctl); + /* Walk the address list, and load the filter */ + i = 0; + netdev_for_each_mc_addr(ha, dev) { + /* entry 0,1 is reserved. */ + tc35815_set_cam_entry(dev, i + 2, ha->addr); + ena_bits |= CAM_Ena_Bit(i + 2); + i++; + } + tc_writel(ena_bits, &tr->CAM_Ena); + tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); + } else { + tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); + tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); + } +} + +static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct tc35815_local *lp = netdev_priv(dev); + + strlcpy(info->driver, MODNAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info)); +} + +static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tc35815_local *lp = netdev_priv(dev); + + if (!lp->phy_dev) + return -ENODEV; + return phy_ethtool_gset(lp->phy_dev, cmd); +} + +static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tc35815_local *lp = netdev_priv(dev); + + if (!lp->phy_dev) + return -ENODEV; + return phy_ethtool_sset(lp->phy_dev, cmd); +} + +static u32 tc35815_get_msglevel(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void tc35815_set_msglevel(struct net_device *dev, u32 datum) +{ + struct tc35815_local *lp = netdev_priv(dev); + lp->msg_enable = datum; +} + +static int tc35815_get_sset_count(struct net_device *dev, int sset) +{ + struct tc35815_local *lp = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return sizeof(lp->lstats) / sizeof(int); + default: + return -EOPNOTSUPP; + } +} + +static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) +{ + struct tc35815_local *lp = netdev_priv(dev); + data[0] = lp->lstats.max_tx_qlen; + data[1] = lp->lstats.tx_ints; + data[2] = lp->lstats.rx_ints; + data[3] = lp->lstats.tx_underrun; +} + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "max_tx_qlen" }, + { "tx_ints" }, + { "rx_ints" }, + { "tx_underrun" }, +}; + +static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); +} + +static const struct ethtool_ops tc35815_ethtool_ops = { + .get_drvinfo = tc35815_get_drvinfo, + .get_settings = tc35815_get_settings, + .set_settings = tc35815_set_settings, + .get_link = ethtool_op_get_link, + .get_msglevel = tc35815_get_msglevel, + .set_msglevel = 
tc35815_set_msglevel, + .get_strings = tc35815_get_strings, + .get_sset_count = tc35815_get_sset_count, + .get_ethtool_stats = tc35815_get_ethtool_stats, +}; + +static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct tc35815_local *lp = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + if (!lp->phy_dev) + return -ENODEV; + return phy_mii_ioctl(lp->phy_dev, rq, cmd); +} + +static void tc35815_chip_reset(struct net_device *dev) +{ + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + int i; + /* reset the controller */ + tc_writel(MAC_Reset, &tr->MAC_Ctl); + udelay(4); /* 3200ns */ + i = 0; + while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) { + if (i++ > 100) { + printk(KERN_ERR "%s: MAC reset failed.\n", dev->name); + break; + } + mdelay(1); + } + tc_writel(0, &tr->MAC_Ctl); + + /* initialize registers to default value */ + tc_writel(0, &tr->DMA_Ctl); + tc_writel(0, &tr->TxThrsh); + tc_writel(0, &tr->TxPollCtr); + tc_writel(0, &tr->RxFragSize); + tc_writel(0, &tr->Int_En); + tc_writel(0, &tr->FDA_Bas); + tc_writel(0, &tr->FDA_Lim); + tc_writel(0xffffffff, &tr->Int_Src); /* Write 1 to clear */ + tc_writel(0, &tr->CAM_Ctl); + tc_writel(0, &tr->Tx_Ctl); + tc_writel(0, &tr->Rx_Ctl); + tc_writel(0, &tr->CAM_Ena); + (void)tc_readl(&tr->Miss_Cnt); /* Read to clear */ + + /* initialize internal SRAM */ + tc_writel(DMA_TestMode, &tr->DMA_Ctl); + for (i = 0; i < 0x1000; i += 4) { + tc_writel(i, &tr->CAM_Adr); + tc_writel(0, &tr->CAM_Data); + } + tc_writel(0, &tr->DMA_Ctl); +} + +static void tc35815_chip_init(struct net_device *dev) +{ + struct tc35815_local *lp = netdev_priv(dev); + struct tc35815_regs __iomem *tr = + (struct tc35815_regs __iomem *)dev->base_addr; + unsigned long txctl = TX_CTL_CMD; + + /* load station address to CAM */ + tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); + + /* Enable CAM (broadcast and unicast) */ + tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); + tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); + + /* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */ + if (HAVE_DMA_RXALIGN(lp)) + tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); + else + tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); + tc_writel(0, &tr->TxPollCtr); /* Batch mode */ + tc_writel(TX_THRESHOLD, &tr->TxThrsh); + tc_writel(INT_EN_CMD, &tr->Int_En); + + /* set queues */ + tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas); + tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base, + &tr->FDA_Lim); + /* + * Activation method: + * First, enable the MAC Transmitter and the DMA Receive circuits. + * Then enable the DMA Transmitter and the MAC Receive circuits. 
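The ethtool statistics code in this patch (spider_net's as well as tc35815's) relies on one contract: get_sset_count() reports how many counters exist, get_strings() copies the name table, and get_ethtool_stats() fills the values in exactly the same order. A user-space sketch of that pairing; the key names are tc35815's, while demo_stats and its numbers are invented:

#include <stdio.h>
#include <stdint.h>

#define GSTRING_LEN 32   /* the kernel uses ETH_GSTRING_LEN (32) */

static const char keys[][GSTRING_LEN] = {
        "max_tx_qlen", "tx_ints", "rx_ints", "tx_underrun",
};

struct demo_stats { uint64_t max_tx_qlen, tx_ints, rx_ints, tx_underrun; };

/* Fill order must match the keys[] order, entry for entry. */
static void fill_stats(const struct demo_stats *s, uint64_t *data)
{
        data[0] = s->max_tx_qlen;
        data[1] = s->tx_ints;
        data[2] = s->rx_ints;
        data[3] = s->tx_underrun;
}

int main(void)
{
        struct demo_stats s = { 3, 120, 450, 1 };
        uint64_t data[sizeof(keys) / sizeof(keys[0])];
        size_t i;

        fill_stats(&s, data);
        for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
                printf("%-16s %llu\n", keys[i], (unsigned long long)data[i]);
        return 0;
}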
+ */ + tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr); /* start DMA receiver */ + tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ + + /* start MAC transmitter */ + /* TX4939 does not have EnLCarr */ + if (lp->chiptype == TC35815_TX4939) + txctl &= ~Tx_EnLCarr; + /* WORKAROUND: ignore LostCrS in full duplex operation */ + if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) + txctl &= ~Tx_EnLCarr; + tc_writel(txctl, &tr->Tx_Ctl); +} + +#ifdef CONFIG_PM +static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tc35815_local *lp = netdev_priv(dev); + unsigned long flags; + + pci_save_state(pdev); + if (!netif_running(dev)) + return 0; + netif_device_detach(dev); + if (lp->phy_dev) + phy_stop(lp->phy_dev); + spin_lock_irqsave(&lp->lock, flags); + tc35815_chip_reset(dev); + spin_unlock_irqrestore(&lp->lock, flags); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +static int tc35815_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct tc35815_local *lp = netdev_priv(dev); + + pci_restore_state(pdev); + if (!netif_running(dev)) + return 0; + pci_set_power_state(pdev, PCI_D0); + tc35815_restart(dev); + netif_carrier_off(dev); + if (lp->phy_dev) + phy_start(lp->phy_dev); + netif_device_attach(dev); + return 0; +} +#endif /* CONFIG_PM */ + +static struct pci_driver tc35815_pci_driver = { + .name = MODNAME, + .id_table = tc35815_pci_tbl, + .probe = tc35815_init_one, + .remove = tc35815_remove_one, +#ifdef CONFIG_PM + .suspend = tc35815_suspend, + .resume = tc35815_resume, +#endif +}; + +module_param_named(speed, options.speed, int, 0); +MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); +module_param_named(duplex, options.duplex, int, 0); +MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); + +module_pci_driver(tc35815_pci_driver); +MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/tundra/Kconfig b/drivers/net/ethernet/tundra/Kconfig new file mode 100644 index 000000000..cf7d69b62 --- /dev/null +++ b/drivers/net/ethernet/tundra/Kconfig @@ -0,0 +1,29 @@ +# +# Tundra network device configuration +# + +config NET_VENDOR_TUNDRA + bool "Tundra devices" + default y + depends on TSI108_BRIDGE + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Tundra cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_TUNDRA + +config TSI108_ETH + tristate "Tundra TSI108 gigabit Ethernet support" + depends on TSI108_BRIDGE + ---help--- + This driver supports Tundra TSI108 gigabit Ethernet ports. + To compile this driver as a module, choose M here: the module + will be called tsi108_eth. + +endif # NET_VENDOR_TUNDRA diff --git a/drivers/net/ethernet/tundra/Makefile b/drivers/net/ethernet/tundra/Makefile new file mode 100644 index 000000000..439f69302 --- /dev/null +++ b/drivers/net/ethernet/tundra/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Tundra network device drivers. 
+# + +obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c new file mode 100644 index 000000000..520cf50a3 --- /dev/null +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -0,0 +1,1694 @@ +/******************************************************************************* + + Copyright(c) 2006 Tundra Semiconductor Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the Free + Software Foundation; either version 2 of the License, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +*******************************************************************************/ + +/* This driver is based on the driver code originally developed + * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by + * scott.wood@timesys.com * Copyright (C) 2003 TimeSys Corporation + * + * Currently changes from original version are: + * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com) + * - modifications to handle two ports independently and support for + * additional PHY devices (alexandre.bounine@tundra.com) + * - Get hardware information from platform device. (tie-fei.zang@freescale.com) + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "tsi108_eth.h" + +#define MII_READ_DELAY 10000 /* max link wait time in msec */ + +#define TSI108_RXRING_LEN 256 + +/* NOTE: The driver currently does not support receiving packets + * larger than the buffer size, so don't decrease this (unless you + * want to add such support). + */ +#define TSI108_RXBUF_SIZE 1536 + +#define TSI108_TXRING_LEN 256 + +#define TSI108_TX_INT_FREQ 64 + +/* Check the phy status every half a second. */ +#define CHECK_PHY_INTERVAL (HZ/2) + +static int tsi108_init_one(struct platform_device *pdev); +static int tsi108_ether_remove(struct platform_device *pdev); + +struct tsi108_prv_data { + void __iomem *regs; /* Base of normal regs */ + void __iomem *phyregs; /* Base of register bank used for PHY access */ + + struct net_device *dev; + struct napi_struct napi; + + unsigned int phy; /* Index of PHY for this interface */ + unsigned int irq_num; + unsigned int id; + unsigned int phy_type; + + struct timer_list timer;/* Timer that triggers the check phy function */ + unsigned int rxtail; /* Next entry in rxring to read */ + unsigned int rxhead; /* Next entry in rxring to give a new buffer */ + unsigned int rxfree; /* Number of free, allocated RX buffers */ + + unsigned int rxpending; /* Non-zero if there are still descriptors + * to be processed from a previous descriptor + * interrupt condition that has been cleared */ + + unsigned int txtail; /* Next TX descriptor to check status on */ + unsigned int txhead; /* Next TX descriptor to use */ + + /* Number of free TX descriptors. 
This could be calculated from + * rxhead and rxtail if one descriptor were left unused to disambiguate + * full and empty conditions, but it's simpler to just keep track + * explicitly. */ + + unsigned int txfree; + + unsigned int phy_ok; /* The PHY is currently powered on. */ + + /* PHY status (duplex is 1 for half, 2 for full, + * so that the default 0 indicates that neither has + * yet been configured). */ + + unsigned int link_up; + unsigned int speed; + unsigned int duplex; + + tx_desc *txring; + rx_desc *rxring; + struct sk_buff *txskbs[TSI108_TXRING_LEN]; + struct sk_buff *rxskbs[TSI108_RXRING_LEN]; + + dma_addr_t txdma, rxdma; + + /* txlock nests in misclock and phy_lock */ + + spinlock_t txlock, misclock; + + /* stats is used to hold the upper bits of each hardware counter, + * and tmpstats is used to hold the full values for returning + * to the caller of get_stats(). They must be separate in case + * an overflow interrupt occurs before the stats are consumed. + */ + + struct net_device_stats stats; + struct net_device_stats tmpstats; + + /* These stats are kept separate in hardware, thus require individual + * fields for handling carry. They are combined in get_stats. + */ + + unsigned long rx_fcs; /* Add to rx_frame_errors */ + unsigned long rx_short_fcs; /* Add to rx_frame_errors */ + unsigned long rx_long_fcs; /* Add to rx_frame_errors */ + unsigned long rx_underruns; /* Add to rx_length_errors */ + unsigned long rx_overruns; /* Add to rx_length_errors */ + + unsigned long tx_coll_abort; /* Add to tx_aborted_errors/collisions */ + unsigned long tx_pause_drop; /* Add to tx_aborted_errors */ + + unsigned long mc_hash[16]; + u32 msg_enable; /* debug message level */ + struct mii_if_info mii_if; + unsigned int init_media; +}; + +/* Structure for a device driver */ + +static struct platform_driver tsi_eth_driver = { + .probe = tsi108_init_one, + .remove = tsi108_ether_remove, + .driver = { + .name = "tsi-ethernet", + }, +}; + +static void tsi108_timed_checker(unsigned long dev_ptr); + +static void dump_eth_one(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + + printk("Dumping %s...\n", dev->name); + printk("intstat %x intmask %x phy_ok %d" + " link %d speed %d duplex %d\n", + TSI_READ(TSI108_EC_INTSTAT), + TSI_READ(TSI108_EC_INTMASK), data->phy_ok, + data->link_up, data->speed, data->duplex); + + printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n", + data->txhead, data->txtail, data->txfree, + TSI_READ(TSI108_EC_TXSTAT), + TSI_READ(TSI108_EC_TXESTAT), + TSI_READ(TSI108_EC_TXERR)); + + printk("RX: head %d, tail %d, free %d, stat %x," + " estat %x, err %x, pending %d\n\n", + data->rxhead, data->rxtail, data->rxfree, + TSI_READ(TSI108_EC_RXSTAT), + TSI_READ(TSI108_EC_RXESTAT), + TSI_READ(TSI108_EC_RXERR), data->rxpending); +} + +/* Synchronization is needed between the thread and up/down events. + * Note that the PHY is accessed through the same registers for both + * interfaces, so this can't be made interface-specific. 
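/*
 * Sketch of the head/tail/free ring bookkeeping declared in the structure
 * above: instead of leaving one slot unused to tell "full" from "empty", an
 * explicit free counter disambiguates the two cases.  All names here are
 * illustrative, not the driver's own.
 */
#define DEMO_RING_LEN 256

struct demo_ring {
	unsigned int head;	/* next slot to produce into */
	unsigned int tail;	/* next slot to consume from */
	unsigned int free;	/* 0 = full, DEMO_RING_LEN = empty */
};

static int demo_ring_produce(struct demo_ring *r)
{
	if (!r->free)
		return -1;			/* full, even though head == tail */
	r->head = (r->head + 1) % DEMO_RING_LEN;
	r->free--;
	return 0;
}

static int demo_ring_consume(struct demo_ring *r)
{
	if (r->free == DEMO_RING_LEN)
		return -1;			/* empty, also head == tail */
	r->tail = (r->tail + 1) % DEMO_RING_LEN;
	r->free++;
	return 0;
}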
+ */ + +static DEFINE_SPINLOCK(phy_lock); + +static int tsi108_read_mii(struct tsi108_prv_data *data, int reg) +{ + unsigned i; + + TSI_WRITE_PHY(TSI108_MAC_MII_ADDR, + (data->phy << TSI108_MAC_MII_ADDR_PHY) | + (reg << TSI108_MAC_MII_ADDR_REG)); + TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0); + TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ); + for (i = 0; i < 100; i++) { + if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) & + (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY))) + break; + udelay(10); + } + + if (i == 100) + return 0xffff; + else + return TSI_READ_PHY(TSI108_MAC_MII_DATAIN); +} + +static void tsi108_write_mii(struct tsi108_prv_data *data, + int reg, u16 val) +{ + unsigned i = 100; + TSI_WRITE_PHY(TSI108_MAC_MII_ADDR, + (data->phy << TSI108_MAC_MII_ADDR_PHY) | + (reg << TSI108_MAC_MII_ADDR_REG)); + TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val); + while (i--) { + if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) & + TSI108_MAC_MII_IND_BUSY)) + break; + udelay(10); + } +} + +static int tsi108_mdio_read(struct net_device *dev, int addr, int reg) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + return tsi108_read_mii(data, reg); +} + +static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + tsi108_write_mii(data, reg, val); +} + +static inline void tsi108_write_tbi(struct tsi108_prv_data *data, + int reg, u16 val) +{ + unsigned i = 1000; + TSI_WRITE(TSI108_MAC_MII_ADDR, + (0x1e << TSI108_MAC_MII_ADDR_PHY) + | (reg << TSI108_MAC_MII_ADDR_REG)); + TSI_WRITE(TSI108_MAC_MII_DATAOUT, val); + while(i--) { + if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY)) + return; + udelay(10); + } + printk(KERN_ERR "%s function time out\n", __func__); +} + +static int mii_speed(struct mii_if_info *mii) +{ + int advert, lpa, val, media; + int lpa2 = 0; + int speed; + + if (!mii_link_ok(mii)) + return 0; + + val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR); + if ((val & BMSR_ANEGCOMPLETE) == 0) + return 0; + + advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE); + lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA); + media = mii_nway_result(advert & lpa); + + if (mii->supports_gmii) + lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); + + speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : + (media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 
100 : 10); + return speed; +} + +static void tsi108_check_phy(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + u32 mac_cfg2_reg, portctrl_reg; + u32 duplex; + u32 speed; + unsigned long flags; + + spin_lock_irqsave(&phy_lock, flags); + + if (!data->phy_ok) + goto out; + + duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media); + data->init_media = 0; + + if (netif_carrier_ok(dev)) { + + speed = mii_speed(&data->mii_if); + + if ((speed != data->speed) || duplex) { + + mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2); + portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL); + + mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK; + + if (speed == 1000) { + mac_cfg2_reg |= TSI108_MAC_CFG2_GIG; + portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG; + } else { + mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG; + portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG; + } + + data->speed = speed; + + if (data->mii_if.full_duplex) { + mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX; + portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX; + data->duplex = 2; + } else { + mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX; + portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX; + data->duplex = 1; + } + + TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg); + TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg); + } + + if (data->link_up == 0) { + /* The manual says it can take 3-4 usecs for the speed change + * to take effect. + */ + udelay(5); + + spin_lock(&data->txlock); + if (is_valid_ether_addr(dev->dev_addr) && data->txfree) + netif_wake_queue(dev); + + data->link_up = 1; + spin_unlock(&data->txlock); + } + } else { + if (data->link_up == 1) { + netif_stop_queue(dev); + data->link_up = 0; + printk(KERN_NOTICE "%s : link is down\n", dev->name); + } + + goto out; + } + + +out: + spin_unlock_irqrestore(&phy_lock, flags); +} + +static inline void +tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift, + unsigned long *upper) +{ + if (carry & carry_bit) + *upper += carry_shift; +} + +static void tsi108_stat_carry(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + u32 carry1, carry2; + + spin_lock_irq(&data->misclock); + + carry1 = TSI_READ(TSI108_STAT_CARRY1); + carry2 = TSI_READ(TSI108_STAT_CARRY2); + + TSI_WRITE(TSI108_STAT_CARRY1, carry1); + TSI_WRITE(TSI108_STAT_CARRY2, carry2); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES, + TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS, + TSI108_STAT_RXPKTS_CARRY, + &data->stats.rx_packets); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS, + TSI108_STAT_RXFCS_CARRY, &data->rx_fcs); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST, + TSI108_STAT_RXMCAST_CARRY, + &data->stats.multicast); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN, + TSI108_STAT_RXALIGN_CARRY, + &data->stats.rx_frame_errors); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH, + TSI108_STAT_RXLENGTH_CARRY, + &data->stats.rx_length_errors); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT, + TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO, + TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG, + TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER, + TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs); + + tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP, + TSI108_STAT_RXDROP_CARRY, + 
&data->stats.rx_missed_errors); + + tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES, + TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes); + + tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS, + TSI108_STAT_TXPKTS_CARRY, + &data->stats.tx_packets); + + tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF, + TSI108_STAT_TXEXDEF_CARRY, + &data->stats.tx_aborted_errors); + + tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL, + TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort); + + tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL, + TSI108_STAT_TXTCOL_CARRY, + &data->stats.collisions); + + tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE, + TSI108_STAT_TXPAUSEDROP_CARRY, + &data->tx_pause_drop); + + spin_unlock_irq(&data->misclock); +} + +/* Read a stat counter atomically with respect to carries. + * data->misclock must be held. + */ +static inline unsigned long +tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit, + int carry_shift, unsigned long *upper) +{ + int carryreg; + unsigned long val; + + if (reg < 0xb0) + carryreg = TSI108_STAT_CARRY1; + else + carryreg = TSI108_STAT_CARRY2; + + again: + val = TSI_READ(reg) | *upper; + + /* Check to see if it overflowed, but the interrupt hasn't + * been serviced yet. If so, handle the carry here, and + * try again. + */ + + if (unlikely(TSI_READ(carryreg) & carry_bit)) { + *upper += carry_shift; + TSI_WRITE(carryreg, carry_bit); + goto again; + } + + return val; +} + +static struct net_device_stats *tsi108_get_stats(struct net_device *dev) +{ + unsigned long excol; + + struct tsi108_prv_data *data = netdev_priv(dev); + spin_lock_irq(&data->misclock); + + data->tmpstats.rx_packets = + tsi108_read_stat(data, TSI108_STAT_RXPKTS, + TSI108_STAT_CARRY1_RXPKTS, + TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets); + + data->tmpstats.tx_packets = + tsi108_read_stat(data, TSI108_STAT_TXPKTS, + TSI108_STAT_CARRY2_TXPKTS, + TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets); + + data->tmpstats.rx_bytes = + tsi108_read_stat(data, TSI108_STAT_RXBYTES, + TSI108_STAT_CARRY1_RXBYTES, + TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes); + + data->tmpstats.tx_bytes = + tsi108_read_stat(data, TSI108_STAT_TXBYTES, + TSI108_STAT_CARRY2_TXBYTES, + TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes); + + data->tmpstats.multicast = + tsi108_read_stat(data, TSI108_STAT_RXMCAST, + TSI108_STAT_CARRY1_RXMCAST, + TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast); + + excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL, + TSI108_STAT_CARRY2_TXEXCOL, + TSI108_STAT_TXEXCOL_CARRY, + &data->tx_coll_abort); + + data->tmpstats.collisions = + tsi108_read_stat(data, TSI108_STAT_TXTCOL, + TSI108_STAT_CARRY2_TXTCOL, + TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions); + + data->tmpstats.collisions += excol; + + data->tmpstats.rx_length_errors = + tsi108_read_stat(data, TSI108_STAT_RXLENGTH, + TSI108_STAT_CARRY1_RXLENGTH, + TSI108_STAT_RXLENGTH_CARRY, + &data->stats.rx_length_errors); + + data->tmpstats.rx_length_errors += + tsi108_read_stat(data, TSI108_STAT_RXRUNT, + TSI108_STAT_CARRY1_RXRUNT, + TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns); + + data->tmpstats.rx_length_errors += + tsi108_read_stat(data, TSI108_STAT_RXJUMBO, + TSI108_STAT_CARRY1_RXJUMBO, + TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns); + + data->tmpstats.rx_frame_errors = + tsi108_read_stat(data, TSI108_STAT_RXALIGN, + TSI108_STAT_CARRY1_RXALIGN, + TSI108_STAT_RXALIGN_CARRY, + &data->stats.rx_frame_errors); + + data->tmpstats.rx_frame_errors += + 
tsi108_read_stat(data, TSI108_STAT_RXFCS, + TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY, + &data->rx_fcs); + + data->tmpstats.rx_frame_errors += + tsi108_read_stat(data, TSI108_STAT_RXFRAG, + TSI108_STAT_CARRY1_RXFRAG, + TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs); + + data->tmpstats.rx_missed_errors = + tsi108_read_stat(data, TSI108_STAT_RXDROP, + TSI108_STAT_CARRY1_RXDROP, + TSI108_STAT_RXDROP_CARRY, + &data->stats.rx_missed_errors); + + /* These three are maintained by software. */ + data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors; + data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors; + + data->tmpstats.tx_aborted_errors = + tsi108_read_stat(data, TSI108_STAT_TXEXDEF, + TSI108_STAT_CARRY2_TXEXDEF, + TSI108_STAT_TXEXDEF_CARRY, + &data->stats.tx_aborted_errors); + + data->tmpstats.tx_aborted_errors += + tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP, + TSI108_STAT_CARRY2_TXPAUSE, + TSI108_STAT_TXPAUSEDROP_CARRY, + &data->tx_pause_drop); + + data->tmpstats.tx_aborted_errors += excol; + + data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors; + data->tmpstats.rx_errors = data->tmpstats.rx_length_errors + + data->tmpstats.rx_crc_errors + + data->tmpstats.rx_frame_errors + + data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors; + + spin_unlock_irq(&data->misclock); + return &data->tmpstats; +} + +static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev) +{ + TSI_WRITE(TSI108_EC_RXQ_PTRHIGH, + TSI108_EC_RXQ_PTRHIGH_VALID); + + TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO + | TSI108_EC_RXCTRL_QUEUE0); +} + +static void tsi108_restart_tx(struct tsi108_prv_data * data) +{ + TSI_WRITE(TSI108_EC_TXQ_PTRHIGH, + TSI108_EC_TXQ_PTRHIGH_VALID); + + TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT | + TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0); +} + +/* txlock must be held by caller, with IRQs disabled, and + * with permission to re-enable them when the lock is dropped. 
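/*
 * Sketch of the carry-extension scheme used by the statistics code above:
 * each hardware counter is narrow and latches a carry bit when it wraps;
 * software keeps the upper part and, when reading, re-checks the carry so a
 * wrap that happened before the carry interrupt ran is not lost.  The fake
 * 24-bit counter below is purely illustrative.
 */
#define DEMO_CTR_SPAN (1UL << 24)	/* value added per hardware wrap */

struct demo_counter {
	unsigned long raw;	/* low bits, as the hardware would report them */
	int carry;		/* set by "hardware" when raw wraps */
	unsigned long upper;	/* software-maintained high part */
};

static unsigned long long demo_counter_read(struct demo_counter *c)
{
	unsigned long long val;

	do {
		val = (unsigned long long)c->upper + c->raw;
		if (!c->carry)
			break;
		/* Wrap not yet serviced: fold it in and re-read, the same
		 * way tsi108_read_stat() retries on a pending carry bit.
		 */
		c->upper += DEMO_CTR_SPAN;
		c->carry = 0;
	} while (1);

	return val;
}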
+ */ +static void tsi108_complete_tx(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + int tx; + struct sk_buff *skb; + int release = 0; + + while (!data->txfree || data->txhead != data->txtail) { + tx = data->txtail; + + if (data->txring[tx].misc & TSI108_TX_OWN) + break; + + skb = data->txskbs[tx]; + + if (!(data->txring[tx].misc & TSI108_TX_OK)) + printk("%s: bad tx packet, misc %x\n", + dev->name, data->txring[tx].misc); + + data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN; + data->txfree++; + + if (data->txring[tx].misc & TSI108_TX_EOF) { + dev_kfree_skb_any(skb); + release++; + } + } + + if (release) { + if (is_valid_ether_addr(dev->dev_addr) && data->link_up) + netif_wake_queue(dev); + } +} + +static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + int frags = skb_shinfo(skb)->nr_frags + 1; + int i; + + if (!data->phy_ok && net_ratelimit()) + printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name); + + if (!data->link_up) { + printk(KERN_ERR "%s: Transmit while link is down!\n", + dev->name); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + if (data->txfree < MAX_SKB_FRAGS + 1) { + netif_stop_queue(dev); + + if (net_ratelimit()) + printk(KERN_ERR "%s: Transmit with full tx ring!\n", + dev->name); + return NETDEV_TX_BUSY; + } + + if (data->txfree - frags < MAX_SKB_FRAGS + 1) { + netif_stop_queue(dev); + } + + spin_lock_irq(&data->txlock); + + for (i = 0; i < frags; i++) { + int misc = 0; + int tx = data->txhead; + + /* This is done to mark every TSI108_TX_INT_FREQ tx buffers with + * the interrupt bit. TX descriptor-complete interrupts are + * enabled when the queue fills up, and masked when there is + * still free space. This way, when saturating the outbound + * link, the tx interrupts are kept to a reasonable level. + * When the queue is not full, reclamation of skbs still occurs + * as new packets are transmitted, or on a queue-empty + * interrupt. + */ + + if ((tx % TSI108_TX_INT_FREQ == 0) && + ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ)) + misc = TSI108_TX_INT; + + data->txskbs[tx] = skb; + + if (i == 0) { + data->txring[tx].buf0 = dma_map_single(NULL, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + data->txring[tx].len = skb_headlen(skb); + misc |= TSI108_TX_SOF; + } else { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + + data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag, + 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + data->txring[tx].len = skb_frag_size(frag); + } + + if (i == frags - 1) + misc |= TSI108_TX_EOF; + + if (netif_msg_pktdata(data)) { + int i; + printk("%s: Tx Frame contents (%d)\n", dev->name, + skb->len); + for (i = 0; i < skb->len; i++) + printk(" %2.2x", skb->data[i]); + printk(".\n"); + } + data->txring[tx].misc = misc | TSI108_TX_OWN; + + data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN; + data->txfree--; + } + + tsi108_complete_tx(dev); + + /* This must be done after the check for completed tx descriptors, + * so that the tail pointer is correct. 
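/*
 * Sketch of the TX interrupt moderation rule used in the transmit path above:
 * only every TSI108_TX_INT_FREQ-th descriptor requests a completion
 * interrupt, and only while at least that many descriptors are in flight;
 * otherwise completed skbs are reclaimed on the next transmit or on the
 * queue-empty interrupt.  Parameter names below are illustrative.
 */
static int demo_tx_slot_wants_irq(unsigned int slot, unsigned int ring_len,
				  unsigned int free_slots, unsigned int freq)
{
	unsigned int in_flight = ring_len - free_slots;

	return (slot % freq == 0) && (in_flight >= freq);
}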
+ */ + + if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0)) + tsi108_restart_tx(data); + + spin_unlock_irq(&data->txlock); + return NETDEV_TX_OK; +} + +static int tsi108_complete_rx(struct net_device *dev, int budget) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + int done = 0; + + while (data->rxfree && done != budget) { + int rx = data->rxtail; + struct sk_buff *skb; + + if (data->rxring[rx].misc & TSI108_RX_OWN) + break; + + skb = data->rxskbs[rx]; + data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN; + data->rxfree--; + done++; + + if (data->rxring[rx].misc & TSI108_RX_BAD) { + spin_lock_irq(&data->misclock); + + if (data->rxring[rx].misc & TSI108_RX_CRC) + data->stats.rx_crc_errors++; + if (data->rxring[rx].misc & TSI108_RX_OVER) + data->stats.rx_fifo_errors++; + + spin_unlock_irq(&data->misclock); + + dev_kfree_skb_any(skb); + continue; + } + if (netif_msg_pktdata(data)) { + int i; + printk("%s: Rx Frame contents (%d)\n", + dev->name, data->rxring[rx].len); + for (i = 0; i < data->rxring[rx].len; i++) + printk(" %2.2x", skb->data[i]); + printk(".\n"); + } + + skb_put(skb, data->rxring[rx].len); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + + return done; +} + +static int tsi108_refill_rx(struct net_device *dev, int budget) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + int done = 0; + + while (data->rxfree != TSI108_RXRING_LEN && done != budget) { + int rx = data->rxhead; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE); + data->rxskbs[rx] = skb; + if (!skb) + break; + + data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, + TSI108_RX_SKB_SIZE, + DMA_FROM_DEVICE); + + /* Sometimes the hardware sets blen to zero after packet + * reception, even though the manual says that it's only ever + * modified by the driver. + */ + + data->rxring[rx].blen = TSI108_RX_SKB_SIZE; + data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT; + + data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN; + data->rxfree++; + done++; + } + + if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) & + TSI108_EC_RXSTAT_QUEUE0)) + tsi108_restart_rx(data, dev); + + return done; +} + +static int tsi108_poll(struct napi_struct *napi, int budget) +{ + struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi); + struct net_device *dev = data->dev; + u32 estat = TSI_READ(TSI108_EC_RXESTAT); + u32 intstat = TSI_READ(TSI108_EC_INTSTAT); + int num_received = 0, num_filled = 0; + + intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH | + TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT; + + TSI_WRITE(TSI108_EC_RXESTAT, estat); + TSI_WRITE(TSI108_EC_INTSTAT, intstat); + + if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT)) + num_received = tsi108_complete_rx(dev, budget); + + /* This should normally fill no more slots than the number of + * packets received in tsi108_complete_rx(). The exception + * is when we previously ran out of memory for RX SKBs. In that + * case, it's helpful to obey the budget, not only so that the + * CPU isn't hogged, but so that memory (which may still be low) + * is not hogged by one device. + * + * A work unit is considered to be two SKBs to allow us to catch + * up when the ring has shrunk due to out-of-memory but we're + * still removing the full budget's worth of packets each time. 
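/*
 * Worked example of the refill budget described above, with the defaults in
 * this file (poll budget 64): a steady-state pass completes at most 64
 * packets and refills roughly as many buffers, while after an out-of-memory
 * spell the refill call may post up to 64 * 2 = 128 buffers.  Since each
 * completion removes one buffer from the ring and each refill adds one, the
 * ring can regain up to 64 slots per pass without exceeding the NAPI budget
 * for received packets.  Illustrative helper only.
 */
static unsigned int demo_refill_budget(unsigned int poll_budget)
{
	return poll_budget * 2;		/* matches the "budget * 2" used below */
}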
+ */ + + if (data->rxfree < TSI108_RXRING_LEN) + num_filled = tsi108_refill_rx(dev, budget * 2); + + if (intstat & TSI108_INT_RXERROR) { + u32 err = TSI_READ(TSI108_EC_RXERR); + TSI_WRITE(TSI108_EC_RXERR, err); + + if (err) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: RX error %x\n", + dev->name, err); + + if (!(TSI_READ(TSI108_EC_RXSTAT) & + TSI108_EC_RXSTAT_QUEUE0)) + tsi108_restart_rx(data, dev); + } + } + + if (intstat & TSI108_INT_RXOVERRUN) { + spin_lock_irq(&data->misclock); + data->stats.rx_fifo_errors++; + spin_unlock_irq(&data->misclock); + } + + if (num_received < budget) { + data->rxpending = 0; + napi_complete(napi); + + TSI_WRITE(TSI108_EC_INTMASK, + TSI_READ(TSI108_EC_INTMASK) + & ~(TSI108_INT_RXQUEUE0 + | TSI108_INT_RXTHRESH | + TSI108_INT_RXOVERRUN | + TSI108_INT_RXERROR | + TSI108_INT_RXWAIT)); + } else { + data->rxpending = 1; + } + + return num_received; +} + +static void tsi108_rx_int(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + + /* A race could cause dev to already be scheduled, so it's not an + * error if that happens (and interrupts shouldn't be re-masked, + * because that can cause harmful races, if poll has already + * unmasked them but not cleared LINK_STATE_SCHED). + * + * This can happen if this code races with tsi108_poll(), which masks + * the interrupts after tsi108_irq_one() read the mask, but before + * napi_schedule is called. It could also happen due to calls + * from tsi108_check_rxring(). + */ + + if (napi_schedule_prep(&data->napi)) { + /* Mask, rather than ack, the receive interrupts. The ack + * will happen in tsi108_poll(). + */ + + TSI_WRITE(TSI108_EC_INTMASK, + TSI_READ(TSI108_EC_INTMASK) | + TSI108_INT_RXQUEUE0 + | TSI108_INT_RXTHRESH | + TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | + TSI108_INT_RXWAIT); + __napi_schedule(&data->napi); + } else { + if (!netif_running(dev)) { + /* This can happen if an interrupt occurs while the + * interface is being brought down, as the START + * bit is cleared before the stop function is called. + * + * In this case, the interrupts must be masked, or + * they will continue indefinitely. + * + * There's a race here if the interface is brought down + * and then up in rapid succession, as the device could + * be made running after the above check and before + * the masking below. This will only happen if the IRQ + * thread has a lower priority than the task brining + * up the interface. Fixing this race would likely + * require changes in generic code. + */ + + TSI_WRITE(TSI108_EC_INTMASK, + TSI_READ + (TSI108_EC_INTMASK) | + TSI108_INT_RXQUEUE0 | + TSI108_INT_RXTHRESH | + TSI108_INT_RXOVERRUN | + TSI108_INT_RXERROR | + TSI108_INT_RXWAIT); + } + } +} + +/* If the RX ring has run out of memory, try periodically + * to allocate some more, as otherwise poll would never + * get called (apart from the initial end-of-queue condition). + * + * This is called once per second (by default) from the thread. + */ + +static void tsi108_check_rxring(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + + /* A poll is scheduled, as opposed to caling tsi108_refill_rx + * directly, so as to keep the receive path single-threaded + * (and thus not needing a lock). 
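/*
 * Sketch of the mask-in-IRQ / unmask-in-poll handshake described above, with
 * a fake mask word standing in for TSI108_EC_INTMASK (a set bit means the
 * source is masked).  The hard IRQ handler only masks and schedules the poll
 * routine; acking happens in the poll routine, and the RX sources are
 * unmasked again only once a pass finishes under budget.
 */
static unsigned int demo_intmask;	/* set bit = source masked */

#define DEMO_RX_SOURCES 0x1fu

static void demo_rx_hardirq(void)
{
	demo_intmask |= DEMO_RX_SOURCES;	/* mask, do not ack here */
	/* ...then schedule the poll routine (napi_schedule() in the driver) */
}

static void demo_rx_poll_finished(void)
{
	/* ...after the poll pass ends under budget (napi_complete()) */
	demo_intmask &= ~DEMO_RX_SOURCES;	/* unmask RX sources again */
}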
+ */ + + if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4) + tsi108_rx_int(dev); +} + +static void tsi108_tx_int(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + u32 estat = TSI_READ(TSI108_EC_TXESTAT); + + TSI_WRITE(TSI108_EC_TXESTAT, estat); + TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 | + TSI108_INT_TXIDLE | TSI108_INT_TXERROR); + if (estat & TSI108_EC_TXESTAT_Q0_ERR) { + u32 err = TSI_READ(TSI108_EC_TXERR); + TSI_WRITE(TSI108_EC_TXERR, err); + + if (err && net_ratelimit()) + printk(KERN_ERR "%s: TX error %x\n", dev->name, err); + } + + if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) { + spin_lock(&data->txlock); + tsi108_complete_tx(dev); + spin_unlock(&data->txlock); + } +} + + +static irqreturn_t tsi108_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct tsi108_prv_data *data = netdev_priv(dev); + u32 stat = TSI_READ(TSI108_EC_INTSTAT); + + if (!(stat & TSI108_INT_ANY)) + return IRQ_NONE; /* Not our interrupt */ + + stat &= ~TSI_READ(TSI108_EC_INTMASK); + + if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE | + TSI108_INT_TXERROR)) + tsi108_tx_int(dev); + if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH | + TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN | + TSI108_INT_RXERROR)) + tsi108_rx_int(dev); + + if (stat & TSI108_INT_SFN) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: SFN error\n", dev->name); + TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN); + } + + if (stat & TSI108_INT_STATCARRY) { + tsi108_stat_carry(dev); + TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY); + } + + return IRQ_HANDLED; +} + +static void tsi108_stop_ethernet(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + int i = 1000; + /* Disable all TX and RX queues ... */ + TSI_WRITE(TSI108_EC_TXCTRL, 0); + TSI_WRITE(TSI108_EC_RXCTRL, 0); + + /* ...and wait for them to become idle */ + while(i--) { + if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE)) + break; + udelay(10); + } + i = 1000; + while(i--){ + if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE)) + return; + udelay(10); + } + printk(KERN_ERR "%s function time out\n", __func__); +} + +static void tsi108_reset_ether(struct tsi108_prv_data * data) +{ + TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST); + udelay(100); + TSI_WRITE(TSI108_MAC_CFG1, 0); + + TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST); + udelay(100); + TSI_WRITE(TSI108_EC_PORTCTRL, + TSI_READ(TSI108_EC_PORTCTRL) & + ~TSI108_EC_PORTCTRL_STATRST); + + TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST); + udelay(100); + TSI_WRITE(TSI108_EC_TXCFG, + TSI_READ(TSI108_EC_TXCFG) & + ~TSI108_EC_TXCFG_RST); + + TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST); + udelay(100); + TSI_WRITE(TSI108_EC_RXCFG, + TSI_READ(TSI108_EC_RXCFG) & + ~TSI108_EC_RXCFG_RST); + + TSI_WRITE(TSI108_MAC_MII_MGMT_CFG, + TSI_READ(TSI108_MAC_MII_MGMT_CFG) | + TSI108_MAC_MII_MGMT_RST); + udelay(100); + TSI_WRITE(TSI108_MAC_MII_MGMT_CFG, + (TSI_READ(TSI108_MAC_MII_MGMT_CFG) & + ~(TSI108_MAC_MII_MGMT_RST | + TSI108_MAC_MII_MGMT_CLK)) | 0x07); +} + +static int tsi108_get_mac(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + u32 word1 = TSI_READ(TSI108_MAC_ADDR1); + u32 word2 = TSI_READ(TSI108_MAC_ADDR2); + + /* Note that the octets are reversed from what the manual says, + * producing an even weirder ordering... 
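/*
 * Sketch of the MAC_ADDR1/MAC_ADDR2 byte packing handled here: for example,
 * 00:06:d2:00:00:01 is stored as word2 = 0x06000000 and word1 = 0x010000d2.
 * The helper below is illustrative only and mirrors the shifts used by the
 * driver's get/set MAC routines.
 */
static void demo_mac_to_words(const unsigned char mac[6],
			      unsigned int *word1, unsigned int *word2)
{
	*word2 = (mac[0] << 16) | (mac[1] << 24);
	*word1 = (mac[2] << 0) | (mac[3] << 8) |
		 (mac[4] << 16) | (mac[5] << 24);
}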
+ */ + if (word2 == 0 && word1 == 0) { + dev->dev_addr[0] = 0x00; + dev->dev_addr[1] = 0x06; + dev->dev_addr[2] = 0xd2; + dev->dev_addr[3] = 0x00; + dev->dev_addr[4] = 0x00; + if (0x8 == data->phy) + dev->dev_addr[5] = 0x01; + else + dev->dev_addr[5] = 0x02; + + word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24); + + word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) | + (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24); + + TSI_WRITE(TSI108_MAC_ADDR1, word1); + TSI_WRITE(TSI108_MAC_ADDR2, word2); + } else { + dev->dev_addr[0] = (word2 >> 16) & 0xff; + dev->dev_addr[1] = (word2 >> 24) & 0xff; + dev->dev_addr[2] = (word1 >> 0) & 0xff; + dev->dev_addr[3] = (word1 >> 8) & 0xff; + dev->dev_addr[4] = (word1 >> 16) & 0xff; + dev->dev_addr[5] = (word1 >> 24) & 0xff; + } + + if (!is_valid_ether_addr(dev->dev_addr)) { + printk(KERN_ERR + "%s: Invalid MAC address. word1: %08x, word2: %08x\n", + dev->name, word1, word2); + return -EINVAL; + } + + return 0; +} + +static int tsi108_set_mac(struct net_device *dev, void *addr) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + u32 word1, word2; + int i; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + for (i = 0; i < 6; i++) + /* +2 is for the offset of the HW addr type */ + dev->dev_addr[i] = ((unsigned char *)addr)[i + 2]; + + word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24); + + word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) | + (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24); + + spin_lock_irq(&data->misclock); + TSI_WRITE(TSI108_MAC_ADDR1, word1); + TSI_WRITE(TSI108_MAC_ADDR2, word2); + spin_lock(&data->txlock); + + if (data->txfree && data->link_up) + netif_wake_queue(dev); + + spin_unlock(&data->txlock); + spin_unlock_irq(&data->misclock); + return 0; +} + +/* Protected by dev->xmit_lock. */ +static void tsi108_set_rx_mode(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + u32 rxcfg = TSI_READ(TSI108_EC_RXCFG); + + if (dev->flags & IFF_PROMISC) { + rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH); + rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE; + goto out; + } + + rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE); + + if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { + int i; + struct netdev_hw_addr *ha; + rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH; + + memset(data->mc_hash, 0, sizeof(data->mc_hash)); + + netdev_for_each_mc_addr(ha, dev) { + u32 hash, crc; + + crc = ether_crc(6, ha->addr); + hash = crc >> 23; + __set_bit(hash, &data->mc_hash[0]); + } + + TSI_WRITE(TSI108_EC_HASHADDR, + TSI108_EC_HASHADDR_AUTOINC | + TSI108_EC_HASHADDR_MCAST); + + for (i = 0; i < 16; i++) { + /* The manual says that the hardware may drop + * back-to-back writes to the data register. 
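/*
 * Sketch of the multicast hash placement used just above: the top nine bits
 * of the Ethernet CRC of the address (crc >> 23) select one of 512 filter
 * bits, and the resulting bit image is then written to the hardware 32 bits
 * at a time through the auto-incrementing hash data register.  The helper
 * below is an illustrative stand-in for the __set_bit() call on mc_hash[].
 */
static void demo_set_hash_bit(unsigned long *table, unsigned int crc)
{
	unsigned int bit = crc >> 23;			/* bucket 0..511 */
	unsigned int bits_per_word = 8 * sizeof(unsigned long);

	table[bit / bits_per_word] |= 1UL << (bit % bits_per_word);
}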
+ */ + udelay(1); + TSI_WRITE(TSI108_EC_HASHDATA, + data->mc_hash[i]); + } + } + + out: + TSI_WRITE(TSI108_EC_RXCFG, rxcfg); +} + +static void tsi108_init_phy(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + u32 i = 0; + u16 phyval = 0; + unsigned long flags; + + spin_lock_irqsave(&phy_lock, flags); + + tsi108_write_mii(data, MII_BMCR, BMCR_RESET); + while (--i) { + if(!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET)) + break; + udelay(10); + } + if (i == 0) + printk(KERN_ERR "%s function time out\n", __func__); + + if (data->phy_type == TSI108_PHY_BCM54XX) { + tsi108_write_mii(data, 0x09, 0x0300); + tsi108_write_mii(data, 0x10, 0x1020); + tsi108_write_mii(data, 0x1c, 0x8c00); + } + + tsi108_write_mii(data, + MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART) + cpu_relax(); + + /* Set G/MII mode and receive clock select in TBI control #2. The + * second port won't work if this isn't done, even though we don't + * use TBI mode. + */ + + tsi108_write_tbi(data, 0x11, 0x30); + + /* FIXME: It seems to take more than 2 back-to-back reads to the + * PHY_STAT register before the link up status bit is set. + */ + + data->link_up = 0; + + while (!((phyval = tsi108_read_mii(data, MII_BMSR)) & + BMSR_LSTATUS)) { + if (i++ > (MII_READ_DELAY / 10)) { + break; + } + spin_unlock_irqrestore(&phy_lock, flags); + msleep(10); + spin_lock_irqsave(&phy_lock, flags); + } + + data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if); + printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval); + data->phy_ok = 1; + data->init_media = 1; + spin_unlock_irqrestore(&phy_lock, flags); +} + +static void tsi108_kill_phy(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&phy_lock, flags); + tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN); + data->phy_ok = 0; + spin_unlock_irqrestore(&phy_lock, flags); +} + +static int tsi108_open(struct net_device *dev) +{ + int i; + struct tsi108_prv_data *data = netdev_priv(dev); + unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc); + unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc); + + i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev); + if (i != 0) { + printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n", + data->id, data->irq_num); + return i; + } else { + dev->irq = data->irq_num; + printk(KERN_NOTICE + "tsi108_open : Port %d Assigned IRQ %d to %s\n", + data->id, dev->irq, dev->name); + } + + data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma, + GFP_KERNEL); + if (!data->rxring) + return -ENOMEM; + + data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma, + GFP_KERNEL); + if (!data->txring) { + pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); + return -ENOMEM; + } + + for (i = 0; i < TSI108_RXRING_LEN; i++) { + data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc); + data->rxring[i].blen = TSI108_RXBUF_SIZE; + data->rxring[i].vlan = 0; + } + + data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma; + + data->rxtail = 0; + data->rxhead = 0; + + for (i = 0; i < TSI108_RXRING_LEN; i++) { + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE); + if (!skb) { + /* Bah. No memory for now, but maybe we'll get + * some more later. + * For now, we'll live with the smaller ring. 
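/*
 * Sketch of the descriptor ring linking performed in tsi108_open() above:
 * each descriptor's next pointer holds the bus address of the following
 * descriptor, and the last entry points back at the first so the hardware
 * walks a circle.  Types and names below are illustrative only.
 */
struct demo_desc {
	unsigned int buf;	/* bus address of the data buffer */
	unsigned int next;	/* bus address of the next descriptor */
};

static void demo_link_ring(struct demo_desc *ring, unsigned int ring_bus,
			   unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		ring[i].next = ring_bus + ((i + 1) % len) * sizeof(*ring);
}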
+ */ + printk(KERN_WARNING + "%s: Could only allocate %d receive skb(s).\n", + dev->name, i); + data->rxhead = i; + break; + } + + data->rxskbs[i] = skb; + data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); + data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; + } + + data->rxfree = i; + TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma); + + for (i = 0; i < TSI108_TXRING_LEN; i++) { + data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc); + data->txring[i].misc = 0; + } + + data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma; + data->txtail = 0; + data->txhead = 0; + data->txfree = TSI108_TXRING_LEN; + TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma); + tsi108_init_phy(dev); + + napi_enable(&data->napi); + + setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev); + mod_timer(&data->timer, jiffies + 1); + + tsi108_restart_rx(data, dev); + + TSI_WRITE(TSI108_EC_INTSTAT, ~0); + + TSI_WRITE(TSI108_EC_INTMASK, + ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR | + TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 | + TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT | + TSI108_INT_SFN | TSI108_INT_STATCARRY)); + + TSI_WRITE(TSI108_MAC_CFG1, + TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN); + netif_start_queue(dev); + return 0; +} + +static int tsi108_close(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&data->napi); + + del_timer_sync(&data->timer); + + tsi108_stop_ethernet(dev); + tsi108_kill_phy(dev); + TSI_WRITE(TSI108_EC_INTMASK, ~0); + TSI_WRITE(TSI108_MAC_CFG1, 0); + + /* Check for any pending TX packets, and drop them. */ + + while (!data->txfree || data->txhead != data->txtail) { + int tx = data->txtail; + struct sk_buff *skb; + skb = data->txskbs[tx]; + data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN; + data->txfree++; + dev_kfree_skb(skb); + } + + free_irq(data->irq_num, dev); + + /* Discard the RX ring. 
*/ + + while (data->rxfree) { + int rx = data->rxtail; + struct sk_buff *skb; + + skb = data->rxskbs[rx]; + data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN; + data->rxfree--; + dev_kfree_skb(skb); + } + + dma_free_coherent(0, + TSI108_RXRING_LEN * sizeof(rx_desc), + data->rxring, data->rxdma); + dma_free_coherent(0, + TSI108_TXRING_LEN * sizeof(tx_desc), + data->txring, data->txdma); + + return 0; +} + +static void tsi108_init_mac(struct net_device *dev) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + + TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE | + TSI108_MAC_CFG2_PADCRC); + + TSI_WRITE(TSI108_EC_TXTHRESH, + (192 << TSI108_EC_TXTHRESH_STARTFILL) | + (192 << TSI108_EC_TXTHRESH_STOPFILL)); + + TSI_WRITE(TSI108_STAT_CARRYMASK1, + ~(TSI108_STAT_CARRY1_RXBYTES | + TSI108_STAT_CARRY1_RXPKTS | + TSI108_STAT_CARRY1_RXFCS | + TSI108_STAT_CARRY1_RXMCAST | + TSI108_STAT_CARRY1_RXALIGN | + TSI108_STAT_CARRY1_RXLENGTH | + TSI108_STAT_CARRY1_RXRUNT | + TSI108_STAT_CARRY1_RXJUMBO | + TSI108_STAT_CARRY1_RXFRAG | + TSI108_STAT_CARRY1_RXJABBER | + TSI108_STAT_CARRY1_RXDROP)); + + TSI_WRITE(TSI108_STAT_CARRYMASK2, + ~(TSI108_STAT_CARRY2_TXBYTES | + TSI108_STAT_CARRY2_TXPKTS | + TSI108_STAT_CARRY2_TXEXDEF | + TSI108_STAT_CARRY2_TXEXCOL | + TSI108_STAT_CARRY2_TXTCOL | + TSI108_STAT_CARRY2_TXPAUSE)); + + TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN); + TSI_WRITE(TSI108_MAC_CFG1, 0); + + TSI_WRITE(TSI108_EC_RXCFG, + TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE); + + TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT | + TSI108_EC_TXQ_CFG_EOQ_OWN_INT | + TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT << + TSI108_EC_TXQ_CFG_SFNPORT)); + + TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT | + TSI108_EC_RXQ_CFG_EOQ_OWN_INT | + TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT << + TSI108_EC_RXQ_CFG_SFNPORT)); + + TSI_WRITE(TSI108_EC_TXQ_BUFCFG, + TSI108_EC_TXQ_BUFCFG_BURST256 | + TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT << + TSI108_EC_TXQ_BUFCFG_SFNPORT)); + + TSI_WRITE(TSI108_EC_RXQ_BUFCFG, + TSI108_EC_RXQ_BUFCFG_BURST256 | + TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT << + TSI108_EC_RXQ_BUFCFG_SFNPORT)); + + TSI_WRITE(TSI108_EC_INTMASK, ~0); +} + +static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave(&data->txlock, flags); + rc = mii_ethtool_gset(&data->mii_if, cmd); + spin_unlock_irqrestore(&data->txlock, flags); + + return rc; +} + +static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave(&data->txlock, flags); + rc = mii_ethtool_sset(&data->mii_if, cmd); + spin_unlock_irqrestore(&data->txlock, flags); + + return rc; +} + +static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct tsi108_prv_data *data = netdev_priv(dev); + if (!netif_running(dev)) + return -EINVAL; + return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL); +} + +static const struct ethtool_ops tsi108_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_settings = tsi108_get_settings, + .set_settings = tsi108_set_settings, +}; + +static const struct net_device_ops tsi108_netdev_ops = { + .ndo_open = tsi108_open, + .ndo_stop = tsi108_close, + .ndo_start_xmit = tsi108_send_packet, + .ndo_set_rx_mode = tsi108_set_rx_mode, + .ndo_get_stats = tsi108_get_stats, + .ndo_do_ioctl = tsi108_do_ioctl, + 
.ndo_set_mac_address = tsi108_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int +tsi108_init_one(struct platform_device *pdev) +{ + struct net_device *dev = NULL; + struct tsi108_prv_data *data = NULL; + hw_info *einfo; + int err = 0; + + einfo = dev_get_platdata(&pdev->dev); + + if (NULL == einfo) { + printk(KERN_ERR "tsi-eth %d: Missing additional data!\n", + pdev->id); + return -ENODEV; + } + + /* Create an ethernet device instance */ + + dev = alloc_etherdev(sizeof(struct tsi108_prv_data)); + if (!dev) + return -ENOMEM; + + printk("tsi108_eth%d: probe...\n", pdev->id); + data = netdev_priv(dev); + data->dev = dev; + + pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n", + pdev->id, einfo->regs, einfo->phyregs, + einfo->phy, einfo->irq_num); + + data->regs = ioremap(einfo->regs, 0x400); + if (NULL == data->regs) { + err = -ENOMEM; + goto regs_fail; + } + + data->phyregs = ioremap(einfo->phyregs, 0x400); + if (NULL == data->phyregs) { + err = -ENOMEM; + goto phyregs_fail; + } +/* MII setup */ + data->mii_if.dev = dev; + data->mii_if.mdio_read = tsi108_mdio_read; + data->mii_if.mdio_write = tsi108_mdio_write; + data->mii_if.phy_id = einfo->phy; + data->mii_if.phy_id_mask = 0x1f; + data->mii_if.reg_num_mask = 0x1f; + + data->phy = einfo->phy; + data->phy_type = einfo->phy_type; + data->irq_num = einfo->irq_num; + data->id = pdev->id; + netif_napi_add(dev, &data->napi, tsi108_poll, 64); + dev->netdev_ops = &tsi108_netdev_ops; + dev->ethtool_ops = &tsi108_ethtool_ops; + + /* Apparently, the Linux networking code won't use scatter-gather + * if the hardware doesn't do checksums. However, it's faster + * to checksum in place and use SG, as (among other reasons) + * the cache won't be dirtied (which then has to be flushed + * before DMA). The checksumming is done by the driver (via + * a new function skb_csum_dev() in net/core/skbuff.c). + */ + + dev->features = NETIF_F_HIGHDMA; + + spin_lock_init(&data->txlock); + spin_lock_init(&data->misclock); + + tsi108_reset_ether(data); + tsi108_kill_phy(dev); + + if ((err = tsi108_get_mac(dev)) != 0) { + printk(KERN_ERR "%s: Invalid MAC address. Please correct.\n", + dev->name); + goto register_fail; + } + + tsi108_init_mac(dev); + err = register_netdev(dev); + if (err) { + printk(KERN_ERR "%s: Cannot register net device, aborting.\n", + dev->name); + goto register_fail; + } + + platform_set_drvdata(pdev, dev); + printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %pM\n", + dev->name, dev->dev_addr); +#ifdef DEBUG + data->msg_enable = DEBUG; + dump_eth_one(dev); +#endif + + return 0; + +register_fail: + iounmap(data->phyregs); + +phyregs_fail: + iounmap(data->regs); + +regs_fail: + free_netdev(dev); + return err; +} + +/* There's no way to either get interrupts from the PHY when + * something changes, or to have the Tsi108 automatically communicate + * with the PHY to reconfigure itself. + * + * Thus, we have to do it using a timer. 
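/*
 * Sketch of the error-unwind ladder used by tsi108_init_one() above: each
 * acquisition that can fail jumps to a label that releases, in reverse order,
 * only what was already set up.  The fake acquire/release helpers stand in
 * for the two ioremap()ed regions and the netdev registration; they are not
 * part of the driver.
 */
static int demo_acquire(int ok)		{ return ok; }
static void demo_release(void)		{ }

static int demo_probe(int ok_a, int ok_b, int ok_c)
{
	int err = -1;

	if (!demo_acquire(ok_a))
		return err;		/* nothing to undo yet */

	if (!demo_acquire(ok_b))
		goto out_a;

	if (!demo_acquire(ok_c))
		goto out_b;

	return 0;			/* fully initialised */

out_b:
	demo_release();			/* undo B */
out_a:
	demo_release();			/* undo A */
	return err;
}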
+ */ + +static void tsi108_timed_checker(unsigned long dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tsi108_prv_data *data = netdev_priv(dev); + + tsi108_check_phy(dev); + tsi108_check_rxring(dev); + mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL); +} + +static int tsi108_ether_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct tsi108_prv_data *priv = netdev_priv(dev); + + unregister_netdev(dev); + tsi108_stop_ethernet(dev); + iounmap(priv->regs); + iounmap(priv->phyregs); + free_netdev(dev); + + return 0; +} +module_platform_driver(tsi_eth_driver); + +MODULE_AUTHOR("Tundra Semiconductor Corporation"); +MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:tsi-ethernet"); diff --git a/drivers/net/ethernet/tundra/tsi108_eth.h b/drivers/net/ethernet/tundra/tsi108_eth.h new file mode 100644 index 000000000..4a03c594b --- /dev/null +++ b/drivers/net/ethernet/tundra/tsi108_eth.h @@ -0,0 +1,354 @@ +/* + * (C) Copyright 2005 Tundra Semiconductor Corp. + * Kong Lai, . + */ + +/* + * net/tsi108_eth.h - definitions for Tsi108 GIGE network controller. + */ + +#ifndef __TSI108_ETH_H +#define __TSI108_ETH_H + +#include + +#define TSI_WRITE(offset, val) \ + out_be32((data->regs + (offset)), val) + +#define TSI_READ(offset) \ + in_be32((data->regs + (offset))) + +#define TSI_WRITE_PHY(offset, val) \ + out_be32((data->phyregs + (offset)), val) + +#define TSI_READ_PHY(offset) \ + in_be32((data->phyregs + (offset))) + +/* + * TSI108 GIGE port registers + */ + +#define TSI108_ETH_PORT_NUM 2 +#define TSI108_PBM_PORT 2 +#define TSI108_SDRAM_PORT 4 + +#define TSI108_MAC_CFG1 (0x000) +#define TSI108_MAC_CFG1_SOFTRST (1 << 31) +#define TSI108_MAC_CFG1_LOOPBACK (1 << 8) +#define TSI108_MAC_CFG1_RXEN (1 << 2) +#define TSI108_MAC_CFG1_TXEN (1 << 0) + +#define TSI108_MAC_CFG2 (0x004) +#define TSI108_MAC_CFG2_DFLT_PREAMBLE (7 << 12) +#define TSI108_MAC_CFG2_IFACE_MASK (3 << 8) +#define TSI108_MAC_CFG2_NOGIG (1 << 8) +#define TSI108_MAC_CFG2_GIG (2 << 8) +#define TSI108_MAC_CFG2_PADCRC (1 << 2) +#define TSI108_MAC_CFG2_FULLDUPLEX (1 << 0) + +#define TSI108_MAC_MII_MGMT_CFG (0x020) +#define TSI108_MAC_MII_MGMT_CLK (7 << 0) +#define TSI108_MAC_MII_MGMT_RST (1 << 31) + +#define TSI108_MAC_MII_CMD (0x024) +#define TSI108_MAC_MII_CMD_READ (1 << 0) + +#define TSI108_MAC_MII_ADDR (0x028) +#define TSI108_MAC_MII_ADDR_REG 0 +#define TSI108_MAC_MII_ADDR_PHY 8 + +#define TSI108_MAC_MII_DATAOUT (0x02c) +#define TSI108_MAC_MII_DATAIN (0x030) + +#define TSI108_MAC_MII_IND (0x034) +#define TSI108_MAC_MII_IND_NOTVALID (1 << 2) +#define TSI108_MAC_MII_IND_SCANNING (1 << 1) +#define TSI108_MAC_MII_IND_BUSY (1 << 0) + +#define TSI108_MAC_IFCTRL (0x038) +#define TSI108_MAC_IFCTRL_PHYMODE (1 << 24) + +#define TSI108_MAC_ADDR1 (0x040) +#define TSI108_MAC_ADDR2 (0x044) + +#define TSI108_STAT_RXBYTES (0x06c) +#define TSI108_STAT_RXBYTES_CARRY (1 << 24) + +#define TSI108_STAT_RXPKTS (0x070) +#define TSI108_STAT_RXPKTS_CARRY (1 << 18) + +#define TSI108_STAT_RXFCS (0x074) +#define TSI108_STAT_RXFCS_CARRY (1 << 12) + +#define TSI108_STAT_RXMCAST (0x078) +#define TSI108_STAT_RXMCAST_CARRY (1 << 18) + +#define TSI108_STAT_RXALIGN (0x08c) +#define TSI108_STAT_RXALIGN_CARRY (1 << 12) + +#define TSI108_STAT_RXLENGTH (0x090) +#define TSI108_STAT_RXLENGTH_CARRY (1 << 12) + +#define TSI108_STAT_RXRUNT (0x09c) +#define TSI108_STAT_RXRUNT_CARRY (1 << 12) + +#define TSI108_STAT_RXJUMBO (0x0a0) +#define 
TSI108_STAT_RXJUMBO_CARRY (1 << 12) + +#define TSI108_STAT_RXFRAG (0x0a4) +#define TSI108_STAT_RXFRAG_CARRY (1 << 12) + +#define TSI108_STAT_RXJABBER (0x0a8) +#define TSI108_STAT_RXJABBER_CARRY (1 << 12) + +#define TSI108_STAT_RXDROP (0x0ac) +#define TSI108_STAT_RXDROP_CARRY (1 << 12) + +#define TSI108_STAT_TXBYTES (0x0b0) +#define TSI108_STAT_TXBYTES_CARRY (1 << 24) + +#define TSI108_STAT_TXPKTS (0x0b4) +#define TSI108_STAT_TXPKTS_CARRY (1 << 18) + +#define TSI108_STAT_TXEXDEF (0x0c8) +#define TSI108_STAT_TXEXDEF_CARRY (1 << 12) + +#define TSI108_STAT_TXEXCOL (0x0d8) +#define TSI108_STAT_TXEXCOL_CARRY (1 << 12) + +#define TSI108_STAT_TXTCOL (0x0dc) +#define TSI108_STAT_TXTCOL_CARRY (1 << 13) + +#define TSI108_STAT_TXPAUSEDROP (0x0e4) +#define TSI108_STAT_TXPAUSEDROP_CARRY (1 << 12) + +#define TSI108_STAT_CARRY1 (0x100) +#define TSI108_STAT_CARRY1_RXBYTES (1 << 16) +#define TSI108_STAT_CARRY1_RXPKTS (1 << 15) +#define TSI108_STAT_CARRY1_RXFCS (1 << 14) +#define TSI108_STAT_CARRY1_RXMCAST (1 << 13) +#define TSI108_STAT_CARRY1_RXALIGN (1 << 8) +#define TSI108_STAT_CARRY1_RXLENGTH (1 << 7) +#define TSI108_STAT_CARRY1_RXRUNT (1 << 4) +#define TSI108_STAT_CARRY1_RXJUMBO (1 << 3) +#define TSI108_STAT_CARRY1_RXFRAG (1 << 2) +#define TSI108_STAT_CARRY1_RXJABBER (1 << 1) +#define TSI108_STAT_CARRY1_RXDROP (1 << 0) + +#define TSI108_STAT_CARRY2 (0x104) +#define TSI108_STAT_CARRY2_TXBYTES (1 << 13) +#define TSI108_STAT_CARRY2_TXPKTS (1 << 12) +#define TSI108_STAT_CARRY2_TXEXDEF (1 << 7) +#define TSI108_STAT_CARRY2_TXEXCOL (1 << 3) +#define TSI108_STAT_CARRY2_TXTCOL (1 << 2) +#define TSI108_STAT_CARRY2_TXPAUSE (1 << 0) + +#define TSI108_STAT_CARRYMASK1 (0x108) +#define TSI108_STAT_CARRYMASK2 (0x10c) + +#define TSI108_EC_PORTCTRL (0x200) +#define TSI108_EC_PORTCTRL_STATRST (1 << 31) +#define TSI108_EC_PORTCTRL_STATEN (1 << 28) +#define TSI108_EC_PORTCTRL_NOGIG (1 << 18) +#define TSI108_EC_PORTCTRL_HALFDUPLEX (1 << 16) + +#define TSI108_EC_INTSTAT (0x204) +#define TSI108_EC_INTMASK (0x208) + +#define TSI108_INT_ANY (1 << 31) +#define TSI108_INT_SFN (1 << 30) +#define TSI108_INT_RXIDLE (1 << 29) +#define TSI108_INT_RXABORT (1 << 28) +#define TSI108_INT_RXERROR (1 << 27) +#define TSI108_INT_RXOVERRUN (1 << 26) +#define TSI108_INT_RXTHRESH (1 << 25) +#define TSI108_INT_RXWAIT (1 << 24) +#define TSI108_INT_RXQUEUE0 (1 << 16) +#define TSI108_INT_STATCARRY (1 << 15) +#define TSI108_INT_TXIDLE (1 << 13) +#define TSI108_INT_TXABORT (1 << 12) +#define TSI108_INT_TXERROR (1 << 11) +#define TSI108_INT_TXUNDERRUN (1 << 10) +#define TSI108_INT_TXTHRESH (1 << 9) +#define TSI108_INT_TXWAIT (1 << 8) +#define TSI108_INT_TXQUEUE0 (1 << 0) + +#define TSI108_EC_TXCFG (0x220) +#define TSI108_EC_TXCFG_RST (1 << 31) + +#define TSI108_EC_TXCTRL (0x224) +#define TSI108_EC_TXCTRL_IDLEINT (1 << 31) +#define TSI108_EC_TXCTRL_ABORT (1 << 30) +#define TSI108_EC_TXCTRL_GO (1 << 15) +#define TSI108_EC_TXCTRL_QUEUE0 (1 << 0) + +#define TSI108_EC_TXSTAT (0x228) +#define TSI108_EC_TXSTAT_ACTIVE (1 << 15) +#define TSI108_EC_TXSTAT_QUEUE0 (1 << 0) + +#define TSI108_EC_TXESTAT (0x22c) +#define TSI108_EC_TXESTAT_Q0_ERR (1 << 24) +#define TSI108_EC_TXESTAT_Q0_DESCINT (1 << 16) +#define TSI108_EC_TXESTAT_Q0_EOF (1 << 8) +#define TSI108_EC_TXESTAT_Q0_EOQ (1 << 0) + +#define TSI108_EC_TXERR (0x278) + +#define TSI108_EC_TXQ_CFG (0x280) +#define TSI108_EC_TXQ_CFG_DESC_INT (1 << 20) +#define TSI108_EC_TXQ_CFG_EOQ_OWN_INT (1 << 19) +#define TSI108_EC_TXQ_CFG_WSWP (1 << 11) +#define TSI108_EC_TXQ_CFG_BSWP (1 << 10) +#define TSI108_EC_TXQ_CFG_SFNPORT 
0 + +#define TSI108_EC_TXQ_BUFCFG (0x284) +#define TSI108_EC_TXQ_BUFCFG_BURST8 (0 << 8) +#define TSI108_EC_TXQ_BUFCFG_BURST32 (1 << 8) +#define TSI108_EC_TXQ_BUFCFG_BURST128 (2 << 8) +#define TSI108_EC_TXQ_BUFCFG_BURST256 (3 << 8) +#define TSI108_EC_TXQ_BUFCFG_WSWP (1 << 11) +#define TSI108_EC_TXQ_BUFCFG_BSWP (1 << 10) +#define TSI108_EC_TXQ_BUFCFG_SFNPORT 0 + +#define TSI108_EC_TXQ_PTRLOW (0x288) + +#define TSI108_EC_TXQ_PTRHIGH (0x28c) +#define TSI108_EC_TXQ_PTRHIGH_VALID (1 << 31) + +#define TSI108_EC_TXTHRESH (0x230) +#define TSI108_EC_TXTHRESH_STARTFILL 0 +#define TSI108_EC_TXTHRESH_STOPFILL 16 + +#define TSI108_EC_RXCFG (0x320) +#define TSI108_EC_RXCFG_RST (1 << 31) + +#define TSI108_EC_RXSTAT (0x328) +#define TSI108_EC_RXSTAT_ACTIVE (1 << 15) +#define TSI108_EC_RXSTAT_QUEUE0 (1 << 0) + +#define TSI108_EC_RXESTAT (0x32c) +#define TSI108_EC_RXESTAT_Q0_ERR (1 << 24) +#define TSI108_EC_RXESTAT_Q0_DESCINT (1 << 16) +#define TSI108_EC_RXESTAT_Q0_EOF (1 << 8) +#define TSI108_EC_RXESTAT_Q0_EOQ (1 << 0) + +#define TSI108_EC_HASHADDR (0x360) +#define TSI108_EC_HASHADDR_AUTOINC (1 << 31) +#define TSI108_EC_HASHADDR_DO1STREAD (1 << 30) +#define TSI108_EC_HASHADDR_UNICAST (0 << 4) +#define TSI108_EC_HASHADDR_MCAST (1 << 4) + +#define TSI108_EC_HASHDATA (0x364) + +#define TSI108_EC_RXQ_PTRLOW (0x388) + +#define TSI108_EC_RXQ_PTRHIGH (0x38c) +#define TSI108_EC_RXQ_PTRHIGH_VALID (1 << 31) + +/* Station Enable -- accept packets destined for us */ +#define TSI108_EC_RXCFG_SE (1 << 13) +/* Unicast Frame Enable -- for packets not destined for us */ +#define TSI108_EC_RXCFG_UFE (1 << 12) +/* Multicast Frame Enable */ +#define TSI108_EC_RXCFG_MFE (1 << 11) +/* Broadcast Frame Enable */ +#define TSI108_EC_RXCFG_BFE (1 << 10) +#define TSI108_EC_RXCFG_UC_HASH (1 << 9) +#define TSI108_EC_RXCFG_MC_HASH (1 << 8) + +#define TSI108_EC_RXQ_CFG (0x380) +#define TSI108_EC_RXQ_CFG_DESC_INT (1 << 20) +#define TSI108_EC_RXQ_CFG_EOQ_OWN_INT (1 << 19) +#define TSI108_EC_RXQ_CFG_WSWP (1 << 11) +#define TSI108_EC_RXQ_CFG_BSWP (1 << 10) +#define TSI108_EC_RXQ_CFG_SFNPORT 0 + +#define TSI108_EC_RXQ_BUFCFG (0x384) +#define TSI108_EC_RXQ_BUFCFG_BURST8 (0 << 8) +#define TSI108_EC_RXQ_BUFCFG_BURST32 (1 << 8) +#define TSI108_EC_RXQ_BUFCFG_BURST128 (2 << 8) +#define TSI108_EC_RXQ_BUFCFG_BURST256 (3 << 8) +#define TSI108_EC_RXQ_BUFCFG_WSWP (1 << 11) +#define TSI108_EC_RXQ_BUFCFG_BSWP (1 << 10) +#define TSI108_EC_RXQ_BUFCFG_SFNPORT 0 + +#define TSI108_EC_RXCTRL (0x324) +#define TSI108_EC_RXCTRL_ABORT (1 << 30) +#define TSI108_EC_RXCTRL_GO (1 << 15) +#define TSI108_EC_RXCTRL_QUEUE0 (1 << 0) + +#define TSI108_EC_RXERR (0x378) + +#define TSI108_TX_EOF (1 << 0) /* End of frame; last fragment of packet */ +#define TSI108_TX_SOF (1 << 1) /* Start of frame; first frag. of packet */ +#define TSI108_TX_VLAN (1 << 2) /* Per-frame VLAN: enables VLAN override */ +#define TSI108_TX_HUGE (1 << 3) /* Huge frame enable */ +#define TSI108_TX_PAD (1 << 4) /* Pad the packet if too short */ +#define TSI108_TX_CRC (1 << 5) /* Generate CRC for this packet */ +#define TSI108_TX_INT (1 << 14) /* Generate an IRQ after frag. processed */ +#define TSI108_TX_RETRY (0xf << 16) /* 4 bit field indicating num. 
of retries */ +#define TSI108_TX_COL (1 << 20) /* Set if a collision occurred */ +#define TSI108_TX_LCOL (1 << 24) /* Set if a late collision occurred */ +#define TSI108_TX_UNDER (1 << 25) /* Set if a FIFO underrun occurred */ +#define TSI108_TX_RLIM (1 << 26) /* Set if the retry limit was reached */ +#define TSI108_TX_OK (1 << 30) /* Set if the frame TX was successful */ +#define TSI108_TX_OWN (1 << 31) /* Set if the device owns the descriptor */ + +/* Note: the descriptor layouts assume big-endian byte order. */ +typedef struct { + u32 buf0; + u32 buf1; /* Base address of buffer */ + u32 next0; /* Address of next descriptor, if any */ + u32 next1; + u16 vlan; /* VLAN, if override enabled for this packet */ + u16 len; /* Length of buffer in bytes */ + u32 misc; /* See TSI108_TX_* above */ + u32 reserved0; /*reserved0 and reserved1 are added to make the desc */ + u32 reserved1; /* 32-byte aligned */ +} __attribute__ ((aligned(32))) tx_desc; + +#define TSI108_RX_EOF (1 << 0) /* End of frame; last fragment of packet */ +#define TSI108_RX_SOF (1 << 1) /* Start of frame; first frag. of packet */ +#define TSI108_RX_VLAN (1 << 2) /* Set on SOF if packet has a VLAN */ +#define TSI108_RX_FTYPE (1 << 3) /* Length/Type field is type, not length */ +#define TSI108_RX_RUNT (1 << 4)/* Packet is less than minimum size */ +#define TSI108_RX_HASH (1 << 7)/* Hash table match */ +#define TSI108_RX_BAD (1 << 8) /* Bad frame */ +#define TSI108_RX_OVER (1 << 9) /* FIFO overrun occurred */ +#define TSI108_RX_TRUNC (1 << 11) /* Packet truncated due to excess length */ +#define TSI108_RX_CRC (1 << 12) /* Packet had a CRC error */ +#define TSI108_RX_INT (1 << 13) /* Generate an IRQ after frag. processed */ +#define TSI108_RX_OWN (1 << 15) /* Set if the device owns the descriptor */ + +#define TSI108_RX_SKB_SIZE 1536 /* The RX skb length */ + +typedef struct { + u32 buf0; /* Base address of buffer */ + u32 buf1; /* Base address of buffer */ + u32 next0; /* Address of next descriptor, if any */ + u32 next1; /* Address of next descriptor, if any */ + u16 vlan; /* VLAN of received packet, first frag only */ + u16 len; /* Length of received fragment in bytes */ + u16 blen; /* Length of buffer in bytes */ + u16 misc; /* See TSI108_RX_* above */ + u32 reserved0; /* reserved0 and reserved1 are added to make the desc */ + u32 reserved1; /* 32-byte aligned */ +} __attribute__ ((aligned(32))) rx_desc; + +#endif /* __TSI108_ETH_H */ diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig new file mode 100644 index 000000000..f66ddaee0 --- /dev/null +++ b/drivers/net/ethernet/via/Kconfig @@ -0,0 +1,56 @@ +# +# VIA device configuration +# + +config NET_VENDOR_VIA + bool "VIA devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about VIA devices. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_VIA + +config VIA_RHINE + tristate "VIA Rhine support" + depends on (PCI || USE_OF) + select CRC32 + select MII + ---help--- + If you have a VIA "Rhine" based network card (Rhine-I (VT86C100A), + Rhine-II (VT6102), or Rhine-III (VT6105)), say Y here. Rhine-type + Ethernet functions can also be found integrated on South Bridges + (e.g. VT8235). + + To compile this driver as a module, choose M here. 
The module + will be called via-rhine. + +config VIA_RHINE_MMIO + bool "Use MMIO instead of PIO" + depends on VIA_RHINE + ---help--- + This instructs the driver to use PCI shared memory (MMIO) instead of + programmed I/O ports (PIO). Enabling this gives an improvement in + processing time in parts of the driver. + + If unsure, say Y. + +config VIA_VELOCITY + tristate "VIA Velocity support" + depends on (PCI || USE_OF) + select CRC32 + select CRC_CCITT + select MII + ---help--- + If you have a VIA "Velocity" based network card say Y here. + + To compile this driver as a module, choose M here. The module + will be called via-velocity. + +endif # NET_VENDOR_VIA diff --git a/drivers/net/ethernet/via/Makefile b/drivers/net/ethernet/via/Makefile new file mode 100644 index 000000000..46c5d4a3d --- /dev/null +++ b/drivers/net/ethernet/via/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the VIA device drivers. +# + +obj-$(CONFIG_VIA_RHINE) += via-rhine.o +obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c new file mode 100644 index 000000000..de2850497 --- /dev/null +++ b/drivers/net/ethernet/via/via-rhine.c @@ -0,0 +1,2579 @@ +/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */ +/* + Written 1998-2001 by Donald Becker. + + Current Maintainer: Roger Luethi + + This software may be used and distributed according to the terms of + the GNU General Public License (GPL), incorporated herein by reference. + Drivers based on or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. This file is not + a complete program and may only be used when the entire operating + system is licensed under the GPL. + + This driver is designed for the VIA VT86C100A Rhine-I. + It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM + and management NIC 6105M). + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + This driver contains some changes from the original Donald Becker + version. He may or may not be interested in bug reports on this + code. You can find his versions at: + http://www.scyld.com/network/via-rhine.html + [link no longer provides useful info -jgarzik] + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define DRV_NAME "via-rhine" +#define DRV_VERSION "1.5.1" +#define DRV_RELDATE "2010-10-09" + +#include + +/* A few user-configurable values. + These may be modified when a driver module is loaded. */ +static int debug = 0; +#define RHINE_MSG_DEFAULT \ + (0x0000) + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1518 effectively disables this feature. */ +#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ + defined(CONFIG_SPARC) || defined(__ia64__) || \ + defined(__sh__) || defined(__mips__) +static int rx_copybreak = 1518; +#else +static int rx_copybreak; +#endif + +/* Work-around for broken BIOSes: they are unable to get the chip back out of + power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ +static bool avoid_D3; + +/* + * In case you are looking for 'options[]' or 'full_duplex[]', they + * are gone. Use ethtool(8) instead. + */ + +/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). + The Rhine has a 64 element 8390-like hash table. 
*/ +static const int multicast_filter_limit = 32; + + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + * The compiler will convert '%'<2^N> into a bit mask. + * Making the Tx ring too large decreases the effectiveness of channel + * bonding and packet priority. + * With BQL support, we can increase TX ring safely. + * There are no ill effects from too-large receive rings. + */ +#define TX_RING_SIZE 64 +#define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */ +#define RX_RING_SIZE 64 + +/* Operational parameters that usually are not changed. */ + +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Processor type for cache alignment. */ +#include +#include +#include +#include + +/* These identify the driver base version and may not be removed. */ +static const char version[] = + "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker"; + +MODULE_AUTHOR("Donald Becker "); +MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver"); +MODULE_LICENSE("GPL"); + +module_param(debug, int, 0); +module_param(rx_copybreak, int, 0); +module_param(avoid_D3, bool, 0); +MODULE_PARM_DESC(debug, "VIA Rhine debug message flags"); +MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); +MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); + +#define MCAM_SIZE 32 +#define VCAM_SIZE 32 + +/* + Theory of Operation + +I. Board Compatibility + +This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet +controller. + +II. Board-specific settings + +Boards with this chip are functional only in a bus-master PCI slot. + +Many operational settings are loaded from the EEPROM to the Config word at +offset 0x78. For most of these settings, this driver assumes that they are +correct. +If this driver is compiled to use PCI memory space operations the EEPROM +must be configured to enable memory ops. + +III. Driver operation + +IIIa. Ring buffers + +This driver uses two statically allocated fixed-size descriptor lists +formed into rings by a branch from the final descriptor to the beginning of +the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. + +IIIb/c. Transmit/Receive Structure + +This driver attempts to use a zero-copy receive and transmit scheme. + +Alas, all data buffers are required to start on a 32 bit boundary, so +the driver must often copy transmit packets into bounce buffers. + +The driver allocates full frame size skbuffs for the Rx ring buffers at +open() time and passes the skb->data field to the chip as receive data +buffers. When an incoming frame is less than RX_COPYBREAK bytes long, +a fresh skbuff is allocated and the frame is copied to the new skbuff. +When the incoming frame is larger, the skbuff is passed directly up the +protocol stack. Buffers consumed this way are replaced by newly allocated +skbuffs in the last phase of rhine_rx(). + +The RX_COPYBREAK value is chosen to trade-off the memory wasted by +using a full-sized skbuff for small frames vs. the copying costs of larger +frames. 
New boards are typically used in generously configured machines +and the underfilled buffers have negligible impact compared to the benefit of +a single allocation size, so the default value of zero results in never +copying packets. When copying is done, the cost is usually mitigated by using +a combined copy/checksum routine. Copying also preloads the cache, which is +most useful with small frames. + +Since the VIA chips are only able to transfer data to buffers on 32 bit +boundaries, the IP header at offset 14 in an ethernet frame isn't +longword aligned for further processing. Copying these unaligned buffers +has the beneficial effect of 16-byte aligning the IP header. + +IIId. Synchronization + +The driver runs as two independent, single-threaded flows of control. One +is the send-packet routine, which enforces single-threaded use by the +netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler, +which is single threaded by the hardware and interrupt handling software. + +The send packet thread has partial control over the Tx ring. It locks the +netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in +the ring is not available it stops the transmit queue by +calling netif_stop_queue. + +The interrupt handler has exclusive control over the Rx ring and records stats +from the Tx ring. After reaping the stats, it marks the Tx queue entry as +empty by incrementing the dirty_tx mark. If at least half of the entries in +the Rx ring are available the transmit queue is woken up if it was stopped. + +IV. Notes + +IVb. References + +Preliminary VT86C100A manual from http://www.via.com.tw/ +http://www.scyld.com/expert/100mbps.html +http://www.scyld.com/expert/NWay.html +ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf +ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF + + +IVc. Errata + +The VT86C100A manual is not reliable information. +The 3043 chip does not handle unaligned transmit or receive buffers, resulting +in significant performance degradation for bounce buffer copies on transmit +and unaligned IP headers on receive. +The chip does not pad to minimum transmit length. + +*/ + + +/* This table drives the PCI probe routines. It's mostly boilerplate in all + of the drivers, and will likely be provided by some future kernel. + Note the matching code -- the first table entry matchs all 56** cards but + second only the 1234 card. +*/ + +enum rhine_revs { + VT86C100A = 0x00, + VTunknown0 = 0x20, + VT6102 = 0x40, + VT8231 = 0x50, /* Integrated MAC */ + VT8233 = 0x60, /* Integrated MAC */ + VT8235 = 0x74, /* Integrated MAC */ + VT8237 = 0x78, /* Integrated MAC */ + VTunknown1 = 0x7C, + VT6105 = 0x80, + VT6105_B0 = 0x83, + VT6105L = 0x8A, + VT6107 = 0x8C, + VTunknown2 = 0x8E, + VT6105M = 0x90, /* Management adapter */ +}; + +enum rhine_quirks { + rqWOL = 0x0001, /* Wake-On-LAN support */ + rqForceReset = 0x0002, + rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */ + rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */ + rqRhineI = 0x0100, /* See comment below */ + rqIntPHY = 0x0200, /* Integrated PHY */ + rqMgmt = 0x0400, /* Management adapter */ + rqNeedEnMMIO = 0x0800, /* Whether the core needs to be + * switched from PIO mode to MMIO + * (only applies to PCI) + */ +}; +/* + * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable + * MMIO as well as for the collision counter and the Tx FIFO underflow + * indicator. 
In addition, Tx and Rx buffers need to 4 byte aligned. + */ + +/* Beware of PCI posted writes */ +#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) + +static const struct pci_device_id rhine_pci_tbl[] = { + { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ + { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ + { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ + { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */ + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); + +/* OpenFirmware identifiers for platform-bus devices + * The .data field is currently only used to store quirks + */ +static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns; +static const struct of_device_id rhine_of_tbl[] = { + { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(of, rhine_of_tbl); + +/* Offsets to the device registers. */ +enum register_offsets { + StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, + ChipCmd1=0x09, TQWake=0x0A, + IntrStatus=0x0C, IntrEnable=0x0E, + MulticastFilter0=0x10, MulticastFilter1=0x14, + RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, + MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F, + MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74, + ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, + RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, + StickyHW=0x83, IntrStatus2=0x84, + CamMask=0x88, CamCon=0x92, CamAddr=0x93, + WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, + WOLcrClr1=0xA6, WOLcgClr=0xA7, + PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, +}; + +/* Bits in ConfigD */ +enum backoff_bits { + BackOptional=0x01, BackModify=0x02, + BackCaptureEffect=0x04, BackRandom=0x08 +}; + +/* Bits in the TxConfig (TCR) register */ +enum tcr_bits { + TCR_PQEN=0x01, + TCR_LB0=0x02, /* loopback[0] */ + TCR_LB1=0x04, /* loopback[1] */ + TCR_OFSET=0x08, + TCR_RTGOPT=0x10, + TCR_RTFT0=0x20, + TCR_RTFT1=0x40, + TCR_RTSF=0x80, +}; + +/* Bits in the CamCon (CAMC) register */ +enum camcon_bits { + CAMC_CAMEN=0x01, + CAMC_VCAMSL=0x02, + CAMC_CAMWR=0x04, + CAMC_CAMRD=0x08, +}; + +/* Bits in the PCIBusConfig1 (BCR1) register */ +enum bcr1_bits { + BCR1_POT0=0x01, + BCR1_POT1=0x02, + BCR1_POT2=0x04, + BCR1_CTFT0=0x08, + BCR1_CTFT1=0x10, + BCR1_CTSF=0x20, + BCR1_TXQNOBK=0x40, /* for VT6105 */ + BCR1_VIDFR=0x80, /* for VT6105 */ + BCR1_MED0=0x40, /* for VT6102 */ + BCR1_MED1=0x80, /* for VT6102 */ +}; + +/* Registers we check that mmio and reg are the same. */ +static const int mmio_verify_registers[] = { + RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD, + 0 +}; + +/* Bits in the interrupt status/mask registers. 
*/ +enum intr_status_bits { + IntrRxDone = 0x0001, + IntrTxDone = 0x0002, + IntrRxErr = 0x0004, + IntrTxError = 0x0008, + IntrRxEmpty = 0x0020, + IntrPCIErr = 0x0040, + IntrStatsMax = 0x0080, + IntrRxEarly = 0x0100, + IntrTxUnderrun = 0x0210, + IntrRxOverflow = 0x0400, + IntrRxDropped = 0x0800, + IntrRxNoBuf = 0x1000, + IntrTxAborted = 0x2000, + IntrLinkChange = 0x4000, + IntrRxWakeUp = 0x8000, + IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */ + IntrNormalSummary = IntrRxDone | IntrTxDone, + IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError | + IntrTxUnderrun, +}; + +/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */ +enum wol_bits { + WOLucast = 0x10, + WOLmagic = 0x20, + WOLbmcast = 0x30, + WOLlnkon = 0x40, + WOLlnkoff = 0x80, +}; + +/* The Rx and Tx buffer descriptors. */ +struct rx_desc { + __le32 rx_status; + __le32 desc_length; /* Chain flag, Buffer/frame length */ + __le32 addr; + __le32 next_desc; +}; +struct tx_desc { + __le32 tx_status; + __le32 desc_length; /* Chain flag, Tx Config, Frame length */ + __le32 addr; + __le32 next_desc; +}; + +/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */ +#define TXDESC 0x00e08000 + +enum rx_status_bits { + RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F +}; + +/* Bits in *_desc.*_status */ +enum desc_status_bits { + DescOwn=0x80000000 +}; + +/* Bits in *_desc.*_length */ +enum desc_length_bits { + DescTag=0x00010000 +}; + +/* Bits in ChipCmd. */ +enum chip_cmd_bits { + CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, + CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40, + Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04, + Cmd1NoTxPoll=0x08, Cmd1Reset=0x80, +}; + +struct rhine_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rhine_private { + /* Bit mask for configured VLAN ids */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + /* Descriptor rings */ + struct rx_desc *rx_ring; + struct tx_desc *tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + + /* The addresses of receive-in-place skbuffs. */ + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + dma_addr_t rx_skbuff_dma[RX_RING_SIZE]; + + /* The saved address of a sent-in-place packet/buffer, for later free(). */ + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + dma_addr_t tx_skbuff_dma[TX_RING_SIZE]; + + /* Tx bounce buffers (Rhine-I only) */ + unsigned char *tx_buf[TX_RING_SIZE]; + unsigned char *tx_bufs; + dma_addr_t tx_bufs_dma; + + int irq; + long pioaddr; + struct net_device *dev; + struct napi_struct napi; + spinlock_t lock; + struct mutex task_lock; + bool task_enable; + struct work_struct slow_event_task; + struct work_struct reset_task; + + u32 msg_enable; + + /* Frequently used values: keep some adjacent for cache effect. */ + u32 quirks; + struct rx_desc *rx_head_desc; + unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ + unsigned int cur_tx, dirty_tx; + unsigned int rx_buf_sz; /* Based on MTU+slack. 
*/ + struct rhine_stats rx_stats; + struct rhine_stats tx_stats; + u8 wolopts; + + u8 tx_thresh, rx_thresh; + + struct mii_if_info mii_if; + void __iomem *base; +}; + +#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0) +#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0) +#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0) + +#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x)) +#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x)) +#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x)) + +#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0) +#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0) +#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0) + +#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0) +#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0) +#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0) + + +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int location, int value); +static int rhine_open(struct net_device *dev); +static void rhine_reset_task(struct work_struct *work); +static void rhine_slow_event_task(struct work_struct *work); +static void rhine_tx_timeout(struct net_device *dev); +static netdev_tx_t rhine_start_tx(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t rhine_interrupt(int irq, void *dev_instance); +static void rhine_tx(struct net_device *dev); +static int rhine_rx(struct net_device *dev, int limit); +static void rhine_set_rx_mode(struct net_device *dev); +static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats); +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static const struct ethtool_ops netdev_ethtool_ops; +static int rhine_close(struct net_device *dev); +static int rhine_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid); +static int rhine_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid); +static void rhine_restart_tx(struct net_device *dev); + +static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) +{ + void __iomem *ioaddr = rp->base; + int i; + + for (i = 0; i < 1024; i++) { + bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask); + + if (low ^ has_mask_bits) + break; + udelay(10); + } + if (i > 64) { + netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " + "count: %04d\n", low ? "low" : "high", reg, mask, i); + } +} + +static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) +{ + rhine_wait_bit(rp, reg, mask, false); +} + +static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) +{ + rhine_wait_bit(rp, reg, mask, true); +} + +static u32 rhine_get_events(struct rhine_private *rp) +{ + void __iomem *ioaddr = rp->base; + u32 intr_status; + + intr_status = ioread16(ioaddr + IntrStatus); + /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. 
*/ + if (rp->quirks & rqStatusWBRace) + intr_status |= ioread8(ioaddr + IntrStatus2) << 16; + return intr_status; +} + +static void rhine_ack_events(struct rhine_private *rp, u32 mask) +{ + void __iomem *ioaddr = rp->base; + + if (rp->quirks & rqStatusWBRace) + iowrite8(mask >> 16, ioaddr + IntrStatus2); + iowrite16(mask, ioaddr + IntrStatus); + mmiowb(); +} + +/* + * Get power related registers into sane state. + * Notify user about past WOL event. + */ +static void rhine_power_init(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + u16 wolstat; + + if (rp->quirks & rqWOL) { + /* Make sure chip is in power state D0 */ + iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW); + + /* Disable "force PME-enable" */ + iowrite8(0x80, ioaddr + WOLcgClr); + + /* Clear power-event config bits (WOL) */ + iowrite8(0xFF, ioaddr + WOLcrClr); + /* More recent cards can manage two additional patterns */ + if (rp->quirks & rq6patterns) + iowrite8(0x03, ioaddr + WOLcrClr1); + + /* Save power-event status bits */ + wolstat = ioread8(ioaddr + PwrcsrSet); + if (rp->quirks & rq6patterns) + wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8; + + /* Clear power-event status bits */ + iowrite8(0xFF, ioaddr + PwrcsrClr); + if (rp->quirks & rq6patterns) + iowrite8(0x03, ioaddr + PwrcsrClr1); + + if (wolstat) { + char *reason; + switch (wolstat) { + case WOLmagic: + reason = "Magic packet"; + break; + case WOLlnkon: + reason = "Link went up"; + break; + case WOLlnkoff: + reason = "Link went down"; + break; + case WOLucast: + reason = "Unicast packet"; + break; + case WOLbmcast: + reason = "Multicast/broadcast packet"; + break; + default: + reason = "Unknown"; + } + netdev_info(dev, "Woke system up. Reason: %s\n", + reason); + } + } +} + +static void rhine_chip_reset(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + u8 cmd1; + + iowrite8(Cmd1Reset, ioaddr + ChipCmd1); + IOSYNC; + + if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) { + netdev_info(dev, "Reset not complete yet. Trying harder.\n"); + + /* Force reset */ + if (rp->quirks & rqForceReset) + iowrite8(0x40, ioaddr + MiscCmd); + + /* Reset can take somewhat longer (rare) */ + rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); + } + + cmd1 = ioread8(ioaddr + ChipCmd1); + netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? 
+ "failed" : "succeeded"); +} + +static void enable_mmio(long pioaddr, u32 quirks) +{ + int n; + + if (quirks & rqNeedEnMMIO) { + if (quirks & rqRhineI) { + /* More recent docs say that this bit is reserved */ + n = inb(pioaddr + ConfigA) | 0x20; + outb(n, pioaddr + ConfigA); + } else { + n = inb(pioaddr + ConfigD) | 0x80; + outb(n, pioaddr + ConfigD); + } + } +} + +static inline int verify_mmio(struct device *hwdev, + long pioaddr, + void __iomem *ioaddr, + u32 quirks) +{ + if (quirks & rqNeedEnMMIO) { + int i = 0; + + /* Check that selected MMIO registers match the PIO ones */ + while (mmio_verify_registers[i]) { + int reg = mmio_verify_registers[i++]; + unsigned char a = inb(pioaddr+reg); + unsigned char b = readb(ioaddr+reg); + + if (a != b) { + dev_err(hwdev, + "MMIO do not match PIO [%02x] (%02x != %02x)\n", + reg, a, b); + return -EIO; + } + } + } + return 0; +} + +/* + * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM + * (plus 0x6C for Rhine-I/II) + */ +static void rhine_reload_eeprom(long pioaddr, struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int i; + + outb(0x20, pioaddr + MACRegEEcsr); + for (i = 0; i < 1024; i++) { + if (!(inb(pioaddr + MACRegEEcsr) & 0x20)) + break; + } + if (i > 512) + pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__); + + /* + * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable + * MMIO. If reloading EEPROM was done first this could be avoided, but + * it is not known if that still works with the "win98-reboot" problem. + */ + enable_mmio(pioaddr, rp->quirks); + + /* Turn off EEPROM-controlled wake-up (magic packet) */ + if (rp->quirks & rqWOL) + iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA); + +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rhine_poll(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + const int irq = rp->irq; + + disable_irq(irq); + rhine_interrupt(irq, dev); + enable_irq(irq); +} +#endif + +static void rhine_kick_tx_threshold(struct rhine_private *rp) +{ + if (rp->tx_thresh < 0xe0) { + void __iomem *ioaddr = rp->base; + + rp->tx_thresh += 0x20; + BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); + } +} + +static void rhine_tx_err(struct rhine_private *rp, u32 status) +{ + struct net_device *dev = rp->dev; + + if (status & IntrTxAborted) { + netif_info(rp, tx_err, dev, + "Abort %08x, frame dropped\n", status); + } + + if (status & IntrTxUnderrun) { + rhine_kick_tx_threshold(rp); + netif_info(rp, tx_err ,dev, "Transmitter underrun, " + "Tx threshold now %02x\n", rp->tx_thresh); + } + + if (status & IntrTxDescRace) + netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); + + if ((status & IntrTxError) && + (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) { + rhine_kick_tx_threshold(rp); + netif_info(rp, tx_err, dev, "Unspecified error. " + "Tx threshold now %02x\n", rp->tx_thresh); + } + + rhine_restart_tx(dev); +} + +static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) +{ + void __iomem *ioaddr = rp->base; + struct net_device_stats *stats = &rp->dev->stats; + + stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); + stats->rx_missed_errors += ioread16(ioaddr + RxMissed); + + /* + * Clears the "tally counters" for CRC errors and missed frames(?). + * It has been reported that some chips need a write of 0 to clear + * these, for others the counters are set to 1 when written to and + * instead cleared when read. So we clear them both ways ... 
+ */ + iowrite32(0, ioaddr + RxMissed); + ioread16(ioaddr + RxCRCErrs); + ioread16(ioaddr + RxMissed); +} + +#define RHINE_EVENT_NAPI_RX (IntrRxDone | \ + IntrRxErr | \ + IntrRxEmpty | \ + IntrRxOverflow | \ + IntrRxDropped | \ + IntrRxNoBuf | \ + IntrRxWakeUp) + +#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \ + IntrTxAborted | \ + IntrTxUnderrun | \ + IntrTxDescRace) +#define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR) + +#define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \ + RHINE_EVENT_NAPI_TX | \ + IntrStatsMax) +#define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange) +#define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW) + +static int rhine_napipoll(struct napi_struct *napi, int budget) +{ + struct rhine_private *rp = container_of(napi, struct rhine_private, napi); + struct net_device *dev = rp->dev; + void __iomem *ioaddr = rp->base; + u16 enable_mask = RHINE_EVENT & 0xffff; + int work_done = 0; + u32 status; + + status = rhine_get_events(rp); + rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); + + if (status & RHINE_EVENT_NAPI_RX) + work_done += rhine_rx(dev, budget); + + if (status & RHINE_EVENT_NAPI_TX) { + if (status & RHINE_EVENT_NAPI_TX_ERR) { + /* Avoid scavenging before Tx engine turned off */ + rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); + if (ioread8(ioaddr + ChipCmd) & CmdTxOn) + netif_warn(rp, tx_err, dev, "Tx still on\n"); + } + + rhine_tx(dev); + + if (status & RHINE_EVENT_NAPI_TX_ERR) + rhine_tx_err(rp, status); + } + + if (status & IntrStatsMax) { + spin_lock(&rp->lock); + rhine_update_rx_crc_and_missed_errord(rp); + spin_unlock(&rp->lock); + } + + if (status & RHINE_EVENT_SLOW) { + enable_mask &= ~RHINE_EVENT_SLOW; + schedule_work(&rp->slow_event_task); + } + + if (work_done < budget) { + napi_complete(napi); + iowrite16(enable_mask, ioaddr + IntrEnable); + mmiowb(); + } + return work_done; +} + +static void rhine_hw_init(struct net_device *dev, long pioaddr) +{ + struct rhine_private *rp = netdev_priv(dev); + + /* Reset the chip to erase previous misconfiguration. 
*/ + rhine_chip_reset(dev); + + /* Rhine-I needs extra time to recuperate before EEPROM reload */ + if (rp->quirks & rqRhineI) + msleep(5); + + /* Reload EEPROM controlled bytes cleared by soft reset */ + if (dev_is_pci(dev->dev.parent)) + rhine_reload_eeprom(pioaddr, dev); +} + +static const struct net_device_ops rhine_netdev_ops = { + .ndo_open = rhine_open, + .ndo_stop = rhine_close, + .ndo_start_xmit = rhine_start_tx, + .ndo_get_stats64 = rhine_get_stats64, + .ndo_set_rx_mode = rhine_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_do_ioctl = netdev_ioctl, + .ndo_tx_timeout = rhine_tx_timeout, + .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rhine_poll, +#endif +}; + +static int rhine_init_one_common(struct device *hwdev, u32 quirks, + long pioaddr, void __iomem *ioaddr, int irq) +{ + struct net_device *dev; + struct rhine_private *rp; + int i, rc, phy_id; + const char *name; + + /* this should always be supported */ + rc = dma_set_mask(hwdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n"); + goto err_out; + } + + dev = alloc_etherdev(sizeof(struct rhine_private)); + if (!dev) { + rc = -ENOMEM; + goto err_out; + } + SET_NETDEV_DEV(dev, hwdev); + + rp = netdev_priv(dev); + rp->dev = dev; + rp->quirks = quirks; + rp->pioaddr = pioaddr; + rp->base = ioaddr; + rp->irq = irq; + rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); + + phy_id = rp->quirks & rqIntPHY ? 1 : 0; + + u64_stats_init(&rp->tx_stats.syncp); + u64_stats_init(&rp->rx_stats.syncp); + + /* Get chip registers into a sane state */ + rhine_power_init(dev); + rhine_hw_init(dev, pioaddr); + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); + + if (!is_valid_ether_addr(dev->dev_addr)) { + /* Report it and use a random ethernet address instead */ + netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr); + eth_hw_addr_random(dev); + netdev_info(dev, "Using random MAC address: %pM\n", + dev->dev_addr); + } + + /* For Rhine-I/II, phy_id is loaded from EEPROM */ + if (!phy_id) + phy_id = ioread8(ioaddr + 0x6C); + + spin_lock_init(&rp->lock); + mutex_init(&rp->task_lock); + INIT_WORK(&rp->reset_task, rhine_reset_task); + INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); + + rp->mii_if.dev = dev; + rp->mii_if.mdio_read = mdio_read; + rp->mii_if.mdio_write = mdio_write; + rp->mii_if.phy_id_mask = 0x1f; + rp->mii_if.reg_num_mask = 0x1f; + + /* The chip-specific entries in the device structure. */ + dev->netdev_ops = &rhine_netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = TX_TIMEOUT; + + netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); + + if (rp->quirks & rqRhineI) + dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; + + if (rp->quirks & rqMgmt) + dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + /* dev->name not defined before register_netdev()! 
*/ + rc = register_netdev(dev); + if (rc) + goto err_out_free_netdev; + + if (rp->quirks & rqRhineI) + name = "Rhine"; + else if (rp->quirks & rqStatusWBRace) + name = "Rhine II"; + else if (rp->quirks & rqMgmt) + name = "Rhine III (Management Adapter)"; + else + name = "Rhine III"; + + netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n", + name, (long)ioaddr, dev->dev_addr, rp->irq); + + dev_set_drvdata(hwdev, dev); + + { + u16 mii_cmd; + int mii_status = mdio_read(dev, phy_id, 1); + mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE; + mdio_write(dev, phy_id, MII_BMCR, mii_cmd); + if (mii_status != 0xffff && mii_status != 0x0000) { + rp->mii_if.advertising = mdio_read(dev, phy_id, 4); + netdev_info(dev, + "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n", + phy_id, + mii_status, rp->mii_if.advertising, + mdio_read(dev, phy_id, 5)); + + /* set IFF_RUNNING */ + if (mii_status & BMSR_LSTATUS) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + } + } + rp->mii_if.phy_id = phy_id; + if (avoid_D3) + netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); + + return 0; + +err_out_free_netdev: + free_netdev(dev); +err_out: + return rc; +} + +static int rhine_init_one_pci(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *hwdev = &pdev->dev; + int rc; + long pioaddr, memaddr; + void __iomem *ioaddr; + int io_size = pdev->revision < VTunknown0 ? 128 : 256; + +/* This driver was written to use PCI memory space. Some early versions + * of the Rhine may only work correctly with I/O space accesses. + * TODO: determine for which revisions this is true and assign the flag + * in code as opposed to this Kconfig option (???) + */ +#ifdef CONFIG_VIA_RHINE_MMIO + u32 quirks = rqNeedEnMMIO; +#else + u32 quirks = 0; +#endif + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + pr_info_once("%s\n", version); +#endif + + rc = pci_enable_device(pdev); + if (rc) + goto err_out; + + if (pdev->revision < VTunknown0) { + quirks |= rqRhineI; + } else if (pdev->revision >= VT6102) { + quirks |= rqWOL | rqForceReset; + if (pdev->revision < VT6105) { + quirks |= rqStatusWBRace; + } else { + quirks |= rqIntPHY; + if (pdev->revision >= VT6105_B0) + quirks |= rq6patterns; + if (pdev->revision >= VT6105M) + quirks |= rqMgmt; + } + } + + /* sanity check */ + if ((pci_resource_len(pdev, 0) < io_size) || + (pci_resource_len(pdev, 1) < io_size)) { + rc = -EIO; + dev_err(hwdev, "Insufficient PCI resources, aborting\n"); + goto err_out_pci_disable; + } + + pioaddr = pci_resource_start(pdev, 0); + memaddr = pci_resource_start(pdev, 1); + + pci_set_master(pdev); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_pci_disable; + + ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 
1 : 0), io_size); + if (!ioaddr) { + rc = -EIO; + dev_err(hwdev, + "ioremap failed for device %s, region 0x%X @ 0x%lX\n", + dev_name(hwdev), io_size, memaddr); + goto err_out_free_res; + } + + enable_mmio(pioaddr, quirks); + + rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks); + if (rc) + goto err_out_unmap; + + rc = rhine_init_one_common(&pdev->dev, quirks, + pioaddr, ioaddr, pdev->irq); + if (!rc) + return 0; + +err_out_unmap: + pci_iounmap(pdev, ioaddr); +err_out_free_res: + pci_release_regions(pdev); +err_out_pci_disable: + pci_disable_device(pdev); +err_out: + return rc; +} + +static int rhine_init_one_platform(struct platform_device *pdev) +{ + const struct of_device_id *match; + const u32 *quirks; + int irq; + struct resource *res; + void __iomem *ioaddr; + + match = of_match_device(rhine_of_tbl, &pdev->dev); + if (!match) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ioaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ioaddr)) + return PTR_ERR(ioaddr); + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!irq) + return -EINVAL; + + quirks = match->data; + if (!quirks) + return -EINVAL; + + return rhine_init_one_common(&pdev->dev, *quirks, + (long)ioaddr, ioaddr, irq); +} + +static int alloc_ring(struct net_device* dev) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + void *ring; + dma_addr_t ring_dma; + + ring = dma_alloc_coherent(hwdev, + RX_RING_SIZE * sizeof(struct rx_desc) + + TX_RING_SIZE * sizeof(struct tx_desc), + &ring_dma, + GFP_ATOMIC); + if (!ring) { + netdev_err(dev, "Could not allocate DMA memory\n"); + return -ENOMEM; + } + if (rp->quirks & rqRhineI) { + rp->tx_bufs = dma_alloc_coherent(hwdev, + PKT_BUF_SZ * TX_RING_SIZE, + &rp->tx_bufs_dma, + GFP_ATOMIC); + if (rp->tx_bufs == NULL) { + dma_free_coherent(hwdev, + RX_RING_SIZE * sizeof(struct rx_desc) + + TX_RING_SIZE * sizeof(struct tx_desc), + ring, ring_dma); + return -ENOMEM; + } + } + + rp->rx_ring = ring; + rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); + rp->rx_ring_dma = ring_dma; + rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); + + return 0; +} + +static void free_ring(struct net_device* dev) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + + dma_free_coherent(hwdev, + RX_RING_SIZE * sizeof(struct rx_desc) + + TX_RING_SIZE * sizeof(struct tx_desc), + rp->rx_ring, rp->rx_ring_dma); + rp->tx_ring = NULL; + + if (rp->tx_bufs) + dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE, + rp->tx_bufs, rp->tx_bufs_dma); + + rp->tx_bufs = NULL; + +} + +static void alloc_rbufs(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + dma_addr_t next; + int i; + + rp->dirty_rx = rp->cur_rx = 0; + + rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + rp->rx_head_desc = &rp->rx_ring[0]; + next = rp->rx_ring_dma; + + /* Init the ring entries */ + for (i = 0; i < RX_RING_SIZE; i++) { + rp->rx_ring[i].rx_status = 0; + rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); + next += sizeof(struct rx_desc); + rp->rx_ring[i].next_desc = cpu_to_le32(next); + rp->rx_skbuff[i] = NULL; + } + /* Mark the last entry as wrapping the ring. */ + rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); + + /* Fill in the Rx buffers. Handle allocation failure gracefully. 
*/ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz); + rp->rx_skbuff[i] = skb; + if (skb == NULL) + break; + + rp->rx_skbuff_dma[i] = + dma_map_single(hwdev, skb->data, rp->rx_buf_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) { + rp->rx_skbuff_dma[i] = 0; + dev_kfree_skb(skb); + break; + } + rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); + rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); + } + rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); +} + +static void free_rbufs(struct net_device* dev) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + int i; + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + rp->rx_ring[i].rx_status = 0; + rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ + if (rp->rx_skbuff[i]) { + dma_unmap_single(hwdev, + rp->rx_skbuff_dma[i], + rp->rx_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb(rp->rx_skbuff[i]); + } + rp->rx_skbuff[i] = NULL; + } +} + +static void alloc_tbufs(struct net_device* dev) +{ + struct rhine_private *rp = netdev_priv(dev); + dma_addr_t next; + int i; + + rp->dirty_tx = rp->cur_tx = 0; + next = rp->tx_ring_dma; + for (i = 0; i < TX_RING_SIZE; i++) { + rp->tx_skbuff[i] = NULL; + rp->tx_ring[i].tx_status = 0; + rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); + next += sizeof(struct tx_desc); + rp->tx_ring[i].next_desc = cpu_to_le32(next); + if (rp->quirks & rqRhineI) + rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; + } + rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); + + netdev_reset_queue(dev); +} + +static void free_tbufs(struct net_device* dev) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + int i; + + for (i = 0; i < TX_RING_SIZE; i++) { + rp->tx_ring[i].tx_status = 0; + rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); + rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ + if (rp->tx_skbuff[i]) { + if (rp->tx_skbuff_dma[i]) { + dma_unmap_single(hwdev, + rp->tx_skbuff_dma[i], + rp->tx_skbuff[i]->len, + DMA_TO_DEVICE); + } + dev_kfree_skb(rp->tx_skbuff[i]); + } + rp->tx_skbuff[i] = NULL; + rp->tx_buf[i] = NULL; + } +} + +static void rhine_check_media(struct net_device *dev, unsigned int init_media) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + + if (!rp->mii_if.force_media) + mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); + + if (rp->mii_if.full_duplex) + iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, + ioaddr + ChipCmd1); + else + iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, + ioaddr + ChipCmd1); + + netif_info(rp, link, dev, "force_media %d, carrier %d\n", + rp->mii_if.force_media, netif_carrier_ok(dev)); +} + +/* Called after status of force_media possibly changed */ +static void rhine_set_carrier(struct mii_if_info *mii) +{ + struct net_device *dev = mii->dev; + struct rhine_private *rp = netdev_priv(dev); + + if (mii->force_media) { + /* autoneg is off: Link is always assumed to be up */ + if (!netif_carrier_ok(dev)) + netif_carrier_on(dev); + } + + rhine_check_media(dev, 0); + + netif_info(rp, link, dev, "force_media %d, carrier %d\n", + mii->force_media, netif_carrier_ok(dev)); +} + +/** + * rhine_set_cam - set CAM multicast filters + * @ioaddr: register block of this Rhine + * @idx: multicast CAM index [0..MCAM_SIZE-1] + * @addr: multicast address (6 bytes) + * + * Load addresses into multicast filters. 
+ */ +static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr) +{ + int i; + + iowrite8(CAMC_CAMEN, ioaddr + CamCon); + wmb(); + + /* Paranoid -- idx out of range should never happen */ + idx &= (MCAM_SIZE - 1); + + iowrite8((u8) idx, ioaddr + CamAddr); + + for (i = 0; i < 6; i++, addr++) + iowrite8(*addr, ioaddr + MulticastFilter0 + i); + udelay(10); + wmb(); + + iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); + udelay(10); + + iowrite8(0, ioaddr + CamCon); +} + +/** + * rhine_set_vlan_cam - set CAM VLAN filters + * @ioaddr: register block of this Rhine + * @idx: VLAN CAM index [0..VCAM_SIZE-1] + * @addr: VLAN ID (2 bytes) + * + * Load addresses into VLAN filters. + */ +static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr) +{ + iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); + wmb(); + + /* Paranoid -- idx out of range should never happen */ + idx &= (VCAM_SIZE - 1); + + iowrite8((u8) idx, ioaddr + CamAddr); + + iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6); + udelay(10); + wmb(); + + iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); + udelay(10); + + iowrite8(0, ioaddr + CamCon); +} + +/** + * rhine_set_cam_mask - set multicast CAM mask + * @ioaddr: register block of this Rhine + * @mask: multicast CAM mask + * + * Mask sets multicast filters active/inactive. + */ +static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask) +{ + iowrite8(CAMC_CAMEN, ioaddr + CamCon); + wmb(); + + /* write mask */ + iowrite32(mask, ioaddr + CamMask); + + /* disable CAMEN */ + iowrite8(0, ioaddr + CamCon); +} + +/** + * rhine_set_vlan_cam_mask - set VLAN CAM mask + * @ioaddr: register block of this Rhine + * @mask: VLAN CAM mask + * + * Mask sets VLAN filters active/inactive. + */ +static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask) +{ + iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon); + wmb(); + + /* write mask */ + iowrite32(mask, ioaddr + CamMask); + + /* disable CAMEN */ + iowrite8(0, ioaddr + CamCon); +} + +/** + * rhine_init_cam_filter - initialize CAM filters + * @dev: network device + * + * Initialize (disable) hardware VLAN and multicast support on this + * Rhine. + */ +static void rhine_init_cam_filter(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + + /* Disable all CAMs */ + rhine_set_vlan_cam_mask(ioaddr, 0); + rhine_set_cam_mask(ioaddr, 0); + + /* disable hardware VLAN support */ + BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig); + BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); +} + +/** + * rhine_update_vcam - update VLAN CAM filters + * @rp: rhine_private data of this Rhine + * + * Update VLAN CAM filters to match configuration change. 
+ */ +static void rhine_update_vcam(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + u16 vid; + u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */ + unsigned int i = 0; + + for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { + rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid); + vCAMmask |= 1 << i; + if (++i >= VCAM_SIZE) + break; + } + rhine_set_vlan_cam_mask(ioaddr, vCAMmask); +} + +static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct rhine_private *rp = netdev_priv(dev); + + spin_lock_bh(&rp->lock); + set_bit(vid, rp->active_vlans); + rhine_update_vcam(dev); + spin_unlock_bh(&rp->lock); + return 0; +} + +static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct rhine_private *rp = netdev_priv(dev); + + spin_lock_bh(&rp->lock); + clear_bit(vid, rp->active_vlans); + rhine_update_vcam(dev); + spin_unlock_bh(&rp->lock); + return 0; +} + +static void init_registers(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int i; + + for (i = 0; i < 6; i++) + iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); + + /* Initialize other registers. */ + iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */ + /* Configure initial FIFO thresholds. */ + iowrite8(0x20, ioaddr + TxConfig); + rp->tx_thresh = 0x20; + rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ + + iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); + iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); + + rhine_set_rx_mode(dev); + + if (rp->quirks & rqMgmt) + rhine_init_cam_filter(dev); + + napi_enable(&rp->napi); + + iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable); + + iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), + ioaddr + ChipCmd); + rhine_check_media(dev, 1); +} + +/* Enable MII link status auto-polling (required for IntrLinkChange) */ +static void rhine_enable_linkmon(struct rhine_private *rp) +{ + void __iomem *ioaddr = rp->base; + + iowrite8(0, ioaddr + MIICmd); + iowrite8(MII_BMSR, ioaddr + MIIRegAddr); + iowrite8(0x80, ioaddr + MIICmd); + + rhine_wait_bit_high(rp, MIIRegAddr, 0x20); + + iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); +} + +/* Disable MII link status auto-polling (required for MDIO access) */ +static void rhine_disable_linkmon(struct rhine_private *rp) +{ + void __iomem *ioaddr = rp->base; + + iowrite8(0, ioaddr + MIICmd); + + if (rp->quirks & rqRhineI) { + iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR + + /* Can be called from ISR. Evil. */ + mdelay(1); + + /* 0x80 must be set immediately before turning it off */ + iowrite8(0x80, ioaddr + MIICmd); + + rhine_wait_bit_high(rp, MIIRegAddr, 0x20); + + /* Heh. Now clear 0x80 again. */ + iowrite8(0, ioaddr + MIICmd); + } + else + rhine_wait_bit_high(rp, MIIRegAddr, 0x80); +} + +/* Read and write over the MII Management Data I/O (MDIO) interface. 
*/ + +static int mdio_read(struct net_device *dev, int phy_id, int regnum) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int result; + + rhine_disable_linkmon(rp); + + /* rhine_disable_linkmon already cleared MIICmd */ + iowrite8(phy_id, ioaddr + MIIPhyAddr); + iowrite8(regnum, ioaddr + MIIRegAddr); + iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */ + rhine_wait_bit_low(rp, MIICmd, 0x40); + result = ioread16(ioaddr + MIIData); + + rhine_enable_linkmon(rp); + return result; +} + +static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + + rhine_disable_linkmon(rp); + + /* rhine_disable_linkmon already cleared MIICmd */ + iowrite8(phy_id, ioaddr + MIIPhyAddr); + iowrite8(regnum, ioaddr + MIIRegAddr); + iowrite16(value, ioaddr + MIIData); + iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */ + rhine_wait_bit_low(rp, MIICmd, 0x20); + + rhine_enable_linkmon(rp); +} + +static void rhine_task_disable(struct rhine_private *rp) +{ + mutex_lock(&rp->task_lock); + rp->task_enable = false; + mutex_unlock(&rp->task_lock); + + cancel_work_sync(&rp->slow_event_task); + cancel_work_sync(&rp->reset_task); +} + +static void rhine_task_enable(struct rhine_private *rp) +{ + mutex_lock(&rp->task_lock); + rp->task_enable = true; + mutex_unlock(&rp->task_lock); +} + +static int rhine_open(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int rc; + + rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev); + if (rc) + return rc; + + netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq); + + rc = alloc_ring(dev); + if (rc) { + free_irq(rp->irq, dev); + return rc; + } + alloc_rbufs(dev); + alloc_tbufs(dev); + rhine_chip_reset(dev); + rhine_task_enable(rp); + init_registers(dev); + + netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", + __func__, ioread16(ioaddr + ChipCmd), + mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); + + netif_start_queue(dev); + + return 0; +} + +static void rhine_reset_task(struct work_struct *work) +{ + struct rhine_private *rp = container_of(work, struct rhine_private, + reset_task); + struct net_device *dev = rp->dev; + + mutex_lock(&rp->task_lock); + + if (!rp->task_enable) + goto out_unlock; + + napi_disable(&rp->napi); + netif_tx_disable(dev); + spin_lock_bh(&rp->lock); + + /* clear all descriptors */ + free_tbufs(dev); + free_rbufs(dev); + alloc_tbufs(dev); + alloc_rbufs(dev); + + /* Reinitialize the hardware. */ + rhine_chip_reset(dev); + init_registers(dev); + + spin_unlock_bh(&rp->lock); + + dev->trans_start = jiffies; /* prevent tx timeout */ + dev->stats.tx_errors++; + netif_wake_queue(dev); + +out_unlock: + mutex_unlock(&rp->task_lock); +} + +static void rhine_tx_timeout(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + + netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n", + ioread16(ioaddr + IntrStatus), + mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); + + schedule_work(&rp->reset_task); +} + +static netdev_tx_t rhine_start_tx(struct sk_buff *skb, + struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + void __iomem *ioaddr = rp->base; + unsigned entry; + + /* Caution: the write order is important here, set the field + with the "ownership" bits last. 
*/ + + /* Calculate the next Tx descriptor entry. */ + entry = rp->cur_tx % TX_RING_SIZE; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + rp->tx_skbuff[entry] = skb; + + if ((rp->quirks & rqRhineI) && + (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) { + /* Must use alignment buffer. */ + if (skb->len > PKT_BUF_SZ) { + /* packet too long, drop it */ + dev_kfree_skb_any(skb); + rp->tx_skbuff[entry] = NULL; + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + /* Padding is not copied and so must be redone. */ + skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); + if (skb->len < ETH_ZLEN) + memset(rp->tx_buf[entry] + skb->len, 0, + ETH_ZLEN - skb->len); + rp->tx_skbuff_dma[entry] = 0; + rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + + (rp->tx_buf[entry] - + rp->tx_bufs)); + } else { + rp->tx_skbuff_dma[entry] = + dma_map_single(hwdev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) { + dev_kfree_skb_any(skb); + rp->tx_skbuff_dma[entry] = 0; + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); + } + + rp->tx_ring[entry].desc_length = + cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); + + if (unlikely(skb_vlan_tag_present(skb))) { + u16 vid_pcp = skb_vlan_tag_get(skb); + + /* drop CFI/DEI bit, register needs VID and PCP */ + vid_pcp = (vid_pcp & VLAN_VID_MASK) | + ((vid_pcp & VLAN_PRIO_MASK) >> 1); + rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); + /* request tagging */ + rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); + } + else + rp->tx_ring[entry].tx_status = 0; + + netdev_sent_queue(dev, skb->len); + /* lock eth irq */ + wmb(); + rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); + wmb(); + + rp->cur_tx++; + + /* Non-x86 Todo: explicitly flush cache lines here. */ + + if (skb_vlan_tag_present(skb)) + /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ + BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); + + /* Wake the potentially-idle transmit channel */ + iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, + ioaddr + ChipCmd1); + IOSYNC; + + if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) + netif_stop_queue(dev); + + netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", + rp->cur_tx - 1, entry); + + return NETDEV_TX_OK; +} + +static void rhine_irq_disable(struct rhine_private *rp) +{ + iowrite16(0x0000, rp->base + IntrEnable); + mmiowb(); +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static irqreturn_t rhine_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct rhine_private *rp = netdev_priv(dev); + u32 status; + int handled = 0; + + status = rhine_get_events(rp); + + netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); + + if (status & RHINE_EVENT) { + handled = 1; + + rhine_irq_disable(rp); + napi_schedule(&rp->napi); + } + + if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { + netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", + status); + } + + return IRQ_RETVAL(handled); +} + +/* This routine is logically part of the interrupt handler, but isolated + for clarity. 
*/ +static void rhine_tx(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; + unsigned int pkts_compl = 0, bytes_compl = 0; + struct sk_buff *skb; + + /* find and cleanup dirty tx descriptors */ + while (rp->dirty_tx != rp->cur_tx) { + txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); + netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", + entry, txstatus); + if (txstatus & DescOwn) + break; + skb = rp->tx_skbuff[entry]; + if (txstatus & 0x8000) { + netif_dbg(rp, tx_done, dev, + "Transmit error, Tx status %08x\n", txstatus); + dev->stats.tx_errors++; + if (txstatus & 0x0400) + dev->stats.tx_carrier_errors++; + if (txstatus & 0x0200) + dev->stats.tx_window_errors++; + if (txstatus & 0x0100) + dev->stats.tx_aborted_errors++; + if (txstatus & 0x0080) + dev->stats.tx_heartbeat_errors++; + if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || + (txstatus & 0x0800) || (txstatus & 0x1000)) { + dev->stats.tx_fifo_errors++; + rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); + break; /* Keep the skb - we try again */ + } + /* Transmitter restarted in 'abnormal' handler. */ + } else { + if (rp->quirks & rqRhineI) + dev->stats.collisions += (txstatus >> 3) & 0x0F; + else + dev->stats.collisions += txstatus & 0x0F; + netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", + (txstatus >> 3) & 0xF, txstatus & 0xF); + + u64_stats_update_begin(&rp->tx_stats.syncp); + rp->tx_stats.bytes += skb->len; + rp->tx_stats.packets++; + u64_stats_update_end(&rp->tx_stats.syncp); + } + /* Free the original skb. */ + if (rp->tx_skbuff_dma[entry]) { + dma_unmap_single(hwdev, + rp->tx_skbuff_dma[entry], + skb->len, + DMA_TO_DEVICE); + } + bytes_compl += skb->len; + pkts_compl++; + dev_consume_skb_any(skb); + rp->tx_skbuff[entry] = NULL; + entry = (++rp->dirty_tx) % TX_RING_SIZE; + } + + netdev_completed_queue(dev, pkts_compl, bytes_compl); + if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) + netif_wake_queue(dev); +} + +/** + * rhine_get_vlan_tci - extract TCI from Rx data buffer + * @skb: pointer to sk_buff + * @data_size: used data area of the buffer including CRC + * + * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q + * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte + * aligned following the CRC. + */ +static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size) +{ + u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; + return be16_to_cpup((__be16 *)trailer); +} + +/* Process up to limit frames from receive ring */ +static int rhine_rx(struct net_device *dev, int limit) +{ + struct rhine_private *rp = netdev_priv(dev); + struct device *hwdev = dev->dev.parent; + int count; + int entry = rp->cur_rx % RX_RING_SIZE; + + netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, + entry, le32_to_cpu(rp->rx_head_desc->rx_status)); + + /* If EOP is set on the next entry, it's a new packet. Send it up. 
*/ + for (count = 0; count < limit; ++count) { + struct rx_desc *desc = rp->rx_head_desc; + u32 desc_status = le32_to_cpu(desc->rx_status); + u32 desc_length = le32_to_cpu(desc->desc_length); + int data_size = desc_status >> 16; + + if (desc_status & DescOwn) + break; + + netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, + desc_status); + + if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { + if ((desc_status & RxWholePkt) != RxWholePkt) { + netdev_warn(dev, + "Oversized Ethernet frame spanned multiple buffers, " + "entry %#x length %d status %08x!\n", + entry, data_size, + desc_status); + netdev_warn(dev, + "Oversized Ethernet frame %p vs %p\n", + rp->rx_head_desc, + &rp->rx_ring[entry]); + dev->stats.rx_length_errors++; + } else if (desc_status & RxErr) { + /* There was a error. */ + netif_dbg(rp, rx_err, dev, + "%s() Rx error %08x\n", __func__, + desc_status); + dev->stats.rx_errors++; + if (desc_status & 0x0030) + dev->stats.rx_length_errors++; + if (desc_status & 0x0048) + dev->stats.rx_fifo_errors++; + if (desc_status & 0x0004) + dev->stats.rx_frame_errors++; + if (desc_status & 0x0002) { + /* this can also be updated outside the interrupt handler */ + spin_lock(&rp->lock); + dev->stats.rx_crc_errors++; + spin_unlock(&rp->lock); + } + } + } else { + struct sk_buff *skb = NULL; + /* Length should omit the CRC */ + int pkt_len = data_size - 4; + u16 vlan_tci = 0; + + /* Check if the packet is long enough to accept without + copying to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak) + skb = netdev_alloc_skb_ip_align(dev, pkt_len); + if (skb) { + dma_sync_single_for_cpu(hwdev, + rp->rx_skbuff_dma[entry], + rp->rx_buf_sz, + DMA_FROM_DEVICE); + + skb_copy_to_linear_data(skb, + rp->rx_skbuff[entry]->data, + pkt_len); + skb_put(skb, pkt_len); + dma_sync_single_for_device(hwdev, + rp->rx_skbuff_dma[entry], + rp->rx_buf_sz, + DMA_FROM_DEVICE); + } else { + skb = rp->rx_skbuff[entry]; + if (skb == NULL) { + netdev_err(dev, "Inconsistent Rx descriptor chain\n"); + break; + } + rp->rx_skbuff[entry] = NULL; + skb_put(skb, pkt_len); + dma_unmap_single(hwdev, + rp->rx_skbuff_dma[entry], + rp->rx_buf_sz, + DMA_FROM_DEVICE); + } + + if (unlikely(desc_length & DescTag)) + vlan_tci = rhine_get_vlan_tci(skb, data_size); + + skb->protocol = eth_type_trans(skb, dev); + + if (unlikely(desc_length & DescTag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + netif_receive_skb(skb); + + u64_stats_update_begin(&rp->rx_stats.syncp); + rp->rx_stats.bytes += pkt_len; + rp->rx_stats.packets++; + u64_stats_update_end(&rp->rx_stats.syncp); + } + entry = (++rp->cur_rx) % RX_RING_SIZE; + rp->rx_head_desc = &rp->rx_ring[entry]; + } + + /* Refill the Rx ring buffers. */ + for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) { + struct sk_buff *skb; + entry = rp->dirty_rx % RX_RING_SIZE; + if (rp->rx_skbuff[entry] == NULL) { + skb = netdev_alloc_skb(dev, rp->rx_buf_sz); + rp->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. 
*/ + rp->rx_skbuff_dma[entry] = + dma_map_single(hwdev, skb->data, + rp->rx_buf_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(hwdev, + rp->rx_skbuff_dma[entry])) { + dev_kfree_skb(skb); + rp->rx_skbuff_dma[entry] = 0; + break; + } + rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); + } + rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); + } + + return count; +} + +static void rhine_restart_tx(struct net_device *dev) { + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + int entry = rp->dirty_tx % TX_RING_SIZE; + u32 intr_status; + + /* + * If new errors occurred, we need to sort them out before doing Tx. + * In that case the ISR will be back here RSN anyway. + */ + intr_status = rhine_get_events(rp); + + if ((intr_status & IntrTxErrSummary) == 0) { + + /* We know better than the chip where it should continue. */ + iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), + ioaddr + TxRingPtr); + + iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, + ioaddr + ChipCmd); + + if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) + /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ + BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake); + + iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, + ioaddr + ChipCmd1); + IOSYNC; + } + else { + /* This should never happen */ + netif_warn(rp, tx_err, dev, "another error occurred %08x\n", + intr_status); + } + +} + +static void rhine_slow_event_task(struct work_struct *work) +{ + struct rhine_private *rp = + container_of(work, struct rhine_private, slow_event_task); + struct net_device *dev = rp->dev; + u32 intr_status; + + mutex_lock(&rp->task_lock); + + if (!rp->task_enable) + goto out_unlock; + + intr_status = rhine_get_events(rp); + rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); + + if (intr_status & IntrLinkChange) + rhine_check_media(dev, 0); + + if (intr_status & IntrPCIErr) + netif_warn(rp, hw, dev, "PCI error\n"); + + iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); + +out_unlock: + mutex_unlock(&rp->task_lock); +} + +static struct rtnl_link_stats64 * +rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rhine_private *rp = netdev_priv(dev); + unsigned int start; + + spin_lock_bh(&rp->lock); + rhine_update_rx_crc_and_missed_errord(rp); + spin_unlock_bh(&rp->lock); + + netdev_stats_to_stats64(stats, &dev->stats); + + do { + start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); + stats->rx_packets = rp->rx_stats.packets; + stats->rx_bytes = rp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); + stats->tx_packets = rp->tx_stats.packets; + stats->tx_bytes = rp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); + + return stats; +} + +static void rhine_set_rx_mode(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + u32 mc_filter[2]; /* Multicast hash filter */ + u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */ + struct netdev_hw_addr *ha; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + rx_mode = 0x1C; + iowrite32(0xffffffff, ioaddr + MulticastFilter0); + iowrite32(0xffffffff, ioaddr + MulticastFilter1); + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. 
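+ * Writing all ones to both 32-bit hash registers makes the 64-bit multicast hash filter accept every multicast address.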
*/ + iowrite32(0xffffffff, ioaddr + MulticastFilter0); + iowrite32(0xffffffff, ioaddr + MulticastFilter1); + } else if (rp->quirks & rqMgmt) { + int i = 0; + u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */ + netdev_for_each_mc_addr(ha, dev) { + if (i == MCAM_SIZE) + break; + rhine_set_cam(ioaddr, i, ha->addr); + mCAMmask |= 1 << i; + i++; + } + rhine_set_cam_mask(ioaddr, mCAMmask); + } else { + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + iowrite32(mc_filter[0], ioaddr + MulticastFilter0); + iowrite32(mc_filter[1], ioaddr + MulticastFilter1); + } + /* enable/disable VLAN receive filtering */ + if (rp->quirks & rqMgmt) { + if (dev->flags & IFF_PROMISC) + BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1); + else + BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1); + } + BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig); +} + +static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct device *hwdev = dev->dev.parent; + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info)); +} + +static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rhine_private *rp = netdev_priv(dev); + int rc; + + mutex_lock(&rp->task_lock); + rc = mii_ethtool_gset(&rp->mii_if, cmd); + mutex_unlock(&rp->task_lock); + + return rc; +} + +static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rhine_private *rp = netdev_priv(dev); + int rc; + + mutex_lock(&rp->task_lock); + rc = mii_ethtool_sset(&rp->mii_if, cmd); + rhine_set_carrier(&rp->mii_if); + mutex_unlock(&rp->task_lock); + + return rc; +} + +static int netdev_nway_reset(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + + return mii_nway_restart(&rp->mii_if); +} + +static u32 netdev_get_link(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + + return mii_link_ok(&rp->mii_if); +} + +static u32 netdev_get_msglevel(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + + return rp->msg_enable; +} + +static void netdev_set_msglevel(struct net_device *dev, u32 value) +{ + struct rhine_private *rp = netdev_priv(dev); + + rp->msg_enable = value; +} + +static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rhine_private *rp = netdev_priv(dev); + + if (!(rp->quirks & rqWOL)) + return; + + spin_lock_irq(&rp->lock); + wol->supported = WAKE_PHY | WAKE_MAGIC | + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ + wol->wolopts = rp->wolopts; + spin_unlock_irq(&rp->lock); +} + +static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rhine_private *rp = netdev_priv(dev); + u32 support = WAKE_PHY | WAKE_MAGIC | + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */ + + if (!(rp->quirks & rqWOL)) + return -EINVAL; + + if (wol->wolopts & ~support) + return -EINVAL; + + spin_lock_irq(&rp->lock); + rp->wolopts = wol->wolopts; + spin_unlock_irq(&rp->lock); + + return 0; +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, + .get_settings = netdev_get_settings, + .set_settings = netdev_set_settings, + .nway_reset = netdev_nway_reset, + .get_link = netdev_get_link, + .get_msglevel = netdev_get_msglevel, + .set_msglevel = 
netdev_set_msglevel, + .get_wol = rhine_get_wol, + .set_wol = rhine_set_wol, +}; + +static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct rhine_private *rp = netdev_priv(dev); + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + mutex_lock(&rp->task_lock); + rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); + rhine_set_carrier(&rp->mii_if); + mutex_unlock(&rp->task_lock); + + return rc; +} + +static int rhine_close(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + + rhine_task_disable(rp); + napi_disable(&rp->napi); + netif_stop_queue(dev); + + netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", + ioread16(ioaddr + ChipCmd)); + + /* Switch to loopback mode to avoid hardware races. */ + iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); + + rhine_irq_disable(rp); + + /* Stop the chip's Tx and Rx processes. */ + iowrite16(CmdStop, ioaddr + ChipCmd); + + free_irq(rp->irq, dev); + free_rbufs(dev); + free_tbufs(dev); + free_ring(dev); + + return 0; +} + + +static void rhine_remove_one_pci(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rhine_private *rp = netdev_priv(dev); + + unregister_netdev(dev); + + pci_iounmap(pdev, rp->base); + pci_release_regions(pdev); + + free_netdev(dev); + pci_disable_device(pdev); +} + +static int rhine_remove_one_platform(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct rhine_private *rp = netdev_priv(dev); + + unregister_netdev(dev); + + iounmap(rp->base); + + free_netdev(dev); + + return 0; +} + +static void rhine_shutdown_pci(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rhine_private *rp = netdev_priv(dev); + void __iomem *ioaddr = rp->base; + + if (!(rp->quirks & rqWOL)) + return; /* Nothing to do for non-WOL adapters */ + + rhine_power_init(dev); + + /* Make sure we use pattern 0, 1 and not 4, 5 */ + if (rp->quirks & rq6patterns) + iowrite8(0x04, ioaddr + WOLcgClr); + + spin_lock(&rp->lock); + + if (rp->wolopts & WAKE_MAGIC) { + iowrite8(WOLmagic, ioaddr + WOLcrSet); + /* + * Turn EEPROM-controlled wake-up back on -- some hardware may + * not cooperate otherwise. 
+ */ + iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA); + } + + if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) + iowrite8(WOLbmcast, ioaddr + WOLcgSet); + + if (rp->wolopts & WAKE_PHY) + iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet); + + if (rp->wolopts & WAKE_UCAST) + iowrite8(WOLucast, ioaddr + WOLcrSet); + + if (rp->wolopts) { + /* Enable legacy WOL (for old motherboards) */ + iowrite8(0x01, ioaddr + PwcfgSet); + iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); + } + + spin_unlock(&rp->lock); + + if (system_state == SYSTEM_POWER_OFF && !avoid_D3) { + iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PM_SLEEP +static int rhine_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct rhine_private *rp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + rhine_task_disable(rp); + rhine_irq_disable(rp); + napi_disable(&rp->napi); + + netif_device_detach(dev); + + if (dev_is_pci(device)) + rhine_shutdown_pci(to_pci_dev(device)); + + return 0; +} + +static int rhine_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct rhine_private *rp = netdev_priv(dev); + + if (!netif_running(dev)) + return 0; + + enable_mmio(rp->pioaddr, rp->quirks); + rhine_power_init(dev); + free_tbufs(dev); + free_rbufs(dev); + alloc_tbufs(dev); + alloc_rbufs(dev); + rhine_task_enable(rp); + spin_lock_bh(&rp->lock); + init_registers(dev); + spin_unlock_bh(&rp->lock); + + netif_device_attach(dev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume); +#define RHINE_PM_OPS (&rhine_pm_ops) + +#else + +#define RHINE_PM_OPS NULL + +#endif /* !CONFIG_PM_SLEEP */ + +static struct pci_driver rhine_driver_pci = { + .name = DRV_NAME, + .id_table = rhine_pci_tbl, + .probe = rhine_init_one_pci, + .remove = rhine_remove_one_pci, + .shutdown = rhine_shutdown_pci, + .driver.pm = RHINE_PM_OPS, +}; + +static struct platform_driver rhine_driver_platform = { + .probe = rhine_init_one_platform, + .remove = rhine_remove_one_platform, + .driver = { + .name = DRV_NAME, + .of_match_table = rhine_of_tbl, + .pm = RHINE_PM_OPS, + } +}; + +static struct dmi_system_id rhine_dmi_table[] __initdata = { + { + .ident = "EPIA-M", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."), + DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), + }, + }, + { + .ident = "KV7", + .matches = { + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), + DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"), + }, + }, + { NULL } +}; + +static int __init rhine_init(void) +{ + int ret_pci, ret_platform; + +/* when a module, this is printed whether or not devices are found in probe */ +#ifdef MODULE + pr_info("%s\n", version); +#endif + if (dmi_check_system(rhine_dmi_table)) { + /* these BIOSes fail at PXE boot if chip is in D3 */ + avoid_D3 = true; + pr_warn("Broken BIOS detected, avoid_D3 enabled\n"); + } + else if (avoid_D3) + pr_info("avoid_D3 set\n"); + + ret_pci = pci_register_driver(&rhine_driver_pci); + ret_platform = platform_driver_register(&rhine_driver_platform); + if ((ret_pci < 0) && (ret_platform < 0)) + return ret_pci; + + return 0; +} + + +static void __exit rhine_cleanup(void) +{ + platform_driver_unregister(&rhine_driver_platform); + pci_unregister_driver(&rhine_driver_pci); +} + + +module_init(rhine_init); +module_exit(rhine_cleanup); diff --git a/drivers/net/ethernet/via/via-velocity.c 
b/drivers/net/ethernet/via/via-velocity.c new file mode 100644 index 000000000..ae68afd50 --- /dev/null +++ b/drivers/net/ethernet/via/via-velocity.c @@ -0,0 +1,3765 @@ +/* + * This code is derived from the VIA reference driver (copyright message + * below) provided to Red Hat by VIA Networking Technologies, Inc. for + * addition to the Linux kernel. + * + * The code has been merged into one source file, cleaned up to follow + * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned + * for 64bit hardware platforms. + * + * TODO + * rx_copybreak/alignment + * More testing + * + * The changes are (c) Copyright 2004, Red Hat Inc. + * Additional fixes and clean up: Francois Romieu + * + * This source has not been verified for use in safety critical systems. + * + * Please direct queries about the revamped driver to the linux-kernel + * list not VIA. + * + * Original code: + * + * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. + * All rights reserved. + * + * This software may be redistributed and/or modified under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or + * any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Author: Chuang Liang-Shing, AJ Jiang + * + * Date: Jan 24, 2003 + * + * MODULE_LICENSE("GPL"); + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "via-velocity.h" + +enum velocity_bus_type { + BUS_PCI, + BUS_PLATFORM, +}; + +static int velocity_nics; +static int msglevel = MSG_LEVEL_INFO; + +static void velocity_set_power_state(struct velocity_info *vptr, char state) +{ + void *addr = vptr->mac_regs; + + if (vptr->pdev) + pci_set_power_state(vptr->pdev, state); + else + writeb(state, addr + 0x154); +} + +/** + * mac_get_cam_mask - Read a CAM mask + * @regs: register block for this velocity + * @mask: buffer to store mask + * + * Fetch the mask bits of the selected CAM and store them into the + * provided mask buffer. 
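+ * The mask is eight bytes wide, one bit per CAM entry (mac_set_cam() masks its index to 64 entries).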
+ */ +static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + writeb(0, ®s->CAMADDR); + + /* read mask */ + for (i = 0; i < 8; i++) + *mask++ = readb(&(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +/** + * mac_set_cam_mask - Set a CAM mask + * @regs: register block for this velocity + * @mask: CAM mask to load + * + * Store a new mask into a CAM + */ +static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + writeb(CAMADDR_CAMEN, ®s->CAMADDR); + + for (i = 0; i < 8; i++) + writeb(*mask++, &(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, ®s->CAMADDR); + + for (i = 0; i < 8; i++) + writeb(*mask++, &(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +/** + * mac_set_cam - set CAM data + * @regs: register block of this velocity + * @idx: Cam index + * @addr: 2 or 6 bytes of CAM data + * + * Load an address or vlan tag into a CAM + */ +static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR); + + for (i = 0; i < 6; i++) + writeb(*addr++, &(regs->MARCAM[i])); + + BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); + + udelay(10); + + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx, + const u8 *addr) +{ + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR); + writew(*((u16 *) addr), ®s->MARCAM[0]); + + BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); + + udelay(10); + + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + + +/** + * mac_wol_reset - reset WOL after exiting low power + * @regs: register block of this velocity + * + * Called after we drop out of wake on lan mode in order to + * reset the Wake on lan features. 
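+ * Sticky hardware bits, the CHIPGCR force-mode bits, the forced-PME override and any pending wake-up enables and status bits are all cleared.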
This function doesn't restore + * the rest of the logic from the result of sleep/wakeup + */ +static void mac_wol_reset(struct mac_regs __iomem *regs) +{ + + /* Turn off SWPTAG right after leaving power mode */ + BYTE_REG_BITS_OFF(STICKHW_SWPTAG, ®s->STICKHW); + /* clear sticky bits */ + BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); + + BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, ®s->CHIPGCR); + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); + /* disable force PME-enable */ + writeb(WOLCFG_PMEOVR, ®s->WOLCFGClr); + /* disable power-event config bit */ + writew(0xFFFF, ®s->WOLCRClr); + /* clear power status */ + writew(0xFFFF, ®s->WOLSRClr); +} + +static const struct ethtool_ops velocity_ethtool_ops; + +/* + Define module options +*/ + +MODULE_AUTHOR("VIA Networking Technologies, Inc."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"); + +#define VELOCITY_PARAM(N, D) \ + static int N[MAX_UNITS] = OPTION_DEFAULT;\ + module_param_array(N, int, NULL, 0); \ + MODULE_PARM_DESC(N, D); + +#define RX_DESC_MIN 64 +#define RX_DESC_MAX 255 +#define RX_DESC_DEF 64 +VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); + +#define TX_DESC_MIN 16 +#define TX_DESC_MAX 256 +#define TX_DESC_DEF 64 +VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); + +#define RX_THRESH_MIN 0 +#define RX_THRESH_MAX 3 +#define RX_THRESH_DEF 0 +/* rx_thresh[] is used for controlling the receive fifo threshold. + 0: indicate the rxfifo threshold is 128 bytes. + 1: indicate the rxfifo threshold is 512 bytes. + 2: indicate the rxfifo threshold is 1024 bytes. + 3: indicate the rxfifo threshold is store & forward. +*/ +VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); + +#define DMA_LENGTH_MIN 0 +#define DMA_LENGTH_MAX 7 +#define DMA_LENGTH_DEF 6 + +/* DMA_length[] is used for controlling the DMA length + 0: 8 DWORDs + 1: 16 DWORDs + 2: 32 DWORDs + 3: 64 DWORDs + 4: 128 DWORDs + 5: 256 DWORDs + 6: SF(flush till emply) + 7: SF(flush till emply) +*/ +VELOCITY_PARAM(DMA_length, "DMA length"); + +#define IP_ALIG_DEF 0 +/* IP_byte_align[] is used for IP header DWORD byte aligned + 0: indicate the IP header won't be DWORD byte aligned.(Default) . + 1: indicate the IP header will be DWORD byte aligned. + In some environment, the IP header should be DWORD byte aligned, + or the packet will be droped when we receive it. (eg: IPVS) +*/ +VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned"); + +#define FLOW_CNTL_DEF 1 +#define FLOW_CNTL_MIN 1 +#define FLOW_CNTL_MAX 5 + +/* flow_control[] is used for setting the flow control ability of NIC. + 1: hardware deafult - AUTO (default). Use Hardware default value in ANAR. + 2: enable TX flow control. + 3: enable RX flow control. + 4: enable RX/TX flow control. + 5: disable +*/ +VELOCITY_PARAM(flow_control, "Enable flow control ability"); + +#define MED_LNK_DEF 0 +#define MED_LNK_MIN 0 +#define MED_LNK_MAX 5 +/* speed_duplex[] is used for setting the speed and duplex mode of NIC. + 0: indicate autonegotiation for both speed and duplex mode + 1: indicate 100Mbps half duplex mode + 2: indicate 100Mbps full duplex mode + 3: indicate 10Mbps half duplex mode + 4: indicate 10Mbps full duplex mode + 5: indicate 1000Mbps full duplex mode + + Note: + if EEPROM have been set to the force mode, this option is ignored + by driver. +*/ +VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); + +#define VAL_PKT_LEN_DEF 0 +/* ValPktLen[] is used for setting the checksum offload ability of NIC. 
+ 0: Receive frame with invalid layer 2 length (Default) + 1: Drop frame with invalid layer 2 length +*/ +VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); + +#define WOL_OPT_DEF 0 +#define WOL_OPT_MIN 0 +#define WOL_OPT_MAX 7 +/* wol_opts[] is used for controlling wake on lan behavior. + 0: Wake up if recevied a magic packet. (Default) + 1: Wake up if link status is on/off. + 2: Wake up if recevied an arp packet. + 4: Wake up if recevied any unicast packet. + Those value can be sumed up to support more than one option. +*/ +VELOCITY_PARAM(wol_opts, "Wake On Lan options"); + +static int rx_copybreak = 200; +module_param(rx_copybreak, int, 0644); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); + +/* + * Internal board variants. At the moment we have only one + */ +static struct velocity_info_tbl chip_info_table[] = { + {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL}, + { } +}; + +/* + * Describe the PCI device identifiers that we support in this + * device driver. Used for hotplug autoloading. + */ + +static const struct pci_device_id velocity_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, + { } +}; + +MODULE_DEVICE_TABLE(pci, velocity_pci_id_table); + +/** + * Describe the OF device identifiers that we support in this + * device driver. Used for devicetree nodes. + */ +static const struct of_device_id velocity_of_ids[] = { + { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, velocity_of_ids); + +/** + * get_chip_name - identifier to name + * @id: chip identifier + * + * Given a chip identifier return a suitable description. Returns + * a pointer a static string valid while the driver is loaded. + */ +static const char *get_chip_name(enum chip_type chip_id) +{ + int i; + for (i = 0; chip_info_table[i].name != NULL; i++) + if (chip_info_table[i].chip_id == chip_id) + break; + return chip_info_table[i].name; +} + +/** + * velocity_set_int_opt - parser for integer options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @min: lowest value allowed + * @max: highest value allowed + * @def: default value + * @name: property name + * @dev: device name + * + * Set an integer property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. + */ +static void velocity_set_int_opt(int *opt, int val, int min, int max, int def, + char *name, const char *devname) +{ + if (val == -1) + *opt = def; + else if (val < min || val > max) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n", + devname, name, min, max); + *opt = def; + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n", + devname, name, val); + *opt = val; + } +} + +/** + * velocity_set_bool_opt - parser for boolean options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @def: default value (yes/no) + * @flag: numeric value to set for true. + * @name: property name + * @dev: device name + * + * Set a boolean property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. 
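+ * The flag bit is cleared up front and set again only when the requested (or default) value is true.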
+ */ +static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, + char *name, const char *devname) +{ + (*opt) &= (~flag); + if (val == -1) + *opt |= (def ? flag : 0); + else if (val < 0 || val > 1) { + printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n", + devname, name); + *opt |= (def ? flag : 0); + } else { + printk(KERN_INFO "%s: set parameter %s to %s\n", + devname, name, val ? "TRUE" : "FALSE"); + *opt |= (val ? flag : 0); + } +} + +/** + * velocity_get_options - set options on device + * @opts: option structure for the device + * @index: index of option to use in module options array + * @devname: device name + * + * Turn the module and command options into a single structure + * for the current device + */ +static void velocity_get_options(struct velocity_opt *opts, int index, + const char *devname) +{ + + velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); + velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); + velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); + velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); + + velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); + velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); + velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); + velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); + velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); + opts->numrx = (opts->numrx & ~3); +} + +/** + * velocity_init_cam_filter - initialise CAM + * @vptr: velocity to program + * + * Initialize the content addressable memory used for filters. 
Load + * appropriately according to the presence of VLAN + */ +static void velocity_init_cam_filter(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + unsigned int vid, i = 0; + + /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */ + WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, ®s->MCFG); + WORD_REG_BITS_ON(MCFG_VIDFR, ®s->MCFG); + + /* Disable all CAMs */ + memset(vptr->vCAMmask, 0, sizeof(u8) * 8); + memset(vptr->mCAMmask, 0, sizeof(u8) * 8); + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); + mac_set_cam_mask(regs, vptr->mCAMmask); + + /* Enable VCAMs */ + for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { + mac_set_vlan_cam(regs, i, (u8 *) &vid); + vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); + if (++i >= VCAM_SIZE) + break; + } + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); +} + +static int velocity_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + set_bit(vid, vptr->active_vlans); + velocity_init_cam_filter(vptr); + spin_unlock_irq(&vptr->lock); + return 0; +} + +static int velocity_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + clear_bit(vid, vptr->active_vlans); + velocity_init_cam_filter(vptr); + spin_unlock_irq(&vptr->lock); + return 0; +} + +static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) +{ + vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; +} + +/** + * velocity_rx_reset - handle a receive reset + * @vptr: velocity we are resetting + * + * Reset the ownership and status for the receive ring side. + * Hand all the receive queue to the NIC. + */ +static void velocity_rx_reset(struct velocity_info *vptr) +{ + + struct mac_regs __iomem *regs = vptr->mac_regs; + int i; + + velocity_init_rx_ring_indexes(vptr); + + /* + * Init state, all RD entries belong to the NIC + */ + for (i = 0; i < vptr->options.numrx; ++i) + vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; + + writew(vptr->options.numrx, ®s->RBRDU); + writel(vptr->rx.pool_dma, ®s->RDBaseLo); + writew(0, ®s->RDIdx); + writew(vptr->options.numrx - 1, ®s->RDCSize); +} + +/** + * velocity_get_opt_media_mode - get media selection + * @vptr: velocity adapter + * + * Get the media mode stored in EEPROM or module options and load + * mii_status accordingly. The requested link state information + * is also returned. 
+ */ +static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) +{ + u32 status = 0; + + switch (vptr->options.spd_dpx) { + case SPD_DPX_AUTO: + status = VELOCITY_AUTONEG_ENABLE; + break; + case SPD_DPX_100_FULL: + status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_10_FULL: + status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_100_HALF: + status = VELOCITY_SPEED_100; + break; + case SPD_DPX_10_HALF: + status = VELOCITY_SPEED_10; + break; + case SPD_DPX_1000_FULL: + status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; + break; + } + vptr->mii_status = status; + return status; +} + +/** + * safe_disable_mii_autopoll - autopoll off + * @regs: velocity registers + * + * Turn off the autopoll and wait for it to disable on the chip + */ +static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs) +{ + u16 ww; + + /* turn off MAUTO */ + writeb(0, ®s->MIICR); + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } +} + +/** + * enable_mii_autopoll - turn on autopolling + * @regs: velocity registers + * + * Enable the MII link status autopoll feature on the Velocity + * hardware. Wait for it to enable. + */ +static void enable_mii_autopoll(struct mac_regs __iomem *regs) +{ + int ii; + + writeb(0, &(regs->MIICR)); + writeb(MIIADR_SWMPL, ®s->MIIADR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + + writeb(MIICR_MAUTO, ®s->MIICR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + +} + +/** + * velocity_mii_read - read MII data + * @regs: velocity registers + * @index: MII register index + * @data: buffer for received data + * + * Perform a single read of an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. 
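+ * MII autopolling is disabled for the duration of the access and re-enabled before returning.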
+ */ +static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + writeb(index, ®s->MIIADR); + + BYTE_REG_BITS_ON(MIICR_RCMD, ®s->MIICR); + + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + if (!(readb(®s->MIICR) & MIICR_RCMD)) + break; + } + + *data = readw(®s->MIIDATA); + + enable_mii_autopoll(regs); + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * mii_check_media_mode - check media state + * @regs: velocity registers + * + * Check the current MII status and determine the link status + * accordingly + */ +static u32 mii_check_media_mode(struct mac_regs __iomem *regs) +{ + u32 status = 0; + u16 ANAR; + + if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs)) + status |= VELOCITY_LINK_FAIL; + + if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs)) + status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; + else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs)) + status |= (VELOCITY_SPEED_1000); + else { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if (ANAR & ADVERTISE_100FULL) + status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); + else if (ANAR & ADVERTISE_100HALF) + status |= VELOCITY_SPEED_100; + else if (ANAR & ADVERTISE_10FULL) + status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); + else + status |= (VELOCITY_SPEED_10); + } + + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) + == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { + if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +/** + * velocity_mii_write - write MII data + * @regs: velocity registers + * @index: MII register index + * @data: 16bit data for the MII register + * + * Perform a single write to an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. + */ +static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + /* MII reg offset */ + writeb(mii_addr, ®s->MIIADR); + /* set MII data */ + writew(data, ®s->MIIDATA); + + /* turn on MIICR_WCMD */ + BYTE_REG_BITS_ON(MIICR_WCMD, ®s->MIICR); + + /* W_MAX_TIMEOUT is the timeout period */ + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(5); + if (!(readb(®s->MIICR) & MIICR_WCMD)) + break; + } + enable_mii_autopoll(regs); + + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * set_mii_flow_control - flow control setup + * @vptr: velocity interface + * + * Set up the flow control on this interface according to + * the supplied user/eeprom options. 
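+ * Only the PAUSE and asymmetric-PAUSE advertisement bits in ANAR are touched here;
+ * the MAC-level flow control registers are programmed separately by enable_flow_control_ability().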
+ */ +static void set_mii_flow_control(struct velocity_info *vptr) +{ + /*Enable or Disable PAUSE in ANAR */ + switch (vptr->options.flow_cntl) { + case FLOW_CNTL_TX: + MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + + case FLOW_CNTL_RX: + MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + + case FLOW_CNTL_TX_RX: + MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + + case FLOW_CNTL_DISABLE: + MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs); + break; + default: + break; + } +} + +/** + * mii_set_auto_on - autonegotiate on + * @vptr: velocity + * + * Enable autonegotation on this interface + */ +static void mii_set_auto_on(struct velocity_info *vptr) +{ + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs)) + MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); + else + MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); +} + +static u32 check_connection_type(struct mac_regs __iomem *regs) +{ + u32 status = 0; + u8 PHYSR0; + u16 ANAR; + PHYSR0 = readb(®s->PHYSR0); + + /* + if (!(PHYSR0 & PHYSR0_LINKGD)) + status|=VELOCITY_LINK_FAIL; + */ + + if (PHYSR0 & PHYSR0_FDPX) + status |= VELOCITY_DUPLEX_FULL; + + if (PHYSR0 & PHYSR0_SPDG) + status |= VELOCITY_SPEED_1000; + else if (PHYSR0 & PHYSR0_SPD10) + status |= VELOCITY_SPEED_10; + else + status |= VELOCITY_SPEED_100; + + if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) { + velocity_mii_read(regs, MII_ADVERTISE, &ANAR); + if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) + == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) { + if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +/** + * velocity_set_media_mode - set media mode + * @mii_status: old MII link state + * + * Check the media link state and configure the flow control + * PHY and also velocity hardware setup accordingly. In particular + * we need to set up CD polling and frame bursting. 
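+ * Returns VELOCITY_LINK_CHANGE once the PHY and MAC have been reprogrammed.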
+ */ +static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) +{ + u32 curr_status; + struct mac_regs __iomem *regs = vptr->mac_regs; + + vptr->mii_status = mii_check_media_mode(vptr->mac_regs); + curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); + + /* Set mii link status */ + set_mii_flow_control(vptr); + + /* + Check if new status is consistent with current status + if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) || + (mii_status==curr_status)) { + vptr->mii_status=mii_check_media_mode(vptr->mac_regs); + vptr->mii_status=check_connection_type(vptr->mac_regs); + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n"); + return 0; + } + */ + + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) + MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); + + /* + * If connection type is AUTO + */ + if (mii_status & VELOCITY_AUTONEG_ENABLE) { + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n"); + /* clear force MAC mode bit */ + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); + /* set duplex mode of MAC according to duplex mode of MII */ + MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs); + MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); + MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); + + /* enable AUTO-NEGO mode */ + mii_set_auto_on(vptr); + } else { + u16 CTRL1000; + u16 ANAR; + u8 CHIPGCR; + + /* + * 1. if it's 3119, disable frame bursting in halfduplex mode + * and enable it in fullduplex mode + * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR + * 3. only enable CD heart beat counter in 10HD mode + */ + + /* set force MAC mode bit */ + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + CHIPGCR = readb(®s->CHIPGCR); + + if (mii_status & VELOCITY_SPEED_1000) + CHIPGCR |= CHIPGCR_FCGMII; + else + CHIPGCR &= ~CHIPGCR_FCGMII; + + if (mii_status & VELOCITY_DUPLEX_FULL) { + CHIPGCR |= CHIPGCR_FCFDX; + writeb(CHIPGCR, ®s->CHIPGCR); + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n"); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } else { + CHIPGCR &= ~CHIPGCR_FCFDX; + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n"); + writeb(CHIPGCR, ®s->CHIPGCR); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + } + + velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000); + CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + if ((mii_status & VELOCITY_SPEED_1000) && + (mii_status & VELOCITY_DUPLEX_FULL)) { + CTRL1000 |= ADVERTISE_1000FULL; + } + velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000); + + if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + else + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + + /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */ + velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR); + ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)); + if (mii_status & VELOCITY_SPEED_100) { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ADVERTISE_100FULL; + else + ANAR |= ADVERTISE_100HALF; + } else if (mii_status & VELOCITY_SPEED_10) { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ADVERTISE_10FULL; + else + ANAR |= ADVERTISE_10HALF; + } + velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR); + /* enable AUTO-NEGO mode */ + 
mii_set_auto_on(vptr); + /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */ + } + /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ + /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ + return VELOCITY_LINK_CHANGE; +} + +/** + * velocity_print_link_status - link status reporting + * @vptr: velocity to report on + * + * Turn the link status of the velocity card into a kernel log + * description of the new link state, detailing speed and duplex + * status + */ +static void velocity_print_link_status(struct velocity_info *vptr) +{ + + if (vptr->mii_status & VELOCITY_LINK_FAIL) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); + } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); + + if (vptr->mii_status & VELOCITY_SPEED_1000) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); + else if (vptr->mii_status & VELOCITY_SPEED_100) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps"); + + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); + switch (vptr->options.spd_dpx) { + case SPD_DPX_1000_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); + break; + case SPD_DPX_100_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); + break; + case SPD_DPX_100_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n"); + break; + case SPD_DPX_10_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n"); + break; + case SPD_DPX_10_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n"); + break; + default: + break; + } + } +} + +/** + * enable_flow_control_ability - flow control + * @vptr: veloity to configure + * + * Set up flow control according to the flow control options + * determined by the eeprom/configuration. + */ +static void enable_flow_control_ability(struct velocity_info *vptr) +{ + + struct mac_regs __iomem *regs = vptr->mac_regs; + + switch (vptr->options.flow_cntl) { + + case FLOW_CNTL_DEFAULT: + if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0)) + writel(CR0_FDXRFCEN, ®s->CR0Set); + else + writel(CR0_FDXRFCEN, ®s->CR0Clr); + + if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0)) + writel(CR0_FDXTFCEN, ®s->CR0Set); + else + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_RX: + writel(CR0_FDXRFCEN, ®s->CR0Set); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX_RX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Set); + break; + + case FLOW_CNTL_DISABLE: + writel(CR0_FDXRFCEN, ®s->CR0Clr); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + default: + break; + } + +} + +/** + * velocity_soft_reset - soft reset + * @vptr: velocity to reset + * + * Kick off a soft reset of the velocity adapter and then poll + * until the reset sequence has completed before returning. 
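+ * If the soft reset does not complete within the timeout a forced reset is issued instead, followed by a 2ms settle delay.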
+ */ +static int velocity_soft_reset(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + int i = 0; + + writel(CR0_SFRST, ®s->CR0Set); + + for (i = 0; i < W_MAX_TIMEOUT; i++) { + udelay(5); + if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set)) + break; + } + + if (i == W_MAX_TIMEOUT) { + writel(CR0_FORSRST, ®s->CR0Set); + /* FIXME: PCI POSTING */ + /* delay 2ms */ + mdelay(2); + } + return 0; +} + +/** + * velocity_set_multi - filter list change callback + * @dev: network device + * + * Called by the network layer when the filter lists need to change + * for a velocity adapter. Reload the CAMs with the new address + * filter ruleset. + */ +static void velocity_set_multi(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + u8 rx_mode; + int i; + struct netdev_hw_addr *ha; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB | RCR_PROM); + } else if ((netdev_mc_count(dev) > vptr->multicast_limit) || + (dev->flags & IFF_ALLMULTI)) { + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB); + } else { + int offset = MCAM_SIZE - vptr->multicast_limit; + mac_get_cam_mask(regs, vptr->mCAMmask); + + i = 0; + netdev_for_each_mc_addr(ha, dev) { + mac_set_cam(regs, i + offset, ha->addr); + vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); + i++; + } + + mac_set_cam_mask(regs, vptr->mCAMmask); + rx_mode = RCR_AM | RCR_AB | RCR_AP; + } + if (dev->mtu > 1500) + rx_mode |= RCR_AL; + + BYTE_REG_BITS_ON(rx_mode, ®s->RCR); + +} + +/* + * MII access , media link mode setting functions + */ + +/** + * mii_init - set up MII + * @vptr: velocity adapter + * @mii_status: links tatus + * + * Set up the PHY for the current link state. + */ +static void mii_init(struct velocity_info *vptr, u32 mii_status) +{ + u16 BMCR; + + switch (PHYID_GET_PHY_ID(vptr->phy_id)) { + case PHYID_ICPLUS_IP101A: + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), + MII_ADVERTISE, vptr->mac_regs); + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, + vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, + vptr->mac_regs); + MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); + break; + case PHYID_CICADA_CS8201: + /* + * Reset to hardware default + */ + MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. + * legacy-forced issue. + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + /* + * Turn on Link/Activity LED enable bit for CIS8201 + */ + MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs); + break; + case PHYID_VT3216_32BIT: + case PHYID_VT3216_64BIT: + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. 
+ * legacy-forced issue + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs); + break; + + case PHYID_MARVELL_1000: + case PHYID_MARVELL_1000S: + /* + * Assert CRS on Transmit + */ + MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs); + break; + default: + ; + } + velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR); + if (BMCR & BMCR_ISOLATE) { + BMCR &= ~BMCR_ISOLATE; + velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR); + } +} + +/** + * setup_queue_timers - Setup interrupt timers + * + * Setup interrupt frequency during suppression (timeout if the frame + * count isn't filled). + */ +static void setup_queue_timers(struct velocity_info *vptr) +{ + /* Only for newer revisions */ + if (vptr->rev_id >= REV_ID_VT3216_A0) { + u8 txqueue_timer = 0; + u8 rxqueue_timer = 0; + + if (vptr->mii_status & (VELOCITY_SPEED_1000 | + VELOCITY_SPEED_100)) { + txqueue_timer = vptr->options.txqueue_timer; + rxqueue_timer = vptr->options.rxqueue_timer; + } + + writeb(txqueue_timer, &vptr->mac_regs->TQETMR); + writeb(rxqueue_timer, &vptr->mac_regs->RQETMR); + } +} + +/** + * setup_adaptive_interrupts - Setup interrupt suppression + * + * @vptr velocity adapter + * + * The velocity is able to suppress interrupt during high interrupt load. + * This function turns on that feature. + */ +static void setup_adaptive_interrupts(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + u16 tx_intsup = vptr->options.tx_intsup; + u16 rx_intsup = vptr->options.rx_intsup; + + /* Setup default interrupt mask (will be changed below) */ + vptr->int_mask = INT_MASK_DEF; + + /* Set Tx Interrupt Suppression Threshold */ + writeb(CAMCR_PS0, ®s->CAMCR); + if (tx_intsup != 0) { + vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I | + ISR_PTX2I | ISR_PTX3I); + writew(tx_intsup, ®s->ISRCTL); + } else + writew(ISRCTL_TSUPDIS, ®s->ISRCTL); + + /* Set Rx Interrupt Suppression Threshold */ + writeb(CAMCR_PS1, ®s->CAMCR); + if (rx_intsup != 0) { + vptr->int_mask &= ~ISR_PRXI; + writew(rx_intsup, ®s->ISRCTL); + } else + writew(ISRCTL_RSUPDIS, ®s->ISRCTL); + + /* Select page to interrupt hold timer */ + writeb(0, ®s->CAMCR); +} + +/** + * velocity_init_registers - initialise MAC registers + * @vptr: velocity to init + * @type: type of initialisation (hot or cold) + * + * Initialise the MAC on a reset or on first set up on the + * hardware. 
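+ * A RESET/WOL initialisation only resets the receive side and re-checks the media mode;
+ * a cold start additionally soft-resets the chip, reloads the MAC address from EEPROM and
+ * reprograms the CAM filters, DMA ring registers and interrupt mask.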
+ */ +static void velocity_init_registers(struct velocity_info *vptr, + enum velocity_init_type type) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + struct net_device *netdev = vptr->netdev; + int i, mii_status; + + mac_wol_reset(regs); + + switch (type) { + case VELOCITY_INIT_RESET: + case VELOCITY_INIT_WOL: + + netif_stop_queue(netdev); + + /* + * Reset RX to prevent RX pointer not on the 4X location + */ + velocity_rx_reset(vptr); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + mii_status = velocity_get_opt_media_mode(vptr); + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(netdev); + } + + enable_flow_control_ability(vptr); + + mac_clear_isr(regs); + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), + ®s->CR0Set); + + break; + + case VELOCITY_INIT_COLD: + default: + /* + * Do reset + */ + velocity_soft_reset(vptr); + mdelay(5); + + if (!vptr->no_eeprom) { + mac_eeprom_reload(regs); + for (i = 0; i < 6; i++) + writeb(netdev->dev_addr[i], regs->PAR + i); + } + + /* + * clear Pre_ACPI bit. + */ + BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA)); + mac_set_rx_thresh(regs, vptr->options.rx_thresh); + mac_set_dma_length(regs, vptr->options.DMA_length); + + writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet); + /* + * Back off algorithm use original IEEE standard + */ + BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB); + + /* + * Init CAM filter + */ + velocity_init_cam_filter(vptr); + + /* + * Set packet filter: Receive directed and broadcast address + */ + velocity_set_multi(netdev); + + /* + * Enable MII auto-polling + */ + enable_mii_autopoll(regs); + + setup_adaptive_interrupts(vptr); + + writel(vptr->rx.pool_dma, ®s->RDBaseLo); + writew(vptr->options.numrx - 1, ®s->RDCSize); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + writew(vptr->options.numtx - 1, ®s->TDCSize); + + for (i = 0; i < vptr->tx.numq; i++) { + writel(vptr->tx.pool_dma[i], ®s->TDBaseLo[i]); + mac_tx_queue_run(regs, i); + } + + init_flow_control_register(vptr); + + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set); + + mii_status = velocity_get_opt_media_mode(vptr); + netif_stop_queue(netdev); + + mii_init(vptr, mii_status); + + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(netdev); + } + + enable_flow_control_ability(vptr); + mac_hw_mibs_init(regs); + mac_write_int_mask(vptr->int_mask, regs); + mac_clear_isr(regs); + + } +} + +static void velocity_give_many_rx_descs(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + int avail, dirty, unusable; + + /* + * RD number must be equal to 4X per hardware spec + * (programming guide rev 1.20, p.13) + */ + if (vptr->rx.filled < 4) + return; + + wmb(); + + unusable = vptr->rx.filled & 0x0003; + dirty = vptr->rx.dirty - unusable; + for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { + dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; + vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; + } + + writew(vptr->rx.filled & 0xfffc, ®s->RBRDU); + vptr->rx.filled = unusable; +} + +/** + * velocity_init_dma_rings - set up DMA rings + * @vptr: Velocity to set up + * + * Allocate PCI mapped DMA rings for the receive and transmit layer + * to use. 
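+ * A single coherent allocation holds the RX ring followed by one TX ring per queue;
+ * dma_alloc_coherent() provides the 64-byte alignment the descriptors require.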
+ */ +static int velocity_init_dma_rings(struct velocity_info *vptr) +{ + struct velocity_opt *opt = &vptr->options; + const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); + const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); + dma_addr_t pool_dma; + void *pool; + unsigned int i; + + /* + * Allocate all RD/TD rings a single pool. + * + * dma_alloc_coherent() fulfills the requirement for 64 bytes + * alignment + */ + pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + + rx_ring_size, &pool_dma, GFP_ATOMIC); + if (!pool) { + dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", + vptr->netdev->name); + return -ENOMEM; + } + + vptr->rx.ring = pool; + vptr->rx.pool_dma = pool_dma; + + pool += rx_ring_size; + pool_dma += rx_ring_size; + + for (i = 0; i < vptr->tx.numq; i++) { + vptr->tx.rings[i] = pool; + vptr->tx.pool_dma[i] = pool_dma; + pool += tx_ring_size; + pool_dma += tx_ring_size; + } + + return 0; +} + +static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) +{ + vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; +} + +/** + * velocity_alloc_rx_buf - allocate aligned receive buffer + * @vptr: velocity + * @idx: ring index + * + * Allocate a new full sized buffer for the reception of a frame and + * map it into PCI space for the hardware to use. The hardware + * requires *64* byte alignment of the buffer which makes life + * less fun than would be ideal. + */ +static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) +{ + struct rx_desc *rd = &(vptr->rx.ring[idx]); + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); + + rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); + if (rd_info->skb == NULL) + return -ENOMEM; + + /* + * Do the gymnastics to get the buffer head for data at + * 64byte alignment. + */ + skb_reserve(rd_info->skb, + 64 - ((unsigned long) rd_info->skb->data & 63)); + rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, + vptr->rx.buf_sz, DMA_FROM_DEVICE); + + /* + * Fill in the descriptor to match + */ + + *((u32 *) & (rd->rdesc0)) = 0; + rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; + rd->pa_low = cpu_to_le32(rd_info->skb_dma); + rd->pa_high = 0; + return 0; +} + + +static int velocity_rx_refill(struct velocity_info *vptr) +{ + int dirty = vptr->rx.dirty, done = 0; + + do { + struct rx_desc *rd = vptr->rx.ring + dirty; + + /* Fine for an all zero Rx desc at init time as well */ + if (rd->rdesc0.len & OWNED_BY_NIC) + break; + + if (!vptr->rx.info[dirty].skb) { + if (velocity_alloc_rx_buf(vptr, dirty) < 0) + break; + } + done++; + dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; + } while (dirty != vptr->rx.curr); + + if (done) { + vptr->rx.dirty = dirty; + vptr->rx.filled += done; + } + + return done; +} + +/** + * velocity_free_rd_ring - free receive ring + * @vptr: velocity to clean up + * + * Free the receive buffers for each ring slot and any + * attached socket buffers that need to go away. 
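+ * Each descriptor is also zeroed so that a later re-initialisation starts from a clean ring.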
+ */ +static void velocity_free_rd_ring(struct velocity_info *vptr) +{ + int i; + + if (vptr->rx.info == NULL) + return; + + for (i = 0; i < vptr->options.numrx; i++) { + struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); + struct rx_desc *rd = vptr->rx.ring + i; + + memset(rd, 0, sizeof(*rd)); + + if (!rd_info->skb) + continue; + dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, + DMA_FROM_DEVICE); + rd_info->skb_dma = 0; + + dev_kfree_skb(rd_info->skb); + rd_info->skb = NULL; + } + + kfree(vptr->rx.info); + vptr->rx.info = NULL; +} + +/** + * velocity_init_rd_ring - set up receive ring + * @vptr: velocity to configure + * + * Allocate and set up the receive buffers for each ring slot and + * assign them to the network adapter. + */ +static int velocity_init_rd_ring(struct velocity_info *vptr) +{ + int ret = -ENOMEM; + + vptr->rx.info = kcalloc(vptr->options.numrx, + sizeof(struct velocity_rd_info), GFP_KERNEL); + if (!vptr->rx.info) + goto out; + + velocity_init_rx_ring_indexes(vptr); + + if (velocity_rx_refill(vptr) != vptr->options.numrx) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR + "%s: failed to allocate RX buffer.\n", vptr->netdev->name); + velocity_free_rd_ring(vptr); + goto out; + } + + ret = 0; +out: + return ret; +} + +/** + * velocity_init_td_ring - set up transmit ring + * @vptr: velocity + * + * Set up the transmit ring and chain the ring pointers together. + * Returns zero on success or a negative posix errno code for + * failure. + */ +static int velocity_init_td_ring(struct velocity_info *vptr) +{ + int j; + + /* Init the TD ring entries */ + for (j = 0; j < vptr->tx.numq; j++) { + + vptr->tx.infos[j] = kcalloc(vptr->options.numtx, + sizeof(struct velocity_td_info), + GFP_KERNEL); + if (!vptr->tx.infos[j]) { + while (--j >= 0) + kfree(vptr->tx.infos[j]); + return -ENOMEM; + } + + vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; + } + return 0; +} + +/** + * velocity_free_dma_rings - free PCI ring pointers + * @vptr: Velocity to free from + * + * Clean up the PCI ring buffers allocated to this velocity. + */ +static void velocity_free_dma_rings(struct velocity_info *vptr) +{ + const int size = vptr->options.numrx * sizeof(struct rx_desc) + + vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; + + dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); +} + +static int velocity_init_rings(struct velocity_info *vptr, int mtu) +{ + int ret; + + velocity_set_rxbufsize(vptr, mtu); + + ret = velocity_init_dma_rings(vptr); + if (ret < 0) + goto out; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) + goto err_free_dma_rings_0; + + ret = velocity_init_td_ring(vptr); + if (ret < 0) + goto err_free_rd_ring_1; +out: + return ret; + +err_free_rd_ring_1: + velocity_free_rd_ring(vptr); +err_free_dma_rings_0: + velocity_free_dma_rings(vptr); + goto out; +} + +/** + * velocity_free_tx_buf - free transmit buffer + * @vptr: velocity + * @tdinfo: buffer + * + * Release an transmit buffer. If the buffer was preallocated then + * recycle it, if not then unmap the buffer. 
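+ * For scatter-gather frames every mapped fragment is unmapped individually before the skb is released.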
+ */ +static void velocity_free_tx_buf(struct velocity_info *vptr, + struct velocity_td_info *tdinfo, struct tx_desc *td) +{ + struct sk_buff *skb = tdinfo->skb; + + /* + * Don't unmap the pre-allocated tx_bufs + */ + if (tdinfo->skb_dma) { + int i; + + for (i = 0; i < tdinfo->nskb_dma; i++) { + size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN); + + /* For scatter-gather */ + if (skb_shinfo(skb)->nr_frags > 0) + pktlen = max_t(size_t, pktlen, + td->td_buf[i].size & ~TD_QUEUE); + + dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], + le16_to_cpu(pktlen), DMA_TO_DEVICE); + } + } + dev_kfree_skb_irq(skb); + tdinfo->skb = NULL; +} + +/* + * FIXME: could we merge this with velocity_free_tx_buf ? + */ +static void velocity_free_td_ring_entry(struct velocity_info *vptr, + int q, int n) +{ + struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]); + int i; + + if (td_info == NULL) + return; + + if (td_info->skb) { + for (i = 0; i < td_info->nskb_dma; i++) { + if (td_info->skb_dma[i]) { + dma_unmap_single(vptr->dev, td_info->skb_dma[i], + td_info->skb->len, DMA_TO_DEVICE); + td_info->skb_dma[i] = 0; + } + } + dev_kfree_skb(td_info->skb); + td_info->skb = NULL; + } +} + +/** + * velocity_free_td_ring - free td ring + * @vptr: velocity + * + * Free up the transmit ring for this particular velocity adapter. + * We free the ring contents but not the ring itself. + */ +static void velocity_free_td_ring(struct velocity_info *vptr) +{ + int i, j; + + for (j = 0; j < vptr->tx.numq; j++) { + if (vptr->tx.infos[j] == NULL) + continue; + for (i = 0; i < vptr->options.numtx; i++) + velocity_free_td_ring_entry(vptr, j, i); + + kfree(vptr->tx.infos[j]); + vptr->tx.infos[j] = NULL; + } +} + +static void velocity_free_rings(struct velocity_info *vptr) +{ + velocity_free_td_ring(vptr); + velocity_free_rd_ring(vptr); + velocity_free_dma_rings(vptr); +} + +/** + * velocity_error - handle error from controller + * @vptr: velocity + * @status: card status + * + * Process an error report from the hardware and attempt to recover + * the card itself. 
At the moment we cannot recover from some + * theoretically impossible errors but this could be fixed using + * the pci_device_failed logic to bounce the hardware + * + */ +static void velocity_error(struct velocity_info *vptr, int status) +{ + + if (status & ISR_TXSTLI) { + struct mac_regs __iomem *regs = vptr->mac_regs; + + printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(®s->TDIdx[0])); + BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR); + writew(TRDCSR_RUN, ®s->TDCSRClr); + netif_stop_queue(vptr->netdev); + + /* FIXME: port over the pci_device_failed code and use it + here */ + } + + if (status & ISR_SRCI) { + struct mac_regs __iomem *regs = vptr->mac_regs; + int linked; + + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + vptr->mii_status = check_connection_type(regs); + + /* + * If it is a 3119, disable frame bursting in + * halfduplex mode and enable it in fullduplex + * mode + */ + if (vptr->rev_id < REV_ID_VT3216_A0) { + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + else + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } + /* + * Only enable CD heart beat counter in 10HD mode + */ + if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + else + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + + setup_queue_timers(vptr); + } + /* + * Get link status from PHYSR0 + */ + linked = readb(®s->PHYSR0) & PHYSR0_LINKGD; + + if (linked) { + vptr->mii_status &= ~VELOCITY_LINK_FAIL; + netif_carrier_on(vptr->netdev); + } else { + vptr->mii_status |= VELOCITY_LINK_FAIL; + netif_carrier_off(vptr->netdev); + } + + velocity_print_link_status(vptr); + enable_flow_control_ability(vptr); + + /* + * Re-enable auto-polling because SRCI will disable + * auto-polling + */ + + enable_mii_autopoll(regs); + + if (vptr->mii_status & VELOCITY_LINK_FAIL) + netif_stop_queue(vptr->netdev); + else + netif_wake_queue(vptr->netdev); + + } + if (status & ISR_MIBFI) + velocity_update_hw_mibs(vptr); + if (status & ISR_LSTEI) + mac_rx_queue_wake(vptr->mac_regs); +} + +/** + * tx_srv - transmit interrupt service + * @vptr; Velocity + * + * Scan the queues looking for transmitted packets that + * we can complete and clean up. Update any statistics as + * necessary/ + */ +static int velocity_tx_srv(struct velocity_info *vptr) +{ + struct tx_desc *td; + int qnum; + int full = 0; + int idx; + int works = 0; + struct velocity_td_info *tdinfo; + struct net_device_stats *stats = &vptr->netdev->stats; + + for (qnum = 0; qnum < vptr->tx.numq; qnum++) { + for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; + idx = (idx + 1) % vptr->options.numtx) { + + /* + * Get Tx Descriptor + */ + td = &(vptr->tx.rings[qnum][idx]); + tdinfo = &(vptr->tx.infos[qnum][idx]); + + if (td->tdesc0.len & OWNED_BY_NIC) + break; + + if ((works++ > 15)) + break; + + if (td->tdesc0.TSR & TSR0_TERR) { + stats->tx_errors++; + stats->tx_dropped++; + if (td->tdesc0.TSR & TSR0_CDH) + stats->tx_heartbeat_errors++; + if (td->tdesc0.TSR & TSR0_CRS) + stats->tx_carrier_errors++; + if (td->tdesc0.TSR & TSR0_ABT) + stats->tx_aborted_errors++; + if (td->tdesc0.TSR & TSR0_OWC) + stats->tx_window_errors++; + } else { + stats->tx_packets++; + stats->tx_bytes += tdinfo->skb->len; + } + velocity_free_tx_buf(vptr, tdinfo, td); + vptr->tx.used[qnum]--; + } + vptr->tx.tail[qnum] = idx; + + if (AVAIL_TD(vptr, qnum) < 1) + full = 1; + } + /* + * Look to see if we should kick the transmit network + * layer for more work. 
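+ * The queue is only woken if descriptor space is available again and
+ * the link has not failed.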
+ */ + if (netif_queue_stopped(vptr->netdev) && (full == 0) && + (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { + netif_wake_queue(vptr->netdev); + } + return works; +} + +/** + * velocity_rx_csum - checksum process + * @rd: receive packet descriptor + * @skb: network layer packet buffer + * + * Process the status bits for the received packet and determine + * if the checksum was computed and verified by the hardware + */ +static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + if (rd->rdesc1.CSM & CSM_IPKT) { + if (rd->rdesc1.CSM & CSM_IPOK) { + if ((rd->rdesc1.CSM & CSM_TCPKT) || + (rd->rdesc1.CSM & CSM_UDPKT)) { + if (!(rd->rdesc1.CSM & CSM_TUPOK)) + return; + } + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } +} + +/** + * velocity_rx_copy - in place Rx copy for small packets + * @rx_skb: network layer packet buffer candidate + * @pkt_size: received data size + * @rd: receive packet descriptor + * @dev: network device + * + * Replace the current skb that is scheduled for Rx processing by a + * shorter, immediately allocated skb, if the received packet is small + * enough. This function returns a negative value if the received + * packet is too big or if memory is exhausted. + */ +static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size, + struct velocity_info *vptr) +{ + int ret = -1; + if (pkt_size < rx_copybreak) { + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); + if (new_skb) { + new_skb->ip_summed = rx_skb[0]->ip_summed; + skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); + *rx_skb = new_skb; + ret = 0; + } + + } + return ret; +} + +/** + * velocity_iph_realign - IP header alignment + * @vptr: velocity we are handling + * @skb: network layer packet buffer + * @pkt_size: received data size + * + * Align IP header on a 2 bytes boundary. This behavior can be + * configured by the user. + */ +static inline void velocity_iph_realign(struct velocity_info *vptr, + struct sk_buff *skb, int pkt_size) +{ + if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { + memmove(skb->data + 2, skb->data, pkt_size); + skb_reserve(skb, 2); + } +} + +/** + * velocity_receive_frame - received packet processor + * @vptr: velocity we are handling + * @idx: ring index + * + * A packet has arrived. 
We process the packet and if appropriate + * pass the frame up the network stack + */ +static int velocity_receive_frame(struct velocity_info *vptr, int idx) +{ + struct net_device_stats *stats = &vptr->netdev->stats; + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); + struct rx_desc *rd = &(vptr->rx.ring[idx]); + int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; + struct sk_buff *skb; + + if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); + stats->rx_length_errors++; + return -EINVAL; + } + + if (rd->rdesc0.RSR & RSR_MAR) + stats->multicast++; + + skb = rd_info->skb; + + dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, + vptr->rx.buf_sz, DMA_FROM_DEVICE); + + /* + * Drop frame not meeting IEEE 802.3 + */ + + if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { + if (rd->rdesc0.RSR & RSR_RL) { + stats->rx_length_errors++; + return -EINVAL; + } + } + + velocity_rx_csum(rd, skb); + + if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { + velocity_iph_realign(vptr, skb, pkt_len); + rd_info->skb = NULL; + dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, + DMA_FROM_DEVICE); + } else { + dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, + vptr->rx.buf_sz, DMA_FROM_DEVICE); + } + + skb_put(skb, pkt_len - 4); + skb->protocol = eth_type_trans(skb, vptr->netdev); + + if (rd->rdesc0.RSR & RSR_DETAG) { + u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + netif_receive_skb(skb); + + stats->rx_bytes += pkt_len; + stats->rx_packets++; + + return 0; +} + +/** + * velocity_rx_srv - service RX interrupt + * @vptr: velocity + * + * Walk the receive ring of the velocity adapter and remove + * any received packets from the receive queue. Hand the ring + * slots back to the adapter for reuse. + */ +static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) +{ + struct net_device_stats *stats = &vptr->netdev->stats; + int rd_curr = vptr->rx.curr; + int works = 0; + + while (works < budget_left) { + struct rx_desc *rd = vptr->rx.ring + rd_curr; + + if (!vptr->rx.info[rd_curr].skb) + break; + + if (rd->rdesc0.len & OWNED_BY_NIC) + break; + + rmb(); + + /* + * Don't drop CE or RL error frame although RXOK is off + */ + if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) { + if (velocity_receive_frame(vptr, rd_curr) < 0) + stats->rx_dropped++; + } else { + if (rd->rdesc0.RSR & RSR_CRC) + stats->rx_crc_errors++; + if (rd->rdesc0.RSR & RSR_FAE) + stats->rx_frame_errors++; + + stats->rx_dropped++; + } + + rd->size |= RX_INTEN; + + rd_curr++; + if (rd_curr >= vptr->options.numrx) + rd_curr = 0; + works++; + } + + vptr->rx.curr = rd_curr; + + if ((works > 0) && (velocity_rx_refill(vptr) > 0)) + velocity_give_many_rx_descs(vptr); + + VAR_USED(stats); + return works; +} + +static int velocity_poll(struct napi_struct *napi, int budget) +{ + struct velocity_info *vptr = container_of(napi, + struct velocity_info, napi); + unsigned int rx_done; + unsigned long flags; + + /* + * Do rx and tx twice for performance (taken from the VIA + * out-of-tree driver). 
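+ *
+ * Rx work is bounded by the NAPI budget, while Tx completion runs under
+ * vptr->lock so it cannot race the transmit path. Interrupts are
+ * re-enabled only when the budget was not exhausted and napi_complete()
+ * has been called.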
+ */ + rx_done = velocity_rx_srv(vptr, budget); + spin_lock_irqsave(&vptr->lock, flags); + velocity_tx_srv(vptr); + /* If budget not fully consumed, exit the polling mode */ + if (rx_done < budget) { + napi_complete(napi); + mac_enable_int(vptr->mac_regs); + } + spin_unlock_irqrestore(&vptr->lock, flags); + + return rx_done; +} + +/** + * velocity_intr - interrupt callback + * @irq: interrupt number + * @dev_instance: interrupting device + * + * Called whenever an interrupt is generated by the velocity + * adapter IRQ line. We may not be the source of the interrupt + * and need to identify initially if we are, and if not exit as + * efficiently as possible. + */ +static irqreturn_t velocity_intr(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct velocity_info *vptr = netdev_priv(dev); + u32 isr_status; + + spin_lock(&vptr->lock); + isr_status = mac_read_isr(vptr->mac_regs); + + /* Not us ? */ + if (isr_status == 0) { + spin_unlock(&vptr->lock); + return IRQ_NONE; + } + + /* Ack the interrupt */ + mac_write_isr(vptr->mac_regs, isr_status); + + if (likely(napi_schedule_prep(&vptr->napi))) { + mac_disable_int(vptr->mac_regs); + __napi_schedule(&vptr->napi); + } + + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) + velocity_error(vptr, isr_status); + + spin_unlock(&vptr->lock); + + return IRQ_HANDLED; +} + +/** + * velocity_open - interface activation callback + * @dev: network layer device to open + * + * Called when the network layer brings the interface up. Returns + * a negative posix error code on failure, or zero on success. + * + * All the ring allocation and set up is done on open for this + * adapter to minimise memory usage when inactive + */ +static int velocity_open(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + int ret; + + ret = velocity_init_rings(vptr, dev->mtu); + if (ret < 0) + goto out; + + /* Ensure chip is running */ + velocity_set_power_state(vptr, PCI_D0); + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED, + dev->name, dev); + if (ret < 0) { + /* Power down the chip */ + velocity_set_power_state(vptr, PCI_D3hot); + velocity_free_rings(vptr); + goto out; + } + + velocity_give_many_rx_descs(vptr); + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); + napi_enable(&vptr->napi); + vptr->flags |= VELOCITY_FLAGS_OPENED; +out: + return ret; +} + +/** + * velocity_shutdown - shut down the chip + * @vptr: velocity to deactivate + * + * Shuts down the internal operations of the velocity and + * disables interrupts, autopolling, transmit and receive + */ +static void velocity_shutdown(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + mac_disable_int(regs); + writel(CR0_STOP, ®s->CR0Set); + writew(0xFFFF, ®s->TDCSRClr); + writeb(0xFF, ®s->RDCSRClr); + safe_disable_mii_autopoll(regs); + mac_clear_isr(regs); +} + +/** + * velocity_change_mtu - MTU change callback + * @dev: network device + * @new_mtu: desired MTU + * + * Handle requests from the networking layer for MTU change on + * this interface. It gets called on a change by the network layer. + * Return zero for success or negative posix error code. 
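+ *
+ * A live MTU change is handled by building a temporary velocity_info
+ * with rings sized for the new MTU, swapping its rx/tx state into the
+ * running device while the lock is held, and then freeing the old rings
+ * through the temporary structure once the lock has been dropped.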
+ */ +static int velocity_change_mtu(struct net_device *dev, int new_mtu) +{ + struct velocity_info *vptr = netdev_priv(dev); + int ret = 0; + + if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", + vptr->netdev->name); + ret = -EINVAL; + goto out_0; + } + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + goto out_0; + } + + if (dev->mtu != new_mtu) { + struct velocity_info *tmp_vptr; + unsigned long flags; + struct rx_info rx; + struct tx_info tx; + + tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL); + if (!tmp_vptr) { + ret = -ENOMEM; + goto out_0; + } + + tmp_vptr->netdev = dev; + tmp_vptr->pdev = vptr->pdev; + tmp_vptr->dev = vptr->dev; + tmp_vptr->options = vptr->options; + tmp_vptr->tx.numq = vptr->tx.numq; + + ret = velocity_init_rings(tmp_vptr, new_mtu); + if (ret < 0) + goto out_free_tmp_vptr_1; + + napi_disable(&vptr->napi); + + spin_lock_irqsave(&vptr->lock, flags); + + netif_stop_queue(dev); + velocity_shutdown(vptr); + + rx = vptr->rx; + tx = vptr->tx; + + vptr->rx = tmp_vptr->rx; + vptr->tx = tmp_vptr->tx; + + tmp_vptr->rx = rx; + tmp_vptr->tx = tx; + + dev->mtu = new_mtu; + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + velocity_give_many_rx_descs(vptr); + + napi_enable(&vptr->napi); + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); + + spin_unlock_irqrestore(&vptr->lock, flags); + + velocity_free_rings(tmp_vptr); + +out_free_tmp_vptr_1: + kfree(tmp_vptr); + } +out_0: + return ret; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * velocity_poll_controller - Velocity Poll controller function + * @dev: network device + * + * + * Used by NETCONSOLE and other diagnostic tools to allow network I/P + * with interrupts disabled. + */ +static void velocity_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + velocity_intr(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/** + * velocity_mii_ioctl - MII ioctl handler + * @dev: network device + * @ifr: the ifreq block for the ioctl + * @cmd: the command + * + * Process MII requests made via ioctl from the network layer. These + * are used by tools like kudzu to interrogate the link state of the + * hardware + */ +static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + unsigned long flags; + struct mii_ioctl_data *miidata = if_mii(ifr); + int err; + + switch (cmd) { + case SIOCGMIIPHY: + miidata->phy_id = readb(®s->MIIADR) & 0x1f; + break; + case SIOCGMIIREG: + if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) + return -ETIMEDOUT; + break; + case SIOCSMIIREG: + spin_lock_irqsave(&vptr->lock, flags); + err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); + spin_unlock_irqrestore(&vptr->lock, flags); + check_connection_type(vptr->mac_regs); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * velocity_ioctl - ioctl entry point + * @dev: network device + * @rq: interface request ioctl + * @cmd: command code + * + * Called when the user issues an ioctl request to the network + * device in question. The velocity interface supports MII. 
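+ *
+ * If the interface is not running, the chip is brought out of the D3
+ * power state for the duration of the request and put back afterwards.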
+ */
+static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+        struct velocity_info *vptr = netdev_priv(dev);
+        int ret;
+
+        /* If we are asked for information and the device is power
+           saving then we need to bring the device back up to talk to it */
+
+        if (!netif_running(dev))
+                velocity_set_power_state(vptr, PCI_D0);
+
+        switch (cmd) {
+        case SIOCGMIIPHY:       /* Get address of MII PHY in use. */
+        case SIOCGMIIREG:       /* Read MII PHY register. */
+        case SIOCSMIIREG:       /* Write to MII PHY register. */
+                ret = velocity_mii_ioctl(dev, rq, cmd);
+                break;
+
+        default:
+                ret = -EOPNOTSUPP;
+        }
+        if (!netif_running(dev))
+                velocity_set_power_state(vptr, PCI_D3hot);
+
+
+        return ret;
+}
+
+/**
+ * velocity_get_stats - statistics callback
+ * @dev: network device
+ *
+ * Callback from the network layer to allow driver statistics
+ * to be resynchronized with hardware collected state. In the
+ * case of the velocity we need to pull the MIB counters from
+ * the hardware into the counters before letting the network
+ * layer display them.
+ */
+static struct net_device_stats *velocity_get_stats(struct net_device *dev)
+{
+        struct velocity_info *vptr = netdev_priv(dev);
+
+        /* If the hardware is down, don't touch MII */
+        if (!netif_running(dev))
+                return &dev->stats;
+
+        spin_lock_irq(&vptr->lock);
+        velocity_update_hw_mibs(vptr);
+        spin_unlock_irq(&vptr->lock);
+
+        dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
+        dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
+        dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
+
+//  unsigned long   rx_dropped;         /* no space in linux buffers    */
+        dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
+        /* detailed rx_errors: */
+//  unsigned long   rx_length_errors;
+//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
+        dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
+//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
+//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun          */
+//  unsigned long   rx_missed_errors;   /* receiver missed packet       */
+
+        /* detailed tx_errors */
+//  unsigned long   tx_fifo_errors;
+
+        return &dev->stats;
+}
+
+/**
+ * velocity_close - close adapter callback
+ * @dev: network device
+ *
+ * Callback from the network layer when the velocity is being
+ * deactivated.
+ */
+static int velocity_close(struct net_device *dev)
+{
+        struct velocity_info *vptr = netdev_priv(dev);
+
+        napi_disable(&vptr->napi);
+        netif_stop_queue(dev);
+        velocity_shutdown(vptr);
+
+        if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
+                velocity_get_ip(vptr);
+
+        free_irq(dev->irq, dev);
+
+        velocity_free_rings(vptr);
+
+        vptr->flags &= (~VELOCITY_FLAGS_OPENED);
+        return 0;
+}
+
+/**
+ * velocity_xmit - transmit packet callback
+ * @skb: buffer to transmit
+ * @dev: network device
+ *
+ * Called by the network layer to request that a packet be queued to
+ * the velocity. Returns zero on success.
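+ *
+ * The transmit descriptor provides only seven buffer slots, so skbs with
+ * more than six fragments are linearised first (or dropped if that
+ * fails). Ownership is handed to the NIC only after the descriptor is
+ * fully set up, and TD_QUEUE is set on the previous descriptor before
+ * the queue is kicked.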
+ */ +static netdev_tx_t velocity_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + int qnum = 0; + struct tx_desc *td_ptr; + struct velocity_td_info *tdinfo; + unsigned long flags; + int pktlen; + int index, prev; + int i = 0; + + if (skb_padto(skb, ETH_ZLEN)) + goto out; + + /* The hardware can handle at most 7 memory segments, so merge + * the skb if there are more */ + if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + pktlen = skb_shinfo(skb)->nr_frags == 0 ? + max_t(unsigned int, skb->len, ETH_ZLEN) : + skb_headlen(skb); + + spin_lock_irqsave(&vptr->lock, flags); + + index = vptr->tx.curr[qnum]; + td_ptr = &(vptr->tx.rings[qnum][index]); + tdinfo = &(vptr->tx.infos[qnum][index]); + + td_ptr->tdesc1.TCR = TCR0_TIC; + td_ptr->td_buf[0].size &= ~TD_QUEUE; + + /* + * Map the linear network buffer into PCI space and + * add it to the transmit ring. + */ + tdinfo->skb = skb; + tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, + DMA_TO_DEVICE); + td_ptr->tdesc0.len = cpu_to_le16(pktlen); + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].size = cpu_to_le16(pktlen); + + /* Handle fragments */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, + frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + + td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); + td_ptr->td_buf[i + 1].pa_high = 0; + td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag)); + } + tdinfo->nskb_dma = i + 1; + + td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; + + if (skb_vlan_tag_present(skb)) { + td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); + td_ptr->tdesc1.TCR |= TCR0_VETAG; + } + + /* + * Handle hardware checksum + */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + if (ip->protocol == IPPROTO_TCP) + td_ptr->tdesc1.TCR |= TCR0_TCPCK; + else if (ip->protocol == IPPROTO_UDP) + td_ptr->tdesc1.TCR |= (TCR0_UDPCK); + td_ptr->tdesc1.TCR |= TCR0_IPCK; + } + + prev = index - 1; + if (prev < 0) + prev = vptr->options.numtx - 1; + td_ptr->tdesc0.len |= OWNED_BY_NIC; + vptr->tx.used[qnum]++; + vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; + + if (AVAIL_TD(vptr, qnum) < 1) + netif_stop_queue(dev); + + td_ptr = &(vptr->tx.rings[qnum][prev]); + td_ptr->td_buf[0].size |= TD_QUEUE; + mac_tx_queue_wake(vptr->mac_regs, qnum); + + spin_unlock_irqrestore(&vptr->lock, flags); +out: + return NETDEV_TX_OK; +} + +static const struct net_device_ops velocity_netdev_ops = { + .ndo_open = velocity_open, + .ndo_stop = velocity_close, + .ndo_start_xmit = velocity_xmit, + .ndo_get_stats = velocity_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_set_rx_mode = velocity_set_multi, + .ndo_change_mtu = velocity_change_mtu, + .ndo_do_ioctl = velocity_ioctl, + .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = velocity_poll_controller, +#endif +}; + +/** + * velocity_init_info - init private data + * @pdev: PCI device + * @vptr: Velocity info + * @info: Board type + * + * Set up the initial velocity_info struct for the device that has been + * discovered. 
+ */ +static void velocity_init_info(struct velocity_info *vptr, + const struct velocity_info_tbl *info) +{ + vptr->chip_id = info->chip_id; + vptr->tx.numq = info->txqueue; + vptr->multicast_limit = MCAM_SIZE; + spin_lock_init(&vptr->lock); +} + +/** + * velocity_get_pci_info - retrieve PCI info for device + * @vptr: velocity device + * @pdev: PCI device it matches + * + * Retrieve the PCI configuration space data that interests us from + * the kernel PCI layer + */ +static int velocity_get_pci_info(struct velocity_info *vptr) +{ + struct pci_dev *pdev = vptr->pdev; + + pci_set_master(pdev); + + vptr->ioaddr = pci_resource_start(pdev, 0); + vptr->memaddr = pci_resource_start(pdev, 1); + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { + dev_err(&pdev->dev, + "region #0 is not an I/O resource, aborting.\n"); + return -EINVAL; + } + + if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) { + dev_err(&pdev->dev, + "region #1 is an I/O resource, aborting.\n"); + return -EINVAL; + } + + if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) { + dev_err(&pdev->dev, "region #1 is too small.\n"); + return -EINVAL; + } + + return 0; +} + +/** + * velocity_get_platform_info - retrieve platform info for device + * @vptr: velocity device + * @pdev: platform device it matches + * + * Retrieve the Platform configuration data that interests us + */ +static int velocity_get_platform_info(struct velocity_info *vptr) +{ + struct resource res; + int ret; + + if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) + vptr->no_eeprom = 1; + + ret = of_address_to_resource(vptr->dev->of_node, 0, &res); + if (ret) { + dev_err(vptr->dev, "unable to find memory address\n"); + return ret; + } + + vptr->memaddr = res.start; + + if (resource_size(&res) < VELOCITY_IO_SIZE) { + dev_err(vptr->dev, "memory region is too small.\n"); + return -EINVAL; + } + + return 0; +} + +/** + * velocity_print_info - per driver data + * @vptr: velocity + * + * Print per driver data as the kernel driver finds Velocity + * hardware + */ +static void velocity_print_info(struct velocity_info *vptr) +{ + struct net_device *dev = vptr->netdev; + + printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); + printk(KERN_INFO "%s: Ethernet Address: %pM\n", + dev->name, dev->dev_addr); +} + +static u32 velocity_get_link(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0; +} + +/** + * velocity_probe - set up discovered velocity device + * @pdev: PCI device + * @ent: PCI device table entry that matched + * @bustype: bus that device is connected to + * + * Configure a discovered adapter from scratch. Return a negative + * errno error code on failure paths. + */ +static int velocity_probe(struct device *dev, int irq, + const struct velocity_info_tbl *info, + enum velocity_bus_type bustype) +{ + static int first = 1; + struct net_device *netdev; + int i; + const char *drv_string; + struct velocity_info *vptr; + struct mac_regs __iomem *regs; + int ret = -ENOMEM; + + /* FIXME: this driver, like almost all other ethernet drivers, + * can support more than MAX_UNITS. 
+ */ + if (velocity_nics >= MAX_UNITS) { + dev_notice(dev, "already found %d NICs.\n", velocity_nics); + return -ENODEV; + } + + netdev = alloc_etherdev(sizeof(struct velocity_info)); + if (!netdev) + goto out; + + /* Chain it all together */ + + SET_NETDEV_DEV(netdev, dev); + vptr = netdev_priv(netdev); + + if (first) { + printk(KERN_INFO "%s Ver. %s\n", + VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); + printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); + printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); + first = 0; + } + + netdev->irq = irq; + vptr->netdev = netdev; + vptr->dev = dev; + + velocity_init_info(vptr, info); + + if (bustype == BUS_PCI) { + vptr->pdev = to_pci_dev(dev); + + ret = velocity_get_pci_info(vptr); + if (ret < 0) + goto err_free_dev; + } else { + vptr->pdev = NULL; + ret = velocity_get_platform_info(vptr); + if (ret < 0) + goto err_free_dev; + } + + regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); + if (regs == NULL) { + ret = -EIO; + goto err_free_dev; + } + + vptr->mac_regs = regs; + vptr->rev_id = readb(®s->rev_id); + + mac_wol_reset(regs); + + for (i = 0; i < 6; i++) + netdev->dev_addr[i] = readb(®s->PAR[i]); + + + drv_string = dev_driver_string(dev); + + velocity_get_options(&vptr->options, velocity_nics, drv_string); + + /* + * Mask out the options cannot be set to the chip + */ + + vptr->options.flags &= info->flags; + + /* + * Enable the chip specified capbilities + */ + + vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); + + vptr->wol_opts = vptr->options.wol_opts; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + + vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); + + netdev->netdev_ops = &velocity_netdev_ops; + netdev->ethtool_ops = &velocity_ethtool_ops; + netif_napi_add(netdev, &vptr->napi, velocity_poll, + VELOCITY_NAPI_WEIGHT); + + netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_TX; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_IP_CSUM; + + ret = register_netdev(netdev); + if (ret < 0) + goto err_iounmap; + + if (!velocity_get_link(netdev)) { + netif_carrier_off(netdev); + vptr->mii_status |= VELOCITY_LINK_FAIL; + } + + velocity_print_info(vptr); + dev_set_drvdata(vptr->dev, netdev); + + /* and leave the chip powered down */ + + velocity_set_power_state(vptr, PCI_D3hot); + velocity_nics++; +out: + return ret; + +err_iounmap: + netif_napi_del(&vptr->napi); + iounmap(regs); +err_free_dev: + free_netdev(netdev); + goto out; +} + +/** + * velocity_remove - device unplug + * @dev: device being removed + * + * Device unload callback. Called on an unplug or on module + * unload for each active device that is present. 
Disconnects + * the device from the network layer and frees all the resources + */ +static int velocity_remove(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); + + unregister_netdev(netdev); + netif_napi_del(&vptr->napi); + iounmap(vptr->mac_regs); + free_netdev(netdev); + velocity_nics--; + + return 0; +} + +static int velocity_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + const struct velocity_info_tbl *info = + &chip_info_table[ent->driver_data]; + int ret; + + ret = pci_enable_device(pdev); + if (ret < 0) + return ret; + + ret = pci_request_regions(pdev, VELOCITY_NAME); + if (ret < 0) { + dev_err(&pdev->dev, "No PCI resources.\n"); + goto fail1; + } + + ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI); + if (ret == 0) + return 0; + + pci_release_regions(pdev); +fail1: + pci_disable_device(pdev); + return ret; +} + +static void velocity_pci_remove(struct pci_dev *pdev) +{ + velocity_remove(&pdev->dev); + + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int velocity_platform_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + const struct velocity_info_tbl *info; + int irq; + + of_id = of_match_device(velocity_of_ids, &pdev->dev); + if (!of_id) + return -EINVAL; + info = of_id->data; + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!irq) + return -EINVAL; + + return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM); +} + +static int velocity_platform_remove(struct platform_device *pdev) +{ + velocity_remove(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +/** + * wol_calc_crc - WOL CRC + * @pattern: data pattern + * @mask_pattern: mask + * + * Compute the wake on lan crc hashes for the packet header + * we are interested in. + */ +static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern) +{ + u16 crc = 0xFFFF; + u8 mask; + int i, j; + + for (i = 0; i < size; i++) { + mask = mask_pattern[i]; + + /* Skip this loop if the mask equals to zero */ + if (mask == 0x00) + continue; + + for (j = 0; j < 8; j++) { + if ((mask & 0x01) == 0) { + mask >>= 1; + continue; + } + mask >>= 1; + crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1); + } + } + /* Finally, invert the result once to get the correct data */ + crc = ~crc; + return bitrev32(crc) >> 16; +} + +/** + * velocity_set_wol - set up for wake on lan + * @vptr: velocity to set WOL status on + * + * Set a card up for wake on lan either by unicast or by + * ARP packet. 
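+ *
+ * For ARP wake-up a byte mask and a CRC of the masked ARP header
+ * (computed by wol_calc_crc()) are loaded into the pattern matching
+ * registers.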
+ * + * FIXME: check static buffer is safe here + */ +static int velocity_set_wol(struct velocity_info *vptr) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + enum speed_opt spd_dpx = vptr->options.spd_dpx; + static u8 buf[256]; + int i; + + static u32 mask_pattern[2][4] = { + {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ + {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ + }; + + writew(0xFFFF, ®s->WOLCRClr); + writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet); + writew(WOLCR_MAGIC_EN, ®s->WOLCRSet); + + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), ®s->WOLCRSet); + */ + + if (vptr->wol_opts & VELOCITY_WOL_UCAST) + writew(WOLCR_UNICAST_EN, ®s->WOLCRSet); + + if (vptr->wol_opts & VELOCITY_WOL_ARP) { + struct arp_packet *arp = (struct arp_packet *) buf; + u16 crc; + memset(buf, 0, sizeof(struct arp_packet) + 7); + + for (i = 0; i < 4; i++) + writel(mask_pattern[0][i], ®s->ByteMask[0][i]); + + arp->type = htons(ETH_P_ARP); + arp->ar_op = htons(1); + + memcpy(arp->ar_tip, vptr->ip_addr, 4); + + crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, + (u8 *) & mask_pattern[0][0]); + + writew(crc, ®s->PatternCRC[0]); + writew(WOLCR_ARP_EN, ®s->WOLCRSet); + } + + BYTE_REG_BITS_ON(PWCFG_WOLTYPE, ®s->PWCFGSet); + BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, ®s->PWCFGSet); + + writew(0x0FFF, ®s->WOLSRClr); + + if (spd_dpx == SPD_DPX_1000_FULL) + goto mac_done; + + if (spd_dpx != SPD_DPX_AUTO) + goto advertise_done; + + if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) + MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); + + MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); + } + + if (vptr->mii_status & VELOCITY_SPEED_1000) + MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); + +advertise_done: + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + { + u8 GCR; + GCR = readb(®s->CHIPGCR); + GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX; + writeb(GCR, ®s->CHIPGCR); + } + +mac_done: + BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); + /* Turn on SWPTAG just before entering power mode */ + BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); + /* Go to bed ..... */ + BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); + + return 0; +} + +/** + * velocity_save_context - save registers + * @vptr: velocity + * @context: buffer for stored context + * + * Retrieve the current configuration from the velocity hardware + * and stash it in the context structure, for use by the context + * restore functions. 
This allows us to save things we need across + * power down states + */ +static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + u16 i; + u8 __iomem *ptr = (u8 __iomem *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + +} + +static int velocity_suspend(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); + unsigned long flags; + + if (!netif_running(vptr->netdev)) + return 0; + + netif_device_detach(vptr->netdev); + + spin_lock_irqsave(&vptr->lock, flags); + if (vptr->pdev) + pci_save_state(vptr->pdev); + + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { + velocity_get_ip(vptr); + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + velocity_set_wol(vptr); + if (vptr->pdev) + pci_enable_wake(vptr->pdev, PCI_D3hot, 1); + velocity_set_power_state(vptr, PCI_D3hot); + } else { + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + if (vptr->pdev) + pci_disable_device(vptr->pdev); + velocity_set_power_state(vptr, PCI_D3hot); + } + + spin_unlock_irqrestore(&vptr->lock, flags); + return 0; +} + +/** + * velocity_restore_context - restore registers + * @vptr: velocity + * @context: buffer for stored context + * + * Reload the register configuration from the velocity context + * created by velocity_save_context. + */ +static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) +{ + struct mac_regs __iomem *regs = vptr->mac_regs; + int i; + u8 __iomem *ptr = (u8 __iomem *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + + /* Just skip cr0 */ + for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) { + /* Clear */ + writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4); + /* Set */ + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + + for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); +} + +static int velocity_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct velocity_info *vptr = netdev_priv(netdev); + unsigned long flags; + int i; + + if (!netif_running(vptr->netdev)) + return 0; + + velocity_set_power_state(vptr, PCI_D0); + + if (vptr->pdev) { + pci_enable_wake(vptr->pdev, PCI_D0, 0); + pci_restore_state(vptr->pdev); + } + + mac_wol_reset(vptr->mac_regs); + + spin_lock_irqsave(&vptr->lock, flags); + velocity_restore_context(vptr, &vptr->context); + velocity_init_registers(vptr, VELOCITY_INIT_WOL); + mac_disable_int(vptr->mac_regs); + + velocity_tx_srv(vptr); + + for (i = 0; i < vptr->tx.numq; i++) { + if (vptr->tx.used[i]) + mac_tx_queue_wake(vptr->mac_regs, i); + } + + mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); + netif_device_attach(vptr->netdev); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static 
SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume); + +/* + * Definition for our device driver. The PCI layer interface + * uses this to handle all our card discover and plugging + */ +static struct pci_driver velocity_pci_driver = { + .name = VELOCITY_NAME, + .id_table = velocity_pci_id_table, + .probe = velocity_pci_probe, + .remove = velocity_pci_remove, + .driver = { + .pm = &velocity_pm_ops, + }, +}; + +static struct platform_driver velocity_platform_driver = { + .probe = velocity_platform_probe, + .remove = velocity_platform_remove, + .driver = { + .name = "via-velocity", + .of_match_table = velocity_of_ids, + .pm = &velocity_pm_ops, + }, +}; + +/** + * velocity_ethtool_up - pre hook for ethtool + * @dev: network device + * + * Called before an ethtool operation. We need to make sure the + * chip is out of D3 state before we poke at it. + */ +static int velocity_ethtool_up(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + if (!netif_running(dev)) + velocity_set_power_state(vptr, PCI_D0); + return 0; +} + +/** + * velocity_ethtool_down - post hook for ethtool + * @dev: network device + * + * Called after an ethtool operation. Restore the chip back to D3 + * state if it isn't running. + */ +static void velocity_ethtool_down(struct net_device *dev) +{ + struct velocity_info *vptr = netdev_priv(dev); + if (!netif_running(dev)) + velocity_set_power_state(vptr, PCI_D3hot); +} + +static int velocity_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + struct mac_regs __iomem *regs = vptr->mac_regs; + u32 status; + status = check_connection_type(vptr->mac_regs); + + cmd->supported = SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + + cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg; + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + cmd->advertising |= + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + } else { + switch (vptr->options.spd_dpx) { + case SPD_DPX_1000_FULL: + cmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case SPD_DPX_100_HALF: + cmd->advertising |= ADVERTISED_100baseT_Half; + break; + case SPD_DPX_100_FULL: + cmd->advertising |= ADVERTISED_100baseT_Full; + break; + case SPD_DPX_10_HALF: + cmd->advertising |= ADVERTISED_10baseT_Half; + break; + case SPD_DPX_10_FULL: + cmd->advertising |= ADVERTISED_10baseT_Full; + break; + default: + break; + } + } + + if (status & VELOCITY_SPEED_1000) + ethtool_cmd_speed_set(cmd, SPEED_1000); + else if (status & VELOCITY_SPEED_100) + ethtool_cmd_speed_set(cmd, SPEED_100); + else + ethtool_cmd_speed_set(cmd, SPEED_10); + + cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->port = PORT_TP; + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = readb(®s->MIIADR) & 0x1F; + + if (status & VELOCITY_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + + return 0; +} + +static int velocity_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + u32 curr_status; + u32 new_status = 0; + int ret = 0; + + curr_status = check_connection_type(vptr->mac_regs); + curr_status &= (~VELOCITY_LINK_FAIL); + + new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); + new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0); + new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); + new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); + new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); + + if ((new_status & VELOCITY_AUTONEG_ENABLE) && + (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) { + ret = -EINVAL; + } else { + enum speed_opt spd_dpx; + + if (new_status & VELOCITY_AUTONEG_ENABLE) + spd_dpx = SPD_DPX_AUTO; + else if ((new_status & VELOCITY_SPEED_1000) && + (new_status & VELOCITY_DUPLEX_FULL)) { + spd_dpx = SPD_DPX_1000_FULL; + } else if (new_status & VELOCITY_SPEED_100) + spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? + SPD_DPX_100_FULL : SPD_DPX_100_HALF; + else if (new_status & VELOCITY_SPEED_10) + spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ? + SPD_DPX_10_FULL : SPD_DPX_10_HALF; + else + return -EOPNOTSUPP; + + vptr->options.spd_dpx = spd_dpx; + + velocity_set_media_mode(vptr, new_status); + } + + return ret; +} + +static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct velocity_info *vptr = netdev_priv(dev); + + strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver)); + strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version)); + if (vptr->pdev) + strlcpy(info->bus_info, pci_name(vptr->pdev), + sizeof(info->bus_info)); + else + strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); +} + +static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = netdev_priv(dev); + wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; + wol->wolopts |= WAKE_MAGIC; + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + wol.wolopts|=WAKE_PHY; + */ + if (vptr->wol_opts & VELOCITY_WOL_UCAST) + wol->wolopts |= WAKE_UCAST; + if (vptr->wol_opts & VELOCITY_WOL_ARP) + wol->wolopts |= WAKE_ARP; + memcpy(&wol->sopass, vptr->wol_passwd, 6); +} + +static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = netdev_priv(dev); + + if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) + return -EFAULT; + vptr->wol_opts = VELOCITY_WOL_MAGIC; + + /* + if (wol.wolopts & WAKE_PHY) { + vptr->wol_opts|=VELOCITY_WOL_PHY; + vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED; + } + */ + + if (wol->wolopts & WAKE_MAGIC) { + vptr->wol_opts |= VELOCITY_WOL_MAGIC; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_UCAST) { + vptr->wol_opts |= VELOCITY_WOL_UCAST; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_ARP) { + vptr->wol_opts |= VELOCITY_WOL_ARP; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + memcpy(vptr->wol_passwd, wol->sopass, 6); + return 0; +} + +static u32 velocity_get_msglevel(struct net_device *dev) +{ + return msglevel; +} + 
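+/*
+ * The interrupt coalescing timers are stored as a 6-bit count plus a
+ * 2-bit multiplier (x1, x4, x16 or x64); see get_pending_timer_val() and
+ * set_pending_timer_val() below. A request of 100us, for example, is
+ * encoded as 0x59 (multiplier 4, 100 >> 2 = 25) and decodes back to
+ * 25 * 4 = 100us. velocity_set_coalesce() rejects delays above
+ * 0x3f * 64 microseconds.
+ */
+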
+static void velocity_set_msglevel(struct net_device *dev, u32 value) +{ + msglevel = value; +} + +static int get_pending_timer_val(int val) +{ + int mult_bits = val >> 6; + int mult = 1; + + switch (mult_bits) + { + case 1: + mult = 4; break; + case 2: + mult = 16; break; + case 3: + mult = 64; break; + case 0: + default: + break; + } + + return (val & 0x3f) * mult; +} + +static void set_pending_timer_val(int *val, u32 us) +{ + u8 mult = 0; + u8 shift = 0; + + if (us >= 0x3f) { + mult = 1; /* mult with 4 */ + shift = 2; + } + if (us >= 0x3f * 4) { + mult = 2; /* mult with 16 */ + shift = 4; + } + if (us >= 0x3f * 16) { + mult = 3; /* mult with 64 */ + shift = 6; + } + + *val = (mult << 6) | ((us >> shift) & 0x3f); +} + + +static int velocity_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + + ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup; + ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup; + + ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer); + ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer); + + return 0; +} + +static int velocity_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct velocity_info *vptr = netdev_priv(dev); + int max_us = 0x3f * 64; + unsigned long flags; + + /* 6 bits of */ + if (ecmd->tx_coalesce_usecs > max_us) + return -EINVAL; + if (ecmd->rx_coalesce_usecs > max_us) + return -EINVAL; + + if (ecmd->tx_max_coalesced_frames > 0xff) + return -EINVAL; + if (ecmd->rx_max_coalesced_frames > 0xff) + return -EINVAL; + + vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames; + vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames; + + set_pending_timer_val(&vptr->options.rxqueue_timer, + ecmd->rx_coalesce_usecs); + set_pending_timer_val(&vptr->options.txqueue_timer, + ecmd->tx_coalesce_usecs); + + /* Setup the interrupt suppression and queue timers */ + spin_lock_irqsave(&vptr->lock, flags); + mac_disable_int(vptr->mac_regs); + setup_adaptive_interrupts(vptr); + setup_queue_timers(vptr); + + mac_write_int_mask(vptr->int_mask, vptr->mac_regs); + mac_clear_isr(vptr->mac_regs); + mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); + + return 0; +} + +static const char velocity_gstrings[][ETH_GSTRING_LEN] = { + "rx_all", + "rx_ok", + "tx_ok", + "rx_error", + "rx_runt_ok", + "rx_runt_err", + "rx_64", + "tx_64", + "rx_65_to_127", + "tx_65_to_127", + "rx_128_to_255", + "tx_128_to_255", + "rx_256_to_511", + "tx_256_to_511", + "rx_512_to_1023", + "tx_512_to_1023", + "rx_1024_to_1518", + "tx_1024_to_1518", + "tx_ether_collisions", + "rx_crc_errors", + "rx_jumbo", + "tx_jumbo", + "rx_mac_control_frames", + "tx_mac_control_frames", + "rx_frame_alignement_errors", + "rx_long_ok", + "rx_long_err", + "tx_sqe_errors", + "rx_no_buf", + "rx_symbol_errors", + "in_range_length_errors", + "late_collisions" +}; + +static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data) +{ + switch (sset) { + case ETH_SS_STATS: + memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings)); + break; + } +} + +static int velocity_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(velocity_gstrings); + default: + return -EOPNOTSUPP; + } +} + +static void velocity_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + if (netif_running(dev)) { + struct velocity_info *vptr = netdev_priv(dev); + u32 *p = 
vptr->mib_counter; + int i; + + spin_lock_irq(&vptr->lock); + velocity_update_hw_mibs(vptr); + spin_unlock_irq(&vptr->lock); + + for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++) + *data++ = *p++; + } +} + +static const struct ethtool_ops velocity_ethtool_ops = { + .get_settings = velocity_get_settings, + .set_settings = velocity_set_settings, + .get_drvinfo = velocity_get_drvinfo, + .get_wol = velocity_ethtool_get_wol, + .set_wol = velocity_ethtool_set_wol, + .get_msglevel = velocity_get_msglevel, + .set_msglevel = velocity_set_msglevel, + .get_link = velocity_get_link, + .get_strings = velocity_get_strings, + .get_sset_count = velocity_get_sset_count, + .get_ethtool_stats = velocity_get_ethtool_stats, + .get_coalesce = velocity_get_coalesce, + .set_coalesce = velocity_set_coalesce, + .begin = velocity_ethtool_up, + .complete = velocity_ethtool_down +}; + +#if defined(CONFIG_PM) && defined(CONFIG_INET) +static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) +{ + struct in_ifaddr *ifa = ptr; + struct net_device *dev = ifa->ifa_dev->dev; + + if (dev_net(dev) == &init_net && + dev->netdev_ops == &velocity_netdev_ops) + velocity_get_ip(netdev_priv(dev)); + + return NOTIFY_DONE; +} + +static struct notifier_block velocity_inetaddr_notifier = { + .notifier_call = velocity_netdev_event, +}; + +static void velocity_register_notifier(void) +{ + register_inetaddr_notifier(&velocity_inetaddr_notifier); +} + +static void velocity_unregister_notifier(void) +{ + unregister_inetaddr_notifier(&velocity_inetaddr_notifier); +} + +#else + +#define velocity_register_notifier() do {} while (0) +#define velocity_unregister_notifier() do {} while (0) + +#endif /* defined(CONFIG_PM) && defined(CONFIG_INET) */ + +/** + * velocity_init_module - load time function + * + * Called when the velocity module is loaded. The PCI driver + * is registered with the PCI layer, and in turn will call + * the probe functions for each velocity adapter installed + * in the system. + */ +static int __init velocity_init_module(void) +{ + int ret_pci, ret_platform; + + velocity_register_notifier(); + + ret_pci = pci_register_driver(&velocity_pci_driver); + ret_platform = platform_driver_register(&velocity_platform_driver); + + /* if both_registers failed, remove the notifier */ + if ((ret_pci < 0) && (ret_platform < 0)) { + velocity_unregister_notifier(); + return ret_pci; + } + + return 0; +} + +/** + * velocity_cleanup - module unload + * + * When the velocity hardware is unloaded this function is called. + * It will clean up the notifiers and the unregister the PCI + * driver interface for this hardware. This in turn cleans up + * all discovered interfaces before returning from the function + */ +static void __exit velocity_cleanup_module(void) +{ + velocity_unregister_notifier(); + + pci_unregister_driver(&velocity_pci_driver); + platform_driver_unregister(&velocity_platform_driver); +} + +module_init(velocity_init_module); +module_exit(velocity_cleanup_module); diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h new file mode 100644 index 000000000..9453bfa93 --- /dev/null +++ b/drivers/net/ethernet/via/via-velocity.h @@ -0,0 +1,1581 @@ +/* + * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. + * All rights reserved. 
+ * + * This software may be redistributed and/or modified under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or + * any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * File: via-velocity.h + * + * Purpose: Header file to define driver's private structures. + * + * Author: Chuang Liang-Shing, AJ Jiang + * + * Date: Jan 24, 2003 + */ + + +#ifndef VELOCITY_H +#define VELOCITY_H + +#define VELOCITY_TX_CSUM_SUPPORT + +#define VELOCITY_NAME "via-velocity" +#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver" +#define VELOCITY_VERSION "1.15" + +#define VELOCITY_IO_SIZE 256 +#define VELOCITY_NAPI_WEIGHT 64 + +#define PKT_BUF_SZ 1540 + +#define MAX_UNITS 8 +#define OPTION_DEFAULT { [0 ... MAX_UNITS-1] = -1} + +#define REV_ID_VT6110 (0) + +#define BYTE_REG_BITS_ON(x,p) do { writeb(readb((p))|(x),(p));} while (0) +#define WORD_REG_BITS_ON(x,p) do { writew(readw((p))|(x),(p));} while (0) +#define DWORD_REG_BITS_ON(x,p) do { writel(readl((p))|(x),(p));} while (0) + +#define BYTE_REG_BITS_IS_ON(x,p) (readb((p)) & (x)) +#define WORD_REG_BITS_IS_ON(x,p) (readw((p)) & (x)) +#define DWORD_REG_BITS_IS_ON(x,p) (readl((p)) & (x)) + +#define BYTE_REG_BITS_OFF(x,p) do { writeb(readb((p)) & (~(x)),(p));} while (0) +#define WORD_REG_BITS_OFF(x,p) do { writew(readw((p)) & (~(x)),(p));} while (0) +#define DWORD_REG_BITS_OFF(x,p) do { writel(readl((p)) & (~(x)),(p));} while (0) + +#define BYTE_REG_BITS_SET(x,m,p) do { writeb( (readb((p)) & (~(m))) |(x),(p));} while (0) +#define WORD_REG_BITS_SET(x,m,p) do { writew( (readw((p)) & (~(m))) |(x),(p));} while (0) +#define DWORD_REG_BITS_SET(x,m,p) do { writel( (readl((p)) & (~(m)))|(x),(p));} while (0) + +#define VAR_USED(p) do {(p)=(p);} while (0) + +/* + * Purpose: Structures for MAX RX/TX descriptors. 
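+ *
+ * Each descriptor carries a 48-bit bus address split across its pa_low
+ * and pa_high fields, and bit 15 of the length word doubles as the
+ * ownership flag (see OWNED_BY_NIC/OWNED_BY_HOST below).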
+ */ + + +#define B_OWNED_BY_CHIP 1 +#define B_OWNED_BY_HOST 0 + +/* + * Bits in the RSR0 register + */ + +#define RSR_DETAG cpu_to_le16(0x0080) +#define RSR_SNTAG cpu_to_le16(0x0040) +#define RSR_RXER cpu_to_le16(0x0020) +#define RSR_RL cpu_to_le16(0x0010) +#define RSR_CE cpu_to_le16(0x0008) +#define RSR_FAE cpu_to_le16(0x0004) +#define RSR_CRC cpu_to_le16(0x0002) +#define RSR_VIDM cpu_to_le16(0x0001) + +/* + * Bits in the RSR1 register + */ + +#define RSR_RXOK cpu_to_le16(0x8000) // rx OK +#define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match +#define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet +#define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet +#define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet +#define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator +#define RSR_STP cpu_to_le16(0x0200) // start of packet +#define RSR_EDP cpu_to_le16(0x0100) // end of packet + +/* + * Bits in the CSM register + */ + +#define CSM_IPOK 0x40 //IP Checksum validation ok +#define CSM_TUPOK 0x20 //TCP/UDP Checksum validation ok +#define CSM_FRAG 0x10 //Fragment IP datagram +#define CSM_IPKT 0x04 //Received an IP packet +#define CSM_TCPKT 0x02 //Received a TCP packet +#define CSM_UDPKT 0x01 //Received a UDP packet + +/* + * Bits in the TSR0 register + */ + +#define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision +#define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort +#define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision +#define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event +#define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3] +#define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2] +#define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1] +#define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0] +#define TSR0_TERR cpu_to_le16(0x8000) // +#define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode +#define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode +#define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down +#define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case +#define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost +#define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat) + +// +// Bits in the TCR0 register +// +#define TCR0_TIC 0x80 // assert interrupt immediately while descriptor has been send complete +#define TCR0_PIC 0x40 // priority interrupt request, INA# is issued over adaptive interrupt scheme +#define TCR0_VETAG 0x20 // enable VLAN tag +#define TCR0_IPCK 0x10 // request IP checksum calculation. +#define TCR0_UDPCK 0x08 // request UDP checksum calculation. +#define TCR0_TCPCK 0x04 // request TCP checksum calculation. 
+#define TCR0_JMBO 0x02 // indicate a jumbo packet in GMAC side +#define TCR0_CRC 0x01 // disable CRC generation + +#define TCPLS_NORMAL 3 +#define TCPLS_START 2 +#define TCPLS_END 1 +#define TCPLS_MED 0 + + +// max transmit or receive buffer size +#define CB_RX_BUF_SIZE 2048UL // max buffer size + // NOTE: must be multiple of 4 + +#define CB_MAX_RD_NUM 512 // MAX # of RD +#define CB_MAX_TD_NUM 256 // MAX # of TD + +#define CB_INIT_RD_NUM_3119 128 // init # of RD, for setup VT3119 +#define CB_INIT_TD_NUM_3119 64 // init # of TD, for setup VT3119 + +#define CB_INIT_RD_NUM 128 // init # of RD, for setup default +#define CB_INIT_TD_NUM 64 // init # of TD, for setup default + +// for 3119 +#define CB_TD_RING_NUM 4 // # of TD rings. +#define CB_MAX_SEG_PER_PKT 7 // max data seg per packet (Tx) + + +/* + * If collisions excess 15 times , tx will abort, and + * if tx fifo underflow, tx will fail + * we should try to resend it + */ + +#define CB_MAX_TX_ABORT_RETRY 3 + +/* + * Receive descriptor + */ + +struct rdesc0 { + __le16 RSR; /* Receive status */ + __le16 len; /* bits 0--13; bit 15 - owner */ +}; + +struct rdesc1 { + __le16 PQTAG; + u8 CSM; + u8 IPKT; +}; + +enum { + RX_INTEN = cpu_to_le16(0x8000) +}; + +struct rx_desc { + struct rdesc0 rdesc0; + struct rdesc1 rdesc1; + __le32 pa_low; /* Low 32 bit PCI address */ + __le16 pa_high; /* Next 16 bit PCI address (48 total) */ + __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */ +} __packed; + +/* + * Transmit descriptor + */ + +struct tdesc0 { + __le16 TSR; /* Transmit status register */ + __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */ +}; + +struct tdesc1 { + __le16 vlan; + u8 TCR; + u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ +} __packed; + +enum { + TD_QUEUE = cpu_to_le16(0x8000) +}; + +struct td_buf { + __le32 pa_low; + __le16 pa_high; + __le16 size; /* bits 0--13 - size, bit 15 - queue */ +} __packed; + +struct tx_desc { + struct tdesc0 tdesc0; + struct tdesc1 tdesc1; + struct td_buf td_buf[7]; +}; + +struct velocity_rd_info { + struct sk_buff *skb; + dma_addr_t skb_dma; +}; + +/* + * Used to track transmit side buffers. + */ + +struct velocity_td_info { + struct sk_buff *skb; + int nskb_dma; + dma_addr_t skb_dma[7]; +}; + +enum velocity_owner { + OWNED_BY_HOST = 0, + OWNED_BY_NIC = cpu_to_le16(0x8000) +}; + + +/* + * MAC registers and macros. 
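+ *
+ * Most control bits live in set/clear register pairs, so a bit is
+ * flipped by writing its mask to the matching *_SET or *_CLR offset
+ * instead of doing a read-modify-write.  Hedged illustration, not
+ * driver code ("regs" is an ioremapped struct mac_regs pointer,
+ * defined further down):
+ *
+ *	writel(CR0_GINTMSK1, &regs->CR0Set);	(unmask global interrupts)
+ *	writel(CR0_GINTMSK1, &regs->CR0Clr);	(mask them again)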
+ */ + + +#define MCAM_SIZE 64 +#define VCAM_SIZE 64 +#define TX_QUEUE_NO 4 + +#define MAX_HW_MIB_COUNTER 32 +#define VELOCITY_MIN_MTU (64) +#define VELOCITY_MAX_MTU (9000) + +/* + * Registers in the MAC + */ + +#define MAC_REG_PAR 0x00 // physical address +#define MAC_REG_RCR 0x06 +#define MAC_REG_TCR 0x07 +#define MAC_REG_CR0_SET 0x08 +#define MAC_REG_CR1_SET 0x09 +#define MAC_REG_CR2_SET 0x0A +#define MAC_REG_CR3_SET 0x0B +#define MAC_REG_CR0_CLR 0x0C +#define MAC_REG_CR1_CLR 0x0D +#define MAC_REG_CR2_CLR 0x0E +#define MAC_REG_CR3_CLR 0x0F +#define MAC_REG_MAR 0x10 +#define MAC_REG_CAM 0x10 +#define MAC_REG_DEC_BASE_HI 0x18 +#define MAC_REG_DBF_BASE_HI 0x1C +#define MAC_REG_ISR_CTL 0x20 +#define MAC_REG_ISR_HOTMR 0x20 +#define MAC_REG_ISR_TSUPTHR 0x20 +#define MAC_REG_ISR_RSUPTHR 0x20 +#define MAC_REG_ISR_CTL1 0x21 +#define MAC_REG_TXE_SR 0x22 +#define MAC_REG_RXE_SR 0x23 +#define MAC_REG_ISR 0x24 +#define MAC_REG_ISR0 0x24 +#define MAC_REG_ISR1 0x25 +#define MAC_REG_ISR2 0x26 +#define MAC_REG_ISR3 0x27 +#define MAC_REG_IMR 0x28 +#define MAC_REG_IMR0 0x28 +#define MAC_REG_IMR1 0x29 +#define MAC_REG_IMR2 0x2A +#define MAC_REG_IMR3 0x2B +#define MAC_REG_TDCSR_SET 0x30 +#define MAC_REG_RDCSR_SET 0x32 +#define MAC_REG_TDCSR_CLR 0x34 +#define MAC_REG_RDCSR_CLR 0x36 +#define MAC_REG_RDBASE_LO 0x38 +#define MAC_REG_RDINDX 0x3C +#define MAC_REG_TDBASE_LO 0x40 +#define MAC_REG_RDCSIZE 0x50 +#define MAC_REG_TDCSIZE 0x52 +#define MAC_REG_TDINDX 0x54 +#define MAC_REG_TDIDX0 0x54 +#define MAC_REG_TDIDX1 0x56 +#define MAC_REG_TDIDX2 0x58 +#define MAC_REG_TDIDX3 0x5A +#define MAC_REG_PAUSE_TIMER 0x5C +#define MAC_REG_RBRDU 0x5E +#define MAC_REG_FIFO_TEST0 0x60 +#define MAC_REG_FIFO_TEST1 0x64 +#define MAC_REG_CAMADDR 0x68 +#define MAC_REG_CAMCR 0x69 +#define MAC_REG_GFTEST 0x6A +#define MAC_REG_FTSTCMD 0x6B +#define MAC_REG_MIICFG 0x6C +#define MAC_REG_MIISR 0x6D +#define MAC_REG_PHYSR0 0x6E +#define MAC_REG_PHYSR1 0x6F +#define MAC_REG_MIICR 0x70 +#define MAC_REG_MIIADR 0x71 +#define MAC_REG_MIIDATA 0x72 +#define MAC_REG_SOFT_TIMER0 0x74 +#define MAC_REG_SOFT_TIMER1 0x76 +#define MAC_REG_CFGA 0x78 +#define MAC_REG_CFGB 0x79 +#define MAC_REG_CFGC 0x7A +#define MAC_REG_CFGD 0x7B +#define MAC_REG_DCFG0 0x7C +#define MAC_REG_DCFG1 0x7D +#define MAC_REG_MCFG0 0x7E +#define MAC_REG_MCFG1 0x7F + +#define MAC_REG_TBIST 0x80 +#define MAC_REG_RBIST 0x81 +#define MAC_REG_PMCC 0x82 +#define MAC_REG_STICKHW 0x83 +#define MAC_REG_MIBCR 0x84 +#define MAC_REG_EERSV 0x85 +#define MAC_REG_REVID 0x86 +#define MAC_REG_MIBREAD 0x88 +#define MAC_REG_BPMA 0x8C +#define MAC_REG_EEWR_DATA 0x8C +#define MAC_REG_BPMD_WR 0x8F +#define MAC_REG_BPCMD 0x90 +#define MAC_REG_BPMD_RD 0x91 +#define MAC_REG_EECHKSUM 0x92 +#define MAC_REG_EECSR 0x93 +#define MAC_REG_EERD_DATA 0x94 +#define MAC_REG_EADDR 0x96 +#define MAC_REG_EMBCMD 0x97 +#define MAC_REG_JMPSR0 0x98 +#define MAC_REG_JMPSR1 0x99 +#define MAC_REG_JMPSR2 0x9A +#define MAC_REG_JMPSR3 0x9B +#define MAC_REG_CHIPGSR 0x9C +#define MAC_REG_TESTCFG 0x9D +#define MAC_REG_DEBUG 0x9E +#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */ +#define MAC_REG_WOLCR0_SET 0xA0 +#define MAC_REG_WOLCR1_SET 0xA1 +#define MAC_REG_PWCFG_SET 0xA2 +#define MAC_REG_WOLCFG_SET 0xA3 +#define MAC_REG_WOLCR0_CLR 0xA4 +#define MAC_REG_WOLCR1_CLR 0xA5 +#define MAC_REG_PWCFG_CLR 0xA6 +#define MAC_REG_WOLCFG_CLR 0xA7 +#define MAC_REG_WOLSR0_SET 0xA8 +#define MAC_REG_WOLSR1_SET 0xA9 +#define MAC_REG_WOLSR0_CLR 0xAC +#define MAC_REG_WOLSR1_CLR 0xAD +#define MAC_REG_PATRN_CRC0 0xB0 +#define 
MAC_REG_PATRN_CRC1 0xB2 +#define MAC_REG_PATRN_CRC2 0xB4 +#define MAC_REG_PATRN_CRC3 0xB6 +#define MAC_REG_PATRN_CRC4 0xB8 +#define MAC_REG_PATRN_CRC5 0xBA +#define MAC_REG_PATRN_CRC6 0xBC +#define MAC_REG_PATRN_CRC7 0xBE +#define MAC_REG_BYTEMSK0_0 0xC0 +#define MAC_REG_BYTEMSK0_1 0xC4 +#define MAC_REG_BYTEMSK0_2 0xC8 +#define MAC_REG_BYTEMSK0_3 0xCC +#define MAC_REG_BYTEMSK1_0 0xD0 +#define MAC_REG_BYTEMSK1_1 0xD4 +#define MAC_REG_BYTEMSK1_2 0xD8 +#define MAC_REG_BYTEMSK1_3 0xDC +#define MAC_REG_BYTEMSK2_0 0xE0 +#define MAC_REG_BYTEMSK2_1 0xE4 +#define MAC_REG_BYTEMSK2_2 0xE8 +#define MAC_REG_BYTEMSK2_3 0xEC +#define MAC_REG_BYTEMSK3_0 0xF0 +#define MAC_REG_BYTEMSK3_1 0xF4 +#define MAC_REG_BYTEMSK3_2 0xF8 +#define MAC_REG_BYTEMSK3_3 0xFC + +/* + * Bits in the RCR register + */ + +#define RCR_AS 0x80 +#define RCR_AP 0x40 +#define RCR_AL 0x20 +#define RCR_PROM 0x10 +#define RCR_AB 0x08 +#define RCR_AM 0x04 +#define RCR_AR 0x02 +#define RCR_SEP 0x01 + +/* + * Bits in the TCR register + */ + +#define TCR_TB2BDIS 0x80 +#define TCR_COLTMC1 0x08 +#define TCR_COLTMC0 0x04 +#define TCR_LB1 0x02 /* loopback[1] */ +#define TCR_LB0 0x01 /* loopback[0] */ + +/* + * Bits in the CR0 register + */ + +#define CR0_TXON 0x00000008UL +#define CR0_RXON 0x00000004UL +#define CR0_STOP 0x00000002UL /* stop MAC, default = 1 */ +#define CR0_STRT 0x00000001UL /* start MAC */ +#define CR0_SFRST 0x00008000UL /* software reset */ +#define CR0_TM1EN 0x00004000UL +#define CR0_TM0EN 0x00002000UL +#define CR0_DPOLL 0x00000800UL /* disable rx/tx auto polling */ +#define CR0_DISAU 0x00000100UL +#define CR0_XONEN 0x00800000UL +#define CR0_FDXTFCEN 0x00400000UL /* full-duplex TX flow control enable */ +#define CR0_FDXRFCEN 0x00200000UL /* full-duplex RX flow control enable */ +#define CR0_HDXFCEN 0x00100000UL /* half-duplex flow control enable */ +#define CR0_XHITH1 0x00080000UL /* TX XON high threshold 1 */ +#define CR0_XHITH0 0x00040000UL /* TX XON high threshold 0 */ +#define CR0_XLTH1 0x00020000UL /* TX pause frame low threshold 1 */ +#define CR0_XLTH0 0x00010000UL /* TX pause frame low threshold 0 */ +#define CR0_GSPRST 0x80000000UL +#define CR0_FORSRST 0x40000000UL +#define CR0_FPHYRST 0x20000000UL +#define CR0_DIAG 0x10000000UL +#define CR0_INTPCTL 0x04000000UL +#define CR0_GINTMSK1 0x02000000UL +#define CR0_GINTMSK0 0x01000000UL + +/* + * Bits in the CR1 register + */ + +#define CR1_SFRST 0x80 /* software reset */ +#define CR1_TM1EN 0x40 +#define CR1_TM0EN 0x20 +#define CR1_DPOLL 0x08 /* disable rx/tx auto polling */ +#define CR1_DISAU 0x01 + +/* + * Bits in the CR2 register + */ + +#define CR2_XONEN 0x80 +#define CR2_FDXTFCEN 0x40 /* full-duplex TX flow control enable */ +#define CR2_FDXRFCEN 0x20 /* full-duplex RX flow control enable */ +#define CR2_HDXFCEN 0x10 /* half-duplex flow control enable */ +#define CR2_XHITH1 0x08 /* TX XON high threshold 1 */ +#define CR2_XHITH0 0x04 /* TX XON high threshold 0 */ +#define CR2_XLTH1 0x02 /* TX pause frame low threshold 1 */ +#define CR2_XLTH0 0x01 /* TX pause frame low threshold 0 */ + +/* + * Bits in the CR3 register + */ + +#define CR3_GSPRST 0x80 +#define CR3_FORSRST 0x40 +#define CR3_FPHYRST 0x20 +#define CR3_DIAG 0x10 +#define CR3_INTPCTL 0x04 +#define CR3_GINTMSK1 0x02 +#define CR3_GINTMSK0 0x01 + +#define ISRCTL_UDPINT 0x8000 +#define ISRCTL_TSUPDIS 0x4000 +#define ISRCTL_RSUPDIS 0x2000 +#define ISRCTL_PMSK1 0x1000 +#define ISRCTL_PMSK0 0x0800 +#define ISRCTL_INTPD 0x0400 +#define ISRCTL_HCRLD 0x0200 +#define ISRCTL_SCRLD 0x0100 + +/* + * Bits in the ISR_CTL1 
register + */ + +#define ISRCTL1_UDPINT 0x80 +#define ISRCTL1_TSUPDIS 0x40 +#define ISRCTL1_RSUPDIS 0x20 +#define ISRCTL1_PMSK1 0x10 +#define ISRCTL1_PMSK0 0x08 +#define ISRCTL1_INTPD 0x04 +#define ISRCTL1_HCRLD 0x02 +#define ISRCTL1_SCRLD 0x01 + +/* + * Bits in the TXE_SR register + */ + +#define TXESR_TFDBS 0x08 +#define TXESR_TDWBS 0x04 +#define TXESR_TDRBS 0x02 +#define TXESR_TDSTR 0x01 + +/* + * Bits in the RXE_SR register + */ + +#define RXESR_RFDBS 0x08 +#define RXESR_RDWBS 0x04 +#define RXESR_RDRBS 0x02 +#define RXESR_RDSTR 0x01 + +/* + * Bits in the ISR register + */ + +#define ISR_ISR3 0x80000000UL +#define ISR_ISR2 0x40000000UL +#define ISR_ISR1 0x20000000UL +#define ISR_ISR0 0x10000000UL +#define ISR_TXSTLI 0x02000000UL +#define ISR_RXSTLI 0x01000000UL +#define ISR_HFLD 0x00800000UL +#define ISR_UDPI 0x00400000UL +#define ISR_MIBFI 0x00200000UL +#define ISR_SHDNI 0x00100000UL +#define ISR_PHYI 0x00080000UL +#define ISR_PWEI 0x00040000UL +#define ISR_TMR1I 0x00020000UL +#define ISR_TMR0I 0x00010000UL +#define ISR_SRCI 0x00008000UL +#define ISR_LSTPEI 0x00004000UL +#define ISR_LSTEI 0x00002000UL +#define ISR_OVFI 0x00001000UL +#define ISR_FLONI 0x00000800UL +#define ISR_RACEI 0x00000400UL +#define ISR_TXWB1I 0x00000200UL +#define ISR_TXWB0I 0x00000100UL +#define ISR_PTX3I 0x00000080UL +#define ISR_PTX2I 0x00000040UL +#define ISR_PTX1I 0x00000020UL +#define ISR_PTX0I 0x00000010UL +#define ISR_PTXI 0x00000008UL +#define ISR_PRXI 0x00000004UL +#define ISR_PPTXI 0x00000002UL +#define ISR_PPRXI 0x00000001UL + +/* + * Bits in the IMR register + */ + +#define IMR_TXSTLM 0x02000000UL +#define IMR_UDPIM 0x00400000UL +#define IMR_MIBFIM 0x00200000UL +#define IMR_SHDNIM 0x00100000UL +#define IMR_PHYIM 0x00080000UL +#define IMR_PWEIM 0x00040000UL +#define IMR_TMR1IM 0x00020000UL +#define IMR_TMR0IM 0x00010000UL + +#define IMR_SRCIM 0x00008000UL +#define IMR_LSTPEIM 0x00004000UL +#define IMR_LSTEIM 0x00002000UL +#define IMR_OVFIM 0x00001000UL +#define IMR_FLONIM 0x00000800UL +#define IMR_RACEIM 0x00000400UL +#define IMR_TXWB1IM 0x00000200UL +#define IMR_TXWB0IM 0x00000100UL + +#define IMR_PTX3IM 0x00000080UL +#define IMR_PTX2IM 0x00000040UL +#define IMR_PTX1IM 0x00000020UL +#define IMR_PTX0IM 0x00000010UL +#define IMR_PTXIM 0x00000008UL +#define IMR_PRXIM 0x00000004UL +#define IMR_PPTXIM 0x00000002UL +#define IMR_PPRXIM 0x00000001UL + +/* 0x0013FB0FUL = initial value of IMR */ + +#define INT_MASK_DEF (IMR_PPTXIM|IMR_PPRXIM|IMR_PTXIM|IMR_PRXIM|\ + IMR_PWEIM|IMR_TXWB0IM|IMR_TXWB1IM|IMR_FLONIM|\ + IMR_OVFIM|IMR_LSTEIM|IMR_LSTPEIM|IMR_SRCIM|IMR_MIBFIM|\ + IMR_SHDNIM|IMR_TMR1IM|IMR_TMR0IM|IMR_TXSTLM) + +/* + * Bits in the TDCSR0/1, RDCSR0 register + */ + +#define TRDCSR_DEAD 0x0008 +#define TRDCSR_WAK 0x0004 +#define TRDCSR_ACT 0x0002 +#define TRDCSR_RUN 0x0001 + +/* + * Bits in the CAMADDR register + */ + +#define CAMADDR_CAMEN 0x80 +#define CAMADDR_VCAMSL 0x40 + +/* + * Bits in the CAMCR register + */ + +#define CAMCR_PS1 0x80 +#define CAMCR_PS0 0x40 +#define CAMCR_AITRPKT 0x20 +#define CAMCR_AITR16 0x10 +#define CAMCR_CAMRD 0x08 +#define CAMCR_CAMWR 0x04 +#define CAMCR_PS_CAM_MASK 0x40 +#define CAMCR_PS_CAM_DATA 0x80 +#define CAMCR_PS_MAR 0x00 + +/* + * Bits in the MIICFG register + */ + +#define MIICFG_MPO1 0x80 +#define MIICFG_MPO0 0x40 +#define MIICFG_MFDC 0x20 + +/* + * Bits in the MIISR register + */ + +#define MIISR_MIDLE 0x80 + +/* + * Bits in the PHYSR0 register + */ + +#define PHYSR0_PHYRST 0x80 +#define PHYSR0_LINKGD 0x40 +#define PHYSR0_FDPX 0x10 +#define PHYSR0_SPDG 0x08 +#define 
PHYSR0_SPD10 0x04 +#define PHYSR0_RXFLC 0x02 +#define PHYSR0_TXFLC 0x01 + +/* + * Bits in the PHYSR1 register + */ + +#define PHYSR1_PHYTBI 0x01 + +/* + * Bits in the MIICR register + */ + +#define MIICR_MAUTO 0x80 +#define MIICR_RCMD 0x40 +#define MIICR_WCMD 0x20 +#define MIICR_MDPM 0x10 +#define MIICR_MOUT 0x08 +#define MIICR_MDO 0x04 +#define MIICR_MDI 0x02 +#define MIICR_MDC 0x01 + +/* + * Bits in the MIIADR register + */ + +#define MIIADR_SWMPL 0x80 + +/* + * Bits in the CFGA register + */ + +#define CFGA_PMHCTG 0x08 +#define CFGA_GPIO1PD 0x04 +#define CFGA_ABSHDN 0x02 +#define CFGA_PACPI 0x01 + +/* + * Bits in the CFGB register + */ + +#define CFGB_GTCKOPT 0x80 +#define CFGB_MIIOPT 0x40 +#define CFGB_CRSEOPT 0x20 +#define CFGB_OFSET 0x10 +#define CFGB_CRANDOM 0x08 +#define CFGB_CAP 0x04 +#define CFGB_MBA 0x02 +#define CFGB_BAKOPT 0x01 + +/* + * Bits in the CFGC register + */ + +#define CFGC_EELOAD 0x80 +#define CFGC_BROPT 0x40 +#define CFGC_DLYEN 0x20 +#define CFGC_DTSEL 0x10 +#define CFGC_BTSEL 0x08 +#define CFGC_BPS2 0x04 /* bootrom select[2] */ +#define CFGC_BPS1 0x02 /* bootrom select[1] */ +#define CFGC_BPS0 0x01 /* bootrom select[0] */ + +/* + * Bits in the CFGD register + */ + +#define CFGD_IODIS 0x80 +#define CFGD_MSLVDACEN 0x40 +#define CFGD_CFGDACEN 0x20 +#define CFGD_PCI64EN 0x10 +#define CFGD_HTMRL4 0x08 + +/* + * Bits in the DCFG1 register + */ + +#define DCFG_XMWI 0x8000 +#define DCFG_XMRM 0x4000 +#define DCFG_XMRL 0x2000 +#define DCFG_PERDIS 0x1000 +#define DCFG_MRWAIT 0x0400 +#define DCFG_MWWAIT 0x0200 +#define DCFG_LATMEN 0x0100 + +/* + * Bits in the MCFG0 register + */ + +#define MCFG_RXARB 0x0080 +#define MCFG_RFT1 0x0020 +#define MCFG_RFT0 0x0010 +#define MCFG_LOWTHOPT 0x0008 +#define MCFG_PQEN 0x0004 +#define MCFG_RTGOPT 0x0002 +#define MCFG_VIDFR 0x0001 + +/* + * Bits in the MCFG1 register + */ + +#define MCFG_TXARB 0x8000 +#define MCFG_TXQBK1 0x0800 +#define MCFG_TXQBK0 0x0400 +#define MCFG_TXQNOBK 0x0200 +#define MCFG_SNAPOPT 0x0100 + +/* + * Bits in the PMCC register + */ + +#define PMCC_DSI 0x80 +#define PMCC_D2_DIS 0x40 +#define PMCC_D1_DIS 0x20 +#define PMCC_D3C_EN 0x10 +#define PMCC_D3H_EN 0x08 +#define PMCC_D2_EN 0x04 +#define PMCC_D1_EN 0x02 +#define PMCC_D0_EN 0x01 + +/* + * Bits in STICKHW + */ + +#define STICKHW_SWPTAG 0x10 +#define STICKHW_WOLSR 0x08 +#define STICKHW_WOLEN 0x04 +#define STICKHW_DS1 0x02 /* R/W by software/cfg cycle */ +#define STICKHW_DS0 0x01 /* suspend well DS write port */ + +/* + * Bits in the MIBCR register + */ + +#define MIBCR_MIBISTOK 0x80 +#define MIBCR_MIBISTGO 0x40 +#define MIBCR_MIBINC 0x20 +#define MIBCR_MIBHI 0x10 +#define MIBCR_MIBFRZ 0x08 +#define MIBCR_MIBFLSH 0x04 +#define MIBCR_MPTRINI 0x02 +#define MIBCR_MIBCLR 0x01 + +/* + * Bits in the EERSV register + */ + +#define EERSV_BOOT_RPL ((u8) 0x01) /* Boot method selection for VT6110 */ + +#define EERSV_BOOT_MASK ((u8) 0x06) +#define EERSV_BOOT_INT19 ((u8) 0x00) +#define EERSV_BOOT_INT18 ((u8) 0x02) +#define EERSV_BOOT_LOCAL ((u8) 0x04) +#define EERSV_BOOT_BEV ((u8) 0x06) + + +/* + * Bits in BPCMD + */ + +#define BPCMD_BPDNE 0x80 +#define BPCMD_EBPWR 0x02 +#define BPCMD_EBPRD 0x01 + +/* + * Bits in the EECSR register + */ + +#define EECSR_EMBP 0x40 /* eeprom embedded programming */ +#define EECSR_RELOAD 0x20 /* eeprom content reload */ +#define EECSR_DPM 0x10 /* eeprom direct programming */ +#define EECSR_ECS 0x08 /* eeprom CS pin */ +#define EECSR_ECK 0x04 /* eeprom CK pin */ +#define EECSR_EDI 0x02 /* eeprom DI pin */ +#define EECSR_EDO 0x01 /* eeprom DO pin */ + 
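+/*
+ * Hedged illustration (not part of the driver): forcing a reload of the
+ * EEPROM contents means setting EECSR_RELOAD and polling until the chip
+ * clears it again, roughly
+ *
+ *	BYTE_REG_BITS_ON(EECSR_RELOAD, &regs->EECSR);
+ *	while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD, &regs->EECSR))
+ *		udelay(10);
+ *
+ * The real helper, mac_eeprom_reload() later in this header, adds an
+ * upper bound on the number of polls.
+ */
+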
+/* + * Bits in the EMBCMD register + */ + +#define EMBCMD_EDONE 0x80 +#define EMBCMD_EWDIS 0x08 +#define EMBCMD_EWEN 0x04 +#define EMBCMD_EWR 0x02 +#define EMBCMD_ERD 0x01 + +/* + * Bits in TESTCFG register + */ + +#define TESTCFG_HBDIS 0x80 + +/* + * Bits in CHIPGCR register + */ + +#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */ +#define CHIPGCR_FCFDX 0x40 /* force full duplex */ +#define CHIPGCR_FCRESV 0x20 +#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */ +#define CHIPGCR_LPSOPT 0x08 +#define CHIPGCR_TM1US 0x04 +#define CHIPGCR_TM0US 0x02 +#define CHIPGCR_PHYINTEN 0x01 + +/* + * Bits in WOLCR0 + */ + +#define WOLCR_MSWOLEN7 0x0080 /* enable pattern match filtering */ +#define WOLCR_MSWOLEN6 0x0040 +#define WOLCR_MSWOLEN5 0x0020 +#define WOLCR_MSWOLEN4 0x0010 +#define WOLCR_MSWOLEN3 0x0008 +#define WOLCR_MSWOLEN2 0x0004 +#define WOLCR_MSWOLEN1 0x0002 +#define WOLCR_MSWOLEN0 0x0001 +#define WOLCR_ARP_EN 0x0001 + +/* + * Bits in WOLCR1 + */ + +#define WOLCR_LINKOFF_EN 0x0800 /* link off detected enable */ +#define WOLCR_LINKON_EN 0x0400 /* link on detected enable */ +#define WOLCR_MAGIC_EN 0x0200 /* magic packet filter enable */ +#define WOLCR_UNICAST_EN 0x0100 /* unicast filter enable */ + + +/* + * Bits in PWCFG + */ + +#define PWCFG_PHYPWOPT 0x80 /* internal MII I/F timing */ +#define PWCFG_PCISTICK 0x40 /* PCI sticky R/W enable */ +#define PWCFG_WOLTYPE 0x20 /* pulse(1) or button (0) */ +#define PWCFG_LEGCY_WOL 0x10 +#define PWCFG_PMCSR_PME_SR 0x08 +#define PWCFG_PMCSR_PME_EN 0x04 /* control by PCISTICK */ +#define PWCFG_LEGACY_WOLSR 0x02 /* Legacy WOL_SR shadow */ +#define PWCFG_LEGACY_WOLEN 0x01 /* Legacy WOL_EN shadow */ + +/* + * Bits in WOLCFG + */ + +#define WOLCFG_PMEOVR 0x80 /* for legacy use, force PMEEN always */ +#define WOLCFG_SAM 0x20 /* accept multicast case reset, default=0 */ +#define WOLCFG_SAB 0x10 /* accept broadcast case reset, default=0 */ +#define WOLCFG_SMIIACC 0x08 /* ?? */ +#define WOLCFG_SGENWH 0x02 +#define WOLCFG_PHYINTEN 0x01 /* 0:PHYINT trigger enable, 1:use internal MII + to report status change */ +/* + * Bits in WOLSR1 + */ + +#define WOLSR_LINKOFF_INT 0x0800 +#define WOLSR_LINKON_INT 0x0400 +#define WOLSR_MAGIC_INT 0x0200 +#define WOLSR_UNICAST_INT 0x0100 + +/* + * Ethernet address filter type + */ + +#define PKT_TYPE_NONE 0x0000 /* Turn off receiver */ +#define PKT_TYPE_DIRECTED 0x0001 /* obselete, directed address is always accepted */ +#define PKT_TYPE_MULTICAST 0x0002 +#define PKT_TYPE_ALL_MULTICAST 0x0004 +#define PKT_TYPE_BROADCAST 0x0008 +#define PKT_TYPE_PROMISCUOUS 0x0020 +#define PKT_TYPE_LONG 0x2000 /* NOTE.... the definition of LONG is >2048 bytes in our chip */ +#define PKT_TYPE_RUNT 0x4000 +#define PKT_TYPE_ERROR 0x8000 /* Accept error packets, e.g. CRC error */ + +/* + * Loopback mode + */ + +#define MAC_LB_NONE 0x00 +#define MAC_LB_INTERNAL 0x01 +#define MAC_LB_EXTERNAL 0x02 + +/* + * Enabled mask value of irq + */ + +#if defined(_SIM) +#define IMR_MASK_VALUE 0x0033FF0FUL /* initial value of IMR + set IMR0 to 0x0F according to spec */ + +#else +#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR + ignore MIBFI,RACEI to + reduce intr. frequency + NOTE.... 
do not enable NoBuf int mask at driver driver + when (1) NoBuf -> RxThreshold = SF + (2) OK -> RxThreshold = original value + */ +#endif + +/* + * Revision id + */ + +#define REV_ID_VT3119_A0 0x00 +#define REV_ID_VT3119_A1 0x01 +#define REV_ID_VT3216_A0 0x10 + +/* + * Max time out delay time + */ + +#define W_MAX_TIMEOUT 0x0FFFU + + +/* + * MAC registers as a structure. Cannot be directly accessed this + * way but generates offsets for readl/writel() calls + */ + +struct mac_regs { + volatile u8 PAR[6]; /* 0x00 */ + volatile u8 RCR; + volatile u8 TCR; + + volatile __le32 CR0Set; /* 0x08 */ + volatile __le32 CR0Clr; /* 0x0C */ + + volatile u8 MARCAM[8]; /* 0x10 */ + + volatile __le32 DecBaseHi; /* 0x18 */ + volatile __le16 DbfBaseHi; /* 0x1C */ + volatile __le16 reserved_1E; + + volatile __le16 ISRCTL; /* 0x20 */ + volatile u8 TXESR; + volatile u8 RXESR; + + volatile __le32 ISR; /* 0x24 */ + volatile __le32 IMR; + + volatile __le32 TDStatusPort; /* 0x2C */ + + volatile __le16 TDCSRSet; /* 0x30 */ + volatile u8 RDCSRSet; + volatile u8 reserved_33; + volatile __le16 TDCSRClr; + volatile u8 RDCSRClr; + volatile u8 reserved_37; + + volatile __le32 RDBaseLo; /* 0x38 */ + volatile __le16 RDIdx; /* 0x3C */ + volatile u8 TQETMR; /* 0x3E, VT3216 and above only */ + volatile u8 RQETMR; /* 0x3F, VT3216 and above only */ + + volatile __le32 TDBaseLo[4]; /* 0x40 */ + + volatile __le16 RDCSize; /* 0x50 */ + volatile __le16 TDCSize; /* 0x52 */ + volatile __le16 TDIdx[4]; /* 0x54 */ + volatile __le16 tx_pause_timer; /* 0x5C */ + volatile __le16 RBRDU; /* 0x5E */ + + volatile __le32 FIFOTest0; /* 0x60 */ + volatile __le32 FIFOTest1; /* 0x64 */ + + volatile u8 CAMADDR; /* 0x68 */ + volatile u8 CAMCR; /* 0x69 */ + volatile u8 GFTEST; /* 0x6A */ + volatile u8 FTSTCMD; /* 0x6B */ + + volatile u8 MIICFG; /* 0x6C */ + volatile u8 MIISR; + volatile u8 PHYSR0; + volatile u8 PHYSR1; + volatile u8 MIICR; + volatile u8 MIIADR; + volatile __le16 MIIDATA; + + volatile __le16 SoftTimer0; /* 0x74 */ + volatile __le16 SoftTimer1; + + volatile u8 CFGA; /* 0x78 */ + volatile u8 CFGB; + volatile u8 CFGC; + volatile u8 CFGD; + + volatile __le16 DCFG; /* 0x7C */ + volatile __le16 MCFG; + + volatile u8 TBIST; /* 0x80 */ + volatile u8 RBIST; + volatile u8 PMCPORT; + volatile u8 STICKHW; + + volatile u8 MIBCR; /* 0x84 */ + volatile u8 reserved_85; + volatile u8 rev_id; + volatile u8 PORSTS; + + volatile __le32 MIBData; /* 0x88 */ + + volatile __le16 EEWrData; + + volatile u8 reserved_8E; + volatile u8 BPMDWr; + volatile u8 BPCMD; + volatile u8 BPMDRd; + + volatile u8 EECHKSUM; /* 0x92 */ + volatile u8 EECSR; + + volatile __le16 EERdData; /* 0x94 */ + volatile u8 EADDR; + volatile u8 EMBCMD; + + + volatile u8 JMPSR0; /* 0x98 */ + volatile u8 JMPSR1; + volatile u8 JMPSR2; + volatile u8 JMPSR3; + volatile u8 CHIPGSR; /* 0x9C */ + volatile u8 TESTCFG; + volatile u8 DEBUG; + volatile u8 CHIPGCR; + + volatile __le16 WOLCRSet; /* 0xA0 */ + volatile u8 PWCFGSet; + volatile u8 WOLCFGSet; + + volatile __le16 WOLCRClr; /* 0xA4 */ + volatile u8 PWCFGCLR; + volatile u8 WOLCFGClr; + + volatile __le16 WOLSRSet; /* 0xA8 */ + volatile __le16 reserved_AA; + + volatile __le16 WOLSRClr; /* 0xAC */ + volatile __le16 reserved_AE; + + volatile __le16 PatternCRC[8]; /* 0xB0 */ + volatile __le32 ByteMask[4][4]; /* 0xC0 */ +}; + + +enum hw_mib { + HW_MIB_ifRxAllPkts = 0, + HW_MIB_ifRxOkPkts, + HW_MIB_ifTxOkPkts, + HW_MIB_ifRxErrorPkts, + HW_MIB_ifRxRuntOkPkt, + HW_MIB_ifRxRuntErrPkt, + HW_MIB_ifRx64Pkts, + HW_MIB_ifTx64Pkts, + HW_MIB_ifRx65To127Pkts, + 
HW_MIB_ifTx65To127Pkts, + HW_MIB_ifRx128To255Pkts, + HW_MIB_ifTx128To255Pkts, + HW_MIB_ifRx256To511Pkts, + HW_MIB_ifTx256To511Pkts, + HW_MIB_ifRx512To1023Pkts, + HW_MIB_ifTx512To1023Pkts, + HW_MIB_ifRx1024To1518Pkts, + HW_MIB_ifTx1024To1518Pkts, + HW_MIB_ifTxEtherCollisions, + HW_MIB_ifRxPktCRCE, + HW_MIB_ifRxJumboPkts, + HW_MIB_ifTxJumboPkts, + HW_MIB_ifRxMacControlFrames, + HW_MIB_ifTxMacControlFrames, + HW_MIB_ifRxPktFAE, + HW_MIB_ifRxLongOkPkt, + HW_MIB_ifRxLongPktErrPkt, + HW_MIB_ifTXSQEErrors, + HW_MIB_ifRxNobuf, + HW_MIB_ifRxSymbolErrors, + HW_MIB_ifInRangeLengthErrors, + HW_MIB_ifLateCollisions, + HW_MIB_SIZE +}; + +enum chip_type { + CHIP_TYPE_VT6110 = 1, +}; + +struct velocity_info_tbl { + enum chip_type chip_id; + const char *name; + int txqueue; + u32 flags; +}; + +#define mac_hw_mibs_init(regs) {\ + BYTE_REG_BITS_ON(MIBCR_MIBFRZ,&((regs)->MIBCR));\ + BYTE_REG_BITS_ON(MIBCR_MIBCLR,&((regs)->MIBCR));\ + do {}\ + while (BYTE_REG_BITS_IS_ON(MIBCR_MIBCLR,&((regs)->MIBCR)));\ + BYTE_REG_BITS_OFF(MIBCR_MIBFRZ,&((regs)->MIBCR));\ +} + +#define mac_read_isr(regs) readl(&((regs)->ISR)) +#define mac_write_isr(regs, x) writel((x),&((regs)->ISR)) +#define mac_clear_isr(regs) writel(0xffffffffL,&((regs)->ISR)) + +#define mac_write_int_mask(mask, regs) writel((mask),&((regs)->IMR)); +#define mac_disable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Clr)) +#define mac_enable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Set)) + +#define mac_set_dma_length(regs, n) {\ + BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\ +} + +#define mac_set_rx_thresh(regs, n) {\ + BYTE_REG_BITS_SET((n),(MCFG_RFT0|MCFG_RFT1),&((regs)->MCFG));\ +} + +#define mac_rx_queue_run(regs) {\ + writeb(TRDCSR_RUN, &((regs)->RDCSRSet));\ +} + +#define mac_rx_queue_wake(regs) {\ + writeb(TRDCSR_WAK, &((regs)->RDCSRSet));\ +} + +#define mac_tx_queue_run(regs, n) {\ + writew(TRDCSR_RUN<<((n)*4),&((regs)->TDCSRSet));\ +} + +#define mac_tx_queue_wake(regs, n) {\ + writew(TRDCSR_WAK<<(n*4),&((regs)->TDCSRSet));\ +} + +static inline void mac_eeprom_reload(struct mac_regs __iomem * regs) { + int i=0; + + BYTE_REG_BITS_ON(EECSR_RELOAD,&(regs->EECSR)); + do { + udelay(10); + if (i++>0x1000) + break; + } while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&(regs->EECSR))); +} + +/* + * Header for WOL definitions. Used to compute hashes + */ + +typedef u8 MCAM_ADDR[ETH_ALEN]; + +struct arp_packet { + u8 dest_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + __be16 type; + __be16 ar_hrd; + __be16 ar_pro; + u8 ar_hln; + u8 ar_pln; + __be16 ar_op; + u8 ar_sha[ETH_ALEN]; + u8 ar_sip[4]; + u8 ar_tha[ETH_ALEN]; + u8 ar_tip[4]; +} __packed; + +struct _magic_packet { + u8 dest_mac[6]; + u8 src_mac[6]; + __be16 type; + u8 MAC[16][6]; + u8 password[6]; +} __packed; + +/* + * Store for chip context when saving and restoring status. Not + * all fields are saved/restored currently. 
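+ *
+ * Hedged sketch, not driver code: a suspend path would snapshot the
+ * 256-byte register window into ->mac_reg and write it back on resume,
+ * e.g. (ctx, regs and i are hypothetical locals)
+ *
+ *	for (i = 0; i < 256; i++)
+ *		ctx->mac_reg[i] = readb((u8 __iomem *)regs + i);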
+ */ + +struct velocity_context { + u8 mac_reg[256]; + MCAM_ADDR cam_addr[MCAM_SIZE]; + u16 vcam[VCAM_SIZE]; + u32 cammask[2]; + u32 patcrc[2]; + u32 pattern[8]; +}; + +/* + * Registers in the MII (offset unit is WORD) + */ + +// Marvell 88E1000/88E1000S +#define MII_REG_PSCR 0x10 // PHY specific control register + +// +// Bits in the Silicon revision register +// + +#define TCSR_ECHODIS 0x2000 // +#define AUXCR_MDPPS 0x0004 // + +// Bits in the PLED register +#define PLED_LALBE 0x0004 // + +// Marvell 88E1000/88E1000S Bits in the PHY specific control register (10h) +#define PSCR_ACRSTX 0x0800 // Assert CRS on Transmit + +#define PHYID_CICADA_CS8201 0x000FC410UL +#define PHYID_VT3216_32BIT 0x000FC610UL +#define PHYID_VT3216_64BIT 0x000FC600UL +#define PHYID_MARVELL_1000 0x01410C50UL +#define PHYID_MARVELL_1000S 0x01410C40UL +#define PHYID_ICPLUS_IP101A 0x02430C54UL +#define PHYID_REV_ID_MASK 0x0000000FUL + +#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) + +#define MII_REG_BITS_ON(x,i,p) do {\ + u16 w;\ + velocity_mii_read((p),(i),&(w));\ + (w)|=(x);\ + velocity_mii_write((p),(i),(w));\ +} while (0) + +#define MII_REG_BITS_OFF(x,i,p) do {\ + u16 w;\ + velocity_mii_read((p),(i),&(w));\ + (w)&=(~(x));\ + velocity_mii_write((p),(i),(w));\ +} while (0) + +#define MII_REG_BITS_IS_ON(x,i,p) ({\ + u16 w;\ + velocity_mii_read((p),(i),&(w));\ + ((int) ((w) & (x)));}) + +#define MII_GET_PHY_ID(p) ({\ + u32 id;\ + velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\ + velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\ + (id);}) + +/* + * Inline debug routine + */ + + +enum velocity_msg_level { + MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation. + MSG_LEVEL_NOTICE = 1, //Some errors need users to be notified. + MSG_LEVEL_INFO = 2, //Normal message. + MSG_LEVEL_VERBOSE = 3, //Will report all trival errors. + MSG_LEVEL_DEBUG = 4 //Only for debug purpose. +}; + +#ifdef VELOCITY_DEBUG +#define ASSERT(x) { \ + if (!(x)) { \ + printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\ + __func__, __LINE__);\ + BUG(); \ + }\ +} +#define VELOCITY_DBG(p,args...) printk(p, ##args) +#else +#define ASSERT(x) +#define VELOCITY_DBG(x) +#endif + +#define VELOCITY_PRT(l, p, args...) 
do {if (l<=msglevel) printk( p ,##args);} while (0) + +#define VELOCITY_PRT_CAMMASK(p,t) {\ + int i;\ + if ((t)==VELOCITY_MULTICAST_CAM) {\ + for (i=0;i<(MCAM_SIZE/8);i++)\ + printk("%02X",(p)->mCAMmask[i]);\ + }\ + else {\ + for (i=0;i<(VCAM_SIZE/8);i++)\ + printk("%02X",(p)->vCAMmask[i]);\ + }\ + printk("\n");\ +} + + + +#define VELOCITY_WOL_MAGIC 0x00000000UL +#define VELOCITY_WOL_PHY 0x00000001UL +#define VELOCITY_WOL_ARP 0x00000002UL +#define VELOCITY_WOL_UCAST 0x00000004UL +#define VELOCITY_WOL_BCAST 0x00000010UL +#define VELOCITY_WOL_MCAST 0x00000020UL +#define VELOCITY_WOL_MAGIC_SEC 0x00000040UL + +/* + * Flags for options + */ + +#define VELOCITY_FLAGS_TAGGING 0x00000001UL +#define VELOCITY_FLAGS_RX_CSUM 0x00000004UL +#define VELOCITY_FLAGS_IP_ALIGN 0x00000008UL +#define VELOCITY_FLAGS_VAL_PKT_LEN 0x00000010UL + +#define VELOCITY_FLAGS_FLOW_CTRL 0x01000000UL + +/* + * Flags for driver status + */ + +#define VELOCITY_FLAGS_OPENED 0x00010000UL +#define VELOCITY_FLAGS_VMNS_CONNECTED 0x00020000UL +#define VELOCITY_FLAGS_VMNS_COMMITTED 0x00040000UL +#define VELOCITY_FLAGS_WOL_ENABLED 0x00080000UL + +/* + * Flags for MII status + */ + +#define VELOCITY_LINK_FAIL 0x00000001UL +#define VELOCITY_SPEED_10 0x00000002UL +#define VELOCITY_SPEED_100 0x00000004UL +#define VELOCITY_SPEED_1000 0x00000008UL +#define VELOCITY_DUPLEX_FULL 0x00000010UL +#define VELOCITY_AUTONEG_ENABLE 0x00000020UL +#define VELOCITY_FORCED_BY_EEPROM 0x00000040UL + +/* + * For velocity_set_media_duplex + */ + +#define VELOCITY_LINK_CHANGE 0x00000001UL + +enum speed_opt { + SPD_DPX_AUTO = 0, + SPD_DPX_100_HALF = 1, + SPD_DPX_100_FULL = 2, + SPD_DPX_10_HALF = 3, + SPD_DPX_10_FULL = 4, + SPD_DPX_1000_FULL = 5 +}; + +enum velocity_init_type { + VELOCITY_INIT_COLD = 0, + VELOCITY_INIT_RESET, + VELOCITY_INIT_WOL +}; + +enum velocity_flow_cntl_type { + FLOW_CNTL_DEFAULT = 1, + FLOW_CNTL_TX, + FLOW_CNTL_RX, + FLOW_CNTL_TX_RX, + FLOW_CNTL_DISABLE, +}; + +struct velocity_opt { + int numrx; /* Number of RX descriptors */ + int numtx; /* Number of TX descriptors */ + enum speed_opt spd_dpx; /* Media link mode */ + + int DMA_length; /* DMA length */ + int rx_thresh; /* RX_THRESH */ + int flow_cntl; + int wol_opts; /* Wake on lan options */ + int td_int_count; + int int_works; + int rx_bandwidth_hi; + int rx_bandwidth_lo; + int rx_bandwidth_en; + int rxqueue_timer; + int txqueue_timer; + int tx_intsup; + int rx_intsup; + u32 flags; +}; + +#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)])) + +#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) + +struct velocity_info { + struct device *dev; + struct pci_dev *pdev; + struct net_device *netdev; + int no_eeprom; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u8 ip_addr[4]; + enum chip_type chip_id; + + struct mac_regs __iomem * mac_regs; + unsigned long memaddr; + unsigned long ioaddr; + + struct tx_info { + int numq; + + /* FIXME: the locality of the data seems rather poor. 
*/ + int used[TX_QUEUE_NO]; + int curr[TX_QUEUE_NO]; + int tail[TX_QUEUE_NO]; + struct tx_desc *rings[TX_QUEUE_NO]; + struct velocity_td_info *infos[TX_QUEUE_NO]; + dma_addr_t pool_dma[TX_QUEUE_NO]; + } tx; + + struct rx_info { + int buf_sz; + + int dirty; + int curr; + u32 filled; + struct rx_desc *ring; + struct velocity_rd_info *info; /* It's an array */ + dma_addr_t pool_dma; + } rx; + + u32 mib_counter[MAX_HW_MIB_COUNTER]; + struct velocity_opt options; + + u32 int_mask; + + u32 flags; + + u32 mii_status; + u32 phy_id; + int multicast_limit; + + u8 vCAMmask[(VCAM_SIZE / 8)]; + u8 mCAMmask[(MCAM_SIZE / 8)]; + + spinlock_t lock; + + int wol_opts; + u8 wol_passwd[6]; + + struct velocity_context context; + + u32 ticks; + + u8 rev_id; + + struct napi_struct napi; +}; + +/** + * velocity_get_ip - find an IP address for the device + * @vptr: Velocity to query + * + * Dig out an IP address for this interface so that we can + * configure wakeup with WOL for ARP. If there are multiple IP + * addresses on this chain then we use the first - multi-IP WOL is not + * supported. + * + */ + +static inline int velocity_get_ip(struct velocity_info *vptr) +{ + struct in_device *in_dev; + struct in_ifaddr *ifa; + int res = -ENOENT; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(vptr->netdev); + if (in_dev != NULL) { + ifa = (struct in_ifaddr *) in_dev->ifa_list; + if (ifa != NULL) { + memcpy(vptr->ip_addr, &ifa->ifa_address, 4); + res = 0; + } + } + rcu_read_unlock(); + return res; +} + +/** + * velocity_update_hw_mibs - fetch MIB counters from chip + * @vptr: velocity to update + * + * The velocity hardware keeps certain counters in the hardware + * side. We need to read these when the user asks for statistics + * or when they overflow (causing an interrupt). The read of the + * statistic clears it, so we keep running master counters in user + * space. + */ + +static inline void velocity_update_hw_mibs(struct velocity_info *vptr) +{ + u32 tmp; + int i; + BYTE_REG_BITS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR)); + + while (BYTE_REG_BITS_IS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR))); + + BYTE_REG_BITS_ON(MIBCR_MPTRINI, &(vptr->mac_regs->MIBCR)); + for (i = 0; i < HW_MIB_SIZE; i++) { + tmp = readl(&(vptr->mac_regs->MIBData)) & 0x00FFFFFFUL; + vptr->mib_counter[i] += tmp; + } +} + +/** + * init_flow_control_register - set up flow control + * @vptr: velocity to configure + * + * Configure the flow control registers for this velocity device. + */ + +static inline void init_flow_control_register(struct velocity_info *vptr) +{ + struct mac_regs __iomem * regs = vptr->mac_regs; + + /* Set {XHITH1, XHITH0, XLTH1, XLTH0} in FlowCR1 to {1, 0, 1, 1} + depend on RD=64, and Turn on XNOEN in FlowCR1 */ + writel((CR0_XONEN | CR0_XHITH1 | CR0_XLTH1 | CR0_XLTH0), ®s->CR0Set); + writel((CR0_FDXTFCEN | CR0_FDXRFCEN | CR0_HDXFCEN | CR0_XHITH0), ®s->CR0Clr); + + /* Set TxPauseTimer to 0xFFFF */ + writew(0xFFFF, ®s->tx_pause_timer); + + /* Initialize RBRDU to Rx buffer count. */ + writew(vptr->options.numrx, ®s->RBRDU); +} + + +#endif diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig new file mode 100644 index 000000000..b4d281626 --- /dev/null +++ b/drivers/net/ethernet/wiznet/Kconfig @@ -0,0 +1,74 @@ +# +# WIZnet devices configuration +# + +config NET_VENDOR_WIZNET + bool "WIZnet devices" + depends on HAS_IOMEM + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about WIZnet devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_WIZNET + +config WIZNET_W5100 + tristate "WIZnet W5100 Ethernet support" + depends on HAS_IOMEM + ---help--- + Support for WIZnet W5100 chips. + + W5100 is a single chip with integrated 10/100 Ethernet MAC, + PHY and hardware TCP/IP stack, but this driver is limited to + the MAC and PHY functions only, onchip TCP/IP is unused. + + To compile this driver as a module, choose M here: the module + will be called w5100. + +config WIZNET_W5300 + tristate "WIZnet W5300 Ethernet support" + depends on HAS_IOMEM + ---help--- + Support for WIZnet W5300 chips. + + W5300 is a single chip with integrated 10/100 Ethernet MAC, + PHY and hardware TCP/IP stack, but this driver is limited to + the MAC and PHY functions only, onchip TCP/IP is unused. + + To compile this driver as a module, choose M here: the module + will be called w5300. + +choice + prompt "WIZnet interface mode" + depends on WIZNET_W5100 || WIZNET_W5300 + default WIZNET_BUS_ANY + +config WIZNET_BUS_DIRECT + bool "Direct address bus mode" + ---help--- + In direct address mode host system can directly access all registers + after mapping to Memory-Mapped I/O space. + +config WIZNET_BUS_INDIRECT + bool "Indirect address bus mode" + ---help--- + In indirect address mode host system indirectly accesses registers + using Indirect Mode Address Register and Indirect Mode Data Register, + which are directly mapped to Memory-Mapped I/O space. + +config WIZNET_BUS_ANY + bool "Select interface mode in runtime" + ---help--- + If interface mode is unknown in compile time, it can be selected + in runtime from board/platform resources configuration. + + Performance may decrease compared to explicitly selected bus mode. +endchoice + +endif # NET_VENDOR_WIZNET diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile new file mode 100644 index 000000000..c61453522 --- /dev/null +++ b/drivers/net/ethernet/wiznet/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_WIZNET_W5100) += w5100.o +obj-$(CONFIG_WIZNET_W5300) += w5300.o diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c new file mode 100644 index 000000000..8b282d0b1 --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -0,0 +1,797 @@ +/* + * Ethernet driver for the WIZnet W5100 chip. + * + * Copyright (C) 2006-2008 WIZnet Co.,Ltd. + * Copyright (C) 2012 Mike Sinkovsky + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "w5100" +#define DRV_VERSION "2012-04-04" + +MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION); +MODULE_AUTHOR("Mike Sinkovsky "); +MODULE_ALIAS("platform:"DRV_NAME); +MODULE_LICENSE("GPL"); + +/* + * Registers + */ +#define W5100_COMMON_REGS 0x0000 +#define W5100_MR 0x0000 /* Mode Register */ +#define MR_RST 0x80 /* S/W reset */ +#define MR_PB 0x10 /* Ping block */ +#define MR_AI 0x02 /* Address Auto-Increment */ +#define MR_IND 0x01 /* Indirect mode */ +#define W5100_SHAR 0x0009 /* Source MAC address */ +#define W5100_IR 0x0015 /* Interrupt Register */ +#define W5100_IMR 0x0016 /* Interrupt Mask Register */ +#define IR_S0 0x01 /* S0 interrupt */ +#define W5100_RTR 0x0017 /* Retry Time-value Register */ +#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */ +#define W5100_RMSR 0x001a /* Receive Memory Size */ +#define W5100_TMSR 0x001b /* Transmit Memory Size */ +#define W5100_COMMON_REGS_LEN 0x0040 + +#define W5100_S0_REGS 0x0400 +#define W5100_S0_MR 0x0400 /* S0 Mode Register */ +#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */ +#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */ +#define W5100_S0_CR 0x0401 /* S0 Command Register */ +#define S0_CR_OPEN 0x01 /* OPEN command */ +#define S0_CR_CLOSE 0x10 /* CLOSE command */ +#define S0_CR_SEND 0x20 /* SEND command */ +#define S0_CR_RECV 0x40 /* RECV command */ +#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */ +#define S0_IR_SENDOK 0x10 /* complete sending */ +#define S0_IR_RECV 0x04 /* receiving data */ +#define W5100_S0_SR 0x0403 /* S0 Status Register */ +#define S0_SR_MACRAW 0x42 /* mac raw mode */ +#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */ +#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */ +#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */ +#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */ +#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */ +#define W5100_S0_REGS_LEN 0x0040 + +#define W5100_TX_MEM_START 0x4000 +#define W5100_TX_MEM_END 0x5fff +#define W5100_TX_MEM_MASK 0x1fff +#define W5100_RX_MEM_START 0x6000 +#define W5100_RX_MEM_END 0x7fff +#define W5100_RX_MEM_MASK 0x1fff + +/* + * Device driver private data structure + */ +struct w5100_priv { + void __iomem *base; + spinlock_t reg_lock; + bool indirect; + u8 (*read)(struct w5100_priv *priv, u16 addr); + void (*write)(struct w5100_priv *priv, u16 addr, u8 data); + u16 (*read16)(struct w5100_priv *priv, u16 addr); + void (*write16)(struct w5100_priv *priv, u16 addr, u16 data); + void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); + void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); + int irq; + int link_irq; + int link_gpio; + + struct napi_struct napi; + struct net_device *ndev; + bool promisc; + u32 msg_enable; +}; + +/************************************************************************ + * + * Lowlevel I/O functions + * + ***********************************************************************/ + +/* + * In direct address mode host system can directly access W5100 registers + * after mapping to Memory-Mapped I/O space. + * + * 0x8000 bytes are required for memory space. 
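+ *
+ * Hedged example (assuming a bus shift of 0, i.e. no address stride):
+ * reading the Mode Register is then a single ioread8() into the mapped
+ * window,
+ *
+ *	u8 mr = ioread8(priv->base + W5100_MR);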
+ */ +static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr) +{ + return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +static inline void w5100_write_direct(struct w5100_priv *priv, + u16 addr, u8 data) +{ + iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr) +{ + u16 data; + data = w5100_read_direct(priv, addr) << 8; + data |= w5100_read_direct(priv, addr + 1); + return data; +} + +static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data) +{ + w5100_write_direct(priv, addr, data >> 8); + w5100_write_direct(priv, addr + 1, data); +} + +static void w5100_readbuf_direct(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); + int i; + + for (i = 0; i < len; i++, addr++) { + if (unlikely(addr > W5100_RX_MEM_END)) + addr = W5100_RX_MEM_START; + *buf++ = w5100_read_direct(priv, addr); + } +} + +static void w5100_writebuf_direct(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); + int i; + + for (i = 0; i < len; i++, addr++) { + if (unlikely(addr > W5100_TX_MEM_END)) + addr = W5100_TX_MEM_START; + w5100_write_direct(priv, addr, *buf++); + } +} + +/* + * In indirect address mode host system indirectly accesses registers by + * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data + * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space. + * Mode Register (MR) is directly accessible. + * + * Only 0x04 bytes are required for memory space. + */ +#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */ +#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */ + +static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr) +{ + unsigned long flags; + u8 data; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + data = w5100_read_direct(priv, W5100_IDM_DR); + spin_unlock_irqrestore(&priv->reg_lock, flags); + + return data; +} + +static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + w5100_write_direct(priv, W5100_IDM_DR, data); + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr) +{ + unsigned long flags; + u16 data; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + data = w5100_read_direct(priv, W5100_IDM_DR) << 8; + data |= w5100_read_direct(priv, W5100_IDM_DR); + spin_unlock_irqrestore(&priv->reg_lock, flags); + + return data; +} + +static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + w5100_write_direct(priv, W5100_IDM_DR, data >> 8); + w5100_write_direct(priv, W5100_IDM_DR, data); + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +static void w5100_readbuf_indirect(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + + for (i = 0; i < 
len; i++, addr++) { + if (unlikely(addr > W5100_RX_MEM_END)) { + addr = W5100_RX_MEM_START; + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + } + *buf++ = w5100_read_direct(priv, W5100_IDM_DR); + } + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +static void w5100_writebuf_indirect(struct w5100_priv *priv, + u16 offset, u8 *buf, int len) +{ + u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + + for (i = 0; i < len; i++, addr++) { + if (unlikely(addr > W5100_TX_MEM_END)) { + addr = W5100_TX_MEM_START; + w5100_write16_direct(priv, W5100_IDM_AR, addr); + mmiowb(); + } + w5100_write_direct(priv, W5100_IDM_DR, *buf++); + } + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +#if defined(CONFIG_WIZNET_BUS_DIRECT) +#define w5100_read w5100_read_direct +#define w5100_write w5100_write_direct +#define w5100_read16 w5100_read16_direct +#define w5100_write16 w5100_write16_direct +#define w5100_readbuf w5100_readbuf_direct +#define w5100_writebuf w5100_writebuf_direct + +#elif defined(CONFIG_WIZNET_BUS_INDIRECT) +#define w5100_read w5100_read_indirect +#define w5100_write w5100_write_indirect +#define w5100_read16 w5100_read16_indirect +#define w5100_write16 w5100_write16_indirect +#define w5100_readbuf w5100_readbuf_indirect +#define w5100_writebuf w5100_writebuf_indirect + +#else /* CONFIG_WIZNET_BUS_ANY */ +#define w5100_read priv->read +#define w5100_write priv->write +#define w5100_read16 priv->read16 +#define w5100_write16 priv->write16 +#define w5100_readbuf priv->readbuf +#define w5100_writebuf priv->writebuf +#endif + +static int w5100_command(struct w5100_priv *priv, u16 cmd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + w5100_write(priv, W5100_S0_CR, cmd); + mmiowb(); + + while (w5100_read(priv, W5100_S0_CR) != 0) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + + return 0; +} + +static void w5100_write_macaddr(struct w5100_priv *priv) +{ + struct net_device *ndev = priv->ndev; + int i; + + for (i = 0; i < ETH_ALEN; i++) + w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]); + mmiowb(); +} + +static void w5100_hw_reset(struct w5100_priv *priv) +{ + w5100_write_direct(priv, W5100_MR, MR_RST); + mmiowb(); + mdelay(5); + w5100_write_direct(priv, W5100_MR, priv->indirect ? + MR_PB | MR_AI | MR_IND : + MR_PB); + mmiowb(); + w5100_write(priv, W5100_IMR, 0); + w5100_write_macaddr(priv); + + /* Configure 16K of internal memory + * as 8K RX buffer and 8K TX buffer + */ + w5100_write(priv, W5100_RMSR, 0x03); + w5100_write(priv, W5100_TMSR, 0x03); + mmiowb(); +} + +static void w5100_hw_start(struct w5100_priv *priv) +{ + w5100_write(priv, W5100_S0_MR, priv->promisc ? 
+ S0_MR_MACRAW : S0_MR_MACRAW_MF); + mmiowb(); + w5100_command(priv, S0_CR_OPEN); + w5100_write(priv, W5100_IMR, IR_S0); + mmiowb(); +} + +static void w5100_hw_close(struct w5100_priv *priv) +{ + w5100_write(priv, W5100_IMR, 0); + mmiowb(); + w5100_command(priv, S0_CR_CLOSE); +} + +/*********************************************************************** + * + * Device driver functions / callbacks + * + ***********************************************************************/ + +static void w5100_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(ndev->dev.parent), + sizeof(info->bus_info)); +} + +static u32 w5100_get_link(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + if (gpio_is_valid(priv->link_gpio)) + return !!gpio_get_value(priv->link_gpio); + + return 1; +} + +static u32 w5100_get_msglevel(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void w5100_set_msglevel(struct net_device *ndev, u32 value) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + priv->msg_enable = value; +} + +static int w5100_get_regs_len(struct net_device *ndev) +{ + return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN; +} + +static void w5100_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *_buf) +{ + struct w5100_priv *priv = netdev_priv(ndev); + u8 *buf = _buf; + u16 i; + + regs->version = 1; + for (i = 0; i < W5100_COMMON_REGS_LEN; i++) + *buf++ = w5100_read(priv, W5100_COMMON_REGS + i); + for (i = 0; i < W5100_S0_REGS_LEN; i++) + *buf++ = w5100_read(priv, W5100_S0_REGS + i); +} + +static void w5100_tx_timeout(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + w5100_hw_reset(priv); + w5100_hw_start(priv); + ndev->stats.tx_errors++; + ndev->trans_start = jiffies; + netif_wake_queue(ndev); +} + +static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + u16 offset; + + netif_stop_queue(ndev); + + offset = w5100_read16(priv, W5100_S0_TX_WR); + w5100_writebuf(priv, offset, skb->data, skb->len); + w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len); + mmiowb(); + ndev->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + dev_kfree_skb(skb); + + w5100_command(priv, S0_CR_SEND); + + return NETDEV_TX_OK; +} + +static int w5100_napi_poll(struct napi_struct *napi, int budget) +{ + struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi); + struct net_device *ndev = priv->ndev; + struct sk_buff *skb; + int rx_count; + u16 rx_len; + u16 offset; + u8 header[2]; + + for (rx_count = 0; rx_count < budget; rx_count++) { + u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR); + if (rx_buf_len == 0) + break; + + offset = w5100_read16(priv, W5100_S0_RX_RD); + w5100_readbuf(priv, offset, header, 2); + rx_len = get_unaligned_be16(header) - 2; + + skb = netdev_alloc_skb_ip_align(ndev, rx_len); + if (unlikely(!skb)) { + w5100_write16(priv, W5100_S0_RX_RD, + offset + rx_buf_len); + w5100_command(priv, S0_CR_RECV); + ndev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, rx_len); + w5100_readbuf(priv, offset + 2, skb->data, rx_len); + w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len); + mmiowb(); + w5100_command(priv, S0_CR_RECV); + skb->protocol = eth_type_trans(skb, ndev); + + 
netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += rx_len; + } + + if (rx_count < budget) { + napi_complete(napi); + w5100_write(priv, W5100_IMR, IR_S0); + mmiowb(); + } + + return rx_count; +} + +static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5100_priv *priv = netdev_priv(ndev); + + int ir = w5100_read(priv, W5100_S0_IR); + if (!ir) + return IRQ_NONE; + w5100_write(priv, W5100_S0_IR, ir); + mmiowb(); + + if (ir & S0_IR_SENDOK) { + netif_dbg(priv, tx_done, ndev, "tx done\n"); + netif_wake_queue(ndev); + } + + if (ir & S0_IR_RECV) { + if (napi_schedule_prep(&priv->napi)) { + w5100_write(priv, W5100_IMR, 0); + mmiowb(); + __napi_schedule(&priv->napi); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t w5100_detect_link(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5100_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + if (gpio_get_value(priv->link_gpio) != 0) { + netif_info(priv, link, ndev, "link is up\n"); + netif_carrier_on(ndev); + } else { + netif_info(priv, link, ndev, "link is down\n"); + netif_carrier_off(ndev); + } + } + + return IRQ_HANDLED; +} + +static void w5100_set_rx_mode(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + bool set_promisc = (ndev->flags & IFF_PROMISC) != 0; + + if (priv->promisc != set_promisc) { + priv->promisc = set_promisc; + w5100_hw_start(priv); + } +} + +static int w5100_set_macaddr(struct net_device *ndev, void *addr) +{ + struct w5100_priv *priv = netdev_priv(ndev); + struct sockaddr *sock_addr = addr; + + if (!is_valid_ether_addr(sock_addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + w5100_write_macaddr(priv); + return 0; +} + +static int w5100_open(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + netif_info(priv, ifup, ndev, "enabling\n"); + w5100_hw_start(priv); + napi_enable(&priv->napi); + netif_start_queue(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + return 0; +} + +static int w5100_stop(struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + netif_info(priv, ifdown, ndev, "shutting down\n"); + w5100_hw_close(priv); + netif_carrier_off(ndev); + netif_stop_queue(ndev); + napi_disable(&priv->napi); + return 0; +} + +static const struct ethtool_ops w5100_ethtool_ops = { + .get_drvinfo = w5100_get_drvinfo, + .get_msglevel = w5100_get_msglevel, + .set_msglevel = w5100_set_msglevel, + .get_link = w5100_get_link, + .get_regs_len = w5100_get_regs_len, + .get_regs = w5100_get_regs, +}; + +static const struct net_device_ops w5100_netdev_ops = { + .ndo_open = w5100_open, + .ndo_stop = w5100_stop, + .ndo_start_xmit = w5100_start_tx, + .ndo_tx_timeout = w5100_tx_timeout, + .ndo_set_rx_mode = w5100_set_rx_mode, + .ndo_set_mac_address = w5100_set_macaddr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int w5100_hw_probe(struct platform_device *pdev) +{ + struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + const char *name = netdev_name(ndev); + struct resource *mem; + int mem_size; + int irq; + int ret; + + if (data && is_valid_ether_addr(data->mac_addr)) { + memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); + } else { + eth_hw_addr_random(ndev); 
+ } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + mem_size = resource_size(mem); + + spin_lock_init(&priv->reg_lock); + priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; + if (priv->indirect) { + priv->read = w5100_read_indirect; + priv->write = w5100_write_indirect; + priv->read16 = w5100_read16_indirect; + priv->write16 = w5100_write16_indirect; + priv->readbuf = w5100_readbuf_indirect; + priv->writebuf = w5100_writebuf_indirect; + } else { + priv->read = w5100_read_direct; + priv->write = w5100_write_direct; + priv->read16 = w5100_read16_direct; + priv->write16 = w5100_write16_direct; + priv->readbuf = w5100_readbuf_direct; + priv->writebuf = w5100_writebuf_direct; + } + + w5100_hw_reset(priv); + if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + ret = request_irq(irq, w5100_interrupt, + IRQ_TYPE_LEVEL_LOW, name, ndev); + if (ret < 0) + return ret; + priv->irq = irq; + + priv->link_gpio = data ? data->link_gpio : -EINVAL; + if (gpio_is_valid(priv->link_gpio)) { + char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL); + if (!link_name) + return -ENOMEM; + snprintf(link_name, 16, "%s-link", name); + priv->link_irq = gpio_to_irq(priv->link_gpio); + if (request_any_context_irq(priv->link_irq, w5100_detect_link, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + link_name, priv->ndev) < 0) + priv->link_gpio = -EINVAL; + } + + netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq); + return 0; +} + +static int w5100_probe(struct platform_device *pdev) +{ + struct w5100_priv *priv; + struct net_device *ndev; + int err; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, ndev); + priv = netdev_priv(ndev); + priv->ndev = ndev; + + ndev->netdev_ops = &w5100_netdev_ops; + ndev->ethtool_ops = &w5100_ethtool_ops; + ndev->watchdog_timeo = HZ; + netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16); + + /* This chip doesn't support VLAN packets with normal MTU, + * so disable VLAN for this device. 
+ */ + ndev->features |= NETIF_F_VLAN_CHALLENGED; + + err = register_netdev(ndev); + if (err < 0) + goto err_register; + + err = w5100_hw_probe(pdev); + if (err < 0) + goto err_hw_probe; + + return 0; + +err_hw_probe: + unregister_netdev(ndev); +err_register: + free_netdev(ndev); + return err; +} + +static int w5100_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + + w5100_hw_reset(priv); + free_irq(priv->irq, ndev); + if (gpio_is_valid(priv->link_gpio)) + free_irq(priv->link_irq, ndev); + + unregister_netdev(ndev); + free_netdev(ndev); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int w5100_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_carrier_off(ndev); + netif_device_detach(ndev); + + w5100_hw_close(priv); + } + return 0; +} + +static int w5100_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5100_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + w5100_hw_reset(priv); + w5100_hw_start(priv); + + netif_device_attach(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + } + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); + +static struct platform_driver w5100_driver = { + .driver = { + .name = DRV_NAME, + .pm = &w5100_pm_ops, + }, + .probe = w5100_probe, + .remove = w5100_remove, +}; + +module_platform_driver(w5100_driver); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c new file mode 100644 index 000000000..8da7b930f --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -0,0 +1,709 @@ +/* + * Ethernet driver for the WIZnet W5300 chip. + * + * Copyright (C) 2008-2009 WIZnet Co.,Ltd. + * Copyright (C) 2011 Taehun Kim gmail.com> + * Copyright (C) 2012 Mike Sinkovsky + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "w5300" +#define DRV_VERSION "2012-04-04" + +MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION); +MODULE_AUTHOR("Mike Sinkovsky "); +MODULE_ALIAS("platform:"DRV_NAME); +MODULE_LICENSE("GPL"); + +/* + * Registers + */ +#define W5300_MR 0x0000 /* Mode Register */ +#define MR_DBW (1 << 15) /* Data bus width */ +#define MR_MPF (1 << 14) /* Mac layer pause frame */ +#define MR_WDF(n) (((n)&7)<<11) /* Write data fetch time */ +#define MR_RDH (1 << 10) /* Read data hold time */ +#define MR_FS (1 << 8) /* FIFO swap */ +#define MR_RST (1 << 7) /* S/W reset */ +#define MR_PB (1 << 4) /* Ping block */ +#define MR_DBS (1 << 2) /* Data bus swap */ +#define MR_IND (1 << 0) /* Indirect mode */ +#define W5300_IR 0x0002 /* Interrupt Register */ +#define W5300_IMR 0x0004 /* Interrupt Mask Register */ +#define IR_S0 0x0001 /* S0 interrupt */ +#define W5300_SHARL 0x0008 /* Source MAC address (0123) */ +#define W5300_SHARH 0x000c /* Source MAC address (45) */ +#define W5300_TMSRL 0x0020 /* Transmit Memory Size (0123) */ +#define W5300_TMSRH 0x0024 /* Transmit Memory Size (4567) */ +#define W5300_RMSRL 0x0028 /* Receive Memory Size (0123) */ +#define W5300_RMSRH 0x002c /* Receive Memory Size (4567) */ +#define W5300_MTYPE 0x0030 /* Memory Type */ +#define W5300_IDR 0x00fe /* Chip ID register */ +#define IDR_W5300 0x5300 /* =0x5300 for WIZnet W5300 */ +#define W5300_S0_MR 0x0200 /* S0 Mode Register */ +#define S0_MR_CLOSED 0x0000 /* Close mode */ +#define S0_MR_MACRAW 0x0004 /* MAC RAW mode (promiscuous) */ +#define S0_MR_MACRAW_MF 0x0044 /* MAC RAW mode (filtered) */ +#define W5300_S0_CR 0x0202 /* S0 Command Register */ +#define S0_CR_OPEN 0x0001 /* OPEN command */ +#define S0_CR_CLOSE 0x0010 /* CLOSE command */ +#define S0_CR_SEND 0x0020 /* SEND command */ +#define S0_CR_RECV 0x0040 /* RECV command */ +#define W5300_S0_IMR 0x0204 /* S0 Interrupt Mask Register */ +#define W5300_S0_IR 0x0206 /* S0 Interrupt Register */ +#define S0_IR_RECV 0x0004 /* Receive interrupt */ +#define S0_IR_SENDOK 0x0010 /* Send OK interrupt */ +#define W5300_S0_SSR 0x0208 /* S0 Socket Status Register */ +#define W5300_S0_TX_WRSR 0x0220 /* S0 TX Write Size Register */ +#define W5300_S0_TX_FSR 0x0224 /* S0 TX Free Size Register */ +#define W5300_S0_RX_RSR 0x0228 /* S0 Received data Size */ +#define W5300_S0_TX_FIFO 0x022e /* S0 Transmit FIFO */ +#define W5300_S0_RX_FIFO 0x0230 /* S0 Receive FIFO */ +#define W5300_REGS_LEN 0x0400 + +/* + * Device driver private data structure + */ +struct w5300_priv { + void __iomem *base; + spinlock_t reg_lock; + bool indirect; + u16 (*read) (struct w5300_priv *priv, u16 addr); + void (*write)(struct w5300_priv *priv, u16 addr, u16 data); + int irq; + int link_irq; + int link_gpio; + + struct napi_struct napi; + struct net_device *ndev; + bool promisc; + u32 msg_enable; +}; + +/************************************************************************ + * + * Lowlevel I/O functions + * + ***********************************************************************/ + +/* + * In direct address mode host system can directly access W5300 registers + * after mapping to Memory-Mapped I/O space. + * + * 0x400 bytes are required for memory space. 
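The mode-register bit macros above combine into the two values that w5300_hw_reset() writes, depending on whether the bus is wired for direct or indirect addressing. A quick stand-alone arithmetic check of those values (the macros are copied from the definitions above; this is not driver code):

#include <stdio.h>

#define MR_WDF(n)       (((n) & 7) << 11)       /* write data fetch time */
#define MR_PB           (1 << 4)                /* ping block */
#define MR_IND          (1 << 0)                /* indirect mode */

int main(void)
{
        printf("direct bus:   MR = 0x%04x\n", MR_WDF(7) | MR_PB);          /* 0x3810 */
        printf("indirect bus: MR = 0x%04x\n", MR_WDF(7) | MR_PB | MR_IND); /* 0x3811 */
        return 0;
}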
+ */ +static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr) +{ + return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +static inline void w5300_write_direct(struct w5300_priv *priv, + u16 addr, u16 data) +{ + iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); +} + +/* + * In indirect address mode host system indirectly accesses registers by + * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data + * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space. + * Mode Register (MR) is directly accessible. + * + * Only 0x06 bytes are required for memory space. + */ +#define W5300_IDM_AR 0x0002 /* Indirect Mode Address */ +#define W5300_IDM_DR 0x0004 /* Indirect Mode Data */ + +static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr) +{ + unsigned long flags; + u16 data; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5300_write_direct(priv, W5300_IDM_AR, addr); + mmiowb(); + data = w5300_read_direct(priv, W5300_IDM_DR); + spin_unlock_irqrestore(&priv->reg_lock, flags); + + return data; +} + +static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->reg_lock, flags); + w5300_write_direct(priv, W5300_IDM_AR, addr); + mmiowb(); + w5300_write_direct(priv, W5300_IDM_DR, data); + mmiowb(); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +#if defined(CONFIG_WIZNET_BUS_DIRECT) +#define w5300_read w5300_read_direct +#define w5300_write w5300_write_direct + +#elif defined(CONFIG_WIZNET_BUS_INDIRECT) +#define w5300_read w5300_read_indirect +#define w5300_write w5300_write_indirect + +#else /* CONFIG_WIZNET_BUS_ANY */ +#define w5300_read priv->read +#define w5300_write priv->write +#endif + +static u32 w5300_read32(struct w5300_priv *priv, u16 addr) +{ + u32 data; + data = w5300_read(priv, addr) << 16; + data |= w5300_read(priv, addr + 2); + return data; +} + +static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data) +{ + w5300_write(priv, addr, data >> 16); + w5300_write(priv, addr + 2, data); +} + +static int w5300_command(struct w5300_priv *priv, u16 cmd) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + w5300_write(priv, W5300_S0_CR, cmd); + mmiowb(); + + while (w5300_read(priv, W5300_S0_CR) != 0) { + if (time_after(jiffies, timeout)) + return -EIO; + cpu_relax(); + } + + return 0; +} + +static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len) +{ + u16 fifo; + int i; + + for (i = 0; i < len; i += 2) { + fifo = w5300_read(priv, W5300_S0_RX_FIFO); + *buf++ = fifo >> 8; + *buf++ = fifo; + } + fifo = w5300_read(priv, W5300_S0_RX_FIFO); + fifo = w5300_read(priv, W5300_S0_RX_FIFO); +} + +static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len) +{ + u16 fifo; + int i; + + for (i = 0; i < len; i += 2) { + fifo = *buf++ << 8; + fifo |= *buf++; + w5300_write(priv, W5300_S0_TX_FIFO, fifo); + } + w5300_write32(priv, W5300_S0_TX_WRSR, len); +} + +static void w5300_write_macaddr(struct w5300_priv *priv) +{ + struct net_device *ndev = priv->ndev; + w5300_write32(priv, W5300_SHARL, + ndev->dev_addr[0] << 24 | + ndev->dev_addr[1] << 16 | + ndev->dev_addr[2] << 8 | + ndev->dev_addr[3]); + w5300_write(priv, W5300_SHARH, + ndev->dev_addr[4] << 8 | + ndev->dev_addr[5]); + mmiowb(); +} + +static void w5300_hw_reset(struct w5300_priv *priv) +{ + w5300_write_direct(priv, W5300_MR, MR_RST); + mmiowb(); + mdelay(5); + w5300_write_direct(priv, W5300_MR, priv->indirect ? 
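w5300_read_frame() and w5300_write_frame() above move packet data through a 16-bit FIFO, big-endian, two bytes per access, which is why the receive path later rounds odd lengths up to the next even number before allocating the skb. A self-contained sketch of that packing; buffer sizes and the sample frame are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* pack bytes into big-endian 16-bit FIFO words, padding an odd tail byte */
static int pack_words(const uint8_t *buf, int len, uint16_t *fifo)
{
        int i, w = 0;

        for (i = 0; i < len; i += 2) {
                uint16_t word = buf[i] << 8;

                if (i + 1 < len)
                        word |= buf[i + 1];
                fifo[w++] = word;
        }
        return w;
}

/* unpack words back into bytes; the destination is rounded up to even length */
static void unpack_words(const uint16_t *fifo, int nwords, uint8_t *buf)
{
        int i;

        for (i = 0; i < nwords; i++) {
                buf[2 * i]     = fifo[i] >> 8;
                buf[2 * i + 1] = fifo[i] & 0xff;
        }
}

int main(void)
{
        const uint8_t frame[5] = { 0xde, 0xad, 0xbe, 0xef, 0x42 };
        uint16_t fifo[3];
        uint8_t out[6];
        int nwords = pack_words(frame, 5, fifo);

        unpack_words(fifo, nwords, out);
        printf("%d FIFO words, last data byte 0x%02x\n", nwords, out[4]);
        return 0;
}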
+ MR_WDF(7) | MR_PB | MR_IND : + MR_WDF(7) | MR_PB); + mmiowb(); + w5300_write(priv, W5300_IMR, 0); + w5300_write_macaddr(priv); + + /* Configure 128K of internal memory + * as 64K RX fifo and 64K TX fifo + */ + w5300_write32(priv, W5300_RMSRL, 64 << 24); + w5300_write32(priv, W5300_RMSRH, 0); + w5300_write32(priv, W5300_TMSRL, 64 << 24); + w5300_write32(priv, W5300_TMSRH, 0); + w5300_write(priv, W5300_MTYPE, 0x00ff); + mmiowb(); +} + +static void w5300_hw_start(struct w5300_priv *priv) +{ + w5300_write(priv, W5300_S0_MR, priv->promisc ? + S0_MR_MACRAW : S0_MR_MACRAW_MF); + mmiowb(); + w5300_command(priv, S0_CR_OPEN); + w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK); + w5300_write(priv, W5300_IMR, IR_S0); + mmiowb(); +} + +static void w5300_hw_close(struct w5300_priv *priv) +{ + w5300_write(priv, W5300_IMR, 0); + mmiowb(); + w5300_command(priv, S0_CR_CLOSE); +} + +/*********************************************************************** + * + * Device driver functions / callbacks + * + ***********************************************************************/ + +static void w5300_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(ndev->dev.parent), + sizeof(info->bus_info)); +} + +static u32 w5300_get_link(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + if (gpio_is_valid(priv->link_gpio)) + return !!gpio_get_value(priv->link_gpio); + + return 1; +} + +static u32 w5300_get_msglevel(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void w5300_set_msglevel(struct net_device *ndev, u32 value) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + priv->msg_enable = value; +} + +static int w5300_get_regs_len(struct net_device *ndev) +{ + return W5300_REGS_LEN; +} + +static void w5300_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *_buf) +{ + struct w5300_priv *priv = netdev_priv(ndev); + u8 *buf = _buf; + u16 addr; + u16 data; + + regs->version = 1; + for (addr = 0; addr < W5300_REGS_LEN; addr += 2) { + switch (addr & 0x23f) { + case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */ + case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */ + data = 0xffff; + break; + default: + data = w5300_read(priv, addr); + break; + } + *buf++ = data >> 8; + *buf++ = data; + } +} + +static void w5300_tx_timeout(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + w5300_hw_reset(priv); + w5300_hw_start(priv); + ndev->stats.tx_errors++; + ndev->trans_start = jiffies; + netif_wake_queue(ndev); +} + +static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + + w5300_write_frame(priv, skb->data, skb->len); + mmiowb(); + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + dev_kfree_skb(skb); + netif_dbg(priv, tx_queued, ndev, "tx queued\n"); + + w5300_command(priv, S0_CR_SEND); + + return NETDEV_TX_OK; +} + +static int w5300_napi_poll(struct napi_struct *napi, int budget) +{ + struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi); + struct net_device *ndev = priv->ndev; + struct sk_buff *skb; + int rx_count; + u16 rx_len; + + for (rx_count = 0; rx_count < budget; rx_count++) { + u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR); + if (rx_fifo_len == 0) + 
break; + + rx_len = w5300_read(priv, W5300_S0_RX_FIFO); + + skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2)); + if (unlikely(!skb)) { + u32 i; + for (i = 0; i < rx_fifo_len; i += 2) + w5300_read(priv, W5300_S0_RX_FIFO); + ndev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, rx_len); + w5300_read_frame(priv, skb->data, rx_len); + skb->protocol = eth_type_trans(skb, ndev); + + netif_receive_skb(skb); + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += rx_len; + } + + if (rx_count < budget) { + napi_complete(napi); + w5300_write(priv, W5300_IMR, IR_S0); + mmiowb(); + } + + return rx_count; +} + +static irqreturn_t w5300_interrupt(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5300_priv *priv = netdev_priv(ndev); + + int ir = w5300_read(priv, W5300_S0_IR); + if (!ir) + return IRQ_NONE; + w5300_write(priv, W5300_S0_IR, ir); + mmiowb(); + + if (ir & S0_IR_SENDOK) { + netif_dbg(priv, tx_done, ndev, "tx done\n"); + netif_wake_queue(ndev); + } + + if (ir & S0_IR_RECV) { + if (napi_schedule_prep(&priv->napi)) { + w5300_write(priv, W5300_IMR, 0); + mmiowb(); + __napi_schedule(&priv->napi); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t w5300_detect_link(int irq, void *ndev_instance) +{ + struct net_device *ndev = ndev_instance; + struct w5300_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + if (gpio_get_value(priv->link_gpio) != 0) { + netif_info(priv, link, ndev, "link is up\n"); + netif_carrier_on(ndev); + } else { + netif_info(priv, link, ndev, "link is down\n"); + netif_carrier_off(ndev); + } + } + + return IRQ_HANDLED; +} + +static void w5300_set_rx_mode(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + bool set_promisc = (ndev->flags & IFF_PROMISC) != 0; + + if (priv->promisc != set_promisc) { + priv->promisc = set_promisc; + w5300_hw_start(priv); + } +} + +static int w5300_set_macaddr(struct net_device *ndev, void *addr) +{ + struct w5300_priv *priv = netdev_priv(ndev); + struct sockaddr *sock_addr = addr; + + if (!is_valid_ether_addr(sock_addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN); + w5300_write_macaddr(priv); + return 0; +} + +static int w5300_open(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_info(priv, ifup, ndev, "enabling\n"); + w5300_hw_start(priv); + napi_enable(&priv->napi); + netif_start_queue(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + return 0; +} + +static int w5300_stop(struct net_device *ndev) +{ + struct w5300_priv *priv = netdev_priv(ndev); + + netif_info(priv, ifdown, ndev, "shutting down\n"); + w5300_hw_close(priv); + netif_carrier_off(ndev); + netif_stop_queue(ndev); + napi_disable(&priv->napi); + return 0; +} + +static const struct ethtool_ops w5300_ethtool_ops = { + .get_drvinfo = w5300_get_drvinfo, + .get_msglevel = w5300_get_msglevel, + .set_msglevel = w5300_set_msglevel, + .get_link = w5300_get_link, + .get_regs_len = w5300_get_regs_len, + .get_regs = w5300_get_regs, +}; + +static const struct net_device_ops w5300_netdev_ops = { + .ndo_open = w5300_open, + .ndo_stop = w5300_stop, + .ndo_start_xmit = w5300_start_tx, + .ndo_tx_timeout = w5300_tx_timeout, + .ndo_set_rx_mode = w5300_set_rx_mode, + .ndo_set_mac_address = w5300_set_macaddr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static int w5300_hw_probe(struct platform_device *pdev) +{ + struct 
wiznet_platform_data *data = dev_get_platdata(&pdev->dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + const char *name = netdev_name(ndev); + struct resource *mem; + int mem_size; + int irq; + int ret; + + if (data && is_valid_ether_addr(data->mac_addr)) { + memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); + } else { + eth_hw_addr_random(ndev); + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + mem_size = resource_size(mem); + + spin_lock_init(&priv->reg_lock); + priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE; + if (priv->indirect) { + priv->read = w5300_read_indirect; + priv->write = w5300_write_indirect; + } else { + priv->read = w5300_read_direct; + priv->write = w5300_write_direct; + } + + w5300_hw_reset(priv); + if (w5300_read(priv, W5300_IDR) != IDR_W5300) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + ret = request_irq(irq, w5300_interrupt, + IRQ_TYPE_LEVEL_LOW, name, ndev); + if (ret < 0) + return ret; + priv->irq = irq; + + priv->link_gpio = data ? data->link_gpio : -EINVAL; + if (gpio_is_valid(priv->link_gpio)) { + char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL); + if (!link_name) + return -ENOMEM; + snprintf(link_name, 16, "%s-link", name); + priv->link_irq = gpio_to_irq(priv->link_gpio); + if (request_any_context_irq(priv->link_irq, w5300_detect_link, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + link_name, priv->ndev) < 0) + priv->link_gpio = -EINVAL; + } + + netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq); + return 0; +} + +static int w5300_probe(struct platform_device *pdev) +{ + struct w5300_priv *priv; + struct net_device *ndev; + int err; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, ndev); + priv = netdev_priv(ndev); + priv->ndev = ndev; + + ndev->netdev_ops = &w5300_netdev_ops; + ndev->ethtool_ops = &w5300_ethtool_ops; + ndev->watchdog_timeo = HZ; + netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16); + + /* This chip doesn't support VLAN packets with normal MTU, + * so disable VLAN for this device. 
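netif_napi_add() above registers w5300_napi_poll() with a weight of 16; the interrupt handler masks the chip's IMR and schedules NAPI, and the poll routine only unmasks the interrupt again once it drains fewer frames than its budget. The toy model below replays that handshake in userspace with a simulated frame counter; it is an illustration of the control flow, not driver code.

#include <stdio.h>

static int pending = 20;        /* frames waiting in the chip's RX buffer (simulated) */
static int irq_enabled;         /* stands in for unmasking IR_S0 in the IMR */

static int napi_poll(int budget)
{
        int done = 0;

        while (done < budget && pending > 0) {
                pending--;      /* "receive" one frame */
                done++;
        }
        if (done < budget)
                irq_enabled = 1;        /* driver: napi_complete() then re-enable IR_S0 */
        return done;
}

int main(void)
{
        while (!irq_enabled) {
                int done = napi_poll(16);

                printf("poll drained %d frame(s), irq %s\n", done,
                       irq_enabled ? "re-enabled" : "still masked");
        }
        return 0;
}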
+ */ + ndev->features |= NETIF_F_VLAN_CHALLENGED; + + err = register_netdev(ndev); + if (err < 0) + goto err_register; + + err = w5300_hw_probe(pdev); + if (err < 0) + goto err_hw_probe; + + return 0; + +err_hw_probe: + unregister_netdev(ndev); +err_register: + free_netdev(ndev); + return err; +} + +static int w5300_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + + w5300_hw_reset(priv); + free_irq(priv->irq, ndev); + if (gpio_is_valid(priv->link_gpio)) + free_irq(priv->link_irq, ndev); + + unregister_netdev(ndev); + free_netdev(ndev); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int w5300_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + + if (netif_running(ndev)) { + netif_carrier_off(ndev); + netif_device_detach(ndev); + + w5300_hw_close(priv); + } + return 0; +} + +static int w5300_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct w5300_priv *priv = netdev_priv(ndev); + + if (!netif_running(ndev)) { + w5300_hw_reset(priv); + w5300_hw_start(priv); + + netif_device_attach(ndev); + if (!gpio_is_valid(priv->link_gpio) || + gpio_get_value(priv->link_gpio) != 0) + netif_carrier_on(ndev); + } + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume); + +static struct platform_driver w5300_driver = { + .driver = { + .name = DRV_NAME, + .pm = &w5300_pm_ops, + }, + .probe = w5300_probe, + .remove = w5300_remove, +}; + +module_platform_driver(w5300_driver); diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig new file mode 100644 index 000000000..7b90a5eba --- /dev/null +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -0,0 +1,44 @@ +# +# Xilink device configuration +# + +config NET_VENDOR_XILINX + bool "Xilinx devices" + default y + depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Xilinx devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_XILINX + +config XILINX_EMACLITE + tristate "Xilinx 10/100 Ethernet Lite support" + depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ) + select PHYLIB + ---help--- + This driver supports the 10/100 Ethernet Lite from Xilinx. + +config XILINX_AXI_EMAC + tristate "Xilinx 10/100/1000 AXI Ethernet support" + depends on MICROBLAZE + select PHYLIB + ---help--- + This driver supports the 10/100/1000 Ethernet from Xilinx for the + AXI bus interface used in Xilinx Virtex FPGAs. 
+ +config XILINX_LL_TEMAC + tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" + depends on (PPC || MICROBLAZE) + select PHYLIB + ---help--- + This driver supports the Xilinx 10/100/1000 LocalLink TEMAC + core used in Xilinx Spartan and Virtex FPGAs + +endif # NET_VENDOR_XILINX diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile new file mode 100644 index 000000000..214205e97 --- /dev/null +++ b/drivers/net/ethernet/xilinx/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the Xilink network device drivers. +# + +ll_temac-objs := ll_temac_main.o ll_temac_mdio.o +obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o +obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o +xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o +obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h new file mode 100644 index 000000000..522abe2ff --- /dev/null +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -0,0 +1,385 @@ + +#ifndef XILINX_LL_TEMAC_H +#define XILINX_LL_TEMAC_H + +#include +#include +#include + +#ifdef CONFIG_PPC_DCR +#include +#include +#endif + +/* packet size info */ +#define XTE_HDR_SIZE 14 /* size of Ethernet header */ +#define XTE_TRL_SIZE 4 /* size of Ethernet trailer (FCS) */ +#define XTE_JUMBO_MTU 9000 +#define XTE_MAX_JUMBO_FRAME_SIZE (XTE_JUMBO_MTU + XTE_HDR_SIZE + XTE_TRL_SIZE) + +/* Configuration options */ + +/* Accept all incoming packets. + * This option defaults to disabled (cleared) */ +#define XTE_OPTION_PROMISC (1 << 0) +/* Jumbo frame support for Tx & Rx. + * This option defaults to disabled (cleared) */ +#define XTE_OPTION_JUMBO (1 << 1) +/* VLAN Rx & Tx frame support. + * This option defaults to disabled (cleared) */ +#define XTE_OPTION_VLAN (1 << 2) +/* Enable recognition of flow control frames on Rx + * This option defaults to enabled (set) */ +#define XTE_OPTION_FLOW_CONTROL (1 << 4) +/* Strip FCS and PAD from incoming frames. + * Note: PAD from VLAN frames is not stripped. + * This option defaults to disabled (set) */ +#define XTE_OPTION_FCS_STRIP (1 << 5) +/* Generate FCS field and add PAD automatically for outgoing frames. + * This option defaults to enabled (set) */ +#define XTE_OPTION_FCS_INSERT (1 << 6) +/* Enable Length/Type error checking for incoming frames. When this option is +set, the MAC will filter frames that have a mismatched type/length field +and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these +types of frames are encountered. When this option is cleared, the MAC will +allow these types of frames to be received. +This option defaults to enabled (set) */ +#define XTE_OPTION_LENTYPE_ERR (1 << 7) +/* Enable the transmitter. 
+ * This option defaults to enabled (set) */ +#define XTE_OPTION_TXEN (1 << 11) +/* Enable the receiver +* This option defaults to enabled (set) */ +#define XTE_OPTION_RXEN (1 << 12) + +/* Default options set when device is initialized or reset */ +#define XTE_OPTION_DEFAULTS \ + (XTE_OPTION_TXEN | \ + XTE_OPTION_FLOW_CONTROL | \ + XTE_OPTION_RXEN) + +/* XPS_LL_TEMAC SDMA registers definition */ + +#define TX_NXTDESC_PTR 0x00 /* r */ +#define TX_CURBUF_ADDR 0x01 /* r */ +#define TX_CURBUF_LENGTH 0x02 /* r */ +#define TX_CURDESC_PTR 0x03 /* rw */ +#define TX_TAILDESC_PTR 0x04 /* rw */ +#define TX_CHNL_CTRL 0x05 /* rw */ +/* + 0:7 24:31 IRQTimeout + 8:15 16:23 IRQCount + 16:20 11:15 Reserved + 21 10 0 + 22 9 UseIntOnEnd + 23 8 LdIRQCnt + 24 7 IRQEn + 25:28 3:6 Reserved + 29 2 IrqErrEn + 30 1 IrqDlyEn + 31 0 IrqCoalEn +*/ +#define CHNL_CTRL_IRQ_IOE (1 << 9) +#define CHNL_CTRL_IRQ_EN (1 << 7) +#define CHNL_CTRL_IRQ_ERR_EN (1 << 2) +#define CHNL_CTRL_IRQ_DLY_EN (1 << 1) +#define CHNL_CTRL_IRQ_COAL_EN (1 << 0) +#define TX_IRQ_REG 0x06 /* rw */ +/* + 0:7 24:31 DltTmrValue + 8:15 16:23 ClscCntrValue + 16:17 14:15 Reserved + 18:21 10:13 ClscCnt + 22:23 8:9 DlyCnt + 24:28 3::7 Reserved + 29 2 ErrIrq + 30 1 DlyIrq + 31 0 CoalIrq + */ +#define TX_CHNL_STS 0x07 /* r */ +/* + 0:9 22:31 Reserved + 10 21 TailPErr + 11 20 CmpErr + 12 19 AddrErr + 13 18 NxtPErr + 14 17 CurPErr + 15 16 BsyWr + 16:23 8:15 Reserved + 24 7 Error + 25 6 IOE + 26 5 SOE + 27 4 Cmplt + 28 3 SOP + 29 2 EOP + 30 1 EngBusy + 31 0 Reserved +*/ + +#define RX_NXTDESC_PTR 0x08 /* r */ +#define RX_CURBUF_ADDR 0x09 /* r */ +#define RX_CURBUF_LENGTH 0x0a /* r */ +#define RX_CURDESC_PTR 0x0b /* rw */ +#define RX_TAILDESC_PTR 0x0c /* rw */ +#define RX_CHNL_CTRL 0x0d /* rw */ +/* + 0:7 24:31 IRQTimeout + 8:15 16:23 IRQCount + 16:20 11:15 Reserved + 21 10 0 + 22 9 UseIntOnEnd + 23 8 LdIRQCnt + 24 7 IRQEn + 25:28 3:6 Reserved + 29 2 IrqErrEn + 30 1 IrqDlyEn + 31 0 IrqCoalEn + */ +#define RX_IRQ_REG 0x0e /* rw */ +#define IRQ_COAL (1 << 0) +#define IRQ_DLY (1 << 1) +#define IRQ_ERR (1 << 2) +#define IRQ_DMAERR (1 << 7) /* this is not documented ??? 
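The CHNL_CTRL interrupt-enable bits defined above get OR-ed into fixed coalescing values later, in temac_dma_bd_init(). A quick stand-alone check of what the TX and RX channel control words end up as (macros copied from the header; not driver code):

#include <stdio.h>

#define CHNL_CTRL_IRQ_IOE       (1 << 9)
#define CHNL_CTRL_IRQ_EN        (1 << 7)
#define CHNL_CTRL_IRQ_DLY_EN    (1 << 1)
#define CHNL_CTRL_IRQ_COAL_EN   (1 << 0)

int main(void)
{
        unsigned int tx = 0x10220400 | CHNL_CTRL_IRQ_EN |
                          CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN;
        unsigned int rx = 0xff070000 | CHNL_CTRL_IRQ_EN |
                          CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN |
                          CHNL_CTRL_IRQ_IOE;

        printf("TX_CHNL_CTRL = 0x%08x\n", tx);  /* 0x10220483 */
        printf("RX_CHNL_CTRL = 0x%08x\n", rx);  /* 0xff070283 */
        return 0;
}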
*/ +/* + 0:7 24:31 DltTmrValue + 8:15 16:23 ClscCntrValue + 16:17 14:15 Reserved + 18:21 10:13 ClscCnt + 22:23 8:9 DlyCnt + 24:28 3::7 Reserved +*/ +#define RX_CHNL_STS 0x0f /* r */ +#define CHNL_STS_ENGBUSY (1 << 1) +#define CHNL_STS_EOP (1 << 2) +#define CHNL_STS_SOP (1 << 3) +#define CHNL_STS_CMPLT (1 << 4) +#define CHNL_STS_SOE (1 << 5) +#define CHNL_STS_IOE (1 << 6) +#define CHNL_STS_ERR (1 << 7) + +#define CHNL_STS_BSYWR (1 << 16) +#define CHNL_STS_CURPERR (1 << 17) +#define CHNL_STS_NXTPERR (1 << 18) +#define CHNL_STS_ADDRERR (1 << 19) +#define CHNL_STS_CMPERR (1 << 20) +#define CHNL_STS_TAILERR (1 << 21) +/* + 0:9 22:31 Reserved + 10 21 TailPErr + 11 20 CmpErr + 12 19 AddrErr + 13 18 NxtPErr + 14 17 CurPErr + 15 16 BsyWr + 16:23 8:15 Reserved + 24 7 Error + 25 6 IOE + 26 5 SOE + 27 4 Cmplt + 28 3 SOP + 29 2 EOP + 30 1 EngBusy + 31 0 Reserved +*/ + +#define DMA_CONTROL_REG 0x10 /* rw */ +#define DMA_CONTROL_RST (1 << 0) +#define DMA_TAIL_ENABLE (1 << 2) + +/* XPS_LL_TEMAC direct registers definition */ + +#define XTE_RAF0_OFFSET 0x00 +#define RAF0_RST (1 << 0) +#define RAF0_MCSTREJ (1 << 1) +#define RAF0_BCSTREJ (1 << 2) +#define XTE_TPF0_OFFSET 0x04 +#define XTE_IFGP0_OFFSET 0x08 +#define XTE_ISR0_OFFSET 0x0c +#define ISR0_HARDACSCMPLT (1 << 0) +#define ISR0_AUTONEG (1 << 1) +#define ISR0_RXCMPLT (1 << 2) +#define ISR0_RXREJ (1 << 3) +#define ISR0_RXFIFOOVR (1 << 4) +#define ISR0_TXCMPLT (1 << 5) +#define ISR0_RXDCMLCK (1 << 6) + +#define XTE_IPR0_OFFSET 0x10 +#define XTE_IER0_OFFSET 0x14 + +#define XTE_MSW0_OFFSET 0x20 +#define XTE_LSW0_OFFSET 0x24 +#define XTE_CTL0_OFFSET 0x28 +#define XTE_RDY0_OFFSET 0x2c + +#define XTE_RSE_MIIM_RR_MASK 0x0002 +#define XTE_RSE_MIIM_WR_MASK 0x0004 +#define XTE_RSE_CFG_RR_MASK 0x0020 +#define XTE_RSE_CFG_WR_MASK 0x0040 +#define XTE_RDY0_HARD_ACS_RDY_MASK (0x10000) + +/* XPS_LL_TEMAC indirect registers offset definition */ + +#define XTE_RXC0_OFFSET 0x00000200 /* Rx configuration word 0 */ +#define XTE_RXC1_OFFSET 0x00000240 /* Rx configuration word 1 */ +#define XTE_RXC1_RXRST_MASK (1 << 31) /* Receiver reset */ +#define XTE_RXC1_RXJMBO_MASK (1 << 30) /* Jumbo frame enable */ +#define XTE_RXC1_RXFCS_MASK (1 << 29) /* FCS not stripped */ +#define XTE_RXC1_RXEN_MASK (1 << 28) /* Receiver enable */ +#define XTE_RXC1_RXVLAN_MASK (1 << 27) /* VLAN enable */ +#define XTE_RXC1_RXHD_MASK (1 << 26) /* Half duplex */ +#define XTE_RXC1_RXLT_MASK (1 << 25) /* Length/type check disable */ + +#define XTE_TXC_OFFSET 0x00000280 /* Tx configuration */ +#define XTE_TXC_TXRST_MASK (1 << 31) /* Transmitter reset */ +#define XTE_TXC_TXJMBO_MASK (1 << 30) /* Jumbo frame enable */ +#define XTE_TXC_TXFCS_MASK (1 << 29) /* Generate FCS */ +#define XTE_TXC_TXEN_MASK (1 << 28) /* Transmitter enable */ +#define XTE_TXC_TXVLAN_MASK (1 << 27) /* VLAN enable */ +#define XTE_TXC_TXHD_MASK (1 << 26) /* Half duplex */ + +#define XTE_FCC_OFFSET 0x000002C0 /* Flow control config */ +#define XTE_FCC_RXFLO_MASK (1 << 29) /* Rx flow control enable */ +#define XTE_FCC_TXFLO_MASK (1 << 30) /* Tx flow control enable */ + +#define XTE_EMCFG_OFFSET 0x00000300 /* EMAC configuration */ +#define XTE_EMCFG_LINKSPD_MASK 0xC0000000 /* Link speed */ +#define XTE_EMCFG_HOSTEN_MASK (1 << 26) /* Host interface enable */ +#define XTE_EMCFG_LINKSPD_10 0x00000000 /* 10 Mbit LINKSPD_MASK */ +#define XTE_EMCFG_LINKSPD_100 (1 << 30) /* 100 Mbit LINKSPD_MASK */ +#define XTE_EMCFG_LINKSPD_1000 (1 << 31) /* 1000 Mbit LINKSPD_MASK */ + +#define XTE_GMIC_OFFSET 0x00000320 /* RGMII/SGMII config */ +#define 
XTE_MC_OFFSET 0x00000340 /* MDIO configuration */ +#define XTE_UAW0_OFFSET 0x00000380 /* Unicast address word 0 */ +#define XTE_UAW1_OFFSET 0x00000384 /* Unicast address word 1 */ + +#define XTE_MAW0_OFFSET 0x00000388 /* Multicast addr word 0 */ +#define XTE_MAW1_OFFSET 0x0000038C /* Multicast addr word 1 */ +#define XTE_AFM_OFFSET 0x00000390 /* Promiscuous mode */ +#define XTE_AFM_EPPRM_MASK (1 << 31) /* Promiscuous mode enable */ + +/* Interrupt Request status */ +#define XTE_TIS_OFFSET 0x000003A0 +#define TIS_FRIS (1 << 0) +#define TIS_MRIS (1 << 1) +#define TIS_MWIS (1 << 2) +#define TIS_ARIS (1 << 3) +#define TIS_AWIS (1 << 4) +#define TIS_CRIS (1 << 5) +#define TIS_CWIS (1 << 6) + +#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */ + +/** MII Mamagement Control register (MGTCR) */ +#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */ +#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */ + +#define CNTLREG_WRITE_ENABLE_MASK 0x8000 +#define CNTLREG_EMAC1SEL_MASK 0x0400 +#define CNTLREG_ADDRESSCODE_MASK 0x03ff + +/* CDMAC descriptor status bit definitions */ + +#define STS_CTRL_APP0_ERR (1 << 31) +#define STS_CTRL_APP0_IRQONEND (1 << 30) +/* undoccumented */ +#define STS_CTRL_APP0_STOPONEND (1 << 29) +#define STS_CTRL_APP0_CMPLT (1 << 28) +#define STS_CTRL_APP0_SOP (1 << 27) +#define STS_CTRL_APP0_EOP (1 << 26) +#define STS_CTRL_APP0_ENGBUSY (1 << 25) +/* undocumented */ +#define STS_CTRL_APP0_ENGRST (1 << 24) + +#define TX_CONTROL_CALC_CSUM_MASK 1 + +#define MULTICAST_CAM_TABLE_NUM 4 + +/* TEMAC Synthesis features */ +#define TEMAC_FEATURE_RX_CSUM (1 << 0) +#define TEMAC_FEATURE_TX_CSUM (1 << 1) + +/* TX/RX CURDESC_PTR points to first descriptor */ +/* TX/RX TAILDESC_PTR points to last descriptor in linked list */ + +/** + * struct cdmac_bd - LocalLink buffer descriptor format + * + * app0 bits: + * 0 Error + * 1 IrqOnEnd generate an interrupt at completion of DMA op + * 2 reserved + * 3 completed Current descriptor completed + * 4 SOP TX - marks first desc/ RX marks first desct + * 5 EOP TX marks last desc/RX marks last desc + * 6 EngBusy DMA is processing + * 7 reserved + * 8:31 application specific + */ +struct cdmac_bd { + u32 next; /* Physical address of next buffer descriptor */ + u32 phys; + u32 len; + u32 app0; + u32 app1; /* TX start << 16 | insert */ + u32 app2; /* TX csum */ + u32 app3; + u32 app4; /* skb for TX length for RX */ +}; + +struct temac_local { + struct net_device *ndev; + struct device *dev; + + /* Connection to PHY device */ + struct phy_device *phy_dev; /* Pointer to PHY device */ + struct device_node *phy_node; + + /* MDIO bus data */ + struct mii_bus *mii_bus; /* MII bus reference */ + int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ + + /* IO registers, dma functions and IRQs */ + void __iomem *regs; + void __iomem *sdma_regs; +#ifdef CONFIG_PPC_DCR + dcr_host_t sdma_dcrs; +#endif + u32 (*dma_in)(struct temac_local *, int); + void (*dma_out)(struct temac_local *, int, u32); + + int tx_irq; + int rx_irq; + int emac_num; + + struct sk_buff **rx_skb; + spinlock_t rx_lock; + struct mutex indirect_mutex; + u32 options; /* Current options word */ + int last_link; + unsigned int temac_features; + + /* Buffer descriptors */ + struct cdmac_bd *tx_bd_v; + dma_addr_t tx_bd_p; + struct cdmac_bd *rx_bd_v; + dma_addr_t rx_bd_p; + int tx_bd_ci; + int tx_bd_next; + int tx_bd_tail; + int rx_bd_ci; +}; + +/* xilinx_temac.c */ +u32 temac_ior(struct temac_local *lp, int offset); +void temac_iow(struct temac_local *lp, int offset, u32 value); +int 
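The buffer descriptor declared below (struct cdmac_bd) is eight 32-bit words: the DMA engine walks the ring through the physical 'next' pointer while the driver keeps the skb pointer in app4. A stand-alone restatement of the layout, with uint32_t standing in for the kernel's u32, just to confirm the 32-byte footprint; illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct cdmac_bd {               /* same field order as the header below */
        uint32_t next;
        uint32_t phys;
        uint32_t len;
        uint32_t app0;
        uint32_t app1;
        uint32_t app2;
        uint32_t app3;
        uint32_t app4;
};

int main(void)
{
        printf("descriptor size: %zu bytes\n", sizeof(struct cdmac_bd));   /* 32 */
        printf("app0 offset:     %zu\n", offsetof(struct cdmac_bd, app0)); /* 12 */
        return 0;
}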
temac_indirect_busywait(struct temac_local *lp); +u32 temac_indirect_in32(struct temac_local *lp, int reg); +void temac_indirect_out32(struct temac_local *lp, int reg, u32 value); + + +/* xilinx_temac_mdio.c */ +int temac_mdio_setup(struct temac_local *lp, struct device_node *np); +void temac_mdio_teardown(struct temac_local *lp); + +#endif /* XILINX_LL_TEMAC_H */ diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c new file mode 100644 index 000000000..af2694dc6 --- /dev/null +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -0,0 +1,1182 @@ +/* + * Driver for Xilinx TEMAC Ethernet device + * + * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi + * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. + * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. + * + * This is a driver for the Xilinx ll_temac ipcore which is often used + * in the Virtex and Spartan series of chips. + * + * Notes: + * - The ll_temac hardware uses indirect access for many of the TEMAC + * registers, include the MDIO bus. However, indirect access to MDIO + * registers take considerably more clock cycles than to TEMAC registers. + * MDIO accesses are long, so threads doing them should probably sleep + * rather than busywait. However, since only one indirect access can be + * in progress at any given time, that means that *all* indirect accesses + * could end up sleeping (to wait for an MDIO access to complete). + * Fortunately none of the indirect accesses are on the 'hot' path for tx + * or rx, so this should be okay. + * + * TODO: + * - Factor out locallink DMA code into separate driver + * - Fix multicast assignment. + * - Fix support for hardware checksumming. + * - Testing. Lots and lots of testing. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* needed for sizeof(tcphdr) */ +#include /* needed for sizeof(udphdr) */ +#include +#include +#include +#include +#include +#include +#include + +#include "ll_temac.h" + +#define TX_BD_NUM 64 +#define RX_BD_NUM 128 + +/* --------------------------------------------------------------------- + * Low level register access functions + */ + +u32 temac_ior(struct temac_local *lp, int offset) +{ + return in_be32((u32 *)(lp->regs + offset)); +} + +void temac_iow(struct temac_local *lp, int offset, u32 value) +{ + out_be32((u32 *) (lp->regs + offset), value); +} + +int temac_indirect_busywait(struct temac_local *lp) +{ + unsigned long end = jiffies + 2; + + while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) { + if (time_before_eq(end, jiffies)) { + WARN_ON(1); + return -ETIMEDOUT; + } + msleep(1); + } + return 0; +} + +/** + * temac_indirect_in32 + * + * lp->indirect_mutex must be held when calling this function + */ +u32 temac_indirect_in32(struct temac_local *lp, int reg) +{ + u32 val; + + if (temac_indirect_busywait(lp)) + return -ETIMEDOUT; + temac_iow(lp, XTE_CTL0_OFFSET, reg); + if (temac_indirect_busywait(lp)) + return -ETIMEDOUT; + val = temac_ior(lp, XTE_LSW0_OFFSET); + + return val; +} + +/** + * temac_indirect_out32 + * + * lp->indirect_mutex must be held when calling this function + */ +void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) +{ + if (temac_indirect_busywait(lp)) + return; + temac_iow(lp, XTE_LSW0_OFFSET, value); + temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); + temac_indirect_busywait(lp); +} + +/** + * 
temac_dma_in32 - Memory mapped DMA read, this function expects a + * register input that is based on DCR word addresses which + * are then converted to memory mapped byte addresses + */ +static u32 temac_dma_in32(struct temac_local *lp, int reg) +{ + return in_be32((u32 *)(lp->sdma_regs + (reg << 2))); +} + +/** + * temac_dma_out32 - Memory mapped DMA read, this function expects a + * register input that is based on DCR word addresses which + * are then converted to memory mapped byte addresses + */ +static void temac_dma_out32(struct temac_local *lp, int reg, u32 value) +{ + out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value); +} + +/* DMA register access functions can be DCR based or memory mapped. + * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both + * memory mapped. + */ +#ifdef CONFIG_PPC_DCR + +/** + * temac_dma_dcr_in32 - DCR based DMA read + */ +static u32 temac_dma_dcr_in(struct temac_local *lp, int reg) +{ + return dcr_read(lp->sdma_dcrs, reg); +} + +/** + * temac_dma_dcr_out32 - DCR based DMA write + */ +static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value) +{ + dcr_write(lp->sdma_dcrs, reg, value); +} + +/** + * temac_dcr_setup - If the DMA is DCR based, then setup the address and + * I/O functions + */ +static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, + struct device_node *np) +{ + unsigned int dcrs; + + /* setup the dcr address mapping if it's in the device tree */ + + dcrs = dcr_resource_start(np, 0); + if (dcrs != 0) { + lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0)); + lp->dma_in = temac_dma_dcr_in; + lp->dma_out = temac_dma_dcr_out; + dev_dbg(&op->dev, "DCR base: %x\n", dcrs); + return 0; + } + /* no DCR in the device tree, indicate a failure */ + return -1; +} + +#else + +/* + * temac_dcr_setup - This is a stub for when DCR is not supported, + * such as with MicroBlaze + */ +static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, + struct device_node *np) +{ + return -1; +} + +#endif + +/** + * temac_dma_bd_release - Release buffer descriptor rings + */ +static void temac_dma_bd_release(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + int i; + + /* Reset Local Link (DMA) */ + lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); + + for (i = 0; i < RX_BD_NUM; i++) { + if (!lp->rx_skb[i]) + break; + else { + dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys, + XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb(lp->rx_skb[i]); + } + } + if (lp->rx_bd_v) + dma_free_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + lp->rx_bd_v, lp->rx_bd_p); + if (lp->tx_bd_v) + dma_free_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + lp->tx_bd_v, lp->tx_bd_p); + kfree(lp->rx_skb); +} + +/** + * temac_dma_bd_init - Setup buffer descriptor rings + */ +static int temac_dma_bd_init(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + struct sk_buff *skb; + int i; + + lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL); + if (!lp->rx_skb) + goto out; + + /* allocate the tx and rx ring buffer descriptors. */ + /* returns a virtual address and a physical address. 
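temac_dma_bd_init() below links every descriptor's next field to the physical address of its successor, wrapping at the end so the hardware sees a ring, and the driver then walks the same ring with plain indices that wrap in software. A small host-side sketch of that linkage; the base address and the tiny ring size are made up (the driver uses 64 TX and 128 RX descriptors).

#include <stdio.h>
#include <stdint.h>

#define BD_NUM  4               /* tiny ring for illustration */
#define BD_SIZE 32              /* sizeof(struct cdmac_bd) */

int main(void)
{
        uint32_t bd_phys = 0x10000000;  /* pretend DMA address of the ring */
        uint32_t next[BD_NUM];
        int i, ci;

        /* link each descriptor to the one after it, last one back to the first */
        for (i = 0; i < BD_NUM; i++)
                next[i] = bd_phys + BD_SIZE * ((i + 1) % BD_NUM);

        for (i = 0; i < BD_NUM; i++)
                printf("bd[%d].next = 0x%08x\n", i, next[i]);

        /* software index advances the same way the driver does */
        ci = BD_NUM - 1;
        ci++;
        if (ci >= BD_NUM)
                ci = 0;
        printf("index after wrap: %d\n", ci);
        return 0;
}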
*/ + lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + &lp->tx_bd_p, GFP_KERNEL); + if (!lp->tx_bd_v) + goto out; + + lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + &lp->rx_bd_p, GFP_KERNEL); + if (!lp->rx_bd_v) + goto out; + + for (i = 0; i < TX_BD_NUM; i++) { + lp->tx_bd_v[i].next = lp->tx_bd_p + + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); + } + + for (i = 0; i < RX_BD_NUM; i++) { + lp->rx_bd_v[i].next = lp->rx_bd_p + + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); + + skb = netdev_alloc_skb_ip_align(ndev, + XTE_MAX_JUMBO_FRAME_SIZE); + if (!skb) + goto out; + + lp->rx_skb[i] = skb; + /* returns physical address of skb->data */ + lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, + skb->data, + XTE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE; + lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND; + } + + lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 | + CHNL_CTRL_IRQ_EN | + CHNL_CTRL_IRQ_DLY_EN | + CHNL_CTRL_IRQ_COAL_EN); + /* 0x10220483 */ + /* 0x00100483 */ + lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 | + CHNL_CTRL_IRQ_EN | + CHNL_CTRL_IRQ_DLY_EN | + CHNL_CTRL_IRQ_COAL_EN | + CHNL_CTRL_IRQ_IOE); + /* 0xff010283 */ + + lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p); + lp->dma_out(lp, RX_TAILDESC_PTR, + lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); + lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); + + /* Init descriptor indexes */ + lp->tx_bd_ci = 0; + lp->tx_bd_next = 0; + lp->tx_bd_tail = 0; + lp->rx_bd_ci = 0; + + return 0; + +out: + temac_dma_bd_release(ndev); + return -ENOMEM; +} + +/* --------------------------------------------------------------------- + * net_device_ops + */ + +static void temac_do_set_mac_address(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + + /* set up unicast MAC address filter set its mac address */ + mutex_lock(&lp->indirect_mutex); + temac_indirect_out32(lp, XTE_UAW0_OFFSET, + (ndev->dev_addr[0]) | + (ndev->dev_addr[1] << 8) | + (ndev->dev_addr[2] << 16) | + (ndev->dev_addr[3] << 24)); + /* There are reserved bits in EUAW1 + * so don't affect them Set MAC bits [47:32] in EUAW1 */ + temac_indirect_out32(lp, XTE_UAW1_OFFSET, + (ndev->dev_addr[4] & 0x000000ff) | + (ndev->dev_addr[5] << 8)); + mutex_unlock(&lp->indirect_mutex); +} + +static int temac_init_mac_address(struct net_device *ndev, void *address) +{ + memcpy(ndev->dev_addr, address, ETH_ALEN); + if (!is_valid_ether_addr(ndev->dev_addr)) + eth_hw_addr_random(ndev); + temac_do_set_mac_address(ndev); + return 0; +} + +static int temac_set_mac_address(struct net_device *ndev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + temac_do_set_mac_address(ndev); + return 0; +} + +static void temac_set_multicast_list(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + u32 multi_addr_msw, multi_addr_lsw, val; + int i; + + mutex_lock(&lp->indirect_mutex); + if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || + netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) { + /* + * We must make the kernel realise we had to move + * into promisc mode or we start all out war on + * the cable. If it was a promisc request the + * flag is already set. If not we assert it. 
+ */ + ndev->flags |= IFF_PROMISC; + temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK); + dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); + } else if (!netdev_mc_empty(ndev)) { + struct netdev_hw_addr *ha; + + i = 0; + netdev_for_each_mc_addr(ha, ndev) { + if (i >= MULTICAST_CAM_TABLE_NUM) + break; + multi_addr_msw = ((ha->addr[3] << 24) | + (ha->addr[2] << 16) | + (ha->addr[1] << 8) | + (ha->addr[0])); + temac_indirect_out32(lp, XTE_MAW0_OFFSET, + multi_addr_msw); + multi_addr_lsw = ((ha->addr[5] << 8) | + (ha->addr[4]) | (i << 16)); + temac_indirect_out32(lp, XTE_MAW1_OFFSET, + multi_addr_lsw); + i++; + } + } else { + val = temac_indirect_in32(lp, XTE_AFM_OFFSET); + temac_indirect_out32(lp, XTE_AFM_OFFSET, + val & ~XTE_AFM_EPPRM_MASK); + temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0); + temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0); + dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); + } + mutex_unlock(&lp->indirect_mutex); +} + +struct temac_option { + int flg; + u32 opt; + u32 reg; + u32 m_or; + u32 m_and; +} temac_options[] = { + /* Turn on jumbo packet support for both Rx and Tx */ + { + .opt = XTE_OPTION_JUMBO, + .reg = XTE_TXC_OFFSET, + .m_or = XTE_TXC_TXJMBO_MASK, + }, + { + .opt = XTE_OPTION_JUMBO, + .reg = XTE_RXC1_OFFSET, + .m_or =XTE_RXC1_RXJMBO_MASK, + }, + /* Turn on VLAN packet support for both Rx and Tx */ + { + .opt = XTE_OPTION_VLAN, + .reg = XTE_TXC_OFFSET, + .m_or =XTE_TXC_TXVLAN_MASK, + }, + { + .opt = XTE_OPTION_VLAN, + .reg = XTE_RXC1_OFFSET, + .m_or =XTE_RXC1_RXVLAN_MASK, + }, + /* Turn on FCS stripping on receive packets */ + { + .opt = XTE_OPTION_FCS_STRIP, + .reg = XTE_RXC1_OFFSET, + .m_or =XTE_RXC1_RXFCS_MASK, + }, + /* Turn on FCS insertion on transmit packets */ + { + .opt = XTE_OPTION_FCS_INSERT, + .reg = XTE_TXC_OFFSET, + .m_or =XTE_TXC_TXFCS_MASK, + }, + /* Turn on length/type field checking on receive packets */ + { + .opt = XTE_OPTION_LENTYPE_ERR, + .reg = XTE_RXC1_OFFSET, + .m_or =XTE_RXC1_RXLT_MASK, + }, + /* Turn on flow control */ + { + .opt = XTE_OPTION_FLOW_CONTROL, + .reg = XTE_FCC_OFFSET, + .m_or =XTE_FCC_RXFLO_MASK, + }, + /* Turn on flow control */ + { + .opt = XTE_OPTION_FLOW_CONTROL, + .reg = XTE_FCC_OFFSET, + .m_or =XTE_FCC_TXFLO_MASK, + }, + /* Turn on promiscuous frame filtering (all frames are received ) */ + { + .opt = XTE_OPTION_PROMISC, + .reg = XTE_AFM_OFFSET, + .m_or =XTE_AFM_EPPRM_MASK, + }, + /* Enable transmitter if not already enabled */ + { + .opt = XTE_OPTION_TXEN, + .reg = XTE_TXC_OFFSET, + .m_or =XTE_TXC_TXEN_MASK, + }, + /* Enable receiver? */ + { + .opt = XTE_OPTION_RXEN, + .reg = XTE_RXC1_OFFSET, + .m_or =XTE_RXC1_RXEN_MASK, + }, + {} +}; + +/** + * temac_setoptions + */ +static u32 temac_setoptions(struct net_device *ndev, u32 options) +{ + struct temac_local *lp = netdev_priv(ndev); + struct temac_option *tp = &temac_options[0]; + int reg; + + mutex_lock(&lp->indirect_mutex); + while (tp->opt) { + reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or; + if (options & tp->opt) + reg |= tp->m_or; + temac_indirect_out32(lp, tp->reg, reg); + tp++; + } + lp->options |= options; + mutex_unlock(&lp->indirect_mutex); + + return 0; +} + +/* Initialize temac */ +static void temac_device_reset(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + u32 timeout; + u32 val; + + /* Perform a software reset */ + + /* 0x300 host enable bit ? 
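temac_setoptions() above applies the option table with one read-modify-write per register: clear the mask, then set it again only if the caller asked for that option. The sketch below replays that loop against an in-memory register file; the two fake registers, option bits, and masks are illustrative assumptions, not the TEMAC's.

#include <stdio.h>
#include <stdint.h>

#define OPT_JUMBO       (1 << 1)
#define OPT_TXEN        (1 << 11)

struct option {
        uint32_t opt;   /* option bit requested by the caller */
        int reg;        /* index of the register it lives in */
        uint32_t m_or;  /* bit(s) to set in that register */
};

static const struct option options[] = {
        { OPT_JUMBO, 0, 1u << 30 },
        { OPT_TXEN,  1, 1u << 28 },
        { 0, 0, 0 },
};

static uint32_t regs[2] = { 0x40000000, 0x00000000 };   /* jumbo currently on */

static void set_options(uint32_t requested)
{
        const struct option *tp;

        for (tp = options; tp->opt; tp++) {
                uint32_t val = regs[tp->reg] & ~tp->m_or;       /* clear ... */

                if (requested & tp->opt)
                        val |= tp->m_or;                        /* ... set if wanted */
                regs[tp->reg] = val;
        }
}

int main(void)
{
        set_options(OPT_TXEN);          /* enable TX, drop jumbo */
        printf("reg0 = 0x%08x, reg1 = 0x%08x\n", regs[0], regs[1]);
        return 0;
}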
*/ + /* reset PHY through control register ?:1 */ + + dev_dbg(&ndev->dev, "%s()\n", __func__); + + mutex_lock(&lp->indirect_mutex); + /* Reset the receiver and wait for it to finish reset */ + temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK); + timeout = 1000; + while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) { + udelay(1); + if (--timeout == 0) { + dev_err(&ndev->dev, + "temac_device_reset RX reset timeout!!\n"); + break; + } + } + + /* Reset the transmitter and wait for it to finish reset */ + temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK); + timeout = 1000; + while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) { + udelay(1); + if (--timeout == 0) { + dev_err(&ndev->dev, + "temac_device_reset TX reset timeout!!\n"); + break; + } + } + + /* Disable the receiver */ + val = temac_indirect_in32(lp, XTE_RXC1_OFFSET); + temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK); + + /* Reset Local Link (DMA) */ + lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); + timeout = 1000; + while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) { + udelay(1); + if (--timeout == 0) { + dev_err(&ndev->dev, + "temac_device_reset DMA reset timeout!!\n"); + break; + } + } + lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); + + if (temac_dma_bd_init(ndev)) { + dev_err(&ndev->dev, + "temac_device_reset descriptor allocation failed\n"); + } + + temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0); + temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0); + temac_indirect_out32(lp, XTE_TXC_OFFSET, 0); + temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK); + + mutex_unlock(&lp->indirect_mutex); + + /* Sync default options with HW + * but leave receiver and transmitter disabled. */ + temac_setoptions(ndev, + lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN)); + + temac_do_set_mac_address(ndev); + + /* Set address filter table */ + temac_set_multicast_list(ndev); + if (temac_setoptions(ndev, lp->options)) + dev_err(&ndev->dev, "Error setting TEMAC options\n"); + + /* Init Driver variable */ + ndev->trans_start = jiffies; /* prevent tx timeout */ +} + +void temac_adjust_link(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + struct phy_device *phy = lp->phy_dev; + u32 mii_speed; + int link_state; + + /* hash together the state values to decide if something has changed */ + link_state = phy->speed | (phy->duplex << 1) | phy->link; + + mutex_lock(&lp->indirect_mutex); + if (lp->last_link != link_state) { + mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET); + mii_speed &= ~XTE_EMCFG_LINKSPD_MASK; + + switch (phy->speed) { + case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break; + case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break; + case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break; + } + + /* Write new speed setting out to TEMAC */ + temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed); + lp->last_link = link_state; + phy_print_status(phy); + } + mutex_unlock(&lp->indirect_mutex); +} + +static void temac_start_xmit_done(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + struct cdmac_bd *cur_p; + unsigned int stat = 0; + + cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; + stat = cur_p->app0; + + while (stat & STS_CTRL_APP0_CMPLT) { + dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len, + DMA_TO_DEVICE); + if (cur_p->app4) + dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); + cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app3 = 0; + cur_p->app4 = 0; + + 
ndev->stats.tx_packets++; + ndev->stats.tx_bytes += cur_p->len; + + lp->tx_bd_ci++; + if (lp->tx_bd_ci >= TX_BD_NUM) + lp->tx_bd_ci = 0; + + cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; + stat = cur_p->app0; + } + + netif_wake_queue(ndev); +} + +static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) +{ + struct cdmac_bd *cur_p; + int tail; + + tail = lp->tx_bd_tail; + cur_p = &lp->tx_bd_v[tail]; + + do { + if (cur_p->app0) + return NETDEV_TX_BUSY; + + tail++; + if (tail >= TX_BD_NUM) + tail = 0; + + cur_p = &lp->tx_bd_v[tail]; + num_frag--; + } while (num_frag >= 0); + + return 0; +} + +static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + struct cdmac_bd *cur_p; + dma_addr_t start_p, tail_p; + int ii; + unsigned long num_frag; + skb_frag_t *frag; + + num_frag = skb_shinfo(skb)->nr_frags; + frag = &skb_shinfo(skb)->frags[0]; + start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + + if (temac_check_tx_bd_space(lp, num_frag)) { + if (!netif_queue_stopped(ndev)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + return NETDEV_TX_BUSY; + } + + cur_p->app0 = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + unsigned int csum_start_off = skb_checksum_start_offset(skb); + unsigned int csum_index_off = csum_start_off + skb->csum_offset; + + cur_p->app0 |= 1; /* TX Checksum Enabled */ + cur_p->app1 = (csum_start_off << 16) | csum_index_off; + cur_p->app2 = 0; /* initial checksum seed */ + } + + cur_p->app0 |= STS_CTRL_APP0_SOP; + cur_p->len = skb_headlen(skb); + cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + cur_p->app4 = (unsigned long)skb; + + for (ii = 0; ii < num_frag; ii++) { + lp->tx_bd_tail++; + if (lp->tx_bd_tail >= TX_BD_NUM) + lp->tx_bd_tail = 0; + + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + cur_p->phys = dma_map_single(ndev->dev.parent, + skb_frag_address(frag), + skb_frag_size(frag), DMA_TO_DEVICE); + cur_p->len = skb_frag_size(frag); + cur_p->app0 = 0; + frag++; + } + cur_p->app0 |= STS_CTRL_APP0_EOP; + + tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; + lp->tx_bd_tail++; + if (lp->tx_bd_tail >= TX_BD_NUM) + lp->tx_bd_tail = 0; + + skb_tx_timestamp(skb); + + /* Kick off the transfer */ + lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ + + return NETDEV_TX_OK; +} + + +static void ll_temac_recv(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + struct sk_buff *skb, *new_skb; + unsigned int bdstat; + struct cdmac_bd *cur_p; + dma_addr_t tail_p; + int length; + unsigned long flags; + + spin_lock_irqsave(&lp->rx_lock, flags); + + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; + + bdstat = cur_p->app0; + while ((bdstat & STS_CTRL_APP0_CMPLT)) { + + skb = lp->rx_skb[lp->rx_bd_ci]; + length = cur_p->app4 & 0x3FFF; + + dma_unmap_single(ndev->dev.parent, cur_p->phys, length, + DMA_FROM_DEVICE); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, ndev); + skb_checksum_none_assert(skb); + + /* if we're doing rx csum offload, set it up */ + if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && + (skb->protocol == htons(ETH_P_IP)) && + (skb->len > 64)) { + + skb->csum = cur_p->app3 & 0xFFFF; + skb->ip_summed = CHECKSUM_COMPLETE; + } + + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += length; + + new_skb = netdev_alloc_skb_ip_align(ndev, + 
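For CHECKSUM_PARTIAL skbs, temac_start_xmit() above hands the checksum job to the engine by packing the checksum start offset and insert offset into app1. A worked example for a plain IPv4/TCP frame, assuming the usual 14-byte Ethernet header, 20-byte IP header, and the TCP checksum field at offset 16 of the TCP header; these offsets are standard-protocol numbers, not values taken from the driver.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int csum_start_off = 14 + 20;  /* where checksumming starts: TCP header */
        unsigned int csum_offset    = 16;       /* offset of 'check' inside the TCP header */
        unsigned int csum_index_off = csum_start_off + csum_offset;
        uint32_t app1 = (csum_start_off << 16) | csum_index_off;

        printf("app1 = 0x%08x (start %u, insert at %u)\n",
               app1, csum_start_off, csum_index_off);   /* 0x00220032 */
        return 0;
}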
XTE_MAX_JUMBO_FRAME_SIZE); + if (!new_skb) { + spin_unlock_irqrestore(&lp->rx_lock, flags); + return; + } + + cur_p->app0 = STS_CTRL_APP0_IRQONEND; + cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, + XTE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE; + lp->rx_skb[lp->rx_bd_ci] = new_skb; + + lp->rx_bd_ci++; + if (lp->rx_bd_ci >= RX_BD_NUM) + lp->rx_bd_ci = 0; + + cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; + bdstat = cur_p->app0; + } + lp->dma_out(lp, RX_TAILDESC_PTR, tail_p); + + spin_unlock_irqrestore(&lp->rx_lock, flags); +} + +static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev) +{ + struct net_device *ndev = _ndev; + struct temac_local *lp = netdev_priv(ndev); + unsigned int status; + + status = lp->dma_in(lp, TX_IRQ_REG); + lp->dma_out(lp, TX_IRQ_REG, status); + + if (status & (IRQ_COAL | IRQ_DLY)) + temac_start_xmit_done(lp->ndev); + if (status & 0x080) + dev_err(&ndev->dev, "DMA error 0x%x\n", status); + + return IRQ_HANDLED; +} + +static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev) +{ + struct net_device *ndev = _ndev; + struct temac_local *lp = netdev_priv(ndev); + unsigned int status; + + /* Read and clear the status registers */ + status = lp->dma_in(lp, RX_IRQ_REG); + lp->dma_out(lp, RX_IRQ_REG, status); + + if (status & (IRQ_COAL | IRQ_DLY)) + ll_temac_recv(lp->ndev); + + return IRQ_HANDLED; +} + +static int temac_open(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + int rc; + + dev_dbg(&ndev->dev, "temac_open()\n"); + + if (lp->phy_node) { + lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, + temac_adjust_link, 0, 0); + if (!lp->phy_dev) { + dev_err(lp->dev, "of_phy_connect() failed\n"); + return -ENODEV; + } + + phy_start(lp->phy_dev); + } + + temac_device_reset(ndev); + + rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev); + if (rc) + goto err_tx_irq; + rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev); + if (rc) + goto err_rx_irq; + + return 0; + + err_rx_irq: + free_irq(lp->tx_irq, ndev); + err_tx_irq: + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + lp->phy_dev = NULL; + dev_err(lp->dev, "request_irq() failed\n"); + return rc; +} + +static int temac_stop(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + + dev_dbg(&ndev->dev, "temac_close()\n"); + + free_irq(lp->tx_irq, ndev); + free_irq(lp->rx_irq, ndev); + + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + lp->phy_dev = NULL; + + temac_dma_bd_release(ndev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +temac_poll_controller(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + + disable_irq(lp->tx_irq); + disable_irq(lp->rx_irq); + + ll_temac_rx_irq(lp->tx_irq, ndev); + ll_temac_tx_irq(lp->rx_irq, ndev); + + enable_irq(lp->tx_irq); + enable_irq(lp->rx_irq); +} +#endif + +static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct temac_local *lp = netdev_priv(ndev); + + if (!netif_running(ndev)) + return -EINVAL; + + if (!lp->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(lp->phy_dev, rq, cmd); +} + +static const struct net_device_ops temac_netdev_ops = { + .ndo_open = temac_open, + .ndo_stop = temac_stop, + .ndo_start_xmit = temac_start_xmit, + .ndo_set_mac_address = temac_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = temac_ioctl, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = temac_poll_controller, +#endif +}; + +/* 
--------------------------------------------------------------------- + * SYSFS device attributes + */ +static ssize_t temac_show_llink_regs(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct temac_local *lp = netdev_priv(ndev); + int i, len = 0; + + for (i = 0; i < 0x11; i++) + len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i), + (i % 8) == 7 ? "\n" : " "); + len += sprintf(buf + len, "\n"); + + return len; +} + +static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL); + +static struct attribute *temac_device_attrs[] = { + &dev_attr_llink_regs.attr, + NULL, +}; + +static const struct attribute_group temac_attr_group = { + .attrs = temac_device_attrs, +}; + +/* ethtool support */ +static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct temac_local *lp = netdev_priv(ndev); + return phy_ethtool_gset(lp->phy_dev, cmd); +} + +static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct temac_local *lp = netdev_priv(ndev); + return phy_ethtool_sset(lp->phy_dev, cmd); +} + +static int temac_nway_reset(struct net_device *ndev) +{ + struct temac_local *lp = netdev_priv(ndev); + return phy_start_aneg(lp->phy_dev); +} + +static const struct ethtool_ops temac_ethtool_ops = { + .get_settings = temac_get_settings, + .set_settings = temac_set_settings, + .nway_reset = temac_nway_reset, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static int temac_of_probe(struct platform_device *op) +{ + struct device_node *np; + struct temac_local *lp; + struct net_device *ndev; + const void *addr; + __be32 *p; + int size, rc = 0; + + /* Init network device structure */ + ndev = alloc_etherdev(sizeof(*lp)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(op, ndev); + SET_NETDEV_DEV(ndev, &op->dev); + ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ + ndev->features = NETIF_F_SG; + ndev->netdev_ops = &temac_netdev_ops; + ndev->ethtool_ops = &temac_ethtool_ops; +#if 0 + ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */ + ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */ + ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */ + ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */ + ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */ + ndev->features |= NETIF_F_GSO; /* Enable software GSO. */ + ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */ + ndev->features |= NETIF_F_LRO; /* large receive offload */ +#endif + + /* setup temac private info structure */ + lp = netdev_priv(ndev); + lp->ndev = ndev; + lp->dev = &op->dev; + lp->options = XTE_OPTION_DEFAULTS; + spin_lock_init(&lp->rx_lock); + mutex_init(&lp->indirect_mutex); + + /* map device registers */ + lp->regs = of_iomap(op->dev.of_node, 0); + if (!lp->regs) { + dev_err(&op->dev, "could not map temac regs.\n"); + rc = -ENOMEM; + goto nodev; + } + + /* Setup checksum offload, but default to off if not specified */ + lp->temac_features = 0; + p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); + if (p && be32_to_cpu(*p)) { + lp->temac_features |= TEMAC_FEATURE_TX_CSUM; + /* Can checksum TCP/UDP over IPv4. 
*/ + ndev->features |= NETIF_F_IP_CSUM; + } + p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); + if (p && be32_to_cpu(*p)) + lp->temac_features |= TEMAC_FEATURE_RX_CSUM; + + /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ + np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); + if (!np) { + dev_err(&op->dev, "could not find DMA node\n"); + rc = -ENODEV; + goto err_iounmap; + } + + /* Setup the DMA register accesses, could be DCR or memory mapped */ + if (temac_dcr_setup(lp, op, np)) { + + /* no DCR in the device tree, try non-DCR */ + lp->sdma_regs = of_iomap(np, 0); + if (lp->sdma_regs) { + lp->dma_in = temac_dma_in32; + lp->dma_out = temac_dma_out32; + dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs); + } else { + dev_err(&op->dev, "unable to map DMA registers\n"); + of_node_put(np); + goto err_iounmap; + } + } + + lp->rx_irq = irq_of_parse_and_map(np, 0); + lp->tx_irq = irq_of_parse_and_map(np, 1); + + of_node_put(np); /* Finished with the DMA node; drop the reference */ + + if (!lp->rx_irq || !lp->tx_irq) { + dev_err(&op->dev, "could not determine irqs\n"); + rc = -ENOMEM; + goto err_iounmap_2; + } + + + /* Retrieve the MAC address */ + addr = of_get_property(op->dev.of_node, "local-mac-address", &size); + if ((!addr) || (size != 6)) { + dev_err(&op->dev, "could not find MAC address\n"); + rc = -ENODEV; + goto err_iounmap_2; + } + temac_init_mac_address(ndev, (void *)addr); + + rc = temac_mdio_setup(lp, op->dev.of_node); + if (rc) + dev_warn(&op->dev, "error registering MDIO bus\n"); + + lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); + if (lp->phy_node) + dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); + + /* Add the device attributes */ + rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); + if (rc) { + dev_err(lp->dev, "Error creating sysfs files\n"); + goto err_iounmap_2; + } + + rc = register_netdev(lp->ndev); + if (rc) { + dev_err(lp->dev, "register_netdev() error (%i)\n", rc); + goto err_register_ndev; + } + + return 0; + + err_register_ndev: + sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); + err_iounmap_2: + if (lp->sdma_regs) + iounmap(lp->sdma_regs); + err_iounmap: + iounmap(lp->regs); + nodev: + free_netdev(ndev); + ndev = NULL; + return rc; +} + +static int temac_of_remove(struct platform_device *op) +{ + struct net_device *ndev = platform_get_drvdata(op); + struct temac_local *lp = netdev_priv(ndev); + + temac_mdio_teardown(lp); + unregister_netdev(ndev); + sysfs_remove_group(&lp->dev->kobj, &temac_attr_group); + of_node_put(lp->phy_node); + lp->phy_node = NULL; + iounmap(lp->regs); + if (lp->sdma_regs) + iounmap(lp->sdma_regs); + free_netdev(ndev); + return 0; +} + +static const struct of_device_id temac_of_match[] = { + { .compatible = "xlnx,xps-ll-temac-1.01.b", }, + { .compatible = "xlnx,xps-ll-temac-2.00.a", }, + { .compatible = "xlnx,xps-ll-temac-2.02.a", }, + { .compatible = "xlnx,xps-ll-temac-2.03.a", }, + {}, +}; +MODULE_DEVICE_TABLE(of, temac_of_match); + +static struct platform_driver temac_of_driver = { + .probe = temac_of_probe, + .remove = temac_of_remove, + .driver = { + .name = "xilinx_temac", + .of_match_table = temac_of_match, + }, +}; + +module_platform_driver(temac_of_driver); + +MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver"); +MODULE_AUTHOR("Yoshio Kashiwagi"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c new file mode 100644 index 000000000..8cf9d4f56 --- 
/dev/null +++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c @@ -0,0 +1,122 @@ +/* + * MDIO bus driver for the Xilinx TEMAC device + * + * Copyright (c) 2009 Secret Lab Technologies, Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ll_temac.h" + +/* --------------------------------------------------------------------- + * MDIO Bus functions + */ +static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct temac_local *lp = bus->priv; + u32 rc; + + /* Write the PHY address to the MIIM Access Initiator register. + * When the transfer completes, the PHY register value will appear + * in the LSW0 register */ + mutex_lock(&lp->indirect_mutex); + temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg); + rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET); + mutex_unlock(&lp->indirect_mutex); + + dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n", + phy_id, reg, rc); + + return rc; +} + +static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +{ + struct temac_local *lp = bus->priv; + + dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n", + phy_id, reg, val); + + /* First write the desired value into the write data register + * and then write the address into the access initiator register + */ + mutex_lock(&lp->indirect_mutex); + temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val); + temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg); + mutex_unlock(&lp->indirect_mutex); + + return 0; +} + +int temac_mdio_setup(struct temac_local *lp, struct device_node *np) +{ + struct mii_bus *bus; + const u32 *bus_hz; + int clk_div; + int rc, size; + struct resource res; + + /* Calculate a reasonable divisor for the clock rate */ + clk_div = 0x3f; /* worst-case default setting */ + bus_hz = of_get_property(np, "clock-frequency", &size); + if (bus_hz && size >= sizeof(*bus_hz)) { + clk_div = (*bus_hz) / (2500 * 1000 * 2) - 1; + if (clk_div < 1) + clk_div = 1; + if (clk_div > 0x3f) + clk_div = 0x3f; + } + + /* Enable the MDIO bus by asserting the enable bit and writing + * in the clock config */ + mutex_lock(&lp->indirect_mutex); + temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div); + mutex_unlock(&lp->indirect_mutex); + + bus = mdiobus_alloc(); + if (!bus) + return -ENOMEM; + + of_address_to_resource(np, 0, &res); + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", + (unsigned long long)res.start); + bus->priv = lp; + bus->name = "Xilinx TEMAC MDIO"; + bus->read = temac_mdio_read; + bus->write = temac_mdio_write; + bus->parent = lp->dev; + bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ + + lp->mii_bus = bus; + + rc = of_mdiobus_register(bus, np); + if (rc) + goto err_register; + + mutex_lock(&lp->indirect_mutex); + dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n", + temac_indirect_in32(lp, XTE_MC_OFFSET)); + mutex_unlock(&lp->indirect_mutex); + return 0; + + err_register: + mdiobus_free(bus); + return rc; +} + +void temac_mdio_teardown(struct temac_local *lp) +{ + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); + lp->mii_bus = NULL; +} + diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h new file mode 100644 index 000000000..4c9b4fa1d --- /dev/null +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -0,0 +1,504 @@ +/* + * Definitions for Xilinx Axi Ethernet device driver. + * + * Copyright (c) 2009 Secret Lab Technologies, Ltd. + * Copyright (c) 2010 - 2012 Xilinx, Inc. 
All rights reserved. + */ + +#ifndef XILINX_AXIENET_H +#define XILINX_AXIENET_H + +#include +#include +#include + +/* Packet size info */ +#define XAE_HDR_SIZE 14 /* Size of Ethernet header */ +#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */ +#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */ +#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */ +#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */ + +#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE) +#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE) +#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE) + +/* Configuration options */ + +/* Accept all incoming packets. Default: disabled (cleared) */ +#define XAE_OPTION_PROMISC (1 << 0) + +/* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */ +#define XAE_OPTION_JUMBO (1 << 1) + +/* VLAN Rx & Tx frame support. Default: disabled (cleared) */ +#define XAE_OPTION_VLAN (1 << 2) + +/* Enable recognition of flow control frames on Rx. Default: enabled (set) */ +#define XAE_OPTION_FLOW_CONTROL (1 << 4) + +/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not + * stripped. Default: disabled (set) */ +#define XAE_OPTION_FCS_STRIP (1 << 5) + +/* Generate FCS field and add PAD automatically for outgoing frames. + * Default: enabled (set) */ +#define XAE_OPTION_FCS_INSERT (1 << 6) + +/* Enable Length/Type error checking for incoming frames. When this option is + * set, the MAC will filter frames that have a mismatched type/length field + * and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these + * types of frames are encountered. When this option is cleared, the MAC will + * allow these types of frames to be received. Default: enabled (set) */ +#define XAE_OPTION_LENTYPE_ERR (1 << 7) + +/* Enable the transmitter. Default: enabled (set) */ +#define XAE_OPTION_TXEN (1 << 11) + +/* Enable the receiver. 
Default: enabled (set) */ +#define XAE_OPTION_RXEN (1 << 12) + +/* Default options set when device is initialized or reset */ +#define XAE_OPTION_DEFAULTS \ + (XAE_OPTION_TXEN | \ + XAE_OPTION_FLOW_CONTROL | \ + XAE_OPTION_RXEN) + +/* Axi DMA Register definitions */ + +#define XAXIDMA_TX_CR_OFFSET 0x00000000 /* Channel control */ +#define XAXIDMA_TX_SR_OFFSET 0x00000004 /* Status */ +#define XAXIDMA_TX_CDESC_OFFSET 0x00000008 /* Current descriptor pointer */ +#define XAXIDMA_TX_TDESC_OFFSET 0x00000010 /* Tail descriptor pointer */ + +#define XAXIDMA_RX_CR_OFFSET 0x00000030 /* Channel control */ +#define XAXIDMA_RX_SR_OFFSET 0x00000034 /* Status */ +#define XAXIDMA_RX_CDESC_OFFSET 0x00000038 /* Current descriptor pointer */ +#define XAXIDMA_RX_TDESC_OFFSET 0x00000040 /* Tail descriptor pointer */ + +#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */ +#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */ + +#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */ +#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */ +#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */ +#define XAXIDMA_BD_STS_OFFSET 0x1C /* Status */ +#define XAXIDMA_BD_USR0_OFFSET 0x20 /* User IP specific word0 */ +#define XAXIDMA_BD_USR1_OFFSET 0x24 /* User IP specific word1 */ +#define XAXIDMA_BD_USR2_OFFSET 0x28 /* User IP specific word2 */ +#define XAXIDMA_BD_USR3_OFFSET 0x2C /* User IP specific word3 */ +#define XAXIDMA_BD_USR4_OFFSET 0x30 /* User IP specific word4 */ +#define XAXIDMA_BD_ID_OFFSET 0x34 /* Sw ID */ +#define XAXIDMA_BD_HAS_STSCNTRL_OFFSET 0x38 /* Whether has stscntrl strm */ +#define XAXIDMA_BD_HAS_DRE_OFFSET 0x3C /* Whether has DRE */ + +#define XAXIDMA_BD_HAS_DRE_SHIFT 8 /* Whether has DRE shift */ +#define XAXIDMA_BD_HAS_DRE_MASK 0xF00 /* Whether has DRE mask */ +#define XAXIDMA_BD_WORDLEN_MASK 0xFF /* Whether has DRE mask */ + +#define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */ +#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ +#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ +#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */ + +#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */ +#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */ + +#define XAXIDMA_DELAY_SHIFT 24 +#define XAXIDMA_COALESCE_SHIFT 16 + +#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */ +#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */ +#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */ +#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */ + +/* Default TX/RX Threshold and waitbound values for SGDMA mode */ +#define XAXIDMA_DFT_TX_THRESHOLD 24 +#define XAXIDMA_DFT_TX_WAITBOUND 254 +#define XAXIDMA_DFT_RX_THRESHOLD 24 +#define XAXIDMA_DFT_RX_WAITBOUND 254 + +#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ +#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ +#define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */ + +#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */ +#define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */ +#define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */ +#define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */ +#define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */ +#define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */ +#define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */ +#define 
XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */ +#define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */ + +#define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40 + +/* Axi Ethernet registers definition */ +#define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */ +#define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */ +#define XAE_IFGP_OFFSET 0x00000008 /* Tx Inter-frame gap adjustment*/ +#define XAE_IS_OFFSET 0x0000000C /* Interrupt status */ +#define XAE_IP_OFFSET 0x00000010 /* Interrupt pending */ +#define XAE_IE_OFFSET 0x00000014 /* Interrupt enable */ +#define XAE_TTAG_OFFSET 0x00000018 /* Tx VLAN TAG */ +#define XAE_RTAG_OFFSET 0x0000001C /* Rx VLAN TAG */ +#define XAE_UAWL_OFFSET 0x00000020 /* Unicast address word lower */ +#define XAE_UAWU_OFFSET 0x00000024 /* Unicast address word upper */ +#define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */ +#define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */ +#define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */ +#define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */ +#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */ +#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */ +#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */ +#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */ +#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */ +#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */ +#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */ +#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */ +#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */ +#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */ +#define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending + * register offset */ +#define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable + * register offset */ +#define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear + * register offset. */ +#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */ +#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */ +#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */ +#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */ +#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */ + +#define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */ +#define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */ +#define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */ + +/* Bit Masks for Axi Ethernet RAF register */ +#define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast + * destination address */ +#define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast + * destination address */ +#define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */ +#define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */ +#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */ +#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */ +#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */ +#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Exteneded Multicast + * Filtering mode + */ +#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. 
Counter Reset */ +#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */ +#define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */ +#define XAE_RAF_RXVTAGMODE_SHIFT 5 /* Rx Tag mode shift bits */ +#define XAE_RAF_TXVSTRPMODE_SHIFT 7 /* Tx strip mode shift bits*/ +#define XAE_RAF_RXVSTRPMODE_SHIFT 9 /* Rx Strip mode shift bits*/ + +/* Bit Masks for Axi Ethernet TPF and IFGP registers */ +#define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */ +#define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame + * gap adjustment value */ + +/* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply + * for all 3 registers. */ +#define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access + * complete */ +#define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation + * complete */ +#define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */ +#define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */ +#define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */ +#define XAE_INT_TXCMPIT_MASK 0x00000020 /* Tx complete */ +#define XAE_INT_RXDCMLOCK_MASK 0x00000040 /* Rx Dcm Lock */ +#define XAE_INT_MGTRDY_MASK 0x00000080 /* MGT clock Lock */ +#define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */ +#define XAE_INT_ALL_MASK 0x0000003F /* All the ints */ + +#define XAE_INT_RECV_ERROR_MASK \ + (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that + * indicate receive + * errors */ + +/* Bit masks for Axi Ethernet VLAN TPID Word 0 register */ +#define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */ +#define XAE_TPID_1_MASK 0xFFFF0000 /* TPID 1 */ + +/* Bit masks for Axi Ethernet VLAN TPID Word 1 register */ +#define XAE_TPID_2_MASK 0x0000FFFF /* TPID 0 */ +#define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 1 */ + +/* Bit masks for Axi Ethernet RCW1 register */ +#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */ +#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */ +#define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable + * (FCS not stripped) */ +#define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */ +#define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */ +#define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check + * disable */ +#define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check + * disable */ +#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address + * bits [47:32]. 
Bits [31:0] are + * stored in register RCW0 */ + +/* Bit masks for Axi Ethernet TC register */ +#define XAE_TC_RST_MASK 0x80000000 /* Reset */ +#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */ +#define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable + * (FCS not generated) */ +#define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */ +#define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */ +#define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment + * enable */ + +/* Bit masks for Axi Ethernet FCC register */ +#define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */ +#define XAE_FCC_FCTX_MASK 0x40000000 /* Tx flow control enable */ + +/* Bit masks for Axi Ethernet EMMC register */ +#define XAE_EMMC_LINKSPEED_MASK 0xC0000000 /* Link speed */ +#define XAE_EMMC_RGMII_MASK 0x20000000 /* RGMII mode enable */ +#define XAE_EMMC_SGMII_MASK 0x10000000 /* SGMII mode enable */ +#define XAE_EMMC_GPCS_MASK 0x08000000 /* 1000BaseX mode enable */ +#define XAE_EMMC_HOST_MASK 0x04000000 /* Host interface enable */ +#define XAE_EMMC_TX16BIT 0x02000000 /* 16 bit Tx client enable */ +#define XAE_EMMC_RX16BIT 0x01000000 /* 16 bit Rx client enable */ +#define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */ +#define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */ +#define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */ + +/* Bit masks for Axi Ethernet PHYC register */ +#define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/ +#define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */ +#define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */ +#define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */ +#define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */ +#define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */ +#define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */ +#define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */ +#define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */ +#define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */ + +/* Bit masks for Axi Ethernet MDIO interface MC register */ +#define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */ +#define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */ + +/* Bit masks for Axi Ethernet MDIO interface MCR register */ +#define XAE_MDIO_MCR_PHYAD_MASK 0x1F000000 /* Phy Address Mask */ +#define XAE_MDIO_MCR_PHYAD_SHIFT 24 /* Phy Address Shift */ +#define XAE_MDIO_MCR_REGAD_MASK 0x001F0000 /* Reg Address Mask */ +#define XAE_MDIO_MCR_REGAD_SHIFT 16 /* Reg Address Shift */ +#define XAE_MDIO_MCR_OP_MASK 0x0000C000 /* Operation Code Mask */ +#define XAE_MDIO_MCR_OP_SHIFT 13 /* Operation Code Shift */ +#define XAE_MDIO_MCR_OP_READ_MASK 0x00008000 /* Op Code Read Mask */ +#define XAE_MDIO_MCR_OP_WRITE_MASK 0x00004000 /* Op Code Write Mask */ +#define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Ready Mask */ +#define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */ + +/* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */ +#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */ + +/* Bit masks for Axi Ethernet UAW1 register */ +#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits + * [47:32]; Station address + * bits [31:0] are stored in + * register UAW0 */ + +/* Bit masks for Axi Ethernet FMI register */ +#define XAE_FMI_PM_MASK 0x80000000 /* Promis. 
mode enable */ +#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */ + +#define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */ + +/* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */ +#define XAE_PHY_TYPE_MII 0 +#define XAE_PHY_TYPE_GMII 1 +#define XAE_PHY_TYPE_RGMII_1_3 2 +#define XAE_PHY_TYPE_RGMII_2_0 3 +#define XAE_PHY_TYPE_SGMII 4 +#define XAE_PHY_TYPE_1000BASE_X 5 + +#define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the + * hardware multicast table. */ + +/* Axi Ethernet Synthesis features */ +#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0) +#define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1) +#define XAE_FEATURE_FULL_RX_CSUM (1 << 2) +#define XAE_FEATURE_FULL_TX_CSUM (1 << 3) + +#define XAE_NO_CSUM_OFFLOAD 0 + +#define XAE_FULL_CSUM_STATUS_MASK 0x00000038 +#define XAE_IP_UDP_CSUM_VALIDATED 0x00000003 +#define XAE_IP_TCP_CSUM_VALIDATED 0x00000002 + +#define DELAY_OF_ONE_MILLISEC 1000 + +/** + * struct axidma_bd - Axi Dma buffer descriptor layout + * @next: MM2S/S2MM Next Descriptor Pointer + * @reserved1: Reserved and not used + * @phys: MM2S/S2MM Buffer Address + * @reserved2: Reserved and not used + * @reserved3: Reserved and not used + * @reserved4: Reserved and not used + * @cntrl: MM2S/S2MM Control value + * @status: MM2S/S2MM Status value + * @app0: MM2S/S2MM User Application Field 0. + * @app1: MM2S/S2MM User Application Field 1. + * @app2: MM2S/S2MM User Application Field 2. + * @app3: MM2S/S2MM User Application Field 3. + * @app4: MM2S/S2MM User Application Field 4. + * @sw_id_offset: MM2S/S2MM Sw ID + * @reserved5: Reserved and not used + * @reserved6: Reserved and not used + */ +struct axidma_bd { + u32 next; /* Physical address of next buffer descriptor */ + u32 reserved1; + u32 phys; + u32 reserved2; + u32 reserved3; + u32 reserved4; + u32 cntrl; + u32 status; + u32 app0; + u32 app1; /* TX start << 16 | insert */ + u32 app2; /* TX csum seed */ + u32 app3; + u32 app4; + u32 sw_id_offset; + u32 reserved5; + u32 reserved6; +}; + +/** + * struct axienet_local - axienet private per device data + * @ndev: Pointer for net_device to which it will be attached. + * @dev: Pointer to device structure + * @phy_dev: Pointer to PHY device structure attached to the axienet_local + * @phy_node: Pointer to device node structure + * @mii_bus: Pointer to MII bus structure + * @mdio_irqs: IRQs table for MDIO bus required in mii_bus structure + * @regs: Base address for the axienet_local device address space + * @dma_regs: Base address for the axidma device address space + * @dma_err_tasklet: Tasklet structure to process Axi DMA errors + * @tx_irq: Axidma TX IRQ number + * @rx_irq: Axidma RX IRQ number + * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X + * @options: AxiEthernet option word + * @last_link: Phy link state in which the PHY was negotiated earlier + * @features: Stores the extended features supported by the axienet hw + * @tx_bd_v: Virtual address of the TX buffer descriptor ring + * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring + * @rx_bd_v: Virtual address of the RX buffer descriptor ring + * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring + * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being + * accessed currently. Used while alloc. BDs before a TX starts + * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being + * accessed currently. Used while processing BDs after the TX + * completed. 
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being + * accessed currently. + * @max_frm_size: Stores the maximum size of the frame that can be that + * Txed/Rxed in the existing hardware. If jumbo option is + * supported, the maximum frame size would be 9k. Else it is + * 1522 bytes (assuming support for basic VLAN) + * @jumbo_support: Stores hardware configuration for jumbo support. If hardware + * can handle jumbo packets, this entry will be 1, else 0. + */ +struct axienet_local { + struct net_device *ndev; + struct device *dev; + + /* Connection to PHY device */ + struct phy_device *phy_dev; /* Pointer to PHY device */ + struct device_node *phy_node; + + /* MDIO bus data */ + struct mii_bus *mii_bus; /* MII bus reference */ + int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ + + /* IO registers, dma functions and IRQs */ + void __iomem *regs; + void __iomem *dma_regs; + + struct tasklet_struct dma_err_tasklet; + + int tx_irq; + int rx_irq; + u32 phy_type; + + u32 options; /* Current options word */ + u32 last_link; + u32 features; + + /* Buffer descriptors */ + struct axidma_bd *tx_bd_v; + dma_addr_t tx_bd_p; + struct axidma_bd *rx_bd_v; + dma_addr_t rx_bd_p; + u32 tx_bd_ci; + u32 tx_bd_tail; + u32 rx_bd_ci; + + u32 max_frm_size; + u32 jumbo_support; + + int csum_offload_on_tx_path; + int csum_offload_on_rx_path; + + u32 coalesce_count_rx; + u32 coalesce_count_tx; +}; + +/** + * struct axiethernet_option - Used to set axi ethernet hardware options + * @opt: Option to be set. + * @reg: Register offset to be written for setting the option + * @m_or: Mask to be ORed for setting the option in the register + */ +struct axienet_option { + u32 opt; + u32 reg; + u32 m_or; +}; + +/** + * axienet_ior - Memory mapped Axi Ethernet register read + * @lp: Pointer to axienet local structure + * @offset: Address offset from the base address of Axi Ethernet core + * + * returns: The contents of the Axi Ethernet register + * + * This function returns the contents of the corresponding register. + */ +static inline u32 axienet_ior(struct axienet_local *lp, off_t offset) +{ + return in_be32(lp->regs + offset); +} + +/** + * axienet_iow - Memory mapped Axi Ethernet register write + * @lp: Pointer to axienet local structure + * @offset: Address offset from the base address of Axi Ethernet core + * @value: Value to be written into the Axi Ethernet register + * + * This function writes the desired value into the corresponding Axi Ethernet + * register. + */ +static inline void axienet_iow(struct axienet_local *lp, off_t offset, + u32 value) +{ + out_be32((lp->regs + offset), value); +} + +/* Function prototypes visible in xilinx_axienet_mdio.c for other files */ +int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np); +int axienet_mdio_wait_until_ready(struct axienet_local *lp); +void axienet_mdio_teardown(struct axienet_local *lp); + +#endif /* XILINX_AXI_ENET_H */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c new file mode 100644 index 000000000..28b7e7d9c --- /dev/null +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -0,0 +1,1654 @@ +/* + * Xilinx Axi Ethernet device driver + * + * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi + * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. + * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 
+ * Copyright (c) 2010 - 2011 Michal Simek + * Copyright (c) 2010 - 2011 PetaLogix + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. + * + * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 + * and Spartan6. + * + * TODO: + * - Add Axi Fifo support. + * - Factor out Axi DMA code into separate driver. + * - Test and fix basic multicast filtering. + * - Add support for extended multicast filtering. + * - Test basic VLAN support. + * - Add support for extended VLAN support. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "xilinx_axienet.h" + +/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */ +#define TX_BD_NUM 64 +#define RX_BD_NUM 128 + +/* Must be shorter than length of ethtool_drvinfo.driver field to fit */ +#define DRIVER_NAME "xaxienet" +#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" +#define DRIVER_VERSION "1.00a" + +#define AXIENET_REGS_N 32 + +/* Match table for of_platform binding */ +static const struct of_device_id axienet_of_match[] = { + { .compatible = "xlnx,axi-ethernet-1.00.a", }, + { .compatible = "xlnx,axi-ethernet-1.01.a", }, + { .compatible = "xlnx,axi-ethernet-2.01.a", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, axienet_of_match); + +/* Option table for setting up Axi Ethernet hardware options */ +static struct axienet_option axienet_options[] = { + /* Turn on jumbo packet support for both Rx and Tx */ + { + .opt = XAE_OPTION_JUMBO, + .reg = XAE_TC_OFFSET, + .m_or = XAE_TC_JUM_MASK, + }, { + .opt = XAE_OPTION_JUMBO, + .reg = XAE_RCW1_OFFSET, + .m_or = XAE_RCW1_JUM_MASK, + }, { /* Turn on VLAN packet support for both Rx and Tx */ + .opt = XAE_OPTION_VLAN, + .reg = XAE_TC_OFFSET, + .m_or = XAE_TC_VLAN_MASK, + }, { + .opt = XAE_OPTION_VLAN, + .reg = XAE_RCW1_OFFSET, + .m_or = XAE_RCW1_VLAN_MASK, + }, { /* Turn on FCS stripping on receive packets */ + .opt = XAE_OPTION_FCS_STRIP, + .reg = XAE_RCW1_OFFSET, + .m_or = XAE_RCW1_FCS_MASK, + }, { /* Turn on FCS insertion on transmit packets */ + .opt = XAE_OPTION_FCS_INSERT, + .reg = XAE_TC_OFFSET, + .m_or = XAE_TC_FCS_MASK, + }, { /* Turn off length/type field checking on receive packets */ + .opt = XAE_OPTION_LENTYPE_ERR, + .reg = XAE_RCW1_OFFSET, + .m_or = XAE_RCW1_LT_DIS_MASK, + }, { /* Turn on Rx flow control */ + .opt = XAE_OPTION_FLOW_CONTROL, + .reg = XAE_FCC_OFFSET, + .m_or = XAE_FCC_FCRX_MASK, + }, { /* Turn on Tx flow control */ + .opt = XAE_OPTION_FLOW_CONTROL, + .reg = XAE_FCC_OFFSET, + .m_or = XAE_FCC_FCTX_MASK, + }, { /* Turn on promiscuous frame filtering */ + .opt = XAE_OPTION_PROMISC, + .reg = XAE_FMI_OFFSET, + .m_or = XAE_FMI_PM_MASK, + }, { /* Enable transmitter */ + .opt = XAE_OPTION_TXEN, + .reg = XAE_TC_OFFSET, + .m_or = XAE_TC_TX_MASK, + }, { /* Enable receiver */ + .opt = XAE_OPTION_RXEN, + .reg = XAE_RCW1_OFFSET, + .m_or = XAE_RCW1_RX_MASK, + }, + {} +}; + +/** + * axienet_dma_in32 - Memory mapped Axi DMA register read + * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * + * returns: The contents of the Axi DMA register + * + * This function returns the contents of the corresponding Axi DMA register. + */ +static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) +{ + return in_be32(lp->dma_regs + reg); +} + +/** + * axienet_dma_out32 - Memory mapped Axi DMA register write. 
+ * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * @value: Value to be written into the Axi DMA register + * + * This function writes the desired value into the corresponding Axi DMA + * register. + */ +static inline void axienet_dma_out32(struct axienet_local *lp, + off_t reg, u32 value) +{ + out_be32((lp->dma_regs + reg), value); +} + +/** + * axienet_dma_bd_release - Release buffer descriptor rings + * @ndev: Pointer to the net_device structure + * + * This function is used to release the descriptors allocated in + * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet + * driver stop api is called. + */ +static void axienet_dma_bd_release(struct net_device *ndev) +{ + int i; + struct axienet_local *lp = netdev_priv(ndev); + + for (i = 0; i < RX_BD_NUM; i++) { + dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys, + lp->max_frm_size, DMA_FROM_DEVICE); + dev_kfree_skb((struct sk_buff *) + (lp->rx_bd_v[i].sw_id_offset)); + } + + if (lp->rx_bd_v) { + dma_free_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + lp->rx_bd_v, + lp->rx_bd_p); + } + if (lp->tx_bd_v) { + dma_free_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + lp->tx_bd_v, + lp->tx_bd_p); + } +} + +/** + * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA + * @ndev: Pointer to the net_device structure + * + * returns: 0, on success + * -ENOMEM, on failure + * + * This function is called to initialize the Rx and Tx DMA descriptor + * rings. This initializes the descriptors with required default values + * and is called when Axi Ethernet driver reset is called. + */ +static int axienet_dma_bd_init(struct net_device *ndev) +{ + u32 cr; + int i; + struct sk_buff *skb; + struct axienet_local *lp = netdev_priv(ndev); + + /* Reset the indexes which are used for accessing the BDs */ + lp->tx_bd_ci = 0; + lp->tx_bd_tail = 0; + lp->rx_bd_ci = 0; + + /* + * Allocate the Tx and Rx buffer descriptors. 
+ */ + lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->tx_bd_v) * TX_BD_NUM, + &lp->tx_bd_p, GFP_KERNEL); + if (!lp->tx_bd_v) + goto out; + + lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, + sizeof(*lp->rx_bd_v) * RX_BD_NUM, + &lp->rx_bd_p, GFP_KERNEL); + if (!lp->rx_bd_v) + goto out; + + for (i = 0; i < TX_BD_NUM; i++) { + lp->tx_bd_v[i].next = lp->tx_bd_p + + sizeof(*lp->tx_bd_v) * + ((i + 1) % TX_BD_NUM); + } + + for (i = 0; i < RX_BD_NUM; i++) { + lp->rx_bd_v[i].next = lp->rx_bd_p + + sizeof(*lp->rx_bd_v) * + ((i + 1) % RX_BD_NUM); + + skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); + if (!skb) + goto out; + + lp->rx_bd_v[i].sw_id_offset = (u32) skb; + lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, + skb->data, + lp->max_frm_size, + DMA_FROM_DEVICE); + lp->rx_bd_v[i].cntrl = lp->max_frm_size; + } + + /* Start updating the Rx channel control register */ + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = ((cr & ~XAXIDMA_COALESCE_MASK) | + ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = ((cr & ~XAXIDMA_DELAY_MASK) | + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Write to the Rx channel control register */ + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + + /* Start updating the Tx channel control register */ + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | + ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = (((cr & ~XAXIDMA_DELAY_MASK)) | + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Write to the Tx channel control register */ + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + + /* Populate the tail pointer and bring the Rx Axi DMA engine out of + * halted state. This will make the Rx side ready for reception.*/ + axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, + cr | XAXIDMA_CR_RUNSTOP_MASK); + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); + + /* Write to the RS (Run-stop) bit in the Tx channel control register. + * Tx channel is now ready to run. But only after we write to the + * tail pointer register that the Tx channel will start transmitting */ + axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, + cr | XAXIDMA_CR_RUNSTOP_MASK); + + return 0; +out: + axienet_dma_bd_release(ndev); + return -ENOMEM; +} + +/** + * axienet_set_mac_address - Write the MAC address + * @ndev: Pointer to the net_device structure + * @address: 6 byte Address to be written as MAC address + * + * This function is called to initialize the MAC address of the Axi Ethernet + * core. It writes to the UAW0 and UAW1 registers of the core. 
+ */ +static void axienet_set_mac_address(struct net_device *ndev, void *address) +{ + struct axienet_local *lp = netdev_priv(ndev); + + if (address) + memcpy(ndev->dev_addr, address, ETH_ALEN); + if (!is_valid_ether_addr(ndev->dev_addr)) + eth_random_addr(ndev->dev_addr); + + /* Set up unicast MAC address filter set its mac address */ + axienet_iow(lp, XAE_UAW0_OFFSET, + (ndev->dev_addr[0]) | + (ndev->dev_addr[1] << 8) | + (ndev->dev_addr[2] << 16) | + (ndev->dev_addr[3] << 24)); + axienet_iow(lp, XAE_UAW1_OFFSET, + (((axienet_ior(lp, XAE_UAW1_OFFSET)) & + ~XAE_UAW1_UNICASTADDR_MASK) | + (ndev->dev_addr[4] | + (ndev->dev_addr[5] << 8)))); +} + +/** + * netdev_set_mac_address - Write the MAC address (from outside the driver) + * @ndev: Pointer to the net_device structure + * @p: 6 byte Address to be written as MAC address + * + * returns: 0 for all conditions. Presently, there is no failure case. + * + * This function is called to initialize the MAC address of the Axi Ethernet + * core. It calls the core specific axienet_set_mac_address. This is the + * function that goes into net_device_ops structure entry ndo_set_mac_address. + */ +static int netdev_set_mac_address(struct net_device *ndev, void *p) +{ + struct sockaddr *addr = p; + axienet_set_mac_address(ndev, addr->sa_data); + return 0; +} + +/** + * axienet_set_multicast_list - Prepare the multicast table + * @ndev: Pointer to the net_device structure + * + * This function is called to initialize the multicast table during + * initialization. The Axi Ethernet basic multicast support has a four-entry + * multicast table which is initialized here. Additionally this function + * goes into the net_device_ops structure entry ndo_set_multicast_list. This + * means whenever the multicast table entries need to be updated this + * function gets called. + */ +static void axienet_set_multicast_list(struct net_device *ndev) +{ + int i; + u32 reg, af0reg, af1reg; + struct axienet_local *lp = netdev_priv(ndev); + + if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || + netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { + /* We must make the kernel realize we had to move into + * promiscuous mode. If it was a promiscuous mode request + * the flag is already set. If not we set it. 
*/ + ndev->flags |= IFF_PROMISC; + reg = axienet_ior(lp, XAE_FMI_OFFSET); + reg |= XAE_FMI_PM_MASK; + axienet_iow(lp, XAE_FMI_OFFSET, reg); + dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); + } else if (!netdev_mc_empty(ndev)) { + struct netdev_hw_addr *ha; + + i = 0; + netdev_for_each_mc_addr(ha, ndev) { + if (i >= XAE_MULTICAST_CAM_TABLE_NUM) + break; + + af0reg = (ha->addr[0]); + af0reg |= (ha->addr[1] << 8); + af0reg |= (ha->addr[2] << 16); + af0reg |= (ha->addr[3] << 24); + + af1reg = (ha->addr[4]); + af1reg |= (ha->addr[5] << 8); + + reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; + reg |= i; + + axienet_iow(lp, XAE_FMI_OFFSET, reg); + axienet_iow(lp, XAE_AF0_OFFSET, af0reg); + axienet_iow(lp, XAE_AF1_OFFSET, af1reg); + i++; + } + } else { + reg = axienet_ior(lp, XAE_FMI_OFFSET); + reg &= ~XAE_FMI_PM_MASK; + + axienet_iow(lp, XAE_FMI_OFFSET, reg); + + for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { + reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; + reg |= i; + + axienet_iow(lp, XAE_FMI_OFFSET, reg); + axienet_iow(lp, XAE_AF0_OFFSET, 0); + axienet_iow(lp, XAE_AF1_OFFSET, 0); + } + + dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); + } +} + +/** + * axienet_setoptions - Set an Axi Ethernet option + * @ndev: Pointer to the net_device structure + * @options: Option to be enabled/disabled + * + * The Axi Ethernet core has multiple features which can be selectively turned + * on or off. The typical options could be jumbo frame option, basic VLAN + * option, promiscuous mode option etc. This function is used to set or clear + * these options in the Axi Ethernet hardware. This is done through + * axienet_option structure . + */ +static void axienet_setoptions(struct net_device *ndev, u32 options) +{ + int reg; + struct axienet_local *lp = netdev_priv(ndev); + struct axienet_option *tp = &axienet_options[0]; + + while (tp->opt) { + reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); + if (options & tp->opt) + reg |= tp->m_or; + axienet_iow(lp, tp->reg, reg); + tp++; + } + + lp->options |= options; +} + +static void __axienet_device_reset(struct axienet_local *lp, + struct device *dev, off_t offset) +{ + u32 timeout; + /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset + * process of Axi DMA takes a while to complete as all pending + * commands/transfers will be flushed or completed during this + * reset process. */ + axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK); + timeout = DELAY_OF_ONE_MILLISEC; + while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) { + udelay(1); + if (--timeout == 0) { + dev_err(dev, "axienet_device_reset DMA " + "reset timeout!\n"); + break; + } + } +} + +/** + * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. + * @ndev: Pointer to the net_device structure + * + * This function is called to reset and initialize the Axi Ethernet core. This + * is typically called during initialization. It does a reset of the Axi DMA + * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines + * areconnected to Axi Ethernet reset lines, this in turn resets the Axi + * Ethernet core. No separate hardware reset is done for the Axi Ethernet + * core. 
+ */ +static void axienet_device_reset(struct net_device *ndev) +{ + u32 axienet_status; + struct axienet_local *lp = netdev_priv(ndev); + + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET); + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET); + + lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; + lp->options &= (~XAE_OPTION_JUMBO); + + if ((ndev->mtu > XAE_MTU) && + (ndev->mtu <= XAE_JUMBO_MTU) && + (lp->jumbo_support)) { + lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE + + XAE_TRL_SIZE; + lp->options |= XAE_OPTION_JUMBO; + } + + if (axienet_dma_bd_init(ndev)) { + dev_err(&ndev->dev, "axienet_device_reset descriptor " + "allocation failed\n"); + } + + axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); + axienet_status &= ~XAE_RCW1_RX_MASK; + axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); + + axienet_status = axienet_ior(lp, XAE_IP_OFFSET); + if (axienet_status & XAE_INT_RXRJECT_MASK) + axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); + + axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); + + /* Sync default options with HW but leave receiver and + * transmitter disabled.*/ + axienet_setoptions(ndev, lp->options & + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); + axienet_set_mac_address(ndev, NULL); + axienet_set_multicast_list(ndev); + axienet_setoptions(ndev, lp->options); + + ndev->trans_start = jiffies; +} + +/** + * axienet_adjust_link - Adjust the PHY link speed/duplex. + * @ndev: Pointer to the net_device structure + * + * This function is called to change the speed and duplex setting after + * auto negotiation is done by the PHY. This is the function that gets + * registered with the PHY interface through the "of_phy_connect" call. + */ +static void axienet_adjust_link(struct net_device *ndev) +{ + u32 emmc_reg; + u32 link_state; + u32 setspeed = 1; + struct axienet_local *lp = netdev_priv(ndev); + struct phy_device *phy = lp->phy_dev; + + link_state = phy->speed | (phy->duplex << 1) | phy->link; + if (lp->last_link != link_state) { + if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) { + if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X) + setspeed = 0; + } else { + if ((phy->speed == SPEED_1000) && + (lp->phy_type == XAE_PHY_TYPE_MII)) + setspeed = 0; + } + + if (setspeed == 1) { + emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); + emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; + + switch (phy->speed) { + case SPEED_1000: + emmc_reg |= XAE_EMMC_LINKSPD_1000; + break; + case SPEED_100: + emmc_reg |= XAE_EMMC_LINKSPD_100; + break; + case SPEED_10: + emmc_reg |= XAE_EMMC_LINKSPD_10; + break; + default: + dev_err(&ndev->dev, "Speed other than 10, 100 " + "or 1Gbps is not supported\n"); + break; + } + + axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); + lp->last_link = link_state; + phy_print_status(phy); + } else { + dev_err(&ndev->dev, "Error setting Axi Ethernet " + "mac speed\n"); + } + } +} + +/** + * axienet_start_xmit_done - Invoked once a transmit is completed by the + * Axi DMA Tx channel. + * @ndev: Pointer to the net_device structure + * + * This function is invoked from the Axi DMA Tx isr to notify the completion + * of transmit operation. It clears fields in the corresponding Tx BDs and + * unmaps the corresponding buffer so that CPU can regain ownership of the + * buffer. It finally invokes "netif_wake_queue" to restart transmission if + * required. 
+ */ +static void axienet_start_xmit_done(struct net_device *ndev) +{ + u32 size = 0; + u32 packets = 0; + struct axienet_local *lp = netdev_priv(ndev); + struct axidma_bd *cur_p; + unsigned int status = 0; + + cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; + status = cur_p->status; + while (status & XAXIDMA_BD_STS_COMPLETE_MASK) { + dma_unmap_single(ndev->dev.parent, cur_p->phys, + (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), + DMA_TO_DEVICE); + if (cur_p->app4) + dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); + /*cur_p->phys = 0;*/ + cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app4 = 0; + cur_p->status = 0; + + size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; + packets++; + + ++lp->tx_bd_ci; + lp->tx_bd_ci %= TX_BD_NUM; + cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; + status = cur_p->status; + } + + ndev->stats.tx_packets += packets; + ndev->stats.tx_bytes += size; + netif_wake_queue(ndev); +} + +/** + * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy + * @lp: Pointer to the axienet_local structure + * @num_frag: The number of BDs to check for + * + * returns: 0, on success + * NETDEV_TX_BUSY, if any of the descriptors are not free + * + * This function is invoked before BDs are allocated and transmission starts. + * This function returns 0 if a BD or group of BDs can be allocated for + * transmission. If the BD or any of the BDs are not free the function + * returns a busy status. This is invoked from axienet_start_xmit. + */ +static inline int axienet_check_tx_bd_space(struct axienet_local *lp, + int num_frag) +{ + struct axidma_bd *cur_p; + cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM]; + if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) + return NETDEV_TX_BUSY; + return 0; +} + +/** + * axienet_start_xmit - Starts the transmission. + * @skb: sk_buff pointer that contains data to be Txed. + * @ndev: Pointer to net_device structure. + * + * returns: NETDEV_TX_OK, on success + * NETDEV_TX_BUSY, if any of the descriptors are not free + * + * This function is invoked from upper layers to initiate transmission. The + * function uses the next available free BDs and populates their fields to + * start the transmission. Additionally if checksum offloading is supported, + * it populates AXI Stream Control fields with appropriate values. 
+ */ +static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + u32 ii; + u32 num_frag; + u32 csum_start_off; + u32 csum_index_off; + skb_frag_t *frag; + dma_addr_t tail_p; + struct axienet_local *lp = netdev_priv(ndev); + struct axidma_bd *cur_p; + + num_frag = skb_shinfo(skb)->nr_frags; + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + + if (axienet_check_tx_bd_space(lp, num_frag)) { + if (!netif_queue_stopped(ndev)) + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { + /* Tx Full Checksum Offload Enabled */ + cur_p->app0 |= 2; + } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { + csum_start_off = skb_transport_offset(skb); + csum_index_off = csum_start_off + skb->csum_offset; + /* Tx Partial Checksum Offload Enabled */ + cur_p->app0 |= 1; + cur_p->app1 = (csum_start_off << 16) | csum_index_off; + } + } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ + } + + cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; + cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + + for (ii = 0; ii < num_frag; ii++) { + ++lp->tx_bd_tail; + lp->tx_bd_tail %= TX_BD_NUM; + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; + frag = &skb_shinfo(skb)->frags[ii]; + cur_p->phys = dma_map_single(ndev->dev.parent, + skb_frag_address(frag), + skb_frag_size(frag), + DMA_TO_DEVICE); + cur_p->cntrl = skb_frag_size(frag); + } + + cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; + cur_p->app4 = (unsigned long)skb; + + tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; + /* Start the transfer */ + axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); + ++lp->tx_bd_tail; + lp->tx_bd_tail %= TX_BD_NUM; + + return NETDEV_TX_OK; +} + +/** + * axienet_recv - Is called from Axi DMA Rx Isr to complete the received + * BD processing. + * @ndev: Pointer to net_device structure. + * + * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It + * does minimal processing and invokes "netif_rx" to complete further + * processing. 
+ */ +static void axienet_recv(struct net_device *ndev) +{ + u32 length; + u32 csumstatus; + u32 size = 0; + u32 packets = 0; + dma_addr_t tail_p; + struct axienet_local *lp = netdev_priv(ndev); + struct sk_buff *skb, *new_skb; + struct axidma_bd *cur_p; + + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; + + while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { + skb = (struct sk_buff *) (cur_p->sw_id_offset); + length = cur_p->app4 & 0x0000FFFF; + + dma_unmap_single(ndev->dev.parent, cur_p->phys, + lp->max_frm_size, + DMA_FROM_DEVICE); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, ndev); + /*skb_checksum_none_assert(skb);*/ + skb->ip_summed = CHECKSUM_NONE; + + /* if we're doing Rx csum offload, set it up */ + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { + csumstatus = (cur_p->app2 & + XAE_FULL_CSUM_STATUS_MASK) >> 3; + if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || + (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && + skb->protocol == htons(ETH_P_IP) && + skb->len > 64) { + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + netif_rx(skb); + + size += length; + packets++; + + new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); + if (!new_skb) + return; + + cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, + lp->max_frm_size, + DMA_FROM_DEVICE); + cur_p->cntrl = lp->max_frm_size; + cur_p->status = 0; + cur_p->sw_id_offset = (u32) new_skb; + + ++lp->rx_bd_ci; + lp->rx_bd_ci %= RX_BD_NUM; + cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; + } + + ndev->stats.rx_packets += packets; + ndev->stats.rx_bytes += size; + + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); +} + +/** + * axienet_tx_irq - Tx Done Isr. + * @irq: irq number + * @_ndev: net_device pointer + * + * returns: IRQ_HANDLED for all cases. + * + * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done" + * to complete the BD processing. + */ +static irqreturn_t axienet_tx_irq(int irq, void *_ndev) +{ + u32 cr; + unsigned int status; + struct net_device *ndev = _ndev; + struct axienet_local *lp = netdev_priv(ndev); + + status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); + if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { + axienet_start_xmit_done(lp->ndev); + goto out; + } + if (!(status & XAXIDMA_IRQ_ALL_MASK)) + dev_err(&ndev->dev, "No interrupts asserted in Tx path"); + if (status & XAXIDMA_IRQ_ERROR_MASK) { + dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status); + dev_err(&ndev->dev, "Current BD is at: 0x%x\n", + (lp->tx_bd_v[lp->tx_bd_ci]).phys); + + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + /* Disable coalesce, delay timer and error interrupts */ + cr &= (~XAXIDMA_IRQ_ALL_MASK); + /* Write to the Tx channel control register */ + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + /* Disable coalesce, delay timer and error interrupts */ + cr &= (~XAXIDMA_IRQ_ALL_MASK); + /* Write to the Rx channel control register */ + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + + tasklet_schedule(&lp->dma_err_tasklet); + } +out: + axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); + return IRQ_HANDLED; +} + +/** + * axienet_rx_irq - Rx Isr. + * @irq: irq number + * @_ndev: net_device pointer + * + * returns: IRQ_HANDLED for all cases. + * + * This is the Axi DMA Rx Isr. 
It invokes "axienet_recv" to complete the BD + * processing. + */ +static irqreturn_t axienet_rx_irq(int irq, void *_ndev) +{ + u32 cr; + unsigned int status; + struct net_device *ndev = _ndev; + struct axienet_local *lp = netdev_priv(ndev); + + status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); + if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { + axienet_recv(lp->ndev); + goto out; + } + if (!(status & XAXIDMA_IRQ_ALL_MASK)) + dev_err(&ndev->dev, "No interrupts asserted in Rx path"); + if (status & XAXIDMA_IRQ_ERROR_MASK) { + dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status); + dev_err(&ndev->dev, "Current BD is at: 0x%x\n", + (lp->rx_bd_v[lp->rx_bd_ci]).phys); + + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + /* Disable coalesce, delay timer and error interrupts */ + cr &= (~XAXIDMA_IRQ_ALL_MASK); + /* Finally write to the Tx channel control register */ + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + /* Disable coalesce, delay timer and error interrupts */ + cr &= (~XAXIDMA_IRQ_ALL_MASK); + /* write to the Rx channel control register */ + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + + tasklet_schedule(&lp->dma_err_tasklet); + } +out: + axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); + return IRQ_HANDLED; +} + +static void axienet_dma_err_handler(unsigned long data); + +/** + * axienet_open - Driver open routine. + * @ndev: Pointer to net_device structure + * + * returns: 0, on success. + * -ENODEV, if PHY cannot be connected to + * non-zero error value on failure + * + * This is the driver open routine. It calls phy_start to start the PHY device. + * It also allocates interrupt service routines, enables the interrupt lines + * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer + * descriptors are initialized. + */ +static int axienet_open(struct net_device *ndev) +{ + int ret, mdio_mcreg; + struct axienet_local *lp = netdev_priv(ndev); + + dev_dbg(&ndev->dev, "axienet_open()\n"); + + mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); + ret = axienet_mdio_wait_until_ready(lp); + if (ret < 0) + return ret; + /* Disable the MDIO interface till Axi Ethernet Reset is completed. + * When we do an Axi Ethernet reset, it resets the complete core + * including the MDIO. If MDIO is not disabled when the reset + * process is started, MDIO will be broken afterwards. 
*/ + axienet_iow(lp, XAE_MDIO_MC_OFFSET, + (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK))); + axienet_device_reset(ndev); + /* Enable the MDIO */ + axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg); + ret = axienet_mdio_wait_until_ready(lp); + if (ret < 0) + return ret; + + if (lp->phy_node) { + lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, + PHY_INTERFACE_MODE_GMII); + if (!lp->phy_dev) { + dev_err(lp->dev, "of_phy_connect() failed\n"); + return -ENODEV; + } + phy_start(lp->phy_dev); + } + + /* Enable tasklets for Axi DMA error handling */ + tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, + (unsigned long) lp); + + /* Enable interrupts for Axi DMA Tx */ + ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); + if (ret) + goto err_tx_irq; + /* Enable interrupts for Axi DMA Rx */ + ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); + if (ret) + goto err_rx_irq; + + return 0; + +err_rx_irq: + free_irq(lp->tx_irq, ndev); +err_tx_irq: + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + lp->phy_dev = NULL; + tasklet_kill(&lp->dma_err_tasklet); + dev_err(lp->dev, "request_irq() failed\n"); + return ret; +} + +/** + * axienet_stop - Driver stop routine. + * @ndev: Pointer to net_device structure + * + * returns: 0, on success. + * + * This is the driver stop routine. It calls phy_disconnect to stop the PHY + * device. It also removes the interrupt handlers and disables the interrupts. + * The Axi DMA Tx/Rx BDs are released. + */ +static int axienet_stop(struct net_device *ndev) +{ + u32 cr; + struct axienet_local *lp = netdev_priv(ndev); + + dev_dbg(&ndev->dev, "axienet_close()\n"); + + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, + cr & (~XAXIDMA_CR_RUNSTOP_MASK)); + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, + cr & (~XAXIDMA_CR_RUNSTOP_MASK)); + axienet_setoptions(ndev, lp->options & + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); + + tasklet_kill(&lp->dma_err_tasklet); + + free_irq(lp->tx_irq, ndev); + free_irq(lp->rx_irq, ndev); + + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + lp->phy_dev = NULL; + + axienet_dma_bd_release(ndev); + return 0; +} + +/** + * axienet_change_mtu - Driver change mtu routine. + * @ndev: Pointer to net_device structure + * @new_mtu: New mtu value to be applied + * + * returns: Always returns 0 (success). + * + * This is the change mtu driver routine. It checks if the Axi Ethernet + * hardware supports jumbo frames before changing the mtu. This can be + * called only when the device is not up. + */ +static int axienet_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct axienet_local *lp = netdev_priv(ndev); + + if (netif_running(ndev)) + return -EBUSY; + if (lp->jumbo_support) { + if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64)) + return -EINVAL; + ndev->mtu = new_mtu; + } else { + if ((new_mtu > XAE_MTU) || (new_mtu < 64)) + return -EINVAL; + ndev->mtu = new_mtu; + } + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/** + * axienet_poll_controller - Axi Ethernet poll mechanism. + * @ndev: Pointer to net_device structure + * + * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior + * to polling the ISRs and are enabled back after the polling is done. 
+ */ +static void axienet_poll_controller(struct net_device *ndev) +{ + struct axienet_local *lp = netdev_priv(ndev); + disable_irq(lp->tx_irq); + disable_irq(lp->rx_irq); + axienet_rx_irq(lp->tx_irq, ndev); + axienet_tx_irq(lp->rx_irq, ndev); + enable_irq(lp->tx_irq); + enable_irq(lp->rx_irq); +} +#endif + +static const struct net_device_ops axienet_netdev_ops = { + .ndo_open = axienet_open, + .ndo_stop = axienet_stop, + .ndo_start_xmit = axienet_start_xmit, + .ndo_change_mtu = axienet_change_mtu, + .ndo_set_mac_address = netdev_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = axienet_set_multicast_list, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = axienet_poll_controller, +#endif +}; + +/** + * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY. + * @ndev: Pointer to net_device structure + * @ecmd: Pointer to ethtool_cmd structure + * + * This implements ethtool command for getting PHY settings. If PHY could + * not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to get the PHY settings. + * Issue "ethtool ethX" under linux prompt to execute this function. + */ +static int axienet_ethtools_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct axienet_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + if (!phydev) + return -ENODEV; + return phy_ethtool_gset(phydev, ecmd); +} + +/** + * axienet_ethtools_set_settings - Set PHY settings as passed in the argument. + * @ndev: Pointer to net_device structure + * @ecmd: Pointer to ethtool_cmd structure + * + * This implements ethtool command for setting various PHY settings. If PHY + * could not be found, the function returns -ENODEV. This function calls the + * relevant PHY ethtool API to set the PHY. + * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this + * function. + */ +static int axienet_ethtools_set_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct axienet_local *lp = netdev_priv(ndev); + struct phy_device *phydev = lp->phy_dev; + if (!phydev) + return -ENODEV; + return phy_ethtool_sset(phydev, ecmd); +} + +/** + * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. + * @ndev: Pointer to net_device structure + * @ed: Pointer to ethtool_drvinfo structure + * + * This implements ethtool command for getting the driver information. + * Issue "ethtool -i ethX" under linux prompt to execute this function. + */ +static void axienet_ethtools_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *ed) +{ + strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); + strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); + ed->regdump_len = sizeof(u32) * AXIENET_REGS_N; +} + +/** + * axienet_ethtools_get_regs_len - Get the total regs length present in the + * AxiEthernet core. + * @ndev: Pointer to net_device structure + * + * This implements ethtool command for getting the total register length + * information. + */ +static int axienet_ethtools_get_regs_len(struct net_device *ndev) +{ + return sizeof(u32) * AXIENET_REGS_N; +} + +/** + * axienet_ethtools_get_regs - Dump the contents of all registers present + * in AxiEthernet core. + * @ndev: Pointer to net_device structure + * @regs: Pointer to ethtool_regs structure + * @ret: Void pointer used to return the contents of the registers. + * + * This implements ethtool command for getting the Axi Ethernet register dump. 
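+ * The dump is an array of AXIENET_REGS_N 32-bit words, returned in the same + * order as the axienet_ior() reads below, from XAE_RAF_OFFSET through + * XAE_AF1_OFFSET.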
+ * Issue "ethtool -d ethX" to execute this function. + */ +static void axienet_ethtools_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *ret) +{ + u32 *data = (u32 *) ret; + size_t len = sizeof(u32) * AXIENET_REGS_N; + struct axienet_local *lp = netdev_priv(ndev); + + regs->version = 0; + regs->len = len; + + memset(data, 0, len); + data[0] = axienet_ior(lp, XAE_RAF_OFFSET); + data[1] = axienet_ior(lp, XAE_TPF_OFFSET); + data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); + data[3] = axienet_ior(lp, XAE_IS_OFFSET); + data[4] = axienet_ior(lp, XAE_IP_OFFSET); + data[5] = axienet_ior(lp, XAE_IE_OFFSET); + data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); + data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); + data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); + data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); + data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); + data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); + data[12] = axienet_ior(lp, XAE_PPST_OFFSET); + data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); + data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); + data[15] = axienet_ior(lp, XAE_TC_OFFSET); + data[16] = axienet_ior(lp, XAE_FCC_OFFSET); + data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); + data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); + data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); + data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); + data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); + data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); + data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET); + data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET); + data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET); + data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET); + data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); + data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); + data[29] = axienet_ior(lp, XAE_FMI_OFFSET); + data[30] = axienet_ior(lp, XAE_AF0_OFFSET); + data[31] = axienet_ior(lp, XAE_AF1_OFFSET); +} + +/** + * axienet_ethtools_get_pauseparam - Get the pause parameter setting for + * Tx and Rx paths. + * @ndev: Pointer to net_device structure + * @epauseparm: Pointer to ethtool_pauseparam structure. + * + * This implements ethtool command for getting axi ethernet pause frame + * setting. Issue "ethtool -a ethX" to execute this function. + */ +static void +axienet_ethtools_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *epauseparm) +{ + u32 regval; + struct axienet_local *lp = netdev_priv(ndev); + epauseparm->autoneg = 0; + regval = axienet_ior(lp, XAE_FCC_OFFSET); + epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK; + epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK; +} + +/** + * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) + * settings. + * @ndev: Pointer to net_device structure + * @epauseparam:Pointer to ethtool_pauseparam structure + * + * This implements ethtool command for enabling flow control on Rx and Tx + * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this + * function. 
+ */ +static int +axienet_ethtools_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *epauseparm) +{ + u32 regval = 0; + struct axienet_local *lp = netdev_priv(ndev); + + if (netif_running(ndev)) { + printk(KERN_ERR "%s: Please stop netif before applying " + "configuration\n", ndev->name); + return -EFAULT; + } + + regval = axienet_ior(lp, XAE_FCC_OFFSET); + if (epauseparm->tx_pause) + regval |= XAE_FCC_FCTX_MASK; + else + regval &= ~XAE_FCC_FCTX_MASK; + if (epauseparm->rx_pause) + regval |= XAE_FCC_FCRX_MASK; + else + regval &= ~XAE_FCC_FCRX_MASK; + axienet_iow(lp, XAE_FCC_OFFSET, regval); + + return 0; +} + +/** + * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. + * @ndev: Pointer to net_device structure + * @ecoalesce: Pointer to ethtool_coalesce structure + * + * This implements ethtool command for getting the DMA interrupt coalescing + * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to + * execute this function. + */ +static int axienet_ethtools_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce) +{ + u32 regval = 0; + struct axienet_local *lp = netdev_priv(ndev); + regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) + >> XAXIDMA_COALESCE_SHIFT; + regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) + >> XAXIDMA_COALESCE_SHIFT; + return 0; +} + +/** + * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. + * @ndev: Pointer to net_device structure + * @ecoalesce: Pointer to ethtool_coalesce structure + * + * This implements ethtool command for setting the DMA interrupt coalescing + * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux + * prompt to execute this function.
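+ * Only rx-frames and tx-frames are honoured; a request that sets any other + * coalescing field (usecs, adaptive modes, rate sampling and so on) is + * rejected with -EOPNOTSUPP, and the interface must be down while the change + * is made. For example, "ethtool -C ethX rx-frames 16 tx-frames 32" updates + * the driver's Rx and Tx coalesce counts to 16 and 32 frames per interrupt.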
+ */ +static int axienet_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce) +{ + struct axienet_local *lp = netdev_priv(ndev); + + if (netif_running(ndev)) { + printk(KERN_ERR "%s: Please stop netif before applying " + "configuration\n", ndev->name); + return -EFAULT; + } + + if ((ecoalesce->rx_coalesce_usecs) || + (ecoalesce->rx_coalesce_usecs_irq) || + (ecoalesce->rx_max_coalesced_frames_irq) || + (ecoalesce->tx_coalesce_usecs) || + (ecoalesce->tx_coalesce_usecs_irq) || + (ecoalesce->tx_max_coalesced_frames_irq) || + (ecoalesce->stats_block_coalesce_usecs) || + (ecoalesce->use_adaptive_rx_coalesce) || + (ecoalesce->use_adaptive_tx_coalesce) || + (ecoalesce->pkt_rate_low) || + (ecoalesce->rx_coalesce_usecs_low) || + (ecoalesce->rx_max_coalesced_frames_low) || + (ecoalesce->tx_coalesce_usecs_low) || + (ecoalesce->tx_max_coalesced_frames_low) || + (ecoalesce->pkt_rate_high) || + (ecoalesce->rx_coalesce_usecs_high) || + (ecoalesce->rx_max_coalesced_frames_high) || + (ecoalesce->tx_coalesce_usecs_high) || + (ecoalesce->tx_max_coalesced_frames_high) || + (ecoalesce->rate_sample_interval)) + return -EOPNOTSUPP; + if (ecoalesce->rx_max_coalesced_frames) + lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; + if (ecoalesce->tx_max_coalesced_frames) + lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; + + return 0; +} + +static struct ethtool_ops axienet_ethtool_ops = { + .get_settings = axienet_ethtools_get_settings, + .set_settings = axienet_ethtools_set_settings, + .get_drvinfo = axienet_ethtools_get_drvinfo, + .get_regs_len = axienet_ethtools_get_regs_len, + .get_regs = axienet_ethtools_get_regs, + .get_link = ethtool_op_get_link, + .get_pauseparam = axienet_ethtools_get_pauseparam, + .set_pauseparam = axienet_ethtools_set_pauseparam, + .get_coalesce = axienet_ethtools_get_coalesce, + .set_coalesce = axienet_ethtools_set_coalesce, +}; + +/** + * axienet_dma_err_handler - Tasklet handler for Axi DMA Error + * @data: Pointer to the axienet_local structure, passed as unsigned long + * + * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the + * Tx/Rx BDs. + */ +static void axienet_dma_err_handler(unsigned long data) +{ + u32 axienet_status; + u32 cr, i; + int mdio_mcreg; + struct axienet_local *lp = (struct axienet_local *) data; + struct net_device *ndev = lp->ndev; + struct axidma_bd *cur_p; + + axienet_setoptions(ndev, lp->options & + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); + mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); + axienet_mdio_wait_until_ready(lp); + /* Disable the MDIO interface till Axi Ethernet Reset is completed. + * When we do an Axi Ethernet reset, it resets the complete core + * including the MDIO. So if MDIO is not disabled when the reset + * process is started, MDIO will be broken afterwards.
*/ + axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg & + ~XAE_MDIO_MC_MDIOEN_MASK)); + + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET); + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET); + + axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg); + axienet_mdio_wait_until_ready(lp); + + for (i = 0; i < TX_BD_NUM; i++) { + cur_p = &lp->tx_bd_v[i]; + if (cur_p->phys) + dma_unmap_single(ndev->dev.parent, cur_p->phys, + (cur_p->cntrl & + XAXIDMA_BD_CTRL_LENGTH_MASK), + DMA_TO_DEVICE); + if (cur_p->app4) + dev_kfree_skb_irq((struct sk_buff *) cur_p->app4); + cur_p->phys = 0; + cur_p->cntrl = 0; + cur_p->status = 0; + cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app3 = 0; + cur_p->app4 = 0; + cur_p->sw_id_offset = 0; + } + + for (i = 0; i < RX_BD_NUM; i++) { + cur_p = &lp->rx_bd_v[i]; + cur_p->status = 0; + cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app3 = 0; + cur_p->app4 = 0; + } + + lp->tx_bd_ci = 0; + lp->tx_bd_tail = 0; + lp->rx_bd_ci = 0; + + /* Start updating the Rx channel control register */ + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = ((cr & ~XAXIDMA_COALESCE_MASK) | + (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = ((cr & ~XAXIDMA_DELAY_MASK) | + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Finally write to the Rx channel control register */ + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + + /* Start updating the Tx channel control register */ + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + /* Update the interrupt coalesce count */ + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | + (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); + /* Update the delay timer count */ + cr = (((cr & ~XAXIDMA_DELAY_MASK)) | + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); + /* Enable coalesce, delay timer and error interrupts */ + cr |= XAXIDMA_IRQ_ALL_MASK; + /* Finally write to the Tx channel control register */ + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + + /* Populate the tail pointer and bring the Rx Axi DMA engine out of + * halted state. This will make the Rx side ready for reception.*/ + axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, + cr | XAXIDMA_CR_RUNSTOP_MASK); + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); + + /* Write to the RS (Run-stop) bit in the Tx channel control register. + * Tx channel is now ready to run. 
But only after we write to the + * tail pointer register that the Tx channel will start transmitting */ + axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, + cr | XAXIDMA_CR_RUNSTOP_MASK); + + axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); + axienet_status &= ~XAE_RCW1_RX_MASK; + axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); + + axienet_status = axienet_ior(lp, XAE_IP_OFFSET); + if (axienet_status & XAE_INT_RXRJECT_MASK) + axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); + axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); + + /* Sync default options with HW but leave receiver and + * transmitter disabled.*/ + axienet_setoptions(ndev, lp->options & + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); + axienet_set_mac_address(ndev, NULL); + axienet_set_multicast_list(ndev); + axienet_setoptions(ndev, lp->options); +} + +/** + * axienet_of_probe - Axi Ethernet probe function. + * @op: Pointer to platform device structure. + * @match: Pointer to device id structure + * + * returns: 0, on success + * Non-zero error value on failure. + * + * This is the probe routine for Axi Ethernet driver. This is called before + * any other driver routines are invoked. It allocates and sets up the Ethernet + * device. Parses through device tree and populates fields of + * axienet_local. It registers the Ethernet device. + */ +static int axienet_of_probe(struct platform_device *op) +{ + __be32 *p; + int size, ret = 0; + struct device_node *np; + struct axienet_local *lp; + struct net_device *ndev; + const void *addr; + + ndev = alloc_etherdev(sizeof(*lp)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(op, ndev); + + SET_NETDEV_DEV(ndev, &op->dev); + ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ + ndev->features = NETIF_F_SG; + ndev->netdev_ops = &axienet_netdev_ops; + ndev->ethtool_ops = &axienet_ethtool_ops; + + lp = netdev_priv(ndev); + lp->ndev = ndev; + lp->dev = &op->dev; + lp->options = XAE_OPTION_DEFAULTS; + /* Map device registers */ + lp->regs = of_iomap(op->dev.of_node, 0); + if (!lp->regs) { + dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); + ret = -ENOMEM; + goto nodev; + } + /* Setup checksum offload, but default to off if not specified */ + lp->features = 0; + + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); + if (p) { + switch (be32_to_cpup(p)) { + case 1: + lp->csum_offload_on_tx_path = + XAE_FEATURE_PARTIAL_TX_CSUM; + lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; + /* Can checksum TCP/UDP over IPv4. */ + ndev->features |= NETIF_F_IP_CSUM; + break; + case 2: + lp->csum_offload_on_tx_path = + XAE_FEATURE_FULL_TX_CSUM; + lp->features |= XAE_FEATURE_FULL_TX_CSUM; + /* Can checksum TCP/UDP over IPv4. */ + ndev->features |= NETIF_F_IP_CSUM; + break; + default: + lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; + } + } + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); + if (p) { + switch (be32_to_cpup(p)) { + case 1: + lp->csum_offload_on_rx_path = + XAE_FEATURE_PARTIAL_RX_CSUM; + lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; + break; + case 2: + lp->csum_offload_on_rx_path = + XAE_FEATURE_FULL_RX_CSUM; + lp->features |= XAE_FEATURE_FULL_RX_CSUM; + break; + default: + lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; + } + } + /* For supporting jumbo frames, the Axi Ethernet hardware must have + * a larger Rx/Tx Memory. 
Typically, the size must be more than or + * equal to 16384 bytes, so that we can enable jumbo option and start + * supporting jumbo frames. Here we check for memory allocated for + * Rx/Tx in the hardware from the device-tree and accordingly set + * flags. */ + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL); + if (p) { + if ((be32_to_cpup(p)) >= 0x4000) + lp->jumbo_support = 1; + } + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); + if (p) + lp->phy_type = be32_to_cpup(p); + + /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ + np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); + if (!np) { + dev_err(&op->dev, "could not find DMA node\n"); + ret = -ENODEV; + goto err_iounmap; + } + lp->dma_regs = of_iomap(np, 0); + if (lp->dma_regs) { + dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs); + } else { + dev_err(&op->dev, "unable to map DMA registers\n"); + of_node_put(np); + } + lp->rx_irq = irq_of_parse_and_map(np, 1); + lp->tx_irq = irq_of_parse_and_map(np, 0); + of_node_put(np); + if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { + dev_err(&op->dev, "could not determine irqs\n"); + ret = -ENOMEM; + goto err_iounmap_2; + } + + /* Retrieve the MAC address */ + addr = of_get_property(op->dev.of_node, "local-mac-address", &size); + if ((!addr) || (size != 6)) { + dev_err(&op->dev, "could not find MAC address\n"); + ret = -ENODEV; + goto err_iounmap_2; + } + axienet_set_mac_address(ndev, (void *) addr); + + lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; + lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; + + lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); + ret = axienet_mdio_setup(lp, op->dev.of_node); + if (ret) + dev_warn(&op->dev, "error registering MDIO bus\n"); + + ret = register_netdev(lp->ndev); + if (ret) { + dev_err(lp->dev, "register_netdev() error (%i)\n", ret); + goto err_iounmap_2; + } + + return 0; + +err_iounmap_2: + if (lp->dma_regs) + iounmap(lp->dma_regs); +err_iounmap: + iounmap(lp->regs); +nodev: + free_netdev(ndev); + ndev = NULL; + return ret; +} + +static int axienet_of_remove(struct platform_device *op) +{ + struct net_device *ndev = platform_get_drvdata(op); + struct axienet_local *lp = netdev_priv(ndev); + + axienet_mdio_teardown(lp); + unregister_netdev(ndev); + + of_node_put(lp->phy_node); + lp->phy_node = NULL; + + iounmap(lp->regs); + if (lp->dma_regs) + iounmap(lp->dma_regs); + free_netdev(ndev); + + return 0; +} + +static struct platform_driver axienet_of_driver = { + .probe = axienet_of_probe, + .remove = axienet_of_remove, + .driver = { + .name = "xilinx_axienet", + .of_match_table = axienet_of_match, + }, +}; + +module_platform_driver(axienet_of_driver); + +MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); +MODULE_AUTHOR("Xilinx"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c new file mode 100644 index 000000000..3b67d60d4 --- /dev/null +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -0,0 +1,239 @@ +/* + * MDIO bus driver for the Xilinx Axi Ethernet device + * + * Copyright (c) 2009 Secret Lab Technologies, Ltd. + * Copyright (c) 2010 - 2011 Michal Simek + * Copyright (c) 2010 - 2011 PetaLogix + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include "xilinx_axienet.h" + +#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */ +#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT + +/* Wait till MDIO interface is ready to accept a new transaction.*/ +int axienet_mdio_wait_until_ready(struct axienet_local *lp) +{ + unsigned long end = jiffies + 2; + while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) & + XAE_MDIO_MCR_READY_MASK)) { + if (time_before_eq(end, jiffies)) { + WARN_ON(1); + return -ETIMEDOUT; + } + udelay(1); + } + return 0; +} + +/** + * axienet_mdio_read - MDIO interface read function + * @bus: Pointer to mii bus structure + * @phy_id: Address of the PHY device + * @reg: PHY register to read + * + * returns: The register contents on success, -ETIMEDOUT on a timeout + * + * Reads the contents of the requested register from the requested PHY + * address by first writing the details into MCR register. After a while + * the register MRD is read to obtain the PHY register content. + */ +static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + u32 rc; + int ret; + struct axienet_local *lp = bus->priv; + + ret = axienet_mdio_wait_until_ready(lp); + if (ret < 0) + return ret; + + axienet_iow(lp, XAE_MDIO_MCR_OFFSET, + (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & + XAE_MDIO_MCR_PHYAD_MASK) | + ((reg << XAE_MDIO_MCR_REGAD_SHIFT) & + XAE_MDIO_MCR_REGAD_MASK) | + XAE_MDIO_MCR_INITIATE_MASK | + XAE_MDIO_MCR_OP_READ_MASK)); + + ret = axienet_mdio_wait_until_ready(lp); + if (ret < 0) + return ret; + + rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF; + + dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n", + phy_id, reg, rc); + + return rc; +} + +/** + * axienet_mdio_write - MDIO interface write function + * @bus: Pointer to mii bus structure + * @phy_id: Address of the PHY device + * @reg: PHY register to write to + * @val: Value to be written into the register + * + * returns: 0 on success, -ETIMEDOUT on a timeout + * + * Writes the value to the requested register by first writing the value + * into MWD register. The the MCR register is then appropriately setup + * to finish the write operation. + */ +static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg, + u16 val) +{ + int ret; + struct axienet_local *lp = bus->priv; + + dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n", + phy_id, reg, val); + + ret = axienet_mdio_wait_until_ready(lp); + if (ret < 0) + return ret; + + axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val); + axienet_iow(lp, XAE_MDIO_MCR_OFFSET, + (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & + XAE_MDIO_MCR_PHYAD_MASK) | + ((reg << XAE_MDIO_MCR_REGAD_SHIFT) & + XAE_MDIO_MCR_REGAD_MASK) | + XAE_MDIO_MCR_INITIATE_MASK | + XAE_MDIO_MCR_OP_WRITE_MASK)); + + ret = axienet_mdio_wait_until_ready(lp); + if (ret < 0) + return ret; + return 0; +} + +/** + * axienet_mdio_setup - MDIO setup function + * @lp: Pointer to axienet local data structure. + * @np: Pointer to device node + * + * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when + * mdiobus_alloc (to allocate memory for mii bus structure) fails. + * + * Sets up the MDIO interface by initializing the MDIO clock and enabling the + * MDIO interface in hardware. Register the MDIO interface. 
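+ * As a worked example of the divisor computed below: with a 100 MHz host + * clock, clk_div = (100000000 / (2 * 2500000)) - 1 = 19, giving an MDIO clock + * of 100 MHz / ((19 + 1) * 2) = 2.5 MHz, exactly the MAX_MDIO_FREQ limit.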
+ **/ +int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) +{ + int ret; + u32 clk_div, host_clock; + u32 *property_p; + struct mii_bus *bus; + struct resource res; + struct device_node *np1; + + /* clk_div can be calculated by deriving it from the equation: + * fMDIO = fHOST / ((1 + clk_div) * 2) + * + * Where fMDIO <= 2500000, so we get: + * fHOST / ((1 + clk_div) * 2) <= 2500000 + * + * Then we get: + * 1 / ((1 + clk_div) * 2) <= (2500000 / fHOST) + * + * Then we get: + * 1 / (1 + clk_div) <= ((2500000 * 2) / fHOST) + * + * Then we get: + * 1 / (1 + clk_div) <= (5000000 / fHOST) + * + * So: + * (1 + clk_div) >= (fHOST / 5000000) + * + * And finally: + * clk_div >= (fHOST / 5000000) - 1 + * + * fHOST can be read from the flattened device tree as property + * "clock-frequency" from the CPU + */ + + np1 = of_find_node_by_name(NULL, "cpu"); + if (!np1) { + printk(KERN_WARNING "%s(): Could not find CPU device node.", + __func__); + printk(KERN_WARNING "Setting MDIO clock divisor to " + "default %d\n", DEFAULT_CLOCK_DIVISOR); + clk_div = DEFAULT_CLOCK_DIVISOR; + goto issue; + } + property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL); + if (!property_p) { + printk(KERN_WARNING "%s(): Could not find CPU property: " + "clock-frequency.", __func__); + printk(KERN_WARNING "Setting MDIO clock divisor to " + "default %d\n", DEFAULT_CLOCK_DIVISOR); + clk_div = DEFAULT_CLOCK_DIVISOR; + of_node_put(np1); + goto issue; + } + + host_clock = be32_to_cpup(property_p); + clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1; + /* If there is any remainder from the division of + * fHOST / (MAX_MDIO_FREQ * 2), then we need to add + * 1 to the clock divisor or we will surely be above 2.5 MHz */ + if (host_clock % (MAX_MDIO_FREQ * 2)) + clk_div++; + + printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based " + "on %u Hz host clock.\n", __func__, clk_div, host_clock); + + of_node_put(np1); +issue: + axienet_iow(lp, XAE_MDIO_MC_OFFSET, + (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK)); + + ret = axienet_mdio_wait_until_ready(lp); + if (ret < 0) + return ret; + + bus = mdiobus_alloc(); + if (!bus) + return -ENOMEM; + + np1 = of_get_parent(lp->phy_node); + of_address_to_resource(np1, 0, &res); + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", + (unsigned long long) res.start); + + bus->priv = lp; + bus->name = "Xilinx Axi Ethernet MDIO"; + bus->read = axienet_mdio_read; + bus->write = axienet_mdio_write; + bus->parent = lp->dev; + bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ + lp->mii_bus = bus; + + ret = of_mdiobus_register(bus, np1); + if (ret) { + mdiobus_free(bus); + return ret; + } + return 0; +} + +/** + * axienet_mdio_teardown - MDIO remove function + * @lp: Pointer to axienet local data structure. + * + * Unregisters the MDIO and frees any associate memory for mii bus. + */ +void axienet_mdio_teardown(struct axienet_local *lp) +{ + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); + lp->mii_bus = NULL; +} diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c new file mode 100644 index 000000000..6008eee01 --- /dev/null +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -0,0 +1,1258 @@ +/* + * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device. + * + * This is a new flat driver which is based on the original emac_lite + * driver from John Williams . + * + * 2007 - 2013 (c) Xilinx, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "xilinx_emaclite" + +/* Register offsets for the EmacLite Core */ +#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */ +#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */ +#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */ +#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */ +#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */ +#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */ +#define XEL_TSR_OFFSET 0x07FC /* Tx status */ +#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */ + +#define XEL_RXBUFF_OFFSET 0x1000 /* Receive Buffer */ +#define XEL_RPLR_OFFSET 0x100C /* Rx packet length */ +#define XEL_RSR_OFFSET 0x17FC /* Rx status */ + +#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */ + +/* MDIO Address Register Bit Masks */ +#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */ +#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */ +#define XEL_MDIOADDR_PHYADR_SHIFT 5 +#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */ + +/* MDIO Write Data Register Bit Masks */ +#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */ + +/* MDIO Read Data Register Bit Masks */ +#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */ + +/* MDIO Control Register Bit Masks */ +#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */ +#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */ + +/* Global Interrupt Enable Register (GIER) Bit Masks */ +#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */ + +/* Transmit Status Register (TSR) Bit Masks */ +#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */ +#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */ +#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */ +#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit + * only. This is not documented + * in the HW spec */ + +/* Define for programming the MAC address into the EmacLite */ +#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK) + +/* Receive Status Register (RSR) */ +#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */ +#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */ + +/* Transmit Packet Length Register (TPLR) */ +#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */ + +/* Receive Packet Length Register (RPLR) */ +#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */ + +#define XEL_HEADER_OFFSET 12 /* Offset to length field */ +#define XEL_HEADER_SHIFT 16 /* Shift value for length */ + +/* General Ethernet Definitions */ +#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */ +#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */ + + + +#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */ +#define ALIGNMENT 4 + +/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. 
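+ * For example, with ALIGNMENT == 4 this evaluates to (4 - (adr % 4)) % 4: an + * address ending in 0x...1 needs 3 bytes of padding, while an already + * word-aligned address needs none.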
*/ +#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) + +/** + * struct net_local - Our private per device data + * @ndev: instance of the network device + * @tx_ping_pong: indicates whether Tx Pong buffer is configured in HW + * @rx_ping_pong: indicates whether Rx Pong buffer is configured in HW + * @next_tx_buf_to_use: next Tx buffer to write to + * @next_rx_buf_to_use: next Rx buffer to read from + * @base_addr: base address of the Emaclite device + * @reset_lock: lock used for synchronization + * @deferred_skb: holds an skb (for transmission at a later time) when the + * Tx buffer is not free + * @phy_dev: pointer to the PHY device + * @phy_node: pointer to the PHY device node + * @mii_bus: pointer to the MII bus + * @mdio_irqs: IRQs table for MDIO bus + * @last_link: last link status + * @has_mdio: indicates whether MDIO is included in the HW + */ +struct net_local { + + struct net_device *ndev; + + bool tx_ping_pong; + bool rx_ping_pong; + u32 next_tx_buf_to_use; + u32 next_rx_buf_to_use; + void __iomem *base_addr; + + spinlock_t reset_lock; + struct sk_buff *deferred_skb; + + struct phy_device *phy_dev; + struct device_node *phy_node; + + struct mii_bus *mii_bus; + int mdio_irqs[PHY_MAX_ADDR]; + + int last_link; + bool has_mdio; +}; + + +/*************************/ +/* EmacLite driver calls */ +/*************************/ + +/** + * xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device + * @drvdata: Pointer to the Emaclite device private data + * + * This function enables the Tx and Rx interrupts for the Emaclite device along + * with the Global Interrupt Enable. + */ +static void xemaclite_enable_interrupts(struct net_local *drvdata) +{ + u32 reg_data; + + /* Enable the Tx interrupts for the first Buffer */ + reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); + __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, + drvdata->base_addr + XEL_TSR_OFFSET); + + /* Enable the Rx interrupts for the first buffer */ + __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); + + /* Enable the Global Interrupt Enable */ + __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); +} + +/** + * xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device + * @drvdata: Pointer to the Emaclite device private data + * + * This function disables the Tx and Rx interrupts for the Emaclite device, + * along with the Global Interrupt Enable. + */ +static void xemaclite_disable_interrupts(struct net_local *drvdata) +{ + u32 reg_data; + + /* Disable the Global Interrupt Enable */ + __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); + + /* Disable the Tx interrupts for the first buffer */ + reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); + __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), + drvdata->base_addr + XEL_TSR_OFFSET); + + /* Disable the Rx interrupts for the first buffer */ + reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); + __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), + drvdata->base_addr + XEL_RSR_OFFSET); +} + +/** + * xemaclite_aligned_write - Write from 16-bit aligned to 32-bit aligned address + * @src_ptr: Void pointer to the 16-bit aligned source address + * @dest_ptr: Pointer to the 32-bit aligned destination address + * @length: Number bytes to write from source to destination + * + * This function writes data from a 16-bit aligned buffer to a 32-bit aligned + * address in the EmacLite device. 
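+ * The destination is only written in whole 32-bit words: complete words are + * assembled from pairs of 16-bit loads, and any trailing bytes are staged in + * a temporary word before the final 32-bit store.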
+ */ +static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr, + unsigned length) +{ + u32 align_buffer; + u32 *to_u32_ptr; + u16 *from_u16_ptr, *to_u16_ptr; + + to_u32_ptr = dest_ptr; + from_u16_ptr = src_ptr; + align_buffer = 0; + + for (; length > 3; length -= 4) { + to_u16_ptr = (u16 *)&align_buffer; + *to_u16_ptr++ = *from_u16_ptr++; + *to_u16_ptr++ = *from_u16_ptr++; + + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. + */ + wmb(); + + /* Output a word */ + *to_u32_ptr++ = align_buffer; + } + if (length) { + u8 *from_u8_ptr, *to_u8_ptr; + + /* Set up to output the remaining data */ + align_buffer = 0; + to_u8_ptr = (u8 *) &align_buffer; + from_u8_ptr = (u8 *) from_u16_ptr; + + /* Output the remaining data */ + for (; length > 0; length--) + *to_u8_ptr++ = *from_u8_ptr++; + + /* This barrier resolves occasional issues seen around + * cases where the data is not properly flushed out + * from the processor store buffers to the destination + * memory locations. + */ + wmb(); + *to_u32_ptr = align_buffer; + } +} + +/** + * xemaclite_aligned_read - Read from 32-bit aligned to 16-bit aligned buffer + * @src_ptr: Pointer to the 32-bit aligned source address + * @dest_ptr: Pointer to the 16-bit aligned destination address + * @length: Number bytes to read from source to destination + * + * This function reads data from a 32-bit aligned address in the EmacLite device + * to a 16-bit aligned buffer. + */ +static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, + unsigned length) +{ + u16 *to_u16_ptr, *from_u16_ptr; + u32 *from_u32_ptr; + u32 align_buffer; + + from_u32_ptr = src_ptr; + to_u16_ptr = (u16 *) dest_ptr; + + for (; length > 3; length -= 4) { + /* Copy each word into the temporary buffer */ + align_buffer = *from_u32_ptr++; + from_u16_ptr = (u16 *)&align_buffer; + + /* Read data from source */ + *to_u16_ptr++ = *from_u16_ptr++; + *to_u16_ptr++ = *from_u16_ptr++; + } + + if (length) { + u8 *to_u8_ptr, *from_u8_ptr; + + /* Set up to read the remaining data */ + to_u8_ptr = (u8 *) to_u16_ptr; + align_buffer = *from_u32_ptr++; + from_u8_ptr = (u8 *) &align_buffer; + + /* Read the remaining data */ + for (; length > 0; length--) + *to_u8_ptr = *from_u8_ptr; + } +} + +/** + * xemaclite_send_data - Send an Ethernet frame + * @drvdata: Pointer to the Emaclite device private data + * @data: Pointer to the data to be sent + * @byte_count: Total frame size, including header + * + * This function checks if the Tx buffer of the Emaclite device is free to send + * data. If so, it fills the Tx buffer with data for transmission. Otherwise, it + * returns an error. + * + * Return: 0 upon success or -1 if the buffer(s) are full. + * + * Note: The maximum Tx packet size can not be more than Ethernet header + * (14 Bytes) + Maximum MTU (1500 bytes). This is excluding FCS. 
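+ * Frames longer than ETH_FRAME_LEN are therefore silently truncated before + * being copied into the Tx buffer.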
+ */ +static int xemaclite_send_data(struct net_local *drvdata, u8 *data, + unsigned int byte_count) +{ + u32 reg_data; + void __iomem *addr; + + /* Determine the expected Tx buffer address */ + addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; + + /* If the length is too large, truncate it */ + if (byte_count > ETH_FRAME_LEN) + byte_count = ETH_FRAME_LEN; + + /* Check if the expected buffer is available */ + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | + XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { + + /* Switch to next buffer if configured */ + if (drvdata->tx_ping_pong != 0) + drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET; + } else if (drvdata->tx_ping_pong != 0) { + /* If the expected buffer is full, try the other buffer, + * if it is configured in HW */ + + addr = (void __iomem __force *)((u32 __force)addr ^ + XEL_BUFFER_OFFSET); + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + + if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | + XEL_TSR_XMIT_ACTIVE_MASK)) != 0) + return -1; /* Buffers were full, return failure */ + } else + return -1; /* Buffer was full, return failure */ + + /* Write the frame to the buffer */ + xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); + + __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), + addr + XEL_TPLR_OFFSET); + + /* Update the Tx Status Register to indicate that there is a + * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which + * is used by the interrupt handler to check whether a frame + * has been transmitted */ + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); + __raw_writel(reg_data, addr + XEL_TSR_OFFSET); + + return 0; +} + +/** + * xemaclite_recv_data - Receive a frame + * @drvdata: Pointer to the Emaclite device private data + * @data: Address where the data is to be received + * + * This function is intended to be called from the interrupt context or + * with a wrapper which waits for the receive frame to be available. + * + * Return: Total number of bytes received + */ +static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) +{ + void __iomem *addr; + u16 length, proto_type; + u32 reg_data; + + /* Determine the expected buffer address */ + addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); + + /* Verify which buffer has valid data */ + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + + if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { + if (drvdata->rx_ping_pong != 0) + drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET; + } else { + /* The instance is out of sync, try other buffer if other + * buffer is configured, return 0 otherwise. 
If the instance is + * out of sync, do not update the 'next_rx_buf_to_use' since it + * will correct on subsequent calls */ + if (drvdata->rx_ping_pong != 0) + addr = (void __iomem __force *)((u32 __force)addr ^ + XEL_BUFFER_OFFSET); + else + return 0; /* No data was available */ + + /* Verify that buffer has valid data */ + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + if ((reg_data & XEL_RSR_RECV_DONE_MASK) != + XEL_RSR_RECV_DONE_MASK) + return 0; /* No data was available */ + } + + /* Get the protocol type of the ethernet frame that arrived */ + proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & + XEL_RPLR_LENGTH_MASK); + + /* Check if received ethernet frame is a raw ethernet frame + * or an IP packet or an ARP packet */ + if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + + if (proto_type == ETH_P_IP) { + length = ((ntohl(__raw_readl(addr + + XEL_HEADER_IP_LENGTH_OFFSET + + XEL_RXBUFF_OFFSET)) >> + XEL_HEADER_SHIFT) & + XEL_RPLR_LENGTH_MASK); + length += ETH_HLEN + ETH_FCS_LEN; + + } else if (proto_type == ETH_P_ARP) + length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN; + else + /* Field contains type other than IP or ARP, use max + * frame size and let user parse it */ + length = ETH_FRAME_LEN + ETH_FCS_LEN; + } else + /* Use the length in the frame, plus the header and trailer */ + length = proto_type + ETH_HLEN + ETH_FCS_LEN; + + /* Read from the EmacLite device */ + xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), + data, length); + + /* Acknowledge the frame */ + reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + reg_data &= ~XEL_RSR_RECV_DONE_MASK; + __raw_writel(reg_data, addr + XEL_RSR_OFFSET); + + return length; +} + +/** + * xemaclite_update_address - Update the MAC address in the device + * @drvdata: Pointer to the Emaclite device private data + * @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value) + * + * Tx must be idle and Rx should be idle for deterministic results. + * It is recommended that this function should be called after the + * initialization and before transmission of any packets from the device. + * The MAC address can be programmed using any of the two transmit + * buffers (if configured). + */ +static void xemaclite_update_address(struct net_local *drvdata, + u8 *address_ptr) +{ + void __iomem *addr; + u32 reg_data; + + /* Determine the expected Tx buffer address */ + addr = drvdata->base_addr + drvdata->next_tx_buf_to_use; + + xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); + + __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); + + /* Update the MAC address in the EmacLite */ + reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); + + /* Wait for EmacLite to finish with the MAC address update */ + while ((__raw_readl(addr + XEL_TSR_OFFSET) & + XEL_TSR_PROG_MAC_ADDR) != 0) + ; +} + +/** + * xemaclite_set_mac_address - Set the MAC address for this device + * @dev: Pointer to the network device instance + * @addr: Void pointer to the sockaddr structure + * + * This function copies the HW address from the sockaddr strucutre to the + * net_device structure and updates the address in HW. 
+ * + * Return: Error if the net device is busy or 0 if the addr is set + * successfully + */ +static int xemaclite_set_mac_address(struct net_device *dev, void *address) +{ + struct net_local *lp = netdev_priv(dev); + struct sockaddr *addr = address; + + if (netif_running(dev)) + return -EBUSY; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + xemaclite_update_address(lp, dev->dev_addr); + return 0; +} + +/** + * xemaclite_tx_timeout - Callback for Tx Timeout + * @dev: Pointer to the network device + * + * This function is called when Tx time out occurs for Emaclite device. + */ +static void xemaclite_tx_timeout(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + unsigned long flags; + + dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n", + TX_TIMEOUT * 1000UL / HZ); + + dev->stats.tx_errors++; + + /* Reset the device */ + spin_lock_irqsave(&lp->reset_lock, flags); + + /* Shouldn't really be necessary, but shouldn't hurt */ + netif_stop_queue(dev); + + xemaclite_disable_interrupts(lp); + xemaclite_enable_interrupts(lp); + + if (lp->deferred_skb) { + dev_kfree_skb(lp->deferred_skb); + lp->deferred_skb = NULL; + dev->stats.tx_errors++; + } + + /* To exclude tx timeout */ + dev->trans_start = jiffies; /* prevent tx timeout */ + + /* We're all ready to go. Start the queue */ + netif_wake_queue(dev); + spin_unlock_irqrestore(&lp->reset_lock, flags); +} + +/**********************/ +/* Interrupt Handlers */ +/**********************/ + +/** + * xemaclite_tx_handler - Interrupt handler for frames sent + * @dev: Pointer to the network device + * + * This function updates the number of packets transmitted and handles the + * deferred skb, if there is one. + */ +static void xemaclite_tx_handler(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + + dev->stats.tx_packets++; + if (lp->deferred_skb) { + if (xemaclite_send_data(lp, + (u8 *) lp->deferred_skb->data, + lp->deferred_skb->len) != 0) + return; + else { + dev->stats.tx_bytes += lp->deferred_skb->len; + dev_kfree_skb_irq(lp->deferred_skb); + lp->deferred_skb = NULL; + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); + } + } +} + +/** + * xemaclite_rx_handler- Interrupt handler for frames received + * @dev: Pointer to the network device + * + * This function allocates memory for a socket buffer, fills it with data + * received and hands it over to the TCP/IP stack. + */ +static void xemaclite_rx_handler(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + struct sk_buff *skb; + unsigned int align; + u32 len; + + len = ETH_FRAME_LEN + ETH_FCS_LEN; + skb = netdev_alloc_skb(dev, len + ALIGNMENT); + if (!skb) { + /* Couldn't get memory. */ + dev->stats.rx_dropped++; + dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n"); + return; + } + + /* + * A new skb should have the data halfword aligned, but this code is + * here just in case that isn't true. 
Calculate how many + * bytes we should reserve to get the data to start on a word + * boundary */ + align = BUFFER_ALIGN(skb->data); + if (align) + skb_reserve(skb, align); + + skb_reserve(skb, 2); + + len = xemaclite_recv_data(lp, (u8 *) skb->data); + + if (!len) { + dev->stats.rx_errors++; + dev_kfree_skb_irq(skb); + return; + } + + skb_put(skb, len); /* Tell the skb how much data we got */ + + skb->protocol = eth_type_trans(skb, dev); + skb_checksum_none_assert(skb); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + + if (!skb_defer_rx_timestamp(skb)) + netif_rx(skb); /* Send the packet upstream */ +} + +/** + * xemaclite_interrupt - Interrupt handler for this driver + * @irq: Irq of the Emaclite device + * @dev_id: Void pointer to the network device instance used as callback + * reference + * + * This function handles the Tx and Rx interrupts of the EmacLite device. + */ +static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) +{ + bool tx_complete = false; + struct net_device *dev = dev_id; + struct net_local *lp = netdev_priv(dev); + void __iomem *base_addr = lp->base_addr; + u32 tx_status; + + /* Check if there is Rx Data available */ + if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & + XEL_RSR_RECV_DONE_MASK) || + (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) + & XEL_RSR_RECV_DONE_MASK)) + + xemaclite_rx_handler(dev); + + /* Check if the Transmission for the first buffer is completed */ + tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); + if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { + + tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; + __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); + + tx_complete = true; + } + + /* Check if the Transmission for the second buffer is completed */ + tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && + (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { + + tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; + __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); + + tx_complete = true; + } + + /* If there was a Tx interrupt, call the Tx Handler */ + if (tx_complete != 0) + xemaclite_tx_handler(dev); + + return IRQ_HANDLED; +} + +/**********************/ +/* MDIO Bus functions */ +/**********************/ + +/** + * xemaclite_mdio_wait - Wait for the MDIO to be ready to use + * @lp: Pointer to the Emaclite device private data + * + * This function waits till the device is ready to accept a new MDIO + * request. + * + * Return: 0 for success or ETIMEDOUT for a timeout + */ + +static int xemaclite_mdio_wait(struct net_local *lp) +{ + unsigned long end = jiffies + 2; + + /* wait for the MDIO interface to not be busy or timeout + after some time. + */ + while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & + XEL_MDIOCTRL_MDIOSTS_MASK) { + if (time_before_eq(end, jiffies)) { + WARN_ON(1); + return -ETIMEDOUT; + } + msleep(1); + } + return 0; +} + +/** + * xemaclite_mdio_read - Read from a given MII management register + * @bus: the mii_bus struct + * @phy_id: the phy address + * @reg: register number to read from + * + * This function waits till the device is ready to accept a new MDIO + * request and then writes the phy address to the MDIO Address register + * and reads data from MDIO Read Data register, when its available. 
+ * + * Return: Value read from the MII management register + */ +static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct net_local *lp = bus->priv; + u32 ctrl_reg; + u32 rc; + + if (xemaclite_mdio_wait(lp)) + return -ETIMEDOUT; + + /* Write the PHY address, register number and set the OP bit in the + * MDIO Address register. Set the Status bit in the MDIO Control + * register to start a MDIO read transaction. + */ + ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + __raw_writel(XEL_MDIOADDR_OP_MASK | + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); + + if (xemaclite_mdio_wait(lp)) + return -ETIMEDOUT; + + rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); + + dev_dbg(&lp->ndev->dev, + "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", + phy_id, reg, rc); + + return rc; +} + +/** + * xemaclite_mdio_write - Write to a given MII management register + * @bus: the mii_bus struct + * @phy_id: the phy address + * @reg: register number to write to + * @val: value to write to the register number specified by reg + * + * This function waits till the device is ready to accept a new MDIO + * request and then writes the val to the MDIO Write Data register. + */ +static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, + u16 val) +{ + struct net_local *lp = bus->priv; + u32 ctrl_reg; + + dev_dbg(&lp->ndev->dev, + "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n", + phy_id, reg, val); + + if (xemaclite_mdio_wait(lp)) + return -ETIMEDOUT; + + /* Write the PHY address, register number and clear the OP bit in the + * MDIO Address register and then write the value into the MDIO Write + * Data register. Finally, set the Status bit in the MDIO Control + * register to start a MDIO write transaction. + */ + ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + __raw_writel(~XEL_MDIOADDR_OP_MASK & + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); + __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); + + return 0; +} + +/** + * xemaclite_mdio_setup - Register mii_bus for the Emaclite device + * @lp: Pointer to the Emaclite device private data + * @ofdev: Pointer to OF device structure + * + * This function enables MDIO bus in the Emaclite device and registers a + * mii_bus. + * + * Return: 0 upon success or a negative error upon failure + */ +static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) +{ + struct mii_bus *bus; + int rc; + struct resource res; + struct device_node *np = of_get_parent(lp->phy_node); + struct device_node *npp; + + /* Don't register the MDIO bus if the phy_node or its parent node + * can't be found. + */ + if (!np) { + dev_err(dev, "Failed to register mdio bus.\n"); + return -ENODEV; + } + npp = of_get_parent(np); + + of_address_to_resource(npp, 0, &res); + if (lp->ndev->mem_start != res.start) { + struct phy_device *phydev; + phydev = of_phy_find_device(lp->phy_node); + if (!phydev) + dev_info(dev, + "MDIO of the phy is not registered yet\n"); + return 0; + } + + /* Enable the MDIO bus by asserting the enable bit in MDIO Control + * register. 
+ */ + __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); + + bus = mdiobus_alloc(); + if (!bus) { + dev_err(dev, "Failed to allocate mdiobus\n"); + return -ENOMEM; + } + + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", + (unsigned long long)res.start); + bus->priv = lp; + bus->name = "Xilinx Emaclite MDIO"; + bus->read = xemaclite_mdio_read; + bus->write = xemaclite_mdio_write; + bus->parent = dev; + bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ + + lp->mii_bus = bus; + + rc = of_mdiobus_register(bus, np); + if (rc) { + dev_err(dev, "Failed to register mdio bus.\n"); + goto err_register; + } + + return 0; + +err_register: + mdiobus_free(bus); + return rc; +} + +/** + * xemaclite_adjust_link - Link state callback for the Emaclite device + * @ndev: pointer to net_device struct + * + * There's nothing in the Emaclite device to be configured when the link + * state changes. We just print the status. + */ +static void xemaclite_adjust_link(struct net_device *ndev) +{ + struct net_local *lp = netdev_priv(ndev); + struct phy_device *phy = lp->phy_dev; + int link_state; + + /* hash together the state values to decide if something has changed */ + link_state = phy->speed | (phy->duplex << 1) | phy->link; + + if (lp->last_link != link_state) { + lp->last_link = link_state; + phy_print_status(phy); + } +} + +/** + * xemaclite_open - Open the network device + * @dev: Pointer to the network device + * + * This function sets the MAC address, requests an IRQ and enables interrupts + * for the Emaclite device and starts the Tx queue. + * It also connects to the phy device, if MDIO is included in Emaclite device. + */ +static int xemaclite_open(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + int retval; + + /* Just to be safe, stop the device first */ + xemaclite_disable_interrupts(lp); + + if (lp->phy_node) { + u32 bmcr; + + lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, + xemaclite_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (!lp->phy_dev) { + dev_err(&lp->ndev->dev, "of_phy_connect() failed\n"); + return -ENODEV; + } + + /* EmacLite doesn't support giga-bit speeds */ + lp->phy_dev->supported &= (PHY_BASIC_FEATURES); + lp->phy_dev->advertising = lp->phy_dev->supported; + + /* Don't advertise 1000BASE-T Full/Half duplex speeds */ + phy_write(lp->phy_dev, MII_CTRL1000, 0); + + /* Advertise only 10 and 100mbps full/half duplex speeds */ + phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_CSMA); + + /* Restart auto negotiation */ + bmcr = phy_read(lp->phy_dev, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(lp->phy_dev, MII_BMCR, bmcr); + + phy_start(lp->phy_dev); + } + + /* Set the MAC address each time opened */ + xemaclite_update_address(lp, dev->dev_addr); + + /* Grab the IRQ */ + retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev); + if (retval) { + dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n", + dev->irq); + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + lp->phy_dev = NULL; + + return retval; + } + + /* Enable Interrupts */ + xemaclite_enable_interrupts(lp); + + /* We're ready to go */ + netif_start_queue(dev); + + return 0; +} + +/** + * xemaclite_close - Close the network device + * @dev: Pointer to the network device + * + * This function stops the Tx queue, disables interrupts and frees the IRQ for + * the Emaclite device. + * It also disconnects the phy device associated with the Emaclite device. 
+ */ +static int xemaclite_close(struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + + netif_stop_queue(dev); + xemaclite_disable_interrupts(lp); + free_irq(dev->irq, dev); + + if (lp->phy_dev) + phy_disconnect(lp->phy_dev); + lp->phy_dev = NULL; + + return 0; +} + +/** + * xemaclite_send - Transmit a frame + * @orig_skb: Pointer to the socket buffer to be transmitted + * @dev: Pointer to the network device + * + * This function checks if the Tx buffer of the Emaclite device is free to send + * data. If so, it fills the Tx buffer with data from socket buffer data, + * updates the stats and frees the socket buffer. The Tx completion is signaled + * by an interrupt. If the Tx buffer isn't free, then the socket buffer is + * deferred and the Tx queue is stopped so that the deferred socket buffer can + * be transmitted when the Emaclite device is free to transmit data. + * + * Return: 0, always. + */ +static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev) +{ + struct net_local *lp = netdev_priv(dev); + struct sk_buff *new_skb; + unsigned int len; + unsigned long flags; + + len = orig_skb->len; + + new_skb = orig_skb; + + spin_lock_irqsave(&lp->reset_lock, flags); + if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) { + /* If the Emaclite Tx buffer is busy, stop the Tx queue and + * defer the skb for transmission during the ISR, after the + * current transmission is complete */ + netif_stop_queue(dev); + lp->deferred_skb = new_skb; + /* Take the time stamp now, since we can't do this in an ISR. */ + skb_tx_timestamp(new_skb); + spin_unlock_irqrestore(&lp->reset_lock, flags); + return 0; + } + spin_unlock_irqrestore(&lp->reset_lock, flags); + + skb_tx_timestamp(new_skb); + + dev->stats.tx_bytes += len; + dev_consume_skb_any(new_skb); + + return 0; +} + +/** + * xemaclite_remove_ndev - Free the network device + * @ndev: Pointer to the network device to be freed + * + * This function un maps the IO region of the Emaclite device and frees the net + * device. + */ +static void xemaclite_remove_ndev(struct net_device *ndev) +{ + if (ndev) { + free_netdev(ndev); + } +} + +/** + * get_bool - Get a parameter from the OF device + * @ofdev: Pointer to OF device structure + * @s: Property to be retrieved + * + * This function looks for a property in the device node and returns the value + * of the property if its found or 0 if the property is not found. + * + * Return: Value of the parameter if the parameter is found, or 0 otherwise + */ +static bool get_bool(struct platform_device *ofdev, const char *s) +{ + u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL); + + if (p) { + return (bool)*p; + } else { + dev_warn(&ofdev->dev, "Parameter %s not found," + "defaulting to false\n", s); + return false; + } +} + +static struct net_device_ops xemaclite_netdev_ops; + +/** + * xemaclite_of_probe - Probe method for the Emaclite device. + * @ofdev: Pointer to OF device structure + * @match: Pointer to the structure used for matching a device + * + * This function probes for the Emaclite device in the device tree. + * It initializes the driver data structure and the hardware, sets the MAC + * address and registers the network device. + * It also registers a mii_bus for the Emaclite device, if MDIO is included + * in the device. + * + * Return: 0, if the driver is bound to the Emaclite device, or + * a negative error if there is failure. 
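+ * + * Besides the memory and interrupt resources, the probe reads the optional + * "xlnx,tx-ping-pong" and "xlnx,rx-ping-pong" properties (treated as + * booleans), the MAC address from the standard device tree properties and an + * optional "phy-handle" reference to the attached PHY.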
+ */ +static int xemaclite_of_probe(struct platform_device *ofdev) +{ + struct resource *res; + struct net_device *ndev = NULL; + struct net_local *lp = NULL; + struct device *dev = &ofdev->dev; + const void *mac_address; + + int rc = 0; + + dev_info(dev, "Device Tree Probing\n"); + + /* Create an ethernet device instance */ + ndev = alloc_etherdev(sizeof(struct net_local)); + if (!ndev) + return -ENOMEM; + + dev_set_drvdata(dev, ndev); + SET_NETDEV_DEV(ndev, &ofdev->dev); + + lp = netdev_priv(ndev); + lp->ndev = ndev; + + /* Get IRQ for the device */ + res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(dev, "no IRQ found\n"); + rc = -ENXIO; + goto error; + } + + ndev->irq = res->start; + + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); + lp->base_addr = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(lp->base_addr)) { + rc = PTR_ERR(lp->base_addr); + goto error; + } + + ndev->mem_start = res->start; + ndev->mem_end = res->end; + + spin_lock_init(&lp->reset_lock); + lp->next_tx_buf_to_use = 0x0; + lp->next_rx_buf_to_use = 0x0; + lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong"); + lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); + mac_address = of_get_mac_address(ofdev->dev.of_node); + + if (mac_address) + /* Set the MAC address. */ + memcpy(ndev->dev_addr, mac_address, ETH_ALEN); + else + dev_warn(dev, "No MAC address found\n"); + + /* Clear the Tx CSR's in case this is a restart */ + __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); + __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + + /* Set the MAC address in the EmacLite device */ + xemaclite_update_address(lp, ndev->dev_addr); + + lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); + rc = xemaclite_mdio_setup(lp, &ofdev->dev); + if (rc) + dev_warn(&ofdev->dev, "error registering MDIO bus\n"); + + dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); + + ndev->netdev_ops = &xemaclite_netdev_ops; + ndev->flags &= ~IFF_MULTICAST; + ndev->watchdog_timeo = TX_TIMEOUT; + + /* Finally, register the device */ + rc = register_netdev(ndev); + if (rc) { + dev_err(dev, + "Cannot register network device, aborting\n"); + goto error; + } + + dev_info(dev, + "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n", + (unsigned int __force)ndev->mem_start, + (unsigned int __force)lp->base_addr, ndev->irq); + return 0; + +error: + xemaclite_remove_ndev(ndev); + return rc; +} + +/** + * xemaclite_of_remove - Unbind the driver from the Emaclite device. + * @of_dev: Pointer to OF device structure + * + * This function is called if a device is physically removed from the system or + * if the driver module is being unloaded. It frees any resources allocated to + * the device. + * + * Return: 0, always. 
+ */ +static int xemaclite_of_remove(struct platform_device *of_dev) +{ + struct net_device *ndev = platform_get_drvdata(of_dev); + + struct net_local *lp = netdev_priv(ndev); + + /* Un-register the mii_bus, if configured */ + if (lp->has_mdio) { + mdiobus_unregister(lp->mii_bus); + kfree(lp->mii_bus->irq); + mdiobus_free(lp->mii_bus); + lp->mii_bus = NULL; + } + + unregister_netdev(ndev); + + of_node_put(lp->phy_node); + lp->phy_node = NULL; + + xemaclite_remove_ndev(ndev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +xemaclite_poll_controller(struct net_device *ndev) +{ + disable_irq(ndev->irq); + xemaclite_interrupt(ndev->irq, ndev); + enable_irq(ndev->irq); +} +#endif + +static struct net_device_ops xemaclite_netdev_ops = { + .ndo_open = xemaclite_open, + .ndo_stop = xemaclite_close, + .ndo_start_xmit = xemaclite_send, + .ndo_set_mac_address = xemaclite_set_mac_address, + .ndo_tx_timeout = xemaclite_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = xemaclite_poll_controller, +#endif +}; + +/* Match table for OF platform binding */ +static const struct of_device_id xemaclite_of_match[] = { + { .compatible = "xlnx,opb-ethernetlite-1.01.a", }, + { .compatible = "xlnx,opb-ethernetlite-1.01.b", }, + { .compatible = "xlnx,xps-ethernetlite-1.00.a", }, + { .compatible = "xlnx,xps-ethernetlite-2.00.a", }, + { .compatible = "xlnx,xps-ethernetlite-2.01.a", }, + { .compatible = "xlnx,xps-ethernetlite-3.00.a", }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, xemaclite_of_match); + +static struct platform_driver xemaclite_of_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = xemaclite_of_match, + }, + .probe = xemaclite_of_probe, + .remove = xemaclite_of_remove, +}; + +module_platform_driver(xemaclite_of_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig new file mode 100644 index 000000000..69f56a6de --- /dev/null +++ b/drivers/net/ethernet/xircom/Kconfig @@ -0,0 +1,31 @@ +# +# Xircom network device configuration +# + +config NET_VENDOR_XIRCOM + bool "Xircom devices" + default y + depends on PCMCIA + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Xircom cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_XIRCOM + +config PCMCIA_XIRC2PS + tristate "Xircom 16-bit PCMCIA support" + depends on PCMCIA + ---help--- + Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card) + Ethernet or Fast Ethernet card to your computer. + + To compile this driver as a module, choose M here: the module will be + called xirc2ps_cs. If unsure, say N. + +endif # NET_VENDOR_XIRCOM diff --git a/drivers/net/ethernet/xircom/Makefile b/drivers/net/ethernet/xircom/Makefile new file mode 100644 index 000000000..3b7aebd8b --- /dev/null +++ b/drivers/net/ethernet/xircom/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Xircom network device drivers. 
+# + +obj-$(CONFIG_PCMCIA_XIRC2PS) += xirc2ps_cs.o diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c new file mode 100644 index 000000000..d56f86932 --- /dev/null +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -0,0 +1,1798 @@ +/* [xirc2ps_cs.c wk 03.11.99] (1.40 1999/11/18 00:06:03) + * Xircom CreditCard Ethernet Adapter IIps driver + * Xircom Realport 10/100 (RE-100) driver + * + * This driver supports various Xircom CreditCard Ethernet adapters + * including the CE2, CE IIps, RE-10, CEM28, CEM33, CE33, CEM56, + * CE3-100, CE3B, RE-100, REM10BT, and REM56G-100. + * + * 2000-09-24 The Xircom CE3B-100 may not + * autodetect the media properly. In this case use the + * if_port=1 (for 10BaseT) or if_port=4 (for 100BaseT) options + * to force the media type. + * + * Written originally by Werner Koch based on David Hinds' skeleton of the + * PCMCIA driver. + * + * Copyright (c) 1997,1998 Werner Koch (dd9jn) + * + * This driver is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * It is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + * + * + * ALTERNATIVELY, this driver may be distributed under the terms of + * the following license, in which case the provisions of this license + * are required INSTEAD OF the GNU General Public License. (This clause + * is necessary due to a potential bad interaction between the GPL and + * the restrictions contained in a BSD-style copyright.) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, and the entire permission notice in its entirety, + * including the disclaimer of warranties. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#ifndef MANFID_COMPAQ + #define MANFID_COMPAQ 0x0138 + #define MANFID_COMPAQ2 0x0183 /* is this correct? */ +#endif + +#include + +/* Time in jiffies before concluding Tx hung */ +#define TX_TIMEOUT ((400*HZ)/1000) + +/**************** + * Some constants used to access the hardware + */ + +/* Register offsets and value constans */ +#define XIRCREG_CR 0 /* Command register (wr) */ +enum xirc_cr { + TransmitPacket = 0x01, + SoftReset = 0x02, + EnableIntr = 0x04, + ForceIntr = 0x08, + ClearTxFIFO = 0x10, + ClearRxOvrun = 0x20, + RestartTx = 0x40 +}; +#define XIRCREG_ESR 0 /* Ethernet status register (rd) */ +enum xirc_esr { + FullPktRcvd = 0x01, /* full packet in receive buffer */ + PktRejected = 0x04, /* a packet has been rejected */ + TxPktPend = 0x08, /* TX Packet Pending */ + IncorPolarity = 0x10, + MediaSelect = 0x20 /* set if TP, clear if AUI */ +}; +#define XIRCREG_PR 1 /* Page Register select */ +#define XIRCREG_EDP 4 /* Ethernet Data Port Register */ +#define XIRCREG_ISR 6 /* Ethernet Interrupt Status Register */ +enum xirc_isr { + TxBufOvr = 0x01, /* TX Buffer Overflow */ + PktTxed = 0x02, /* Packet Transmitted */ + MACIntr = 0x04, /* MAC Interrupt occurred */ + TxResGrant = 0x08, /* Tx Reservation Granted */ + RxFullPkt = 0x20, /* Rx Full Packet */ + RxPktRej = 0x40, /* Rx Packet Rejected */ + ForcedIntr= 0x80 /* Forced Interrupt */ +}; +#define XIRCREG1_IMR0 12 /* Ethernet Interrupt Mask Register (on page 1)*/ +#define XIRCREG1_IMR1 13 +#define XIRCREG0_TSO 8 /* Transmit Space Open Register (on page 0)*/ +#define XIRCREG0_TRS 10 /* Transmit reservation Size Register (page 0)*/ +#define XIRCREG0_DO 12 /* Data Offset Register (page 0) (wr) */ +#define XIRCREG0_RSR 12 /* Receive Status Register (page 0) (rd) */ +enum xirc_rsr { + PhyPkt = 0x01, /* set:physical packet, clear: multicast packet */ + BrdcstPkt = 0x02, /* set if it is a broadcast packet */ + PktTooLong = 0x04, /* set if packet length > 1518 */ + AlignErr = 0x10, /* incorrect CRC and last octet not complete */ + CRCErr = 0x20, /* incorrect CRC and last octet is complete */ + PktRxOk = 0x80 /* received ok */ +}; +#define XIRCREG0_PTR 13 /* packets transmitted register (rd) */ +#define XIRCREG0_RBC 14 /* receive byte count regsister (rd) */ +#define XIRCREG1_ECR 14 /* ethernet configurationn register */ +enum xirc_ecr { + FullDuplex = 0x04, /* enable full duplex mode */ + LongTPMode = 0x08, /* adjust for longer lengths of TP cable */ + DisablePolCor = 0x10,/* disable auto polarity correction */ + DisableLinkPulse = 0x20, /* disable link pulse generation */ + DisableAutoTx = 0x40, /* disable auto-transmit */ +}; +#define XIRCREG2_RBS 8 /* receive buffer start register */ +#define XIRCREG2_LED 10 /* LED Configuration register */ +/* values for the leds: Bits 2-0 for led 1 + * 0 disabled Bits 5-3 for led 2 + * 1 collision + * 2 noncollision + * 3 link_detected + * 4 incor_polarity + * 5 jabber + * 6 auto_assertion + * 7 rx_tx_activity + */ +#define XIRCREG2_MSR 12 /* Mohawk specific register */ + +#define XIRCREG4_GPR0 8 /* General Purpose Register 0 */ +#define XIRCREG4_GPR1 9 /* General Purpose Register 1 */ +#define XIRCREG2_GPR2 13 /* General Purpose Register 2 (page2!)*/ +#define XIRCREG4_BOV 10 /* Bonding Version Register */ +#define 
XIRCREG4_LMA 12 /* Local Memory Address Register */ +#define XIRCREG4_LMD 14 /* Local Memory Data Port */ +/* MAC register can only by accessed with 8 bit operations */ +#define XIRCREG40_CMD0 8 /* Command Register (wr) */ +enum xirc_cmd { /* Commands */ + Transmit = 0x01, + EnableRecv = 0x04, + DisableRecv = 0x08, + Abort = 0x10, + Online = 0x20, + IntrAck = 0x40, + Offline = 0x80 +}; +#define XIRCREG5_RHSA0 10 /* Rx Host Start Address */ +#define XIRCREG40_RXST0 9 /* Receive Status Register */ +#define XIRCREG40_TXST0 11 /* Transmit Status Register 0 */ +#define XIRCREG40_TXST1 12 /* Transmit Status Register 10 */ +#define XIRCREG40_RMASK0 13 /* Receive Mask Register */ +#define XIRCREG40_TMASK0 14 /* Transmit Mask Register 0 */ +#define XIRCREG40_TMASK1 15 /* Transmit Mask Register 0 */ +#define XIRCREG42_SWC0 8 /* Software Configuration 0 */ +#define XIRCREG42_SWC1 9 /* Software Configuration 1 */ +#define XIRCREG42_BOC 10 /* Back-Off Configuration */ +#define XIRCREG44_TDR0 8 /* Time Domain Reflectometry 0 */ +#define XIRCREG44_TDR1 9 /* Time Domain Reflectometry 1 */ +#define XIRCREG44_RXBC_LO 10 /* Rx Byte Count 0 (rd) */ +#define XIRCREG44_RXBC_HI 11 /* Rx Byte Count 1 (rd) */ +#define XIRCREG45_REV 15 /* Revision Register (rd) */ +#define XIRCREG50_IA 8 /* Individual Address (8-13) */ + +static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; + +/* card types */ +#define XIR_UNKNOWN 0 /* unknown: not supported */ +#define XIR_CE 1 /* (prodid 1) different hardware: not supported */ +#define XIR_CE2 2 /* (prodid 2) */ +#define XIR_CE3 3 /* (prodid 3) */ +#define XIR_CEM 4 /* (prodid 1) different hardware: not supported */ +#define XIR_CEM2 5 /* (prodid 2) */ +#define XIR_CEM3 6 /* (prodid 3) */ +#define XIR_CEM33 7 /* (prodid 4) */ +#define XIR_CEM56M 8 /* (prodid 5) */ +#define XIR_CEM56 9 /* (prodid 6) */ +#define XIR_CM28 10 /* (prodid 3) modem only: not supported here */ +#define XIR_CM33 11 /* (prodid 4) modem only: not supported here */ +#define XIR_CM56 12 /* (prodid 5) modem only: not supported here */ +#define XIR_CG 13 /* (prodid 1) GSM modem only: not supported */ +#define XIR_CBE 14 /* (prodid 1) cardbus ethernet: not supported */ +/*====================================================================*/ + +/* Module parameters */ + +MODULE_DESCRIPTION("Xircom PCMCIA ethernet driver"); +MODULE_LICENSE("Dual MPL/GPL"); + +#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) + +INT_MODULE_PARM(if_port, 0); +INT_MODULE_PARM(full_duplex, 0); +INT_MODULE_PARM(do_sound, 1); +INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */ + +/*====================================================================*/ + +/* We do not process more than these number of bytes during one + * interrupt. (Of course we receive complete packets, so this is not + * an exact value). + * Something between 2000..22000; first value gives best interrupt latency, + * the second enables the usage of the complete on-chip buffer. We use the + * high value as the initial value. 
+ */ +static unsigned maxrx_bytes = 22000; + +/* MII management prototypes */ +static void mii_idle(unsigned int ioaddr); +static void mii_putbit(unsigned int ioaddr, unsigned data); +static int mii_getbit(unsigned int ioaddr); +static void mii_wbits(unsigned int ioaddr, unsigned data, int len); +static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg); +static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, + unsigned data, int len); + +static int has_ce2_string(struct pcmcia_device * link); +static int xirc2ps_config(struct pcmcia_device * link); +static void xirc2ps_release(struct pcmcia_device * link); +static void xirc2ps_detach(struct pcmcia_device *p_dev); + +static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); + +struct local_info { + struct net_device *dev; + struct pcmcia_device *p_dev; + + int card_type; + int probe_port; + int silicon; /* silicon revision. 0=old CE2, 1=Scipper, 4=Mohawk */ + int mohawk; /* a CE3 type card */ + int dingo; /* a CEM56 type card */ + int new_mii; /* has full 10baseT/100baseT MII */ + int modem; /* is a multi function card (i.e with a modem) */ + void __iomem *dingo_ccr; /* only used for CEM56 cards */ + unsigned last_ptr_value; /* last packets transmitted value */ + const char *manf_str; + struct work_struct tx_timeout_task; +}; + +/**************** + * Some more prototypes + */ +static netdev_tx_t do_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static void xirc_tx_timeout(struct net_device *dev); +static void xirc2ps_tx_timeout_task(struct work_struct *work); +static void set_addresses(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); +static int set_card_type(struct pcmcia_device *link); +static int do_config(struct net_device *dev, struct ifmap *map); +static int do_open(struct net_device *dev); +static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static const struct ethtool_ops netdev_ethtool_ops; +static void hardreset(struct net_device *dev); +static void do_reset(struct net_device *dev, int full); +static int init_mii(struct net_device *dev); +static void do_powerdown(struct net_device *dev); +static int do_stop(struct net_device *dev); + +/*=============== Helper functions =========================*/ +#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR) +#define GetByte(reg) ((unsigned)inb(ioaddr + (reg))) +#define GetWord(reg) ((unsigned)inw(ioaddr + (reg))) +#define PutByte(reg,value) outb((value), ioaddr+(reg)) +#define PutWord(reg,value) outw((value), ioaddr+(reg)) + +/*====== Functions used for debugging =================================*/ +#if 0 /* reading regs may change system status */ +static void +PrintRegisters(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + + if (pc_debug > 1) { + int i, page; + + printk(KERN_DEBUG pr_fmt("Register common: ")); + for (i = 0; i < 8; i++) + pr_cont(" %2.2x", GetByte(i)); + pr_cont("\n"); + for (page = 0; page <= 8; page++) { + printk(KERN_DEBUG pr_fmt("Register page %2x: "), page); + SelectPage(page); + for (i = 8; i < 16; i++) + pr_cont(" %2.2x", GetByte(i)); + pr_cont("\n"); + } + for (page=0x40 ; page <= 0x5f; page++) { + if (page == 0x43 || (page >= 0x46 && page <= 0x4f) || + (page >= 0x51 && page <=0x5e)) + continue; + printk(KERN_DEBUG pr_fmt("Register page %2x: "), page); + SelectPage(page); + for (i = 8; i < 16; i++) + pr_cont(" %2.2x", GetByte(i)); + pr_cont("\n"); + } + } +} +#endif /* 0 */ + +/*============== MII Management functions ===============*/ 
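+/*
+ * Editor's note -- illustrative sketch only, not part of the upstream
+ * driver.  The helpers below bit-bang IEEE 802.3 Clause 22 MDIO frames
+ * through GPR2 (MDC on bit 0).  For reference, the 14 header bits that
+ * mii_rd()/mii_wr() shift out before the turnaround -- the 0x06/0x05
+ * start+opcode, the 5-bit PHY address and the 5-bit register address --
+ * could be packed as shown here; mdio_c22_header() is a made-up name
+ * used only for this example.
+ */
+#if 0 /* illustrative only, never compiled */
+static unsigned int mdio_c22_header(int read, unsigned int phyaddr,
+				    unsigned int phyreg)
+{
+	/* ST = 01, OP = 10 for a read (0x6) or 01 for a write (0x5) */
+	unsigned int st_op = read ? 0x6 : 0x5;
+
+	/* 4 bits ST+OP, then 5-bit PHY address, then 5-bit register */
+	return (st_op << 10) | ((phyaddr & 0x1f) << 5) | (phyreg & 0x1f);
+}
+#endif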
+ +/**************** + * Turn around for read + */ +static void +mii_idle(unsigned int ioaddr) +{ + PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */ + udelay(1); + PutByte(XIRCREG2_GPR2, 0x04|1); /* and drive MDCK high */ + udelay(1); +} + +/**************** + * Write a bit to MDI/O + */ +static void +mii_putbit(unsigned int ioaddr, unsigned data) +{ + #if 1 + if (data) { + PutByte(XIRCREG2_GPR2, 0x0c|2|0); /* set MDIO */ + udelay(1); + PutByte(XIRCREG2_GPR2, 0x0c|2|1); /* and drive MDCK high */ + udelay(1); + } else { + PutByte(XIRCREG2_GPR2, 0x0c|0|0); /* clear MDIO */ + udelay(1); + PutByte(XIRCREG2_GPR2, 0x0c|0|1); /* and drive MDCK high */ + udelay(1); + } + #else + if (data) { + PutWord(XIRCREG2_GPR2-1, 0x0e0e); + udelay(1); + PutWord(XIRCREG2_GPR2-1, 0x0f0f); + udelay(1); + } else { + PutWord(XIRCREG2_GPR2-1, 0x0c0c); + udelay(1); + PutWord(XIRCREG2_GPR2-1, 0x0d0d); + udelay(1); + } + #endif +} + +/**************** + * Get a bit from MDI/O + */ +static int +mii_getbit(unsigned int ioaddr) +{ + unsigned d; + + PutByte(XIRCREG2_GPR2, 4|0); /* drive MDCK low */ + udelay(1); + d = GetByte(XIRCREG2_GPR2); /* read MDIO */ + PutByte(XIRCREG2_GPR2, 4|1); /* drive MDCK high again */ + udelay(1); + return d & 0x20; /* read MDIO */ +} + +static void +mii_wbits(unsigned int ioaddr, unsigned data, int len) +{ + unsigned m = 1 << (len-1); + for (; m; m >>= 1) + mii_putbit(ioaddr, data & m); +} + +static unsigned +mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg) +{ + int i; + unsigned data=0, m; + + SelectPage(2); + for (i=0; i < 32; i++) /* 32 bit preamble */ + mii_putbit(ioaddr, 1); + mii_wbits(ioaddr, 0x06, 4); /* Start and opcode for read */ + mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */ + mii_wbits(ioaddr, phyreg, 5); /* PHY register to read */ + mii_idle(ioaddr); /* turn around */ + mii_getbit(ioaddr); + + for (m = 1<<15; m; m >>= 1) + if (mii_getbit(ioaddr)) + data |= m; + mii_idle(ioaddr); + return data; +} + +static void +mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data, + int len) +{ + int i; + + SelectPage(2); + for (i=0; i < 32; i++) /* 32 bit preamble */ + mii_putbit(ioaddr, 1); + mii_wbits(ioaddr, 0x05, 4); /* Start and opcode for write */ + mii_wbits(ioaddr, phyaddr, 5); /* PHY address to be accessed */ + mii_wbits(ioaddr, phyreg, 5); /* PHY Register to write */ + mii_putbit(ioaddr, 1); /* turn around */ + mii_putbit(ioaddr, 0); + mii_wbits(ioaddr, data, len); /* And write the data */ + mii_idle(ioaddr); +} + +/*============= Main bulk of functions =========================*/ + +static const struct net_device_ops netdev_ops = { + .ndo_open = do_open, + .ndo_stop = do_stop, + .ndo_start_xmit = do_start_xmit, + .ndo_tx_timeout = xirc_tx_timeout, + .ndo_set_config = do_config, + .ndo_do_ioctl = do_ioctl, + .ndo_set_rx_mode = set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int +xirc2ps_probe(struct pcmcia_device *link) +{ + struct net_device *dev; + struct local_info *local; + + dev_dbg(&link->dev, "attach()\n"); + + /* Allocate the device structure */ + dev = alloc_etherdev(sizeof(struct local_info)); + if (!dev) + return -ENOMEM; + local = netdev_priv(dev); + local->dev = dev; + local->p_dev = link; + link->priv = dev; + + /* General socket configuration */ + link->config_index = 1; + + /* Fill in card specific entries */ + dev->netdev_ops = &netdev_ops; + dev->ethtool_ops = &netdev_ethtool_ops; + dev->watchdog_timeo = 
TX_TIMEOUT; + INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task); + + return xirc2ps_config(link); +} /* xirc2ps_attach */ + +static void +xirc2ps_detach(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + dev_dbg(&link->dev, "detach\n"); + + unregister_netdev(dev); + + xirc2ps_release(link); + + free_netdev(dev); +} /* xirc2ps_detach */ + +/**************** + * Detect the type of the card. s is the buffer with the data of tuple 0x20 + * Returns: 0 := not supported + * mediaid=11 and prodid=47 + * Media-Id bits: + * Ethernet 0x01 + * Tokenring 0x02 + * Arcnet 0x04 + * Wireless 0x08 + * Modem 0x10 + * GSM only 0x20 + * Prod-Id bits: + * Pocket 0x10 + * External 0x20 + * Creditcard 0x40 + * Cardbus 0x80 + * + */ +static int +set_card_type(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + struct local_info *local = netdev_priv(dev); + u8 *buf; + unsigned int cisrev, mediaid, prodid; + size_t len; + + len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf); + if (len < 5) { + dev_err(&link->dev, "invalid CIS -- sorry\n"); + return 0; + } + + cisrev = buf[2]; + mediaid = buf[3]; + prodid = buf[4]; + + dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n", + cisrev, mediaid, prodid); + + local->mohawk = 0; + local->dingo = 0; + local->modem = 0; + local->card_type = XIR_UNKNOWN; + if (!(prodid & 0x40)) { + pr_notice("Oops: Not a creditcard\n"); + return 0; + } + if (!(mediaid & 0x01)) { + pr_notice("Not an Ethernet card\n"); + return 0; + } + if (mediaid & 0x10) { + local->modem = 1; + switch(prodid & 15) { + case 1: local->card_type = XIR_CEM ; break; + case 2: local->card_type = XIR_CEM2 ; break; + case 3: local->card_type = XIR_CEM3 ; break; + case 4: local->card_type = XIR_CEM33 ; break; + case 5: local->card_type = XIR_CEM56M; + local->mohawk = 1; + break; + case 6: + case 7: /* 7 is the RealPort 10/56 */ + local->card_type = XIR_CEM56 ; + local->mohawk = 1; + local->dingo = 1; + break; + } + } else { + switch(prodid & 15) { + case 1: local->card_type = has_ce2_string(link)? XIR_CE2 : XIR_CE ; + break; + case 2: local->card_type = XIR_CE2; break; + case 3: local->card_type = XIR_CE3; + local->mohawk = 1; + break; + } + } + if (local->card_type == XIR_CE || local->card_type == XIR_CEM) { + pr_notice("Sorry, this is an old CE card\n"); + return 0; + } + if (local->card_type == XIR_UNKNOWN) + pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid); + + return 1; +} + +/**************** + * There are some CE2 cards out which claim to be a CE card. + * This function looks for a "CE2" in the 3rd version field. 
+ * Returns: true if this is a CE2 + */ +static int +has_ce2_string(struct pcmcia_device * p_dev) +{ + if (p_dev->prod_id[2] && strstr(p_dev->prod_id[2], "CE2")) + return 1; + return 0; +} + +static int +xirc2ps_config_modem(struct pcmcia_device *p_dev, void *priv_data) +{ + unsigned int ioaddr; + + if ((p_dev->resource[0]->start & 0xf) == 8) + return -ENODEV; + + p_dev->resource[0]->end = 16; + p_dev->resource[1]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->io_lines = 10; + + p_dev->resource[1]->start = p_dev->resource[0]->start; + for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { + p_dev->resource[0]->start = ioaddr; + if (!pcmcia_request_io(p_dev)) + return 0; + } + return -ENODEV; +} + +static int +xirc2ps_config_check(struct pcmcia_device *p_dev, void *priv_data) +{ + int *pass = priv_data; + resource_size_t tmp = p_dev->resource[1]->start; + + tmp += (*pass ? (p_dev->config_index & 0x20 ? -24 : 8) + : (p_dev->config_index & 0x20 ? 8 : -24)); + + if ((p_dev->resource[0]->start & 0xf) == 8) + return -ENODEV; + + p_dev->resource[0]->end = 18; + p_dev->resource[1]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->io_lines = 10; + + p_dev->resource[1]->start = p_dev->resource[0]->start; + p_dev->resource[0]->start = tmp; + return pcmcia_request_io(p_dev); +} + + +static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev, + tuple_t *tuple, + void *priv) +{ + struct net_device *dev = priv; + int i; + + if (tuple->TupleDataLen != 13) + return -EINVAL; + if ((tuple->TupleData[0] != 2) || (tuple->TupleData[1] != 1) || + (tuple->TupleData[2] != 6)) + return -EINVAL; + /* another try (James Lehmer's CE2 version 4.1)*/ + for (i = 2; i < 6; i++) + dev->dev_addr[i] = tuple->TupleData[i+2]; + return 0; +}; + + +static int +xirc2ps_config(struct pcmcia_device * link) +{ + struct net_device *dev = link->priv; + struct local_info *local = netdev_priv(dev); + unsigned int ioaddr; + int err; + u8 *buf; + size_t len; + + local->dingo_ccr = NULL; + + dev_dbg(&link->dev, "config\n"); + + /* Is this a valid card */ + if (link->has_manf_id == 0) { + pr_notice("manfid not found in CIS\n"); + goto failure; + } + + switch (link->manf_id) { + case MANFID_XIRCOM: + local->manf_str = "Xircom"; + break; + case MANFID_ACCTON: + local->manf_str = "Accton"; + break; + case MANFID_COMPAQ: + case MANFID_COMPAQ2: + local->manf_str = "Compaq"; + break; + case MANFID_INTEL: + local->manf_str = "Intel"; + break; + case MANFID_TOSHIBA: + local->manf_str = "Toshiba"; + break; + default: + pr_notice("Unknown Card Manufacturer ID: 0x%04x\n", + (unsigned)link->manf_id); + goto failure; + } + dev_dbg(&link->dev, "found %s card\n", local->manf_str); + + if (!set_card_type(link)) { + pr_notice("this card is not supported\n"); + goto failure; + } + + /* get the ethernet address from the CIS */ + err = pcmcia_get_mac_from_cis(link, dev); + + /* not found: try to get the node-id from tuple 0x89 */ + if (err) { + len = pcmcia_get_tuple(link, 0x89, &buf); + /* data layout looks like tuple 0x22 */ + if (buf && len == 8) { + if (*buf == CISTPL_FUNCE_LAN_NODE_ID) { + int i; + for (i = 2; i < 6; i++) + dev->dev_addr[i] = buf[i+2]; + } else + err = -1; + } + kfree(buf); + } + + if 
(err) + err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev); + + if (err) { + pr_notice("node-id not found in CIS\n"); + goto failure; + } + + if (local->modem) { + int pass; + link->config_flags |= CONF_AUTO_SET_IO; + + if (local->dingo) { + /* Take the Modem IO port from the CIS and scan for a free + * Ethernet port */ + if (!pcmcia_loop_config(link, xirc2ps_config_modem, NULL)) + goto port_found; + } else { + /* We do 2 passes here: The first one uses the regular mapping and + * the second tries again, thereby considering that the 32 ports are + * mirrored every 32 bytes. Actually we use a mirrored port for + * the Mako if (on the first pass) the COR bit 5 is set. + */ + for (pass=0; pass < 2; pass++) + if (!pcmcia_loop_config(link, xirc2ps_config_check, + &pass)) + goto port_found; + /* if special option: + * try to configure as Ethernet only. + * .... */ + } + pr_notice("no ports available\n"); + } else { + link->io_lines = 10; + link->resource[0]->end = 16; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { + link->resource[0]->start = ioaddr; + if (!(err = pcmcia_request_io(link))) + goto port_found; + } + link->resource[0]->start = 0; /* let CS decide */ + if ((err = pcmcia_request_io(link))) + goto config_error; + } + port_found: + if (err) + goto config_error; + + /**************** + * Now allocate an interrupt line. Note that this does not + * actually assign a handler to the interrupt. + */ + if ((err=pcmcia_request_irq(link, xirc2ps_interrupt))) + goto config_error; + + link->config_flags |= CONF_ENABLE_IRQ; + if (do_sound) + link->config_flags |= CONF_ENABLE_SPKR; + + if ((err = pcmcia_enable_device(link))) + goto config_error; + + if (local->dingo) { + /* Reset the modem's BAR to the correct value + * This is necessary because in the RequestConfiguration call, + * the base address of the ethernet port (BasePort1) is written + * to the BAR registers of the modem. + */ + err = pcmcia_write_config_byte(link, CISREG_IOBASE_0, (u8) + link->resource[1]->start & 0xff); + if (err) + goto config_error; + + err = pcmcia_write_config_byte(link, CISREG_IOBASE_1, + (link->resource[1]->start >> 8) & 0xff); + if (err) + goto config_error; + + /* There is no config entry for the Ethernet part which + * is at 0x0800. So we allocate a window into the attribute + * memory and write direct to the CIS registers + */ + link->resource[2]->flags = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | + WIN_ENABLE; + link->resource[2]->start = link->resource[2]->end = 0; + if ((err = pcmcia_request_window(link, link->resource[2], 0))) + goto config_error; + + local->dingo_ccr = ioremap(link->resource[2]->start, 0x1000) + 0x0800; + if ((err = pcmcia_map_mem_page(link, link->resource[2], 0))) + goto config_error; + + /* Setup the CCRs; there are no infos in the CIS about the Ethernet + * part. 
+ */ + writeb(0x47, local->dingo_ccr + CISREG_COR); + ioaddr = link->resource[0]->start; + writeb(ioaddr & 0xff , local->dingo_ccr + CISREG_IOBASE_0); + writeb((ioaddr >> 8)&0xff , local->dingo_ccr + CISREG_IOBASE_1); + + #if 0 + { + u_char tmp; + pr_info("ECOR:"); + for (i=0; i < 7; i++) { + tmp = readb(local->dingo_ccr + i*2); + pr_cont(" %02x", tmp); + } + pr_cont("\n"); + pr_info("DCOR:"); + for (i=0; i < 4; i++) { + tmp = readb(local->dingo_ccr + 0x20 + i*2); + pr_cont(" %02x", tmp); + } + pr_cont("\n"); + pr_info("SCOR:"); + for (i=0; i < 10; i++) { + tmp = readb(local->dingo_ccr + 0x40 + i*2); + pr_cont(" %02x", tmp); + } + pr_cont("\n"); + } + #endif + + writeb(0x01, local->dingo_ccr + 0x20); + writeb(0x0c, local->dingo_ccr + 0x22); + writeb(0x00, local->dingo_ccr + 0x24); + writeb(0x00, local->dingo_ccr + 0x26); + writeb(0x00, local->dingo_ccr + 0x28); + } + + /* The if_port symbol can be set when the module is loaded */ + local->probe_port=0; + if (!if_port) { + local->probe_port = dev->if_port = 1; + } else if ((if_port >= 1 && if_port <= 2) || + (local->mohawk && if_port==4)) + dev->if_port = if_port; + else + pr_notice("invalid if_port requested\n"); + + /* we can now register the device with the net subsystem */ + dev->irq = link->irq; + dev->base_addr = link->resource[0]->start; + + if (local->dingo) + do_reset(dev, 1); /* a kludge to make the cem56 work */ + + SET_NETDEV_DEV(dev, &link->dev); + + if ((err=register_netdev(dev))) { + pr_notice("register_netdev() failed\n"); + goto config_error; + } + + /* give some infos about the hardware */ + netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n", + local->manf_str, (u_long)dev->base_addr, (int)dev->irq, + dev->dev_addr); + + return 0; + + config_error: + xirc2ps_release(link); + return -ENODEV; + + failure: + return -ENODEV; +} /* xirc2ps_config */ + +static void +xirc2ps_release(struct pcmcia_device *link) +{ + dev_dbg(&link->dev, "release\n"); + + if (link->resource[2]->end) { + struct net_device *dev = link->priv; + struct local_info *local = netdev_priv(dev); + if (local->dingo) + iounmap(local->dingo_ccr - 0x0800); + } + pcmcia_disable_device(link); +} /* xirc2ps_release */ + +/*====================================================================*/ + + +static int xirc2ps_suspend(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + netif_device_detach(dev); + do_powerdown(dev); + } + + return 0; +} + +static int xirc2ps_resume(struct pcmcia_device *link) +{ + struct net_device *dev = link->priv; + + if (link->open) { + do_reset(dev,1); + netif_device_attach(dev); + } + + return 0; +} + + +/*====================================================================*/ + +/**************** + * This is the Interrupt service route. + */ +static irqreturn_t +xirc2ps_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct local_info *lp = netdev_priv(dev); + unsigned int ioaddr; + u_char saved_page; + unsigned bytes_rcvd; + unsigned int_status, eth_status, rx_status, tx_status; + unsigned rsr, pktlen; + ulong start_ticks = jiffies; /* fixme: jiffies rollover every 497 days + * is this something to worry about? + * -- on a laptop? 
+ */ + + if (!netif_device_present(dev)) + return IRQ_HANDLED; + + ioaddr = dev->base_addr; + if (lp->mohawk) { /* must disable the interrupt */ + PutByte(XIRCREG_CR, 0); + } + + pr_debug("%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr); + + saved_page = GetByte(XIRCREG_PR); + /* Read the ISR to see whats the cause for the interrupt. + * This also clears the interrupt flags on CE2 cards + */ + int_status = GetByte(XIRCREG_ISR); + bytes_rcvd = 0; + loop_entry: + if (int_status == 0xff) { /* card may be ejected */ + pr_debug("%s: interrupt %d for dead card\n", dev->name, irq); + goto leave; + } + eth_status = GetByte(XIRCREG_ESR); + + SelectPage(0x40); + rx_status = GetByte(XIRCREG40_RXST0); + PutByte(XIRCREG40_RXST0, (~rx_status & 0xff)); + tx_status = GetByte(XIRCREG40_TXST0); + tx_status |= GetByte(XIRCREG40_TXST1) << 8; + PutByte(XIRCREG40_TXST0, 0); + PutByte(XIRCREG40_TXST1, 0); + + pr_debug("%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n", + dev->name, int_status, eth_status, rx_status, tx_status); + + /***** receive section ******/ + SelectPage(0); + while (eth_status & FullPktRcvd) { + rsr = GetByte(XIRCREG0_RSR); + if (bytes_rcvd > maxrx_bytes && (rsr & PktRxOk)) { + /* too many bytes received during this int, drop the rest of the + * packets */ + dev->stats.rx_dropped++; + pr_debug("%s: RX drop, too much done\n", dev->name); + } else if (rsr & PktRxOk) { + struct sk_buff *skb; + + pktlen = GetWord(XIRCREG0_RBC); + bytes_rcvd += pktlen; + + pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen); + + /* 1 extra so we can use insw */ + skb = netdev_alloc_skb(dev, pktlen + 3); + if (!skb) { + dev->stats.rx_dropped++; + } else { /* okay get the packet */ + skb_reserve(skb, 2); + if (lp->silicon == 0 ) { /* work around a hardware bug */ + unsigned rhsa; /* receive start address */ + + SelectPage(5); + rhsa = GetWord(XIRCREG5_RHSA0); + SelectPage(0); + rhsa += 3; /* skip control infos */ + if (rhsa >= 0x8000) + rhsa = 0; + if (rhsa + pktlen > 0x8000) { + unsigned i; + u_char *buf = skb_put(skb, pktlen); + for (i=0; i < pktlen ; i++, rhsa++) { + buf[i] = GetByte(XIRCREG_EDP); + if (rhsa == 0x8000) { + rhsa = 0; + i--; + } + } + } else { + insw(ioaddr+XIRCREG_EDP, + skb_put(skb, pktlen), (pktlen+1)>>1); + } + } + #if 0 + else if (lp->mohawk) { + /* To use this 32 bit access we should use + * a manual optimized loop + * Also the words are swapped, we can get more + * performance by using 32 bit access and swapping + * the words in a register. Will need this for cardbus + * + * Note: don't forget to change the ALLOC_SKB to .. +3 + */ + unsigned i; + u_long *p = skb_put(skb, pktlen); + register u_long a; + unsigned int edpreg = ioaddr+XIRCREG_EDP-2; + for (i=0; i < len ; i += 4, p++) { + a = inl(edpreg); + __asm__("rorl $16,%0\n\t" + :"=q" (a) + : "0" (a)); + *p = a; + } + } + #endif + else { + insw(ioaddr+XIRCREG_EDP, skb_put(skb, pktlen), + (pktlen+1)>>1); + } + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pktlen; + if (!(rsr & PhyPkt)) + dev->stats.multicast++; + } + } else { /* bad packet */ + pr_debug("rsr=%#02x\n", rsr); + } + if (rsr & PktTooLong) { + dev->stats.rx_frame_errors++; + pr_debug("%s: Packet too long\n", dev->name); + } + if (rsr & CRCErr) { + dev->stats.rx_crc_errors++; + pr_debug("%s: CRC error\n", dev->name); + } + if (rsr & AlignErr) { + dev->stats.rx_fifo_errors++; /* okay ? 
*/ + pr_debug("%s: Alignment error\n", dev->name); + } + + /* clear the received/dropped/error packet */ + PutWord(XIRCREG0_DO, 0x8000); /* issue cmd: skip_rx_packet */ + + /* get the new ethernet status */ + eth_status = GetByte(XIRCREG_ESR); + } + if (rx_status & 0x10) { /* Receive overrun */ + dev->stats.rx_over_errors++; + PutByte(XIRCREG_CR, ClearRxOvrun); + pr_debug("receive overrun cleared\n"); + } + + /***** transmit section ******/ + if (int_status & PktTxed) { + unsigned n, nn; + + n = lp->last_ptr_value; + nn = GetByte(XIRCREG0_PTR); + lp->last_ptr_value = nn; + if (nn < n) /* rollover */ + dev->stats.tx_packets += 256 - n; + else if (n == nn) { /* happens sometimes - don't know why */ + pr_debug("PTR not changed?\n"); + } else + dev->stats.tx_packets += lp->last_ptr_value - n; + netif_wake_queue(dev); + } + if (tx_status & 0x0002) { /* Execessive collissions */ + pr_debug("tx restarted due to execssive collissions\n"); + PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ + } + if (tx_status & 0x0040) + dev->stats.tx_aborted_errors++; + + /* recalculate our work chunk so that we limit the duration of this + * ISR to about 1/10 of a second. + * Calculate only if we received a reasonable amount of bytes. + */ + if (bytes_rcvd > 1000) { + u_long duration = jiffies - start_ticks; + + if (duration >= HZ/10) { /* if more than about 1/10 second */ + maxrx_bytes = (bytes_rcvd * (HZ/10)) / duration; + if (maxrx_bytes < 2000) + maxrx_bytes = 2000; + else if (maxrx_bytes > 22000) + maxrx_bytes = 22000; + pr_debug("set maxrx=%u (rcvd=%u ticks=%lu)\n", + maxrx_bytes, bytes_rcvd, duration); + } else if (!duration && maxrx_bytes < 22000) { + /* now much faster */ + maxrx_bytes += 2000; + if (maxrx_bytes > 22000) + maxrx_bytes = 22000; + pr_debug("set maxrx=%u\n", maxrx_bytes); + } + } + + leave: + if (lockup_hack) { + if (int_status != 0xff && (int_status = GetByte(XIRCREG_ISR)) != 0) + goto loop_entry; + } + SelectPage(saved_page); + PutByte(XIRCREG_CR, EnableIntr); /* re-enable interrupts */ + /* Instead of dropping packets during a receive, we could + * force an interrupt with this command: + * PutByte(XIRCREG_CR, EnableIntr|ForceIntr); + */ + return IRQ_HANDLED; +} /* xirc2ps_interrupt */ + +/*====================================================================*/ + +static void +xirc2ps_tx_timeout_task(struct work_struct *work) +{ + struct local_info *local = + container_of(work, struct local_info, tx_timeout_task); + struct net_device *dev = local->dev; + /* reset the card */ + do_reset(dev,1); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_wake_queue(dev); +} + +static void +xirc_tx_timeout(struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + dev->stats.tx_errors++; + netdev_notice(dev, "transmit timed out\n"); + schedule_work(&lp->tx_timeout_task); +} + +static netdev_tx_t +do_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + int okay; + unsigned freespace; + unsigned pktlen = skb->len; + + pr_debug("do_start_xmit(skb=%p, dev=%p) len=%u\n", + skb, dev, pktlen); + + + /* adjust the packet length to min. required + * and hope that the buffer is large enough + * to provide some random data. 
+ * fixme: For Mohawk we can change this by sending + * a larger packetlen than we actually have; the chip will + * pad this in his buffer with random bytes + */ + if (pktlen < ETH_ZLEN) + { + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + pktlen = ETH_ZLEN; + } + + netif_stop_queue(dev); + SelectPage(0); + PutWord(XIRCREG0_TRS, (u_short)pktlen+2); + freespace = GetWord(XIRCREG0_TSO); + okay = freespace & 0x8000; + freespace &= 0x7fff; + /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ + okay = pktlen +2 < freespace; + pr_debug("%s: avail. tx space=%u%s\n", + dev->name, freespace, okay ? " (okay)":" (not enough)"); + if (!okay) { /* not enough space */ + return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */ + } + /* send the packet */ + PutWord(XIRCREG_EDP, (u_short)pktlen); + outsw(ioaddr+XIRCREG_EDP, skb->data, pktlen>>1); + if (pktlen & 1) + PutByte(XIRCREG_EDP, skb->data[pktlen-1]); + + if (lp->mohawk) + PutByte(XIRCREG_CR, TransmitPacket|EnableIntr); + + dev_kfree_skb (skb); + dev->stats.tx_bytes += pktlen; + netif_start_queue(dev); + return NETDEV_TX_OK; +} + +struct set_address_info { + int reg_nr; + int page_nr; + int mohawk; + unsigned int ioaddr; +}; + +static void set_address(struct set_address_info *sa_info, char *addr) +{ + unsigned int ioaddr = sa_info->ioaddr; + int i; + + for (i = 0; i < 6; i++) { + if (sa_info->reg_nr > 15) { + sa_info->reg_nr = 8; + sa_info->page_nr++; + SelectPage(sa_info->page_nr); + } + if (sa_info->mohawk) + PutByte(sa_info->reg_nr++, addr[5 - i]); + else + PutByte(sa_info->reg_nr++, addr[i]); + } +} + +/**************** + * Set all addresses: This first one is the individual address, + * the next 9 addresses are taken from the multicast list and + * the rest is filled with the individual address. + */ +static void set_addresses(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct local_info *lp = netdev_priv(dev); + struct netdev_hw_addr *ha; + struct set_address_info sa_info; + int i; + + /* + * Setup the info structure so that by first set_address call it will do + * SelectPage with the right page number. Hence these ones here. + */ + sa_info.reg_nr = 15 + 1; + sa_info.page_nr = 0x50 - 1; + sa_info.mohawk = lp->mohawk; + sa_info.ioaddr = ioaddr; + + set_address(&sa_info, dev->dev_addr); + i = 0; + netdev_for_each_mc_addr(ha, dev) { + if (i++ == 9) + break; + set_address(&sa_info, ha->addr); + } + while (i++ < 9) + set_address(&sa_info, dev->dev_addr); + SelectPage(0); +} + +/**************** + * Set or clear the multicast filter for this adaptor. + * We can filter up to 9 addresses, if more are requested we set + * multicast promiscuous mode. 
+ */ + +static void +set_multicast_list(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + unsigned value; + + SelectPage(0x42); + value = GetByte(XIRCREG42_SWC1) & 0xC0; + + if (dev->flags & IFF_PROMISC) { /* snoop */ + PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ + } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) { + PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ + } else if (!netdev_mc_empty(dev)) { + /* the chip can filter 9 addresses perfectly */ + PutByte(XIRCREG42_SWC1, value | 0x01); + SelectPage(0x40); + PutByte(XIRCREG40_CMD0, Offline); + set_addresses(dev); + SelectPage(0x40); + PutByte(XIRCREG40_CMD0, EnableRecv | Online); + } else { /* standard usage */ + PutByte(XIRCREG42_SWC1, value | 0x00); + } + SelectPage(0); +} + +static int +do_config(struct net_device *dev, struct ifmap *map) +{ + struct local_info *local = netdev_priv(dev); + + pr_debug("do_config(%p)\n", dev); + if (map->port != 255 && map->port != dev->if_port) { + if (map->port > 4) + return -EINVAL; + if (!map->port) { + local->probe_port = 1; + dev->if_port = 1; + } else { + local->probe_port = 0; + dev->if_port = map->port; + } + netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]); + do_reset(dev,1); /* not the fine way :-) */ + } + return 0; +} + +/**************** + * Open the driver + */ +static int +do_open(struct net_device *dev) +{ + struct local_info *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + dev_dbg(&link->dev, "do_open(%p)\n", dev); + + /* Check that the PCMCIA card is still here. */ + /* Physical device present signature. */ + if (!pcmcia_dev_present(link)) + return -ENODEV; + + /* okay */ + link->open++; + + netif_start_queue(dev); + do_reset(dev,1); + + return 0; +} + +static void netdev_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver)); + snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", + dev->base_addr); +} + +static const struct ethtool_ops netdev_ethtool_ops = { + .get_drvinfo = netdev_get_drvinfo, +}; + +static int +do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct local_info *local = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + struct mii_ioctl_data *data = if_mii(rq); + + pr_debug("%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", + dev->name, rq->ifr_ifrn.ifrn_name, cmd, + data->phy_id, data->reg_num, data->val_in, data->val_out); + + if (!local->mohawk) + return -EOPNOTSUPP; + + switch(cmd) { + case SIOCGMIIPHY: /* Get the address of the PHY in use. */ + data->phy_id = 0; /* we have only this address */ + /* fall through */ + case SIOCGMIIREG: /* Read the specified MII register. 
*/ + data->val_out = mii_rd(ioaddr, data->phy_id & 0x1f, + data->reg_num & 0x1f); + break; + case SIOCSMIIREG: /* Write the specified MII register */ + mii_wr(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in, + 16); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static void +hardreset(struct net_device *dev) +{ + struct local_info *local = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + + SelectPage(4); + udelay(1); + PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ + msleep(40); /* wait 40 msec */ + if (local->mohawk) + PutByte(XIRCREG4_GPR1, 1); /* set bit 0: power up */ + else + PutByte(XIRCREG4_GPR1, 1 | 4); /* set bit 0: power up, bit 2: AIC */ + msleep(20); /* wait 20 msec */ +} + +static void +do_reset(struct net_device *dev, int full) +{ + struct local_info *local = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned value; + + pr_debug("%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); + + hardreset(dev); + PutByte(XIRCREG_CR, SoftReset); /* set */ + msleep(20); /* wait 20 msec */ + PutByte(XIRCREG_CR, 0); /* clear */ + msleep(40); /* wait 40 msec */ + if (local->mohawk) { + SelectPage(4); + /* set pin GP1 and GP2 to output (0x0c) + * set GP1 to low to power up the ML6692 (0x00) + * set GP2 to high to power up the 10Mhz chip (0x02) + */ + PutByte(XIRCREG4_GPR0, 0x0e); + } + + /* give the circuits some time to power up */ + msleep(500); /* about 500ms */ + + local->last_ptr_value = 0; + local->silicon = local->mohawk ? (GetByte(XIRCREG4_BOV) & 0x70) >> 4 + : (GetByte(XIRCREG4_BOV) & 0x30) >> 4; + + if (local->probe_port) { + if (!local->mohawk) { + SelectPage(4); + PutByte(XIRCREG4_GPR0, 4); + local->probe_port = 0; + } + } else if (dev->if_port == 2) { /* enable 10Base2 */ + SelectPage(0x42); + PutByte(XIRCREG42_SWC1, 0xC0); + } else { /* enable 10BaseT */ + SelectPage(0x42); + PutByte(XIRCREG42_SWC1, 0x80); + } + msleep(40); /* wait 40 msec to let it complete */ + + #if 0 + { + SelectPage(0); + value = GetByte(XIRCREG_ESR); /* read the ESR */ + pr_debug("%s: ESR is: %#02x\n", dev->name, value); + } + #endif + + /* setup the ECR */ + SelectPage(1); + PutByte(XIRCREG1_IMR0, 0xff); /* allow all ints */ + PutByte(XIRCREG1_IMR1, 1 ); /* and Set TxUnderrunDetect */ + value = GetByte(XIRCREG1_ECR); + #if 0 + if (local->mohawk) + value |= DisableLinkPulse; + PutByte(XIRCREG1_ECR, value); + #endif + pr_debug("%s: ECR is: %#02x\n", dev->name, value); + + SelectPage(0x42); + PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */ + + if (local->silicon != 1) { + /* set the local memory dividing line. + * The comments in the sample code say that this is only + * settable with the scipper version 2 which is revision 0. + * Always for CE3 cards + */ + SelectPage(2); + PutWord(XIRCREG2_RBS, 0x2000); + } + + if (full) + set_addresses(dev); + + /* Hardware workaround: + * The receive byte pointer after reset is off by 1 so we need + * to move the offset pointer back to 0. + */ + SelectPage(0); + PutWord(XIRCREG0_DO, 0x2000); /* change offset command, off=0 */ + + /* setup MAC IMRs and clear status registers */ + SelectPage(0x40); /* Bit 7 ... 
bit 0 */ + PutByte(XIRCREG40_RMASK0, 0xff); /* ROK, RAB, rsv, RO, CRC, AE, PTL, MP */ + PutByte(XIRCREG40_TMASK0, 0xff); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */ + PutByte(XIRCREG40_TMASK1, 0xb0); /* rsv, rsv, PTD, EXT, rsv,rsv,rsv, rsv*/ + PutByte(XIRCREG40_RXST0, 0x00); /* ROK, RAB, REN, RO, CRC, AE, PTL, MP */ + PutByte(XIRCREG40_TXST0, 0x00); /* TOK, TAB, SQE, LL, TU, JAB, EXC, CRS */ + PutByte(XIRCREG40_TXST1, 0x00); /* TEN, rsv, PTD, EXT, retry_counter:4 */ + + if (full && local->mohawk && init_mii(dev)) { + if (dev->if_port == 4 || local->dingo || local->new_mii) { + netdev_info(dev, "MII selected\n"); + SelectPage(2); + PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08); + msleep(20); + } else { + netdev_info(dev, "MII detected; using 10mbs\n"); + SelectPage(0x42); + if (dev->if_port == 2) /* enable 10Base2 */ + PutByte(XIRCREG42_SWC1, 0xC0); + else /* enable 10BaseT */ + PutByte(XIRCREG42_SWC1, 0x80); + msleep(40); /* wait 40 msec to let it complete */ + } + if (full_duplex) + PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex)); + } else { /* No MII */ + SelectPage(0); + value = GetByte(XIRCREG_ESR); /* read the ESR */ + dev->if_port = (value & MediaSelect) ? 1 : 2; + } + + /* configure the LEDs */ + SelectPage(2); + if (dev->if_port == 1 || dev->if_port == 4) /* TP: Link and Activity */ + PutByte(XIRCREG2_LED, 0x3b); + else /* Coax: Not-Collision and Activity */ + PutByte(XIRCREG2_LED, 0x3a); + + if (local->dingo) + PutByte(0x0b, 0x04); /* 100 Mbit LED */ + + /* enable receiver and put the mac online */ + if (full) { + set_multicast_list(dev); + SelectPage(0x40); + PutByte(XIRCREG40_CMD0, EnableRecv | Online); + } + + /* setup Ethernet IMR and enable interrupts */ + SelectPage(1); + PutByte(XIRCREG1_IMR0, 0xff); + udelay(1); + SelectPage(0); + PutByte(XIRCREG_CR, EnableIntr); + if (local->modem && !local->dingo) { /* do some magic */ + if (!(GetByte(0x10) & 0x01)) + PutByte(0x10, 0x11); /* unmask master-int bit */ + } + + if (full) + netdev_info(dev, "media %s, silicon revision %d\n", + if_names[dev->if_port], local->silicon); + /* We should switch back to page 0 to avoid a bug in revision 0 + * where regs with offset below 8 can't be read after an access + * to the MAC registers */ + SelectPage(0); +} + +/**************** + * Initialize the Media-Independent-Interface + * Returns: True if we have a good MII + */ +static int +init_mii(struct net_device *dev) +{ + struct local_info *local = netdev_priv(dev); + unsigned int ioaddr = dev->base_addr; + unsigned control, status, linkpartner; + int i; + + if (if_port == 4 || if_port == 1) { /* force 100BaseT or 10BaseT */ + dev->if_port = if_port; + local->probe_port = 0; + return 1; + } + + status = mii_rd(ioaddr, 0, 1); + if ((status & 0xff00) != 0x7800) + return 0; /* No MII */ + + local->new_mii = (mii_rd(ioaddr, 0, 2) != 0xffff); + + if (local->probe_port) + control = 0x1000; /* auto neg */ + else if (dev->if_port == 4) + control = 0x2000; /* no auto neg, 100mbs mode */ + else + control = 0x0000; /* no auto neg, 10mbs mode */ + mii_wr(ioaddr, 0, 0, control, 16); + udelay(100); + control = mii_rd(ioaddr, 0, 0); + + if (control & 0x0400) { + netdev_notice(dev, "can't take PHY out of isolation mode\n"); + local->probe_port = 0; + return 0; + } + + if (local->probe_port) { + /* according to the DP83840A specs the auto negotiation process + * may take up to 3.5 sec, so we use this also for our ML6692 + * Fixme: Better to use a timer here! 
+ */ + for (i=0; i < 35; i++) { + msleep(100); /* wait 100 msec */ + status = mii_rd(ioaddr, 0, 1); + if ((status & 0x0020) && (status & 0x0004)) + break; + } + + if (!(status & 0x0020)) { + netdev_info(dev, "autonegotiation failed; using 10mbs\n"); + if (!local->new_mii) { + control = 0x0000; + mii_wr(ioaddr, 0, 0, control, 16); + udelay(100); + SelectPage(0); + dev->if_port = (GetByte(XIRCREG_ESR) & MediaSelect) ? 1 : 2; + } + } else { + linkpartner = mii_rd(ioaddr, 0, 5); + netdev_info(dev, "MII link partner: %04x\n", linkpartner); + if (linkpartner & 0x0080) { + dev->if_port = 4; + } else + dev->if_port = 1; + } + } + + return 1; +} + +static void +do_powerdown(struct net_device *dev) +{ + + unsigned int ioaddr = dev->base_addr; + + pr_debug("do_powerdown(%p)\n", dev); + + SelectPage(4); + PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ + SelectPage(0); +} + +static int +do_stop(struct net_device *dev) +{ + unsigned int ioaddr = dev->base_addr; + struct local_info *lp = netdev_priv(dev); + struct pcmcia_device *link = lp->p_dev; + + dev_dbg(&link->dev, "do_stop(%p)\n", dev); + + if (!link) + return -ENODEV; + + netif_stop_queue(dev); + + SelectPage(0); + PutByte(XIRCREG_CR, 0); /* disable interrupts */ + SelectPage(0x01); + PutByte(XIRCREG1_IMR0, 0x00); /* forbid all ints */ + SelectPage(4); + PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ + SelectPage(0); + + link->open--; + return 0; +} + +static const struct pcmcia_device_id xirc2ps_ids[] = { + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0089, 0x110a), + PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0138, 0x110a), + PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM28", 0x2e3ee845, 0x0ea978ea), + PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM33", 0x2e3ee845, 0x80609023), + PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "CEM56", 0x2e3ee845, 0xa650c32a), + PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), + PCMCIA_PFC_DEVICE_PROD_ID13(0, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x010a), + PCMCIA_DEVICE_PROD_ID13("Toshiba Information Systems", "TPCENET", 0x1b3b94fe, 0xf381c1a2), + PCMCIA_DEVICE_PROD_ID13("Xircom", "CE3-10/100", 0x2e3ee845, 0x0ec0ac37), + PCMCIA_DEVICE_PROD_ID13("Xircom", "PS-CE2-10", 0x2e3ee845, 0x947d9073), + PCMCIA_DEVICE_PROD_ID13("Xircom", "R2E-100BTX", 0x2e3ee845, 0x2464a6e3), + PCMCIA_DEVICE_PROD_ID13("Xircom", "RE-10", 0x2e3ee845, 0x3e08d609), + PCMCIA_DEVICE_PROD_ID13("Xircom", "XE2000", 0x2e3ee845, 0xf7188e46), + PCMCIA_DEVICE_PROD_ID12("Compaq", "Ethernet LAN Card", 0x54f7c49c, 0x9fd2f0a2), + PCMCIA_DEVICE_PROD_ID12("Compaq", "Netelligent 10/100 PC Card", 0x54f7c49c, 0xefe96769), + PCMCIA_DEVICE_PROD_ID12("Intel", "EtherExpress(TM) PRO/100 PC Card Mobile Adapter16", 0x816cc815, 0x174397db), + PCMCIA_DEVICE_PROD_ID12("Toshiba", "10/100 Ethernet PC Card", 0x44a09d9c, 0xb44deecf), + /* also matches CFE-10 cards! 
*/ + /* PCMCIA_DEVICE_MANF_CARD(0x0105, 0x010a), */ + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, xirc2ps_ids); + + +static struct pcmcia_driver xirc2ps_cs_driver = { + .owner = THIS_MODULE, + .name = "xirc2ps_cs", + .probe = xirc2ps_probe, + .remove = xirc2ps_detach, + .id_table = xirc2ps_ids, + .suspend = xirc2ps_suspend, + .resume = xirc2ps_resume, +}; +module_pcmcia_driver(xirc2ps_cs_driver); + +#ifndef MODULE +static int __init setup_xirc2ps_cs(char *str) +{ + /* if_port, full_duplex, do_sound, lockup_hack + */ + int ints[10] = { -1 }; + + str = get_options(str, 9, ints); + +#define MAYBE_SET(X,Y) if (ints[0] >= Y && ints[Y] != -1) { X = ints[Y]; } + MAYBE_SET(if_port, 3); + MAYBE_SET(full_duplex, 4); + MAYBE_SET(do_sound, 5); + MAYBE_SET(lockup_hack, 6); +#undef MAYBE_SET + + return 1; +} + +__setup("xirc2ps_cs=", setup_xirc2ps_cs); +#endif diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig new file mode 100644 index 000000000..b81bc9fca --- /dev/null +++ b/drivers/net/ethernet/xscale/Kconfig @@ -0,0 +1,31 @@ +# +# Intel XScale IXP device configuration +# + +config NET_VENDOR_XSCALE + bool "Intel XScale IXP devices" + default y + depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \ + IXP4XX_NPE && IXP4XX_QMGR) + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y + and read the Ethernet-HOWTO, available from + . + + Note that the answer to this question does not directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about XScale IXP devices. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_XSCALE + +config IXP4XX_ETH + tristate "Intel IXP4xx Ethernet support" + depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR + select PHYLIB + select NET_PTP_CLASSIFY + ---help--- + Say Y here if you want to use built-in Ethernet ports + on IXP4xx processor. + +endif # NET_VENDOR_XSCALE diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile new file mode 100644 index 000000000..abc3b031f --- /dev/null +++ b/drivers/net/ethernet/xscale/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Intel XScale IXP device drivers. +# + +obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c new file mode 100644 index 000000000..513840794 --- /dev/null +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -0,0 +1,1537 @@ +/* + * Intel IXP4xx Ethernet driver for Linux + * + * Copyright (C) 2007 Krzysztof Halasa + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * Ethernet port config (0x00 is not present on IXP42X): + * + * logical port 0x00 0x10 0x20 + * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C) + * physical PortId 2 0 1 + * TX queue 23 24 25 + * RX-free queue 26 27 28 + * TX-done queue is always 31, per-port RX and TX-ready queues are configurable + * + * + * Queue entries: + * bits 0 -> 1 - NPE ID (RX and TX-done) + * bits 0 -> 2 - priority (TX, per 802.1D) + * bits 3 -> 4 - port ID (user-set?)
+ * bits 5 -> 31 - physical descriptor address + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEBUG_DESC 0 +#define DEBUG_RX 0 +#define DEBUG_TX 0 +#define DEBUG_PKT_BYTES 0 +#define DEBUG_MDIO 0 +#define DEBUG_CLOSE 0 + +#define DRV_NAME "ixp4xx_eth" + +#define MAX_NPES 3 + +#define RX_DESCS 64 /* also length of all RX queues */ +#define TX_DESCS 16 /* also length of all TX queues */ +#define TXDONE_QUEUE_LEN 64 /* dwords */ + +#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) +#define REGS_SIZE 0x1000 +#define MAX_MRU 1536 /* 0x600 */ +#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4) + +#define NAPI_WEIGHT 16 +#define MDIO_INTERVAL (3 * HZ) +#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */ +#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */ + +#define NPE_ID(port_id) ((port_id) >> 4) +#define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3) +#define TX_QUEUE(port_id) (NPE_ID(port_id) + 23) +#define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) +#define TXDONE_QUEUE 31 + +#define PTP_SLAVE_MODE 1 +#define PTP_MASTER_MODE 2 +#define PORT2CHANNEL(p) NPE_ID(p->id) + +/* TX Control Registers */ +#define TX_CNTRL0_TX_EN 0x01 +#define TX_CNTRL0_HALFDUPLEX 0x02 +#define TX_CNTRL0_RETRY 0x04 +#define TX_CNTRL0_PAD_EN 0x08 +#define TX_CNTRL0_APPEND_FCS 0x10 +#define TX_CNTRL0_2DEFER 0x20 +#define TX_CNTRL0_RMII 0x40 /* reduced MII */ +#define TX_CNTRL1_RETRIES 0x0F /* 4 bits */ + +/* RX Control Registers */ +#define RX_CNTRL0_RX_EN 0x01 +#define RX_CNTRL0_PADSTRIP_EN 0x02 +#define RX_CNTRL0_SEND_FCS 0x04 +#define RX_CNTRL0_PAUSE_EN 0x08 +#define RX_CNTRL0_LOOP_EN 0x10 +#define RX_CNTRL0_ADDR_FLTR_EN 0x20 +#define RX_CNTRL0_RX_RUNT_EN 0x40 +#define RX_CNTRL0_BCAST_DIS 0x80 +#define RX_CNTRL1_DEFER_EN 0x01 + +/* Core Control Register */ +#define CORE_RESET 0x01 +#define CORE_RX_FIFO_FLUSH 0x02 +#define CORE_TX_FIFO_FLUSH 0x04 +#define CORE_SEND_JAM 0x08 +#define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */ + +#define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \ + TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \ + TX_CNTRL0_2DEFER) +#define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN +#define DEFAULT_CORE_CNTRL CORE_MDC_EN + + +/* NPE message codes */ +#define NPE_GETSTATUS 0x00 +#define NPE_EDB_SETPORTADDRESS 0x01 +#define NPE_EDB_GETMACADDRESSDATABASE 0x02 +#define NPE_EDB_SETMACADDRESSSDATABASE 0x03 +#define NPE_GETSTATS 0x04 +#define NPE_RESETSTATS 0x05 +#define NPE_SETMAXFRAMELENGTHS 0x06 +#define NPE_VLAN_SETRXTAGMODE 0x07 +#define NPE_VLAN_SETDEFAULTRXVID 0x08 +#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09 +#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A +#define NPE_VLAN_SETRXQOSENTRY 0x0B +#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C +#define NPE_STP_SETBLOCKINGSTATE 0x0D +#define NPE_FW_SETFIREWALLMODE 0x0E +#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F +#define NPE_PC_SETAPMACTABLE 0x11 +#define NPE_SETLOOPBACK_MODE 0x12 +#define NPE_PC_SETBSSIDTABLE 0x13 +#define NPE_ADDRESS_FILTER_CONFIG 0x14 +#define NPE_APPENDFCSCONFIG 0x15 +#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16 +#define NPE_MAC_RECOVERY_START 0x17 + + +#ifdef __ARMEB__ +typedef struct sk_buff buffer_t; +#define free_buffer dev_kfree_skb +#define free_buffer_irq dev_kfree_skb_irq +#else +typedef void buffer_t; +#define free_buffer kfree +#define free_buffer_irq kfree +#endif + +struct eth_regs { + u32 tx_control[2], __res1[2]; /* 000 */ + u32 
rx_control[2], __res2[2]; /* 010 */ + u32 random_seed, __res3[3]; /* 020 */ + u32 partial_empty_threshold, __res4; /* 030 */ + u32 partial_full_threshold, __res5; /* 038 */ + u32 tx_start_bytes, __res6[3]; /* 040 */ + u32 tx_deferral, rx_deferral, __res7[2];/* 050 */ + u32 tx_2part_deferral[2], __res8[2]; /* 060 */ + u32 slot_time, __res9[3]; /* 070 */ + u32 mdio_command[4]; /* 080 */ + u32 mdio_status[4]; /* 090 */ + u32 mcast_mask[6], __res10[2]; /* 0A0 */ + u32 mcast_addr[6], __res11[2]; /* 0C0 */ + u32 int_clock_threshold, __res12[3]; /* 0E0 */ + u32 hw_addr[6], __res13[61]; /* 0F0 */ + u32 core_control; /* 1FC */ +}; + +struct port { + struct resource *mem_res; + struct eth_regs __iomem *regs; + struct npe *npe; + struct net_device *netdev; + struct napi_struct napi; + struct phy_device *phydev; + struct eth_plat_info *plat; + buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; + struct desc *desc_tab; /* coherent */ + u32 desc_tab_phys; + int id; /* logical port ID */ + int speed, duplex; + u8 firmware[4]; + int hwts_tx_en; + int hwts_rx_en; +}; + +/* NPE message structure */ +struct msg { +#ifdef __ARMEB__ + u8 cmd, eth_id, byte2, byte3; + u8 byte4, byte5, byte6, byte7; +#else + u8 byte3, byte2, eth_id, cmd; + u8 byte7, byte6, byte5, byte4; +#endif +}; + +/* Ethernet packet descriptor */ +struct desc { + u32 next; /* pointer to next buffer, unused */ + +#ifdef __ARMEB__ + u16 buf_len; /* buffer length */ + u16 pkt_len; /* packet length */ + u32 data; /* pointer to data buffer in RAM */ + u8 dest_id; + u8 src_id; + u16 flags; + u8 qos; + u8 padlen; + u16 vlan_tci; +#else + u16 pkt_len; /* packet length */ + u16 buf_len; /* buffer length */ + u32 data; /* pointer to data buffer in RAM */ + u16 flags; + u8 src_id; + u8 dest_id; + u16 vlan_tci; + u8 padlen; + u8 qos; +#endif + +#ifdef __ARMEB__ + u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3; + u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1; + u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5; +#else + u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0; + u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4; + u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2; +#endif +}; + + +#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ + (n) * sizeof(struct desc)) +#define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) + +#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ + ((n) + RX_DESCS) * sizeof(struct desc)) +#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) + +#ifndef __ARMEB__ +static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) +{ + int i; + for (i = 0; i < cnt; i++) + dest[i] = swab32(src[i]); +} +#endif + +static spinlock_t mdio_lock; +static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */ +static struct mii_bus *mdio_bus; +static int ports_open; +static struct port *npe_port_tab[MAX_NPES]; +static struct dma_pool *dma_pool; + +static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) +{ + u8 *data = skb->data; + unsigned int offset; + u16 *hi, *id; + u32 lo; + + if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4) + return 0; + + offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + + if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) + return 0; + + hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID); + id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); + + memcpy(&lo, &hi[1], sizeof(lo)); + + return (uid_hi == ntohs(*hi) && + uid_lo == ntohl(lo) && + seqid == ntohs(*id)); +} + +static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb) +{ + struct 
skb_shared_hwtstamps *shhwtstamps; + struct ixp46x_ts_regs *regs; + u64 ns; + u32 ch, hi, lo, val; + u16 uid, seq; + + if (!port->hwts_rx_en) + return; + + ch = PORT2CHANNEL(port); + + regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + + val = __raw_readl(®s->channel[ch].ch_event); + + if (!(val & RX_SNAPSHOT_LOCKED)) + return; + + lo = __raw_readl(®s->channel[ch].src_uuid_lo); + hi = __raw_readl(®s->channel[ch].src_uuid_hi); + + uid = hi & 0xffff; + seq = (hi >> 16) & 0xffff; + + if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq))) + goto out; + + lo = __raw_readl(®s->channel[ch].rx_snap_lo); + hi = __raw_readl(®s->channel[ch].rx_snap_hi); + ns = ((u64) hi) << 32; + ns |= lo; + ns <<= TICKS_NS_SHIFT; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(ns); +out: + __raw_writel(RX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); +} + +static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct ixp46x_ts_regs *regs; + struct skb_shared_info *shtx; + u64 ns; + u32 ch, cnt, hi, lo, val; + + shtx = skb_shinfo(skb); + if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en)) + shtx->tx_flags |= SKBTX_IN_PROGRESS; + else + return; + + ch = PORT2CHANNEL(port); + + regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + + /* + * This really stinks, but we have to poll for the Tx time stamp. + * Usually, the time stamp is ready after 4 to 6 microseconds. + */ + for (cnt = 0; cnt < 100; cnt++) { + val = __raw_readl(®s->channel[ch].ch_event); + if (val & TX_SNAPSHOT_LOCKED) + break; + udelay(1); + } + if (!(val & TX_SNAPSHOT_LOCKED)) { + shtx->tx_flags &= ~SKBTX_IN_PROGRESS; + return; + } + + lo = __raw_readl(®s->channel[ch].tx_snap_lo); + hi = __raw_readl(®s->channel[ch].tx_snap_hi); + ns = ((u64) hi) << 32; + ns |= lo; + ns <<= TICKS_NS_SHIFT; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + + __raw_writel(TX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); +} + +static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config cfg; + struct ixp46x_ts_regs *regs; + struct port *port = netdev_priv(netdev); + int ch; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + if (cfg.flags) /* reserved for future extensions */ + return -EINVAL; + + ch = PORT2CHANNEL(port); + regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + + if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + port->hwts_rx_en = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + port->hwts_rx_en = PTP_SLAVE_MODE; + __raw_writel(0, ®s->channel[ch].ch_control); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + port->hwts_rx_en = PTP_MASTER_MODE; + __raw_writel(MASTER_MODE, ®s->channel[ch].ch_control); + break; + default: + return -ERANGE; + } + + port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON; + + /* Clear out any old time stamps. */ + __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, + ®s->channel[ch].ch_event); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config cfg; + struct port *port = netdev_priv(netdev); + + cfg.flags = 0; + cfg.tx_type = port->hwts_tx_en ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + + switch (port->hwts_rx_en) { + case 0: + cfg.rx_filter = HWTSTAMP_FILTER_NONE; + break; + case PTP_SLAVE_MODE: + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + break; + case PTP_MASTER_MODE: + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + break; + default: + WARN_ON_ONCE(1); + return -ERANGE; + } + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, + int write, u16 cmd) +{ + int cycles = 0; + + if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) { + printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name); + return -1; + } + + if (write) { + __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]); + __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]); + } + __raw_writel(((phy_id << 5) | location) & 0xFF, + &mdio_regs->mdio_command[2]); + __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */, + &mdio_regs->mdio_command[3]); + + while ((cycles < MAX_MDIO_RETRIES) && + (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) { + udelay(1); + cycles++; + } + + if (cycles == MAX_MDIO_RETRIES) { + printk(KERN_ERR "%s #%i: MII write failed\n", bus->name, + phy_id); + return -1; + } + +#if DEBUG_MDIO + printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name, + phy_id, write ? "write" : "read", cycles); +#endif + + if (write) + return 0; + + if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) { +#if DEBUG_MDIO + printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name, + phy_id); +#endif + return 0xFFFF; /* don't return error */ + } + + return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) | + ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8); +} + +static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdio_lock, flags); + ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0); + spin_unlock_irqrestore(&mdio_lock, flags); +#if DEBUG_MDIO + printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name, + phy_id, location, ret); +#endif + return ret; +} + +static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location, + u16 val) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdio_lock, flags); + ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val); + spin_unlock_irqrestore(&mdio_lock, flags); +#if DEBUG_MDIO + printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n", + bus->name, phy_id, location, val, ret); +#endif + return ret; +} + +static int ixp4xx_mdio_register(void) +{ + int err; + + if (!(mdio_bus = mdiobus_alloc())) + return -ENOMEM; + + if (cpu_is_ixp43x()) { + /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */ + if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH)) + return -ENODEV; + mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; + } else { + /* All MII PHY accesses use NPE-B Ethernet registers */ + if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) + return -ENODEV; + mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; + } + + __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control); + spin_lock_init(&mdio_lock); + mdio_bus->name = "IXP4xx MII Bus"; + mdio_bus->read = &ixp4xx_mdio_read; + mdio_bus->write = &ixp4xx_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0"); + + if ((err = mdiobus_register(mdio_bus))) + mdiobus_free(mdio_bus); + return err; +} + +static void ixp4xx_mdio_remove(void) +{ + mdiobus_unregister(mdio_bus); + 
mdiobus_free(mdio_bus); +} + + +static void ixp4xx_adjust_link(struct net_device *dev) +{ + struct port *port = netdev_priv(dev); + struct phy_device *phydev = port->phydev; + + if (!phydev->link) { + if (port->speed) { + port->speed = 0; + printk(KERN_INFO "%s: link down\n", dev->name); + } + return; + } + + if (port->speed == phydev->speed && port->duplex == phydev->duplex) + return; + + port->speed = phydev->speed; + port->duplex = phydev->duplex; + + if (port->duplex) + __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX, + &port->regs->tx_control[0]); + else + __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX, + &port->regs->tx_control[0]); + + printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n", + dev->name, port->speed, port->duplex ? "full" : "half"); +} + + +static inline void debug_pkt(struct net_device *dev, const char *func, + u8 *data, int len) +{ +#if DEBUG_PKT_BYTES + int i; + + printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len); + for (i = 0; i < len; i++) { + if (i >= DEBUG_PKT_BYTES) + break; + printk("%s%02X", + ((i == 6) || (i == 12) || (i >= 14)) ? " " : "", + data[i]); + } + printk("\n"); +#endif +} + + +static inline void debug_desc(u32 phys, struct desc *desc) +{ +#if DEBUG_DESC + printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X" + " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n", + phys, desc->next, desc->buf_len, desc->pkt_len, + desc->data, desc->dest_id, desc->src_id, desc->flags, + desc->qos, desc->padlen, desc->vlan_tci, + desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, + desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, + desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, + desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); +#endif +} + +static inline int queue_get_desc(unsigned int queue, struct port *port, + int is_tx) +{ + u32 phys, tab_phys, n_desc; + struct desc *tab; + + if (!(phys = qmgr_get_entry(queue))) + return -1; + + phys &= ~0x1F; /* mask out non-address bits */ + tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0); + tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0); + n_desc = (phys - tab_phys) / sizeof(struct desc); + BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS)); + debug_desc(phys, &tab[n_desc]); + BUG_ON(tab[n_desc].next); + return n_desc; +} + +static inline void queue_put_desc(unsigned int queue, u32 phys, + struct desc *desc) +{ + debug_desc(phys, desc); + BUG_ON(phys & 0x1F); + qmgr_put_entry(queue, phys); + /* Don't check for queue overflow here, we've allocated sufficient + length and queues >= 32 don't support this check anyway. 
*/ +} + + +static inline void dma_unmap_tx(struct port *port, struct desc *desc) +{ +#ifdef __ARMEB__ + dma_unmap_single(&port->netdev->dev, desc->data, + desc->buf_len, DMA_TO_DEVICE); +#else + dma_unmap_single(&port->netdev->dev, desc->data & ~3, + ALIGN((desc->data & 3) + desc->buf_len, 4), + DMA_TO_DEVICE); +#endif +} + + +static void eth_rx_irq(void *pdev) +{ + struct net_device *dev = pdev; + struct port *port = netdev_priv(dev); + +#if DEBUG_RX + printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); +#endif + qmgr_disable_irq(port->plat->rxq); + napi_schedule(&port->napi); +} + +static int eth_poll(struct napi_struct *napi, int budget) +{ + struct port *port = container_of(napi, struct port, napi); + struct net_device *dev = port->netdev; + unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id); + int received = 0; + +#if DEBUG_RX + printk(KERN_DEBUG "%s: eth_poll\n", dev->name); +#endif + + while (received < budget) { + struct sk_buff *skb; + struct desc *desc; + int n; +#ifdef __ARMEB__ + struct sk_buff *temp; + u32 phys; +#endif + + if ((n = queue_get_desc(rxq, port, 0)) < 0) { +#if DEBUG_RX + printk(KERN_DEBUG "%s: eth_poll napi_complete\n", + dev->name); +#endif + napi_complete(napi); + qmgr_enable_irq(rxq); + if (!qmgr_stat_below_low_watermark(rxq) && + napi_reschedule(napi)) { /* not empty again */ +#if DEBUG_RX + printk(KERN_DEBUG "%s: eth_poll" + " napi_reschedule successed\n", + dev->name); +#endif + qmgr_disable_irq(rxq); + continue; + } +#if DEBUG_RX + printk(KERN_DEBUG "%s: eth_poll all done\n", + dev->name); +#endif + return received; /* all work done */ + } + + desc = rx_desc_ptr(port, n); + +#ifdef __ARMEB__ + if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { + phys = dma_map_single(&dev->dev, skb->data, + RX_BUFF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&dev->dev, phys)) { + dev_kfree_skb(skb); + skb = NULL; + } + } +#else + skb = netdev_alloc_skb(dev, + ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); +#endif + + if (!skb) { + dev->stats.rx_dropped++; + /* put the desc back on RX-ready queue */ + desc->buf_len = MAX_MRU; + desc->pkt_len = 0; + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); + continue; + } + + /* process received frame */ +#ifdef __ARMEB__ + temp = skb; + skb = port->rx_buff_tab[n]; + dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, + RX_BUFF_SIZE, DMA_FROM_DEVICE); +#else + dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN, + RX_BUFF_SIZE, DMA_FROM_DEVICE); + memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], + ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); +#endif + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, desc->pkt_len); + + debug_pkt(dev, "eth_poll", skb->data, skb->len); + + ixp_rx_timestamp(port, skb); + skb->protocol = eth_type_trans(skb, dev); + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + netif_receive_skb(skb); + + /* put the new buffer on RX-free queue */ +#ifdef __ARMEB__ + port->rx_buff_tab[n] = temp; + desc->data = phys + NET_IP_ALIGN; +#endif + desc->buf_len = MAX_MRU; + desc->pkt_len = 0; + queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); + received++; + } + +#if DEBUG_RX + printk(KERN_DEBUG "eth_poll(): end, not all work done\n"); +#endif + return received; /* not all work done */ +} + + +static void eth_txdone_irq(void *unused) +{ + u32 phys; + +#if DEBUG_TX + printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n"); +#endif + while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) { + u32 npe_id, n_desc; + struct port *port; + struct desc *desc; + int start; + + npe_id = 
phys & 3; + BUG_ON(npe_id >= MAX_NPES); + port = npe_port_tab[npe_id]; + BUG_ON(!port); + phys &= ~0x1F; /* mask out non-address bits */ + n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); + BUG_ON(n_desc >= TX_DESCS); + desc = tx_desc_ptr(port, n_desc); + debug_desc(phys, desc); + + if (port->tx_buff_tab[n_desc]) { /* not the draining packet */ + port->netdev->stats.tx_packets++; + port->netdev->stats.tx_bytes += desc->pkt_len; + + dma_unmap_tx(port, desc); +#if DEBUG_TX + printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n", + port->netdev->name, port->tx_buff_tab[n_desc]); +#endif + free_buffer_irq(port->tx_buff_tab[n_desc]); + port->tx_buff_tab[n_desc] = NULL; + } + + start = qmgr_stat_below_low_watermark(port->plat->txreadyq); + queue_put_desc(port->plat->txreadyq, phys, desc); + if (start) { /* TX-ready queue was empty */ +#if DEBUG_TX + printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n", + port->netdev->name); +#endif + netif_wake_queue(port->netdev); + } + } +} + +static int eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct port *port = netdev_priv(dev); + unsigned int txreadyq = port->plat->txreadyq; + int len, offset, bytes, n; + void *mem; + u32 phys; + struct desc *desc; + +#if DEBUG_TX + printk(KERN_DEBUG "%s: eth_xmit\n", dev->name); +#endif + + if (unlikely(skb->len > MAX_MRU)) { + dev_kfree_skb(skb); + dev->stats.tx_errors++; + return NETDEV_TX_OK; + } + + debug_pkt(dev, "eth_xmit", skb->data, skb->len); + + len = skb->len; +#ifdef __ARMEB__ + offset = 0; /* no need to keep alignment */ + bytes = len; + mem = skb->data; +#else + offset = (int)skb->data & 3; /* keep 32-bit alignment */ + bytes = ALIGN(offset + len, 4); + if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); +#endif + + phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); + if (dma_mapping_error(&dev->dev, phys)) { + dev_kfree_skb(skb); +#ifndef __ARMEB__ + kfree(mem); +#endif + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + n = queue_get_desc(txreadyq, port, 1); + BUG_ON(n < 0); + desc = tx_desc_ptr(port, n); + +#ifdef __ARMEB__ + port->tx_buff_tab[n] = skb; +#else + port->tx_buff_tab[n] = mem; +#endif + desc->data = phys + offset; + desc->buf_len = desc->pkt_len = len; + + /* NPE firmware pads short frames with zeros internally */ + wmb(); + queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); + + if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ +#if DEBUG_TX + printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); +#endif + netif_stop_queue(dev); + /* we could miss TX ready interrupt */ + /* really empty in fact */ + if (!qmgr_stat_below_low_watermark(txreadyq)) { +#if DEBUG_TX + printk(KERN_DEBUG "%s: eth_xmit ready again\n", + dev->name); +#endif + netif_wake_queue(dev); + } + } + +#if DEBUG_TX + printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name); +#endif + + ixp_tx_timestamp(port, skb); + skb_tx_timestamp(skb); + +#ifndef __ARMEB__ + dev_kfree_skb(skb); +#endif + return NETDEV_TX_OK; +} + + +static void eth_set_mcast_list(struct net_device *dev) +{ + struct port *port = netdev_priv(dev); + struct netdev_hw_addr *ha; + u8 diffs[ETH_ALEN], *addr; + int i; + static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; + + if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) { + for (i = 0; i < ETH_ALEN; i++) { + __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); + __raw_writel(allmulti[i], 
&port->regs->mcast_mask[i]); + } + __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN, + &port->regs->rx_control[0]); + return; + } + + if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) { + __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, + &port->regs->rx_control[0]); + return; + } + + eth_zero_addr(diffs); + + addr = NULL; + netdev_for_each_mc_addr(ha, dev) { + if (!addr) + addr = ha->addr; /* first MAC address */ + for (i = 0; i < ETH_ALEN; i++) + diffs[i] |= addr[i] ^ ha->addr[i]; + } + + for (i = 0; i < ETH_ALEN; i++) { + __raw_writel(addr[i], &port->regs->mcast_addr[i]); + __raw_writel(~diffs[i], &port->regs->mcast_mask[i]); + } + + __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN, + &port->regs->rx_control[0]); +} + + +static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) +{ + struct port *port = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + if (cpu_is_ixp46x()) { + if (cmd == SIOCSHWTSTAMP) + return hwtstamp_set(dev, req); + if (cmd == SIOCGHWTSTAMP) + return hwtstamp_get(dev, req); + } + + return phy_mii_ioctl(port->phydev, req, cmd); +} + +/* ethtool support */ + +static void ixp4xx_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct port *port = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u", + port->firmware[0], port->firmware[1], + port->firmware[2], port->firmware[3]); + strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); +} + +static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct port *port = netdev_priv(dev); + return phy_ethtool_gset(port->phydev, cmd); +} + +static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct port *port = netdev_priv(dev); + return phy_ethtool_sset(port->phydev, cmd); +} + +static int ixp4xx_nway_reset(struct net_device *dev) +{ + struct port *port = netdev_priv(dev); + return phy_start_aneg(port->phydev); +} + +int ixp46x_phc_index = -1; +EXPORT_SYMBOL_GPL(ixp46x_phc_index); + +static int ixp4xx_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + if (!cpu_is_ixp46x()) { + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + return 0; + } + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->phc_index = ixp46x_phc_index; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); + return 0; +} + +static const struct ethtool_ops ixp4xx_ethtool_ops = { + .get_drvinfo = ixp4xx_get_drvinfo, + .get_settings = ixp4xx_get_settings, + .set_settings = ixp4xx_set_settings, + .nway_reset = ixp4xx_nway_reset, + .get_link = ethtool_op_get_link, + .get_ts_info = ixp4xx_get_ts_info, +}; + + +static int request_queues(struct port *port) +{ + int err; + + err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, + "%s:RX-free", port->netdev->name); + if (err) + return err; + + err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, + "%s:RX", port->netdev->name); + if (err) + goto rel_rxfree; + + err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, + "%s:TX", port->netdev->name); + if (err) + goto rel_rx; + + err = 
qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, + "%s:TX-ready", port->netdev->name); + if (err) + goto rel_tx; + + /* TX-done queue handles skbs sent out by the NPEs */ + if (!ports_open) { + err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0, + "%s:TX-done", DRV_NAME); + if (err) + goto rel_txready; + } + return 0; + +rel_txready: + qmgr_release_queue(port->plat->txreadyq); +rel_tx: + qmgr_release_queue(TX_QUEUE(port->id)); +rel_rx: + qmgr_release_queue(port->plat->rxq); +rel_rxfree: + qmgr_release_queue(RXFREE_QUEUE(port->id)); + printk(KERN_DEBUG "%s: unable to request hardware queues\n", + port->netdev->name); + return err; +} + +static void release_queues(struct port *port) +{ + qmgr_release_queue(RXFREE_QUEUE(port->id)); + qmgr_release_queue(port->plat->rxq); + qmgr_release_queue(TX_QUEUE(port->id)); + qmgr_release_queue(port->plat->txreadyq); + + if (!ports_open) + qmgr_release_queue(TXDONE_QUEUE); +} + +static int init_queues(struct port *port) +{ + int i; + + if (!ports_open) { + dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev, + POOL_ALLOC_SIZE, 32, 0); + if (!dma_pool) + return -ENOMEM; + } + + if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, + &port->desc_tab_phys))) + return -ENOMEM; + memset(port->desc_tab, 0, POOL_ALLOC_SIZE); + memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ + memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); + + /* Setup RX buffers */ + for (i = 0; i < RX_DESCS; i++) { + struct desc *desc = rx_desc_ptr(port, i); + buffer_t *buff; /* skb or kmalloc()ated memory */ + void *data; +#ifdef __ARMEB__ + if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE))) + return -ENOMEM; + data = buff->data; +#else + if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL))) + return -ENOMEM; + data = buff; +#endif + desc->buf_len = MAX_MRU; + desc->data = dma_map_single(&port->netdev->dev, data, + RX_BUFF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&port->netdev->dev, desc->data)) { + free_buffer(buff); + return -EIO; + } + desc->data += NET_IP_ALIGN; + port->rx_buff_tab[i] = buff; + } + + return 0; +} + +static void destroy_queues(struct port *port) +{ + int i; + + if (port->desc_tab) { + for (i = 0; i < RX_DESCS; i++) { + struct desc *desc = rx_desc_ptr(port, i); + buffer_t *buff = port->rx_buff_tab[i]; + if (buff) { + dma_unmap_single(&port->netdev->dev, + desc->data - NET_IP_ALIGN, + RX_BUFF_SIZE, DMA_FROM_DEVICE); + free_buffer(buff); + } + } + for (i = 0; i < TX_DESCS; i++) { + struct desc *desc = tx_desc_ptr(port, i); + buffer_t *buff = port->tx_buff_tab[i]; + if (buff) { + dma_unmap_tx(port, desc); + free_buffer(buff); + } + } + dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys); + port->desc_tab = NULL; + } + + if (!ports_open && dma_pool) { + dma_pool_destroy(dma_pool); + dma_pool = NULL; + } +} + +static int eth_open(struct net_device *dev) +{ + struct port *port = netdev_priv(dev); + struct npe *npe = port->npe; + struct msg msg; + int i, err; + + if (!npe_running(npe)) { + err = npe_load_firmware(npe, npe_name(npe), &dev->dev); + if (err) + return err; + + if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) { + printk(KERN_ERR "%s: %s not responding\n", dev->name, + npe_name(npe)); + return -EIO; + } + port->firmware[0] = msg.byte4; + port->firmware[1] = msg.byte5; + port->firmware[2] = msg.byte6; + port->firmware[3] = msg.byte7; + } + + memset(&msg, 0, sizeof(msg)); + msg.cmd = NPE_VLAN_SETRXQOSENTRY; + msg.eth_id = port->id; + msg.byte5 = port->plat->rxq | 0x80; + msg.byte7 = 
port->plat->rxq << 4; + for (i = 0; i < 8; i++) { + msg.byte3 = i; + if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ")) + return -EIO; + } + + msg.cmd = NPE_EDB_SETPORTADDRESS; + msg.eth_id = PHYSICAL_ID(port->id); + msg.byte2 = dev->dev_addr[0]; + msg.byte3 = dev->dev_addr[1]; + msg.byte4 = dev->dev_addr[2]; + msg.byte5 = dev->dev_addr[3]; + msg.byte6 = dev->dev_addr[4]; + msg.byte7 = dev->dev_addr[5]; + if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC")) + return -EIO; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = NPE_FW_SETFIREWALLMODE; + msg.eth_id = port->id; + if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) + return -EIO; + + if ((err = request_queues(port)) != 0) + return err; + + if ((err = init_queues(port)) != 0) { + destroy_queues(port); + release_queues(port); + return err; + } + + port->speed = 0; /* force "link up" message */ + phy_start(port->phydev); + + for (i = 0; i < ETH_ALEN; i++) + __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); + __raw_writel(0x08, &port->regs->random_seed); + __raw_writel(0x12, &port->regs->partial_empty_threshold); + __raw_writel(0x30, &port->regs->partial_full_threshold); + __raw_writel(0x08, &port->regs->tx_start_bytes); + __raw_writel(0x15, &port->regs->tx_deferral); + __raw_writel(0x08, &port->regs->tx_2part_deferral[0]); + __raw_writel(0x07, &port->regs->tx_2part_deferral[1]); + __raw_writel(0x80, &port->regs->slot_time); + __raw_writel(0x01, &port->regs->int_clock_threshold); + + /* Populate queues with buffers, no failure after this point */ + for (i = 0; i < TX_DESCS; i++) + queue_put_desc(port->plat->txreadyq, + tx_desc_phys(port, i), tx_desc_ptr(port, i)); + + for (i = 0; i < RX_DESCS; i++) + queue_put_desc(RXFREE_QUEUE(port->id), + rx_desc_phys(port, i), rx_desc_ptr(port, i)); + + __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]); + __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]); + __raw_writel(0, &port->regs->rx_control[1]); + __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]); + + napi_enable(&port->napi); + eth_set_mcast_list(dev); + netif_start_queue(dev); + + qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, + eth_rx_irq, dev); + if (!ports_open) { + qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY, + eth_txdone_irq, NULL); + qmgr_enable_irq(TXDONE_QUEUE); + } + ports_open++; + /* we may already have RX data, enables IRQ */ + napi_schedule(&port->napi); + return 0; +} + +static int eth_close(struct net_device *dev) +{ + struct port *port = netdev_priv(dev); + struct msg msg; + int buffs = RX_DESCS; /* allocated RX buffers */ + int i; + + ports_open--; + qmgr_disable_irq(port->plat->rxq); + napi_disable(&port->napi); + netif_stop_queue(dev); + + while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0) + buffs--; + + memset(&msg, 0, sizeof(msg)); + msg.cmd = NPE_SETLOOPBACK_MODE; + msg.eth_id = port->id; + msg.byte3 = 1; + if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK")) + printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name); + + i = 0; + do { /* drain RX buffers */ + while (queue_get_desc(port->plat->rxq, port, 0) >= 0) + buffs--; + if (!buffs) + break; + if (qmgr_stat_empty(TX_QUEUE(port->id))) { + /* we have to inject some packet */ + struct desc *desc; + u32 phys; + int n = queue_get_desc(port->plat->txreadyq, port, 1); + BUG_ON(n < 0); + desc = tx_desc_ptr(port, n); + phys = tx_desc_phys(port, n); + desc->buf_len = desc->pkt_len = 1; + wmb(); + queue_put_desc(TX_QUEUE(port->id), phys, desc); + } + udelay(1); + } 
while (++i < MAX_CLOSE_WAIT); + + if (buffs) + printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)" + " left in NPE\n", dev->name, buffs); +#if DEBUG_CLOSE + if (!buffs) + printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i); +#endif + + buffs = TX_DESCS; + while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0) + buffs--; /* cancel TX */ + + i = 0; + do { + while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0) + buffs--; + if (!buffs) + break; + } while (++i < MAX_CLOSE_WAIT); + + if (buffs) + printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) " + "left in NPE\n", dev->name, buffs); +#if DEBUG_CLOSE + if (!buffs) + printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i); +#endif + + msg.byte3 = 0; + if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK")) + printk(KERN_CRIT "%s: unable to disable loopback\n", + dev->name); + + phy_stop(port->phydev); + + if (!ports_open) + qmgr_disable_irq(TXDONE_QUEUE); + destroy_queues(port); + release_queues(port); + return 0; +} + +static const struct net_device_ops ixp4xx_netdev_ops = { + .ndo_open = eth_open, + .ndo_stop = eth_close, + .ndo_start_xmit = eth_xmit, + .ndo_set_rx_mode = eth_set_mcast_list, + .ndo_do_ioctl = eth_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static int eth_init_one(struct platform_device *pdev) +{ + struct port *port; + struct net_device *dev; + struct eth_plat_info *plat = dev_get_platdata(&pdev->dev); + u32 regs_phys; + char phy_id[MII_BUS_ID_SIZE + 3]; + int err; + + if (!(dev = alloc_etherdev(sizeof(struct port)))) + return -ENOMEM; + + SET_NETDEV_DEV(dev, &pdev->dev); + port = netdev_priv(dev); + port->netdev = dev; + port->id = pdev->id; + + switch (port->id) { + case IXP4XX_ETH_NPEA: + port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT; + regs_phys = IXP4XX_EthA_BASE_PHYS; + break; + case IXP4XX_ETH_NPEB: + port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; + regs_phys = IXP4XX_EthB_BASE_PHYS; + break; + case IXP4XX_ETH_NPEC: + port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; + regs_phys = IXP4XX_EthC_BASE_PHYS; + break; + default: + err = -ENODEV; + goto err_free; + } + + dev->netdev_ops = &ixp4xx_netdev_ops; + dev->ethtool_ops = &ixp4xx_ethtool_ops; + dev->tx_queue_len = 100; + + netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT); + + if (!(port->npe = npe_request(NPE_ID(port->id)))) { + err = -EIO; + goto err_free; + } + + port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name); + if (!port->mem_res) { + err = -EBUSY; + goto err_npe_rel; + } + + port->plat = plat; + npe_port_tab[NPE_ID(port->id)] = port; + memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN); + + platform_set_drvdata(pdev, dev); + + __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET, + &port->regs->core_control); + udelay(50); + __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); + udelay(50); + + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, + mdio_bus->id, plat->phy); + port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(port->phydev)) { + err = PTR_ERR(port->phydev); + goto err_free_mem; + } + + port->phydev->irq = PHY_POLL; + + if ((err = register_netdev(dev))) + goto err_phy_dis; + + printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy, + npe_name(port->npe)); + + return 0; + +err_phy_dis: + phy_disconnect(port->phydev); +err_free_mem: + npe_port_tab[NPE_ID(port->id)] = NULL; + 
release_resource(port->mem_res); +err_npe_rel: + npe_release(port->npe); +err_free: + free_netdev(dev); + return err; +} + +static int eth_remove_one(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct port *port = netdev_priv(dev); + + unregister_netdev(dev); + phy_disconnect(port->phydev); + npe_port_tab[NPE_ID(port->id)] = NULL; + npe_release(port->npe); + release_resource(port->mem_res); + free_netdev(dev); + return 0; +} + +static struct platform_driver ixp4xx_eth_driver = { + .driver.name = DRV_NAME, + .probe = eth_init_one, + .remove = eth_remove_one, +}; + +static int __init eth_init_module(void) +{ + int err; + if ((err = ixp4xx_mdio_register())) + return err; + return platform_driver_register(&ixp4xx_eth_driver); +} + +static void __exit eth_cleanup_module(void) +{ + platform_driver_unregister(&ixp4xx_eth_driver); + ixp4xx_mdio_remove(); +} + +MODULE_AUTHOR("Krzysztof Halasa"); +MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:ixp4xx_eth"); +module_init(eth_init_module); +module_exit(eth_cleanup_module); -- cgit v1.2.3-54-g00ecf
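For reference, the logical-port / NPE / queue mapping documented at the top of ixp4xx_eth.c, and the "bits 5 -> 31 are the physical descriptor address" rule that queue_get_desc() and eth_txdone_irq() rely on (they mask entries with ~0x1F), can be exercised with a small stand-alone C sketch that mirrors the driver's NPE_ID()/PHYSICAL_ID()/TX_QUEUE()/RXFREE_QUEUE() macros. The sample queue-entry value below is made up purely for illustration.

/* Stand-alone illustration of the IXP4xx port-ID and queue-entry layout
 * described in the ixp4xx_eth.c header comment.  Example values only.
 */
#include <stdio.h>

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

int main(void)
{
	static const unsigned ports[] = { 0x00, 0x10, 0x20 };
	unsigned entry = 0x0c0000a1;	/* made-up TX-done queue entry */
	unsigned i;

	for (i = 0; i < 3; i++)
		printf("logical 0x%02x -> NPE %u, physical %u, TX queue %u, RX-free queue %u\n",
		       ports[i], NPE_ID(ports[i]), PHYSICAL_ID(ports[i]),
		       TX_QUEUE(ports[i]), RXFREE_QUEUE(ports[i]));

	/* TX-done entries carry the NPE ID in bits 0-1 and the descriptor
	 * address in bits 5-31, which is how eth_txdone_irq() decodes them. */
	printf("entry 0x%08x: NPE %u, descriptor at 0x%08x, TX-done queue %u\n",
	       entry, entry & 3, entry & ~0x1Fu, TXDONE_QUEUE);
	return 0;
}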
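ixp4xx_mdio_cmd() packs the PHY address and register number into mdio_command[2] and mdio_command[3], using 0x80 in command[3] as the GO/busy bit that is then polled for up to MAX_MDIO_RETRIES microseconds. A minimal user-space sketch of that bit packing follows; the PHY address and register number are arbitrary example values.

/* Illustration of how ixp4xx_mdio_cmd() splits (phy_id, location) across
 * the two MDIO command registers; phy_id/location values are examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned phy_id = 0x01, location = 0x02, write = 0;
	unsigned cmd2, cmd3;

	/* command[2]: low PHY address bits in 7:5, register number in 4:0 */
	cmd2 = ((phy_id << 5) | location) & 0xFF;
	/* command[3]: remaining PHY address bits, read/write flag, GO bit */
	cmd3 = (phy_id >> 3) | (write << 2) | 0x80;

	printf("mdio_command[2] = 0x%02x, mdio_command[3] = 0x%02x\n", cmd2, cmd3);
	return 0;
}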
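On IXP46x parts, hwtstamp_set() accepts only HWTSTAMP_TX_OFF/HWTSTAMP_TX_ON and the PTP v1 L4 Sync/Delay_Req receive filters. A minimal sketch of how an application could request those settings through the standard SIOCSHWTSTAMP ioctl is shown below; the interface name "eth0" is only an example, and unsupported modes are rejected by the driver.

/* User-space sketch: request hardware TX timestamps and PTP v1 Sync RX
 * filtering (PTP_SLAVE_MODE in the driver) via SIOCSHWTSTAMP.
 * "eth0" is an example interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");	/* e.g. unsupported filter or CPU */

	close(fd);
	return 0;
}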
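On little-endian ARM builds the driver copies frames through kmalloc()ed bounce buffers with memcpy_swab32() (see eth_xmit() and eth_poll()), byte-swapping every 32-bit word, apparently so that the NPE sees the frame in big-endian word order. A user-space stand-in for that copy, with made-up data, shows the transformation.

/* Stand-in for the kernel's memcpy_swab32()/swab32() pair used by the
 * little-endian code paths; the data words are illustrative only.
 */
#include <stdio.h>

static unsigned int swab32_example(unsigned int x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

static void memcpy_swab32_example(unsigned int *dest, const unsigned int *src,
				  int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		dest[i] = swab32_example(src[i]);
}

int main(void)
{
	unsigned int src[2] = { 0x11223344, 0x55667788 };
	unsigned int dst[2];

	memcpy_swab32_example(dst, src, 2);
	printf("0x%08x 0x%08x -> 0x%08x 0x%08x\n", src[0], src[1], dst[0], dst[1]);
	return 0;
}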